path: root/include/linux/platform_data/syscon.h
blob: 2c089dd3e2bda3baf5cef201ef43bca709e12c0b (plain)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PLATFORM_DATA_SYSCON_H
#define PLATFORM_DATA_SYSCON_H

struct syscon_platform_data {
	const char *label;
};

#endif
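
For context, a minimal sketch of how this platform data might be consumed, assuming a board-file style registration against the generic "syscon" driver (drivers/mfd/syscon.c), which can use the label to name the resulting regmap. The label value and init function below are illustrative placeholders, and a complete setup would also supply the controller's MMIO resource:

/* Hypothetical board-file sketch; "board-sysctrl" is a made-up label. */
#include <linux/platform_device.h>
#include <linux/platform_data/syscon.h>

static const struct syscon_platform_data board_syscon_pdata = {
	.label = "board-sysctrl",
};

static int __init board_syscon_init(void)
{
	struct platform_device *pdev;

	/*
	 * Hands the pdata to the "syscon" platform driver; a real
	 * registration would also provide the register resource.
	 */
	pdev = platform_device_register_data(NULL, "syscon",
					     PLATFORM_DEVID_NONE,
					     &board_syscon_pdata,
					     sizeof(board_syscon_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}
device_initcall(board_syscon_init);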
Russell King's ARM Linux kernel tree
path: root/drivers
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 4
-rw-r--r-- drivers/Makefile | 7
-rw-r--r-- drivers/accel/Kconfig | 2
-rw-r--r-- drivers/accel/Makefile | 2
-rw-r--r-- drivers/accel/amdxdna/Makefile | 2
-rw-r--r-- drivers/accel/amdxdna/TODO | 1
-rw-r--r-- drivers/accel/amdxdna/aie2_ctx.c | 258
-rw-r--r-- drivers/accel/amdxdna/aie2_error.c | 95
-rw-r--r-- drivers/accel/amdxdna/aie2_message.c | 668
-rw-r--r-- drivers/accel/amdxdna/aie2_msg_priv.h | 88
-rw-r--r-- drivers/accel/amdxdna/aie2_pci.c | 474
-rw-r--r-- drivers/accel/amdxdna/aie2_pci.h | 59
-rw-r--r-- drivers/accel/amdxdna/aie2_smu.c | 49
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ctx.c | 156
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ctx.h | 55
-rw-r--r-- drivers/accel/amdxdna/amdxdna_error.h | 59
-rw-r--r-- drivers/accel/amdxdna/amdxdna_gem.c | 148
-rw-r--r-- drivers/accel/amdxdna/amdxdna_gem.h | 6
-rw-r--r-- drivers/accel/amdxdna/amdxdna_mailbox.c | 14
-rw-r--r-- drivers/accel/amdxdna/amdxdna_mailbox_helper.h | 6
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pci_drv.c | 155
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pci_drv.h | 10
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pm.c | 94
-rw-r--r-- drivers/accel/amdxdna/amdxdna_pm.h | 18
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ubuf.c | 232
-rw-r--r-- drivers/accel/amdxdna/amdxdna_ubuf.h | 19
-rw-r--r-- drivers/accel/amdxdna/npu1_regs.c | 8
-rw-r--r-- drivers/accel/amdxdna/npu2_regs.c | 2
-rw-r--r-- drivers/accel/amdxdna/npu4_regs.c | 12
-rw-r--r-- drivers/accel/amdxdna/npu5_regs.c | 2
-rw-r--r-- drivers/accel/amdxdna/npu6_regs.c | 2
-rw-r--r-- drivers/accel/ethosu/Kconfig | 11
-rw-r--r-- drivers/accel/ethosu/Makefile | 4
-rw-r--r-- drivers/accel/ethosu/ethosu_device.h | 197
-rw-r--r-- drivers/accel/ethosu/ethosu_drv.c | 403
-rw-r--r-- drivers/accel/ethosu/ethosu_drv.h | 15
-rw-r--r-- drivers/accel/ethosu/ethosu_gem.c | 704
-rw-r--r-- drivers/accel/ethosu/ethosu_gem.h | 46
-rw-r--r-- drivers/accel/ethosu/ethosu_job.c | 497
-rw-r--r-- drivers/accel/ethosu/ethosu_job.h | 40
-rw-r--r-- drivers/accel/habanalabs/Kconfig | 23
-rw-r--r-- drivers/accel/habanalabs/common/Makefile | 5
-rw-r--r-- drivers/accel/habanalabs/common/debugfs.c | 324
-rw-r--r-- drivers/accel/habanalabs/common/device.c | 23
-rw-r--r-- drivers/accel/habanalabs/common/habanalabs.h | 56
-rw-r--r-- drivers/accel/habanalabs/common/habanalabs_ioctl.c | 6
-rw-r--r-- drivers/accel/habanalabs/common/hldio.c | 437
-rw-r--r-- drivers/accel/habanalabs/common/hldio.h | 146
-rw-r--r-- drivers/accel/habanalabs/common/memory.c | 9
-rw-r--r-- drivers/accel/habanalabs/common/memory_mgr.c | 5
-rw-r--r-- drivers/accel/habanalabs/common/sysfs.c | 11
-rw-r--r-- drivers/accel/habanalabs/gaudi/gaudi.c | 19
-rw-r--r-- drivers/accel/habanalabs/gaudi2/gaudi2.c | 386
-rw-r--r-- drivers/accel/habanalabs/gaudi2/gaudi2P.h | 9
-rw-r--r-- drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c | 2
-rw-r--r-- drivers/accel/ivpu/Makefile | 1
-rw-r--r-- drivers/accel/ivpu/ivpu_debugfs.c | 38
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.c | 18
-rw-r--r-- drivers/accel/ivpu/ivpu_drv.h | 5
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.c | 229
-rw-r--r-- drivers/accel/ivpu/ivpu_fw.h | 16
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.c | 161
-rw-r--r-- drivers/accel/ivpu/ivpu_gem.h | 22
-rw-r--r-- drivers/accel/ivpu/ivpu_gem_userptr.c | 213
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.c | 59
-rw-r--r-- drivers/accel/ivpu/ivpu_hw.h | 10
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs.c | 23
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h | 3
-rw-r--r-- drivers/accel/ivpu/ivpu_hw_ip.c | 10
-rw-r--r-- drivers/accel/ivpu/ivpu_ipc.c | 3
-rw-r--r-- drivers/accel/ivpu/ivpu_job.c | 257
-rw-r--r-- drivers/accel/ivpu/ivpu_job.h | 49
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu.c | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu_context.c | 9
-rw-r--r-- drivers/accel/ivpu/ivpu_mmu_context.h | 2
-rw-r--r-- drivers/accel/ivpu/ivpu_ms.c | 25
-rw-r--r-- drivers/accel/ivpu/ivpu_pm.c | 18
-rw-r--r-- drivers/accel/ivpu/ivpu_sysfs.c | 3
-rw-r--r-- drivers/accel/ivpu/vpu_jsm_api.h | 653
-rw-r--r-- drivers/accel/qaic/Kconfig | 1
-rw-r--r-- drivers/accel/qaic/Makefile | 2
-rw-r--r-- drivers/accel/qaic/qaic.h | 42
-rw-r--r-- drivers/accel/qaic/qaic_control.c | 27
-rw-r--r-- drivers/accel/qaic/qaic_data.c | 176
-rw-r--r-- drivers/accel/qaic/qaic_debugfs.c | 5
-rw-r--r-- drivers/accel/qaic/qaic_drv.c | 119
-rw-r--r-- drivers/accel/qaic/qaic_ras.c | 6
-rw-r--r-- drivers/accel/qaic/qaic_ssr.c | 815
-rw-r--r-- drivers/accel/qaic/qaic_ssr.h | 17
-rw-r--r-- drivers/accel/qaic/qaic_sysfs.c | 109
-rw-r--r-- drivers/accel/qaic/qaic_timesync.c | 9
-rw-r--r-- drivers/accel/qaic/qaic_timesync.h | 3
-rw-r--r-- drivers/accel/qaic/sahara.c | 164
-rw-r--r-- drivers/accel/rocket/Kconfig | 24
-rw-r--r-- drivers/accel/rocket/Makefile | 10
-rw-r--r-- drivers/accel/rocket/rocket_core.c | 110
-rw-r--r-- drivers/accel/rocket/rocket_core.h | 64
-rw-r--r-- drivers/accel/rocket/rocket_device.c | 60
-rw-r--r-- drivers/accel/rocket/rocket_device.h | 30
-rw-r--r-- drivers/accel/rocket/rocket_drv.c | 290
-rw-r--r-- drivers/accel/rocket/rocket_drv.h | 32
-rw-r--r-- drivers/accel/rocket/rocket_gem.c | 182
-rw-r--r-- drivers/accel/rocket/rocket_gem.h | 34
-rw-r--r-- drivers/accel/rocket/rocket_job.c | 637
-rw-r--r-- drivers/accel/rocket/rocket_job.h | 52
-rw-r--r-- drivers/accel/rocket/rocket_registers.h | 4404
-rw-r--r-- drivers/acpi/Kconfig | 6
-rw-r--r-- drivers/acpi/acpi_dbg.c | 26
-rw-r--r-- drivers/acpi/acpi_mrrm.c | 46
-rw-r--r-- drivers/acpi/acpi_processor.c | 4
-rw-r--r-- drivers/acpi/acpi_tad.c | 74
-rw-r--r-- drivers/acpi/acpi_video.c | 4
-rw-r--r-- drivers/acpi/acpica/acdebug.h | 2
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 2
-rw-r--r-- drivers/acpi/acpica/acpredef.h | 3
-rw-r--r-- drivers/acpi/acpica/dsmethod.c | 21
-rw-r--r-- drivers/acpi/acpica/evglock.c | 4
-rw-r--r-- drivers/acpi/acpica/nswalk.c | 9
-rw-r--r-- drivers/acpi/acpica/psopinfo.c | 4
-rw-r--r-- drivers/acpi/acpica/tbprint.c | 14
-rw-r--r-- drivers/acpi/apei/einj-core.c | 115
-rw-r--r-- drivers/acpi/apei/erst-dbg.c | 8
-rw-r--r-- drivers/acpi/apei/ghes.c | 71
-rw-r--r-- drivers/acpi/arm64/Kconfig | 3
-rw-r--r-- drivers/acpi/arm64/Makefile | 1
-rw-r--r-- drivers/acpi/arm64/gtdt.c | 63
-rw-r--r-- drivers/acpi/arm64/mpam.c | 411
-rw-r--r-- drivers/acpi/battery.c | 55
-rw-r--r-- drivers/acpi/button.c | 4
-rw-r--r-- drivers/acpi/cppc_acpi.c | 24
-rw-r--r-- drivers/acpi/device_sysfs.c | 2
-rw-r--r-- drivers/acpi/dptf/Makefile | 1
-rw-r--r-- drivers/acpi/dptf/dptf_pch_fivr.c | 2
-rw-r--r-- drivers/acpi/dptf/dptf_power.c | 2
-rw-r--r-- drivers/acpi/dptf/int340x_thermal.c | 94
-rw-r--r-- drivers/acpi/ec.c | 3
-rw-r--r-- drivers/acpi/fan.h | 48
-rw-r--r-- drivers/acpi/fan_attr.c | 2
-rw-r--r-- drivers/acpi/fan_core.c | 277
-rw-r--r-- drivers/acpi/fan_hwmon.c | 32
-rw-r--r-- drivers/acpi/internal.h | 2
-rw-r--r-- drivers/acpi/irq.c | 19
-rw-r--r-- drivers/acpi/nfit/core.c | 2
-rw-r--r-- drivers/acpi/numa/hmat.c | 91
-rw-r--r-- drivers/acpi/numa/srat.c | 2
-rw-r--r-- drivers/acpi/osl.c | 6
-rw-r--r-- drivers/acpi/pci_irq.c | 3
-rw-r--r-- drivers/acpi/pci_link.c | 10
-rw-r--r-- drivers/acpi/platform_profile.c | 7
-rw-r--r-- drivers/acpi/power.c | 90
-rw-r--r-- drivers/acpi/pptt.c | 280
-rw-r--r-- drivers/acpi/prmt.c | 25
-rw-r--r-- drivers/acpi/processor_core.c | 2
-rw-r--r-- drivers/acpi/processor_idle.c | 72
-rw-r--r-- drivers/acpi/processor_thermal.c | 52
-rw-r--r-- drivers/acpi/property.c | 333
-rw-r--r-- drivers/acpi/resource.c | 10
-rw-r--r-- drivers/acpi/riscv/Kconfig | 7
-rw-r--r-- drivers/acpi/riscv/Makefile | 1
-rw-r--r-- drivers/acpi/riscv/init.c | 2
-rw-r--r-- drivers/acpi/riscv/init.h | 1
-rw-r--r-- drivers/acpi/riscv/irq.c | 75
-rw-r--r-- drivers/acpi/riscv/rimt.c | 520
-rw-r--r-- drivers/acpi/sbs.c | 2
-rw-r--r-- drivers/acpi/scan.c | 12
-rw-r--r-- drivers/acpi/sleep.c | 14
-rw-r--r-- drivers/acpi/sleep.h | 3
-rw-r--r-- drivers/acpi/spcr.c | 13
-rw-r--r-- drivers/acpi/tables.c | 2
-rw-r--r-- drivers/acpi/thermal.c | 11
-rw-r--r-- drivers/acpi/video_detect.c | 8
-rw-r--r-- drivers/acpi/x86/lpss.c | 2
-rw-r--r-- drivers/acpi/x86/s2idle.c | 65
-rw-r--r-- drivers/amba/Kconfig | 2
-rw-r--r-- drivers/amba/bus.c | 9
-rw-r--r-- drivers/amba/tegra-ahb.c | 1
-rw-r--r-- drivers/android/Kconfig | 16
-rw-r--r-- drivers/android/Makefile | 3
-rw-r--r-- drivers/android/binder.c | 200
-rw-r--r-- drivers/android/binder/Makefile | 9
-rw-r--r-- drivers/android/binder/allocation.rs | 602
-rw-r--r-- drivers/android/binder/context.rs | 180
-rw-r--r-- drivers/android/binder/deferred_close.rs | 204
-rw-r--r-- drivers/android/binder/defs.rs | 182
-rw-r--r-- drivers/android/binder/error.rs | 100
-rw-r--r-- drivers/android/binder/freeze.rs | 398
-rw-r--r-- drivers/android/binder/node.rs | 1131
-rw-r--r-- drivers/android/binder/node/wrapper.rs | 78
-rw-r--r-- drivers/android/binder/page_range.rs | 734
-rw-r--r-- drivers/android/binder/page_range_helper.c | 24
-rw-r--r-- drivers/android/binder/page_range_helper.h | 15
-rw-r--r-- drivers/android/binder/process.rs | 1745
-rw-r--r-- drivers/android/binder/range_alloc/array.rs | 251
-rw-r--r-- drivers/android/binder/range_alloc/mod.rs | 329
-rw-r--r-- drivers/android/binder/range_alloc/tree.rs | 488
-rw-r--r-- drivers/android/binder/rust_binder.h | 23
-rw-r--r-- drivers/android/binder/rust_binder_events.c | 59
-rw-r--r-- drivers/android/binder/rust_binder_events.h | 36
-rw-r--r-- drivers/android/binder/rust_binder_internal.h | 87
-rw-r--r-- drivers/android/binder/rust_binder_main.rs | 611
-rw-r--r-- drivers/android/binder/rust_binderfs.c | 795
-rw-r--r-- drivers/android/binder/stats.rs | 89
-rw-r--r-- drivers/android/binder/thread.rs | 1596
-rw-r--r-- drivers/android/binder/trace.rs | 16
-rw-r--r-- drivers/android/binder/transaction.rs | 456
-rw-r--r-- drivers/android/binder_internal.h | 4
-rw-r--r-- drivers/android/binder_netlink.c | 32
-rw-r--r-- drivers/android/binder_netlink.h | 21
-rw-r--r-- drivers/android/binder_trace.h | 37
-rw-r--r-- drivers/android/binderfs.c | 93
-rw-r--r-- drivers/android/dbitmap.h | 1
-rw-r--r-- drivers/android/tests/binder_alloc_kunit.c | 2
-rw-r--r-- drivers/ata/libata-acpi.c | 67
-rw-r--r-- drivers/ata/libata-core.c | 38
-rw-r--r-- drivers/ata/libata-scsi.c | 16
-rw-r--r-- drivers/ata/libata-sff.c | 9
-rw-r--r-- drivers/ata/libata.h | 4
-rw-r--r-- drivers/ata/pata_it821x.c | 5
-rw-r--r-- drivers/ata/pata_pcmcia.c | 1
-rw-r--r-- drivers/atm/fore200e.c | 2
-rw-r--r-- drivers/auxdisplay/line-display.c | 240
-rw-r--r-- drivers/auxdisplay/line-display.h | 4
-rw-r--r-- drivers/base/Kconfig | 6
-rw-r--r-- drivers/base/arch_topology.c | 98
-rw-r--r-- drivers/base/auxiliary.c | 25
-rw-r--r-- drivers/base/base.h | 25
-rw-r--r-- drivers/base/bus.c | 41
-rw-r--r-- drivers/base/core.c | 33
-rw-r--r-- drivers/base/cpu.c | 28
-rw-r--r-- drivers/base/dd.c | 12
-rw-r--r-- drivers/base/devcoredump.c | 136
-rw-r--r-- drivers/base/devres.c | 46
-rw-r--r-- drivers/base/devtmpfs.c | 30
-rw-r--r-- drivers/base/faux.c | 1
-rw-r--r-- drivers/base/firmware_loader/Kconfig | 2
-rw-r--r-- drivers/base/firmware_loader/main.c | 71
-rw-r--r-- drivers/base/firmware_loader/sysfs.c | 10
-rw-r--r-- drivers/base/firmware_loader/sysfs_upload.c | 6
-rw-r--r-- drivers/base/memory.c | 82
-rw-r--r-- drivers/base/node.c | 140
-rw-r--r-- drivers/base/platform.c | 71
-rw-r--r-- drivers/base/power/Makefile | 1
-rw-r--r-- drivers/base/power/generic_ops.c | 85
-rw-r--r-- drivers/base/power/main.c | 79
-rw-r--r-- drivers/base/power/runtime-test.c | 249
-rw-r--r-- drivers/base/power/runtime.c | 45
-rw-r--r-- drivers/base/power/trace.c | 4
-rw-r--r-- drivers/base/power/wakeup.c | 24
-rw-r--r-- drivers/base/property.c | 2
-rw-r--r-- drivers/base/regmap/internal.h | 2
-rw-r--r-- drivers/base/regmap/regcache-flat.c | 107
-rw-r--r-- drivers/base/regmap/regcache-maple.c | 47
-rw-r--r-- drivers/base/regmap/regcache-rbtree.c | 31
-rw-r--r-- drivers/base/regmap/regcache.c | 17
-rw-r--r-- drivers/base/regmap/regmap-i3c.c | 9
-rw-r--r-- drivers/base/regmap/regmap-kunit.c | 22
-rw-r--r-- drivers/base/regmap/regmap-mmio.c | 1
-rw-r--r-- drivers/base/regmap/regmap-sdw-mbq.c | 26
-rw-r--r-- drivers/base/regmap/regmap-slimbus.c | 6
-rw-r--r-- drivers/base/regmap/regmap.c | 13
-rw-r--r-- drivers/base/swnode.c | 35
-rw-r--r-- drivers/base/syscore.c | 82
-rw-r--r-- drivers/bcma/main.c | 6
-rw-r--r-- drivers/block/Kconfig | 10
-rw-r--r-- drivers/block/Makefile | 4
-rw-r--r-- drivers/block/amiflop.c | 10
-rw-r--r-- drivers/block/aoe/aoeblk.c | 4
-rw-r--r-- drivers/block/aoe/aoecmd.c | 2
-rw-r--r-- drivers/block/aoe/aoemain.c | 2
-rw-r--r-- drivers/block/brd.c | 75
-rw-r--r-- drivers/block/drbd/drbd_bitmap.c | 10
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 1
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 14
-rw-r--r-- drivers/block/floppy.c | 61
-rw-r--r-- drivers/block/loop.c | 19
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 6
-rw-r--r-- drivers/block/nbd.c | 54
-rw-r--r-- drivers/block/null_blk/main.c | 83
-rw-r--r-- drivers/block/null_blk/null_blk.h | 3
-rw-r--r-- drivers/block/null_blk/zoned.c | 6
-rw-r--r-- drivers/block/ps3disk.c | 4
-rw-r--r-- drivers/block/rbd.c | 2
-rw-r--r-- drivers/block/rnbd/rnbd-clt.c | 6
-rw-r--r-- drivers/block/rnbd/rnbd-proto.h | 15
-rw-r--r-- drivers/block/rnull.rs | 80
-rw-r--r-- drivers/block/rnull/Kconfig | 13
-rw-r--r-- drivers/block/rnull/Makefile | 3
-rw-r--r-- drivers/block/rnull/configfs.rs | 263
-rw-r--r-- drivers/block/rnull/rnull.rs | 103
-rw-r--r-- drivers/block/sunvdc.c | 7
-rw-r--r-- drivers/block/swim.c | 4
-rw-r--r-- drivers/block/ublk_drv.c | 639
-rw-r--r-- drivers/block/virtio_blk.c | 32
-rw-r--r-- drivers/block/xen-blkfront.c | 4
-rw-r--r-- drivers/block/zloop.c | 165
-rw-r--r-- drivers/block/zram/zram_drv.c | 516
-rw-r--r-- drivers/block/zram/zram_drv.h | 2
-rw-r--r-- drivers/bluetooth/Kconfig | 7
-rw-r--r-- drivers/bluetooth/bpa10x.c | 6
-rw-r--r-- drivers/bluetooth/btbcm.c | 4
-rw-r--r-- drivers/bluetooth/btintel.c | 3
-rw-r--r-- drivers/bluetooth/btintel_pcie.c | 468
-rw-r--r-- drivers/bluetooth/btintel_pcie.h | 6
-rw-r--r-- drivers/bluetooth/btmtksdio.c | 15
-rw-r--r-- drivers/bluetooth/btmtkuart.c | 6
-rw-r--r-- drivers/bluetooth/btnxpuart.c | 6
-rw-r--r-- drivers/bluetooth/btrtl.c | 44
-rw-r--r-- drivers/bluetooth/btusb.c | 122
-rw-r--r-- drivers/bluetooth/h4_recv.h | 153
-rw-r--r-- drivers/bluetooth/hci_ag6xx.c | 2
-rw-r--r-- drivers/bluetooth/hci_aml.c | 2
-rw-r--r-- drivers/bluetooth/hci_ath.c | 2
-rw-r--r-- drivers/bluetooth/hci_bcm.c | 8
-rw-r--r-- drivers/bluetooth/hci_bcsp.c | 3
-rw-r--r-- drivers/bluetooth/hci_h4.c | 6
-rw-r--r-- drivers/bluetooth/hci_h5.c | 53
-rw-r--r-- drivers/bluetooth/hci_intel.c | 5
-rw-r--r-- drivers/bluetooth/hci_ll.c | 2
-rw-r--r-- drivers/bluetooth/hci_mrvl.c | 6
-rw-r--r-- drivers/bluetooth/hci_nokia.c | 4
-rw-r--r-- drivers/bluetooth/hci_qca.c | 2
-rw-r--r-- drivers/bluetooth/hci_uart.h | 10
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-bus.c | 9
-rw-r--r-- drivers/bus/fsl-mc/mc-sys.c | 2
-rw-r--r-- drivers/bus/mhi/ep/internal.h | 2
-rw-r--r-- drivers/bus/mhi/ep/main.c | 41
-rw-r--r-- drivers/bus/mhi/host/init.c | 5
-rw-r--r-- drivers/bus/mhi/host/internal.h | 3
-rw-r--r-- drivers/bus/mhi/host/main.c | 1
-rw-r--r-- drivers/bus/mhi/host/pci_generic.c | 111
-rw-r--r-- drivers/bus/mhi/host/pm.c | 29
-rw-r--r-- drivers/bus/mvebu-mbus.c | 16
-rw-r--r-- drivers/bus/stm32_rifsc.c | 597
-rw-r--r-- drivers/bus/sunxi-rsb.c | 2
-rw-r--r-- drivers/bus/ti-sysc.c | 11
-rw-r--r-- drivers/cache/Kconfig | 37
-rw-r--r-- drivers/cache/Makefile | 2
-rw-r--r-- drivers/cache/hisi_soc_hha.c | 194
-rw-r--r-- drivers/cache/sifive_ccache.c | 8
-rw-r--r-- drivers/cdx/Kconfig | 1
-rw-r--r-- drivers/cdx/cdx.c | 8
-rw-r--r-- drivers/cdx/cdx_msi.c | 1
-rw-r--r-- drivers/cdx/controller/Kconfig | 1
-rw-r--r-- drivers/cdx/controller/bitfield.h | 90
-rw-r--r-- drivers/cdx/controller/cdx_controller.c | 5
-rw-r--r-- drivers/cdx/controller/cdx_rpmsg.c | 2
-rw-r--r-- drivers/cdx/controller/mcdi.c | 43
-rw-r--r-- drivers/cdx/controller/mcdi.h | 242
-rw-r--r-- drivers/cdx/controller/mcdi_functions.c | 1
-rw-r--r-- drivers/cdx/controller/mcdi_functions.h | 3
-rw-r--r-- drivers/cdx/controller/mcdid.h | 63
-rw-r--r-- drivers/char/Makefile | 1
-rw-r--r-- drivers/char/adi.c | 8
-rw-r--r-- drivers/char/apm-emulation.c | 10
-rw-r--r-- drivers/char/applicom.c | 5
-rw-r--r-- drivers/char/hangcheck-timer.c | 24
-rw-r--r-- drivers/char/hpet.c | 2
-rw-r--r-- drivers/char/hw_random/Kconfig | 3
-rw-r--r-- drivers/char/hw_random/bcm2835-rng.c | 11
-rw-r--r-- drivers/char/hw_random/cn10k-rng.c | 2
-rw-r--r-- drivers/char/hw_random/core.c | 11
-rw-r--r-- drivers/char/hw_random/ks-sa-rng.c | 4
-rw-r--r-- drivers/char/hw_random/n2rng.h | 4
-rw-r--r-- drivers/char/hw_random/s390-trng.c | 3
-rw-r--r-- drivers/char/hw_random/timeriomem-rng.c | 2
-rw-r--r-- drivers/char/ipmi/Kconfig | 7
-rw-r--r-- drivers/char/ipmi/Makefile | 1
-rw-r--r-- drivers/char/ipmi/ipmi_ipmb.c | 4
-rw-r--r-- drivers/char/ipmi/ipmi_kcs_sm.c | 16
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 630
-rw-r--r-- drivers/char/ipmi/ipmi_powernv.c | 17
-rw-r--r-- drivers/char/ipmi/ipmi_si.h | 7
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 77
-rw-r--r-- drivers/char/ipmi/ipmi_si_ls2k.c | 189
-rw-r--r-- drivers/char/ipmi/ipmi_ssif.c | 10
-rw-r--r-- drivers/char/mem.c | 105
-rw-r--r-- drivers/char/misc.c | 21
-rw-r--r-- drivers/char/misc_minor_kunit.c (renamed from drivers/misc/misc_minor_kunit.c) | 95
-rw-r--r-- drivers/char/mwave/3780i.c | 218
-rw-r--r-- drivers/char/mwave/3780i.h | 12
-rw-r--r-- drivers/char/mwave/Makefile | 6
-rw-r--r-- drivers/char/mwave/README | 10
-rw-r--r-- drivers/char/mwave/mwavedd.c | 337
-rw-r--r-- drivers/char/mwave/mwavedd.h | 76
-rw-r--r-- drivers/char/mwave/mwavepub.h | 22
-rw-r--r-- drivers/char/mwave/smapi.c | 244
-rw-r--r-- drivers/char/mwave/smapi.h | 6
-rw-r--r-- drivers/char/mwave/tp3780i.c | 209
-rw-r--r-- drivers/char/mwave/tp3780i.h | 30
-rw-r--r-- drivers/char/random.c | 44
-rw-r--r-- drivers/char/tpm/Kconfig | 12
-rw-r--r-- drivers/char/tpm/Makefile | 1
-rw-r--r-- drivers/char/tpm/tpm-chip.c | 37
-rw-r--r-- drivers/char/tpm/tpm-dev-common.c | 3
-rw-r--r-- drivers/char/tpm/tpm-interface.c | 22
-rw-r--r-- drivers/char/tpm/tpm.h | 3
-rw-r--r-- drivers/char/tpm/tpm1-cmd.c | 5
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 191
-rw-r--r-- drivers/char/tpm/tpm2-sessions.c | 303
-rw-r--r-- drivers/char/tpm/tpm_crb.c | 33
-rw-r--r-- drivers/char/tpm/tpm_loongson.c | 84
-rw-r--r-- drivers/char/tpm/tpm_ppi.c | 89
-rw-r--r-- drivers/char/tpm/tpm_tis_core.c | 7
-rw-r--r-- drivers/char/xillybus/xillybus_core.c | 2
-rw-r--r-- drivers/char/xillybus/xillyusb.c | 4
-rw-r--r-- drivers/clk/Kconfig | 11
-rw-r--r-- drivers/clk/Makefile | 4
-rw-r--r-- drivers/clk/actions/owl-common.c | 1
-rw-r--r-- drivers/clk/actions/owl-common.h | 2
-rw-r--r-- drivers/clk/actions/owl-composite.c | 8
-rw-r--r-- drivers/clk/actions/owl-composite.h | 2
-rw-r--r-- drivers/clk/actions/owl-divider.c | 13
-rw-r--r-- drivers/clk/actions/owl-divider.h | 2
-rw-r--r-- drivers/clk/actions/owl-factor.c | 12
-rw-r--r-- drivers/clk/actions/owl-factor.h | 2
-rw-r--r-- drivers/clk/actions/owl-gate.h | 2
-rw-r--r-- drivers/clk/actions/owl-mux.h | 2
-rw-r--r-- drivers/clk/actions/owl-pll.c | 25
-rw-r--r-- drivers/clk/actions/owl-pll.h | 2
-rw-r--r-- drivers/clk/at91/clk-audio-pll.c | 42
-rw-r--r-- drivers/clk/at91/clk-h32mx.c | 33
-rw-r--r-- drivers/clk/at91/clk-master.c | 3
-rw-r--r-- drivers/clk/at91/clk-peripheral.c | 49
-rw-r--r-- drivers/clk/at91/clk-pll.c | 12
-rw-r--r-- drivers/clk/at91/clk-plldiv.c | 34
-rw-r--r-- drivers/clk/at91/clk-sam9x60-pll.c | 111
-rw-r--r-- drivers/clk/at91/clk-usb.c | 20
-rw-r--r-- drivers/clk/at91/pmc.c | 12
-rw-r--r-- drivers/clk/at91/pmc.h | 4
-rw-r--r-- drivers/clk/at91/sam9x60.c | 2
-rw-r--r-- drivers/clk/at91/sam9x7.c | 6
-rw-r--r-- drivers/clk/at91/sama7d65.c | 4
-rw-r--r-- drivers/clk/at91/sama7g5.c | 2
-rw-r--r-- drivers/clk/axs10x/i2s_pll_clock.c | 14
-rw-r--r-- drivers/clk/axs10x/pll_clock.c | 12
-rw-r--r-- drivers/clk/baikal-t1/ccu-div.c | 27
-rw-r--r-- drivers/clk/baikal-t1/ccu-pll.c | 14
-rw-r--r-- drivers/clk/bcm/clk-iproc-asiu.c | 25
-rw-r--r-- drivers/clk/bcm/clk-raspberrypi.c | 72
-rw-r--r-- drivers/clk/clk-apple-nco.c | 14
-rw-r--r-- drivers/clk/clk-axi-clkgen.c | 2
-rw-r--r-- drivers/clk/clk-axm5516.c | 1
-rw-r--r-- drivers/clk/clk-bm1880.c | 21
-rw-r--r-- drivers/clk/clk-cdce706.c | 16
-rw-r--r-- drivers/clk/clk-cdce925.c | 50
-rw-r--r-- drivers/clk/clk-cs2000-cp.c | 14
-rw-r--r-- drivers/clk/clk-divider.c | 23
-rw-r--r-- drivers/clk/clk-en7523.c | 64
-rw-r--r-- drivers/clk/clk-ep93xx.c | 21
-rw-r--r-- drivers/clk/clk-fixed-factor.c | 16
-rw-r--r-- drivers/clk/clk-fractional-divider.c | 25
-rw-r--r-- drivers/clk/clk-gemini.c | 15
-rw-r--r-- drivers/clk/clk-highbank.c | 26
-rw-r--r-- drivers/clk/clk-hsdk-pll.c | 12
-rw-r--r-- drivers/clk/clk-lan966x.c | 2
-rw-r--r-- drivers/clk/clk-lmk04832.c | 53
-rw-r--r-- drivers/clk/clk-loongson1.c | 12
-rw-r--r-- drivers/clk/clk-loongson2.c | 122
-rw-r--r-- drivers/clk/clk-max9485.c | 27
-rw-r--r-- drivers/clk/clk-milbeaut.c | 22
-rw-r--r-- drivers/clk/clk-multiplier.c | 12
-rw-r--r-- drivers/clk/clk-rp1.c | 1022
-rw-r--r-- drivers/clk/clk-rpmi.c | 620
-rw-r--r-- drivers/clk/clk-s2mps11.c | 8
-rw-r--r-- drivers/clk/clk-scmi.c | 46
-rw-r--r-- drivers/clk/clk-scpi.c | 18
-rw-r--r-- drivers/clk/clk-si514.c | 24
-rw-r--r-- drivers/clk/clk-si521xx.c | 14
-rw-r--r-- drivers/clk/clk-si5341.c | 22
-rw-r--r-- drivers/clk/clk-si544.c | 10
-rw-r--r-- drivers/clk/clk-si570.c | 24
-rw-r--r-- drivers/clk/clk-sp7021.c | 44
-rw-r--r-- drivers/clk/clk-sparx5.c | 10
-rw-r--r-- drivers/clk/clk-stm32f4.c | 26
-rw-r--r-- drivers/clk/clk-tps68470.c | 12
-rw-r--r-- drivers/clk/clk-versaclock3.c | 70
-rw-r--r-- drivers/clk/clk-versaclock5.c | 71
-rw-r--r-- drivers/clk/clk-versaclock7.c | 30
-rw-r--r-- drivers/clk/clk-vt8500.c | 59
-rw-r--r-- drivers/clk/clk-wm831x.c | 14
-rw-r--r-- drivers/clk/clk-xgene.c | 41
-rw-r--r-- drivers/clk/clk.c | 66
-rw-r--r-- drivers/clk/davinci/psc-da850.c | 7
-rw-r--r-- drivers/clk/hisilicon/clk-hi3660-stub.c | 18
-rw-r--r-- drivers/clk/hisilicon/clk-hi6220-stub.c | 12
-rw-r--r-- drivers/clk/hisilicon/clkdivider-hi6220.c | 12
-rw-r--r-- drivers/clk/imx/Kconfig | 1
-rw-r--r-- drivers/clk/imx/Makefile | 1
-rw-r--r-- drivers/clk/imx/clk-composite-7ulp.c | 13
-rw-r--r-- drivers/clk/imx/clk-imx8mp-audiomix.c | 39
-rw-r--r-- drivers/clk/imx/clk-imx8ulp-sim-lpav.c | 156
-rw-r--r-- drivers/clk/imx/clk-imx95-blk-ctl.c | 57
-rw-r--r-- drivers/clk/imx/clk-vf610.c | 12
-rw-r--r-- drivers/clk/ingenic/cgu.c | 12
-rw-r--r-- drivers/clk/ingenic/jz4725b-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4740-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4755-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4760-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4770-cgu.c | 2
-rw-r--r-- drivers/clk/ingenic/jz4780-cgu.c | 26
-rw-r--r-- drivers/clk/ingenic/pm.c | 14
-rw-r--r-- drivers/clk/ingenic/pm.h | 2
-rw-r--r-- drivers/clk/ingenic/tcu.c | 12
-rw-r--r-- drivers/clk/ingenic/x1000-cgu.c | 21
-rw-r--r-- drivers/clk/ingenic/x1830-cgu.c | 2
-rw-r--r-- drivers/clk/keystone/sci-clk.c | 9
-rw-r--r-- drivers/clk/keystone/syscon-clk.c | 2
-rw-r--r-- drivers/clk/mediatek/Kconfig | 71
-rw-r--r-- drivers/clk/mediatek/Makefile | 13
-rw-r--r-- drivers/clk/mediatek/clk-gate.c | 117
-rw-r--r-- drivers/clk/mediatek/clk-gate.h | 3
-rw-r--r-- drivers/clk/mediatek/clk-mt7622-aud.c | 1
-rw-r--r-- drivers/clk/mediatek/clk-mt8195-infra_ao.c | 2
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-apmixedsys.c | 204
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-disp0.c | 170
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-disp1.c | 170
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c | 118
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-mcu.c | 167
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-mdpsys.c | 186
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-mfg.c | 150
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-ovl0.c | 154
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-ovl1.c | 154
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-peri_ao.c | 142
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-pextp.c | 131
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-topckgen.c | 985
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-topckgen2.c | 568
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-ufs_ao.c | 108
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-vdec.c | 253
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-vdisp_ao.c | 80
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-venc.c | 236
-rw-r--r-- drivers/clk/mediatek/clk-mt8196-vlpckgen.c | 725
-rw-r--r-- drivers/clk/mediatek/clk-mtk.c | 16
-rw-r--r-- drivers/clk/mediatek/clk-mtk.h | 22
-rw-r--r-- drivers/clk/mediatek/clk-mux.c | 122
-rw-r--r-- drivers/clk/mediatek/clk-mux.h | 87
-rw-r--r-- drivers/clk/mediatek/clk-pll.c | 58
-rw-r--r-- drivers/clk/mediatek/clk-pll.h | 11
-rw-r--r-- drivers/clk/mediatek/clk-pllfh.c | 2
-rw-r--r-- drivers/clk/meson/Kconfig | 13
-rw-r--r-- drivers/clk/meson/Makefile | 1
-rw-r--r-- drivers/clk/meson/a1-peripherals.c | 995
-rw-r--r-- drivers/clk/meson/a1-pll.c | 124
-rw-r--r-- drivers/clk/meson/axg-aoclk.c | 153
-rw-r--r-- drivers/clk/meson/axg.c | 237
-rw-r--r-- drivers/clk/meson/c3-peripherals.c | 2037
-rw-r--r-- drivers/clk/meson/c3-pll.c | 245
-rw-r--r-- drivers/clk/meson/clk-regmap.h | 20
-rw-r--r-- drivers/clk/meson/g12a-aoclk.c | 238
-rw-r--r-- drivers/clk/meson/g12a.c | 1994
-rw-r--r-- drivers/clk/meson/gxbb-aoclk.c | 123
-rw-r--r-- drivers/clk/meson/gxbb.c | 611
-rw-r--r-- drivers/clk/meson/meson-aoclk.c | 32
-rw-r--r-- drivers/clk/meson/meson-aoclk.h | 2
-rw-r--r-- drivers/clk/meson/meson-clkc-utils.c | 86
-rw-r--r-- drivers/clk/meson/meson-clkc-utils.h | 89
-rw-r--r-- drivers/clk/meson/meson-eeclk.c | 60
-rw-r--r-- drivers/clk/meson/meson-eeclk.h | 24
-rw-r--r-- drivers/clk/meson/meson8-ddr.c | 62
-rw-r--r-- drivers/clk/meson/meson8b.c | 746
-rw-r--r-- drivers/clk/meson/s4-peripherals.c | 1160
-rw-r--r-- drivers/clk/meson/s4-pll.c | 82
-rw-r--r-- drivers/clk/microchip/Kconfig | 2
-rw-r--r-- drivers/clk/microchip/clk-core.c | 55
-rw-r--r-- drivers/clk/microchip/clk-mpfs.c | 227
-rw-r--r-- drivers/clk/mmp/Kconfig | 10
-rw-r--r-- drivers/clk/mmp/Makefile | 5
-rw-r--r-- drivers/clk/mmp/clk-audio.c | 18
-rw-r--r-- drivers/clk/mmp/clk-frac.c | 27
-rw-r--r-- drivers/clk/mmp/clk-pxa1908-apmu.c | 7
-rw-r--r-- drivers/clk/mstar/clk-msc313-cpupll.c | 18
-rw-r--r-- drivers/clk/mvebu/ap-cpu-clk.c | 12
-rw-r--r-- drivers/clk/mvebu/armada-37xx-periph.c | 15
-rw-r--r-- drivers/clk/mvebu/clk-corediv.c | 18
-rw-r--r-- drivers/clk/mvebu/clk-cpu.c | 12
-rw-r--r-- drivers/clk/mvebu/common.c | 12
-rw-r--r-- drivers/clk/mvebu/cp110-system-controller.c | 20
-rw-r--r-- drivers/clk/mvebu/dove-divider.c | 16
-rw-r--r-- drivers/clk/mxs/clk-div.c | 8
-rw-r--r-- drivers/clk/mxs/clk-frac.c | 16
-rw-r--r-- drivers/clk/mxs/clk-ref.c | 16
-rw-r--r-- drivers/clk/nuvoton/clk-ma35d1-divider.c | 12
-rw-r--r-- drivers/clk/nuvoton/clk-ma35d1-pll.c | 28
-rw-r--r-- drivers/clk/nxp/clk-lpc18xx-cgu.c | 20
-rw-r--r-- drivers/clk/nxp/clk-lpc32xx.c | 60
-rw-r--r-- drivers/clk/pistachio/clk-pll.c | 20
-rw-r--r-- drivers/clk/qcom/Kconfig | 72
-rw-r--r-- drivers/clk/qcom/Makefile | 6
-rw-r--r-- drivers/clk/qcom/a53-pll.c | 1
-rw-r--r-- drivers/clk/qcom/a7-pll.c | 3
-rw-r--r-- drivers/clk/qcom/apss-ipq-pll.c | 1
-rw-r--r-- drivers/clk/qcom/apss-ipq5424.c | 258
-rw-r--r-- drivers/clk/qcom/camcc-milos.c | 2
-rw-r--r-- drivers/clk/qcom/camcc-sdm845.c | 3
-rw-r--r-- drivers/clk/qcom/camcc-sm6350.c | 13
-rw-r--r-- drivers/clk/qcom/camcc-sm7150.c | 11
-rw-r--r-- drivers/clk/qcom/camcc-sm8250.c | 3
-rw-r--r-- drivers/clk/qcom/camcc-sm8450.c | 3
-rw-r--r-- drivers/clk/qcom/camcc-sm8550.c | 10
-rw-r--r-- drivers/clk/qcom/clk-alpha-pll.c | 162
-rw-r--r-- drivers/clk/qcom/clk-alpha-pll.h | 6
-rw-r--r-- drivers/clk/qcom/clk-branch.c | 8
-rw-r--r-- drivers/clk/qcom/clk-branch.h | 4
-rw-r--r-- drivers/clk/qcom/clk-cbf-8996.c | 1
-rw-r--r-- drivers/clk/qcom/clk-cpu-8996.c | 1
-rw-r--r-- drivers/clk/qcom/clk-rcg.c | 2
-rw-r--r-- drivers/clk/qcom/clk-rcg2.c | 8
-rw-r--r-- drivers/clk/qcom/clk-regmap-divider.c | 27
-rw-r--r-- drivers/clk/qcom/clk-rpmh.c | 29
-rw-r--r-- drivers/clk/qcom/clk-smd-rpm.c | 8
-rw-r--r-- drivers/clk/qcom/common.c | 4
-rw-r--r-- drivers/clk/qcom/dispcc-glymur.c | 1982
-rw-r--r-- drivers/clk/qcom/dispcc-milos.c | 2
-rw-r--r-- drivers/clk/qcom/dispcc-sc7280.c | 8
-rw-r--r-- drivers/clk/qcom/dispcc-sm6350.c | 7
-rw-r--r-- drivers/clk/qcom/dispcc-sm7150.c | 9
-rw-r--r-- drivers/clk/qcom/dispcc-x1e80100.c | 3
-rw-r--r-- drivers/clk/qcom/ecpricc-qdu1000.c | 30
-rw-r--r-- drivers/clk/qcom/gcc-glymur.c | 8615
-rw-r--r-- drivers/clk/qcom/gcc-ipq5424.c | 28
-rw-r--r-- drivers/clk/qcom/gcc-ipq6018.c | 60
-rw-r--r-- drivers/clk/qcom/gcc-msm8917.c | 617
-rw-r--r-- drivers/clk/qcom/gcc-qcs404.c | 2
-rw-r--r-- drivers/clk/qcom/gcc-qcs615.c | 6
-rw-r--r-- drivers/clk/qcom/gcc-sc8280xp.c | 5
-rw-r--r-- drivers/clk/qcom/gcc-sdm660.c | 72
-rw-r--r-- drivers/clk/qcom/gcc-sm8750.c | 1
-rw-r--r-- drivers/clk/qcom/gcc-x1e80100.c | 699
-rw-r--r-- drivers/clk/qcom/gpucc-sa8775p.c | 6
-rw-r--r-- drivers/clk/qcom/gpucc-sc7180.c | 2
-rw-r--r-- drivers/clk/qcom/gpucc-sm6350.c | 4
-rw-r--r-- drivers/clk/qcom/gpucc-sm8150.c | 2
-rw-r--r-- drivers/clk/qcom/gpucc-sm8250.c | 2
-rw-r--r-- drivers/clk/qcom/hfpll.c | 1
-rw-r--r-- drivers/clk/qcom/ipq-cmn-pll.c | 1
-rw-r--r-- drivers/clk/qcom/lpassaudiocc-sc7280.c | 4
-rw-r--r-- drivers/clk/qcom/lpasscc-sc8280xp.c | 4
-rw-r--r-- drivers/clk/qcom/lpasscc-sm6115.c | 2
-rw-r--r-- drivers/clk/qcom/lpasscorecc-sc7180.c | 2
-rw-r--r-- drivers/clk/qcom/mmcc-sdm660.c | 3
-rw-r--r-- drivers/clk/qcom/nsscc-ipq5424.c | 1340
-rw-r--r-- drivers/clk/qcom/nsscc-ipq9574.c | 2
-rw-r--r-- drivers/clk/qcom/tcsrcc-glymur.c | 313
-rw-r--r-- drivers/clk/qcom/tcsrcc-x1e80100.c | 4
-rw-r--r-- drivers/clk/qcom/videocc-milos.c | 2
-rw-r--r-- drivers/clk/qcom/videocc-sm8750.c | 463
-rw-r--r-- drivers/clk/renesas/clk-div6.c | 6
-rw-r--r-- drivers/clk/renesas/clk-mstp.c | 20
-rw-r--r-- drivers/clk/renesas/r8a779a0-cpg-mssr.c | 7
-rw-r--r-- drivers/clk/renesas/r9a06g032-clocks.c | 6
-rw-r--r-- drivers/clk/renesas/r9a07g043-cpg.c | 140
-rw-r--r-- drivers/clk/renesas/r9a07g044-cpg.c | 162
-rw-r--r-- drivers/clk/renesas/r9a08g045-cpg.c | 29
-rw-r--r-- drivers/clk/renesas/r9a09g047-cpg.c | 178
-rw-r--r-- drivers/clk/renesas/r9a09g056-cpg.c | 134
-rw-r--r-- drivers/clk/renesas/r9a09g057-cpg.c | 113
-rw-r--r-- drivers/clk/renesas/r9a09g077-cpg.c | 98
-rw-r--r-- drivers/clk/renesas/rcar-cpg-lib.c | 2
-rw-r--r-- drivers/clk/renesas/rcar-gen3-cpg.c | 15
-rw-r--r-- drivers/clk/renesas/rcar-gen4-cpg.c | 18
-rw-r--r-- drivers/clk/renesas/renesas-cpg-mssr.c | 188
-rw-r--r-- drivers/clk/renesas/rzg2l-cpg.c | 63
-rw-r--r-- drivers/clk/renesas/rzg2l-cpg.h | 1
-rw-r--r-- drivers/clk/renesas/rzv2h-cpg.c | 536
-rw-r--r-- drivers/clk/renesas/rzv2h-cpg.h | 31
-rw-r--r-- drivers/clk/rockchip/Kconfig | 14
-rw-r--r-- drivers/clk/rockchip/Makefile | 2
-rw-r--r-- drivers/clk/rockchip/clk-cpu.c | 165
-rw-r--r-- drivers/clk/rockchip/clk-ddr.c | 13
-rw-r--r-- drivers/clk/rockchip/clk-half-divider.c | 12
-rw-r--r-- drivers/clk/rockchip/clk-pll.c | 23
-rw-r--r-- drivers/clk/rockchip/clk-rk3288.c | 12
-rw-r--r-- drivers/clk/rockchip/clk-rk3368.c | 2
-rw-r--r-- drivers/clk/rockchip/clk-rk3506.c | 869
-rw-r--r-- drivers/clk/rockchip/clk-rk3568.c | 5
-rw-r--r-- drivers/clk/rockchip/clk-rv1126b.c | 1117
-rw-r--r-- drivers/clk/rockchip/clk.c | 24
-rw-r--r-- drivers/clk/rockchip/clk.h | 96
-rw-r--r-- drivers/clk/rockchip/rst-rk3506.c | 226
-rw-r--r-- drivers/clk/rockchip/rst-rv1126b.c | 443
-rw-r--r-- drivers/clk/samsung/Kconfig | 10
-rw-r--r-- drivers/clk/samsung/Makefile | 2
-rw-r--r-- drivers/clk/samsung/clk-acpm.c | 185
-rw-r--r-- drivers/clk/samsung/clk-artpec8.c | 1044
-rw-r--r-- drivers/clk/samsung/clk-cpu.c | 12
-rw-r--r-- drivers/clk/samsung/clk-exynos-clkout.c | 2
-rw-r--r-- drivers/clk/samsung/clk-exynos990.c | 1240
-rw-r--r-- drivers/clk/samsung/clk-exynosautov920.c | 90
-rw-r--r-- drivers/clk/samsung/clk-fsd.c | 28
-rw-r--r-- drivers/clk/samsung/clk-pll.c | 198
-rw-r--r-- drivers/clk/samsung/clk-pll.h | 2
-rw-r--r-- drivers/clk/samsung/clk-s5pv210-audss.c | 12
-rw-r--r-- drivers/clk/samsung/clk.c | 12
-rw-r--r-- drivers/clk/sifive/fu540-prci.h | 2
-rw-r--r-- drivers/clk/sifive/fu740-prci.h | 2
-rw-r--r-- drivers/clk/sifive/sifive-prci.c | 11
-rw-r--r-- drivers/clk/sifive/sifive-prci.h | 4
-rw-r--r-- drivers/clk/socfpga/Kconfig | 2
-rw-r--r-- drivers/clk/socfpga/Makefile | 2
-rw-r--r-- drivers/clk/socfpga/clk-agilex5.c | 561
-rw-r--r-- drivers/clk/socfpga/clk-gate-s10.c | 53
-rw-r--r-- drivers/clk/socfpga/clk-periph-s10.c | 41
-rw-r--r-- drivers/clk/socfpga/clk-pll-s10.c | 36
-rw-r--r-- drivers/clk/socfpga/stratix10-clk.h | 43
-rw-r--r-- drivers/clk/sophgo/clk-cv18xx-ip.c | 10
-rw-r--r-- drivers/clk/sophgo/clk-sg2042-clkgen.c | 17
-rw-r--r-- drivers/clk/sophgo/clk-sg2042-pll.c | 26
-rw-r--r-- drivers/clk/spacemit/ccu-k1.c | 65
-rw-r--r-- drivers/clk/spacemit/ccu_ddn.c | 23
-rw-r--r-- drivers/clk/spacemit/ccu_ddn.h | 6
-rw-r--r-- drivers/clk/spacemit/ccu_mix.c | 12
-rw-r--r-- drivers/clk/spacemit/ccu_mix.h | 2
-rw-r--r-- drivers/clk/spacemit/ccu_pll.c | 10
-rw-r--r-- drivers/clk/spear/clk-aux-synth.c | 12
-rw-r--r-- drivers/clk/spear/clk-frac-synth.c | 12
-rw-r--r-- drivers/clk/spear/clk-gpt-synth.c | 12
-rw-r--r-- drivers/clk/spear/clk-vco-pll.c | 23
-rw-r--r-- drivers/clk/sprd/div.c | 13
-rw-r--r-- drivers/clk/sprd/pll.c | 8
-rw-r--r-- drivers/clk/sprd/sc9860-clk.c | 8
-rw-r--r-- drivers/clk/st/clk-flexgen.c | 80
-rw-r--r-- drivers/clk/st/clkgen-fsyn.c | 33
-rw-r--r-- drivers/clk/st/clkgen-pll.c | 38
-rw-r--r-- drivers/clk/stm32/Kconfig | 7
-rw-r--r-- drivers/clk/stm32/Makefile | 1
-rw-r--r-- drivers/clk/stm32/clk-stm32-core.c | 28
-rw-r--r-- drivers/clk/stm32/clk-stm32mp1.c | 13
-rw-r--r-- drivers/clk/stm32/clk-stm32mp21.c | 1586
-rw-r--r-- drivers/clk/stm32/stm32mp21_rcc.h | 651
-rw-r--r-- drivers/clk/sunxi-ng/Kconfig | 5
-rw-r--r-- drivers/clk/sunxi-ng/Makefile | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c | 469
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c | 4
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun55i-a523.c | 23
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun55i-a523.h | 14
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun6i-rtc.c | 11
-rw-r--r-- drivers/clk/sunxi-ng/ccu_div.h | 18
-rw-r--r-- drivers/clk/sunxi-ng/ccu_mp.c | 2
-rw-r--r-- drivers/clk/tegra/Kconfig | 2
-rw-r--r-- drivers/clk/tegra/clk-audio-sync.c | 10
-rw-r--r-- drivers/clk/tegra/clk-bpmp.c | 2
-rw-r--r-- drivers/clk/tegra/clk-dfll.c | 2
-rw-r--r-- drivers/clk/tegra/clk-divider.c | 28
-rw-r--r-- drivers/clk/tegra/clk-periph.c | 8
-rw-r--r-- drivers/clk/tegra/clk-pll.c | 52
-rw-r--r-- drivers/clk/tegra/clk-super.c | 9
-rw-r--r-- drivers/clk/tegra/clk-tegra114.c | 30
-rw-r--r-- drivers/clk/tegra/clk-tegra124-dfll-fcpu.c | 158
-rw-r--r-- drivers/clk/tegra/clk-tegra210-emc.c | 24
-rw-r--r-- drivers/clk/tegra/clk-tegra210.c | 12
-rw-r--r-- drivers/clk/tegra/clk-tegra30.c | 1
-rw-r--r-- drivers/clk/tegra/clk.h | 2
-rw-r--r-- drivers/clk/thead/clk-th1520-ap.c | 504
-rw-r--r-- drivers/clk/ti/clk-33xx.c | 2
-rw-r--r-- drivers/clk/ti/clk-dra7-atl.c | 12
-rw-r--r-- drivers/clk/ti/clkt_dpll.c | 36
-rw-r--r-- drivers/clk/ti/clock.h | 6
-rw-r--r-- drivers/clk/ti/composite.c | 6
-rw-r--r-- drivers/clk/ti/divider.c | 12
-rw-r--r-- drivers/clk/ti/dpll.c | 10
-rw-r--r-- drivers/clk/ti/dpll3xxx.c | 7
-rw-r--r-- drivers/clk/ti/dpll44xx.c | 89
-rw-r--r-- drivers/clk/ti/fapll.c | 48
-rw-r--r-- drivers/clk/ux500/clk-prcmu.c | 14
-rw-r--r-- drivers/clk/versatile/clk-icst.c | 72
-rw-r--r-- drivers/clk/versatile/clk-vexpress-osc.c | 16
-rw-r--r-- drivers/clk/visconti/clkc-tmpv770x.c | 79
-rw-r--r-- drivers/clk/visconti/pll-tmpv770x.c | 5
-rw-r--r-- drivers/clk/visconti/pll.c | 17
-rw-r--r-- drivers/clk/x86/clk-cgu.c | 35
-rw-r--r-- drivers/clk/xilinx/clk-xlnx-clock-wizard.c | 89
-rw-r--r-- drivers/clk/xilinx/xlnx_vcu.c | 15
-rw-r--r-- drivers/clk/zynq/pll.c | 12
-rw-r--r-- drivers/clk/zynqmp/divider.c | 23
-rw-r--r-- drivers/clk/zynqmp/pll.c | 24
-rw-r--r-- drivers/clocksource/Kconfig | 24
-rw-r--r-- drivers/clocksource/Makefile | 4
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 686
-rw-r--r-- drivers/clocksource/arm_arch_timer_mmio.c | 442
-rw-r--r-- drivers/clocksource/arm_global_timer.c | 44
-rw-r--r-- drivers/clocksource/clps711x-timer.c | 23
-rw-r--r-- drivers/clocksource/hyperv_timer.c | 10
-rw-r--r-- drivers/clocksource/ingenic-sysost.c | 27
-rw-r--r-- drivers/clocksource/scx200_hrt.c | 1
-rw-r--r-- drivers/clocksource/sh_cmt.c | 106
-rw-r--r-- drivers/clocksource/timer-armada-370-xp.c | 12
-rw-r--r-- drivers/clocksource/timer-cs5535.c | 1
-rw-r--r-- drivers/clocksource/timer-econet-en751221.c | 2
-rw-r--r-- drivers/clocksource/timer-nxp-pit.c | 383
-rw-r--r-- drivers/clocksource/timer-nxp-stm.c | 25
-rw-r--r-- drivers/clocksource/timer-ralink.c | 11
-rw-r--r-- drivers/clocksource/timer-rda.c | 9
-rw-r--r-- drivers/clocksource/timer-realtek.c | 150
-rw-r--r-- drivers/clocksource/timer-rtl-otto.c | 42
-rw-r--r-- drivers/clocksource/timer-sp804.c | 24
-rw-r--r-- drivers/clocksource/timer-sprd.c | 24
-rw-r--r-- drivers/clocksource/timer-stm32-lp.c | 2
-rw-r--r-- drivers/clocksource/timer-sun5i.c | 2
-rw-r--r-- drivers/clocksource/timer-tegra186.c | 38
-rw-r--r-- drivers/clocksource/timer-ti-dm.c | 119
-rw-r--r-- drivers/clocksource/timer-vf-pit.c | 194
-rw-r--r-- drivers/comedi/Kconfig | 9
-rw-r--r-- drivers/comedi/comedi_buf.c | 276
-rw-r--r-- drivers/comedi/comedi_fops.c | 189
-rw-r--r-- drivers/comedi/comedi_internal.h | 12
-rw-r--r-- drivers/comedi/drivers.c | 134
-rw-r--r-- drivers/comedi/drivers/8255.c | 20
-rw-r--r-- drivers/comedi/drivers/Makefile | 1
-rw-r--r-- drivers/comedi/drivers/adl_pci7250.c | 220
-rw-r--r-- drivers/comedi/drivers/c6xdigio.c | 46
-rw-r--r-- drivers/comedi/drivers/comedi_bond.c | 4
-rw-r--r-- drivers/comedi/drivers/multiq3.c | 9
-rw-r--r-- drivers/comedi/drivers/ni_670x.c | 2
-rw-r--r-- drivers/comedi/drivers/pcl818.c | 5
-rw-r--r-- drivers/comedi/kcomedilib/kcomedilib_main.c | 120
-rw-r--r-- drivers/counter/microchip-tcb-capture.c | 2
-rw-r--r-- drivers/counter/ti-ecap-capture.c | 12
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 11
-rw-r--r-- drivers/cpufreq/airoha-cpufreq.c | 1
-rw-r--r-- drivers/cpufreq/amd-pstate.c | 49
-rw-r--r-- drivers/cpufreq/armada-37xx-cpufreq.c | 4
-rw-r--r-- drivers/cpufreq/brcmstb-avs-cpufreq.c | 4
-rw-r--r-- drivers/cpufreq/cppc_cpufreq.c | 47
-rw-r--r-- drivers/cpufreq/cpufreq-dt-platdev.c | 19
-rw-r--r-- drivers/cpufreq/cpufreq-dt.c | 2
-rw-r--r-- drivers/cpufreq/cpufreq-nforce2.c | 3
-rw-r--r-- drivers/cpufreq/cpufreq.c | 75
-rw-r--r-- drivers/cpufreq/cpufreq_conservative.c | 24
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.c | 25
-rw-r--r-- drivers/cpufreq/cpufreq_ondemand.h | 23
-rw-r--r-- drivers/cpufreq/freq_table.c | 22
-rw-r--r-- drivers/cpufreq/imx6q-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 410
-rw-r--r-- drivers/cpufreq/longhaul.c | 3
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq-hw.c | 136
-rw-r--r-- drivers/cpufreq/mediatek-cpufreq.c | 37
-rw-r--r-- drivers/cpufreq/qcom-cpufreq-nvmem.c | 40
-rw-r--r-- drivers/cpufreq/rcpufreq_dt.rs | 16
-rw-r--r-- drivers/cpufreq/s5pv210-cpufreq.c | 10
-rw-r--r-- drivers/cpufreq/scmi-cpufreq.c | 12
-rw-r--r-- drivers/cpufreq/scpi-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/sh-cpufreq.c | 6
-rw-r--r-- drivers/cpufreq/spear-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/speedstep-lib.c | 12
-rw-r--r-- drivers/cpufreq/speedstep-lib.h | 10
-rw-r--r-- drivers/cpufreq/sun50i-cpufreq-nvmem.c | 11
-rw-r--r-- drivers/cpufreq/tegra186-cpufreq.c | 185
-rw-r--r-- drivers/cpufreq/tegra194-cpufreq.c | 3
-rw-r--r-- drivers/cpufreq/ti-cpufreq.c | 12
-rw-r--r-- drivers/cpufreq/virtual-cpufreq.c | 2
-rw-r--r-- drivers/cpuidle/cpuidle-big_little.c | 11
-rw-r--r-- drivers/cpuidle/cpuidle-psci.c | 16
-rw-r--r-- drivers/cpuidle/cpuidle-qcom-spm.c | 11
-rw-r--r-- drivers/cpuidle/cpuidle-riscv-sbi.c | 5
-rw-r--r-- drivers/cpuidle/cpuidle.c | 20
-rw-r--r-- drivers/cpuidle/driver.c | 10
-rw-r--r-- drivers/cpuidle/governor.c | 4
-rw-r--r-- drivers/cpuidle/governors/menu.c | 95
-rw-r--r-- drivers/cpuidle/governors/teo.c | 159
-rw-r--r-- drivers/cpuidle/poll_state.c | 4
-rw-r--r-- drivers/cpuidle/sysfs.c | 34
-rw-r--r-- drivers/crypto/Kconfig | 17
-rw-r--r-- drivers/crypto/Makefile | 2
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c | 85
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c | 35
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c | 145
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c | 1
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c | 1
-rw-r--r-- drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h | 27
-rw-r--r-- drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c | 2
-rw-r--r-- drivers/crypto/aspeed/aspeed-acry.c | 2
-rw-r--r-- drivers/crypto/aspeed/aspeed-hace-crypto.c | 2
-rw-r--r-- drivers/crypto/atmel-i2c.c | 2
-rw-r--r-- drivers/crypto/atmel-tdes.c | 2
-rw-r--r-- drivers/crypto/axis/artpec6_crypto.c | 9
-rw-r--r-- drivers/crypto/caam/blob_gen.c | 86
-rw-r--r-- drivers/crypto/caam/caamalg.c | 128
-rw-r--r-- drivers/crypto/caam/caamalg_desc.c | 87
-rw-r--r-- drivers/crypto/caam/caamalg_desc.h | 13
-rw-r--r-- drivers/crypto/caam/caamrng.c | 4
-rw-r--r-- drivers/crypto/caam/ctrl.c | 10
-rw-r--r-- drivers/crypto/caam/desc.h | 9
-rw-r--r-- drivers/crypto/caam/desc_constr.h | 8
-rw-r--r-- drivers/crypto/cavium/nitrox/nitrox_mbx.c | 2
-rw-r--r-- drivers/crypto/ccp/Kconfig | 1
-rw-r--r-- drivers/crypto/ccp/Makefile | 7
-rw-r--r-- drivers/crypto/ccp/ccp-dev.c | 2
-rw-r--r-- drivers/crypto/ccp/hsti.c | 8
-rw-r--r-- drivers/crypto/ccp/psp-dev.c | 20
-rw-r--r-- drivers/crypto/ccp/psp-dev.h | 8
-rw-r--r-- drivers/crypto/ccp/sev-dev-tio.c | 864
-rw-r--r-- drivers/crypto/ccp/sev-dev-tio.h | 123
-rw-r--r-- drivers/crypto/ccp/sev-dev-tsm.c | 405
-rw-r--r-- drivers/crypto/ccp/sev-dev.c | 445
-rw-r--r-- drivers/crypto/ccp/sev-dev.h | 20
-rw-r--r-- drivers/crypto/ccp/sfs.c | 311
-rw-r--r-- drivers/crypto/ccp/sfs.h | 47
-rw-r--r-- drivers/crypto/ccp/sp-dev.h | 2
-rw-r--r-- drivers/crypto/ccp/sp-pci.c | 19
-rw-r--r-- drivers/crypto/ccp/sp-platform.c | 17
-rw-r--r-- drivers/crypto/ccree/cc_buffer_mgr.c | 6
-rw-r--r-- drivers/crypto/chelsio/Kconfig | 6
-rw-r--r-- drivers/crypto/chelsio/chcr_algo.c | 259
-rw-r--r-- drivers/crypto/chelsio/chcr_crypto.h | 1
-rw-r--r-- drivers/crypto/hifn_795x.c | 7
-rw-r--r-- drivers/crypto/hisilicon/Kconfig | 1
-rw-r--r-- drivers/crypto/hisilicon/debugfs.c | 1
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_crypto.c | 403
-rw-r--r-- drivers/crypto/hisilicon/hpre/hpre_main.c | 179
-rw-r--r-- drivers/crypto/hisilicon/qm.c | 302
-rw-r--r-- drivers/crypto/hisilicon/sec/sec_drv.c | 3
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_crypto.c | 8
-rw-r--r-- drivers/crypto/hisilicon/sec2/sec_main.c | 229
-rw-r--r-- drivers/crypto/hisilicon/sgl.c | 5
-rw-r--r-- drivers/crypto/hisilicon/zip/dae_main.c | 19
-rw-r--r-- drivers/crypto/hisilicon/zip/zip_main.c | 234
-rw-r--r-- drivers/crypto/img-hash.c | 2
-rw-r--r-- drivers/crypto/intel/iaa/iaa_crypto_main.c | 2
-rw-r--r-- drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c | 5
-rw-r--r-- drivers/crypto/intel/qat/Kconfig | 7
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_aer.c | 6
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c | 40
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c | 112
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_isr.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c | 8
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_sriov.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_telemetry.c | 19
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_telemetry.h | 5
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c | 52
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h | 5
-rw-r--r-- drivers/crypto/intel/qat/qat_common/adf_vf_isr.c | 3
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_algs.c | 191
-rw-r--r-- drivers/crypto/intel/qat/qat_common/qat_uclo.c | 20
-rw-r--r-- drivers/crypto/loongson/Kconfig | 5
-rw-r--r-- drivers/crypto/loongson/Makefile | 1
-rw-r--r-- drivers/crypto/loongson/loongson-rng.c | 209
-rw-r--r-- drivers/crypto/marvell/cesa/cesa.c | 7
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c | 6
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 7
-rw-r--r-- drivers/crypto/nx/nx-common-powernv.c | 6
-rw-r--r-- drivers/crypto/nx/nx-common-pseries.c | 6
-rw-r--r-- drivers/crypto/omap-aes.c | 15
-rw-r--r-- drivers/crypto/omap-aes.h | 2
-rw-r--r-- drivers/crypto/omap-des.c | 17
-rw-r--r-- drivers/crypto/omap-sham.c | 15
-rw-r--r-- drivers/crypto/qce/core.c | 3
-rw-r--r-- drivers/crypto/qce/dma.c | 6
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_ahash.c | 2
-rw-r--r-- drivers/crypto/rockchip/rk3288_crypto_skcipher.c | 3
-rw-r--r-- drivers/crypto/starfive/jh7110-aes.c | 12
-rw-r--r-- drivers/crypto/starfive/jh7110-hash.c | 9
-rw-r--r-- drivers/crypto/stm32/stm32-cryp.c | 2
-rw-r--r-- drivers/crypto/tegra/tegra-se-hash.c | 3
-rw-r--r-- drivers/crypto/tegra/tegra-se-main.c | 2
-rw-r--r-- drivers/crypto/ti/Kconfig | 15
-rw-r--r-- drivers/crypto/ti/Makefile | 3
-rw-r--r-- drivers/crypto/ti/dthev2-aes.c | 538
-rw-r--r-- drivers/crypto/ti/dthev2-common.c | 217
-rw-r--r-- drivers/crypto/ti/dthev2-common.h | 109
-rw-r--r-- drivers/crypto/xilinx/Makefile | 1
-rw-r--r-- drivers/crypto/xilinx/xilinx-trng.c | 430
-rw-r--r-- drivers/cxl/acpi.c | 99
-rw-r--r-- drivers/cxl/core/cdat.c | 40
-rw-r--r-- drivers/cxl/core/core.h | 12
-rw-r--r-- drivers/cxl/core/features.c | 3
-rw-r--r-- drivers/cxl/core/hdm.c | 110
-rw-r--r-- drivers/cxl/core/memdev.c | 60
-rw-r--r-- drivers/cxl/core/pci.c | 140
-rw-r--r-- drivers/cxl/core/port.c | 320
-rw-r--r-- drivers/cxl/core/region.c | 487
-rw-r--r-- drivers/cxl/core/trace.h | 2
-rw-r--r-- drivers/cxl/cxl.h | 84
-rw-r--r-- drivers/cxl/cxlmem.h | 2
-rw-r--r-- drivers/cxl/cxlpci.h | 3
-rw-r--r-- drivers/cxl/pci.c | 2
-rw-r--r-- drivers/cxl/port.c | 47
-rw-r--r-- drivers/dax/device.c | 37
-rw-r--r-- drivers/dax/super.c | 4
-rw-r--r-- drivers/devfreq/devfreq.c | 2
-rw-r--r-- drivers/devfreq/event/rockchip-dfi.c | 110
-rw-r--r-- drivers/devfreq/governor.h | 127
-rw-r--r-- drivers/devfreq/governor_passive.c | 27
-rw-r--r-- drivers/devfreq/governor_performance.c | 2
-rw-r--r-- drivers/devfreq/governor_powersave.c | 2
-rw-r--r-- drivers/devfreq/governor_simpleondemand.c | 6
-rw-r--r-- drivers/devfreq/governor_userspace.c | 2
-rw-r--r-- drivers/devfreq/hisi_uncore_freq.c | 6
-rw-r--r-- drivers/devfreq/mtk-cci-devfreq.c | 5
-rw-r--r-- drivers/devfreq/tegra30-devfreq.c | 15
-rw-r--r-- drivers/dibs/Kconfig | 23
-rw-r--r-- drivers/dibs/Makefile | 8
-rw-r--r-- drivers/dibs/dibs_loopback.c | 361
-rw-r--r-- drivers/dibs/dibs_loopback.h | 57
-rw-r--r-- drivers/dibs/dibs_main.c | 274
-rw-r--r-- drivers/dma-buf/Makefile | 2
-rw-r--r-- drivers/dma-buf/dma-buf-mapping.c | 248
-rw-r--r-- drivers/dma-buf/dma-buf.c | 10
-rw-r--r-- drivers/dma-buf/dma-fence.c | 54
-rw-r--r-- drivers/dma-buf/dma-heap.c | 4
-rw-r--r-- drivers/dma-buf/heaps/Kconfig | 10
-rw-r--r-- drivers/dma-buf/heaps/cma_heap.c | 47
-rw-r--r-- drivers/dma-buf/heaps/system_heap.c | 33
-rw-r--r-- drivers/dma-buf/sw_sync.c | 4
-rw-r--r-- drivers/dma-buf/sync_debug.c | 2
-rw-r--r-- drivers/dma/Kconfig | 6
-rw-r--r-- drivers/dma/at_hdmac.c | 6
-rw-r--r-- drivers/dma/bcm2835-dma.c | 1
-rw-r--r-- drivers/dma/dw-edma/dw-edma-core.c | 22
-rw-r--r-- drivers/dma/dw/platform.c | 5
-rw-r--r-- drivers/dma/fsl-edma-common.c | 45
-rw-r--r-- drivers/dma/fsl-edma-main.c | 1
-rw-r--r-- drivers/dma/fsl-qdma.c | 1
-rw-r--r-- drivers/dma/idxd/defaults.c | 6
-rw-r--r-- drivers/dma/idxd/device.c | 19
-rw-r--r-- drivers/dma/idxd/init.c | 2
-rw-r--r-- drivers/dma/idxd/registers.h | 5
-rw-r--r-- drivers/dma/imx-sdma.c | 2
-rw-r--r-- drivers/dma/ioat/dma.h | 2
-rw-r--r-- drivers/dma/ioat/hw.h | 3
-rw-r--r-- drivers/dma/ioat/init.c | 1
-rw-r--r-- drivers/dma/k3dma.c | 1
-rw-r--r-- drivers/dma/mmp_pdma.c | 289
-rw-r--r-- drivers/dma/mmp_tdma.c | 4
-rw-r--r-- drivers/dma/mv_xor.c | 4
-rw-r--r-- drivers/dma/nbpfaxi.c | 6
-rw-r--r-- drivers/dma/ppc4xx/adma.c | 4
-rw-r--r-- drivers/dma/qcom/gpi.c | 11
-rw-r--r-- drivers/dma/sh/Kconfig | 2
-rw-r--r-- drivers/dma/sh/rcar-dmac.c | 16
-rw-r--r-- drivers/dma/sh/shdma-base.c | 25
-rw-r--r-- drivers/dma/sh/shdmac.c | 17
-rw-r--r-- drivers/dma/sh/usb-dmac.c | 11
-rw-r--r-- drivers/dma/sprd-dma.c | 1
-rw-r--r-- drivers/dma/st_fdma.c | 1
-rw-r--r-- drivers/dma/tegra210-adma.c | 1
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 94
-rw-r--r-- drivers/dma/xilinx/zynqmp_dma.c | 5
-rw-r--r-- drivers/dpll/dpll_netlink.c | 118
-rw-r--r-- drivers/dpll/dpll_nl.c | 6
-rw-r--r-- drivers/dpll/dpll_nl.h | 1
-rw-r--r-- drivers/dpll/zl3073x/Makefile | 3
-rw-r--r-- drivers/dpll/zl3073x/core.c | 544
-rw-r--r-- drivers/dpll/zl3073x/core.h | 232
-rw-r--r-- drivers/dpll/zl3073x/devlink.c | 155
-rw-r--r-- drivers/dpll/zl3073x/devlink.h | 3
-rw-r--r-- drivers/dpll/zl3073x/dpll.c | 878
-rw-r--r-- drivers/dpll/zl3073x/dpll.h | 2
-rw-r--r-- drivers/dpll/zl3073x/flash.c | 666
-rw-r--r-- drivers/dpll/zl3073x/flash.h | 29
-rw-r--r-- drivers/dpll/zl3073x/fw.c | 419
-rw-r--r-- drivers/dpll/zl3073x/fw.h | 52
-rw-r--r-- drivers/dpll/zl3073x/out.c | 157
-rw-r--r-- drivers/dpll/zl3073x/out.h | 93
-rw-r--r-- drivers/dpll/zl3073x/prop.c | 19
-rw-r--r-- drivers/dpll/zl3073x/ref.c | 204
-rw-r--r-- drivers/dpll/zl3073x/ref.h | 134
-rw-r--r-- drivers/dpll/zl3073x/regs.h | 54
-rw-r--r-- drivers/dpll/zl3073x/synth.c | 87
-rw-r--r-- drivers/dpll/zl3073x/synth.h | 72
-rw-r--r-- drivers/edac/Kconfig | 36
-rw-r--r-- drivers/edac/Makefile | 5
-rw-r--r-- drivers/edac/a72_edac.c | 225
-rw-r--r-- drivers/edac/altera_edac.c | 26
-rw-r--r-- drivers/edac/amd64_edac.c | 59
-rw-r--r-- drivers/edac/amd64_edac.h | 7
-rw-r--r-- [-rwxr-xr-x] drivers/edac/ecs.c | 0
-rw-r--r-- drivers/edac/edac_mc_sysfs.c | 380
-rw-r--r-- drivers/edac/ghes_edac.c | 7
-rw-r--r-- drivers/edac/i10nm_base.c | 30
-rw-r--r-- drivers/edac/ie31200_edac.c | 10
-rw-r--r-- drivers/edac/igen6_edac.c | 2
-rw-r--r-- drivers/edac/imh_base.c | 602
-rw-r--r-- [-rwxr-xr-x] drivers/edac/mem_repair.c | 0
-rw-r--r-- [-rwxr-xr-x] drivers/edac/scrub.c | 0
-rw-r--r-- drivers/edac/skx_base.c | 35
-rw-r--r-- drivers/edac/skx_common.c | 87
-rw-r--r-- drivers/edac/skx_common.h | 126
-rw-r--r-- drivers/edac/versalnet_edac.c | 962
-rw-r--r-- drivers/eisa/eisa-bus.c | 2
-rw-r--r-- drivers/extcon/Kconfig | 13
-rw-r--r-- drivers/extcon/Makefile | 1
-rw-r--r-- drivers/extcon/extcon-adc-jack.c | 2
-rw-r--r-- drivers/extcon/extcon-axp288.c | 2
-rw-r--r-- drivers/extcon/extcon-fsa9480.c | 2
-rw-r--r-- drivers/extcon/extcon-max14526.c | 302
-rw-r--r-- drivers/extcon/extcon-qcom-spmi-misc.c | 2
-rw-r--r-- drivers/firewire/core-card.c | 493
-rw-r--r-- drivers/firewire/core-cdev.c | 38
-rw-r--r-- drivers/firewire/core-device.c | 221
-rw-r--r-- drivers/firewire/core-topology.c | 92
-rw-r--r-- drivers/firewire/core-transaction.c | 208
-rw-r--r-- drivers/firewire/core.h | 27
-rw-r--r-- drivers/firewire/init_ohci1394_dma.c | 10
-rw-r--r-- drivers/firewire/ohci.c | 394
-rw-r--r-- drivers/firmware/arm_ffa/driver.c | 37
-rw-r--r-- drivers/firmware/arm_scmi/bus.c | 13
-rw-r--r-- drivers/firmware/arm_scmi/common.h | 32
-rw-r--r-- drivers/firmware/arm_scmi/driver.c | 59
-rw-r--r-- drivers/firmware/arm_scmi/quirks.c | 15
-rw-r--r-- drivers/firmware/arm_scmi/transports/mailbox.c | 7
-rw-r--r-- drivers/firmware/arm_scmi/transports/optee.c | 2
-rw-r--r-- drivers/firmware/arm_scmi/transports/virtio.c | 3
-rw-r--r-- drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c | 111
-rw-r--r-- drivers/firmware/arm_scmi/vendors/imx/imx95.rst | 25
-rw-r--r-- drivers/firmware/arm_scmi/voltage.c | 2
-rw-r--r-- drivers/firmware/broadcom/bcm47xx_sprom.c | 2
-rw-r--r-- drivers/firmware/cirrus/cs_dsp.c | 175
-rw-r--r-- drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c | 1
-rw-r--r-- drivers/firmware/efi/Kconfig | 7
-rw-r--r-- drivers/firmware/efi/arm-runtime.c | 4
-rw-r--r-- drivers/firmware/efi/cper-arm.c | 52
-rw-r--r-- drivers/firmware/efi/cper.c | 62
-rw-r--r-- drivers/firmware/efi/efi-init.c | 29
-rw-r--r-- drivers/firmware/efi/efi.c | 3
-rw-r--r-- drivers/firmware/efi/libstub/Makefile | 4
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub.c | 2
-rw-r--r-- drivers/firmware/efi/libstub/efistub.h | 31
-rw-r--r-- drivers/firmware/efi/libstub/gop.c | 137
-rw-r--r-- drivers/firmware/efi/libstub/x86-5lvl.c | 4
-rw-r--r-- drivers/firmware/efi/libstub/x86-stub.c | 118
-rw-r--r-- drivers/firmware/efi/memattr.c | 7
-rw-r--r-- drivers/firmware/efi/riscv-runtime.c | 14
-rw-r--r-- drivers/firmware/efi/runtime-wrappers.c | 17
-rw-r--r-- drivers/firmware/efi/stmm/mm_communication.h | 6
-rw-r--r-- drivers/firmware/imx/imx-scu-irq.c | 32
-rw-r--r-- drivers/firmware/imx/imx-scu.c | 11
-rw-r--r-- drivers/firmware/meson/Kconfig | 2
-rw-r--r-- drivers/firmware/meson/meson_sm.c | 7
-rw-r--r-- drivers/firmware/qcom/qcom_scm.c | 142
-rw-r--r-- drivers/firmware/qcom/qcom_scm.h | 7
-rw-r--r-- drivers/firmware/qcom/qcom_tzmem.c | 64
-rw-r--r-- drivers/firmware/samsung/Makefile | 4
-rw-r--r-- drivers/firmware/samsung/exynos-acpm-dvfs.c | 80
-rw-r--r-- drivers/firmware/samsung/exynos-acpm-dvfs.h | 21
-rw-r--r-- drivers/firmware/samsung/exynos-acpm-pmic.c | 25
-rw-r--r-- drivers/firmware/samsung/exynos-acpm.c | 26
-rw-r--r-- drivers/firmware/stratix10-rsu.c | 279
-rw-r--r-- drivers/firmware/stratix10-svc.c | 768
-rw-r--r-- drivers/firmware/tegra/bpmp-tegra186.c | 5
-rw-r--r-- drivers/firmware/ti_sci.c | 204
-rw-r--r-- drivers/firmware/ti_sci.h | 10
-rw-r--r-- drivers/firmware/xilinx/Makefile | 2
-rw-r--r-- drivers/firmware/xilinx/zynqmp-debug.c | 13
-rw-r--r-- drivers/firmware/xilinx/zynqmp-ufs.c | 118
-rw-r--r-- drivers/firmware/xilinx/zynqmp.c | 160
-rw-r--r-- drivers/fpga/altera-cvp.c | 20
-rw-r--r-- drivers/fpga/xilinx-spi.c | 7
-rw-r--r-- drivers/fsi/fsi-occ.c | 16
-rw-r--r-- drivers/fwctl/mlx5/main.c | 9
-rw-r--r-- drivers/fwctl/pds/main.c | 18
-rw-r--r-- drivers/gnss/ubx.c | 8
-rw-r--r-- drivers/gpib/Kconfig (renamed from drivers/staging/gpib/Kconfig) | 8
-rw-r--r-- drivers/gpib/Makefile (renamed from drivers/staging/gpib/Makefile) | 2
-rw-r--r-- drivers/gpib/TODO (renamed from drivers/staging/gpib/TODO) | 14
-rw-r--r-- drivers/gpib/agilent_82350b/Makefile (renamed from drivers/staging/gpib/agilent_82350b/Makefile) | 0
-rw-r--r-- drivers/gpib/agilent_82350b/agilent_82350b.c (renamed from drivers/staging/gpib/agilent_82350b/agilent_82350b.c) | 12
-rw-r--r-- drivers/gpib/agilent_82350b/agilent_82350b.h (renamed from drivers/staging/gpib/agilent_82350b/agilent_82350b.h) | 0
-rw-r--r-- drivers/gpib/agilent_82357a/Makefile (renamed from drivers/staging/gpib/agilent_82357a/Makefile) | 0
-rw-r--r-- drivers/gpib/agilent_82357a/agilent_82357a.c (renamed from drivers/staging/gpib/agilent_82357a/agilent_82357a.c) | 18
-rw-r--r-- drivers/gpib/agilent_82357a/agilent_82357a.h (renamed from drivers/staging/gpib/agilent_82357a/agilent_82357a.h) | 10
-rw-r--r-- drivers/gpib/cb7210/Makefile (renamed from drivers/staging/gpib/cb7210/Makefile) | 0
-rw-r--r-- drivers/gpib/cb7210/cb7210.c (renamed from drivers/staging/gpib/cb7210/cb7210.c) | 12
-rw-r--r-- drivers/gpib/cb7210/cb7210.h (renamed from drivers/staging/gpib/cb7210/cb7210.h) | 4
-rw-r--r-- drivers/gpib/cec/Makefile (renamed from drivers/staging/gpib/cec/Makefile) | 0
-rw-r--r-- drivers/gpib/cec/cec.h (renamed from drivers/staging/gpib/cec/cec.h) | 0
-rw-r--r-- drivers/gpib/cec/cec_gpib.c (renamed from drivers/staging/gpib/cec/cec_gpib.c) | 2
-rw-r--r-- drivers/gpib/common/Makefile (renamed from drivers/staging/gpib/common/Makefile) | 0
-rw-r--r-- drivers/gpib/common/gpib_os.c (renamed from drivers/staging/gpib/common/gpib_os.c) | 2
-rw-r--r-- drivers/gpib/common/iblib.c (renamed from drivers/staging/gpib/common/iblib.c) | 2
-rw-r--r-- drivers/gpib/common/ibsys.h (renamed from drivers/staging/gpib/common/ibsys.h) | 0
-rw-r--r-- drivers/gpib/eastwood/Makefile (renamed from drivers/staging/gpib/eastwood/Makefile) | 0
-rw-r--r-- drivers/gpib/eastwood/fluke_gpib.c (renamed from drivers/staging/gpib/eastwood/fluke_gpib.c) | 2
-rw-r--r-- drivers/gpib/eastwood/fluke_gpib.h (renamed from drivers/staging/gpib/eastwood/fluke_gpib.h) | 0
-rw-r--r-- drivers/gpib/fmh_gpib/Makefile (renamed from drivers/staging/gpib/fmh_gpib/Makefile) | 0
-rw-r--r-- drivers/gpib/fmh_gpib/fmh_gpib.c (renamed from drivers/staging/gpib/fmh_gpib/fmh_gpib.c) | 7
-rw-r--r-- drivers/gpib/fmh_gpib/fmh_gpib.h (renamed from drivers/staging/gpib/fmh_gpib/fmh_gpib.h) | 0
-rw-r--r-- drivers/gpib/gpio/Makefile (renamed from drivers/staging/gpib/gpio/Makefile) | 0
-rw-r--r-- drivers/gpib/gpio/gpib_bitbang.c (renamed from drivers/staging/gpib/gpio/gpib_bitbang.c) | 16
-rw-r--r-- drivers/gpib/hp_82335/Makefile (renamed from drivers/staging/gpib/hp_82335/Makefile) | 0
-rw-r--r-- drivers/gpib/hp_82335/hp82335.c (renamed from drivers/staging/gpib/hp_82335/hp82335.c) | 0
-rw-r--r-- drivers/gpib/hp_82335/hp82335.h (renamed from drivers/staging/gpib/hp_82335/hp82335.h) | 0
-rw-r--r-- drivers/gpib/hp_82341/Makefile (renamed from drivers/staging/gpib/hp_82341/Makefile) | 0
-rw-r--r-- drivers/gpib/hp_82341/hp_82341.c (renamed from drivers/staging/gpib/hp_82341/hp_82341.c) | 12
-rw-r--r-- drivers/gpib/hp_82341/hp_82341.h (renamed from drivers/staging/gpib/hp_82341/hp_82341.h) | 40
-rw-r--r-- drivers/gpib/include/amcc5920.h (renamed from drivers/staging/gpib/include/amcc5920.h) | 0
-rw-r--r-- drivers/gpib/include/amccs5933.h (renamed from drivers/staging/gpib/include/amccs5933.h) | 4
-rw-r--r-- drivers/gpib/include/gpibP.h (renamed from drivers/staging/gpib/include/gpibP.h) | 4
-rw-r--r-- drivers/gpib/include/gpib_cmd.h (renamed from drivers/staging/gpib/include/gpib_cmd.h) | 0
-rw-r--r-- drivers/gpib/include/gpib_pci_ids.h (renamed from drivers/staging/gpib/include/gpib_pci_ids.h) | 0
-rw-r--r-- drivers/gpib/include/gpib_proto.h (renamed from drivers/staging/gpib/include/gpib_proto.h) | 0
-rw-r--r-- drivers/gpib/include/gpib_state_machines.h (renamed from drivers/staging/gpib/include/gpib_state_machines.h) | 0
-rw-r--r-- drivers/gpib/include/gpib_types.h (renamed from drivers/staging/gpib/include/gpib_types.h) | 5
-rw-r--r-- drivers/gpib/include/nec7210.h (renamed from drivers/staging/gpib/include/nec7210.h) | 26
-rw-r--r-- drivers/gpib/include/nec7210_registers.h (renamed from drivers/staging/gpib/include/nec7210_registers.h) | 4
-rw-r--r-- drivers/gpib/include/plx9050.h (renamed from drivers/staging/gpib/include/plx9050.h) | 8
-rw-r--r-- drivers/gpib/include/quancom_pci.h (renamed from drivers/staging/gpib/include/quancom_pci.h) | 0
-rw-r--r-- drivers/gpib/include/tms9914.h (renamed from drivers/staging/gpib/include/tms9914.h) | 90
-rw-r--r-- drivers/gpib/include/tnt4882_registers.h (renamed from drivers/staging/gpib/include/tnt4882_registers.h) | 22
-rw-r--r-- drivers/gpib/ines/Makefile (renamed from drivers/staging/gpib/ines/Makefile) | 0
-rw-r--r-- drivers/gpib/ines/ines.h (renamed from drivers/staging/gpib/ines/ines.h) | 12
-rw-r--r-- drivers/gpib/ines/ines_gpib.c (renamed from drivers/staging/gpib/ines/ines_gpib.c) | 4
-rw-r--r-- drivers/gpib/lpvo_usb_gpib/Makefile (renamed from drivers/staging/gpib/lpvo_usb_gpib/Makefile) | 0
-rw-r--r-- drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c (renamed from drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c) | 0
-rw-r--r-- drivers/gpib/nec7210/Makefile (renamed from drivers/staging/gpib/nec7210/Makefile) | 0
-rw-r--r-- drivers/gpib/nec7210/board.h (renamed from drivers/staging/gpib/nec7210/board.h) | 0
-rw-r--r-- drivers/gpib/nec7210/nec7210.c (renamed from drivers/staging/gpib/nec7210/nec7210.c) | 6
-rw-r--r-- drivers/gpib/ni_usb/Makefile (renamed from drivers/staging/gpib/ni_usb/Makefile) | 0
-rw-r--r-- drivers/gpib/ni_usb/ni_usb_gpib.c (renamed from drivers/staging/gpib/ni_usb/ni_usb_gpib.c) | 21
-rw-r--r-- drivers/gpib/ni_usb/ni_usb_gpib.h (renamed from drivers/staging/gpib/ni_usb/ni_usb_gpib.h) | 10
-rw-r--r-- drivers/gpib/pc2/Makefile (renamed from drivers/staging/gpib/pc2/Makefile) | 0
-rw-r--r-- drivers/gpib/pc2/pc2_gpib.c (renamed from drivers/staging/gpib/pc2/pc2_gpib.c) | 4
-rw-r--r-- drivers/gpib/tms9914/Makefile (renamed from drivers/staging/gpib/tms9914/Makefile) | 0
-rw-r--r-- drivers/gpib/tms9914/tms9914.c (renamed from drivers/staging/gpib/tms9914/tms9914.c) | 12
-rw-r--r-- drivers/gpib/tnt4882/Makefile (renamed from drivers/staging/gpib/tnt4882/Makefile) | 0
-rw-r--r-- drivers/gpib/tnt4882/mite.c (renamed from drivers/staging/gpib/tnt4882/mite.c) | 0
-rw-r--r-- drivers/gpib/tnt4882/mite.h (renamed from drivers/staging/gpib/tnt4882/mite.h) | 10
-rw-r--r-- drivers/gpib/tnt4882/tnt4882_gpib.c (renamed from drivers/staging/gpib/tnt4882/tnt4882_gpib.c) | 5
-rw-r--r-- drivers/gpio/Kconfig | 96
-rw-r--r-- drivers/gpio/Makefile | 6
-rw-r--r-- drivers/gpio/TODO | 28
-rw-r--r-- drivers/gpio/gpio-104-idio-16.c | 1
-rw-r--r-- drivers/gpio/gpio-aggregator.c | 393
-rw-r--r-- drivers/gpio/gpio-amdpt.c | 44
-rw-r--r-- drivers/gpio/gpio-aspeed.c | 12
-rw-r--r-- drivers/gpio/gpio-ath79.c | 88
-rw-r--r-- drivers/gpio/gpio-blzp1600.c | 39
-rw-r--r-- drivers/gpio/gpio-brcmstb.c | 130
-rw-r--r-- drivers/gpio/gpio-bt8xx.c | 30
-rw-r--r-- drivers/gpio/gpio-cadence.c | 2
-rw-r--r-- drivers/gpio/gpio-dwapb.c | 180
-rw-r--r-- drivers/gpio/gpio-elkhartlake.c | 36
-rw-r--r-- drivers/gpio/gpio-ep93xx.c | 33
-rw-r--r-- drivers/gpio/gpio-ftgpio010.c | 46
-rw-r--r-- drivers/gpio/gpio-fxl6408.c | 13
-rw-r--r-- drivers/gpio/gpio-ge.c | 25
-rw-r--r-- drivers/gpio/gpio-grgpio.c | 95
-rw-r--r-- drivers/gpio/gpio-hisi.c | 48
-rw-r--r-- drivers/gpio/gpio-hlwd.c | 105
-rw-r--r-- drivers/gpio/gpio-htc-egpio.c | 21
-rw-r--r-- drivers/gpio/gpio-idio-16.c | 5
-rw-r--r-- drivers/gpio/gpio-idt3243x.c | 45
-rw-r--r-- drivers/gpio/gpio-ixp4xx.c | 72
-rw-r--r-- drivers/gpio/gpio-latch.c | 2
-rw-r--r-- drivers/gpio/gpio-ljca.c | 14
-rw-r--r-- drivers/gpio/gpio-loongson-64bit.c | 239
-rw-r--r-- drivers/gpio/gpio-loongson1.c | 40
-rw-r--r-- drivers/gpio/gpio-max7360.c | 257
-rw-r--r-- drivers/gpio/gpio-menz127.c | 55
-rw-r--r-- drivers/gpio/gpio-ml-ioh.c | 12
-rw-r--r-- drivers/gpio/gpio-mlxbf.c | 25
-rw-r--r-- drivers/gpio/gpio-mlxbf2.c | 89
-rw-r--r-- drivers/gpio/gpio-mlxbf3.c | 103
-rw-r--r-- drivers/gpio/gpio-mm-lantiq.c | 57
-rw-r--r-- drivers/gpio/gpio-mmio.c | 596
-rw-r--r-- drivers/gpio/gpio-mpc5200.c | 78
-rw-r--r-- drivers/gpio/gpio-mpc8xxx.c | 105
-rw-r--r-- drivers/gpio/gpio-mpfs.c | 2
-rw-r--r-- drivers/gpio/gpio-mpsse.c | 229
-rw-r--r-- drivers/gpio/gpio-msc313.c | 8
-rw-r--r-- drivers/gpio/gpio-mt7621.c | 80
-rw-r--r-- drivers/gpio/gpio-mvebu.c | 6
-rw-r--r-- drivers/gpio/gpio-mxc.c | 14
-rw-r--r-- drivers/gpio/gpio-mxs.c | 31
-rw-r--r-- drivers/gpio/gpio-nct6694.c | 499
-rw-r--r-- drivers/gpio/gpio-nomadik.c | 27
-rw-r--r-- drivers/gpio/gpio-omap.c | 15
-rw-r--r-- drivers/gpio/gpio-pca953x.c | 13
-rw-r--r-- drivers/gpio/gpio-pch.c | 12
-rw-r--r-- drivers/gpio/gpio-pci-idio-16.c | 1
-rw-r--r-- drivers/gpio/gpio-pisosr.c | 8
-rw-r--r-- drivers/gpio/gpio-pl061.c | 17
-rw-r--r-- drivers/gpio/gpio-pxa.c | 12
-rw-r--r-- drivers/gpio/gpio-qixis-fpga.c | 111
-rw-r--r-- drivers/gpio/gpio-rda.c | 35
-rw-r--r-- drivers/gpio/gpio-realtek-otto.c | 41
-rw-r--r-- drivers/gpio/gpio-regmap.c | 74
-rw-r--r-- drivers/gpio/gpio-rockchip.c | 2
-rw-r--r-- drivers/gpio/gpio-sa1100.c | 12
-rw-r--r-- drivers/gpio/gpio-shared-proxy.c | 334
-rw-r--r-- drivers/gpio/gpio-sifive.c | 74
-rw-r--r-- drivers/gpio/gpio-sim.c | 3
-rw-r--r-- drivers/gpio/gpio-sodaville.c | 20
-rw-r--r-- drivers/gpio/gpio-spacemit-k1.c | 29
-rw-r--r-- drivers/gpio/gpio-stmpe.c | 34
-rw-r--r-- drivers/gpio/gpio-tb10x.c | 91
-rw-r--r-- drivers/gpio/gpio-tegra186.c | 188
-rw-r--r-- drivers/gpio/gpio-tqmx86.c | 9
-rw-r--r-- drivers/gpio/gpio-ts4800.c | 39
-rw-r--r-- drivers/gpio/gpio-twl4030.c | 4
-rw-r--r-- drivers/gpio/gpio-uniphier.c | 9
-rw-r--r-- drivers/gpio/gpio-usbio.c | 248
-rw-r--r-- drivers/gpio/gpio-vf610.c | 31
-rw-r--r-- drivers/gpio/gpio-virtuser.c | 8
-rw-r--r-- drivers/gpio/gpio-visconti.c | 25
-rw-r--r-- drivers/gpio/gpio-wcd934x.c | 2
-rw-r--r-- drivers/gpio/gpio-wm831x.c | 5
-rw-r--r-- drivers/gpio/gpio-wm8994.c | 6
-rw-r--r-- drivers/gpio/gpio-xgene-sb.c | 58
-rw-r--r-- drivers/gpio/gpio-xgene.c | 8
-rw-r--r-- drivers/gpio/gpio-xgs-iproc.c | 34
-rw-r--r-- drivers/gpio/gpio-xilinx.c | 15
-rw-r--r-- drivers/gpio/gpio-xra1403.c | 3
-rw-r--r-- drivers/gpio/gpio-zynq.c | 15
-rw-r--r-- drivers/gpio/gpiolib-acpi-core.c | 34
-rw-r--r-- drivers/gpio/gpiolib-acpi-quirks.c | 12
-rw-r--r-- drivers/gpio/gpiolib-cdev.c | 183
-rw-r--r-- drivers/gpio/gpiolib-legacy.c | 44
-rw-r--r-- drivers/gpio/gpiolib-of.c | 81
-rw-r--r-- drivers/gpio/gpiolib-shared.c | 656
-rw-r--r-- drivers/gpio/gpiolib-shared.h | 71
-rw-r--r-- drivers/gpio/gpiolib-swnode.c | 5
-rw-r--r-- drivers/gpio/gpiolib-sysfs.c | 62
-rw-r--r-- drivers/gpio/gpiolib.c | 429
-rw-r--r-- drivers/gpio/gpiolib.h | 85
-rw-r--r-- drivers/gpu/drm/Kconfig | 4
-rw-r--r--drivers/gpu/drm/Makefile12
-rw-r--r--drivers/gpu/drm/adp/adp_drv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c123
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c779
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c297
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c263
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c200
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c239
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c716
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c435
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c258
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c148
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c940
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c179
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c204
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c400
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c205
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3818
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sid.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v1_0.c839
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v1_0.h (renamed from drivers/gpu/drm/amd/amdgpu/dce_v11_0.h)14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c134
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c114
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c139
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c173
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c43
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c56
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c85
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c101
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c62
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c4
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c56
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h1
-rw-r--r--drivers/gpu/drm/amd/display/Makefile1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1026
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h33
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c858
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c209
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h36
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c71
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c43
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c33
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c122
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c45
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c95
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c286
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c144
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c157
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c130
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c1314
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c2855
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c180
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c75
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h635
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c198
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c123
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile141
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/Makefile140
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c)417
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h)5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h)23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h)15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c)151
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c)4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h)57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c)21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c)477
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h)76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c)7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c)3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h (renamed from drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dsc.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c51
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h141
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c69
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c103
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c145
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c146
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c1471
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h104
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h1450
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h121
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h131
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_service.h (renamed from drivers/gpu/drm/amd/display/dc/inc/link.h)15
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c179
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c68
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c115
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c169
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c131
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c93
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c94
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile19
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h36
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h896
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c75
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h8
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c47
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c17
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c33
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c8
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h11
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h16
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h12
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c15
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c11
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c13
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c87
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c61
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c122
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h6
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h149
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c33
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h5
-rw-r--r--drivers/gpu/drm/amd/include/amd_cper.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h99
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h10
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h30
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h210
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h6
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h36
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c119
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c86
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c405
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h14
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h6
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c4
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c9
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c159
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h557
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c26
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c21
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c7
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c198
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h150
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h89
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h23
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c21
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c36
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c24
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c611
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c513
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h176
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c30
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h95
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h1
-rw-r--r--drivers/gpu/drm/amd/ras/Makefile34
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/Makefile33
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c285
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h54
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c182
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h27
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c648
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h83
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c94
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c125
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h30
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c190
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h41
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c279
-rw-r--r--drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h110
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/Makefile44
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras.h370
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.c672
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca.h164
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c379
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h71
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.c522
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cmd.h426
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core.c603
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core_status.h37
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_cper.c315
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_cper.h 304
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c 1339
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h 197
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_gfx.c 70
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_gfx.h 43
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c 426
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h 259
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c 317
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h 93
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_mp1.c 81
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_mp1.h 50
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c 105
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h 30
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_nbio.c 96
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_nbio.h 46
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c 123
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h 31
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_process.c 322
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_process.h 53
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_psp.c 750
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_psp.h 145
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c 46
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h 31
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h 231
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_umc.c 707
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_umc.h 166
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c 511
-rw-r--r-- drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h 314
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 31
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c 1
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_crtc.c 1
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_drv.c 1
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c 1
-rw-r--r-- drivers/gpu/drm/arm/malidp_mw.c 1
-rw-r--r-- drivers/gpu/drm/arm/malidp_planes.c 2
-rw-r--r-- drivers/gpu/drm/armada/armada_crtc.c 1
-rw-r--r-- drivers/gpu/drm/armada/armada_debugfs.c 1
-rw-r--r-- drivers/gpu/drm/armada/armada_fb.c 1
-rw-r--r-- drivers/gpu/drm/armada/armada_fbdev.c 15
-rw-r--r-- drivers/gpu/drm/armada/armada_gem.c 1
-rw-r--r-- drivers/gpu/drm/armada/armada_overlay.c 1
-rw-r--r-- drivers/gpu/drm/armada/armada_plane.c 8
-rw-r--r-- drivers/gpu/drm/ast/Makefile 3
-rw-r--r-- drivers/gpu/drm/ast/ast_2000.c 108
-rw-r--r-- drivers/gpu/drm/ast/ast_2100.c 138
-rw-r--r-- drivers/gpu/drm/ast/ast_2200.c 92
-rw-r--r-- drivers/gpu/drm/ast/ast_2300.c 135
-rw-r--r-- drivers/gpu/drm/ast/ast_2400.c 100
-rw-r--r-- drivers/gpu/drm/ast/ast_2500.c 106
-rw-r--r-- drivers/gpu/drm/ast/ast_2600.c 72
-rw-r--r-- drivers/gpu/drm/ast/ast_dp.c 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c 69
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h 126
-rw-r--r-- drivers/gpu/drm/ast/ast_main.c 394
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c 76
-rw-r--r-- drivers/gpu/drm/ast/ast_reg.h 1
-rw-r--r-- drivers/gpu/drm/ast/ast_tables.h 60
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c 21
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c 15
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h 3
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c 3
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c 53
-rw-r--r-- drivers/gpu/drm/bridge/Kconfig 29
-rw-r--r-- drivers/gpu/drm/bridge/Makefile 2
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511.h 6
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_audio.c 23
-rw-r--r-- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c 63
-rw-r--r-- drivers/gpu/drm/bridge/analogix/anx7625.c 7
-rw-r--r-- drivers/gpu/drm/bridge/cadence/Kconfig 1
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c 211
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c 6
-rw-r--r-- drivers/gpu/drm/bridge/display-connector.c 7
-rw-r--r-- drivers/gpu/drm/bridge/imx/Kconfig 11
-rw-r--r-- drivers/gpu/drm/bridge/imx/Makefile 1
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c 158
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c 65
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c 7
-rw-r--r-- drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c 12
-rw-r--r-- drivers/gpu/drm/bridge/ite-it6263.c 64
-rw-r--r-- drivers/gpu/drm/bridge/ite-it6505.c 33
-rw-r--r-- drivers/gpu/drm/bridge/ite-it66121.c 68
-rw-r--r-- drivers/gpu/drm/bridge/lontium-lt9211.c 3
-rw-r--r-- drivers/gpu/drm/bridge/samsung-dsim.c 353
-rw-r--r-- drivers/gpu/drm/bridge/sii902x.c 20
-rw-r--r-- drivers/gpu/drm/bridge/simple-bridge.c 20
-rw-r--r-- drivers/gpu/drm/bridge/ssd2825.c 775
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/Kconfig 15
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/Makefile 1
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-dp.c 2097
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c 5
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c 235
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h 14
-rw-r--r-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c 18
-rw-r--r-- drivers/gpu/drm/bridge/ti-sn65dsi86.c 112
-rw-r--r-- drivers/gpu/drm/bridge/waveshare-dsi.c 203
-rw-r--r-- drivers/gpu/drm/ci/gitlab-ci.yml 2
-rw-r--r-- drivers/gpu/drm/clients/drm_client_setup.c 4
-rw-r--r-- drivers/gpu/drm/clients/drm_fbdev_client.c 37
-rw-r--r-- drivers/gpu/drm/clients/drm_log.c 43
-rw-r--r-- drivers/gpu/drm/display/drm_bridge_connector.c 85
-rw-r--r-- drivers/gpu/drm/display/drm_dp_cec.c 2
-rw-r--r-- drivers/gpu/drm/display/drm_dp_helper.c 236
-rw-r--r-- drivers/gpu/drm/drm_atomic.c 225
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c 31
-rw-r--r-- drivers/gpu/drm/drm_atomic_state_helper.c 5
-rw-r--r-- drivers/gpu/drm/drm_atomic_uapi.c 183
-rw-r--r-- drivers/gpu/drm/drm_bridge.c 72
-rw-r--r-- drivers/gpu/drm/drm_buddy.c 395
-rw-r--r-- drivers/gpu/drm/drm_client.c 198
-rw-r--r-- drivers/gpu/drm/drm_client_event.c 29
-rw-r--r-- drivers/gpu/drm/drm_client_modeset.c 44
-rw-r--r-- drivers/gpu/drm/drm_client_sysrq.c 65
-rw-r--r-- drivers/gpu/drm/drm_color_mgmt.c 77
-rw-r--r-- drivers/gpu/drm/drm_colorop.c 599
-rw-r--r-- drivers/gpu/drm/drm_connector.c 1
-rw-r--r-- drivers/gpu/drm/drm_crtc.c 35
-rw-r--r-- drivers/gpu/drm/drm_crtc_internal.h 1
-rw-r--r-- drivers/gpu/drm/drm_displayid.c 58
-rw-r--r-- drivers/gpu/drm/drm_displayid_internal.h 2
-rw-r--r-- drivers/gpu/drm/drm_draw.c 2
-rw-r--r-- drivers/gpu/drm/drm_draw_internal.h 2
-rw-r--r-- drivers/gpu/drm/drm_drv.c 7
-rw-r--r-- drivers/gpu/drm/drm_dumb_buffers.c 171
-rw-r--r-- drivers/gpu/drm/drm_edid.c 3
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c 152
-rw-r--r-- drivers/gpu/drm/drm_fbdev_dma.c 25
-rw-r--r-- drivers/gpu/drm/drm_fbdev_shmem.c 21
-rw-r--r-- drivers/gpu/drm/drm_fbdev_ttm.c 24
-rw-r--r-- drivers/gpu/drm/drm_file.c 2
-rw-r--r-- drivers/gpu/drm/drm_format_helper.c 186
-rw-r--r-- drivers/gpu/drm/drm_framebuffer.c 2
-rw-r--r-- drivers/gpu/drm/drm_gem.c 120
-rw-r--r-- drivers/gpu/drm/drm_gem_atomic_helper.c 10
-rw-r--r-- drivers/gpu/drm/drm_gem_dma_helper.c 10
-rw-r--r-- drivers/gpu/drm/drm_gem_framebuffer_helper.c 1
-rw-r--r-- drivers/gpu/drm/drm_gem_shmem_helper.c 114
-rw-r--r-- drivers/gpu/drm/drm_gem_ttm_helper.c 1
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c 11
-rw-r--r-- drivers/gpu/drm/drm_gpusvm.c 445
-rw-r--r-- drivers/gpu/drm/drm_gpuvm.c 510
-rw-r--r-- drivers/gpu/drm/drm_internal.h 15
-rw-r--r-- drivers/gpu/drm/drm_ioctl.c 8
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c 3
-rw-r--r-- drivers/gpu/drm/drm_mipi_dsi.c 119
-rw-r--r-- drivers/gpu/drm/drm_mm.c 1
-rw-r--r-- drivers/gpu/drm/drm_mode_config.c 7
-rw-r--r-- drivers/gpu/drm/drm_mode_object.c 18
-rw-r--r-- drivers/gpu/drm/drm_modeset_helper.c 6
-rw-r--r-- drivers/gpu/drm/drm_of.c 7
-rw-r--r-- drivers/gpu/drm/drm_pagemap.c 150
-rw-r--r-- drivers/gpu/drm/drm_panel.c 73
-rw-r--r-- drivers/gpu/drm/drm_panel_backlight_quirks.c 113
-rw-r--r-- drivers/gpu/drm/drm_panic.c 60
-rw-r--r-- drivers/gpu/drm/drm_panic_qr.rs 2
-rw-r--r-- drivers/gpu/drm/drm_plane.c 65
-rw-r--r-- drivers/gpu/drm/drm_prime.c 7
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c 1
-rw-r--r-- drivers/gpu/drm/drm_sysfs.c 41
-rw-r--r-- drivers/gpu/drm/drm_vblank.c 180
-rw-r--r-- drivers/gpu/drm/drm_vblank_helper.c 176
-rw-r--r-- drivers/gpu/drm/drm_vblank_work.c 2
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_buffer.c 3
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_drv.c 1
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem.c 1
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 1
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_gpu.c 2
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_hwdb.c 32
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_mmu.c 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos5433_drm_decon.c 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos7_drm_decon.c 37
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c 9
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fb.c 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c 12
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_gem.c 9
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.c 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_plane.c 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c 1
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c 1
-rw-r--r-- drivers/gpu/drm/gma500/backlight.c 2
-rw-r--r-- drivers/gpu/drm/gma500/cdv_device.c 1
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_display.c 1
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_dp.c 1
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_hdmi.c 1
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_lvds.c 1
-rw-r--r-- drivers/gpu/drm/gma500/fbdev.c 60
-rw-r--r-- drivers/gpu/drm/gma500/gem.c 1
-rw-r--r-- drivers/gpu/drm/gma500/intel_bios.c 1
-rw-r--r-- drivers/gpu/drm/gma500/intel_gmbus.c 2
-rw-r--r-- drivers/gpu/drm/gma500/mid_bios.c 1
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_crtc.c 1
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_hdmi.c 3
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c 3
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_lvds.c 1
-rw-r--r-- drivers/gpu/drm/gma500/opregion.c 3
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.c 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_display.c 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_lvds.c 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_sdvo.c 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_irq.c 37
-rw-r--r-- drivers/gpu/drm/gud/gud_connector.c 33
-rw-r--r-- drivers/gpu/drm/gud/gud_drv.c 97
-rw-r--r-- drivers/gpu/drm/gud/gud_internal.h 13
-rw-r--r-- drivers/gpu/drm/gud/gud_pipe.c 76
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c 1
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c 1
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_drv.c 1
-rw-r--r-- drivers/gpu/drm/hyperv/hyperv_drm_modeset.c 12
-rw-r--r-- drivers/gpu/drm/i915/Kconfig.debug 2
-rw-r--r-- drivers/gpu/drm/i915/Makefile 19
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_dp.c 53
-rw-r--r-- drivers/gpu/drm/i915/display/g4x_hdmi.c 15
-rw-r--r-- drivers/gpu/drm/i915/display/hsw_ips.c 63
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_plane.c 96
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_plane.h 6
-rw-r--r-- drivers/gpu/drm/i915/display/i9xx_wm.c 41
-rw-r--r-- drivers/gpu/drm/i915/display/icl_dsi.c 68
-rw-r--r-- drivers/gpu/drm/i915/display/intel_acpi.c 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_alpm.c 184
-rw-r--r-- drivers/gpu/drm/i915/display/intel_alpm.h 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_backlight.c 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.c 53
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bios.h 176
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bo.c 47
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bo.h 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.c 357
-rw-r--r-- drivers/gpu/drm/i915/display/intel_bw.h 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_casf.c 290
-rw-r--r-- drivers/gpu/drm/i915/display/intel_casf.h 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_casf_regs.h 33
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.c 554
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cdclk.h 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.c 352
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color.h 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color_pipeline.c 99
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color_pipeline.h 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_color_regs.h 29
-rw-r--r-- drivers/gpu/drm/i915/display/intel_colorop.c 35
-rw-r--r-- drivers/gpu/drm/i915/display/intel_colorop.h 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_combo_phy.c 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.c 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_connector.h 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crt.c 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc.c 99
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc.h 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_crtc_state_dump.c 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.c 60
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cursor.h 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy.c 263
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy.h 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h 32
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dbuf_bw.c 295
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dbuf_bw.h 37
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c 212
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c 83
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_de.h 107
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c 384
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.h 16
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_conversion.c 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_conversion.h 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_core.h 34
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs_params.c 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_device.c 39
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_device.h 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_driver.c 22
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_irq.c 138
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_irq.h 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_jiffies.h 43
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_limits.h 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_params.c 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_params.h 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power.c 31
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_map.c 81
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_power_well.c 110
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_regs.h 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_reset.c 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_rpm.c 33
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_types.h 96
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_utils.c 32
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_utils.h 31
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_wa.c 38
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_wa.h 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc.c 127
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dmc_wl.c 25
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.c 400
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp.h 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux.c 8
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_hdcp.c 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.c 159
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_link_training.h 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_mst.c 60
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dp_test.c 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpio_phy.c 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll.c 35
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.c 31
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpll_mgr.h 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dpt.c 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.c 67
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsb.h 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt.c 39
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h 197
-rw-r--r-- drivers/gpu/drm/i915/display/intel_dvo.c 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_encoder.c 41
-rw-r--r-- drivers/gpu/drm/i915/display/intel_encoder.h 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.c 96
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb.h 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_bo.c 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_bo.h 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fb_pin.c 41
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.c 218
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbc.h 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev.c 72
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev_fb.c 58
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fbdev_fb.h 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fdi.c 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_fdi.h 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_flipq.c 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.c 144
-rw-r--r-- drivers/gpu/drm/i915/display/intel_frontbuffer.h 18
-rw-r--r-- drivers/gpu/drm/i915/display/intel_global_state.c 32
-rw-r--r-- drivers/gpu/drm/i915/display/intel_global_state.h 36
-rw-r--r-- drivers/gpu/drm/i915/display/intel_gmbus.c 56
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp.c 84
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdcp_gsc.c 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.c 40
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hdmi.h 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug.c 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_hotplug_irq.c 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_link_bw.c 51
-rw-r--r-- drivers/gpu/drm/i915/display/intel_link_bw.h 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lpe_audio.c 11
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lspcon.c 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lt_phy.c 2327
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lt_phy.h 47
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lt_phy_regs.h 90
-rw-r--r-- drivers/gpu/drm/i915/display/intel_lvds.c 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_setup.c 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_modeset_verify.c 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_opregion.c 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_overlay.c 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panic.c 27
-rw-r--r-- drivers/gpu/drm/i915/display/intel_panic.h 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch.c 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch.h 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_display.c 12
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pch_refclk.c 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pfit.c 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pfit.h 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pipe_crc.c 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane.c 132
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane.h 4
-rw-r--r-- drivers/gpu/drm/i915/display/intel_plane_initial.c 15
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pmdemand.c 21
-rw-r--r-- drivers/gpu/drm/i915/display/intel_pps.c 13
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.c 654
-rw-r--r-- drivers/gpu/drm/i915/display/intel_psr.h 7
-rw-r--r-- drivers/gpu/drm/i915/display/intel_qp_tables.c 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_quirks.c 9
-rw-r--r-- drivers/gpu/drm/i915/display/intel_quirks.h 1
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sbi.c 6
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sdvo.c 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_snps_phy.c 10
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c 63
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.c 190
-rw-r--r-- drivers/gpu/drm/i915/display/intel_tc.h 75
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vblank.c 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vblank.h 2
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vbt_defs.h 20
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.c 26
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vdsc.h 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.c 498
-rw-r--r-- drivers/gpu/drm/i915/display/intel_vrr.h 5
-rw-r--r-- drivers/gpu/drm/i915/display/intel_wm.c 9
-rw-r--r-- drivers/gpu/drm/i915/display/skl_prefill.c 157
-rw-r--r-- drivers/gpu/drm/i915/display/skl_prefill.h 46
-rw-r--r-- drivers/gpu/drm/i915/display/skl_scaler.c 287
-rw-r--r-- drivers/gpu/drm/i915/display/skl_scaler.h 30
-rw-r--r-- drivers/gpu/drm/i915/display/skl_universal_plane.c 276
-rw-r--r-- drivers/gpu/drm/i915/display/skl_universal_plane_regs.h 139
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark.c 395
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark.h 3
-rw-r--r-- drivers/gpu/drm/i915/display/skl_watermark_regs.h 52
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_clock.c 88
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_clock.h 38
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi.c 60
-rw-r--r-- drivers/gpu/drm/i915/display/vlv_dsi_pll.c 40
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_context.c 13
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_create.c 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 62
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_mman.c 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.c 21
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object.h 12
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c 103
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h 56
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_object_types.h 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_pages.c 46
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_phys.c 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shmem.c 22
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c 6
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.c 105
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_stolen.h 34
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_tiling.c 5
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm.c 8
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c 1
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_userptr.c 2
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_wait.c 15
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gemfs.c 11
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c 7
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c 3
-rw-r--r-- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c 72
-rw-r--r-- drivers/gpu/drm/i915/gt/gen2_engine_cs.c 8
-rw-r--r-- drivers/gpu/drm/i915/gt/gen8_engine_cs.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_breadcrumbs.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_context_types.h 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c 3
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_engine_user.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_execlists_submission.c 8
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c 6
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_debugfs.c 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_mcr.c 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_lrc.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_mocs.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rc6.c 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_region_lmem.c 26
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_renderstate.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset.c 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_reset_types.h 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ring_submission.c 7
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_rps.c 31
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sa_media.c 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline.c 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_timeline.h 1
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_tlb.h 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_wopcm.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_workarounds.c 38
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_context.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_execlists.c 3
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_hangcheck.c 2
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_migrate.c 9
-rw-r--r-- drivers/gpu/drm/i915/gt/selftest_tlb.c 6
-rw-r--r-- drivers/gpu/drm/i915/gt/sysfs_engines.c 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c 6
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c 4
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc.c 8
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c 23
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h 2
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c 12
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.c 12
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_log.h 8
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h 1
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c 13
-rw-r--r-- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 10
-rw-r--r-- drivers/gpu/drm/i915/gvt/aperture_gm.c 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/cfg_space.c 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c 4
-rw-r--r-- drivers/gpu/drm/i915/gvt/debugfs.c 12
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/dmabuf.c 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/edid.c 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/gtt.c 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/interrupt.c 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c 291
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio.c 7
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/vgpu.c 2
-rw-r--r-- drivers/gpu/drm/i915/i915_active.c 5
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c 1
-rw-r--r-- drivers/gpu/drm/i915/i915_config.c 2
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c 24
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs_params.c 4
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.c 173
-rw-r--r-- drivers/gpu/drm/i915/i915_driver.h 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h 31
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c 6
-rw-r--r-- drivers/gpu/drm/i915/i915_getparam.c 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c 108
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.h 1
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c 113
-rw-r--r-- drivers/gpu/drm/i915/i915_jiffies.h 16
-rw-r--r-- drivers/gpu/drm/i915/i915_list_util.h 23
-rw-r--r-- drivers/gpu/drm/i915/i915_mmio_range.c 18
-rw-r--r-- drivers/gpu/drm/i915/i915_mmio_range.h 19
-rw-r--r-- drivers/gpu/drm/i915/i915_module.c 1
-rw-r--r-- drivers/gpu/drm/i915/i915_perf.c 67
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c 6
-rw-r--r-- drivers/gpu/drm/i915/i915_ptr_util.h 66
-rw-r--r-- drivers/gpu/drm/i915/i915_query.c 2
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h 10
-rw-r--r-- drivers/gpu/drm/i915/i915_reg_defs.h 10
-rw-r--r-- drivers/gpu/drm/i915/i915_request.c 2
-rw-r--r-- drivers/gpu/drm/i915/i915_request.h 5
-rw-r--r-- drivers/gpu/drm/i915/i915_switcheroo.c 11
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c 2
-rw-r--r-- drivers/gpu/drm/i915/i915_timer_util.c 36
-rw-r--r-- drivers/gpu/drm/i915/i915_timer_util.h 23
-rw-r--r-- drivers/gpu/drm/i915/i915_ttm_buddy_manager.c 4
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.c 31
-rw-r--r-- drivers/gpu/drm/i915/i915_utils.h 251
-rw-r--r-- drivers/gpu/drm/i915/i915_vgpu.c 2
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.c 26
-rw-r--r-- drivers/gpu/drm/i915/i915_vma.h 6
-rw-r--r-- drivers/gpu/drm/i915/i915_wait_util.h 119
-rw-r--r-- drivers/gpu/drm/i915/intel_clock_gating.c 37
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt.c 2
-rw-r--r-- drivers/gpu/drm/i915/intel_gvt_mmio_table.c 266
-rw-r--r-- drivers/gpu/drm/i915/intel_memory_region.c 1
-rw-r--r-- drivers/gpu/drm/i915/intel_pcode.c 3
-rw-r--r-- drivers/gpu/drm/i915/intel_region_ttm.c 2
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c 77
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.h 3
-rw-r--r-- drivers/gpu/drm/i915/intel_step.c 2
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c 26
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.h 8
-rw-r--r-- drivers/gpu/drm/i915/intel_wakeref.c 2
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp.c 6
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c 8
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c 2
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_huc.c 2
-rw-r--r-- drivers/gpu/drm/i915/pxp/intel_pxp_session.c 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_active.c 2
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c 4
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_request.c 7
-rw-r--r-- drivers/gpu/drm/i915/selftests/i915_selftest.c 3
-rw-r--r-- drivers/gpu/drm/i915/selftests/igt_spinner.c 5
-rw-r--r-- drivers/gpu/drm/i915/selftests/intel_uncore.c 12
-rw-r--r-- drivers/gpu/drm/i915/selftests/mock_gem_device.c 4
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_dram.c 108
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_dram.h 14
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_gmch.c 4
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_rom.c 7
-rw-r--r-- drivers/gpu/drm/i915/soc/intel_rom.h 6
-rw-r--r-- drivers/gpu/drm/i915/vlv_iosf_sb.c 2
-rw-r--r-- drivers/gpu/drm/i915/vlv_suspend.c 5
-rw-r--r-- drivers/gpu/drm/imagination/Kconfig 4
-rw-r--r-- drivers/gpu/drm/imagination/pvr_ccb.c 1
-rw-r--r-- drivers/gpu/drm/imagination/pvr_device.c 24
-rw-r--r-- drivers/gpu/drm/imagination/pvr_device.h 25
-rw-r--r-- drivers/gpu/drm/imagination/pvr_drv.c 23
-rw-r--r-- drivers/gpu/drm/imagination/pvr_fw.c 1
-rw-r--r-- drivers/gpu/drm/imagination/pvr_fw_meta.c 2
-rw-r--r-- drivers/gpu/drm/imagination/pvr_fw_trace.c 1
-rw-r--r-- drivers/gpu/drm/imagination/pvr_power.c 159
-rw-r--r-- drivers/gpu/drm/imagination/pvr_power.h 15
-rw-r--r-- drivers/gpu/drm/imagination/pvr_vm.c 16
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-ed.c 8
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fg.c 4
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fu.c 10
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-fu.h 4
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-lb.c 28
-rw-r--r-- drivers/gpu/drm/imx/dc/dc-plane.c 2
-rw-r--r-- drivers/gpu/drm/imx/dcss/dcss-plane.c 5
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c 1
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/imx-drm-core.c 31
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/imx-ldb.c 1
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/imx-tve.c 18
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c 4
-rw-r--r-- drivers/gpu/drm/imx/ipuv3/parallel-display.c 23
-rw-r--r-- drivers/gpu/drm/imx/lcdc/imx-lcdc.c 1
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c 13
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-ipu.c 4
-rw-r--r-- drivers/gpu/drm/kmb/kmb_drv.c 1
-rw-r--r-- drivers/gpu/drm/kmb/kmb_plane.c 4
-rw-r--r-- drivers/gpu/drm/lima/lima_sched.c 2
-rw-r--r-- drivers/gpu/drm/logicvc/logicvc_layer.c 4
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_benchmark.c 1
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_crtc.c 1
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_debugfs.c 1
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_drv.c 1
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_gem.c 32
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_i2c.c 1
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_irq.c 1
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_output_7a1000.c 1
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_output_7a2000.c 1
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_pixpll.c 1
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_plane.c 3
-rw-r--r-- drivers/gpu/drm/loongson/lsdc_ttm.c 4
-rw-r--r-- drivers/gpu/drm/mcde/mcde_clk_div.c 13
-rw-r--r-- drivers/gpu/drm/mcde/mcde_display.c 1
-rw-r--r-- drivers/gpu/drm/mediatek/Kconfig 23
-rw-r--r-- drivers/gpu/drm/mediatek/Makefile 3
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_crtc.c 8
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_ddp_comp.c 33
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_ddp_comp.h 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ccorr.c 23
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c 12
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_dp.c 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_drv.c 14
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_gem.c 1
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi.c 539
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi_common.c 456
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi_common.h 198
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c 396
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h 263
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_hdmi_v2.c 1521
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_plane.c 28
-rw-r--r-- drivers/gpu/drm/meson/meson_overlay.c 1
-rw-r--r-- drivers/gpu/drm/meson/meson_plane.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_drv.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200eh.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200eh3.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200eh5.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200er.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200ev.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200ew3.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200se.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_g200wb.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_vga.c 1
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_vga_bmc.c 1
-rw-r--r-- drivers/gpu/drm/msm/Makefile 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx_catalog.c 7
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx_gpu.c 52
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx_gpu.h 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_catalog.c 13
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c 52
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.h 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_catalog.c 7
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.c 54
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.h 2
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_catalog.c 17
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.c 61
-rw-r--r-- drivers/gpu/drm/msm/adreno/a5xx_gpu.h 1
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_catalog.c 477
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.c 441
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gmu.h 35
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.c 666
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu.h 34
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 15
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h 74
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.c 108
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_hfi.h 17
-rw-r--r-- drivers/gpu/drm/msm/adreno/a6xx_preempt.c 44
-rw-r--r-- drivers/gpu/drm/msm/adreno/a8xx_gpu.c 1201
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_device.c 17
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h 420
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h 332
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h 470
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c 40
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.h 57
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h 541
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c 35
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h 3
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c 135
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c 47
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c 2
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c 10
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h 4
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h 6
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c 3
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c 446
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h 12
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c 31
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h 10
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c 13
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c 3
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c 29
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c 47
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 3
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c 7
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot.h 13
-rw-r--r-- drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c 3
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_ctrl.c 10
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_display.c 9
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.c 117
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_link.h 5
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_panel.c 78
-rw-r--r-- drivers/gpu/drm/msm/dp/dp_panel.h 3
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c 16
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c 34
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c 21
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c 32
-rw-r--r-- drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c 81
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c 16
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c 16
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c 12
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c 1
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h 2
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c 11
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c 61
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.h 6
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_prime.c 2
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_submit.c 9
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_vma.c 103
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.c 26
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu.h 20
-rw-r--r-- drivers/gpu/drm/msm/msm_gpu_trace.h 12
-rw-r--r-- drivers/gpu/drm/msm/msm_iommu.c 13
-rw-r--r-- drivers/gpu/drm/msm/msm_kms.c 14
-rw-r--r-- drivers/gpu/drm/msm/msm_mdss.c 5
-rw-r--r-- drivers/gpu/drm/msm/msm_submitqueue.c 4
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a6xx.xml 2877
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml 40
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml 52
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml 278
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml 7
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml 121
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml 299
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/adreno_common.xml 12
-rw-r--r-- drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml 534
-rw-r--r-- drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml 11
-rw-r--r-- drivers/gpu/drm/msm/registers/gen_header.py 214
-rw-r--r-- drivers/gpu/drm/mxsfb/lcdif_kms.c 5
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_kms.c 1
-rw-r--r-- drivers/gpu/drm/nouveau/Kconfig 9
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c 4
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.h 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.c 24
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c 33
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvfw/hs.h 4
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h 2
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.h 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.h 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c 11
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dma.h 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_dmem.c 311
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_platform.c 25
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_prime.c 12
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sched.c 14
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_svm.c 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_svm.h 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_uvmm.c 113
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_uvmm.h 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/core/enum.c 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/base.c 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/falcon/fw.c 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c 320
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h 24
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c 5
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c 185
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h 18
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c 4
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c 2
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c 6
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c 69
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c 4
-rw-r--r-- drivers/gpu/drm/nova/Kconfig 2
-rw-r--r-- drivers/gpu/drm/nova/driver.rs 8
-rw-r--r-- drivers/gpu/drm/nova/file.rs 26
-rw-r--r-- drivers/gpu/drm/nova/gem.rs 10
-rw-r--r-- drivers/gpu/drm/nova/nova.rs 1
-rw-r--r-- drivers/gpu/drm/nova/uapi.rs 61
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_crtc.c 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_debugfs.c 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_dmm_tiler.c 2
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.c 7
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_encoder.c 4
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fb.c 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_fbdev.c 12
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c 16
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_irq.c 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_overlay.c 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_plane.c 3
-rw-r--r-- drivers/gpu/drm/panel/Kconfig 85
-rw-r--r-- drivers/gpu/drm/panel/Makefile 6
-rw-r--r-- drivers/gpu/drm/panel/panel-edp.c 65
-rw-r--r-- drivers/gpu/drm/panel/panel-himax-hx8279.c 2
-rw-r--r-- drivers/gpu/drm/panel/panel-hydis-hv101hd1.c 188
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9881c.c 1629
-rw-r--r-- drivers/gpu/drm/panel/panel-ilitek-ili9882t.c 69
-rw-r--r-- drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c 21
-rw-r--r-- drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c 192
-rw-r--r-- drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c 2
-rw-r--r-- drivers/gpu/drm/panel/panel-lg-ld070wx3.c 184
-rw-r--r-- drivers/gpu/drm/panel/panel-lvds.c 2
-rw-r--r-- drivers/gpu/drm/panel/panel-newvision-nv3052c.c 408
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt35560.c 198
-rw-r--r-- drivers/gpu/drm/panel/panel-novatek-nt36523.c 804
-rw-r--r-- drivers/gpu/drm/panel/panel-orisetech-ota5601a.c 7
-rw-r--r-- drivers/gpu/drm/panel/panel-ronbo-rb070d30.c 8
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c 385
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c 2
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c 981
-rw-r--r-- drivers/gpu/drm/panel/panel-samsung-sofef00.c 105
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c 225
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c 153
-rw-r--r-- drivers/gpu/drm/panel/panel-sitronix-st7703.c 2
-rw-r--r-- drivers/gpu/drm/panel/panel-sitronix-st7789v.c 7
-rw-r--r-- drivers/gpu/drm/panel/panel-summit.c 2
-rw-r--r-- drivers/gpu/drm/panel/panel-synaptics-tddi.c 277
-rw-r--r-- drivers/gpu/drm/panel/panel-visionox-rm69299.c 71
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_devfreq.c 6
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_device.c 68
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_device.h 24
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_drv.c 243
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_dump.c 8
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem.c 9
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c 4
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_gpu.c 66
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.c 336
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.h 38
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c 115
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.h 3
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_perfcnt.c 28
-rw-r--r-- drivers/gpu/drm/panthor/Makefile 2
-rw-r--r-- drivers/gpu/drm/panthor/panthor_devfreq.c 64
-rw-r--r-- drivers/gpu/drm/panthor/panthor_devfreq.h 2
-rw-r--r-- drivers/gpu/drm/panthor/panthor_device.c 48
-rw-r--r-- drivers/gpu/drm/panthor/panthor_device.h 25
-rw-r--r-- drivers/gpu/drm/panthor/panthor_drv.c 38
-rw-r--r-- drivers/gpu/drm/panthor/panthor_fw.c 140
-rw-r--r-- drivers/gpu/drm/panthor/panthor_fw.h 32
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gem.c 42
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gem.h 12
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gpu.c 128
-rw-r--r-- drivers/gpu/drm/panthor/panthor_gpu.h 1
-rw-r--r-- drivers/gpu/drm/panthor/panthor_heap.c 1
-rw-r--r-- drivers/gpu/drm/panthor/panthor_hw.c 224
-rw-r--r-- drivers/gpu/drm/panthor/panthor_hw.h 56
-rw-r--r-- drivers/gpu/drm/panthor/panthor_mmu.c 224
-rw-r--r-- drivers/gpu/drm/panthor/panthor_pwr.c 549
-rw-r--r-- drivers/gpu/drm/panthor/panthor_pwr.h 23
-rw-r--r-- drivers/gpu/drm/panthor/panthor_regs.h 86
-rw-r--r-- drivers/gpu/drm/panthor/panthor_sched.c 409
-rw-r--r-- drivers/gpu/drm/panthor/panthor_sched.h 3
-rw-r--r-- drivers/gpu/drm/pl111/pl111_display.c 14
-rw-r--r-- drivers/gpu/drm/qxl/qxl_cmd.c 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_debugfs.c 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c 30
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.c 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_gem.c 3
-rw-r--r-- drivers/gpu/drm/qxl/qxl_image.c 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ioctl.c 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_irq.c 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_kms.c 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_release.c 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ttm.c 3
-rw-r--r-- drivers/gpu/drm/radeon/atombios_encoders.c 2
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c 14
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_cs.c 523
-rw-r--r-- drivers/gpu/drm/radeon/ni_dpm.c 2
-rw-r--r-- drivers/gpu/drm/radeon/r100.c 215
-rw-r--r-- drivers/gpu/drm/radeon/r200.c 34
-rw-r--r-- drivers/gpu/drm/radeon/r300.c 66
-rw-r--r-- drivers/gpu/drm/radeon/r600_cs.c 449
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_acpi.c 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c 113
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fbdev.c 17
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fence.c 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gart.c 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_encoders.c 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_pm.c 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_test.c 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vce.c 6
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c 3
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c 1
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c 7
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c 4
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c 280
-rw-r--r-- drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h 342
-rw-r--r-- drivers/gpu/drm/renesas/rz-du/Kconfig 2
-rw-r--r-- drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c 1
-rw-r--r-- drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c 8
-rw-r--r-- drivers/gpu/drm/rockchip/Kconfig 9
-rw-r--r-- drivers/gpu/drm/rockchip/Makefile 1
-rw-r--r-- drivers/gpu/drm/rockchip/analogix_dp-rockchip.c 42
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-core.c 1
-rw-r--r-- drivers/gpu/drm/rockchip/cdn-dp-reg.c 2
-rw-r--r-- drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c 163
-rw-r--r-- drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c 21
-rw-r--r-- drivers/gpu/drm/rockchip/dw_dp-rockchip.c 150
-rw-r--r-- drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c 80
-rw-r--r-- drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c 263
-rw-r--r-- drivers/gpu/drm/rockchip/inno_hdmi.c 12
-rw-r--r-- drivers/gpu/drm/rockchip/rk3066_hdmi.c 1
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c 5
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.h 1
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.c 13
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c 7
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c 142
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop2.h 1
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_lvds.c 1
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_lvds.h 21
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_rgb.c 1
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_vop2_reg.c 64
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_vop_reg.c 1
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c 54
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c 100
-rw-r--r-- drivers/gpu/drm/scheduler/tests/mock_scheduler.c 2
-rw-r--r-- drivers/gpu/drm/scheduler/tests/sched_tests.h 11
-rw-r--r-- drivers/gpu/drm/scheduler/tests/tests_basic.c 4
-rw-r--r-- drivers/gpu/drm/sitronix/st7571-i2c.c 48
-rw-r--r-- drivers/gpu/drm/sitronix/st7586.c 1
-rw-r--r-- drivers/gpu/drm/sitronix/st7735r.c 1
-rw-r--r-- drivers/gpu/drm/solomon/ssd130x-spi.c 3
-rw-r--r-- drivers/gpu/drm/solomon/ssd130x.c 87
-rw-r--r-- drivers/gpu/drm/sti/sti_cursor.c 1
-rw-r--r-- drivers/gpu/drm/sti/sti_drv.c 19
-rw-r--r-- drivers/gpu/drm/sti/sti_gdp.c 1
-rw-r--r-- drivers/gpu/drm/sti/sti_hda.c 5
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.c 2
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp.c 3
-rw-r--r-- drivers/gpu/drm/sti/sti_plane.c 1
-rw-r--r-- drivers/gpu/drm/sti/sti_vtg.c 7
-rw-r--r-- drivers/gpu/drm/stm/drv.c 13
-rw-r--r-- drivers/gpu/drm/stm/dw_mipi_dsi-stm.c 14
-rw-r--r-- drivers/gpu/drm/stm/ltdc.c 198
-rw-r--r-- drivers/gpu/drm/stm/ltdc.h 6
-rw-r--r-- drivers/gpu/drm/stm/lvds.c 12
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_backend.c 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_drv.c 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_frontend.c 1
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c 12
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c 18
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_csc.c 113
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_csc.h 16
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_mixer.c 218
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_mixer.h 65
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_layer.c 187
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_layer.h 7
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_scaler.c 44
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_scaler.h 4
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_layer.c 248
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_layer.h 7
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_scaler.c 51
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_scaler.h 6
-rw-r--r-- drivers/gpu/drm/sysfb/drm_sysfb_helper.h 36
-rw-r--r-- drivers/gpu/drm/sysfb/drm_sysfb_modeset.c 176
-rw-r--r-- drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c 21
-rw-r--r-- drivers/gpu/drm/sysfb/efidrm.c 1
-rw-r--r-- drivers/gpu/drm/sysfb/ofdrm.c 1
-rw-r--r-- drivers/gpu/drm/sysfb/simpledrm.c 17
-rw-r--r-- drivers/gpu/drm/sysfb/vesadrm.c 155
-rw-r--r-- drivers/gpu/drm/tegra/Makefile 1
-rw-r--r-- drivers/gpu/drm/tegra/dc.c 4
-rw-r--r-- drivers/gpu/drm/tegra/drm.c 3
-rw-r--r-- drivers/gpu/drm/tegra/drm.h 1
-rw-r--r-- drivers/gpu/drm/tegra/dsi.c 65
-rw-r--r-- drivers/gpu/drm/tegra/fb.c 1
-rw-r--r-- drivers/gpu/drm/tegra/fbdev.c 11
-rw-r--r-- drivers/gpu/drm/tegra/gem.c 8
-rw-r--r-- drivers/gpu/drm/tegra/hdmi.c 5
-rw-r--r-- drivers/gpu/drm/tegra/hub.c 1
-rw-r--r-- drivers/gpu/drm/tegra/nvjpg.c 330
-rw-r--r-- drivers/gpu/drm/tegra/sor.c 5
-rw-r--r-- drivers/gpu/drm/tegra/uapi.c 7
-rw-r--r-- drivers/gpu/drm/tests/.kunitconfig 2
-rw-r--r-- drivers/gpu/drm/tests/Makefile 3
-rw-r--r-- drivers/gpu/drm/tests/drm_buddy_test.c 105
-rw-r--r-- drivers/gpu/drm/tests/drm_exec_test.c 22
-rw-r--r-- drivers/gpu/drm/tests/drm_fixp_test.c 71
-rw-r--r-- drivers/gpu/drm/tests/drm_mm_test.c 1
-rw-r--r-- drivers/gpu/drm/tidss/tidss_crtc.c 49
-rw-r--r-- drivers/gpu/drm/tidss/tidss_dispc.c 448
-rw-r--r-- drivers/gpu/drm/tidss/tidss_dispc.h 9
-rw-r--r-- drivers/gpu/drm/tidss/tidss_dispc_regs.h 76
-rw-r--r-- drivers/gpu/drm/tidss/tidss_drv.c 25
-rw-r--r-- drivers/gpu/drm/tidss/tidss_drv.h 4
-rw-r--r-- drivers/gpu/drm/tidss/tidss_kms.c 4
-rw-r--r-- drivers/gpu/drm/tidss/tidss_oldi.c 23
-rw-r--r-- drivers/gpu/drm/tidss/tidss_plane.c 8
-rw-r--r-- drivers/gpu/drm/tidss/tidss_plane.h 2
-rw-r--r-- drivers/gpu/drm/tidss/tidss_scale_coefs.h 2
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_crtc.c 9
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_plane.c 3
-rw-r--r-- drivers/gpu/drm/tiny/Kconfig 16
-rw-r--r-- drivers/gpu/drm/tiny/Makefile 1
-rw-r--r-- drivers/gpu/drm/tiny/bochs.c 13
-rw-r--r-- drivers/gpu/drm/tiny/cirrus-qemu.c 12
-rw-r--r-- drivers/gpu/drm/tiny/gm12u320.c 1
-rw-r--r-- drivers/gpu/drm/tiny/hx8357d.c 1
-rw-r--r-- drivers/gpu/drm/tiny/ili9163.c 1
-rw-r--r-- drivers/gpu/drm/tiny/ili9225.c 1
-rw-r--r-- drivers/gpu/drm/tiny/ili9341.c 1
-rw-r--r-- drivers/gpu/drm/tiny/ili9486.c 1
-rw-r--r-- drivers/gpu/drm/tiny/mi0283qt.c 1
-rw-r--r-- drivers/gpu/drm/tiny/panel-mipi-dbi.c 1
-rw-r--r-- drivers/gpu/drm/tiny/pixpaper.c 1166
-rw-r--r-- drivers/gpu/drm/tiny/repaper.c 17
-rw-r--r-- drivers/gpu/drm/tiny/sharp-memory.c 27
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_bo_test.c 28
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c 73
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_device_test.c 33
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c 22
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h 7
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_mock_manager.c 1
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_pool_test.c 24
-rw-r--r-- drivers/gpu/drm/ttm/tests/ttm_resource_test.c 5
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c 82
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_internal.h 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c 38
-rw-r--r-- drivers/gpu/drm/ttm/ttm_device.c 9
-rw-r--r-- drivers/gpu/drm/ttm/ttm_module.c 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_pool.c 45
-rw-r--r-- drivers/gpu/drm/ttm/ttm_pool_internal.h 25
-rw-r--r-- drivers/gpu/drm/ttm/ttm_resource.c 37
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c 11
-rw-r--r-- drivers/gpu/drm/tve200/tve200_display.c 1
-rw-r--r-- drivers/gpu/drm/tyr/Kconfig 19
-rw-r--r-- drivers/gpu/drm/tyr/Makefile 3
-rw-r--r-- drivers/gpu/drm/tyr/driver.rs 205
-rw-r--r-- drivers/gpu/drm/tyr/file.rs 56
-rw-r--r-- drivers/gpu/drm/tyr/gem.rs 18
-rw-r--r-- drivers/gpu/drm/tyr/gpu.rs 219
-rw-r--r-- drivers/gpu/drm/tyr/regs.rs 108
-rw-r--r-- drivers/gpu/drm/tyr/tyr.rs 22
-rw-r--r-- drivers/gpu/drm/udl/udl_edid.c 1
-rw-r--r-- drivers/gpu/drm/v3d/v3d_bo.c 2
-rw-r--r-- drivers/gpu/drm/v3d/v3d_debugfs.c 1
-rw-r--r-- drivers/gpu/drm/v3d/v3d_drv.c 26
-rw-r--r-- drivers/gpu/drm/v3d/v3d_drv.h 35
-rw-r--r-- drivers/gpu/drm/v3d/v3d_fence.c 11
-rw-r--r-- drivers/gpu/drm/v3d/v3d_gem.c 12
-rw-r--r-- drivers/gpu/drm/v3d/v3d_gemfs.c 11
-rw-r--r-- drivers/gpu/drm/v3d/v3d_irq.c 70
-rw-r--r-- drivers/gpu/drm/v3d/v3d_sched.c 87
-rw-r--r-- drivers/gpu/drm/v3d/v3d_submit.c 3
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_irq.c 1
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_main.c 1
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_mode.c 9
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_ttm.c 1
-rw-r--r-- drivers/gpu/drm/vc4/Kconfig 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_bo.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_debugfs.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_dpi.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_drv.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_dsi.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_gem.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c 138
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.h 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hvs.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_irq.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_kms.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_perfmon.c 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c 7
-rw-r--r-- drivers/gpu/drm/vc4/vc4_render_cl.c 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_txp.c 1
-rw-r--r-- drivers/gpu/drm/vc4/vc4_v3d.c 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_validate.c 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_validate_shaders.c 2
-rw-r--r-- drivers/gpu/drm/vc4/vc4_vec.c 1
-rw-r--r-- drivers/gpu/drm/vgem/vgem_fence.c 2
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_debugfs.c 1
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_display.c 38
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_drv.c 1
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_kms.c 21
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_object.c 6
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_plane.c 3
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_vq.c 3
-rw-r--r-- drivers/gpu/drm/vkms/Kconfig 1
-rw-r--r-- drivers/gpu/drm/vkms/Makefile 5
-rw-r--r-- drivers/gpu/drm/vkms/tests/Makefile 3
-rw-r--r-- drivers/gpu/drm/vkms/tests/vkms_color_test.c 414
-rw-r--r-- drivers/gpu/drm/vkms/tests/vkms_config_test.c 122
-rw-r--r-- drivers/gpu/drm/vkms/tests/vkms_format_test.c 143
-rw-r--r-- drivers/gpu/drm/vkms/vkms_colorop.c 120
-rw-r--r-- drivers/gpu/drm/vkms/vkms_composer.c 136
-rw-r--r-- drivers/gpu/drm/vkms/vkms_composer.h 28
-rw-r--r-- drivers/gpu/drm/vkms/vkms_config.c 15
-rw-r--r-- drivers/gpu/drm/vkms/vkms_config.h 54
-rw-r--r-- drivers/gpu/drm/vkms/vkms_configfs.c 843
-rw-r--r-- drivers/gpu/drm/vkms/vkms_configfs.h 8
-rw-r--r-- drivers/gpu/drm/vkms/vkms_connector.c 35
-rw-r--r-- drivers/gpu/drm/vkms/vkms_connector.h 9
-rw-r--r-- drivers/gpu/drm/vkms/vkms_crtc.c 88
-rw-r--r-- drivers/gpu/drm/vkms/vkms_drv.c 27
-rw-r--r-- drivers/gpu/drm/vkms/vkms_drv.h 34
-rw-r--r-- drivers/gpu/drm/vkms/vkms_formats.c 331
-rw-r--r-- drivers/gpu/drm/vkms/vkms_formats.h 4
-rw-r--r-- drivers/gpu/drm/vkms/vkms_luts.c 811
-rw-r--r-- drivers/gpu/drm/vkms/vkms_luts.h 12
-rw-r--r-- drivers/gpu/drm/vkms/vkms_output.c 20
-rw-r--r-- drivers/gpu/drm/vkms/vkms_plane.c 23
-rw-r--r-- drivers/gpu/drm/vkms/vkms_writeback.c 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c 16
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c 22
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_gem.c 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c 21
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_validation.c 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c 6
-rw-r--r-- drivers/gpu/drm/xe/Kconfig 3
-rw-r--r-- drivers/gpu/drm/xe/Kconfig.debug 17
-rw-r--r-- drivers/gpu/drm/xe/Makefile 42
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_actions_abi.h 3
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h 5
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_errors_abi.h 3
-rw-r--r-- drivers/gpu/drm/xe/abi/guc_klvs_abi.h 27
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h 4
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h 112
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h 26
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h 13
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h 9
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h 2
-rw-r--r-- drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h 31
-rw-r--r-- drivers/gpu/drm/xe/display/ext/i915_utils.c 26
-rw-r--r-- drivers/gpu/drm/xe/display/intel_bo.c 125
-rw-r--r-- drivers/gpu/drm/xe/display/intel_fb_bo.c 3
-rw-r--r-- drivers/gpu/drm/xe/display/intel_fbdev_fb.c 88
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display.c 62
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display.h 4
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display_rpm.c 61
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display_rpm.h 11
-rw-r--r-- drivers/gpu/drm/xe/display/xe_display_wa.c 3
-rw-r--r-- drivers/gpu/drm/xe/display/xe_dsb_buffer.c 10
-rw-r--r-- drivers/gpu/drm/xe/display/xe_fb_pin.c 87
-rw-r--r-- drivers/gpu/drm/xe/display/xe_hdcp_gsc.c 8
-rw-r--r-- drivers/gpu/drm/xe/display/xe_panic.c 102
-rw-r--r-- drivers/gpu/drm/xe/display/xe_plane_initial.c 15
-rw-r--r-- drivers/gpu/drm/xe/display/xe_stolen.c 123
-rw-r--r-- drivers/gpu/drm/xe/instructions/xe_gpu_commands.h 6
-rw-r--r-- drivers/gpu/drm/xe/instructions/xe_mi_commands.h 1
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_engine_regs.h 7
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_gsc_regs.h 2
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_gt_regs.h 35
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_hw_error_regs.h 20
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_i2c_regs.h 3
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_irq_regs.h 9
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_lrc_layout.h 3
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_pmt.h 11
-rw-r--r-- drivers/gpu/drm/xe/regs/xe_regs.h 2
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_bo.c 36
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_dma_buf.c 46
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c 208
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c 776
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_live_test_mod.c 2
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_migrate.c 66
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_mocs.c 2
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_pci.c 257
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_pci_test.c 16
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_pci_test.h 15
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_rtp_test.c 6
-rw-r--r-- drivers/gpu/drm/xe/tests/xe_wa_test.c 90
-rw-r--r-- drivers/gpu/drm/xe/xe_assert.h 4
-rw-r--r-- drivers/gpu/drm/xe/xe_bb.c 35
-rw-r--r-- drivers/gpu/drm/xe/xe_bb.h 3
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.c 993
-rw-r--r-- drivers/gpu/drm/xe/xe_bo.h 86
-rw-r--r-- drivers/gpu/drm/xe/xe_bo_doc.h 8
-rw-r--r-- drivers/gpu/drm/xe/xe_bo_evict.c 21
-rw-r--r-- drivers/gpu/drm/xe/xe_bo_types.h 25
-rw-r--r-- drivers/gpu/drm/xe/xe_configfs.c 1078
-rw-r--r-- drivers/gpu/drm/xe/xe_configfs.h 24
-rw-r--r-- drivers/gpu/drm/xe/xe_debugfs.c 142
-rw-r--r-- drivers/gpu/drm/xe/xe_dep_job_types.h 29
-rw-r--r-- drivers/gpu/drm/xe/xe_dep_scheduler.c 143
-rw-r--r-- drivers/gpu/drm/xe/xe_dep_scheduler.h 21
-rw-r--r-- drivers/gpu/drm/xe/xe_devcoredump.c 4
-rw-r--r-- drivers/gpu/drm/xe/xe_device.c 235
-rw-r--r-- drivers/gpu/drm/xe/xe_device.h 1
-rw-r--r-- drivers/gpu/drm/xe/xe_device_sysfs.c 112
-rw-r--r-- drivers/gpu/drm/xe/xe_device_types.h 157
-rw-r--r-- drivers/gpu/drm/xe/xe_device_wa_oob.rules 3
-rw-r--r-- drivers/gpu/drm/xe/xe_dma_buf.c 121
-rw-r--r-- drivers/gpu/drm/xe/xe_eu_stall.c 45
-rw-r--r-- drivers/gpu/drm/xe/xe_exec.c 62
-rw-r--r-- drivers/gpu/drm/xe/xe_exec_queue.c 311
-rw-r--r-- drivers/gpu/drm/xe/xe_exec_queue.h 29
-rw-r--r-- drivers/gpu/drm/xe/xe_exec_queue_types.h 43
-rw-r--r-- drivers/gpu/drm/xe/xe_execlist.c 27
-rw-r--r-- drivers/gpu/drm/xe/xe_execlist_types.h 2
-rw-r--r-- drivers/gpu/drm/xe/xe_force_wake_types.h 26
-rw-r--r-- drivers/gpu/drm/xe/xe_ggtt.c 211
-rw-r--r-- drivers/gpu/drm/xe/xe_ggtt.h 8
-rw-r--r-- drivers/gpu/drm/xe/xe_ggtt_types.h 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gpu_scheduler.c 14
-rw-r--r-- drivers/gpu/drm/xe/xe_gpu_scheduler.h 29
-rw-r--r-- drivers/gpu/drm/xe/xe_gsc.c 14
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.c 117
-rw-r--r-- drivers/gpu/drm/xe/xe_gt.h 19
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_clock.c 26
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_debugfs.c 208
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_debugfs.h 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_freq.c 65
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_idle.c 29
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_idle.h 2
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_mcr.c 86
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_mcr.h 3
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_pagefault.c 688
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_pagefault.h 19
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_printk.h 32
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf.c 60
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c 382
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h 16
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c 750
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h 12
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h 36
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c 461
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c 1022
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h 48
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h 34
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c 21
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h 5
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_printk.h 7
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_vf.c 477
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_vf.h 10
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h 34
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_stats.c 57
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_stats.h 1
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_stats_types.h 33
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_throttle.c 355
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c 596
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h 40
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h 32
-rw-r--r-- drivers/gpu/drm/xe/xe_gt_topology.c 72
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.h8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_types.h112
-rw-r--r--drivers/gpu/drm/xe/xe_guard.h119
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c393
-rw-r--r--drivers/gpu/drm/xe/xe_guc.h5
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c135
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c59
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_capture.c29
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c458
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h14
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c13
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_fwif.h38
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pagefault.c95
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pagefault.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c235
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay.c17
-rw-r--r--drivers/gpu/drm/xe/xe_guc_relay_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c723
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.h9
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.c242
-rw-r--r--drivers/gpu/drm/xe/xe_guc_tlb_inval.h19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_heci_gsc.c4
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c325
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h18
-rw-r--r--drivers/gpu/drm/xe/xe_huc.c10
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c64
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine_group.c10
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.c182
-rw-r--r--drivers/gpu/drm/xe/xe_hw_error.h15
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c61
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.c48
-rw-r--r--drivers/gpu/drm/xe/xe_i2c.h6
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c160
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.c464
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw.h17
-rw-r--r--drivers/gpu/drm/xe/xe_late_bind_fw_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_lmtt.c44
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c280
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.h25
-rw-r--r--drivers/gpu/drm/xe/xe_map.h22
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.c57
-rw-r--r--drivers/gpu/drm/xe/xe_memirq.h2
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.c812
-rw-r--r--drivers/gpu/drm/xe/xe_migrate.h45
-rw-r--r--drivers/gpu/drm/xe/xe_migrate_doc.h2
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c62
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h4
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.c226
-rw-r--r--drivers/gpu/drm/xe/xe_mmio_gem.h20
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.c42
-rw-r--r--drivers/gpu/drm/xe/xe_mocs.h8
-rw-r--r--drivers/gpu/drm/xe/xe_module.c29
-rw-r--r--drivers/gpu/drm/xe/xe_nvm.c13
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c79
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h11
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault.c444
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault.h19
-rw-r--r--drivers/gpu/drm/xe/xe_pagefault_types.h136
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c145
-rw-r--r--drivers/gpu/drm/xe/xe_pat.h12
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c412
-rw-r--r--drivers/gpu/drm/xe/xe_pci.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.c115
-rw-r--r--drivers/gpu/drm/xe/xe_pci_sriov.h1
-rw-r--r--drivers/gpu/drm/xe/xe_pci_types.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c40
-rw-r--r--drivers/gpu/drm/xe/xe_pcode_api.h6
-rw-r--r--drivers/gpu/drm/xe/xe_platform_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pm.c120
-rw-r--r--drivers/gpu/drm/xe/xe_pm.h19
-rw-r--r--drivers/gpu/drm/xe/xe_pmu.c11
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence.c11
-rw-r--r--drivers/gpu/drm/xe/xe_preempt_fence_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_printk.h129
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.c294
-rw-r--r--drivers/gpu/drm/xe/xe_psmi.h14
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c449
-rw-r--r--drivers/gpu/drm/xe/xe_pt.h3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_types.h5
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c1
-rw-r--r--drivers/gpu/drm/xe/xe_pxp_submit.c34
-rw-r--r--drivers/gpu/drm/xe/xe_query.c31
-rw-r--r--drivers/gpu/drm/xe/xe_range_fence.h4
-rw-r--r--drivers/gpu/drm/xe/xe_reg_whitelist.c10
-rw-r--r--drivers/gpu/drm/xe/xe_res_cursor.h10
-rw-r--r--drivers/gpu/drm/xe/xe_ring_ops.c45
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.c38
-rw-r--r--drivers/gpu/drm/xe/xe_rtp.h32
-rw-r--r--drivers/gpu/drm/xe/xe_rtp_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_sa.c22
-rw-r--r--drivers/gpu/drm/xe/xe_sa.h16
-rw-r--r--drivers/gpu/drm/xe/xe_sa_types.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c25
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.h13
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job_types.h11
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.c17
-rw-r--r--drivers/gpu/drm/xe/xe_sriov.h1
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet.c520
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_packet_types.h75
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.c175
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf.h22
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_control.c279
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_control.h22
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c395
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h18
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_helpers.h27
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration.c365
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration.h30
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h37
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision.c438
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision.h45
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h36
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c647
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_pf_types.h25
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_printk.h12
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c213
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.h8
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.c480
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs.h35
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h51
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf_types.h14
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vfio.c80
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.c178
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode.h5
-rw-r--r--drivers/gpu/drm/xe/xe_survivability_mode_types.h8
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c751
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h100
-rw-r--r--drivers/gpu/drm/xe/xe_sync.c91
-rw-r--r--drivers/gpu/drm/xe/xe_sync.h3
-rw-r--r--drivers/gpu/drm/xe/xe_sync_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c74
-rw-r--r--drivers/gpu/drm/xe/xe_tile.h14
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.c142
-rw-r--r--drivers/gpu/drm/xe/xe_tile_debugfs.h16
-rw-r--r--drivers/gpu/drm/xe/xe_tile_printk.h127
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c253
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h15
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_printk.h33
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.c112
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.h9
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h23
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sysfs.c12
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.c433
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval.h46
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.c285
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_job.h34
-rw-r--r--drivers/gpu/drm/xe/xe_tlb_inval_types.h130
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h63
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c18
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_sys_mgr.c6
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.c28
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr.h3
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c29
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.h2
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c29
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_abi.h130
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h9
-rw-r--r--drivers/gpu/drm/xe/xe_uc_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.c322
-rw-r--r--drivers/gpu/drm/xe/xe_userptr.h107
-rw-r--r--drivers/gpu/drm/xe/xe_validation.c278
-rw-r--r--drivers/gpu/drm/xe/xe_validation.h192
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c1411
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h69
-rw-r--r--drivers/gpu/drm/xe/xe_vm_doc.h8
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.c431
-rw-r--r--drivers/gpu/drm/xe/xe_vm_madvise.h15
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h164
-rw-r--r--drivers/gpu/drm/xe/xe_vram.c291
-rw-r--r--drivers/gpu/drm/xe/xe_vram.h12
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c4
-rw-r--r--drivers/gpu/drm/xe/xe_vram_types.h85
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c120
-rw-r--r--drivers/gpu/drm/xe/xe_wa.h10
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules27
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c1
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c1
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_kms.c7
-rw-r--r--drivers/gpu/host1x/bus.c12
-rw-r--r--drivers/gpu/host1x/dev.c20
-rw-r--r--drivers/gpu/host1x/dev.h3
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c106
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c56
-rw-r--r--drivers/gpu/host1x/syncpt.c4
-rw-r--r--drivers/gpu/nova-core/bitfield.rs330
-rw-r--r--drivers/gpu/nova-core/dma.rs36
-rw-r--r--drivers/gpu/nova-core/driver.rs93
-rw-r--r--drivers/gpu/nova-core/falcon.rs354
-rw-r--r--drivers/gpu/nova-core/falcon/gsp.rs41
-rw-r--r--drivers/gpu/nova-core/falcon/hal.rs16
-rw-r--r--drivers/gpu/nova-core/falcon/hal/ga102.rs83
-rw-r--r--drivers/gpu/nova-core/falcon/sec2.rs19
-rw-r--r--drivers/gpu/nova-core/fb.rs106
-rw-r--r--drivers/gpu/nova-core/fb/hal.rs6
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga100.rs16
-rw-r--r--drivers/gpu/nova-core/fb/hal/ga102.rs8
-rw-r--r--drivers/gpu/nova-core/fb/hal/tu102.rs25
-rw-r--r--drivers/gpu/nova-core/firmware.rs140
-rw-r--r--drivers/gpu/nova-core/firmware/booter.rs401
-rw-r--r--drivers/gpu/nova-core/firmware/fwsec.rs205
-rw-r--r--drivers/gpu/nova-core/firmware/gsp.rs258
-rw-r--r--drivers/gpu/nova-core/firmware/riscv.rs95
-rw-r--r--drivers/gpu/nova-core/gfw.rs48
-rw-r--r--drivers/gpu/nova-core/gpu.rs300
-rw-r--r--drivers/gpu/nova-core/gsp.rs161
-rw-r--r--drivers/gpu/nova-core/gsp/boot.rs252
-rw-r--r--drivers/gpu/nova-core/gsp/cmdq.rs679
-rw-r--r--drivers/gpu/nova-core/gsp/commands.rs227
-rw-r--r--drivers/gpu/nova-core/gsp/fw.rs928
-rw-r--r--drivers/gpu/nova-core/gsp/fw/commands.rs128
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144.rs31
-rw-r--r--drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs951
-rw-r--r--drivers/gpu/nova-core/gsp/sequencer.rs407
-rw-r--r--drivers/gpu/nova-core/nova_core.rs6
-rw-r--r--drivers/gpu/nova-core/num.rs217
-rw-r--r--drivers/gpu/nova-core/regs.rs169
-rw-r--r--drivers/gpu/nova-core/regs/macros.rs818
-rw-r--r--drivers/gpu/nova-core/sbuffer.rs227
-rw-r--r--drivers/gpu/nova-core/util.rs55
-rw-r--r--drivers/gpu/nova-core/vbios.rs569
-rw-r--r--drivers/greybus/gb-beagleplay.c12
-rw-r--r--drivers/greybus/operation.c2
-rw-r--r--drivers/greybus/svc.c3
-rw-r--r--drivers/hid/Kconfig18
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c12
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_common.h3
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c4
-rw-r--r--drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c2
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c563
-rw-r--r--drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c29
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c6
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c1395
-rw-r--r--drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c724
-rw-r--r--drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c86
-rw-r--r--drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c321
-rw-r--r--drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c305
-rw-r--r--drivers/hid/bpf/progs/XPPen__Deco02.bpf.c359
-rw-r--r--drivers/hid/bpf/progs/hid_report_helpers.h10
-rw-r--r--drivers/hid/hid-apple.c1
-rw-r--r--drivers/hid/hid-asus.c10
-rw-r--r--drivers/hid/hid-core.c44
-rw-r--r--drivers/hid/hid-corsair-void.c5
-rw-r--r--drivers/hid/hid-cp2112.c37
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-elecom.c6
-rw-r--r--drivers/hid/hid-evision.c21
-rw-r--r--drivers/hid/hid-generic.c9
-rw-r--r--drivers/hid/hid-haptic.c580
-rw-r--r--drivers/hid/hid-haptic.h127
-rw-r--r--drivers/hid/hid-ids.h21
-rw-r--r--drivers/hid/hid-input.c53
-rw-r--r--drivers/hid/hid-lenovo.c21
-rw-r--r--drivers/hid/hid-lg-g15.c483
-rw-r--r--drivers/hid/hid-logitech-dj.c192
-rw-r--r--drivers/hid/hid-logitech-hidpp.c33
-rw-r--r--drivers/hid/hid-multitouch.c75
-rw-r--r--drivers/hid/hid-nintendo.c11
-rw-r--r--drivers/hid/hid-ntrig.c7
-rw-r--r--drivers/hid/hid-playstation.c1075
-rw-r--r--drivers/hid/hid-quirks.c20
-rw-r--r--drivers/hid/hid-steelseries.c108
-rw-r--r--drivers/hid/hid-uclogic-core.c19
-rw-r--r--drivers/hid/hid-uclogic-params.c61
-rw-r--r--drivers/hid/hid-uclogic-params.h5
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c125
-rw-r--r--drivers/hid/hid-uclogic-rdesc.h8
-rw-r--r--drivers/hid/hid-universal-pidff.c57
-rw-r--r--drivers/hid/hid-winwing.c171
-rw-r--r--drivers/hid/hidraw.c224
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-acpi.c8
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c74
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-elan.c11
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c118
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c31
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c15
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c18
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c6
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h3
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c43
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h26
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c1
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c10
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h4
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c1
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c3
-rw-r--r--drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c3
-rw-r--r--drivers/hid/usbhid/hid-pidff.c716
-rw-r--r--drivers/hid/usbhid/hid-pidff.h2
-rw-r--r--drivers/hsi/controllers/omap_ssi_port.c11
-rw-r--r--drivers/hv/Kconfig44
-rw-r--r--drivers/hv/Makefile13
-rw-r--r--drivers/hv/channel.c77
-rw-r--r--drivers/hv/channel_mgmt.c27
-rw-r--r--drivers/hv/connection.c6
-rw-r--r--drivers/hv/hv.c377
-rw-r--r--drivers/hv/hv_common.c49
-rw-r--r--drivers/hv/hv_util.c2
-rw-r--r--drivers/hv/hv_utils_transport.c10
-rw-r--r--drivers/hv/hyperv_vmbus.h76
-rw-r--r--drivers/hv/mshv.h2
-rw-r--r--drivers/hv/mshv_common.c101
-rw-r--r--drivers/hv/mshv_eventfd.c8
-rw-r--r--drivers/hv/mshv_irq.c4
-rw-r--r--drivers/hv/mshv_regions.c555
-rw-r--r--drivers/hv/mshv_root.h57
-rw-r--r--drivers/hv/mshv_root_hv_call.c196
-rw-r--r--drivers/hv/mshv_root_main.c835
-rw-r--r--drivers/hv/mshv_synic.c6
-rw-r--r--drivers/hv/mshv_vtl.h25
-rw-r--r--drivers/hv/mshv_vtl_main.c1392
-rw-r--r--drivers/hv/ring_buffer.c5
-rw-r--r--drivers/hv/vmbus_drv.c212
-rw-r--r--drivers/hwmon/Kconfig86
-rw-r--r--drivers/hwmon/Makefile5
-rw-r--r--drivers/hwmon/adm1026.c16
-rw-r--r--drivers/hwmon/adm1029.c3
-rw-r--r--drivers/hwmon/adm9240.c17
-rw-r--r--drivers/hwmon/adt7410.c11
-rw-r--r--drivers/hwmon/adt7411.c59
-rw-r--r--drivers/hwmon/adt7x10.c27
-rw-r--r--drivers/hwmon/aht10.c43
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c37
-rw-r--r--drivers/hwmon/aspeed-g6-pwm-tach.c3
-rw-r--r--drivers/hwmon/asus-ec-sensors.c397
-rw-r--r--drivers/hwmon/asus_rog_ryujin.c48
-rw-r--r--drivers/hwmon/cgbc-hwmon.c3
-rw-r--r--drivers/hwmon/chipcap2.c7
-rw-r--r--drivers/hwmon/coretemp.c76
-rw-r--r--drivers/hwmon/corsair-cpro.c8
-rw-r--r--drivers/hwmon/corsair-psu.c13
-rw-r--r--drivers/hwmon/cros_ec_hwmon.c313
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c108
-rw-r--r--drivers/hwmon/drivetemp.c5
-rw-r--r--drivers/hwmon/emc1403.c46
-rw-r--r--drivers/hwmon/emc2103.c4
-rw-r--r--drivers/hwmon/emc2305.c8
-rw-r--r--drivers/hwmon/ftsteutates.c84
-rw-r--r--drivers/hwmon/gpd-fan.c683
-rw-r--r--drivers/hwmon/hs3001.c10
-rw-r--r--drivers/hwmon/hwmon.c56
-rw-r--r--drivers/hwmon/i5500_temp.c3
-rw-r--r--drivers/hwmon/ina238.c597
-rw-r--r--drivers/hwmon/ina2xx.c28
-rw-r--r--drivers/hwmon/ina3221.c19
-rw-r--r--drivers/hwmon/jc42.c11
-rw-r--r--drivers/hwmon/k10temp.c22
-rw-r--r--drivers/hwmon/lenovo-ec-sensors.c34
-rw-r--r--drivers/hwmon/lm75.c21
-rw-r--r--drivers/hwmon/lm78.c5
-rw-r--r--drivers/hwmon/lm87.c16
-rw-r--r--drivers/hwmon/lm90.c25
-rw-r--r--drivers/hwmon/lm92.c11
-rw-r--r--drivers/hwmon/lm95234.c12
-rw-r--r--drivers/hwmon/lm95241.c16
-rw-r--r--drivers/hwmon/lm95245.c16
-rw-r--r--drivers/hwmon/lochnagar-hwmon.c18
-rw-r--r--drivers/hwmon/ltc2947-core.c92
-rw-r--r--drivers/hwmon/ltc4245.c8
-rw-r--r--drivers/hwmon/ltc4282.c71
-rw-r--r--drivers/hwmon/macsmc-hwmon.c851
-rw-r--r--drivers/hwmon/max127.c23
-rw-r--r--drivers/hwmon/max16065.c7
-rw-r--r--drivers/hwmon/max31790.c48
-rw-r--r--drivers/hwmon/max31827.c60
-rw-r--r--drivers/hwmon/max6620.c43
-rw-r--r--drivers/hwmon/max6639.c23
-rw-r--r--drivers/hwmon/max6697.c11
-rw-r--r--drivers/hwmon/mlxreg-fan.c42
-rw-r--r--drivers/hwmon/mr75203.c1
-rw-r--r--drivers/hwmon/nct6694-hwmon.c949
-rw-r--r--drivers/hwmon/nct6775-platform.c4
-rw-r--r--drivers/hwmon/nct7363.c2
-rw-r--r--drivers/hwmon/nct7904.c63
-rw-r--r--drivers/hwmon/npcm750-pwm-fan.c11
-rw-r--r--drivers/hwmon/ntc_thermistor.c43
-rw-r--r--drivers/hwmon/nzxt-smart2.c8
-rw-r--r--drivers/hwmon/peci/common.h3
-rw-r--r--drivers/hwmon/peci/cputemp.c90
-rw-r--r--drivers/hwmon/peci/dimmtemp.c36
-rw-r--r--drivers/hwmon/pmbus/Kconfig49
-rw-r--r--drivers/hwmon/pmbus/Makefile5
-rw-r--r--drivers/hwmon/pmbus/adm1275.c11
-rw-r--r--drivers/hwmon/pmbus/isl68137.c23
-rw-r--r--drivers/hwmon/pmbus/max17616.c73
-rw-r--r--drivers/hwmon/pmbus/max34440.c56
-rw-r--r--drivers/hwmon/pmbus/mp2869.c659
-rw-r--r--drivers/hwmon/pmbus/mp2925.c316
-rw-r--r--drivers/hwmon/pmbus/mp29502.c670
-rw-r--r--drivers/hwmon/pmbus/mp5990.c67
-rw-r--r--drivers/hwmon/pmbus/mp9945.c243
-rw-r--r--drivers/hwmon/powr1220.c17
-rw-r--r--drivers/hwmon/pwm-fan.c18
-rw-r--r--drivers/hwmon/sa67mcu-hwmon.c161
-rw-r--r--drivers/hwmon/sbtsi_temp.c63
-rw-r--r--drivers/hwmon/sch56xx-common.c4
-rw-r--r--drivers/hwmon/scmi-hwmon.c9
-rw-r--r--drivers/hwmon/sfctemp.c36
-rw-r--r--drivers/hwmon/sht21.c15
-rw-r--r--drivers/hwmon/sht3x.c27
-rw-r--r--drivers/hwmon/sht4x.c40
-rw-r--r--drivers/hwmon/sy7636a-hwmon.c8
-rw-r--r--drivers/hwmon/tmp102.c24
-rw-r--r--drivers/hwmon/tmp103.c3
-rw-r--r--drivers/hwmon/tmp108.c1
-rw-r--r--drivers/hwmon/tmp401.c8
-rw-r--r--drivers/hwmon/tmp421.c28
-rw-r--r--drivers/hwmon/tmp464.c13
-rw-r--r--drivers/hwmon/tsc1641.c748
-rw-r--r--drivers/hwmon/vt1211.c53
-rw-r--r--drivers/hwmon/vt8231.c18
-rw-r--r--drivers/hwmon/w83781d.c5
-rw-r--r--drivers/hwmon/w83791d.c17
-rw-r--r--drivers/hwmon/w83l786ng.c26
-rw-r--r--drivers/hwtracing/coresight/Kconfig12
-rw-r--r--drivers/hwtracing/coresight/Makefile1
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c63
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c84
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c41
-rw-r--r--drivers/hwtracing/coresight/coresight-ctcu-core.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c5
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-dummy.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c76
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c184
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h11
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c66
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c63
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c42
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c73
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c70
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c22
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h5
-rw-r--r--drivers/hwtracing/coresight/coresight-tnoc.c246
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c174
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.h12
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c38
-rw-r--r--drivers/hwtracing/coresight/coresight-trbe.c25
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c9
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.h1
-rw-r--r--drivers/hwtracing/intel_th/core.c22
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c2
-rw-r--r--drivers/i2c/algos/i2c-algo-pcf.c105
-rw-r--r--drivers/i2c/busses/Kconfig27
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2-pci.c5
-rw-r--r--drivers/i2c/busses/i2c-amd-mp2.h1
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c1
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c1
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c12
-rw-r--r--drivers/i2c/busses/i2c-cadence.c1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c17
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c24
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c9
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c7
-rw-r--r--drivers/i2c/busses/i2c-img-scb.c3
-rw-r--r--drivers/i2c/busses/i2c-imx-lpi2c.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c3
-rw-r--r--drivers/i2c/busses/i2c-k1.c90
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c28
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c1
-rw-r--r--drivers/i2c/busses/i2c-nct6694.c196
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c1
-rw-r--r--drivers/i2c/busses/i2c-omap.c3
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c48
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c257
-rw-r--r--drivers/i2c/busses/i2c-qup.c3
-rw-r--r--drivers/i2c/busses/i2c-riic.c4
-rw-r--r--drivers/i2c/busses/i2c-rtl9300.c456
-rw-r--r--drivers/i2c/busses/i2c-rzv2m.c1
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/i2c/busses/i2c-sprd.c4
-rw-r--r--drivers/i2c/busses/i2c-st.c2
-rw-r--r--drivers/i2c/busses/i2c-stm32.c7
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c5
-rw-r--r--drivers/i2c/busses/i2c-tegra.c26
-rw-r--r--drivers/i2c/busses/i2c-usbio.c321
-rw-r--r--drivers/i2c/busses/i2c-viperboard.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c1
-rw-r--r--drivers/i2c/i2c-core-base.c9
-rw-r--r--drivers/i2c/i2c-core-slave.c3
-rw-r--r--drivers/i2c/i2c-mux.c9
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c12
-rw-r--r--drivers/i3c/device.c27
-rw-r--r--drivers/i3c/internals.h18
-rw-r--r--drivers/i3c/master.c109
-rw-r--r--drivers/i3c/master/Kconfig11
-rw-r--r--drivers/i3c/master/Makefile1
-rw-r--r--drivers/i3c/master/adi-i3c-master.c1019
-rw-r--r--drivers/i3c/master/dw-i3c-master.c54
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c9
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v2.c7
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c74
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c96
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/ext_caps.c11
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h6
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c226
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/pio.c75
-rw-r--r--drivers/i3c/master/renesas-i3c.c2
-rw-r--r--drivers/i3c/master/svc-i3c-master.c166
-rw-r--r--drivers/idle/intel_idle.c256
-rw-r--r--drivers/iio/accel/Kconfig19
-rw-r--r--drivers/iio/accel/Makefile4
-rw-r--r--drivers/iio/accel/adxl345_core.c782
-rw-r--r--drivers/iio/accel/adxl355_core.c44
-rw-r--r--drivers/iio/accel/adxl380.c134
-rw-r--r--drivers/iio/accel/adxl380.h4
-rw-r--r--drivers/iio/accel/adxl380_i2c.c4
-rw-r--r--drivers/iio/accel/adxl380_spi.c4
-rw-r--r--drivers/iio/accel/bma180.c13
-rw-r--r--drivers/iio/accel/bma220.h28
-rw-r--r--drivers/iio/accel/bma220_core.c585
-rw-r--r--drivers/iio/accel/bma220_i2c.c69
-rw-r--r--drivers/iio/accel/bma220_spi.c320
-rw-r--r--drivers/iio/accel/bma400.h155
-rw-r--r--drivers/iio/accel/bma400_core.c349
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c12
-rw-r--r--drivers/iio/accel/bmc150-accel.h1
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c3
-rw-r--r--drivers/iio/accel/dmard06.c4
-rw-r--r--drivers/iio/accel/dmard09.c4
-rw-r--r--drivers/iio/accel/dmard10.c4
-rw-r--r--drivers/iio/accel/fxls8962af-core.c1
-rw-r--r--drivers/iio/accel/kxcjk-1013.c4
-rw-r--r--drivers/iio/accel/kxsd9.c3
-rw-r--r--drivers/iio/accel/mc3230.c4
-rw-r--r--drivers/iio/accel/mma7660.c4
-rw-r--r--drivers/iio/accel/mma8452.c7
-rw-r--r--drivers/iio/accel/mma9551_core.c5
-rw-r--r--drivers/iio/accel/msa311.c16
-rw-r--r--drivers/iio/accel/stk8312.c4
-rw-r--r--drivers/iio/accel/stk8ba50.c4
-rw-r--r--drivers/iio/adc/88pm886-gpadc.c393
-rw-r--r--drivers/iio/adc/Kconfig85
-rw-r--r--drivers/iio/adc/Makefile7
-rw-r--r--drivers/iio/adc/ab8500-gpadc.c1
-rw-r--r--drivers/iio/adc/ad4030.c6
-rw-r--r--drivers/iio/adc/ad4080.c126
-rw-r--r--drivers/iio/adc/ad4130.c3
-rw-r--r--drivers/iio/adc/ad7124.c848
-rw-r--r--drivers/iio/adc/ad7173.c221
-rw-r--r--drivers/iio/adc/ad7280a.c2
-rw-r--r--drivers/iio/adc/ad7380.c8
-rw-r--r--drivers/iio/adc/ad7476.c461
-rw-r--r--drivers/iio/adc/ad7768-1.c39
-rw-r--r--drivers/iio/adc/ad7779.c192
-rw-r--r--drivers/iio/adc/ad7949.c4
-rw-r--r--drivers/iio/adc/ad799x.c30
-rw-r--r--drivers/iio/adc/ade9000.c1799
-rw-r--r--drivers/iio/adc/adi-axi-adc.c1
-rw-r--r--drivers/iio/adc/aspeed_adc.c34
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c13
-rw-r--r--drivers/iio/adc/bcm_iproc_adc.c4
-rw-r--r--drivers/iio/adc/cpcap-adc.c6
-rw-r--r--drivers/iio/adc/da9150-gpadc.c5
-rw-r--r--drivers/iio/adc/dln2-adc.c9
-rw-r--r--drivers/iio/adc/exynos_adc.c286
-rw-r--r--drivers/iio/adc/hx711.c2
-rw-r--r--drivers/iio/adc/imx7d_adc.c4
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c6
-rw-r--r--drivers/iio/adc/imx93_adc.c26
-rw-r--r--drivers/iio/adc/intel_dc_ti_adc.c328
-rw-r--r--drivers/iio/adc/max14001.c391
-rw-r--r--drivers/iio/adc/mcp3564.c4
-rw-r--r--drivers/iio/adc/meson_saradc.c8
-rw-r--r--drivers/iio/adc/mt6360-adc.c2
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c3
-rw-r--r--drivers/iio/adc/mxs-lradc-adc.c4
-rw-r--r--drivers/iio/adc/pac1921.c11
-rw-r--r--drivers/iio/adc/pac1934.c33
-rw-r--r--drivers/iio/adc/palmas_gpadc.c4
-rw-r--r--drivers/iio/adc/qcom-spmi-rradc.c2
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c8
-rw-r--r--drivers/iio/adc/rn5t618-adc.c4
-rw-r--r--drivers/iio/adc/rockchip_saradc.c6
-rw-r--r--drivers/iio/adc/rohm-bd79112.c551
-rw-r--r--drivers/iio/adc/rohm-bd79124.c39
-rw-r--r--drivers/iio/adc/rtq6056.c2
-rw-r--r--drivers/iio/adc/rzg2l_adc.c2
-rw-r--r--drivers/iio/adc/rzn1-adc.c490
-rw-r--r--drivers/iio/adc/rzt2h_adc.c304
-rw-r--r--drivers/iio/adc/spear_adc.c12
-rw-r--r--drivers/iio/adc/stm32-adc-core.c1
-rw-r--r--drivers/iio/adc/stm32-adc.c7
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c9
-rw-r--r--drivers/iio/adc/stmpe-adc.c4
-rw-r--r--drivers/iio/adc/sun4i-gpadc-iio.c3
-rw-r--r--drivers/iio/adc/ti-adc081c.c40
-rw-r--r--drivers/iio/adc/ti-adc084s021.c4
-rw-r--r--drivers/iio/adc/ti-adc12138.c30
-rw-r--r--drivers/iio/adc/ti-adc128s052.c132
-rw-r--r--drivers/iio/adc/ti-ads1015.c6
-rw-r--r--drivers/iio/adc/ti-ads1100.c1
-rw-r--r--drivers/iio/adc/ti-ads1119.c11
-rw-r--r--drivers/iio/adc/ti-ads131e08.c10
-rw-r--r--drivers/iio/adc/ti-ads7924.c9
-rw-r--r--drivers/iio/adc/ti-tsc2046.c6
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c7
-rw-r--r--drivers/iio/adc/twl4030-madc.c4
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/adc/viperboard_adc.c4
-rw-r--r--drivers/iio/adc/xilinx-ams.c47
-rw-r--r--drivers/iio/buffer/industrialio-buffer-cb.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c6
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c2
-rw-r--r--drivers/iio/chemical/atlas-sensor.c2
-rw-r--r--drivers/iio/chemical/bme680_core.c3
-rw-r--r--drivers/iio/chemical/ens160_core.c3
-rw-r--r--drivers/iio/chemical/scd30_core.c2
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c1
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c15
-rw-r--r--drivers/iio/common/ssp_sensors/ssp_dev.c4
-rw-r--r--drivers/iio/dac/Kconfig31
-rw-r--r--drivers/iio/dac/Makefile2
-rw-r--r--drivers/iio/dac/ad3530r.c3
-rw-r--r--drivers/iio/dac/ad5360.c2
-rw-r--r--drivers/iio/dac/ad5380.c4
-rw-r--r--drivers/iio/dac/ad5421.c2
-rw-r--r--drivers/iio/dac/ad5446-i2c.c102
-rw-r--r--drivers/iio/dac/ad5446-spi.c252
-rw-r--r--drivers/iio/dac/ad5446.c506
-rw-r--r--drivers/iio/dac/ad5446.h77
-rw-r--r--drivers/iio/dac/ad5764.c4
-rw-r--r--drivers/iio/dac/ad5791.c4
-rw-r--r--drivers/iio/dac/ds4424.c4
-rw-r--r--drivers/iio/dac/ltc2688.c32
-rw-r--r--drivers/iio/dac/stm32-dac.c19
-rw-r--r--drivers/iio/dac/ti-dac7311.c4
-rw-r--r--drivers/iio/frequency/adf4350.c23
-rw-r--r--drivers/iio/gyro/bmg160_core.c4
-rw-r--r--drivers/iio/gyro/fxas21002c_core.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c3
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c1
-rw-r--r--drivers/iio/health/afe4403.c48
-rw-r--r--drivers/iio/health/afe4404.c48
-rw-r--r--drivers/iio/health/max30100.c38
-rw-r--r--drivers/iio/humidity/am2315.c4
-rw-r--r--drivers/iio/humidity/dht11.c4
-rw-r--r--drivers/iio/humidity/hdc3020.c73
-rw-r--r--drivers/iio/imu/Kconfig2
-rw-r--r--drivers/iio/imu/Makefile2
-rw-r--r--drivers/iio/imu/adis16475.c1
-rw-r--r--drivers/iio/imu/bmi270/bmi270_core.c383
-rw-r--r--drivers/iio/imu/bmi270/bmi270_i2c.c2
-rw-r--r--drivers/iio/imu/bmi270/bmi270_spi.c2
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c3
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600.h1
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c29
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c65
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_core.c117
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c29
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c1
-rw-r--r--drivers/iio/imu/inv_icm45600/Kconfig70
-rw-r--r--drivers/iio/imu/inv_icm45600/Makefile16
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600.h385
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c782
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c558
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.h101
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_core.c988
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c791
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_i2c.c98
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_i3c.c79
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_spi.c108
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c6
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c1
-rw-r--r--drivers/iio/imu/kmx61.c6
-rw-r--r--drivers/iio/imu/smi330/Kconfig33
-rw-r--r--drivers/iio/imu/smi330/Makefile7
-rw-r--r--drivers/iio/imu/smi330/smi330.h25
-rw-r--r--drivers/iio/imu/smi330/smi330_core.c918
-rw-r--r--drivers/iio/imu/smi330/smi330_i2c.c133
-rw-r--r--drivers/iio/imu/smi330/smi330_spi.c85
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h44
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c71
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c40
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c2
-rw-r--r--drivers/iio/industrialio-backend.c8
-rw-r--r--drivers/iio/industrialio-buffer.c33
-rw-r--r--drivers/iio/industrialio-core.c29
-rw-r--r--drivers/iio/inkern.c81
-rw-r--r--drivers/iio/light/Kconfig13
-rw-r--r--drivers/iio/light/Makefile1
-rw-r--r--drivers/iio/light/acpi-als.c19
-rw-r--r--drivers/iio/light/adjd_s311.c12
-rw-r--r--drivers/iio/light/al3000a.c2
-rw-r--r--drivers/iio/light/apds9306.c6
-rw-r--r--drivers/iio/light/apds9960.c3
-rw-r--r--drivers/iio/light/bh1745.c7
-rw-r--r--drivers/iio/light/bh1780.c1
-rw-r--r--drivers/iio/light/gp2ap002.c2
-rw-r--r--drivers/iio/light/hid-sensor-als.c5
-rw-r--r--drivers/iio/light/isl29028.c11
-rw-r--r--drivers/iio/light/isl29125.c14
-rw-r--r--drivers/iio/light/ltr390.c205
-rw-r--r--drivers/iio/light/ltr501.c4
-rw-r--r--drivers/iio/light/ltrf216a.c1
-rw-r--r--drivers/iio/light/max44000.c18
-rw-r--r--drivers/iio/light/opt4001.c3
-rw-r--r--drivers/iio/light/opt4060.c7
-rw-r--r--drivers/iio/light/pa12203001.c11
-rw-r--r--drivers/iio/light/rohm-bu27034.c3
-rw-r--r--drivers/iio/light/rpr0521.c10
-rw-r--r--drivers/iio/light/si1145.c5
-rw-r--r--drivers/iio/light/st_uvis25.h5
-rw-r--r--drivers/iio/light/st_uvis25_core.c12
-rw-r--r--drivers/iio/light/stk3310.c4
-rw-r--r--drivers/iio/light/tcs3414.c15
-rw-r--r--drivers/iio/light/tcs3472.c14
-rw-r--r--drivers/iio/light/tsl2583.c12
-rw-r--r--drivers/iio/light/tsl2591.c2
-rw-r--r--drivers/iio/light/us5182d.c12
-rw-r--r--drivers/iio/light/vcnl4000.c22
-rw-r--r--drivers/iio/light/vcnl4035.c11
-rw-r--r--drivers/iio/light/veml3235.c2
-rw-r--r--drivers/iio/light/veml6030.c2
-rw-r--r--drivers/iio/light/veml6040.c3
-rw-r--r--drivers/iio/light/veml6046x00.c1030
-rw-r--r--drivers/iio/light/vl6180.c16
-rw-r--r--drivers/iio/magnetometer/Kconfig15
-rw-r--r--drivers/iio/magnetometer/Makefile2
-rw-r--r--drivers/iio/magnetometer/ak8974.c2
-rw-r--r--drivers/iio/magnetometer/ak8975.c1
-rw-r--r--drivers/iio/magnetometer/als31300.c5
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c13
-rw-r--r--drivers/iio/magnetometer/tlv493d.c526
-rw-r--r--drivers/iio/magnetometer/tmag5273.c5
-rw-r--r--drivers/iio/magnetometer/yamaha-yas530.c2
-rw-r--r--drivers/iio/position/hid-sensor-custom-intel-hinge.c2
-rw-r--r--drivers/iio/potentiostat/lmp91000.c4
-rw-r--r--drivers/iio/pressure/Kconfig12
-rw-r--r--drivers/iio/pressure/Makefile8
-rw-r--r--drivers/iio/pressure/adp810.c225
-rw-r--r--drivers/iio/pressure/bmp280-core.c23
-rw-r--r--drivers/iio/pressure/dlhl60d.c4
-rw-r--r--drivers/iio/pressure/icp10100.c1
-rw-r--r--drivers/iio/pressure/mpl115.c2
-rw-r--r--drivers/iio/pressure/mpl3115.c549
-rw-r--r--drivers/iio/pressure/zpa2326.c2
-rw-r--r--drivers/iio/proximity/d3323aa.c3
-rw-r--r--drivers/iio/proximity/hx9023s.c3
-rw-r--r--drivers/iio/proximity/irsd200.c6
-rw-r--r--drivers/iio/proximity/mb1232.c15
-rw-r--r--drivers/iio/proximity/ping.c4
-rw-r--r--drivers/iio/proximity/pulsedlight-lidar-lite-v2.c16
-rw-r--r--drivers/iio/proximity/srf04.c8
-rw-r--r--drivers/iio/proximity/srf08.c18
-rw-r--r--drivers/iio/proximity/sx9500.c27
-rw-r--r--drivers/iio/proximity/vl53l0x-i2c.c27
-rw-r--r--drivers/iio/resolver/ad2s1210.c30
-rw-r--r--drivers/iio/temperature/Kconfig8
-rw-r--r--drivers/iio/temperature/mcp9600.c151
-rw-r--r--drivers/iio/temperature/mlx90614.c6
-rw-r--r--drivers/iio/temperature/mlx90632.c5
-rw-r--r--drivers/iio/temperature/mlx90635.c9
-rw-r--r--drivers/iio/test/Kconfig12
-rw-r--r--drivers/iio/test/Makefile1
-rw-r--r--drivers/iio/test/iio-test-multiply.c212
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/addr.c83
-rw-r--r--drivers/infiniband/core/agent.c3
-rw-r--r--drivers/infiniband/core/cm.c13
-rw-r--r--drivers/infiniband/core/cma.c138
-rw-r--r--drivers/infiniband/core/cma_priv.h4
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/core/restrack.c4
-rw-r--r--drivers/infiniband/core/sa_query.c283
-rw-r--r--drivers/infiniband/core/ucma.c122
-rw-r--r--drivers/infiniband/core/umem.c8
-rw-r--r--drivers/infiniband/core/uverbs_std_types_cq.c1
-rw-r--r--drivers/infiniband/core/verbs.c3
-rw-r--r--drivers/infiniband/hw/Makefile2
-rw-r--r--drivers/infiniband/hw/bng_re/Kconfig10
-rw-r--r--drivers/infiniband/hw/bng_re/Makefile8
-rw-r--r--drivers/infiniband/hw/bng_re/bng_debugfs.c39
-rw-r--r--drivers/infiniband/hw/bng_re/bng_debugfs.h12
-rw-r--r--drivers/infiniband/hw/bng_re/bng_dev.c534
-rw-r--r--drivers/infiniband/hw/bng_re/bng_fw.c767
-rw-r--r--drivers/infiniband/hw/bng_re/bng_fw.h211
-rw-r--r--drivers/infiniband/hw/bng_re/bng_re.h85
-rw-r--r--drivers/infiniband/hw/bng_re/bng_res.c279
-rw-r--r--drivers/infiniband/hw/bng_re/bng_res.h215
-rw-r--r--drivers/infiniband/hw/bng_re/bng_sp.c131
-rw-r--r--drivers/infiniband/hw/bng_re/bng_sp.h47
-rw-r--r--drivers/infiniband/hw/bng_re/bng_tlv.h128
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h21
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.c165
-rw-r--r--drivers/infiniband/hw/bnxt_re/debugfs.h19
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c109
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h26
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c173
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c379
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c16
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c38
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h21
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c106
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h8
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h44
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c18
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c22
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c6
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c110
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.h4
-rw-r--r--drivers/infiniband/hw/hfi1/device.c4
-rw-r--r--drivers/infiniband/hw/hfi1/init.c4
-rw-r--r--drivers/infiniband/hw/hfi1/opfn.c4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c2
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c4
-rw-r--r--drivers/infiniband/hw/hns/Makefile4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_bond.c1012
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_bond.h95
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c58
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h20
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c153
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h20
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c189
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_srq.c1
-rw-r--r--drivers/infiniband/hw/ionic/Kconfig15
-rw-r--r--drivers/infiniband/hw/ionic/Makefile9
-rw-r--r--drivers/infiniband/hw/ionic/ionic_admin.c1229
-rw-r--r--drivers/infiniband/hw/ionic/ionic_controlpath.c2679
-rw-r--r--drivers/infiniband/hw/ionic/ionic_datapath.c1399
-rw-r--r--drivers/infiniband/hw/ionic/ionic_fw.h1029
-rw-r--r--drivers/infiniband/hw/ionic/ionic_hw_stats.c484
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.c440
-rw-r--r--drivers/infiniband/hw/ionic/ionic_ibdev.h517
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.c111
-rw-r--r--drivers/infiniband/hw/ionic/ionic_lif_cfg.h66
-rw-r--r--drivers/infiniband/hw/ionic/ionic_pgtbl.c143
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.c52
-rw-r--r--drivers/infiniband/hw/ionic/ionic_queue.h234
-rw-r--r--drivers/infiniband/hw/ionic/ionic_res.h154
-rw-r--r--drivers/infiniband/hw/irdma/Kconfig7
-rw-r--r--drivers/infiniband/hw/irdma/Makefile4
-rw-r--r--drivers/infiniband/hw/irdma/cm.c2
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c1535
-rw-r--r--drivers/infiniband/hw/irdma/defs.h264
-rw-r--r--drivers/infiniband/hw/irdma/hmc.c18
-rw-r--r--drivers/infiniband/hw/irdma/hmc.h19
-rw-r--r--drivers/infiniband/hw/irdma/hw.c366
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.c2
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_hw.h2
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_if.c3
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.c3
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_hw.h5
-rw-r--r--drivers/infiniband/hw/irdma/icrdma_if.c347
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.c170
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_hw.h32
-rw-r--r--drivers/infiniband/hw/irdma/ig3rdma_if.c236
-rw-r--r--drivers/infiniband/hw/irdma/irdma.h22
-rw-r--r--drivers/infiniband/hw/irdma/main.c371
-rw-r--r--drivers/infiniband/hw/irdma/main.h38
-rw-r--r--drivers/infiniband/hw/irdma/pble.c28
-rw-r--r--drivers/infiniband/hw/irdma/protos.h1
-rw-r--r--drivers/infiniband/hw/irdma/puda.c20
-rw-r--r--drivers/infiniband/hw/irdma/puda.h4
-rw-r--r--drivers/infiniband/hw/irdma/type.h228
-rw-r--r--drivers/infiniband/hw/irdma/uda_d.h5
-rw-r--r--drivers/infiniband/hw/irdma/uk.c370
-rw-r--r--drivers/infiniband/hw/irdma/user.h271
-rw-r--r--drivers/infiniband/hw/irdma/utils.c162
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c880
-rw-r--r--drivers/infiniband/hw/irdma/verbs.h55
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.c618
-rw-r--r--drivers/infiniband/hw/irdma/virtchnl.h176
-rw-r--r--drivers/infiniband/hw/mana/cq.c26
-rw-r--r--drivers/infiniband/hw/mana/device.c3
-rw-r--r--drivers/infiniband/hw/mana/main.c5
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h14
-rw-r--r--drivers/infiniband/hw/mana/mr.c6
-rw-r--r--drivers/infiniband/hw/mana/qp.c9
-rw-r--r--drivers/infiniband/hw/mlx4/cm.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c8
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c3
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c15
-rw-r--r--drivers/infiniband/hw/mlx5/data_direct.c2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c15
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c65
-rw-r--r--drivers/infiniband/hw/mlx5/gsi.c15
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c74
-rw-r--r--drivers/infiniband/hw/mlx5/main.c119
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h7
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c11
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c93
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c5
-rw-r--r--drivers/infiniband/hw/mlx5/std_types.c27
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c6
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h4
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c3
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c49
-rw-r--r--drivers/infiniband/sw/rxe/rxe_odp.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c49
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c59
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c50
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c16
-rw-r--r--drivers/input/ff-core.c2
-rw-r--r--drivers/input/ff-memless.c1
-rw-r--r--drivers/input/gameport/gameport.c1
-rw-r--r--drivers/input/input-compat.c30
-rw-r--r--drivers/input/input-compat.h3
-rw-r--r--drivers/input/input-mt.c14
-rw-r--r--drivers/input/input-poller.c1
-rw-r--r--drivers/input/input.c36
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c1
-rw-r--r--drivers/input/joystick/iforce/iforce-packets.c1
-rw-r--r--drivers/input/joystick/psxpad-spi.c6
-rw-r--r--drivers/input/keyboard/Kconfig30
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c12
-rw-r--r--drivers/input/keyboard/imx_sc_key.c2
-rw-r--r--drivers/input/keyboard/max7360-keypad.c308
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c530
-rw-r--r--drivers/input/keyboard/spear-keyboard.c71
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c305
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c13
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c35
-rw-r--r--drivers/input/misc/Kconfig42
-rw-r--r--drivers/input/misc/Makefile4
-rw-r--r--drivers/input/misc/ad714x.c1
-rw-r--r--drivers/input/misc/adxl34x.c1
-rw-r--r--drivers/input/misc/arizona-haptics.c14
-rw-r--r--drivers/input/misc/aw86927.c846
-rw-r--r--drivers/input/misc/cma3000_d0x.c1
-rw-r--r--drivers/input/misc/max7360-rotary.c192
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c1
-rw-r--r--drivers/input/misc/pf1550-onkey.c197
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c12
-rw-r--r--drivers/input/misc/tps6594-pwrbutton.c126
-rw-r--r--drivers/input/misc/uinput.c1
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c1
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.h3
-rw-r--r--drivers/input/rmi4/rmi_bus.c1
-rw-r--r--drivers/input/rmi4/rmi_driver.c1
-rw-r--r--drivers/input/serio/Kconfig4
-rw-r--r--drivers/input/serio/hil_mlc.c1
-rw-r--r--drivers/input/serio/hp_sdc.c1
-rw-r--r--drivers/input/serio/i8042.c1
-rw-r--r--drivers/input/serio/libps2.c1
-rw-r--r--drivers/input/serio/ps2-gpio.c2
-rw-r--r--drivers/input/serio/serio.c1
-rw-r--r--drivers/input/sparse-keymap.c1
-rw-r--r--drivers/input/tablet/pegasus_notetaker.c9
-rw-r--r--drivers/input/touch-overlay.c1
-rw-r--r--drivers/input/touchscreen.c1
-rw-r--r--drivers/input/touchscreen/Kconfig22
-rw-r--r--drivers/input/touchscreen/Makefile2
-rw-r--r--drivers/input/touchscreen/ad7879.c1
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c13
-rw-r--r--drivers/input/touchscreen/cyttsp_core.c1
-rw-r--r--drivers/input/touchscreen/fsl-imx25-tcq.c1
-rw-r--r--drivers/input/touchscreen/goodix.c28
-rw-r--r--drivers/input/touchscreen/goodix.h1
-rw-r--r--drivers/input/touchscreen/goodix_berlin_core.c1
-rw-r--r--drivers/input/touchscreen/himax_hx852x.c503
-rw-r--r--drivers/input/touchscreen/hynitron-cst816x.c253
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c121
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c4
-rw-r--r--drivers/input/touchscreen/tsc2007_core.c39
-rw-r--r--drivers/input/touchscreen/tsc200x-core.c1
-rw-r--r--drivers/input/touchscreen/wm9705.c1
-rw-r--r--drivers/input/touchscreen/wm9712.c1
-rw-r--r--drivers/input/touchscreen/wm9713.c1
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c1
-rw-r--r--drivers/interconnect/core.c2
-rw-r--r--drivers/interconnect/debugfs-client.c7
-rw-r--r--drivers/interconnect/qcom/Kconfig18
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/glymur.c2522
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c39
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.h9
-rw-r--r--drivers/interconnect/qcom/kaanapali.c1855
-rw-r--r--drivers/interconnect/qcom/milos.c142
-rw-r--r--drivers/interconnect/qcom/msm8996.c1
-rw-r--r--drivers/interconnect/qcom/qcs615.c511
-rw-r--r--drivers/interconnect/qcom/qcs615.h128
-rw-r--r--drivers/interconnect/qcom/qcs8300.c671
-rw-r--r--drivers/interconnect/qcom/qcs8300.h177
-rw-r--r--drivers/interconnect/qcom/qdu1000.c348
-rw-r--r--drivers/interconnect/qcom/qdu1000.h95
-rw-r--r--drivers/interconnect/qcom/sa8775p.c639
-rw-r--r--drivers/interconnect/qcom/sar2130p.c630
-rw-r--r--drivers/interconnect/qcom/sc7180.c678
-rw-r--r--drivers/interconnect/qcom/sc7180.h149
-rw-r--r--drivers/interconnect/qcom/sc7280.c617
-rw-r--r--drivers/interconnect/qcom/sc7280.h154
-rw-r--r--drivers/interconnect/qcom/sc8180x.c648
-rw-r--r--drivers/interconnect/qcom/sc8180x.h179
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c825
-rw-r--r--drivers/interconnect/qcom/sc8280xp.h209
-rw-r--r--drivers/interconnect/qcom/sdm670.c522
-rw-r--r--drivers/interconnect/qcom/sdm670.h128
-rw-r--r--drivers/interconnect/qcom/sdm845.c766
-rw-r--r--drivers/interconnect/qcom/sdm845.h140
-rw-r--r--drivers/interconnect/qcom/sdx55.c489
-rw-r--r--drivers/interconnect/qcom/sdx55.h70
-rw-r--r--drivers/interconnect/qcom/sdx65.c457
-rw-r--r--drivers/interconnect/qcom/sdx65.h65
-rw-r--r--drivers/interconnect/qcom/sdx75.c395
-rw-r--r--drivers/interconnect/qcom/sdx75.h97
-rw-r--r--drivers/interconnect/qcom/sm6350.c927
-rw-r--r--drivers/interconnect/qcom/sm6350.h139
-rw-r--r--drivers/interconnect/qcom/sm7150.c653
-rw-r--r--drivers/interconnect/qcom/sm7150.h140
-rw-r--r--drivers/interconnect/qcom/sm8150.c706
-rw-r--r--drivers/interconnect/qcom/sm8150.h152
-rw-r--r--drivers/interconnect/qcom/sm8250.c736
-rw-r--r--drivers/interconnect/qcom/sm8250.h168
-rw-r--r--drivers/interconnect/qcom/sm8350.c684
-rw-r--r--drivers/interconnect/qcom/sm8350.h158
-rw-r--r--drivers/interconnect/qcom/sm8450.c601
-rw-r--r--drivers/interconnect/qcom/sm8450.h169
-rw-r--r--drivers/interconnect/qcom/sm8550.c501
-rw-r--r--drivers/interconnect/qcom/sm8550.h138
-rw-r--r--drivers/interconnect/qcom/sm8650.c527
-rw-r--r--drivers/interconnect/qcom/sm8650.h144
-rw-r--r--drivers/interconnect/qcom/sm8750.c602
-rw-r--r--drivers/interconnect/qcom/x1e80100.c610
-rw-r--r--drivers/interconnect/qcom/x1e80100.h192
-rw-r--r--drivers/iommu/Kconfig15
-rw-r--r--drivers/iommu/Makefile2
-rw-r--r--drivers/iommu/amd/Kconfig5
-rw-r--r--drivers/iommu/amd/Makefile2
-rw-r--r--drivers/iommu/amd/amd_iommu.h1
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h119
-rw-r--r--drivers/iommu/amd/debugfs.c2
-rw-r--r--drivers/iommu/amd/init.c333
-rw-r--r--drivers/iommu/amd/io_pgtable.c560
-rw-r--r--drivers/iommu/amd/io_pgtable_v2.c370
-rw-r--r--drivers/iommu/amd/iommu.c577
-rw-r--r--drivers/iommu/apple-dart.c66
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c18
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c33
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c28
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu.c9
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c21
-rw-r--r--drivers/iommu/dma-iommu.c70
-rw-r--r--drivers/iommu/exynos-iommu.c20
-rw-r--r--drivers/iommu/fsl_pamu_domain.c12
-rw-r--r--drivers/iommu/generic_pt/.kunitconfig14
-rw-r--r--drivers/iommu/generic_pt/Kconfig79
-rw-r--r--drivers/iommu/generic_pt/fmt/Makefile28
-rw-r--r--drivers/iommu/generic_pt/fmt/amdv1.h411
-rw-r--r--drivers/iommu/generic_pt/fmt/defs_amdv1.h21
-rw-r--r--drivers/iommu/generic_pt/fmt/defs_vtdss.h21
-rw-r--r--drivers/iommu/generic_pt/fmt/defs_x86_64.h21
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_amdv1.c15
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_mock.c10
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_template.h48
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_vtdss.c10
-rw-r--r--drivers/iommu/generic_pt/fmt/iommu_x86_64.c11
-rw-r--r--drivers/iommu/generic_pt/fmt/vtdss.h285
-rw-r--r--drivers/iommu/generic_pt/fmt/x86_64.h279
-rw-r--r--drivers/iommu/generic_pt/iommu_pt.h1289
-rw-r--r--drivers/iommu/generic_pt/kunit_generic_pt.h823
-rw-r--r--drivers/iommu/generic_pt/kunit_iommu.h184
-rw-r--r--drivers/iommu/generic_pt/kunit_iommu_pt.h487
-rw-r--r--drivers/iommu/generic_pt/pt_common.h389
-rw-r--r--drivers/iommu/generic_pt/pt_defs.h332
-rw-r--r--drivers/iommu/generic_pt/pt_fmt_defaults.h295
-rw-r--r--drivers/iommu/generic_pt/pt_iter.h636
-rw-r--r--drivers/iommu/generic_pt/pt_log2.h122
-rw-r--r--drivers/iommu/intel/Kconfig6
-rw-r--r--drivers/iommu/intel/debugfs.c29
-rw-r--r--drivers/iommu/intel/iommu.c940
-rw-r--r--drivers/iommu/intel/iommu.h106
-rw-r--r--drivers/iommu/intel/nested.c7
-rw-r--r--drivers/iommu/intel/pasid.c44
-rw-r--r--drivers/iommu/intel/pasid.h1
-rw-r--r--drivers/iommu/intel/perf.c10
-rw-r--r--drivers/iommu/intel/perf.h5
-rw-r--r--drivers/iommu/intel/prq.c7
-rw-r--r--drivers/iommu/intel/svm.c1
-rw-r--r--drivers/iommu/io-pgtable-arm-selftests.c214
-rw-r--r--drivers/iommu/io-pgtable-arm.c203
-rw-r--r--drivers/iommu/io-pgtable-dart.c139
-rw-r--r--drivers/iommu/io-pgtable.c4
-rw-r--r--drivers/iommu/iommu-pages.c136
-rw-r--r--drivers/iommu/iommu-pages.h51
-rw-r--r--drivers/iommu/iommu-priv.h2
-rw-r--r--drivers/iommu/iommu-sva.c29
-rw-r--r--drivers/iommu/iommu.c70
-rw-r--r--drivers/iommu/iommufd/Kconfig1
-rw-r--r--drivers/iommu/iommufd/device.c3
-rw-r--r--drivers/iommu/iommufd/driver.c2
-rw-r--r--drivers/iommu/iommufd/eventq.c9
-rw-r--r--drivers/iommu/iommufd/io_pagetable.c90
-rw-r--r--drivers/iommu/iommufd/io_pagetable.h54
-rw-r--r--drivers/iommu/iommufd/ioas.c12
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h21
-rw-r--r--drivers/iommu/iommufd/iommufd_test.h21
-rw-r--r--drivers/iommu/iommufd/iova_bitmap.c5
-rw-r--r--drivers/iommu/iommufd/main.c69
-rw-r--r--drivers/iommu/iommufd/pages.c414
-rw-r--r--drivers/iommu/iommufd/selftest.c571
-rw-r--r--drivers/iommu/ipmmu-vmsa.c12
-rw-r--r--drivers/iommu/msm_iommu.c11
-rw-r--r--drivers/iommu/mtk_iommu.c174
-rw-r--r--drivers/iommu/mtk_iommu_v1.c35
-rw-r--r--drivers/iommu/omap-iommu.c21
-rw-r--r--drivers/iommu/omap-iommu.h2
-rw-r--r--drivers/iommu/riscv/iommu-platform.c17
-rw-r--r--drivers/iommu/riscv/iommu.c19
-rw-r--r--drivers/iommu/rockchip-iommu.c20
-rw-r--r--drivers/iommu/s390-iommu.c42
-rw-r--r--drivers/iommu/sprd-iommu.c3
-rw-r--r--drivers/iommu/sun50i-iommu.c10
-rw-r--r--drivers/iommu/tegra-smmu.c15
-rw-r--r--drivers/iommu/virtio-iommu.c6
-rw-r--r--drivers/irqchip/Kconfig17
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/exynos-combiner.c14
-rw-r--r--drivers/irqchip/irq-aclint-sswi.c3
-rw-r--r--drivers/irqchip/irq-apple-aic.c69
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c12
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c256
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c15
-rw-r--r--drivers/irqchip/irq-bcm2712-mip.c11
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c29
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c31
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c25
-rw-r--r--drivers/irqchip/irq-gic-its-msi-parent.c91
-rw-r--r--drivers/irqchip/irq-gic-v2m.c13
-rw-r--r--drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c12
-rw-r--r--drivers/irqchip/irq-gic-v3.c225
-rw-r--r--drivers/irqchip/irq-gic-v5-irs.c2
-rw-r--r--drivers/irqchip/irq-gic-v5-its.c26
-rw-r--r--drivers/irqchip/irq-gic-v5.c7
-rw-r--r--drivers/irqchip/irq-gic.c3
-rw-r--r--drivers/irqchip/irq-i8259.c12
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c16
-rw-r--r--drivers/irqchip/irq-imx-mu-msi.c28
-rw-r--r--drivers/irqchip/irq-loongson-eiointc.c117
-rw-r--r--drivers/irqchip/irq-loongson-htpic.c10
-rw-r--r--drivers/irqchip/irq-loongson-htvec.c12
-rw-r--r--drivers/irqchip/irq-loongson-pch-lpc.c21
-rw-r--r--drivers/irqchip/irq-loongson-pch-pic.c12
-rw-r--r--drivers/irqchip/irq-mchp-eic.c17
-rw-r--r--drivers/irqchip/irq-meson-gpio.c17
-rw-r--r--drivers/irqchip/irq-msi-lib.c14
-rw-r--r--drivers/irqchip/irq-mst-intc.c12
-rw-r--r--drivers/irqchip/irq-mtk-cirq.c12
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c2
-rw-r--r--drivers/irqchip/irq-nvic.c3
-rw-r--r--drivers/irqchip/irq-partition-percpu.c241
-rw-r--r--drivers/irqchip/irq-qcom-mpm.c6
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c3
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c51
-rw-r--r--drivers/irqchip/irq-renesas-rzv2h.c32
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c13
-rw-r--r--drivers/irqchip/irq-riscv-imsic-platform.c4
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.c20
-rw-r--r--drivers/irqchip/irq-riscv-imsic-state.h4
-rw-r--r--drivers/irqchip/irq-riscv-intc.c3
-rw-r--r--drivers/irqchip/irq-riscv-rpmi-sysmsi.c328
-rw-r--r--drivers/irqchip/irq-sa11x0.c12
-rw-r--r--drivers/irqchip/irq-sg2042-msi.c26
-rw-r--r--drivers/irqchip/irq-sifive-plic.c173
-rw-r--r--drivers/irqchip/irq-starfive-jh8100-intc.c6
-rw-r--r--drivers/irqchip/irq-sun6i-r.c18
-rw-r--r--drivers/irqchip/irq-tegra.c12
-rw-r--r--drivers/irqchip/irq-ts4800.c1
-rw-r--r--drivers/irqchip/irq-vic.c12
-rw-r--r--drivers/irqchip/irqchip.c10
-rw-r--r--drivers/irqchip/qcom-irq-combiner.c6
-rw-r--r--drivers/irqchip/qcom-pdc.c5
-rw-r--r--drivers/isdn/capi/kcapi.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c18
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/isdn/mISDN/socket.c4
-rw-r--r--drivers/leds/Kconfig10
-rw-r--r--drivers/leds/flash/leds-qcom-flash.c87
-rw-r--r--drivers/leds/flash/leds-rt4505.c2
-rw-r--r--drivers/leds/flash/leds-rt8515.c2
-rw-r--r--drivers/leds/flash/leds-sgm3140.c3
-rw-r--r--drivers/leds/flash/leds-tps6131x.c2
-rw-r--r--drivers/leds/led-class.c23
-rw-r--r--drivers/leds/leds-cros_ec.c5
-rw-r--r--drivers/leds/leds-is31fl319x.c8
-rw-r--r--drivers/leds/leds-is31fl32xx.c47
-rw-r--r--drivers/leds/leds-lp50xx.c67
-rw-r--r--drivers/leds/leds-lp55xx-common.c2
-rw-r--r--drivers/leds/leds-max5970.c2
-rw-r--r--drivers/leds/leds-max77705.c4
-rw-r--r--drivers/leds/leds-netxbig.c36
-rw-r--r--drivers/leds/leds-pwm.c27
-rw-r--r--drivers/leds/leds-qnap-mcu.c175
-rw-r--r--drivers/leds/leds-upboard.c2
-rw-r--r--drivers/leds/rgb/leds-ktd202x.c4
-rw-r--r--drivers/leds/rgb/leds-ncp5623.c2
-rw-r--r--drivers/leds/rgb/leds-qcom-lpg.c10
-rw-r--r--drivers/leds/trigger/ledtrig-cpu.c14
-rw-r--r--drivers/leds/trigger/ledtrig-input-events.c2
-rw-r--r--drivers/macintosh/mac_hid.c3
-rw-r--r--drivers/macintosh/via-pmu-backlight.c2
-rw-r--r--drivers/macintosh/via-pmu.c12
-rw-r--r--drivers/mailbox/Kconfig21
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/arm_mhuv3.c2
-rw-r--r--drivers/mailbox/mailbox-test.c2
-rw-r--r--drivers/mailbox/mailbox-th1520.c4
-rw-r--r--drivers/mailbox/mailbox.c65
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c57
-rw-r--r--drivers/mailbox/mtk-gpueb-mailbox.c319
-rw-r--r--drivers/mailbox/omap-mailbox.c35
-rw-r--r--drivers/mailbox/pcc.c8
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c1
-rw-r--r--drivers/mailbox/riscv-sbi-mpxy-mbox.c1019
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c24
-rw-r--r--drivers/md/Kconfig31
-rw-r--r--drivers/md/Makefile5
-rw-r--r--drivers/md/bcache/alloc.c25
-rw-r--r--drivers/md/bcache/bcache.h6
-rw-r--r--drivers/md/bcache/bset.h8
-rw-r--r--drivers/md/bcache/btree.c53
-rw-r--r--drivers/md/bcache/debug.c3
-rw-r--r--drivers/md/bcache/io.c3
-rw-r--r--drivers/md/bcache/journal.c93
-rw-r--r--drivers/md/bcache/journal.h13
-rw-r--r--drivers/md/bcache/movinggc.c8
-rw-r--r--drivers/md/bcache/super.c35
-rw-r--r--drivers/md/bcache/sysfs.c15
-rw-r--r--drivers/md/bcache/writeback.c13
-rw-r--r--drivers/md/dm-bufio.c12
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-core.h2
-rw-r--r--drivers/md/dm-flakey.c2
-rw-r--r--drivers/md/dm-ima.c70
-rw-r--r--drivers/md/dm-integrity.c361
-rw-r--r--drivers/md/dm-log-writes.c2
-rw-r--r--drivers/md/dm-pcache/Kconfig17
-rw-r--r--drivers/md/dm-pcache/Makefile3
-rw-r--r--drivers/md/dm-pcache/backing_dev.c374
-rw-r--r--drivers/md/dm-pcache/backing_dev.h127
-rw-r--r--drivers/md/dm-pcache/cache.c445
-rw-r--r--drivers/md/dm-pcache/cache.h635
-rw-r--r--drivers/md/dm-pcache/cache_dev.c303
-rw-r--r--drivers/md/dm-pcache/cache_dev.h70
-rw-r--r--drivers/md/dm-pcache/cache_gc.c170
-rw-r--r--drivers/md/dm-pcache/cache_key.c888
-rw-r--r--drivers/md/dm-pcache/cache_req.c836
-rw-r--r--drivers/md/dm-pcache/cache_segment.c305
-rw-r--r--drivers/md/dm-pcache/cache_writeback.c261
-rw-r--r--drivers/md/dm-pcache/dm_pcache.c497
-rw-r--r--drivers/md/dm-pcache/dm_pcache.h67
-rw-r--r--drivers/md/dm-pcache/pcache_internal.h117
-rw-r--r--drivers/md/dm-pcache/segment.c61
-rw-r--r--drivers/md/dm-pcache/segment.h74
-rw-r--r--drivers/md/dm-raid.c37
-rw-r--r--drivers/md/dm-region-hash.c2
-rw-r--r--drivers/md/dm-stripe.c10
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-target.c3
-rw-r--r--drivers/md/dm-thin.c4
-rw-r--r--drivers/md/dm-vdo/data-vio.c17
-rw-r--r--drivers/md/dm-vdo/indexer/volume-index.c4
-rw-r--r--drivers/md/dm-vdo/logger.c2
-rw-r--r--drivers/md/dm-vdo/vio.c2
-rw-r--r--drivers/md/dm-verity-fec.c6
-rw-r--r--drivers/md/dm-zone.c63
-rw-r--r--drivers/md/dm.c49
-rw-r--r--drivers/md/dm.h3
-rw-r--r--drivers/md/md-bitmap.c89
-rw-r--r--drivers/md/md-bitmap.h107
-rw-r--r--drivers/md/md-cluster.c6
-rw-r--r--drivers/md/md-linear.c17
-rw-r--r--drivers/md/md-llbitmap.c1626
-rw-r--r--drivers/md/md.c641
-rw-r--r--drivers/md/md.h34
-rw-r--r--drivers/md/raid0.c51
-rw-r--r--drivers/md/raid1-10.c2
-rw-r--r--drivers/md/raid1.c121
-rw-r--r--drivers/md/raid1.h4
-rw-r--r--drivers/md/raid10.c109
-rw-r--r--drivers/md/raid10.h2
-rw-r--r--drivers/md/raid5-cache.c2
-rw-r--r--drivers/md/raid5.c82
-rw-r--r--drivers/media/cec/core/cec-core.c3
-rw-r--r--drivers/media/cec/platform/cec-gpio/cec-gpio.c2
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c1
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile6
-rw-r--r--drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c6
-rw-r--r--drivers/media/cec/usb/pulse8/pulse8-cec.c4
-rw-r--r--drivers/media/cec/usb/rainshadow/rainshadow-cec.c4
-rw-r--r--drivers/media/common/b2c2/flexcop-sram.c2
-rw-r--r--drivers/media/common/b2c2/flexcop.c22
-rw-r--r--drivers/media/common/cx2341x.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_fops.c4
-rw-r--r--drivers/media/common/siano/smsir.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c1
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c17
-rw-r--r--drivers/media/dvb-core/dmxdev.c4
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c2
-rw-r--r--drivers/media/dvb-core/dvb_demux.c28
-rw-r--r--drivers/media/dvb-core/dvb_ringbuffer.c36
-rw-r--r--drivers/media/dvb-core/dvbdev.c4
-rw-r--r--drivers/media/dvb-frontends/Kconfig4
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c3
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c2
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c3
-rw-r--r--drivers/media/dvb-frontends/lgdt330x.c4
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c7
-rw-r--r--drivers/media/i2c/Kconfig60
-rw-r--r--drivers/media/i2c/Makefile6
-rw-r--r--drivers/media/i2c/adv7180.c338
-rw-r--r--drivers/media/i2c/adv7604.c6
-rw-r--r--drivers/media/i2c/adv7842.c17
-rw-r--r--drivers/media/i2c/ar0521.c13
-rw-r--r--drivers/media/i2c/ccs/ccs-core.c8
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c4
-rw-r--r--drivers/media/i2c/ds90ub913.c19
-rw-r--r--drivers/media/i2c/ds90ub953.c14
-rw-r--r--drivers/media/i2c/dw9719.c128
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c34
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_mode.c9
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_reg.h1
-rw-r--r--drivers/media/i2c/gc0310.c (renamed from drivers/staging/media/atomisp/i2c/atomisp-gc0310.c)0
-rw-r--r--drivers/media/i2c/gc05a2.c8
-rw-r--r--drivers/media/i2c/gc08a3.c8
-rw-r--r--drivers/media/i2c/gc2145.c2
-rw-r--r--drivers/media/i2c/hi556.c92
-rw-r--r--drivers/media/i2c/hi846.c11
-rw-r--r--drivers/media/i2c/hi847.c84
-rw-r--r--drivers/media/i2c/imx111.c1610
-rw-r--r--drivers/media/i2c/imx208.c91
-rw-r--r--drivers/media/i2c/imx214.c262
-rw-r--r--drivers/media/i2c/imx219.c105
-rw-r--r--drivers/media/i2c/imx258.c105
-rw-r--r--drivers/media/i2c/imx274.c5
-rw-r--r--drivers/media/i2c/imx283.c5
-rw-r--r--drivers/media/i2c/imx290.c27
-rw-r--r--drivers/media/i2c/imx296.c4
-rw-r--r--drivers/media/i2c/imx319.c92
-rw-r--r--drivers/media/i2c/imx334.c15
-rw-r--r--drivers/media/i2c/imx335.c522
-rw-r--r--drivers/media/i2c/imx355.c90
-rw-r--r--drivers/media/i2c/imx412.c13
-rw-r--r--drivers/media/i2c/imx415.c2
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c6
-rw-r--r--drivers/media/i2c/max9286.c4
-rw-r--r--drivers/media/i2c/max96717.c18
-rw-r--r--drivers/media/i2c/msp3400-kthreads.c2
-rw-r--r--drivers/media/i2c/mt9m001.c5
-rw-r--r--drivers/media/i2c/mt9m111.c9
-rw-r--r--drivers/media/i2c/mt9m114.c75
-rw-r--r--drivers/media/i2c/mt9p031.c9
-rw-r--r--drivers/media/i2c/mt9t112.c11
-rw-r--r--drivers/media/i2c/mt9v032.c105
-rw-r--r--drivers/media/i2c/mt9v111.c21
-rw-r--r--drivers/media/i2c/og01a1b.c115
-rw-r--r--drivers/media/i2c/og0ve1b.c816
-rw-r--r--drivers/media/i2c/ov02a10.c45
-rw-r--r--drivers/media/i2c/ov02c10.c135
-rw-r--r--drivers/media/i2c/ov02e10.c107
-rw-r--r--drivers/media/i2c/ov08d10.c82
-rw-r--r--drivers/media/i2c/ov08x40.c95
-rw-r--r--drivers/media/i2c/ov13858.c69
-rw-r--r--drivers/media/i2c/ov13b10.c111
-rw-r--r--drivers/media/i2c/ov2659.c5
-rw-r--r--drivers/media/i2c/ov2680.c29
-rw-r--r--drivers/media/i2c/ov2685.c16
-rw-r--r--drivers/media/i2c/ov2735.c1109
-rw-r--r--drivers/media/i2c/ov2740.c91
-rw-r--r--drivers/media/i2c/ov4689.c12
-rw-r--r--drivers/media/i2c/ov5640.c9
-rw-r--r--drivers/media/i2c/ov5645.c13
-rw-r--r--drivers/media/i2c/ov5647.c9
-rw-r--r--drivers/media/i2c/ov5648.c10
-rw-r--r--drivers/media/i2c/ov5670.c105
-rw-r--r--drivers/media/i2c/ov5675.c93
-rw-r--r--drivers/media/i2c/ov5693.c20
-rw-r--r--drivers/media/i2c/ov5695.c16
-rw-r--r--drivers/media/i2c/ov6211.c793
-rw-r--r--drivers/media/i2c/ov64a40.c2
-rw-r--r--drivers/media/i2c/ov6650.c1149
-rw-r--r--drivers/media/i2c/ov7251.c26
-rw-r--r--drivers/media/i2c/ov7740.c11
-rw-r--r--drivers/media/i2c/ov8856.c95
-rw-r--r--drivers/media/i2c/ov8858.c2
-rw-r--r--drivers/media/i2c/ov8865.c50
-rw-r--r--drivers/media/i2c/ov9282.c13
-rw-r--r--drivers/media/i2c/ov9640.c5
-rw-r--r--drivers/media/i2c/ov9650.c5
-rw-r--r--drivers/media/i2c/ov9734.c82
-rw-r--r--drivers/media/i2c/rj54n1cb0c.c17
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c19
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h2
-rw-r--r--drivers/media/i2c/s5k5baf.c21
-rw-r--r--drivers/media/i2c/s5k6a3.c20
-rw-r--r--drivers/media/i2c/saa6752hs.c2
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/saa7127.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/st-mipid02.c4
-rw-r--r--drivers/media/i2c/tc358743.c113
-rw-r--r--drivers/media/i2c/tc358743_regs.h57
-rw-r--r--drivers/media/i2c/tc358746.c12
-rw-r--r--drivers/media/i2c/tda1997x.c1
-rw-r--r--drivers/media/i2c/tda9840.c2
-rw-r--r--drivers/media/i2c/tea6415c.c2
-rw-r--r--drivers/media/i2c/tea6420.c2
-rw-r--r--drivers/media/i2c/ths7303.c2
-rw-r--r--drivers/media/i2c/tlv320aic23b.c2
-rw-r--r--drivers/media/i2c/upd64031a.c2
-rw-r--r--drivers/media/i2c/upd64083.c2
-rw-r--r--drivers/media/i2c/vd55g1.c238
-rw-r--r--drivers/media/i2c/vd56g3.c2
-rw-r--r--drivers/media/i2c/vgxy61.c26
-rw-r--r--drivers/media/i2c/vp27smpx.c2
-rw-r--r--drivers/media/i2c/wm8739.c2
-rw-r--r--drivers/media/i2c/wm8775.c2
-rw-r--r--drivers/media/mc/mc-devnode.c6
-rw-r--r--drivers/media/mc/mc-entity.c6
-rw-r--r--drivers/media/mc/mc-request.c36
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c14
-rw-r--r--drivers/media/pci/bt8xx/bttv-vbi.c6
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-v4l2.c60
-rw-r--r--drivers/media/pci/cx18/cx18-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-audio.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-audio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-core.h2
-rw-r--r--drivers/media/pci/cx18/cx18-av-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-av-vbi.c2
-rw-r--r--drivers/media/pci/cx18/cx18-cards.c2
-rw-r--r--drivers/media/pci/cx18/cx18-cards.h2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.c2
-rw-r--r--drivers/media/pci/cx18/cx18-controls.h2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c11
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h4
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.c13
-rw-r--r--drivers/media/pci/cx18/cx18-fileops.h2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.c2
-rw-r--r--drivers/media/pci/cx18/cx18-firmware.h2
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.c2
-rw-r--r--drivers/media/pci/cx18/cx18-gpio.h2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.h2
-rw-r--r--drivers/media/pci/cx18/cx18-io.c2
-rw-r--r--drivers/media/pci/cx18/cx18-io.h2
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c90
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.h10
-rw-r--r--drivers/media/pci/cx18/cx18-irq.c2
-rw-r--r--drivers/media/pci/cx18/cx18-irq.h2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.c2
-rw-r--r--drivers/media/pci/cx18/cx18-mailbox.h2
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c15
-rw-r--r--drivers/media/pci/cx18/cx18-queue.h2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.c2
-rw-r--r--drivers/media/pci/cx18/cx18-scb.h2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c2
-rw-r--r--drivers/media/pci/cx18/cx18-streams.h2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.c2
-rw-r--r--drivers/media/pci/cx18/cx18-vbi.h2
-rw-r--r--drivers/media/pci/cx18/cx18-version.h2
-rw-r--r--drivers/media/pci/cx18/cx18-video.c2
-rw-r--r--drivers/media/pci/cx18/cx18-video.h2
-rw-r--r--drivers/media/pci/cx18/cx23418.h2
-rw-r--r--drivers/media/pci/intel/ipu-bridge.c8
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c4
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c10
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c40
-rw-r--r--drivers/media/pci/intel/ipu6/ipu6-isys-video.c15
-rw-r--r--drivers/media/pci/intel/ivsc/mei_ace.c4
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-cards.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-controls.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c28
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.h24
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.c42
-rw-r--r--drivers/media/pci/ivtv/ivtv-fileops.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-firmware.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-gpio.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c142
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.h8
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-irq.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-mailbox.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-routing.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c17
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-vbi.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-version.h2
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c8
-rw-r--r--drivers/media/pci/mgb4/mgb4_trigger.c7
-rw-r--r--drivers/media/pci/mgb4/mgb4_vin.c7
-rw-r--r--drivers/media/pci/mgb4/mgb4_vout.c4
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-video.c4
-rw-r--r--drivers/media/pci/saa7164/saa7164-encoder.c30
-rw-r--r--drivers/media/pci/saa7164/saa7164-vbi.c25
-rw-r--r--drivers/media/pci/saa7164/saa7164.h10
-rw-r--r--drivers/media/pci/tw68/tw68-core.c4
-rw-r--r--drivers/media/pci/tw68/tw68-reg.h2
-rw-r--r--drivers/media/pci/tw68/tw68-risc.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/pci/tw68/tw68.h2
-rw-r--r--drivers/media/pci/zoran/zoran.h6
-rw-r--r--drivers/media/pci/zoran/zoran_card.c4
-rw-r--r--drivers/media/pci/zoran/zoran_card.h2
-rw-r--r--drivers/media/pci/zoran/zoran_driver.c35
-rw-r--r--drivers/media/platform/Kconfig1
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c151
-rw-r--r--drivers/media/platform/amlogic/c3/isp/Kconfig1
-rw-r--r--drivers/media/platform/amlogic/c3/isp/c3-isp-params.c166
-rw-r--r--drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c7
-rw-r--r--drivers/media/platform/amlogic/meson-ge2d/ge2d.c30
-rw-r--r--drivers/media/platform/amphion/vdec.c4
-rw-r--r--drivers/media/platform/amphion/venc.c4
-rw-r--r--drivers/media/platform/amphion/vpu.h2
-rw-r--r--drivers/media/platform/amphion/vpu_core.c40
-rw-r--r--drivers/media/platform/amphion/vpu_drv.c26
-rw-r--r--drivers/media/platform/amphion/vpu_malone.c23
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.c38
-rw-r--r--drivers/media/platform/amphion/vpu_v4l2.h18
-rw-r--r--drivers/media/platform/arm/Kconfig5
-rw-r--r--drivers/media/platform/arm/Makefile2
-rw-r--r--drivers/media/platform/arm/mali-c55/Kconfig18
-rw-r--r--drivers/media/platform/arm/mali-c55/Makefile11
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-capture.c959
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-common.h310
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-core.c917
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-isp.c665
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-params.c819
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-registers.h449
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-resizer.c1156
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-stats.c323
-rw-r--r--drivers/media/platform/arm/mali-c55/mali-c55-tpg.c437
-rw-r--r--drivers/media/platform/aspeed/aspeed-video.c199
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c75
-rw-r--r--drivers/media/platform/chips-media/coda/coda-bit.c2
-rw-r--r--drivers/media/platform/chips-media/coda/coda-common.c54
-rw-r--r--drivers/media/platform/chips-media/coda/coda-jpeg.c4
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.c10
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-helper.h2
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-dec.c23
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu-enc.c31
-rw-r--r--drivers/media/platform/chips-media/wave5/wave5-vpu.h5
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.c29
-rw-r--r--drivers/media/platform/imagination/e5010-jpeg-enc.h5
-rw-r--r--drivers/media/platform/m2m-deinterlace.c33
-rw-r--r--drivers/media/platform/marvell/cafe-driver.c2
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c48
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_dec_hw.c4
-rw-r--r--drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c4
-rw-r--r--drivers/media/platform/mediatek/mdp/mtk_mdp_m2m.c29
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c3
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c16
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c27
-rw-r--r--drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_dbgfs.c4
-rw-r--r--drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c14
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec.c43
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.c21
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/mtk_vcodec_dec_drv.h7
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_av1_req_lat_if.c6
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_h264_req_multi_if.c14
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_hevc_req_multi_if.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp8_req_if.c2
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec/vdec_vp9_req_lat_if.c8
-rw-r--r--drivers/media/platform/mediatek/vcodec/decoder/vdec_vpu_if.c5
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c51
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.c21
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_drv.h6
-rw-r--r--drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c5
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/h264.c2
-rw-r--r--drivers/media/platform/nvidia/tegra-vde/v4l2.c35
-rw-r--r--drivers/media/platform/nxp/dw100/dw100.c16
-rw-r--r--drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c51
-rw-r--r--drivers/media/platform/nxp/imx-mipi-csis.c375
-rw-r--r--drivers/media/platform/nxp/imx-pxp.c14
-rw-r--r--drivers/media/platform/nxp/imx7-media-csi.c1
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c58
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-core.h15
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-gasket.c22
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-hw.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-m2m.c296
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-pipe.c2
-rw-r--r--drivers/media/platform/nxp/imx8-isi/imx8-isi-video.c156
-rw-r--r--drivers/media/platform/nxp/imx8mq-mipi-csi2.c5
-rw-r--r--drivers/media/platform/nxp/mx2_emmaprp.c31
-rw-r--r--drivers/media/platform/qcom/camss/Makefile7
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-340.c190
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.c (renamed from drivers/media/platform/qcom/camss/camss-csid-780.c)34
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid-gen3.h (renamed from drivers/media/platform/qcom/camss/camss-csid-780.h)8
-rw-r--r--drivers/media/platform/qcom/camss/camss-csid.h3
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c277
-rw-r--r--drivers/media/platform/qcom/camss/camss-csiphy.c1
-rw-r--r--drivers/media/platform/qcom/camss/camss-ispif.c8
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-340.c320
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c12
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-gen3.c (renamed from drivers/media/platform/qcom/camss/camss-vfe-780.c)76
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-vbif.c31
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-vbif.h19
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c43
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h6
-rw-r--r--drivers/media/platform/qcom/camss/camss-video.c43
-rw-r--r--drivers/media/platform/qcom/camss/camss.c1188
-rw-r--r--drivers/media/platform/qcom/camss/camss.h7
-rw-r--r--drivers/media/platform/qcom/iris/Makefile7
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.c235
-rw-r--r--drivers/media/platform/qcom/iris/iris_buffer.h7
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.c235
-rw-r--r--drivers/media/platform/qcom/iris/iris_common.h18
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.c10
-rw-r--r--drivers/media/platform/qcom/iris/iris_core.h20
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.c687
-rw-r--r--drivers/media/platform/qcom/iris/iris_ctrls.h15
-rw-r--r--drivers/media/platform/qcom/iris/iris_firmware.c33
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_common.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_command.c489
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_defines.h112
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen1_response.c60
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_command.c378
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_defines.h45
-rw-r--r--drivers/media/platform/qcom/iris/iris_hfi_gen2_response.c46
-rw-r--r--drivers/media/platform/qcom/iris/iris_instance.h29
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_common.h94
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen1.c417
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_gen2.c621
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_qcs8300.h189
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sc7280.h26
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8250.c140
-rw-r--r--drivers/media/platform/qcom/iris/iris_platform_sm8750.h22
-rw-r--r--drivers/media/platform/qcom/iris/iris_probe.c41
-rw-r--r--drivers/media/platform/qcom/iris/iris_resources.c2
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.c9
-rw-r--r--drivers/media/platform/qcom/iris/iris_state.h1
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.c39
-rw-r--r--drivers/media/platform/qcom/iris/iris_utils.h2
-rw-r--r--drivers/media/platform/qcom/iris/iris_vb2.c66
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.c308
-rw-r--r--drivers/media/platform/qcom/iris/iris_vdec.h13
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.c616
-rw-r--r--drivers/media/platform/qcom/iris/iris_venc.h27
-rw-r--r--drivers/media/platform/qcom/iris/iris_vidc.c337
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu2.c8
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu3x.c202
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.c922
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_buffer.h24
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.c48
-rw-r--r--drivers/media/platform/qcom/iris/iris_vpu_common.h6
-rw-r--r--drivers/media/platform/qcom/venus/core.c114
-rw-r--r--drivers/media/platform/qcom/venus/core.h22
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c61
-rw-r--r--drivers/media/platform/qcom/venus/firmware.h2
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c12
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c11
-rw-r--r--drivers/media/platform/qcom/venus/hfi_parser.c2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c23
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h34
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v4.c188
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c33
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c25
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h4
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c11
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c13
-rw-r--r--drivers/media/platform/qcom/venus/venc.c13
-rw-r--r--drivers/media/platform/raspberrypi/rp1-cfe/csi2.c2
-rw-r--r--drivers/media/platform/renesas/Kconfig1
-rw-r--r--drivers/media/platform/renesas/Makefile1
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-core.c8
-rw-r--r--drivers/media/platform/renesas/rcar-vin/rcar-v4l2.c2
-rw-r--r--drivers/media/platform/renesas/rcar_drif.c13
-rw-r--r--drivers/media/platform/renesas/rcar_fdp1.c33
-rw-r--r--drivers/media/platform/renesas/rcar_jpu.c45
-rw-r--r--drivers/media/platform/renesas/renesas-ceu.c10
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-core.c2
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-cru.h9
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-csi2.c8
-rw-r--r--drivers/media/platform/renesas/rzg2l-cru/rzg2l-video.c31
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/Kconfig18
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/Makefile5
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-dev.c251
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-subdev.c376
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c531
-rw-r--r--drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc.h130
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c17
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_histo.c6
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c18
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_vspx.c1
-rw-r--r--drivers/media/platform/rockchip/Kconfig1
-rw-r--r--drivers/media/platform/rockchip/Makefile1
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c36
-rw-r--r--drivers/media/platform/rockchip/rga/rga.h5
-rw-r--r--drivers/media/platform/rockchip/rkcif/Kconfig18
-rw-r--r--drivers/media/platform/rockchip/rkcif/Makefile8
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-dvp.c865
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-dvp.h25
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-mipi.c777
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-capture-mipi.h23
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-common.h250
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-dev.c303
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-interface.c442
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-interface.h31
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-regs.h153
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-stream.c636
-rw-r--r--drivers/media/platform/rockchip/rkcif/rkcif-stream.h32
-rw-r--r--drivers/media/platform/rockchip/rkisp1/Kconfig1
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h18
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c4
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c123
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c31
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c151
-rw-r--r--drivers/media/platform/rockchip/rkvdec/Makefile2
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-hevc-data.c1848
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-hevc.c820
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-regs.h4
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec-vp9.c4
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.c221
-rw-r--r--drivers/media/platform/rockchip/rkvdec/rkvdec.h21
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-core.h6
-rw-r--r--drivers/media/platform/samsung/exynos-gsc/gsc-m2m.c37
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-core.h5
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-is.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-lite.c1
-rw-r--r--drivers/media/platform/samsung/exynos4-is/fimc-m2m.c19
-rw-r--r--drivers/media/platform/samsung/exynos4-is/media-dev.c14
-rw-r--r--drivers/media/platform/samsung/s3c-camif/camif-capture.c26
-rw-r--r--drivers/media/platform/samsung/s5p-g2d/g2d.c44
-rw-r--r--drivers/media/platform/samsung/s5p-jpeg/jpeg-core.c40
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c17
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_cmd_v6.c35
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_common.h6
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_dec.c34
-rw-r--r--drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c38
-rw-r--r--drivers/media/platform/st/Makefile1
-rw-r--r--drivers/media/platform/st/sti/Kconfig1
-rw-r--r--drivers/media/platform/st/sti/Makefile1
-rw-r--r--drivers/media/platform/st/sti/bdisp/bdisp-v4l2.c30
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/Kconfig28
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/Makefile11
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.c262
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-common.h60
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c1158
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.h287
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.c244
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-debugfs.h23
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.c235
-rw-r--r--drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-dvb.h17
-rw-r--r--drivers/media/platform/st/sti/delta/delta-mjpeg-dec.c20
-rw-r--r--drivers/media/platform/st/sti/delta/delta-v4l2.c41
-rw-r--r--drivers/media/platform/st/sti/hva/hva-v4l2.c38
-rw-r--r--drivers/media/platform/st/sti/hva/hva.h2
-rw-r--r--drivers/media/platform/st/stm32/dma2d/dma2d.c35
-rw-r--r--drivers/media/platform/st/stm32/stm32-csi.c4
-rw-r--r--drivers/media/platform/st/stm32/stm32-dcmi.c4
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi_capture.c16
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c12
-rw-r--r--drivers/media/platform/sunxi/sun8i-rotate/sun8i_rotate.c12
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.c10
-rw-r--r--drivers/media/platform/synopsys/hdmirx/snps_hdmirx.h6
-rw-r--r--drivers/media/platform/ti/Kconfig3
-rw-r--r--drivers/media/platform/ti/cal/cal.c3
-rw-r--r--drivers/media/platform/ti/davinci/vpif_capture.c4
-rw-r--r--drivers/media/platform/ti/davinci/vpif_display.c4
-rw-r--r--drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c67
-rw-r--r--drivers/media/platform/ti/omap/omap_vout.c6
-rw-r--r--drivers/media/platform/ti/omap3isp/isp.c10
-rw-r--r--drivers/media/platform/ti/omap3isp/ispccdc.c8
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_aewb.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isph3a_af.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/isphist.c2
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.c7
-rw-r--r--drivers/media/platform/ti/omap3isp/ispstat.h3
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.c36
-rw-r--r--drivers/media/platform/ti/omap3isp/ispvideo.h6
-rw-r--r--drivers/media/platform/ti/vpe/vpe.c28
-rw-r--r--drivers/media/platform/verisilicon/hantro.h4
-rw-r--r--drivers/media/platform/verisilicon/hantro_drv.c14
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2.c88
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c17
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_regs.h13
-rw-r--r--drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c2
-rw-r--r--drivers/media/platform/verisilicon/hantro_hw.h1
-rw-r--r--drivers/media/platform/verisilicon/hantro_v4l2.c28
-rw-r--r--drivers/media/platform/verisilicon/imx8m_vpu_hw.c22
-rw-r--r--drivers/media/platform/xilinx/xilinx-dma.c10
-rw-r--r--drivers/media/radio/Kconfig17
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-aimslab.c2
-rw-r--r--drivers/media/radio/radio-aztech.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-isa.c2
-rw-r--r--drivers/media/radio/radio-isa.h2
-rw-r--r--drivers/media/radio/radio-keene.c4
-rw-r--r--drivers/media/radio/radio-miropcm20.c2
-rw-r--r--drivers/media/radio/radio-raremono.c4
-rw-r--r--drivers/media/radio/radio-rtrack2.c2
-rw-r--r--drivers/media/radio/radio-terratec.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c2159
-rw-r--r--drivers/media/radio/radio-zoltrix.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c10
-rw-r--r--drivers/media/rc/imon.c99
-rw-r--r--drivers/media/rc/ir-hix5hd2.c1
-rw-r--r--drivers/media/rc/lirc_dev.c9
-rw-r--r--drivers/media/rc/pwm-ir-tx.c5
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/rc/st_rc.c2
-rw-r--r--drivers/media/test-drivers/vicodec/vicodec-core.c34
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_channel.c3
-rw-r--r--drivers/media/test-drivers/vim2m.c37
-rw-r--r--drivers/media/test-drivers/vimc/vimc-capture.c4
-rw-r--r--drivers/media/test-drivers/vimc/vimc-core.c2
-rw-r--r--drivers/media/test-drivers/visl/visl-core.c5
-rw-r--r--drivers/media/test-drivers/visl/visl-dec.c2
-rw-r--r--drivers/media/test-drivers/visl/visl.h7
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c106
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.c12
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-rx.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-radio-tx.h4
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.c18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-sdr-cap.h18
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.c10
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-cap.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vbi-out.h6
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.c28
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-cap.h24
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.c8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-common.h8
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.c16
-rw-r--r--drivers/media/test-drivers/vivid/vivid-vid-out.h16
-rw-r--r--drivers/media/tuners/xc2028.c9
-rw-r--r--drivers/media/tuners/xc4000.c8
-rw-r--r--drivers/media/tuners/xc5000.c14
-rw-r--r--drivers/media/usb/au0828/au0828-video.c5
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c12
-rw-r--r--drivers/media/usb/dvb-usb/dtv5100.c5
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c7
-rw-r--r--drivers/media/usb/em28xx/Kconfig1
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c4
-rw-r--r--drivers/media/usb/gspca/gspca.c18
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c69
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-hdw.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c69
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c3
-rw-r--r--drivers/media/usb/stk1160/stk1160-video.c7
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c56
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c134
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c4
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c71
-rw-r--r--drivers/media/usb/uvc/uvc_status.c7
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c128
-rw-r--r--drivers/media/usb/uvc/uvc_video.c34
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h25
-rw-r--r--drivers/media/v4l2-core/Kconfig4
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c119
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c11
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c13
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c133
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-priv.h2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-request.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c45
-rw-r--r--drivers/media/v4l2-core/v4l2-device.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c16
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c458
-rw-r--r--drivers/media/v4l2-core/v4l2-isp.c132
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c71
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c42
-rw-r--r--drivers/memory/renesas-rpc-if.c58
-rw-r--r--drivers/memory/samsung/exynos-srom.c10
-rw-r--r--drivers/memory/stm32_omm.c2
-rw-r--r--drivers/memory/tegra/tegra124-emc.c140
-rw-r--r--drivers/memory/tegra/tegra186-emc.c35
-rw-r--r--drivers/memory/tegra/tegra20-emc.c150
-rw-r--r--drivers/memory/tegra/tegra210.c146
-rw-r--r--drivers/memory/tegra/tegra30-emc.c119
-rw-r--r--drivers/memstick/core/memstick.c8
-rw-r--r--drivers/memstick/core/ms_block.c4
-rw-r--r--drivers/memstick/core/mspro_block.c7
-rw-r--r--drivers/memstick/host/jmb38x_ms.c3
-rw-r--r--drivers/memstick/host/rtsx_usb_ms.c5
-rw-r--r--drivers/memstick/host/tifm_ms.c3
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptscsih.c2
-rw-r--r--drivers/message/fusion/mptscsih.h2
-rw-r--r--drivers/mfd/88pm886.c1
-rw-r--r--drivers/mfd/Kconfig123
-rw-r--r--drivers/mfd/Makefile11
-rw-r--r--drivers/mfd/adp5585.c1
-rw-r--r--drivers/mfd/altera-sysmgr.c2
-rw-r--r--drivers/mfd/arizona-irq.c5
-rw-r--r--drivers/mfd/bcm2835-pm.c1
-rw-r--r--drivers/mfd/bq257xx.c99
-rw-r--r--drivers/mfd/cs42l43.c32
-rw-r--r--drivers/mfd/da9055-core.c2
-rw-r--r--drivers/mfd/da9063-i2c.c30
-rw-r--r--drivers/mfd/exynos-lpass.c1
-rw-r--r--drivers/mfd/fsl-imx25-tsadc.c1
-rw-r--r--drivers/mfd/intel-lpss-pci.c13
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c2
-rw-r--r--drivers/mfd/kempld-core.c36
-rw-r--r--drivers/mfd/loongson-se.c253
-rw-r--r--drivers/mfd/ls2k-bmc-core.c532
-rw-r--r--drivers/mfd/macsmc.c11
-rw-r--r--drivers/mfd/madera-core.c4
-rw-r--r--drivers/mfd/max7360.c171
-rw-r--r--drivers/mfd/max77620.c15
-rw-r--r--drivers/mfd/max77705.c38
-rw-r--r--drivers/mfd/max8997.c4
-rw-r--r--drivers/mfd/max8998.c4
-rw-r--r--drivers/mfd/mfd-core.c1
-rw-r--r--drivers/mfd/mt6358-irq.c1
-rw-r--r--drivers/mfd/mt6397-irq.c1
-rw-r--r--drivers/mfd/nct6694.c388
-rw-r--r--drivers/mfd/pf1550.c367
-rw-r--r--drivers/mfd/qnap-mcu.c109
-rw-r--r--drivers/mfd/rohm-bd71828.c44
-rw-r--r--drivers/mfd/rohm-bd718x7.c9
-rw-r--r--drivers/mfd/rz-mtu3.c2
-rw-r--r--drivers/mfd/sec-acpm.c23
-rw-r--r--drivers/mfd/sec-irq.c73
-rw-r--r--drivers/mfd/simple-mfd-i2c.c36
-rw-r--r--drivers/mfd/stm32-lptimer.c1
-rw-r--r--drivers/mfd/stmpe-i2c.c14
-rw-r--r--drivers/mfd/stmpe-spi.c14
-rw-r--r--drivers/mfd/stmpe.c9
-rw-r--r--drivers/mfd/sun4i-gpadc.c1
-rw-r--r--drivers/mfd/syscon.c2
-rw-r--r--drivers/mfd/tps6594-core.c59
-rw-r--r--drivers/mfd/tqmx86.c8
-rw-r--r--drivers/mfd/vexpress-sysreg.c25
-rw-r--r--drivers/mfd/wl1273-core.c262
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ad525x_dpot.c7
-rw-r--r--drivers/misc/amd-sbi/Kconfig7
-rw-r--r--drivers/misc/amd-sbi/rmi-core.c194
-rw-r--r--drivers/misc/amd-sbi/rmi-i2c.c124
-rw-r--r--drivers/misc/apds990x.c1
-rw-r--r--drivers/misc/bh1770glc.c4
-rw-r--r--drivers/misc/cardreader/rts5227.c13
-rw-r--r--drivers/misc/cardreader/rts5228.c12
-rw-r--r--drivers/misc/cardreader/rts5249.c16
-rw-r--r--drivers/misc/cardreader/rts5264.c20
-rw-r--r--drivers/misc/cardreader/rts5264.h1
-rw-r--r--drivers/misc/cardreader/rtsx_pcr.h2
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c7
-rw-r--r--drivers/misc/cb710/core.c8
-rw-r--r--drivers/misc/dw-xdata-pcie.c5
-rw-r--r--drivers/misc/eeprom/Kconfig18
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at25.c67
-rw-r--r--drivers/misc/eeprom/m24lr.c606
-rw-r--r--drivers/misc/fastrpc.c145
-rw-r--r--drivers/misc/genwqe/card_ddcb.c2
-rw-r--r--drivers/misc/hisi_hikey_usb.c3
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c38
-rw-r--r--drivers/misc/lis3lv02d/Kconfig4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c6
-rw-r--r--drivers/misc/lkdtm/cfi.c2
-rw-r--r--drivers/misc/lkdtm/fortify.c6
-rw-r--r--drivers/misc/lkdtm/perms.c5
-rw-r--r--drivers/misc/mei/Kconfig15
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/bus-fixup.c6
-rw-r--r--drivers/misc/mei/bus.c39
-rw-r--r--drivers/misc/mei/client.c78
-rw-r--r--drivers/misc/mei/client.h6
-rw-r--r--drivers/misc/mei/dma-ring.c8
-rw-r--r--drivers/misc/mei/gsc-me.c20
-rw-r--r--drivers/misc/mei/hbm.c121
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/hw-me.c153
-rw-r--r--drivers/misc/mei/hw-txe.c60
-rw-r--r--drivers/misc/mei/hw.h2
-rw-r--r--drivers/misc/mei/init.c66
-rw-r--r--drivers/misc/mei/interrupt.c43
-rw-r--r--drivers/misc/mei/main.c138
-rw-r--r--drivers/misc/mei/mei_dev.h24
-rw-r--r--drivers/misc/mei/mei_lb.c311
-rw-r--r--drivers/misc/mei/pci-me.c25
-rw-r--r--drivers/misc/mei/pci-txe.c21
-rw-r--r--drivers/misc/mei/platform-vsc.c31
-rw-r--r--drivers/misc/ntsync.c21
-rw-r--r--drivers/misc/ocxl/afu_irq.c2
-rw-r--r--drivers/misc/pci_endpoint_test.c16
-rw-r--r--drivers/misc/rp1/rp1_pci.c3
-rw-r--r--drivers/misc/vmw_balloon.c8
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.h2
-rw-r--r--drivers/mmc/core/block.c104
-rw-r--r--drivers/mmc/core/bus.c12
-rw-r--r--drivers/mmc/core/bus.h2
-rw-r--r--drivers/mmc/core/card.h9
-rw-r--r--drivers/mmc/core/core.c32
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/debugfs.c10
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/core/mmc.c74
-rw-r--r--drivers/mmc/core/mmc_ops.c72
-rw-r--r--drivers/mmc/core/mmc_test.c34
-rw-r--r--drivers/mmc/core/regulator.c77
-rw-r--r--drivers/mmc/core/sd.c11
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/core/sdio_bus.c3
-rw-r--r--drivers/mmc/host/Kconfig17
-rw-r--r--drivers/mmc/host/alcor.c8
-rw-r--r--drivers/mmc/host/atmel-mci.c19
-rw-r--r--drivers/mmc/host/au1xmmc.c18
-rw-r--r--drivers/mmc/host/cb710-mmc.c19
-rw-r--r--drivers/mmc/host/cqhci.h1
-rw-r--r--drivers/mmc/host/davinci_mmc.c22
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c13
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c9
-rw-r--r--drivers/mmc/host/dw_mmc-pci.c9
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c28
-rw-r--r--drivers/mmc/host/dw_mmc.c15
-rw-r--r--drivers/mmc/host/dw_mmc.h3
-rw-r--r--drivers/mmc/host/meson-mx-sdhc-clkc.c4
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c339
-rw-r--r--drivers/mmc/host/mmc_spi.c4
-rw-r--r--drivers/mmc/host/mmci.c9
-rw-r--r--drivers/mmc/host/mtk-sd.c18
-rw-r--r--drivers/mmc/host/mvsdio.c2
-rw-r--r--drivers/mmc/host/mxs-mmc.c6
-rw-r--r--drivers/mmc/host/omap.c6
-rw-r--r--drivers/mmc/host/omap_hsmmc.c17
-rw-r--r--drivers/mmc/host/pxamci.c56
-rw-r--r--drivers/mmc/host/renesas_sdhi.h3
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c45
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c18
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c3
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c40
-rw-r--r--drivers/mmc/host/sdhci-acpi.c18
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c162
-rw-r--r--drivers/mmc/host/sdhci-cadence.c70
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c13
-rw-r--r--drivers/mmc/host/sdhci-msm.c63
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c10
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c12
-rw-r--r--drivers/mmc/host/sdhci-of-dwcmshc.c648
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c8
-rw-r--r--drivers/mmc/host/sdhci-omap.c18
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c15
-rw-r--r--drivers/mmc/host/sdhci-pci-gli.c68
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c52
-rw-r--r--drivers/mmc/host/sdhci-s3c.c11
-rw-r--r--drivers/mmc/host/sdhci-spear.c6
-rw-r--r--drivers/mmc/host/sdhci-sprd.c10
-rw-r--r--drivers/mmc/host/sdhci-st.c6
-rw-r--r--drivers/mmc/host/sdhci-tegra.c13
-rw-r--r--drivers/mmc/host/sdhci-uhs2.c3
-rw-r--r--drivers/mmc/host/sdhci-xenon.c13
-rw-r--r--drivers/mmc/host/sdhci.c34
-rw-r--r--drivers/mmc/host/sdhci.h7
-rw-r--r--drivers/mmc/host/sdhci_am654.c11
-rw-r--r--drivers/mmc/host/sh_mmcif.c13
-rw-r--r--drivers/mmc/host/sunxi-mmc.c11
-rw-r--r--drivers/mmc/host/tifm_sd.c4
-rw-r--r--drivers/mmc/host/tmio_mmc.h17
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c33
-rw-r--r--drivers/mmc/host/toshsd.c8
-rw-r--r--drivers/mmc/host/usdhi6rol0.c4
-rw-r--r--drivers/mmc/host/via-sdmmc.c10
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c16
-rw-r--r--drivers/most/most_usb.c27
-rw-r--r--drivers/mtd/chips/cfi_probe.c2
-rw-r--r--drivers/mtd/chips/jedec_probe.c4
-rw-r--r--drivers/mtd/devices/docg3.h2
-rw-r--r--drivers/mtd/devices/mtd_intel_dg.c74
-rw-r--r--drivers/mtd/ftl.c2
-rw-r--r--drivers/mtd/hyperbus/hbmc-am654.c1
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c18
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c4
-rw-r--r--drivers/mtd/maps/pcmciamtd.c1
-rw-r--r--drivers/mtd/mtd_blkdevs.c4
-rw-r--r--drivers/mtd/mtdchar.c6
-rw-r--r--drivers/mtd/mtdcore.c61
-rw-r--r--drivers/mtd/mtdoops.c5
-rw-r--r--drivers/mtd/mtdpart.c7
-rw-r--r--drivers/mtd/mtdswap.c4
-rw-r--r--drivers/mtd/nand/Kconfig8
-rw-r--r--drivers/mtd/nand/Makefile1
-rw-r--r--drivers/mtd/nand/core.c131
-rw-r--r--drivers/mtd/nand/ecc-mxic.c14
-rw-r--r--drivers/mtd/nand/ecc-realtek.c464
-rw-r--r--drivers/mtd/nand/ecc.c2
-rw-r--r--drivers/mtd/nand/onenand/onenand_omap2.c1
-rw-r--r--drivers/mtd/nand/onenand/onenand_samsung.c2
-rw-r--r--drivers/mtd/nand/qpic_common.c6
-rw-r--r--drivers/mtd/nand/raw/Kconfig34
-rw-r--r--drivers/mtd/nand/raw/Makefile3
-rw-r--r--drivers/mtd/nand/raw/atmel/nand-controller.c17
-rw-r--r--drivers/mtd/nand/raw/atmel/pmecc.c1
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c276
-rw-r--r--drivers/mtd/nand/raw/fsmc_nand.c6
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c17
-rw-r--r--drivers/mtd/nand/raw/loongson-nand-controller.c1024
-rw-r--r--drivers/mtd/nand/raw/loongson1-nand-controller.c836
-rw-r--r--drivers/mtd/nand/raw/lpc32xx_slc.c2
-rw-r--r--drivers/mtd/nand/raw/marvell_nand.c13
-rw-r--r--drivers/mtd/nand/raw/nand_base.c144
-rw-r--r--drivers/mtd/nand/raw/nandsim.c7
-rw-r--r--drivers/mtd/nand/raw/omap2.c27
-rw-r--r--drivers/mtd/nand/raw/pl35x-nand-controller.c3
-rw-r--r--drivers/mtd/nand/raw/renesas-nand-controller.c5
-rw-r--r--drivers/mtd/nand/raw/rockchip-nand-controller.c1
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c1230
-rw-r--r--drivers/mtd/nand/raw/stm32_fmc2_nand.c1
-rw-r--r--drivers/mtd/nand/raw/sunxi_nand.c410
-rw-r--r--drivers/mtd/nand/spi/Makefile2
-rw-r--r--drivers/mtd/nand/spi/core.c76
-rw-r--r--drivers/mtd/nand/spi/esmt.c24
-rw-r--r--drivers/mtd/nand/spi/fmsh.c146
-rw-r--r--drivers/mtd/nand/spi/gigadevice.c107
-rw-r--r--drivers/mtd/rfd_ftl.c4
-rw-r--r--drivers/mtd/sm_ftl.c5
-rw-r--r--drivers/mtd/spi-nor/core.c155
-rw-r--r--drivers/mtd/spi-nor/core.h6
-rw-r--r--drivers/mtd/spi-nor/micron-st.c101
-rw-r--r--drivers/mtd/spi-nor/sfdp.c30
-rw-r--r--drivers/mtd/spi-nor/spansion.c38
-rw-r--r--drivers/mtd/spi-nor/winbond.c24
-rw-r--r--drivers/mtd/ubi/attach.c4
-rw-r--r--drivers/mtd/ubi/block.c4
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c8
-rw-r--r--drivers/mtd/ubi/io.c10
-rw-r--r--drivers/mtd/ubi/ubi.h12
-rw-r--r--drivers/mux/mmio.c82
-rw-r--r--drivers/net/Kconfig15
-rw-r--r--drivers/net/Space.c3
-rw-r--r--drivers/net/amt.c6
-rw-r--r--drivers/net/bonding/bond_3ad.c40
-rw-r--r--drivers/net/bonding/bond_main.c264
-rw-r--r--drivers/net/bonding/bond_netlink.c46
-rw-r--r--drivers/net/bonding/bond_options.c47
-rw-r--r--drivers/net/bonding/bond_sysfs.c6
-rw-r--r--drivers/net/can/Kconfig17
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c1
-rw-r--r--drivers/net/can/bxcan.c5
-rw-r--r--drivers/net/can/c_can/c_can_main.c1
-rw-r--r--drivers/net/can/can327.c1
-rw-r--r--drivers/net/can/cc770/cc770.c1
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c1
-rw-r--r--drivers/net/can/dev/bittiming.c63
-rw-r--r--drivers/net/can/dev/calc_bittiming.c124
-rw-r--r--drivers/net/can/dev/dev.c155
-rw-r--r--drivers/net/can/dev/netlink.c879
-rw-r--r--drivers/net/can/dummy_can.c285
-rw-r--r--drivers/net/can/esd/esd_402_pci-core.c4
-rw-r--r--drivers/net/can/esd/esdacc.c2
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c1
-rw-r--r--drivers/net/can/grcan.c1
-rw-r--r--drivers/net/can/ifi_canfd/ifi_canfd.c1
-rw-r--r--drivers/net/can/janz-ican3.c1
-rw-r--r--drivers/net/can/kvaser_pciefd/kvaser_pciefd_core.c4
-rw-r--r--drivers/net/can/m_can/m_can.c328
-rw-r--r--drivers/net/can/m_can/m_can.h5
-rw-r--r--drivers/net/can/m_can/m_can_pci.c4
-rw-r--r--drivers/net/can/m_can/m_can_platform.c10
-rw-r--r--drivers/net/can/m_can/tcan4x5x-core.c4
-rw-r--r--drivers/net/can/mscan/mscan.c1
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd.c40
-rw-r--r--drivers/net/can/peak_canfd/peak_canfd_user.h4
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c6
-rw-r--r--drivers/net/can/rcar/rcar_can.c293
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c381
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c1
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-tx.c2
-rw-r--r--drivers/net/can/sja1000/peak_pci.c6
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c8
-rw-r--r--drivers/net/can/sja1000/sja1000.c5
-rw-r--r--drivers/net/can/slcan/slcan-core.c1
-rw-r--r--drivers/net/can/softing/softing_main.c1
-rw-r--r--drivers/net/can/spi/hi311x.c34
-rw-r--r--drivers/net/can/spi/mcp251x.c35
-rw-r--r--drivers/net/can/spi/mcp251xfd/Kconfig1
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c277
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c114
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h8
-rw-r--r--drivers/net/can/sun4i_can.c4
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/Kconfig11
-rw-r--r--drivers/net/can/usb/Makefile1
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/usb/esd_usb.c65
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_core.c5
-rw-r--r--drivers/net/can/usb/f81604.c1
-rw-r--r--drivers/net/can/usb/gs_usb.c144
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c4
-rw-r--r--drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c4
-rw-r--r--drivers/net/can/usb/nct6694_canfd.c831
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c48
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h4
-rw-r--r--drivers/net/can/usb/ucan.c1
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/can/vxcan.c2
-rw-r--r--drivers/net/can/xilinx_can.c1
-rw-r--r--drivers/net/dsa/Kconfig23
-rw-r--r--drivers/net/dsa/Makefile7
-rw-r--r--drivers/net/dsa/b53/b53_common.c384
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c35
-rw-r--r--drivers/net/dsa/b53/b53_priv.h111
-rw-r--r--drivers/net/dsa/b53/b53_regs.h48
-rw-r--r--drivers/net/dsa/dsa_loop.c84
-rw-r--r--drivers/net/dsa/dsa_loop.h20
-rw-r--r--drivers/net/dsa/dsa_loop_bdinfo.c36
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c2
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek_ptp.c14
-rw-r--r--drivers/net/dsa/ks8995.c857
-rw-r--r--drivers/net/dsa/lantiq/Kconfig24
-rw-r--r--drivers/net/dsa/lantiq/Makefile3
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.c518
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip.h301
-rw-r--r--drivers/net/dsa/lantiq/lantiq_gswip_common.c1739
-rw-r--r--drivers/net/dsa/lantiq/lantiq_pce.h (renamed from drivers/net/dsa/lantiq_pce.h)9
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx.c733
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx.h126
-rw-r--r--drivers/net/dsa/lantiq/mxl-gsw1xx_pce.h154
-rw-r--r--drivers/net/dsa/lantiq_gswip.c2270
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c100
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h3
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c80
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h2
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c22
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c1
-rw-r--r--drivers/net/dsa/mt7530.c5
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c17
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/hwtstamp.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c70
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h133
-rw-r--r--drivers/net/dsa/ocelot/felix.c74
-rw-r--r--drivers/net/dsa/ocelot/felix.h3
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c3
-rw-r--r--drivers/net/dsa/realtek/realtek.h3
-rw-r--r--drivers/net/dsa/realtek/rtl8365mb.c2
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c2
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c7
-rw-r--r--drivers/net/dsa/sja1105/sja1105_tas.c8
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c11
-rw-r--r--drivers/net/dsa/yt921x.c3006
-rw-r--r--drivers/net/dsa/yt921x.h567
-rw-r--r--drivers/net/ethernet/3com/3c515.c4
-rw-r--r--drivers/net/ethernet/Kconfig2
-rw-r--r--drivers/net/ethernet/Makefile2
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.c443
-rw-r--r--drivers/net/ethernet/airoha/airoha_eth.h99
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.c291
-rw-r--r--drivers/net/ethernet/airoha/airoha_npu.h36
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe.c485
-rw-r--r--drivers/net/ethernet/airoha/airoha_ppe_debugfs.c3
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h122
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h3
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c47
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c5
-rw-r--r--drivers/net/ethernet/amd/Kconfig1
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h3
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c3
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/Makefile4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h22
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c19
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c125
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c37
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-hwtstamp.c28
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-i2c.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c1
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c5
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pps.c74
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ptp.c26
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-selftest.c346
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h39
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c66
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ptp.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c34
-rw-r--r--drivers/net/ethernet/broadcom/b44.c37
-rw-r--r--drivers/net/ethernet/broadcom/bnge/Makefile3
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge.h37
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_auxr.c258
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_auxr.h84
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_core.c34
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_db.h34
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.c482
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_hwrm_lib.h31
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.c2217
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_netdev.h250
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.c18
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_resc.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.c67
-rw-r--r--drivers/net/ethernet/broadcom/bnge/bnge_rmem.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c71
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c125
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c183
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c42
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c76
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c7
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c31
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c75
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c96
-rw-r--r--drivers/net/ethernet/cadence/macb.h146
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c776
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c58
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c51
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/response_manager.c3
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c62
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c16
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c45
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c158
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c40
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_matchall.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c44
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h12
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c24
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h7
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c10
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c40
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h8
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c14
-rw-r--r--drivers/net/ethernet/engleder/tsnep_ptp.c88
-rw-r--r--drivers/net/ethernet/fealnx.c4
-rw-r--r--drivers/net/ethernet/freescale/Kconfig1
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c45
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c5
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c11
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig3
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c262
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h34
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_hw.h42
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c23
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c185
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf_common.c19
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/netc_blk_ctrl.c400
-rw-r--r--drivers/net/ethernet/freescale/enetc/ntmp.c15
-rw-r--r--drivers/net/ethernet/freescale/fec.h42
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c210
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c64
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c91
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h14
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c11
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth.h4
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_ethtool.c3
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c40
-rw-r--r--drivers/net/ethernet/google/gve/gve.h24
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_buffer_mgmt_dqo.c5
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc_dqo.h3
-rw-r--r--drivers/net/ethernet/google/gve/gve_dqo.h1
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c97
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c97
-rw-r--r--drivers/net/ethernet/google/gve/gve_ptp.c27
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c104
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c6
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig2
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_main.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_reg.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_trace.h84
-rw-r--r--drivers/net/ethernet/hisilicon/hibmcge/hbg_txrx.c217
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c23
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c32
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic3/Makefile6
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.c915
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_cmdq.h156
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.c23
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_common.h27
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_csr.h79
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.c776
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_eqs.h122
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.c211
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_cfg.h4
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.c394
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_comm.h34
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hw_intf.h151
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwdev.c541
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.c417
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_hwif.h32
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_irq.c138
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_lld.c9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_main.c69
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.c848
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mbox.h126
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.c21
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_mgmt_interface.h119
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_netdev_ops.c426
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.c152
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_cfg.h20
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_dev.h19
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.c870
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_nic_io.h39
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_pci_id_tbl.h9
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.c336
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rss.h14
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.c226
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_rx.h38
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.c190
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_tx.h30
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.c109
-rw-r--r--drivers/net/ethernet/huawei/hinic3/hinic3_wq.h19
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c59
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h6
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/Makefile2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000.h2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c3
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c53
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c41
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_common.h2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c19
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pf.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_vf.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devlink.c55
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c19
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c29
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c141
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.c119
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_adv_rss.h31
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c107
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ptp.c7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c12
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile9
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c35
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/health.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adapter.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h117
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c546
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c182
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_debugfs.c633
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c218
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c101
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c269
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h94
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.c474
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fwlog.h79
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c1007
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h44
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c341
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sbq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c896
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h148
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c65
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c299
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h28
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c)2
-rw-r--r--drivers/net/ethernet/intel/ice/virt/allowlist.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/fdir.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h)0
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.c975
-rw-r--r--drivers/net/ethernet/intel/ice/virt/queues.h20
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.c1922
-rw-r--r--drivers/net/ethernet/intel/ice/virt/rss.h18
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.c)1683
-rw-r--r--drivers/net/ethernet/intel/ice/virt/virtchnl.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.h)0
-rw-r--r--drivers/net/ethernet/intel/idpf/Kconfig2
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile3
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h71
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c97
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h6
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c203
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c108
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ptp.c14
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c112
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c1002
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h210
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c11
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c1237
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.h33
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl_ptp.c5
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.c486
-rw-r--r--drivers/net/ethernet/intel/idpf/xdp.h175
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.c633
-rw-r--r--drivers/net/ethernet/intel/idpf/xsk.h33
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c22
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c7
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_i225.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_nvm.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c128
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c17
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c79
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c14
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h25
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c36
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c182
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h1
-rw-r--r--drivers/net/ethernet/intel/libie/Kconfig9
-rw-r--r--drivers/net/ethernet/intel/libie/Makefile4
-rw-r--r--drivers/net/ethernet/intel/libie/adminq.c2
-rw-r--r--drivers/net/ethernet/intel/libie/fwlog.c1115
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c29
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c30
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c16
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c3
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_ethtool.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.c218
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/debugfs.h28
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/npa.c21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cn20k/struct.h340
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h73
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c42
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c47
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c92
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c29
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn20k.c220
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c57
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c2
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c41
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c64
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cq.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c145
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/pcie_cong_event.c79
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_smfs.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c104
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.c1155
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.h121
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c281
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c213
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c130
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/adj_vport.c202
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/vporttbl.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c274
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c399
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c31
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c271
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c144
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c395
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/macsec_fs.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c116
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.c799
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/nv_param.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/st.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c1821
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.h60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws_pools.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wc.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c6
-rw-r--r--drivers/net/ethernet/meta/Kconfig3
-rw-r--r--drivers/net/ethernet/meta/fbnic/Makefile1
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic.h29
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_csr.h39
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_devlink.c249
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_ethtool.c224
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.c484
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw.h92
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_fw_log.h2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.c66
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_hw_stats.h28
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_irq.c34
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.c146
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mac.h47
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_mdio.c195
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.c183
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_netdev.h20
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_pci.c75
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_phylink.c187
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.c145
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_rpc.h4
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_time.c2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_tlv.h2
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.c1049
-rw-r--r--drivers/net/ethernet/meta/fbnic/fbnic_txrx.h41
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c1
-rw-r--r--drivers/net/ethernet/microchip/lan865x/lan865x.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h4
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c5
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c8
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Kconfig2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c18
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c12
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c10
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c183
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c19
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_bpf.c46
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c365
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c87
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c2
-rw-r--r--drivers/net/ethernet/mucse/Kconfig33
-rw-r--r--drivers/net/ethernet/mucse/Makefile7
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/Makefile11
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe.h71
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_chip.c143
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_hw.h17
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_main.c320
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.c406
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx.h20
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.c191
-rw-r--r--drivers/net/ethernet/mucse/rnpgbe/rnpgbe_mbx_fw.h88
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c13
-rw-r--r--drivers/net/ethernet/neterion/s2io.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/tls.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c38
-rw-r--r--drivers/net/ethernet/pensando/Kconfig1
-rw-r--r--drivers/net/ethernet/pensando/ionic/Makefile2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_api.h131
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.c102
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_aux.h10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c7
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c270
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h28
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h118
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c64
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h21
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c61
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c34
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c9
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c22
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c76
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig15
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile1
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/Makefile7
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.c239
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe.h39
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.c2034
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_config.h317
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c847
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.h16
-rw-r--r--drivers/net/ethernet/qualcomm/ppe/ppe_regs.h591
-rw-r--r--drivers/net/ethernet/realtek/Kconfig2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c124
-rw-r--r--drivers/net/ethernet/renesas/Makefile1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h16
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c170
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.c76
-rw-r--r--drivers/net/ethernet/renesas/rcar_gen4_ptp.h46
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h46
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.c316
-rw-r--r--drivers/net/ethernet/renesas/rswitch_l2.h15
-rw-r--r--drivers/net/ethernet/renesas/rswitch_main.c (renamed from drivers/net/ethernet/renesas/rswitch.c)177
-rw-r--r--drivers/net/ethernet/renesas/rtsn.c50
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c34
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_tx.c17
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/mae.c4
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_common.c3
-rw-r--r--drivers/net/ethernet/sfc/siena/ethtool.c3
-rw-r--r--drivers/net/ethernet/sfc/tc_encap_actions.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c14
-rw-r--r--drivers/net/ethernet/spacemit/Kconfig29
-rw-r--r--drivers/net/ethernet/spacemit/Makefile6
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.c2162
-rw-r--r--drivers/net/ethernet/spacemit/k1_emac.h416
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c37
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-eic7700.c235
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c150
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c160
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c139
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c92
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c87
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c30
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c269
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-renesas-gbeth.c112
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c433
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-s32.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c170
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sophgo.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c54
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c138
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.c159
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c72
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c26
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c96
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c100
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c32
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c39
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.c253
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_est.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c141
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_fpe.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c28
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_libpci.h12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c931
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c475
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c97
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.c67
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c196
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c58
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig12
-rw-r--r--drivers/net/ethernet/ti/Makefile3
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c27
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c52
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c51
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c63
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c21
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c101
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_common.c516
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c7
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c401
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h31
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c7
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.c1746
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth.h262
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_prueth_ptp.h85
-rw-r--r--drivers/net/ethernet/ti/icssm/icssm_switch.h257
-rw-r--r--drivers/net/ethernet/ti/netcp.h5
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c68
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c72
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c58
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h1
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig1
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c297
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h13
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c205
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h5
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c256
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_sriov.c26
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h83
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf.h76
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.c14
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_vf_lib.h1
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c6
-rw-r--r--drivers/net/ethernet/wangxun/ngbevf/ngbevf_main.c5
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.c298
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_aml.h5
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c47
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c10
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c29
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h39
-rw-r--r--drivers/net/ethernet/wangxun/txgbevf/txgbevf_main.c17
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c10
-rw-r--r--drivers/net/fjes/fjes_main.c5
-rw-r--r--drivers/net/geneve.c4
-rw-r--r--drivers/net/gtp.c9
-rw-r--r--drivers/net/hamradio/6pack.c57
-rw-r--r--drivers/net/hyperv/Kconfig2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c15
-rw-r--r--drivers/net/ipa/ipa_interrupt.c1
-rw-r--r--drivers/net/ipa/ipa_main.c1
-rw-r--r--drivers/net/ipa/ipa_modem.c4
-rw-r--r--drivers/net/ipa/ipa_smp2p.c2
-rw-r--r--drivers/net/ipa/ipa_uc.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c8
-rw-r--r--drivers/net/macsec.c173
-rw-r--r--drivers/net/macvlan.c2
-rw-r--r--drivers/net/mctp/mctp-i3c.c8
-rw-r--r--drivers/net/mctp/mctp-usb.c8
-rw-r--r--drivers/net/mdio/Kconfig5
-rw-r--r--drivers/net/mdio/fwnode_mdio.c5
-rw-r--r--drivers/net/mdio/mdio-airoha.c2
-rw-r--r--drivers/net/mdio/mdio-bcm-unimac.c4
-rw-r--r--drivers/net/mdio/mdio-i2c.c39
-rw-r--r--drivers/net/mdio/of_mdio.c8
-rw-r--r--drivers/net/netconsole.c486
-rw-r--r--drivers/net/netdevsim/Makefile4
-rw-r--r--drivers/net/netdevsim/dev.c62
-rw-r--r--drivers/net/netdevsim/ethtool.c25
-rw-r--r--drivers/net/netdevsim/health.c4
-rw-r--r--drivers/net/netdevsim/ipsec.c1
-rw-r--r--drivers/net/netdevsim/netdev.c74
-rw-r--r--drivers/net/netdevsim/netdevsim.h33
-rw-r--r--drivers/net/netdevsim/psp.c252
-rw-r--r--drivers/net/netkit.c6
-rw-r--r--drivers/net/ovpn/netlink-gen.c1
-rw-r--r--drivers/net/ovpn/netlink-gen.h1
-rw-r--r--drivers/net/ovpn/tcp.c26
-rw-r--r--drivers/net/pcs/Kconfig11
-rw-r--r--drivers/net/pcs/pcs-lynx.c88
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c317
-rw-r--r--drivers/net/pcs/pcs-xpcs-plat.c5
-rw-r--r--drivers/net/pcs/pcs-xpcs.c136
-rw-r--r--drivers/net/phy/Kconfig13
-rw-r--r--drivers/net/phy/Makefile3
-rw-r--r--drivers/net/phy/adin1100.c7
-rw-r--r--drivers/net/phy/aquantia/aquantia.h52
-rw-r--r--drivers/net/phy/aquantia/aquantia_firmware.c2
-rw-r--r--drivers/net/phy/aquantia/aquantia_main.c702
-rw-r--r--drivers/net/phy/as21xxx.c7
-rw-r--r--drivers/net/phy/ax88796b.c5
-rw-r--r--drivers/net/phy/bcm-phy-ptp.c27
-rw-r--r--drivers/net/phy/broadcom.c167
-rw-r--r--drivers/net/phy/dp83640.c87
-rw-r--r--drivers/net/phy/dp83867.c42
-rw-r--r--drivers/net/phy/dp83869.c4
-rw-r--r--drivers/net/phy/dp83td510.c62
-rw-r--r--drivers/net/phy/fixed_phy.c264
-rw-r--r--drivers/net/phy/marvell-88x2222.c13
-rw-r--r--drivers/net/phy/marvell.c47
-rw-r--r--drivers/net/phy/marvell10g.c7
-rw-r--r--drivers/net/phy/mdio-boardinfo.c79
-rw-r--r--drivers/net/phy/mdio-boardinfo.h18
-rw-r--r--drivers/net/phy/mdio-open-alliance.h49
-rw-r--r--drivers/net/phy/mdio-private.h11
-rw-r--r--drivers/net/phy/mdio_bus.c93
-rw-r--r--drivers/net/phy/mdio_bus_provider.c46
-rw-r--r--drivers/net/phy/mdio_device.c60
-rw-r--r--drivers/net/phy/mediatek/mtk-2p5ge.c104
-rw-r--r--drivers/net/phy/micrel.c1365
-rw-r--r--drivers/net/phy/microchip_rds_ptp.c8
-rw-r--r--drivers/net/phy/microchip_t1s.c100
-rw-r--r--drivers/net/phy/motorcomm.c120
-rw-r--r--drivers/net/phy/mscc/mscc.h15
-rw-r--r--drivers/net/phy/mscc/mscc_main.c506
-rw-r--r--drivers/net/phy/mscc/mscc_ptp.c21
-rw-r--r--drivers/net/phy/mxl-86110.c392
-rw-r--r--drivers/net/phy/mxl-gpy.c135
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx-macsec.c8
-rw-r--r--drivers/net/phy/nxp-c45-tja11xx.c22
-rw-r--r--drivers/net/phy/phy-c45.c287
-rw-r--r--drivers/net/phy/phy-caps.h3
-rw-r--r--drivers/net/phy/phy-core.c47
-rw-r--r--drivers/net/phy/phy.c29
-rw-r--r--drivers/net/phy/phy_caps.c4
-rw-r--r--drivers/net/phy/phy_device.c77
-rw-r--r--drivers/net/phy/phylink.c109
-rw-r--r--drivers/net/phy/qcom/at803x.c9
-rw-r--r--drivers/net/phy/qcom/qca807x.c7
-rw-r--r--drivers/net/phy/qt2025.rs10
-rw-r--r--drivers/net/phy/realtek/realtek_main.c688
-rw-r--r--drivers/net/phy/sfp-bus.c107
-rw-r--r--drivers/net/phy/sfp.c85
-rw-r--r--drivers/net/phy/sfp.h4
-rw-r--r--drivers/net/phy/spi_ks8995.c506
-rw-r--r--drivers/net/ppp/Kconfig3
-rw-r--r--drivers/net/ppp/bsd_comp.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c120
-rw-r--r--drivers/net/ppp/ppp_mppe.c108
-rw-r--r--drivers/net/ppp/pppoe.c133
-rw-r--r--drivers/net/ppp/pptp.c8
-rw-r--r--drivers/net/pse-pd/Kconfig11
-rw-r--r--drivers/net/pse-pd/Makefile1
-rw-r--r--drivers/net/pse-pd/pd692x0.c155
-rw-r--r--drivers/net/pse-pd/si3474.c578
-rw-r--r--drivers/net/pse-pd/tps23881.c71
-rw-r--r--drivers/net/sungem_phy.c2
-rw-r--r--drivers/net/team/team_core.c109
-rw-r--r--drivers/net/team/team_nl.c1
-rw-r--r--drivers/net/team/team_nl.h1
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/tun_vnet.h2
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/asix_devices.c41
-rw-r--r--drivers/net/usb/lan78xx.c36
-rw-r--r--drivers/net/usb/qmi_wwan.c6
-rw-r--r--drivers/net/usb/r8152.c8
-rw-r--r--drivers/net/usb/rtl8150.c13
-rw-r--r--drivers/net/usb/usbnet.c297
-rw-r--r--drivers/net/veth.c45
-rw-r--r--drivers/net/virtio_net.c166
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c18
-rw-r--r--drivers/net/vrf.c4
-rw-r--r--drivers/net/vxlan/vxlan_core.c25
-rw-r--r--drivers/net/vxlan/vxlan_private.h2
-rw-r--r--drivers/net/wan/framer/pef2256/pef2256.c35
-rw-r--r--drivers/net/wan/hdlc_ppp.c4
-rw-r--r--drivers/net/wireguard/Makefile2
-rw-r--r--drivers/net/wireguard/cookie.c18
-rw-r--r--drivers/net/wireguard/device.c6
-rw-r--r--drivers/net/wireguard/generated/netlink.c73
-rw-r--r--drivers/net/wireguard/generated/netlink.h30
-rw-r--r--drivers/net/wireguard/netlink.c68
-rw-r--r--drivers/net/wireguard/noise.c32
-rw-r--r--drivers/net/wireguard/queueing.h13
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c28
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c253
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode_i.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c1
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h19
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.c3
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c60
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c1
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h39
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c465
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c20
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c21
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c23
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h18
-rw-r--r--drivers/net/wireless/ath/ath12k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c24
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h11
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c14
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c2
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c75
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c426
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h18
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c13
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h12
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1041
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c24
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c37
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h21
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c256
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h88
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c1
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h74
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c60
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c1
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c14
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c28
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/dr.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-fm.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-gf.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-hr.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-pe.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/rf-wh.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h113
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h134
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h286
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h74
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c81
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c72
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c95
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c79
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/sap.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/constants.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/d3.c557
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/debugfs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/fw.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.c52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/iface.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/key.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/link.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mac80211.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mld.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/mlo.c122
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/notif.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/regulatory.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/roc.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.c1717
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/rx.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/sta.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/stats.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mld/tlc.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c131
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c384
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c809
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c124
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c141
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h141
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c164
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c135
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c89
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c433
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c186
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/internal.h53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans-gen2.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/trans.c246
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/gen1_2/tx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/nvm_parse.c72
-rw-r--r--drivers/net/wireless/intersil/p54/txrx.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_sdio.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/if_spi.c3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c113
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c58
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c71
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig6
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/channel.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c302
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h98
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c86
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c65
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h232
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/soc.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/coredump.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c203
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Kconfig2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/Makefile4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c156
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c127
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.c265
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regd.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/usb.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_acpi_sar.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Kconfig9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/coredump.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c343
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c382
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c831
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c614
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c378
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c113
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h144
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/npu.c352
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/pci.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h34
-rw-r--r--drivers/net/wireless/mediatek/mt76/npu.c501
-rw-r--r--drivers/net/wireless/mediatek/mt76/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/scan.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb_trace.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/wed.c20
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c35
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c12
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c9
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c27
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c80
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723a.c115
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c215
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/regs.h1
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c8
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.h7
-rw-r--r--drivers/net/wireless/realtek/rtw88/led.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822bu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822cu.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/sdio.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig22
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile6
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c173
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h446
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c11
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h10
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c911
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h242
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c424
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c353
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h144
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c270
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h115
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c124
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c10
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c478
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h132
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci_be.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c541
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c13
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h80
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c22
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b_rfk.c167
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851bu.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c129
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c16
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852au.c79
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c7
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c6
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c14
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bte.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bu.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c172
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852cu.c69
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c4
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c15
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c5
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h45
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.c115
-rw-r--r--drivers/net/wireless/realtek/rtw89/usb.h12
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c87
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h6
-rw-r--r--drivers/net/wireless/silabs/wfx/main.c2
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c11
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c36
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c3
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c281
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c3
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_pcie.c2
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c19
-rw-r--r--drivers/net/wwan/qcom_bam_dmux.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_cldma.h2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c5
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c1
-rw-r--r--drivers/net/wwan/wwan_hwsim.c2
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/nfc/mei_phy.h4
-rw-r--r--drivers/nfc/pn533/pn533.c12
-rw-r--r--drivers/nfc/s3fwrn5/Kconfig3
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c17
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c18
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.h1
-rw-r--r--drivers/ntb/hw/epf/ntb_hw_epf.c118
-rw-r--r--drivers/ntb/ntb_transport.c7
-rw-r--r--drivers/nvdimm/Kconfig19
-rw-r--r--drivers/nvdimm/Makefile1
-rw-r--r--drivers/nvdimm/badrange.c3
-rw-r--r--drivers/nvdimm/btt.c4
-rw-r--r--drivers/nvdimm/btt_devs.c24
-rw-r--r--drivers/nvdimm/bus.c72
-rw-r--r--drivers/nvdimm/claim.c7
-rw-r--r--drivers/nvdimm/core.c17
-rw-r--r--drivers/nvdimm/dax_devs.c12
-rw-r--r--drivers/nvdimm/dimm.c5
-rw-r--r--drivers/nvdimm/dimm_devs.c48
-rw-r--r--drivers/nvdimm/namespace_devs.c113
-rw-r--r--drivers/nvdimm/nd.h3
-rw-r--r--drivers/nvdimm/pfn_devs.c63
-rw-r--r--drivers/nvdimm/ramdax.c282
-rw-r--r--drivers/nvdimm/region.c18
-rw-r--r--drivers/nvdimm/region_devs.c120
-rw-r--r--drivers/nvdimm/security.c14
-rw-r--r--drivers/nvme/common/auth.c90
-rw-r--r--drivers/nvme/host/apple.c198
-rw-r--r--drivers/nvme/host/auth.c13
-rw-r--r--drivers/nvme/host/core.c41
-rw-r--r--drivers/nvme/host/fabrics.c2
-rw-r--r--drivers/nvme/host/fabrics.h6
-rw-r--r--drivers/nvme/host/fc.c34
-rw-r--r--drivers/nvme/host/ioctl.c14
-rw-r--r--drivers/nvme/host/multipath.c12
-rw-r--r--drivers/nvme/host/nvme.h11
-rw-r--r--drivers/nvme/host/pci.c291
-rw-r--r--drivers/nvme/host/pr.c6
-rw-r--r--drivers/nvme/host/rdma.c1
-rw-r--r--drivers/nvme/host/tcp.c11
-rw-r--r--drivers/nvme/host/zns.c10
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/auth.c21
-rw-r--r--drivers/nvme/target/core.c20
-rw-r--r--drivers/nvme/target/fabrics-cmd-auth.c1
-rw-r--r--drivers/nvme/target/fc.c83
-rw-r--r--drivers/nvme/target/fcloop.c17
-rw-r--r--drivers/nvme/target/loop.c1
-rw-r--r--drivers/nvme/target/nvmet.h2
-rw-r--r--drivers/nvme/target/passthru.c2
-rw-r--r--drivers/nvme/target/pci-epf.c14
-rw-r--r--drivers/nvme/target/rdma.c12
-rw-r--r--drivers/nvme/target/tcp.c8
-rw-r--r--drivers/nvmem/Kconfig30
-rw-r--r--drivers/nvmem/Makefile6
-rw-r--r--drivers/nvmem/an8855-efuse.c68
-rw-r--r--drivers/nvmem/imx-ocotp-ele.c20
-rw-r--r--drivers/nvmem/layouts.c13
-rw-r--r--drivers/nvmem/layouts/u-boot-env.c4
-rw-r--r--drivers/nvmem/qnap-mcu-eeprom.c111
-rw-r--r--drivers/nvmem/rcar-efuse.c1
-rw-r--r--drivers/nvmem/s32g-ocotp-nvmem.c100
-rw-r--r--drivers/of/address.c4
-rw-r--r--drivers/of/base.c47
-rw-r--r--drivers/of/fdt.c101
-rw-r--r--drivers/of/irq.c94
-rw-r--r--drivers/of/of_kunit_helpers.c5
-rw-r--r--drivers/of/of_reserved_mem.c69
-rw-r--r--drivers/of/overlay.c5
-rw-r--r--drivers/of/unittest.c1
-rw-r--r--drivers/opp/core.c168
-rw-r--r--drivers/opp/cpu.c16
-rw-r--r--drivers/opp/of.c125
-rw-r--r--drivers/parisc/ccio-dma.c54
-rw-r--r--drivers/parisc/eisa_eeprom.c2
-rw-r--r--drivers/parisc/gsc.c4
-rw-r--r--drivers/parisc/iommu-helpers.h10
-rw-r--r--drivers/parisc/sba_iommu.c54
-rw-r--r--drivers/pci/Kconfig21
-rw-r--r--drivers/pci/Makefile4
-rw-r--r--drivers/pci/bus.c59
-rw-r--r--drivers/pci/controller/Kconfig18
-rw-r--r--drivers/pci/controller/Makefile1
-rw-r--r--drivers/pci/controller/cadence/Kconfig31
-rw-r--r--drivers/pci/controller/cadence/Makefile12
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c61
-rw-r--r--drivers/pci/controller/cadence/pci-sky1.c238
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-ep.c40
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host-common.c288
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host-common.h46
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host-hpa.c368
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-host.c280
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-hpa-regs.h193
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-hpa.c167
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-lga-regs.h230
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-plat.c9
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.c30
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h450
-rw-r--r--drivers/pci/controller/cadence/pcie-sg2042.c131
-rw-r--r--drivers/pci/controller/dwc/Kconfig64
-rw-r--r--drivers/pci/controller/dwc/Makefile7
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c1
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c62
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c8
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c89
-rw-r--r--drivers/pci/controller/dwc/pci-meson.c18
-rw-r--r--drivers/pci/controller/dwc/pcie-al.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-amd-mdb.c52
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c2
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c32
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c180
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c120
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h76
-rw-r--r--drivers/pci/controller/dwc/pcie-dw-rockchip.c107
-rw-r--r--drivers/pci/controller/dwc/pcie-keembay.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-nxp-s32g.c406
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.c58
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-common.h2
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c23
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c143
-rw-r--r--drivers/pci/controller/dwc/pcie-rcar-gen4.c30
-rw-r--r--drivers/pci/controller/dwc/pcie-spacemit-k1.c357
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32-ep.c343
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.c370
-rw-r--r--drivers/pci/controller/dwc/pcie-stm32.h19
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c99
-rw-r--r--drivers/pci/controller/pci-host-common.c13
-rw-r--r--drivers/pci/controller/pci-host-common.h1
-rw-r--r--drivers/pci/controller/pci-hyperv.c70
-rw-r--r--drivers/pci/controller/pci-ixp4xx.c6
-rw-r--r--drivers/pci/controller/pci-tegra.c29
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c2
-rw-r--r--drivers/pci/controller/pcie-apple.c43
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c209
-rw-r--r--drivers/pci/controller/pcie-iproc.c22
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c23
-rw-r--r--drivers/pci/controller/pcie-mediatek.c113
-rw-r--r--drivers/pci/controller/pcie-rcar-ep.c2
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c42
-rw-r--r--drivers/pci/controller/pcie-rockchip-ep.c1
-rw-r--r--drivers/pci/controller/pcie-rockchip.h35
-rw-r--r--drivers/pci/controller/pcie-rzg3s-host.c1761
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c7
-rw-r--r--drivers/pci/controller/plda/pcie-plda-host.c3
-rw-r--r--drivers/pci/controller/vmd.c53
-rw-r--r--drivers/pci/doe.c2
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c48
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c153
-rw-r--r--drivers/pci/endpoint/pci-ep-msi.c2
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c159
-rw-r--r--drivers/pci/host-bridge.c1
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c8
-rw-r--r--drivers/pci/hotplug/ibmphp_hpc.c6
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c3
-rw-r--r--drivers/pci/ide.c815
-rw-r--r--drivers/pci/iov.c30
-rw-r--r--drivers/pci/msi/irqdomain.c143
-rw-r--r--drivers/pci/of_property.c22
-rw-r--r--drivers/pci/p2pdma.c196
-rw-r--r--drivers/pci/pci-acpi.c6
-rw-r--r--drivers/pci/pci-driver.c9
-rw-r--r--drivers/pci/pci-sysfs.c76
-rw-r--r--drivers/pci/pci.c259
-rw-r--r--drivers/pci/pci.h131
-rw-r--r--drivers/pci/pcie/aer.c51
-rw-r--r--drivers/pci/pcie/aspm.c52
-rw-r--r--drivers/pci/pcie/err.c40
-rw-r--r--drivers/pci/pcie/portdrv.c1
-rw-r--r--drivers/pci/pcie/ptm.c23
-rw-r--r--drivers/pci/probe.c126
-rw-r--r--drivers/pci/pwrctrl/Kconfig15
-rw-r--r--drivers/pci/pwrctrl/Makefile2
-rw-r--r--drivers/pci/pwrctrl/pci-pwrctrl-tc9563.c648
-rw-r--r--drivers/pci/pwrctrl/slot.c12
-rw-r--r--drivers/pci/quirks.c45
-rw-r--r--drivers/pci/rebar.c328
-rw-r--r--drivers/pci/remove.c10
-rw-r--r--drivers/pci/search.c62
-rw-r--r--drivers/pci/setup-bus.c962
-rw-r--r--drivers/pci/setup-res.c120
-rw-r--r--drivers/pci/switch/switchtec.c25
-rw-r--r--drivers/pci/tph.c16
-rw-r--r--drivers/pci/tsm.c900
-rw-r--r--drivers/pci/vgaarb.c31
-rw-r--r--drivers/peci/controller/peci-aspeed.c12
-rw-r--r--drivers/peci/controller/peci-npcm.c1
-rw-r--r--drivers/peci/cpu.c4
-rw-r--r--drivers/perf/Kconfig9
-rw-r--r--drivers/perf/Makefile1
-rw-r--r--drivers/perf/arm-ccn.c2
-rw-r--r--drivers/perf/arm-cmn.c9
-rw-r--r--drivers/perf/arm-ni.c97
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.c52
-rw-r--r--drivers/perf/arm_cspmu/arm_cspmu.h39
-rw-r--r--drivers/perf/arm_cspmu/nvidia_cspmu.c194
-rw-r--r--drivers/perf/arm_pmu.c55
-rw-r--r--drivers/perf/arm_pmu_acpi.c2
-rw-r--r--drivers/perf/arm_pmu_platform.c20
-rw-r--r--drivers/perf/arm_pmuv3.c55
-rw-r--r--drivers/perf/arm_spe_pmu.c164
-rw-r--r--drivers/perf/dwc_pcie_pmu.c161
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c93
-rw-r--r--drivers/perf/fsl_imx9_ddr_perf.c6
-rw-r--r--drivers/perf/fujitsu_uncore_pmu.c613
-rw-r--r--drivers/perf/hisilicon/Makefile3
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c557
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_mn_pmu.c411
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_noc_pmu.c443
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c5
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h6
-rw-r--r--drivers/perf/riscv_pmu_sbi.c201
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c38
-rw-r--r--drivers/phy/broadcom/phy-bcm63xx-usbh.c6
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c1
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c1
-rw-r--r--drivers/phy/cadence/cdns-dphy-rx.c3
-rw-r--r--drivers/phy/cadence/cdns-dphy.c154
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c1
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8mq-usb.c23
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8qm-hsio.c5
-rw-r--r--drivers/phy/freescale/phy-fsl-lynx-28g.c16
-rw-r--r--drivers/phy/hisilicon/phy-hi6220-usb.c1
-rw-r--r--drivers/phy/hisilicon/phy-histb-combphy.c2
-rw-r--r--drivers/phy/ingenic/phy-ingenic-usb.c8
-rw-r--r--drivers/phy/phy-can-transceiver.c158
-rw-r--r--drivers/phy/phy-core.c27
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c15
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ipq806x-usb.c1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-m31-eusb2.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c358
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c181
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v7.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v8_50.h13
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v7.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c159
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h2
-rw-r--r--drivers/phy/renesas/Kconfig7
-rw-r--r--drivers/phy/renesas/Makefile1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-pcie.c2
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c202
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb3.c2
-rw-r--r--drivers/phy/renesas/phy-rzg3e-usb3.c259
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c97
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-csidphy.c67
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c91
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c776
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c70
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-dcphy.c11
-rw-r--r--drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c28
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usb.c51
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usbdp.c3
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c3
-rw-r--r--drivers/phy/samsung/phy-gs101-ufs.c28
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.c40
-rw-r--r--drivers/phy/samsung/phy-samsung-ufs.h7
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c1
-rw-r--r--drivers/phy/sophgo/Kconfig19
-rw-r--r--drivers/phy/sophgo/Makefile2
-rw-r--r--drivers/phy/sophgo/phy-cv1800-usb2.c169
-rw-r--r--drivers/phy/ti/Kconfig2
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c1
-rw-r--r--drivers/phy/ti/phy-dm816x-usb.c1
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/phy/ti/phy-omap-control.c1
-rw-r--r--drivers/phy/ti/phy-omap-usb2.c1
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c1
-rw-r--r--drivers/pinctrl/Kconfig53
-rw-r--r--drivers/pinctrl/Makefile5
-rw-r--r--drivers/pinctrl/bcm/Kconfig12
-rw-r--r--drivers/pinctrl/bcm/Kconfig.stb10
-rw-r--r--drivers/pinctrl/bcm/Makefile2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm6358.c4
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb-bcm2712.c747
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.c442
-rw-r--r--drivers/pinctrl/bcm/pinctrl-brcmstb.h93
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-cs42l43.c23
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c4
-rw-r--r--drivers/pinctrl/cix/Kconfig15
-rw-r--r--drivers/pinctrl/cix/Makefile4
-rw-r--r--drivers/pinctrl/cix/pinctrl-sky1-base.c587
-rw-r--r--drivers/pinctrl/cix/pinctrl-sky1.c559
-rw-r--r--drivers/pinctrl/cix/pinctrl-sky1.h48
-rw-r--r--drivers/pinctrl/core.c16
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c45
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c68
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c20
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c68
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c37
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c86
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c21
-rw-r--r--drivers/pinctrl/intel/pinctrl-elkhartlake.c43
-rw-r--r--drivers/pinctrl/intel/pinctrl-emmitsburg.c33
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c60
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c36
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h11
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c34
-rw-r--r--drivers/pinctrl/intel/pinctrl-lakefield.c26
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c28
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c54
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorpoint.c46
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c26
-rw-r--r--drivers/pinctrl/intel/pinctrl-tangier.c3
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c70
-rw-r--r--drivers/pinctrl/mediatek/Kconfig10
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.c5
-rw-r--r--drivers/pinctrl/mediatek/mtk-eint.h1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-airoha.c2379
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c12
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.h7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt6878.c1478
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7623.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7629.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7986.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7988.c44
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8189.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8196.c6
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt6878.h2248
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c4
-rw-r--r--drivers/pinctrl/meson/pinctrl-amlogic-a4.c6
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-g12a.c8
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson-gxl.c10
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson.c6
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c6
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-ma35.c7
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c187
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c160
-rw-r--r--drivers/pinctrl/nuvoton/pinctrl-wpcm450.c46
-rw-r--r--drivers/pinctrl/nxp/pinctrl-s32cc.c3
-rw-r--r--drivers/pinctrl/pinconf-generic.c71
-rw-r--r--drivers/pinctrl/pinctrl-amd.c41
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c1
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c2
-rw-r--r--drivers/pinctrl/pinctrl-aw9523.c6
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c2
-rw-r--r--drivers/pinctrl/pinctrl-eic7700.c2
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.c30
-rw-r--r--drivers/pinctrl/pinctrl-equilibrium.h2
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c53
-rw-r--r--drivers/pinctrl/pinctrl-k210.c2
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c30
-rw-r--r--drivers/pinctrl/pinctrl-max7360.c215
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c40
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c6
-rw-r--r--drivers/pinctrl/pinctrl-mpfs-iomux0.c278
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c4
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c4
-rw-r--r--drivers/pinctrl/pinctrl-pic64gx-gpio2.c356
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c4
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c448
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h4
-rw-r--r--drivers/pinctrl/pinctrl-rp1.c96
-rw-r--r--drivers/pinctrl/pinctrl-scmi.c4
-rw-r--r--drivers/pinctrl/pinctrl-single.c15
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c4
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c12
-rw-r--r--drivers/pinctrl/pinctrl-upboard.c1070
-rw-r--r--drivers/pinctrl/pinctrl-zynqmp.c9
-rw-r--r--drivers/pinctrl/pinmux.c70
-rw-r--r--drivers/pinctrl/pinmux.h9
-rw-r--r--drivers/pinctrl/qcom/Kconfig11
-rw-r--r--drivers/pinctrl/qcom/Kconfig.msm18
-rw-r--r--drivers/pinctrl/qcom/Makefile3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-glymur.c1777
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5018.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-kaanapali.c1803
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c26
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.h18
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9607.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9615.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-milos.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c53
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8996.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sar2130p.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660-lpass-lpi.c160
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx75.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4450.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6125.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c83
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8750.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c17
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c8
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c2
-rw-r--r--drivers/pinctrl/realtek/Kconfig1
-rw-r--r--drivers/pinctrl/renesas/Kconfig13
-rw-r--r--drivers/pinctrl/renesas/Makefile1
-rw-r--r--drivers/pinctrl/renesas/pfc-emev2.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a73a4.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7778.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77951.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a7796.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77965.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77970.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77980.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77995.c2
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779f0.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c102
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779h0.c7
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7723.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7724.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-sh7734.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c11
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c2
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c349
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzt2h.c813
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzv2m.c12
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c256
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h10
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c6
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h7
-rw-r--r--drivers/pinctrl/spacemit/pinctrl-k1.c4
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c9
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c2
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c2
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c2
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32-hdp.c36
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c398
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.h1
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32mp257.c2
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c11
-rw-r--r--drivers/pinctrl/tegra/Kconfig4
-rw-r--r--drivers/pinctrl/tegra/Makefile1
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra186.c1979
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra20.c11
-rw-r--r--drivers/platform/Kconfig4
-rw-r--r--drivers/platform/Makefile2
-rw-r--r--drivers/platform/arm64/Kconfig20
-rw-r--r--drivers/platform/arm64/Makefile1
-rw-r--r--drivers/platform/arm64/lenovo-thinkpad-t14s.c662
-rw-r--r--drivers/platform/chrome/cros_ec.c90
-rw-r--r--drivers/platform/chrome/cros_ec.h3
-rw-r--r--drivers/platform/chrome/cros_ec_chardev.c72
-rw-r--r--drivers/platform/chrome/cros_ec_i2c.c9
-rw-r--r--drivers/platform/chrome/cros_ec_ishtp.c7
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c16
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c6
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c15
-rw-r--r--drivers/platform/chrome/cros_ec_rpmsg.c6
-rw-r--r--drivers/platform/chrome/cros_ec_sensorhub_ring.c11
-rw-r--r--drivers/platform/chrome/cros_ec_spi.c7
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c6
-rw-r--r--drivers/platform/chrome/cros_usbpd_notify.c17
-rw-r--r--drivers/platform/chrome/wilco_ec/telemetry.c2
-rw-r--r--drivers/platform/mellanox/mlxbf-pmc.c1
-rw-r--r--drivers/platform/raspberrypi/Kconfig52
-rw-r--r--drivers/platform/raspberrypi/Makefile15
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/TESTING (renamed from drivers/staging/vc04_services/interface/TESTING)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/TODO4
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c)20
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c)4
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c)9
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.c)6
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c)7
-rw-r--r--drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h (renamed from drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h)3
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/Kconfig (renamed from drivers/staging/vc04_services/vchiq-mmal/Kconfig)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/Makefile (renamed from drivers/staging/vc04_services/vchiq-mmal/Makefile)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-common.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h)2
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h)0
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c)7
-rw-r--r--drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h (renamed from drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h)2
-rw-r--r--drivers/platform/surface/aggregator/core.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_packet_layer.c2
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c2
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c13
-rw-r--r--drivers/platform/wmi/Kconfig34
-rw-r--r--drivers/platform/wmi/Makefile8
-rw-r--r--drivers/platform/wmi/core.c (renamed from drivers/platform/x86/wmi.c)34
-rw-r--r--drivers/platform/x86/Kconfig87
-rw-r--r--drivers/platform/x86/Makefile9
-rw-r--r--drivers/platform/x86/acer-wmi.c294
-rw-r--r--drivers/platform/x86/amd/hfi/hfi.c11
-rw-r--r--drivers/platform/x86/amd/hsmp/acpi.c13
-rw-r--r--drivers/platform/x86/amd/hsmp/plat.c4
-rw-r--r--drivers/platform/x86/amd/pmc/pmc-quirks.c40
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.c3
-rw-r--r--drivers/platform/x86/amd/pmc/pmc.h1
-rw-r--r--drivers/platform/x86/amd/pmf/acpi.c87
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c14
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c14
-rw-r--r--drivers/platform/x86/amd/pmf/core.c24
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h100
-rw-r--r--drivers/platform/x86/amd/pmf/spc.c82
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c40
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c110
-rw-r--r--drivers/platform/x86/asus-armoury.c1161
-rw-r--r--drivers/platform/x86/asus-armoury.h1541
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c185
-rw-r--r--drivers/platform/x86/ayaneo-ec.c593
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c104
-rw-r--r--drivers/platform/x86/dell/alienware-wmi-wmax.c226
-rw-r--r--drivers/platform/x86/dell/dell-lis3lv02d.c1
-rw-r--r--drivers/platform/x86/dell/dell-pc.c9
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c12
-rw-r--r--drivers/platform/x86/dell/dell_rbu.c8
-rw-r--r--drivers/platform/x86/gpd-pocket-fan.c4
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c30
-rw-r--r--drivers/platform/x86/huawei-wmi.c4
-rw-r--r--drivers/platform/x86/intel/Kconfig13
-rw-r--r--drivers/platform/x86/intel/Makefile1
-rw-r--r--drivers/platform/x86/intel/chtwc_int33fe.c29
-rw-r--r--drivers/platform/x86/intel/ehl_pse_io.c86
-rw-r--r--drivers/platform/x86/intel/hid.c13
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c5
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c58
-rw-r--r--drivers/platform/x86/intel/int3472/led.c2
-rw-r--r--drivers/platform/x86/intel/pmc/Makefile2
-rw-r--r--drivers/platform/x86/intel/pmc/arl.c16
-rw-r--r--drivers/platform/x86/intel/pmc/core.c320
-rw-r--r--drivers/platform/x86/intel/pmc/core.h43
-rw-r--r--drivers/platform/x86/intel/pmc/lnl.c18
-rw-r--r--drivers/platform/x86/intel/pmc/mtl.c11
-rw-r--r--drivers/platform/x86/intel/pmc/ptl.c36
-rw-r--r--drivers/platform/x86/intel/pmc/ssram_telemetry.c1
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c4
-rw-r--r--drivers/platform/x86/intel/pmc/wcl.c504
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c2
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_mmio.c4
-rw-r--r--drivers/platform/x86/intel/tpmi_power_domains.c2
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h9
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c76
-rw-r--r--drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c4
-rw-r--r--drivers/platform/x86/intel/vsec.c2
-rw-r--r--drivers/platform/x86/lenovo/ideapad-laptop.c218
-rw-r--r--drivers/platform/x86/lenovo/think-lmi.c94
-rw-r--r--drivers/platform/x86/lenovo/think-lmi.h14
-rw-r--r--drivers/platform/x86/lenovo/wmi-capdata01.c2
-rw-r--r--drivers/platform/x86/lenovo/wmi-gamezone.c35
-rw-r--r--drivers/platform/x86/lenovo/yoga-tab2-pro-1380-fastcharger.c5
-rw-r--r--drivers/platform/x86/lg-laptop.c45
-rw-r--r--drivers/platform/x86/meraki-mx100.c404
-rw-r--r--drivers/platform/x86/msi-wmi-platform.c43
-rw-r--r--drivers/platform/x86/oxpec.c121
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c192
-rw-r--r--drivers/platform/x86/portwell-ec.c194
-rw-r--r--drivers/platform/x86/quickstart.c10
-rw-r--r--drivers/platform/x86/redmi-wmi.c130
-rw-r--r--drivers/platform/x86/serial-multi-instantiate.c13
-rw-r--r--drivers/platform/x86/uniwill/Kconfig38
-rw-r--r--drivers/platform/x86/uniwill/Makefile8
-rw-r--r--drivers/platform/x86/uniwill/uniwill-acpi.c1912
-rw-r--r--drivers/platform/x86/uniwill/uniwill-wmi.c92
-rw-r--r--drivers/platform/x86/uniwill/uniwill-wmi.h129
-rw-r--r--drivers/platform/x86/x86-android-tablets/Makefile2
-rw-r--r--drivers/platform/x86/x86-android-tablets/acer.c247
-rw-r--r--drivers/platform/x86/x86-android-tablets/asus.c108
-rw-r--r--drivers/platform/x86/x86-android-tablets/core.c121
-rw-r--r--drivers/platform/x86/x86-android-tablets/dmi.c12
-rw-r--r--drivers/platform/x86/x86-android-tablets/lenovo.c291
-rw-r--r--drivers/platform/x86/x86-android-tablets/other.c334
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.c34
-rw-r--r--drivers/platform/x86/x86-android-tablets/shared-psy-info.h8
-rw-r--r--drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c4
-rw-r--r--drivers/platform/x86/x86-android-tablets/x86-android-tablets.h28
-rw-r--r--drivers/platform/x86/xiaomi-wmi.c10
-rw-r--r--drivers/pmdomain/Kconfig1
-rw-r--r--drivers/pmdomain/Makefile1
-rw-r--r--drivers/pmdomain/amlogic/meson-secure-pwrc.c95
-rw-r--r--drivers/pmdomain/apple/pmgr-pwrstate.c1
-rw-r--r--drivers/pmdomain/arm/scmi_pm_domain.c13
-rw-r--r--drivers/pmdomain/bcm/bcm2835-power.c17
-rw-r--r--drivers/pmdomain/core.c36
-rw-r--r--drivers/pmdomain/governor.c49
-rw-r--r--drivers/pmdomain/imx/gpc.c3
-rw-r--r--drivers/pmdomain/imx/imx93-blk-ctrl.c23
-rw-r--r--drivers/pmdomain/marvell/Kconfig18
-rw-r--r--drivers/pmdomain/marvell/Makefile3
-rw-r--r--drivers/pmdomain/marvell/pxa1908-power-controller.c274
-rw-r--r--drivers/pmdomain/mediatek/Kconfig17
-rw-r--r--drivers/pmdomain/mediatek/Makefile1
-rw-r--r--drivers/pmdomain/mediatek/airoha-cpu-pmdomain.c8
-rw-r--r--drivers/pmdomain/mediatek/mt6795-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8167-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8173-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8183-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8186-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8188-pm-domains.h6
-rw-r--r--drivers/pmdomain/mediatek/mt8192-pm-domains.h5
-rw-r--r--drivers/pmdomain/mediatek/mt8195-pm-domains.h6
-rw-r--r--drivers/pmdomain/mediatek/mt8196-pm-domains.h625
-rw-r--r--drivers/pmdomain/mediatek/mt8365-pm-domains.h14
-rw-r--r--drivers/pmdomain/mediatek/mtk-mfg-pmdomain.c1044
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.c707
-rw-r--r--drivers/pmdomain/mediatek/mtk-pm-domains.h123
-rw-r--r--drivers/pmdomain/qcom/rpmhpd.c28
-rw-r--r--drivers/pmdomain/qcom/rpmpd.c112
-rw-r--r--drivers/pmdomain/renesas/rcar-gen4-sysc.c1
-rw-r--r--drivers/pmdomain/renesas/rcar-sysc.c3
-rw-r--r--drivers/pmdomain/renesas/rmobile-sysc.c3
-rw-r--r--drivers/pmdomain/rockchip/Kconfig1
-rw-r--r--drivers/pmdomain/rockchip/pm-domains.c43
-rw-r--r--drivers/pmdomain/samsung/exynos-pm-domains.c29
-rw-r--r--drivers/pmdomain/tegra/powergate-bpmp.c1
-rw-r--r--drivers/pmdomain/thead/th1520-pm-domains.c16
-rw-r--r--drivers/pmdomain/ti/ti_sci_pm_domains.c24
-rw-r--r--drivers/pnp/driver.c19
-rw-r--r--drivers/pnp/isapnp/core.c3
-rw-r--r--drivers/power/reset/Kconfig16
-rw-r--r--drivers/power/reset/Makefile2
-rw-r--r--drivers/power/reset/sc27xx-poweroff.c10
-rw-r--r--drivers/power/reset/spacemit-p1-reboot.c88
-rw-r--r--drivers/power/reset/th1520-aon-reboot.c98
-rw-r--r--drivers/power/supply/88pm860x_charger.c8
-rw-r--r--drivers/power/supply/Kconfig58
-rw-r--r--drivers/power/supply/Makefile6
-rw-r--r--drivers/power/supply/ab8500_btemp.c3
-rw-r--r--drivers/power/supply/adc-battery-helper.c327
-rw-r--r--drivers/power/supply/adc-battery-helper.h62
-rw-r--r--drivers/power/supply/apm_power.c3
-rw-r--r--drivers/power/supply/bd71828-power.c1049
-rw-r--r--drivers/power/supply/bq2415x_charger.c4
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq257xx_charger.c755
-rw-r--r--drivers/power/supply/bq27xxx_battery.c21
-rw-r--r--drivers/power/supply/cw2015_battery.c14
-rw-r--r--drivers/power/supply/gpio-charger.c7
-rw-r--r--drivers/power/supply/intel_dc_ti_battery.c391
-rw-r--r--drivers/power/supply/ipaq_micro_battery.c3
-rw-r--r--drivers/power/supply/max17040_battery.c6
-rw-r--r--drivers/power/supply/max77705_charger.c388
-rw-r--r--drivers/power/supply/max77976_charger.c12
-rw-r--r--drivers/power/supply/mt6370-charger.c18
-rw-r--r--drivers/power/supply/pf1550-charger.c641
-rw-r--r--drivers/power/supply/power_supply_sysfs.c2
-rw-r--r--drivers/power/supply/qcom_battmgr.c314
-rw-r--r--drivers/power/supply/rk817_charger.c6
-rw-r--r--drivers/power/supply/rt5033_charger.c2
-rw-r--r--drivers/power/supply/rt9467-charger.c53
-rw-r--r--drivers/power/supply/rt9756.c955
-rw-r--r--drivers/power/supply/rx51_battery.c2
-rw-r--r--drivers/power/supply/sbs-charger.c16
-rw-r--r--drivers/power/supply/sbs-manager.c2
-rw-r--r--drivers/power/supply/ucs1002_power.c2
-rw-r--r--drivers/power/supply/ug3105_battery.c346
-rw-r--r--drivers/power/supply/wm831x_power.c10
-rw-r--r--drivers/powercap/dtpm.c16
-rw-r--r--drivers/powercap/idle_inject.c5
-rw-r--r--drivers/powercap/intel_rapl_common.c39
-rw-r--r--drivers/powercap/intel_rapl_msr.c43
-rw-r--r--drivers/powercap/intel_rapl_tpmi.c2
-rw-r--r--drivers/pps/generators/pps_gen_parport.c3
-rw-r--r--drivers/pps/kapi.c8
-rw-r--r--drivers/pps/pps.c5
-rw-r--r--drivers/ps3/ps3stor_lib.c3
-rw-r--r--drivers/ptp/Kconfig13
-rw-r--r--drivers/ptp/Makefile5
-rw-r--r--drivers/ptp/ptp_chardev.c66
-rw-r--r--drivers/ptp/ptp_clock.c154
-rw-r--r--drivers/ptp/ptp_clockmatrix.c2
-rw-r--r--drivers/ptp/ptp_ines.c31
-rw-r--r--drivers/ptp/ptp_netc.c1043
-rw-r--r--drivers/ptp/ptp_ocp.c73
-rw-r--r--drivers/ptp/ptp_private.h3
-rw-r--r--drivers/ptp/ptp_qoriq.c24
-rw-r--r--drivers/ptp/ptp_qoriq_debugfs.c101
-rw-r--r--drivers/ptp/ptp_sysfs.c2
-rw-r--r--drivers/pwm/Kconfig52
-rw-r--r--drivers/pwm/Makefile3
-rw-r--r--drivers/pwm/core.c116
-rw-r--r--drivers/pwm/pwm-adp5585.c4
-rw-r--r--drivers/pwm/pwm-airoha.c622
-rw-r--r--drivers/pwm/pwm-bcm2835.c28
-rw-r--r--drivers/pwm/pwm-berlin.c4
-rw-r--r--drivers/pwm/pwm-cros-ec.c10
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c35
-rw-r--r--drivers/pwm/pwm-loongson.c2
-rw-r--r--drivers/pwm/pwm-max7360.c209
-rw-r--r--drivers/pwm/pwm-mediatek.c461
-rw-r--r--drivers/pwm/pwm-pca9685.c515
-rw-r--r--drivers/pwm/pwm-rzg2l-gpt.c15
-rw-r--r--drivers/pwm/pwm-tiecap.c4
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c154
-rw-r--r--drivers/pwm/pwm_th1520.rs387
-rw-r--r--drivers/rapidio/rio-driver.c2
-rw-r--r--drivers/ras/amd/atl/core.c7
-rw-r--r--drivers/ras/amd/atl/internal.h6
-rw-r--r--drivers/ras/amd/atl/prm.c4
-rw-r--r--drivers/ras/amd/atl/system.c30
-rw-r--r--drivers/ras/amd/atl/umc.c23
-rw-r--r--drivers/ras/cec.c2
-rw-r--r--drivers/ras/ras.c41
-rw-r--r--drivers/regulator/Kconfig111
-rw-r--r--drivers/regulator/Makefile11
-rw-r--r--drivers/regulator/arizona-micsupp.c8
-rw-r--r--drivers/regulator/bd71815-regulator.c8
-rw-r--r--drivers/regulator/bd71828-regulator.c4
-rw-r--r--drivers/regulator/bd718x7-regulator.c8
-rw-r--r--drivers/regulator/bd96801-regulator.c10
-rw-r--r--drivers/regulator/bq257xx-regulator.c186
-rw-r--r--drivers/regulator/core.c173
-rw-r--r--drivers/regulator/fixed.c1
-rw-r--r--drivers/regulator/fp9931.c551
-rw-r--r--drivers/regulator/hi6421-regulator.c10
-rw-r--r--drivers/regulator/hi6421v530-regulator.c4
-rw-r--r--drivers/regulator/hi6421v600-regulator.c6
-rw-r--r--drivers/regulator/irq_helpers.c2
-rw-r--r--drivers/regulator/max77650-regulator.c6
-rw-r--r--drivers/regulator/max77838-regulator.c221
-rw-r--r--drivers/regulator/mt6315-regulator.c6
-rw-r--r--drivers/regulator/mt6316-regulator.c345
-rw-r--r--drivers/regulator/mt6358-regulator.c2
-rw-r--r--drivers/regulator/mt6363-regulator.c938
-rw-r--r--drivers/regulator/of_regulator.c6
-rw-r--r--drivers/regulator/pca9450-regulator.c203
-rw-r--r--drivers/regulator/pf0900-regulator.c975
-rw-r--r--drivers/regulator/pf1550-regulator.c429
-rw-r--r--drivers/regulator/pf530x-regulator.c375
-rw-r--r--drivers/regulator/pf9453-regulator.c42
-rw-r--r--drivers/regulator/qcom-labibb-regulator.c4
-rw-r--r--drivers/regulator/qcom-refgen-regulator.c1
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c1338
-rw-r--r--drivers/regulator/renesas-usb-vbus-regulator.c2
-rw-r--r--drivers/regulator/rt5133-regulator.c642
-rw-r--r--drivers/regulator/rtq2208-regulator.c6
-rw-r--r--drivers/regulator/s2dos05-regulator.c165
-rw-r--r--drivers/regulator/scmi-regulator.c3
-rw-r--r--drivers/regulator/spacemit-p1.c157
-rw-r--r--drivers/regulator/sy7636a-regulator.c27
-rw-r--r--drivers/regulator/tps6524x-regulator.c1
-rw-r--r--drivers/regulator/tps6594-regulator.c2
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c57
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c447
-rw-r--r--drivers/remoteproc/imx_rproc.c637
-rw-r--r--drivers/remoteproc/imx_rproc.h23
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c95
-rw-r--r--drivers/remoteproc/mtk_scp.c65
-rw-r--r--drivers/remoteproc/omap_remoteproc.c3
-rw-r--r--drivers/remoteproc/pru_rproc.c3
-rw-r--r--drivers/remoteproc/qcom_q6v5.c8
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c31
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c71
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c119
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c42
-rw-r--r--drivers/remoteproc/qcom_wcnss.c27
-rw-r--r--drivers/remoteproc/rcar_rproc.c38
-rw-r--r--drivers/remoteproc/remoteproc_core.c31
-rw-r--r--drivers/remoteproc/st_remoteproc.c44
-rw-r--r--drivers/remoteproc/stm32_rproc.c46
-rw-r--r--drivers/remoteproc/ti_k3_common.c49
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c2
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c69
-rw-r--r--drivers/remoteproc/xlnx_r5_remoteproc.c53
-rw-r--r--drivers/resctrl/Kconfig24
-rw-r--r--drivers/resctrl/Makefile4
-rw-r--r--drivers/resctrl/mpam_devices.c2723
-rw-r--r--drivers/resctrl/mpam_internal.h658
-rw-r--r--drivers/resctrl/test_mpam_devices.c389
-rw-r--r--drivers/reset/Kconfig22
-rw-r--r--drivers/reset/Makefile2
-rw-r--r--drivers/reset/core.c262
-rw-r--r--drivers/reset/reset-aspeed.c253
-rw-r--r--drivers/reset/reset-bcm6345.c1
-rw-r--r--drivers/reset/reset-eic7700.c429
-rw-r--r--drivers/reset/reset-eyeq.c11
-rw-r--r--drivers/reset/reset-gpio.c19
-rw-r--r--drivers/reset/reset-imx8mp-audiomix.c4
-rw-r--r--drivers/reset/reset-intel-gw.c1
-rw-r--r--drivers/reset/reset-mpfs.c91
-rw-r--r--drivers/reset/reset-qcom-pdc.c1
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c60
-rw-r--r--drivers/reset/reset-th1520.c876
-rw-r--r--drivers/rpmsg/qcom_glink_native.c37
-rw-r--r--drivers/rpmsg/qcom_smd.c4
-rw-r--r--drivers/rpmsg/rpmsg_char.c3
-rw-r--r--drivers/rpmsg/rpmsg_core.c5
-rw-r--r--drivers/rtc/Kconfig48
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/interface.c27
-rw-r--r--drivers/rtc/rtc-amlogic-a4.c14
-rw-r--r--drivers/rtc/rtc-efi.c76
-rw-r--r--drivers/rtc/rtc-isl12022.c1
-rw-r--r--drivers/rtc/rtc-mc13xxx.c13
-rw-r--r--drivers/rtc/rtc-meson.c1
-rw-r--r--drivers/rtc/rtc-nct6694.c297
-rw-r--r--drivers/rtc/rtc-optee.c465
-rw-r--r--drivers/rtc/rtc-pcf2127.c19
-rw-r--r--drivers/rtc/rtc-rx8025.c2
-rw-r--r--drivers/rtc/rtc-s3c.c49
-rw-r--r--drivers/rtc/rtc-s3c.h19
-rw-r--r--drivers/rtc/rtc-sd2405al.c4
-rw-r--r--drivers/rtc/rtc-spacemit-p1.c167
-rw-r--r--drivers/rtc/rtc-x1205.c2
-rw-r--r--drivers/rtc/rtc-zynqmp.c19
-rw-r--r--drivers/s390/block/Kconfig12
-rw-r--r--drivers/s390/block/dasd.c92
-rw-r--r--drivers/s390/block/dasd_devmap.c3
-rw-r--r--drivers/s390/block/dasd_eckd.c19
-rw-r--r--drivers/s390/block/dasd_fba.c1
-rw-r--r--drivers/s390/block/dasd_genhd.c80
-rw-r--r--drivers/s390/block/dasd_ioctl.c6
-rw-r--r--drivers/s390/block/dcssblk.c42
-rw-r--r--drivers/s390/block/scm_blk.c3
-rw-r--r--drivers/s390/block/scm_drv.c3
-rw-r--r--drivers/s390/char/Makefile1
-rw-r--r--drivers/s390/char/con3270.c39
-rw-r--r--drivers/s390/char/diag_ftp.c3
-rw-r--r--drivers/s390/char/fs3270.c7
-rw-r--r--drivers/s390/char/hmcdrv_cache.c3
-rw-r--r--drivers/s390/char/hmcdrv_dev.c22
-rw-r--r--drivers/s390/char/hmcdrv_ftp.c3
-rw-r--r--drivers/s390/char/hmcdrv_mod.c3
-rw-r--r--drivers/s390/char/monreader.c3
-rw-r--r--drivers/s390/char/monwriter.c3
-rw-r--r--drivers/s390/char/sclp_ap.c3
-rw-r--r--drivers/s390/char/sclp_cmd.c481
-rw-r--r--drivers/s390/char/sclp_config.c3
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c3
-rw-r--r--drivers/s390/char/sclp_ctl.c12
-rw-r--r--drivers/s390/char/sclp_early.c3
-rw-r--r--drivers/s390/char/sclp_early_core.c2
-rw-r--r--drivers/s390/char/sclp_ftp.c3
-rw-r--r--drivers/s390/char/sclp_mem.c521
-rw-r--r--drivers/s390/char/sclp_ocf.c3
-rw-r--r--drivers/s390/char/sclp_pci.c3
-rw-r--r--drivers/s390/char/sclp_sd.c6
-rw-r--r--drivers/s390/char/sclp_sdias.c3
-rw-r--r--drivers/s390/char/tape.h21
-rw-r--r--drivers/s390/char/tape_34xx.c31
-rw-r--r--drivers/s390/char/tape_3590.c94
-rw-r--r--drivers/s390/char/tape_char.c142
-rw-r--r--drivers/s390/char/tape_class.c3
-rw-r--r--drivers/s390/char/tape_core.c38
-rw-r--r--drivers/s390/char/tape_proc.c3
-rw-r--r--drivers/s390/char/tape_std.c83
-rw-r--r--drivers/s390/char/tape_std.h9
-rw-r--r--drivers/s390/char/vmcp.c7
-rw-r--r--drivers/s390/char/vmlogrdr.c3
-rw-r--r--drivers/s390/char/vmur.c3
-rw-r--r--drivers/s390/char/zcore.c3
-rw-r--r--drivers/s390/cio/blacklist.c3
-rw-r--r--drivers/s390/cio/ccwgroup.c6
-rw-r--r--drivers/s390/cio/ccwreq.c3
-rw-r--r--drivers/s390/cio/chp.c5
-rw-r--r--drivers/s390/cio/chsc.c13
-rw-r--r--drivers/s390/cio/chsc_sch.c7
-rw-r--r--drivers/s390/cio/cio.c5
-rw-r--r--drivers/s390/cio/cio_inject.c3
-rw-r--r--drivers/s390/cio/cmf.c5
-rw-r--r--drivers/s390/cio/css.c3
-rw-r--r--drivers/s390/cio/device.c40
-rw-r--r--drivers/s390/cio/device_status.c2
-rw-r--r--drivers/s390/cio/ioasm.c7
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c47
-rw-r--r--drivers/s390/crypto/ap_bus.c196
-rw-r--r--drivers/s390/crypto/ap_bus.h5
-rw-r--r--drivers/s390/crypto/ap_card.c3
-rw-r--r--drivers/s390/crypto/ap_queue.c75
-rw-r--r--drivers/s390/crypto/pkey_api.c3
-rw-r--r--drivers/s390/crypto/pkey_base.c3
-rw-r--r--drivers/s390/crypto/pkey_cca.c3
-rw-r--r--drivers/s390/crypto/pkey_ep11.c3
-rw-r--r--drivers/s390/crypto/pkey_pckmo.c3
-rw-r--r--drivers/s390/crypto/pkey_sysfs.c3
-rw-r--r--drivers/s390/crypto/pkey_uv.c3
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c16
-rw-r--r--drivers/s390/crypto/zcrypt_api.c257
-rw-r--r--drivers/s390/crypto/zcrypt_card.c1
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c3
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c7
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype50.c3
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c3
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c1
-rw-r--r--drivers/s390/net/Kconfig3
-rw-r--r--drivers/s390/net/ctcm_fsms.c17
-rw-r--r--drivers/s390/net/ctcm_main.c3
-rw-r--r--drivers/s390/net/ctcm_mpc.c4
-rw-r--r--drivers/s390/net/ctcm_sysfs.c3
-rw-r--r--drivers/s390/net/ism.h53
-rw-r--r--drivers/s390/net/ism_drv.c576
-rw-r--r--drivers/s390/net/qeth_core_main.c9
-rw-r--r--drivers/s390/net/qeth_core_mpc.c247
-rw-r--r--drivers/s390/net/qeth_core_mpc.h20
-rw-r--r--drivers/s390/net/qeth_core_sys.c3
-rw-r--r--drivers/s390/net/qeth_ethtool.c3
-rw-r--r--drivers/s390/net/qeth_l2_main.c3
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
-rw-r--r--drivers/s390/net/smsgiucv_app.c12
-rw-r--r--drivers/s390/scsi/zfcp_aux.c3
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c3
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c3
-rw-r--r--drivers/s390/scsi/zfcp_erp.c3
-rw-r--r--drivers/s390/scsi/zfcp_fc.c3
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c3
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c3
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c3
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c3
-rw-r--r--drivers/scsi/3w-9xxx.c2
-rw-r--r--drivers/scsi/3w-sas.c2
-rw-r--r--drivers/scsi/3w-xxxx.c2
-rw-r--r--drivers/scsi/BusLogic.c8
-rw-r--r--drivers/scsi/BusLogic.h2
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/aacraid/linit.c8
-rw-r--r--drivers/scsi/advansys.c5
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aha1740.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c3
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c1
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c6
-rw-r--r--drivers/scsi/atp870u.c2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c3
-rw-r--r--drivers/scsi/bfa/bfa_core.c1
-rw-r--r--drivers/scsi/bfa/bfad.c1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.c1
-rw-r--r--drivers/scsi/csiostor/csio_wr.c4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c2
-rw-r--r--drivers/scsi/fdomain.c4
-rw-r--r--drivers/scsi/fnic/fnic_res.c1
-rw-r--r--drivers/scsi/fnic/fnic_trace.c57
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c6
-rw-r--r--drivers/scsi/hosts.c24
-rw-r--r--drivers/scsi/hpsa.c53
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c3
-rw-r--r--drivers/scsi/imm.c2
-rw-r--r--drivers/scsi/initio.c4
-rw-r--r--drivers/scsi/ipr.c17
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/ips.h2
-rw-r--r--drivers/scsi/isci/remote_device.c2
-rw-r--r--drivers/scsi/isci/task.h10
-rw-r--r--drivers/scsi/libfc/fc_encode.h2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c5
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c2
-rw-r--r--drivers/scsi/lpfc/lpfc.h56
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c36
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c632
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c272
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c6
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h28
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c46
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c100
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c4
-rw-r--r--drivers/scsi/megaraid.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h17
-rw-r--r--drivers/scsi/mesh.c1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h38
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h2
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h8
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c13
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c32
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_transport.c11
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c11
-rw-r--r--drivers/scsi/mvsas/mv_init.c2
-rw-r--r--drivers/scsi/mvsas/mv_sas.c2
-rw-r--r--drivers/scsi/mvumi.c2
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/myrs.c8
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c24
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c11
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c3
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c34
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h5
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c10
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h4
-rw-r--r--drivers/scsi/ppa.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c15
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla1280.c37
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c32
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c57
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c1777
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h112
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c17
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c4
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c13
-rw-r--r--drivers/scsi/qlogicfas408.c2
-rw-r--r--drivers/scsi/qlogicfas408.h2
-rw-r--r--drivers/scsi/scsi.c12
-rw-r--r--drivers/scsi/scsi_debug.c149
-rw-r--r--drivers/scsi/scsi_error.c7
-rw-r--r--drivers/scsi/scsi_lib.c107
-rw-r--r--drivers/scsi/scsi_logging.c21
-rw-r--r--drivers/scsi/scsi_pm.c1
-rw-r--r--drivers/scsi/scsi_priv.h1
-rw-r--r--drivers/scsi/scsi_scan.c74
-rw-r--r--drivers/scsi/scsi_sysfs.c79
-rw-r--r--drivers/scsi/scsi_transport_fc.c5
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsicam.c16
-rw-r--r--drivers/scsi/sd.c112
-rw-r--r--drivers/scsi/sd.h2
-rw-r--r--drivers/scsi/sd_zbc.c20
-rw-r--r--drivers/scsi/sg.c13
-rw-r--r--drivers/scsi/sim710.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c66
-rw-r--r--drivers/scsi/snic/snic_debugfs.c10
-rw-r--r--drivers/scsi/snic/snic_trc.c5
-rw-r--r--drivers/scsi/st.c89
-rw-r--r--drivers/scsi/stex.c4
-rw-r--r--drivers/scsi/storvsc_drv.c102
-rw-r--r--drivers/scsi/wd719x.c2
-rw-r--r--drivers/sh/clk/core.c10
-rw-r--r--drivers/sh/intc/core.c12
-rw-r--r--drivers/siox/siox-bus-gpio.c3
-rw-r--r--drivers/slimbus/Kconfig7
-rw-r--r--drivers/slimbus/Makefile3
-rw-r--r--drivers/slimbus/messaging.c4
-rw-r--r--drivers/slimbus/qcom-ctrl.c735
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c3
-rw-r--r--drivers/soc/amlogic/meson-canvas.c12
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c6
-rw-r--r--drivers/soc/apple/Kconfig3
-rw-r--r--drivers/soc/apple/mailbox.c34
-rw-r--r--drivers/soc/apple/sart.c73
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c14
-rw-r--r--drivers/soc/aspeed/aspeed-p2a-ctrl.c14
-rw-r--r--drivers/soc/aspeed/aspeed-socinfo.c4
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c12
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm.h2
-rw-r--r--drivers/soc/fsl/qbman/qman.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_test_stash.c4
-rw-r--r--drivers/soc/fsl/qe/gpio.c139
-rw-r--r--drivers/soc/hisilicon/kunpeng_hccs.c2
-rw-r--r--drivers/soc/mediatek/mtk-socinfo.c3
-rw-r--r--drivers/soc/mediatek/mtk-svs.c23
-rw-r--r--drivers/soc/microchip/Kconfig12
-rw-r--r--drivers/soc/microchip/Makefile1
-rw-r--r--drivers/soc/microchip/mpfs-control-scb.c38
-rw-r--r--drivers/soc/microchip/mpfs-mss-top-sysreg.c44
-rw-r--r--drivers/soc/qcom/icc-bwmon.c3
-rw-r--r--drivers/soc/qcom/ice.c81
-rw-r--r--drivers/soc/qcom/llcc-qcom.c374
-rw-r--r--drivers/soc/qcom/mdt_loader.c58
-rw-r--r--drivers/soc/qcom/ocmem.c2
-rw-r--r--drivers/soc/qcom/pmic_glink.c9
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c506
-rw-r--r--drivers/soc/qcom/qcom-pbs.c2
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c8
-rw-r--r--drivers/soc/qcom/qcom_pd_mapper.c11
-rw-r--r--drivers/soc/qcom/ramp_controller.c1
-rw-r--r--drivers/soc/qcom/rpm_master_stats.c2
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c7
-rw-r--r--drivers/soc/qcom/smem.c35
-rw-r--r--drivers/soc/qcom/socinfo.c102
-rw-r--r--drivers/soc/qcom/ubwc_config.c38
-rw-r--r--drivers/soc/renesas/Kconfig13
-rw-r--r--drivers/soc/renesas/r9a08g045-sysc.c70
-rw-r--r--drivers/soc/renesas/r9a09g047-sys.c80
-rw-r--r--drivers/soc/renesas/r9a09g056-sys.c69
-rw-r--r--drivers/soc/renesas/r9a09g057-sys.c102
-rw-r--r--drivers/soc/renesas/rcar-rst.c3
-rw-r--r--drivers/soc/renesas/renesas-soc.c16
-rw-r--r--drivers/soc/renesas/rz-sysc.c35
-rw-r--r--drivers/soc/renesas/rz-sysc.h6
-rw-r--r--drivers/soc/rockchip/grf.c50
-rw-r--r--drivers/soc/samsung/Makefile3
-rw-r--r--drivers/soc/samsung/exynos-chipid.c18
-rw-r--r--drivers/soc/samsung/exynos-pmu.c411
-rw-r--r--drivers/soc/samsung/exynos-pmu.h37
-rw-r--r--drivers/soc/samsung/gs101-pmu.c446
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c14
-rw-r--r--drivers/soc/tegra/Kconfig1
-rw-r--r--drivers/soc/tegra/cbb/tegra194-cbb.c2
-rw-r--r--drivers/soc/tegra/common.c12
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c122
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra210.c63
-rw-r--r--drivers/soc/tegra/pmc.c38
-rw-r--r--drivers/soc/ti/k3-socinfo.c10
-rw-r--r--drivers/soc/ti/knav_dma.c14
-rw-r--r--drivers/soc/ti/pruss.c2
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c8
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c10
-rw-r--r--drivers/soundwire/bus_type.c3
-rw-r--r--drivers/soundwire/debugfs.c2
-rw-r--r--drivers/soundwire/qcom.c5
-rw-r--r--drivers/spi/Kconfig61
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/atmel-quadspi.c134
-rw-r--r--drivers/spi/spi-airoha-snfi.c538
-rw-r--r--drivers/spi/spi-altera-platform.c1
-rw-r--r--drivers/spi/spi-amd-pci.c5
-rw-r--r--drivers/spi/spi-amd.c2
-rw-r--r--drivers/spi/spi-amlogic-spifc-a1.c4
-rw-r--r--drivers/spi/spi-amlogic-spifc-a4.c1222
-rw-r--r--drivers/spi/spi-amlogic-spisg.c4
-rw-r--r--drivers/spi/spi-apple.c1
-rw-r--r--drivers/spi/spi-aspeed-smc.c747
-rw-r--r--drivers/spi/spi-atmel.c78
-rw-r--r--drivers/spi/spi-axi-spi-engine.c17
-rw-r--r--drivers/spi/spi-bcm2835.c2
-rw-r--r--drivers/spi/spi-bcm63xx.c18
-rw-r--r--drivers/spi/spi-cadence-quadspi.c90
-rw-r--r--drivers/spi/spi-cadence.c106
-rw-r--r--drivers/spi/spi-ch341.c2
-rw-r--r--drivers/spi/spi-cs42l43.c40
-rw-r--r--drivers/spi/spi-davinci.c64
-rw-r--r--drivers/spi/spi-dw-bt1.c4
-rw-r--r--drivers/spi/spi-dw-core.c188
-rw-r--r--drivers/spi/spi-dw-dma.c22
-rw-r--r--drivers/spi/spi-dw-mmio.c13
-rw-r--r--drivers/spi/spi-dw-pci.c8
-rw-r--r--drivers/spi/spi-dw.h12
-rw-r--r--drivers/spi/spi-fsl-dspi.c232
-rw-r--r--drivers/spi/spi-fsl-lpspi.c14
-rw-r--r--drivers/spi/spi-fsl-qspi.c88
-rw-r--r--drivers/spi/spi-geni-qcom.c6
-rw-r--r--drivers/spi/spi-imx.c73
-rw-r--r--drivers/spi/spi-intel-pci.c3
-rw-r--r--drivers/spi/spi-intel.c6
-rw-r--r--drivers/spi/spi-ljca.c2
-rw-r--r--drivers/spi/spi-loopback-test.c12
-rw-r--r--drivers/spi/spi-mem.c5
-rw-r--r--drivers/spi/spi-microchip-core-qspi.c3
-rw-r--r--drivers/spi/spi-microchip-core-spi.c429
-rw-r--r--drivers/spi/spi-mpfs.c (renamed from drivers/spi/spi-microchip-core.c)204
-rw-r--r--drivers/spi/spi-mt65xx.c30
-rw-r--r--drivers/spi/spi-mtk-snfi.c1
-rw-r--r--drivers/spi/spi-mxs.c2
-rw-r--r--drivers/spi/spi-npcm-fiu.c6
-rw-r--r--drivers/spi/spi-nxp-fspi.c151
-rw-r--r--drivers/spi/spi-offload-trigger-adi-util-sigma-delta.c5
-rw-r--r--drivers/spi/spi-offload-trigger-pwm.c3
-rw-r--r--drivers/spi/spi-omap2-mcspi.c1
-rw-r--r--drivers/spi/spi-pl022.c13
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-qpic-snand.c60
-rw-r--r--drivers/spi/spi-rb4xx.c36
-rw-r--r--drivers/spi/spi-rockchip-sfc.c12
-rw-r--r--drivers/spi/spi-rpc-if.c12
-rw-r--r--drivers/spi/spi-rzv2h-rspi.c303
-rw-r--r--drivers/spi/spi-s3c64xx.c19
-rw-r--r--drivers/spi/spi-sg2044-nor.c4
-rw-r--r--drivers/spi/spi-sunplus-sp7021.c6
-rw-r--r--drivers/spi/spi-tegra210-quad.c174
-rw-r--r--drivers/spi/spi-tle62x0.c2
-rw-r--r--drivers/spi/spi-virtio.c431
-rw-r--r--drivers/spi/spi-xilinx.c2
-rw-r--r--drivers/spi/spi.c97
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c335
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.txt5
-rw-r--r--drivers/staging/fbtft/fbtft-core.c4
-rw-r--r--drivers/staging/gpib/uapi/gpib.h104
-rw-r--r--drivers/staging/gpib/uapi/gpib_ioctl.h167
-rw-r--r--drivers/staging/greybus/audio_codec.c16
-rw-r--r--drivers/staging/greybus/audio_helper.c9
-rw-r--r--drivers/staging/greybus/audio_topology.c24
-rw-r--r--drivers/staging/greybus/uart.c8
-rw-r--r--drivers/staging/iio/adc/ad7816.c2
-rw-r--r--drivers/staging/iio/addac/adt7316.c102
-rw-r--r--drivers/staging/iio/frequency/ad9834.c3
-rw-r--r--drivers/staging/iio/frequency/ad9834.h10
-rw-r--r--drivers/staging/media/atomisp/i2c/Kconfig9
-rw-r--r--drivers/staging/media/atomisp/i2c/Makefile1
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-gc2235.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2722.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c9
-rw-r--r--drivers/staging/media/atomisp/pci/runtime/bufq/src/bufq.c4
-rw-r--r--drivers/staging/media/av7110/av7110.c2
-rw-r--r--drivers/staging/media/av7110/av7110_ca.c2
-rw-r--r--drivers/staging/media/av7110/av7110_v4l.c4
-rw-r--r--drivers/staging/media/imx/imx-media-csc-scaler.c28
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c8
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c3
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c5
-rw-r--r--drivers/staging/media/ipu3/ipu3.c3
-rw-r--r--drivers/staging/media/ipu3/ipu3.h1
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi-phy.c4
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-csi2.c6
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-queue.c3
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.c35
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-subdev.h1
-rw-r--r--drivers/staging/media/ipu7/ipu7-isys-video.c44
-rw-r--r--drivers/staging/media/ipu7/ipu7.c29
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c29
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c8
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h5
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c5
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_capture.c16
-rw-r--r--drivers/staging/media/sunxi/sun6i-isp/sun6i_isp_params.c6
-rw-r--r--drivers/staging/media/tegra-video/tegra20.c6
-rw-r--r--drivers/staging/most/Kconfig2
-rw-r--r--drivers/staging/most/Makefile1
-rw-r--r--drivers/staging/most/i2c/Kconfig13
-rw-r--r--drivers/staging/most/i2c/Makefile4
-rw-r--r--drivers/staging/most/i2c/i2c.c374
-rw-r--r--drivers/staging/most/video/video.c19
-rw-r--r--drivers/staging/nvec/nvec_ps2.c12
-rw-r--r--drivers/staging/octeon/ethernet-tx.c43
-rw-r--r--drivers/staging/octeon/octeon-stubs.h134
-rw-r--r--drivers/staging/rtl8723bs/Makefile2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c320
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c172
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c38
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_io.c48
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c307
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c210
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c20
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c194
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c313
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c12
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c79
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c65
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com_phycfg.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_intf.c5
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_pwr_seq.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c165
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.h6
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_cmd.c33
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c384
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_halinit.c16
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c5
-rw-r--r--drivers/staging/rtl8723bs/include/basic_types.h44
-rw-r--r--drivers/staging/rtl8723bs/include/drv_types.h9
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com.h2
-rw-r--r--drivers/staging/rtl8723bs/include/hal_com_reg.h4
-rw-r--r--drivers/staging/rtl8723bs/include/hal_intf.h9
-rw-r--r--drivers/staging/rtl8723bs/include/mlme_osdep.h19
-rw-r--r--drivers/staging/rtl8723bs/include/recv_osdep.h40
-rw-r--r--drivers/staging/rtl8723bs/include/rtl8723b_hal.h4
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_efuse.h15
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme.h5
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_mlme_ext.h6
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_recv.h4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c11
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c179
-rw-r--r--drivers/staging/rtl8723bs/os_dep/recv_linux.c225
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c2
-rw-r--r--drivers/staging/sm750fb/sm750.c13
-rw-r--r--drivers/staging/sm750fb/sm750.h6
-rw-r--r--drivers/staging/sm750fb/sm750_accel.c18
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c4
-rw-r--r--drivers/staging/vc04_services/Kconfig49
-rw-r--r--drivers/staging/vc04_services/Makefile14
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c5
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c3
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h3
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Kconfig13
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Makefile6
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/TODO17
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c2011
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h142
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c1399
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h112
-rw-r--r--drivers/staging/vc04_services/interface/TODO28
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h164
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h60
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_cfg.h41
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h596
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_debugfs.h22
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c3
-rw-r--r--drivers/target/loopback/tcm_loop.c3
-rw-r--r--drivers/target/sbp/sbp_target.c8
-rw-r--r--drivers/target/target_core_configfs.c52
-rw-r--r--drivers/target/target_core_device.c24
-rw-r--r--drivers/target/target_core_fabric_configfs.c2
-rw-r--r--drivers/target/target_core_file.c4
-rw-r--r--drivers/target/target_core_iblock.c9
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/target/target_core_sbc.c51
-rw-r--r--drivers/target/target_core_spc.c49
-rw-r--r--drivers/target/target_core_stat.c268
-rw-r--r--drivers/target/target_core_tpg.c23
-rw-r--r--drivers/target/target_core_transport.c26
-rw-r--r--drivers/target/target_core_xcopy.c2
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c2
-rw-r--r--drivers/tee/Kconfig9
-rw-r--r--drivers/tee/Makefile2
-rw-r--r--drivers/tee/optee/Kconfig5
-rw-r--r--drivers/tee/optee/Makefile1
-rw-r--r--drivers/tee/optee/core.c9
-rw-r--r--drivers/tee/optee/ffa_abi.c146
-rw-r--r--drivers/tee/optee/optee_ffa.h27
-rw-r--r--drivers/tee/optee/optee_msg.h84
-rw-r--r--drivers/tee/optee/optee_private.h15
-rw-r--r--drivers/tee/optee/optee_smc.h37
-rw-r--r--drivers/tee/optee/protmem.c335
-rw-r--r--drivers/tee/optee/smc_abi.c141
-rw-r--r--drivers/tee/qcomtee/Kconfig13
-rw-r--r--drivers/tee/qcomtee/Makefile9
-rw-r--r--drivers/tee/qcomtee/async.c182
-rw-r--r--drivers/tee/qcomtee/call.c820
-rw-r--r--drivers/tee/qcomtee/core.c915
-rw-r--r--drivers/tee/qcomtee/mem_obj.c169
-rw-r--r--drivers/tee/qcomtee/primordial_obj.c113
-rw-r--r--drivers/tee/qcomtee/qcomtee.h185
-rw-r--r--drivers/tee/qcomtee/qcomtee_msg.h304
-rw-r--r--drivers/tee/qcomtee/qcomtee_object.h316
-rw-r--r--drivers/tee/qcomtee/shm.c150
-rw-r--r--drivers/tee/qcomtee/user_obj.c692
-rw-r--r--drivers/tee/tee_core.c342
-rw-r--r--drivers/tee/tee_heap.c500
-rw-r--r--drivers/tee/tee_private.h20
-rw-r--r--drivers/tee/tee_shm.c165
-rw-r--r--drivers/thermal/Kconfig10
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/gov_step_wise.c25
-rw-r--r--drivers/thermal/imx91_thermal.c384
-rw-r--r--drivers/thermal/intel/Kconfig3
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig1
-rw-r--r--drivers/thermal/intel/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/acpi_thermal_rel.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c13
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c20
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h8
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c13
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rapl.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c15
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_soc_slider.c284
-rw-r--r--drivers/thermal/intel/intel_hfi.c12
-rw-r--r--drivers/thermal/k3_j72xx_bandgap.c4
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c2
-rw-r--r--drivers/thermal/qcom/Kconfig3
-rw-r--r--drivers/thermal/qcom/lmh.c4
-rw-r--r--drivers/thermal/renesas/Kconfig21
-rw-r--r--drivers/thermal/renesas/Makefile2
-rw-r--r--drivers/thermal/renesas/rcar_gen3_thermal.c73
-rw-r--r--drivers/thermal/renesas/rcar_thermal.c8
-rw-r--r--drivers/thermal/renesas/rzg3e_thermal.c547
-rw-r--r--drivers/thermal/renesas/rzg3s_thermal.c272
-rw-r--r--drivers/thermal/rockchip_thermal.c50
-rw-r--r--drivers/thermal/tegra/Makefile1
-rw-r--r--drivers/thermal/tegra/soctherm-fuse.c18
-rw-r--r--drivers/thermal/tegra/soctherm.c13
-rw-r--r--drivers/thermal/tegra/soctherm.h11
-rw-r--r--drivers/thermal/tegra/tegra114-soctherm.c209
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c4
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c4
-rw-r--r--drivers/thermal/testing/zone.c31
-rw-r--r--drivers/thermal/thermal-generic-adc.c55
-rw-r--r--drivers/thermal/thermal_hwmon.c2
-rw-r--r--drivers/thunderbolt/Kconfig4
-rw-r--r--drivers/thunderbolt/acpi.c28
-rw-r--r--drivers/thunderbolt/cap.c49
-rw-r--r--drivers/thunderbolt/clx.c12
-rw-r--r--drivers/thunderbolt/ctl.c35
-rw-r--r--drivers/thunderbolt/ctl.h1
-rw-r--r--drivers/thunderbolt/debugfs.c7
-rw-r--r--drivers/thunderbolt/dma_port.c21
-rw-r--r--drivers/thunderbolt/domain.c75
-rw-r--r--drivers/thunderbolt/eeprom.c6
-rw-r--r--drivers/thunderbolt/icm.c8
-rw-r--r--drivers/thunderbolt/lc.c60
-rw-r--r--drivers/thunderbolt/nhi.c24
-rw-r--r--drivers/thunderbolt/nhi.h1
-rw-r--r--drivers/thunderbolt/nhi_regs.h6
-rw-r--r--drivers/thunderbolt/nvm.c42
-rw-r--r--drivers/thunderbolt/path.c14
-rw-r--r--drivers/thunderbolt/property.c38
-rw-r--r--drivers/thunderbolt/retimer.c9
-rw-r--r--drivers/thunderbolt/switch.c146
-rw-r--r--drivers/thunderbolt/tb.c48
-rw-r--r--drivers/thunderbolt/tb.h59
-rw-r--r--drivers/thunderbolt/tb_regs.h6
-rw-r--r--drivers/thunderbolt/tmu.c20
-rw-r--r--drivers/thunderbolt/tunnel.c104
-rw-r--r--drivers/thunderbolt/tunnel.h9
-rw-r--r--drivers/thunderbolt/usb4.c372
-rw-r--r--drivers/thunderbolt/usb4_port.c7
-rw-r--r--drivers/thunderbolt/xdomain.c57
-rw-r--r--drivers/tty/amiserial.c14
-rw-r--r--drivers/tty/hvc/hvc_console.c2
-rw-r--r--drivers/tty/moxa.c169
-rw-r--r--drivers/tty/mxser.c259
-rw-r--r--drivers/tty/n_gsm.c25
-rw-r--r--drivers/tty/n_hdlc.c79
-rw-r--r--drivers/tty/n_tty.c109
-rw-r--r--drivers/tty/pty.c154
-rw-r--r--drivers/tty/serdev/core.c11
-rw-r--r--drivers/tty/serial/8250/8250.h18
-rw-r--r--drivers/tty/serial/8250/8250_core.c97
-rw-r--r--drivers/tty/serial/8250/8250_dw.c8
-rw-r--r--drivers/tty/serial/8250/8250_exar.c15
-rw-r--r--drivers/tty/serial/8250/8250_keba.c280
-rw-r--r--drivers/tty/serial/8250/8250_loongson.c238
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c6
-rw-r--r--drivers/tty/serial/8250/8250_of.c2
-rw-r--r--drivers/tty/serial/8250/8250_omap.c181
-rw-r--r--drivers/tty/serial/8250/8250_pci.c48
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c10
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.c7
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.h2
-rw-r--r--drivers/tty/serial/8250/8250_platform.c142
-rw-r--r--drivers/tty/serial/8250/8250_port.c298
-rw-r--r--drivers/tty/serial/8250/8250_rsa.c57
-rw-r--r--drivers/tty/serial/8250/Kconfig40
-rw-r--r--drivers/tty/serial/8250/Makefile4
-rw-r--r--drivers/tty/serial/Kconfig16
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c62
-rw-r--r--drivers/tty/serial/fsl_lpuart.c8
-rw-r--r--drivers/tty/serial/icom.c9
-rw-r--r--drivers/tty/serial/imx.c24
-rw-r--r--drivers/tty/serial/ip22zilog.c352
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c1
-rw-r--r--drivers/tty/serial/kgdboc.c1
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max310x.c28
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/mux.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c10
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c157
-rw-r--r--drivers/tty/serial/samsung_tty.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c422
-rw-r--r--drivers/tty/serial/sc16is7xx.h1
-rw-r--r--drivers/tty/serial/sc16is7xx_i2c.c4
-rw-r--r--drivers/tty/serial/sc16is7xx_spi.c4
-rw-r--r--drivers/tty/serial/serial_core.c311
-rw-r--r--drivers/tty/serial/sh-sci.c208
-rw-r--r--drivers/tty/serial/sh-sci.h178
-rw-r--r--drivers/tty/serial/sprd_serial.c6
-rw-r--r--drivers/tty/serial/xilinx_uartps.c15
-rw-r--r--drivers/tty/synclink_gt.c20
-rw-r--r--drivers/tty/sysrq.c3
-rw-r--r--drivers/tty/tty_buffer.c8
-rw-r--r--drivers/tty/tty_port.c168
-rw-r--r--drivers/tty/vt/consolemap.c116
-rw-r--r--drivers/tty/vt/keyboard.c318
-rw-r--r--drivers/tty/vt/selection.c29
-rw-r--r--drivers/tty/vt/vc_screen.c74
-rw-r--r--drivers/tty/vt/vt.c251
-rw-r--r--drivers/tty/vt/vt_ioctl.c194
-rw-r--r--drivers/ufs/core/Makefile1
-rw-r--r--drivers/ufs/core/ufs-mcq.c77
-rw-r--r--drivers/ufs/core/ufs-rpmb.c254
-rw-r--r--drivers/ufs/core/ufs-sysfs.c5
-rw-r--r--drivers/ufs/core/ufs_bsg.c2
-rw-r--r--drivers/ufs/core/ufs_trace.h2
-rw-r--r--drivers/ufs/core/ufs_trace_types.h23
-rw-r--r--drivers/ufs/core/ufshcd-crypto.h18
-rw-r--r--drivers/ufs/core/ufshcd-priv.h54
-rw-r--r--drivers/ufs/core/ufshcd.c1029
-rw-r--r--drivers/ufs/host/Kconfig13
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/ti-j721e-ufs.c37
-rw-r--r--drivers/ufs/host/ufs-amd-versal2.c564
-rw-r--r--drivers/ufs/host/ufs-exynos.c10
-rw-r--r--drivers/ufs/host/ufs-mediatek.c456
-rw-r--r--drivers/ufs/host/ufs-mediatek.h5
-rw-r--r--drivers/ufs/host/ufs-qcom.c244
-rw-r--r--drivers/ufs/host/ufs-qcom.h28
-rw-r--r--drivers/ufs/host/ufs-rockchip.c20
-rw-r--r--drivers/ufs/host/ufshcd-dwc.h46
-rw-r--r--drivers/ufs/host/ufshcd-pci.c70
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c33
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.h1
-rw-r--r--drivers/uio/Kconfig14
-rw-r--r--drivers/uio/Makefile1
-rw-r--r--drivers/uio/uio_aec.c2
-rw-r--r--drivers/uio/uio_cif.c2
-rw-r--r--drivers/uio/uio_dmem_genirq.c23
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c7
-rw-r--r--drivers/uio/uio_hv_generic.c7
-rw-r--r--drivers/uio/uio_netx.c2
-rw-r--r--drivers/uio/uio_pci_generic_sva.c192
-rw-r--r--drivers/uio/uio_pdrv_genirq.c24
-rw-r--r--drivers/uio/uio_sercos3.c2
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c1
-rw-r--r--drivers/usb/cdns3/cdns3-pci-wrap.c5
-rw-r--r--drivers/usb/cdns3/cdns3-trace.h61
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c9
-rw-r--r--drivers/usb/cdns3/cdnsp-pci.c5
-rw-r--r--drivers/usb/cdns3/cdnsp-trace.h25
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c11
-rw-r--r--drivers/usb/chipidea/core.c4
-rw-r--r--drivers/usb/chipidea/otg_fsm.c1
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c12
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/usblp.c3
-rw-r--r--drivers/usb/class/usbtmc.c12
-rw-r--r--drivers/usb/core/Makefile6
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/driver.c58
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd.c8
-rw-r--r--drivers/usb/core/hub.c43
-rw-r--r--drivers/usb/core/message.c2
-rw-r--r--drivers/usb/core/offload.c136
-rw-r--r--drivers/usb/core/quirks.c2
-rw-r--r--drivers/usb/core/trace.c6
-rw-r--r--drivers/usb/core/trace.h61
-rw-r--r--drivers/usb/core/urb.c14
-rw-r--r--drivers/usb/core/usb.c53
-rw-r--r--drivers/usb/dwc2/params.c26
-rw-r--r--drivers/usb/dwc2/platform.c17
-rw-r--r--drivers/usb/dwc3/Kconfig22
-rw-r--r--drivers/usb/dwc3/Makefile2
-rw-r--r--drivers/usb/dwc3/core.c39
-rw-r--r--drivers/usb/dwc3/core.h26
-rw-r--r--drivers/usb/dwc3/debug.h18
-rw-r--r--drivers/usb/dwc3/debugfs.c12
-rw-r--r--drivers/usb/dwc3/drd.c2
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c1
-rw-r--r--drivers/usb/dwc3/dwc3-apple.c489
-rw-r--r--drivers/usb/dwc3/dwc3-generic-plat.c233
-rw-r--r--drivers/usb/dwc3/dwc3-imx8mp.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c81
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c175
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c1
-rw-r--r--drivers/usb/dwc3/ep0.c1
-rw-r--r--drivers/usb/dwc3/gadget.c12
-rw-r--r--drivers/usb/dwc3/glue.h157
-rw-r--r--drivers/usb/dwc3/host.c7
-rw-r--r--drivers/usb/dwc3/trace.h17
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_acm.c42
-rw-r--r--drivers/usb/gadget/function/f_ecm.c48
-rw-r--r--drivers/usb/gadget/function/f_eem.c7
-rw-r--r--drivers/usb/gadget/function/f_fs.c158
-rw-r--r--drivers/usb/gadget/function/f_hid.c7
-rw-r--r--drivers/usb/gadget/function/f_ncm.c81
-rw-r--r--drivers/usb/gadget/function/f_rndis.c85
-rw-r--r--drivers/usb/gadget/function/uvc.h5
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c8
-rw-r--r--drivers/usb/gadget/legacy/inode.c51
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c3
-rw-r--r--drivers/usb/gadget/legacy/zero.c27
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-gadget.c1
-rw-r--r--drivers/usb/gadget/udc/cdns2/cdns2-trace.h69
-rw-r--r--drivers/usb/gadget/udc/core.c21
-rw-r--r--drivers/usb/gadget/udc/renesas_usbf.c4
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c9
-rw-r--r--drivers/usb/gadget/udc/trace.h5
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-platform.c40
-rw-r--r--drivers/usb/host/max3421-hcd.c2
-rw-r--r--drivers/usb/host/ohci-da8xx.c17
-rw-r--r--drivers/usb/host/ohci-platform.c24
-rw-r--r--drivers/usb/host/ohci-s3c2410.c8
-rw-r--r--drivers/usb/host/sl811-hcd.c1
-rw-r--r--drivers/usb/host/uhci-hcd.h1
-rw-r--r--drivers/usb/host/uhci-platform.c28
-rw-r--r--drivers/usb/host/xen-hcd.c4
-rw-r--r--drivers/usb/host/xhci-caps.h165
-rw-r--r--drivers/usb/host/xhci-dbgcap.c23
-rw-r--r--drivers/usb/host/xhci-dbgcap.h1
-rw-r--r--drivers/usb/host/xhci-dbgtty.c23
-rw-r--r--drivers/usb/host/xhci-debugfs.c57
-rw-r--r--drivers/usb/host/xhci-hub.c125
-rw-r--r--drivers/usb/host/xhci-mem.c120
-rw-r--r--drivers/usb/host/xhci-mtk.c1
-rw-r--r--drivers/usb/host/xhci-mtk.h10
-rw-r--r--drivers/usb/host/xhci-pci.c49
-rw-r--r--drivers/usb/host/xhci-plat.c57
-rw-r--r--drivers/usb/host/xhci-plat.h2
-rw-r--r--drivers/usb/host/xhci-port.h5
-rw-r--r--drivers/usb/host/xhci-rcar-regs.h49
-rw-r--r--drivers/usb/host/xhci-rcar.c100
-rw-r--r--drivers/usb/host/xhci-ring.c287
-rw-r--r--drivers/usb/host/xhci-rzg3e-regs.h12
-rw-r--r--drivers/usb/host/xhci-sideband.c124
-rw-r--r--drivers/usb/host/xhci-tegra.c97
-rw-r--r--drivers/usb/host/xhci-trace.h59
-rw-r--r--drivers/usb/host/xhci.c109
-rw-r--r--drivers/usb/host/xhci.h121
-rw-r--r--drivers/usb/misc/Kconfig20
-rw-r--r--drivers/usb/misc/Makefile1
-rw-r--r--drivers/usb/misc/apple-mfi-fastcharge.c1
-rw-r--r--drivers/usb/misc/chaoskey.c16
-rw-r--r--drivers/usb/misc/qcom_eud.c36
-rw-r--r--drivers/usb/misc/usb-ljca.c39
-rw-r--r--drivers/usb/misc/usb251xb.c108
-rw-r--r--drivers/usb/misc/usbio.c749
-rw-r--r--drivers/usb/mon/mon_bin.c14
-rw-r--r--drivers/usb/mtu3/mtu3.h34
-rw-r--r--drivers/usb/mtu3/mtu3_core.c2
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c1
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c2
-rw-r--r--drivers/usb/musb/musb_core.c5
-rw-r--r--drivers/usb/musb/musb_debugfs.c5
-rw-r--r--drivers/usb/musb/musb_dsps.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c3
-rw-r--r--drivers/usb/phy/phy.c4
-rw-r--r--drivers/usb/renesas_usbhs/common.c51
-rw-r--r--drivers/usb/serial/belkin_sa.c42
-rw-r--r--drivers/usb/serial/ftdi_sio.c201
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/kobil_sct.c210
-rw-r--r--drivers/usb/serial/option.c48
-rw-r--r--drivers/usb/serial/oti6858.c2
-rw-r--r--drivers/usb/storage/protocol.c3
-rw-r--r--drivers/usb/storage/realtek_cr.c4
-rw-r--r--drivers/usb/storage/sddr55.c6
-rw-r--r--drivers/usb/storage/transport.c16
-rw-r--r--drivers/usb/storage/uas.c30
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c4
-rw-r--r--drivers/usb/typec/anx7411.c3
-rw-r--r--drivers/usb/typec/class.c13
-rw-r--r--drivers/usb/typec/hd3ss3220.c75
-rw-r--r--drivers/usb/typec/mux/ps883x.c135
-rw-r--r--drivers/usb/typec/mux/tusb1046.c2
-rw-r--r--drivers/usb/typec/pd.c95
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c2
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c33
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c19
-rw-r--r--drivers/usb/typec/tipd/core.c564
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h5
-rw-r--r--drivers/usb/typec/tipd/trace.h39
-rw-r--r--drivers/usb/typec/ucsi/cros_ec_ucsi.c5
-rw-r--r--drivers/usb/typec/ucsi/debugfs.c68
-rw-r--r--drivers/usb/typec/ucsi/displayport.c11
-rw-r--r--drivers/usb/typec/ucsi/psy.c31
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c173
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h43
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c25
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c11
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c88
-rw-r--r--drivers/usb/typec/ucsi/ucsi_huawei_gaokun.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c15
-rw-r--r--drivers/usb/usbip/stub_tx.c9
-rw-r--r--drivers/usb/usbip/vhci_hcd.c118
-rw-r--r--drivers/vdpa/Kconfig8
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c5
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c5
-rw-r--r--drivers/vdpa/mlx5/core/mr.c4
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c23
-rw-r--r--drivers/vdpa/octeon_ep/octep_vdpa_main.c7
-rw-r--r--drivers/vdpa/pds/vdpa_dev.c7
-rw-r--r--drivers/vdpa/solidrun/snet_main.c8
-rw-r--r--drivers/vdpa/vdpa.c5
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c4
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c134
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h7
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c82
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c5
-rw-r--r--drivers/vfio/cdx/Makefile6
-rw-r--r--drivers/vfio/cdx/main.c29
-rw-r--r--drivers/vfio/cdx/private.h14
-rw-r--r--drivers/vfio/debugfs.c19
-rw-r--r--drivers/vfio/device_cdev.c2
-rw-r--r--drivers/vfio/fsl-mc/Kconfig5
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c45
-rw-r--r--drivers/vfio/group.c28
-rw-r--r--drivers/vfio/pci/Kconfig5
-rw-r--r--drivers/vfio/pci/Makefile3
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c177
-rw-r--r--drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.h23
-rw-r--r--drivers/vfio/pci/mlx5/main.c1
-rw-r--r--drivers/vfio/pci/nvgrace-gpu/main.c346
-rw-r--r--drivers/vfio/pci/pds/dirty.c2
-rw-r--r--drivers/vfio/pci/pds/lm.c3
-rw-r--r--drivers/vfio/pci/pds/vfio_dev.c1
-rw-r--r--drivers/vfio/pci/qat/main.c1
-rw-r--r--drivers/vfio/pci/vfio_pci.c6
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c23
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c300
-rw-r--r--drivers/vfio/pci/vfio_pci_dmabuf.c350
-rw-r--r--drivers/vfio/pci/vfio_pci_intrs.c61
-rw-r--r--drivers/vfio/pci/vfio_pci_priv.h28
-rw-r--r--drivers/vfio/pci/virtio/common.h5
-rw-r--r--drivers/vfio/pci/virtio/legacy_io.c38
-rw-r--r--drivers/vfio/pci/virtio/main.c5
-rw-r--r--drivers/vfio/pci/virtio/migrate.c3
-rw-r--r--drivers/vfio/pci/xe/Kconfig12
-rw-r--r--drivers/vfio/pci/xe/Makefile3
-rw-r--r--drivers/vfio/pci/xe/main.c573
-rw-r--r--drivers/vfio/platform/Kconfig5
-rw-r--r--drivers/vfio/platform/reset/Kconfig6
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_amdxgbe.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_bcmflexrm.c2
-rw-r--r--drivers/vfio/platform/reset/vfio_platform_calxedaxgmac.c2
-rw-r--r--drivers/vfio/platform/vfio_amba.c3
-rw-r--r--drivers/vfio/platform/vfio_platform.c1
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c40
-rw-r--r--drivers/vfio/platform/vfio_platform_private.h3
-rw-r--r--drivers/vfio/vfio_iommu_type1.c285
-rw-r--r--drivers/vfio/vfio_main.c73
-rw-r--r--drivers/vhost/net.c122
-rw-r--r--drivers/vhost/scsi.c11
-rw-r--r--drivers/vhost/test.c10
-rw-r--r--drivers/vhost/vdpa.c6
-rw-r--r--drivers/vhost/vhost.c80
-rw-r--r--drivers/vhost/vhost.h52
-rw-r--r--drivers/vhost/vringh.c14
-rw-r--r--drivers/vhost/vsock.c10
-rw-r--r--drivers/video/backlight/Kconfig9
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/apple_dwi_bl.c1
-rw-r--r--drivers/video/backlight/as3711_bl.c1
-rw-r--r--drivers/video/backlight/aw99706.c471
-rw-r--r--drivers/video/backlight/backlight.c1
-rw-r--r--drivers/video/backlight/da9052_bl.c1
-rw-r--r--drivers/video/backlight/jornada720_bl.c1
-rw-r--r--drivers/video/backlight/ktd2801-backlight.c1
-rw-r--r--drivers/video/backlight/led_bl.c18
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/mp3309c.c14
-rw-r--r--drivers/video/backlight/rave-sp-backlight.c2
-rw-r--r--drivers/video/backlight/rt4831-backlight.c1
-rw-r--r--drivers/video/fbdev/Kconfig23
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c8
-rw-r--r--drivers/video/fbdev/core/Kconfig2
-rw-r--r--drivers/video/fbdev/core/bitblit.c155
-rw-r--r--drivers/video/fbdev/core/fb_cmdline.c2
-rw-r--r--drivers/video/fbdev/core/fb_fillrect.h3
-rw-r--r--drivers/video/fbdev/core/fbcon.c509
-rw-r--r--drivers/video/fbdev/core/fbcon.h17
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c151
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c151
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.c47
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.h18
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c167
-rw-r--r--drivers/video/fbdev/core/fbmem.c1
-rw-r--r--drivers/video/fbdev/core/fbmon.c7
-rw-r--r--drivers/video/fbdev/core/softcursor.c18
-rw-r--r--drivers/video/fbdev/core/tileblit.c32
-rw-r--r--drivers/video/fbdev/gbefb.c5
-rw-r--r--drivers/video/fbdev/gxt4500.c2
-rw-r--r--drivers/video/fbdev/hyperv_fb.c2
-rw-r--r--drivers/video/fbdev/i810/i810_main.c46
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c3
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/video/fbdev/pxafb.c15
-rw-r--r--drivers/video/fbdev/s3fb.c177
-rw-r--r--drivers/video/fbdev/simplefb.c37
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/tcx.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c4
-rw-r--r--drivers/video/fbdev/valkyriefb.c2
-rw-r--r--drivers/video/fbdev/vesafb.c29
-rw-r--r--drivers/video/fbdev/vga16fb.c21
-rw-r--r--drivers/video/fbdev/xen-fbfront.c2
-rw-r--r--drivers/video/screen_info_generic.c55
-rw-r--r--drivers/virt/Kconfig4
-rw-r--r--drivers/virt/coco/Kconfig5
-rw-r--r--drivers/virt/coco/Makefile1
-rw-r--r--drivers/virt/coco/efi_secret/Kconfig2
-rw-r--r--drivers/virt/coco/tsm-core.c163
-rw-r--r--drivers/virtio/virtio.c12
-rw-r--r--drivers/virtio/virtio_balloon.c15
-rw-r--r--drivers/virtio/virtio_debug.c10
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c6
-rw-r--r--drivers/virtio/virtio_ring.c464
-rw-r--r--drivers/virtio/virtio_vdpa.c24
-rw-r--r--drivers/w1/masters/matrox_w1.c10
-rw-r--r--drivers/w1/masters/omap_hdq.c5
-rw-r--r--drivers/w1/slaves/w1_ds28e17.c4
-rw-r--r--drivers/w1/w1.c20
-rw-r--r--drivers/watchdog/Kconfig23
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/aspeed_wdt.c30
-rw-r--r--drivers/watchdog/diag288_wdt.c9
-rw-r--r--drivers/watchdog/intel_oc_wdt.c8
-rw-r--r--drivers/watchdog/loongson1_wdt.c89
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/nct6694_wdt.c307
-rw-r--r--drivers/watchdog/renesas_wwdt.c163
-rw-r--r--drivers/watchdog/rzg2l_wdt.c4
-rw-r--r--drivers/watchdog/rzv2h_wdt.c150
-rw-r--r--drivers/watchdog/s3c2410_wdt.c46
-rw-r--r--drivers/watchdog/starfive-wdt.c4
-rw-r--r--drivers/watchdog/via_wdt.c1
-rw-r--r--drivers/watchdog/visconti_wdt.c5
-rw-r--r--drivers/watchdog/wdat_wdt.c64
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/events/events_base.c37
-rw-r--r--drivers/xen/gntdev-dmabuf.c7
-rw-r--r--drivers/xen/gntdev-dmabuf.h2
-rw-r--r--drivers/xen/gntdev.c38
-rw-r--r--drivers/xen/grant-dma-ops.c20
-rw-r--r--drivers/xen/grant-table.c8
-rw-r--r--drivers/xen/manage.c14
-rw-r--r--drivers/xen/privcmd.c14
-rw-r--r--drivers/xen/pvcalls-back.c4
-rw-r--r--drivers/xen/swiotlb-xen.c44
-rw-r--r--drivers/xen/unpopulated-alloc.c4
-rw-r--r--drivers/xen/xen-acpi-processor.c12
-rw-r--r--drivers/xen/xenbus/xenbus_client.c2
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c20
-rw-r--r--drivers/xen/xenfs/super.c2
-rw-r--r--drivers/zorro/names.c12
8880 files changed, 561703 insertions, 171565 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 4915a63866b0..c0f1fb893ec0 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -161,6 +161,8 @@ source "drivers/greybus/Kconfig"

source "drivers/comedi/Kconfig"

+source "drivers/gpib/Kconfig"
+
source "drivers/staging/Kconfig"

source "drivers/platform/Kconfig"
@@ -251,4 +253,6 @@ source "drivers/hte/Kconfig"

source "drivers/cdx/Kconfig"

+source "drivers/resctrl/Kconfig"
+
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index b5749cf67044..ccc05f1eae3e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -150,6 +150,7 @@ obj-$(CONFIG_VHOST_IOTLB) += vhost/
obj-$(CONFIG_VHOST) += vhost/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_COMEDI) += comedi/
+obj-$(CONFIG_GPIB) += gpib/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/

@@ -160,8 +161,8 @@ obj-$(CONFIG_RPMSG) += rpmsg/
obj-$(CONFIG_SOUNDWIRE) += soundwire/

# Virtualization drivers
-obj-$(CONFIG_VIRT_DRIVERS) += virt/
-obj-$(subst m,y,$(CONFIG_HYPERV)) += hv/
+obj-y += virt/
+obj-$(CONFIG_HYPERV) += hv/

obj-$(CONFIG_PM_DEVFREQ) += devfreq/
obj-$(CONFIG_EXTCON) += extcon/
@@ -194,5 +195,7 @@ obj-$(CONFIG_HTE) += hte/
obj-$(CONFIG_DRM_ACCEL) += accel/
obj-$(CONFIG_CDX_BUS) += cdx/
obj-$(CONFIG_DPLL) += dpll/
+obj-y += resctrl/
+obj-$(CONFIG_DIBS) += dibs/

obj-$(CONFIG_S390) += s390/
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index 5b9490367a39..bdf48ccafcf2 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -25,8 +25,10 @@ menuconfig DRM_ACCEL
and debugfs).

source "drivers/accel/amdxdna/Kconfig"
+source "drivers/accel/ethosu/Kconfig"
source "drivers/accel/habanalabs/Kconfig"
source "drivers/accel/ivpu/Kconfig"
source "drivers/accel/qaic/Kconfig"
+source "drivers/accel/rocket/Kconfig"

endif
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
index a301fb6089d4..1d3a7251b950 100644
--- a/drivers/accel/Makefile
+++ b/drivers/accel/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only

obj-$(CONFIG_DRM_ACCEL_AMDXDNA) += amdxdna/
+obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) += ethosu/
obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/
obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/
obj-$(CONFIG_DRM_ACCEL_QAIC) += qaic/
+obj-$(CONFIG_DRM_ACCEL_ROCKET) += rocket/
\ No newline at end of file
diff --git a/drivers/accel/amdxdna/Makefile b/drivers/accel/amdxdna/Makefile
index 0e9adf6890a0..6344aaf523fa 100644
--- a/drivers/accel/amdxdna/Makefile
+++ b/drivers/accel/amdxdna/Makefile
@@ -14,7 +14,9 @@ amdxdna-y := \
amdxdna_mailbox.o \
amdxdna_mailbox_helper.o \
amdxdna_pci_drv.o \
+ amdxdna_pm.o \
amdxdna_sysfs.o \
+ amdxdna_ubuf.o \
npu1_regs.o \
npu2_regs.o \
npu4_regs.o \
diff --git a/drivers/accel/amdxdna/TODO b/drivers/accel/amdxdna/TODO
index ad8ac6e315b6..0e4bbebeaedf 100644
--- a/drivers/accel/amdxdna/TODO
+++ b/drivers/accel/amdxdna/TODO
@@ -1,2 +1 @@
- Add debugfs support
-- Add debug BO support
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index 2cff5419bd2f..42d876a427c5 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -21,6 +21,7 @@
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
static bool force_cmdlist;
module_param(force_cmdlist, bool, 0600);
@@ -46,6 +47,17 @@ static void aie2_job_put(struct amdxdna_sched_job *job)
kref_put(&job->refcnt, aie2_job_release);
}
+static void aie2_hwctx_status_shift_stop(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->old_status = hwctx->status;
+ hwctx->status = HWCTX_STAT_STOP;
+}
+
+static void aie2_hwctx_status_restore(struct amdxdna_hwctx *hwctx)
+{
+ hwctx->status = hwctx->old_status;
+}
+
/* bad_job is only set by aie2_sched_job_timedout(); all other callers pass NULL */
static void aie2_hwctx_stop(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hwctx,
struct drm_sched_job *bad_job)
@@ -77,7 +89,7 @@ static int aie2_hwctx_restart(struct amdxdna_dev *xdna, struct amdxdna_hwctx *hw
goto out;
}
- ret = aie2_config_cu(hwctx);
+ ret = aie2_config_cu(hwctx, NULL);
if (ret) {
XDNA_ERR(xdna, "Config cu failed, ret %d", ret);
goto out;
@@ -89,25 +101,6 @@ out:
return ret;
}
-void aie2_restart_ctx(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
- if (hwctx->status != HWCTX_STAT_STOP)
- continue;
-
- hwctx->status = hwctx->old_status;
- XDNA_DBG(xdna, "Resetting %s", hwctx->name);
- aie2_hwctx_restart(xdna, hwctx);
- }
- mutex_unlock(&client->hwctx_lock);
-}
-
static struct dma_fence *aie2_cmd_get_out_fence(struct amdxdna_hwctx *hwctx, u64 seq)
{
struct dma_fence *fence, *out_fence = NULL;
@@ -141,34 +134,46 @@ static void aie2_hwctx_wait_for_idle(struct amdxdna_hwctx *hwctx)
dma_fence_put(fence);
}
-void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx)
+static int aie2_hwctx_suspend_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
+ aie2_hwctx_wait_for_idle(hwctx);
+ aie2_hwctx_stop(xdna, hwctx, NULL);
+ aie2_hwctx_status_shift_stop(hwctx);
+
+ return 0;
+}
+
+void aie2_hwctx_suspend(struct amdxdna_client *client)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+
/*
* Command timeout is unlikely. But if it happens, it doesn't
* break the system. aie2_hwctx_stop() will destroy mailbox
* and abort all commands.
*/
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- aie2_hwctx_wait_for_idle(hwctx);
- aie2_hwctx_stop(xdna, hwctx, NULL);
- hwctx->old_status = hwctx->status;
- hwctx->status = HWCTX_STAT_STOP;
+ amdxdna_hwctx_walk(client, NULL, aie2_hwctx_suspend_cb);
}
-void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx)
+static int aie2_hwctx_resume_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
+ aie2_hwctx_status_restore(hwctx);
+ return aie2_hwctx_restart(xdna, hwctx);
+}
+
+int aie2_hwctx_resume(struct amdxdna_client *client)
+{
/*
* The resume path cannot guarantee that the mailbox channel is
* regenerated. If it is not, submitting a message to this
* mailbox channel will return an error.
*/
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- hwctx->status = hwctx->old_status;
- aie2_hwctx_restart(xdna, hwctx);
+ return amdxdna_hwctx_walk(client, NULL, aie2_hwctx_resume_cb);
}
static void
@@ -177,12 +182,13 @@ aie2_sched_notify(struct amdxdna_sched_job *job)
struct dma_fence *fence = job->fence;
trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
+
+ amdxdna_pm_suspend_put(job->hwctx->client->xdna);
job->hwctx->priv->completed++;
dma_fence_signal(fence);
up(&job->hwctx->priv->job_sem);
job->job_done = true;
- dma_fence_put(fence);
mmput_async(job->mm);
aie2_job_put(job);
}
@@ -192,15 +198,18 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
struct amdxdna_gem_obj *cmd_abo;
- u32 ret = 0;
+ int ret = 0;
u32 status;
cmd_abo = job->cmd_bo;
- if (unlikely(!data))
+ if (unlikely(job->job_timeout)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
+ ret = -EINVAL;
goto out;
+ }
- if (unlikely(size != sizeof(u32))) {
+ if (unlikely(!data) || unlikely(size != sizeof(u32))) {
amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
ret = -EINVAL;
goto out;
@@ -219,11 +228,10 @@ out:
}
static int
-aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
+aie2_sched_drvcmd_resp_handler(void *handle, void __iomem *data, size_t size)
{
struct amdxdna_sched_job *job = handle;
- u32 ret = 0;
- u32 status;
+ int ret = 0;
if (unlikely(!data))
goto out;
@@ -233,8 +241,7 @@ aie2_sched_nocmd_resp_handler(void *handle, void __iomem *data, size_t size)
goto out;
}
- status = readl(data);
- XDNA_DBG(job->hwctx->client->xdna, "Resp status 0x%x", status);
+ job->drv_cmd->result = readl(data);
out:
aie2_sched_notify(job);
@@ -250,9 +257,16 @@ aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
u32 fail_cmd_status;
u32 fail_cmd_idx;
u32 cmd_status;
- u32 ret = 0;
+ int ret = 0;
cmd_abo = job->cmd_bo;
+
+ if (unlikely(job->job_timeout)) {
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
ret = -EINVAL;
@@ -307,8 +321,18 @@ aie2_sched_job_run(struct drm_sched_job *sched_job)
kref_get(&job->refcnt);
fence = dma_fence_get(job->fence);
- if (unlikely(!cmd_abo)) {
- ret = aie2_sync_bo(hwctx, job, aie2_sched_nocmd_resp_handler);
+ if (job->drv_cmd) {
+ switch (job->drv_cmd->opcode) {
+ case SYNC_DEBUG_BO:
+ ret = aie2_sync_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
+ break;
+ case ATTACH_DEBUG_BO:
+ ret = aie2_config_debug_bo(hwctx, job, aie2_sched_drvcmd_resp_handler);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
goto out;
}
@@ -355,6 +379,7 @@ aie2_sched_job_timedout(struct drm_sched_job *sched_job)
xdna = hwctx->client->xdna;
trace_xdna_job(sched_job, hwctx->name, "job timedout", job->seq);
+ job->job_timeout = true;
mutex_lock(&xdna->dev_lock);
aie2_hwctx_stop(xdna, hwctx, sched_job);
@@ -524,13 +549,12 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
.num_rqs = DRM_SCHED_PRIORITY_COUNT,
.credit_limit = HWCTX_MAX_CMDS,
.timeout = msecs_to_jiffies(HWCTX_MAX_TIMEOUT),
- .name = hwctx->name,
+ .name = "amdxdna_js",
.dev = xdna->ddev.dev,
};
struct drm_gpu_scheduler *sched;
struct amdxdna_hwctx_priv *priv;
struct amdxdna_gem_obj *heap;
- struct amdxdna_dev_hdl *ndev;
int i, ret;
priv = kzalloc(sizeof(*hwctx->priv), GFP_KERNEL);
@@ -603,10 +627,14 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
goto free_entity;
}
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto free_col_list;
+
ret = aie2_alloc_resource(hwctx);
if (ret) {
XDNA_ERR(xdna, "Alloc hw resource failed, ret %d", ret);
- goto free_col_list;
+ goto suspend_put;
}
ret = aie2_map_host_buf(xdna->dev_handle, hwctx->fw_ctx_id,
@@ -621,10 +649,9 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
XDNA_ERR(xdna, "Create syncobj failed, ret %d", ret);
goto release_resource;
}
+ amdxdna_pm_suspend_put(xdna);
hwctx->status = HWCTX_STAT_INIT;
- ndev = xdna->dev_handle;
- ndev->hwctx_num++;
init_waitqueue_head(&priv->job_free_wq);
XDNA_DBG(xdna, "hwctx %s init completed", hwctx->name);
@@ -633,6 +660,8 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
release_resource:
aie2_release_resource(hwctx);
+suspend_put:
+ amdxdna_pm_suspend_put(xdna);
free_col_list:
kfree(hwctx->col_list);
free_entity:
@@ -655,26 +684,25 @@ free_priv:
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
{
- struct amdxdna_dev_hdl *ndev;
struct amdxdna_dev *xdna;
int idx;
xdna = hwctx->client->xdna;
- ndev = xdna->dev_handle;
- ndev->hwctx_num--;
XDNA_DBG(xdna, "%s sequence number %lld", hwctx->name, hwctx->priv->seq);
- drm_sched_entity_destroy(&hwctx->priv->entity);
-
aie2_hwctx_wait_for_idle(hwctx);
/* Request fw to destroy hwctx and cancel the remaining pending requests */
aie2_release_resource(hwctx);
+ mutex_unlock(&xdna->dev_lock);
+ drm_sched_entity_destroy(&hwctx->priv->entity);
+
/* Wait for all submitted jobs to be completed or canceled */
wait_event(hwctx->priv->job_free_wq,
atomic64_read(&hwctx->job_submit_cnt) ==
atomic64_read(&hwctx->job_free_cnt));
+ mutex_lock(&xdna->dev_lock);
drm_sched_fini(&hwctx->priv->sched);
aie2_ctx_syncobj_destroy(hwctx);
@@ -690,6 +718,14 @@ void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx)
kfree(hwctx->cus);
}
+static int aie2_config_cu_resp_handler(void *handle, void __iomem *data, size_t size)
+{
+ struct amdxdna_hwctx *hwctx = handle;
+
+ amdxdna_pm_suspend_put(hwctx->client->xdna);
+ return 0;
+}
+
static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size)
{
struct amdxdna_hwctx_param_config_cu *config = buf;
@@ -721,10 +757,14 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
if (!hwctx->cus)
return -ENOMEM;
- ret = aie2_config_cu(hwctx);
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto free_cus;
+
+ ret = aie2_config_cu(hwctx, aie2_config_cu_resp_handler);
if (ret) {
XDNA_ERR(xdna, "Config CU to firmware failed, ret %d", ret);
- goto free_cus;
+ goto pm_suspend_put;
}
wmb(); /* To avoid locking in command submit when checking status */
@@ -732,12 +772,82 @@ static int aie2_hwctx_cu_config(struct amdxdna_hwctx *hwctx, void *buf, u32 size
return 0;
+pm_suspend_put:
+ amdxdna_pm_suspend_put(xdna);
free_cus:
kfree(hwctx->cus);
hwctx->cus = NULL;
return ret;
}
+static void aie2_cmd_wait(struct amdxdna_hwctx *hwctx, u64 seq)
+{
+ struct dma_fence *out_fence = aie2_cmd_get_out_fence(hwctx, seq);
+
+ if (!out_fence) {
+ XDNA_ERR(hwctx->client->xdna, "Failed to get fence");
+ return;
+ }
+
+ dma_fence_wait_timeout(out_fence, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(out_fence);
+}
+
+static int aie2_hwctx_cfg_debug_bo(struct amdxdna_hwctx *hwctx, u32 bo_hdl,
+ bool attach)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drv_cmd cmd = { 0 };
+ struct amdxdna_gem_obj *abo;
+ u64 seq;
+ int ret;
+
+ abo = amdxdna_gem_get_obj(client, bo_hdl, AMDXDNA_BO_DEV);
+ if (!abo) {
+ XDNA_ERR(xdna, "Get bo %d failed", bo_hdl);
+ return -EINVAL;
+ }
+
+ if (attach) {
+ if (abo->assigned_hwctx != AMDXDNA_INVALID_CTX_HANDLE) {
+ ret = -EBUSY;
+ goto put_obj;
+ }
+ cmd.opcode = ATTACH_DEBUG_BO;
+ } else {
+ if (abo->assigned_hwctx != hwctx->id) {
+ ret = -EINVAL;
+ goto put_obj;
+ }
+ cmd.opcode = DETACH_DEBUG_BO;
+ }
+
+ ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
+ &bo_hdl, 1, hwctx->id, &seq);
+ if (ret) {
+ XDNA_ERR(xdna, "Submit command failed");
+ goto put_obj;
+ }
+
+ aie2_cmd_wait(hwctx, seq);
+ if (cmd.result) {
+ XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
+ goto put_obj;
+ }
+
+ if (attach)
+ abo->assigned_hwctx = hwctx->id;
+ else
+ abo->assigned_hwctx = AMDXDNA_INVALID_CTX_HANDLE;
+
+ XDNA_DBG(xdna, "Config debug BO %d to %s", bo_hdl, hwctx->name);
+
+put_obj:
+ amdxdna_gem_put_obj(abo);
+ return ret;
+}
+
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size)
{
struct amdxdna_dev *xdna = hwctx->client->xdna;
@@ -747,14 +857,40 @@ int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *bu
case DRM_AMDXDNA_HWCTX_CONFIG_CU:
return aie2_hwctx_cu_config(hwctx, buf, size);
case DRM_AMDXDNA_HWCTX_ASSIGN_DBG_BUF:
+ return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, true);
case DRM_AMDXDNA_HWCTX_REMOVE_DBG_BUF:
- return -EOPNOTSUPP;
+ return aie2_hwctx_cfg_debug_bo(hwctx, (u32)value, false);
default:
XDNA_DBG(xdna, "Not supported type %d", type);
return -EOPNOTSUPP;
}
}
+int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl)
+{
+ struct amdxdna_client *client = hwctx->client;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_drv_cmd cmd = { 0 };
+ u64 seq;
+ int ret;
+
+ cmd.opcode = SYNC_DEBUG_BO;
+ ret = amdxdna_cmd_submit(client, &cmd, AMDXDNA_INVALID_BO_HANDLE,
+ &debug_bo_hdl, 1, hwctx->id, &seq);
+ if (ret) {
+ XDNA_ERR(xdna, "Submit command failed");
+ return ret;
+ }
+
+ aie2_cmd_wait(hwctx, seq);
+ if (cmd.result) {
+ XDNA_ERR(xdna, "Response failure 0x%x", cmd.result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int aie2_populate_range(struct amdxdna_gem_obj *abo)
{
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
@@ -855,11 +991,15 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
goto free_chain;
}
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto cleanup_job;
+
retry:
ret = drm_gem_lock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
if (ret) {
XDNA_WARN(xdna, "Failed to lock BOs, ret %d", ret);
- goto cleanup_job;
+ goto suspend_put;
}
for (i = 0; i < job->bo_cnt; i++) {
@@ -867,7 +1007,7 @@ retry:
if (ret) {
XDNA_WARN(xdna, "Failed to reserve fences %d", ret);
drm_gem_unlock_reservations(job->bos, job->bo_cnt, &acquire_ctx);
- goto cleanup_job;
+ goto suspend_put;
}
}
@@ -882,12 +1022,12 @@ retry:
msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
} else if (time_after(jiffies, timeout)) {
ret = -ETIME;
- goto cleanup_job;
+ goto suspend_put;
}
ret = aie2_populate_range(abo);
if (ret)
- goto cleanup_job;
+ goto suspend_put;
goto retry;
}
}
@@ -913,6 +1053,8 @@ retry:
return 0;
+suspend_put:
+ amdxdna_pm_suspend_put(xdna);
cleanup_job:
drm_sched_job_cleanup(&job->base);
free_chain:
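
The suspend/resume rework in this file replaces open-coded per-context loops with amdxdna_hwctx_walk() plus small callbacks (aie2_hwctx_suspend_cb/aie2_hwctx_resume_cb). Below is a minimal sketch of that walk-with-callback idiom; the list and lock names are illustrative assumptions, not the driver's actual layout, and the real walker may well use SRCU rather than a plain mutex.

#include <linux/list.h>
#include <linux/mutex.h>

/* Illustrative sketch only: ctx_list/ctx_lock are assumed names. */
struct demo_client {
	struct mutex ctx_lock;
	struct list_head ctx_list;
};

struct demo_hwctx {
	struct list_head node;
};

static int demo_hwctx_walk(struct demo_client *client, void *arg,
			   int (*cb)(struct demo_hwctx *hwctx, void *arg))
{
	struct demo_hwctx *hwctx;
	int ret = 0;

	mutex_lock(&client->ctx_lock);
	list_for_each_entry(hwctx, &client->ctx_list, node) {
		ret = cb(hwctx, arg);	/* e.g. aie2_hwctx_suspend_cb */
		if (ret)
			break;		/* stop on the first failure */
	}
	mutex_unlock(&client->ctx_lock);

	return ret;
}

The callbacks keep per-context policy (wait for idle, stop, shift status) in one place while the walker owns the locking, which is why aie2_hwctx_suspend()/aie2_hwctx_resume() above shrink to one-line walks.
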
diff --git a/drivers/accel/amdxdna/aie2_error.c b/drivers/accel/amdxdna/aie2_error.c
index 5ee905632a39..d452008ec4f4 100644
--- a/drivers/accel/amdxdna/aie2_error.c
+++ b/drivers/accel/amdxdna/aie2_error.c
@@ -13,6 +13,7 @@
#include "aie2_msg_priv.h"
#include "aie2_pci.h"
+#include "amdxdna_error.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
@@ -46,6 +47,7 @@ enum aie_module_type {
AIE_MEM_MOD = 0,
AIE_CORE_MOD,
AIE_PL_MOD,
+ AIE_UNKNOWN_MOD,
};
enum aie_error_category {
@@ -143,6 +145,31 @@ static const struct aie_event_category aie_ml_shim_tile_event_cat[] = {
EVENT_CATEGORY(74U, AIE_ERROR_LOCK),
};
+static const enum amdxdna_error_num aie_cat_err_num_map[] = {
+ [AIE_ERROR_SATURATION] = AMDXDNA_ERROR_NUM_AIE_SATURATION,
+ [AIE_ERROR_FP] = AMDXDNA_ERROR_NUM_AIE_FP,
+ [AIE_ERROR_STREAM] = AMDXDNA_ERROR_NUM_AIE_STREAM,
+ [AIE_ERROR_ACCESS] = AMDXDNA_ERROR_NUM_AIE_ACCESS,
+ [AIE_ERROR_BUS] = AMDXDNA_ERROR_NUM_AIE_BUS,
+ [AIE_ERROR_INSTRUCTION] = AMDXDNA_ERROR_NUM_AIE_INSTRUCTION,
+ [AIE_ERROR_ECC] = AMDXDNA_ERROR_NUM_AIE_ECC,
+ [AIE_ERROR_LOCK] = AMDXDNA_ERROR_NUM_AIE_LOCK,
+ [AIE_ERROR_DMA] = AMDXDNA_ERROR_NUM_AIE_DMA,
+ [AIE_ERROR_MEM_PARITY] = AMDXDNA_ERROR_NUM_AIE_MEM_PARITY,
+ [AIE_ERROR_UNKNOWN] = AMDXDNA_ERROR_NUM_UNKNOWN,
+};
+
+static_assert(ARRAY_SIZE(aie_cat_err_num_map) == AIE_ERROR_UNKNOWN + 1);
+
+static const enum amdxdna_error_module aie_err_mod_map[] = {
+ [AIE_MEM_MOD] = AMDXDNA_ERROR_MODULE_AIE_MEMORY,
+ [AIE_CORE_MOD] = AMDXDNA_ERROR_MODULE_AIE_CORE,
+ [AIE_PL_MOD] = AMDXDNA_ERROR_MODULE_AIE_PL,
+ [AIE_UNKNOWN_MOD] = AMDXDNA_ERROR_MODULE_UNKNOWN,
+};
+
+static_assert(ARRAY_SIZE(aie_err_mod_map) == AIE_UNKNOWN_MOD + 1);
+
static enum aie_error_category
aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
{
@@ -176,12 +203,40 @@ aie_get_error_category(u8 row, u8 event_id, enum aie_module_type mod_type)
if (event_id != lut[i].event_id)
continue;
+ if (lut[i].category > AIE_ERROR_UNKNOWN)
+ return AIE_ERROR_UNKNOWN;
+
return lut[i].category;
}
return AIE_ERROR_UNKNOWN;
}
+static void aie2_update_last_async_error(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
+{
+ struct aie_error *errs = err_info;
+ enum amdxdna_error_module err_mod;
+ enum aie_error_category aie_err;
+ enum amdxdna_error_num err_num;
+ struct aie_error *last_err;
+
+ last_err = &errs[num_err - 1];
+ if (last_err->mod_type >= AIE_UNKNOWN_MOD) {
+ err_num = aie_cat_err_num_map[AIE_ERROR_UNKNOWN];
+ err_mod = aie_err_mod_map[AIE_UNKNOWN_MOD];
+ } else {
+ aie_err = aie_get_error_category(last_err->row,
+ last_err->event_id,
+ last_err->mod_type);
+ err_num = aie_cat_err_num_map[aie_err];
+ err_mod = aie_err_mod_map[last_err->mod_type];
+ }
+
+ ndev->last_async_err.err_code = AMDXDNA_ERROR_ENCODE(err_num, err_mod);
+ ndev->last_async_err.ts_us = ktime_to_us(ktime_get_real());
+ ndev->last_async_err.ex_err_code = AMDXDNA_EXTRA_ERR_ENCODE(last_err->row, last_err->col);
+}
+
static u32 aie2_error_backtrack(struct amdxdna_dev_hdl *ndev, void *err_info, u32 num_err)
{
struct aie_error *errs = err_info;
@@ -264,29 +319,14 @@ static void aie2_error_worker(struct work_struct *err_work)
}
mutex_lock(&xdna->dev_lock);
+ aie2_update_last_async_error(e->ndev, info->payload, info->err_cnt);
+
/* Re-send this event to firmware */
if (aie2_error_event_send(e))
XDNA_WARN(xdna, "Unable to register async event");
mutex_unlock(&xdna->dev_lock);
}
-int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev)
-{
- struct amdxdna_dev *xdna = ndev->xdna;
- struct async_event *e;
- int i, ret;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- for (i = 0; i < ndev->async_events->event_cnt; i++) {
- e = &ndev->async_events->event[i];
- ret = aie2_error_event_send(e);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev)
{
struct amdxdna_dev *xdna = ndev->xdna;
@@ -341,6 +381,10 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
e->size = ASYNC_BUF_SIZE;
e->resp.status = MAX_AIE2_STATUS_CODE;
INIT_WORK(&e->work, aie2_error_worker);
+
+ ret = aie2_error_event_send(e);
+ if (ret)
+ goto free_wq;
}
ndev->async_events = events;
@@ -349,6 +393,8 @@ int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev)
events->event_cnt, events->size);
return 0;
+free_wq:
+ destroy_workqueue(events->wq);
free_buf:
dma_free_noncoherent(xdna->ddev.dev, events->size, events->buf,
events->addr, DMA_FROM_DEVICE);
@@ -356,3 +402,18 @@ free_events:
kfree(events);
return ret;
}
+
+int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev, struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = ndev->xdna;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ args->num_element = 1;
+ args->element_size = sizeof(ndev->last_async_err);
+ if (copy_to_user(u64_to_user_ptr(args->buffer),
+ &ndev->last_async_err, args->element_size))
+ return -EFAULT;
+
+ return 0;
+}
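
The two translation tables added above (aie_cat_err_num_map, aie_err_mod_map) use designated initializers indexed by enum, with a static_assert() so a source enum that grows without a matching map entry fails the build. A compact sketch of the same idiom, with made-up enum names (only the pattern mirrors the driver):

#include <linux/build_bug.h>
#include <linux/kernel.h>

/* Illustrative enums; not the driver's actual error codes. */
enum demo_src { DEMO_SRC_A, DEMO_SRC_B, DEMO_SRC_UNKNOWN };
enum demo_dst { DEMO_DST_UNKNOWN, DEMO_DST_A, DEMO_DST_B };

static const enum demo_dst demo_map[] = {
	[DEMO_SRC_A]       = DEMO_DST_A,
	[DEMO_SRC_B]       = DEMO_DST_B,
	[DEMO_SRC_UNKNOWN] = DEMO_DST_UNKNOWN,
};

/* Fails to compile if the map and the source enum fall out of sync. */
static_assert(ARRAY_SIZE(demo_map) == DEMO_SRC_UNKNOWN + 1);

static enum demo_dst demo_translate(unsigned int src)
{
	/* Clamp out-of-range values to the UNKNOWN slot, as
	 * aie2_update_last_async_error() does for mod_type.
	 */
	if (src >= ARRAY_SIZE(demo_map))
		src = DEMO_SRC_UNKNOWN;

	return demo_map[src];
}
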
diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
index 82412eec9a4b..d493bb1c3360 100644
--- a/drivers/accel/amdxdna/aie2_message.c
+++ b/drivers/accel/amdxdna/aie2_message.c
@@ -27,6 +27,8 @@
#define DECLARE_AIE2_MSG(name, op) \
DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE)
+#define EXEC_MSG_OPS(xdna) ((xdna)->dev_handle->exec_msg_ops)
+
static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
struct xdna_mailbox_msg *msg)
{
@@ -37,7 +39,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
if (!ndev->mgmt_chann)
return -ENODEV;
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+ drm_WARN_ON(&xdna->ddev, xdna->rpm_on && !mutex_is_locked(&xdna->dev_lock));
ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
if (ret == -ETIME) {
xdna_mailbox_stop_channel(ndev->mgmt_chann);
@@ -45,7 +47,7 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
ndev->mgmt_chann = NULL;
}
- if (!ret && *hdl->data != AIE2_STATUS_SUCCESS) {
+ if (!ret && *hdl->status != AIE2_STATUS_SUCCESS) {
XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
msg->opcode, *hdl->data);
ret = -EINVAL;
@@ -208,6 +210,14 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct
hwctx->fw_ctx_id = resp.context_id;
WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id");
+ if (ndev->force_preempt_enabled) {
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FORCE_PREEMPT, &hwctx->fw_ctx_id);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to enable force preempt %d", ret);
+ return ret;
+ }
+ }
+
cq_pair = &resp.cq_pair[0];
x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr);
x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr);
@@ -233,6 +243,7 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct
ret = -EINVAL;
goto out_destroy_context;
}
+ ndev->hwctx_num++;
XDNA_DBG(xdna, "%s mailbox channel irq: %d, msix_id: %d",
hwctx->name, ret, resp.msix_id);
@@ -267,6 +278,7 @@ int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwc
hwctx->fw_ctx_id);
hwctx->priv->mbox_chann = NULL;
hwctx->fw_ctx_id = -1;
+ ndev->hwctx_num--;
return ret;
}
@@ -290,18 +302,25 @@ int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u6
return 0;
}
+static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ u32 *bitmap = arg;
+
+ *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col);
+
+ return 0;
+}
+
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
u32 size, u32 *cols_filled)
{
DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
struct amdxdna_dev *xdna = ndev->xdna;
struct amdxdna_client *client;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
dma_addr_t dma_addr;
u32 aie_bitmap = 0;
u8 *buff_addr;
- int ret, idx;
+ int ret;
buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
DMA_FROM_DEVICE, GFP_KERNEL);
@@ -309,12 +328,8 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
return -ENOMEM;
/* Go through each hardware context and mark the AIE columns that are active */
- list_for_each_entry(client, &xdna->client_list, node) {
- idx = srcu_read_lock(&client->hwctx_srcu);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- aie_bitmap |= amdxdna_hwctx_col_map(hwctx);
- srcu_read_unlock(&client->hwctx_srcu, idx);
- }
+ list_for_each_entry(client, &xdna->client_list, node)
+ amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map);
*cols_filled = 0;
req.dump_buff_addr = dma_addr;
@@ -329,11 +344,6 @@ int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
goto fail;
}
- if (resp.status != AIE2_STATUS_SUCCESS) {
- XDNA_ERR(xdna, "Query NPU status failed, status 0x%x", resp.status);
- ret = -EINVAL;
- goto fail;
- }
XDNA_DBG(xdna, "Query NPU status completed");
if (size < resp.size) {
@@ -355,6 +365,55 @@ fail:
return ret;
}
+int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev,
+ char __user *buf, u32 size,
+ struct amdxdna_drm_query_telemetry_header *header)
+{
+ DECLARE_AIE2_MSG(get_telemetry, MSG_OP_GET_TELEMETRY);
+ struct amdxdna_dev *xdna = ndev->xdna;
+ dma_addr_t dma_addr;
+ u8 *addr;
+ int ret;
+
+ if (header->type >= MAX_TELEMETRY_TYPE)
+ return -EINVAL;
+
+ addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
+ DMA_FROM_DEVICE, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ req.buf_addr = dma_addr;
+ req.buf_size = size;
+ req.type = header->type;
+
+ drm_clflush_virt_range(addr, size); /* device can access */
+ ret = aie2_send_mgmt_msg_wait(ndev, &msg);
+ if (ret) {
+ XDNA_ERR(xdna, "Query telemetry failed, status %d", ret);
+ goto free_buf;
+ }
+
+ if (size < resp.size) {
+ ret = -EINVAL;
+ XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size);
+ goto free_buf;
+ }
+
+ if (copy_to_user(buf, addr, resp.size)) {
+ ret = -EFAULT;
+ XDNA_ERR(xdna, "Failed to copy telemetry to user space");
+ goto free_buf;
+ }
+
+ header->major = resp.major;
+ header->minor = resp.minor;
+
+free_buf:
+ dma_free_noncoherent(xdna->ddev.dev, size, addr, dma_addr, DMA_FROM_DEVICE);
+ return ret;
+}
+
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
void *handle, int (*cb)(void*, void __iomem *, size_t))
{
@@ -374,15 +433,17 @@ int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr,
return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
}
-int aie2_config_cu(struct amdxdna_hwctx *hwctx)
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
struct amdxdna_dev *xdna = hwctx->client->xdna;
u32 shift = xdna->dev_info->dev_mem_buf_shift;
- DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU);
+ struct config_cu_req req = { 0 };
+ struct xdna_mailbox_msg msg;
struct drm_gem_object *gobj;
struct amdxdna_gem_obj *abo;
- int ret, i;
+ int i;
if (!chann)
return -ENODEV;
@@ -420,191 +481,386 @@ int aie2_config_cu(struct amdxdna_hwctx *hwctx)
}
req.num_cus = hwctx->cus->num_cus;
- ret = xdna_send_msg_wait(xdna, chann, &msg);
- if (ret == -ETIME)
- aie2_destroy_context(xdna->dev_handle, hwctx);
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.handle = hwctx;
+ msg.opcode = MSG_OP_CONFIG_CU;
+ msg.notify_cb = notify_cb;
+ return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+}
- if (resp.status == AIE2_STATUS_SUCCESS) {
- XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret);
- return 0;
- }
+static int aie2_init_exec_cu_req(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op)
+{
+ struct execute_buffer_req *cu_req = req;
+ u32 cmd_len;
+ void *cmd;
- XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d",
- msg.opcode, resp.status, ret);
- return ret;
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ if (cmd_len > sizeof(cu_req->payload))
+ return -EINVAL;
+
+ cu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (cu_req->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ memcpy(cu_req->payload, cmd, cmd_len);
+
+ *size = sizeof(*cu_req);
+ *msg_op = MSG_OP_EXECUTE_BUFFER_CF;
+ return 0;
}
-int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
- int (*notify_cb)(void *, void __iomem *, size_t))
+static int aie2_init_exec_dpu_req(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op)
{
- struct mailbox_channel *chann = hwctx->priv->mbox_chann;
- struct amdxdna_dev *xdna = hwctx->client->xdna;
- struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
- union {
- struct execute_buffer_req ebuf;
- struct exec_dpu_req dpu;
- } req;
- struct xdna_mailbox_msg msg;
- u32 payload_len;
- void *payload;
- int cu_idx;
- int ret;
- u32 op;
+ struct exec_dpu_req *dpu_req = req;
+ struct amdxdna_cmd_start_npu *sn;
+ u32 cmd_len;
- if (!chann)
- return -ENODEV;
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ if (cmd_len - sizeof(*sn) > sizeof(dpu_req->payload))
+ return -EINVAL;
- payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
- if (!payload) {
- XDNA_ERR(xdna, "Invalid command, cannot get payload");
+ dpu_req->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (dpu_req->cu_idx == INVALID_CU_IDX)
return -EINVAL;
- }
- cu_idx = amdxdna_cmd_get_cu_idx(cmd_abo);
- if (cu_idx < 0) {
- XDNA_DBG(xdna, "Invalid cu idx");
+ dpu_req->inst_buf_addr = sn->buffer;
+ dpu_req->inst_size = sn->buffer_size;
+ dpu_req->inst_prop_cnt = sn->prop_count;
+ memcpy(dpu_req->payload, sn->prop_args, cmd_len - sizeof(*sn));
+
+ *size = sizeof(*dpu_req);
+ *msg_op = MSG_OP_EXEC_DPU;
+ return 0;
+}
+
+static void aie2_init_exec_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt)
+{
+ struct cmd_chain_req *chain_req = req;
+
+ chain_req->buf_addr = slot_addr;
+ chain_req->buf_size = size;
+ chain_req->count = cmd_cnt;
+}
+
+static void aie2_init_npu_chain_req(void *req, u64 slot_addr, size_t size, u32 cmd_cnt)
+{
+ struct cmd_chain_npu_req *npu_chain_req = req;
+
+ npu_chain_req->flags = 0;
+ npu_chain_req->reserved = 0;
+ npu_chain_req->buf_addr = slot_addr;
+ npu_chain_req->buf_size = size;
+ npu_chain_req->count = cmd_cnt;
+}
+
+static int
+aie2_cmdlist_fill_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_execbuf_cf *cf_slot = slot;
+ u32 cmd_len;
+ void *cmd;
+
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ if (*size < sizeof(*cf_slot) + cmd_len)
return -EINVAL;
- }
- op = amdxdna_cmd_get_op(cmd_abo);
- switch (op) {
+ cf_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (cf_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ cf_slot->arg_cnt = cmd_len / sizeof(u32);
+ memcpy(cf_slot->args, cmd, cmd_len);
+ /* Report the accurate slot size so firmware copies only what is needed */
+ *size = sizeof(*cf_slot) + cmd_len;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_dpu *dpu_slot = slot;
+ struct amdxdna_cmd_start_npu *sn;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*sn);
+ if (cmd_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
+ return -EINVAL;
+
+ if (*size < sizeof(*dpu_slot) + arg_sz)
+ return -EINVAL;
+
+ dpu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (dpu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ dpu_slot->inst_buf_addr = sn->buffer;
+ dpu_slot->inst_size = sn->buffer_size;
+ dpu_slot->inst_prop_cnt = sn->prop_count;
+ dpu_slot->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(dpu_slot->args, sn->prop_args, arg_sz);
+
+ /* Report the accurate slot size so firmware copies only what is needed */
+ *size = sizeof(*dpu_slot) + arg_sz;
+ return 0;
+}
+
+static int aie2_cmdlist_unsupp(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ return -EOPNOTSUPP;
+}
+
+static u32 aie2_get_chain_msg_op(u32 cmd_op)
+{
+ switch (cmd_op) {
case ERT_START_CU:
- if (unlikely(payload_len > sizeof(req.ebuf.payload)))
- XDNA_DBG(xdna, "Invalid ebuf payload len: %d", payload_len);
- req.ebuf.cu_idx = cu_idx;
- memcpy(req.ebuf.payload, payload, sizeof(req.ebuf.payload));
- msg.send_size = sizeof(req.ebuf);
- msg.opcode = MSG_OP_EXECUTE_BUFFER_CF;
- break;
- case ERT_START_NPU: {
- struct amdxdna_cmd_start_npu *sn = payload;
-
- if (unlikely(payload_len - sizeof(*sn) > sizeof(req.dpu.payload)))
- XDNA_DBG(xdna, "Invalid dpu payload len: %d", payload_len);
- req.dpu.inst_buf_addr = sn->buffer;
- req.dpu.inst_size = sn->buffer_size;
- req.dpu.inst_prop_cnt = sn->prop_count;
- req.dpu.cu_idx = cu_idx;
- memcpy(req.dpu.payload, sn->prop_args, sizeof(req.dpu.payload));
- msg.send_size = sizeof(req.dpu);
- msg.opcode = MSG_OP_EXEC_DPU;
+ return MSG_OP_CHAIN_EXEC_BUFFER_CF;
+ case ERT_START_NPU:
+ return MSG_OP_CHAIN_EXEC_DPU;
+ default:
break;
}
- default:
- XDNA_DBG(xdna, "Invalid ERT cmd op code: %d", op);
+
+ return MSG_OP_MAX_OPCODE;
+}
+
+static struct aie2_exec_msg_ops legacy_exec_message_ops = {
+ .init_cu_req = aie2_init_exec_cu_req,
+ .init_dpu_req = aie2_init_exec_dpu_req,
+ .init_chain_req = aie2_init_exec_chain_req,
+ .fill_cf_slot = aie2_cmdlist_fill_cf,
+ .fill_dpu_slot = aie2_cmdlist_fill_dpu,
+ .fill_preempt_slot = aie2_cmdlist_unsupp,
+ .fill_elf_slot = aie2_cmdlist_unsupp,
+ .get_chain_msg_op = aie2_get_chain_msg_op,
+};
+
+static int
+aie2_cmdlist_fill_npu_cf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ u32 cmd_len;
+ void *cmd;
+
+ cmd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ if (*size < sizeof(*npu_slot) + cmd_len)
return -EINVAL;
- }
- msg.handle = job;
- msg.notify_cb = notify_cb;
- msg.send_data = (u8 *)&req;
- print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
- 0x40, false);
- ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
- if (ret) {
- XDNA_ERR(xdna, "Send message failed");
- return ret;
- }
+ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (npu_slot->cu_idx == INVALID_CU_IDX)
+ return -EINVAL;
+
+ npu_slot->type = EXEC_NPU_TYPE_NON_ELF;
+ npu_slot->arg_cnt = cmd_len / sizeof(u32);
+ memcpy(npu_slot->args, cmd, cmd_len);
+ *size = sizeof(*npu_slot) + cmd_len;
return 0;
}
static int
-aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
- struct amdxdna_gem_obj *abo, u32 *size)
+aie2_cmdlist_fill_npu_dpu(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
{
- struct cmd_chain_slot_execbuf_cf *buf = cmd_buf + offset;
- int cu_idx = amdxdna_cmd_get_cu_idx(abo);
- u32 payload_len;
- void *payload;
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ struct amdxdna_cmd_start_npu *sn;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ sn = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*sn);
+ if (cmd_len < sizeof(*sn) || arg_sz > MAX_NPU_ARGS_SIZE)
+ return -EINVAL;
- if (cu_idx < 0)
+ if (*size < sizeof(*npu_slot) + arg_sz)
return -EINVAL;
- payload = amdxdna_cmd_get_payload(abo, &payload_len);
- if (!payload)
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (npu_slot->cu_idx == INVALID_CU_IDX)
return -EINVAL;
- if (!slot_has_space(*buf, offset, payload_len))
- return -ENOSPC;
+ memset(npu_slot, 0, sizeof(*npu_slot));
+ /* memset() zeroed cu_idx; restore the value validated above */
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ npu_slot->type = EXEC_NPU_TYPE_PARTIAL_ELF;
+ npu_slot->inst_buf_addr = sn->buffer;
+ npu_slot->inst_size = sn->buffer_size;
+ npu_slot->inst_prop_cnt = sn->prop_count;
+ npu_slot->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(npu_slot->args, sn->prop_args, arg_sz);
- buf->cu_idx = cu_idx;
- buf->arg_cnt = payload_len / sizeof(u32);
- memcpy(buf->args, payload, payload_len);
- /* Accurate buf size to hint firmware to do necessary copy */
- *size = sizeof(*buf) + payload_len;
+ *size = sizeof(*npu_slot) + arg_sz;
return 0;
}
static int
-aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
- struct amdxdna_gem_obj *abo, u32 *size)
+aie2_cmdlist_fill_npu_preempt(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
{
- struct cmd_chain_slot_dpu *buf = cmd_buf + offset;
- int cu_idx = amdxdna_cmd_get_cu_idx(abo);
- struct amdxdna_cmd_start_npu *sn;
- u32 payload_len;
- void *payload;
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ struct amdxdna_cmd_preempt_data *pd;
+ u32 cmd_len;
u32 arg_sz;
- if (cu_idx < 0)
+ pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*pd);
+ if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
return -EINVAL;
- payload = amdxdna_cmd_get_payload(abo, &payload_len);
- if (!payload)
+ if (*size < sizeof(*npu_slot) + arg_sz)
return -EINVAL;
- sn = payload;
- arg_sz = payload_len - sizeof(*sn);
- if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
+
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ if (npu_slot->cu_idx == INVALID_CU_IDX)
return -EINVAL;
- if (!slot_has_space(*buf, offset, arg_sz))
- return -ENOSPC;
+ memset(npu_slot, 0, sizeof(*npu_slot));
+ /* memset() zeroed cu_idx; restore the value validated above */
+ npu_slot->cu_idx = amdxdna_cmd_get_cu_idx(cmd_bo);
+ npu_slot->type = EXEC_NPU_TYPE_PREEMPT;
+ npu_slot->inst_buf_addr = pd->inst_buf;
+ npu_slot->save_buf_addr = pd->save_buf;
+ npu_slot->restore_buf_addr = pd->restore_buf;
+ npu_slot->inst_size = pd->inst_size;
+ npu_slot->save_size = pd->save_size;
+ npu_slot->restore_size = pd->restore_size;
+ npu_slot->inst_prop_cnt = pd->inst_prop_cnt;
+ npu_slot->arg_cnt = arg_sz / sizeof(u32);
+ memcpy(npu_slot->args, pd->prop_args, arg_sz);
+
+ *size = sizeof(*npu_slot) + arg_sz;
+ return 0;
+}
+
+static int
+aie2_cmdlist_fill_npu_elf(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
+{
+ struct cmd_chain_slot_npu *npu_slot = slot;
+ struct amdxdna_cmd_preempt_data *pd;
+ u32 cmd_len;
+ u32 arg_sz;
+
+ pd = amdxdna_cmd_get_payload(cmd_bo, &cmd_len);
+ arg_sz = cmd_len - sizeof(*pd);
+ if (cmd_len < sizeof(*pd) || arg_sz > MAX_NPU_ARGS_SIZE)
+ return -EINVAL;
- buf->inst_buf_addr = sn->buffer;
- buf->inst_size = sn->buffer_size;
- buf->inst_prop_cnt = sn->prop_count;
- buf->cu_idx = cu_idx;
- buf->arg_cnt = arg_sz / sizeof(u32);
- memcpy(buf->args, sn->prop_args, arg_sz);
+ if (*size < sizeof(*npu_slot) + arg_sz)
+ return -EINVAL;
- /* Accurate buf size to hint firmware to do necessary copy */
- *size = sizeof(*buf) + arg_sz;
+ memset(npu_slot, 0, sizeof(*npu_slot));
+ npu_slot->type = EXEC_NPU_TYPE_ELF;
+ npu_slot->inst_buf_addr = pd->inst_buf;
+ npu_slot->save_buf_addr = pd->save_buf;
+ npu_slot->restore_buf_addr = pd->restore_buf;
+ npu_slot->inst_size = pd->inst_size;
+ npu_slot->save_size = pd->save_size;
+ npu_slot->restore_size = pd->restore_size;
+ npu_slot->inst_prop_cnt = pd->inst_prop_cnt;
+ npu_slot->arg_cnt = 1;
+ npu_slot->args[0] = AIE2_EXEC_BUFFER_KERNEL_OP_TXN;
+
+ *size = struct_size(npu_slot, args, npu_slot->arg_cnt);
return 0;
}
-static int
-aie2_cmdlist_fill_one_slot(u32 op, struct amdxdna_gem_obj *cmdbuf_abo, u32 offset,
- struct amdxdna_gem_obj *abo, u32 *size)
+static u32 aie2_get_npu_chain_msg_op(u32 cmd_op)
+{
+ return MSG_OP_CHAIN_EXEC_NPU;
+}
+
+static struct aie2_exec_msg_ops npu_exec_message_ops = {
+ .init_cu_req = aie2_init_exec_cu_req,
+ .init_dpu_req = aie2_init_exec_dpu_req,
+ .init_chain_req = aie2_init_npu_chain_req,
+ .fill_cf_slot = aie2_cmdlist_fill_npu_cf,
+ .fill_dpu_slot = aie2_cmdlist_fill_npu_dpu,
+ .fill_preempt_slot = aie2_cmdlist_fill_npu_preempt,
+ .fill_elf_slot = aie2_cmdlist_fill_npu_elf,
+ .get_chain_msg_op = aie2_get_npu_chain_msg_op,
+};
+
+static int aie2_init_exec_req(void *req, struct amdxdna_gem_obj *cmd_abo,
+ size_t *size, u32 *msg_op)
{
- u32 this_op = amdxdna_cmd_get_op(abo);
- void *cmd_buf = cmdbuf_abo->mem.kva;
+ struct amdxdna_dev *xdna = cmd_abo->client->xdna;
int ret;
+ u32 op;
- if (this_op != op) {
- ret = -EINVAL;
- goto done;
- }
+ op = amdxdna_cmd_get_op(cmd_abo);
switch (op) {
case ERT_START_CU:
- ret = aie2_cmdlist_fill_one_slot_cf(cmd_buf, offset, abo, size);
+ ret = EXEC_MSG_OPS(xdna)->init_cu_req(cmd_abo, req, size, msg_op);
+ if (ret) {
+ XDNA_DBG(xdna, "Init CU req failed ret %d", ret);
+ return ret;
+ }
break;
case ERT_START_NPU:
- ret = aie2_cmdlist_fill_one_slot_dpu(cmd_buf, offset, abo, size);
+ ret = EXEC_MSG_OPS(xdna)->init_dpu_req(cmd_abo, req, size, msg_op);
+ if (ret) {
+ XDNA_DBG(xdna, "Init DPU req failed ret %d", ret);
+ return ret;
+ }
+
break;
default:
+ XDNA_ERR(xdna, "Unsupported op %d", op);
ret = -EOPNOTSUPP;
+ break;
}
-done:
- if (ret) {
- XDNA_ERR(abo->client->xdna, "Can't fill slot for cmd op %d ret %d",
- op, ret);
+ return ret;
+}
+
+static int
+aie2_cmdlist_fill_slot(void *slot, struct amdxdna_gem_obj *cmd_abo,
+ size_t *size, u32 *cmd_op)
+{
+ struct amdxdna_dev *xdna = cmd_abo->client->xdna;
+ int ret;
+ u32 op;
+
+ op = amdxdna_cmd_get_op(cmd_abo);
+ if (*cmd_op == ERT_INVALID_CMD)
+ *cmd_op = op;
+ else if (op != *cmd_op)
+ return -EINVAL;
+
+ switch (op) {
+ case ERT_START_CU:
+ ret = EXEC_MSG_OPS(xdna)->fill_cf_slot(cmd_abo, slot, size);
+ break;
+ case ERT_START_NPU:
+ ret = EXEC_MSG_OPS(xdna)->fill_dpu_slot(cmd_abo, slot, size);
+ break;
+ case ERT_START_NPU_PREEMPT:
+ if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT))
+ return -EOPNOTSUPP;
+ ret = EXEC_MSG_OPS(xdna)->fill_preempt_slot(cmd_abo, slot, size);
+ break;
+ case ERT_START_NPU_PREEMPT_ELF:
+ if (!AIE2_FEATURE_ON(xdna->dev_handle, AIE2_PREEMPT))
+ return -EOPNOTSUPP;
+ ret = EXEC_MSG_OPS(xdna)->fill_elf_slot(cmd_abo, slot, size);
+ break;
+ default:
+ XDNA_INFO(xdna, "Unsupported op %d", op);
+ ret = -EOPNOTSUPP;
+ break;
}
+
return ret;
}
+void aie2_msg_init(struct amdxdna_dev_hdl *ndev)
+{
+ if (AIE2_FEATURE_ON(ndev, AIE2_NPU_COMMAND))
+ ndev->exec_msg_ops = &npu_exec_message_ops;
+ else
+ ndev->exec_msg_ops = &legacy_exec_message_ops;
+}
+
static inline struct amdxdna_gem_obj *
aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
{
@@ -613,29 +869,36 @@ aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
return job->hwctx->priv->cmd_buf[idx];
}
-static void
-aie2_cmdlist_prepare_request(struct cmd_chain_req *req,
- struct amdxdna_gem_obj *cmdbuf_abo, u32 size, u32 cnt)
+int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
{
- req->buf_addr = cmdbuf_abo->mem.dev_addr;
- req->buf_size = size;
- req->count = cnt;
- drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
- XDNA_DBG(cmdbuf_abo->client->xdna, "Command buf addr 0x%llx size 0x%x count %d",
- req->buf_addr, size, cnt);
-}
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct xdna_mailbox_msg msg;
+ union exec_req req;
+ int ret;
-static inline u32
-aie2_cmd_op_to_msg_op(u32 op)
-{
- switch (op) {
- case ERT_START_CU:
- return MSG_OP_CHAIN_EXEC_BUFFER_CF;
- case ERT_START_NPU:
- return MSG_OP_CHAIN_EXEC_DPU;
- default:
- return MSG_OP_MAX_OPCODE;
+ if (!chann)
+ return -ENODEV;
+
+ ret = aie2_init_exec_req(&req, cmd_abo, &msg.send_size, &msg.opcode);
+ if (ret)
+ return ret;
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
+ 0x40, false);
+
+ ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+ if (ret) {
+ XDNA_ERR(xdna, "Send message failed");
+ return ret;
}
+
+ return 0;
}
int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
@@ -646,12 +909,13 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
struct amdxdna_client *client = hwctx->client;
struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
+ struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_cmd_chain *payload;
struct xdna_mailbox_msg msg;
- struct cmd_chain_req req;
+ union exec_chain_req req;
u32 payload_len;
u32 offset = 0;
- u32 size;
+ size_t size;
int ret;
u32 op;
u32 i;
@@ -662,41 +926,42 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
payload_len < struct_size(payload, data, payload->command_count))
return -EINVAL;
+ op = ERT_INVALID_CMD;
for (i = 0; i < payload->command_count; i++) {
u32 boh = (u32)(payload->data[i]);
struct amdxdna_gem_obj *abo;
abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD);
if (!abo) {
- XDNA_ERR(client->xdna, "Failed to find cmd BO %d", boh);
+ XDNA_ERR(xdna, "Failed to find cmd BO %d", boh);
return -ENOENT;
}
- /* All sub-cmd should have same op, use the first one. */
- if (i == 0)
- op = amdxdna_cmd_get_op(abo);
-
- ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, offset, abo, &size);
+ size = cmdbuf_abo->mem.size - offset;
+ ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva + offset,
+ abo, &size, &op);
amdxdna_gem_put_obj(abo);
if (ret)
- return -EINVAL;
+ return ret;
offset += size;
}
+ msg.opcode = EXEC_MSG_OPS(xdna)->get_chain_msg_op(op);
+ if (msg.opcode == MSG_OP_MAX_OPCODE)
+ return -EOPNOTSUPP;
/* The offset is the accumulated total size of the cmd buffer */
- aie2_cmdlist_prepare_request(&req, cmdbuf_abo, offset, payload->command_count);
+ EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr,
+ offset, payload->command_count);
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, offset);
- msg.opcode = aie2_cmd_op_to_msg_op(op);
- if (msg.opcode == MSG_OP_MAX_OPCODE)
- return -EOPNOTSUPP;
msg.handle = job;
msg.notify_cb = notify_cb;
msg.send_data = (u8 *)&req;
msg.send_size = sizeof(req);
ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
if (ret) {
- XDNA_ERR(hwctx->client->xdna, "Send message failed");
+ XDNA_ERR(xdna, "Send message failed");
return ret;
}
@@ -709,23 +974,27 @@ int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
{
struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
struct xdna_mailbox_msg msg;
- struct cmd_chain_req req;
- u32 size;
+ union exec_chain_req req;
+ u32 op = ERT_INVALID_CMD;
+ size_t size;
int ret;
- u32 op;
- op = amdxdna_cmd_get_op(cmd_abo);
- ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, 0, cmd_abo, &size);
+ size = cmdbuf_abo->mem.size;
+ ret = aie2_cmdlist_fill_slot(cmdbuf_abo->mem.kva, cmd_abo, &size, &op);
if (ret)
return ret;
- aie2_cmdlist_prepare_request(&req, cmdbuf_abo, size, 1);
-
- msg.opcode = aie2_cmd_op_to_msg_op(op);
+ msg.opcode = EXEC_MSG_OPS(xdna)->get_chain_msg_op(op);
if (msg.opcode == MSG_OP_MAX_OPCODE)
return -EOPNOTSUPP;
+
+ EXEC_MSG_OPS(xdna)->init_chain_req(&req, cmdbuf_abo->mem.dev_addr,
+ size, 1);
+ drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
+
msg.handle = job;
msg.notify_cb = notify_cb;
msg.send_data = (u8 *)&req;
@@ -750,7 +1019,7 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int ret = 0;
req.src_addr = 0;
- req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr;
+ req.dst_addr = amdxdna_dev_bo_offset(abo);
req.size = abo->mem.size;
/* Device to Host */
@@ -774,3 +1043,32 @@ int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
return 0;
}
+
+int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t))
+{
+ struct mailbox_channel *chann = hwctx->priv->mbox_chann;
+ struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ struct config_debug_bo_req req;
+ struct xdna_mailbox_msg msg;
+
+ if (job->drv_cmd->opcode == ATTACH_DEBUG_BO)
+ req.config = DEBUG_BO_REGISTER;
+ else
+ req.config = DEBUG_BO_UNREGISTER;
+
+ req.offset = amdxdna_dev_bo_offset(abo);
+ req.size = abo->mem.size;
+
+ XDNA_DBG(xdna, "offset 0x%llx size 0x%llx config %d",
+ req.offset, req.size, req.config);
+
+ msg.handle = job;
+ msg.notify_cb = notify_cb;
+ msg.send_data = (u8 *)&req;
+ msg.send_size = sizeof(req);
+ msg.opcode = MSG_OP_CONFIG_DEBUG_BO;
+
+ return xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
+}
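
Two patterns recur throughout this file: aie2_msg_init() selects one of two exec_msg_ops function-pointer tables once at probe time, and every fill_*_slot callback follows the same in/out size contract — *size carries the space remaining in the command buffer on entry and the exact bytes consumed on return, so the caller can pack slots back to back. A hedged sketch of that contract under assumed names (the slot layout here is illustrative, not the firmware's):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative slot; only the *size in/out contract mirrors the code. */
struct demo_slot {
	u32 arg_cnt;
	u32 args[];
};

static int demo_fill_slot(const u32 *payload, u32 payload_len,
			  void *buf, size_t *size)
{
	struct demo_slot *slot = buf;

	if (*size < sizeof(*slot) + payload_len)
		return -EINVAL;		/* would overrun the command buffer */

	slot->arg_cnt = payload_len / sizeof(u32);
	memcpy(slot->args, payload, payload_len);

	/* Report the exact bytes consumed so the caller can advance. */
	*size = sizeof(*slot) + payload_len;
	return 0;
}

static int demo_fill_all(void *kva, size_t buf_size, const u32 **payloads,
			 const u32 *lens, u32 count)
{
	size_t offset = 0;
	size_t size;
	int ret;
	u32 i;

	for (i = 0; i < count; i++) {
		size = buf_size - offset;	/* space still available */
		ret = demo_fill_slot(payloads[i], lens[i],
				     kva + offset, &size);
		if (ret)
			return ret;
		offset += size;			/* advance by bytes used */
	}

	return 0;
}

This is the same shape as aie2_cmdlist_multi_execbuf() above: the accumulated offset becomes the buffer size handed to init_chain_req(), and a command that reports a smaller exact size lets firmware copy less.
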
diff --git a/drivers/accel/amdxdna/aie2_msg_priv.h b/drivers/accel/amdxdna/aie2_msg_priv.h
index 6df9065b13f6..1c957a6298d3 100644
--- a/drivers/accel/amdxdna/aie2_msg_priv.h
+++ b/drivers/accel/amdxdna/aie2_msg_priv.h
@@ -9,7 +9,8 @@
enum aie2_msg_opcode {
MSG_OP_CREATE_CONTEXT = 0x2,
MSG_OP_DESTROY_CONTEXT = 0x3,
- MSG_OP_SYNC_BO = 0x7,
+ MSG_OP_GET_TELEMETRY = 0x4,
+ MSG_OP_SYNC_BO = 0x7,
MSG_OP_EXECUTE_BUFFER_CF = 0xC,
MSG_OP_QUERY_COL_STATUS = 0xD,
MSG_OP_QUERY_AIE_TILE_INFO = 0xE,
@@ -18,6 +19,8 @@ enum aie2_msg_opcode {
MSG_OP_CONFIG_CU = 0x11,
MSG_OP_CHAIN_EXEC_BUFFER_CF = 0x12,
MSG_OP_CHAIN_EXEC_DPU = 0x13,
+ MSG_OP_CONFIG_DEBUG_BO = 0x14,
+ MSG_OP_CHAIN_EXEC_NPU = 0x18,
MSG_OP_MAX_XRT_OPCODE,
MSG_OP_SUSPEND = 0x101,
MSG_OP_RESUME = 0x102,
@@ -135,6 +138,28 @@ struct destroy_ctx_resp {
enum aie2_msg_status status;
} __packed;
+enum telemetry_type {
+ TELEMETRY_TYPE_DISABLED,
+ TELEMETRY_TYPE_HEALTH,
+ TELEMETRY_TYPE_ERROR_INFO,
+ TELEMETRY_TYPE_PROFILING,
+ TELEMETRY_TYPE_DEBUG,
+ MAX_TELEMETRY_TYPE
+};
+
+struct get_telemetry_req {
+ enum telemetry_type type;
+ __u64 buf_addr;
+ __u32 buf_size;
+} __packed;
+
+struct get_telemetry_resp {
+ __u32 major;
+ __u32 minor;
+ __u32 size;
+ enum aie2_msg_status status;
+} __packed;
+
struct execute_buffer_req {
__u32 cu_idx;
__u32 payload[19];
@@ -148,6 +173,18 @@ struct exec_dpu_req {
__u32 payload[35];
} __packed;
+enum exec_npu_type {
+ EXEC_NPU_TYPE_NON_ELF = 0x1,
+ EXEC_NPU_TYPE_PARTIAL_ELF = 0x2,
+ EXEC_NPU_TYPE_PREEMPT = 0x3,
+ EXEC_NPU_TYPE_ELF = 0x4,
+};
+
+union exec_req {
+ struct execute_buffer_req ebuf;
+ struct exec_dpu_req dpu_req;
+};
+
struct execute_buffer_resp {
enum aie2_msg_status status;
} __packed;
@@ -319,9 +356,6 @@ struct async_event_msg_resp {
} __packed;
#define MAX_CHAIN_CMDBUF_SIZE SZ_4K
-#define slot_has_space(slot, offset, payload_size) \
- (MAX_CHAIN_CMDBUF_SIZE >= (offset) + (payload_size) + \
- sizeof(typeof(slot)))
struct cmd_chain_slot_execbuf_cf {
__u32 cu_idx;
@@ -339,12 +373,41 @@ struct cmd_chain_slot_dpu {
__u32 args[] __counted_by(arg_cnt);
};
+#define MAX_NPU_ARGS_SIZE (26 * sizeof(__u32))
+#define AIE2_EXEC_BUFFER_KERNEL_OP_TXN 3
+struct cmd_chain_slot_npu {
+ enum exec_npu_type type;
+ u64 inst_buf_addr;
+ u64 save_buf_addr;
+ u64 restore_buf_addr;
+ u32 inst_size;
+ u32 save_size;
+ u32 restore_size;
+ u32 inst_prop_cnt;
+ u32 cu_idx;
+ u32 arg_cnt;
+ u32 args[] __counted_by(arg_cnt);
+} __packed;
+
struct cmd_chain_req {
__u64 buf_addr;
__u32 buf_size;
__u32 count;
} __packed;
+struct cmd_chain_npu_req {
+ u32 flags;
+ u32 reserved;
+ u64 buf_addr;
+ u32 buf_size;
+ u32 count;
+} __packed;
+
+union exec_chain_req {
+ struct cmd_chain_npu_req npu_req;
+ struct cmd_chain_req req;
+};
+
struct cmd_chain_resp {
enum aie2_msg_status status;
__u32 fail_cmd_idx;
@@ -365,4 +428,21 @@ struct sync_bo_req {
struct sync_bo_resp {
enum aie2_msg_status status;
} __packed;
+
+#define DEBUG_BO_UNREGISTER 0
+#define DEBUG_BO_REGISTER 1
+struct config_debug_bo_req {
+ __u64 offset;
+ __u64 size;
+ /*
+ * config operations.
+ * DEBUG_BO_REGISTER: Register debug buffer
+ * DEBUG_BO_UNREGISTER: Unregister debug buffer
+ */
+ __u32 config;
+} __packed;
+
+struct config_debug_bo_resp {
+ enum aie2_msg_status status;
+} __packed;
#endif /* _AIE2_MSG_PRIV_H_ */
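
struct cmd_chain_slot_npu above pairs a flexible array with __counted_by(arg_cnt), which lets FORTIFY_SOURCE/UBSAN bounds-check args[] at run time, while struct_size() computes the overflow-checked total size. A small sketch of the allocation side of this idiom; the structure below is hypothetical:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Hypothetical message; the annotation pattern matches the header above. */
struct demo_msg {
	u32 arg_cnt;
	u32 args[] __counted_by(arg_cnt);
};

static struct demo_msg *demo_msg_alloc(u32 cnt)
{
	struct demo_msg *msg;

	/* struct_size() saturates instead of wrapping on overflow. */
	msg = kzalloc(struct_size(msg, args, cnt), GFP_KERNEL);
	if (!msg)
		return NULL;

	/* Assign the counter before args[] is written so that the
	 * __counted_by() bounds checks see the right length.
	 */
	msg->arg_cnt = cnt;
	return msg;
}
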
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index c6cf7068d23c..ceef1c502e9e 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
+#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/iommu.h>
@@ -24,6 +25,7 @@
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
static int aie2_max_col = XRS_MAX_COL;
module_param(aie2_max_col, uint, 0600);
@@ -53,6 +55,7 @@ struct mgmt_mbox_chann_info {
static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 fw_minor)
{
+ const struct aie2_fw_feature_tbl *feature;
struct amdxdna_dev *xdna = ndev->xdna;
/*
@@ -76,6 +79,17 @@ static int aie2_check_protocol(struct amdxdna_dev_hdl *ndev, u32 fw_major, u32 f
XDNA_ERR(xdna, "Firmware minor version smaller than supported");
return -EINVAL;
}
+
+ for (feature = ndev->priv->fw_feature_tbl; feature && feature->min_minor;
+ feature++) {
+ if (fw_minor < feature->min_minor)
+ continue;
+ if (feature->max_minor > 0 && fw_minor > feature->max_minor)
+ continue;
+
+ set_bit(feature->feature, &ndev->feature_mask);
+ }
+
return 0;
}
@@ -169,6 +183,10 @@ int aie2_runtime_cfg(struct amdxdna_dev_hdl *ndev,
if (cfg->category != category)
continue;
+ if (cfg->feature_mask &&
+ bitmap_subset(&cfg->feature_mask, &ndev->feature_mask, AIE2_FEATURE_MAX))
+ continue;
+
value = val ? *val : cfg->value;
ret = aie2_set_runtime_cfg(ndev, cfg->type, value);
if (ret) {
@@ -222,15 +240,6 @@ static int aie2_mgmt_fw_init(struct amdxdna_dev_hdl *ndev)
return ret;
}
- if (!ndev->async_events)
- return 0;
-
- ret = aie2_error_async_events_send(ndev);
- if (ret) {
- XDNA_ERR(ndev->xdna, "Send async events failed");
- return ret;
- }
-
return 0;
}
@@ -256,6 +265,8 @@ static int aie2_mgmt_fw_query(struct amdxdna_dev_hdl *ndev)
return ret;
}
+ ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
+
return 0;
}
@@ -337,6 +348,7 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna)
ndev->mbox = NULL;
aie2_psp_stop(ndev->psp_hdl);
aie2_smu_fini(ndev);
+ aie2_error_async_events_free(ndev);
pci_disable_device(pdev);
ndev->dev_status = AIE2_DEV_INIT;
@@ -423,6 +435,18 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
goto destroy_mgmt_chann;
}
+ ret = aie2_mgmt_fw_query(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "failed to query fw, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
+ ret = aie2_error_async_events_alloc(ndev);
+ if (ret) {
+ XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
+ goto destroy_mgmt_chann;
+ }
+
ndev->dev_status = AIE2_DEV_START;
return 0;
@@ -440,6 +464,39 @@ disable_dev:
return ret;
}
+static int aie2_hw_suspend(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+
+ guard(mutex)(&xdna->dev_lock);
+ list_for_each_entry(client, &xdna->client_list, node)
+ aie2_hwctx_suspend(client);
+
+ aie2_hw_stop(xdna);
+
+ return 0;
+}
+
+static int aie2_hw_resume(struct amdxdna_dev *xdna)
+{
+ struct amdxdna_client *client;
+ int ret;
+
+ ret = aie2_hw_start(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Start hardware failed, %d", ret);
+ return ret;
+ }
+
+ list_for_each_entry(client, &xdna->client_list, node) {
+ ret = aie2_hwctx_resume(client);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
static int aie2_init(struct amdxdna_dev *xdna)
{
struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
@@ -520,22 +577,15 @@ static int aie2_init(struct amdxdna_dev *xdna)
if (!ndev->psp_hdl) {
XDNA_ERR(xdna, "failed to create psp");
ret = -ENOMEM;
- goto free_irq;
+ goto release_fw;
}
xdna->dev_handle = ndev;
ret = aie2_hw_start(xdna);
if (ret) {
XDNA_ERR(xdna, "start npu failed, ret %d", ret);
- goto free_irq;
- }
-
- ret = aie2_mgmt_fw_query(ndev);
- if (ret) {
- XDNA_ERR(xdna, "Query firmware failed, ret %d", ret);
- goto stop_hw;
+ goto release_fw;
}
- ndev->total_col = min(aie2_max_col, ndev->metadata.cols);
xrs_cfg.clk_list.num_levels = ndev->max_dpm_level + 1;
for (i = 0; i < xrs_cfg.clk_list.num_levels; i++)
@@ -552,34 +602,13 @@ static int aie2_init(struct amdxdna_dev *xdna)
goto stop_hw;
}
- ret = aie2_error_async_events_alloc(ndev);
- if (ret) {
- XDNA_ERR(xdna, "Allocate async events failed, ret %d", ret);
- goto stop_hw;
- }
-
- ret = aie2_error_async_events_send(ndev);
- if (ret) {
- XDNA_ERR(xdna, "Send async events failed, ret %d", ret);
- goto async_event_free;
- }
-
- /* Issue a command to make sure firmware handled async events */
- ret = aie2_query_firmware_version(ndev, &ndev->xdna->fw_ver);
- if (ret) {
- XDNA_ERR(xdna, "Re-query firmware version failed");
- goto async_event_free;
- }
-
release_firmware(fw);
+ aie2_msg_init(ndev);
+ amdxdna_pm_init(xdna);
return 0;
-async_event_free:
- aie2_error_async_events_free(ndev);
stop_hw:
aie2_hw_stop(xdna);
-free_irq:
- pci_free_irq_vectors(pdev);
release_fw:
release_firmware(fw);
@@ -588,12 +617,8 @@ release_fw:
static void aie2_fini(struct amdxdna_dev *xdna)
{
- struct pci_dev *pdev = to_pci_dev(xdna->ddev.dev);
- struct amdxdna_dev_hdl *ndev = xdna->dev_handle;
-
+ amdxdna_pm_fini(xdna);
aie2_hw_stop(xdna);
- aie2_error_async_events_free(ndev);
- pci_free_irq_vectors(pdev);
}
static int aie2_get_aie_status(struct amdxdna_client *client,
@@ -752,66 +777,182 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client,
return ret;
}
+static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
+ struct amdxdna_drm_get_array *array_args = arg;
+ struct amdxdna_drm_hwctx_entry __user *buf;
+ u32 size;
+
+ if (!array_args->num_element)
+ return -EINVAL;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->pid = hwctx->client->pid;
+ tmp->context_id = hwctx->id;
+ tmp->start_col = hwctx->start_col;
+ tmp->num_col = hwctx->num_col;
+ tmp->command_submissions = hwctx->priv->seq;
+ tmp->command_completions = hwctx->priv->completed;
+ tmp->pasid = hwctx->client->pasid;
+ tmp->priority = hwctx->qos.priority;
+ tmp->gops = hwctx->qos.gops;
+ tmp->fps = hwctx->qos.fps;
+ tmp->dma_bandwidth = hwctx->qos.dma_bandwidth;
+ tmp->latency = hwctx->qos.latency;
+ tmp->frame_exec_time = hwctx->qos.frame_exec_time;
+ tmp->state = AMDXDNA_HWCTX_STATE_ACTIVE;
+
+ buf = u64_to_user_ptr(array_args->buffer);
+ size = min(sizeof(*tmp), array_args->element_size);
+
+ if (copy_to_user(buf, tmp, size))
+ return -EFAULT;
+
+ array_args->buffer += size;
+ array_args->num_element--;
+
+ return 0;
+}
+
static int aie2_get_hwctx_status(struct amdxdna_client *client,
struct amdxdna_drm_get_info *args)
{
- struct amdxdna_drm_query_hwctx __user *buf;
+ struct amdxdna_drm_get_array array_args;
struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_drm_query_hwctx *tmp;
struct amdxdna_client *tmp_client;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
- bool overflow = false;
- u32 req_bytes = 0;
- u32 hw_i = 0;
- int ret = 0;
- int idx;
+ int ret;
drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
+ array_args.element_size = sizeof(struct amdxdna_drm_query_hwctx);
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->buffer_size / array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->buffer_size -= (u32)(array_args.buffer - args->buffer);
+ return 0;
+}
+
+static int aie2_query_resource_info(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_get_resource_info res_info;
+ const struct amdxdna_dev_priv *priv;
+ struct amdxdna_dev_hdl *ndev;
+ struct amdxdna_dev *xdna;
+
+ xdna = client->xdna;
+ ndev = xdna->dev_handle;
+ priv = ndev->priv;
+
+ res_info.npu_clk_max = priv->dpm_clk_tbl[ndev->max_dpm_level].hclk;
+ res_info.npu_tops_max = ndev->max_tops;
+ res_info.npu_task_max = priv->hwctx_limit;
+ res_info.npu_tops_curr = ndev->curr_tops;
+ res_info.npu_task_curr = ndev->hwctx_num;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &res_info, sizeof(res_info)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int aie2_fill_hwctx_map(struct amdxdna_hwctx *hwctx, void *arg)
+{
+ struct amdxdna_dev *xdna = hwctx->client->xdna;
+ u32 *map = arg;
+
+ if (hwctx->fw_ctx_id >= xdna->dev_handle->priv->hwctx_limit) {
+ XDNA_ERR(xdna, "Invalid fw ctx id %d/%d ", hwctx->fw_ctx_id,
+ xdna->dev_handle->priv->hwctx_limit);
+ return -EINVAL;
+ }
+
+ map[hwctx->fw_ctx_id] = hwctx->id;
+ return 0;
+}
+
+static int aie2_get_telemetry(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_query_telemetry_header *header __free(kfree) = NULL;
+ u32 telemetry_data_sz, header_sz, elem_num;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ elem_num = xdna->dev_handle->priv->hwctx_limit;
+ header_sz = struct_size(header, map, elem_num);
+ if (args->buffer_size <= header_sz) {
+ XDNA_ERR(xdna, "Invalid buffer size");
+ return -EINVAL;
+ }
+
+ telemetry_data_sz = args->buffer_size - header_sz;
+ if (telemetry_data_sz > SZ_4M) {
+ XDNA_ERR(xdna, "Buffer size is too big, %d", telemetry_data_sz);
+ return -EINVAL;
+ }
+
+ header = kzalloc(header_sz, GFP_KERNEL);
+ if (!header)
return -ENOMEM;
- buf = u64_to_user_ptr(args->buffer);
+ if (copy_from_user(header, u64_to_user_ptr(args->buffer), sizeof(*header))) {
+ XDNA_ERR(xdna, "Failed to copy telemetry header from user");
+ return -EFAULT;
+ }
+
+ header->map_num_elements = elem_num;
list_for_each_entry(tmp_client, &xdna->client_list, node) {
- idx = srcu_read_lock(&tmp_client->hwctx_srcu);
- amdxdna_for_each_hwctx(tmp_client, hwctx_id, hwctx) {
- req_bytes += sizeof(*tmp);
- if (args->buffer_size < req_bytes) {
- /* Continue iterating to get the required size */
- overflow = true;
- continue;
- }
-
- memset(tmp, 0, sizeof(*tmp));
- tmp->pid = tmp_client->pid;
- tmp->context_id = hwctx->id;
- tmp->start_col = hwctx->start_col;
- tmp->num_col = hwctx->num_col;
- tmp->command_submissions = hwctx->priv->seq;
- tmp->command_completions = hwctx->priv->completed;
-
- if (copy_to_user(&buf[hw_i], tmp, sizeof(*tmp))) {
- ret = -EFAULT;
- srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
- goto out;
- }
- hw_i++;
- }
- srcu_read_unlock(&tmp_client->hwctx_srcu, idx);
+ ret = amdxdna_hwctx_walk(tmp_client, &header->map,
+ aie2_fill_hwctx_map);
+ if (ret)
+ return ret;
}
- if (overflow) {
- XDNA_ERR(xdna, "Invalid buffer size. Given: %u Need: %u.",
- args->buffer_size, req_bytes);
- ret = -EINVAL;
+ ret = aie2_query_telemetry(xdna->dev_handle,
+ u64_to_user_ptr(args->buffer + header_sz),
+ telemetry_data_sz, header);
+ if (ret) {
+ XDNA_ERR(xdna, "Query telemetry failed ret %d", ret);
+ return ret;
}
-out:
- kfree(tmp);
- args->buffer_size = req_bytes;
- return ret;
+ if (copy_to_user(u64_to_user_ptr(args->buffer), header, header_sz)) {
+ XDNA_ERR(xdna, "Copy header failed");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int aie2_get_preempt_state(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_drm_attribute_state state = {};
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_dev_hdl *ndev;
+
+ ndev = xdna->dev_handle;
+ if (args->param == DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE)
+ state.state = ndev->force_preempt_enabled;
+ else if (args->param == DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE)
+ state.state = ndev->frame_boundary_preempt;
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &state, sizeof(state)))
+ return -EFAULT;
+
+ return 0;
}
static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_info *args)
@@ -822,6 +963,10 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
switch (args->param) {
case DRM_AMDXDNA_QUERY_AIE_STATUS:
ret = aie2_get_aie_status(client, args);
@@ -844,12 +989,93 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
case DRM_AMDXDNA_GET_POWER_MODE:
ret = aie2_get_power_mode(client, args);
break;
+ case DRM_AMDXDNA_QUERY_TELEMETRY:
+ ret = aie2_get_telemetry(client, args);
+ break;
+ case DRM_AMDXDNA_QUERY_RESOURCE_INFO:
+ ret = aie2_query_resource_info(client, args);
+ break;
+ case DRM_AMDXDNA_GET_FORCE_PREEMPT_STATE:
+ case DRM_AMDXDNA_GET_FRAME_BOUNDARY_PREEMPT_STATE:
+ ret = aie2_get_preempt_state(client, args);
+ break;
default:
XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
ret = -EOPNOTSUPP;
}
+
+ amdxdna_pm_suspend_put(xdna);
XDNA_DBG(xdna, "Got param %d", args->param);
+dev_exit:
+ drm_dev_exit(idx);
+ return ret;
+}
+
+static int aie2_query_ctx_status_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_drm_get_array array_args;
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_client *tmp_client;
+ int ret;
+
+ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
+
+ if (args->element_size > SZ_4K || args->num_element > SZ_1K) {
+ XDNA_DBG(xdna, "Invalid element size %d or number of element %d",
+ args->element_size, args->num_element);
+ return -EINVAL;
+ }
+
+ array_args.element_size = min(args->element_size,
+ sizeof(struct amdxdna_drm_hwctx_entry));
+ array_args.buffer = args->buffer;
+ array_args.num_element = args->num_element * args->element_size /
+ array_args.element_size;
+ list_for_each_entry(tmp_client, &xdna->client_list, node) {
+ ret = amdxdna_hwctx_walk(tmp_client, &array_args,
+ aie2_hwctx_status_cb);
+ if (ret)
+ break;
+ }
+
+ args->element_size = array_args.element_size;
+ args->num_element = (u32)((array_args.buffer - args->buffer) /
+ args->element_size);
+
+ return 0;
+}
+
+static int aie2_get_array(struct amdxdna_client *client,
+ struct amdxdna_drm_get_array *args)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ int ret, idx;
+
+ if (!drm_dev_enter(&xdna->ddev, &idx))
+ return -ENODEV;
+
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
+ switch (args->param) {
+ case DRM_AMDXDNA_HW_CONTEXT_ALL:
+ ret = aie2_query_ctx_status_array(client, args);
+ break;
+ case DRM_AMDXDNA_HW_LAST_ASYNC_ERR:
+ ret = aie2_get_array_async_error(xdna->dev_handle, args);
+ break;
+ default:
+ XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
+ ret = -EOPNOTSUPP;
+ }
+
+ amdxdna_pm_suspend_put(xdna);
+ XDNA_DBG(xdna, "Got param %d", args->param);
+
+dev_exit:
drm_dev_exit(idx);
return ret;
}
@@ -879,6 +1105,38 @@ static int aie2_set_power_mode(struct amdxdna_client *client,
return aie2_pm_set_mode(xdna->dev_handle, power_mode);
}
+static int aie2_set_preempt_state(struct amdxdna_client *client,
+ struct amdxdna_drm_set_state *args)
+{
+ struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle;
+ struct amdxdna_drm_attribute_state state;
+ u32 val;
+ int ret;
+
+ if (copy_from_user(&state, u64_to_user_ptr(args->buffer), sizeof(state)))
+ return -EFAULT;
+
+ if (state.state > 1)
+ return -EINVAL;
+
+ if (XDNA_MBZ_DBG(client->xdna, state.pad, sizeof(state.pad)))
+ return -EINVAL;
+
+ if (args->param == DRM_AMDXDNA_SET_FORCE_PREEMPT) {
+ ndev->force_preempt_enabled = state.state;
+ } else if (args->param == DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT) {
+ val = state.state;
+ ret = aie2_runtime_cfg(ndev, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
+ &val);
+ if (ret)
+ return ret;
+
+ ndev->frame_boundary_preempt = state.state;
+ }
+
+ return 0;
+}
+
static int aie2_set_state(struct amdxdna_client *client,
struct amdxdna_drm_set_state *args)
{
@@ -888,32 +1146,42 @@ static int aie2_set_state(struct amdxdna_client *client,
if (!drm_dev_enter(&xdna->ddev, &idx))
return -ENODEV;
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret)
+ goto dev_exit;
+
switch (args->param) {
case DRM_AMDXDNA_SET_POWER_MODE:
ret = aie2_set_power_mode(client, args);
break;
+ case DRM_AMDXDNA_SET_FORCE_PREEMPT:
+ case DRM_AMDXDNA_SET_FRAME_BOUNDARY_PREEMPT:
+ ret = aie2_set_preempt_state(client, args);
+ break;
default:
XDNA_ERR(xdna, "Not supported request parameter %u", args->param);
ret = -EOPNOTSUPP;
break;
}
+ amdxdna_pm_suspend_put(xdna);
+dev_exit:
drm_dev_exit(idx);
return ret;
}
const struct amdxdna_dev_ops aie2_ops = {
- .init = aie2_init,
- .fini = aie2_fini,
- .resume = aie2_hw_start,
- .suspend = aie2_hw_stop,
- .get_aie_info = aie2_get_info,
- .set_aie_state = aie2_set_state,
- .hwctx_init = aie2_hwctx_init,
- .hwctx_fini = aie2_hwctx_fini,
- .hwctx_config = aie2_hwctx_config,
- .cmd_submit = aie2_cmd_submit,
+ .init = aie2_init,
+ .fini = aie2_fini,
+ .resume = aie2_hw_resume,
+ .suspend = aie2_hw_suspend,
+ .get_aie_info = aie2_get_info,
+ .set_aie_state = aie2_set_state,
+ .hwctx_init = aie2_hwctx_init,
+ .hwctx_fini = aie2_hwctx_fini,
+ .hwctx_config = aie2_hwctx_config,
+ .hwctx_sync_debug_bo = aie2_hwctx_sync_debug_bo,
+ .cmd_submit = aie2_cmd_submit,
.hmm_invalidate = aie2_hmm_invalidate,
- .hwctx_suspend = aie2_hwctx_suspend,
- .hwctx_resume = aie2_hwctx_resume,
+ .get_array = aie2_get_array,
};
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
index 385914840eaa..a5f9c42155d1 100644
--- a/drivers/accel/amdxdna/aie2_pci.h
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -110,12 +110,15 @@ struct aie_metadata {
enum rt_config_category {
AIE2_RT_CFG_INIT,
AIE2_RT_CFG_CLK_GATING,
+ AIE2_RT_CFG_FORCE_PREEMPT,
+ AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT,
};
struct rt_config {
u32 type;
u32 value;
u32 category;
+ unsigned long feature_mask;
};
struct dpm_clk_freq {
@@ -156,6 +159,19 @@ enum aie2_dev_status {
AIE2_DEV_START,
};
+struct aie2_exec_msg_ops {
+ int (*init_cu_req)(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op);
+ int (*init_dpu_req)(struct amdxdna_gem_obj *cmd_bo, void *req,
+ size_t *size, u32 *msg_op);
+ void (*init_chain_req)(void *req, u64 slot_addr, size_t size, u32 cmd_cnt);
+ int (*fill_cf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ int (*fill_dpu_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ int (*fill_preempt_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ int (*fill_elf_slot)(struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size);
+ u32 (*get_chain_msg_op)(u32 cmd_op);
+};
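
The ops table decouples per-generation firmware message formats from the common submission path: the core routes a command opcode to the matching slot filler and lets the table supply the format. A minimal dispatch sketch under that assumption — fill_slot itself is hypothetical; the ops and ERT opcodes are the ones introduced by this patch:

/* Hypothetical dispatcher: route a command opcode to the generation's filler. */
static int fill_slot(struct amdxdna_dev_hdl *ndev, u32 op,
                     struct amdxdna_gem_obj *cmd_bo, void *slot, size_t *size)
{
        struct aie2_exec_msg_ops *ops = ndev->exec_msg_ops;

        switch (op) {
        case ERT_START_CU:
                return ops->fill_cf_slot(cmd_bo, slot, size);
        case ERT_START_NPU:
                return ops->fill_dpu_slot(cmd_bo, slot, size);
        case ERT_START_NPU_PREEMPT:
                return ops->fill_preempt_slot(cmd_bo, slot, size);
        case ERT_START_NPU_PREEMPT_ELF:
                return ops->fill_elf_slot(cmd_bo, slot, size);
        default:
                return -EOPNOTSUPP;
        }
}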
+
struct amdxdna_dev_hdl {
struct amdxdna_dev *xdna;
const struct amdxdna_dev_priv *priv;
@@ -173,6 +189,8 @@ struct amdxdna_dev_hdl {
u32 total_col;
struct aie_version version;
struct aie_metadata metadata;
+ unsigned long feature_mask;
+ struct aie2_exec_msg_ops *exec_msg_ops;
/* power management and clock */
enum amdxdna_power_mode_type pw_mode;
@@ -182,6 +200,10 @@ struct amdxdna_dev_hdl {
u32 clk_gating;
u32 npuclk_freq;
u32 hclk_freq;
+ u32 max_tops;
+ u32 curr_tops;
+ u32 force_preempt_enabled;
+ u32 frame_boundary_preempt;
/* Mailbox and the management channel */
struct mailbox *mbox;
@@ -190,6 +212,8 @@ struct amdxdna_dev_hdl {
enum aie2_dev_status dev_status;
u32 hwctx_num;
+
+ struct amdxdna_async_error last_async_err;
};
#define DEFINE_BAR_OFFSET(reg_name, bar, reg_addr) \
@@ -204,12 +228,27 @@ struct aie2_hw_ops {
int (*set_dpm)(struct amdxdna_dev_hdl *ndev, u32 dpm_level);
};
+enum aie2_fw_feature {
+ AIE2_NPU_COMMAND,
+ AIE2_PREEMPT,
+ AIE2_FEATURE_MAX
+};
+
+struct aie2_fw_feature_tbl {
+ enum aie2_fw_feature feature;
+ u32 max_minor;
+ u32 min_minor;
+};
+
+#define AIE2_FEATURE_ON(ndev, feature) test_bit(feature, &(ndev)->feature_mask)
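
The mask is an ordinary bitmap indexed by enum aie2_fw_feature, so a feature gate is a single test_bit(). A one-line sketch of a caller, with a hypothetical helper name:

/* Hypothetical helper: only emit preempt slots when the firmware supports them. */
static bool aie2_use_preempt(struct amdxdna_dev_hdl *ndev)
{
        return AIE2_FEATURE_ON(ndev, AIE2_PREEMPT);
}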
+
struct amdxdna_dev_priv {
const char *fw_path;
u64 protocol_major;
u64 protocol_minor;
const struct rt_config *rt_config;
const struct dpm_clk_freq *dpm_clk_tbl;
+ const struct aie2_fw_feature_tbl *fw_feature_tbl;
#define COL_ALIGN_NONE 0
#define COL_ALIGN_NATURE 1
@@ -217,6 +256,7 @@ struct amdxdna_dev_priv {
u32 mbox_dev_addr;
/* If mbox_size is 0, use BAR size. See MBOX_SIZE macro */
u32 mbox_size;
+ u32 hwctx_limit;
u32 sram_dev_addr;
struct aie2_bar_off_pair sram_offs[SRAM_MAX_INDEX];
struct aie2_bar_off_pair psp_regs_off[PSP_MAX_REGS];
@@ -234,6 +274,7 @@ extern const struct dpm_clk_freq npu1_dpm_clk_table[];
extern const struct dpm_clk_freq npu4_dpm_clk_table[];
extern const struct rt_config npu1_default_rt_cfg[];
extern const struct rt_config npu4_default_rt_cfg[];
+extern const struct aie2_fw_feature_tbl npu4_fw_feature_table[];
/* aie2_smu.c */
int aie2_smu_init(struct amdxdna_dev_hdl *ndev);
@@ -253,10 +294,12 @@ void aie2_psp_stop(struct psp_device *psp);
/* aie2_error.c */
int aie2_error_async_events_alloc(struct amdxdna_dev_hdl *ndev);
void aie2_error_async_events_free(struct amdxdna_dev_hdl *ndev);
-int aie2_error_async_events_send(struct amdxdna_dev_hdl *ndev);
int aie2_error_async_msg_thread(void *data);
+int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev,
+ struct amdxdna_drm_get_array *args);
/* aie2_message.c */
+void aie2_msg_init(struct amdxdna_dev_hdl *ndev);
int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
@@ -270,9 +313,13 @@ int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwct
int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx);
int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size);
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf, u32 size, u32 *cols_filled);
+int aie2_query_telemetry(struct amdxdna_dev_hdl *ndev,
+ char __user *buf, u32 size,
+ struct amdxdna_drm_query_telemetry_header *header);
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
void *handle, int (*cb)(void*, void __iomem *, size_t));
-int aie2_config_cu(struct amdxdna_hwctx *hwctx);
+int aie2_config_cu(struct amdxdna_hwctx *hwctx,
+ int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
@@ -283,15 +330,17 @@ int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
int (*notify_cb)(void *, void __iomem *, size_t));
int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
int (*notify_cb)(void *, void __iomem *, size_t));
+int aie2_config_debug_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
+ int (*notify_cb)(void *, void __iomem *, size_t));
/* aie2_hwctx.c */
int aie2_hwctx_init(struct amdxdna_hwctx *hwctx);
void aie2_hwctx_fini(struct amdxdna_hwctx *hwctx);
int aie2_hwctx_config(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
-void aie2_hwctx_suspend(struct amdxdna_hwctx *hwctx);
-void aie2_hwctx_resume(struct amdxdna_hwctx *hwctx);
+int aie2_hwctx_sync_debug_bo(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl);
+void aie2_hwctx_suspend(struct amdxdna_client *client);
+int aie2_hwctx_resume(struct amdxdna_client *client);
int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
void aie2_hmm_invalidate(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
-void aie2_restart_ctx(struct amdxdna_client *client);
#endif /* _AIE2_PCI_H_ */
diff --git a/drivers/accel/amdxdna/aie2_smu.c b/drivers/accel/amdxdna/aie2_smu.c
index d303701b0ded..bd94ee96c2bc 100644
--- a/drivers/accel/amdxdna/aie2_smu.c
+++ b/drivers/accel/amdxdna/aie2_smu.c
@@ -11,6 +11,7 @@
#include "aie2_pci.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
#define SMU_RESULT_OK 1
@@ -22,6 +23,13 @@
#define AIE2_SMU_SET_SOFT_DPMLEVEL 0x7
#define AIE2_SMU_SET_HARD_DPMLEVEL 0x8
+#define NPU4_DPM_TOPS(ndev, dpm_level) \
+({ \
+ typeof(ndev) _ndev = ndev; \
+ (4096 * (_ndev)->total_col * \
+ (_ndev)->priv->dpm_clk_tbl[dpm_level].hclk / 1000000); \
+})
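
Worked example with illustrative numbers (not taken from a real DPM table): for total_col = 8 and an hclk of 1800 MHz, the macro computes 4096 * 8 * 1800 / 1000000 = 58 TOPS in integer math. npu4_set_dpm() below uses it twice: once with max_dpm_level for max_tops and once with the requested level for curr_tops.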
+
static int aie2_smu_exec(struct amdxdna_dev_hdl *ndev, u32 reg_cmd,
u32 reg_arg, u32 *out)
{
@@ -59,12 +67,16 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
u32 freq;
int ret;
+ ret = amdxdna_pm_resume_get(ndev->xdna);
+ if (ret)
+ return ret;
+
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_MPNPUCLK_FREQ,
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, &freq);
if (ret) {
XDNA_ERR(ndev->xdna, "Set npu clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].npuclk, ret);
- return ret;
+ goto suspend_put;
}
ndev->npuclk_freq = freq;
@@ -73,49 +85,78 @@ int npu1_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
if (ret) {
XDNA_ERR(ndev->xdna, "Set h clock to %d failed, ret %d\n",
ndev->priv->dpm_clk_tbl[dpm_level].hclk, ret);
- return ret;
+ goto suspend_put;
}
+
+ amdxdna_pm_suspend_put(ndev->xdna);
ndev->hclk_freq = freq;
ndev->dpm_level = dpm_level;
+ ndev->max_tops = 2 * ndev->total_col;
+ ndev->curr_tops = ndev->max_tops * freq / 1028;
XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
ndev->npuclk_freq, ndev->hclk_freq);
return 0;
+
+suspend_put:
+ amdxdna_pm_suspend_put(ndev->xdna);
+ return ret;
}
int npu4_set_dpm(struct amdxdna_dev_hdl *ndev, u32 dpm_level)
{
int ret;
+ ret = amdxdna_pm_resume_get(ndev->xdna);
+ if (ret)
+ return ret;
+
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_HARD_DPMLEVEL, dpm_level, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Set hard dpm level %d failed, ret %d ",
dpm_level, ret);
- return ret;
+ goto suspend_put;
}
ret = aie2_smu_exec(ndev, AIE2_SMU_SET_SOFT_DPMLEVEL, dpm_level, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Set soft dpm level %d failed, ret %d",
dpm_level, ret);
- return ret;
+ goto suspend_put;
}
+ amdxdna_pm_suspend_put(ndev->xdna);
ndev->npuclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].npuclk;
ndev->hclk_freq = ndev->priv->dpm_clk_tbl[dpm_level].hclk;
ndev->dpm_level = dpm_level;
+ ndev->max_tops = NPU4_DPM_TOPS(ndev, ndev->max_dpm_level);
+ ndev->curr_tops = NPU4_DPM_TOPS(ndev, dpm_level);
XDNA_DBG(ndev->xdna, "MP-NPU clock %d, H clock %d\n",
ndev->npuclk_freq, ndev->hclk_freq);
return 0;
+
+suspend_put:
+ amdxdna_pm_suspend_put(ndev->xdna);
+ return ret;
}
int aie2_smu_init(struct amdxdna_dev_hdl *ndev)
{
int ret;
+ /*
+ * Failing to set power off indicates an unrecoverable hardware or
+ * firmware error.
+ */
+ ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_OFF, 0, NULL);
+ if (ret) {
+ XDNA_ERR(ndev->xdna, "Access power failed, ret %d", ret);
+ return ret;
+ }
+
ret = aie2_smu_exec(ndev, AIE2_SMU_POWER_ON, 0, NULL);
if (ret) {
XDNA_ERR(ndev->xdna, "Power on failed, ret %d", ret);
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index be073224bd69..d17aef89a0ad 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -60,32 +60,6 @@ static struct dma_fence *amdxdna_fence_create(struct amdxdna_hwctx *hwctx)
return &fence->base;
}
-void amdxdna_hwctx_suspend(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- xdna->dev_info->ops->hwctx_suspend(hwctx);
- mutex_unlock(&client->hwctx_lock);
-}
-
-void amdxdna_hwctx_resume(struct amdxdna_client *client)
-{
- struct amdxdna_dev *xdna = client->xdna;
- struct amdxdna_hwctx *hwctx;
- unsigned long hwctx_id;
-
- drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
- mutex_lock(&client->hwctx_lock);
- amdxdna_for_each_hwctx(client, hwctx_id, hwctx)
- xdna->dev_info->ops->hwctx_resume(hwctx);
- mutex_unlock(&client->hwctx_lock);
-}
-
static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
struct srcu_struct *ss)
{
@@ -94,14 +68,30 @@ static void amdxdna_hwctx_destroy_rcu(struct amdxdna_hwctx *hwctx,
synchronize_srcu(ss);
/* At this point, user is not able to submit new commands */
- mutex_lock(&xdna->dev_lock);
xdna->dev_info->ops->hwctx_fini(hwctx);
- mutex_unlock(&xdna->dev_lock);
kfree(hwctx->name);
kfree(hwctx);
}
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg))
+{
+ struct amdxdna_hwctx *hwctx;
+ unsigned long hwctx_id;
+ int ret = 0, idx;
+
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
+ ret = walk(hwctx, arg);
+ if (ret)
+ break;
+ }
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+
+ return ret;
+}
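
A minimal usage sketch of the walker, in the style of aie2_get_hwctx_status() above; the counting callback is hypothetical:

/* Hypothetical callback: count a client's hardware contexts. */
static int count_hwctx_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
        (*(u32 *)arg)++;
        return 0; /* returning non-zero stops the walk and is propagated */
}

static u32 count_hwctx(struct amdxdna_client *client)
{
        u32 cnt = 0;

        amdxdna_hwctx_walk(client, &cnt, count_hwctx_cb);
        return cnt;
}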
+
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
{
struct amdxdna_cmd *cmd = abo->mem.kva;
@@ -123,14 +113,14 @@ void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size)
return &cmd->data[num_masks];
}
-int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
+u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
{
struct amdxdna_cmd *cmd = abo->mem.kva;
u32 num_masks, i;
u32 *cu_mask;
if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN)
- return -1;
+ return INVALID_CU_IDX;
num_masks = 1 + FIELD_GET(AMDXDNA_CMD_EXTRA_CU_MASK, cmd->header);
cu_mask = cmd->data;
@@ -139,7 +129,7 @@ int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
return ffs(cu_mask[i]) - 1;
}
- return -1;
+ return INVALID_CU_IDX;
}
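
For example, with no extra CU mask words, a cu_mask of 0x10 selects CU index 4: ffs() is 1-based, so ffs(0x10) - 1 == 4. A command whose mask words are all zero now yields INVALID_CU_IDX rather than the old signed -1, matching the function's new unsigned return type.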
/*
@@ -152,16 +142,12 @@ void amdxdna_hwctx_remove_all(struct amdxdna_client *client)
struct amdxdna_hwctx *hwctx;
unsigned long hwctx_id;
- mutex_lock(&client->hwctx_lock);
amdxdna_for_each_hwctx(client, hwctx_id, hwctx) {
XDNA_DBG(client->xdna, "PID %d close HW context %d",
client->pid, hwctx->id);
xa_erase(&client->hwctx_xa, hwctx->id);
- mutex_unlock(&client->hwctx_lock);
amdxdna_hwctx_destroy_rcu(hwctx, &client->hwctx_srcu);
- mutex_lock(&client->hwctx_lock);
}
- mutex_unlock(&client->hwctx_lock);
}
int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -175,19 +161,14 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
if (args->ext || args->ext_flags)
return -EINVAL;
- if (!drm_dev_enter(dev, &idx))
- return -ENODEV;
-
hwctx = kzalloc(sizeof(*hwctx), GFP_KERNEL);
- if (!hwctx) {
- ret = -ENOMEM;
- goto exit;
- }
+ if (!hwctx)
+ return -ENOMEM;
if (copy_from_user(&hwctx->qos, u64_to_user_ptr(args->qos_p), sizeof(hwctx->qos))) {
XDNA_ERR(xdna, "Access QoS info failed");
- ret = -EFAULT;
- goto free_hwctx;
+ kfree(hwctx);
+ return -EFAULT;
}
hwctx->client = client;
@@ -195,30 +176,36 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
hwctx->num_tiles = args->num_tiles;
hwctx->mem_size = args->mem_size;
hwctx->max_opc = args->max_opc;
- ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
- XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
- &client->next_hwctxid, GFP_KERNEL);
- if (ret < 0) {
- XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
+
+ guard(mutex)(&xdna->dev_lock);
+
+ if (!drm_dev_enter(dev, &idx)) {
+ ret = -ENODEV;
goto free_hwctx;
}
- hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->id);
+ ret = xdna->dev_info->ops->hwctx_init(hwctx);
+ if (ret) {
+ XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ goto dev_exit;
+ }
+
+ hwctx->name = kasprintf(GFP_KERNEL, "hwctx.%d.%d", client->pid, hwctx->fw_ctx_id);
if (!hwctx->name) {
ret = -ENOMEM;
- goto rm_id;
+ goto fini_hwctx;
}
- mutex_lock(&xdna->dev_lock);
- ret = xdna->dev_info->ops->hwctx_init(hwctx);
- if (ret) {
- mutex_unlock(&xdna->dev_lock);
- XDNA_ERR(xdna, "Init hwctx failed, ret %d", ret);
+ ret = xa_alloc_cyclic(&client->hwctx_xa, &hwctx->id, hwctx,
+ XA_LIMIT(AMDXDNA_INVALID_CTX_HANDLE + 1, MAX_HWCTX_ID),
+ &client->next_hwctxid, GFP_KERNEL);
+ if (ret < 0) {
+ XDNA_ERR(xdna, "Allocate hwctx ID failed, ret %d", ret);
goto free_name;
}
+
args->handle = hwctx->id;
args->syncobj_handle = hwctx->syncobj_hdl;
- mutex_unlock(&xdna->dev_lock);
atomic64_set(&hwctx->job_submit_cnt, 0);
atomic64_set(&hwctx->job_free_cnt, 0);
@@ -228,12 +215,12 @@ int amdxdna_drm_create_hwctx_ioctl(struct drm_device *dev, void *data, struct dr
free_name:
kfree(hwctx->name);
-rm_id:
- xa_erase(&client->hwctx_xa, hwctx->id);
+fini_hwctx:
+ xdna->dev_info->ops->hwctx_fini(hwctx);
+dev_exit:
+ drm_dev_exit(idx);
free_hwctx:
kfree(hwctx);
-exit:
- drm_dev_exit(idx);
return ret;
}
@@ -251,6 +238,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
if (!drm_dev_enter(dev, &idx))
return -ENODEV;
+ mutex_lock(&xdna->dev_lock);
hwctx = xa_erase(&client->hwctx_xa, args->handle);
if (!hwctx) {
ret = -EINVAL;
@@ -267,6 +255,7 @@ int amdxdna_drm_destroy_hwctx_ioctl(struct drm_device *dev, void *data, struct d
XDNA_DBG(xdna, "PID %d destroyed HW context %d", client->pid, args->handle);
out:
+ mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return ret;
}
@@ -339,6 +328,38 @@ unlock_srcu:
return ret;
}
+int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl)
+{
+ struct amdxdna_dev *xdna = client->xdna;
+ struct amdxdna_hwctx *hwctx;
+ struct amdxdna_gem_obj *abo;
+ struct drm_gem_object *gobj;
+ int ret, idx;
+
+ if (!xdna->dev_info->ops->hwctx_sync_debug_bo)
+ return -EOPNOTSUPP;
+
+ gobj = drm_gem_object_lookup(client->filp, debug_bo_hdl);
+ if (!gobj)
+ return -EINVAL;
+
+ abo = to_xdna_obj(gobj);
+ guard(mutex)(&xdna->dev_lock);
+ idx = srcu_read_lock(&client->hwctx_srcu);
+ hwctx = xa_load(&client->hwctx_xa, abo->assigned_hwctx);
+ if (!hwctx) {
+ ret = -EINVAL;
+ goto unlock_srcu;
+ }
+
+ ret = xdna->dev_info->ops->hwctx_sync_debug_bo(hwctx, debug_bo_hdl);
+
+unlock_srcu:
+ srcu_read_unlock(&client->hwctx_srcu, idx);
+ drm_gem_object_put(gobj);
+ return ret;
+}
+
static void
amdxdna_arg_bos_put(struct amdxdna_sched_job *job)
{
@@ -401,9 +422,11 @@ void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
amdxdna_arg_bos_put(job);
amdxdna_gem_put_obj(job->cmd_bo);
+ dma_fence_put(job->fence);
}
int amdxdna_cmd_submit(struct amdxdna_client *client,
+ struct amdxdna_drv_cmd *drv_cmd,
u32 cmd_bo_hdl, u32 *arg_bo_hdls, u32 arg_bo_cnt,
u32 hwctx_hdl, u64 *seq)
{
@@ -417,6 +440,8 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
if (!job)
return -ENOMEM;
+ job->drv_cmd = drv_cmd;
+
if (cmd_bo_hdl != AMDXDNA_INVALID_BO_HANDLE) {
job->cmd_bo = amdxdna_gem_get_obj(client, cmd_bo_hdl, AMDXDNA_BO_CMD);
if (!job->cmd_bo) {
@@ -424,8 +449,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
ret = -EINVAL;
goto free_job;
}
- } else {
- job->cmd_bo = NULL;
}
ret = amdxdna_arg_bos_lookup(client, job, arg_bo_hdls, arg_bo_cnt);
@@ -443,11 +466,6 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
goto unlock_srcu;
}
- if (hwctx->status != HWCTX_STAT_READY) {
- XDNA_ERR(xdna, "HW Context is not ready");
- ret = -EINVAL;
- goto unlock_srcu;
- }
job->hwctx = hwctx;
job->mm = current->mm;
@@ -524,7 +542,7 @@ static int amdxdna_drm_submit_execbuf(struct amdxdna_client *client,
}
}
- ret = amdxdna_cmd_submit(client, cmd_bo_hdl, arg_bo_hdls,
+ ret = amdxdna_cmd_submit(client, NULL, cmd_bo_hdl, arg_bo_hdls,
args->arg_count, args->hwctx, &args->seq);
if (ret)
XDNA_DBG(xdna, "Submit cmds failed, ret %d", ret);
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
index f0a4a8586d85..b6151244d64f 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.h
+++ b/drivers/accel/amdxdna/amdxdna_ctx.h
@@ -13,9 +13,12 @@
struct amdxdna_hwctx_priv;
enum ert_cmd_opcode {
- ERT_START_CU = 0,
- ERT_CMD_CHAIN = 19,
- ERT_START_NPU = 20,
+ ERT_START_CU = 0,
+ ERT_CMD_CHAIN = 19,
+ ERT_START_NPU = 20,
+ ERT_START_NPU_PREEMPT = 21,
+ ERT_START_NPU_PREEMPT_ELF = 22,
+ ERT_INVALID_CMD = ~0U,
};
enum ert_cmd_state {
@@ -54,6 +57,21 @@ struct amdxdna_cmd_chain {
u64 data[] __counted_by(command_count);
};
+/*
+ * Interpretation of the beginning of data payload for ERT_START_NPU_PREEMPT in
+ * amdxdna_cmd. The rest of the payload in amdxdna_cmd is regular kernel args.
+ */
+struct amdxdna_cmd_preempt_data {
+ u64 inst_buf; /* instruction buffer address */
+ u64 save_buf; /* save buffer address */
+ u64 restore_buf; /* restore buffer address */
+ u32 inst_size; /* size of instruction buffer in bytes */
+ u32 save_size; /* size of save buffer in bytes */
+ u32 restore_size; /* size of restore buffer in bytes */
+ u32 inst_prop_cnt; /* properties count */
+ u32 prop_args[]; /* properties and regular kernel arguments */
+};
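
Under the layout above, the regular kernel arguments begin inst_prop_cnt entries into prop_args[]; a hedged one-liner illustrating the offset math (the helper name is hypothetical):

/* Hypothetical: skip the inst_prop_cnt property words to reach the kernel args. */
static u32 *preempt_kernel_args(struct amdxdna_cmd_preempt_data *pd)
{
        return &pd->prop_args[pd->inst_prop_cnt];
}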
+
/* Exec buffer command header format */
#define AMDXDNA_CMD_STATE GENMASK(3, 0)
#define AMDXDNA_CMD_EXTRA_CU_MASK GENMASK(11, 10)
@@ -64,6 +82,8 @@ struct amdxdna_cmd {
u32 data[];
};
+#define INVALID_CU_IDX (~0U)
+
struct amdxdna_hwctx {
struct amdxdna_client *client;
struct amdxdna_hwctx_priv *priv;
@@ -95,6 +115,17 @@ struct amdxdna_hwctx {
#define drm_job_to_xdna_job(j) \
container_of(j, struct amdxdna_sched_job, base)
+enum amdxdna_job_opcode {
+ SYNC_DEBUG_BO,
+ ATTACH_DEBUG_BO,
+ DETACH_DEBUG_BO,
+};
+
+struct amdxdna_drv_cmd {
+ enum amdxdna_job_opcode opcode;
+ u32 result;
+};
+
struct amdxdna_sched_job {
struct drm_sched_job base;
struct kref refcnt;
@@ -105,7 +136,9 @@ struct amdxdna_sched_job {
/* user can wait on this fence */
struct dma_fence *out_fence;
bool job_done;
+ bool job_timeout;
u64 seq;
+ struct amdxdna_drv_cmd *drv_cmd;
struct amdxdna_gem_obj *cmd_bo;
size_t bo_cnt;
struct drm_gem_object *bos[] __counted_by(bo_cnt);
@@ -137,21 +170,17 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
}
void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
-int amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
-
-static inline u32 amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx)
-{
- return GENMASK(hwctx->start_col + hwctx->num_col - 1,
- hwctx->start_col);
-}
+u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
-void amdxdna_hwctx_suspend(struct amdxdna_client *client);
-void amdxdna_hwctx_resume(struct amdxdna_client *client);
+int amdxdna_hwctx_walk(struct amdxdna_client *client, void *arg,
+ int (*walk)(struct amdxdna_hwctx *hwctx, void *arg));
+int amdxdna_hwctx_sync_debug_bo(struct amdxdna_client *client, u32 debug_bo_hdl);
int amdxdna_cmd_submit(struct amdxdna_client *client,
- u32 cmd_bo_hdls, u32 *arg_bo_hdls, u32 arg_bo_cnt,
+ struct amdxdna_drv_cmd *drv_cmd, u32 cmd_bo_hdls,
+ u32 *arg_bo_hdls, u32 arg_bo_cnt,
u32 hwctx_hdl, u64 *seq);
int amdxdna_cmd_wait(struct amdxdna_client *client, u32 hwctx_hdl,
diff --git a/drivers/accel/amdxdna/amdxdna_error.h b/drivers/accel/amdxdna/amdxdna_error.h
new file mode 100644
index 000000000000..c51de86ec12b
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_error.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_ERROR_H_
+#define _AMDXDNA_ERROR_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+#define AMDXDNA_ERR_DRV_AIE 4
+#define AMDXDNA_ERR_SEV_CRITICAL 3
+#define AMDXDNA_ERR_CLASS_AIE 2
+
+#define AMDXDNA_ERR_NUM_MASK GENMASK_U64(15, 0)
+#define AMDXDNA_ERR_DRV_MASK GENMASK_U64(23, 16)
+#define AMDXDNA_ERR_SEV_MASK GENMASK_U64(31, 24)
+#define AMDXDNA_ERR_MOD_MASK GENMASK_U64(39, 32)
+#define AMDXDNA_ERR_CLASS_MASK GENMASK_U64(47, 40)
+
+enum amdxdna_error_num {
+ AMDXDNA_ERROR_NUM_AIE_SATURATION = 3,
+ AMDXDNA_ERROR_NUM_AIE_FP,
+ AMDXDNA_ERROR_NUM_AIE_STREAM,
+ AMDXDNA_ERROR_NUM_AIE_ACCESS,
+ AMDXDNA_ERROR_NUM_AIE_BUS,
+ AMDXDNA_ERROR_NUM_AIE_INSTRUCTION,
+ AMDXDNA_ERROR_NUM_AIE_ECC,
+ AMDXDNA_ERROR_NUM_AIE_LOCK,
+ AMDXDNA_ERROR_NUM_AIE_DMA,
+ AMDXDNA_ERROR_NUM_AIE_MEM_PARITY,
+ AMDXDNA_ERROR_NUM_UNKNOWN = 15,
+};
+
+enum amdxdna_error_module {
+ AMDXDNA_ERROR_MODULE_AIE_CORE = 3,
+ AMDXDNA_ERROR_MODULE_AIE_MEMORY,
+ AMDXDNA_ERROR_MODULE_AIE_SHIM,
+ AMDXDNA_ERROR_MODULE_AIE_NOC,
+ AMDXDNA_ERROR_MODULE_AIE_PL,
+ AMDXDNA_ERROR_MODULE_UNKNOWN = 8,
+};
+
+#define AMDXDNA_ERROR_ENCODE(err_num, err_mod) \
+ (FIELD_PREP(AMDXDNA_ERR_NUM_MASK, err_num) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_DRV_MASK, AMDXDNA_ERR_DRV_AIE) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_SEV_MASK, AMDXDNA_ERR_SEV_CRITICAL) | \
+ FIELD_PREP(AMDXDNA_ERR_MOD_MASK, err_mod) | \
+ FIELD_PREP_CONST(AMDXDNA_ERR_CLASS_MASK, AMDXDNA_ERR_CLASS_AIE))
+
+#define AMDXDNA_EXTRA_ERR_COL_MASK GENMASK_U64(7, 0)
+#define AMDXDNA_EXTRA_ERR_ROW_MASK GENMASK_U64(15, 8)
+
+#define AMDXDNA_EXTRA_ERR_ENCODE(row, col) \
+ (FIELD_PREP(AMDXDNA_EXTRA_ERR_COL_MASK, col) | \
+ FIELD_PREP(AMDXDNA_EXTRA_ERR_ROW_MASK, row))
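
A sketch of how the two encoders combine when recording an asynchronous error such as last_async_err; the module choice and tile coordinates are made up for illustration:

/* Hypothetical example: a critical AIE DMA error on tile row 2, column 3. */
u64 err_code = AMDXDNA_ERROR_ENCODE(AMDXDNA_ERROR_NUM_AIE_DMA,
                                    AMDXDNA_ERROR_MODULE_AIE_SHIM);
u64 err_extra = AMDXDNA_EXTRA_ERR_ENCODE(2, 3); /* row -> bits 15:8, col -> bits 7:0 */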
+
+#endif /* _AMDXDNA_ERROR_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 0f85a0105178..dfa916eeb2d9 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -8,6 +8,7 @@
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/dma-buf.h>
#include <linux/dma-direct.h>
@@ -18,6 +19,7 @@
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
#define XDNA_MAX_CMD_BO_SIZE SZ_32K
@@ -296,7 +298,7 @@ static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
vma->vm_private_data = NULL;
vma->vm_ops = NULL;
- ret = dma_buf_mmap(to_gobj(abo)->dma_buf, vma, 0);
+ ret = dma_buf_mmap(abo->dma_buf, vma, 0);
if (ret) {
XDNA_ERR(xdna, "Failed to mmap dma buf %d", ret);
return ret;
@@ -391,10 +393,45 @@ static const struct dma_buf_ops amdxdna_dmabuf_ops = {
.vunmap = drm_gem_dmabuf_vunmap,
};
+static int amdxdna_gem_obj_vmap(struct amdxdna_gem_obj *abo, void **vaddr)
+{
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
+ int ret;
+
+ if (is_import_bo(abo))
+ ret = dma_buf_vmap_unlocked(abo->dma_buf, &map);
+ else
+ ret = drm_gem_vmap(to_gobj(abo), &map);
+
+ *vaddr = map.vaddr;
+ return ret;
+}
+
+static void amdxdna_gem_obj_vunmap(struct amdxdna_gem_obj *abo)
+{
+ struct iosys_map map;
+
+ if (!abo->mem.kva)
+ return;
+
+ iosys_map_set_vaddr(&map, abo->mem.kva);
+
+ if (is_import_bo(abo))
+ dma_buf_vunmap_unlocked(abo->dma_buf, &map);
+ else
+ drm_gem_vunmap(to_gobj(abo), &map);
+}
+
static struct dma_buf *amdxdna_gem_prime_export(struct drm_gem_object *gobj, int flags)
{
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ if (abo->dma_buf) {
+ get_dma_buf(abo->dma_buf);
+ return abo->dma_buf;
+ }
+
exp_info.ops = &amdxdna_dmabuf_ops;
exp_info.size = gobj->size;
exp_info.flags = flags;
@@ -417,7 +454,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
{
struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(abo->mem.kva);
XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
@@ -430,7 +466,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
if (abo->type == AMDXDNA_BO_DEV_HEAP)
drm_mm_takedown(&abo->mm);
- drm_gem_vunmap(gobj, &map);
+ amdxdna_gem_obj_vunmap(abo);
mutex_destroy(&abo->lock);
if (is_import_bo(abo)) {
@@ -494,6 +530,68 @@ amdxdna_gem_create_object_cb(struct drm_device *dev, size_t size)
return to_gobj(abo);
}
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_shmem_object(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem = drm_gem_shmem_create(dev, size);
+
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->map_wc = false;
+ return to_xdna_obj(&shmem->base);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_ubuf_object(struct drm_device *dev, struct amdxdna_drm_create_bo *args)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ enum amdxdna_ubuf_flag flags = 0;
+ struct amdxdna_drm_va_tbl va_tbl;
+ struct drm_gem_object *gobj;
+ struct dma_buf *dma_buf;
+
+ if (copy_from_user(&va_tbl, u64_to_user_ptr(args->vaddr), sizeof(va_tbl))) {
+ XDNA_DBG(xdna, "Access va table failed");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (va_tbl.num_entries) {
+ if (args->type == AMDXDNA_BO_CMD)
+ flags |= AMDXDNA_UBUF_FLAG_MAP_DMA;
+
+ dma_buf = amdxdna_get_ubuf(dev, flags, va_tbl.num_entries,
+ u64_to_user_ptr(args->vaddr + sizeof(va_tbl)));
+ } else {
+ dma_buf = dma_buf_get(va_tbl.dmabuf_fd);
+ }
+
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ gobj = amdxdna_gem_prime_import(dev, dma_buf);
+ if (IS_ERR(gobj)) {
+ dma_buf_put(dma_buf);
+ return ERR_CAST(gobj);
+ }
+
+ dma_buf_put(dma_buf);
+
+ return to_xdna_obj(gobj);
+}
+
+static struct amdxdna_gem_obj *
+amdxdna_gem_create_object(struct drm_device *dev,
+ struct amdxdna_drm_create_bo *args)
+{
+ size_t aligned_sz = PAGE_ALIGN(args->size);
+
+ if (args->vaddr)
+ return amdxdna_gem_create_ubuf_object(dev, args);
+
+ return amdxdna_gem_create_shmem_object(dev, aligned_sz);
+}
+
struct drm_gem_object *
amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
@@ -545,16 +643,12 @@ amdxdna_drm_alloc_shmem(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem))
- return ERR_CAST(shmem);
-
- shmem->map_wc = false;
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
- abo = to_xdna_obj(&shmem->base);
abo->client = client;
abo->type = AMDXDNA_BO_SHMEM;
@@ -567,9 +661,7 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
int ret;
@@ -586,25 +678,22 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
goto mm_unlock;
}
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem)) {
- ret = PTR_ERR(shmem);
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo)) {
+ ret = PTR_ERR(abo);
goto mm_unlock;
}
- shmem->map_wc = false;
- abo = to_xdna_obj(&shmem->base);
abo->type = AMDXDNA_BO_DEV_HEAP;
abo->client = client;
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
goto release_obj;
}
- abo->mem.kva = map.vaddr;
client->dev_heap = abo;
drm_gem_object_get(to_gobj(abo));
@@ -655,9 +744,7 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
struct drm_file *filp)
{
- struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
- struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
int ret;
@@ -671,27 +758,23 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
return ERR_PTR(-EINVAL);
}
- shmem = drm_gem_shmem_create(dev, args->size);
- if (IS_ERR(shmem))
- return ERR_CAST(shmem);
-
- shmem->map_wc = false;
- abo = to_xdna_obj(&shmem->base);
+ abo = amdxdna_gem_create_object(dev, args);
+ if (IS_ERR(abo))
+ return ERR_CAST(abo);
abo->type = AMDXDNA_BO_CMD;
abo->client = filp->driver_priv;
- ret = drm_gem_vmap(to_gobj(abo), &map);
+ ret = amdxdna_gem_obj_vmap(abo, &abo->mem.kva);
if (ret) {
XDNA_ERR(xdna, "Vmap cmd bo failed, ret %d", ret);
goto release_obj;
}
- abo->mem.kva = map.vaddr;
return abo;
release_obj:
- drm_gem_shmem_free(shmem);
+ drm_gem_object_put(to_gobj(abo));
return ERR_PTR(ret);
}
@@ -702,7 +785,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
struct amdxdna_gem_obj *abo;
int ret;
- if (args->flags || args->vaddr || !args->size)
+ if (args->flags)
return -EINVAL;
XDNA_DBG(xdna, "BO arg type %d vaddr 0x%llx size 0x%llx flags 0x%llx",
@@ -880,6 +963,9 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
XDNA_DBG(xdna, "Sync bo %d offset 0x%llx, size 0x%llx\n",
args->handle, args->offset, args->size);
+ if (args->direction == SYNC_DIRECT_FROM_DEVICE)
+ ret = amdxdna_hwctx_sync_debug_bo(abo->client, args->handle);
+
put_obj:
drm_gem_object_put(gobj);
return ret;
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
index ae29db94a9d3..f79fc7f3c93b 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.h
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -7,6 +7,7 @@
#define _AMDXDNA_GEM_H_
#include <linux/hmm.h>
+#include "amdxdna_pci_drv.h"
struct amdxdna_umap {
struct vm_area_struct *vma;
@@ -62,6 +63,11 @@ static inline void amdxdna_gem_put_obj(struct amdxdna_gem_obj *abo)
drm_gem_object_put(to_gobj(abo));
}
+static inline u64 amdxdna_dev_bo_offset(struct amdxdna_gem_obj *abo)
+{
+ return abo->mem.dev_addr - abo->client->dev_heap->mem.dev_addr;
+}
+
void amdxdna_umap_put(struct amdxdna_umap *mapp);
struct drm_gem_object *
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
index da1ac89bb78f..858df97cd3fb 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -194,7 +194,8 @@ static void mailbox_release_msg(struct mailbox_channel *mb_chann,
{
MB_DBG(mb_chann, "msg_id 0x%x msg opcode 0x%x",
mb_msg->pkg.header.id, mb_msg->pkg.header.opcode);
- mb_msg->notify_cb(mb_msg->handle, NULL, 0);
+ if (mb_msg->notify_cb)
+ mb_msg->notify_cb(mb_msg->handle, NULL, 0);
kfree(mb_msg);
}
@@ -248,7 +249,7 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
{
struct mailbox_msg *mb_msg;
int msg_id;
- int ret;
+ int ret = 0;
msg_id = header->id;
if (!mailbox_validate_msgid(msg_id)) {
@@ -265,9 +266,11 @@ mailbox_get_resp(struct mailbox_channel *mb_chann, struct xdna_msg_header *heade
MB_DBG(mb_chann, "opcode 0x%x size %d id 0x%x",
header->opcode, header->total_size, header->id);
- ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
- if (unlikely(ret))
- MB_ERR(mb_chann, "Message callback ret %d", ret);
+ if (mb_msg->notify_cb) {
+ ret = mb_msg->notify_cb(mb_msg->handle, data, header->total_size);
+ if (unlikely(ret))
+ MB_ERR(mb_chann, "Message callback ret %d", ret);
+ }
kfree(mb_msg);
return ret;
@@ -513,6 +516,7 @@ xdna_mailbox_create_channel(struct mailbox *mb,
}
mb_chann->bad_state = false;
+ mailbox_reg_write(mb_chann, mb_chann->iohub_int_addr, 0);
MB_DBG(mb_chann, "Mailbox channel created (irq: %d)", mb_chann->msix_irq);
return mb_chann;
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
index 710ff8873d61..556c712cad0a 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
+++ b/drivers/accel/amdxdna/amdxdna_mailbox_helper.h
@@ -16,16 +16,18 @@ struct xdna_notify {
u32 *data;
size_t size;
int error;
+ u32 *status;
};
-#define DECLARE_XDNA_MSG_COMMON(name, op, status) \
+#define DECLARE_XDNA_MSG_COMMON(name, op, s) \
struct name##_req req = { 0 }; \
- struct name##_resp resp = { status }; \
+ struct name##_resp resp = { .status = s }; \
struct xdna_notify hdl = { \
.error = 0, \
.data = (u32 *)&resp, \
.size = sizeof(resp), \
.comp = COMPLETION_INITIALIZER_ONSTACK(hdl.comp), \
+ .status = (u32 *)&resp.status, \
}; \
struct xdna_mailbox_msg msg = { \
.send_data = (u8 *)&req, \
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index f2bf1d374cc7..1973ab67721b 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -13,13 +13,11 @@
#include <drm/gpu_scheduler.h>
#include <linux/iommu.h>
#include <linux/pci.h>
-#include <linux/pm_runtime.h>
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
-
-#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+#include "amdxdna_pm.h"
MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
@@ -27,6 +25,18 @@ MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
/*
+ * 0.0: Initial version
+ * 0.1: Support getting all hardware contexts via DRM_IOCTL_AMDXDNA_GET_ARRAY
+ * 0.2: Support getting the last asynchronous hardware error
+ * 0.3: Support firmware debug buffer
+ * 0.4: Support getting resource information
+ * 0.5: Support getting telemetry data
+ * 0.6: Support preemption
+ */
+#define AMDXDNA_DRIVER_MAJOR 0
+#define AMDXDNA_DRIVER_MINOR 6
+
+/*
* Bind the driver based on the (vendor_id, device_id) pair, and later use the
* (device_id, rev_id) pair as a key to select the devices. Devices with the
* same device_id expose a very similar interface to the host driver.
@@ -54,17 +64,9 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
struct amdxdna_client *client;
int ret;
- ret = pm_runtime_resume_and_get(ddev->dev);
- if (ret) {
- XDNA_ERR(xdna, "Failed to get rpm, ret %d", ret);
- return ret;
- }
-
client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client) {
- ret = -ENOMEM;
- goto put_rpm;
- }
+ if (!client)
+ return -ENOMEM;
client->pid = pid_nr(rcu_access_pointer(filp->pid));
client->xdna = xdna;
@@ -81,7 +83,6 @@ static int amdxdna_drm_open(struct drm_device *ddev, struct drm_file *filp)
ret = -ENODEV;
goto unbind_sva;
}
- mutex_init(&client->hwctx_lock);
init_srcu_struct(&client->hwctx_srcu);
xa_init_flags(&client->hwctx_xa, XA_FLAGS_ALLOC);
mutex_init(&client->mm_lock);
@@ -100,9 +101,6 @@ unbind_sva:
iommu_sva_unbind_device(client->sva);
failed:
kfree(client);
-put_rpm:
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
@@ -116,7 +114,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
xa_destroy(&client->hwctx_xa);
cleanup_srcu_struct(&client->hwctx_srcu);
- mutex_destroy(&client->hwctx_lock);
mutex_destroy(&client->mm_lock);
if (client->dev_heap)
drm_gem_object_put(to_gobj(client->dev_heap));
@@ -125,8 +122,6 @@ static void amdxdna_drm_close(struct drm_device *ddev, struct drm_file *filp)
XDNA_DBG(xdna, "pid %d closed", client->pid);
kfree(client);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
}
static int amdxdna_flush(struct file *f, fl_owner_t id)
@@ -142,8 +137,8 @@ static int amdxdna_flush(struct file *f, fl_owner_t id)
mutex_lock(&xdna->dev_lock);
list_del_init(&client->node);
- mutex_unlock(&xdna->dev_lock);
amdxdna_hwctx_remove_all(client);
+ mutex_unlock(&xdna->dev_lock);
drm_dev_exit(idx);
return 0;
@@ -166,6 +161,23 @@ static int amdxdna_drm_get_info_ioctl(struct drm_device *dev, void *data, struct
return ret;
}
+static int amdxdna_drm_get_array_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdxdna_client *client = filp->driver_priv;
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ struct amdxdna_drm_get_array *args = data;
+
+ if (!xdna->dev_info->ops->get_array)
+ return -EOPNOTSUPP;
+
+ if (args->pad || !args->num_element || !args->element_size)
+ return -EINVAL;
+
+ guard(mutex)(&xdna->dev_lock);
+ return xdna->dev_info->ops->get_array(client, args);
+}
+
static int amdxdna_drm_set_state_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
@@ -197,6 +209,7 @@ static const struct drm_ioctl_desc amdxdna_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(AMDXDNA_EXEC_CMD, amdxdna_drm_submit_cmd_ioctl, 0),
/* AIE hardware */
DRM_IOCTL_DEF_DRV(AMDXDNA_GET_INFO, amdxdna_drm_get_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(AMDXDNA_GET_ARRAY, amdxdna_drm_get_array_ioctl, 0),
DRM_IOCTL_DEF_DRV(AMDXDNA_SET_STATE, amdxdna_drm_set_state_ioctl, DRM_ROOT_ONLY),
};
@@ -220,6 +233,8 @@ const struct drm_driver amdxdna_drm_drv = {
.fops = &amdxdna_fops,
.name = "amdxdna_accel_driver",
.desc = "AMD XDNA DRM implementation",
+ .major = AMDXDNA_DRIVER_MAJOR,
+ .minor = AMDXDNA_DRIVER_MINOR,
.open = amdxdna_drm_open,
.postclose = amdxdna_drm_close,
.ioctls = amdxdna_drm_ioctls,
@@ -285,19 +300,12 @@ static int amdxdna_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto failed_dev_fini;
}
- pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(dev);
- pm_runtime_allow(dev);
-
ret = drm_dev_register(&xdna->ddev, 0);
if (ret) {
XDNA_ERR(xdna, "DRM register failed, ret %d", ret);
- pm_runtime_forbid(dev);
goto failed_sysfs_fini;
}
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
return 0;
failed_sysfs_fini:
@@ -314,14 +322,10 @@ destroy_notifier_wq:
static void amdxdna_remove(struct pci_dev *pdev)
{
struct amdxdna_dev *xdna = pci_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
struct amdxdna_client *client;
destroy_workqueue(xdna->notifier_wq);
- pm_runtime_get_noresume(dev);
- pm_runtime_forbid(dev);
-
drm_dev_unplug(&xdna->ddev);
amdxdna_sysfs_fini(xdna);
@@ -330,11 +334,8 @@ static void amdxdna_remove(struct pci_dev *pdev)
struct amdxdna_client, node);
while (client) {
list_del_init(&client->node);
- mutex_unlock(&xdna->dev_lock);
-
amdxdna_hwctx_remove_all(client);
- mutex_lock(&xdna->dev_lock);
client = list_first_entry_or_null(&xdna->client_list,
struct amdxdna_client, node);
}
@@ -343,89 +344,9 @@ static void amdxdna_remove(struct pci_dev *pdev)
mutex_unlock(&xdna->dev_lock);
}
-static int amdxdna_dev_suspend_nolock(struct amdxdna_dev *xdna)
-{
- if (xdna->dev_info->ops->suspend)
- xdna->dev_info->ops->suspend(xdna);
-
- return 0;
-}
-
-static int amdxdna_dev_resume_nolock(struct amdxdna_dev *xdna)
-{
- if (xdna->dev_info->ops->resume)
- return xdna->dev_info->ops->resume(xdna);
-
- return 0;
-}
-
-static int amdxdna_pmops_suspend(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- struct amdxdna_client *client;
-
- mutex_lock(&xdna->dev_lock);
- list_for_each_entry(client, &xdna->client_list, node)
- amdxdna_hwctx_suspend(client);
-
- amdxdna_dev_suspend_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
-
- return 0;
-}
-
-static int amdxdna_pmops_resume(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- struct amdxdna_client *client;
- int ret;
-
- XDNA_INFO(xdna, "firmware resuming...");
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_resume_nolock(xdna);
- if (ret) {
- XDNA_ERR(xdna, "resume NPU firmware failed");
- mutex_unlock(&xdna->dev_lock);
- return ret;
- }
-
- XDNA_INFO(xdna, "hardware context resuming...");
- list_for_each_entry(client, &xdna->client_list, node)
- amdxdna_hwctx_resume(client);
- mutex_unlock(&xdna->dev_lock);
-
- return 0;
-}
-
-static int amdxdna_rpmops_suspend(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- int ret;
-
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_suspend_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
-
- XDNA_DBG(xdna, "Runtime suspend done ret: %d", ret);
- return ret;
-}
-
-static int amdxdna_rpmops_resume(struct device *dev)
-{
- struct amdxdna_dev *xdna = pci_get_drvdata(to_pci_dev(dev));
- int ret;
-
- mutex_lock(&xdna->dev_lock);
- ret = amdxdna_dev_resume_nolock(xdna);
- mutex_unlock(&xdna->dev_lock);
-
- XDNA_DBG(xdna, "Runtime resume done ret: %d", ret);
- return ret;
-}
-
static const struct dev_pm_ops amdxdna_pm_ops = {
- SYSTEM_SLEEP_PM_OPS(amdxdna_pmops_suspend, amdxdna_pmops_resume)
- RUNTIME_PM_OPS(amdxdna_rpmops_suspend, amdxdna_rpmops_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume)
+ RUNTIME_PM_OPS(amdxdna_pm_suspend, amdxdna_pm_resume, NULL)
};
static struct pci_driver amdxdna_pci_driver = {
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.h b/drivers/accel/amdxdna/amdxdna_pci_drv.h
index ab79600911aa..c99477f5e454 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.h
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.h
@@ -6,6 +6,7 @@
#ifndef _AMDXDNA_PCI_DRV_H_
#define _AMDXDNA_PCI_DRV_H_
+#include <drm/drm_print.h>
#include <linux/workqueue.h>
#include <linux/xarray.h>
@@ -50,16 +51,16 @@ struct amdxdna_dev_ops {
int (*init)(struct amdxdna_dev *xdna);
void (*fini)(struct amdxdna_dev *xdna);
int (*resume)(struct amdxdna_dev *xdna);
- void (*suspend)(struct amdxdna_dev *xdna);
+ int (*suspend)(struct amdxdna_dev *xdna);
int (*hwctx_init)(struct amdxdna_hwctx *hwctx);
void (*hwctx_fini)(struct amdxdna_hwctx *hwctx);
int (*hwctx_config)(struct amdxdna_hwctx *hwctx, u32 type, u64 value, void *buf, u32 size);
+ int (*hwctx_sync_debug_bo)(struct amdxdna_hwctx *hwctx, u32 debug_bo_hdl);
void (*hmm_invalidate)(struct amdxdna_gem_obj *abo, unsigned long cur_seq);
- void (*hwctx_suspend)(struct amdxdna_hwctx *hwctx);
- void (*hwctx_resume)(struct amdxdna_hwctx *hwctx);
int (*cmd_submit)(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job, u64 *seq);
int (*get_aie_info)(struct amdxdna_client *client, struct amdxdna_drm_get_info *args);
int (*set_aie_state)(struct amdxdna_client *client, struct amdxdna_drm_set_state *args);
+ int (*get_array)(struct amdxdna_client *client, struct amdxdna_drm_get_array *args);
};
/*
@@ -100,6 +101,7 @@ struct amdxdna_dev {
struct amdxdna_fw_ver fw_ver;
struct rw_semaphore notifier_lock; /* for mmu notifier */
struct workqueue_struct *notifier_wq;
+ bool rpm_on;
};
/*
@@ -118,8 +120,6 @@ struct amdxdna_device_id {
struct amdxdna_client {
struct list_head node;
pid_t pid;
- struct mutex hwctx_lock; /* protect hwctx */
- /* do NOT wait this srcu when hwctx_lock is held */
struct srcu_struct hwctx_srcu;
struct xarray hwctx_xa;
u32 next_hwctxid;
diff --git a/drivers/accel/amdxdna/amdxdna_pm.c b/drivers/accel/amdxdna/amdxdna_pm.c
new file mode 100644
index 000000000000..fa38e65d617c
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pm.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_drv.h>
+#include <linux/pm_runtime.h>
+
+#include "amdxdna_pm.h"
+
+#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+
+int amdxdna_pm_suspend(struct device *dev)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+ bool rpm;
+
+ if (xdna->dev_info->ops->suspend) {
+ rpm = xdna->rpm_on;
+ xdna->rpm_on = false;
+ ret = xdna->dev_info->ops->suspend(xdna);
+ xdna->rpm_on = rpm;
+ }
+
+ XDNA_DBG(xdna, "Suspend done ret %d", ret);
+ return ret;
+}
+
+int amdxdna_pm_resume(struct device *dev)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev_get_drvdata(dev));
+ int ret = -EOPNOTSUPP;
+ bool rpm;
+
+ if (xdna->dev_info->ops->resume) {
+ rpm = xdna->rpm_on;
+ xdna->rpm_on = false;
+ ret = xdna->dev_info->ops->resume(xdna);
+ xdna->rpm_on = rpm;
+ }
+
+ XDNA_DBG(xdna, "Resume done ret %d", ret);
+ return ret;
+}
+
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+ int ret;
+
+ if (!xdna->rpm_on)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ XDNA_ERR(xdna, "Resume failed: %d", ret);
+ pm_runtime_set_suspended(dev);
+ }
+
+ return ret;
+}
+
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ if (!xdna->rpm_on)
+ return;
+
+ pm_runtime_put_autosuspend(dev);
+}
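
Callers bracket device access with this pair, as aie2_get_info(), aie2_set_state() and the SMU helpers do elsewhere in this patch; a minimal sketch of the idiom, with a hypothetical function name:

/* Hypothetical caller: wake the NPU, do the work, then rearm autosuspend. */
static int aie2_do_hw_work(struct amdxdna_dev *xdna)
{
        int ret;

        ret = amdxdna_pm_resume_get(xdna); /* no-op until amdxdna_pm_init() sets rpm_on */
        if (ret)
                return ret;

        /* ... issue mailbox commands or touch registers here ... */

        amdxdna_pm_suspend_put(xdna);
        return 0;
}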
+
+void amdxdna_pm_init(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, AMDXDNA_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_put_autosuspend(dev);
+ xdna->rpm_on = true;
+}
+
+void amdxdna_pm_fini(struct amdxdna_dev *xdna)
+{
+ struct device *dev = xdna->ddev.dev;
+
+ xdna->rpm_on = false;
+ pm_runtime_get_noresume(dev);
+ pm_runtime_forbid(dev);
+}
diff --git a/drivers/accel/amdxdna/amdxdna_pm.h b/drivers/accel/amdxdna/amdxdna_pm.h
new file mode 100644
index 000000000000..77b2d6e45570
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_pm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef _AMDXDNA_PM_H_
+#define _AMDXDNA_PM_H_
+
+#include "amdxdna_pci_drv.h"
+
+int amdxdna_pm_suspend(struct device *dev);
+int amdxdna_pm_resume(struct device *dev);
+int amdxdna_pm_resume_get(struct amdxdna_dev *xdna);
+void amdxdna_pm_suspend_put(struct amdxdna_dev *xdna);
+void amdxdna_pm_init(struct amdxdna_dev *xdna);
+void amdxdna_pm_fini(struct amdxdna_dev *xdna);
+
+#endif /* _AMDXDNA_PM_H_ */
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.c b/drivers/accel/amdxdna/amdxdna_ubuf.c
new file mode 100644
index 000000000000..077b2261cf2a
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+
+#include <drm/amdxdna_accel.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <linux/dma-buf.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "amdxdna_pci_drv.h"
+#include "amdxdna_ubuf.h"
+
+struct amdxdna_ubuf_priv {
+ struct page **pages;
+ u64 nr_pages;
+ enum amdxdna_ubuf_flag flags;
+ struct mm_struct *mm;
+};
+
+static struct sg_table *amdxdna_ubuf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+ struct sg_table *sg;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->nr_pages, 0,
+ ubuf->nr_pages << PAGE_SHIFT, GFP_KERNEL);
+ if (ret)
+ goto free_sg;
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA) {
+ ret = dma_map_sgtable(attach->dev, sg, direction, 0);
+ if (ret)
+ goto free_table;
+ }
+
+ return sg;
+
+free_table:
+ sg_free_table(sg);
+free_sg:
+ kfree(sg);
+ return ERR_PTR(ret);
+}
+
+static void amdxdna_ubuf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ struct amdxdna_ubuf_priv *ubuf = attach->dmabuf->priv;
+
+ if (ubuf->flags & AMDXDNA_UBUF_FLAG_MAP_DMA)
+ dma_unmap_sgtable(attach->dev, sg, direction, 0);
+
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static void amdxdna_ubuf_release(struct dma_buf *dbuf)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ unpin_user_pages(ubuf->pages, ubuf->nr_pages);
+ kvfree(ubuf->pages);
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+}
+
+static vm_fault_t amdxdna_ubuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct amdxdna_ubuf_priv *ubuf;
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ ubuf = vma->vm_private_data;
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(ubuf->pages[pgoff]);
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+static const struct vm_operations_struct amdxdna_ubuf_vm_ops = {
+ .fault = amdxdna_ubuf_vm_fault,
+};
+
+static int amdxdna_ubuf_mmap(struct dma_buf *dbuf, struct vm_area_struct *vma)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+
+ vma->vm_ops = &amdxdna_ubuf_vm_ops;
+ vma->vm_private_data = ubuf;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
+
+ return 0;
+}
+
+static int amdxdna_ubuf_vmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ struct amdxdna_ubuf_priv *ubuf = dbuf->priv;
+ void *kva;
+
+ kva = vmap(ubuf->pages, ubuf->nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!kva)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, kva);
+ return 0;
+}
+
+static void amdxdna_ubuf_vunmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ vunmap(map->vaddr);
+}
+
+static const struct dma_buf_ops amdxdna_ubuf_dmabuf_ops = {
+ .map_dma_buf = amdxdna_ubuf_map,
+ .unmap_dma_buf = amdxdna_ubuf_unmap,
+ .release = amdxdna_ubuf_release,
+ .mmap = amdxdna_ubuf_mmap,
+ .vmap = amdxdna_ubuf_vmap,
+ .vunmap = amdxdna_ubuf_vunmap,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(dev);
+ unsigned long lock_limit, new_pinned;
+ struct amdxdna_drm_va_entry *va_ent;
+ struct amdxdna_ubuf_priv *ubuf;
+ u32 npages, start = 0;
+ struct dma_buf *dbuf;
+ int i, ret;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ if (!can_do_mlock())
+ return ERR_PTR(-EPERM);
+
+ ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+ if (!ubuf)
+ return ERR_PTR(-ENOMEM);
+
+ ubuf->flags = flags;
+ ubuf->mm = current->mm;
+ mmgrab(ubuf->mm);
+
+ va_ent = kvcalloc(num_entries, sizeof(*va_ent), GFP_KERNEL);
+ if (!va_ent) {
+ ret = -ENOMEM;
+ goto free_ubuf;
+ }
+
+ if (copy_from_user(va_ent, va_entries, sizeof(*va_ent) * num_entries)) {
+ XDNA_DBG(xdna, "Access va entries failed");
+ ret = -EFAULT;
+ goto free_ent;
+ }
+
+ for (i = 0, exp_info.size = 0; i < num_entries; i++) {
+ if (!IS_ALIGNED(va_ent[i].vaddr, PAGE_SIZE) ||
+ !IS_ALIGNED(va_ent[i].len, PAGE_SIZE)) {
+ XDNA_ERR(xdna, "Invalid address or len %llx, %llx",
+ va_ent[i].vaddr, va_ent[i].len);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+
+ exp_info.size += va_ent[i].len;
+ }
+
+ ubuf->nr_pages = exp_info.size >> PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ new_pinned = atomic64_add_return(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+ if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
+ XDNA_DBG(xdna, "New pin %ld, limit %ld, cap %d",
+ new_pinned, lock_limit, capable(CAP_IPC_LOCK));
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ ubuf->pages = kvmalloc_array(ubuf->nr_pages, sizeof(*ubuf->pages), GFP_KERNEL);
+ if (!ubuf->pages) {
+ ret = -ENOMEM;
+ goto sub_pin_cnt;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ npages = va_ent[i].len >> PAGE_SHIFT;
+
+ ret = pin_user_pages_fast(va_ent[i].vaddr, npages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ &ubuf->pages[start]);
+ if (ret < 0 || ret != npages) {
+ XDNA_ERR(xdna, "Failed to pin pages ret %d", ret);
+ ret = -ENOMEM;
+ goto destroy_pages;
+ }
+
+ start += ret;
+ }
+
+ exp_info.ops = &amdxdna_ubuf_dmabuf_ops;
+ exp_info.priv = ubuf;
+ exp_info.flags = O_RDWR | O_CLOEXEC;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf)) {
+ ret = PTR_ERR(dbuf);
+ goto destroy_pages;
+ }
+ kvfree(va_ent);
+
+ return dbuf;
+
+destroy_pages:
+ if (start)
+ unpin_user_pages(ubuf->pages, start);
+ kvfree(ubuf->pages);
+sub_pin_cnt:
+ atomic64_sub(ubuf->nr_pages, &ubuf->mm->pinned_vm);
+free_ent:
+ kvfree(va_ent);
+free_ubuf:
+ mmdrop(ubuf->mm);
+ kfree(ubuf);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/accel/amdxdna/amdxdna_ubuf.h b/drivers/accel/amdxdna/amdxdna_ubuf.h
new file mode 100644
index 000000000000..e5cb3bdb3ec9
--- /dev/null
+++ b/drivers/accel/amdxdna/amdxdna_ubuf.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025, Advanced Micro Devices, Inc.
+ */
+#ifndef _AMDXDNA_UBUF_H_
+#define _AMDXDNA_UBUF_H_
+
+#include <drm/drm_device.h>
+#include <linux/dma-buf.h>
+
+enum amdxdna_ubuf_flag {
+ AMDXDNA_UBUF_FLAG_MAP_DMA = 1,
+};
+
+struct dma_buf *amdxdna_get_ubuf(struct drm_device *dev,
+ enum amdxdna_ubuf_flag flags,
+ u32 num_entries, void __user *va_entries);
+
+#endif /* _AMDXDNA_UBUF_H_ */
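A sketch of how an ioctl handler might hand user VA ranges to amdxdna_get_ubuf(); the args fields here are illustrative, as the real uapi struct is not part of this hunk:

	struct dma_buf *dbuf;

	/* va_entries points at num_entries struct amdxdna_drm_va_entry,
	 * each with a page-aligned vaddr and len */
	dbuf = amdxdna_get_ubuf(&xdna->ddev, AMDXDNA_UBUF_FLAG_MAP_DMA,
				args->num_entries,
				u64_to_user_ptr(args->va_entries));
	if (IS_ERR(dbuf))
		return PTR_ERR(dbuf);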
diff --git a/drivers/accel/amdxdna/npu1_regs.c b/drivers/accel/amdxdna/npu1_regs.c
index e4f6dac7d00f..ec407f3b48fc 100644
--- a/drivers/accel/amdxdna/npu1_regs.c
+++ b/drivers/accel/amdxdna/npu1_regs.c
@@ -46,6 +46,7 @@
const struct rt_config npu1_default_rt_cfg[] = {
{ 2, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 4, 1, AIE2_RT_CFG_INIT }, /* Debug BO */
{ 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 0 },
};
@@ -62,16 +63,23 @@ const struct dpm_clk_freq npu1_dpm_clk_table[] = {
{ 0 }
};
+static const struct aie2_fw_feature_tbl npu1_fw_feature_table[] = {
+ { .feature = AIE2_NPU_COMMAND, .min_minor = 8 },
+ { 0 }
+};
+
static const struct amdxdna_dev_priv npu1_dev_priv = {
.fw_path = "amdnpu/1502_00/npu.sbin",
.protocol_major = 0x5,
.protocol_minor = 0x7,
.rt_config = npu1_default_rt_cfg,
.dpm_clk_tbl = npu1_dpm_clk_table,
+ .fw_feature_tbl = npu1_fw_feature_table,
.col_align = COL_ALIGN_NONE,
.mbox_dev_addr = NPU1_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU1_SRAM_BAR_BASE,
+ .hwctx_limit = 6,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU1_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU1_SRAM, MPNPU_SRAM_I2X_MAILBOX_15),
diff --git a/drivers/accel/amdxdna/npu2_regs.c b/drivers/accel/amdxdna/npu2_regs.c
index a081cac75ee0..86f87d0d1354 100644
--- a/drivers/accel/amdxdna/npu2_regs.c
+++ b/drivers/accel/amdxdna/npu2_regs.c
@@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu2_dev_priv = {
.protocol_minor = 0x6,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU2_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU2_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU2_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
diff --git a/drivers/accel/amdxdna/npu4_regs.c b/drivers/accel/amdxdna/npu4_regs.c
index 9f2e33182ec6..986a5f28ba24 100644
--- a/drivers/accel/amdxdna/npu4_regs.c
+++ b/drivers/accel/amdxdna/npu4_regs.c
@@ -63,10 +63,14 @@
const struct rt_config npu4_default_rt_cfg[] = {
{ 5, 1, AIE2_RT_CFG_INIT }, /* PDI APP LOAD MODE */
+ { 10, 1, AIE2_RT_CFG_INIT }, /* DEBUG BUF */
+ { 14, 0, AIE2_RT_CFG_INIT, BIT_U64(AIE2_PREEMPT) }, /* Frame boundary preemption */
{ 1, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 2, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 3, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
{ 4, 1, AIE2_RT_CFG_CLK_GATING }, /* Clock gating on */
+ { 13, 0, AIE2_RT_CFG_FORCE_PREEMPT },
+ { 14, 0, AIE2_RT_CFG_FRAME_BOUNDARY_PREEMPT },
{ 0 },
};
@@ -82,16 +86,24 @@ const struct dpm_clk_freq npu4_dpm_clk_table[] = {
{ 0 }
};
+const struct aie2_fw_feature_tbl npu4_fw_feature_table[] = {
+ { .feature = AIE2_NPU_COMMAND, .min_minor = 15 },
+ { .feature = AIE2_PREEMPT, .min_minor = 12 },
+ { 0 }
+};
+
static const struct amdxdna_dev_priv npu4_dev_priv = {
.fw_path = "amdnpu/17f0_10/npu.sbin",
.protocol_major = 0x6,
.protocol_minor = 12,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU4_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU4_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU4_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
diff --git a/drivers/accel/amdxdna/npu5_regs.c b/drivers/accel/amdxdna/npu5_regs.c
index 5f1cf83461c4..75ad97f0b937 100644
--- a/drivers/accel/amdxdna/npu5_regs.c
+++ b/drivers/accel/amdxdna/npu5_regs.c
@@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu5_dev_priv = {
.protocol_minor = 12,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU5_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU5_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU5_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
diff --git a/drivers/accel/amdxdna/npu6_regs.c b/drivers/accel/amdxdna/npu6_regs.c
index 94a7005685a7..758dc013fe13 100644
--- a/drivers/accel/amdxdna/npu6_regs.c
+++ b/drivers/accel/amdxdna/npu6_regs.c
@@ -67,10 +67,12 @@ static const struct amdxdna_dev_priv npu6_dev_priv = {
.protocol_minor = 12,
.rt_config = npu4_default_rt_cfg,
.dpm_clk_tbl = npu4_dpm_clk_table,
+ .fw_feature_tbl = npu4_fw_feature_table,
.col_align = COL_ALIGN_NATURE,
.mbox_dev_addr = NPU6_MBOX_BAR_BASE,
.mbox_size = 0, /* Use BAR size */
.sram_dev_addr = NPU6_SRAM_BAR_BASE,
+ .hwctx_limit = 16,
.sram_offs = {
DEFINE_BAR_OFFSET(MBOX_CHANN_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_0),
DEFINE_BAR_OFFSET(FW_ALIVE_OFF, NPU6_SRAM, MPNPU_SRAM_X2I_MAILBOX_15),
diff --git a/drivers/accel/ethosu/Kconfig b/drivers/accel/ethosu/Kconfig
new file mode 100644
index 000000000000..d25f9b3eb317
--- /dev/null
+++ b/drivers/accel/ethosu/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_ARM_ETHOSU
+ tristate "Arm Ethos-U65/U85 NPU"
+ depends on HAS_IOMEM
+ depends on DRM_ACCEL
+ select DRM_GEM_DMA_HELPER
+ select DRM_SCHED
+ select GENERIC_ALLOCATOR
+ help
+ Enables support for the Arm Ethos-U65 and Ethos-U85 neural
+ processing units (NPUs).
diff --git a/drivers/accel/ethosu/Makefile b/drivers/accel/ethosu/Makefile
new file mode 100644
index 000000000000..17db5a600416
--- /dev/null
+++ b/drivers/accel/ethosu/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_ARM_ETHOSU) += ethosu.o
+ethosu-y += ethosu_drv.o ethosu_gem.o ethosu_job.o
diff --git a/drivers/accel/ethosu/ethosu_device.h b/drivers/accel/ethosu/ethosu_device.h
new file mode 100644
index 000000000000..b189fa783d6a
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_device.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2025 Arm, Ltd. */
+
+#ifndef __ETHOSU_DEVICE_H__
+#define __ETHOSU_DEVICE_H__
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/gpu_scheduler.h>
+
+#include <drm/ethosu_accel.h>
+
+struct clk;
+struct gen_pool;
+
+#define NPU_REG_ID 0x0000
+#define NPU_REG_STATUS 0x0004
+#define NPU_REG_CMD 0x0008
+#define NPU_REG_RESET 0x000c
+#define NPU_REG_QBASE 0x0010
+#define NPU_REG_QBASE_HI 0x0014
+#define NPU_REG_QREAD 0x0018
+#define NPU_REG_QCONFIG 0x001c
+#define NPU_REG_QSIZE 0x0020
+#define NPU_REG_PROT 0x0024
+#define NPU_REG_CONFIG 0x0028
+#define NPU_REG_REGIONCFG 0x003c
+#define NPU_REG_AXILIMIT0 0x0040 // U65
+#define NPU_REG_AXILIMIT1 0x0044 // U65
+#define NPU_REG_AXILIMIT2 0x0048 // U65
+#define NPU_REG_AXILIMIT3 0x004c // U65
+#define NPU_REG_MEM_ATTR0 0x0040 // U85
+#define NPU_REG_MEM_ATTR1 0x0044 // U85
+#define NPU_REG_MEM_ATTR2 0x0048 // U85
+#define NPU_REG_MEM_ATTR3 0x004c // U85
+#define NPU_REG_AXI_SRAM 0x0050 // U85
+#define NPU_REG_AXI_EXT 0x0054 // U85
+
+#define NPU_REG_BASEP(x) (0x0080 + (x) * 8)
+#define NPU_REG_BASEP_HI(x) (0x0084 + (x) * 8)
+#define NPU_BASEP_REGION_MAX 8
+
+#define ID_ARCH_MAJOR_MASK GENMASK(31, 28)
+#define ID_ARCH_MINOR_MASK GENMASK(27, 20)
+#define ID_ARCH_PATCH_MASK GENMASK(19, 16)
+#define ID_VER_MAJOR_MASK GENMASK(11, 8)
+#define ID_VER_MINOR_MASK GENMASK(7, 4)
+
+#define CONFIG_MACS_PER_CC_MASK GENMASK(3, 0)
+#define CONFIG_CMD_STREAM_VER_MASK GENMASK(7, 4)
+
+#define STATUS_STATE_RUNNING BIT(0)
+#define STATUS_IRQ_RAISED BIT(1)
+#define STATUS_BUS_STATUS BIT(2)
+#define STATUS_RESET_STATUS BIT(3)
+#define STATUS_CMD_PARSE_ERR BIT(4)
+#define STATUS_CMD_END_REACHED BIT(5)
+
+#define CMD_CLEAR_IRQ BIT(1)
+#define CMD_TRANSITION_TO_RUN BIT(0)
+
+#define RESET_PENDING_CSL BIT(1)
+#define RESET_PENDING_CPL BIT(0)
+
+#define PROT_ACTIVE_CSL BIT(1)
+
+enum ethosu_cmds {
+ NPU_OP_CONV = 0x2,
+ NPU_OP_DEPTHWISE = 0x3,
+ NPU_OP_POOL = 0x5,
+ NPU_OP_ELEMENTWISE = 0x6,
+ NPU_OP_RESIZE = 0x7, // U85 only
+ NPU_OP_DMA_START = 0x10,
+ NPU_SET_IFM_PAD_TOP = 0x100,
+ NPU_SET_IFM_PAD_LEFT = 0x101,
+ NPU_SET_IFM_PAD_RIGHT = 0x102,
+ NPU_SET_IFM_PAD_BOTTOM = 0x103,
+ NPU_SET_IFM_DEPTH_M1 = 0x104,
+ NPU_SET_IFM_PRECISION = 0x105,
+ NPU_SET_IFM_BROADCAST = 0x108,
+ NPU_SET_IFM_WIDTH0_M1 = 0x10a,
+ NPU_SET_IFM_HEIGHT0_M1 = 0x10b,
+ NPU_SET_IFM_HEIGHT1_M1 = 0x10c,
+ NPU_SET_IFM_REGION = 0x10f,
+ NPU_SET_OFM_WIDTH_M1 = 0x111,
+ NPU_SET_OFM_HEIGHT_M1 = 0x112,
+ NPU_SET_OFM_DEPTH_M1 = 0x113,
+ NPU_SET_OFM_PRECISION = 0x114,
+ NPU_SET_OFM_WIDTH0_M1 = 0x11a,
+ NPU_SET_OFM_HEIGHT0_M1 = 0x11b,
+ NPU_SET_OFM_HEIGHT1_M1 = 0x11c,
+ NPU_SET_OFM_REGION = 0x11f,
+ NPU_SET_KERNEL_WIDTH_M1 = 0x120,
+ NPU_SET_KERNEL_HEIGHT_M1 = 0x121,
+ NPU_SET_KERNEL_STRIDE = 0x122,
+ NPU_SET_WEIGHT_REGION = 0x128,
+ NPU_SET_SCALE_REGION = 0x129,
+ NPU_SET_DMA0_SRC_REGION = 0x130,
+ NPU_SET_DMA0_DST_REGION = 0x131,
+ NPU_SET_DMA0_SIZE0 = 0x132,
+ NPU_SET_DMA0_SIZE1 = 0x133,
+ NPU_SET_IFM2_BROADCAST = 0x180,
+ NPU_SET_IFM2_PRECISION = 0x185,
+ NPU_SET_IFM2_WIDTH0_M1 = 0x18a,
+ NPU_SET_IFM2_HEIGHT0_M1 = 0x18b,
+ NPU_SET_IFM2_HEIGHT1_M1 = 0x18c,
+ NPU_SET_IFM2_REGION = 0x18f,
+ NPU_SET_IFM_BASE0 = 0x4000,
+ NPU_SET_IFM_BASE1 = 0x4001,
+ NPU_SET_IFM_BASE2 = 0x4002,
+ NPU_SET_IFM_BASE3 = 0x4003,
+ NPU_SET_IFM_STRIDE_X = 0x4004,
+ NPU_SET_IFM_STRIDE_Y = 0x4005,
+ NPU_SET_IFM_STRIDE_C = 0x4006,
+ NPU_SET_OFM_BASE0 = 0x4010,
+ NPU_SET_OFM_BASE1 = 0x4011,
+ NPU_SET_OFM_BASE2 = 0x4012,
+ NPU_SET_OFM_BASE3 = 0x4013,
+ NPU_SET_OFM_STRIDE_X = 0x4014,
+ NPU_SET_OFM_STRIDE_Y = 0x4015,
+ NPU_SET_OFM_STRIDE_C = 0x4016,
+ NPU_SET_WEIGHT_BASE = 0x4020,
+ NPU_SET_WEIGHT_LENGTH = 0x4021,
+ NPU_SET_SCALE_BASE = 0x4022,
+ NPU_SET_SCALE_LENGTH = 0x4023,
+ NPU_SET_DMA0_SRC = 0x4030,
+ NPU_SET_DMA0_DST = 0x4031,
+ NPU_SET_DMA0_LEN = 0x4032,
+ NPU_SET_DMA0_SRC_STRIDE0 = 0x4033,
+ NPU_SET_DMA0_SRC_STRIDE1 = 0x4034,
+ NPU_SET_DMA0_DST_STRIDE0 = 0x4035,
+ NPU_SET_DMA0_DST_STRIDE1 = 0x4036,
+ NPU_SET_IFM2_BASE0 = 0x4080,
+ NPU_SET_IFM2_BASE1 = 0x4081,
+ NPU_SET_IFM2_BASE2 = 0x4082,
+ NPU_SET_IFM2_BASE3 = 0x4083,
+ NPU_SET_IFM2_STRIDE_X = 0x4084,
+ NPU_SET_IFM2_STRIDE_Y = 0x4085,
+ NPU_SET_IFM2_STRIDE_C = 0x4086,
+ NPU_SET_WEIGHT1_BASE = 0x4090,
+ NPU_SET_WEIGHT1_LENGTH = 0x4091,
+ NPU_SET_SCALE1_BASE = 0x4092,
+ NPU_SET_WEIGHT2_BASE = 0x4092,
+ NPU_SET_SCALE1_LENGTH = 0x4093,
+ NPU_SET_WEIGHT2_LENGTH = 0x4093,
+ NPU_SET_WEIGHT3_BASE = 0x4094,
+ NPU_SET_WEIGHT3_LENGTH = 0x4095,
+};
+
+#define ETHOSU_SRAM_REGION 2 /* Matching Vela compiler */
+
+/**
+ * struct ethosu_device - Ethos-U NPU device instance
+ */
+struct ethosu_device {
+ /** @base: Base drm_device. */
+ struct drm_device base;
+
+ /** @regs: CPU mapping of the registers. */
+ void __iomem *regs;
+
+ void __iomem *sram;
+ struct gen_pool *srampool;
+ dma_addr_t sramphys;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
+ int irq;
+
+ struct drm_ethosu_npu_info npu_info;
+
+ struct ethosu_job *in_flight_job;
+ /* For in_flight_job and ethosu_job_hw_submit() */
+ struct mutex job_lock;
+
+ /* For dma_fence */
+ spinlock_t fence_lock;
+
+ struct drm_gpu_scheduler sched;
+ /* For ethosu_job_do_push() */
+ struct mutex sched_lock;
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+#define to_ethosu_device(drm_dev) \
+ container_of(drm_dev, struct ethosu_device, base)
+
+static inline bool ethosu_is_u65(const struct ethosu_device *ethosudev)
+{
+ return FIELD_GET(ID_ARCH_MAJOR_MASK, ethosudev->npu_info.id) == 1;
+}
+
+#endif
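The opcode space in enum ethosu_cmds splits at bit 14: opcodes at 0x4000 and above carry a second 32-bit payload word, with bits 23:16 of the first word supplying the top byte of a 40-bit value. A minimal decode sketch matching the validator in ethosu_gem.c:

	u16 opcode = cmds[0] & 0xffff;	/* low half: command */
	u16 param = cmds[0] >> 16;	/* high half: 16-bit parameter */
	u64 addr = 0;

	if (opcode & 0x4000)		/* two-word command */
		addr = ((u64)(cmds[0] & 0xff0000) << 16) | cmds[1];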
diff --git a/drivers/accel/ethosu/ethosu_drv.c b/drivers/accel/ethosu/ethosu_drv.c
new file mode 100644
index 000000000000..e05a69bf5574
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_drv.c
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+// Copyright (C) 2025 Arm, Ltd.
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_utils.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_accel.h>
+#include <drm/ethosu_accel.h>
+
+#include "ethosu_drv.h"
+#include "ethosu_device.h"
+#include "ethosu_gem.h"
+#include "ethosu_job.h"
+
+static int ethosu_ioctl_dev_query(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct ethosu_device *ethosudev = to_ethosu_device(ddev);
+ struct drm_ethosu_dev_query *args = data;
+
+ if (!args->pointer) {
+ switch (args->type) {
+ case DRM_ETHOSU_DEV_QUERY_NPU_INFO:
+ args->size = sizeof(ethosudev->npu_info);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ switch (args->type) {
+ case DRM_ETHOSU_DEV_QUERY_NPU_INFO:
+ if (args->size < offsetofend(struct drm_ethosu_npu_info, sram_size))
+ return -EINVAL;
+ return copy_struct_to_user(u64_to_user_ptr(args->pointer),
+ args->size,
+ &ethosudev->npu_info,
+ sizeof(ethosudev->npu_info), NULL);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ETHOSU_BO_FLAGS DRM_ETHOSU_BO_NO_MMAP
+
+static int ethosu_ioctl_bo_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_bo_create *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (!args->size || (args->flags & ~ETHOSU_BO_FLAGS)) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+
+ ret = ethosu_gem_create_with_handle(file, ddev, &args->size,
+ args->flags, &args->handle);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int ethosu_ioctl_bo_wait(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_bo_wait *args = data;
+ int cookie, ret;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ ret = drm_gem_dma_resv_wait(file, args->handle, true, timeout);
+
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int ethosu_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_bo_mmap_offset *args = data;
+ struct drm_gem_object *obj;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ args->offset = drm_vma_node_offset_addr(&obj->vma_node);
+ drm_gem_object_put(obj);
+ return 0;
+}
+
+static int ethosu_ioctl_cmdstream_bo_create(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_ethosu_cmdstream_bo_create *args = data;
+ int cookie, ret;
+
+ if (!drm_dev_enter(ddev, &cookie))
+ return -ENODEV;
+
+ if (!args->size || !args->data || args->pad || args->flags) {
+ ret = -EINVAL;
+ goto out_dev_exit;
+ }
+
+ args->flags |= DRM_ETHOSU_BO_NO_MMAP;
+
+ ret = ethosu_gem_cmdstream_create(file, ddev, args->size, args->data,
+ args->flags, &args->handle);
+
+out_dev_exit:
+ drm_dev_exit(cookie);
+ return ret;
+}
+
+static int ethosu_open(struct drm_device *ddev, struct drm_file *file)
+{
+ int ret = 0;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ struct ethosu_file_priv __free(kfree) *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+ priv->edev = to_ethosu_device(ddev);
+
+ ret = ethosu_job_open(priv);
+ if (ret)
+ goto err_put_mod;
+
+ file->driver_priv = no_free_ptr(priv);
+ return 0;
+
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void ethosu_postclose(struct drm_device *ddev, struct drm_file *file)
+{
+ ethosu_job_close(file->driver_priv);
+ kfree(file->driver_priv);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc ethosu_drm_driver_ioctls[] = {
+#define ETHOSU_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(ETHOSU_##n, ethosu_ioctl_##func, flags)
+
+ ETHOSU_IOCTL(DEV_QUERY, dev_query, 0),
+ ETHOSU_IOCTL(BO_CREATE, bo_create, 0),
+ ETHOSU_IOCTL(BO_WAIT, bo_wait, 0),
+ ETHOSU_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, 0),
+ ETHOSU_IOCTL(CMDSTREAM_BO_CREATE, cmdstream_bo_create, 0),
+ ETHOSU_IOCTL(SUBMIT, submit, 0),
+};
+
+DEFINE_DRM_ACCEL_FOPS(ethosu_drm_driver_fops);
+
+/*
+ * Ethosu driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver ethosu_drm_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM,
+ .open = ethosu_open,
+ .postclose = ethosu_postclose,
+ .ioctls = ethosu_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(ethosu_drm_driver_ioctls),
+ .fops = &ethosu_drm_driver_fops,
+ .name = "ethosu",
+ .desc = "Arm Ethos-U Accel driver",
+ .major = 1,
+ .minor = 0,
+
+ .gem_create_object = ethosu_gem_create_object,
+};
+
+#define U65_DRAM_AXI_LIMIT_CFG 0x1f3f0002
+#define U65_SRAM_AXI_LIMIT_CFG 0x1f3f00b0
+#define U85_AXI_EXT_CFG 0x00021f3f
+#define U85_AXI_SRAM_CFG 0x00021f3f
+#define U85_MEM_ATTR0_CFG 0x00000000
+#define U85_MEM_ATTR2_CFG 0x000000b7
+
+static int ethosu_reset(struct ethosu_device *ethosudev)
+{
+ int ret;
+ u32 reg;
+
+ writel_relaxed(RESET_PENDING_CSL, ethosudev->regs + NPU_REG_RESET);
+ ret = readl_poll_timeout(ethosudev->regs + NPU_REG_STATUS, reg,
+ !FIELD_GET(STATUS_RESET_STATUS, reg),
+ USEC_PER_MSEC, USEC_PER_SEC);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(PROT_ACTIVE_CSL, readl_relaxed(ethosudev->regs + NPU_REG_PROT))) {
+ dev_warn(ethosudev->base.dev, "Could not reset to non-secure mode (PROT = %x)\n",
+ readl_relaxed(ethosudev->regs + NPU_REG_PROT));
+ }
+
+ /*
+ * Assign region 2 (SRAM) to AXI M0 (AXILIMIT0),
+ * everything else to AXI M1 (AXILIMIT2)
+ */
+ writel_relaxed(0x0000aa8a, ethosudev->regs + NPU_REG_REGIONCFG);
+ if (ethosu_is_u65(ethosudev)) {
+ writel_relaxed(U65_SRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT0);
+ writel_relaxed(U65_DRAM_AXI_LIMIT_CFG, ethosudev->regs + NPU_REG_AXILIMIT2);
+ } else {
+ writel_relaxed(U85_AXI_SRAM_CFG, ethosudev->regs + NPU_REG_AXI_SRAM);
+ writel_relaxed(U85_AXI_EXT_CFG, ethosudev->regs + NPU_REG_AXI_EXT);
+ writel_relaxed(U85_MEM_ATTR0_CFG, ethosudev->regs + NPU_REG_MEM_ATTR0); // SRAM
+ writel_relaxed(U85_MEM_ATTR2_CFG, ethosudev->regs + NPU_REG_MEM_ATTR2); // DRAM
+ }
+
+ if (ethosudev->sram)
+ memset_io(ethosudev->sram, 0, ethosudev->npu_info.sram_size);
+
+ return 0;
+}
+
+static int ethosu_device_resume(struct device *dev)
+{
+ struct ethosu_device *ethosudev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ethosudev->num_clks, ethosudev->clks);
+ if (ret)
+ return ret;
+
+ ret = ethosu_reset(ethosudev);
+ if (!ret)
+ return 0;
+
+ clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks);
+ return ret;
+}
+
+static int ethosu_device_suspend(struct device *dev)
+{
+ struct ethosu_device *ethosudev = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(ethosudev->num_clks, ethosudev->clks);
+ return 0;
+}
+
+static int ethosu_sram_init(struct ethosu_device *ethosudev)
+{
+ ethosudev->npu_info.sram_size = 0;
+
+ ethosudev->srampool = of_gen_pool_get(ethosudev->base.dev->of_node, "sram", 0);
+ if (!ethosudev->srampool)
+ return 0;
+
+ ethosudev->npu_info.sram_size = gen_pool_size(ethosudev->srampool);
+
+ ethosudev->sram = (void __iomem *)gen_pool_dma_alloc(ethosudev->srampool,
+ ethosudev->npu_info.sram_size,
+ &ethosudev->sramphys);
+ if (!ethosudev->sram) {
+ dev_err(ethosudev->base.dev, "failed to allocate from SRAM pool\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ethosu_init(struct ethosu_device *ethosudev)
+{
+ int ret;
+ u32 id, config;
+
+ ret = ethosu_device_resume(ethosudev->base.dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(ethosudev->base.dev, 50);
+ pm_runtime_use_autosuspend(ethosudev->base.dev);
+ ret = devm_pm_runtime_set_active_enabled(ethosudev->base.dev);
+ if (ret)
+ return ret;
+ pm_runtime_get_noresume(ethosudev->base.dev);
+
+ ethosudev->npu_info.id = id = readl_relaxed(ethosudev->regs + NPU_REG_ID);
+ ethosudev->npu_info.config = config = readl_relaxed(ethosudev->regs + NPU_REG_CONFIG);
+
+ ethosu_sram_init(ethosudev);
+
+ dev_info(ethosudev->base.dev,
+ "Ethos-U NPU, arch v%ld.%ld.%ld, rev r%ldp%ld, cmd stream ver%ld, %d MACs, %dKB SRAM\n",
+ FIELD_GET(ID_ARCH_MAJOR_MASK, id),
+ FIELD_GET(ID_ARCH_MINOR_MASK, id),
+ FIELD_GET(ID_ARCH_PATCH_MASK, id),
+ FIELD_GET(ID_VER_MAJOR_MASK, id),
+ FIELD_GET(ID_VER_MINOR_MASK, id),
+ FIELD_GET(CONFIG_CMD_STREAM_VER_MASK, config),
+ 1 << FIELD_GET(CONFIG_MACS_PER_CC_MASK, config),
+ ethosudev->npu_info.sram_size / 1024);
+
+ return 0;
+}
+
+static int ethosu_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct ethosu_device *ethosudev;
+
+ ethosudev = devm_drm_dev_alloc(&pdev->dev, &ethosu_drm_driver,
+ struct ethosu_device, base);
+ if (IS_ERR(ethosudev))
+ return PTR_ERR(ethosudev);
+ platform_set_drvdata(pdev, ethosudev);
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
+
+ ethosudev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ethosudev->regs))
+ return PTR_ERR(ethosudev->regs);
+
+ ethosudev->num_clks = devm_clk_bulk_get_all(&pdev->dev, &ethosudev->clks);
+ if (ethosudev->num_clks < 0)
+ return ethosudev->num_clks;
+
+ ret = ethosu_job_init(ethosudev);
+ if (ret)
+ return ret;
+
+ ret = ethosu_init(ethosudev);
+ if (ret)
+ return ret;
+
+ ret = drm_dev_register(&ethosudev->base, 0);
+ if (ret)
+ pm_runtime_dont_use_autosuspend(ethosudev->base.dev);
+
+ pm_runtime_put_autosuspend(ethosudev->base.dev);
+ return ret;
+}
+
+static void ethosu_remove(struct platform_device *pdev)
+{
+ struct ethosu_device *ethosudev = dev_get_drvdata(&pdev->dev);
+
+ drm_dev_unregister(&ethosudev->base);
+ ethosu_job_fini(ethosudev);
+ if (ethosudev->sram)
+ gen_pool_free(ethosudev->srampool, (unsigned long)ethosudev->sram,
+ ethosudev->npu_info.sram_size);
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "arm,ethos-u65" },
+ { .compatible = "arm,ethos-u85" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ethosu_pm_ops,
+ ethosu_device_suspend,
+ ethosu_device_resume,
+ NULL);
+
+static struct platform_driver ethosu_driver = {
+ .probe = ethosu_probe,
+ .remove = ethosu_remove,
+ .driver = {
+ .name = "ethosu",
+ .pm = pm_ptr(&ethosu_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+module_platform_driver(ethosu_driver);
+
+MODULE_AUTHOR("Rob Herring <robh@kernel.org>");
+MODULE_DESCRIPTION("Arm Ethos-U Accel Driver");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/accel/ethosu/ethosu_drv.h b/drivers/accel/ethosu/ethosu_drv.h
new file mode 100644
index 000000000000..9e21dfe94184
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_drv.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2025 Arm, Ltd. */
+#ifndef __ETHOSU_DRV_H__
+#define __ETHOSU_DRV_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct ethosu_device;
+
+struct ethosu_file_priv {
+ struct ethosu_device *edev;
+ struct drm_sched_entity sched_entity;
+};
+
+#endif
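From userspace, DEV_QUERY is a two-step ioctl: a call with pointer == 0 reports the expected size, and a second call fills the struct. A hedged sketch; the DRM_IOCTL_ETHOSU_DEV_QUERY number and struct layout come from the uapi header drm/ethosu_accel.h, which is outside this section:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <drm/ethosu_accel.h>

	int fd = open("/dev/accel/accel0", O_RDWR);
	struct drm_ethosu_npu_info info = {0};
	struct drm_ethosu_dev_query q = {
		.type = DRM_ETHOSU_DEV_QUERY_NPU_INFO,
		.size = sizeof(info),
		.pointer = (__u64)(uintptr_t)&info,
	};

	if (fd >= 0 && ioctl(fd, DRM_IOCTL_ETHOSU_DEV_QUERY, &q) == 0)
		printf("NPU id %#x, SRAM %llu bytes\n", info.id,
		       (unsigned long long)info.sram_size);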
diff --git a/drivers/accel/ethosu/ethosu_gem.c b/drivers/accel/ethosu/ethosu_gem.c
new file mode 100644
index 000000000000..473b5f5d7514
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_gem.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2025 Arm, Ltd. */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+
+#include <drm/ethosu_accel.h>
+
+#include "ethosu_device.h"
+#include "ethosu_gem.h"
+
+static void ethosu_gem_free_object(struct drm_gem_object *obj)
+{
+ struct ethosu_gem_object *bo = to_ethosu_bo(obj);
+
+ kfree(bo->info);
+ drm_gem_free_mmap_offset(&bo->base.base);
+ drm_gem_dma_free(&bo->base);
+}
+
+static int ethosu_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct ethosu_gem_object *bo = to_ethosu_bo(obj);
+
+ /* Don't allow mmap on objects that have the NO_MMAP flag set. */
+ if (bo->flags & DRM_ETHOSU_BO_NO_MMAP)
+ return -EINVAL;
+
+ return drm_gem_dma_object_mmap(obj, vma);
+}
+
+static const struct drm_gem_object_funcs ethosu_gem_funcs = {
+ .free = ethosu_gem_free_object,
+ .print_info = drm_gem_dma_object_print_info,
+ .get_sg_table = drm_gem_dma_object_get_sg_table,
+ .vmap = drm_gem_dma_object_vmap,
+ .mmap = ethosu_gem_mmap,
+ .vm_ops = &drm_gem_dma_vm_ops,
+};
+
+/**
+ * ethosu_gem_create_object - Implementation of driver->gem_create_object.
+ * @ddev: DRM device
+ * @size: Size in bytes of the memory the object will reference
+ *
+ * This lets the GEM helpers allocate object structs for us, and keep
+ * our BO stats correct.
+ */
+struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev, size_t size)
+{
+ struct ethosu_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &ethosu_gem_funcs;
+ return &obj->base.base;
+}
+
+/**
+ * ethosu_gem_create_with_handle() - Create a GEM object and attach it to a handle.
+ * @file: DRM file.
+ * @ddev: DRM device.
+ * @size: Size of the GEM object to allocate; updated on return to the
+ *        object's actual size.
+ * @flags: Combination of drm_ethosu_bo_flags flags.
+ * @handle: Pointer holding the handle pointing to the new GEM object.
+ *
+ * Return: Zero on success, or a negative error code on failure.
+ */
+int ethosu_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ u64 *size, u32 flags, u32 *handle)
+{
+ struct drm_gem_dma_object *mem;
+ struct ethosu_gem_object *bo;
+ int ret;
+
+ mem = drm_gem_dma_create(ddev, *size);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ bo = to_ethosu_bo(&mem->base);
+ bo->flags = flags;
+
+ /*
+ * Allocate an ID in the IDR table where the object is registered;
+ * the returned handle is the ID userspace sees.
+ */
+ ret = drm_gem_handle_create(file, &mem->base, handle);
+ if (!ret)
+ *size = bo->base.base.size;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put(&mem->base);
+
+ return ret;
+}
+
+struct dma {
+ s8 region;
+ u64 len;
+ u64 offset;
+ s64 stride[2];
+};
+
+struct dma_state {
+ u16 size0;
+ u16 size1;
+ s8 mode;
+ struct dma src;
+ struct dma dst;
+};
+
+struct buffer {
+ u64 base;
+ u32 length;
+ s8 region;
+};
+
+struct feat_matrix {
+ u64 base[4];
+ s64 stride_x;
+ s64 stride_y;
+ s64 stride_c;
+ s8 region;
+ u8 broadcast;
+ u16 stride_kernel;
+ u16 precision;
+ u16 depth;
+ u16 width;
+ u16 width0;
+ u16 height[3];
+ u8 pad_top;
+ u8 pad_left;
+ u8 pad_bottom;
+ u8 pad_right;
+};
+
+struct cmd_state {
+ struct dma_state dma;
+ struct buffer scale[2];
+ struct buffer weight[4];
+ struct feat_matrix ofm;
+ struct feat_matrix ifm;
+ struct feat_matrix ifm2;
+};
+
+static void cmd_state_init(struct cmd_state *st)
+{
+ /* Initialize to all 1s to detect missing setup */
+ memset(st, 0xff, sizeof(*st));
+}
+
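+/*
+ * Two-word commands encode a 40-bit value: bits 23:16 of the first
+ * word become bits 39:32, and the second word supplies bits 31:0.
+ */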
+static u64 cmd_to_addr(u32 *cmd)
+{
+ return ((u64)(cmd[0] & 0xff0000) << 16) | cmd[1];
+}
+
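+/*
+ * Bytes covered by a 1D/2D/3D transfer (mode 0/1/2). For mode 2 this
+ * is ((len + stride[0]) * size0 + stride[1]) * size1; e.g. a 2D
+ * transfer (mode 1) with len = 64, stride[0] = 0 and size0 = 4 covers
+ * 256 bytes.
+ */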
+static u64 dma_length(struct ethosu_validated_cmdstream_info *info,
+ struct dma_state *dma_st, struct dma *dma)
+{
+ s8 mode = dma_st->mode;
+ u64 len = dma->len;
+
+ if (mode >= 1) {
+ len += dma->stride[0];
+ len *= dma_st->size0;
+ }
+ if (mode == 2) {
+ len += dma->stride[1];
+ len *= dma_st->size1;
+ }
+ if (dma->region >= 0)
+ info->region_size[dma->region] = max(info->region_size[dma->region],
+ len + dma->offset);
+
+ return len;
+}
+
+static u64 feat_matrix_length(struct ethosu_validated_cmdstream_info *info,
+ struct feat_matrix *fm,
+ u32 x, u32 y, u32 c)
+{
+ u32 element_size, storage = fm->precision >> 14;
+ int tile = 0;
+ u64 addr;
+
+ if (fm->region < 0)
+ return U64_MAX;
+
+ switch (storage) {
+ case 0:
+ if (x >= fm->width0 + 1) {
+ x -= fm->width0 + 1;
+ tile += 1;
+ }
+ if (y >= fm->height[tile] + 1) {
+ y -= fm->height[tile] + 1;
+ tile += 2;
+ }
+ break;
+ case 1:
+ if (y >= fm->height[1] + 1) {
+ y -= fm->height[1] + 1;
+ tile = 2;
+ } else if (y >= fm->height[0] + 1) {
+ y -= fm->height[0] + 1;
+ tile = 1;
+ }
+ break;
+ }
+ if (fm->base[tile] == U64_MAX)
+ return U64_MAX;
+
+ addr = fm->base[tile] + y * fm->stride_y;
+
+ switch ((fm->precision >> 6) & 0x3) { // format
+ case 0: //nhwc:
+ addr += x * fm->stride_x + c;
+ break;
+ case 1: //nhcwb16:
+ element_size = BIT((fm->precision >> 1) & 0x3);
+
+ addr += (c / 16) * fm->stride_c + (16 * x + (c & 0xf)) * element_size;
+ break;
+ }
+
+ info->region_size[fm->region] = max(info->region_size[fm->region], addr + 1);
+
+ return addr;
+}
+
+static int calc_sizes(struct drm_device *ddev,
+ struct ethosu_validated_cmdstream_info *info,
+ u16 op, struct cmd_state *st,
+ bool ifm, bool ifm2, bool weight, bool scale)
+{
+ u64 len;
+
+ if (ifm) {
+ if (st->ifm.stride_kernel == U16_MAX)
+ return -EINVAL;
+ u32 stride_y = ((st->ifm.stride_kernel >> 8) & 0x2) +
+ ((st->ifm.stride_kernel >> 1) & 0x1) + 1;
+ u32 stride_x = ((st->ifm.stride_kernel >> 5) & 0x2) +
+ (st->ifm.stride_kernel & 0x1) + 1;
+ u32 ifm_height = st->ofm.height[2] * stride_y +
+ st->ifm.height[2] - (st->ifm.pad_top + st->ifm.pad_bottom);
+ u32 ifm_width = st->ofm.width * stride_x +
+ st->ifm.width - (st->ifm.pad_left + st->ifm.pad_right);
+
+ len = feat_matrix_length(info, &st->ifm, ifm_width,
+ ifm_height, st->ifm.depth);
+ dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n",
+ op, st->ifm.region, st->ifm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ if (ifm2) {
+ len = feat_matrix_length(info, &st->ifm2, st->ifm.depth,
+ 0, st->ofm.depth);
+ dev_dbg(ddev->dev, "op %d: IFM2:%d:0x%llx-0x%llx\n",
+ op, st->ifm2.region, st->ifm2.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ if (weight) {
+ dev_dbg(ddev->dev, "op %d: W:%d:0x%llx-0x%llx\n",
+ op, st->weight[0].region, st->weight[0].base,
+ st->weight[0].base + st->weight[0].length - 1);
+ if (st->weight[0].region < 0 || st->weight[0].base == U64_MAX ||
+ st->weight[0].length == U32_MAX)
+ return -EINVAL;
+ info->region_size[st->weight[0].region] =
+ max(info->region_size[st->weight[0].region],
+ st->weight[0].base + st->weight[0].length);
+ }
+
+ if (scale) {
+ dev_dbg(ddev->dev, "op %d: S:%d:0x%llx-0x%llx\n",
+ op, st->scale[0].region, st->scale[0].base,
+ st->scale[0].base + st->scale[0].length - 1);
+ if (st->scale[0].region < 0 || st->scale[0].base == U64_MAX ||
+ st->scale[0].length == U32_MAX)
+ return -EINVAL;
+ info->region_size[st->scale[0].region] =
+ max(info->region_size[st->scale[0].region],
+ st->scale[0].base + st->scale[0].length);
+ }
+
+ len = feat_matrix_length(info, &st->ofm, st->ofm.width,
+ st->ofm.height[2], st->ofm.depth);
+ dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n",
+ op, st->ofm.region, st->ofm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ info->output_region[st->ofm.region] = true;
+
+ return 0;
+}
+
+static int calc_sizes_elemwise(struct drm_device *ddev,
+ struct ethosu_validated_cmdstream_info *info,
+ u16 op, struct cmd_state *st,
+ bool ifm, bool ifm2)
+{
+ u32 height, width, depth;
+ u64 len;
+
+ if (ifm) {
+ height = st->ifm.broadcast & 0x1 ? 0 : st->ofm.height[2];
+ width = st->ifm.broadcast & 0x2 ? 0 : st->ofm.width;
+ depth = st->ifm.broadcast & 0x4 ? 0 : st->ofm.depth;
+
+ len = feat_matrix_length(info, &st->ifm, width,
+ height, depth);
+ dev_dbg(ddev->dev, "op %d: IFM:%d:0x%llx-0x%llx\n",
+ op, st->ifm.region, st->ifm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ if (ifm2) {
+ height = st->ifm2.broadcast & 0x1 ? 0 : st->ofm.height[2];
+ width = st->ifm2.broadcast & 0x2 ? 0 : st->ofm.width;
+ depth = st->ifm2.broadcast & 0x4 ? 0 : st->ofm.depth;
+
+ len = feat_matrix_length(info, &st->ifm2, width,
+ height, depth);
+ dev_dbg(ddev->dev, "op %d: IFM2:%d:0x%llx-0x%llx\n",
+ op, st->ifm2.region, st->ifm2.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ }
+
+ len = feat_matrix_length(info, &st->ofm, st->ofm.width,
+ st->ofm.height[2], st->ofm.depth);
+ dev_dbg(ddev->dev, "op %d: OFM:%d:0x%llx-0x%llx\n",
+ op, st->ofm.region, st->ofm.base[0], len);
+ if (len == U64_MAX)
+ return -EINVAL;
+ info->output_region[st->ofm.region] = true;
+
+ return 0;
+}
+
+static int ethosu_gem_cmdstream_copy_and_validate(struct drm_device *ddev,
+ u32 __user *ucmds,
+ struct ethosu_gem_object *bo,
+ u32 size)
+{
+ struct ethosu_validated_cmdstream_info __free(kfree) *info = kzalloc(sizeof(*info), GFP_KERNEL);
+ struct ethosu_device *edev = to_ethosu_device(ddev);
+ u32 *bocmds = bo->base.vaddr;
+ struct cmd_state st;
+ int i, ret;
+
+ if (!info)
+ return -ENOMEM;
+ info->cmd_size = size;
+
+ cmd_state_init(&st);
+
+ for (i = 0; i < size / 4; i++) {
+ bool use_ifm, use_ifm2, use_scale;
+ u64 dstlen, srclen;
+ u16 cmd, param;
+ u32 cmds[2];
+ u64 addr;
+
+ if (get_user(cmds[0], ucmds++))
+ return -EFAULT;
+
+ bocmds[i] = cmds[0];
+
+ cmd = cmds[0];
+ param = cmds[0] >> 16;
+
+ if (cmd & 0x4000) {
+ /* 2-word command: bounds-check the payload word */
+ if (++i >= size / 4)
+ return -EINVAL;
+ if (get_user(cmds[1], ucmds++))
+ return -EFAULT;
+
+ bocmds[i] = cmds[1];
+ addr = cmd_to_addr(cmds);
+ }
+
+ switch (cmd) {
+ case NPU_OP_DMA_START:
+ srclen = dma_length(info, &st.dma, &st.dma.src);
+ dstlen = dma_length(info, &st.dma, &st.dma.dst);
+
+ if (st.dma.dst.region >= 0)
+ info->output_region[st.dma.dst.region] = true;
+ dev_dbg(ddev->dev, "cmd: DMA SRC:%d:0x%llx+0x%llx DST:%d:0x%llx+0x%llx\n",
+ st.dma.src.region, st.dma.src.offset, srclen,
+ st.dma.dst.region, st.dma.dst.offset, dstlen);
+ break;
+ case NPU_OP_CONV:
+ case NPU_OP_DEPTHWISE:
+ use_ifm2 = param & 0x1; // weights_ifm2
+ use_scale = !(st.ofm.precision & 0x100);
+ ret = calc_sizes(ddev, info, cmd, &st, true, use_ifm2,
+ !use_ifm2, use_scale);
+ if (ret)
+ return ret;
+ break;
+ case NPU_OP_POOL:
+ use_ifm = param != 0x4; // pooling mode
+ use_scale = !(st.ofm.precision & 0x100);
+ ret = calc_sizes(ddev, info, cmd, &st, use_ifm, false,
+ false, use_scale);
+ if (ret)
+ return ret;
+ break;
+ case NPU_OP_ELEMENTWISE:
+ use_ifm2 = !((st.ifm2.broadcast == 8) || (param == 5) ||
+ (param == 6) || (param == 7) || (param == 0x24));
+ use_ifm = st.ifm.broadcast != 8;
+ ret = calc_sizes_elemwise(ddev, info, cmd, &st, use_ifm, use_ifm2);
+ if (ret)
+ return ret;
+ break;
+ case NPU_OP_RESIZE: // U85 only
+ WARN_ON(1); // TODO
+ break;
+ case NPU_SET_KERNEL_WIDTH_M1:
+ st.ifm.width = param;
+ break;
+ case NPU_SET_KERNEL_HEIGHT_M1:
+ st.ifm.height[2] = param;
+ break;
+ case NPU_SET_KERNEL_STRIDE:
+ st.ifm.stride_kernel = param;
+ break;
+ case NPU_SET_IFM_PAD_TOP:
+ st.ifm.pad_top = param & 0x7f;
+ break;
+ case NPU_SET_IFM_PAD_LEFT:
+ st.ifm.pad_left = param & 0x7f;
+ break;
+ case NPU_SET_IFM_PAD_RIGHT:
+ st.ifm.pad_right = param & 0xff;
+ break;
+ case NPU_SET_IFM_PAD_BOTTOM:
+ st.ifm.pad_bottom = param & 0xff;
+ break;
+ case NPU_SET_IFM_DEPTH_M1:
+ st.ifm.depth = param;
+ break;
+ case NPU_SET_IFM_PRECISION:
+ st.ifm.precision = param;
+ break;
+ case NPU_SET_IFM_BROADCAST:
+ st.ifm.broadcast = param;
+ break;
+ case NPU_SET_IFM_REGION:
+ st.ifm.region = param & 0x7f;
+ break;
+ case NPU_SET_IFM_WIDTH0_M1:
+ st.ifm.width0 = param;
+ break;
+ case NPU_SET_IFM_HEIGHT0_M1:
+ st.ifm.height[0] = param;
+ break;
+ case NPU_SET_IFM_HEIGHT1_M1:
+ st.ifm.height[1] = param;
+ break;
+ case NPU_SET_IFM_BASE0:
+ case NPU_SET_IFM_BASE1:
+ case NPU_SET_IFM_BASE2:
+ case NPU_SET_IFM_BASE3:
+ st.ifm.base[cmd & 0x3] = addr;
+ break;
+ case NPU_SET_IFM_STRIDE_X:
+ st.ifm.stride_x = addr;
+ break;
+ case NPU_SET_IFM_STRIDE_Y:
+ st.ifm.stride_y = addr;
+ break;
+ case NPU_SET_IFM_STRIDE_C:
+ st.ifm.stride_c = addr;
+ break;
+
+ case NPU_SET_OFM_WIDTH_M1:
+ st.ofm.width = param;
+ break;
+ case NPU_SET_OFM_HEIGHT_M1:
+ st.ofm.height[2] = param;
+ break;
+ case NPU_SET_OFM_DEPTH_M1:
+ st.ofm.depth = param;
+ break;
+ case NPU_SET_OFM_PRECISION:
+ st.ofm.precision = param;
+ break;
+ case NPU_SET_OFM_REGION:
+ st.ofm.region = param & 0x7;
+ break;
+ case NPU_SET_OFM_WIDTH0_M1:
+ st.ofm.width0 = param;
+ break;
+ case NPU_SET_OFM_HEIGHT0_M1:
+ st.ofm.height[0] = param;
+ break;
+ case NPU_SET_OFM_HEIGHT1_M1:
+ st.ofm.height[1] = param;
+ break;
+ case NPU_SET_OFM_BASE0:
+ case NPU_SET_OFM_BASE1:
+ case NPU_SET_OFM_BASE2:
+ case NPU_SET_OFM_BASE3:
+ st.ofm.base[cmd & 0x3] = addr;
+ break;
+ case NPU_SET_OFM_STRIDE_X:
+ st.ofm.stride_x = addr;
+ break;
+ case NPU_SET_OFM_STRIDE_Y:
+ st.ofm.stride_y = addr;
+ break;
+ case NPU_SET_OFM_STRIDE_C:
+ st.ofm.stride_c = addr;
+ break;
+
+ case NPU_SET_IFM2_BROADCAST:
+ st.ifm2.broadcast = param;
+ break;
+ case NPU_SET_IFM2_PRECISION:
+ st.ifm2.precision = param;
+ break;
+ case NPU_SET_IFM2_REGION:
+ st.ifm2.region = param & 0x7;
+ break;
+ case NPU_SET_IFM2_WIDTH0_M1:
+ st.ifm2.width0 = param;
+ break;
+ case NPU_SET_IFM2_HEIGHT0_M1:
+ st.ifm2.height[0] = param;
+ break;
+ case NPU_SET_IFM2_HEIGHT1_M1:
+ st.ifm2.height[1] = param;
+ break;
+ case NPU_SET_IFM2_BASE0:
+ case NPU_SET_IFM2_BASE1:
+ case NPU_SET_IFM2_BASE2:
+ case NPU_SET_IFM2_BASE3:
+ st.ifm2.base[cmd & 0x3] = addr;
+ break;
+ case NPU_SET_IFM2_STRIDE_X:
+ st.ifm2.stride_x = addr;
+ break;
+ case NPU_SET_IFM2_STRIDE_Y:
+ st.ifm2.stride_y = addr;
+ break;
+ case NPU_SET_IFM2_STRIDE_C:
+ st.ifm2.stride_c = addr;
+ break;
+
+ case NPU_SET_WEIGHT_REGION:
+ st.weight[0].region = param & 0x7;
+ break;
+ case NPU_SET_SCALE_REGION:
+ st.scale[0].region = param & 0x7;
+ break;
+ case NPU_SET_WEIGHT_BASE:
+ st.weight[0].base = addr;
+ break;
+ case NPU_SET_WEIGHT_LENGTH:
+ st.weight[0].length = cmds[1];
+ break;
+ case NPU_SET_SCALE_BASE:
+ st.scale[0].base = addr;
+ break;
+ case NPU_SET_SCALE_LENGTH:
+ st.scale[0].length = cmds[1];
+ break;
+ case NPU_SET_WEIGHT1_BASE:
+ st.weight[1].base = addr;
+ break;
+ case NPU_SET_WEIGHT1_LENGTH:
+ st.weight[1].length = cmds[1];
+ break;
+ case NPU_SET_SCALE1_BASE: // NPU_SET_WEIGHT2_BASE (U85)
+ if (ethosu_is_u65(edev))
+ st.scale[1].base = addr;
+ else
+ st.weight[2].base = addr;
+ break;
+ case NPU_SET_SCALE1_LENGTH: // NPU_SET_WEIGHT2_LENGTH (U85)
+ if (ethosu_is_u65(edev))
+ st.scale[1].length = cmds[1];
+ else
+ st.weight[2].length = cmds[1];
+ break;
+ case NPU_SET_WEIGHT3_BASE:
+ st.weight[3].base = addr;
+ break;
+ case NPU_SET_WEIGHT3_LENGTH:
+ st.weight[3].length = cmds[1];
+ break;
+
+ case NPU_SET_DMA0_SRC_REGION:
+ if (param & 0x100)
+ st.dma.src.region = -1;
+ else
+ st.dma.src.region = param & 0x7;
+ st.dma.mode = (param >> 9) & 0x3;
+ break;
+ case NPU_SET_DMA0_DST_REGION:
+ if (param & 0x100)
+ st.dma.dst.region = -1;
+ else
+ st.dma.dst.region = param & 0x7;
+ break;
+ case NPU_SET_DMA0_SIZE0:
+ st.dma.size0 = param;
+ break;
+ case NPU_SET_DMA0_SIZE1:
+ st.dma.size1 = param;
+ break;
+ case NPU_SET_DMA0_SRC_STRIDE0:
+ st.dma.src.stride[0] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_SRC_STRIDE1:
+ st.dma.src.stride[1] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_DST_STRIDE0:
+ st.dma.dst.stride[0] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_DST_STRIDE1:
+ st.dma.dst.stride[1] = ((s64)addr << 24) >> 24;
+ break;
+ case NPU_SET_DMA0_SRC:
+ st.dma.src.offset = addr;
+ break;
+ case NPU_SET_DMA0_DST:
+ st.dma.dst.offset = addr;
+ break;
+ case NPU_SET_DMA0_LEN:
+ st.dma.src.len = st.dma.dst.len = addr;
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (i = 0; i < NPU_BASEP_REGION_MAX; i++) {
+ if (!info->region_size[i])
+ continue;
+ dev_dbg(ddev->dev, "region %d max size: 0x%llx\n",
+ i, info->region_size[i]);
+ }
+
+ bo->info = no_free_ptr(info);
+ return 0;
+}
+
+/**
+ * ethosu_gem_cmdstream_create() - Create a GEM object and attach it to a handle.
+ * @file: DRM file.
+ * @ddev: DRM device.
+ * @size: Size of the GEM object to allocate.
+ * @data: User pointer to the command stream to copy and validate.
+ * @flags: Combination of drm_ethosu_bo_flags flags.
+ * @handle: Pointer holding the handle pointing to the new GEM object.
+ *
+ * Return: Zero on success, or a negative error code on failure.
+ */
+int ethosu_gem_cmdstream_create(struct drm_file *file,
+ struct drm_device *ddev,
+ u32 size, u64 data, u32 flags, u32 *handle)
+{
+ int ret;
+ struct drm_gem_dma_object *mem;
+ struct ethosu_gem_object *bo;
+
+ mem = drm_gem_dma_create(ddev, size);
+ if (IS_ERR(mem))
+ return PTR_ERR(mem);
+
+ bo = to_ethosu_bo(&mem->base);
+ bo->flags = flags;
+
+ ret = ethosu_gem_cmdstream_copy_and_validate(ddev,
+ (void __user *)(uintptr_t)data,
+ bo, size);
+ if (ret)
+ goto fail;
+
+ /*
+ * Allocate an ID in the IDR table where the object is registered;
+ * the returned handle is the ID userspace sees.
+ */
+ ret = drm_gem_handle_create(file, &mem->base, handle);
+
+fail:
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put(&mem->base);
+
+ return ret;
+}
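For the NHWC layout the validator's address math reduces to addr = base + y * stride_y + x * stride_x + c, and region_size[] tracks the largest addr + 1 seen. As a worked example under assumed values, a 16x16x8 int8 feature map with stride_x = 8 and stride_y = 128 places its last element (x = 15, y = 15, c = 7) at base + 15*128 + 15*8 + 7 = base + 2047, so the backing BO for that region must extend at least 2048 bytes past base.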
diff --git a/drivers/accel/ethosu/ethosu_gem.h b/drivers/accel/ethosu/ethosu_gem.h
new file mode 100644
index 000000000000..3922895a60fb
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_gem.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2025 Arm, Ltd. */
+
+#ifndef __ETHOSU_GEM_H__
+#define __ETHOSU_GEM_H__
+
+#include "ethosu_device.h"
+#include <drm/drm_gem_dma_helper.h>
+
+struct ethosu_validated_cmdstream_info {
+ u32 cmd_size;
+ u64 region_size[NPU_BASEP_REGION_MAX];
+ bool output_region[NPU_BASEP_REGION_MAX];
+};
+
+/**
+ * struct ethosu_gem_object - Driver specific GEM object.
+ */
+struct ethosu_gem_object {
+ /** @base: Inherit from drm_gem_dma_object. */
+ struct drm_gem_dma_object base;
+
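+ /** @info: Validated command-stream info; NULL for ordinary BOs. */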
+ struct ethosu_validated_cmdstream_info *info;
+
+ /** @flags: Combination of drm_ethosu_bo_flags flags. */
+ u32 flags;
+};
+
+static inline
+struct ethosu_gem_object *to_ethosu_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_dma_obj(obj), struct ethosu_gem_object, base);
+}
+
+struct drm_gem_object *ethosu_gem_create_object(struct drm_device *ddev,
+ size_t size);
+
+int ethosu_gem_create_with_handle(struct drm_file *file,
+ struct drm_device *ddev,
+ u64 *size, u32 flags, uint32_t *handle);
+
+int ethosu_gem_cmdstream_create(struct drm_file *file,
+ struct drm_device *ddev,
+ u32 size, u64 data, u32 flags, u32 *handle);
+
+#endif /* __ETHOSU_GEM_H__ */
diff --git a/drivers/accel/ethosu/ethosu_job.c b/drivers/accel/ethosu/ethosu_job.c
new file mode 100644
index 000000000000..26e7a2f64d71
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_job.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+/* Copyright 2025 Arm, Ltd. */
+
+#include <linux/bitfield.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
+#include <drm/ethosu_accel.h>
+
+#include "ethosu_device.h"
+#include "ethosu_drv.h"
+#include "ethosu_gem.h"
+#include "ethosu_job.h"
+
+#define JOB_TIMEOUT_MS 500
+
+static struct ethosu_job *to_ethosu_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct ethosu_job, base);
+}
+
+static const char *ethosu_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "ethosu";
+}
+
+static const char *ethosu_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "ethosu-npu";
+}
+
+static const struct dma_fence_ops ethosu_fence_ops = {
+ .get_driver_name = ethosu_fence_get_driver_name,
+ .get_timeline_name = ethosu_fence_get_timeline_name,
+};
+
+static void ethosu_job_hw_submit(struct ethosu_device *dev, struct ethosu_job *job)
+{
+ struct drm_gem_dma_object *cmd_bo = to_drm_gem_dma_obj(job->cmd_bo);
+ struct ethosu_validated_cmdstream_info *cmd_info = to_ethosu_bo(job->cmd_bo)->info;
+
+ for (int i = 0; i < job->region_cnt; i++) {
+ struct drm_gem_dma_object *bo;
+ int region = job->region_bo_num[i];
+
+ bo = to_drm_gem_dma_obj(job->region_bo[i]);
+ writel_relaxed(lower_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP(region));
+ writel_relaxed(upper_32_bits(bo->dma_addr), dev->regs + NPU_REG_BASEP_HI(region));
+ dev_dbg(dev->base.dev, "Region %d base addr = %pad\n", region, &bo->dma_addr);
+ }
+
+ if (job->sram_size) {
+ writel_relaxed(lower_32_bits(dev->sramphys),
+ dev->regs + NPU_REG_BASEP(ETHOSU_SRAM_REGION));
+ writel_relaxed(upper_32_bits(dev->sramphys),
+ dev->regs + NPU_REG_BASEP_HI(ETHOSU_SRAM_REGION));
+ dev_dbg(dev->base.dev, "Region %d base addr = %pad (SRAM)\n",
+ ETHOSU_SRAM_REGION, &dev->sramphys);
+ }
+
+ writel_relaxed(lower_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE);
+ writel_relaxed(upper_32_bits(cmd_bo->dma_addr), dev->regs + NPU_REG_QBASE_HI);
+ writel_relaxed(cmd_info->cmd_size, dev->regs + NPU_REG_QSIZE);
+
+ writel(CMD_TRANSITION_TO_RUN, dev->regs + NPU_REG_CMD);
+
+ dev_dbg(dev->base.dev,
+ "Submitted cmd at %pad to core\n", &cmd_bo->dma_addr);
+}
+
+static int ethosu_acquire_object_fences(struct ethosu_job *job)
+{
+ int i, ret;
+ struct drm_gem_object **bos = job->region_bo;
+ struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info;
+
+ for (i = 0; i < job->region_cnt; i++) {
+ bool is_write;
+
+ if (!bos[i])
+ break;
+
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
+ is_write = info->output_region[job->region_bo_num[i]];
+ ret = drm_sched_job_add_implicit_dependencies(&job->base, bos[i],
+ is_write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ethosu_attach_object_fences(struct ethosu_job *job)
+{
+ int i;
+ struct dma_fence *fence = job->inference_done_fence;
+ struct drm_gem_object **bos = job->region_bo;
+ struct ethosu_validated_cmdstream_info *info = to_ethosu_bo(job->cmd_bo)->info;
+
+ for (i = 0; i < job->region_cnt; i++)
+ if (info->output_region[job->region_bo_num[i]])
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
+}
+
+static int ethosu_job_push(struct ethosu_job *job)
+{
+ struct ww_acquire_ctx acquire_ctx;
+ int ret;
+
+ ret = drm_gem_lock_reservations(job->region_bo, job->region_cnt, &acquire_ctx);
+ if (ret)
+ return ret;
+
+ ret = ethosu_acquire_object_fences(job);
+ if (ret)
+ goto out;
+
+ ret = pm_runtime_resume_and_get(job->dev->base.dev);
+ if (!ret) {
+ guard(mutex)(&job->dev->sched_lock);
+
+ drm_sched_job_arm(&job->base);
+ job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished);
+ kref_get(&job->refcount); /* put by scheduler job completion */
+ drm_sched_entity_push_job(&job->base);
+ ethosu_attach_object_fences(job);
+ }
+
+out:
+ drm_gem_unlock_reservations(job->region_bo, job->region_cnt, &acquire_ctx);
+ return ret;
+}
+
+static void ethosu_job_cleanup(struct kref *ref)
+{
+ struct ethosu_job *job = container_of(ref, struct ethosu_job,
+ refcount);
+ unsigned int i;
+
+ pm_runtime_put_autosuspend(job->dev->base.dev);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+ for (i = 0; i < job->region_cnt; i++)
+ drm_gem_object_put(job->region_bo[i]);
+
+ drm_gem_object_put(job->cmd_bo);
+
+ kfree(job);
+}
+
+static void ethosu_job_put(struct ethosu_job *job)
+{
+ kref_put(&job->refcount, ethosu_job_cleanup);
+}
+
+static void ethosu_job_free(struct drm_sched_job *sched_job)
+{
+ struct ethosu_job *job = to_ethosu_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+ ethosu_job_put(job);
+}
+
+static struct dma_fence *ethosu_job_run(struct drm_sched_job *sched_job)
+{
+ struct ethosu_job *job = to_ethosu_job(sched_job);
+ struct ethosu_device *dev = job->dev;
+ struct dma_fence *fence = job->done_fence;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ dma_fence_init(fence, &ethosu_fence_ops, &dev->fence_lock,
+ dev->fence_context, ++dev->emit_seqno);
+ dma_fence_get(fence);
+
+ scoped_guard(mutex, &dev->job_lock) {
+ dev->in_flight_job = job;
+ ethosu_job_hw_submit(dev, job);
+ }
+
+ return fence;
+}
+
+static void ethosu_job_handle_irq(struct ethosu_device *dev)
+{
+ u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS);
+
+ if (status & (STATUS_BUS_STATUS | STATUS_CMD_PARSE_ERR)) {
+ dev_err(dev->base.dev, "Error IRQ - %x\n", status);
+ drm_sched_fault(&dev->sched);
+ return;
+ }
+
+ scoped_guard(mutex, &dev->job_lock) {
+ if (dev->in_flight_job) {
+ dma_fence_signal(dev->in_flight_job->done_fence);
+ dev->in_flight_job = NULL;
+ }
+ }
+}
+
+static irqreturn_t ethosu_job_irq_handler_thread(int irq, void *data)
+{
+ struct ethosu_device *dev = data;
+
+ ethosu_job_handle_irq(dev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ethosu_job_irq_handler(int irq, void *data)
+{
+ struct ethosu_device *dev = data;
+ u32 status = readl_relaxed(dev->regs + NPU_REG_STATUS);
+
+ if (!(status & STATUS_IRQ_RAISED))
+ return IRQ_NONE;
+
+ writel_relaxed(CMD_CLEAR_IRQ, dev->regs + NPU_REG_CMD);
+ return IRQ_WAKE_THREAD;
+}
+
+static enum drm_gpu_sched_stat ethosu_job_timedout(struct drm_sched_job *bad)
+{
+ struct ethosu_job *job = to_ethosu_job(bad);
+ struct ethosu_device *dev = job->dev;
+ bool running;
+ u32 *bocmds = to_drm_gem_dma_obj(job->cmd_bo)->vaddr;
+ u32 cmdaddr;
+
+ cmdaddr = readl_relaxed(dev->regs + NPU_REG_QREAD);
+ running = FIELD_GET(STATUS_STATE_RUNNING, readl_relaxed(dev->regs + NPU_REG_STATUS));
+
+ if (running) {
+ int ret;
+ u32 reg;
+
+ ret = readl_relaxed_poll_timeout(dev->regs + NPU_REG_QREAD,
+ reg,
+ reg != cmdaddr,
+ USEC_PER_MSEC, 100 * USEC_PER_MSEC);
+
+ /* If still running and progress is being made, just return */
+ if (!ret)
+ return DRM_GPU_SCHED_STAT_NO_HANG;
+ }
+
+ dev_err(dev->base.dev, "NPU sched timed out: NPU %s, cmdstream offset 0x%x: 0x%x\n",
+ running ? "running" : "stopped",
+ cmdaddr, bocmds[cmdaddr / 4]);
+
+ drm_sched_stop(&dev->sched, bad);
+
+ scoped_guard(mutex, &dev->job_lock)
+ dev->in_flight_job = NULL;
+
+ /* Proceed with reset now. */
+ pm_runtime_force_suspend(dev->base.dev);
+ pm_runtime_force_resume(dev->base.dev);
+
+ /* Restart the scheduler */
+ drm_sched_start(&dev->sched, 0);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static const struct drm_sched_backend_ops ethosu_sched_ops = {
+ .run_job = ethosu_job_run,
+ .timedout_job = ethosu_job_timedout,
+ .free_job = ethosu_job_free
+};
+
+int ethosu_job_init(struct ethosu_device *edev)
+{
+ struct device *dev = edev->base.dev;
+ struct drm_sched_init_args args = {
+ .ops = &ethosu_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .name = dev_name(dev),
+ .dev = dev,
+ };
+ int ret;
+
+ spin_lock_init(&edev->fence_lock);
+ ret = devm_mutex_init(dev, &edev->job_lock);
+ if (ret)
+ return ret;
+ ret = devm_mutex_init(dev, &edev->sched_lock);
+ if (ret)
+ return ret;
+
+ edev->irq = platform_get_irq(to_platform_device(dev), 0);
+ if (edev->irq < 0)
+ return edev->irq;
+
+ ret = devm_request_threaded_irq(dev, edev->irq,
+ ethosu_job_irq_handler,
+ ethosu_job_irq_handler_thread,
+ IRQF_SHARED, KBUILD_MODNAME,
+ edev);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ return ret;
+ }
+
+ edev->fence_context = dma_fence_context_alloc(1);
+
+ ret = drm_sched_init(&edev->sched, &args);
+ if (ret) {
+ dev_err(dev, "Failed to create scheduler: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ethosu_job_fini(struct ethosu_device *dev)
+{
+ drm_sched_fini(&dev->sched);
+}
+
+int ethosu_job_open(struct ethosu_file_priv *ethosu_priv)
+{
+ struct ethosu_device *dev = ethosu_priv->edev;
+ struct drm_gpu_scheduler *sched = &dev->sched;
+ int ret;
+
+ ret = drm_sched_entity_init(&ethosu_priv->sched_entity,
+ DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ WARN_ON(ret);
+ return ret;
+}
+
+void ethosu_job_close(struct ethosu_file_priv *ethosu_priv)
+{
+ struct drm_sched_entity *entity = &ethosu_priv->sched_entity;
+
+ drm_sched_entity_destroy(entity);
+}
+
+static int ethosu_ioctl_submit_job(struct drm_device *dev, struct drm_file *file,
+ struct drm_ethosu_job *job)
+{
+ struct ethosu_device *edev = to_ethosu_device(dev);
+ struct ethosu_file_priv *file_priv = file->driver_priv;
+ struct ethosu_job *ejob = NULL;
+ struct ethosu_validated_cmdstream_info *cmd_info;
+ int ret = 0;
+
+ /* BO region 2 is reserved if SRAM is used */
+ if (job->region_bo_handles[ETHOSU_SRAM_REGION] && job->sram_size)
+ return -EINVAL;
+
+ if (edev->npu_info.sram_size < job->sram_size)
+ return -EINVAL;
+
+ ejob = kzalloc(sizeof(*ejob), GFP_KERNEL);
+ if (!ejob)
+ return -ENOMEM;
+
+ kref_init(&ejob->refcount);
+
+ ejob->dev = edev;
+ ejob->sram_size = job->sram_size;
+
+ ejob->done_fence = kzalloc(sizeof(*ejob->done_fence), GFP_KERNEL);
+ if (!ejob->done_fence) {
+ ret = -ENOMEM;
+ /* The sched job is not initialized yet, so skip its cleanup */
+ goto out_put_job;
+ }
+
+ ret = drm_sched_job_init(&ejob->base,
+ &file_priv->sched_entity,
+ 1, NULL, file->client_id);
+ if (ret)
+ goto out_put_job;
+
+ ejob->cmd_bo = drm_gem_object_lookup(file, job->cmd_bo);
+ if (!ejob->cmd_bo) {
+ ret = -ENOENT;
+ goto out_cleanup_job;
+ }
+ cmd_info = to_ethosu_bo(ejob->cmd_bo)->info;
+ if (!cmd_info) {
+ ret = -EINVAL;
+ goto out_cleanup_job;
+ }
+
+ for (int i = 0; i < NPU_BASEP_REGION_MAX; i++) {
+ struct drm_gem_object *gem;
+
+ /* Can only omit a BO handle if the region is not used or used for SRAM */
+ if (!job->region_bo_handles[i] &&
+ (!cmd_info->region_size[i] || (i == ETHOSU_SRAM_REGION && job->sram_size)))
+ continue;
+
+ if (job->region_bo_handles[i] && !cmd_info->region_size[i]) {
+ dev_err(dev->dev,
+ "Cmdstream BO handle %d set for unused region %d\n",
+ job->region_bo_handles[i], i);
+ ret = -EINVAL;
+ goto out_cleanup_job;
+ }
+
+ gem = drm_gem_object_lookup(file, job->region_bo_handles[i]);
+ if (!gem) {
+ dev_err(dev->dev,
+ "Invalid BO handle %d for region %d\n",
+ job->region_bo_handles[i], i);
+ ret = -ENOENT;
+ goto out_cleanup_job;
+ }
+
+ ejob->region_bo[ejob->region_cnt] = gem;
+ ejob->region_bo_num[ejob->region_cnt] = i;
+ ejob->region_cnt++;
+
+ if (to_ethosu_bo(gem)->info) {
+ dev_err(dev->dev,
+ "Cmdstream BO handle %d cannot be reused for region %d\n",
+ job->region_bo_handles[i], i);
+ ret = -EINVAL;
+ goto out_cleanup_job;
+ }
+
+ /* Verify the command stream doesn't have accesses outside the BO */
+ if (cmd_info->region_size[i] > gem->size) {
+ dev_err(dev->dev,
+ "cmd stream region %d size greater than BO size (%llu > %zu)\n",
+ i, cmd_info->region_size[i], gem->size);
+ ret = -EOVERFLOW;
+ goto out_cleanup_job;
+ }
+ }
+ ret = ethosu_job_push(ejob);
+
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&ejob->base);
+out_put_job:
+ ethosu_job_put(ejob);
+
+ return ret;
+}
+
+int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ethosu_submit *args = data;
+ int ret = 0;
+ unsigned int i = 0;
+
+ if (args->pad) {
+ drm_dbg(dev, "Reserved field in drm_ethosu_submit struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ struct drm_ethosu_job __free(kvfree) *jobs =
+ kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL);
+ if (!jobs)
+ return -ENOMEM;
+
+ if (copy_from_user(jobs,
+ (void __user *)(uintptr_t)args->jobs,
+ args->job_count * sizeof(*jobs))) {
+ drm_dbg(dev, "Failed to copy incoming job array\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ ret = ethosu_ioctl_submit_job(dev, file, &jobs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
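+
+/*
+ * A minimal userspace sketch (illustrative only, not part of this patch):
+ * assuming the uapi header exposes DRM_IOCTL_ETHOSU_SUBMIT along with the
+ * structs handled above, a single-job submission could look like:
+ *
+ *	struct drm_ethosu_job job = {
+ *		.cmd_bo = cmd_handle,
+ *		.region_bo_handles = { [1] = in_handle, [3] = out_handle },
+ *	};
+ *	struct drm_ethosu_submit args = {
+ *		.jobs = (__u64)(uintptr_t)&job,
+ *		.job_count = 1,
+ *	};
+ *	ioctl(drm_fd, DRM_IOCTL_ETHOSU_SUBMIT, &args);
+ *
+ * The region indices and handle names here are hypothetical.
+ */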
diff --git a/drivers/accel/ethosu/ethosu_job.h b/drivers/accel/ethosu/ethosu_job.h
new file mode 100644
index 000000000000..ff1cf448d094
--- /dev/null
+++ b/drivers/accel/ethosu/ethosu_job.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+/* Copyright 2025 Arm, Ltd. */
+
+#ifndef __ETHOSU_JOB_H__
+#define __ETHOSU_JOB_H__
+
+#include <linux/kref.h>
+#include <drm/gpu_scheduler.h>
+
+struct ethosu_device;
+struct ethosu_file_priv;
+
+struct ethosu_job {
+ struct drm_sched_job base;
+ struct ethosu_device *dev;
+
+ struct drm_gem_object *cmd_bo;
+ struct drm_gem_object *region_bo[NPU_BASEP_REGION_MAX];
+ u8 region_bo_num[NPU_BASEP_REGION_MAX];
+ u8 region_cnt;
+ u32 sram_size;
+
+ /* Fence to be signaled by drm-sched once it's done with the job */
+ struct dma_fence *inference_done_fence;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct kref refcount;
+};
+
+int ethosu_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file);
+
+int ethosu_job_init(struct ethosu_device *dev);
+void ethosu_job_fini(struct ethosu_device *dev);
+int ethosu_job_open(struct ethosu_file_priv *ethosu_priv);
+void ethosu_job_close(struct ethosu_file_priv *ethosu_priv);
+
+#endif
diff --git a/drivers/accel/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index 1919fbb169c7..6d1506acbd72 100644
--- a/drivers/accel/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -27,3 +27,26 @@ config DRM_ACCEL_HABANALABS
To compile this driver as a module, choose M here: the
module will be called habanalabs.
+
+if DRM_ACCEL_HABANALABS
+
+config HL_HLDIO
+ bool "Habanalabs NVMe Direct I/O (HLDIO)"
+ depends on PCI_P2PDMA
+ depends on BLOCK
+ help
+ Enable NVMe peer-to-peer direct I/O support for Habanalabs AI
+ accelerators.
+
+ This allows direct data transfers between NVMe storage devices
+ and Habanalabs accelerators without involving system memory,
+ using PCI peer-to-peer DMA capabilities.
+
+ Requirements:
+ - CONFIG_PCI_P2PDMA=y
+ - NVMe device and Habanalabs accelerator under same PCI root complex
+ - IOMMU disabled or in passthrough mode
+ - Hardware supporting PCI P2P DMA
+
+ If unsure, say N.
+
+endif # DRM_ACCEL_HABANALABS
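+
+# Example .config fragment (assuming the dependencies above are satisfied):
+#   CONFIG_DRM_ACCEL_HABANALABS=m
+#   CONFIG_PCI_P2PDMA=y
+#   CONFIG_HL_HLDIO=y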
diff --git a/drivers/accel/habanalabs/common/Makefile b/drivers/accel/habanalabs/common/Makefile
index e6abffea9f87..b6d00de09db5 100644
--- a/drivers/accel/habanalabs/common/Makefile
+++ b/drivers/accel/habanalabs/common/Makefile
@@ -13,3 +13,8 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_submission.o common/firmware_if.o \
common/security.o common/state_dump.o \
common/memory_mgr.o common/decoder.o
+
+# Conditionally add HLDIO support
+ifdef CONFIG_HL_HLDIO
+HL_COMMON_FILES += common/hldio.o
endif
\ No newline at end of file
diff --git a/drivers/accel/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 4b391807e5f2..5f0820b19ccb 100644
--- a/drivers/accel/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
@@ -6,6 +6,7 @@
*/
#include "habanalabs.h"
+#include "hldio.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <linux/pci.h>
@@ -602,6 +603,198 @@ static int engines_show(struct seq_file *s, void *data)
return 0;
}
+#ifdef CONFIG_HL_HLDIO
+/* DIO debugfs functions following the standard pattern */
+static int dio_ssd2hl_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme) {
+ seq_puts(s, "NVMe Direct I/O not supported\\n");
+ return 0;
+ }
+
+ seq_puts(s, "Usage: echo \"fd=N va=0xADDR off=N len=N\" > dio_ssd2hl\n");
+ seq_printf(s, "Last transfer: %zu bytes\\n", dev_entry->dio_stats.last_len_read);
+ seq_puts(s, "Note: All parameters must be page-aligned (4KB)\\n");
+
+ return 0;
+}
+
+static ssize_t dio_ssd2hl_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_ctx *ctx = hdev->kernel_ctx;
+ char kbuf[128];
+ u64 device_va = 0, off_bytes = 0, len_bytes = 0;
+ u32 fd = 0;
+ size_t len_read = 0;
+ int rc, parsed;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ /* Parse: fd=N va=0xADDR off=N len=N */
+ parsed = sscanf(kbuf, "fd=%u va=0x%llx off=%llu len=%llu",
+ &fd, &device_va, &off_bytes, &len_bytes);
+ if (parsed != 4) {
+ dev_err(hdev->dev, "Invalid format. Expected: fd=N va=0xADDR off=N len=N\\n");
+ return -EINVAL;
+ }
+
+ /* Validate file descriptor */
+ if (fd == 0) {
+ dev_err(hdev->dev, "Invalid file descriptor: %u\\n", fd);
+ return -EINVAL;
+ }
+
+ /* Validate alignment requirements */
+ if (!IS_ALIGNED(device_va, PAGE_SIZE) ||
+ !IS_ALIGNED(off_bytes, PAGE_SIZE) ||
+ !IS_ALIGNED(len_bytes, PAGE_SIZE)) {
+ dev_err(hdev->dev,
+ "All parameters must be page-aligned (4KB)\\n");
+ return -EINVAL;
+ }
+
+ /* Validate transfer size */
+ if (len_bytes == 0 || len_bytes > SZ_1G) {
+ dev_err(hdev->dev, "Invalid length: %llu (max 1GB)\\n",
+ len_bytes);
+ return -EINVAL;
+ }
+
+ dev_dbg(hdev->dev, "DIO SSD2HL: fd=%u va=0x%llx off=%llu len=%llu\\n",
+ fd, device_va, off_bytes, len_bytes);
+
+ rc = hl_dio_ssd2hl(hdev, ctx, fd, device_va, off_bytes, len_bytes, &len_read);
+ if (rc < 0) {
+ dev_entry->dio_stats.failed_ops++;
+ dev_err(hdev->dev, "SSD2HL operation failed: %d\\n", rc);
+ return rc;
+ }
+
+ /* Update statistics */
+ dev_entry->dio_stats.total_ops++;
+ dev_entry->dio_stats.successful_ops++;
+ dev_entry->dio_stats.bytes_transferred += len_read;
+ dev_entry->dio_stats.last_len_read = len_read;
+
+ dev_dbg(hdev->dev, "DIO SSD2HL completed: %zu bytes transferred\\n", len_read);
+
+ return count;
+}
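+
+/*
+ * Example invocation (hypothetical values; the debugfs root path depends on
+ * the accel minor). A process that already holds an O_DIRECT fd, say fd 5,
+ * can start a 1 MiB page-aligned read into device VA 0x1000000000 with:
+ *
+ *   echo "fd=5 va=0x1000000000 off=0 len=1048576" > \
+ *     /sys/kernel/debug/accel/<n>/dio_ssd2hl
+ */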
+
+static int dio_hl2ssd_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "HL2SSD (device-to-SSD) transfers not implemented\\n");
+ return 0;
+}
+
+static ssize_t dio_hl2ssd_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ dev_dbg(hdev->dev, "HL2SSD operation not implemented\\n");
+ return -EOPNOTSUPP;
+}
+
+static int dio_stats_show(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ struct hl_dio_stats *stats = &dev_entry->dio_stats;
+ u64 avg_bytes_per_op = 0, success_rate = 0;
+
+ if (!hdev->asic_prop.supports_nvme) {
+ seq_puts(s, "NVMe Direct I/O not supported\\n");
+ return 0;
+ }
+
+ if (stats->successful_ops > 0)
+ avg_bytes_per_op = stats->bytes_transferred / stats->successful_ops;
+
+ if (stats->total_ops > 0)
+ success_rate = (stats->successful_ops * 100) / stats->total_ops;
+
+ seq_puts(s, "=== Habanalabs Direct I/O Statistics ===\\n");
+ seq_printf(s, "Total operations: %llu\\n", stats->total_ops);
+ seq_printf(s, "Successful ops: %llu\\n", stats->successful_ops);
+ seq_printf(s, "Failed ops: %llu\\n", stats->failed_ops);
+ seq_printf(s, "Success rate: %llu%%\\n", success_rate);
+ seq_printf(s, "Total bytes: %llu\\n", stats->bytes_transferred);
+ seq_printf(s, "Avg bytes per op: %llu\\n", avg_bytes_per_op);
+ seq_printf(s, "Last transfer: %zu bytes\\n", stats->last_len_read);
+
+ return 0;
+}
+
+static int dio_reset_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "Write '1' to reset DIO statistics\\n");
+ return 0;
+}
+
+static ssize_t dio_reset_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[8];
+ unsigned long val;
+ int rc;
+
+ if (!hdev->asic_prop.supports_nvme)
+ return -EOPNOTSUPP;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ kbuf[count] = 0;
+
+ rc = kstrtoul(kbuf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (val == 1) {
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+ dev_dbg(hdev->dev, "DIO statistics reset\\n");
+ } else {
+ dev_err(hdev->dev, "Write '1' to reset statistics\\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+#endif /* CONFIG_HL_HLDIO */
+
static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -788,6 +981,113 @@ static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
}
}
+static void dump_cfg_access_entry(struct hl_device *hdev,
+ struct hl_debugfs_cfg_access_entry *entry)
+{
+ char *access_type = "";
+ struct tm tm;
+
+ switch (entry->debugfs_type) {
+ case DEBUGFS_READ32:
+ access_type = "READ32 from";
+ break;
+ case DEBUGFS_WRITE32:
+ access_type = "WRITE32 to";
+ break;
+ case DEBUGFS_READ64:
+ access_type = "READ64 from";
+ break;
+ case DEBUGFS_WRITE64:
+ access_type = "WRITE64 to";
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid DEBUGFS access type (%u)\n", entry->debugfs_type);
+ return;
+ }
+
+ time64_to_tm(entry->seconds_since_epoch, 0, &tm);
+ dev_info(hdev->dev,
+ "%ld-%02d-%02d %02d:%02d:%02d (UTC): %s %#llx\n", tm.tm_year + 1900, tm.tm_mon + 1,
+ tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, access_type, entry->addr);
+}
+
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+ struct hl_debugfs_cfg_access *dbgfs = &hdev->debugfs_cfg_accesses;
+ u32 i, head, count = 0;
+ time64_t entry_time, now;
+ unsigned long flags;
+
+ now = ktime_get_real_seconds();
+
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ head = dbgfs->head;
+ if (head == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i = head - 1;
+
+ /* Walk back until timeout or invalid entry */
+ while (dbgfs->cfg_access_list[i].valid) {
+ entry_time = dbgfs->cfg_access_list[i].seconds_since_epoch;
+ /* Stop when entry is older than timeout */
+ if (now - entry_time > HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC)
+ break;
+
+ {
+ struct hl_debugfs_cfg_access_entry entry = dbgfs->cfg_access_list[i];
+
+ /*
+ * Copy the entry out under the lock, then drop the lock
+ * while printing to minimize the time spent holding it.
+ */
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+ dump_cfg_access_entry(hdev, &entry);
+ spin_lock_irqsave(&dbgfs->lock, flags);
+ }
+
+ /* mark consumed */
+ dbgfs->cfg_access_list[i].valid = false;
+
+ if (i == 0)
+ i = HL_DBGFS_CFG_ACCESS_HIST_LEN - 1;
+ else
+ i--;
+ count++;
+ if (count >= HL_DBGFS_CFG_ACCESS_HIST_LEN)
+ break;
+ }
+ spin_unlock_irqrestore(&dbgfs->lock, flags);
+}
+
+static void check_if_cfg_access_and_log(struct hl_device *hdev, u64 addr, size_t access_size,
+ enum debugfs_access_type access_type)
+{
+ struct hl_debugfs_cfg_access *dbgfs_cfg_accesses = &hdev->debugfs_cfg_accesses;
+ struct pci_mem_region *mem_reg = &hdev->pci_mem_region[PCI_REGION_CFG];
+ struct hl_debugfs_cfg_access_entry *new_entry;
+ unsigned long flags;
+
+ /* Check if address is in config memory */
+ if (addr >= mem_reg->region_base &&
+ mem_reg->region_size >= access_size &&
+ addr <= mem_reg->region_base + mem_reg->region_size - access_size) {
+
+ spin_lock_irqsave(&dbgfs_cfg_accesses->lock, flags);
+
+ new_entry = &dbgfs_cfg_accesses->cfg_access_list[dbgfs_cfg_accesses->head];
+ new_entry->seconds_since_epoch = ktime_get_real_seconds();
+ new_entry->addr = addr;
+ new_entry->debugfs_type = access_type;
+ new_entry->valid = true;
+ dbgfs_cfg_accesses->head = (dbgfs_cfg_accesses->head + 1)
+ % HL_DBGFS_CFG_ACCESS_HIST_LEN;
+
+ spin_unlock_irqrestore(&dbgfs_cfg_accesses->lock, flags);
+
+ }
+}
+
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type)
{
@@ -805,6 +1105,7 @@ static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
return rc;
}
+ check_if_cfg_access_and_log(hdev, addr, acc_size, acc_type);
rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
if (rc) {
dev_err(hdev->dev,
@@ -1525,6 +1826,13 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"mmu", mmu_show, mmu_asid_va_write},
{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
{"engines", engines_show, NULL},
+#ifdef CONFIG_HL_HLDIO
+ /* DIO entries - only created if NVMe is supported */
+ {"dio_ssd2hl", dio_ssd2hl_show, dio_ssd2hl_write},
+ {"dio_stats", dio_stats_show, NULL},
+ {"dio_reset", dio_reset_show, dio_reset_write},
+ {"dio_hl2ssd", dio_hl2ssd_show, dio_hl2ssd_write},
+#endif
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
@@ -1723,6 +2031,11 @@ static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_ent
&hdev->asic_prop.server_type);
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
+ /* Skip DIO entries if NVMe is not supported */
+ if (strncmp(hl_debugfs_list[i].name, "dio_", 4) == 0 &&
+ !hdev->asic_prop.supports_nvme)
+ continue;
+
debugfs_create_file(hl_debugfs_list[i].name,
0644,
root,
@@ -1762,6 +2075,14 @@ int hl_debugfs_device_init(struct hl_device *hdev)
spin_lock_init(&dev_entry->userptr_spinlock);
mutex_init(&dev_entry->ctx_mem_hash_mutex);
+ spin_lock_init(&hdev->debugfs_cfg_accesses.lock);
+ hdev->debugfs_cfg_accesses.head = 0; /* already zero by alloc but explicit init is fine */
+
+#ifdef CONFIG_HL_HLDIO
+ /* Initialize DIO statistics */
+ memset(&dev_entry->dio_stats, 0, sizeof(dev_entry->dio_stats));
+#endif
+
return 0;
}
@@ -1780,6 +2101,7 @@ void hl_debugfs_device_fini(struct hl_device *hdev)
vfree(entry->state_dump[i]);
kfree(entry->entry_arr);
+
}
void hl_debugfs_add_device(struct hl_device *hdev)
@@ -1792,6 +2114,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
if (!hdev->asic_prop.fw_security_enabled)
add_secured_nodes(dev_entry, dev_entry->root);
+
}
void hl_debugfs_add_file(struct hl_fpriv *hpriv)
@@ -1924,3 +2247,4 @@ void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
up_write(&dev_entry->state_dump_sem);
}
+
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 80fa08bf57bd..999c92d7036e 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -1630,6 +1630,11 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
reset_upon_device_release = hdev->reset_upon_device_release && from_dev_release;
+ if (hdev->cpld_shutdown) {
+ dev_err(hdev->dev, "Cannot reset device, cpld is shutdown! Device is NOT usable\n");
+ return -EIO;
+ }
+
if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
return 0;
@@ -2576,6 +2581,14 @@ void hl_device_fini(struct hl_device *hdev)
if (rc)
dev_err(hdev->dev, "hw_fini failed in device fini while removing device %d\n", rc);
+ /* Reset the H/W (if it accessible). It will be in idle state after this returns */
+ if (!hdev->cpld_shutdown) {
+ rc = hdev->asic_funcs->hw_fini(hdev, true, false);
+ if (rc)
+ dev_err(hdev->dev,
+ "hw_fini failed in device fini while removing device %d\n", rc);
+ }
+
hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
@@ -2943,3 +2956,13 @@ void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *eve
mutex_unlock(&clk_throttle->lock);
}
+
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask)
+{
+ hl_handle_critical_hw_err(hdev, event_id, event_mask);
+ *event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE;
+
+ /* Avoid any new accesses to the H/W */
+ hdev->disabled = true;
+ hdev->cpld_shutdown = true;
+}
diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index 6f27ce4fa01b..d94c2ba22a6a 100644
--- a/drivers/accel/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -90,7 +90,9 @@ struct hl_fpriv;
#define HL_COMMON_USER_CQ_INTERRUPT_ID 0xFFF
#define HL_COMMON_DEC_INTERRUPT_ID 0xFFE
-#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_STATE_DUMP_HIST_LEN 5
+#define HL_DBGFS_CFG_ACCESS_HIST_LEN 20
+#define HL_DBGFS_CFG_ACCESS_HIST_TIMEOUT_SEC 2 /* 2s */
/* Default value for device reset trigger , an invalid value */
#define HL_RESET_TRIGGER_DEFAULT 0xFF
@@ -702,6 +704,7 @@ struct hl_hints_range {
* @supports_advanced_cpucp_rc: true if new cpucp opcodes are supported.
* @supports_engine_modes: true if changing engines/engine_cores modes is supported.
* @support_dynamic_resereved_fw_size: true if we support dynamic reserved size for fw.
+ * @supports_nvme: indicates whether the asic supports NVMe P2P DMA.
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -822,6 +825,7 @@ struct asic_fixed_properties {
u8 supports_advanced_cpucp_rc;
u8 supports_engine_modes;
u8 support_dynamic_resereved_fw_size;
+ u8 supports_nvme;
};
/**
@@ -2274,6 +2278,9 @@ struct hl_vm {
u8 init_done;
};
+#ifdef CONFIG_HL_HLDIO
+#include "hldio.h"
+#endif
/*
* DEBUG, PROFILING STRUCTURE
@@ -2344,7 +2351,6 @@ struct hl_fpriv {
struct mutex ctx_lock;
};
-
/*
* DebugFS
*/
@@ -2372,6 +2378,7 @@ struct hl_debugfs_entry {
struct hl_dbg_device_entry *dev_entry;
};
+
/**
* struct hl_dbg_device_entry - ASIC specific debugfs manager.
* @root: root dentry.
@@ -2403,6 +2410,7 @@ struct hl_debugfs_entry {
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
* @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
+ * @dio_stats: Direct I/O statistics
*/
struct hl_dbg_device_entry {
struct dentry *root;
@@ -2434,6 +2442,35 @@ struct hl_dbg_device_entry {
u8 i2c_addr;
u8 i2c_reg;
u8 i2c_len;
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio_stats dio_stats;
+#endif
+};
+
+/**
+ * struct hl_debugfs_cfg_access_entry - single debugfs config access object, member of
+ * hl_debugfs_cfg_access.
+ * @seconds_since_epoch: seconds since January 1, 1970, used for time comparisons.
+ * @debugfs_type: the debugfs operation requested, can be READ32, WRITE32, READ64 or WRITE64.
+ * @addr: the requested address to access.
+ * @valid: if set, this entry has valid data for dumping at interrupt time.
+ */
+struct hl_debugfs_cfg_access_entry {
+ time64_t seconds_since_epoch;
+ enum debugfs_access_type debugfs_type;
+ u64 addr;
+ bool valid;
+};
+
+/**
+ * struct hl_debugfs_cfg_access - saves debugfs config region access requests history.
+ * @cfg_access_list: list of objects describing config region access requests.
+ * @head: next valid index to add new entry to in cfg_access_list.
+ * @lock: protects @head and the entries in @cfg_access_list.
+ */
+struct hl_debugfs_cfg_access {
+ struct hl_debugfs_cfg_access_entry cfg_access_list[HL_DBGFS_CFG_ACCESS_HIST_LEN];
+ u32 head;
+ spinlock_t lock; /* protects head and entries */
};
/**
@@ -3281,6 +3318,7 @@ struct eq_heartbeat_debug_info {
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
+ * @debugfs_cfg_accesses: list of last debugfs config region accesses.
* @cb_pool: list of pre allocated CBs.
* @cb_pool_lock: protects the CB pool.
* @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
@@ -3305,6 +3343,7 @@ struct eq_heartbeat_debug_info {
* @captured_err_info: holds information about errors.
* @reset_info: holds current device reset information.
* @heartbeat_debug_info: counters used to debug heartbeat failures.
+ * @hldio: describes habanalabs direct storage interaction interface.
* @irq_affinity_mask: mask of available CPU cores for user and decoder interrupt handling.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @fw_inner_major_ver: the major of current loaded preboot inner version.
@@ -3357,6 +3396,7 @@ struct eq_heartbeat_debug_info {
* addresses.
* @is_in_dram_scrub: true if dram scrub operation is on going.
* @disabled: is device disabled.
+ * @cpld_shutdown: is cpld shutdown.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
@@ -3461,6 +3501,7 @@ struct hl_device {
struct hwmon_chip_info *hl_chip_info;
struct hl_dbg_device_entry hl_debugfs;
+ struct hl_debugfs_cfg_access debugfs_cfg_accesses;
struct list_head cb_pool;
spinlock_t cb_pool_lock;
@@ -3496,7 +3537,9 @@ struct hl_device {
struct hl_reset_info reset_info;
struct eq_heartbeat_debug_info heartbeat_debug_info;
-
+#ifdef CONFIG_HL_HLDIO
+ struct hl_dio hldio;
+#endif
cpumask_t irq_affinity_mask;
u32 *stream_master_qid_arr;
@@ -3532,6 +3575,7 @@ struct hl_device {
u16 cpu_pci_msb_addr;
u8 is_in_dram_scrub;
u8 disabled;
+ u8 cpld_shutdown;
u8 late_init_done;
u8 hwmon_initialized;
u8 reset_on_lockup;
@@ -4089,6 +4133,7 @@ void hl_init_cpu_for_irq(struct hl_device *hdev);
void hl_set_irq_affinity(struct hl_device *hdev, int irq);
void hl_eq_heartbeat_event_handle(struct hl_device *hdev);
void hl_handle_clk_change_event(struct hl_device *hdev, u16 event_type, u64 *event_mask);
+void hl_eq_cpld_shutdown_event_handle(struct hl_device *hdev, u16 event_id, u64 *event_mask);
#ifdef CONFIG_DEBUG_FS
@@ -4110,6 +4155,7 @@ void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
unsigned long length);
+void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev);
#else
@@ -4185,6 +4231,10 @@ static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
{
}
+static inline void hl_debugfs_cfg_access_history_dump(struct hl_device *hdev)
+{
+}
+
#endif
/* Security */
diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index dc80ca921d90..fdfdabc85e54 100644
--- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -961,6 +961,12 @@ static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *
case HL_PASSTHROUGH_VERSIONS:
need_input_buff = false;
break;
+ case HL_GET_ERR_COUNTERS_CMD:
+ need_input_buff = true;
+ break;
+ case HL_GET_P_STATE:
+ need_input_buff = false;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/accel/habanalabs/common/hldio.c b/drivers/accel/habanalabs/common/hldio.c
new file mode 100644
index 000000000000..083ae5610875
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2024 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+#include "hldio.h"
+#include <linux/version.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/blkdev.h>
+#include <linux/vmalloc.h>
+
+/*
+ * NVMe Direct I/O implementation for habanalabs driver
+ *
+ * ASSUMPTIONS
+ * ===========
+ * 1. No IOMMU (well, technically it can work with an IOMMU, but then it is almost useless).
+ * 2. Only READ operations (can extend in the future).
+ * 3. No sparse files (can overcome this in the future).
+ * 4. Kernel version >= 6.9
+ * 5. Requiring page alignment is OK (I don't see a solution to this one right
+ * now; how do we read partial pages?)
+ * 6. Kernel compiled with CONFIG_PCI_P2PDMA. This requires a CUSTOM kernel.
+ * Theoretically I have a slight idea of how this could be solved, but it
+ * is probably unacceptable for upstream. Also it may not work in the end.
+ * 7. Either make sure our cards and disks are under the same PCI bridge, or
+ * compile a custom kernel to hack around this.
+ */
+
+#define IO_STABILIZE_TIMEOUT 10000000 /* 10 seconds in microseconds */
+
+/*
+ * This struct contains all the useful data I could milk out of the file handle
+ * provided by the user.
+ * @TODO: right now it is retrieved on each IO, but can be done once with some
+ * dedicated IOCTL, call it for example HL_REGISTER_HANDLE.
+ */
+struct hl_dio_fd {
+ /* Back pointer in case we need it in async completion */
+ struct hl_ctx *ctx;
+ /* Associated fd struct */
+ struct file *filp;
+};
+
+/*
+ * This is a single IO descriptor
+ */
+struct hl_direct_io {
+ struct hl_dio_fd f;
+ struct kiocb kio;
+ struct bio_vec *bv;
+ struct iov_iter iter;
+ u64 device_va;
+ u64 off_bytes;
+ u64 len_bytes;
+ u32 type;
+};
+
+bool hl_device_supports_nvme(struct hl_device *hdev)
+{
+ return hdev->asic_prop.supports_nvme;
+}
+
+static int hl_dio_fd_register(struct hl_ctx *ctx, int fd, struct hl_dio_fd *f)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct block_device *bd;
+ struct super_block *sb;
+ struct inode *inode;
+ struct gendisk *gd;
+ struct device *disk_dev;
+ int rc;
+
+ f->filp = fget(fd);
+ if (!f->filp) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ if (!(f->filp->f_flags & O_DIRECT)) {
+ dev_err(hdev->dev, "file is not in the direct mode\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ if (!f->filp->f_op->read_iter) {
+ dev_err(hdev->dev, "read iter is not supported, need to fall back to legacy\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+
+ inode = file_inode(f->filp);
+ sb = inode->i_sb;
+ bd = sb->s_bdev;
+ gd = bd ? bd->bd_disk : NULL;
+
+ /* Validate the block device before dereferencing it any further */
+ if (!bd || !gd) {
+ dev_err(hdev->dev, "invalid block device\n");
+ rc = -ENODEV;
+ goto fput;
+ }
+
+ /* i_blocks is counted in 512-byte units */
+ if (inode->i_blocks << SECTOR_SHIFT < i_size_read(inode)) {
+ dev_err(hdev->dev, "sparse files are not currently supported\n");
+ rc = -EINVAL;
+ goto fput;
+ }
+ /* Get the underlying device from the block device */
+ disk_dev = disk_to_dev(gd);
+ if (!dma_pci_p2pdma_supported(disk_dev)) {
+ dev_err(hdev->dev, "device does not support PCI P2P DMA\n");
+ rc = -EOPNOTSUPP;
+ goto fput;
+ }
+
+ /*
+ * @TODO: Maybe we need additional checks here
+ */
+
+ f->ctx = ctx;
+ rc = 0;
+
+ goto out;
+fput:
+ fput(f->filp);
+out:
+ return rc;
+}
+
+static void hl_dio_fd_unregister(struct hl_dio_fd *f)
+{
+ fput(f->filp);
+}
+
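+/*
+ * In-flight IOs are tracked with a per-cpu counter: the IO hot path only
+ * touches its local counter, and only the rare drain in hl_dio_stop() pays
+ * the cost of summing across all CPUs.
+ */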
+static long hl_dio_count_io(struct hl_device *hdev)
+{
+ s64 sum = 0;
+ int i;
+
+ for_each_possible_cpu(i)
+ sum += per_cpu(*hdev->hldio.inflight_ios, i);
+
+ return sum;
+}
+
+static bool hl_dio_get_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ if (hdev->hldio.io_enabled) {
+ this_cpu_inc(*hdev->hldio.inflight_ios);
+
+ /*
+ * Re-check after incrementing: a concurrent hl_dio_stop()
+ * clearing io_enabled must not miss this IO while draining.
+ */
+ if (!hdev->hldio.io_enabled) {
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+ return false;
+ }
+
+ hl_ctx_get(ctx);
+
+ return true;
+ }
+
+ return false;
+}
+
+static void hl_dio_put_iopath(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+
+ hl_ctx_put(ctx);
+ this_cpu_dec(*hdev->hldio.inflight_ios);
+}
+
+static void hl_dio_set_io_enabled(struct hl_device *hdev, bool enabled)
+{
+ hdev->hldio.io_enabled = enabled;
+}
+
+static bool hl_dio_validate_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ if ((u64)io->device_va & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "device address must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->len_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO length must be 4K aligned\n");
+ return false;
+ }
+
+ if (io->off_bytes & ~PAGE_MASK) {
+ dev_dbg(hdev->dev, "IO offset must be 4K aligned\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct page *hl_dio_va2page(struct hl_device *hdev, struct hl_ctx *ctx, u64 device_va)
+{
+ struct hl_dio *hldio = &hdev->hldio;
+ u64 device_pa;
+ int rc, i;
+
+ rc = hl_mmu_va_to_pa(ctx, device_va, &device_pa);
+ if (rc) {
+ dev_err(hdev->dev, "device virtual address translation error: %#llx (%d)",
+ device_va, rc);
+ return NULL;
+ }
+
+ for (i = 0 ; i < hldio->np2prs ; ++i) {
+ if (device_pa >= hldio->p2prs[i].device_pa &&
+ device_pa < hldio->p2prs[i].device_pa + hldio->p2prs[i].size)
+ return hldio->p2prs[i].p2ppages[(device_pa - hldio->p2prs[i].device_pa) >>
+ PAGE_SHIFT];
+ }
+
+ return NULL;
+}
+
+static ssize_t hl_direct_io(struct hl_device *hdev, struct hl_direct_io *io)
+{
+ u64 npages, device_va;
+ ssize_t rc;
+ int i;
+
+ if (!hl_dio_validate_io(hdev, io))
+ return -EINVAL;
+
+ if (!hl_dio_get_iopath(io->f.ctx)) {
+ dev_info(hdev->dev, "can't schedule a new IO, IO is disabled\n");
+ return -ESHUTDOWN;
+ }
+
+ init_sync_kiocb(&io->kio, io->f.filp);
+ io->kio.ki_pos = io->off_bytes;
+
+ npages = (io->len_bytes >> PAGE_SHIFT);
+
+ /* @TODO: this can be implemented smarter, vmalloc in iopath is not
+ * ideal. Maybe some variation of genpool. Number of pages may differ
+ * greatly, so maybe even use pools of different sizes and chose the
+ * closest one.
+ */
+ io->bv = vzalloc(npages * sizeof(struct bio_vec));
+ if (!io->bv) {
+ /* Drop the iopath reference taken above before bailing out */
+ hl_dio_put_iopath(io->f.ctx);
+ return -ENOMEM;
+ }
+
+ for (i = 0, device_va = io->device_va; i < npages ; ++i, device_va += PAGE_SIZE) {
+ io->bv[i].bv_page = hl_dio_va2page(hdev, io->f.ctx, device_va);
+ if (!io->bv[i].bv_page) {
+ dev_err(hdev->dev, "error getting page struct for device va %#llx",
+ device_va);
+ rc = -EFAULT;
+ goto cleanup;
+ }
+ io->bv[i].bv_offset = 0;
+ io->bv[i].bv_len = PAGE_SIZE;
+ }
+
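+ /*
+ * Hand the P2P pages to the filesystem as a bvec-backed iterator;
+ * ->read_iter() then drives the block stack, which can DMA directly
+ * into the device BAR pages resolved above.
+ */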
+ iov_iter_bvec(&io->iter, io->type, io->bv, npages, io->len_bytes);
+ if (io->f.filp->f_op && io->f.filp->f_op->read_iter)
+ rc = io->f.filp->f_op->read_iter(&io->kio, &io->iter);
+ else
+ rc = -EINVAL;
+
+cleanup:
+ vfree(io->bv);
+ hl_dio_put_iopath(io->f.ctx);
+
+ dev_dbg(hdev->dev, "IO ended with %ld\n", rc);
+
+ return rc;
+}
+
+/*
+ * @TODO: This function can be used as a callback for io completion under
+ * kio->ki_complete in order to implement async IO.
+ * Note that on more recent kernels there is no ret2.
+ */
+__maybe_unused static void hl_direct_io_complete(struct kiocb *kio, long ret, long ret2)
+{
+ struct hl_direct_io *io = container_of(kio, struct hl_direct_io, kio);
+
+ dev_dbg(io->f.ctx->hdev->dev, "IO completed with %ld\n", ret);
+
+ /* Do something to copy result to user / notify completion */
+
+ hl_dio_put_iopath(io->f.ctx);
+
+ hl_dio_fd_unregister(&io->f);
+}
+
+/*
+ * DMA disk to ASIC, wait for results. Must be invoked from the user context
+ */
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{
+ struct hl_direct_io *io;
+ ssize_t rc;
+
+ dev_dbg(hdev->dev, "SSD2HL fd=%d va=%#llx len=%#lx\n", fd, device_va, len_bytes);
+
+ io = kzalloc(sizeof(*io), GFP_KERNEL);
+ if (!io) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ *io = (struct hl_direct_io){
+ .device_va = device_va,
+ .len_bytes = len_bytes,
+ .off_bytes = off_bytes,
+ .type = READ,
+ };
+
+ rc = hl_dio_fd_register(ctx, fd, &io->f);
+ if (rc)
+ goto kfree_io;
+
+ rc = hl_direct_io(hdev, io);
+ if (rc >= 0) {
+ *len_read = rc;
+ rc = 0;
+ }
+
+ /* This shall be called only in the case of a sync IO */
+ hl_dio_fd_unregister(&io->f);
+kfree_io:
+ kfree(io);
+out:
+ return rc;
+}
+
+static void hl_p2p_region_fini(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ if (p2pr->p2ppages) {
+ vfree(p2pr->p2ppages);
+ p2pr->p2ppages = NULL;
+ }
+
+ if (p2pr->p2pmem) {
+ dev_dbg(hdev->dev, "freeing P2P mem from %p, size=%#llx\n",
+ p2pr->p2pmem, p2pr->size);
+ pci_free_p2pmem(hdev->pdev, p2pr->p2pmem, p2pr->size);
+ p2pr->p2pmem = NULL;
+ }
+}
+
+void hl_p2p_region_fini_all(struct hl_device *hdev)
+{
+ int i;
+
+ for (i = 0 ; i < hdev->hldio.np2prs ; ++i)
+ hl_p2p_region_fini(hdev, &hdev->hldio.p2prs[i]);
+
+ kvfree(hdev->hldio.p2prs);
+ hdev->hldio.p2prs = NULL;
+ hdev->hldio.np2prs = 0;
+}
+
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{
+ void *addr;
+ int rc, i;
+
+ /* Start by publishing our p2p memory */
+ rc = pci_p2pdma_add_resource(hdev->pdev, p2pr->bar, p2pr->size, p2pr->bar_offset);
+ if (rc) {
+ dev_err(hdev->dev, "error adding p2p resource: %d\n", rc);
+ goto err;
+ }
+
+ /* Alloc all p2p mem */
+ p2pr->p2pmem = pci_alloc_p2pmem(hdev->pdev, p2pr->size);
+ if (!p2pr->p2pmem) {
+ dev_err(hdev->dev, "error allocating p2p memory\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ p2pr->p2ppages = vmalloc((p2pr->size >> PAGE_SHIFT) * sizeof(struct page *));
+ if (!p2pr->p2ppages) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0, addr = p2pr->p2pmem ; i < (p2pr->size >> PAGE_SHIFT) ; ++i, addr += PAGE_SIZE) {
+ p2pr->p2ppages[i] = virt_to_page(addr);
+ if (!p2pr->p2ppages[i]) {
+ rc = -EFAULT;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ hl_p2p_region_fini(hdev, p2pr);
+ return rc;
+}
+
+int hl_dio_start(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "initializing HLDIO\n");
+
+ /* Initialize the IO counter and enable IO */
+ hdev->hldio.inflight_ios = alloc_percpu(s64);
+ if (!hdev->hldio.inflight_ios)
+ return -ENOMEM;
+
+ hl_dio_set_io_enabled(hdev, true);
+
+ return 0;
+}
+
+void hl_dio_stop(struct hl_device *hdev)
+{
+ dev_dbg(hdev->dev, "deinitializing HLDIO\n");
+
+ if (hdev->hldio.io_enabled) {
+ /* Wait for all the IO to finish */
+ hl_dio_set_io_enabled(hdev, false);
+ hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev), 1000, IO_STABILIZE_TIMEOUT);
+ }
+
+ if (hdev->hldio.inflight_ios) {
+ free_percpu(hdev->hldio.inflight_ios);
+ hdev->hldio.inflight_ios = NULL;
+ }
+}
diff --git a/drivers/accel/habanalabs/common/hldio.h b/drivers/accel/habanalabs/common/hldio.h
new file mode 100644
index 000000000000..2874388f2851
--- /dev/null
+++ b/drivers/accel/habanalabs/common/hldio.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * hldio.h - NVMe Direct I/O (HLDIO) infrastructure for Habana Labs Driver
+ *
+ * This feature requires specific hardware setup and must not be built
+ * under COMPILE_TEST.
+ */
+
+#ifndef __HL_HLDIO_H__
+#define __HL_HLDIO_H__
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/ktime.h> /* ktime functions */
+#include <linux/delay.h> /* usleep_range */
+#include <linux/kernel.h> /* might_sleep_if */
+#include <linux/errno.h> /* error codes */
+
+/* Forward declarations */
+struct hl_device;
+struct file;
+
+/* Enable only if Kconfig selected */
+#ifdef CONFIG_HL_HLDIO
+/**
+ * struct hl_p2p_region - describes a single P2P memory region
+ * @p2ppages: array of page structs for the P2P memory
+ * @p2pmem: virtual address of the P2P memory region
+ * @device_pa: physical address on the device
+ * @bar_offset: offset within the BAR
+ * @size: size of the region in bytes
+ * @bar: BAR number containing this region
+ */
+struct hl_p2p_region {
+ struct page **p2ppages;
+ void *p2pmem;
+ u64 device_pa;
+ u64 bar_offset;
+ u64 size;
+ int bar;
+};
+
+/**
+ * struct hl_dio_stats - Direct I/O statistics
+ * @total_ops: total number of operations attempted
+ * @successful_ops: number of successful operations
+ * @failed_ops: number of failed operations
+ * @bytes_transferred: total bytes successfully transferred
+ * @last_len_read: length of the last read operation
+ */
+struct hl_dio_stats {
+ u64 total_ops;
+ u64 successful_ops;
+ u64 failed_ops;
+ u64 bytes_transferred;
+ size_t last_len_read;
+};
+
+/**
+ * struct hl_dio - describes habanalabs direct storage interaction interface
+ * @p2prs: array of p2p regions
+ * @inflight_ios: percpu counter for inflight ios
+ * @np2prs: number of elements in p2prs
+ * @io_enabled: 1 if io is enabled 0 otherwise
+ */
+struct hl_dio {
+ struct hl_p2p_region *p2prs;
+ s64 __percpu *inflight_ios;
+ u8 np2prs;
+ u8 io_enabled;
+};
+
+int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read);
+void hl_p2p_region_fini_all(struct hl_device *hdev);
+int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr);
+int hl_dio_start(struct hl_device *hdev);
+void hl_dio_stop(struct hl_device *hdev);
+
+/* Init/teardown */
+int hl_hldio_init(struct hl_device *hdev);
+void hl_hldio_fini(struct hl_device *hdev);
+
+/* File operations */
+long hl_hldio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
+
+/* DebugFS hooks */
+#ifdef CONFIG_DEBUG_FS
+void hl_hldio_debugfs_init(struct hl_device *hdev);
+void hl_hldio_debugfs_fini(struct hl_device *hdev);
+#else
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+#endif
+
+#else /* !CONFIG_HL_HLDIO */
+
+struct hl_p2p_region;
+/* Stubs when HLDIO is disabled */
+static inline int hl_dio_ssd2hl(struct hl_device *hdev, struct hl_ctx *ctx, int fd,
+ u64 device_va, off_t off_bytes, size_t len_bytes,
+ size_t *len_read)
+{ return -EOPNOTSUPP; }
+static inline void hl_p2p_region_fini_all(struct hl_device *hdev) {}
+static inline int hl_p2p_region_init(struct hl_device *hdev, struct hl_p2p_region *p2pr)
+{ return -EOPNOTSUPP; }
+static inline int hl_dio_start(struct hl_device *hdev) { return -EOPNOTSUPP; }
+static inline void hl_dio_stop(struct hl_device *hdev) {}
+
+static inline int hl_hldio_init(struct hl_device *hdev) { return 0; }
+static inline void hl_hldio_fini(struct hl_device *hdev) { }
+static inline long hl_hldio_ioctl(struct file *f, unsigned int c,
+ unsigned long a)
+{ return -ENOTTY; }
+static inline void hl_hldio_debugfs_init(struct hl_device *hdev) { }
+static inline void hl_hldio_debugfs_fini(struct hl_device *hdev) { }
+
+#endif /* CONFIG_HL_HLDIO */
+
+/* Simplified polling macro for HLDIO (no simulator support) */
+#define hl_poll_timeout_condition(hdev, cond, sleep_us, timeout_us) \
+({ \
+ ktime_t __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ might_sleep_if(sleep_us); \
+ (void)(hdev); /* keep signature consistent, hdev unused */ \
+ for (;;) { \
+ mb(); /* ensure ordering of memory operations */ \
+ if (cond) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) \
+ break; \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
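+
+/*
+ * Example, mirroring the use in hl_dio_stop(): wait for all in-flight IOs
+ * to drain, polling roughly every 1000us, for up to IO_STABILIZE_TIMEOUT us.
+ * The macro evaluates to 0 on success or -ETIMEDOUT:
+ *
+ *	rc = hl_poll_timeout_condition(hdev, !hl_dio_count_io(hdev),
+ *				       1000, IO_STABILIZE_TIMEOUT);
+ */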
+
+#ifdef CONFIG_HL_HLDIO
+bool hl_device_supports_nvme(struct hl_device *hdev);
+#else
+static inline bool hl_device_supports_nvme(struct hl_device *hdev) { return false; }
+#endif
+
+#endif /* __HL_HLDIO_H__ */
diff --git a/drivers/accel/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 61472a381904..633db4bff46f 100644
--- a/drivers/accel/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -1837,7 +1837,12 @@ static void hl_release_dmabuf(struct dma_buf *dmabuf)
atomic_dec(&ctx->hdev->dmabuf_export_cnt);
hl_ctx_put(ctx);
- /* Paired with get_file() in export_dmabuf() */
+ /*
+ * Paired with get_file() in export_dmabuf().
+ * 'ctx' can be still used here to get the file pointer, even after hl_ctx_put() was called,
+ * because releasing the compute device file involves another reference decrement, and it
+ * would be possible only after calling fput().
+ */
fput(ctx->hpriv->file_priv->filp);
kfree(hl_dmabuf);
@@ -2332,7 +2337,7 @@ static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
if (rc < 0)
goto destroy_pages;
npages = rc;
- rc = -EFAULT;
+ rc = -ENOMEM;
goto put_pages;
}
userptr->npages = npages;
diff --git a/drivers/accel/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c
index 99cd83139d46..4401beb99e42 100644
--- a/drivers/accel/habanalabs/common/memory_mgr.c
+++ b/drivers/accel/habanalabs/common/memory_mgr.c
@@ -259,13 +259,8 @@ int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
goto put_mem;
}
-#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
- if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
- user_mem_size)) {
-#else
if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
user_mem_size)) {
-#endif
dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
buf->behavior->topic, vma->vm_start);
diff --git a/drivers/accel/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index 82f66520ec18..8f55ba3b4e73 100644
--- a/drivers/accel/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
@@ -96,14 +96,21 @@ static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, c
infineon_second_stage_third_instance =
(infineon_second_stage_version >> 16) & mask;
- if (cpucp_info->infineon_second_stage_version)
+ if (cpucp_info->infineon_version && cpucp_info->infineon_second_stage_version)
return sprintf(buf, "%#04x %#04x:%#04x:%#04x\n",
le32_to_cpu(cpucp_info->infineon_version),
infineon_second_stage_first_instance,
infineon_second_stage_second_instance,
infineon_second_stage_third_instance);
- else
+ else if (cpucp_info->infineon_second_stage_version)
+ return sprintf(buf, "%#04x:%#04x:%#04x\n",
+ infineon_second_stage_first_instance,
+ infineon_second_stage_second_instance,
+ infineon_second_stage_third_instance);
+ else if (cpucp_info->infineon_version)
return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version));
+
+ return 0;
}
static DEVICE_ATTR_RO(vrm_ver);
diff --git a/drivers/accel/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index fa893a9b826e..34771d75da9d 100644
--- a/drivers/accel/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -4168,10 +4168,29 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
VM_DONTCOPY | VM_NORESERVE);
+#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
+
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
if (rc)
dev_err(hdev->dev, "dma_mmap_coherent error %d", rc);
+#else
+
+ rc = remap_pfn_range(vma, vma->vm_start,
+ virt_to_phys(cpu_addr) >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+ if (rc)
+ dev_err(hdev->dev, "remap_pfn_range error %d", rc);
+
+#endif
+
return rc;
}
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index 5722e4128d3c..b8c0689dba64 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -728,6 +728,354 @@ static const int gaudi2_dma_core_async_event_id[] = {
[DMA_CORE_ID_KDMA] = GAUDI2_EVENT_KDMA0_CORE,
};
+const char *gaudi2_engine_id_str[] = {
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_MME),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_1),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_2),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_3),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_4),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_5),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_6),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_0),
+ __stringify(GAUDI2_ENGINE_ID_PDMA_1),
+ __stringify(GAUDI2_ENGINE_ID_ROT_0),
+ __stringify(GAUDI2_ENGINE_ID_ROT_1),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_0),
+ __stringify(GAUDI2_PCIE_ENGINE_ID_DEC_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC0_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC1_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC2_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC3_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC4_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC5_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC6_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC7_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC8_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC9_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC10_1),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_0),
+ __stringify(GAUDI2_ENGINE_ID_NIC11_1),
+ __stringify(GAUDI2_ENGINE_ID_PCIE),
+ __stringify(GAUDI2_ENGINE_ID_PSOC),
+ __stringify(GAUDI2_ENGINE_ID_ARC_FARM),
+ __stringify(GAUDI2_ENGINE_ID_KDMA),
+ __stringify(GAUDI2_ENGINE_ID_SIZE),
+};
+
+const char *gaudi2_queue_id_str[] = {
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_PDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_0_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_1_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_2_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_3_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_4_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_5_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_6_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_7_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_8_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_9_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_10_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_11_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_12_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_13_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_14_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_15_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_16_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_17_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_18_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_19_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_20_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_21_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_22_3),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_0),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_1),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_2),
+ __stringify(GAUDI2_QUEUE_ID_NIC_23_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_0_3),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_0),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_1),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_2),
+ __stringify(GAUDI2_QUEUE_ID_ROT_1_3),
+ __stringify(GAUDI2_QUEUE_ID_CPU_PQ),
+ __stringify(GAUDI2_QUEUE_ID_SIZE),
+};
+
static const char * const gaudi2_qm_sei_error_cause[GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE] = {
"qman sei intr",
"arc sei intr"
@@ -3150,7 +3498,6 @@ static int gaudi2_early_init(struct hl_device *hdev)
rc = hl_fw_read_preboot_status(hdev);
if (rc) {
if (hdev->reset_on_preboot_fail)
- /* we are already on failure flow, so don't check if hw_fini fails. */
hdev->asic_funcs->hw_fini(hdev, true, false);
goto pci_fini;
}
@@ -3162,6 +3509,13 @@ static int gaudi2_early_init(struct hl_device *hdev)
dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);
goto pci_fini;
}
+
+ rc = hl_fw_read_preboot_status(hdev);
+ if (rc) {
+ if (hdev->reset_on_preboot_fail)
+ hdev->asic_funcs->hw_fini(hdev, true, false);
+ goto pci_fini;
+ }
}
return 0;
@@ -4836,7 +5190,7 @@ static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw
else
wait_timeout_ms = GAUDI2_RESET_WAIT_MSEC;
- if (fw_reset)
+ if (fw_reset || hdev->cpld_shutdown)
goto skip_engines;
gaudi2_stop_dma_qmans(hdev);
@@ -6484,6 +6838,13 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
+ /*
+ * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP
+ * so vm_insert_page() can handle it safely. Without this, the kernel
+ * may BUG_ON due to VM_PFNMAP.
+ */
+ if (is_vmalloc_addr(cpu_addr))
+ vm_flags_set(vma, VM_MIXEDMAP);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size);
if (rc)
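
For reference, the pattern this hunk applies, sketched here with illustrative names; only is_vmalloc_addr(), vm_flags_set() and dma_mmap_coherent() are real kernel APIs:

	/*
	 * Sketch: guard dma_mmap_coherent() when the coherent buffer may be
	 * vmalloc-backed. vm_insert_page() BUGs on a VM_PFNMAP vma, so mark
	 * the vma VM_MIXEDMAP before handing it over.
	 */
	static int example_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
					 void *cpu_addr, dma_addr_t dma_addr, size_t size)
	{
		if (is_vmalloc_addr(cpu_addr))
			vm_flags_set(vma, VM_MIXEDMAP);

		return dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
	}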
@@ -6774,7 +7135,8 @@ static int gaudi2_validate_cb_address(struct hl_device *hdev, struct hl_cs_parse
struct gaudi2_device *gaudi2 = hdev->asic_specific;
if (!gaudi2_is_queue_enabled(hdev, parser->hw_queue_id)) {
- dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id);
+ dev_err(hdev->dev, "h/w queue %s is disabled\n",
+ GAUDI2_QUEUE_ID_TO_STR(parser->hw_queue_id));
return -EINVAL;
}
@@ -7026,7 +7388,8 @@ static int gaudi2_test_queue_send_msg_short(struct hl_device *hdev, u32 hw_queue
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, msg_info->dma_addr);
if (rc)
dev_err(hdev->dev,
- "Failed to send msg_short packet to H/W queue %d\n", hw_queue_id);
+ "Failed to send msg_short packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
@@ -7052,8 +7415,8 @@ static int gaudi2_test_queue_wait_completion(struct hl_device *hdev, u32 hw_queu
timeout_usec);
if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "H/W queue %d test failed (SOB_OBJ_0 == 0x%x)\n",
- hw_queue_id, tmp);
+ dev_err(hdev->dev, "H/W queue %s test failed (SOB_OBJ_0 == 0x%x)\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id), tmp);
rc = -EIO;
}
@@ -9603,8 +9966,8 @@ static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
gaudi2_print_event(hdev, event_type, true,
- "ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u",
- engine_id, intr_type, q->queue_index);
+ "ARC DCCM Full event: Eng: %s, Intr_type: %u, Qidx: %u",
+ GAUDI2_ENG_ID_TO_STR(engine_id), intr_type, q->queue_index);
return 1;
default:
gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
@@ -10172,7 +10535,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
error_count = GAUDI2_NA_EVENT_CAUSE;
- event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ hl_eq_cpld_shutdown_event_handle(hdev, event_type, &event_mask);
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
@@ -10260,6 +10623,7 @@ reset_device:
if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR)
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
+ hl_debugfs_cfg_access_history_dump(hdev);
event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET;
hl_device_cond_reset(hdev, reset_flags, event_mask);
}
@@ -10296,8 +10660,8 @@ static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev,
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, phys_addr);
if (rc)
- dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %d\n",
- hw_queue_id);
+ dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %s\n",
+ GAUDI2_QUEUE_ID_TO_STR(hw_queue_id));
return rc;
}
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index 05117272cac7..bdf5c1bd2d63 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -240,6 +240,15 @@
#define GAUDI2_NUM_TESTED_QS (GAUDI2_QUEUE_ID_CPU_PQ - GAUDI2_QUEUE_ID_PDMA_0_0)
+extern const char *gaudi2_engine_id_str[];
+extern const char *gaudi2_queue_id_str[];
+
+#define GAUDI2_ENG_ID_TO_STR(initiator) ((initiator) >= GAUDI2_ENGINE_ID_SIZE ? "not found" : \
+ gaudi2_engine_id_str[initiator])
+
+#define GAUDI2_QUEUE_ID_TO_STR(initiator) ((initiator) >= GAUDI2_QUEUE_ID_SIZE ? "not found" : \
+ gaudi2_queue_id_str[initiator])
+
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
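
A hedged usage sketch of the new lookup macros (caller and id value are illustrative): any id at or beyond the table size yields the "not found" string instead of indexing past the string array.

	u32 qid = GAUDI2_QUEUE_ID_SIZE + 5;	/* deliberately out of range */

	/* Prints "h/w queue not found is disabled" rather than reading OOB. */
	dev_err(hdev->dev, "h/w queue %s is disabled\n", GAUDI2_QUEUE_ID_TO_STR(qid));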
diff --git a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
index 2423620ff358..bc3c57bda5cd 100644
--- a/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
@@ -2426,7 +2426,7 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa
WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, 0);
WREG32(base_reg + mmBMON_REDUCTION_OFFSET, 0);
WREG32(base_reg + mmBMON_STM_TRC_OFFSET, 0x7 | (0xA << 8));
- WREG32(base_reg + mmBMON_CR_OFFSET, 0x77 | 0xf << 24);
+ WREG32(base_reg + mmBMON_CR_OFFSET, 0x41);
}
return 0;
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
index 1029e0bab061..dbf76b8a5b4c 100644
--- a/drivers/accel/ivpu/Makefile
+++ b/drivers/accel/ivpu/Makefile
@@ -6,6 +6,7 @@ intel_vpu-y := \
ivpu_fw.o \
ivpu_fw_log.o \
ivpu_gem.o \
+ ivpu_gem_userptr.o \
ivpu_hw.o \
ivpu_hw_btrs.o \
ivpu_hw_ip.o \
diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c
index cd24ccd20ba6..3bd85ee6c26b 100644
--- a/drivers/accel/ivpu/ivpu_debugfs.c
+++ b/drivers/accel/ivpu/ivpu_debugfs.c
@@ -398,35 +398,25 @@ static int dct_active_set(void *data, u64 active_percent)
DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n");
+static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw,
+ int band, const char *name)
+{
+ seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n",
+ name,
+ hw->hws.grace_period[band],
+ hw->hws.process_grace_period[band],
+ hw->hws.process_quantum[band]);
+}
+
static int priority_bands_show(struct seq_file *s, void *v)
{
struct ivpu_device *vdev = s->private;
struct ivpu_hw_info *hw = vdev->hw;
- for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE;
- band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) {
- switch (band) {
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE:
- seq_puts(s, "Idle: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL:
- seq_puts(s, "Normal: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS:
- seq_puts(s, "Focus: ");
- break;
-
- case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME:
- seq_puts(s, "Realtime: ");
- break;
- }
-
- seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n",
- hw->hws.grace_period[band], hw->hws.process_grace_period[band],
- hw->hws.process_quantum[band]);
- }
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus");
+ print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime");
return 0;
}
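
An equivalent table-driven form of this refactor, sketched with an illustrative name table (the band constants are the ones used above), would make adding a band a one-line change:

	static const char * const band_names[VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT] = {
		[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE]     = "Idle",
		[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL]   = "Normal",
		[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS]    = "Focus",
		[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = "Realtime",
	};

	for (int band = 0; band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++)
		print_priority_band(s, hw, band, band_names[band]);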
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 3289751b4757..3d6fccdefdd6 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -57,7 +57,7 @@ MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
-MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");
+MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler (supported on 27XX - 50XX), 1 - Use HW scheduler");
bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
@@ -134,6 +134,8 @@ bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
return true;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
return true;
+ case DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR:
+ return true;
case DRM_IVPU_CAP_MANAGE_CMDQ:
return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
default:
@@ -200,6 +202,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
case DRM_IVPU_PARAM_CAPABILITIES:
args->value = ivpu_is_capable(vdev, args->index);
break;
+ case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
+ args->value = ivpu_fw_preempt_buf_size(vdev);
+ break;
default:
ret = -EINVAL;
break;
@@ -310,6 +315,7 @@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE_FROM_USERPTR, ivpu_bo_create_from_userptr_ioctl, 0),
};
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
@@ -377,8 +383,7 @@ int ivpu_boot(struct ivpu_device *vdev)
drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
- /* Update boot params located at first 4KB of FW memory */
- ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
+ ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp));
ret = ivpu_hw_boot_fw(vdev);
if (ret) {
@@ -450,6 +455,9 @@ int ivpu_shutdown(struct ivpu_device *vdev)
static const struct file_operations ivpu_fops = {
.owner = THIS_MODULE,
DRM_ACCEL_FOPS,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = drm_show_fdinfo,
+#endif
};
static const struct drm_driver driver = {
@@ -464,6 +472,9 @@ static const struct drm_driver driver = {
.ioctls = ivpu_drm_ioctls,
.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
.fops = &ivpu_fops,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = drm_show_memory_stats,
+#endif
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -705,6 +716,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 62ab1c654e63..5b34b6f50e69 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -27,6 +27,7 @@
#define PCI_DEVICE_ID_LNL 0x643e
#define PCI_DEVICE_ID_PTL_P 0xb03e
#define PCI_DEVICE_ID_WCL 0xfd3e
+#define PCI_DEVICE_ID_NVL 0xd71d
#define IVPU_HW_IP_37XX 37
#define IVPU_HW_IP_40XX 40
@@ -78,6 +79,7 @@
#define IVPU_DBG_KREF BIT(11)
#define IVPU_DBG_RPM BIT(12)
#define IVPU_DBG_MMU_MAP BIT(13)
+#define IVPU_DBG_IOCTL BIT(14)
#define ivpu_err(vdev, fmt, ...) \
drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
@@ -245,6 +247,8 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
case PCI_DEVICE_ID_PTL_P:
case PCI_DEVICE_ID_WCL:
return IVPU_HW_IP_50XX;
+ case PCI_DEVICE_ID_NVL:
+ return IVPU_HW_IP_60XX;
default:
dump_stack();
ivpu_err(vdev, "Unknown NPU IP generation\n");
@@ -261,6 +265,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
case PCI_DEVICE_ID_LNL:
case PCI_DEVICE_ID_PTL_P:
case PCI_DEVICE_ID_WCL:
+ case PCI_DEVICE_ID_NVL:
return IVPU_HW_BTRS_LNL;
default:
dump_stack();
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
index 9db741695401..48386d2cddbb 100644
--- a/drivers/accel/ivpu/ivpu_fw.c
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -17,15 +17,10 @@
#include "ivpu_ipc.h"
#include "ivpu_pm.h"
-#define FW_GLOBAL_MEM_START (2ull * SZ_1G)
-#define FW_GLOBAL_MEM_END (3ull * SZ_1G)
-#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
-#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */
-#define FW_RUNTIME_MAX_SIZE SZ_512M
#define FW_SHAVE_NN_MAX_SIZE SZ_2M
-#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
-#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
+#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K
+#define FW_PREEMPT_BUF_MAX_SIZE SZ_32M
#define WATCHDOG_MSS_REDIRECT 32
#define WATCHDOG_NCE_REDIRECT 33
@@ -61,12 +56,14 @@ static struct {
{ IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" },
{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" },
{ IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" },
+ { IVPU_HW_IP_60XX, "intel/vpu/vpu_60xx_v1.bin" },
};
/* Production fw_names from the table above */
MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin");
MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin");
MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin");
+MODULE_FIRMWARE("intel/vpu/vpu_60xx_v1.bin");
static int ivpu_fw_request(struct ivpu_device *vdev)
{
@@ -131,9 +128,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea
return false;
}
-static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size)
+bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range)
{
- if (addr < range_start || addr + size > range_start + range_size)
+ u64 addr_end;
+
+ if (!range || check_add_overflow(addr, size, &addr_end))
+ return false;
+
+ if (addr < range->start || addr_end > range->end)
return false;
return true;
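
A minimal caller sketch (range and sizes are illustrative) of the intended semantics: [addr, addr + size) must sit entirely inside [range->start, range->end), and an addition that wraps past U64_MAX is rejected up front by check_add_overflow():

	struct ivpu_addr_range r = { .start = 0x80000000, .end = 0x84000000 };

	ivpu_is_within_range(0x80000000, SZ_4K, &r);	  /* true */
	ivpu_is_within_range(0x83fff000, SZ_8K, &r);	  /* false: crosses r.end */
	ivpu_is_within_range(U64_MAX - SZ_4K, SZ_8K, &r); /* false: addr + size overflows */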
@@ -142,6 +144,12 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range
static u32
ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
{
+ if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX &&
+ ivpu_sched_mode == VPU_SCHEDULING_MODE_OS) {
+ ivpu_warn(vdev, "OS sched mode is not supported, using HW mode\n");
+ return VPU_SCHEDULING_MODE_HW;
+ }
+
if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO)
return ivpu_sched_mode;
@@ -151,11 +159,56 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he
return VPU_SCHEDULING_MODE_HW;
}
+static void
+ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ u32 primary_preempt_buf_size, secondary_preempt_buf_size;
+
+ if (fw_hdr->preemption_buffer_1_max_size)
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
+ else
+ primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+
+ if (fw_hdr->preemption_buffer_2_max_size)
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
+ else
+ secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
+
+ ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n",
+ primary_preempt_buf_size, secondary_preempt_buf_size);
+
+ if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE ||
+ secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) {
+ ivpu_warn(vdev, "Preemption buffers size too small\n");
+ return;
+ }
+
+ if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE ||
+ secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) {
+ ivpu_warn(vdev, "Preemption buffers size too big\n");
+ return;
+ }
+
+ if (fw->sched_mode != VPU_SCHEDULING_MODE_HW)
+ return;
+
+ if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
+ return;
+
+ vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE);
+ vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE);
+}
+
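
A worked example of the final alignment step (the reported size is illustrative): a firmware-reported 6000-byte buffer rounds up to the next page multiple before allocation.

	u32 reported = 6000;				/* illustrative fw_hdr value */
	u32 rounded  = ALIGN(reported, PAGE_SIZE);	/* 8192 with 4 KiB pages */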
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
- u64 runtime_addr, image_load_addr, runtime_size, image_size;
+ struct ivpu_addr_range fw_image_range;
+ u64 boot_params_addr, boot_params_size;
+ u64 fw_version_addr, fw_version_size;
+ u64 runtime_addr, runtime_size;
+ u64 image_load_addr, image_size;
if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
@@ -167,18 +220,37 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
return -EINVAL;
}
- runtime_addr = fw_hdr->boot_params_load_address;
- runtime_size = fw_hdr->runtime_size;
- image_load_addr = fw_hdr->image_load_address;
- image_size = fw_hdr->image_size;
+ boot_params_addr = fw_hdr->boot_params_load_address;
+ boot_params_size = SZ_4K;
- if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
- ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
+ if (!ivpu_is_within_range(boot_params_addr, boot_params_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid boot params address: 0x%llx\n", boot_params_addr);
return -EINVAL;
}
- if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
- ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
+ fw_version_addr = fw_hdr->firmware_version_load_address;
+ fw_version_size = ALIGN(fw_hdr->firmware_version_size, SZ_4K);
+
+ if (fw_version_size != SZ_4K) {
+ ivpu_err(vdev, "Invalid firmware version size: %u\n",
+ fw_hdr->firmware_version_size);
+ return -EINVAL;
+ }
+
+ if (!ivpu_is_within_range(fw_version_addr, fw_version_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware version address: 0x%llx\n", fw_version_addr);
+ return -EINVAL;
+ }
+
+ runtime_addr = fw_hdr->image_load_address;
+ runtime_size = fw_hdr->runtime_size - boot_params_size - fw_version_size;
+
+ image_load_addr = fw_hdr->image_load_address;
+ image_size = fw_hdr->image_size;
+
+ if (!ivpu_is_within_range(runtime_addr, runtime_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx and size %llu\n",
+ runtime_addr, runtime_size);
return -EINVAL;
}
@@ -187,23 +259,25 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
return -EINVAL;
}
- if (image_load_addr < runtime_addr ||
- image_load_addr + image_size > runtime_addr + runtime_size) {
- ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n",
+ if (!ivpu_is_within_range(image_load_addr, image_size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid firmware load address: 0x%llx and size %llu\n",
image_load_addr, image_size);
return -EINVAL;
}
- if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
- ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ if (ivpu_hw_range_init(vdev, &fw_image_range, image_load_addr, image_size))
return -EINVAL;
- }
- if (fw_hdr->entry_point < image_load_addr ||
- fw_hdr->entry_point >= image_load_addr + image_size) {
+ if (!ivpu_is_within_range(fw_hdr->entry_point, SZ_4K, &fw_image_range)) {
ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
return -EINVAL;
}
+
+ if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
+ ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ return -EINVAL;
+ }
+
ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
fw_hdr->header_version, fw_hdr->image_format);
@@ -217,6 +291,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3))
return -EINVAL;
+ fw->boot_params_addr = boot_params_addr;
+ fw->boot_params_size = boot_params_size;
+ fw->fw_version_addr = fw_version_addr;
+ fw->fw_version_size = fw_version_size;
fw->runtime_addr = runtime_addr;
fw->runtime_size = runtime_size;
fw->image_load_offset = image_load_addr - runtime_addr;
@@ -235,22 +313,13 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr);
ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS");
- if (fw_hdr->preemption_buffer_1_max_size)
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size;
- else
- fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size;
+ ivpu_preemption_config_parse(vdev, fw_hdr);
+ ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n",
+ ivpu_fw_preempt_buf_size(vdev) ? "is" : "is not");
- if (fw_hdr->preemption_buffer_2_max_size)
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size;
- else
- fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size;
- ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n",
- fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size);
-
- if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address,
- fw_hdr->ro_section_size,
- fw_hdr->image_load_address,
- fw_hdr->image_size)) {
+ if (fw_hdr->ro_section_start_address &&
+ !ivpu_is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size,
+ &fw_image_range)) {
ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n",
fw_hdr->ro_section_start_address, fw_hdr->ro_section_size);
return -EINVAL;
@@ -259,12 +328,18 @@ static int ivpu_fw_parse(struct ivpu_device *vdev)
fw->read_only_addr = fw_hdr->ro_section_start_address;
fw->read_only_size = fw_hdr->ro_section_size;
- ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
- fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
- ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
- fw->runtime_addr, image_load_addr, fw->entry_point);
+ ivpu_dbg(vdev, FW_BOOT, "Boot params: address 0x%llx, size %llu\n",
+ fw->boot_params_addr, fw->boot_params_size);
+ ivpu_dbg(vdev, FW_BOOT, "FW version: address 0x%llx, size %llu\n",
+ fw->fw_version_addr, fw->fw_version_size);
+ ivpu_dbg(vdev, FW_BOOT, "Runtime: address 0x%llx, size %u\n",
+ fw->runtime_addr, fw->runtime_size);
+ ivpu_dbg(vdev, FW_BOOT, "Image load offset: 0x%llx, size %u\n",
+ fw->image_load_offset, fw->image_size);
ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n",
fw->read_only_addr, fw->read_only_size);
+ ivpu_dbg(vdev, FW_BOOT, "FW entry point: 0x%llx\n", fw->entry_point);
+ ivpu_dbg(vdev, FW_BOOT, "SHAVE NN size: %u\n", fw->shave_nn_size);
return 0;
}
@@ -291,39 +366,33 @@ ivpu_fw_init_wa(struct ivpu_device *vdev)
IVPU_PRINT_WA(disable_d0i3_msg);
}
-static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
-{
- struct ivpu_fw_info *fw = vdev->fw;
- u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
- u64 size = FW_SHARED_MEM_SIZE;
-
- if (start + size > FW_GLOBAL_MEM_END) {
- ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
- return -EINVAL;
- }
-
- ivpu_hw_range_init(&vdev->hw->ranges.global, start, size);
- return 0;
-}
-
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
- struct ivpu_addr_range fw_range;
int log_verb_size;
int ret;
- ret = ivpu_fw_update_global_range(vdev);
- if (ret)
- return ret;
+ fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr, fw->boot_params_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!fw->mem_bp) {
+ ivpu_err(vdev, "Failed to create firmware boot params memory buffer\n");
+ return -ENOMEM;
+ }
- fw_range.start = fw->runtime_addr;
- fw_range.end = fw->runtime_addr + fw->runtime_size;
- fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size,
- DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ fw->mem_fw_ver = ivpu_bo_create_runtime(vdev, fw->fw_version_addr, fw->fw_version_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
+ if (!fw->mem_fw_ver) {
+ ivpu_err(vdev, "Failed to create firmware version memory buffer\n");
+ ret = -ENOMEM;
+ goto err_free_bp;
+ }
+
+ fw->mem = ivpu_bo_create_runtime(vdev, fw->runtime_addr, fw->runtime_size,
+ DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
if (!fw->mem) {
ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_fw_ver;
}
ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr,
@@ -372,6 +441,10 @@ err_free_log_crit:
ivpu_bo_free(fw->mem_log_crit);
err_free_fw_mem:
ivpu_bo_free(fw->mem);
+err_free_fw_ver:
+ ivpu_bo_free(fw->mem_fw_ver);
+err_free_bp:
+ ivpu_bo_free(fw->mem_bp);
return ret;
}
@@ -387,10 +460,14 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
ivpu_bo_free(fw->mem_log_verb);
ivpu_bo_free(fw->mem_log_crit);
ivpu_bo_free(fw->mem);
+ ivpu_bo_free(fw->mem_fw_ver);
+ ivpu_bo_free(fw->mem_bp);
fw->mem_log_verb = NULL;
fw->mem_log_crit = NULL;
fw->mem = NULL;
+ fw->mem_fw_ver = NULL;
+ fw->mem_bp = NULL;
}
int ivpu_fw_init(struct ivpu_device *vdev)
@@ -483,11 +560,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
- boot_params->global_memory_allocator_base);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
- boot_params->global_memory_allocator_size);
-
ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
boot_params->shave_nn_fw_base);
@@ -495,10 +567,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->watchdog_irq_mss);
ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
boot_params->watchdog_irq_nce);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
- boot_params->host_to_vpu_irq);
- ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
- boot_params->job_done_irq);
ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
boot_params->host_version_id);
@@ -546,6 +614,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_
boot_params->system_time_us);
ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n",
boot_params->power_profile);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_uses_ecc_mca_signal = 0x%x\n",
+ boot_params->vpu_uses_ecc_mca_signal);
}
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
@@ -572,6 +642,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
return;
}
+ memset(boot_params, 0, sizeof(*boot_params));
vdev->pm->is_warmboot = false;
boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
@@ -647,6 +718,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
boot_params->d0i3_entry_vpu_ts = 0;
if (IVPU_WA(disable_d0i2))
boot_params->power_profile |= BIT(1);
+ boot_params->vpu_uses_ecc_mca_signal =
+ ivpu_hw_uses_ecc_mca_signal(vdev) ? VPU_BOOT_MCA_ECC_BOTH : 0;
boot_params->system_time_us = ktime_to_us(ktime_get_real());
wmb(); /* Flush WC buffers after writing bootparams */
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
index 9a3935be1c05..00945892b55e 100644
--- a/drivers/accel/ivpu/ivpu_fw.h
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_FW_H__
@@ -19,10 +19,16 @@ struct ivpu_fw_info {
const struct firmware *file;
const char *name;
char version[FW_VERSION_STR_SIZE];
+ struct ivpu_bo *mem_bp;
+ struct ivpu_bo *mem_fw_ver;
struct ivpu_bo *mem;
struct ivpu_bo *mem_shave_nn;
struct ivpu_bo *mem_log_crit;
struct ivpu_bo *mem_log_verb;
+ u64 boot_params_addr;
+ u64 boot_params_size;
+ u64 fw_version_addr;
+ u64 fw_version_size;
u64 runtime_addr;
u32 runtime_size;
u64 image_load_offset;
@@ -42,14 +48,20 @@ struct ivpu_fw_info {
u64 last_heartbeat;
};
+bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range);
int ivpu_fw_init(struct ivpu_device *vdev);
void ivpu_fw_fini(struct ivpu_device *vdev);
void ivpu_fw_load(struct ivpu_device *vdev);
-void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp);
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params);
static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
{
return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point;
}
+static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev)
+{
+ return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size;
+}
+
#endif /* __IVPU_FW_H__ */
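
Userspace can size its preemption allocations by querying the new parameter; a hedged sketch, assuming the uapi constant lands as DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE per the ioctl hunk above (fd is an open /dev/accel/accelN descriptor):

	struct drm_ivpu_param p = { .param = DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE };

	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &p) == 0)
		printf("total preemption buffer size: %llu bytes\n",
		       (unsigned long long)p.value);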
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 59cfcf3eaded..ece68f570b7e 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -15,6 +15,7 @@
#include <drm/drm_utils.h>
#include "ivpu_drv.h"
+#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_mmu.h"
@@ -27,8 +28,8 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
{
ivpu_dbg(vdev, BO,
- "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
- action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id,
+ "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n",
+ action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
(bool)drm_gem_is_imported(&bo->base.base));
}
@@ -43,22 +44,47 @@ static inline void ivpu_bo_unlock(struct ivpu_bo *bo)
dma_resv_unlock(bo->base.base.resv);
}
+static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo)
+{
+ struct sg_table *sgt;
+
+ drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach);
+
+ ivpu_bo_lock(bo);
+
+ sgt = bo->base.sgt;
+ if (!sgt) {
+ sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt));
+ else
+ bo->base.sgt = sgt;
+ }
+
+ ivpu_bo_unlock(bo);
+
+ return sgt;
+}
+
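
The helper follows the usual lazy-attach pattern: the first bind maps the attachment and caches the sg_table under the resv lock, later binds reuse it, and the unbind path picks between dma_buf_unmap_attachment() and the shmem teardown. In caller terms (illustrative):

	sgt = ivpu_bo_map_attachment(vdev, bo);	/* maps via dma_buf_map_attachment() */
	sgt = ivpu_bo_map_attachment(vdev, bo);	/* second call returns the cached table */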
/*
- * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
+ * ivpu_bo_bind() - pin the backing physical pages and map them to VPU.
*
* This function pins physical memory pages, then maps the physical pages
* to IOMMU address space and finally updates the VPU MMU page tables
* to allow the VPU to translate VPU address to IOMMU address.
*/
-int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+int __must_check ivpu_bo_bind(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
struct sg_table *sgt;
int ret = 0;
- ivpu_dbg_bo(vdev, bo, "pin");
+ ivpu_dbg_bo(vdev, bo, "bind");
- sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (bo->base.base.import_attach)
+ sgt = ivpu_bo_map_attachment(vdev, bo);
+ else
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
@@ -70,7 +96,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
if (!bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt,
- ivpu_bo_is_snooped(bo));
+ ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo));
if (ret) {
ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
goto unlock;
@@ -99,9 +125,9 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node);
if (!ret) {
bo->ctx = ctx;
+ bo->ctx_id = ctx->id;
bo->vpu_addr = bo->mm_node.start;
- } else {
- ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret);
+ ivpu_dbg_bo(vdev, bo, "vaddr");
}
ivpu_bo_unlock(bo);
@@ -115,7 +141,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
{
struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
- lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount));
+ dma_resv_assert_held(bo->base.base.resv);
if (bo->mmu_mapped) {
drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -130,13 +156,15 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
bo->ctx = NULL;
}
- if (drm_gem_is_imported(&bo->base.base))
- return;
-
if (bo->base.sgt) {
- dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
- sg_free_table(bo->base.sgt);
- kfree(bo->base.sgt);
+ if (bo->base.base.import_attach) {
+ dma_buf_unmap_attachment(bo->base.base.import_attach,
+ bo->base.sgt, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->base.sgt);
+ kfree(bo->base.sgt);
+ }
bo->base.sgt = NULL;
}
}
@@ -182,10 +210,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
struct device *attach_dev = dev->dev;
struct dma_buf_attachment *attach;
- struct sg_table *sgt;
struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
int ret;
attach = dma_buf_attach(dma_buf, attach_dev);
@@ -194,25 +223,25 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt)) {
- ret = PTR_ERR(sgt);
- goto fail_detach;
- }
-
- obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
- goto fail_unmap;
+ goto fail_detach;
}
obj->import_attach = attach;
obj->resv = dma_buf->resv;
+ bo = to_ivpu_bo(obj);
+
+ mutex_lock(&vdev->bo_list_lock);
+ list_add_tail(&bo->bo_list_node, &vdev->bo_list);
+ mutex_unlock(&vdev->bo_list_lock);
+
+ ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo));
+
return obj;
-fail_unmap:
- dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
@@ -220,7 +249,7 @@ fail_detach:
return ERR_PTR(ret);
}
-static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id)
+static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags)
{
struct drm_gem_shmem_object *shmem;
struct ivpu_bo *bo;
@@ -238,7 +267,6 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
return ERR_CAST(shmem);
bo = to_ivpu_bo(&shmem->base);
- bo->ctx_id = ctx_id;
bo->base.map_wc = flags & DRM_IVPU_BO_WC;
bo->flags = flags;
@@ -246,7 +274,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla
list_add_tail(&bo->bo_list_node, &vdev->bo_list);
mutex_unlock(&vdev->bo_list_lock);
- ivpu_dbg_bo(vdev, bo, "alloc");
+ ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size);
return bo;
}
@@ -259,8 +287,8 @@ static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file)
struct ivpu_addr_range *range;
if (bo->ctx) {
- ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
- file_priv->ctx.id, bo->ctx->id);
+ ivpu_dbg(vdev, IOCTL, "Can't add BO %pe to ctx %u: already in ctx %u\n",
+ bo, file_priv->ctx.id, bo->ctx->id);
return -EALREADY;
}
@@ -281,23 +309,41 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj)
ivpu_dbg_bo(vdev, bo, "free");
+ drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node));
+
mutex_lock(&vdev->bo_list_lock);
list_del(&bo->bo_list_node);
- mutex_unlock(&vdev->bo_list_lock);
drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) &&
!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0);
drm_WARN_ON(&vdev->drm, bo->base.vaddr);
+ ivpu_bo_lock(bo);
ivpu_bo_unbind_locked(bo);
+ ivpu_bo_unlock(bo);
+
+ mutex_unlock(&vdev->bo_list_lock);
+
drm_WARN_ON(&vdev->drm, bo->mmu_mapped);
drm_WARN_ON(&vdev->drm, bo->ctx);
drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+ drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node);
drm_gem_shmem_free(&bo->base);
}
+static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ enum drm_gem_object_status status = 0;
+
+ if (ivpu_bo_is_resident(bo))
+ status |= DRM_GEM_OBJECT_RESIDENT;
+
+ return status;
+}
+
static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.free = ivpu_gem_bo_free,
.open = ivpu_gem_bo_open,
@@ -308,6 +354,7 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs = {
.vmap = drm_gem_shmem_object_vmap,
.vunmap = drm_gem_shmem_object_vunmap,
.mmap = drm_gem_shmem_object_mmap,
+ .status = ivpu_gem_status,
.vm_ops = &drm_gem_shmem_vm_ops,
};
@@ -320,25 +367,33 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
struct ivpu_bo *bo;
int ret;
- if (args->flags & ~DRM_IVPU_BO_FLAGS)
+ if (args->flags & ~DRM_IVPU_BO_FLAGS) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO flags 0x%x\n", args->flags);
return -EINVAL;
+ }
- if (size == 0)
+ if (size == 0) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO size %llu\n", args->size);
return -EINVAL;
+ }
- bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id);
+ bo = ivpu_bo_alloc(vdev, size, args->flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)",
+ ivpu_dbg(vdev, IOCTL, "Failed to allocate BO: %pe ctx %u size %llu flags 0x%x\n",
bo, file_priv->ctx.id, args->size, args->flags);
return PTR_ERR(bo);
}
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0);
+
ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
- if (ret)
- ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)",
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
bo, file_priv->ctx.id, args->size, args->flags);
- else
+ } else {
args->vpu_addr = bo->vpu_addr;
+ drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1);
+ }
drm_gem_object_put(&bo->base.base);
@@ -360,18 +415,21 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end));
drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
- bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+ bo = ivpu_bo_alloc(vdev, size, flags);
if (IS_ERR(bo)) {
- ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ ivpu_err(vdev, "Failed to allocate BO: %pe vpu_addr 0x%llx size %llu flags 0x%x\n",
bo, range->start, size, flags);
return NULL;
}
ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range);
- if (ret)
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate NPU address for BO: %pe ctx %u size %llu: %d\n",
+ bo, ctx->id, size, ret);
goto err_put;
+ }
- ret = ivpu_bo_pin(bo);
+ ret = ivpu_bo_bind(bo);
if (ret)
goto err_put;
@@ -391,6 +449,21 @@ err_put:
return NULL;
}
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags)
+{
+ struct ivpu_addr_range range;
+
+ if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) {
+ ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size);
+ return NULL;
+ }
+
+ if (ivpu_hw_range_init(vdev, &range, addr, size))
+ return NULL;
+
+ return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags);
+}
+
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags)
{
return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags);
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
index aa8ff14f7aae..0c3350f22b55 100644
--- a/drivers/accel/ivpu/ivpu_gem.h
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_GEM_H__
#define __IVPU_GEM_H__
@@ -24,19 +24,22 @@ struct ivpu_bo {
bool mmu_mapped;
};
-int ivpu_bo_pin(struct ivpu_bo *bo);
+int ivpu_bo_bind(struct ivpu_bo *bo);
void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
struct ivpu_addr_range *range, u64 size, u32 flags);
+struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags);
struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags);
void ivpu_bo_free(struct ivpu_bo *bo);
int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
void ivpu_bo_list_print(struct drm_device *dev);
@@ -74,6 +77,16 @@ static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
}
+static inline bool ivpu_bo_is_read_only(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_READ_ONLY;
+}
+
+static inline bool ivpu_bo_is_resident(struct ivpu_bo *bo)
+{
+ return !!bo->base.pages;
+}
+
static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
{
if (vpu_addr < bo->vpu_addr)
@@ -96,4 +109,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo));
}
+static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_MAPPABLE;
+}
+
#endif /* __IVPU_GEM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem_userptr.c b/drivers/accel/ivpu/ivpu_gem_userptr.c
new file mode 100644
index 000000000000..25ba606164c0
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_gem_userptr.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2025 Intel Corporation
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/capability.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+
+static struct sg_table *
+ivpu_gem_userptr_dmabuf_map(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sgt = attachment->dmabuf->priv;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sgt;
+}
+
+static void ivpu_gem_userptr_dmabuf_unmap(struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void ivpu_gem_userptr_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct sg_table *sgt = dma_buf->priv;
+ struct sg_page_iter page_iter;
+ struct page *page;
+
+ for_each_sgtable_page(sgt, &page_iter, 0) {
+ page = sg_page_iter_page(&page_iter);
+ unpin_user_page(page);
+ }
+
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static const struct dma_buf_ops ivpu_gem_userptr_dmabuf_ops = {
+ .map_dma_buf = ivpu_gem_userptr_dmabuf_map,
+ .unmap_dma_buf = ivpu_gem_userptr_dmabuf_unmap,
+ .release = ivpu_gem_userptr_dmabuf_release,
+};
+
+static struct dma_buf *
+ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr,
+ size_t size, uint32_t flags)
+{
+ struct dma_buf_export_info exp_info = {};
+ struct dma_buf *dma_buf;
+ struct sg_table *sgt;
+ struct page **pages;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ unsigned int gup_flags = FOLL_LONGTERM;
+ int ret, i, pinned;
+
+ /* Add FOLL_WRITE only if the BO is not read-only */
+ if (!(flags & DRM_IVPU_BO_READ_ONLY))
+ gup_flags |= FOLL_WRITE;
+
+ pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages);
+ if (pinned < 0) {
+ ret = pinned;
+ ivpu_dbg(vdev, IOCTL, "Failed to pin user pages: %d\n", ret);
+ goto free_pages_array;
+ }
+
+ if (pinned != nr_pages) {
+ ivpu_dbg(vdev, IOCTL, "Pinned %d pages, expected %lu\n", pinned, nr_pages);
+ ret = -EFAULT;
+ goto unpin_pages;
+ }
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto unpin_pages;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL);
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create sg table: %d\n", ret);
+ goto free_sgt;
+ }
+
+ exp_info.exp_name = "ivpu_userptr_dmabuf";
+ exp_info.owner = THIS_MODULE;
+ exp_info.ops = &ivpu_gem_userptr_dmabuf_ops;
+ exp_info.size = size;
+ exp_info.flags = O_RDWR | O_CLOEXEC;
+ exp_info.priv = sgt;
+
+ dma_buf = dma_buf_export(&exp_info);
+ if (IS_ERR(dma_buf)) {
+ ret = PTR_ERR(dma_buf);
+ ivpu_dbg(vdev, IOCTL, "Failed to export userptr dma-buf: %d\n", ret);
+ goto free_sg_table;
+ }
+
+ kvfree(pages);
+ return dma_buf;
+
+free_sg_table:
+ sg_free_table(sgt);
+free_sgt:
+ kfree(sgt);
+unpin_pages:
+ for (i = 0; i < pinned; i++)
+ unpin_user_page(pages[i]);
+free_pages_array:
+ kvfree(pages);
+ return ERR_PTR(ret);
+}
+
+static struct ivpu_bo *
+ivpu_bo_create_from_userptr(struct ivpu_device *vdev, void __user *user_ptr,
+ size_t size, uint32_t flags)
+{
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
+
+ dma_buf = ivpu_create_userptr_dmabuf(vdev, user_ptr, size, flags);
+ if (IS_ERR(dma_buf))
+ return ERR_CAST(dma_buf);
+
+ obj = ivpu_gem_prime_import(&vdev->drm, dma_buf);
+ if (IS_ERR(obj)) {
+ dma_buf_put(dma_buf);
+ return ERR_CAST(obj);
+ }
+
+ dma_buf_put(dma_buf);
+
+ bo = to_ivpu_bo(obj);
+ bo->flags = flags;
+
+ return bo;
+}
+
+int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ivpu_bo_create_from_userptr *args = data;
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ void __user *user_ptr = u64_to_user_ptr(args->user_ptr);
+ struct ivpu_bo *bo;
+ int ret;
+
+ if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) {
+ ivpu_dbg(vdev, IOCTL, "Invalid BO flags: 0x%x\n", args->flags);
+ return -EINVAL;
+ }
+
+ if (!args->user_ptr || !args->size) {
+ ivpu_dbg(vdev, IOCTL, "Userptr or size are zero: ptr %llx size %llu\n",
+ args->user_ptr, args->size);
+ return -EINVAL;
+ }
+
+ if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) {
+ ivpu_dbg(vdev, IOCTL, "Userptr or size not page aligned: ptr %llx size %llu\n",
+ args->user_ptr, args->size);
+ return -EINVAL;
+ }
+
+ if (!access_ok(user_ptr, args->size)) {
+ ivpu_dbg(vdev, IOCTL, "Userptr is not accessible: ptr %llx size %llu\n",
+ args->user_ptr, args->size);
+ return -EFAULT;
+ }
+
+ bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+ if (ret) {
+ ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n",
+ bo, file_priv->ctx.id, args->size, args->flags);
+ } else {
+ ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n",
+ args->handle, bo->vpu_addr, args->size, bo->flags);
+ args->vpu_addr = bo->vpu_addr;
+ }
+
+ drm_gem_object_put(&bo->base.base);
+
+ return ret;
+}
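
A hedged userspace sketch of the new ioctl (struct fields are read off the handler above; the ioctl macro is assumed to be generated in ivpu_accel.h, and fd is an open /dev/accel/accelN descriptor):

	void *buf = NULL;
	struct drm_ivpu_bo_create_from_userptr args = {};

	if (posix_memalign(&buf, 4096, 8192))	/* page-aligned, page-multiple */
		return -ENOMEM;

	args.user_ptr = (__u64)(uintptr_t)buf;
	args.size = 8192;
	args.flags = DRM_IVPU_BO_HIGH_MEM;

	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE_FROM_USERPTR, &args) == 0)
		printf("handle %u at NPU addr 0x%llx\n", args.handle,
		       (unsigned long long)args.vpu_addr);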
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 08dcc31b56f4..d69cd0d93569 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -8,6 +8,8 @@
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"
+#include <asm/msr-index.h>
+#include <asm/msr.h>
#include <linux/dmi.h>
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>
@@ -20,6 +22,10 @@ module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
#endif
+#define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */
+
+#define ECC_MCA_SIGNAL_ENABLE_MASK 0xff
+
static char *platform_to_str(u32 platform)
{
switch (platform) {
@@ -147,19 +153,39 @@ static void priority_bands_init(struct ivpu_device *vdev)
vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
}
+int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size)
+{
+ u64 end;
+
+ if (!range || check_add_overflow(start, size, &end)) {
+ ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size);
+ return -EINVAL;
+ }
+
+ range->start = start;
+ range->end = end;
+
+ return 0;
+}
+
static void memory_ranges_init(struct ivpu_device *vdev)
{
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
- ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
- ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
- ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G);
} else {
- ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
- ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
- ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G);
+ ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G);
vdev->hw->ranges.dma = vdev->hw->ranges.user;
}
+
+ drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start,
+ FW_SHARED_MEM_ALIGNMENT));
}
static int wp_enable(struct ivpu_device *vdev)
@@ -373,3 +399,22 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
pm_runtime_mark_last_busy(vdev->drm.dev);
return IRQ_HANDLED;
}
+
+bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev)
+{
+ unsigned long long msr_integrity_caps;
+ int ret;
+
+ if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
+ return false;
+
+ ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps);
+ if (ret) {
+ ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d", ret);
+ return false;
+ }
+
+ ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps);
+
+ return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK;
+}
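
The check reduces to "any of the low eight integrity-capability bits set"; a worked example with an illustrative MSR value:

	u64 caps = 0x2d00000004ULL;			/* illustrative MSR contents */
	bool ecc = caps & ECC_MCA_SIGNAL_ENABLE_MASK;	/* 0x04 -> true, so
							 * vpu_uses_ecc_mca_signal is
							 * later set to VPU_BOOT_MCA_ECC_BOTH
							 * in ivpu_fw_boot_params_setup() */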
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
index d79668fe1609..b6d0f0d0dccc 100644
--- a/drivers/accel/ivpu/ivpu_hw.h
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -21,6 +21,7 @@ struct ivpu_hw_info {
bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
} irq;
struct {
+ struct ivpu_addr_range runtime;
struct ivpu_addr_range global;
struct ivpu_addr_range user;
struct ivpu_addr_range shave;
@@ -51,6 +52,8 @@ struct ivpu_hw_info {
};
int ivpu_hw_init(struct ivpu_device *vdev);
+int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start,
+ u64 size);
int ivpu_hw_power_up(struct ivpu_device *vdev);
int ivpu_hw_power_down(struct ivpu_device *vdev);
int ivpu_hw_reset(struct ivpu_device *vdev);
@@ -60,6 +63,7 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev);
void ivpu_hw_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_irq_disable(struct ivpu_device *vdev);
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr);
+bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev);
static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq)
{
@@ -71,12 +75,6 @@ static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq)
return vdev->hw->irq.ip_irq_handler(vdev, irq);
}
-static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size)
-{
- range->start = start;
- range->end = start + size;
-}
-
static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
{
return range->end - range->start;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c
index b236c7234daa..06e65c592618 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.c
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.c
@@ -33,7 +33,6 @@
#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
-#define PLL_CONFIG_DEFAULT 0x0
#define PLL_REF_CLK_FREQ 50000000ull
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
@@ -303,7 +302,7 @@ static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp,
wp->epp = 0;
} else {
wp->target = hw->pll.pn_ratio;
- wp->cfg = enable ? PLL_CONFIG_DEFAULT : 0;
+ wp->cfg = 0;
wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
wp->epp = enable ? PLL_EPP_DEFAULT : 0;
}
@@ -322,6 +321,14 @@ static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}
+static int wait_for_cdyn_deassert(struct ivpu_device *vdev)
+{
+ if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
+ return 0;
+
+ return REGB_POLL_FLD(VPU_HW_BTRS_LNL_CDYN, CDYN, 0, PLL_TIMEOUT_US);
+}
+
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
{
struct wp_request wp;
@@ -355,6 +362,14 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
return ret;
}
+ if (!enable) {
+ ret = wait_for_cdyn_deassert(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for CDYN deassert\n");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -674,7 +689,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
- queue_work(system_wq, &vdev->irq_dct_work);
+ queue_work(system_percpu_wq, &vdev->irq_dct_work);
}
if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
@@ -753,7 +768,7 @@ int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
}
}
-void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent)
{
u32 val = 0;
u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h
index d2d82651976d..c4c10e22f30f 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs.h
@@ -36,7 +36,7 @@ u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev);
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq);
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq);
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable);
-void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 dct_percent);
+void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent);
u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev);
u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
index fff2ef2cada6..a81a9ba540fa 100644
--- a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h
@@ -74,6 +74,9 @@
#define VPU_HW_BTRS_LNL_PLL_FREQ 0x00000148u
#define VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK GENMASK(15, 0)
+#define VPU_HW_BTRS_LNL_CDYN 0x0000014cu
+#define VPU_HW_BTRS_LNL_CDYN_CDYN_MASK GENMASK(15, 0)
+
#define VPU_HW_BTRS_LNL_TILE_FUSE 0x00000150u
#define VPU_HW_BTRS_LNL_TILE_FUSE_VALID_MASK BIT_MASK(0)
#define VPU_HW_BTRS_LNL_TILE_FUSE_CONFIG_MASK GENMASK(6, 1)
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index 2bf9882ab52e..06aa1e7dc50b 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -691,6 +691,13 @@ static void pwr_island_delay_set(struct ivpu_device *vdev)
status = high ? 46 : 3;
break;
+ case PCI_DEVICE_ID_NVL:
+ post = high ? 198 : 17;
+ post1 = 0;
+ post2 = high ? 198 : 17;
+ status = 0;
+ break;
+
default:
dump_stack();
ivpu_err(vdev, "Unknown device ID\n");
@@ -889,6 +896,9 @@ static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable)
static int soc_cpu_enable(struct ivpu_device *vdev)
{
+ if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX)
+ return 0;
+
return soc_cpu_drive_40xx(vdev, true);
}
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
index 39f83225c181..1f13bf95b2b3 100644
--- a/drivers/accel/ivpu/ivpu_ipc.c
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -141,7 +141,6 @@ ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_rx_msg *rx_msg;
lockdep_assert_held(&ipc->cons_lock);
- lockdep_assert_irqs_disabled();
rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
if (!rx_msg) {
@@ -460,7 +459,7 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
}
}
- queue_work(system_wq, &vdev->irq_ipc_work);
+ queue_work(system_percpu_wq, &vdev->irq_ipc_work);
}
void ivpu_ipc_irq_work_fn(struct work_struct *work)
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index 060f1fc031d3..4f8564e2878a 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -34,22 +34,20 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
static int ivpu_preemption_buffers_create(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE);
- u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE);
-
- if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW ||
- ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE)
+ if (ivpu_fw_preempt_buf_size(vdev) == 0)
return 0;
cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user,
- primary_size, DRM_IVPU_BO_WC);
+ vdev->fw->primary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
if (!cmdq->primary_preempt_buf) {
ivpu_err(vdev, "Failed to create primary preemption buffer\n");
return -ENOMEM;
}
cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma,
- secondary_size, DRM_IVPU_BO_WC);
+ vdev->fw->secondary_preempt_buf_size,
+ DRM_IVPU_BO_WC);
if (!cmdq->secondary_preempt_buf) {
ivpu_err(vdev, "Failed to create secondary preemption buffer\n");
goto err_free_primary;
@@ -66,20 +64,39 @@ err_free_primary:
static void ivpu_preemption_buffers_free(struct ivpu_device *vdev,
struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
- if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW)
- return;
-
if (cmdq->primary_preempt_buf)
ivpu_bo_free(cmdq->primary_preempt_buf);
if (cmdq->secondary_preempt_buf)
ivpu_bo_free(cmdq->secondary_preempt_buf);
}
+static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv,
+ struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+{
+ int ret;
+
+ /* Use preemption buffer provided by the user space */
+ if (job->primary_preempt_buf)
+ return 0;
+
+ if (!cmdq->primary_preempt_buf) {
+ /* Allocate per command queue preemption buffers */
+ ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
+ if (ret)
+ return ret;
+ }
+
+ /* Use preemption buffers allocated by the kernel */
+ job->primary_preempt_buf = cmdq->primary_preempt_buf;
+ job->secondary_preempt_buf = cmdq->secondary_preempt_buf;
+
+ return 0;
+}
+
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq;
- int ret;
cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
@@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv)
if (!cmdq->mem)
goto err_free_cmdq;
- ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq);
- if (ret)
- ivpu_warn(vdev, "Failed to allocate preemption buffers, preemption limited\n");
-
return cmdq;
err_free_cmdq:
@@ -219,11 +232,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
- if (!ret)
+ if (!ret) {
ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n",
cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority);
- else
+ } else {
xa_erase(&vdev->db_xa, cmdq->db_id);
+ cmdq->db_id = 0;
+ }
return ret;
}
@@ -333,7 +348,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32
cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id);
if (!cmdq) {
- ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id);
+ ivpu_dbg(vdev, IOCTL, "Failed to find command queue with ID: %u\n", cmdq_id);
return NULL;
}
@@ -427,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
- if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
- if (cmdq->primary_preempt_buf) {
- entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr;
- entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf);
- }
+ if (job->primary_preempt_buf) {
+ entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr;
+ entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf);
+ }
- if (cmdq->secondary_preempt_buf) {
- entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr;
- entry->secondary_preempt_buf_size =
- ivpu_bo_size(cmdq->secondary_preempt_buf);
- }
+ if (job->secondary_preempt_buf) {
+ entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr;
+ entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf);
}
wmb(); /* Ensure that tail is updated after filling entry */
@@ -522,7 +534,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
job->bo_count = bo_count;
job->done_fence = ivpu_fence_create(vdev);
if (!job->done_fence) {
- ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
+ ivpu_err(vdev, "Failed to create a fence\n");
goto err_free_job;
}
@@ -552,21 +564,26 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device *
return job;
}
-static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
- struct ivpu_job *job;
-
lockdep_assert_held(&vdev->submitted_jobs_lock);
- job = xa_load(&vdev->submitted_jobs_xa, job_id);
- if (!job)
- return -ENOENT;
+ switch (job_status) {
+ case VPU_JSM_STATUS_PROCESSING_ERR:
+ case VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN ... VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX:
+ {
+ struct ivpu_job *job = xa_load(&vdev->submitted_jobs_xa, job_id);
- if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) {
+ if (!job)
+ return false;
+
+ /* Trigger an engine reset */
guard(mutex)(&job->file_priv->lock);
+ job->job_status = job_status;
+
if (job->file_priv->has_mmu_faults)
- return 0;
+ return false;
/*
* Mark context as faulty and defer destruction of the job to jobs abort thread
@@ -574,23 +591,43 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32
* status and ensure both are handled in the same way
*/
job->file_priv->has_mmu_faults = true;
- queue_work(system_wq, &vdev->context_abort_work);
- return 0;
+ queue_work(system_percpu_wq, &vdev->context_abort_work);
+ return true;
}
+ default:
+ /* Complete job with error status, engine reset not required */
+ break;
+ }
+
+ return false;
+}
- job = ivpu_job_remove_from_submitted_jobs(vdev, job_id);
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+{
+ struct ivpu_job *job;
+
+ lockdep_assert_held(&vdev->submitted_jobs_lock);
+
+ job = xa_load(&vdev->submitted_jobs_xa, job_id);
if (!job)
return -ENOENT;
- if (job->file_priv->has_mmu_faults)
- job_status = DRM_IVPU_JOB_STATUS_ABORTED;
+ ivpu_job_remove_from_submitted_jobs(vdev, job_id);
- job->bos[CMD_BUF_IDX]->job_status = job_status;
+ if (job->job_status == VPU_JSM_STATUS_SUCCESS) {
+ if (job->file_priv->has_mmu_faults)
+ job->job_status = DRM_IVPU_JOB_STATUS_ABORTED;
+ else
+ job->job_status = job_status;
+ }
+
+ job->bos[CMD_BUF_IDX]->job_status = job->job_status;
dma_fence_signal(job->done_fence);
trace_job("done", job);
ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n",
- job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status);
+ job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx,
+ job->job_status);
ivpu_job_destroy(job);
ivpu_stop_job_timeout_detection(vdev);
@@ -650,7 +687,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
else
cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id);
if (!cmdq) {
- ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id);
ret = -EINVAL;
goto err_unlock;
}
@@ -661,6 +697,13 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id)
goto err_unlock;
}
+ ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n",
+ job->job_id, ret);
+ goto err_unlock;
+ }
+
job->cmdq_id = cmdq->id;
is_first_job = xa_empty(&vdev->submitted_jobs_xa);
@@ -714,7 +757,7 @@ err_unlock:
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
- u32 buf_count, u32 commands_offset)
+ u32 buf_count, u32 commands_offset, u32 preempt_buffer_index)
{
struct ivpu_file_priv *file_priv = job->file_priv;
struct ivpu_device *vdev = file_priv->vdev;
@@ -727,40 +770,58 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
for (i = 0; i < buf_count; i++) {
struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);
- if (!obj)
+ if (!obj) {
+ ivpu_dbg(vdev, IOCTL, "Failed to lookup GEM object with handle %u\n",
+ buf_handles[i]);
return -ENOENT;
+ }
job->bos[i] = to_ivpu_bo(obj);
- ret = ivpu_bo_pin(job->bos[i]);
+ ret = ivpu_bo_bind(job->bos[i]);
if (ret)
return ret;
}
bo = job->bos[CMD_BUF_IDX];
if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
- ivpu_warn(vdev, "Buffer is already in use\n");
+ ivpu_dbg(vdev, IOCTL, "Buffer is already in use by another job\n");
return -EBUSY;
}
if (commands_offset >= ivpu_bo_size(bo)) {
- ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
+ ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u for buffer size %zu\n",
+ commands_offset, ivpu_bo_size(bo));
return -EINVAL;
}
job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
+ if (preempt_buffer_index) {
+ struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index];
+
+ if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) {
+ ivpu_dbg(vdev, IOCTL, "Preemption buffer is too small\n");
+ return -EINVAL;
+ }
+ if (ivpu_bo_is_mappable(preempt_bo)) {
+ ivpu_dbg(vdev, IOCTL, "Preemption buffer cannot be mappable\n");
+ return -EINVAL;
+ }
+ job->primary_preempt_buf = preempt_bo;
+ }
+
ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
&acquire_ctx);
if (ret) {
- ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
+ ivpu_warn_ratelimited(vdev, "Failed to lock reservations: %d\n", ret);
return ret;
}
for (i = 0; i < buf_count; i++) {
ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
if (ret) {
- ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+ ivpu_warn_ratelimited(vdev, "Failed to reserve fences: %d\n", ret);
goto unlock_reservations;
}
}
@@ -780,7 +841,7 @@ unlock_reservations:
static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id,
u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset,
- u8 priority)
+ u32 preempt_buffer_index, u8 priority)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_job *job;
@@ -807,16 +868,14 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv,
job = ivpu_job_create(file_priv, engine, buffer_count);
if (!job) {
- ivpu_err(vdev, "Failed to create job\n");
ret = -ENOMEM;
goto err_exit_dev;
}
- ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset);
- if (ret) {
- ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset,
+ preempt_buffer_index);
+ if (ret)
goto err_destroy_job;
- }
down_read(&vdev->pm->reset_lock);
ret = ivpu_job_submit(job, priority, cmdq_id);
@@ -842,58 +901,91 @@ err_free_handles:
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_submit *args = data;
u8 priority;
- if (args->engine != DRM_IVPU_ENGINE_COMPUTE)
+ if (args->engine != DRM_IVPU_ENGINE_COMPUTE) {
+ ivpu_dbg(vdev, IOCTL, "Invalid engine %d\n", args->engine);
return -EINVAL;
+ }
- if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) {
+ ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority);
return -EINVAL;
+ }
- if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) {
+ ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count);
return -EINVAL;
+ }
- if (!IS_ALIGNED(args->commands_offset, 8))
+ if (!IS_ALIGNED(args->commands_offset, 8)) {
+ ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset);
return -EINVAL;
+ }
- if (!file_priv->ctx.id)
+ if (!file_priv->ctx.id) {
+ ivpu_dbg(vdev, IOCTL, "Context not initialized\n");
return -EINVAL;
+ }
- if (file_priv->has_mmu_faults)
+ if (file_priv->has_mmu_faults) {
+ ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id);
return -EBADFD;
+ }
priority = ivpu_job_to_jsm_priority(args->priority);
return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine,
- (void __user *)args->buffers_ptr, args->commands_offset, priority);
+ (void __user *)args->buffers_ptr, args->commands_offset, 0, priority);
}
int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
struct drm_ivpu_cmdq_submit *args = data;
- if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
+ ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
return -ENODEV;
+ }
- if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID)
+ if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) {
+ ivpu_dbg(vdev, IOCTL, "Invalid command queue ID %u\n", args->cmdq_id);
return -EINVAL;
+ }
- if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT)
+ if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) {
+ ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count);
return -EINVAL;
+ }
- if (!IS_ALIGNED(args->commands_offset, 8))
+ if (args->preempt_buffer_index >= args->buffer_count) {
+ ivpu_dbg(vdev, IOCTL, "Invalid preemption buffer index %u\n",
+ args->preempt_buffer_index);
return -EINVAL;
+ }
- if (!file_priv->ctx.id)
+ if (!IS_ALIGNED(args->commands_offset, 8)) {
+ ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset);
return -EINVAL;
+ }
- if (file_priv->has_mmu_faults)
+ if (!file_priv->ctx.id) {
+ ivpu_dbg(vdev, IOCTL, "Context not initialized\n");
+ return -EINVAL;
+ }
+
+ if (file_priv->has_mmu_faults) {
+ ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id);
return -EBADFD;
+ }
return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE,
- (void __user *)args->buffers_ptr, args->commands_offset, 0);
+ (void __user *)args->buffers_ptr, args->commands_offset,
+ args->preempt_buffer_index, 0);
}
int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -904,11 +996,15 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
struct ivpu_cmdq *cmdq;
int ret;
- if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
+ ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
return -ENODEV;
+ }
- if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+ if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) {
+ ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority);
return -EINVAL;
+ }
ret = ivpu_rpm_get(vdev);
if (ret < 0)
@@ -936,8 +1032,10 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file
u32 cmdq_id = 0;
int ret;
- if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ))
+ if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) {
+ ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n");
return -ENODEV;
+ }
ret = ivpu_rpm_get(vdev);
if (ret < 0)
@@ -984,7 +1082,9 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
mutex_lock(&vdev->submitted_jobs_lock);
- ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
+ if (!ivpu_job_handle_engine_error(vdev, payload->job_id, payload->job_status))
+ /* No engine error, complete the job normally */
+ ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
mutex_unlock(&vdev->submitted_jobs_lock);
}
@@ -1012,7 +1112,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW)
if (ivpu_jsm_reset_engine(vdev, 0))
- return;
+ goto runtime_put;
mutex_lock(&vdev->context_list_lock);
xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
@@ -1036,7 +1136,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
goto runtime_put;
if (ivpu_jsm_hws_resume_engine(vdev, 0))
- return;
+ goto runtime_put;
/*
* In hardware scheduling mode NPU already has stopped processing jobs
* and won't send us any further notifications, thus we have to free job related resources
@@ -1049,6 +1149,5 @@ void ivpu_context_abort_work_fn(struct work_struct *work)
mutex_unlock(&vdev->submitted_jobs_lock);
runtime_put:
- pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
index 2e301c2eea7b..3ab61e6a5616 100644
--- a/drivers/accel/ivpu/ivpu_job.h
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_JOB_H__
@@ -15,12 +15,17 @@ struct ivpu_device;
struct ivpu_file_priv;
/**
- * struct ivpu_cmdq - Object representing device queue used to send jobs.
- * @jobq: Pointer to job queue memory shared with the device
- * @mem: Memory allocated for the job queue, shared with device
- * @entry_count Number of job entries in the queue
- * @db_id: Doorbell assigned to this job queue
- * @db_registered: True if doorbell is registered in device
+ * struct ivpu_cmdq - Represents a command queue for submitting jobs to the VPU.
+ * Tracks queue memory, preemption buffers, and metadata for job management.
+ * @jobq: Pointer to job queue memory shared with the device
+ * @primary_preempt_buf: Primary preemption buffer for this queue (optional)
+ * @secondary_preempt_buf: Secondary preemption buffer for this queue (optional)
+ * @mem: Memory allocated for the job queue, shared with device
+ * @entry_count: Number of job entries in the queue
+ * @id: Unique command queue ID
+ * @db_id: Doorbell ID assigned to this job queue
+ * @priority: Priority level of the command queue
+ * @is_legacy: True if this is a legacy command queue
*/
struct ivpu_cmdq {
struct vpu_job_queue *jobq;
@@ -35,16 +40,22 @@ struct ivpu_cmdq {
};
/**
- * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer.
- * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW.
- * This is a unit of execution, and be tracked by the job_id for
- * any status reporting from VPU FW through IPC JOB RET/DONE message.
- * @file_priv: The client that submitted this job
- * @job_id: Job ID for KMD tracking and job status reporting from VPU FW
- * @status: Status of the Job from IPC JOB RET/DONE message
- * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job
- * @submit_status_offset: Offset within batch buffer where job completion handler
- will update the job status
+ * struct ivpu_job - Represents a batch or DMA buffer submitted to the VPU.
+ * Each job is a unit of execution, tracked by job_id for status reporting from VPU FW.
+ * The structure holds all resources and metadata needed for job submission, execution,
+ * and completion handling.
+ * @vdev: Pointer to the VPU device
+ * @file_priv: The client context that submitted this job
+ * @done_fence: Fence signaled when job completes
+ * @cmd_buf_vpu_addr: VPU address of the command buffer for this job
+ * @cmdq_id: Command queue ID used for submission
+ * @job_id: Unique job ID for tracking and status reporting
+ * @engine_idx: Engine index for job execution
+ * @job_status: Status reported by firmware for this job
+ * @primary_preempt_buf: Primary preemption buffer for this job
+ * @secondary_preempt_buf: Secondary preemption buffer for this job (optional)
+ * @bo_count: Number of buffer objects associated with this job
+ * @bos: Array of buffer objects used by the job (batch buffer is at index 0)
*/
struct ivpu_job {
struct ivpu_device *vdev;
@@ -54,6 +65,9 @@ struct ivpu_job {
u32 cmdq_id;
u32 job_id;
u32 engine_idx;
+ u32 job_status;
+ struct ivpu_bo *primary_preempt_buf;
+ struct ivpu_bo *secondary_preempt_buf;
size_t bo_count;
struct ivpu_bo *bos[] __counted_by(bo_count);
};
@@ -71,6 +85,7 @@ void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id)
void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
void ivpu_job_done_consumer_fini(struct ivpu_device *vdev);
+bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status);
void ivpu_context_abort_work_fn(struct work_struct *work);
void ivpu_jobs_abort_all(struct ivpu_device *vdev);
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
index 5ea010568faa..e1baf6b64935 100644
--- a/drivers/accel/ivpu/ivpu_mmu.c
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -970,7 +970,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
}
}
- queue_work(system_wq, &vdev->context_abort_work);
+ queue_work(system_percpu_wq, &vdev->context_abort_work);
}
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
index f0267efa55aa..87ad593ef47d 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.c
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -430,7 +430,7 @@ static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_a
int
ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only)
{
size_t start_vpu_addr = vpu_addr;
struct scatterlist *sg;
@@ -450,6 +450,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
prot = IVPU_MMU_ENTRY_MAPPED;
if (llc_coherent)
prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;
+ if (read_only)
+ prot |= IVPU_MMU_ENTRY_FLAG_RO;
mutex_lock(&ctx->lock);
@@ -527,7 +529,8 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
if (ret)
- ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+ ivpu_warn_ratelimited(vdev, "Failed to invalidate TLB for ctx %u: %d\n",
+ ctx->id, ret);
}
int
@@ -568,7 +571,7 @@ void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
mutex_init(&ctx->lock);
if (!context_id) {
- start = vdev->hw->ranges.global.start;
+ start = vdev->hw->ranges.runtime.start;
end = vdev->hw->ranges.shave.end;
} else {
start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start);
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
index f255310968cf..663a11a9db11 100644
--- a/drivers/accel/ivpu/ivpu_mmu_context.h
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -42,7 +42,7 @@ int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);
int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
- u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
u64 vpu_addr, struct sg_table *sgt);
int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c
index 2a043baf10ca..1d9c1cb17924 100644
--- a/drivers/accel/ivpu/ivpu_ms.c
+++ b/drivers/accel/ivpu/ivpu_ms.c
@@ -8,6 +8,7 @@
#include "ivpu_drv.h"
#include "ivpu_gem.h"
+#include "ivpu_hw.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
@@ -37,8 +38,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
struct drm_ivpu_metric_streamer_start *args = data;
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_ms_instance *ms;
- u64 single_buff_size;
u32 sample_size;
+ u64 buf_size;
int ret;
if (!args->metric_group_mask || !args->read_period_samples ||
@@ -52,7 +53,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
mutex_lock(&file_priv->ms_lock);
if (get_instance_by_mask(file_priv, args->metric_group_mask)) {
- ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask);
+ ivpu_dbg(vdev, IOCTL, "Instance already exists (mask %#llx)\n",
+ args->metric_group_mask);
ret = -EALREADY;
goto unlock;
}
@@ -69,12 +71,18 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
if (ret)
goto err_free_ms;
- single_buff_size = sample_size *
- ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER);
- ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS),
- DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
+ buf_size = PAGE_ALIGN((u64)args->read_period_samples * sample_size *
+ MS_READ_PERIOD_MULTIPLIER * MS_NUM_BUFFERS);
+ if (buf_size > ivpu_hw_range_size(&vdev->hw->ranges.global)) {
+ ivpu_dbg(vdev, IOCTL, "Requested MS buffer size %llu exceeds range size %llu\n",
+ buf_size, ivpu_hw_range_size(&vdev->hw->ranges.global));
+ ret = -EINVAL;
+ goto err_free_ms;
+ }
+
+ ms->bo = ivpu_bo_create_global(vdev, buf_size, DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE);
if (!ms->bo) {
- ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size);
+ ivpu_dbg(vdev, IOCTL, "Failed to allocate MS buffer (size %llu)\n", buf_size);
ret = -ENOMEM;
goto err_free_ms;
}
@@ -175,7 +183,8 @@ int ivpu_ms_get_data_ioctl(struct drm_device *dev, void *data, struct drm_file *
ms = get_instance_by_mask(file_priv, args->metric_group_mask);
if (!ms) {
- ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask);
+ ivpu_dbg(vdev, IOCTL, "Instance doesn't exist for mask: %#llx\n",
+ args->metric_group_mask);
ret = -EINVAL;
goto unlock;
}
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 475ddc94f1cf..480c075d87f6 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -54,7 +54,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
{
struct ivpu_fw_info *fw = vdev->fw;
- struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem);
+ struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem_bp);
if (!bp->save_restore_ret_address) {
ivpu_pm_prepare_cold_boot(vdev);
@@ -186,7 +186,7 @@ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
ivpu_hw_diagnose_failure(vdev);
ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
- queue_work(system_unbound_wq, &vdev->pm->recovery_work);
+ queue_work(system_dfl_wq, &vdev->pm->recovery_work);
}
}
@@ -226,7 +226,8 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
/* No-op if already queued */
- queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));
+ queue_delayed_work(system_percpu_wq, &vdev->pm->job_timeout_work,
+ msecs_to_jiffies(timeout_ms));
}
void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)
@@ -359,7 +360,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
void ivpu_rpm_put(struct ivpu_device *vdev)
{
- pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
@@ -428,7 +428,6 @@ void ivpu_pm_enable(struct ivpu_device *vdev)
struct device *dev = vdev->drm.dev;
pm_runtime_allow(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
@@ -502,6 +501,10 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work)
else
ret = ivpu_pm_dct_disable(vdev);
- if (!ret)
- ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent);
+ if (!ret) {
+ /* Convert percent to U1.7 format */
+ u8 val = DIV_ROUND_CLOSEST(vdev->pm->dct_active_percent * 128, 100);
+
+ ivpu_hw_btrs_dct_set_status(vdev, enable, val);
+ }
}
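
For clarity, the U1.7 conversion above maps 0..100 percent onto 0..128, where 128 encodes 1.0 in unsigned 1.7 fixed point; a few worked values:

/* val = DIV_ROUND_CLOSEST(percent * 128, 100)
 * 100% -> 12800 / 100 = 128  (1.0)
 *  50% ->  6400 / 100 =  64  (0.5)
 *  30% ->  3840 / 100 =  38  (~0.297, rounded from 38.4)
 */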
diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c
index 268ab7744a8b..d250a10caca9 100644
--- a/drivers/accel/ivpu/ivpu_sysfs.c
+++ b/drivers/accel/ivpu/ivpu_sysfs.c
@@ -63,7 +63,8 @@ npu_memory_utilization_show(struct device *dev, struct device_attribute *attr, c
mutex_lock(&vdev->bo_list_lock);
list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
- total_npu_memory += bo->base.base.size;
+ if (ivpu_bo_is_resident(bo))
+ total_npu_memory += ivpu_bo_size(bo);
mutex_unlock(&vdev->bo_list_lock);
return sysfs_emit(buf, "%lld\n", total_npu_memory);
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
index 4b6b2b3d2583..bca6a44dc041 100644
--- a/drivers/accel/ivpu/vpu_jsm_api.h
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -1,15 +1,16 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright (c) 2020-2024, Intel Corporation.
+ * Copyright (c) 2020-2025, Intel Corporation.
+ */
+
+/**
+ * @addtogroup Jsm
+ * @{
*/
/**
* @file
* @brief JSM shared definitions
- *
- * @ingroup Jsm
- * @brief JSM shared definitions
- * @{
*/
#ifndef VPU_JSM_API_H
#define VPU_JSM_API_H
@@ -22,7 +23,7 @@
/*
* Minor version changes when API backward compatibility is preserved.
*/
-#define VPU_JSM_API_VER_MINOR 29
+#define VPU_JSM_API_VER_MINOR 33
/*
* API header changed (field names, documentation, formatting) but API itself has not been changed
@@ -71,9 +72,15 @@
#define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU
#define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU
#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
-/* Job status returned when the job was preempted mid-inference */
+/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */
#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+/* Job status returned when the job was preempted mid-command */
+#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU
+/* Range of status codes that require engine reset */
+#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN 0xEU
#define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU
+#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU
+#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX 0x1FU
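
The driver consumes this range via a case range in ivpu_job_handle_engine_error(); an equivalent standalone predicate, shown here only as a sketch:

/* Equivalent to the "case MIN ... MAX" range check used by the driver. */
static inline bool vpu_status_needs_engine_reset(u32 status)
{
	return status == VPU_JSM_STATUS_PROCESSING_ERR ||
	       (status >= VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN &&
		status <= VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX);
}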
/*
* Host <-> VPU IPC channels.
@@ -134,11 +141,21 @@ enum {
* 2. Native fence queues are only supported on VPU 40xx onwards.
*/
VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U),
-
/*
* Enable turbo mode for testing NPU performance; not recommended for regular usage.
*/
- VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U)
+ VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U),
+ /*
+ * Queue error detection mode flag.
+ * For 'interactive' queues (this bit not set), the FW marks a queue as being in
+ * error during the engine reset sequence if it has not completed a job within
+ * the TDR timeout.
+ * For 'non-interactive' queues (this bit set), the FW marks a queue as being in
+ * error if it has not progressed its heartbeat within the non-interactive
+ * no-progress timeout. Additionally, even a queue that progresses its heartbeat
+ * is marked as in error once it runs longer than the non-interactive timeout.
+ */
+ VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U)
};
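
A submitter opts a queue into the non-interactive policy by setting this bit in the shared queue header; a minimal sketch, assuming jobq points at the queue's struct vpu_job_queue_header:

/* Sketch: request heartbeat-based (non-interactive) error detection. */
jobq->flags |= VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE;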
/*
@@ -209,7 +226,7 @@ enum {
*/
#define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2
-/*
+/**
* Job scheduling priority bands for both hardware scheduling and OS scheduling.
*/
enum vpu_job_scheduling_priority_band {
@@ -220,16 +237,16 @@ enum vpu_job_scheduling_priority_band {
VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4,
};
-/*
+/**
* Job format.
* Jobs defines the actual workloads to be executed by a given engine.
*/
struct vpu_job_queue_entry {
- /**< Address of VPU commands batch buffer */
+ /** Address of VPU commands batch buffer */
u64 batch_buf_addr;
- /**< Job ID */
+ /** Job ID */
u32 job_id;
- /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+ /** Flags bit field, see VPU_JOB_FLAGS_* above */
u32 flags;
/**
* Doorbell ring timestamp taken by KMD from SoC's global system clock, in
@@ -237,20 +254,20 @@ struct vpu_job_queue_entry {
* to match other profiling timestamps.
*/
u64 doorbell_timestamp;
- /**< Extra id for job tracking, used only in the firmware perf traces */
+ /** Extra id for job tracking, used only in the firmware perf traces */
u64 host_tracking_id;
- /**< Address of the primary preemption buffer to use for this job */
+ /** Address of the primary preemption buffer to use for this job */
u64 primary_preempt_buf_addr;
- /**< Size of the primary preemption buffer to use for this job */
+ /** Size of the primary preemption buffer to use for this job */
u32 primary_preempt_buf_size;
- /**< Size of secondary preemption buffer to use for this job */
+ /** Size of secondary preemption buffer to use for this job */
u32 secondary_preempt_buf_size;
- /**< Address of secondary preemption buffer to use for this job */
+ /** Address of secondary preemption buffer to use for this job */
u64 secondary_preempt_buf_addr;
u64 reserved_0;
};
-/*
+/**
* Inline command format.
* Inline commands are the commands executed at scheduler level (typically,
* synchronization directives). Inline command and job objects must be of
@@ -258,34 +275,36 @@ struct vpu_job_queue_entry {
*/
struct vpu_inline_cmd {
u64 reserved_0;
- /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
+ /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */
u32 type;
- /* Flags bit field, see VPU_JOB_FLAGS_* above. */
+ /** Flags bit field, see VPU_JOB_FLAGS_* above. */
u32 flags;
- /* Inline command payload. Depends on inline command type. */
- union {
- /* Fence (wait and signal) commands' payload. */
- struct {
- /* Fence object handle. */
+ /** Inline command payload. Depends on inline command type. */
+ union payload {
+ /** Fence (wait and signal) commands' payload. */
+ struct fence {
+ /** Fence object handle. */
u64 fence_handle;
- /* User VA of the current fence value. */
+ /** User VA of the current fence value. */
u64 current_value_va;
- /* User VA of the monitored fence value (read-only). */
+ /** User VA of the monitored fence value (read-only). */
u64 monitored_value_va;
- /* Value to wait for or write in fence location. */
+ /** Value to wait for or write in fence location. */
u64 value;
- /* User VA of the log buffer in which to add log entry on completion. */
+ /** User VA of the log buffer in which to add log entry on completion. */
u64 log_buffer_va;
- /* NPU private data. */
+ /** NPU private data. */
u64 npu_private_data;
} fence;
- /* Other commands do not have a payload. */
- /* Payload definition for future inline commands can be inserted here. */
+ /**
+ * Other commands do not have a payload:
+ * Payload definition for future inline commands can be inserted here.
+ */
u64 reserved_1[6];
} payload;
};
-/*
+/**
* Job queue slots can be populated either with job objects or inline command objects.
*/
union vpu_jobq_slot {
@@ -293,7 +312,7 @@ union vpu_jobq_slot {
struct vpu_inline_cmd inline_cmd;
};
-/*
+/**
* Job queue control registers.
*/
struct vpu_job_queue_header {
@@ -301,18 +320,18 @@ struct vpu_job_queue_header {
u32 head;
u32 tail;
u32 flags;
- /* Set to 1 to indicate priority_band field is valid */
+ /** Set to 1 to indicate priority_band field is valid */
u32 priority_band_valid;
- /*
+ /**
* Priority for the work of this job queue, valid only if the HWS is NOT used
- * and the `priority_band_valid` is set to 1. It is applied only during
- * the VPU_JSM_MSG_REGISTER_DB message processing.
- * The device firmware might use the `priority_band` to optimize the power
+ * and the @ref priority_band_valid is set to 1. It is applied only during
+ * the @ref VPU_JSM_MSG_REGISTER_DB message processing.
+ * The device firmware might use the priority_band to optimize the power
* management logic, but it will not affect the order of jobs.
* Available priority bands: @see enum vpu_job_scheduling_priority_band
*/
u32 priority_band;
- /* Inside realtime band assigns a further priority, limited to 0..31 range */
+ /** Inside realtime band assigns a further priority, limited to 0..31 range */
u32 realtime_priority_level;
u32 reserved_0[9];
};
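
Tying the priority fields together, a minimal sketch of initializing the header for OS scheduling; hdr is assumed to point at the queue memory shared with the device, and the NORMAL band is chosen arbitrarily:

/* Sketch: the band is honoured only when HWS is NOT used and is applied
 * during VPU_JSM_MSG_REGISTER_DB processing, as documented above.
 */
hdr->priority_band_valid = 1;
hdr->priority_band = VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL;
hdr->realtime_priority_level = 0; /* meaningful only inside the realtime band */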
@@ -337,16 +356,16 @@ enum vpu_trace_entity_type {
VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2,
};
-/*
+/**
* HWS specific log buffer header details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_header {
- /* Written by VPU after adding a log entry. Initialised by host to 0. */
+ /** Written by VPU after adding a log entry. Initialised by host to 0. */
u32 first_free_entry_index;
- /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
+ /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */
u32 wraparound_count;
- /*
+ /**
* This is the number of buffers that can be stored in the log buffer provided by the host.
* It is written by host before passing buffer to VPU. VPU should consider it read-only.
*/
@@ -354,14 +373,14 @@ struct vpu_hws_log_buffer_header {
u64 reserved[2];
};
-/*
+/**
* HWS specific log buffer entry details.
* Total size is 32 bytes.
*/
struct vpu_hws_log_buffer_entry {
- /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
+ /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */
u64 vpu_timestamp;
- /*
+ /**
* Operation type:
* 0 - context state change
* 1 - queue new work
@@ -371,7 +390,7 @@ struct vpu_hws_log_buffer_entry {
*/
u32 operation_type;
u32 reserved;
- /* Operation data depends on operation type */
+ /** Operation data depends on operation type */
u64 operation_data[2];
};
@@ -381,51 +400,54 @@ enum vpu_hws_native_fence_log_type {
VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2
};
-/* HWS native fence log buffer header. */
+/** HWS native fence log buffer header. */
struct vpu_hws_native_fence_log_header {
union {
struct {
- /* Index of the first free entry in buffer. */
+ /** Index of the first free entry in buffer. */
u32 first_free_entry_idx;
- /* Incremented each time NPU wraps around the buffer to write next entry. */
+ /**
+ * Incremented whenever the NPU wraps around the buffer and writes
+ * to the first entry again.
+ */
u32 wraparound_count;
};
- /* Field allowing atomic update of both fields above. */
+ /** Field allowing atomic update of both fields above. */
u64 atomic_wraparound_and_entry_idx;
};
- /* Log buffer type, see enum vpu_hws_native_fence_log_type. */
+ /** Log buffer type, see enum vpu_hws_native_fence_log_type. */
u64 type;
- /* Allocated number of entries in the log buffer. */
+ /** Allocated number of entries in the log buffer. */
u64 entry_nb;
u64 reserved[2];
};
-/* Native fence log operation types. */
+/** Native fence log operation types. */
enum vpu_hws_native_fence_log_op {
VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0,
VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1
};
-/* HWS native fence log entry. */
+/** HWS native fence log entry. */
struct vpu_hws_native_fence_log_entry {
- /* Newly signaled/unblocked fence value. */
+ /** Newly signaled/unblocked fence value. */
u64 fence_value;
- /* Native fence object handle to which this operation belongs. */
+ /** Native fence object handle to which this operation belongs. */
u64 fence_handle;
- /* Operation type, see enum vpu_hws_native_fence_log_op. */
+ /** Operation type, see enum vpu_hws_native_fence_log_op. */
u64 op_type;
u64 reserved_0;
- /*
+ /**
* VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence
* wait was started (in NPU SysTime).
*/
u64 fence_wait_start_ts;
u64 reserved_1;
- /* Timestamp at which fence operation was completed (in NPU SysTime). */
+ /** Timestamp at which fence operation was completed (in NPU SysTime). */
u64 fence_end_ts;
};
-/* Native fence log buffer. */
+/** Native fence log buffer. */
struct vpu_hws_native_fence_log_buffer {
struct vpu_hws_native_fence_log_header header;
struct vpu_hws_native_fence_log_entry entry[];
@@ -435,10 +457,17 @@ struct vpu_hws_native_fence_log_buffer {
* Host <-> VPU IPC messages types.
*/
enum vpu_ipc_msg_type {
+ /** Unsupported command */
VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
- /* IPC Host -> Device, Async commands */
+ /** IPC Host -> Device, base id for async commands */
VPU_JSM_MSG_ASYNC_CMD = 0x1100,
+ /**
+ * Reset engine. The NPU cancels all the jobs currently executing on the target
+ * engine making the engine become idle and then does a HW reset, before returning
+ * to the host.
+ * @see struct vpu_ipc_msg_payload_engine_reset
+ */
VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
/**
* Preempt engine. The NPU stops (preempts) all the jobs currently
@@ -448,10 +477,24 @@ enum vpu_ipc_msg_type {
* the target engine, but it stops processing them (until the queue doorbell
* is rung again); the host is responsible to reset the job queue, either
* after preemption or when resubmitting jobs to the queue.
+ * @see vpu_ipc_msg_payload_engine_preempt
*/
VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
+ /**
+ * OS scheduling doorbell register command
+ * @see vpu_ipc_msg_payload_register_db
+ */
VPU_JSM_MSG_REGISTER_DB = 0x1102,
+ /**
+ * OS scheduling doorbell unregister command
+ * @see vpu_ipc_msg_payload_unregister_db
+ */
VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
+ /**
+ * Query engine heartbeat. The heartbeat is expected to increase monotonically
+ * while work is being progressed by the NPU.
+ * @see vpu_ipc_msg_payload_query_engine_hb
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105,
VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106,
@@ -477,6 +520,7 @@ enum vpu_ipc_msg_type {
* aborted and removed from internal scheduling queues. All doorbells assigned
* to the host_ssid are unregistered and any internal FW resources belonging to
* the host_ssid are released.
+ * @see vpu_ipc_msg_payload_ssid_release
*/
VPU_JSM_MSG_SSID_RELEASE = 0x110e,
/**
@@ -504,43 +548,78 @@ enum vpu_ipc_msg_type {
* @see vpu_jsm_metric_streamer_start
*/
VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112,
- /** Control command: Priority band setup */
+ /**
+ * Control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113,
- /** Control command: Create command queue */
+ /**
+ * Control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114,
- /** Control command: Destroy command queue */
+ /**
+ * Control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115,
- /** Control command: Set context scheduling properties */
+ /**
+ * Control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116,
- /*
+ /**
* Register a doorbell to notify VPU of new work. The doorbell may later be
* deallocated or reassigned to another context.
+ * @see vpu_jsm_hws_register_db
*/
VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
- /** Control command: Log buffer setting */
+ /**
+ * Control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118,
- /* Control command: Suspend command queue. */
+ /**
+ * Control command: Suspend command queue.
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119,
- /* Control command: Resume command queue */
+ /**
+ * Control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a,
- /* Control command: Resume engine after reset */
+ /**
+ * Control command: Resume engine after reset
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b,
- /* Control command: Enable survivability/DCT mode */
+ /**
+ * Control command: Enable survivability/DCT mode
+ * @see vpu_ipc_msg_payload_pwr_dct_control
+ */
VPU_JSM_MSG_DCT_ENABLE = 0x111c,
- /* Control command: Disable survivability/DCT mode */
+ /**
+ * Control command: Disable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_DISABLE = 0x111d,
/**
* Dump VPU state. To be used for debug purposes only.
- * NOTE: Please introduce new ASYNC commands before this one. *
+ * This command has no payload.
+ * NOTE: Please introduce new ASYNC commands before this one.
*/
VPU_JSM_MSG_STATE_DUMP = 0x11FF,
- /* IPC Host -> Device, General commands */
+ /** IPC Host -> Device, base id for general commands */
VPU_JSM_MSG_GENERAL_CMD = 0x1200,
+ /** Unsupported command */
VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD,
/**
* Control dyndbg behavior by executing a dyndbg command; equivalent to
- * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
+ * Linux command:
+ * @verbatim echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control @endverbatim
+ * @see vpu_ipc_msg_payload_dyndbg_control
*/
VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
/**
@@ -548,17 +627,35 @@ enum vpu_ipc_msg_type {
*/
VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202,
- /* IPC Device -> Host, Job completion */
+ /**
+ * IPC Device -> Host, Job completion
+ * @see struct vpu_ipc_msg_payload_job_done
+ */
VPU_JSM_MSG_JOB_DONE = 0x2100,
- /* IPC Device -> Host, Fence signalled */
+ /**
+ * IPC Device -> Host, Fence signalled
+ * @see vpu_ipc_msg_payload_native_fence_signalled
+ */
VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101,
/* IPC Device -> Host, Async command completion */
VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
+ /**
+ * IPC Device -> Host, engine reset complete
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
+ /**
+ * Preempt complete message
+ * @see vpu_ipc_msg_payload_engine_preempt_done
+ */
VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201,
VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202,
VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203,
+ /**
+ * Response to query engine heartbeat.
+ * @see vpu_ipc_msg_payload_query_engine_hb_done
+ */
VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204,
VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205,
VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206,
@@ -575,7 +672,10 @@ enum vpu_ipc_msg_type {
VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c,
/** Response to VPU_JSM_MSG_TRACE_GET_NAME. */
VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d,
- /** Response to VPU_JSM_MSG_SSID_RELEASE. */
+ /**
+ * Response to VPU_JSM_MSG_SSID_RELEASE.
+ * @see vpu_ipc_msg_payload_ssid_release
+ */
VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e,
/**
* Response to VPU_JSM_MSG_METRIC_STREAMER_START.
@@ -605,37 +705,71 @@ enum vpu_ipc_msg_type {
/**
* Asynchronous event sent from the VPU to the host either when the current
* metric buffer is full or when the VPU has collected a multiple of
- * @notify_sample_count samples as indicated through the start command
- * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected
- * metric data.
+ * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated
+ * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns
+ * information about collected metric data.
* @see vpu_jsm_metric_streamer_done
*/
VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213,
- /** Response to control command: Priority band setup */
+ /**
+ * Response to control command: Priority band setup
+ * @see vpu_ipc_msg_payload_hws_priority_band_setup
+ */
VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214,
- /** Response to control command: Create command queue */
+ /**
+ * Response to control command: Create command queue
+ * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp
+ */
VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215,
- /** Response to control command: Destroy command queue */
+ /**
+ * Response to control command: Destroy command queue
+ * @see vpu_ipc_msg_payload_hws_destroy_cmdq
+ */
VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
- /** Response to control command: Set context scheduling properties */
+ /**
+ * Response to control command: Set context scheduling properties
+ * @see vpu_ipc_msg_payload_hws_set_context_sched_properties
+ */
VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
- /** Response to control command: Log buffer setting */
+ /**
+ * Response to control command: Log buffer setting
+ * @see vpu_ipc_msg_payload_hws_set_scheduling_log
+ */
VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218,
- /* IPC Device -> Host, HWS notify index entry of log buffer written */
+ /**
+ * IPC Device -> Host, HWS notify index entry of log buffer written
+ * @see vpu_ipc_msg_payload_hws_scheduling_log_notification
+ */
VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219,
- /* IPC Device -> Host, HWS completion of a context suspend request */
+ /**
+ * IPC Device -> Host, HWS completion of a context suspend request
+ * @see vpu_ipc_msg_payload_hws_suspend_cmdq
+ */
VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a,
- /* Response to control command: Resume command queue */
+ /**
+ * Response to control command: Resume command queue
+ * @see vpu_ipc_msg_payload_hws_resume_cmdq
+ */
VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b,
- /* Response to control command: Resume engine command response */
+ /**
+ * Response to control command: Resume engine command response
+ * @see vpu_ipc_msg_payload_hws_resume_engine
+ */
VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c,
- /* Response to control command: Enable survivability/DCT mode */
+ /**
+ * Response to control command: Enable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d,
- /* Response to control command: Disable survivability/DCT mode */
+ /**
+ * Response to control command: Disable survivability/DCT mode
+ * This command has no payload
+ */
VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e,
/**
* Response to state dump control command.
- * NOTE: Please introduce new ASYNC responses before this one. *
+ * This command has no payload.
+ * NOTE: Please introduce new ASYNC responses before this one.
*/
VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF,
@@ -653,57 +787,66 @@ enum vpu_ipc_msg_type {
enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED };
-/*
- * Host <-> LRT IPC message payload definitions
+/**
+ * Engine reset request payload
+ * @see VPU_JSM_MSG_ENGINE_RESET
*/
struct vpu_ipc_msg_payload_engine_reset {
- /* Engine to be reset. */
+ /** Engine to be reset. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
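+
+/*
+ * Illustrative sketch (not part of this header): a host driver could
+ * request an engine reset roughly as follows. The payload union member
+ * name "engine_reset" is an assumption and may differ:
+ *
+ *   struct vpu_jsm_msg msg = { 0 };
+ *
+ *   msg.type = VPU_JSM_MSG_ENGINE_RESET;
+ *   msg.payload.engine_reset.engine_idx = 0;
+ *
+ * The host then sends the message over IPC and waits for the
+ * VPU_JSM_MSG_ENGINE_RESET_DONE response.
+ */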
+/**
+ * Engine preemption request struct
+ * @see VPU_JSM_MSG_ENGINE_PREEMPT
+ */
struct vpu_ipc_msg_payload_engine_preempt {
- /* Engine to be preempted. */
+ /** Engine to be preempted. */
u32 engine_idx;
- /* ID of the preemption request. */
+ /** ID of the preemption request. */
u32 preempt_id;
};
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
* This structure supports doorbell registration for only OS scheduling.
* @see VPU_JSM_MSG_REGISTER_DB
*/
struct vpu_ipc_msg_payload_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Virtual address in Global GTT pointing to the start of job queue. */
+ /** Virtual address in Global GTT pointing to the start of job queue. */
u64 jobq_base;
- /* Size of the job queue in bytes. */
+ /** Size of the job queue in bytes. */
u32 jobq_size;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
};
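+
+/*
+ * Illustrative sketch: registering a doorbell under OS scheduling. The
+ * values and the "register_db" payload member name are assumptions:
+ *
+ *   msg.type = VPU_JSM_MSG_REGISTER_DB;
+ *   msg.payload.register_db.db_idx = 1;
+ *   msg.payload.register_db.jobq_base = jobq_gtt_va;
+ *   msg.payload.register_db.jobq_size = SZ_4K;
+ *   msg.payload.register_db.host_ssid = ssid;
+ */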
/**
- * @brief Unregister doorbell command structure.
+ * Unregister doorbell command structure.
* Request structure to unregister a doorbell for both HW and OS scheduling.
* @see VPU_JSM_MSG_UNREGISTER_DB
*/
struct vpu_ipc_msg_payload_unregister_db {
- /* Index of the doorbell to unregister. */
+ /** Index of the doorbell to unregister. */
u32 db_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
+/**
+ * Heartbeat request structure
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB
+ */
struct vpu_ipc_msg_payload_query_engine_hb {
- /* Engine to return heartbeat value. */
+ /** Engine to return heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -723,10 +866,14 @@ struct vpu_ipc_msg_payload_power_level {
u32 reserved_0;
};
+/**
+ * Structure for requesting ssid release
+ * @see VPU_JSM_MSG_SSID_RELEASE
+ */
struct vpu_ipc_msg_payload_ssid_release {
- /* Host sub-stream ID for the context to be released. */
+ /** Host sub-stream ID for the context to be released. */
u32 host_ssid;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -752,7 +899,7 @@ struct vpu_jsm_metric_streamer_start {
u64 sampling_rate;
/**
* If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message
- * after every @notify_sample_count samples is collected or dropped by the VPU.
+ * after every @ref notify_sample_count samples are collected or dropped by the VPU.
* If set to UINT_MAX the VPU will only generate a notification when the metric
* buffer is full. If set to 0 the VPU will never generate a notification.
*/
@@ -762,9 +909,9 @@ struct vpu_jsm_metric_streamer_start {
* Address and size of the buffer where the VPU will write metric data. The
* VPU writes all counters from enabled metric groups one after another. If
* there is no space left to write data at the next sample period the VPU
- * will switch to the next buffer (@see next_buffer_addr) and will optionally
- * send a notification to the host driver if @notify_sample_count is non-zero.
- * If @next_buffer_addr is NULL the VPU will stop collecting metric data.
+ * will switch to the next buffer (@ref next_buffer_addr) and will optionally
+ * send a notification to the host driver if @ref notify_sample_count is non-zero.
+ * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data.
*/
u64 buffer_addr;
u64 buffer_size;
@@ -827,63 +974,80 @@ struct vpu_jsm_metric_streamer_update {
u64 next_buffer_size;
};
+/**
+ * Device -> host job completion message.
+ * @see VPU_JSM_MSG_JOB_DONE
+ */
struct vpu_ipc_msg_payload_job_done {
- /* Engine to which the job was submitted. */
+ /** Engine to which the job was submitted. */
u32 engine_idx;
- /* Index of the doorbell to which the job was submitted */
+ /** Index of the doorbell to which the job was submitted */
u32 db_idx;
- /* ID of the completed job */
+ /** ID of the completed job */
u32 job_id;
- /* Status of the completed job */
+ /** Status of the completed job */
u32 job_status;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/*
+/**
* Notification message upon native fence signalling.
* @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED
*/
struct vpu_ipc_msg_payload_native_fence_signalled {
- /* Engine ID. */
+ /** Engine ID. */
u32 engine_idx;
- /* Host SSID. */
+ /** Host SSID. */
u32 host_ssid;
- /* CMDQ ID */
+ /** CMDQ ID */
u64 cmdq_id;
- /* Fence object handle. */
+ /** Fence object handle. */
u64 fence_handle;
};
+/**
+ * vpu_ipc_msg_payload_engine_reset_done will contain an array of this
+ * structure, identifying which queues caused the reset if the FW was able
+ * to detect any error.
+ * @see vpu_ipc_msg_payload_engine_reset_done
+ */
struct vpu_jsm_engine_reset_context {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /* See VPU_ENGINE_RESET_CONTEXT_* defines */
+ /** See VPU_ENGINE_RESET_CONTEXT_* defines */
u64 flags;
};
+/**
+ * Engine reset response.
+ * @see VPU_JSM_MSG_ENGINE_RESET_DONE
+ */
struct vpu_ipc_msg_payload_engine_reset_done {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Number of impacted contexts */
+ /** Number of impacted contexts */
u32 num_impacted_contexts;
- /* Array of impacted command queue ids and their flags */
+ /** Array of impacted command queue ids and their flags */
struct vpu_jsm_engine_reset_context
impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS];
};
+/**
+ * Preemption response struct
+ * @see VPU_JSM_MSG_ENGINE_PREEMPT_DONE
+ */
struct vpu_ipc_msg_payload_engine_preempt_done {
- /* Engine preempted. */
+ /** Engine preempted. */
u32 engine_idx;
- /* ID of the preemption request. */
+ /** ID of the preemption request. */
u32 preempt_id;
};
@@ -912,12 +1076,16 @@ struct vpu_ipc_msg_payload_unregister_db_done {
u32 reserved_0;
};
+/**
+ * Structure for heartbeat response
+ * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE
+ */
struct vpu_ipc_msg_payload_query_engine_hb_done {
- /* Engine returning heartbeat value. */
+ /** Engine returning heartbeat value. */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
- /* Heartbeat value. */
+ /** Heartbeat value. */
u64 heartbeat;
};
@@ -937,7 +1105,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done {
u8 power_limit[16];
};
-/* HWS priority band setup request / response */
+/**
+ * HWS priority band setup request / response
+ * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP
+ */
struct vpu_ipc_msg_payload_hws_priority_band_setup {
/*
* Grace period in 100ns units when preempting another priority band for
@@ -964,15 +1135,23 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup {
* TDR timeout value in milliseconds. Default value of 0 meaning no timeout.
*/
u32 tdr_timeout;
+ /*
+ * Non-interactive queue timeout for no progress of heartbeat in milliseconds.
+ * Default value of 0 meaning no timeout.
+ */
+ u32 non_interactive_no_progress_timeout;
+ /*
+ * Non-interactive queue upper limit timeout value in milliseconds. Default
+ * value of 0 meaning no timeout.
+ */
+ u32 non_interactive_timeout;
};
-/*
+/**
* @brief HWS create command queue request.
* Host will create a command queue via this command.
* Note: Cmdq group is a handle of an object which
* may contain one or more command queues.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq {
/* Process id */
@@ -993,66 +1172,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq {
u32 reserved_0;
};
-/*
- * @brief HWS create command queue response.
- * @see VPU_JSM_MSG_CREATE_CMD_QUEUE
+/**
+ * HWS create command queue response.
* @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP
*/
struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
- /* Process id */
+ /** Process id */
u64 process_id;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Engine for which queue is being created */
+ /** Engine for which queue is being created */
u32 engine_idx;
- /* Command queue group */
+ /** Command queue group */
u64 cmdq_group;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS destroy command queue request / response */
+/**
+ * HWS destroy command queue request / response
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE
+ * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP
+ */
struct vpu_ipc_msg_payload_hws_destroy_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/* HWS set context scheduling properties request / response */
+/**
+ * HWS set context scheduling properties request / response
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES
+ * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP
+ */
struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /*
+ /**
* Priority band to assign to work of this context.
* Available priority bands: @see enum vpu_job_scheduling_priority_band
*/
u32 priority_band;
- /* Inside realtime band assigns a further priority */
+ /** Inside realtime band assigns a further priority */
u32 realtime_priority_level;
- /* Priority relative to other contexts in the same process */
+ /** Priority relative to other contexts in the same process */
s32 in_process_priority;
- /* Zero padding / Reserved */
+ /** Zero padding / Reserved */
u32 reserved_1;
- /*
+ /**
* Context quantum relative to other contexts of same priority in the same process
* Minimum value supported by NPU is 1ms (10000 in 100ns units).
*/
u64 context_quantum;
- /* Grace period when preempting context of the same priority within the same process */
+ /** Grace period when preempting context of the same priority within the same process */
u64 grace_period_same_priority;
- /* Grace period when preempting context of a lower priority within the same process */
+ /** Grace period when preempting context of a lower priority within the same process */
u64 grace_period_lower_priority;
};
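+
+/*
+ * Illustrative sketch: context_quantum is expressed in 100 ns units, so
+ * a millisecond value is multiplied by 10000. For example, a 5 ms
+ * quantum (the minimum supported by the NPU is 1 ms):
+ *
+ *   props.context_quantum = 5 * 10000;
+ */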
-/*
- * @brief Register doorbell command structure.
+/**
+ * Register doorbell command structure.
* This structure supports doorbell registration for both HW and OS scheduling.
* Note: Queue base and size are added here so that the same structure can be used for
* OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored
@@ -1061,27 +1247,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
* @see VPU_JSM_MSG_HWS_REGISTER_DB
*/
struct vpu_jsm_hws_register_db {
- /* Index of the doorbell to register. */
+ /** Index of the doorbell to register. */
u32 db_id;
- /* Host sub-stream ID for the context assigned to the doorbell. */
+ /** Host sub-stream ID for the context assigned to the doorbell. */
u32 host_ssid;
- /* ID of the command queue associated with the doorbell. */
+ /** ID of the command queue associated with the doorbell. */
u64 cmdq_id;
- /* Virtual address pointing to the start of command queue. */
+ /** Virtual address pointing to the start of command queue. */
u64 cmdq_base;
- /* Size of the command queue in bytes. */
+ /** Size of the command queue in bytes. */
u64 cmdq_size;
};
-/*
- * @brief Structure to set another buffer to be used for scheduling-related logging.
+/**
+ * Structure to set another buffer to be used for scheduling-related logging.
* The size of the logging buffer and the number of entries is defined as part of the
* buffer itself as described next.
 * The log buffer received from the host is made up of:
- * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'.
+ * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header.
* The header contains the number of log entries in the buffer.
* - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in
- * 'struct vpu_hws_log_buffer_entry'.
+ * @ref vpu_hws_log_buffer_entry.
* The entry contains the VPU timestamp, operation type and data.
* The host should provide the notify index value of log buffer to VPU. This is a
* value defined within the log buffer and when written to will generate the
@@ -1095,30 +1281,30 @@ struct vpu_jsm_hws_register_db {
* @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
*/
struct vpu_ipc_msg_payload_hws_set_scheduling_log {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /*
+ /**
* VPU log buffer virtual address.
* Set to 0 to disable logging for this engine.
*/
u64 vpu_log_buffer_va;
- /*
+ /**
* Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION
* is generated when an event log is written to this index.
*/
u64 notify_index;
- /*
+ /**
 * Field is deprecated and will be removed when the KMD is updated to support removal
*/
u32 enable_extra_events;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
};
-/*
- * @brief The scheduling log notification is generated by VPU when it writes
+/**
+ * The scheduling log notification is generated by VPU when it writes
* an event into the log buffer at the notify_index. VPU notifies host with
* VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous
* message from VPU to host.
@@ -1126,14 +1312,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log {
* @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG
*/
struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
- /* Engine ordinal */
+ /** Engine ordinal */
u32 engine_idx;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
};
-/*
- * @brief HWS suspend command queue request and done structure.
+/**
+ * HWS suspend command queue request and done structure.
 * Host will request the suspend of contexts and the VPU will:
* - Suspend all work on this context
* - Preempt any running work
@@ -1152,21 +1338,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
* @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
*/
struct vpu_ipc_msg_payload_hws_suspend_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
- /*
+ /**
* Suspend fence value - reported by the VPU suspend context
* completed once suspend is complete.
*/
u64 suspend_fence_value;
};
-/*
- * @brief HWS Resume command queue request / response structure.
+/**
+ * HWS Resume command queue request / response structure.
* Host will request the resume of a context;
* - VPU will resume all work on this context
* - Scheduler will allow this context to be scheduled
@@ -1174,25 +1360,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq {
* @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP
*/
struct vpu_ipc_msg_payload_hws_resume_cmdq {
- /* Host SSID */
+ /** Host SSID */
u32 host_ssid;
- /* Zero Padding */
+ /** Zero Padding */
u32 reserved_0;
- /* Command queue id */
+ /** Command queue id */
u64 cmdq_id;
};
-/*
- * @brief HWS Resume engine request / response structure.
- * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume.
+/**
+ * HWS Resume engine request / response structure.
+ * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume.
* Host shall send this command to resume scheduling of any valid queue.
- * @see VPU_JSM_MSG_HWS_RESUME_ENGINE
+ * @see VPU_JSM_MSG_HWS_ENGINE_RESUME
* @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE
*/
struct vpu_ipc_msg_payload_hws_resume_engine {
- /* Engine to be resumed */
+ /** Engine to be resumed */
u32 engine_idx;
- /* Reserved */
+ /** Reserved */
u32 reserved_0;
};
@@ -1326,7 +1512,7 @@ struct vpu_jsm_metric_streamer_done {
/**
* Metric group description placed in the metric buffer after successful completion
* of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
- * @vpu_jsm_metric_counter_descriptor records.
+ * @ref vpu_jsm_metric_counter_descriptor records.
* @see VPU_JSM_MSG_METRIC_STREAMER_INFO
*/
struct vpu_jsm_metric_group_descriptor {
@@ -1413,29 +1599,24 @@ struct vpu_jsm_metric_counter_descriptor {
};
/**
- * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests.
+ * Payload for @ref VPU_JSM_MSG_DYNDBG_CONTROL requests.
*
- * VPU_JSM_MSG_DYNDBG_CONTROL are used to control the VPU FW Dynamic Debug
- * feature, which allows developers to selectively enable / disable MVLOG_DEBUG
- * messages. This is equivalent to the Dynamic Debug functionality provided by
- * Linux
- * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html)
- * The host can control Dynamic Debug behavior by sending dyndbg commands, which
- * have the same syntax as Linux
- * dyndbg commands.
+ * VPU_JSM_MSG_DYNDBG_CONTROL requests are used to control the VPU FW dynamic debug
+ * feature, which allows developers to selectively enable/disable code to obtain
+ * additional FW information. This is equivalent to the dynamic debug functionality
+ * provided by Linux. The host can control dynamic debug behavior by sending dyndbg
+ * commands, using the same syntax as for Linux dynamic debug commands.
*
- * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host
- * still has to set the logging level to MVLOG_DEBUG, using the
- * VPU_JSM_MSG_TRACE_SET_CONFIG command.
+ * @see https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html.
*
- * The host can see the current dynamic debug configuration by executing a
- * special 'show' command. The dyndbg configuration will be printed to the
- * configured logging destination using MVLOG_INFO logging level.
+ * NOTE:
+ * As the dynamic debug feature uses MVLOG messages to provide information, the host
+ * must first set the logging level to MVLOG_DEBUG, using the @ref VPU_JSM_MSG_TRACE_SET_CONFIG
+ * command.
*/
struct vpu_ipc_msg_payload_dyndbg_control {
/**
- * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated
- * string.
+ * Dyndbg command to be executed.
*/
char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
};
@@ -1456,7 +1637,7 @@ struct vpu_ipc_msg_payload_pwr_d0i3_enter {
};
/**
- * Payload for VPU_JSM_MSG_DCT_ENABLE message.
+ * Payload for @ref VPU_JSM_MSG_DCT_ENABLE message.
*
* Default values for DCT active/inactive times are 5.3ms and 30ms respectively,
* corresponding to a 85% duty cycle. This payload allows the host to tune these
@@ -1513,28 +1694,28 @@ union vpu_ipc_msg_payload {
struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control;
};
-/*
- * Host <-> LRT IPC message base structure.
+/**
+ * Host <-> NPU IPC message base structure.
*
* NOTE: All instances of this object must be aligned on a 64B boundary
* to allow proper handling of VPU cache operations.
*/
struct vpu_jsm_msg {
- /* Reserved */
+ /** Reserved */
u64 reserved_0;
- /* Message type, see vpu_ipc_msg_type enum. */
+ /** Message type, see @ref vpu_ipc_msg_type. */
u32 type;
- /* Buffer status, see vpu_ipc_msg_status enum. */
+ /** Buffer status, see @ref vpu_ipc_msg_status. */
u32 status;
- /*
+ /**
* Request ID, provided by the host in a request message and passed
* back by VPU in the response message.
*/
u32 request_id;
- /* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
+ /** Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
u32 result;
u64 reserved_1;
- /* Message payload depending on message type, see vpu_ipc_msg_payload union. */
+ /** Message payload depending on message type, see vpu_ipc_msg_payload union. */
union vpu_ipc_msg_payload payload;
};
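+
+/*
+ * Illustrative sketch: per the note above, every instance must sit on a
+ * 64-byte boundary, e.g.:
+ *
+ *   static struct vpu_jsm_msg jsm_msg __aligned(64);
+ */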
diff --git a/drivers/accel/qaic/Kconfig b/drivers/accel/qaic/Kconfig
index 5e405a19c157..116e42d152ca 100644
--- a/drivers/accel/qaic/Kconfig
+++ b/drivers/accel/qaic/Kconfig
@@ -9,6 +9,7 @@ config DRM_ACCEL_QAIC
depends on PCI && HAS_IOMEM
depends on MHI_BUS
select CRC32
+ select WANT_DEV_COREDUMP
help
Enables driver for Qualcomm's Cloud AI accelerator PCIe cards that are
designed to accelerate Deep Learning inference workloads.
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 1106b876f737..71f727b74da3 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -11,6 +11,8 @@ qaic-y := \
qaic_data.o \
qaic_drv.o \
qaic_ras.o \
+ qaic_ssr.o \
+ qaic_sysfs.o \
qaic_timesync.o \
sahara.o
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index c31081e42cee..fa7a8155658c 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -21,6 +21,7 @@
#define QAIC_DBC_BASE SZ_128K
#define QAIC_DBC_SIZE SZ_4K
+#define QAIC_SSR_DBC_SENTINEL U32_MAX /* No ongoing SSR sentinel */
#define QAIC_NO_PARTITION -1
@@ -47,6 +48,22 @@ enum __packed dev_states {
QAIC_ONLINE,
};
+enum dbc_states {
+ /* DBC is free and can be activated */
+ DBC_STATE_IDLE,
+ /* DBC is activated and a workload is running on device */
+ DBC_STATE_ASSIGNED,
+ /* Sub-system associated with this workload has crashed and will shut down soon */
+ DBC_STATE_BEFORE_SHUTDOWN,
+ /* Sub-system associated with this workload has crashed and has shut down */
+ DBC_STATE_AFTER_SHUTDOWN,
+ /* Sub-system associated with this workload is shut down and will be powered up soon */
+ DBC_STATE_BEFORE_POWER_UP,
+ /* Sub-system associated with this workload is now powered up */
+ DBC_STATE_AFTER_POWER_UP,
+ DBC_STATE_MAX,
+};
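+
+/*
+ * Illustrative lifecycle (the exact transitions are inferred from the
+ * state names, so treat them as an assumption): a DBC moves
+ * IDLE -> ASSIGNED on activation; during SSR it walks BEFORE_SHUTDOWN ->
+ * AFTER_SHUTDOWN -> BEFORE_POWER_UP -> AFTER_POWER_UP, then returns to
+ * IDLE once the workload is released.
+ */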
+
extern bool datapath_polling;
struct qaic_user {
@@ -97,6 +114,8 @@ struct dma_bridge_chan {
* response queue's head and tail pointer of this DBC.
*/
void __iomem *dbc_base;
+ /* Synchronizes access to the request queue's head and tail pointers */
+ struct mutex req_lock;
/* Head of list where each node is a memory handle queued in request queue */
struct list_head xfer_list;
/* Synchronizes DBC readers during cleanup */
@@ -112,6 +131,8 @@ struct dma_bridge_chan {
unsigned int irq;
/* Polling work item to simulate interrupts */
struct work_struct poll_work;
+ /* Represents various states of this DBC from enum dbc_states */
+ unsigned int state;
};
struct qaic_device {
@@ -159,6 +180,8 @@ struct qaic_device {
struct mhi_device *qts_ch;
/* Work queue for tasks related to MHI "QAIC_TIMESYNC" channel */
struct workqueue_struct *qts_wq;
+ /* MHI "QAIC_TIMESYNC_PERIODIC" channel device */
+ struct mhi_device *mqts_ch;
/* Head of list of page allocated by MHI bootlog device */
struct list_head bootlog;
/* MHI bootlog channel device */
@@ -175,6 +198,14 @@ struct qaic_device {
unsigned int ue_count;
/* Un-correctable non-fatal error count */
unsigned int ue_nf_count;
+ /* MHI SSR channel device */
+ struct mhi_device *ssr_ch;
+ /* Work queue for tasks related to MHI SSR device */
+ struct workqueue_struct *ssr_wq;
+ /* Buffer to collect SSR crashdump via SSR MHI channel */
+ void *ssr_mhi_buf;
+ /* DBC which is under SSR. The sentinel U32_MAX means no SSR is in progress */
+ u32 ssr_dbc;
};
struct qaic_drm_device {
@@ -193,6 +224,8 @@ struct qaic_drm_device {
struct list_head users;
/* Synchronizes access to users list */
struct mutex users_mutex;
+ /* Pointer to array of DBC sysfs attributes */
+ void *sysfs_attrs;
};
struct qaic_bo {
@@ -315,6 +348,13 @@ int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
-void irq_polling_work(struct work_struct *work);
+void qaic_irq_polling_work(struct work_struct *work);
+void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id);
+void qaic_dbc_exit_ssr(struct qaic_device *qdev);
+
+/* qaic_sysfs.c */
+int qaic_sysfs_init(struct qaic_drm_device *qddev);
+void qaic_sysfs_remove(struct qaic_drm_device *qddev);
+void set_dbc_state(struct qaic_device *qdev, u32 dbc_id, unsigned int state);
#endif /* _QAIC_H_ */
diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
index d8bdab69f800..428d8f65bff3 100644
--- a/drivers/accel/qaic/qaic_control.c
+++ b/drivers/accel/qaic/qaic_control.c
@@ -17,6 +17,7 @@
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
+#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
@@ -30,7 +31,7 @@
#define MANAGE_MAGIC_NUMBER ((__force __le32)0x43494151) /* "QAIC" in little endian */
#define QAIC_DBC_Q_GAP SZ_256
#define QAIC_DBC_Q_BUF_ALIGN SZ_4K
-#define QAIC_MANAGE_EXT_MSG_LENGTH SZ_64K /* Max DMA message length */
+#define QAIC_MANAGE_WIRE_MSG_LENGTH SZ_64K /* Max DMA message length */
#define QAIC_WRAPPER_MAX_SIZE SZ_4K
#define QAIC_MHI_RETRY_WAIT_MS 100
#define QAIC_MHI_RETRY_MAX 20
@@ -309,6 +310,7 @@ static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resou
enable_dbc(qdev, dbc_id, usr);
qdev->dbc[dbc_id].in_use = true;
resources->buf = NULL;
+ set_dbc_state(qdev, dbc_id, DBC_STATE_ASSIGNED);
}
}
@@ -367,7 +369,7 @@ static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrap
if (in_trans->hdr.len % 8 != 0)
return -EINVAL;
- if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
+ if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOSPC;
trans_wrapper = add_wrapper(wrappers,
@@ -407,7 +409,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
return -EINVAL;
remaining = in_trans->size - resources->xferred_dma_size;
if (remaining == 0)
- return 0;
+ return -EINVAL;
if (check_add_overflow(xfer_start_addr, remaining, &end))
return -EINVAL;
@@ -495,7 +497,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
nents = sgt->nents;
nents_dma = nents;
- *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
+ *size = QAIC_MANAGE_WIRE_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
for_each_sgtable_dma_sg(sgt, sg, i) {
*size -= sizeof(*asp);
/* Save 1K for possible follow-up transactions. */
@@ -576,7 +578,7 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
/* There should be enough space to hold at least one ASP entry. */
if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
- QAIC_MANAGE_EXT_MSG_LENGTH)
+ QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOMEM;
xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
@@ -645,7 +647,7 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
- if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
+ if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOSPC;
if (!in_trans->queue_size)
@@ -655,8 +657,9 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
return -EINVAL;
nelem = in_trans->queue_size;
- size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
- if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
+ if (check_mul_overflow((u32)(get_dbc_req_elem_size() + get_dbc_rsp_elem_size()),
+ nelem,
+ &size))
return -EINVAL;
if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
@@ -729,7 +732,7 @@ static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_l
msg = &wrapper->msg;
msg_hdr_len = le32_to_cpu(msg->hdr.len);
- if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
+ if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_WIRE_MSG_LENGTH)
return -ENOSPC;
trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
@@ -810,7 +813,7 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
}
if (ret)
- break;
+ goto out;
}
if (user_len != user_msg->len)
@@ -921,6 +924,7 @@ static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len
}
release_dbc(qdev, dbc_id);
+ set_dbc_state(qdev, dbc_id, DBC_STATE_IDLE);
*msg_len += sizeof(*in_trans);
return 0;
@@ -1052,7 +1056,7 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
init_completion(&elem.xfer_done);
if (likely(!qdev->cntl_lost_buf)) {
/*
- * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH.
+ * The max size of request to device is QAIC_MANAGE_WIRE_MSG_LENGTH.
* The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
*/
out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
@@ -1079,7 +1083,6 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
list_for_each_entry(w, &wrappers->list, list) {
kref_get(&w->ref_count);
- retry_count = 0;
ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
if (ret) {
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 797289e9d780..60cb4d65d48e 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -18,6 +18,7 @@
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
+#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
@@ -165,7 +166,7 @@ static void free_slice(struct kref *kref)
drm_gem_object_put(&slice->bo->base);
sg_free_table(slice->sgt);
kfree(slice->sgt);
- kfree(slice->reqs);
+ kvfree(slice->reqs);
kfree(slice);
}
@@ -404,7 +405,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
goto free_sgt;
}
- slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
+ slice->reqs = kvcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
if (!slice->reqs) {
ret = -ENOMEM;
goto free_slice;
@@ -430,7 +431,7 @@ static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
return 0;
free_req:
- kfree(slice->reqs);
+ kvfree(slice->reqs);
free_slice:
kfree(slice);
free_sgt:
@@ -643,8 +644,36 @@ static void qaic_free_object(struct drm_gem_object *obj)
kfree(bo);
}
+static struct sg_table *qaic_get_sg_table(struct drm_gem_object *obj)
+{
+ struct qaic_bo *bo = to_qaic_bo(obj);
+ struct scatterlist *sg, *sg_in;
+ struct sg_table *sgt, *sgt_in;
+ int i;
+
+ sgt_in = bo->sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(sgt, sgt_in->orig_nents, GFP_KERNEL)) {
+ kfree(sgt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = sgt->sgl;
+ for_each_sgtable_sg(sgt_in, sg_in, i) {
+ memcpy(sg, sg_in, sizeof(*sg));
+ sg = sg_next(sg);
+ }
+
+ return sgt;
+}
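+
+/*
+ * Note on the helper above (intent inferred, so treat this as a sketch
+ * of the rationale): the BO's existing sg_table is duplicated
+ * entry-for-entry so DRM PRIME export gets an independent table to map;
+ * the underlying pages are shared with the BO, not copied.
+ */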
+
static const struct drm_gem_object_funcs qaic_gem_funcs = {
.free = qaic_free_object,
+ .get_sg_table = qaic_get_sg_table,
.print_info = qaic_gem_print_info,
.mmap = qaic_gem_object_mmap,
.vm_ops = &drm_vm_ops,
@@ -953,8 +982,9 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
if (args->hdr.count == 0)
return -EINVAL;
- arg_size = args->hdr.count * sizeof(*slice_ent);
- if (arg_size / args->hdr.count != sizeof(*slice_ent))
+ if (check_mul_overflow((unsigned long)args->hdr.count,
+ (unsigned long)sizeof(*slice_ent),
+ &arg_size))
return -EINVAL;
if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
@@ -984,18 +1014,12 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
user_data = u64_to_user_ptr(args->data);
- slice_ent = kzalloc(arg_size, GFP_KERNEL);
- if (!slice_ent) {
- ret = -EINVAL;
+ slice_ent = memdup_user(user_data, arg_size);
+ if (IS_ERR(slice_ent)) {
+ ret = PTR_ERR(slice_ent);
goto unlock_dev_srcu;
}
- ret = copy_from_user(slice_ent, user_data, arg_size);
- if (ret) {
- ret = -EFAULT;
- goto free_slice_ent;
- }
-
obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
if (!obj) {
ret = -ENOENT;
@@ -1023,6 +1047,11 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
goto unlock_ch_srcu;
}
+ if (dbc->id == qdev->ssr_dbc) {
+ ret = -EPIPE;
+ goto unlock_ch_srcu;
+ }
+
ret = qaic_prepare_bo(qdev, bo, &args->hdr);
if (ret)
goto unlock_ch_srcu;
@@ -1300,8 +1329,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
int usr_rcu_id, qdev_rcu_id;
struct qaic_device *qdev;
struct qaic_user *usr;
- u8 __user *user_data;
- unsigned long n;
u64 received_ts;
u32 queue_level;
u64 submit_ts;
@@ -1314,20 +1341,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
received_ts = ktime_get_ns();
size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
- n = (unsigned long)size * args->hdr.count;
- if (args->hdr.count == 0 || n / args->hdr.count != size)
+ if (args->hdr.count == 0)
return -EINVAL;
- user_data = u64_to_user_ptr(args->data);
-
- exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
- if (!exec)
- return -ENOMEM;
-
- if (copy_from_user(exec, user_data, n)) {
- ret = -EFAULT;
- goto free_exec;
- }
+ exec = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, size);
+ if (IS_ERR(exec))
+ return PTR_ERR(exec);
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1356,13 +1375,22 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
goto release_ch_rcu;
}
+ if (dbc->id == qdev->ssr_dbc) {
+ ret = -EPIPE;
+ goto release_ch_rcu;
+ }
+
+ ret = mutex_lock_interruptible(&dbc->req_lock);
+ if (ret)
+ goto release_ch_rcu;
+
head = readl(dbc->dbc_base + REQHP_OFF);
tail = readl(dbc->dbc_base + REQTP_OFF);
if (head == U32_MAX || tail == U32_MAX) {
/* PCI link error */
ret = -ENODEV;
- goto release_ch_rcu;
+ goto unlock_req_lock;
}
queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);
@@ -1370,11 +1398,12 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
head, &tail);
if (ret)
- goto release_ch_rcu;
+ goto unlock_req_lock;
/* Finalize commit to hardware */
submit_ts = ktime_get_ns();
writel(tail, dbc->dbc_base + REQTP_OFF);
+ mutex_unlock(&dbc->req_lock);
update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
submit_ts, queue_level);
@@ -1382,13 +1411,15 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
if (datapath_polling)
schedule_work(&dbc->poll_work);
+unlock_req_lock:
+ if (ret)
+ mutex_unlock(&dbc->req_lock);
release_ch_rcu:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
-free_exec:
kfree(exec);
return ret;
}
@@ -1483,7 +1514,7 @@ irqreturn_t dbc_irq_handler(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-void irq_polling_work(struct work_struct *work)
+void qaic_irq_polling_work(struct work_struct *work)
{
struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
unsigned long flags;
@@ -1701,6 +1732,11 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
goto unlock_ch_srcu;
}
+ if (dbc->id == qdev->ssr_dbc) {
+ ret = -EPIPE;
+ goto unlock_ch_srcu;
+ }
+
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
ret = -ENOENT;
@@ -1721,6 +1757,9 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
if (!dbc->usr)
ret = -EPERM;
+ if (dbc->id == qdev->ssr_dbc)
+ ret = -EPIPE;
+
put_obj:
drm_gem_object_put(obj);
unlock_ch_srcu:
@@ -1741,7 +1780,8 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
struct qaic_device *qdev;
struct qaic_user *usr;
struct qaic_bo *bo;
- int ret, i;
+ int ret = 0;
+ int i;
usr = file_priv->driver_priv;
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1762,18 +1802,12 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
goto unlock_dev_srcu;
}
- ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
- if (!ent) {
- ret = -EINVAL;
+ ent = memdup_array_user(u64_to_user_ptr(args->data), args->hdr.count, sizeof(*ent));
+ if (IS_ERR(ent)) {
+ ret = PTR_ERR(ent);
goto unlock_dev_srcu;
}
- ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
- if (ret) {
- ret = -EFAULT;
- goto free_ent;
- }
-
for (i = 0; i < args->hdr.count; i++) {
obj = drm_gem_object_lookup(file_priv, ent[i].handle);
if (!obj) {
@@ -1781,6 +1815,16 @@ int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file
goto free_ent;
}
bo = to_qaic_bo(obj);
+ if (!bo->sliced) {
+ drm_gem_object_put(obj);
+ ret = -EINVAL;
+ goto free_ent;
+ }
+ if (bo->dbc->id != args->hdr.dbc_id) {
+ drm_gem_object_put(obj);
+ ret = -EINVAL;
+ goto free_ent;
+ }
/*
* perf stats ioctl is called before wait ioctl is complete then
* the latency information is invalid.
@@ -1919,6 +1963,17 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
}
+static void sync_empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
+{
+ empty_xfer_list(qdev, dbc);
+ synchronize_srcu(&dbc->ch_lock);
+ /*
+ * Threads holding the channel lock may add more elements to the xfer_list.
+ * Flush these elements out of the xfer_list.
+ */
+ empty_xfer_list(qdev, dbc);
+}
+
int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
{
if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
@@ -1933,7 +1988,7 @@ int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr)
* enable_dbc - Enable the DBC. DBCs are disabled by removing the context of
* user. Add user context back to DBC to enable it. This function trusts the
* DBC ID passed and expects the DBC to be disabled.
- * @qdev: Qranium device handle
+ * @qdev: qaic device handle
* @dbc_id: ID of the DBC
* @usr: User context
*/
@@ -1947,13 +2002,7 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
dbc->usr = NULL;
- empty_xfer_list(qdev, dbc);
- synchronize_srcu(&dbc->ch_lock);
- /*
- * Threads holding channel lock, may add more elements in the xfer_list.
- * Flush out these elements from xfer_list.
- */
- empty_xfer_list(qdev, dbc);
+ sync_empty_xfer_list(qdev, dbc);
}
void release_dbc(struct qaic_device *qdev, u32 dbc_id)
@@ -1994,3 +2043,30 @@ void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail)
*head = readl(dbc->dbc_base + REQHP_OFF);
*tail = readl(dbc->dbc_base + REQTP_OFF);
}
+
+/*
+ * qaic_dbc_enter_ssr - Prepare to enter sub-system reset (SSR) for the given DBC ID.
+ * @qdev: qaic device handle
+ * @dbc_id: ID of the DBC which will enter SSR
+ *
+ * The device will automatically deactivate the workload, as not all
+ * errors can be silently recovered. The user will be notified and will
+ * need to decide what recovery action to take.
+ */
+void qaic_dbc_enter_ssr(struct qaic_device *qdev, u32 dbc_id)
+{
+ qdev->ssr_dbc = dbc_id;
+ release_dbc(qdev, dbc_id);
+}
+
+/*
+ * qaic_dbc_exit_ssr - Prepare to exit sub-system reset (SSR).
+ * @qdev: qaic device handle
+ *
+ * The DBC returns to an operational state and begins accepting work after exiting SSR.
+ */
+void qaic_dbc_exit_ssr(struct qaic_device *qdev)
+{
+ qdev->ssr_dbc = QAIC_SSR_DBC_SENTINEL;
+}
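+
+/*
+ * Illustrative usage (assumed call sites): the SSR handling path would
+ * call qaic_dbc_enter_ssr() when the device reports that a sub-system
+ * is about to shut down, and qaic_dbc_exit_ssr() once power-up handling
+ * completes, after which the DBC can be activated again.
+ */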
diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c
index a991b8198dc4..8dc4fe5bb560 100644
--- a/drivers/accel/qaic/qaic_debugfs.c
+++ b/drivers/accel/qaic/qaic_debugfs.c
@@ -218,6 +218,9 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
if (ret)
goto destroy_workqueue;
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->bootlog_ch = mhi_dev;
+
for (i = 0; i < BOOTLOG_POOL_SIZE; i++) {
msg = devm_kzalloc(&qdev->pdev->dev, sizeof(*msg), GFP_KERNEL);
if (!msg) {
@@ -233,8 +236,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d
goto mhi_unprepare;
}
- dev_set_drvdata(&mhi_dev->dev, qdev);
- qdev->bootlog_ch = mhi_dev;
return 0;
mhi_unprepare:
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index e31bcb0ecfc9..4c70bd949d53 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -30,6 +30,7 @@
#include "qaic.h"
#include "qaic_debugfs.h"
#include "qaic_ras.h"
+#include "qaic_ssr.h"
#include "qaic_timesync.h"
#include "sahara.h"
@@ -270,6 +271,13 @@ static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
return ret;
}
+ ret = qaic_sysfs_init(qddev);
+ if (ret) {
+ drm_dev_unregister(drm);
+ pci_dbg(qdev->pdev, "qaic_sysfs_init failed %d\n", ret);
+ return ret;
+ }
+
qaic_debugfs_init(qddev);
return ret;
@@ -281,6 +289,7 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
struct drm_device *drm = to_drm(qddev);
struct qaic_user *usr;
+ qaic_sysfs_remove(qddev);
drm_dev_unregister(drm);
qddev->partition_id = 0;
/*
@@ -382,6 +391,7 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
qaic_notify_reset(qdev);
/* start tearing things down */
+ qaic_clean_up_ssr(qdev);
for (i = 0; i < qdev->num_dbc; ++i)
release_dbc(qdev, i);
}
@@ -431,11 +441,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev,
qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
if (IS_ERR(qdev->qts_wq))
return NULL;
+ qdev->ssr_wq = qaicm_wq_init(drm, "qaic_ssr");
+ if (IS_ERR(qdev->ssr_wq))
+ return NULL;
ret = qaicm_srcu_init(drm, &qdev->dev_lock);
if (ret)
return NULL;
+ ret = qaic_ssr_init(qdev, drm);
+ if (ret)
+ pci_info(pdev, "QAIC SSR crashdump collection not supported.\n");
+
qdev->qddev = qddev;
qdev->pdev = pdev;
qddev->qdev = qdev;
@@ -454,6 +471,9 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev,
return NULL;
init_waitqueue_head(&qdev->dbc[i].dbc_release);
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
+ ret = drmm_mutex_init(drm, &qdev->dbc[i].req_lock);
+ if (ret)
+ return NULL;
}
return qdev;
@@ -542,7 +562,7 @@ static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
if (!qdev->single_msi)
disable_irq_nosync(qdev->dbc[i].irq);
- INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
+ INIT_WORK(&qdev->dbc[i].poll_work, qaic_irq_polling_work);
}
}
@@ -657,6 +677,92 @@ static const struct pci_error_handlers qaic_pci_err_handler = {
.reset_done = qaic_pci_reset_done,
};
+static bool qaic_is_under_reset(struct qaic_device *qdev)
+{
+ int rcu_id;
+ bool ret;
+
+ rcu_id = srcu_read_lock(&qdev->dev_lock);
+ ret = qdev->dev_state != QAIC_ONLINE;
+ srcu_read_unlock(&qdev->dev_lock, rcu_id);
+ return ret;
+}
+
+static bool qaic_data_path_busy(struct qaic_device *qdev)
+{
+ bool ret = false;
+ int dev_rcu_id;
+ int i;
+
+ dev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->dev_state != QAIC_ONLINE) {
+ srcu_read_unlock(&qdev->dev_lock, dev_rcu_id);
+ return false;
+ }
+ for (i = 0; i < qdev->num_dbc; i++) {
+ struct dma_bridge_chan *dbc = &qdev->dbc[i];
+ unsigned long flags;
+ int ch_rcu_id;
+
+ ch_rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (!dbc->usr || !dbc->in_use) {
+ srcu_read_unlock(&dbc->ch_lock, ch_rcu_id);
+ continue;
+ }
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ ret = !list_empty(&dbc->xfer_list);
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ srcu_read_unlock(&dbc->ch_lock, ch_rcu_id);
+ if (ret)
+ break;
+ }
+ srcu_read_unlock(&qdev->dev_lock, dev_rcu_id);
+ return ret;
+}
+
+static int qaic_pm_suspend(struct device *dev)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ dev_dbg(dev, "Suspending..\n");
+ if (qaic_data_path_busy(qdev)) {
+ dev_dbg(dev, "Device's datapath is busy. Aborting suspend..\n");
+ return -EBUSY;
+ }
+ if (qaic_is_under_reset(qdev)) {
+ dev_dbg(dev, "Device is under reset. Aborting suspend..\n");
+ return -EBUSY;
+ }
+ qaic_mqts_ch_stop_timer(qdev->mqts_ch);
+ qaic_pci_reset_prepare(qdev->pdev);
+ pci_save_state(qdev->pdev);
+ pci_disable_device(qdev->pdev);
+ pci_set_power_state(qdev->pdev, PCI_D3hot);
+ return 0;
+}
+
+static int qaic_pm_resume(struct device *dev)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+ int ret;
+
+ dev_dbg(dev, "Resuming..\n");
+ pci_set_power_state(qdev->pdev, PCI_D0);
+ pci_restore_state(qdev->pdev);
+ ret = pci_enable_device(qdev->pdev);
+ if (ret) {
+ dev_err(dev, "pci_enable_device failed on resume %d\n", ret);
+ return ret;
+ }
+ pci_set_master(qdev->pdev);
+ qaic_pci_reset_done(qdev->pdev);
+ return 0;
+}
+
+static const struct dev_pm_ops qaic_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(qaic_pm_suspend, qaic_pm_resume)
+};
+
static struct pci_driver qaic_pci_driver = {
.name = QAIC_NAME,
.id_table = qaic_ids,
@@ -664,6 +770,9 @@ static struct pci_driver qaic_pci_driver = {
.remove = qaic_pci_remove,
.shutdown = qaic_pci_shutdown,
.err_handler = &qaic_pci_err_handler,
+ .driver = {
+ .pm = pm_sleep_ptr(&qaic_pm_ops),
+ },
};
static int __init qaic_init(void)
@@ -699,9 +808,16 @@ static int __init qaic_init(void)
ret = qaic_ras_register();
if (ret)
pr_debug("qaic: qaic_ras_register failed %d\n", ret);
+ ret = qaic_ssr_register();
+ if (ret) {
+ pr_debug("qaic: qaic_ssr_register failed %d\n", ret);
+ goto free_bootlog;
+ }
return 0;
+free_bootlog:
+ qaic_bootlog_unregister();
free_mhi:
mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
@@ -727,6 +843,7 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_ssr_unregister();
qaic_ras_unregister();
qaic_bootlog_unregister();
qaic_timesync_deinit();
diff --git a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c
index 914ffc4a9970..f1d52a710136 100644
--- a/drivers/accel/qaic/qaic_ras.c
+++ b/drivers/accel/qaic/qaic_ras.c
@@ -514,21 +514,21 @@ static ssize_t ce_count_show(struct device *dev, struct device_attribute *attr,
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ce_count);
+ return sysfs_emit(buf, "%d\n", qdev->ce_count);
}
static ssize_t ue_count_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_count);
+ return sysfs_emit(buf, "%d\n", qdev->ue_count);
}
static ssize_t ue_nonfatal_count_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
- return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_nf_count);
+ return sysfs_emit(buf, "%d\n", qdev->ue_nf_count);
}
static DEVICE_ATTR_RO(ce_count);
diff --git a/drivers/accel/qaic/qaic_ssr.c b/drivers/accel/qaic/qaic_ssr.c
new file mode 100644
index 000000000000..9b662d690371
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ssr.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+
+#include <asm/byteorder.h>
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <linux/devcoredump.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+#include <linux/workqueue.h>
+
+#include "qaic.h"
+#include "qaic_ssr.h"
+
+#define SSR_RESP_MSG_SZ 32
+#define SSR_MHI_BUF_SIZE SZ_64K
+#define SSR_MEM_READ_DATA_SIZE ((u64)SSR_MHI_BUF_SIZE - sizeof(struct ssr_crashdump))
+#define SSR_MEM_READ_CHUNK_SIZE ((u64)SSR_MEM_READ_DATA_SIZE - sizeof(struct ssr_memory_read_rsp))
+
+#define DEBUG_TRANSFER_INFO BIT(0)
+#define DEBUG_TRANSFER_INFO_RSP BIT(1)
+#define MEMORY_READ BIT(2)
+#define MEMORY_READ_RSP BIT(3)
+#define DEBUG_TRANSFER_DONE BIT(4)
+#define DEBUG_TRANSFER_DONE_RSP BIT(5)
+#define SSR_EVENT BIT(8)
+#define SSR_EVENT_RSP BIT(9)
+
+#define SSR_EVENT_NACK BIT(0)
+#define BEFORE_SHUTDOWN BIT(1)
+#define AFTER_SHUTDOWN BIT(2)
+#define BEFORE_POWER_UP BIT(3)
+#define AFTER_POWER_UP BIT(4)
+
+struct debug_info_table {
+ /* Save preferences. Default is mandatory */
+ u64 save_perf;
+ /* Base address of the debug region */
+ u64 mem_base;
+ /* Size of debug region in bytes */
+ u64 len;
+ /* Description */
+ char desc[20];
+ /* Filename of debug region */
+ char filename[20];
+};
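+
+/*
+ * Illustrative sketch: the debug table transferred from the device is a
+ * packed array of these entries, so the entry count follows directly
+ * from the table length:
+ *
+ *   nents = tbl_len / sizeof(struct debug_info_table);
+ *
+ * alloc_dump_info() below rejects tables whose length is not an exact
+ * multiple of the entry size.
+ */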
+
+struct _ssr_hdr {
+ __le32 cmd;
+ __le32 len;
+ __le32 dbc_id;
+};
+
+struct ssr_hdr {
+ u32 cmd;
+ u32 len;
+ u32 dbc_id;
+};
+
+struct ssr_debug_transfer_info {
+ struct ssr_hdr hdr;
+ u32 resv;
+ u64 tbl_addr;
+ u64 tbl_len;
+} __packed;
+
+struct ssr_debug_transfer_info_rsp {
+ struct _ssr_hdr hdr;
+ __le32 ret;
+} __packed;
+
+struct ssr_memory_read {
+ struct _ssr_hdr hdr;
+ __le32 resv;
+ __le64 addr;
+ __le64 len;
+} __packed;
+
+struct ssr_memory_read_rsp {
+ struct _ssr_hdr hdr;
+ __le32 resv;
+ u8 data[];
+} __packed;
+
+struct ssr_debug_transfer_done {
+ struct _ssr_hdr hdr;
+ __le32 resv;
+} __packed;
+
+struct ssr_debug_transfer_done_rsp {
+ struct _ssr_hdr hdr;
+ __le32 ret;
+} __packed;
+
+struct ssr_event {
+ struct ssr_hdr hdr;
+ u32 event;
+} __packed;
+
+struct ssr_event_rsp {
+ struct _ssr_hdr hdr;
+ __le32 event;
+} __packed;
+
+struct ssr_resp {
+ /* Work struct to schedule work coming on QAIC_SSR channel */
+ struct work_struct work;
+ /* Root struct of device, used to access device resources */
+ struct qaic_device *qdev;
+ /* Buffer used by MHI for transfer requests */
+ u8 data[] __aligned(8);
+};
+
+/* SSR crashdump bookkeeping structure */
+struct ssr_dump_info {
+ /* DBC associated with this SSR crashdump */
+ struct dma_bridge_chan *dbc;
+ /*
+ * It will be used when we complete the crashdump download and switch
+ * to waiting on SSR events
+ */
+ struct ssr_resp *resp;
+ /* MEMORY READ request MHI buffer. */
+ struct ssr_memory_read *read_buf_req;
+ /* TRUE: ->read_buf_req is queued for MHI transaction. FALSE: Otherwise */
+ bool read_buf_req_queued;
+ /* Address of table in host */
+ void *tbl_addr;
+ /* Total size of table */
+ u64 tbl_len;
+ /* Offset of the table (->tbl_addr) where the new chunk will be dumped */
+ u64 tbl_off;
+ /* Address of table in device/target */
+ u64 tbl_addr_dev;
+ /* Ptr to the entire dump */
+ void *dump_addr;
+ /* Entire crashdump size */
+ u64 dump_sz;
+ /* Offset of the crashdump (->dump_addr) where the new chunk will be dumped */
+ u64 dump_off;
+ /* Points to the table entry we are currently downloading */
+ struct debug_info_table *tbl_ent;
+ /* Offset in the current table entry (->tbl_ent) for the next chunk */
+ u64 tbl_ent_off;
+};
+
+struct ssr_crashdump {
+ /*
+ * Points to a bookkeeping struct maintained by the MHI SSR device while
+ * downloading an SSR crashdump. It is NULL when no crashdump download
+ * is in progress.
+ */
+ struct ssr_dump_info *dump_info;
+ /* Work struct to schedule work coming on QAIC_SSR channel */
+ struct work_struct work;
+ /* Root struct of device, used to access device resources */
+ struct qaic_device *qdev;
+ /* Buffer used by MHI for transfer requests */
+ u8 data[];
+};
+
+#define QAIC_SSR_DUMP_V1_MAGIC 0x1234567890abcdef
+#define QAIC_SSR_DUMP_V1_VER 1
+struct dump_file_meta {
+ u64 magic;
+ u64 version;
+ u64 size; /* Total size of the entire dump */
+ u64 tbl_len; /* Length of the table in bytes */
+};
+
+/*
+ * Layout of crashdump
+ * +------------------------------------------+
+ * | Crashdump Meta structure |
+ * | type: struct dump_file_meta |
+ * +------------------------------------------+
+ * | Crashdump Table |
+ * | type: array of struct debug_info_table |
+ * | |
+ * | |
+ * | |
+ * +------------------------------------------+
+ * | Crashdump |
+ * | |
+ * | |
+ * | |
+ * | |
+ * | |
+ * +------------------------------------------+
+ */
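+
+/*
+ * Illustrative sketch: given the layout above, the total dump size is
+ *
+ *   sizeof(struct dump_file_meta) + tbl_len + (sum of tbl[i].len)
+ *
+ * which is exactly what alloc_dump() computes before allocating the
+ * dump buffer.
+ */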
+
+static void free_ssr_dump_info(struct ssr_crashdump *ssr_crash)
+{
+ struct ssr_dump_info *dump_info = ssr_crash->dump_info;
+
+ ssr_crash->dump_info = NULL;
+ if (!dump_info)
+ return;
+ if (!dump_info->read_buf_req_queued)
+ kfree(dump_info->read_buf_req);
+ vfree(dump_info->tbl_addr);
+ vfree(dump_info->dump_addr);
+ kfree(dump_info);
+}
+
+void qaic_clean_up_ssr(struct qaic_device *qdev)
+{
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+
+ if (!ssr_crash)
+ return;
+
+ qaic_dbc_exit_ssr(qdev);
+ free_ssr_dump_info(ssr_crash);
+}
+
+static int alloc_dump(struct ssr_dump_info *dump_info)
+{
+ struct debug_info_table *tbl_ent = dump_info->tbl_addr;
+ struct dump_file_meta *dump_meta;
+ u64 tbl_sz_lp = 0;
+ u64 dump_size = 0;
+
+ while (tbl_sz_lp < dump_info->tbl_len) {
+ le64_to_cpus(&tbl_ent->save_perf);
+ le64_to_cpus(&tbl_ent->mem_base);
+ le64_to_cpus(&tbl_ent->len);
+
+ if (tbl_ent->len == 0)
+ return -EINVAL;
+
+ dump_size += tbl_ent->len;
+ tbl_ent++;
+ tbl_sz_lp += sizeof(*tbl_ent);
+ }
+
+ dump_info->dump_sz = dump_size + dump_info->tbl_len + sizeof(*dump_meta);
+ dump_info->dump_addr = vzalloc(dump_info->dump_sz);
+ if (!dump_info->dump_addr)
+ return -ENOMEM;
+
+ /* Copy crashdump meta and table */
+ dump_meta = dump_info->dump_addr;
+ dump_meta->magic = QAIC_SSR_DUMP_V1_MAGIC;
+ dump_meta->version = QAIC_SSR_DUMP_V1_VER;
+ dump_meta->size = dump_info->dump_sz;
+ dump_meta->tbl_len = dump_info->tbl_len;
+ memcpy(dump_info->dump_addr + sizeof(*dump_meta), dump_info->tbl_addr, dump_info->tbl_len);
+ /* Offset by crashdump meta and table (copied above) */
+ dump_info->dump_off = dump_info->tbl_len + sizeof(*dump_meta);
+
+ return 0;
+}
+
+static int send_xfer_done(struct qaic_device *qdev, void *resp, u32 dbc_id)
+{
+ struct ssr_debug_transfer_done *xfer_done;
+ int ret;
+
+ xfer_done = kmalloc(sizeof(*xfer_done), GFP_KERNEL);
+ if (!xfer_done) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, resp, SSR_RESP_MSG_SZ, MHI_EOT);
+ if (ret)
+ goto free_xfer_done;
+
+ xfer_done->hdr.cmd = cpu_to_le32(DEBUG_TRANSFER_DONE);
+ xfer_done->hdr.len = cpu_to_le32(sizeof(*xfer_done));
+ xfer_done->hdr.dbc_id = cpu_to_le32(dbc_id);
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, xfer_done, sizeof(*xfer_done), MHI_EOT);
+ if (ret)
+ goto free_xfer_done;
+
+ return 0;
+
+free_xfer_done:
+ kfree(xfer_done);
+out:
+ return ret;
+}
+
+static int mem_read_req(struct qaic_device *qdev, u64 dest_addr, u64 dest_len)
+{
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ struct ssr_memory_read *read_buf_req;
+ struct ssr_dump_info *dump_info;
+ int ret;
+
+ dump_info = ssr_crash->dump_info;
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, ssr_crash->data, SSR_MEM_READ_DATA_SIZE,
+ MHI_EOT);
+ if (ret)
+ goto out;
+
+ read_buf_req = dump_info->read_buf_req;
+ read_buf_req->hdr.cmd = cpu_to_le32(MEMORY_READ);
+ read_buf_req->hdr.len = cpu_to_le32(sizeof(*read_buf_req));
+ read_buf_req->hdr.dbc_id = cpu_to_le32(qdev->ssr_dbc);
+ read_buf_req->addr = cpu_to_le64(dest_addr);
+ read_buf_req->len = cpu_to_le64(dest_len);
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, read_buf_req, sizeof(*read_buf_req),
+ MHI_EOT);
+ if (!ret)
+ dump_info->read_buf_req_queued = true;
+
+out:
+ return ret;
+}
+
+static int ssr_copy_table(struct ssr_dump_info *dump_info, void *data, u64 len)
+{
+ if (len > dump_info->tbl_len - dump_info->tbl_off)
+ return -EINVAL;
+
+ memcpy(dump_info->tbl_addr + dump_info->tbl_off, data, len);
+ dump_info->tbl_off += len;
+
+ /* Entire table has been downloaded, alloc dump memory */
+ if (dump_info->tbl_off == dump_info->tbl_len) {
+ dump_info->tbl_ent = dump_info->tbl_addr;
+ return alloc_dump(dump_info);
+ }
+
+ return 0;
+}
+
+static int ssr_copy_dump(struct ssr_dump_info *dump_info, void *data, u64 len)
+{
+ struct debug_info_table *tbl_ent;
+
+ tbl_ent = dump_info->tbl_ent;
+
+ if (len > tbl_ent->len - dump_info->tbl_ent_off)
+ return -EINVAL;
+
+ memcpy(dump_info->dump_addr + dump_info->dump_off, data, len);
+ dump_info->dump_off += len;
+ dump_info->tbl_ent_off += len;
+
+ /*
+ * Current segment (an entry in the table) of the crashdump is complete,
+ * move to the next one.
+ */
+ if (tbl_ent->len == dump_info->tbl_ent_off) {
+ dump_info->tbl_ent++;
+ dump_info->tbl_ent_off = 0;
+ }
+
+ return 0;
+}
+
+static void ssr_dump_worker(struct work_struct *work)
+{
+ struct ssr_crashdump *ssr_crash = container_of(work, struct ssr_crashdump, work);
+ struct qaic_device *qdev = ssr_crash->qdev;
+ struct ssr_memory_read_rsp *mem_rd_resp;
+ struct debug_info_table *tbl_ent;
+ struct ssr_dump_info *dump_info;
+ u64 dest_addr, dest_len;
+ struct _ssr_hdr *_hdr;
+ struct ssr_hdr hdr;
+ u64 data_len;
+ int ret;
+
+ mem_rd_resp = (struct ssr_memory_read_rsp *)ssr_crash->data;
+ _hdr = &mem_rd_resp->hdr;
+ hdr.cmd = le32_to_cpu(_hdr->cmd);
+ hdr.len = le32_to_cpu(_hdr->len);
+ hdr.dbc_id = le32_to_cpu(_hdr->dbc_id);
+
+ if (hdr.dbc_id != qdev->ssr_dbc)
+ goto reset_device;
+
+ dump_info = ssr_crash->dump_info;
+ if (!dump_info)
+ goto reset_device;
+
+ if (hdr.cmd != MEMORY_READ_RSP)
+ goto free_dump_info;
+
+ if (hdr.len > SSR_MEM_READ_DATA_SIZE)
+ goto free_dump_info;
+
+ data_len = hdr.len - sizeof(*mem_rd_resp);
+
+ if (dump_info->tbl_off < dump_info->tbl_len) /* Chunk belongs to table */
+ ret = ssr_copy_table(dump_info, mem_rd_resp->data, data_len);
+ else /* Chunk belongs to crashdump */
+ ret = ssr_copy_dump(dump_info, mem_rd_resp->data, data_len);
+
+ if (ret)
+ goto free_dump_info;
+
+ if (dump_info->tbl_off < dump_info->tbl_len) {
+ /* Continue downloading table */
+ dest_addr = dump_info->tbl_addr_dev + dump_info->tbl_off;
+ dest_len = min(SSR_MEM_READ_CHUNK_SIZE, dump_info->tbl_len - dump_info->tbl_off);
+ ret = mem_read_req(qdev, dest_addr, dest_len);
+ } else if (dump_info->dump_off < dump_info->dump_sz) {
+ /* Continue downloading crashdump */
+ tbl_ent = dump_info->tbl_ent;
+ dest_addr = tbl_ent->mem_base + dump_info->tbl_ent_off;
+ dest_len = min(SSR_MEM_READ_CHUNK_SIZE, tbl_ent->len - dump_info->tbl_ent_off);
+ ret = mem_read_req(qdev, dest_addr, dest_len);
+ } else {
+ /* Crashdump download complete */
+ ret = send_xfer_done(qdev, dump_info->resp->data, hdr.dbc_id);
+ }
+
+ /* Most likely an MHI xfer has failed */
+ if (ret)
+ goto free_dump_info;
+
+ return;
+
+free_dump_info:
+ /* Free the allocated memory */
+ free_ssr_dump_info(ssr_crash);
+reset_device:
+ /*
+ * Crashdump collection begins after a subsystem crash on the device,
+ * but something went wrong while collecting the crashdump. Instead of
+ * handling this error we just reset the device, as the best effort
+ * has already been made
+ */
+ mhi_soc_reset(qdev->mhi_cntrl);
+}
+
+static struct ssr_dump_info *alloc_dump_info(struct qaic_device *qdev,
+ struct ssr_debug_transfer_info *debug_info)
+{
+ struct ssr_dump_info *dump_info;
+ int ret;
+
+ le64_to_cpus(&debug_info->tbl_len);
+ le64_to_cpus(&debug_info->tbl_addr);
+
+ if (debug_info->tbl_len == 0 ||
+ debug_info->tbl_len % sizeof(struct debug_info_table) != 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Allocate SSR crashdump bookkeeping structure */
+ dump_info = kzalloc(sizeof(*dump_info), GFP_KERNEL);
+ if (!dump_info) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Buffer used to send MEMORY READ requests to the device via MHI */
+ dump_info->read_buf_req = kzalloc(sizeof(*dump_info->read_buf_req), GFP_KERNEL);
+ if (!dump_info->read_buf_req) {
+ ret = -ENOMEM;
+ goto free_dump_info;
+ }
+
+ /* Crashdump meta table buffer */
+ dump_info->tbl_addr = vzalloc(debug_info->tbl_len);
+ if (!dump_info->tbl_addr) {
+ ret = -ENOMEM;
+ goto free_read_buf_req;
+ }
+
+ dump_info->tbl_addr_dev = debug_info->tbl_addr;
+ dump_info->tbl_len = debug_info->tbl_len;
+
+ return dump_info;
+
+free_read_buf_req:
+ kfree(dump_info->read_buf_req);
+free_dump_info:
+ kfree(dump_info);
+out:
+ return ERR_PTR(ret);
+}
+
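+/*
+ * Handle DEBUG_TRANSFER_INFO by allocating the dump bookkeeping structures
+ * and ACKing (or NACKing on failure) the device's request to transfer a
+ * crashdump.
+ */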
+static int dbg_xfer_info_rsp(struct qaic_device *qdev, struct dma_bridge_chan *dbc,
+ struct ssr_debug_transfer_info *debug_info)
+{
+ struct ssr_debug_transfer_info_rsp *debug_rsp;
+ struct ssr_crashdump *ssr_crash = NULL;
+ int ret = 0, ret2;
+
+ debug_rsp = kmalloc(sizeof(*debug_rsp), GFP_KERNEL);
+ if (!debug_rsp)
+ return -ENOMEM;
+
+ if (!qdev->ssr_mhi_buf) {
+ ret = -ENOMEM;
+ goto send_rsp;
+ }
+
+ if (dbc->state != DBC_STATE_BEFORE_POWER_UP) {
+ ret = -EINVAL;
+ goto send_rsp;
+ }
+
+ ssr_crash = qdev->ssr_mhi_buf;
+ ssr_crash->dump_info = alloc_dump_info(qdev, debug_info);
+ if (IS_ERR(ssr_crash->dump_info)) {
+ ret = PTR_ERR(ssr_crash->dump_info);
+ ssr_crash->dump_info = NULL;
+ }
+
+send_rsp:
+ debug_rsp->hdr.cmd = cpu_to_le32(DEBUG_TRANSFER_INFO_RSP);
+ debug_rsp->hdr.len = cpu_to_le32(sizeof(*debug_rsp));
+ debug_rsp->hdr.dbc_id = cpu_to_le32(dbc->id);
+ /*
+ * 0 = Return an ACK confirming the host is ready to download the crashdump
+ * 1 = Return a NACK indicating the host is not ready to download the crashdump
+ */
+ debug_rsp->ret = cpu_to_le32(ret ? 1 : 0);
+
+ ret2 = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, debug_rsp, sizeof(*debug_rsp), MHI_EOT);
+ if (ret2) {
+ free_ssr_dump_info(ssr_crash);
+ kfree(debug_rsp);
+ return ret2;
+ }
+
+ return ret;
+}
+
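+/*
+ * Handle DEBUG_TRANSFER_DONE_RSP by handing the completed crashdump over
+ * to the devcoredump framework and releasing the bookkeeping structures.
+ */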
+static void dbg_xfer_done_rsp(struct qaic_device *qdev, struct dma_bridge_chan *dbc,
+ struct ssr_debug_transfer_done_rsp *xfer_rsp)
+{
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ u32 status = le32_to_cpu(xfer_rsp->ret);
+ struct device *dev = &qdev->pdev->dev;
+ struct ssr_dump_info *dump_info;
+
+ dump_info = ssr_crash->dump_info;
+ if (!dump_info)
+ return;
+
+ if (status) {
+ free_ssr_dump_info(ssr_crash);
+ return;
+ }
+
+ dev_coredumpv(dev, dump_info->dump_addr, dump_info->dump_sz, GFP_KERNEL);
+ /* dev_coredumpv will free dump_info->dump_addr */
+ dump_info->dump_addr = NULL;
+ free_ssr_dump_info(ssr_crash);
+}
+
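+/* Dispatch incoming SSR messages: transfer info, SSR events and transfer done */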
+static void ssr_worker(struct work_struct *work)
+{
+ struct ssr_resp *resp = container_of(work, struct ssr_resp, work);
+ struct ssr_hdr *hdr = (struct ssr_hdr *)resp->data;
+ struct ssr_dump_info *dump_info = NULL;
+ struct qaic_device *qdev = resp->qdev;
+ struct ssr_crashdump *ssr_crash;
+ struct ssr_event_rsp *event_rsp;
+ struct dma_bridge_chan *dbc;
+ struct ssr_event *event;
+ u32 ssr_event_ack;
+ int ret;
+
+ le32_to_cpus(&hdr->cmd);
+ le32_to_cpus(&hdr->len);
+ le32_to_cpus(&hdr->dbc_id);
+
+ if (hdr->len > SSR_RESP_MSG_SZ)
+ goto out;
+
+ if (hdr->dbc_id >= qdev->num_dbc)
+ goto out;
+
+ dbc = &qdev->dbc[hdr->dbc_id];
+
+ switch (hdr->cmd) {
+ case DEBUG_TRANSFER_INFO:
+ ret = dbg_xfer_info_rsp(qdev, dbc, (struct ssr_debug_transfer_info *)resp->data);
+ if (ret)
+ break;
+
+ ssr_crash = qdev->ssr_mhi_buf;
+ dump_info = ssr_crash->dump_info;
+ dump_info->dbc = dbc;
+ dump_info->resp = resp;
+
+ /* Start by downloading the debug table */
+ ret = mem_read_req(qdev, dump_info->tbl_addr_dev,
+ min(dump_info->tbl_len, SSR_MEM_READ_CHUNK_SIZE));
+ if (ret) {
+ free_ssr_dump_info(ssr_crash);
+ break;
+ }
+
+ /*
+ * Everything has gone fine so far, which means that we will be
+ * collecting the crashdump chunk by chunk. Do not queue a response
+ * buffer for SSR cmds until the crashdump is complete.
+ */
+ return;
+ case SSR_EVENT:
+ event = (struct ssr_event *)hdr;
+ le32_to_cpus(&event->event);
+ ssr_event_ack = event->event;
+ ssr_crash = qdev->ssr_mhi_buf;
+
+ switch (event->event) {
+ case BEFORE_SHUTDOWN:
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_BEFORE_SHUTDOWN);
+ qaic_dbc_enter_ssr(qdev, hdr->dbc_id);
+ break;
+ case AFTER_SHUTDOWN:
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_AFTER_SHUTDOWN);
+ break;
+ case BEFORE_POWER_UP:
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_BEFORE_POWER_UP);
+ break;
+ case AFTER_POWER_UP:
+ /*
+ * If dump_info is non-NULL it means that we
+ * have received this SSR event while a crashdump
+ * download for this DBC is still in progress. NACK
+ * the SSR event
+ */
+ if (ssr_crash && ssr_crash->dump_info) {
+ free_ssr_dump_info(ssr_crash);
+ ssr_event_ack = SSR_EVENT_NACK;
+ break;
+ }
+
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_AFTER_POWER_UP);
+ break;
+ default:
+ break;
+ }
+
+ event_rsp = kmalloc(sizeof(*event_rsp), GFP_KERNEL);
+ if (!event_rsp)
+ break;
+
+ event_rsp->hdr.cmd = cpu_to_le32(SSR_EVENT_RSP);
+ event_rsp->hdr.len = cpu_to_le32(sizeof(*event_rsp));
+ event_rsp->hdr.dbc_id = cpu_to_le32(hdr->dbc_id);
+ event_rsp->event = cpu_to_le32(ssr_event_ack);
+
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_TO_DEVICE, event_rsp, sizeof(*event_rsp),
+ MHI_EOT);
+ if (ret)
+ kfree(event_rsp);
+
+ if (event->event == AFTER_POWER_UP && ssr_event_ack != SSR_EVENT_NACK) {
+ qaic_dbc_exit_ssr(qdev);
+ set_dbc_state(qdev, hdr->dbc_id, DBC_STATE_IDLE);
+ }
+
+ break;
+ case DEBUG_TRANSFER_DONE_RSP:
+ dbg_xfer_done_rsp(qdev, dbc, (struct ssr_debug_transfer_done_rsp *)hdr);
+ break;
+ default:
+ break;
+ }
+
+out:
+ ret = mhi_queue_buf(qdev->ssr_ch, DMA_FROM_DEVICE, resp->data, SSR_RESP_MSG_SZ, MHI_EOT);
+ if (ret)
+ kfree(resp);
+}
+
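+/* Prepare the SSR MHI channel and queue the first response buffer */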
+static int qaic_ssr_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct ssr_resp *resp;
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ resp = kzalloc(sizeof(*resp) + SSR_RESP_MSG_SZ, GFP_KERNEL);
+ if (!resp) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return -ENOMEM;
+ }
+
+ resp->qdev = qdev;
+ INIT_WORK(&resp->work, ssr_worker);
+
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, resp->data, SSR_RESP_MSG_SZ, MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->ssr_ch = mhi_dev;
+
+ return 0;
+}
+
+static void qaic_ssr_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ mhi_unprepare_from_transfer(qdev->ssr_ch);
+ qdev->ssr_ch = NULL;
+}
+
+static void qaic_ssr_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ struct _ssr_hdr *hdr = mhi_result->buf_addr;
+ struct ssr_dump_info *dump_info;
+
+ if (mhi_result->transaction_status) {
+ kfree(mhi_result->buf_addr);
+ return;
+ }
+
+ /*
+ * MEMORY READ is used to download the crashdump, which is downloaded
+ * chunk by chunk in a series of MEMORY READ SSR commands. Hence, to
+ * avoid too many kmalloc() and kfree() calls for the same MEMORY READ
+ * request buffer, we allocate only one such buffer and free it only
+ * once.
+ */
+ if (le32_to_cpu(hdr->cmd) == MEMORY_READ) {
+ dump_info = ssr_crash->dump_info;
+ if (dump_info) {
+ dump_info->read_buf_req_queued = false;
+ return;
+ }
+ }
+
+ kfree(mhi_result->buf_addr);
+}
+
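+/*
+ * Route downlink completions: MEMORY READ responses go to the dump worker,
+ * everything else goes to the generic SSR worker.
+ */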
+static void qaic_ssr_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct ssr_resp *resp = container_of(mhi_result->buf_addr, struct ssr_resp, data);
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ssr_crashdump *ssr_crash = qdev->ssr_mhi_buf;
+ bool memory_read_rsp = false;
+
+ if (ssr_crash && ssr_crash->data == mhi_result->buf_addr)
+ memory_read_rsp = true;
+
+ if (mhi_result->transaction_status) {
+ /* Do not free the SSR crashdump buffer as it is allocated via managed APIs */
+ if (!memory_read_rsp)
+ kfree(resp);
+ return;
+ }
+
+ if (memory_read_rsp)
+ queue_work(qdev->ssr_wq, &ssr_crash->work);
+ else
+ queue_work(qdev->ssr_wq, &resp->work);
+}
+
+static const struct mhi_device_id qaic_ssr_mhi_match_table[] = {
+ { .chan = "QAIC_SSR", },
+ {},
+};
+
+static struct mhi_driver qaic_ssr_mhi_driver = {
+ .id_table = qaic_ssr_mhi_match_table,
+ .remove = qaic_ssr_mhi_remove,
+ .probe = qaic_ssr_mhi_probe,
+ .ul_xfer_cb = qaic_ssr_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_ssr_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_ssr",
+ },
+};
+
+int qaic_ssr_init(struct qaic_device *qdev, struct drm_device *drm)
+{
+ struct ssr_crashdump *ssr_crash;
+
+ qdev->ssr_dbc = QAIC_SSR_DBC_SENTINEL;
+
+ /*
+ * The device requests only one SSR at a time, so allocating a single
+ * buffer to download the crashdump is sufficient.
+ */
+ ssr_crash = drmm_kzalloc(drm, SSR_MHI_BUF_SIZE, GFP_KERNEL);
+ if (!ssr_crash)
+ return -ENOMEM;
+
+ ssr_crash->qdev = qdev;
+ INIT_WORK(&ssr_crash->work, ssr_dump_worker);
+ qdev->ssr_mhi_buf = ssr_crash;
+
+ return 0;
+}
+
+int qaic_ssr_register(void)
+{
+ return mhi_driver_register(&qaic_ssr_mhi_driver);
+}
+
+void qaic_ssr_unregister(void)
+{
+ mhi_driver_unregister(&qaic_ssr_mhi_driver);
+}
diff --git a/drivers/accel/qaic/qaic_ssr.h b/drivers/accel/qaic/qaic_ssr.h
new file mode 100644
index 000000000000..97ccff305750
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ssr.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __QAIC_SSR_H__
+#define __QAIC_SSR_H__
+
+struct drm_device;
+struct qaic_device;
+
+int qaic_ssr_register(void);
+void qaic_ssr_unregister(void);
+void qaic_clean_up_ssr(struct qaic_device *qdev);
+int qaic_ssr_init(struct qaic_device *qdev, struct drm_device *drm);
+#endif /* __QAIC_SSR_H__ */
diff --git a/drivers/accel/qaic/qaic_sysfs.c b/drivers/accel/qaic/qaic_sysfs.c
new file mode 100644
index 000000000000..e0afb0ffb589
--- /dev/null
+++ b/drivers/accel/qaic/qaic_sysfs.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2025, The Linux Foundation. All rights reserved. */
+
+#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/mutex.h>
+#include <linux/sysfs.h>
+
+#include "qaic.h"
+
+#define NAME_LEN 14
+
+struct dbc_attribute {
+ struct device_attribute dev_attr;
+ u32 dbc_id;
+ char name[NAME_LEN];
+};
+
+static ssize_t dbc_state_show(struct device *dev, struct device_attribute *a, char *buf)
+{
+ struct dbc_attribute *dbc_attr = container_of(a, struct dbc_attribute, dev_attr);
+ struct drm_minor *minor = dev_get_drvdata(dev);
+ struct qaic_device *qdev;
+
+ qdev = to_qaic_device(minor->dev);
+ return sysfs_emit(buf, "%d\n", qdev->dbc[dbc_attr->dbc_id].state);
+}
+
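+/*
+ * Update the cached DBC state and notify userspace with a uevent carrying
+ * DBC_ID=<id> and DBC_STATE=<state>.
+ */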
+void set_dbc_state(struct qaic_device *qdev, u32 dbc_id, unsigned int state)
+{
+ struct device *kdev = to_accel_kdev(qdev->qddev);
+ char *envp[3] = {};
+ char state_str[16];
+ char id_str[12];
+
+ envp[0] = id_str;
+ envp[1] = state_str;
+
+ if (state >= DBC_STATE_MAX)
+ return;
+ if (dbc_id >= qdev->num_dbc)
+ return;
+ if (state == qdev->dbc[dbc_id].state)
+ return;
+
+ scnprintf(id_str, ARRAY_SIZE(id_str), "DBC_ID=%d", dbc_id);
+ scnprintf(state_str, ARRAY_SIZE(state_str), "DBC_STATE=%d", state);
+
+ qdev->dbc[dbc_id].state = state;
+ kobject_uevent_env(&kdev->kobj, KOBJ_CHANGE, envp);
+}
+
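+/* Create one read-only dbc<N>_state attribute per DBC */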
+int qaic_sysfs_init(struct qaic_drm_device *qddev)
+{
+ struct device *kdev = to_accel_kdev(qddev);
+ struct drm_device *drm = to_drm(qddev);
+ u32 num_dbc = qddev->qdev->num_dbc;
+ struct dbc_attribute *dbc_attrs;
+ int i, ret;
+
+ dbc_attrs = drmm_kcalloc(drm, num_dbc, sizeof(*dbc_attrs), GFP_KERNEL);
+ if (!dbc_attrs)
+ return -ENOMEM;
+
+ for (i = 0; i < num_dbc; ++i) {
+ struct dbc_attribute *dbc_attr = &dbc_attrs[i];
+
+ sysfs_attr_init(&dbc_attr->dev_attr.attr);
+ dbc_attr->dbc_id = i;
+ scnprintf(dbc_attr->name, NAME_LEN, "dbc%d_state", i);
+ dbc_attr->dev_attr.attr.name = dbc_attr->name;
+ dbc_attr->dev_attr.attr.mode = 0444;
+ dbc_attr->dev_attr.show = dbc_state_show;
+ ret = sysfs_create_file(&kdev->kobj, &dbc_attr->dev_attr.attr);
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; ++j) {
+ dbc_attr = &dbc_attrs[j];
+ sysfs_remove_file(&kdev->kobj, &dbc_attr->dev_attr.attr);
+ }
+ drmm_kfree(drm, dbc_attrs);
+ return ret;
+ }
+ }
+
+ qddev->sysfs_attrs = dbc_attrs;
+ return 0;
+}
+
+void qaic_sysfs_remove(struct qaic_drm_device *qddev)
+{
+ struct dbc_attribute *dbc_attrs = qddev->sysfs_attrs;
+ struct device *kdev = to_accel_kdev(qddev);
+ u32 num_dbc = qddev->qdev->num_dbc;
+ int i;
+
+ if (!dbc_attrs)
+ return;
+
+ qddev->sysfs_attrs = NULL;
+ for (i = 0; i < num_dbc; ++i)
+ sysfs_remove_file(&kdev->kobj, &dbc_attrs[i].dev_attr.attr);
+ drmm_kfree(to_drm(qddev), dbc_attrs);
+}
diff --git a/drivers/accel/qaic/qaic_timesync.c b/drivers/accel/qaic/qaic_timesync.c
index 3fac540f8e03..8af2475f4f36 100644
--- a/drivers/accel/qaic/qaic_timesync.c
+++ b/drivers/accel/qaic/qaic_timesync.c
@@ -171,6 +171,13 @@ mod_timer:
dev_err(mqtsdev->dev, "%s mod_timer error:%d\n", __func__, ret);
}
+void qaic_mqts_ch_stop_timer(struct mhi_device *mhi_dev)
+{
+ struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+
+ timer_delete_sync(&mqtsdev->timer);
+}
+
static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
@@ -206,6 +213,7 @@ static int qaic_timesync_probe(struct mhi_device *mhi_dev, const struct mhi_devi
timer->expires = jiffies + msecs_to_jiffies(timesync_delay_ms);
add_timer(timer);
dev_set_drvdata(&mhi_dev->dev, mqtsdev);
+ qdev->mqts_ch = mhi_dev;
return 0;
@@ -221,6 +229,7 @@ static void qaic_timesync_remove(struct mhi_device *mhi_dev)
{
struct mqts_dev *mqtsdev = dev_get_drvdata(&mhi_dev->dev);
+ mqtsdev->qdev->mqts_ch = NULL;
timer_delete_sync(&mqtsdev->timer);
mhi_unprepare_from_transfer(mqtsdev->mhi_dev);
kfree(mqtsdev->sync_msg);
diff --git a/drivers/accel/qaic/qaic_timesync.h b/drivers/accel/qaic/qaic_timesync.h
index 851b7acd43bb..77b9c2b55057 100644
--- a/drivers/accel/qaic/qaic_timesync.h
+++ b/drivers/accel/qaic/qaic_timesync.h
@@ -6,6 +6,9 @@
#ifndef __QAIC_TIMESYNC_H__
#define __QAIC_TIMESYNC_H__
+#include <linux/mhi.h>
+
int qaic_timesync_init(void);
void qaic_timesync_deinit(void);
+void qaic_mqts_ch_stop_timer(struct mhi_device *mhi_dev);
#endif /* __QAIC_TIMESYNC_H__ */
diff --git a/drivers/accel/qaic/sahara.c b/drivers/accel/qaic/sahara.c
index 3ebcc1f7ff58..fd3c3b2d1fd3 100644
--- a/drivers/accel/qaic/sahara.c
+++ b/drivers/accel/qaic/sahara.c
@@ -159,6 +159,7 @@ struct sahara_context {
struct sahara_packet *rx;
struct work_struct fw_work;
struct work_struct dump_work;
+ struct work_struct read_data_work;
struct mhi_device *mhi_dev;
const char * const *image_table;
u32 table_size;
@@ -174,7 +175,10 @@ struct sahara_context {
u64 dump_image_offset;
void *mem_dump_freespace;
u64 dump_images_left;
+ u32 read_data_offset;
+ u32 read_data_length;
bool is_mem_dump_mode;
+ bool non_streaming;
};
static const char * const aic100_image_table[] = {
@@ -194,6 +198,7 @@ static const char * const aic200_image_table[] = {
[23] = "qcom/aic200/aop.mbn",
[32] = "qcom/aic200/tz.mbn",
[33] = "qcom/aic200/hypvm.mbn",
+ [38] = "qcom/aic200/xbl_config.elf",
[39] = "qcom/aic200/aic200_abl.elf",
[40] = "qcom/aic200/apdp.mbn",
[41] = "qcom/aic200/devcfg.mbn",
@@ -202,6 +207,7 @@ static const char * const aic200_image_table[] = {
[49] = "qcom/aic200/shrm.elf",
[50] = "qcom/aic200/cpucp.elf",
[51] = "qcom/aic200/aop_devcfg.mbn",
+ [54] = "qcom/aic200/qupv3fw.elf",
[57] = "qcom/aic200/cpucp_dtbs.elf",
[62] = "qcom/aic200/uefi_dtbs.elf",
[63] = "qcom/aic200/xbl_ac_config.mbn",
@@ -213,9 +219,15 @@ static const char * const aic200_image_table[] = {
[69] = "qcom/aic200/dcd.mbn",
[73] = "qcom/aic200/gearvm.mbn",
[74] = "qcom/aic200/sti.bin",
- [75] = "qcom/aic200/pvs.bin",
+ [76] = "qcom/aic200/tz_qti_config.mbn",
+ [78] = "qcom/aic200/pvs.bin",
};
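+/* True for XBL-style firmware, which streams READ_DATA over the transport MTU */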
+static bool is_streaming(struct sahara_context *context)
+{
+ return !context->non_streaming;
+}
+
static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
int ret;
@@ -265,6 +277,8 @@ static void sahara_send_reset(struct sahara_context *context)
int ret;
context->is_mem_dump_mode = false;
+ context->read_data_offset = 0;
+ context->read_data_length = 0;
context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);
@@ -319,9 +333,39 @@ static void sahara_hello(struct sahara_context *context)
dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
}
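+/*
+ * Copy the next chunk of the active firmware image into a TX buffer and
+ * queue it. Streaming (XBL) firmware gets an EOT on every buffer, while
+ * SBL firmware chains buffers until the READ_DATA request is satisfied.
+ */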
+static int read_data_helper(struct sahara_context *context, int buf_index)
+{
+ enum mhi_flags mhi_flag;
+ u32 pkt_data_len;
+ int ret;
+
+ pkt_data_len = min(context->read_data_length, SAHARA_PACKET_MAX_SIZE);
+
+ memcpy(context->tx[buf_index],
+ &context->firmware->data[context->read_data_offset],
+ pkt_data_len);
+
+ context->read_data_offset += pkt_data_len;
+ context->read_data_length -= pkt_data_len;
+
+ if (is_streaming(context) || !context->read_data_length)
+ mhi_flag = MHI_EOT;
+ else
+ mhi_flag = MHI_CHAIN;
+
+ ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
+ context->tx[buf_index], pkt_data_len, mhi_flag);
+ if (ret) {
+ dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void sahara_read_data(struct sahara_context *context)
{
- u32 image_id, data_offset, data_len, pkt_data_len;
+ u32 image_id, data_offset, data_len;
int ret;
int i;
@@ -357,7 +401,7 @@ static void sahara_read_data(struct sahara_context *context)
* and is not needed here on error.
*/
- if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
+ if (context->non_streaming && data_len > SAHARA_TRANSFER_MAX_SIZE) {
dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
data_len, SAHARA_TRANSFER_MAX_SIZE);
sahara_send_reset(context);
@@ -378,22 +422,18 @@ static void sahara_read_data(struct sahara_context *context)
return;
}
- for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
- pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);
-
- memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);
+ context->read_data_offset = data_offset;
+ context->read_data_length = data_len;
- data_offset += pkt_data_len;
- data_len -= pkt_data_len;
+ if (is_streaming(context)) {
+ schedule_work(&context->read_data_work);
+ return;
+ }
- ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
- context->tx[i], pkt_data_len,
- !data_len ? MHI_EOT : MHI_CHAIN);
- if (ret) {
- dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
- ret);
- return;
- }
+ for (i = 0; i < SAHARA_NUM_TX_BUF && context->read_data_length; ++i) {
+ ret = read_data_helper(context, i);
+ if (ret)
+ break;
}
}
@@ -538,6 +578,7 @@ static void sahara_parse_dump_table(struct sahara_context *context)
struct sahara_memory_dump_meta_v1 *dump_meta;
u64 table_nents;
u64 dump_length;
+ u64 mul_bytes;
int ret;
u64 i;
@@ -551,8 +592,9 @@ static void sahara_parse_dump_table(struct sahara_context *context)
dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
- dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length));
- if (dump_length == SIZE_MAX) {
+ if (check_add_overflow(dump_length,
+ le64_to_cpu(dev_table[i].length),
+ &dump_length)) {
/* Discard the dump */
sahara_send_reset(context);
return;
@@ -568,14 +610,17 @@ static void sahara_parse_dump_table(struct sahara_context *context)
dev_table[i].filename);
}
- dump_length = size_add(dump_length, sizeof(*dump_meta));
- if (dump_length == SIZE_MAX) {
+ if (check_add_overflow(dump_length, (u64)sizeof(*dump_meta), &dump_length)) {
/* Discard the dump */
sahara_send_reset(context);
return;
}
- dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents));
- if (dump_length == SIZE_MAX) {
+ if (check_mul_overflow((u64)sizeof(*image_out_table), table_nents, &mul_bytes)) {
+ /* Discard the dump */
+ sahara_send_reset(context);
+ return;
+ }
+ if (check_add_overflow(dump_length, mul_bytes, &dump_length)) {
/* Discard the dump */
sahara_send_reset(context);
return;
@@ -615,7 +660,7 @@ static void sahara_parse_dump_table(struct sahara_context *context)
/* Request the first chunk of the first image */
context->dump_image = &image_out_table[0];
- dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE);
+ dump_length = min_t(u64, context->dump_image->length, SAHARA_READ_MAX_SIZE);
/* Avoid requesting EOI sized data so that we can identify errors */
if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
@@ -663,7 +708,7 @@ static void sahara_parse_dump_image(struct sahara_context *context)
/* Get next image chunk */
dump_length = context->dump_image->length - context->dump_image_offset;
- dump_length = min(dump_length, SAHARA_READ_MAX_SIZE);
+ dump_length = min_t(u64, dump_length, SAHARA_READ_MAX_SIZE);
/* Avoid requesting EOI sized data so that we can identify errors */
if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;
@@ -742,6 +787,13 @@ error:
sahara_send_reset(context);
}
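+/* Work item for streaming firmware: reuses the single TX buffer for each chunk */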
+static void sahara_read_data_processing(struct work_struct *work)
+{
+ struct sahara_context *context = container_of(work, struct sahara_context, read_data_work);
+
+ read_data_helper(context, 0);
+}
+
static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
struct sahara_context *context;
@@ -756,34 +808,56 @@ static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_
if (!context->rx)
return -ENOMEM;
+ if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
+ context->image_table = aic200_image_table;
+ context->table_size = ARRAY_SIZE(aic200_image_table);
+ } else {
+ context->image_table = aic100_image_table;
+ context->table_size = ARRAY_SIZE(aic100_image_table);
+ context->non_streaming = true;
+ }
+
/*
- * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
- * will request for READ_DATA. This is larger than
- * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
- * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
- * READ_DATA, it requires a transfer of the exact size requested. We
- * can use MHI_CHAIN to link multiple buffers into a single transfer
- * but the remote side will not consume the buffers until it sees an
- * EOT, thus we need to allocate enough buffers to put in the tx fifo
- * to cover an entire READ_DATA request of the max size.
+ * There are two firmware implementations for READ_DATA handling.
+ * The older "SBL" implementation defines a Sahara transfer size, and
+ * expects that the response is a single transport transfer. If the
+ * FW wants to transfer a file that is larger than the transfer size,
+ * the FW will issue multiple READ_DATA commands. For this
+ * implementation, we need to allocate enough buffers to contain the
+ * entire Sahara transfer size.
+ *
+ * The newer "XBL" implementation does not define a maximum transfer
+ * size and instead expects the data to be streamed over using the
+ * transport level MTU. The FW will issue a single READ_DATA command
+ * of whatever size, and consume multiple transport level transfers
+ * until the expected amount of data is consumed. For this
+ * implementation we only need a single buffer of the transport MTU
+ * but we'll need to be able to use it multiple times for a single
+ * READ_DATA request.
+ *
+ * AIC100 is the SBL implementation and defines SAHARA_TRANSFER_MAX_SIZE
+ * and we need 9x SAHARA_PACKET_MAX_SIZE to cover that. We can use
+ * MHI_CHAIN to link multiple buffers into a single transfer but the
+ * remote side will not consume the buffers until it sees an EOT, thus
+ * we need to allocate enough buffers to put in the tx fifo to cover an
+ * entire READ_DATA request of the max size.
+ *
+ * AIC200 is the XBL implementation, and so a single buffer will work.
*/
for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
- context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
+ context->tx[i] = devm_kzalloc(&mhi_dev->dev,
+ SAHARA_PACKET_MAX_SIZE,
+ GFP_KERNEL);
if (!context->tx[i])
return -ENOMEM;
+ if (is_streaming(context))
+ break;
}
context->mhi_dev = mhi_dev;
INIT_WORK(&context->fw_work, sahara_processing);
INIT_WORK(&context->dump_work, sahara_dump_processing);
-
- if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
- context->image_table = aic200_image_table;
- context->table_size = ARRAY_SIZE(aic200_image_table);
- } else {
- context->image_table = aic100_image_table;
- context->table_size = ARRAY_SIZE(aic100_image_table);
- }
+ INIT_WORK(&context->read_data_work, sahara_read_data_processing);
context->active_image_id = SAHARA_IMAGE_ID_NONE;
dev_set_drvdata(&mhi_dev->dev, context);
@@ -814,6 +888,10 @@ static void sahara_mhi_remove(struct mhi_device *mhi_dev)
static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
+ struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);
+
+ if (!mhi_result->transaction_status && context->read_data_length && is_streaming(context))
+ schedule_work(&context->read_data_work);
}
static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
diff --git a/drivers/accel/rocket/Kconfig b/drivers/accel/rocket/Kconfig
new file mode 100644
index 000000000000..16465abe0660
--- /dev/null
+++ b/drivers/accel/rocket/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_ROCKET
+ tristate "Rocket (support for Rockchip NPUs)"
+ depends on DRM_ACCEL
+ depends on (ARCH_ROCKCHIP && ARM64) || COMPILE_TEST
+ depends on ROCKCHIP_IOMMU || COMPILE_TEST
+ depends on MMU
+ select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
+ help
+ Choose this option if you have a Rockchip SoC that contains a
+ compatible Neural Processing Unit (NPU), such as the RK3588. Called
+ either RKNN or RKNPU by Rockchip, it accelerates inference of neural
+ networks.
+
+ The interface exposed to userspace is described in
+ include/uapi/drm/rocket_accel.h and is used by the Rocket userspace
+ driver in Mesa3D.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rocket.
diff --git a/drivers/accel/rocket/Makefile b/drivers/accel/rocket/Makefile
new file mode 100644
index 000000000000..3713dfe223d6
--- /dev/null
+++ b/drivers/accel/rocket/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_DRM_ACCEL_ROCKET) := rocket.o
+
+rocket-y := \
+ rocket_core.o \
+ rocket_device.o \
+ rocket_drv.o \
+ rocket_gem.o \
+ rocket_job.o
diff --git a/drivers/accel/rocket/rocket_core.c b/drivers/accel/rocket/rocket_core.c
new file mode 100644
index 000000000000..abe7719c1db4
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include "rocket_core.h"
+#include "rocket_job.h"
+
+int rocket_core_init(struct rocket_core *core)
+{
+ struct device *dev = core->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ u32 version;
+ int err = 0;
+
+ core->resets[0].id = "srst_a";
+ core->resets[1].id = "srst_h";
+ err = devm_reset_control_bulk_get_exclusive(&pdev->dev, ARRAY_SIZE(core->resets),
+ core->resets);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get resets for core %d\n", core->index);
+
+ err = devm_clk_bulk_get(dev, ARRAY_SIZE(core->clks), core->clks);
+ if (err)
+ return dev_err_probe(dev, err, "failed to get clocks for core %d\n", core->index);
+
+ core->pc_iomem = devm_platform_ioremap_resource_byname(pdev, "pc");
+ if (IS_ERR(core->pc_iomem)) {
+ dev_err(dev, "couldn't find PC registers %ld\n", PTR_ERR(core->pc_iomem));
+ return PTR_ERR(core->pc_iomem);
+ }
+
+ core->cna_iomem = devm_platform_ioremap_resource_byname(pdev, "cna");
+ if (IS_ERR(core->cna_iomem)) {
+ dev_err(dev, "couldn't find CNA registers %ld\n", PTR_ERR(core->cna_iomem));
+ return PTR_ERR(core->cna_iomem);
+ }
+
+ core->core_iomem = devm_platform_ioremap_resource_byname(pdev, "core");
+ if (IS_ERR(core->core_iomem)) {
+ dev_err(dev, "couldn't find CORE registers %ld\n", PTR_ERR(core->core_iomem));
+ return PTR_ERR(core->core_iomem);
+ }
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return err;
+
+ core->iommu_group = iommu_group_get(dev);
+
+ err = rocket_job_init(core);
+ if (err)
+ return err;
+
+ pm_runtime_use_autosuspend(dev);
+
+ /*
+ * As this NPU will most often be used as part of a media pipeline that
+ * ends by presenting frames on a display, choose 50 ms (~3 frames at
+ * 60 Hz) as the autosuspend delay, as that will keep the device powered
+ * up while the pipeline is running.
+ */
+ pm_runtime_set_autosuspend_delay(dev, 50);
+
+ pm_runtime_enable(dev);
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ rocket_job_fini(core);
+ return err;
+ }
+
+ version = rocket_pc_readl(core, VERSION);
+ version += rocket_pc_readl(core, VERSION_NUM) & 0xffff;
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ dev_info(dev, "Rockchip NPU core %d version: %d\n", core->index, version);
+
+ return 0;
+}
+
+void rocket_core_fini(struct rocket_core *core)
+{
+ pm_runtime_dont_use_autosuspend(core->dev);
+ pm_runtime_disable(core->dev);
+ iommu_group_put(core->iommu_group);
+ core->iommu_group = NULL;
+ rocket_job_fini(core);
+}
+
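+/* Assert and deassert both core resets to bring the core to a known state */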
+void rocket_core_reset(struct rocket_core *core)
+{
+ reset_control_bulk_assert(ARRAY_SIZE(core->resets), core->resets);
+
+ udelay(10);
+
+ reset_control_bulk_deassert(ARRAY_SIZE(core->resets), core->resets);
+}
diff --git a/drivers/accel/rocket/rocket_core.h b/drivers/accel/rocket/rocket_core.h
new file mode 100644
index 000000000000..f6d7382854ca
--- /dev/null
+++ b/drivers/accel/rocket/rocket_core.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_CORE_H__
+#define __ROCKET_CORE_H__
+
+#include <drm/gpu_scheduler.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mutex_types.h>
+#include <linux/reset.h>
+
+#include "rocket_registers.h"
+
+#define rocket_pc_readl(core, reg) \
+ readl((core)->pc_iomem + (REG_PC_##reg))
+#define rocket_pc_writel(core, reg, value) \
+ writel(value, (core)->pc_iomem + (REG_PC_##reg))
+
+#define rocket_cna_readl(core, reg) \
+ readl((core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+#define rocket_cna_writel(core, reg, value) \
+ writel(value, (core)->cna_iomem + (REG_CNA_##reg) - REG_CNA_S_STATUS)
+
+#define rocket_core_readl(core, reg) \
+ readl((core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+#define rocket_core_writel(core, reg, value) \
+ writel(value, (core)->core_iomem + (REG_CORE_##reg) - REG_CORE_S_STATUS)
+
+struct rocket_core {
+ struct device *dev;
+ struct rocket_device *rdev;
+ unsigned int index;
+
+ int irq;
+ void __iomem *pc_iomem;
+ void __iomem *cna_iomem;
+ void __iomem *core_iomem;
+ struct clk_bulk_data clks[4];
+ struct reset_control_bulk_data resets[2];
+
+ struct iommu_group *iommu_group;
+
+ struct mutex job_lock;
+ struct rocket_job *in_flight_job;
+
+ spinlock_t fence_lock;
+
+ struct {
+ struct workqueue_struct *wq;
+ struct work_struct work;
+ atomic_t pending;
+ } reset;
+
+ struct drm_gpu_scheduler sched;
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+int rocket_core_init(struct rocket_core *core);
+void rocket_core_fini(struct rocket_core *core);
+void rocket_core_reset(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_device.c b/drivers/accel/rocket/rocket_device.c
new file mode 100644
index 000000000000..46e6ee1e72c5
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_drv.h>
+#include <linux/array_size.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "rocket_device.h"
+
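+/*
+ * Count the available RKNN cores in the DT, allocate per-core state and
+ * register a single DRM device for the whole NPU.
+ */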
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *core_node;
+ struct rocket_device *rdev;
+ struct drm_device *ddev;
+ unsigned int num_cores = 0;
+ int err;
+
+ rdev = devm_drm_dev_alloc(dev, rocket_drm_driver, struct rocket_device, ddev);
+ if (IS_ERR(rdev))
+ return rdev;
+
+ ddev = &rdev->ddev;
+ dev_set_drvdata(dev, rdev);
+
+ for_each_compatible_node(core_node, NULL, "rockchip,rk3588-rknn-core")
+ if (of_device_is_available(core_node))
+ num_cores++;
+
+ rdev->cores = devm_kcalloc(dev, num_cores, sizeof(*rdev->cores), GFP_KERNEL);
+ if (!rdev->cores)
+ return ERR_PTR(-ENOMEM);
+
+ dma_set_max_seg_size(dev, UINT_MAX);
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (err)
+ return ERR_PTR(err);
+
+ err = devm_mutex_init(dev, &rdev->sched_lock);
+ if (err)
+ return ERR_PTR(err);
+
+ err = drm_dev_register(ddev, 0);
+ if (err)
+ return ERR_PTR(err);
+
+ return rdev;
+}
+
+void rocket_device_fini(struct rocket_device *rdev)
+{
+ WARN_ON(rdev->num_cores > 0);
+
+ drm_dev_unregister(&rdev->ddev);
+}
diff --git a/drivers/accel/rocket/rocket_device.h b/drivers/accel/rocket/rocket_device.h
new file mode 100644
index 000000000000..ce662abc01d3
--- /dev/null
+++ b/drivers/accel/rocket/rocket_device.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DEVICE_H__
+#define __ROCKET_DEVICE_H__
+
+#include <drm/drm_device.h>
+#include <linux/clk.h>
+#include <linux/container_of.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+
+#include "rocket_core.h"
+
+struct rocket_device {
+ struct drm_device ddev;
+
+ struct mutex sched_lock;
+
+ struct rocket_core *cores;
+ unsigned int num_cores;
+};
+
+struct rocket_device *rocket_device_init(struct platform_device *pdev,
+ const struct drm_driver *rocket_drm_driver);
+void rocket_device_fini(struct rocket_device *rdev);
+#define to_rocket_device(drm_dev) \
+ ((struct rocket_device *)(container_of((drm_dev), struct rocket_device, ddev)))
+
+#endif /* __ROCKET_DEVICE_H__ */
diff --git a/drivers/accel/rocket/rocket_drv.c b/drivers/accel/rocket/rocket_drv.c
new file mode 100644
index 000000000000..5c0b63f0a8f0
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/rocket_accel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+#include "rocket_job.h"
+
+/*
+ * Facade device used to expose a single DRM device to userspace, which
+ * schedules jobs to any of the RKNN cores in the system.
+ */
+static struct platform_device *drm_dev;
+static struct rocket_device *rdev;
+
+static void
+rocket_iommu_domain_destroy(struct kref *kref)
+{
+ struct rocket_iommu_domain *domain = container_of(kref, struct rocket_iommu_domain, kref);
+
+ iommu_domain_free(domain->domain);
+ domain->domain = NULL;
+ kfree(domain);
+}
+
+static struct rocket_iommu_domain *
+rocket_iommu_domain_create(struct device *dev)
+{
+ struct rocket_iommu_domain *domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ void *err;
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->domain = iommu_paging_domain_alloc(dev);
+ if (IS_ERR(domain->domain)) {
+ err = ERR_CAST(domain->domain);
+ kfree(domain);
+ return err;
+ }
+ kref_init(&domain->kref);
+
+ return domain;
+}
+
+struct rocket_iommu_domain *
+rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv)
+{
+ kref_get(&rocket_priv->domain->kref);
+ return rocket_priv->domain;
+}
+
+void
+rocket_iommu_domain_put(struct rocket_iommu_domain *domain)
+{
+ kref_put(&domain->kref, rocket_iommu_domain_destroy);
+}
+
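+/* Give each client its own IOMMU domain and VA allocator */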
+static int
+rocket_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *rocket_priv;
+ u64 start, end;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -EINVAL;
+
+ rocket_priv = kzalloc(sizeof(*rocket_priv), GFP_KERNEL);
+ if (!rocket_priv) {
+ ret = -ENOMEM;
+ goto err_put_mod;
+ }
+
+ rocket_priv->rdev = rdev;
+ rocket_priv->domain = rocket_iommu_domain_create(rdev->cores[0].dev);
+ if (IS_ERR(rocket_priv->domain)) {
+ ret = PTR_ERR(rocket_priv->domain);
+ goto err_free;
+ }
+
+ file->driver_priv = rocket_priv;
+
+ start = rocket_priv->domain->domain->geometry.aperture_start;
+ end = rocket_priv->domain->domain->geometry.aperture_end;
+ drm_mm_init(&rocket_priv->mm, start, end - start + 1);
+ mutex_init(&rocket_priv->mm_lock);
+
+ ret = rocket_job_open(rocket_priv);
+ if (ret)
+ goto err_mm_takedown;
+
+ return 0;
+
+err_mm_takedown:
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+err_free:
+ kfree(rocket_priv);
+err_put_mod:
+ module_put(THIS_MODULE);
+ return ret;
+}
+
+static void
+rocket_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+
+ rocket_job_close(rocket_priv);
+ mutex_destroy(&rocket_priv->mm_lock);
+ drm_mm_takedown(&rocket_priv->mm);
+ rocket_iommu_domain_put(rocket_priv->domain);
+ kfree(rocket_priv);
+ module_put(THIS_MODULE);
+}
+
+static const struct drm_ioctl_desc rocket_drm_driver_ioctls[] = {
+#define ROCKET_IOCTL(n, func) \
+ DRM_IOCTL_DEF_DRV(ROCKET_##n, rocket_ioctl_##func, 0)
+
+ ROCKET_IOCTL(CREATE_BO, create_bo),
+ ROCKET_IOCTL(SUBMIT, submit),
+ ROCKET_IOCTL(PREP_BO, prep_bo),
+ ROCKET_IOCTL(FINI_BO, fini_bo),
+};
+
+DEFINE_DRM_ACCEL_FOPS(rocket_accel_driver_fops);
+
+/*
+ * Rocket driver version:
+ * - 1.0 - initial interface
+ */
+static const struct drm_driver rocket_drm_driver = {
+ .driver_features = DRIVER_COMPUTE_ACCEL | DRIVER_GEM,
+ .open = rocket_open,
+ .postclose = rocket_postclose,
+ .gem_create_object = rocket_gem_create_object,
+ .ioctls = rocket_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(rocket_drm_driver_ioctls),
+ .fops = &rocket_accel_driver_fops,
+ .name = "rocket",
+ .desc = "rocket DRM",
+};
+
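+/* Each RKNN core probes separately; the first probe creates the shared DRM device */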
+static int rocket_probe(struct platform_device *pdev)
+{
+ if (rdev == NULL) {
+ /* First core probing, initialize DRM device. */
+ rdev = rocket_device_init(drm_dev, &rocket_drm_driver);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to initialize rocket device\n");
+ return PTR_ERR(rdev);
+ }
+ }
+
+ unsigned int core = rdev->num_cores;
+
+ dev_set_drvdata(&pdev->dev, rdev);
+
+ rdev->cores[core].rdev = rdev;
+ rdev->cores[core].dev = &pdev->dev;
+ rdev->cores[core].index = core;
+
+ rdev->num_cores++;
+
+ return rocket_core_init(&rdev->cores[core]);
+}
+
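+/* Tear down this core; the last core removed also takes the DRM device down */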
+static void rocket_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (rdev->cores[core].dev == dev) {
+ rocket_core_fini(&rdev->cores[core]);
+ rdev->num_cores--;
+ break;
+ }
+ }
+
+ if (rdev->num_cores == 0) {
+ /* Last core removed, deinitialize DRM device. */
+ rocket_device_fini(rdev);
+ rdev = NULL;
+ }
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "rockchip,rk3588-rknn-core" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static int find_core_for_dev(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+
+ for (unsigned int core = 0; core < rdev->num_cores; core++) {
+ if (dev == rdev->cores[core].dev)
+ return core;
+ }
+
+ return -1;
+}
+
+static int rocket_device_runtime_resume(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+ int err = 0;
+
+ if (core < 0)
+ return -ENODEV;
+
+ err = clk_bulk_prepare_enable(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+ if (err) {
+ dev_err(dev, "failed to enable (%d) clocks for core %d\n", err, core);
+ return err;
+ }
+
+ return 0;
+}
+
+static int rocket_device_runtime_suspend(struct device *dev)
+{
+ struct rocket_device *rdev = dev_get_drvdata(dev);
+ int core = find_core_for_dev(dev);
+
+ if (core < 0)
+ return -ENODEV;
+
+ if (!rocket_job_is_idle(&rdev->cores[core]))
+ return -EBUSY;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(rdev->cores[core].clks), rdev->cores[core].clks);
+
+ return 0;
+}
+
+EXPORT_GPL_DEV_PM_OPS(rocket_pm_ops) = {
+ RUNTIME_PM_OPS(rocket_device_runtime_suspend, rocket_device_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+static struct platform_driver rocket_driver = {
+ .probe = rocket_probe,
+ .remove = rocket_remove,
+ .driver = {
+ .name = "rocket",
+ .pm = pm_ptr(&rocket_pm_ops),
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init rocket_register(void)
+{
+ drm_dev = platform_device_register_simple("rknn", -1, NULL, 0);
+ if (IS_ERR(drm_dev))
+ return PTR_ERR(drm_dev);
+
+ return platform_driver_register(&rocket_driver);
+}
+
+static void __exit rocket_unregister(void)
+{
+ platform_driver_unregister(&rocket_driver);
+
+ platform_device_unregister(drm_dev);
+}
+
+module_init(rocket_register);
+module_exit(rocket_unregister);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DRM driver for the Rockchip NPU IP");
+MODULE_AUTHOR("Tomeu Vizoso");
diff --git a/drivers/accel/rocket/rocket_drv.h b/drivers/accel/rocket/rocket_drv.h
new file mode 100644
index 000000000000..2c673bb99ccc
--- /dev/null
+++ b/drivers/accel/rocket/rocket_drv.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_DRV_H__
+#define __ROCKET_DRV_H__
+
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_device.h"
+
+extern const struct dev_pm_ops rocket_pm_ops;
+
+struct rocket_iommu_domain {
+ struct iommu_domain *domain;
+ struct kref kref;
+};
+
+struct rocket_file_priv {
+ struct rocket_device *rdev;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm mm;
+ struct mutex mm_lock;
+
+ struct drm_sched_entity sched_entity;
+};
+
+struct rocket_iommu_domain *rocket_iommu_domain_get(struct rocket_file_priv *rocket_priv);
+void rocket_iommu_domain_put(struct rocket_iommu_domain *domain);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_gem.c b/drivers/accel/rocket/rocket_gem.c
new file mode 100644
index 000000000000..624c4ecf5a34
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+#include <drm/drm_utils.h>
+#include <drm/rocket_accel.h>
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+
+#include "rocket_drv.h"
+#include "rocket_gem.h"
+
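+/* Unmap the BO from the client's IOMMU domain and release its VA range */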
+static void rocket_gem_bo_free(struct drm_gem_object *obj)
+{
+ struct rocket_gem_object *bo = to_rocket_bo(obj);
+ struct rocket_file_priv *rocket_priv = bo->driver_priv;
+ size_t unmapped;
+
+ drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1);
+
+ unmapped = iommu_unmap(bo->domain->domain, bo->mm.start, bo->size);
+ drm_WARN_ON(obj->dev, unmapped != bo->size);
+
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&bo->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+ rocket_iommu_domain_put(bo->domain);
+ bo->domain = NULL;
+
+ drm_gem_shmem_free(&bo->base);
+}
+
+static const struct drm_gem_object_funcs rocket_gem_funcs = {
+ .free = rocket_gem_bo_free,
+ .print_info = drm_gem_shmem_object_print_info,
+ .pin = drm_gem_shmem_object_pin,
+ .unpin = drm_gem_shmem_object_unpin,
+ .get_sg_table = drm_gem_shmem_object_get_sg_table,
+ .vmap = drm_gem_shmem_object_vmap,
+ .vunmap = drm_gem_shmem_object_vunmap,
+ .mmap = drm_gem_shmem_object_mmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size)
+{
+ struct rocket_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj->base.base.funcs = &rocket_gem_funcs;
+
+ return &obj->base.base;
+}
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct rocket_file_priv *rocket_priv = file->driver_priv;
+ struct drm_rocket_create_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+ struct sg_table *sgt;
+ int ret;
+
+ shmem_obj = drm_gem_shmem_create(dev, args->size);
+ if (IS_ERR(shmem_obj))
+ return PTR_ERR(shmem_obj);
+
+ gem_obj = &shmem_obj->base;
+ rkt_obj = to_rocket_bo(gem_obj);
+
+ rkt_obj->driver_priv = rocket_priv;
+ rkt_obj->domain = rocket_iommu_domain_get(rocket_priv);
+ rkt_obj->size = args->size;
+ rkt_obj->offset = 0;
+
+ ret = drm_gem_handle_create(file, gem_obj, &args->handle);
+ drm_gem_object_put(gem_obj);
+ if (ret)
+ goto err;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem_obj);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err;
+ }
+
+ mutex_lock(&rocket_priv->mm_lock);
+ ret = drm_mm_insert_node_generic(&rocket_priv->mm, &rkt_obj->mm,
+ rkt_obj->size, PAGE_SIZE,
+ 0, 0);
+ mutex_unlock(&rocket_priv->mm_lock);
+ if (ret)
+ goto err;
+
+ ret = iommu_map_sgtable(rocket_priv->domain->domain,
+ rkt_obj->mm.start,
+ shmem_obj->sgt,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0 || ret < args->size) {
+ drm_err(dev, "failed to map buffer: size=%d request_size=%u\n",
+ ret, args->size);
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ /* iommu_map_sgtable might have aligned the size */
+ rkt_obj->size = ret;
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->dma_address = rkt_obj->mm.start;
+
+ return 0;
+
+err_remove_node:
+ mutex_lock(&rocket_priv->mm_lock);
+ drm_mm_remove_node(&rkt_obj->mm);
+ mutex_unlock(&rocket_priv->mm_lock);
+
+err:
+ drm_gem_shmem_object_free(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_prep_bo *args = data;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+ struct drm_gem_object *gem_obj;
+ struct drm_gem_shmem_object *shmem_obj;
+ long ret = 0;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_prep_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ ret = dma_resv_wait_timeout(gem_obj->resv, DMA_RESV_USAGE_WRITE, true, timeout);
+ if (!ret)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ shmem_obj = &to_rocket_bo(gem_obj)->base;
+
+ dma_sync_sgtable_for_cpu(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return ret;
+}
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_fini_bo *args = data;
+ struct drm_gem_shmem_object *shmem_obj;
+ struct rocket_gem_object *rkt_obj;
+ struct drm_gem_object *gem_obj;
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_fini_bo struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ rkt_obj = to_rocket_bo(gem_obj);
+ shmem_obj = &rkt_obj->base;
+
+ dma_sync_sgtable_for_device(dev->dev, shmem_obj->sgt, DMA_BIDIRECTIONAL);
+
+ drm_gem_object_put(gem_obj);
+
+ return 0;
+}
diff --git a/drivers/accel/rocket/rocket_gem.h b/drivers/accel/rocket/rocket_gem.h
new file mode 100644
index 000000000000..240430334509
--- /dev/null
+++ b/drivers/accel/rocket/rocket_gem.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_GEM_H__
+#define __ROCKET_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+
+struct rocket_gem_object {
+ struct drm_gem_shmem_object base;
+
+ struct rocket_file_priv *driver_priv;
+
+ struct rocket_iommu_domain *domain;
+ struct drm_mm_node mm;
+ size_t size;
+ u32 offset;
+};
+
+struct drm_gem_object *rocket_gem_create_object(struct drm_device *dev, size_t size);
+
+int rocket_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_prep_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_ioctl_fini_bo(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline
+struct rocket_gem_object *to_rocket_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct rocket_gem_object, base);
+}
+
+#endif
diff --git a/drivers/accel/rocket/rocket_job.c b/drivers/accel/rocket/rocket_job.c
new file mode 100644
index 000000000000..acd606160dc9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#include <drm/drm_print.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/rocket_accel.h>
+#include <linux/interrupt.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "rocket_core.h"
+#include "rocket_device.h"
+#include "rocket_drv.h"
+#include "rocket_job.h"
+#include "rocket_registers.h"
+
+#define JOB_TIMEOUT_MS 500
+
+static struct rocket_job *
+to_rocket_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct rocket_job, base);
+}
+
+static const char *rocket_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "rocket";
+}
+
+static const char *rocket_fence_get_timeline_name(struct dma_fence *fence)
+{
+ return "rockchip-npu";
+}
+
+static const struct dma_fence_ops rocket_fence_ops = {
+ .get_driver_name = rocket_fence_get_driver_name,
+ .get_timeline_name = rocket_fence_get_timeline_name,
+};
+
+static struct dma_fence *rocket_fence_create(struct rocket_core *core)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ dma_fence_init(fence, &rocket_fence_ops, &core->fence_lock,
+ core->fence_context, ++core->emit_seqno);
+
+ return fence;
+}
+
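+/* Copy the per-task array from userspace, validating each entry */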
+static int
+rocket_copy_tasks(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_rocket_job *job,
+ struct rocket_job *rjob)
+{
+ int ret = 0;
+
+ if (job->task_struct_size < sizeof(struct drm_rocket_task))
+ return -EINVAL;
+
+ rjob->task_count = job->task_count;
+
+ if (!rjob->task_count)
+ return 0;
+
+ rjob->tasks = kvmalloc_array(job->task_count, sizeof(*rjob->tasks), GFP_KERNEL);
+ if (!rjob->tasks) {
+ drm_dbg(dev, "Failed to allocate task array\n");
+ return -ENOMEM;
+ }
+
+ for (int i = 0; i < rjob->task_count; i++) {
+ struct drm_rocket_task task = {0};
+
+ if (copy_from_user(&task,
+ u64_to_user_ptr(job->tasks) + i * job->task_struct_size,
+ sizeof(task))) {
+ drm_dbg(dev, "Failed to copy incoming tasks\n");
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ if (task.regcmd_count == 0) {
+ drm_dbg(dev, "regcmd_count field in drm_rocket_task should be > 0.\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ rjob->tasks[i].regcmd = task.regcmd;
+ rjob->tasks[i].regcmd_count = task.regcmd_count;
+ }
+
+ return 0;
+
+fail:
+ kvfree(rjob->tasks);
+ return ret;
+}
+
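+/* Program the core's registers to start executing the job's next task */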
+static void rocket_job_hw_submit(struct rocket_core *core, struct rocket_job *job)
+{
+ struct rocket_task *task;
+ unsigned int extra_bit;
+
+ /* Don't queue the job if a reset is in progress */
+ if (atomic_read(&core->reset.pending))
+ return;
+
+ /* GO ! */
+
+ task = &job->tasks[job->next_task_idx];
+ job->next_task_idx++;
+
+ rocket_pc_writel(core, BASE_ADDRESS, 0x1);
+
+ /* From the rknpu driver; in the TRM this bit is marked as reserved */
+ extra_bit = 0x10000000 * core->index;
+ rocket_cna_writel(core, S_POINTER, CNA_S_POINTER_POINTER_PP_EN(1) |
+ CNA_S_POINTER_EXECUTER_PP_EN(1) |
+ CNA_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_core_writel(core, S_POINTER, CORE_S_POINTER_POINTER_PP_EN(1) |
+ CORE_S_POINTER_EXECUTER_PP_EN(1) |
+ CORE_S_POINTER_POINTER_PP_MODE(1) |
+ extra_bit);
+
+ rocket_pc_writel(core, BASE_ADDRESS, task->regcmd);
+ rocket_pc_writel(core, REGISTER_AMOUNTS,
+ PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT((task->regcmd_count + 1) / 2 - 1));
+
+ rocket_pc_writel(core, INTERRUPT_MASK, PC_INTERRUPT_MASK_DPU_0 | PC_INTERRUPT_MASK_DPU_1);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, PC_INTERRUPT_CLEAR_DPU_0 | PC_INTERRUPT_CLEAR_DPU_1);
+
+ rocket_pc_writel(core, TASK_CON, PC_TASK_CON_RESERVED_0(1) |
+ PC_TASK_CON_TASK_COUNT_CLEAR(1) |
+ PC_TASK_CON_TASK_NUMBER(1) |
+ PC_TASK_CON_TASK_PP_EN(1));
+
+ rocket_pc_writel(core, TASK_DMA_BASE_ADDR, PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(0x0));
+
+ rocket_pc_writel(core, OPERATION_ENABLE, PC_OPERATION_ENABLE_OP_EN(1));
+
+ dev_dbg(core->dev, "Submitted regcmd at 0x%llx to core %d", task->regcmd, core->index);
+}
+
+static int rocket_acquire_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct drm_sched_job *job,
+ bool is_write)
+{
+ int i, ret;
+
+ for (i = 0; i < bo_count; i++) {
+ ret = dma_resv_reserve_fences(bos[i]->resv, 1);
+ if (ret)
+ return ret;
+
+ ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+ is_write);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
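+/*
+ * Publish the job's fence on the output BOs so that later users of those
+ * buffers (another job, or an importer) implicitly wait for the inference
+ * to complete.
+ */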
+static void rocket_attach_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
+}
+
+static int rocket_job_push(struct rocket_job *job)
+{
+ struct rocket_device *rdev = job->rdev;
+ struct drm_gem_object **bos;
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ bos = kvmalloc_array(job->in_bo_count + job->out_bo_count, sizeof(void *),
+ GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ memcpy(bos, job->in_bos, job->in_bo_count * sizeof(void *));
+ memcpy(&bos[job->in_bo_count], job->out_bos, job->out_bo_count * sizeof(void *));
+
+ ret = drm_gem_lock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+ if (ret)
+ goto err;
+
+ scoped_guard(mutex, &rdev->sched_lock) {
+ drm_sched_job_arm(&job->base);
+
+ job->inference_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ ret = rocket_acquire_object_fences(job->in_bos, job->in_bo_count, &job->base, false);
+ if (ret)
+ goto err_unlock;
+
+ ret = rocket_acquire_object_fences(job->out_bos, job->out_bo_count, &job->base, true);
+ if (ret)
+ goto err_unlock;
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+
+ drm_sched_entity_push_job(&job->base);
+ }
+
+ rocket_attach_object_fences(job->out_bos, job->out_bo_count, job->inference_done_fence);
+
+err_unlock:
+ drm_gem_unlock_reservations(bos, job->in_bo_count + job->out_bo_count, &acquire_ctx);
+err:
+ kvfree(bos);
+
+ return ret;
+}
+
+static void rocket_job_cleanup(struct kref *ref)
+{
+ struct rocket_job *job = container_of(ref, struct rocket_job,
+ refcount);
+ unsigned int i;
+
+ rocket_iommu_domain_put(job->domain);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+ if (job->in_bos) {
+ for (i = 0; i < job->in_bo_count; i++)
+ drm_gem_object_put(job->in_bos[i]);
+
+ kvfree(job->in_bos);
+ }
+
+ if (job->out_bos) {
+ for (i = 0; i < job->out_bo_count; i++)
+ drm_gem_object_put(job->out_bos[i]);
+
+ kvfree(job->out_bos);
+ }
+
+ kvfree(job->tasks);
+
+ kfree(job);
+}
+
+static void rocket_job_put(struct rocket_job *job)
+{
+ kref_put(&job->refcount, rocket_job_cleanup);
+}
+
+static void rocket_job_free(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ rocket_job_put(job);
+}
+
+static struct rocket_core *sched_to_core(struct rocket_device *rdev,
+ struct drm_gpu_scheduler *sched)
+{
+ unsigned int core;
+
+ for (core = 0; core < rdev->num_cores; core++) {
+ if (&rdev->cores[core].sched == sched)
+ return &rdev->cores[core];
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *rocket_job_run(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+ struct dma_fence *fence = NULL;
+ int ret;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ /*
+ * Nothing to execute: can happen if the job has finished while
+ * we were resetting the NPU.
+ */
+ if (job->next_task_idx == job->task_count)
+ return NULL;
+
+ fence = rocket_fence_create(core);
+ if (IS_ERR(fence))
+ return fence;
+
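+ /*
+ * run_job can be called again for the same job after an NPU reset;
+ * drop the fence from the previous attempt before storing the new one.
+ */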
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ ret = pm_runtime_get_sync(core->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(core->dev);
+ return fence;
+ }
+
+ ret = iommu_attach_group(job->domain->domain, core->iommu_group);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(core->dev);
+ return fence;
+ }
+
+ scoped_guard(mutex, &core->job_lock) {
+ core->in_flight_job = job;
+ rocket_job_hw_submit(core, job);
+ }
+
+ return fence;
+}
+
+static void rocket_job_handle_irq(struct rocket_core *core)
+{
+ pm_runtime_mark_last_busy(core->dev);
+
+ rocket_pc_writel(core, OPERATION_ENABLE, 0x0);
+ rocket_pc_writel(core, INTERRUPT_CLEAR, 0x1ffff);
+
+ scoped_guard(mutex, &core->job_lock)
+ if (core->in_flight_job) {
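+ /*
+ * Each interrupt marks one task as finished: if the job still
+ * has tasks left, kick the next one directly from the IRQ
+ * thread instead of taking a round-trip through the scheduler.
+ */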
+ if (core->in_flight_job->next_task_idx < core->in_flight_job->task_count) {
+ rocket_job_hw_submit(core, core->in_flight_job);
+ return;
+ }
+
+ iommu_detach_group(NULL, core->iommu_group);
+ dma_fence_signal(core->in_flight_job->done_fence);
+ pm_runtime_put_autosuspend(core->dev);
+ core->in_flight_job = NULL;
+ }
+}
+
+static void
+rocket_reset(struct rocket_core *core, struct drm_sched_job *bad)
+{
+ if (!atomic_read(&core->reset.pending))
+ return;
+
+ drm_sched_stop(&core->sched, bad);
+
+ /*
+ * Remaining interrupts have been handled, but we might still have
+ * stuck jobs. Let's make sure the PM counters stay balanced by
+ * manually calling pm_runtime_put_noidle().
+ */
+ scoped_guard(mutex, &core->job_lock) {
+ if (core->in_flight_job)
+ pm_runtime_put_noidle(core->dev);
+
+ iommu_detach_group(NULL, core->iommu_group);
+
+ core->in_flight_job = NULL;
+ }
+
+ /* Proceed with reset now. */
+ rocket_core_reset(core);
+
+ /* NPU has been reset, we can clear the reset pending bit. */
+ atomic_set(&core->reset.pending, 0);
+
+ /* Restart the scheduler */
+ drm_sched_start(&core->sched, 0);
+}
+
+static enum drm_gpu_sched_stat rocket_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct rocket_job *job = to_rocket_job(sched_job);
+ struct rocket_device *rdev = job->rdev;
+ struct rocket_core *core = sched_to_core(rdev, sched_job->sched);
+
+ dev_err(core->dev, "NPU job timed out");
+
+ atomic_set(&core->reset.pending, 1);
+ rocket_reset(core, sched_job);
+
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+static void rocket_reset_work(struct work_struct *work)
+{
+ struct rocket_core *core;
+
+ core = container_of(work, struct rocket_core, reset.work);
+ rocket_reset(core, NULL);
+}
+
+static const struct drm_sched_backend_ops rocket_sched_ops = {
+ .run_job = rocket_job_run,
+ .timedout_job = rocket_job_timedout,
+ .free_job = rocket_job_free
+};
+
+static irqreturn_t rocket_job_irq_handler_thread(int irq, void *data)
+{
+ struct rocket_core *core = data;
+
+ rocket_job_handle_irq(core);
+
+ return IRQ_HANDLED;
+}
+
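+/*
+ * Hard-IRQ half: verify the interrupt is ours, warn on DMA errors, then
+ * mask further job interrupts and defer completion handling to the
+ * threaded handler above.
+ */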
+static irqreturn_t rocket_job_irq_handler(int irq, void *data)
+{
+ struct rocket_core *core = data;
+ u32 raw_status = rocket_pc_readl(core, INTERRUPT_RAW_STATUS);
+
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR);
+ WARN_ON(raw_status & PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR);
+
+ if (!(raw_status & PC_INTERRUPT_RAW_STATUS_DPU_0 ||
+ raw_status & PC_INTERRUPT_RAW_STATUS_DPU_1))
+ return IRQ_NONE;
+
+ rocket_pc_writel(core, INTERRUPT_MASK, 0x0);
+
+ return IRQ_WAKE_THREAD;
+}
+
+int rocket_job_init(struct rocket_core *core)
+{
+ struct drm_sched_init_args args = {
+ .ops = &rocket_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .credit_limit = 1,
+ .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .name = dev_name(core->dev),
+ .dev = core->dev,
+ };
+ int ret;
+
+ INIT_WORK(&core->reset.work, rocket_reset_work);
+ spin_lock_init(&core->fence_lock);
+ mutex_init(&core->job_lock);
+
+ core->irq = platform_get_irq(to_platform_device(core->dev), 0);
+ if (core->irq < 0)
+ return core->irq;
+
+ ret = devm_request_threaded_irq(core->dev, core->irq,
+ rocket_job_irq_handler,
+ rocket_job_irq_handler_thread,
+ IRQF_SHARED, dev_name(core->dev),
+ core);
+ if (ret) {
+ dev_err(core->dev, "failed to request job irq");
+ return ret;
+ }
+
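+ /*
+ * The scheduler's timeout handling runs on this ordered workqueue, so
+ * resets for this core are serialized rather than racing each other.
+ */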
+ core->reset.wq = alloc_ordered_workqueue("rocket-reset-%d", 0, core->index);
+ if (!core->reset.wq)
+ return -ENOMEM;
+
+ core->fence_context = dma_fence_context_alloc(1);
+
+ args.timeout_wq = core->reset.wq;
+ ret = drm_sched_init(&core->sched, &args);
+ if (ret) {
+ dev_err(core->dev, "Failed to create scheduler: %d\n", ret);
+ goto err_destroy_wq;
+ }
+
+ return 0;
+
+err_destroy_wq:
+ /* drm_sched_init() cleans up after itself on failure; only the workqueue is left to destroy. */
+ destroy_workqueue(core->reset.wq);
+ return ret;
+}
+
+void rocket_job_fini(struct rocket_core *core)
+{
+ drm_sched_fini(&core->sched);
+
+ cancel_work_sync(&core->reset.work);
+ destroy_workqueue(core->reset.wq);
+}
+
+int rocket_job_open(struct rocket_file_priv *rocket_priv)
+{
+ struct rocket_device *rdev = rocket_priv->rdev;
+ struct drm_gpu_scheduler **scheds = kmalloc_array(rdev->num_cores,
+ sizeof(*scheds),
+ GFP_KERNEL);
+ unsigned int core;
+ int ret;
+
+ if (!scheds)
+ return -ENOMEM;
+
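+ /* An entity spanning every core's scheduler lets drm_sched place each job on any core. */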
+ for (core = 0; core < rdev->num_cores; core++)
+ scheds[core] = &rdev->cores[core].sched;
+
+ ret = drm_sched_entity_init(&rocket_priv->sched_entity,
+ DRM_SCHED_PRIORITY_NORMAL,
+ scheds,
+ rdev->num_cores, NULL);
+ if (WARN_ON(ret)) {
+ kfree(scheds);
+ return ret;
+ }
+
+ return 0;
+}
+
+void rocket_job_close(struct rocket_file_priv *rocket_priv)
+{
+ struct drm_sched_entity *entity = &rocket_priv->sched_entity;
+
+ kfree(entity->sched_list);
+ drm_sched_entity_destroy(entity);
+}
+
+int rocket_job_is_idle(struct rocket_core *core)
+{
+ /* If there are any jobs in this HW queue, we're not idle */
+ if (atomic_read(&core->sched.credit_count))
+ return false;
+
+ return true;
+}
+
+static int rocket_ioctl_submit_job(struct drm_device *dev, struct drm_file *file,
+ struct drm_rocket_job *job)
+{
+ struct rocket_device *rdev = to_rocket_device(dev);
+ struct rocket_file_priv *file_priv = file->driver_priv;
+ struct rocket_job *rjob = NULL;
+ int ret = 0;
+
+ if (job->task_count == 0)
+ return -EINVAL;
+
+ rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
+ if (!rjob)
+ return -ENOMEM;
+
+ kref_init(&rjob->refcount);
+
+ rjob->rdev = rdev;
+
+ ret = drm_sched_job_init(&rjob->base,
+ &file_priv->sched_entity,
+ 1, NULL, file->client_id);
+ if (ret)
+ goto out_put_job;
+
+ ret = rocket_copy_tasks(dev, file, job, rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->in_bo_handles),
+ job->in_bo_handle_count, &rjob->in_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->in_bo_count = job->in_bo_handle_count;
+
+ ret = drm_gem_objects_lookup(file, u64_to_user_ptr(job->out_bo_handles),
+ job->out_bo_handle_count, &rjob->out_bos);
+ if (ret)
+ goto out_cleanup_job;
+
+ rjob->out_bo_count = job->out_bo_handle_count;
+
+ rjob->domain = rocket_iommu_domain_get(file_priv);
+
+ ret = rocket_job_push(rjob);
+ if (ret)
+ goto out_cleanup_job;
+
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&rjob->base);
+out_put_job:
+ rocket_job_put(rjob);
+
+ return ret;
+}
+
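+/*
+ * The whole job array is copied up front so that a faulting userspace
+ * pointer is caught before any job has been pushed to the hardware.
+ */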
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_rocket_submit *args = data;
+ struct drm_rocket_job *jobs;
+ int ret = 0;
+ unsigned int i = 0;
+
+ if (args->job_count == 0)
+ return 0;
+
+ if (args->job_struct_size < sizeof(struct drm_rocket_job)) {
+ drm_dbg(dev, "job_struct_size field in drm_rocket_submit struct is too small.\n");
+ return -EINVAL;
+ }
+
+ if (args->reserved != 0) {
+ drm_dbg(dev, "Reserved field in drm_rocket_submit struct should be 0.\n");
+ return -EINVAL;
+ }
+
+ jobs = kvmalloc_array(args->job_count, sizeof(*jobs), GFP_KERNEL);
+ if (!jobs) {
+ drm_dbg(dev, "Failed to allocate incoming job array\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ if (copy_from_user(&jobs[i],
+ u64_to_user_ptr(args->jobs) + i * args->job_struct_size,
+ sizeof(*jobs))) {
+ ret = -EFAULT;
+ drm_dbg(dev, "Failed to copy incoming job array\n");
+ goto exit;
+ }
+ }
+
+ for (i = 0; i < args->job_count; i++) {
+ ret = rocket_ioctl_submit_job(dev, file, &jobs[i]);
+ if (ret)
+ goto exit;
+ }
+
+exit:
+ kvfree(jobs);
+
+ return ret;
+}
diff --git a/drivers/accel/rocket/rocket_job.h b/drivers/accel/rocket/rocket_job.h
new file mode 100644
index 000000000000..4ae00feec3b9
--- /dev/null
+++ b/drivers/accel/rocket/rocket_job.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright 2024-2025 Tomeu Vizoso <tomeu@tomeuvizoso.net> */
+
+#ifndef __ROCKET_JOB_H__
+#define __ROCKET_JOB_H__
+
+#include <drm/drm_drv.h>
+#include <drm/gpu_scheduler.h>
+
+#include "rocket_core.h"
+#include "rocket_drv.h"
+
+struct rocket_task {
+ u64 regcmd;
+ u32 regcmd_count;
+};
+
+struct rocket_job {
+ struct drm_sched_job base;
+
+ struct rocket_device *rdev;
+
+ struct drm_gem_object **in_bos;
+ struct drm_gem_object **out_bos;
+
+ u32 in_bo_count;
+ u32 out_bo_count;
+
+ struct rocket_task *tasks;
+ u32 task_count;
+ u32 next_task_idx;
+
+ /* Fence signaled by drm_sched once it is done with the job. */
+ struct dma_fence *inference_done_fence;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ struct rocket_iommu_domain *domain;
+
+ struct kref refcount;
+};
+
+int rocket_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file);
+
+int rocket_job_init(struct rocket_core *core);
+void rocket_job_fini(struct rocket_core *core);
+int rocket_job_open(struct rocket_file_priv *rocket_priv);
+void rocket_job_close(struct rocket_file_priv *rocket_priv);
+int rocket_job_is_idle(struct rocket_core *core);
+
+#endif
diff --git a/drivers/accel/rocket/rocket_registers.h b/drivers/accel/rocket/rocket_registers.h
new file mode 100644
index 000000000000..9aef614c3470
--- /dev/null
+++ b/drivers/accel/rocket/rocket_registers.h
@@ -0,0 +1,4404 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
+
+#ifndef __ROCKET_REGISTERS_XML__
+#define __ROCKET_REGISTERS_XML__
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
+http://gitlab.freedesktop.org/mesa/mesa/
+git clone https://gitlab.freedesktop.org/mesa/mesa.git
+
+The rules-ng-ng source files this header was generated from are:
+
+- /home/tomeu/src/mesa/src/gallium/drivers/rocket/registers.xml ( 60076 bytes, from Wed Jun 12 10:02:25 2024)
+
+Copyright (C) 2024-2025 by the following authors:
+- Tomeu Vizoso <tomeu@tomeuvizoso.net>
+*/
+
+#define REG_PC_VERSION 0x00000000
+#define PC_VERSION_VERSION__MASK 0xffffffff
+#define PC_VERSION_VERSION__SHIFT 0
+static inline uint32_t PC_VERSION_VERSION(uint32_t val)
+{
+ return ((val) << PC_VERSION_VERSION__SHIFT) & PC_VERSION_VERSION__MASK;
+}
+
+#define REG_PC_VERSION_NUM 0x00000004
+#define PC_VERSION_NUM_VERSION_NUM__MASK 0xffffffff
+#define PC_VERSION_NUM_VERSION_NUM__SHIFT 0
+static inline uint32_t PC_VERSION_NUM_VERSION_NUM(uint32_t val)
+{
+ return ((val) << PC_VERSION_NUM_VERSION_NUM__SHIFT) & PC_VERSION_NUM_VERSION_NUM__MASK;
+}
+
+#define REG_PC_OPERATION_ENABLE 0x00000008
+#define PC_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PC_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PC_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_RESERVED_0__SHIFT) & PC_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PC_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PC_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PC_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PC_OPERATION_ENABLE_OP_EN__SHIFT) & PC_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PC_BASE_ADDRESS 0x00000010
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK 0xfffffff0
+#define PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT 4
+static inline uint32_t PC_BASE_ADDRESS_PC_SOURCE_ADDR(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SOURCE_ADDR__SHIFT) & PC_BASE_ADDRESS_PC_SOURCE_ADDR__MASK;
+}
+#define PC_BASE_ADDRESS_RESERVED_0__MASK 0x0000000e
+#define PC_BASE_ADDRESS_RESERVED_0__SHIFT 1
+static inline uint32_t PC_BASE_ADDRESS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_RESERVED_0__SHIFT) & PC_BASE_ADDRESS_RESERVED_0__MASK;
+}
+#define PC_BASE_ADDRESS_PC_SEL__MASK 0x00000001
+#define PC_BASE_ADDRESS_PC_SEL__SHIFT 0
+static inline uint32_t PC_BASE_ADDRESS_PC_SEL(uint32_t val)
+{
+ return ((val) << PC_BASE_ADDRESS_PC_SEL__SHIFT) & PC_BASE_ADDRESS_PC_SEL__MASK;
+}
+
+#define REG_PC_REGISTER_AMOUNTS 0x00000014
+#define PC_REGISTER_AMOUNTS_RESERVED_0__MASK 0xffff0000
+#define PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT 16
+static inline uint32_t PC_REGISTER_AMOUNTS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_RESERVED_0__SHIFT) & PC_REGISTER_AMOUNTS_RESERVED_0__MASK;
+}
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK 0x0000ffff
+#define PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT 0
+static inline uint32_t PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT(uint32_t val)
+{
+ return ((val) << PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__SHIFT) & PC_REGISTER_AMOUNTS_PC_DATA_AMOUNT__MASK;
+}
+
+#define REG_PC_INTERRUPT_MASK 0x00000020
+#define PC_INTERRUPT_MASK_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_MASK_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_MASK_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_MASK_RESERVED_0__SHIFT) & PC_INTERRUPT_MASK_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_MASK_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_MASK_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_MASK_PPU_1 0x00000800
+#define PC_INTERRUPT_MASK_PPU_0 0x00000400
+#define PC_INTERRUPT_MASK_DPU_1 0x00000200
+#define PC_INTERRUPT_MASK_DPU_0 0x00000100
+#define PC_INTERRUPT_MASK_CORE_1 0x00000080
+#define PC_INTERRUPT_MASK_CORE_0 0x00000040
+#define PC_INTERRUPT_MASK_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_MASK_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_MASK_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_MASK_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_MASK_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_CLEAR 0x00000024
+#define PC_INTERRUPT_CLEAR_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_CLEAR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_CLEAR_RESERVED_0__SHIFT) & PC_INTERRUPT_CLEAR_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_CLEAR_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_CLEAR_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_CLEAR_PPU_1 0x00000800
+#define PC_INTERRUPT_CLEAR_PPU_0 0x00000400
+#define PC_INTERRUPT_CLEAR_DPU_1 0x00000200
+#define PC_INTERRUPT_CLEAR_DPU_0 0x00000100
+#define PC_INTERRUPT_CLEAR_CORE_1 0x00000080
+#define PC_INTERRUPT_CLEAR_CORE_0 0x00000040
+#define PC_INTERRUPT_CLEAR_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_CLEAR_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_CLEAR_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_CLEAR_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_STATUS 0x00000028
+#define PC_INTERRUPT_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_INTERRUPT_RAW_STATUS 0x0000002c
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK 0xffffc000
+#define PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT 14
+static inline uint32_t PC_INTERRUPT_RAW_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_INTERRUPT_RAW_STATUS_RESERVED_0__SHIFT) & PC_INTERRUPT_RAW_STATUS_RESERVED_0__MASK;
+}
+#define PC_INTERRUPT_RAW_STATUS_DMA_WRITE_ERROR 0x00002000
+#define PC_INTERRUPT_RAW_STATUS_DMA_READ_ERROR 0x00001000
+#define PC_INTERRUPT_RAW_STATUS_PPU_1 0x00000800
+#define PC_INTERRUPT_RAW_STATUS_PPU_0 0x00000400
+#define PC_INTERRUPT_RAW_STATUS_DPU_1 0x00000200
+#define PC_INTERRUPT_RAW_STATUS_DPU_0 0x00000100
+#define PC_INTERRUPT_RAW_STATUS_CORE_1 0x00000080
+#define PC_INTERRUPT_RAW_STATUS_CORE_0 0x00000040
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_1 0x00000020
+#define PC_INTERRUPT_RAW_STATUS_CNA_CSC_0 0x00000010
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_1 0x00000008
+#define PC_INTERRUPT_RAW_STATUS_CNA_WEIGHT_0 0x00000004
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_1 0x00000002
+#define PC_INTERRUPT_RAW_STATUS_CNA_FEATURE_0 0x00000001
+
+#define REG_PC_TASK_CON 0x00000030
+#define PC_TASK_CON_RESERVED_0__MASK 0xffffc000
+#define PC_TASK_CON_RESERVED_0__SHIFT 14
+static inline uint32_t PC_TASK_CON_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_RESERVED_0__SHIFT) & PC_TASK_CON_RESERVED_0__MASK;
+}
+#define PC_TASK_CON_TASK_COUNT_CLEAR__MASK 0x00002000
+#define PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT 13
+static inline uint32_t PC_TASK_CON_TASK_COUNT_CLEAR(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_COUNT_CLEAR__SHIFT) & PC_TASK_CON_TASK_COUNT_CLEAR__MASK;
+}
+#define PC_TASK_CON_TASK_PP_EN__MASK 0x00001000
+#define PC_TASK_CON_TASK_PP_EN__SHIFT 12
+static inline uint32_t PC_TASK_CON_TASK_PP_EN(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_PP_EN__SHIFT) & PC_TASK_CON_TASK_PP_EN__MASK;
+}
+#define PC_TASK_CON_TASK_NUMBER__MASK 0x00000fff
+#define PC_TASK_CON_TASK_NUMBER__SHIFT 0
+static inline uint32_t PC_TASK_CON_TASK_NUMBER(uint32_t val)
+{
+ return ((val) << PC_TASK_CON_TASK_NUMBER__SHIFT) & PC_TASK_CON_TASK_NUMBER__MASK;
+}
+
+#define REG_PC_TASK_DMA_BASE_ADDR 0x00000034
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK 0xfffffff0
+#define PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT 4
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__SHIFT) & PC_TASK_DMA_BASE_ADDR_DMA_BASE_ADDR__MASK;
+}
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PC_TASK_DMA_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_DMA_BASE_ADDR_RESERVED_0__SHIFT) & PC_TASK_DMA_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PC_TASK_STATUS 0x0000003c
+#define PC_TASK_STATUS_RESERVED_0__MASK 0xf0000000
+#define PC_TASK_STATUS_RESERVED_0__SHIFT 28
+static inline uint32_t PC_TASK_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_RESERVED_0__SHIFT) & PC_TASK_STATUS_RESERVED_0__MASK;
+}
+#define PC_TASK_STATUS_TASK_STATUS__MASK 0x0fffffff
+#define PC_TASK_STATUS_TASK_STATUS__SHIFT 0
+static inline uint32_t PC_TASK_STATUS_TASK_STATUS(uint32_t val)
+{
+ return ((val) << PC_TASK_STATUS_TASK_STATUS__SHIFT) & PC_TASK_STATUS_TASK_STATUS__MASK;
+}
+
+#define REG_CNA_S_STATUS 0x00001000
+#define CNA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CNA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CNA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_0__SHIFT) & CNA_S_STATUS_RESERVED_0__MASK;
+}
+#define CNA_S_STATUS_STATUS_1__MASK 0x00030000
+#define CNA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CNA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_1__SHIFT) & CNA_S_STATUS_STATUS_1__MASK;
+}
+#define CNA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CNA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CNA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_RESERVED_1__SHIFT) & CNA_S_STATUS_RESERVED_1__MASK;
+}
+#define CNA_S_STATUS_STATUS_0__MASK 0x00000003
+#define CNA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CNA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CNA_S_STATUS_STATUS_0__SHIFT) & CNA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CNA_S_POINTER 0x00001004
+#define CNA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CNA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_0__SHIFT) & CNA_S_POINTER_RESERVED_0__MASK;
+}
+#define CNA_S_POINTER_EXECUTER__MASK 0x00010000
+#define CNA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CNA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER__SHIFT) & CNA_S_POINTER_EXECUTER__MASK;
+}
+#define CNA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CNA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CNA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_RESERVED_1__SHIFT) & CNA_S_POINTER_RESERVED_1__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CNA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CNA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CNA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CNA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_MODE__SHIFT) & CNA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CNA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CNA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CNA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_EXECUTER_PP_EN__SHIFT) & CNA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CNA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CNA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER_PP_EN__SHIFT) & CNA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CNA_S_POINTER_POINTER__MASK 0x00000001
+#define CNA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CNA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CNA_S_POINTER_POINTER__SHIFT) & CNA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_CNA_OPERATION_ENABLE 0x00001008
+#define CNA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CNA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_RESERVED_0__SHIFT) & CNA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CNA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CNA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CNA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CNA_OPERATION_ENABLE_OP_EN__SHIFT) & CNA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CNA_CONV_CON1 0x0000100c
+#define CNA_CONV_CON1_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON1_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_0__SHIFT) & CNA_CONV_CON1_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON1_NONALIGN_DMA__MASK 0x40000000
+#define CNA_CONV_CON1_NONALIGN_DMA__SHIFT 30
+static inline uint32_t CNA_CONV_CON1_NONALIGN_DMA(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_NONALIGN_DMA__SHIFT) & CNA_CONV_CON1_NONALIGN_DMA__MASK;
+}
+#define CNA_CONV_CON1_GROUP_LINE_OFF__MASK 0x20000000
+#define CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT 29
+static inline uint32_t CNA_CONV_CON1_GROUP_LINE_OFF(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_GROUP_LINE_OFF__SHIFT) & CNA_CONV_CON1_GROUP_LINE_OFF__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_1__MASK 0x1ffe0000
+#define CNA_CONV_CON1_RESERVED_1__SHIFT 17
+static inline uint32_t CNA_CONV_CON1_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_1__SHIFT) & CNA_CONV_CON1_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON1_DECONV__MASK 0x00010000
+#define CNA_CONV_CON1_DECONV__SHIFT 16
+static inline uint32_t CNA_CONV_CON1_DECONV(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_DECONV__SHIFT) & CNA_CONV_CON1_DECONV__MASK;
+}
+#define CNA_CONV_CON1_ARGB_IN__MASK 0x0000f000
+#define CNA_CONV_CON1_ARGB_IN__SHIFT 12
+static inline uint32_t CNA_CONV_CON1_ARGB_IN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_ARGB_IN__SHIFT) & CNA_CONV_CON1_ARGB_IN__MASK;
+}
+#define CNA_CONV_CON1_RESERVED_2__MASK 0x00000c00
+#define CNA_CONV_CON1_RESERVED_2__SHIFT 10
+static inline uint32_t CNA_CONV_CON1_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_RESERVED_2__SHIFT) & CNA_CONV_CON1_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON1_PROC_PRECISION__MASK 0x00000380
+#define CNA_CONV_CON1_PROC_PRECISION__SHIFT 7
+static inline uint32_t CNA_CONV_CON1_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_PROC_PRECISION__SHIFT) & CNA_CONV_CON1_PROC_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_IN_PRECISION__MASK 0x00000070
+#define CNA_CONV_CON1_IN_PRECISION__SHIFT 4
+static inline uint32_t CNA_CONV_CON1_IN_PRECISION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_IN_PRECISION__SHIFT) & CNA_CONV_CON1_IN_PRECISION__MASK;
+}
+#define CNA_CONV_CON1_CONV_MODE__MASK 0x0000000f
+#define CNA_CONV_CON1_CONV_MODE__SHIFT 0
+static inline uint32_t CNA_CONV_CON1_CONV_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON1_CONV_MODE__SHIFT) & CNA_CONV_CON1_CONV_MODE__MASK;
+}
+
+#define REG_CNA_CONV_CON2 0x00001010
+#define CNA_CONV_CON2_RESERVED_0__MASK 0xff000000
+#define CNA_CONV_CON2_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_CONV_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_0__SHIFT) & CNA_CONV_CON2_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON2_KERNEL_GROUP__MASK 0x00ff0000
+#define CNA_CONV_CON2_KERNEL_GROUP__SHIFT 16
+static inline uint32_t CNA_CONV_CON2_KERNEL_GROUP(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_KERNEL_GROUP__SHIFT) & CNA_CONV_CON2_KERNEL_GROUP__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_1__MASK 0x0000c000
+#define CNA_CONV_CON2_RESERVED_1__SHIFT 14
+static inline uint32_t CNA_CONV_CON2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_1__SHIFT) & CNA_CONV_CON2_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON2_FEATURE_GRAINS__MASK 0x00003ff0
+#define CNA_CONV_CON2_FEATURE_GRAINS__SHIFT 4
+static inline uint32_t CNA_CONV_CON2_FEATURE_GRAINS(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_FEATURE_GRAINS__SHIFT) & CNA_CONV_CON2_FEATURE_GRAINS__MASK;
+}
+#define CNA_CONV_CON2_RESERVED_2__MASK 0x00000008
+#define CNA_CONV_CON2_RESERVED_2__SHIFT 3
+static inline uint32_t CNA_CONV_CON2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_RESERVED_2__SHIFT) & CNA_CONV_CON2_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON2_CSC_WO_EN__MASK 0x00000004
+#define CNA_CONV_CON2_CSC_WO_EN__SHIFT 2
+static inline uint32_t CNA_CONV_CON2_CSC_WO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_WO_EN__SHIFT) & CNA_CONV_CON2_CSC_WO_EN__MASK;
+}
+#define CNA_CONV_CON2_CSC_DO_EN__MASK 0x00000002
+#define CNA_CONV_CON2_CSC_DO_EN__SHIFT 1
+static inline uint32_t CNA_CONV_CON2_CSC_DO_EN(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CSC_DO_EN__SHIFT) & CNA_CONV_CON2_CSC_DO_EN__MASK;
+}
+#define CNA_CONV_CON2_CMD_FIFO_SRST__MASK 0x00000001
+#define CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT 0
+static inline uint32_t CNA_CONV_CON2_CMD_FIFO_SRST(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON2_CMD_FIFO_SRST__SHIFT) & CNA_CONV_CON2_CMD_FIFO_SRST__MASK;
+}
+
+#define REG_CNA_CONV_CON3 0x00001014
+#define CNA_CONV_CON3_RESERVED_0__MASK 0x80000000
+#define CNA_CONV_CON3_RESERVED_0__SHIFT 31
+static inline uint32_t CNA_CONV_CON3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_0__SHIFT) & CNA_CONV_CON3_RESERVED_0__MASK;
+}
+#define CNA_CONV_CON3_NN_MODE__MASK 0x70000000
+#define CNA_CONV_CON3_NN_MODE__SHIFT 28
+static inline uint32_t CNA_CONV_CON3_NN_MODE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_NN_MODE__SHIFT) & CNA_CONV_CON3_NN_MODE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_1__MASK 0x0c000000
+#define CNA_CONV_CON3_RESERVED_1__SHIFT 26
+static inline uint32_t CNA_CONV_CON3_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_1__SHIFT) & CNA_CONV_CON3_RESERVED_1__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__MASK 0x03e00000
+#define CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT 21
+static inline uint32_t CNA_CONV_CON3_ATROUS_Y_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_Y_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_Y_DILATION__MASK;
+}
+#define CNA_CONV_CON3_ATROUS_X_DILATION__MASK 0x001f0000
+#define CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT 16
+static inline uint32_t CNA_CONV_CON3_ATROUS_X_DILATION(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_ATROUS_X_DILATION__SHIFT) & CNA_CONV_CON3_ATROUS_X_DILATION__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_2__MASK 0x0000c000
+#define CNA_CONV_CON3_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_CONV_CON3_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_2__SHIFT) & CNA_CONV_CON3_RESERVED_2__MASK;
+}
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__MASK 0x00003800
+#define CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT 11
+static inline uint32_t CNA_CONV_CON3_DECONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_DECONV_X_STRIDE__MASK 0x00000700
+#define CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT 8
+static inline uint32_t CNA_CONV_CON3_DECONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_DECONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_DECONV_X_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_RESERVED_3__MASK 0x000000c0
+#define CNA_CONV_CON3_RESERVED_3__SHIFT 6
+static inline uint32_t CNA_CONV_CON3_RESERVED_3(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_RESERVED_3__SHIFT) & CNA_CONV_CON3_RESERVED_3__MASK;
+}
+#define CNA_CONV_CON3_CONV_Y_STRIDE__MASK 0x00000038
+#define CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT 3
+static inline uint32_t CNA_CONV_CON3_CONV_Y_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_Y_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_Y_STRIDE__MASK;
+}
+#define CNA_CONV_CON3_CONV_X_STRIDE__MASK 0x00000007
+#define CNA_CONV_CON3_CONV_X_STRIDE__SHIFT 0
+static inline uint32_t CNA_CONV_CON3_CONV_X_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_CONV_CON3_CONV_X_STRIDE__SHIFT) & CNA_CONV_CON3_CONV_X_STRIDE__MASK;
+}
+
+#define REG_CNA_DATA_SIZE0 0x00001020
+#define CNA_DATA_SIZE0_RESERVED_0__MASK 0xf8000000
+#define CNA_DATA_SIZE0_RESERVED_0__SHIFT 27
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__MASK 0x07ff0000
+#define CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_WIDTH__SHIFT) & CNA_DATA_SIZE0_DATAIN_WIDTH__MASK;
+}
+#define CNA_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK 0x000007ff
+#define CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE0_DATAIN_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE0_DATAIN_HEIGHT__SHIFT) & CNA_DATA_SIZE0_DATAIN_HEIGHT__MASK;
+}
+
+#define REG_CNA_DATA_SIZE1 0x00001024
+#define CNA_DATA_SIZE1_RESERVED_0__MASK 0xc0000000
+#define CNA_DATA_SIZE1_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK 0x3fff0000
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT 16
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL_REAL__MASK;
+}
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK 0x0000ffff
+#define CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE1_DATAIN_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE1_DATAIN_CHANNEL__SHIFT) & CNA_DATA_SIZE1_DATAIN_CHANNEL__MASK;
+}
+
+#define REG_CNA_DATA_SIZE2 0x00001028
+#define CNA_DATA_SIZE2_RESERVED_0__MASK 0xfffff800
+#define CNA_DATA_SIZE2_RESERVED_0__SHIFT 11
+static inline uint32_t CNA_DATA_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_RESERVED_0__SHIFT) & CNA_DATA_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK 0x000007ff
+#define CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE2_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE2_DATAOUT_WIDTH__SHIFT) & CNA_DATA_SIZE2_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CNA_DATA_SIZE3 0x0000102c
+#define CNA_DATA_SIZE3_RESERVED_0__MASK 0xff000000
+#define CNA_DATA_SIZE3_RESERVED_0__SHIFT 24
+static inline uint32_t CNA_DATA_SIZE3_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_RESERVED_0__SHIFT) & CNA_DATA_SIZE3_RESERVED_0__MASK;
+}
+#define CNA_DATA_SIZE3_SURF_MODE__MASK 0x00c00000
+#define CNA_DATA_SIZE3_SURF_MODE__SHIFT 22
+static inline uint32_t CNA_DATA_SIZE3_SURF_MODE(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_SURF_MODE__SHIFT) & CNA_DATA_SIZE3_SURF_MODE__MASK;
+}
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK 0x003fffff
+#define CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT 0
+static inline uint32_t CNA_DATA_SIZE3_DATAOUT_ATOMICS(uint32_t val)
+{
+ return ((val) << CNA_DATA_SIZE3_DATAOUT_ATOMICS__SHIFT) & CNA_DATA_SIZE3_DATAOUT_ATOMICS__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE0 0x00001030
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK 0xffffffff
+#define CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE0_WEIGHT_BYTES(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE0_WEIGHT_BYTES__SHIFT) & CNA_WEIGHT_SIZE0_WEIGHT_BYTES__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE1 0x00001034
+#define CNA_WEIGHT_SIZE1_RESERVED_0__MASK 0xfff80000
+#define CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT 19
+static inline uint32_t CNA_WEIGHT_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK 0x0007ffff
+#define CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__SHIFT) & CNA_WEIGHT_SIZE1_WEIGHT_BYTES_PER_KERNEL__MASK;
+}
+
+#define REG_CNA_WEIGHT_SIZE2 0x00001038
+#define CNA_WEIGHT_SIZE2_RESERVED_0__MASK 0xe0000000
+#define CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT 29
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_0__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_0__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK 0x1f000000
+#define CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT 24
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_WIDTH__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_1__MASK 0x00e00000
+#define CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT 21
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_1__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_1__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK 0x001f0000
+#define CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT 16
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_HEIGHT__MASK;
+}
+#define CNA_WEIGHT_SIZE2_RESERVED_2__MASK 0x0000c000
+#define CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT 14
+static inline uint32_t CNA_WEIGHT_SIZE2_RESERVED_2(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_RESERVED_2__SHIFT) & CNA_WEIGHT_SIZE2_RESERVED_2__MASK;
+}
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK 0x00003fff
+#define CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT 0
+static inline uint32_t CNA_WEIGHT_SIZE2_WEIGHT_KERNELS(uint32_t val)
+{
+ return ((val) << CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__SHIFT) & CNA_WEIGHT_SIZE2_WEIGHT_KERNELS__MASK;
+}
+
+#define REG_CNA_CBUF_CON0 0x00001040
+#define CNA_CBUF_CON0_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON0_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_0__SHIFT) & CNA_CBUF_CON0_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_REUSE__MASK 0x00002000
+#define CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT 13
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_REUSE__SHIFT) & CNA_CBUF_CON0_WEIGHT_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_DATA_REUSE__MASK 0x00001000
+#define CNA_CBUF_CON0_DATA_REUSE__SHIFT 12
+static inline uint32_t CNA_CBUF_CON0_DATA_REUSE(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_REUSE__SHIFT) & CNA_CBUF_CON0_DATA_REUSE__MASK;
+}
+#define CNA_CBUF_CON0_RESERVED_1__MASK 0x00000800
+#define CNA_CBUF_CON0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_CBUF_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_RESERVED_1__SHIFT) & CNA_CBUF_CON0_RESERVED_1__MASK;
+}
+#define CNA_CBUF_CON0_FC_DATA_BANK__MASK 0x00000700
+#define CNA_CBUF_CON0_FC_DATA_BANK__SHIFT 8
+static inline uint32_t CNA_CBUF_CON0_FC_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_FC_DATA_BANK__SHIFT) & CNA_CBUF_CON0_FC_DATA_BANK__MASK;
+}
+#define CNA_CBUF_CON0_WEIGHT_BANK__MASK 0x000000f0
+#define CNA_CBUF_CON0_WEIGHT_BANK__SHIFT 4
+static inline uint32_t CNA_CBUF_CON0_WEIGHT_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_WEIGHT_BANK__SHIFT) & CNA_CBUF_CON0_WEIGHT_BANK__MASK;
+}
+#define CNA_CBUF_CON0_DATA_BANK__MASK 0x0000000f
+#define CNA_CBUF_CON0_DATA_BANK__SHIFT 0
+static inline uint32_t CNA_CBUF_CON0_DATA_BANK(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON0_DATA_BANK__SHIFT) & CNA_CBUF_CON0_DATA_BANK__MASK;
+}
+
+#define REG_CNA_CBUF_CON1 0x00001044
+#define CNA_CBUF_CON1_RESERVED_0__MASK 0xffffc000
+#define CNA_CBUF_CON1_RESERVED_0__SHIFT 14
+static inline uint32_t CNA_CBUF_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_RESERVED_0__SHIFT) & CNA_CBUF_CON1_RESERVED_0__MASK;
+}
+#define CNA_CBUF_CON1_DATA_ENTRIES__MASK 0x00003fff
+#define CNA_CBUF_CON1_DATA_ENTRIES__SHIFT 0
+static inline uint32_t CNA_CBUF_CON1_DATA_ENTRIES(uint32_t val)
+{
+ return ((val) << CNA_CBUF_CON1_DATA_ENTRIES__SHIFT) & CNA_CBUF_CON1_DATA_ENTRIES__MASK;
+}
+
+#define REG_CNA_CVT_CON0 0x0000104c
+#define CNA_CVT_CON0_RESERVED_0__MASK 0xf0000000
+#define CNA_CVT_CON0_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_CVT_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_RESERVED_0__SHIFT) & CNA_CVT_CON0_RESERVED_0__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__MASK 0x0fc00000
+#define CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT 22
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_3__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_3__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__MASK 0x003f0000
+#define CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT 16
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_2__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_2__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__MASK 0x0000fc00
+#define CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT 10
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_1__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_1__MASK;
+}
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__MASK 0x000003f0
+#define CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT 4
+static inline uint32_t CNA_CVT_CON0_CVT_TRUNCATE_0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TRUNCATE_0__SHIFT) & CNA_CVT_CON0_CVT_TRUNCATE_0__MASK;
+}
+#define CNA_CVT_CON0_DATA_SIGN__MASK 0x00000008
+#define CNA_CVT_CON0_DATA_SIGN__SHIFT 3
+static inline uint32_t CNA_CVT_CON0_DATA_SIGN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_DATA_SIGN__SHIFT) & CNA_CVT_CON0_DATA_SIGN__MASK;
+}
+#define CNA_CVT_CON0_ROUND_TYPE__MASK 0x00000004
+#define CNA_CVT_CON0_ROUND_TYPE__SHIFT 2
+static inline uint32_t CNA_CVT_CON0_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_ROUND_TYPE__SHIFT) & CNA_CVT_CON0_ROUND_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_TYPE__MASK 0x00000002
+#define CNA_CVT_CON0_CVT_TYPE__SHIFT 1
+static inline uint32_t CNA_CVT_CON0_CVT_TYPE(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_TYPE__SHIFT) & CNA_CVT_CON0_CVT_TYPE__MASK;
+}
+#define CNA_CVT_CON0_CVT_BYPASS__MASK 0x00000001
+#define CNA_CVT_CON0_CVT_BYPASS__SHIFT 0
+static inline uint32_t CNA_CVT_CON0_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON0_CVT_BYPASS__SHIFT) & CNA_CVT_CON0_CVT_BYPASS__MASK;
+}
+
+#define REG_CNA_CVT_CON1 0x00001050
+#define CNA_CVT_CON1_CVT_SCALE0__MASK 0xffff0000
+#define CNA_CVT_CON1_CVT_SCALE0__SHIFT 16
+static inline uint32_t CNA_CVT_CON1_CVT_SCALE0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_SCALE0__SHIFT) & CNA_CVT_CON1_CVT_SCALE0__MASK;
+}
+#define CNA_CVT_CON1_CVT_OFFSET0__MASK 0x0000ffff
+#define CNA_CVT_CON1_CVT_OFFSET0__SHIFT 0
+static inline uint32_t CNA_CVT_CON1_CVT_OFFSET0(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON1_CVT_OFFSET0__SHIFT) & CNA_CVT_CON1_CVT_OFFSET0__MASK;
+}
+
+#define REG_CNA_CVT_CON2 0x00001054
+#define CNA_CVT_CON2_CVT_SCALE1__MASK 0xffff0000
+#define CNA_CVT_CON2_CVT_SCALE1__SHIFT 16
+static inline uint32_t CNA_CVT_CON2_CVT_SCALE1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_SCALE1__SHIFT) & CNA_CVT_CON2_CVT_SCALE1__MASK;
+}
+#define CNA_CVT_CON2_CVT_OFFSET1__MASK 0x0000ffff
+#define CNA_CVT_CON2_CVT_OFFSET1__SHIFT 0
+static inline uint32_t CNA_CVT_CON2_CVT_OFFSET1(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON2_CVT_OFFSET1__SHIFT) & CNA_CVT_CON2_CVT_OFFSET1__MASK;
+}
+
+#define REG_CNA_CVT_CON3 0x00001058
+#define CNA_CVT_CON3_CVT_SCALE2__MASK 0xffff0000
+#define CNA_CVT_CON3_CVT_SCALE2__SHIFT 16
+static inline uint32_t CNA_CVT_CON3_CVT_SCALE2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_SCALE2__SHIFT) & CNA_CVT_CON3_CVT_SCALE2__MASK;
+}
+#define CNA_CVT_CON3_CVT_OFFSET2__MASK 0x0000ffff
+#define CNA_CVT_CON3_CVT_OFFSET2__SHIFT 0
+static inline uint32_t CNA_CVT_CON3_CVT_OFFSET2(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON3_CVT_OFFSET2__SHIFT) & CNA_CVT_CON3_CVT_OFFSET2__MASK;
+}
+
+#define REG_CNA_CVT_CON4 0x0000105c
+#define CNA_CVT_CON4_CVT_SCALE3__MASK 0xffff0000
+#define CNA_CVT_CON4_CVT_SCALE3__SHIFT 16
+static inline uint32_t CNA_CVT_CON4_CVT_SCALE3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_SCALE3__SHIFT) & CNA_CVT_CON4_CVT_SCALE3__MASK;
+}
+#define CNA_CVT_CON4_CVT_OFFSET3__MASK 0x0000ffff
+#define CNA_CVT_CON4_CVT_OFFSET3__SHIFT 0
+static inline uint32_t CNA_CVT_CON4_CVT_OFFSET3(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON4_CVT_OFFSET3__SHIFT) & CNA_CVT_CON4_CVT_OFFSET3__MASK;
+}
+
+#define REG_CNA_FC_CON0 0x00001060
+#define CNA_FC_CON0_FC_SKIP_DATA__MASK 0xffff0000
+#define CNA_FC_CON0_FC_SKIP_DATA__SHIFT 16
+static inline uint32_t CNA_FC_CON0_FC_SKIP_DATA(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_DATA__SHIFT) & CNA_FC_CON0_FC_SKIP_DATA__MASK;
+}
+#define CNA_FC_CON0_RESERVED_0__MASK 0x0000fffe
+#define CNA_FC_CON0_RESERVED_0__SHIFT 1
+static inline uint32_t CNA_FC_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_RESERVED_0__SHIFT) & CNA_FC_CON0_RESERVED_0__MASK;
+}
+#define CNA_FC_CON0_FC_SKIP_EN__MASK 0x00000001
+#define CNA_FC_CON0_FC_SKIP_EN__SHIFT 0
+static inline uint32_t CNA_FC_CON0_FC_SKIP_EN(uint32_t val)
+{
+ return ((val) << CNA_FC_CON0_FC_SKIP_EN__SHIFT) & CNA_FC_CON0_FC_SKIP_EN__MASK;
+}
+
+#define REG_CNA_FC_CON1 0x00001064
+#define CNA_FC_CON1_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON1_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_RESERVED_0__SHIFT) & CNA_FC_CON1_RESERVED_0__MASK;
+}
+#define CNA_FC_CON1_DATA_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON1_DATA_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON1_DATA_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON1_DATA_OFFSET__SHIFT) & CNA_FC_CON1_DATA_OFFSET__MASK;
+}
+
+#define REG_CNA_PAD_CON0 0x00001068
+#define CNA_PAD_CON0_RESERVED_0__MASK 0xffffff00
+#define CNA_PAD_CON0_RESERVED_0__SHIFT 8
+static inline uint32_t CNA_PAD_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_RESERVED_0__SHIFT) & CNA_PAD_CON0_RESERVED_0__MASK;
+}
+#define CNA_PAD_CON0_PAD_LEFT__MASK 0x000000f0
+#define CNA_PAD_CON0_PAD_LEFT__SHIFT 4
+static inline uint32_t CNA_PAD_CON0_PAD_LEFT(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_LEFT__SHIFT) & CNA_PAD_CON0_PAD_LEFT__MASK;
+}
+#define CNA_PAD_CON0_PAD_TOP__MASK 0x0000000f
+#define CNA_PAD_CON0_PAD_TOP__SHIFT 0
+static inline uint32_t CNA_PAD_CON0_PAD_TOP(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON0_PAD_TOP__SHIFT) & CNA_PAD_CON0_PAD_TOP__MASK;
+}
+
+#define REG_CNA_FEATURE_DATA_ADDR 0x00001070
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK 0xffffffff
+#define CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT 0
+static inline uint32_t CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR(uint32_t val)
+{
+ return ((val) << CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__SHIFT) & CNA_FEATURE_DATA_ADDR_FEATURE_BASE_ADDR__MASK;
+}
+
+#define REG_CNA_FC_CON2 0x00001074
+#define CNA_FC_CON2_RESERVED_0__MASK 0xfffe0000
+#define CNA_FC_CON2_RESERVED_0__SHIFT 17
+static inline uint32_t CNA_FC_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_RESERVED_0__SHIFT) & CNA_FC_CON2_RESERVED_0__MASK;
+}
+#define CNA_FC_CON2_WEIGHT_OFFSET__MASK 0x0001ffff
+#define CNA_FC_CON2_WEIGHT_OFFSET__SHIFT 0
+static inline uint32_t CNA_FC_CON2_WEIGHT_OFFSET(uint32_t val)
+{
+ return ((val) << CNA_FC_CON2_WEIGHT_OFFSET__SHIFT) & CNA_FC_CON2_WEIGHT_OFFSET__MASK;
+}
+
+#define REG_CNA_DMA_CON0 0x00001078
+#define CNA_DMA_CON0_OV4K_BYPASS__MASK 0x80000000
+#define CNA_DMA_CON0_OV4K_BYPASS__SHIFT 31
+static inline uint32_t CNA_DMA_CON0_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_OV4K_BYPASS__SHIFT) & CNA_DMA_CON0_OV4K_BYPASS__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_0__MASK 0x7ff00000
+#define CNA_DMA_CON0_RESERVED_0__SHIFT 20
+static inline uint32_t CNA_DMA_CON0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_0__SHIFT) & CNA_DMA_CON0_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK 0x000f0000
+#define CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT 16
+static inline uint32_t CNA_DMA_CON0_WEIGHT_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_WEIGHT_BURST_LEN__SHIFT) & CNA_DMA_CON0_WEIGHT_BURST_LEN__MASK;
+}
+#define CNA_DMA_CON0_RESERVED_1__MASK 0x0000fff0
+#define CNA_DMA_CON0_RESERVED_1__SHIFT 4
+static inline uint32_t CNA_DMA_CON0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_RESERVED_1__SHIFT) & CNA_DMA_CON0_RESERVED_1__MASK;
+}
+#define CNA_DMA_CON0_DATA_BURST_LEN__MASK 0x0000000f
+#define CNA_DMA_CON0_DATA_BURST_LEN__SHIFT 0
+static inline uint32_t CNA_DMA_CON0_DATA_BURST_LEN(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON0_DATA_BURST_LEN__SHIFT) & CNA_DMA_CON0_DATA_BURST_LEN__MASK;
+}
+
+#define REG_CNA_DMA_CON1 0x0000107c
+#define CNA_DMA_CON1_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON1_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_RESERVED_0__SHIFT) & CNA_DMA_CON1_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON1_LINE_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON1_LINE_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON1_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON1_LINE_STRIDE__SHIFT) & CNA_DMA_CON1_LINE_STRIDE__MASK;
+}
+
+#define REG_CNA_DMA_CON2 0x00001080
+#define CNA_DMA_CON2_RESERVED_0__MASK 0xf0000000
+#define CNA_DMA_CON2_RESERVED_0__SHIFT 28
+static inline uint32_t CNA_DMA_CON2_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_RESERVED_0__SHIFT) & CNA_DMA_CON2_RESERVED_0__MASK;
+}
+#define CNA_DMA_CON2_SURF_STRIDE__MASK 0x0fffffff
+#define CNA_DMA_CON2_SURF_STRIDE__SHIFT 0
+static inline uint32_t CNA_DMA_CON2_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << CNA_DMA_CON2_SURF_STRIDE__SHIFT) & CNA_DMA_CON2_SURF_STRIDE__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE0 0x00001084
+#define CNA_FC_DATA_SIZE0_RESERVED_0__MASK 0xc0000000
+#define CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT 30
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK 0x3fff0000
+#define CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_WIDTH(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_WIDTH__SHIFT) & CNA_FC_DATA_SIZE0_DMA_WIDTH__MASK;
+}
+#define CNA_FC_DATA_SIZE0_RESERVED_1__MASK 0x0000f800
+#define CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT 11
+static inline uint32_t CNA_FC_DATA_SIZE0_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_RESERVED_1__SHIFT) & CNA_FC_DATA_SIZE0_RESERVED_1__MASK;
+}
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK 0x000007ff
+#define CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE0_DMA_HEIGHT(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE0_DMA_HEIGHT__SHIFT) & CNA_FC_DATA_SIZE0_DMA_HEIGHT__MASK;
+}
+
+#define REG_CNA_FC_DATA_SIZE1 0x00001088
+#define CNA_FC_DATA_SIZE1_RESERVED_0__MASK 0xffff0000
+#define CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT 16
+static inline uint32_t CNA_FC_DATA_SIZE1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_RESERVED_0__SHIFT) & CNA_FC_DATA_SIZE1_RESERVED_0__MASK;
+}
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK 0x0000ffff
+#define CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT 0
+static inline uint32_t CNA_FC_DATA_SIZE1_DMA_CHANNEL(uint32_t val)
+{
+ return ((val) << CNA_FC_DATA_SIZE1_DMA_CHANNEL__SHIFT) & CNA_FC_DATA_SIZE1_DMA_CHANNEL__MASK;
+}
+
+#define REG_CNA_CLK_GATE 0x00001090
+#define CNA_CLK_GATE_RESERVED_0__MASK 0xffffffe0
+#define CNA_CLK_GATE_RESERVED_0__SHIFT 5
+static inline uint32_t CNA_CLK_GATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_0__SHIFT) & CNA_CLK_GATE_RESERVED_0__MASK;
+}
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK 0x00000010
+#define CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT 4
+static inline uint32_t CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CBUF_CS_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_RESERVED_1__MASK 0x00000008
+#define CNA_CLK_GATE_RESERVED_1__SHIFT 3
+static inline uint32_t CNA_CLK_GATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_RESERVED_1__SHIFT) & CNA_CLK_GATE_RESERVED_1__MASK;
+}
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK 0x00000004
+#define CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT 2
+static inline uint32_t CNA_CLK_GATE_CSC_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CSC_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CSC_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK 0x00000002
+#define CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT 1
+static inline uint32_t CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_WEIGHT_DISABLE_CLKGATE__MASK;
+}
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK 0x00000001
+#define CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT 0
+static inline uint32_t CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE(uint32_t val)
+{
+ return ((val) << CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__SHIFT) & CNA_CLK_GATE_CNA_FEATURE_DISABLE_CLKGATE__MASK;
+}
+
+#define REG_CNA_DCOMP_CTRL 0x00001100
+#define CNA_DCOMP_CTRL_RESERVED_0__MASK 0xfffffff0
+#define CNA_DCOMP_CTRL_RESERVED_0__SHIFT 4
+static inline uint32_t CNA_DCOMP_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_RESERVED_0__SHIFT) & CNA_DCOMP_CTRL_RESERVED_0__MASK;
+}
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK 0x00000008
+#define CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT 3
+static inline uint32_t CNA_DCOMP_CTRL_WT_DEC_BYPASS(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_WT_DEC_BYPASS__SHIFT) & CNA_DCOMP_CTRL_WT_DEC_BYPASS__MASK;
+}
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK 0x00000007
+#define CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT 0
+static inline uint32_t CNA_DCOMP_CTRL_DECOMP_CONTROL(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_CTRL_DECOMP_CONTROL__SHIFT) & CNA_DCOMP_CTRL_DECOMP_CONTROL__MASK;
+}
+
+#define REG_CNA_DCOMP_REGNUM 0x00001104
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK 0xffffffff
+#define CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT 0
+static inline uint32_t CNA_DCOMP_REGNUM_DCOMP_REGNUM(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_REGNUM_DCOMP_REGNUM__SHIFT) & CNA_DCOMP_REGNUM_DCOMP_REGNUM__MASK;
+}
+
+#define REG_CNA_DCOMP_ADDR0 0x00001110
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK 0xffffffff
+#define CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT 0
+static inline uint32_t CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__SHIFT) & CNA_DCOMP_ADDR0_DECOMPRESS_ADDR0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT0 0x00001140
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__SHIFT) & CNA_DCOMP_AMOUNT0_DCOMP_AMOUNT0__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT1 0x00001144
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__SHIFT) & CNA_DCOMP_AMOUNT1_DCOMP_AMOUNT1__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT2 0x00001148
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__SHIFT) & CNA_DCOMP_AMOUNT2_DCOMP_AMOUNT2__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT3 0x0000114c
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__SHIFT) & CNA_DCOMP_AMOUNT3_DCOMP_AMOUNT3__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT4 0x00001150
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__SHIFT) & CNA_DCOMP_AMOUNT4_DCOMP_AMOUNT4__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT5 0x00001154
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__SHIFT) & CNA_DCOMP_AMOUNT5_DCOMP_AMOUNT5__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT6 0x00001158
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__SHIFT) & CNA_DCOMP_AMOUNT6_DCOMP_AMOUNT6__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT7 0x0000115c
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__SHIFT) & CNA_DCOMP_AMOUNT7_DCOMP_AMOUNT7__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT8 0x00001160
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__SHIFT) & CNA_DCOMP_AMOUNT8_DCOMP_AMOUNT8__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT9 0x00001164
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__SHIFT) & CNA_DCOMP_AMOUNT9_DCOMP_AMOUNT9__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT10 0x00001168
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__SHIFT) & CNA_DCOMP_AMOUNT10_DCOMP_AMOUNT10__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT11 0x0000116c
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__SHIFT) & CNA_DCOMP_AMOUNT11_DCOMP_AMOUNT11__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT12 0x00001170
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__SHIFT) & CNA_DCOMP_AMOUNT12_DCOMP_AMOUNT12__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT13 0x00001174
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__SHIFT) & CNA_DCOMP_AMOUNT13_DCOMP_AMOUNT13__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT14 0x00001178
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__SHIFT) & CNA_DCOMP_AMOUNT14_DCOMP_AMOUNT14__MASK;
+}
+
+#define REG_CNA_DCOMP_AMOUNT15 0x0000117c
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK 0xffffffff
+#define CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT 0
+static inline uint32_t CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15(uint32_t val)
+{
+ return ((val) << CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__SHIFT) & CNA_DCOMP_AMOUNT15_DCOMP_AMOUNT15__MASK;
+}
+
+#define REG_CNA_CVT_CON5 0x00001180
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK 0xffffffff
+#define CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT 0
+static inline uint32_t CNA_CVT_CON5_PER_CHANNEL_CVT_EN(uint32_t val)
+{
+ return ((val) << CNA_CVT_CON5_PER_CHANNEL_CVT_EN__SHIFT) & CNA_CVT_CON5_PER_CHANNEL_CVT_EN__MASK;
+}
+
+#define REG_CNA_PAD_CON1 0x00001184
+#define CNA_PAD_CON1_PAD_VALUE__MASK 0xffffffff
+#define CNA_PAD_CON1_PAD_VALUE__SHIFT 0
+static inline uint32_t CNA_PAD_CON1_PAD_VALUE(uint32_t val)
+{
+ return ((val) << CNA_PAD_CON1_PAD_VALUE__SHIFT) & CNA_PAD_CON1_PAD_VALUE__MASK;
+}
+
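+/*
+ * CORE register block (base offset 0x3000). The pattern below is the same
+ * as for the CNA block above: each field gets a *__MASK/*__SHIFT pair and
+ * a static inline packer that returns ((val) << SHIFT) & MASK, so the
+ * fields of one register can be OR-ed together into a single 32-bit
+ * value. No extract helpers are generated; decoding a field is the usual
+ * (reg & MASK) >> SHIFT.
+ */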
+#define REG_CORE_S_STATUS 0x00003000
+#define CORE_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define CORE_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t CORE_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_0__SHIFT) & CORE_S_STATUS_RESERVED_0__MASK;
+}
+#define CORE_S_STATUS_STATUS_1__MASK 0x00030000
+#define CORE_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t CORE_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_1__SHIFT) & CORE_S_STATUS_STATUS_1__MASK;
+}
+#define CORE_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define CORE_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t CORE_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_RESERVED_1__SHIFT) & CORE_S_STATUS_RESERVED_1__MASK;
+}
+#define CORE_S_STATUS_STATUS_0__MASK 0x00000003
+#define CORE_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t CORE_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << CORE_S_STATUS_STATUS_0__SHIFT) & CORE_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_CORE_S_POINTER 0x00003004
+#define CORE_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define CORE_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t CORE_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_0__SHIFT) & CORE_S_POINTER_RESERVED_0__MASK;
+}
+#define CORE_S_POINTER_EXECUTER__MASK 0x00010000
+#define CORE_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t CORE_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER__SHIFT) & CORE_S_POINTER_EXECUTER__MASK;
+}
+#define CORE_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define CORE_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t CORE_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_RESERVED_1__SHIFT) & CORE_S_POINTER_RESERVED_1__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t CORE_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_CLEAR__SHIFT) & CORE_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define CORE_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t CORE_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_MODE__SHIFT) & CORE_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define CORE_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define CORE_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t CORE_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_EXECUTER_PP_EN__SHIFT) & CORE_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define CORE_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t CORE_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER_PP_EN__SHIFT) & CORE_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define CORE_S_POINTER_POINTER__MASK 0x00000001
+#define CORE_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t CORE_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << CORE_S_POINTER_POINTER__SHIFT) & CORE_S_POINTER_POINTER__MASK;
+}
+
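+/*
+ * The *_S_STATUS/*_S_POINTER pair recurs at the start of every block
+ * (CORE here, DPU below). Judging by the field names alone, the PP
+ * fields presumably select between ping-pong (double-buffered) register
+ * groups, so one group can be programmed while the other executes; this
+ * is an inference from the naming, not from the register database.
+ */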
+#define REG_CORE_OPERATION_ENABLE 0x00003008
+#define CORE_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define CORE_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t CORE_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_RESERVED_0__SHIFT) & CORE_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define CORE_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define CORE_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t CORE_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_OPERATION_ENABLE_OP_EN__SHIFT) & CORE_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_CORE_MAC_GATING 0x0000300c
+#define CORE_MAC_GATING_RESERVED_0__MASK 0xf8000000
+#define CORE_MAC_GATING_RESERVED_0__SHIFT 27
+static inline uint32_t CORE_MAC_GATING_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_RESERVED_0__SHIFT) & CORE_MAC_GATING_RESERVED_0__MASK;
+}
+#define CORE_MAC_GATING_SLCG_OP_EN__MASK 0x07ffffff
+#define CORE_MAC_GATING_SLCG_OP_EN__SHIFT 0
+static inline uint32_t CORE_MAC_GATING_SLCG_OP_EN(uint32_t val)
+{
+ return ((val) << CORE_MAC_GATING_SLCG_OP_EN__SHIFT) & CORE_MAC_GATING_SLCG_OP_EN__MASK;
+}
+
+#define REG_CORE_MISC_CFG 0x00003010
+#define CORE_MISC_CFG_RESERVED_0__MASK 0xfff00000
+#define CORE_MISC_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t CORE_MISC_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_0__SHIFT) & CORE_MISC_CFG_RESERVED_0__MASK;
+}
+#define CORE_MISC_CFG_SOFT_GATING__MASK 0x000fc000
+#define CORE_MISC_CFG_SOFT_GATING__SHIFT 14
+static inline uint32_t CORE_MISC_CFG_SOFT_GATING(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_SOFT_GATING__SHIFT) & CORE_MISC_CFG_SOFT_GATING__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_1__MASK 0x00003800
+#define CORE_MISC_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t CORE_MISC_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_1__SHIFT) & CORE_MISC_CFG_RESERVED_1__MASK;
+}
+#define CORE_MISC_CFG_PROC_PRECISION__MASK 0x00000700
+#define CORE_MISC_CFG_PROC_PRECISION__SHIFT 8
+static inline uint32_t CORE_MISC_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_PROC_PRECISION__SHIFT) & CORE_MISC_CFG_PROC_PRECISION__MASK;
+}
+#define CORE_MISC_CFG_RESERVED_2__MASK 0x000000fc
+#define CORE_MISC_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t CORE_MISC_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_RESERVED_2__SHIFT) & CORE_MISC_CFG_RESERVED_2__MASK;
+}
+#define CORE_MISC_CFG_DW_EN__MASK 0x00000002
+#define CORE_MISC_CFG_DW_EN__SHIFT 1
+static inline uint32_t CORE_MISC_CFG_DW_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_DW_EN__SHIFT) & CORE_MISC_CFG_DW_EN__MASK;
+}
+#define CORE_MISC_CFG_QD_EN__MASK 0x00000001
+#define CORE_MISC_CFG_QD_EN__SHIFT 0
+static inline uint32_t CORE_MISC_CFG_QD_EN(uint32_t val)
+{
+ return ((val) << CORE_MISC_CFG_QD_EN__SHIFT) & CORE_MISC_CFG_QD_EN__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_0 0x00003014
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_HEIGHT__MASK;
+}
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__SHIFT) & CORE_DATAOUT_SIZE_0_DATAOUT_WIDTH__MASK;
+}
+
+#define REG_CORE_DATAOUT_SIZE_1 0x00003018
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__MASK 0xffff0000
+#define CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT 16
+static inline uint32_t CORE_DATAOUT_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_RESERVED_0__SHIFT) & CORE_DATAOUT_SIZE_1_RESERVED_0__MASK;
+}
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK 0x0000ffff
+#define CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT 0
+static inline uint32_t CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL(uint32_t val)
+{
+ return ((val) << CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__SHIFT) & CORE_DATAOUT_SIZE_1_DATAOUT_CHANNEL__MASK;
+}
+
+#define REG_CORE_CLIP_TRUNCATE 0x0000301c
+#define CORE_CLIP_TRUNCATE_RESERVED_0__MASK 0xffffff80
+#define CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT 7
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_0(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_0__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_0__MASK;
+}
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK 0x00000040
+#define CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT 6
+static inline uint32_t CORE_CLIP_TRUNCATE_ROUND_TYPE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_ROUND_TYPE__SHIFT) & CORE_CLIP_TRUNCATE_ROUND_TYPE__MASK;
+}
+#define CORE_CLIP_TRUNCATE_RESERVED_1__MASK 0x00000020
+#define CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT 5
+static inline uint32_t CORE_CLIP_TRUNCATE_RESERVED_1(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_RESERVED_1__SHIFT) & CORE_CLIP_TRUNCATE_RESERVED_1__MASK;
+}
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK 0x0000001f
+#define CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT 0
+static inline uint32_t CORE_CLIP_TRUNCATE_CLIP_TRUNCATE(uint32_t val)
+{
+ return ((val) << CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__SHIFT) & CORE_CLIP_TRUNCATE_CLIP_TRUNCATE__MASK;
+}
+
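+/*
+ * DPU register block (base offset 0x4000). Illustrative use of the field
+ * packers (a sketch only, not code from this driver; 'base', 'mode' and
+ * 'burst' are assumed locals):
+ *
+ *	u32 cfg = DPU_FEATURE_MODE_CFG_CONV_MODE(mode) |
+ *		  DPU_FEATURE_MODE_CFG_BURST_LEN(burst) |
+ *		  DPU_FEATURE_MODE_CFG_FLYING_MODE(1);
+ *	writel(cfg, base + REG_DPU_FEATURE_MODE_CFG);
+ *
+ * Out-of-range values are silently truncated by the field mask.
+ */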
+#define REG_DPU_S_STATUS 0x00004000
+#define DPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_0__SHIFT) & DPU_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_1__SHIFT) & DPU_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_RESERVED_1__SHIFT) & DPU_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_S_STATUS_STATUS_0__SHIFT) & DPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_S_POINTER 0x00004004
+#define DPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_0__SHIFT) & DPU_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER__SHIFT) & DPU_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_RESERVED_1__SHIFT) & DPU_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_S_POINTER_POINTER__SHIFT) & DPU_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_OPERATION_ENABLE 0x00004008
+#define DPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_FEATURE_MODE_CFG 0x0000400c
+#define DPU_FEATURE_MODE_CFG_COMB_USE__MASK 0x80000000
+#define DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT 31
+static inline uint32_t DPU_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_TP_EN__MASK 0x40000000
+#define DPU_FEATURE_MODE_CFG_TP_EN__SHIFT 30
+static inline uint32_t DPU_FEATURE_MODE_CFG_TP_EN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_TP_EN__SHIFT) & DPU_FEATURE_MODE_CFG_TP_EN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK 0x3c000000
+#define DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT 26
+static inline uint32_t DPU_FEATURE_MODE_CFG_RGP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_RGP_TYPE__SHIFT) & DPU_FEATURE_MODE_CFG_RGP_TYPE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_NONALIGN__MASK 0x02000000
+#define DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT 25
+static inline uint32_t DPU_FEATURE_MODE_CFG_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_NONALIGN__SHIFT) & DPU_FEATURE_MODE_CFG_NONALIGN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__MASK 0x01fffe00
+#define DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT 9
+static inline uint32_t DPU_FEATURE_MODE_CFG_SURF_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_SURF_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_SURF_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__MASK 0x000001e0
+#define DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT 5
+static inline uint32_t DPU_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000018
+#define DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT 3
+static inline uint32_t DPU_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK 0x00000006
+#define DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT 1
+static inline uint32_t DPU_FEATURE_MODE_CFG_OUTPUT_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_OUTPUT_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_OUTPUT_MODE__MASK;
+}
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_DATA_FORMAT 0x00004010
+#define DPU_DATA_FORMAT_OUT_PRECISION__MASK 0xe0000000
+#define DPU_DATA_FORMAT_OUT_PRECISION__SHIFT 29
+static inline uint32_t DPU_DATA_FORMAT_OUT_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_OUT_PRECISION__SHIFT) & DPU_DATA_FORMAT_OUT_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_IN_PRECISION__MASK 0x1c000000
+#define DPU_DATA_FORMAT_IN_PRECISION__SHIFT 26
+static inline uint32_t DPU_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_IN_PRECISION__SHIFT) & DPU_DATA_FORMAT_IN_PRECISION__MASK;
+}
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK 0x03ff0000
+#define DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT 16
+static inline uint32_t DPU_DATA_FORMAT_EW_TRUNCATE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_EW_TRUNCATE_NEG__SHIFT) & DPU_DATA_FORMAT_EW_TRUNCATE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK 0x0000fc00
+#define DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT 10
+static inline uint32_t DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BN_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK 0x000003f0
+#define DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT 4
+static inline uint32_t DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__SHIFT) & DPU_DATA_FORMAT_BS_MUL_SHIFT_VALUE_NEG__MASK;
+}
+#define DPU_DATA_FORMAT_MC_SURF_OUT__MASK 0x00000008
+#define DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT 3
+static inline uint32_t DPU_DATA_FORMAT_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_MC_SURF_OUT__SHIFT) & DPU_DATA_FORMAT_MC_SURF_OUT__MASK;
+}
+#define DPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define DPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t DPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & DPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_DPU_OFFSET_PEND 0x00004014
+#define DPU_OFFSET_PEND_RESERVED_0__MASK 0xffff0000
+#define DPU_OFFSET_PEND_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_OFFSET_PEND_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_RESERVED_0__SHIFT) & DPU_OFFSET_PEND_RESERVED_0__MASK;
+}
+#define DPU_OFFSET_PEND_OFFSET_PEND__MASK 0x0000ffff
+#define DPU_OFFSET_PEND_OFFSET_PEND__SHIFT 0
+static inline uint32_t DPU_OFFSET_PEND_OFFSET_PEND(uint32_t val)
+{
+ return ((val) << DPU_OFFSET_PEND_OFFSET_PEND__SHIFT) & DPU_OFFSET_PEND_OFFSET_PEND__MASK;
+}
+
+#define REG_DPU_DST_BASE_ADDR 0x00004020
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xffffffff
+#define DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & DPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_DST_SURF_STRIDE 0x00004024
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & DPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define DPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_WIDTH 0x00004030
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_HEIGHT 0x00004034
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xfe000000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 25
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK 0x01c00000
+#define DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT 22
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_MINMAX_CTL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__SHIFT) & DPU_DATA_CUBE_HEIGHT_MINMAX_CTL__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x003fe000
+#define DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_NOTCH_ADDR 0x00004038
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK 0x1fff0000
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK 0x00001fff
+#define DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__SHIFT) & DPU_DATA_CUBE_NOTCH_ADDR_NOTCH_ADDR_0__MASK;
+}
+
+#define REG_DPU_DATA_CUBE_CHANNEL 0x0000403c
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xe0000000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK 0x1fff0000
+#define DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT 16
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_ORIG_CHANNEL__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK 0x0000e000
+#define DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_RESERVED_1__SHIFT) & DPU_DATA_CUBE_CHANNEL_RESERVED_1__MASK;
+}
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_BS_CFG 0x00004040
+#define DPU_BS_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BS_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_0__SHIFT) & DPU_BS_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_ALGO__MASK 0x000f0000
+#define DPU_BS_CFG_BS_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BS_CFG_BS_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_ALGO__SHIFT) & DPU_BS_CFG_BS_ALU_ALGO__MASK;
+}
+#define DPU_BS_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BS_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_1__SHIFT) & DPU_BS_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_SRC__MASK 0x00000100
+#define DPU_BS_CFG_BS_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BS_CFG_BS_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_SRC__SHIFT) & DPU_BS_CFG_BS_ALU_SRC__MASK;
+}
+#define DPU_BS_CFG_BS_RELUX_EN__MASK 0x00000080
+#define DPU_BS_CFG_BS_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BS_CFG_BS_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELUX_EN__SHIFT) & DPU_BS_CFG_BS_RELUX_EN__MASK;
+}
+#define DPU_BS_CFG_BS_RELU_BYPASS__MASK 0x00000040
+#define DPU_BS_CFG_BS_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BS_CFG_BS_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_RELU_BYPASS__SHIFT) & DPU_BS_CFG_BS_RELU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_PRELU__MASK 0x00000020
+#define DPU_BS_CFG_BS_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BS_CFG_BS_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_PRELU__SHIFT) & DPU_BS_CFG_BS_MUL_PRELU__MASK;
+}
+#define DPU_BS_CFG_BS_MUL_BYPASS__MASK 0x00000010
+#define DPU_BS_CFG_BS_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BS_CFG_BS_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_MUL_BYPASS__SHIFT) & DPU_BS_CFG_BS_MUL_BYPASS__MASK;
+}
+#define DPU_BS_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BS_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BS_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_RESERVED_2__SHIFT) & DPU_BS_CFG_RESERVED_2__MASK;
+}
+#define DPU_BS_CFG_BS_ALU_BYPASS__MASK 0x00000002
+#define DPU_BS_CFG_BS_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_CFG_BS_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_ALU_BYPASS__SHIFT) & DPU_BS_CFG_BS_ALU_BYPASS__MASK;
+}
+#define DPU_BS_CFG_BS_BYPASS__MASK 0x00000001
+#define DPU_BS_CFG_BS_BYPASS__SHIFT 0
+static inline uint32_t DPU_BS_CFG_BS_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_CFG_BS_BYPASS__SHIFT) & DPU_BS_CFG_BS_BYPASS__MASK;
+}
+
+#define REG_DPU_BS_ALU_CFG 0x00004044
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BS_ALU_CFG_BS_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_ALU_CFG_BS_ALU_OPERAND__SHIFT) & DPU_BS_ALU_CFG_BS_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BS_MUL_CFG 0x00004048
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_OPERAND__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_OPERAND__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BS_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_0__SHIFT) & DPU_BS_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BS_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BS_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BS_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_RESERVED_1__SHIFT) & DPU_BS_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BS_MUL_CFG_BS_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_TRUNCATE_SRC__MASK;
+}
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__MASK 0x00000001
+#define DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BS_MUL_CFG_BS_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_MUL_CFG_BS_MUL_SRC__SHIFT) & DPU_BS_MUL_CFG_BS_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BS_RELUX_CMP_VALUE 0x0000404c
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__SHIFT) & DPU_BS_RELUX_CMP_VALUE_BS_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_BS_OW_CFG 0x00004050
+#define DPU_BS_OW_CFG_RGP_CNTER__MASK 0xf0000000
+#define DPU_BS_OW_CFG_RGP_CNTER__SHIFT 28
+static inline uint32_t DPU_BS_OW_CFG_RGP_CNTER(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RGP_CNTER__SHIFT) & DPU_BS_OW_CFG_RGP_CNTER__MASK;
+}
+#define DPU_BS_OW_CFG_TP_ORG_EN__MASK 0x08000000
+#define DPU_BS_OW_CFG_TP_ORG_EN__SHIFT 27
+static inline uint32_t DPU_BS_OW_CFG_TP_ORG_EN(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_TP_ORG_EN__SHIFT) & DPU_BS_OW_CFG_TP_ORG_EN__MASK;
+}
+#define DPU_BS_OW_CFG_RESERVED_0__MASK 0x07fff800
+#define DPU_BS_OW_CFG_RESERVED_0__SHIFT 11
+static inline uint32_t DPU_BS_OW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_RESERVED_0__SHIFT) & DPU_BS_OW_CFG_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_2__MASK 0x00000700
+#define DPU_BS_OW_CFG_SIZE_E_2__SHIFT 8
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_2(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_2__SHIFT) & DPU_BS_OW_CFG_SIZE_E_2__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_1__MASK 0x000000e0
+#define DPU_BS_OW_CFG_SIZE_E_1__SHIFT 5
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_1(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_1__SHIFT) & DPU_BS_OW_CFG_SIZE_E_1__MASK;
+}
+#define DPU_BS_OW_CFG_SIZE_E_0__MASK 0x0000001c
+#define DPU_BS_OW_CFG_SIZE_E_0__SHIFT 2
+static inline uint32_t DPU_BS_OW_CFG_SIZE_E_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_SIZE_E_0__SHIFT) & DPU_BS_OW_CFG_SIZE_E_0__MASK;
+}
+#define DPU_BS_OW_CFG_OD_BYPASS__MASK 0x00000002
+#define DPU_BS_OW_CFG_OD_BYPASS__SHIFT 1
+static inline uint32_t DPU_BS_OW_CFG_OD_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OD_BYPASS__SHIFT) & DPU_BS_OW_CFG_OD_BYPASS__MASK;
+}
+#define DPU_BS_OW_CFG_OW_SRC__MASK 0x00000001
+#define DPU_BS_OW_CFG_OW_SRC__SHIFT 0
+static inline uint32_t DPU_BS_OW_CFG_OW_SRC(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_CFG_OW_SRC__SHIFT) & DPU_BS_OW_CFG_OW_SRC__MASK;
+}
+
+#define REG_DPU_BS_OW_OP 0x00004054
+#define DPU_BS_OW_OP_RESERVED_0__MASK 0xffff0000
+#define DPU_BS_OW_OP_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_BS_OW_OP_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_RESERVED_0__SHIFT) & DPU_BS_OW_OP_RESERVED_0__MASK;
+}
+#define DPU_BS_OW_OP_OW_OP__MASK 0x0000ffff
+#define DPU_BS_OW_OP_OW_OP__SHIFT 0
+static inline uint32_t DPU_BS_OW_OP_OW_OP(uint32_t val)
+{
+ return ((val) << DPU_BS_OW_OP_OW_OP__SHIFT) & DPU_BS_OW_OP_OW_OP__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_0 0x00004058
+#define DPU_WDMA_SIZE_0_RESERVED_0__MASK 0xf0000000
+#define DPU_WDMA_SIZE_0_RESERVED_0__SHIFT 28
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_0_TP_PRECISION__MASK 0x08000000
+#define DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT 27
+static inline uint32_t DPU_WDMA_SIZE_0_TP_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_TP_PRECISION__SHIFT) & DPU_WDMA_SIZE_0_TP_PRECISION__MASK;
+}
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK 0x07ff0000
+#define DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_0_SIZE_C_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_SIZE_C_WDMA__SHIFT) & DPU_WDMA_SIZE_0_SIZE_C_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_0_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_0_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_0_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_0_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_0_CHANNEL_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_0_CHANNEL_WDMA__SHIFT) & DPU_WDMA_SIZE_0_CHANNEL_WDMA__MASK;
+}
+
+#define REG_DPU_WDMA_SIZE_1 0x0000405c
+#define DPU_WDMA_SIZE_1_RESERVED_0__MASK 0xe0000000
+#define DPU_WDMA_SIZE_1_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_0__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_0__MASK;
+}
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK 0x1fff0000
+#define DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT 16
+static inline uint32_t DPU_WDMA_SIZE_1_HEIGHT_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_HEIGHT_WDMA__SHIFT) & DPU_WDMA_SIZE_1_HEIGHT_WDMA__MASK;
+}
+#define DPU_WDMA_SIZE_1_RESERVED_1__MASK 0x0000e000
+#define DPU_WDMA_SIZE_1_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_WDMA_SIZE_1_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_RESERVED_1__SHIFT) & DPU_WDMA_SIZE_1_RESERVED_1__MASK;
+}
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK 0x00001fff
+#define DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT 0
+static inline uint32_t DPU_WDMA_SIZE_1_WIDTH_WDMA(uint32_t val)
+{
+ return ((val) << DPU_WDMA_SIZE_1_WIDTH_WDMA__SHIFT) & DPU_WDMA_SIZE_1_WIDTH_WDMA__MASK;
+}
+
+#define REG_DPU_BN_CFG 0x00004060
+#define DPU_BN_CFG_RESERVED_0__MASK 0xfff00000
+#define DPU_BN_CFG_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_BN_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_0__SHIFT) & DPU_BN_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_ALGO__MASK 0x000f0000
+#define DPU_BN_CFG_BN_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_BN_CFG_BN_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_ALGO__SHIFT) & DPU_BN_CFG_BN_ALU_ALGO__MASK;
+}
+#define DPU_BN_CFG_RESERVED_1__MASK 0x0000fe00
+#define DPU_BN_CFG_RESERVED_1__SHIFT 9
+static inline uint32_t DPU_BN_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_1__SHIFT) & DPU_BN_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_SRC__MASK 0x00000100
+#define DPU_BN_CFG_BN_ALU_SRC__SHIFT 8
+static inline uint32_t DPU_BN_CFG_BN_ALU_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_SRC__SHIFT) & DPU_BN_CFG_BN_ALU_SRC__MASK;
+}
+#define DPU_BN_CFG_BN_RELUX_EN__MASK 0x00000080
+#define DPU_BN_CFG_BN_RELUX_EN__SHIFT 7
+static inline uint32_t DPU_BN_CFG_BN_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELUX_EN__SHIFT) & DPU_BN_CFG_BN_RELUX_EN__MASK;
+}
+#define DPU_BN_CFG_BN_RELU_BYPASS__MASK 0x00000040
+#define DPU_BN_CFG_BN_RELU_BYPASS__SHIFT 6
+static inline uint32_t DPU_BN_CFG_BN_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_RELU_BYPASS__SHIFT) & DPU_BN_CFG_BN_RELU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_PRELU__MASK 0x00000020
+#define DPU_BN_CFG_BN_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_BN_CFG_BN_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_PRELU__SHIFT) & DPU_BN_CFG_BN_MUL_PRELU__MASK;
+}
+#define DPU_BN_CFG_BN_MUL_BYPASS__MASK 0x00000010
+#define DPU_BN_CFG_BN_MUL_BYPASS__SHIFT 4
+static inline uint32_t DPU_BN_CFG_BN_MUL_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_MUL_BYPASS__SHIFT) & DPU_BN_CFG_BN_MUL_BYPASS__MASK;
+}
+#define DPU_BN_CFG_RESERVED_2__MASK 0x0000000c
+#define DPU_BN_CFG_RESERVED_2__SHIFT 2
+static inline uint32_t DPU_BN_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_RESERVED_2__SHIFT) & DPU_BN_CFG_RESERVED_2__MASK;
+}
+#define DPU_BN_CFG_BN_ALU_BYPASS__MASK 0x00000002
+#define DPU_BN_CFG_BN_ALU_BYPASS__SHIFT 1
+static inline uint32_t DPU_BN_CFG_BN_ALU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_ALU_BYPASS__SHIFT) & DPU_BN_CFG_BN_ALU_BYPASS__MASK;
+}
+#define DPU_BN_CFG_BN_BYPASS__MASK 0x00000001
+#define DPU_BN_CFG_BN_BYPASS__SHIFT 0
+static inline uint32_t DPU_BN_CFG_BN_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_BN_CFG_BN_BYPASS__SHIFT) & DPU_BN_CFG_BN_BYPASS__MASK;
+}
+
+#define REG_DPU_BN_ALU_CFG 0x00004064
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK 0xffffffff
+#define DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT 0
+static inline uint32_t DPU_BN_ALU_CFG_BN_ALU_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_ALU_CFG_BN_ALU_OPERAND__SHIFT) & DPU_BN_ALU_CFG_BN_ALU_OPERAND__MASK;
+}
+
+#define REG_DPU_BN_MUL_CFG 0x00004068
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK 0xffff0000
+#define DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT 16
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_OPERAND(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_OPERAND__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_OPERAND__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_0__MASK 0x0000c000
+#define DPU_BN_MUL_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_0__SHIFT) & DPU_BN_MUL_CFG_RESERVED_0__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK 0x00003f00
+#define DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT 8
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SHIFT_VALUE__MASK;
+}
+#define DPU_BN_MUL_CFG_RESERVED_1__MASK 0x000000fc
+#define DPU_BN_MUL_CFG_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_BN_MUL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_RESERVED_1__SHIFT) & DPU_BN_MUL_CFG_RESERVED_1__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK 0x00000002
+#define DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT 1
+static inline uint32_t DPU_BN_MUL_CFG_BN_TRUNCATE_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_TRUNCATE_SRC__MASK;
+}
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__MASK 0x00000001
+#define DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT 0
+static inline uint32_t DPU_BN_MUL_CFG_BN_MUL_SRC(uint32_t val)
+{
+ return ((val) << DPU_BN_MUL_CFG_BN_MUL_SRC__SHIFT) & DPU_BN_MUL_CFG_BN_MUL_SRC__MASK;
+}
+
+#define REG_DPU_BN_RELUX_CMP_VALUE 0x0000406c
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__SHIFT) & DPU_BN_RELUX_CMP_VALUE_BN_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_EW_CFG 0x00004070
+#define DPU_EW_CFG_EW_CVT_TYPE__MASK 0x80000000
+#define DPU_EW_CFG_EW_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_EW_CFG_EW_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_TYPE__SHIFT) & DPU_EW_CFG_EW_CVT_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_CVT_ROUND__MASK 0x40000000
+#define DPU_EW_CFG_EW_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_EW_CFG_EW_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_CVT_ROUND__SHIFT) & DPU_EW_CFG_EW_CVT_ROUND__MASK;
+}
+#define DPU_EW_CFG_EW_DATA_MODE__MASK 0x30000000
+#define DPU_EW_CFG_EW_DATA_MODE__SHIFT 28
+static inline uint32_t DPU_EW_CFG_EW_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_DATA_MODE__SHIFT) & DPU_EW_CFG_EW_DATA_MODE__MASK;
+}
+#define DPU_EW_CFG_RESERVED_0__MASK 0x0f000000
+#define DPU_EW_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_EW_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_0__SHIFT) & DPU_EW_CFG_RESERVED_0__MASK;
+}
+#define DPU_EW_CFG_EDATA_SIZE__MASK 0x00c00000
+#define DPU_EW_CFG_EDATA_SIZE__SHIFT 22
+static inline uint32_t DPU_EW_CFG_EDATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EDATA_SIZE__SHIFT) & DPU_EW_CFG_EDATA_SIZE__MASK;
+}
+#define DPU_EW_CFG_EW_EQUAL_EN__MASK 0x00200000
+#define DPU_EW_CFG_EW_EQUAL_EN__SHIFT 21
+static inline uint32_t DPU_EW_CFG_EW_EQUAL_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_EQUAL_EN__SHIFT) & DPU_EW_CFG_EW_EQUAL_EN__MASK;
+}
+#define DPU_EW_CFG_EW_BINARY_EN__MASK 0x00100000
+#define DPU_EW_CFG_EW_BINARY_EN__SHIFT 20
+static inline uint32_t DPU_EW_CFG_EW_BINARY_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BINARY_EN__SHIFT) & DPU_EW_CFG_EW_BINARY_EN__MASK;
+}
+#define DPU_EW_CFG_EW_ALU_ALGO__MASK 0x000f0000
+#define DPU_EW_CFG_EW_ALU_ALGO__SHIFT 16
+static inline uint32_t DPU_EW_CFG_EW_ALU_ALGO(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_ALU_ALGO__SHIFT) & DPU_EW_CFG_EW_ALU_ALGO__MASK;
+}
+#define DPU_EW_CFG_RESERVED_1__MASK 0x0000f800
+#define DPU_EW_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t DPU_EW_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_1__SHIFT) & DPU_EW_CFG_RESERVED_1__MASK;
+}
+#define DPU_EW_CFG_EW_RELUX_EN__MASK 0x00000400
+#define DPU_EW_CFG_EW_RELUX_EN__SHIFT 10
+static inline uint32_t DPU_EW_CFG_EW_RELUX_EN(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELUX_EN__SHIFT) & DPU_EW_CFG_EW_RELUX_EN__MASK;
+}
+#define DPU_EW_CFG_EW_RELU_BYPASS__MASK 0x00000200
+#define DPU_EW_CFG_EW_RELU_BYPASS__SHIFT 9
+static inline uint32_t DPU_EW_CFG_EW_RELU_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_RELU_BYPASS__SHIFT) & DPU_EW_CFG_EW_RELU_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK 0x00000100
+#define DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT 8
+static inline uint32_t DPU_EW_CFG_EW_OP_CVT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_CVT_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_CVT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_LUT_BYPASS__MASK 0x00000080
+#define DPU_EW_CFG_EW_LUT_BYPASS__SHIFT 7
+static inline uint32_t DPU_EW_CFG_EW_LUT_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_LUT_BYPASS__SHIFT) & DPU_EW_CFG_EW_LUT_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_OP_SRC__MASK 0x00000040
+#define DPU_EW_CFG_EW_OP_SRC__SHIFT 6
+static inline uint32_t DPU_EW_CFG_EW_OP_SRC(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_SRC__SHIFT) & DPU_EW_CFG_EW_OP_SRC__MASK;
+}
+#define DPU_EW_CFG_EW_MUL_PRELU__MASK 0x00000020
+#define DPU_EW_CFG_EW_MUL_PRELU__SHIFT 5
+static inline uint32_t DPU_EW_CFG_EW_MUL_PRELU(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_MUL_PRELU__SHIFT) & DPU_EW_CFG_EW_MUL_PRELU__MASK;
+}
+#define DPU_EW_CFG_RESERVED_2__MASK 0x00000018
+#define DPU_EW_CFG_RESERVED_2__SHIFT 3
+static inline uint32_t DPU_EW_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_RESERVED_2__SHIFT) & DPU_EW_CFG_RESERVED_2__MASK;
+}
+#define DPU_EW_CFG_EW_OP_TYPE__MASK 0x00000004
+#define DPU_EW_CFG_EW_OP_TYPE__SHIFT 2
+static inline uint32_t DPU_EW_CFG_EW_OP_TYPE(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_TYPE__SHIFT) & DPU_EW_CFG_EW_OP_TYPE__MASK;
+}
+#define DPU_EW_CFG_EW_OP_BYPASS__MASK 0x00000002
+#define DPU_EW_CFG_EW_OP_BYPASS__SHIFT 1
+static inline uint32_t DPU_EW_CFG_EW_OP_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_OP_BYPASS__SHIFT) & DPU_EW_CFG_EW_OP_BYPASS__MASK;
+}
+#define DPU_EW_CFG_EW_BYPASS__MASK 0x00000001
+#define DPU_EW_CFG_EW_BYPASS__SHIFT 0
+static inline uint32_t DPU_EW_CFG_EW_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_EW_CFG_EW_BYPASS__SHIFT) & DPU_EW_CFG_EW_BYPASS__MASK;
+}
+
+#define REG_DPU_EW_CVT_OFFSET_VALUE 0x00004074
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK 0xffffffff
+#define DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__SHIFT) & DPU_EW_CVT_OFFSET_VALUE_EW_OP_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_EW_CVT_SCALE_VALUE 0x00004078
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK 0xffc00000
+#define DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT 22
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_TRUNCATE__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK 0x003f0000
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT 16
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SHIFT__MASK;
+}
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK 0x0000ffff
+#define DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__SHIFT) & DPU_EW_CVT_SCALE_VALUE_EW_OP_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_EW_RELUX_CMP_VALUE 0x0000407c
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK 0xffffffff
+#define DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT 0
+static inline uint32_t DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT(uint32_t val)
+{
+ return ((val) << DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__SHIFT) & DPU_EW_RELUX_CMP_VALUE_EW_RELUX_CMP_DAT__MASK;
+}
+
+#define REG_DPU_OUT_CVT_OFFSET 0x00004080
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK 0xffffffff
+#define DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__SHIFT) & DPU_OUT_CVT_OFFSET_OUT_CVT_OFFSET__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SCALE 0x00004084
+#define DPU_OUT_CVT_SCALE_RESERVED_0__MASK 0xfffe0000
+#define DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_OUT_CVT_SCALE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_RESERVED_0__SHIFT) & DPU_OUT_CVT_SCALE_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK 0x00010000
+#define DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT 16
+static inline uint32_t DPU_OUT_CVT_SCALE_FP32TOFP16_EN(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_FP32TOFP16_EN__SHIFT) & DPU_OUT_CVT_SCALE_FP32TOFP16_EN__MASK;
+}
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK 0x0000ffff
+#define DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SCALE_OUT_CVT_SCALE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__SHIFT) & DPU_OUT_CVT_SCALE_OUT_CVT_SCALE__MASK;
+}
+
+#define REG_DPU_OUT_CVT_SHIFT 0x00004088
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK 0x80000000
+#define DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT 31
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_TYPE(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_TYPE__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_TYPE__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK 0x40000000
+#define DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT 30
+static inline uint32_t DPU_OUT_CVT_SHIFT_CVT_ROUND(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_CVT_ROUND__SHIFT) & DPU_OUT_CVT_SHIFT_CVT_ROUND__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__MASK 0x3ff00000
+#define DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT 20
+static inline uint32_t DPU_OUT_CVT_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_RESERVED_0__SHIFT) & DPU_OUT_CVT_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK 0x000ff000
+#define DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT 12
+static inline uint32_t DPU_OUT_CVT_SHIFT_MINUS_EXP(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_MINUS_EXP__SHIFT) & DPU_OUT_CVT_SHIFT_MINUS_EXP__MASK;
+}
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK 0x00000fff
+#define DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT 0
+static inline uint32_t DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__SHIFT) & DPU_OUT_CVT_SHIFT_OUT_CVT_SHIFT__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_0 0x00004090
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_0_EW_OPERAND_0(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_0_EW_OPERAND_0__SHIFT) & DPU_EW_OP_VALUE_0_EW_OPERAND_0__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_1 0x00004094
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_1_EW_OPERAND_1(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_1_EW_OPERAND_1__SHIFT) & DPU_EW_OP_VALUE_1_EW_OPERAND_1__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_2 0x00004098
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_2_EW_OPERAND_2(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_2_EW_OPERAND_2__SHIFT) & DPU_EW_OP_VALUE_2_EW_OPERAND_2__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_3 0x0000409c
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_3_EW_OPERAND_3(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_3_EW_OPERAND_3__SHIFT) & DPU_EW_OP_VALUE_3_EW_OPERAND_3__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_4 0x000040a0
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_4_EW_OPERAND_4(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_4_EW_OPERAND_4__SHIFT) & DPU_EW_OP_VALUE_4_EW_OPERAND_4__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_5 0x000040a4
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_5_EW_OPERAND_5(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_5_EW_OPERAND_5__SHIFT) & DPU_EW_OP_VALUE_5_EW_OPERAND_5__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_6 0x000040a8
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_6_EW_OPERAND_6(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_6_EW_OPERAND_6__SHIFT) & DPU_EW_OP_VALUE_6_EW_OPERAND_6__MASK;
+}
+
+#define REG_DPU_EW_OP_VALUE_7 0x000040ac
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK 0xffffffff
+#define DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT 0
+static inline uint32_t DPU_EW_OP_VALUE_7_EW_OPERAND_7(uint32_t val)
+{
+ return ((val) << DPU_EW_OP_VALUE_7_EW_OPERAND_7__SHIFT) & DPU_EW_OP_VALUE_7_EW_OPERAND_7__MASK;
+}
+
+#define REG_DPU_SURFACE_ADD 0x000040c0
+#define DPU_SURFACE_ADD_SURF_ADD__MASK 0xfffffff0
+#define DPU_SURFACE_ADD_SURF_ADD__SHIFT 4
+static inline uint32_t DPU_SURFACE_ADD_SURF_ADD(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_SURF_ADD__SHIFT) & DPU_SURFACE_ADD_SURF_ADD__MASK;
+}
+#define DPU_SURFACE_ADD_RESERVED_0__MASK 0x0000000f
+#define DPU_SURFACE_ADD_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_SURFACE_ADD_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_SURFACE_ADD_RESERVED_0__SHIFT) & DPU_SURFACE_ADD_RESERVED_0__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_CFG 0x00004100
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK 0x00020000
+#define DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT 17
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ACCESS_TYPE__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK 0x00010000
+#define DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_TABLE_ID(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_TABLE_ID__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__MASK 0x0000fc00
+#define DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT 10
+static inline uint32_t DPU_LUT_ACCESS_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_RESERVED_1__SHIFT) & DPU_LUT_ACCESS_CFG_RESERVED_1__MASK;
+}
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK 0x000003ff
+#define DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_CFG_LUT_ADDR(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_CFG_LUT_ADDR__SHIFT) & DPU_LUT_ACCESS_CFG_LUT_ADDR__MASK;
+}
+
+#define REG_DPU_LUT_ACCESS_DATA 0x00004104
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__MASK 0xffff0000
+#define DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT 16
+static inline uint32_t DPU_LUT_ACCESS_DATA_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_RESERVED_0__SHIFT) & DPU_LUT_ACCESS_DATA_RESERVED_0__MASK;
+}
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK 0x0000ffff
+#define DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT 0
+static inline uint32_t DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA(uint32_t val)
+{
+ return ((val) << DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__SHIFT) & DPU_LUT_ACCESS_DATA_LUT_ACCESS_DATA__MASK;
+}
+
+#define REG_DPU_LUT_CFG 0x00004108
+#define DPU_LUT_CFG_RESERVED_0__MASK 0xffffff00
+#define DPU_LUT_CFG_RESERVED_0__SHIFT 8
+static inline uint32_t DPU_LUT_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_RESERVED_0__SHIFT) & DPU_LUT_CFG_RESERVED_0__MASK;
+}
+#define DPU_LUT_CFG_LUT_CAL_SEL__MASK 0x00000080
+#define DPU_LUT_CFG_LUT_CAL_SEL__SHIFT 7
+static inline uint32_t DPU_LUT_CFG_LUT_CAL_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_CAL_SEL__SHIFT) & DPU_LUT_CFG_LUT_CAL_SEL__MASK;
+}
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK 0x00000040
+#define DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT 6
+static inline uint32_t DPU_LUT_CFG_LUT_HYBRID_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_HYBRID_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_HYBRID_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK 0x00000020
+#define DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT 5
+static inline uint32_t DPU_LUT_CFG_LUT_OFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_OFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_OFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK 0x00000010
+#define DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT 4
+static inline uint32_t DPU_LUT_CFG_LUT_UFLOW_PRIORITY(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_UFLOW_PRIORITY__SHIFT) & DPU_LUT_CFG_LUT_UFLOW_PRIORITY__MASK;
+}
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__MASK 0x0000000c
+#define DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT 2
+static inline uint32_t DPU_LUT_CFG_LUT_LO_LE_MUX(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_LO_LE_MUX__SHIFT) & DPU_LUT_CFG_LUT_LO_LE_MUX__MASK;
+}
+#define DPU_LUT_CFG_LUT_EXPAND_EN__MASK 0x00000002
+#define DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT 1
+static inline uint32_t DPU_LUT_CFG_LUT_EXPAND_EN(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_EXPAND_EN__SHIFT) & DPU_LUT_CFG_LUT_EXPAND_EN__MASK;
+}
+#define DPU_LUT_CFG_LUT_ROAD_SEL__MASK 0x00000001
+#define DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT 0
+static inline uint32_t DPU_LUT_CFG_LUT_ROAD_SEL(uint32_t val)
+{
+ return ((val) << DPU_LUT_CFG_LUT_ROAD_SEL__SHIFT) & DPU_LUT_CFG_LUT_ROAD_SEL__MASK;
+}
+
+#define REG_DPU_LUT_INFO 0x0000410c
+#define DPU_LUT_INFO_RESERVED_0__MASK 0xff000000
+#define DPU_LUT_INFO_RESERVED_0__SHIFT 24
+static inline uint32_t DPU_LUT_INFO_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_0__SHIFT) & DPU_LUT_INFO_RESERVED_0__MASK;
+}
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK 0x00ff0000
+#define DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT 16
+static inline uint32_t DPU_LUT_INFO_LUT_LO_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LO_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LO_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK 0x0000ff00
+#define DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT 8
+static inline uint32_t DPU_LUT_INFO_LUT_LE_INDEX_SELECT(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_LUT_LE_INDEX_SELECT__SHIFT) & DPU_LUT_INFO_LUT_LE_INDEX_SELECT__MASK;
+}
+#define DPU_LUT_INFO_RESERVED_1__MASK 0x000000ff
+#define DPU_LUT_INFO_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_LUT_INFO_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_LUT_INFO_RESERVED_1__SHIFT) & DPU_LUT_INFO_RESERVED_1__MASK;
+}
+
+#define REG_DPU_LUT_LE_START 0x00004110
+#define DPU_LUT_LE_START_LUT_LE_START__MASK 0xffffffff
+#define DPU_LUT_LE_START_LUT_LE_START__SHIFT 0
+static inline uint32_t DPU_LUT_LE_START_LUT_LE_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_START_LUT_LE_START__SHIFT) & DPU_LUT_LE_START_LUT_LE_START__MASK;
+}
+
+#define REG_DPU_LUT_LE_END 0x00004114
+#define DPU_LUT_LE_END_LUT_LE_END__MASK 0xffffffff
+#define DPU_LUT_LE_END_LUT_LE_END__SHIFT 0
+static inline uint32_t DPU_LUT_LE_END_LUT_LE_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_END_LUT_LE_END__SHIFT) & DPU_LUT_LE_END_LUT_LE_END__MASK;
+}
+
+#define REG_DPU_LUT_LO_START 0x00004118
+#define DPU_LUT_LO_START_LUT_LO_START__MASK 0xffffffff
+#define DPU_LUT_LO_START_LUT_LO_START__SHIFT 0
+static inline uint32_t DPU_LUT_LO_START_LUT_LO_START(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_START_LUT_LO_START__SHIFT) & DPU_LUT_LO_START_LUT_LO_START__MASK;
+}
+
+#define REG_DPU_LUT_LO_END 0x0000411c
+#define DPU_LUT_LO_END_LUT_LO_END__MASK 0xffffffff
+#define DPU_LUT_LO_END_LUT_LO_END__SHIFT 0
+static inline uint32_t DPU_LUT_LO_END_LUT_LO_END(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_END_LUT_LO_END__SHIFT) & DPU_LUT_LO_END_LUT_LO_END__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SCALE 0x00004120
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LE_SLOPE_SCALE_LUT_LE_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LE_SLOPE_SHIFT 0x00004124
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LE_SLOPE_SHIFT_LUT_LE_SLOPE_UFLOW_SHIFT__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SCALE 0x00004128
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK 0xffff0000
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT 16
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_OFLOW_SCALE__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK 0x0000ffff
+#define DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__SHIFT) & DPU_LUT_LO_SLOPE_SCALE_LUT_LO_SLOPE_UFLOW_SCALE__MASK;
+}
+
+#define REG_DPU_LUT_LO_SLOPE_SHIFT 0x0000412c
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK 0xfffffc00
+#define DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT 10
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_RESERVED_0__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK 0x000003e0
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT 5
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_OFLOW_SHIFT__MASK;
+}
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK 0x0000001f
+#define DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT 0
+static inline uint32_t DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT(uint32_t val)
+{
+ return ((val) << DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__SHIFT) & DPU_LUT_LO_SLOPE_SHIFT_LUT_LO_SLOPE_UFLOW_SHIFT__MASK;
+}
+
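+/*
+ * DPU read-DMA (RDMA) registers at offset 0x5000 onward (grouping
+ * inferred from the REG_DPU_RDMA_* names and SRC/BS/BN/EW base-address
+ * fields below).
+ */
+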
+#define REG_DPU_RDMA_RDMA_S_STATUS 0x00005000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & DPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_S_POINTER 0x00005004
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & DPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_OPERATION_ENABLE 0x00005008
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & DPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_WIDTH 0x0000500c
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_WIDTH_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_HEIGHT 0x00005010
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK 0xe0000000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK 0x1fff0000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_EW_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK 0x0000e000
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_HEIGHT_HEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_DATA_CUBE_CHANNEL 0x00005014
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK 0x00001fff
+#define DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__SHIFT) & DPU_RDMA_RDMA_DATA_CUBE_CHANNEL_CHANNEL__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_BASE_ADDR 0x00005018
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BRDMA_CFG 0x0000501c
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_BRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_BRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BS_BASE_ADDR 0x00005020
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BS_BASE_ADDR_BS_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_NRDMA_CFG 0x00005028
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK 0xffffffe0
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK 0x0000001e
+#define DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_NRDMA_DATA_USE__MASK;
+}
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK 0x00000001
+#define DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_NRDMA_CFG_RESERVED_1__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_BN_BASE_ADDR 0x0000502c
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_BN_BASE_ADDR_BN_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_ERDMA_CFG 0x00005034
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK 0xc0000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT 30
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK 0x20000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT 29
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_SURF_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK 0x10000000
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT 28
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_NONALIGN__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK 0x0ffffff0
+#define DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK 0x0000000c
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT 2
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DATA_SIZE__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK 0x00000002
+#define DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_OV4K_BYPASS__MASK;
+}
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK 0x00000001
+#define DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_ERDMA_CFG_ERDMA_DISABLE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_BASE_ADDR 0x00005038
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK 0xffffffff
+#define DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__SHIFT) & DPU_RDMA_RDMA_EW_BASE_ADDR_EW_BASE_ADDR__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_STRIDE 0x00005040
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_EW_SURF_STRIDE__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_FEATURE_MODE_CFG 0x00005044
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK 0xfffc0000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT 18
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK 0x00038000
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT 15
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_IN_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK 0x00007800
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT 11
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_BURST_LEN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK 0x00000700
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_COMB_USE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK 0x000000e0
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT 5
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_PROC_PRECISION__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK 0x00000010
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_DISABLE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK 0x00000008
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_MRDMA_FP16TOFP32_EN__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK 0x00000006
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT 1
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_CONV_MODE__MASK;
+}
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK 0x00000001
+#define DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__SHIFT) & DPU_RDMA_RDMA_FEATURE_MODE_CFG_FLYING_MODE__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SRC_DMA_CFG 0x00005048
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK 0xfff80000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT 19
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_LINE_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK 0x0007c000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT 14
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK 0x00002000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT 13
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_POOLING_METHOD__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK 0x00001000
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT 12
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_UNPOOLING_EN__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00000e00
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 9
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000001c0
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT 6
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK 0x00000038
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_HEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK 0x00000007
+#define DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__SHIFT) & DPU_RDMA_RDMA_SRC_DMA_CFG_KERNEL_WIDTH__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_SURF_NOTCH 0x0000504c
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_SURF_NOTCH_ADDR__MASK;
+}
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_SURF_NOTCH_RESERVED_0__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_PAD_CFG 0x00005064
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK 0xffff0000
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_VALUE__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK 0x0000ff80
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT 7
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_0__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK 0x00000070
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_TOP__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK 0x00000008
+#define DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT 3
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_RESERVED_1__MASK;
+}
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK 0x00000007
+#define DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__SHIFT) & DPU_RDMA_RDMA_PAD_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_WEIGHT 0x00005068
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK 0xff000000
+#define DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT 24
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_E_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_E_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK 0x00ff0000
+#define DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT 16
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_N_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_N_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK 0x0000ff00
+#define DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT 8
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_B_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_B_WEIGHT__MASK;
+}
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK 0x000000ff
+#define DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_WEIGHT_M_WEIGHT(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__SHIFT) & DPU_RDMA_RDMA_WEIGHT_M_WEIGHT__MASK;
+}
+
+#define REG_DPU_RDMA_RDMA_EW_SURF_NOTCH 0x0000506c
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK 0xfffffff0
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT 4
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_EW_SURF_NOTCH__MASK;
+}
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK 0x0000000f
+#define DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT 0
+static inline uint32_t DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0(uint32_t val)
+{
+ return ((val) << DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__SHIFT) & DPU_RDMA_RDMA_EW_SURF_NOTCH_RESERVED_0__MASK;
+}
+
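+/*
+ * PPU registers at offset 0x6000 onward (grouping inferred from the
+ * REG_PPU_* names below; the fields describe pooling kernel, padding
+ * and destination-cube layout).
+ */
+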
+#define REG_PPU_S_STATUS 0x00006000
+#define PPU_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_0__SHIFT) & PPU_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_1__SHIFT) & PPU_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_RESERVED_1__SHIFT) & PPU_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_S_STATUS_STATUS_0__SHIFT) & PPU_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_S_POINTER 0x00006004
+#define PPU_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_0__SHIFT) & PPU_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER__SHIFT) & PPU_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_RESERVED_1__SHIFT) & PPU_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_S_POINTER_POINTER__SHIFT) & PPU_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_OPERATION_ENABLE 0x00006008
+#define PPU_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_WIDTH 0x0000600c
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_DATA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_HEIGHT 0x00006010
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_DATA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_IN_CHANNEL 0x00006014
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_DATA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_WIDTH 0x00006018
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__SHIFT) & PPU_DATA_CUBE_OUT_WIDTH_CUBE_OUT_WIDTH__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_HEIGHT 0x0000601c
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__SHIFT) & PPU_DATA_CUBE_OUT_HEIGHT_CUBE_OUT_HEIGHT__MASK;
+}
+
+#define REG_PPU_DATA_CUBE_OUT_CHANNEL 0x00006020
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK 0x00001fff
+#define PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT 0
+static inline uint32_t PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__SHIFT) & PPU_DATA_CUBE_OUT_CHANNEL_CUBE_OUT_CHANNEL__MASK;
+}
+
+#define REG_PPU_OPERATION_MODE_CFG 0x00006024
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__MASK 0x80000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT 31
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_0__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_0__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__MASK 0x40000000
+#define PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT 30
+static inline uint32_t PPU_OPERATION_MODE_CFG_INDEX_EN(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_INDEX_EN__SHIFT) & PPU_OPERATION_MODE_CFG_INDEX_EN__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__MASK 0x20000000
+#define PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT 29
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_1__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_1__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK 0x1fff0000
+#define PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT 16
+static inline uint32_t PPU_OPERATION_MODE_CFG_NOTCH_ADDR(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_NOTCH_ADDR__SHIFT) & PPU_OPERATION_MODE_CFG_NOTCH_ADDR__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__MASK 0x0000ff00
+#define PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT 8
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_2__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_2__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_USE_CNT__MASK 0x000000e0
+#define PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT 5
+static inline uint32_t PPU_OPERATION_MODE_CFG_USE_CNT(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_USE_CNT__SHIFT) & PPU_OPERATION_MODE_CFG_USE_CNT__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK 0x00000010
+#define PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT 4
+static inline uint32_t PPU_OPERATION_MODE_CFG_FLYING_MODE(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_FLYING_MODE__SHIFT) & PPU_OPERATION_MODE_CFG_FLYING_MODE__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__MASK 0x0000000c
+#define PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT 2
+static inline uint32_t PPU_OPERATION_MODE_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_RESERVED_3__SHIFT) & PPU_OPERATION_MODE_CFG_RESERVED_3__MASK;
+}
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK 0x00000003
+#define PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT 0
+static inline uint32_t PPU_OPERATION_MODE_CFG_POOLING_METHOD(uint32_t val)
+{
+ return ((val) << PPU_OPERATION_MODE_CFG_POOLING_METHOD__SHIFT) & PPU_OPERATION_MODE_CFG_POOLING_METHOD__MASK;
+}
+
+#define REG_PPU_POOLING_KERNEL_CFG 0x00006034
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK 0xff000000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT 24
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_0__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK 0x00f00000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT 20
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK 0x000f0000
+#define PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT 16
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_STRIDE_WIDTH__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK 0x0000f000
+#define PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT 12
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_1__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK 0x00000f00
+#define PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_HEIGHT__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK 0x000000f0
+#define PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT 4
+static inline uint32_t PPU_POOLING_KERNEL_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_RESERVED_2__SHIFT) & PPU_POOLING_KERNEL_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK 0x0000000f
+#define PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__SHIFT) & PPU_POOLING_KERNEL_CFG_KERNEL_WIDTH__MASK;
+}
+
+#define REG_PPU_RECIP_KERNEL_WIDTH 0x00006038
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__SHIFT) & PPU_RECIP_KERNEL_WIDTH_RECIP_KERNEL_WIDTH__MASK;
+}
+
+#define REG_PPU_RECIP_KERNEL_HEIGHT 0x0000603c
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK 0xfffe0000
+#define PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK 0x0001ffff
+#define PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__SHIFT) & PPU_RECIP_KERNEL_HEIGHT_RECIP_KERNEL_HEIGHT__MASK;
+}
+
+#define REG_PPU_POOLING_PADDING_CFG 0x00006040
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__MASK 0xffff8000
+#define PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT 15
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_0__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_0__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK 0x00007000
+#define PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT 12
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_BOTTOM(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_BOTTOM__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_BOTTOM__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__MASK 0x00000800
+#define PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT 11
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_1__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_1__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK 0x00000700
+#define PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT 8
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_RIGHT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_RIGHT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_RIGHT__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__MASK 0x00000080
+#define PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT 7
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_2(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_2__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_2__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__MASK 0x00000070
+#define PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT 4
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_TOP(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_TOP__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_TOP__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__MASK 0x00000008
+#define PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT 3
+static inline uint32_t PPU_POOLING_PADDING_CFG_RESERVED_3(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_RESERVED_3__SHIFT) & PPU_POOLING_PADDING_CFG_RESERVED_3__MASK;
+}
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK 0x00000007
+#define PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT 0
+static inline uint32_t PPU_POOLING_PADDING_CFG_PAD_LEFT(uint32_t val)
+{
+ return ((val) << PPU_POOLING_PADDING_CFG_PAD_LEFT__SHIFT) & PPU_POOLING_PADDING_CFG_PAD_LEFT__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_1_CFG 0x00006044
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK 0xffffffff
+#define PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__SHIFT) & PPU_PADDING_VALUE_1_CFG_PAD_VALUE_0__MASK;
+}
+
+#define REG_PPU_PADDING_VALUE_2_CFG 0x00006048
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK 0xfffffff8
+#define PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT 3
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_RESERVED_0__SHIFT) & PPU_PADDING_VALUE_2_CFG_RESERVED_0__MASK;
+}
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK 0x00000007
+#define PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT 0
+static inline uint32_t PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1(uint32_t val)
+{
+ return ((val) << PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__SHIFT) & PPU_PADDING_VALUE_2_CFG_PAD_VALUE_1__MASK;
+}
+
+#define REG_PPU_DST_BASE_ADDR 0x00006070
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK 0xfffffff0
+#define PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT 4
+static inline uint32_t PPU_DST_BASE_ADDR_DST_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_DST_BASE_ADDR__SHIFT) & PPU_DST_BASE_ADDR_DST_BASE_ADDR__MASK;
+}
+#define PPU_DST_BASE_ADDR_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_BASE_ADDR_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_BASE_ADDR_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_BASE_ADDR_RESERVED_0__SHIFT) & PPU_DST_BASE_ADDR_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DST_SURF_STRIDE 0x0000607c
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_DST_SURF_STRIDE_DST_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__SHIFT) & PPU_DST_SURF_STRIDE_DST_SURF_STRIDE__MASK;
+}
+#define PPU_DST_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_DST_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_DST_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_DST_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_DATA_FORMAT 0x00006084
+#define PPU_DATA_FORMAT_INDEX_ADD__MASK 0xfffffff0
+#define PPU_DATA_FORMAT_INDEX_ADD__SHIFT 4
+static inline uint32_t PPU_DATA_FORMAT_INDEX_ADD(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_INDEX_ADD__SHIFT) & PPU_DATA_FORMAT_INDEX_ADD__MASK;
+}
+#define PPU_DATA_FORMAT_DPU_FLYIN__MASK 0x00000008
+#define PPU_DATA_FORMAT_DPU_FLYIN__SHIFT 3
+static inline uint32_t PPU_DATA_FORMAT_DPU_FLYIN(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_DPU_FLYIN__SHIFT) & PPU_DATA_FORMAT_DPU_FLYIN__MASK;
+}
+#define PPU_DATA_FORMAT_PROC_PRECISION__MASK 0x00000007
+#define PPU_DATA_FORMAT_PROC_PRECISION__SHIFT 0
+static inline uint32_t PPU_DATA_FORMAT_PROC_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_DATA_FORMAT_PROC_PRECISION__SHIFT) & PPU_DATA_FORMAT_PROC_PRECISION__MASK;
+}
+
+#define REG_PPU_MISC_CTRL 0x000060dc
+#define PPU_MISC_CTRL_SURF_LEN__MASK 0xffff0000
+#define PPU_MISC_CTRL_SURF_LEN__SHIFT 16
+static inline uint32_t PPU_MISC_CTRL_SURF_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_SURF_LEN__SHIFT) & PPU_MISC_CTRL_SURF_LEN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_0__MASK 0x0000fe00
+#define PPU_MISC_CTRL_RESERVED_0__SHIFT 9
+static inline uint32_t PPU_MISC_CTRL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_0__SHIFT) & PPU_MISC_CTRL_RESERVED_0__MASK;
+}
+#define PPU_MISC_CTRL_MC_SURF_OUT__MASK 0x00000100
+#define PPU_MISC_CTRL_MC_SURF_OUT__SHIFT 8
+static inline uint32_t PPU_MISC_CTRL_MC_SURF_OUT(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_MC_SURF_OUT__SHIFT) & PPU_MISC_CTRL_MC_SURF_OUT__MASK;
+}
+#define PPU_MISC_CTRL_NONALIGN__MASK 0x00000080
+#define PPU_MISC_CTRL_NONALIGN__SHIFT 7
+static inline uint32_t PPU_MISC_CTRL_NONALIGN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_NONALIGN__SHIFT) & PPU_MISC_CTRL_NONALIGN__MASK;
+}
+#define PPU_MISC_CTRL_RESERVED_1__MASK 0x00000070
+#define PPU_MISC_CTRL_RESERVED_1__SHIFT 4
+static inline uint32_t PPU_MISC_CTRL_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_RESERVED_1__SHIFT) & PPU_MISC_CTRL_RESERVED_1__MASK;
+}
+#define PPU_MISC_CTRL_BURST_LEN__MASK 0x0000000f
+#define PPU_MISC_CTRL_BURST_LEN__SHIFT 0
+static inline uint32_t PPU_MISC_CTRL_BURST_LEN(uint32_t val)
+{
+ return ((val) << PPU_MISC_CTRL_BURST_LEN__SHIFT) & PPU_MISC_CTRL_BURST_LEN__MASK;
+}
+
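+/*
+ * PPU read-DMA (RDMA) registers at offset 0x7000 onward (grouping
+ * inferred from the REG_PPU_RDMA_* names below).
+ */
+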
+#define REG_PPU_RDMA_RDMA_S_STATUS 0x00007000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK 0xfffc0000
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT 18
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK 0x00030000
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK 0x0000fffc
+#define PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_STATUS_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK 0x00000003
+#define PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_STATUS_STATUS_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_STATUS_STATUS_0__SHIFT) & PPU_RDMA_RDMA_S_STATUS_STATUS_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_S_POINTER 0x00007004
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK 0xfffe0000
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT 17
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK 0x00010000
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT 16
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK 0x0000ffc0
+#define PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT 6
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_RESERVED_1(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_RESERVED_1__SHIFT) & PPU_RDMA_RDMA_S_POINTER_RESERVED_1__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK 0x00000020
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT 5
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK 0x00000010
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_CLEAR__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK 0x00000008
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT 3
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_MODE__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK 0x00000004
+#define PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_EXECUTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK 0x00000002
+#define PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER_PP_EN__MASK;
+}
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__MASK 0x00000001
+#define PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_S_POINTER_POINTER(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_S_POINTER_POINTER__SHIFT) & PPU_RDMA_RDMA_S_POINTER_POINTER__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_OPERATION_ENABLE 0x00007008
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK 0xfffffffe
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT 1
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK 0x00000001
+#define PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__SHIFT) & PPU_RDMA_RDMA_OPERATION_ENABLE_OP_EN__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_WIDTH 0x0000700c
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_WIDTH_CUBE_IN_WIDTH__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_HEIGHT 0x00007010
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_HEIGHT_CUBE_IN_HEIGHT__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_CUBE_IN_CHANNEL 0x00007014
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK 0xffffe000
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT 13
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK 0x00001fff
+#define PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__SHIFT) & PPU_RDMA_RDMA_CUBE_IN_CHANNEL_CUBE_IN_CHANNEL__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_BASE_ADDR 0x0000701c
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK 0xffffffff
+#define PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__SHIFT) & PPU_RDMA_RDMA_SRC_BASE_ADDR_SRC_BASE_ADDR__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_LINE_STRIDE 0x00007024
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_SRC_LINE_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_LINE_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_SRC_SURF_STRIDE 0x00007028
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK 0xfffffff0
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT 4
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_SRC_SURF_STRIDE__MASK;
+}
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK 0x0000000f
+#define PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_SRC_SURF_STRIDE_RESERVED_0__MASK;
+}
+
+#define REG_PPU_RDMA_RDMA_DATA_FORMAT 0x00007030
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK 0xfffffffc
+#define PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT 2
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_RESERVED_0__MASK;
+}
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK 0x00000003
+#define PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT 0
+static inline uint32_t PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION(uint32_t val)
+{
+ return ((val) << PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__SHIFT) & PPU_RDMA_RDMA_DATA_FORMAT_IN_PRECISION__MASK;
+}
+
+#define REG_DDMA_CFG_OUTSTANDING 0x00008000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & DDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t DDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t DDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & DDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_0 0x00008004
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & DDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_DDMA_WR_WEIGHT_0 0x00008008
+#define DDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t DDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & DDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & DDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_DDMA_CFG_ID_ERROR 0x0000800c
+#define DDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t DDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t DDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & DDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t DDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << DDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & DDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_DDMA_RD_WEIGHT_1 0x00008010
+#define DDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t DDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & DDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t DDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & DDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_FIFO_CLR 0x00008014
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & DDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_ARB 0x00008018
+#define DDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & DDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & DDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & DDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_QOS 0x00008020
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & DDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_RD_CFG 0x00008024
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & DDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WR_CFG 0x00008028
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & DDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & DDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_DDMA_CFG_DMA_WSTRB 0x0000802c
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t DDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << DDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & DDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_DDMA_CFG_STATUS 0x00008030
+#define DDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define DDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_0__SHIFT) & DDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define DDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define DDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t DDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_IDEL__SHIFT) & DDMA_CFG_STATUS_IDEL__MASK;
+}
+#define DDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define DDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t DDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << DDMA_CFG_STATUS_RESERVED_1__SHIFT) & DDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
+#define REG_SDMA_CFG_OUTSTANDING 0x00009000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__MASK 0xffff0000
+#define SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_CFG_OUTSTANDING_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RESERVED_0__SHIFT) & SDMA_CFG_OUTSTANDING_RESERVED_0__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK 0x0000ff00
+#define SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT 8
+static inline uint32_t SDMA_CFG_OUTSTANDING_WR_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_WR_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_WR_OS_CNT__MASK;
+}
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK 0x000000ff
+#define SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT 0
+static inline uint32_t SDMA_CFG_OUTSTANDING_RD_OS_CNT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_OUTSTANDING_RD_OS_CNT__SHIFT) & SDMA_CFG_OUTSTANDING_RD_OS_CNT__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_0 0x00009004
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK 0xff000000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT 24
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_PDP__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK 0x00ff0000
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT 16
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_DPU__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK 0x0000ff00
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_KERNEL__MASK;
+}
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__SHIFT) & SDMA_RD_WEIGHT_0_RD_WEIGHT_FEATURE__MASK;
+}
+
+#define REG_SDMA_WR_WEIGHT_0 0x00009008
+#define SDMA_WR_WEIGHT_0_RESERVED_0__MASK 0xffff0000
+#define SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT 16
+static inline uint32_t SDMA_WR_WEIGHT_0_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_RESERVED_0__SHIFT) & SDMA_WR_WEIGHT_0_RESERVED_0__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK 0x0000ff00
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT 8
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_PDP__MASK;
+}
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK 0x000000ff
+#define SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT 0
+static inline uint32_t SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU(uint32_t val)
+{
+ return ((val) << SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__SHIFT) & SDMA_WR_WEIGHT_0_WR_WEIGHT_DPU__MASK;
+}
+
+#define REG_SDMA_CFG_ID_ERROR 0x0000900c
+#define SDMA_CFG_ID_ERROR_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_0__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK 0x000003c0
+#define SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT 6
+static inline uint32_t SDMA_CFG_ID_ERROR_WR_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_WR_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_WR_RESP_ID__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RESERVED_1__MASK 0x00000020
+#define SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT 5
+static inline uint32_t SDMA_CFG_ID_ERROR_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RESERVED_1__SHIFT) & SDMA_CFG_ID_ERROR_RESERVED_1__MASK;
+}
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK 0x0000001f
+#define SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT 0
+static inline uint32_t SDMA_CFG_ID_ERROR_RD_RESP_ID(uint32_t val)
+{
+ return ((val) << SDMA_CFG_ID_ERROR_RD_RESP_ID__SHIFT) & SDMA_CFG_ID_ERROR_RD_RESP_ID__MASK;
+}
+
+#define REG_SDMA_RD_WEIGHT_1 0x00009010
+#define SDMA_RD_WEIGHT_1_RESERVED_0__MASK 0xffffff00
+#define SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT 8
+static inline uint32_t SDMA_RD_WEIGHT_1_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RESERVED_0__SHIFT) & SDMA_RD_WEIGHT_1_RESERVED_0__MASK;
+}
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK 0x000000ff
+#define SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT 0
+static inline uint32_t SDMA_RD_WEIGHT_1_RD_WEIGHT_PC(uint32_t val)
+{
+ return ((val) << SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__SHIFT) & SDMA_RD_WEIGHT_1_RD_WEIGHT_PC__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_FIFO_CLR 0x00009014
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK 0xfffffffe
+#define SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT 1
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK 0x00000001
+#define SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__SHIFT) & SDMA_CFG_DMA_FIFO_CLR_DMA_FIFO_CLR__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_ARB 0x00009018
+#define SDMA_CFG_DMA_ARB_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_0__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK 0x00000200
+#define SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT 9
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_WR_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK 0x00000100
+#define SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__SHIFT) & SDMA_CFG_DMA_ARB_RD_ARBIT_MODEL__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_1__MASK 0x00000080
+#define SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT 7
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_1__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_1__MASK;
+}
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK 0x00000070
+#define SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_ARB_WR_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_WR_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_WR_FIX_ARB__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RESERVED_2__MASK 0x00000008
+#define SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_ARB_RESERVED_2(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RESERVED_2__SHIFT) & SDMA_CFG_DMA_ARB_RESERVED_2__MASK;
+}
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK 0x00000007
+#define SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_ARB_RD_FIX_ARB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_ARB_RD_FIX_ARB__SHIFT) & SDMA_CFG_DMA_ARB_RD_FIX_ARB__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_QOS 0x00009020
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK 0xfffffc00
+#define SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT 10
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_QOS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK 0x00000300
+#define SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PC_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PC_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK 0x000000c0
+#define SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT 6
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_PPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK 0x00000030
+#define SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT 4
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_DPU_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK 0x0000000c
+#define SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT 2
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_KERNEL_QOS__MASK;
+}
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK 0x00000003
+#define SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__SHIFT) & SDMA_CFG_DMA_RD_QOS_RD_FEATURE_QOS__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_RD_CFG 0x00009024
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_RD_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARLOCK__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARCACHE__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARPROT__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARPROT__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARBURST__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARBURST__MASK;
+}
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_RD_CFG_RD_ARSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__SHIFT) & SDMA_CFG_DMA_RD_CFG_RD_ARSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WR_CFG 0x00009028
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK 0xffffe000
+#define SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT 13
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_RESERVED_0__SHIFT) & SDMA_CFG_DMA_WR_CFG_RESERVED_0__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK 0x00001000
+#define SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT 12
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWLOCK(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWLOCK__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK 0x00000f00
+#define SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT 8
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWCACHE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWCACHE__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK 0x000000e0
+#define SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT 5
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWPROT(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWPROT__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWPROT__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK 0x00000018
+#define SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT 3
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWBURST(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWBURST__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWBURST__MASK;
+}
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK 0x00000007
+#define SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WR_CFG_WR_AWSIZE(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__SHIFT) & SDMA_CFG_DMA_WR_CFG_WR_AWSIZE__MASK;
+}
+
+#define REG_SDMA_CFG_DMA_WSTRB 0x0000902c
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK 0xffffffff
+#define SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT 0
+static inline uint32_t SDMA_CFG_DMA_WSTRB_WR_WSTRB(uint32_t val)
+{
+ return ((val) << SDMA_CFG_DMA_WSTRB_WR_WSTRB__SHIFT) & SDMA_CFG_DMA_WSTRB_WR_WSTRB__MASK;
+}
+
+#define REG_SDMA_CFG_STATUS 0x00009030
+#define SDMA_CFG_STATUS_RESERVED_0__MASK 0xfffffe00
+#define SDMA_CFG_STATUS_RESERVED_0__SHIFT 9
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_0(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_0__SHIFT) & SDMA_CFG_STATUS_RESERVED_0__MASK;
+}
+#define SDMA_CFG_STATUS_IDEL__MASK 0x00000100
+#define SDMA_CFG_STATUS_IDEL__SHIFT 8
+static inline uint32_t SDMA_CFG_STATUS_IDEL(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_IDEL__SHIFT) & SDMA_CFG_STATUS_IDEL__MASK;
+}
+#define SDMA_CFG_STATUS_RESERVED_1__MASK 0x000000ff
+#define SDMA_CFG_STATUS_RESERVED_1__SHIFT 0
+static inline uint32_t SDMA_CFG_STATUS_RESERVED_1(uint32_t val)
+{
+ return ((val) << SDMA_CFG_STATUS_RESERVED_1__SHIFT) & SDMA_CFG_STATUS_RESERVED_1__MASK;
+}
+
+#define REG_GLOBAL_OPERATION_ENABLE 0x0000f008
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK 0xffffff80
+#define GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT 7
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_0(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_0__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_0__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK 0x00000040
+#define GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT 6
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK 0x00000020
+#define GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT 5
+static inline uint32_t GLOBAL_OPERATION_ENABLE_PPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_PPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_PPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK 0x00000010
+#define GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT 4
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_RDMA_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK 0x00000008
+#define GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT 3
+static inline uint32_t GLOBAL_OPERATION_ENABLE_DPU_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_DPU_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_DPU_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK 0x00000004
+#define GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT 2
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CORE_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CORE_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CORE_OP_EN__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK 0x00000002
+#define GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT 1
+static inline uint32_t GLOBAL_OPERATION_ENABLE_RESERVED_1(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_RESERVED_1__SHIFT) & GLOBAL_OPERATION_ENABLE_RESERVED_1__MASK;
+}
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK 0x00000001
+#define GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT 0
+static inline uint32_t GLOBAL_OPERATION_ENABLE_CNA_OP_EN(uint32_t val)
+{
+ return ((val) << GLOBAL_OPERATION_ENABLE_CNA_OP_EN__SHIFT) & GLOBAL_OPERATION_ENABLE_CNA_OP_EN__MASK;
+}
+
+#endif /* __ROCKET_REGISTERS_XML__ */
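The generated helpers above all follow one encode-only pattern: shift a field value into position, then mask it to the field's width so an oversized value cannot spill into neighbouring bits. A minimal usage sketch, composing and decoding REG_PPU_MISC_CTRL with the helpers defined above (the ppu_misc_ctrl_* wrapper names are illustrative, not part of the generated header):

/* Sketch: pack two fields into one register word, and decode one
 * back out. Decode is the inverse of the generated encoders:
 * isolate with the mask, then shift down to bit 0.
 */
static inline uint32_t ppu_misc_ctrl_pack(uint32_t surf_len,
					  uint32_t burst_len)
{
	return PPU_MISC_CTRL_SURF_LEN(surf_len) |
	       PPU_MISC_CTRL_BURST_LEN(burst_len);
}

static inline uint32_t ppu_misc_ctrl_surf_len_get(uint32_t reg)
{
	return (reg & PPU_MISC_CTRL_SURF_LEN__MASK) >>
	       PPU_MISC_CTRL_SURF_LEN__SHIFT;
}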
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index b594780a57d7..ca00a5dbcf75 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -461,7 +461,7 @@ config ACPI_HED
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
- depends on EFI && (X86 || ARM64 || LOONGARCH)
+ depends on EFI
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
@@ -547,6 +547,10 @@ if ARM64
source "drivers/acpi/arm64/Kconfig"
endif
+if RISCV
+source "drivers/acpi/riscv/Kconfig"
+endif
+
config ACPI_PPTT
bool
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index d50261d05f3a..515b20d0b698 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -569,11 +569,11 @@ static int acpi_aml_release(struct inode *inode, struct file *file)
return 0;
}
-static int acpi_aml_read_user(char __user *buf, int len)
+static ssize_t acpi_aml_read_user(char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.out_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
@@ -582,7 +582,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
/* sync head before removing logs */
smp_rmb();
p = &crc->buf[crc->tail];
- n = min(len, circ_count_to_end(crc));
+ n = min_t(size_t, len, circ_count_to_end(crc));
if (copy_to_user(buf, p, n)) {
ret = -EFAULT;
goto out;
@@ -599,8 +599,8 @@ out:
static ssize_t acpi_aml_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
@@ -639,11 +639,11 @@ again:
return size > 0 ? size : ret;
}
-static int acpi_aml_write_user(const char __user *buf, int len)
+static ssize_t acpi_aml_write_user(const char __user *buf, size_t len)
{
- int ret;
struct circ_buf *crc = &acpi_aml_io.in_crc;
- int n;
+ ssize_t ret;
+ size_t n;
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
@@ -652,7 +652,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
/* sync tail before inserting cmds */
smp_mb();
p = &crc->buf[crc->head];
- n = min(len, circ_space_to_end(crc));
+ n = min_t(size_t, len, circ_space_to_end(crc));
if (copy_from_user(p, buf, n)) {
ret = -EFAULT;
goto out;
@@ -663,14 +663,14 @@ static int acpi_aml_write_user(const char __user *buf, int len)
ret = n;
out:
acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
- return n;
+ return ret;
}
static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int ret = 0;
- int size = 0;
+ ssize_t ret = 0;
+ ssize_t size = 0;
if (!count)
return 0;
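The type changes in this file close a classic signed/unsigned bug class: comparing an int length against size_t buffer arithmetic promotes the int to unsigned, so a negative value silently becomes enormous, and an int return type cannot represent both large byte counts and -EFAULT. A hedged sketch of the corrected shape, assuming a circ_buf-style consumer (DEMO_BUF_SIZE is hypothetical):

#include <linux/circ_buf.h>
#include <linux/minmax.h>
#include <linux/uaccess.h>

#define DEMO_BUF_SIZE 4096	/* hypothetical; must be a power of two */

static ssize_t demo_read_user(char __user *buf, size_t len,
			      struct circ_buf *crc)
{
	/* min_t() forces both operands to size_t, so mixed-signedness
	 * comparison warnings and wraparound are impossible here.
	 */
	size_t n = min_t(size_t, len,
			 CIRC_CNT_TO_END(crc->head, crc->tail, DEMO_BUF_SIZE));

	if (copy_to_user(buf, &crc->buf[crc->tail], n))
		return -EFAULT;

	crc->tail = (crc->tail + n) & (DEMO_BUF_SIZE - 1);
	return n;	/* widens safely into ssize_t */
}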
diff --git a/drivers/acpi/acpi_mrrm.c b/drivers/acpi/acpi_mrrm.c
index 47ea3ccc2142..6d69554c940e 100644
--- a/drivers/acpi/acpi_mrrm.c
+++ b/drivers/acpi/acpi_mrrm.c
@@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table)
if (!mrrm)
return -ENODEV;
+ if (mrrm->header.revision != 1)
+ return -EINVAL;
+
if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
return -EOPNOTSUPP;
@@ -149,26 +152,49 @@ ATTRIBUTE_GROUPS(memory_range);
static __init int add_boot_memory_ranges(void)
{
- struct kobject *pkobj, *kobj;
+ struct kobject *pkobj, *kobj, **kobjs;
int ret = -EINVAL;
- char *name;
+ char name[16];
+ int i;
pkobj = kobject_create_and_add("memory_ranges", acpi_kobj);
+ if (!pkobj)
+ return -ENOMEM;
- for (int i = 0; i < mrrm_mem_entry_num; i++) {
- name = kasprintf(GFP_KERNEL, "range%d", i);
- if (!name) {
- ret = -ENOMEM;
- break;
- }
+ kobjs = kcalloc(mrrm_mem_entry_num, sizeof(*kobjs), GFP_KERNEL);
+ if (!kobjs) {
+ kobject_put(pkobj);
+ return -ENOMEM;
+ }
+ for (i = 0; i < mrrm_mem_entry_num; i++) {
+ scnprintf(name, sizeof(name), "range%d", i);
kobj = kobject_create_and_add(name, pkobj);
+ if (!kobj) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
ret = sysfs_create_groups(kobj, memory_range_groups);
- if (ret)
- return ret;
+ if (ret) {
+ kobject_put(kobj);
+ goto cleanup;
+ }
+ kobjs[i] = kobj;
}
+ kfree(kobjs);
+ return 0;
+
+cleanup:
+ for (int j = 0; j < i; j++) {
+ if (kobjs[j]) {
+ sysfs_remove_groups(kobjs[j], memory_range_groups);
+ kobject_put(kobjs[j]);
+ }
+ }
+ kfree(kobjs);
+ kobject_put(pkobj);
return ret;
}
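The rewritten error path above fixes several leaks in the original: on failure it returned without dropping pkobj or the kobjects already created, and a failed kobject_create_and_add() left earlier sysfs groups registered. The underlying pattern, remembering every fully constructed object so a partial failure can be unwound, is sketched below with hypothetical demo_* names (sysfs group handling omitted for brevity):

static int demo_add_all(struct kobject *parent, int count)
{
	struct kobject **objs;
	int i, ret = 0;

	objs = kcalloc(count, sizeof(*objs), GFP_KERNEL);
	if (!objs)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		char name[16];

		scnprintf(name, sizeof(name), "range%d", i);
		objs[i] = kobject_create_and_add(name, parent);
		if (!objs[i]) {
			ret = -ENOMEM;
			goto unwind;
		}
	}
	kfree(objs);
	return 0;

unwind:
	/* Tear down only what was actually created, newest first. */
	while (--i >= 0)
		kobject_put(objs[i]);
	kfree(objs);
	return ret;
}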
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 2a99f5eb6962..7ec1dc04fd11 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -815,7 +815,7 @@ bool acpi_processor_claim_cst_control(void)
cst_control_claimed = true;
return true;
}
-EXPORT_SYMBOL_GPL(acpi_processor_claim_cst_control);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_claim_cst_control, "ACPI_PROCESSOR_IDLE");
/**
* acpi_processor_evaluate_cst - Evaluate the processor _CST control method.
@@ -994,5 +994,5 @@ end:
return ret;
}
-EXPORT_SYMBOL_GPL(acpi_processor_evaluate_cst);
+EXPORT_SYMBOL_NS_GPL(acpi_processor_evaluate_cst, "ACPI_PROCESSOR_IDLE");
#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
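Moving these exports into the "ACPI_PROCESSOR_IDLE" symbol namespace means any module calling them must import that namespace explicitly, or modpost will warn and the module will fail to load with an unknown-symbol error. A minimal consumer-side sketch (the demo_* names are illustrative):

#include <linux/module.h>
#include <acpi/processor.h>

MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");

static int __init demo_init(void)
{
	/* Namespaced symbol resolves only because of the import above. */
	return acpi_processor_claim_cst_control() ? 0 : -EBUSY;
}
module_init(demo_init);
MODULE_LICENSE("GPL");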
diff --git a/drivers/acpi/acpi_tad.c b/drivers/acpi/acpi_tad.c
index 91d7d90c47da..6d870d97ada6 100644
--- a/drivers/acpi/acpi_tad.c
+++ b/drivers/acpi/acpi_tad.c
@@ -90,19 +90,18 @@ static int acpi_tad_set_real_time(struct device *dev, struct acpi_tad_rt *rt)
args[0].buffer.pointer = (u8 *)rt;
args[0].buffer.length = sizeof(*rt);
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, "_SRT", &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status) || retval)
return -EIO;
return 0;
}
-static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
+static int acpi_tad_evaluate_grt(struct device *dev, struct acpi_tad_rt *rt)
{
acpi_handle handle = ACPI_HANDLE(dev);
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER };
@@ -111,12 +110,7 @@ static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
acpi_status status;
int ret = -EIO;
- pm_runtime_get_sync(dev);
-
status = acpi_evaluate_object(handle, "_GRT", NULL, &output);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status))
goto out_free;
@@ -139,6 +133,21 @@ out_free:
return ret;
}
+static int acpi_tad_get_real_time(struct device *dev, struct acpi_tad_rt *rt)
+{
+ int ret;
+
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
+
+ ret = acpi_tad_evaluate_grt(dev, rt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static char *acpi_tad_rt_next_field(char *s, int *val)
{
char *p;
@@ -266,12 +275,11 @@ static int acpi_tad_wake_set(struct device *dev, char *method, u32 timer_id,
args[0].integer.value = timer_id;
args[1].integer.value = value;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, method, &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status) || retval)
return -EIO;
@@ -314,12 +322,11 @@ static ssize_t acpi_tad_wake_read(struct device *dev, char *buf, char *method,
args[0].integer.value = timer_id;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, method, &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status))
return -EIO;
@@ -370,12 +377,11 @@ static int acpi_tad_clear_status(struct device *dev, u32 timer_id)
args[0].integer.value = timer_id;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, "_CWS", &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status) || retval)
return -EIO;
@@ -411,12 +417,11 @@ static ssize_t acpi_tad_status_read(struct device *dev, char *buf, u32 timer_id)
args[0].integer.value = timer_id;
- pm_runtime_get_sync(dev);
+ PM_RUNTIME_ACQUIRE(dev, pm);
+ if (PM_RUNTIME_ACQUIRE_ERR(&pm))
+ return -ENXIO;
status = acpi_evaluate_integer(handle, "_GWS", &arg_list, &retval);
-
- pm_runtime_put_sync(dev);
-
if (ACPI_FAILURE(status))
return -EIO;
@@ -563,21 +568,24 @@ static void acpi_tad_remove(struct platform_device *pdev)
device_init_wakeup(dev, false);
- pm_runtime_get_sync(dev);
+ if (dd->capabilities & ACPI_TAD_RT)
+ sysfs_remove_group(&dev->kobj, &acpi_tad_time_attr_group);
if (dd->capabilities & ACPI_TAD_DC_WAKE)
sysfs_remove_group(&dev->kobj, &acpi_tad_dc_attr_group);
sysfs_remove_group(&dev->kobj, &acpi_tad_attr_group);
- acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER);
- acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER);
- if (dd->capabilities & ACPI_TAD_DC_WAKE) {
- acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER);
- acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER);
+ scoped_guard(pm_runtime_noresume, dev) {
+ acpi_tad_disable_timer(dev, ACPI_TAD_AC_TIMER);
+ acpi_tad_clear_status(dev, ACPI_TAD_AC_TIMER);
+ if (dd->capabilities & ACPI_TAD_DC_WAKE) {
+ acpi_tad_disable_timer(dev, ACPI_TAD_DC_TIMER);
+ acpi_tad_clear_status(dev, ACPI_TAD_DC_TIMER);
+ }
}
- pm_runtime_put_sync(dev);
+ pm_runtime_suspend(dev);
pm_runtime_disable(dev);
acpi_remove_cmos_rtc_space_handler(handle);
}
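The conversions above replace open-coded pm_runtime_get_sync()/pm_runtime_put_sync() pairs with an acquire guard that drops the runtime-PM reference automatically at end of scope, and that can report a resume failure up front instead of evaluating an AML method against a powered-down device. Assuming the guard semantics this series introduces (PM_RUNTIME_ACQUIRE and PM_RUNTIME_ACQUIRE_ERR are taken on trust from the diff), each converted helper reduces to:

static int demo_evaluate(struct device *dev, acpi_handle handle)
{
	unsigned long long retval;
	acpi_status status;

	PM_RUNTIME_ACQUIRE(dev, pm);
	if (PM_RUNTIME_ACQUIRE_ERR(&pm))
		return -ENXIO;		/* device failed to resume */

	status = acpi_evaluate_integer(handle, "_GWS", NULL, &retval);
	if (ACPI_FAILURE(status))
		return -EIO;		/* guard still drops the reference */

	return 0;
}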
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 103f29661576..be8e7e18abca 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
- list_for_each_entry(dev, &video->video_device_list, entry)
+ list_for_each_entry(dev, &video->video_device_list, entry) {
acpi_video_dev_remove_notify_handler(dev);
+ cancel_delayed_work_sync(&dev->switch_brightness_work);
+ }
mutex_unlock(&video->device_list_lock);
acpi_video_bus_stop_devices(video);
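The ordering here matters: the notify handler is what queues switch_brightness_work, so removing it first guarantees nothing new can be queued, and cancel_delayed_work_sync() then waits out any work already in flight before the device disappears. The same two-step shutdown applies to any event source paired with a deferred worker; a generic sketch with hypothetical demo_* names:

static void demo_teardown(struct demo_dev *dd)
{
	demo_unregister_source(dd);		/* stop new queueing */
	cancel_delayed_work_sync(&dd->work);	/* drain what is in flight */
}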
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index fe6d38b43c9a..91241bd6917a 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -37,7 +37,7 @@ struct acpi_db_argument_info {
struct acpi_db_execute_walk {
u32 count;
u32 max_count;
- char name_seg[ACPI_NAMESEG_SIZE + 1] ACPI_NONSTRING;
+ char name_seg[ACPI_NAMESEG_SIZE + 1];
};
#define PARAM_LIST(pl) pl
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 0c41f0097e8d..f98640086f4e 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -1141,7 +1141,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_PIN_GROUP_FUNCTION 0x91
#define ACPI_RESOURCE_NAME_PIN_GROUP_CONFIG 0x92
#define ACPI_RESOURCE_NAME_CLOCK_INPUT 0x93
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x94
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x93
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 76c5ed02e916..da2c45880cc7 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -450,7 +450,8 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DSM",
METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
- ACPI_TYPE_ANY) | ARG_COUNT_IS_MINIMUM,
+ ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) |
+ ARG_COUNT_IS_MINIMUM,
METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
{{"_DSS", METHOD_1ARGS(ACPI_TYPE_INTEGER),
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index fef6fb29ece4..45ec32e81903 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -462,7 +462,6 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
struct acpi_walk_state *next_walk_state = NULL;
union acpi_operand_object *obj_desc;
struct acpi_evaluate_info *info;
- u32 i;
ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);
@@ -484,10 +483,17 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
}
if (this_walk_state->num_operands < obj_desc->method.param_count) {
- ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]",
+ ACPI_ERROR((AE_INFO, "Missing argument(s) for method [%4.4s]",
acpi_ut_get_node_name(method_node)));
- return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
+ return_ACPI_STATUS(AE_AML_TOO_FEW_ARGUMENTS);
+ }
+
+ else if (this_walk_state->num_operands > obj_desc->method.param_count) {
+ ACPI_ERROR((AE_INFO, "Too many arguments for method [%4.4s]",
+ acpi_ut_get_node_name(method_node)));
+
+ return_ACPI_STATUS(AE_AML_TOO_MANY_ARGUMENTS);
}
/* Init for new method, possibly wait on method mutex */
@@ -546,14 +552,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
* Delete the operands on the previous walkstate operand stack
* (they were copied to new objects)
*/
- for (i = 0; i < obj_desc->method.param_count; i++) {
- acpi_ut_remove_reference(this_walk_state->operands[i]);
- this_walk_state->operands[i] = NULL;
- }
-
- /* Clear the operand stack */
-
- this_walk_state->num_operands = 0;
+ acpi_ds_clear_operands(this_walk_state);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index fa3e0d00d1ca..df2a4ab0e0da 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -42,6 +42,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
return_ACPI_STATUS(AE_OK);
}
+ if (!acpi_gbl_use_global_lock) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index a2ac06a26e92..5670ff5a43cd 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -169,9 +169,12 @@ acpi_ns_walk_namespace(acpi_object_type type,
if (start_node == ACPI_ROOT_OBJECT) {
start_node = acpi_gbl_root_node;
- if (!start_node) {
- return_ACPI_STATUS(AE_NO_NAMESPACE);
- }
+ }
+
+ /* Avoid walking the namespace if the StartNode is NULL */
+
+ if (!start_node) {
+ return_ACPI_STATUS(AE_NO_NAMESPACE);
}
/* Null child means "get first node" */
diff --git a/drivers/acpi/acpica/psopinfo.c b/drivers/acpi/acpica/psopinfo.c
index 1c8044ffcb97..532ea307a675 100644
--- a/drivers/acpi/acpica/psopinfo.c
+++ b/drivers/acpi/acpica/psopinfo.c
@@ -34,7 +34,7 @@ static const u8 acpi_gbl_argument_count[] =
const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
{
-#ifdef ACPI_DEBUG_OUTPUT
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
const char *opcode_name = "Unknown AML opcode";
#endif
@@ -102,11 +102,11 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode)
default:
break;
}
-#endif
/* Unknown AML opcode */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "%s [%4.4X]\n", opcode_name, opcode));
+#endif
return (&acpi_gbl_aml_op_info[_UNK]);
}
diff --git a/drivers/acpi/acpica/tbprint.c b/drivers/acpi/acpica/tbprint.c
index fd64460a2e26..e5631027f7f1 100644
--- a/drivers/acpi/acpica/tbprint.c
+++ b/drivers/acpi/acpica/tbprint.c
@@ -95,6 +95,11 @@ acpi_tb_print_table_header(acpi_physical_address address,
{
struct acpi_table_header local_header;
+#pragma GCC diagnostic push
+#if defined(__GNUC__) && __GNUC__ >= 11
+#pragma GCC diagnostic ignored "-Wstringop-overread"
+#endif
+
if (ACPI_COMPARE_NAMESEG(header->signature, ACPI_SIG_FACS)) {
/* FACS only has signature and length fields */
@@ -121,6 +126,14 @@ acpi_tb_print_table_header(acpi_physical_address address,
ACPI_CAST_PTR(struct acpi_table_rsdp,
header)->revision,
local_header.oem_id));
+ } else if (acpi_gbl_CDAT && !acpi_ut_valid_nameseg(header->signature)) {
+
+ /* CDAT does not use the common ACPI table header */
+
+ ACPI_INFO(("%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_SIG_CDAT, ACPI_FORMAT_UINT64(address),
+ ACPI_CAST_PTR(struct acpi_table_cdat,
+ header)->length));
} else {
/* Standard ACPI table with full common header */
@@ -135,4 +148,5 @@ acpi_tb_print_table_header(acpi_physical_address address,
local_header.asl_compiler_id,
local_header.asl_compiler_revision));
}
+#pragma GCC diagnostic pop
}
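The push/ignore/pop dance above scopes the -Wstringop-overread suppression to this one function: GCC 11+ flags the header reads because FACS and RSDP objects are shorter than struct acpi_table_header. For reference, the generic shape of the idiom (non-GCC compilers ignore "GCC diagnostic" pragmas, and the version check keeps older GCCs from complaining about an option they do not know):

#pragma GCC diagnostic push
#if defined(__GNUC__) && __GNUC__ >= 11
#pragma GCC diagnostic ignored "-Wstringop-overread"
#endif
	/* ... code that deliberately reads a shorter-than-declared object ... */
#pragma GCC diagnostic pop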
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index 2561b045acc7..305c240a303f 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -182,6 +182,7 @@ bool einj_initialized __ro_after_init;
static void __iomem *einj_param;
static u32 v5param_size;
+static u32 v66param_size;
static bool is_v2;
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
@@ -283,6 +284,24 @@ static void check_vendor_extension(u64 paddr,
acpi_os_unmap_iomem(p, sizeof(v));
}
+static u32 einjv2_init(struct einjv2_extension_struct *e)
+{
+ if (e->revision != 1) {
+ pr_info("Unknown v2 extension revision %u\n", e->revision);
+ return 0;
+ }
+ if (e->length < sizeof(*e) || e->length > PAGE_SIZE) {
+ pr_info(FW_BUG "Bad1 v2 extension length %u\n", e->length);
+ return 0;
+ }
+ if ((e->length - sizeof(*e)) % sizeof(e->component_arr[0])) {
+ pr_info(FW_BUG "Bad2 v2 extension length %u\n", e->length);
+ return 0;
+ }
+
+ return (e->length - sizeof(*e)) / sizeof(e->component_arr[0]);
+}
+
static void __iomem *einj_get_parameter_address(void)
{
int i;
@@ -310,28 +329,21 @@ static void __iomem *einj_get_parameter_address(void)
v5param_size = sizeof(v5param);
p = acpi_os_map_iomem(pa_v5, sizeof(*p));
if (p) {
- int offset, len;
-
memcpy_fromio(&v5param, p, v5param_size);
acpi5 = 1;
check_vendor_extension(pa_v5, &v5param);
- if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) {
- len = v5param.einjv2_struct.length;
- offset = offsetof(struct einjv2_extension_struct, component_arr);
- max_nr_components = (len - offset) /
- sizeof(v5param.einjv2_struct.component_arr[0]);
- /*
- * The first call to acpi_os_map_iomem above does not include the
- * component array, instead it is used to read and calculate maximum
- * number of components supported by the system. Below, the mapping
- * is expanded to include the component array.
- */
+ if (available_error_type & ACPI65_EINJV2_SUPP) {
+ struct einjv2_extension_struct *e;
+
+ e = &v5param.einjv2_struct;
+ max_nr_components = einjv2_init(e);
+
+ /* remap including einjv2_extension_struct */
acpi_os_unmap_iomem(p, v5param_size);
- offset = offsetof(struct set_error_type_with_address, einjv2_struct);
- v5param_size = offset + struct_size(&v5param.einjv2_struct,
- component_arr, max_nr_components);
- p = acpi_os_map_iomem(pa_v5, v5param_size);
+ v66param_size = v5param_size - sizeof(*e) + e->length;
+ p = acpi_os_map_iomem(pa_v5, v66param_size);
}
+
return p;
}
}
@@ -527,6 +539,7 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
u64 param3, u64 param4)
{
struct apei_exec_context ctx;
+ u32 param_size = is_v2 ? v66param_size : v5param_size;
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
int i, rc;
@@ -539,11 +552,11 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
if (acpi5) {
struct set_error_type_with_address *v5param;
- v5param = kmalloc(v5param_size, GFP_KERNEL);
+ v5param = kmalloc(param_size, GFP_KERNEL);
if (!v5param)
return -ENOMEM;
- memcpy_fromio(v5param, einj_param, v5param_size);
+ memcpy_fromio(v5param, einj_param, param_size);
v5param->type = type;
if (type & ACPI5_VENDOR_BIT) {
switch (vendor_flags) {
@@ -601,7 +614,7 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
break;
}
}
- memcpy_toio(einj_param, v5param, v5param_size);
+ memcpy_toio(einj_param, v5param, param_size);
kfree(v5param);
} else {
rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
@@ -656,6 +669,43 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
return rc;
}
+/* Allow almost all address types except MMIO. */
+static bool is_allowed_range(u64 base_addr, u64 size)
+{
+ int i;
+ /*
+ * An MMIO region is usually claimed with IORESOURCE_MEM + IORES_DESC_NONE.
+ * However, IORES_DESC_NONE is treated like a wildcard when we check whether
+ * a region intersects with a known resource, so do an allow-list check for
+ * IORES_DESCs that are definitely, or most likely, not MMIO.
+ */
+ int non_mmio_desc[] = {
+ IORES_DESC_CRASH_KERNEL,
+ IORES_DESC_ACPI_TABLES,
+ IORES_DESC_ACPI_NV_STORAGE,
+ IORES_DESC_PERSISTENT_MEMORY,
+ IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+ /* Treat IORES_DESC_DEVICE_PRIVATE_MEMORY as MMIO. */
+ IORES_DESC_RESERVED,
+ IORES_DESC_SOFT_RESERVED,
+ };
+
+ if (region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+ == REGION_INTERSECTS)
+ return true;
+
+ for (i = 0; i < ARRAY_SIZE(non_mmio_desc); ++i) {
+ if (region_intersects(base_addr, size, IORESOURCE_MEM, non_mmio_desc[i])
+ == REGION_INTERSECTS)
+ return true;
+ }
+
+ if (arch_is_platform_page(base_addr))
+ return true;
+
+ return false;
+}
+
/* Inject the specified hardware error */
int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
u64 param4)
@@ -702,19 +752,15 @@ int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2, u64 param3,
* Disallow crazy address masks that give BIOS leeway to pick
* injection address almost anywhere. Insist on page or
* better granularity and that target address is normal RAM or
- * NVDIMM.
+ * anything else that is not MMIO.
*/
base_addr = param1 & param2;
size = ~param2 + 1;
- if (((param2 & PAGE_MASK) != PAGE_MASK) ||
- ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
- != REGION_INTERSECTS) &&
- (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_SOFT_RESERVED)
- != REGION_INTERSECTS) &&
- !arch_is_platform_page(base_addr)))
+ if ((param2 & PAGE_MASK) != PAGE_MASK)
+ return -EINVAL;
+
+ if (!is_allowed_range(base_addr, size))
return -EINVAL;
if (is_zero_pfn(base_addr >> PAGE_SHIFT))
@@ -1099,9 +1145,14 @@ static void einj_remove(struct faux_device *fdev)
struct apei_exec_context ctx;
if (einj_param) {
- acpi_size size = (acpi5) ?
- v5param_size :
- sizeof(struct einj_parameter);
+ acpi_size size;
+
+ if (v66param_size)
+ size = v66param_size;
+ else if (acpi5)
+ size = v5param_size;
+ else
+ size = sizeof(struct einj_parameter);
acpi_os_unmap_iomem(einj_param, size);
if (vendor_errors.size)
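
As background for the allow-list above: region_intersects() reports whether a physical range overlaps a resource of a given type and descriptor, and a query descriptor of IORES_DESC_NONE matches any descriptor, which is why MMIO cannot simply be excluded by descriptor and the patch enumerates known-safe descriptors instead. A standalone sketch of the same decision shape over a toy resource table (all names and ranges below are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum desc { DESC_NONE, DESC_ACPI_TABLES, DESC_PERSISTENT_MEMORY, DESC_RESERVED };
enum type { TYPE_SYSTEM_RAM, TYPE_MEM };

struct res { uint64_t start, end; enum type type; enum desc desc; };

/* Toy table standing in for the kernel's iomem resource tree. */
static const struct res res_tab[] = {
	{ 0x00100000, 0x3fffffff, TYPE_SYSTEM_RAM, DESC_NONE },
	{ 0x40000000, 0x4fffffff, TYPE_MEM, DESC_PERSISTENT_MEMORY },
	{ 0xfe000000, 0xfeffffff, TYPE_MEM, DESC_NONE },	/* MMIO */
};

static bool intersects(uint64_t base, uint64_t size, enum type t, enum desc d)
{
	for (size_t i = 0; i < sizeof(res_tab) / sizeof(res_tab[0]); i++) {
		const struct res *r = &res_tab[i];

		if (base <= r->end && base + size - 1 >= r->start &&
		    r->type == t && r->desc == d)
			return true;
	}
	return false;
}

/* Same shape as is_allowed_range(): RAM passes, then a descriptor allow list. */
static bool allowed(uint64_t base, uint64_t size)
{
	static const enum desc non_mmio[] = {
		DESC_ACPI_TABLES, DESC_PERSISTENT_MEMORY, DESC_RESERVED,
	};

	if (intersects(base, size, TYPE_SYSTEM_RAM, DESC_NONE))
		return true;
	for (size_t i = 0; i < sizeof(non_mmio) / sizeof(non_mmio[0]); i++)
		if (intersects(base, size, TYPE_MEM, non_mmio[i]))
			return true;
	return false;	/* the MMIO window above ends up here */
}

int main(void)
{
	printf("ram=%d pmem=%d mmio=%d\n",
	       allowed(0x00200000, 0x1000),	/* 1 */
	       allowed(0x40000000, 0x1000),	/* 1 */
	       allowed(0xfe000000, 0x1000));	/* 0 */
	return 0;
}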
diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c
index 246076341e8c..ff0e8bf8e97a 100644
--- a/drivers/acpi/apei/erst-dbg.c
+++ b/drivers/acpi/apei/erst-dbg.c
@@ -60,9 +60,8 @@ static long erst_dbg_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case APEI_ERST_CLEAR_RECORD:
- rc = copy_from_user(&record_id, (void __user *)arg,
- sizeof(record_id));
- if (rc)
+ if (copy_from_user(&record_id, (void __user *)arg,
+ sizeof(record_id)))
return -EFAULT;
return erst_clear(record_id);
case APEI_ERST_GET_RECORD_COUNT:
@@ -175,8 +174,7 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf,
erst_dbg_buf = p;
erst_dbg_buf_len = usize;
}
- rc = copy_from_user(erst_dbg_buf, ubuf, usize);
- if (rc) {
+ if (copy_from_user(erst_dbg_buf, ubuf, usize)) {
rc = -EFAULT;
goto out;
}
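
The two erst-dbg hunks rely on copy_from_user()'s contract: it returns the number of bytes left uncopied (zero on full success), never an errno, so its result must be translated to -EFAULT rather than returned. A runnable sketch of that contract using a stand-in copier (fake_copy_from_user() and its fault parameter are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Stand-in with copy_from_user()'s return convention: the number of
 * bytes it could NOT copy, so 0 means complete success.
 */
static unsigned long fake_copy_from_user(void *dst, const void *src,
					 unsigned long n, unsigned long fault_at)
{
	unsigned long ok = n < fault_at ? n : fault_at;

	memcpy(dst, src, ok);
	return n - ok;
}

int main(void)
{
	uint64_t src = 0x1234, dst = 0;

	/* Full copy: returns 0, proceed with the value. */
	if (fake_copy_from_user(&dst, &src, sizeof(dst), sizeof(dst)))
		puts("-EFAULT");
	else
		printf("copied 0x%llx\n", (unsigned long long)dst);

	/* Fault after 3 bytes: nonzero return maps to -EFAULT, not a count. */
	if (fake_copy_from_user(&dst, &src, sizeof(dst), 3))
		puts("-EFAULT");
	return 0;
}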
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index a0d54993edb3..0dc767392a6c 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -22,6 +22,7 @@
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
@@ -43,6 +44,7 @@
#include <linux/uuid.h>
#include <linux/ras.h>
#include <linux/task_work.h>
+#include <linux/vmcore_info.h>
#include <acpi/actbl1.h>
#include <acpi/ghes.h>
@@ -505,12 +507,6 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags)
return false;
pfn = PHYS_PFN(physical_addr);
- if (!pfn_valid(pfn) && !arch_is_platform_page(physical_addr)) {
- pr_warn_ratelimited(FW_WARN GHES_PFX
- "Invalid address in generic error data: %#llx\n",
- physical_addr);
- return false;
- }
if (flags == MF_ACTION_REQUIRED && current->mm) {
twcb = (void *)gen_pool_alloc(ghes_estatus_pool, sizeof(*twcb));
@@ -552,26 +548,25 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata,
}
static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
- int sev, bool sync)
+ int sev, bool sync)
{
struct cper_sec_proc_arm *err = acpi_hest_get_payload(gdata);
int flags = sync ? MF_ACTION_REQUIRED : 0;
+ char error_type[120];
bool queued = false;
int sec_sev, i;
char *p;
- log_arm_hw_error(err);
-
sec_sev = ghes_severity(gdata->error_severity);
+ log_arm_hw_error(err, sec_sev);
if (sev != GHES_SEV_RECOVERABLE || sec_sev != GHES_SEV_RECOVERABLE)
return false;
p = (char *)(err + 1);
for (i = 0; i < err->err_info_num; i++) {
struct cper_arm_err_info *err_info = (struct cper_arm_err_info *)p;
- bool is_cache = (err_info->type == CPER_ARM_CACHE_ERROR);
+ bool is_cache = err_info->type & CPER_ARM_CACHE_ERROR;
bool has_pa = (err_info->validation_bits & CPER_ARM_INFO_VALID_PHYSICAL_ADDR);
- const char *error_type = "unknown error";
/*
* The field (err_info->error_info & BIT(26)) is fixed to set to
@@ -585,12 +580,15 @@ static bool ghes_handle_arm_hw_error(struct acpi_hest_generic_data *gdata,
continue;
}
- if (err_info->type < ARRAY_SIZE(cper_proc_error_type_strs))
- error_type = cper_proc_error_type_strs[err_info->type];
+ cper_bits_to_str(error_type, sizeof(error_type),
+ FIELD_GET(CPER_ARM_ERR_TYPE_MASK, err_info->type),
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
pr_warn_ratelimited(FW_WARN GHES_PFX
- "Unhandled processor error type: %s\n",
- error_type);
+ "Unhandled processor error type 0x%02x: %s%s\n",
+ err_info->type, error_type,
+ (err_info->type & ~CPER_ARM_ERR_TYPE_MASK) ? " with reserved bit(s)" : "");
p += err_info->length;
}
@@ -867,6 +865,40 @@ int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd)
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, "CXL");
+static void ghes_log_hwerr(int sev, guid_t *sec_type)
+{
+ if (sev != CPER_SEV_RECOVERABLE)
+ return;
+
+ if (guid_equal(sec_type, &CPER_SEC_PROC_ARM) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_GENERIC) ||
+ guid_equal(sec_type, &CPER_SEC_PROC_IA)) {
+ hwerr_log_error_type(HWERR_RECOV_CPU);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID) ||
+ guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) {
+ hwerr_log_error_type(HWERR_RECOV_CXL);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_PCIE) ||
+ guid_equal(sec_type, &CPER_SEC_PCI_X_BUS)) {
+ hwerr_log_error_type(HWERR_RECOV_PCI);
+ return;
+ }
+
+ if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ hwerr_log_error_type(HWERR_RECOV_MEMORY);
+ return;
+ }
+
+ hwerr_log_error_type(HWERR_RECOV_OTHERS);
+}
+
static void ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -888,6 +920,7 @@ static void ghes_do_proc(struct ghes *ghes,
if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
fru_text = gdata->fru_text;
+ ghes_log_hwerr(sev, sec_type);
if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
struct cper_sec_mem_err *mem_err = acpi_hest_get_payload(gdata);
@@ -895,11 +928,9 @@ static void ghes_do_proc(struct ghes *ghes,
arch_apei_report_mem_error(sev, mem_err);
queued = ghes_handle_memory_failure(gdata, sev, sync);
- }
- else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) {
ghes_handle_aer(gdata);
- }
- else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
+ } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
} else if (guid_equal(sec_type, &CPER_SEC_CXL_PROT_ERR)) {
struct cxl_cper_sec_prot_err *prot_err = acpi_hest_get_payload(gdata);
@@ -1207,12 +1238,10 @@ static int ghes_notify_hed(struct notifier_block *this, unsigned long event,
int ret = NOTIFY_DONE;
spin_lock_irqsave(&ghes_notify_lock_irq, flags);
- rcu_read_lock();
list_for_each_entry_rcu(ghes, &ghes_hed, list) {
if (!ghes_proc(ghes))
ret = NOTIFY_OK;
}
- rcu_read_unlock();
spin_unlock_irqrestore(&ghes_notify_lock_irq, flags);
return ret;
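
The ARM error hunk stops treating err_info->type as a plain enum index and decodes it as a bit mask, so several error types can be reported at once. A self-contained sketch of that style of decoding (the helper and string table below are illustrative stand-ins for the kernel's cper_bits_to_str() and cper_proc_error_type_strs):

#include <stddef.h>
#include <stdio.h>

static const char *const proc_error_type_strs[] = {
	"cache error", "TLB error", "bus error", "micro-architectural error",
};

/* Append the name of every set bit to buf, comma separated. */
static char *bits_to_str(char *buf, size_t len, unsigned long bits,
			 const char *const strs[], size_t nstrs)
{
	size_t pos = 0;

	buf[0] = '\0';
	for (size_t i = 0; i < nstrs; i++) {
		if (!(bits & (1UL << i)))
			continue;
		pos += snprintf(buf + pos, len - pos, "%s%s",
				pos ? ", " : "", strs[i]);
		if (pos >= len)
			break;
	}
	return buf;
}

int main(void)
{
	char s[120];

	/* Bits 0 and 2 set at once, which a bitmask type permits. */
	printf("%s\n", bits_to_str(s, sizeof(s), 0x5,
				   proc_error_type_strs, 4));
	return 0;
}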
diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig
index b3ed6212244c..f2fd79f22e7d 100644
--- a/drivers/acpi/arm64/Kconfig
+++ b/drivers/acpi/arm64/Kconfig
@@ -21,3 +21,6 @@ config ACPI_AGDI
config ACPI_APMT
bool
+
+config ACPI_MPAM
+ bool
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
index 05ecde9eaabe..9390b57cb564 100644
--- a/drivers/acpi/arm64/Makefile
+++ b/drivers/acpi/arm64/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_ACPI_APMT) += apmt.o
obj-$(CONFIG_ACPI_FFH) += ffh.o
obj-$(CONFIG_ACPI_GTDT) += gtdt.o
obj-$(CONFIG_ACPI_IORT) += iort.o
+obj-$(CONFIG_ACPI_MPAM) += mpam.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ARM_AMBA) += amba.o
obj-y += dma.o init.o
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 70f8290b659d..ffc867bac2d6 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -303,40 +303,6 @@ error:
return -EINVAL;
}
-/**
- * acpi_arch_timer_mem_init() - Get the info of all GT blocks in GTDT table.
- * @timer_mem: The pointer to the array of struct arch_timer_mem for returning
- * the result of parsing. The element number of this array should
- * be platform_timer_count(the total number of platform timers).
- * @timer_count: It points to a integer variable which is used for storing the
- * number of GT blocks we have parsed.
- *
- * Return: 0 if success, -EINVAL/-ENODEV if error.
- */
-int __init acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem,
- int *timer_count)
-{
- int ret;
- void *platform_timer;
-
- *timer_count = 0;
- for_each_platform_timer(platform_timer) {
- if (is_timer_block(platform_timer)) {
- ret = gtdt_parse_timer_block(platform_timer, timer_mem);
- if (ret)
- return ret;
- timer_mem++;
- (*timer_count)++;
- }
- }
-
- if (*timer_count)
- pr_info("found %d memory-mapped timer block(s).\n",
- *timer_count);
-
- return 0;
-}
-
/*
* Initialize a SBSA generic Watchdog platform device info from GTDT
*/
@@ -388,11 +354,11 @@ static int __init gtdt_import_sbsa_gwdt(struct acpi_gtdt_watchdog *wd,
return 0;
}
-static int __init gtdt_sbsa_gwdt_init(void)
+static int __init gtdt_platform_timer_init(void)
{
void *platform_timer;
struct acpi_table_header *table;
- int ret, timer_count, gwdt_count = 0;
+ int ret, timer_count, gwdt_count = 0, mmio_timer_count = 0;
if (acpi_disabled)
return 0;
@@ -414,20 +380,41 @@ static int __init gtdt_sbsa_gwdt_init(void)
goto out_put_gtdt;
for_each_platform_timer(platform_timer) {
+ ret = 0;
+
if (is_non_secure_watchdog(platform_timer)) {
ret = gtdt_import_sbsa_gwdt(platform_timer, gwdt_count);
if (ret)
- break;
+ continue;
gwdt_count++;
+ } else if (is_timer_block(platform_timer)) {
+ struct arch_timer_mem atm = {};
+ struct platform_device *pdev;
+
+ ret = gtdt_parse_timer_block(platform_timer, &atm);
+ if (ret)
+ continue;
+
+ pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer",
+ mmio_timer_count, &atm,
+ sizeof(atm));
+ if (IS_ERR(pdev)) {
+ pr_err("Can't register timer %d\n", mmio_timer_count);
+ continue;
+ }
+
+ mmio_timer_count++;
}
}
if (gwdt_count)
pr_info("found %d SBSA generic Watchdog(s).\n", gwdt_count);
+ if (mmio_timer_count)
+ pr_info("found %d Generic MMIO timer(s).\n", mmio_timer_count);
out_put_gtdt:
acpi_put_table(table);
return ret;
}
-device_initcall(gtdt_sbsa_gwdt_init);
+device_initcall(gtdt_platform_timer_init);
diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c
new file mode 100644
index 000000000000..84963a20c3e7
--- /dev/null
+++ b/drivers/acpi/arm64/mpam.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2025 Arm Ltd.
+
+/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */
+
+#define pr_fmt(fmt) "ACPI MPAM: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_mpam.h>
+#include <linux/bits.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/platform_device.h>
+
+#include <acpi/processor.h>
+
+/*
+ * Flags for acpi_table_mpam_msc.*_interrupt_flags.
+ * See 2.1.1 Interrupt Flags, Table 5, of DEN0065B_MPAM_ACPI_3.0-bet.
+ */
+#define ACPI_MPAM_MSC_IRQ_MODE BIT(0)
+#define ACPI_MPAM_MSC_IRQ_TYPE_MASK GENMASK(2, 1)
+#define ACPI_MPAM_MSC_IRQ_TYPE_WIRED 0
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK BIT(3)
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR 0
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER 1
+#define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID BIT(4)
+
+/*
+ * Encodings for the MSC node body interface type field.
+ * See 2.1 MPAM MSC node, Table 4 of DEN0065B_MPAM_ACPI_3.0-bet.
+ */
+#define ACPI_MPAM_MSC_IFACE_MMIO 0x00
+#define ACPI_MPAM_MSC_IFACE_PCC 0x0a
+
+static bool _is_ppi_partition(u32 flags)
+{
+ u32 aff_type, is_ppi;
+ bool ret;
+
+ is_ppi = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_VALID, flags);
+ if (!is_ppi)
+ return false;
+
+ aff_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_MASK, flags);
+ ret = (aff_type == ACPI_MPAM_MSC_IRQ_AFFINITY_TYPE_PROCESSOR_CONTAINER);
+ if (ret)
+ pr_err_once("Partitioned interrupts not supported\n");
+
+ return ret;
+}
+
+static int acpi_mpam_register_irq(struct platform_device *pdev,
+ u32 intid, u32 flags)
+{
+ int irq;
+ u32 int_type;
+ int trigger;
+
+ if (!intid)
+ return -EINVAL;
+
+ if (_is_ppi_partition(flags))
+ return -EINVAL;
+
+ trigger = FIELD_GET(ACPI_MPAM_MSC_IRQ_MODE, flags);
+ int_type = FIELD_GET(ACPI_MPAM_MSC_IRQ_TYPE_MASK, flags);
+ if (int_type != ACPI_MPAM_MSC_IRQ_TYPE_WIRED)
+ return -EINVAL;
+
+ irq = acpi_register_gsi(&pdev->dev, intid, trigger, ACPI_ACTIVE_HIGH);
+ if (irq < 0)
+ pr_err_once("Failed to register interrupt 0x%x with ACPI\n", intid);
+
+ return irq;
+}
+
+static void acpi_mpam_parse_irqs(struct platform_device *pdev,
+ struct acpi_mpam_msc_node *tbl_msc,
+ struct resource *res, int *res_idx)
+{
+ u32 flags, intid;
+ int irq;
+
+ intid = tbl_msc->overflow_interrupt;
+ flags = tbl_msc->overflow_interrupt_flags;
+ irq = acpi_mpam_register_irq(pdev, intid, flags);
+ if (irq > 0)
+ res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "overflow");
+
+ intid = tbl_msc->error_interrupt;
+ flags = tbl_msc->error_interrupt_flags;
+ irq = acpi_mpam_register_irq(pdev, intid, flags);
+ if (irq > 0)
+ res[(*res_idx)++] = DEFINE_RES_IRQ_NAMED(irq, "error");
+}
+
+static int acpi_mpam_parse_resource(struct mpam_msc *msc,
+ struct acpi_mpam_resource_node *res)
+{
+ int level, nid;
+ u32 cache_id;
+
+ switch (res->locator_type) {
+ case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE:
+ cache_id = res->locator.cache_locator.cache_reference;
+ level = find_acpi_cache_level_from_id(cache_id);
+ if (level <= 0) {
+ pr_err_once("Bad level (%d) for cache with id %u\n", level, cache_id);
+ return -EINVAL;
+ }
+ return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE,
+ level, cache_id);
+ case ACPI_MPAM_LOCATION_TYPE_MEMORY:
+ nid = pxm_to_node(res->locator.memory_locator.proximity_domain);
+ if (nid == NUMA_NO_NODE) {
+ pr_debug("Bad proximity domain %lld, using node 0 instead\n",
+ res->locator.memory_locator.proximity_domain);
+ nid = 0;
+ }
+ return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY,
+ MPAM_CLASS_ID_DEFAULT, nid);
+ default:
+ /* These get discovered later and are treated as unknown */
+ return 0;
+ }
+}
+
+int acpi_mpam_parse_resources(struct mpam_msc *msc,
+ struct acpi_mpam_msc_node *tbl_msc)
+{
+ int i, err;
+ char *ptr, *table_end;
+ struct acpi_mpam_resource_node *resource;
+
+ table_end = (char *)tbl_msc + tbl_msc->length;
+ ptr = (char *)(tbl_msc + 1);
+ for (i = 0; i < tbl_msc->num_resource_nodes; i++) {
+ u64 max_deps, remaining_table;
+
+ if (ptr + sizeof(*resource) > table_end)
+ return -EINVAL;
+
+ resource = (struct acpi_mpam_resource_node *)ptr;
+
+ remaining_table = table_end - ptr;
+ max_deps = remaining_table / sizeof(struct acpi_mpam_func_deps);
+ if (resource->num_functional_deps > max_deps) {
+ pr_debug("MSC has impossible number of functional dependencies\n");
+ return -EINVAL;
+ }
+
+ err = acpi_mpam_parse_resource(msc, resource);
+ if (err)
+ return err;
+
+ ptr += sizeof(*resource);
+ ptr += resource->num_functional_deps * sizeof(struct acpi_mpam_func_deps);
+ }
+
+ return 0;
+}
+
+/*
+ * Creates the device power management link and returns true if the
+ * acpi id is valid and usable for cpu affinity. This is the case
+ * when the linked device is a processor or a processor container.
+ */
+static bool __init parse_msc_pm_link(struct acpi_mpam_msc_node *tbl_msc,
+ struct platform_device *pdev,
+ u32 *acpi_id)
+{
+ char hid[sizeof(tbl_msc->hardware_id_linked_device) + 1] = { 0 };
+ bool acpi_id_valid = false;
+ struct acpi_device *buddy;
+ char uid[11];
+ int len;
+
+ memcpy(hid, &tbl_msc->hardware_id_linked_device,
+ sizeof(tbl_msc->hardware_id_linked_device));
+
+ if (!strcmp(hid, ACPI_PROCESSOR_CONTAINER_HID)) {
+ *acpi_id = tbl_msc->instance_id_linked_device;
+ acpi_id_valid = true;
+ }
+
+ len = snprintf(uid, sizeof(uid), "%u",
+ tbl_msc->instance_id_linked_device);
+ if (len >= sizeof(uid)) {
+ pr_debug("Failed to convert uid of device for power management.");
+ return acpi_id_valid;
+ }
+
+ buddy = acpi_dev_get_first_match_dev(hid, uid, -1);
+ if (buddy) {
+ device_link_add(&pdev->dev, &buddy->dev, DL_FLAG_STATELESS);
+ acpi_dev_put(buddy);
+ }
+
+ return acpi_id_valid;
+}
+
+static int decode_interface_type(struct acpi_mpam_msc_node *tbl_msc,
+ enum mpam_msc_iface *iface)
+{
+ switch (tbl_msc->interface_type) {
+ case ACPI_MPAM_MSC_IFACE_MMIO:
+ *iface = MPAM_IFACE_MMIO;
+ return 0;
+ case ACPI_MPAM_MSC_IFACE_PCC:
+ *iface = MPAM_IFACE_PCC;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct platform_device * __init acpi_mpam_parse_msc(struct acpi_mpam_msc_node *tbl_msc)
+{
+ struct platform_device *pdev __free(platform_device_put) =
+ platform_device_alloc("mpam_msc", tbl_msc->identifier);
+ int next_res = 0, next_prop = 0, err;
+ /* pcc, nrdy, affinity and a sentinel */
+ struct property_entry props[4] = { 0 };
+ /* mmio, 2xirq, no sentinel. */
+ struct resource res[3] = { 0 };
+ struct acpi_device *companion;
+ enum mpam_msc_iface iface;
+ char uid[16];
+ u32 acpi_id;
+
+ if (!pdev)
+ return ERR_PTR(-ENOMEM);
+
+ /* Some power management is described in the namespace: */
+ err = snprintf(uid, sizeof(uid), "%u", tbl_msc->identifier);
+ if (err > 0 && err < sizeof(uid)) {
+ companion = acpi_dev_get_first_match_dev("ARMHAA5C", uid, -1);
+ if (companion) {
+ ACPI_COMPANION_SET(&pdev->dev, companion);
+ acpi_dev_put(companion);
+ } else {
+ pr_debug("MSC.%u: missing namespace entry\n", tbl_msc->identifier);
+ }
+ }
+
+ if (decode_interface_type(tbl_msc, &iface)) {
+ pr_debug("MSC.%u: unknown interface type\n", tbl_msc->identifier);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (iface == MPAM_IFACE_MMIO) {
+ res[next_res++] = DEFINE_RES_MEM_NAMED(tbl_msc->base_address,
+ tbl_msc->mmio_size,
+ "MPAM:MSC");
+ } else if (iface == MPAM_IFACE_PCC) {
+ props[next_prop++] = PROPERTY_ENTRY_U32("pcc-channel",
+ tbl_msc->base_address);
+ }
+
+ acpi_mpam_parse_irqs(pdev, tbl_msc, res, &next_res);
+
+ WARN_ON_ONCE(next_res > ARRAY_SIZE(res));
+ err = platform_device_add_resources(pdev, res, next_res);
+ if (err)
+ return ERR_PTR(err);
+
+ props[next_prop++] = PROPERTY_ENTRY_U32("arm,not-ready-us",
+ tbl_msc->max_nrdy_usec);
+
+ /*
+ * The MSC's CPU affinity is described via its linked power
+ * management device, but only if it points at a Processor or
+ * Processor Container.
+ */
+ if (parse_msc_pm_link(tbl_msc, pdev, &acpi_id))
+ props[next_prop++] = PROPERTY_ENTRY_U32("cpu_affinity", acpi_id);
+
+ WARN_ON_ONCE(next_prop > ARRAY_SIZE(props) - 1);
+ err = device_create_managed_software_node(&pdev->dev, props, NULL);
+ if (err)
+ return ERR_PTR(err);
+
+ /*
+ * Stash the table entry for acpi_mpam_parse_resources() to discover
+ * what this MSC controls.
+ */
+ err = platform_device_add_data(pdev, tbl_msc, tbl_msc->length);
+ if (err)
+ return ERR_PTR(err);
+
+ err = platform_device_add(pdev);
+ if (err)
+ return ERR_PTR(err);
+
+ return_ptr(pdev);
+}
+
+static int __init acpi_mpam_parse(void)
+{
+ char *table_end, *table_offset;
+ struct acpi_mpam_msc_node *tbl_msc;
+ struct platform_device *pdev;
+
+ if (acpi_disabled || !system_supports_mpam())
+ return 0;
+
+ struct acpi_table_header *table __free(acpi_put_table) =
+ acpi_get_table_pointer(ACPI_SIG_MPAM, 0);
+
+ if (IS_ERR(table))
+ return 0;
+
+ if (table->revision < 1) {
+ pr_debug("MPAM ACPI table revision %d not supported\n", table->revision);
+ return 0;
+ }
+
+ table_offset = (char *)(table + 1);
+ table_end = (char *)table + table->length;
+
+ while (table_offset < table_end) {
+ tbl_msc = (struct acpi_mpam_msc_node *)table_offset;
+ if (table_offset + sizeof(*tbl_msc) > table_end ||
+ table_offset + tbl_msc->length > table_end) {
+ pr_err("MSC entry overlaps end of ACPI table\n");
+ return -EINVAL;
+ }
+ table_offset += tbl_msc->length;
+
+ /*
+ * If any of the reserved fields are set, make no attempt to
+ * parse the MSC structure. This MSC will still be counted by
+ * acpi_mpam_count_msc(), meaning the MPAM driver can't probe
+ * against all MSCs, and will never be enabled. There is no way
+ * to enable it safely, because we cannot determine safe
+ * system-wide partid and pmg ranges in this situation.
+ */
+ if (tbl_msc->reserved || tbl_msc->reserved1 || tbl_msc->reserved2) {
+ pr_err_once("Unrecognised MSC, MPAM not usable\n");
+ pr_debug("MSC.%u: reserved field set\n", tbl_msc->identifier);
+ continue;
+ }
+
+ if (!tbl_msc->mmio_size) {
+ pr_debug("MSC.%u: marked as disabled\n", tbl_msc->identifier);
+ continue;
+ }
+
+ pdev = acpi_mpam_parse_msc(tbl_msc);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+
+/**
+ * acpi_mpam_count_msc() - Count the number of MSC described by firmware.
+ *
+ * Returns the number of MSCs, zero if MPAM is not usable, or a negative
+ * error code if the table is malformed.
+ *
+ * This can be called before or in parallel with acpi_mpam_parse().
+ */
+int acpi_mpam_count_msc(void)
+{
+ char *table_end, *table_offset;
+ struct acpi_mpam_msc_node *tbl_msc;
+ int count = 0;
+
+ if (acpi_disabled || !system_supports_mpam())
+ return 0;
+
+ struct acpi_table_header *table __free(acpi_put_table) =
+ acpi_get_table_pointer(ACPI_SIG_MPAM, 0);
+
+ if (IS_ERR(table))
+ return 0;
+
+ if (table->revision < 1)
+ return 0;
+
+ table_offset = (char *)(table + 1);
+ table_end = (char *)table + table->length;
+
+ while (table_offset < table_end) {
+ tbl_msc = (struct acpi_mpam_msc_node *)table_offset;
+
+ if (table_offset + sizeof(*tbl_msc) > table_end)
+ return -EINVAL;
+ if (tbl_msc->length < sizeof(*tbl_msc))
+ return -EINVAL;
+ if (tbl_msc->length > table_end - table_offset)
+ return -EINVAL;
+ table_offset += tbl_msc->length;
+
+ if (!tbl_msc->mmio_size)
+ continue;
+
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * Call after ACPI devices have been created, which happens behind acpi_scan_init()
+ * called from subsys_initcall(). PCC requires the mailbox driver, which is
+ * initialised from postcore_initcall().
+ */
+subsys_initcall_sync(acpi_mpam_parse);
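
For reference, the interrupt-flag word parsed by acpi_mpam_register_irq() packs mode, type, affinity type and affinity-valid into the low bits, per the mask definitions at the top of the file. A standalone sketch of the same FIELD_GET()-style extraction (the mask values mirror the driver's #defines; the open-coded helper and the example flags are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MSC_IRQ_MODE		(1u << 0)	/* BIT(0) */
#define MSC_IRQ_TYPE_MASK	(3u << 1)	/* GENMASK(2, 1) */
#define MSC_IRQ_AFF_TYPE_MASK	(1u << 3)	/* BIT(3) */
#define MSC_IRQ_AFF_VALID	(1u << 4)	/* BIT(4) */

/* Open-coded FIELD_GET(): mask the value, then shift it down to bit 0. */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	/* Mode bit and affinity-valid bit set; type 0 means wired. */
	uint32_t flags = MSC_IRQ_MODE | MSC_IRQ_AFF_VALID;

	printf("mode=%u type=%u aff_type=%u aff_valid=%u\n",
	       field_get(MSC_IRQ_MODE, flags),
	       field_get(MSC_IRQ_TYPE_MASK, flags),
	       field_get(MSC_IRQ_AFF_TYPE_MASK, flags),
	       field_get(MSC_IRQ_AFF_VALID, flags));
	return 0;
}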
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 6905b56bf3e4..34181fa52e93 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -91,8 +91,7 @@ enum {
};
struct acpi_battery {
- struct mutex lock;
- struct mutex sysfs_lock;
+ struct mutex update_lock;
struct power_supply *bat;
struct power_supply_desc bat_desc;
struct acpi_device *device;
@@ -535,11 +534,9 @@ static int acpi_battery_get_info(struct acpi_battery *battery)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status = AE_ERROR;
- mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle,
use_bix ? "_BIX":"_BIF",
NULL, &buffer);
- mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status)) {
acpi_handle_info(battery->device->handle,
@@ -576,11 +573,8 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
msecs_to_jiffies(cache_time)))
return 0;
- mutex_lock(&battery->lock);
status = acpi_evaluate_object(battery->device->handle, "_BST",
NULL, &buffer);
- mutex_unlock(&battery->lock);
-
if (ACPI_FAILURE(status)) {
acpi_handle_info(battery->device->handle,
"_BST evaluation failed: %s",
@@ -628,11 +622,8 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery)
!test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags))
return -ENODEV;
- mutex_lock(&battery->lock);
status = acpi_execute_simple_method(battery->device->handle, "_BTP",
battery->alarm);
- mutex_unlock(&battery->lock);
-
if (ACPI_FAILURE(status))
return -ENODEV;
@@ -904,15 +895,12 @@ static int sysfs_add_battery(struct acpi_battery *battery)
static void sysfs_remove_battery(struct acpi_battery *battery)
{
- mutex_lock(&battery->sysfs_lock);
- if (!battery->bat) {
- mutex_unlock(&battery->sysfs_lock);
+ if (!battery->bat)
return;
- }
+
battery_hook_remove_battery(battery);
power_supply_unregister(battery->bat);
battery->bat = NULL;
- mutex_unlock(&battery->sysfs_lock);
}
static void find_battery(const struct dmi_header *dm, void *private)
@@ -1072,6 +1060,9 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
if (!battery)
return;
+
+ guard(mutex)(&battery->update_lock);
+
old = battery->bat;
/*
* On Acer Aspire V5-573G notifications are sometimes triggered too
@@ -1094,21 +1085,22 @@ static void acpi_battery_notify(acpi_handle handle, u32 event, void *data)
}
static int battery_notify(struct notifier_block *nb,
- unsigned long mode, void *_unused)
+ unsigned long mode, void *_unused)
{
struct acpi_battery *battery = container_of(nb, struct acpi_battery,
pm_nb);
- int result;
- switch (mode) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
+ if (mode == PM_POST_SUSPEND || mode == PM_POST_HIBERNATION) {
+ guard(mutex)(&battery->update_lock);
+
if (!acpi_battery_present(battery))
return 0;
if (battery->bat) {
acpi_battery_refresh(battery);
} else {
+ int result;
+
result = acpi_battery_get_info(battery);
if (result)
return result;
@@ -1120,7 +1112,6 @@ static int battery_notify(struct notifier_block *nb,
acpi_battery_init_alarm(battery);
acpi_battery_get_state(battery);
- break;
}
return 0;
@@ -1198,6 +1189,8 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
{
int retry, ret;
+ guard(mutex)(&battery->update_lock);
+
for (retry = 5; retry; retry--) {
ret = acpi_battery_update(battery, false);
if (!ret)
@@ -1208,6 +1201,13 @@ static int acpi_battery_update_retry(struct acpi_battery *battery)
return ret;
}
+static void sysfs_battery_cleanup(struct acpi_battery *battery)
+{
+ guard(mutex)(&battery->update_lock);
+
+ sysfs_remove_battery(battery);
+}
+
static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
@@ -1226,11 +1226,8 @@ static int acpi_battery_add(struct acpi_device *device)
strscpy(acpi_device_name(device), ACPI_BATTERY_DEVICE_NAME);
strscpy(acpi_device_class(device), ACPI_BATTERY_CLASS);
device->driver_data = battery;
- result = devm_mutex_init(&device->dev, &battery->lock);
- if (result)
- return result;
- result = devm_mutex_init(&device->dev, &battery->sysfs_lock);
+ result = devm_mutex_init(&device->dev, &battery->update_lock);
if (result)
return result;
@@ -1262,7 +1259,7 @@ fail_pm:
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
fail:
- sysfs_remove_battery(battery);
+ sysfs_battery_cleanup(battery);
return result;
}
@@ -1281,6 +1278,9 @@ static void acpi_battery_remove(struct acpi_device *device)
device_init_wakeup(&device->dev, 0);
unregister_pm_notifier(&battery->pm_nb);
+
+ guard(mutex)(&battery->update_lock);
+
sysfs_remove_battery(battery);
}
@@ -1297,6 +1297,9 @@ static int acpi_battery_resume(struct device *dev)
return -EINVAL;
battery->update_time = 0;
+
+ guard(mutex)(&battery->update_lock);
+
acpi_battery_update(battery, true);
return 0;
}
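
The battery rework collapses the two fine-grained mutexes into one update_lock taken via the kernel's scope-based guard(mutex)() from <linux/cleanup.h>, which unlocks automatically on every exit path; that is what lets the explicit mutex_unlock() calls in the error paths disappear. A userspace approximation built on the same __attribute__((cleanup)) mechanism (guard_mutex() is an illustrative stand-in, not the kernel macro):

#include <pthread.h>
#include <stdio.h>

static void unlock_cleanup(pthread_mutex_t **m)
{
	pthread_mutex_unlock(*m);
}

/*
 * Rough analogue of guard(mutex)(&m): the lock is dropped when the
 * hidden variable goes out of scope, on every return path.
 */
#define guard_mutex(m) \
	pthread_mutex_t *_guard __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(m), (m))

static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static int update(int fail)
{
	guard_mutex(&update_lock);

	if (fail)
		return -1;	/* unlocked here */
	puts("updated under lock");
	return 0;		/* ...and here */
}

int main(void)
{
	update(0);
	update(1);
	return 0;
}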
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 0a7026040188..3c6dd9b4ba0a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)
input_set_drvdata(input, device);
error = input_register_device(input);
- if (error)
+ if (error) {
+ input_free_device(input);
goto err_remove_fs;
+ }
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 6b649031808f..3bdeeee3414e 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -460,7 +460,7 @@ bool acpi_cpc_valid(void)
if (acpi_disabled)
return false;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr)
return false;
@@ -476,7 +476,7 @@ bool cppc_allow_fast_switch(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
@@ -750,7 +750,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
}
/*
- * Disregard _CPC if the number of entries in the return pachage is not
+ * Disregard _CPC if the number of entries in the return package is not
* as expected, but support future revisions being proper supersets of
* the v3 and only causing more entries to be returned by _CPC.
*/
@@ -1435,7 +1435,7 @@ bool cppc_perf_ctrs_in_pcc(void)
{
int cpu;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct cpc_register_resource *ref_perf_reg;
struct cpc_desc *cpc_desc;
@@ -1876,7 +1876,7 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
* If desired_reg is in the SystemMemory or SystemIo ACPI address space,
* then assume there is no latency.
*/
-unsigned int cppc_get_transition_latency(int cpu_num)
+int cppc_get_transition_latency(int cpu_num)
{
/*
* Expected transition latency is based on the PCCT timing values
@@ -1889,31 +1889,29 @@ unsigned int cppc_get_transition_latency(int cpu_num)
* completion of a command before issuing the next command,
* in microseconds.
*/
- unsigned int latency_ns = 0;
struct cpc_desc *cpc_desc;
struct cpc_register_resource *desired_reg;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
struct cppc_pcc_data *pcc_ss_data;
+ int latency_ns = 0;
cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
if (!cpc_desc)
- return CPUFREQ_ETERNAL;
+ return -ENODATA;
desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
return 0;
- else if (!CPC_IN_PCC(desired_reg))
- return CPUFREQ_ETERNAL;
- if (pcc_ss_id < 0)
- return CPUFREQ_ETERNAL;
+ if (!CPC_IN_PCC(desired_reg) || pcc_ss_id < 0)
+ return -ENODATA;
pcc_ss_data = pcc_data[pcc_ss_id];
if (pcc_ss_data->pcc_mpar)
latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
- latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_nominal * 1000);
+ latency_ns = max_t(int, latency_ns, pcc_ss_data->pcc_mrtt * 1000);
return latency_ns;
}
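
The reworked cppc_get_transition_latency() derives a latency floor of 60 s divided by MPAR (the PCC max periodic access rate) and then raises it to the PCCT nominal latency and MRTT, both given in microseconds. A standalone sketch of that arithmetic (the PCCT values are made up for illustration):

#include <stdio.h>

int main(void)
{
	int pcc_mpar = 600;	/* commands allowed per 60 s window */
	int pcc_nominal = 500;	/* us: nominal command latency */
	int pcc_mrtt = 60;	/* us: minimum request turnaround time */
	int latency_ns = 0;

	/* 60 s / MPAR, expressed in ns: 60 * (1e9 / mpar). */
	if (pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_mpar);

	/* Nominal latency and MRTT are in us, hence the * 1000. */
	if (latency_ns < pcc_nominal * 1000)
		latency_ns = pcc_nominal * 1000;
	if (latency_ns < pcc_mrtt * 1000)
		latency_ns = pcc_mrtt * 1000;

	/* Prints 99999960, i.e. ~100 ms per command for MPAR = 600. */
	printf("%d ns\n", latency_ns);
	return 0;
}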
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 3961fc47152c..cd199fbe4dc9 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -464,7 +464,7 @@ static ssize_t description_show(struct device *dev,
buf[result++] = '\n';
- kfree(str_obj);
+ ACPI_FREE(str_obj);
return result;
}
diff --git a/drivers/acpi/dptf/Makefile b/drivers/acpi/dptf/Makefile
index 297340682f66..e912a3be1d28 100644
--- a/drivers/acpi/dptf/Makefile
+++ b/drivers/acpi/dptf/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ACPI) += int340x_thermal.o
obj-$(CONFIG_DPTF_POWER) += dptf_power.o
obj-$(CONFIG_DPTF_PCH_FIVR) += dptf_pch_fivr.o
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index 952216c67d58..8d7e555929d3 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -41,7 +41,7 @@ static int pch_fivr_read(acpi_handle handle, char *method, struct pch_fivr_resp
ret = 0;
release_buffer:
- kfree(buffer.pointer);
+ ACPI_FREE(buffer.pointer);
return ret;
}
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 776914f31b9e..55ccbb8ddbe3 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -240,6 +240,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC10D9", 0},
{"INTC1100", 0},
{"INTC1101", 0},
+ {"INTC10F7", 0},
+ {"INTC10F8", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
deleted file mode 100644
index a222df059a16..000000000000
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ACPI support for int340x thermal drivers
- *
- * Copyright (C) 2014, Intel Corporation
- * Authors: Zhang Rui <rui.zhang@intel.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/module.h>
-
-#include "../internal.h"
-
-#define INT3401_DEVICE 0X01
-static const struct acpi_device_id int340x_thermal_device_ids[] = {
- {"INT3400"},
- {"INT3401", INT3401_DEVICE},
- {"INT3402"},
- {"INT3403"},
- {"INT3404"},
- {"INT3406"},
- {"INT3407"},
- {"INT3408"},
- {"INT3409"},
- {"INT340A"},
- {"INT340B"},
- {"INT3532"},
- {"INTC1040"},
- {"INTC1041"},
- {"INTC1042"},
- {"INTC1043"},
- {"INTC1044"},
- {"INTC1045"},
- {"INTC1046"},
- {"INTC1047"},
- {"INTC1048"},
- {"INTC1049"},
- {"INTC1050"},
- {"INTC1060"},
- {"INTC1061"},
- {"INTC1062"},
- {"INTC1063"},
- {"INTC1064"},
- {"INTC1065"},
- {"INTC1066"},
- {"INTC1068"},
- {"INTC1069"},
- {"INTC106A"},
- {"INTC106B"},
- {"INTC106C"},
- {"INTC106D"},
- {"INTC10A0"},
- {"INTC10A1"},
- {"INTC10A2"},
- {"INTC10A3"},
- {"INTC10A4"},
- {"INTC10A5"},
- {"INTC10D4"},
- {"INTC10D5"},
- {"INTC10D6"},
- {"INTC10D7"},
- {"INTC10D8"},
- {"INTC10D9"},
- {"INTC10FC"},
- {"INTC10FD"},
- {"INTC10FE"},
- {"INTC10FF"},
- {"INTC1100"},
- {"INTC1101"},
- {"INTC1102"},
- {""},
-};
-
-static int int340x_thermal_handler_attach(struct acpi_device *adev,
- const struct acpi_device_id *id)
-{
- if (IS_ENABLED(CONFIG_INT340X_THERMAL))
- acpi_create_platform_device(adev, NULL);
- /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
- else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
- id->driver_data == INT3401_DEVICE)
- acpi_create_platform_device(adev, NULL);
- return 1;
-}
-
-static struct acpi_scan_handler int340x_thermal_handler = {
- .ids = int340x_thermal_device_ids,
- .attach = int340x_thermal_handler_attach,
-};
-
-void __init acpi_int340x_thermal_init(void)
-{
- acpi_scan_add_handler(&int340x_thermal_handler);
-}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 7855bbf752b1..59b3d50ff01e 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2294,7 +2294,8 @@ static int acpi_ec_init_workqueues(void)
ec_wq = alloc_ordered_workqueue("kec", 0);
if (!ec_query_wq)
- ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
+ ec_query_wq = alloc_workqueue("kec_query", WQ_PERCPU,
+ ec_max_queries);
if (!ec_wq || !ec_query_wq) {
acpi_ec_destroy_workqueues();
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index 8a28a72a7c6a..97ce3212edf3 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -11,6 +11,7 @@
#define _ACPI_FAN_H_
#include <linux/kconfig.h>
+#include <linux/limits.h>
#define ACPI_FAN_DEVICE_IDS \
{"INT3404", }, /* Fan */ \
@@ -21,6 +22,7 @@
{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"INTC10D6", }, /* Fan for Panther Lake generation */ \
{"INTC10FE", }, /* Fan for Wildcat Lake generation */ \
+ {"INTC10F5", }, /* Fan for Nova Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
#define ACPI_FPS_NAME_LEN 20
@@ -49,24 +51,64 @@ struct acpi_fan_fst {
};
struct acpi_fan {
+ acpi_handle handle;
bool acpi4;
bool has_fst;
struct acpi_fan_fif fif;
struct acpi_fan_fps *fps;
int fps_count;
+ /* A value of 0 means that trip-point-related functions are not supported */
+ u32 fan_trip_granularity;
+#if IS_REACHABLE(CONFIG_HWMON)
+ struct device *hdev;
+#endif
struct thermal_cooling_device *cdev;
struct device_attribute fst_speed;
struct device_attribute fine_grain_control;
};
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
+/**
+ * acpi_fan_speed_valid - Check if fan speed value is valid
+ * @speeed: Speed value returned by the ACPI firmware
+ *
+ * Check if the fan speed value returned by the ACPI firmware is valid. This function is
+ * necessary as ACPI firmware implementations can return 0xFFFFFFFF to signal that the
+ * ACPI fan does not support speed reporting. Additionally, some buggy ACPI firmware
+ * implementations return a value larger than the 32-bit integer value defined by
+ * the ACPI specification when using placeholder values. Such invalid values are also
+ * detected by this function.
+ *
+ * Returns: True if the fan speed value is valid, false otherwise.
+ */
+static inline bool acpi_fan_speed_valid(u64 speed)
+{
+ return speed < U32_MAX;
+}
+
+/**
+ * acpi_fan_power_valid - Check if fan power value is valid
+ * @power: Power value returned by the ACPI firmware
+ *
+ * Check if the fan power value returned by the ACPI firmware is valid.
+ * See acpi_fan_speed_valid() for details.
+ *
+ * Returns: True if the fan power value is valid, false otherwise.
+ */
+static inline bool acpi_fan_power_valid(u64 power)
+{
+ return power < U32_MAX;
+}
+
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);
#if IS_REACHABLE(CONFIG_HWMON)
-int devm_acpi_fan_create_hwmon(struct acpi_device *device);
+int devm_acpi_fan_create_hwmon(struct device *dev);
+void acpi_fan_notify_hwmon(struct device *dev);
#else
-static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; };
+static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; };
+static inline void acpi_fan_notify_hwmon(struct device *dev) { };
#endif
#endif
diff --git a/drivers/acpi/fan_attr.c b/drivers/acpi/fan_attr.c
index c1afb7b5ed3d..9b7fa52f3c2a 100644
--- a/drivers/acpi/fan_attr.c
+++ b/drivers/acpi/fan_attr.c
@@ -55,7 +55,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
struct acpi_fan_fst fst;
int status;
- status = acpi_fan_get_fst(acpi_dev, &fst);
+ status = acpi_fan_get_fst(acpi_dev->handle, &fst);
if (status)
return status;
diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c
index 095502086b41..fb08b8549ed7 100644
--- a/drivers/acpi/fan_core.c
+++ b/drivers/acpi/fan_core.c
@@ -7,11 +7,16 @@
* Copyright (C) 2022 Intel Corporation. All rights reserved.
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/uaccess.h>
+#include <linux/uuid.h>
#include <linux/thermal.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -19,6 +24,26 @@
#include "fan.h"
+#define ACPI_FAN_NOTIFY_STATE_CHANGED 0x80
+
+/*
+ * Defined inside the "Fan Noise Signal" section at
+ * https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide.
+ */
+static const guid_t acpi_fan_microsoft_guid = GUID_INIT(0xA7611840, 0x99FE, 0x41AE, 0xA4, 0x88,
+ 0x35, 0xC7, 0x59, 0x26, 0xC8, 0xEB);
+#define ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY 1
+#define ACPI_FAN_DSM_SET_TRIP_POINTS 2
+#define ACPI_FAN_DSM_GET_OPERATING_RANGES 3
+
+/*
+ * Ensures that fans with a very low trip point granularity
+ * do not send too many notifications.
+ */
+static uint min_trip_distance = 100;
+module_param(min_trip_distance, uint, 0);
+MODULE_PARM_DESC(min_trip_distance, "Minimum distance between fan speed trip points in RPM");
+
static const struct acpi_device_id fan_device_ids[] = {
ACPI_FAN_DEVICE_IDS,
{"", 0},
@@ -44,25 +69,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
return 0;
}
-int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
+int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
- status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
- if (ACPI_FAILURE(status)) {
- dev_err(&device->dev, "Get fan state failed\n");
- return -ENODEV;
- }
+ status = acpi_evaluate_object(handle, "_FST", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -EIO;
obj = buffer.pointer;
- if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
- obj->package.count != 3 ||
- obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
- dev_err(&device->dev, "Invalid _FST data\n");
- ret = -EINVAL;
+ if (!obj)
+ return -ENODATA;
+
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
+ ret = -EPROTO;
+ goto err;
+ }
+
+ if (obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[1].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[2].type != ACPI_TYPE_INTEGER) {
+ ret = -EPROTO;
goto err;
}
@@ -81,7 +111,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
struct acpi_fan_fst fst;
int status, i;
- status = acpi_fan_get_fst(device, &fst);
+ status = acpi_fan_get_fst(device->handle, &fst);
if (status)
return status;
@@ -203,18 +233,6 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
* --------------------------------------------------------------------------
*/
-static bool acpi_fan_has_fst(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FST");
-}
-
-static bool acpi_fan_is_acpi4(struct acpi_device *device)
-{
- return acpi_has_method(device->handle, "_FIF") &&
- acpi_has_method(device->handle, "_FPS") &&
- acpi_has_method(device->handle, "_FSL");
-}
-
static int acpi_fan_get_fif(struct acpi_device *device)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -315,6 +333,182 @@ err:
return status;
}
+static int acpi_fan_dsm_init(struct device *dev)
+{
+ union acpi_object dummy = {
+ .package = {
+ .type = ACPI_TYPE_PACKAGE,
+ .count = 0,
+ .elements = NULL,
+ },
+ };
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ union acpi_object *obj;
+ int ret = 0;
+
+ if (!acpi_check_dsm(fan->handle, &acpi_fan_microsoft_guid, 0,
+ BIT(ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY) |
+ BIT(ACPI_FAN_DSM_SET_TRIP_POINTS)))
+ return 0;
+
+ dev_info(dev, "Using Microsoft fan extensions\n");
+
+ obj = acpi_evaluate_dsm_typed(fan->handle, &acpi_fan_microsoft_guid, 0,
+ ACPI_FAN_DSM_GET_TRIP_POINT_GRANULARITY, &dummy,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
+ return -EIO;
+
+ if (obj->integer.value > U32_MAX)
+ ret = -EOVERFLOW;
+ else
+ fan->fan_trip_granularity = obj->integer.value;
+
+ kfree(obj);
+
+ return ret;
+}
+
+static int acpi_fan_dsm_set_trip_points(struct device *dev, u64 upper, u64 lower)
+{
+ union acpi_object args[2] = {
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = lower,
+ },
+ },
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = upper,
+ },
+ },
+ };
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ union acpi_object in = {
+ .package = {
+ .type = ACPI_TYPE_PACKAGE,
+ .count = ARRAY_SIZE(args),
+ .elements = args,
+ },
+ };
+ union acpi_object *obj;
+
+ obj = acpi_evaluate_dsm(fan->handle, &acpi_fan_microsoft_guid, 0,
+ ACPI_FAN_DSM_SET_TRIP_POINTS, &in);
+ kfree(obj);
+
+ return 0;
+}
+
+static int acpi_fan_dsm_start(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ int ret;
+
+ if (!fan->fan_trip_granularity)
+ return 0;
+
+ /*
+ * Some firmware implementations only update the values returned by the
+ * _FST control method when a notification is received. This usually
+ * works with Microsoft Windows as setting up trip points will keep
+ * triggering said notifications, but will cause issues when using _FST
+ * without the Microsoft-specific trip point extension.
+ *
+ * Because of this, an initial notification needs to be triggered to
+ * start the cycle of trip point updates. This is achieved by setting
+ * the trip points sequentially to two separate ranges. Per the
+ * Microsoft specification, the firmware should trigger a notification
+ * immediately if the fan speed is outside the trip point range. This
+ * _should_ result in at least one notification as both ranges do not
+ * overlap, meaning that the current fan speed needs to be outside at
+ * least one range.
+ */
+ ret = acpi_fan_dsm_set_trip_points(dev, fan->fan_trip_granularity, 0);
+ if (ret < 0)
+ return ret;
+
+ return acpi_fan_dsm_set_trip_points(dev, fan->fan_trip_granularity * 3,
+ fan->fan_trip_granularity * 2);
+}
+
+static int acpi_fan_dsm_update_trip_points(struct device *dev, struct acpi_fan_fst *fst)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ u64 upper, lower;
+
+ if (!fan->fan_trip_granularity)
+ return 0;
+
+ if (!acpi_fan_speed_valid(fst->speed))
+ return -EINVAL;
+
+ upper = roundup_u64(fst->speed + min_trip_distance, fan->fan_trip_granularity);
+ if (fst->speed <= min_trip_distance) {
+ lower = 0;
+ } else {
+ /*
+ * Valid fan speed values cannot be larger than 32 bit, so
+ * we can safely assume that no overflow will happen here.
+ */
+ lower = rounddown((u32)fst->speed - min_trip_distance, fan->fan_trip_granularity);
+ }
+
+ return acpi_fan_dsm_set_trip_points(dev, upper, lower);
+}
+
+static void acpi_fan_notify_handler(acpi_handle handle, u32 event, void *context)
+{
+ struct device *dev = context;
+ struct acpi_fan_fst fst;
+ int ret;
+
+ switch (event) {
+ case ACPI_FAN_NOTIFY_STATE_CHANGED:
+ /*
+ * The ACPI specification says that we must evaluate _FST when we
+ * receive an ACPI event indicating that the fan state has changed.
+ */
+ ret = acpi_fan_get_fst(handle, &fst);
+ if (ret < 0) {
+ dev_err(dev, "Error retrieving current fan status: %d\n", ret);
+ } else {
+ ret = acpi_fan_dsm_update_trip_points(dev, &fst);
+ if (ret < 0)
+ dev_err(dev, "Failed to update trip points: %d\n", ret);
+ }
+
+ acpi_fan_notify_hwmon(dev);
+ acpi_bus_generate_netlink_event("fan", dev_name(dev), event, 0);
+ break;
+ default:
+ dev_dbg(dev, "Unsupported ACPI notification 0x%x\n", event);
+ break;
+ }
+}
+
+static void acpi_fan_notify_remove(void *data)
+{
+ struct acpi_fan *fan = data;
+
+ acpi_remove_notify_handler(fan->handle, ACPI_DEVICE_NOTIFY, acpi_fan_notify_handler);
+}
+
+static int devm_acpi_fan_notify_init(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+ acpi_status status;
+
+ status = acpi_install_notify_handler(fan->handle, ACPI_DEVICE_NOTIFY,
+ acpi_fan_notify_handler, dev);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return devm_add_action_or_reset(dev, acpi_fan_notify_remove, fan);
+}
+
static int acpi_fan_probe(struct platform_device *pdev)
{
int result = 0;
@@ -323,17 +517,24 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;
+ if (!device)
+ return -ENODEV;
+
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}
+
+ fan->handle = device->handle;
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
- if (acpi_fan_has_fst(device)) {
+ if (acpi_has_method(device->handle, "_FST")) {
fan->has_fst = true;
- fan->acpi4 = acpi_fan_is_acpi4(device);
+ fan->acpi4 = acpi_has_method(device->handle, "_FIF") &&
+ acpi_has_method(device->handle, "_FPS") &&
+ acpi_has_method(device->handle, "_FSL");
}
if (fan->acpi4) {
@@ -347,10 +548,24 @@ static int acpi_fan_probe(struct platform_device *pdev)
}
if (fan->has_fst) {
- result = devm_acpi_fan_create_hwmon(device);
+ result = acpi_fan_dsm_init(&pdev->dev);
+ if (result)
+ return result;
+
+ result = devm_acpi_fan_create_hwmon(&pdev->dev);
if (result)
return result;
+ result = devm_acpi_fan_notify_init(&pdev->dev);
+ if (result)
+ return result;
+
+ result = acpi_fan_dsm_start(&pdev->dev);
+ if (result) {
+ dev_err(&pdev->dev, "Failed to start Microsoft fan extensions\n");
+ return result;
+ }
+
result = acpi_fan_create_attributes(device);
if (result)
return result;
@@ -436,8 +651,14 @@ static int acpi_fan_suspend(struct device *dev)
static int acpi_fan_resume(struct device *dev)
{
- int result;
struct acpi_fan *fan = dev_get_drvdata(dev);
+ int result;
+
+ if (fan->has_fst) {
+ result = acpi_fan_dsm_start(dev);
+ if (result)
+ dev_err(dev, "Failed to start Microsoft fan extensions: %d\n", result);
+ }
if (fan->acpi4)
return 0;
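
The trip-point update in acpi_fan_dsm_update_trip_points() brackets the current speed: the upper trip is speed + min_trip_distance rounded up to the granularity, the lower trip is speed - min_trip_distance rounded down, clamped at zero. A standalone sketch of that rounding (the granularity, distance and speed values are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t roundup_u64(uint64_t x, uint32_t g)
{
	return (x + g - 1) / g * g;
}

static uint64_t rounddown_u64(uint64_t x, uint32_t g)
{
	return x / g * g;
}

int main(void)
{
	uint32_t granularity = 250;		/* from the _DSM */
	uint32_t min_trip_distance = 100;
	uint64_t speed = 1337;			/* current _FST speed, RPM */
	uint64_t upper, lower;

	upper = roundup_u64(speed + min_trip_distance, granularity);
	lower = speed <= min_trip_distance ?
		0 : rounddown_u64(speed - min_trip_distance, granularity);

	/*
	 * 1437 rounds up to 1500, 1237 rounds down to 1000: the next
	 * notification fires once the fan leaves [1000, 1500] RPM.
	 */
	printf("upper=%llu lower=%llu\n",
	       (unsigned long long)upper, (unsigned long long)lower);
	return 0;
}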
diff --git a/drivers/acpi/fan_hwmon.c b/drivers/acpi/fan_hwmon.c
index e8d90605106e..d3374f8f524b 100644
--- a/drivers/acpi/fan_hwmon.c
+++ b/drivers/acpi/fan_hwmon.c
@@ -15,10 +15,6 @@
#include "fan.h"
-/* Returned when the ACPI fan does not support speed reporting */
-#define FAN_SPEED_UNAVAILABLE U32_MAX
-#define FAN_POWER_UNAVAILABLE U32_MAX
-
static struct acpi_fan_fps *acpi_fan_get_current_fps(struct acpi_fan *fan, u64 control)
{
unsigned int i;
@@ -77,7 +73,7 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
* when the associated attribute should not be created.
*/
for (i = 0; i < fan->fps_count; i++) {
- if (fan->fps[i].power != FAN_POWER_UNAVAILABLE)
+ if (acpi_fan_power_valid(fan->fps[i].power))
return 0444;
}
@@ -93,13 +89,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
- struct acpi_device *adev = to_acpi_device(dev->parent);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct acpi_fan_fps *fps;
struct acpi_fan_fst fst;
int ret;
- ret = acpi_fan_get_fst(adev, &fst);
+ ret = acpi_fan_get_fst(fan->handle, &fst);
if (ret < 0)
return ret;
@@ -107,7 +102,7 @@ static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_fan:
switch (attr) {
case hwmon_fan_input:
- if (fst.speed == FAN_SPEED_UNAVAILABLE)
+ if (!acpi_fan_speed_valid(fst.speed))
return -ENODEV;
if (fst.speed > LONG_MAX)
@@ -135,7 +130,7 @@ static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
if (!fps)
return -EIO;
- if (fps->power == FAN_POWER_UNAVAILABLE)
+ if (!acpi_fan_power_valid(fps->power))
return -ENODEV;
if (fps->power > LONG_MAX / MICROWATT_PER_MILLIWATT)
@@ -167,12 +162,19 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
.info = acpi_fan_hwmon_info,
};
-int devm_acpi_fan_create_hwmon(struct acpi_device *device)
+void acpi_fan_notify_hwmon(struct device *dev)
+{
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+
+ hwmon_notify_event(fan->hdev, hwmon_fan, hwmon_fan_input, 0);
+}
+
+int devm_acpi_fan_create_hwmon(struct device *dev)
{
- struct acpi_fan *fan = acpi_driver_data(device);
- struct device *hdev;
+ struct acpi_fan *fan = dev_get_drvdata(dev);
+
+ fan->hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan,
+ &acpi_fan_hwmon_chip_info, NULL);
- hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan,
- &acpi_fan_hwmon_chip_info, NULL);
- return PTR_ERR_OR_ZERO(hdev);
+ return PTR_ERR_OR_ZERO(fan->hdev);
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index e2781864fdce..40f875b265a9 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -27,7 +27,6 @@ static inline void acpi_pci_link_init(void) {}
void acpi_processor_init(void);
void acpi_platform_init(void);
void acpi_pnp_init(void);
-void acpi_int340x_thermal_init(void);
int acpi_sysfs_init(void);
void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
@@ -140,6 +139,7 @@ int __acpi_device_uevent_modalias(const struct acpi_device *adev,
/* --------------------------------------------------------------------------
Power Resource
-------------------------------------------------------------------------- */
+void acpi_power_resources_init(void);
void acpi_power_resources_list_free(struct list_head *list);
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list);
diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c
index 76a856c32c4d..d1595156c86a 100644
--- a/drivers/acpi/irq.c
+++ b/drivers/acpi/irq.c
@@ -300,6 +300,25 @@ int acpi_irq_get(acpi_handle handle, unsigned int index, struct resource *res)
}
EXPORT_SYMBOL_GPL(acpi_irq_get);
+const struct cpumask *acpi_irq_get_affinity(acpi_handle handle,
+ unsigned int index)
+{
+ struct irq_fwspec_info info;
+ struct irq_fwspec fwspec;
+ unsigned long flags;
+
+ if (acpi_irq_parse_one(handle, index, &fwspec, &flags))
+ return NULL;
+
+ if (irq_populate_fwspec_info(&fwspec, &info))
+ return NULL;
+
+ if (!(info.flags & IRQ_FWSPEC_INFO_AFFINITY_VALID))
+ return NULL;
+
+ return info.affinity;
+}
+
/**
* acpi_set_irq_model - Setup the GSI irqdomain information
* @model: the value assigned to acpi_irq_model
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ae035b93da08..3eb56b77cb6d 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2637,7 +2637,7 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
if (ndr_desc->target_node == NUMA_NO_NODE) {
ndr_desc->target_node = phys_to_target_node(spa->address);
dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
- NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ NUMA_NO_NODE, ndr_desc->target_node, &res.start, &res.end);
}
/*
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index 4958301f5417..77a81627aaef 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -74,7 +74,6 @@ struct memory_target {
struct node_cache_attrs cache_attrs;
u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
bool registered;
- bool ext_updated; /* externally updated */
};
struct memory_initiator {
@@ -368,35 +367,6 @@ static void hmat_update_target_access(struct memory_target *target,
}
}
-int hmat_update_target_coordinates(int nid, struct access_coordinate *coord,
- enum access_coordinate_class access)
-{
- struct memory_target *target;
- int pxm;
-
- if (nid == NUMA_NO_NODE)
- return -EINVAL;
-
- pxm = node_to_pxm(nid);
- guard(mutex)(&target_lock);
- target = find_mem_target(pxm);
- if (!target)
- return -ENODEV;
-
- hmat_update_target_access(target, ACPI_HMAT_READ_LATENCY,
- coord->read_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_LATENCY,
- coord->write_latency, access);
- hmat_update_target_access(target, ACPI_HMAT_READ_BANDWIDTH,
- coord->read_bandwidth, access);
- hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
- coord->write_bandwidth, access);
- target->ext_updated = true;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(hmat_update_target_coordinates);
-
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
struct memory_locality *loc;
@@ -773,10 +743,6 @@ static void hmat_update_target_attrs(struct memory_target *target,
u32 best = 0;
int i;
- /* Don't update if an external agent has changed the data. */
- if (target->ext_updated)
- return;
-
/* Don't update for generic port if there's no device handle */
if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
@@ -908,11 +874,33 @@ static void hmat_register_target_devices(struct memory_target *target)
}
}
-static void hmat_register_target(struct memory_target *target)
+static void hmat_hotplug_target(struct memory_target *target)
{
int nid = pxm_to_node(target->memory_pxm);
/*
+ * Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP,
+ * "specific purpose", is applied to all the memory in a proximity
+ * domain leading to the node being marked offline / unplugged, or if
+ * a memory-only "hotplug" node is offline.
+ */
+ if (nid == NUMA_NO_NODE || !node_online(nid))
+ return;
+
+ guard(mutex)(&target_lock);
+ if (target->registered)
+ return;
+
+ hmat_register_target_initiators(target);
+ hmat_register_target_cache(target);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
+ hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
+ target->registered = true;
+}
+
+static void hmat_register_target(struct memory_target *target)
+{
+ /*
* Devices may belong to either an offline or online
* node, so unconditionally add them.
*/
@@ -922,32 +910,15 @@ static void hmat_register_target(struct memory_target *target)
* Register generic port perf numbers. The nid may not be
* initialized and is still NUMA_NO_NODE.
*/
- mutex_lock(&target_lock);
- if (*(u16 *)target->gen_port_device_handle) {
- hmat_update_generic_target(target);
- target->registered = true;
+ scoped_guard(mutex, &target_lock) {
+ if (*(u16 *)target->gen_port_device_handle) {
+ hmat_update_generic_target(target);
+ target->registered = true;
+ return;
+ }
}
- mutex_unlock(&target_lock);
-
- /*
- * Skip offline nodes. This can happen when memory
- * marked EFI_MEMORY_SP, "specific purpose", is applied
- * to all the memory in a proximity domain leading to
- * the node being marked offline / unplugged, or if
- * memory-only "hotplug" node is offline.
- */
- if (nid == NUMA_NO_NODE || !node_online(nid))
- return;
- mutex_lock(&target_lock);
- if (!target->registered) {
- hmat_register_target_initiators(target);
- hmat_register_target_cache(target);
- hmat_register_target_perf(target, ACCESS_COORDINATE_LOCAL);
- hmat_register_target_perf(target, ACCESS_COORDINATE_CPU);
- target->registered = true;
- }
- mutex_unlock(&target_lock);
+ hmat_hotplug_target(target);
}
static void hmat_register_targets(void)
@@ -973,7 +944,7 @@ static int hmat_callback(struct notifier_block *self,
if (!target)
return NOTIFY_OK;
- hmat_register_target(target);
+ hmat_hotplug_target(target);
return NOTIFY_OK;
}
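For reference, a minimal sketch of the cleanup.h guard primitives the hmat.c rework above relies on; the demo_* names are hypothetical and only illustrate the scoping semantics:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_value;

static int demo_read(void)
{
	/* guard() holds demo_lock until the enclosing function returns */
	guard(mutex)(&demo_lock);
	return demo_value;
}

static void demo_write(int v)
{
	/* scoped_guard() holds demo_lock only for the block that follows */
	scoped_guard(mutex, &demo_lock)
		demo_value = v;
}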
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 53816dfab645..aa87ee1583a4 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -237,7 +237,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
struct acpi_srat_generic_affinity *p =
(struct acpi_srat_generic_affinity *)header;
- if (p->device_handle_type == 0) {
+ if (p->device_handle_type == 1) {
/*
* For pci devices this may be the only place they
* are assigned a proximity domain
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 5ff343096ece..05393a7315fe 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -398,7 +398,7 @@ static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
list_del_rcu(&map->list);
INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
- queue_rcu_work(system_wq, &map->track.rwork);
+ queue_rcu_work(system_percpu_wq, &map->track.rwork);
}
/**
@@ -1694,8 +1694,8 @@ acpi_status __init acpi_os_initialize(void)
acpi_status __init acpi_os_initialize1(void)
{
- kacpid_wq = alloc_workqueue("kacpid", 0, 1);
- kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 0);
+ kacpid_wq = alloc_workqueue("kacpid", WQ_PERCPU, 1);
+ kacpi_notify_wq = alloc_workqueue("kacpi_notify", WQ_PERCPU, 0);
kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
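A hedged sketch of the conversion pattern above, assuming the WQ_PERCPU flag and system_percpu_wq introduced by the workqueue rework in this tree; the demo_* names are hypothetical:

#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work) { }
static DECLARE_WORK(demo_work, demo_fn);

static int demo_init(void)
{
	/* per-CPU execution is now requested explicitly via WQ_PERCPU */
	struct workqueue_struct *demo_wq = alloc_workqueue("demo", WQ_PERCPU, 0);

	if (!demo_wq)
		return -ENOMEM;

	/* system_percpu_wq replaces system_wq for per-CPU system work */
	queue_work(system_percpu_wq, &demo_work);

	flush_work(&demo_work);
	destroy_workqueue(demo_wq);
	return 0;
}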
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 630fe0a34bc6..ad81aa03fe2f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -22,6 +22,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/string_choices.h>
struct acpi_prt_entry {
struct acpi_pci_id id;
@@ -468,7 +469,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
pin_name(pin), link_desc, gsi,
(triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ str_low_high(polarity == ACPI_ACTIVE_LOW), dev->irq);
kfree(entry);
return 0;
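A short sketch of the string_choices.h helper used above; by the str_X_Y() convention, str_low_high(cond) yields "low" when cond is true and "high" otherwise:

#include <linux/string_choices.h>

/* prints "polarity: low" for an active-low polarity */
pr_info("polarity: %s\n", str_low_high(polarity == ACPI_ACTIVE_LOW));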
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index e4560b33b8ad..bed7dc85612e 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -761,7 +761,7 @@ static int acpi_pci_link_resume(struct acpi_pci_link *link)
return 0;
}
-static void irqrouter_resume(void)
+static void irqrouter_resume(void *data)
{
struct acpi_pci_link *link;
@@ -888,10 +888,14 @@ static int __init acpi_irq_balance_set(char *str)
__setup("acpi_irq_balance", acpi_irq_balance_set);
-static struct syscore_ops irqrouter_syscore_ops = {
+static const struct syscore_ops irqrouter_syscore_ops = {
.resume = irqrouter_resume,
};
+static struct syscore irqrouter_syscore = {
+ .ops = &irqrouter_syscore_ops,
+};
+
void __init acpi_pci_link_init(void)
{
if (acpi_noirq)
@@ -904,6 +908,6 @@ void __init acpi_pci_link_init(void)
else
acpi_irq_balance = 0;
}
- register_syscore_ops(&irqrouter_syscore_ops);
+ register_syscore(&irqrouter_syscore);
acpi_scan_add_handler(&pci_link_handler);
}
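The hunk above follows a syscore API change in this tree: the ops become const and are wrapped in a registered struct syscore instance. A hedged sketch of the new pattern for an arbitrary driver (demo_* names are hypothetical):

static void demo_resume(void *data)
{
	/* runs during syscore resume, before interrupts are re-enabled */
}

static const struct syscore_ops demo_syscore_ops = {
	.resume = demo_resume,
};

static struct syscore demo_syscore = {
	.ops = &demo_syscore_ops,
};

static int __init demo_init(void)
{
	register_syscore(&demo_syscore);
	return 0;
}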
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index b43f4459a4f6..ea04a8c69215 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -37,6 +37,7 @@ static const char * const profile_names[] = {
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
+ [PLATFORM_PROFILE_MAX_POWER] = "max-power",
[PLATFORM_PROFILE_CUSTOM] = "custom",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
@@ -506,7 +507,8 @@ int platform_profile_cycle(void)
if (err)
return err;
- if (profile == PLATFORM_PROFILE_CUSTOM ||
+ if (profile == PLATFORM_PROFILE_MAX_POWER ||
+ profile == PLATFORM_PROFILE_CUSTOM ||
profile == PLATFORM_PROFILE_LAST)
return -EINVAL;
@@ -515,7 +517,8 @@ int platform_profile_cycle(void)
if (err)
return err;
- /* never iterate into a custom if all drivers supported it */
+ /* never iterate into custom or max-power even if all drivers support them */
+ clear_bit(PLATFORM_PROFILE_MAX_POWER, data.aggregate);
clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
next = find_next_bit_wrap(data.aggregate,
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index b7243d7563b1..361a7721a6a8 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -23,6 +23,7 @@
#define pr_fmt(fmt) "ACPI: PM: " fmt
+#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -63,6 +64,9 @@ struct acpi_power_resource_entry {
struct acpi_power_resource *resource;
};
+static bool hp_eb_gp12pxp_quirk;
+static bool unused_power_resources_quirk;
+
static LIST_HEAD(acpi_power_resource_list);
static DEFINE_MUTEX(power_resource_list_lock);
@@ -992,6 +996,38 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
}
#ifdef CONFIG_ACPI_SLEEP
+static bool resource_is_gp12pxp(acpi_handle handle)
+{
+ const char *path;
+ bool ret;
+
+ path = acpi_handle_path(handle);
+ ret = path && strcmp(path, "\\_SB_.PCI0.GP12.PXP_") == 0;
+ kfree(path);
+
+ return ret;
+}
+
+static void acpi_resume_on_eb_gp12pxp(struct acpi_power_resource *resource)
+{
+ acpi_handle_notice(resource->device.handle,
+ "HP EB quirk - turning OFF then ON\n");
+
+ __acpi_power_off(resource);
+ __acpi_power_on(resource);
+
+ /*
+ * Use the same delay as DSDT uses in modem _RST method.
+ *
+ * Otherwise we get "Unable to change power state from unknown to D0,
+ * device inaccessible" error for the modem PCI device after thaw.
+ *
+ * This power resource is normally being enabled only during thaw (once)
+ * so this wait is not a performance issue.
+ */
+ msleep(200);
+}
+
void acpi_resume_power_resources(void)
{
struct acpi_power_resource *resource;
@@ -1013,8 +1049,14 @@ void acpi_resume_power_resources(void)
if (state == ACPI_POWER_RESOURCE_STATE_OFF
&& resource->ref_count) {
- acpi_handle_debug(resource->device.handle, "Turning ON\n");
- __acpi_power_on(resource);
+ if (hp_eb_gp12pxp_quirk &&
+ resource_is_gp12pxp(resource->device.handle)) {
+ acpi_resume_on_eb_gp12pxp(resource);
+ } else {
+ acpi_handle_debug(resource->device.handle,
+ "Turning ON\n");
+ __acpi_power_on(resource);
+ }
}
mutex_unlock(&resource->resource_lock);
@@ -1024,6 +1066,41 @@ void acpi_resume_power_resources(void)
}
#endif
+static const struct dmi_system_id dmi_hp_elitebook_gp12pxp_quirk[] = {
+/*
+ * This laptop (and possibly similar models too) has power resource called
+ * "GP12.PXP_" for its WWAN modem.
+ *
+ * For this power resource to turn ON power for the modem, a certain
+ * internal flag called "ONEN" needs to be set.
+ * This flag only gets set from this power resource's "_OFF" method, while the
+ * actual modem power gets turned off during suspend by the "GP12.PTS" method
+ * called from the global "_PTS" (Prepare To Sleep) method.
+ * On the other hand, this power resource's "_OFF" method implementation just
+ * sets the aforementioned flag without actually doing anything else (it
+ * doesn't contain any code to actually turn off power).
+ *
+ * The above means that when, after hibernation finishes, we try to turn this
+ * power resource back ON because its "_STA" method returns 0 (while the
+ * resource is still considered in use), its "_ON" method won't do anything
+ * since the "ONEN" flag is not set.
+ * Overall, this means the modem is dead until the laptop is rebooted, since
+ * its power has been cut by "_PTS" and its PCI configuration was lost and
+ * cannot be restored.
+ *
+ * The easiest way to work around the issue is to call this power resource's
+ * "_OFF" method before calling the "_ON" method, to make sure the "ONEN"
+ * flag gets properly set.
+ */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 855 G7 Notebook PC"),
+ },
+ },
+ {}
+};
+
static const struct dmi_system_id dmi_leave_unused_power_resources_on[] = {
{
/*
@@ -1046,7 +1123,7 @@ void acpi_turn_off_unused_power_resources(void)
{
struct acpi_power_resource *resource;
- if (dmi_check_system(dmi_leave_unused_power_resources_on))
+ if (unused_power_resources_quirk)
return;
mutex_lock(&power_resource_list_lock);
@@ -1065,3 +1142,10 @@ void acpi_turn_off_unused_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
+
+void __init acpi_power_resources_init(void)
+{
+ hp_eb_gp12pxp_quirk = dmi_check_system(dmi_hp_elitebook_gp12pxp_quirk);
+ unused_power_resources_quirk =
+ dmi_check_system(dmi_leave_unused_power_resources_on);
+}
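A minimal sketch of the quirk-caching pattern introduced above: match DMI once at init and test a cached bool on every hot path. The vendor/product strings and demo_* names are placeholders:

#include <linux/dmi.h>

static const struct dmi_system_id demo_quirk_ids[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Vendor"),	/* placeholder */
			DMI_MATCH(DMI_PRODUCT_NAME, "Product"),	/* placeholder */
		},
	},
	{}
};

static bool demo_quirk;

static int __init demo_quirks_init(void)
{
	/* dmi_check_system() returns the number of matching entries */
	demo_quirk = dmi_check_system(demo_quirk_ids);
	return 0;
}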
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index 54676e3d82dd..de5f8c018333 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -21,6 +21,25 @@
#include <linux/cacheinfo.h>
#include <acpi/processor.h>
+/*
+ * The acpi_pptt_cache_v1 in actbl2.h, which is imported from acpica,
+ * only contains the cache_id field rather than all the fields of the
+ * Cache Type Structure. Use this alternative structure until it is
+ * resolved in acpica.
+ */
+struct acpi_pptt_cache_v1_full {
+ struct acpi_subtable_header header;
+ u16 reserved;
+ u32 flags;
+ u32 next_level_of_cache;
+ u32 size;
+ u32 number_of_sets;
+ u8 associativity;
+ u8 attributes;
+ u16 line_size;
+ u32 cache_id;
+} __packed;
+
static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr,
u32 pptt_ref)
{
@@ -56,6 +75,18 @@ static struct acpi_pptt_cache *fetch_pptt_cache(struct acpi_table_header *table_
return (struct acpi_pptt_cache *)fetch_pptt_subtable(table_hdr, pptt_ref);
}
+static struct acpi_pptt_cache_v1_full *upgrade_pptt_cache(struct acpi_pptt_cache *cache)
+{
+ if (cache->header.length < sizeof(struct acpi_pptt_cache_v1_full))
+ return NULL;
+
+ /* No use for v1 if the only additional field is invalid */
+ if (!(cache->flags & ACPI_PPTT_CACHE_ID_VALID))
+ return NULL;
+
+ return (struct acpi_pptt_cache_v1_full *)cache;
+}
+
static struct acpi_subtable_header *acpi_get_pptt_resource(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *node,
int resource)
@@ -177,14 +208,14 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
}
/**
- * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache
- * levels and split cache levels (data/instruction).
+ * acpi_count_levels() - Given a PPTT table and a CPU node, count the
+ * total number of levels and split cache levels (data/instruction).
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
- * @levels: Number of levels if success.
* @split_levels: Number of split cache levels (data/instruction) if
* success. Can be NULL.
*
+ * Return: number of levels.
* Given a processor node containing a processing unit, walk into it and count
* how many levels exist solely for it, and then walk up each level until we hit
* the root node (ignore the package level because it may be possible to have
@@ -192,14 +223,18 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
* split cache levels (data/instruction) that exist at each level on the way
* up.
*/
-static void acpi_count_levels(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node,
- unsigned int *levels, unsigned int *split_levels)
+static int acpi_count_levels(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *split_levels)
{
+ int current_level = 0;
+
do {
- acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0);
+ acpi_find_cache_level(table_hdr, cpu_node, &current_level, split_levels, 0, 0);
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
} while (cpu_node);
+
+ return current_level;
}
/**
@@ -351,7 +386,6 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
* @this_leaf: Kernel cache info structure being updated
* @found_cache: The PPTT node describing this cache instance
* @cpu_node: A unique reference to describe this cache instance
- * @revision: The revision of the PPTT table
*
* The ACPI spec implies that the fields in the cache structures are used to
* extend and correct the information probed from the hardware. Let's only
@@ -361,10 +395,9 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
*/
static void update_cache_properties(struct cacheinfo *this_leaf,
struct acpi_pptt_cache *found_cache,
- struct acpi_pptt_processor *cpu_node,
- u8 revision)
+ struct acpi_pptt_processor *cpu_node)
{
- struct acpi_pptt_cache_v1* found_cache_v1;
+ struct acpi_pptt_cache_v1_full *found_cache_v1;
this_leaf->fw_token = cpu_node;
if (found_cache->flags & ACPI_PPTT_SIZE_PROPERTY_VALID)
@@ -414,9 +447,8 @@ static void update_cache_properties(struct cacheinfo *this_leaf,
found_cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)
this_leaf->type = CACHE_TYPE_UNIFIED;
- if (revision >= 3 && (found_cache->flags & ACPI_PPTT_CACHE_ID_VALID)) {
- found_cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1,
- found_cache, sizeof(struct acpi_pptt_cache));
+ found_cache_v1 = upgrade_pptt_cache(found_cache);
+ if (found_cache_v1) {
this_leaf->id = found_cache_v1->cache_id;
this_leaf->attributes |= CACHE_ID;
}
@@ -441,8 +473,7 @@ static void cache_setup_acpi_cpu(struct acpi_table_header *table,
pr_debug("found = %p %p\n", found_cache, cpu_node);
if (found_cache)
update_cache_properties(this_leaf, found_cache,
- ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table)),
- table->revision);
+ ACPI_TO_POINTER(ACPI_PTR_DIFF(cpu_node, table)));
index++;
}
@@ -645,7 +676,7 @@ int acpi_get_cache_info(unsigned int cpu, unsigned int *levels,
if (!cpu_node)
return -ENOENT;
- acpi_count_levels(table, cpu_node, levels, split_levels);
+ *levels = acpi_count_levels(table, cpu_node, split_levels);
pr_debug("Cache Setup: last_level=%d split_levels=%d\n",
*levels, split_levels ? *split_levels : -1);
@@ -817,3 +848,218 @@ int find_acpi_cpu_topology_hetero_id(unsigned int cpu)
return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE,
ACPI_PPTT_ACPI_IDENTICAL);
}
+
+/**
+ * acpi_pptt_get_child_cpus() - Find all the CPUs below a PPTT
+ * processor hierarchy node
+ *
+ * @table_hdr: A reference to the PPTT table
+ * @parent_node: A pointer to the processor hierarchy node in the
+ * table_hdr
+ * @cpus: A cpumask to fill with the CPUs below @parent_node
+ *
+ * Walks up the PPTT from every possible CPU to find if the provided
+ * @parent_node is a parent of this CPU.
+ */
+static void acpi_pptt_get_child_cpus(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *parent_node,
+ cpumask_t *cpus)
+{
+ struct acpi_pptt_processor *cpu_node;
+ u32 acpi_id;
+ int cpu;
+
+ cpumask_clear(cpus);
+
+ for_each_possible_cpu(cpu) {
+ acpi_id = get_acpi_id_for_cpu(cpu);
+ cpu_node = acpi_find_processor_node(table_hdr, acpi_id);
+
+ while (cpu_node) {
+ if (cpu_node == parent_node) {
+ cpumask_set_cpu(cpu, cpus);
+ break;
+ }
+ cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
+ }
+ }
+}
+
+/**
+ * acpi_pptt_get_cpus_from_container() - Populate a cpumask with all CPUs in a
+ * processor container
+ * @acpi_cpu_id: The UID of the processor container
+ * @cpus: The resulting CPU mask
+ *
+ * Find the specified Processor Container, and fill @cpus with all the cpus
+ * below it.
+ *
+ * Not all 'Processor Hierarchy' entries in the PPTT are either a CPU
+ * or a Processor Container; they may exist purely to describe a
+ * Private resource. CPUs have to be leaves, so a Processor Container
+ * is a non-leaf that has the 'ACPI Processor ID valid' flag set.
+ */
+void acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus)
+{
+ struct acpi_table_header *table_hdr;
+ struct acpi_subtable_header *entry;
+ unsigned long table_end;
+ u32 proc_sz;
+
+ cpumask_clear(cpus);
+
+ table_hdr = acpi_get_pptt();
+ if (!table_hdr)
+ return;
+
+ table_end = (unsigned long)table_hdr + table_hdr->length;
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
+ sizeof(struct acpi_table_pptt));
+ proc_sz = sizeof(struct acpi_pptt_processor);
+ while ((unsigned long)entry + proc_sz <= table_end) {
+ if (entry->type == ACPI_PPTT_TYPE_PROCESSOR) {
+ struct acpi_pptt_processor *cpu_node;
+
+ cpu_node = (struct acpi_pptt_processor *)entry;
+ if (cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID &&
+ !acpi_pptt_leaf_node(table_hdr, cpu_node) &&
+ cpu_node->acpi_processor_id == acpi_cpu_id) {
+ acpi_pptt_get_child_cpus(table_hdr, cpu_node, cpus);
+ break;
+ }
+ }
+ entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry,
+ entry->length);
+ }
+}
+
+/**
+ * find_acpi_cache_level_from_id() - Get the level of the specified cache
+ * @cache_id: The id field of the cache
+ *
+ * Determine the level relative to any CPU for the cache identified by
+ * cache_id. This allows the property to be found even if the CPUs are offline.
+ *
+ * The returned level can be used to group caches that are peers.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * If one CPU's L2 is shared with another CPU as L3, this function will return
+ * an unpredictable value.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the revision isn't supported or
+ * the cache cannot be found.
+ * Otherwise returns a value which represents the level of the specified cache.
+ */
+int find_acpi_cache_level_from_id(u32 cache_id)
+{
+ int cpu;
+ struct acpi_table_header *table;
+
+ table = acpi_get_pptt();
+ if (!table)
+ return -ENOENT;
+
+ if (table->revision < 3)
+ return -ENOENT;
+
+ for_each_possible_cpu(cpu) {
+ bool empty;
+ int level = 1;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct acpi_pptt_cache *cache;
+ struct acpi_pptt_processor *cpu_node;
+
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (!cpu_node)
+ continue;
+
+ do {
+ int cache_type[] = {CACHE_TYPE_INST, CACHE_TYPE_DATA, CACHE_TYPE_UNIFIED};
+
+ empty = true;
+ for (int i = 0; i < ARRAY_SIZE(cache_type); i++) {
+ struct acpi_pptt_cache_v1_full *cache_v1;
+
+ cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i],
+ level, &cpu_node);
+ if (!cache)
+ continue;
+
+ empty = false;
+
+ cache_v1 = upgrade_pptt_cache(cache);
+ if (cache_v1 && cache_v1->cache_id == cache_id)
+ return level;
+ }
+ level++;
+ } while (!empty);
+ }
+
+ return -ENOENT;
+}
+
+/**
+ * acpi_pptt_get_cpumask_from_cache_id() - Get the cpus associated with the
+ * specified cache
+ * @cache_id: The id field of the cache
+ * @cpus: Where to build the cpumask
+ *
+ * Determine which CPUs are below this cache in the PPTT. This allows the property
+ * to be found even if the CPUs are offline.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found.
+ * Otherwise returns 0 and sets the cpus in the provided cpumask.
+ */
+int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus)
+{
+ int cpu;
+ struct acpi_table_header *table;
+
+ cpumask_clear(cpus);
+
+ table = acpi_get_pptt();
+ if (!table)
+ return -ENOENT;
+
+ if (table->revision < 3)
+ return -ENOENT;
+
+ for_each_possible_cpu(cpu) {
+ bool empty;
+ int level = 1;
+ u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ struct acpi_pptt_cache *cache;
+ struct acpi_pptt_processor *cpu_node;
+
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (!cpu_node)
+ continue;
+
+ do {
+ int cache_type[] = {CACHE_TYPE_INST, CACHE_TYPE_DATA, CACHE_TYPE_UNIFIED};
+
+ empty = true;
+ for (int i = 0; i < ARRAY_SIZE(cache_type); i++) {
+ struct acpi_pptt_cache_v1_full *cache_v1;
+
+ cache = acpi_find_cache_node(table, acpi_cpu_id, cache_type[i],
+ level, &cpu_node);
+
+ if (!cache)
+ continue;
+
+ empty = false;
+
+ cache_v1 = upgrade_pptt_cache(cache);
+ if (cache_v1 && cache_v1->cache_id == cache_id)
+ cpumask_set_cpu(cpu, cpus);
+ }
+ level++;
+ } while (!empty);
+ }
+
+ return 0;
+}
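A hedged usage sketch for the two helpers added above; the caller-side function and names are hypothetical:

#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

static void demo_report_cache(u32 cache_id)
{
	cpumask_var_t cpus;
	int level;

	if (!zalloc_cpumask_var(&cpus, GFP_KERNEL))
		return;

	/* both lookups walk the PPTT, so they work even for offline CPUs */
	level = find_acpi_cache_level_from_id(cache_id);
	if (level > 0 &&
	    !acpi_pptt_get_cpumask_from_cache_id(cache_id, cpus))
		pr_info("cache %#x: level %d spans %u CPUs\n",
			cache_id, level, cpumask_weight(cpus));

	free_cpumask_var(cpus);
}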
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index be033bbb126a..7b8b5d2015ec 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -150,15 +150,28 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
th = &tm->handlers[cur_handler];
guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
+
+ /*
+ * Print a message and skip parsing the VA if handler_address
+ * is NULL.
+ */
+ if (unlikely(!handler_info->handler_address)) {
+ pr_info("Skipping handler with NULL address for GUID: %pUL",
+ (guid_t *)handler_info->handler_guid);
+ continue;
+ }
+
th->handler_addr =
(void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
/*
- * Print a warning message if handler_addr is zero which is not expected to
- * ever happen.
+ * Print a warning message and skip parsing the VA if handler_addr is zero,
+ * which is not expected to ever happen.
*/
- if (unlikely(!th->handler_addr))
+ if (unlikely(!th->handler_addr)) {
pr_warn("Failed to find VA of handler for GUID: %pUL, PA: 0x%llx",
&th->guid, handler_info->handler_address);
+ continue;
+ }
th->static_data_buffer_addr =
efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
@@ -231,6 +244,12 @@ static struct prm_handler_info *find_prm_handler(const guid_t *guid)
return (struct prm_handler_info *) find_guid_info(guid, GET_HANDLER);
}
+bool acpi_prm_handler_available(const guid_t *guid)
+{
+ return find_prm_handler(guid) && find_prm_module(guid);
+}
+EXPORT_SYMBOL_GPL(acpi_prm_handler_available);
+
/* In-coming PRM commands */
#define PRM_CMD_RUN_SERVICE 0
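A hedged sketch of a caller probing the new acpi_prm_handler_available() export; the GUID below is a placeholder, not a real platform handler GUID:

#include <linux/acpi.h>
#include <linux/uuid.h>

static bool demo_prm_usable(void)
{
	/* placeholder GUID - a real caller uses its platform's handler GUID */
	static const guid_t demo_guid =
		GUID_INIT(0x00000000, 0x0000, 0x0000,
			  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);

	/* true only when both the PRM module and its handler were parsed */
	return acpi_prm_handler_available(&demo_guid);
}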
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 9b6b71a2ffb5..a4498357bd16 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -54,7 +54,7 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
- if (device_declaration && (apic->uid == acpi_id)) {
+ if (apic->uid == acpi_id && (device_declaration || acpi_id < 255)) {
*apic_id = apic->local_apic_id;
return 0;
}
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 2c2dc559e0f8..89f2f08b2554 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -732,18 +732,16 @@ static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
return 0;
}
-static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
- struct cpuidle_device *dev)
+static void acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
+ struct cpuidle_device *dev)
{
int i, count = ACPI_IDLE_STATE_START;
struct acpi_processor_cx *cx;
- struct cpuidle_state *state;
if (max_cstate == 0)
max_cstate = 1;
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
- state = &acpi_idle_driver.states[count];
cx = &pr->power.states[i];
if (!cx->valid)
@@ -751,27 +749,13 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
per_cpu(acpi_cstate[count], dev->cpu) = cx;
- if (lapic_timer_needs_broadcast(pr, cx))
- state->flags |= CPUIDLE_FLAG_TIMER_STOP;
-
- if (cx->type == ACPI_STATE_C3) {
- state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
- if (pr->flags.bm_check)
- state->flags |= CPUIDLE_FLAG_RCU_IDLE;
- }
-
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
-
- if (!count)
- return -EINVAL;
-
- return 0;
}
-static int acpi_processor_setup_cstates(struct acpi_processor *pr)
+static void acpi_processor_setup_cstates(struct acpi_processor *pr)
{
int i, count;
struct acpi_processor_cx *cx;
@@ -818,17 +802,21 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
state->enter_s2idle = acpi_idle_enter_s2idle;
+ if (lapic_timer_needs_broadcast(pr, cx))
+ state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+
+ if (cx->type == ACPI_STATE_C3) {
+ state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
+ if (pr->flags.bm_check)
+ state->flags |= CPUIDLE_FLAG_RCU_IDLE;
+ }
+
count++;
if (count == CPUIDLE_STATE_MAX)
break;
}
drv->state_count = count;
-
- if (!count)
- return -EINVAL;
-
- return 0;
}
static inline void acpi_processor_cstate_first_run_checks(void)
@@ -998,11 +986,6 @@ end:
return ret;
}
-/*
- * flat_state_cnt - the number of composite LPI states after the process of flattening
- */
-static int flat_state_cnt;
-
/**
* combine_lpi_states - combine local and parent LPI states to form a composite LPI state
*
@@ -1045,9 +1028,10 @@ static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
curr_level->composite_states[curr_level->composite_states_size++] = t;
}
-static int flatten_lpi_states(struct acpi_processor *pr,
- struct acpi_lpi_states_array *curr_level,
- struct acpi_lpi_states_array *prev_level)
+static unsigned int flatten_lpi_states(struct acpi_processor *pr,
+ unsigned int flat_state_cnt,
+ struct acpi_lpi_states_array *curr_level,
+ struct acpi_lpi_states_array *prev_level)
{
int i, j, state_count = curr_level->size;
struct acpi_lpi_state *p, *t = curr_level->entries;
@@ -1087,7 +1071,7 @@ static int flatten_lpi_states(struct acpi_processor *pr,
}
kfree(curr_level->entries);
- return 0;
+ return flat_state_cnt;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
@@ -1102,6 +1086,7 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
acpi_handle handle = pr->handle, pr_ahandle;
struct acpi_device *d = NULL;
struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
+ unsigned int state_count;
/* make sure our architecture has support */
ret = acpi_processor_ffh_lpi_probe(pr->id);
@@ -1114,14 +1099,13 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
if (!acpi_has_method(handle, "_LPI"))
return -EINVAL;
- flat_state_cnt = 0;
prev = &info[0];
curr = &info[1];
handle = pr->handle;
ret = acpi_processor_evaluate_lpi(handle, prev);
if (ret)
return ret;
- flatten_lpi_states(pr, prev, NULL);
+ state_count = flatten_lpi_states(pr, 0, prev, NULL);
status = acpi_get_parent(handle, &pr_ahandle);
while (ACPI_SUCCESS(status)) {
@@ -1143,18 +1127,19 @@ static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
break;
/* flatten all the LPI states in this level of hierarchy */
- flatten_lpi_states(pr, curr, prev);
+ state_count = flatten_lpi_states(pr, state_count, curr, prev);
tmp = prev, prev = curr, curr = tmp;
status = acpi_get_parent(handle, &pr_ahandle);
}
- pr->power.count = flat_state_cnt;
/* reset the index after flattening */
- for (i = 0; i < pr->power.count; i++)
+ for (i = 0; i < state_count; i++)
pr->power.lpi_states[i].index = i;
+ pr->power.count = state_count;
+
/* Tell driver that _LPI is supported. */
pr->flags.has_lpi = 1;
pr->flags.power = 1;
@@ -1246,7 +1231,8 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
if (pr->flags.has_lpi)
return acpi_processor_setup_lpi_states(pr);
- return acpi_processor_setup_cstates(pr);
+ acpi_processor_setup_cstates(pr);
+ return 0;
}
/**
@@ -1266,7 +1252,8 @@ static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
if (pr->flags.has_lpi)
return acpi_processor_ffh_lpi_probe(pr->id);
- return acpi_processor_setup_cpuidle_cx(pr, dev);
+ acpi_processor_setup_cpuidle_cx(pr, dev);
+ return 0;
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
@@ -1405,6 +1392,9 @@ int acpi_processor_power_init(struct acpi_processor *pr)
if (retval) {
if (acpi_processor_registered == 0)
cpuidle_unregister_driver(&acpi_idle_driver);
+
+ per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+ kfree(dev);
return retval;
}
acpi_processor_registered++;
@@ -1431,3 +1421,5 @@ int acpi_processor_power_exit(struct acpi_processor *pr)
pr->flags.power_setup_done = 0;
return 0;
}
+
+MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 1219adb11ab9..c7b1dc5687ec 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -62,19 +62,14 @@ static int phys_package_first_cpu(int cpu)
return 0;
}
-static int cpu_has_cpufreq(unsigned int cpu)
+static bool cpu_has_cpufreq(unsigned int cpu)
{
- struct cpufreq_policy *policy;
-
if (!acpi_processor_cpufreq_init)
return 0;
- policy = cpufreq_cpu_get(cpu);
- if (policy) {
- cpufreq_cpu_put(policy);
- return 1;
- }
- return 0;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+
+ return policy != NULL;
}
static int cpufreq_get_max_state(unsigned int cpu)
@@ -93,12 +88,31 @@ static int cpufreq_get_cur_state(unsigned int cpu)
return reduction_step(cpu);
}
+static bool cpufreq_update_thermal_limit(unsigned int cpu, struct acpi_processor *pr)
+{
+ unsigned long max_freq;
+ int ret;
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return false;
+
+ max_freq = (policy->cpuinfo.max_freq *
+ (100 - reduction_step(cpu) * cpufreq_thermal_reduction_pctg)) / 100;
+
+ ret = freq_qos_update_request(&pr->thermal_req, max_freq);
+ if (ret < 0) {
+ pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
+ pr->id, ret);
+ }
+
+ return true;
+}
+
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
- struct cpufreq_policy *policy;
struct acpi_processor *pr;
- unsigned long max_freq;
- int i, ret;
+ int i;
if (!cpu_has_cpufreq(cpu))
return 0;
@@ -120,20 +134,8 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
continue;
- policy = cpufreq_cpu_get(i);
- if (!policy)
+ if (!cpufreq_update_thermal_limit(i, pr))
return -EINVAL;
-
- max_freq = (policy->cpuinfo.max_freq *
- (100 - reduction_step(i) * cpufreq_thermal_reduction_pctg)) / 100;
-
- cpufreq_cpu_put(policy);
-
- ret = freq_qos_update_request(&pr->thermal_req, max_freq);
- if (ret < 0) {
- pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
- pr->id, ret);
- }
}
return 0;
}
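A minimal sketch of the scope-based cleanup pattern adopted above: with the __free(put_cpufreq_policy) annotation, cpufreq_cpu_put() runs automatically when the pointer leaves scope, on every return path:

#include <linux/cpufreq.h>

static bool demo_cpu_has_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy __free(put_cpufreq_policy) =
		cpufreq_cpu_get(cpu);

	/* no explicit cpufreq_cpu_put() needed before returning */
	return policy != NULL;
}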
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 436019d96027..18e90067d567 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -83,6 +83,7 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
struct fwnode_handle *parent)
{
struct acpi_data_node *dn;
+ acpi_handle scope = NULL;
bool result;
if (acpi_graph_ignore_port(handle))
@@ -98,59 +99,45 @@ static bool acpi_nondev_subnode_extract(union acpi_object *desc,
INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
- result = acpi_extract_properties(handle, desc, &dn->data);
-
- if (handle) {
- acpi_handle scope;
- acpi_status status;
+ /*
+ * The scope for the completion of relative pathname segments and
+ * subnode object lookup is that of the namespace node (device)
+ * containing the object that has returned the package. That is, it's
+ * the scope of that object's parent device.
+ */
+ if (handle)
+ acpi_get_parent(handle, &scope);
- /*
- * The scope for the subnode object lookup is the one of the
- * namespace node (device) containing the object that has
- * returned the package. That is, it's the scope of that
- * object's parent.
- */
- status = acpi_get_parent(handle, &scope);
- if (ACPI_SUCCESS(status)
- && acpi_enumerate_nondev_subnodes(scope, desc, &dn->data,
- &dn->fwnode))
- result = true;
- } else if (acpi_enumerate_nondev_subnodes(NULL, desc, &dn->data,
- &dn->fwnode)) {
+ /*
+ * Extract properties from the _DSD-equivalent package pointed to by
+ * desc and use scope (if not NULL) for the completion of relative
+ * pathname segments.
+ *
+ * The extracted properties will be held in the new data node dn.
+ */
+ result = acpi_extract_properties(scope, desc, &dn->data);
+ /*
+ * Look for subnodes in the _DSD-equivalent package pointed to by desc
+ * and create child nodes of dn if there are any.
+ */
+ if (acpi_enumerate_nondev_subnodes(scope, desc, &dn->data, &dn->fwnode))
result = true;
- }
-
- if (result) {
- dn->handle = handle;
- dn->data.pointer = desc;
- list_add_tail(&dn->sibling, list);
- return true;
- }
-
- kfree(dn);
- acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
- return false;
-}
-
-static bool acpi_nondev_subnode_data_ok(acpi_handle handle,
- const union acpi_object *link,
- struct list_head *list,
- struct fwnode_handle *parent)
-{
- struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
- acpi_status status;
- status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
- ACPI_TYPE_PACKAGE);
- if (ACPI_FAILURE(status))
+ if (!result) {
+ kfree(dn);
+ acpi_handle_debug(handle, "Invalid properties/subnodes data, skipping\n");
return false;
+ }
- if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
- parent))
- return true;
+ /*
+ * This will be NULL if the desc package is embedded in an outer
+ * _DSD-equivalent package and its scope cannot be determined.
+ */
+ dn->handle = handle;
+ dn->data.pointer = desc;
+ list_add_tail(&dn->sibling, list);
- ACPI_FREE(buf.pointer);
- return false;
+ return true;
}
static bool acpi_nondev_subnode_ok(acpi_handle scope,
@@ -158,9 +145,16 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
struct list_head *list,
struct fwnode_handle *parent)
{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
acpi_handle handle;
acpi_status status;
+ /*
+ * If the scope is unknown, the _DSD-equivalent package being parsed
+ * was embedded in an outer _DSD-equivalent package as a result of
+ * direct evaluation of an object pointed to by a reference. In that
+ * case, using a pathname as the target object pointer is invalid.
+ */
if (!scope)
return false;
@@ -169,7 +163,17 @@ static bool acpi_nondev_subnode_ok(acpi_handle scope,
if (ACPI_FAILURE(status))
return false;
- return acpi_nondev_subnode_data_ok(handle, link, list, parent);
+ status = acpi_evaluate_object_typed(handle, NULL, NULL, &buf,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ if (acpi_nondev_subnode_extract(buf.pointer, handle, link, list,
+ parent))
+ return true;
+
+ ACPI_FREE(buf.pointer);
+ return false;
}
static bool acpi_add_nondev_subnodes(acpi_handle scope,
@@ -180,9 +184,12 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
bool ret = false;
int i;
+ /*
+ * Every element in the links package is expected to represent a link
+ * to a non-device node in a tree containing device-specific data.
+ */
for (i = 0; i < links->package.count; i++) {
union acpi_object *link, *desc;
- acpi_handle handle;
bool result;
link = &links->package.elements[i];
@@ -190,26 +197,53 @@ static bool acpi_add_nondev_subnodes(acpi_handle scope,
if (link->package.count != 2)
continue;
- /* The first one must be a string. */
+ /* The first one (the key) must be a string. */
if (link->package.elements[0].type != ACPI_TYPE_STRING)
continue;
- /* The second one may be a string, a reference or a package. */
+ /* The second one (the target) may be a string or a package. */
switch (link->package.elements[1].type) {
case ACPI_TYPE_STRING:
+ /*
+ * The string is expected to be a full pathname or a
+ * pathname segment relative to the given scope. That
+ * pathname is expected to point to an object returning
+ * a package that contains _DSD-equivalent information.
+ */
result = acpi_nondev_subnode_ok(scope, link, list,
parent);
break;
- case ACPI_TYPE_LOCAL_REFERENCE:
- handle = link->package.elements[1].reference.handle;
- result = acpi_nondev_subnode_data_ok(handle, link, list,
- parent);
- break;
case ACPI_TYPE_PACKAGE:
+ /*
+ * This happens when a reference is used in AML to
+ * point to the target. Since the target is expected
+ * to be a named object, a reference to it will cause it
+ * to be evaluated in place and its return package will
+ * be embedded in the links package at the location of
+ * the reference.
+ *
+ * The target package is expected to contain _DSD-
+ * equivalent information, but the scope in which it
+ * is located in the original AML is unknown. Thus
+ * it cannot contain pathname segments represented as
+ * strings because there is no way to build full
+ * pathnames out of them.
+ */
+ acpi_handle_debug(scope, "subnode %s: Unknown scope\n",
+ link->package.elements[0].string.pointer);
desc = &link->package.elements[1];
result = acpi_nondev_subnode_extract(desc, NULL, link,
list, parent);
break;
+ case ACPI_TYPE_LOCAL_REFERENCE:
+ /*
+ * It is not expected to see any local references in
+ * the links package because referencing a named object
+ * should cause it to be evaluated in place.
+ */
+ acpi_handle_info(scope, "subnode %s: Unexpected reference\n",
+ link->package.elements[0].string.pointer);
+ fallthrough;
default:
result = false;
break;
@@ -369,6 +403,9 @@ static void acpi_untie_nondev_subnodes(struct acpi_device_data *data)
struct acpi_data_node *dn;
list_for_each_entry(dn, &data->subnodes, sibling) {
+ if (!dn->handle)
+ continue;
+
acpi_detach_data(dn->handle, acpi_nondev_subnode_tag);
acpi_untie_nondev_subnodes(&dn->data);
@@ -383,6 +420,9 @@ static bool acpi_tie_nondev_subnodes(struct acpi_device_data *data)
acpi_status status;
bool ret;
+ if (!dn->handle)
+ continue;
+
status = acpi_attach_data(dn->handle, acpi_nondev_subnode_tag, dn);
if (ACPI_FAILURE(status) && status != AE_ALREADY_EXISTS) {
acpi_handle_err(dn->handle, "Can't tag data node\n");
@@ -804,13 +844,35 @@ acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
return NULL;
}
+static unsigned int acpi_fwnode_get_args_count(struct fwnode_handle *fwnode,
+ const char *nargs_prop)
+{
+ const struct acpi_device_data *data;
+ const union acpi_object *obj;
+ int ret;
+
+ data = acpi_device_data_of_node(fwnode);
+ if (!data)
+ return 0;
+
+ ret = acpi_data_get_property(data, nargs_prop, ACPI_TYPE_INTEGER, &obj);
+ if (ret)
+ return 0;
+
+ return obj->integer.value;
+}
+
static int acpi_get_ref_args(struct fwnode_reference_args *args,
struct fwnode_handle *ref_fwnode,
+ const char *nargs_prop,
const union acpi_object **element,
const union acpi_object *end, size_t num_args)
{
u32 nargs = 0, i;
+ if (nargs_prop)
+ num_args = acpi_fwnode_get_args_count(ref_fwnode, nargs_prop);
+
/*
* Assume the following integer elements are all args. Stop counting on
* the first reference (possibly represented as a string) or end of the
@@ -882,45 +944,10 @@ static struct fwnode_handle *acpi_parse_string_ref(const struct fwnode_handle *f
return &dn->fwnode;
}
-/**
- * __acpi_node_get_property_reference - returns handle to the referenced object
- * @fwnode: Firmware node to get the property from
- * @propname: Name of the property
- * @index: Index of the reference to return
- * @num_args: Maximum number of arguments after each reference
- * @args: Location to store the returned reference with optional arguments
- * (may be NULL)
- *
- * Find property with @name, verifify that it is a package containing at least
- * one object reference and if so, store the ACPI device object pointer to the
- * target object in @args->adev. If the reference includes arguments, store
- * them in the @args->args[] array.
- *
- * If there's more than one reference in the property value package, @index is
- * used to select the one to return.
- *
- * It is possible to leave holes in the property value set like in the
- * example below:
- *
- * Package () {
- * "cs-gpios",
- * Package () {
- * ^GPIO, 19, 0, 0,
- * ^GPIO, 20, 0, 0,
- * 0,
- * ^GPIO, 21, 0, 0,
- * }
- * }
- *
- * Calling this function with index %2 or index %3 return %-ENOENT. If the
- * property does not contain any more values %-ENOENT is returned. The NULL
- * entry must be single integer and preferably contain value %0.
- *
- * Return: %0 on success, negative error code on failure.
- */
-int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
- const char *propname, size_t index, size_t num_args,
- struct fwnode_reference_args *args)
+static int acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *propname, const char *nargs_prop,
+ unsigned int args_count, unsigned int index,
+ struct fwnode_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
@@ -996,10 +1023,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
acpi_fwnode_handle(device),
- &element, end, num_args);
+ nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1014,10 +1041,9 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -EINVAL;
element++;
-
ret = acpi_get_ref_args(idx == index ? args : NULL,
- ref_fwnode, &element, end,
- num_args);
+ ref_fwnode, nargs_prop, &element, end,
+ args_count);
if (ret < 0)
return ret;
@@ -1039,6 +1065,50 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
return -ENOENT;
}
+
+/**
+ * __acpi_node_get_property_reference - returns handle to the referenced object
+ * @fwnode: Firmware node to get the property from
+ * @propname: Name of the property
+ * @index: Index of the reference to return
+ * @num_args: Maximum number of arguments after each reference
+ * @args: Location to store the returned reference with optional arguments
+ * (may be NULL)
+ *
+ * Find property with @name, verify that it is a package containing at least
+ * one object reference and if so, store the ACPI device object pointer to the
+ * target object in @args->adev. If the reference includes arguments, store
+ * them in the @args->args[] array.
+ *
+ * If there's more than one reference in the property value package, @index is
+ * used to select the one to return.
+ *
+ * It is possible to leave holes in the property value set like in the
+ * example below:
+ *
+ * Package () {
+ * "cs-gpios",
+ * Package () {
+ * ^GPIO, 19, 0, 0,
+ * ^GPIO, 20, 0, 0,
+ * 0,
+ * ^GPIO, 21, 0, 0,
+ * }
+ * }
+ *
+ * Calling this function with index %2 or index %3 returns %-ENOENT. If the
+ * property does not contain any more values, %-ENOENT is returned. The NULL
+ * entry must be a single integer and preferably contain the value %0.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
+ const char *propname, size_t index,
+ size_t num_args,
+ struct fwnode_reference_args *args)
+{
+ return acpi_fwnode_get_reference_args(fwnode, propname, NULL, num_args, index, args);
+}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
static int acpi_data_prop_read_single(const struct acpi_device_data *data,
@@ -1210,7 +1280,7 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
- nval = min_t(u32, nval, obj->package.count);
+ nval = min(nval, obj->package.count);
if (nval == 0)
return -ENODATA;
@@ -1259,13 +1329,14 @@ static int stop_on_next(struct acpi_device *adev, void *data)
return 0;
}
-/**
+/*
* acpi_get_next_subnode - Return the next child node handle for a fwnode
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
*/
-struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
- struct fwnode_handle *child)
+static struct fwnode_handle *
+acpi_get_next_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
@@ -1318,6 +1389,28 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
return NULL;
}
+/*
+ * acpi_get_next_present_subnode - Return the next present child node handle
+ * @fwnode: Firmware node to find the next child node for.
+ * @child: Handle to one of the device's child nodes or a null handle.
+ *
+ * Like acpi_get_next_subnode(), but the device nodes returned by
+ * acpi_get_next_present_subnode() are guaranteed to be present.
+ *
+ * Returns: The fwnode handle of the next present sub-node.
+ */
+static struct fwnode_handle *
+acpi_get_next_present_subnode(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ do {
+ child = acpi_get_next_subnode(fwnode, child);
+ } while (is_acpi_device_node(child) &&
+ !acpi_device_is_present(to_acpi_device_node(child)));
+
+ return child;
+}
+
/**
* acpi_node_get_parent - Return parent fwnode of this fwnode
* @fwnode: Firmware node whose parent to get
@@ -1380,7 +1473,7 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!prev) {
do {
- port = fwnode_get_next_child_node(fwnode, port);
+ port = acpi_get_next_subnode(fwnode, port);
/*
* The names of the port nodes begin with "port@"
* followed by the number of the port node and they also
@@ -1398,14 +1491,17 @@ static struct fwnode_handle *acpi_graph_get_next_endpoint(
if (!port)
return NULL;
- endpoint = fwnode_get_next_child_node(port, prev);
- while (!endpoint) {
- port = fwnode_get_next_child_node(fwnode, port);
- if (!port)
+ do {
+ endpoint = acpi_get_next_subnode(port, prev);
+ if (endpoint)
break;
- if (is_acpi_graph_node(port, "port"))
- endpoint = fwnode_get_next_child_node(port, NULL);
- }
+
+ prev = NULL;
+
+ do {
+ port = acpi_get_next_subnode(fwnode, port);
+ } while (port && !is_acpi_graph_node(port, "port"));
+ } while (port);
/*
* The names of the endpoint nodes begin with "endpoint@" followed by
@@ -1558,16 +1654,6 @@ acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
val, nval);
}
-static int
-acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
- const char *prop, const char *nargs_prop,
- unsigned int args_count, unsigned int index,
- struct fwnode_reference_args *args)
-{
- return __acpi_node_get_property_reference(fwnode, prop, index,
- args_count, args);
-}
-
static const char *acpi_fwnode_get_name(const struct fwnode_handle *fwnode)
{
const struct acpi_device *adev;
@@ -1632,6 +1718,7 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
if (fwnode_property_read_u32(fwnode, "reg", &endpoint->id))
fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id);
+ fwnode_handle_put(port_fwnode);
return 0;
}
@@ -1662,7 +1749,7 @@ static int acpi_fwnode_irq_get(const struct fwnode_handle *fwnode,
.property_read_string_array = \
acpi_fwnode_property_read_string_array, \
.get_parent = acpi_node_get_parent, \
- .get_next_child_node = acpi_get_next_subnode, \
+ .get_next_child_node = acpi_get_next_present_subnode, \
.get_named_child_node = acpi_fwnode_get_named_child_node, \
.get_name = acpi_fwnode_get_name, \
.get_name_prefix = acpi_fwnode_get_name_prefix, \
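A hedged caller-side sketch for __acpi_node_get_property_reference(), whose body is now a thin wrapper around acpi_fwnode_get_reference_args(); the property name and argument count below are illustrative only:

#include <linux/acpi.h>
#include <linux/property.h>

static int demo_get_first_gpio_ref(struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int ret;

	/* reference at index 0, with up to 3 integer args following it */
	ret = __acpi_node_get_property_reference(fwnode, "cs-gpios",
						 0, 3, &args);
	if (ret)
		return ret;

	pr_info("reference carries %u args\n", args.nargs);
	return 0;
}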
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b1ab192d7a08..d16906f46484 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/dmi.h>
+#include <linux/string_choices.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
@@ -511,6 +512,13 @@ static const struct dmi_system_id irq1_level_low_skip_override[] = {
},
},
{
+ /* Asus Vivobook Pro N6506CU* */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "N6506CU"),
+ },
+ },
+ {
/* LG Electronics 17U70P */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
@@ -773,7 +781,7 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
t ? "level" : "edge",
trig == triggering ? "" : "(!)",
- p ? "low" : "high",
+ str_low_high(p),
pol == polarity ? "" : "(!)");
triggering = trig;
polarity = pol;
diff --git a/drivers/acpi/riscv/Kconfig b/drivers/acpi/riscv/Kconfig
new file mode 100644
index 000000000000..046296a18d00
--- /dev/null
+++ b/drivers/acpi/riscv/Kconfig
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ACPI Configuration for RISC-V
+#
+
+config ACPI_RIMT
+ bool
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index a96fdf1e2cb8..1284a076fa88 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -2,3 +2,4 @@
obj-y += rhct.o init.o irq.o
obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
+obj-$(CONFIG_ACPI_RIMT) += rimt.o
diff --git a/drivers/acpi/riscv/init.c b/drivers/acpi/riscv/init.c
index 673e4d5dd752..7c00f7995e86 100644
--- a/drivers/acpi/riscv/init.c
+++ b/drivers/acpi/riscv/init.c
@@ -10,4 +10,6 @@
void __init acpi_arch_init(void)
{
riscv_acpi_init_gsi_mapping();
+ if (IS_ENABLED(CONFIG_ACPI_RIMT))
+ riscv_acpi_rimt_init();
}
diff --git a/drivers/acpi/riscv/init.h b/drivers/acpi/riscv/init.h
index 0b9a07e4031f..1680aa2aaf23 100644
--- a/drivers/acpi/riscv/init.h
+++ b/drivers/acpi/riscv/init.h
@@ -2,3 +2,4 @@
#include <linux/init.h>
void __init riscv_acpi_init_gsi_mapping(void);
+void __init riscv_acpi_rimt_init(void);
diff --git a/drivers/acpi/riscv/irq.c b/drivers/acpi/riscv/irq.c
index cced960c2aef..d9a2154d6c6a 100644
--- a/drivers/acpi/riscv/irq.c
+++ b/drivers/acpi/riscv/irq.c
@@ -10,6 +10,8 @@
#include "init.h"
+#define RISCV_ACPI_INTC_FLAG_PENDING BIT(0)
+
struct riscv_ext_intc_list {
acpi_handle handle;
u32 gsi_base;
@@ -17,6 +19,7 @@ struct riscv_ext_intc_list {
u32 nr_idcs;
u32 id;
u32 type;
+ u32 flag;
struct list_head list;
};
@@ -69,6 +72,22 @@ static acpi_status riscv_acpi_update_gsi_handle(u32 gsi_base, acpi_handle handle
return AE_NOT_FOUND;
}
+int riscv_acpi_update_gsi_range(u32 gsi_base, u32 nr_irqs)
+{
+ struct riscv_ext_intc_list *ext_intc_element;
+
+ list_for_each_entry(ext_intc_element, &ext_intc_list, list) {
+ if (gsi_base == ext_intc_element->gsi_base &&
+ (ext_intc_element->flag & RISCV_ACPI_INTC_FLAG_PENDING)) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ ext_intc_element->flag &= ~RISCV_ACPI_INTC_FLAG_PENDING;
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
int riscv_acpi_get_gsi_info(struct fwnode_handle *fwnode, u32 *gsi_base,
u32 *id, u32 *nr_irqs, u32 *nr_idcs)
{
@@ -115,20 +134,67 @@ struct fwnode_handle *riscv_acpi_get_gsi_domain_id(u32 gsi)
static int __init riscv_acpi_register_ext_intc(u32 gsi_base, u32 nr_irqs, u32 nr_idcs,
u32 id, u32 type)
{
- struct riscv_ext_intc_list *ext_intc_element;
+ struct riscv_ext_intc_list *ext_intc_element, *node, *prev;
ext_intc_element = kzalloc(sizeof(*ext_intc_element), GFP_KERNEL);
if (!ext_intc_element)
return -ENOMEM;
ext_intc_element->gsi_base = gsi_base;
- ext_intc_element->nr_irqs = nr_irqs;
+
+ /* If nr_irqs is zero, note it in the flag and assume the maximum possible range */
+ if (nr_irqs) {
+ ext_intc_element->nr_irqs = nr_irqs;
+ } else {
+ ext_intc_element->flag |= RISCV_ACPI_INTC_FLAG_PENDING;
+ ext_intc_element->nr_irqs = U32_MAX - ext_intc_element->gsi_base;
+ }
+
ext_intc_element->nr_idcs = nr_idcs;
ext_intc_element->id = id;
- list_add_tail(&ext_intc_element->list, &ext_intc_list);
+ list_for_each_entry(node, &ext_intc_list, list) {
+ if (node->gsi_base < ext_intc_element->gsi_base)
+ break;
+ }
+
+ /* Adjust the previous node's GSI range if it has a pending registration */
+ prev = list_prev_entry(node, list);
+ if (!list_entry_is_head(prev, &ext_intc_list, list)) {
+ if (prev->flag & RISCV_ACPI_INTC_FLAG_PENDING)
+ prev->nr_irqs = ext_intc_element->gsi_base - prev->gsi_base;
+ }
+
+ list_add_tail(&ext_intc_element->list, &node->list);
return 0;
}
+static acpi_status __init riscv_acpi_create_gsi_map_smsi(acpi_handle handle, u32 level,
+ void *context, void **return_value)
+{
+ acpi_status status;
+ u64 gbase;
+
+ if (!acpi_has_method(handle, "_GSB")) {
+ acpi_handle_err(handle, "_GSB method not found\n");
+ return AE_ERROR;
+ }
+
+ status = acpi_evaluate_integer(handle, "_GSB", NULL, &gbase);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to evaluate _GSB method\n");
+ return status;
+ }
+
+ riscv_acpi_register_ext_intc(gbase, 0, 0, 0, ACPI_RISCV_IRQCHIP_SMSI);
+ status = riscv_acpi_update_gsi_handle((u32)gbase, handle);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to find the GSI mapping entry\n");
+ return status;
+ }
+
+ return AE_OK;
+}
+
static acpi_status __init riscv_acpi_create_gsi_map(acpi_handle handle, u32 level,
void *context, void **return_value)
{
@@ -183,6 +249,9 @@ void __init riscv_acpi_init_gsi_mapping(void)
if (acpi_table_parse_madt(ACPI_MADT_TYPE_APLIC, riscv_acpi_aplic_parse_madt, 0) > 0)
acpi_get_devices("RSCV0002", riscv_acpi_create_gsi_map, NULL, NULL);
+
+ /* Unlike PLIC/APLIC, SYSMSI has no MADT entry */
+ acpi_get_devices("RSCV0006", riscv_acpi_create_gsi_map_smsi, NULL, NULL);
}
static acpi_handle riscv_acpi_get_gsi_handle(u32 gsi)
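A hedged sketch of how an irqchip probe path might resolve a GSI range that was registered above with nr_irqs == 0; the caller function is hypothetical:

static int demo_intc_probe_tail(u32 gsi_base, u32 nr_irqs)
{
	/* fails with -ENODEV unless gsi_base has a pending registration */
	int ret = riscv_acpi_update_gsi_range(gsi_base, nr_irqs);

	if (ret)
		pr_warn("no pending GSI registration at base %u\n", gsi_base);

	return ret;
}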
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
new file mode 100644
index 000000000000..7f423405e5ef
--- /dev/null
+++ b/drivers/acpi/riscv/rimt.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024-2025, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ *
+ */
+
+#define pr_fmt(fmt) "ACPI: RIMT: " fmt
+
+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
+#include <linux/iommu.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include "init.h"
+
+struct rimt_fwnode {
+ struct list_head list;
+ struct acpi_rimt_node *rimt_node;
+ struct fwnode_handle *fwnode;
+};
+
+static LIST_HEAD(rimt_fwnode_list);
+static DEFINE_SPINLOCK(rimt_fwnode_lock);
+
+#define RIMT_TYPE_MASK(type) (1 << (type))
+#define RIMT_IOMMU_TYPE BIT(0)
+
+/* Root pointer to the mapped RIMT table */
+static struct acpi_table_header *rimt_table;
+
+/**
+ * rimt_set_fwnode() - Create rimt_fwnode and use it to register
+ * iommu data in the rimt_fwnode_list
+ *
+ * @rimt_node: RIMT table node associated with the IOMMU
+ * @fwnode: fwnode associated with the RIMT node
+ *
+ * Returns: 0 on success
+ * <0 on failure
+ */
+static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
+ struct fwnode_handle *fwnode)
+{
+ struct rimt_fwnode *np;
+
+ np = kzalloc(sizeof(*np), GFP_ATOMIC);
+
+ if (WARN_ON(!np))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&np->list);
+ np->rimt_node = rimt_node;
+ np->fwnode = fwnode;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_add_tail(&np->list, &rimt_fwnode_list);
+ spin_unlock(&rimt_fwnode_lock);
+
+ return 0;
+}
+
+static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
+ void *context)
+{
+ acpi_status status = AE_NOT_FOUND;
+ struct device *dev = context;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_IOMMU) {
+ struct acpi_rimt_iommu *iommu_node = (struct acpi_rimt_iommu *)&node->node_data;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev;
+ u16 bdf;
+
+ pdev = to_pci_dev(dev);
+ bdf = PCI_DEVID(pdev->bus->number, pdev->devfn);
+ if ((pci_domain_nr(pdev->bus) == iommu_node->pcie_segment_number) &&
+ bdf == iommu_node->pcie_bdf) {
+ status = AE_OK;
+ } else {
+ status = AE_NOT_FOUND;
+ }
+ } else {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res && res->start == iommu_node->base_address)
+ status = AE_OK;
+ else
+ status = AE_NOT_FOUND;
+ }
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ struct acpi_rimt_pcie_rc *pci_rc;
+ struct pci_bus *bus;
+
+ bus = to_pci_bus(dev);
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+
+ /*
+ * It is assumed that PCI segment numbers map one-to-one
+ * with root complexes. Each segment number can represent only
+ * one root complex.
+ */
+ status = pci_rc->pcie_segment_number == pci_domain_nr(bus) ?
+ AE_OK : AE_NOT_FOUND;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_rimt_platform_device *ncomp;
+ struct device *plat_dev = dev;
+ struct acpi_device *adev;
+
+ /*
+ * Walk the device tree to find a device with an
+ * ACPI companion; there is no point in scanning
+ * RIMT for a device matching a platform device if
+ * the device does not have an ACPI companion to
+ * start with.
+ */
+ do {
+ adev = ACPI_COMPANION(plat_dev);
+ if (adev)
+ break;
+
+ plat_dev = plat_dev->parent;
+ } while (plat_dev);
+
+ if (!adev)
+ return status;
+
+ status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(plat_dev, "Can't get device full path name\n");
+ return status;
+ }
+
+ ncomp = (struct acpi_rimt_platform_device *)node->node_data;
+ status = !strcmp(ncomp->device_name, buf.pointer) ?
+ AE_OK : AE_NOT_FOUND;
+ acpi_os_free(buf.pointer);
+ }
+
+ return status;
+}
+
+static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
+ void *context)
+{
+ struct acpi_rimt_node *rimt_node, *rimt_end;
+ struct acpi_table_rimt *rimt;
+ int i;
+
+ if (!rimt_table)
+ return NULL;
+
+ /* Get the first RIMT node */
+ rimt = (struct acpi_table_rimt *)rimt_table;
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt,
+ rimt->node_offset);
+ rimt_end = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rimt_table->length);
+
+ for (i = 0; i < rimt->num_nodes; i++) {
+ if (WARN_TAINT(rimt_node >= rimt_end, TAINT_FIRMWARE_WORKAROUND,
+ "RIMT node pointer overflows, bad table!\n"))
+ return NULL;
+
+ if (rimt_node->type == type &&
+ ACPI_SUCCESS(rimt_match_node_callback(rimt_node, context)))
+ return rimt_node;
+
+ rimt_node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_node,
+ rimt_node->length);
+ }
+
+ return NULL;
+}
+
+/*
+ * RISC-V supports an IOMMU as either a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well, along with the RIMT. To create the link between the RIMT
+ * information and the platform device, the IOMMU driver should register
+ * itself with the RIMT module. This is true for a PCI-based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+ struct fwnode_handle *rimt_fwnode;
+ struct acpi_rimt_node *node;
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+ if (!node) {
+ pr_err("Could not find IOMMU node in RIMT\n");
+ return -ENODEV;
+ }
+
+ if (dev_is_pci(dev)) {
+ rimt_fwnode = acpi_alloc_fwnode_static();
+ if (!rimt_fwnode)
+ return -ENOMEM;
+
+ rimt_fwnode->dev = dev;
+ if (!dev->fwnode)
+ dev->fwnode = rimt_fwnode;
+
+ rimt_set_fwnode(node, rimt_fwnode);
+ } else {
+ rimt_set_fwnode(node, dev->fwnode);
+ }
+
+ return 0;
+}
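+
+/*
+ * Illustrative (hypothetical) caller: an IOMMU driver probe routine is
+ * expected to do something like
+ *
+ *	if (!acpi_disabled)
+ *		rimt_iommu_register(dev);
+ *
+ * once its fwnode is ready to be looked up.
+ */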
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+ struct fwnode_handle *fwnode = NULL;
+ struct rimt_fwnode *curr;
+
+ spin_lock(&rimt_fwnode_lock);
+ list_for_each_entry(curr, &rimt_fwnode_list, list) {
+ if (curr->rimt_node == node) {
+ fwnode = curr->fwnode;
+ break;
+ }
+ }
+ spin_unlock(&rimt_fwnode_lock);
+
+ return fwnode;
+}
+
+static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_pcie_rc *pci_rc;
+
+ pci_rc = (struct acpi_rimt_pcie_rc *)node->node_data;
+ return pci_rc->flags & ACPI_RIMT_PCIE_ATS_SUPPORTED;
+}
+
+static int rimt_iommu_xlate(struct device *dev, struct acpi_rimt_node *node, u32 deviceid)
+{
+ struct fwnode_handle *rimt_fwnode;
+
+ if (!node)
+ return -ENODEV;
+
+ rimt_fwnode = rimt_get_fwnode(node);
+
+ /*
+ * The IOMMU drivers may not be probed yet.
+ * Defer the IOMMU configuration.
+ */
+ if (!rimt_fwnode)
+ return -EPROBE_DEFER;
+
+ return acpi_iommu_fwspec_init(dev, deviceid, rimt_fwnode);
+}
+
+struct rimt_pci_alias_info {
+ struct device *dev;
+ struct acpi_rimt_node *node;
+ const struct iommu_ops *ops;
+};
+
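+/*
+ * Translate @rid_in through one ID mapping entry. Worked example with
+ * hypothetical values: source_id_base 0x100, num_ids 8 and dest_id_base
+ * 0x20 map an input ID of 0x103 to 0x23.
+ */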
+static int rimt_id_map(struct acpi_rimt_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out)
+{
+ if (rid_in < map->source_id_base ||
+ (rid_in > map->source_id_base + map->num_ids))
+ return -ENXIO;
+
+ *rid_out = map->dest_id_base + (rid_in - map->source_id_base);
+ return 0;
+}
+
+static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
+ u32 *id_out, int index)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ struct acpi_rimt_id_mapping *map;
+ struct acpi_rimt_node *parent;
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ return NULL;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping || index >= num_id_mapping)
+ return NULL;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset + index * sizeof(*map));
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ return NULL;
+ }
+
+ parent = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table, map->dest_offset);
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE ||
+ node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ *id_out = map->dest_id_base;
+ return parent;
+ }
+
+ return NULL;
+}
+
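+/*
+ * Example walk (hypothetical table): a requester ID enters at a PCIe
+ * root-complex node, is translated to a device ID by one of that node's
+ * mapping entries, and the walk stops once the node reached matches
+ * @type_mask (e.g. RIMT_IOMMU_TYPE).
+ */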
+static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
+ u32 id_in, u32 *id_out,
+ u8 type_mask)
+{
+ struct acpi_rimt_platform_device *plat_node;
+ u32 id_mapping_offset, num_id_mapping;
+ struct acpi_rimt_pcie_rc *pci_node;
+ u32 id = id_in;
+
+ /* Parse the ID mapping tree to find specified node type */
+ while (node) {
+ struct acpi_rimt_id_mapping *map;
+ int i, rc = 0;
+ u32 map_id = id;
+
+ if (RIMT_TYPE_MASK(node->type) & type_mask) {
+ if (id_out)
+ *id_out = id;
+ return node;
+ }
+
+ if (node->type == ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX) {
+ pci_node = (struct acpi_rimt_pcie_rc *)&node->node_data;
+ id_mapping_offset = pci_node->id_mapping_offset;
+ num_id_mapping = pci_node->num_id_mappings;
+ } else if (node->type == ACPI_RIMT_NODE_TYPE_PLAT_DEVICE) {
+ plat_node = (struct acpi_rimt_platform_device *)&node->node_data;
+ id_mapping_offset = plat_node->id_mapping_offset;
+ num_id_mapping = plat_node->num_id_mappings;
+ } else {
+ goto fail_map;
+ }
+
+ if (!id_mapping_offset || !num_id_mapping)
+ goto fail_map;
+
+ map = ACPI_ADD_PTR(struct acpi_rimt_id_mapping, node,
+ id_mapping_offset);
+
+ /* Firmware bug! */
+ if (!map->dest_offset) {
+ pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
+ node, node->type);
+ goto fail_map;
+ }
+
+ /* Do the ID translation */
+ for (i = 0; i < num_id_mapping; i++, map++) {
+ rc = rimt_id_map(map, node->type, map_id, &id);
+ if (!rc)
+ break;
+ }
+
+ if (i == num_id_mapping)
+ goto fail_map;
+
+ node = ACPI_ADD_PTR(struct acpi_rimt_node, rimt_table,
+ rc ? 0 : map->dest_offset);
+ }
+
+fail_map:
+ /* Map input ID to output ID unchanged on mapping failure */
+ if (id_out)
+ *id_out = id_in;
+
+ return NULL;
+}
+
+static struct acpi_rimt_node *rimt_node_map_platform_id(struct acpi_rimt_node *node, u32 *id_out,
+ u8 type_mask, int index)
+{
+ struct acpi_rimt_node *parent;
+ u32 id;
+
+ parent = rimt_node_get_id(node, &id, index);
+ if (!parent)
+ return NULL;
+
+ if (!(RIMT_TYPE_MASK(parent->type) & type_mask))
+ parent = rimt_node_map_id(parent, id, id_out, type_mask);
+ else if (id_out)
+ *id_out = id;
+
+ return parent;
+}
+
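+/*
+ * pci_for_each_dma_alias() callback: translate each DMA alias of the
+ * device through the root-complex ID mappings and set up the IOMMU
+ * fwspec for it.
+ */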
+static int rimt_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct rimt_pci_alias_info *info = data;
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(info->node, alias, &deviceid, RIMT_IOMMU_TYPE);
+ return rimt_iommu_xlate(info->dev, parent, deviceid);
+}
+
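+/*
+ * Try every ID mapping index of the platform-device node in turn,
+ * stopping when an index has no IOMMU parent or a translation fails.
+ */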
+static int rimt_plat_iommu_map(struct device *dev, struct acpi_rimt_node *node)
+{
+ struct acpi_rimt_node *parent;
+ int err = -ENODEV, i = 0;
+ u32 deviceid = 0;
+
+ do {
+ parent = rimt_node_map_platform_id(node, &deviceid,
+ RIMT_IOMMU_TYPE,
+ i++);
+
+ if (parent)
+ err = rimt_iommu_xlate(dev, parent, deviceid);
+ } while (parent && !err);
+
+ return err;
+}
+
+static int rimt_plat_iommu_map_id(struct device *dev,
+ struct acpi_rimt_node *node,
+ const u32 *in_id)
+{
+ struct acpi_rimt_node *parent;
+ u32 deviceid;
+
+ parent = rimt_node_map_id(node, *in_id, &deviceid, RIMT_IOMMU_TYPE);
+ if (parent)
+ return rimt_iommu_xlate(dev, parent, deviceid);
+
+ return -ENODEV;
+}
+
+/**
+ * rimt_iommu_configure_id - Set-up IOMMU configuration for a device.
+ *
+ * @dev: device to configure
+ * @id_in: optional input id const value pointer
+ *
+ * Returns: 0 on success, <0 on failure
+ */
+int rimt_iommu_configure_id(struct device *dev, const u32 *id_in)
+{
+ struct acpi_rimt_node *node;
+ int err = -ENODEV;
+
+ if (dev_is_pci(dev)) {
+ struct iommu_fwspec *fwspec;
+ struct pci_bus *bus = to_pci_dev(dev)->bus;
+ struct rimt_pci_alias_info info = { .dev = dev };
+
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PCIE_ROOT_COMPLEX, &bus->dev);
+ if (!node)
+ return -ENODEV;
+
+ info.node = node;
+ err = pci_for_each_dma_alias(to_pci_dev(dev),
+ rimt_pci_iommu_init, &info);
+
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && rimt_pcie_rc_supports_ats(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ } else {
+ node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_PLAT_DEVICE, dev);
+ if (!node)
+ return -ENODEV;
+
+ err = id_in ? rimt_plat_iommu_map_id(dev, node, id_in) :
+ rimt_plat_iommu_map(dev, node);
+ }
+
+ return err;
+}
+
+#endif
+
+void __init riscv_acpi_rimt_init(void)
+{
+ acpi_status status;
+
+ /*
+ * rimt_table will be used at runtime after the rimt init,
+ * so we don't need to call acpi_put_table() to release
+ * the RIMT table mapping.
+ */
+ status = acpi_get_table(ACPI_SIG_RIMT, 0, &rimt_table);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_NOT_FOUND) {
+ const char *msg = acpi_format_exception(status);
+
+ pr_err("Failed to get table, %s\n", msg);
+ }
+
+ return;
+ }
+}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index a3f95a3fffde..d3edc3bcbf01 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -487,7 +487,7 @@ static int acpi_battery_read(struct acpi_battery *battery)
if (result)
return result;
- battery->present = state & (1 << battery->id);
+ battery->present = !!(state & (1 << battery->id));
if (!battery->present)
return 0;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fb1fe9f3b1a3..416d87f9bd10 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/acpi_iort.h>
+#include <linux/acpi_rimt.h>
#include <linux/acpi_viot.h>
#include <linux/iommu.h>
#include <linux/signal.h>
@@ -845,6 +846,8 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
static const char * const acpi_ignore_dep_ids[] = {
"PNP0D80", /* Windows-compatible System Power Management Controller */
"INT33BD", /* Intel Baytrail Mailbox Device */
+ "INTC10DE", /* Intel CVS LNL */
+ "INTC10E0", /* Intel CVS ARL */
"LATT2021", /* Lattice FW Update Client Driver */
NULL
};
@@ -858,6 +861,8 @@ static const char * const acpi_honor_dep_ids[] = {
"INTC10CF", /* IVSC (MTL) driver must be loaded to allow i2c access to camera sensors */
"RSCV0001", /* RISC-V PLIC */
"RSCV0002", /* RISC-V APLIC */
+ "RSCV0005", /* RISC-V SBI MPXY MBOX */
+ "RSCV0006", /* RISC-V RPMI SYSMSI */
"PNP0C0F", /* PCI Link Device */
NULL
};
@@ -1629,7 +1634,10 @@ static int acpi_iommu_configure_id(struct device *dev, const u32 *id_in)
err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
+ err = rimt_iommu_configure_id(dev, id_in);
+ if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
+
mutex_unlock(&iommu_probe_device_lock);
return err;
@@ -2389,7 +2397,7 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
* initial enumeration of devices is complete, put it into the unbound
* workqueue.
*/
- queue_work(system_unbound_wq, &cdw->work);
+ queue_work(system_dfl_wq, &cdw->work);
return true;
}
@@ -2702,7 +2710,7 @@ void __init acpi_scan_init(void)
acpi_memory_hotplug_init();
acpi_watchdog_init();
acpi_pnp_init();
- acpi_int340x_thermal_init();
+ acpi_power_resources_init();
acpi_init_lpit();
acpi_scan_add_handler(&generic_device_handler);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index c8ee8e42b0f6..66ec81e306d4 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -642,7 +642,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/*
* Disable all GPE and clear their status bits before interrupts are
* enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
- * prevent them from producing spurious interrups.
+ * prevent them from producing spurious interrupts.
*
* acpi_leave_sleep_state() will reenable specific GPEs later.
*
@@ -884,13 +884,13 @@ bool acpi_s2idle_wakeup(void)
#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;
-static int acpi_save_bm_rld(void)
+static int acpi_save_bm_rld(void *data)
{
acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
return 0;
}
-static void acpi_restore_bm_rld(void)
+static void acpi_restore_bm_rld(void *data)
{
u32 resumed_bm_rld = 0;
@@ -901,14 +901,18 @@ static void acpi_restore_bm_rld(void)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
-static struct syscore_ops acpi_sleep_syscore_ops = {
+static const struct syscore_ops acpi_sleep_syscore_ops = {
.suspend = acpi_save_bm_rld,
.resume = acpi_restore_bm_rld,
};
+static struct syscore acpi_sleep_syscore = {
+ .ops = &acpi_sleep_syscore_ops,
+};
+
static void acpi_sleep_syscore_init(void)
{
- register_syscore_ops(&acpi_sleep_syscore_ops);
+ register_syscore(&acpi_sleep_syscore);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index d960a238be4e..9c3cb109c5d2 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -17,10 +17,7 @@ static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
extern int acpi_s2idle_begin(void);
extern int acpi_s2idle_prepare(void);
-extern int acpi_s2idle_prepare_late(void);
-extern void acpi_s2idle_check(void);
extern bool acpi_s2idle_wake(void);
-extern void acpi_s2idle_restore_early(void);
extern void acpi_s2idle_restore(void);
extern void acpi_s2idle_end(void);
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index cd36a97b0ea2..73cb933fdc89 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -141,12 +141,23 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
case ACPI_DBG2_16550_NVIDIA:
uart = "uart";
break;
+ case ACPI_DBG2_RISCV_SBI_CON:
+ uart = "sbi";
+ break;
default:
err = -ENOENT;
goto done;
}
- switch (table->baud_rate) {
+ /*
+ * SPCR 1.09 defines that the Precise Baud Rate field contains a specific
+ * non-zero baud rate which overrides the value of the Configured
+ * Baud Rate field. If this field is zero or not present, Configured
+ * Baud Rate is used.
+ */
+ if (table->header.revision >= 4 && table->precise_baudrate)
+ baud_rate = table->precise_baudrate;
+ else switch (table->baud_rate) {
case 0:
/*
* SPCR 1.04 defines 0 as a preconfigured state of UART.
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index fa9bb8c8ce95..4286e4af1092 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -408,7 +408,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __nonstring_array __initconst
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
- ACPI_SIG_NBFT };
+ ACPI_SIG_NBFT, ACPI_SIG_SWFT, ACPI_SIG_MPAM };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 5c2defe55898..a511f9ea0267 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -924,7 +924,7 @@ static int acpi_thermal_suspend(struct device *dev)
static int acpi_thermal_resume(struct device *dev)
{
struct acpi_thermal *tz;
- int i, j, power_state;
+ int i, j;
if (!dev)
return -EINVAL;
@@ -939,10 +939,8 @@ static int acpi_thermal_resume(struct device *dev)
if (!acpi_thermal_trip_valid(acpi_trip))
break;
- for (j = 0; j < acpi_trip->devices.count; j++) {
- acpi_bus_update_power(acpi_trip->devices.handles[j],
- &power_state);
- }
+ for (j = 0; j < acpi_trip->devices.count; j++)
+ acpi_bus_update_power(acpi_trip->devices.handles[j], NULL);
}
acpi_queue_thermal_check(tz);
@@ -1062,7 +1060,8 @@ static int __init acpi_thermal_init(void)
}
acpi_thermal_pm_queue = alloc_workqueue("acpi_thermal_pm",
- WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!acpi_thermal_pm_queue)
return -ENODEV;
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index d507d5e08435..4cf74f173c78 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -948,6 +948,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
},
},
+ /* https://gitlab.freedesktop.org/drm/amd/-/issues/4512 */
+ {
+ .callback = video_detect_force_native,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82K8"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/x86/lpss.c b/drivers/acpi/x86/lpss.c
index 6daa6372f980..1dcb80ab0d23 100644
--- a/drivers/acpi/x86/lpss.c
+++ b/drivers/acpi/x86/lpss.c
@@ -181,7 +181,7 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
acpi_status status;
u64 uid;
- /* Expected to always be successfull, but better safe then sorry */
+ /* Expected to always be successful, but better safe than sorry */
if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
/* Detect I2C bus shared with PUNIT and ignore its d3 status */
status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index dd0b40b9bbe8..6d4d06236f61 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -299,34 +299,13 @@ free_acpi_buffer:
ACPI_FREE(out_obj);
}
-/**
- * acpi_get_lps0_constraint - Get the LPS0 constraint for a device.
- * @adev: Device to get the constraint for.
- *
- * The LPS0 constraint is the shallowest (minimum) power state in which the
- * device can be so as to allow the platform as a whole to achieve additional
- * energy conservation by utilizing a system-wide low-power state.
- *
- * Returns:
- * - ACPI power state value of the constraint for @adev on success.
- * - Otherwise, ACPI_STATE_UNKNOWN.
- */
-int acpi_get_lps0_constraint(struct acpi_device *adev)
-{
- struct lpi_constraints *entry;
-
- for_each_lpi_constraint(entry) {
- if (adev->handle == entry->handle)
- return entry->min_dstate;
- }
-
- return ACPI_STATE_UNKNOWN;
-}
-
static void lpi_check_constraints(void)
{
struct lpi_constraints *entry;
+ if (IS_ERR_OR_NULL(lpi_constraints_table))
+ return;
+
for_each_lpi_constraint(entry) {
struct acpi_device *adev = acpi_fetch_acpi_dev(entry->handle);
@@ -508,11 +487,6 @@ static int lps0_device_attach(struct acpi_device *adev,
lps0_device_handle = adev->handle;
- if (acpi_s2idle_vendor_amd())
- lpi_device_get_constraints_amd();
- else
- lpi_device_get_constraints();
-
/*
* Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set in
* the FADT and the default suspend mode was not set from the command
@@ -539,7 +513,26 @@ static struct acpi_scan_handler lps0_handler = {
.attach = lps0_device_attach,
};
-int acpi_s2idle_prepare_late(void)
+static int acpi_s2idle_begin_lps0(void)
+{
+ if (pm_debug_messages_on && !lpi_constraints_table) {
+ if (acpi_s2idle_vendor_amd())
+ lpi_device_get_constraints_amd();
+ else
+ lpi_device_get_constraints();
+
+ /*
+ * Try to retrieve the constraints only once because failures
+ * to do so are usually sticky.
+ */
+ if (!lpi_constraints_table)
+ lpi_constraints_table = ERR_PTR(-ENODATA);
+ }
+
+ return acpi_s2idle_begin();
+}
+
+static int acpi_s2idle_prepare_late_lps0(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -585,7 +578,7 @@ int acpi_s2idle_prepare_late(void)
return 0;
}
-void acpi_s2idle_check(void)
+static void acpi_s2idle_check_lps0(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -598,7 +591,7 @@ void acpi_s2idle_check(void)
}
}
-void acpi_s2idle_restore_early(void)
+static void acpi_s2idle_restore_early_lps0(void)
{
struct acpi_s2idle_dev_ops *handler;
@@ -636,12 +629,12 @@ void acpi_s2idle_restore_early(void)
}
static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
- .begin = acpi_s2idle_begin,
+ .begin = acpi_s2idle_begin_lps0,
.prepare = acpi_s2idle_prepare,
- .prepare_late = acpi_s2idle_prepare_late,
- .check = acpi_s2idle_check,
+ .prepare_late = acpi_s2idle_prepare_late_lps0,
+ .check = acpi_s2idle_check_lps0,
.wake = acpi_s2idle_wake,
- .restore_early = acpi_s2idle_restore_early,
+ .restore_early = acpi_s2idle_restore_early_lps0,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
diff --git a/drivers/amba/Kconfig b/drivers/amba/Kconfig
index fb6c7e0b4cce..14bb61ff801e 100644
--- a/drivers/amba/Kconfig
+++ b/drivers/amba/Kconfig
@@ -5,7 +5,7 @@ config ARM_AMBA
if ARM_AMBA
config TEGRA_AHB
- bool
+ bool "Enable AHB driver for NVIDIA Tegra SoCs" if COMPILE_TEST
default y if ARCH_TEGRA
help
Adds AHB configuration functionality for NVIDIA Tegra SoCs,
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 74e34a07ef72..952c45ca6e48 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -291,15 +291,14 @@ static int amba_probe(struct device *dev)
if (ret < 0)
break;
- ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret)
break;
ret = amba_get_enable_pclk(pcdev);
- if (ret) {
- dev_pm_domain_detach(dev, true);
+ if (ret)
break;
- }
pm_runtime_get_noresume(dev);
pm_runtime_set_active(dev);
@@ -314,7 +313,6 @@ static int amba_probe(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
- dev_pm_domain_detach(dev, true);
} while (0);
return ret;
@@ -336,7 +334,6 @@ static void amba_remove(struct device *dev)
pm_runtime_put_noidle(dev);
amba_put_disable_pclk(pcdev);
- dev_pm_domain_detach(dev, true);
}
static void amba_shutdown(struct device *dev)
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index c0e8b765522d..f23c3ed01810 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -144,6 +144,7 @@ int tegra_ahb_enable_smmu(struct device_node *dn)
if (!dev)
return -EPROBE_DEFER;
ahb = dev_get_drvdata(dev);
+ put_device(dev);
val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 5b3b8041f827..e2e402c9d175 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -4,6 +4,7 @@ menu "Android"
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
depends on MMU
+ depends on NET
default n
help
Binder is used in Android for both communication between processes,
@@ -13,6 +14,19 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_RUST
+ bool "Rust version of Android Binder IPC Driver"
+ depends on RUST && MMU && !ANDROID_BINDER_IPC
+ help
+ This enables the Rust implementation of the Binder driver.
+
+ Binder is used in Android for both communication between processes,
+ and remote method invocation.
+
+ This means one Android process can call a method/routine in another
+ Android process, using Binder to identify, invoke and pass arguments
+ between said processes.
+
config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC
@@ -27,7 +41,7 @@ config ANDROID_BINDERFS
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
- depends on ANDROID_BINDER_IPC
+ depends on ANDROID_BINDER_IPC || ANDROID_BINDER_IPC_RUST
default "binder,hwbinder,vndbinder"
help
Default value for the binder.devices parameter.
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c5d47be0276c..e0c650d3898e 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -2,5 +2,6 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o binder_netlink.o
obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += tests/
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += binder/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 312b462e349d..535fc881c8da 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -74,6 +74,7 @@
#include <linux/cacheflush.h>
+#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"
@@ -850,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else {
if (!internal)
node->local_weak_refs++;
- if (!node->has_weak_ref && list_empty(&node->work.entry)) {
- if (target_list == NULL) {
- pr_err("invalid inc weak node for %d\n",
- node->debug_id);
- return -EINVAL;
- }
- /*
- * See comment above
- */
+ if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
binder_enqueue_work_ilocked(&node->work, target_list);
- }
}
return 0;
}
@@ -2417,10 +2409,10 @@ err_fd_not_accepted:
/**
* struct binder_ptr_fixup - data to be fixed-up in target buffer
- * @offset offset in target buffer to fixup
- * @skip_size bytes to skip in copy (fixup will be written later)
- * @fixup_data data to write at fixup offset
- * @node list node
+ * @offset: offset in target buffer to fixup
+ * @skip_size: bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node: list node
*
* This is used for the pointer fixup list (pf) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2437,10 +2429,10 @@ struct binder_ptr_fixup {
/**
* struct binder_sg_copy - scatter-gather data to be copied
- * @offset offset in target buffer
- * @sender_uaddr user address in source buffer
- * @length bytes to copy
- * @node list node
+ * @offset: offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length: bytes to copy
+ * @node: list node
*
* This is used for the sg copy list (sgc) which is created and consumed
* during binder_transaction() and is only accessed locally. No
@@ -2993,6 +2985,69 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id,
binder_thread_dec_tmpref(from);
}
+/**
+ * binder_netlink_report() - report a transaction failure via netlink
+ * @proc: the binder proc sending the transaction
+ * @t: the binder transaction that failed
+ * @data_size: the user provided data size for the transaction
+ * @error: enum binder_driver_return_protocol returned to sender
+ */
+static void binder_netlink_report(struct binder_proc *proc,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error)
+{
+ const char *context = proc->context->name;
+ struct sk_buff *skb;
+ void *hdr;
+
+ if (!genl_has_listeners(&binder_nl_family, &init_net,
+ BINDER_NLGRP_REPORT))
+ return;
+
+ trace_binder_netlink_report(context, t, data_size, error);
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
+ if (!hdr)
+ goto free_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
+ nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
+ goto cancel_skb;
+
+ if (t->to_proc &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
+ goto cancel_skb;
+
+ if (t->to_thread &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
+ goto cancel_skb;
+
+ if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
+ goto cancel_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
+ nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
+ nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
+ goto cancel_skb;
+
+ genlmsg_end(skb, hdr);
+ genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
+ GFP_KERNEL);
+ return;
+
+cancel_skb:
+ genlmsg_cancel(skb, hdr);
+free_skb:
+ nlmsg_free(skb);
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -3042,6 +3097,32 @@ static void binder_transaction(struct binder_proc *proc,
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
binder_inner_proc_unlock(proc);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t) {
+ binder_txn_error("%d:%d cannot allocate transaction\n",
+ thread->pid, proc->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
+ goto err_alloc_t_failed;
+ }
+ INIT_LIST_HEAD(&t->fd_fixups);
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
+ t->debug_id = t_debug_id;
+ t->start_time = t_start_time;
+ t->from_pid = proc->pid;
+ t->from_tid = thread->pid;
+ t->sender_euid = task_euid(proc->tsk);
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->work.type = BINDER_WORK_TRANSACTION;
+ t->is_async = !reply && (tr->flags & TF_ONE_WAY);
+ t->is_reply = reply;
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+
if (reply) {
binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
@@ -3228,24 +3309,13 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_inner_proc_unlock(proc);
}
+
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
if (target_thread)
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
- /* TODO: reuse incoming transaction for reply */
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL) {
- binder_txn_error("%d:%d cannot allocate transaction\n",
- thread->pid, proc->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -ENOMEM;
- return_error_line = __LINE__;
- goto err_alloc_t_failed;
- }
- INIT_LIST_HEAD(&t->fd_fixups);
- binder_stats_created(BINDER_STAT_TRANSACTION);
- spin_lock_init(&t->lock);
-
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
binder_txn_error("%d:%d cannot allocate work for transaction\n",
@@ -3257,9 +3327,6 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = t_debug_id;
- t->start_time = t_start_time;
-
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
@@ -3275,19 +3342,6 @@ static void binder_transaction(struct binder_proc *proc,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
- if (!reply && !(tr->flags & TF_ONE_WAY))
- t->from = thread;
- else
- t->from = NULL;
- t->from_pid = proc->pid;
- t->from_tid = thread->pid;
- t->sender_euid = task_euid(proc->tsk);
- t->to_proc = target_proc;
- t->to_thread = target_thread;
- t->code = tr->code;
- t->flags = tr->flags;
- t->priority = task_nice(current);
-
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
@@ -3680,11 +3734,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (t->buffer->oneway_spam_suspect)
+ if (t->buffer->oneway_spam_suspect) {
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
- else
+ binder_netlink_report(proc, t, tr->data_size,
+ BR_ONEWAY_SPAM_SUSPECT);
+ } else {
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- t->work.type = BINDER_WORK_TRANSACTION;
+ }
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
@@ -3712,7 +3768,6 @@ static void binder_transaction(struct binder_proc *proc,
* the target replies (or there is an error).
*/
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
- t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
@@ -3733,8 +3788,11 @@ static void binder_transaction(struct binder_proc *proc,
* process and is put in a pending queue, waiting for the target
* process to be unfrozen.
*/
- if (return_error == BR_TRANSACTION_PENDING_FROZEN)
+ if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+ binder_netlink_report(proc, t, tr->data_size,
+ return_error);
+ }
binder_enqueue_thread_work(thread, tcomplete);
if (return_error &&
return_error != BR_TRANSACTION_PENDING_FROZEN)
@@ -3783,9 +3841,6 @@ err_get_secctx_failed:
err_alloc_tcomplete_failed:
if (trace_binder_txn_latency_free_enabled())
binder_txn_latency_free(t);
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
-err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
@@ -3796,6 +3851,11 @@ err_invalid_target_handle:
binder_dec_node_tmpref(target_node);
}
+ binder_netlink_report(proc, t, tr->data_size, return_error);
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
@@ -3994,14 +4054,15 @@ binder_freeze_notification_done(struct binder_proc *proc,
/**
* binder_free_buf() - free the specified buffer
- * @proc: binder proc that owns buffer
- * @buffer: buffer to be freed
- * @is_failure: failed to send transaction
+ * @proc: binder proc that owns buffer
+ * @thread: binder thread performing the buffer release
+ * @buffer: buffer to be freed
+ * @is_failure: failed to send transaction
*
- * If buffer for an async transaction, enqueue the next async
+ * If the buffer is for an async transaction, enqueue the next async
* transaction from the node.
*
- * Cleanup buffer and free it.
+ * Cleanup the buffer and free it.
*/
static void
binder_free_buf(struct binder_proc *proc,
@@ -4608,6 +4669,8 @@ static int binder_wait_for_work(struct binder_thread *thread,
*
* If we fail to allocate an fd, skip the install and release
* any fds that have already been allocated.
+ *
+ * Return: 0 on success, a negative errno code on failure.
*/
static int binder_apply_fd_fixups(struct binder_proc *proc,
struct binder_transaction *t)
@@ -6324,13 +6387,13 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply,
+ t->code, t->flags, t->priority, t->is_async, t->is_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
@@ -7062,12 +7125,19 @@ static int __init binder_init(void)
}
}
- ret = init_binderfs();
+ ret = genl_register_family(&binder_nl_family);
if (ret)
goto err_init_binder_device_failed;
+ ret = init_binderfs();
+ if (ret)
+ goto err_init_binderfs_failed;
+
return ret;
+err_init_binderfs_failed:
+ genl_unregister_family(&binder_nl_family);
+
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
@@ -7088,5 +7158,3 @@ device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/android/binder/Makefile b/drivers/android/binder/Makefile
new file mode 100644
index 000000000000..09eabb527fa0
--- /dev/null
+++ b/drivers/android/binder/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -I$(src) # needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
+rust_binder-y := \
+ rust_binder_main.o \
+ rust_binderfs.o \
+ rust_binder_events.o \
+ page_range_helper.o
diff --git a/drivers/android/binder/allocation.rs b/drivers/android/binder/allocation.rs
new file mode 100644
index 000000000000..7f65a9c3a0e5
--- /dev/null
+++ b/drivers/android/binder/allocation.rs
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::{size_of, size_of_val, MaybeUninit};
+use core::ops::Range;
+
+use kernel::{
+ bindings,
+ fs::file::{File, FileDescriptorReservation},
+ prelude::*,
+ sync::{aref::ARef, Arc},
+ transmute::{AsBytes, FromBytes},
+ uaccess::UserSliceReader,
+ uapi,
+};
+
+use crate::{
+ deferred_close::DeferredFdCloser,
+ defs::*,
+ node::{Node, NodeRef},
+ process::Process,
+ DArc,
+};
+
+#[derive(Default)]
+pub(crate) struct AllocationInfo {
+ /// Range within the allocation where we can find the offsets to the object descriptors.
+ pub(crate) offsets: Option<Range<usize>>,
+ /// The target node of the transaction this allocation is associated to.
+ /// Not set for replies.
+ pub(crate) target_node: Option<NodeRef>,
+ /// When this allocation is dropped, call `pending_oneway_finished` on the node.
+ ///
+ /// This is used to serialize oneway transaction on the same node. Binder guarantees that
+ /// oneway transactions to the same node are delivered sequentially in the order they are sent.
+ pub(crate) oneway_node: Option<DArc<Node>>,
+ /// Zero the data in the buffer on free.
+ pub(crate) clear_on_free: bool,
+ /// List of files embedded in this transaction.
+ file_list: FileList,
+}
+
+/// Represents an allocation that the kernel is currently using.
+///
+/// When allocations are idle, the range allocator holds the data related to them.
+///
+/// # Invariants
+///
+/// This allocation corresponds to an allocation in the range allocator, so the relevant pages are
+/// marked in use in the page range.
+pub(crate) struct Allocation {
+ pub(crate) offset: usize,
+ size: usize,
+ pub(crate) ptr: usize,
+ pub(crate) process: Arc<Process>,
+ allocation_info: Option<AllocationInfo>,
+ free_on_drop: bool,
+ pub(crate) oneway_spam_detected: bool,
+ #[allow(dead_code)]
+ pub(crate) debug_id: usize,
+}
+
+impl Allocation {
+ pub(crate) fn new(
+ process: Arc<Process>,
+ debug_id: usize,
+ offset: usize,
+ size: usize,
+ ptr: usize,
+ oneway_spam_detected: bool,
+ ) -> Self {
+ Self {
+ process,
+ offset,
+ size,
+ ptr,
+ debug_id,
+ oneway_spam_detected,
+ allocation_info: None,
+ free_on_drop: true,
+ }
+ }
+
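+ /// Bounds-check a range against this allocation. Hypothetical example:
+ /// with `size == 128`, `size_check(120, 16)` fails with `EFAULT`
+ /// (136 > 128), while `size_check(120, 8)` succeeds.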
+ fn size_check(&self, offset: usize, size: usize) -> Result {
+ let overflow_fail = offset.checked_add(size).is_none();
+ let cmp_size_fail = offset.wrapping_add(size) > self.size;
+ if overflow_fail || cmp_size_fail {
+ return Err(EFAULT);
+ }
+ Ok(())
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ self.size_check(offset, size)?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe {
+ self.process
+ .pages
+ .copy_from_user_slice(reader, self.offset + offset, size)
+ }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ self.size_check(offset, size_of::<T>())?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.read(self.offset + offset) }
+ }
+
+ pub(crate) fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ self.size_check(offset, size_of_val::<T>(obj))?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.write(self.offset + offset, obj) }
+ }
+
+ pub(crate) fn fill_zero(&self) -> Result {
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.fill_zero(self.offset, self.size) }
+ }
+
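+ /// Skip freeing the buffer when `self` is dropped: ownership of the
+ /// allocation data is handed back to the process via
+ /// `buffer_make_freeable`.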
+ pub(crate) fn keep_alive(mut self) {
+ self.process
+ .buffer_make_freeable(self.offset, self.allocation_info.take());
+ self.free_on_drop = false;
+ }
+
+ pub(crate) fn set_info(&mut self, info: AllocationInfo) {
+ self.allocation_info = Some(info);
+ }
+
+ pub(crate) fn get_or_init_info(&mut self) -> &mut AllocationInfo {
+ self.allocation_info.get_or_insert_with(Default::default)
+ }
+
+ pub(crate) fn set_info_offsets(&mut self, offsets: Range<usize>) {
+ self.get_or_init_info().offsets = Some(offsets);
+ }
+
+ pub(crate) fn set_info_oneway_node(&mut self, oneway_node: DArc<Node>) {
+ self.get_or_init_info().oneway_node = Some(oneway_node);
+ }
+
+ pub(crate) fn set_info_clear_on_drop(&mut self) {
+ self.get_or_init_info().clear_on_free = true;
+ }
+
+ pub(crate) fn set_info_target_node(&mut self, target_node: NodeRef) {
+ self.get_or_init_info().target_node = Some(target_node);
+ }
+
+ /// Reserve enough space to push at least `num_fds` fds.
+ pub(crate) fn info_add_fd_reserve(&mut self, num_fds: usize) -> Result {
+ self.get_or_init_info()
+ .file_list
+ .files_to_translate
+ .reserve(num_fds, GFP_KERNEL)?;
+
+ Ok(())
+ }
+
+ pub(crate) fn info_add_fd(
+ &mut self,
+ file: ARef<File>,
+ buffer_offset: usize,
+ close_on_free: bool,
+ ) -> Result {
+ self.get_or_init_info().file_list.files_to_translate.push(
+ FileEntry {
+ file,
+ buffer_offset,
+ close_on_free,
+ },
+ GFP_KERNEL,
+ )?;
+
+ Ok(())
+ }
+
+ pub(crate) fn set_info_close_on_free(&mut self, cof: FdsCloseOnFree) {
+ self.get_or_init_info().file_list.close_on_free = cof.0;
+ }
+
+ pub(crate) fn translate_fds(&mut self) -> Result<TranslatedFds> {
+ let file_list = match self.allocation_info.as_mut() {
+ Some(info) => &mut info.file_list,
+ None => return Ok(TranslatedFds::new()),
+ };
+
+ let files = core::mem::take(&mut file_list.files_to_translate);
+
+ let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
+ let mut close_on_free = KVec::with_capacity(num_close_on_free, GFP_KERNEL)?;
+
+ let mut reservations = KVec::with_capacity(files.len(), GFP_KERNEL)?;
+ for file_info in files {
+ let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
+ let fd = res.reserved_fd();
+ self.write::<u32>(file_info.buffer_offset, &fd)?;
+
+ reservations.push(
+ Reservation {
+ res,
+ file: file_info.file,
+ },
+ GFP_KERNEL,
+ )?;
+ if file_info.close_on_free {
+ close_on_free.push(fd, GFP_KERNEL)?;
+ }
+ }
+
+ Ok(TranslatedFds {
+ reservations,
+ close_on_free: FdsCloseOnFree(close_on_free),
+ })
+ }
+
+ /// Should the looper return to userspace when freeing this allocation?
+ pub(crate) fn looper_need_return_on_free(&self) -> bool {
+ // Closing fds involves pushing task_work for execution when we return to userspace. Hence,
+ // we should return to userspace as soon as possible if we are closing fds.
+ match self.allocation_info {
+ Some(ref info) => !info.file_list.close_on_free.is_empty(),
+ None => false,
+ }
+ }
+}
+
+impl Drop for Allocation {
+ fn drop(&mut self) {
+ if !self.free_on_drop {
+ return;
+ }
+
+ if let Some(mut info) = self.allocation_info.take() {
+ if let Some(oneway_node) = info.oneway_node.as_ref() {
+ oneway_node.pending_oneway_finished();
+ }
+
+ info.target_node = None;
+
+ if let Some(offsets) = info.offsets.clone() {
+ let view = AllocationView::new(self, offsets.start);
+ for i in offsets.step_by(size_of::<usize>()) {
+ if view.cleanup_object(i).is_err() {
+ pr_warn!("Error cleaning up object at offset {}\n", i)
+ }
+ }
+ }
+
+ for &fd in &info.file_list.close_on_free {
+ let closer = match DeferredFdCloser::new(GFP_KERNEL) {
+ Ok(closer) => closer,
+ Err(kernel::alloc::AllocError) => {
+ // Ignore allocation failures.
+ break;
+ }
+ };
+
+ // Here, we ignore errors. The operation can fail if the fd is not valid, or if the
+ // method is called from a kthread. However, this is always called from a syscall,
+ // so the latter case cannot happen, and we don't care about the first case.
+ let _ = closer.close_fd(fd);
+ }
+
+ if info.clear_on_free {
+ if let Err(e) = self.fill_zero() {
+ pr_warn!("Failed to clear data on free: {:?}", e);
+ }
+ }
+ }
+
+ self.process.buffer_raw_free(self.ptr);
+ }
+}
+
+/// A wrapper around `Allocation` that is being created.
+///
+/// If the allocation is destroyed while wrapped in this wrapper, then the allocation will be
+/// considered to be part of a failed transaction. Successful transactions avoid that by calling
+/// `success`, which skips the destructor.
+#[repr(transparent)]
+pub(crate) struct NewAllocation(pub(crate) Allocation);
+
+impl NewAllocation {
+ pub(crate) fn success(self) -> Allocation {
+ // This skips the destructor.
+ //
+ // SAFETY: This type is `#[repr(transparent)]`, so the layout matches.
+ unsafe { core::mem::transmute(self) }
+ }
+}
+
+impl core::ops::Deref for NewAllocation {
+ type Target = Allocation;
+ fn deref(&self) -> &Allocation {
+ &self.0
+ }
+}
+
+impl core::ops::DerefMut for NewAllocation {
+ fn deref_mut(&mut self) -> &mut Allocation {
+ &mut self.0
+ }
+}
+
+/// A view into the beginning of an allocation.
+///
+/// All attempts to read or write outside of the view will fail. To intentionally access outside of
+/// this view, use the `alloc` field of this struct directly.
+pub(crate) struct AllocationView<'a> {
+ pub(crate) alloc: &'a mut Allocation,
+ limit: usize,
+}
+
+impl<'a> AllocationView<'a> {
+ pub(crate) fn new(alloc: &'a mut Allocation, limit: usize) -> Self {
+ AllocationView { alloc, limit }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.read(offset)
+ }
+
+ pub(crate) fn write<T: AsBytes>(&self, offset: usize, obj: &T) -> Result {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.write(offset, obj)
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ if offset.checked_add(size).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.copy_into(reader, offset, size)
+ }
+
+ pub(crate) fn transfer_binder_object(
+ &self,
+ offset: usize,
+ obj: &uapi::flat_binder_object,
+ strong: bool,
+ node_ref: NodeRef,
+ ) -> Result {
+ let mut newobj = FlatBinderObject::default();
+ let node = node_ref.node.clone();
+ if Arc::ptr_eq(&node_ref.node.owner, &self.alloc.process) {
+ // The receiving process is the owner of the node, so send it a binder object (instead
+ // of a handle).
+ let (ptr, cookie) = node.get_id();
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_BINDER
+ } else {
+ BINDER_TYPE_WEAK_BINDER
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.binder = ptr as _;
+ newobj.cookie = cookie as _;
+ self.write(offset, &newobj)?;
+ // Increment the user ref count on the node. It will be decremented as part of the
+ // destruction of the buffer, when we see a binder or weak-binder object.
+ node.update_refcount(true, 1, strong);
+ } else {
+ // The receiving process is different from the owner, so we need to insert a handle to
+ // the binder object.
+ let handle = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .insert_or_update_handle(node_ref, false)?;
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_HANDLE
+ } else {
+ BINDER_TYPE_WEAK_HANDLE
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.handle = handle;
+ if self.write(offset, &newobj).is_err() {
+ // Decrement ref count on the handle we just created.
+ let _ = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong);
+ return Err(EINVAL);
+ }
+ }
+
+ Ok(())
+ }
+
+ fn cleanup_object(&self, index_offset: usize) -> Result {
+ let offset = self.alloc.read(index_offset)?;
+ let header = self.read::<BinderObjectHeader>(offset)?;
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
+ // populated.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder };
+ let cookie = obj.cookie;
+ self.alloc.process.update_node(ptr, cookie, strong);
+ Ok(())
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
+ // populated.
+ let handle = unsafe { obj.__bindgen_anon_1.handle };
+ self.alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong)
+ }
+ _ => Ok(()),
+ }
+ }
+}
+
+/// A binder object as it is serialized.
+///
+/// # Invariants
+///
+/// All bytes must be initialized, and the value of `self.hdr.type_` must be one of the allowed
+/// types.
+#[repr(C)]
+pub(crate) union BinderObject {
+ hdr: uapi::binder_object_header,
+ fbo: uapi::flat_binder_object,
+ fdo: uapi::binder_fd_object,
+ bbo: uapi::binder_buffer_object,
+ fdao: uapi::binder_fd_array_object,
+}
+
+/// A view into a `BinderObject` that can be used in a match statement.
+pub(crate) enum BinderObjectRef<'a> {
+ Binder(&'a mut uapi::flat_binder_object),
+ Handle(&'a mut uapi::flat_binder_object),
+ Fd(&'a mut uapi::binder_fd_object),
+ Ptr(&'a mut uapi::binder_buffer_object),
+ Fda(&'a mut uapi::binder_fd_array_object),
+}
+
+impl BinderObject {
+ pub(crate) fn read_from(reader: &mut UserSliceReader) -> Result<BinderObject> {
+ let object = Self::read_from_inner(|slice| {
+ let read_len = usize::min(slice.len(), reader.len());
+ reader.clone_reader().read_slice(&mut slice[..read_len])?;
+ Ok(())
+ })?;
+
+ // If we used an object type smaller than the largest object size, then we've read more
+ // bytes than we needed to. However, we used `.clone_reader()` to avoid advancing the
+ // original reader. Now, we call `skip` so that the caller's reader is advanced by the
+ // right amount.
+ //
+ // The `skip` call fails if the reader doesn't have `size` bytes available. This could
+ // happen if the type header corresponds to an object type that is larger than the rest of
+ // the reader.
+ //
+ // Any extra bytes beyond the size of the object are inaccessible after this call, so
+ // reading them again from the `reader` later does not result in TOCTOU bugs.
+ reader.skip(object.size())?;
+
+ Ok(object)
+ }
+
+ /// Use the provided reader closure to construct a `BinderObject`.
+ ///
+ /// The closure should write the bytes for the object into the provided slice.
+ pub(crate) fn read_from_inner<R>(reader: R) -> Result<BinderObject>
+ where
+ R: FnOnce(&mut [u8; size_of::<BinderObject>()]) -> Result<()>,
+ {
+ let mut obj = MaybeUninit::<BinderObject>::zeroed();
+
+ // SAFETY: The lengths of `BinderObject` and `[u8; size_of::<BinderObject>()]` are equal,
+ // and the byte array has an alignment requirement of one, so the pointer cast is okay.
+ // Additionally, `obj` was initialized to zeros, so the byte array will not be
+ // uninitialized.
+ (reader)(unsafe { &mut *obj.as_mut_ptr().cast() })?;
+
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { obj.assume_init_ref().hdr.type_ };
+ if Self::type_to_size(type_).is_none() {
+ // The value of `obj.hdr_type_` was invalid.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: All bytes are initialized (since we zeroed them at the start) and we checked
+ // that `self.hdr.type_` is one of the allowed types, so the type invariants are satisfied.
+ unsafe { Ok(obj.assume_init()) }
+ }
+
+ pub(crate) fn as_ref(&mut self) -> BinderObjectRef<'_> {
+ use BinderObjectRef::*;
+ // SAFETY: The constructor ensures that all bytes of `self` are initialized, and all
+ // variants of this union accept all initialized bit patterns.
+ unsafe {
+ match self.hdr.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => Binder(&mut self.fbo),
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => Handle(&mut self.fbo),
+ BINDER_TYPE_FD => Fd(&mut self.fdo),
+ BINDER_TYPE_PTR => Ptr(&mut self.bbo),
+ BINDER_TYPE_FDA => Fda(&mut self.fdao),
+ // SAFETY: By the type invariant, the value of `self.hdr.type_` cannot have any
+ // other value than the ones checked above.
+ _ => core::hint::unreachable_unchecked(),
+ }
+ }
+ }
+
+ pub(crate) fn size(&self) -> usize {
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { self.hdr.type_ };
+
+ // SAFETY: The type invariants guarantee that the type field is correct.
+ unsafe { Self::type_to_size(type_).unwrap_unchecked() }
+ }
+
+ fn type_to_size(type_: u32) -> Option<usize> {
+ match type_ {
+ BINDER_TYPE_WEAK_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_WEAK_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_FD => Some(size_of::<uapi::binder_fd_object>()),
+ BINDER_TYPE_PTR => Some(size_of::<uapi::binder_buffer_object>()),
+ BINDER_TYPE_FDA => Some(size_of::<uapi::binder_fd_array_object>()),
+ _ => None,
+ }
+ }
+}
+
+#[derive(Default)]
+struct FileList {
+ files_to_translate: KVec<FileEntry>,
+ close_on_free: KVec<u32>,
+}
+
+struct FileEntry {
+ /// The file for which a descriptor will be created in the recipient process.
+ file: ARef<File>,
+ /// The offset in the buffer where the file descriptor is stored.
+ buffer_offset: usize,
+ /// Whether this fd should be closed when the allocation is freed.
+ close_on_free: bool,
+}
+
+pub(crate) struct TranslatedFds {
+ reservations: KVec<Reservation>,
+ /// If commit is called, then these fds should be closed. (If commit is not called, then they
+ /// shouldn't be closed.)
+ close_on_free: FdsCloseOnFree,
+}
+
+struct Reservation {
+ res: FileDescriptorReservation,
+ file: ARef<File>,
+}
+
+impl TranslatedFds {
+ pub(crate) fn new() -> Self {
+ Self {
+ reservations: KVec::new(),
+ close_on_free: FdsCloseOnFree(KVec::new()),
+ }
+ }
+
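+ /// Install every reserved fd into the recipient's file table. Keeping
+ /// reservation and installation separate means a failed transaction can
+ /// drop the reservations without ever publishing the fds to userspace.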
+ pub(crate) fn commit(self) -> FdsCloseOnFree {
+ for entry in self.reservations {
+ entry.res.fd_install(entry.file);
+ }
+
+ self.close_on_free
+ }
+}
+
+pub(crate) struct FdsCloseOnFree(KVec<u32>);
diff --git a/drivers/android/binder/context.rs b/drivers/android/binder/context.rs
new file mode 100644
index 000000000000..3d135ec03ca7
--- /dev/null
+++ b/drivers/android/binder/context.rs
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ error::Error,
+ list::{List, ListArc, ListLinks},
+ prelude::*,
+ security,
+ str::{CStr, CString},
+ sync::{Arc, Mutex},
+ task::Kuid,
+};
+
+use crate::{error::BinderError, node::NodeRef, process::Process};
+
+kernel::sync::global_lock! {
+ // SAFETY: We call `init` in the module initializer, so it's initialized before first use.
+ pub(crate) unsafe(uninit) static CONTEXTS: Mutex<ContextList> = ContextList {
+ list: List::new(),
+ };
+}
+
+pub(crate) struct ContextList {
+ list: List<Context>,
+}
+
+pub(crate) fn get_all_contexts() -> Result<KVec<Arc<Context>>> {
+ let lock = CONTEXTS.lock();
+
+ let count = lock.list.iter().count();
+
+ let mut ctxs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for ctx in &lock.list {
+ ctxs.push(Arc::from(ctx), GFP_KERNEL)?;
+ }
+ Ok(ctxs)
+}
+
+/// This struct keeps track of the processes using this context, and which process is the context
+/// manager.
+struct Manager {
+ node: Option<NodeRef>,
+ uid: Option<Kuid>,
+ all_procs: List<Process>,
+}
+
+/// There is one context per binder file (/dev/binder, /dev/hwbinder, etc.)
+#[pin_data]
+pub(crate) struct Context {
+ #[pin]
+ manager: Mutex<Manager>,
+ pub(crate) name: CString,
+ #[pin]
+ links: ListLinks,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Context { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Context {
+ using ListLinks { self.links };
+ }
+}
+
+impl Context {
+ pub(crate) fn new(name: &CStr) -> Result<Arc<Self>> {
+ let name = CString::try_from(name)?;
+ let list_ctx = ListArc::pin_init::<Error>(
+ try_pin_init!(Context {
+ name,
+ links <- ListLinks::new(),
+ manager <- kernel::new_mutex!(Manager {
+ all_procs: List::new(),
+ node: None,
+ uid: None,
+ }, "Context::manager"),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let ctx = list_ctx.clone_arc();
+ CONTEXTS.lock().list.push_back(list_ctx);
+
+ Ok(ctx)
+ }
+
+ /// Called when the file for this context is unlinked.
+ ///
+ /// No-op if called twice.
+ pub(crate) fn deregister(&self) {
+ // SAFETY: We never add the context to any other linked list than this one, so it is either
+ // in this list, or not in any list.
+ unsafe { CONTEXTS.lock().list.remove(self) };
+ }
+
+ pub(crate) fn register_process(self: &Arc<Self>, proc: ListArc<Process>) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::register_process called on the wrong context.");
+ return;
+ }
+ self.manager.lock().all_procs.push_back(proc);
+ }
+
+ pub(crate) fn deregister_process(self: &Arc<Self>, proc: &Process) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::deregister_process called on the wrong context.");
+ return;
+ }
+ // SAFETY: We just checked that this is the right list.
+ unsafe { self.manager.lock().all_procs.remove(proc) };
+ }
+
+ pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
+ let mut manager = self.manager.lock();
+ if manager.node.is_some() {
+ pr_warn!("BINDER_SET_CONTEXT_MGR already set");
+ return Err(EBUSY);
+ }
+ security::binder_set_context_mgr(&node_ref.node.owner.cred)?;
+
+ // If the context manager has been set before, ensure that we use the same euid.
+ let caller_uid = Kuid::current_euid();
+ if let Some(ref uid) = manager.uid {
+ if *uid != caller_uid {
+ return Err(EPERM);
+ }
+ }
+
+ manager.node = Some(node_ref);
+ manager.uid = Some(caller_uid);
+ Ok(())
+ }
+
+ pub(crate) fn unset_manager_node(&self) {
+ let node_ref = self.manager.lock().node.take();
+ drop(node_ref);
+ }
+
+ pub(crate) fn get_manager_node(&self, strong: bool) -> Result<NodeRef, BinderError> {
+ self.manager
+ .lock()
+ .node
+ .as_ref()
+ .ok_or_else(BinderError::new_dead)?
+ .clone(strong)
+ .map_err(BinderError::from)
+ }
+
+ pub(crate) fn for_each_proc<F>(&self, mut func: F)
+ where
+ F: FnMut(&Process),
+ {
+ let lock = self.manager.lock();
+ for proc in &lock.all_procs {
+ func(&proc);
+ }
+ }
+
+ pub(crate) fn get_all_procs(&self) -> Result<KVec<Arc<Process>>> {
+ let lock = self.manager.lock();
+ let count = lock.all_procs.iter().count();
+
+ let mut procs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for proc in &lock.all_procs {
+ procs.push(Arc::from(proc), GFP_KERNEL)?;
+ }
+ Ok(procs)
+ }
+
+ pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>> {
+ let orig = self.get_all_procs()?;
+ let mut backing = KVec::with_capacity(orig.len(), GFP_KERNEL)?;
+ for proc in orig.into_iter().filter(|proc| proc.task.pid() == pid) {
+ backing.push(proc, GFP_KERNEL)?;
+ }
+ Ok(backing)
+ }
+}
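+
+// Usage sketch (hypothetical): `ctx.for_each_proc(|p| pr_info!("pid {}\n", p.task.pid()))`
+// visits every registered process while holding the manager mutex, so the closure must
+// not attempt to take that mutex again.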
diff --git a/drivers/android/binder/deferred_close.rs b/drivers/android/binder/deferred_close.rs
new file mode 100644
index 000000000000..ac895c04d0cb
--- /dev/null
+++ b/drivers/android/binder/deferred_close.rs
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Logic for closing files in a deferred manner.
+//!
+//! This logic could arguably live in `kernel::fs`, but it was rejected for being too
+//! Binder-specific.
+
+use core::mem::MaybeUninit;
+use kernel::{
+ alloc::{AllocError, Flags},
+ bindings,
+ prelude::*,
+};
+
+/// Helper used for closing file descriptors in a way that is safe even if the file is currently
+/// held using `fdget`.
+///
+/// Additional motivation can be found in commit 80cd795630d6 ("binder: fix use-after-free due to
+/// ksys_close() during fdget()") and in the comments on `binder_do_fd_close`.
+pub(crate) struct DeferredFdCloser {
+ inner: KBox<DeferredFdCloserInner>,
+}
+
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// moving it across threads.
+unsafe impl Send for DeferredFdCloser {}
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// moving it across threads.
+unsafe impl Sync for DeferredFdCloser {}
+
+/// # Invariants
+///
+/// If the `file` pointer is non-null, then it points at a `struct file` and owns a refcount to
+/// that file.
+#[repr(C)]
+struct DeferredFdCloserInner {
+ twork: MaybeUninit<bindings::callback_head>,
+ file: *mut bindings::file,
+}
+
+impl DeferredFdCloser {
+ /// Create a new [`DeferredFdCloser`].
+ pub(crate) fn new(flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self {
+ // INVARIANT: The `file` pointer is null, so the type invariant does not apply.
+ inner: KBox::new(
+ DeferredFdCloserInner {
+ twork: MaybeUninit::uninit(),
+ file: core::ptr::null_mut(),
+ },
+ flags,
+ )?,
+ })
+ }
+
+ /// Schedule a task work that closes the file descriptor when this task returns to userspace.
+ ///
+ /// Fails if this is called from a context where we cannot run work when returning to
+ /// userspace. (E.g., from a kthread.)
+ pub(crate) fn close_fd(self, fd: u32) -> Result<(), DeferredFdCloseError> {
+ use bindings::task_work_notify_mode_TWA_RESUME as TWA_RESUME;
+
+ // In this method, we schedule the task work before closing the file. This is because
+ // scheduling a task work is fallible, and we need to know whether it will fail before we
+ // attempt to close the file.
+
+ // Task works are not available on kthreads.
+ let current = kernel::current!();
+
+ // Check if this is a kthread.
+ // SAFETY: Reading `flags` from a task is always okay.
+ if unsafe { ((*current.as_ptr()).flags & bindings::PF_KTHREAD) != 0 } {
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // Transfer ownership of the box's allocation to a raw pointer. This disables the
+ // destructor, so we must manually convert it back to a KBox to drop it.
+ //
+ // Until we convert it back to a `KBox`, there are no aliasing requirements on this
+ // pointer.
+ let inner = KBox::into_raw(self.inner);
+
+ // The `callback_head` field is first in the struct, so this cast correctly gives us a
+ // pointer to the field.
+ let callback_head = inner.cast::<bindings::callback_head>();
+ // SAFETY: This pointer offset operation does not go out-of-bounds.
+ let file_field = unsafe { core::ptr::addr_of_mut!((*inner).file) };
+
+ let current = current.as_ptr();
+
+ // SAFETY: This function currently has exclusive access to the `DeferredFdCloserInner`, so
+ // it is okay for us to perform unsynchronized writes to its `callback_head` field.
+ unsafe { bindings::init_task_work(callback_head, Some(Self::do_close_fd)) };
+
+ // SAFETY: This inserts the `DeferredFdCloserInner` into the task workqueue for the current
+ // task. If this operation is successful, then this transfers exclusive ownership of the
+ // `callback_head` field to the C side until it calls `do_close_fd`, and we don't touch or
+ // invalidate the field during that time.
+ //
+ // When the C side calls `do_close_fd`, the safety requirements of that method are
+ // satisfied because when a task work is executed, the callback is given ownership of the
+ // pointer.
+ //
+ // The file pointer is currently null. If it is changed to be non-null before `do_close_fd`
+ // is called, then that change happens due to the write at the end of this function, and
+ // that write has a safety comment that explains why the refcount can be dropped when
+ // `do_close_fd` runs.
+ let res = unsafe { bindings::task_work_add(current, callback_head, TWA_RESUME) };
+
+ if res != 0 {
+ // SAFETY: Scheduling the task work failed, so we still have ownership of the box, so
+ // we may destroy it.
+ unsafe { drop(KBox::from_raw(inner)) };
+
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // This removes the fd from the fd table in `current`. The file is not fully closed until
+ // `filp_close` is called. We are given ownership of one refcount to the file.
+ //
+ // SAFETY: This is safe no matter what `fd` is. If the `fd` is valid (that is, if the
+ // pointer is non-null), then we call `filp_close` on the returned pointer as required by
+ // `file_close_fd`.
+ let file = unsafe { bindings::file_close_fd(fd) };
+ if file.is_null() {
+ // We don't clean up the task work since that might be expensive if the task work queue
+ // is long. Just let it execute and let it clean up for itself.
+ return Err(DeferredFdCloseError::BadFd);
+ }
+
+ // Acquire a second refcount to the file.
+ //
+ // SAFETY: The `file` pointer points at a file with a non-zero refcount.
+ unsafe { bindings::get_file(file) };
+
+ // This method closes the fd, consuming one of our two refcounts. There could be active
+ // light refcounts created from that fd, so we must ensure that the file has a positive
+ // refcount for the duration of those active light refcounts. We do that by holding on to
+ // the second refcount until the current task returns to userspace.
+ //
+ // SAFETY: The `file` pointer is valid. Passing `current->files` as the file table to close
+ // it in is correct, since we just got the `fd` from `file_close_fd` which also uses
+ // `current->files`.
+ //
+ // Note: fl_owner_t is currently a void pointer.
+ unsafe { bindings::filp_close(file, (*current).files as bindings::fl_owner_t) };
+
+ // We update the file pointer that the task work is supposed to fput. This transfers
+ // ownership of our last refcount.
+ //
+ // INVARIANT: This changes the `file` field of a `DeferredFdCloserInner` from null to
+ // non-null. This doesn't break the type invariant for `DeferredFdCloserInner` because we
+ // still own a refcount to the file, so we can pass ownership of that refcount to the
+ // `DeferredFdCloserInner`.
+ //
+ // When `do_close_fd` runs, it must be safe for it to `fput` the refcount. However, this is
+ // the case because all light refcounts that are associated with the fd we closed
+ // previously must have been dropped by the time `do_close_fd` runs, since light refcounts
+ // must be dropped before returning to userspace.
+ //
+ // SAFETY: Task works are executed on the current thread right before we return to
+ // userspace, so this write is guaranteed to happen before `do_close_fd` is called, which
+ // means that a race is not possible here.
+ unsafe { *file_field = file };
+
+ Ok(())
+ }
+
+ /// # Safety
+ ///
+ /// The provided pointer must point at the `twork` field of a `DeferredFdCloserInner` stored in
+ /// a `KBox`, and the caller must pass exclusive ownership of that `KBox`. Furthermore, if the
+ /// file pointer is non-null, then it must be okay to release the refcount by calling `fput`.
+ unsafe extern "C" fn do_close_fd(inner: *mut bindings::callback_head) {
+ // SAFETY: The caller just passed us ownership of this box.
+ let inner = unsafe { KBox::from_raw(inner.cast::<DeferredFdCloserInner>()) };
+ if !inner.file.is_null() {
+ // SAFETY: By the type invariants, we own a refcount to this file, and the caller
+ // guarantees that dropping the refcount now is okay.
+ unsafe { bindings::fput(inner.file) };
+ }
+ // The allocation is freed when `inner` goes out of scope.
+ }
+}
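+
+// A minimal usage sketch (hypothetical call site; only valid in process context, since
+// kthreads cannot run task work):
+//
+//     let closer = DeferredFdCloser::new(GFP_KERNEL)?;
+//     closer.close_fd(fd)?; // the file is released when this task returns to userspace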
+
+/// Represents a failure to close an fd in a deferred manner.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum DeferredFdCloseError {
+ /// Closing the fd failed because we were unable to schedule a task work.
+ TaskWorkUnavailable,
+ /// Closing the fd failed because the fd does not exist.
+ BadFd,
+}
+
+impl From<DeferredFdCloseError> for Error {
+ fn from(err: DeferredFdCloseError) -> Error {
+ match err {
+ DeferredFdCloseError::TaskWorkUnavailable => ESRCH,
+ DeferredFdCloseError::BadFd => EBADF,
+ }
+ }
+}
diff --git a/drivers/android/binder/defs.rs b/drivers/android/binder/defs.rs
new file mode 100644
index 000000000000..33f51b4139c7
--- /dev/null
+++ b/drivers/android/binder/defs.rs
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use kernel::{
+ transmute::{AsBytes, FromBytes},
+ uapi::{self, *},
+};
+
+macro_rules! pub_no_prefix {
+ ($prefix:ident, $($newname:ident),+ $(,)?) => {
+ $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+ };
+}
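+
+// For example, `pub_no_prefix!(binder_driver_return_protocol_, BR_OK)` expands to
+// `pub(crate) const BR_OK: u32 = binder_driver_return_protocol_BR_OK;`.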
+
+pub_no_prefix!(
+ binder_driver_return_protocol_,
+ BR_TRANSACTION,
+ BR_TRANSACTION_SEC_CTX,
+ BR_REPLY,
+ BR_DEAD_REPLY,
+ BR_FAILED_REPLY,
+ BR_FROZEN_REPLY,
+ BR_NOOP,
+ BR_SPAWN_LOOPER,
+ BR_TRANSACTION_COMPLETE,
+ BR_TRANSACTION_PENDING_FROZEN,
+ BR_ONEWAY_SPAM_SUSPECT,
+ BR_OK,
+ BR_ERROR,
+ BR_INCREFS,
+ BR_ACQUIRE,
+ BR_RELEASE,
+ BR_DECREFS,
+ BR_DEAD_BINDER,
+ BR_CLEAR_DEATH_NOTIFICATION_DONE,
+ BR_FROZEN_BINDER,
+ BR_CLEAR_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ binder_driver_command_protocol_,
+ BC_TRANSACTION,
+ BC_TRANSACTION_SG,
+ BC_REPLY,
+ BC_REPLY_SG,
+ BC_FREE_BUFFER,
+ BC_ENTER_LOOPER,
+ BC_EXIT_LOOPER,
+ BC_REGISTER_LOOPER,
+ BC_INCREFS,
+ BC_ACQUIRE,
+ BC_RELEASE,
+ BC_DECREFS,
+ BC_INCREFS_DONE,
+ BC_ACQUIRE_DONE,
+ BC_REQUEST_DEATH_NOTIFICATION,
+ BC_CLEAR_DEATH_NOTIFICATION,
+ BC_DEAD_BINDER_DONE,
+ BC_REQUEST_FREEZE_NOTIFICATION,
+ BC_CLEAR_FREEZE_NOTIFICATION,
+ BC_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ flat_binder_object_flags_,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS,
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX
+);
+
+pub_no_prefix!(
+ transaction_flags_,
+ TF_ONE_WAY,
+ TF_ACCEPT_FDS,
+ TF_CLEAR_BUF,
+ TF_UPDATE_TXN
+);
+
+pub(crate) use uapi::{
+ BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_FDA, BINDER_TYPE_HANDLE, BINDER_TYPE_PTR,
+ BINDER_TYPE_WEAK_BINDER, BINDER_TYPE_WEAK_HANDLE,
+};
+
+macro_rules! decl_wrapper {
+ ($newname:ident, $wrapped:ty) => {
+ // Define a wrapper around the C type. Use `MaybeUninit` to enforce that the value of
+ // padding bytes must be preserved.
+ #[derive(Copy, Clone)]
+ #[repr(transparent)]
+ pub(crate) struct $newname(MaybeUninit<$wrapped>);
+
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl FromBytes for $newname {}
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl AsBytes for $newname {}
+
+ impl Deref for $newname {
+ type Target = $wrapped;
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_ref() }
+ }
+ }
+
+ impl DerefMut for $newname {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_mut() }
+ }
+ }
+
+ impl Default for $newname {
+ fn default() -> Self {
+ // Create a new value of this type where all bytes (including padding) are zeroed.
+ Self(MaybeUninit::zeroed())
+ }
+ }
+ };
+}
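+
+// For example, `decl_wrapper!(BinderVersion, uapi::binder_version)` below defines a
+// `#[repr(transparent)]` newtype around `MaybeUninit<uapi::binder_version>` that derefs
+// to the wrapped C struct, zeroes every byte (padding included) in `Default`, and can be
+// copied to and from userspace as raw bytes thanks to `AsBytes`/`FromBytes`.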
+
+decl_wrapper!(BinderNodeDebugInfo, uapi::binder_node_debug_info);
+decl_wrapper!(BinderNodeInfoForRef, uapi::binder_node_info_for_ref);
+decl_wrapper!(FlatBinderObject, uapi::flat_binder_object);
+decl_wrapper!(BinderFdObject, uapi::binder_fd_object);
+decl_wrapper!(BinderFdArrayObject, uapi::binder_fd_array_object);
+decl_wrapper!(BinderObjectHeader, uapi::binder_object_header);
+decl_wrapper!(BinderBufferObject, uapi::binder_buffer_object);
+decl_wrapper!(BinderTransactionData, uapi::binder_transaction_data);
+decl_wrapper!(
+ BinderTransactionDataSecctx,
+ uapi::binder_transaction_data_secctx
+);
+decl_wrapper!(BinderTransactionDataSg, uapi::binder_transaction_data_sg);
+decl_wrapper!(BinderWriteRead, uapi::binder_write_read);
+decl_wrapper!(BinderVersion, uapi::binder_version);
+decl_wrapper!(BinderFrozenStatusInfo, uapi::binder_frozen_status_info);
+decl_wrapper!(BinderFreezeInfo, uapi::binder_freeze_info);
+decl_wrapper!(BinderFrozenStateInfo, uapi::binder_frozen_state_info);
+decl_wrapper!(BinderHandleCookie, uapi::binder_handle_cookie);
+decl_wrapper!(ExtendedError, uapi::binder_extended_error);
+
+impl BinderVersion {
+ pub(crate) fn current() -> Self {
+ Self(MaybeUninit::new(uapi::binder_version {
+ protocol_version: BINDER_CURRENT_PROTOCOL_VERSION as _,
+ }))
+ }
+}
+
+impl BinderTransactionData {
+ pub(crate) fn with_buffers_size(self, buffers_size: u64) -> BinderTransactionDataSg {
+ BinderTransactionDataSg(MaybeUninit::new(uapi::binder_transaction_data_sg {
+ transaction_data: *self,
+ buffers_size,
+ }))
+ }
+}
+
+impl BinderTransactionDataSecctx {
+ /// View the inner data as wrapped in `BinderTransactionData`.
+ pub(crate) fn tr_data(&mut self) -> &mut BinderTransactionData {
+ // SAFETY: Transparent wrapper is safe to transmute.
+ unsafe {
+ &mut *(&mut self.transaction_data as *mut uapi::binder_transaction_data
+ as *mut BinderTransactionData)
+ }
+ }
+}
+
+impl ExtendedError {
+ pub(crate) fn new(id: u32, command: u32, param: i32) -> Self {
+ Self(MaybeUninit::new(uapi::binder_extended_error {
+ id,
+ command,
+ param,
+ }))
+ }
+}
diff --git a/drivers/android/binder/error.rs b/drivers/android/binder/error.rs
new file mode 100644
index 000000000000..b24497cfa292
--- /dev/null
+++ b/drivers/android/binder/error.rs
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::fmt;
+use kernel::prelude::*;
+
+use crate::defs::*;
+
+pub(crate) type BinderResult<T = ()> = core::result::Result<T, BinderError>;
+
+/// An error that will be returned to userspace via the `BINDER_WRITE_READ` ioctl rather than via
+/// errno.
+pub(crate) struct BinderError {
+ pub(crate) reply: u32,
+ source: Option<Error>,
+}
+
+impl BinderError {
+ pub(crate) fn new_dead() -> Self {
+ Self {
+ reply: BR_DEAD_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen() -> Self {
+ Self {
+ reply: BR_FROZEN_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen_oneway() -> Self {
+ Self {
+ reply: BR_TRANSACTION_PENDING_FROZEN,
+ source: None,
+ }
+ }
+
+ pub(crate) fn is_dead(&self) -> bool {
+ self.reply == BR_DEAD_REPLY
+ }
+
+ pub(crate) fn as_errno(&self) -> kernel::ffi::c_int {
+ self.source.unwrap_or(EINVAL).to_errno()
+ }
+
+ pub(crate) fn should_pr_warn(&self) -> bool {
+ self.source.is_some()
+ }
+}
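+
+// Construction sketch: `BinderError::from(EINVAL)` yields a `BR_FAILED_REPLY` whose
+// source errno is reported via `as_errno()`, while `BinderError::new_dead()` carries no
+// source and maps to `BR_DEAD_REPLY`.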
+
+/// Convert an errno into a `BinderError` and store the errno used to construct it. The errno
+/// should be stored as the thread's extended error when given to userspace.
+impl From<Error> for BinderError {
+ fn from(source: Error) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(source),
+ }
+ }
+}
+
+impl From<kernel::fs::file::BadFdError> for BinderError {
+ fn from(source: kernel::fs::file::BadFdError) -> Self {
+ BinderError::from(Error::from(source))
+ }
+}
+
+impl From<kernel::alloc::AllocError> for BinderError {
+ fn from(_: kernel::alloc::AllocError) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(ENOMEM),
+ }
+ }
+}
+
+impl fmt::Debug for BinderError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.reply {
+ BR_FAILED_REPLY => match self.source.as_ref() {
+ Some(source) => f
+ .debug_struct("BR_FAILED_REPLY")
+ .field("source", source)
+ .finish(),
+ None => f.pad("BR_FAILED_REPLY"),
+ },
+ BR_DEAD_REPLY => f.pad("BR_DEAD_REPLY"),
+ BR_FROZEN_REPLY => f.pad("BR_FROZEN_REPLY"),
+ BR_TRANSACTION_PENDING_FROZEN => f.pad("BR_TRANSACTION_PENDING_FROZEN"),
+ BR_TRANSACTION_COMPLETE => f.pad("BR_TRANSACTION_COMPLETE"),
+ _ => f
+ .debug_struct("BinderError")
+ .field("reply", &self.reply)
+ .finish(),
+ }
+ }
+}
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
new file mode 100644
index 000000000000..53b60035639a
--- /dev/null
+++ b/drivers/android/binder/freeze.rs
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ alloc::AllocError,
+ list::ListArc,
+ prelude::*,
+ rbtree::{self, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, UniqueArc},
+ uaccess::UserSliceReader,
+};
+
+use crate::{
+ defs::*, node::Node, process::Process, thread::Thread, BinderReturnWriter, DArc, DLArc,
+ DTRWrap, DeliverToRead,
+};
+
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub(crate) struct FreezeCookie(u64);
+
+/// Represents a listener for changes to the frozen state of a process.
+pub(crate) struct FreezeListener {
+ /// The node we are listening for.
+ pub(crate) node: DArc<Node>,
+ /// The cookie of this freeze listener.
+ cookie: FreezeCookie,
+ /// What value of `is_frozen` did we most recently tell userspace about?
+ last_is_frozen: Option<bool>,
+ /// We sent a `BR_FROZEN_BINDER` and we are waiting for `BC_FREEZE_NOTIFICATION_DONE` before
+ /// sending any other commands.
+ is_pending: bool,
+ /// Userspace sent `BC_CLEAR_FREEZE_NOTIFICATION` and we need to reply with
+ /// `BR_CLEAR_FREEZE_NOTIFICATION_DONE` as soon as possible. If `is_pending` is set, then we
+ /// must wait for it to be unset before we can reply.
+ is_clearing: bool,
+ /// Number of cleared duplicates that can't be deleted until userspace sends
+ /// `BC_FREEZE_NOTIFICATION_DONE`.
+ num_pending_duplicates: u64,
+ /// Number of cleared duplicates that can be deleted.
+ num_cleared_duplicates: u64,
+}
+
+impl FreezeListener {
+ /// Is it okay to create a new listener with the same cookie as this one for the provided node?
+ ///
+ /// Under some scenarios, userspace may delete a freeze listener and immediately recreate it
+ /// with the same cookie. This results in duplicate listeners. To avoid issues with ambiguity,
+ /// we allow this only if the new listener is for the same node, and we also require that the
+ /// old listener has already been cleared.
+ fn allow_duplicate(&self, node: &DArc<Node>) -> bool {
+ Arc::ptr_eq(&self.node, node) && self.is_clearing
+ }
+}
+
+type UninitFM = UniqueArc<core::mem::MaybeUninit<DTRWrap<FreezeMessage>>>;
+
+/// Represents a notification that the freeze state has changed.
+pub(crate) struct FreezeMessage {
+ cookie: FreezeCookie,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for FreezeMessage {
+ untracked;
+ }
+}
+
+impl FreezeMessage {
+ fn new(flags: kernel::alloc::Flags) -> Result<UninitFM, AllocError> {
+ UniqueArc::new_uninit(flags)
+ }
+
+ fn init(ua: UninitFM, cookie: FreezeCookie) -> DLArc<FreezeMessage> {
+ match ua.pin_init_with(DTRWrap::new(FreezeMessage { cookie })) {
+ Ok(msg) => ListArc::from(msg),
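+ // The error type here is uninhabited, so this arm can never run; it only
+ // satisfies the exhaustiveness check.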
+ Err(err) => match err {},
+ }
+ }
+}
+
+impl DeliverToRead for FreezeMessage {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let _removed_listener;
+ let mut node_refs = thread.process.node_refs.lock();
+ let Some(mut freeze_entry) = node_refs.freeze_listeners.find_mut(&self.cookie) else {
+ return Ok(true);
+ };
+ let freeze = freeze_entry.get_mut();
+
+ if freeze.num_cleared_duplicates > 0 {
+ freeze.num_cleared_duplicates -= 1;
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ return Ok(true);
+ }
+
+ if freeze.is_pending {
+ return Ok(true);
+ }
+ if freeze.is_clearing {
+ kernel::warn_on!(freeze.num_cleared_duplicates != 0);
+ if freeze.num_pending_duplicates > 0 {
+ // The primary freeze listener was deleted, so convert a pending duplicate back
+ // into the primary one.
+ freeze.num_pending_duplicates -= 1;
+ freeze.is_pending = true;
+ freeze.is_clearing = true;
+ } else {
+ _removed_listener = freeze_entry.remove_node();
+ }
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ Ok(true)
+ } else {
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+ if freeze.last_is_frozen == Some(is_frozen) {
+ return Ok(true);
+ }
+
+ let mut state_info = BinderFrozenStateInfo::default();
+ state_info.is_frozen = is_frozen as u32;
+ state_info.cookie = freeze.cookie.0;
+ freeze.is_pending = true;
+ freeze.last_is_frozen = Some(is_frozen);
+ drop(node_refs);
+
+ writer.write_code(BR_FROZEN_BINDER)?;
+ writer.write_payload(&state_info)?;
+ // BR_FROZEN_BINDER notifications can cause transactions, so return to userspace
+ // before processing any further work items.
+ Ok(false)
+ }
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}has frozen binder\n", prefix);
+ Ok(())
+ }
+}
+
+impl FreezeListener {
+ pub(crate) fn on_process_exit(&self, proc: &Arc<Process>) {
+ if !self.is_clearing {
+ self.node.remove_freeze_listener(proc);
+ }
+ }
+}
+
+impl Process {
+ pub(crate) fn request_freeze_notif(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ ) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let msg = FreezeMessage::new(GFP_KERNEL)?;
+ let alloc = RBTreeNodeReservation::new(GFP_KERNEL)?;
+
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ if info.freeze().is_some() {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION already set\n");
+ return Err(EINVAL);
+ }
+ let node_ref = info.node_ref();
+ let freeze_entry = node_refs.freeze_listeners.entry(cookie);
+
+ if let rbtree::Entry::Occupied(ref dupe) = freeze_entry {
+ if !dupe.get().allow_duplicate(&node_ref.node) {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION duplicate cookie\n");
+ return Err(EINVAL);
+ }
+ }
+
+ // All failure paths must come before this call, and all modifications must come after this
+ // call.
+ node_ref.node.add_freeze_listener(self, GFP_KERNEL)?;
+
+ match freeze_entry {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(
+ FreezeListener {
+ cookie,
+ node: node_ref.node.clone(),
+ last_is_frozen: None,
+ is_pending: false,
+ is_clearing: false,
+ num_pending_duplicates: 0,
+ num_cleared_duplicates: 0,
+ },
+ alloc,
+ );
+ }
+ rbtree::Entry::Occupied(mut dupe) => {
+ let dupe = dupe.get_mut();
+ if dupe.is_pending {
+ dupe.num_pending_duplicates += 1;
+ } else {
+ dupe.num_cleared_duplicates += 1;
+ }
+ dupe.last_is_frozen = None;
+ dupe.is_pending = false;
+ dupe.is_clearing = false;
+ }
+ }
+
+ *info.freeze() = Some(cookie);
+ let msg = FreezeMessage::init(msg, cookie);
+ drop(node_refs_guard);
+ let _ = self.push_work(msg);
+ Ok(())
+ }
+
+ pub(crate) fn freeze_notif_done(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let cookie = FreezeCookie(reader.read()?);
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(freeze) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_FREEZE_NOTIFICATION_DONE {:016x} not found\n", cookie.0);
+ return Err(EINVAL);
+ };
+ let mut clear_msg = None;
+ if freeze.num_pending_duplicates > 0 {
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ freeze.num_pending_duplicates -= 1;
+ freeze.num_cleared_duplicates += 1;
+ } else {
+ if !freeze.is_pending {
+ pr_warn!(
+ "BC_FREEZE_NOTIFICATION_DONE {:016x} not pending\n",
+ cookie.0
+ );
+ return Err(EINVAL);
+ }
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+ if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
+ // Immediately send another FreezeMessage.
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ freeze.is_pending = false;
+ }
+ drop(node_refs_guard);
+ if let Some(clear_msg) = clear_msg {
+ let _ = self.push_work(clear_msg);
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_freeze_notif(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ let Some(info_cookie) = info.freeze() else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n");
+ return Err(EINVAL);
+ };
+ if *info_cookie != cookie {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch\n");
+ return Err(EINVAL);
+ }
+ let Some(listener) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid cookie {}\n", handle);
+ return Err(EINVAL);
+ };
+ listener.is_clearing = true;
+ listener.node.remove_freeze_listener(self);
+ *info.freeze() = None;
+ let mut msg = None;
+ if !listener.is_pending {
+ msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ drop(node_refs_guard);
+
+ if let Some(msg) = msg {
+ let _ = self.push_work(msg);
+ }
+ Ok(())
+ }
+
+ fn get_freeze_cookie(&self, node: &DArc<Node>) -> Option<FreezeCookie> {
+ let node_refs = &mut *self.node_refs.lock();
+ let handle = node_refs.by_node.get(&node.global_id())?;
+ let node_ref = node_refs.by_handle.get_mut(handle)?;
+ *node_ref.freeze()
+ }
+
+ /// Creates a vector of every freeze listener on this process.
+ ///
+ /// Returns pairs of the remote process listening for notifications and the local node it is
+ /// listening on.
+ #[expect(clippy::type_complexity)]
+ fn find_freeze_recipients(&self) -> Result<KVVec<(DArc<Node>, Arc<Process>)>, AllocError> {
+ // Defined before `inner` to drop after releasing spinlock if `push_within_capacity` fails.
+ let mut node_proc_pair;
+
+ // We pre-allocate space for up to 8 recipients before we take the spinlock. However, if
+ // the allocation fails, use a vector with a capacity of zero instead of failing. After
+ // all, there might not be any freeze listeners, in which case this operation could still
+ // succeed.
+ let mut recipients =
+ KVVec::with_capacity(8, GFP_KERNEL).unwrap_or_else(|_err| KVVec::new());
+
+ let mut inner = self.lock_with_nodes();
+ let mut curr = inner.nodes.cursor_front_mut();
+ while let Some(cursor) = curr {
+ let (key, node) = cursor.current();
+ let key = *key;
+ let list = node.freeze_list(&inner.inner);
+ let len = list.len();
+
+ if recipients.spare_capacity_mut().len() < len {
+ drop(inner);
+ recipients.reserve(len, GFP_KERNEL)?;
+ inner = self.lock_with_nodes();
+ // Find the node we were looking at and try again. If the set of nodes was changed,
+ // then just proceed to the next node. This is ok because we don't guarantee the
+ // inclusion of nodes that are added or removed in parallel with this operation.
+ curr = inner.nodes.cursor_lower_bound_mut(&key);
+ continue;
+ }
+
+ for proc in list {
+ node_proc_pair = (node.clone(), proc.clone());
+ recipients
+ .push_within_capacity(node_proc_pair)
+ .map_err(|_| {
+ pr_err!(
+ "push_within_capacity failed even though we checked the capacity\n"
+ );
+ AllocError
+ })?;
+ }
+
+ curr = cursor.move_next();
+ }
+ Ok(recipients)
+ }
+
+ /// Prepare allocations for sending freeze messages.
+ pub(crate) fn prepare_freeze_messages(&self) -> Result<FreezeMessages, AllocError> {
+ let recipients = self.find_freeze_recipients()?;
+ let mut batch = KVVec::with_capacity(recipients.len(), GFP_KERNEL)?;
+ for (node, proc) in recipients {
+ let Some(cookie) = proc.get_freeze_cookie(&node) else {
+ // If the freeze listener was removed in the meantime, just discard the
+ // notification.
+ continue;
+ };
+ let msg_alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let msg = FreezeMessage::init(msg_alloc, cookie);
+ batch.push((proc, msg), GFP_KERNEL)?;
+ }
+
+ Ok(FreezeMessages { batch })
+ }
+}
+
+pub(crate) struct FreezeMessages {
+ batch: KVVec<(Arc<Process>, DLArc<FreezeMessage>)>,
+}
+
+impl FreezeMessages {
+ pub(crate) fn send_messages(self) {
+ for (proc, msg) in self.batch {
+ let _ = proc.push_work(msg);
+ }
+ }
+}
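+
+// Usage sketch (hypothetical ordering, implied by the fallible/infallible split above):
+// run `prepare_freeze_messages()` while the operation may still fail with `AllocError`,
+// and call the infallible `send_messages()` only after the point of no return.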
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
new file mode 100644
index 000000000000..c26d113ede96
--- /dev/null
+++ b/drivers/android/binder/node.rs
@@ -0,0 +1,1131 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::lock::{spinlock::SpinLockBackend, Guard},
+ sync::{Arc, LockedBy, SpinLock},
+};
+
+use crate::{
+ defs::*,
+ error::BinderError,
+ process::{NodeRefInfo, Process, ProcessInner},
+ thread::Thread,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+use core::mem;
+
+mod wrapper;
+pub(crate) use self::wrapper::CritIncrWrapper;
+
+#[derive(Debug)]
+pub(crate) struct CouldNotDeliverCriticalIncrement;
+
+/// Keeps track of how this node is scheduled.
+///
+/// There are two ways to schedule a node to a work list. Just schedule the node itself, or
+/// allocate a wrapper that references the node and schedule the wrapper. These wrappers exist to
+/// make it possible to "move" a node from one list to another - when `do_work` is called directly
+/// on the `Node`, then it's a no-op if there's also a pending wrapper.
+///
+/// Wrappers are generally only needed for zero-to-one refcount increments, and there are two cases
+/// of this: weak increments and strong increments. We call such increments "critical" because it
+/// is critical that they are delivered to the thread doing the increment. Some examples:
+///
+/// * One thread makes a zero-to-one strong increment, and another thread makes a zero-to-one weak
+/// increment. Delivering the node to the thread doing the weak increment is wrong, since the
+/// thread doing the strong increment may have ended a long time ago when the command is actually
+/// processed by userspace.
+///
+/// * We have a weak reference and are about to drop it on one thread. But then another thread does
+/// a zero-to-one strong increment. If the strong increment gets sent to the thread that was
+/// about to drop the weak reference, then the strong increment could be processed after the
+/// other thread has already exited, which would be too late.
+///
+/// Note that trying to create a `ListArc` to the node can succeed even if `has_normal_push` is
+/// set. This is because another thread might just have popped the node from a todo list, but not
+/// yet called `do_work`. However, if `has_normal_push` is false, then creating a `ListArc` should
+/// always succeed.
+///
+/// Like the other fields in `NodeInner`, the delivery state is protected by the process lock.
+struct DeliveryState {
+ /// Is the `Node` currently scheduled?
+ has_pushed_node: bool,
+
+ /// Is a wrapper currently scheduled?
+ ///
+ /// The wrapper is used only for strong zero2one increments.
+ has_pushed_wrapper: bool,
+
+ /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
+ ///
+ /// Weak zero2one operations are always scheduled using the `Node`.
+ has_weak_zero2one: bool,
+
+ /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
+ ///
+ /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
+ /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
+ has_strong_zero2one: bool,
+}
+
+impl DeliveryState {
+ fn should_normal_push(&self) -> bool {
+ !self.has_pushed_node && !self.has_pushed_wrapper
+ }
+
+ fn did_normal_push(&mut self) {
+ assert!(self.should_normal_push());
+ self.has_pushed_node = true;
+ }
+
+ fn should_push_weak_zero2one(&self) -> bool {
+ !self.has_weak_zero2one && !self.has_strong_zero2one
+ }
+
+ fn can_push_weak_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_weak_zero2one(&mut self) {
+ assert!(self.should_push_weak_zero2one());
+ assert!(self.can_push_weak_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_weak_zero2one = true;
+ }
+
+ fn should_push_strong_zero2one(&self) -> bool {
+ !self.has_strong_zero2one
+ }
+
+ fn can_push_strong_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_strong_zero2one(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(self.can_push_strong_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_strong_zero2one = true;
+ }
+
+ fn did_push_strong_zero2one_wrapper(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(!self.can_push_strong_zero2one_normally());
+ self.has_pushed_wrapper = true;
+ self.has_strong_zero2one = true;
+ }
+}
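+
+// Informal transition sketch: a normal push sets `has_pushed_node`; a weak zero2one push
+// sets `has_pushed_node` and `has_weak_zero2one`; a strong zero2one push uses the node
+// when it is free, and otherwise falls back to a wrapper, setting `has_pushed_wrapper`
+// and `has_strong_zero2one`.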
+
+struct CountState {
+ /// The reference count.
+ count: usize,
+ /// Whether the process that owns this node thinks that we hold a refcount on it. (Note that
+ /// even if count is greater than one, we only increment it once in the owning process.)
+ has_count: bool,
+}
+
+impl CountState {
+ fn new() -> Self {
+ Self {
+ count: 0,
+ has_count: false,
+ }
+ }
+}
+
+struct NodeInner {
+ /// Strong refcounts held on this node by `NodeRef` objects.
+ strong: CountState,
+ /// Weak refcounts held on this node by `NodeRef` objects.
+ weak: CountState,
+ delivery_state: DeliveryState,
+ /// The binder driver guarantees that oneway transactions sent to the same node are serialized,
+ /// that is, userspace will not be given the next one until it has finished processing the
+ /// previous oneway transaction. This is done to avoid the case where two oneway transactions
+ /// arrive in opposite order from the order in which they were sent. (E.g., they could be
+ /// delivered to two different threads, which could appear as-if they were sent in opposite
+ /// order.)
+ ///
+ /// To fix that, we store pending oneway transactions in a separate list in the node, and don't
+ /// deliver the next oneway transaction until userspace signals that it has finished processing
+ /// the previous oneway transaction by calling the `BC_FREE_BUFFER` ioctl.
+ oneway_todo: List<DTRWrap<Transaction>>,
+ /// Keeps track of whether this node has a pending oneway transaction.
+ ///
+ /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
+ /// being delivered directly to the process.
+ has_oneway_transaction: bool,
+ /// List of processes to deliver a notification to when this node is destroyed (usually due to
+ /// the process dying).
+ death_list: List<DTRWrap<NodeDeath>, 1>,
+ /// List of processes to deliver freeze notifications to.
+ freeze_list: KVVec<Arc<Process>>,
+ /// The number of active BR_INCREFS or BR_ACQUIRE operations (at most two).
+ ///
+ /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
+ /// active operations have ended. This avoids the situation where an increment and a decrement
+ /// get reordered from userspace's perspective.
+ active_inc_refs: u8,
+ /// List of `NodeRefInfo` objects that reference this node.
+ refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+}
+
+#[pin_data]
+pub(crate) struct Node {
+ pub(crate) debug_id: usize,
+ ptr: u64,
+ pub(crate) cookie: u64,
+ pub(crate) flags: u32,
+ pub(crate) owner: Arc<Process>,
+ inner: LockedBy<NodeInner, ProcessInner>,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Node {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+// Make `oneway_todo` work.
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<Transaction> {
+ using ListLinks { self.links.inner };
+ }
+}
+
+impl Node {
+ pub(crate) fn new(
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ owner: Arc<Process>,
+ ) -> impl PinInit<Self> {
+ pin_init!(Self {
+ inner: LockedBy::new(
+ &owner.inner,
+ NodeInner {
+ strong: CountState::new(),
+ weak: CountState::new(),
+ delivery_state: DeliveryState {
+ has_pushed_node: false,
+ has_pushed_wrapper: false,
+ has_weak_zero2one: false,
+ has_strong_zero2one: false,
+ },
+ death_list: List::new(),
+ oneway_todo: List::new(),
+ freeze_list: KVVec::new(),
+ has_oneway_transaction: false,
+ active_inc_refs: 0,
+ refs: List::new(),
+ },
+ ),
+ debug_id: super::next_debug_id(),
+ ptr,
+ cookie,
+ flags,
+ owner,
+ links_track <- AtomicTracker::new(),
+ })
+ }
+
+ pub(crate) fn has_oneway_transaction(&self, owner_inner: &mut ProcessInner) -> bool {
+ let inner = self.inner.access_mut(owner_inner);
+ inner.has_oneway_transaction
+ }
+
+ #[inline(never)]
+ pub(crate) fn full_debug_print(
+ &self,
+ m: &SeqFile,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<()> {
+ let inner = self.inner.access_mut(owner_inner);
+ seq_print!(
+ m,
+ " node {}: u{:016x} c{:016x} hs {} hw {} cs {} cw {}",
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ inner.strong.has_count,
+ inner.weak.has_count,
+ inner.strong.count,
+ inner.weak.count,
+ );
+ if !inner.refs.is_empty() {
+ seq_print!(m, " proc");
+ for node_ref in &inner.refs {
+ seq_print!(m, " {}", node_ref.process.task.pid());
+ }
+ }
+ seq_print!(m, "\n");
+ for t in &inner.oneway_todo {
+ t.debug_print_inner(m, " pending async transaction ");
+ }
+ Ok(())
+ }
+
+ /// Insert the `NodeRef` into this `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn insert_node_info(
+ &self,
+ info: ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+ ) {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .push_front(info);
+ }
+
+ /// Remove the `NodeRef` from this `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn remove_node_info(
+ &self,
+ info: &NodeRefInfo,
+ ) -> Option<ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>> {
+ // SAFETY: We always insert `NodeRefInfo` objects into the `refs` list of the node that it
+ // references in `info.node_ref.node`. That is this node, so `info` cannot possibly be in
+ // the `refs` list of another node.
+ unsafe {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .remove(info)
+ }
+ }
+
+ /// An id that is unique across all binder nodes on the system. Used as the key in the
+ /// `by_node` map.
+ pub(crate) fn global_id(&self) -> usize {
+ self as *const Node as usize
+ }
+
+ pub(crate) fn get_id(&self) -> (u64, u64) {
+ (self.ptr, self.cookie)
+ }
+
+ pub(crate) fn add_death(
+ &self,
+ death: ListArc<DTRWrap<NodeDeath>, 1>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ self.inner.access_mut(guard).death_list.push_back(death);
+ }
+
+ pub(crate) fn inc_ref_done_locked(
+ self: &DArc<Node>,
+ _strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let inner = self.inner.access_mut(owner_inner);
+ if inner.active_inc_refs == 0 {
+ pr_err!("inc_ref_done called when no active inc_refs");
+ return None;
+ }
+
+ inner.active_inc_refs -= 1;
+ if inner.active_inc_refs == 0 {
+ // Having active inc_refs can inhibit dropping of ref-counts. Calculate whether we
+ // would send a refcount decrement, and if so, tell the caller to schedule us.
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ let should_drop_weak = !weak && has_weak;
+ let should_drop_strong = !strong && has_strong;
+
+ // If we want to drop the ref-count again, tell the caller to schedule a work node for
+ // that.
+ let need_push = should_drop_weak || should_drop_strong;
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn update_refcount_locked(
+ self: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ let need_push = if inc {
+ state.count += count;
+ // TODO: This method shouldn't be used for zero-to-one increments.
+ !is_dead && !state.has_count
+ } else {
+ if state.count < count {
+ pr_err!("Failure: refcount underflow!");
+ return None;
+ }
+ state.count -= count;
+ !is_dead && state.count == 0 && state.has_count
+ };
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one(
+ self: &DArc<Self>,
+ strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<Option<DLArc<Node>>, CouldNotDeliverCriticalIncrement> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ state.count += 1;
+ if is_dead || state.has_count {
+ return Ok(None);
+ }
+
+ // Userspace needs to be notified of this.
+ if !strong && inner.delivery_state.should_push_weak_zero2one() {
+ assert!(inner.delivery_state.can_push_weak_zero2one_normally());
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_weak_zero2one();
+ Ok(Some(list_arc))
+ } else if strong && inner.delivery_state.should_push_strong_zero2one() {
+ if inner.delivery_state.can_push_strong_zero2one_normally() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_strong_zero2one();
+ Ok(Some(list_arc))
+ } else {
+ state.count -= 1;
+ Err(CouldNotDeliverCriticalIncrement)
+ }
+ } else {
+ // Work is already pushed, and we don't need to push again.
+ Ok(None)
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
+ self: &DArc<Self>,
+ strong: bool,
+ wrapper: CritIncrWrapper,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<dyn DeliverToRead>> {
+ match self.incr_refcount_allow_zero2one(strong, owner_inner) {
+ Ok(Some(node)) => Some(node as _),
+ Ok(None) => None,
+ Err(CouldNotDeliverCriticalIncrement) => {
+ assert!(strong);
+ let inner = self.inner.access_mut(owner_inner);
+ inner.strong.count += 1;
+ inner.delivery_state.did_push_strong_zero2one_wrapper();
+ Some(wrapper.init(self.clone()))
+ }
+ }
+ }
+
+ pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
+ self.owner
+ .inner
+ .lock()
+ .update_node_refcount(self, inc, strong, count, None);
+ }
+
+ pub(crate) fn populate_counts(
+ &self,
+ out: &mut BinderNodeInfoForRef,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ let inner = self.inner.access(guard);
+ out.strong_count = inner.strong.count as _;
+ out.weak_count = inner.weak.count as _;
+ }
+
+ pub(crate) fn populate_debug_info(
+ &self,
+ out: &mut BinderNodeDebugInfo,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ out.ptr = self.ptr as _;
+ out.cookie = self.cookie as _;
+ let inner = self.inner.access(guard);
+ if inner.strong.has_count {
+ out.has_strong_ref = 1;
+ }
+ if inner.weak.has_count {
+ out.has_weak_ref = 1;
+ }
+ }
+
+ pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) {
+ let inner = self.inner.access_mut(guard);
+ inner.strong.has_count = true;
+ inner.weak.has_count = true;
+ }
+
+ fn write(&self, writer: &mut BinderReturnWriter<'_>, code: u32) -> Result {
+ writer.write_code(code)?;
+ writer.write_payload(&self.ptr)?;
+ writer.write_payload(&self.cookie)?;
+ Ok(())
+ }
+
+ pub(crate) fn submit_oneway(
+ &self,
+ transaction: DLArc<Transaction>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ if guard.is_dead {
+ return Err((BinderError::new_dead(), transaction));
+ }
+
+ let inner = self.inner.access_mut(guard);
+ if inner.has_oneway_transaction {
+ inner.oneway_todo.push_back(transaction);
+ } else {
+ inner.has_oneway_transaction = true;
+ guard.push_work(transaction)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn release(&self) {
+ let mut guard = self.owner.inner.lock();
+ while let Some(work) = self.inner.access_mut(&mut guard).oneway_todo.pop_front() {
+ drop(guard);
+ work.into_arc().cancel();
+ guard = self.owner.inner.lock();
+ }
+
+ while let Some(death) = self.inner.access_mut(&mut guard).death_list.pop_front() {
+ drop(guard);
+ death.into_arc().set_dead();
+ guard = self.owner.inner.lock();
+ }
+ }
+
+ pub(crate) fn pending_oneway_finished(&self) {
+ let mut guard = self.owner.inner.lock();
+ if guard.is_dead {
+ // Cleanup will happen in `Process::deferred_release`.
+ return;
+ }
+
+ let inner = self.inner.access_mut(&mut guard);
+
+ let transaction = inner.oneway_todo.pop_front();
+ inner.has_oneway_transaction = transaction.is_some();
+ if let Some(transaction) = transaction {
+ match guard.push_work(transaction) {
+ Ok(()) => {}
+ Err((_err, work)) => {
+ // Process is dead.
+ // This shouldn't happen due to the `is_dead` check, but if it does, just drop
+ // the transaction and return.
+ drop(guard);
+ drop(work);
+ }
+ }
+ }
+ }
+
+ /// Finds an outdated transaction that the given transaction can replace.
+ ///
+ /// If one is found, it is removed from the list and returned.
+ pub(crate) fn take_outdated_transaction(
+ &self,
+ new: &Transaction,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Option<DLArc<Transaction>> {
+ let inner = self.inner.access_mut(guard);
+ let mut cursor = inner.oneway_todo.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if new.can_replace(&next) {
+ return Some(next.remove());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ /// This is split into a separate function since it's called by both `Node::do_work` and
+ /// `NodeWrapper::do_work`.
+ fn do_work_locked(
+ &self,
+ writer: &mut BinderReturnWriter<'_>,
+ mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<bool> {
+ let inner = self.inner.access_mut(&mut guard);
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ if weak && !has_weak {
+ inner.weak.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ if strong && !has_strong {
+ inner.strong.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ let no_active_inc_refs = inner.active_inc_refs == 0;
+ let should_drop_weak = no_active_inc_refs && (!weak && has_weak);
+ let should_drop_strong = no_active_inc_refs && (!strong && has_strong);
+ if should_drop_weak {
+ inner.weak.has_count = false;
+ }
+ if should_drop_strong {
+ inner.strong.has_count = false;
+ }
+ if no_active_inc_refs && !weak {
+ // Remove the node if there are no references to it.
+ guard.remove_node(self.ptr);
+ }
+ drop(guard);
+
+ if weak && !has_weak {
+ self.write(writer, BR_INCREFS)?;
+ }
+ if strong && !has_strong {
+ self.write(writer, BR_ACQUIRE)?;
+ }
+ if should_drop_strong {
+ self.write(writer, BR_RELEASE)?;
+ }
+ if should_drop_weak {
+ self.write(writer, BR_DECREFS)?;
+ }
+
+ Ok(true)
+ }
+
+ pub(crate) fn add_freeze_listener(
+ &self,
+ process: &Arc<Process>,
+ flags: kernel::alloc::Flags,
+ ) -> Result {
+ let mut vec_alloc = KVVec::<Arc<Process>>::new();
+ loop {
+ let mut guard = self.owner.inner.lock();
+ // Do not check for `guard.is_dead`. The `is_dead` flag that matters here is that of the
+ // owner of the listener, not the target.
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ if len >= inner.freeze_list.capacity() {
+ if len >= vec_alloc.capacity() {
+ drop(guard);
+ vec_alloc = KVVec::with_capacity((1 + len).next_power_of_two(), flags)?;
+ continue;
+ }
+ mem::swap(&mut inner.freeze_list, &mut vec_alloc);
+ for elem in vec_alloc.drain_all() {
+ inner.freeze_list.push_within_capacity(elem)?;
+ }
+ }
+ inner.freeze_list.push_within_capacity(process.clone())?;
+ return Ok(());
+ }
+ }
+
+ pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
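+ // Declared before `guard` so that, by reverse drop order, the vector's backing
+ // allocation is freed only after the spinlock below has been released.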
+ let _unused_capacity;
+ let mut guard = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ inner.freeze_list.retain(|proc| !Arc::ptr_eq(proc, p));
+ if len == inner.freeze_list.len() {
+ pr_warn!(
+ "Could not remove freeze listener for {}\n",
+ p.pid_in_current_ns()
+ );
+ }
+ if inner.freeze_list.is_empty() {
+ _unused_capacity = mem::take(&mut inner.freeze_list);
+ }
+ }
+
+ pub(crate) fn freeze_list<'a>(&'a self, guard: &'a ProcessInner) -> &'a [Arc<Process>] {
+ &self.inner.access(guard).freeze_list
+ }
+}
+
+impl DeliverToRead for Node {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let mut owner_inner = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut owner_inner);
+
+ assert!(inner.delivery_state.has_pushed_node);
+ if inner.delivery_state.has_pushed_wrapper {
+ // If the wrapper is scheduled, then we are either a normal push or weak zero2one
+ // increment, and the wrapper is a strong zero2one increment, so the wrapper always
+ // takes precedence over us.
+ assert!(inner.delivery_state.has_strong_zero2one);
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ return Ok(true);
+ }
+
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ inner.delivery_state.has_strong_zero2one = false;
+
+ self.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ );
+ Ok(())
+ }
+}
+
+/// Represents something that holds one or more ref-counts to a `Node`.
+///
+/// Whenever process A holds a refcount to a node owned by a different process B, then process A
+/// will store a `NodeRef` that refers to the `Node` in process B. When process A releases the
+/// refcount, we destroy the NodeRef, which decrements the ref-count in process B.
+///
+/// This type is also used for some other cases. For example, a transaction allocation holds a
+/// refcount on the target node, and this is implemented by storing a `NodeRef` in the allocation
+/// so that the destructor of the allocation will drop a refcount of the `Node`.
+pub(crate) struct NodeRef {
+ pub(crate) node: DArc<Node>,
+ /// How many times does this NodeRef hold a refcount on the Node?
+ strong_node_count: usize,
+ weak_node_count: usize,
+ /// How many times does userspace hold a refcount on this NodeRef?
+ strong_count: usize,
+ weak_count: usize,
+}
+
+impl NodeRef {
+ pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> Self {
+ Self {
+ node,
+ strong_node_count: strong_count,
+ weak_node_count: weak_count,
+ strong_count,
+ weak_count,
+ }
+ }
+
+ pub(crate) fn absorb(&mut self, mut other: Self) {
+ assert!(
+ Arc::ptr_eq(&self.node, &other.node),
+ "absorb called with differing nodes"
+ );
+ self.strong_node_count += other.strong_node_count;
+ self.weak_node_count += other.weak_node_count;
+ self.strong_count += other.strong_count;
+ self.weak_count += other.weak_count;
+ other.strong_count = 0;
+ other.weak_count = 0;
+ other.strong_node_count = 0;
+ other.weak_node_count = 0;
+
+ if self.strong_node_count >= 2 || self.weak_node_count >= 2 {
+ let mut guard = self.node.owner.inner.lock();
+ let inner = self.node.inner.access_mut(&mut guard);
+
+ if self.strong_node_count >= 2 {
+ inner.strong.count -= self.strong_node_count - 1;
+ self.strong_node_count = 1;
+ assert_ne!(inner.strong.count, 0);
+ }
+ if self.weak_node_count >= 2 {
+ inner.weak.count -= self.weak_node_count - 1;
+ self.weak_node_count = 1;
+ assert_ne!(inner.weak.count, 0);
+ }
+ }
+ }
+
+ pub(crate) fn get_count(&self) -> (usize, usize) {
+ (self.strong_count, self.weak_count)
+ }
+
+ pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
+ if strong && self.strong_count == 0 {
+ return Err(EINVAL);
+ }
+ Ok(self
+ .node
+ .owner
+ .inner
+ .lock()
+ .new_node_ref(self.node.clone(), strong, None))
+ }
+
+ /// Updates (increments or decrements) the number of references held against the node. If the
+ /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
+ /// its `update_refcount` function called.
+ ///
+ /// Returns whether `self` should be removed (when both counts are zero).
+ pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
+ if strong && self.strong_count == 0 {
+ return false;
+ }
+ let (count, node_count, other_count) = if strong {
+ (
+ &mut self.strong_count,
+ &mut self.strong_node_count,
+ self.weak_count,
+ )
+ } else {
+ (
+ &mut self.weak_count,
+ &mut self.weak_node_count,
+ self.strong_count,
+ )
+ };
+ if inc {
+ if *count == 0 {
+ *node_count = 1;
+ self.node.update_refcount(true, 1, strong);
+ }
+ *count += 1;
+ } else {
+ if *count == 0 {
+ pr_warn!(
+ "pid {} performed invalid decrement on ref\n",
+ kernel::current!().pid()
+ );
+ return false;
+ }
+ *count -= 1;
+ if *count == 0 {
+ self.node.update_refcount(false, *node_count, strong);
+ *node_count = 0;
+ return other_count == 0;
+ }
+ }
+ false
+ }
+}
+
+impl Drop for NodeRef {
+ // This destructor is called conditionally from `Allocation::drop`. That branch is often
+ // mispredicted. Inlining this method call reduces the cost of those branch mispredictions.
+ #[inline(always)]
+ fn drop(&mut self) {
+ if self.strong_node_count > 0 {
+ self.node
+ .update_refcount(false, self.strong_node_count, true);
+ }
+ if self.weak_node_count > 0 {
+ self.node
+ .update_refcount(false, self.weak_node_count, false);
+ }
+ }
+}
+
+struct NodeDeathInner {
+ dead: bool,
+ cleared: bool,
+ notification_done: bool,
+ /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
+ /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything
+ /// to the user).
+ aborted: bool,
+}
+
+/// Used to deliver notifications when a process dies.
+///
+/// A process can request to be notified when a process dies using `BC_REQUEST_DEATH_NOTIFICATION`.
+/// This will make the driver send a `BR_DEAD_BINDER` to userspace when the process dies (or
+/// immediately if it is already dead). Userspace is supposed to respond with `BC_DEAD_BINDER_DONE`
+/// once it has processed the notification.
+///
+/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
+/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the
+/// notification has been removed. Note that if the remote process dies before the kernel has
+/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
+/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
+/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
+///
+/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
+/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
+///
+/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
+/// registration, then the death registration is immediately deleted (we implement this using the
+/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
+/// deregistering death notifications, so this codepath is not executed under normal circumstances.
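+///
+/// A typical exchange, restating the flow above (`->` is a userspace command, `<-` is a kernel
+/// reply):
+///
+/// ```text
+/// -> BC_REQUEST_DEATH_NOTIFICATION
+///        (remote process dies)
+/// <- BR_DEAD_BINDER
+/// -> BC_DEAD_BINDER_DONE
+/// -> BC_CLEAR_DEATH_NOTIFICATION
+/// <- BR_CLEAR_DEATH_NOTIFICATION_DONE
+/// ```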
+#[pin_data]
+pub(crate) struct NodeDeath {
+ node: DArc<Node>,
+ process: Arc<Process>,
+ pub(crate) cookie: u64,
+ #[pin]
+ links_track: AtomicTracker<0>,
+ /// Used by the owner `Node` to store a list of registered death notifications.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `death_list` list of `self.node`.
+ #[pin]
+ death_links: ListLinks<1>,
+ /// Used by the process to keep track of the death notifications for which we have sent a
+ /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `delivered_deaths` list of `self.process`.
+ #[pin]
+ delivered_links: ListLinks<2>,
+ #[pin]
+ delivered_links_track: AtomicTracker<2>,
+ #[pin]
+ inner: SpinLock<NodeDeathInner>,
+}
+
+impl NodeDeath {
+ /// Constructs a new node death notification object.
+ pub(crate) fn new(
+ node: DArc<Node>,
+ process: Arc<Process>,
+ cookie: u64,
+ ) -> impl PinInit<DTRWrap<Self>> {
+ DTRWrap::new(pin_init!(
+ Self {
+ node,
+ process,
+ cookie,
+ links_track <- AtomicTracker::new(),
+ death_links <- ListLinks::new(),
+ delivered_links <- ListLinks::new(),
+ delivered_links_track <- AtomicTracker::new(),
+ inner <- kernel::new_spinlock!(NodeDeathInner {
+ dead: false,
+ cleared: false,
+ notification_done: false,
+ aborted: false,
+ }, "NodeDeath::inner"),
+ }
+ ))
+ }
+
+ /// Sets the cleared flag to `true`.
+ ///
+ /// It removes `self` from the node's death notification list if needed.
+ ///
+ /// Returns whether it needs to be queued.
+ pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
+ let (needs_removal, needs_queueing) = {
+ // Update state and determine if we need to queue a work item. We only need to do it
+ // when the node is not dead or if the user already completed the death notification.
+ let mut inner = self.inner.lock();
+ if abort {
+ inner.aborted = true;
+ }
+ if inner.cleared {
+ // Already cleared.
+ return false;
+ }
+ inner.cleared = true;
+ (!inner.dead, !inner.dead || inner.notification_done)
+ };
+
+ // Remove death notification from node.
+ if needs_removal {
+ let mut owner_inner = self.node.owner.inner.lock();
+ let node_inner = self.node.inner.access_mut(&mut owner_inner);
+ // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
+ // its owner, so it is either in this death list or in no death list.
+ unsafe { node_inner.death_list.remove(self) };
+ }
+ needs_queueing
+ }
+
+ /// Sets the 'notification done' flag to `true`.
+ pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ inner.notification_done = true;
+ inner.cleared
+ };
+ if needs_queueing {
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+ }
+
+ /// Sets the 'dead' flag to `true` and queues work item if needed.
+ pub(crate) fn set_dead(self: DArc<Self>) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ if inner.cleared {
+ false
+ } else {
+ inner.dead = true;
+ true
+ }
+ };
+ if needs_queueing {
+ // Push the death notification to the target process. There is nothing else to do if
+ // it's already dead.
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let process = death.process.clone();
+ let _ = process.push_work(death);
+ }
+ }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeDeath {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<1> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.death_links };
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for DTRWrap<NodeDeath> {
+ tracked_by wrapped: NodeDeath;
+ }
+}
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for NodeDeath {
+ tracked_by delivered_links_track: AtomicTracker<2>;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<2> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.delivered_links };
+ }
+}
+
+impl DeliverToRead for NodeDeath {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let done = {
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ inner.cleared && (!inner.dead || inner.notification_done)
+ };
+
+ let cookie = self.cookie;
+ let cmd = if done {
+ BR_CLEAR_DEATH_NOTIFICATION_DONE
+ } else {
+ let process = self.process.clone();
+ let mut process_inner = process.inner.lock();
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ // We're still holding the inner lock, so it cannot be aborted while we insert it into
+ // the delivered list.
+ process_inner.death_delivered(self.clone());
+ BR_DEAD_BINDER
+ };
+
+ writer.write_code(cmd)?;
+ writer.write_payload(&cookie)?;
+ // DEAD_BINDER notifications can cause transactions, so stop processing work items when we
+ // get to a death notification.
+ Ok(cmd != BR_DEAD_BINDER)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ let inner = self.inner.lock();
+
+ let dead_binder = inner.dead && !inner.notification_done;
+
+ if dead_binder {
+ if inner.cleared {
+ seq_print!(m, "{}has cleared dead binder\n", prefix);
+ } else {
+ seq_print!(m, "{}has dead binder\n", prefix);
+ }
+ } else {
+ seq_print!(m, "{}has cleared death notification\n", prefix);
+ }
+
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/node/wrapper.rs b/drivers/android/binder/node/wrapper.rs
new file mode 100644
index 000000000000..43294c050502
--- /dev/null
+++ b/drivers/android/binder/node/wrapper.rs
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{list::ListArc, prelude::*, seq_file::SeqFile, seq_print, sync::UniqueArc};
+
+use crate::{node::Node, thread::Thread, BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead};
+
+use core::mem::MaybeUninit;
+
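+/// A pre-allocated work item for delivering a critical (strong 0-to-1) increment when the
+/// node's own work item is already queued and the node therefore cannot be pushed again
+/// directly; see the `CouldNotDeliverCriticalIncrement` retry in `Process::get_node`.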
+pub(crate) struct CritIncrWrapper {
+ inner: UniqueArc<MaybeUninit<DTRWrap<NodeWrapper>>>,
+}
+
+impl CritIncrWrapper {
+ pub(crate) fn new() -> Result<Self> {
+ Ok(CritIncrWrapper {
+ inner: UniqueArc::new_uninit(GFP_KERNEL)?,
+ })
+ }
+
+ pub(super) fn init(self, node: DArc<Node>) -> DLArc<dyn DeliverToRead> {
+ match self.inner.pin_init_with(DTRWrap::new(NodeWrapper { node })) {
+ Ok(initialized) => ListArc::from(initialized) as _,
+ Err(err) => match err {},
+ }
+ }
+}
+
+struct NodeWrapper {
+ node: DArc<Node>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeWrapper {
+ untracked;
+ }
+}
+
+impl DeliverToRead for NodeWrapper {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let node = &self.node;
+ let mut owner_inner = node.owner.inner.lock();
+ let inner = node.inner.access_mut(&mut owner_inner);
+
+ let ds = &mut inner.delivery_state;
+
+ assert!(ds.has_pushed_wrapper);
+ assert!(ds.has_strong_zero2one);
+ ds.has_pushed_wrapper = false;
+ ds.has_strong_zero2one = false;
+
+ node.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.node.debug_id,
+ self.node.ptr,
+ self.node.cookie,
+ );
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
new file mode 100644
index 000000000000..9379038f61f5
--- /dev/null
+++ b/drivers/android/binder/page_range.rs
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module has utilities for managing a page range where unused pages may be reclaimed by a
+//! vma shrinker.
+
+// To avoid deadlocks, locks are taken in the order:
+//
+// 1. mmap lock
+// 2. spinlock
+// 3. lru spinlock
+//
+// The shrinker will use trylock methods because it locks them in a different order.
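+//
+// For example, `rust_shrink_free_page` below is entered with the lru spinlock already held, so
+// it must trylock the remaining locks instead of blocking on them.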
+
+use core::{
+ marker::PhantomPinned,
+ mem::{size_of, size_of_val, MaybeUninit},
+ ptr,
+};
+
+use kernel::{
+ bindings,
+ error::Result,
+ ffi::{c_ulong, c_void},
+ mm::{virt, Mm, MmWithUser},
+ new_mutex, new_spinlock,
+ page::{Page, PAGE_SHIFT, PAGE_SIZE},
+ prelude::*,
+ str::CStr,
+ sync::{aref::ARef, Mutex, SpinLock},
+ task::Pid,
+ transmute::FromBytes,
+ types::Opaque,
+ uaccess::UserSliceReader,
+};
+
+/// Represents a shrinker that can be registered with the kernel.
+///
+/// Each shrinker can be used by many `ShrinkablePageRange` objects.
+#[repr(C)]
+pub(crate) struct Shrinker {
+ inner: Opaque<*mut bindings::shrinker>,
+ list_lru: Opaque<bindings::list_lru>,
+}
+
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Send for Shrinker {}
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Sync for Shrinker {}
+
+impl Shrinker {
+ /// Create a new shrinker.
+ ///
+ /// # Safety
+ ///
+ /// Before using this shrinker with a `ShrinkablePageRange`, the `register` method must have
+ /// been called exactly once, and it must not have returned an error.
+ pub(crate) const unsafe fn new() -> Self {
+ Self {
+ inner: Opaque::uninit(),
+ list_lru: Opaque::uninit(),
+ }
+ }
+
+ /// Register this shrinker with the kernel.
+ pub(crate) fn register(&'static self, name: &CStr) -> Result<()> {
+ // SAFETY: These fields are not yet used, so it's okay to zero them.
+ unsafe {
+ self.inner.get().write(ptr::null_mut());
+ self.list_lru.get().write_bytes(0, 1);
+ }
+
+ // SAFETY: The field is not yet used, so we can initialize it.
+ let ret = unsafe { bindings::__list_lru_init(self.list_lru.get(), false, ptr::null_mut()) };
+ if ret != 0 {
+ return Err(Error::from_errno(ret));
+ }
+
+ // SAFETY: The `name` points at a valid c string.
+ let shrinker = unsafe { bindings::shrinker_alloc(0, name.as_char_ptr()) };
+ if shrinker.is_null() {
+ // SAFETY: We initialized it, so it's okay to destroy it.
+ unsafe { bindings::list_lru_destroy(self.list_lru.get()) };
+ return Err(ENOMEM);
+ }
+
+ // SAFETY: We're about to register the shrinker, and these are the fields we need to
+ // initialize. (All other fields are already zeroed.)
+ unsafe {
+ (&raw mut (*shrinker).count_objects).write(Some(rust_shrink_count));
+ (&raw mut (*shrinker).scan_objects).write(Some(rust_shrink_scan));
+ (&raw mut (*shrinker).private_data).write(self.list_lru.get().cast());
+ }
+
+ // SAFETY: The new shrinker has been fully initialized, so we can register it.
+ unsafe { bindings::shrinker_register(shrinker) };
+
+ // SAFETY: This initializes the pointer to the shrinker so that we can use it.
+ unsafe { self.inner.get().write(shrinker) };
+
+ Ok(())
+ }
+}
+
+/// A container that manages a page range in a vma.
+///
+/// The pages can be thought of as an array of booleans indicating whether each page is usable.
+/// The methods `use_range` and `stop_using_range` set all booleans in a range to true or false,
+/// respectively. Initially, no pages are allocated. When a page is not used, it is not freed
+/// immediately. Instead, it is made available to the memory shrinker, which may free it if the
+/// system is under memory pressure.
+///
+/// It's okay for `use_range` and `stop_using_range` to race with each other, although there's no
+/// way to know whether an index ends up with true or false if a call to `use_range` races with
+/// another call to `stop_using_range` on a given index.
+///
+/// It's also okay for the two methods to race with themselves, e.g. if two threads call
+/// `use_range` on the same index, then that's fine and neither call will return until the page is
+/// allocated and mapped.
+///
+/// The methods that read or write to a range require that the page is marked as in use. So it is
+/// _not_ okay to call `stop_using_range` on a page that is in use by the methods that read or
+/// write to the page.
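+///
+/// A sketch of the intended calling pattern (illustrative only; locking and error handling are
+/// simplified):
+///
+/// ```ignore
+/// let n = range.register_with_vma(vma)?;  // once, when the vma is mmapped
+/// range.use_range(lo, hi)?;               // mark pages [lo, hi) as in use
+/// unsafe { range.write(offset, &obj)? };  // ok while the pages stay in use
+/// range.stop_using_range(lo, hi);         // shrinker may now reclaim the pages
+/// ```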
+#[pin_data(PinnedDrop)]
+pub(crate) struct ShrinkablePageRange {
+ /// Shrinker object registered with the kernel.
+ shrinker: &'static Shrinker,
+ /// Pid using this page range. Only used as debugging information.
+ pid: Pid,
+ /// The mm for the relevant process.
+ mm: ARef<Mm>,
+ /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
+ #[pin]
+ mm_lock: Mutex<()>,
+ /// Spinlock protecting changes to pages.
+ #[pin]
+ lock: SpinLock<Inner>,
+
+ /// Must not move, since page info has pointers back.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+struct Inner {
+ /// Array of pages.
+ ///
+ /// Since this is also accessed by the shrinker, we can't use a `Box`, which asserts exclusive
+ /// ownership. To deal with that, we manage it using raw pointers.
+ pages: *mut PageInfo,
+ /// Length of the `pages` array.
+ size: usize,
+ /// The address of the vma to insert the pages into.
+ vma_addr: usize,
+}
+
+// SAFETY: proper locking is in place for `Inner`
+unsafe impl Send for Inner {}
+
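+/// Guard returned by `stable_trylock_mm`. The `'static` lifetime is sound because the
+/// destructor of `ShrinkablePageRange` takes `mm_lock` before freeing the pages, so the guard
+/// cannot outlive the data it protects.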
+type StableMmGuard =
+ kernel::sync::lock::Guard<'static, (), kernel::sync::lock::mutex::MutexBackend>;
+
+/// An array element that describes the current state of a page.
+///
+/// There are three states:
+///
+/// * Free. The page is None. The `lru` element is not queued.
+/// * Available. The page is Some. The `lru` element is queued to the shrinker's lru.
+/// * Used. The page is Some. The `lru` element is not queued.
+///
+/// When an element is available, the shrinker is able to free the page.
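+///
+/// The transitions, in terms of the methods below:
+///
+/// ```text
+/// Free --set_page--> Used --list_lru_add--> Available
+/// Available --list_lru_del--> Used
+/// Available --take_page--> Free   (only the shrinker takes this path)
+/// ```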
+#[repr(C)]
+struct PageInfo {
+ lru: bindings::list_head,
+ page: Option<Page>,
+ range: *const ShrinkablePageRange,
+}
+
+impl PageInfo {
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok, and that the page is not currently set.
+ unsafe fn set_page(me: *mut PageInfo, page: Page) {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for writing, so also valid for reading.
+ if unsafe { (*ptr).is_some() } {
+ pr_err!("set_page called when there is already a page");
+ // SAFETY: We will initialize the page again below.
+ unsafe { ptr::drop_in_place(ptr) };
+ }
+
+ // SAFETY: The pointer is valid for writing.
+ unsafe { ptr::write(ptr, Some(page)) };
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that reading from `me.page` is ok for the duration of 'a.
+ unsafe fn get_page<'a>(me: *const PageInfo) -> Option<&'a Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw const (*me).page };
+
+ // SAFETY: The pointer is valid for reading.
+ unsafe { (*ptr).as_ref() }
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok for the duration of 'a.
+ unsafe fn take_page(me: *mut PageInfo) -> Option<Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for reading and writing.
+ unsafe { (*ptr).take() }
+ }
+
+ /// Add this page to the lru list, if not already in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_add(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_add(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+
+ /// Remove this page from the lru list, if it is in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_del(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_del(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+}
+
+impl ShrinkablePageRange {
+ /// Create a new `ShrinkablePageRange` using the given shrinker.
+ pub(crate) fn new(shrinker: &'static Shrinker) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ shrinker,
+ pid: kernel::current!().pid(),
+ mm: ARef::from(&**kernel::current!().mm().ok_or(ESRCH)?),
+ mm_lock <- new_mutex!((), "ShrinkablePageRange::mm"),
+ lock <- new_spinlock!(Inner {
+ pages: ptr::null_mut(),
+ size: 0,
+ vma_addr: 0,
+ }, "ShrinkablePageRange"),
+ _pin: PhantomPinned,
+ })
+ }
+
+ pub(crate) fn stable_trylock_mm(&self) -> Option<StableMmGuard> {
+ // SAFETY: This extends the duration of the reference. Since this call happens before
+ // `mm_lock` is taken in the destructor of `ShrinkablePageRange`, the destructor will block
+ // until the returned guard is dropped. This ensures that the guard is valid until dropped.
+ let mm_lock = unsafe { &*ptr::from_ref(&self.mm_lock) };
+
+ mm_lock.try_lock()
+ }
+
+ /// Register a vma with this page range. Returns the size of the region.
+ pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
+ let num_bytes = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let num_pages = num_bytes >> PAGE_SHIFT;
+
+ if !ptr::eq::<Mm>(&*self.mm, &**vma.mm()) {
+ pr_debug!("Failed to register with vma: invalid vma->vm_mm");
+ return Err(EINVAL);
+ }
+ if num_pages == 0 {
+ pr_debug!("Failed to register with vma: size zero");
+ return Err(EINVAL);
+ }
+
+ let mut pages = KVVec::<PageInfo>::with_capacity(num_pages, GFP_KERNEL)?;
+
+ // SAFETY: This just initializes the pages array.
+ unsafe {
+ let self_ptr = self as *const ShrinkablePageRange;
+ for i in 0..num_pages {
+ let info = pages.as_mut_ptr().add(i);
+ (&raw mut (*info).range).write(self_ptr);
+ (&raw mut (*info).page).write(None);
+ let lru = &raw mut (*info).lru;
+ (&raw mut (*lru).next).write(lru);
+ (&raw mut (*lru).prev).write(lru);
+ }
+ }
+
+ let mut inner = self.lock.lock();
+ if inner.size > 0 {
+ pr_debug!("Failed to register with vma: already registered");
+ drop(inner);
+ return Err(EBUSY);
+ }
+
+ inner.pages = pages.into_raw_parts().0;
+ inner.size = num_pages;
+ inner.vma_addr = vma.start();
+
+ Ok(num_pages)
+ }
+
+ /// Make sure that the given pages are allocated and mapped.
+ ///
+ /// Must not be called from an atomic context.
+ pub(crate) fn use_range(&self, start: usize, end: usize) -> Result<()> {
+ if start >= end {
+ return Ok(());
+ }
+ let mut inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in start..end {
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // Since we're going to use the page, we should remove it from the lru list so that
+ // the shrinker will not free it.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ } else {
+ // We have to allocate a new page. Use the slow path.
+ drop(inner);
+ // SAFETY: `i < end <= inner.size` so `i` is in bounds.
+ match unsafe { self.use_page_slow(i) } {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("Error in use_page_slow: {:?}", err);
+ return Err(err);
+ }
+ }
+ inner = self.lock.lock();
+ }
+ }
+ Ok(())
+ }
+
+ /// Mark the given page as in use, slow path.
+ ///
+ /// Must not be called from an atomic context.
+ ///
+ /// # Safety
+ ///
+ /// Assumes that `i` is in bounds.
+ #[cold]
+ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
+ let new_page = Page::alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)?;
+
+ let mm_mutex = self.mm_lock.lock();
+ let inner = self.lock.lock();
+
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // The page was already there, or someone else added the page while we didn't hold the
+ // spinlock.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ return Ok(());
+ }
+
+ let vma_addr = inner.vma_addr;
+ // Release the spinlock while we insert the page into the vma.
+ drop(inner);
+
+ // No overflow since we stay in bounds of the vma.
+ let user_page_addr = vma_addr + (i << PAGE_SHIFT);
+
+ // We use `mmput_async` when dropping the `mm` because `use_page_slow` is usually used from
+ // a remote process. If the call to `mmput` races with the process shutting down, then the
+ // caller of `use_page_slow` becomes responsible for cleaning up the `mm`, which doesn't
+ // happen until it returns to userspace. However, the caller might instead go to sleep and
+ // wait for the owner of the `mm` to wake it up, which doesn't happen because it's in the
+ // middle of a shutdown process that won't complete until the `mm` is dropped. This can
+ // amount to a deadlock.
+ //
+ // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
+ // workqueue.
+ MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
+ .mmap_read_lock()
+ .vma_lookup(vma_addr)
+ .ok_or(ESRCH)?
+ .as_mixedmap_vma()
+ .ok_or(ESRCH)?
+ .vm_insert_page(user_page_addr, &new_page)
+ .inspect_err(|err| {
+ pr_warn!(
+ "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
+ user_page_addr,
+ vma_addr,
+ i,
+ err
+ )
+ })?;
+
+ let inner = self.lock.lock();
+
+ // SAFETY: The `page_info` pointer is valid and currently does not have a page. The page
+ // can be written to since we hold the lock.
+ //
+ // We released and reacquired the spinlock since we checked that the page is null, but we
+ // always hold the mm_lock mutex when setting the page to a non-null value, so it's not
+ // possible for someone else to have changed it since our check.
+ unsafe { PageInfo::set_page(page_info, new_page) };
+
+ drop(inner);
+ drop(mm_mutex);
+
+ Ok(())
+ }
+
+ /// If the given page is in use, then mark it as available so that the shrinker can free it.
+ ///
+ /// May be called from an atomic context.
+ pub(crate) fn stop_using_range(&self, start: usize, end: usize) {
+ if start >= end {
+ return;
+ }
+ let inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in (start..end).rev() {
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: Okay for reading since we have the lock.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // SAFETY: The pointer is valid, and it's the right shrinker.
+ unsafe { PageInfo::list_lru_add(page_info, page.nid(), self.shrinker) };
+ }
+ }
+ }
+
+ /// Helper for reading or writing to a range of bytes that may overlap with several pages.
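+ ///
+ /// For example, with 4 KiB pages, a 10-byte access at `offset == PAGE_SIZE - 4` results in
+ /// two calls to `cb`: one for the final 4 bytes of the first page, and one for the first 6
+ /// bytes of the page after it.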
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ unsafe fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
+ where
+ T: FnMut(&Page, usize, usize) -> Result,
+ {
+ if size == 0 {
+ return Ok(());
+ }
+
+ let (pages, num_pages) = {
+ let inner = self.lock.lock();
+ (inner.pages, inner.size)
+ };
+ let num_bytes = num_pages << PAGE_SHIFT;
+
+ // Check that the request is within the buffer.
+ if offset.checked_add(size).ok_or(EFAULT)? > num_bytes {
+ return Err(EFAULT);
+ }
+
+ let mut page_index = offset >> PAGE_SHIFT;
+ offset &= PAGE_SIZE - 1;
+ while size > 0 {
+ let available = usize::min(size, PAGE_SIZE - offset);
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { pages.add(page_index) };
+ // SAFETY: The caller guarantees that this page is in the "in use" state for the
+ // duration of this call to `iterate`, so nobody will change the page.
+ let page = unsafe { PageInfo::get_page(page_info) };
+ if page.is_none() {
+ pr_warn!("Page is null!");
+ }
+ let page = page.ok_or(EFAULT)?;
+ cb(page, offset, available)?;
+ size -= available;
+ page_index += 1;
+ offset = 0;
+ }
+ Ok(())
+ }
+
+ /// Copy from userspace into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn copy_from_user_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `copy_from_user_slice`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, to_copy| {
+ page.copy_from_user_slice_raw(reader, offset, to_copy)
+ })
+ }
+ }
+
+ /// Copy from this page range into kernel space.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ let mut out_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `read`.
+ unsafe {
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: The sum of `out_offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = (out.as_mut_ptr() as *mut u8).add(out_offset);
+ // SAFETY: The pointer is in-bounds of the `out` variable, so it is valid.
+ page.read_raw(obj_ptr, offset, to_copy)?;
+ out_offset += to_copy;
+ Ok(())
+ })?;
+ }
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Copy from kernel space into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ let mut obj_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `write`.
+ unsafe {
+ self.iterate(offset, size_of_val(obj), |page, offset, to_copy| {
+ // SAFETY: The sum of `obj_offset` and `to_copy` is bounded by the size of `obj`.
+ let obj_ptr = (obj as *const T as *const u8).add(obj_offset);
+ // SAFETY: We have a reference to the object, so the pointer is valid.
+ page.write_raw(obj_ptr, offset, to_copy)?;
+ obj_offset += to_copy;
+ Ok(())
+ })
+ }
+ }
+
+ /// Write zeroes to the given range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn fill_zero(&self, offset: usize, size: usize) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `fill_zero`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, len| {
+ page.fill_zero_raw(offset, len)
+ })
+ }
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for ShrinkablePageRange {
+ fn drop(self: Pin<&mut Self>) {
+ let (pages, size) = {
+ let lock = self.lock.lock();
+ (lock.pages, lock.size)
+ };
+
+ if size == 0 {
+ return;
+ }
+
+ // Note: This call is also necessary for the safety of `stable_trylock_mm`.
+ let mm_lock = self.mm_lock.lock();
+
+ // This is the destructor, so unlike the other methods, we only need to worry about races
+ // with the shrinker here. Since we hold the `mm_lock`, we also can't race with the
+ // shrinker, and after this loop, the shrinker will not access any of our pages since we
+ // removed them from the lru list.
+ for i in 0..size {
+ // SAFETY: Loop is in-bounds of the size.
+ let p_ptr = unsafe { pages.add(i) };
+ // SAFETY: No other readers, so we can read.
+ if let Some(p) = unsafe { PageInfo::get_page(p_ptr) } {
+ // SAFETY: The pointer is valid and it's the right shrinker.
+ unsafe { PageInfo::list_lru_del(p_ptr, p.nid(), self.shrinker) };
+ }
+ }
+
+ drop(mm_lock);
+
+ // SAFETY: `pages` was allocated as an `KVVec<PageInfo>` with capacity `size`. Furthermore,
+ // all `size` elements are initialized. Also, the array is no longer shared with the
+ // shrinker due to the above loop.
+ drop(unsafe { KVVec::from_raw_parts(pages, size, size) });
+ }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_count(
+ shrink: *mut bindings::shrinker,
+ _sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe { bindings::list_lru_count(list_lru) }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_scan(
+ shrink: *mut bindings::shrinker,
+ sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Caller guarantees that it is safe to read this field.
+ let nr_to_scan = unsafe { (*sc).nr_to_scan };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe {
+ bindings::list_lru_walk(
+ list_lru,
+ Some(bindings::rust_shrink_free_page_wrap),
+ ptr::null_mut(),
+ nr_to_scan,
+ )
+ }
+}
+
+const LRU_SKIP: bindings::lru_status = bindings::lru_status_LRU_SKIP;
+const LRU_REMOVED_ENTRY: bindings::lru_status = bindings::lru_status_LRU_REMOVED_RETRY;
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_free_page(
+ item: *mut bindings::list_head,
+ lru: *mut bindings::list_lru_one,
+ _cb_arg: *mut c_void,
+) -> bindings::lru_status {
+ // Fields that should survive after unlocking the lru lock.
+ let page;
+ let page_index;
+ let mm;
+ let mmap_read;
+ let mm_mutex;
+ let vma_addr;
+
+ {
+ // CAST: The `list_head` field is first in `PageInfo`.
+ let info = item as *mut PageInfo;
+ // SAFETY: The `range` field of `PageInfo` is immutable.
+ let range = unsafe { &*((*info).range) };
+
+ mm = match range.mm.mmget_not_zero() {
+ Some(mm) => MmWithUser::into_mmput_async(mm),
+ None => return LRU_SKIP,
+ };
+
+ mm_mutex = match range.stable_trylock_mm() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ mmap_read = match mm.mmap_read_trylock() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ // We can't lock it normally here, since we hold the lru lock.
+ let inner = match range.lock.try_lock() {
+ Some(inner) => inner,
+ None => return LRU_SKIP,
+ };
+
+ // SAFETY: The item is in this lru list, so it's okay to remove it.
+ unsafe { bindings::list_lru_isolate(lru, item) };
+
+ // SAFETY: Both pointers are in bounds of the same allocation.
+ page_index = unsafe { info.offset_from(inner.pages) } as usize;
+
+ // SAFETY: We hold the spinlock, so we can take the page.
+ //
+ // This sets the page pointer to zero before we unmap it from the vma. However, we call
+ // `zap_page_range` before we release the mmap lock, so `use_page_slow` will not be able to
+ // insert a new page until after our call to `zap_page_range`.
+ page = unsafe { PageInfo::take_page(info) };
+ vma_addr = inner.vma_addr;
+
+ // From this point on, we don't access this PageInfo or ShrinkablePageRange again, because
+ // they can be freed at any point after we unlock `lru_lock`. This is with the exception of
+ // `mm_mutex` which is kept alive by holding the lock.
+ }
+
+ // SAFETY: The lru lock is locked when this method is called.
+ unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
+
+ if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
+ let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+ vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ }
+
+ drop(mmap_read);
+ drop(mm_mutex);
+ drop(mm);
+ drop(page);
+
+ // SAFETY: We just unlocked the lru lock, but it should be locked when we return.
+ unsafe { bindings::spin_lock(&raw mut (*lru).lock) };
+
+ LRU_REMOVED_ENTRY
+}
diff --git a/drivers/android/binder/page_range_helper.c b/drivers/android/binder/page_range_helper.c
new file mode 100644
index 000000000000..496887723ee0
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* C helper for page_range.rs to work around a CFI violation.
+ *
+ * Bindgen currently pretends that `enum lru_status` is the same as an integer.
+ * This assumption is fine ABI-wise, but once you add CFI to the mix, it
+ * triggers a CFI violation because `enum lru_status` gets a different CFI tag.
+ *
+ * This file contains a workaround until bindgen can be fixed.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+#include "page_range_helper.h"
+
+unsigned int rust_shrink_free_page(struct list_head *item,
+ struct list_lru_one *list,
+ void *cb_arg);
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg)
+{
+ return rust_shrink_free_page(item, list, cb_arg);
+}
diff --git a/drivers/android/binder/page_range_helper.h b/drivers/android/binder/page_range_helper.h
new file mode 100644
index 000000000000..18dd2dd117b2
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_PAGE_RANGE_HELPER_H
+#define _LINUX_PAGE_RANGE_HELPER_H
+
+#include <linux/list_lru.h>
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg);
+
+#endif /* _LINUX_PAGE_RANGE_HELPER_H */
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
new file mode 100644
index 000000000000..132055b4790f
--- /dev/null
+++ b/drivers/android/binder/process.rs
@@ -0,0 +1,1745 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Process` type, which represents a process using a particular binder
+//! context.
+//!
+//! The `Process` object keeps track of all of the resources that this process owns in the binder
+//! context.
+//!
+//! There is one `Process` object for each binder fd that a process has opened, so processes using
+//! several binder contexts have several `Process` objects. This ensures that the contexts are
+//! fully separated.
+
+use core::mem::take;
+
+use kernel::{
+ bindings,
+ cred::Credential,
+ error::Error,
+ fs::file::{self, File},
+ id_pool::IdPool,
+ list::{List, ListArc, ListArcField, ListLinks},
+ mm,
+ prelude::*,
+ rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::{
+ lock::{spinlock::SpinLockBackend, Guard},
+ Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
+ },
+ task::Task,
+ types::ARef,
+ uaccess::{UserSlice, UserSliceReader},
+ uapi,
+ workqueue::{self, Work},
+};
+
+use crate::{
+ allocation::{Allocation, AllocationInfo, NewAllocation},
+ context::Context,
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
+ page_range::ShrinkablePageRange,
+ range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
+ stats::BinderStats,
+ thread::{PushWorkRes, Thread},
+ BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[path = "freeze.rs"]
+mod freeze;
+use self::freeze::{FreezeCookie, FreezeListener};
+
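+/// The mmapped region of a binder process, together with the range allocator that hands out
+/// buffers from it.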
+struct Mapping {
+ address: usize,
+ alloc: RangeAllocator<AllocationInfo>,
+}
+
+impl Mapping {
+ fn new(address: usize, size: usize) -> Self {
+ Self {
+ address,
+ alloc: RangeAllocator::new(size),
+ }
+ }
+}
+
+// bitflags for defer_work.
+const PROC_DEFER_FLUSH: u8 = 1;
+const PROC_DEFER_RELEASE: u8 = 2;
+
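+/// Whether this process is frozen. `InProgress` already rejects incoming transactions, but the
+/// process is not yet reported as frozen to freeze listeners; see `is_frozen` and
+/// `is_fully_frozen`.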
+#[derive(Copy, Clone)]
+pub(crate) enum IsFrozen {
+ Yes,
+ No,
+ InProgress,
+}
+
+impl IsFrozen {
+ /// Whether incoming transactions should be rejected due to freeze.
+ pub(crate) fn is_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => true,
+ }
+ }
+
+ /// Whether freeze notifications consider this process frozen.
+ pub(crate) fn is_fully_frozen(self) -> bool {
+ match self {
+ IsFrozen::Yes => true,
+ IsFrozen::No => false,
+ IsFrozen::InProgress => false,
+ }
+ }
+}
+
+/// The fields of `Process` protected by the spinlock.
+pub(crate) struct ProcessInner {
+ is_manager: bool,
+ pub(crate) is_dead: bool,
+ threads: RBTree<i32, Arc<Thread>>,
+ /// INVARIANT: Threads pushed to this list must be owned by this process.
+ ready_threads: List<Thread>,
+ nodes: RBTree<u64, DArc<Node>>,
+ mapping: Option<Mapping>,
+ work: List<DTRWrap<dyn DeliverToRead>>,
+ delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
+
+ /// The number of requested threads that haven't registered yet.
+ requested_thread_count: u32,
+ /// The maximum number of threads used by the process thread pool.
+ max_threads: u32,
+ /// The number of threads that have started and registered with the thread pool.
+ started_thread_count: u32,
+
+ /// Bitmap of deferred work to do.
+ defer_work: u8,
+
+ /// Number of transactions to be transmitted before processes in freeze_wait
+ /// are woken up.
+ outstanding_txns: u32,
+ /// Process is frozen and unable to service binder transactions.
+ pub(crate) is_frozen: IsFrozen,
+ /// Process received sync transactions since last frozen.
+ pub(crate) sync_recv: bool,
+ /// Process received async transactions since last frozen.
+ pub(crate) async_recv: bool,
+ pub(crate) binderfs_file: Option<BinderfsProcFile>,
+ /// Check for oneway spam
+ oneway_spam_detection_enabled: bool,
+}
+
+impl ProcessInner {
+ fn new() -> Self {
+ Self {
+ is_manager: false,
+ is_dead: false,
+ threads: RBTree::new(),
+ ready_threads: List::new(),
+ mapping: None,
+ nodes: RBTree::new(),
+ work: List::new(),
+ delivered_deaths: List::new(),
+ requested_thread_count: 0,
+ max_threads: 0,
+ started_thread_count: 0,
+ defer_work: 0,
+ outstanding_txns: 0,
+ is_frozen: IsFrozen::No,
+ sync_recv: false,
+ async_recv: false,
+ binderfs_file: None,
+ oneway_spam_detection_enabled: false,
+ }
+ }
+
+ /// Schedule the work item for execution on this process.
+ ///
+ /// If any threads are ready for work, then the work item is given directly to that thread and
+ /// it is woken up. Otherwise, it is pushed to the process work list.
+ ///
+ /// This call can fail only if the process is dead. In this case, the work item is returned to
+ /// the caller so that the caller can drop it after releasing the inner process lock. This is
+ /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
+ /// taken while holding the inner process lock.
+ pub(crate) fn push_work(
+ &mut self,
+ work: DLArc<dyn DeliverToRead>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ // Try to find a ready thread to which to push the work.
+ if let Some(thread) = self.ready_threads.pop_front() {
+ // Push to thread while holding state lock. This prevents the thread from giving up
+ // (for example, because of a signal) when we're about to deliver work.
+ match thread.push_work(work) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
+ }
+ } else if self.is_dead {
+ Err((BinderError::new_dead(), work))
+ } else {
+ let sync = work.should_sync_wakeup();
+
+ // Didn't find a thread waiting for proc work; this can happen
+ // in two scenarios:
+ // 1. All threads are busy handling transactions
+ // In that case, one of those threads should call back into
+ // the kernel driver soon and pick up this work.
+ // 2. Threads are using the (e)poll interface, in which case
+ // they may be blocked on the waitqueue without having been
+ // added to waiting_threads. For this case, we just iterate
+ // over all threads not handling transaction work, and
+ // wake them all up. We wake all because we don't know whether
+ // a thread that called into (e)poll is handling non-binder
+ // work currently.
+ self.work.push_back(work);
+
+ // Wake up polling threads, if any.
+ for thread in self.threads.values() {
+ thread.notify_if_poll_ready(sync);
+ }
+
+ Ok(())
+ }
+ }
+
+ pub(crate) fn remove_node(&mut self, ptr: u64) {
+ self.nodes.remove(&ptr);
+ }
+
+ /// Updates the reference count on the given node.
+ pub(crate) fn update_node_refcount(
+ &mut self,
+ node: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ othread: Option<&Thread>,
+ ) {
+ let push = node.update_refcount_locked(inc, strong, count, self);
+
+ // If we decided that we need to push work, push either to the process or to a thread if
+ // one is specified.
+ if let Some(node) = push {
+ if let Some(thread) = othread {
+ thread.push_work_deferred(node);
+ } else {
+ let _ = self.push_work(node);
+ // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
+ // that case, it doesn't care about the notification.
+ }
+ }
+ }
+
+ pub(crate) fn new_node_ref(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> NodeRef {
+ self.update_node_refcount(&node, true, strong, 1, thread);
+ let strong_count = if strong { 1 } else { 0 };
+ NodeRef::new(node, strong_count, 1 - strong_count)
+ }
+
+ pub(crate) fn new_node_ref_with_thread(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
+ let push = match wrapper {
+ None => node
+ .incr_refcount_allow_zero2one(strong, self)?
+ .map(|node| node as _),
+ Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
+ };
+ if let Some(node) = push {
+ thread.push_work_deferred(node);
+ }
+ let strong_count = if strong { 1 } else { 0 };
+ Ok(NodeRef::new(node, strong_count, 1 - strong_count))
+ }
+
+ /// Returns an existing node with the given pointer and cookie, if one exists.
+ ///
+ /// Returns an error if a node with the given pointer but a different cookie exists.
+ fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
+ match self.nodes.get(&ptr) {
+ None => Ok(None),
+ Some(node) => {
+ let (_, node_cookie) = node.get_id();
+ if node_cookie == cookie {
+ Ok(Some(node.clone()))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ }
+
+ fn register_thread(&mut self) -> bool {
+ if self.requested_thread_count == 0 {
+ return false;
+ }
+
+ self.requested_thread_count -= 1;
+ self.started_thread_count += 1;
+ true
+ }
+
+ /// Finds a delivered death notification with the given cookie, removes it from the process's
+ /// delivered list, and returns it.
+ fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
+ let mut cursor = self.delivered_deaths.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if next.cookie == cookie {
+ return Some(next.remove().into_arc());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ self.delivered_deaths.push_back(death);
+ } else {
+ pr_warn!("Notification added to `delivered_deaths` twice.");
+ }
+ }
+
+ pub(crate) fn add_outstanding_txn(&mut self) {
+ self.outstanding_txns += 1;
+ }
+
+ fn txns_pending_locked(&self) -> bool {
+ if self.outstanding_txns > 0 {
+ return true;
+ }
+ for thread in self.threads.values() {
+ if thread.has_current_transaction() {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+/// Used to keep track of a node that this process has a handle to.
+#[pin_data]
+pub(crate) struct NodeRefInfo {
+ debug_id: usize,
+ /// The refcount that this process owns to the node.
+ node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
+ death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
+ /// Cookie of the active freeze listener for this node.
+ freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
+ /// Used to store this `NodeRefInfo` in the node's `refs` list.
+ #[pin]
+ links: ListLinks<{ Self::LIST_NODE }>,
+ /// The handle for this `NodeRefInfo`.
+ handle: u32,
+ /// The process that has a handle to the node.
+ pub(crate) process: Arc<Process>,
+}
+
+impl NodeRefInfo {
+ /// The id used for the `Node::refs` list.
+ pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
+ /// The id used for the `ListArc` in `ProcessNodeRefs`.
+ const LIST_PROC: u64 = 0xd703a5263dcc8650;
+
+ fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ debug_id: super::next_debug_id(),
+ node_ref: ListArcField::new(node_ref),
+ death: ListArcField::new(None),
+ freeze: ListArcField::new(None),
+ links <- ListLinks::new(),
+ handle,
+ process,
+ })
+ }
+
+ kernel::list::define_list_arc_field_getter! {
+ pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
+ pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
+ pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
+ pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
+ impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
+ using ListLinks { self.links };
+ }
+}
+
+/// Keeps track of references this process has to nodes owned by other processes.
+///
+/// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
+/// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
+/// extra costs should be eliminated.
+struct ProcessNodeRefs {
+ /// Used to look up nodes using the 32-bit id that this process knows them by.
+ by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to quickly find unused ids in `by_handle`.
+ handle_is_present: IdPool,
+ /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
+ /// the underlying `Node` struct as returned by `Node::global_id`.
+ by_node: RBTree<usize, u32>,
+ /// Used to look up a `FreezeListener` by cookie.
+ ///
+ /// There might be multiple freeze listeners for the same node, but at most one of them is
+ /// active.
+ freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
+}
+
+impl ProcessNodeRefs {
+ fn new() -> Self {
+ Self {
+ by_handle: RBTree::new(),
+ handle_is_present: IdPool::new(),
+ by_node: RBTree::new(),
+ freeze_listeners: RBTree::new(),
+ }
+ }
+}
+
+/// A process using binder.
+///
+/// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
+/// that a process has opened, so processes using several binder contexts have several `Process`
+/// objects. This ensures that the contexts are fully separated.
+#[pin_data]
+pub(crate) struct Process {
+ pub(crate) ctx: Arc<Context>,
+
+ // The task leader (process).
+ pub(crate) task: ARef<Task>,
+
+ // Credential associated with file when `Process` is created.
+ pub(crate) cred: ARef<Credential>,
+
+ #[pin]
+ pub(crate) inner: SpinLock<ProcessInner>,
+
+ #[pin]
+ pub(crate) pages: ShrinkablePageRange,
+
+ // Waitqueue of processes waiting for all outstanding transactions to be
+ // processed.
+ #[pin]
+ freeze_wait: CondVar,
+
+ // Node references are in a different lock to avoid recursive acquisition when
+ // incrementing/decrementing a node in another process.
+ #[pin]
+ node_refs: Mutex<ProcessNodeRefs>,
+
+ // Work node for deferred work item.
+ #[pin]
+ defer_work: Work<Process>,
+
+ // Links for process list in Context.
+ #[pin]
+ links: ListLinks,
+
+ pub(crate) stats: BinderStats,
+}
+
+kernel::impl_has_work! {
+ impl HasWork<Process> for Process { self.defer_work }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Process { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Process {
+ using ListLinks { self.links };
+ }
+}
+
+impl workqueue::WorkItem for Process {
+ type Pointer = Arc<Process>;
+
+ fn run(me: Arc<Self>) {
+ let defer;
+ {
+ let mut inner = me.inner.lock();
+ defer = inner.defer_work;
+ inner.defer_work = 0;
+ }
+
+ if defer & PROC_DEFER_FLUSH != 0 {
+ me.deferred_flush();
+ }
+ if defer & PROC_DEFER_RELEASE != 0 {
+ me.deferred_release();
+ }
+ }
+}
+
+impl Process {
+ fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
+ let current = kernel::current!();
+ let list_process = ListArc::pin_init::<Error>(
+ try_pin_init!(Process {
+ ctx,
+ cred,
+ inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
+ pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
+ node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
+ freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
+ task: current.group_leader().into(),
+ defer_work <- kernel::new_work!("Process::defer_work"),
+ links <- ListLinks::new(),
+ stats: BinderStats::new(),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let process = list_process.clone_arc();
+ process.ctx.register_process(list_process);
+
+ Ok(process)
+ }
+
+ pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
+ self.task.tgid_nr_ns(None)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let inner = self.inner.lock();
+ seq_print!(m, " threads: {}\n", inner.threads.iter().count());
+ seq_print!(
+ m,
+ " requested threads: {}+{}/{}\n",
+ inner.requested_thread_count,
+ inner.started_thread_count,
+ inner.max_threads,
+ );
+ if let Some(mapping) = &inner.mapping {
+ seq_print!(
+ m,
+ " free oneway space: {}\n",
+ mapping.alloc.free_oneway_space()
+ );
+ seq_print!(m, " buffers: {}\n", mapping.alloc.count_buffers());
+ }
+ seq_print!(
+ m,
+ " outstanding transactions: {}\n",
+ inner.outstanding_txns
+ );
+ seq_print!(m, " nodes: {}\n", inner.nodes.iter().count());
+ drop(inner);
+
+ {
+ let mut refs = self.node_refs.lock();
+ let (mut count, mut weak, mut strong) = (0, 0, 0);
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let (nstrong, nweak) = node_ref.get_count();
+ count += 1;
+ weak += nweak;
+ strong += nstrong;
+ }
+ seq_print!(m, " refs: {count} s {strong} w {weak}\n");
+ }
+
+ self.stats.debug_print(" ", m);
+
+ Ok(())
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let mut all_threads = KVec::new();
+ let mut all_nodes = KVec::new();
+ loop {
+ let inner = self.inner.lock();
+ let num_threads = inner.threads.iter().count();
+ let num_nodes = inner.nodes.iter().count();
+
+ if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
+ drop(inner);
+ all_threads.reserve(num_threads, GFP_KERNEL)?;
+ all_nodes.reserve(num_nodes, GFP_KERNEL)?;
+ continue;
+ }
+
+ for thread in inner.threads.values() {
+ assert!(all_threads.len() < all_threads.capacity());
+ let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
+ }
+
+ for node in inner.nodes.values() {
+ assert!(all_nodes.len() < all_nodes.capacity());
+ let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
+ }
+
+ break;
+ }
+
+ for thread in all_threads {
+ thread.debug_print(m, print_all)?;
+ }
+
+ let mut inner = self.inner.lock();
+ for node in all_nodes {
+ if print_all || node.has_oneway_transaction(&mut inner) {
+ node.full_debug_print(m, &mut inner)?;
+ }
+ }
+ drop(inner);
+
+ if print_all {
+ let mut refs = self.node_refs.lock();
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let dead = node_ref.node.owner.inner.lock().is_dead;
+ let (strong, weak) = node_ref.get_count();
+ let debug_id = node_ref.node.debug_id;
+
+ seq_print!(
+ m,
+ " ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
+ r.debug_id,
+ r.handle,
+ if dead { "dead " } else { "" }
+ );
+ }
+ }
+
+ let inner = self.inner.lock();
+ for work in &inner.work {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ for _death in &inner.delivered_deaths {
+ seq_print!(m, " has delivered dead binder\n");
+ }
+ if let Some(mapping) = &inner.mapping {
+ mapping.alloc.debug_print(m)?;
+ }
+ drop(inner);
+
+ Ok(())
+ }
+
+ /// Attempts to fetch a work item from the process queue.
+ pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
+ self.inner.lock().work.pop_front()
+ }
+
+ /// Attempts to fetch a work item from the process queue. If none is available, it registers the
+ /// given thread as ready to receive work directly.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain; when
+ /// it is, work will always be delivered directly to the thread (and not through the process
+ /// queue).
+ pub(crate) fn get_work_or_register<'a>(
+ &'a self,
+ thread: &'a Arc<Thread>,
+ ) -> GetWorkOrRegister<'a> {
+ let mut inner = self.inner.lock();
+ // Try to get work from the process queue.
+ if let Some(work) = inner.work.pop_front() {
+ return GetWorkOrRegister::Work(work);
+ }
+
+ // Register the thread as ready.
+ GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
+ }
+
+ fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
+ let id = {
+ let current = kernel::current!();
+ if !core::ptr::eq(current.group_leader(), &*self.task) {
+ pr_err!("get_current_thread was called from the wrong process.");
+ return Err(EINVAL);
+ }
+ current.pid()
+ };
+
+ {
+ let inner = self.inner.lock();
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+ }
+
+ // Allocate a new `Thread` without holding any locks.
+ let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let ta: Arc<Thread> = Thread::new(id, self.into())?;
+
+ let mut inner = self.inner.lock();
+ match inner.threads.entry(id) {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(ta.clone(), reservation);
+ Ok(ta)
+ }
+ rbtree::Entry::Occupied(_entry) => {
+ pr_err!("Cannot create two threads with the same id.");
+ Err(EINVAL)
+ }
+ }
+ }
+
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ // If push_work fails, drop the work item outside the lock.
+ let res = self.inner.lock().push_work(work);
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ fn set_as_manager(
+ self: ArcBorrow<'_, Self>,
+ info: Option<FlatBinderObject>,
+ thread: &Thread,
+ ) -> Result {
+ let (ptr, cookie, flags) = if let Some(obj) = info {
+ (
+ // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
+ // is safe to access the `binder` field.
+ unsafe { obj.__bindgen_anon_1.binder },
+ obj.cookie,
+ obj.flags,
+ )
+ } else {
+ (0, 0, 0)
+ };
+ let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
+ let node = node_ref.node.clone();
+ self.ctx.set_manager_node(node_ref)?;
+ self.inner.lock().is_manager = true;
+
+ // Force the state of the node to prevent the delivery of acquire/increfs.
+ let mut owner_inner = node.owner.inner.lock();
+ node.force_has_count(&mut owner_inner);
+ Ok(())
+ }
+
+ fn get_node_inner(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
+ // Try to find an existing node.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+ }
+
+ // Allocate the node before reacquiring the lock.
+ let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
+ let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+
+ inner.nodes.insert(rbnode);
+ // This can only fail if someone has already pushed the node to a list, but we just created
+ // it and still hold the lock, so it can't fail right now.
+ let node_ref = inner
+ .new_node_ref_with_thread(node, strong, thread, wrapper)
+ .unwrap();
+
+ Ok(Ok(node_ref))
+ }
+
+ pub(crate) fn get_node(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ ) -> Result<NodeRef> {
+ let mut wrapper = None;
+ for _ in 0..2 {
+ match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
+ Err(err) => return Err(err),
+ Ok(Ok(node_ref)) => return Ok(node_ref),
+ Ok(Err(CouldNotDeliverCriticalIncrement)) => {
+ wrapper = Some(CritIncrWrapper::new()?);
+ }
+ }
+ }
+ // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
+ // loop should run at most twice.
+ unreachable!()
+ }
+
+ pub(crate) fn insert_or_update_handle(
+ self: ArcBorrow<'_, Process>,
+ node_ref: NodeRef,
+ is_manager: bool,
+ ) -> Result<u32> {
+ {
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup before inserting.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+ }
+
+ // Reserve memory for tree nodes.
+ let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let info = UniqueArc::new_uninit(GFP_KERNEL)?;
+
+ let mut refs_lock = self.node_refs.lock();
+ let mut refs = &mut *refs_lock;
+
+ let (unused_id, by_handle_slot) = loop {
+ // ID 0 may only be used by the manager.
+ let start = if is_manager { 0 } else { 1 };
+
+ if let Some(res) = refs.handle_is_present.find_unused_id(start) {
+ match refs.by_handle.entry(res.as_u32()) {
+ rbtree::Entry::Vacant(entry) => break (res, entry),
+ rbtree::Entry::Occupied(_) => {
+ pr_err!("Detected mismatch between handle_is_present and by_handle");
+ res.acquire();
+ kernel::warn_on!(true);
+ return Err(EINVAL);
+ }
+ }
+ }
+
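+ // No unused id below the current capacity: the bitmap must grow. The reallocation
+ // must not happen under the spinlock, so drop the lock, allocate, re-take it, and
+ // retry the search, since the tables may have changed in the meantime.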
+ let grow_request = refs.handle_is_present.grow_request().ok_or(ENOMEM)?;
+ drop(refs_lock);
+ let resizer = grow_request.realloc(GFP_KERNEL)?;
+ refs_lock = self.node_refs.lock();
+ refs = &mut *refs_lock;
+ refs.handle_is_present.grow(resizer);
+ };
+ let handle = unused_id.as_u32();
+
+ // Do the lookup again, as the node may have been inserted while the lock was released.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+
+ let gid = node_ref.node.global_id();
+ let (info_proc, info_node) = {
+ let info_init = NodeRefInfo::new(node_ref, handle, self.into());
+ match info.pin_init_with(info_init) {
+ Ok(info) => ListArc::pair_from_pin_unique(info),
+ // The error type is `Infallible`, so this cannot fail.
+ Err(err) => match err {},
+ }
+ };
+
+ // Ensure the process is still alive while we insert a new reference.
+ //
+ // This releases the lock before inserting the nodes, but since `is_dead` is set as the
+ // first thing in `deferred_release`, process cleanup will not miss the items inserted into
+ // `refs` below.
+ if self.inner.lock().is_dead {
+ return Err(ESRCH);
+ }
+
+ // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
+ // `info_node` into the right node's `refs` list.
+ unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
+
+ refs.by_node.insert(reserve1.into_node(gid, handle));
+ by_handle_slot.insert(info_proc, reserve2);
+ unused_id.acquire();
+ Ok(handle)
+ }
+
+ pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
+ // When handle is zero, try to get the context manager.
+ if handle == 0 {
+ Ok(self.ctx.get_manager_node(true)?)
+ } else {
+ Ok(self.get_node_from_handle(handle, true)?)
+ }
+ }
+
+ pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
+ self.node_refs
+ .lock()
+ .by_handle
+ .get_mut(&handle)
+ .ok_or(ENOENT)?
+ .node_ref()
+ .clone(strong)
+ }
+
+ pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
+ let mut inner = self.inner.lock();
+ // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
+ let removed = unsafe { inner.delivered_deaths.remove(death) };
+ drop(inner);
+ drop(removed);
+ }
+
+ pub(crate) fn update_ref(
+ self: ArcBorrow<'_, Process>,
+ handle: u32,
+ inc: bool,
+ strong: bool,
+ ) -> Result {
+ if inc && handle == 0 {
+ if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
+ if core::ptr::eq(&*self, &*node_ref.node.owner) {
+ return Err(EINVAL);
+ }
+ let _ = self.insert_or_update_handle(node_ref, true);
+ return Ok(());
+ }
+ }
+
+ // To preserve original binder behaviour, we only fail requests where the manager tries to
+ // increment references on itself.
+ let mut refs = self.node_refs.lock();
+ if let Some(info) = refs.by_handle.get_mut(&handle) {
+ if info.node_ref().update(inc, strong) {
+ // Clean up death if there is one attached to this node reference.
+ if let Some(death) = info.death().take() {
+ death.set_cleared(true);
+ self.remove_from_delivered_deaths(&death);
+ }
+
+ // Remove reference from process tables, and from the node's `refs` list.
+
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ let id = info.node_ref().node.global_id();
+ refs.by_handle.remove(&handle);
+ refs.by_node.remove(&id);
+ refs.handle_is_present.release_id(handle as usize);
+
+ if let Some(shrink) = refs.handle_is_present.shrink_request() {
+ drop(refs);
+ // This intentionally ignores allocation failures.
+ if let Ok(new_bitmap) = shrink.realloc(GFP_KERNEL) {
+ refs = self.node_refs.lock();
+ refs.handle_is_present.shrink(new_bitmap);
+ }
+ }
+ }
+ } else {
+ // All refs are cleared during process exit, so this warning is expected in that case.
+ if !self.inner.lock().is_dead {
+ pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
+ }
+ }
+ Ok(())
+ }
+
+ /// Decrements the refcount of the given node, if one exists.
+ pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ inner.update_node_refcount(&node, false, strong, 1, None);
+ }
+ }
+
+ pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
+ let ptr = reader.read::<u64>()?;
+ let cookie = reader.read::<u64>()?;
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
+ // This only fails if the process is dead.
+ let _ = inner.push_work(node);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn buffer_alloc(
+ self: &Arc<Self>,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ from_pid: i32,
+ ) -> BinderResult<NewAllocation> {
+ use kernel::page::PAGE_SIZE;
+
+ let mut reserve_new_args = ReserveNewArgs {
+ debug_id,
+ size,
+ is_oneway,
+ pid: from_pid,
+ ..ReserveNewArgs::default()
+ };
+
+ let (new_alloc, addr) = loop {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
+ let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
+ ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
+ ReserveNew::NeedAlloc(request) => request,
+ };
+ drop(inner);
+ // We need to allocate memory and then call `reserve_new` again.
+ reserve_new_args = alloc_request.make_alloc()?;
+ };
+
+ let res = Allocation::new(
+ self.clone(),
+ debug_id,
+ new_alloc.offset,
+ size,
+ addr + new_alloc.offset,
+ new_alloc.oneway_spam_detected,
+ );
+
+ // This allocation will be marked as in use until the `Allocation` is used to free it.
+ //
+ // This method can't be called while holding a lock, so the lock is released first. It's
+ // okay for several threads to use the method on the same index at the same time. In that
+ // case, one of the calls will allocate the given page (if missing), and the others will
+ // wait for that call to finish allocating the page.
+ //
+ // We will not call `stop_using_range` in parallel with this on the same page, because the
+ // allocation can only be removed via the destructor of the `Allocation` object that we
+ // currently own.
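+ //
+ // For example (illustrative numbers, 4 KiB pages): an allocation at offset 0x800 with
+ // size 0x2800 touches bytes 0x800..0x3000, i.e. pages 0, 1 and 2, so this becomes
+ // `use_range(0, 3)`.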
+ match self.pages.use_range(
+ new_alloc.offset / PAGE_SIZE,
+ (new_alloc.offset + size).div_ceil(PAGE_SIZE),
+ ) {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("use_range failure {:?}", err);
+ return Err(err.into());
+ }
+ }
+
+ Ok(NewAllocation(res))
+ }
+
+ pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut()?;
+ let offset = ptr.checked_sub(mapping.address)?;
+ let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
+ let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ Some(alloc)
+ }
+
+ pub(crate) fn buffer_raw_free(&self, ptr: usize) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ let offset = match ptr.checked_sub(mapping.address) {
+ Some(offset) => offset,
+ None => return,
+ };
+
+ let freed_range = match mapping.alloc.reservation_abort(offset) {
+ Ok(freed_range) => freed_range,
+ Err(_) => {
+ pr_warn!(
+ "Pointer {:x} failed to free, base = {:x}\n",
+ ptr,
+ mapping.address
+ );
+ return;
+ }
+ };
+
+ // No more allocations in this range. Mark them as not in use.
+ //
+ // Must be done before we release the lock so that `use_range` is not used on these
+ // indices until `stop_using_range` returns.
+ self.pages
+ .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
+ }
+ }
+
+ pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
+ pr_warn!("Offset {} failed to be marked freeable\n", offset);
+ }
+ }
+ }
+
+ fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
+ use kernel::page::PAGE_SIZE;
+ let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let mapping = Mapping::new(vma.start(), size);
+ let page_count = self.pages.register_with_vma(vma)?;
+ if page_count * PAGE_SIZE != size {
+ return Err(EINVAL);
+ }
+
+ // Save range allocator for later.
+ self.inner.lock().mapping = Some(mapping);
+
+ Ok(())
+ }
+
+ fn version(&self, data: UserSlice) -> Result {
+ data.writer().write(&BinderVersion::current())
+ }
+
+ pub(crate) fn register_thread(&self) -> bool {
+ self.inner.lock().register_thread()
+ }
+
+ fn remove_thread(&self, thread: Arc<Thread>) {
+ self.inner.lock().threads.remove(&thread.id);
+ thread.release();
+ }
+
+ fn set_max_threads(&self, max: u32) {
+ self.inner.lock().max_threads = max;
+ }
+
+ fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
+ self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
+ }
+
+ pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
+ self.inner.lock().oneway_spam_detection_enabled
+ }
+
+ fn get_node_debug_info(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ // Read the starting point.
+ let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
+ let mut out = BinderNodeDebugInfo::default();
+
+ {
+ let inner = self.inner.lock();
+ for (node_ptr, node) in &inner.nodes {
+ if *node_ptr > ptr {
+ node.populate_debug_info(&mut out, &inner);
+ break;
+ }
+ }
+ }
+
+ writer.write(&out)
+ }
+
+ fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut out = reader.read::<BinderNodeInfoForRef>()?;
+
+ if out.strong_count != 0
+ || out.weak_count != 0
+ || out.reserved1 != 0
+ || out.reserved2 != 0
+ || out.reserved3 != 0
+ {
+ return Err(EINVAL);
+ }
+
+ // Only the context manager is allowed to use this ioctl.
+ if !self.inner.lock().is_manager {
+ return Err(EPERM);
+ }
+
+ {
+ let mut node_refs = self.node_refs.lock();
+ let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
+ let node_ref = node_info.node_ref();
+ let owner_inner = node_ref.node.owner.inner.lock();
+ node_ref.node.populate_counts(&mut out, &owner_inner);
+ }
+
+ // Write the result back.
+ writer.write(&out)
+ }
+
+ pub(crate) fn needs_thread(&self) -> bool {
+ let mut inner = self.inner.lock();
+ let ret = inner.requested_thread_count == 0
+ && inner.ready_threads.is_empty()
+ && inner.started_thread_count < inner.max_threads;
+ if ret {
+ inner.requested_thread_count += 1
+ }
+ ret
+ }
+
+ pub(crate) fn request_death(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ thread: &Thread,
+ ) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ // Queue BR_ERROR if we can't allocate memory for the death notification.
+ let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
+ thread.push_return_work(BR_ERROR);
+ })?;
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ // Nothing to do if there is already a death notification request for this handle.
+ if info.death().is_some() {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
+ return Ok(());
+ }
+
+ let death = {
+ let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
+ match death.pin_init_with(death_init) {
+ Ok(death) => death,
+ // The error type is `Infallible`, so this cannot fail.
+ Err(err) => match err {},
+ }
+ };
+
+ // Register the death notification.
+ {
+ let owner = info.node_ref2().node.owner.clone();
+ let mut owner_inner = owner.inner.lock();
+ if owner_inner.is_dead {
+ let death = Arc::from(death);
+ *info.death() = Some(death.clone());
+ drop(owner_inner);
+ death.set_dead();
+ } else {
+ let death = ListArc::from(death);
+ *info.death() = Some(death.clone_arc());
+ info.node_ref().node.add_death(death, &mut owner_inner);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ let Some(death) = info.death().take() else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
+ return Ok(());
+ };
+ if death.cookie != cookie {
+ *info.death() = Some(death);
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
+ return Ok(());
+ }
+
+ // Update state and determine if we need to queue a work item. We only need to do it when
+ // the node is not dead or if the user already completed the death notification.
+ if death.set_cleared(false) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
+ if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ death.set_notification_done(thread);
+ }
+ }
+
+ /// Locks the spinlock and moves the `nodes` rbtree out.
+ ///
+ /// This allows you to iterate over `nodes` while still being able to hand out mutable
+ /// access to the `ProcessInner` to other parts of the codebase.
+ pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
+ let mut inner = self.inner.lock();
+ WithNodes {
+ nodes: take(&mut inner.nodes),
+ inner,
+ }
+ }
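+
+ // Illustrative usage sketch (hypothetical caller; `visit` is a made-up method):
+ //
+ //     let mut guard = process.lock_with_nodes();
+ //     for node in guard.nodes.values() {
+ //         // The moved-out tree and the lock guard borrow independently, so
+ //         // `guard.inner` can still be handed out as `&mut ProcessInner` here.
+ //         node.visit(&mut guard.inner);
+ //     }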
+
+ fn deferred_flush(&self) {
+ let inner = self.inner.lock();
+ for thread in inner.threads.values() {
+ thread.exit_looper();
+ }
+ }
+
+ fn deferred_release(self: Arc<Self>) {
+ let is_manager = {
+ let mut inner = self.inner.lock();
+ inner.is_dead = true;
+ inner.is_frozen = IsFrozen::No;
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_manager
+ };
+
+ if is_manager {
+ self.ctx.unset_manager_node();
+ }
+
+ self.ctx.deregister_process(&self);
+
+ let binderfs_file = self.inner.lock().binderfs_file.take();
+ drop(binderfs_file);
+
+ // Release threads.
+ let threads = {
+ let mut inner = self.inner.lock();
+ let threads = take(&mut inner.threads);
+ let ready = take(&mut inner.ready_threads);
+ drop(inner);
+ drop(ready);
+
+ for thread in threads.values() {
+ thread.release();
+ }
+ threads
+ };
+
+ // Release nodes.
+ {
+ while let Some(node) = {
+ let mut lock = self.inner.lock();
+ lock.nodes.cursor_front_mut().map(|c| c.remove_current().1)
+ } {
+ node.to_key_value().1.release();
+ }
+ }
+
+ // Clean up death listeners and remove nodes from external node info lists.
+ for info in self.node_refs.lock().by_handle.values_mut() {
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ // Remove all death notifications from the nodes (that belong to a different process).
+ let death = if let Some(existing) = info.death().take() {
+ existing
+ } else {
+ continue;
+ };
+ death.set_cleared(false);
+ }
+
+ // Clean up freeze listeners.
+ let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
+ for listener in freeze_listeners.values() {
+ listener.on_process_exit(&self);
+ }
+ drop(freeze_listeners);
+
+ // Release refs on foreign nodes.
+ {
+ let mut refs = self.node_refs.lock();
+ let by_handle = take(&mut refs.by_handle);
+ let by_node = take(&mut refs.by_node);
+ drop(refs);
+ drop(by_node);
+ drop(by_handle);
+ }
+
+ // Cancel all pending work items.
+ while let Some(work) = self.get_work() {
+ work.into_arc().cancel();
+ }
+
+ // Clear the `delivered_deaths` list.
+ //
+ // The inner scope ensures that the lock guard is dropped before the loop body runs.
+ while let Some(delivered_death) = { self.inner.lock().delivered_deaths.pop_front() } {
+ drop(delivered_death);
+ }
+
+ // Free any resources kept alive by allocated buffers.
+ let omapping = self.inner.lock().mapping.take();
+ if let Some(mut mapping) = omapping {
+ let address = mapping.address;
+ mapping
+ .alloc
+ .take_for_each(|offset, size, debug_id, odata| {
+ let ptr = offset + address;
+ let mut alloc =
+ Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ drop(alloc)
+ });
+ }
+
+ // Calls to `synchronize_rcu()` in the `Thread` destructors happen here.
+ drop(threads);
+ }
+
+ pub(crate) fn drop_outstanding_txn(&self) {
+ let wake = {
+ let mut inner = self.inner.lock();
+ if inner.outstanding_txns == 0 {
+ pr_err!("outstanding_txns underflow");
+ return;
+ }
+ inner.outstanding_txns -= 1;
+ inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
+ };
+
+ if wake {
+ self.freeze_wait.notify_all();
+ }
+ }
+
+ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
+ if info.enable == 0 {
+ let msgs = self.prepare_freeze_messages()?;
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = IsFrozen::No;
+ drop(inner);
+ msgs.send_messages();
+ return Ok(());
+ }
+
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = IsFrozen::InProgress;
+
+ if info.timeout_ms > 0 {
+ let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
+ while jiffies > 0 {
+ if inner.outstanding_txns == 0 {
+ break;
+ }
+
+ match self
+ .freeze_wait
+ .wait_interruptible_timeout(&mut inner, jiffies)
+ {
+ CondVarTimeoutResult::Signal { .. } => {
+ inner.is_frozen = IsFrozen::No;
+ return Err(ERESTARTSYS);
+ }
+ CondVarTimeoutResult::Woken { jiffies: remaining } => {
+ jiffies = remaining;
+ }
+ CondVarTimeoutResult::Timeout => {
+ jiffies = 0;
+ }
+ }
+ }
+ }
+
+ if inner.txns_pending_locked() {
+ inner.is_frozen = IsFrozen::No;
+ Err(EAGAIN)
+ } else {
+ drop(inner);
+ match self.prepare_freeze_messages() {
+ Ok(batch) => {
+ self.inner.lock().is_frozen = IsFrozen::Yes;
+ batch.send_messages();
+ Ok(())
+ }
+ Err(kernel::alloc::AllocError) => {
+ self.inner.lock().is_frozen = IsFrozen::No;
+ Err(ENOMEM)
+ }
+ }
+ }
+ }
+}
+
+fn get_frozen_status(data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ let mut info = reader.read::<BinderFrozenStatusInfo>()?;
+ info.sync_recv = 0;
+ info.async_recv = 0;
+ let mut found = false;
+
+ for ctx in crate::context::get_all_contexts()? {
+ ctx.for_each_proc(|proc| {
+ if proc.task.pid() == info.pid as _ {
+ found = true;
+ let inner = proc.inner.lock();
+ let txns_pending = inner.txns_pending_locked();
+ info.async_recv |= inner.async_recv as u32;
+ info.sync_recv |= inner.sync_recv as u32;
+ info.sync_recv |= (txns_pending as u32) << 1;
+ }
+ });
+ }
+
+ if found {
+ writer.write(&info)?;
+ Ok(())
+ } else {
+ Err(EINVAL)
+ }
+}
+
+fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
+ let info = reader.read::<BinderFreezeInfo>()?;
+
+ // Very unlikely for there to be more than 3, since a process normally uses at most binder and
+ // hwbinder.
+ let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
+
+ let ctxs = crate::context::get_all_contexts()?;
+ for ctx in ctxs {
+ for proc in ctx.get_procs_with_pid(info.pid as i32)? {
+ procs.push(proc, GFP_KERNEL)?;
+ }
+ }
+
+ for proc in procs {
+ proc.ioctl_freeze(&info)?;
+ }
+ Ok(())
+}
+
+/// The ioctl handler.
+impl Process {
+ /// Ioctls that are write-only from the perspective of userspace.
+ ///
+ /// The kernel will only read from the pointer that userspace provided to us.
+ fn ioctl_write_only(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ cmd: u32,
+ reader: &mut UserSliceReader,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ match cmd {
+ uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
+ uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
+ uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
+ uapi::BINDER_SET_CONTEXT_MGR_EXT => {
+ this.set_as_manager(Some(reader.read()?), &thread)?
+ }
+ uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
+ this.set_oneway_spam_detection_enabled(reader.read()?)
+ }
+ uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+
+ /// Ioctls that are read/write from the perspective of userspace.
+ ///
+ /// The kernel will both read from and write to the pointer that userspace provided to us.
+ fn ioctl_write_read(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ data: UserSlice,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
+ match cmd {
+ uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
+ uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
+ uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
+ uapi::BINDER_VERSION => this.version(data)?,
+ uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
+ uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+}
+
+/// The file operations supported by `Process`.
+impl Process {
+ pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
+ Self::new(ctx.into(), ARef::from(file.cred()))
+ }
+
+ pub(crate) fn release(this: Arc<Process>, _file: &File) {
+ let binderfs_file;
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_RELEASE;
+ binderfs_file = inner.binderfs_file.take();
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(this);
+ }
+
+ drop(binderfs_file);
+ }
+
+ pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_FLUSH;
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(Arc::from(this));
+ }
+ Ok(())
+ }
+
+ pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
+ use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
+ use kernel::uapi::{_IOC_READ, _IOC_WRITE};
+
+ crate::trace::trace_ioctl(cmd, arg);
+
+ let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
+
+ const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
+
+ match _IOC_DIR(cmd) {
+ _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
+ _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
+ _ => Err(EINVAL),
+ }
+ }
+
+ pub(crate) fn mmap(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ vma: &mm::virt::VmaNew,
+ ) -> Result {
+ // We don't allow mmap to be used in a different process.
+ if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
+ return Err(EINVAL);
+ }
+ if vma.start() == 0 {
+ return Err(EINVAL);
+ }
+
+ vma.try_clear_maywrite().map_err(|_| EPERM)?;
+ vma.set_dontcopy();
+ vma.set_mixedmap();
+
+ // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
+ this.create_mapping(vma)
+ }
+
+ pub(crate) fn poll(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ table: PollTable<'_>,
+ ) -> Result<u32> {
+ let thread = this.get_current_thread()?;
+ let (from_proc, mut mask) = thread.poll(file, table);
+ if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
+ mask |= bindings::POLLIN;
+ }
+ Ok(mask)
+ }
+}
+
+/// Represents that a thread has registered with the `ready_threads` list of its process.
+///
+/// The destructor of this type will unregister the thread from the list of ready threads.
+pub(crate) struct Registration<'a> {
+ thread: &'a Arc<Thread>,
+}
+
+impl<'a> Registration<'a> {
+ fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
+ assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
+ // INVARIANT: We are pushing this thread to the right `ready_threads` list.
+ if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
+ guard.ready_threads.push_front(list_arc);
+ } else {
+ // This branch should be unreachable: it is an error to register the same thread
+ // twice. If it is hit anyway, we try to do something reasonable; most likely, the
+ // thread in question will sleep forever.
+ }
+ Self { thread }
+ }
+}
+
+impl Drop for Registration<'_> {
+ fn drop(&mut self) {
+ let mut inner = self.thread.process.inner.lock();
+ // SAFETY: The thread has the invariant that we never push it to any other linked list than
+ // the `ready_threads` list of its parent process. Therefore, the thread is either in that
+ // list, or in no list.
+ unsafe { inner.ready_threads.remove(self.thread) };
+ }
+}
+
+pub(crate) struct WithNodes<'a> {
+ pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
+ pub(crate) nodes: RBTree<u64, DArc<Node>>,
+}
+
+impl Drop for WithNodes<'_> {
+ fn drop(&mut self) {
+ core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
+ if self.nodes.iter().next().is_some() {
+ pr_err!("nodes array was modified while using lock_with_nodes\n");
+ }
+ }
+}
+
+pub(crate) enum GetWorkOrRegister<'a> {
+ Work(DLArc<dyn DeliverToRead>),
+ Register(Registration<'a>),
+}
diff --git a/drivers/android/binder/range_alloc/array.rs b/drivers/android/binder/range_alloc/array.rs
new file mode 100644
index 000000000000..07e1dec2ce63
--- /dev/null
+++ b/drivers/android/binder/range_alloc/array.rs
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::{PAGE_MASK, PAGE_SIZE},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct ArrayRangeAllocator<T> {
+ /// This stores all ranges that are allocated. Unlike the tree based allocator, we do *not*
+ /// store the free ranges.
+ ///
+ /// Sorted by offset.
+ pub(super) ranges: KVec<Range<T>>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+struct FindEmptyRes {
+ /// Which index in `ranges` should we insert the new range at?
+ ///
+ /// Inserting the new range at this index keeps `ranges` sorted.
+ insert_at_idx: usize,
+ /// Which offset should we insert the new range at?
+ insert_at_offset: usize,
+}
+
+impl<T> ArrayRangeAllocator<T> {
+ pub(crate) fn new(size: usize, alloc: EmptyArrayAlloc<T>) -> Self {
+ Self {
+ ranges: alloc.ranges,
+ size,
+ free_oneway_space: size / 2,
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.ranges.len()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn is_full(&self) -> bool {
+ self.ranges.len() == self.ranges.capacity()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for range in &self.ranges {
+ seq_print!(
+ m,
+ " buffer {}: {} size {} pid {} oneway {}",
+ 0,
+ range.offset,
+ range.size,
+ range.state.pid(),
+ range.state.is_oneway(),
+ );
+ if let DescriptorState::Reserved(_) = range.state {
+ seq_print!(m, " reserved\n");
+ } else {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ Ok(())
+ }
+
+ /// Find somewhere to put a new range.
+ ///
+ /// Unlike the tree implementation, we do not bother to find the smallest gap. The idea is that
+ /// fragmentation isn't a big issue when we don't have many ranges.
+ ///
+ /// Returns the index and offset at which the new range should be inserted into
+ /// `self.ranges`, or `None` if no gap is large enough.
+ fn find_empty_range(&self, size: usize) -> Option<FindEmptyRes> {
+ let after_last_range = self.ranges.last().map(Range::endpoint).unwrap_or(0);
+
+ if size <= self.total_size() - after_last_range {
+ // We can put the range at the end, so just do that.
+ Some(FindEmptyRes {
+ insert_at_idx: self.ranges.len(),
+ insert_at_offset: after_last_range,
+ })
+ } else {
+ let mut end_of_prev = 0;
+ for (i, range) in self.ranges.iter().enumerate() {
+ // Does it fit before the i'th range?
+ if size <= range.offset - end_of_prev {
+ return Some(FindEmptyRes {
+ insert_at_idx: i,
+ insert_at_offset: end_of_prev,
+ });
+ }
+ end_of_prev = range.endpoint();
+ }
+ None
+ }
+ }
+
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ ) -> Result<usize> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ let FindEmptyRes {
+ insert_at_idx,
+ insert_at_offset,
+ } = self.find_empty_range(size).ok_or(ENOSPC)?;
+ self.free_oneway_space = new_oneway_space;
+
+ let new_range = Range {
+ offset: insert_at_offset,
+ size,
+ state: DescriptorState::new(is_oneway, debug_id, pid),
+ };
+ // Insert the value at the given index to keep the array sorted.
+ self.ranges
+ .insert_within_capacity(insert_at_idx, new_range)
+ .ok()
+ .unwrap();
+
+ Ok(insert_at_offset)
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let i = self
+ .ranges
+ .iter()
+ .position(|range| range.offset == offset)
+ .ok_or(EINVAL)?;
+ let range = &self.ranges[i];
+
+ if let DescriptorState::Allocated(_) = range.state {
+ return Err(EPERM);
+ }
+
+ let size = range.size;
+ let offset = range.offset;
+
+ if range.state.is_oneway() {
+ self.free_oneway_space += size;
+ }
+
+ // This computes the range of pages that are no longer used by *any* allocated range. The
+ // caller will mark them as unused, which means that they can be freed if the system comes
+ // under memory pressure.
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ #[expect(clippy::collapsible_if)] // reads better like this
+ if offset % PAGE_SIZE != 0 {
+ if i == 0 || self.ranges[i - 1].endpoint() <= (offset & PAGE_MASK) {
+ freed_range.start_page_idx -= 1;
+ }
+ }
+ if range.endpoint() % PAGE_SIZE != 0 {
+ let page_after = (range.endpoint() & PAGE_MASK) + PAGE_SIZE;
+ if i + 1 == self.ranges.len() || page_after <= self.ranges[i + 1].offset {
+ freed_range.end_page_idx += 1;
+ }
+ }
+
+ self.ranges.remove(i)?;
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Reserved(reservation) = &range.state else {
+ return Err(ENOENT);
+ };
+
+ range.state = DescriptorState::Allocated(reservation.clone().allocate(data.take()));
+ Ok(())
+ }
+
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Allocated(allocation) = &mut range.state else {
+ return Err(ENOENT);
+ };
+
+ let data = allocation.take();
+ let debug_id = allocation.reservation.debug_id;
+ range.state = DescriptorState::Reserved(allocation.reservation.clone());
+ Ok((range.size, debug_id, data))
+ }
+
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for range in self.ranges.iter_mut() {
+ if let DescriptorState::Allocated(allocation) = &mut range.state {
+ callback(
+ range.offset,
+ range.size,
+ allocation.reservation.debug_id,
+ allocation.data.take(),
+ );
+ }
+ }
+ }
+}
+
+pub(crate) struct EmptyArrayAlloc<T> {
+ ranges: KVec<Range<T>>,
+}
+
+impl<T> EmptyArrayAlloc<T> {
+ pub(crate) fn try_new(capacity: usize) -> Result<Self> {
+ Ok(Self {
+ ranges: KVec::with_capacity(capacity, GFP_KERNEL)?,
+ })
+ }
+}
diff --git a/drivers/android/binder/range_alloc/mod.rs b/drivers/android/binder/range_alloc/mod.rs
new file mode 100644
index 000000000000..2301e2bc1a1f
--- /dev/null
+++ b/drivers/android/binder/range_alloc/mod.rs
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{page::PAGE_SIZE, prelude::*, seq_file::SeqFile, task::Pid};
+
+mod tree;
+use self::tree::{FromArrayAllocs, ReserveNewTreeAlloc, TreeRangeAllocator};
+
+mod array;
+use self::array::{ArrayRangeAllocator, EmptyArrayAlloc};
+
+enum DescriptorState<T> {
+ Reserved(Reservation),
+ Allocated(Allocation<T>),
+}
+
+impl<T> DescriptorState<T> {
+ fn new(is_oneway: bool, debug_id: usize, pid: Pid) -> Self {
+ DescriptorState::Reserved(Reservation {
+ debug_id,
+ is_oneway,
+ pid,
+ })
+ }
+
+ fn pid(&self) -> Pid {
+ match self {
+ DescriptorState::Reserved(inner) => inner.pid,
+ DescriptorState::Allocated(inner) => inner.reservation.pid,
+ }
+ }
+
+ fn is_oneway(&self) -> bool {
+ match self {
+ DescriptorState::Reserved(inner) => inner.is_oneway,
+ DescriptorState::Allocated(inner) => inner.reservation.is_oneway,
+ }
+ }
+}
+
+#[derive(Clone)]
+struct Reservation {
+ debug_id: usize,
+ is_oneway: bool,
+ pid: Pid,
+}
+
+impl Reservation {
+ fn allocate<T>(self, data: Option<T>) -> Allocation<T> {
+ Allocation {
+ data,
+ reservation: self,
+ }
+ }
+}
+
+struct Allocation<T> {
+ reservation: Reservation,
+ data: Option<T>,
+}
+
+impl<T> Allocation<T> {
+ fn deallocate(self) -> (Reservation, Option<T>) {
+ (self.reservation, self.data)
+ }
+
+ fn debug_id(&self) -> usize {
+ self.reservation.debug_id
+ }
+
+ fn take(&mut self) -> Option<T> {
+ self.data.take()
+ }
+}
+
+/// The array implementation must switch to the tree if it wants to go beyond this number of
+/// ranges.
+const TREE_THRESHOLD: usize = 8;
+
+/// Represents a range of pages that have just become completely free.
+#[derive(Copy, Clone)]
+pub(crate) struct FreedRange {
+ pub(crate) start_page_idx: usize,
+ pub(crate) end_page_idx: usize,
+}
+
+impl FreedRange {
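+ // Worked example (illustrative, 4 KiB pages): for `offset = 0x1800` and
+ // `size = 0x2000`, the bytes 0x1800..0x3800 fully cover only page 2, so this
+ // returns `start_page_idx = 2` and `end_page_idx = 3` (an exclusive endpoint).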
+ fn interior_pages(offset: usize, size: usize) -> FreedRange {
+ FreedRange {
+ // Divide, rounding up.
+ start_page_idx: offset.div_ceil(PAGE_SIZE),
+ // Divide, rounding down.
+ end_page_idx: (offset + size) / PAGE_SIZE,
+ }
+ }
+}
+
+struct Range<T> {
+ offset: usize,
+ size: usize,
+ state: DescriptorState<T>,
+}
+
+impl<T> Range<T> {
+ fn endpoint(&self) -> usize {
+ self.offset + self.size
+ }
+}
+
+pub(crate) struct RangeAllocator<T> {
+ inner: Impl<T>,
+}
+
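+/// The backing implementation, promoted lazily: an allocator starts out `Empty`, switches
+/// to a small sorted `Array` on the first reservation, and becomes a `Tree` once the array
+/// is full; `reservation_abort` demotes a drained tree back to `Empty`.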
+enum Impl<T> {
+ Empty(usize),
+ Array(ArrayRangeAllocator<T>),
+ Tree(TreeRangeAllocator<T>),
+}
+
+impl<T> RangeAllocator<T> {
+ pub(crate) fn new(size: usize) -> Self {
+ Self {
+ inner: Impl::Empty(size),
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(size) => size / 2,
+ Impl::Array(array) => array.free_oneway_space(),
+ Impl::Tree(tree) => tree.free_oneway_space(),
+ }
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(_size) => 0,
+ Impl::Array(array) => array.count_buffers(),
+ Impl::Tree(tree) => tree.count_buffers(),
+ }
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ match &self.inner {
+ Impl::Empty(_size) => Ok(()),
+ Impl::Array(array) => array.debug_print(m),
+ Impl::Tree(tree) => tree.debug_print(m),
+ }
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(&mut self, mut args: ReserveNewArgs<T>) -> Result<ReserveNew<T>> {
+ match &mut self.inner {
+ Impl::Empty(size) => {
+ let empty_array = match args.empty_array_alloc.take() {
+ Some(empty_array) => ArrayRangeAllocator::new(*size, empty_array),
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: true,
+ need_new_tree_alloc: false,
+ need_tree_alloc: false,
+ }))
+ }
+ };
+
+ self.inner = Impl::Array(empty_array);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) if array.is_full() => {
+ let allocs = match args.new_tree_alloc {
+ Some(ref mut allocs) => allocs,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: true,
+ need_tree_alloc: true,
+ }))
+ }
+ };
+
+ let new_tree =
+ TreeRangeAllocator::from_array(array.total_size(), &mut array.ranges, allocs);
+
+ self.inner = Impl::Tree(new_tree);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) => {
+ let offset =
+ array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected: false,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: args.tree_alloc,
+ }))
+ }
+ Impl::Tree(tree) => {
+ let alloc = match args.tree_alloc {
+ Some(alloc) => alloc,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: false,
+ need_tree_alloc: true,
+ }));
+ }
+ };
+ let (offset, oneway_spam_detected) =
+ tree.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid, alloc)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: None,
+ }))
+ }
+ }
+ }
+
+ /// Deletes the allocations at `offset`.
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_abort(offset),
+ Impl::Tree(tree) => {
+ let freed_range = tree.reservation_abort(offset)?;
+ if tree.is_empty() {
+ self.inner = Impl::Empty(tree.total_size());
+ }
+ Ok(freed_range)
+ }
+ }
+ }
+
+ /// Called when an allocation is no longer in use by the kernel.
+ ///
+ /// The value in `data` will be stored, if any. A mutable reference is used to avoid dropping
+ /// the `T` when an error is returned.
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_commit(offset, data),
+ Impl::Tree(tree) => tree.reservation_commit(offset, data),
+ }
+ }
+
+ /// Called when the kernel starts using an allocation.
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reserve_existing(offset),
+ Impl::Tree(tree) => tree.reserve_existing(offset),
+ }
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ match &mut self.inner {
+ Impl::Empty(_size) => {}
+ Impl::Array(array) => array.take_for_each(callback),
+ Impl::Tree(tree) => tree.take_for_each(callback),
+ }
+ }
+}
+
+/// The arguments for `reserve_new`.
+#[derive(Default)]
+pub(crate) struct ReserveNewArgs<T> {
+ pub(crate) size: usize,
+ pub(crate) is_oneway: bool,
+ pub(crate) debug_id: usize,
+ pub(crate) pid: Pid,
+ pub(crate) empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ pub(crate) new_tree_alloc: Option<FromArrayAllocs<T>>,
+ pub(crate) tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// The return type of `ReserveNew`.
+pub(crate) enum ReserveNew<T> {
+ Success(ReserveNewSuccess<T>),
+ NeedAlloc(ReserveNewNeedAlloc<T>),
+}
+
+/// Returned by `reserve_new` when the reservation was successful.
+pub(crate) struct ReserveNewSuccess<T> {
+ pub(crate) offset: usize,
+ pub(crate) oneway_spam_detected: bool,
+
+ // If the user supplied an allocation that we did not end up using, then we return it here.
+ // The caller will kfree it outside of the lock.
+ _empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ _new_tree_alloc: Option<FromArrayAllocs<T>>,
+ _tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// Returned by `reserve_new` to request the caller to make an allocation before calling the method
+/// again.
+pub(crate) struct ReserveNewNeedAlloc<T> {
+ args: ReserveNewArgs<T>,
+ need_empty_array_alloc: bool,
+ need_new_tree_alloc: bool,
+ need_tree_alloc: bool,
+}
+
+impl<T> ReserveNewNeedAlloc<T> {
+ /// Make the necessary allocations for another call to `reserve_new`.
+ pub(crate) fn make_alloc(mut self) -> Result<ReserveNewArgs<T>> {
+ if self.need_empty_array_alloc && self.args.empty_array_alloc.is_none() {
+ self.args.empty_array_alloc = Some(EmptyArrayAlloc::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_new_tree_alloc && self.args.new_tree_alloc.is_none() {
+ self.args.new_tree_alloc = Some(FromArrayAllocs::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_tree_alloc && self.args.tree_alloc.is_none() {
+ self.args.tree_alloc = Some(ReserveNewTreeAlloc::try_new()?);
+ }
+ Ok(self.args)
+ }
+}
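+
+// Illustrative caller sketch (lock handling elided; the binder driver's `buffer_alloc`
+// follows this shape): call `reserve_new` under the lock, and on `NeedAlloc` drop the
+// lock, let `make_alloc` perform the GFP_KERNEL allocations, then retry:
+//
+//     let mut args = ReserveNewArgs { size, is_oneway, debug_id, pid, ..Default::default() };
+//     let offset = loop {
+//         match alloc.reserve_new(args)? {
+//             ReserveNew::Success(ok) => break ok.offset,
+//             ReserveNew::NeedAlloc(request) => args = request.make_alloc()?,
+//         }
+//     };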
diff --git a/drivers/android/binder/range_alloc/tree.rs b/drivers/android/binder/range_alloc/tree.rs
new file mode 100644
index 000000000000..838fdd2b47ea
--- /dev/null
+++ b/drivers/android/binder/range_alloc/tree.rs
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::PAGE_SIZE,
+ prelude::*,
+ rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct TreeRangeAllocator<T> {
+ /// This collection contains descriptors for *both* ranges containing an allocation, *and* free
+ /// ranges between allocations. The free ranges get merged, so there are never two free ranges
+ /// next to each other.
+ tree: RBTree<usize, Descriptor<T>>,
+ /// Contains an entry for every free range in `self.tree`. This tree sorts the ranges by size,
+ /// letting us look up the smallest range whose size is at least some lower bound.
+ free_tree: RBTree<FreeKey, ()>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+impl<T> TreeRangeAllocator<T> {
+ pub(crate) fn from_array(
+ size: usize,
+ ranges: &mut KVec<Range<T>>,
+ alloc: &mut FromArrayAllocs<T>,
+ ) -> Self {
+ let mut tree = TreeRangeAllocator {
+ tree: RBTree::new(),
+ free_tree: RBTree::new(),
+ size,
+ free_oneway_space: size / 2,
+ };
+
+ let mut free_offset = 0;
+ for range in ranges.drain_all() {
+ let free_size = range.offset - free_offset;
+ if free_size > 0 {
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree.insert(
+ tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)),
+ );
+ }
+ free_offset = range.endpoint();
+
+ if range.state.is_oneway() {
+ tree.free_oneway_space = tree.free_oneway_space.saturating_sub(range.size);
+ }
+
+ let free_res = alloc.free_tree.pop().unwrap();
+ let tree_node = alloc.tree.pop().unwrap();
+ let mut desc = Descriptor::new(range.offset, range.size);
+ desc.state = Some((range.state, free_res));
+ tree.tree.insert(tree_node.into_node(range.offset, desc));
+ }
+
+ // After the last range, we may need a free range.
+ if free_offset < size {
+ let free_size = size - free_offset;
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree
+ .insert(tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)));
+ }
+
+ tree
+ }
+
+ pub(crate) fn is_empty(&self) -> bool {
+ let mut tree_iter = self.tree.values();
+ // There's always at least one range, because index zero is either the start of a free or
+ // allocated range.
+ let first_value = tree_iter.next().unwrap();
+ if tree_iter.next().is_some() {
+ // There are never two free ranges next to each other, so if there is more than one
+ // descriptor, then at least one of them must hold an allocated range.
+ return false;
+ }
+ // There is only one descriptor. Return true if it is for a free range.
+ first_value.state.is_none()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.tree
+ .values()
+ .filter(|desc| desc.state.is_some())
+ .count()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for desc in self.tree.values() {
+ let state = match &desc.state {
+ Some(state) => &state.0,
+ None => continue,
+ };
+ seq_print!(
+ m,
+ " buffer: {} size {} pid {}",
+ desc.offset,
+ desc.size,
+ state.pid(),
+ );
+ if state.is_oneway() {
+ seq_print!(m, " oneway");
+ }
+ match state {
+ DescriptorState::Reserved(_res) => {
+ seq_print!(m, " reserved\n");
+ }
+ DescriptorState::Allocated(_alloc) => {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ }
+ Ok(())
+ }
+
+ fn find_best_match(&mut self, size: usize) -> Option<&mut Descriptor<T>> {
+ let free_cursor = self.free_tree.cursor_lower_bound(&(size, 0))?;
+ let ((_, offset), ()) = free_cursor.current();
+ self.tree.get_mut(offset)
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ alloc: ReserveNewTreeAlloc<T>,
+ ) -> Result<(usize, bool)> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+ // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
+ let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
+ None => {
+ pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
+ return Err(ENOSPC);
+ }
+ Some(desc) => {
+ let found_size = desc.size;
+ let found_offset = desc.offset;
+
+ // In case we need to break up the descriptor
+ let new_desc = Descriptor::new(found_offset + size, found_size - size);
+ let (tree_node, free_tree_node, desc_node_res) = alloc.initialize(new_desc);
+
+ desc.state = Some((
+ DescriptorState::new(is_oneway, debug_id, pid),
+ desc_node_res,
+ ));
+ desc.size = size;
+
+ (found_size, found_offset, tree_node, free_tree_node)
+ }
+ };
+ self.free_oneway_space = new_oneway_space;
+ self.free_tree.remove(&(found_size, found_off));
+
+ if found_size != size {
+ self.tree.insert(tree_node);
+ self.free_tree.insert(free_tree_node);
+ }
+
+ Ok((found_off, oneway_spam_detected))
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ let mut cursor = self.tree.cursor_lower_bound_mut(&offset).ok_or_else(|| {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ EINVAL
+ })?;
+
+ let (_, desc) = cursor.current_mut();
+
+ if desc.offset != offset {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ return Err(EINVAL);
+ }
+
+ let (reservation, free_node_res) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => {
+ (None, Ok((reservation, free_node_res)))
+ }
+ None => {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (None, Err(EINVAL))
+ }
+ allocated => {
+ pr_warn!(
+ "EPERM from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (allocated, Err(EPERM))
+ }
+ })?;
+
+ let mut size = desc.size;
+ let mut offset = desc.offset;
+ let free_oneway_space_add = if reservation.is_oneway { size } else { 0 };
+
+ self.free_oneway_space += free_oneway_space_add;
+
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ // Compute how large the next free region needs to be to include one more page in
+ // the newly freed range.
+ let add_next_page_needed = match (offset + size) % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => PAGE_SIZE - unalign,
+ };
+ // Compute how large the previous free region needs to be to include one more page
+ // in the newly freed range.
+ let add_prev_page_needed = match offset % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => unalign,
+ };
+
+ // Merge next into current if next is free
+ let remove_next = match cursor.peek_next() {
+ Some((_, next)) if next.state.is_none() => {
+ if next.size >= add_next_page_needed {
+ freed_range.end_page_idx += 1;
+ }
+ self.free_tree.remove(&(next.size, next.offset));
+ size += next.size;
+ true
+ }
+ _ => false,
+ };
+
+ if remove_next {
+ let (_, desc) = cursor.current_mut();
+ desc.size = size;
+ cursor.remove_next();
+ }
+
+ // Merge current into prev if prev is free
+ match cursor.peek_prev_mut() {
+ Some((_, prev)) if prev.state.is_none() => {
+ if prev.size >= add_prev_page_needed {
+ freed_range.start_page_idx -= 1;
+ }
+ // merge previous with current, remove current
+ self.free_tree.remove(&(prev.size, prev.offset));
+ offset = prev.offset;
+ size += prev.size;
+ prev.size = size;
+ cursor.remove_current();
+ }
+ _ => {}
+ };
+
+ self.free_tree
+ .insert(free_node_res.into_node((size, offset), ()));
+
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ let desc = self.tree.get_mut(&offset).ok_or(ENOENT)?;
+
+ desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => (
+ Some((
+ DescriptorState::Allocated(reservation.allocate(data.take())),
+ free_node_res,
+ )),
+ Ok(()),
+ ),
+ other => (other, Err(ENOENT)),
+ })
+ }
+
+ /// Takes an entry at the given offset from [`DescriptorState::Allocated`] to
+ /// [`DescriptorState::Reserved`].
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ let desc = self.tree.get_mut(&offset).ok_or_else(|| {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ ENOENT
+ })?;
+
+ let (debug_id, data) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Allocated(allocation), free_node_res)) => {
+ let (reservation, data) = allocation.deallocate();
+ let debug_id = reservation.debug_id;
+ (
+ Some((DescriptorState::Reserved(reservation), free_node_res)),
+ Ok((debug_id, data)),
+ )
+ }
+ other => {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ (other, Err(ENOENT))
+ }
+ })?;
+
+ Ok((desc.size, debug_id, data))
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for (_, desc) in self.tree.iter_mut() {
+ if let Some((DescriptorState::Allocated(allocation), _)) = &mut desc.state {
+ callback(
+ desc.offset,
+ desc.size,
+ allocation.debug_id(),
+ allocation.take(),
+ );
+ }
+ }
+ }
+
+ /// Find the number and total size of buffers allocated by the current caller.
+ ///
+ /// The idea is that once we cross the threshold, whoever is responsible
+ /// for the low async space is likely to try to send another async transaction,
+ /// and at some point we'll catch them in the act. This is more efficient
+ /// than keeping a map per pid.
+ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+ let mut total_alloc_size = 0;
+ let mut num_buffers = 0;
+ for (_, desc) in self.tree.iter() {
+ if let Some((state, _)) = &desc.state {
+ if state.is_oneway() && state.pid() == calling_pid {
+ total_alloc_size += desc.size;
+ num_buffers += 1;
+ }
+ }
+ }
+
+ // Flag this pid if it holds more than 50 oneway buffers, or more than
+ // 50% of the async space (which is 25% of the total buffer size). Oneway
+ // spam is only reported once a threshold is exceeded.
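+ // For illustration: with a 4 MiB mapping, self.size / 4 is 1 MiB, so a
+ // caller holding 51 oneway buffers, or over 1 MiB of them, is flagged.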
+ num_buffers > 50 || total_alloc_size > self.size / 4
+ }
+}
+
+type TreeDescriptorState<T> = (DescriptorState<T>, FreeNodeRes);
+struct Descriptor<T> {
+ size: usize,
+ offset: usize,
+ state: Option<TreeDescriptorState<T>>,
+}
+
+impl<T> Descriptor<T> {
+ fn new(offset: usize, size: usize) -> Self {
+ Self {
+ size,
+ offset,
+ state: None,
+ }
+ }
+
+ fn try_change_state<F, Data>(&mut self, f: F) -> Result<Data>
+ where
+ F: FnOnce(Option<TreeDescriptorState<T>>) -> (Option<TreeDescriptorState<T>>, Result<Data>),
+ {
+ let (new_state, result) = f(self.state.take());
+ self.state = new_state;
+ result
+ }
+}
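+
+// A minimal usage sketch of `try_change_state` (illustrative only): inspect
+// the state and hand it back unchanged on the failure path, so the
+// descriptor is never left without a state:
+//
+//     desc.try_change_state(|state| match state {
+//         Some(s) => (Some(s), Ok(())), // keep the state, report success
+//         other => (other, Err(ENOENT)), // put it back untouched and fail
+//     })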
+
+// (Descriptor.size, Descriptor.offset)
+type FreeKey = (usize, usize);
+type FreeNodeRes = RBTreeNodeReservation<FreeKey, ()>;
+
+/// An allocation for use by `reserve_new`.
+pub(crate) struct ReserveNewTreeAlloc<T> {
+ tree_node_res: RBTreeNodeReservation<usize, Descriptor<T>>,
+ free_tree_node_res: FreeNodeRes,
+ desc_node_res: FreeNodeRes,
+}
+
+impl<T> ReserveNewTreeAlloc<T> {
+ pub(crate) fn try_new() -> Result<Self> {
+ let tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let free_tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let desc_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ Ok(Self {
+ tree_node_res,
+ free_tree_node_res,
+ desc_node_res,
+ })
+ }
+
+ fn initialize(
+ self,
+ desc: Descriptor<T>,
+ ) -> (
+ RBTreeNode<usize, Descriptor<T>>,
+ RBTreeNode<FreeKey, ()>,
+ FreeNodeRes,
+ ) {
+ let size = desc.size;
+ let offset = desc.offset;
+ (
+ self.tree_node_res.into_node(offset, desc),
+ self.free_tree_node_res.into_node((size, offset), ()),
+ self.desc_node_res,
+ )
+ }
+}
+
+/// An allocation for creating a tree from an `ArrayRangeAllocator`.
+pub(crate) struct FromArrayAllocs<T> {
+ tree: KVec<RBTreeNodeReservation<usize, Descriptor<T>>>,
+ free_tree: KVec<RBTreeNodeReservation<FreeKey, ()>>,
+}
+
+impl<T> FromArrayAllocs<T> {
+ pub(crate) fn try_new(len: usize) -> Result<Self> {
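+ // Worst case when rebuilding from an array allocator: every allocation
+ // is separated by a free gap and there is free space at both ends, so
+ // `len` allocated regions expand to at most `2 * len + 1` descriptors.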
+ let num_descriptors = 2 * len + 1;
+
+ let mut tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ let mut free_tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ free_tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ Ok(Self { tree, free_tree })
+ }
+}
diff --git a/drivers/android/binder/rust_binder.h b/drivers/android/binder/rust_binder.h
new file mode 100644
index 000000000000..31806890ed1a
--- /dev/null
+++ b/drivers/android/binder/rust_binder.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_RUST_BINDER_H
+#define _LINUX_RUST_BINDER_H
+
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * These symbols are exposed by `rust_binderfs.c` and exist here so that Rust
+ * Binder can call them.
+ */
+int init_rust_binderfs(void);
+
+struct dentry;
+struct inode;
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid);
+void rust_binderfs_remove_file(struct dentry *dentry);
+
+#endif
diff --git a/drivers/android/binder/rust_binder_events.c b/drivers/android/binder/rust_binder_events.c
new file mode 100644
index 000000000000..488b1470060c
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* rust_binder_events.c
+ *
+ * Rust Binder tracepoints.
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "rust_binder.h"
+
+const char * const binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
+};
+
+const char * const binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY",
+ "BR_FROZEN_REPLY",
+ "BR_ONEWAY_SPAM_SUSPECT",
+ "BR_TRANSACTION_PENDING_FROZEN"
+};
+
+#define CREATE_TRACE_POINTS
+#define CREATE_RUST_TRACE_POINTS
+#include "rust_binder_events.h"
diff --git a/drivers/android/binder/rust_binder_events.h b/drivers/android/binder/rust_binder_events.h
new file mode 100644
index 000000000000..2f3efbf9dba6
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_INCLUDE_PATH
+#define TRACE_SYSTEM rust_binder
+#define TRACE_INCLUDE_FILE rust_binder_events
+#define TRACE_INCLUDE_PATH ../drivers/android/binder
+
+#if !defined(_RUST_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RUST_BINDER_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rust_binder_ioctl,
+ TP_PROTO(unsigned int cmd, unsigned long arg),
+ TP_ARGS(cmd, arg),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned long, arg)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->arg = arg;
+ ),
+ TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
+);
+
+#endif /* _RUST_BINDER_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/android/binder/rust_binder_internal.h b/drivers/android/binder/rust_binder_internal.h
new file mode 100644
index 000000000000..78288fe7964d
--- /dev/null
+++ b/drivers/android/binder/rust_binder_internal.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* rust_binder_internal.h
+ *
+ * This file contains internal data structures used by Rust Binder. Mostly,
+ * these are type definitions used only by binderfs or things that Rust Binder
+ * defines and exports to binderfs.
+ *
+ * It does not include things exported by binderfs to Rust Binder since this
+ * file is not included as input to bindgen.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+
+#ifndef _LINUX_RUST_BINDER_INTERNAL_H
+#define _LINUX_RUST_BINDER_INTERNAL_H
+
+#define RUST_BINDERFS_SUPER_MAGIC 0x6c6f6f71
+
+#include <linux/seq_file.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * The internal data types in the Rust Binder driver are opaque to C, so we use
+ * void pointer typedefs for these types.
+ */
+typedef void *rust_binder_context;
+
+/**
+ * struct binder_device - information about a binder device node
+ * @minor: the minor number used by this device
+ * @ctx: the Rust Context used by this device, or null for binder-control
+ *
+ * This is used as the private data for files directly in binderfs, but not
+ * files in the binder_logs subdirectory. This struct owns a refcount on `ctx`
+ * and the entry for `minor` in `binderfs_minors`. For binder-control `ctx` is
+ * null.
+ */
+struct binder_device {
+ int minor;
+ rust_binder_context ctx;
+};
+
+int rust_binder_stats_show(struct seq_file *m, void *unused);
+int rust_binder_state_show(struct seq_file *m, void *unused);
+int rust_binder_transactions_show(struct seq_file *m, void *unused);
+int rust_binder_proc_show(struct seq_file *m, void *pid);
+
+extern const struct file_operations rust_binder_fops;
+rust_binder_context rust_binder_new_context(char *name);
+void rust_binder_remove_context(rust_binder_context device);
+
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ * @stats_mode: enable binder stats in binderfs.
+ */
+struct binderfs_mount_opts {
+ int max;
+ int stats_mode;
+};
+
+/**
+ * binderfs_info - information about a binderfs mount
+ * @ipc_ns: The ipc namespace the binderfs mount belongs to.
+ * @control_dentry: This records the dentry of this binderfs mount
+ * binder-control device.
+ * @root_uid: uid that needs to be used when a new binder device is
+ * created.
+ * @root_gid: gid that needs to be used when a new binder device is
+ * created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
+ * @proc_log_dir: Pointer to the directory dentry containing process-specific
+ * logs.
+ */
+struct binderfs_info {
+ struct ipc_namespace *ipc_ns;
+ struct dentry *control_dentry;
+ kuid_t root_uid;
+ kgid_t root_gid;
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
+ struct dentry *proc_log_dir;
+};
+
+#endif /* _LINUX_RUST_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder/rust_binder_main.rs b/drivers/android/binder/rust_binder_main.rs
new file mode 100644
index 000000000000..c79a9e742240
--- /dev/null
+++ b/drivers/android/binder/rust_binder_main.rs
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Binder -- the Android IPC mechanism.
+#![recursion_limit = "256"]
+#![allow(
+ clippy::as_underscore,
+ clippy::ref_as_ptr,
+ clippy::ptr_as_ptr,
+ clippy::cast_lossless
+)]
+
+use kernel::{
+ bindings::{self, seq_file},
+ fs::File,
+ list::{ListArc, ListArcSafe, ListLinksSelfPtr, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::Arc,
+ task::Pid,
+ transmute::AsBytes,
+ types::ForeignOwnable,
+ uaccess::UserSliceWriter,
+};
+
+use crate::{context::Context, page_range::Shrinker, process::Process, thread::Thread};
+
+use core::{
+ ptr::NonNull,
+ sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+};
+
+mod allocation;
+mod context;
+mod deferred_close;
+mod defs;
+mod error;
+mod node;
+mod page_range;
+mod process;
+mod range_alloc;
+mod stats;
+mod thread;
+mod trace;
+mod transaction;
+
+#[allow(warnings)] // generated bindgen code
+mod binderfs {
+ use kernel::bindings::{dentry, inode};
+
+ extern "C" {
+ pub fn init_rust_binderfs() -> kernel::ffi::c_int;
+ }
+ extern "C" {
+ pub fn rust_binderfs_create_proc_file(
+ nodp: *mut inode,
+ pid: kernel::ffi::c_int,
+ ) -> *mut dentry;
+ }
+ extern "C" {
+ pub fn rust_binderfs_remove_file(dentry: *mut dentry);
+ }
+ pub type rust_binder_context = *mut kernel::ffi::c_void;
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct binder_device {
+ pub minor: kernel::ffi::c_int,
+ pub ctx: rust_binder_context,
+ }
+ impl Default for binder_device {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+ }
+}
+
+module! {
+ type: BinderModule,
+ name: "rust_binder",
+ authors: ["Wedson Almeida Filho", "Alice Ryhl"],
+ description: "Android Binder",
+ license: "GPL",
+}
+
+fn next_debug_id() -> usize {
+ static NEXT_DEBUG_ID: AtomicUsize = AtomicUsize::new(0);
+
+ NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)
+}
+
+/// Provides a single place to write Binder return values via the
+/// supplied `UserSliceWriter`.
+pub(crate) struct BinderReturnWriter<'a> {
+ writer: UserSliceWriter,
+ thread: &'a Thread,
+}
+
+impl<'a> BinderReturnWriter<'a> {
+ fn new(writer: UserSliceWriter, thread: &'a Thread) -> Self {
+ BinderReturnWriter { writer, thread }
+ }
+
+ /// Write a return code back to user space.
+ /// `code` should be a `BR_` constant from [`defs`], e.g. [`defs::BR_TRANSACTION_COMPLETE`].
+ fn write_code(&mut self, code: u32) -> Result {
+ stats::GLOBAL_STATS.inc_br(code);
+ self.thread.process.stats.inc_br(code);
+ self.writer.write(&code)
+ }
+
+ /// Write something *other than* a return code to user space.
+ fn write_payload<T: AsBytes>(&mut self, payload: &T) -> Result {
+ self.writer.write(payload)
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+}
+
+/// Specifies how a type should be delivered to the read part of a BINDER_WRITE_READ ioctl.
+///
+/// When a value is pushed to the todo list for a process or thread, it is stored as a trait object
+/// with the type `Arc<dyn DeliverToRead>`. Trait objects are a Rust feature that lets you
+/// implement dynamic dispatch over many different types. This lets us store many different types
+/// in the todo list.
+trait DeliverToRead: ListArcSafe + Send + Sync {
+ /// Performs work. Returns true if remaining work items in the queue should be processed
+ /// immediately, or false if the read should return to the caller before processing
+ /// additional work items.
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool>;
+
+ /// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
+ /// won't be delivered.
+ fn cancel(self: DArc<Self>);
+
+ /// Should we use `wake_up_interruptible_sync` or `wake_up_interruptible` when scheduling this
+ /// work item?
+ ///
+ /// Generally only set to true for non-oneway transactions.
+ fn should_sync_wakeup(&self) -> bool;
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, transaction_prefix: &str) -> Result<()>;
+}
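+
+// `DeliverCode` below is the smallest implementor of `DeliverToRead`: it
+// carries a single `BR_` code, writes it in `do_work` unless skipped, and
+// needs no cancellation logic.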
+
+// Wrapper around a `DeliverToRead` with linked list links.
+#[pin_data]
+struct DTRWrap<T: ?Sized> {
+ #[pin]
+ links: ListLinksSelfPtr<DTRWrap<dyn DeliverToRead>>,
+ #[pin]
+ wrapped: T,
+}
+kernel::list::impl_list_arc_safe! {
+ impl{T: ListArcSafe + ?Sized} ListArcSafe<0> for DTRWrap<T> {
+ tracked_by wrapped: T;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<dyn DeliverToRead> {
+ using ListLinksSelfPtr { self.links };
+ }
+}
+
+impl<T: ?Sized> core::ops::Deref for DTRWrap<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.wrapped
+ }
+}
+
+type DArc<T> = kernel::sync::Arc<DTRWrap<T>>;
+type DLArc<T> = kernel::list::ListArc<DTRWrap<T>>;
+
+impl<T: ListArcSafe> DTRWrap<T> {
+ fn new(val: impl PinInit<T>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- val,
+ })
+ }
+
+ fn arc_try_new(val: T) -> Result<DLArc<T>, kernel::alloc::AllocError> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped: val,
+ }),
+ GFP_KERNEL,
+ )
+ .map_err(|_| kernel::alloc::AllocError)
+ }
+
+ fn arc_pin_init(init: impl PinInit<T>) -> Result<DLArc<T>, kernel::error::Error> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- init,
+ }),
+ GFP_KERNEL,
+ )
+ }
+}
+
+struct DeliverCode {
+ code: u32,
+ skip: AtomicBool,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for DeliverCode { untracked; }
+}
+
+impl DeliverCode {
+ fn new(code: u32) -> Self {
+ Self {
+ code,
+ skip: AtomicBool::new(false),
+ }
+ }
+
+ /// Disable this DeliverCode and make it do nothing.
+ ///
+ /// This is used instead of removing it from the work list, since `List::remove` is
+ /// unsafe, whereas this method is not.
+ fn skip(&self) {
+ self.skip.store(true, Ordering::Relaxed);
+ }
+}
+
+impl DeliverToRead for DeliverCode {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ if !self.skip.load(Ordering::Relaxed) {
+ writer.write_code(self.code)?;
+ }
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}", prefix);
+ if self.skip.load(Ordering::Relaxed) {
+ seq_print!(m, "(skipped) ");
+ }
+ if self.code == defs::BR_TRANSACTION_COMPLETE {
+ seq_print!(m, "transaction complete\n");
+ } else {
+ seq_print!(m, "transaction error: {}\n", self.code);
+ }
+ Ok(())
+ }
+}
+
+fn ptr_align(value: usize) -> Option<usize> {
+ let size = core::mem::size_of::<usize>() - 1;
+ Some(value.checked_add(size)? & !size)
+}
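+
+// For example, with an 8-byte `usize`, `ptr_align(13) == Some(16)` and
+// `ptr_align(usize::MAX) == None` because the rounding addition overflows.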
+
+// SAFETY: We call register in `init`.
+static BINDER_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+
+struct BinderModule {}
+
+impl kernel::Module for BinderModule {
+ fn init(_module: &'static kernel::ThisModule) -> Result<Self> {
+ // SAFETY: The module initializer never runs twice, so we only call this once.
+ unsafe { crate::context::CONTEXTS.init() };
+
+ pr_warn!("Loaded Rust Binder.");
+
+ BINDER_SHRINKER.register(kernel::c_str!("android-binder"))?;
+
+ // SAFETY: The module is being loaded, so we can initialize binderfs.
+ unsafe { kernel::error::to_result(binderfs::init_rust_binderfs())? };
+
+ Ok(Self {})
+ }
+}
+
+/// Makes the inner type Sync.
+#[repr(transparent)]
+pub struct AssertSync<T>(T);
+// SAFETY: Used only to insert `file_operations` into a global, which is safe.
+unsafe impl<T> Sync for AssertSync<T> {}
+
+/// File operations that rust_binderfs.c can use.
+#[no_mangle]
+#[used]
+pub static rust_binder_fops: AssertSync<kernel::bindings::file_operations> = {
+ // SAFETY: All zeroes is safe for the `file_operations` type.
+ let zeroed_ops = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+
+ let ops = kernel::bindings::file_operations {
+ owner: THIS_MODULE.as_ptr(),
+ poll: Some(rust_binder_poll),
+ unlocked_ioctl: Some(rust_binder_ioctl),
+ compat_ioctl: Some(bindings::compat_ptr_ioctl),
+ mmap: Some(rust_binder_mmap),
+ open: Some(rust_binder_open),
+ release: Some(rust_binder_release),
+ flush: Some(rust_binder_flush),
+ ..zeroed_ops
+ };
+ AssertSync(ops)
+};
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_new_context(
+ name: *const kernel::ffi::c_char,
+) -> *mut kernel::ffi::c_void {
+ // SAFETY: The caller will always provide a valid c string here.
+ let name = unsafe { kernel::str::CStr::from_char_ptr(name) };
+ match Context::new(name) {
+ Ok(ctx) => Arc::into_foreign(ctx),
+ Err(_err) => core::ptr::null_mut(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_remove_context(device: *mut kernel::ffi::c_void) {
+ if !device.is_null() {
+ // SAFETY: The caller ensures that the `device` pointer came from a previous call to
+ // `rust_binder_new_device`.
+ let ctx = unsafe { Arc::<Context>::from_foreign(device) };
+ ctx.deregister();
+ drop(ctx);
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_open(
+ inode: *mut bindings::inode,
+ file_ptr: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: The `rust_binderfs.c` file ensures that `i_private` is set to a
+ // `struct binder_device`.
+ let device = unsafe { (*inode).i_private } as *const binderfs::binder_device;
+
+ assert!(!device.is_null());
+
+ // SAFETY: The `rust_binderfs.c` file ensures that `device->ctx` holds a binder context when
+ // using the rust binder fops.
+ let ctx = unsafe { Arc::<Context>::borrow((*device).ctx) };
+
+ // SAFETY: The caller provides a valid file pointer to a new `struct file`.
+ let file = unsafe { File::from_raw_file(file_ptr) };
+ let process = match Process::open(ctx, file) {
+ Ok(process) => process,
+ Err(err) => return err.to_errno(),
+ };
+
+ // SAFETY: This is an `inode` for a newly created binder file.
+ match unsafe { BinderfsProcFile::new(inode, process.task.pid()) } {
+ Ok(Some(file)) => process.inner.lock().binderfs_file = Some(file),
+ Ok(None) => { /* pid already exists */ }
+ Err(err) => return err.to_errno(),
+ }
+
+ // SAFETY: This file is associated with Rust binder, so we own the `private_data` field.
+ unsafe { (*file_ptr).private_data = process.into_foreign() };
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_release(
+ _inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let process = unsafe { Arc::<Process>::from_foreign((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let file = unsafe { File::from_raw_file(file) };
+ Process::release(process, file);
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_ioctl(
+ file: *mut bindings::file,
+ cmd: kernel::ffi::c_uint,
+ arg: kernel::ffi::c_ulong,
+) -> kernel::ffi::c_long {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_mmap(
+ file: *mut bindings::file,
+ vma: *mut bindings::vm_area_struct,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the vma is valid.
+ let area = unsafe { kernel::mm::virt::VmaNew::from_raw(vma) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::mmap(f, unsafe { File::from_raw_file(file) }, area) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_poll(
+ file: *mut bindings::file,
+ wait: *mut bindings::poll_table_struct,
+) -> bindings::__poll_t {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let fileref = unsafe { File::from_raw_file(file) };
+ // SAFETY: The caller ensures that the `PollTable` is valid.
+ match Process::poll(f, fileref, unsafe { PollTable::from_raw(wait) }) {
+ Ok(v) => v,
+ Err(_) => bindings::POLLERR,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_flush(
+ file: *mut bindings::file,
+ _id: bindings::fl_owner_t,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ match Process::flush(f) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_stats_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_stats_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_state_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_state_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_proc_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: Accessing the private field of `seq_file` is okay.
+ let pid = (unsafe { (*ptr).private }) as usize as Pid;
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_proc_show_impl(m, pid) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_transactions_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_transactions_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+fn rust_binder_transactions_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder transactions:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, false)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_stats_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder stats:\n");
+ stats::GLOBAL_STATS.debug_print("", m);
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print_stats(m, &ctx)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_state_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_proc_show_impl(m: &SeqFile, pid: Pid) -> Result<()> {
+ seq_print!(m, "binder proc state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_procs_with_pid(pid)?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+struct BinderfsProcFile(NonNull<bindings::dentry>);
+
+// SAFETY: Safe to drop from any thread.
+unsafe impl Send for BinderfsProcFile {}
+
+impl BinderfsProcFile {
+ /// # Safety
+ ///
+ /// Takes an inode from a newly created binder file.
+ unsafe fn new(nodp: *mut bindings::inode, pid: i32) -> Result<Option<Self>> {
+ // SAFETY: The caller passes an `inode` for a newly created binder file.
+ let dentry = unsafe { binderfs::rust_binderfs_create_proc_file(nodp, pid) };
+ match kernel::error::from_err_ptr(dentry) {
+ Ok(dentry) => Ok(NonNull::new(dentry).map(Self)),
+ Err(err) if err == EEXIST => Ok(None),
+ Err(err) => Err(err),
+ }
+ }
+}
+
+impl Drop for BinderfsProcFile {
+ fn drop(&mut self) {
+ // SAFETY: This is a dentry from `rust_binderfs_create_proc_file` that has not been deleted yet.
+ unsafe { binderfs::rust_binderfs_remove_file(self.0.as_ptr()) };
+ }
+}
diff --git a/drivers/android/binder/rust_binderfs.c b/drivers/android/binder/rust_binderfs.c
new file mode 100644
index 000000000000..c69026df775c
--- /dev/null
+++ b/drivers/android/binder/rust_binderfs.c
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compiler_types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/ipc_namespace.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/namei.h>
+#include <linux/magic.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/fs_parser.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/user_namespace.h>
+#include <linux/xarray.h>
+#include <uapi/asm-generic/errno-base.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+#include "rust_binder.h"
+#include "rust_binder_internal.h"
+
+#define FIRST_INODE 1
+#define SECOND_INODE 2
+#define INODE_OFFSET 3
+#define BINDERFS_MAX_MINOR (1U << MINORBITS)
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
+
+DEFINE_SHOW_ATTRIBUTE(rust_binder_stats);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_state);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_transactions);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_proc);
+
+char *rust_binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(rust_devices, rust_binder_devices_param, charp, 0444);
+
+static dev_t binderfs_dev;
+static DEFINE_MUTEX(binderfs_minors_mutex);
+static DEFINE_IDA(binderfs_minors);
+
+enum binderfs_param {
+ Opt_max,
+ Opt_stats_mode,
+};
+
+enum binderfs_stats_mode {
+ binderfs_stats_mode_unset,
+ binderfs_stats_mode_global,
+};
+
+struct binder_features {
+ bool oneway_spam_detection;
+ bool extended_error;
+ bool freeze_notification;
+};
+
+static const struct constant_table binderfs_param_stats[] = {
+ { "global", binderfs_stats_mode_global },
+ {}
+};
+
+static const struct fs_parameter_spec binderfs_fs_parameters[] = {
+ fsparam_u32("max", Opt_max),
+ fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
+ {}
+};
+
+static struct binder_features binder_features = {
+ .oneway_spam_detection = true,
+ .extended_error = true,
+ .freeze_notification = true,
+};
+
+static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+/**
+ * binderfs_binder_device_create - allocate inode from super block of a
+ * binderfs mount
+ * @ref_inode: inode from which the super block will be taken
+ * @userp: buffer to copy information about new device for userspace to
+ * @req: struct binderfs_device as copied from userspace
+ *
+ * This function allocates a new binder_device and reserves a new minor
+ * number for it.
+ * Minor numbers are limited and tracked globally in binderfs_minors. The
+ * function will stash a struct binder_device for the specific binder
+ * device in i_private of the inode.
+ * It will go on to allocate a new inode from the super block of the
+ * filesystem mount, stash a struct binder_device in its i_private field
+ * and attach a dentry to that inode.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_device_create(struct inode *ref_inode,
+ struct binderfs_device __user *userp,
+ struct binderfs_device *req)
+{
+ int minor, ret;
+ struct dentry *dentry, *root;
+ struct binder_device *device = NULL;
+ rust_binder_context ctx = NULL;
+ struct inode *inode = NULL;
+ struct super_block *sb = ref_inode->i_sb;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ /* Reserve new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ if (++info->device_count <= info->mount_opts.max)
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ else
+ minor = -ENOSPC;
+ if (minor < 0) {
+ --info->device_count;
+ mutex_unlock(&binderfs_minors_mutex);
+ return minor;
+ }
+ mutex_unlock(&binderfs_minors_mutex);
+
+ ret = -ENOMEM;
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ goto err;
+
+ req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+
+ ctx = rust_binder_new_context(req->name);
+ if (!ctx)
+ goto err;
+
+ inode = new_inode(sb);
+ if (!inode)
+ goto err;
+
+ inode->i_ino = minor + INODE_OFFSET;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &rust_binder_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ req->major = MAJOR(binderfs_dev);
+ req->minor = minor;
+ device->ctx = ctx;
+ device->minor = minor;
+
+ if (userp && copy_to_user(userp, req, sizeof(*req))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ root = sb->s_root;
+ dentry = simple_start_creating(root, req->name);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ inode->i_private = device;
+ d_make_persistent(dentry, inode);
+
+ fsnotify_create(root->d_inode, dentry);
+ simple_done_creating(dentry);
+
+ return 0;
+
+err:
+ kfree(device);
+ rust_binder_remove_context(ctx);
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, minor);
+ mutex_unlock(&binderfs_minors_mutex);
+ iput(inode);
+
+ return ret;
+}
+
+/**
+ * binder_ctl_ioctl - handle binder device node allocation requests
+ *
+ * The request handler for the binder-control device. All requests operate on
+ * the binderfs mount the binder-control device resides in:
+ * - BINDER_CTL_ADD
+ * Allocate a new binder device.
+ *
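+ * From userspace this is typically driven by opening binder-control,
+ * filling struct binderfs_device with the desired device name, and
+ * issuing the BINDER_CTL_ADD ioctl; on success the new device's major
+ * and minor numbers are copied back to the caller.
+ *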
+ * Return: %0 on success, negative errno on failure.
+ */
+static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ struct inode *inode = file_inode(file);
+ struct binderfs_device __user *device = (struct binderfs_device __user *)arg;
+ struct binderfs_device device_req;
+
+ switch (cmd) {
+ case BINDER_CTL_ADD:
+ ret = copy_from_user(&device_req, device, sizeof(device_req));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = binderfs_binder_device_create(inode, device, &device_req);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void binderfs_evict_inode(struct inode *inode)
+{
+ struct binder_device *device = inode->i_private;
+ struct binderfs_info *info = BINDERFS_SB(inode->i_sb);
+
+ clear_inode(inode);
+
+ if (!S_ISCHR(inode->i_mode) || !device)
+ return;
+
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, device->minor);
+ mutex_unlock(&binderfs_minors_mutex);
+
+ /* ctx is null for binder-control, but this function ignores null pointers */
+ rust_binder_remove_context(device->ctx);
+
+ kfree(device);
+}
+
+static int binderfs_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ int opt;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct fs_parse_result result;
+
+ opt = fs_parse(fc, binderfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_max:
+ if (result.uint_32 > BINDERFS_MAX_MINOR)
+ return invalfc(fc, "Bad value for '%s'", param->key);
+
+ ctx->max = result.uint_32;
+ break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ctx->stats_mode = result.uint_32;
+ break;
+ default:
+ return invalfc(fc, "Unsupported parameter '%s'", param->key);
+ }
+
+ return 0;
+}
+
+static int binderfs_fs_context_reconfigure(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb);
+
+ if (info->mount_opts.stats_mode != ctx->stats_mode)
+ return invalfc(fc, "Binderfs stats mode cannot be changed during a remount");
+
+ info->mount_opts.stats_mode = ctx->stats_mode;
+ info->mount_opts.max = ctx->max;
+ return 0;
+}
+
+static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct binderfs_info *info = BINDERFS_SB(root->d_sb);
+
+ if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+ seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+ switch (info->mount_opts.stats_mode) {
+ case binderfs_stats_mode_unset:
+ break;
+ case binderfs_stats_mode_global:
+ seq_puts(seq, ",stats=global");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct super_operations binderfs_super_ops = {
+ .evict_inode = binderfs_evict_inode,
+ .show_options = binderfs_show_options,
+ .statfs = simple_statfs,
+};
+
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+ struct binderfs_info *info = dentry->d_sb->s_fs_info;
+
+ return info->control_dentry == dentry;
+}
+
+static int binderfs_rename(struct mnt_idmap *idmap,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (is_binderfs_control_device(old_dentry) ||
+ is_binderfs_control_device(new_dentry))
+ return -EPERM;
+
+ return simple_rename(idmap, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
+}
+
+static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (is_binderfs_control_device(dentry))
+ return -EPERM;
+
+ return simple_unlink(dir, dentry);
+}
+
+static const struct file_operations binder_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .unlocked_ioctl = binder_ctl_ioctl,
+ .compat_ioctl = binder_ctl_ioctl,
+ .llseek = noop_llseek,
+};
+
+/**
+ * binderfs_binder_ctl_create - create a new binder-control device
+ * @sb: super block of the binderfs mount
+ *
+ * This function creates a new binder-control device node in the binderfs mount
+ * referred to by @sb.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_ctl_create(struct super_block *sb)
+{
+ int minor, ret;
+ struct dentry *dentry;
+ struct binder_device *device;
+ struct inode *inode = NULL;
+ struct dentry *root = sb->s_root;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ /* If we have already created a binder-control node, return. */
+ if (info->control_dentry) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ inode = new_inode(sb);
+ if (!inode)
+ goto out;
+
+ /* Reserve a new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ mutex_unlock(&binderfs_minors_mutex);
+ if (minor < 0) {
+ ret = minor;
+ goto out;
+ }
+
+ inode->i_ino = SECOND_INODE;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &binder_ctl_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ device->minor = minor;
+ device->ctx = NULL;
+
+ dentry = d_alloc_name(root, "binder-control");
+ if (!dentry)
+ goto out;
+
+ inode->i_private = device;
+ info->control_dentry = dentry;
+ d_add(dentry, inode);
+
+ return 0;
+
+out:
+ kfree(device);
+ iput(inode);
+
+ return ret;
+}
+
+static const struct inode_operations binderfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .rename = binderfs_rename,
+ .unlink = binderfs_unlink,
+};
+
+static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret;
+
+ ret = new_inode(sb);
+ if (ret) {
+ ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
+ ret->i_mode = mode;
+ simple_inode_init_ts(ret);
+ }
+ return ret;
+}
+
+void rust_binderfs_remove_file(struct dentry *dentry)
+{
+ simple_recursive_removal(dentry, NULL);
+}
+
+static struct dentry *rust_binderfs_create_file(struct dentry *parent, const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *new_inode;
+
+ new_inode = binderfs_make_inode(parent->d_sb, S_IFREG | 0444);
+ if (!new_inode)
+ return ERR_PTR(-ENOMEM);
+ new_inode->i_fop = fops;
+ new_inode->i_private = data;
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(new_inode);
+ return dentry;
+ }
+
+ d_make_persistent(dentry, new_inode);
+ fsnotify_create(parent->d_inode, dentry);
+ simple_done_creating(dentry);
+ return dentry;
+}
+
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid)
+{
+ struct binderfs_info *info = nodp->i_sb->s_fs_info;
+ struct dentry *dir = info->proc_log_dir;
+ char strbuf[20 + 1];
+ void *data = (void *)(unsigned long) pid;
+
+ if (!dir)
+ return NULL;
+
+ snprintf(strbuf, sizeof(strbuf), "%u", pid);
+ return rust_binderfs_create_file(dir, strbuf, &rust_binder_proc_fops, data);
+}
+
+static struct dentry *binderfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *new_inode;
+
+ new_inode = binderfs_make_inode(parent->d_sb, S_IFDIR | 0755);
+ if (!new_inode)
+ return ERR_PTR(-ENOMEM);
+
+ new_inode->i_fop = &simple_dir_operations;
+ new_inode->i_op = &simple_dir_inode_operations;
+
+ dentry = simple_start_creating(parent, name);
+ if (IS_ERR(dentry)) {
+ iput(new_inode);
+ return dentry;
+ }
+
+ inc_nlink(parent->d_inode);
+ set_nlink(new_inode, 2);
+ d_make_persistent(dentry, new_inode);
+ fsnotify_mkdir(parent->d_inode, dentry);
+ simple_done_creating(dentry);
+ return dentry;
+}
+
+static int binder_features_show(struct seq_file *m, void *unused)
+{
+ bool *feature = m->private;
+
+ seq_printf(m, "%d\n", *feature);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(binder_features);
+
+static int init_binder_features(struct super_block *sb)
+{
+ struct dentry *dentry, *dir;
+
+ dir = binderfs_create_dir(sb->s_root, "features");
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ dentry = rust_binderfs_create_file(dir, "oneway_spam_detection",
+ &binder_features_fops,
+ &binder_features.oneway_spam_detection);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "extended_error",
+ &binder_features_fops,
+ &binder_features.extended_error);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ return 0;
+}
+
+static int init_binder_logs(struct super_block *sb)
+{
+ struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ struct binderfs_info *info;
+ int ret = 0;
+
+ binder_logs_root_dir = binderfs_create_dir(sb->s_root,
+ "binder_logs");
+ if (IS_ERR(binder_logs_root_dir)) {
+ ret = PTR_ERR(binder_logs_root_dir);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "stats",
+ &rust_binder_stats_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "state",
+ &rust_binder_state_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "transactions",
+ &rust_binder_transactions_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
+ if (IS_ERR(proc_log_dir)) {
+ ret = PTR_ERR(proc_log_dir);
+ goto out;
+ }
+ info = sb->s_fs_info;
+ info->proc_log_dir = proc_log_dir;
+
+out:
+ return ret;
+}
+
+static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ int ret;
+ struct binderfs_info *info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct inode *inode = NULL;
+ struct binderfs_device device_info = {};
+ const char *name;
+ size_t len;
+
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+
+ /*
+ * The binderfs filesystem can be mounted by userns root in a
+ * non-initial userns. By default such mounts have the SB_I_NODEV flag
+ * set in s_iflags to prevent security issues where userns root can
+ * just create random device nodes via mknod() since it owns the
+ * filesystem mount. But binderfs does not allow creating any files,
+ * including device nodes. The only way to create binder device nodes
+ * is through the binder-control device, which userns root is explicitly
+ * allowed to do. So removing the SB_I_NODEV flag from s_iflags is both
+ * necessary and safe.
+ */
+ sb->s_iflags &= ~SB_I_NODEV;
+ sb->s_iflags |= SB_I_NOEXEC;
+ sb->s_magic = RUST_BINDERFS_SUPER_MAGIC;
+ sb->s_op = &binderfs_super_ops;
+ sb->s_time_gran = 1;
+
+ sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ if (!sb->s_fs_info)
+ return -ENOMEM;
+ info = sb->s_fs_info;
+
+ info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+ info->root_gid = make_kgid(sb->s_user_ns, 0);
+ if (!gid_valid(info->root_gid))
+ info->root_gid = GLOBAL_ROOT_GID;
+ info->root_uid = make_kuid(sb->s_user_ns, 0);
+ if (!uid_valid(info->root_uid))
+ info->root_uid = GLOBAL_ROOT_UID;
+ info->mount_opts.max = ctx->max;
+ info->mount_opts.stats_mode = ctx->stats_mode;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+
+ inode->i_ino = FIRST_INODE;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_mode = S_IFDIR | 0755;
+ simple_inode_init_ts(inode);
+ inode->i_op = &binderfs_dir_inode_operations;
+ set_nlink(inode, 2);
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ ret = binderfs_binder_ctl_create(sb);
+ if (ret)
+ return ret;
+
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ strscpy(device_info.name, name, len + 1);
+ ret = binderfs_binder_device_create(inode, NULL, &device_info);
+ if (ret)
+ return ret;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ ret = init_binder_features(sb);
+ if (ret)
+ return ret;
+
+ if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
+ return init_binder_logs(sb);
+
+ return 0;
+}
+
+static int binderfs_fs_context_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, binderfs_fill_super);
+}
+
+static void binderfs_fs_context_free(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+
+ kfree(ctx);
+}
+
+static const struct fs_context_operations binderfs_fs_context_ops = {
+ .free = binderfs_fs_context_free,
+ .get_tree = binderfs_fs_context_get_tree,
+ .parse_param = binderfs_fs_context_parse_param,
+ .reconfigure = binderfs_fs_context_reconfigure,
+};
+
+static int binderfs_init_fs_context(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx;
+
+ ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max = BINDERFS_MAX_MINOR;
+ ctx->stats_mode = binderfs_stats_mode_unset;
+
+ fc->fs_private = ctx;
+ fc->ops = &binderfs_fs_context_ops;
+
+ return 0;
+}
+
+static void binderfs_kill_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ /*
+ * During inode eviction struct binderfs_info is still needed, so
+ * first wipe the super_block and then free struct binderfs_info.
+ */
+ kill_anon_super(sb);
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+}
+
+static struct file_system_type binder_fs_type = {
+ .name = "binder",
+ .init_fs_context = binderfs_init_fs_context,
+ .parameters = binderfs_fs_parameters,
+ .kill_sb = binderfs_kill_super,
+ .fs_flags = FS_USERNS_MOUNT,
+};
+
+int init_rust_binderfs(void)
+{
+ int ret;
+ const char *name;
+ size_t len;
+
+ /* Verify that the default binderfs device names are valid. */
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ if (len > BINDERFS_MAX_NAME)
+ return -E2BIG;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ /* Allocate new major number for binderfs. */
+ ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
+ "rust_binder");
+ if (ret)
+ return ret;
+
+ ret = register_filesystem(&binder_fs_type);
+ if (ret) {
+ unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/android/binder/stats.rs b/drivers/android/binder/stats.rs
new file mode 100644
index 000000000000..037002651941
--- /dev/null
+++ b/drivers/android/binder/stats.rs
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Keep track of statistics for binder_logs.
+
+use crate::defs::*;
+use core::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use kernel::{ioctl::_IOC_NR, seq_file::SeqFile, seq_print};
+
+const BC_COUNT: usize = _IOC_NR(BC_REPLY_SG) as usize + 1;
+const BR_COUNT: usize = _IOC_NR(BR_TRANSACTION_PENDING_FROZEN) as usize + 1;
+
+pub(crate) static GLOBAL_STATS: BinderStats = BinderStats::new();
+
+pub(crate) struct BinderStats {
+ bc: [AtomicU32; BC_COUNT],
+ br: [AtomicU32; BR_COUNT],
+}
+
+impl BinderStats {
+ pub(crate) const fn new() -> Self {
+ #[expect(clippy::declare_interior_mutable_const)]
+ const ZERO: AtomicU32 = AtomicU32::new(0);
+
+ Self {
+ bc: [ZERO; BC_COUNT],
+ br: [ZERO; BR_COUNT],
+ }
+ }
+
+ pub(crate) fn inc_bc(&self, bc: u32) {
+ let idx = _IOC_NR(bc) as usize;
+ if let Some(bc_ref) = self.bc.get(idx) {
+ bc_ref.fetch_add(1, Relaxed);
+ }
+ }
+
+ pub(crate) fn inc_br(&self, br: u32) {
+ let idx = _IOC_NR(br) as usize;
+ if let Some(br_ref) = self.br.get(idx) {
+ br_ref.fetch_add(1, Relaxed);
+ }
+ }
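+
+ // For illustration: the _IOC_NR bits of a code give its table index, so
+ // _IOC_NR(BR_TRANSACTION) selects the matching slot of `br`; codes past
+ // the end of the table are silently ignored by the bounds-checked `get`.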
+
+ pub(crate) fn debug_print(&self, prefix: &str, m: &SeqFile) {
+ for (i, cnt) in self.bc.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, command_string(i), cnt);
+ }
+ }
+ for (i, cnt) in self.br.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, return_string(i), cnt);
+ }
+ }
+ }
+}
+
+mod strings {
+ use core::str::from_utf8_unchecked;
+ use kernel::str::{CStr, CStrExt as _};
+
+ extern "C" {
+ static binder_command_strings: [*const u8; super::BC_COUNT];
+ static binder_return_strings: [*const u8; super::BR_COUNT];
+ }
+
+ pub(super) fn command_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_command_strings` is always safe.
+ let c_str_ptr = unsafe { binder_command_strings[i] };
+ // SAFETY: The `binder_command_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.to_bytes();
+ // SAFETY: The `binder_command_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+
+ pub(super) fn return_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_return_strings` is always safe.
+ let c_str_ptr = unsafe { binder_return_strings[i] };
+ // SAFETY: The `binder_return_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.to_bytes();
+ // SAFETY: The `binder_return_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+}
+use strings::{command_string, return_string};
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
new file mode 100644
index 000000000000..1a8e6fdc0dc4
--- /dev/null
+++ b/drivers/android/binder/thread.rs
@@ -0,0 +1,1596 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Thread` type, which represents a userspace thread that is using
+//! binder.
+//!
+//! The `Process` object stores all of the threads in an rb tree.
+
+use kernel::{
+ bindings,
+ fs::{File, LocalFile},
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ security,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::{PollCondVar, PollTable},
+ sync::{Arc, SpinLock},
+ task::Task,
+ types::ARef,
+ uaccess::UserSlice,
+ uapi,
+};
+
+use crate::{
+ allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
+ defs::*,
+ error::BinderResult,
+ process::{GetWorkOrRegister, Process},
+ ptr_align,
+ stats::GLOBAL_STATS,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
+};
+
+use core::{
+ mem::size_of,
+ sync::atomic::{AtomicU32, Ordering},
+};
+
+/// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
+/// call and is discarded when it returns.
+struct ScatterGatherState {
+ /// A struct that tracks the amount of unused buffer space.
+ unused_buffer_space: UnusedBufferSpace,
+ /// Scatter-gather entries to copy.
+ sg_entries: KVec<ScatterGatherEntry>,
+ /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
+ /// was processed and all of its ancestors. The array is in sorted order.
+ ancestors: KVec<usize>,
+}
+
+/// This entry specifies an additional buffer that should be copied using the scatter-gather
+/// mechanism.
+struct ScatterGatherEntry {
+ /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
+ obj_index: usize,
+ /// Offset in target buffer.
+ offset: usize,
+ /// User address in source buffer.
+ sender_uaddr: usize,
+ /// Number of bytes to copy.
+ length: usize,
+ /// The minimum offset of the next fixup in this buffer.
+ fixup_min_offset: usize,
+ /// The offsets within this buffer that contain pointers which should be translated.
+ pointer_fixups: KVec<PointerFixupEntry>,
+}
+
+/// This entry specifies that a fixup should happen at `target_offset` of the
+/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
+/// and is applied later. Otherwise if `skip` is zero, then the size of the
+/// fixup is `sizeof::<u64>()` and `pointer_value` is written to the buffer.
+struct PointerFixupEntry {
+ /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
+ skip: usize,
+ /// The translated pointer to write when `skip` is zero.
+ pointer_value: u64,
+ /// The offset at which the value should be written. The offset is relative
+ /// to the original buffer.
+ target_offset: usize,
+}
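+
+// For illustration: a translated `binder_buffer_object` yields an entry with
+// `skip == 0` and the recipient-side pointer in `pointer_value`, while a
+// `binder_fd_array_object` records the byte length of its fd region in
+// `skip` so the file descriptors can be patched in later.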
+
+/// Return type of `apply_and_validate_fixup_in_parent`.
+struct ParentFixupInfo {
+ /// The index of the parent buffer in `sg_entries`.
+ parent_sg_index: usize,
+ /// The number of ancestors of the buffer.
+ ///
+ /// The buffer is considered an ancestor of itself, so this is always at
+ /// least one.
+ num_ancestors: usize,
+ /// New value of `fixup_min_offset` if this fixup is applied.
+ new_min_offset: usize,
+ /// The offset of the fixup in the target buffer.
+ target_offset: usize,
+}
+
+impl ScatterGatherState {
+ /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
+ /// to access a region in its parent buffer. These accesses have various
+ /// restrictions, which this method verifies.
+ ///
+ /// The `parent_offset` and `length` arguments describe the offset and
+ /// length of the access in the parent buffer.
+ ///
+ /// # Detailed restrictions
+ ///
+ /// Obviously the fixup must be in-bounds for the parent buffer.
+ ///
+ /// For safety reasons, we only allow fixups inside a buffer to happen
+ /// at increasing offsets; additionally, we only allow fixup on the last
+ /// buffer object that was verified, or one of its parents.
+ ///
+ /// Example of what is allowed:
+ ///
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = C, offset = 0)
+ /// E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ ///
+ /// Examples of what is not allowed:
+ ///
+ /// Decreasing offsets within the same parent:
+ /// A
+ /// C (parent = A, offset = 16)
+ /// B (parent = A, offset = 0) // decreasing offset within A
+ ///
+ /// Referring to a parent that wasn't the last object or any of its parents:
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = B, offset = 0) // B is not A or any of A's parents
+ fn validate_parent_fixup(
+ &self,
+ parent: usize,
+ parent_offset: usize,
+ length: usize,
+ ) -> Result<ParentFixupInfo> {
+ // Using `position` would also be correct, but `rposition` avoids
+ // quadratic running times.
+ let ancestors_i = self
+ .ancestors
+ .iter()
+ .copied()
+ .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
+ .ok_or(EINVAL)?;
+ let sg_idx = self.ancestors[ancestors_i];
+ let sg_entry = match self.sg_entries.get(sg_idx) {
+ Some(sg_entry) => sg_entry,
+ None => {
+ pr_err!(
+ "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
+ ancestors_i,
+ sg_idx,
+ self.sg_entries.len()
+ );
+ return Err(EINVAL);
+ }
+ };
+ if sg_entry.fixup_min_offset > parent_offset {
+ pr_warn!(
+ "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
+ sg_entry.fixup_min_offset,
+ parent_offset
+ );
+ return Err(EINVAL);
+ }
+ let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
+ if new_min_offset > sg_entry.length {
+ pr_warn!(
+ "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
+ new_min_offset,
+ sg_entry.length
+ );
+ return Err(EINVAL);
+ }
+ let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
+ // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
+ // most `self.ancestors.len()`, which also fits in a usize.
+ Ok(ParentFixupInfo {
+ parent_sg_index: sg_idx,
+ num_ancestors: ancestors_i + 1,
+ new_min_offset,
+ target_offset,
+ })
+ }
+}
+
+/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
+/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
+/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
+struct UnusedBufferSpace {
+ /// The start of the remaining space.
+ offset: usize,
+ /// The end of the remaining space.
+ limit: usize,
+}
+impl UnusedBufferSpace {
+ /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
+ /// into the buffer is returned.
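+ ///
+ /// For example (illustrative, assuming `ptr_align` rounds up to 8 bytes on
+ /// the target): with `offset = 0` and `limit = 32`, `claim_next(5)` returns
+ /// `Ok(0)` and advances `offset` to 8; a following `claim_next(30)` needs 32
+ /// aligned bytes and fails with `EINVAL` since `8 + 32 > 32`.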
+ fn claim_next(&mut self, size: usize) -> Result<usize> {
+ // We require every chunk to be aligned.
+ let size = ptr_align(size).ok_or(EINVAL)?;
+ let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
+
+ if new_offset <= self.limit {
+ let offset = self.offset;
+ self.offset = new_offset;
+ Ok(offset)
+ } else {
+ Err(EINVAL)
+ }
+ }
+}
+
+pub(crate) enum PushWorkRes {
+ Ok,
+ FailedDead(DLArc<dyn DeliverToRead>),
+}
+
+impl PushWorkRes {
+ fn is_ok(&self) -> bool {
+ match self {
+ PushWorkRes::Ok => true,
+ PushWorkRes::FailedDead(_) => false,
+ }
+ }
+}
+
+/// The fields of `Thread` protected by the spinlock.
+struct InnerThread {
+ /// Determines the looper state of the thread. It is a bit-wise combination of the constants
+ /// prefixed with `LOOPER_`.
+ looper_flags: u32,
+
+ /// Determines whether the looper should return.
+ looper_need_return: bool,
+
+ /// Determines if thread is dead.
+ is_dead: bool,
+
+ /// Work item used to deliver error codes to the thread that started a transaction. Stored here
+ /// so that it can be reused.
+ reply_work: DArc<ThreadError>,
+
+ /// Work item used to deliver error codes to the current thread. Stored here so that it can be
+ /// reused.
+ return_work: DArc<ThreadError>,
+
+ /// Determines whether the work list below should be processed. When set to false, `work_list`
+ /// is treated as if it were empty.
+ process_work_list: bool,
+ /// List of work items to deliver to userspace.
+ work_list: List<DTRWrap<dyn DeliverToRead>>,
+ current_transaction: Option<DArc<Transaction>>,
+
+ /// Extended error information for this thread.
+ extended_error: ExtendedError,
+}
+
+const LOOPER_REGISTERED: u32 = 0x01;
+const LOOPER_ENTERED: u32 = 0x02;
+const LOOPER_EXITED: u32 = 0x04;
+const LOOPER_INVALID: u32 = 0x08;
+const LOOPER_WAITING: u32 = 0x10;
+const LOOPER_WAITING_PROC: u32 = 0x20;
+const LOOPER_POLL: u32 = 0x40;
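+
+// Illustration (not an exhaustive state machine): a thread that issues
+// BC_ENTER_LOOPER and later BC_REGISTER_LOOPER ends up with looper_flags set
+// to LOOPER_ENTERED | LOOPER_REGISTERED | LOOPER_INVALID (0x0b), because a
+// thread may join the pool through only one of the two commands.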
+
+impl InnerThread {
+ fn new() -> Result<Self> {
+ fn next_err_id() -> u32 {
+ static EE_ID: AtomicU32 = AtomicU32::new(0);
+ EE_ID.fetch_add(1, Ordering::Relaxed)
+ }
+
+ Ok(Self {
+ looper_flags: 0,
+ looper_need_return: false,
+ is_dead: false,
+ process_work_list: false,
+ reply_work: ThreadError::try_new()?,
+ return_work: ThreadError::try_new()?,
+ work_list: List::new(),
+ current_transaction: None,
+ extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
+ })
+ }
+
+ fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
+ if !self.process_work_list {
+ return None;
+ }
+
+ let ret = self.work_list.pop_front();
+ self.process_work_list = !self.work_list.is_empty();
+ ret
+ }
+
+ fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ if self.is_dead {
+ PushWorkRes::FailedDead(work)
+ } else {
+ self.work_list.push_back(work);
+ self.process_work_list = true;
+ PushWorkRes::Ok
+ }
+ }
+
+ fn push_reply_work(&mut self, code: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
+ work.set_error_code(code);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread reply work is already in use.");
+ }
+ }
+
+ fn push_return_work(&mut self, reply: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
+ work.set_error_code(reply);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread return work is already in use.");
+ }
+ }
+
+ /// Used to push work items that do not need to be processed immediately and can wait until the
+ /// thread gets another work item.
+ fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
+ self.work_list.push_back(work);
+ }
+
+ /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
+ /// (that it could respond to) but it has also issued a transaction, it must first wait for the
+ /// previously-issued transaction to complete.
+ ///
+ /// The `thread` parameter should be the thread containing this `ThreadInner`.
+ fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
+ let transaction = self.current_transaction.take().ok_or(EINVAL)?;
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ self.current_transaction = Some(transaction);
+ return Err(EINVAL);
+ }
+ // Find a new current transaction for this thread.
+ self.current_transaction = transaction.find_from(thread).cloned();
+ Ok(transaction)
+ }
+
+ fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
+ match self.current_transaction.take() {
+ None => false,
+ Some(old) => {
+ if !Arc::ptr_eq(transaction, &old) {
+ self.current_transaction = Some(old);
+ return false;
+ }
+ self.current_transaction = old.clone_next();
+ true
+ }
+ }
+ }
+
+ fn looper_enter(&mut self) {
+ self.looper_flags |= LOOPER_ENTERED;
+ if self.looper_flags & LOOPER_REGISTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_register(&mut self, valid: bool) {
+ self.looper_flags |= LOOPER_REGISTERED;
+ if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_exit(&mut self) {
+ self.looper_flags |= LOOPER_EXITED;
+ }
+
+ /// Determines whether the thread is part of a pool, i.e., if it is a looper.
+ fn is_looper(&self) -> bool {
+ self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
+ }
+
+ /// Determines whether the thread should attempt to fetch work items from the process queue.
+ /// This is generally the case when the thread is registered as a looper and not part of a
+ /// transaction stack. But if there is local work, we want to return to userspace before we
+ /// deliver any remote work.
+ fn should_use_process_work_queue(&self) -> bool {
+ self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
+ }
+
+ fn poll(&mut self) -> u32 {
+ self.looper_flags |= LOOPER_POLL;
+ if self.process_work_list || self.looper_need_return {
+ bindings::POLLIN
+ } else {
+ 0
+ }
+ }
+}
+
+/// This represents a thread that's used with binder.
+#[pin_data]
+pub(crate) struct Thread {
+ pub(crate) id: i32,
+ pub(crate) process: Arc<Process>,
+ pub(crate) task: ARef<Task>,
+ #[pin]
+ inner: SpinLock<InnerThread>,
+ #[pin]
+ work_condvar: PollCondVar,
+ /// Used to insert this thread into the process' `ready_threads` list.
+ ///
+ /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
+ #[pin]
+ links: ListLinks,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Thread {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Thread {
+ using ListLinks { self.links };
+ }
+}
+
+impl Thread {
+ pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
+ let inner = InnerThread::new()?;
+
+ Arc::pin_init(
+ try_pin_init!(Thread {
+ id,
+ process,
+ task: ARef::from(&**kernel::current!()),
+ inner <- kernel::new_spinlock!(inner, "Thread::inner"),
+ work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
+ links <- ListLinks::new(),
+ links_track <- AtomicTracker::new(),
+ }),
+ GFP_KERNEL,
+ )
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
+ let inner = self.inner.lock();
+
+ if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
+ seq_print!(
+ m,
+ " thread {}: l {:02x} need_return {}\n",
+ self.id,
+ inner.looper_flags,
+ inner.looper_need_return,
+ );
+ }
+
+ let mut t_opt = inner.current_transaction.as_ref();
+ while let Some(t) = t_opt {
+ if Arc::ptr_eq(&t.from, self) {
+ t.debug_print_inner(m, " outgoing transaction ");
+ t_opt = t.from_parent.as_ref();
+ } else if Arc::ptr_eq(&t.to, &self.process) {
+ t.debug_print_inner(m, " incoming transaction ");
+ t_opt = t.find_from(self);
+ } else {
+ t.debug_print_inner(m, " bad transaction ");
+ t_opt = None;
+ }
+ }
+
+ for work in &inner.work_list {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
+ let mut writer = data.writer();
+ let ee = self.inner.lock().extended_error;
+ writer.write(&ee)?;
+ Ok(())
+ }
+
+ pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
+ self.inner.lock().current_transaction = Some(transaction);
+ }
+
+ pub(crate) fn has_current_transaction(&self) -> bool {
+ self.inner.lock().current_transaction.is_some()
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
+ /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
+ /// signal); otherwise it returns immediately, indicating that none is available.
+ fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ {
+ let mut inner = self.inner.lock();
+ if inner.looper_need_return {
+ return Ok(inner.pop_work());
+ }
+ }
+
+ // Try once if the caller does not want to wait.
+ if !wait {
+ return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Loop waiting only on the local queue (i.e., not registering with the process queue).
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ return Err(EINTR);
+ }
+ if inner.looper_need_return {
+ return Ok(None);
+ }
+ }
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
+ /// queue if none is available locally.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain. If it
+ /// is, the local version (`get_work_local`) should be used instead.
+ fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ // Try to get work from the thread's work queue, using only a local lock.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+ if inner.looper_need_return {
+ drop(inner);
+ return Ok(self.process.get_work());
+ }
+ }
+
+ // If the caller doesn't want to wait, try to grab work from the process queue.
+ //
+ // We know nothing will have been queued directly to the thread queue because it is not in
+ // a transaction and it is not in the process' ready list.
+ if !wait {
+ return self.process.get_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Get work from the process queue. If none is available, atomically register as ready.
+ let reg = match self.process.get_work_or_register(self) {
+ GetWorkOrRegister::Work(work) => return Ok(Some(work)),
+ GetWorkOrRegister::Register(reg) => reg,
+ };
+
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);
+
+ if signal_pending || inner.looper_need_return {
+ // We need to return now. We need to pull the thread off the list of ready threads
+ // (by dropping `reg`), then check the state again after it's off the list to
+ // ensure that something was not queued in the meantime. If something has been
+ // queued, we just return it (instead of the error).
+ drop(inner);
+ drop(reg);
+
+ let res = match self.inner.lock().pop_work() {
+ Some(work) => Ok(Some(work)),
+ None if signal_pending => Err(EINTR),
+ None => Ok(None),
+ };
+ return res;
+ }
+ }
+ }
+
+ /// Push the provided work item to be delivered to user space via this thread.
+ ///
+ /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ let sync = work.should_sync_wakeup();
+
+ let res = self.inner.lock().push_work(work);
+
+ if res.is_ok() {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ res
+ }
+
+ /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
+ /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
+ pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ let mut inner = self.inner.lock();
+ if inner.is_looper() && !inner.is_dead {
+ inner.push_work(work);
+ Ok(())
+ } else {
+ drop(inner);
+ self.process.push_work(work)
+ }
+ }
+
+ pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
+ self.inner.lock().push_work_deferred(work);
+ }
+
+ pub(crate) fn push_return_work(&self, reply: u32) {
+ self.inner.lock().push_return_work(reply);
+ }
+
+ fn translate_object(
+ &self,
+ obj_index: usize,
+ offset: usize,
+ object: BinderObjectRef<'_>,
+ view: &mut AllocationView<'_>,
+ allow_fds: bool,
+ sg_state: &mut ScatterGatherState,
+ ) -> BinderResult {
+ match object {
+ BinderObjectRef::Binder(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
+ // representation.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
+ let cookie = obj.cookie as _;
+ let flags = obj.flags as _;
+ let node = self
+ .process
+ .as_arc_borrow()
+ .get_node(ptr, cookie, flags, strong, self)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Handle(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ let node = self.process.get_node_from_handle(handle, strong)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Fd(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+
+ // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
+ let fd = unsafe { obj.__bindgen_anon_1.fd };
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ let mut obj_write = BinderFdObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FD;
+ // This will be overwritten with the actual fd when the transaction is received.
+ obj_write.__bindgen_anon_1.fd = u32::MAX;
+ obj_write.cookie = obj.cookie;
+ view.write::<BinderFdObject>(offset, &obj_write)?;
+
+ const FD_FIELD_OFFSET: usize =
+ core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);
+
+ let field_offset = offset + FD_FIELD_OFFSET;
+
+ view.alloc.info_add_fd(file, field_offset, false)?;
+ }
+ BinderObjectRef::Ptr(obj) => {
+ let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
+ let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
+ Ok(alloc_offset) => alloc_offset,
+ Err(err) => {
+ pr_warn!(
+ "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
+ sg_state.unused_buffer_space.offset,
+ sg_state.unused_buffer_space.limit,
+ obj_length,
+ );
+ return Err(err.into());
+ }
+ };
+
+ let sg_state_idx = sg_state.sg_entries.len();
+ sg_state.sg_entries.push(
+ ScatterGatherEntry {
+ obj_index,
+ offset: alloc_offset,
+ sender_uaddr: obj.buffer as _,
+ length: obj_length,
+ pointer_fixups: KVec::new(),
+ fixup_min_offset: 0,
+ },
+ GFP_KERNEL,
+ )?;
+
+ let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
+
+ if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
+ sg_state.ancestors.clear();
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+ } else {
+ // Another buffer also has a pointer to this buffer, and we need to fixup that
+ // pointer too.
+
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(
+ parent_index,
+ parent_offset,
+ size_of::<u64>(),
+ )?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry.pointer_fixups.push(
+ PointerFixupEntry {
+ skip: 0,
+ pointer_value: buffer_ptr_in_user_space,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )?;
+ }
+
+ let mut obj_write = BinderBufferObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_PTR;
+ obj_write.flags = obj.flags;
+ obj_write.buffer = buffer_ptr_in_user_space;
+ obj_write.length = obj.length;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderBufferObject>(offset, &obj_write)?;
+ }
+ BinderObjectRef::Fda(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+ let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
+ let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
+ view.alloc.info_add_fd_reserve(num_fds)?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry
+ .pointer_fixups
+ .push(
+ PointerFixupEntry {
+ skip: fds_len,
+ pointer_value: 0,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)?;
+
+ let fda_uaddr = parent_entry
+ .sender_uaddr
+ .checked_add(parent_offset)
+ .ok_or(EINVAL)?;
+ let mut fda_bytes = KVec::new();
+ UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
+ .read_all(&mut fda_bytes, GFP_KERNEL)?;
+
+ if fds_len != fda_bytes.len() {
+ pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
+ return Err(EINVAL.into());
+ }
+
+ for i in (0..fds_len).step_by(size_of::<u32>()) {
+ let fd = {
+ let mut fd_bytes = [0u8; size_of::<u32>()];
+ fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
+ u32::from_ne_bytes(fd_bytes)
+ };
+
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ // The `validate_parent_fixup` call ensures that this addition will not
+ // overflow.
+ view.alloc.info_add_fd(file, info.target_offset + i, true)?;
+ }
+ drop(fda_bytes);
+
+ let mut obj_write = BinderFdArrayObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FDA;
+ obj_write.num_fds = obj.num_fds;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderFdArrayObject>(offset, &obj_write)?;
+ }
+ }
+ Ok(())
+ }
+
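+ /// Copies each scatter-gather entry from the sender into the allocation,
+ /// splicing in the translated pointer values recorded in `pointer_fixups`.
+ ///
+ /// Worked example (hypothetical layout): for an entry at `offset = 0` with
+ /// `length = 24` and one pointer fixup at `target_offset = 8`, the loop
+ /// copies bytes 0..8 verbatim from the sender, writes the 8-byte translated
+ /// pointer at 8..16 while skipping the sender's original bytes, and the
+ /// final copy transfers the remaining bytes 16..24.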
+ fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
+ for sg_entry in &mut sg_state.sg_entries {
+ let mut end_of_previous_fixup = sg_entry.offset;
+ let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
+
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
+ for fixup in &mut sg_entry.pointer_fixups {
+ let fixup_len = if fixup.skip == 0 {
+ size_of::<u64>()
+ } else {
+ fixup.skip
+ };
+
+ let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+ if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+ pr_warn!(
+ "Fixups oob {} {} {} {}",
+ fixup.target_offset,
+ end_of_previous_fixup,
+ offset_end,
+ target_offset_end
+ );
+ return Err(EINVAL.into());
+ }
+
+ let copy_off = end_of_previous_fixup;
+ let copy_len = fixup.target_offset - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ if fixup.skip == 0 {
+ let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+ if let Err(err) = res {
+ pr_warn!("Failed copying ptr into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ if let Err(err) = reader.skip(fixup_len) {
+ pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
+ return Err(err.into());
+ }
+ end_of_previous_fixup = target_offset_end;
+ }
+ let copy_off = end_of_previous_fixup;
+ let copy_len = offset_end - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying remainder into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ Ok(())
+ }
+
+ /// This method copies the payload of a transaction into the target process.
+ ///
+ /// The resulting payload will have several different components, which will be stored next to
+ /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
+ /// and those objects have to be translated so that they make sense to the target process.
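+ ///
+ /// Sketch of the resulting layout (sizes are pointer-aligned; `secctx` is
+ /// present only when a security context was requested):
+ ///
+ /// ```text
+ /// | data | offsets | sg buffers | secctx |
+ /// ```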
+ pub(crate) fn copy_transaction_data(
+ &self,
+ to_process: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ debug_id: usize,
+ allow_fds: bool,
+ txn_security_ctx_offset: Option<&mut usize>,
+ ) -> BinderResult<NewAllocation> {
+ let trd = &tr.transaction_data;
+ let is_oneway = trd.flags & TF_ONE_WAY != 0;
+ let mut secctx = if let Some(offset) = txn_security_ctx_offset {
+ let secid = self.process.cred.get_secid();
+ let ctx = match security::SecurityCtx::from_secid(secid) {
+ Ok(ctx) => ctx,
+ Err(err) => {
+ pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
+ return Err(err.into());
+ }
+ };
+ Some((offset, ctx))
+ } else {
+ None
+ };
+
+ let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
+ let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
+ let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+ let aligned_secctx_size = match secctx.as_ref() {
+ Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
+ None => 0,
+ };
+
+ // This guarantees that at least `size_of::<usize>()` bytes will be allocated.
+ let len = usize::max(
+ aligned_data_size
+ .checked_add(aligned_offsets_size)
+ .and_then(|sum| sum.checked_add(aligned_buffers_size))
+ .and_then(|sum| sum.checked_add(aligned_secctx_size))
+ .ok_or(ENOMEM)?,
+ size_of::<usize>(),
+ );
+ let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
+ let mut alloc =
+ match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!(
+ "Failed to allocate buffer. len:{}, is_oneway:{}",
+ len,
+ is_oneway
+ );
+ return Err(err);
+ }
+ };
+
+ // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
+ // all bit-patterns.
+ let trd_data_ptr = unsafe { &trd.data.ptr };
+ let mut buffer_reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
+ let mut end_of_previous_object = 0;
+ let mut sg_state = None;
+
+ // Copy offsets if there are any.
+ if offsets_size > 0 {
+ {
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+ .reader();
+ alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
+ }
+
+ let offsets_start = aligned_data_size;
+ let offsets_end = aligned_data_size + aligned_offsets_size;
+
+ // This state is used for BINDER_TYPE_PTR objects.
+ let sg_state = sg_state.insert(ScatterGatherState {
+ unused_buffer_space: UnusedBufferSpace {
+ offset: offsets_end,
+ limit: len,
+ },
+ sg_entries: KVec::new(),
+ ancestors: KVec::new(),
+ });
+
+ // Traverse the objects specified.
+ let mut view = AllocationView::new(&mut alloc, data_size);
+ for (index, index_offset) in (offsets_start..offsets_end)
+ .step_by(size_of::<usize>())
+ .enumerate()
+ {
+ let offset = view.alloc.read(index_offset)?;
+
+ if offset < end_of_previous_object {
+ pr_warn!("Got transaction with invalid offset.");
+ return Err(EINVAL.into());
+ }
+
+ // Copy data between two objects.
+ if end_of_previous_object < offset {
+ view.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ offset - end_of_previous_object,
+ )?;
+ }
+
+ let mut object = BinderObject::read_from(&mut buffer_reader)?;
+
+ match self.translate_object(
+ index,
+ offset,
+ object.as_ref(),
+ &mut view,
+ allow_fds,
+ sg_state,
+ ) {
+ Ok(()) => end_of_previous_object = offset + object.size(),
+ Err(err) => {
+ pr_warn!("Error while translating object.");
+ return Err(err);
+ }
+ }
+
+ // Update the indexes containing objects to clean up.
+ let offset_after_object = index_offset + size_of::<usize>();
+ view.alloc
+ .set_info_offsets(offsets_start..offset_after_object);
+ }
+ }
+
+ // Copy remaining raw data.
+ alloc.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ data_size - end_of_previous_object,
+ )?;
+
+ if let Some(sg_state) = sg_state.as_mut() {
+ if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
+ pr_warn!("Failure in apply_sg: {:?}", err);
+ return Err(err);
+ }
+ }
+
+ if let Some((off_out, secctx)) = secctx.as_mut() {
+ if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
+ pr_warn!("Failed to write security context: {:?}", err);
+ return Err(err.into());
+ }
+ **off_out = secctx_off;
+ }
+ Ok(alloc)
+ }
+
+ fn unwind_transaction_stack(self: &Arc<Self>) {
+ let mut thread = self.clone();
+ while let Ok(transaction) = {
+ let mut inner = thread.inner.lock();
+ inner.pop_transaction_to_reply(thread.as_ref())
+ } {
+ let reply = Err(BR_DEAD_REPLY);
+ if !transaction.from.deliver_single_reply(reply, &transaction) {
+ break;
+ }
+
+ thread = transaction.from.clone();
+ }
+ }
+
+ pub(crate) fn deliver_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) {
+ if self.deliver_single_reply(reply, transaction) {
+ transaction.from.unwind_transaction_stack();
+ }
+ }
+
+ /// Delivers a reply to the thread that started a transaction. The reply can either be a
+ /// reply-transaction or an error code to be delivered instead.
+ ///
+ /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
+ /// transaction stack by completing transactions for threads that are dead.
+ fn deliver_single_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) -> bool {
+ if let Ok(transaction) = &reply {
+ transaction.set_outstanding(&mut self.process.inner.lock());
+ }
+
+ {
+ let mut inner = self.inner.lock();
+ if !inner.pop_transaction_replied(transaction) {
+ return false;
+ }
+
+ if inner.is_dead {
+ return true;
+ }
+
+ match reply {
+ Ok(work) => {
+ inner.push_work(work);
+ }
+ Err(code) => inner.push_reply_work(code),
+ }
+ }
+
+ // Notify the thread now that we've released the inner lock.
+ self.work_condvar.notify_sync();
+ false
+ }
+
+ /// Determines if the given transaction is the current transaction for this thread.
+ fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
+ let inner = self.inner.lock();
+ match &inner.current_transaction {
+ None => false,
+ Some(current) => Arc::ptr_eq(current, transaction),
+ }
+ }
+
+ /// Determines the current top of the transaction stack. It fails if the top is in another
+ /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
+ /// [`None`] if the thread is not currently participating in a transaction stack.
+ fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
+ let inner = self.inner.lock();
+ if let Some(cur) = &inner.current_transaction {
+ if core::ptr::eq(self, cur.from.as_ref()) {
+ pr_warn!("got new transaction with bad transaction stack");
+ return Err(EINVAL);
+ }
+ Ok(Some(cur.clone()))
+ } else {
+ Ok(None)
+ }
+ }
+
+ fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
+ where
+ T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
+ {
+ if let Err(err) = inner(self, tr) {
+ if err.should_pr_warn() {
+ let mut ee = self.inner.lock().extended_error;
+ ee.command = err.reply;
+ ee.param = err.as_errno();
+ pr_warn!(
+ "Transaction failed: {:?} my_pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ }
+
+ self.push_return_work(err.reply);
+ }
+ }
+
+ fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: Handle's type has no invalid bit patterns.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
+ // could this happen?
+ let top = self.top_of_transaction_stack()?;
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let completion = list_completion.clone_arc();
+ let transaction = Transaction::new(node_ref, top, self, tr)?;
+
+ // Check that the transaction stack hasn't changed while the lock was released, then update
+ // it with the new transaction.
+ {
+ let mut inner = self.inner.lock();
+ if !transaction.is_stacked_on(&inner.current_transaction) {
+ pr_warn!("Transaction stack changed during transaction!");
+ return Err(EINVAL.into());
+ }
+ inner.current_transaction = Some(transaction.clone_arc());
+ // We push the completion as a deferred work so that we wait for the reply before
+ // returning to userland.
+ inner.push_work_deferred(list_completion);
+ }
+
+ if let Err(e) = transaction.submit() {
+ completion.skip();
+ // Define `transaction` first to drop it after `inner`.
+ let transaction;
+ let mut inner = self.inner.lock();
+ transaction = inner.current_transaction.take().unwrap();
+ inner.current_transaction = transaction.clone_next();
+ Err(e)
+ } else {
+ Ok(())
+ }
+ }
+
+ fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ let orig = self.inner.lock().pop_transaction_to_reply(self)?;
+ if !orig.from.is_current_transaction(&orig) {
+ return Err(EINVAL.into());
+ }
+
+ // We need to complete the transaction even if we cannot complete building the reply.
+ let out = (|| -> BinderResult<_> {
+ let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let process = orig.from.process.clone();
+ let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
+ let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
+ self.inner.lock().push_work(completion);
+ orig.from.deliver_reply(Ok(reply), &orig);
+ Ok(())
+ })()
+ .map_err(|mut err| {
+ // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
+ // the sender know that the transaction has completed (with an error in this case).
+ pr_warn!(
+ "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
+ err
+ );
+ let reply = Err(BR_FAILED_REPLY);
+ orig.from.deliver_reply(reply, &orig);
+ err.reply = BR_TRANSACTION_COMPLETE;
+ err
+ });
+
+ out
+ }
+
+ fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
+ // union is okay.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ let transaction = Transaction::new(node_ref, None, self, tr)?;
+ let code = if self.process.is_oneway_spam_detection_enabled()
+ && transaction.oneway_spam_detected
+ {
+ BR_ONEWAY_SPAM_SUSPECT
+ } else {
+ BR_TRANSACTION_COMPLETE
+ };
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
+ let completion = list_completion.clone_arc();
+ self.inner.lock().push_work(list_completion);
+ match transaction.submit() {
+ Ok(()) => Ok(()),
+ Err(err) => {
+ completion.skip();
+ Err(err)
+ }
+ }
+ }
+
+ fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
+ let write_start = req.write_buffer.wrapping_add(req.write_consumed);
+ let write_len = req.write_size.saturating_sub(req.write_consumed);
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();
+
+ while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
+ let before = reader.len();
+ let cmd = reader.read::<u32>()?;
+ GLOBAL_STATS.inc_bc(cmd);
+ self.process.stats.inc_bc(cmd);
+ match cmd {
+ BC_TRANSACTION => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_TRANSACTION_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_REPLY => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_REPLY_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_FREE_BUFFER => {
+ let buffer = self.process.buffer_get(reader.read()?);
+ if let Some(buffer) = buffer {
+ if buffer.looper_need_return_on_free() {
+ self.inner.lock().looper_need_return = true;
+ }
+ drop(buffer);
+ }
+ }
+ BC_INCREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, false)?
+ }
+ BC_ACQUIRE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, true)?
+ }
+ BC_RELEASE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, true)?
+ }
+ BC_DECREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, false)?
+ }
+ BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
+ BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
+ BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
+ BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
+ BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
+ BC_REGISTER_LOOPER => {
+ let valid = self.process.register_thread();
+ self.inner.lock().looper_register(valid);
+ }
+ BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
+ BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
+ BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
+ BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
+ BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,
+
+ // Fail if given an unknown command code.
+ // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
+ _ => return Err(EINVAL),
+ }
+ // Update the number of write bytes consumed.
+ req.write_consumed += (before - reader.len()) as u64;
+ }
+
+ Ok(())
+ }
+
+ fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
+ let read_start = req.read_buffer.wrapping_add(req.read_consumed);
+ let read_len = req.read_size.saturating_sub(req.read_consumed);
+ let mut writer = BinderReturnWriter::new(
+ UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
+ self,
+ );
+ let (in_pool, use_proc_queue) = {
+ let inner = self.inner.lock();
+ (inner.is_looper(), inner.should_use_process_work_queue())
+ };
+
+ let getter = if use_proc_queue {
+ Self::get_work
+ } else {
+ Self::get_work_local
+ };
+
+ // Reserve some room at the beginning of the read buffer so that we can send a
+ // BR_SPAWN_LOOPER if we need to.
+ let mut has_noop_placeholder = false;
+ if req.read_consumed == 0 {
+ if let Err(err) = writer.write_code(BR_NOOP) {
+ pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
+ return Err(err);
+ }
+ has_noop_placeholder = true;
+ }
+
+ // Loop doing work while there is room in the buffer.
+ let initial_len = writer.len();
+ while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
+ match getter(self, wait && initial_len == writer.len()) {
+ Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
+ Ok(true) => {}
+ Ok(false) => break,
+ Err(err) => {
+ return Err(err);
+ }
+ },
+ Ok(None) => {
+ break;
+ }
+ Err(err) => {
+ // Propagate the error if we haven't written anything else.
+ if err != EINTR && err != EAGAIN {
+ pr_warn!("Failure in work getter: {:?}", err);
+ }
+ if initial_len == writer.len() {
+ return Err(err);
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ req.read_consumed += read_len - writer.len() as u64;
+
+ // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
+ if has_noop_placeholder && in_pool && self.process.needs_thread() {
+ let mut writer =
+ UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
+ .writer();
+ writer.write(&BR_SPAWN_LOOPER)?;
+ }
+ Ok(())
+ }
+
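+ /// Handles the `BINDER_WRITE_READ` ioctl: first drains the caller's write
+ /// buffer of `BC_*` commands, then fills the read buffer with `BR_*` work
+ /// items, updating `write_consumed` and `read_consumed` so that userspace
+ /// can resume a partially processed request.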
+ pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut req = reader.read::<BinderWriteRead>()?;
+
+ // Go through the write buffer.
+ let mut ret = Ok(());
+ if req.write_size > 0 {
+ ret = self.write(&mut req);
+ if let Err(err) = ret {
+ pr_warn!(
+ "Write failure {:?} in pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ req.read_consumed = 0;
+ writer.write(&req)?;
+ self.inner.lock().looper_need_return = false;
+ return ret;
+ }
+ }
+
+ // Go through the work queue.
+ if req.read_size > 0 {
+ ret = self.read(&mut req, wait);
+ if ret.is_err() && ret != Err(EINTR) {
+ pr_warn!(
+ "Read failure {:?} in pid:{}",
+ ret,
+ self.process.pid_in_current_ns()
+ );
+ }
+ }
+
+ // Write the request back so that the consumed fields are visible to the caller.
+ writer.write(&req)?;
+
+ self.inner.lock().looper_need_return = false;
+
+ ret
+ }
+
+ pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
+ table.register_wait(file, &self.work_condvar);
+ let mut inner = self.inner.lock();
+ (inner.should_use_process_work_queue(), inner.poll())
+ }
+
+ /// Make any in-progress call to `get_work` or `get_work_local` return immediately.
+ pub(crate) fn exit_looper(&self) {
+ let mut inner = self.inner.lock();
+ let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
+ if should_notify {
+ inner.looper_need_return = true;
+ }
+ drop(inner);
+
+ if should_notify {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
+ // Determine if we need to notify. This requires the lock.
+ let inner = self.inner.lock();
+ let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
+ drop(inner);
+
+ // Now that the lock is no longer held, notify the waiters if we have to.
+ if notify {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+ }
+
+ pub(crate) fn release(self: &Arc<Self>) {
+ self.inner.lock().is_dead = true;
+
+ //self.work_condvar.clear();
+ self.unwind_transaction_stack();
+
+ // Cancel all pending work items.
+ while let Ok(Some(work)) = self.get_work_local(false) {
+ work.into_arc().cancel();
+ }
+ }
+}
+
+#[pin_data]
+struct ThreadError {
+ error_code: AtomicU32,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+impl ThreadError {
+ fn try_new() -> Result<DArc<Self>> {
+ DTRWrap::arc_pin_init(pin_init!(Self {
+ error_code: AtomicU32::new(BR_OK),
+ links_track <- AtomicTracker::new(),
+ }))
+ .map(ListArc::into_arc)
+ }
+
+ fn set_error_code(&self, code: u32) {
+ self.error_code.store(code, Ordering::Relaxed);
+ }
+
+ fn is_unused(&self) -> bool {
+ self.error_code.load(Ordering::Relaxed) == BR_OK
+ }
+}
+
+impl DeliverToRead for ThreadError {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let code = self.error_code.load(Ordering::Relaxed);
+ self.error_code.store(BR_OK, Ordering::Relaxed);
+ writer.write_code(code)?;
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}transaction error: {}\n",
+ prefix,
+ self.error_code.load(Ordering::Relaxed)
+ );
+ Ok(())
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for ThreadError {
+ tracked_by links_track: AtomicTracker;
+ }
+}
diff --git a/drivers/android/binder/trace.rs b/drivers/android/binder/trace.rs
new file mode 100644
index 000000000000..af0e4392805e
--- /dev/null
+++ b/drivers/android/binder/trace.rs
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::ffi::{c_uint, c_ulong};
+use kernel::tracepoint::declare_trace;
+
+declare_trace! {
+ unsafe fn rust_binder_ioctl(cmd: c_uint, arg: c_ulong);
+}
+
+#[inline]
+pub(crate) fn trace_ioctl(cmd: u32, arg: usize) {
+ // SAFETY: Always safe to call.
+ unsafe { rust_binder_ioctl(cmd, arg as c_ulong) }
+}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
new file mode 100644
index 000000000000..4bd3c0e417eb
--- /dev/null
+++ b/drivers/android/binder/transaction.rs
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use kernel::{
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, SpinLock},
+ task::Kuid,
+ time::{Instant, Monotonic},
+ types::ScopeGuard,
+};
+
+use crate::{
+ allocation::{Allocation, TranslatedFds},
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{Node, NodeRef},
+ process::{Process, ProcessInner},
+ ptr_align,
+ thread::{PushWorkRes, Thread},
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct Transaction {
+ pub(crate) debug_id: usize,
+ target_node: Option<DArc<Node>>,
+ pub(crate) from_parent: Option<DArc<Transaction>>,
+ pub(crate) from: Arc<Thread>,
+ pub(crate) to: Arc<Process>,
+ #[pin]
+ allocation: SpinLock<Option<Allocation>>,
+ is_outstanding: AtomicBool,
+ code: u32,
+ pub(crate) flags: u32,
+ data_size: usize,
+ offsets_size: usize,
+ data_address: usize,
+ sender_euid: Kuid,
+ txn_security_ctx_off: Option<usize>,
+ pub(crate) oneway_spam_detected: bool,
+ start_time: Instant<Monotonic>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Transaction { untracked; }
+}
+
+impl Transaction {
+ pub(crate) fn new(
+ node_ref: NodeRef,
+ from_parent: Option<DArc<Transaction>>,
+ from: &Arc<Thread>,
+ tr: &BinderTransactionDataSg,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let allow_fds = node_ref.node.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS != 0;
+ let txn_security_ctx = node_ref.node.flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX != 0;
+ let mut txn_security_ctx_off = if txn_security_ctx { Some(0) } else { None };
+ let to = node_ref.node.owner.clone();
+ let mut alloc = match from.copy_transaction_data(
+ to.clone(),
+ tr,
+ debug_id,
+ allow_fds,
+ txn_security_ctx_off.as_mut(),
+ ) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ if !err.is_dead() {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ }
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_ONE_WAY != 0 {
+ if from_parent.is_some() {
+ pr_warn!("Oneway transaction should not be in a transaction stack.");
+ return Err(EINVAL.into());
+ }
+ alloc.set_info_oneway_node(node_ref.node.clone());
+ }
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ let target_node = node_ref.node.clone();
+ alloc.set_info_target_node(node_ref);
+ let data_address = alloc.ptr;
+
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: Some(target_node),
+ from_parent,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ pub(crate) fn new_reply(
+ from: &Arc<Thread>,
+ to: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ allow_fds: bool,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let mut alloc = match from.copy_transaction_data(to.clone(), tr, debug_id, allow_fds, None)
+ {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: None,
+ from_parent: None,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address: alloc.ptr,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off: None,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_inner(&self, m: &SeqFile, prefix: &str) {
+ seq_print!(
+ m,
+ "{}{}: from {}:{} to {} code {:x} flags {:x} elapsed {}ms",
+ prefix,
+ self.debug_id,
+ self.from.process.task.pid(),
+ self.from.id,
+ self.to.task.pid(),
+ self.code,
+ self.flags,
+ self.start_time.elapsed().as_millis(),
+ );
+ if let Some(target_node) = &self.target_node {
+ seq_print!(m, " node {}", target_node.debug_id);
+ }
+ seq_print!(m, " size {}:{}\n", self.data_size, self.offsets_size);
+ }
+
+ /// Determines if the transaction is stacked on top of the given transaction.
+ pub(crate) fn is_stacked_on(&self, onext: &Option<DArc<Self>>) -> bool {
+ match (&self.from_parent, onext) {
+ (None, None) => true,
+ (Some(from_parent), Some(next)) => Arc::ptr_eq(from_parent, next),
+ _ => false,
+ }
+ }
+
+ /// Returns a pointer to the next transaction on the transaction stack, if there is one.
+ pub(crate) fn clone_next(&self) -> Option<DArc<Self>> {
+ Some(self.from_parent.as_ref()?.clone())
+ }
+
+ /// Searches in the transaction stack for a thread that belongs to the target process. This is
+ /// useful when finding a target for a new transaction: if the node belongs to a process that
+ /// is already part of the transaction stack, we reuse the thread.
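+ ///
+ /// For example (hypothetical chain): if thread T1 of process P1 is blocked
+ /// on a call into P2, and P2 now sends a transaction to a node owned by P1,
+ /// walking `from_parent` finds T1, so the work is queued directly on T1
+ /// instead of P1's process queue.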
+ fn find_target_thread(&self) -> Option<Arc<Thread>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if Arc::ptr_eq(&transaction.from.process, &self.to) {
+ return Some(transaction.from.clone());
+ }
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ /// Searches in the transaction stack for a transaction originating at the given thread.
+ pub(crate) fn find_from(&self, thread: &Thread) -> Option<&DArc<Transaction>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ return Some(transaction);
+ }
+
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ pub(crate) fn set_outstanding(&self, to_process: &mut ProcessInner) {
+ // No race because this method is only called once.
+ if !self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(true, Ordering::Relaxed);
+ to_process.add_outstanding_txn();
+ }
+ }
+
+ /// Decrement `outstanding_txns` in `to` if it hasn't already been decremented.
+ fn drop_outstanding_txn(&self) {
+ // No race because this is called at most twice, and one of the calls is in the
+ // destructor, which is guaranteed to not race with any other operations on the
+ // transaction. It also cannot race with `set_outstanding`, since submission happens
+ // before delivery.
+ if self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(false, Ordering::Relaxed);
+ self.to.drop_outstanding_txn();
+ }
+ }
+
+ /// Submits the transaction to a work queue. Uses a thread if there is one in the transaction
+ /// stack, otherwise uses the destination process.
+ ///
+ /// Not used for replies.
+ pub(crate) fn submit(self: DLArc<Self>) -> BinderResult {
+ // Defined before `process_inner` so that the destructor runs after releasing the lock.
+ let mut _t_outdated;
+
+ let oneway = self.flags & TF_ONE_WAY != 0;
+ let process = self.to.clone();
+ let mut process_inner = process.inner.lock();
+
+ self.set_outstanding(&mut process_inner);
+
+ if oneway {
+ if let Some(target_node) = self.target_node.clone() {
+ if process_inner.is_frozen.is_frozen() {
+ process_inner.async_recv = true;
+ if self.flags & TF_UPDATE_TXN != 0 {
+ if let Some(t_outdated) =
+ target_node.take_outdated_transaction(&self, &mut process_inner)
+ {
+ // Save the transaction to be dropped after locks are released.
+ _t_outdated = t_outdated;
+ }
+ }
+ }
+ match target_node.submit_oneway(self, &mut process_inner) {
+ Ok(()) => {}
+ Err((err, work)) => {
+ drop(process_inner);
+ // Drop work after releasing process lock.
+ drop(work);
+ return Err(err);
+ }
+ }
+
+ if process_inner.is_frozen.is_frozen() {
+ return Err(BinderError::new_frozen_oneway());
+ } else {
+ return Ok(());
+ }
+ } else {
+ pr_err!("Failed to submit oneway transaction to node.");
+ }
+ }
+
+ if process_inner.is_frozen.is_frozen() {
+ process_inner.sync_recv = true;
+ return Err(BinderError::new_frozen());
+ }
+
+ let res = if let Some(thread) = self.find_target_thread() {
+ match thread.push_work(self) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(me) => Err((BinderError::new_dead(), me)),
+ }
+ } else {
+ process_inner.push_work(self)
+ };
+ drop(process_inner);
+
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ // Drop work after releasing process lock.
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ /// Checks whether this oneway transaction can supersede `old`.
+ pub(crate) fn can_replace(&self, old: &Transaction) -> bool {
+ if self.from.process.task.pid() != old.from.process.task.pid() {
+ return false;
+ }
+
+ if self.flags & old.flags & (TF_ONE_WAY | TF_UPDATE_TXN) != (TF_ONE_WAY | TF_UPDATE_TXN) {
+ return false;
+ }
+
+ let target_node_match = match (self.target_node.as_ref(), old.target_node.as_ref()) {
+ (None, None) => true,
+ (Some(tn1), Some(tn2)) => Arc::ptr_eq(tn1, tn2),
+ _ => false,
+ };
+
+ self.code == old.code && self.flags == old.flags && target_node_match
+ }
+
+ fn prepare_file_list(&self) -> Result<TranslatedFds> {
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ match alloc.translate_fds() {
+ Ok(translated) => {
+ *self.allocation.lock() = Some(alloc);
+ Ok(translated)
+ }
+ Err(err) => {
+ // Free the allocation eagerly.
+ drop(alloc);
+ Err(err)
+ }
+ }
+ }
+}
+
+impl DeliverToRead for Transaction {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let send_failed_reply = ScopeGuard::new(|| {
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_FAILED_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+ self.drop_outstanding_txn();
+ });
+
+ let files = if let Ok(list) = self.prepare_file_list() {
+ list
+ } else {
+ // On failure to process the list, we send a reply back to the sender and ignore the
+ // transaction on the recipient.
+ return Ok(true);
+ };
+
+ let mut tr_sec = BinderTransactionDataSecctx::default();
+ let tr = tr_sec.tr_data();
+ if let Some(target_node) = &self.target_node {
+ let (ptr, cookie) = target_node.get_id();
+ tr.target.ptr = ptr as _;
+ tr.cookie = cookie as _;
+ };
+ tr.code = self.code;
+ tr.flags = self.flags;
+ tr.data_size = self.data_size as _;
+ tr.data.ptr.buffer = self.data_address as _;
+ tr.offsets_size = self.offsets_size as _;
+ if tr.offsets_size > 0 {
+ tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size).unwrap()) as _;
+ }
+ tr.sender_euid = self.sender_euid.into_uid_in_current_ns();
+ tr.sender_pid = 0;
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ // Not a reply and not one-way.
+ tr.sender_pid = self.from.process.pid_in_current_ns();
+ }
+ let code = if self.target_node.is_none() {
+ BR_REPLY
+ } else if self.txn_security_ctx_off.is_some() {
+ BR_TRANSACTION_SEC_CTX
+ } else {
+ BR_TRANSACTION
+ };
+
+ // Write the transaction code and data to the user buffer.
+ writer.write_code(code)?;
+ if let Some(off) = self.txn_security_ctx_off {
+ tr_sec.secctx = (self.data_address + off) as u64;
+ writer.write_payload(&tr_sec)?;
+ } else {
+ writer.write_payload(&*tr)?;
+ }
+
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ // Dismiss the guard that would send a failed reply for this transaction. No
+ // failure paths are allowed from here on out.
+ send_failed_reply.dismiss();
+
+ // Commit files, and set FDs in FDA to be closed on buffer free.
+ let close_on_free = files.commit();
+ alloc.set_info_close_on_free(close_on_free);
+
+ // It is now the user's responsibility to clear the allocation.
+ alloc.keep_alive();
+
+ self.drop_outstanding_txn();
+
+ // When this is not a reply and not a oneway transaction, update `current_transaction`. If
+ // it's a reply, `current_transaction` has already been updated appropriately.
+ if self.target_node.is_some() && tr_sec.transaction_data.flags & TF_ONE_WAY == 0 {
+ thread.set_current_transaction(self);
+ }
+
+ Ok(false)
+ }
+
+ fn cancel(self: DArc<Self>) {
+ let allocation = self.allocation.lock().take();
+ drop(allocation);
+
+ // If this is not a reply or oneway transaction, then send a dead reply.
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_DEAD_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+
+ self.drop_outstanding_txn();
+ }
+
+ fn should_sync_wakeup(&self) -> bool {
+ self.flags & TF_ONE_WAY == 0
+ }
+
+ fn debug_print(&self, m: &SeqFile, _prefix: &str, tprefix: &str) -> Result<()> {
+ self.debug_print_inner(m, tprefix);
+ Ok(())
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for Transaction {
+ fn drop(self: Pin<&mut Self>) {
+ self.drop_outstanding_txn();
+ }
+}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 8b08976146ba..342574bfd28a 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -537,8 +537,8 @@ struct binder_transaction {
struct binder_proc *to_proc;
struct binder_thread *to_thread;
struct binder_transaction *to_parent;
- unsigned need_reply:1;
- /* unsigned is_dead:1; */ /* not used at the moment */
+ unsigned is_async:1;
+ unsigned is_reply:1;
struct binder_buffer *buffer;
unsigned int code;
diff --git a/drivers/android/binder_netlink.c b/drivers/android/binder_netlink.c
new file mode 100644
index 000000000000..81e8432b5904
--- /dev/null
+++ b/drivers/android/binder_netlink.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "binder_netlink.h"
+
+#include <uapi/linux/android/binder_netlink.h>
+
+/* Ops table for binder */
+static const struct genl_split_ops binder_nl_ops[] = {
+};
+
+static const struct genl_multicast_group binder_nl_mcgrps[] = {
+ [BINDER_NLGRP_REPORT] = { "report", },
+};
+
+struct genl_family binder_nl_family __ro_after_init = {
+ .name = BINDER_FAMILY_NAME,
+ .version = BINDER_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = binder_nl_ops,
+ .n_split_ops = ARRAY_SIZE(binder_nl_ops),
+ .mcgrps = binder_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(binder_nl_mcgrps),
+};
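
The generated ops table is deliberately empty; the family exists for its multicast group.
A minimal sketch of how the driver side would typically wire it up (the init/exit function
names here are assumptions, not part of this generated file):

    static int __init binder_netlink_init(void)
    {
            /* Makes BINDER_NLGRP_REPORT available to userspace listeners. */
            return genl_register_family(&binder_nl_family);
    }

    static void __exit binder_netlink_exit(void)
    {
            genl_unregister_family(&binder_nl_family);
    }
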
diff --git a/drivers/android/binder_netlink.h b/drivers/android/binder_netlink.h
new file mode 100644
index 000000000000..57399942a5e3
--- /dev/null
+++ b/drivers/android/binder_netlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
+
+#ifndef _LINUX_BINDER_GEN_H
+#define _LINUX_BINDER_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/android/binder_netlink.h>
+
+enum {
+ BINDER_NLGRP_REPORT,
+};
+
+extern struct genl_family binder_nl_family;
+
+#endif /* _LINUX_BINDER_GEN_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 97a78e5623db..fa5eb61cf580 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -402,6 +402,43 @@ TRACE_EVENT(binder_return,
"unknown")
);
+TRACE_EVENT(binder_netlink_report,
+ TP_PROTO(const char *context,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error),
+ TP_ARGS(context, t, data_size, error),
+ TP_STRUCT__entry(
+ __field(const char *, context)
+ __field(u32, error)
+ __field(int, from_pid)
+ __field(int, from_tid)
+ __field(int, to_pid)
+ __field(int, to_tid)
+ __field(bool, is_reply)
+ __field(unsigned int, flags)
+ __field(unsigned int, code)
+ __field(size_t, data_size)
+ ),
+ TP_fast_assign(
+ __entry->context = context;
+ __entry->error = error;
+ __entry->from_pid = t->from_pid;
+ __entry->from_tid = t->from_tid;
+ __entry->to_pid = t->to_proc ? t->to_proc->pid : 0;
+ __entry->to_tid = t->to_thread ? t->to_thread->pid : 0;
+ __entry->is_reply = t->is_reply;
+ __entry->flags = t->flags;
+ __entry->code = t->code;
+ __entry->data_size = data_size;
+ ),
+ TP_printk("from %d:%d to %d:%d context=%s error=%u is_reply=%d flags=0x%x code=0x%x size=%zu",
+ __entry->from_pid, __entry->from_tid,
+ __entry->to_pid, __entry->to_tid,
+ __entry->context, __entry->error, __entry->is_reply,
+ __entry->flags, __entry->code, __entry->data_size)
+);
+
#endif /* _BINDER_TRACE_H */
#undef TRACE_INCLUDE_PATH
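
A sketch of what a call site for the new tracepoint could look like; the surrounding
variable names are assumptions, since the callers are outside this hunk:

    trace_binder_netlink_report(proc->context->name, t, tr->data_size, err);
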
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 0d9d95a7fb60..b46bcb91072d 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -59,6 +59,7 @@ struct binder_features {
bool oneway_spam_detection;
bool extended_error;
bool freeze_notification;
+ bool transaction_report;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -76,6 +77,7 @@ static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
.freeze_notification = true,
+ .transaction_report = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -181,28 +183,15 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
}
root = sb->s_root;
- inode_lock(d_inode(root));
-
- /* look it up */
- dentry = lookup_noperm(&QSTR(name), root);
+ dentry = simple_start_creating(root, name);
if (IS_ERR(dentry)) {
- inode_unlock(d_inode(root));
ret = PTR_ERR(dentry);
goto err;
}
-
- if (d_really_is_positive(dentry)) {
- /* already exists */
- dput(dentry);
- inode_unlock(d_inode(root));
- ret = -EEXIST;
- goto err;
- }
-
inode->i_private = device;
- d_instantiate(dentry, inode);
+ d_make_persistent(dentry, inode);
fsnotify_create(root->d_inode, dentry);
- inode_unlock(d_inode(root));
+ simple_done_creating(dentry);
binder_add_device(device);
@@ -222,6 +211,9 @@ err:
/**
* binder_ctl_ioctl - handle binder device node allocation requests
+ * @file: The file pointer for the binder-control device node.
+ * @cmd: The ioctl command.
+ * @arg: The ioctl argument.
*
* The request handler for the binder-control device. All requests operate on
* the binderfs mount the binder-control device resides in:
@@ -408,12 +400,6 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
if (!device)
return -ENOMEM;
- /* If we have already created a binder-control node, return. */
- if (info->control_dentry) {
- ret = 0;
- goto out;
- }
-
ret = -ENOMEM;
inode = new_inode(sb);
if (!inode)
@@ -449,7 +435,8 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
inode->i_private = device;
info->control_dentry = dentry;
- d_add(dentry, inode);
+ d_make_persistent(dentry, inode);
+ dput(dentry);
return 0;
@@ -479,24 +466,6 @@ static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
return ret;
}
-static struct dentry *binderfs_create_dentry(struct dentry *parent,
- const char *name)
-{
- struct dentry *dentry;
-
- dentry = lookup_noperm(&QSTR(name), parent);
- if (IS_ERR(dentry))
- return dentry;
-
- /* Return error if the file/dir already exists. */
- if (d_really_is_positive(dentry)) {
- dput(dentry);
- return ERR_PTR(-EEXIST);
- }
-
- return dentry;
-}
-
struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
const struct file_operations *fops,
void *data)
@@ -506,28 +475,24 @@ struct dentry *binderfs_create_file(struct dentry *parent, const char *name,
struct super_block *sb;
parent_inode = d_inode(parent);
- inode_lock(parent_inode);
- dentry = binderfs_create_dentry(parent, name);
+ dentry = simple_start_creating(parent, name);
if (IS_ERR(dentry))
- goto out;
+ return dentry;
sb = parent_inode->i_sb;
new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
if (!new_inode) {
- dput(dentry);
- dentry = ERR_PTR(-ENOMEM);
- goto out;
+ simple_done_creating(dentry);
+ return ERR_PTR(-ENOMEM);
}
new_inode->i_fop = fops;
new_inode->i_private = data;
- d_instantiate(dentry, new_inode);
+ d_make_persistent(dentry, new_inode);
fsnotify_create(parent_inode, dentry);
-
-out:
- inode_unlock(parent_inode);
- return dentry;
+ simple_done_creating(dentry);
+ return dentry; /* borrowed */
}
static struct dentry *binderfs_create_dir(struct dentry *parent,
@@ -538,30 +503,26 @@ static struct dentry *binderfs_create_dir(struct dentry *parent,
struct super_block *sb;
parent_inode = d_inode(parent);
- inode_lock(parent_inode);
- dentry = binderfs_create_dentry(parent, name);
+ dentry = simple_start_creating(parent, name);
if (IS_ERR(dentry))
- goto out;
+ return dentry;
sb = parent_inode->i_sb;
new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
if (!new_inode) {
- dput(dentry);
- dentry = ERR_PTR(-ENOMEM);
- goto out;
+ simple_done_creating(dentry);
+ return ERR_PTR(-ENOMEM);
}
new_inode->i_fop = &simple_dir_operations;
new_inode->i_op = &simple_dir_inode_operations;
set_nlink(new_inode, 2);
- d_instantiate(dentry, new_inode);
+ d_make_persistent(dentry, new_inode);
inc_nlink(parent_inode);
fsnotify_mkdir(parent_inode, dentry);
-
-out:
- inode_unlock(parent_inode);
+ simple_done_creating(dentry);
return dentry;
}
@@ -601,6 +562,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "transaction_report",
+ &binder_features_fops,
+ &binder_features.transaction_report);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
@@ -771,7 +738,7 @@ static void binderfs_kill_super(struct super_block *sb)
* During inode eviction struct binderfs_info is needed.
* So first wipe the super_block then free struct binderfs_info.
*/
- kill_litter_super(sb);
+ kill_anon_super(sb);
if (info && info->ipc_ns)
put_ipc_ns(info->ipc_ns);
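
The binderfs conversion above follows one pattern throughout: simple_start_creating()
replaces the manual inode_lock()/lookup_noperm()/-EEXIST dance, d_make_persistent()
replaces d_instantiate()/d_add(), and simple_done_creating() releases the parent lock.
A condensed sketch of the resulting creation pattern, with helper semantics inferred
from their use in this patch:

    dentry = simple_start_creating(parent, name);   /* locks parent, -EEXIST if positive */
    if (IS_ERR(dentry))
            return dentry;

    inode = binderfs_make_inode(sb, S_IFREG | 0444);
    if (!inode) {
            simple_done_creating(dentry);           /* unlock and release */
            return ERR_PTR(-ENOMEM);
    }

    d_make_persistent(dentry, inode);
    simple_done_creating(dentry);
    return dentry;
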
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
index 956f1bd087d1..c7299ce8b374 100644
--- a/drivers/android/dbitmap.h
+++ b/drivers/android/dbitmap.h
@@ -37,6 +37,7 @@ static inline void dbitmap_free(struct dbitmap *dmap)
{
dmap->nbits = 0;
kfree(dmap->map);
+ dmap->map = NULL;
}
/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
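
Resetting the pointer makes dbitmap_free() idempotent. A two-line sketch of the failure
mode this closes:

    dbitmap_free(&dmap);
    dbitmap_free(&dmap);    /* previously a double kfree(); now kfree(NULL), a no-op */
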
diff --git a/drivers/android/tests/binder_alloc_kunit.c b/drivers/android/tests/binder_alloc_kunit.c
index 9b884d977f76..7f9cc003bbe3 100644
--- a/drivers/android/tests/binder_alloc_kunit.c
+++ b/drivers/android/tests/binder_alloc_kunit.c
@@ -554,7 +554,7 @@ static void binder_alloc_test_exit(struct kunit *test)
static struct kunit_case binder_alloc_test_cases[] = {
KUNIT_CASE(binder_alloc_test_init_freelist),
KUNIT_CASE(binder_alloc_test_mmap),
- KUNIT_CASE(binder_alloc_exhaustive_test),
+ KUNIT_CASE_SLOW(binder_alloc_exhaustive_test),
{}
};
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index f2140fc06ba0..15e18d50dcc6 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -246,6 +246,73 @@ void ata_acpi_bind_dev(struct ata_device *dev)
}
/**
+ * ata_acpi_dev_manage_restart - check whether the disk should be stopped
+ * (spun down) on system restart.
+ * @dev: target ATA device
+ *
+ * RETURNS:
+ * true if the disk should be stopped, otherwise false.
+ */
+bool ata_acpi_dev_manage_restart(struct ata_device *dev)
+{
+ struct device *tdev;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (dev->link->ap->flags & ATA_FLAG_ACPI_SATA)
+ tdev = &dev->tdev;
+ else
+ tdev = &dev->link->ap->tdev;
+
+ if (!is_acpi_device_node(tdev->fwnode))
+ return false;
+ return acpi_bus_power_manageable(ACPI_HANDLE(tdev));
+}
+
+/**
+ * ata_acpi_port_power_on - set the power state of the ata port to D0
+ * @ap: target ATA port
+ *
+ * This function is called at the beginning of ata_port_probe().
+ */
+void ata_acpi_port_power_on(struct ata_port *ap)
+{
+ acpi_handle handle;
+ int i;
+
+ /*
+ * If ATA_FLAG_ACPI_SATA is set, the acpi fwnode is attached to the
+ * ata_device instead of the ata_port.
+ */
+ if (ap->flags & ATA_FLAG_ACPI_SATA) {
+ for (i = 0; i < ATA_MAX_DEVICES; i++) {
+ struct ata_device *dev = &ap->link.device[i];
+
+ if (!is_acpi_device_node(dev->tdev.fwnode))
+ continue;
+ handle = ACPI_HANDLE(&dev->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ continue;
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_dev_err(dev,
+ "acpi: failed to set power state to D0\n");
+ }
+ return;
+ }
+
+ if (!is_acpi_device_node(ap->tdev.fwnode))
+ return;
+ handle = ACPI_HANDLE(&ap->tdev);
+ if (!acpi_bus_power_manageable(handle))
+ return;
+
+ if (acpi_bus_set_power(handle, ACPI_STATE_D0))
+ ata_port_err(ap, "acpi: failed to set power state to D0\n");
+}
+
+/**
* ata_acpi_dissociate - dissociate ATA host from ACPI objects
* @host: target ATA host
*
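
Both new helpers repeat one idiom: resolve the ACPI companion behind a struct device and
touch its power state only if ACPI actually manages it. Factored out as a sketch (the
helper name here is an assumption, not part of this patch):

    static void acpi_power_on_tdev(struct device *tdev)
    {
            acpi_handle handle;

            if (!is_acpi_device_node(tdev->fwnode))
                    return;                 /* no ACPI companion */

            handle = ACPI_HANDLE(tdev);
            if (!acpi_bus_power_manageable(handle))
                    return;                 /* no _PS0/_PR0 methods */

            if (acpi_bus_set_power(handle, ACPI_STATE_D0))
                    dev_err(tdev, "acpi: failed to set power state to D0\n");
    }
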
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index ff53f5f029b4..0b24bd169d61 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2174,13 +2174,10 @@ static int ata_read_log_directory(struct ata_device *dev)
}
version = get_unaligned_le16(&dev->gp_log_dir[0]);
- if (version != 0x0001) {
- ata_dev_err(dev, "Invalid log directory version 0x%04x\n",
- version);
- ata_clear_log_directory(dev);
- dev->quirks |= ATA_QUIRK_NO_LOG_DIR;
- return -EINVAL;
- }
+ if (version != 0x0001)
+ ata_dev_warn_once(dev,
+ "Invalid log directory version 0x%04x\n",
+ version);
return 0;
}
@@ -3009,6 +3006,16 @@ int ata_dev_configure(struct ata_device *dev)
}
dev->n_sectors = ata_id_n_sectors(id);
+ if (ata_id_is_locked(id)) {
+ /*
+ * If Security locked, set capacity to zero to prevent
+ * any I/O, e.g. partition scanning, as any I/O to a
+ * locked drive will result in user visible errors.
+ */
+ ata_dev_info(dev,
+ "Security locked, setting capacity to zero\n");
+ dev->n_sectors = 0;
+ }
/* get current R/W Multiple count setting */
if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
@@ -3139,6 +3146,10 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
dev->max_sectors);
+ if (dev->quirks & ATA_QUIRK_MAX_SEC_8191)
+ dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_8191,
+ dev->max_sectors);
+
if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48)
dev->max_sectors = ATA_MAX_SECTORS_LBA48;
@@ -3991,6 +4002,7 @@ static const char * const ata_quirk_names[] = {
[__ATA_QUIRK_NO_DMA_LOG] = "nodmalog",
[__ATA_QUIRK_NOTRIM] = "notrim",
[__ATA_QUIRK_MAX_SEC_1024] = "maxsec1024",
+ [__ATA_QUIRK_MAX_SEC_8191] = "maxsec8191",
[__ATA_QUIRK_MAX_TRIM_128M] = "maxtrim128m",
[__ATA_QUIRK_NO_NCQ_ON_ATI] = "noncqonati",
[__ATA_QUIRK_NO_LPM_ON_ATI] = "nolpmonati",
@@ -4097,6 +4109,12 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "LITEON CX1-JB*-HP", NULL, ATA_QUIRK_MAX_SEC_1024 },
{ "LITEON EP1-*", NULL, ATA_QUIRK_MAX_SEC_1024 },
+ /*
+ * These devices time out with higher max sects.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=220693
+ */
+ { "DELLBOSS VD", "MV.R00-0", ATA_QUIRK_MAX_SEC_8191 },
+
/* Devices we expect to fail diagnostics */
/* Devices where NCQ should be avoided */
@@ -4209,6 +4227,10 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
/* Apacer models with LPM issues */
{ "Apacer AS340*", NULL, ATA_QUIRK_NOLPM },
+ /* Silicon Motion models with LPM issues */
+ { "MD619HXCLDE3TC", "TCVAID", ATA_QUIRK_NOLPM },
+ { "MD619GXCLDE3TC", "TCV35D", ATA_QUIRK_NOLPM },
+
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_QUIRK_NOLPM },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_QUIRK_NOLPM },
@@ -5904,6 +5926,8 @@ void ata_port_probe(struct ata_port *ap)
struct ata_eh_info *ehi = &ap->link.eh_info;
unsigned long flags;
+ ata_acpi_port_power_on(ap);
+
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
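
For reference, quirk rows such as the DELLBOSS VD entry added above are glob patterns
matched against the IDENTIFY model and firmware-revision strings. Conceptually (a
simplification, not the actual ata_dev_quirks() matcher):

    if (glob_match("DELLBOSS VD", model) && glob_match("MV.R00-0", fwrev))
            dev->quirks |= ATA_QUIRK_MAX_SEC_8191;  /* cap transfers at 8191 sectors */
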
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2ded5e476d6e..721d3f270c8e 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -351,7 +351,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
/**
* ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
* @sdev: SCSI device for which BIOS geometry is to be determined
- * @bdev: block device associated with @sdev
+ * @unused: gendisk associated with @sdev
* @capacity: capacity of SCSI device
* @geom: location to which geometry will be output
*
@@ -366,7 +366,7 @@ EXPORT_SYMBOL_GPL(ata_common_sdev_groups);
* RETURNS:
* Zero.
*/
-int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+int ata_std_bios_param(struct scsi_device *sdev, struct gendisk *unused,
sector_t capacity, int geom[])
{
geom[0] = 255;
@@ -992,6 +992,13 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
return;
}
+ if (ata_id_is_locked(dev->id)) {
+ /* Security locked */
+ /* LOGICAL UNIT ACCESS NOT AUTHORIZED */
+ ata_scsi_set_sense(dev, cmd, DATA_PROTECT, 0x74, 0x71);
+ return;
+ }
+
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
ata_dev_dbg(dev,
"Missing result TF: reporting aborted command\n");
@@ -1095,6 +1102,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim,
*/
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
+ sdev->manage_restart = ata_acpi_dev_manage_restart(dev);
sdev->force_runtime_start_on_system_start = 1;
}
@@ -4894,8 +4902,10 @@ void ata_scsi_dev_rescan(struct work_struct *work)
spin_unlock_irqrestore(ap->lock, flags);
if (do_resume) {
ret = scsi_resume_device(sdev);
- if (ret == -EWOULDBLOCK)
+ if (ret == -EWOULDBLOCK) {
+ scsi_device_put(sdev);
goto unlock_scan;
+ }
dev->flags &= ~ATA_DFLAG_RESUMING;
}
ret = scsi_rescan_device(sdev);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 7fc407255eb4..785b6e371abf 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -614,7 +614,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
offset = qc->cursg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
@@ -631,7 +631,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned int split_len = PAGE_SIZE - offset;
ata_pio_xfer(qc, page, offset, split_len);
- ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
+ ata_pio_xfer(qc, page + 1, 0, count - split_len);
} else {
ata_pio_xfer(qc, page, offset, count);
}
@@ -751,7 +751,7 @@ next_sg:
offset = sg->offset + qc->cursg_ofs;
/* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
+ page += offset >> PAGE_SHIFT;
offset %= PAGE_SIZE;
/* don't overrun current sg */
@@ -3191,7 +3191,8 @@ void ata_sff_port_init(struct ata_port *ap)
int __init ata_sff_init(void)
{
- ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
+ ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM | WQ_PERCPU,
+ WQ_MAX_ACTIVE);
if (!ata_sff_wq)
return -ENOMEM;
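
The nth_page() removals rest on the premise behind the tree-wide cleanup: the pages of a
single scatterlist entry share a contiguous memmap, so page arithmetic is plain pointer
math. The two-step decomposition used in all three hunks:

    page += offset >> PAGE_SHIFT;   /* advance whole pages */
    offset %= PAGE_SIZE;            /* byte offset within that page */
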
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index e5b977a8d3e1..0e7ecac73680 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -130,6 +130,8 @@ extern void ata_acpi_on_disable(struct ata_device *dev);
extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
extern void ata_acpi_bind_port(struct ata_port *ap);
extern void ata_acpi_bind_dev(struct ata_device *dev);
+extern void ata_acpi_port_power_on(struct ata_port *ap);
+extern bool ata_acpi_dev_manage_restart(struct ata_device *dev);
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
#else
static inline void ata_acpi_dissociate(struct ata_host *host) { }
@@ -140,6 +142,8 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
pm_message_t state) { }
static inline void ata_acpi_bind_port(struct ata_port *ap) {}
static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
+static inline void ata_acpi_port_power_on(struct ata_port *ap) {}
+static inline bool ata_acpi_dev_manage_restart(struct ata_device *dev) { return false; }
#endif
/* libata-scsi.c */
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 042f6ad1f7c6..fc762dcc61bf 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -75,6 +75,7 @@
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
@@ -632,9 +633,9 @@ static void it821x_display_disk(struct ata_port *ap, int n, u8 *buf)
cbl = "";
if (mode)
- snprintf(mbuf, 8, "%5s%d", mtype, mode - 1);
+ snprintf(mbuf, sizeof(mbuf), "%5s%d", mtype, mode - 1);
else
- strcpy(mbuf, "PIO");
+ strscpy(mbuf, "PIO");
if (buf[52] == 4)
ata_port_info(ap, "%d: %-6s %-8s %s %s\n",
n, mbuf, types[buf[52]], id, cbl);
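
Both changes derive the bound from the destination array instead of a hard-coded 8; the
two-argument strscpy() form infers the size at compile time:

    char mbuf[8];

    strscpy(mbuf, "PIO");   /* expands to strscpy(mbuf, "PIO", sizeof(mbuf)) */
    snprintf(mbuf, sizeof(mbuf), "%5s%d", mtype, mode - 1);
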
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index cf3810933a27..caefcd8c4b3c 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -344,6 +344,7 @@ static const struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
+ PCMCIA_DEVICE_PROD_ID2("PCMCIA ATA/ATAPI Adapter", 0x888d7b73),
PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 4fea1149e003..f62e38571440 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -1374,7 +1374,9 @@ fore200e_open(struct atm_vcc *vcc)
vcc->dev_data = NULL;
+ mutex_lock(&fore200e->rate_mtx);
fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
+ mutex_unlock(&fore200e->rate_mtx);
kfree(fore200e_vcc);
return -EINVAL;
diff --git a/drivers/auxdisplay/line-display.c b/drivers/auxdisplay/line-display.c
index 8590a4cd21e0..4e22373fcc1a 100644
--- a/drivers/auxdisplay/line-display.c
+++ b/drivers/auxdisplay/line-display.c
@@ -6,20 +6,23 @@
* Author: Paul Burton <paul.burton@mips.com>
*
* Copyright (C) 2021 Glider bv
+ * Copyright (C) 2025 Jean-François Lessard
*/
#ifndef CONFIG_PANEL_BOOT_MESSAGE
#include <generated/utsrelease.h>
#endif
-#include <linux/container_of.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kstrtox.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/timer.h>
@@ -32,6 +35,87 @@
#define DEFAULT_SCROLL_RATE (HZ / 2)
/**
+ * struct linedisp_attachment - Holds the device to linedisp mapping
+ * @list: List entry for the linedisp_attachments list
+ * @device: Pointer to the device where linedisp attributes are added
+ * @linedisp: Pointer to the linedisp mapped to the device
+ * @direct: true for directly attached device using linedisp_attach(),
+ * false for child registered device using linedisp_register()
+ */
+struct linedisp_attachment {
+ struct list_head list;
+ struct device *device;
+ struct linedisp *linedisp;
+ bool direct;
+};
+
+static LIST_HEAD(linedisp_attachments);
+static DEFINE_SPINLOCK(linedisp_attachments_lock);
+
+static int create_attachment(struct device *dev, struct linedisp *linedisp, bool direct)
+{
+ struct linedisp_attachment *attachment;
+
+ attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+ if (!attachment)
+ return -ENOMEM;
+
+ attachment->device = dev;
+ attachment->linedisp = linedisp;
+ attachment->direct = direct;
+
+ guard(spinlock)(&linedisp_attachments_lock);
+ list_add(&attachment->list, &linedisp_attachments);
+
+ return 0;
+}
+
+static struct linedisp *delete_attachment(struct device *dev, bool direct)
+{
+ struct linedisp_attachment *attachment;
+ struct linedisp *linedisp;
+
+ guard(spinlock)(&linedisp_attachments_lock);
+
+ list_for_each_entry(attachment, &linedisp_attachments, list) {
+ if (attachment->device == dev &&
+ attachment->direct == direct)
+ break;
+ }
+
+ if (list_entry_is_head(attachment, &linedisp_attachments, list))
+ return NULL;
+
+ linedisp = attachment->linedisp;
+ list_del(&attachment->list);
+ kfree(attachment);
+
+ return linedisp;
+}
+
+static struct linedisp *to_linedisp(struct device *dev)
+{
+ struct linedisp_attachment *attachment;
+
+ guard(spinlock)(&linedisp_attachments_lock);
+
+ list_for_each_entry(attachment, &linedisp_attachments, list) {
+ if (attachment->device == dev)
+ break;
+ }
+
+ if (list_entry_is_head(attachment, &linedisp_attachments, list))
+ return NULL;
+
+ return attachment->linedisp;
+}
+
+static inline bool should_scroll(struct linedisp *linedisp)
+{
+ return linedisp->message_len > linedisp->num_chars && linedisp->scroll_rate;
+}
+
+/**
* linedisp_scroll() - scroll the display by a character
* @t: really a pointer to the private data structure
*
@@ -62,8 +146,7 @@ static void linedisp_scroll(struct timer_list *t)
linedisp->scroll_pos %= linedisp->message_len;
/* rearm the timer */
- if (linedisp->message_len > num_chars && linedisp->scroll_rate)
- mod_timer(&linedisp->timer, jiffies + linedisp->scroll_rate);
+ mod_timer(&linedisp->timer, jiffies + linedisp->scroll_rate);
}
/**
@@ -113,8 +196,16 @@ static int linedisp_display(struct linedisp *linedisp, const char *msg,
linedisp->message_len = count;
linedisp->scroll_pos = 0;
- /* update the display */
- linedisp_scroll(&linedisp->timer);
+ if (should_scroll(linedisp)) {
+ /* display scrolling message */
+ linedisp_scroll(&linedisp->timer);
+ } else {
+ /* display static message */
+ memset(linedisp->buf, ' ', linedisp->num_chars);
+ memcpy(linedisp->buf, linedisp->message,
+ umin(linedisp->num_chars, linedisp->message_len));
+ linedisp->ops->update(linedisp);
+ }
return 0;
}
@@ -133,7 +224,7 @@ static int linedisp_display(struct linedisp *linedisp, const char *msg,
static ssize_t message_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
return sysfs_emit(buf, "%s\n", linedisp->message);
}
@@ -152,7 +243,7 @@ static ssize_t message_show(struct device *dev, struct device_attribute *attr,
static ssize_t message_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
int err;
err = linedisp_display(linedisp, buf, count);
@@ -161,10 +252,20 @@ static ssize_t message_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(message);
+static ssize_t num_chars_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct linedisp *linedisp = to_linedisp(dev);
+
+ return sysfs_emit(buf, "%u\n", linedisp->num_chars);
+}
+
+static DEVICE_ATTR_RO(num_chars);
+
static ssize_t scroll_step_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
return sysfs_emit(buf, "%u\n", jiffies_to_msecs(linedisp->scroll_rate));
}
@@ -173,7 +274,7 @@ static ssize_t scroll_step_ms_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
unsigned int ms;
int err;
@@ -181,12 +282,12 @@ static ssize_t scroll_step_ms_store(struct device *dev,
if (err)
return err;
+ timer_delete_sync(&linedisp->timer);
+
linedisp->scroll_rate = msecs_to_jiffies(ms);
- if (linedisp->message && linedisp->message_len > linedisp->num_chars) {
- timer_delete_sync(&linedisp->timer);
- if (linedisp->scroll_rate)
- linedisp_scroll(&linedisp->timer);
- }
+
+ if (should_scroll(linedisp))
+ linedisp_scroll(&linedisp->timer);
return count;
}
@@ -195,7 +296,7 @@ static DEVICE_ATTR_RW(scroll_step_ms);
static ssize_t map_seg_show(struct device *dev, struct device_attribute *attr, char *buf)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
struct linedisp_map *map = linedisp->map;
memcpy(buf, &map->map, map->size);
@@ -205,7 +306,7 @@ static ssize_t map_seg_show(struct device *dev, struct device_attribute *attr, c
static ssize_t map_seg_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
struct linedisp_map *map = linedisp->map;
if (count != map->size)
@@ -223,6 +324,7 @@ static DEVICE_ATTR(map_seg14, 0644, map_seg_show, map_seg_store);
static struct attribute *linedisp_attrs[] = {
&dev_attr_message.attr,
+ &dev_attr_num_chars.attr,
&dev_attr_scroll_step_ms.attr,
&dev_attr_map_seg7.attr,
&dev_attr_map_seg14.attr,
@@ -232,7 +334,7 @@ static struct attribute *linedisp_attrs[] = {
static umode_t linedisp_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
struct linedisp_map *map = linedisp->map;
umode_t mode = attr->mode;
@@ -263,7 +365,7 @@ static DEFINE_IDA(linedisp_id);
static void linedisp_release(struct device *dev)
{
- struct linedisp *linedisp = container_of(dev, struct linedisp, dev);
+ struct linedisp *linedisp = to_linedisp(dev);
kfree(linedisp->map);
kfree(linedisp->message);
@@ -321,12 +423,101 @@ static int linedisp_init_map(struct linedisp *linedisp)
#endif
/**
+ * linedisp_attach - attach a character line display
+ * @linedisp: pointer to character line display structure
+ * @dev: pointer to the device to attach to
+ * @num_chars: the number of characters that can be displayed
+ * @ops: character line display operations
+ *
+ * Directly attach the line-display sysfs attributes to the passed device.
+ * The caller is responsible for calling linedisp_detach() to release resources
+ * after use.
+ *
+ * Return: zero on success, else a negative error code.
+ */
+int linedisp_attach(struct linedisp *linedisp, struct device *dev,
+ unsigned int num_chars, const struct linedisp_ops *ops)
+{
+ int err;
+
+ memset(linedisp, 0, sizeof(*linedisp));
+ linedisp->ops = ops;
+ linedisp->num_chars = num_chars;
+ linedisp->scroll_rate = DEFAULT_SCROLL_RATE;
+
+ linedisp->buf = kzalloc(linedisp->num_chars, GFP_KERNEL);
+ if (!linedisp->buf)
+ return -ENOMEM;
+
+ /* initialise a character mapping, if required */
+ err = linedisp_init_map(linedisp);
+ if (err)
+ goto out_free_buf;
+
+ /* initialise a timer for scrolling the message */
+ timer_setup(&linedisp->timer, linedisp_scroll, 0);
+
+ err = create_attachment(dev, linedisp, true);
+ if (err)
+ goto out_del_timer;
+
+ /* display a default message */
+ err = linedisp_display(linedisp, LINEDISP_INIT_TEXT, -1);
+ if (err)
+ goto out_del_attach;
+
+ /* add attribute groups to target device */
+ err = device_add_groups(dev, linedisp_groups);
+ if (err)
+ goto out_del_attach;
+
+ return 0;
+
+out_del_attach:
+ delete_attachment(dev, true);
+out_del_timer:
+ timer_delete_sync(&linedisp->timer);
+out_free_buf:
+ kfree(linedisp->buf);
+ return err;
+}
+EXPORT_SYMBOL_NS_GPL(linedisp_attach, "LINEDISP");
+
+/**
+ * linedisp_detach - detach a character line display
+ * @dev: pointer to the device to detach from, previously attached
+ * with linedisp_attach()
+ */
+void linedisp_detach(struct device *dev)
+{
+ struct linedisp *linedisp;
+
+ linedisp = delete_attachment(dev, true);
+ if (!linedisp)
+ return;
+
+ timer_delete_sync(&linedisp->timer);
+
+ device_remove_groups(dev, linedisp_groups);
+
+ kfree(linedisp->map);
+ kfree(linedisp->message);
+ kfree(linedisp->buf);
+}
+EXPORT_SYMBOL_NS_GPL(linedisp_detach, "LINEDISP");
+
+/**
* linedisp_register - register a character line display
* @linedisp: pointer to character line display structure
* @parent: parent device
* @num_chars: the number of characters that can be displayed
* @ops: character line display operations
*
+ * Register the line-display sysfs attributes to a new device named
+ * "linedisp.N" added to the passed parent device.
+ * The caller is responsible for calling linedisp_unregister() to release
+ * resources after use.
+ *
* Return: zero on success, else a negative error code.
*/
int linedisp_register(struct linedisp *linedisp, struct device *parent,
@@ -362,19 +553,23 @@ int linedisp_register(struct linedisp *linedisp, struct device *parent,
/* initialise a timer for scrolling the message */
timer_setup(&linedisp->timer, linedisp_scroll, 0);
- err = device_add(&linedisp->dev);
+ err = create_attachment(&linedisp->dev, linedisp, false);
if (err)
goto out_del_timer;
/* display a default message */
err = linedisp_display(linedisp, LINEDISP_INIT_TEXT, -1);
if (err)
- goto out_del_dev;
+ goto out_del_attach;
+
+ err = device_add(&linedisp->dev);
+ if (err)
+ goto out_del_attach;
return 0;
-out_del_dev:
- device_del(&linedisp->dev);
+out_del_attach:
+ delete_attachment(&linedisp->dev, false);
out_del_timer:
timer_delete_sync(&linedisp->timer);
out_put_device:
@@ -391,6 +586,7 @@ EXPORT_SYMBOL_NS_GPL(linedisp_register, "LINEDISP");
void linedisp_unregister(struct linedisp *linedisp)
{
device_del(&linedisp->dev);
+ delete_attachment(&linedisp->dev, false);
timer_delete_sync(&linedisp->timer);
put_device(&linedisp->dev);
}
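
The attachment list replaces container_of(dev, ...) so that the attributes can live on
either a dedicated linedisp.N device or an arbitrary directly-attached device. The
lookups lean on scope-based locking from <linux/cleanup.h>; a minimal sketch of that
idiom, equivalent to to_linedisp() above:

    static struct linedisp *find_linedisp(struct device *dev)
    {
            struct linedisp_attachment *att;

            guard(spinlock)(&linedisp_attachments_lock);    /* unlocks on any return */

            list_for_each_entry(att, &linedisp_attachments, list)
                    if (att->device == dev)
                            return att->linedisp;

            return NULL;
    }
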
diff --git a/drivers/auxdisplay/line-display.h b/drivers/auxdisplay/line-display.h
index 4348d7a2f69a..36853b639711 100644
--- a/drivers/auxdisplay/line-display.h
+++ b/drivers/auxdisplay/line-display.h
@@ -6,6 +6,7 @@
* Author: Paul Burton <paul.burton@mips.com>
*
* Copyright (C) 2021 Glider bv
+ * Copyright (C) 2025 Jean-François Lessard
*/
#ifndef _LINEDISP_H
@@ -81,6 +82,9 @@ struct linedisp {
unsigned int id;
};
+int linedisp_attach(struct linedisp *linedisp, struct device *dev,
+ unsigned int num_chars, const struct linedisp_ops *ops);
+void linedisp_detach(struct device *dev);
int linedisp_register(struct linedisp *linedisp, struct device *parent,
unsigned int num_chars, const struct linedisp_ops *ops);
void linedisp_unregister(struct linedisp *linedisp);
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 064eb52ff7e2..1786d87b29e2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -167,6 +167,12 @@ config PM_QOS_KUNIT_TEST
depends on KUNIT=y
default KUNIT_ALL_TESTS
+config PM_RUNTIME_KUNIT_TEST
+ tristate "KUnit Tests for runtime PM" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on PM
+ default KUNIT_ALL_TESTS
+
config HMEM_REPORTING
bool
default n
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb45..84ec92bff642 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
* frequency (by keeping the initial capacity_freq_ref value).
*/
cpu_clk = of_clk_get(cpu_node, 0);
- if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+ if (!IS_ERR_OR_NULL(cpu_clk)) {
per_cpu(capacity_freq_ref, cpu) =
clk_get_rate(cpu_clk) / HZ_PER_KHZ;
clk_put(cpu_clk);
@@ -823,12 +823,106 @@ void remove_cpu_topology(unsigned int cpu)
clear_cpu_topology(cpu);
}
+#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
+struct cpu_smt_info {
+ unsigned int thread_num;
+ int core_id;
+};
+
+static bool __init acpi_cpu_is_threaded(int cpu)
+{
+ int is_threaded = acpi_pptt_cpu_is_thread(cpu);
+
+ /*
+ * if the PPTT doesn't have thread information, check for architecture
+ * specific fallback if available
+ */
+ if (is_threaded < 0)
+ is_threaded = arch_cpu_is_threaded();
+
+ return !!is_threaded;
+}
+
+/*
+ * Propagate the topology information of the processor_topology_node tree to the
+ * cpu_topology array.
+ */
__weak int __init parse_acpi_topology(void)
{
+ unsigned int max_smt_thread_num = 1;
+ struct cpu_smt_info *entry;
+ struct xarray hetero_cpu;
+ unsigned long hetero_id;
+ int cpu, topology_id;
+
+ if (acpi_disabled)
+ return 0;
+
+ xa_init(&hetero_cpu);
+
+ for_each_possible_cpu(cpu) {
+ topology_id = find_acpi_cpu_topology(cpu, 0);
+ if (topology_id < 0)
+ return topology_id;
+
+ if (acpi_cpu_is_threaded(cpu)) {
+ cpu_topology[cpu].thread_id = topology_id;
+ topology_id = find_acpi_cpu_topology(cpu, 1);
+ cpu_topology[cpu].core_id = topology_id;
+
+ /*
+ * In the PPTT, CPUs below a node with the 'identical
+ * implementation' flag have the same number of threads.
+ * Count the number of threads for only one CPU (i.e.
+ * one core_id) among those with the same hetero_id.
+ * See the comment of find_acpi_cpu_topology_hetero_id()
+ * for more details.
+ *
+ * One entry is created for each node having:
+ * - the 'identical implementation' flag
+ * - its parent not having the flag
+ */
+ hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
+ entry = xa_load(&hetero_cpu, hetero_id);
+ if (!entry) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ WARN_ON_ONCE(!entry);
+
+ if (entry) {
+ entry->core_id = topology_id;
+ entry->thread_num = 1;
+ xa_store(&hetero_cpu, hetero_id,
+ entry, GFP_KERNEL);
+ }
+ } else if (entry->core_id == topology_id) {
+ entry->thread_num++;
+ }
+ } else {
+ cpu_topology[cpu].thread_id = -1;
+ cpu_topology[cpu].core_id = topology_id;
+ }
+ topology_id = find_acpi_cpu_topology_cluster(cpu);
+ cpu_topology[cpu].cluster_id = topology_id;
+ topology_id = find_acpi_cpu_topology_package(cpu);
+ cpu_topology[cpu].package_id = topology_id;
+ }
+
+ /*
+ * This is a short loop since the number of XArray elements is the
+ * number of heterogeneous CPU clusters. On a homogeneous system
+ * there's only one entry in the XArray.
+ */
+ xa_for_each(&hetero_cpu, hetero_id, entry) {
+ max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
+ xa_erase(&hetero_cpu, hetero_id);
+ kfree(entry);
+ }
+
+ cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);
+ xa_destroy(&hetero_cpu);
return 0;
}
-#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
int cpu, ret;
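
A worked example of the xarray accounting, assuming a hypothetical ACPI system with one
SMT-4 cluster and one single-threaded cluster: the per-CPU loop ends with two entries,
thread_num 4 and 1, and the reduction leaves max_smt_thread_num = 4:

    xa_for_each(&hetero_cpu, hetero_id, entry)
            max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);

    /* -> cpu_smt_set_num_threads(4, 4): SMT control exposes 4 threads per core */
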
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 12ffdd843756..04bdbff4dbe5 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -171,17 +171,18 @@
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
- for (; id->name[0]; id++) {
- const char *p = strrchr(dev_name(&auxdev->dev), '.');
- int match_size;
+ const char *auxdev_name = dev_name(&auxdev->dev);
+ const char *p = strrchr(auxdev_name, '.');
+ int match_size;
- if (!p)
- continue;
- match_size = p - dev_name(&auxdev->dev);
+ if (!p)
+ return NULL;
+ match_size = p - auxdev_name;
+ for (; id->name[0]; id++) {
/* use dev_name(&auxdev->dev) prefix before last '.' char to match to */
if (strlen(id->name) == match_size &&
- !strncmp(dev_name(&auxdev->dev), id->name, match_size))
+ !strncmp(auxdev_name, id->name, match_size))
return id;
}
return NULL;
@@ -217,17 +218,14 @@ static int auxiliary_bus_probe(struct device *dev)
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
- ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
+ ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
+ PD_FLAG_DETACH_POWER_OFF);
if (ret) {
dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
return ret;
}
- ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
- if (ret)
- dev_pm_domain_detach(dev, true);
-
- return ret;
+ return auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
}
static void auxiliary_bus_remove(struct device *dev)
@@ -237,7 +235,6 @@ static void auxiliary_bus_remove(struct device *dev)
if (auxdrv->remove)
auxdrv->remove(auxdev);
- dev_pm_domain_detach(dev, true);
}
static void auxiliary_bus_shutdown(struct device *dev)
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 123031a757d9..430cbefbc97f 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -85,6 +85,18 @@ struct driver_private {
};
#define to_driver(obj) container_of(obj, struct driver_private, kobj)
+#ifdef CONFIG_RUST
+/**
+ * struct driver_type - Representation of a Rust driver type.
+ */
+struct driver_type {
+ /**
+ * @id: Representation of core::any::TypeId.
+ */
+ u8 id[16];
+} __packed;
+#endif
+
/**
* struct device_private - structure to hold the private to the driver core portions of the device structure.
*
@@ -100,6 +112,7 @@ struct driver_private {
* @async_driver - pointer to device driver awaiting probe via async_probe
* @device - pointer back to the struct device that this structure is
* associated with.
+ * @driver_type - The type of the bound Rust driver.
* @dead - This device is currently either in the process of or has been
* removed from the system. Any asynchronous events scheduled for this
* device should exit without taking any action.
@@ -116,6 +129,9 @@ struct device_private {
const struct device_driver *async_driver;
char *deferred_probe_reason;
struct device *device;
+#ifdef CONFIG_RUST
+ struct driver_type driver_type;
+#endif
u8 dead:1;
};
#define to_device_private_parent(obj) \
@@ -248,9 +264,18 @@ void device_links_driver_cleanup(struct device *dev);
void device_links_no_driver(struct device *dev);
bool device_links_busy(struct device *dev);
void device_links_unbind_consumers(struct device *dev);
+bool device_link_flag_is_sync_state_only(u32 flags);
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
+#define dev_for_each_link_to_supplier(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+ device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+ device_links_read_lock_held())
+
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 5e75e1bce551..9eb7771706f0 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -334,6 +334,19 @@ static struct device *next_device(struct klist_iter *i)
return dev;
}
+static struct device *prev_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_prev(i);
+ struct device *dev = NULL;
+ struct device_private *dev_prv;
+
+ if (n) {
+ dev_prv = to_device_private_bus(n);
+ dev = dev_prv->device;
+ }
+ return dev;
+}
+
/**
* bus_for_each_dev - device iterator.
* @bus: bus type.
@@ -414,6 +427,31 @@ struct device *bus_find_device(const struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_find_device);
+struct device *bus_find_device_reverse(const struct bus_type *bus,
+ struct device *start, const void *data,
+ device_match_t match)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!sp)
+ return NULL;
+
+ klist_iter_init_node(&sp->klist_devices, &i,
+ (start ? &start->p->knode_bus : NULL));
+ while ((dev = prev_device(&i))) {
+ if (match(dev, data)) {
+ get_device(dev);
+ break;
+ }
+ }
+ klist_iter_exit(&i);
+ subsys_put(sp);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(bus_find_device_reverse);
+
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -533,8 +571,7 @@ void bus_probe_device(struct device *dev)
if (!sp)
return;
- if (sp->drivers_autoprobe)
- device_initial_probe(dev);
+ device_initial_probe(dev);
mutex_lock(&sp->mutex);
list_for_each_entry(sif, &sp->interfaces, node)
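
A sketch of a caller for the new reverse iterator, using the existing device_match_name()
helper; the bus and device name are placeholders:

    struct device *dev;

    dev = bus_find_device_reverse(&platform_bus_type, NULL,
                                  "my-device", device_match_name);
    if (dev) {
            /* ... most recently added matching device ... */
            put_device(dev);        /* drop the reference the iterator took */
    }
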
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d22d6b23e758..40de2f51a1b1 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -287,7 +287,7 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \
DL_FLAG_CYCLE | \
DL_FLAG_MANAGED)
-static inline bool device_link_flag_is_sync_state_only(u32 flags)
+bool device_link_flag_is_sync_state_only(u32 flags)
{
return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY;
}
@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
return 0;
if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
- dev_warn(sup, "sync_state() pending due to %s\n",
+ dev_info(sup, "sync_state() pending due to %s\n",
dev_name(link->consumer));
return 0;
}
@@ -3994,8 +3994,8 @@ const char *device_get_devnode(const struct device *dev,
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -4024,8 +4024,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
/**
* device_for_each_child_reverse - device child iterator in reversed order.
* @parent: parent struct device.
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@@ -4055,8 +4055,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
* device_for_each_child_reverse_from - device child iterator in reversed order.
* @parent: parent struct device.
* @from: optional starting point in child list
- * @fn: function to be called for each device.
* @data: data for the callback.
+ * @fn: function to be called for each device.
*
* Iterate over @parent's child devices, starting at @from, and call @fn
* for each, passing it @data. This helper is identical to
@@ -4089,8 +4089,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
- * @match: Callback function to check device
* @data: Data to pass to match function
+ * @match: Callback function to check device
*
* This is similar to the device_for_each_child() function above, but it
* returns a reference to a device that is 'found' for later use, as
@@ -4138,7 +4138,7 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
- device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
+ device_link_wq = alloc_workqueue("device_link_wq", WQ_PERCPU, 0);
if (!device_link_wq)
goto wq_err;
@@ -5278,6 +5278,25 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(device_set_node);
+/**
+ * get_dev_from_fwnode - Obtain a reference count of the struct device the
+ * struct fwnode_handle is associated with.
+ * @fwnode: The pointer to the struct fwnode_handle to obtain the struct device
+ * reference count of.
+ *
+ * This function obtains a reference count of the device the device pointer
+ * embedded in the struct fwnode_handle points to.
+ *
+ * Note that the struct device pointer embedded in struct fwnode_handle does
+ * *not* have a reference count of the struct device itself.
+ *
+ * Hence, it is a UAF (and thus a bug) to call this function if the caller can't
+ * guarantee that the last reference count of the corresponding struct device is
+ * not dropped concurrently.
+ *
+ * This is possible since struct fwnode_handle has its own reference count and
+ * hence can out-live the struct device it is associated with.
+ */
struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
{
return get_device((fwnode)->dev);
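
Given the lifetime caveat documented above, a correct caller pairs the lookup with
put_device() and must otherwise guarantee the device cannot be released concurrently; a
minimal sketch:

    struct device *dev = get_dev_from_fwnode(fwnode);

    if (dev) {
            /* dev is guaranteed alive here */
            put_device(dev);
    }
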
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 008da0354fba..c6c57b6f61c6 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -300,13 +300,30 @@ static ssize_t print_cpus_isolated(struct device *dev,
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
+static ssize_t housekeeping_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct cpumask *hk_mask;
+
+ hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
+
+ if (housekeeping_enabled(HK_TYPE_KERNEL_NOISE))
+ return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(hk_mask));
+ return sysfs_emit(buf, "\n");
+}
+static DEVICE_ATTR_RO(housekeeping);
+
#ifdef CONFIG_NO_HZ_FULL
-static ssize_t print_cpus_nohz_full(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t nohz_full_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
+ if (cpumask_available(tick_nohz_full_mask))
+ return sysfs_emit(buf, "%*pbl\n",
+ cpumask_pr_args(tick_nohz_full_mask));
+ return sysfs_emit(buf, "\n");
}
-static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
+static DEVICE_ATTR_RO(nohz_full);
#endif
#ifdef CONFIG_CRASH_HOTPLUG
@@ -325,7 +342,7 @@ static void cpu_device_release(struct device *dev)
* This is an empty function to prevent the driver core from spitting a
* warning at us. Yes, I know this is directly opposite of what the
* documentation for the driver core and kobjects say, and the author
- * of this code has already been publically ridiculed for doing
+ * of this code has already been publicly ridiculed for doing
* something as foolish as this. However, at this point in time, it is
* the only way to handle the issue of statically allocated cpu
* devices. The different architectures will have their cpu device
@@ -505,6 +522,7 @@ static struct attribute *cpu_root_attrs[] = {
&dev_attr_offline.attr,
&dev_attr_enabled.attr,
&dev_attr_isolated.attr,
+ &dev_attr_housekeeping.attr,
#ifdef CONFIG_NO_HZ_FULL
&dev_attr_nohz_full.attr,
#endif
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 13ab98e033ea..349f31bedfa1 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -193,7 +193,7 @@ void driver_deferred_probe_trigger(void)
* Kick the re-probe thread. It may already be scheduled, but it is
* safe to kick it again.
*/
- queue_work(system_unbound_wq, &deferred_probe_work);
+ queue_work(system_dfl_wq, &deferred_probe_work);
}
/**
@@ -1077,7 +1077,15 @@ EXPORT_SYMBOL_GPL(device_attach);
void device_initial_probe(struct device *dev)
{
- __device_attach(dev, true);
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (!sp)
+ return;
+
+ if (sp->drivers_autoprobe)
+ __device_attach(dev, true);
+
+ subsys_put(sp);
}
/*
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 37faf6156d7c..55bdc7f5e59d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -23,50 +23,46 @@ struct devcd_entry {
void *data;
size_t datalen;
/*
- * Here, mutex is required to serialize the calls to del_wk work between
- * user/kernel space which happens when devcd is added with device_add()
- * and that sends uevent to user space. User space reads the uevents,
- * and calls to devcd_data_write() which try to modify the work which is
- * not even initialized/queued from devcoredump.
+ * There are 2 races for which mutex is required.
*
+ * The first race is between device creation and a userspace write
+ * that schedules immediate destruction.
*
+ * This race is handled by arming the timer before device creation, but
+ * when device creation fails the timer still exists.
*
- * cpu0(X) cpu1(Y)
+ * To solve this, hold the mutex during device_add(), and set
+ * init_completed on success before releasing the mutex.
*
- * dev_coredump() uevent sent to user space
- * device_add() ======================> user space process Y reads the
- * uevents writes to devcd fd
- * which results into writes to
+ * That way the timer can fire before device_add() completes, but it
+ * will do nothing unless init_completed is set. On the device_add()
+ * failure path the timer is cancelled outright.
*
- * devcd_data_write()
- * mod_delayed_work()
- * try_to_grab_pending()
- * timer_delete()
- * debug_assert_init()
- * INIT_DELAYED_WORK()
- * schedule_delayed_work()
- *
- *
- * Also, mutex alone would not be enough to avoid scheduling of
- * del_wk work after it get flush from a call to devcd_free()
- * mentioned as below.
- *
- * disabled_store()
- * devcd_free()
- * mutex_lock() devcd_data_write()
- * flush_delayed_work()
- * mutex_unlock()
- * mutex_lock()
- * mod_delayed_work()
- * mutex_unlock()
- * So, delete_work flag is required.
+ * The second race involves multiple parallel invocations of devcd_free();
+ * a 'deleted' flag ensures only one of them calls the destructor.
*/
struct mutex mutex;
- bool delete_work;
+ bool init_completed, deleted;
struct module *owner;
ssize_t (*read)(char *buffer, loff_t offset, size_t count,
void *data, size_t datalen);
void (*free)(void *data);
+ /*
+ * If nothing interferes and device_add() returns success,
+ * del_wk will destroy the device after the timer fires.
+ *
+ * Multiple userspace processes can interfere with the timer:
+ * - Writing to the coredump will reschedule the timer to run immediately,
+ * if still armed.
+ *
+ * This is handled with "if (cancel_delayed_work()) {
+ * schedule_delayed_work() }", which prevents re-arming the work
+ * after it has already fired.
+ * - Writing to /sys/class/devcoredump/disabled will destroy the
+ * coredump synchronously.
+ * This is handled by using disable_delayed_work_sync(), and then
+ * checking if deleted flag is set with &devcd->mutex held.
+ */
struct delayed_work del_wk;
struct device *failing_dev;
};
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
kfree(devcd);
}
+static void __devcd_del(struct devcd_entry *devcd)
+{
+ devcd->deleted = true;
+ device_del(&devcd->devcd_dev);
+ put_device(&devcd->devcd_dev);
+}
+
static void devcd_del(struct work_struct *wk)
{
struct devcd_entry *devcd;
+ bool init_completed;
devcd = container_of(wk, struct devcd_entry, del_wk.work);
- device_del(&devcd->devcd_dev);
- put_device(&devcd->devcd_dev);
+ /* devcd->mutex serializes against dev_coredumpm_timeout */
+ mutex_lock(&devcd->mutex);
+ init_completed = devcd->init_completed;
+ mutex_unlock(&devcd->mutex);
+
+ if (init_completed)
+ __devcd_del(devcd);
}
static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct devcd_entry *devcd = dev_to_devcd(dev);
- mutex_lock(&devcd->mutex);
- if (!devcd->delete_work) {
- devcd->delete_work = true;
- mod_delayed_work(system_wq, &devcd->del_wk, 0);
- }
- mutex_unlock(&devcd->mutex);
+ /*
+ * Although it's tempting to use mod_delayed work here,
+ * that will cause a reschedule if the timer already fired.
+ */
+ if (cancel_delayed_work(&devcd->del_wk))
+ schedule_delayed_work(&devcd->del_wk, 0);
return count;
}
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
{
struct devcd_entry *devcd = dev_to_devcd(dev);
+ /*
+ * To prevent a race with devcd_data_write(), disable work and
+ * complete manually instead.
+ *
+ * We cannot rely on the return value of
+ * disable_delayed_work_sync() here, because it might be in the
+ * middle of a cancel_delayed_work + schedule_delayed_work pair.
+ *
+ * devcd->mutex here guards against multiple parallel invocations
+ * of devcd_free().
+ */
+ disable_delayed_work_sync(&devcd->del_wk);
mutex_lock(&devcd->mutex);
- if (!devcd->delete_work)
- devcd->delete_work = true;
-
- flush_delayed_work(&devcd->del_wk);
+ if (!devcd->deleted)
+ __devcd_del(devcd);
mutex_unlock(&devcd->mutex);
return 0;
}
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
* put_device() <- last reference
* error = fn(dev, data) devcd_dev_release()
* devcd_free(dev, data) kfree(devcd)
- * mutex_lock(&devcd->mutex);
*
*
* In the above diagram, it looks like disabled_store() would be racing with parallelly
- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * running devcd_del() and result in memory abort after dropping its last reference with
* put_device(). However, this will not happens as fn(dev, data) runs
* with its own reference to device via klist_node so it is not its last reference.
* so, above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
devcd->read = read;
devcd->free = free;
devcd->failing_dev = get_device(dev);
- devcd->delete_work = false;
+ devcd->deleted = false;
mutex_init(&devcd->mutex);
device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
atomic_inc_return(&devcd_count));
devcd->devcd_dev.class = &devcd_class;
- mutex_lock(&devcd->mutex);
dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+ /* devcd->mutex prevents devcd_del() completing until init finishes */
+ mutex_lock(&devcd->mutex);
+ devcd->init_completed = false;
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, timeout);
+
if (device_add(&devcd->devcd_dev))
goto put_device;
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
dev_set_uevent_suppress(&devcd->devcd_dev, false);
kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
- INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
- schedule_delayed_work(&devcd->del_wk, timeout);
+
+ /*
+ * Safe to run devcd_del() now that we are done with devcd_dev.
+ * Alternatively we could have taken a ref on devcd_dev before
+ * dropping the lock.
+ */
+ devcd->init_completed = true;
mutex_unlock(&devcd->mutex);
return;
put_device:
- put_device(&devcd->devcd_dev);
mutex_unlock(&devcd->mutex);
+ cancel_delayed_work_sync(&devcd->del_wk);
+ put_device(&devcd->devcd_dev);
+
put_module:
module_put(owner);
free:
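
The write-side fix reduces to a reusable rule: never blindly mod_delayed_work() a timer
that may already have fired; re-arm only when cancellation succeeded. The idiom in
isolation:

    /* Expedite pending teardown without resurrecting finished work. */
    if (cancel_delayed_work(&devcd->del_wk))
            schedule_delayed_work(&devcd->del_wk, 0);
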
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index ff55e1bcfa30..f54db6d138ab 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -1117,6 +1117,27 @@ void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(devm_kmemdup);
+/**
+ * devm_kmemdup_const - conditionally duplicate and manage a region of memory
+ *
+ * @dev: Device this memory belongs to
+ * @src: memory region to duplicate
+ * @len: memory region length,
+ * @gfp: GFP mask to use
+ *
+ * Return: source address if it is in .rodata or the return value of kmemdup()
+ * to which the function falls back otherwise.
+ */
+const void *
+devm_kmemdup_const(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)src))
+ return src;
+
+ return devm_kmemdup(dev, src, len, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup_const);
+
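
devm_kmemdup_const() is aimed at data that may be either a compile-time constant or built at runtime: .rodata input is returned as-is, anything else gets a devres-managed copy. A minimal usage sketch (foo_probe() and the label are hypothetical):

#include <linux/device.h>
#include <linux/slab.h>

static const char fixed_label[] = "card0";	/* placed in .rodata */

static int foo_probe(struct device *dev)
{
	const char *label;

	/* Returns fixed_label itself; no allocation for .rodata input. */
	label = devm_kmemdup_const(dev, fixed_label, sizeof(fixed_label),
				   GFP_KERNEL);
	if (!label)
		return -ENOMEM;

	dev_info(dev, "using label %s\n", label);
	return 0;
}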
struct pages_devres {
unsigned long addr;
unsigned int order;
@@ -1201,13 +1222,6 @@ static void devm_percpu_release(struct device *dev, void *pdata)
free_percpu(p);
}
-static int devm_percpu_match(struct device *dev, void *data, void *p)
-{
- struct devres *devr = container_of(data, struct devres, data);
-
- return *(void **)devr->data == p;
-}
-
/**
* __devm_alloc_percpu - Resource-managed alloc_percpu
* @dev: Device to allocate per-cpu memory for
@@ -1243,21 +1257,3 @@ void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);
-
-/**
- * devm_free_percpu - Resource-managed free_percpu
- * @dev: Device this memory belongs to
- * @pdata: Per-cpu memory to free
- *
- * Free memory allocated with devm_alloc_percpu().
- */
-void devm_free_percpu(struct device *dev, void __percpu *pdata)
-{
- /*
- * Use devres_release() to prevent memory leakage as
- * devm_free_pages() does.
- */
- WARN_ON(devres_release(dev, devm_percpu_release, devm_percpu_match,
- (void *)(__force unsigned long)pdata));
-}
-EXPORT_SYMBOL_GPL(devm_free_percpu);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 31bfb3194b4c..194b44075ac7 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -70,7 +70,7 @@ static struct file_system_type internal_fs_type = {
#else
.init_fs_context = ramfs_init_fs_context,
#endif
- .kill_sb = kill_litter_super,
+ .kill_sb = kill_anon_super,
};
/* Simply take a ref on the existing mount */
@@ -176,15 +176,15 @@ static int dev_mkdir(const char *name, umode_t mode)
struct dentry *dentry;
struct path path;
- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+ dentry = start_creating_path(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
+ dentry = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode, NULL);
if (!IS_ERR(dentry))
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return PTR_ERR_OR_ZERO(dentry);
}
@@ -222,16 +222,16 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
struct path path;
int err;
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
if (dentry == ERR_PTR(-ENOENT)) {
create_path(nodename);
- dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
+ dentry = start_creating_path(AT_FDCWD, nodename, &path, 0);
}
if (IS_ERR(dentry))
return PTR_ERR(dentry);
err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
- dev->devt);
+ dev->devt, NULL);
if (!err) {
struct iattr newattrs;
@@ -246,7 +246,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
}
- done_path_create(&path, dentry);
+ end_creating_path(&path, dentry);
return err;
}
@@ -256,18 +256,16 @@ static int dev_rmdir(const char *name)
struct dentry *dentry;
int err;
- dentry = kern_path_locked(name, &parent);
+ dentry = start_removing_path(name, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
if (d_inode(dentry)->i_private == &thread)
err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
- dentry);
+ dentry, NULL);
else
err = -EPERM;
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
- path_put(&parent);
+ end_removing_path(&parent, dentry);
return err;
}
@@ -325,7 +323,7 @@ static int handle_remove(const char *nodename, struct device *dev)
int deleted = 0;
int err = 0;
- dentry = kern_path_locked(nodename, &parent);
+ dentry = start_removing_path(nodename, &parent);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -349,10 +347,8 @@ static int handle_remove(const char *nodename, struct device *dev)
if (!err || err == -ENOENT)
deleted = 1;
}
- dput(dentry);
- inode_unlock(d_inode(parent.dentry));
+ end_removing_path(&parent, dentry);
- path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index f5fbda0a9a44..21dd02124231 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -155,6 +155,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
dev->parent = &faux_bus_root;
dev->bus = &faux_bus_type;
dev_set_name(dev, "%s", name);
+ device_set_pm_not_required(dev);
ret = device_add(dev);
if (ret) {
diff --git a/drivers/base/firmware_loader/Kconfig b/drivers/base/firmware_loader/Kconfig
index 752b9a9bea03..15eff8a4b505 100644
--- a/drivers/base/firmware_loader/Kconfig
+++ b/drivers/base/firmware_loader/Kconfig
@@ -38,7 +38,7 @@ config FW_LOADER_DEBUG
config RUST_FW_LOADER_ABSTRACTIONS
bool "Rust Firmware Loader abstractions"
depends on RUST
- depends on FW_LOADER=y
+ select FW_LOADER
help
This enables the Rust abstractions for the firmware loader API.
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 6942c62fa59d..4ebdca9e4da4 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -829,8 +829,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
size_t offset, u32 opt_flags)
{
struct firmware *fw = NULL;
- struct cred *kern_cred = NULL;
- const struct cred *old_cred;
bool nondirect = false;
int ret;
@@ -871,45 +869,38 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
* called by a driver when serving an unrelated request from userland, we use
* the kernel credentials to read the file.
*/
- kern_cred = prepare_kernel_cred(&init_task);
- if (!kern_cred) {
- ret = -ENOMEM;
- goto out;
- }
- old_cred = override_creds(kern_cred);
-
- ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
+ scoped_with_kernel_creds() {
+ ret = fw_get_filesystem_firmware(device, fw->priv, "", NULL);
- /* Only full reads can support decompression, platform, and sysfs. */
- if (!(opt_flags & FW_OPT_PARTIAL))
- nondirect = true;
+ /* Only full reads can support decompression, platform, and sysfs. */
+ if (!(opt_flags & FW_OPT_PARTIAL))
+ nondirect = true;
#ifdef CONFIG_FW_LOADER_COMPRESS_ZSTD
- if (ret == -ENOENT && nondirect)
- ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
- fw_decompress_zstd);
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".zst",
+ fw_decompress_zstd);
#endif
#ifdef CONFIG_FW_LOADER_COMPRESS_XZ
- if (ret == -ENOENT && nondirect)
- ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
- fw_decompress_xz);
+ if (ret == -ENOENT && nondirect)
+ ret = fw_get_filesystem_firmware(device, fw->priv, ".xz",
+ fw_decompress_xz);
#endif
- if (ret == -ENOENT && nondirect)
- ret = firmware_fallback_platform(fw->priv);
+ if (ret == -ENOENT && nondirect)
+ ret = firmware_fallback_platform(fw->priv);
- if (ret) {
- if (!(opt_flags & FW_OPT_NO_WARN))
- dev_warn(device,
- "Direct firmware load for %s failed with error %d\n",
- name, ret);
- if (nondirect)
- ret = firmware_fallback_sysfs(fw, name, device,
- opt_flags, ret);
- } else
- ret = assign_fw(fw, device);
-
- revert_creds(old_cred);
- put_cred(kern_cred);
+ if (ret) {
+ if (!(opt_flags & FW_OPT_NO_WARN))
+ dev_warn(device,
+ "Direct firmware load for %s failed with error %d\n",
+ name, ret);
+ if (nondirect)
+ ret = firmware_fallback_sysfs(fw, name, device,
+ opt_flags, ret);
+ } else {
+ ret = assign_fw(fw, device);
+ }
+ }
out:
if (ret < 0) {
@@ -1585,16 +1576,20 @@ static int fw_pm_notify(struct notifier_block *notify_block,
}
/* stop caching firmware once syscore_suspend is reached */
-static int fw_suspend(void)
+static int fw_suspend(void *data)
{
fw_cache.state = FW_LOADER_NO_CACHE;
return 0;
}
-static struct syscore_ops fw_syscore_ops = {
+static const struct syscore_ops fw_syscore_ops = {
.suspend = fw_suspend,
};
+static struct syscore fw_syscore = {
+ .ops = &fw_syscore_ops,
+};
+
static int __init register_fw_pm_ops(void)
{
int ret;
@@ -1610,14 +1605,14 @@ static int __init register_fw_pm_ops(void)
if (ret)
return ret;
- register_syscore_ops(&fw_syscore_ops);
+ register_syscore(&fw_syscore);
return ret;
}
static inline void unregister_fw_pm_ops(void)
{
- unregister_syscore_ops(&fw_syscore_ops);
+ unregister_syscore(&fw_syscore);
unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
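
For reference, scoped_with_kernel_creds() condenses the open-coded override/revert sequence that the removed lines used; the sketch below restates that older pattern with the same APIs (error handling trimmed):

	struct cred *kern_cred;
	const struct cred *old_cred;

	kern_cred = prepare_kernel_cred(&init_task);
	if (!kern_cred)
		return -ENOMEM;

	old_cred = override_creds(kern_cred);
	/* ... filesystem access runs with kernel credentials here ... */
	revert_creds(old_cred);
	put_cred(kern_cred);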
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index add0b9b75edd..92e91050f96a 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -47,7 +47,10 @@ static ssize_t timeout_show(const struct class *class, const struct class_attrib
static ssize_t timeout_store(const struct class *class, const struct class_attribute *attr,
const char *buf, size_t count)
{
- int tmp_loading_timeout = simple_strtol(buf, NULL, 10);
+ int tmp_loading_timeout;
+
+ if (kstrtoint(buf, 10, &tmp_loading_timeout))
+ return -EINVAL;
if (tmp_loading_timeout < 0)
tmp_loading_timeout = 0;
@@ -157,7 +160,10 @@ static ssize_t firmware_loading_store(struct device *dev,
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
struct fw_priv *fw_priv;
ssize_t written = count;
- int loading = simple_strtol(buf, NULL, 10);
+ int loading;
+
+ if (kstrtoint(buf, 10, &loading))
+ return -EINVAL;
mutex_lock(&fw_lock);
fw_priv = fw_sysfs->fw_priv;
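
Unlike simple_strtol(), kstrtoint() rejects trailing garbage and overflow instead of silently truncating, which is why the conversion can now fail with -EINVAL. A minimal sketch of the same strict-parse shape (example_store() is hypothetical):

static ssize_t example_store(const struct class *class,
			     const struct class_attribute *attr,
			     const char *buf, size_t count)
{
	int val;

	/* "12x" or an out-of-range value now fails; "12\n" still parses. */
	if (kstrtoint(buf, 10, &val))
		return -EINVAL;

	if (val < 0)
		val = 0;

	return count;
}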
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index 829270067d16..c3797b93c5f5 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -100,8 +100,10 @@ static ssize_t cancel_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
mutex_lock(&fwlp->lock);
- if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
- ret = -ENODEV;
+ if (fwlp->progress == FW_UPLOAD_PROG_IDLE) {
+ mutex_unlock(&fwlp->lock);
+ return -ENODEV;
+ }
fwlp->ops->cancel(fwlp->fw_upload);
mutex_unlock(&fwlp->lock);
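
The fix above adds the missing unlock before the early return. An equivalent shape using the linux/cleanup.h guards would drop the explicit unlock paths entirely; a sketch, not what the patch does (the fwlp type is assumed from context):

	scoped_guard(mutex, &fwlp->lock) {
		/* The guard releases fwlp->lock on every exit path. */
		if (fwlp->progress == FW_UPLOAD_PROG_IDLE)
			return -ENODEV;
		fwlp->ops->cancel(fwlp->fw_upload);
	}
	return count;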
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 5c6c1d6bb59f..751f248ca4a8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -198,15 +198,15 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
break;
default:
WARN_ON(1);
- return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
+ return sysfs_emit(buf, "ERROR-UNKNOWN-%d\n", mem->state);
}
return sysfs_emit(buf, "%s\n", output);
}
-int memory_notify(unsigned long val, void *v)
+int memory_notify(enum memory_block_state state, void *v)
{
- return blocking_notifier_call_chain(&memory_chain, val, v);
+ return blocking_notifier_call_chain(&memory_chain, state, v);
}
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
@@ -226,7 +226,6 @@ static int memory_block_online(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
- struct memory_notify arg;
struct zone *zone;
int ret;
@@ -246,19 +245,9 @@ static int memory_block_online(struct memory_block *mem)
if (mem->altmap)
nr_vmemmap_pages = mem->altmap->free;
- arg.altmap_start_pfn = start_pfn;
- arg.altmap_nr_pages = nr_vmemmap_pages;
- arg.start_pfn = start_pfn + nr_vmemmap_pages;
- arg.nr_pages = nr_pages - nr_vmemmap_pages;
mem_hotplug_begin();
- ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
- ret = notifier_to_errno(ret);
- if (ret)
- goto out_notifier;
-
if (nr_vmemmap_pages) {
- ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
- zone, mem->altmap->inaccessible);
+ ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
if (ret)
goto out;
}
@@ -280,11 +269,7 @@ static int memory_block_online(struct memory_block *mem)
nr_vmemmap_pages);
mem->zone = zone;
- mem_hotplug_done();
- return ret;
out:
- memory_notify(MEM_FINISH_OFFLINE, &arg);
-out_notifier:
mem_hotplug_done();
return ret;
}
@@ -297,7 +282,6 @@ static int memory_block_offline(struct memory_block *mem)
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long nr_vmemmap_pages = 0;
- struct memory_notify arg;
int ret;
if (!mem->zone)
@@ -329,11 +313,6 @@ static int memory_block_offline(struct memory_block *mem)
mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
mem->zone = NULL;
- arg.altmap_start_pfn = start_pfn;
- arg.altmap_nr_pages = nr_vmemmap_pages;
- arg.start_pfn = start_pfn + nr_vmemmap_pages;
- arg.nr_pages = nr_pages - nr_vmemmap_pages;
- memory_notify(MEM_FINISH_OFFLINE, &arg);
out:
mem_hotplug_done();
return ret;
@@ -769,21 +748,22 @@ static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
#ifdef CONFIG_NUMA
/**
- * memory_block_add_nid() - Indicate that system RAM falling into this memory
- * block device (partially) belongs to the given node.
+ * memory_block_add_nid_early() - Indicate that early system RAM falling into
+ * this memory block device (partially) belongs
+ * to the given node.
* @mem: The memory block device.
* @nid: The node id.
- * @context: The memory initialization context.
*
- * Indicate that system RAM falling into this memory block (partially) belongs
- * to the given node. If the context indicates ("early") that we are adding the
- * node during node device subsystem initialization, this will also properly
- * set/adjust mem->zone based on the zone ranges of the given node.
+ * Indicate that early system RAM falling into this memory block (partially)
+ * belongs to the given node. This will also properly set/adjust mem->zone based
+ * on the zone ranges of the given node.
+ *
+ * Memory hotplug handles this on memory block creation, where we can only have
+ * a single nid span a memory block.
*/
-void memory_block_add_nid(struct memory_block *mem, int nid,
- enum meminit_context context)
+void memory_block_add_nid_early(struct memory_block *mem, int nid)
{
- if (context == MEMINIT_EARLY && mem->nid != nid) {
+ if (mem->nid != nid) {
/*
* For early memory we have to determine the zone when setting
* the node id and handle multiple nodes spanning a single
@@ -797,19 +777,18 @@ void memory_block_add_nid(struct memory_block *mem, int nid,
mem->zone = early_node_zone_for_memory_block(mem, nid);
else
mem->zone = NULL;
+		/*
+		 * If this memory block spans multiple nodes (not applicable to
+		 * hotplugged memory), we only indicate the last processed node;
+		 * zone == NULL will prohibit memory offlining and consequently
+		 * unplug.
+		 */
+ mem->nid = nid;
}
-
- /*
- * If this memory block spans multiple nodes, we only indicate
- * the last processed node. If we span multiple nodes (not applicable
- * to hotplugged memory), zone == NULL will prohibit memory offlining
- * and consequently unplug.
- */
- mem->nid = nid;
}
#endif
-static int add_memory_block(unsigned long block_id, unsigned long state,
+static int add_memory_block(unsigned long block_id, int nid, unsigned long state,
struct vmem_altmap *altmap,
struct memory_group *group)
{
@@ -827,7 +806,7 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
mem->start_section_nr = block_id * sections_per_block;
mem->state = state;
- mem->nid = NUMA_NO_NODE;
+ mem->nid = nid;
mem->altmap = altmap;
INIT_LIST_HEAD(&mem->group_next);
@@ -854,13 +833,6 @@ static int add_memory_block(unsigned long block_id, unsigned long state,
return 0;
}
-static int add_hotplug_memory_block(unsigned long block_id,
- struct vmem_altmap *altmap,
- struct memory_group *group)
-{
- return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
-}
-
static void remove_memory_block(struct memory_block *memory)
{
if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
@@ -886,7 +858,7 @@ static void remove_memory_block(struct memory_block *memory)
* Called under device_hotplug_lock.
*/
int create_memory_block_devices(unsigned long start, unsigned long size,
- struct vmem_altmap *altmap,
+ int nid, struct vmem_altmap *altmap,
struct memory_group *group)
{
const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
@@ -900,7 +872,7 @@ int create_memory_block_devices(unsigned long start, unsigned long size,
return -EINVAL;
for (block_id = start_block_id; block_id != end_block_id; block_id++) {
- ret = add_hotplug_memory_block(block_id, altmap, group);
+ ret = add_memory_block(block_id, nid, MEM_OFFLINE, altmap, group);
if (ret)
break;
}
@@ -1005,7 +977,7 @@ void __init memory_dev_init(void)
continue;
block_id = memory_block_id(nr);
- ret = add_memory_block(block_id, MEM_ONLINE, NULL, NULL);
+ ret = add_memory_block(block_id, NUMA_NO_NODE, MEM_ONLINE, NULL, NULL);
if (ret) {
panic("%s() failed to add memory block: %d\n",
__func__, ret);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3399594136b2..00cf4532f121 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -249,6 +249,44 @@ void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
EXPORT_SYMBOL_GPL(node_set_perf_attrs);
/**
+ * node_update_perf_attrs - Update the performance values for given access class
+ * @nid: Node identifier to be updated
+ * @coord: Heterogeneous memory performance coordinates
+ * @access: The access class for the given attributes
+ */
+void node_update_perf_attrs(unsigned int nid, struct access_coordinate *coord,
+ enum access_coordinate_class access)
+{
+ struct node_access_nodes *access_node;
+ struct node *node;
+ int i;
+
+ if (WARN_ON_ONCE(!node_online(nid)))
+ return;
+
+ node = node_devices[nid];
+ list_for_each_entry(access_node, &node->access_list, list_node) {
+ if (access_node->access != access)
+ continue;
+
+ access_node->coord = *coord;
+ for (i = 0; access_attrs[i]; i++) {
+ sysfs_notify(&access_node->dev.kobj,
+ NULL, access_attrs[i]->name);
+ }
+ break;
+ }
+
+ /* When setting CPU access coordinates, update mempolicy */
+ if (access != ACCESS_COORDINATE_CPU)
+ return;
+
+ if (mempolicy_set_node_perf(nid, coord))
+ pr_info("failed to set mempolicy attrs for node %d\n", nid);
+}
+EXPORT_SYMBOL_GPL(node_update_perf_attrs);
+
+/**
* struct node_cache_info - Internal tracking for memory node caches
 * @dev: Device representing the cache level
* @node: List element for tracking in the node
@@ -638,50 +676,6 @@ static void node_device_release(struct device *dev)
kfree(to_node(dev));
}
-/*
- * register_node - Setup a sysfs device for a node.
- * @num - Node number to use when creating the device.
- *
- * Initialize and register the node device.
- */
-static int register_node(struct node *node, int num)
-{
- int error;
-
- node->dev.id = num;
- node->dev.bus = &node_subsys;
- node->dev.release = node_device_release;
- node->dev.groups = node_dev_groups;
- error = device_register(&node->dev);
-
- if (error) {
- put_device(&node->dev);
- } else {
- hugetlb_register_node(node);
- compaction_register_node(node);
- reclaim_register_node(node);
- }
-
- return error;
-}
-
-/**
- * unregister_node - unregister a node device
- * @node: node going away
- *
- * Unregisters a node device @node. All the devices on the node must be
- * unregistered before calling this function.
- */
-void unregister_node(struct node *node)
-{
- hugetlb_unregister_node(node);
- compaction_unregister_node(node);
- reclaim_unregister_node(node);
- node_remove_accesses(node);
- node_remove_caches(node);
- device_unregister(&node->dev);
-}
-
struct node *node_devices[MAX_NUMNODES];
/*
@@ -781,13 +775,10 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
#ifdef CONFIG_MEMORY_HOTPLUG
static void do_register_memory_block_under_node(int nid,
- struct memory_block *mem_blk,
- enum meminit_context context)
+ struct memory_block *mem_blk)
{
int ret;
- memory_block_add_nid(mem_blk, nid, context);
-
ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
&mem_blk->dev.kobj,
kobject_name(&mem_blk->dev.kobj));
@@ -815,7 +806,7 @@ static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
{
int nid = *(int *)arg;
- do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
+ do_register_memory_block_under_node(nid, mem_blk);
return 0;
}
@@ -855,7 +846,8 @@ static void register_memory_blocks_under_nodes(void)
if (!mem)
continue;
- do_register_memory_block_under_node(nid, mem, MEMINIT_EARLY);
+ memory_block_add_nid_early(mem, nid);
+ do_register_memory_block_under_node(nid, mem);
put_device(&mem->dev);
}
@@ -871,7 +863,13 @@ void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
}
#endif /* CONFIG_MEMORY_HOTPLUG */
-int register_one_node(int nid)
+/**
+ * register_node - Initialize and register the node device.
+ * @nid: Node number to use when creating the device.
+ *
+ * Return: 0 on success, -errno otherwise
+ */
+int register_node(int nid)
{
int error;
int cpu;
@@ -882,9 +880,22 @@ int register_one_node(int nid)
return -ENOMEM;
INIT_LIST_HEAD(&node->access_list);
- node_devices[nid] = node;
- error = register_node(node_devices[nid], nid);
+ node->dev.id = nid;
+ node->dev.bus = &node_subsys;
+ node->dev.release = node_device_release;
+ node->dev.groups = node_dev_groups;
+
+ error = device_register(&node->dev);
+ if (error) {
+ put_device(&node->dev);
+ return error;
+ }
+
+ node_devices[nid] = node;
+ hugetlb_register_node(node);
+ compaction_register_node(node);
+ reclaim_register_node(node);
/* link cpu under this node */
for_each_present_cpu(cpu) {
@@ -896,13 +907,26 @@ int register_one_node(int nid)
return error;
}
-
-void unregister_one_node(int nid)
+/**
+ * unregister_node - unregister a node device
+ * @nid: nid of the node going away
+ *
+ * Unregisters the node device at node id @nid. All the devices on the
+ * node must be unregistered before calling this function.
+ */
+void unregister_node(int nid)
{
- if (!node_devices[nid])
+ struct node *node = node_devices[nid];
+
+ if (!node)
return;
- unregister_node(node_devices[nid]);
+ hugetlb_unregister_node(node);
+ compaction_unregister_node(node);
+ reclaim_unregister_node(node);
+ node_remove_accesses(node);
+ node_remove_caches(node);
+ device_unregister(&node->dev);
node_devices[nid] = NULL;
}
@@ -978,7 +1002,7 @@ void __init node_dev_init(void)
* to already created cpu devices.
*/
for_each_online_node(i) {
- ret = register_one_node(i);
+ ret = register_node(i);
if (ret)
panic("%s() failed to add node: %d\n", __func__, ret);
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 09450349cf32..b45d41b018ca 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -150,25 +150,37 @@ devm_platform_ioremap_resource_byname(struct platform_device *pdev,
EXPORT_SYMBOL_GPL(devm_platform_ioremap_resource_byname);
#endif /* CONFIG_HAS_IOMEM */
+static const struct cpumask *get_irq_affinity(struct platform_device *dev,
+ unsigned int num)
+{
+ const struct cpumask *mask = NULL;
+#ifndef CONFIG_SPARC
+ struct fwnode_handle *fwnode = dev_fwnode(&dev->dev);
+
+ if (is_of_node(fwnode))
+ mask = of_irq_get_affinity(to_of_node(fwnode), num);
+ else if (is_acpi_device_node(fwnode))
+ mask = acpi_irq_get_affinity(ACPI_HANDLE_FWNODE(fwnode), num);
+#endif
+
+ return mask ?: cpu_possible_mask;
+}
+
/**
- * platform_get_irq_optional - get an optional IRQ for a device
- * @dev: platform device
- * @num: IRQ number index
+ * platform_get_irq_affinity - get an optional IRQ and its affinity for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ * @affinity: optional cpumask pointer to get the affinity of a per-cpu interrupt
*
- * Gets an IRQ for a platform device. Device drivers should check the return
- * value for errors so as to not pass a negative integer value to the
- * request_irq() APIs. This is the same as platform_get_irq(), except that it
- * does not print an error message if an IRQ can not be obtained.
- *
- * For example::
- *
- * int irq = platform_get_irq_optional(pdev, 0);
- * if (irq < 0)
- * return irq;
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. Optional affinity information is provided in the
+ * affinity pointer if available, and NULL otherwise.
*
- * Return: non-zero IRQ number on success, negative error number on failure.
+ * Return: non-zero interrupt number on success, negative error number on failure.
*/
-int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+int platform_get_irq_affinity(struct platform_device *dev, unsigned int num,
+ const struct cpumask **affinity)
{
int ret;
#ifdef CONFIG_SPARC
@@ -236,8 +248,37 @@ out_not_found:
out:
if (WARN(!ret, "0 is an invalid IRQ number\n"))
return -EINVAL;
+
+ if (ret > 0 && affinity)
+ *affinity = get_irq_affinity(dev, num);
+
return ret;
}
+EXPORT_SYMBOL_GPL(platform_get_irq_affinity);
+
+/**
+ * platform_get_irq_optional - get an optional interrupt for a device
+ * @dev: platform device
+ * @num: interrupt number index
+ *
+ * Gets an interrupt for a platform device. Device drivers should check the
+ * return value for errors so as to not pass a negative integer value to
+ * the request_irq() APIs. This is the same as platform_get_irq(), except
+ * that it does not print an error message if an interrupt cannot be
+ * obtained.
+ *
+ * For example::
+ *
+ * int irq = platform_get_irq_optional(pdev, 0);
+ * if (irq < 0)
+ * return irq;
+ *
+ * Return: non-zero interrupt number on success, negative error number on failure.
+ */
+int platform_get_irq_optional(struct platform_device *dev, unsigned int num)
+{
+ return platform_get_irq_affinity(dev, num, NULL);
+}
EXPORT_SYMBOL_GPL(platform_get_irq_optional);
/**
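
A possible caller of the new platform_get_irq_affinity() looks like the sketch below (foo_probe() is hypothetical); per the implementation above, *affinity is only written on success and falls back to cpu_possible_mask when the firmware provides no hint:

static int foo_probe(struct platform_device *pdev)
{
	const struct cpumask *affinity;
	int irq;

	irq = platform_get_irq_affinity(pdev, 0, &affinity);
	if (irq < 0)
		return irq;

	dev_dbg(&pdev->dev, "irq %d affine to CPUs %*pbl\n",
		irq, cpumask_pr_args(affinity));
	return 0;
}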
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 01f11629d241..2989e42d0161 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o wakeup_stats.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
obj-$(CONFIG_PM_QOS_KUNIT_TEST) += qos-test.o
+obj-$(CONFIG_PM_RUNTIME_KUNIT_TEST) += runtime-test.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 6502720bb564..af99bbcf281c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -8,6 +8,13 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#define CALL_PM_OP(dev, op) \
+({ \
+ struct device *_dev = (dev); \
+ const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL; \
+ pm && pm->op ? pm->op(_dev) : 0; \
+})
+
#ifdef CONFIG_PM
/**
* pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
@@ -19,12 +26,7 @@
*/
int pm_generic_runtime_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
-
- return ret;
+ return CALL_PM_OP(dev, runtime_suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
@@ -38,12 +40,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
*/
int pm_generic_runtime_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int ret;
-
- ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
-
- return ret;
+ return CALL_PM_OP(dev, runtime_resume);
}
EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
#endif /* CONFIG_PM */
@@ -72,9 +69,7 @@ int pm_generic_prepare(struct device *dev)
*/
int pm_generic_suspend_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+ return CALL_PM_OP(dev, suspend_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
@@ -84,9 +79,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
*/
int pm_generic_suspend_late(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+ return CALL_PM_OP(dev, suspend_late);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
@@ -96,9 +89,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
*/
int pm_generic_suspend(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->suspend ? pm->suspend(dev) : 0;
+ return CALL_PM_OP(dev, suspend);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
@@ -108,9 +99,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
*/
int pm_generic_freeze_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+ return CALL_PM_OP(dev, freeze_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
@@ -120,9 +109,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
*/
int pm_generic_freeze(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->freeze ? pm->freeze(dev) : 0;
+ return CALL_PM_OP(dev, freeze);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
@@ -132,9 +119,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
*/
int pm_generic_poweroff_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+ return CALL_PM_OP(dev, poweroff_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
@@ -144,9 +129,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
*/
int pm_generic_poweroff_late(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+ return CALL_PM_OP(dev, poweroff_late);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
@@ -156,9 +139,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
*/
int pm_generic_poweroff(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+ return CALL_PM_OP(dev, poweroff);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
@@ -168,9 +149,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
*/
int pm_generic_thaw_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+ return CALL_PM_OP(dev, thaw_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
@@ -180,9 +159,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
*/
int pm_generic_thaw(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->thaw ? pm->thaw(dev) : 0;
+ return CALL_PM_OP(dev, thaw);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -192,9 +169,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
*/
int pm_generic_resume_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+ return CALL_PM_OP(dev, resume_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
@@ -204,9 +179,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
*/
int pm_generic_resume_early(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+ return CALL_PM_OP(dev, resume_early);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_early);
@@ -216,9 +189,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_early);
*/
int pm_generic_resume(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->resume ? pm->resume(dev) : 0;
+ return CALL_PM_OP(dev, resume);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -228,9 +199,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+ return CALL_PM_OP(dev, restore_noirq);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
@@ -240,9 +209,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
*/
int pm_generic_restore_early(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+ return CALL_PM_OP(dev, restore_early);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_early);
@@ -252,9 +219,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_early);
*/
int pm_generic_restore(struct device *dev)
{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
- return pm && pm->restore ? pm->restore(dev) : 0;
+ return CALL_PM_OP(dev, restore);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
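
CALL_PM_OP() uses a GNU statement expression, so each wrapper collapses to a single return while still evaluating its dev argument exactly once; the last expression inside ({ ... }) is the macro's value. In isolation the shape is (a generic sketch, not kernel API):

#define CALL_OP(obj, op)						\
({									\
	struct object *_o = (obj);	/* argument evaluated once */	\
	_o->ops && _o->ops->op ? _o->ops->op(_o) : 0;			\
})

Single evaluation matters when the argument has side effects: CALL_OP(next_object(), suspend) would otherwise invoke next_object() several times.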
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2ea6e05e6ec9..97a8b4fcf471 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -34,16 +34,13 @@
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
+#include <linux/nmi.h>
#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
-#define list_for_each_entry_rcu_locked(pos, head, member) \
- list_for_each_entry_rcu(pos, head, member, \
- device_links_read_lock_held())
-
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -99,6 +96,8 @@ static const char *pm_verb(int event)
return "restore";
case PM_EVENT_RECOVER:
return "recover";
+ case PM_EVENT_POWEROFF:
+ return "poweroff";
default:
return "(unknown PM event)";
}
@@ -281,8 +280,9 @@ static void dpm_wait_for_suppliers(struct device *dev, bool async)
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_supplier(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->supplier, async);
device_links_read_unlock(idx);
@@ -338,8 +338,9 @@ static void dpm_wait_for_consumers(struct device *dev, bool async)
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
- if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dev_for_each_link_to_consumer(link, dev)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT &&
+ !device_link_flag_is_sync_state_only(link->flags))
dpm_wait(link->consumer, async);
device_links_read_unlock(idx);
@@ -369,6 +370,7 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff;
case PM_EVENT_THAW:
@@ -403,6 +405,7 @@ static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_late;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_late;
case PM_EVENT_THAW:
@@ -437,6 +440,7 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
return ops->freeze_noirq;
+ case PM_EVENT_POWEROFF:
case PM_EVENT_HIBERNATE:
return ops->poweroff_noirq;
case PM_EVENT_THAW:
@@ -517,6 +521,11 @@ struct dpm_watchdog {
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
struct dpm_watchdog wd
+static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
+module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
+MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
+ "Backtrace all CPUs on DPM watchdog timeout");
+
/**
* dpm_watchdog_handler - Driver suspend / resume watchdog handler.
* @t: The timer that PM watchdog depends on.
@@ -532,8 +541,12 @@ static void dpm_watchdog_handler(struct timer_list *t)
unsigned int time_left;
if (wd->fatal) {
+ unsigned int this_cpu = smp_processor_id();
+
dev_emerg(wd->dev, "**** DPM device timeout ****\n");
show_stack(wd->tsk, NULL, KERN_EMERG);
+ if (dpm_watchdog_all_cpu_backtrace)
+ trigger_allbutcpu_cpu_backtrace(this_cpu);
panic("%s %s: unrecoverable failure\n",
dev_driver_string(wd->dev), dev_name(wd->dev));
}
@@ -675,7 +688,7 @@ static void dpm_async_resume_subordinate(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
- list_for_each_entry_rcu_locked(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
@@ -724,8 +737,20 @@ static void device_resume_noirq(struct device *dev, pm_message_t state, bool asy
if (dev->power.syscore || dev->power.direct_complete)
goto Out;
- if (!dev->power.is_noirq_suspended)
+ if (!dev->power.is_noirq_suspended) {
+ /*
+ * This means that system suspend has been aborted in the noirq
+ * phase before invoking the noirq suspend callback for the
+	 * device, so if device_suspend_late() has left it in suspend,
+	 * device_resume_early() should leave it suspended too, in case
+	 * its early resume depends on the noirq resume that has not run.
+ */
+ if (dev_pm_skip_suspend(dev))
+ dev->power.must_resume = false;
+
goto Out;
+ }
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -878,12 +903,15 @@ static void device_resume_early(struct device *dev, pm_message_t state, bool asy
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Out;
if (!dev->power.is_late_suspended)
goto Out;
+ if (dev->power.syscore)
+ goto Skip;
+
if (!dpm_wait_for_superior(dev, async))
goto Out;
@@ -916,11 +944,11 @@ Run:
Skip:
dev->power.is_late_suspended = false;
+ pm_runtime_enable(dev);
Out:
TRACE_RESUME(error);
- pm_runtime_enable(dev);
complete_all(&dev->power.completion);
if (error) {
@@ -1330,7 +1358,7 @@ static void dpm_async_suspend_superior(struct device *dev, async_func_t func)
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);
@@ -1384,7 +1412,7 @@ static void dpm_superior_set_must_resume(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1605,12 +1633,6 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- /*
- * Disable runtime PM for the device without checking if there is a
- * pending resume request for it.
- */
- __pm_runtime_disable(dev, false);
-
dpm_wait_for_subordinate(dev, async);
if (READ_ONCE(async_error))
@@ -1621,9 +1643,18 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
goto Complete;
}
- if (dev->power.syscore || dev->power.direct_complete)
+ if (dev->power.direct_complete)
goto Complete;
+ /*
+ * Disable runtime PM for the device without checking if there is a
+ * pending resume request for it.
+ */
+ __pm_runtime_disable(dev, false);
+
+ if (dev->power.syscore)
+ goto Skip;
+
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -1654,6 +1685,7 @@ Run:
WRITE_ONCE(async_error, error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async late" : " late", error);
+ pm_runtime_enable(dev);
goto Complete;
}
dpm_propagate_wakeup_to_parent(dev);
@@ -1813,7 +1845,7 @@ static void dpm_clear_superiors_direct_complete(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2097,7 @@ static bool device_prepare_smart_suspend(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
@@ -2116,6 +2148,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
dev->power.wakeup_path = false;
+ dev->power.out_band_wakeup = false;
if (dev->power.no_pm_callbacks)
goto unlock;
diff --git a/drivers/base/power/runtime-test.c b/drivers/base/power/runtime-test.c
new file mode 100644
index 000000000000..1535ad2b0264
--- /dev/null
+++ b/drivers/base/power/runtime-test.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Google, Inc.
+ */
+
+#include <linux/cleanup.h>
+#include <linux/pm_runtime.h>
+#include <kunit/device.h>
+#include <kunit/test.h>
+
+#define DEVICE_NAME "pm_runtime_test_device"
+
+static void pm_runtime_depth_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_get_sync(dev)); /* "already active" */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/* Test pm_runtime_put() and friends when already suspended. */
+static void pm_runtime_already_suspended_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ /* Grab 2 refcounts */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_get_noresume(dev);
+ /* The first put() sees usage_count 1 */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync_autosuspend(dev));
+ /* The second put() sees usage_count 0 but tells us "already suspended". */
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync_autosuspend(dev));
+
+ /* Should have remained suspended the whole time. */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static void pm_runtime_idle_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ pm_runtime_put_noidle(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+}
+
+static void pm_runtime_disabled_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ /* Never called pm_runtime_enable() */
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+
+ /* "disabled" is treated as "active" */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_suspended(dev));
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EACCES, pm_runtime_autosuspend(dev));
+
+ /* Still disabled */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+ KUNIT_EXPECT_FALSE(test, pm_runtime_enabled(dev));
+}
+
+static void pm_runtime_error_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ /* Fake a .runtime_resume() error */
+ dev->power.runtime_error = -EIO;
+
+ /*
+ * Note: these "fail", but they still acquire/release refcounts, so
+ * keep them balanced.
+ */
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_put_sync_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume_and_get(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EINVAL, pm_runtime_autosuspend(dev));
+
+ /* Error is still pending */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+ KUNIT_EXPECT_EQ(test, -EIO, dev->power.runtime_error);
+ /* Clear error */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_suspended(dev));
+ KUNIT_EXPECT_EQ(test, 0, dev->power.runtime_error);
+ /* Still suspended */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get(dev));
+ pm_runtime_barrier(dev);
+ pm_runtime_put(dev);
+ pm_runtime_suspend(dev); /* flush the put(), to suspend */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_get_sync(dev));
+ pm_runtime_put_autosuspend(dev);
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_resume_and_get(dev));
+
+ /*
+ * The following should all return -EAGAIN (usage is non-zero) or 1
+ * (already resumed).
+ */
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_idle(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_idle(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_request_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_request_autosuspend(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_suspend(dev));
+ KUNIT_EXPECT_EQ(test, 1, pm_runtime_resume(dev));
+ KUNIT_EXPECT_EQ(test, -EAGAIN, pm_runtime_autosuspend(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_put_sync(dev));
+
+ /* Suspended again */
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+/*
+ * Explore a typical probe() sequence in which a device marks itself powered,
+ * but doesn't hold any runtime PM reference, so it suspends as soon as it goes
+ * idle.
+ */
+static void pm_runtime_probe_active_test(struct kunit *test)
+{
+ struct device *dev = kunit_device_register(test, DEVICE_NAME);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ KUNIT_EXPECT_TRUE(test, pm_runtime_status_suspended(dev));
+
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_set_active(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ pm_runtime_enable(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Nothing to flush. We stay active. */
+ pm_runtime_barrier(dev);
+ KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
+
+ /* Ask for idle? Now we suspend. */
+ KUNIT_EXPECT_EQ(test, 0, pm_runtime_idle(dev));
+ KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
+}
+
+static struct kunit_case pm_runtime_test_cases[] = {
+ KUNIT_CASE(pm_runtime_depth_test),
+ KUNIT_CASE(pm_runtime_already_suspended_test),
+ KUNIT_CASE(pm_runtime_idle_test),
+ KUNIT_CASE(pm_runtime_disabled_test),
+ KUNIT_CASE(pm_runtime_error_test),
+ KUNIT_CASE(pm_runtime_probe_active_test),
+ {}
+};
+
+static struct kunit_suite pm_runtime_test_suite = {
+ .name = "pm_runtime_test_cases",
+ .test_cases = pm_runtime_test_cases,
+};
+
+kunit_test_suite(pm_runtime_test_suite);
+MODULE_DESCRIPTION("Runtime power management unit test suite");
+MODULE_LICENSE("GPL");
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3e84dc4122de..84676cc24221 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -90,7 +90,7 @@ static void update_pm_runtime_accounting(struct device *dev)
/*
* Because ktime_get_mono_fast_ns() is not monotonic during
* timekeeping updates, ensure that 'now' is after the last saved
- * timesptamp.
+ * timestamp.
*/
if (now < last)
return;
@@ -217,7 +217,7 @@ static int dev_memalloc_noio(struct device *dev, void *data)
* resume/suspend callback of any one of its ancestors(or the
* block device itself), the deadlock may be triggered inside the
* memory allocation since it might not complete until the block
- * device becomes active and the involed page I/O finishes. The
+ * device becomes active and the involved page I/O finishes. The
* situation is pointed out first by Alan Stern. Network device
* are involved in iSCSI kind of situation.
*
@@ -498,6 +498,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
if (retval < 0)
; /* Conditions are wrong. */
+ else if ((rpmflags & RPM_GET_PUT) && retval == 1)
+ ; /* put() is allowed in RPM_SUSPENDED */
+
/* Idle notifications are allowed only in the RPM_ACTIVE state. */
else if (dev->power.runtime_status != RPM_ACTIVE)
retval = -EAGAIN;
@@ -796,6 +799,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.runtime_status == RPM_ACTIVE &&
dev->power.last_status == RPM_ACTIVE)
retval = 1;
+ else if (rpmflags & RPM_TRANSPARENT)
+ goto out;
else
retval = -EACCES;
}
@@ -1205,7 +1210,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_resume);
*
* Otherwise, if its runtime PM status is %RPM_ACTIVE and (1) @ign_usage_count
* is set, or (2) @dev is not ignoring children and its active child count is
- * nonero, or (3) the runtime PM usage counter of @dev is not zero, increment
+ * nonzero, or (3) the runtime PM usage counter of @dev is not zero, increment
* the usage counter of @dev and return 1.
*
* Otherwise, return 0 without changing the usage counter.
@@ -1462,30 +1467,20 @@ static void __pm_runtime_barrier(struct device *dev)
* Next, make sure that all pending requests for the device have been flushed
* from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
- *
- * Return value:
- * 1, if there was a resume request pending and the device had to be woken up,
- * 0, otherwise
*/
-int pm_runtime_barrier(struct device *dev)
+void pm_runtime_barrier(struct device *dev)
{
- int retval = 0;
-
pm_runtime_get_noresume(dev);
spin_lock_irq(&dev->power.lock);
if (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME) {
+ && dev->power.request == RPM_REQ_RESUME)
rpm_resume(dev, 0);
- retval = 1;
- }
__pm_runtime_barrier(dev);
spin_unlock_irq(&dev->power.lock);
pm_runtime_put_noidle(dev);
-
- return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
@@ -1659,9 +1654,12 @@ EXPORT_SYMBOL_GPL(devm_pm_runtime_get_noresume);
* pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
- * Increase the device's usage count and clear its power.runtime_auto flag,
- * so that it cannot be suspended at run time until pm_runtime_allow() is called
- * for it.
+ * Resume @dev if already suspended and block runtime suspend of @dev in such
+ * a way that it can be unblocked via the /sys/devices/.../power/control
+ * interface, or otherwise by calling pm_runtime_allow().
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_forbid(struct device *dev)
{
@@ -1682,7 +1680,13 @@ EXPORT_SYMBOL_GPL(pm_runtime_forbid);
* pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
- * Decrease the device's usage count and set its power.runtime_auto flag.
+ * Unblock runtime suspend of @dev after it has been blocked by
+ * pm_runtime_forbid() (for instance, if it has been blocked via the
+ * /sys/devices/.../power/control interface), check if @dev can be
+ * suspended and suspend it in that case.
+ *
+ * Calling this function many times in a row has the same effect as calling it
+ * once.
*/
void pm_runtime_allow(struct device *dev)
{
@@ -1903,8 +1907,7 @@ void pm_runtime_get_suppliers(struct device *dev)
idx = device_links_read_lock();
- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
+ dev_for_each_link_to_supplier(link, dev)
if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
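
With pm_runtime_barrier() now returning void, the remaining use case is purely a flush before teardown, as in this hypothetical sketch:

static void foo_remove(struct device *dev)
{
	/* Flush pending pm_wq requests and wait for in-flight callbacks. */
	pm_runtime_barrier(dev);

	/* Now it is safe to disable runtime PM and tear the device down. */
	pm_runtime_disable(dev);
}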
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index cd6e559648b2..d8da7195bb00 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -238,10 +238,8 @@ int show_trace_dev_match(char *buf, size_t size)
unsigned int hash = hash_string(DEVSEED, dev_name(dev),
DEVHASH);
if (hash == value) {
- int len = snprintf(buf, size, "%s\n",
+ int len = scnprintf(buf, size, "%s\n",
dev_driver_string(dev));
- if (len > size)
- len = size;
buf += len;
ret += len;
size -= len;
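
scnprintf() returns the number of bytes actually written (excluding the trailing NUL), so the manual clamp needed after snprintf() disappears. A sketch of the difference:

static void scnprintf_demo(void)
{
	char buf[8];
	int len;

	len = snprintf(buf, sizeof(buf), "%s\n", "long-driver-name");
	/* len == 17: the would-be length, larger than the buffer */

	len = scnprintf(buf, sizeof(buf), "%s\n", "long-driver-name");
	/* len == 7: bytes actually stored, safe for buf/size bookkeeping */
}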
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index d1283ff1080b..1e1a0e7eeac5 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -189,17 +189,16 @@ static void wakeup_source_remove(struct wakeup_source *ws)
if (WARN_ON(!ws))
return;
+ /*
+ * After shutting down the timer, wakeup_source_activate() will warn if
+ * the given wakeup source is passed to it.
+ */
+ timer_shutdown_sync(&ws->timer);
+
raw_spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
raw_spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
-
- timer_delete_sync(&ws->timer);
- /*
- * Clear timer.function to make wakeup_source_not_registered() treat
- * this wakeup source as not registered.
- */
- ws->timer.function = NULL;
}
/**
@@ -506,14 +505,14 @@ int device_set_wakeup_enable(struct device *dev, bool enable)
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);
/**
- * wakeup_source_not_registered - validate the given wakeup source.
+ * wakeup_source_not_usable - validate the given wakeup source.
* @ws: Wakeup source to be validated.
*/
-static bool wakeup_source_not_registered(struct wakeup_source *ws)
+static bool wakeup_source_not_usable(struct wakeup_source *ws)
{
/*
- * Use timer struct to check if the given source is initialized
- * by wakeup_source_add.
+ * Use the timer struct to check if the given wakeup source has been
+	 * initialized by wakeup_source_add() and is not going away.
*/
return ws->timer.function != pm_wakeup_timer_fn;
}
@@ -558,8 +557,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
unsigned int cec;
- if (WARN_ONCE(wakeup_source_not_registered(ws),
- "unregistered wakeup source\n"))
+ if (WARN_ONCE(wakeup_source_not_usable(ws), "unusable wakeup source\n"))
return;
ws->active = true;
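
timer_shutdown_sync() waits for a running handler and permanently disarms the timer, clearing timer.function; that cleared pointer is exactly what wakeup_source_not_usable() tests. The generic teardown ordering, as a sketch with a hypothetical object:

static void obj_teardown(struct obj *o)
{
	/*
	 * After this, re-arm attempts are ignored and o->timer.function is
	 * NULL, so any "is this object still usable?" check fails closed.
	 */
	timer_shutdown_sync(&o->timer);

	/* Only then unpublish the object and wait out RCU readers. */
	list_del_rcu(&o->entry);
	synchronize_rcu();
}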
diff --git a/drivers/base/property.c b/drivers/base/property.c
index f626d5bbe806..6a63860579dd 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -578,7 +578,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_property_string);
* @prop: The name of the property
* @nargs_prop: The name of the property telling the number of
* arguments in the referred node. NULL if @nargs is known,
- * otherwise @nargs is ignored. Only relevant on OF.
+ * otherwise @nargs is ignored.
* @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
* @index: Index of the reference, from zero onwards.
* @args: Result structure with reference and integer arguments.
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 6f31240ee4a9..1477329410ec 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -186,6 +186,7 @@ struct regcache_ops {
enum regcache_type type;
int (*init)(struct regmap *map);
int (*exit)(struct regmap *map);
+ int (*populate)(struct regmap *map);
#ifdef CONFIG_DEBUG_FS
void (*debugfs_init)(struct regmap *map);
#endif
@@ -288,6 +289,7 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
const struct regmap_bus *bus,
const struct regmap_config *config);
+extern struct regcache_ops regcache_flat_sparse_ops;
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_maple_ops;
extern struct regcache_ops regcache_flat_ops;
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
index f36d3618b67c..53cc59c84e2f 100644
--- a/drivers/base/regmap/regcache-flat.c
+++ b/drivers/base/regmap/regcache-flat.c
@@ -6,7 +6,11 @@
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/limits.h>
+#include <linux/overflow.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -18,46 +22,92 @@ static inline unsigned int regcache_flat_get_index(const struct regmap *map,
return regcache_get_index_by_order(map, reg);
}
+struct regcache_flat_data {
+ unsigned long *valid;
+ unsigned int data[];
+};
+
static int regcache_flat_init(struct regmap *map)
{
- int i;
- unsigned int *cache;
+ unsigned int cache_size;
+ struct regcache_flat_data *cache;
if (!map || map->reg_stride_order < 0 || !map->max_register_is_set)
return -EINVAL;
- map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
- + 1, sizeof(unsigned int), map->alloc_flags);
- if (!map->cache)
+ cache_size = regcache_flat_get_index(map, map->max_register) + 1;
+ cache = kzalloc(struct_size(cache, data, cache_size), map->alloc_flags);
+ if (!cache)
return -ENOMEM;
- cache = map->cache;
+ cache->valid = bitmap_zalloc(cache_size, map->alloc_flags);
+ if (!cache->valid)
+ goto err_free;
+
+ map->cache = cache;
+
+ return 0;
+
+err_free:
+ kfree(cache);
+ return -ENOMEM;
+}
+
+static int regcache_flat_exit(struct regmap *map)
+{
+ struct regcache_flat_data *cache = map->cache;
+
+ if (cache)
+ bitmap_free(cache->valid);
+
+ kfree(cache);
+ map->cache = NULL;
+
+ return 0;
+}
+
+static int regcache_flat_populate(struct regmap *map)
+{
+ struct regcache_flat_data *cache = map->cache;
+ unsigned int i;
for (i = 0; i < map->num_reg_defaults; i++) {
unsigned int reg = map->reg_defaults[i].reg;
unsigned int index = regcache_flat_get_index(map, reg);
- cache[index] = map->reg_defaults[i].def;
+ cache->data[index] = map->reg_defaults[i].def;
+ __set_bit(index, cache->valid);
}
return 0;
}
-static int regcache_flat_exit(struct regmap *map)
+static int regcache_flat_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
{
- kfree(map->cache);
- map->cache = NULL;
+ struct regcache_flat_data *cache = map->cache;
+ unsigned int index = regcache_flat_get_index(map, reg);
+
+ /* legacy behavior: ignore validity, but warn the user */
+ if (unlikely(!test_bit(index, cache->valid)))
+ dev_warn_once(map->dev,
+ "using zero-initialized flat cache, this may cause unexpected behavior");
+
+ *value = cache->data[index];
return 0;
}
-static int regcache_flat_read(struct regmap *map,
- unsigned int reg, unsigned int *value)
+static int regcache_flat_sparse_read(struct regmap *map,
+ unsigned int reg, unsigned int *value)
{
- unsigned int *cache = map->cache;
+ struct regcache_flat_data *cache = map->cache;
unsigned int index = regcache_flat_get_index(map, reg);
- *value = cache[index];
+ if (unlikely(!test_bit(index, cache->valid)))
+ return -ENOENT;
+
+ *value = cache->data[index];
return 0;
}
@@ -65,10 +115,23 @@ static int regcache_flat_read(struct regmap *map,
static int regcache_flat_write(struct regmap *map, unsigned int reg,
unsigned int value)
{
- unsigned int *cache = map->cache;
+ struct regcache_flat_data *cache = map->cache;
unsigned int index = regcache_flat_get_index(map, reg);
- cache[index] = value;
+ cache->data[index] = value;
+ __set_bit(index, cache->valid);
+
+ return 0;
+}
+
+static int regcache_flat_drop(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct regcache_flat_data *cache = map->cache;
+ unsigned int bitmap_min = regcache_flat_get_index(map, min);
+ unsigned int bitmap_max = regcache_flat_get_index(map, max);
+
+ bitmap_clear(cache->valid, bitmap_min, bitmap_max + 1 - bitmap_min);
return 0;
}
@@ -78,6 +141,18 @@ struct regcache_ops regcache_flat_ops = {
.name = "flat",
.init = regcache_flat_init,
.exit = regcache_flat_exit,
+ .populate = regcache_flat_populate,
.read = regcache_flat_read,
.write = regcache_flat_write,
};
+
+struct regcache_ops regcache_flat_sparse_ops = {
+ .type = REGCACHE_FLAT_S,
+ .name = "flat-sparse",
+ .init = regcache_flat_init,
+ .exit = regcache_flat_exit,
+ .populate = regcache_flat_populate,
+ .read = regcache_flat_sparse_read,
+ .write = regcache_flat_write,
+ .drop = regcache_flat_drop,
+};
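A hedged sketch of how a driver would opt into the new sparse flat cache; REGCACHE_FLAT_S comes from the hunks above, while the config fields, the reg_default table and its values are ordinary regmap usage invented for illustration:

	static const struct reg_default foo_defaults[] = {
		{ .reg = 0x00, .def = 0x12 },
	};

	static const struct regmap_config foo_regmap_config = {
		.reg_bits	  = 8,
		.val_bits	  = 8,
		.max_register	  = 0xff,
		.cache_type	  = REGCACHE_FLAT_S,
		.reg_defaults	  = foo_defaults,
		.num_reg_defaults = ARRAY_SIZE(foo_defaults),
	};

Unlike plain REGCACHE_FLAT, a read of a register that was never written or seeded returns -ENOENT from the cache, so regmap should fall back to reading the hardware rather than handing back a zero.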
diff --git a/drivers/base/regmap/regcache-maple.c b/drivers/base/regmap/regcache-maple.c
index 2319c30283a6..ca1c72b68f31 100644
--- a/drivers/base/regmap/regcache-maple.c
+++ b/drivers/base/regmap/regcache-maple.c
@@ -289,6 +289,23 @@ out:
return ret;
}
+static int regcache_maple_init(struct regmap *map)
+{
+ struct maple_tree *mt;
+
+ mt = kmalloc(sizeof(*mt), map->alloc_flags);
+ if (!mt)
+ return -ENOMEM;
+ map->cache = mt;
+
+ mt_init(mt);
+
+ if (!mt_external_lock(mt) && map->lock_key)
+ lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
+
+ return 0;
+}
+
static int regcache_maple_exit(struct regmap *map)
{
struct maple_tree *mt = map->cache;
@@ -340,26 +357,12 @@ static int regcache_maple_insert_block(struct regmap *map, int first,
return ret;
}
-static int regcache_maple_init(struct regmap *map)
+static int regcache_maple_populate(struct regmap *map)
{
- struct maple_tree *mt;
int i;
int ret;
int range_start;
- mt = kmalloc(sizeof(*mt), map->alloc_flags);
- if (!mt)
- return -ENOMEM;
- map->cache = mt;
-
- mt_init(mt);
-
- if (!mt_external_lock(mt) && map->lock_key)
- lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);
-
- if (!map->num_reg_defaults)
- return 0;
-
range_start = 0;
/* Scan for ranges of contiguous registers */
@@ -369,23 +372,14 @@ static int regcache_maple_init(struct regmap *map)
ret = regcache_maple_insert_block(map, range_start,
i - 1);
if (ret != 0)
- goto err;
+ return ret;
range_start = i;
}
}
/* Add the last block */
- ret = regcache_maple_insert_block(map, range_start,
- map->num_reg_defaults - 1);
- if (ret != 0)
- goto err;
-
- return 0;
-
-err:
- regcache_maple_exit(map);
- return ret;
+ return regcache_maple_insert_block(map, range_start, map->num_reg_defaults - 1);
}
struct regcache_ops regcache_maple_ops = {
@@ -393,6 +387,7 @@ struct regcache_ops regcache_maple_ops = {
.name = "maple",
.init = regcache_maple_init,
.exit = regcache_maple_exit,
+ .populate = regcache_maple_populate,
.read = regcache_maple_read,
.write = regcache_maple_write,
.drop = regcache_maple_drop,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index a9d17f316e55..3344b82c3799 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -184,8 +184,6 @@ static void rbtree_debugfs_init(struct regmap *map)
static int regcache_rbtree_init(struct regmap *map)
{
struct regcache_rbtree_ctx *rbtree_ctx;
- int i;
- int ret;
map->cache = kmalloc(sizeof *rbtree_ctx, map->alloc_flags);
if (!map->cache)
@@ -195,19 +193,7 @@ static int regcache_rbtree_init(struct regmap *map)
rbtree_ctx->root = RB_ROOT;
rbtree_ctx->cached_rbnode = NULL;
- for (i = 0; i < map->num_reg_defaults; i++) {
- ret = regcache_rbtree_write(map,
- map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- if (ret)
- goto err;
- }
-
return 0;
-
-err:
- regcache_rbtree_exit(map);
- return ret;
}
static int regcache_rbtree_exit(struct regmap *map)
@@ -239,6 +225,22 @@ static int regcache_rbtree_exit(struct regmap *map)
return 0;
}
+static int regcache_rbtree_populate(struct regmap *map)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < map->num_reg_defaults; i++) {
+ ret = regcache_rbtree_write(map,
+ map->reg_defaults[i].reg,
+ map->reg_defaults[i].def);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int regcache_rbtree_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
@@ -546,6 +548,7 @@ struct regcache_ops regcache_rbtree_ops = {
.name = "rbtree",
.init = regcache_rbtree_init,
.exit = regcache_rbtree_exit,
+ .populate = regcache_rbtree_populate,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = rbtree_debugfs_init,
#endif
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index c7650fa434ad..319c342bf5a0 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -16,6 +16,7 @@
#include "internal.h"
static const struct regcache_ops *cache_types[] = {
+ &regcache_flat_sparse_ops,
&regcache_rbtree_ops,
&regcache_maple_ops,
&regcache_flat_ops,
@@ -221,8 +222,24 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
if (ret)
goto err_free;
}
+
+ if (map->num_reg_defaults && map->cache_ops->populate) {
+ dev_dbg(map->dev, "Populating %s cache\n", map->cache_ops->name);
+ map->lock(map->lock_arg);
+ ret = map->cache_ops->populate(map);
+ map->unlock(map->lock_arg);
+ if (ret)
+ goto err_exit;
+ }
return 0;
+err_exit:
+ if (map->cache_ops->exit) {
+ dev_dbg(map->dev, "Destroying %s cache\n", map->cache_ops->name);
+ map->lock(map->lock_arg);
+ ret = map->cache_ops->exit(map);
+ map->unlock(map->lock_arg);
+ }
err_free:
kfree(map->reg_defaults);
if (map->cache_free)
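With the init()/populate() split, a cache backend no longer has to unwind its own allocations when seeding defaults fails; the core rolls back through ->exit() (the err_exit path above). A minimal populate() for a hypothetical backend, with foo_cache_write() standing in for the backend's write op:

	static int foo_cache_populate(struct regmap *map)
	{
		unsigned int i;
		int ret;

		for (i = 0; i < map->num_reg_defaults; i++) {
			ret = foo_cache_write(map, map->reg_defaults[i].reg,
					      map->reg_defaults[i].def);
			if (ret)
				return ret;	/* core unwinds via ->exit() */
		}

		return 0;
	}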
diff --git a/drivers/base/regmap/regmap-i3c.c b/drivers/base/regmap/regmap-i3c.c
index b5300b7c477e..863b348704dc 100644
--- a/drivers/base/regmap/regmap-i3c.c
+++ b/drivers/base/regmap/regmap-i3c.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+#include <linux/array_size.h>
#include <linux/regmap.h>
#include <linux/i3c/device.h>
#include <linux/i3c/master.h>
@@ -10,7 +11,7 @@ static int regmap_i3c_write(void *context, const void *data, size_t count)
{
struct device *dev = context;
struct i3c_device *i3c = dev_to_i3cdev(dev);
- struct i3c_priv_xfer xfers[] = {
+ struct i3c_xfer xfers[] = {
{
.rnw = false,
.len = count,
@@ -18,7 +19,7 @@ static int regmap_i3c_write(void *context, const void *data, size_t count)
},
};
- return i3c_device_do_priv_xfers(i3c, xfers, 1);
+ return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR);
}
static int regmap_i3c_read(void *context,
@@ -27,7 +28,7 @@ static int regmap_i3c_read(void *context,
{
struct device *dev = context;
struct i3c_device *i3c = dev_to_i3cdev(dev);
- struct i3c_priv_xfer xfers[2];
+ struct i3c_xfer xfers[2];
xfers[0].rnw = false;
xfers[0].len = reg_size;
@@ -37,7 +38,7 @@ static int regmap_i3c_read(void *context,
xfers[1].len = val_size;
xfers[1].data.in = val;
- return i3c_device_do_priv_xfers(i3c, xfers, 2);
+ return i3c_device_do_xfers(i3c, xfers, ARRAY_SIZE(xfers), I3C_SDR);
}
static const struct regmap_bus regmap_i3c = {
diff --git a/drivers/base/regmap/regmap-kunit.c b/drivers/base/regmap/regmap-kunit.c
index 95c5bf2a78ee..f6fc5ed016da 100644
--- a/drivers/base/regmap/regmap-kunit.c
+++ b/drivers/base/regmap/regmap-kunit.c
@@ -54,6 +54,8 @@ static const char *regcache_type_name(enum regcache_type type)
return "none";
case REGCACHE_FLAT:
return "flat";
+ case REGCACHE_FLAT_S:
+ return "flat-sparse";
case REGCACHE_RBTREE:
return "rbtree";
case REGCACHE_MAPLE:
@@ -93,6 +95,8 @@ static const struct regmap_test_param regcache_types_list[] = {
{ .cache = REGCACHE_NONE, .fast_io = true },
{ .cache = REGCACHE_FLAT },
{ .cache = REGCACHE_FLAT, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S },
+ { .cache = REGCACHE_FLAT_S, .fast_io = true },
{ .cache = REGCACHE_RBTREE },
{ .cache = REGCACHE_RBTREE, .fast_io = true },
{ .cache = REGCACHE_MAPLE },
@@ -104,6 +108,8 @@ KUNIT_ARRAY_PARAM(regcache_types, regcache_types_list, param_to_desc);
static const struct regmap_test_param real_cache_types_only_list[] = {
{ .cache = REGCACHE_FLAT },
{ .cache = REGCACHE_FLAT, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S },
+ { .cache = REGCACHE_FLAT_S, .fast_io = true },
{ .cache = REGCACHE_RBTREE },
{ .cache = REGCACHE_RBTREE, .fast_io = true },
{ .cache = REGCACHE_MAPLE },
@@ -119,6 +125,12 @@ static const struct regmap_test_param real_cache_types_list[] = {
{ .cache = REGCACHE_FLAT, .from_reg = 0x2002 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2003 },
{ .cache = REGCACHE_FLAT, .from_reg = 0x2004 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
@@ -136,6 +148,12 @@ static const struct regmap_test_param real_cache_types_list[] = {
KUNIT_ARRAY_PARAM(real_cache_types, real_cache_types_list, param_to_desc);
static const struct regmap_test_param sparse_cache_types_list[] = {
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0, .fast_io = true },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2001 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2002 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2003 },
+ { .cache = REGCACHE_FLAT_S, .from_reg = 0x2004 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0 },
{ .cache = REGCACHE_RBTREE, .from_reg = 0, .fast_io = true },
{ .cache = REGCACHE_RBTREE, .from_reg = 0x2001 },
@@ -1597,6 +1615,8 @@ static const struct regmap_test_param raw_types_list[] = {
{ .cache = REGCACHE_NONE, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
@@ -1608,6 +1628,8 @@ KUNIT_ARRAY_PARAM(raw_test_types, raw_types_list, param_to_desc);
static const struct regmap_test_param raw_cache_types_list[] = {
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_FLAT, .val_endian = REGMAP_ENDIAN_BIG },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_LITTLE },
+ { .cache = REGCACHE_FLAT_S, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_LITTLE },
{ .cache = REGCACHE_RBTREE, .val_endian = REGMAP_ENDIAN_BIG },
{ .cache = REGCACHE_MAPLE, .val_endian = REGMAP_ENDIAN_LITTLE },
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 99d7fd85ca7d..29e5f3175301 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -609,4 +609,5 @@ void regmap_mmio_detach_clk(struct regmap *map)
}
EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
+MODULE_DESCRIPTION("regmap MMIO Module");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/regmap/regmap-sdw-mbq.c b/drivers/base/regmap/regmap-sdw-mbq.c
index 86644bbd0710..6a61629f5f89 100644
--- a/drivers/base/regmap/regmap-sdw-mbq.c
+++ b/drivers/base/regmap/regmap-sdw-mbq.c
@@ -15,11 +15,13 @@
struct regmap_mbq_context {
struct device *dev;
+ struct sdw_slave *sdw;
+
+ bool (*readable_reg)(struct device *dev, unsigned int reg);
struct regmap_sdw_mbq_cfg cfg;
int val_size;
- bool (*readable_reg)(struct device *dev, unsigned int reg);
};
static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
@@ -46,7 +48,7 @@ static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned i
static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
struct regmap_mbq_context *ctx)
{
- struct device *dev = &slave->dev;
+ struct device *dev = ctx->dev;
int val, ret = 0;
dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
@@ -96,8 +98,7 @@ static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
{
struct regmap_mbq_context *ctx = context;
- struct device *dev = ctx->dev;
- struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct sdw_slave *slave = ctx->sdw;
bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
@@ -156,8 +157,7 @@ static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
{
struct regmap_mbq_context *ctx = context;
- struct device *dev = ctx->dev;
- struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ struct sdw_slave *slave = ctx->sdw;
bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
int mbq_size = regmap_sdw_mbq_size(ctx, reg);
int ret;
@@ -208,6 +208,7 @@ static int regmap_sdw_mbq_config_check(const struct regmap_config *config)
static struct regmap_mbq_context *
regmap_sdw_mbq_gen_context(struct device *dev,
+ struct sdw_slave *sdw,
const struct regmap_config *config,
const struct regmap_sdw_mbq_cfg *mbq_config)
{
@@ -218,6 +219,7 @@ regmap_sdw_mbq_gen_context(struct device *dev,
return ERR_PTR(-ENOMEM);
ctx->dev = dev;
+ ctx->sdw = sdw;
if (mbq_config)
ctx->cfg = *mbq_config;
@@ -228,7 +230,7 @@ regmap_sdw_mbq_gen_context(struct device *dev,
return ctx;
}
-struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
+struct regmap *__regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
@@ -241,16 +243,16 @@ struct regmap *__regmap_init_sdw_mbq(struct sdw_slave *sdw,
if (ret)
return ERR_PTR(ret);
- ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return __regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ return __regmap_init(dev, &regmap_sdw_mbq, ctx,
config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_sdw_mbq);
-struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
+struct regmap *__devm_regmap_init_sdw_mbq(struct device *dev, struct sdw_slave *sdw,
const struct regmap_config *config,
const struct regmap_sdw_mbq_cfg *mbq_config,
struct lock_class_key *lock_key,
@@ -263,11 +265,11 @@ struct regmap *__devm_regmap_init_sdw_mbq(struct sdw_slave *sdw,
if (ret)
return ERR_PTR(ret);
- ctx = regmap_sdw_mbq_gen_context(&sdw->dev, config, mbq_config);
+ ctx = regmap_sdw_mbq_gen_context(dev, sdw, config, mbq_config);
if (IS_ERR(ctx))
return ERR_CAST(ctx);
- return __devm_regmap_init(&sdw->dev, &regmap_sdw_mbq, ctx,
+ return __devm_regmap_init(dev, &regmap_sdw_mbq, ctx,
config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_sdw_mbq);
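The extra device argument lets the regmap lifetime and debugfs entries attach to a device other than the SoundWire slave itself. Assuming the devm wrapper macro mirrors the new __devm_regmap_init_sdw_mbq() signature, a call site on, say, a function device might look like:

	/* hypothetical call site; the wrapper's exact name and argument
	 * order are assumed to follow the __devm variant above */
	regmap = devm_regmap_init_sdw_mbq(dev, sdw, &foo_config, &foo_mbq_cfg);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);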
diff --git a/drivers/base/regmap/regmap-slimbus.c b/drivers/base/regmap/regmap-slimbus.c
index 54eb7d227cf4..e523fae73004 100644
--- a/drivers/base/regmap/regmap-slimbus.c
+++ b/drivers/base/regmap/regmap-slimbus.c
@@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
- lock_key, lock_name);
+ return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
@@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
- return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
- lock_key, lock_name);
+ return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 1f3f782a04ba..ce9be3989a21 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -827,7 +827,7 @@ struct regmap *__regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
- if (config && config->read && config->write) {
+ if (config->read && config->write) {
map->reg_read = _regmap_bus_read;
if (config->reg_update_bits)
map->reg_update_bits = config->reg_update_bits;
@@ -2258,12 +2258,14 @@ EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
* @field: Register field to operate on
* @bits: Bits to test
*
- * Returns -1 if the underlying regmap_field_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns negative errno if the underlying regmap_field_read() fails,
+ * 0 if at least one of the tested bits is not set and 1 if all tested
+ * bits are set.
*/
int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_field_read(field, &val);
if (ret)
@@ -3309,7 +3311,8 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
*/
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
- unsigned int val, ret;
+ unsigned int val;
+ int ret;
ret = regmap_read(map, reg, &val);
if (ret)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index deda7f35a059..16a8301c25d6 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -535,14 +535,29 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
ref_array = prop->pointer;
ref = &ref_array[index];
- refnode = software_node_fwnode(ref->node);
+ /*
+ * A software node can reference other software nodes or firmware
+ * nodes (which are the abstraction layer sitting on top of them).
+ * This is done to ensure we can create references to static software
+ * nodes before they're registered with the firmware node framework.
+ * At the time the reference is being resolved, we expect the swnodes
+ * in question to already have been registered and to be backed by
+ * a firmware node. This is why we use the fwnode API below to read the
+ * relevant properties and bump the reference count.
+ */
+
+ if (ref->swnode)
+ refnode = software_node_fwnode(ref->swnode);
+ else if (ref->fwnode)
+ refnode = ref->fwnode;
+ else
+ return -EINVAL;
+
if (!refnode)
return -ENOENT;
if (nargs_prop) {
- error = property_entry_read_int_array(ref->node->properties,
- nargs_prop, sizeof(u32),
- &nargs_prop_val, 1);
+ error = fwnode_property_read_u32(refnode, nargs_prop, &nargs_prop_val);
if (error)
return error;
@@ -555,7 +570,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (!args)
return 0;
- args->fwnode = software_node_get(refnode);
+ args->fwnode = fwnode_handle_get(refnode);
args->nargs = nargs;
for (i = 0; i < nargs; i++)
@@ -635,7 +650,10 @@ software_node_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
ref = prop->pointer;
- return software_node_get(software_node_fwnode(ref[0].node));
+ if (!ref->swnode)
+ return NULL;
+
+ return software_node_get(software_node_fwnode(ref->swnode));
}
static struct fwnode_handle *
@@ -844,7 +862,7 @@ swnode_register(const struct software_node *node, struct swnode *parent,
* of this function or by ordering the array such that parent comes before
* child.
*/
-int software_node_register_node_group(const struct software_node **node_group)
+int software_node_register_node_group(const struct software_node * const *node_group)
{
unsigned int i;
int ret;
@@ -877,8 +895,7 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* remove the nodes individually, in the correct order (child before
* parent).
*/
-void software_node_unregister_node_group(
- const struct software_node **node_group)
+void software_node_unregister_node_group(const struct software_node * const *node_group)
{
unsigned int i = 0;
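For the common case the new ref->swnode branch resolves, references are still built with the long-standing PROPERTY_ENTRY_REF() helper; a sketch with invented node and property names:

	static const struct software_node foo_target = { .name = "foo-target" };

	static const struct property_entry foo_props[] = {
		PROPERTY_ENTRY_REF("foo-link", &foo_target),
		{ }
	};

How a reference carrying a bare fwnode_handle (the new ref->fwnode branch) is constructed is not shown in this hunk.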
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 13db1f78d2ce..483adb796654 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -11,32 +11,32 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
-static LIST_HEAD(syscore_ops_list);
-static DEFINE_MUTEX(syscore_ops_lock);
+static LIST_HEAD(syscore_list);
+static DEFINE_MUTEX(syscore_lock);
/**
- * register_syscore_ops - Register a set of system core operations.
- * @ops: System core operations to register.
+ * register_syscore - Register a set of system core operations.
+ * @syscore: System core operations to register.
*/
-void register_syscore_ops(struct syscore_ops *ops)
+void register_syscore(struct syscore *syscore)
{
- mutex_lock(&syscore_ops_lock);
- list_add_tail(&ops->node, &syscore_ops_list);
- mutex_unlock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
+ list_add_tail(&syscore->node, &syscore_list);
+ mutex_unlock(&syscore_lock);
}
-EXPORT_SYMBOL_GPL(register_syscore_ops);
+EXPORT_SYMBOL_GPL(register_syscore);
/**
- * unregister_syscore_ops - Unregister a set of system core operations.
- * @ops: System core operations to unregister.
+ * unregister_syscore - Unregister a set of system core operations.
+ * @syscore: System core operations to unregister.
*/
-void unregister_syscore_ops(struct syscore_ops *ops)
+void unregister_syscore(struct syscore *syscore)
{
- mutex_lock(&syscore_ops_lock);
- list_del(&ops->node);
- mutex_unlock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
+ list_del(&syscore->node);
+ mutex_unlock(&syscore_lock);
}
-EXPORT_SYMBOL_GPL(unregister_syscore_ops);
+EXPORT_SYMBOL_GPL(unregister_syscore);
#ifdef CONFIG_PM_SLEEP
/**
@@ -46,7 +46,7 @@ EXPORT_SYMBOL_GPL(unregister_syscore_ops);
*/
int syscore_suspend(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
int ret = 0;
trace_suspend_resume(TPS("syscore_suspend"), 0, true);
@@ -59,25 +59,27 @@ int syscore_suspend(void)
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->suspend) {
- pm_pr_dbg("Calling %pS\n", ops->suspend);
- ret = ops->suspend();
+ list_for_each_entry_reverse(syscore, &syscore_list, node)
+ if (syscore->ops->suspend) {
+ pm_pr_dbg("Calling %pS\n", syscore->ops->suspend);
+ ret = syscore->ops->suspend(syscore->data);
if (ret)
goto err_out;
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pS\n", ops->suspend);
+ "Interrupts enabled after %pS\n",
+ syscore->ops->suspend);
}
trace_suspend_resume(TPS("syscore_suspend"), 0, false);
return 0;
err_out:
- pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
+ pr_err("PM: System core suspend callback %pS failed.\n",
+ syscore->ops->suspend);
- list_for_each_entry_continue(ops, &syscore_ops_list, node)
- if (ops->resume)
- ops->resume();
+ list_for_each_entry_continue(syscore, &syscore_list, node)
+ if (syscore->ops->resume)
+ syscore->ops->resume(syscore->data);
return ret;
}
@@ -90,18 +92,19 @@ EXPORT_SYMBOL_GPL(syscore_suspend);
*/
void syscore_resume(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
trace_suspend_resume(TPS("syscore_resume"), 0, true);
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core resume.\n");
- list_for_each_entry(ops, &syscore_ops_list, node)
- if (ops->resume) {
- pm_pr_dbg("Calling %pS\n", ops->resume);
- ops->resume();
+ list_for_each_entry(syscore, &syscore_list, node)
+ if (syscore->ops->resume) {
+ pm_pr_dbg("Calling %pS\n", syscore->ops->resume);
+ syscore->ops->resume(syscore->data);
WARN_ONCE(!irqs_disabled(),
- "Interrupts enabled after %pS\n", ops->resume);
+ "Interrupts enabled after %pS\n",
+ syscore->ops->resume);
}
trace_suspend_resume(TPS("syscore_resume"), 0, false);
}
@@ -113,16 +116,17 @@ EXPORT_SYMBOL_GPL(syscore_resume);
*/
void syscore_shutdown(void)
{
- struct syscore_ops *ops;
+ struct syscore *syscore;
- mutex_lock(&syscore_ops_lock);
+ mutex_lock(&syscore_lock);
- list_for_each_entry_reverse(ops, &syscore_ops_list, node)
- if (ops->shutdown) {
+ list_for_each_entry_reverse(syscore, &syscore_list, node)
+ if (syscore->ops->shutdown) {
if (initcall_debug)
- pr_info("PM: Calling %pS\n", ops->shutdown);
- ops->shutdown();
+ pr_info("PM: Calling %pS\n",
+ syscore->ops->shutdown);
+ syscore->ops->shutdown(syscore->data);
}
- mutex_unlock(&syscore_ops_lock);
+ mutex_unlock(&syscore_lock);
}
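A hedged sketch of a client converted to the reworked interface. The struct syscore members (.ops, .data, .node) and the data-taking callbacks follow from the hunks above; the name of the ops container (kept as struct syscore_ops here) and the exact callback signatures are assumptions:

	static struct { u32 saved_ctrl; } foo_state;

	static int foo_suspend(void *data)
	{
		/* quiesce the hardware described by *data */
		return 0;
	}

	static void foo_resume(void *data)
	{
		/* undo foo_suspend() */
	}

	static const struct syscore_ops foo_syscore_ops = {
		.suspend = foo_suspend,
		.resume  = foo_resume,
	};

	static struct syscore foo_syscore = {
		.ops  = &foo_syscore_ops,
		.data = &foo_state,
	};

	/* register_syscore(&foo_syscore) at init time,
	 * unregister_syscore(&foo_syscore) at teardown */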
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 6ecfc821cf83..72f045e6ed51 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;
list_for_each_entry(core, &bus->cores, list) {
+ struct device_node *np;
+
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;
+ np = core->dev.of_node;
+ if (np && !of_device_is_available(np))
+ continue;
+
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index df38fb364904..77d694448990 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,6 +17,7 @@ menuconfig BLK_DEV
if BLK_DEV
source "drivers/block/null_blk/Kconfig"
+source "drivers/block/rnull/Kconfig"
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -311,15 +312,6 @@ config VIRTIO_BLK
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
-config BLK_DEV_RUST_NULL
- tristate "Rust null block driver (Experimental)"
- depends on RUST
- help
- This is the Rust implementation of the null block driver. For now it
- is only a minimal stub.
-
- If unsure, say N.
-
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index a695ce74ef22..2d8096eb8cdf 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -9,9 +9,6 @@
# needed for trace events
ccflags-y += -I$(src)
-obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
-rnull_mod-y := rnull.o
-
obj-$(CONFIG_MAC_FLOPPY) += swim3.o
obj-$(CONFIG_BLK_DEV_SWIM) += swim_mod.o
obj-$(CONFIG_BLK_DEV_FD) += floppy.o
@@ -38,6 +35,7 @@ obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk/
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull/
obj-$(CONFIG_BLK_DEV_UBLK) += ublk_drv.o
obj-$(CONFIG_BLK_DEV_ZONED_LOOP) += zloop.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 6357d86eafdc..2932b6653b6f 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1523,13 +1523,13 @@ static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = MINOR(bdev->bd_dev) & 3;
+ struct amiga_floppy_struct *p = disk->private_data;
- geo->heads = unit[drive].type->heads;
- geo->sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
- geo->cylinders = unit[drive].type->tracks;
+ geo->heads = p->type->heads;
+ geo->sectors = p->dtype->sects * p->type->sect_mult;
+ geo->cylinders = p->type->tracks;
return 0;
}
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 00b74a845328..34ead75e7e02 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -269,9 +269,9 @@ static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
}
static int
-aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+aoeblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct aoedev *d = bdev->bd_disk->private_data;
+ struct aoedev *d = disk->private_data;
if ((d->flags & DEVFL_UP) == 0) {
printk(KERN_ERR "aoe: disk not up\n");
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 6298f8e271e3..a9affb7c264d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1761,6 +1761,6 @@ aoecmd_exit(void)
kfree(kts);
kfree(ktiowq);
- free_page((unsigned long) page_address(empty_page));
+ __free_page(empty_page);
empty_page = NULL;
}
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
index cdf6e4041bb9..3b21750038ee 100644
--- a/drivers/block/aoe/aoemain.c
+++ b/drivers/block/aoe/aoemain.c
@@ -44,7 +44,7 @@ aoe_init(void)
{
int ret;
- aoe_wq = alloc_workqueue("aoe_wq", 0, 0);
+ aoe_wq = alloc_workqueue("aoe_wq", WQ_PERCPU, 0);
if (!aoe_wq)
return -ENOMEM;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c2eabe14af3..9778259b30d4 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -44,45 +44,74 @@ struct brd_device {
};
/*
- * Look up and return a brd's page for a given sector.
+ * Look up and return a brd's page for a given sector, with a reference grabbed.
*/
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
- return xa_load(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+ struct page *page;
+ XA_STATE(xas, &brd->brd_pages, sector >> PAGE_SECTORS_SHIFT);
+
+ rcu_read_lock();
+repeat:
+ page = xas_load(&xas);
+ if (xas_retry(&xas, page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (!page)
+ goto out;
+
+ if (!get_page_unless_zero(page)) {
+ xas_reset(&xas);
+ goto repeat;
+ }
+
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
+ xas_reset(&xas);
+ goto repeat;
+ }
+out:
+ rcu_read_unlock();
+
+ return page;
}
/*
* Insert a new page for a given sector, if one does not already exist.
+ * A reference is held on the returned page.
*/
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector,
blk_opf_t opf)
- __releases(rcu)
- __acquires(rcu)
{
gfp_t gfp = (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
struct page *page, *ret;
- rcu_read_unlock();
page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
- if (!page) {
- rcu_read_lock();
+ if (!page)
return ERR_PTR(-ENOMEM);
- }
xa_lock(&brd->brd_pages);
ret = __xa_cmpxchg(&brd->brd_pages, sector >> PAGE_SECTORS_SHIFT, NULL,
page, gfp);
- rcu_read_lock();
- if (ret) {
+ if (!ret) {
+ brd->brd_nr_pages++;
+ get_page(page);
+ xa_unlock(&brd->brd_pages);
+ return page;
+ }
+
+ if (!xa_is_err(ret)) {
+ get_page(ret);
xa_unlock(&brd->brd_pages);
- __free_page(page);
- if (xa_is_err(ret))
- return ERR_PTR(xa_err(ret));
+ put_page(page);
return ret;
}
- brd->brd_nr_pages++;
+
xa_unlock(&brd->brd_pages);
- return page;
+ put_page(page);
+ return ERR_PTR(xa_err(ret));
}
/*
@@ -95,7 +124,7 @@ static void brd_free_pages(struct brd_device *brd)
pgoff_t idx;
xa_for_each(&brd->brd_pages, idx, page) {
- __free_page(page);
+ put_page(page);
cond_resched();
}
@@ -117,7 +146,6 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
- rcu_read_lock();
page = brd_lookup_page(brd, sector);
if (!page && op_is_write(opf)) {
page = brd_insert_page(brd, sector, opf);
@@ -135,13 +163,13 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
memset(kaddr, 0, bv.bv_len);
}
kunmap_local(kaddr);
- rcu_read_unlock();
bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ if (page)
+ put_page(page);
return true;
out_error:
- rcu_read_unlock();
if (PTR_ERR(page) == -ENOMEM && (opf & REQ_NOWAIT))
bio_wouldblock_error(bio);
else
@@ -149,13 +177,6 @@ out_error:
return false;
}
-static void brd_free_one_page(struct rcu_head *head)
-{
- struct page *page = container_of(head, struct page, rcu_head);
-
- __free_page(page);
-}
-
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
{
sector_t aligned_sector = round_up(sector, PAGE_SECTORS);
@@ -170,7 +191,7 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
while (aligned_sector < aligned_end && aligned_sector < rd_size * 2) {
page = __xa_erase(&brd->brd_pages, aligned_sector >> PAGE_SECTORS_SHIFT);
if (page) {
- call_rcu(&page->rcu_head, brd_free_one_page);
+ put_page(page);
brd->brd_nr_pages--;
}
aligned_sector += PAGE_SECTORS;
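The reworked brd_lookup_page() is the standard RCU-plus-refcount lookup idiom (the same dance the page cache does), in outline:

	rcu_read_lock();
retry:
	page = xas_load(&xas);			/* may be freed at any moment */
	if (page && !get_page_unless_zero(page)) {
		xas_reset(&xas);		/* refcount hit zero: being freed */
		goto retry;
	}
	if (page && page != xas_reload(&xas)) {
		put_page(page);			/* pinned a page reused elsewhere */
		xas_reset(&xas);
		goto retry;
	}
	rcu_read_unlock();

Because lookups now hold a real page reference, brd_do_discard() can drop its entry with a plain put_page() after __xa_erase() instead of deferring the free through call_rcu().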
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 85ca000a0564..d90fa3e7f4cf 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1210,7 +1210,7 @@ static int bm_rw(struct drbd_device *device, const unsigned int flags, unsigned
return err;
}
-/**
+/*
* drbd_bm_read() - Read the whole bitmap from its on disk location.
* @device: DRBD device.
*/
@@ -1221,7 +1221,7 @@ int drbd_bm_read(struct drbd_device *device,
return bm_rw(device, BM_AIO_READ, 0);
}
-/**
+/*
* drbd_bm_write() - Write the whole bitmap to its on disk location.
* @device: DRBD device.
*
@@ -1233,7 +1233,7 @@ int drbd_bm_write(struct drbd_device *device,
return bm_rw(device, 0, 0);
}
-/**
+/*
* drbd_bm_write_all() - Write the whole bitmap to its on disk location.
* @device: DRBD device.
*
@@ -1255,7 +1255,7 @@ int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_ho
return bm_rw(device, BM_AIO_COPY_PAGES, upper_idx);
}
-/**
+/*
* drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
* @device: DRBD device.
*
@@ -1272,7 +1272,7 @@ int drbd_bm_write_copy_pages(struct drbd_device *device,
return bm_rw(device, BM_AIO_COPY_PAGES, 0);
}
-/**
+/*
* drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
* @device: DRBD device.
*/
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e09930c2b226..91f3b8afb63c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -1330,6 +1330,7 @@ void drbd_reconsider_queue_parameters(struct drbd_device *device,
lim.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
else
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
if ((lim.discard_granularity >> SECTOR_SHIFT) >
lim.max_hw_discard_sectors) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index caaf2781136d..3de919b6f0e1 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -450,7 +450,7 @@ static struct socket *drbd_try_connect(struct drbd_connection *connection)
* a free one dynamically.
*/
what = "bind before connect";
- err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
+ err = sock->ops->bind(sock, (struct sockaddr_unsized *) &src_in6, my_addr_len);
if (err < 0)
goto out;
@@ -458,7 +458,7 @@ static struct socket *drbd_try_connect(struct drbd_connection *connection)
* stay C_WF_CONNECTION, don't go Disconnecting! */
disconnect_on_error = 0;
what = "connect";
- err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
+ err = sock->ops->connect(sock, (struct sockaddr_unsized *) &peer_in6, peer_addr_len, 0);
out:
if (err < 0) {
@@ -537,7 +537,7 @@ static int prepare_listen_socket(struct drbd_connection *connection, struct acce
drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
what = "bind before listen";
- err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
+ err = s_listen->ops->bind(s_listen, (struct sockaddr_unsized *)&my_addr, my_addr_len);
if (err < 0)
goto out;
@@ -1736,13 +1736,13 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
page = peer_req->pages;
page_chain_for_each(page) {
unsigned len = min_t(int, ds, PAGE_SIZE);
- data = kmap(page);
+ data = kmap_local_page(page);
err = drbd_recv_all_warn(peer_device->connection, data, len);
if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
drbd_err(device, "Fault injection: Corrupting data on receive\n");
data[0] = data[0] ^ (unsigned long)-1;
}
- kunmap(page);
+ kunmap_local(data);
if (err) {
drbd_free_peer_req(device, peer_req);
return NULL;
@@ -1777,7 +1777,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
page = drbd_alloc_pages(peer_device, 1, 1);
- data = kmap(page);
+ data = kmap_local_page(page);
while (data_size) {
unsigned int len = min_t(int, data_size, PAGE_SIZE);
@@ -1786,7 +1786,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
break;
data_size -= len;
}
- kunmap(page);
+ kunmap_local(data);
drbd_free_pages(peer_device->device, page);
return err;
}
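kmap() occupies a global, sleepable mapping slot; kmap_local_page() creates a cheap CPU-local mapping instead. The rule the conversion has to respect is that local mappings nest and must be released in reverse (LIFO) order:

	void *a = kmap_local_page(page1);
	void *b = kmap_local_page(page2);
	/* use a and b; migration to another CPU is disabled meanwhile */
	kunmap_local(b);	/* most recent mapping first */
	kunmap_local(a);

Both converted call sites map one page at a time, so they satisfy this trivially.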
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 24be0c2c4075..c28786e0fe1c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -163,35 +163,35 @@
/* do print messages for unexpected interrupts */
static int print_unex = 1;
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/workqueue.h>
-#include <linux/fdreg.h>
-#include <linux/fd.h>
-#include <linux/hdreg.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
+#include <linux/async.h>
#include <linux/bio.h>
-#include <linux/string.h>
-#include <linux/jiffies.h>
-#include <linux/fcntl.h>
+#include <linux/compat.h>
#include <linux/delay.h>
-#include <linux/mc146818rtc.h> /* CMOS defines */
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/fcntl.h>
+#include <linux/fd.h>
+#include <linux/fdreg.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
#include <linux/major.h>
-#include <linux/platform_device.h>
+#include <linux/mc146818rtc.h> /* CMOS defines */
+#include <linux/mm.h>
#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/timer.h>
#include <linux/uaccess.h>
-#include <linux/async.h>
-#include <linux/compat.h>
+#include <linux/workqueue.h>
/*
* PS/2 floppies have much slower step rates than regular floppies.
@@ -233,8 +233,6 @@ static unsigned short virtual_dma_port = 0x3f0;
irqreturn_t floppy_interrupt(int irq, void *dev_id);
static int set_dor(int fdc, char mask, char data);
-#define K_64 0x10000 /* 64KB */
-
/* the following is the mask of allowed drives. By default units 2 and
* 3 of both floppy controllers are disabled, because switching on the
* motor of these drives causes system hangs on some PCI computers. drive
@@ -331,7 +329,7 @@ static bool initialized;
* This default is used whenever the current disk size is unknown.
* [Now it is rather a minimum]
*/
-#define MAX_DISK_SIZE 4 /* 3984 */
+#define MAX_DISK_SIZE (PAGE_SIZE / 1024)
/*
* globals used by 'result()'
@@ -3092,16 +3090,13 @@ static int raw_cmd_copyin(int cmd, void __user *param,
*rcmd = NULL;
loop:
- ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
+ ptr = memdup_user(param, sizeof(*ptr));
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
*rcmd = ptr;
- ret = copy_from_user(ptr, param, sizeof(*ptr));
ptr->next = NULL;
ptr->buffer_length = 0;
ptr->kernel_data = NULL;
- if (ret)
- return -EFAULT;
param += sizeof(struct floppy_raw_cmd);
if (ptr->cmd_count > FD_RAW_CMD_FULLSIZE)
return -EINVAL;
@@ -3363,9 +3358,9 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
return 0;
}
-static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int fd_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- int drive = (long)bdev->bd_disk->private_data;
+ int drive = (long)disk->private_data;
int type = ITYPE(drive_state[drive].fd_device);
struct floppy_struct *g;
int ret;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 053a086d547e..272bc608e528 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -348,11 +348,10 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
struct file *file = lo->lo_backing_file;
struct bio_vec tmp;
unsigned int offset;
- int nr_bvec = 0;
+ unsigned int nr_bvec;
int ret;
- rq_for_each_bvec(tmp, rq, rq_iter)
- nr_bvec++;
+ nr_bvec = blk_rq_nr_bvec(rq);
if (rq->bio != rq->biotail) {
@@ -551,8 +550,10 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
/* suppress uevents while reconfiguring the device */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1);
@@ -822,7 +823,7 @@ static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
if (worker)
goto queue_work;
- worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT | __GFP_NOWARN);
+ worker = kzalloc(sizeof(struct loop_worker), GFP_NOWAIT);
/*
* In the event we cannot allocate a worker, just queue on the
* rootcg worker and issue the I/O as the rootcg
@@ -993,8 +994,10 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
return -EBADF;
error = loop_check_backing_file(file);
- if (error)
+ if (error) {
+ fput(file);
return error;
+ }
is_loop = is_loop_device(file);
@@ -1904,6 +1907,10 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
goto failed;
}
+ /* We can block in this context, so ignore REQ_NOWAIT. */
+ if (rq->cmd_flags & REQ_NOWAIT)
+ rq->cmd_flags &= ~REQ_NOWAIT;
+
if (cmd_blkcg_css)
kthread_associate_blkcg(cmd_blkcg_css);
if (cmd_memcg_css)
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 8fc7761397bd..567192e371a8 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3148,17 +3148,17 @@ static int mtip_block_compat_ioctl(struct block_device *dev,
* that each partition is also 4KB aligned. Non-aligned partitions adversely
 * affect performance.
*
- * @dev Pointer to the block_device strucutre.
+ * @disk Pointer to the gendisk structure.
* @geo Pointer to a hd_geometry structure.
*
* return value
* 0 Operation completed successfully.
* -ENOTTY An error occurred while reading the drive capacity.
*/
-static int mtip_block_getgeo(struct block_device *dev,
+static int mtip_block_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
- struct driver_data *dd = dev->bd_disk->private_data;
+ struct driver_data *dd = disk->private_data;
sector_t capacity;
if (!dd)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 6463d0e8d0ce..f6c33b21f69e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -311,7 +311,7 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
if (args) {
INIT_WORK(&args->work, nbd_dead_link_work);
args->index = nbd->index;
- queue_work(system_wq, &args->work);
+ queue_work(system_percpu_wq, &args->work);
}
}
if (!nsock->dead) {
@@ -565,24 +565,27 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
msg.msg_iter = *iter;
noreclaim_flag = memalloc_noreclaim_save();
- do {
- sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
- sock->sk->sk_use_task_frag = false;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
- if (send)
- result = sock_sendmsg(sock, &msg);
- else
- result = sock_recvmsg(sock, &msg, msg.msg_flags);
-
- if (result <= 0) {
- if (result == 0)
- result = -EPIPE; /* short read */
- break;
- }
- if (sent)
- *sent += result;
- } while (msg_data_left(&msg));
+
+ scoped_with_kernel_creds() {
+ do {
+ sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
+ sock->sk->sk_use_task_frag = false;
+ msg.msg_flags = msg_flags | MSG_NOSIGNAL;
+
+ if (send)
+ result = sock_sendmsg(sock, &msg);
+ else
+ result = sock_recvmsg(sock, &msg, msg.msg_flags);
+
+ if (result <= 0) {
+ if (result == 0)
+ result = -EPIPE; /* short read */
+ break;
+ }
+ if (sent)
+ *sent += result;
+ } while (msg_data_left(&msg));
+ }
memalloc_noreclaim_restore(noreclaim_flag);
@@ -1018,9 +1021,9 @@ static void recv_work(struct work_struct *work)
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
- nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
+ nbd_config_put(nbd);
kfree(args);
}
@@ -1217,6 +1220,14 @@ static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
if (!sock)
return NULL;
+ if (!sk_is_tcp(sock->sk) &&
+ !sk_is_stream_unix(sock->sk)) {
+ dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
+ *err = -EINVAL;
+ sockfd_put(sock);
+ return NULL;
+ }
+
if (sock->ops->shutdown == sock_no_shutdown) {
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
*err = -EINVAL;
@@ -2227,12 +2238,13 @@ again:
ret = nbd_start_device(nbd);
out:
- mutex_unlock(&nbd->config_lock);
if (!ret) {
set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
refcount_inc(&nbd->config_refs);
nbd_connect_reply(info, nbd->index);
}
+ mutex_unlock(&nbd->config_lock);
+
nbd_config_put(nbd);
if (put_dev)
nbd_put(nbd);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 91642c9a3b29..c7c0fb79a6bf 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -223,7 +223,7 @@ MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed nu
static unsigned long g_cache_size;
module_param_named(cache_size, g_cache_size, ulong, 0444);
-MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
+MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
static bool g_fua = true;
module_param_named(fua, g_fua, bool, 0444);
@@ -1129,26 +1129,28 @@ again:
return 0;
}
-static int copy_to_nullb(struct nullb *nullb, struct page *source,
- unsigned int off, sector_t sector, size_t n, bool is_fua)
+static blk_status_t copy_to_nullb(struct nullb *nullb, void *source,
+ loff_t pos, size_t n, bool is_fua)
{
size_t temp, count = 0;
- unsigned int offset;
struct nullb_page *t_page;
+ sector_t sector;
while (count < n) {
- temp = min_t(size_t, nullb->dev->blocksize, n - count);
+ temp = min3(nullb->dev->blocksize, n - count,
+ PAGE_SIZE - offset_in_page(pos));
+ sector = pos >> SECTOR_SHIFT;
if (null_cache_active(nullb) && !is_fua)
null_make_cache_space(nullb, PAGE_SIZE);
- offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
t_page = null_insert_page(nullb, sector,
!null_cache_active(nullb) || is_fua);
if (!t_page)
- return -ENOSPC;
+ return BLK_STS_NOSPC;
- memcpy_page(t_page->page, offset, source, off + count, temp);
+ memcpy_to_page(t_page->page, offset_in_page(pos),
+ source + count, temp);
__set_bit(sector & SECTOR_MASK, t_page->bitmap);
@@ -1156,41 +1158,34 @@ static int copy_to_nullb(struct nullb *nullb, struct page *source,
null_free_sector(nullb, sector, true);
count += temp;
- sector += temp >> SECTOR_SHIFT;
+ pos += temp;
}
- return 0;
+ return BLK_STS_OK;
}
-static int copy_from_nullb(struct nullb *nullb, struct page *dest,
- unsigned int off, sector_t sector, size_t n)
+static void copy_from_nullb(struct nullb *nullb, void *dest, loff_t pos,
+ size_t n)
{
size_t temp, count = 0;
- unsigned int offset;
struct nullb_page *t_page;
+ sector_t sector;
while (count < n) {
- temp = min_t(size_t, nullb->dev->blocksize, n - count);
+ temp = min3(nullb->dev->blocksize, n - count,
+ PAGE_SIZE - offset_in_page(pos));
+ sector = pos >> SECTOR_SHIFT;
- offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
t_page = null_lookup_page(nullb, sector, false,
!null_cache_active(nullb));
-
if (t_page)
- memcpy_page(dest, off + count, t_page->page, offset,
- temp);
+ memcpy_from_page(dest + count, t_page->page,
+ offset_in_page(pos), temp);
else
- memzero_page(dest, off + count, temp);
+ memset(dest + count, 0, temp);
count += temp;
- sector += temp >> SECTOR_SHIFT;
+ pos += temp;
}
- return 0;
-}
-
-static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
- unsigned int len, unsigned int off)
-{
- memset_page(page, off, 0xff, len);
}
blk_status_t null_handle_discard(struct nullb_device *dev,
@@ -1234,34 +1229,39 @@ static blk_status_t null_handle_flush(struct nullb *nullb)
return errno_to_blk_status(err);
}
-static int null_transfer(struct nullb *nullb, struct page *page,
- unsigned int len, unsigned int off, bool is_write, sector_t sector,
+static blk_status_t null_transfer(struct nullb *nullb, struct page *page,
+ unsigned int len, unsigned int off, bool is_write, loff_t pos,
bool is_fua)
{
struct nullb_device *dev = nullb->dev;
+ blk_status_t err = BLK_STS_OK;
unsigned int valid_len = len;
- int err = 0;
+ void *p;
+ p = kmap_local_page(page) + off;
if (!is_write) {
- if (dev->zoned)
+ if (dev->zoned) {
valid_len = null_zone_valid_read_len(nullb,
- sector, len);
+ pos >> SECTOR_SHIFT, len);
+ if (valid_len && valid_len != len)
+ valid_len -= pos & (SECTOR_SIZE - 1);
+ }
if (valid_len) {
- err = copy_from_nullb(nullb, page, off,
- sector, valid_len);
+ copy_from_nullb(nullb, p, pos, valid_len);
off += valid_len;
len -= valid_len;
}
if (len)
- nullb_fill_pattern(nullb, page, len, off);
+ memset(p + valid_len, 0xff, len);
flush_dcache_page(page);
} else {
flush_dcache_page(page);
- err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
+ err = copy_to_nullb(nullb, p, pos, len, is_fua);
}
+ kunmap_local(p);
return err;
}
@@ -1274,9 +1274,9 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
{
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct nullb *nullb = cmd->nq->dev->nullb;
- int err = 0;
+ blk_status_t err = BLK_STS_OK;
unsigned int len;
- sector_t sector = blk_rq_pos(rq);
+ loff_t pos = blk_rq_pos(rq) << SECTOR_SHIFT;
unsigned int max_bytes = nr_sectors << SECTOR_SHIFT;
unsigned int transferred_bytes = 0;
struct req_iterator iter;
@@ -1288,18 +1288,18 @@ static blk_status_t null_handle_data_transfer(struct nullb_cmd *cmd,
if (transferred_bytes + len > max_bytes)
len = max_bytes - transferred_bytes;
err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
- op_is_write(req_op(rq)), sector,
+ op_is_write(req_op(rq)), pos,
rq->cmd_flags & REQ_FUA);
if (err)
break;
- sector += len >> SECTOR_SHIFT;
+ pos += len;
transferred_bytes += len;
if (transferred_bytes >= max_bytes)
break;
}
spin_unlock_irq(&nullb->lock);
- return errno_to_blk_status(err);
+ return err;
}
static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
@@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors,
+ .dma_alignment = 1,
};
struct nullb *nullb;
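Switching copy_to_nullb()/copy_from_nullb() from sector to byte positions is what makes the .dma_alignment = 1 limit above safe: the min3() bound clamps each chunk so a memcpy never crosses a backing-page boundary. For example, on 4 KiB pages with pos = 0x1ffe, blocksize = 512 and 16 bytes remaining, offset_in_page(pos) = 0xffe, so the first chunk is min3(512, 16, 4096 - 0xffe) = 2 bytes, ending exactly at the page edge.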
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 7bb6128dbaaf..6c4c4bbe7dad 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -143,7 +143,8 @@ int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim);
int null_register_zoned_dev(struct nullb *nullb);
void null_free_zoned_dev(struct nullb_device *dev);
int null_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args);
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
sector_t sector, sector_t nr_sectors);
size_t null_zone_valid_read_len(struct nullb *nullb,
diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c
index 4e5728f45989..0ada35dc0989 100644
--- a/drivers/block/null_blk/zoned.c
+++ b/drivers/block/null_blk/zoned.c
@@ -191,7 +191,7 @@ void null_free_zoned_dev(struct nullb_device *dev)
}
int null_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct nullb *nullb = disk->private_data;
struct nullb_device *dev = nullb->dev;
@@ -225,7 +225,7 @@ int null_report_zones(struct gendisk *disk, sector_t sector,
blkz.capacity = zone->capacity;
null_unlock_zone(dev, zone);
- error = cb(&blkz, i, data);
+ error = disk_report_zone(disk, &blkz, i, args);
if (error)
return error;
}
@@ -242,7 +242,7 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
{
struct nullb_device *dev = nullb->dev;
struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
- unsigned int nr_sectors = len >> SECTOR_SHIFT;
+ unsigned int nr_sectors = DIV_ROUND_UP(len, SECTOR_SIZE);
/* Read must be below the write pointer position */
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index dc9e4a14b885..8892f218a814 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -85,10 +85,14 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
struct bio_vec bvec;
rq_for_each_segment(bvec, req, iter) {
+ dev_dbg(&dev->sbd.core, "%s:%u: %u sectors from %llu\n",
+ __func__, __LINE__, bio_sectors(iter.bio),
+ iter.bio->bi_iter.bi_sector);
if (gather)
memcpy_from_bvec(dev->bounce_buf + offset, &bvec);
else
memcpy_to_bvec(&bvec, dev->bounce_buf + offset);
+ offset += bvec.bv_len;
}
}
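
The ps3disk hunk fixes a scatter/gather walk that never advanced its linear offset, so every segment after the first copied over the start of the bounce buffer. The corrected shape of such a loop, as a sketch:

#include <linux/blk-mq.h>
#include <linux/highmem.h>	/* memcpy_from_bvec()/memcpy_to_bvec() */

static void bounce_copy(struct request *req, void *bounce_buf, bool gather)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int offset = 0;

	rq_for_each_segment(bvec, req, iter) {
		if (gather)
			memcpy_from_bvec(bounce_buf + offset, &bvec);
		else
			memcpy_to_bvec(&bvec, bounce_buf + offset);
		offset += bvec.bv_len;	/* the advance the patch adds */
	}
}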
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index faafd7ff43d6..af0e21149dbc 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -7389,7 +7389,7 @@ static int __init rbd_init(void)
* The number of active work items is limited by the number of
* rbd devices * queue depth, so leave @max_active at default.
*/
- rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
+ rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!rbd_wq) {
rc = -ENOMEM;
goto err_out_slab;
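
WQ_PERCPU recurs throughout this diff (rbd, rnbd, sunvdc, virtio_blk): it spells out the historical per-CPU placement explicitly so behavior is preserved when the unadorned default changes. The allocation pattern, sketched with a hypothetical queue name:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/* WQ_PERCPU keeps work on the submitting CPU, the old implicit
	 * default; WQ_MEM_RECLAIM guarantees forward progress when the
	 * queue sits in a memory-reclaim path, as rbd's does
	 */
	example_wq = alloc_workqueue("example_wq",
				     WQ_MEM_RECLAIM | WQ_PERCPU, 0);
	return example_wq ? 0 : -ENOMEM;
}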
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 15627417f12e..f1409e54010a 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -942,11 +942,11 @@ static void rnbd_client_release(struct gendisk *gen)
rnbd_clt_put_dev(dev);
}
-static int rnbd_client_getgeo(struct block_device *block_device,
+static int rnbd_client_getgeo(struct gendisk *disk,
struct hd_geometry *geo)
{
u64 size;
- struct rnbd_clt_dev *dev = block_device->bd_disk->private_data;
+ struct rnbd_clt_dev *dev = disk->private_data;
struct queue_limits *limit = &dev->queue->limits;
size = dev->size * (limit->logical_block_size / SECTOR_SIZE);
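
The ->getgeo() conversion seen here is repeated mechanically in sunvdc, swim, virtio_blk and xen-blkfront below: the callback now receives the gendisk directly instead of digging it out of a block_device. A sketch of a converted implementation with made-up geometry:

#include <linux/blkdev.h>
#include <linux/hdreg.h>

static int example_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
	/* private_data and capacity come straight from the disk now */
	sector_t capacity = get_capacity(disk);

	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = capacity >> 11;	/* capacity / (heads * sectors) */
	return 0;
}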
@@ -1809,7 +1809,7 @@ static int __init rnbd_client_init(void)
unregister_blkdev(rnbd_client_major, "rnbd");
return err;
}
- rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", 0, 0);
+ rnbd_clt_wq = alloc_workqueue("rnbd_clt_wq", WQ_PERCPU, 0);
if (!rnbd_clt_wq) {
pr_err("Failed to load module, alloc_workqueue failed.\n");
rnbd_clt_destroy_sysfs_files();
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index f35be51d213c..77360c2a6069 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -24,7 +24,7 @@
#define RTRS_PORT 1234
/**
- * enum rnbd_msg_types - RNBD message types
+ * enum rnbd_msg_type - RNBD message types
* @RNBD_MSG_SESS_INFO: initial session info from client to server
* @RNBD_MSG_SESS_INFO_RSP: initial session info from server to client
* @RNBD_MSG_OPEN: open (map) device request
@@ -47,10 +47,11 @@ enum rnbd_msg_type {
*/
struct rnbd_msg_hdr {
__le16 type;
+ /* private: */
__le16 __padding;
};
-/**
+/*
* We allow to map RO many times and RW only once. We allow to map yet another
* time RW, if MIGRATION is provided (second RW export can be required for
* example for VM migration)
@@ -78,6 +79,7 @@ static const __maybe_unused struct {
struct rnbd_msg_sess_info {
struct rnbd_msg_hdr hdr;
u8 ver;
+ /* private: */
u8 reserved[31];
};
@@ -89,6 +91,7 @@ struct rnbd_msg_sess_info {
struct rnbd_msg_sess_info_rsp {
struct rnbd_msg_hdr hdr;
u8 ver;
+ /* private: */
u8 reserved[31];
};
@@ -97,13 +100,16 @@ struct rnbd_msg_sess_info_rsp {
* @hdr: message header
* @access_mode: the mode to open remote device, valid values see:
* enum rnbd_access_mode
- * @device_name: device path on remote side
+ * @dev_name: device path on remote side
*/
struct rnbd_msg_open {
struct rnbd_msg_hdr hdr;
u8 access_mode;
+ /* private: */
u8 resv1;
+ /* public: */
s8 dev_name[NAME_MAX];
+ /* private: */
u8 reserved[3];
};
@@ -155,6 +161,7 @@ struct rnbd_msg_open_rsp {
__le16 secure_discard;
u8 obsolete_rotational;
u8 cache_policy;
+ /* private: */
u8 reserved[10];
};
@@ -187,7 +194,7 @@ struct rnbd_msg_io {
* @RNBD_OP_DISCARD: discard sectors
* @RNBD_OP_SECURE_ERASE: securely erase sectors
* @RNBD_OP_WRITE_ZEROES: write zeroes sectors
-
+ *
* @RNBD_F_SYNC: request is sync (sync write or read)
* @RNBD_F_FUA: forced unit access
*/
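
The /* private: */ and /* public: */ comments added above are kernel-doc state switches, not C semantics: members after a private marker are omitted from generated documentation and exempt from missing-description warnings. A self-contained sketch:

#include <linux/types.h>

/**
 * struct example_msg - example wire message (hypothetical)
 * @type: message type
 * @mode: requested access mode
 */
struct example_msg {
	__le16 type;
	u8 mode;
	/* private: */
	u8 reserved[5];	/* padding; kernel-doc ignores members from here */
};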
diff --git a/drivers/block/rnull.rs b/drivers/block/rnull.rs
deleted file mode 100644
index d07e76ae2c13..000000000000
--- a/drivers/block/rnull.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-//! This is a Rust implementation of the C null block driver.
-//!
-//! Supported features:
-//!
-//! - blk-mq interface
-//! - direct completion
-//! - block size 4k
-//!
-//! The driver is not configurable.
-
-use kernel::{
- alloc::flags,
- block::mq::{
- self,
- gen_disk::{self, GenDisk},
- Operations, TagSet,
- },
- error::Result,
- new_mutex, pr_info,
- prelude::*,
- sync::{Arc, Mutex},
- types::ARef,
-};
-
-module! {
- type: NullBlkModule,
- name: "rnull_mod",
- authors: ["Andreas Hindborg"],
- description: "Rust implementation of the C null block driver",
- license: "GPL v2",
-}
-
-#[pin_data]
-struct NullBlkModule {
- #[pin]
- _disk: Mutex<GenDisk<NullBlkDevice>>,
-}
-
-impl kernel::InPlaceModule for NullBlkModule {
- fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
- pr_info!("Rust null_blk loaded\n");
-
- // Use a immediately-called closure as a stable `try` block
- let disk = /* try */ (|| {
- let tagset = Arc::pin_init(TagSet::new(1, 256, 1), flags::GFP_KERNEL)?;
-
- gen_disk::GenDiskBuilder::new()
- .capacity_sectors(4096 << 11)
- .logical_block_size(4096)?
- .physical_block_size(4096)?
- .rotational(false)
- .build(format_args!("rnullb{}", 0), tagset)
- })();
-
- try_pin_init!(Self {
- _disk <- new_mutex!(disk?, "nullb:disk"),
- })
- }
-}
-
-struct NullBlkDevice;
-
-#[vtable]
-impl Operations for NullBlkDevice {
- #[inline(always)]
- fn queue_rq(rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
- mq::Request::end_ok(rq)
- .map_err(|_e| kernel::error::code::EIO)
- // We take no refcounts on the request, so we expect to be able to
- // end the request. The request reference must be unique at this
- // point, and so `end_ok` cannot fail.
- .expect("Fatal error - expected to be able to end request");
-
- Ok(())
- }
-
- fn commit_rqs() {}
-}
diff --git a/drivers/block/rnull/Kconfig b/drivers/block/rnull/Kconfig
new file mode 100644
index 000000000000..7bc5b376c128
--- /dev/null
+++ b/drivers/block/rnull/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Rust null block device driver configuration
+
+config BLK_DEV_RUST_NULL
+ tristate "Rust null block driver (Experimental)"
+ depends on RUST && CONFIGFS_FS
+ help
+ This is the Rust implementation of the null block driver. Like
+	  the C version, the driver allows the user to create virtual block
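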
+ devices that can be configured via various configuration options.
+
+ If unsure, say N.
diff --git a/drivers/block/rnull/Makefile b/drivers/block/rnull/Makefile
new file mode 100644
index 000000000000..11cfa5e615dc
--- /dev/null
+++ b/drivers/block/rnull/Makefile
@@ -0,0 +1,3 @@
+
+obj-$(CONFIG_BLK_DEV_RUST_NULL) += rnull_mod.o
+rnull_mod-y := rnull.o
diff --git a/drivers/block/rnull/configfs.rs b/drivers/block/rnull/configfs.rs
new file mode 100644
index 000000000000..6713a6d92391
--- /dev/null
+++ b/drivers/block/rnull/configfs.rs
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use super::{NullBlkDevice, THIS_MODULE};
+use kernel::{
+ block::mq::gen_disk::{GenDisk, GenDiskBuilder},
+ c_str,
+ configfs::{self, AttributeOperations},
+ configfs_attrs,
+ fmt::{self, Write as _},
+ new_mutex,
+ page::PAGE_SIZE,
+ prelude::*,
+ str::{kstrtobool_bytes, CString},
+ sync::Mutex,
+};
+use pin_init::PinInit;
+
+pub(crate) fn subsystem() -> impl PinInit<kernel::configfs::Subsystem<Config>, Error> {
+ let item_type = configfs_attrs! {
+ container: configfs::Subsystem<Config>,
+ data: Config,
+ child: DeviceConfig,
+ attributes: [
+ features: 0,
+ ],
+ };
+
+ kernel::configfs::Subsystem::new(c_str!("rnull"), item_type, try_pin_init!(Config {}))
+}
+
+#[pin_data]
+pub(crate) struct Config {}
+
+#[vtable]
+impl AttributeOperations<0> for Config {
+ type Data = Config;
+
+ fn show(_this: &Config, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_str("blocksize,size,rotational,irqmode\n")?;
+ Ok(writer.bytes_written())
+ }
+}
+
+#[vtable]
+impl configfs::GroupOperations for Config {
+ type Child = DeviceConfig;
+
+ fn make_group(
+ &self,
+ name: &CStr,
+ ) -> Result<impl PinInit<configfs::Group<DeviceConfig>, Error>> {
+ let item_type = configfs_attrs! {
+ container: configfs::Group<DeviceConfig>,
+ data: DeviceConfig,
+ attributes: [
+ // Named for compatibility with C null_blk
+ power: 0,
+ blocksize: 1,
+ rotational: 2,
+ size: 3,
+ irqmode: 4,
+ ],
+ };
+
+ Ok(configfs::Group::new(
+ name.try_into()?,
+ item_type,
+ // TODO: cannot coerce new_mutex!() to impl PinInit<_, Error>, so put mutex inside
+ try_pin_init!( DeviceConfig {
+ data <- new_mutex!(DeviceConfigInner {
+ powered: false,
+ block_size: 4096,
+ rotational: false,
+ disk: None,
+ capacity_mib: 4096,
+ irq_mode: IRQMode::None,
+ name: name.try_into()?,
+ }),
+ }),
+ ))
+ }
+}
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum IRQMode {
+ None,
+ Soft,
+}
+
+impl TryFrom<u8> for IRQMode {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u8) -> Result<Self> {
+ match value {
+ 0 => Ok(Self::None),
+ 1 => Ok(Self::Soft),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+impl fmt::Display for IRQMode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::None => f.write_str("0")?,
+ Self::Soft => f.write_str("1")?,
+ }
+ Ok(())
+ }
+}
+
+#[pin_data]
+pub(crate) struct DeviceConfig {
+ #[pin]
+ data: Mutex<DeviceConfigInner>,
+}
+
+#[pin_data]
+struct DeviceConfigInner {
+ powered: bool,
+ name: CString,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ disk: Option<GenDisk<NullBlkDevice>>,
+}
+
+#[vtable]
+impl configfs::AttributeOperations<0> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().powered {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ let power_op = kstrtobool_bytes(page)?;
+ let mut guard = this.data.lock();
+
+ if !guard.powered && power_op {
+ guard.disk = Some(NullBlkDevice::new(
+ &guard.name,
+ guard.block_size,
+ guard.rotational,
+ guard.capacity_mib,
+ guard.irq_mode,
+ )?);
+ guard.powered = true;
+ } else if guard.powered && !power_op {
+ drop(guard.disk.take());
+ guard.powered = false;
+ }
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<1> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().block_size))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u32>().map_err(|_| EINVAL)?;
+
+ GenDiskBuilder::validate_block_size(value)?;
+ this.data.lock().block_size = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<2> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+
+ if this.data.lock().rotational {
+ writer.write_str("1\n")?;
+ } else {
+ writer.write_str("0\n")?;
+ }
+
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ this.data.lock().rotational = kstrtobool_bytes(page)?;
+
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<3> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().capacity_mib))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u64>().map_err(|_| EINVAL)?;
+
+ this.data.lock().capacity_mib = value;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl configfs::AttributeOperations<4> for DeviceConfig {
+ type Data = DeviceConfig;
+
+ fn show(this: &DeviceConfig, page: &mut [u8; PAGE_SIZE]) -> Result<usize> {
+ let mut writer = kernel::str::Formatter::new(page);
+ writer.write_fmt(fmt!("{}\n", this.data.lock().irq_mode))?;
+ Ok(writer.bytes_written())
+ }
+
+ fn store(this: &DeviceConfig, page: &[u8]) -> Result {
+ if this.data.lock().powered {
+ return Err(EBUSY);
+ }
+
+ let text = core::str::from_utf8(page)?.trim();
+ let value = text.parse::<u8>().map_err(|_| EINVAL)?;
+
+ this.data.lock().irq_mode = IRQMode::try_from(value)?;
+ Ok(())
+ }
+}
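
For orientation, the userspace flow this file implies (assuming configfs is mounted at the usual /sys/kernel/config): mkdir /sys/kernel/config/rnull/<name> triggers GroupOperations::make_group(); the blocksize, rotational, size and irqmode files may be written while power reads 0 (their stores return EBUSY otherwise); writing 1 to power calls NullBlkDevice::new() and instantiates the disk, and writing 0 drops it again.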
diff --git a/drivers/block/rnull/rnull.rs b/drivers/block/rnull/rnull.rs
new file mode 100644
index 000000000000..a9d5e575a2c4
--- /dev/null
+++ b/drivers/block/rnull/rnull.rs
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! This is a Rust implementation of the C null block driver.
+
+mod configfs;
+
+use configfs::IRQMode;
+use kernel::{
+ block::{
+ self,
+ mq::{
+ self,
+ gen_disk::{self, GenDisk},
+ Operations, TagSet,
+ },
+ },
+ error::Result,
+ pr_info,
+ prelude::*,
+ sync::{aref::ARef, Arc},
+};
+use pin_init::PinInit;
+
+module! {
+ type: NullBlkModule,
+ name: "rnull_mod",
+ authors: ["Andreas Hindborg"],
+ description: "Rust implementation of the C null block driver",
+ license: "GPL v2",
+}
+
+#[pin_data]
+struct NullBlkModule {
+ #[pin]
+ configfs_subsystem: kernel::configfs::Subsystem<configfs::Config>,
+}
+
+impl kernel::InPlaceModule for NullBlkModule {
+ fn init(_module: &'static ThisModule) -> impl PinInit<Self, Error> {
+ pr_info!("Rust null_blk loaded\n");
+
+ try_pin_init!(Self {
+ configfs_subsystem <- configfs::subsystem(),
+ })
+ }
+}
+
+struct NullBlkDevice;
+
+impl NullBlkDevice {
+ fn new(
+ name: &CStr,
+ block_size: u32,
+ rotational: bool,
+ capacity_mib: u64,
+ irq_mode: IRQMode,
+ ) -> Result<GenDisk<Self>> {
+ let tagset = Arc::pin_init(TagSet::new(1, 256, 1), GFP_KERNEL)?;
+
+ let queue_data = Box::new(QueueData { irq_mode }, GFP_KERNEL)?;
+
+ gen_disk::GenDiskBuilder::new()
+ .capacity_sectors(capacity_mib << (20 - block::SECTOR_SHIFT))
+ .logical_block_size(block_size)?
+ .physical_block_size(block_size)?
+ .rotational(rotational)
+ .build(fmt!("{}", name.to_str()?), tagset, queue_data)
+ }
+}
+
+struct QueueData {
+ irq_mode: IRQMode,
+}
+
+#[vtable]
+impl Operations for NullBlkDevice {
+ type QueueData = KBox<QueueData>;
+
+ #[inline(always)]
+ fn queue_rq(queue_data: &QueueData, rq: ARef<mq::Request<Self>>, _is_last: bool) -> Result {
+ match queue_data.irq_mode {
+ IRQMode::None => mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request"),
+ IRQMode::Soft => mq::Request::complete(rq),
+ }
+ Ok(())
+ }
+
+ fn commit_rqs(_queue_data: &QueueData) {}
+
+ fn complete(rq: ARef<mq::Request<Self>>) {
+ mq::Request::end_ok(rq)
+ .map_err(|_e| kernel::error::code::EIO)
+ // We take no refcounts on the request, so we expect to be able to
+ // end the request. The request reference must be unique at this
+ // point, and so `end_ok` cannot fail.
+ .expect("Fatal error - expected to be able to end request");
+ }
+}
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 7af21fe67671..db1fe9772a4d 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -119,9 +119,8 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
-static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int vdc_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct gendisk *disk = bdev->bd_disk;
sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
@@ -1189,7 +1188,7 @@ static void vdc_ldc_reset(struct vdc_port *port)
}
if (port->ldc_timeout)
- mod_delayed_work(system_wq, &port->ldc_reset_timer_work,
+ mod_delayed_work(system_percpu_wq, &port->ldc_reset_timer_work,
round_jiffies(jiffies + HZ * port->ldc_timeout));
mod_timer(&port->vio.timer, round_jiffies(jiffies + HZ));
return;
@@ -1217,7 +1216,7 @@ static int __init vdc_init(void)
{
int err;
- sunvdc_wq = alloc_workqueue("sunvdc", 0, 0);
+ sunvdc_wq = alloc_workqueue("sunvdc", WQ_PERCPU, 0);
if (!sunvdc_wq)
return -ENOMEM;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index eda33c5eb5e2..416015947ae6 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -711,9 +711,9 @@ static int floppy_ioctl(struct block_device *bdev, blk_mode_t mode,
return -ENOTTY;
}
-static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int floppy_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct floppy_state *fs = bdev->bd_disk->private_data;
+ struct floppy_state *fs = disk->private_data;
struct floppy_struct *g;
int ret;
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 67d4a867aec4..2c715df63f23 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -155,12 +155,13 @@ struct ublk_uring_cmd_pdu {
*/
#define UBLK_REFCOUNT_INIT (REFCOUNT_MAX / 2)
+union ublk_io_buf {
+ __u64 addr;
+ struct ublk_auto_buf_reg auto_reg;
+};
+
struct ublk_io {
- /* userspace buffer address from io cmd */
- union {
- __u64 addr;
- struct ublk_auto_buf_reg buf;
- };
+ union ublk_io_buf buf;
unsigned int flags;
int res;
@@ -201,18 +202,14 @@ struct ublk_queue {
bool force_abort;
bool canceling;
bool fail_io; /* copy of dev->state == UBLK_S_DEV_FAIL_IO */
- unsigned short nr_io_ready; /* how many ios setup */
spinlock_t cancel_lock;
struct ublk_device *dev;
- struct ublk_io ios[];
+ struct ublk_io ios[] __counted_by(q_depth);
};
struct ublk_device {
struct gendisk *ub_disk;
- char *__queues;
-
- unsigned int queue_size;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
@@ -234,12 +231,14 @@ struct ublk_device {
struct ublk_params params;
struct completion completion;
- unsigned int nr_queues_ready;
+ u32 nr_io_ready;
bool unprivileged_daemons;
struct mutex cancel_mutex;
bool canceling;
pid_t ublksrv_tgid;
struct delayed_work exit_work;
+
+ struct ublk_queue *queues[];
};
/* header of ublk_params */
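
__counted_by(q_depth) ties the flexible ios[] array's bounds to the q_depth member for FORTIFY_SOURCE and UBSAN bounds checking; it pairs with the struct_size() allocation pattern used later in ublk_init_queue(). A sketch:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

struct example_io {
	int res;
};

struct example_queue {
	unsigned short q_depth;
	struct example_io ios[] __counted_by(q_depth);
};

static struct example_queue *example_alloc(unsigned short depth)
{
	struct example_queue *q = kvzalloc(struct_size(q, ios, depth),
					   GFP_KERNEL);

	if (!q)
		return NULL;
	/* the counter must be set before ios[] is indexed */
	q->q_depth = depth;
	return q;
}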
@@ -252,8 +251,7 @@ static void ublk_io_release(void *priv);
static void ublk_stop_dev_unlocked(struct ublk_device *ub);
static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq);
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, struct ublk_io *io,
- size_t offset);
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset);
static inline unsigned int ublk_req_build_flags(struct request *req);
static inline struct ublksrv_io_desc *
@@ -267,7 +265,7 @@ static inline bool ublk_dev_is_zoned(const struct ublk_device *ub)
return ub->dev_info.flags & UBLK_F_ZONED;
}
-static inline bool ublk_queue_is_zoned(struct ublk_queue *ubq)
+static inline bool ublk_queue_is_zoned(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_ZONED;
}
@@ -370,7 +368,7 @@ static void *ublk_alloc_report_buffer(struct ublk_device *ublk,
}
static int ublk_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct ublk_device *ub = disk->private_data;
unsigned int zone_size_sectors = disk->queue->limits.chunk_sectors;
@@ -433,7 +431,7 @@ free_req:
if (!zone->len)
break;
- ret = cb(zone, i, data);
+ ret = disk_report_zone(disk, zone, i, args);
if (ret)
goto out;
@@ -501,7 +499,7 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_sectors = blk_rq_sectors(req);
iod->start_sector = blk_rq_pos(req);
- iod->addr = io->addr;
+ iod->addr = io->buf.addr;
return BLK_STS_OK;
}
@@ -532,7 +530,8 @@ static blk_status_t ublk_setup_iod_zoned(struct ublk_queue *ubq,
#endif
-static inline void __ublk_complete_rq(struct request *req);
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map);
static dev_t ublk_chr_devt;
static const struct class ublk_chr_class = {
@@ -664,22 +663,44 @@ static inline bool ublk_support_zero_copy(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_SUPPORT_ZERO_COPY;
}
+static inline bool ublk_dev_support_zero_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_SUPPORT_ZERO_COPY;
+}
+
static inline bool ublk_support_auto_buf_reg(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_AUTO_BUF_REG;
}
+static inline bool ublk_dev_support_auto_buf_reg(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_AUTO_BUF_REG;
+}
+
static inline bool ublk_support_user_copy(const struct ublk_queue *ubq)
{
return ubq->flags & UBLK_F_USER_COPY;
}
+static inline bool ublk_dev_support_user_copy(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_USER_COPY;
+}
+
static inline bool ublk_need_map_io(const struct ublk_queue *ubq)
{
return !ublk_support_user_copy(ubq) && !ublk_support_zero_copy(ubq) &&
!ublk_support_auto_buf_reg(ubq);
}
+static inline bool ublk_dev_need_map_io(const struct ublk_device *ub)
+{
+ return !ublk_dev_support_user_copy(ub) &&
+ !ublk_dev_support_zero_copy(ub) &&
+ !ublk_dev_support_auto_buf_reg(ub);
+}
+
static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
{
/*
@@ -697,6 +718,13 @@ static inline bool ublk_need_req_ref(const struct ublk_queue *ubq)
ublk_support_auto_buf_reg(ubq);
}
+static inline bool ublk_dev_need_req_ref(const struct ublk_device *ub)
+{
+ return ublk_dev_support_user_copy(ub) ||
+ ublk_dev_support_zero_copy(ub) ||
+ ublk_dev_support_auto_buf_reg(ub);
+}
+
static inline void ublk_init_req_ref(const struct ublk_queue *ubq,
struct ublk_io *io)
{
@@ -711,8 +739,11 @@ static inline bool ublk_get_req_ref(struct ublk_io *io)
static inline void ublk_put_req_ref(struct ublk_io *io, struct request *req)
{
- if (refcount_dec_and_test(&io->ref))
- __ublk_complete_rq(req);
+ if (!refcount_dec_and_test(&io->ref))
+ return;
+
+ /* ublk_need_map_io() and ublk_need_req_ref() are mutually exclusive */
+ __ublk_complete_rq(req, io, false);
}
static inline bool ublk_sub_req_ref(struct ublk_io *io)
@@ -728,6 +759,11 @@ static inline bool ublk_need_get_data(const struct ublk_queue *ubq)
return ubq->flags & UBLK_F_NEED_GET_DATA;
}
+static inline bool ublk_dev_need_get_data(const struct ublk_device *ub)
+{
+ return ub->dev_info.flags & UBLK_F_NEED_GET_DATA;
+}
+
/* Called in slow path only, keep it noinline for trace purpose */
static noinline struct ublk_device *ublk_get_device(struct ublk_device *ub)
{
@@ -745,7 +781,7 @@ static noinline void ublk_put_device(struct ublk_device *ub)
static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
int qid)
{
- return (struct ublk_queue *)&(dev->__queues[qid * dev->queue_size]);
+ return dev->queues[qid];
}
static inline bool ublk_rq_has_data(const struct request *rq)
@@ -764,11 +800,9 @@ static inline int __ublk_queue_cmd_buf_size(int depth)
return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE);
}
-static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)
+static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
-
- return __ublk_queue_cmd_buf_size(ubq->q_depth);
+ return __ublk_queue_cmd_buf_size(ub->dev_info.queue_depth);
}
static int ublk_max_cmd_buf_size(void)
@@ -880,73 +914,6 @@ static const struct block_device_operations ub_fops = {
.report_zones = ublk_report_zones,
};
-#define UBLK_MAX_PIN_PAGES 32
-
-struct ublk_io_iter {
- struct page *pages[UBLK_MAX_PIN_PAGES];
- struct bio *bio;
- struct bvec_iter iter;
-};
-
-/* return how many pages are copied */
-static void ublk_copy_io_pages(struct ublk_io_iter *data,
- size_t total, size_t pg_off, int dir)
-{
- unsigned done = 0;
- unsigned pg_idx = 0;
-
- while (done < total) {
- struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
- unsigned int bytes = min3(bv.bv_len, (unsigned)total - done,
- (unsigned)(PAGE_SIZE - pg_off));
- void *bv_buf = bvec_kmap_local(&bv);
- void *pg_buf = kmap_local_page(data->pages[pg_idx]);
-
- if (dir == ITER_DEST)
- memcpy(pg_buf + pg_off, bv_buf, bytes);
- else
- memcpy(bv_buf, pg_buf + pg_off, bytes);
-
- kunmap_local(pg_buf);
- kunmap_local(bv_buf);
-
- /* advance page array */
- pg_off += bytes;
- if (pg_off == PAGE_SIZE) {
- pg_idx += 1;
- pg_off = 0;
- }
-
- done += bytes;
-
- /* advance bio */
- bio_advance_iter_single(data->bio, &data->iter, bytes);
- if (!data->iter.bi_size) {
- data->bio = data->bio->bi_next;
- if (data->bio == NULL)
- break;
- data->iter = data->bio->bi_iter;
- }
- }
-}
-
-static bool ublk_advance_io_iter(const struct request *req,
- struct ublk_io_iter *iter, unsigned int offset)
-{
- struct bio *bio = req->bio;
-
- for_each_bio(bio) {
- if (bio->bi_iter.bi_size > offset) {
- iter->bio = bio;
- iter->iter = bio->bi_iter;
- bio_advance_iter(iter->bio, &iter->iter, offset);
- return true;
- }
- offset -= bio->bi_iter.bi_size;
- }
- return false;
-}
-
/*
* Copy data between request pages and io_iter, and 'offset'
* is the start point of linear offset of request.
@@ -954,34 +921,35 @@ static bool ublk_advance_io_iter(const struct request *req,
static size_t ublk_copy_user_pages(const struct request *req,
unsigned offset, struct iov_iter *uiter, int dir)
{
- struct ublk_io_iter iter;
+ struct req_iterator iter;
+ struct bio_vec bv;
size_t done = 0;
- if (!ublk_advance_io_iter(req, &iter, offset))
- return 0;
-
- while (iov_iter_count(uiter) && iter.bio) {
- unsigned nr_pages;
- ssize_t len;
- size_t off;
- int i;
+ rq_for_each_segment(bv, req, iter) {
+ void *bv_buf;
+ size_t copied;
- len = iov_iter_get_pages2(uiter, iter.pages,
- iov_iter_count(uiter),
- UBLK_MAX_PIN_PAGES, &off);
- if (len <= 0)
- return done;
-
- ublk_copy_io_pages(&iter, len, off, dir);
- nr_pages = DIV_ROUND_UP(len + off, PAGE_SIZE);
- for (i = 0; i < nr_pages; i++) {
- if (dir == ITER_DEST)
- set_page_dirty(iter.pages[i]);
- put_page(iter.pages[i]);
+ if (offset >= bv.bv_len) {
+ offset -= bv.bv_len;
+ continue;
}
- done += len;
- }
+ bv.bv_offset += offset;
+ bv.bv_len -= offset;
+ bv_buf = bvec_kmap_local(&bv);
+ if (dir == ITER_DEST)
+ copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
+ else
+ copied = copy_from_iter(bv_buf, bv.bv_len, uiter);
+
+ kunmap_local(bv_buf);
+
+ done += copied;
+ if (copied < bv.bv_len)
+ break;
+
+ offset = 0;
+ }
return done;
}
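
The rewritten ublk_copy_user_pages() above trades manual page pinning (iov_iter_get_pages2() into a bounded pages[] array, then per-page memcpy) for a plain bvec walk where copy_to_iter()/copy_from_iter() handle the user-side paging. The pattern in isolation, as a sketch:

#include <linux/blk-mq.h>
#include <linux/highmem.h>
#include <linux/uio.h>

static size_t copy_req_user(const struct request *req, unsigned int offset,
			    struct iov_iter *uiter, int dir)
{
	struct req_iterator iter;
	struct bio_vec bv;
	size_t done = 0;

	rq_for_each_segment(bv, req, iter) {
		size_t copied;
		void *buf;

		/* skip whole segments that precede the start offset */
		if (offset >= bv.bv_len) {
			offset -= bv.bv_len;
			continue;
		}
		bv.bv_offset += offset;
		bv.bv_len -= offset;
		offset = 0;

		buf = bvec_kmap_local(&bv);
		if (dir == ITER_DEST)
			copied = copy_to_iter(buf, bv.bv_len, uiter);
		else
			copied = copy_from_iter(buf, bv.bv_len, uiter);
		kunmap_local(buf);

		done += copied;
		/* short copy: user iter exhausted or faulted */
		if (copied < bv.bv_len)
			break;
	}
	return done;
}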
@@ -996,8 +964,9 @@ static inline bool ublk_need_unmap_req(const struct request *req)
(req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);
}
-static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
- const struct ublk_io *io)
+static unsigned int ublk_map_io(const struct ublk_queue *ubq,
+ const struct request *req,
+ const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
@@ -1013,19 +982,19 @@ static int ublk_map_io(const struct ublk_queue *ubq, const struct request *req,
struct iov_iter iter;
const int dir = ITER_DEST;
- import_ubuf(dir, u64_to_user_ptr(io->addr), rq_bytes, &iter);
+ import_ubuf(dir, u64_to_user_ptr(io->buf.addr), rq_bytes, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
}
-static int ublk_unmap_io(const struct ublk_queue *ubq,
+static unsigned int ublk_unmap_io(bool need_map,
const struct request *req,
const struct ublk_io *io)
{
const unsigned int rq_bytes = blk_rq_bytes(req);
- if (!ublk_need_map_io(ubq))
+ if (!need_map)
return rq_bytes;
if (ublk_need_unmap_req(req)) {
@@ -1034,7 +1003,7 @@ static int ublk_unmap_io(const struct ublk_queue *ubq,
WARN_ON_ONCE(io->res > rq_bytes);
- import_ubuf(dir, u64_to_user_ptr(io->addr), io->res, &iter);
+ import_ubuf(dir, u64_to_user_ptr(io->buf.addr), io->res, &iter);
return ublk_copy_user_pages(req, 0, &iter, dir);
}
return rq_bytes;
@@ -1072,13 +1041,8 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
{
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);
struct ublk_io *io = &ubq->ios[req->tag];
- enum req_op op = req_op(req);
u32 ublk_op;
- if (!ublk_queue_is_zoned(ubq) &&
- (op_is_zone_mgmt(op) || op == REQ_OP_ZONE_APPEND))
- return BLK_STS_IOERR;
-
switch (req_op(req)) {
case REQ_OP_READ:
ublk_op = UBLK_IO_OP_READ;
@@ -1105,7 +1069,7 @@ static blk_status_t ublk_setup_iod(struct ublk_queue *ubq, struct request *req)
iod->op_flags = ublk_op | ublk_req_build_flags(req);
iod->nr_sectors = blk_rq_sectors(req);
iod->start_sector = blk_rq_pos(req);
- iod->addr = io->addr;
+ iod->addr = io->buf.addr;
return BLK_STS_OK;
}
@@ -1117,10 +1081,9 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
}
/* todo: handle partial completion */
-static inline void __ublk_complete_rq(struct request *req)
+static inline void __ublk_complete_rq(struct request *req, struct ublk_io *io,
+ bool need_map)
{
- struct ublk_queue *ubq = req->mq_hctx->driver_data;
- struct ublk_io *io = &ubq->ios[req->tag];
unsigned int unmapped_bytes;
blk_status_t res = BLK_STS_OK;
@@ -1144,7 +1107,7 @@ static inline void __ublk_complete_rq(struct request *req)
goto exit;
/* for READ request, writing data in iod->addr to rq buffers */
- unmapped_bytes = ublk_unmap_io(ubq, req, io);
+ unmapped_bytes = ublk_unmap_io(need_map, req, io);
/*
* Extremely impossible since we got data filled in just before
@@ -1189,7 +1152,7 @@ static void ublk_complete_io_cmd(struct ublk_io *io, struct request *req,
struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
/* tell ublksrv one io request is coming */
- io_uring_cmd_done(cmd, res, 0, issue_flags);
+ io_uring_cmd_done(cmd, res, issue_flags);
}
#define UBLK_REQUEUE_DELAY_MS 3
@@ -1205,45 +1168,65 @@ static inline void __ublk_abort_rq(struct ublk_queue *ubq,
}
static void
-ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, struct ublk_io *io)
+ublk_auto_buf_reg_fallback(const struct ublk_queue *ubq, unsigned tag)
{
- unsigned tag = io - ubq->ios;
struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag);
iod->op_flags |= UBLK_IO_F_NEED_REG_BUF;
}
-static bool ublk_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
- struct ublk_io *io, unsigned int issue_flags)
+enum auto_buf_reg_res {
+ AUTO_BUF_REG_FAIL,
+ AUTO_BUF_REG_FALLBACK,
+ AUTO_BUF_REG_OK,
+};
+
+static void ublk_prep_auto_buf_reg_io(const struct ublk_queue *ubq,
+ struct request *req, struct ublk_io *io,
+ struct io_uring_cmd *cmd,
+ enum auto_buf_reg_res res)
+{
+ if (res == AUTO_BUF_REG_OK) {
+ io->task_registered_buffers = 1;
+ io->buf_ctx_handle = io_uring_cmd_ctx_handle(cmd);
+ io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
+ }
+ ublk_init_req_ref(ubq, io);
+ __ublk_prep_compl_io_cmd(io, req);
+}
+
+static enum auto_buf_reg_res
+__ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io, struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
int ret;
- ret = io_buffer_register_bvec(io->cmd, req, ublk_io_release,
- io->buf.index, issue_flags);
+ ret = io_buffer_register_bvec(cmd, req, ublk_io_release,
+ io->buf.auto_reg.index, issue_flags);
if (ret) {
- if (io->buf.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
- ublk_auto_buf_reg_fallback(ubq, io);
- return true;
+ if (io->buf.auto_reg.flags & UBLK_AUTO_BUF_REG_FALLBACK) {
+ ublk_auto_buf_reg_fallback(ubq, req->tag);
+ return AUTO_BUF_REG_FALLBACK;
}
blk_mq_end_request(req, BLK_STS_IOERR);
- return false;
+ return AUTO_BUF_REG_FAIL;
}
- io->task_registered_buffers = 1;
- io->buf_ctx_handle = io_uring_cmd_ctx_handle(io->cmd);
- io->flags |= UBLK_IO_FLAG_AUTO_BUF_REG;
- return true;
+ return AUTO_BUF_REG_OK;
}
-static bool ublk_prep_auto_buf_reg(struct ublk_queue *ubq,
- struct request *req, struct ublk_io *io,
- unsigned int issue_flags)
+static void ublk_do_auto_buf_reg(const struct ublk_queue *ubq, struct request *req,
+ struct ublk_io *io, struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
- ublk_init_req_ref(ubq, io);
- if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req))
- return ublk_auto_buf_reg(ubq, req, io, issue_flags);
+ enum auto_buf_reg_res res = __ublk_do_auto_buf_reg(ubq, req, io, cmd,
+ issue_flags);
- return true;
+ if (res != AUTO_BUF_REG_FAIL) {
+ ublk_prep_auto_buf_reg_io(ubq, req, io, cmd, res);
+ io_uring_cmd_done(cmd, UBLK_IO_RES_OK, issue_flags);
+ }
}
static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
@@ -1274,10 +1257,9 @@ static bool ublk_start_io(const struct ublk_queue *ubq, struct request *req,
return true;
}
-static void ublk_dispatch_req(struct ublk_queue *ubq,
- struct request *req,
- unsigned int issue_flags)
+static void ublk_dispatch_req(struct ublk_queue *ubq, struct request *req)
{
+ unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
@@ -1316,17 +1298,21 @@ static void ublk_dispatch_req(struct ublk_queue *ubq,
if (!ublk_start_io(ubq, req, io))
return;
- if (ublk_prep_auto_buf_reg(ubq, req, io, issue_flags))
+ if (ublk_support_auto_buf_reg(ubq) && ublk_rq_has_data(req)) {
+ ublk_do_auto_buf_reg(ubq, req, io, io->cmd, issue_flags);
+ } else {
+ ublk_init_req_ref(ubq, io);
ublk_complete_io_cmd(io, req, UBLK_IO_RES_OK, issue_flags);
+ }
}
-static void ublk_cmd_tw_cb(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+static void ublk_cmd_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct ublk_queue *ubq = pdu->ubq;
- ublk_dispatch_req(ubq, pdu->req, issue_flags);
+ ublk_dispatch_req(ubq, pdu->req);
}
static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
@@ -1338,9 +1324,9 @@ static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq)
io_uring_cmd_complete_in_task(cmd, ublk_cmd_tw_cb);
}
-static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+static void ublk_cmd_list_tw_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);
struct request *rq = pdu->req_list;
struct request *next;
@@ -1348,7 +1334,7 @@ static void ublk_cmd_list_tw_cb(struct io_uring_cmd *cmd,
do {
next = rq->rq_next;
rq->rq_next = NULL;
- ublk_dispatch_req(rq->mq_hctx->driver_data, rq, issue_flags);
+ ublk_dispatch_req(rq->mq_hctx->driver_data, rq);
rq = next;
} while (rq);
}
@@ -1500,9 +1486,6 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
{
int i;
- /* All old ioucmds have to be completed */
- ubq->nr_io_ready = 0;
-
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@@ -1512,7 +1495,7 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
*/
io->flags &= UBLK_IO_FLAG_CANCELED;
io->cmd = NULL;
- io->addr = 0;
+ io->buf.addr = 0;
/*
* old task is PF_EXITING, put it now
@@ -1551,7 +1534,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
- ub->nr_queues_ready = 0;
+ ub->nr_io_ready = 0;
ub->unprivileged_daemons = false;
ub->ublksrv_tgid = -1;
}
@@ -1775,23 +1758,23 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma)
__func__, q_id, current->pid, vma->vm_start,
phys_off, (unsigned long)sz);
- if (sz != ublk_queue_cmd_buf_size(ub, q_id))
+ if (sz != ublk_queue_cmd_buf_size(ub))
return -EINVAL;
pfn = virt_to_phys(ublk_queue_cmd_buf(ub, q_id)) >> PAGE_SHIFT;
return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
}
-static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
+static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io,
struct request *req)
{
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
- if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+ if (ublk_nosrv_should_reissue_outstanding(ub))
blk_mq_requeue_request(req, false);
else {
io->res = -EIO;
- __ublk_complete_rq(req);
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
}
}
@@ -1811,7 +1794,7 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
struct ublk_io *io = &ubq->ios[i];
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
- __ublk_fail_req(ubq, io, io->req);
+ __ublk_fail_req(ub, io, io->req);
}
}
@@ -1873,7 +1856,7 @@ static void ublk_cancel_cmd(struct ublk_queue *ubq, unsigned tag,
spin_unlock(&ubq->cancel_lock);
if (!done)
- io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+ io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
}
/*
@@ -1916,9 +1899,11 @@ static void ublk_uring_cmd_cancel_fn(struct io_uring_cmd *cmd,
ublk_cancel_cmd(ubq, pdu->tag, issue_flags);
}
-static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+static inline bool ublk_dev_ready(const struct ublk_device *ub)
{
- return ubq->nr_io_ready == ubq->q_depth;
+ u32 total = (u32)ub->dev_info.nr_hw_queues * ub->dev_info.queue_depth;
+
+ return ub->nr_io_ready == total;
}
static void ublk_cancel_queue(struct ublk_queue *ubq)
@@ -2042,16 +2027,14 @@ static void ublk_reset_io_flags(struct ublk_device *ub)
}
/* device can only be started after all IOs are ready */
-static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
+static void ublk_mark_io_ready(struct ublk_device *ub)
__must_hold(&ub->mutex)
{
- ubq->nr_io_ready++;
- if (ublk_queue_ready(ubq))
- ub->nr_queues_ready++;
if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
ub->unprivileged_daemons = true;
- if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
+ ub->nr_io_ready++;
+ if (ublk_dev_ready(ub)) {
/* now we are ready for handling ublk io request */
ublk_reset_io_flags(ub);
complete_all(&ub->completion);
@@ -2073,13 +2056,16 @@ static inline int ublk_check_cmd_op(u32 cmd_op)
static inline int ublk_set_auto_buf_reg(struct ublk_io *io, struct io_uring_cmd *cmd)
{
- io->buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
+ struct ublk_auto_buf_reg buf;
+
+ buf = ublk_sqe_addr_to_auto_buf_reg(READ_ONCE(cmd->sqe->addr));
- if (io->buf.reserved0 || io->buf.reserved1)
+ if (buf.reserved0 || buf.reserved1)
return -EINVAL;
- if (io->buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
+ if (buf.flags & ~UBLK_AUTO_BUF_REG_F_MASK)
return -EINVAL;
+ io->buf.auto_reg = buf;
return 0;
}
@@ -2101,7 +2087,7 @@ static int ublk_handle_auto_buf_reg(struct ublk_io *io,
* this ublk request gets stuck.
*/
if (io->buf_ctx_handle == io_uring_cmd_ctx_handle(cmd))
- *buf_idx = io->buf.index;
+ *buf_idx = io->buf.auto_reg.index;
}
return ublk_set_auto_buf_reg(io, cmd);
@@ -2122,14 +2108,14 @@ ublk_fill_io_cmd(struct ublk_io *io, struct io_uring_cmd *cmd)
}
static inline int
-ublk_config_io_buf(const struct ublk_queue *ubq, struct ublk_io *io,
+ublk_config_io_buf(const struct ublk_device *ub, struct ublk_io *io,
struct io_uring_cmd *cmd, unsigned long buf_addr,
u16 *buf_idx)
{
- if (ublk_support_auto_buf_reg(ubq))
+ if (ublk_dev_support_auto_buf_reg(ub))
return ublk_handle_auto_buf_reg(io, cmd, buf_idx);
- io->addr = buf_addr;
+ io->buf.addr = buf_addr;
return 0;
}
@@ -2165,18 +2151,18 @@ static void ublk_io_release(void *priv)
}
static int ublk_register_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag,
struct ublk_io *io,
unsigned int index, unsigned int issue_flags)
{
- struct ublk_device *ub = cmd->file->private_data;
struct request *req;
int ret;
- if (!ublk_support_zero_copy(ubq))
+ if (!ublk_dev_support_zero_copy(ub))
return -EINVAL;
- req = __ublk_check_and_get_req(ub, ubq, io, 0);
+ req = __ublk_check_and_get_req(ub, q_id, tag, io, 0);
if (!req)
return -EINVAL;
@@ -2192,7 +2178,8 @@ static int ublk_register_io_buf(struct io_uring_cmd *cmd,
static int
ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
- const struct ublk_queue *ubq, struct ublk_io *io,
+ struct ublk_device *ub,
+ u16 q_id, u16 tag, struct ublk_io *io,
unsigned index, unsigned issue_flags)
{
unsigned new_registered_buffers;
@@ -2205,9 +2192,10 @@ ublk_daemon_register_io_buf(struct io_uring_cmd *cmd,
*/
new_registered_buffers = io->task_registered_buffers + 1;
if (unlikely(new_registered_buffers >= UBLK_REFCOUNT_INIT))
- return ublk_register_io_buf(cmd, ubq, io, index, issue_flags);
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io, index,
+ issue_flags);
- if (!ublk_support_zero_copy(ubq) || !ublk_rq_has_data(req))
+ if (!ublk_dev_support_zero_copy(ub) || !ublk_rq_has_data(req))
return -EINVAL;
ret = io_buffer_register_bvec(cmd, req, ublk_io_release, index,
@@ -2229,14 +2217,14 @@ static int ublk_unregister_io_buf(struct io_uring_cmd *cmd,
return io_buffer_unregister_bvec(cmd, index, issue_flags);
}
-static int ublk_check_fetch_buf(const struct ublk_queue *ubq, __u64 buf_addr)
+static int ublk_check_fetch_buf(const struct ublk_device *ub, __u64 buf_addr)
{
- if (ublk_need_map_io(ubq)) {
+ if (ublk_dev_need_map_io(ub)) {
/*
* FETCH_RQ has to provide IO buffer if NEED GET
* DATA is not enabled
*/
- if (!buf_addr && !ublk_need_get_data(ubq))
+ if (!buf_addr && !ublk_dev_need_get_data(ub))
return -EINVAL;
} else if (buf_addr) {
/* User copy requires addr to be unset */
@@ -2245,55 +2233,56 @@ static int ublk_check_fetch_buf(const struct ublk_queue *ubq, __u64 buf_addr)
return 0;
}
-static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_queue *ubq,
- struct ublk_io *io, __u64 buf_addr)
+static int __ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+ struct ublk_io *io)
{
- struct ublk_device *ub = ubq->dev;
- int ret = 0;
-
- /*
- * When handling FETCH command for setting up ublk uring queue,
- * ub->mutex is the innermost lock, and we won't block for handling
- * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
- */
- mutex_lock(&ub->mutex);
- /* UBLK_IO_FETCH_REQ is only allowed before queue is setup */
- if (ublk_queue_ready(ubq)) {
- ret = -EBUSY;
- goto out;
- }
+ /* UBLK_IO_FETCH_REQ is only allowed before dev is setup */
+ if (ublk_dev_ready(ub))
+ return -EBUSY;
/* allow each command to be FETCHed at most once */
- if (io->flags & UBLK_IO_FLAG_ACTIVE) {
- ret = -EINVAL;
- goto out;
- }
+ if (io->flags & UBLK_IO_FLAG_ACTIVE)
+ return -EINVAL;
WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV);
ublk_fill_io_cmd(io, cmd);
- ret = ublk_config_io_buf(ubq, io, cmd, buf_addr, NULL);
- if (ret)
- goto out;
WRITE_ONCE(io->task, get_task_struct(current));
- ublk_mark_io_ready(ub, ubq);
-out:
+ ublk_mark_io_ready(ub);
+
+ return 0;
+}
+
+static int ublk_fetch(struct io_uring_cmd *cmd, struct ublk_device *ub,
+ struct ublk_io *io, __u64 buf_addr)
+{
+ int ret;
+
+ /*
+ * When handling FETCH command for setting up ublk uring queue,
+ * ub->mutex is the innermost lock, and we won't block for handling
+ * FETCH, so it is fine even for IO_URING_F_NONBLOCK.
+ */
+ mutex_lock(&ub->mutex);
+ ret = __ublk_fetch(cmd, ub, io);
+ if (!ret)
+ ret = ublk_config_io_buf(ub, io, cmd, buf_addr, NULL);
mutex_unlock(&ub->mutex);
return ret;
}
-static int ublk_check_commit_and_fetch(const struct ublk_queue *ubq,
+static int ublk_check_commit_and_fetch(const struct ublk_device *ub,
struct ublk_io *io, __u64 buf_addr)
{
struct request *req = io->req;
- if (ublk_need_map_io(ubq)) {
+ if (ublk_dev_need_map_io(ub)) {
/*
* COMMIT_AND_FETCH_REQ has to provide IO buffer if
* NEED GET DATA is not enabled or it is Read IO.
*/
- if (!buf_addr && (!ublk_need_get_data(ubq) ||
+ if (!buf_addr && (!ublk_dev_need_get_data(ub) ||
req_op(req) == REQ_OP_READ))
return -EINVAL;
} else if (req_op(req) != REQ_OP_ZONE_APPEND && buf_addr) {
@@ -2307,10 +2296,10 @@ static int ublk_check_commit_and_fetch(const struct ublk_queue *ubq,
return 0;
}
-static bool ublk_need_complete_req(const struct ublk_queue *ubq,
+static bool ublk_need_complete_req(const struct ublk_device *ub,
struct ublk_io *io)
{
- if (ublk_need_req_ref(ubq))
+ if (ublk_dev_need_req_ref(ub))
return ublk_sub_req_ref(io);
return true;
}
@@ -2325,7 +2314,7 @@ static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
*/
io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
/* update iod->addr because ublksrv may have passed a new io buffer */
- ublk_get_iod(ubq, req->tag)->addr = io->addr;
+ ublk_get_iod(ubq, req->tag)->addr = io->buf.addr;
pr_devel("%s: update iod->addr: qid %d tag %d io_flags %x addr %llx\n",
__func__, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
@@ -2333,23 +2322,28 @@ static bool ublk_get_data(const struct ublk_queue *ubq, struct ublk_io *io,
return ublk_start_io(ubq, req, io);
}
-static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
- unsigned int issue_flags,
- const struct ublksrv_io_cmd *ub_cmd)
+static int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
{
+ /* May point to userspace-mapped memory */
+ const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
u16 buf_idx = UBLK_INVALID_BUF_IDX;
struct ublk_device *ub = cmd->file->private_data;
struct ublk_queue *ubq;
- struct ublk_io *io;
+ struct ublk_io *io = NULL;
u32 cmd_op = cmd->cmd_op;
- unsigned tag = ub_cmd->tag;
+ u16 q_id = READ_ONCE(ub_src->q_id);
+ u16 tag = READ_ONCE(ub_src->tag);
+ s32 result = READ_ONCE(ub_src->result);
+ u64 addr = READ_ONCE(ub_src->addr); /* unioned with zone_append_lba */
struct request *req;
int ret;
bool compl;
+ WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
+
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
- __func__, cmd->cmd_op, ub_cmd->q_id, tag,
- ub_cmd->result);
+ __func__, cmd->cmd_op, q_id, tag, result);
ret = ublk_check_cmd_op(cmd_op);
if (ret)
@@ -2360,25 +2354,24 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* so no need to validate the q_id, tag, or task
*/
if (_IOC_NR(cmd_op) == UBLK_IO_UNREGISTER_IO_BUF)
- return ublk_unregister_io_buf(cmd, ub, ub_cmd->addr,
- issue_flags);
+ return ublk_unregister_io_buf(cmd, ub, addr, issue_flags);
ret = -EINVAL;
- if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues)
+ if (q_id >= ub->dev_info.nr_hw_queues)
goto out;
- ubq = ublk_get_queue(ub, ub_cmd->q_id);
+ ubq = ublk_get_queue(ub, q_id);
- if (tag >= ubq->q_depth)
+ if (tag >= ub->dev_info.queue_depth)
goto out;
io = &ubq->ios[tag];
/* UBLK_IO_FETCH_REQ can be handled on any task, which sets io->task */
if (unlikely(_IOC_NR(cmd_op) == UBLK_IO_FETCH_REQ)) {
- ret = ublk_check_fetch_buf(ubq, ub_cmd->addr);
+ ret = ublk_check_fetch_buf(ub, addr);
if (ret)
goto out;
- ret = ublk_fetch(cmd, ubq, io, ub_cmd->addr);
+ ret = ublk_fetch(cmd, ub, io, addr);
if (ret)
goto out;
@@ -2392,8 +2385,8 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* so can be handled on any task
*/
if (_IOC_NR(cmd_op) == UBLK_IO_REGISTER_IO_BUF)
- return ublk_register_io_buf(cmd, ubq, io, ub_cmd->addr,
- issue_flags);
+ return ublk_register_io_buf(cmd, ub, q_id, tag, io,
+ addr, issue_flags);
goto out;
}
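
Note the SQE fields are now snapshotted once each with READ_ONCE(): the command block may sit in userspace-mapped memory, so re-reading a field after validating it would reopen a time-of-check/time-of-use hole. The idiom in isolation, over a hypothetical command layout:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_sqe_cmd {	/* hypothetical; stands in for ublksrv_io_cmd */
	__u16 q_id;
	__u16 tag;
};

static int example_validate(const struct example_sqe_cmd *cmd,
			    u16 nr_queues, u16 depth)
{
	/* read each shared field exactly once ... */
	u16 q_id = READ_ONCE(cmd->q_id);
	u16 tag = READ_ONCE(cmd->tag);

	/* ... then validate and use only the local copies */
	if (q_id >= nr_queues || tag >= depth)
		return -EINVAL;
	return 0;
}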
@@ -2414,24 +2407,24 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
switch (_IOC_NR(cmd_op)) {
case UBLK_IO_REGISTER_IO_BUF:
- return ublk_daemon_register_io_buf(cmd, ubq, io, ub_cmd->addr,
+ return ublk_daemon_register_io_buf(cmd, ub, q_id, tag, io, addr,
issue_flags);
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- ret = ublk_check_commit_and_fetch(ubq, io, ub_cmd->addr);
+ ret = ublk_check_commit_and_fetch(ub, io, addr);
if (ret)
goto out;
- io->res = ub_cmd->result;
+ io->res = result;
req = ublk_fill_io_cmd(io, cmd);
- ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, &buf_idx);
- compl = ublk_need_complete_req(ubq, io);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, &buf_idx);
+ compl = ublk_need_complete_req(ub, io);
/* can't touch 'ublk_io' any more */
if (buf_idx != UBLK_INVALID_BUF_IDX)
io_buffer_unregister_bvec(cmd, buf_idx, issue_flags);
if (req_op(req) == REQ_OP_ZONE_APPEND)
- req->__sector = ub_cmd->zone_append_lba;
+ req->__sector = addr;
if (compl)
- __ublk_complete_rq(req);
+ __ublk_complete_rq(req, io, ublk_dev_need_map_io(ub));
if (ret)
goto out;
@@ -2443,7 +2436,7 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
* request
*/
req = ublk_fill_io_cmd(io, cmd);
- ret = ublk_config_io_buf(ubq, io, cmd, ub_cmd->addr, NULL);
+ ret = ublk_config_io_buf(ub, io, cmd, addr, NULL);
WARN_ON_ONCE(ret);
if (likely(ublk_get_data(ubq, io, req))) {
__ublk_prep_compl_io_cmd(io, req);
@@ -2458,21 +2451,20 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd,
out:
pr_devel("%s: complete: cmd op %d, tag %d ret %x io_flags %x\n",
- __func__, cmd_op, tag, ret, io->flags);
+ __func__, cmd_op, tag, ret, io ? io->flags : 0);
return ret;
}
static inline struct request *__ublk_check_and_get_req(struct ublk_device *ub,
- const struct ublk_queue *ubq, struct ublk_io *io, size_t offset)
+ u16 q_id, u16 tag, struct ublk_io *io, size_t offset)
{
- unsigned tag = io - ubq->ios;
struct request *req;
/*
* can't use io->req in case of concurrent UBLK_IO_COMMIT_AND_FETCH_REQ,
* which would overwrite it with io->cmd
*/
- req = blk_mq_tag_to_rq(ub->tag_set.tags[ubq->q_id], tag);
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag);
if (!req)
return NULL;
@@ -2494,33 +2486,14 @@ fail_put:
return NULL;
}
-static inline int ublk_ch_uring_cmd_local(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
-{
- /*
- * Not necessary for async retry, but let's keep it simple and always
- * copy the values to avoid any potential reuse.
- */
- const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
- const struct ublksrv_io_cmd ub_cmd = {
- .q_id = READ_ONCE(ub_src->q_id),
- .tag = READ_ONCE(ub_src->tag),
- .result = READ_ONCE(ub_src->result),
- .addr = READ_ONCE(ub_src->addr)
- };
-
- WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED);
-
- return __ublk_ch_uring_cmd(cmd, issue_flags, &ub_cmd);
-}
-
-static void ublk_ch_uring_cmd_cb(struct io_uring_cmd *cmd,
- unsigned int issue_flags)
+static void ublk_ch_uring_cmd_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
+ unsigned int issue_flags = IO_URING_CMD_TASK_WORK_ISSUE_FLAGS;
+ struct io_uring_cmd *cmd = io_uring_cmd_from_tw(tw_req);
int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
if (ret != -EIOCBQUEUED)
- io_uring_cmd_done(cmd, ret, 0, issue_flags);
+ io_uring_cmd_done(cmd, ret, issue_flags);
}
static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
@@ -2566,9 +2539,6 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
size_t buf_off;
u16 tag, q_id;
- if (!ub)
- return ERR_PTR(-EACCES);
-
if (!user_backed_iter(iter))
return ERR_PTR(-EACCES);
@@ -2583,23 +2553,17 @@ static struct request *ublk_check_and_get_req(struct kiocb *iocb,
return ERR_PTR(-EINVAL);
ubq = ublk_get_queue(ub, q_id);
- if (!ubq)
- return ERR_PTR(-EINVAL);
-
- if (!ublk_support_user_copy(ubq))
+ if (!ublk_dev_support_user_copy(ub))
return ERR_PTR(-EACCES);
- if (tag >= ubq->q_depth)
+ if (tag >= ub->dev_info.queue_depth)
return ERR_PTR(-EINVAL);
*io = &ubq->ios[tag];
- req = __ublk_check_and_get_req(ub, ubq, *io, buf_off);
+ req = __ublk_check_and_get_req(ub, q_id, tag, *io, buf_off);
if (!req)
return ERR_PTR(-EINVAL);
- if (!req->mq_hctx || !req->mq_hctx->driver_data)
- goto fail;
-
if (!ublk_check_ubuf_dir(req, dir))
goto fail;
@@ -2656,9 +2620,13 @@ static const struct file_operations ublk_ch_fops = {
static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
{
- int size = ublk_queue_cmd_buf_size(ub, q_id);
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
- int i;
+ struct ublk_queue *ubq = ub->queues[q_id];
+ int size, i;
+
+ if (!ubq)
+ return;
+
+ size = ublk_queue_cmd_buf_size(ub);
for (i = 0; i < ubq->q_depth; i++) {
struct ublk_io *io = &ubq->ios[i];
@@ -2670,57 +2638,76 @@ static void ublk_deinit_queue(struct ublk_device *ub, int q_id)
if (ubq->io_cmd_buf)
free_pages((unsigned long)ubq->io_cmd_buf, get_order(size));
+
+ kvfree(ubq);
+ ub->queues[q_id] = NULL;
+}
+
+static int ublk_get_queue_numa_node(struct ublk_device *ub, int q_id)
+{
+ unsigned int cpu;
+
+ /* Find first CPU mapped to this queue */
+ for_each_possible_cpu(cpu) {
+ if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id)
+ return cpu_to_node(cpu);
+ }
+
+ return NUMA_NO_NODE;
}
static int ublk_init_queue(struct ublk_device *ub, int q_id)
{
- struct ublk_queue *ubq = ublk_get_queue(ub, q_id);
+ int depth = ub->dev_info.queue_depth;
gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
- void *ptr;
+ struct ublk_queue *ubq;
+ struct page *page;
+ int numa_node;
int size;
+ /* Determine NUMA node based on queue's CPU affinity */
+ numa_node = ublk_get_queue_numa_node(ub, q_id);
+
+ /* Allocate queue structure on local NUMA node */
+ ubq = kvzalloc_node(struct_size(ubq, ios, depth), GFP_KERNEL,
+ numa_node);
+ if (!ubq)
+ return -ENOMEM;
+
spin_lock_init(&ubq->cancel_lock);
ubq->flags = ub->dev_info.flags;
ubq->q_id = q_id;
- ubq->q_depth = ub->dev_info.queue_depth;
- size = ublk_queue_cmd_buf_size(ub, q_id);
+ ubq->q_depth = depth;
+ size = ublk_queue_cmd_buf_size(ub);
- ptr = (void *) __get_free_pages(gfp_flags, get_order(size));
- if (!ptr)
+ /* Allocate I/O command buffer on local NUMA node */
+ page = alloc_pages_node(numa_node, gfp_flags, get_order(size));
+ if (!page) {
+ kvfree(ubq);
return -ENOMEM;
+ }
+ ubq->io_cmd_buf = page_address(page);
- ubq->io_cmd_buf = ptr;
+ ub->queues[q_id] = ubq;
ubq->dev = ub;
return 0;
}
static void ublk_deinit_queues(struct ublk_device *ub)
{
- int nr_queues = ub->dev_info.nr_hw_queues;
int i;
- if (!ub->__queues)
- return;
-
- for (i = 0; i < nr_queues; i++)
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_deinit_queue(ub, i);
- kvfree(ub->__queues);
}
static int ublk_init_queues(struct ublk_device *ub)
{
- int nr_queues = ub->dev_info.nr_hw_queues;
- int depth = ub->dev_info.queue_depth;
- int ubq_size = sizeof(struct ublk_queue) + depth * sizeof(struct ublk_io);
- int i, ret = -ENOMEM;
-
- ub->queue_size = ubq_size;
- ub->__queues = kvcalloc(nr_queues, ubq_size, GFP_KERNEL);
- if (!ub->__queues)
- return ret;
+ int i, ret;
- for (i = 0; i < nr_queues; i++) {
- if (ublk_init_queue(ub, i))
+ for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+ ret = ublk_init_queue(ub, i);
+ if (ret)
goto fail;
}
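
The reworked init path allocates each queue on the NUMA node of the CPUs that will service it, rather than one flat kvcalloc() for every queue; that is also why ublk_add_tag_set() must now run first (next hunk), since the node lookup reads the tag set's CPU-to-queue map. The lookup in isolation:

#include <linux/blk-mq.h>
#include <linux/topology.h>

static int example_queue_node(struct blk_mq_tag_set *set, int q_id)
{
	unsigned int cpu;

	/* the first CPU mapped to this hw queue decides the node */
	for_each_possible_cpu(cpu) {
		if (set->map[HCTX_TYPE_DEFAULT].mq_map[cpu] == q_id)
			return cpu_to_node(cpu);
	}
	return NUMA_NO_NODE;	/* no mapping found: let the allocator pick */
}

kvzalloc_node() and alloc_pages_node() then take the returned node as their placement hint, as the hunk above shows.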
@@ -3122,7 +3109,7 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
goto out_unlock;
ret = -ENOMEM;
- ub = kzalloc(sizeof(*ub), GFP_KERNEL);
+ ub = kzalloc(struct_size(ub, queues, info.nr_hw_queues), GFP_KERNEL);
if (!ub)
goto out_unlock;
mutex_init(&ub->mutex);
@@ -3172,17 +3159,17 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
ub->dev_info.nr_hw_queues, nr_cpu_ids);
ublk_align_max_io_size(ub);
- ret = ublk_init_queues(ub);
+ ret = ublk_add_tag_set(ub);
if (ret)
goto out_free_dev_number;
- ret = ublk_add_tag_set(ub);
+ ret = ublk_init_queues(ub);
if (ret)
- goto out_deinit_queues;
+ goto out_free_tag_set;
ret = -EFAULT;
if (copy_to_user(argp, &ub->dev_info, sizeof(info)))
- goto out_free_tag_set;
+ goto out_deinit_queues;
/*
* Add the char dev so that ublksrv daemon can be setup.
@@ -3191,10 +3178,10 @@ static int ublk_ctrl_add_dev(const struct ublksrv_ctrl_cmd *header)
ret = ublk_add_chdev(ub);
goto out_unlock;
-out_free_tag_set:
- blk_mq_free_tag_set(&ub->tag_set);
out_deinit_queues:
ublk_deinit_queues(ub);
+out_free_tag_set:
+ blk_mq_free_tag_set(&ub->tag_set);
out_free_dev_number:
ublk_free_dev_number(ub);
out_free_ub:
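
Swapping the init order also forces the error labels to be reshuffled, keeping the invariant that goto-based unwind mirrors initialization in reverse. A self-contained sketch with hypothetical stubs:

/* hypothetical stand-ins for the real setup/teardown steps */
static int init_tag_set(void) { return 0; }
static int init_queues(void) { return 0; }
static int add_chdev(void) { return 0; }
static void deinit_queues(void) { }
static void free_tag_set(void) { }

static int example_add_dev(void)
{
	int ret;

	ret = init_tag_set();		/* step 1 */
	if (ret)
		return ret;
	ret = init_queues();		/* step 2, needs the tag set */
	if (ret)
		goto out_free_tag_set;
	ret = add_chdev();		/* step 3 */
	if (ret)
		goto out_deinit_queues;
	return 0;

out_deinit_queues:			/* undo step 2 */
	deinit_queues();
out_free_tag_set:			/* undo step 1 */
	free_tag_set();
	return ret;
}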
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e649fa67bac1..357434bdae99 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -584,7 +584,8 @@ out:
static int virtblk_parse_zone(struct virtio_blk *vblk,
struct virtio_blk_zone_descriptor *entry,
- unsigned int idx, report_zones_cb cb, void *data)
+ unsigned int idx,
+ struct blk_report_zones_args *args)
{
struct blk_zone zone = { };
@@ -650,12 +651,12 @@ static int virtblk_parse_zone(struct virtio_blk *vblk,
* The callback below checks the validity of the reported
* entry data, no need to further validate it here.
*/
- return cb(&zone, idx, data);
+ return disk_report_zone(vblk->disk, &zone, idx, args);
}
static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb,
- void *data)
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args)
{
struct virtio_blk *vblk = disk->private_data;
struct virtio_blk_zone_report *report;
@@ -693,7 +694,7 @@ static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
for (i = 0; i < nz && zone_idx < nr_zones; i++) {
ret = virtblk_parse_zone(vblk, &report->zones[i],
- zone_idx, cb, data);
+ zone_idx, args);
if (ret)
goto fail_report;
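
A hedged sketch of the converted ->report_zones() shape, assuming the struct blk_report_zones_args / disk_report_zone() interface used throughout these hunks; my_fill_zone() is a hypothetical driver helper:

#include <linux/blkdev.h>

/* hypothetical helper translating driver zone state into a blk_zone */
void my_fill_zone(struct gendisk *disk, sector_t sector,
		  struct blk_zone *zone);

static int my_report_zones(struct gendisk *disk, sector_t sector,
			   unsigned int nr_zones,
			   struct blk_report_zones_args *args)
{
	struct blk_zone zone = { };
	unsigned int i;
	int ret;

	for (i = 0; i < nr_zones && sector < get_capacity(disk); i++) {
		my_fill_zone(disk, sector, &zone);
		/* replaces the old cb(&zone, i, data) callback invocation */
		ret = disk_report_zone(disk, &zone, i, args);
		if (ret)
			return ret;
		sector += zone.len;
	}
	return i;	/* number of zones reported */
}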
@@ -829,9 +830,9 @@ out:
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
-static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
+static int virtblk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct virtio_blk *vblk = bd->bd_disk->private_data;
+ struct virtio_blk *vblk = disk->private_data;
int ret = 0;
mutex_lock(&vblk->vdev_mutex);
@@ -853,7 +854,7 @@ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
/* some standard values, similar to sd */
geo->heads = 1 << 6;
geo->sectors = 1 << 5;
- geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+ geo->cylinders = get_capacity(disk) >> 11;
}
out:
mutex_unlock(&vblk->vdev_mutex);
@@ -1026,8 +1027,13 @@ static int init_vq(struct virtio_blk *vblk)
out:
kfree(vqs);
kfree(vqs_info);
- if (err)
+ if (err) {
kfree(vblk->vqs);
+ /*
+ * Set to NULL to prevent freeing vqs again during freezing.
+ */
+ vblk->vqs = NULL;
+ }
return err;
}
@@ -1598,6 +1604,12 @@ static int virtblk_freeze_priv(struct virtio_device *vdev)
vdev->config->del_vqs(vdev);
kfree(vblk->vqs);
+ /*
+ * Set to NULL to prevent freeing vqs again after a failed vqs
+ * allocation during resume. Note that kfree() already handles NULL
+ * pointers safely.
+ */
+ vblk->vqs = NULL;
return 0;
}
@@ -1682,7 +1694,7 @@ static int __init virtio_blk_init(void)
{
int error;
- virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
+ virtblk_wq = alloc_workqueue("virtio-blk", WQ_PERCPU, 0);
if (!virtblk_wq)
return -ENOMEM;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 5babe575c288..04fc6b552c04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -493,11 +493,11 @@ static void blkif_restart_queue_callback(void *arg)
schedule_work(&rinfo->work);
}
-static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+static int blkif_getgeo(struct gendisk *disk, struct hd_geometry *hg)
{
/* We don't have real geometry info, but let's at least return
values consistent with the size of the device */
- sector_t nsect = get_capacity(bd->bd_disk);
+ sector_t nsect = get_capacity(disk);
sector_t cylinders = nsect;
hg->heads = 0xff;
diff --git a/drivers/block/zloop.c b/drivers/block/zloop.c
index a423228e201b..77bd6081b244 100644
--- a/drivers/block/zloop.c
+++ b/drivers/block/zloop.c
@@ -32,6 +32,8 @@ enum {
ZLOOP_OPT_NR_QUEUES = (1 << 6),
ZLOOP_OPT_QUEUE_DEPTH = (1 << 7),
ZLOOP_OPT_BUFFERED_IO = (1 << 8),
+ ZLOOP_OPT_ZONE_APPEND = (1 << 9),
+ ZLOOP_OPT_ORDERED_ZONE_APPEND = (1 << 10),
};
static const match_table_t zloop_opt_tokens = {
@@ -44,6 +46,8 @@ static const match_table_t zloop_opt_tokens = {
{ ZLOOP_OPT_NR_QUEUES, "nr_queues=%u" },
{ ZLOOP_OPT_QUEUE_DEPTH, "queue_depth=%u" },
{ ZLOOP_OPT_BUFFERED_IO, "buffered_io" },
+ { ZLOOP_OPT_ZONE_APPEND, "zone_append=%u" },
+ { ZLOOP_OPT_ORDERED_ZONE_APPEND, "ordered_zone_append" },
{ ZLOOP_OPT_ERR, NULL }
};
@@ -56,6 +60,8 @@ static const match_table_t zloop_opt_tokens = {
#define ZLOOP_DEF_NR_QUEUES 1
#define ZLOOP_DEF_QUEUE_DEPTH 128
#define ZLOOP_DEF_BUFFERED_IO false
+#define ZLOOP_DEF_ZONE_APPEND true
+#define ZLOOP_DEF_ORDERED_ZONE_APPEND false
/* Arbitrary limit on the zone size (16GB). */
#define ZLOOP_MAX_ZONE_SIZE_MB 16384
@@ -71,6 +77,8 @@ struct zloop_options {
unsigned int nr_queues;
unsigned int queue_depth;
bool buffered_io;
+ bool zone_append;
+ bool ordered_zone_append;
};
/*
@@ -92,6 +100,7 @@ struct zloop_zone {
unsigned long flags;
struct mutex lock;
+ spinlock_t wp_lock;
enum blk_zone_cond cond;
sector_t start;
sector_t wp;
@@ -108,6 +117,8 @@ struct zloop_device {
struct workqueue_struct *workqueue;
bool buffered_io;
+ bool zone_append;
+ bool ordered_zone_append;
const char *base_dir;
struct file *data_dir;
@@ -147,6 +158,7 @@ static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
struct zloop_zone *zone = &zlo->zones[zone_no];
struct kstat stat;
sector_t file_sectors;
+ unsigned long flags;
int ret;
lockdep_assert_held(&zone->lock);
@@ -172,16 +184,18 @@ static int zloop_update_seq_zone(struct zloop_device *zlo, unsigned int zone_no)
return -EINVAL;
}
+ spin_lock_irqsave(&zone->wp_lock, flags);
if (!file_sectors) {
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
} else if (file_sectors == zlo->zone_capacity) {
zone->cond = BLK_ZONE_COND_FULL;
- zone->wp = zone->start + zlo->zone_size;
+ zone->wp = ULLONG_MAX;
} else {
zone->cond = BLK_ZONE_COND_CLOSED;
zone->wp = zone->start + file_sectors;
}
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
return 0;
}
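
The new wp_lock splits responsibilities between two locks; a sketch of the scheme as these hunks apply it (zone->lock, a mutex, still serializes whole zone operations, while wp_lock makes wp/cond snapshots safe from atomic context such as ->queue_rq()):

#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct my_zone {
	struct mutex lock;	/* zone operations; may sleep */
	spinlock_t wp_lock;	/* wp/cond snapshots; atomic context OK */
	enum blk_zone_cond cond;
	sector_t wp;
};

static sector_t my_read_wp(struct my_zone *zone)
{
	unsigned long flags;
	sector_t wp;

	spin_lock_irqsave(&zone->wp_lock, flags);
	wp = zone->wp;
	spin_unlock_irqrestore(&zone->wp_lock, flags);
	return wp;
}

Writers take both locks, mutex first and then the spinlock; readers that only need a consistent write pointer take just the spinlock.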
@@ -225,6 +239,7 @@ unlock:
static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
{
struct zloop_zone *zone = &zlo->zones[zone_no];
+ unsigned long flags;
int ret = 0;
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
@@ -243,10 +258,12 @@ static int zloop_close_zone(struct zloop_device *zlo, unsigned int zone_no)
break;
case BLK_ZONE_COND_IMP_OPEN:
case BLK_ZONE_COND_EXP_OPEN:
+ spin_lock_irqsave(&zone->wp_lock, flags);
if (zone->wp == zone->start)
zone->cond = BLK_ZONE_COND_EMPTY;
else
zone->cond = BLK_ZONE_COND_CLOSED;
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
break;
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_FULL:
@@ -264,6 +281,7 @@ unlock:
static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
{
struct zloop_zone *zone = &zlo->zones[zone_no];
+ unsigned long flags;
int ret = 0;
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
@@ -281,9 +299,11 @@ static int zloop_reset_zone(struct zloop_device *zlo, unsigned int zone_no)
goto unlock;
}
+ spin_lock_irqsave(&zone->wp_lock, flags);
zone->cond = BLK_ZONE_COND_EMPTY;
zone->wp = zone->start;
clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
unlock:
mutex_unlock(&zone->lock);
@@ -308,6 +328,7 @@ static int zloop_reset_all_zones(struct zloop_device *zlo)
static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
{
struct zloop_zone *zone = &zlo->zones[zone_no];
+ unsigned long flags;
int ret = 0;
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
@@ -325,9 +346,11 @@ static int zloop_finish_zone(struct zloop_device *zlo, unsigned int zone_no)
goto unlock;
}
+ spin_lock_irqsave(&zone->wp_lock, flags);
zone->cond = BLK_ZONE_COND_FULL;
- zone->wp = zone->start + zlo->zone_size;
+ zone->wp = ULLONG_MAX;
clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
unlock:
mutex_unlock(&zone->lock);
@@ -369,8 +392,9 @@ static void zloop_rw(struct zloop_cmd *cmd)
struct zloop_zone *zone;
struct iov_iter iter;
struct bio_vec tmp;
+ unsigned long flags;
sector_t zone_end;
- int nr_bvec = 0;
+ unsigned int nr_bvec;
int ret;
atomic_set(&cmd->ref, 2);
@@ -378,6 +402,11 @@ static void zloop_rw(struct zloop_cmd *cmd)
cmd->nr_sectors = nr_sectors;
cmd->ret = 0;
+ if (WARN_ON_ONCE(is_append && !zlo->zone_append)) {
+ ret = -EIO;
+ goto out;
+ }
+
/* We should never get an I/O beyond the device capacity. */
if (WARN_ON_ONCE(zone_no >= zlo->nr_zones)) {
ret = -EIO;
@@ -406,16 +435,31 @@ static void zloop_rw(struct zloop_cmd *cmd)
if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
mutex_lock(&zone->lock);
- if (is_append) {
- sector = zone->wp;
- cmd->sector = sector;
- }
+ spin_lock_irqsave(&zone->wp_lock, flags);
/*
- * Write operations must be aligned to the write pointer and
- * fully contained within the zone capacity.
+ * Zone append operations always go at the current write
+ * pointer, but regular write operations must already be
+ * aligned to the write pointer when submitted.
*/
- if (sector != zone->wp || zone->wp + nr_sectors > zone_end) {
+ if (is_append) {
+ /*
+ * If ordered zone append is in use, we already checked
+ * and set the target sector in zloop_queue_rq().
+ */
+ if (!zlo->ordered_zone_append) {
+ if (zone->cond == BLK_ZONE_COND_FULL ||
+ zone->wp + nr_sectors > zone_end) {
+ spin_unlock_irqrestore(&zone->wp_lock,
+ flags);
+ ret = -EIO;
+ goto unlock;
+ }
+ sector = zone->wp;
+ }
+ cmd->sector = sector;
+ } else if (sector != zone->wp) {
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
pr_err("Zone %u: unaligned write: sect %llu, wp %llu\n",
zone_no, sector, zone->wp);
ret = -EIO;
@@ -428,17 +472,22 @@ static void zloop_rw(struct zloop_cmd *cmd)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
/*
- * Advance the write pointer of sequential zones. If the write
- * fails, the wp position will be corrected when the next I/O
- * copmpletes.
+ * Advance the write pointer, unless ordered zone append is in
+ * use. If the write fails, the write pointer position will be
+ * corrected when the next I/O starts execution.
*/
- zone->wp += nr_sectors;
- if (zone->wp == zone_end)
- zone->cond = BLK_ZONE_COND_FULL;
+ if (!is_append || !zlo->ordered_zone_append) {
+ zone->wp += nr_sectors;
+ if (zone->wp == zone_end) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = ULLONG_MAX;
+ }
+ }
+
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
}
- rq_for_each_bvec(tmp, rq, rq_iter)
- nr_bvec++;
+ nr_bvec = blk_rq_nr_bvec(rq);
if (rq->bio != rq->biotail) {
struct bio_vec *bvec;
@@ -498,6 +547,10 @@ static void zloop_handle_cmd(struct zloop_cmd *cmd)
struct request *rq = blk_mq_rq_from_pdu(cmd);
struct zloop_device *zlo = rq->q->queuedata;
+ /* We can block in this context, so ignore REQ_NOWAIT. */
+ if (rq->cmd_flags & REQ_NOWAIT)
+ rq->cmd_flags &= ~REQ_NOWAIT;
+
switch (req_op(rq)) {
case REQ_OP_READ:
case REQ_OP_WRITE:
@@ -608,6 +661,35 @@ static void zloop_complete_rq(struct request *rq)
blk_mq_end_request(rq, sts);
}
+static bool zloop_set_zone_append_sector(struct request *rq)
+{
+ struct zloop_device *zlo = rq->q->queuedata;
+ unsigned int zone_no = rq_zone_no(rq);
+ struct zloop_zone *zone = &zlo->zones[zone_no];
+ sector_t zone_end = zone->start + zlo->zone_capacity;
+ sector_t nr_sectors = blk_rq_sectors(rq);
+ unsigned long flags;
+
+ spin_lock_irqsave(&zone->wp_lock, flags);
+
+ if (zone->cond == BLK_ZONE_COND_FULL ||
+ zone->wp + nr_sectors > zone_end) {
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+ return false;
+ }
+
+ rq->__sector = zone->wp;
+ zone->wp += blk_rq_sectors(rq);
+ if (zone->wp >= zone_end) {
+ zone->cond = BLK_ZONE_COND_FULL;
+ zone->wp = ULLONG_MAX;
+ }
+
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
+
+ return true;
+}
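
Why the sector is fixed at queue time: with ordered_zone_append, placement follows submission order rather than execution order. A sketch of the difference, inferred from the hunks above:

/*
 * Two appends A then B to the same zone:
 *
 *   ordered_zone_append=1 (sector chosen in ->queue_rq(), under wp_lock):
 *     queue_rq(A): A->__sector = wp; wp += |A|;
 *     queue_rq(B): B->__sector = wp; wp += |B|;
 *     -> A's data always lands below B's, even if B's work item runs
 *        first.
 *
 *   default (sector chosen in zloop_rw(), i.e. when the work executes):
 *     whichever command runs first takes the current wp, so completion
 *     order decides placement.
 */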
+
static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -618,6 +700,16 @@ static blk_status_t zloop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (zlo->state == Zlo_deleting)
return BLK_STS_IOERR;
+ /*
+ * If we need to strongly order zone append operations, set the request
+ * sector to the zone write pointer location now instead of when the
+ * command work runs.
+ */
+ if (zlo->ordered_zone_append && req_op(rq) == REQ_OP_ZONE_APPEND) {
+ if (!zloop_set_zone_append_sector(rq))
+ return BLK_STS_IOERR;
+ }
+
blk_mq_start_request(rq);
INIT_WORK(&cmd->work, zloop_cmd_workfn);
@@ -647,11 +739,12 @@ static int zloop_open(struct gendisk *disk, blk_mode_t mode)
}
static int zloop_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones, struct blk_report_zones_args *args)
{
struct zloop_device *zlo = disk->private_data;
struct blk_zone blkz = {};
unsigned int first, i;
+ unsigned long flags;
int ret;
first = disk_zone_no(disk, sector);
@@ -675,7 +768,9 @@ static int zloop_report_zones(struct gendisk *disk, sector_t sector,
blkz.start = zone->start;
blkz.len = zlo->zone_size;
+ spin_lock_irqsave(&zone->wp_lock, flags);
blkz.wp = zone->wp;
+ spin_unlock_irqrestore(&zone->wp_lock, flags);
blkz.cond = zone->cond;
if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
blkz.type = BLK_ZONE_TYPE_CONVENTIONAL;
@@ -687,7 +782,7 @@ static int zloop_report_zones(struct gendisk *disk, sector_t sector,
mutex_unlock(&zone->lock);
- ret = cb(&blkz, i, data);
+ ret = disk_report_zone(disk, &blkz, i, args);
if (ret)
return ret;
}
@@ -783,6 +878,7 @@ static int zloop_init_zone(struct zloop_device *zlo, struct zloop_options *opts,
int ret;
mutex_init(&zone->lock);
+ spin_lock_init(&zone->wp_lock);
zone->start = (sector_t)zone_no << zlo->zone_shift;
if (!restore)
@@ -884,7 +980,6 @@ static int zloop_ctl_add(struct zloop_options *opts)
{
struct queue_limits lim = {
.max_hw_sectors = SZ_1M >> SECTOR_SHIFT,
- .max_hw_zone_append_sectors = SZ_1M >> SECTOR_SHIFT,
.chunk_sectors = opts->zone_size,
.features = BLK_FEAT_ZONED,
};
@@ -936,6 +1031,9 @@ static int zloop_ctl_add(struct zloop_options *opts)
zlo->nr_zones = nr_zones;
zlo->nr_conv_zones = opts->nr_conv_zones;
zlo->buffered_io = opts->buffered_io;
+ zlo->zone_append = opts->zone_append;
+ if (zlo->zone_append)
+ zlo->ordered_zone_append = opts->ordered_zone_append;
zlo->workqueue = alloc_workqueue("zloop%d", WQ_UNBOUND | WQ_FREEZABLE,
opts->nr_queues * opts->queue_depth, zlo->id);
@@ -976,6 +1074,8 @@ static int zloop_ctl_add(struct zloop_options *opts)
lim.physical_block_size = zlo->block_size;
lim.logical_block_size = zlo->block_size;
+ if (zlo->zone_append)
+ lim.max_hw_zone_append_sectors = lim.max_hw_sectors;
zlo->tag_set.ops = &zloop_mq_ops;
zlo->tag_set.nr_hw_queues = opts->nr_queues;
@@ -1016,10 +1116,14 @@ static int zloop_ctl_add(struct zloop_options *opts)
zlo->state = Zlo_live;
mutex_unlock(&zloop_ctl_mutex);
- pr_info("Added device %d: %u zones of %llu MB, %u B block size\n",
+ pr_info("zloop: device %d, %u zones of %llu MiB, %u B block size\n",
zlo->id, zlo->nr_zones,
((sector_t)zlo->zone_size << SECTOR_SHIFT) >> 20,
zlo->block_size);
+ pr_info("zloop%d: using %s%s zone append\n",
+ zlo->id,
+ zlo->ordered_zone_append ? "ordered " : "",
+ zlo->zone_append ? "native" : "emulated");
return 0;
@@ -1106,6 +1210,8 @@ static int zloop_parse_options(struct zloop_options *opts, const char *buf)
opts->nr_queues = ZLOOP_DEF_NR_QUEUES;
opts->queue_depth = ZLOOP_DEF_QUEUE_DEPTH;
opts->buffered_io = ZLOOP_DEF_BUFFERED_IO;
+ opts->zone_append = ZLOOP_DEF_ZONE_APPEND;
+ opts->ordered_zone_append = ZLOOP_DEF_ORDERED_ZONE_APPEND;
if (!buf)
return 0;
@@ -1215,6 +1321,21 @@ static int zloop_parse_options(struct zloop_options *opts, const char *buf)
case ZLOOP_OPT_BUFFERED_IO:
opts->buffered_io = true;
break;
+ case ZLOOP_OPT_ZONE_APPEND:
+ if (match_uint(args, &token)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (token != 0 && token != 1) {
+ pr_err("Invalid zone_append value\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ opts->zone_append = token;
+ break;
+ case ZLOOP_OPT_ORDERED_ZONE_APPEND:
+ opts->ordered_zone_append = true;
+ break;
case ZLOOP_OPT_ERR:
default:
pr_warn("unknown parameter or missing value '%s'\n", p);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 8acad3cc6e6e..5759823d6314 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -500,8 +500,31 @@ out:
}
#ifdef CONFIG_ZRAM_WRITEBACK
+#define INVALID_BDEV_BLOCK (~0UL)
+
+struct zram_wb_ctl {
+ /* idle list is accessed only by the writeback task, no concurrency */
+ struct list_head idle_reqs;
+ /* done list is accessed concurrently, protected by done_lock */
+ struct list_head done_reqs;
+ wait_queue_head_t done_wait;
+ spinlock_t done_lock;
+ atomic_t num_inflight;
+};
+
+struct zram_wb_req {
+ unsigned long blk_idx;
+ struct page *page;
+ struct zram_pp_slot *pps;
+ struct bio_vec bio_vec;
+ struct bio bio;
+
+ struct list_head entry;
+};
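
These two structs drive a small request pool; a sketch of the life cycle implied by the functions added below:

/*
 *   idle_reqs --zram_select_idle_req()--> bio submitted (num_inflight++)
 *        ^                                        |
 *        |                              zram_writeback_endio()
 *        |                                        v
 *        +-- zram_complete_done_reqs() <----- done_reqs
 *            (num_inflight--, pp slot released)
 *
 * done_lock protects done_reqs because the endio callback can run in
 * interrupt context; idle_reqs needs no lock since only the writeback
 * task touches it.
 */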
+
static ssize_t writeback_limit_enable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
@@ -510,33 +533,31 @@ static ssize_t writeback_limit_enable_store(struct device *dev,
if (kstrtoull(buf, 10, &val))
return ret;
- down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
+ down_write(&zram->init_lock);
zram->wb_limit_enable = val;
- spin_unlock(&zram->wb_limit_lock);
- up_read(&zram->init_lock);
+ up_write(&zram->init_lock);
ret = len;
return ret;
}
static ssize_t writeback_limit_enable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
bool val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->wb_limit_enable;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return sysfs_emit(buf, "%d\n", val);
}
static ssize_t writeback_limit_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
u64 val;
@@ -545,31 +566,71 @@ static ssize_t writeback_limit_store(struct device *dev,
if (kstrtoull(buf, 10, &val))
return ret;
- down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
+ /*
+ * When the page size is greater than 4KB, a bd_wb_limit value that is
+ * not aligned to the page size makes the counter wrap. For example,
+ * with a 16KB page size and bd_wb_limit set to 3, a single writeback
+ * operation would take bd_wb_limit below zero; since bd_wb_limit is
+ * unsigned, it wraps around to a huge value instead.
+ */
+ val = rounddown(val, PAGE_SIZE / 4096);
+
+ down_write(&zram->init_lock);
zram->bd_wb_limit = val;
- spin_unlock(&zram->wb_limit_lock);
- up_read(&zram->init_lock);
+ up_write(&zram->init_lock);
ret = len;
return ret;
}
static ssize_t writeback_limit_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u64 val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->bd_wb_limit;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return sysfs_emit(buf, "%llu\n", val);
}
+static ssize_t writeback_batch_size_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct zram *zram = dev_to_zram(dev);
+ u32 val;
+
+ if (kstrtouint(buf, 10, &val))
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ down_write(&zram->init_lock);
+ zram->wb_batch_size = val;
+ up_write(&zram->init_lock);
+
+ return len;
+}
+
+static ssize_t writeback_batch_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 val;
+ struct zram *zram = dev_to_zram(dev);
+
+ down_read(&zram->init_lock);
+ val = zram->wb_batch_size;
+ up_read(&zram->init_lock);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
static void reset_bdev(struct zram *zram)
{
if (!zram->backing_dev)
@@ -697,23 +758,20 @@ out:
return err;
}
-static unsigned long alloc_block_bdev(struct zram *zram)
+static unsigned long zram_reserve_bdev_block(struct zram *zram)
{
- unsigned long blk_idx = 1;
-retry:
- /* skip 0 bit to confuse zram.handle = 0 */
- blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, blk_idx);
- if (blk_idx == zram->nr_pages)
- return 0;
+ unsigned long blk_idx;
- if (test_and_set_bit(blk_idx, zram->bitmap))
- goto retry;
+ blk_idx = find_next_zero_bit(zram->bitmap, zram->nr_pages, 0);
+ if (blk_idx == zram->nr_pages)
+ return INVALID_BDEV_BLOCK;
+ set_bit(blk_idx, zram->bitmap);
atomic64_inc(&zram->stats.bd_count);
return blk_idx;
}
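
Two behavioral changes ride along with the rename, as far as the hunk shows:

/*
 * Notes on the rework (inferred from the hunk, not stated in it):
 *  - the scan now starts at bit 0, so block 0 becomes usable;
 *    INVALID_BDEV_BLOCK (~0UL) replaces 0 as the "no block" sentinel;
 *  - plain set_bit() without the old test_and_set_bit() retry loop
 *    assumes a single context reserves blocks at a time, which appears
 *    to hold since writeback is serialized via pp_in_progress.
 */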
-static void free_block_bdev(struct zram *zram, unsigned long blk_idx)
+static void zram_release_bdev_block(struct zram *zram, unsigned long blk_idx)
{
int was_set;
@@ -734,32 +792,249 @@ static void read_from_bdev_async(struct zram *zram, struct page *page,
submit_bio(bio);
}
-static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
+static void release_wb_req(struct zram_wb_req *req)
{
- unsigned long blk_idx = 0;
- struct page *page = NULL;
- struct zram_pp_slot *pps;
- struct bio_vec bio_vec;
- struct bio bio;
+ __free_page(req->page);
+ kfree(req);
+}
+
+static void release_wb_ctl(struct zram_wb_ctl *wb_ctl)
+{
+ if (!wb_ctl)
+ return;
+
+ /* We should never have inflight requests at this point */
+ WARN_ON(atomic_read(&wb_ctl->num_inflight));
+ WARN_ON(!list_empty(&wb_ctl->done_reqs));
+
+ while (!list_empty(&wb_ctl->idle_reqs)) {
+ struct zram_wb_req *req;
+
+ req = list_first_entry(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ list_del(&req->entry);
+ release_wb_req(req);
+ }
+
+ kfree(wb_ctl);
+}
+
+static struct zram_wb_ctl *init_wb_ctl(struct zram *zram)
+{
+ struct zram_wb_ctl *wb_ctl;
+ int i;
+
+ wb_ctl = kmalloc(sizeof(*wb_ctl), GFP_KERNEL);
+ if (!wb_ctl)
+ return NULL;
+
+ INIT_LIST_HEAD(&wb_ctl->idle_reqs);
+ INIT_LIST_HEAD(&wb_ctl->done_reqs);
+ atomic_set(&wb_ctl->num_inflight, 0);
+ init_waitqueue_head(&wb_ctl->done_wait);
+ spin_lock_init(&wb_ctl->done_lock);
+
+ for (i = 0; i < zram->wb_batch_size; i++) {
+ struct zram_wb_req *req;
+
+ /*
+ * This is a fatal condition only if we couldn't allocate
+ * any requests at all. Otherwise we just work with the
+ * requests that were successfully allocated, so writeback
+ * can still proceed even if only one request made it onto
+ * the idle list.
+ */
+ req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_NOWARN);
+ if (!req)
+ break;
+
+ req->page = alloc_page(GFP_KERNEL | __GFP_NOWARN);
+ if (!req->page) {
+ kfree(req);
+ break;
+ }
+
+ list_add(&req->entry, &wb_ctl->idle_reqs);
+ }
+
+ /* We couldn't allocate any requests, so writeback is not possible */
+ if (list_empty(&wb_ctl->idle_reqs))
+ goto release_wb_ctl;
+
+ return wb_ctl;
+
+release_wb_ctl:
+ release_wb_ctl(wb_ctl);
+ return NULL;
+}
+
+static void zram_account_writeback_rollback(struct zram *zram)
+{
+ lockdep_assert_held_read(&zram->init_lock);
+
+ if (zram->wb_limit_enable)
+ zram->bd_wb_limit += 1UL << (PAGE_SHIFT - 12);
+}
+
+static void zram_account_writeback_submit(struct zram *zram)
+{
+ lockdep_assert_held_read(&zram->init_lock);
+
+ if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
+ zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
+}
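
The shift encodes 4KB accounting units; a worked note on the arithmetic these two helpers share with writeback_limit_store():

/*
 * 1UL << (PAGE_SHIFT - 12) == PAGE_SIZE / 4096, i.e. the number of 4KB
 * units one written-back page consumes: 1 on 4KB-page systems, 4 with
 * 16KB pages, 16 with 64KB pages. rounddown(val, PAGE_SIZE / 4096) in
 * writeback_limit_store() keeps bd_wb_limit a multiple of this step so
 * the subtraction in zram_account_writeback_submit() cannot wrap.
 */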
+
+static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
+{
+ u32 index = req->pps->index;
+ int err;
+
+ err = blk_status_to_errno(req->bio.bi_status);
+ if (err) {
+ /*
+ * Failed wb requests should not be accounted in wb_limit
+ * (if enabled).
+ */
+ zram_account_writeback_rollback(zram);
+ zram_release_bdev_block(zram, req->blk_idx);
+ return err;
+ }
+
+ atomic64_inc(&zram->stats.bd_writes);
+ zram_slot_lock(zram, index);
+ /*
+ * We release the slot lock during writeback, so the slot can change
+ * under us: slot_free(), or slot_free() followed by zram_write_page().
+ * In both cases the slot loses its ZRAM_PP_SLOT flag. No concurrent
+ * post-processing can set ZRAM_PP_SLOT on such slots until the
+ * current post-processing finishes.
+ */
+ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) {
+ zram_release_bdev_block(zram, req->blk_idx);
+ goto out;
+ }
+
+ zram_free_page(zram, index);
+ zram_set_flag(zram, index, ZRAM_WB);
+ zram_set_handle(zram, index, req->blk_idx);
+ atomic64_inc(&zram->stats.pages_stored);
+
+out:
+ zram_slot_unlock(zram, index);
+ return 0;
+}
+
+static void zram_writeback_endio(struct bio *bio)
+{
+ struct zram_wb_req *req = container_of(bio, struct zram_wb_req, bio);
+ struct zram_wb_ctl *wb_ctl = bio->bi_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wb_ctl->done_lock, flags);
+ list_add(&req->entry, &wb_ctl->done_reqs);
+ spin_unlock_irqrestore(&wb_ctl->done_lock, flags);
+
+ wake_up(&wb_ctl->done_wait);
+}
+
+static void zram_submit_wb_request(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl,
+ struct zram_wb_req *req)
+{
+ /*
+ * wb_limit (if enabled) should be adjusted before submission,
+ * so that we don't over-submit.
+ */
+ zram_account_writeback_submit(zram);
+ atomic_inc(&wb_ctl->num_inflight);
+ req->bio.bi_private = wb_ctl;
+ submit_bio(&req->bio);
+}
+
+static int zram_complete_done_reqs(struct zram *zram,
+ struct zram_wb_ctl *wb_ctl)
+{
+ struct zram_wb_req *req;
+ unsigned long flags;
int ret = 0, err;
- u32 index;
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
+ while (atomic_read(&wb_ctl->num_inflight) > 0) {
+ spin_lock_irqsave(&wb_ctl->done_lock, flags);
+ req = list_first_entry_or_null(&wb_ctl->done_reqs,
+ struct zram_wb_req, entry);
+ if (req)
+ list_del(&req->entry);
+ spin_unlock_irqrestore(&wb_ctl->done_lock, flags);
+
+ /* ->num_inflight > 0 doesn't mean any requests are on the done list yet */
+ if (!req)
+ break;
+
+ err = zram_writeback_complete(zram, req);
+ if (err)
+ ret = err;
+
+ atomic_dec(&wb_ctl->num_inflight);
+ release_pp_slot(zram, req->pps);
+ req->pps = NULL;
+
+ list_add(&req->entry, &wb_ctl->idle_reqs);
+ }
+
+ return ret;
+}
+
+static struct zram_wb_req *zram_select_idle_req(struct zram_wb_ctl *wb_ctl)
+{
+ struct zram_wb_req *req;
+
+ req = list_first_entry_or_null(&wb_ctl->idle_reqs,
+ struct zram_wb_req, entry);
+ if (req)
+ list_del(&req->entry);
+ return req;
+}
+
+static int zram_writeback_slots(struct zram *zram,
+ struct zram_pp_ctl *ctl,
+ struct zram_wb_ctl *wb_ctl)
+{
+ unsigned long blk_idx = INVALID_BDEV_BLOCK;
+ struct zram_wb_req *req = NULL;
+ struct zram_pp_slot *pps;
+ int ret = 0, err = 0;
+ u32 index = 0;
while ((pps = select_pp_slot(ctl))) {
- spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
- spin_unlock(&zram->wb_limit_lock);
ret = -EIO;
break;
}
- spin_unlock(&zram->wb_limit_lock);
- if (!blk_idx) {
- blk_idx = alloc_block_bdev(zram);
- if (!blk_idx) {
+ while (!req) {
+ req = zram_select_idle_req(wb_ctl);
+ if (req)
+ break;
+
+ wait_event(wb_ctl->done_wait,
+ !list_empty(&wb_ctl->done_reqs));
+
+ err = zram_complete_done_reqs(zram, wb_ctl);
+ /*
+ * BIO errors are not fatal: we continue and simply
+ * attempt to write back the remaining objects (pages).
+ * At the same time we need to signal user-space that
+ * some writes (at least one, possibly all of them)
+ * were not successful, which we do by returning the
+ * most recent BIO error.
+ */
+ if (err)
+ ret = err;
+ }
+
+ if (blk_idx == INVALID_BDEV_BLOCK) {
+ blk_idx = zram_reserve_bdev_block(zram);
+ if (blk_idx == INVALID_BDEV_BLOCK) {
ret = -ENOSPC;
break;
}
@@ -768,74 +1043,54 @@ static int zram_writeback_slots(struct zram *zram, struct zram_pp_ctl *ctl)
index = pps->index;
zram_slot_lock(zram, index);
/*
- * scan_slots() sets ZRAM_PP_SLOT and relases slot lock, so
+ * scan_slots() sets ZRAM_PP_SLOT and releases slot lock, so
* slots can change in the meantime. If slots are accessed or
* freed they lose ZRAM_PP_SLOT flag and hence we don't
* post-process them.
*/
if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
goto next;
- if (zram_read_from_zspool(zram, page, index))
+ if (zram_read_from_zspool(zram, req->page, index))
goto next;
zram_slot_unlock(zram, index);
- bio_init(&bio, zram->bdev, &bio_vec, 1,
- REQ_OP_WRITE | REQ_SYNC);
- bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9);
- __bio_add_page(&bio, page, PAGE_SIZE, 0);
-
/*
- * XXX: A single page IO would be inefficient for write
- * but it would be not bad as starter.
+ * From now on pp-slot is owned by the req, remove it from
+ * its pp bucket.
*/
- err = submit_bio_wait(&bio);
- if (err) {
- release_pp_slot(zram, pps);
- /*
- * BIO errors are not fatal, we continue and simply
- * attempt to writeback the remaining objects (pages).
- * At the same time we need to signal user-space that
- * some writes (at least one, but also could be all of
- * them) were not successful and we do so by returning
- * the most recent BIO error.
- */
- ret = err;
- continue;
- }
-
- atomic64_inc(&zram->stats.bd_writes);
- zram_slot_lock(zram, index);
- /*
- * Same as above, we release slot lock during writeback so
- * slot can change under us: slot_free() or slot_free() and
- * reallocation (zram_write_page()). In both cases slot loses
- * ZRAM_PP_SLOT flag. No concurrent post-processing can set
- * ZRAM_PP_SLOT on such slots until current post-processing
- * finishes.
- */
- if (!zram_test_flag(zram, index, ZRAM_PP_SLOT))
- goto next;
+ list_del_init(&pps->entry);
+
+ req->blk_idx = blk_idx;
+ req->pps = pps;
+ bio_init(&req->bio, zram->bdev, &req->bio_vec, 1, REQ_OP_WRITE);
+ req->bio.bi_iter.bi_sector = req->blk_idx * (PAGE_SIZE >> 9);
+ req->bio.bi_end_io = zram_writeback_endio;
+ __bio_add_page(&req->bio, req->page, PAGE_SIZE, 0);
+
+ zram_submit_wb_request(zram, wb_ctl, req);
+ blk_idx = INVALID_BDEV_BLOCK;
+ req = NULL;
+ cond_resched();
+ continue;
- zram_free_page(zram, index);
- zram_set_flag(zram, index, ZRAM_WB);
- zram_set_handle(zram, index, blk_idx);
- blk_idx = 0;
- atomic64_inc(&zram->stats.pages_stored);
- spin_lock(&zram->wb_limit_lock);
- if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
- zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
- spin_unlock(&zram->wb_limit_lock);
next:
zram_slot_unlock(zram, index);
release_pp_slot(zram, pps);
-
- cond_resched();
}
- if (blk_idx)
- free_block_bdev(zram, blk_idx);
- if (page)
- __free_page(page);
+ /*
+ * We selected an idle req but never submitted it, due to an error
+ * or the wb limit.
+ */
+ if (req)
+ release_wb_req(req);
+
+ while (atomic_read(&wb_ctl->num_inflight) > 0) {
+ wait_event(wb_ctl->done_wait, !list_empty(&wb_ctl->done_reqs));
+ err = zram_complete_done_reqs(zram, wb_ctl);
+ if (err)
+ ret = err;
+ }
return ret;
}
@@ -948,7 +1203,8 @@ static ssize_t writeback_store(struct device *dev,
struct zram *zram = dev_to_zram(dev);
u64 nr_pages = zram->disksize >> PAGE_SHIFT;
unsigned long lo = 0, hi = nr_pages;
- struct zram_pp_ctl *ctl = NULL;
+ struct zram_pp_ctl *pp_ctl = NULL;
+ struct zram_wb_ctl *wb_ctl = NULL;
char *args, *param, *val;
ssize_t ret = len;
int err, mode = 0;
@@ -970,8 +1226,14 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- ctl = init_pp_ctl();
- if (!ctl) {
+ pp_ctl = init_pp_ctl();
+ if (!pp_ctl) {
+ ret = -ENOMEM;
+ goto release_init_lock;
+ }
+
+ wb_ctl = init_wb_ctl(zram);
+ if (!wb_ctl) {
ret = -ENOMEM;
goto release_init_lock;
}
@@ -1000,7 +1262,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
break;
}
@@ -1011,7 +1273,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
break;
}
@@ -1022,7 +1284,7 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
continue;
}
@@ -1033,17 +1295,18 @@ static ssize_t writeback_store(struct device *dev,
goto release_init_lock;
}
- scan_slots_for_writeback(zram, mode, lo, hi, ctl);
+ scan_slots_for_writeback(zram, mode, lo, hi, pp_ctl);
continue;
}
}
- err = zram_writeback_slots(zram, ctl);
+ err = zram_writeback_slots(zram, pp_ctl, wb_ctl);
if (err)
ret = err;
release_init_lock:
- release_pp_ctl(zram, ctl);
+ release_pp_ctl(zram, pp_ctl);
+ release_wb_ctl(wb_ctl);
atomic_set(&zram->pp_in_progress, 0);
up_read(&zram->init_lock);
@@ -1085,7 +1348,7 @@ static int read_from_bdev_sync(struct zram *zram, struct page *page,
work.entry = entry;
INIT_WORK_ONSTACK(&work.work, zram_sync_read);
- queue_work(system_unbound_wq, &work.work);
+ queue_work(system_dfl_wq, &work.work);
flush_work(&work.work);
destroy_work_on_stack(&work.work);
@@ -1112,7 +1375,9 @@ static int read_from_bdev(struct zram *zram, struct page *page,
return -EIO;
}
-static void free_block_bdev(struct zram *zram, unsigned long blk_idx) {};
+static void zram_release_bdev_block(struct zram *zram, unsigned long blk_idx)
+{
+}
#endif
#ifdef CONFIG_ZRAM_MEMORY_TRACKING
@@ -1225,18 +1490,6 @@ static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
zram->comp_algs[prio] = alg;
}
-static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio,
- char *buf, ssize_t at)
-{
- ssize_t sz;
-
- down_read(&zram->init_lock);
- sz = zcomp_available_show(zram->comp_algs[prio], buf, at);
- up_read(&zram->init_lock);
-
- return sz;
-}
-
static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
{
char *compressor;
@@ -1387,8 +1640,12 @@ static ssize_t comp_algorithm_show(struct device *dev,
char *buf)
{
struct zram *zram = dev_to_zram(dev);
+ ssize_t sz;
- return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf, 0);
+ down_read(&zram->init_lock);
+ sz = zcomp_available_show(zram->comp_algs[ZRAM_PRIMARY_COMP], buf, 0);
+ up_read(&zram->init_lock);
+ return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
@@ -1412,14 +1669,15 @@ static ssize_t recomp_algorithm_show(struct device *dev,
ssize_t sz = 0;
u32 prio;
+ down_read(&zram->init_lock);
for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
if (!zram->comp_algs[prio])
continue;
sz += sysfs_emit_at(buf, sz, "#%d: ", prio);
- sz += __comp_algorithm_show(zram, prio, buf, sz);
+ sz += zcomp_available_show(zram->comp_algs[prio], buf, sz);
}
-
+ up_read(&zram->init_lock);
return sz;
}
@@ -1641,7 +1899,7 @@ static void zram_free_page(struct zram *zram, size_t index)
if (zram_test_flag(zram, index, ZRAM_WB)) {
zram_clear_flag(zram, index, ZRAM_WB);
- free_block_bdev(zram, zram_get_handle(zram, index));
+ zram_release_bdev_block(zram, zram_get_handle(zram, index));
goto out;
}
@@ -1747,14 +2005,14 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index,
ret = zram_read_from_zspool(zram, page, index);
zram_slot_unlock(zram, index);
} else {
+ unsigned long blk_idx = zram_get_handle(zram, index);
+
/*
* The slot should be unlocked before reading from the backing
* device.
*/
zram_slot_unlock(zram, index);
-
- ret = read_from_bdev(zram, page, zram_get_handle(zram, index),
- parent);
+ ret = read_from_bdev(zram, page, blk_idx, parent);
}
/* Should NEVER happen. Return bio error if it does. */
@@ -1795,6 +2053,7 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
u32 index)
{
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_SAME);
zram_set_handle(zram, index, fill);
zram_slot_unlock(zram, index);
@@ -1832,6 +2091,7 @@ static int write_incompressible_page(struct zram *zram, struct page *page,
kunmap_local(src);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_flag(zram, index, ZRAM_HUGE);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, PAGE_SIZE);
@@ -1855,11 +2115,6 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
unsigned long element;
bool same_filled;
- /* First, free memory allocated to this slot (if any) */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
- zram_slot_unlock(zram, index);
-
mem = kmap_local_page(page);
same_filled = page_same_filled(mem, &element);
kunmap_local(mem);
@@ -1901,6 +2156,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
zcomp_stream_put(zstrm);
zram_slot_lock(zram, index);
+ zram_free_page(zram, index);
zram_set_handle(zram, index, handle);
zram_set_obj_size(zram, index, comp_len);
zram_slot_unlock(zram, index);
@@ -2619,6 +2875,7 @@ static DEVICE_ATTR_RW(backing_dev);
static DEVICE_ATTR_WO(writeback);
static DEVICE_ATTR_RW(writeback_limit);
static DEVICE_ATTR_RW(writeback_limit_enable);
+static DEVICE_ATTR_RW(writeback_batch_size);
#endif
#ifdef CONFIG_ZRAM_MULTI_COMP
static DEVICE_ATTR_RW(recomp_algorithm);
@@ -2640,6 +2897,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_writeback.attr,
&dev_attr_writeback_limit.attr,
&dev_attr_writeback_limit_enable.attr,
+ &dev_attr_writeback_batch_size.attr,
#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
@@ -2701,7 +2959,7 @@ static int zram_add(void)
init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
- spin_lock_init(&zram->wb_limit_lock);
+ zram->wb_batch_size = 32;
#endif
/* gendisk structure */
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index 6cee93f9c0d0..c6d94501376c 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -127,8 +127,8 @@ struct zram {
bool claim; /* Protected by disk->open_mutex */
#ifdef CONFIG_ZRAM_WRITEBACK
struct file *backing_dev;
- spinlock_t wb_limit_lock;
bool wb_limit_enable;
+ u32 wb_batch_size;
u64 bd_wb_limit;
struct block_device *bdev;
unsigned long *bitmap;
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 4ab32abf0f48..c5d45cf91f88 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -188,6 +188,7 @@ config BT_HCIUART_3WIRE
bool "Three-wire UART (H5) protocol support"
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
+ select CRC_CCITT
help
The HCI Three-wire UART Transport Layer makes it possible to
use the Bluetooth HCI over a serial port interface. The HCI
@@ -312,7 +313,9 @@ config BT_HCIBCM4377
config BT_HCIBPA10X
tristate "HCI BPA10x USB driver"
+ depends on BT_HCIUART
depends on USB
+ select BT_HCIUART_H4
help
Bluetooth HCI BPA10x USB driver.
This driver provides support for the Digianswer BPA 100/105 Bluetooth
@@ -437,8 +440,10 @@ config BT_MTKSDIO
config BT_MTKUART
tristate "MediaTek HCI UART driver"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
depends on USB || !BT_HCIBTUSB_MTK
+ select BT_HCIUART_H4
select BT_MTK
help
MediaTek Bluetooth HCI UART driver.
@@ -483,7 +488,9 @@ config BT_VIRTIO
config BT_NXPUART
tristate "NXP protocol support"
+ depends on BT_HCIUART
depends on SERIAL_DEV_BUS
+ select BT_HCIUART_H4
select CRC32
select CRC8
help
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 8b43dfc755de..e305d04aac9d 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -20,7 +20,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define VERSION "0.11"
@@ -41,6 +41,7 @@ struct bpa10x_data {
struct usb_anchor rx_anchor;
struct sk_buff *rx_skb[2];
+ struct hci_uart hu;
};
static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
if (urb->status == 0) {
bool idx = usb_pipebulk(urb->pipe);
- data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
+ data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length,
bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hci_set_drvdata(hdev, data);
data->hdev = hdev;
+ data->hu.hdev = hdev;
SET_HCIDEV_DEV(hdev, &intf->dev);
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 3a3a56ddbb06..d33cc70eec66 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -642,7 +642,9 @@ int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud
snprintf(postfix, sizeof(postfix), "-%4.4x-%4.4x", vid, pid);
}
- fw_name = kmalloc(BCM_FW_NAME_COUNT_MAX * BCM_FW_NAME_LEN, GFP_KERNEL);
+ fw_name = kmalloc_array(BCM_FW_NAME_COUNT_MAX,
+ sizeof(*fw_name),
+ GFP_KERNEL);
if (!fw_name)
return -ENOMEM;
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index be69d21c9aa7..9d29ab811f80 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -484,6 +484,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x1d: /* BlazarU (BzrU) */
case 0x1e: /* BlazarI (Bzr) */
case 0x1f: /* Scorpious Peak */
+ case 0x22: /* BlazarIW (BzrIW) */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -3253,6 +3254,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x22:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -3593,6 +3595,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x1d:
case 0x1e:
case 0x1f:
+ case 0x22:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 6e7bbbd35279..2936b535479f 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -15,9 +15,11 @@
#include <linux/interrupt.h>
#include <linux/unaligned.h>
+#include <linux/devcoredump.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_drv.h>
#include "btintel.h"
#include "btintel_pcie.h"
@@ -35,8 +37,13 @@
/* Intel Bluetooth PCIe device id table */
static const struct pci_device_id btintel_pcie_table[] = {
+ /* BlazarI, Wildcat Lake */
{ BTINTEL_PCI_DEVICE(0x4D76, PCI_ANY_ID) },
+ /* BlazarI, Lunar Lake */
{ BTINTEL_PCI_DEVICE(0xA876, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H484 */
+ { BTINTEL_PCI_DEVICE(0xE376, PCI_ANY_ID) },
+ /* Scorpious, Panther Lake-H404 */
{ BTINTEL_PCI_DEVICE(0xE476, PCI_ANY_ID) },
{ 0 }
};
@@ -554,25 +561,6 @@ static void btintel_pcie_mac_init(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static int btintel_pcie_add_dmp_data(struct hci_dev *hdev, const void *data, int size)
-{
- struct sk_buff *skb;
- int err;
-
- skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- return -ENOMEM;
-
- skb_put_data(skb, data, size);
- err = hci_devcd_append(hdev, skb);
- if (err) {
- bt_dev_err(hdev, "Failed to append data in the coredump");
- return err;
- }
-
- return 0;
-}
-
static int btintel_pcie_get_mac_access(struct btintel_pcie_data *data)
{
u32 reg;
@@ -617,30 +605,35 @@ static void btintel_pcie_release_mac_access(struct btintel_pcie_data *data)
btintel_pcie_wr_reg32(data, BTINTEL_PCIE_CSR_FUNC_CTRL_REG, reg);
}
-static void btintel_pcie_copy_tlv(struct sk_buff *skb, enum btintel_pcie_tlv_type type,
- void *data, int size)
+static void *btintel_pcie_copy_tlv(void *dest, enum btintel_pcie_tlv_type type,
+ void *data, size_t size)
{
struct intel_tlv *tlv;
- tlv = skb_put(skb, sizeof(*tlv) + size);
+ tlv = dest;
tlv->type = type;
tlv->len = size;
memcpy(tlv->val, data, tlv->len);
+ return dest + sizeof(*tlv) + size;
}
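
For reference, the flat buffer the rewritten dump path assembles; the intel_tlv layout (u8 type, u8 len, u8 val[]) is assumed from existing btintel code:

/*
 *  offset 0              u32: BTINTEL_PCIE_MAGIC_NUM (signature)
 *  offset 4              u32: data_len (size of the TLV area)
 *  offset 8              TLVs, back to back:
 *                          { u8 type; u8 len; u8 val[len]; }
 *  offset 8 + data_len   raw debug buffers,
 *                          dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE
 *
 * btintel_pcie_copy_tlv() returns dest advanced past the TLV it wrote,
 * which is what lets the calls below chain p = btintel_pcie_copy_tlv(p, ...).
 */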
static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
{
- u32 offset, prev_size, wr_ptr_status, dump_size, i;
+ u32 offset, prev_size, wr_ptr_status, dump_size, data_len;
struct btintel_pcie_dbgc *dbgc = &data->dbgc;
- u8 buf_idx, dump_time_len, fw_build;
struct hci_dev *hdev = data->hdev;
+ u8 *pdata, *p, buf_idx;
struct intel_tlv *tlv;
struct timespec64 now;
- struct sk_buff *skb;
struct tm tm_now;
- char buf[256];
- u16 hdr_len;
- int ret;
+ char fw_build[128];
+ char ts[128];
+ char vendor[64];
+ char driver[64];
+
+ if (!IS_ENABLED(CONFIG_DEV_COREDUMP))
+ return -EOPNOTSUPP;
+
wr_ptr_status = btintel_pcie_rd_dev_mem(data, BTINTEL_PCIE_DBGC_CUR_DBGBUFF_STATUS);
offset = wr_ptr_status & BTINTEL_PCIE_DBG_OFFSET_BIT_MASK;
@@ -657,88 +650,84 @@ static int btintel_pcie_read_dram_buffers(struct btintel_pcie_data *data)
else
return -EINVAL;
+ snprintf(vendor, sizeof(vendor), "Vendor: Intel\n");
+ snprintf(driver, sizeof(driver), "Driver: %s\n",
+ data->dmp_hdr.driver_name);
+
ktime_get_real_ts64(&now);
time64_to_tm(now.tv_sec, 0, &tm_now);
- dump_time_len = snprintf(buf, sizeof(buf), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
+ snprintf(ts, sizeof(ts), "Dump Time: %02d-%02d-%04ld %02d:%02d:%02d",
tm_now.tm_mday, tm_now.tm_mon + 1, tm_now.tm_year + 1900,
tm_now.tm_hour, tm_now.tm_min, tm_now.tm_sec);
- fw_build = snprintf(buf + dump_time_len, sizeof(buf) - dump_time_len,
+ snprintf(fw_build, sizeof(fw_build),
"Firmware Timestamp: Year %u WW %02u buildtype %u build %u",
2000 + (data->dmp_hdr.fw_timestamp >> 8),
data->dmp_hdr.fw_timestamp & 0xff, data->dmp_hdr.fw_build_type,
data->dmp_hdr.fw_build_num);
- hdr_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
- sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
- sizeof(*tlv) + dump_time_len +
- sizeof(*tlv) + fw_build;
+ data_len = sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_bt) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.write_ptr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.wrap_ctr) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.trigger_reason) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.fw_git_sha1) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvr_top) +
+ sizeof(*tlv) + sizeof(data->dmp_hdr.cnvi_top) +
+ sizeof(*tlv) + strlen(ts) +
+ sizeof(*tlv) + strlen(fw_build) +
+ sizeof(*tlv) + strlen(vendor) +
+ sizeof(*tlv) + strlen(driver);
- dump_size = hdr_len + sizeof(hdr_len);
+ /*
+ * sizeof(u32) - signature
+ * sizeof(data_len) - to store tlv data size
+ * data_len - TLV data
+ */
+ dump_size = sizeof(u32) + sizeof(data_len) + data_len;
- skb = alloc_skb(dump_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
/* Add debug buffers data length to dump size */
dump_size += BTINTEL_PCIE_DBGC_BUFFER_SIZE * dbgc->count;
- ret = hci_devcd_init(hdev, dump_size);
- if (ret) {
- bt_dev_err(hdev, "Failed to init devcoredump, err %d", ret);
- kfree_skb(skb);
- return ret;
- }
+ pdata = vmalloc(dump_size);
+ if (!pdata)
+ return -ENOMEM;
+ p = pdata;
- skb_put_data(skb, &hdr_len, sizeof(hdr_len));
+ *(u32 *)p = BTINTEL_PCIE_MAGIC_NUM;
+ p += sizeof(u32);
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
- sizeof(data->dmp_hdr.cnvi_bt));
+ *(u32 *)p = data_len;
+ p += sizeof(u32);
- btintel_pcie_copy_tlv(skb, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
- sizeof(data->dmp_hdr.write_ptr));
+
+ p = btintel_pcie_copy_tlv(p, BTINTEL_VENDOR, vendor, strlen(vendor));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DRIVER, driver, strlen(driver));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_DUMP_TIME, ts, strlen(ts));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_BUILD, fw_build,
+ strlen(fw_build));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_BT, &data->dmp_hdr.cnvi_bt,
+ sizeof(data->dmp_hdr.cnvi_bt));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRITE_PTR, &data->dmp_hdr.write_ptr,
+ sizeof(data->dmp_hdr.write_ptr));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
+ sizeof(data->dmp_hdr.wrap_ctr));
data->dmp_hdr.wrap_ctr = btintel_pcie_rd_dev_mem(data,
BTINTEL_PCIE_DBGC_DBGBUFF_WRAP_ARND);
- btintel_pcie_copy_tlv(skb, BTINTEL_WRAP_CTR, &data->dmp_hdr.wrap_ctr,
- sizeof(data->dmp_hdr.wrap_ctr));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
- sizeof(data->dmp_hdr.trigger_reason));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
- sizeof(data->dmp_hdr.fw_git_sha1));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
- sizeof(data->dmp_hdr.cnvr_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
- sizeof(data->dmp_hdr.cnvi_top));
-
- btintel_pcie_copy_tlv(skb, BTINTEL_DUMP_TIME, buf, dump_time_len);
-
- btintel_pcie_copy_tlv(skb, BTINTEL_FW_BUILD, buf + dump_time_len, fw_build);
-
- ret = hci_devcd_append(hdev, skb);
- if (ret)
- goto exit_err;
-
- for (i = 0; i < dbgc->count; i++) {
- ret = btintel_pcie_add_dmp_data(hdev, dbgc->bufs[i].data,
- BTINTEL_PCIE_DBGC_BUFFER_SIZE);
- if (ret)
- break;
- }
-
-exit_err:
- hci_devcd_complete(hdev);
- return ret;
+ p = btintel_pcie_copy_tlv(p, BTINTEL_TRIGGER_REASON, &data->dmp_hdr.trigger_reason,
+ sizeof(data->dmp_hdr.trigger_reason));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_FW_SHA, &data->dmp_hdr.fw_git_sha1,
+ sizeof(data->dmp_hdr.fw_git_sha1));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVR_TOP, &data->dmp_hdr.cnvr_top,
+ sizeof(data->dmp_hdr.cnvr_top));
+ p = btintel_pcie_copy_tlv(p, BTINTEL_CNVI_TOP, &data->dmp_hdr.cnvi_top,
+ sizeof(data->dmp_hdr.cnvi_top));
+
+ memcpy(p, dbgc->bufs[0].data, dbgc->count * BTINTEL_PCIE_DBGC_BUFFER_SIZE);
+ dev_coredumpv(&hdev->dev, pdata, dump_size, GFP_KERNEL);
+ return 0;
}
static void btintel_pcie_dump_traces(struct hci_dev *hdev)
@@ -760,51 +749,6 @@ static void btintel_pcie_dump_traces(struct hci_dev *hdev)
bt_dev_err(hdev, "Failed to dump traces: (%d)", ret);
}
-static void btintel_pcie_dump_hdr(struct hci_dev *hdev, struct sk_buff *skb)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
- u16 len = skb->len;
- u16 *hdrlen_ptr;
- char buf[80];
-
- hdrlen_ptr = skb_put_zero(skb, sizeof(len));
-
- snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n",
- INTEL_HW_VARIANT(data->dmp_hdr.cnvi_bt));
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Firmware Build Number: %u\n",
- data->dmp_hdr.fw_build_num);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Driver: %s\n", data->dmp_hdr.driver_name);
- skb_put_data(skb, buf, strlen(buf));
-
- snprintf(buf, sizeof(buf), "Vendor: Intel\n");
- skb_put_data(skb, buf, strlen(buf));
-
- *hdrlen_ptr = skb->len - len;
-}
-
-static void btintel_pcie_dump_notify(struct hci_dev *hdev, int state)
-{
- struct btintel_pcie_data *data = hci_get_drvdata(hdev);
-
- switch (state) {
- case HCI_DEVCOREDUMP_IDLE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- case HCI_DEVCOREDUMP_ACTIVE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_ACTIVE;
- break;
- case HCI_DEVCOREDUMP_TIMEOUT:
- case HCI_DEVCOREDUMP_ABORT:
- case HCI_DEVCOREDUMP_DONE:
- data->dmp_hdr.state = HCI_DEVCOREDUMP_IDLE;
- break;
- }
-}
-
/* This function enables BT function by setting BTINTEL_PCIE_CSR_FUNC_CTRL_MAC_INIT bit in
* BTINTEL_PCIE_CSR_FUNC_CTRL_REG register and wait for MSI-X with
* BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0.
@@ -882,6 +826,11 @@ static inline bool btintel_pcie_in_d0(struct btintel_pcie_data *data)
return !(data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_D3_STATE_READY);
}
+static inline bool btintel_pcie_in_device_halt(struct btintel_pcie_data *data)
+{
+ return data->boot_stage_cache & BTINTEL_PCIE_CSR_BOOT_STAGE_DEVICE_HALTED;
+}
+
static void btintel_pcie_wr_sleep_cntrl(struct btintel_pcie_data *data,
u32 dxstate)
{
@@ -1378,6 +1327,11 @@ static void btintel_pcie_rx_work(struct work_struct *work)
struct btintel_pcie_data, rx_work);
struct sk_buff *skb;
+ if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
+ btintel_pcie_dump_traces(data->hdev);
+ clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
+ }
+
if (test_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags)) {
/* Unlike usb products, controller will not send hardware
* exception event on exception. Instead controller writes the
@@ -1390,11 +1344,6 @@ static void btintel_pcie_rx_work(struct work_struct *work)
clear_bit(BTINTEL_PCIE_HWEXP_INPROGRESS, &data->flags);
}
- if (test_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags)) {
- btintel_pcie_dump_traces(data->hdev);
- clear_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS, &data->flags);
- }
-
/* Process the sk_buf in queue and send to the HCI layer */
while ((skb = skb_dequeue(&data->rx_skb_q))) {
btintel_pcie_recv_frame(data, skb);
@@ -1524,11 +1473,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
btintel_pcie_msix_gp1_handler(data);
- /* This interrupt is triggered by the firmware after updating
- * boot_stage register and image_response register
- */
- if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
- btintel_pcie_msix_gp0_handler(data);
/* For TX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {
@@ -1544,6 +1488,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_tx_handle(data);
}
+ /* This interrupt is triggered by the firmware after updating
+ * boot_stage register and image_response register
+ */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
+ btintel_pcie_msix_gp0_handler(data);
+
/*
* Before sending the interrupt the HW disables it to prevent a nested
* interrupt. This is done by writing 1 to the corresponding bit in
@@ -2149,6 +2099,7 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) {
case 0x1e: /* BzrI */
case 0x1f: /* ScP */
+ case 0x22: /* BzrIW */
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
@@ -2184,13 +2135,6 @@ static int btintel_pcie_setup_internal(struct hci_dev *hdev)
if (ver_tlv.img_type == 0x02 || ver_tlv.img_type == 0x03)
data->dmp_hdr.fw_git_sha1 = ver_tlv.git_sha1;
- err = hci_devcd_register(hdev, btintel_pcie_dump_traces, btintel_pcie_dump_hdr,
- btintel_pcie_dump_notify);
- if (err) {
- bt_dev_err(hdev, "Failed to register coredump (%d)", err);
- goto exit_error;
- }
-
btintel_print_fseq_info(hdev);
exit_error:
kfree_skb(skb);
@@ -2236,6 +2180,7 @@ btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
{
struct btintel_pcie_dev_recovery *tmp, *data = NULL;
const char *name = pci_name(pdev);
+ const size_t name_len = strlen(name) + 1;
struct hci_dev *hdev = to_hci_dev(dev);
spin_lock(&btintel_pcie_recovery_lock);
@@ -2252,11 +2197,11 @@ btintel_pcie_get_recovery(struct pci_dev *pdev, struct device *dev)
return data;
}
- data = kzalloc(struct_size(data, name, strlen(name) + 1), GFP_ATOMIC);
+ data = kzalloc(struct_size(data, name, name_len), GFP_ATOMIC);
if (!data)
return NULL;
- strscpy_pad(data->name, name, strlen(name) + 1);
+ strscpy(data->name, name, name_len);
spin_lock(&btintel_pcie_recovery_lock);
list_add_tail(&data->list, &btintel_pcie_recovery_list);
spin_unlock(&btintel_pcie_recovery_lock);
@@ -2319,7 +2264,6 @@ static void btintel_pcie_removal_work(struct work_struct *wk)
btintel_pcie_synchronize_irqs(data);
flush_work(&data->rx_work);
- flush_work(&data->hdev->dump.dump_rx);
bt_dev_dbg(data->hdev, "Release bluetooth interface");
btintel_pcie_release_hdev(data);
@@ -2410,6 +2354,70 @@ static void btintel_pcie_hw_error(struct hci_dev *hdev, u8 code)
btintel_pcie_reset(hdev);
}
+static bool btintel_pcie_wakeup(struct hci_dev *hdev)
+{
+ struct btintel_pcie_data *data = hci_get_drvdata(hdev);
+
+ return device_may_wakeup(&data->pdev->dev);
+}
+
+static const struct {
+ u16 opcode;
+ const char *desc;
+} btintel_pcie_hci_drv_supported_commands[] = {
+ /* Common commands */
+ { HCI_DRV_OP_READ_INFO, "Read Info" },
+};
+
+static int btintel_pcie_hci_drv_read_info(struct hci_dev *hdev, void *data,
+ u16 data_len)
+{
+ struct hci_drv_rp_read_info *rp;
+ size_t rp_size;
+ int err, i;
+ u16 opcode, num_supported_commands =
+ ARRAY_SIZE(btintel_pcie_hci_drv_supported_commands);
+
+ rp_size = sizeof(*rp) + num_supported_commands * 2;
+
+ rp = kmalloc(rp_size, GFP_KERNEL);
+ if (!rp)
+ return -ENOMEM;
+
+ strscpy_pad(rp->driver_name, KBUILD_MODNAME);
+
+ rp->num_supported_commands = cpu_to_le16(num_supported_commands);
+ for (i = 0; i < num_supported_commands; i++) {
+ opcode = btintel_pcie_hci_drv_supported_commands[i].opcode;
+ bt_dev_dbg(hdev,
+ "Supported HCI Drv command (0x%02x|0x%04x): %s",
+ hci_opcode_ogf(opcode),
+ hci_opcode_ocf(opcode),
+ btintel_pcie_hci_drv_supported_commands[i].desc);
+ rp->supported_commands[i] = cpu_to_le16(opcode);
+ }
+
+ err = hci_drv_cmd_complete(hdev, HCI_DRV_OP_READ_INFO,
+ HCI_DRV_STATUS_SUCCESS,
+ rp, rp_size);
+
+ kfree(rp);
+ return err;
+}
+
+static const struct hci_drv_handler btintel_pcie_hci_drv_common_handlers[] = {
+ { btintel_pcie_hci_drv_read_info, HCI_DRV_READ_INFO_SIZE },
+};
+
+static const struct hci_drv_handler btintel_pcie_hci_drv_specific_handlers[] = {};
+
+static struct hci_drv btintel_pcie_hci_drv = {
+ .common_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_common_handlers),
+ .common_handlers = btintel_pcie_hci_drv_common_handlers,
+ .specific_handler_count = ARRAY_SIZE(btintel_pcie_hci_drv_specific_handlers),
+ .specific_handlers = btintel_pcie_hci_drv_specific_handlers,
+};
+
static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
{
int err;
@@ -2435,6 +2443,8 @@ static int btintel_pcie_setup_hdev(struct btintel_pcie_data *data)
hdev->set_diag = btintel_set_diag;
hdev->set_bdaddr = btintel_set_bdaddr;
hdev->reset = btintel_pcie_reset;
+ hdev->wakeup = btintel_pcie_wakeup;
+ hdev->hci_drv = &btintel_pcie_hci_drv;
err = hci_register_dev(hdev);
if (err < 0) {
@@ -2573,11 +2583,165 @@ static void btintel_pcie_coredump(struct device *dev)
}
#endif
+static int btintel_pcie_set_dxstate(struct btintel_pcie_data *data, u32 dxstate)
+{
+ int retry = 0, status;
+ u32 dx_intr_timeout_ms = 200;
+
+ do {
+ data->gp0_received = false;
+
+ btintel_pcie_wr_sleep_cntrl(data, dxstate);
+
+ status = wait_event_timeout(data->gp0_wait_q, data->gp0_received,
+ msecs_to_jiffies(dx_intr_timeout_ms));
+
+ if (status)
+ return 0;
+
+ bt_dev_warn(data->hdev,
+ "Timeout (%u ms) on alive interrupt for D%d entry, retry count %d",
+ dx_intr_timeout_ms, dxstate, retry);
+
+ /* clear gp0 cause */
+ btintel_pcie_clr_reg_bits(data,
+ BTINTEL_PCIE_CSR_MSIX_HW_INT_CAUSES,
+ BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0);
+
+ /* A hardware bug may cause the alive interrupt to be missed.
+ * Check if the controller reached the expected state and retry
+ * the operation only if it hasn't.
+ */
+ if (dxstate == BTINTEL_PCIE_STATE_D0) {
+ if (btintel_pcie_in_d0(data))
+ return 0;
+ } else {
+ if (btintel_pcie_in_d3(data))
+ return 0;
+ }
+
+ } while (++retry < BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES);
+
+ return -EBUSY;
+}
+
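btintel_pcie_set_dxstate() relies on the usual waitqueue handshake with the interrupt path; a minimal sketch of the completion side (function name hypothetical, assuming gp0_wait_q is the wait_queue_head_t polled above):

	/* In the GP0/alive interrupt path: record the event, wake the waiter */
	static void example_gp0_handler(struct btintel_pcie_data *data)
	{
		data->gp0_received = true;
		wake_up(&data->gp0_wait_q);
	}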
+static int btintel_pcie_suspend_late(struct device *dev, pm_message_t mesg)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ u32 dxstate;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+
+ dxstate = (mesg.event == PM_EVENT_SUSPEND ?
+ BTINTEL_PCIE_STATE_D3_HOT : BTINTEL_PCIE_STATE_D3_COLD);
+
+ data->pm_sx_event = mesg.event;
+
+ start = ktime_get();
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ err = btintel_pcie_set_dxstate(data, dxstate);
+
+ if (err)
+ return err;
+
+ bt_dev_dbg(data->hdev,
+ "device entered into d3 state from d0 in %lld us",
+ ktime_to_us(ktime_get() - start));
+ return err;
+}
+
+static int btintel_pcie_suspend(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_SUSPEND);
+}
+
+static int btintel_pcie_hibernate(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_HIBERNATE);
+}
+
+static int btintel_pcie_freeze(struct device *dev)
+{
+ return btintel_pcie_suspend_late(dev, PMSG_FREEZE);
+}
+
+static int btintel_pcie_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct btintel_pcie_data *data;
+ ktime_t start;
+ int err;
+
+ data = pci_get_drvdata(pdev);
+ data->gp0_received = false;
+
+ start = ktime_get();
+
+ /* When the system enters S4 (hibernate), the bluetooth device loses
+ * power and its loaded firmware is erased. Consequently, a function
+ * level reset (FLR) is required on system resume to bring the
+ * controller back to an operational state by initiating a new
+ * firmware download.
+ */
+
+ if (data->pm_sx_event == PM_EVENT_FREEZE ||
+ data->pm_sx_event == PM_EVENT_HIBERNATE) {
+ set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
+ btintel_pcie_reset(data->hdev);
+ return 0;
+ }
+
+ /* Refer: 6.4.11.7 -> Platform power management */
+ err = btintel_pcie_set_dxstate(data, BTINTEL_PCIE_STATE_D0);
+
+ if (err == 0) {
+ bt_dev_dbg(data->hdev,
+ "device entered into d0 state from d3 in %lld us",
+ ktime_to_us(ktime_get() - start));
+ return err;
+ }
+
+ /* Trigger a function level reset if the controller is in an error
+ * state during resume() to bring it back to operational mode.
+ */
+
+ data->boot_stage_cache = btintel_pcie_rd_reg32(data,
+ BTINTEL_PCIE_CSR_BOOT_STAGE_REG);
+ if (btintel_pcie_in_error(data) ||
+ btintel_pcie_in_device_halt(data)) {
+ bt_dev_err(data->hdev, "Controller in error state for D0 entry");
+ if (!test_and_set_bit(BTINTEL_PCIE_COREDUMP_INPROGRESS,
+ &data->flags)) {
+ data->dmp_hdr.trigger_reason =
+ BTINTEL_PCIE_TRIGGER_REASON_FW_ASSERT;
+ queue_work(data->workqueue, &data->rx_work);
+ }
+ set_bit(BTINTEL_PCIE_CORE_HALTED, &data->flags);
+ btintel_pcie_reset(data->hdev);
+ }
+ return err;
+}
+
+static const struct dev_pm_ops btintel_pcie_pm_ops = {
+ .suspend = btintel_pcie_suspend,
+ .resume = btintel_pcie_resume,
+ .freeze = btintel_pcie_freeze,
+ .thaw = btintel_pcie_resume,
+ .poweroff = btintel_pcie_hibernate,
+ .restore = btintel_pcie_resume,
+};
+
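pm_sleep_ptr() lets the compiler drop the ops table on builds without CONFIG_PM_SLEEP; conceptually it reduces to (simplified from include/linux/pm.h):

	#define pm_sleep_ptr(_ptr) (IS_ENABLED(CONFIG_PM_SLEEP) ? (_ptr) : NULL)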
static struct pci_driver btintel_pcie_driver = {
.name = KBUILD_MODNAME,
.id_table = btintel_pcie_table,
.probe = btintel_pcie_probe,
.remove = btintel_pcie_remove,
+ .driver.pm = pm_sleep_ptr(&btintel_pcie_pm_ops),
#ifdef CONFIG_DEV_COREDUMP
.driver.coredump = btintel_pcie_coredump
#endif
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 0fa876c5b954..e3d941ffef4a 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -132,6 +132,8 @@ enum btintel_pcie_tlv_type {
BTINTEL_CNVI_TOP,
BTINTEL_DUMP_TIME,
BTINTEL_FW_BUILD,
+ BTINTEL_VENDOR,
+ BTINTEL_DRIVER
};
/* causes for the MBOX interrupts */
@@ -156,6 +158,8 @@ enum msix_mbox_int_causes {
/* Default interrupt timeout in msec */
#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000
+#define BTINTEL_PCIE_DX_TRANSITION_MAX_RETRIES 3
+
/* The number of descriptors in TX queues */
#define BTINTEL_PCIE_TX_DESCS_COUNT 32
@@ -462,6 +466,7 @@ struct btintel_pcie_dump_header {
* @txq: TX Queue struct
* @rxq: RX Queue struct
* @alive_intr_ctxt: Alive interrupt context
+ * @pm_sx_event: PM event on which the system was suspended
*/
struct btintel_pcie_data {
struct pci_dev *pdev;
@@ -511,6 +516,7 @@ struct btintel_pcie_data {
u32 alive_intr_ctxt;
struct btintel_pcie_dbgc dbgc;
struct btintel_pcie_dump_header dmp_hdr;
+ u8 pm_sx_event;
};
static inline u32 btintel_pcie_rd_reg32(struct btintel_pcie_data *data,
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index 4fc673640bfc..fba3ab6d30a5 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -29,7 +29,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.1"
@@ -615,7 +615,6 @@ static void btmtksdio_txrx_work(struct work_struct *work)
sdio_release_host(bdev->func);
- pm_runtime_mark_last_busy(bdev->dev);
pm_runtime_put_autosuspend(bdev->dev);
}
@@ -1270,6 +1269,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
sdio_claim_host(bdev->func);
+ /* set drv_pmctrl if BT is closed before doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ sdio_enable_func(bdev->func);
+ btmtksdio_drv_pmctrl(bdev);
+ }
+
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
skb_queue_purge(&bdev->txq);
cancel_work_sync(&bdev->txrx_work);
@@ -1285,6 +1290,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
goto err;
}
+ /* set fw_pmctrl back if BT is closed after doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ btmtksdio_fw_pmctrl(bdev);
+ sdio_disable_func(bdev->func);
+ }
+
clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
err:
sdio_release_host(bdev->func);
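All register traffic in this reset path stays inside the host claim; the canonical SDIO bracket looks like this (generic pattern, shown for context):

	sdio_claim_host(func);	/* serialize access to the SDIO function */
	/* ... register reads/writes, enable/disable ... */
	sdio_release_host(func);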
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 76995cfcd534..27aa48ff3ac2 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -27,7 +27,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#include "btmtk.h"
#define VERSION "0.2"
@@ -79,6 +79,7 @@ struct btmtkuart_dev {
u16 stp_dlen;
const struct btmtkuart_data *data;
+ struct hci_uart hu;
};
#define btmtkuart_is_standalone(bdev) \
@@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
sz_left -= adv;
p_left += adv;
- bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+ bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,
sz_h4, mtk_recv_pkts,
ARRAY_SIZE(mtk_recv_pkts));
if (IS_ERR(bdev->rx_skb)) {
@@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
}
bdev->hdev = hdev;
+ bdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, bdev);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 76e7f857fb7d..3b1e9224e965 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -24,7 +24,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#include "h4_recv.h"
+#include "hci_uart.h"
#define MANUFACTURER_NXP 37
@@ -212,6 +212,7 @@ struct btnxpuart_dev {
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
struct reset_control *pdn;
+ struct hci_uart hu;
};
#define NXP_V1_FW_REQ_PKT 0xa5
@@ -1756,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
ps_start_timer(nxpdev);
- nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
+ nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,
nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);
@@ -1875,6 +1876,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
reset_control_deassert(nxpdev->pdn);
nxpdev->hdev = hdev;
+ nxpdev->hu.hdev = hdev;
hdev->bus = HCI_UART;
hci_set_drvdata(hdev, nxpdev);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 6abd962502e3..5603b282f9bc 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -50,7 +50,7 @@
#define RTL_CHIP_SUBVER (&(struct rtl_vendor_cmd) {{0x10, 0x38, 0x04, 0x28, 0x80}})
#define RTL_CHIP_REV (&(struct rtl_vendor_cmd) {{0x10, 0x3A, 0x04, 0x28, 0x80}})
-#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0x0D, 0x00, 0xb0}})
+#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0xAD, 0x00, 0xb0}})
#define RTL_PATCH_SNIPPETS 0x01
#define RTL_PATCH_DUMMY_HEADER 0x02
@@ -72,6 +72,7 @@ enum btrtl_chip_id {
CHIP_ID_8851B = 36,
CHIP_ID_8922A = 44,
CHIP_ID_8852BT = 47,
+ CHIP_ID_8761C = 51,
};
struct id_table {
@@ -230,6 +231,14 @@ static const struct id_table ic_id_table[] = {
.cfg_name = "rtl_bt/rtl8761bu_config",
.hw_info = "rtl8761bu" },
+ /* 8761CU */
+ { IC_INFO(RTL_ROM_LMP_8761A, 0x0e, 0, HCI_USB),
+ .config_needed = false,
+ .has_rom_version = true,
+ .fw_name = "rtl_bt/rtl8761cu_fw",
+ .cfg_name = "rtl_bt/rtl8761cu_config",
+ .hw_info = "rtl8761cu" },
+
/* 8822C with UART interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0x8, HCI_UART),
.config_needed = true,
@@ -344,7 +353,8 @@ static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev,
(ic_id_table[i].hci_rev != hci_rev))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) &&
- (ic_id_table[i].hci_ver != hci_ver))
+ (ic_id_table[i].hci_ver != hci_ver) &&
+ (ic_id_table[i].hci_ver != 0))
continue;
if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) &&
(ic_id_table[i].hci_bus != hci_bus))
@@ -534,7 +544,6 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
{
struct rtl_epatch_header_v2 *hdr;
int rc;
- u8 reg_val[2];
u8 key_id;
u32 num_sections;
struct rtl_section *section;
@@ -549,14 +558,7 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
.len = btrtl_dev->fw_len - 7, /* Cut the tail */
};
- rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
- if (rc < 0)
- return -EIO;
- key_id = reg_val[0];
-
- rtl_dev_dbg(hdev, "%s: key id %u", __func__, key_id);
-
- btrtl_dev->key_id = key_id;
+ key_id = btrtl_dev->key_id;
hdr = rtl_iov_pull_data(&iov, sizeof(*hdr));
if (!hdr)
@@ -625,8 +627,10 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
len += entry->len;
}
- if (!len)
+ if (!len) {
+ kvfree(ptr);
return -EPERM;
+ }
*_buf = ptr;
return len;
@@ -668,6 +672,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
{ RTL_ROM_LMP_8851B, 36 }, /* 8851B */
{ RTL_ROM_LMP_8922A, 44 }, /* 8922A */
{ RTL_ROM_LMP_8852A, 47 }, /* 8852BT */
+ { RTL_ROM_LMP_8761A, 51 }, /* 8761C */
};
if (btrtl_dev->fw_len <= 8)
@@ -1068,6 +1073,8 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
u16 hci_rev, lmp_subver;
u8 hci_ver, lmp_ver, chip_type = 0;
int ret;
+ int rc;
+ u8 key_id;
u8 reg_val[2];
btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL);
@@ -1178,6 +1185,14 @@ next:
goto err_free;
}
+ rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val);
+ if (rc < 0)
+ goto err_free;
+
+ key_id = reg_val[0];
+ btrtl_dev->key_id = key_id;
+ rtl_dev_info(hdev, "%s: key id %u", __func__, key_id);
+
btrtl_dev->fw_len = -EIO;
if (lmp_subver == RTL_ROM_LMP_8852A && hci_rev == 0x000c) {
snprintf(fw_name, sizeof(fw_name), "%s_v2.bin",
@@ -1200,7 +1215,7 @@ next:
goto err_free;
}
- if (btrtl_dev->ic_info->cfg_name) {
+ if (btrtl_dev->ic_info->cfg_name && !btrtl_dev->key_id) {
if (postfix) {
snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin",
btrtl_dev->ic_info->cfg_name, postfix);
@@ -1301,6 +1316,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8851B:
case CHIP_ID_8922A:
case CHIP_ID_8852BT:
+ case CHIP_ID_8761C:
hci_set_quirk(hdev, HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED);
/* RTL8852C needs to transmit mSBC data continuously without
@@ -1520,6 +1536,8 @@ MODULE_FIRMWARE("rtl_bt/rtl8761b_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761b_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761bu_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8761bu_config.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761cu_fw.bin");
+MODULE_FIRMWARE("rtl_bt/rtl8761cu_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin");
MODULE_FIRMWARE("rtl_bt/rtl8821c_fw.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8085fabadde8..8ed3883ab8ee 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -66,6 +66,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
#define BTUSB_ACTIONS_SEMI BIT(27)
+#define BTUSB_BARROT BIT(28)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -503,6 +504,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8821CE Bluetooth devices */
{ USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3533), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
@@ -522,6 +525,8 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8851BU Bluetooth devices */
{ USB_DEVICE(0x3625, 0x010b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2001, 0x332a), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK |
@@ -582,6 +587,12 @@ static const struct usb_device_id quirks_table[] = {
/* Realtek 8852BT/8852BE-VT Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe12f), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3618), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3619), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8922AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8922), .driver_info = BTUSB_REALTEK |
@@ -618,6 +629,8 @@ static const struct usb_device_id quirks_table[] = {
/* Additional MediaTek MT7920 Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe134), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe135), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3620), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3621), .driver_info = BTUSB_MEDIATEK |
@@ -682,6 +695,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe153), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe170), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
@@ -698,6 +713,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3633), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
@@ -732,6 +749,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3613), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3627), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3628), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3630), .driver_info = BTUSB_MEDIATEK |
@@ -774,6 +793,8 @@ static const struct usb_device_id quirks_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2b89, 0x8761), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x2b89, 0x6275), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -810,6 +831,10 @@ static const struct usb_device_id quirks_table[] = {
{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Barrot Technology Bluetooth devices */
+ { USB_DEVICE(0x33fa, 0x0010), .driver_info = BTUSB_BARROT },
+ { USB_DEVICE(0x33fa, 0x0012), .driver_info = BTUSB_BARROT },
+
/* Actions Semiconductor ATS2851 based devices */
{ USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
@@ -1120,6 +1145,24 @@ static void btusb_qca_reset(struct hci_dev *hdev)
btusb_reset(hdev);
}
+static u8 btusb_classify_qca_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ /* Some Qualcomm controllers, e.g., QCNFA765 with WCN6855 chip, send debug
+ * packets as ACL frames with connection handle 0x2EDC. These are not real
+ * ACL packets and should be reclassified as HCI_DIAG_PKT to prevent
+ * "ACL packet for unknown connection handle 3804" errors.
+ */
+ if (skb->len >= 2) {
+ u16 handle = get_unaligned_le16(skb->data);
+
+ if (handle == 0x2EDC)
+ return HCI_DIAG_PKT;
+ }
+
+ /* Use default packet type for other packets */
+ return hci_skb_pkt_type(skb);
+}
+
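The "3804" in the quoted error message is just the 12-bit connection handle inside 0x2EDC; the HCI core masks handles with hci_handle() (standard definition, shown for context):

	#define hci_handle(h) ((h) & 0x0fff)
	/* 0x2edc & 0x0fff == 0x0edc == 3804, the value the core logs */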
static inline void btusb_free_frags(struct btusb_data *data)
{
unsigned long flags;
@@ -1192,6 +1235,18 @@ static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
}
if (!hci_skb_expect(skb)) {
+ /* Each chunk should contain one or more complete events, so
+ * if bytes remain that cannot start a new event, this is
+ * likely a bug in the controller.
+ */
+ if (count && count < HCI_EVENT_HDR_SIZE) {
+ bt_dev_warn(data->hdev,
+ "Unexpected continuation: %d bytes",
+ count);
+ count = 0;
+ }
+
/* Complete frame */
btusb_recv_event(data, skb);
skb = NULL;
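HCI_EVENT_HDR_SIZE is two bytes (event code plus parameter length), so a trailing fragment shorter than that can never begin a valid event, which is what the check above rejects. The header layout, per include/net/bluetooth/hci.h:

	struct hci_event_hdr {
		__u8 evt;
		__u8 plen;
	} __packed;	/* HCI_EVENT_HDR_SIZE == 2 */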
@@ -2688,9 +2743,21 @@ static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
{
- struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
+ struct btmtk_data *btmtk_data;
int err;
+ if (!data->hdev)
+ return;
+
+ btmtk_data = hci_get_priv(data->hdev);
+ if (!btmtk_data)
+ return;
+
+ if (!btmtk_data->isopkt_intf) {
+ bt_dev_err(data->hdev, "Can't claim NULL iso interface");
+ return;
+ }
+
/*
* The function usb_driver_claim_interface() is documented to need
* locks held if it's not called from a probe routine. The code here
@@ -2712,17 +2779,30 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
{
- struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ struct btmtk_data *btmtk_data;
+
+ if (!hdev)
+ return;
+
+ btmtk_data = hci_get_priv(hdev);
+ if (!btmtk_data)
+ return;
if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
- dev_kfree_skb_irq(btmtk_data->isopkt_skb);
- btmtk_data->isopkt_skb = NULL;
- usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
- usb_driver_release_interface(&btusb_driver,
- btmtk_data->isopkt_intf);
+ if (btmtk_data->isopkt_skb) {
+ dev_kfree_skb_irq(btmtk_data->isopkt_skb);
+ btmtk_data->isopkt_skb = NULL;
+ }
+
+ if (btmtk_data->isopkt_intf) {
+ usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
+ usb_driver_release_interface(&btusb_driver,
+ btmtk_data->isopkt_intf);
+ btmtk_data->isopkt_intf = NULL;
+ }
}
clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
@@ -2760,6 +2840,19 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data)
btusb_stop_traffic(data);
usb_kill_anchored_urbs(&data->tx_anchor);
+ /* Toggle the hard reset line. The MediaTek device will disconnect
+ * itself from the USB bus and re-enumerate. The teardown is handled
+ * as a standard USB disconnect, and the new device is detected
+ * cleanly and bound to the driver again.
+ */
+ if (data->reset_gpio) {
+ gpiod_set_value_cansleep(data->reset_gpio, 1);
+ msleep(200);
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+ return 0;
+ }
+
err = btmtk_usb_subsys_reset(hdev, btmtk_data->dev_id);
usb_queue_reset_device(data->intf);
@@ -3213,6 +3306,7 @@ static const struct qca_device_info qca_devices_table[] = {
static const struct qca_custom_firmware qca_custom_btfws[] = {
{ 0x00130201, 0x030A, "QCA2066" },
+ { 0x00130201, 0x030B, "QCA2066" },
{ },
};
@@ -4178,6 +4272,7 @@ static int btusb_probe(struct usb_interface *intf,
data->recv_acl = btusb_recv_acl_qca;
hci_devcd_register(hdev, btusb_coredump_qca, btusb_dump_hdr_qca, NULL);
data->setup_on_usb = btusb_setup_qca;
+ hdev->classify_pkt_type = btusb_classify_qca_pkt_type;
hdev->shutdown = btusb_shutdown_qca;
hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
hdev->reset = btusb_qca_reset;
@@ -4338,6 +4433,11 @@ static void btusb_disconnect(struct usb_interface *intf)
hci_unregister_dev(hdev);
+ if (data->oob_wake_irq)
+ device_init_wakeup(&data->udev->dev, false);
+ if (data->reset_gpio)
+ gpiod_put(data->reset_gpio);
+
if (intf == data->intf) {
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
@@ -4348,17 +4448,11 @@ static void btusb_disconnect(struct usb_interface *intf)
usb_driver_release_interface(&btusb_driver, data->diag);
usb_driver_release_interface(&btusb_driver, data->intf);
} else if (intf == data->diag) {
- usb_driver_release_interface(&btusb_driver, data->intf);
if (data->isoc)
usb_driver_release_interface(&btusb_driver, data->isoc);
+ usb_driver_release_interface(&btusb_driver, data->intf);
}
- if (data->oob_wake_irq)
- device_init_wakeup(&data->udev->dev, false);
-
- if (data->reset_gpio)
- gpiod_put(data->reset_gpio);
-
hci_free_dev(hdev);
}
diff --git a/drivers/bluetooth/h4_recv.h b/drivers/bluetooth/h4_recv.h
deleted file mode 100644
index 28cf2d8c2d48..000000000000
--- a/drivers/bluetooth/h4_recv.h
+++ /dev/null
@@ -1,153 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Generic Bluetooth HCI UART driver
- *
- * Copyright (C) 2015-2018 Intel Corporation
- */
-
-#include <linux/unaligned.h>
-
-struct h4_recv_pkt {
- u8 type; /* Packet type */
- u8 hlen; /* Header length */
- u8 loff; /* Data length offset in header */
- u8 lsize; /* Data length field size */
- u16 maxlen; /* Max overall packet length */
- int (*recv)(struct hci_dev *hdev, struct sk_buff *skb);
-};
-
-#define H4_RECV_ACL \
- .type = HCI_ACLDATA_PKT, \
- .hlen = HCI_ACL_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE \
-
-#define H4_RECV_SCO \
- .type = HCI_SCODATA_PKT, \
- .hlen = HCI_SCO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 1, \
- .maxlen = HCI_MAX_SCO_SIZE
-
-#define H4_RECV_EVENT \
- .type = HCI_EVENT_PKT, \
- .hlen = HCI_EVENT_HDR_SIZE, \
- .loff = 1, \
- .lsize = 1, \
- .maxlen = HCI_MAX_EVENT_SIZE
-
-#define H4_RECV_ISO \
- .type = HCI_ISODATA_PKT, \
- .hlen = HCI_ISO_HDR_SIZE, \
- .loff = 2, \
- .lsize = 2, \
- .maxlen = HCI_MAX_FRAME_SIZE
-
-static inline struct sk_buff *h4_recv_buf(struct hci_dev *hdev,
- struct sk_buff *skb,
- const unsigned char *buffer,
- int count,
- const struct h4_recv_pkt *pkts,
- int pkts_count)
-{
- /* Check for error from previous call */
- if (IS_ERR(skb))
- skb = NULL;
-
- while (count) {
- int i, len;
-
- if (!skb) {
- for (i = 0; i < pkts_count; i++) {
- if (buffer[0] != (&pkts[i])->type)
- continue;
-
- skb = bt_skb_alloc((&pkts[i])->maxlen,
- GFP_ATOMIC);
- if (!skb)
- return ERR_PTR(-ENOMEM);
-
- hci_skb_pkt_type(skb) = (&pkts[i])->type;
- hci_skb_expect(skb) = (&pkts[i])->hlen;
- break;
- }
-
- /* Check for invalid packet type */
- if (!skb)
- return ERR_PTR(-EILSEQ);
-
- count -= 1;
- buffer += 1;
- }
-
- len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
- skb_put_data(skb, buffer, len);
-
- count -= len;
- buffer += len;
-
- /* Check for partial packet */
- if (skb->len < hci_skb_expect(skb))
- continue;
-
- for (i = 0; i < pkts_count; i++) {
- if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
- break;
- }
-
- if (i >= pkts_count) {
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (skb->len == (&pkts[i])->hlen) {
- u16 dlen;
-
- switch ((&pkts[i])->lsize) {
- case 0:
- /* No variable data length */
- dlen = 0;
- break;
- case 1:
- /* Single octet variable length */
- dlen = skb->data[(&pkts[i])->loff];
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- case 2:
- /* Double octet variable length */
- dlen = get_unaligned_le16(skb->data +
- (&pkts[i])->loff);
- hci_skb_expect(skb) += dlen;
-
- if (skb_tailroom(skb) < dlen) {
- kfree_skb(skb);
- return ERR_PTR(-EMSGSIZE);
- }
- break;
- default:
- /* Unsupported variable length */
- kfree_skb(skb);
- return ERR_PTR(-EILSEQ);
- }
-
- if (!dlen) {
- /* No more data, complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- } else {
- /* Complete frame */
- (&pkts[i])->recv(hdev, skb);
- skb = NULL;
- }
- }
-
- return skb;
-}
diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c
index 2d40302409ff..94588676510f 100644
--- a/drivers/bluetooth/hci_ag6xx.c
+++ b/drivers/bluetooth/hci_ag6xx.c
@@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,
ag6xx_recv_pkts,
ARRAY_SIZE(ag6xx_recv_pkts));
if (IS_ERR(ag6xx->rx_skb)) {
diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c
index 707e90f80130..b1f32c5a8a3f 100644
--- a/drivers/bluetooth/hci_aml.c
+++ b/drivers/bluetooth/hci_aml.c
@@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count)
struct aml_data *aml_data = hu->priv;
int err;
- aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count,
+ aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,
aml_recv_pkts,
ARRAY_SIZE(aml_recv_pkts));
if (IS_ERR(aml_data->rx_skb)) {
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index dbfe34664633..8d2b5e7f0d6a 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;
- ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+ ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index f96617b85d87..9286a5f40f55 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -326,7 +326,6 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
bt_dev_dbg(bdev, "Host wake IRQ");
pm_runtime_get(bdev->dev);
- pm_runtime_mark_last_busy(bdev->dev);
pm_runtime_put_autosuspend(bdev->dev);
return IRQ_HANDLED;
@@ -698,7 +697,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+ bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,
bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
if (IS_ERR(bcm->rx_skb)) {
int err = PTR_ERR(bcm->rx_skb);
@@ -710,7 +709,6 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
mutex_lock(&bcm_device_lock);
if (bcm->dev && bcm_device_exists(bcm->dev)) {
pm_runtime_get(bcm->dev->dev);
- pm_runtime_mark_last_busy(bcm->dev->dev);
pm_runtime_put_autosuspend(bcm->dev->dev);
}
mutex_unlock(&bcm_device_lock);
@@ -748,10 +746,8 @@ static struct sk_buff *bcm_dequeue(struct hci_uart *hu)
skb = skb_dequeue(&bcm->txq);
- if (bdev) {
- pm_runtime_mark_last_busy(bdev->dev);
+ if (bdev)
pm_runtime_put_autosuspend(bdev->dev);
- }
mutex_unlock(&bcm_device_lock);
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 664d82d1e613..591abe6d63dd 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -582,6 +582,9 @@ static int bcsp_recv(struct hci_uart *hu, const void *data, int count)
struct bcsp_struct *bcsp = hu->priv;
const unsigned char *ptr;
+ if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
+ return -EUNATCH;
+
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 9070e31a68bf..ec017df8572c 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+ h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
int err = PTR_ERR(h4->rx_skb);
@@ -151,12 +151,12 @@ int __exit h4_deinit(void)
return hci_uart_unregister_proto(&h4p);
}
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count)
{
- struct hci_uart *hu = hci_get_drvdata(hdev);
u8 alignment = hu->alignment ? hu->alignment : 1;
+ struct hci_dev *hdev = hu->hdev;
/* Check for error from previous call */
if (IS_ERR(skb))
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index d0d4420c1a0f..96e20a66ecd1 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -7,6 +7,8 @@
*/
#include <linux/acpi.h>
+#include <linux/bitrev.h>
+#include <linux/crc-ccitt.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
@@ -58,6 +60,7 @@ enum {
H5_TX_ACK_REQ, /* Pending ack to send */
H5_WAKEUP_DISABLE, /* Device cannot wake host */
H5_HW_FLOW_CONTROL, /* Use HW flow control */
+ H5_CRC, /* Use CRC */
};
struct h5 {
@@ -141,8 +144,8 @@ static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
static u8 h5_cfg_field(struct h5 *h5)
{
- /* Sliding window size (first 3 bits) */
- return h5->tx_win & 0x07;
+ /* Sliding window size (first 3 bits) and CRC request (fifth bit). */
+ return (h5->tx_win & 0x07) | 0x10;
}
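The CRC request lands in bit 4 of the config octet (the "fifth bit" counting from one), next to the 3-bit sliding window; a worked value (illustrative only):

	/* tx_win = 4 with CRC requested: (4 & 0x07) | 0x10 == 0x14 */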
static void h5_timed_event(struct timer_list *t)
@@ -213,7 +216,6 @@ static void h5_peer_reset(struct hci_uart *hu)
static int h5_open(struct hci_uart *hu)
{
struct h5 *h5;
- const unsigned char sync[] = { 0x01, 0x7e };
BT_DBG("hu %p", hu);
@@ -243,9 +245,11 @@ static int h5_open(struct hci_uart *hu)
set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
- /* Send initial sync request */
- h5_link_control(hu, sync, sizeof(sync));
- mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
+ /*
+ * Defer the initial sync by one jiffy: the UART layer does not set
+ * HCI_UART_PROTO_READY, which is required for sending link packets,
+ * until this function returns.
+ */
+ mod_timer(&h5->timer, jiffies + 1);
return 0;
}
@@ -360,8 +364,10 @@ static void h5_handle_internal_rx(struct hci_uart *hu)
h5_link_control(hu, conf_rsp, 2);
h5_link_control(hu, conf_req, 3);
} else if (memcmp(data, conf_rsp, 2) == 0) {
- if (H5_HDR_LEN(hdr) > 2)
+ if (H5_HDR_LEN(hdr) > 2) {
h5->tx_win = (data[2] & 0x07);
+ assign_bit(H5_CRC, &h5->flags, data[2] & 0x10);
+ }
BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
h5->state = H5_ACTIVE;
hci_uart_init_ready(hu);
@@ -425,7 +431,24 @@ static void h5_complete_rx_pkt(struct hci_uart *hu)
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
- h5_complete_rx_pkt(hu);
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+ u16 crc;
+ __be16 crc_be;
+
+ crc = crc_ccitt(0xffff, hdr, 4 + H5_HDR_LEN(hdr));
+ crc = bitrev16(crc);
+
+ crc_be = cpu_to_be16(crc);
+
+ if (memcmp(&crc_be, hdr + 4 + H5_HDR_LEN(hdr), 2) != 0) {
+ bt_dev_err(hu->hdev, "Received packet with invalid CRC");
+ h5_reset_rx(h5);
+ } else {
+ /* Remove CRC bytes */
+ skb_trim(h5->rx_skb, 4 + H5_HDR_LEN(hdr));
+ h5_complete_rx_pkt(hu);
+ }
return 0;
}
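The receive check above and the transmit path later in this patch share the same CRC convention; a self-contained sketch (helper name hypothetical):

	#include <linux/bitrev.h>
	#include <linux/crc-ccitt.h>

	/* H5/BCSP CRC: CRC-CCITT seeded with 0xffff, bit-reversed, then
	 * appended big-endian after the payload.
	 */
	static u16 example_h5_crc(const u8 *buf, size_t len)
	{
		return bitrev16(crc_ccitt(0xffff, buf, len));
	}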
@@ -556,6 +579,7 @@ static void h5_reset_rx(struct h5 *h5)
h5->rx_func = h5_rx_delimiter;
h5->rx_pending = 0;
clear_bit(H5_RX_ESC, &h5->flags);
+ clear_bit(H5_CRC, &h5->flags);
}
static int h5_recv(struct hci_uart *hu, const void *data, int count)
@@ -592,7 +616,6 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
if (hu->serdev) {
pm_runtime_get(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
pm_runtime_put_autosuspend(&hu->serdev->dev);
}
@@ -634,7 +657,6 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
if (hu->serdev) {
pm_runtime_get_sync(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
pm_runtime_put_autosuspend(&hu->serdev->dev);
}
@@ -686,6 +708,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
struct h5 *h5 = hu->priv;
struct sk_buff *nskb;
u8 hdr[4];
+ u16 crc;
int i;
if (!valid_packet_type(pkt_type)) {
@@ -713,6 +736,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
/* Reliable packet? */
if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
hdr[0] |= 1 << 7;
+ hdr[0] |= (test_bit(H5_CRC, &h5->flags) && 1) << 6;
hdr[0] |= h5->tx_seq;
h5->tx_seq = (h5->tx_seq + 1) % 8;
}
@@ -732,6 +756,15 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
for (i = 0; i < len; i++)
h5_slip_one_byte(nskb, data[i]);
+ if (H5_HDR_CRC(hdr)) {
+ crc = crc_ccitt(0xffff, hdr, 4);
+ crc = crc_ccitt(crc, data, len);
+ crc = bitrev16(crc);
+
+ h5_slip_one_byte(nskb, (crc >> 8) & 0xff);
+ h5_slip_one_byte(nskb, crc & 0xff);
+ }
+
h5_slip_delim(nskb);
return nskb;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 9b353c3d6442..20baf2895dec 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -280,7 +280,6 @@ static irqreturn_t intel_irq(int irq, void *dev_id)
/* Host/Controller are now LPM resumed, trigger a new delayed suspend */
pm_runtime_get(&idev->pdev->dev);
- pm_runtime_mark_last_busy(&idev->pdev->dev);
pm_runtime_put_autosuspend(&idev->pdev->dev);
return IRQ_HANDLED;
@@ -371,7 +370,6 @@ static void intel_busy_work(struct work_struct *work)
list_for_each_entry(idev, &intel_device_list, list) {
if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get(&idev->pdev->dev);
- pm_runtime_mark_last_busy(&idev->pdev->dev);
pm_runtime_put_autosuspend(&idev->pdev->dev);
break;
}
@@ -972,7 +970,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+ intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
intel_recv_pkts,
ARRAY_SIZE(intel_recv_pkts));
if (IS_ERR(intel->rx_skb)) {
@@ -1003,7 +1001,6 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
list_for_each_entry(idev, &intel_device_list, list) {
if (hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get_sync(&idev->pdev->dev);
- pm_runtime_mark_last_busy(&idev->pdev->dev);
pm_runtime_put_autosuspend(&idev->pdev->dev);
break;
}
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 7044c86325ce..6f4e25917b86 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
+ ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,
ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
if (IS_ERR(ll->rx_skb)) {
int err = PTR_ERR(ll->rx_skb);
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index e08222395772..8767522ec4c6 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
!test_bit(STATE_FW_LOADED, &mrvl->flags))
return count;
- mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
- mrvl_recv_pkts,
- ARRAY_SIZE(mrvl_recv_pkts));
+ mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count,
+ mrvl_recv_pkts,
+ ARRAY_SIZE(mrvl_recv_pkts));
if (IS_ERR(mrvl->rx_skb)) {
int err = PTR_ERR(mrvl->rx_skb);
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index cd7575c20f65..1e65b541f8ad 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -624,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count,
- nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
+ btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count,
+ nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
if (IS_ERR(btdev->rx_skb)) {
err = PTR_ERR(btdev->rx_skb);
dev_err(dev, "Frame reassembly failed (%d)", err);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 4cff4d9be313..888176b0faa9 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1277,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;
- qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+ qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
if (IS_ERR(qca->rx_skb)) {
int err = PTR_ERR(qca->rx_skb);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 5ea5dd80e297..48ac7ca9334e 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -121,10 +121,6 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
-#ifdef CONFIG_BT_HCIUART_H4
-int h4_init(void);
-int h4_deinit(void);
-
struct h4_recv_pkt {
u8 type; /* Packet type */
u8 hlen; /* Header length */
@@ -162,7 +158,11 @@ struct h4_recv_pkt {
.lsize = 2, \
.maxlen = HCI_MAX_FRAME_SIZE \
-struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+#ifdef CONFIG_BT_HCIUART_H4
+int h4_init(void);
+int h4_deinit(void);
+
+struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
#endif
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index c1c0a4759c7e..25845c04e562 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -176,8 +176,8 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
- mc_dev->obj_desc.type);
+ return sysfs_emit(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+ mc_dev->obj_desc.type);
}
static DEVICE_ATTR_RO(modalias);
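sysfs_emit() is preferred over sprintf() in show() callbacks because it bounds output to the single page sysfs provides; conceptually the call above behaves like (simplified):

	/* scnprintf() capped at the one PAGE_SIZE sysfs buffer */
	return scnprintf(buf, PAGE_SIZE, "fsl-mc:v%08Xd%s\n",
			 mc_dev->obj_desc.vendor, mc_dev->obj_desc.type);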
@@ -203,7 +203,7 @@ static ssize_t driver_override_show(struct device *dev,
{
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", mc_dev->driver_override);
+ return sysfs_emit(buf, "%s\n", mc_dev->driver_override);
}
static DEVICE_ATTR_RW(driver_override);
@@ -1104,6 +1104,9 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
* Get physical address of MC portal for the root DPRC:
*/
plat_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!plat_res)
+ return -EINVAL;
+
mc_portal_phys_addr = plat_res->start;
mc_portal_size = resource_size(plat_res);
mc_portal_base_phys_addr = mc_portal_phys_addr & ~0x3ffffff;
diff --git a/drivers/bus/fsl-mc/mc-sys.c b/drivers/bus/fsl-mc/mc-sys.c
index b22c59d57c8f..31037f41893e 100644
--- a/drivers/bus/fsl-mc/mc-sys.c
+++ b/drivers/bus/fsl-mc/mc-sys.c
@@ -248,7 +248,7 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd)
enum mc_cmd_status status;
unsigned long irq_flags = 0;
- if (in_irq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
+ if (in_hardirq() && !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL))
return -EINVAL;
if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
index 577965f95fda..512da7482acc 100644
--- a/drivers/bus/mhi/ep/internal.h
+++ b/drivers/bus/mhi/ep/internal.h
@@ -11,7 +11,7 @@
#include "../common.h"
-extern struct bus_type mhi_ep_bus_type;
+extern const struct bus_type mhi_ep_bus_type;
#define MHI_REG_OFFSET 0x100
#define BHI_REG_OFFSET 0x200
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index b3eafcf2a2c5..3c208b5c8446 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -403,17 +403,13 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
{
struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- size_t tr_len, read_offset, write_offset;
+ size_t tr_len, read_offset;
struct mhi_ep_buf_info buf_info = {};
u32 len = MHI_EP_DEFAULT_MTU;
struct mhi_ring_element *el;
- bool tr_done = false;
void *buf_addr;
- u32 buf_left;
int ret;
- buf_left = len;
-
do {
/* Don't process the transfer ring if the channel is not in RUNNING state */
if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
@@ -426,24 +422,23 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
/* Check if there is data pending to be read from previous read operation */
if (mhi_chan->tre_bytes_left) {
dev_dbg(dev, "TRE bytes remaining: %u\n", mhi_chan->tre_bytes_left);
- tr_len = min(buf_left, mhi_chan->tre_bytes_left);
+ tr_len = min(len, mhi_chan->tre_bytes_left);
} else {
mhi_chan->tre_loc = MHI_TRE_DATA_GET_PTR(el);
mhi_chan->tre_size = MHI_TRE_DATA_GET_LEN(el);
mhi_chan->tre_bytes_left = mhi_chan->tre_size;
- tr_len = min(buf_left, mhi_chan->tre_size);
+ tr_len = min(len, mhi_chan->tre_size);
}
read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
- write_offset = len - buf_left;
buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
if (!buf_addr)
return -ENOMEM;
buf_info.host_addr = mhi_chan->tre_loc + read_offset;
- buf_info.dev_addr = buf_addr + write_offset;
+ buf_info.dev_addr = buf_addr;
buf_info.size = tr_len;
buf_info.cb = mhi_ep_read_completion;
buf_info.cb_buf = buf_addr;
@@ -459,16 +454,12 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
goto err_free_buf_addr;
}
- buf_left -= tr_len;
mhi_chan->tre_bytes_left -= tr_len;
- if (!mhi_chan->tre_bytes_left) {
- if (MHI_TRE_DATA_GET_IEOT(el))
- tr_done = true;
-
+ if (!mhi_chan->tre_bytes_left)
mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size;
- }
- } while (buf_left && !tr_done);
+ /* Read until the transfer ring becomes empty */
+ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
return 0;
@@ -502,15 +493,11 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring)
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
} else {
/* UL channel */
- do {
- ret = mhi_ep_read_channel(mhi_cntrl, ring);
- if (ret < 0) {
- dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
- return ret;
- }
-
- /* Read until the ring becomes empty */
- } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE));
+ ret = mhi_ep_read_channel(mhi_cntrl, ring);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n");
+ return ret;
+ }
}
return 0;
@@ -1507,7 +1494,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker);
INIT_WORK(&mhi_cntrl->ch_ring_work, mhi_ep_ch_ring_worker);
- mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0);
+ mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", WQ_PERCPU, 0);
if (!mhi_cntrl->wq) {
ret = -ENOMEM;
goto err_destroy_ring_item_cache;
@@ -1716,7 +1703,7 @@ static int mhi_ep_match(struct device *dev, const struct device_driver *drv)
return 0;
};
-struct bus_type mhi_ep_bus_type = {
+const struct bus_type mhi_ep_bus_type = {
.name = "mhi_ep",
.dev_name = "mhi_ep",
.match = mhi_ep_match,
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index 7f72aab38ce9..099be8dd1900 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -194,7 +194,6 @@ static void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;
@@ -221,7 +220,7 @@ static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
continue;
if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
- dev_err(dev, "irq %d not available for event ring\n",
+ dev_err(mhi_cntrl->cntrl_dev, "irq %d not available for event ring\n",
mhi_event->irq);
ret = -EINVAL;
goto error_request;
@@ -232,7 +231,7 @@ static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
irq_flags,
"mhi", mhi_event);
if (ret) {
- dev_err(dev, "Error requesting irq:%d for ev:%d\n",
+ dev_err(mhi_cntrl->cntrl_dev, "Error requesting irq:%d for ev:%d\n",
mhi_cntrl->irq[mhi_event->irq], i);
goto error_request;
}
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 034be33565b7..7937bb1f742c 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -170,6 +170,8 @@ enum mhi_pm_state {
MHI_PM_IN_ERROR_STATE(pm_state))
#define MHI_PM_IN_SUSPEND_STATE(pm_state) (pm_state & \
(MHI_PM_M3_ENTER | MHI_PM_M3))
+#define MHI_PM_FATAL_ERROR(pm_state) ((pm_state == MHI_PM_FW_DL_ERR) || \
+ (pm_state >= MHI_PM_SYS_ERR_FAIL))
#define NR_OF_CMD_RINGS 1
#define CMD_EL_PER_RING 128
@@ -403,6 +405,7 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee);
/* ISR handlers */
irqreturn_t mhi_irq_handler(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index 52bef663e182..861551274319 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -512,6 +512,7 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
mhi_cntrl->ee = ee;
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
wake_up_all(&mhi_cntrl->state_event);
}
break;
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 4edb5bb476ba..e3bc737313a2 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -34,28 +34,34 @@
/**
* struct mhi_pci_dev_info - MHI PCI device specific information
* @config: MHI controller configuration
+ * @vf_config: MHI controller configuration for virtual functions (optional)
* @name: name of the PCI module
* @fw: firmware path (if any)
* @edl: emergency download mode firmware path (if any)
* @edl_trigger: capable of triggering EDL mode in the device (if supported)
* @bar_num: PCI base address register to use for MHI MMIO register space
* @dma_data_width: DMA transfer word size (32 or 64 bits)
+ * @vf_dma_data_width: DMA transfer word size for VFs (optional)
* @mru_default: default MRU size for MBIM network packets
* @sideband_wake: Devices using dedicated sideband GPIO for wakeup instead
* of inband wake support (such as sdx24)
* @no_m3: M3 not supported
+ * @reset_on_remove: Set true for devices that require a SoC reset during driver removal
*/
struct mhi_pci_dev_info {
const struct mhi_controller_config *config;
+ const struct mhi_controller_config *vf_config;
const char *name;
const char *fw;
const char *edl;
bool edl_trigger;
unsigned int bar_num;
unsigned int dma_data_width;
+ unsigned int vf_dma_data_width;
unsigned int mru_default;
bool sideband_wake;
bool no_m3;
+ bool reset_on_remove;
};
#define MHI_CHANNEL_CONFIG_UL(ch_num, ch_name, el_count, ev_ring) \
@@ -296,8 +302,10 @@ static const struct mhi_pci_dev_info mhi_qcom_qdu100_info = {
.config = &mhi_qcom_qdu100_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .vf_dma_data_width = 40,
.sideband_wake = false,
.no_m3 = true,
+ .reset_on_remove = true,
};
static const struct mhi_channel_config mhi_qcom_sa8775p_channels[] = {
@@ -655,6 +663,17 @@ static const struct mhi_pci_dev_info mhi_foxconn_t99w696_info = {
.sideband_wake = false,
};
+static const struct mhi_pci_dev_info mhi_foxconn_t99w760_info = {
+ .name = "foxconn-t99w760",
+ .edl = "qcom/sdx35/foxconn/xbl_s_devprg_ns.melf",
+ .edl_trigger = true,
+ .config = &modem_foxconn_sdx61_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .mru_default = 32768,
+ .sideband_wake = false,
+};
+
static const struct mhi_channel_config mhi_mv3x_channels[] = {
MHI_CHANNEL_CONFIG_UL(0, "LOOPBACK", 64, 0),
MHI_CHANNEL_CONFIG_DL(1, "LOOPBACK", 64, 0),
@@ -869,6 +888,16 @@ static const struct mhi_pci_dev_info mhi_telit_fn990b40_info = {
.edl_trigger = true,
};
+static const struct mhi_pci_dev_info mhi_telit_fe990b40_info = {
+ .name = "telit-fe990b40",
+ .config = &modem_telit_fn920c04_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
+ .mru_default = 32768,
+ .edl_trigger = true,
+};
+
static const struct mhi_pci_dev_info mhi_netprisma_lcur57_info = {
.name = "netprisma-lcur57",
.edl = "qcom/prog_firehose_sdx24.mbn",
@@ -917,26 +946,17 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* Telit FE990A */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2015),
.driver_data = (kernel_ulong_t) &mhi_telit_fe990a_info },
- /* Foxconn T99W696.01, Lenovo Generic SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe142),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
- /* Foxconn T99W696.02, Lenovo X1 Carbon SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe143),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
- /* Foxconn T99W696.03, Lenovo X1 2in1 SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe144),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
- /* Foxconn T99W696.04, Lenovo PRC SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe145),
- .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
- /* Foxconn T99W696.00, Foxconn SKU */
- { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, 0xe146),
+ /* Foxconn T99W696, all variants */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, PCI_VENDOR_ID_FOXCONN, PCI_ANY_ID),
.driver_data = (kernel_ulong_t) &mhi_foxconn_t99w696_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info },
/* Telit FN990B40 (sdx72) */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x201a),
.driver_data = (kernel_ulong_t) &mhi_telit_fn990b40_info },
+ /* Telit FE990B40 (sdx72) */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0309, 0x1c5d, 0x2025),
+ .driver_data = (kernel_ulong_t) &mhi_telit_fe990b40_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info },
/* QDU100, x100-DU */
@@ -1001,6 +1021,8 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* DW5934e(sdx72), Non-eSIM */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe11e),
.driver_data = (kernel_ulong_t) &mhi_foxconn_dw5934e_info },
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe123),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_t99w760_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(PCI_VENDOR_ID_THALES, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
@@ -1037,6 +1059,7 @@ struct mhi_pci_device {
struct work_struct recovery_work;
struct timer_list health_check_timer;
unsigned long status;
+ bool reset_on_remove;
};
static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
@@ -1092,7 +1115,7 @@ static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
u16 vendor = 0;
- if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+ if (pci_read_config_word(pci_physfn(pdev), PCI_VENDOR_ID, &vendor))
return false;
if (vendor == (u16) ~0 || vendor == 0)
@@ -1203,7 +1226,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_warn(&pdev->dev, "device recovery started\n");
- timer_delete(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
pm_runtime_forbid(&pdev->dev);
/* Clean up MHI state */
@@ -1230,7 +1255,10 @@ static void mhi_pci_recovery_work(struct work_struct *work)
dev_dbg(&pdev->dev, "Recovery completed\n");
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
return;
err_unprepare:
@@ -1301,6 +1329,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
const struct mhi_controller_config *mhi_cntrl_config;
struct mhi_pci_device *mhi_pdev;
struct mhi_controller *mhi_cntrl;
+ unsigned int dma_data_width;
int err;
dev_info(&pdev->dev, "MHI PCI device found: %s\n", info->name);
@@ -1311,14 +1340,24 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
- timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
- mhi_cntrl_config = info->config;
+ if (pdev->is_virtfn && info->vf_config)
+ mhi_cntrl_config = info->vf_config;
+ else
+ mhi_cntrl_config = info->config;
+
+ /* Initialize health check monitor only for Physical functions */
+ if (pdev->is_physfn)
+ timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
mhi_cntrl = &mhi_pdev->mhi_cntrl;
+ dma_data_width = (pdev->is_virtfn && info->vf_dma_data_width) ?
+ info->vf_dma_data_width : info->dma_data_width;
+
mhi_cntrl->cntrl_dev = &pdev->dev;
mhi_cntrl->iova_start = 0;
- mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
+ mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(dma_data_width);
mhi_cntrl->fw_image = info->fw;
mhi_cntrl->edl_image = info->edl;
@@ -1330,6 +1369,9 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->mru = info->mru_default;
mhi_cntrl->name = info->name;
+ if (pdev->is_physfn)
+ mhi_pdev->reset_on_remove = info->reset_on_remove;
+
if (info->edl_trigger)
mhi_cntrl->edl_trigger = mhi_pci_generic_edl_trigger;
@@ -1339,7 +1381,7 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mhi_cntrl->wake_toggle = mhi_pci_wake_toggle_nop;
}
- err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
+ err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(dma_data_width));
if (err)
return err;
@@ -1376,7 +1418,8 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
/* start health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
/* Allow runtime suspend only if both PME from D3Hot and M3 are supported */
if (pci_pme_capable(pdev, PCI_D3hot) && !(info->no_m3)) {
@@ -1401,7 +1444,10 @@ static void mhi_pci_remove(struct pci_dev *pdev)
struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
- timer_delete_sync(&mhi_pdev->health_check_timer);
+ pci_disable_sriov(pdev);
+
+ if (pdev->is_physfn)
+ timer_delete_sync(&mhi_pdev->health_check_timer);
cancel_work_sync(&mhi_pdev->recovery_work);
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1413,6 +1459,9 @@ static void mhi_pci_remove(struct pci_dev *pdev)
if (pci_pme_capable(pdev, PCI_D3hot))
pm_runtime_get_noresume(&pdev->dev);
+ if (mhi_pdev->reset_on_remove)
+ mhi_soc_reset(mhi_cntrl);
+
mhi_unregister_controller(mhi_cntrl);
}
@@ -1429,7 +1478,8 @@ static void mhi_pci_reset_prepare(struct pci_dev *pdev)
dev_info(&pdev->dev, "reset\n");
- timer_delete(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
/* Clean up MHI state */
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
@@ -1474,7 +1524,8 @@ static void mhi_pci_reset_done(struct pci_dev *pdev)
}
set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
}
static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
@@ -1539,7 +1590,9 @@ static int __maybe_unused mhi_pci_runtime_suspend(struct device *dev)
if (test_and_set_bit(MHI_PCI_DEV_SUSPENDED, &mhi_pdev->status))
return 0;
- timer_delete(&mhi_pdev->health_check_timer);
+ if (pdev->is_physfn)
+ timer_delete(&mhi_pdev->health_check_timer);
+
cancel_work_sync(&mhi_pdev->recovery_work);
if (!test_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status) ||
@@ -1590,7 +1643,8 @@ static int __maybe_unused mhi_pci_runtime_resume(struct device *dev)
}
/* Resume health check */
- mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+ if (pdev->is_physfn)
+ mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
/* It can be a remote wakeup (no mhi runtime_get), update access time */
pm_runtime_mark_last_busy(dev);
@@ -1676,7 +1730,8 @@ static struct pci_driver mhi_pci_driver = {
.remove = mhi_pci_remove,
.shutdown = mhi_pci_shutdown,
.err_handler = &mhi_pci_err_handler,
- .driver.pm = &mhi_pci_pm_ops
+ .driver.pm = &mhi_pci_pm_ops,
+ .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(mhi_pci_driver);
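With .sriov_configure set to pci_sriov_configure_simple, VF creation is driven entirely from sysfs (echo N > /sys/bus/pci/devices/<BDF>/sriov_numvfs). A sketch of what an equivalent open-coded callback would look like; this is illustrative only, and the real helper additionally refuses to disable VFs that are still assigned to guests:

static int example_sriov_configure(struct pci_dev *pdev, int nr_virtfn)
{
	int ret;

	if (nr_virtfn == 0) {
		pci_disable_sriov(pdev);
		return 0;
	}

	ret = pci_enable_sriov(pdev, nr_virtfn);
	if (ret)
		return ret;

	return nr_virtfn;	/* the sysfs store expects the VF count back */
}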
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index 33d92bf2fc3e..b4ef115189b5 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -418,6 +418,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
device_for_each_child(&mhi_cntrl->mhi_dev->dev, &current_ee,
mhi_destroy_device);
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_MISSION_MODE);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
/* Force MHI to be in M0 state before continuing */
ret = __mhi_device_get_sync(mhi_cntrl);
@@ -631,6 +632,8 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
/* Wake up threads waiting for state transition */
wake_up_all(&mhi_cntrl->state_event);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (MHI_REG_ACCESS_VALID(prev_state)) {
/*
* If the device is in PBL or SBL, it will only respond to
@@ -829,6 +832,8 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_create_devices(mhi_cntrl);
if (mhi_cntrl->fbc_download)
mhi_download_amss_image(mhi_cntrl);
+
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_MISSION_MODE:
mhi_pm_mission_mode_transition(mhi_cntrl);
@@ -838,6 +843,7 @@ void mhi_pm_st_worker(struct work_struct *work)
mhi_cntrl->ee = MHI_EE_FP;
write_unlock_irq(&mhi_cntrl->pm_lock);
mhi_create_devices(mhi_cntrl);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
break;
case DEV_ST_TRANSITION_READY:
mhi_ready_state_transition(mhi_cntrl);
@@ -1240,6 +1246,8 @@ static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful,
write_unlock_irq(&mhi_cntrl->pm_lock);
mutex_unlock(&mhi_cntrl->pm_mutex);
+ mhi_uevent_notify(mhi_cntrl, mhi_cntrl->ee);
+
if (destroy_device)
mhi_queue_state_transition(mhi_cntrl,
DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE);
@@ -1279,7 +1287,7 @@ int mhi_sync_power_up(struct mhi_controller *mhi_cntrl)
mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms;
wait_event_timeout(mhi_cntrl->state_event,
MHI_IN_MISSION_MODE(mhi_cntrl->ee) ||
- MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
+ MHI_PM_FATAL_ERROR(mhi_cntrl->pm_state),
msecs_to_jiffies(timeout_ms));
ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 0 : -ETIMEDOUT;
@@ -1338,3 +1346,22 @@ void mhi_device_put(struct mhi_device *mhi_dev)
read_unlock_bh(&mhi_cntrl->pm_lock);
}
EXPORT_SYMBOL_GPL(mhi_device_put);
+
+void mhi_uevent_notify(struct mhi_controller *mhi_cntrl, enum mhi_ee_type ee)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ char *buf[2];
+ int ret;
+
+ buf[0] = kasprintf(GFP_KERNEL, "EXEC_ENV=%s", TO_MHI_EXEC_STR(ee));
+ buf[1] = NULL;
+
+ if (!buf[0])
+ return;
+
+ ret = kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, buf);
+ if (ret)
+ dev_err(dev, "Failed to send %s uevent\n", TO_MHI_EXEC_STR(ee));
+
+ kfree(buf[0]);
+}
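For consumers of the new EXEC_ENV uevents, a minimal userspace sketch follows. It assumes the raw kernel uevent netlink multicast group (group 1); on systems where udev rebroadcasts events, a udev rule matching ENV{EXEC_ENV} would be the more idiomatic hook.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,	/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd;

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
		char *p = buf;

		if (len <= 0)
			continue;
		buf[len] = '\0';
		/* payload is "ACTION@DEVPATH" then NUL-separated KEY=VALUE pairs */
		while (p < buf + len) {
			if (!strncmp(p, "EXEC_ENV=", 9))
				printf("MHI execution environment: %s\n", p + 9);
			p += strlen(p) + 1;
		}
	}
}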
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 00cb792bda18..dd94145c9b22 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1006,7 +1006,7 @@ static __init int mvebu_mbus_debugfs_init(void)
}
fs_initcall(mvebu_mbus_debugfs_init);
-static int mvebu_mbus_suspend(void)
+static int mvebu_mbus_suspend(void *data)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
@@ -1040,7 +1040,7 @@ static int mvebu_mbus_suspend(void)
return 0;
}
-static void mvebu_mbus_resume(void)
+static void mvebu_mbus_resume(void *data)
{
struct mvebu_mbus_state *s = &mbus_state;
int win;
@@ -1069,9 +1069,13 @@ static void mvebu_mbus_resume(void)
}
}
-static struct syscore_ops mvebu_mbus_syscore_ops = {
- .suspend = mvebu_mbus_suspend,
- .resume = mvebu_mbus_resume,
+static const struct syscore_ops mvebu_mbus_syscore_ops = {
+ .suspend = mvebu_mbus_suspend,
+ .resume = mvebu_mbus_resume,
+};
+
+static struct syscore mvebu_mbus_syscore = {
+ .ops = &mvebu_mbus_syscore_ops,
};
static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
@@ -1118,7 +1122,7 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
writel(UNIT_SYNC_BARRIER_ALL,
mbus->mbuswins_base + UNIT_SYNC_BARRIER_OFF);
- register_syscore_ops(&mvebu_mbus_syscore_ops);
+ register_syscore(&mvebu_mbus_syscore);
return 0;
}
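The mvebu-mbus hunk above follows the newer syscore API shape visible in this diff: callbacks take a context pointer, the ops table is const, and a struct syscore instance wraps it for registration. The exact API is inferred from this hunk alone, and how the data pointer gets populated is not shown, so the sketch below (foo_* names are hypothetical) simply mirrors the pattern:

static int foo_syscore_suspend(void *data)
{
	/* data is unused here, as in the mvebu conversion above */
	return 0;
}

static void foo_syscore_resume(void *data)
{
}

static const struct syscore_ops foo_syscore_ops = {
	.suspend = foo_syscore_suspend,
	.resume = foo_syscore_resume,
};

static struct syscore foo_syscore = {
	.ops = &foo_syscore_ops,
};

/* called once from the driver's init path */
static void foo_register(void)
{
	register_syscore(&foo_syscore);
}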
diff --git a/drivers/bus/stm32_rifsc.c b/drivers/bus/stm32_rifsc.c
index 4cf1b60014b7..debeaf8ea1bd 100644
--- a/drivers/bus/stm32_rifsc.c
+++ b/drivers/bus/stm32_rifsc.c
@@ -5,6 +5,7 @@
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -25,6 +26,8 @@
#define RIFSC_RISC_PRIVCFGR0 0x30
#define RIFSC_RISC_PER0_CIDCFGR 0x100
#define RIFSC_RISC_PER0_SEMCR 0x104
+#define RIFSC_RISC_REG0_ACFGR 0x900
+#define RIFSC_RISC_REG3_AADDR 0x924
#define RIFSC_RISC_HWCFGR2 0xFEC
/*
@@ -70,6 +73,565 @@
#define RIF_CID0 0x0
#define RIF_CID1 0x1
+#if defined(CONFIG_DEBUG_FS)
+#define RIFSC_RISUP_ENTRIES 128
+#define RIFSC_RIMU_ENTRIES 16
+#define RIFSC_RISAL_SUBREGIONS 2
+#define RIFSC_RISAL_GRANULARITY 8
+
+#define RIFSC_RIMC_ATTR0 0xC10
+
+#define RIFSC_RIMC_CIDSEL BIT(2)
+#define RIFSC_RIMC_MCID_MASK GENMASK(6, 4)
+#define RIFSC_RIMC_MSEC BIT(8)
+#define RIFSC_RIMC_MPRIV BIT(9)
+
+#define RIFSC_RISC_SRCID_MASK GENMASK(6, 4)
+#define RIFSC_RISC_SRPRIV BIT(9)
+#define RIFSC_RISC_SRSEC BIT(8)
+#define RIFSC_RISC_SRRLOCK BIT(1)
+#define RIFSC_RISC_SREN BIT(0)
+#define RIFSC_RISC_SRLENGTH_MASK GENMASK(27, 16)
+#define RIFSC_RISC_SRSTART_MASK GENMASK(10, 0)
+
+static const char *stm32mp21_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = {
+ "ETR",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "OTG_HS",
+ "USBH",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "RESERVED",
+ "DCMIPP",
+ "LTDC_L1/L2",
+ "LTDC_L3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+};
+
+static const char *stm32mp25_rifsc_rimu_names[RIFSC_RIMU_ENTRIES] = {
+ "ETR",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "USB3DR",
+ "USBH",
+ "ETH1",
+ "ETH2",
+ "PCIE",
+ "GPU",
+ "DMCIPP",
+ "LTDC_L0/L1",
+ "LTDC_L2",
+ "LTDC_ROT",
+ "VDEC",
+ "VENC"
+};
+
+static const char *stm32mp21_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = {
+ "TIM1",
+ "TIM2",
+ "TIM3",
+ "TIM4",
+ "TIM5",
+ "TIM6",
+ "TIM7",
+ "TIM8",
+ "TIM10",
+ "TIM11",
+ "TIM12",
+ "TIM13",
+ "TIM14",
+ "TIM15",
+ "TIM16",
+ "TIM17",
+ "RESERVED",
+ "LPTIM1",
+ "LPTIM2",
+ "LPTIM3",
+ "LPTIM4",
+ "LPTIM5",
+ "SPI1",
+ "SPI2",
+ "SPI3",
+ "SPI4",
+ "SPI5",
+ "SPI6",
+ "RESERVED",
+ "RESERVED",
+ "SPDIFRX",
+ "USART1",
+ "USART2",
+ "USART3",
+ "UART4",
+ "UART5",
+ "USART6",
+ "UART7",
+ "RESERVED",
+ "RESERVED",
+ "LPUART1",
+ "I2C1",
+ "I2C2",
+ "I2C3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "SAI1",
+ "SAI2",
+ "SAI3",
+ "SAI4",
+ "RESERVED",
+ "MDF1",
+ "RESERVED",
+ "FDCAN",
+ "HDP",
+ "ADC1",
+ "ADC2",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "USBH",
+ "RESERVED",
+ "RESERVED",
+ "OTG_HS",
+ "DDRPERFM",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "STGEN",
+ "OCTOSPI1",
+ "RESERVED",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "RESERVED",
+ "LTDC_CMN",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "CSI",
+ "DCMIPP",
+ "DCMI_PSSI",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RNG1",
+ "RNG2",
+ "PKA",
+ "SAES",
+ "HASH1",
+ "HASH2",
+ "CRYP1",
+ "CRYP2",
+ "IWDG1",
+ "IWDG2",
+ "IWDG3",
+ "IWDG4",
+ "WWDG1",
+ "RESERVED",
+ "VREFBUF",
+ "DTS",
+ "RAMCFG",
+ "CRC",
+ "SERC",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "I3C1",
+ "I3C2",
+ "I3C3",
+ "RESERVED",
+ "ICACHE_DCACHE",
+ "LTDC_L1L2",
+ "LTDC_L3",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "RESERVED",
+ "OTFDEC1",
+ "RESERVED",
+ "IAC",
+};
+
+static const char *stm32mp25_rifsc_risup_names[RIFSC_RISUP_ENTRIES] = {
+ "TIM1",
+ "TIM2",
+ "TIM3",
+ "TIM4",
+ "TIM5",
+ "TIM6",
+ "TIM7",
+ "TIM8",
+ "TIM10",
+ "TIM11",
+ "TIM12",
+ "TIM13",
+ "TIM14",
+ "TIM15",
+ "TIM16",
+ "TIM17",
+ "TIM20",
+ "LPTIM1",
+ "LPTIM2",
+ "LPTIM3",
+ "LPTIM4",
+ "LPTIM5",
+ "SPI1",
+ "SPI2",
+ "SPI3",
+ "SPI4",
+ "SPI5",
+ "SPI6",
+ "SPI7",
+ "SPI8",
+ "SPDIFRX",
+ "USART1",
+ "USART2",
+ "USART3",
+ "UART4",
+ "UART5",
+ "USART6",
+ "UART7",
+ "UART8",
+ "UART9",
+ "LPUART1",
+ "I2C1",
+ "I2C2",
+ "I2C3",
+ "I2C4",
+ "I2C5",
+ "I2C6",
+ "I2C7",
+ "I2C8",
+ "SAI1",
+ "SAI2",
+ "SAI3",
+ "SAI4",
+ "RESERVED",
+ "MDF1",
+ "ADF1",
+ "FDCAN",
+ "HDP",
+ "ADC12",
+ "ADC3",
+ "ETH1",
+ "ETH2",
+ "RESERVED",
+ "USBH",
+ "RESERVED",
+ "RESERVED",
+ "USB3DR",
+ "COMBOPHY",
+ "PCIE",
+ "UCPD1",
+ "ETHSW_DEIP",
+ "ETHSW_ACM_CF",
+ "ETHSW_ACM_MSGBU",
+ "STGEN",
+ "OCTOSPI1",
+ "OCTOSPI2",
+ "SDMMC1",
+ "SDMMC2",
+ "SDMMC3",
+ "GPU",
+ "LTDC_CMN",
+ "DSI_CMN",
+ "RESERVED",
+ "RESERVED",
+ "LVDS",
+ "RESERVED",
+ "CSI",
+ "DCMIPP",
+ "DCMI_PSSI",
+ "VDEC",
+ "VENC",
+ "RESERVED",
+ "RNG",
+ "PKA",
+ "SAES",
+ "HASH",
+ "CRYP1",
+ "CRYP2",
+ "IWDG1",
+ "IWDG2",
+ "IWDG3",
+ "IWDG4",
+ "IWDG5",
+ "WWDG1",
+ "WWDG2",
+ "RESERVED",
+ "VREFBUF",
+ "DTS",
+ "RAMCFG",
+ "CRC",
+ "SERC",
+ "OCTOSPIM",
+ "GICV2M",
+ "RESERVED",
+ "I3C1",
+ "I3C2",
+ "I3C3",
+ "I3C4",
+ "ICACHE_DCACHE",
+ "LTDC_L0L1",
+ "LTDC_L2",
+ "LTDC_ROT",
+ "DSI_TRIG",
+ "DSI_RDFIFO",
+ "RESERVED",
+ "OTFDEC1",
+ "OTFDEC2",
+ "IAC",
+};
+
+struct rifsc_risup_debug_data {
+ char dev_name[15];
+ u8 dev_cid;
+ u8 dev_sem_cids;
+ u8 dev_id;
+ bool dev_cid_filt_en;
+ bool dev_sem_en;
+ bool dev_priv;
+ bool dev_sec;
+};
+
+struct rifsc_rimu_debug_data {
+ char m_name[11];
+ u8 m_cid;
+ bool cidsel;
+ bool m_sec;
+ bool m_priv;
+};
+
+struct rifsc_subreg_debug_data {
+ bool sr_sec;
+ bool sr_priv;
+ u8 sr_cid;
+ bool sr_rlock;
+ bool sr_enable;
+ u16 sr_start;
+ u16 sr_length;
+};
+
+struct stm32_rifsc_resources_names {
+ const char **device_names;
+ const char **initiator_names;
+};
+struct rifsc_dbg_private {
+ const struct stm32_rifsc_resources_names *res_names;
+ void __iomem *mmio;
+ unsigned int nb_risup;
+ unsigned int nb_rimu;
+ unsigned int nb_risal;
+};
+
+static const struct stm32_rifsc_resources_names rifsc_mp21_res_names = {
+ .device_names = stm32mp21_rifsc_risup_names,
+ .initiator_names = stm32mp21_rifsc_rimu_names,
+};
+
+static const struct stm32_rifsc_resources_names rifsc_mp25_res_names = {
+ .device_names = stm32mp25_rifsc_risup_names,
+ .initiator_names = stm32mp25_rifsc_rimu_names,
+};
+
+static void stm32_rifsc_fill_rimu_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_rimu_debug_data *dbg_entry, int i)
+{
+ const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names;
+ u32 rimc_attr = readl_relaxed(rifsc->mmio + RIFSC_RIMC_ATTR0 + 0x4 * i);
+
+ snprintf(dbg_entry->m_name, sizeof(dbg_entry->m_name), "%s", dbg_names->initiator_names[i]);
+ dbg_entry->m_cid = FIELD_GET(RIFSC_RIMC_MCID_MASK, rimc_attr);
+ dbg_entry->cidsel = rimc_attr & RIFSC_RIMC_CIDSEL;
+ dbg_entry->m_sec = rimc_attr & RIFSC_RIMC_MSEC;
+ dbg_entry->m_priv = rimc_attr & RIFSC_RIMC_MPRIV;
+}
+
+static void stm32_rifsc_fill_dev_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_risup_debug_data *dbg_entry, int i)
+{
+ const struct stm32_rifsc_resources_names *dbg_names = rifsc->res_names;
+ u32 cid_cfgr, sec_cfgr, priv_cfgr;
+ u8 reg_id = i / IDS_PER_RISC_SEC_PRIV_REGS;
+ u8 reg_offset = i % IDS_PER_RISC_SEC_PRIV_REGS;
+
+ cid_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PER0_CIDCFGR + 0x8 * i);
+ sec_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_SECCFGR0 + 0x4 * reg_id);
+ priv_cfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_PRIVCFGR0 + 0x4 * reg_id);
+
+ snprintf(dbg_entry->dev_name, sizeof(dbg_entry->dev_name), "%s",
+ dbg_names->device_names[i]);
+ dbg_entry->dev_id = i;
+ dbg_entry->dev_cid_filt_en = cid_cfgr & CIDCFGR_CFEN;
+ dbg_entry->dev_sem_en = cid_cfgr & CIDCFGR_SEMEN;
+ dbg_entry->dev_cid = FIELD_GET(RIFSC_RISC_SCID_MASK, cid_cfgr);
+ dbg_entry->dev_sem_cids = FIELD_GET(RIFSC_RISC_SEMWL_MASK, cid_cfgr);
+ dbg_entry->dev_sec = sec_cfgr & BIT(reg_offset) ? true : false;
+ dbg_entry->dev_priv = priv_cfgr & BIT(reg_offset) ? true : false;
+}
+
+static void stm32_rifsc_fill_subreg_dbg_entry(struct rifsc_dbg_private *rifsc,
+ struct rifsc_subreg_debug_data *dbg_entry, int i,
+ int j)
+{
+ u32 risc_xcfgr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG0_ACFGR + 0x10 * i + 0x8 * j);
+ u32 risc_xaddr;
+
+ dbg_entry->sr_sec = risc_xcfgr & RIFSC_RISC_SRSEC;
+ dbg_entry->sr_priv = risc_xcfgr & RIFSC_RISC_SRPRIV;
+ dbg_entry->sr_cid = FIELD_GET(RIFSC_RISC_SRCID_MASK, risc_xcfgr);
+ dbg_entry->sr_rlock = risc_xcfgr & RIFSC_RISC_SRRLOCK;
+ dbg_entry->sr_enable = risc_xcfgr & RIFSC_RISC_SREN;
+ if (i == 2) {
+ risc_xaddr = readl_relaxed(rifsc->mmio + RIFSC_RISC_REG3_AADDR + 0x8 * j);
+ dbg_entry->sr_length = FIELD_GET(RIFSC_RISC_SRLENGTH_MASK, risc_xaddr);
+ dbg_entry->sr_start = FIELD_GET(RIFSC_RISC_SRSTART_MASK, risc_xaddr);
+ } else {
+ dbg_entry->sr_start = 0;
+ dbg_entry->sr_length = U16_MAX;
+ }
+}
+
+static int stm32_rifsc_conf_dump_show(struct seq_file *s, void *data)
+{
+ struct rifsc_dbg_private *rifsc = (struct rifsc_dbg_private *)s->private;
+ int i, j;
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RIFSC dump\n");
+ seq_puts(s, "=============================================\n\n");
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RISUP dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_printf(s, "\n| %-15s |", "Peripheral name");
+ seq_puts(s, "| Firewall ID |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |");
+ seq_puts(s, "| CID filtering |");
+ seq_puts(s, "| Semaphore mode |");
+ seq_puts(s, "| SCID |");
+ seq_printf(s, "| %7s |\n", "SEMWL");
+
+ for (i = 0; i < RIFSC_RISUP_ENTRIES && i < rifsc->nb_risup; i++) {
+ struct rifsc_risup_debug_data d_dbg_entry;
+
+ stm32_rifsc_fill_dev_dbg_entry(rifsc, &d_dbg_entry, i);
+
+ seq_printf(s, "| %-15s |", d_dbg_entry.dev_name);
+ seq_printf(s, "| %-11d |", d_dbg_entry.dev_id);
+ seq_printf(s, "| %-8s |", d_dbg_entry.dev_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |", d_dbg_entry.dev_priv ? "PRIV" : "NPRIV");
+ seq_printf(s, "| %-13s |", str_enabled_disabled(d_dbg_entry.dev_cid_filt_en));
+ seq_printf(s, "| %-14s |", str_enabled_disabled(d_dbg_entry.dev_sem_en));
+ seq_printf(s, "| %-4d |", d_dbg_entry.dev_cid);
+ seq_printf(s, "| %#-7x |\n", d_dbg_entry.dev_sem_cids);
+ }
+
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RIMU dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_puts(s, "| RIMU's name |");
+ seq_puts(s, "| CIDSEL |");
+ seq_puts(s, "| MCID |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |\n");
+
+ for (i = 0; i < RIFSC_RIMU_ENTRIES && i < rifsc->nb_rimu; i++) {
+ struct rifsc_rimu_debug_data m_dbg_entry;
+
+ stm32_rifsc_fill_rimu_dbg_entry(rifsc, &m_dbg_entry, i);
+
+ seq_printf(s, "| %-11s |", m_dbg_entry.m_name);
+ seq_printf(s, "| %-6s |", m_dbg_entry.cidsel ? "CIDSEL" : "");
+ seq_printf(s, "| %-4d |", m_dbg_entry.m_cid);
+ seq_printf(s, "| %-8s |", m_dbg_entry.m_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |\n", m_dbg_entry.m_priv ? "PRIV" : "NPRIV");
+ }
+
+ if (rifsc->nb_risal > 0) {
+ seq_puts(s, "\n=============================================\n");
+ seq_puts(s, " RISAL dump\n");
+ seq_puts(s, "=============================================\n");
+
+ seq_puts(s, "| Memory |");
+ seq_puts(s, "| Subreg. |");
+ seq_puts(s, "| N/SECURE |");
+ seq_puts(s, "| N/PRIVILEGED |");
+ seq_puts(s, "| Subreg. CID |");
+ seq_puts(s, "| Resource lock |");
+ seq_puts(s, "| Subreg. enable |");
+ seq_puts(s, "| Subreg. start |");
+ seq_puts(s, "| Subreg. end |\n");
+
+ for (i = 0; i < rifsc->nb_risal; i++) {
+ for (j = 0; j < RIFSC_RISAL_SUBREGIONS; j++) {
+ struct rifsc_subreg_debug_data sr_dbg_entry;
+
+ stm32_rifsc_fill_subreg_dbg_entry(rifsc, &sr_dbg_entry, i, j);
+
+ seq_printf(s, "| LPSRAM%1d |", i + 1);
+ seq_printf(s, "| %1s |", (j == 0) ? "A" : "B");
+ seq_printf(s, "| %-8s |", sr_dbg_entry.sr_sec ? "SEC" : "NSEC");
+ seq_printf(s, "| %-12s |", sr_dbg_entry.sr_priv ? "PRIV" : "NPRIV");
+ seq_printf(s, "| 0x%-9x |", sr_dbg_entry.sr_cid);
+ seq_printf(s, "| %-13s |",
+ sr_dbg_entry.sr_rlock ? "locked (1)" : "unlocked (0)");
+ seq_printf(s, "| %-14s |",
+ str_enabled_disabled(sr_dbg_entry.sr_enable));
+ seq_printf(s, "| 0x%-11x |", sr_dbg_entry.sr_start);
+ seq_printf(s, "| 0x%-11x |\n", sr_dbg_entry.sr_start +
+ sr_dbg_entry.sr_length - 1);
+ }
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stm32_rifsc_conf_dump);
+
+static int stm32_rifsc_register_debugfs(struct stm32_firewall_controller *rifsc_controller,
+ u32 nb_risup, u32 nb_rimu, u32 nb_risal)
+{
+ struct rifsc_dbg_private *rifsc_priv;
+ struct dentry *root = NULL;
+
+ rifsc_priv = devm_kzalloc(rifsc_controller->dev, sizeof(*rifsc_priv), GFP_KERNEL);
+ if (!rifsc_priv)
+ return -ENOMEM;
+
+ rifsc_priv->mmio = rifsc_controller->mmio;
+ rifsc_priv->nb_risup = nb_risup;
+ rifsc_priv->nb_rimu = nb_rimu;
+ rifsc_priv->nb_risal = nb_risal;
+ rifsc_priv->res_names = of_device_get_match_data(rifsc_controller->dev);
+
+ root = debugfs_lookup("stm32_firewall", NULL);
+ if (!root)
+ root = debugfs_create_dir("stm32_firewall", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ debugfs_create_file("rifsc", 0444, root, rifsc_priv, &stm32_rifsc_conf_dump_fops);
+
+ return 0;
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
static bool stm32_rifsc_is_semaphore_available(void __iomem *addr)
{
return !(readl(addr) & SEMCR_MUTEX);
@@ -207,9 +769,19 @@ static int stm32_rifsc_probe(struct platform_device *pdev)
rifsc_controller->release_access = stm32_rifsc_release_access;
/* Get number of RIFSC entries */
- nb_risup = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF1_MASK;
- nb_rimu = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF2_MASK;
- nb_risal = readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2) & HWCFGR2_CONF3_MASK;
+ nb_risup = FIELD_GET(HWCFGR2_CONF1_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ nb_rimu = FIELD_GET(HWCFGR2_CONF2_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ nb_risal = FIELD_GET(HWCFGR2_CONF3_MASK,
+ readl(rifsc_controller->mmio + RIFSC_RISC_HWCFGR2));
+ /*
+ * On STM32MP21, RIFSC_RISC_HWCFGR2 shows an incorrect number of RISAL (NUM_RISAL is 3
+ * instead of 0). A software workaround is implemented using the st,mem-map property in the
+ * device tree. This property is absent or left empty if there is no RISAL.
+ */
+ if (of_device_is_compatible(np, "st,stm32mp21-rifsc"))
+ nb_risal = 0;
rifsc_controller->max_entries = nb_risup + nb_rimu + nb_risal;
platform_set_drvdata(pdev, rifsc_controller);
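The FIELD_GET() conversion above is a real fix, not a cleanup: the old code masked the register but never shifted the field down, so any field not starting at bit 0 produced an inflated count. A worked example with an assumed mask layout (the real HWCFGR2_CONF*_MASK definitions live elsewhere in the driver):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_CONF_MASK	GENMASK(15, 8)	/* assumed layout, for illustration */

static unsigned int example_extract(u32 hwcfgr2)
{
	/*
	 * With hwcfgr2 == 0x1000:
	 *   hwcfgr2 & EXAMPLE_CONF_MASK           == 0x1000 (still shifted up)
	 *   FIELD_GET(EXAMPLE_CONF_MASK, hwcfgr2) == 0x10   (the actual count)
	 */
	return FIELD_GET(EXAMPLE_CONF_MASK, hwcfgr2);
}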
@@ -228,12 +800,29 @@ static int stm32_rifsc_probe(struct platform_device *pdev)
return rc;
}
+#if defined(CONFIG_DEBUG_FS)
+ rc = stm32_rifsc_register_debugfs(rifsc_controller, nb_risup, nb_rimu, nb_risal);
+ if (rc)
+ return dev_err_probe(rifsc_controller->dev, rc, "Failed creating debugfs entry\n");
+#endif
+
/* Populate all allowed nodes */
return of_platform_populate(np, NULL, NULL, &pdev->dev);
}
static const struct of_device_id stm32_rifsc_of_match[] = {
- { .compatible = "st,stm32mp25-rifsc" },
+ {
+ .compatible = "st,stm32mp25-rifsc",
+#if defined(CONFIG_DEBUG_FS)
+ .data = &rifsc_mp25_res_names,
+#endif
+ },
+ {
+ .compatible = "st,stm32mp21-rifsc",
+#if defined(CONFIG_DEBUG_FS)
+ .data = &rifsc_mp21_res_names,
+#endif
+ },
{}
};
MODULE_DEVICE_TABLE(of, stm32_rifsc_of_match);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 7a33c3b31d1e..82735c58be11 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -373,7 +373,6 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
unlock:
mutex_unlock(&rsb->lock);
- pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
@@ -417,7 +416,6 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
mutex_unlock(&rsb->lock);
- pm_runtime_mark_last_busy(rsb->dev);
pm_runtime_put_autosuspend(rsb->dev);
return ret;
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 5566ad11399e..610354ce7f8f 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -48,6 +48,7 @@ enum sysc_soc {
SOC_UNKNOWN,
SOC_2420,
SOC_2430,
+ SOC_AM33,
SOC_3430,
SOC_AM35,
SOC_3630,
@@ -2912,6 +2913,7 @@ static void ti_sysc_idle(struct work_struct *work)
static const struct soc_device_attribute sysc_soc_match[] = {
SOC_FLAG("OMAP242*", SOC_2420),
SOC_FLAG("OMAP243*", SOC_2430),
+ SOC_FLAG("AM33*", SOC_AM33),
SOC_FLAG("AM35*", SOC_AM35),
SOC_FLAG("OMAP3[45]*", SOC_3430),
SOC_FLAG("OMAP3[67]*", SOC_3630),
@@ -3117,10 +3119,15 @@ static int sysc_check_active_timer(struct sysc *ddata)
* can be dropped if we stop supporting old beagleboard revisions
* A to B4 at some point.
*/
- if (sysc_soc->soc == SOC_3430 || sysc_soc->soc == SOC_AM35)
+ switch (sysc_soc->soc) {
+ case SOC_AM33:
+ case SOC_3430:
+ case SOC_AM35:
error = -ENXIO;
- else
+ break;
+ default:
error = -EBUSY;
+ }
if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
(ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
diff --git a/drivers/cache/Kconfig b/drivers/cache/Kconfig
index db51386c663a..1518449d47b5 100644
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -1,9 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
-menu "Cache Drivers"
+
+menuconfig CACHEMAINT_FOR_DMA
+ bool "Cache management for noncoherent DMA"
+ depends on RISCV
+ default y
+ help
+ These drivers implement support for noncoherent DMA master devices
+ on platforms that lack the standard CPU interfaces for this.
+
+if CACHEMAINT_FOR_DMA
config AX45MP_L2_CACHE
bool "Andes Technology AX45MP L2 Cache controller"
- depends on RISCV
select RISCV_NONSTANDARD_CACHE_OPS
help
Support for the L2 cache controller on Andes Technology AX45MP platforms.
@@ -16,7 +24,6 @@ config SIFIVE_CCACHE
config STARFIVE_STARLINK_CACHE
bool "StarFive StarLink Cache controller"
- depends on RISCV
depends on ARCH_STARFIVE
depends on 64BIT
select RISCV_DMA_NONCOHERENT
@@ -24,4 +31,26 @@ config STARFIVE_STARLINK_CACHE
help
Support for the StarLink cache controller IP from StarFive.
-endmenu
+endif # CACHEMAINT_FOR_DMA
+
+menuconfig CACHEMAINT_FOR_HOTPLUG
+ bool "Cache management for memory hot plug like operations"
+ depends on GENERIC_CPU_CACHE_MAINTENANCE
+ help
+ These drivers implement cache management for flows where it is necessary
+ to flush data from all host caches.
+
+if CACHEMAINT_FOR_HOTPLUG
+
+config HISI_SOC_HHA
+ tristate "HiSilicon Hydra Home Agent (HHA) device driver"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ help
+ The Hydra Home Agent (HHA) is responsible for cache coherency
+ on the SoC. This driver enables the cache maintenance functions of
+ the HHA.
+
+ This driver can be built as a module. If so, the module will be
+ called hisi_soc_hha.
+
+endif # CACHEMAINT_FOR_HOTPLUG
diff --git a/drivers/cache/Makefile b/drivers/cache/Makefile
index 55c5e851034d..b3362b15d6c1 100644
--- a/drivers/cache/Makefile
+++ b/drivers/cache/Makefile
@@ -3,3 +3,5 @@
obj-$(CONFIG_AX45MP_L2_CACHE) += ax45mp_cache.o
obj-$(CONFIG_SIFIVE_CCACHE) += sifive_ccache.o
obj-$(CONFIG_STARFIVE_STARLINK_CACHE) += starfive_starlink_cache.o
+
+obj-$(CONFIG_HISI_SOC_HHA) += hisi_soc_hha.o
diff --git a/drivers/cache/hisi_soc_hha.c b/drivers/cache/hisi_soc_hha.c
new file mode 100644
index 000000000000..25ff0f5ae79b
--- /dev/null
+++ b/drivers/cache/hisi_soc_hha.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Hydra Home Agent (HHA).
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ * Yushan Wang <wangyushan12@huawei.com>
+ *
+ * A system typically contains multiple HHAs. Each is responsible for a subset
+ * of the physical addresses in the system, but interleaving can make the
+ * mapping from a particular cache line to the responsible HHA complex. As
+ * such, no filtering is done in the driver; the hardware responds with
+ * success even if it was not responsible for any addresses in the range on
+ * which the operation was requested.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cache_coherency.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#define HISI_HHA_CTRL 0x5004
+#define HISI_HHA_CTRL_EN BIT(0)
+#define HISI_HHA_CTRL_RANGE BIT(1)
+#define HISI_HHA_CTRL_TYPE GENMASK(3, 2)
+#define HISI_HHA_START_L 0x5008
+#define HISI_HHA_START_H 0x500c
+#define HISI_HHA_LEN_L 0x5010
+#define HISI_HHA_LEN_H 0x5014
+
+/* Maintenance operations are performed at a 128-byte granularity */
+#define HISI_HHA_MAINT_ALIGN 128
+
+#define HISI_HHA_POLL_GAP_US 10
+#define HISI_HHA_POLL_TIMEOUT_US 50000
+
+struct hisi_soc_hha {
+ /* Must be first element */
+ struct cache_coherency_ops_inst cci;
+ /* Locks HHA instance to forbid overlapping access. */
+ struct mutex lock;
+ void __iomem *base;
+};
+
+static bool hisi_hha_cache_maintain_wait_finished(struct hisi_soc_hha *soc_hha)
+{
+ u32 val;
+
+ return !readl_poll_timeout_atomic(soc_hha->base + HISI_HHA_CTRL, val,
+ !(val & HISI_HHA_CTRL_EN),
+ HISI_HHA_POLL_GAP_US,
+ HISI_HHA_POLL_TIMEOUT_US);
+}
+
+static int hisi_soc_hha_wbinv(struct cache_coherency_ops_inst *cci,
+ struct cc_inval_params *invp)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+ phys_addr_t top, addr = invp->addr;
+ size_t size = invp->size;
+ u32 reg;
+
+ if (!size)
+ return -EINVAL;
+
+ top = ALIGN(addr + size, HISI_HHA_MAINT_ALIGN);
+ addr = ALIGN_DOWN(addr, HISI_HHA_MAINT_ALIGN);
+ size = top - addr;
+
+ guard(mutex)(&soc_hha->lock);
+
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -EBUSY;
+
+ /*
+ * Hardware will search for addresses in the range [addr, addr + size - 1],
+ * last byte included, and perform maintenance in 128-byte granules on
+ * those cachelines which contain the addresses. If a given instance is
+ * either not responsible for a cacheline, or that cacheline is not
+ * currently present, the search will fail, no operation will be needed,
+ * and the device will still report success.
+ */
+ size -= 1;
+
+ writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L);
+ writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H);
+ writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L);
+ writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H);
+
+ reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, 1); /* Clean Invalid */
+ reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN;
+ writel(reg, soc_hha->base + HISI_HHA_CTRL);
+
+ return 0;
+}
+
+static int hisi_soc_hha_done(struct cache_coherency_ops_inst *cci)
+{
+ struct hisi_soc_hha *soc_hha =
+ container_of(cci, struct hisi_soc_hha, cci);
+
+ guard(mutex)(&soc_hha->lock);
+ if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static const struct cache_coherency_ops hha_ops = {
+ .wbinv = hisi_soc_hha_wbinv,
+ .done = hisi_soc_hha_done,
+};
+
+static int hisi_soc_hha_probe(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha;
+ struct resource *mem;
+ int ret;
+
+ soc_hha = cache_coherency_ops_instance_alloc(&hha_ops,
+ struct hisi_soc_hha, cci);
+ if (!soc_hha)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, soc_hha);
+
+ mutex_init(&soc_hha->lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ ret = -ENOMEM;
+ goto err_free_cci;
+ }
+
+ soc_hha->base = ioremap(mem->start, resource_size(mem));
+ if (!soc_hha->base) {
+ ret = dev_err_probe(&pdev->dev, -ENOMEM,
+ "failed to remap io memory");
+ goto err_free_cci;
+ }
+
+ ret = cache_coherency_ops_instance_register(&soc_hha->cci);
+ if (ret)
+ goto err_iounmap;
+
+ return 0;
+
+err_iounmap:
+ iounmap(soc_hha->base);
+err_free_cci:
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+ return ret;
+}
+
+static void hisi_soc_hha_remove(struct platform_device *pdev)
+{
+ struct hisi_soc_hha *soc_hha = platform_get_drvdata(pdev);
+
+ cache_coherency_ops_instance_unregister(&soc_hha->cci);
+ iounmap(soc_hha->base);
+ cache_coherency_ops_instance_put(&soc_hha->cci);
+}
+
+static const struct acpi_device_id hisi_soc_hha_ids[] = {
+ { "HISI0511", },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_soc_hha_ids);
+
+static struct platform_driver hisi_soc_hha_driver = {
+ .driver = {
+ .name = "hisi_soc_hha",
+ .acpi_match_table = hisi_soc_hha_ids,
+ },
+ .probe = hisi_soc_hha_probe,
+ .remove = hisi_soc_hha_remove,
+};
+
+module_platform_driver(hisi_soc_hha_driver);
+
+MODULE_IMPORT_NS("CACHE_COHERENCY");
+MODULE_DESCRIPTION("HiSilicon Hydra Home Agent driver supporting cache maintenance");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_AUTHOR("Yushan Wang <wangyushan12@huawei.com>");
+MODULE_LICENSE("GPL");
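The range setup in hisi_soc_hha_wbinv() widens the request to whole 128-byte granules before programming an inclusive length, and the rounded-up end must be computed from the original address so an unaligned request is not truncated. The arithmetic in isolation, with values picked for illustration:

#define GRANULE	128

static void example_range(void)
{
	phys_addr_t addr = 0x1234;		/* arbitrary unaligned request */
	size_t size = 100;
	phys_addr_t top;

	top = ALIGN(addr + size, GRANULE);	/* ALIGN(0x1298, 128) == 0x1300 */
	addr = ALIGN_DOWN(addr, GRANULE);	/* 0x1200 */
	size = top - addr;			/* 0x100: two full granules */
	size -= 1;				/* hardware takes the last byte as included */
}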
diff --git a/drivers/cache/sifive_ccache.c b/drivers/cache/sifive_ccache.c
index e1a283805ea7..a86800b123b9 100644
--- a/drivers/cache/sifive_ccache.c
+++ b/drivers/cache/sifive_ccache.c
@@ -151,16 +151,16 @@ static void ccache_flush_range(phys_addr_t start, size_t len)
if (!len)
return;
- mb();
+ mb(); /* complete earlier memory accesses before the cache flush */
for (line = ALIGN_DOWN(start, SIFIVE_CCACHE_LINE_SIZE); line < end;
line += SIFIVE_CCACHE_LINE_SIZE) {
#ifdef CONFIG_32BIT
- writel(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
+ writel_relaxed(line >> 4, ccache_base + SIFIVE_CCACHE_FLUSH32);
#else
- writeq(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
+ writeq_relaxed(line, ccache_base + SIFIVE_CCACHE_FLUSH64);
#endif
- mb();
}
+ mb(); /* issue later memory accesses after the cache flush */
}
static const struct riscv_nonstd_cache_ops ccache_mgmt_ops __initconst = {
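The sifive_ccache change reflects a common MMIO idiom: writel()/writeq() imply a barrier per access, which is wasted when a loop hammers one doorbell register. Relaxed accessors with a single fence on each side keep the device writes ordered against surrounding memory traffic. A generic sketch (the doorbell offset and line size are parameters here, not the ccache's actual values):

static void example_flush_range(void __iomem *doorbell, phys_addr_t start,
				size_t len, unsigned long line_size)
{
	phys_addr_t line, end = start + len;

	mb();	/* order earlier memory accesses before the flush requests */
	for (line = ALIGN_DOWN(start, line_size); line < end; line += line_size)
		writeq_relaxed(line, doorbell);
	mb();	/* order the flush requests before later memory accesses */
}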
diff --git a/drivers/cdx/Kconfig b/drivers/cdx/Kconfig
index 3af41f51cf38..1f1e360507d7 100644
--- a/drivers/cdx/Kconfig
+++ b/drivers/cdx/Kconfig
@@ -8,7 +8,6 @@
config CDX_BUS
bool "CDX Bus driver"
depends on OF && ARM64 || COMPILE_TEST
- select GENERIC_MSI_IRQ
help
Driver to enable Composable DMA Transfer(CDX) Bus. CDX bus
exposes Fabric devices which uses composable DMA IP to the
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index 092306ca2541..b39af2f1937f 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -170,7 +170,7 @@ static int cdx_unregister_device(struct device *dev,
return 0;
}
-static void cdx_unregister_devices(struct bus_type *bus)
+static void cdx_unregister_devices(const struct bus_type *bus)
{
/* Reset all the devices attached to cdx bus */
bus_for_each_dev(bus, NULL, NULL, cdx_unregister_device);
@@ -310,7 +310,7 @@ static int cdx_probe(struct device *dev)
* Setup MSI device data so that generic MSI alloc/free can
* be used by the device driver.
*/
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
error = msi_setup_device_data(&cdx_dev->dev);
if (error)
return error;
@@ -651,7 +651,7 @@ static struct attribute *cdx_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(cdx_bus);
-struct bus_type cdx_bus_type = {
+const struct bus_type cdx_bus_type = {
.name = "cdx",
.match = cdx_bus_match,
.probe = cdx_probe,
@@ -833,7 +833,7 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
cdx_dev->dev_num);
- if (cdx->msi_domain) {
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ) && cdx->msi_domain) {
cdx_dev->num_msi = dev_params->num_msi;
dev_set_msi_domain(&cdx_dev->dev, cdx->msi_domain);
}
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
index 3388a5d1462c..91b95422b263 100644
--- a/drivers/cdx/cdx_msi.c
+++ b/drivers/cdx/cdx_msi.c
@@ -174,6 +174,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
}
parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
+ of_node_put(parent_node);
if (!parent || !msi_get_domain_info(parent)) {
dev_err(dev, "unable to locate ITS domain\n");
return NULL;
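The one-line cdx_msi fix above plugs a device-node reference leak: lookups such as of_parse_phandle() return the node with its refcount elevated, and the caller must drop it with of_node_put() once the node is no longer needed. A minimal sketch of the pattern ("msi-parent" is an assumed property name):

static struct irq_domain *example_find_msi_parent(struct device *dev)
{
	struct device_node *parent_node;
	struct irq_domain *parent;

	parent_node = of_parse_phandle(dev->of_node, "msi-parent", 0);
	if (!parent_node)
		return NULL;

	parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node),
					  DOMAIN_BUS_NEXUS);
	of_node_put(parent_node);	/* drop the reference the lookup took */

	return parent;
}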
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
index 0641a4c21e66..a480b62cbd1f 100644
--- a/drivers/cdx/controller/Kconfig
+++ b/drivers/cdx/controller/Kconfig
@@ -10,7 +10,6 @@ if CDX_BUS
config CDX_CONTROLLER
tristate "CDX bus controller"
depends on HAS_DMA
- select GENERIC_MSI_IRQ
select REMOTEPROC
select RPMSG
help
diff --git a/drivers/cdx/controller/bitfield.h b/drivers/cdx/controller/bitfield.h
deleted file mode 100644
index 567f8ec47582..000000000000
--- a/drivers/cdx/controller/bitfield.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_BITFIELD_H
-#define CDX_BITFIELD_H
-
-#include <linux/bitfield.h>
-
-/* Lowest bit numbers and widths */
-#define CDX_DWORD_LBN 0
-#define CDX_DWORD_WIDTH 32
-
-/* Specified attribute (e.g. LBN) of the specified field */
-#define CDX_VAL(field, attribute) field ## _ ## attribute
-/* Low bit number of the specified field */
-#define CDX_LOW_BIT(field) CDX_VAL(field, LBN)
-/* Bit width of the specified field */
-#define CDX_WIDTH(field) CDX_VAL(field, WIDTH)
-/* High bit number of the specified field */
-#define CDX_HIGH_BIT(field) (CDX_LOW_BIT(field) + CDX_WIDTH(field) - 1)
-
-/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
-struct cdx_dword {
- __le32 cdx_u32;
-};
-
-/* Value expanders for printk */
-#define CDX_DWORD_VAL(dword) \
- ((unsigned int)le32_to_cpu((dword).cdx_u32))
-
-/*
- * Extract bit field portion [low,high) from the 32-bit little-endian
- * element which contains bits [min,max)
- */
-#define CDX_DWORD_FIELD(dword, field) \
- (FIELD_GET(GENMASK(CDX_HIGH_BIT(field), CDX_LOW_BIT(field)), \
- le32_to_cpu((dword).cdx_u32)))
-
-/*
- * Creates the portion of the named bit field that lies within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELD(field, value) \
- (FIELD_PREP(GENMASK(CDX_HIGH_BIT(field), \
- CDX_LOW_BIT(field)), value))
-
-/*
- * Creates the portion of the named bit fields that lie within the
- * range [min,max).
- */
-#define CDX_INSERT_FIELDS(field1, value1, \
- field2, value2, \
- field3, value3, \
- field4, value4, \
- field5, value5, \
- field6, value6, \
- field7, value7) \
- (CDX_INSERT_FIELD(field1, (value1)) | \
- CDX_INSERT_FIELD(field2, (value2)) | \
- CDX_INSERT_FIELD(field3, (value3)) | \
- CDX_INSERT_FIELD(field4, (value4)) | \
- CDX_INSERT_FIELD(field5, (value5)) | \
- CDX_INSERT_FIELD(field6, (value6)) | \
- CDX_INSERT_FIELD(field7, (value7)))
-
-#define CDX_POPULATE_DWORD(dword, ...) \
- (dword).cdx_u32 = cpu_to_le32(CDX_INSERT_FIELDS(__VA_ARGS__))
-
-/* Populate a dword field with various numbers of arguments */
-#define CDX_POPULATE_DWORD_7 CDX_POPULATE_DWORD
-#define CDX_POPULATE_DWORD_6(dword, ...) \
- CDX_POPULATE_DWORD_7(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_5(dword, ...) \
- CDX_POPULATE_DWORD_6(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_4(dword, ...) \
- CDX_POPULATE_DWORD_5(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_3(dword, ...) \
- CDX_POPULATE_DWORD_4(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_2(dword, ...) \
- CDX_POPULATE_DWORD_3(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_POPULATE_DWORD_1(dword, ...) \
- CDX_POPULATE_DWORD_2(dword, CDX_DWORD, 0, __VA_ARGS__)
-#define CDX_SET_DWORD(dword) \
- CDX_POPULATE_DWORD_1(dword, CDX_DWORD, 0xffffffff)
-
-#endif /* CDX_BITFIELD_H */
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index fca83141e3e6..280f207735da 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -14,7 +14,7 @@
#include "cdx_controller.h"
#include "../cdx.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static unsigned int cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
{
@@ -193,7 +193,8 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
cdx->ops = &cdx_ops;
/* Create MSI domain */
- cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
+ if (IS_ENABLED(CONFIG_GENERIC_MSI_IRQ))
+ cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
if (!cdx->msi_domain) {
ret = dev_err_probe(&pdev->dev, -ENODEV, "cdx_msi_domain_init() failed");
goto cdx_msi_fail;
diff --git a/drivers/cdx/controller/cdx_rpmsg.c b/drivers/cdx/controller/cdx_rpmsg.c
index 61f1a290ff08..59aabd99fa8f 100644
--- a/drivers/cdx/controller/cdx_rpmsg.c
+++ b/drivers/cdx/controller/cdx_rpmsg.c
@@ -15,7 +15,7 @@
#include "../cdx.h"
#include "cdx_controller.h"
#include "mcdi_functions.h"
-#include "mcdi.h"
+#include "mcdid.h"
static struct rpmsg_device_id cdx_rpmsg_id_table[] = {
{ .name = "mcdi_ipc" },
diff --git a/drivers/cdx/controller/mcdi.c b/drivers/cdx/controller/mcdi.c
index e760f8d347cc..2e82ffc18d89 100644
--- a/drivers/cdx/controller/mcdi.c
+++ b/drivers/cdx/controller/mcdi.c
@@ -23,9 +23,10 @@
#include <linux/log2.h>
#include <linux/net_tstamp.h>
#include <linux/wait.h>
+#include <linux/cdx/bitfield.h>
-#include "bitfield.h"
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
static void cdx_mcdi_cancel_cmd(struct cdx_mcdi *cdx, struct cdx_mcdi_cmd *cmd);
static void cdx_mcdi_wait_for_cleanup(struct cdx_mcdi *cdx);
@@ -99,6 +100,19 @@ static unsigned long cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd
return cdx->mcdi_ops->mcdi_rpc_timeout(cdx, cmd);
}
+/**
+ * cdx_mcdi_init - Initialize MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function allocates and initializes internal MCDI structures and resources
+ * for the CDX device, including the workqueue, locking primitives, and command
+ * tracking mechanisms. It sets the initial operating mode and prepares the device
+ * for MCDI operations.
+ *
+ * Return:
+ * * 0 - on success
+ * * -ENOMEM - if memory allocation or workqueue creation fails
+ */
int cdx_mcdi_init(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -128,7 +142,16 @@ fail2:
fail:
return rc;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_init);
+/**
+ * cdx_mcdi_finish - Cleanup MCDI (Management Controller Driver Interface) state
+ * @cdx: Handle to the CDX MCDI structure
+ *
+ * This function is responsible for cleaning up the MCDI (Management Controller Driver Interface)
+ * resources associated with a cdx_mcdi structure. Also destroys the mcdi workqueue.
+ *
+ */
void cdx_mcdi_finish(struct cdx_mcdi *cdx)
{
struct cdx_mcdi_iface *mcdi;
@@ -143,6 +166,7 @@ void cdx_mcdi_finish(struct cdx_mcdi *cdx)
kfree(cdx->mcdi);
cdx->mcdi = NULL;
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_finish);
static bool cdx_mcdi_flushed(struct cdx_mcdi_iface *mcdi, bool ignore_cleanups)
{
@@ -553,6 +577,19 @@ static void cdx_mcdi_start_or_queue(struct cdx_mcdi_iface *mcdi,
cdx_mcdi_cmd_start_or_queue(mcdi, cmd);
}
+/**
+ * cdx_mcdi_process_cmd - Process an incoming MCDI response
+ * @cdx: Handle to the CDX MCDI structure
+ * @outbuf: Pointer to the response buffer received from the management controller
+ * @len: Length of the response buffer in bytes
+ *
+ * This function handles a response from the management controller. It locates the
+ * corresponding command using the sequence number embedded in the header,
+ * completes the command if it is still pending, and initiates any necessary cleanup.
+ *
+ * The function assumes that the response buffer is well-formed and at least one
+ * dword in size.
+ */
void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len)
{
struct cdx_mcdi_iface *mcdi;
@@ -590,6 +627,7 @@ void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int le
cdx_mcdi_process_cleanup_list(mcdi->cdx, &cleanup_list);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_process_cmd);
static void cdx_mcdi_cmd_work(struct work_struct *context)
{
@@ -757,6 +795,7 @@ int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
return cdx_mcdi_rpc_sync(cdx, cmd, inbuf, inlen, outbuf, outlen,
outlen_actual, false);
}
+EXPORT_SYMBOL_GPL(cdx_mcdi_rpc);
/**
* cdx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
diff --git a/drivers/cdx/controller/mcdi.h b/drivers/cdx/controller/mcdi.h
deleted file mode 100644
index 54a65e9760ae..000000000000
--- a/drivers/cdx/controller/mcdi.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright 2008-2013 Solarflare Communications Inc.
- * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
- */
-
-#ifndef CDX_MCDI_H
-#define CDX_MCDI_H
-
-#include <linux/mutex.h>
-#include <linux/kref.h>
-#include <linux/rpmsg.h>
-
-#include "bitfield.h"
-#include "mc_cdx_pcol.h"
-
-#ifdef DEBUG
-#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
-#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
-#else
-#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
-#define CDX_WARN_ON_PARANOID(x) do {} while (0)
-#endif
-
-/**
- * enum cdx_mcdi_mode - MCDI transaction mode
- * @MCDI_MODE_EVENTS: wait for an mcdi response callback.
- * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
- */
-enum cdx_mcdi_mode {
- MCDI_MODE_EVENTS,
- MCDI_MODE_FAIL,
-};
-
-#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_RPC_LONG_TIMEOU (60 * HZ)
-#define MCDI_RPC_POST_RST_TIME (10 * HZ)
-
-#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
-
-/**
- * enum cdx_mcdi_cmd_state - State for an individual MCDI command
- * @MCDI_STATE_QUEUED: Command not started and is waiting to run.
- * @MCDI_STATE_RETRY: Command was submitted and MC rejected with no resources,
- * as MC have too many outstanding commands. Command will be retried once
- * another command returns.
- * @MCDI_STATE_RUNNING: Command was accepted and is running.
- * @MCDI_STATE_RUNNING_CANCELLED: Command is running but the issuer cancelled
- * the command.
- * @MCDI_STATE_FINISHED: Processing of this command has completed.
- */
-
-enum cdx_mcdi_cmd_state {
- MCDI_STATE_QUEUED,
- MCDI_STATE_RETRY,
- MCDI_STATE_RUNNING,
- MCDI_STATE_RUNNING_CANCELLED,
- MCDI_STATE_FINISHED,
-};
-
-/**
- * struct cdx_mcdi - CDX MCDI Firmware interface, to interact
- * with CDX controller.
- * @mcdi: MCDI interface
- * @mcdi_ops: MCDI operations
- * @r5_rproc : R5 Remoteproc device handle
- * @rpdev: RPMsg device
- * @ept: RPMsg endpoint
- * @work: Post probe work
- */
-struct cdx_mcdi {
- /* MCDI interface */
- struct cdx_mcdi_data *mcdi;
- const struct cdx_mcdi_ops *mcdi_ops;
-
- struct rproc *r5_rproc;
- struct rpmsg_device *rpdev;
- struct rpmsg_endpoint *ept;
- struct work_struct work;
-};
-
-struct cdx_mcdi_ops {
- void (*mcdi_request)(struct cdx_mcdi *cdx,
- const struct cdx_dword *hdr, size_t hdr_len,
- const struct cdx_dword *sdu, size_t sdu_len);
- unsigned int (*mcdi_rpc_timeout)(struct cdx_mcdi *cdx, unsigned int cmd);
-};
-
-typedef void cdx_mcdi_async_completer(struct cdx_mcdi *cdx,
- unsigned long cookie, int rc,
- struct cdx_dword *outbuf,
- size_t outlen_actual);
-
-/**
- * struct cdx_mcdi_cmd - An outstanding MCDI command
- * @ref: Reference count. There will be one reference if the command is
- * in the mcdi_iface cmd_list, another if it's on a cleanup list,
- * and a third if it's queued in the work queue.
- * @list: The data for this entry in mcdi->cmd_list
- * @cleanup_list: The data for this entry in a cleanup list
- * @work: The work item for this command, queued in mcdi->workqueue
- * @mcdi: The mcdi_iface for this command
- * @state: The state of this command
- * @inlen: inbuf length
- * @inbuf: Input buffer
- * @quiet: Whether to silence errors
- * @reboot_seen: Whether a reboot has been seen during this command,
- * to prevent duplicates
- * @seq: Sequence number
- * @started: Jiffies this command was started at
- * @cookie: Context for completion function
- * @completer: Completion function
- * @handle: Command handle
- * @cmd: Command number
- * @rc: Return code
- * @outlen: Length of output buffer
- * @outbuf: Output buffer
- */
-struct cdx_mcdi_cmd {
- struct kref ref;
- struct list_head list;
- struct list_head cleanup_list;
- struct work_struct work;
- struct cdx_mcdi_iface *mcdi;
- enum cdx_mcdi_cmd_state state;
- size_t inlen;
- const struct cdx_dword *inbuf;
- bool quiet;
- bool reboot_seen;
- u8 seq;
- unsigned long started;
- unsigned long cookie;
- cdx_mcdi_async_completer *completer;
- unsigned int handle;
- unsigned int cmd;
- int rc;
- size_t outlen;
- struct cdx_dword *outbuf;
- /* followed by inbuf data if necessary */
-};
-
-/**
- * struct cdx_mcdi_iface - MCDI protocol context
- * @cdx: The associated NIC
- * @iface_lock: Serialise access to this structure
- * @outstanding_cleanups: Count of cleanups
- * @cmd_list: List of outstanding and running commands
- * @workqueue: Workqueue used for delayed processing
- * @cmd_complete_wq: Waitqueue for command completion
- * @db_held_by: Command the MC doorbell is in use by
- * @seq_held_by: Command each sequence number is in use by
- * @prev_handle: The last used command handle
- * @mode: Poll for mcdi completion, or wait for an mcdi_event
- * @prev_seq: The last used sequence number
- * @new_epoch: Indicates start of day or start of MC reboot recovery
- */
-struct cdx_mcdi_iface {
- struct cdx_mcdi *cdx;
- /* Serialise access */
- struct mutex iface_lock;
- unsigned int outstanding_cleanups;
- struct list_head cmd_list;
- struct workqueue_struct *workqueue;
- wait_queue_head_t cmd_complete_wq;
- struct cdx_mcdi_cmd *db_held_by;
- struct cdx_mcdi_cmd *seq_held_by[16];
- unsigned int prev_handle;
- enum cdx_mcdi_mode mode;
- u8 prev_seq;
- bool new_epoch;
-};
-
-/**
- * struct cdx_mcdi_data - extra state for NICs that implement MCDI
- * @iface: Interface/protocol state
- * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
- */
-struct cdx_mcdi_data {
- struct cdx_mcdi_iface iface;
- u32 fn_flags;
-};
-
-static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
-{
- return cdx->mcdi ? &cdx->mcdi->iface : NULL;
-}
-
-int cdx_mcdi_init(struct cdx_mcdi *cdx);
-void cdx_mcdi_finish(struct cdx_mcdi *cdx);
-
-void cdx_mcdi_process_cmd(struct cdx_mcdi *cdx, struct cdx_dword *outbuf, int len);
-int cdx_mcdi_rpc(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- struct cdx_dword *outbuf, size_t outlen, size_t *outlen_actual);
-int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
- const struct cdx_dword *inbuf, size_t inlen,
- cdx_mcdi_async_completer *complete,
- unsigned long cookie);
-int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
- unsigned int timeout_jiffies);
-
-/*
- * We expect that 16- and 32-bit fields in MCDI requests and responses
- * are appropriately aligned, but 64-bit fields are only
- * 32-bit-aligned.
- */
-#define MCDI_DECLARE_BUF(_name, _len) struct cdx_dword _name[DIV_ROUND_UP(_len, 4)] = {{0}}
-#define _MCDI_PTR(_buf, _offset) \
- ((u8 *)(_buf) + (_offset))
-#define MCDI_PTR(_buf, _field) \
- _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
-#define _MCDI_CHECK_ALIGN(_ofst, _align) \
- ((void)BUILD_BUG_ON_ZERO((_ofst) & ((_align) - 1)), \
- (_ofst))
-#define _MCDI_DWORD(_buf, _field) \
- ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
-
-#define MCDI_BYTE(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
- *MCDI_PTR(_buf, _field))
-#define MCDI_WORD(_buf, _field) \
- ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
- le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
-#define MCDI_SET_DWORD(_buf, _field, _value) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), CDX_DWORD, _value)
-#define MCDI_DWORD(_buf, _field) \
- CDX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), CDX_DWORD)
-#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
- CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
- MC_CMD_ ## _name1, _value1)
-#define MCDI_SET_QWORD(_buf, _field, _value) \
- do { \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
- CDX_DWORD, (u32)(_value)); \
- CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
- CDX_DWORD, (u64)(_value) >> 32); \
- } while (0)
-#define MCDI_QWORD(_buf, _field) \
- (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
- (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
-
-#endif /* CDX_MCDI_H */
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
index 885c69e6ebe5..8ae2d99be81e 100644
--- a/drivers/cdx/controller/mcdi_functions.c
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -5,7 +5,6 @@
#include <linux/module.h>
-#include "mcdi.h"
#include "mcdi_functions.h"
int cdx_mcdi_get_num_buses(struct cdx_mcdi *cdx)
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
index b9942affdc6b..57fd1bae706b 100644
--- a/drivers/cdx/controller/mcdi_functions.h
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -8,7 +8,8 @@
#ifndef CDX_MCDI_FUNCTIONS_H
#define CDX_MCDI_FUNCTIONS_H
-#include "mcdi.h"
+#include <linux/cdx/mcdi.h>
+#include "mcdid.h"
#include "../cdx.h"
/**
diff --git a/drivers/cdx/controller/mcdid.h b/drivers/cdx/controller/mcdid.h
new file mode 100644
index 000000000000..7fc29f099265
--- /dev/null
+++ b/drivers/cdx/controller/mcdid.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright (C) 2022-2025, Advanced Micro Devices, Inc.
+ */
+
+#ifndef CDX_MCDID_H
+#define CDX_MCDID_H
+
+#include <linux/mutex.h>
+#include <linux/kref.h>
+#include <linux/rpmsg.h>
+
+#include "mc_cdx_pcol.h"
+
+#ifdef DEBUG
+#define CDX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define CDX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define CDX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define CDX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)
+
+static inline struct cdx_mcdi_iface *cdx_mcdi_if(struct cdx_mcdi *cdx)
+{
+ return cdx->mcdi ? &cdx->mcdi->iface : NULL;
+}
+
+int cdx_mcdi_rpc_async(struct cdx_mcdi *cdx, unsigned int cmd,
+ const struct cdx_dword *inbuf, size_t inlen,
+ cdx_mcdi_async_completer *complete,
+ unsigned long cookie);
+int cdx_mcdi_wait_for_quiescence(struct cdx_mcdi *cdx,
+ unsigned int timeout_jiffies);
+
+/*
+ * We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.
+ */
+#define MCDI_BYTE(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
+ *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ CDX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ CDX_DWORD, (u32)(_value)); \
+ CDX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ CDX_DWORD, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], CDX_DWORD) | \
+ (u64)CDX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], CDX_DWORD) << 32)
+
+#endif /* CDX_MCDID_H */
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index e9b360cdc99a..1291369b9126 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -6,6 +6,7 @@
obj-y += mem.o random.o
obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
+obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
index f9bec10a6064..0849d933a2d5 100644
--- a/drivers/char/adi.c
+++ b/drivers/char/adi.c
@@ -80,8 +80,8 @@ static ssize_t adi_read(struct file *file, char __user *buf,
bytes_read += ver_buf_sz;
ver_buf_idx = 0;
- ver_buf_sz = min(count - bytes_read,
- (size_t)MAX_BUF_SZ);
+ ver_buf_sz = min_t(size_t, count - bytes_read,
+ MAX_BUF_SZ);
}
}
@@ -131,7 +131,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
ssize_t ret;
int i;
- if (count <= 0)
+ if (count == 0)
return -EINVAL;
ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
@@ -157,7 +157,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
}
bytes_written += ver_buf_sz;
- ver_buf_sz = min(count - bytes_written, (size_t)MAX_BUF_SZ);
+ ver_buf_sz = min_t(size_t, count - bytes_written, MAX_BUF_SZ);
} while (bytes_written < count);
(*offp) += bytes_written;
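The adi.c hunks above replace min() plus a cast with min_t(), which makes the comparison type explicit instead of coercing one operand at the call site. A small sketch of the idiom (the MAX_BUF_SZ value is assumed for illustration):

#define EXAMPLE_MAX_BUF_SZ	4096	/* assumed; the driver's MAX_BUF_SZ applies */

static size_t example_chunk(size_t count, size_t done)
{
	/* both operands are compared as size_t, no ad-hoc cast needed */
	return min_t(size_t, count - done, EXAMPLE_MAX_BUF_SZ);
}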
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 53ce352f7197..4aa5d1c76f83 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -143,17 +143,9 @@ static DEFINE_MUTEX(state_lock);
/*
- * Compatibility cruft until the IPAQ people move over to the new
- * interface.
- */
-static void __apm_get_power_status(struct apm_power_info *info)
-{
-}
-
-/*
* This allows machines to provide their own "apm get power status" function.
*/
-void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
+void (*apm_get_power_status)(struct apm_power_info *);
EXPORT_SYMBOL(apm_get_power_status);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 9fed9706d9cd..c138c468f3a4 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -835,7 +835,10 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -ENOTTY;
break;
}
- Dummy = readb(apbs[IndexCard].RamIO + VERS);
+
+ if (cmd != 6)
+ Dummy = readb(apbs[IndexCard].RamIO + VERS);
+
kfree(adgl);
mutex_unlock(&ac_mutex);
return ret;
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index 497fc167cb8c..231cbf7b300f 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -69,7 +69,8 @@ MODULE_VERSION(VERSION_STR);
static int __init hangcheck_parse_tick(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_tick = par;
return 1;
}
@@ -77,7 +78,8 @@ static int __init hangcheck_parse_tick(char *str)
static int __init hangcheck_parse_margin(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_margin = par;
return 1;
}
@@ -85,7 +87,8 @@ static int __init hangcheck_parse_margin(char *str)
static int __init hangcheck_parse_reboot(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_reboot = par;
return 1;
}
@@ -93,7 +96,8 @@ static int __init hangcheck_parse_reboot(char *str)
static int __init hangcheck_parse_dump_tasks(char *str)
{
int par;
- if (get_option(&str,&par))
+
+ if (get_option(&str, &par))
hangcheck_dump_tasks = par;
return 1;
}
@@ -126,23 +130,23 @@ static void hangcheck_fire(struct timer_list *unused)
if (tsc_diff > hangcheck_tsc_margin) {
if (hangcheck_dump_tasks) {
- printk(KERN_CRIT "Hangcheck: Task state:\n");
+ pr_crit("Hangcheck: Task state:\n");
#ifdef CONFIG_MAGIC_SYSRQ
handle_sysrq('t');
#endif /* CONFIG_MAGIC_SYSRQ */
}
if (hangcheck_reboot) {
- printk(KERN_CRIT "Hangcheck: hangcheck is restarting the machine.\n");
+ pr_crit("Hangcheck: hangcheck is restarting the machine.\n");
emergency_restart();
} else {
- printk(KERN_CRIT "Hangcheck: hangcheck value past margin!\n");
+ pr_crit("Hangcheck: hangcheck value past margin!\n");
}
}
#if 0
/*
* Enable to investigate delays in detail
*/
- printk("Hangcheck: called %Ld ns since last time (%Ld ns overshoot)\n",
+ pr_debug("Hangcheck: called %lld ns since last time (%lld ns overshoot)\n",
tsc_diff, tsc_diff - hangcheck_tick*TIMER_FREQ);
#endif
mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
@@ -152,7 +156,7 @@ static void hangcheck_fire(struct timer_list *unused)
static int __init hangcheck_init(void)
{
- printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
+ pr_debug("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
VERSION_STR, hangcheck_tick, hangcheck_margin);
hangcheck_tsc_margin =
(unsigned long long)hangcheck_margin + hangcheck_tick;
@@ -168,7 +172,7 @@ static int __init hangcheck_init(void)
static void __exit hangcheck_exit(void)
{
timer_delete_sync(&hangcheck_ticktock);
- printk("Hangcheck: Stopped hangcheck timer.\n");
+ pr_debug("Hangcheck: Stopped hangcheck timer.\n");
}
module_init(hangcheck_init);
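
The printk(KERN_CRIT ...) to pr_crit() conversion above keeps the "Hangcheck: " prefix in every format string. A hedged sketch of how a pr_fmt definition, which is not part of this patch, would factor that prefix out instead:

	/* must appear before printk.h is pulled in */
	#define pr_fmt(fmt) "Hangcheck: " fmt

	#include <linux/printk.h>

	pr_crit("hangcheck value past margin!\n");	/* logs "Hangcheck: hangcheck value past margin!" */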
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 0713ea2b2a51..4f5ccd3a1f56 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -867,7 +867,7 @@ int hpet_alloc(struct hpet_data *hdp)
printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s",
hpetp->hp_which, hdp->hd_phys_address,
- hpetp->hp_ntimer > 1 ? "s" : "");
+ str_plural(hpetp->hp_ntimer));
for (i = 0; i < hpetp->hp_ntimer; i++)
printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]);
printk(KERN_CONT "\n");
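
str_plural() returns "" for a count of one and "s" otherwise, so it matches the open-coded `> 1` test for every count except zero (where it yields "s" rather than ""); with hp_ntimer always at least one, that difference is moot here. A minimal sketch with a hypothetical count n:

	#include <linux/string_choices.h>

	printk(KERN_INFO "hpet: %u timer%s\n", n, str_plural(n));	/* "1 timer", "3 timers" */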
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index c85827843447..492a2a61a65b 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -77,7 +77,7 @@ config HW_RANDOM_AIROHA
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on (ARCH_MICROCHIP || COMPILE_TEST)
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -312,6 +312,7 @@ config HW_RANDOM_INGENIC_TRNG
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK || COMPILE_TEST
+ depends on ARM_AMBA
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
index aa2b135e3ee2..6d6ac409efcf 100644
--- a/drivers/char/hw_random/bcm2835-rng.c
+++ b/drivers/char/hw_random/bcm2835-rng.c
@@ -138,12 +138,11 @@ static const struct of_device_id bcm2835_rng_of_match[] = {
{ .compatible = "brcm,bcm6368-rng"},
{},
};
+MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
static int bcm2835_rng_probe(struct platform_device *pdev)
{
- const struct bcm2835_rng_of_data *of_data;
struct device *dev = &pdev->dev;
- const struct of_device_id *rng_id;
struct bcm2835_rng_priv *priv;
int err;
@@ -171,12 +170,10 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
priv->rng.cleanup = bcm2835_rng_cleanup;
if (dev_of_node(dev)) {
- rng_id = of_match_node(bcm2835_rng_of_match, dev->of_node);
- if (!rng_id)
- return -EINVAL;
+ const struct bcm2835_rng_of_data *of_data;
/* Check for rng init function, execute it */
- of_data = rng_id->data;
+ of_data = of_device_get_match_data(dev);
if (of_data)
priv->mask_interrupts = of_data->mask_interrupts;
}
@@ -191,8 +188,6 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
return err;
}
-MODULE_DEVICE_TABLE(of, bcm2835_rng_of_match);
-
static const struct platform_device_id bcm2835_rng_devtype[] = {
{ .name = "bcm2835-rng" },
{ .name = "bcm63xx-rng" },
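
of_device_get_match_data() collapses the of_match_node() lookup into a single call that returns the matched entry's .data pointer, or NULL when there is no match or the entry carries no data (as with the bare "brcm,bcm6368-rng" compatible above), which is why the explicit -EINVAL path can go. A sketch of the resulting pattern:

	const struct bcm2835_rng_of_data *of_data;

	of_data = of_device_get_match_data(dev);	/* NULL if no .data for this match */
	if (of_data)
		priv->mask_interrupts = of_data->mask_interrupts;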
diff --git a/drivers/char/hw_random/cn10k-rng.c b/drivers/char/hw_random/cn10k-rng.c
index 31935316a160..3b4e78182e14 100644
--- a/drivers/char/hw_random/cn10k-rng.c
+++ b/drivers/char/hw_random/cn10k-rng.c
@@ -188,7 +188,7 @@ static int cn10k_rng_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rng->reg_base = pcim_iomap(pdev, 0, 0);
if (!rng->reg_base)
- return dev_err_probe(&pdev->dev, -ENOMEM, "Error while mapping CSRs, exiting\n");
+ return -ENOMEM;
rng->ops.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
"cn10k-rng-%s", dev_name(&pdev->dev));
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 018316f54621..96d7fe41b373 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -341,6 +341,9 @@ static ssize_t rng_current_store(struct device *dev,
if (sysfs_streq(buf, "")) {
err = enable_best_rng();
+ } else if (sysfs_streq(buf, "none")) {
+ cur_rng_set_by_user = 1;
+ drop_current_rng();
} else {
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
@@ -392,7 +395,7 @@ static ssize_t rng_available_show(struct device *dev,
strlcat(buf, rng->name, PAGE_SIZE);
strlcat(buf, " ", PAGE_SIZE);
}
- strlcat(buf, "\n", PAGE_SIZE);
+ strlcat(buf, "none\n", PAGE_SIZE);
mutex_unlock(&rng_mutex);
return strlen(buf);
@@ -542,10 +545,10 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->dying);
/* Adjust quality field to always have a proper value */
- rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
+ rng->quality = min3(default_quality, 1024, rng->quality ?: 1024);
- if (!current_rng ||
- (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
+ if (!cur_rng_set_by_user &&
+ (!current_rng || rng->quality > current_rng->quality)) {
/*
* Set new rng as current as the new rng source
* provides better entropy quality and was not
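
Writing the literal string "none" now detaches the active source without picking a replacement, and rng_available advertises it alongside the real devices, e.g. echo none > /sys/class/misc/hw_random/rng_current (path as exposed through the hwrng misc device). Because that path sets cur_rng_set_by_user, the reordered hwrng_register() test also keeps a later, higher-quality source from silently overriding an explicit "none".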
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index d8fd8a354482..9e408144a10c 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -231,6 +231,10 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
if (IS_ERR(ks_sa_rng->regmap_cfg))
return dev_err_probe(dev, -EINVAL, "syscon_node_to_regmap failed\n");
+ ks_sa_rng->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(ks_sa_rng->clk))
+ return dev_err_probe(dev, PTR_ERR(ks_sa_rng->clk), "Failed to get clock\n");
+
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
diff --git a/drivers/char/hw_random/n2rng.h b/drivers/char/hw_random/n2rng.h
index 9a870f5dc371..7612f15a261f 100644
--- a/drivers/char/hw_random/n2rng.h
+++ b/drivers/char/hw_random/n2rng.h
@@ -48,7 +48,7 @@
#define HV_RNG_NUM_CONTROL 4
-#ifndef __ASSEMBLY__
+#ifndef __ASSEMBLER__
extern unsigned long sun4v_rng_get_diag_ctl(void);
extern unsigned long sun4v_rng_ctl_read_v1(unsigned long ctl_regs_ra,
unsigned long *state,
@@ -147,6 +147,6 @@ struct n2rng {
#define N2RNG_BUSY_LIMIT 100
#define N2RNG_HCHECK_LIMIT 100
-#endif /* !(__ASSEMBLY__) */
+#endif /* !(__ASSEMBLER__) */
#endif /* _N2RNG_H */
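
__ASSEMBLER__ is predefined by the compiler itself whenever it preprocesses assembly sources, so unlike the kernel's hand-maintained __ASSEMBLY__ macro it needs no -D flag from the build system; this hunk is part of the treewide rename.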
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index d27e32e9bfee..3024d5e9fd61 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -9,8 +9,7 @@
* Author(s): Harald Freudenberger <freude@de.ibm.com>
*/
-#define KMSG_COMPONENT "trng"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "trng: " fmt
#include <linux/hw_random.h>
#include <linux/kernel.h>
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index b95f6d0f17ed..e61f06393209 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -150,7 +150,7 @@ static int timeriomem_rng_probe(struct platform_device *pdev)
priv->rng_ops.quality = pdata->quality;
}
- priv->period = ns_to_ktime(period * NSEC_PER_USEC);
+ priv->period = us_to_ktime(period);
init_completion(&priv->completion);
hrtimer_setup(&priv->timer, timeriomem_rng_trigger, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index f4adc6feb3b2..92bed266d07c 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -84,6 +84,13 @@ config IPMI_IPMB
bus, and it also supports direct messaging on the bus using
IPMB direct messages. This module requires I2C support.
+config IPMI_LS2K
+ bool 'Loongson-2K IPMI interface'
+ depends on LOONGARCH
+ select MFD_LS2K_BMC_CORE
+ help
+ Provides a driver for Loongson-2K IPMI interfaces.
+
config IPMI_POWERNV
depends on PPC_POWERNV
tristate 'POWERNV (OPAL firmware) IPMI interface'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index e0944547c9d0..4ea450a82242 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -8,6 +8,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o \
ipmi_si_mem_io.o
ipmi_si-$(CONFIG_HAS_IOPORT) += ipmi_si_port_io.o
ipmi_si-$(CONFIG_PCI) += ipmi_si_pci.o
+ipmi_si-$(CONFIG_IPMI_LS2K) += ipmi_si_ls2k.o
ipmi_si-$(CONFIG_PARISC) += ipmi_si_parisc.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 6a4f279c7c1f..3a51e58b2487 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -404,8 +404,7 @@ static void ipmi_ipmb_shutdown(void *send_info)
ipmi_ipmb_stop_thread(iidev);
}
-static void ipmi_ipmb_sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int ipmi_ipmb_sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_ipmb_dev *iidev = send_info;
unsigned long flags;
@@ -417,6 +416,7 @@ static void ipmi_ipmb_sender(void *send_info,
spin_unlock_irqrestore(&iidev->lock, flags);
up(&iidev->wake_thread);
+ return IPMI_CC_NO_ERROR;
}
static void ipmi_ipmb_request_events(void *send_info)
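
Throughout this series the low-level sender callback changes from void to returning an IPMI completion code, IPMI_CC_NO_ERROR on success. A hedged sketch of the new contract; example_dev and queue_for_transmit are hypothetical stand-ins, not names from the tree:

	static int example_sender(void *send_info, struct ipmi_smi_msg *msg)
	{
		struct example_dev *dev = send_info;	/* hypothetical driver state */

		if (dev->bus_dead)
			return IPMI_BUS_ERR;		/* msghandler turns this into an error response */

		queue_for_transmit(dev, msg);		/* hypothetical enqueue helper */
		return IPMI_CC_NO_ERROR;
	}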
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index ecfcb50302f6..efda90dcf5b3 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -122,10 +122,10 @@ struct si_sm_data {
unsigned long error0_timeout;
};
-static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
- struct si_sm_io *io, enum kcs_states state)
+static unsigned int init_kcs_data(struct si_sm_data *kcs,
+ struct si_sm_io *io)
{
- kcs->state = state;
+ kcs->state = KCS_IDLE;
kcs->io = io;
kcs->write_pos = 0;
kcs->write_count = 0;
@@ -140,12 +140,6 @@ static unsigned int init_kcs_data_with_state(struct si_sm_data *kcs,
return 2;
}
-static unsigned int init_kcs_data(struct si_sm_data *kcs,
- struct si_sm_io *io)
-{
- return init_kcs_data_with_state(kcs, io, KCS_IDLE);
-}
-
static inline unsigned char read_status(struct si_sm_data *kcs)
{
return kcs->io->inputb(kcs->io, 1);
@@ -276,7 +270,7 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
if (size > MAX_KCS_WRITE_SIZE)
return IPMI_REQ_LEN_EXCEEDED_ERR;
- if (kcs->state != KCS_IDLE) {
+ if ((kcs->state != KCS_IDLE) && (kcs->state != KCS_HOSED)) {
dev_warn(kcs->io->dev, "KCS in invalid state %d\n", kcs->state);
return IPMI_NOT_IN_MY_STATE_ERR;
}
@@ -501,7 +495,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
}
if (kcs->state == KCS_HOSED) {
- init_kcs_data_with_state(kcs, kcs->io, KCS_ERROR0);
+ init_kcs_data(kcs, kcs->io);
return SI_SM_HOSED;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 8e9050f99e9e..3f48fc6ab596 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -38,7 +38,9 @@
#define IPMI_DRIVER_VERSION "39.2"
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
@@ -50,6 +52,8 @@ static void intf_free(struct kref *ref);
static bool initialized;
static bool drvregistered;
+static struct timer_list ipmi_timer;
+
/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
IPMI_SEND_PANIC_EVENT_NONE,
@@ -432,6 +436,7 @@ struct ipmi_smi {
atomic_t nr_users;
struct device_attribute nr_users_devattr;
struct device_attribute nr_msgs_devattr;
+ struct device_attribute maintenance_mode_devattr;
/* Used for wake ups at startup. */
@@ -464,7 +469,7 @@ struct ipmi_smi {
* interface to match them up with their responses. A routine
* is called periodically to time the items in this list.
*/
- spinlock_t seq_lock;
+ struct mutex seq_lock;
struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
int curr_seq;
@@ -539,7 +544,11 @@ struct ipmi_smi {
/* For handling of maintenance mode. */
int maintenance_mode;
- bool maintenance_mode_enable;
+
+#define IPMI_MAINTENANCE_MODE_STATE_OFF 0
+#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1
+#define IPMI_MAINTENANCE_MODE_STATE_RESET 2
+ int maintenance_mode_state;
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
@@ -590,7 +599,8 @@ static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
struct ipmi_device_id *id,
bool guid_set, guid_t *guid, int intf_num);
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id, bool rescan);
static void free_ipmi_user(struct kref *ref)
{
@@ -955,7 +965,6 @@ static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
* risk. At this moment, simply skip it in that case.
*/
ipmi_free_recv_msg(msg);
- atomic_dec(&msg->user->nr_msgs);
} else {
/*
* Deliver it in smi_work. The message will hold a
@@ -1116,12 +1125,11 @@ static int intf_find_seq(struct ipmi_smi *intf,
struct ipmi_recv_msg **recv_msg)
{
int rv = -ENODEV;
- unsigned long flags;
if (seq >= IPMI_IPMB_NUM_SEQ)
return -EINVAL;
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->seq_table[seq].inuse) {
struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
@@ -1134,7 +1142,7 @@ static int intf_find_seq(struct ipmi_smi *intf,
rv = 0;
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1145,14 +1153,13 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
long msgid)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1163,7 +1170,7 @@ static int intf_start_seq_timer(struct ipmi_smi *intf,
ent->timeout = ent->orig_timeout;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
return rv;
}
@@ -1174,7 +1181,6 @@ static int intf_err_seq(struct ipmi_smi *intf,
unsigned int err)
{
int rv = -ENODEV;
- unsigned long flags;
unsigned char seq;
unsigned long seqid;
struct ipmi_recv_msg *msg = NULL;
@@ -1182,7 +1188,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
GET_SEQ_FROM_MSGID(msgid, seq, seqid);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* We do this verification because the user can be deleted
* while a message is outstanding.
@@ -1196,7 +1202,7 @@ static int intf_err_seq(struct ipmi_smi *intf,
msg = ent->recv_msg;
rv = 0;
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
if (msg)
deliver_err_response(intf, msg, err);
@@ -1209,7 +1215,6 @@ int ipmi_create_user(unsigned int if_num,
void *handler_data,
struct ipmi_user **user)
{
- unsigned long flags;
struct ipmi_user *new_user = NULL;
int rv = 0;
struct ipmi_smi *intf;
@@ -1277,9 +1282,9 @@ int ipmi_create_user(unsigned int if_num,
new_user->gets_events = false;
mutex_lock(&intf->users_mutex);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
list_add(&new_user->link, &intf->users);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
mutex_unlock(&intf->users_mutex);
if (handler->ipmi_watchdog_pretimeout)
@@ -1325,7 +1330,6 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
{
struct ipmi_smi *intf = user->intf;
int i;
- unsigned long flags;
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
struct ipmi_recv_msg *msg, *msg2;
@@ -1346,7 +1350,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
list_del(&user->link);
atomic_dec(&intf->nr_users);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
if (intf->seq_table[i].inuse
&& (intf->seq_table[i].recv_msg->user == user)) {
@@ -1355,7 +1359,7 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
}
}
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Remove the user from the command receiver's table. First
@@ -1534,8 +1538,15 @@ EXPORT_SYMBOL(ipmi_get_maintenance_mode);
static void maintenance_mode_update(struct ipmi_smi *intf)
{
if (intf->handlers->set_maintenance_mode)
+ /*
+ * Lower level drivers only care about firmware mode
+ * as it affects their timing. They don't care about
+ * reset, which disables all commands for a while.
+ */
intf->handlers->set_maintenance_mode(
- intf->send_info, intf->maintenance_mode_enable);
+ intf->send_info,
+ (intf->maintenance_mode_state ==
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE));
}
int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
@@ -1552,16 +1563,17 @@ int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
if (intf->maintenance_mode != mode) {
switch (mode) {
case IPMI_MAINTENANCE_MODE_AUTO:
- intf->maintenance_mode_enable
- = (intf->auto_maintenance_timeout > 0);
+ /* Just leave it alone. */
break;
case IPMI_MAINTENANCE_MODE_OFF:
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
break;
case IPMI_MAINTENANCE_MODE_ON:
- intf->maintenance_mode_enable = true;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
break;
default:
@@ -1616,8 +1628,7 @@ int ipmi_set_gets_events(struct ipmi_user *user, bool val)
}
list_for_each_entry_safe(msg, msg2, &msgs, link) {
- msg->user = user;
- kref_get(&user->refcount);
+ ipmi_set_recv_msg_user(msg, user);
deliver_local_response(intf, msg);
}
}
@@ -1922,14 +1933,20 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
if (is_maintenance_mode_cmd(msg)) {
unsigned long flags;
+ int newst;
+
+ if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)
+ newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
+ else
+ newst = IPMI_MAINTENANCE_MODE_STATE_RESET;
spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
- intf->auto_maintenance_timeout
- = maintenance_mode_timeout_ms;
+ intf->auto_maintenance_timeout = maintenance_mode_timeout_ms;
if (!intf->maintenance_mode
- && !intf->maintenance_mode_enable) {
- intf->maintenance_mode_enable = true;
+ && intf->maintenance_mode_state < newst) {
+ intf->maintenance_mode_state = newst;
maintenance_mode_update(intf);
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
flags);
@@ -1943,7 +1960,7 @@ static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
smi_msg->data[1] = msg->cmd;
smi_msg->msgid = msgid;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
if (msg->data_len > 0)
memcpy(&smi_msg->data[2], msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 2;
@@ -2024,12 +2041,9 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (is_maintenance_mode_cmd(msg))
intf->ipmb_maintenance_mode_timeout =
@@ -2087,7 +2101,7 @@ static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2140,7 +2154,7 @@ static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
memcpy(smi_msg->data + 4, msg->data, msg->data_len);
smi_msg->data_size = msg->data_len + 4;
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
return 0;
}
@@ -2203,12 +2217,9 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* Save the receive message so we can use it
* to deliver the response.
*/
- smi_msg->user_data = recv_msg;
+ smi_msg->recv_msg = recv_msg;
} else {
- /* It's a command, so get a sequence for it. */
- unsigned long flags;
-
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
/*
* Create a sequence number with a 1 second
@@ -2257,7 +2268,7 @@ static int i_ipmi_req_lan(struct ipmi_smi *intf,
* to be correct.
*/
out_err:
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ mutex_unlock(&intf->seq_lock);
}
return rv;
@@ -2288,22 +2299,18 @@ static int i_ipmi_request(struct ipmi_user *user,
int run_to_completion = READ_ONCE(intf->run_to_completion);
int rv = 0;
- if (user) {
- if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
- /* Decrement will happen at the end of the routine. */
- rv = -EBUSY;
- goto out;
- }
- }
-
- if (supplied_recv)
+ if (supplied_recv) {
recv_msg = supplied_recv;
- else {
- recv_msg = ipmi_alloc_recv_msg();
- if (recv_msg == NULL) {
- rv = -ENOMEM;
- goto out;
+ recv_msg->user = user;
+ if (user) {
+ atomic_inc(&user->nr_msgs);
+ /* The put happens when the message is freed. */
+ kref_get(&user->refcount);
}
+ } else {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg))
+ return PTR_ERR(recv_msg);
}
recv_msg->user_msg_data = user_msg_data;
@@ -2314,22 +2321,22 @@ static int i_ipmi_request(struct ipmi_user *user,
if (smi_msg == NULL) {
if (!supplied_recv)
ipmi_free_recv_msg(recv_msg);
- rv = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
}
if (!run_to_completion)
mutex_lock(&intf->users_mutex);
+ if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) {
+ /* No messages while the BMC is in reset. */
+ rv = -EBUSY;
+ goto out_err;
+ }
if (intf->in_shutdown) {
rv = -ENODEV;
goto out_err;
}
- recv_msg->user = user;
- if (user)
- /* The put happens when the message is freed. */
- kref_get(&user->refcount);
recv_msg->msgid = msgid;
/*
* Store the message to send in the receive message so timeout
@@ -2358,8 +2365,10 @@ static int i_ipmi_request(struct ipmi_user *user,
if (rv) {
out_err:
- ipmi_free_smi_msg(smi_msg);
- ipmi_free_recv_msg(recv_msg);
+ if (!supplied_smi)
+ ipmi_free_smi_msg(smi_msg);
+ if (!supplied_recv)
+ ipmi_free_recv_msg(recv_msg);
} else {
dev_dbg(intf->si_dev, "Send: %*ph\n",
smi_msg->data_size, smi_msg->data);
@@ -2369,9 +2378,6 @@ out_err:
if (!run_to_completion)
mutex_unlock(&intf->users_mutex);
-out:
- if (rv && user)
- atomic_dec(&user->nr_msgs);
return rv;
}
@@ -2622,6 +2628,12 @@ retry_bmc_lock:
(bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
goto out_noprocessing;
+ /* Don't allow sysfs access when in maintenance mode. */
+ if (intf->maintenance_mode_state) {
+ rv = -EBUSY;
+ goto out_noprocessing;
+ }
+
prev_guid_set = bmc->dyn_guid_set;
__get_guid(intf);
@@ -2657,7 +2669,7 @@ retry_bmc_lock:
if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
need_waiter(intf); /* Retry later on an error. */
else
- __scan_channels(intf, &id);
+ __scan_channels(intf, &id, false);
if (!intf_set) {
@@ -2677,7 +2689,7 @@ retry_bmc_lock:
goto out_noprocessing;
} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
/* Version info changes, scan the channels again. */
- __scan_channels(intf, &bmc->fetch_id);
+ __scan_channels(intf, &bmc->fetch_id, true);
bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
@@ -3406,8 +3418,6 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
intf->channels_ready = true;
wake_up(&intf->waitq);
} else {
- intf->channel_list = intf->wchannels + set;
- intf->channels_ready = true;
rv = send_channel_info_cmd(intf, intf->curr_channel);
}
@@ -3429,10 +3439,21 @@ channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
/*
* Must be holding intf->bmc_reg_mutex to call this.
*/
-static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
+static int __scan_channels(struct ipmi_smi *intf,
+ struct ipmi_device_id *id,
+ bool rescan)
{
int rv;
+ if (rescan) {
+ /* Clear channels_ready to force channels rescan. */
+ intf->channels_ready = false;
+ }
+
+ /* Skip channel scan if channels are already marked ready */
+ if (intf->channels_ready)
+ return 0;
+
if (ipmi_version_major(id) > 1
|| (ipmi_version_major(id) == 1
&& ipmi_version_minor(id) >= 5)) {
@@ -3517,6 +3538,19 @@ static ssize_t nr_msgs_show(struct device *dev,
}
static DEVICE_ATTR_RO(nr_msgs);
+static ssize_t maintenance_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ipmi_smi *intf = container_of(attr,
+ struct ipmi_smi,
+ maintenance_mode_devattr);
+
+ return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state,
+ intf->auto_maintenance_timeout);
+}
+static DEVICE_ATTR_RO(maintenance_mode);
+
static void redo_bmc_reg(struct work_struct *work)
{
struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
@@ -3575,7 +3609,7 @@ int ipmi_add_smi(struct module *owner,
atomic_set(&intf->nr_users, 0);
intf->handlers = handlers;
intf->send_info = send_info;
- spin_lock_init(&intf->seq_lock);
+ mutex_init(&intf->seq_lock);
for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
intf->seq_table[j].inuse = 0;
intf->seq_table[j].seqid = 0;
@@ -3634,7 +3668,7 @@ int ipmi_add_smi(struct module *owner,
}
mutex_lock(&intf->bmc_reg_mutex);
- rv = __scan_channels(intf, &id);
+ rv = __scan_channels(intf, &id, false);
mutex_unlock(&intf->bmc_reg_mutex);
if (rv)
goto out_err_bmc_reg;
@@ -3653,6 +3687,14 @@ int ipmi_add_smi(struct module *owner,
goto out_err_bmc_reg;
}
+ intf->maintenance_mode_devattr = dev_attr_maintenance_mode;
+ sysfs_attr_init(&intf->maintenance_mode_devattr.attr);
+ rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr);
+ if (rv) {
+ device_remove_file(intf->si_dev, &intf->nr_users_devattr);
+ goto out_err_bmc_reg;
+ }
+
intf->intf_num = i;
mutex_unlock(&ipmi_interfaces_mutex);
@@ -3760,6 +3802,7 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
if (intf->handlers->shutdown)
intf->handlers->shutdown(intf->send_info);
+ device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr);
device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
device_remove_file(intf->si_dev, &intf->nr_users_devattr);
@@ -3862,7 +3905,7 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_addr *ipmb_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 10) {
/* Message not big enough, just ignore it. */
@@ -3883,9 +3926,8 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -3915,47 +3957,41 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
- ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
- ipmb_addr->slave_addr = msg->rsp[6];
- ipmb_addr->lun = msg->rsp[7] & 3;
- ipmb_addr->channel = msg->rsp[3] & 0xf;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
+ ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
+ ipmb_addr->slave_addr = msg->rsp[6];
+ ipmb_addr->lun = msg->rsp[7] & 3;
+ ipmb_addr->channel = msg->rsp[3] & 0xf;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[7] >> 2;
- recv_msg->msg.netfn = msg->rsp[4] >> 2;
- recv_msg->msg.cmd = msg->rsp[8];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[7] >> 2;
+ recv_msg->msg.netfn = msg->rsp[4] >> 2;
+ recv_msg->msg.cmd = msg->rsp[8];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 10, not 9 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 10;
- memcpy(recv_msg->msg_data, &msg->rsp[9],
- msg->rsp_size - 10);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 10, not 9 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 10;
+ memcpy(recv_msg->msg_data, &msg->rsp[9],
+ msg->rsp_size - 10);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -3968,7 +4004,7 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
int rv = 0;
struct ipmi_user *user = NULL;
struct ipmi_ipmb_direct_addr *daddr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
unsigned char netfn = msg->rsp[0] >> 2;
unsigned char cmd = msg->rsp[3];
@@ -3977,9 +4013,8 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4001,44 +4036,38 @@ static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
- daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
- daddr->channel = 0;
- daddr->slave_addr = msg->rsp[1];
- daddr->rs_lun = msg->rsp[0] & 3;
- daddr->rq_lun = msg->rsp[2] & 3;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = (msg->rsp[2] >> 2);
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[3];
- recv_msg->msg.data = recv_msg->msg_data;
-
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, msg->rsp + 4,
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4050,7 +4079,7 @@ static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_ipmb_direct_addr *daddr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4152,7 +4181,7 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_lan_addr *lan_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
if (msg->rsp_size < 12) {
/* Message not big enough, just ignore it. */
@@ -4173,9 +4202,8 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4206,49 +4234,44 @@ static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
* causes it to not be freed or queued.
*/
rv = -1;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /* Extract the source address from the data. */
- lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
- lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
- lan_addr->session_handle = msg->rsp[4];
- lan_addr->remote_SWID = msg->rsp[8];
- lan_addr->local_SWID = msg->rsp[5];
- lan_addr->lun = msg->rsp[9] & 3;
- lan_addr->channel = msg->rsp[3] & 0xf;
- lan_addr->privilege = msg->rsp[3] >> 4;
+ } else if (!IS_ERR(recv_msg)) {
+ /* Extract the source address from the data. */
+ lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
+ lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
+ lan_addr->session_handle = msg->rsp[4];
+ lan_addr->remote_SWID = msg->rsp[8];
+ lan_addr->local_SWID = msg->rsp[5];
+ lan_addr->lun = msg->rsp[9] & 3;
+ lan_addr->channel = msg->rsp[3] & 0xf;
+ lan_addr->privilege = msg->rsp[3] >> 4;
- /*
- * Extract the rest of the message information
- * from the IPMB header.
- */
- recv_msg->user = user;
- recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
- recv_msg->msgid = msg->rsp[9] >> 2;
- recv_msg->msg.netfn = msg->rsp[6] >> 2;
- recv_msg->msg.cmd = msg->rsp[10];
- recv_msg->msg.data = recv_msg->msg_data;
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = msg->rsp[9] >> 2;
+ recv_msg->msg.netfn = msg->rsp[6] >> 2;
+ recv_msg->msg.cmd = msg->rsp[10];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * We chop off 12, not 11 bytes because the checksum
- * at the end also needs to be removed.
- */
- recv_msg->msg.data_len = msg->rsp_size - 12;
- memcpy(recv_msg->msg_data, &msg->rsp[11],
- msg->rsp_size - 12);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * We chop off 12, not 11 bytes because the checksum
+ * at the end also needs to be removed.
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 12;
+ memcpy(recv_msg->msg_data, &msg->rsp[11],
+ msg->rsp_size - 12);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4270,7 +4293,7 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
unsigned char chan;
struct ipmi_user *user = NULL;
struct ipmi_system_interface_addr *smi_addr;
- struct ipmi_recv_msg *recv_msg;
+ struct ipmi_recv_msg *recv_msg = NULL;
/*
* We expect the OEM SW to perform error checking
@@ -4299,9 +4322,8 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
if (rcvr) {
user = rcvr->user;
- kref_get(&user->refcount);
- } else
- user = NULL;
+ recv_msg = ipmi_alloc_recv_msg(user);
+ }
rcu_read_unlock();
if (user == NULL) {
@@ -4314,48 +4336,42 @@ static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
*/
rv = 0;
- } else {
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
- /*
- * We couldn't allocate memory for the
- * message, so requeue it for handling
- * later.
- */
- rv = 1;
- kref_put(&user->refcount, free_ipmi_user);
- } else {
- /*
- * OEM Messages are expected to be delivered via
- * the system interface to SMS software. We might
- * need to visit this again depending on OEM
- * requirements
- */
- smi_addr = ((struct ipmi_system_interface_addr *)
- &recv_msg->addr);
- smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
- smi_addr->channel = IPMI_BMC_CHANNEL;
- smi_addr->lun = msg->rsp[0] & 3;
-
- recv_msg->user = user;
- recv_msg->user_msg_data = NULL;
- recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
- recv_msg->msg.netfn = msg->rsp[0] >> 2;
- recv_msg->msg.cmd = msg->rsp[1];
- recv_msg->msg.data = recv_msg->msg_data;
+ } else if (!IS_ERR(recv_msg)) {
+ /*
+ * OEM Messages are expected to be delivered via
+ * the system interface to SMS software. We might
+ * need to visit this again depending on OEM
+ * requirements
+ */
+ smi_addr = ((struct ipmi_system_interface_addr *)
+ &recv_msg->addr);
+ smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+ smi_addr->channel = IPMI_BMC_CHANNEL;
+ smi_addr->lun = msg->rsp[0] & 3;
+
+ recv_msg->user_msg_data = NULL;
+ recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[1];
+ recv_msg->msg.data = recv_msg->msg_data;
- /*
- * The message starts at byte 4 which follows the
- * Channel Byte in the "GET MESSAGE" command
- */
- recv_msg->msg.data_len = msg->rsp_size - 4;
- memcpy(recv_msg->msg_data, &msg->rsp[4],
- msg->rsp_size - 4);
- if (deliver_response(intf, recv_msg))
- ipmi_inc_stat(intf, unhandled_commands);
- else
- ipmi_inc_stat(intf, handled_commands);
- }
+ /*
+ * The message starts at byte 4 which follows the
+ * Channel Byte in the "GET MESSAGE" command
+ */
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, &msg->rsp[4],
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ } else {
+ /*
+ * We couldn't allocate memory for the message, so
+ * requeue it for handling later.
+ */
+ rv = 1;
}
return rv;
@@ -4413,8 +4429,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
if (!user->gets_events)
continue;
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(user);
+ if (IS_ERR(recv_msg)) {
mutex_unlock(&intf->users_mutex);
list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
link) {
@@ -4435,8 +4451,6 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
deliver_count++;
copy_event_into_recv_msg(recv_msg, msg);
- recv_msg->user = user;
- kref_get(&user->refcount);
list_add_tail(&recv_msg->link, &msgs);
}
mutex_unlock(&intf->users_mutex);
@@ -4452,8 +4466,8 @@ static int handle_read_event_rsp(struct ipmi_smi *intf,
* No one to receive the message, put it in queue if there's
* not already too many things in the queue.
*/
- recv_msg = ipmi_alloc_recv_msg();
- if (!recv_msg) {
+ recv_msg = ipmi_alloc_recv_msg(NULL);
+ if (IS_ERR(recv_msg)) {
/*
* We couldn't allocate memory for the
* message, so requeue it for handling
@@ -4488,7 +4502,7 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
struct ipmi_recv_msg *recv_msg;
struct ipmi_system_interface_addr *smi_addr;
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
if (recv_msg == NULL) {
dev_warn(intf->si_dev,
"IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
@@ -4529,9 +4543,10 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
if (msg->rsp_size < 2) {
/* Message is too small to be correct. */
- dev_warn(intf->si_dev,
- "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+ dev_warn_ratelimited(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1,
+ msg->data[1], msg->rsp_size);
return_unspecified:
/* Generate an error response for the message. */
@@ -4561,14 +4576,14 @@ return_unspecified:
} else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data == NULL)) {
+ && (msg->recv_msg == NULL)) {
if (intf->in_shutdown || intf->run_to_completion)
goto out;
/*
* This is the local response to a command send, start
- * the timer for these. The user_data will not be
+ * the timer for these. The recv_msg will not be
* NULL if this is a response send, and we will let
* response sends just go through.
*/
@@ -4628,7 +4643,7 @@ return_unspecified:
requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL)) {
+ && (msg->recv_msg != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
@@ -4645,7 +4660,7 @@ return_unspecified:
cc = msg->rsp[2];
process_response_response:
- recv_msg = msg->user_data;
+ recv_msg = msg->recv_msg;
requeue = 0;
if (!recv_msg)
@@ -4801,6 +4816,7 @@ static void smi_work(struct work_struct *t)
int run_to_completion = READ_ONCE(intf->run_to_completion);
struct ipmi_smi_msg *newmsg = NULL;
struct ipmi_recv_msg *msg, *msg2;
+ int cc;
/*
* Start the next message if available.
@@ -4809,7 +4825,7 @@ static void smi_work(struct work_struct *t)
* because the lower layer is allowed to hold locks while calling
* message delivery.
*/
-
+restart:
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -4830,8 +4846,17 @@ static void smi_work(struct work_struct *t)
if (!run_to_completion)
spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
- if (newmsg)
- intf->handlers->sender(intf->send_info, newmsg);
+ if (newmsg) {
+ cc = intf->handlers->sender(intf->send_info, newmsg);
+ if (cc) {
+ if (newmsg->recv_msg)
+ deliver_err_response(intf,
+ newmsg->recv_msg, cc);
+ else
+ ipmi_free_smi_msg(newmsg);
+ goto restart;
+ }
+ }
handle_new_recv_msgs(intf);
@@ -4868,12 +4893,10 @@ static void smi_work(struct work_struct *t)
list_del(&msg->link);
- if (refcount_read(&user->destroyed) == 0) {
+ if (refcount_read(&user->destroyed) == 0)
ipmi_free_recv_msg(msg);
- } else {
- atomic_dec(&user->nr_msgs);
+ else
user->handler->ipmi_recv_hndl(msg, user->handler_data);
- }
}
mutex_unlock(&intf->user_msgs_mutex);
@@ -4951,8 +4974,7 @@ smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
struct list_head *timeouts,
unsigned long timeout_period,
- int slot, unsigned long *flags,
- bool *need_timer)
+ int slot, bool *need_timer)
{
struct ipmi_recv_msg *msg;
@@ -5004,7 +5026,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
return;
}
- spin_unlock_irqrestore(&intf->seq_lock, *flags);
+ mutex_unlock(&intf->seq_lock);
/*
* Send the new message. We send with a zero
@@ -5025,7 +5047,7 @@ static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
} else
ipmi_free_smi_msg(smi_msg);
- spin_lock_irqsave(&intf->seq_lock, *flags);
+ mutex_lock(&intf->seq_lock);
}
}
@@ -5052,7 +5074,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
* list.
*/
INIT_LIST_HEAD(&timeouts);
- spin_lock_irqsave(&intf->seq_lock, flags);
+ mutex_lock(&intf->seq_lock);
if (intf->ipmb_maintenance_mode_timeout) {
if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
intf->ipmb_maintenance_mode_timeout = 0;
@@ -5062,8 +5084,8 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
check_msg_timeout(intf, &intf->seq_table[i],
&timeouts, timeout_period, i,
- &flags, &need_timer);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ &need_timer);
+ mutex_unlock(&intf->seq_lock);
list_for_each_entry_safe(msg, msg2, &timeouts, link)
deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);
@@ -5083,7 +5105,9 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
-= timeout_period;
if (!intf->maintenance_mode
&& (intf->auto_maintenance_timeout <= 0)) {
- intf->maintenance_mode_enable = false;
+ intf->maintenance_mode_state =
+ IPMI_MAINTENANCE_MODE_STATE_OFF;
+ intf->auto_maintenance_timeout = 0;
maintenance_mode_update(intf);
}
}
@@ -5099,15 +5123,13 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
static void ipmi_request_event(struct ipmi_smi *intf)
{
/* No event requests when in maintenance mode. */
- if (intf->maintenance_mode_enable)
+ if (intf->maintenance_mode_state)
return;
if (!intf->in_shutdown)
intf->handlers->request_events(intf->send_info);
}
-static struct timer_list ipmi_timer;
-
static atomic_t stop_operation;
static void ipmi_timeout_work(struct work_struct *work)
@@ -5131,6 +5153,8 @@ static void ipmi_timeout_work(struct work_struct *work)
}
need_timer = true;
}
+ if (intf->maintenance_mode_state)
+ need_timer = true;
need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
}
@@ -5174,7 +5198,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
if (rv) {
rv->done = free_smi_msg;
- rv->user_data = NULL;
+ rv->recv_msg = NULL;
rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
atomic_inc(&smi_msg_inuse_count);
}
@@ -5190,27 +5214,51 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
kfree(msg);
}
-static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
{
struct ipmi_recv_msg *rv;
+ if (user) {
+ if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
- if (rv) {
- rv->user = NULL;
- rv->done = free_recv_msg;
- atomic_inc(&recv_msg_inuse_count);
+ if (!rv) {
+ if (user)
+ atomic_dec(&user->nr_msgs);
+ return ERR_PTR(-ENOMEM);
}
+
+ rv->user = user;
+ rv->done = free_recv_msg;
+ if (user)
+ kref_get(&user->refcount);
+ atomic_inc(&recv_msg_inuse_count);
return rv;
}
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
- if (msg->user && !oops_in_progress)
+ if (msg->user && !oops_in_progress) {
+ atomic_dec(&msg->user->nr_msgs);
kref_put(&msg->user->refcount, free_ipmi_user);
+ }
msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
+static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
+ struct ipmi_user *user)
+{
+ WARN_ON_ONCE(msg->user); /* User should not be set. */
+ msg->user = user;
+ atomic_inc(&user->nr_msgs);
+ kref_get(&user->refcount);
+}
+
static atomic_t panic_done_count = ATOMIC_INIT(0);
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
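
Two threads run through the ipmi_msghandler.c changes above: seq_lock becomes a mutex, which is why the flags plumbing drops out of the sequence-table helpers, and ipmi_alloc_recv_msg() now takes the user, charges the per-user quota, grabs the user reference itself, and reports failure with ERR_PTR() so callers can tell -EBUSY (quota exceeded) from -ENOMEM. Caller-side sketch using names from the diff:

	recv_msg = ipmi_alloc_recv_msg(user);	/* ERR_PTR(-EBUSY) or ERR_PTR(-ENOMEM) on failure */
	if (IS_ERR(recv_msg))
		return PTR_ERR(recv_msg);
	/* on success the message already holds the user refcount and quota
	 * slot; ipmi_free_recv_msg() releases both. */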
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
index 4a2efafcd1f8..52a1130defe5 100644
--- a/drivers/char/ipmi/ipmi_powernv.c
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -51,7 +51,7 @@ static void send_error_reply(struct ipmi_smi_powernv *smi,
ipmi_smi_msg_received(smi->intf, msg);
}
-static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
+static int ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
{
struct ipmi_smi_powernv *smi = send_info;
struct opal_ipmi_msg *opal_msg;
@@ -93,18 +93,19 @@ static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
smi->interface_id, opal_msg, size);
rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
pr_devel("%s: -> %d\n", __func__, rc);
-
- if (!rc) {
- smi->cur_msg = msg;
- spin_unlock_irqrestore(&smi->msg_lock, flags);
- return;
+ if (rc) {
+ comp = IPMI_ERR_UNSPECIFIED;
+ goto err_unlock;
}
- comp = IPMI_ERR_UNSPECIFIED;
+ smi->cur_msg = msg;
+ spin_unlock_irqrestore(&smi->msg_lock, flags);
+ return IPMI_CC_NO_ERROR;
+
err_unlock:
spin_unlock_irqrestore(&smi->msg_lock, flags);
err:
- send_error_reply(smi, msg, comp);
+ return comp;
}
static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index 508c3fd45877..687835b53da5 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -101,6 +101,13 @@ void ipmi_si_pci_shutdown(void);
static inline void ipmi_si_pci_init(void) { }
static inline void ipmi_si_pci_shutdown(void) { }
#endif
+#ifdef CONFIG_IPMI_LS2K
+void ipmi_si_ls2k_init(void);
+void ipmi_si_ls2k_shutdown(void);
+#else
+static inline void ipmi_si_ls2k_init(void) { }
+static inline void ipmi_si_ls2k_shutdown(void) { }
+#endif
#ifdef CONFIG_PARISC
void ipmi_si_parisc_init(void);
void ipmi_si_parisc_shutdown(void);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 8b5524069c15..5459ffdde8dc 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -53,6 +53,7 @@
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
				      short timeout */
+#define SI_TIMEOUT_HOSED (HZ) /* 1 second when in hosed state. */
enum si_intf_state {
SI_NORMAL,
@@ -61,7 +62,8 @@ enum si_intf_state {
SI_CLEARING_FLAGS,
SI_GETTING_MESSAGES,
SI_CHECKING_ENABLES,
- SI_SETTING_ENABLES
+ SI_SETTING_ENABLES,
+ SI_HOSED
/* FIXME - add watchdog stuff. */
};
@@ -273,8 +275,7 @@ void debug_timestamp(struct smi_info *smi_info, char *msg)
struct timespec64 t;
ktime_get_ts64(&t);
- dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
- msg, t.tv_sec, t.tv_nsec);
+ dev_dbg(smi_info->io.dev, "**%s: %ptSp\n", msg, &t);
}
#else
#define debug_timestamp(smi_info, x)
@@ -313,7 +314,7 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
- int rv;
+ int rv;
if (!smi_info->waiting_msg) {
smi_info->curr_msg = NULL;
@@ -390,6 +391,17 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void start_get_flags(struct smi_info *smi_info)
+{
+ unsigned char msg[2];
+
+ msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
+ msg[1] = IPMI_GET_MSG_FLAGS_CMD;
+
+ start_new_msg(smi_info, msg, 2);
+ smi_info->si_state = SI_GETTING_FLAGS;
+}
+
static void start_getting_msg_queue(struct smi_info *smi_info)
{
smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -742,6 +754,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
}
break;
}
+ case SI_HOSED: /* Shouldn't happen. */
+ break;
}
}
@@ -756,6 +770,10 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
enum si_sm_result si_sm_result;
restart:
+ if (smi_info->si_state == SI_HOSED)
+ /* Just in case, hosed state is only left from the timeout. */
+ return SI_SM_HOSED;
+
/*
* There used to be a loop here that waited a little while
* (around 25us) before giving up. That turned out to be
@@ -779,18 +797,20 @@ restart:
/*
* Do this before return_hosed_msg, because that
- * releases the lock.
+ * releases the lock. We just disable operations for
+ * a while and retry in hosed state.
*/
- smi_info->si_state = SI_NORMAL;
+ smi_info->si_state = SI_HOSED;
if (smi_info->curr_msg != NULL) {
/*
* If we were handling a user message, format
* a response to send to the upper layer to
* tell it about the error.
*/
- return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
+ return_hosed_msg(smi_info, IPMI_BUS_ERR);
}
- goto restart;
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
+ goto out;
}
/*
@@ -798,8 +818,6 @@ restart:
* this if there is not yet an upper layer to handle anything.
*/
if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
- unsigned char msg[2];
-
if (smi_info->si_state != SI_NORMAL) {
/*
* We got an ATTN, but we are doing something else.
@@ -817,11 +835,7 @@ restart:
* interrupts work with the SMI, that's not really
* possible.
*/
- msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
- msg[1] = IPMI_GET_MSG_FLAGS_CMD;
-
- start_new_msg(smi_info, msg, 2);
- smi_info->si_state = SI_GETTING_FLAGS;
+ start_get_flags(smi_info);
goto restart;
}
}
@@ -894,27 +908,29 @@ static void flush_messages(void *send_info)
* mode. This means we are single-threaded, no need for locks.
*/
result = smi_event_handler(smi_info, 0);
- while (result != SI_SM_IDLE) {
+ while (result != SI_SM_IDLE && result != SI_SM_HOSED) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct smi_info *smi_info = send_info;
unsigned long flags;
debug_timestamp(smi_info, "Enqueue");
+ if (smi_info->si_state == SI_HOSED)
+ return IPMI_BUS_ERR;
+
if (smi_info->run_to_completion) {
/*
* If we are running to completion, start it. Upper
* layer will call flush_messages to clear it out.
*/
smi_info->waiting_msg = msg;
- return;
+ return IPMI_CC_NO_ERROR;
}
spin_lock_irqsave(&smi_info->si_lock, flags);
@@ -929,6 +945,7 @@ static void sender(void *send_info,
smi_info->waiting_msg = msg;
check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ return IPMI_CC_NO_ERROR;
}
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
@@ -1087,6 +1104,10 @@ static void smi_timeout(struct timer_list *t)
spin_lock_irqsave(&(smi_info->si_lock), flags);
debug_timestamp(smi_info, "Timer");
+ if (smi_info->si_state == SI_HOSED)
+ /* Try something to see if the BMC is now operational. */
+ start_get_flags(smi_info);
+
jiffies_now = jiffies;
time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
* SI_USEC_PER_JIFFY);
@@ -1096,14 +1117,11 @@ static void smi_timeout(struct timer_list *t)
/* Running with interrupts, only do long timeouts. */
timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_mod_timer;
- }
-
- /*
- * If the state machine asks for a short delay, then shorten
- * the timer timeout.
- */
- if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ } else if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ /*
+ * If the state machine asks for a short delay, then shorten
+ * the timer timeout.
+ */
smi_inc_stat(smi_info, short_timeouts);
timeout = jiffies + 1;
} else {
@@ -1111,7 +1129,6 @@ static void smi_timeout(struct timer_list *t)
timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
-do_mod_timer:
if (smi_result != SI_SM_IDLE)
smi_mod_timer(smi_info, timeout);
else
@@ -2120,6 +2137,8 @@ static int __init init_ipmi_si(void)
ipmi_si_pci_init();
+ ipmi_si_ls2k_init();
+
ipmi_si_parisc_init();
mutex_lock(&smi_infos_lock);
@@ -2331,6 +2350,8 @@ static void cleanup_ipmi_si(void)
ipmi_si_pci_shutdown();
+ ipmi_si_ls2k_shutdown();
+
ipmi_si_parisc_shutdown();
ipmi_si_platform_shutdown();
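
The new SI_HOSED state parks a wedged interface instead of spinning on it: sender() fails fast with IPMI_BUS_ERR, flush_messages() stops polling, and the timer, re-armed at SI_TIMEOUT_HOSED (one second), probes the BMC with a harmless command to see whether it has come back. Condensed from the smi_timeout() hunk above:

	if (smi_info->si_state == SI_HOSED)
		start_get_flags(smi_info);	/* GET_MSG_FLAGS probe; moves to SI_GETTING_FLAGS */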
diff --git a/drivers/char/ipmi/ipmi_si_ls2k.c b/drivers/char/ipmi/ipmi_si_ls2k.c
new file mode 100644
index 000000000000..45442c257efd
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_ls2k.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for Loongson-2K BMC IPMI interface
+ *
+ * Copyright (C) 2024-2025 Loongson Technology Corporation Limited.
+ *
+ * Authors:
+ * Chong Qiao <qiaochong@loongson.cn>
+ * Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "ipmi_si.h"
+
+#define LS2K_KCS_FIFO_IBFH 0x0
+#define LS2K_KCS_FIFO_IBFT 0x1
+#define LS2K_KCS_FIFO_OBFH 0x2
+#define LS2K_KCS_FIFO_OBFT 0x3
+
+/* KCS registers */
+#define LS2K_KCS_REG_STS 0x4
+#define LS2K_KCS_REG_DATA_OUT 0x5
+#define LS2K_KCS_REG_DATA_IN 0x6
+#define LS2K_KCS_REG_CMD 0x8
+
+#define LS2K_KCS_CMD_DATA 0xa
+#define LS2K_KCS_VERSION 0xb
+#define LS2K_KCS_WR_REQ 0xc
+#define LS2K_KCS_WR_ACK 0x10
+
+#define LS2K_KCS_STS_OBF BIT(0)
+#define LS2K_KCS_STS_IBF BIT(1)
+#define LS2K_KCS_STS_SMS_ATN BIT(2)
+#define LS2K_KCS_STS_CMD BIT(3)
+
+#define LS2K_KCS_DATA_MASK (LS2K_KCS_STS_OBF | LS2K_KCS_STS_IBF | LS2K_KCS_STS_CMD)
+
+static bool ls2k_registered;
+
+static unsigned char ls2k_mem_inb_v0(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ int reg_offset;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_STS;
+ } else {
+ writeb(readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_STS_OBF, addr + LS2K_KCS_REG_STS);
+ reg_offset = LS2K_KCS_REG_DATA_OUT;
+ }
+
+ return readb(addr + reg_offset);
+}
+
+static unsigned char ls2k_mem_inb_v1(const struct si_sm_io *io, unsigned int offset)
+{
+ void __iomem *addr = io->addr;
+ unsigned char inb = 0, cmd;
+ bool obf, ibf;
+
+ obf = readb(addr + LS2K_KCS_FIFO_OBFH) ^ readb(addr + LS2K_KCS_FIFO_OBFT);
+ ibf = readb(addr + LS2K_KCS_FIFO_IBFH) ^ readb(addr + LS2K_KCS_FIFO_IBFT);
+ cmd = readb(addr + LS2K_KCS_CMD_DATA);
+
+ if (offset & BIT(0)) {
+ inb = readb(addr + LS2K_KCS_REG_STS) & ~LS2K_KCS_DATA_MASK;
+ inb |= FIELD_PREP(LS2K_KCS_STS_OBF, obf)
+ | FIELD_PREP(LS2K_KCS_STS_IBF, ibf)
+ | FIELD_PREP(LS2K_KCS_STS_CMD, cmd);
+ } else {
+ inb = readb(addr + LS2K_KCS_REG_DATA_OUT);
+ writeb(readb(addr + LS2K_KCS_FIFO_OBFH), addr + LS2K_KCS_FIFO_OBFT);
+ }
+
+ return inb;
+}
+
+static void ls2k_mem_outb_v0(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char sts = readb(addr + LS2K_KCS_REG_STS);
+ int reg_offset;
+
+ if (sts & LS2K_KCS_STS_IBF)
+ return;
+
+ if (offset & BIT(0)) {
+ reg_offset = LS2K_KCS_REG_CMD;
+ sts |= LS2K_KCS_STS_CMD;
+ } else {
+ reg_offset = LS2K_KCS_REG_DATA_IN;
+ sts &= ~LS2K_KCS_STS_CMD;
+ }
+
+ writew(val, addr + reg_offset);
+ writeb(sts | LS2K_KCS_STS_IBF, addr + LS2K_KCS_REG_STS);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_outb_v1(const struct si_sm_io *io, unsigned int offset,
+ unsigned char val)
+{
+ void __iomem *addr = io->addr;
+ unsigned char ibfh, ibft;
+ int reg_offset;
+
+ ibfh = readb(addr + LS2K_KCS_FIFO_IBFH);
+ ibft = readb(addr + LS2K_KCS_FIFO_IBFT);
+
+ if (ibfh ^ ibft)
+ return;
+
+ reg_offset = (offset & BIT(0)) ? LS2K_KCS_REG_CMD : LS2K_KCS_REG_DATA_IN;
+ writew(val, addr + reg_offset);
+
+ writeb(offset & BIT(0), addr + LS2K_KCS_CMD_DATA);
+ writeb(!ibft, addr + LS2K_KCS_FIFO_IBFH);
+ writel(readl(addr + LS2K_KCS_WR_REQ) + 1, addr + LS2K_KCS_WR_REQ);
+}
+
+static void ls2k_mem_cleanup(struct si_sm_io *io)
+{
+ if (io->addr)
+ iounmap(io->addr);
+}
+
+static int ipmi_ls2k_mem_setup(struct si_sm_io *io)
+{
+ unsigned char version;
+
+ io->addr = ioremap(io->addr_data, io->regspacing);
+ if (!io->addr)
+ return -EIO;
+
+ version = readb(io->addr + LS2K_KCS_VERSION);
+
+ io->inputb = version ? ls2k_mem_inb_v1 : ls2k_mem_inb_v0;
+ io->outputb = version ? ls2k_mem_outb_v1 : ls2k_mem_outb_v0;
+ io->io_cleanup = ls2k_mem_cleanup;
+
+ return 0;
+}
+
+static int ipmi_ls2k_probe(struct platform_device *pdev)
+{
+ struct si_sm_io io;
+
+ memset(&io, 0, sizeof(io));
+
+ io.si_info = &ipmi_kcs_si_info;
+ io.io_setup = ipmi_ls2k_mem_setup;
+ io.addr_data = pdev->resource[0].start;
+ io.regspacing = resource_size(&pdev->resource[0]);
+ io.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "addr 0x%lx, spacing %d.\n", io.addr_data, io.regspacing);
+
+ return ipmi_si_add_smi(&io);
+}
+
+static void ipmi_ls2k_remove(struct platform_device *pdev)
+{
+ ipmi_si_remove_by_dev(&pdev->dev);
+}
+
+struct platform_driver ipmi_ls2k_platform_driver = {
+ .driver = {
+ .name = "ls2k-ipmi-si",
+ },
+ .probe = ipmi_ls2k_probe,
+ .remove = ipmi_ls2k_remove,
+};
+
+void ipmi_si_ls2k_init(void)
+{
+ platform_driver_register(&ipmi_ls2k_platform_driver);
+ ls2k_registered = true;
+}
+
+void ipmi_si_ls2k_shutdown(void)
+{
+ if (ls2k_registered)
+ platform_driver_unregister(&ipmi_ls2k_platform_driver);
+}
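
The v1 interface above replaces the classic KCS status bits with one-bit head/tail toggles per direction: a slot is occupied exactly when head != tail (the ibfh ^ ibft test), and a side posts or acknowledges by flipping its own bit. A standalone sketch of that handshake, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct toggle_fifo {
	unsigned char head;	/* toggled by the producer */
	unsigned char tail;	/* toggled by the consumer */
};

static bool fifo_full(const struct toggle_fifo *f)
{
	return f->head ^ f->tail;	/* mirrors ibfh ^ ibft in the driver */
}

int main(void)
{
	struct toggle_fifo f = { 0, 0 };

	f.head = !f.tail;		/* producer posts one entry */
	printf("full after post: %d\n", fifo_full(&f));
	f.tail = f.head;		/* consumer acknowledges it */
	printf("full after ack:  %d\n", fifo_full(&f));
	return 0;
}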
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 1bc42830444d..ef1582a029f4 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1068,8 +1068,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
}
}
-static void sender(void *send_info,
- struct ipmi_smi_msg *msg)
+static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
struct ssif_info *ssif_info = send_info;
unsigned long oflags, *flags;
@@ -1084,11 +1083,10 @@ static void sender(void *send_info,
struct timespec64 t;
ktime_get_real_ts64(&t);
- dev_dbg(&ssif_info->client->dev,
- "**Enqueue %02x %02x: %lld.%6.6ld\n",
- msg->data[0], msg->data[1],
- (long long)t.tv_sec, (long)t.tv_nsec / NSEC_PER_USEC);
+ dev_dbg(&ssif_info->client->dev, "**Enqueue %02x %02x: %ptSp\n",
+ msg->data[0], msg->data[1], &t);
}
+ return IPMI_CC_NO_ERROR;
}
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 48839958b0b1..52039fae1594 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -304,13 +304,13 @@ static unsigned zero_mmap_capabilities(struct file *file)
}
/* can't do an in-place private mapping if there's no MMU */
-static inline int private_mapping_ok(struct vm_area_struct *vma)
+static inline int private_mapping_ok(struct vm_area_desc *desc)
{
- return is_nommu_shared_mapping(vma->vm_flags);
+ return is_nommu_shared_mapping(desc->vm_flags);
}
#else
-static inline int private_mapping_ok(struct vm_area_struct *vma)
+static inline int private_mapping_ok(struct vm_area_desc *desc)
{
return 1;
}
@@ -322,46 +322,49 @@ static const struct vm_operations_struct mmap_mem_ops = {
#endif
};
-static int mmap_mem(struct file *file, struct vm_area_struct *vma)
+static int mmap_filter_error(int err)
{
- size_t size = vma->vm_end - vma->vm_start;
- phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ return -EAGAIN;
+}
+
+static int mmap_mem_prepare(struct vm_area_desc *desc)
+{
+ struct file *file = desc->file;
+ const size_t size = vma_desc_size(desc);
+ const phys_addr_t offset = (phys_addr_t)desc->pgoff << PAGE_SHIFT;
/* Does it even fit in phys_addr_t? */
- if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+ if (offset >> PAGE_SHIFT != desc->pgoff)
return -EINVAL;
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
- if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
+ if (!valid_mmap_phys_addr_range(desc->pgoff, size))
return -EINVAL;
- if (!private_mapping_ok(vma))
+ if (!private_mapping_ok(desc))
return -ENOSYS;
- if (!range_is_allowed(vma->vm_pgoff, size))
+ if (!range_is_allowed(desc->pgoff, size))
return -EPERM;
- if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
- &vma->vm_page_prot))
+ if (!phys_mem_access_prot_allowed(file, desc->pgoff, size,
+ &desc->page_prot))
return -EINVAL;
- vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
- size,
- vma->vm_page_prot);
+ desc->page_prot = phys_mem_access_prot(file, desc->pgoff,
+ size,
+ desc->page_prot);
- vma->vm_ops = &mmap_mem_ops;
+ desc->vm_ops = &mmap_mem_ops;
+
+ /* Remap-pfn-range will mark the range VM_IO. */
+ mmap_action_remap_full(desc, desc->pgoff);
+ /* We filter remap errors to -EAGAIN. */
+ desc->action.error_hook = mmap_filter_error;
- /* Remap-pfn-range will mark the range VM_IO */
- if (remap_pfn_range(vma,
- vma->vm_start,
- vma->vm_pgoff,
- size,
- vma->vm_page_prot)) {
- return -EAGAIN;
- }
return 0;
}
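
The mmap_prepare conversion is meant to be invisible to userspace: the validation order (-EINVAL, -ENOSYS, -EPERM) is preserved, and remap failures still surface as -EAGAIN via the new error hook. A minimal userspace sketch exercising that contract (the physical address is a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const off_t phys = 0xfed00000;	/* placeholder physical address */
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}

	void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, phys);
	if (p == MAP_FAILED)
		perror("mmap");	/* EPERM, EINVAL or EAGAIN per the checks above */
	else
		munmap(p, 4096);

	close(fd);
	return 0;
}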
@@ -501,38 +504,64 @@ static ssize_t read_zero(struct file *file, char __user *buf,
return cleared;
}
-static int mmap_zero(struct file *file, struct vm_area_struct *vma)
+static int mmap_zero_private_success(const struct vm_area_struct *vma)
+{
+ /*
+ * This is a highly unique situation where we mark a MAP_PRIVATE mapping
+ * of /dev/zero anonymous, despite it not being.
+ */
+ vma_set_anonymous((struct vm_area_struct *)vma);
+
+ return 0;
+}
+
+static int mmap_zero_prepare(struct vm_area_desc *desc)
{
#ifndef CONFIG_MMU
return -ENOSYS;
#endif
- if (vma->vm_flags & VM_SHARED)
- return shmem_zero_setup(vma);
- vma_set_anonymous(vma);
+ if (desc->vm_flags & VM_SHARED)
+ return shmem_zero_setup_desc(desc);
+
+ desc->action.success_hook = mmap_zero_private_success;
return 0;
}
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_zero(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ return -ENOSYS;
+}
+#else
static unsigned long get_unmapped_area_zero(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
-#ifdef CONFIG_MMU
if (flags & MAP_SHARED) {
/*
- * mmap_zero() will call shmem_zero_setup() to create a file,
- * so use shmem's get_unmapped_area in case it can be huge;
- * and pass NULL for file as in mmap.c's get_unmapped_area(),
- * so as not to confuse shmem with our handle on "/dev/zero".
+ * mmap_zero_prepare() will call shmem_zero_setup() to create a
+ * file, so use shmem's get_unmapped_area in case it can be
+ * huge; and pass NULL for file as in mmap.c's
+ * get_unmapped_area(), so as not to confuse shmem with our
+ * handle on "/dev/zero".
*/
return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
}
- /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
- return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
+ /*
+ * Otherwise flags & MAP_PRIVATE: with no shmem object beneath it,
+ * attempt to map aligned to huge page size if possible, otherwise we
+ * fall back to system page size mappings.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return thp_get_unmapped_area(file, addr, len, pgoff, flags);
#else
- return -ENOSYS;
+ return mm_get_unmapped_area(file, addr, len, pgoff, flags);
#endif
}
+#endif /* CONFIG_MMU */
static ssize_t write_full(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
@@ -619,7 +648,7 @@ static const struct file_operations __maybe_unused mem_fops = {
.llseek = memory_lseek,
.read = read_mem,
.write = write_mem,
- .mmap = mmap_mem,
+ .mmap_prepare = mmap_mem_prepare,
.open = open_mem,
#ifndef CONFIG_MMU
.get_unmapped_area = get_unmapped_area_mem,
@@ -655,7 +684,7 @@ static const struct file_operations zero_fops = {
.write_iter = write_iter_zero,
.splice_read = copy_splice_read,
.splice_write = splice_write_zero,
- .mmap = mmap_zero,
+ .mmap_prepare = mmap_zero_prepare,
.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
.mmap_capabilities = zero_mmap_capabilities,
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 558302a64dd9..726516fb0a3b 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -132,7 +132,8 @@ static int misc_open(struct inode *inode, struct file *file)
break;
}
- if (!new_fops) {
+ /* Only request a module for fixed minor numbers */
+ if (!new_fops && minor < MISC_DYNAMIC_MINOR) {
mutex_unlock(&misc_mtx);
request_module("char-major-%d-%d", MISC_MAJOR, minor);
mutex_lock(&misc_mtx);
@@ -144,10 +145,11 @@ static int misc_open(struct inode *inode, struct file *file)
new_fops = fops_get(iter->fops);
break;
}
- if (!new_fops)
- goto fail;
}
+ if (!new_fops)
+ goto fail;
+
/*
* Place the miscdevice in the file's
* private_data so it can be used by the
@@ -210,6 +212,12 @@ int misc_register(struct miscdevice *misc)
int err = 0;
bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
+ if (misc->minor > MISC_DYNAMIC_MINOR) {
+ pr_err("Invalid fixed minor %d for miscdevice '%s'\n",
+ misc->minor, misc->name);
+ return -EINVAL;
+ }
+
INIT_LIST_HEAD(&misc->list);
mutex_lock(&misc_mtx);
@@ -275,13 +283,12 @@ EXPORT_SYMBOL(misc_register);
void misc_deregister(struct miscdevice *misc)
{
- if (WARN_ON(list_empty(&misc->list)))
- return;
-
mutex_lock(&misc_mtx);
- list_del(&misc->list);
+ list_del_init(&misc->list);
device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor));
misc_minor_free(misc->minor);
+ if (misc->minor > MISC_DYNAMIC_MINOR)
+ misc->minor = MISC_DYNAMIC_MINOR;
mutex_unlock(&misc_mtx);
}
EXPORT_SYMBOL(misc_deregister);
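
For driver authors the net effect is simple: pass a fixed minor below MISC_DYNAMIC_MINOR, or MISC_DYNAMIC_MINOR itself; anything larger is rejected with -EINVAL, and a deregistered device can be registered again without reinitializing the structure. A minimal module sketch with placeholder names:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* anything larger is now -EINVAL */
	.name  = "example",
	.fops  = &example_fops,
};

static int __init example_init(void)
{
	return misc_register(&example_dev);
}

static void __exit example_exit(void)
{
	/* list_del_init() plus the minor reset above make the same
	 * struct safe to register again later. */
	misc_deregister(&example_dev);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");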
diff --git a/drivers/misc/misc_minor_kunit.c b/drivers/char/misc_minor_kunit.c
index 30eceac5f1b6..6fc8b05169c5 100644
--- a/drivers/misc/misc_minor_kunit.c
+++ b/drivers/char/misc_minor_kunit.c
@@ -7,12 +7,6 @@
#include <linux/file.h>
#include <linux/init_syscalls.h>
-/* dynamic minor (2) */
-static struct miscdevice dev_dynamic_minor = {
- .minor = 2,
- .name = "dev_dynamic_minor",
-};
-
/* static minor (LCD_MINOR) */
static struct miscdevice dev_static_minor = {
.minor = LCD_MINOR,
@@ -25,16 +19,6 @@ static struct miscdevice dev_misc_dynamic_minor = {
.name = "dev_misc_dynamic_minor",
};
-static void kunit_dynamic_minor(struct kunit *test)
-{
- int ret;
-
- ret = misc_register(&dev_dynamic_minor);
- KUNIT_EXPECT_EQ(test, 0, ret);
- KUNIT_EXPECT_EQ(test, 2, dev_dynamic_minor.minor);
- misc_deregister(&dev_dynamic_minor);
-}
-
static void kunit_static_minor(struct kunit *test)
{
int ret;
@@ -157,13 +141,7 @@ static bool is_valid_dynamic_minor(int minor)
{
if (minor < 0)
return false;
- if (minor == MISC_DYNAMIC_MINOR)
- return false;
- if (minor >= 0 && minor <= 15)
- return false;
- if (minor >= 128 && minor < MISC_DYNAMIC_MINOR)
- return false;
- return true;
+ return minor > MISC_DYNAMIC_MINOR;
}
static int miscdev_test_open(struct inode *inode, struct file *file)
@@ -557,7 +535,7 @@ static void __init miscdev_test_conflict(struct kunit *test)
*/
miscstat.minor = miscdyn.minor;
ret = misc_register(&miscstat);
- KUNIT_EXPECT_EQ(test, ret, -EBUSY);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
if (ret == 0)
misc_deregister(&miscstat);
@@ -590,8 +568,9 @@ static void __init miscdev_test_conflict_reverse(struct kunit *test)
misc_deregister(&miscdyn);
ret = misc_register(&miscstat);
- KUNIT_EXPECT_EQ(test, ret, 0);
- KUNIT_EXPECT_EQ(test, miscstat.minor, miscdyn.minor);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&miscstat);
/*
* Try to register a dynamic minor after registering a static minor
@@ -601,25 +580,81 @@ static void __init miscdev_test_conflict_reverse(struct kunit *test)
miscdyn.minor = MISC_DYNAMIC_MINOR;
ret = misc_register(&miscdyn);
KUNIT_EXPECT_EQ(test, ret, 0);
- KUNIT_EXPECT_NE(test, miscdyn.minor, miscstat.minor);
+ KUNIT_EXPECT_EQ(test, miscdyn.minor, miscstat.minor);
KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
if (ret == 0)
misc_deregister(&miscdyn);
+}
- miscdev_test_can_open(test, &miscstat);
+/* A minor greater than MISC_DYNAMIC_MINOR is invalid when registering a miscdevice */
+static void miscdev_test_invalid_input(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .minor = MISC_DYNAMIC_MINOR + 1,
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
- misc_deregister(&miscstat);
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+}
+
+/*
+ * Verify that @miscdyn_a can still be registered successfully, without
+ * reinitialization, even after the minor it once owned has been handed
+ * to another miscdevice such as @miscdyn_b.
+ */
+static void miscdev_test_dynamic_reentry(struct kunit *test)
+{
+ struct miscdevice miscdyn_a = {
+ .name = "miscdyn_a",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscdyn_b = {
+ .name = "miscdyn_b",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ int ret, minor_a;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ minor_a = miscdyn_a.minor;
+ if (ret != 0)
+ return;
+ misc_deregister(&miscdyn_a);
+
+ ret = misc_register(&miscdyn_b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscdyn_b.minor, minor_a);
+ if (ret != 0)
+ return;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ KUNIT_EXPECT_NE(test, miscdyn_a.minor, miscdyn_b.minor);
+ if (ret == 0)
+ misc_deregister(&miscdyn_a);
+
+ misc_deregister(&miscdyn_b);
}
static struct kunit_case test_cases[] = {
- KUNIT_CASE(kunit_dynamic_minor),
KUNIT_CASE(kunit_static_minor),
KUNIT_CASE(kunit_misc_dynamic_minor),
+ KUNIT_CASE(miscdev_test_invalid_input),
KUNIT_CASE_PARAM(miscdev_test_twice, miscdev_gen_params),
KUNIT_CASE_PARAM(miscdev_test_duplicate_minor, miscdev_gen_params),
KUNIT_CASE(miscdev_test_duplicate_name),
KUNIT_CASE(miscdev_test_duplicate_name_leak),
KUNIT_CASE_PARAM(miscdev_test_duplicate_error, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_dynamic_reentry),
{}
};
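
For readers unfamiliar with KUnit, the registration pattern these tests rely on reduces to the sketch below (suite and case names are placeholders); note that a dynamically allocated minor now always lands above MISC_DYNAMIC_MINOR:

#include <kunit/test.h>
#include <linux/miscdevice.h>

static void example_dynamic_minor(struct kunit *test)
{
	struct miscdevice dev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name  = "kunit-example",
	};
	int ret = misc_register(&dev);

	KUNIT_ASSERT_EQ(test, ret, 0);
	/* Dynamic minors are allocated above MISC_DYNAMIC_MINOR. */
	KUNIT_EXPECT_GT(test, dev.minor, MISC_DYNAMIC_MINOR);
	misc_deregister(&dev);
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_dynamic_minor),
	{}
};

static struct kunit_suite example_suite = {
	.name = "misc-minor-example",
	.test_cases = example_cases,
};
kunit_test_suite(example_suite);

MODULE_LICENSE("GPL");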
diff --git a/drivers/char/mwave/3780i.c b/drivers/char/mwave/3780i.c
index 4a8937f80570..90f93cefb21c 100644
--- a/drivers/char/mwave/3780i.c
+++ b/drivers/char/mwave/3780i.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "3780i: " fmt
+
#include <linux/kernel.h>
#include <linux/unistd.h>
#include <linux/delay.h>
@@ -75,18 +77,12 @@ unsigned short dsp3780I_ReadMsaCfg(unsigned short usDspBaseIO,
unsigned long flags;
unsigned short val;
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780I_ReadMsaCfg entry usDspBaseIO %x ulMsaAddr %lx\n",
- usDspBaseIO, ulMsaAddr);
-
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
val = InWordDsp(DSP_MsaDataDSISHigh);
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_2(TRACE_3780I, "3780i::dsp3780I_ReadMsaCfg exit val %x\n", val);
-
return val;
}
@@ -95,10 +91,6 @@ void dsp3780I_WriteMsaCfg(unsigned short usDspBaseIO,
{
unsigned long flags;
- PRINTK_4(TRACE_3780I,
- "3780i::dsp3780i_WriteMsaCfg entry usDspBaseIO %x ulMsaAddr %lx usValue %x\n",
- usDspBaseIO, ulMsaAddr, usValue);
-
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulMsaAddr);
OutWordDsp(DSP_MsaAddrHigh, (unsigned short) (ulMsaAddr >> 16));
@@ -112,64 +104,18 @@ static void dsp3780I_WriteGenCfg(unsigned short usDspBaseIO, unsigned uIndex,
DSP_ISA_SLAVE_CONTROL rSlaveControl;
DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
-
- PRINTK_4(TRACE_3780I,
- "3780i::dsp3780i_WriteGenCfg entry usDspBaseIO %x uIndex %x ucValue %x\n",
- usDspBaseIO, uIndex, ucValue);
-
MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_WriteGenCfg rSlaveControl %x\n",
- MKBYTE(rSlaveControl));
-
rSlaveControl_Save = rSlaveControl;
rSlaveControl.ConfigMode = true;
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_WriteGenCfg entry rSlaveControl+ConfigMode %x\n",
- MKBYTE(rSlaveControl));
-
OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
OutByteDsp(DSP_ConfigData, ucValue);
OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_WriteGenCfg exit\n");
-
-
}
-#if 0
-unsigned char dsp3780I_ReadGenCfg(unsigned short usDspBaseIO,
- unsigned uIndex)
-{
- DSP_ISA_SLAVE_CONTROL rSlaveControl;
- DSP_ISA_SLAVE_CONTROL rSlaveControl_Save;
- unsigned char ucValue;
-
-
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780i_ReadGenCfg entry usDspBaseIO %x uIndex %x\n",
- usDspBaseIO, uIndex);
-
- MKBYTE(rSlaveControl) = InByteDsp(DSP_IsaSlaveControl);
- rSlaveControl_Save = rSlaveControl;
- rSlaveControl.ConfigMode = true;
- OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl));
- OutByteDsp(DSP_ConfigAddress, (unsigned char) uIndex);
- ucValue = InByteDsp(DSP_ConfigData);
- OutByteDsp(DSP_IsaSlaveControl, MKBYTE(rSlaveControl_Save));
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_ReadGenCfg exit ucValue %x\n", ucValue);
-
-
- return ucValue;
-}
-#endif /* 0 */
-
-int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+int dsp3780I_EnableDSP(struct dsp_3780i_config_settings *pSettings,
unsigned short *pIrqMap,
unsigned short *pDmaMap)
{
@@ -191,25 +137,13 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
DSP_CLOCK_CONTROL_2 rClockControl2;
DSP_ISA_SLAVE_CONTROL rSlaveControl;
DSP_HBRIDGE_CONTROL rHBridgeControl;
- unsigned short ChipID = 0;
unsigned short tval;
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_EnableDSP entry pSettings->bDSPEnabled %x\n",
- pSettings->bDSPEnabled);
-
-
if (!pSettings->bDSPEnabled) {
- PRINTK_ERROR( KERN_ERR "3780i::dsp3780I_EnableDSP: Error: DSP not enabled. Aborting.\n" );
+ pr_err("%s: Error: DSP not enabled. Aborting.\n", __func__);
return -EIO;
}
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP entry pSettings->bModemEnabled %x\n",
- pSettings->bModemEnabled);
-
if (pSettings->bModemEnabled) {
rUartCfg1.Reserved = rUartCfg2.Reserved = 0;
rUartCfg1.IrqActiveLow = pSettings->bUartIrqActiveLow;
@@ -282,23 +216,10 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
rSlaveControl.ConfigMode = false;
rSlaveControl.Reserved = 0;
- PRINTK_4(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP usDspBaseIO %x index %x taddr %x\n",
- usDspBaseIO, DSP_IsaSlaveControl,
- usDspBaseIO + DSP_IsaSlaveControl);
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP rSlaveContrl %x\n",
- MKWORD(rSlaveControl));
-
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_IsaSlaveControl, MKWORD(rSlaveControl));
MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP rSlaveControl 2 %x\n", tval);
-
-
for (i = 0; i < 11; i++)
udelay(2000);
@@ -307,10 +228,6 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
MKWORD(tval) = InWordDsp(DSP_IsaSlaveControl);
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP rSlaveControl 3 %x\n", tval);
-
-
/* Program our general configuration registers */
WriteGenCfg(DSP_HBridgeCfg1Index, MKBYTE(rHBridgeCfg1));
WriteGenCfg(DSP_HBridgeCfg2Index, MKBYTE(rHBridgeCfg2));
@@ -331,10 +248,6 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
rHBridgeControl.IoAutoInc = false;
rHBridgeControl.DiagnosticMode = false;
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780i_EnableDSP DSP_HBridgeControl %x rHBridgeControl %x\n",
- DSP_HBridgeControl, MKWORD(rHBridgeControl));
-
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
WriteMsaCfg(DSP_LBusTimeoutDisable, MKWORD(rLBusTimeoutDisable));
@@ -342,24 +255,17 @@ int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
WriteMsaCfg(DSP_ClockControl_2, MKWORD(rClockControl2));
WriteMsaCfg(DSP_ChipReset, MKWORD(rChipReset));
- ChipID = ReadMsaCfg(DSP_ChipID);
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_EnableDSP exiting bRC=true, ChipID %x\n",
- ChipID);
+ ReadMsaCfg(DSP_ChipID);
return 0;
}
-int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings)
+int dsp3780I_DisableDSP(struct dsp_3780i_config_settings *pSettings)
{
unsigned long flags;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_ISA_SLAVE_CONTROL rSlaveControl;
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP entry\n");
-
rSlaveControl.ClockControl = 0;
rSlaveControl.SoftReset = true;
rSlaveControl.ConfigMode = false;
@@ -375,29 +281,20 @@ int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings)
udelay(5);
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_DisableDSP exit\n");
-
return 0;
}
-int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
+int dsp3780I_Reset(struct dsp_3780i_config_settings *pSettings)
{
unsigned long flags;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_BOOT_DOMAIN rBootDomain;
DSP_HBRIDGE_CONTROL rHBridgeControl;
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset entry\n");
-
spin_lock_irqsave(&dsp_lock, flags);
/* Mask DSP to PC interrupt */
MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
- PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rHBridgeControl %x\n",
- MKWORD(rHBridgeControl));
-
rHBridgeControl.EnableDspInt = false;
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
@@ -408,9 +305,6 @@ int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
rBootDomain.NMI = true;
rBootDomain.Reserved = 0;
- PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Reset rBootDomain %x\n",
- MKWORD(rBootDomain));
-
WriteMsaCfg(DSP_MspBootDomain, MKWORD(rBootDomain));
/* Reset all the chiplets and then reactivate them */
@@ -419,24 +313,17 @@ int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings)
WriteMsaCfg(DSP_ChipReset,
(unsigned short) (~pSettings->usChipletEnable));
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Reset exit bRC=0\n");
-
return 0;
}
-int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
+int dsp3780I_Run(struct dsp_3780i_config_settings *pSettings)
{
unsigned long flags;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_BOOT_DOMAIN rBootDomain;
DSP_HBRIDGE_CONTROL rHBridgeControl;
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run entry\n");
-
-
/* Transition the core to a running state */
rBootDomain.ResetCore = true;
rBootDomain.Halt = false;
@@ -459,15 +346,9 @@ int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings)
MKWORD(rHBridgeControl) = InWordDsp(DSP_HBridgeControl);
rHBridgeControl.EnableDspInt = true;
- PRINTK_2(TRACE_3780I, "3780i::dsp3780i_Run rHBridgeControl %x\n",
- MKWORD(rHBridgeControl));
-
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
-
- PRINTK_1(TRACE_3780I, "3780i::dsp3780i_Run exit bRC=true\n");
-
return 0;
}
@@ -479,12 +360,6 @@ int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned short __user *pusBuffer = pvBuffer;
unsigned short val;
-
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_ReadDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/* Set the initial MSA address. No adjustments need to be made to data store addresses */
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
@@ -499,17 +374,9 @@ int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
if(put_user(val, pusBuffer++))
return -EFAULT;
- PRINTK_3(TRACE_3780I,
- "3780I::dsp3780I_ReadDStore uCount %x val %x\n",
- uCount, val);
-
PaceMsaAccess(usDspBaseIO);
}
-
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadDStore exit bRC=true\n");
-
return 0;
}
@@ -521,12 +388,6 @@ int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
unsigned short __user *pusBuffer = pvBuffer;
unsigned short val;
-
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_ReadAndDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/* Set the initial MSA address. No adjustments need to be made to data store addresses */
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
@@ -541,17 +402,9 @@ int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
if(put_user(val, pusBuffer++))
return -EFAULT;
- PRINTK_3(TRACE_3780I,
- "3780I::dsp3780I_ReadAndCleanDStore uCount %x val %x\n",
- uCount, val);
-
PaceMsaAccess(usDspBaseIO);
}
-
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadAndClearDStore exit bRC=true\n");
-
return 0;
}
@@ -562,12 +415,6 @@ int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned long flags;
unsigned short __user *pusBuffer = pvBuffer;
-
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780D_WriteDStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/* Set the initial MSA address. No adjustments need to be made to data store addresses */
spin_lock_irqsave(&dsp_lock, flags);
OutWordDsp(DSP_MsaAddrLow, (unsigned short) ulDSPAddr);
@@ -583,17 +430,9 @@ int dsp3780I_WriteDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
OutWordDsp(DSP_MsaDataDSISHigh, val);
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_3(TRACE_3780I,
- "3780I::dsp3780I_WriteDStore uCount %x val %x\n",
- uCount, val);
-
PaceMsaAccess(usDspBaseIO);
}
-
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780D_WriteDStore exit bRC=true\n");
-
return 0;
}
@@ -604,10 +443,6 @@ int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned long flags;
unsigned short __user *pusBuffer = pvBuffer;
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_ReadIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
/*
* Set the initial MSA address. To convert from an instruction store
* address to an MSA address
@@ -631,17 +466,10 @@ int dsp3780I_ReadIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
if(put_user(val_hi, pusBuffer++))
return -EFAULT;
- PRINTK_4(TRACE_3780I,
- "3780I::dsp3780I_ReadIStore uCount %x val_lo %x val_hi %x\n",
- uCount, val_lo, val_hi);
-
PaceMsaAccess(usDspBaseIO);
}
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_ReadIStore exit bRC=true\n");
-
return 0;
}
@@ -652,11 +480,6 @@ int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned long flags;
unsigned short __user *pusBuffer = pvBuffer;
- PRINTK_5(TRACE_3780I,
- "3780i::dsp3780I_WriteIStore entry usDspBaseIO %x, pusBuffer %p, uCount %x, ulDSPAddr %lx\n",
- usDspBaseIO, pusBuffer, uCount, ulDSPAddr);
-
-
/*
* Set the initial MSA address. To convert from an instruction store
* address to an MSA address
@@ -680,17 +503,9 @@ int dsp3780I_WriteIStore(unsigned short usDspBaseIO, void __user *pvBuffer,
OutWordDsp(DSP_MsaDataDSISHigh, val_hi);
spin_unlock_irqrestore(&dsp_lock, flags);
- PRINTK_4(TRACE_3780I,
- "3780I::dsp3780I_WriteIStore uCount %x val_lo %x val_hi %x\n",
- uCount, val_lo, val_hi);
-
PaceMsaAccess(usDspBaseIO);
-
}
- PRINTK_1(TRACE_3780I,
- "3780I::dsp3780I_WriteIStore exit bRC=true\n");
-
return 0;
}
@@ -700,12 +515,6 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
{
unsigned long flags;
DSP_HBRIDGE_CONTROL rHBridgeControl;
- unsigned short temp;
-
-
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780I_GetIPCSource entry usDspBaseIO %x pusIPCSource %p\n",
- usDspBaseIO, pusIPCSource);
/*
* Disable DSP to PC interrupts, read the interrupt register,
@@ -717,22 +526,11 @@ int dsp3780I_GetIPCSource(unsigned short usDspBaseIO,
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
*pusIPCSource = InWordDsp(DSP_Interrupt);
- temp = (unsigned short) ~(*pusIPCSource);
-
- PRINTK_3(TRACE_3780I,
- "3780i::dsp3780I_GetIPCSource, usIPCSource %x ~ %x\n",
- *pusIPCSource, temp);
-
OutWordDsp(DSP_Interrupt, (unsigned short) ~(*pusIPCSource));
rHBridgeControl.EnableDspInt = true;
OutWordDsp(DSP_HBridgeControl, MKWORD(rHBridgeControl));
spin_unlock_irqrestore(&dsp_lock, flags);
-
- PRINTK_2(TRACE_3780I,
- "3780i::dsp3780I_GetIPCSource exit usIPCSource %x\n",
- *pusIPCSource);
-
return 0;
}
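
The conversion from the driver-private PRINTK_* tracing macros to plain pr_err() leans on the standard pr_fmt mechanism; reduced to its essentials:

/* Must be defined before any include so printk.h picks it up. */
#define pr_fmt(fmt) "3780i: " fmt

#include <linux/printk.h>

static void report_failure(int err)
{
	/* Emits: "3780i: report_failure: error -5" for err == -5 */
	pr_err("%s: error %d\n", __func__, err);
}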
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
index 95164246afd1..53dafceb20e0 100644
--- a/drivers/char/mwave/3780i.h
+++ b/drivers/char/mwave/3780i.h
@@ -261,7 +261,7 @@ typedef struct {
* the only values maintained by the 3780i support layer are the saved UART
* registers.
*/
-typedef struct _DSP_3780I_CONFIG_SETTINGS {
+struct dsp_3780i_config_settings {
/* Location of base configuration register */
unsigned short usBaseConfigIO;
@@ -313,16 +313,16 @@ typedef struct _DSP_3780I_CONFIG_SETTINGS {
unsigned char ucSCR; /* Scratch register */
unsigned char ucDLL; /* Divisor latch, low byte */
unsigned char ucDLM; /* Divisor latch, high byte */
-} DSP_3780I_CONFIG_SETTINGS;
+};
/* 3780i support functions */
-int dsp3780I_EnableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings,
+int dsp3780I_EnableDSP(struct dsp_3780i_config_settings *pSettings,
unsigned short *pIrqMap,
unsigned short *pDmaMap);
-int dsp3780I_DisableDSP(DSP_3780I_CONFIG_SETTINGS * pSettings);
-int dsp3780I_Reset(DSP_3780I_CONFIG_SETTINGS * pSettings);
-int dsp3780I_Run(DSP_3780I_CONFIG_SETTINGS * pSettings);
+int dsp3780I_DisableDSP(struct dsp_3780i_config_settings *pSettings);
+int dsp3780I_Reset(struct dsp_3780i_config_settings *pSettings);
+int dsp3780I_Run(struct dsp_3780i_config_settings *pSettings);
int dsp3780I_ReadDStore(unsigned short usDspBaseIO, void __user *pvBuffer,
unsigned uCount, unsigned long ulDSPAddr);
int dsp3780I_ReadAndClearDStore(unsigned short usDspBaseIO,
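
The same mechanical conversion recurs throughout this series: kernel style prefers plain struct tags over typedef'd aggregates, so only the declarations change and member accesses are untouched. In miniature, with placeholder names:

/* Before: a typedef'd aggregate with a throwaway tag. */
typedef struct _EXAMPLE_SETTINGS {
	unsigned short usBaseIO;
} EXAMPLE_SETTINGS;

/* After: a plain struct tag; members and layout are untouched. */
struct example_settings {
	unsigned short usBaseIO;
};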
diff --git a/drivers/char/mwave/Makefile b/drivers/char/mwave/Makefile
index a24fe96e3c96..e56c1a375535 100644
--- a/drivers/char/mwave/Makefile
+++ b/drivers/char/mwave/Makefile
@@ -8,9 +8,3 @@
obj-$(CONFIG_MWAVE) += mwave.o
mwave-y := mwavedd.o smapi.o tp3780i.o 3780i.o
-
-# To have the mwave driver disable other uarts if necessary
-# ccflags-y := -DMWAVE_FUTZ_WITH_OTHER_DEVICES
-
-# To compile in lots (~20 KiB) of run-time enablable printk()s for debugging:
-ccflags-y += -DMW_TRACE
diff --git a/drivers/char/mwave/README b/drivers/char/mwave/README
index c2a58f428bc8..6224aa814c62 100644
--- a/drivers/char/mwave/README
+++ b/drivers/char/mwave/README
@@ -4,16 +4,6 @@ Module options
The mwave module takes the following options. Note that these options
are not saved by the BIOS and so do not persist after unload and reload.
- mwave_debug=value, where value is bitwise OR of trace flags:
- 0x0001 mwavedd api tracing
- 0x0002 smapi api tracing
- 0x0004 3780i tracing
- 0x0008 tp3780i tracing
-
- Tracing only occurs if the driver has been compiled with the
- MW_TRACE macro #defined (i.e. let ccflags-y := -DMW_TRACE
- in the Makefile).
-
mwave_3780i_irq=5/7/10/11/15
If the dsp irq has not been setup and stored in bios by the
thinkpad configuration utility then this parameter allows the
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 11272d605ecd..640a9cb0dd8d 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "mwavedd: " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
@@ -75,131 +77,62 @@ MODULE_LICENSE("GPL");
* We'll depend on users using the tpctl utility to do that for now
*/
static DEFINE_MUTEX(mwave_mutex);
-int mwave_debug = 0;
int mwave_3780i_irq = 0;
int mwave_3780i_io = 0;
int mwave_uart_irq = 0;
int mwave_uart_io = 0;
-module_param(mwave_debug, int, 0);
module_param_hw(mwave_3780i_irq, int, irq, 0);
module_param_hw(mwave_3780i_io, int, ioport, 0);
module_param_hw(mwave_uart_irq, int, irq, 0);
module_param_hw(mwave_uart_io, int, ioport, 0);
-static int mwave_open(struct inode *inode, struct file *file);
-static int mwave_close(struct inode *inode, struct file *file);
-static long mwave_ioctl(struct file *filp, unsigned int iocmd,
- unsigned long ioarg);
-
-MWAVE_DEVICE_DATA mwave_s_mdd;
-
-static int mwave_open(struct inode *inode, struct file *file)
-{
- unsigned int retval = 0;
-
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_open, entry inode %p file %p\n",
- inode, file);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_open, exit return retval %x\n", retval);
-
- return retval;
-}
-
-static int mwave_close(struct inode *inode, struct file *file)
-{
- unsigned int retval = 0;
-
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_close, entry inode %p file %p\n",
- inode, file);
-
- PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_close, exit retval %x\n",
- retval);
-
- return retval;
-}
+struct mwave_device_data mwave_s_mdd;
static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned long ioarg)
{
unsigned int retval = 0;
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
void __user *arg = (void __user *)ioarg;
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, entry file %p cmd %x arg %x\n",
- file, iocmd, (int) ioarg);
-
switch (iocmd) {
case IOCTL_MW_RESET:
- PRINTK_1(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
- " calling tp3780I_ResetDSP\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_ResetDSP(&pDrvData->rBDData);
mutex_unlock(&mwave_mutex);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RESET"
- " retval %x from tp3780I_ResetDSP\n",
- retval);
break;
case IOCTL_MW_RUN:
- PRINTK_1(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
- " calling tp3780I_StartDSP\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_StartDSP(&pDrvData->rBDData);
mutex_unlock(&mwave_mutex);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_RUN"
- " retval %x from tp3780I_StartDSP\n",
- retval);
break;
case IOCTL_MW_DSP_ABILITIES: {
- MW_ABILITIES rAbilities;
+ struct mw_abilities rAbilities;
- PRINTK_1(TRACE_MWAVE,
- "mwavedd::mwave_ioctl,"
- " IOCTL_MW_DSP_ABILITIES calling"
- " tp3780I_QueryAbilities\n");
mutex_lock(&mwave_mutex);
retval = tp3780I_QueryAbilities(&pDrvData->rBDData,
&rAbilities);
mutex_unlock(&mwave_mutex);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
- " retval %x from tp3780I_QueryAbilities\n",
- retval);
if (retval == 0) {
- if( copy_to_user(arg, &rAbilities,
- sizeof(MW_ABILITIES)) )
+ if (copy_to_user(arg, &rAbilities, sizeof(rAbilities)))
return -EFAULT;
}
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES"
- " exit retval %x\n",
- retval);
}
break;
case IOCTL_MW_READ_DATA:
case IOCTL_MW_READCLEAR_DATA: {
- MW_READWRITE rReadData;
+ struct mw_readwrite rReadData;
unsigned short __user *pusBuffer = NULL;
if( copy_from_user(&rReadData, arg,
- sizeof(MW_READWRITE)) )
+ sizeof(struct mw_readwrite)) )
return -EFAULT;
pusBuffer = (unsigned short __user *) (rReadData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rReadData.ulDataLength, ioarg, pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd,
@@ -211,19 +144,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
break;
case IOCTL_MW_READ_INST: {
- MW_READWRITE rReadData;
+ struct mw_readwrite rReadData;
unsigned short __user *pusBuffer = NULL;
- if( copy_from_user(&rReadData, arg,
- sizeof(MW_READWRITE)) )
+ if (copy_from_user(&rReadData, arg, sizeof(rReadData)))
return -EFAULT;
pusBuffer = (unsigned short __user *) (rReadData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_READ_INST,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rReadData.ulDataLength / 2, ioarg,
- pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd, pusBuffer,
@@ -234,19 +161,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
break;
case IOCTL_MW_WRITE_DATA: {
- MW_READWRITE rWriteData;
+ struct mw_readwrite rWriteData;
unsigned short __user *pusBuffer = NULL;
- if( copy_from_user(&rWriteData, arg,
- sizeof(MW_READWRITE)) )
+ if (copy_from_user(&rWriteData, arg, sizeof(rWriteData)))
return -EFAULT;
pusBuffer = (unsigned short __user *) (rWriteData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_WRITE_DATA,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rWriteData.ulDataLength, ioarg,
- pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData,
iocmd, pusBuffer,
@@ -257,19 +178,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
break;
case IOCTL_MW_WRITE_INST: {
- MW_READWRITE rWriteData;
+ struct mw_readwrite rWriteData;
unsigned short __user *pusBuffer = NULL;
- if( copy_from_user(&rWriteData, arg,
- sizeof(MW_READWRITE)) )
+ if (copy_from_user(&rWriteData, arg, sizeof(rWriteData)))
return -EFAULT;
pusBuffer = (unsigned short __user *)(rWriteData.pBuf);
- PRINTK_4(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_WRITE_INST,"
- " size %lx, ioarg %lx pusBuffer %p\n",
- rWriteData.ulDataLength, ioarg,
- pusBuffer);
mutex_lock(&mwave_mutex);
retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData,
iocmd, pusBuffer,
@@ -283,30 +198,17 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned int ipcnum = (unsigned int) ioarg;
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_ioctl:"
- " IOCTL_MW_REGISTER_IPC:"
- " Error: Invalid ipcnum %x\n",
- ipcnum);
+ pr_err("%s: IOCTL_MW_REGISTER_IPC: Error: Invalid ipcnum %x\n",
+ __func__, ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
ARRAY_SIZE(pDrvData->IPCs));
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
- " ipcnum %x entry usIntCount %x\n",
- ipcnum,
- pDrvData->IPCs[ipcnum].usIntCount);
mutex_lock(&mwave_mutex);
pDrvData->IPCs[ipcnum].bIsHere = false;
pDrvData->IPCs[ipcnum].bIsEnabled = true;
mutex_unlock(&mwave_mutex);
-
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
- " ipcnum %x exit\n",
- ipcnum);
}
break;
@@ -314,28 +216,17 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
unsigned int ipcnum = (unsigned int) ioarg;
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_ioctl:"
- " IOCTL_MW_GET_IPC: Error:"
- " Invalid ipcnum %x\n", ipcnum);
+ pr_err("%s: IOCTL_MW_GET_IPC: Error: Invalid ipcnum %x\n", __func__,
+ ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
ARRAY_SIZE(pDrvData->IPCs));
- PRINTK_3(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
- " ipcnum %x, usIntCount %x\n",
- ipcnum,
- pDrvData->IPCs[ipcnum].usIntCount);
-
+
mutex_lock(&mwave_mutex);
if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
DECLARE_WAITQUEUE(wait, current);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl, thread for"
- " ipc %x going to sleep\n",
- ipcnum);
add_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
pDrvData->IPCs[ipcnum].bIsHere = true;
set_current_state(TASK_INTERRUPTIBLE);
@@ -343,31 +234,15 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
/* the interrupt handler while we were gone */
if (pDrvData->IPCs[ipcnum].usIntCount == 1) { /* first int has occurred (race condition) */
pDrvData->IPCs[ipcnum].usIntCount = 2; /* first int has been handled */
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl"
- " IOCTL_MW_GET_IPC ipcnum %x"
- " handling first int\n",
- ipcnum);
} else { /* either 1st int has not yet occurred, or we have already handled the first int */
schedule();
if (pDrvData->IPCs[ipcnum].usIntCount == 1) {
pDrvData->IPCs[ipcnum].usIntCount = 2;
}
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl"
- " IOCTL_MW_GET_IPC ipcnum %x"
- " woke up and returning to"
- " application\n",
- ipcnum);
}
pDrvData->IPCs[ipcnum].bIsHere = false;
remove_wait_queue(&pDrvData->IPCs[ipcnum].ipc_wait_queue, &wait);
set_current_state(TASK_RUNNING);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC,"
- " returning thread for ipc %x"
- " processing\n",
- ipcnum);
}
mutex_unlock(&mwave_mutex);
}
@@ -376,16 +251,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
case IOCTL_MW_UNREGISTER_IPC: {
unsigned int ipcnum = (unsigned int) ioarg;
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_ioctl IOCTL_MW_UNREGISTER_IPC"
- " ipcnum %x\n",
- ipcnum);
if (ipcnum >= ARRAY_SIZE(pDrvData->IPCs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_ioctl:"
- " IOCTL_MW_UNREGISTER_IPC:"
- " Error: Invalid ipcnum %x\n",
- ipcnum);
+ pr_err("%s: IOCTL_MW_UNREGISTER_IPC: Error: Invalid ipcnum %x\n",
+ __func__, ipcnum);
return -EINVAL;
}
ipcnum = array_index_nospec(ipcnum,
@@ -405,35 +273,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
return -ENOTTY;
} /* switch */
- PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, exit retval %x\n", retval);
-
return retval;
}
-
-static ssize_t mwave_read(struct file *file, char __user *buf, size_t count,
- loff_t * ppos)
-{
- PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_read entry file %p, buf %p, count %zx ppos %p\n",
- file, buf, count, ppos);
-
- return -EINVAL;
-}
-
-
-static ssize_t mwave_write(struct file *file, const char __user *buf,
- size_t count, loff_t * ppos)
-{
- PRINTK_5(TRACE_MWAVE,
- "mwavedd::mwave_write entry file %p, buf %p,"
- " count %zx ppos %p\n",
- file, buf, count, ppos);
-
- return -EINVAL;
-}
-
-
static int register_serial_portandirq(unsigned int port, int irq)
{
struct uart_8250_port uart;
@@ -446,9 +288,7 @@ static int register_serial_portandirq(unsigned int port, int irq)
/* OK */
break;
default:
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::register_serial_portandirq:"
- " Error: Illegal port %x\n", port );
+ pr_err("%s: Error: Illegal port %x\n", __func__, port);
return -1;
} /* switch */
/* port is okay */
@@ -461,9 +301,7 @@ static int register_serial_portandirq(unsigned int port, int irq)
/* OK */
break;
default:
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::register_serial_portandirq:"
- " Error: Illegal irq %x\n", irq );
+ pr_err("%s: Error: Illegal irq %x\n", __func__, irq);
return -1;
} /* switch */
/* irq is okay */
@@ -478,56 +316,14 @@ static int register_serial_portandirq(unsigned int port, int irq)
return serial8250_register_8250_port(&uart);
}
-
static const struct file_operations mwave_fops = {
.owner = THIS_MODULE,
- .read = mwave_read,
- .write = mwave_write,
.unlocked_ioctl = mwave_ioctl,
- .open = mwave_open,
- .release = mwave_close,
.llseek = default_llseek,
};
-
static struct miscdevice mwave_misc_dev = { MWAVE_MINOR, "mwave", &mwave_fops };
-#if 0 /* totally b0rked */
-/*
- * sysfs support <paulsch@us.ibm.com>
- */
-
-struct device mwave_device;
-
-/* Prevent code redundancy, create a macro for mwave_show_* functions. */
-#define mwave_show_function(attr_name, format_string, field) \
-static ssize_t mwave_show_##attr_name(struct device *dev, struct device_attribute *attr, char *buf) \
-{ \
- DSP_3780I_CONFIG_SETTINGS *pSettings = \
- &mwave_s_mdd.rBDData.rDspSettings; \
- return sprintf(buf, format_string, pSettings->field); \
-}
-
-/* All of our attributes are read attributes. */
-#define mwave_dev_rd_attr(attr_name, format_string, field) \
- mwave_show_function(attr_name, format_string, field) \
-static DEVICE_ATTR(attr_name, S_IRUGO, mwave_show_##attr_name, NULL)
-
-mwave_dev_rd_attr (3780i_dma, "%i\n", usDspDma);
-mwave_dev_rd_attr (3780i_irq, "%i\n", usDspIrq);
-mwave_dev_rd_attr (3780i_io, "%#.4x\n", usDspBaseIO);
-mwave_dev_rd_attr (uart_irq, "%i\n", usUartIrq);
-mwave_dev_rd_attr (uart_io, "%#.4x\n", usUartBaseIO);
-
-static struct device_attribute * const mwave_dev_attrs[] = {
- &dev_attr_3780i_dma,
- &dev_attr_3780i_irq,
- &dev_attr_3780i_io,
- &dev_attr_uart_irq,
- &dev_attr_uart_io,
-};
-#endif
-
/*
* mwave_init is called on module load
*
@@ -536,20 +332,7 @@ static struct device_attribute * const mwave_dev_attrs[] = {
*/
static void mwave_exit(void)
{
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
-
- PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit entry\n");
-
-#if 0
- for (i = 0; i < pDrvData->nr_registered_attrs; i++)
- device_remove_file(&mwave_device, mwave_dev_attrs[i]);
- pDrvData->nr_registered_attrs = 0;
-
- if (pDrvData->device_registered) {
- device_unregister(&mwave_device);
- pDrvData->device_registered = false;
- }
-#endif
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
if ( pDrvData->sLine >= 0 ) {
serial8250_unregister_port(pDrvData->sLine);
@@ -566,8 +349,6 @@ static void mwave_exit(void)
if (pDrvData->bBDInitialized) {
tp3780I_Cleanup(&pDrvData->rBDData);
}
-
- PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_exit exit\n");
}
module_exit(mwave_exit);
@@ -576,11 +357,9 @@ static int __init mwave_init(void)
{
int i;
int retval = 0;
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
- PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_init entry\n");
-
- memset(&mwave_s_mdd, 0, sizeof(MWAVE_DEVICE_DATA));
+ memset(&mwave_s_mdd, 0, sizeof(mwave_s_mdd));
pDrvData->bBDInitialized = false;
pDrvData->bResourcesClaimed = false;
@@ -597,60 +376,34 @@ static int __init mwave_init(void)
}
retval = tp3780I_InitializeBoardData(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_InitializeBoardData"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_init: Error:"
- " Failed to initialize board data\n");
+ pr_err("%s: Error: Failed to initialize board data\n", __func__);
goto cleanup_error;
}
pDrvData->bBDInitialized = true;
retval = tp3780I_CalcResources(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_CalcResources"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to calculate resources\n");
+ pr_err("%s: Error: Failed to calculate resources\n", __func__);
goto cleanup_error;
}
retval = tp3780I_ClaimResources(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_ClaimResources"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to claim resources\n");
+ pr_err("%s: Error: Failed to claim resources\n", __func__);
goto cleanup_error;
}
pDrvData->bResourcesClaimed = true;
retval = tp3780I_EnableDSP(&pDrvData->rBDData);
- PRINTK_2(TRACE_MWAVE,
- "mwavedd::mwave_init, return from tp3780I_EnableDSP"
- " retval %x\n",
- retval);
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to enable DSP\n");
+ pr_err("%s: Error: Failed to enable DSP\n", __func__);
goto cleanup_error;
}
pDrvData->bDSPEnabled = true;
if (misc_register(&mwave_misc_dev) < 0) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to register misc device\n");
+ pr_err("%s: Error: Failed to register misc device\n", __func__);
goto cleanup_error;
}
pDrvData->bMwaveDevRegistered = true;
@@ -660,40 +413,16 @@ static int __init mwave_init(void)
pDrvData->rBDData.rDspSettings.usUartIrq
);
if (pDrvData->sLine < 0) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to register serial driver\n");
+ pr_err("%s: Error: Failed to register serial driver\n", __func__);
goto cleanup_error;
}
/* uart is registered */
-#if 0
- /* sysfs */
- memset(&mwave_device, 0, sizeof (struct device));
- dev_set_name(&mwave_device, "mwave");
-
- if (device_register(&mwave_device))
- goto cleanup_error;
- pDrvData->device_registered = true;
- for (i = 0; i < ARRAY_SIZE(mwave_dev_attrs); i++) {
- if(device_create_file(&mwave_device, mwave_dev_attrs[i])) {
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd:mwave_init: Error:"
- " Failed to create sysfs file %s\n",
- mwave_dev_attrs[i]->attr.name);
- goto cleanup_error;
- }
- pDrvData->nr_registered_attrs++;
- }
-#endif
-
/* SUCCESS! */
return 0;
cleanup_error:
- PRINTK_ERROR(KERN_ERR_MWAVE
- "mwavedd::mwave_init: Error:"
- " Failed to initialize\n");
+ pr_err("%s: Error: Failed to initialize\n", __func__);
mwave_exit(); /* clean up */
return -EIO;
diff --git a/drivers/char/mwave/mwavedd.h b/drivers/char/mwave/mwavedd.h
index 21cb09c7bed7..e1da1493eec5 100644
--- a/drivers/char/mwave/mwavedd.h
+++ b/drivers/char/mwave/mwavedd.h
@@ -56,97 +56,35 @@
#include <linux/uaccess.h>
#include <linux/wait.h>
-extern int mwave_debug;
extern int mwave_3780i_irq;
extern int mwave_3780i_io;
extern int mwave_uart_irq;
extern int mwave_uart_io;
-#define PRINTK_ERROR printk
-#define KERN_ERR_MWAVE KERN_ERR "mwave: "
-
-#define TRACE_MWAVE 0x0001
-#define TRACE_SMAPI 0x0002
-#define TRACE_3780I 0x0004
-#define TRACE_TP3780I 0x0008
-
-#ifdef MW_TRACE
-#define PRINTK_1(f,s) \
- if (f & (mwave_debug)) { \
- printk(s); \
- }
-
-#define PRINTK_2(f,s,v1) \
- if (f & (mwave_debug)) { \
- printk(s,v1); \
- }
-
-#define PRINTK_3(f,s,v1,v2) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2); \
- }
-
-#define PRINTK_4(f,s,v1,v2,v3) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3); \
- }
-
-#define PRINTK_5(f,s,v1,v2,v3,v4) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4); \
- }
-
-#define PRINTK_6(f,s,v1,v2,v3,v4,v5) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4,v5); \
- }
-
-#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4,v5,v6); \
- }
-
-#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7) \
- if (f & (mwave_debug)) { \
- printk(s,v1,v2,v3,v4,v5,v6,v7); \
- }
-
-#else
-#define PRINTK_1(f,s)
-#define PRINTK_2(f,s,v1)
-#define PRINTK_3(f,s,v1,v2)
-#define PRINTK_4(f,s,v1,v2,v3)
-#define PRINTK_5(f,s,v1,v2,v3,v4)
-#define PRINTK_6(f,s,v1,v2,v3,v4,v5)
-#define PRINTK_7(f,s,v1,v2,v3,v4,v5,v6)
-#define PRINTK_8(f,s,v1,v2,v3,v4,v5,v6,v7)
-#endif
-
-
-typedef struct _MWAVE_IPC {
+struct mwave_ipc {
unsigned short usIntCount; /* 0=none, 1=first, 2=greater than 1st */
bool bIsEnabled;
bool bIsHere;
/* entry spin lock */
wait_queue_head_t ipc_wait_queue;
-} MWAVE_IPC;
+};
-typedef struct _MWAVE_DEVICE_DATA {
- THINKPAD_BD_DATA rBDData; /* board driver's data area */
+struct mwave_device_data {
+ struct thinkpad_bd_data rBDData; /* board driver's data area */
unsigned long ulIPCSource_ISR; /* IPC source bits for recently processed intr, set during ISR processing */
unsigned long ulIPCSource_DPC; /* IPC source bits for recently processed intr, set during DPC processing */
bool bBDInitialized;
bool bResourcesClaimed;
bool bDSPEnabled;
bool bDSPReset;
- MWAVE_IPC IPCs[16];
+ struct mwave_ipc IPCs[16];
bool bMwaveDevRegistered;
short sLine;
int nr_registered_attrs;
int device_registered;
-} MWAVE_DEVICE_DATA, *pMWAVE_DEVICE_DATA;
+};
-extern MWAVE_DEVICE_DATA mwave_s_mdd;
+extern struct mwave_device_data mwave_s_mdd;
#endif
diff --git a/drivers/char/mwave/mwavepub.h b/drivers/char/mwave/mwavepub.h
index 60c961ae23b4..280327bdaa38 100644
--- a/drivers/char/mwave/mwavepub.h
+++ b/drivers/char/mwave/mwavepub.h
@@ -53,7 +53,7 @@
#include <linux/miscdevice.h>
-typedef struct _MW_ABILITIES {
+struct mw_abilities {
unsigned long instr_per_sec;
unsigned long data_size;
unsigned long inst_size;
@@ -63,27 +63,27 @@ typedef struct _MW_ABILITIES {
unsigned long component_list[7];
char mwave_os_name[16];
char bios_task_name[16];
-} MW_ABILITIES, *pMW_ABILITIES;
+};
-typedef struct _MW_READWRITE {
+struct mw_readwrite {
unsigned short usDspAddress; /* The dsp address */
unsigned long ulDataLength; /* The size in bytes of the data or user buffer */
void __user *pBuf; /* Input:variable sized buffer */
-} MW_READWRITE, *pMW_READWRITE;
+};
#define IOCTL_MW_RESET _IO(MWAVE_MINOR,1)
#define IOCTL_MW_RUN _IO(MWAVE_MINOR,2)
-#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,MW_ABILITIES)
-#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,MW_READWRITE)
-#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,MW_READWRITE)
-#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,MW_READWRITE)
-#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,MW_READWRITE)
-#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,MW_READWRITE)
+#define IOCTL_MW_DSP_ABILITIES _IOR(MWAVE_MINOR,3,struct mw_abilities)
+#define IOCTL_MW_READ_DATA _IOR(MWAVE_MINOR,4,struct mw_readwrite)
+#define IOCTL_MW_READCLEAR_DATA _IOR(MWAVE_MINOR,5,struct mw_readwrite)
+#define IOCTL_MW_READ_INST _IOR(MWAVE_MINOR,6,struct mw_readwrite)
+#define IOCTL_MW_WRITE_DATA _IOW(MWAVE_MINOR,7,struct mw_readwrite)
+#define IOCTL_MW_WRITE_INST _IOW(MWAVE_MINOR,8,struct mw_readwrite)
#define IOCTL_MW_REGISTER_IPC _IOW(MWAVE_MINOR,9,int)
#define IOCTL_MW_UNREGISTER_IPC _IOW(MWAVE_MINOR,10,int)
#define IOCTL_MW_GET_IPC _IOW(MWAVE_MINOR,11,int)
-#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,MW_READWRITE)
+#define IOCTL_MW_TRACE _IOR(MWAVE_MINOR,12,struct mw_readwrite)
#endif
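
Because _IOR()/_IOW() encode the argument's size rather than its name, swapping the typedefs for struct tags leaves every request number unchanged and existing userspace keeps working. A userspace sketch, assuming mwavepub.h from the kernel tree is on the include path:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "mwavepub.h"	/* struct mw_abilities, IOCTL_MW_DSP_ABILITIES */

int main(void)
{
	struct mw_abilities ab;
	int fd = open("/dev/mwave", O_RDWR);

	if (fd < 0) {
		perror("open /dev/mwave");
		return 1;
	}
	if (ioctl(fd, IOCTL_MW_DSP_ABILITIES, &ab) == 0)
		printf("mwave OS name: %.16s\n", ab.mwave_os_name);
	close(fd);
	return 0;
}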
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index f8d79d393b69..df6354b24339 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "smapi: " fmt
+
#include <linux/kernel.h>
#include <linux/mc146818rtc.h> /* CMOS defines */
#include "smapi.h"
@@ -69,10 +71,6 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
unsigned short usSmapiOK = -EIO, *pusSmapiOK = &usSmapiOK;
unsigned int inBXCX = (inBX << 16) | inCX;
unsigned int inDISI = (inDI << 16) | inSI;
- int retval = 0;
-
- PRINTK_5(TRACE_SMAPI, "inBX %x inCX %x inDI %x inSI %x\n",
- inBX, inCX, inDI, inSI);
__asm__ __volatile__("movw $0x5380,%%ax\n\t"
"movl %7,%%ebx\n\t"
@@ -107,10 +105,6 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
:"%eax", "%ebx", "%ecx", "%edx", "%edi",
"%esi");
- PRINTK_8(TRACE_SMAPI,
- "myoutAX %x myoutBX %x myoutCX %x myoutDX %x myoutDI %x myoutSI %x usSmapiOK %x\n",
- myoutAX, myoutBX, myoutCX, myoutDX, myoutDI, myoutSI,
- usSmapiOK);
*outAX = myoutAX;
*outBX = myoutBX;
*outCX = myoutCX;
@@ -118,13 +112,11 @@ static int smapi_request(unsigned short inBX, unsigned short inCX,
*outDI = myoutDI;
*outSI = myoutSI;
- retval = (usSmapiOK == 1) ? 0 : -EIO;
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_request exit retval %x\n", retval);
- return retval;
+ return usSmapiOK == 1 ? 0 : -EIO;
}
-int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
+int smapi_query_DSP_cfg(struct smapi_dsp_settings *pSettings)
{
int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
@@ -134,17 +126,13 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
static const unsigned short ausUartBases[] = {
0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
-
bRC = smapi_request(0x1802, 0x0000, 0, 0,
&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
if (bRC) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Error: Could not get DSP Settings. Aborting.\n");
+ pr_err("%s: Error: Could not get DSP Settings. Aborting.\n", __func__);
return bRC;
}
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
-
pSettings->bDSPPresent = ((usBX & 0x0100) != 0);
pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
pSettings->usDspIRQ = usSI & 0x00FF;
@@ -154,27 +142,20 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
} else {
pSettings->usDspBaseIO = 0;
}
- PRINTK_6(TRACE_SMAPI,
- "smapi::smapi_query_DSP_cfg get DSP Settings bDSPPresent %x bDSPEnabled %x usDspIRQ %x usDspDMA %x usDspBaseIO %x\n",
- pSettings->bDSPPresent, pSettings->bDSPEnabled,
- pSettings->usDspIRQ, pSettings->usDspDMA,
- pSettings->usDspBaseIO);
/* check for illegal values */
if ( pSettings->usDspBaseIO == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP base I/O address is 0\n");
+ pr_err("%s: Worry: DSP base I/O address is 0\n", __func__);
if ( pSettings->usDspIRQ == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: DSP IRQ line is 0\n");
+ pr_err("%s: Worry: DSP IRQ line is 0\n", __func__);
bRC = smapi_request(0x1804, 0x0000, 0, 0,
&usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
if (bRC) {
- PRINTK_ERROR("smapi::smapi_query_DSP_cfg: Error: Could not get DSP modem settings. Aborting.\n");
+ pr_err("%s: Error: Could not get DSP modem settings. Aborting.\n", __func__);
return bRC;
}
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg, smapi_request OK\n");
-
pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
pSettings->usUartIRQ = usSI & 0x000F;
if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
@@ -183,19 +164,11 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->usUartBaseIO = 0;
}
- PRINTK_4(TRACE_SMAPI,
- "smapi::smapi_query_DSP_cfg get DSP modem settings bModemEnabled %x usUartIRQ %x usUartBaseIO %x\n",
- pSettings->bModemEnabled,
- pSettings->usUartIRQ,
- pSettings->usUartBaseIO);
-
/* check for illegal values */
if ( pSettings->usUartBaseIO == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART base I/O address is 0\n");
+ pr_err("%s: Worry: UART base I/O address is 0\n", __func__);
if ( pSettings->usUartIRQ == 0 )
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_query_DSP_cfg: Worry: UART IRQ line is 0\n");
-
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg exit bRC %x\n", bRC);
+ pr_err("%s: Worry: UART IRQ line is 0\n", __func__);
return bRC;
}
@@ -218,17 +191,14 @@ int smapi_set_DSP_cfg(void)
unsigned short dspio_index = 0, uartio_index = 0;
- PRINTK_5(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg entry mwave_3780i_irq %x mwave_3780i_io %x mwave_uart_irq %x mwave_uart_io %x\n",
- mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
-
if (mwave_3780i_io) {
for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
if (mwave_3780i_io == ausDspBases[i])
break;
}
if (i == ARRAY_SIZE(ausDspBases)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
+ pr_err("%s: Error: Invalid mwave_3780i_io address %x. Aborting.\n",
+ __func__, mwave_3780i_io);
return bRC;
}
dspio_index = i;
@@ -240,7 +210,8 @@ int smapi_set_DSP_cfg(void)
break;
}
if (i == ARRAY_SIZE(ausDspIrqs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
+ pr_err("%s: Error: Invalid mwave_3780i_irq %x. Aborting.\n", __func__,
+ mwave_3780i_irq);
return bRC;
}
}
@@ -251,7 +222,8 @@ int smapi_set_DSP_cfg(void)
break;
}
if (i == ARRAY_SIZE(ausUartBases)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
+ pr_err("%s: Error: Invalid mwave_uart_io address %x. Aborting.\n", __func__,
+ mwave_uart_io);
return bRC;
}
uartio_index = i;
@@ -264,7 +236,8 @@ int smapi_set_DSP_cfg(void)
break;
}
if (i == ARRAY_SIZE(ausUartIrqs)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
+ pr_err("%s: Error: Invalid mwave_uart_irq %x. Aborting.\n", __func__,
+ mwave_uart_irq);
return bRC;
}
}
@@ -279,46 +252,15 @@ int smapi_set_DSP_cfg(void)
if (usBX & 0x0100) { /* serial port A is present */
if (usCX & 1) { /* serial port is enabled */
if ((usSI & 0xFF) == mwave_uart_irq) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port A irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port\n");
- bRC = smapi_request(0x1403, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1402, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port A irq %x conflicts with mwave_uart_irq %x\n",
+ __func__, usSI & 0xFF, mwave_uart_irq);
goto exit_conflict;
-#endif
} else {
if ((usSI >> 8) == uartio_index) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port A\n");
- bRC = smapi_request (0x1403, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request (0x1402, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port A base I/O address %x conflicts with mwave uart I/O %x\n",
+ __func__, ausUartBases[usSI >> 8],
+ ausUartBases[uartio_index]);
goto exit_conflict;
-#endif
}
}
}
@@ -332,46 +274,15 @@ int smapi_set_DSP_cfg(void)
if (usBX & 0x0100) { /* serial port B is present */
if (usCX & 1) { /* serial port is enabled */
if ((usSI & 0xFF) == mwave_uart_irq) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port B irq %x conflicts with mwave_uart_irq %x\n", usSI & 0xFF, mwave_uart_irq);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
- bRC = smapi_request(0x1405, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1404, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port B irq %x conflicts with mwave_uart_irq %x\n",
+ __func__, usSI & 0xFF, mwave_uart_irq);
goto exit_conflict;
-#endif
} else {
if ((usSI >> 8) == uartio_index) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI >> 8], ausUartBases[uartio_index]);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1 (TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting serial port B\n");
- bRC = smapi_request (0x1405, 0x0100, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request (0x1404, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: Serial port B base I/O address %x conflicts with mwave uart I/O %x\n",
+ __func__, ausUartBases[usSI >> 8],
+ ausUartBases[uartio_index]);
goto exit_conflict;
-#endif
}
}
}
@@ -387,58 +298,15 @@ int smapi_set_DSP_cfg(void)
/* bRC == 0 */
if ((usCX & 0xff) != 0xff) { /* IR port not disabled */
if ((usCX & 0xff) == mwave_uart_irq) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: IR port irq %x conflicts with mwave_uart_irq %x\n", usCX & 0xff, mwave_uart_irq);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
- bRC = smapi_request(0x1701, 0x0100, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1700, 0, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1704, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: IR port irq %x conflicts with mwave_uart_irq %x\n",
+ __func__, usCX & 0xff, mwave_uart_irq);
goto exit_conflict;
-#endif
} else {
if ((usSI & 0xff) == uartio_index) {
-#ifndef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_ERROR(KERN_ERR_MWAVE
- "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
-#else
- PRINTK_3(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg: IR port base I/O address %x conflicts with mwave uart I/O %x\n", ausUartBases[usSI & 0xff], ausUartBases[uartio_index]);
-#endif
-#ifdef MWAVE_FUTZ_WITH_OTHER_DEVICES
- PRINTK_1(TRACE_SMAPI,
- "smapi::smapi_set_DSP_cfg Disabling conflicting IR port\n");
- bRC = smapi_request(0x1701, 0x0100, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1700, 0, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1705, 0x01ff, 0, usSI,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
- bRC = smapi_request(0x1704, 0x0000, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
- if (bRC) goto exit_smapi_request_error;
-#else
+ pr_err("%s: IR port base I/O address %x conflicts with mwave uart I/O %x\n",
+ __func__, ausUartBases[usSI & 0xff],
+ ausUartBases[uartio_index]);
goto exit_conflict;
-#endif
}
}
}
@@ -482,7 +350,6 @@ int smapi_set_DSP_cfg(void)
if (bRC) goto exit_smapi_request_error;
/* normal exit: */
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_set_DSP_cfg exit\n");
return 0;
exit_conflict:
@@ -490,64 +357,32 @@ exit_conflict:
return -EIO;
exit_smapi_request_error:
- PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg exit on smapi_request error bRC %x\n", bRC);
+ pr_err("%s: exit on smapi_request error bRC %x\n", __func__, bRC);
return bRC;
}
int smapi_set_DSP_power_state(bool bOn)
{
- int bRC;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
unsigned short usPowerFunction;
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state entry bOn %x\n", bOn);
-
usPowerFunction = (bOn) ? 1 : 0;
- bRC = smapi_request(0x4901, 0x0000, 0, usPowerFunction,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
-
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_set_DSP_power_state exit bRC %x\n", bRC);
-
- return bRC;
+ return smapi_request(0x4901, 0x0000, 0, usPowerFunction, &usAX, &usBX, &usCX, &usDX, &usDI,
+ &usSI);
}
-#if 0
-static int SmapiQuerySystemID(void)
-{
- int bRC = -EIO;
- unsigned short usAX = 0xffff, usBX = 0xffff, usCX = 0xffff,
- usDX = 0xffff, usDI = 0xffff, usSI = 0xffff;
-
- printk("smapi::SmapiQUerySystemID entry\n");
- bRC = smapi_request(0x0000, 0, 0, 0,
- &usAX, &usBX, &usCX, &usDX, &usDI, &usSI);
-
- if (bRC == 0) {
- printk("AX=%x, BX=%x, CX=%x, DX=%x, DI=%x, SI=%x\n",
- usAX, usBX, usCX, usDX, usDI, usSI);
- } else {
- printk("smapi::SmapiQuerySystemID smapi_request error\n");
- }
-
- return bRC;
-}
-#endif /* 0 */
-
int smapi_init(void)
{
int retval = -EIO;
unsigned short usSmapiID = 0;
unsigned long flags;
- PRINTK_1(TRACE_SMAPI, "smapi::smapi_init entry\n");
-
spin_lock_irqsave(&rtc_lock, flags);
usSmapiID = CMOS_READ(0x7C);
usSmapiID |= (CMOS_READ(0x7D) << 8);
spin_unlock_irqrestore(&rtc_lock, flags);
- PRINTK_2(TRACE_SMAPI, "smapi::smapi_init usSmapiID %x\n", usSmapiID);
if (usSmapiID == 0x5349) {
spin_lock_irqsave(&rtc_lock, flags);
@@ -555,16 +390,13 @@ int smapi_init(void)
g_usSmapiPort |= (CMOS_READ(0x7F) << 8);
spin_unlock_irqrestore(&rtc_lock, flags);
if (g_usSmapiPort == 0) {
- PRINTK_ERROR("smapi::smapi_init, ERROR unable to read from SMAPI port\n");
+ pr_err("%s: ERROR unable to read from SMAPI port\n", __func__);
} else {
- PRINTK_2(TRACE_SMAPI,
- "smapi::smapi_init, exit true g_usSmapiPort %x\n",
- g_usSmapiPort);
retval = 0;
//SmapiQuerySystemID();
}
} else {
- PRINTK_ERROR("smapi::smapi_init, ERROR invalid usSmapiID\n");
+ pr_err("%s: ERROR invalid usSmapiID\n", __func__);
retval = -ENXIO;
}
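
[Note: the pr_fmt() define added at the top of smapi.c must precede the first
include that pulls in <linux/printk.h>, because printk.h installs an empty
default pr_fmt() if none is defined yet; every later pr_err()/pr_warn() in
the file is then prefixed automatically. A minimal sketch of the mechanism:

	/* Must be defined before the first include of <linux/printk.h>. */
	#define pr_fmt(fmt) "smapi: " fmt

	#include <linux/printk.h>

	static void example(void)
	{
		/* Expands to printk(KERN_ERR "smapi: %s: oops\n", __func__). */
		pr_err("%s: oops\n", __func__);
	}
]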
diff --git a/drivers/char/mwave/smapi.h b/drivers/char/mwave/smapi.h
index ebc206b000b9..e605b16ed23c 100644
--- a/drivers/char/mwave/smapi.h
+++ b/drivers/char/mwave/smapi.h
@@ -49,7 +49,7 @@
#ifndef _LINUX_SMAPI_H
#define _LINUX_SMAPI_H
-typedef struct {
+struct smapi_dsp_settings {
int bDSPPresent;
int bDSPEnabled;
int bModemEnabled;
@@ -65,10 +65,10 @@ typedef struct {
unsigned short usSndblstIRQ;
unsigned short usSndblstDMA;
unsigned short usSndblstBaseIO;
-} SMAPI_DSP_SETTINGS;
+};
int smapi_init(void);
-int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings);
+int smapi_query_DSP_cfg(struct smapi_dsp_settings *pSettings);
int smapi_set_DSP_cfg(void);
int smapi_set_DSP_power_state(bool bOn);
diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
index 83eaffeb22c8..7363b0f764e0 100644
--- a/drivers/char/mwave/tp3780i.c
+++ b/drivers/char/mwave/tp3780i.c
@@ -46,6 +46,8 @@
* First release to the public
*/
+#define pr_fmt(fmt) "tp3780i: " fmt
+
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
@@ -65,16 +67,14 @@ static unsigned short s_ausThinkpadDmaToField[8] =
static unsigned short s_numIrqs = 16, s_numDmas = 8;
-static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
+static void EnableSRAM(struct thinkpad_bd_data *pBDData)
{
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
DSP_GPIO_OUTPUT_DATA_15_8 rGpioOutputData;
DSP_GPIO_DRIVER_ENABLE_15_8 rGpioDriverEnable;
DSP_GPIO_MODE_15_8 rGpioMode;
- PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM, entry\n");
-
MKWORD(rGpioMode) = ReadMsaCfg(DSP_GpioModeControl_15_8);
rGpioMode.GpioMode10 = 0;
WriteMsaCfg(DSP_GpioModeControl_15_8, MKWORD(rGpioMode));
@@ -88,54 +88,31 @@ static void EnableSRAM(THINKPAD_BD_DATA * pBDData)
rGpioOutputData.Latch10 = 0;
rGpioOutputData.Mask10 = true;
WriteMsaCfg(DSP_GpioOutputData_15_8, MKWORD(rGpioOutputData));
-
- PRINTK_1(TRACE_TP3780I, "tp3780i::EnableSRAM exit\n");
}
static irqreturn_t UartInterrupt(int irq, void *dev_id)
{
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::UartInterrupt entry irq %x dev_id %p\n", irq, dev_id);
return IRQ_HANDLED;
}
static irqreturn_t DspInterrupt(int irq, void *dev_id)
{
- pMWAVE_DEVICE_DATA pDrvData = &mwave_s_mdd;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pDrvData->rBDData.rDspSettings;
+ struct mwave_device_data *pDrvData = &mwave_s_mdd;
+ struct dsp_3780i_config_settings *pSettings = &pDrvData->rBDData.rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
unsigned short usIPCSource = 0, usIsolationMask, usPCNum;
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::DspInterrupt entry irq %x dev_id %p\n", irq, dev_id);
-
if (dsp3780I_GetIPCSource(usDspBaseIO, &usIPCSource) == 0) {
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt, return from dsp3780i_GetIPCSource, usIPCSource %x\n",
- usIPCSource);
usIsolationMask = 1;
for (usPCNum = 1; usPCNum <= 16; usPCNum++) {
if (usIPCSource & usIsolationMask) {
usIPCSource &= ~usIsolationMask;
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::DspInterrupt usPCNum %x usIPCSource %x\n",
- usPCNum, usIPCSource);
if (pDrvData->IPCs[usPCNum - 1].usIntCount == 0) {
pDrvData->IPCs[usPCNum - 1].usIntCount = 1;
}
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt usIntCount %x\n",
- pDrvData->IPCs[usPCNum - 1].usIntCount);
if (pDrvData->IPCs[usPCNum - 1].bIsEnabled == true) {
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt, waking up usPCNum %x\n",
- usPCNum - 1);
wake_up_interruptible(&pDrvData->IPCs[usPCNum - 1].ipc_wait_queue);
- } else {
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::DspInterrupt, no one waiting for IPC %x\n",
- usPCNum - 1);
}
}
if (usIPCSource == 0)
@@ -143,56 +120,42 @@ static irqreturn_t DspInterrupt(int irq, void *dev_id)
/* try next IPC */
usIsolationMask = usIsolationMask << 1;
}
- } else {
- PRINTK_1(TRACE_TP3780I,
- "tp3780i::DspInterrupt, return false from dsp3780i_GetIPCSource\n");
}
- PRINTK_1(TRACE_TP3780I, "tp3780i::DspInterrupt exit\n");
return IRQ_HANDLED;
}
-int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData)
+int tp3780I_InitializeBoardData(struct thinkpad_bd_data *pBDData)
{
int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
pBDData->bDSPEnabled = false;
pSettings->bInterruptClaimed = false;
retval = smapi_init();
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_InitializeBoardData: Error: SMAPI is not available on this machine\n");
+ pr_err("%s: Error: SMAPI is not available on this machine\n", __func__);
} else {
if (mwave_3780i_irq || mwave_3780i_io || mwave_uart_irq || mwave_uart_io) {
retval = smapi_set_DSP_cfg();
}
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_InitializeBoardData exit retval %x\n", retval);
-
return retval;
}
-void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData)
+void tp3780I_Cleanup(struct thinkpad_bd_data *pBDData)
{
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_Cleanup entry and exit pBDData %p\n", pBDData);
}
-int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
+int tp3780I_CalcResources(struct thinkpad_bd_data *pBDData)
{
- SMAPI_DSP_SETTINGS rSmapiInfo;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_CalcResources entry pBDData %p\n", pBDData);
+ struct smapi_dsp_settings rSmapiInfo;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (smapi_query_DSP_cfg(&rSmapiInfo)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Could not query DSP config. Aborting.\n");
+ pr_err("%s: Error: Could not query DSP config. Aborting.\n", __func__);
return -EIO;
}
@@ -203,7 +166,7 @@ int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
|| ( rSmapiInfo.usUartIRQ == 0 )
|| ( rSmapiInfo.usUartBaseIO == 0 )
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_CalcResources: Error: Illegal resource setting. Aborting.\n");
+ pr_err("%s: Error: Illegal resource setting. Aborting.\n", __func__);
return -EIO;
}
@@ -225,41 +188,31 @@ int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData)
pBDData->bShareDspIrq = pBDData->bShareUartIrq = 0;
}
- PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_CalcResources exit\n");
-
return 0;
}
-int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData)
+int tp3780I_ClaimResources(struct thinkpad_bd_data *pBDData)
{
int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
struct resource *pres;
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ClaimResources entry pBDData %p\n", pBDData);
-
pres = request_region(pSettings->usDspBaseIO, 16, "mwave_3780i");
if ( pres == NULL ) retval = -EIO;
if (retval) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_ClaimResources: Error: Could not claim I/O region starting at %x\n", pSettings->usDspBaseIO);
- retval = -EIO;
+ pr_err("%s: Error: Could not claim I/O region starting at %x\n", __func__,
+ pSettings->usDspBaseIO);
+ return -EIO;
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ClaimResources exit retval %x\n", retval);
-
return retval;
}
-int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
+int tp3780I_ReleaseResources(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ReleaseResources entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
release_region(pSettings->usDspBaseIO & (~3), 16);
@@ -268,28 +221,23 @@ int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData)
pSettings->bInterruptClaimed = false;
}
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ReleaseResources exit retval %x\n", retval);
-
- return retval;
+ return 0;
}
-int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_EnableDSP(struct thinkpad_bd_data *pBDData)
{
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
bool bDSPPoweredUp = false, bInterruptAllocated = false;
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP entry pBDData %p\n", pBDData);
-
if (pBDData->bDSPEnabled) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: DSP already enabled!\n");
+ pr_err("%s: Error: DSP already enabled!\n", __func__);
goto exit_cleanup;
}
if (!pSettings->bDSPEnabled) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780::tp3780I_EnableDSP: Error: pSettings->bDSPEnabled not set\n");
+ pr_err("%s: Error: pSettings->bDSPEnabled not set\n", __func__);
goto exit_cleanup;
}
@@ -299,7 +247,7 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
|| (s_ausThinkpadIrqToField[pSettings->usDspIrq] == 0xFFFF)
|| (s_ausThinkpadDmaToField[pSettings->usDspDma] == 0xFFFF)
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: invalid irq %x\n", pSettings->usDspIrq);
+ pr_err("%s: Error: invalid irq %x\n", __func__, pSettings->usDspIrq);
goto exit_cleanup;
}
@@ -307,7 +255,8 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
((pSettings->usDspBaseIO & 0xF00F) != 0)
|| (pSettings->usDspBaseIO & 0x0FF0) == 0
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid DSP base I/O address %x\n", pSettings->usDspBaseIO);
+ pr_err("%s: Error: Invalid DSP base I/O address %x\n", __func__,
+ pSettings->usDspBaseIO);
goto exit_cleanup;
}
@@ -316,7 +265,7 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
pSettings->usUartIrq >= s_numIrqs
|| s_ausThinkpadIrqToField[pSettings->usUartIrq] == 0xFFFF
) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Invalid UART IRQ %x\n", pSettings->usUartIrq);
+ pr_err("%s: Error: Invalid UART IRQ %x\n", __func__, pSettings->usUartIrq);
goto exit_cleanup;
}
switch (pSettings->usUartBaseIO) {
@@ -327,7 +276,8 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
break;
default:
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Invalid UART base I/O address %x\n", pSettings->usUartBaseIO);
+ pr_err("%s: Error: Invalid UART base I/O address %x\n", __func__,
+ pSettings->usUartBaseIO);
goto exit_cleanup;
}
}
@@ -356,33 +306,30 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
pSettings->usChipletEnable = TP_CFG_ChipletEnable;
if (request_irq(pSettings->usUartIrq, &UartInterrupt, 0, "mwave_uart", NULL)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: Could not get UART IRQ %x\n", pSettings->usUartIrq);
+ pr_err("%s: Error: Could not get UART IRQ %x\n", __func__, pSettings->usUartIrq);
goto exit_cleanup;
} else { /* no conflict just release */
free_irq(pSettings->usUartIrq, NULL);
}
if (request_irq(pSettings->usDspIrq, &DspInterrupt, 0, "mwave_3780i", NULL)) {
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: Could not get 3780i IRQ %x\n", pSettings->usDspIrq);
+ pr_err("%s: Error: Could not get 3780i IRQ %x\n", __func__, pSettings->usDspIrq);
goto exit_cleanup;
} else {
- PRINTK_3(TRACE_TP3780I,
- "tp3780i::tp3780I_EnableDSP, got interrupt %x bShareDspIrq %x\n",
- pSettings->usDspIrq, pBDData->bShareDspIrq);
bInterruptAllocated = true;
pSettings->bInterruptClaimed = true;
}
smapi_set_DSP_power_state(false);
if (smapi_set_DSP_power_state(true)) {
- PRINTK_ERROR(KERN_ERR_MWAVE "tp3780i::tp3780I_EnableDSP: Error: smapi_set_DSP_power_state(true) failed\n");
+ pr_err("%s: Error: smapi_set_DSP_power_state(true) failed\n", __func__);
goto exit_cleanup;
} else {
bDSPPoweredUp = true;
}
if (dsp3780I_EnableDSP(pSettings, s_ausThinkpadIrqToField, s_ausThinkpadDmaToField)) {
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Error: dsp7880I_EnableDSP() failed\n");
+ pr_err("%s: Error: dsp7880I_EnableDSP() failed\n", __func__);
goto exit_cleanup;
}
@@ -390,12 +337,10 @@ int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData)
pBDData->bDSPEnabled = true;
- PRINTK_1(TRACE_TP3780I, "tp3780i::tp3780I_EnableDSP exit\n");
-
return 0;
exit_cleanup:
- PRINTK_ERROR("tp3780i::tp3780I_EnableDSP: Cleaning up\n");
+ pr_err("%s: Cleaning up\n", __func__);
if (bDSPPoweredUp)
smapi_set_DSP_power_state(false);
if (bInterruptAllocated) {
@@ -406,12 +351,9 @@ exit_cleanup:
}
-int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_DisableDSP(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (pBDData->bDSPEnabled) {
dsp3780I_DisableDSP(&pBDData->rDspSettings);
@@ -423,56 +365,38 @@ int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData)
pBDData->bDSPEnabled = false;
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_DisableDSP exit retval %x\n", retval);
-
- return retval;
+ return 0;
}
-int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_ResetDSP(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP entry pBDData %p\n",
- pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (dsp3780I_Reset(pSettings) == 0) {
EnableSRAM(pBDData);
- } else {
- retval = -EIO;
+ return 0;
}
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ResetDSP exit retval %x\n", retval);
-
- return retval;
+ return -EIO;
}
-int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData)
+int tp3780I_StartDSP(struct thinkpad_bd_data *pBDData)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
-
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP entry pBDData %p\n", pBDData);
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
if (dsp3780I_Run(pSettings) == 0) {
// @BUG @TBD EnableSRAM(pBDData);
} else {
- retval = -EIO;
+ return -EIO;
}
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_StartDSP exit retval %x\n", retval);
-
- return retval;
+ return 0;
}
-int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities)
+int tp3780I_QueryAbilities(struct thinkpad_bd_data *pBDData, struct mw_abilities *pAbilities)
{
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
-
memset(pAbilities, 0, sizeof(*pAbilities));
/* fill out standard constant fields */
pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
@@ -497,25 +421,17 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
memcpy(pAbilities->bios_task_name, TP_ABILITIES_BIOSTASK_NAME,
sizeof(TP_ABILITIES_BIOSTASK_NAME));
- PRINTK_1(TRACE_TP3780I,
- "tp3780i::tp3780I_QueryAbilities exit retval=SUCCESSFUL\n");
-
return 0;
}
-int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_ReadWriteDspDStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
bool bRC = 0;
- PRINTK_6(TRACE_TP3780I,
- "tp3780i::tp3780I_ReadWriteDspDStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
- pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
-
if (pBDData->bDSPEnabled) {
switch (uOpcode) {
case IOCTL_MW_READ_DATA:
@@ -532,26 +448,18 @@ int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
}
}
- retval = (bRC) ? -EIO : 0;
- PRINTK_2(TRACE_TP3780I, "tp3780i::tp3780I_ReadWriteDspDStore exit retval %x\n", retval);
-
- return retval;
+ return bRC ? -EIO : 0;
}
-int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_ReadWriteDspIStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr)
{
- int retval = 0;
- DSP_3780I_CONFIG_SETTINGS *pSettings = &pBDData->rDspSettings;
+ struct dsp_3780i_config_settings *pSettings = &pBDData->rDspSettings;
unsigned short usDspBaseIO = pSettings->usDspBaseIO;
bool bRC = 0;
- PRINTK_6(TRACE_TP3780I,
- "tp3780i::tp3780I_ReadWriteDspIStore entry pBDData %p, uOpcode %x, pvBuffer %p, uCount %x, ulDSPAddr %lx\n",
- pBDData, uOpcode, pvBuffer, uCount, ulDSPAddr);
-
if (pBDData->bDSPEnabled) {
switch (uOpcode) {
case IOCTL_MW_READ_INST:
@@ -564,11 +472,6 @@ int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
}
}
- retval = (bRC) ? -EIO : 0;
-
- PRINTK_2(TRACE_TP3780I,
- "tp3780i::tp3780I_ReadWriteDspIStore exit retval %x\n", retval);
-
- return retval;
+ return bRC ? -EIO : 0;
}
diff --git a/drivers/char/mwave/tp3780i.h b/drivers/char/mwave/tp3780i.h
index 8bd976d42fae..c0001a344741 100644
--- a/drivers/char/mwave/tp3780i.h
+++ b/drivers/char/mwave/tp3780i.h
@@ -75,27 +75,27 @@
#define TP_CFG_PllBypass 0 /* don't bypass */
#define TP_CFG_ChipletEnable 0xFFFF /* Enable all chiplets */
-typedef struct {
+struct thinkpad_bd_data {
int bDSPEnabled;
int bShareDspIrq;
int bShareUartIrq;
- DSP_3780I_CONFIG_SETTINGS rDspSettings;
-} THINKPAD_BD_DATA;
+ struct dsp_3780i_config_settings rDspSettings;
+};
-int tp3780I_InitializeBoardData(THINKPAD_BD_DATA * pBDData);
-int tp3780I_CalcResources(THINKPAD_BD_DATA * pBDData);
-int tp3780I_ClaimResources(THINKPAD_BD_DATA * pBDData);
-int tp3780I_ReleaseResources(THINKPAD_BD_DATA * pBDData);
-int tp3780I_EnableDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_DisableDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_ResetDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_StartDSP(THINKPAD_BD_DATA * pBDData);
-int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities);
-void tp3780I_Cleanup(THINKPAD_BD_DATA *pBDData);
-int tp3780I_ReadWriteDspDStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_InitializeBoardData(struct thinkpad_bd_data *pBDData);
+int tp3780I_CalcResources(struct thinkpad_bd_data *pBDData);
+int tp3780I_ClaimResources(struct thinkpad_bd_data *pBDData);
+int tp3780I_ReleaseResources(struct thinkpad_bd_data *pBDData);
+int tp3780I_EnableDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_DisableDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_ResetDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_StartDSP(struct thinkpad_bd_data *pBDData);
+int tp3780I_QueryAbilities(struct thinkpad_bd_data *pBDData, struct mw_abilities *pAbilities);
+void tp3780I_Cleanup(struct thinkpad_bd_data *pBDData);
+int tp3780I_ReadWriteDspDStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr);
-int tp3780I_ReadWriteDspIStore(THINKPAD_BD_DATA * pBDData, unsigned int uOpcode,
+int tp3780I_ReadWriteDspIStore(struct thinkpad_bd_data *pBDData, unsigned int uOpcode,
void __user *pvBuffer, unsigned int uCount,
unsigned long ulDSPAddr);
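
[Note: dropping the THINKPAD_BD_DATA typedef follows the kernel coding style
(Documentation/process/coding-style.rst), which reserves typedefs for opaque
or arch-specific scalar types; a plain struct tag keeps it visible at every
call site that a structure is being passed. The before/after shape,
schematically:

	/* Before: the tag is hidden behind an all-caps alias. */
	typedef struct {
		int bDSPEnabled;
	} THINKPAD_BD_DATA;

	/* After: callers write "struct thinkpad_bd_data *" explicitly. */
	struct thinkpad_bd_data {
		int bDSPEnabled;
	};
]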
diff --git a/drivers/char/random.c b/drivers/char/random.c
index b8b24b6ed3fe..bab03c7c4194 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -259,8 +259,8 @@ static void crng_reseed(struct work_struct *work)
u8 key[CHACHA_KEY_SIZE];
/* Immediately schedule the next reseeding, so that it fires sooner rather than later. */
- if (likely(system_unbound_wq))
- queue_delayed_work(system_unbound_wq, &next_reseed, crng_reseed_interval());
+ if (likely(system_dfl_wq))
+ queue_delayed_work(system_dfl_wq, &next_reseed, crng_reseed_interval());
extract_entropy(key, sizeof(key));
@@ -427,7 +427,7 @@ static void _get_random_bytes(void *buf, size_t len)
/*
* This returns random bytes in arbitrary quantities. The quality of the
- * random bytes is good as /dev/urandom. In order to ensure that the
+ * random bytes is as good as /dev/urandom. In order to ensure that the
* randomness provided by this function is okay, the function
* wait_for_random_bytes() should be called and return 0 at least once
* at any point prior.
@@ -491,7 +491,7 @@ out_zero_chacha:
/*
* Batched entropy returns random integers. The quality of the random
- * number is good as /dev/urandom. In order to ensure that the randomness
+ * number is as good as /dev/urandom. In order to ensure that the randomness
* provided by this function is okay, the function wait_for_random_bytes()
* should be called and return 0 at least once at any point prior.
*/
@@ -636,7 +636,7 @@ enum {
};
static struct {
- struct blake2s_state hash;
+ struct blake2s_ctx hash;
spinlock_t lock;
unsigned int init_bits;
} input_pool = {
@@ -701,7 +701,7 @@ static void extract_entropy(void *buf, size_t len)
/* next_key = HASHPRF(seed, RDSEED || 0) */
block.counter = 0;
- blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
+ blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), next_key, sizeof(next_key));
blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));
spin_unlock_irqrestore(&input_pool.lock, flags);
@@ -711,7 +711,7 @@ static void extract_entropy(void *buf, size_t len)
i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
/* output = HASHPRF(seed, RDSEED || ++counter) */
++block.counter;
- blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
+ blake2s(seed, sizeof(seed), (const u8 *)&block, sizeof(block), buf, i);
len -= i;
buf += i;
}
@@ -741,8 +741,8 @@ static void __cold _credit_init_bits(size_t bits)
if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
- if (static_key_initialized && system_unbound_wq)
- queue_work(system_unbound_wq, &set_ready);
+ if (system_dfl_wq)
+ queue_work(system_dfl_wq, &set_ready);
atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
#ifdef CONFIG_VDSO_GETRANDOM
WRITE_ONCE(vdso_k_rng_data->is_ready, true);
@@ -794,7 +794,7 @@ static void __cold _credit_init_bits(size_t bits)
*
* add_bootloader_randomness() is called by bootloader drivers, such as EFI
* and device tree, and credits its input depending on whether or not the
- * command line option 'random.trust_bootloader'.
+ * command line option 'random.trust_bootloader' is set.
*
* add_vmfork_randomness() adds a unique (but not necessarily secret) ID
* representing the current instance of a VM to the pool, without crediting,
@@ -915,9 +915,8 @@ void __init random_init(void)
add_latent_entropy();
/*
- * If we were initialized by the cpu or bootloader before jump labels
- * or workqueues are initialized, then we should enable the static
- * branch here, where it's guaranteed that these have been initialized.
+ * If we were initialized by the cpu or bootloader before workqueues
+ * are initialized, then we should enable the static branch here.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);
@@ -1296,6 +1295,7 @@ static void __cold try_to_generate_entropy(void)
struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
unsigned int i, num_different = 0;
unsigned long last = random_get_entropy();
+ cpumask_var_t timer_cpus;
int cpu = -1;
for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
@@ -1310,13 +1310,15 @@ static void __cold try_to_generate_entropy(void)
atomic_set(&stack->samples, 0);
timer_setup_on_stack(&stack->timer, entropy_timer, 0);
+ if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL))
+ goto out;
+
while (!crng_ready() && !signal_pending(current)) {
/*
* Check !timer_pending() and then ensure that any previous callback has finished
* executing by checking timer_delete_sync_try(), before queueing the next one.
*/
if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
- struct cpumask timer_cpus;
unsigned int num_cpus;
/*
@@ -1326,19 +1328,19 @@ static void __cold try_to_generate_entropy(void)
preempt_disable();
/* Only schedule callbacks on timer CPUs that are online. */
- cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
- num_cpus = cpumask_weight(&timer_cpus);
+ cpumask_and(timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+ num_cpus = cpumask_weight(timer_cpus);
/* In very bizarre case of misconfiguration, fallback to all online. */
if (unlikely(num_cpus == 0)) {
- timer_cpus = *cpu_online_mask;
- num_cpus = cpumask_weight(&timer_cpus);
+			cpumask_copy(timer_cpus, cpu_online_mask);
+ num_cpus = cpumask_weight(timer_cpus);
}
/* Basic CPU round-robin, which avoids the current CPU. */
do {
- cpu = cpumask_next(cpu, &timer_cpus);
+ cpu = cpumask_next(cpu, timer_cpus);
if (cpu >= nr_cpu_ids)
- cpu = cpumask_first(&timer_cpus);
+ cpu = cpumask_first(timer_cpus);
} while (cpu == smp_processor_id() && num_cpus > 1);
/* Expiring the timer at `jiffies` means it's the next tick. */
@@ -1354,6 +1356,8 @@ static void __cold try_to_generate_entropy(void)
}
mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
+ free_cpumask_var(timer_cpus);
+out:
timer_delete_sync(&stack->timer);
timer_destroy_on_stack(&stack->timer);
}
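
[Note: moving the cpumask off the stack matters because struct cpumask is
NR_CPUS bits wide, which can approach a kilobyte on large configurations,
while alloc_cpumask_var() sizes the allocation to the runtime CPU count
under CONFIG_CPUMASK_OFFSTACK (which is also why cpumask_copy() is used
above rather than struct assignment). The usual pattern, sketched with a
hypothetical walker function:

	#include <linux/cpumask.h>
	#include <linux/printk.h>

	static int walk_online_cpus(void)
	{
		cpumask_var_t mask;
		int cpu;

		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		/* Work on a private copy of the online mask. */
		cpumask_copy(mask, cpu_online_mask);
		for_each_cpu(cpu, mask)
			pr_info("cpu %d\n", cpu);

		free_cpumask_var(mask);
		return 0;
	}
]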
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index dddd702b2454..8a8f692b6088 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -29,10 +29,11 @@ if TCG_TPM
config TCG_TPM2_HMAC
bool "Use HMAC and encrypted transactions on the TPM bus"
- default X86_64
+ default n
select CRYPTO_ECDH
select CRYPTO_LIB_AESCFB
select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_UTILS
help
Setting this causes us to deploy a scheme which uses request
and response HMACs in addition to encryption for
@@ -189,6 +190,15 @@ config TCG_IBMVTPM
will be accessible from within Linux. To compile this driver
as a module, choose M here; the module will be called tpm_ibmvtpm.
+config TCG_LOONGSON
+ tristate "Loongson TPM Interface"
+ depends on MFD_LOONGSON_SE
+ help
+ If you want to make Loongson TPM support available, say Yes and
+ it will be accessible from within Linux. To compile this
+ driver as a module, choose M here; the module will be called
+ tpm_loongson.
+
config TCG_XEN
tristate "XEN TPM Interface"
depends on TCG_TPM && XEN
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 9de1b3ea34a9..5b5cdc0d32e4 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -46,3 +46,4 @@ obj-$(CONFIG_TCG_ARM_CRB_FFA) += tpm_crb_ffa.o
obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o
obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o
obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o
+obj-$(CONFIG_TCG_LOONGSON) += tpm_loongson.o
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e25daf2396d3..082b910ddf0d 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -231,42 +231,6 @@ struct tpm_chip *tpm_default_chip(void)
EXPORT_SYMBOL_GPL(tpm_default_chip);
/**
- * tpm_find_get_ops() - find and reserve a TPM chip
- * @chip: a &struct tpm_chip instance, %NULL for the default chip
- *
- * Finds a TPM chip and reserves its class device and operations. The chip must
- * be released with tpm_put_ops() after use.
- * This function is for internal use only. It supports existing TPM callers
- * by accepting NULL, but those callers should be converted to pass in a chip
- * directly.
- *
- * Return:
- * A reserved &struct tpm_chip instance.
- * %NULL if a chip is not found.
- * %NULL if the chip is not available.
- */
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip)
-{
- int rc;
-
- if (chip) {
- if (!tpm_try_get_ops(chip))
- return chip;
- return NULL;
- }
-
- chip = tpm_default_chip();
- if (!chip)
- return NULL;
- rc = tpm_try_get_ops(chip);
- /* release additional reference we got from tpm_default_chip() */
- put_device(&chip->dev);
- if (rc)
- return NULL;
- return chip;
-}
-
-/**
* tpm_dev_release() - free chip memory and the device number
* @dev: the character device for the TPM chip
*
@@ -282,7 +246,6 @@ static void tpm_dev_release(struct device *dev)
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
- kfree(chip->allocated_banks);
#ifdef CONFIG_TCG_TPM2_HMAC
kfree(chip->auth);
#endif
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index f2a5e09257dd..f942c0c8e402 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -275,7 +275,8 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
int __init tpm_dev_common_init(void)
{
- tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+ tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
return !tpm_dev_wq ? -ENOMEM : 0;
}
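
[Note: the WQ_PERCPU flag belongs to the same workqueue API transition seen
in the random.c hunk above (system_unbound_wq becoming system_dfl_wq):
unbound execution becomes the default, and callers that genuinely need
per-CPU semantics must now say so explicitly. A minimal sketch, assuming
that API and a hypothetical demo work item:

	#include <linux/workqueue.h>

	static void demo_fn(struct work_struct *work)
	{
		/* ... do the deferred work ... */
	}
	static DECLARE_WORK(demo_work, demo_fn);

	static struct workqueue_struct *demo_wq;

	static int demo_init(void)
	{
		/* Explicitly per-CPU; a plain alloc_workqueue() is unbound. */
		demo_wq = alloc_workqueue("demo_wq",
					  WQ_MEM_RECLAIM | WQ_PERCPU, 0);
		if (!demo_wq)
			return -ENOMEM;
		queue_work(demo_wq, &demo_work);
		return 0;
	}
]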
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index b71725827743..f745a098908b 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(suspend_pcr,
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
{
if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return tpm2_calc_ordinal_duration(chip, ordinal);
+ return tpm2_calc_ordinal_duration(ordinal);
else
return tpm1_calc_ordinal_duration(chip, ordinal);
}
@@ -313,10 +313,13 @@ int tpm_is_tpm2(struct tpm_chip *chip)
{
int rc;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
tpm_put_ops(chip);
@@ -338,10 +341,13 @@ int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx,
{
int rc;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_pcr_read(chip, pcr_idx, digest, NULL);
else
@@ -369,10 +375,13 @@ int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
int rc;
int i;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
for (i = 0; i < chip->nr_allocated_banks; i++) {
if (digests[i].alg_id != chip->allocated_banks[i].alg_id) {
rc = -EINVAL;
@@ -492,10 +501,13 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
if (!out || max > TPM_MAX_RNG_DATA)
return -EINVAL;
- chip = tpm_find_get_ops(chip);
if (!chip)
return -ENODEV;
+ rc = tpm_try_get_ops(chip);
+ if (rc)
+ return rc;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_get_random(chip, out, max);
else
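
[Note: with tpm_find_get_ops() removed, every exported helper in
tpm-interface.c follows the same open-coded pattern: reject a NULL chip,
pin the ops with tpm_try_get_ops(), and release with tpm_put_ops() when
done. Sketched in isolation with a hypothetical helper:

	static int tpm_do_something(struct tpm_chip *chip)
	{
		int rc;

		if (!chip)
			return -ENODEV;

		rc = tpm_try_get_ops(chip);	/* fails if chip is going away */
		if (rc)
			return rc;

		/* ... issue commands against chip ... */

		tpm_put_ops(chip);
		return 0;
	}
]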
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 7bb87fa5f7a1..02c07fef41ba 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -267,7 +267,6 @@ static inline void tpm_msleep(unsigned int delay_msec)
int tpm_chip_bootstrap(struct tpm_chip *chip);
int tpm_chip_start(struct tpm_chip *chip);
void tpm_chip_stop(struct tpm_chip *chip);
-struct tpm_chip *tpm_find_get_ops(struct tpm_chip *chip);
struct tpm_chip *tpm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops);
@@ -299,7 +298,7 @@ ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
int tpm2_auto_startup(struct tpm_chip *chip);
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal);
int tpm2_probe(struct tpm_chip *chip);
int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip);
int tpm2_find_cc(struct tpm_chip *chip, u32 cc);
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index cf64c7385105..b49a790f1bd5 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -799,11 +799,6 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
*/
int tpm1_get_pcr_allocation(struct tpm_chip *chip)
{
- chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks)
- return -ENOMEM;
-
chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 524d802ede26..3a77be7ebf4a 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -11,14 +11,17 @@
* used by the kernel internally.
*/
+#include "linux/dev_printk.h"
+#include "linux/tpm.h"
#include "tpm.h"
#include <crypto/hash_info.h>
+#include <linux/unaligned.h>
static bool disable_pcr_integrity;
module_param(disable_pcr_integrity, bool, 0444);
MODULE_PARM_DESC(disable_pcr_integrity, "Disable integrity protection of TPM2_PCR_Extend");
-static struct tpm2_hash tpm2_hash_map[] = {
+struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SHA1, TPM_ALG_SHA1},
{HASH_ALGO_SHA256, TPM_ALG_SHA256},
{HASH_ALGO_SHA384, TPM_ALG_SHA384},
@@ -26,122 +29,71 @@ static struct tpm2_hash tpm2_hash_map[] = {
{HASH_ALGO_SM3_256, TPM_ALG_SM3_256},
};
+int tpm2_find_hash_alg(unsigned int crypto_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tpm2_hash_map); i++)
+ if (crypto_id == tpm2_hash_map[i].crypto_id)
+ return tpm2_hash_map[i].tpm_id;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(tpm2_find_hash_alg);
+
int tpm2_get_timeouts(struct tpm_chip *chip)
{
- /* Fixed timeouts for TPM2 */
chip->timeout_a = msecs_to_jiffies(TPM2_TIMEOUT_A);
chip->timeout_b = msecs_to_jiffies(TPM2_TIMEOUT_B);
chip->timeout_c = msecs_to_jiffies(TPM2_TIMEOUT_C);
chip->timeout_d = msecs_to_jiffies(TPM2_TIMEOUT_D);
-
- /* PTP spec timeouts */
- chip->duration[TPM_SHORT] = msecs_to_jiffies(TPM2_DURATION_SHORT);
- chip->duration[TPM_MEDIUM] = msecs_to_jiffies(TPM2_DURATION_MEDIUM);
- chip->duration[TPM_LONG] = msecs_to_jiffies(TPM2_DURATION_LONG);
-
- /* Key creation commands long timeouts */
- chip->duration[TPM_LONG_LONG] =
- msecs_to_jiffies(TPM2_DURATION_LONG_LONG);
-
chip->flags |= TPM_CHIP_FLAG_HAVE_TIMEOUTS;
-
return 0;
}
-/**
- * tpm2_ordinal_duration_index() - returns an index to the chip duration table
- * @ordinal: TPM command ordinal.
- *
- * The function returns an index to the chip duration table
- * (enum tpm_duration), that describes the maximum amount of
- * time the chip could take to return the result for a particular ordinal.
- *
- * The values of the MEDIUM, and LONG durations are taken
- * from the PC Client Profile (PTP) specification (750, 2000 msec)
- *
- * LONG_LONG is for commands that generates keys which empirically takes
- * a longer time on some systems.
- *
- * Return:
- * * TPM_MEDIUM
- * * TPM_LONG
- * * TPM_LONG_LONG
- * * TPM_UNDEFINED
+/*
+ * Contains the maximum durations in milliseconds for TPM2 commands.
*/
-static u8 tpm2_ordinal_duration_index(u32 ordinal)
-{
- switch (ordinal) {
- /* Startup */
- case TPM2_CC_STARTUP: /* 144 */
- return TPM_MEDIUM;
-
- case TPM2_CC_SELF_TEST: /* 143 */
- return TPM_LONG;
-
- case TPM2_CC_GET_RANDOM: /* 17B */
- return TPM_LONG;
-
- case TPM2_CC_SEQUENCE_UPDATE: /* 15C */
- return TPM_MEDIUM;
- case TPM2_CC_SEQUENCE_COMPLETE: /* 13E */
- return TPM_MEDIUM;
- case TPM2_CC_EVENT_SEQUENCE_COMPLETE: /* 185 */
- return TPM_MEDIUM;
- case TPM2_CC_HASH_SEQUENCE_START: /* 186 */
- return TPM_MEDIUM;
-
- case TPM2_CC_VERIFY_SIGNATURE: /* 177 */
- return TPM_LONG_LONG;
-
- case TPM2_CC_PCR_EXTEND: /* 182 */
- return TPM_MEDIUM;
-
- case TPM2_CC_HIERARCHY_CONTROL: /* 121 */
- return TPM_LONG;
- case TPM2_CC_HIERARCHY_CHANGE_AUTH: /* 129 */
- return TPM_LONG;
-
- case TPM2_CC_GET_CAPABILITY: /* 17A */
- return TPM_MEDIUM;
-
- case TPM2_CC_NV_READ: /* 14E */
- return TPM_LONG;
-
- case TPM2_CC_CREATE_PRIMARY: /* 131 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE: /* 153 */
- return TPM_LONG_LONG;
- case TPM2_CC_CREATE_LOADED: /* 191 */
- return TPM_LONG_LONG;
-
- default:
- return TPM_UNDEFINED;
- }
-}
+static const struct {
+ unsigned long ordinal;
+ unsigned long duration;
+} tpm2_ordinal_duration_map[] = {
+ {TPM2_CC_STARTUP, 750},
+ {TPM2_CC_SELF_TEST, 3000},
+ {TPM2_CC_GET_RANDOM, 2000},
+ {TPM2_CC_SEQUENCE_UPDATE, 750},
+ {TPM2_CC_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_EVENT_SEQUENCE_COMPLETE, 750},
+ {TPM2_CC_HASH_SEQUENCE_START, 750},
+ {TPM2_CC_VERIFY_SIGNATURE, 30000},
+ {TPM2_CC_PCR_EXTEND, 750},
+ {TPM2_CC_HIERARCHY_CONTROL, 2000},
+ {TPM2_CC_HIERARCHY_CHANGE_AUTH, 2000},
+ {TPM2_CC_GET_CAPABILITY, 750},
+ {TPM2_CC_NV_READ, 2000},
+ {TPM2_CC_CREATE_PRIMARY, 30000},
+ {TPM2_CC_CREATE, 30000},
+ {TPM2_CC_CREATE_LOADED, 30000},
+};
/**
- * tpm2_calc_ordinal_duration() - calculate the maximum command duration
- * @chip: TPM chip to use.
+ * tpm2_calc_ordinal_duration() - Calculate the maximum command duration
* @ordinal: TPM command ordinal.
*
- * The function returns the maximum amount of time the chip could take
- * to return the result for a particular ordinal in jiffies.
- *
- * Return: A maximal duration time for an ordinal in jiffies.
+ * Return: the maximum amount of time the chip is expected to take for
+ * the given ordinal, in jiffies.
*/
-unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal)
+unsigned long tpm2_calc_ordinal_duration(u32 ordinal)
{
- unsigned int index;
+ int i;
- index = tpm2_ordinal_duration_index(ordinal);
+ for (i = 0; i < ARRAY_SIZE(tpm2_ordinal_duration_map); i++)
+ if (ordinal == tpm2_ordinal_duration_map[i].ordinal)
+ return msecs_to_jiffies(tpm2_ordinal_duration_map[i].duration);
- if (index != TPM_UNDEFINED)
- return chip->duration[index];
- else
- return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
+ return msecs_to_jiffies(TPM2_DURATION_DEFAULT);
}
-
struct tpm2_pcr_read_out {
__be32 update_cnt;
__be32 pcr_selects_cnt;
@@ -250,11 +202,15 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
}
if (!disable_pcr_integrity) {
- tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ rc = tpm_buf_append_name(chip, &buf, pcr_idx, NULL);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
tpm_buf_append_hmac_session(chip, &buf, 0, NULL, 0);
} else {
tpm_buf_append_handle(chip, &buf, pcr_idx);
- tpm_buf_append_auth(chip, &buf, 0, NULL, 0);
+ tpm_buf_append_auth(chip, &buf, NULL, 0);
}
tpm_buf_append_u32(&buf, chip->nr_allocated_banks);
@@ -265,8 +221,14 @@ int tpm2_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
chip->allocated_banks[i].digest_size);
}
- if (!disable_pcr_integrity)
- tpm_buf_fill_hmac_session(chip, &buf);
+ if (!disable_pcr_integrity) {
+ rc = tpm_buf_fill_hmac_session(chip, &buf);
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return rc;
+ }
+ }
+
rc = tpm_transmit_cmd(chip, &buf, 0, "attempting extend a PCR value");
if (!disable_pcr_integrity)
rc = tpm_buf_check_hmac_response(chip, &buf, rc);
@@ -320,11 +282,24 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
do {
tpm_buf_reset(&buf, TPM2_ST_SESSIONS, TPM2_CC_GET_RANDOM);
- tpm_buf_append_hmac_session_opt(chip, &buf, TPM2_SA_ENCRYPT
- | TPM2_SA_CONTINUE_SESSION,
- NULL, 0);
+ if (tpm2_chip_auth(chip)) {
+ tpm_buf_append_hmac_session(chip, &buf,
+ TPM2_SA_ENCRYPT |
+ TPM2_SA_CONTINUE_SESSION,
+ NULL, 0);
+ } else {
+ offset = buf.handles * 4 + TPM_HEADER_SIZE;
+ head = (struct tpm_header *)buf.data;
+ if (tpm_buf_length(&buf) == offset)
+ head->tag = cpu_to_be16(TPM2_ST_NO_SESSIONS);
+ }
tpm_buf_append_u16(&buf, num_bytes);
- tpm_buf_fill_hmac_session(chip, &buf);
+ err = tpm_buf_fill_hmac_session(chip, &buf);
+ if (err) {
+ tpm_buf_destroy(&buf);
+ return err;
+ }
+
err = tpm_transmit_cmd(chip, &buf,
offsetof(struct tpm2_get_random_out,
buffer),
@@ -601,11 +576,9 @@ ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
nr_possible_banks = be32_to_cpup(
(__be32 *)&buf.data[TPM_HEADER_SIZE + 5]);
-
- chip->allocated_banks = kcalloc(nr_possible_banks,
- sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks) {
+ if (nr_possible_banks > TPM2_MAX_PCR_BANKS) {
+ pr_err("tpm: out of bank capacity: %u > %u\n",
+ nr_possible_banks, TPM2_MAX_PCR_BANKS);
rc = -ENOMEM;
goto out;
}
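
[Note: both the kfree() in tpm_dev_release() and the kcalloc() calls in
tpm1-cmd.c and tpm2-cmd.c disappear because, presumably, allocated_banks
has become a fixed-capacity array embedded in struct tpm_chip, bounded by
the new TPM2_MAX_PCR_BANKS check. Schematically (field types assumed):

	/* Before: sized at runtime from the GetCapability reply. */
	struct tpm_chip_old {
		struct tpm_bank_info *allocated_banks;
		u32 nr_allocated_banks;
	};

	/* After (assumed): fixed capacity, no allocation to fail or leak. */
	struct tpm_chip_new {
		struct tpm_bank_info allocated_banks[TPM2_MAX_PCR_BANKS];
		u32 nr_allocated_banks;
	};
]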
diff --git a/drivers/char/tpm/tpm2-sessions.c b/drivers/char/tpm/tpm2-sessions.c
index bdb119453dfb..4149379665c4 100644
--- a/drivers/char/tpm/tpm2-sessions.c
+++ b/drivers/char/tpm/tpm2-sessions.c
@@ -69,8 +69,8 @@
#include <linux/unaligned.h>
#include <crypto/kpp.h>
#include <crypto/ecdh.h>
-#include <crypto/hash.h>
-#include <crypto/hmac.h>
+#include <crypto/sha2.h>
+#include <crypto/utils.h>
/* maximum number of names the TPM must remember for authorization */
#define AUTH_MAX_NAMES 3
@@ -144,59 +144,80 @@ struct tpm2_auth {
/*
* Name Size based on TPM algorithm (assumes no hash bigger than 255)
*/
-static u8 name_size(const u8 *name)
+static int name_size(const u8 *name)
{
- static u8 size_map[] = {
- [TPM_ALG_SHA1] = SHA1_DIGEST_SIZE,
- [TPM_ALG_SHA256] = SHA256_DIGEST_SIZE,
- [TPM_ALG_SHA384] = SHA384_DIGEST_SIZE,
- [TPM_ALG_SHA512] = SHA512_DIGEST_SIZE,
- };
- u16 alg = get_unaligned_be16(name);
- return size_map[alg] + 2;
-}
-
-static int tpm2_parse_read_public(char *name, struct tpm_buf *buf)
-{
- struct tpm_header *head = (struct tpm_header *)buf->data;
- off_t offset = TPM_HEADER_SIZE;
- u32 tot_len = be32_to_cpu(head->length);
- u32 val;
-
- /* we're starting after the header so adjust the length */
- tot_len -= TPM_HEADER_SIZE;
-
- /* skip public */
- val = tpm_buf_read_u16(buf, &offset);
- if (val > tot_len)
- return -EINVAL;
- offset += val;
- /* name */
- val = tpm_buf_read_u16(buf, &offset);
- if (val != name_size(&buf->data[offset]))
+ u16 hash_alg = get_unaligned_be16(name);
+
+ switch (hash_alg) {
+ case TPM_ALG_SHA1:
+ return SHA1_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA256:
+ return SHA256_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA384:
+ return SHA384_DIGEST_SIZE + 2;
+ case TPM_ALG_SHA512:
+ return SHA512_DIGEST_SIZE + 2;
+ default:
+ pr_warn("tpm: unsupported name algorithm: 0x%04x\n", hash_alg);
return -EINVAL;
- memcpy(name, &buf->data[offset], val);
- /* forget the rest */
- return 0;
+ }
}
-static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
+static int tpm2_read_public(struct tpm_chip *chip, u32 handle, void *name)
{
+ u32 mso = tpm2_handle_mso(handle);
+ off_t offset = TPM_HEADER_SIZE;
+ int rc, name_size_alg;
struct tpm_buf buf;
- int rc;
+
+ if (mso != TPM2_MSO_PERSISTENT && mso != TPM2_MSO_VOLATILE &&
+ mso != TPM2_MSO_NVRAM) {
+ memcpy(name, &handle, sizeof(u32));
+ return sizeof(u32);
+ }
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_READ_PUBLIC);
if (rc)
return rc;
tpm_buf_append_u32(&buf, handle);
- rc = tpm_transmit_cmd(chip, &buf, 0, "read public");
- if (rc == TPM2_RC_SUCCESS)
- rc = tpm2_parse_read_public(name, &buf);
- tpm_buf_destroy(&buf);
+ rc = tpm_transmit_cmd(chip, &buf, 0, "TPM2_ReadPublic");
+ if (rc) {
+ tpm_buf_destroy(&buf);
+ return tpm_ret_to_err(rc);
+ }
- return rc;
+ /* Skip TPMT_PUBLIC: */
+ offset += tpm_buf_read_u16(&buf, &offset);
+
+ /*
+ * Ensure space for the length field of TPM2B_NAME and hashAlg field of
+ * TPMT_HA (the extra four bytes).
+ */
+ if (offset + 4 > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ rc = tpm_buf_read_u16(&buf, &offset);
+ name_size_alg = name_size(&buf.data[offset]);
+
+	if (name_size_alg < 0) {
+		tpm_buf_destroy(&buf);
+		return name_size_alg;
+	}
+
+ if (rc != name_size_alg) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+ if (offset + rc > tpm_buf_length(&buf)) {
+ tpm_buf_destroy(&buf);
+ return -EIO;
+ }
+
+	memcpy(name, &buf.data[offset], rc);
+	tpm_buf_destroy(&buf);
+	return name_size_alg;
}
#endif /* CONFIG_TCG_TPM2_HMAC */
@@ -221,52 +242,76 @@ static int tpm2_read_public(struct tpm_chip *chip, u32 handle, char *name)
* As with most tpm_buf operations, success is assumed because failure
* will be caused by an incorrect programming model and indicated by a
* kernel message.
+ *
+ * Ends the authorization session on failure.
*/
-void tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
- u32 handle, u8 *name)
+int tpm_buf_append_name(struct tpm_chip *chip, struct tpm_buf *buf,
+ u32 handle, u8 *name)
{
#ifdef CONFIG_TCG_TPM2_HMAC
enum tpm2_mso_type mso = tpm2_handle_mso(handle);
struct tpm2_auth *auth;
+ u16 name_size_alg;
int slot;
+ int ret;
#endif
if (!tpm2_chip_auth(chip)) {
tpm_buf_append_handle(chip, buf, handle);
- return;
+ return 0;
}
#ifdef CONFIG_TCG_TPM2_HMAC
slot = (tpm_buf_length(buf) - TPM_HEADER_SIZE) / 4;
if (slot >= AUTH_MAX_NAMES) {
- dev_err(&chip->dev, "TPM: too many handles\n");
- return;
+ dev_err(&chip->dev, "too many handles\n");
+ ret = -EIO;
+ goto err;
}
auth = chip->auth;
- WARN(auth->session != tpm_buf_length(buf),
- "name added in wrong place\n");
+ if (auth->session != tpm_buf_length(buf)) {
+ dev_err(&chip->dev, "session state malformed");
+ ret = -EIO;
+ goto err;
+ }
tpm_buf_append_u32(buf, handle);
auth->session += 4;
if (mso == TPM2_MSO_PERSISTENT ||
mso == TPM2_MSO_VOLATILE ||
mso == TPM2_MSO_NVRAM) {
- if (!name)
- tpm2_read_public(chip, handle, auth->name[slot]);
+ if (!name) {
+ ret = tpm2_read_public(chip, handle, auth->name[slot]);
+ if (ret < 0)
+ goto err;
+
+ name_size_alg = ret;
+ } else {
+ ret = name_size(name);
+ if (ret < 0)
+ goto err;
+
+ name_size_alg = ret;
+ }
} else {
- if (name)
- dev_err(&chip->dev, "TPM: Handle does not require name but one is specified\n");
+ if (name) {
+ dev_err(&chip->dev, "handle 0x%08x does not use a name\n",
+ handle);
+ ret = -EIO;
+ goto err;
+ }
}
auth->name_h[slot] = handle;
if (name)
- memcpy(auth->name[slot], name, name_size(name));
+ memcpy(auth->name[slot], name, name_size_alg);
+#endif
+ return 0;
+
+#ifdef CONFIG_TCG_TPM2_HMAC
+err:
+ tpm2_end_auth_session(chip);
+ return tpm_ret_to_err(ret);
#endif
}
EXPORT_SYMBOL_GPL(tpm_buf_append_name);
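Now that tpm_buf_append_name() reports errors and tears down the session itself, callers should check the return value instead of assuming success; a hedged sketch of the expected call pattern (blob_handle is a hypothetical handle):

	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_SESSIONS, TPM2_CC_UNSEAL);
	if (rc)
		return rc;

	rc = tpm_buf_append_name(chip, &buf, blob_handle, NULL);
	if (rc) {
		/* the auth session was already ended for us */
		tpm_buf_destroy(&buf);
		return rc;
	}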
void tpm_buf_append_auth(struct tpm_chip *chip, struct tpm_buf *buf,
- u8 attributes, u8 *passphrase, int passphrase_len)
+ u8 *passphrase, int passphrase_len)
{
/* offset tells us where the sessions area begins */
int offset = buf->handles * 4 + TPM_HEADER_SIZE;
@@ -327,8 +372,7 @@ void tpm_buf_append_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf,
#endif
if (!tpm2_chip_auth(chip)) {
- tpm_buf_append_auth(chip, buf, attributes, passphrase,
- passphrase_len);
+ tpm_buf_append_auth(chip, buf, passphrase, passphrase_len);
return;
}
@@ -385,51 +429,6 @@ static int tpm2_create_primary(struct tpm_chip *chip, u32 hierarchy,
u32 *handle, u8 *name);
/*
- * It turns out the crypto hmac(sha256) is hard for us to consume
- * because it assumes a fixed key and the TPM seems to change the key
- * on every operation, so we weld the hmac init and final functions in
- * here to give it the same usage characteristics as a regular hash
- */
-static void tpm2_hmac_init(struct sha256_ctx *sctx, u8 *key, u32 key_len)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- sha256_init(sctx);
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_IPAD_VALUE;
- }
- sha256_update(sctx, pad, sizeof(pad));
-}
-
-static void tpm2_hmac_final(struct sha256_ctx *sctx, u8 *key, u32 key_len,
- u8 *out)
-{
- u8 pad[SHA256_BLOCK_SIZE];
- int i;
-
- for (i = 0; i < sizeof(pad); i++) {
- if (i < key_len)
- pad[i] = key[i];
- else
- pad[i] = 0;
- pad[i] ^= HMAC_OPAD_VALUE;
- }
-
- /* collect the final hash; use out as temporary storage */
- sha256_final(sctx, out);
-
- sha256_init(sctx);
- sha256_update(sctx, pad, sizeof(pad));
- sha256_update(sctx, out, SHA256_DIGEST_SIZE);
- sha256_final(sctx, out);
-}
-
-/*
* assume hash sha256 and nonces u, v of size SHA256_DIGEST_SIZE but
* otherwise standard tpm2_KDFa. Note output is in bytes not bits.
*/
@@ -440,16 +439,16 @@ static void tpm2_KDFa(u8 *key, u32 key_len, const char *label, u8 *u,
const __be32 bits = cpu_to_be32(bytes * 8);
while (bytes > 0) {
- struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
__be32 c = cpu_to_be32(counter);
- tpm2_hmac_init(&sctx, key, key_len);
- sha256_update(&sctx, (u8 *)&c, sizeof(c));
- sha256_update(&sctx, label, strlen(label)+1);
- sha256_update(&sctx, u, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, v, SHA256_DIGEST_SIZE);
- sha256_update(&sctx, (u8 *)&bits, sizeof(bits));
- tpm2_hmac_final(&sctx, key, key_len, out);
+ hmac_sha256_init_usingrawkey(&hctx, key, key_len);
+ hmac_sha256_update(&hctx, (u8 *)&c, sizeof(c));
+ hmac_sha256_update(&hctx, label, strlen(label) + 1);
+ hmac_sha256_update(&hctx, u, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, v, SHA256_DIGEST_SIZE);
+ hmac_sha256_update(&hctx, (u8 *)&bits, sizeof(bits));
+ hmac_sha256_final(&hctx, out);
bytes -= SHA256_DIGEST_SIZE;
counter++;
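Each loop iteration produces one SHA-256 block of output, HMACing the counter, the NUL-terminated label, both nonces, and the total bit count, per the TPM 2.0 KDFa definition. A 32-byte key therefore needs a single pass; the session-key derivation elsewhere in this file is roughly:

	tpm2_KDFa(auth->salt, SHA256_DIGEST_SIZE, "ATH",
		  auth->our_nonce, auth->tpm_nonce,
		  SHA256_DIGEST_SIZE, auth->session_key);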
@@ -578,11 +577,9 @@ static void tpm_buf_append_salt(struct tpm_buf *buf, struct tpm_chip *chip,
* encryption key and encrypts the first parameter of the command
* buffer with it.
*
- * As with most tpm_buf operations, success is assumed because failure
- * will be caused by an incorrect programming model and indicated by a
- * kernel message.
+ * Ends the authorization session on failure.
*/
-void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
+int tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
{
u32 cc, handles, val;
struct tpm2_auth *auth = chip->auth;
@@ -593,9 +590,13 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u32 attrs;
u8 cphash[SHA256_DIGEST_SIZE];
struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
+ int ret;
- if (!auth)
- return;
+ if (!auth) {
+ ret = -EIO;
+ goto err;
+ }
/* save the command code in BE format */
auth->ordinal = head->ordinal;
@@ -604,9 +605,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
i = tpm2_find_cc(chip, cc);
if (i < 0) {
- dev_err(&chip->dev, "Command 0x%x not found in TPM\n", cc);
- return;
+ dev_err(&chip->dev, "command 0x%08x not found\n", cc);
+ ret = -EIO;
+ goto err;
}
+
attrs = chip->cc_attrs_tbl[i];
handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0);
@@ -620,9 +623,9 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
u32 handle = tpm_buf_read_u32(buf, &offset_s);
if (auth->name_h[i] != handle) {
- dev_err(&chip->dev, "TPM: handle %d wrong for name\n",
- i);
- return;
+ dev_err(&chip->dev, "invalid handle 0x%08x\n", handle);
+ ret = -EIO;
+ goto err;
}
}
/* point offset_s to the start of the sessions */
@@ -653,12 +656,14 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
offset_s += len;
}
if (offset_s != offset_p) {
- dev_err(&chip->dev, "TPM session length is incorrect\n");
- return;
+ dev_err(&chip->dev, "session length is incorrect\n");
+ ret = -EIO;
+ goto err;
}
if (!hmac) {
- dev_err(&chip->dev, "TPM could not find HMAC session\n");
- return;
+ dev_err(&chip->dev, "could not find HMAC session\n");
+ ret = -EIO;
+ goto err;
}
/* encrypt before HMAC */
@@ -690,8 +695,11 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
if (mso == TPM2_MSO_PERSISTENT ||
mso == TPM2_MSO_VOLATILE ||
mso == TPM2_MSO_NVRAM) {
- sha256_update(&sctx, auth->name[i],
- name_size(auth->name[i]));
+ ret = name_size(auth->name[i]);
+ if (ret < 0)
+ goto err;
+
+ sha256_update(&sctx, auth->name[i], ret);
} else {
__be32 h = cpu_to_be32(auth->name_h[i]);
@@ -704,14 +712,19 @@ void tpm_buf_fill_hmac_session(struct tpm_chip *chip, struct tpm_buf *buf)
sha256_final(&sctx, cphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, cphash, sizeof(cphash));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, hmac);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, cphash, sizeof(cphash));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
+ hmac_sha256_final(&hctx, hmac);
+ return 0;
+
+err:
+ tpm2_end_auth_session(chip);
+ return ret;
}
EXPORT_SYMBOL(tpm_buf_fill_hmac_session);
@@ -751,6 +764,7 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
u8 rphash[SHA256_DIGEST_SIZE];
u32 attrs, cc;
struct sha256_ctx sctx;
+ struct hmac_sha256_ctx hctx;
u16 tag = be16_to_cpu(head->tag);
int parm_len, len, i, handles;
@@ -820,21 +834,20 @@ int tpm_buf_check_hmac_response(struct tpm_chip *chip, struct tpm_buf *buf,
sha256_final(&sctx, rphash);
/* now calculate the hmac */
- tpm2_hmac_init(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len);
- sha256_update(&sctx, rphash, sizeof(rphash));
- sha256_update(&sctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
- sha256_update(&sctx, auth->our_nonce, sizeof(auth->our_nonce));
- sha256_update(&sctx, &auth->attrs, 1);
+ hmac_sha256_init_usingrawkey(&hctx, auth->session_key,
+ sizeof(auth->session_key) +
+ auth->passphrase_len);
+ hmac_sha256_update(&hctx, rphash, sizeof(rphash));
+ hmac_sha256_update(&hctx, auth->tpm_nonce, sizeof(auth->tpm_nonce));
+ hmac_sha256_update(&hctx, auth->our_nonce, sizeof(auth->our_nonce));
+ hmac_sha256_update(&hctx, &auth->attrs, 1);
/* we're done with the rphash, so put our idea of the hmac there */
- tpm2_hmac_final(&sctx, auth->session_key, sizeof(auth->session_key)
- + auth->passphrase_len, rphash);
- if (memcmp(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE) == 0) {
- rc = 0;
- } else {
+ hmac_sha256_final(&hctx, rphash);
+ if (crypto_memneq(rphash, &buf->data[offset_s], SHA256_DIGEST_SIZE)) {
dev_err(&chip->dev, "TPM: HMAC check failed\n");
goto out;
}
+ rc = 0;
/* now do response decryption */
if (auth->attrs & TPM2_SA_ENCRYPT) {
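The switch from memcmp() to crypto_memneq() matters because memcmp() may return as soon as a byte differs, so its runtime can leak how long a matching HMAC prefix was; crypto_memneq() from <crypto/utils.h> compares the full length in constant time. The shape of the check, with hypothetical buffer names:

	if (crypto_memneq(expected_mac, received_mac, SHA256_DIGEST_SIZE))
		return -EBADMSG;	/* MAC mismatch; hypothetical errno */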
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index ed97344f2324..6c25305c256e 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -133,8 +133,7 @@ static inline bool tpm_crb_has_idle(u32 start_method)
{
return !(start_method == ACPI_TPM2_START_METHOD ||
start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD ||
- start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC ||
- start_method == ACPI_TPM2_CRB_WITH_ARM_FFA);
+ start_method == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC);
}
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
@@ -180,6 +179,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete
*
* @dev: crb device
* @priv: crb private data
+ * @loc: locality
*
* Write CRB_CTRL_REQ_GO_IDLE to TPM_CRB_CTRL_REQ
* The device should respond within TIMEOUT_C by clearing the bit.
@@ -191,7 +191,7 @@ static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete
*
* Return: 0 always
*/
-static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
+static int __crb_go_idle(struct device *dev, struct crb_priv *priv, int loc)
{
int rc;
@@ -200,6 +200,12 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
+ if (rc)
+ return rc;
+ }
+
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
@@ -220,7 +226,7 @@ static int crb_go_idle(struct tpm_chip *chip)
struct device *dev = &chip->dev;
struct crb_priv *priv = dev_get_drvdata(dev);
- return __crb_go_idle(dev, priv);
+ return __crb_go_idle(dev, priv, chip->locality);
}
/**
@@ -228,6 +234,7 @@ static int crb_go_idle(struct tpm_chip *chip)
*
* @dev: crb device
* @priv: crb private data
+ * @loc: locality
*
* Write CRB_CTRL_REQ_CMD_READY to TPM_CRB_CTRL_REQ
* and poll till the device acknowledge it by clearing the bit.
@@ -238,7 +245,7 @@ static int crb_go_idle(struct tpm_chip *chip)
*
* Return: 0 on success, -ETIME on timeout.
*/
-static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
+static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv, int loc)
{
int rc;
@@ -247,6 +254,12 @@ static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+ if (priv->sm == ACPI_TPM2_CRB_WITH_ARM_FFA) {
+ rc = tpm_crb_ffa_start(CRB_FFA_START_TYPE_COMMAND, loc);
+ if (rc)
+ return rc;
+ }
+
rc = crb_try_pluton_doorbell(priv, true);
if (rc)
return rc;
@@ -267,7 +280,7 @@ static int crb_cmd_ready(struct tpm_chip *chip)
struct device *dev = &chip->dev;
struct crb_priv *priv = dev_get_drvdata(dev);
- return __crb_cmd_ready(dev, priv);
+ return __crb_cmd_ready(dev, priv, chip->locality);
}
static int __crb_request_locality(struct device *dev,
@@ -401,7 +414,7 @@ static int crb_do_acpi_start(struct tpm_chip *chip)
#ifdef CONFIG_ARM64
/*
* This is a TPM Command Response Buffer start method that invokes a
- * Secure Monitor Call to requrest the firmware to execute or cancel
+ * Secure Monitor Call to request the firmware to execute or cancel
* a TPM 2.0 command.
*/
static int tpm_crb_smc_start(struct device *dev, unsigned long func_id)
@@ -444,7 +457,7 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t len)
/* Seems to be necessary for every command */
if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
- __crb_cmd_ready(&chip->dev, priv);
+ __crb_cmd_ready(&chip->dev, priv, chip->locality);
memcpy_toio(priv->cmd, buf, len);
@@ -672,7 +685,7 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
* PTT HW bug w/a: wake up the device to access
* possibly not retained registers.
*/
- ret = __crb_cmd_ready(dev, priv);
+ ret = __crb_cmd_ready(dev, priv, 0);
if (ret)
goto out_relinquish_locality;
@@ -744,7 +757,7 @@ out:
if (!ret)
priv->cmd_size = cmd_size;
- __crb_go_idle(dev, priv);
+ __crb_go_idle(dev, priv, 0);
out_relinquish_locality:
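With the FF-A start method now implementing the idle and ready transitions, both handshakes write the request bit and then poll for the device to clear it. A sketch of the GO_IDLE side using the crb_wait_for_reg_32() helper defined earlier in this file (the timeout constant is an assumption):

	iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
	if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
				 CRB_CTRL_REQ_GO_IDLE, 0 /* cleared */,
				 TPM2_TIMEOUT_C))
		dev_warn(dev, "goIdle timed out\n");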
diff --git a/drivers/char/tpm/tpm_loongson.c b/drivers/char/tpm/tpm_loongson.c
new file mode 100644
index 000000000000..9e50250763d1
--- /dev/null
+++ b/drivers/char/tpm/tpm_loongson.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Loongson Technology Corporation Limited. */
+
+#include <linux/device.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+
+#include "tpm.h"
+
+struct tpm_loongson_cmd {
+ u32 cmd_id;
+ u32 data_off;
+ u32 data_len;
+ u32 pad[5];
+};
+
+static int tpm_loongson_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd_ret = tpm_engine->command_ret;
+
+ if (cmd_ret->data_len > count)
+ return -EIO;
+
+ memcpy(buf, tpm_engine->data_buffer, cmd_ret->data_len);
+
+ return cmd_ret->data_len;
+}
+
+static int tpm_loongson_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t count)
+{
+ struct loongson_se_engine *tpm_engine = dev_get_drvdata(&chip->dev);
+ struct tpm_loongson_cmd *cmd = tpm_engine->command;
+
+ if (count > tpm_engine->buffer_size)
+ return -E2BIG;
+
+ cmd->data_len = count;
+ memcpy(tpm_engine->data_buffer, buf, count);
+
+ return loongson_se_send_engine_cmd(tpm_engine);
+}
+
+static const struct tpm_class_ops tpm_loongson_ops = {
+ .flags = TPM_OPS_AUTO_STARTUP,
+ .recv = tpm_loongson_recv,
+ .send = tpm_loongson_send,
+};
+
+static int tpm_loongson_probe(struct platform_device *pdev)
+{
+ struct loongson_se_engine *tpm_engine;
+ struct device *dev = &pdev->dev;
+ struct tpm_loongson_cmd *cmd;
+ struct tpm_chip *chip;
+
+ tpm_engine = loongson_se_init_engine(dev->parent, SE_ENGINE_TPM);
+ if (!tpm_engine)
+ return -ENODEV;
+ cmd = tpm_engine->command;
+ cmd->cmd_id = SE_CMD_TPM;
+ cmd->data_off = tpm_engine->buffer_off;
+
+ chip = tpmm_chip_alloc(dev, &tpm_loongson_ops);
+ if (IS_ERR(chip))
+ return PTR_ERR(chip);
+ chip->flags = TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_IRQ;
+ dev_set_drvdata(&chip->dev, tpm_engine);
+
+ return tpm_chip_register(chip);
+}
+
+static struct platform_driver tpm_loongson = {
+ .probe = tpm_loongson_probe,
+ .driver = {
+ .name = "tpm_loongson",
+ },
+};
+module_platform_driver(tpm_loongson);
+
+MODULE_ALIAS("platform:tpm_loongson");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Loongson TPM driver");
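The 32-byte tpm_loongson_cmd entry carries only framing; the TPM command bytes travel through the engine's shared data buffer, and the reply comes back through the mirrored command_ret block. A sketch of one send, assuming a 12-byte command in tpm_cmd:

	struct tpm_loongson_cmd *cmd = tpm_engine->command;

	cmd->cmd_id = SE_CMD_TPM;
	cmd->data_off = tpm_engine->buffer_off;	/* set once at probe */
	cmd->data_len = 12;
	memcpy(tpm_engine->data_buffer, tpm_cmd, 12);
	loongson_se_send_engine_cmd(tpm_engine);	/* fires SE_CMD_TPM */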
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index d53fce1c9d6f..c9793a3d986d 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -33,6 +33,20 @@ static const guid_t tpm_ppi_guid =
GUID_INIT(0x3DDDFAA6, 0x361B, 0x4EB4,
0xA4, 0x24, 0x8D, 0x10, 0x08, 0x9D, 0x16, 0x53);
+static const char * const tpm_ppi_info[] = {
+ "Not implemented",
+ "BIOS only",
+ "Blocked for OS by system firmware",
+ "User required",
+ "User not required",
+};
+
+/* A mutex to serialize population of and access to the operations cache */
+static DEFINE_MUTEX(tpm_ppi_lock);
+
+static u32 ppi_operations_cache[PPI_VS_REQ_END + 1];
+static bool ppi_cache_populated;
+
static bool tpm_ppi_req_has_parameter(u64 req)
{
return req == 23;
@@ -277,8 +291,7 @@ cleanup:
return status;
}
-static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
- u32 end)
+static ssize_t cache_ppi_operations(acpi_handle dev_handle, char *buf)
{
int i;
u32 ret;
@@ -286,34 +299,22 @@ static ssize_t show_ppi_operations(acpi_handle dev_handle, char *buf, u32 start,
union acpi_object *obj, tmp;
union acpi_object argv = ACPI_INIT_DSM_ARGV4(1, &tmp);
- static char *info[] = {
- "Not implemented",
- "BIOS only",
- "Blocked for OS by BIOS",
- "User required",
- "User not required",
- };
-
if (!acpi_check_dsm(dev_handle, &tpm_ppi_guid, TPM_PPI_REVISION_ID_1,
1 << TPM_PPI_FN_GETOPR))
return -EPERM;
tmp.integer.type = ACPI_TYPE_INTEGER;
- for (i = start; i <= end; i++) {
+ for (i = 0; i <= PPI_VS_REQ_END; i++) {
tmp.integer.value = i;
obj = tpm_eval_dsm(dev_handle, TPM_PPI_FN_GETOPR,
ACPI_TYPE_INTEGER, &argv,
TPM_PPI_REVISION_ID_1);
- if (!obj) {
+ if (!obj)
return -ENOMEM;
- } else {
- ret = obj->integer.value;
- ACPI_FREE(obj);
- }
- if (ret > 0 && ret < ARRAY_SIZE(info))
- len += sysfs_emit_at(buf, len, "%d %d: %s\n",
- i, ret, info[ret]);
+ ret = obj->integer.value;
+ ppi_operations_cache[i] = ret;
+ ACPI_FREE(obj);
}
return len;
@@ -324,9 +325,30 @@ static ssize_t tpm_show_ppi_tcg_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
+
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
- return show_ppi_operations(chip->acpi_dev_handle, buf, 0,
- PPI_TPM_REQ_MAX);
+ ppi_cache_populated = true;
+ }
+
+ for (i = 0; i <= PPI_TPM_REQ_MAX; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
+
+ return len;
}
static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
@@ -334,9 +356,30 @@ static ssize_t tpm_show_ppi_vs_operations(struct device *dev,
char *buf)
{
struct tpm_chip *chip = to_tpm_chip(dev);
+ ssize_t len = 0;
+ u32 ret;
+ int i;
- return show_ppi_operations(chip->acpi_dev_handle, buf, PPI_VS_REQ_START,
- PPI_VS_REQ_END);
+ mutex_lock(&tpm_ppi_lock);
+ if (!ppi_cache_populated) {
+ len = cache_ppi_operations(chip->acpi_dev_handle, buf);
+ if (len < 0) {
+ mutex_unlock(&tpm_ppi_lock);
+ return len;
+ }
+
+ ppi_cache_populated = true;
+ }
+
+ for (i = PPI_VS_REQ_START; i <= PPI_VS_REQ_END; i++) {
+ ret = ppi_operations_cache[i];
+ if (ret < ARRAY_SIZE(tpm_ppi_info))
+ len += sysfs_emit_at(buf, len, "%d %d: %s\n",
+ i, ret, tpm_ppi_info[ret]);
+ }
+ mutex_unlock(&tpm_ppi_lock);
+
+ return len;
}
static DEVICE_ATTR(version, S_IRUGO, tpm_show_ppi_version, NULL);
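Both show functions above repeat the same populate-once dance under the mutex; a possible follow-up refactor could hoist it into a helper (hypothetical name, same locking rules; the cache entries are write-once, so reading them after the flag has been observed under the mutex is safe):

	static int tpm_ppi_ensure_cache(acpi_handle handle, char *buf)
	{
		int rc = 0;

		mutex_lock(&tpm_ppi_lock);
		if (!ppi_cache_populated) {
			rc = cache_ppi_operations(handle, buf);
			if (rc >= 0) {
				ppi_cache_populated = true;
				rc = 0;
			}
		}
		mutex_unlock(&tpm_ppi_lock);
		return rc;
	}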
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 4b12c4b9da8b..e2a1769081b1 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -265,8 +265,7 @@ static u8 tpm_tis_status(struct tpm_chip *chip)
/*
* Dump stack for forensics, as invalid TPM_STS.x could be
- * potentially triggered by impaired tpm_try_get_ops() or
- * tpm_find_get_ops().
+ * potentially triggered by impaired tpm_try_get_ops().
*/
dump_stack();
}
@@ -978,8 +977,8 @@ restore_irqs:
* will call disable_irq which undoes all of the above.
*/
if (!(chip->flags & TPM_CHIP_FLAG_IRQ)) {
- tpm_tis_write8(priv, original_int_vec,
- TPM_INT_VECTOR(priv->locality));
+ tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality),
+ original_int_vec);
rc = -1;
}
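The fix above restores the argument order of tpm_tis_write8(), declared in tpm_tis_core.h with the register address before the value; the old code wrote the vector number to whatever register original_int_vec happened to address. The corrected restore:

	/* int tpm_tis_write8(struct tpm_tis_data *data, u32 addr, u8 value) */
	tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), original_int_vec);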
diff --git a/drivers/char/xillybus/xillybus_core.c b/drivers/char/xillybus/xillybus_core.c
index efb1ae834265..fc4e69b5cb6a 100644
--- a/drivers/char/xillybus/xillybus_core.c
+++ b/drivers/char/xillybus/xillybus_core.c
@@ -1973,7 +1973,7 @@ EXPORT_SYMBOL(xillybus_endpoint_remove);
static int __init xillybus_init(void)
{
- xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+ xillybus_wq = alloc_workqueue(xillyname, WQ_UNBOUND, 0);
if (!xillybus_wq)
return -ENOMEM;
diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c
index 45771b1a3716..386531474213 100644
--- a/drivers/char/xillybus/xillyusb.c
+++ b/drivers/char/xillybus/xillyusb.c
@@ -2163,7 +2163,7 @@ static int xillyusb_probe(struct usb_interface *interface,
spin_lock_init(&xdev->error_lock);
xdev->in_counter = 0;
xdev->in_bytes_left = 0;
- xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI, 0);
+ xdev->workq = alloc_workqueue(xillyname, WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!xdev->workq) {
dev_err(&interface->dev, "Failed to allocate work queue\n");
@@ -2275,7 +2275,7 @@ static int __init xillyusb_init(void)
{
int rc = 0;
- wakeup_wq = alloc_workqueue(xillyname, 0, 0);
+ wakeup_wq = alloc_workqueue(xillyname, WQ_UNBOUND, 0);
if (!wakeup_wq)
return -ENOMEM;
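WQ_UNBOUND hands work items to the unbound worker pool rather than pinning them to the submitting CPU, which suits long-running, non-latency-critical jobs like these transfers. The allocation pattern shared by all three call sites, with a hypothetical name:

	wq = alloc_workqueue("example", WQ_UNBOUND, 0);	/* 0 = default max_active */
	if (!wq)
		return -ENOMEM;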
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4d56475f94fc..3a1611008e48 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -364,6 +364,7 @@ config COMMON_CLK_LOCHNAGAR
config COMMON_CLK_NPCM8XX
tristate "Clock driver for the NPCM8XX SoC Family"
depends on ARCH_NPCM || COMPILE_TEST
+ select AUXILIARY_BUS
help
This driver supports the clocks on the Nuvoton BMC NPCM8XX SoC Family,
all the clocks are initialized by the bootloader, so this driver
@@ -501,6 +502,15 @@ config COMMON_CLK_SP7021
Not all features of the PLL are currently supported
by the driver.
+config COMMON_CLK_RPMI
+ tristate "Clock driver based on RISC-V RPMI"
+ depends on RISCV || COMPILE_TEST
+ depends on MAILBOX
+ default RISCV
+ help
+ Support for clocks based on the clock service group defined by
+ the RISC-V platform management interface (RPMI) specification.
+
source "drivers/clk/actions/Kconfig"
source "drivers/clk/analogbits/Kconfig"
source "drivers/clk/baikal-t1/Kconfig"
@@ -511,6 +521,7 @@ source "drivers/clk/imx/Kconfig"
source "drivers/clk/ingenic/Kconfig"
source "drivers/clk/keystone/Kconfig"
source "drivers/clk/mediatek/Kconfig"
+source "drivers/clk/mmp/Kconfig"
source "drivers/clk/meson/Kconfig"
source "drivers/clk/mstar/Kconfig"
source "drivers/clk/microchip/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 18ed29cfdc11..61ec08404442 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o
obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o
obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o
obj-$(CONFIG_COMMON_CLK_RP1) += clk-rp1.o
+obj-$(CONFIG_COMMON_CLK_RPMI) += clk-rpmi.o
obj-$(CONFIG_COMMON_CLK_HI655X) += clk-hi655x.o
obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o
obj-$(CONFIG_COMMON_CLK_SCMI) += clk-scmi.o
@@ -124,8 +125,7 @@ obj-$(CONFIG_ARCH_HISI) += hisilicon/
obj-y += imgtec/
obj-y += imx/
obj-y += ingenic/
-obj-$(CONFIG_ARCH_K3) += keystone/
-obj-$(CONFIG_ARCH_KEYSTONE) += keystone/
+obj-y += keystone/
obj-y += mediatek/
obj-$(CONFIG_ARCH_MESON) += meson/
obj-y += microchip/
diff --git a/drivers/clk/actions/owl-common.c b/drivers/clk/actions/owl-common.c
index c62024b7c737..b3dded204dc5 100644
--- a/drivers/clk/actions/owl-common.c
+++ b/drivers/clk/actions/owl-common.c
@@ -18,7 +18,6 @@ static const struct regmap_config owl_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x00cc,
- .fast_io = true,
};
static void owl_clk_set_regmap(const struct owl_clk_desc *desc,
diff --git a/drivers/clk/actions/owl-common.h b/drivers/clk/actions/owl-common.h
index 8fb65f3e82d7..5768a2e0f6a0 100644
--- a/drivers/clk/actions/owl-common.h
+++ b/drivers/clk/actions/owl-common.h
@@ -32,7 +32,7 @@ struct owl_clk_desc {
};
static inline struct owl_clk_common *
- hw_to_owl_clk_common(const struct clk_hw *hw)
+ hw_to_owl_clk_common(struct clk_hw *hw)
{
return container_of(hw, struct owl_clk_common, hw);
}
diff --git a/drivers/clk/actions/owl-composite.c b/drivers/clk/actions/owl-composite.c
index 48f177f6ce9c..00b74f8bc437 100644
--- a/drivers/clk/actions/owl-composite.c
+++ b/drivers/clk/actions/owl-composite.c
@@ -122,13 +122,13 @@ static int owl_comp_fact_set_rate(struct clk_hw *hw, unsigned long rate,
rate, parent_rate);
}
-static long owl_comp_fix_fact_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_comp_fix_fact_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_composite *comp = hw_to_owl_comp(hw);
struct clk_fixed_factor *fix_fact_hw = &comp->rate.fix_fact_hw;
- return comp->fix_fact_ops->round_rate(&fix_fact_hw->hw, rate, parent_rate);
+ return comp->fix_fact_ops->determine_rate(&fix_fact_hw->hw, req);
}
static unsigned long owl_comp_fix_fact_recalc_rate(struct clk_hw *hw,
@@ -193,7 +193,7 @@ const struct clk_ops owl_comp_fix_fact_ops = {
.is_enabled = owl_comp_is_enabled,
/* fix_fact_ops */
- .round_rate = owl_comp_fix_fact_round_rate,
+ .determine_rate = owl_comp_fix_fact_determine_rate,
.recalc_rate = owl_comp_fix_fact_recalc_rate,
.set_rate = owl_comp_fix_fact_set_rate,
};
diff --git a/drivers/clk/actions/owl-composite.h b/drivers/clk/actions/owl-composite.h
index bca38bf8f218..6d7c6f0c47c8 100644
--- a/drivers/clk/actions/owl-composite.h
+++ b/drivers/clk/actions/owl-composite.h
@@ -108,7 +108,7 @@ struct owl_composite {
}, \
}
-static inline struct owl_composite *hw_to_owl_comp(const struct clk_hw *hw)
+static inline struct owl_composite *hw_to_owl_comp(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-divider.c b/drivers/clk/actions/owl-divider.c
index cddac00fe324..118f1393c678 100644
--- a/drivers/clk/actions/owl-divider.c
+++ b/drivers/clk/actions/owl-divider.c
@@ -23,13 +23,16 @@ long owl_divider_helper_round_rate(struct owl_clk_common *common,
div_hw->div_flags);
}
-static long owl_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_divider *div = hw_to_owl_divider(hw);
- return owl_divider_helper_round_rate(&div->common, &div->div_hw,
- rate, parent_rate);
+ req->rate = owl_divider_helper_round_rate(&div->common, &div->div_hw,
+ req->rate,
+ &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_divider_helper_recalc_rate(struct owl_clk_common *common,
@@ -89,6 +92,6 @@ static int owl_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops owl_divider_ops = {
.recalc_rate = owl_divider_recalc_rate,
- .round_rate = owl_divider_round_rate,
+ .determine_rate = owl_divider_determine_rate,
.set_rate = owl_divider_set_rate,
};
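The same mechanical conversion repeats across the clk changes in this diff: the old .round_rate callback's inputs and outputs move into struct clk_rate_request, with the rounded rate stored in req->rate and the chosen parent rate exchanged through req->best_parent_rate. Schematically (foo_* names are placeholders):

	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		/* old: return foo_round_rate(hw, rate, &parent_rate); */
		req->rate = foo_round_rate_helper(hw, req->rate,
						  &req->best_parent_rate);

		return 0;
	}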
diff --git a/drivers/clk/actions/owl-divider.h b/drivers/clk/actions/owl-divider.h
index 083be6d80954..d76f58782c52 100644
--- a/drivers/clk/actions/owl-divider.h
+++ b/drivers/clk/actions/owl-divider.h
@@ -49,7 +49,7 @@ struct owl_divider {
}, \
}
-static inline struct owl_divider *hw_to_owl_divider(const struct clk_hw *hw)
+static inline struct owl_divider *hw_to_owl_divider(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c
index 64f316cf7cfc..12f41f6bacd6 100644
--- a/drivers/clk/actions/owl-factor.c
+++ b/drivers/clk/actions/owl-factor.c
@@ -130,14 +130,16 @@ long owl_factor_helper_round_rate(struct owl_clk_common *common,
return *parent_rate * mul / div;
}
-static long owl_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_factor *factor = hw_to_owl_factor(hw);
struct owl_factor_hw *factor_hw = &factor->factor_hw;
- return owl_factor_helper_round_rate(&factor->common, factor_hw,
- rate, parent_rate);
+ req->rate = owl_factor_helper_round_rate(&factor->common, factor_hw,
+ req->rate, &req->best_parent_rate);
+
+ return 0;
}
unsigned long owl_factor_helper_recalc_rate(struct owl_clk_common *common,
@@ -214,7 +216,7 @@ static int owl_factor_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops owl_factor_ops = {
- .round_rate = owl_factor_round_rate,
+ .determine_rate = owl_factor_determine_rate,
.recalc_rate = owl_factor_recalc_rate,
.set_rate = owl_factor_set_rate,
};
diff --git a/drivers/clk/actions/owl-factor.h b/drivers/clk/actions/owl-factor.h
index 04b89cbfdccb..24c704d40925 100644
--- a/drivers/clk/actions/owl-factor.h
+++ b/drivers/clk/actions/owl-factor.h
@@ -57,7 +57,7 @@ struct owl_factor {
#define div_mask(d) ((1 << ((d)->width)) - 1)
-static inline struct owl_factor *hw_to_owl_factor(const struct clk_hw *hw)
+static inline struct owl_factor *hw_to_owl_factor(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-gate.h b/drivers/clk/actions/owl-gate.h
index c2f161c93fda..ac458d4385ee 100644
--- a/drivers/clk/actions/owl-gate.h
+++ b/drivers/clk/actions/owl-gate.h
@@ -56,7 +56,7 @@ struct owl_gate {
}, \
} \
-static inline struct owl_gate *hw_to_owl_gate(const struct clk_hw *hw)
+static inline struct owl_gate *hw_to_owl_gate(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-mux.h b/drivers/clk/actions/owl-mux.h
index 53b9ab665294..dc0ecc2d5e10 100644
--- a/drivers/clk/actions/owl-mux.h
+++ b/drivers/clk/actions/owl-mux.h
@@ -44,7 +44,7 @@ struct owl_mux {
}, \
}
-static inline struct owl_mux *hw_to_owl_mux(const struct clk_hw *hw)
+static inline struct owl_mux *hw_to_owl_mux(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/actions/owl-pll.c b/drivers/clk/actions/owl-pll.c
index 155f313986b4..869690b79cc1 100644
--- a/drivers/clk/actions/owl-pll.c
+++ b/drivers/clk/actions/owl-pll.c
@@ -56,8 +56,8 @@ static const struct clk_pll_table *_get_pll_table(
return table;
}
-static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int owl_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct owl_pll *pll = hw_to_owl_pll(hw);
struct owl_pll_hw *pll_hw = &pll->pll_hw;
@@ -65,17 +65,24 @@ static long owl_pll_round_rate(struct clk_hw *hw, unsigned long rate,
u32 mul;
if (pll_hw->table) {
- clkt = _get_pll_table(pll_hw->table, rate);
- return clkt->rate;
+ clkt = _get_pll_table(pll_hw->table, req->rate);
+ req->rate = clkt->rate;
+
+ return 0;
}
/* fixed frequency */
- if (pll_hw->width == 0)
- return pll_hw->bfreq;
+ if (pll_hw->width == 0) {
+ req->rate = pll_hw->bfreq;
- mul = owl_pll_calculate_mul(pll_hw, rate);
+ return 0;
+ }
+
+ mul = owl_pll_calculate_mul(pll_hw, req->rate);
- return pll_hw->bfreq * mul;
+ req->rate = pll_hw->bfreq * mul;
+
+ return 0;
}
static unsigned long owl_pll_recalc_rate(struct clk_hw *hw,
@@ -188,7 +195,7 @@ const struct clk_ops owl_pll_ops = {
.enable = owl_pll_enable,
.disable = owl_pll_disable,
.is_enabled = owl_pll_is_enabled,
- .round_rate = owl_pll_round_rate,
+ .determine_rate = owl_pll_determine_rate,
.recalc_rate = owl_pll_recalc_rate,
.set_rate = owl_pll_set_rate,
};
diff --git a/drivers/clk/actions/owl-pll.h b/drivers/clk/actions/owl-pll.h
index 78e5fc360b03..58e19f1ade43 100644
--- a/drivers/clk/actions/owl-pll.h
+++ b/drivers/clk/actions/owl-pll.h
@@ -98,7 +98,7 @@ struct owl_pll {
#define mul_mask(m) ((1 << ((m)->width)) - 1)
-static inline struct owl_pll *hw_to_owl_pll(const struct clk_hw *hw)
+static inline struct owl_pll *hw_to_owl_pll(struct clk_hw *hw)
{
struct owl_clk_common *common = hw_to_owl_clk_common(hw);
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
index a92da64c12e1..bf9b635ac9d6 100644
--- a/drivers/clk/at91/clk-audio-pll.c
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -270,8 +270,8 @@ static int clk_audio_pll_frac_determine_rate(struct clk_hw *hw,
return 0;
}
-static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pad_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -283,7 +283,7 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
/*
* Rate divisor is actually made of two different divisors, multiplied
@@ -304,12 +304,12 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
continue;
best_parent_rate = clk_hw_round_rate(pclk,
- rate * tmp_qd * div);
+ req->rate * tmp_qd * div);
tmp_rate = best_parent_rate / (div * tmp_qd);
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
}
@@ -318,11 +318,13 @@ static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
pr_debug("A PLL/PAD: %s, best_rate = %ld, best_parent_rate = %lu\n",
__func__, best_rate, best_parent_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
-static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_audio_pll_pmc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *pclk = clk_hw_get_parent(hw);
long best_rate = -EINVAL;
@@ -333,20 +335,20 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
int best_diff = -1;
pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
- rate, *parent_rate);
+ req->rate, req->best_parent_rate);
- if (!rate)
+ if (!req->rate)
return 0;
best_parent_rate = clk_round_rate(pclk->clk, 1);
- div = max(best_parent_rate / rate, 1UL);
+ div = max(best_parent_rate / req->rate, 1UL);
for (; div <= AUDIO_PLL_QDPMC_MAX; div++) {
- best_parent_rate = clk_round_rate(pclk->clk, rate * div);
+ best_parent_rate = clk_round_rate(pclk->clk, req->rate * div);
tmp_rate = best_parent_rate / div;
- tmp_diff = abs(rate - tmp_rate);
+ tmp_diff = abs(req->rate - tmp_rate);
if (best_diff < 0 || best_diff > tmp_diff) {
- *parent_rate = best_parent_rate;
+ req->best_parent_rate = best_parent_rate;
best_rate = tmp_rate;
best_diff = tmp_diff;
tmp_qd = div;
@@ -356,9 +358,11 @@ static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
}
pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n",
- __func__, best_rate, *parent_rate, tmp_qd - 1);
+ __func__, best_rate, req->best_parent_rate, tmp_qd - 1);
+
+ req->rate = best_rate;
- return best_rate;
+ return 0;
}
static int clk_audio_pll_frac_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -436,7 +440,7 @@ static const struct clk_ops audio_pll_pad_ops = {
.enable = clk_audio_pll_pad_enable,
.disable = clk_audio_pll_pad_disable,
.recalc_rate = clk_audio_pll_pad_recalc_rate,
- .round_rate = clk_audio_pll_pad_round_rate,
+ .determine_rate = clk_audio_pll_pad_determine_rate,
.set_rate = clk_audio_pll_pad_set_rate,
};
@@ -444,7 +448,7 @@ static const struct clk_ops audio_pll_pmc_ops = {
.enable = clk_audio_pll_pmc_enable,
.disable = clk_audio_pll_pmc_disable,
.recalc_rate = clk_audio_pll_pmc_recalc_rate,
- .round_rate = clk_audio_pll_pmc_round_rate,
+ .determine_rate = clk_audio_pll_pmc_determine_rate,
.set_rate = clk_audio_pll_pmc_set_rate,
};
diff --git a/drivers/clk/at91/clk-h32mx.c b/drivers/clk/at91/clk-h32mx.c
index 1e6c12eeda10..a9aa93b5a870 100644
--- a/drivers/clk/at91/clk-h32mx.c
+++ b/drivers/clk/at91/clk-h32mx.c
@@ -40,21 +40,32 @@ static unsigned long clk_sama5d4_h32mx_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_sama5d4_h32mx_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sama5d4_h32mx_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -77,7 +88,7 @@ static int clk_sama5d4_h32mx_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops h32mx_ops = {
.recalc_rate = clk_sama5d4_h32mx_recalc_rate,
- .round_rate = clk_sama5d4_h32mx_round_rate,
+ .determine_rate = clk_sama5d4_h32mx_determine_rate,
.set_rate = clk_sama5d4_h32mx_set_rate,
};
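A worked example of the rounding above, assuming a 266 MHz parent clock:

	/* parent = 266 MHz, div = parent / 2 = 133 MHz
	 *   request 300 MHz -> clamped to 266 MHz (rate > parent)
	 *   request 100 MHz -> clamped to 133 MHz (rate < div)
	 *   request 180 MHz -> 180 - 133 = 47 < 266 - 180 = 86 -> 133 MHz
	 *   request 220 MHz -> 220 - 133 = 87 >= 266 - 220 = 46 -> 266 MHz
	 */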
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index 7a544e429d34..d5ea2069ec83 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -580,6 +580,9 @@ clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
{
struct clk_master *master = to_clk_master(hw);
+ if (master->div == MASTER_PRES_MAX)
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, 3);
+
return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
}
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index c173a44c800a..e7208c47268b 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -3,6 +3,7 @@
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -279,8 +280,11 @@ static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw,
long best_diff = LONG_MIN;
u32 shift;
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = parent_rate;
+
+ return 0;
+ }
/* First step: check the available dividers. */
for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
@@ -332,50 +336,57 @@ end:
return 0;
}
-static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int clk_sam9x5_peripheral_no_parent_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int shift = 0;
unsigned long best_rate;
unsigned long best_diff;
- unsigned long cur_rate = *parent_rate;
+ unsigned long cur_rate = req->best_parent_rate;
unsigned long cur_diff;
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
- if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max)
- return *parent_rate;
+ if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (periph->range.max) {
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
+ cur_rate = req->best_parent_rate >> shift;
if (cur_rate <= periph->range.max)
break;
}
}
- if (rate >= cur_rate)
- return cur_rate;
+ if (req->rate >= cur_rate) {
+ req->rate = cur_rate;
+
+ return 0;
+ }
- best_diff = cur_rate - rate;
+ best_diff = cur_rate - req->rate;
best_rate = cur_rate;
for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
- cur_rate = *parent_rate >> shift;
- if (cur_rate < rate)
- cur_diff = rate - cur_rate;
+ cur_rate = req->best_parent_rate >> shift;
+ if (cur_rate < req->rate)
+ cur_diff = req->rate - cur_rate;
else
- cur_diff = cur_rate - rate;
+ cur_diff = cur_rate - req->rate;
if (cur_diff < best_diff) {
best_diff = cur_diff;
best_rate = cur_rate;
}
- if (!best_diff || cur_rate < rate)
+ if (!best_diff || cur_rate < req->rate)
break;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
@@ -427,7 +438,7 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.disable = clk_sam9x5_peripheral_disable,
.is_enabled = clk_sam9x5_peripheral_is_enabled,
.recalc_rate = clk_sam9x5_peripheral_recalc_rate,
- .round_rate = clk_sam9x5_peripheral_round_rate,
+ .determine_rate = clk_sam9x5_peripheral_no_parent_determine_rate,
.set_rate = clk_sam9x5_peripheral_set_rate,
.save_context = clk_sam9x5_peripheral_save_context,
.restore_context = clk_sam9x5_peripheral_restore_context,
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 249d6a53cedf..5c5f7398effe 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -231,13 +231,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
return bestrate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
- return clk_pll_get_best_div_mul(pll, rate, *parent_rate,
- NULL, NULL, NULL);
+ req->rate = clk_pll_get_best_div_mul(pll, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -302,7 +304,7 @@ static const struct clk_ops pll_ops = {
.unprepare = clk_pll_unprepare,
.is_prepared = clk_pll_is_prepared,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
.save_context = clk_pll_save_context,
.restore_context = clk_pll_restore_context,
diff --git a/drivers/clk/at91/clk-plldiv.c b/drivers/clk/at91/clk-plldiv.c
index ba3a1839a96d..3ac09fecc54e 100644
--- a/drivers/clk/at91/clk-plldiv.c
+++ b/drivers/clk/at91/clk-plldiv.c
@@ -33,21 +33,33 @@ static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_plldiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_plldiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- if (rate > *parent_rate)
- return *parent_rate;
- div = *parent_rate / 2;
- if (rate < div)
- return div;
+ if (req->rate > req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
- if (rate - div < *parent_rate - rate)
- return div;
+ return 0;
+ }
+
+ div = req->best_parent_rate / 2;
+ if (req->rate < div) {
+ req->rate = div;
+
+ return 0;
+ }
+
+ if (req->rate - div < req->best_parent_rate - req->rate) {
+ req->rate = div;
- return *parent_rate;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -66,7 +78,7 @@ static int clk_plldiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops plldiv_ops = {
.recalc_rate = clk_plldiv_recalc_rate,
- .round_rate = clk_plldiv_round_rate,
+ .determine_rate = clk_plldiv_determine_rate,
.set_rate = clk_plldiv_set_rate,
};
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index cefd9948e103..3b965057ba0d 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -93,8 +93,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -103,11 +103,8 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
(cmul == frac->mul && cfrac == frac->frac))
goto unlock;
- /* Recommended value for PMC_PLL_ACR */
- if (core->characteristics->upll)
- val = AT91_PMC_PLL_ACR_DEFAULT_UPLL;
- else
- val = AT91_PMC_PLL_ACR_DEFAULT_PLLA;
+ /* Load recommended value for PMC_PLL_ACR */
+ val = core->characteristics->acr;
regmap_write(regmap, AT91_PMC_PLL_ACR, val);
regmap_write(regmap, AT91_PMC_PLL_CTRL1,
@@ -128,17 +125,17 @@ static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
udelay(10);
}
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -164,8 +161,8 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, AT91_PMC_PLL_CTRL0_ENPLL, 0);
@@ -173,9 +170,9 @@ static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
regmap_update_bits(regmap, AT91_PMC_PLL_ACR,
AT91_PMC_PLL_ACR_UTMIBG | AT91_PMC_PLL_ACR_UTMIVR, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
@@ -230,12 +227,16 @@ static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core,
return tmprate;
}
-static long sam9x60_frac_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_frac_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_frac_pll_compute_mul_frac(core, rate, *parent_rate, false);
+ req->rate = sam9x60_frac_pll_compute_mul_frac(core, req->rate,
+ req->best_parent_rate,
+ false);
+
+ return 0;
}
static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -262,8 +263,8 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
@@ -275,18 +276,18 @@ static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
(frac->mul << core->layout->mul_shift) |
(frac->frac << core->layout->frac_shift));
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
AT91_PMC_PLL_CTRL0_ENLOCK |
AT91_PMC_PLL_CTRL0_ENPLL);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -321,7 +322,7 @@ static const struct clk_ops sam9x60_frac_pll_ops = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
@@ -332,13 +333,16 @@ static const struct clk_ops sam9x60_frac_pll_ops_chg = {
.unprepare = sam9x60_frac_pll_unprepare,
.is_prepared = sam9x60_frac_pll_is_prepared,
.recalc_rate = sam9x60_frac_pll_recalc_rate,
- .round_rate = sam9x60_frac_pll_round_rate,
+ .determine_rate = sam9x60_frac_pll_determine_rate,
.set_rate = sam9x60_frac_pll_set_rate_chg,
.save_context = sam9x60_frac_pll_save_context,
.restore_context = sam9x60_frac_pll_restore_context,
};
-/* This function should be called with spinlock acquired. */
+/*
+ * This function should be called with the spinlock acquired.
+ * Warning: this function must only be called if the same PLL ID was
+ * previously set in the PLL_UPDT register.
+ */
static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
bool enable)
{
@@ -350,9 +354,9 @@ static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
core->layout->div_mask | ena_msk,
(div << core->layout->div_shift) | ena_val);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
while (!sam9x60_pll_ready(regmap, core->id))
cpu_relax();
@@ -366,8 +370,8 @@ static int sam9x60_div_pll_set(struct sam9x60_pll_core *core)
unsigned int val, cdiv;
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -398,15 +402,15 @@ static void sam9x60_div_pll_unprepare(struct clk_hw *hw)
spin_lock_irqsave(core->lock, flags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_ID_MSK, core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_ID_MSK, core->id);
regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
core->layout->endiv_mask, 0);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
spin_unlock_irqrestore(core->lock, flags);
}
@@ -487,12 +491,15 @@ static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core,
return best_rate;
}
-static long sam9x60_div_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sam9x60_div_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
- return sam9x60_div_pll_compute_div(core, parent_rate, rate);
+ req->rate = sam9x60_div_pll_compute_div(core, &req->best_parent_rate,
+ req->rate);
+
+ return 0;
}
static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -518,8 +525,8 @@ static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1;
spin_lock_irqsave(core->lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core->id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
@@ -574,8 +581,8 @@ static int sam9x60_div_pll_notifier_fn(struct notifier_block *notifier,
div->div = div->safe_div;
spin_lock_irqsave(core.lock, irqflags);
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
- core.id);
+ regmap_write_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core.id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
cdiv = (val & core.layout->div_mask) >> core.layout->div_shift;
@@ -601,7 +608,7 @@ static const struct clk_ops sam9x60_div_pll_ops = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
@@ -612,7 +619,7 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.set_rate = sam9x60_div_pll_set_rate_chg,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
@@ -623,7 +630,7 @@ static const struct clk_ops sam9x60_fixed_div_pll_ops = {
.unprepare = sam9x60_div_pll_unprepare,
.is_prepared = sam9x60_div_pll_is_prepared,
.recalc_rate = sam9x60_fixed_div_pll_recalc_rate,
- .round_rate = sam9x60_div_pll_round_rate,
+ .determine_rate = sam9x60_div_pll_determine_rate,
.save_context = sam9x60_div_pll_save_context,
.restore_context = sam9x60_div_pll_restore_context,
};
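The regmap_update_bits() to regmap_write_bits() swaps throughout this file are behavioral, not cosmetic: update_bits reads the register and skips the write when the bits already match, which can lose the trigger semantics of the UPDT register's UPDATE bit, while write_bits always performs the write. The recurring idiom:

	/* force the write even if the ID bits already hold core->id */
	regmap_write_bits(regmap, AT91_PMC_PLL_UPDT,
			  AT91_PMC_PLL_UPDT_ID_MSK, core->id);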
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index b0696a928aa9..e906928cfbf0 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -319,8 +319,8 @@ static unsigned long at91rm9200_clk_usb_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int at91rm9200_clk_usb_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw);
struct clk_hw *parent = clk_hw_get_parent(hw);
@@ -336,25 +336,27 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
if (!usb->divisors[i])
continue;
- tmp_parent_rate = rate * usb->divisors[i];
+ tmp_parent_rate = req->rate * usb->divisors[i];
tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate);
tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
- if (tmprate < rate)
- tmpdiff = rate - tmprate;
+ if (tmprate < req->rate)
+ tmpdiff = req->rate - tmprate;
else
- tmpdiff = tmprate - rate;
+ tmpdiff = tmprate - req->rate;
if (bestdiff < 0 || bestdiff > tmpdiff) {
bestrate = tmprate;
bestdiff = tmpdiff;
- *parent_rate = tmp_parent_rate;
+ req->best_parent_rate = tmp_parent_rate;
}
if (!bestdiff)
break;
}
- return bestrate;
+ req->rate = bestrate;
+
+ return 0;
}
static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -384,7 +386,7 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops at91rm9200_usb_ops = {
.recalc_rate = at91rm9200_clk_usb_recalc_rate,
- .round_rate = at91rm9200_clk_usb_round_rate,
+ .determine_rate = at91rm9200_clk_usb_determine_rate,
.set_rate = at91rm9200_clk_usb_set_rate,
};
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index acf780a81589..2310f6f73162 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -115,7 +115,7 @@ struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
/* Address in SECURAM that say if we suspend to backup mode. */
static void __iomem *at91_pmc_backup_suspend;
-static int at91_pmc_suspend(void)
+static int at91_pmc_suspend(void *data)
{
unsigned int backup;
@@ -129,7 +129,7 @@ static int at91_pmc_suspend(void)
return clk_save_context();
}
-static void at91_pmc_resume(void)
+static void at91_pmc_resume(void *data)
{
unsigned int backup;
@@ -143,11 +143,15 @@ static void at91_pmc_resume(void)
clk_restore_context();
}
-static struct syscore_ops pmc_syscore_ops = {
+static const struct syscore_ops pmc_syscore_ops = {
.suspend = at91_pmc_suspend,
.resume = at91_pmc_resume,
};
+static struct syscore pmc_syscore = {
+ .ops = &pmc_syscore_ops,
+};
+
static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
{ .compatible = "microchip,sama7g5-pmc", },
@@ -185,7 +189,7 @@ static int __init pmc_register_ops(void)
return -ENOMEM;
}
- register_syscore_ops(&pmc_syscore_ops);
+ register_syscore(&pmc_syscore);
return 0;
}
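The pmc.c hunks track a core rework of the syscore interface: the callbacks gain a void *data argument, the ops table can be const, and registration takes a struct syscore instance wrapping the ops. A condensed sketch of the new shape, inferred from the hunks above (the foo_* names are illustrative):

static int foo_suspend(void *data)
{
	/* quiesce the hardware; data is an opaque context pointer */
	return 0;
}

static void foo_resume(void *data)
{
	/* restore the hardware state saved in foo_suspend() */
}

static const struct syscore_ops foo_syscore_ops = {
	.suspend = foo_suspend,
	.resume = foo_resume,
};

static struct syscore foo_syscore = {
	.ops = &foo_syscore_ops,
};

/* somewhere in the init path */
register_syscore(&foo_syscore);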
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 4fb29ca111f7..543d7aee8d24 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -80,6 +80,7 @@ struct clk_pll_characteristics {
u16 *icpll;
u8 *out;
u8 upll : 1;
+ u32 acr;
};
struct clk_programmable_layout {
@@ -116,9 +117,6 @@ struct at91_clk_pms {
unsigned int parent;
};
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
#define ndck(a, s) (a[s - 1].id + 1)
#define nck(a) (a[ARRAY_SIZE(a) - 1].id + 1)
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index db6db9e2073e..18baf4a256f4 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -36,6 +36,7 @@ static const struct clk_pll_characteristics plla_characteristics = {
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00020010),
};
static const struct clk_range upll_outputs[] = {
@@ -48,6 +49,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.output = upll_outputs,
.core_output = core_outputs,
.upll = true,
+	.acr = UL(0x12023010), /* fIN = [18 MHz, 32 MHz] */
};
static const struct clk_pll_layout pll_frac_layout = {
diff --git a/drivers/clk/at91/sam9x7.c b/drivers/clk/at91/sam9x7.c
index ffab32b047a0..89868a0aeaba 100644
--- a/drivers/clk/at91/sam9x7.c
+++ b/drivers/clk/at91/sam9x7.c
@@ -107,6 +107,7 @@ static const struct clk_pll_characteristics plla_characteristics = {
.num_output = ARRAY_SIZE(plla_outputs),
.output = plla_outputs,
.core_output = plla_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
};
static const struct clk_pll_characteristics upll_characteristics = {
@@ -115,6 +116,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.output = upll_outputs,
.core_output = upll_core_outputs,
.upll = true,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics lvdspll_characteristics = {
@@ -122,6 +124,7 @@ static const struct clk_pll_characteristics lvdspll_characteristics = {
.num_output = ARRAY_SIZE(lvdspll_outputs),
.output = lvdspll_outputs,
.core_output = lvdspll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics audiopll_characteristics = {
@@ -129,6 +132,7 @@ static const struct clk_pll_characteristics audiopll_characteristics = {
.num_output = ARRAY_SIZE(audiopll_outputs),
.output = audiopll_outputs,
.core_output = audiopll_core_outputs,
+ .acr = UL(0x12023010), /* fIN=[20 MHz, 32 MHz] */
};
static const struct clk_pll_characteristics plladiv2_characteristics = {
@@ -136,6 +140,7 @@ static const struct clk_pll_characteristics plladiv2_characteristics = {
.num_output = ARRAY_SIZE(plladiv2_outputs),
.output = plladiv2_outputs,
.core_output = plladiv2_core_outputs,
+ .acr = UL(0x00020010), /* Old ACR_DEFAULT_PLLA value */
};
/* Layout for fractional PLL ID PLLA. */
@@ -403,6 +408,7 @@ static const struct {
{ .n = "pioD_clk", .id = 44, },
{ .n = "tcb1_clk", .id = 45, },
{ .n = "dbgu_clk", .id = 47, },
+ { .n = "pmecc_clk", .id = 48, },
/*
 * mpddr_clk feeds the DDR controller and is enabled by the bootloader, so we
* need to keep it enabled in case there is no Linux consumer for it.
diff --git a/drivers/clk/at91/sama7d65.c b/drivers/clk/at91/sama7d65.c
index a5d40df8b2f2..7dee2b160ffb 100644
--- a/drivers/clk/at91/sama7d65.c
+++ b/drivers/clk/at91/sama7d65.c
@@ -138,6 +138,7 @@ static const struct clk_pll_characteristics cpu_pll_characteristics = {
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/* PLL characteristics. */
@@ -146,6 +147,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
static const struct clk_pll_characteristics lvdspll_characteristics = {
@@ -153,6 +155,7 @@ static const struct clk_pll_characteristics lvdspll_characteristics = {
.num_output = ARRAY_SIZE(lvdspll_outputs),
.output = lvdspll_outputs,
.core_output = lvdspll_core_outputs,
+ .acr = UL(0x00070010),
};
static const struct clk_pll_characteristics upll_characteristics = {
@@ -160,6 +163,7 @@ static const struct clk_pll_characteristics upll_characteristics = {
.num_output = ARRAY_SIZE(upll_outputs),
.output = upll_outputs,
.core_output = upll_core_outputs,
+ .acr = UL(0x12020010),
.upll = true,
};
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 8385badc1c70..1340c2b00619 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -113,6 +113,7 @@ static const struct clk_pll_characteristics cpu_pll_characteristics = {
.num_output = ARRAY_SIZE(cpu_pll_outputs),
.output = cpu_pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/* PLL characteristics. */
@@ -121,6 +122,7 @@ static const struct clk_pll_characteristics pll_characteristics = {
.num_output = ARRAY_SIZE(pll_outputs),
.output = pll_outputs,
.core_output = core_outputs,
+ .acr = UL(0x00070010),
};
/*
diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c
index 9667ce898428..6f3e1151b354 100644
--- a/drivers/clk/axs10x/i2s_pll_clock.c
+++ b/drivers/clk/axs10x/i2s_pll_clock.c
@@ -108,21 +108,21 @@ static unsigned long i2s_pll_recalc_rate(struct clk_hw *hw,
return ((parent_rate / idiv) * fbdiv) / odiv;
}
-static long i2s_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int i2s_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct i2s_pll_clk *clk = to_i2s_pll_clk(hw);
- const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(*prate);
+ const struct i2s_pll_cfg *pll_cfg = i2s_pll_get_cfg(req->best_parent_rate);
int i;
if (!pll_cfg) {
- dev_err(clk->dev, "invalid parent rate=%ld\n", *prate);
+		dev_err(clk->dev, "invalid parent rate=%lu\n", req->best_parent_rate);
return -EINVAL;
}
for (i = 0; pll_cfg[i].rate != 0; i++)
- if (pll_cfg[i].rate == rate)
- return rate;
+ if (pll_cfg[i].rate == req->rate)
+ return 0;
return -EINVAL;
}
@@ -156,7 +156,7 @@ static int i2s_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops i2s_pll_ops = {
.recalc_rate = i2s_pll_recalc_rate,
- .round_rate = i2s_pll_round_rate,
+ .determine_rate = i2s_pll_determine_rate,
.set_rate = i2s_pll_set_rate,
};
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
index 6c7a2b62b406..c7ca473ee76c 100644
--- a/drivers/clk/axs10x/pll_clock.c
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -149,8 +149,8 @@ static unsigned long axs10x_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int axs10x_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
long best_rate;
@@ -163,11 +163,13 @@ static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops axs10x_pll_ops = {
.recalc_rate = axs10x_pll_recalc_rate,
- .round_rate = axs10x_pll_round_rate,
+ .determine_rate = axs10x_pll_determine_rate,
.set_rate = axs10x_pll_set_rate,
};
diff --git a/drivers/clk/baikal-t1/ccu-div.c b/drivers/clk/baikal-t1/ccu-div.c
index 8d5fc7158f33..849d1f55765f 100644
--- a/drivers/clk/baikal-t1/ccu-div.c
+++ b/drivers/clk/baikal-t1/ccu-div.c
@@ -228,15 +228,18 @@ static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
CCU_DIV_CLKDIV_MAX(mask));
}
-static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_var_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
unsigned long divider;
- divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
+ divider = ccu_div_var_calc_divider(req->rate, req->best_parent_rate,
+ div->mask);
- return ccu_div_calc_freq(*parent_rate, divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, divider);
+
+ return 0;
}
/*
@@ -308,12 +311,14 @@ static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
return ccu_div_calc_freq(parent_rate, div->divider);
}
-static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_div_fixed_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_div *div = to_ccu_div(hw);
- return ccu_div_calc_freq(*parent_rate, div->divider);
+ req->rate = ccu_div_calc_freq(req->best_parent_rate, div->divider);
+
+ return 0;
}
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -534,14 +539,14 @@ static const struct clk_ops ccu_div_var_gate_to_set_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_fast,
.debug_init = ccu_div_var_debug_init
};
static const struct clk_ops ccu_div_var_nogate_ops = {
.recalc_rate = ccu_div_var_recalc_rate,
- .round_rate = ccu_div_var_round_rate,
+ .determine_rate = ccu_div_var_determine_rate,
.set_rate = ccu_div_var_set_rate_slow,
.debug_init = ccu_div_var_debug_init
};
@@ -551,7 +556,7 @@ static const struct clk_ops ccu_div_gate_ops = {
.disable = ccu_div_gate_disable,
.is_enabled = ccu_div_gate_is_enabled,
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_gate_debug_init
};
@@ -565,7 +570,7 @@ static const struct clk_ops ccu_div_buf_ops = {
static const struct clk_ops ccu_div_fixed_ops = {
.recalc_rate = ccu_div_fixed_recalc_rate,
- .round_rate = ccu_div_fixed_round_rate,
+ .determine_rate = ccu_div_fixed_determine_rate,
.set_rate = ccu_div_fixed_set_rate,
.debug_init = ccu_div_fixed_debug_init
};
diff --git a/drivers/clk/baikal-t1/ccu-pll.c b/drivers/clk/baikal-t1/ccu-pll.c
index 13ef28001439..357269f41cdc 100644
--- a/drivers/clk/baikal-t1/ccu-pll.c
+++ b/drivers/clk/baikal-t1/ccu-pll.c
@@ -228,14 +228,16 @@ static void ccu_pll_calc_factors(unsigned long rate, unsigned long parent_rate,
}
}
-static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ccu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long nr = 1, nf = 1, od = 1;
- ccu_pll_calc_factors(rate, *parent_rate, &nr, &nf, &od);
+ ccu_pll_calc_factors(req->rate, req->best_parent_rate, &nr, &nf, &od);
- return ccu_pll_calc_freq(*parent_rate, nr, nf, od);
+ req->rate = ccu_pll_calc_freq(req->best_parent_rate, nr, nf, od);
+
+ return 0;
}
/*
@@ -481,7 +483,7 @@ static const struct clk_ops ccu_pll_gate_to_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_norst,
.debug_init = ccu_pll_debug_init
};
@@ -491,7 +493,7 @@ static const struct clk_ops ccu_pll_straight_set_ops = {
.disable = ccu_pll_disable,
.is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.set_rate = ccu_pll_set_rate_reset,
.debug_init = ccu_pll_debug_init
};
diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c
index dcacf55c55ae..83ec13da9b2e 100644
--- a/drivers/clk/bcm/clk-iproc-asiu.c
+++ b/drivers/clk/bcm/clk-iproc-asiu.c
@@ -98,22 +98,27 @@ static unsigned long iproc_asiu_clk_recalc_rate(struct clk_hw *hw,
return clk->rate;
}
-static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int iproc_asiu_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int div;
- if (rate == 0 || *parent_rate == 0)
+ if (req->rate == 0 || req->best_parent_rate == 0)
return -EINVAL;
- if (rate == *parent_rate)
- return *parent_rate;
+ if (req->rate == req->best_parent_rate)
+ return 0;
- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
- if (div < 2)
- return *parent_rate;
+ div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ if (div < 2) {
+ req->rate = req->best_parent_rate;
- return *parent_rate / div;
+ return 0;
+ }
+
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -168,7 +173,7 @@ static const struct clk_ops iproc_asiu_ops = {
.enable = iproc_asiu_clk_enable,
.disable = iproc_asiu_clk_disable,
.recalc_rate = iproc_asiu_clk_recalc_rate,
- .round_rate = iproc_asiu_clk_round_rate,
+ .determine_rate = iproc_asiu_clk_determine_rate,
.set_rate = iproc_asiu_clk_set_rate,
};
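A quick worked example of the divider selection above (illustrative numbers): with a 500 MHz parent, a 300 MHz request gives DIV_ROUND_CLOSEST(500 MHz, 300 MHz) = 2 and the clock reports 250 MHz, while a 400 MHz request rounds to a divider of 1, below the hardware minimum of 2, so the callback falls back to the 500 MHz parent rate.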
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 8e4fde03ed23..1a9162f0ae31 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -68,6 +68,8 @@ struct raspberrypi_clk_variant {
char *clkdev;
unsigned long min_rate;
bool minimize;
+ bool maximize;
+ u32 flags;
};
static struct raspberrypi_clk_variant
@@ -75,6 +77,7 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_ARM_CLK_ID] = {
.export = true,
.clkdev = "cpu0",
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_CORE_CLK_ID] = {
.export = true,
@@ -90,6 +93,12 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* always use the minimum the drivers will let us.
*/
.minimize = true,
+
+ /*
+ * It should never be disabled as it drives the bus for
+ * everything else.
+ */
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_M2MC_CLK_ID] = {
.export = true,
@@ -115,18 +124,29 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
* drivers will let us.
*/
.minimize = true,
+
+ /*
+	 * As mentioned above, if this clock is disabled during boot,
+	 * the firmware will skip the HSM initialization, resulting
+	 * in a bus lockup. Therefore, make sure it is enabled
+	 * during boot; afterwards it can be enabled and disabled
+	 * by the driver.
+ */
+ .flags = CLK_IGNORE_UNUSED,
},
[RPI_FIRMWARE_V3D_CLK_ID] = {
.export = true,
- .minimize = true,
+ .maximize = true,
},
[RPI_FIRMWARE_PIXEL_CLK_ID] = {
.export = true,
.minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_HEVC_CLK_ID] = {
.export = true,
.minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_ISP_CLK_ID] = {
.export = true,
@@ -135,6 +155,7 @@ raspberrypi_clk_variants[RPI_FIRMWARE_NUM_CLK_ID] = {
[RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = {
.export = true,
.minimize = true,
+ .flags = CLK_IS_CRITICAL,
},
[RPI_FIRMWARE_VEC_CLK_ID] = {
.export = true,
@@ -194,8 +215,11 @@ static int raspberrypi_fw_is_prepared(struct clk_hw *hw)
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_STATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s state: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return !!(val & RPI_FIRMWARE_STATE_ENABLE_BIT);
}
@@ -211,8 +235,11 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
- if (ret)
+ if (ret) {
+ dev_err_ratelimited(rpi->dev, "Failed to get %s frequency: %d\n",
+ clk_hw_get_name(hw), ret);
return 0;
+ }
return val;
}
@@ -259,7 +286,41 @@ static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw,
return 0;
}
+static int raspberrypi_fw_prepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = RPI_FIRMWARE_STATE_ENABLE_BIT;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to on: %d\n",
+ clk_hw_get_name(hw), ret);
+
+ return ret;
+}
+
+static void raspberrypi_fw_unprepare(struct clk_hw *hw)
+{
+ const struct raspberrypi_clk_data *data = clk_hw_to_data(hw);
+ struct raspberrypi_clk *rpi = data->rpi;
+ u32 state = 0;
+ int ret;
+
+ ret = raspberrypi_clock_property(rpi->firmware, data,
+ RPI_FIRMWARE_SET_CLOCK_STATE, &state);
+ if (ret)
+ dev_err_ratelimited(rpi->dev,
+ "Failed to set clock %s state to off: %d\n",
+ clk_hw_get_name(hw), ret);
+}
+
static const struct clk_ops raspberrypi_firmware_clk_ops = {
+ .prepare = raspberrypi_fw_prepare,
+ .unprepare = raspberrypi_fw_unprepare,
.is_prepared = raspberrypi_fw_is_prepared,
.recalc_rate = raspberrypi_fw_get_rate,
.determine_rate = raspberrypi_fw_dumb_determine_rate,
@@ -289,7 +350,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
if (!init.name)
return ERR_PTR(-ENOMEM);
init.ops = &raspberrypi_firmware_clk_ops;
- init.flags = CLK_GET_RATE_NOCACHE;
+ init.flags = variant->flags | CLK_GET_RATE_NOCACHE;
data->hw.init = &init;
@@ -326,6 +387,9 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
}
}
+ if (variant->maximize)
+ variant->min_rate = max_rate;
+
if (variant->min_rate) {
unsigned long rate;
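For context on the two flags used by the variants above: CLK_IS_CRITICAL makes the clock framework enable the clock at registration and keep it on for the lifetime of the system, which fits the CPU, core, pixel and HEVC clocks, whereas CLK_IGNORE_UNUSED only exempts the clock from the late-boot clk_disable_unused() sweep while still allowing the newly added prepare/unprepare callbacks to gate it once a driver actually manages it.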
diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c
index 457a48d48941..d3ced4a0f029 100644
--- a/drivers/clk/clk-apple-nco.c
+++ b/drivers/clk/clk-apple-nco.c
@@ -212,13 +212,15 @@ static unsigned long applnco_recalc_rate(struct clk_hw *hw,
((u64) div) * incbase + inc1);
}
-static long applnco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int applnco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long lo = *parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
- unsigned long hi = *parent_rate / COARSE_DIV_OFFSET;
+ unsigned long lo = req->best_parent_rate / (COARSE_DIV_OFFSET + LFSR_TBLSIZE) + 1;
+ unsigned long hi = req->best_parent_rate / COARSE_DIV_OFFSET;
- return clamp(rate, lo, hi);
+ req->rate = clamp(req->rate, lo, hi);
+
+ return 0;
}
static int applnco_enable(struct clk_hw *hw)
@@ -246,7 +248,7 @@ static void applnco_disable(struct clk_hw *hw)
static const struct clk_ops applnco_ops = {
.set_rate = applnco_set_rate,
.recalc_rate = applnco_recalc_rate,
- .round_rate = applnco_round_rate,
+ .determine_rate = applnco_determine_rate,
.enable = applnco_enable,
.disable = applnco_disable,
.is_enabled = applnco_is_enabled,
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index aec62301fa06..fa5ccef73e60 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -540,7 +540,7 @@ static int axi_clkgen_setup_limits(struct axi_clkgen *axi_clkgen,
default:
return dev_err_probe(dev, -ENODEV, "Unknown speed grade %d\n",
speed_grade);
- };
+ }
/* Overwrite vco limits for ultrascale+ */
if (tech == ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS) {
diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c
index 4a3462ee8f3e..3823383f3fa6 100644
--- a/drivers/clk/clk-axm5516.c
+++ b/drivers/clk/clk-axm5516.c
@@ -529,7 +529,6 @@ static const struct regmap_config axmclk_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1fffc,
- .fast_io = true,
};
static const struct of_device_id axmclk_match_table[] = {
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index 002f7360b1c6..dac190bc6e19 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -608,8 +608,8 @@ static unsigned long bm1880_clk_div_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int bm1880_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
struct bm1880_div_clock *div = &div_hw->div;
@@ -621,13 +621,18 @@ static long bm1880_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(reg_addr) >> div->shift;
val &= clk_div_mask(div->width);
- return divider_ro_round_rate(hw, rate, prate, div->table,
- div->width, div->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ div->table,
+ div->width, div->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, div->table,
- div->width, div->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div->table, div->width, div->flags);
+
+ return 0;
}
static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -665,7 +670,7 @@ static int bm1880_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops bm1880_clk_div_ops = {
.recalc_rate = bm1880_clk_div_recalc_rate,
- .round_rate = bm1880_clk_div_round_rate,
+ .determine_rate = bm1880_clk_div_determine_rate,
.set_rate = bm1880_clk_div_set_rate,
};
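bm1880 keeps the shared divider helpers and only adapts the calling convention: divider_round_rate() and divider_ro_round_rate() still return the rounded rate and update the parent rate through a pointer, so the converted callback feeds them req->rate and &req->best_parent_rate and stores the result. A condensed sketch for a hypothetical mmio divider (bar_div and to_bar_div() are illustrative):

static int bar_div_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct bar_div *div = to_bar_div(hw);

	/* the helper still reports the rate via its return value */
	req->rate = divider_round_rate(hw, req->rate,
				       &req->best_parent_rate,
				       div->table, div->width, div->flags);

	return 0;
}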
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
index d0705bb03a2a..a495d313b02f 100644
--- a/drivers/clk/clk-cdce706.c
+++ b/drivers/clk/clk-cdce706.c
@@ -183,8 +183,8 @@ static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce706_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cdce706_hw_data *hwd = to_hw_data(hw);
unsigned long mul, div;
@@ -192,9 +192,9 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
dev_dbg(&hwd->dev_data->client->dev,
"%s, rate: %lu, parent_rate: %lu\n",
- __func__, rate, *parent_rate);
+ __func__, req->rate, req->best_parent_rate);
- rational_best_approximation(rate, *parent_rate,
+ rational_best_approximation(req->rate, req->best_parent_rate,
CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
&mul, &div);
hwd->mul = mul;
@@ -204,9 +204,11 @@ static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
"%s, pll: %d, mul: %lu, div: %lu\n",
__func__, hwd->idx, mul, div);
- res = (u64)*parent_rate * hwd->mul;
+ res = (u64)req->best_parent_rate * hwd->mul;
do_div(res, hwd->div);
- return res;
+ req->rate = res;
+
+ return 0;
}
static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -251,7 +253,7 @@ static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops cdce706_pll_ops = {
.recalc_rate = cdce706_pll_recalc_rate,
- .round_rate = cdce706_pll_round_rate,
+ .determine_rate = cdce706_pll_determine_rate,
.set_rate = cdce706_pll_set_rate,
};
diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c
index c51818c1af98..0b2ad21e6e4d 100644
--- a/drivers/clk/clk-cdce925.c
+++ b/drivers/clk/clk-cdce925.c
@@ -128,13 +128,15 @@ static void cdce925_pll_find_rate(unsigned long rate,
}
}
-static long cdce925_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u16 n, m;
- cdce925_pll_find_rate(rate, *parent_rate, &n, &m);
- return (long)cdce925_pll_calculate_rate(*parent_rate, n, m);
+ cdce925_pll_find_rate(req->rate, req->best_parent_rate, &n, &m);
+ req->rate = (long)cdce925_pll_calculate_rate(req->best_parent_rate, n, m);
+
+ return 0;
}
static int cdce925_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -266,7 +268,7 @@ static const struct clk_ops cdce925_pll_ops = {
.prepare = cdce925_pll_prepare,
.unprepare = cdce925_pll_unprepare,
.recalc_rate = cdce925_pll_recalc_rate,
- .round_rate = cdce925_pll_round_rate,
+ .determine_rate = cdce925_pll_determine_rate,
.set_rate = cdce925_pll_set_rate,
};
@@ -420,20 +422,23 @@ static unsigned long cdce925_clk_best_parent_rate(
return rate * pdiv_best;
}
-static long cdce925_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_calc_divider(req->rate, l_parent_rate);
- if (l_parent_rate / divider != rate) {
- l_parent_rate = cdce925_clk_best_parent_rate(hw, rate);
- divider = cdce925_calc_divider(rate, l_parent_rate);
- *parent_rate = l_parent_rate;
+ if (l_parent_rate / divider != req->rate) {
+ l_parent_rate = cdce925_clk_best_parent_rate(hw, req->rate);
+ divider = cdce925_calc_divider(req->rate, l_parent_rate);
+ req->best_parent_rate = l_parent_rate;
}
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -451,7 +456,7 @@ static const struct clk_ops cdce925_clk_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_round_rate,
+ .determine_rate = cdce925_clk_determine_rate,
.set_rate = cdce925_clk_set_rate,
};
@@ -473,14 +478,17 @@ static u16 cdce925_y1_calc_divider(unsigned long rate,
return (u16)divider;
}
-static long cdce925_clk_y1_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cdce925_clk_y1_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long l_parent_rate = *parent_rate;
- u16 divider = cdce925_y1_calc_divider(rate, l_parent_rate);
+ unsigned long l_parent_rate = req->best_parent_rate;
+ u16 divider = cdce925_y1_calc_divider(req->rate, l_parent_rate);
if (divider)
- return (long)(l_parent_rate / divider);
+ req->rate = (long)(l_parent_rate / divider);
+ else
+ req->rate = 0;
+
return 0;
}
@@ -498,7 +506,7 @@ static const struct clk_ops cdce925_clk_y1_ops = {
.prepare = cdce925_clk_prepare,
.unprepare = cdce925_clk_unprepare,
.recalc_rate = cdce925_clk_recalc_rate,
- .round_rate = cdce925_clk_y1_round_rate,
+ .determine_rate = cdce925_clk_y1_determine_rate,
.set_rate = cdce925_clk_y1_set_rate,
};
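One subtlety in the cdce925 conversion: in the old callbacks the trailing return 0 meant "the rounded rate is zero", while after the conversion return 0 means success, so the explicit else branches setting req->rate = 0 are what preserve the original behaviour when cdce925_calc_divider() or cdce925_y1_calc_divider() cannot find a usable divider.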
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index 35cb93ad298a..8800472ba63f 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -305,15 +305,19 @@ static unsigned long cs2000_recalc_rate(struct clk_hw *hw,
return cs2000_ratio_to_rate(ratio, parent_rate, priv->lf_ratio);
}
-static long cs2000_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int cs2000_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct cs2000_priv *priv = hw_to_priv(hw);
u32 ratio;
- ratio = cs2000_rate_to_ratio(*parent_rate, rate, priv->lf_ratio);
+ ratio = cs2000_rate_to_ratio(req->best_parent_rate, req->rate,
+ priv->lf_ratio);
- return cs2000_ratio_to_rate(ratio, *parent_rate, priv->lf_ratio);
+ req->rate = cs2000_ratio_to_rate(ratio, req->best_parent_rate,
+ priv->lf_ratio);
+
+ return 0;
}
static int cs2000_select_ratio_mode(struct cs2000_priv *priv,
@@ -430,7 +434,7 @@ static u8 cs2000_get_parent(struct clk_hw *hw)
static const struct clk_ops cs2000_ops = {
.get_parent = cs2000_get_parent,
.recalc_rate = cs2000_recalc_rate,
- .round_rate = cs2000_round_rate,
+ .determine_rate = cs2000_determine_rate,
.set_rate = cs2000_set_rate,
.prepare = cs2000_enable,
.unprepare = cs2000_disable,
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index c1f426b8a504..2601b6155afb 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -431,27 +431,6 @@ long divider_ro_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent,
}
EXPORT_SYMBOL_GPL(divider_ro_round_rate_parent);
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_divider *divider = to_clk_divider(hw);
-
- /* if read only, just return current value */
- if (divider->flags & CLK_DIVIDER_READ_ONLY) {
- u32 val;
-
- val = clk_div_readl(divider) >> divider->shift;
- val &= clk_div_mask(divider->width);
-
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
- }
-
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
-}
-
static int clk_divider_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -527,7 +506,6 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
@@ -535,7 +513,6 @@ EXPORT_SYMBOL_GPL(clk_divider_ops);
const struct clk_ops clk_divider_ro_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
.determine_rate = clk_divider_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
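Dropping clk_divider_round_rate() is safe because the clk core only ever consults one of the two callbacks and prefers .determine_rate; simplified, the dispatch in the core's rounding path looks roughly like this (a sketch, not verbatim core code):

	if (core->ops->determine_rate)
		return core->ops->determine_rate(core->hw, req);

	if (core->ops->round_rate) {
		rate = core->ops->round_rate(core->hw, req->rate,
					     &req->best_parent_rate);
		if (rate < 0)
			return rate;
		req->rate = rate;
	}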
diff --git a/drivers/clk/clk-en7523.c b/drivers/clk/clk-en7523.c
index 15bbdeb60b8e..08cc8e5acf43 100644
--- a/drivers/clk/clk-en7523.c
+++ b/drivers/clk/clk-en7523.c
@@ -9,6 +9,7 @@
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <dt-bindings/clock/en7523-clk.h>
+#include <dt-bindings/reset/airoha,en7523-reset.h>
#include <dt-bindings/reset/airoha,en7581-reset.h>
#define RST_NR_PER_BANK 32
@@ -299,6 +300,53 @@ static const u16 en7581_rst_ofs[] = {
REG_RST_CTRL1,
};
+static const u16 en7523_rst_map[] = {
+ /* RST_CTRL2 */
+ [EN7523_XPON_PHY_RST] = 0,
+ [EN7523_XSI_MAC_RST] = 7,
+ [EN7523_XSI_PHY_RST] = 8,
+ [EN7523_NPU_RST] = 9,
+ [EN7523_I2S_RST] = 10,
+ [EN7523_TRNG_RST] = 11,
+ [EN7523_TRNG_MSTART_RST] = 12,
+ [EN7523_DUAL_HSI0_RST] = 13,
+ [EN7523_DUAL_HSI1_RST] = 14,
+ [EN7523_HSI_RST] = 15,
+ [EN7523_DUAL_HSI0_MAC_RST] = 16,
+ [EN7523_DUAL_HSI1_MAC_RST] = 17,
+ [EN7523_HSI_MAC_RST] = 18,
+ [EN7523_WDMA_RST] = 19,
+ [EN7523_WOE0_RST] = 20,
+ [EN7523_WOE1_RST] = 21,
+ [EN7523_HSDMA_RST] = 22,
+ [EN7523_I2C2RBUS_RST] = 23,
+ [EN7523_TDMA_RST] = 24,
+ /* RST_CTRL1 */
+ [EN7523_PCM1_ZSI_ISI_RST] = RST_NR_PER_BANK + 0,
+ [EN7523_FE_PDMA_RST] = RST_NR_PER_BANK + 1,
+ [EN7523_FE_QDMA_RST] = RST_NR_PER_BANK + 2,
+ [EN7523_PCM_SPIWP_RST] = RST_NR_PER_BANK + 4,
+ [EN7523_CRYPTO_RST] = RST_NR_PER_BANK + 6,
+ [EN7523_TIMER_RST] = RST_NR_PER_BANK + 8,
+ [EN7523_PCM1_RST] = RST_NR_PER_BANK + 11,
+ [EN7523_UART_RST] = RST_NR_PER_BANK + 12,
+ [EN7523_GPIO_RST] = RST_NR_PER_BANK + 13,
+ [EN7523_GDMA_RST] = RST_NR_PER_BANK + 14,
+ [EN7523_I2C_MASTER_RST] = RST_NR_PER_BANK + 16,
+ [EN7523_PCM2_ZSI_ISI_RST] = RST_NR_PER_BANK + 17,
+ [EN7523_SFC_RST] = RST_NR_PER_BANK + 18,
+ [EN7523_UART2_RST] = RST_NR_PER_BANK + 19,
+ [EN7523_GDMP_RST] = RST_NR_PER_BANK + 20,
+ [EN7523_FE_RST] = RST_NR_PER_BANK + 21,
+ [EN7523_USB_HOST_P0_RST] = RST_NR_PER_BANK + 22,
+ [EN7523_GSW_RST] = RST_NR_PER_BANK + 23,
+ [EN7523_SFC2_PCM_RST] = RST_NR_PER_BANK + 25,
+ [EN7523_PCIE0_RST] = RST_NR_PER_BANK + 26,
+ [EN7523_PCIE1_RST] = RST_NR_PER_BANK + 27,
+ [EN7523_PCIE_HB_RST] = RST_NR_PER_BANK + 29,
+ [EN7523_XPON_MAC_RST] = RST_NR_PER_BANK + 31,
+};
+
static const u16 en7581_rst_map[] = {
/* RST_CTRL2 */
[EN7581_XPON_PHY_RST] = 0,
@@ -357,6 +405,9 @@ static const u16 en7581_rst_map[] = {
[EN7581_XPON_MAC_RST] = RST_NR_PER_BANK + 31,
};
+static int en7581_reset_register(struct device *dev, void __iomem *base,
+ const u16 *rst_map, int nr_resets);
+
static u32 en7523_get_base_rate(const struct en_clk_desc *desc, u32 val)
{
if (!desc->base_bits)
@@ -552,7 +603,8 @@ static int en7523_clk_hw_init(struct platform_device *pdev,
en7523_register_clocks(&pdev->dev, clk_data, base, np_base);
- return 0;
+ return en7581_reset_register(&pdev->dev, np_base, en7523_rst_map,
+ ARRAY_SIZE(en7523_rst_map));
}
static void en7581_register_clocks(struct device *dev, struct clk_hw_onecell_data *clk_data,
@@ -652,7 +704,8 @@ static const struct reset_control_ops en7581_reset_ops = {
.status = en7523_reset_status,
};
-static int en7581_reset_register(struct device *dev, void __iomem *base)
+static int en7581_reset_register(struct device *dev, void __iomem *base,
+ const u16 *rst_map, int nr_resets)
{
struct en_rst_data *rst_data;
@@ -661,10 +714,10 @@ static int en7581_reset_register(struct device *dev, void __iomem *base)
return -ENOMEM;
rst_data->bank_ofs = en7581_rst_ofs;
- rst_data->idx_map = en7581_rst_map;
+ rst_data->idx_map = rst_map;
rst_data->base = base;
- rst_data->rcdev.nr_resets = ARRAY_SIZE(en7581_rst_map);
+ rst_data->rcdev.nr_resets = nr_resets;
rst_data->rcdev.of_xlate = en7523_reset_xlate;
rst_data->rcdev.ops = &en7581_reset_ops;
rst_data->rcdev.of_node = dev->of_node;
@@ -698,7 +751,8 @@ static int en7581_clk_hw_init(struct platform_device *pdev,
val = readl(base + REG_NP_SCU_PCIC);
writel(val | 3, base + REG_NP_SCU_PCIC);
- return en7581_reset_register(&pdev->dev, base);
+ return en7581_reset_register(&pdev->dev, base, en7581_rst_map,
+ ARRAY_SIZE(en7581_rst_map));
}
static int en7523_clk_probe(struct platform_device *pdev)
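The index map encodes bank and bit in one value: entries below RST_NR_PER_BANK live in the first bank listed in en7581_rst_ofs (RST_CTRL2) and entries offset by RST_NR_PER_BANK in the second (RST_CTRL1), so the reset ops can recover the register as bank_ofs[idx / RST_NR_PER_BANK] and the bit as idx % RST_NR_PER_BANK; EN7523_PCIE0_RST (32 + 26), for instance, resolves to RST_CTRL1 bit 26.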
diff --git a/drivers/clk/clk-ep93xx.c b/drivers/clk/clk-ep93xx.c
index 4bd8d6ecf6a2..972aadd11493 100644
--- a/drivers/clk/clk-ep93xx.c
+++ b/drivers/clk/clk-ep93xx.c
@@ -389,23 +389,25 @@ static unsigned long ep93xx_div_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, clk->div[index]);
}
-static long ep93xx_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ep93xx_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ep93xx_clk *clk = ep93xx_clk_from(hw);
unsigned long best = 0, now;
unsigned int i;
for (i = 0; i < clk->num_div; i++) {
- if ((rate * clk->div[i]) == *parent_rate)
- return rate;
+ if (req->rate * clk->div[i] == req->best_parent_rate)
+ return 0;
- now = DIV_ROUND_CLOSEST(*parent_rate, clk->div[i]);
- if (!best || is_best(rate, now, best))
+ now = DIV_ROUND_CLOSEST(req->best_parent_rate, clk->div[i]);
+ if (!best || is_best(req->rate, now, best))
best = now;
}
- return best;
+ req->rate = best;
+
+ return 0;
}
static int ep93xx_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -437,7 +439,7 @@ static const struct clk_ops ep93xx_div_ops = {
.disable = ep93xx_clk_disable,
.is_enabled = ep93xx_clk_is_enabled,
.recalc_rate = ep93xx_div_recalc_rate,
- .round_rate = ep93xx_div_round_rate,
+ .determine_rate = ep93xx_div_determine_rate,
.set_rate = ep93xx_div_set_rate,
};
@@ -486,9 +488,10 @@ static const struct ep93xx_gate ep93xx_uarts[] = {
static int ep93xx_uart_clock_init(struct ep93xx_clk_priv *priv)
{
struct clk_parent_data parent_data = { };
- unsigned int i, idx, ret, clk_uart_div;
+ unsigned int i, idx, clk_uart_div;
struct ep93xx_clk *clk;
u32 val;
+ int ret;
regmap_read(priv->map, EP93XX_SYSCON_PWRCNT, &val);
if (val & EP93XX_SYSCON_PWRCNT_UARTBAUD)
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index e62ae8794d44..de658c9e4c53 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -30,19 +30,21 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
unsigned long best_parent;
- best_parent = (rate / fix->mult) * fix->div;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / fix->mult) * fix->div;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return (*prate / fix->div) * fix->mult;
+ req->rate = (req->best_parent_rate / fix->div) * fix->mult;
+
+ return 0;
}
static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -50,7 +52,7 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
{
/*
* We must report success but we can do so unconditionally because
- * clk_factor_round_rate returns values that ensure this call is a
+ * clk_factor_determine_rate returns values that ensure this call is a
* nop.
*/
@@ -69,7 +71,7 @@ static unsigned long clk_factor_recalc_accuracy(struct clk_hw *hw,
}
const struct clk_ops clk_fixed_factor_ops = {
- .round_rate = clk_factor_round_rate,
+ .determine_rate = clk_factor_determine_rate,
.set_rate = clk_factor_set_rate,
.recalc_rate = clk_factor_recalc_rate,
.recalc_accuracy = clk_factor_recalc_accuracy,
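Worked example for clk_factor_determine_rate(): with mult = 1, div = 4 and a 100 MHz parent, a 26 MHz request under CLK_SET_RATE_PARENT first asks the parent to round (26 MHz / 1) * 4 = 104 MHz and then reports the parent's answer divided by 4; without the flag the callback simply returns (100 MHz / 4) * 1 = 25 MHz.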
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index da057172cc90..cd36a6e27f25 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -151,25 +151,32 @@ void clk_fractional_divider_general_approximation(struct clk_hw *hw,
}
EXPORT_SYMBOL_GPL(clk_fractional_divider_general_approximation);
-static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_fd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long m, n;
u64 ret;
- if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
- return *parent_rate;
+ if (!req->rate || (!clk_hw_can_set_rate_parent(hw) && req->rate >= req->best_parent_rate)) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (fd->approximation)
- fd->approximation(hw, rate, parent_rate, &m, &n);
+ fd->approximation(hw, req->rate, &req->best_parent_rate, &m, &n);
else
- clk_fractional_divider_general_approximation(hw, rate, parent_rate, &m, &n);
+ clk_fractional_divider_general_approximation(hw, req->rate,
+ &req->best_parent_rate,
+ &m, &n);
- ret = (u64)*parent_rate * m;
+ ret = (u64)req->best_parent_rate * m;
do_div(ret, n);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static int clk_fd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -250,7 +257,7 @@ static void clk_fd_debug_init(struct clk_hw *hw, struct dentry *dentry)
const struct clk_ops clk_fractional_divider_ops = {
.recalc_rate = clk_fd_recalc_rate,
- .round_rate = clk_fd_round_rate,
+ .determine_rate = clk_fd_determine_rate,
.set_rate = clk_fd_set_rate,
#ifdef CONFIG_DEBUG_FS
.debug_init = clk_fd_debug_init,
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index 856b008e07c6..e94589c38568 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -126,13 +126,16 @@ static unsigned long gemini_pci_recalc_rate(struct clk_hw *hw,
return 33000000;
}
-static long gemini_pci_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int gemini_pci_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* We support 33 and 66 MHz */
- if (rate < 48000000)
- return 33000000;
- return 66000000;
+ if (req->rate < 48000000)
+ req->rate = 33000000;
+ else
+ req->rate = 66000000;
+
+ return 0;
}
static int gemini_pci_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -179,7 +182,7 @@ static int gemini_pci_is_enabled(struct clk_hw *hw)
static const struct clk_ops gemini_pci_clk_ops = {
.recalc_rate = gemini_pci_recalc_rate,
- .round_rate = gemini_pci_round_rate,
+ .determine_rate = gemini_pci_determine_rate,
.set_rate = gemini_pci_set_rate,
.enable = gemini_pci_enable,
.disable = gemini_pci_disable,
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
index 6e68a41a70a1..cc583934ecf2 100644
--- a/drivers/clk/clk-highbank.c
+++ b/drivers/clk/clk-highbank.c
@@ -130,15 +130,17 @@ static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
*pdivf = divf;
}
-static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 divq, divf;
- unsigned long ref_freq = *parent_rate;
+ unsigned long ref_freq = req->best_parent_rate;
- clk_pll_calc(rate, ref_freq, &divq, &divf);
+ clk_pll_calc(req->rate, ref_freq, &divq, &divf);
- return (ref_freq * (divf + 1)) / (1 << divq);
+ req->rate = (ref_freq * (divf + 1)) / (1 << divq);
+
+ return 0;
}
static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -185,7 +187,7 @@ static const struct clk_ops clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -227,16 +229,18 @@ static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_periclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
div++;
div &= ~0x1;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -255,7 +259,7 @@ static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
static const struct clk_ops periclk_ops = {
.recalc_rate = clk_periclk_recalc_rate,
- .round_rate = clk_periclk_round_rate,
+ .determine_rate = clk_periclk_determine_rate,
.set_rate = clk_periclk_set_rate,
};
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
index 921523fc26f2..7d56a47c2aa7 100644
--- a/drivers/clk/clk-hsdk-pll.c
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -197,8 +197,8 @@ static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hsdk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int i;
unsigned long best_rate;
@@ -211,13 +211,15 @@ static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
best_rate = pll_cfg[0].rate;
for (i = 1; pll_cfg[i].rate != 0; i++) {
- if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ if (abs(req->rate - pll_cfg[i].rate) < abs(req->rate - best_rate))
best_rate = pll_cfg[i].rate;
}
dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
- return best_rate;
+ req->rate = best_rate;
+
+ return 0;
}
static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
@@ -296,7 +298,7 @@ static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hsdk_pll_ops = {
.recalc_rate = hsdk_pll_recalc_rate,
- .round_rate = hsdk_pll_round_rate,
+ .determine_rate = hsdk_pll_determine_rate,
.set_rate = hsdk_pll_set_rate,
};
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
index 16e0405fe28b..3c7a48c616bb 100644
--- a/drivers/clk/clk-lan966x.c
+++ b/drivers/clk/clk-lan966x.c
@@ -16,8 +16,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <dt-bindings/clock/microchip,lan966x.h>
-
#define GCK_ENA BIT(0)
#define GCK_SRC_SEL GENMASK(9, 8)
#define GCK_PRESCALER GENMASK(23, 16)
diff --git a/drivers/clk/clk-lmk04832.c b/drivers/clk/clk-lmk04832.c
index 2bcf422f0b04..b2107b31efa2 100644
--- a/drivers/clk/clk-lmk04832.c
+++ b/drivers/clk/clk-lmk04832.c
@@ -491,28 +491,33 @@ static long lmk04832_calc_pll2_params(unsigned long prate, unsigned long rate,
return DIV_ROUND_CLOSEST(prate * 2 * pll2_p * pll2_n, pll2_r);
}
-static long lmk04832_vco_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_vco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, vco);
unsigned int n, p, r;
long vco_rate;
int ret;
- ret = lmk04832_check_vco_ranges(lmk, rate);
+ ret = lmk04832_check_vco_ranges(lmk, req->rate);
if (ret < 0)
return ret;
- vco_rate = lmk04832_calc_pll2_params(*prate, rate, &n, &p, &r);
+ vco_rate = lmk04832_calc_pll2_params(req->best_parent_rate, req->rate,
+ &n, &p, &r);
if (vco_rate < 0) {
dev_err(lmk->dev, "PLL2 parameters out of range\n");
- return vco_rate;
+ req->rate = vco_rate;
+
+ return 0;
}
- if (rate != vco_rate)
+ if (req->rate != vco_rate)
return -EINVAL;
- return vco_rate;
+ req->rate = vco_rate;
+
+ return 0;
}
static int lmk04832_vco_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -579,7 +584,7 @@ static const struct clk_ops lmk04832_vco_ops = {
.prepare = lmk04832_vco_prepare,
.unprepare = lmk04832_vco_unprepare,
.recalc_rate = lmk04832_vco_recalc_rate,
- .round_rate = lmk04832_vco_round_rate,
+ .determine_rate = lmk04832_vco_determine_rate,
.set_rate = lmk04832_vco_set_rate,
};
@@ -888,25 +893,27 @@ static unsigned long lmk04832_sclk_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(prate, sysref_div);
}
-static long lmk04832_sclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_sclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk04832 *lmk = container_of(hw, struct lmk04832, sclk);
unsigned long sclk_rate;
unsigned int sysref_div;
- sysref_div = DIV_ROUND_CLOSEST(*prate, rate);
- sclk_rate = DIV_ROUND_CLOSEST(*prate, sysref_div);
+ sysref_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ sclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, sysref_div);
if (sysref_div < 0x07 || sysref_div > 0x1fff) {
dev_err(lmk->dev, "SYSREF divider out of range\n");
return -EINVAL;
}
- if (rate != sclk_rate)
+ if (req->rate != sclk_rate)
return -EINVAL;
- return sclk_rate;
+ req->rate = sclk_rate;
+
+ return 0;
}
static int lmk04832_sclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -945,7 +952,7 @@ static const struct clk_ops lmk04832_sclk_ops = {
.prepare = lmk04832_sclk_prepare,
.unprepare = lmk04832_sclk_unprepare,
.recalc_rate = lmk04832_sclk_recalc_rate,
- .round_rate = lmk04832_sclk_round_rate,
+ .determine_rate = lmk04832_sclk_determine_rate,
.set_rate = lmk04832_sclk_set_rate,
};
@@ -1069,26 +1076,28 @@ static unsigned long lmk04832_dclk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long lmk04832_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lmk04832_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lmk_dclk *dclk = container_of(hw, struct lmk_dclk, hw);
struct lmk04832 *lmk = dclk->lmk;
unsigned long dclk_rate;
unsigned int dclk_div;
- dclk_div = DIV_ROUND_CLOSEST(*prate, rate);
- dclk_rate = DIV_ROUND_CLOSEST(*prate, dclk_div);
+ dclk_div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
+ dclk_rate = DIV_ROUND_CLOSEST(req->best_parent_rate, dclk_div);
if (dclk_div < 1 || dclk_div > 0x3ff) {
dev_err(lmk->dev, "%s_div out of range\n", clk_hw_get_name(hw));
return -EINVAL;
}
- if (rate != dclk_rate)
+ if (req->rate != dclk_rate)
return -EINVAL;
- return dclk_rate;
+ req->rate = dclk_rate;
+
+ return 0;
}
static int lmk04832_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1158,7 +1167,7 @@ static const struct clk_ops lmk04832_dclk_ops = {
.prepare = lmk04832_dclk_prepare,
.unprepare = lmk04832_dclk_unprepare,
.recalc_rate = lmk04832_dclk_recalc_rate,
- .round_rate = lmk04832_dclk_round_rate,
+ .determine_rate = lmk04832_dclk_determine_rate,
.set_rate = lmk04832_dclk_set_rate,
};
diff --git a/drivers/clk/clk-loongson1.c b/drivers/clk/clk-loongson1.c
index a3467aa6790f..f9f060d08a5f 100644
--- a/drivers/clk/clk-loongson1.c
+++ b/drivers/clk/clk-loongson1.c
@@ -93,14 +93,16 @@ static unsigned long ls1x_divider_recalc_rate(struct clk_hw *hw,
d->flags, d->width);
}
-static long ls1x_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ls1x_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ls1x_clk *ls1x_clk = to_ls1x_clk(hw);
const struct ls1x_clk_div_data *d = ls1x_clk->data;
- return divider_round_rate(hw, rate, prate, d->table,
- d->width, d->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ d->table, d->width, d->flags);
+
+ return 0;
}
static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -146,7 +148,7 @@ static int ls1x_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ls1x_clk_divider_ops = {
.recalc_rate = ls1x_divider_recalc_rate,
- .round_rate = ls1x_divider_round_rate,
+ .determine_rate = ls1x_divider_determine_rate,
.set_rate = ls1x_divider_set_rate,
};
diff --git a/drivers/clk/clk-loongson2.c b/drivers/clk/clk-loongson2.c
index 27e632edd484..9c4c6c99db3e 100644
--- a/drivers/clk/clk-loongson2.c
+++ b/drivers/clk/clk-loongson2.c
@@ -13,10 +13,6 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <dt-bindings/clock/loongson,ls2k-clk.h>
-static const struct clk_parent_data pdata[] = {
- { .fw_name = "ref_100m", },
-};
-
enum loongson2_clk_type {
CLK_TYPE_PLL,
CLK_TYPE_SCALE,
@@ -42,6 +38,7 @@ struct loongson2_clk_data {
u8 div_width;
u8 mult_shift;
u8 mult_width;
+ u8 bit_idx;
};
struct loongson2_clk_board_info {
@@ -50,6 +47,7 @@ struct loongson2_clk_board_info {
const char *name;
const char *parent_name;
unsigned long fixed_rate;
+ unsigned long flags;
u8 reg_offset;
u8 div_shift;
u8 div_width;
@@ -95,6 +93,19 @@ struct loongson2_clk_board_info {
.div_width = _dwidth, \
}
+#define CLK_SCALE_MODE(_id, _name, _pname, _offset, \
+ _dshift, _dwidth, _midx) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_SCALE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .div_shift = _dshift, \
+ .div_width = _dwidth, \
+ .bit_idx = _midx + 1, \
+ }
+
#define CLK_GATE(_id, _name, _pname, _offset, _bidx) \
{ \
.id = _id, \
@@ -105,6 +116,18 @@ struct loongson2_clk_board_info {
.bit_idx = _bidx, \
}
+#define CLK_GATE_FLAGS(_id, _name, _pname, _offset, _bidx, \
+ _flags) \
+ { \
+ .id = _id, \
+ .type = CLK_TYPE_GATE, \
+ .name = _name, \
+ .parent_name = _pname, \
+ .reg_offset = _offset, \
+ .bit_idx = _bidx, \
+ .flags = _flags \
+ }
+
#define CLK_FIXED(_id, _name, _pname, _rate) \
{ \
.id = _id, \
@@ -114,6 +137,51 @@ struct loongson2_clk_board_info {
.fixed_rate = _rate, \
}
+static const struct loongson2_clk_board_info ls2k0300_clks[] = {
+ /* Reference Clock */
+ CLK_PLL(LS2K0300_NODE_PLL, "pll_node", 0x00, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_DDR_PLL, "pll_ddr", 0x08, 15, 9, 8, 7),
+ CLK_PLL(LS2K0300_PIX_PLL, "pll_pix", 0x10, 15, 9, 8, 7),
+ CLK_FIXED(LS2K0300_CLK_STABLE, "clk_stable", NULL, 100000000),
+ CLK_FIXED(LS2K0300_CLK_THSENS, "clk_thsens", NULL, 10000000),
+ /* Node PLL */
+ CLK_DIV(LS2K0300_CLK_NODE_DIV, "clk_node_div", "pll_node", 0x00, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMAC_DIV, "clk_gmac_div", "pll_node", 0x04, 0, 7),
+ CLK_DIV(LS2K0300_CLK_I2S_DIV, "clk_i2s_div", "pll_node", 0x04, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NODE_PLL_GATE, "clk_node_pll_gate", "clk_node_div", 0x00, 0),
+ CLK_GATE(LS2K0300_CLK_GMAC_GATE, "clk_gmac_gate", "clk_gmac_div", 0x00, 1),
+ CLK_GATE(LS2K0300_CLK_I2S_GATE, "clk_i2s_gate", "clk_i2s_div", 0x00, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_NODE_GATE, "clk_node_gate", "clk_node_scale", 0x24, 0,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_NODE_SCALE, "clk_node_scale", "clk_node_pll_gate", 0x20, 0, 3,
+ 3),
+ /* DDR PLL */
+ CLK_DIV(LS2K0300_CLK_DDR_DIV, "clk_ddr_div", "pll_ddr", 0x08, 24, 7),
+ CLK_DIV(LS2K0300_CLK_NET_DIV, "clk_net_div", "pll_ddr", 0x0c, 0, 7),
+ CLK_DIV(LS2K0300_CLK_DEV_DIV, "clk_dev_div", "pll_ddr", 0x0c, 8, 7),
+ CLK_GATE(LS2K0300_CLK_NET_GATE, "clk_net_gate", "clk_net_div", 0x08, 1),
+ CLK_GATE(LS2K0300_CLK_DEV_GATE, "clk_dev_gate", "clk_dev_div", 0x08, 2),
+ CLK_GATE_FLAGS(LS2K0300_CLK_DDR_GATE, "clk_ddr_gate", "clk_ddr_div", 0x08, 0,
+ CLK_IS_CRITICAL),
+ /* PIX PLL */
+ CLK_DIV(LS2K0300_CLK_PIX_DIV, "clk_pix_div", "pll_pix", 0x10, 24, 7),
+ CLK_DIV(LS2K0300_CLK_GMACBP_DIV, "clk_gmacbp_div", "pll_pix", 0x14, 0, 7),
+ CLK_GATE(LS2K0300_CLK_PIX_PLL_GATE, "clk_pix_pll_gate", "clk_pix_div", 0x10, 0),
+ CLK_GATE(LS2K0300_CLK_PIX_GATE, "clk_pix_gate", "clk_pix_scale", 0x24, 6),
+ CLK_GATE(LS2K0300_CLK_GMACBP_GATE, "clk_gmacbp_gate", "clk_gmacbp_div", 0x10, 1),
+ CLK_SCALE_MODE(LS2K0300_CLK_PIX_SCALE, "clk_pix_scale", "clk_pix_pll_gate", 0x20, 4, 3, 7),
+ /* clk_dev_gate */
+ CLK_DIV(LS2K0300_CLK_SDIO_SCALE, "clk_sdio_scale", "clk_dev_gate", 0x20, 24, 4),
+ CLK_GATE(LS2K0300_CLK_USB_GATE, "clk_usb_gate", "clk_usb_scale", 0x24, 2),
+ CLK_GATE(LS2K0300_CLK_SDIO_GATE, "clk_sdio_gate", "clk_sdio_scale", 0x24, 4),
+ CLK_GATE(LS2K0300_CLK_APB_GATE, "clk_apb_gate", "clk_apb_scale", 0x24, 3),
+ CLK_GATE_FLAGS(LS2K0300_CLK_BOOT_GATE, "clk_boot_gate", "clk_boot_scale", 0x24, 1,
+ CLK_IS_CRITICAL),
+ CLK_SCALE_MODE(LS2K0300_CLK_USB_SCALE, "clk_usb_scale", "clk_dev_gate", 0x20, 12, 3, 15),
+ CLK_SCALE_MODE(LS2K0300_CLK_APB_SCALE, "clk_apb_scale", "clk_dev_gate", 0x20, 16, 3, 19),
+ CLK_SCALE_MODE(LS2K0300_CLK_BOOT_SCALE, "clk_boot_scale", "clk_dev_gate", 0x20, 8, 3, 11),
+};
+
static const struct loongson2_clk_board_info ls2k0500_clks[] = {
CLK_PLL(LOONGSON2_NODE_PLL, "pll_node", 0, 16, 8, 8, 6),
CLK_PLL(LOONGSON2_DDR_PLL, "pll_ddr", 0x8, 16, 8, 8, 6),
@@ -230,20 +298,26 @@ static const struct clk_ops loongson2_pll_recalc_ops = {
static unsigned long loongson2_freqscale_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
- u64 val, mult;
+ u64 val, scale;
+ u32 mode = 0;
struct loongson2_clk_data *clk = to_loongson2_clk(hw);
val = readq(clk->reg);
- mult = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+ scale = loongson2_rate_part(val, clk->div_shift, clk->div_width) + 1;
+
+ if (clk->bit_idx)
+ mode = val & BIT(clk->bit_idx - 1);
- return div_u64((u64)parent_rate * mult, 8);
+ return mode == 0 ? div_u64((u64)parent_rate * scale, 8) :
+ div_u64((u64)parent_rate, scale);
}
static const struct clk_ops loongson2_freqscale_recalc_ops = {
.recalc_rate = loongson2_freqscale_recalc_rate,
};
-static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
+static struct clk_hw *loongson2_clk_register(const char *parent,
+ struct loongson2_clk_provider *clp,
const struct loongson2_clk_board_info *cld,
const struct clk_ops *ops)
{
@@ -260,17 +334,14 @@ static struct clk_hw *loongson2_clk_register(struct loongson2_clk_provider *clp,
init.ops = ops;
init.flags = 0;
init.num_parents = 1;
-
- if (!cld->parent_name)
- init.parent_data = pdata;
- else
- init.parent_names = &cld->parent_name;
+ init.parent_names = &parent;
clk->reg = clp->base + cld->reg_offset;
clk->div_shift = cld->div_shift;
clk->div_width = cld->div_width;
clk->mult_shift = cld->mult_shift;
clk->mult_width = cld->mult_width;
+ clk->bit_idx = cld->bit_idx;
clk->hw.init = &init;
hw = &clk->hw;
@@ -288,11 +359,17 @@ static int loongson2_clk_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct loongson2_clk_provider *clp;
const struct loongson2_clk_board_info *p, *data;
+ const char *refclk_name, *parent_name;
data = device_get_match_data(dev);
if (!data)
return -EINVAL;
+ refclk_name = of_clk_get_parent_name(dev->of_node, 0);
+ if (IS_ERR(refclk_name))
+ return dev_err_probe(dev, PTR_ERR(refclk_name),
+ "failed to get refclk name\n");
+
for (p = data; p->name; p++)
clks_num = max(clks_num, p->id + 1);
@@ -314,32 +391,36 @@ static int loongson2_clk_probe(struct platform_device *pdev)
for (i = 0; i < clks_num; i++) {
p = &data[i];
+ parent_name = p->parent_name ? p->parent_name : refclk_name;
+
switch (p->type) {
case CLK_TYPE_PLL:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_pll_recalc_ops);
break;
case CLK_TYPE_SCALE:
- hw = loongson2_clk_register(clp, p,
+ hw = loongson2_clk_register(parent_name, clp, p,
&loongson2_freqscale_recalc_ops);
break;
case CLK_TYPE_DIVIDER:
hw = devm_clk_hw_register_divider(dev, p->name,
- p->parent_name, 0,
+ parent_name, 0,
clp->base + p->reg_offset,
p->div_shift, p->div_width,
- CLK_DIVIDER_ONE_BASED,
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
&clp->clk_lock);
break;
case CLK_TYPE_GATE:
- hw = devm_clk_hw_register_gate(dev, p->name, p->parent_name, 0,
+ hw = devm_clk_hw_register_gate(dev, p->name, parent_name,
+ p->flags,
clp->base + p->reg_offset,
p->bit_idx, 0,
&clp->clk_lock);
break;
case CLK_TYPE_FIXED:
- hw = devm_clk_hw_register_fixed_rate_parent_data(dev, p->name, pdata,
- 0, p->fixed_rate);
+ hw = devm_clk_hw_register_fixed_rate(dev, p->name, parent_name,
+ 0, p->fixed_rate);
break;
default:
return dev_err_probe(dev, -EINVAL, "Invalid clk type\n");
@@ -357,6 +438,7 @@ static int loongson2_clk_probe(struct platform_device *pdev)
}
static const struct of_device_id loongson2_clk_match_table[] = {
+ { .compatible = "loongson,ls2k0300-clk", .data = &ls2k0300_clks },
{ .compatible = "loongson,ls2k0500-clk", .data = &ls2k0500_clks },
{ .compatible = "loongson,ls2k-clk", .data = &ls2k1000_clks },
{ .compatible = "loongson,ls2k2000-clk", .data = &ls2k2000_clks },
diff --git a/drivers/clk/clk-max9485.c b/drivers/clk/clk-max9485.c
index be9020b6c789..0515e3e41162 100644
--- a/drivers/clk/clk-max9485.c
+++ b/drivers/clk/clk-max9485.c
@@ -159,29 +159,32 @@ static unsigned long max9485_clkout_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int max9485_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
const struct max9485_rate *curr, *prev = NULL;
for (curr = max9485_rates; curr->out != 0; curr++) {
/* Exact matches */
- if (curr->out == rate)
- return rate;
+ if (curr->out == req->rate)
+ return 0;
/*
* Find the first entry that has a frequency higher than the
* requested one.
*/
- if (curr->out > rate) {
+ if (curr->out > req->rate) {
unsigned int mid;
/*
* If this is the first entry, clamp the value to the
* lowest possible frequency.
*/
- if (!prev)
- return curr->out;
+ if (!prev) {
+ req->rate = curr->out;
+
+ return 0;
+ }
/*
* Otherwise, determine whether the previous entry or
@@ -189,14 +192,18 @@ static long max9485_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
*/
mid = prev->out + ((curr->out - prev->out) / 2);
- return (mid > rate) ? prev->out : curr->out;
+ req->rate = mid > req->rate ? prev->out : curr->out;
+
+ return 0;
}
prev = curr;
}
/* If the last entry was still too high, clamp the value */
- return prev->out;
+ req->rate = prev->out;
+
+ return 0;
}
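/*
 * Walk-through of the midpoint rounding above, assuming hypothetical
 * adjacent table entries of 12.288 MHz and 24.576 MHz: a 16 MHz
 * request computes mid = 12288000 + (24576000 - 12288000) / 2 =
 * 18432000; since mid > 16 MHz the request snaps down to 12.288 MHz,
 * whereas a 20 MHz request (mid < rate) snaps up to 24.576 MHz.
 */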
struct max9485_clk {
@@ -221,7 +228,7 @@ static const struct max9485_clk max9485_clks[MAX9485_NUM_CLKS] = {
.parent_index = -1,
.ops = {
.set_rate = max9485_clkout_set_rate,
- .round_rate = max9485_clkout_round_rate,
+ .determine_rate = max9485_clkout_determine_rate,
.recalc_rate = max9485_clkout_recalc_rate,
},
},
diff --git a/drivers/clk/clk-milbeaut.c b/drivers/clk/clk-milbeaut.c
index 18c20aff45f7..b4f9b7143eaa 100644
--- a/drivers/clk/clk-milbeaut.c
+++ b/drivers/clk/clk-milbeaut.c
@@ -386,8 +386,8 @@ static unsigned long m10v_clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int m10v_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct m10v_clk_divider *divider = to_m10v_div(hw);
@@ -398,13 +398,19 @@ static long m10v_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(divider->reg) >> divider->shift;
val &= clk_div_mask(divider->width);
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width,
+ divider->flags, val);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ divider->table, divider->width, divider->flags);
+
+ return 0;
}
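/*
 * Sketch of the effect (values assumed, CLK_SET_RATE_PARENT not set):
 * with a 100 MHz parent and a 3-bit divider, a 40 MHz request yields
 * DIV_ROUND_UP(100, 40) = 3, i.e. 33.33 MHz; divider_round_rate()
 * rounds the divider up so the result never exceeds the request. In
 * the read-only case only the current register value is considered.
 */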
static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -450,7 +456,7 @@ static int m10v_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops m10v_clk_divider_ops = {
.recalc_rate = m10v_clk_divider_recalc_rate,
- .round_rate = m10v_clk_divider_round_rate,
+ .determine_rate = m10v_clk_divider_determine_rate,
.set_rate = m10v_clk_divider_set_rate,
};
diff --git a/drivers/clk/clk-multiplier.c b/drivers/clk/clk-multiplier.c
index e507aa958da9..6f2955d408b6 100644
--- a/drivers/clk/clk-multiplier.c
+++ b/drivers/clk/clk-multiplier.c
@@ -112,14 +112,16 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
return bestmult;
}
-static long clk_multiplier_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_multiplier_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_multiplier *mult = to_clk_multiplier(hw);
- unsigned long factor = __bestmult(hw, rate, parent_rate,
+ unsigned long factor = __bestmult(hw, req->rate, &req->best_parent_rate,
mult->width, mult->flags);
- return *parent_rate * factor;
+ req->rate = req->best_parent_rate * factor;
+
+ return 0;
}
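/*
 * Example (values assumed, CLK_SET_RATE_PARENT not set): for a 24 MHz
 * parent and a 100 MHz request, __bestmult() derives the factor by
 * integer division, 100 / 24 = 4, so req->rate becomes
 * 24 MHz * 4 = 96 MHz.
 */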
static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -150,7 +152,7 @@ static int clk_multiplier_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_multiplier_ops = {
.recalc_rate = clk_multiplier_recalc_rate,
- .round_rate = clk_multiplier_round_rate,
+ .determine_rate = clk_multiplier_determine_rate,
.set_rate = clk_multiplier_set_rate,
};
EXPORT_SYMBOL_GPL(clk_multiplier_ops);
diff --git a/drivers/clk/clk-rp1.c b/drivers/clk/clk-rp1.c
index afff90d48734..fd144755b879 100644
--- a/drivers/clk/clk-rp1.c
+++ b/drivers/clk/clk-rp1.c
@@ -368,6 +368,11 @@ struct rp1_clk_desc {
struct clk_divider div;
};
+static struct rp1_clk_desc *clk_audio_core;
+static struct rp1_clk_desc *clk_audio;
+static struct rp1_clk_desc *clk_i2s;
+static struct clk_hw *clk_xosc;
+
static inline
void clockman_write(struct rp1_clockman *clockman, u32 reg, u32 val)
{
@@ -475,7 +480,6 @@ static int rp1_pll_core_set_rate(struct clk_hw *hw,
struct rp1_clk_desc *pll_core = container_of(hw, struct rp1_clk_desc, hw);
struct rp1_clockman *clockman = pll_core->clockman;
const struct rp1_pll_core_data *data = pll_core->data;
- unsigned long calc_rate;
u32 fbdiv_int, fbdiv_frac;
/* Disable dividers to start with. */
@@ -484,8 +488,8 @@ static int rp1_pll_core_set_rate(struct clk_hw *hw,
clockman_write(clockman, data->fbdiv_frac_reg, 0);
spin_unlock(&clockman->regs_lock);
- calc_rate = get_pll_core_divider(hw, rate, parent_rate,
- &fbdiv_int, &fbdiv_frac);
+ get_pll_core_divider(hw, rate, parent_rate,
+ &fbdiv_int, &fbdiv_frac);
spin_lock(&clockman->regs_lock);
clockman_write(clockman, data->pwr_reg, fbdiv_frac ? 0 : PLL_PWR_DSMPD);
@@ -497,8 +501,6 @@ static int rp1_pll_core_set_rate(struct clk_hw *hw,
if (WARN_ON_ONCE(parent_rate > (rate / 16)))
return -ERANGE;
- pll_core->cached_rate = calc_rate;
-
spin_lock(&clockman->regs_lock);
/* Don't need to divide ref unless parent_rate > (output freq / 16) */
clockman_write(clockman, data->cs_reg,
@@ -530,13 +532,16 @@ static unsigned long rp1_pll_core_recalc_rate(struct clk_hw *hw,
return calc_rate;
}
-static long rp1_pll_core_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_core_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv_int, fbdiv_frac;
- return get_pll_core_divider(hw, rate, *parent_rate,
- &fbdiv_int, &fbdiv_frac);
+ req->rate = get_pll_core_divider(hw, req->rate, req->best_parent_rate,
+ &fbdiv_int,
+ &fbdiv_frac);
+
+ return 0;
}
static void get_pll_prim_dividers(unsigned long rate, unsigned long parent_rate,
@@ -614,14 +619,20 @@ static unsigned long rp1_pll_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, prim_div1 * prim_div2);
}
-static long rp1_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
u32 div1, div2;
- get_pll_prim_dividers(rate, *parent_rate, &div1, &div2);
+ if (hw == clk_audio_hw && clk_audio->cached_rate == req->rate)
+ req->best_parent_rate = clk_audio_core->cached_rate;
- return DIV_ROUND_CLOSEST(*parent_rate, div1 * div2);
+ get_pll_prim_dividers(req->rate, req->best_parent_rate, &div1, &div2);
+
+ req->rate = DIV_ROUND_CLOSEST(req->best_parent_rate, div1 * div2);
+
+ return 0;
}
static int rp1_pll_ph_is_on(struct clk_hw *hw)
@@ -671,13 +682,15 @@ static unsigned long rp1_pll_ph_recalc_rate(struct clk_hw *hw,
return parent_rate / data->fixed_divider;
}
-static long rp1_pll_ph_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_ph_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct rp1_clk_desc *pll_ph = container_of(hw, struct rp1_clk_desc, hw);
const struct rp1_pll_ph_data *data = pll_ph->data;
- return *parent_rate / data->fixed_divider;
+ req->rate = req->best_parent_rate / data->fixed_divider;
+
+ return 0;
}
static int rp1_pll_divider_is_on(struct clk_hw *hw)
@@ -754,11 +767,12 @@ static unsigned long rp1_pll_divider_recalc_rate(struct clk_hw *hw,
return clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long rp1_pll_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int rp1_pll_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return clk_divider_ops.round_rate(hw, rate, parent_rate);
+ return clk_divider_ops.determine_rate(hw, req);
}
static int rp1_clock_is_on(struct clk_hw *hw)
@@ -964,6 +978,59 @@ static int rp1_clock_set_rate(struct clk_hw *hw, unsigned long rate,
return rp1_clock_set_rate_and_parent(hw, rate, parent_rate, 0xff);
}
+static unsigned long calc_core_pll_rate(struct clk_hw *pll_hw,
+ unsigned long target_rate,
+ int *pdiv_prim, int *pdiv_clk)
+{
+ static const int prim_divs[] = {
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 16,
+ 18, 20, 21, 24, 25, 28, 30, 35, 36, 42, 49,
+ };
+ const unsigned long xosc_rate = clk_hw_get_rate(clk_xosc);
+ const unsigned long core_min = xosc_rate * 16;
+ const unsigned long core_max = 2400000000;
+ int best_div_prim = 1, best_div_clk = 1;
+ unsigned long best_rate = core_max + 1;
+ unsigned long core_rate = 0;
+ int div_int, div_frac;
+ u64 div;
+ int i;
+
+ /* Given the target rate, choose a set of divisors/multipliers */
+ for (i = 0; i < ARRAY_SIZE(prim_divs); i++) {
+ int div_prim = prim_divs[i];
+ int div_clk;
+
+ for (div_clk = 1; div_clk <= 256; div_clk++) {
+ core_rate = target_rate * div_clk * div_prim;
+ if (core_rate >= core_min) {
+ if (core_rate < best_rate) {
+ best_rate = core_rate;
+ best_div_prim = div_prim;
+ best_div_clk = div_clk;
+ }
+ break;
+ }
+ }
+ }
+
+ if (best_rate < core_max) {
+ div = ((best_rate << 24) + xosc_rate / 2) / xosc_rate;
+ div_int = div >> 24;
+ div_frac = div % (1 << 24);
+ core_rate = (xosc_rate * ((div_int << 24) + div_frac) + (1 << 23)) >> 24;
+ } else {
+ core_rate = 0;
+ }
+
+ if (pdiv_prim)
+ *pdiv_prim = best_div_prim;
+ if (pdiv_clk)
+ *pdiv_clk = best_div_clk;
+
+ return core_rate;
+}
+
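+/*
+ * Worked example (assuming the usual 50 MHz RP1 xosc): for a
+ * 12.288 MHz I2S target, core_min = 50 MHz * 16 = 800 MHz. The first
+ * divisor pair to clear that floor is div_prim = 2, div_clk = 33,
+ * since 12.288 MHz * 2 * 33 = 811.008 MHz; the feedback divider then
+ * works out to 811.008 / 50 = 16.22016 (div_int = 16 plus a 24-bit
+ * fraction), and the quantised core rate lands back at ~811.008 MHz.
+ */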
static void rp1_clock_choose_div_and_prate(struct clk_hw *hw,
int parent_idx,
unsigned long rate,
@@ -972,12 +1039,35 @@ static void rp1_clock_choose_div_and_prate(struct clk_hw *hw,
{
struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
const struct rp1_clock_data *data = clock->data;
+ struct clk_hw *clk_audio_hw = &clk_audio->hw;
+ struct clk_hw *clk_i2s_hw = &clk_i2s->hw;
struct clk_hw *parent;
u32 div;
u64 tmp;
parent = clk_hw_get_parent_by_index(hw, parent_idx);
+ if (hw == clk_i2s_hw && clk_i2s->cached_rate == rate && parent == clk_audio_hw) {
+ *prate = clk_audio->cached_rate;
+ *calc_rate = rate;
+ return;
+ }
+
+ if (hw == clk_i2s_hw && parent == clk_audio_hw) {
+ unsigned long core_rate, audio_rate, i2s_rate;
+ int div_prim, div_clk;
+
+ core_rate = calc_core_pll_rate(parent, rate, &div_prim, &div_clk);
+ audio_rate = DIV_ROUND_CLOSEST(core_rate, div_prim);
+ i2s_rate = DIV_ROUND_CLOSEST(audio_rate, div_clk);
+ clk_audio_core->cached_rate = core_rate;
+ clk_audio->cached_rate = audio_rate;
+ clk_i2s->cached_rate = i2s_rate;
+ *prate = audio_rate;
+ *calc_rate = i2s_rate;
+ return;
+ }
+
*prate = clk_hw_get_rate(parent);
div = rp1_clock_choose_div(rate, *prate, data);
@@ -1062,19 +1152,47 @@ static int rp1_clock_determine_rate(struct clk_hw *hw,
return 0;
}
+static int rp1_varsrc_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ /*
+ * "varsrc" exists purely to let clock dividers know the frequency
+ * of an externally-managed clock source (such as MIPI DSI byte-clock)
+ * which may change at run-time as a side-effect of some other driver.
+ */
+ clock->cached_rate = rate;
+ return 0;
+}
+
+static unsigned long rp1_varsrc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rp1_clk_desc *clock = container_of(hw, struct rp1_clk_desc, hw);
+
+ return clock->cached_rate;
+}
+
+static int rp1_varsrc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return 0;
+}
+
static const struct clk_ops rp1_pll_core_ops = {
.is_prepared = rp1_pll_core_is_on,
.prepare = rp1_pll_core_on,
.unprepare = rp1_pll_core_off,
.set_rate = rp1_pll_core_set_rate,
.recalc_rate = rp1_pll_core_recalc_rate,
- .round_rate = rp1_pll_core_round_rate,
+ .determine_rate = rp1_pll_core_determine_rate,
};
static const struct clk_ops rp1_pll_ops = {
.set_rate = rp1_pll_set_rate,
.recalc_rate = rp1_pll_recalc_rate,
- .round_rate = rp1_pll_round_rate,
+ .determine_rate = rp1_pll_determine_rate,
};
static const struct clk_ops rp1_pll_ph_ops = {
@@ -1082,7 +1200,7 @@ static const struct clk_ops rp1_pll_ph_ops = {
.prepare = rp1_pll_ph_on,
.unprepare = rp1_pll_ph_off,
.recalc_rate = rp1_pll_ph_recalc_rate,
- .round_rate = rp1_pll_ph_round_rate,
+ .determine_rate = rp1_pll_ph_determine_rate,
};
static const struct clk_ops rp1_pll_divider_ops = {
@@ -1091,7 +1209,7 @@ static const struct clk_ops rp1_pll_divider_ops = {
.unprepare = rp1_pll_divider_off,
.set_rate = rp1_pll_divider_set_rate,
.recalc_rate = rp1_pll_divider_recalc_rate,
- .round_rate = rp1_pll_divider_round_rate,
+ .determine_rate = rp1_pll_divider_determine_rate,
};
static const struct clk_ops rp1_clk_ops = {
@@ -1106,6 +1224,12 @@ static const struct clk_ops rp1_clk_ops = {
.determine_rate = rp1_clock_determine_rate,
};
+static const struct clk_ops rp1_varsrc_ops = {
+ .set_rate = rp1_varsrc_set_rate,
+ .recalc_rate = rp1_varsrc_recalc_rate,
+ .determine_rate = rp1_varsrc_determine_rate,
+};
+
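+/*
+ * Usage sketch (hypothetical consumer, not part of this driver): a
+ * DSI host that owns the byte clock would publish its current
+ * frequency with something like
+ *
+ * clk_set_rate(byteclk, 75000000);
+ *
+ * so that dividers fed from clksrc_mipi*_dsi_byteclk recalculate
+ * against an accurate parent rate.
+ */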
static struct clk_hw *rp1_register_pll(struct rp1_clockman *clockman,
struct rp1_clk_desc *desc)
{
@@ -1241,6 +1365,36 @@ static struct rp1_clk_desc pll_sys_desc = REGISTER_PLL(
)
);
+static struct rp1_clk_desc pll_audio_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_PRIM,
+ .fc0_src = FC_NUM(4, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_video_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_PRIM,
+ .fc0_src = FC_NUM(3, 2),
+ )
+);
+
static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV(
.hw.init = CLK_HW_INIT_PARENTS_DATA(
"pll_sys_sec",
@@ -1256,16 +1410,42 @@ static struct rp1_clk_desc pll_sys_sec_desc = REGISTER_PLL_DIV(
)
);
+static struct rp1_clk_desc pll_video_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_VIDEO_SEC,
+ .fc0_src = FC_NUM(5, 3),
+ )
+);
+
+static const struct clk_parent_data clk_eth_tsu_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
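+/*
+ * In this table and similar ones below, a { .index = -1 } entry is a
+ * placeholder: it keeps each real parent at its hardware mux position
+ * while leaving that input unconnected, so the clk core treats it as
+ * unavailable.
+ */
+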
static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK(
.hw.init = CLK_HW_INIT_PARENTS_DATA(
"clk_eth_tsu",
- (const struct clk_parent_data[]) { { .index = 0 } },
+ clk_eth_tsu_parents,
&rp1_clk_ops,
0
),
CLK_DATA(rp1_clock_data,
.num_std_parents = 0,
- .num_aux_parents = 1,
+ .num_aux_parents = 8,
.ctrl_reg = CLK_ETH_TSU_CTRL,
.div_int_reg = CLK_ETH_TSU_DIV_INT,
.sel_reg = CLK_ETH_TSU_SEL,
@@ -1278,6 +1458,7 @@ static struct rp1_clk_desc clk_eth_tsu_desc = REGISTER_CLK(
static const struct clk_parent_data clk_eth_parents[] = {
{ .hw = &pll_sys_sec_desc.div.hw },
{ .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
};
static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK(
@@ -1289,7 +1470,7 @@ static struct rp1_clk_desc clk_eth_desc = REGISTER_CLK(
),
CLK_DATA(rp1_clock_data,
.num_std_parents = 0,
- .num_aux_parents = 2,
+ .num_aux_parents = 3,
.ctrl_reg = CLK_ETH_CTRL,
.div_int_reg = CLK_ETH_DIV_INT,
.sel_reg = CLK_ETH_SEL,
@@ -1342,6 +1523,756 @@ static struct rp1_clk_desc pll_sys_pri_ph_desc = REGISTER_PLL(
)
);
+static struct rp1_clk_desc pll_audio_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_AUDIO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
+static struct rp1_clk_desc pll_video_pri_ph_desc = REGISTER_PLL(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_video_pri_ph",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_video_desc.hw }
+ },
+ &rp1_pll_ph_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_ph_data,
+ .ph_reg = PLL_VIDEO_PRIM,
+ .fixed_divider = 2,
+ .phase = RP1_PLL_PHASE_0,
+ .fc0_src = FC_NUM(4, 3),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_sec_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_sec",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_SEC,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc pll_audio_tern_desc = REGISTER_PLL_DIV(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "pll_audio_tern",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_audio_core_desc.hw }
+ },
+ &rp1_pll_divider_ops,
+ 0
+ ),
+ CLK_DATA(rp1_pll_data,
+ .ctrl_reg = PLL_AUDIO_TERN,
+ .fc0_src = FC_NUM(6, 2),
+ )
+);
+
+static struct rp1_clk_desc clk_slow_sys_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_slow_sys",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ CLK_IS_CRITICAL
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ .ctrl_reg = CLK_SLOW_SYS_CTRL,
+ .div_int_reg = CLK_SLOW_SYS_DIV_INT,
+ .sel_reg = CLK_SLOW_SYS_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 4),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static const struct clk_parent_data clk_dma_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_dma_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dma",
+ clk_dma_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_DMA_CTRL,
+ .div_int_reg = CLK_DMA_DIV_INT,
+ .sel_reg = CLK_DMA_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 2),
+ )
+);
+
+static const struct clk_parent_data clk_uart_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_uart_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_uart",
+ clk_uart_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_UART_CTRL,
+ .div_int_reg = CLK_UART_DIV_INT,
+ .sel_reg = CLK_UART_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(6, 7),
+ )
+);
+
+static const struct clk_parent_data clk_pwm0_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm0",
+ clk_pwm0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM0_CTRL,
+ .div_int_reg = CLK_PWM0_DIV_INT,
+ .div_frac_reg = CLK_PWM0_DIV_FRAC,
+ .sel_reg = CLK_PWM0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(0, 5),
+ )
+);
+
+static const struct clk_parent_data clk_pwm1_parents[] = {
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_pwm1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_pwm1",
+ clk_pwm1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_PWM1_CTRL,
+ .div_int_reg = CLK_PWM1_DIV_INT,
+ .div_frac_reg = CLK_PWM1_DIV_FRAC,
+ .sel_reg = CLK_PWM1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(1, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_in_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_in_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_in",
+ clk_audio_in_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 5,
+ .ctrl_reg = CLK_AUDIO_IN_CTRL,
+ .div_int_reg = CLK_AUDIO_IN_DIV_INT,
+ .sel_reg = CLK_AUDIO_IN_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 76800 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(2, 5),
+ )
+);
+
+static const struct clk_parent_data clk_audio_out_parents[] = {
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = 0 },
+};
+
+static struct rp1_clk_desc clk_audio_out_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_audio_out",
+ clk_audio_out_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 4,
+ .ctrl_reg = CLK_AUDIO_OUT_CTRL,
+ .div_int_reg = CLK_AUDIO_OUT_DIV_INT,
+ .sel_reg = CLK_AUDIO_OUT_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 153600 * HZ_PER_KHZ,
+ .fc0_src = FC_NUM(3, 5),
+ )
+);
+
+static const struct clk_parent_data clk_i2s_parents[] = {
+ { .index = 0 },
+ { .hw = &pll_audio_desc.hw },
+ { .hw = &pll_audio_sec_desc.hw },
+};
+
+static struct rp1_clk_desc clk_i2s_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_i2s",
+ clk_i2s_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_PARENT
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 3,
+ .ctrl_reg = CLK_I2S_CTRL,
+ .div_int_reg = CLK_I2S_DIV_INT,
+ .sel_reg = CLK_I2S_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi0_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI0_CFG_CTRL,
+ .div_int_reg = CLK_MIPI0_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI0_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_mipi1_cfg_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_cfg",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_MIPI1_CFG_CTRL,
+ .div_int_reg = CLK_MIPI1_CFG_DIV_INT,
+ .sel_reg = CLK_MIPI1_CFG_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 6),
+ .clk_src_mask = 0x1,
+ )
+);
+
+static struct rp1_clk_desc clk_adc_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_adc",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_ADC_CTRL,
+ .div_int_reg = CLK_ADC_DIV_INT,
+ .sel_reg = CLK_ADC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 5),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_timer_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_timer",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_TIMER_CTRL,
+ .div_int_reg = CLK_SDIO_TIMER_DIV_INT,
+ .sel_reg = CLK_SDIO_TIMER_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 50 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 4),
+ )
+);
+
+static struct rp1_clk_desc clk_sdio_alt_src_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_sdio_alt_src",
+ (const struct clk_parent_data[]) {
+ { .hw = &pll_sys_desc.hw }
+ },
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 1,
+ .ctrl_reg = CLK_SDIO_ALT_SRC_CTRL,
+ .div_int_reg = CLK_SDIO_ALT_SRC_DIV_INT,
+ .sel_reg = CLK_SDIO_ALT_SRC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 4),
+ )
+);
+
+static const struct clk_parent_data clk_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_dpi",
+ clk_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DPI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_DPI_DIV_INT,
+ .sel_reg = VIDEO_CLK_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp0_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_i2s_desc.hw },
+ { .hw = &clk_adc_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp0_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp0",
+ clk_gp0_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(0),
+ .ctrl_reg = CLK_GP0_CTRL,
+ .div_int_reg = CLK_GP0_DIV_INT,
+ .div_frac_reg = CLK_GP0_DIV_FRAC,
+ .sel_reg = CLK_GP0_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp1_parents[] = {
+ { .hw = &clk_sdio_timer_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_adc_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp1_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp1",
+ clk_gp1_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(1),
+ .ctrl_reg = CLK_GP1_CTRL,
+ .div_int_reg = CLK_GP1_DIV_INT,
+ .div_frac_reg = CLK_GP1_DIV_FRAC,
+ .sel_reg = CLK_GP1_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(1, 1),
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi0_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi0_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static struct rp1_clk_desc clksrc_mipi1_dsi_byteclk_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clksrc_mipi1_dsi_byteclk",
+ (const struct clk_parent_data[]) { { .index = 0 } },
+ &rp1_varsrc_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 1,
+ .num_aux_parents = 0,
+ )
+);
+
+static const struct clk_parent_data clk_mipi0_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi0_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi0_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi0_dpi",
+ clk_mipi0_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI0_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI0_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI0_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI0_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 6),
+ )
+);
+
+static const struct clk_parent_data clk_mipi1_dpi_parents[] = {
+ { .hw = &pll_sys_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clksrc_mipi1_dsi_byteclk_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_mipi1_dpi_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_mipi1_dpi",
+ clk_mipi1_dpi_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let DSI driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_MIPI1_DPI_CTRL,
+ .div_int_reg = VIDEO_CLK_MIPI1_DPI_DIV_INT,
+ .div_frac_reg = VIDEO_CLK_MIPI1_DPI_DIV_FRAC,
+ .sel_reg = VIDEO_CLK_MIPI1_DPI_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 200 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp2_parents[] = {
+ { .hw = &clk_sdio_alt_src_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_sys_sec_desc.hw },
+ { .index = -1 },
+ { .hw = &pll_video_desc.hw },
+ { .hw = &clk_audio_in_desc.hw },
+ { .hw = &clk_dpi_desc.hw },
+ { .hw = &clk_pwm0_desc.hw },
+ { .hw = &clk_pwm1_desc.hw },
+ { .hw = &clk_mipi0_dpi_desc.hw },
+ { .hw = &clk_mipi1_cfg_desc.hw },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp2_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp2",
+ clk_gp2_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(2),
+ .ctrl_reg = CLK_GP2_CTRL,
+ .div_int_reg = CLK_GP2_DIV_INT,
+ .div_frac_reg = CLK_GP2_DIV_FRAC,
+ .sel_reg = CLK_GP2_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(2, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp3_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_pri_ph_desc.hw },
+ { .hw = &clk_audio_out_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi1_dpi_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp3_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp3",
+ clk_gp3_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(3),
+ .ctrl_reg = CLK_GP3_CTRL,
+ .div_int_reg = CLK_GP3_DIV_INT,
+ .div_frac_reg = CLK_GP3_DIV_FRAC,
+ .sel_reg = CLK_GP3_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(3, 1),
+ )
+);
+
+static const struct clk_parent_data clk_gp4_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_mipi0_cfg_desc.hw },
+ { .hw = &clk_uart_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &clk_sys_desc.hw },
+};
+
+static struct rp1_clk_desc clk_gp4_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp4",
+ clk_gp4_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(4),
+ .ctrl_reg = CLK_GP4_CTRL,
+ .div_int_reg = CLK_GP4_DIV_INT,
+ .div_frac_reg = CLK_GP4_DIV_FRAC,
+ .sel_reg = CLK_GP4_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(4, 1),
+ )
+);
+
+static const struct clk_parent_data clk_vec_parents[] = {
+ { .hw = &pll_sys_pri_ph_desc.hw },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &pll_video_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_vec_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_vec",
+ clk_vec_parents,
+ &rp1_clk_ops,
+ CLK_SET_RATE_NO_REPARENT /* Let VEC driver set parent */
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 8,
+ .ctrl_reg = VIDEO_CLK_VEC_CTRL,
+ .div_int_reg = VIDEO_CLK_VEC_DIV_INT,
+ .sel_reg = VIDEO_CLK_VEC_SEL,
+ .div_int_max = DIV_INT_8BIT_MAX,
+ .max_freq = 108 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(0, 6),
+ )
+);
+
+static const struct clk_parent_data clk_gp5_parents[] = {
+ { .index = 0 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .hw = &pll_video_sec_desc.hw },
+ { .hw = &clk_eth_tsu_desc.hw },
+ { .index = -1 },
+ { .hw = &clk_vec_desc.hw },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+ { .index = -1 },
+};
+
+static struct rp1_clk_desc clk_gp5_desc = REGISTER_CLK(
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(
+ "clk_gp5",
+ clk_gp5_parents,
+ &rp1_clk_ops,
+ 0
+ ),
+ CLK_DATA(rp1_clock_data,
+ .num_std_parents = 0,
+ .num_aux_parents = 16,
+ .oe_mask = BIT(5),
+ .ctrl_reg = CLK_GP5_CTRL,
+ .div_int_reg = CLK_GP5_DIV_INT,
+ .div_frac_reg = CLK_GP5_DIV_FRAC,
+ .sel_reg = CLK_GP5_SEL,
+ .div_int_max = DIV_INT_16BIT_MAX,
+ .max_freq = 100 * HZ_PER_MHZ,
+ .fc0_src = FC_NUM(5, 1),
+ )
+);
+
static struct rp1_clk_desc *const clk_desc_array[] = {
[RP1_PLL_SYS_CORE] = &pll_sys_core_desc,
[RP1_PLL_AUDIO_CORE] = &pll_audio_core_desc,
@@ -1352,6 +2283,38 @@ static struct rp1_clk_desc *const clk_desc_array[] = {
[RP1_CLK_SYS] = &clk_sys_desc,
[RP1_PLL_SYS_PRI_PH] = &pll_sys_pri_ph_desc,
[RP1_PLL_SYS_SEC] = &pll_sys_sec_desc,
+ [RP1_PLL_AUDIO] = &pll_audio_desc,
+ [RP1_PLL_VIDEO] = &pll_video_desc,
+ [RP1_PLL_AUDIO_PRI_PH] = &pll_audio_pri_ph_desc,
+ [RP1_PLL_VIDEO_PRI_PH] = &pll_video_pri_ph_desc,
+ [RP1_PLL_AUDIO_SEC] = &pll_audio_sec_desc,
+ [RP1_PLL_VIDEO_SEC] = &pll_video_sec_desc,
+ [RP1_PLL_AUDIO_TERN] = &pll_audio_tern_desc,
+ [RP1_CLK_SLOW_SYS] = &clk_slow_sys_desc,
+ [RP1_CLK_DMA] = &clk_dma_desc,
+ [RP1_CLK_UART] = &clk_uart_desc,
+ [RP1_CLK_PWM0] = &clk_pwm0_desc,
+ [RP1_CLK_PWM1] = &clk_pwm1_desc,
+ [RP1_CLK_AUDIO_IN] = &clk_audio_in_desc,
+ [RP1_CLK_AUDIO_OUT] = &clk_audio_out_desc,
+ [RP1_CLK_I2S] = &clk_i2s_desc,
+ [RP1_CLK_MIPI0_CFG] = &clk_mipi0_cfg_desc,
+ [RP1_CLK_MIPI1_CFG] = &clk_mipi1_cfg_desc,
+ [RP1_CLK_ADC] = &clk_adc_desc,
+ [RP1_CLK_SDIO_TIMER] = &clk_sdio_timer_desc,
+ [RP1_CLK_SDIO_ALT_SRC] = &clk_sdio_alt_src_desc,
+ [RP1_CLK_GP0] = &clk_gp0_desc,
+ [RP1_CLK_GP1] = &clk_gp1_desc,
+ [RP1_CLK_GP2] = &clk_gp2_desc,
+ [RP1_CLK_GP3] = &clk_gp3_desc,
+ [RP1_CLK_GP4] = &clk_gp4_desc,
+ [RP1_CLK_GP5] = &clk_gp5_desc,
+ [RP1_CLK_VEC] = &clk_vec_desc,
+ [RP1_CLK_DPI] = &clk_dpi_desc,
+ [RP1_CLK_MIPI0_DPI] = &clk_mipi0_dpi_desc,
+ [RP1_CLK_MIPI1_DPI] = &clk_mipi1_dpi_desc,
+ [RP1_CLK_MIPI0_DSI_BYTECLOCK] = &clksrc_mipi0_dsi_byteclk_desc,
+ [RP1_CLK_MIPI1_DSI_BYTECLOCK] = &clksrc_mipi1_dsi_byteclk_desc,
};
static const struct regmap_range rp1_reg_ranges[] = {
@@ -1466,6 +2429,11 @@ static int rp1_clk_probe(struct platform_device *pdev)
hws[i] = desc->clk_register(clockman, desc);
}
+ clk_audio_core = &pll_audio_core_desc;
+ clk_audio = &pll_audio_desc;
+ clk_i2s = &clk_i2s_desc;
+ clk_xosc = clk_hw_get_parent_by_index(&clk_i2s->hw, 0);
+
platform_set_drvdata(pdev, clockman);
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
diff --git a/drivers/clk/clk-rpmi.c b/drivers/clk/clk-rpmi.c
new file mode 100644
index 000000000000..921296aafa68
--- /dev/null
+++ b/drivers/clk/clk-rpmi.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V MPXY Based Clock Driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/wordpart.h>
+
+#define RPMI_CLK_DISCRETE_MAX_NUM_RATES 16
+#define RPMI_CLK_NAME_LEN 16
+
+#define to_rpmi_clk(clk) container_of(clk, struct rpmi_clk, hw)
+
+enum rpmi_clk_config {
+ RPMI_CLK_DISABLE = 0,
+ RPMI_CLK_ENABLE = 1,
+ RPMI_CLK_CONFIG_MAX_IDX
+};
+
+#define RPMI_CLK_TYPE_MASK GENMASK(1, 0)
+enum rpmi_clk_type {
+ RPMI_CLK_DISCRETE = 0,
+ RPMI_CLK_LINEAR = 1,
+ RPMI_CLK_TYPE_MAX_IDX
+};
+
+struct rpmi_clk_context {
+ struct device *dev;
+ struct mbox_chan *chan;
+ struct mbox_client client;
+ u32 max_msg_data_size;
+};
+
+/*
+ * rpmi_clk_rates stores clock rates in the exact layout specified
+ * by the RPMI specification, so no conversion to or from another
+ * representation (e.g., struct linear_range) is needed.
+ */
+union rpmi_clk_rates {
+ u64 discrete[RPMI_CLK_DISCRETE_MAX_NUM_RATES];
+ struct {
+ u64 min;
+ u64 max;
+ u64 step;
+ } linear;
+};
+
+struct rpmi_clk {
+ struct rpmi_clk_context *context;
+ u32 id;
+ u32 num_rates;
+ u32 transition_latency;
+ enum rpmi_clk_type type;
+ union rpmi_clk_rates *rates;
+ char name[RPMI_CLK_NAME_LEN];
+ struct clk_hw hw;
+};
+
+struct rpmi_clk_rate_discrete {
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_clk_rate_linear {
+ __le32 min_lo;
+ __le32 min_hi;
+ __le32 max_lo;
+ __le32 max_hi;
+ __le32 step_lo;
+ __le32 step_hi;
+};
+
+struct rpmi_get_num_clocks_rx {
+ __le32 status;
+ __le32 num_clocks;
+};
+
+struct rpmi_get_attrs_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_attrs_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 num_rates;
+ __le32 transition_latency;
+ char name[RPMI_CLK_NAME_LEN];
+};
+
+struct rpmi_get_supp_rates_tx {
+ __le32 clkid;
+ __le32 clk_rate_idx;
+};
+
+struct rpmi_get_supp_rates_rx {
+ __le32 status;
+ __le32 flags;
+ __le32 remaining;
+ __le32 returned;
+ __le32 rates[];
+};
+
+struct rpmi_get_rate_tx {
+ __le32 clkid;
+};
+
+struct rpmi_get_rate_rx {
+ __le32 status;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_tx {
+ __le32 clkid;
+ __le32 flags;
+ __le32 lo;
+ __le32 hi;
+};
+
+struct rpmi_set_rate_rx {
+ __le32 status;
+};
+
+struct rpmi_set_config_tx {
+ __le32 clkid;
+ __le32 config;
+};
+
+struct rpmi_set_config_rx {
+ __le32 status;
+};
+
+static inline u64 rpmi_clkrate_u64(u32 __hi, u32 __lo)
+{
+ return (((u64)(__hi) << 32) | (u32)(__lo));
+}
+
+static u32 rpmi_clk_get_num_clocks(struct rpmi_clk_context *context)
+{
+ struct rpmi_get_num_clocks_rx rx, *resp;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_NUM_CLOCKS,
+ NULL, 0, &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return 0;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp || resp->status)
+ return 0;
+
+ return le32_to_cpu(resp->num_clocks);
+}
+
+static int rpmi_clk_get_attrs(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_attrs_tx tx;
+ struct rpmi_get_attrs_rx rx, *resp;
+ u8 format;
+ int ret;
+
+ tx.clkid = cpu_to_le32(clkid);
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_ATTRIBUTES,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ rpmi_clk->id = clkid;
+ rpmi_clk->num_rates = le32_to_cpu(resp->num_rates);
+ rpmi_clk->transition_latency = le32_to_cpu(resp->transition_latency);
+ strscpy(rpmi_clk->name, resp->name, RPMI_CLK_NAME_LEN);
+
+ format = le32_to_cpu(resp->flags) & RPMI_CLK_TYPE_MASK;
+ if (format >= RPMI_CLK_TYPE_MAX_IDX)
+ return -EINVAL;
+
+ rpmi_clk->type = format;
+
+ return 0;
+}
+
+static int rpmi_clk_get_supported_rates(u32 clkid, struct rpmi_clk *rpmi_clk)
+{
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_clk_rate_discrete *rate_discrete;
+ struct rpmi_clk_rate_linear *rate_linear;
+ struct rpmi_get_supp_rates_tx tx;
+ struct rpmi_get_supp_rates_rx *resp;
+ struct rpmi_mbox_message msg;
+ size_t clk_rate_idx;
+ int ret, rateidx, j;
+
+ tx.clkid = cpu_to_le32(clkid);
+ tx.clk_rate_idx = 0;
+
+ /*
+ * Make sure the rx buffer is large enough to accommodate all the
+ * rates sent in one RPMI message.
+ */
+ struct rpmi_get_supp_rates_rx *rx __free(kfree) =
+ kzalloc(context->max_msg_data_size, GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx), rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ rate_discrete = (struct rpmi_clk_rate_discrete *)resp->rates;
+
+ for (rateidx = 0; rateidx < le32_to_cpu(resp->returned); rateidx++) {
+ rpmi_clk->rates->discrete[rateidx] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[rateidx].hi),
+ le32_to_cpu(rate_discrete[rateidx].lo));
+ }
+
+ /*
+ * Keep sending the request message until all
+ * the rates are received.
+ */
+ clk_rate_idx = 0;
+ while (le32_to_cpu(resp->remaining)) {
+ clk_rate_idx += le32_to_cpu(resp->returned);
+ tx.clk_rate_idx = cpu_to_le32(clk_rate_idx);
+
+ rpmi_mbox_init_send_with_response(&msg,
+ RPMI_CLK_SRV_GET_SUPPORTED_RATES,
+ &tx, sizeof(tx),
+ rx, context->max_msg_data_size);
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+ if (!le32_to_cpu(resp->returned))
+ return -EINVAL;
+
+ for (j = 0; j < le32_to_cpu(resp->returned); j++) {
+ if (rateidx >= clk_rate_idx + le32_to_cpu(resp->returned))
+ break;
+ rpmi_clk->rates->discrete[rateidx++] =
+ rpmi_clkrate_u64(le32_to_cpu(rate_discrete[j].hi),
+ le32_to_cpu(rate_discrete[j].lo));
+ }
+ }
+ } else if (rpmi_clk->type == RPMI_CLK_LINEAR) {
+ rate_linear = (struct rpmi_clk_rate_linear *)resp->rates;
+
+ rpmi_clk->rates->linear.min = rpmi_clkrate_u64(le32_to_cpu(rate_linear->min_hi),
+ le32_to_cpu(rate_linear->min_lo));
+ rpmi_clk->rates->linear.max = rpmi_clkrate_u64(le32_to_cpu(rate_linear->max_hi),
+ le32_to_cpu(rate_linear->max_lo));
+ rpmi_clk->rates->linear.step = rpmi_clkrate_u64(le32_to_cpu(rate_linear->step_hi),
+ le32_to_cpu(rate_linear->step_lo));
+ }
+
+ return 0;
+}
+
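+/*
+ * Sizing sketch (numbers assumed): with max_msg_data_size = 64 bytes,
+ * the 16-byte status/flags/remaining/returned header leaves room for
+ * (64 - 16) / 8 = 6 discrete rates per response, so a 16-rate clock
+ * is fetched by the loop above in three round trips.
+ */
+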
+static unsigned long rpmi_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_get_rate_tx tx;
+ struct rpmi_get_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return rpmi_clkrate_u64(le32_to_cpu(resp->hi), le32_to_cpu(resp->lo));
+}
+
+static int rpmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ u64 fmin, fmax, ftmp;
+
+ /*
+ * Keep the requested rate as-is for discrete clocks and let the
+ * platform firmware, which actually controls the clock, pick the
+ * nearest supported rate.
+ */
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE)
+ return 0;
+
+ fmin = rpmi_clk->rates->linear.min;
+ fmax = rpmi_clk->rates->linear.max;
+
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
+ return 0;
+ }
+
+ ftmp = req->rate - fmin;
+ ftmp += rpmi_clk->rates->linear.step - 1;
+ do_div(ftmp, rpmi_clk->rates->linear.step);
+
+ req->rate = ftmp * rpmi_clk->rates->linear.step + fmin;
+
+ return 0;
+}
+
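+/*
+ * Worked example for the linear case (values assumed): with
+ * min = 100 MHz, step = 25 MHz and a 107 MHz request,
+ * ftmp = 7000000 + 24999999 divides down to 1, so req->rate is
+ * rounded up to 100 MHz + 1 * 25 MHz = 125 MHz.
+ */
+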
+static int rpmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_rate_tx tx;
+ struct rpmi_set_rate_rx rx, *resp;
+ int ret;
+
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+ tx.lo = cpu_to_le32(lower_32_bits(rate));
+ tx.hi = cpu_to_le32(upper_32_bits(rate));
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_RATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static int rpmi_clk_enable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx, *resp;
+ int ret;
+
+ tx.config = cpu_to_le32(RPMI_CLK_ENABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return ret;
+
+ resp = rpmi_mbox_get_msg_response(&msg);
+ if (!resp)
+ return -EINVAL;
+ if (resp->status)
+ return rpmi_to_linux_error(le32_to_cpu(resp->status));
+
+ return 0;
+}
+
+static void rpmi_clk_disable(struct clk_hw *hw)
+{
+ struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
+ struct rpmi_clk_context *context = rpmi_clk->context;
+ struct rpmi_mbox_message msg;
+ struct rpmi_set_config_tx tx;
+ struct rpmi_set_config_rx rx;
+
+ tx.config = cpu_to_le32(RPMI_CLK_DISABLE);
+ tx.clkid = cpu_to_le32(rpmi_clk->id);
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+
+ rpmi_mbox_send_message(context->chan, &msg);
+}
+
+static const struct clk_ops rpmi_clk_ops = {
+ .recalc_rate = rpmi_clk_recalc_rate,
+ .determine_rate = rpmi_clk_determine_rate,
+ .set_rate = rpmi_clk_set_rate,
+ .prepare = rpmi_clk_enable,
+ .unprepare = rpmi_clk_disable,
+};
+
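+/*
+ * Design note: every operation above is a mailbox round trip that may
+ * sleep, so gating is wired to .prepare/.unprepare (process context)
+ * rather than .enable/.disable, which the clk core calls with the
+ * enable spinlock held.
+ */
+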
+static struct clk_hw *rpmi_clk_enumerate(struct rpmi_clk_context *context, u32 clkid)
+{
+ struct device *dev = context->dev;
+ unsigned long min_rate, max_rate;
+ union rpmi_clk_rates *rates;
+ struct rpmi_clk *rpmi_clk;
+ struct clk_init_data init = {};
+ struct clk_hw *clk_hw;
+ int ret;
+
+ rates = devm_kzalloc(dev, sizeof(*rates), GFP_KERNEL);
+ if (!rates)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk = devm_kzalloc(dev, sizeof(*rpmi_clk), GFP_KERNEL);
+ if (!rpmi_clk)
+ return ERR_PTR(-ENOMEM);
+
+ rpmi_clk->context = context;
+ rpmi_clk->rates = rates;
+
+ ret = rpmi_clk_get_attrs(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Failed to get clk-%u attributes\n",
+ clkid);
+
+ ret = rpmi_clk_get_supported_rates(clkid, rpmi_clk);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Failed to get supported rates for clk-%u\n",
+ clkid);
+
+ init.flags = CLK_GET_RATE_NOCACHE;
+ init.num_parents = 0;
+ init.ops = &rpmi_clk_ops;
+ init.name = rpmi_clk->name;
+ clk_hw = &rpmi_clk->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(dev, clk_hw);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret,
+ "Unable to register clk-%u\n",
+ clkid);
+
+ if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
+ min_rate = rpmi_clk->rates->discrete[0];
+ max_rate = rpmi_clk->rates->discrete[rpmi_clk->num_rates - 1];
+ } else {
+ min_rate = rpmi_clk->rates->linear.min;
+ max_rate = rpmi_clk->rates->linear.max;
+ }
+
+ clk_hw_set_rate_range(clk_hw, min_rate, max_rate);
+
+ return clk_hw;
+}
+
+static void rpmi_clk_mbox_chan_release(void *data)
+{
+ struct mbox_chan *chan = data;
+
+ mbox_free_channel(chan);
+}
+
+static int rpmi_clk_probe(struct platform_device *pdev)
+{
+ int ret;
+ unsigned int num_clocks, i;
+ struct clk_hw_onecell_data *clk_data;
+ struct rpmi_clk_context *context;
+ struct rpmi_mbox_message msg;
+ struct clk_hw *hw_ptr;
+ struct device *dev = &pdev->dev;
+
+ context = devm_kzalloc(dev, sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return -ENOMEM;
+ context->dev = dev;
+ platform_set_drvdata(pdev, context);
+
+ context->client.dev = context->dev;
+ context->client.rx_callback = NULL;
+ context->client.tx_block = false;
+ context->client.knows_txdone = true;
+ context->client.tx_tout = 0;
+
+ context->chan = mbox_request_channel(&context->client, 0);
+ if (IS_ERR(context->chan))
+ return PTR_ERR(context->chan);
+
+ ret = devm_add_action_or_reset(dev, rpmi_clk_mbox_chan_release, context->chan);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add rpmi mbox channel cleanup\n");
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SPEC_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get spec version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "msg protocol version mismatch, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_ID);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group ID\n");
+ if (msg.attr.value != RPMI_SRVGRP_CLOCK) {
+ return dev_err_probe(dev, -EINVAL,
+ "service group match failed, expected 0x%x, found 0x%x\n",
+ RPMI_SRVGRP_CLOCK, msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_VERSION);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get service group version\n");
+ if (msg.attr.value < RPMI_MKVER(1, 0)) {
+ return dev_err_probe(dev, -EINVAL,
+ "service group version mismatch, expected 0x%x, found 0x%x\n",
+ RPMI_MKVER(1, 0), msg.attr.value);
+ }
+
+ rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE);
+ ret = rpmi_mbox_send_message(context->chan, &msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get max message data size\n");
+
+ context->max_msg_data_size = msg.attr.value;
+ num_clocks = rpmi_clk_get_num_clocks(context);
+ if (!num_clocks)
+ return dev_err_probe(dev, -ENODEV, "No clocks found\n");
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clocks),
+ GFP_KERNEL);
+ if (!clk_data)
+ return dev_err_probe(dev, -ENOMEM, "No memory for clock data\n");
+ clk_data->num = num_clocks;
+
+ for (i = 0; i < clk_data->num; i++) {
+ hw_ptr = rpmi_clk_enumerate(context, i);
+ if (IS_ERR(hw_ptr)) {
+ return dev_err_probe(dev, PTR_ERR(hw_ptr),
+ "Failed to register clk-%d\n", i);
+ }
+ clk_data->hws[i] = hw_ptr;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register clock HW provider\n");
+
+ return 0;
+}
+
+static const struct of_device_id rpmi_clk_of_match[] = {
+ { .compatible = "riscv,rpmi-clock" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmi_clk_of_match);
+
+static struct platform_driver rpmi_clk_driver = {
+ .driver = {
+ .name = "riscv-rpmi-clock",
+ .of_match_table = rpmi_clk_of_match,
+ },
+ .probe = rpmi_clk_probe,
+};
+module_platform_driver(rpmi_clk_driver);
+
+MODULE_AUTHOR("Rahul Pathak <rpathak@ventanamicro.com>");
+MODULE_DESCRIPTION("Clock Driver based on RPMI message protocol");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index d4e9c3577b35..ff7ce12a5da6 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -11,6 +11,7 @@
#include <linux/regmap.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
+#include <linux/mfd/samsung/s2mpg10.h>
#include <linux/mfd/samsung/s2mps11.h>
#include <linux/mfd/samsung/s2mps13.h>
#include <linux/mfd/samsung/s2mps14.h>
@@ -140,6 +141,9 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
clk_data->num = S2MPS11_CLKS_NUM;
switch (hwid) {
+ case S2MPG10:
+ s2mps11_reg = S2MPG10_PMIC_RTCBUF;
+ break;
case S2MPS11X:
s2mps11_reg = S2MPS11_REG_RTC_CTRL;
break;
@@ -221,6 +225,7 @@ static void s2mps11_clk_remove(struct platform_device *pdev)
}
static const struct platform_device_id s2mps11_clk_id[] = {
+ { "s2mpg10-clk", S2MPG10},
{ "s2mps11-clk", S2MPS11X},
{ "s2mps13-clk", S2MPS13X},
{ "s2mps14-clk", S2MPS14X},
@@ -241,6 +246,9 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
*/
static const struct of_device_id s2mps11_dt_match[] __used = {
{
+ .compatible = "samsung,s2mpg10-clk",
+ .data = (void *)S2MPG10,
+ }, {
.compatible = "samsung,s2mps11-clk",
.data = (void *)S2MPS11X,
}, {
diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
index d2408403283f..6b286ea6f121 100644
--- a/drivers/clk/clk-scmi.c
+++ b/drivers/clk/clk-scmi.c
@@ -54,8 +54,8 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scmi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u64 fmin, fmax, ftmp;
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -67,20 +67,27 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* running at then.
*/
if (clk->info->rate_discrete)
- return rate;
+ return 0;
fmin = clk->info->range.min_rate;
fmax = clk->info->range.max_rate;
- if (rate <= fmin)
- return fmin;
- else if (rate >= fmax)
- return fmax;
+ if (req->rate <= fmin) {
+ req->rate = fmin;
+
+ return 0;
+ } else if (req->rate >= fmax) {
+ req->rate = fmax;
+
+ return 0;
+ }
- ftmp = rate - fmin;
+ ftmp = req->rate - fmin;
ftmp += clk->info->range.step_size - 1; /* to round up */
do_div(ftmp, clk->info->range.step_size);
- return ftmp * clk->info->range.step_size + fmin;
+ req->rate = ftmp * clk->info->range.step_size + fmin;
+
+ return 0;
}
static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -119,15 +126,6 @@ static u8 scmi_clk_get_parent(struct clk_hw *hw)
return p_idx;
}
-static int scmi_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
-{
- /*
- * Suppose all the requested rates are supported, and let firmware
- * to handle the left work.
- */
- return 0;
-}
-
static int scmi_clk_enable(struct clk_hw *hw)
{
struct scmi_clk *clk = to_scmi_clk(hw);
@@ -300,7 +298,6 @@ scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
/* Rate ops */
ops->recalc_rate = scmi_clk_recalc_rate;
- ops->round_rate = scmi_clk_round_rate;
ops->determine_rate = scmi_clk_determine_rate;
if (feats_key & BIT(SCMI_CLK_RATE_CTRL_SUPPORTED))
ops->set_rate = scmi_clk_set_rate;
@@ -349,6 +346,8 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
unsigned int atomic_threshold_us,
const struct clk_ops **clk_ops_db, size_t db_size)
{
+ int ret;
+ u32 val;
const struct scmi_clock_info *ci = sclk->info;
unsigned int feats_key = 0;
const struct clk_ops *ops;
@@ -370,8 +369,13 @@ scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
if (!ci->parent_ctrl_forbidden)
feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);
- if (ci->extended_config)
- feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ if (ci->extended_config) {
+ ret = scmi_proto_clk_ops->config_oem_get(sclk->ph, sclk->id,
+ SCMI_CLOCK_CFG_DUTY_CYCLE,
+ &val, NULL, false);
+ if (!ret)
+ feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
+ }
if (WARN_ON(feats_key >= db_size))
return NULL;
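Note: the clk-scmi hunks above are the template for most of this series. Each .round_rate op, which returned the rounded rate through its return value and took the parent rate as a pointer, becomes a .determine_rate op that reads and writes a struct clk_rate_request: the chosen rate goes into req->rate, the (possibly adjusted) parent rate into req->best_parent_rate, and the return value is 0 or a negative errno. A minimal before/after sketch with hypothetical names and limits:

#include <linux/clk-provider.h>

#define FOO_RATE_MIN	1000000UL	/* illustrative limits */
#define FOO_RATE_MAX	50000000UL

/* Old-style callback: the rounded rate is the return value. */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *parent_rate)
{
	return clamp(rate, FOO_RATE_MIN, FOO_RATE_MAX);
}

/* New-style callback: the result travels in the request structure,
 * and the parent rate is req->best_parent_rate rather than a pointer
 * argument.
 */
static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	req->rate = clamp(req->rate, FOO_RATE_MIN, FOO_RATE_MAX);

	return 0;
}

The pass-through form, returning 0 without touching req->rate, appears below in clk-scpi and the HiSilicon stub clocks, where the firmware does the rounding.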
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 19d530d52e64..0b592de7bdb2 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -32,8 +32,8 @@ static unsigned long scpi_clk_recalc_rate(struct clk_hw *hw,
return clk->scpi_ops->clk_get_val(clk->id);
}
-static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* We can't figure out what rate it will be, so just return the
@@ -41,7 +41,7 @@ static long scpi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
* after the rate is set and we'll know what rate the clock is
* running at then.
*/
- return rate;
+ return 0;
}
static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -54,7 +54,7 @@ static int scpi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_clk_ops = {
.recalc_rate = scpi_clk_recalc_rate,
- .round_rate = scpi_clk_round_rate,
+ .determine_rate = scpi_clk_determine_rate,
.set_rate = scpi_clk_set_rate,
};
@@ -92,12 +92,14 @@ static unsigned long scpi_dvfs_recalc_rate(struct clk_hw *hw,
return opp->freq;
}
-static long scpi_dvfs_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int scpi_dvfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct scpi_clk *clk = to_scpi_clk(hw);
- return __scpi_dvfs_round_rate(clk, rate);
+ req->rate = __scpi_dvfs_round_rate(clk, req->rate);
+
+ return 0;
}
static int __scpi_find_dvfs_index(struct scpi_clk *clk, unsigned long rate)
@@ -124,7 +126,7 @@ static int scpi_dvfs_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops scpi_dvfs_ops = {
.recalc_rate = scpi_dvfs_recalc_rate,
- .round_rate = scpi_dvfs_round_rate,
+ .determine_rate = scpi_dvfs_determine_rate,
.set_rate = scpi_dvfs_set_rate,
};
diff --git a/drivers/clk/clk-si514.c b/drivers/clk/clk-si514.c
index 1127c35ce57d..f61590d70575 100644
--- a/drivers/clk/clk-si514.c
+++ b/drivers/clk/clk-si514.c
@@ -227,20 +227,28 @@ static unsigned long si514_recalc_rate(struct clk_hw *hw,
return si514_calc_rate(&settings);
}
-static long si514_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si514_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si514_muldiv settings;
int err;
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- err = si514_calc_muldiv(&settings, rate);
- if (err)
- return err;
+ err = si514_calc_muldiv(&settings, req->rate);
+ if (err) {
+ req->rate = err;
- return si514_calc_rate(&settings);
+ return 0;
+ }
+
+ req->rate = si514_calc_rate(&settings);
+
+ return 0;
}
/*
@@ -289,7 +297,7 @@ static const struct clk_ops si514_clk_ops = {
.unprepare = si514_unprepare,
.is_prepared = si514_is_prepared,
.recalc_rate = si514_recalc_rate,
- .round_rate = si514_round_rate,
+ .determine_rate = si514_determine_rate,
.set_rate = si514_set_rate,
};
diff --git a/drivers/clk/clk-si521xx.c b/drivers/clk/clk-si521xx.c
index 4f7b74f889f1..4ed4e1a5f4f2 100644
--- a/drivers/clk/clk-si521xx.c
+++ b/drivers/clk/clk-si521xx.c
@@ -164,15 +164,17 @@ static unsigned long si521xx_diff_recalc_rate(struct clk_hw *hw,
return (unsigned long)rate;
}
-static long si521xx_diff_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int si521xx_diff_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long best_parent;
- best_parent = (rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ best_parent = (req->rate / SI521XX_DIFF_MULT) * SI521XX_DIFF_DIV;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
- return (*prate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+ req->rate = (req->best_parent_rate / SI521XX_DIFF_DIV) * SI521XX_DIFF_MULT;
+
+ return 0;
}
static int si521xx_diff_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -208,7 +210,7 @@ static void si521xx_diff_unprepare(struct clk_hw *hw)
}
static const struct clk_ops si521xx_diff_clk_ops = {
- .round_rate = si521xx_diff_round_rate,
+ .determine_rate = si521xx_diff_determine_rate,
.set_rate = si521xx_diff_set_rate,
.recalc_rate = si521xx_diff_recalc_rate,
.prepare = si521xx_diff_prepare,
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 5004888c7eca..2499b771cd83 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -663,8 +663,8 @@ static unsigned long si5341_synth_clk_recalc_rate(struct clk_hw *hw,
return f;
}
-static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si5341_synth_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si5341_synth *synth = to_clk_si5341_synth(hw);
u64 f;
@@ -672,15 +672,21 @@ static long si5341_synth_clk_round_rate(struct clk_hw *hw, unsigned long rate,
/* The synthesizer accuracy is such that anything in range will work */
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MAX);
- if (rate < f)
- return f;
+ if (req->rate < f) {
+ req->rate = f;
+
+ return 0;
+ }
f = synth->data->freq_vco;
do_div(f, SI5341_SYNTH_N_MIN);
- if (rate > f)
- return f;
+ if (req->rate > f) {
+ req->rate = f;
- return rate;
+ return 0;
+ }
+
+ return 0;
}
static int si5341_synth_program(struct clk_si5341_synth *synth,
@@ -741,7 +747,7 @@ static const struct clk_ops si5341_synth_clk_ops = {
.prepare = si5341_synth_clk_prepare,
.unprepare = si5341_synth_clk_unprepare,
.recalc_rate = si5341_synth_clk_recalc_rate,
- .round_rate = si5341_synth_clk_round_rate,
+ .determine_rate = si5341_synth_clk_determine_rate,
.set_rate = si5341_synth_clk_set_rate,
};
diff --git a/drivers/clk/clk-si544.c b/drivers/clk/clk-si544.c
index ca3473efa314..09c06ecec1a5 100644
--- a/drivers/clk/clk-si544.c
+++ b/drivers/clk/clk-si544.c
@@ -307,16 +307,16 @@ static unsigned long si544_recalc_rate(struct clk_hw *hw,
return si544_calc_rate(&settings);
}
-static long si544_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si544_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_si544 *data = to_clk_si544(hw);
- if (!is_valid_frequency(data, rate))
+ if (!is_valid_frequency(data, req->rate))
return -EINVAL;
/* The accuracy is less than 1 Hz, so any rate is possible */
- return rate;
+ return 0;
}
/* Calculates the maximum "small" change, 950 * rate / 1000000 */
@@ -408,7 +408,7 @@ static const struct clk_ops si544_clk_ops = {
.unprepare = si544_unprepare,
.is_prepared = si544_is_prepared,
.recalc_rate = si544_recalc_rate,
- .round_rate = si544_round_rate,
+ .determine_rate = si544_determine_rate,
.set_rate = si544_set_rate,
};
diff --git a/drivers/clk/clk-si570.c b/drivers/clk/clk-si570.c
index e97fe90443a6..b0b1830dd430 100644
--- a/drivers/clk/clk-si570.c
+++ b/drivers/clk/clk-si570.c
@@ -246,34 +246,40 @@ static unsigned long si570_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long si570_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int si570_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int err;
u64 rfreq;
unsigned int n1, hs_div;
struct clk_si570 *data = to_clk_si570(hw);
- if (!rate)
+ if (!req->rate) {
+ req->rate = 0;
+
return 0;
+ }
- if (div64_u64(abs(rate - data->frequency) * 10000LL,
+ if (div64_u64(abs(req->rate - data->frequency) * 10000LL,
data->frequency) < 35) {
- rfreq = div64_u64((data->rfreq * rate) +
- div64_u64(data->frequency, 2), data->frequency);
+ rfreq = div64_u64((data->rfreq * req->rate) +
+ div64_u64(data->frequency, 2),
+ data->frequency);
n1 = data->n1;
hs_div = data->hs_div;
} else {
- err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div);
+ err = si570_calc_divs(req->rate, data, &rfreq, &n1, &hs_div);
if (err) {
dev_err(&data->i2c_client->dev,
"unable to round rate\n");
+ req->rate = 0;
+
return 0;
}
}
- return rate;
+ return 0;
}
/**
@@ -368,7 +374,7 @@ static int si570_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops si570_clk_ops = {
.recalc_rate = si570_recalc_rate,
- .round_rate = si570_round_rate,
+ .determine_rate = si570_determine_rate,
.set_rate = si570_set_rate,
};
diff --git a/drivers/clk/clk-sp7021.c b/drivers/clk/clk-sp7021.c
index 95d66191df4b..36528a71a2e6 100644
--- a/drivers/clk/clk-sp7021.c
+++ b/drivers/clk/clk-sp7021.c
@@ -7,6 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/bitfield.h>
+#include <linux/hw_bitfield.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
@@ -38,13 +39,6 @@ enum {
#define MASK_DIVN GENMASK(7, 0)
#define MASK_DIVM GENMASK(14, 8)
-/* HIWORD_MASK FIELD_PREP */
-#define HWM_FIELD_PREP(mask, value) \
-({ \
- u64 _m = mask; \
- (_m << 16) | FIELD_PREP(_m, value); \
-})
-
struct sp_pll {
struct clk_hw hw;
void __iomem *reg;
@@ -313,15 +307,15 @@ static int plltv_set_rate(struct sp_pll *clk)
u32 r0, r1, r2;
r0 = BIT(clk->bp_bit + 16);
- r0 |= HWM_FIELD_PREP(MASK_SEL_FRA, clk->p[SEL_FRA]);
- r0 |= HWM_FIELD_PREP(MASK_SDM_MOD, clk->p[SDM_MOD]);
- r0 |= HWM_FIELD_PREP(MASK_PH_SEL, clk->p[PH_SEL]);
- r0 |= HWM_FIELD_PREP(MASK_NFRA, clk->p[NFRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SEL_FRA, clk->p[SEL_FRA]);
+ r0 |= FIELD_PREP_WM16(MASK_SDM_MOD, clk->p[SDM_MOD]);
+ r0 |= FIELD_PREP_WM16(MASK_PH_SEL, clk->p[PH_SEL]);
+ r0 |= FIELD_PREP_WM16(MASK_NFRA, clk->p[NFRA]);
- r1 = HWM_FIELD_PREP(MASK_DIVR, clk->p[DIVR]);
+ r1 = FIELD_PREP_WM16(MASK_DIVR, clk->p[DIVR]);
- r2 = HWM_FIELD_PREP(MASK_DIVN, clk->p[DIVN] - 1);
- r2 |= HWM_FIELD_PREP(MASK_DIVM, clk->p[DIVM] - 1);
+ r2 = FIELD_PREP_WM16(MASK_DIVN, clk->p[DIVN] - 1);
+ r2 |= FIELD_PREP_WM16(MASK_DIVM, clk->p[DIVM] - 1);
spin_lock_irqsave(&clk->lock, flags);
writel(r0, clk->reg);
@@ -412,25 +406,27 @@ static long sp_pll_calc_div(struct sp_pll *clk, unsigned long rate)
return fbdiv;
}
-static long sp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sp_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sp_pll *clk = to_sp_pll(hw);
long ret;
- if (rate == *prate) {
- ret = *prate; /* bypass */
+ if (req->rate == req->best_parent_rate) {
+ ret = req->best_parent_rate; /* bypass */
} else if (clk->div_width == DIV_A) {
- ret = plla_round_rate(clk, rate);
+ ret = plla_round_rate(clk, req->rate);
} else if (clk->div_width == DIV_TV) {
- ret = plltv_div(clk, rate);
+ ret = plltv_div(clk, req->rate);
if (ret < 0)
- ret = *prate;
+ ret = req->best_parent_rate;
} else {
- ret = sp_pll_calc_div(clk, rate) * clk->brate;
+ ret = sp_pll_calc_div(clk, req->rate) * clk->brate;
}
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static unsigned long sp_pll_recalc_rate(struct clk_hw *hw,
@@ -535,7 +531,7 @@ static const struct clk_ops sp_pll_ops = {
.enable = sp_pll_enable,
.disable = sp_pll_disable,
.is_enabled = sp_pll_is_enabled,
- .round_rate = sp_pll_round_rate,
+ .determine_rate = sp_pll_determine_rate,
.recalc_rate = sp_pll_recalc_rate,
.set_rate = sp_pll_set_rate
};
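Note: the sp7021 change swaps a local hiword-mask helper for the generic FIELD_PREP_WM16() from <linux/hw_bitfield.h>. The deleted macro documents the semantics: the mask is replicated into the upper 16 bits of the written word, where it acts as a per-bit write enable, so a single write updates only the masked field without a read-modify-write cycle. A sketch restating the deleted helper (only valid for masks confined to the low 16 bits):

#include <linux/bits.h>
#include <linux/bitfield.h>

/* The deleted local macro, kept here to spell out the semantics the
 * generic FIELD_PREP_WM16() now provides.
 */
#define HWM_FIELD_PREP(mask, value)		\
({						\
	u64 _m = (mask);			\
	(_m << 16) | FIELD_PREP(_m, value);	\
})

static u32 foo_make_hiword_update(void)
{
	/* set the 2-bit field at [5:4] to 2; bits [21:20] of the
	 * written value act as the write enable, so all other fields
	 * are left untouched by the hardware
	 */
	return HWM_FIELD_PREP(GENMASK(5, 4), 0x2);
}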
diff --git a/drivers/clk/clk-sparx5.c b/drivers/clk/clk-sparx5.c
index 0fad0c1a0186..b2facc9c95d4 100644
--- a/drivers/clk/clk-sparx5.c
+++ b/drivers/clk/clk-sparx5.c
@@ -213,19 +213,21 @@ static unsigned long s5_pll_recalc_rate(struct clk_hw *hw,
return conf.freq;
}
-static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int s5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct s5_pll_conf conf;
- return s5_calc_params(rate, *parent_rate, &conf);
+ req->rate = s5_calc_params(req->rate, req->best_parent_rate, &conf);
+
+ return 0;
}
static const struct clk_ops s5_pll_ops = {
.enable = s5_pll_enable,
.disable = s5_pll_disable,
.set_rate = s5_pll_set_rate,
- .round_rate = s5_pll_round_rate,
+ .determine_rate = s5_pll_determine_rate,
.recalc_rate = s5_pll_recalc_rate,
};
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index 719cddc82ae6..b5d4d48432a0 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -443,8 +443,8 @@ static unsigned long clk_apb_mul_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_apb_mul_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_apb_mul *am = to_clk_apb_mul(hw);
unsigned long mult = 1;
@@ -453,12 +453,14 @@ static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate,
mult = 2;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
- unsigned long best_parent = rate / mult;
+ unsigned long best_parent = req->rate / mult;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
}
- return *prate * mult;
+ req->rate = req->best_parent_rate * mult;
+
+ return 0;
}
static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -474,7 +476,7 @@ static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate,
}
static const struct clk_ops clk_apb_mul_factor_ops = {
- .round_rate = clk_apb_mul_round_rate,
+ .determine_rate = clk_apb_mul_determine_rate,
.set_rate = clk_apb_mul_set_rate,
.recalc_rate = clk_apb_mul_recalc_rate,
};
@@ -670,21 +672,23 @@ static unsigned long stm32f4_pll_recalc(struct clk_hw *hw,
return parent_rate * n;
}
-static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm32f4_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_gate *gate = to_clk_gate(hw);
struct stm32f4_pll *pll = to_stm32f4_pll(gate);
unsigned long n;
- n = rate / *prate;
+ n = req->rate / req->best_parent_rate;
if (n < pll->n_start)
n = pll->n_start;
else if (n > 432)
n = 432;
- return *prate * n;
+ req->rate = req->best_parent_rate * n;
+
+ return 0;
}
static void stm32f4_pll_set_ssc(struct clk_hw *hw, unsigned long parent_rate,
@@ -749,7 +753,7 @@ static const struct clk_ops stm32f4_pll_gate_ops = {
.disable = stm32f4_pll_disable,
.is_enabled = stm32f4_pll_is_enabled,
.recalc_rate = stm32f4_pll_recalc,
- .round_rate = stm32f4_pll_round_rate,
+ .determine_rate = stm32f4_pll_determine_rate,
.set_rate = stm32f4_pll_set_rate,
};
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
index 38f44b5b9b1b..9511248c6bc9 100644
--- a/drivers/clk/clk-tps68470.c
+++ b/drivers/clk/clk-tps68470.c
@@ -146,12 +146,14 @@ static unsigned int tps68470_clk_cfg_lookup(unsigned long rate)
return best_idx;
}
-static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int tps68470_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned int idx = tps68470_clk_cfg_lookup(rate);
+ unsigned int idx = tps68470_clk_cfg_lookup(req->rate);
+
+ req->rate = clk_freqs[idx].freq;
- return clk_freqs[idx].freq;
+ return 0;
}
static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -186,7 +188,7 @@ static const struct clk_ops tps68470_clk_ops = {
.prepare = tps68470_clk_prepare,
.unprepare = tps68470_clk_unprepare,
.recalc_rate = tps68470_clk_recalc_rate,
- .round_rate = tps68470_clk_round_rate,
+ .determine_rate = tps68470_clk_determine_rate,
.set_rate = tps68470_clk_set_rate,
};
diff --git a/drivers/clk/clk-versaclock3.c b/drivers/clk/clk-versaclock3.c
index 9fe27dace111..1849863dbd67 100644
--- a/drivers/clk/clk-versaclock3.c
+++ b/drivers/clk/clk-versaclock3.c
@@ -289,22 +289,25 @@ static unsigned long vc3_pfd_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pfd_data *pfd = vc3->data;
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ return 0;
+ }
+
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (pfd->num == VC3_PFD1 || pfd->num == VC3_PFD3) {
if (idiv > 63)
return -EINVAL;
@@ -313,7 +316,9 @@ static long vc3_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -354,7 +359,7 @@ static int vc3_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pfd_ops = {
.recalc_rate = vc3_pfd_recalc_rate,
- .round_rate = vc3_pfd_round_rate,
+ .determine_rate = vc3_pfd_determine_rate,
.set_rate = vc3_pfd_set_rate,
};
@@ -385,36 +390,38 @@ static unsigned long vc3_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vc3_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_pll_data *pll = vc3->data;
u64 div_frc;
- if (rate < pll->vco.min)
- rate = pll->vco.min;
- if (rate > pll->vco.max)
- rate = pll->vco.max;
+ if (req->rate < pll->vco.min)
+ req->rate = pll->vco.min;
+ if (req->rate > pll->vco.max)
+ req->rate = pll->vco.max;
- vc3->div_int = rate / *parent_rate;
+ vc3->div_int = req->rate / req->best_parent_rate;
if (pll->num == VC3_PLL2) {
if (vc3->div_int > 0x7ff)
- rate = *parent_rate * 0x7ff;
+ req->rate = req->best_parent_rate * 0x7ff;
/* Determine best fractional part, which is 16 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(16) - 1;
- vc3->div_frc = min_t(u64, div64_ul(div_frc, *parent_rate), U16_MAX);
- rate = (*parent_rate *
- (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
+ vc3->div_frc = min_t(u64,
+ div64_ul(div_frc, req->best_parent_rate),
+ U16_MAX);
+ req->rate = (req->best_parent_rate *
+ (vc3->div_int * VC3_2_POW_16 + vc3->div_frc) / VC3_2_POW_16);
} else {
- rate = *parent_rate * vc3->div_int;
+ req->rate = req->best_parent_rate * vc3->div_int;
}
- return rate;
+ return 0;
}
static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -441,7 +448,7 @@ static int vc3_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_pll_ops = {
.recalc_rate = vc3_pll_recalc_rate,
- .round_rate = vc3_pll_round_rate,
+ .determine_rate = vc3_pll_determine_rate,
.set_rate = vc3_pll_set_rate,
};
@@ -498,8 +505,8 @@ static unsigned long vc3_div_recalc_rate(struct clk_hw *hw,
div_data->flags, div_data->width);
}
-static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc3_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc3_hw_data *vc3 = container_of(hw, struct vc3_hw_data, hw);
const struct vc3_div_data *div_data = vc3->data;
@@ -511,11 +518,16 @@ static long vc3_div_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv >>= div_data->shift;
bestdiv &= VC3_DIV_MASK(div_data->width);
bestdiv = vc3_get_div(div_data->table, bestdiv, div_data->flags);
- return DIV_ROUND_UP(*parent_rate, bestdiv);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv);
+
+ return 0;
}
- return divider_round_rate(hw, rate, parent_rate, div_data->table,
- div_data->width, div_data->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ div_data->table,
+ div_data->width, div_data->flags);
+
+ return 0;
}
static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -534,7 +546,7 @@ static int vc3_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc3_div_ops = {
.recalc_rate = vc3_div_recalc_rate,
- .round_rate = vc3_div_round_rate,
+ .determine_rate = vc3_div_determine_rate,
.set_rate = vc3_div_set_rate,
};
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index 4200022d2084..57228e88e81d 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -304,11 +304,11 @@ static unsigned long vc5_dbl_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long vc5_dbl_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_dbl_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if ((*parent_rate == rate) || ((*parent_rate * 2) == rate))
- return rate;
+ if ((req->best_parent_rate == req->rate) || ((req->best_parent_rate * 2) == req->rate))
+ return 0;
else
return -EINVAL;
}
@@ -332,7 +332,7 @@ static int vc5_dbl_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_dbl_ops = {
.recalc_rate = vc5_dbl_recalc_rate,
- .round_rate = vc5_dbl_round_rate,
+ .determine_rate = vc5_dbl_determine_rate,
.set_rate = vc5_dbl_set_rate,
};
@@ -363,24 +363,29 @@ static unsigned long vc5_pfd_recalc_rate(struct clk_hw *hw,
return parent_rate / VC5_REF_DIVIDER_REF_DIV(div);
}
-static long vc5_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pfd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long idiv;
/* PLL cannot operate with input clock above 50 MHz. */
- if (rate > 50000000)
+ if (req->rate > 50000000)
return -EINVAL;
/* CLKIN within range of PLL input, feed directly to PLL. */
- if (*parent_rate <= 50000000)
- return *parent_rate;
+ if (req->best_parent_rate <= 50000000) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
- idiv = DIV_ROUND_UP(*parent_rate, rate);
+ idiv = DIV_ROUND_UP(req->best_parent_rate, req->rate);
if (idiv > 127)
return -EINVAL;
- return *parent_rate / idiv;
+ req->rate = req->best_parent_rate / idiv;
+
+ return 0;
}
static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -420,7 +425,7 @@ static int vc5_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pfd_ops = {
.recalc_rate = vc5_pfd_recalc_rate,
- .round_rate = vc5_pfd_round_rate,
+ .determine_rate = vc5_pfd_determine_rate,
.set_rate = vc5_pfd_set_rate,
};
@@ -444,30 +449,32 @@ static unsigned long vc5_pll_recalc_rate(struct clk_hw *hw,
return (parent_rate * div_int) + ((parent_rate * div_frc) >> 24);
}
-static long vc5_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
struct vc5_driver_data *vc5 = hwdata->vc5;
u32 div_int;
u64 div_frc;
- rate = clamp(rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
+ req->rate = clamp(req->rate, VC5_PLL_VCO_MIN, vc5->chip_info->vco_max);
/* Determine integer part, which is 12 bit wide */
- div_int = rate / *parent_rate;
+ div_int = req->rate / req->best_parent_rate;
if (div_int > 0xfff)
- rate = *parent_rate * 0xfff;
+ req->rate = req->best_parent_rate * 0xfff;
/* Determine best fractional part, which is 24 bit wide */
- div_frc = rate % *parent_rate;
+ div_frc = req->rate % req->best_parent_rate;
div_frc *= BIT(24) - 1;
- do_div(div_frc, *parent_rate);
+ do_div(div_frc, req->best_parent_rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return (*parent_rate * div_int) + ((*parent_rate * div_frc) >> 24);
+ req->rate = (req->best_parent_rate * div_int) + ((req->best_parent_rate * div_frc) >> 24);
+
+ return 0;
}
static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -488,7 +495,7 @@ static int vc5_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_pll_ops = {
.recalc_rate = vc5_pll_recalc_rate,
- .round_rate = vc5_pll_round_rate,
+ .determine_rate = vc5_pll_determine_rate,
.set_rate = vc5_pll_set_rate,
};
@@ -520,17 +527,17 @@ static unsigned long vc5_fod_recalc_rate(struct clk_hw *hw,
return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
}
-static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vc5_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw);
/* VCO frequency is divided by two before entering FOD */
- u32 f_in = *parent_rate / 2;
+ u32 f_in = req->best_parent_rate / 2;
u32 div_int;
u64 div_frc;
/* Determine integer part, which is 12 bit wide */
- div_int = f_in / rate;
+ div_int = f_in / req->rate;
/*
* WARNING: The clock chip does not output signal if the integer part
* of the divider is 0xfff and fractional part is non-zero.
@@ -538,18 +545,20 @@ static long vc5_fod_round_rate(struct clk_hw *hw, unsigned long rate,
*/
if (div_int > 0xffe) {
div_int = 0xffe;
- rate = f_in / div_int;
+ req->rate = f_in / div_int;
}
/* Determine best fractional part, which is 30 bit wide */
- div_frc = f_in % rate;
+ div_frc = f_in % req->rate;
div_frc <<= 24;
- do_div(div_frc, rate);
+ do_div(div_frc, req->rate);
hwdata->div_int = div_int;
hwdata->div_frc = (u32)div_frc;
- return div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+ req->rate = div64_u64((u64)f_in << 24ULL, ((u64)div_int << 24ULL) + div_frc);
+
+ return 0;
}
static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -589,7 +598,7 @@ static int vc5_fod_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vc5_fod_ops = {
.recalc_rate = vc5_fod_recalc_rate,
- .round_rate = vc5_fod_round_rate,
+ .determine_rate = vc5_fod_determine_rate,
.set_rate = vc5_fod_set_rate,
};
diff --git a/drivers/clk/clk-versaclock7.c b/drivers/clk/clk-versaclock7.c
index 483285b30c13..adcc603e3259 100644
--- a/drivers/clk/clk-versaclock7.c
+++ b/drivers/clk/clk-versaclock7.c
@@ -900,17 +900,18 @@ static unsigned long vc7_fod_recalc_rate(struct clk_hw *hw, unsigned long parent
return fod_rate;
}
-static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_fod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_fod_data *fod = container_of(hw, struct vc7_fod_data, hw);
unsigned long fod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_fod_divider(rate, *parent_rate,
+ vc7_calc_fod_divider(req->rate, req->best_parent_rate,
&fod->fod_1st_int, &fod->fod_2nd_int, &fod->fod_frac);
- fod_rate = vc7_calc_fod_2nd_stage_rate(*parent_rate, fod->fod_1st_int,
+ fod_rate = vc7_calc_fod_2nd_stage_rate(req->best_parent_rate, fod->fod_1st_int,
fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s: fod_1st_int: %u, fod_2nd_int: %u, fod_frac: %llu\n",
@@ -918,7 +919,9 @@ static long vc7_fod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned l
fod->fod_1st_int, fod->fod_2nd_int, fod->fod_frac);
pr_debug("%s - %s rate: %lu\n", __func__, clk_hw_get_name(hw), fod_rate);
- return fod_rate;
+ req->rate = fod_rate;
+
+ return 0;
}
static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -952,7 +955,7 @@ static int vc7_fod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_fod_ops = {
.recalc_rate = vc7_fod_recalc_rate,
- .round_rate = vc7_fod_round_rate,
+ .determine_rate = vc7_fod_determine_rate,
.set_rate = vc7_fod_set_rate,
};
@@ -978,21 +981,24 @@ static unsigned long vc7_iod_recalc_rate(struct clk_hw *hw, unsigned long parent
return iod_rate;
}
-static long vc7_iod_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate)
+static int vc7_iod_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);
unsigned long iod_rate;
pr_debug("%s - %s: requested rate: %lu, parent_rate: %lu\n",
- __func__, clk_hw_get_name(hw), rate, *parent_rate);
+ __func__, clk_hw_get_name(hw), req->rate, req->best_parent_rate);
- vc7_calc_iod_divider(rate, *parent_rate, &iod->iod_int);
- iod_rate = div64_u64(*parent_rate, iod->iod_int);
+ vc7_calc_iod_divider(req->rate, req->best_parent_rate, &iod->iod_int);
+ iod_rate = div64_u64(req->best_parent_rate, iod->iod_int);
pr_debug("%s - %s: iod_int: %u\n", __func__, clk_hw_get_name(hw), iod->iod_int);
pr_debug("%s - %s rate: %ld\n", __func__, clk_hw_get_name(hw), iod_rate);
- return iod_rate;
+ req->rate = iod_rate;
+
+ return 0;
}
static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -1023,7 +1029,7 @@ static int vc7_iod_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long
static const struct clk_ops vc7_iod_ops = {
.recalc_rate = vc7_iod_recalc_rate,
- .round_rate = vc7_iod_round_rate,
+ .determine_rate = vc7_iod_determine_rate,
.set_rate = vc7_iod_set_rate,
};
diff --git a/drivers/clk/clk-vt8500.c b/drivers/clk/clk-vt8500.c
index 2a74a713ad59..eae5b3fbfb82 100644
--- a/drivers/clk/clk-vt8500.c
+++ b/drivers/clk/clk-vt8500.c
@@ -128,30 +128,31 @@ static unsigned long vt8500_dclk_recalc_rate(struct clk_hw *hw,
return parent_rate / div;
}
-static long vt8500_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vt8500_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_device *cdev = to_clk_device(hw);
u32 divisor;
- if (rate == 0)
+ if (req->rate == 0)
return 0;
- divisor = *prate / rate;
+ divisor = req->best_parent_rate / req->rate;
/* If prate / rate would be decimal, incr the divisor */
- if (rate * divisor < *prate)
+ if (req->rate * divisor < req->best_parent_rate)
divisor++;
/*
* If this is a request for SDMMC we have to adjust the divisor
* when >31 to use the fixed predivisor
*/
- if ((cdev->div_mask == 0x3F) && (divisor > 31)) {
+ if ((cdev->div_mask == 0x3F) && (divisor > 31))
divisor = 64 * ((divisor / 64) + 1);
- }
- return *prate / divisor;
+ req->rate = req->best_parent_rate / divisor;
+
+ return 0;
}
static int vt8500_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -202,7 +203,7 @@ static const struct clk_ops vt8500_gated_clk_ops = {
};
static const struct clk_ops vt8500_divisor_clk_ops = {
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -211,7 +212,7 @@ static const struct clk_ops vt8500_gated_divisor_clk_ops = {
.enable = vt8500_dclk_enable,
.disable = vt8500_dclk_disable,
.is_enabled = vt8500_dclk_is_enabled,
- .round_rate = vt8500_dclk_round_rate,
+ .determine_rate = vt8500_dclk_determine_rate,
.set_rate = vt8500_dclk_set_rate,
.recalc_rate = vt8500_dclk_recalc_rate,
};
@@ -594,8 +595,8 @@ static int vtwm_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int vtwm_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pll *pll = to_clk_pll(hw);
u32 filter, mul, div1, div2;
@@ -604,33 +605,43 @@ static long vtwm_pll_round_rate(struct clk_hw *hw, unsigned long rate,
switch (pll->type) {
case PLL_TYPE_VT8500:
- ret = vt8500_find_pll_bits(rate, *prate, &mul, &div1);
+ ret = vt8500_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1);
if (!ret)
- round_rate = VT8500_BITS_TO_FREQ(*prate, mul, div1);
+ round_rate = VT8500_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1);
break;
case PLL_TYPE_WM8650:
- ret = wm8650_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8650_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8650_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8650_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8750:
- ret = wm8750_find_pll_bits(rate, *prate, &filter, &mul, &div1, &div2);
+ ret = wm8750_find_pll_bits(req->rate, req->best_parent_rate,
+ &filter, &mul, &div1, &div2);
if (!ret)
- round_rate = WM8750_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8750_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
case PLL_TYPE_WM8850:
- ret = wm8850_find_pll_bits(rate, *prate, &mul, &div1, &div2);
+ ret = wm8850_find_pll_bits(req->rate, req->best_parent_rate,
+ &mul, &div1, &div2);
if (!ret)
- round_rate = WM8850_BITS_TO_FREQ(*prate, mul, div1, div2);
+ round_rate = WM8850_BITS_TO_FREQ(req->best_parent_rate,
+ mul, div1, div2);
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
if (ret)
- return ret;
+ req->rate = ret;
+ else
+ req->rate = round_rate;
- return round_rate;
+ return 0;
}
static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
@@ -665,7 +676,7 @@ static unsigned long vtwm_pll_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops vtwm_pll_ops = {
- .round_rate = vtwm_pll_round_rate,
+ .determine_rate = vtwm_pll_determine_rate,
.set_rate = vtwm_pll_set_rate,
.recalc_rate = vtwm_pll_recalc_rate,
};
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
index 34e9d4d541e2..263e927138c2 100644
--- a/drivers/clk/clk-wm831x.c
+++ b/drivers/clk/clk-wm831x.c
@@ -133,18 +133,20 @@ static unsigned long wm831x_fll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *unused)
+static int wm831x_fll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int best = 0;
int i;
for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
- if (abs(wm831x_fll_auto_rates[i] - rate) <
- abs(wm831x_fll_auto_rates[best] - rate))
+ if (abs(wm831x_fll_auto_rates[i] - req->rate) <
+ abs(wm831x_fll_auto_rates[best] - req->rate))
best = i;
- return wm831x_fll_auto_rates[best];
+ req->rate = wm831x_fll_auto_rates[best];
+
+ return 0;
}
static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -214,7 +216,7 @@ static const struct clk_ops wm831x_fll_ops = {
.is_prepared = wm831x_fll_is_prepared,
.prepare = wm831x_fll_prepare,
.unprepare = wm831x_fll_unprepare,
- .round_rate = wm831x_fll_round_rate,
+ .determine_rate = wm831x_fll_determine_rate,
.recalc_rate = wm831x_fll_recalc_rate,
.set_rate = wm831x_fll_set_rate,
.get_parent = wm831x_fll_get_parent,
diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c
index 96946a8e2854..92e39f3237c2 100644
--- a/drivers/clk/clk-xgene.c
+++ b/drivers/clk/clk-xgene.c
@@ -271,23 +271,28 @@ static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
return ret;
}
-static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int xgene_clk_pmd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
u64 ret, scale;
- if (!rate || rate >= *parent_rate)
- return *parent_rate;
+ if (!req->rate || req->rate >= req->best_parent_rate) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
/* freq = parent_rate * scaler / denom */
- ret = rate * fd->denom;
- scale = DIV_ROUND_UP_ULL(ret, *parent_rate);
+ ret = req->rate * fd->denom;
+ scale = DIV_ROUND_UP_ULL(ret, req->best_parent_rate);
- ret = (u64)*parent_rate * scale;
+ ret = (u64)req->best_parent_rate * scale;
do_div(ret, fd->denom);
- return ret;
+ req->rate = ret;
+
+ return 0;
}
static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -333,7 +338,7 @@ static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops xgene_clk_pmd_ops = {
.recalc_rate = xgene_clk_pmd_recalc_rate,
- .round_rate = xgene_clk_pmd_round_rate,
+ .determine_rate = xgene_clk_pmd_determine_rate,
.set_rate = xgene_clk_pmd_set_rate,
};
@@ -593,23 +598,25 @@ static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return parent_rate / divider_save;
}
-static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int xgene_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct xgene_clk *pclk = to_xgene_clk(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u32 divider;
if (pclk->param.divider_reg) {
/* Let's compute the divider */
- if (rate > parent_rate)
- rate = parent_rate;
- divider = parent_rate / rate; /* Rounded down */
+ if (req->rate > parent_rate)
+ req->rate = parent_rate;
+ divider = parent_rate / req->rate; /* Rounded down */
} else {
divider = 1;
}
- return parent_rate / divider;
+ req->rate = parent_rate / divider;
+
+ return 0;
}
static const struct clk_ops xgene_clk_ops = {
@@ -618,7 +625,7 @@ static const struct clk_ops xgene_clk_ops = {
.is_enabled = xgene_clk_is_enabled,
.recalc_rate = xgene_clk_recalc_rate,
.set_rate = xgene_clk_set_rate,
- .round_rate = xgene_clk_round_rate,
+ .determine_rate = xgene_clk_determine_rate,
};
static struct clk *xgene_register_clk(struct device *dev,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index b821b2cdb155..85d2f2481acf 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -6,21 +6,24 @@
* Standard functionality for the common clock API. See Documentation/driver-api/clk.rst
*/
+#include <linux/clk/clk-conf.h>
+#include <linux/clkdev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/clk/clk-conf.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
+#include <linux/device.h>
#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
#include <linux/list.h>
-#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
-#include <linux/device.h>
-#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
-#include <linux/clkdev.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/stringhash.h>
#include "clk.h"
@@ -33,6 +36,9 @@ static struct task_struct *enable_owner;
static int prepare_refcnt;
static int enable_refcnt;
+#define CLK_HASH_BITS 9
+static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
@@ -87,6 +93,7 @@ struct clk_core {
struct clk_duty duty;
struct hlist_head children;
struct hlist_node child_node;
+ struct hlist_node hashtable_node;
struct hlist_head clks;
unsigned int notifier_count;
#ifdef CONFIG_DEBUG_FS
@@ -395,45 +402,20 @@ struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);
-static struct clk_core *__clk_lookup_subtree(const char *name,
- struct clk_core *core)
-{
- struct clk_core *child;
- struct clk_core *ret;
-
- if (!strcmp(core->name, name))
- return core;
-
- hlist_for_each_entry(child, &core->children, child_node) {
- ret = __clk_lookup_subtree(name, child);
- if (ret)
- return ret;
- }
-
- return NULL;
-}
-
static struct clk_core *clk_core_lookup(const char *name)
{
- struct clk_core *root_clk;
- struct clk_core *ret;
+ struct clk_core *core;
+ u32 hash;
if (!name)
return NULL;
- /* search the 'proper' clk tree first */
- hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ hash = full_name_hash(NULL, name, strlen(name));
- /* if not found, then search the orphan tree */
- hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
- ret = __clk_lookup_subtree(name, root_clk);
- if (ret)
- return ret;
- }
+ /* search the hashtable */
+ hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
+ if (!strcmp(core->name, name))
+ return core;
return NULL;
}
@@ -4013,6 +3995,8 @@ static int __clk_core_init(struct clk_core *core)
hlist_add_head(&core->child_node, &clk_orphan_list);
core->orphan = true;
}
+ hash_add(clk_hashtable, &core->hashtable_node,
+ full_name_hash(NULL, core->name, strlen(core->name)));
/*
* Set clk's accuracy. The preferred method is to use
@@ -4089,6 +4073,7 @@ out:
clk_pm_runtime_put(core);
unlock:
if (ret) {
+ hash_del(&core->hashtable_node);
hlist_del_init(&core->child_node);
core->hw->core = NULL;
}
@@ -4610,6 +4595,7 @@ void clk_unregister(struct clk *clk)
clk_core_evict_parent_cache(clk->core);
+ hash_del(&clk->core->hashtable_node);
hlist_del_init(&clk->core->child_node);
if (clk->core->prepare_count)
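Note: the clk.c change replaces the recursive walk over the root and orphan trees in clk_core_lookup() with a fixed 512-bucket hashtable keyed on full_name_hash() of the clock name, so lookup cost no longer grows with the size of the clock tree; entries are added in __clk_core_init() and removed on init failure or unregistration. A condensed sketch of the two halves (clk_core abbreviated to the fields involved):

#include <linux/hashtable.h>
#include <linux/string.h>
#include <linux/stringhash.h>

#define CLK_HASH_BITS 9				/* 2^9 = 512 buckets */
static DEFINE_HASHTABLE(clk_hashtable, CLK_HASH_BITS);

struct clk_core {				/* abbreviated */
	const char *name;
	struct hlist_node hashtable_node;
};

static void clk_hash_insert(struct clk_core *core)
{
	hash_add(clk_hashtable, &core->hashtable_node,
		 full_name_hash(NULL, core->name, strlen(core->name)));
}

static struct clk_core *clk_hash_lookup(const char *name)
{
	struct clk_core *core;
	u32 hash = full_name_hash(NULL, name, strlen(name));

	/* only the matching bucket is scanned; the string compare
	 * disambiguates hash collisions
	 */
	hash_for_each_possible(clk_hashtable, core, hashtable_node, hash)
		if (!strcmp(core->name, name))
			return core;

	return NULL;
}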
diff --git a/drivers/clk/davinci/psc-da850.c b/drivers/clk/davinci/psc-da850.c
index 5a18bca464cd..94081ab1e688 100644
--- a/drivers/clk/davinci/psc-da850.c
+++ b/drivers/clk/davinci/psc-da850.c
@@ -6,7 +6,6 @@
*/
#include <linux/clk-provider.h>
-#include <linux/reset-controller.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/init.h>
@@ -66,14 +65,8 @@ LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0",
"fck", "ecap.1",
"fck", "ecap.2");
-static struct reset_control_lookup da850_psc0_reset_lookup_table[] = {
- RESET_LOOKUP("da850-psc0", 15, "davinci-rproc.0", NULL),
-};
-
static int da850_psc0_init(struct device *dev, void __iomem *base)
{
- reset_controller_add_lookup(da850_psc0_reset_lookup_table,
- ARRAY_SIZE(da850_psc0_reset_lookup_table));
return davinci_psc_register_clocks(dev, da850_psc0_info, 16, base);
}
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index 3a653d54bee0..7c8b00ee6019 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -34,7 +34,7 @@
.num_parents = 0, \
.flags = CLK_GET_RATE_NOCACHE, \
}, \
- },
+ }
#define to_stub_clk(_hw) container_of(_hw, struct hi3660_stub_clk, hw)
@@ -67,14 +67,14 @@ static unsigned long hi3660_stub_clk_recalc_rate(struct clk_hw *hw,
return stub_clk->rate;
}
-static long hi3660_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi3660_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/*
* LPM3 handles rate rounding so just return whatever
* rate is requested.
*/
- return rate;
+ return 0;
}
static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -97,15 +97,15 @@ static int hi3660_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi3660_stub_clk_ops = {
.recalc_rate = hi3660_stub_clk_recalc_rate,
- .round_rate = hi3660_stub_clk_round_rate,
+ .determine_rate = hi3660_stub_clk_determine_rate,
.set_rate = hi3660_stub_clk_set_rate,
};
static struct hi3660_stub_clk hi3660_stub_clks[HI3660_CLK_STUB_NUM] = {
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d")
- DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc")
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER0, 0x0001030A, "cpu-cluster.0"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_CLUSTER1, 0x0002030A, "cpu-cluster.1"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_GPU, 0x0003030A, "clk-g3d"),
+ DEFINE_CLK_STUB(HI3660_CLK_STUB_DDR, 0x00040309, "clk-ddrc"),
};
static struct clk_hw *hi3660_stub_clk_hw_get(struct of_phandle_args *clkspec,
diff --git a/drivers/clk/hisilicon/clk-hi6220-stub.c b/drivers/clk/hisilicon/clk-hi6220-stub.c
index a8319795ed1c..bf99cfafafa0 100644
--- a/drivers/clk/hisilicon/clk-hi6220-stub.c
+++ b/drivers/clk/hisilicon/clk-hi6220-stub.c
@@ -161,11 +161,11 @@ static int hi6220_stub_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int hi6220_stub_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_stub_clk *stub_clk = to_stub_clk(hw);
- unsigned long new_rate = rate / 1000; /* kHz */
+ unsigned long new_rate = req->rate / 1000; /* kHz */
switch (stub_clk->id) {
case HI6220_STUB_ACPU0:
@@ -181,12 +181,14 @@ static long hi6220_stub_clk_round_rate(struct clk_hw *hw, unsigned long rate,
break;
}
- return new_rate;
+ req->rate = new_rate;
+
+ return 0;
}
static const struct clk_ops hi6220_stub_clk_ops = {
.recalc_rate = hi6220_stub_clk_recalc_rate,
- .round_rate = hi6220_stub_clk_round_rate,
+ .determine_rate = hi6220_stub_clk_determine_rate,
.set_rate = hi6220_stub_clk_set_rate,
};
diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c
index 5348bafe694f..6bae18a84cb6 100644
--- a/drivers/clk/hisilicon/clkdivider-hi6220.c
+++ b/drivers/clk/hisilicon/clkdivider-hi6220.c
@@ -55,13 +55,15 @@ static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw,
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
-static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int hi6220_clkdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw);
- return divider_round_rate(hw, rate, prate, dclk->table,
- dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, dclk->table,
+ dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -93,7 +95,7 @@ static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops hi6220_clkdiv_ops = {
.recalc_rate = hi6220_clkdiv_recalc_rate,
- .round_rate = hi6220_clkdiv_round_rate,
+ .determine_rate = hi6220_clkdiv_determine_rate,
.set_rate = hi6220_clkdiv_set_rate,
};
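Note: as in the hi6220 divider above, drivers that used the library helper divider_round_rate() keep using it under .determine_rate; only the plumbing changes, with req->rate going in and out and &req->best_parent_rate passed where the prate pointer used to go. A sketch with illustrative table/width values:

#include <linux/clk-provider.h>

static int foo_div_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* NULL table = plain divider field; the 4-bit width is
	 * illustrative, not from any driver here
	 */
	req->rate = divider_round_rate(hw, req->rate,
				       &req->best_parent_rate,
				       NULL, 4,
				       CLK_DIVIDER_ROUND_CLOSEST);

	return 0;
}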
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 6ff6d934848a..b292e7ca5c24 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -105,6 +105,7 @@ config CLK_IMX8ULP
tristate "IMX8ULP CCM Clock Driver"
depends on ARCH_MXC || COMPILE_TEST
select MXC_CLK
+ select AUXILIARY_BUS
help
Build the driver for i.MX8ULP CCM Clock Driver
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index 03f2b2a1ab63..208b46873a18 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -41,6 +41,7 @@ clk-imx-lpcg-scu-$(CONFIG_CLK_IMX8QXP) += clk-lpcg-scu.o clk-imx8qxp-lpcg.o
clk-imx-acm-$(CONFIG_CLK_IMX8QXP) = clk-imx8-acm.o
obj-$(CONFIG_CLK_IMX8ULP) += clk-imx8ulp.o
+obj-$(CONFIG_CLK_IMX8ULP) += clk-imx8ulp-sim-lpav.o
obj-$(CONFIG_CLK_IMX1) += clk-imx1.o
obj-$(CONFIG_CLK_IMX25) += clk-imx25.o
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index 8ed2e0ad2769..37d2fc197be6 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -7,6 +7,7 @@
#include <linux/bits.h>
#include <linux/clk-provider.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -36,6 +37,9 @@ static int pcc_gate_enable(struct clk_hw *hw)
if (ret)
return ret;
+ /* Make sure the IP's clock is ready before releasing the reset */
+ udelay(1);
+
spin_lock_irqsave(gate->lock, flags);
/*
* release the sw reset for peripherals associated with
@@ -47,6 +51,15 @@ static int pcc_gate_enable(struct clk_hw *hw)
spin_unlock_irqrestore(gate->lock, flags);
+ /*
+ * Read back the register to make sure the previous write has
+ * reached the target HW register. For IPs like the GPU, we also
+ * need to wait a while after deasserting the reset to make sure
+ * the synchronous reset has completed.
+ */
+ readl(gate->reg);
+ udelay(1);
+
return 0;
}
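Note: the 7ULP composite gate gains two ordering guards: a udelay(1) so the IP clock is running before the software reset is released, and a read-back of the gate register so the posted write is known to have reached the device before the post-reset settle delay starts. The read-back is the standard MMIO write-flush pattern; a generic sketch (function and names are illustrative):

#include <linux/io.h>
#include <linux/delay.h>

static void foo_deassert_reset(void __iomem *reg, u32 val)
{
	writel(val, reg);
	readl(reg);	/* read back: flushes the posted write */
	udelay(1);	/* settle time for the synchronous reset */
}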
diff --git a/drivers/clk/imx/clk-imx8mp-audiomix.c b/drivers/clk/imx/clk-imx8mp-audiomix.c
index 775f62dddb11..131702f2c9ec 100644
--- a/drivers/clk/imx/clk-imx8mp-audiomix.c
+++ b/drivers/clk/imx/clk-imx8mp-audiomix.c
@@ -230,50 +230,19 @@ struct clk_imx8mp_audiomix_priv {
#if IS_ENABLED(CONFIG_RESET_CONTROLLER)
-static void clk_imx8mp_audiomix_reset_unregister_adev(void *_adev)
-{
- struct auxiliary_device *adev = _adev;
-
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
-}
-
-static void clk_imx8mp_audiomix_reset_adev_release(struct device *dev)
-{
- struct auxiliary_device *adev = to_auxiliary_dev(dev);
-
- kfree(adev);
-}
-
static int clk_imx8mp_audiomix_reset_controller_register(struct device *dev,
struct clk_imx8mp_audiomix_priv *priv)
{
- struct auxiliary_device *adev __free(kfree) = NULL;
- int ret;
+ struct auxiliary_device *adev;
if (!of_property_present(dev->of_node, "#reset-cells"))
return 0;
- adev = kzalloc(sizeof(*adev), GFP_KERNEL);
+ adev = devm_auxiliary_device_create(dev, "reset", NULL);
if (!adev)
- return -ENOMEM;
-
- adev->name = "reset";
- adev->dev.parent = dev;
- adev->dev.release = clk_imx8mp_audiomix_reset_adev_release;
-
- ret = auxiliary_device_init(adev);
- if (ret)
- return ret;
+ return -ENODEV;
- ret = auxiliary_device_add(adev);
- if (ret) {
- auxiliary_device_uninit(adev);
- return ret;
- }
-
- return devm_add_action_or_reset(dev, clk_imx8mp_audiomix_reset_unregister_adev,
- no_free_ptr(adev));
+ return 0;
}
#else /* !CONFIG_RESET_CONTROLLER */
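Note: the audiomix hunk deletes the hand-rolled auxiliary-device lifecycle (kzalloc, init, add, uninit on error, plus a devm unregister action) in favour of devm_auxiliary_device_create(), which does all of that in one call and ties teardown to the parent device. A sketch of the resulting shape (foo_register_reset_adev is illustrative):

#include <linux/auxiliary_bus.h>

static int foo_register_reset_adev(struct device *dev)
{
	struct auxiliary_device *adev;

	/* creates, initializes and adds the device, and unwinds it
	 * automatically when @dev goes away; NULL means failure
	 */
	adev = devm_auxiliary_device_create(dev, "reset", NULL);
	if (!adev)
		return -ENODEV;

	return 0;
}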
diff --git a/drivers/clk/imx/clk-imx8ulp-sim-lpav.c b/drivers/clk/imx/clk-imx8ulp-sim-lpav.c
new file mode 100644
index 000000000000..990c95b89b75
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8ulp-sim-lpav.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <dt-bindings/clock/imx8ulp-clock.h>
+
+#include <linux/auxiliary_bus.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define SYSCTRL0 0x8
+
+#define IMX8ULP_HIFI_CLK_GATE(gname, cname, pname, bidx) \
+ { \
+ .name = gname "_cg", \
+ .id = IMX8ULP_CLK_SIM_LPAV_HIFI_##cname, \
+ .parent = { .fw_name = pname }, \
+ .bit = bidx, \
+ }
+
+struct clk_imx8ulp_sim_lpav_data {
+ spinlock_t lock; /* shared by MUX, clock gate and reset */
+ unsigned long flags; /* for spinlock usage */
+ struct clk_hw_onecell_data clk_data; /* keep last */
+};
+
+struct clk_imx8ulp_sim_lpav_gate {
+ const char *name;
+ int id;
+ const struct clk_parent_data parent;
+ u8 bit;
+};
+
+static struct clk_imx8ulp_sim_lpav_gate gates[] = {
+ IMX8ULP_HIFI_CLK_GATE("hifi_core", CORE, "core", 17),
+ IMX8ULP_HIFI_CLK_GATE("hifi_pbclk", PBCLK, "bus", 18),
+ IMX8ULP_HIFI_CLK_GATE("hifi_plat", PLAT, "plat", 19)
+};
+
+static void clk_imx8ulp_sim_lpav_lock(void *arg) __acquires(&data->lock)
+{
+ struct clk_imx8ulp_sim_lpav_data *data = dev_get_drvdata(arg);
+
+ spin_lock_irqsave(&data->lock, data->flags);
+}
+
+static void clk_imx8ulp_sim_lpav_unlock(void *arg) __releases(&data->lock)
+{
+ struct clk_imx8ulp_sim_lpav_data *data = dev_get_drvdata(arg);
+
+ spin_unlock_irqrestore(&data->lock, data->flags);
+}
+
+static int clk_imx8ulp_sim_lpav_probe(struct platform_device *pdev)
+{
+ const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .lock = clk_imx8ulp_sim_lpav_lock,
+ .unlock = clk_imx8ulp_sim_lpav_unlock,
+ .lock_arg = &pdev->dev,
+ };
+ struct clk_imx8ulp_sim_lpav_data *data;
+ struct auxiliary_device *adev;
+ struct regmap *regmap;
+ void __iomem *base;
+ struct clk_hw *hw;
+ int i, ret;
+
+ data = devm_kzalloc(&pdev->dev,
+ struct_size(data, clk_data.hws, ARRAY_SIZE(gates)),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, data);
+
+ /*
+ * This lock is used directly by the clock gates and indirectly
+ * by the reset and MUX controllers via the regmap API.
+ */
+ spin_lock_init(&data->lock);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(base),
+ "failed to ioremap base\n");
+ /*
+ * Although the clock gates don't use the regmap API to modify the
+ * registers, we still need the regmap because the reset auxiliary
+ * driver and the MUX drivers use the parent device's regmap.
+ */
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "failed to initialize regmap\n");
+
+ data->clk_data.num = ARRAY_SIZE(gates);
+
+ for (i = 0; i < ARRAY_SIZE(gates); i++) {
+ hw = devm_clk_hw_register_gate_parent_data(&pdev->dev,
+ gates[i].name,
+ &gates[i].parent,
+ CLK_SET_RATE_PARENT,
+ base + SYSCTRL0,
+ gates[i].bit,
+ 0x0, &data->lock);
+ if (IS_ERR(hw))
+ return dev_err_probe(&pdev->dev, PTR_ERR(hw),
+ "failed to register %s gate\n",
+ gates[i].name);
+
+ data->clk_data.hws[i] = hw;
+ }
+
+ adev = devm_auxiliary_device_create(&pdev->dev, "reset", NULL);
+ if (!adev)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "failed to register aux reset\n");
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev,
+ of_clk_hw_onecell_get,
+ &data->clk_data);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register clk hw provider\n");
+
+ /* used to probe MUX child device */
+ return devm_of_platform_populate(&pdev->dev);
+}
+
+static const struct of_device_id clk_imx8ulp_sim_lpav_of_match[] = {
+ { .compatible = "fsl,imx8ulp-sim-lpav" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, clk_imx8ulp_sim_lpav_of_match);
+
+static struct platform_driver clk_imx8ulp_sim_lpav_driver = {
+ .probe = clk_imx8ulp_sim_lpav_probe,
+ .driver = {
+ .name = "clk-imx8ulp-sim-lpav",
+ .of_match_table = clk_imx8ulp_sim_lpav_of_match,
+ },
+};
+module_platform_driver(clk_imx8ulp_sim_lpav_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("i.MX8ULP LPAV System Integration Module (SIM) clock driver");
+MODULE_AUTHOR("Laurentiu Mihalcea <laurentiu.mihalcea@nxp.com>");
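Note: the key trick in the new SIM-LPAV driver is the shared spinlock: regmap_config's .lock/.unlock callbacks route regmap's internal locking through the same spinlock that the gate clocks take directly, so regmap users (the reset auxiliary driver, the MUX children) cannot race the gates on SYSCTRL0. A reduced sketch using file-scope state in place of the driver's per-device data:

#include <linux/regmap.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(foo_lock);	/* also taken by the clk gates */
static unsigned long foo_flags;

static void foo_regmap_lock(void *arg) __acquires(&foo_lock)
{
	spin_lock_irqsave(&foo_lock, foo_flags);
}

static void foo_regmap_unlock(void *arg) __releases(&foo_lock)
{
	spin_unlock_irqrestore(&foo_lock, foo_flags);
}

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.lock = foo_regmap_lock,
	.unlock = foo_regmap_unlock,
	/* .lock_arg would carry the per-device state in a real driver */
};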
diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c
index 7e88877a6245..56bed4471995 100644
--- a/drivers/clk/imx/clk-imx95-blk-ctl.c
+++ b/drivers/clk/imx/clk-imx95-blk-ctl.c
@@ -36,6 +36,7 @@ struct imx95_blk_ctl {
void __iomem *base;
/* clock gate register */
u32 clk_reg_restore;
+ const struct imx95_blk_ctl_dev_data *pdata;
};
struct imx95_blk_ctl_clk_dev_data {
@@ -349,7 +350,6 @@ static const struct imx95_blk_ctl_dev_data imx94_dispmix_csr_dev_data = {
static int imx95_bc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct imx95_blk_ctl_dev_data *bc_data;
struct imx95_blk_ctl *bc;
struct clk_hw_onecell_data *clk_hw_data;
struct clk_hw **hws;
@@ -379,25 +379,25 @@ static int imx95_bc_probe(struct platform_device *pdev)
return ret;
}
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ bc->pdata = of_device_get_match_data(dev);
+ if (!bc->pdata)
return devm_of_platform_populate(dev);
- clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc_data->num_clks),
+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, bc->pdata->num_clks),
GFP_KERNEL);
if (!clk_hw_data)
return -ENOMEM;
- if (bc_data->rpm_enabled) {
+ if (bc->pdata->rpm_enabled) {
devm_pm_runtime_enable(&pdev->dev);
pm_runtime_resume_and_get(&pdev->dev);
}
- clk_hw_data->num = bc_data->num_clks;
+ clk_hw_data->num = bc->pdata->num_clks;
hws = clk_hw_data->hws;
- for (i = 0; i < bc_data->num_clks; i++) {
- const struct imx95_blk_ctl_clk_dev_data *data = &bc_data->clk_dev_data[i];
+ for (i = 0; i < bc->pdata->num_clks; i++) {
+ const struct imx95_blk_ctl_clk_dev_data *data = &bc->pdata->clk_dev_data[i];
void __iomem *reg = base + data->reg;
if (data->type == CLK_MUX) {
@@ -439,7 +439,7 @@ static int imx95_bc_probe(struct platform_device *pdev)
return 0;
cleanup:
- for (i = 0; i < bc_data->num_clks; i++) {
+ for (i = 0; i < bc->pdata->num_clks; i++) {
if (IS_ERR_OR_NULL(hws[i]))
continue;
clk_hw_unregister(hws[i]);
@@ -453,15 +453,24 @@ static int imx95_bc_runtime_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
clk_disable_unprepare(bc->clk_apb);
+
return 0;
}
static int imx95_bc_runtime_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
+ int ret;
- return clk_prepare_enable(bc->clk_apb);
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
+
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
+
+ return 0;
}
#endif
@@ -469,22 +478,12 @@ static int imx95_bc_runtime_resume(struct device *dev)
static int imx95_bc_suspend(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
- int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- if (bc_data->rpm_enabled) {
- ret = pm_runtime_get_sync(bc->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bc->dev);
- return ret;
- }
- }
-
- bc->clk_reg_restore = readl(bc->base + bc_data->clk_reg_offset);
+ bc->clk_reg_restore = readl(bc->base + bc->pdata->clk_reg_offset);
+ clk_disable_unprepare(bc->clk_apb);
return 0;
}
@@ -492,16 +491,16 @@ static int imx95_bc_suspend(struct device *dev)
static int imx95_bc_resume(struct device *dev)
{
struct imx95_blk_ctl *bc = dev_get_drvdata(dev);
- const struct imx95_blk_ctl_dev_data *bc_data;
+ int ret;
- bc_data = of_device_get_match_data(dev);
- if (!bc_data)
+ if (pm_runtime_suspended(dev))
return 0;
- writel(bc->clk_reg_restore, bc->base + bc_data->clk_reg_offset);
+ ret = clk_prepare_enable(bc->clk_apb);
+ if (ret)
+ return ret;
- if (bc_data->rpm_enabled)
- pm_runtime_put(bc->dev);
+ writel(bc->clk_reg_restore, bc->base + bc->pdata->clk_reg_offset);
return 0;
}
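For reference, the runtime and system sleep callbacks in this file are normally tied together through a dev_pm_ops table; a sketch of the usual wiring (the file's actual table is outside this hunk and may differ):

	static const struct dev_pm_ops imx95_bc_pm_ops = {
		SET_RUNTIME_PM_OPS(imx95_bc_runtime_suspend,
				   imx95_bc_runtime_resume, NULL)
		SET_SYSTEM_SLEEP_PM_OPS(imx95_bc_suspend, imx95_bc_resume)
	};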
diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
index 9e11f1c7c397..41eb38552a9c 100644
--- a/drivers/clk/imx/clk-vf610.c
+++ b/drivers/clk/imx/clk-vf610.c
@@ -139,7 +139,7 @@ static struct clk * __init vf610_get_fixed_clock(
return clk;
};
-static int vf610_clk_suspend(void)
+static int vf610_clk_suspend(void *data)
{
int i;
@@ -156,7 +156,7 @@ static int vf610_clk_suspend(void)
return 0;
}
-static void vf610_clk_resume(void)
+static void vf610_clk_resume(void *data)
{
int i;
@@ -171,11 +171,15 @@ static void vf610_clk_resume(void)
writel_relaxed(ccgr[i], CCM_CCGRx(i));
}
-static struct syscore_ops vf610_clk_syscore_ops = {
+static const struct syscore_ops vf610_clk_syscore_ops = {
.suspend = vf610_clk_suspend,
.resume = vf610_clk_resume,
};
+static struct syscore vf610_clk_syscore = {
+ .ops = &vf610_clk_syscore_ops,
+};
+
static void __init vf610_clocks_init(struct device_node *ccm_node)
{
struct device_node *np;
@@ -462,7 +466,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clk[clks_init_on[i]]);
- register_syscore_ops(&vf610_clk_syscore_ops);
+ register_syscore(&vf610_clk_syscore);
/* Add the clocks to provider list */
clk_data.clks = clk;
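The change above follows the new syscore interface used throughout this series: the const callbacks table is wrapped in a struct syscore instance, and the callbacks gain a void *data context argument. A minimal sketch of the pattern with hypothetical names:

	static int foo_clk_suspend(void *data)
	{
		/* save clock state before the system enters suspend */
		return 0;
	}

	static void foo_clk_resume(void *data)
	{
		/* restore clock state on resume */
	}

	static const struct syscore_ops foo_clk_syscore_ops = {
		.suspend = foo_clk_suspend,
		.resume = foo_clk_resume,
	};

	static struct syscore foo_clk_syscore = {
		.ops = &foo_clk_syscore_ops,
	};

	/* at init time: register_syscore(&foo_clk_syscore); */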
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 0c9c8344ad11..91e7ac0cc334 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -174,14 +174,16 @@ ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
n * od);
}
-static long
-ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *prate)
+static int ingenic_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
- return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
+ req->rate = ingenic_pll_calc(clk_info, req->rate, req->best_parent_rate,
+ NULL, NULL, NULL);
+
+ return 0;
}
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
@@ -317,7 +319,7 @@ static int ingenic_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops ingenic_pll_ops = {
.recalc_rate = ingenic_pll_recalc_rate,
- .round_rate = ingenic_pll_round_rate,
+ .determine_rate = ingenic_pll_determine_rate,
.set_rate = ingenic_pll_set_rate,
.enable = ingenic_pll_enable,
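This is the standard .round_rate to .determine_rate conversion: instead of returning the rounded rate (which cannot express an error cleanly), the callback rewrites req->rate in place and returns 0 or a negative errno. The shape of such a callback, with hypothetical rate thresholds:

	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		/* snap the request to the nearest rate the hardware supports */
		if (req->rate < 24000000)
			req->rate = 12000000;
		else
			req->rate = 48000000;

		return 0;	/* a negative errno would reject the request */
	}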
diff --git a/drivers/clk/ingenic/jz4725b-cgu.c b/drivers/clk/ingenic/jz4725b-cgu.c
index 590e9c85cb25..94cee44c854f 100644
--- a/drivers/clk/ingenic/jz4725b-cgu.c
+++ b/drivers/clk/ingenic/jz4725b-cgu.c
@@ -268,6 +268,6 @@ static void __init jz4725b_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4725b_cgu, "ingenic,jz4725b-cgu", jz4725b_cgu_init);
diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
index 3e0a30574ebb..2def3aedc8dd 100644
--- a/drivers/clk/ingenic/jz4740-cgu.c
+++ b/drivers/clk/ingenic/jz4740-cgu.c
@@ -266,6 +266,6 @@ static void __init jz4740_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-cgu", jz4740_cgu_init);
diff --git a/drivers/clk/ingenic/jz4755-cgu.c b/drivers/clk/ingenic/jz4755-cgu.c
index f2c2d848dab7..17cf5dcaece9 100644
--- a/drivers/clk/ingenic/jz4755-cgu.c
+++ b/drivers/clk/ingenic/jz4755-cgu.c
@@ -337,7 +337,7 @@ static void __init jz4755_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index e407f00bd594..372fe4b07992 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -436,7 +436,7 @@ static void __init jz4760_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/* We only probe via devicetree, no need for a platform driver */
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index 6ae1740367f9..58f1d3bad677 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -456,7 +456,7 @@ static void __init jz4770_cgu_init(struct device_node *np)
if (retval)
pr_err("%s: failed to register CGU Clocks\n", __func__);
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/* We only probe via devicetree, no need for a platform driver */
diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c
index b1dadc0a5e75..1e88aef7ac0f 100644
--- a/drivers/clk/ingenic/jz4780-cgu.c
+++ b/drivers/clk/ingenic/jz4780-cgu.c
@@ -128,19 +128,19 @@ static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long jz4780_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int jz4780_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 15600000)
- return 12000000;
-
- if (req_rate < 21600000)
- return 19200000;
+ if (req->rate < 15600000)
+ req->rate = 12000000;
+ else if (req->rate < 21600000)
+ req->rate = 19200000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- if (req_rate < 36000000)
- return 24000000;
-
- return 48000000;
+ return 0;
}
static int jz4780_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -212,7 +212,7 @@ static int jz4780_otg_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops jz4780_otg_phy_ops = {
.recalc_rate = jz4780_otg_phy_recalc_rate,
- .round_rate = jz4780_otg_phy_round_rate,
+ .determine_rate = jz4780_otg_phy_determine_rate,
.set_rate = jz4780_otg_phy_set_rate,
.enable = jz4780_otg_phy_enable,
@@ -803,6 +803,6 @@ static void __init jz4780_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
CLK_OF_DECLARE_DRIVER(jz4780_cgu, "ingenic,jz4780-cgu", jz4780_cgu_init);
diff --git a/drivers/clk/ingenic/pm.c b/drivers/clk/ingenic/pm.c
index 341752b640d2..206d5cf2872f 100644
--- a/drivers/clk/ingenic/pm.c
+++ b/drivers/clk/ingenic/pm.c
@@ -15,7 +15,7 @@
static void __iomem * __maybe_unused ingenic_cgu_base;
-static int __maybe_unused ingenic_cgu_pm_suspend(void)
+static int __maybe_unused ingenic_cgu_pm_suspend(void *data)
{
u32 val = readl(ingenic_cgu_base + CGU_REG_LCR);
@@ -24,22 +24,26 @@ static int __maybe_unused ingenic_cgu_pm_suspend(void)
return 0;
}
-static void __maybe_unused ingenic_cgu_pm_resume(void)
+static void __maybe_unused ingenic_cgu_pm_resume(void *data)
{
u32 val = readl(ingenic_cgu_base + CGU_REG_LCR);
writel(val & ~LCR_LOW_POWER_MODE, ingenic_cgu_base + CGU_REG_LCR);
}
-static struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = {
+static const struct syscore_ops __maybe_unused ingenic_cgu_pm_ops = {
.suspend = ingenic_cgu_pm_suspend,
.resume = ingenic_cgu_pm_resume,
};
-void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu)
+static struct syscore __maybe_unused ingenic_cgu_pm = {
+ .ops = &ingenic_cgu_pm_ops,
+};
+
+void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu)
{
if (IS_ENABLED(CONFIG_PM_SLEEP)) {
ingenic_cgu_base = cgu->base;
- register_syscore_ops(&ingenic_cgu_pm_ops);
+ register_syscore(&ingenic_cgu_pm);
}
}
diff --git a/drivers/clk/ingenic/pm.h b/drivers/clk/ingenic/pm.h
index fa7540407b6b..0dcb57dc64cb 100644
--- a/drivers/clk/ingenic/pm.h
+++ b/drivers/clk/ingenic/pm.h
@@ -7,6 +7,6 @@
struct ingenic_cgu;
-void ingenic_cgu_register_syscore_ops(struct ingenic_cgu *cgu);
+void ingenic_cgu_register_syscore(struct ingenic_cgu *cgu);
#endif /* DRIVERS_CLK_INGENIC_PM_H */
diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c
index 7d04ef40b7cf..bc6a51da2072 100644
--- a/drivers/clk/ingenic/tcu.c
+++ b/drivers/clk/ingenic/tcu.c
@@ -455,7 +455,7 @@ err_free_tcu:
return ret;
}
-static int __maybe_unused tcu_pm_suspend(void)
+static int __maybe_unused tcu_pm_suspend(void *data)
{
struct ingenic_tcu *tcu = ingenic_tcu;
@@ -465,7 +465,7 @@ static int __maybe_unused tcu_pm_suspend(void)
return 0;
}
-static void __maybe_unused tcu_pm_resume(void)
+static void __maybe_unused tcu_pm_resume(void *data)
{
struct ingenic_tcu *tcu = ingenic_tcu;
@@ -473,11 +473,15 @@ static void __maybe_unused tcu_pm_resume(void)
clk_enable(tcu->clk);
}
-static struct syscore_ops __maybe_unused tcu_pm_ops = {
+static const struct syscore_ops __maybe_unused tcu_pm_ops = {
.suspend = tcu_pm_suspend,
.resume = tcu_pm_resume,
};
+static struct syscore __maybe_unused tcu_pm = {
+ .ops = &tcu_pm_ops,
+};
+
static void __init ingenic_tcu_init(struct device_node *np)
{
int ret = ingenic_tcu_probe(np);
@@ -486,7 +490,7 @@ static void __init ingenic_tcu_init(struct device_node *np)
pr_crit("Failed to initialize TCU clocks: %d\n", ret);
if (IS_ENABLED(CONFIG_PM_SLEEP))
- register_syscore_ops(&tcu_pm_ops);
+ register_syscore(&tcu_pm);
}
CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-tcu", ingenic_tcu_init);
diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c
index feb03eed4fe8..d89bdfb7c219 100644
--- a/drivers/clk/ingenic/x1000-cgu.c
+++ b/drivers/clk/ingenic/x1000-cgu.c
@@ -84,16 +84,17 @@ static unsigned long x1000_otg_phy_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long x1000_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int x1000_otg_phy_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (req_rate < 18000000)
- return 12000000;
-
- if (req_rate < 36000000)
- return 24000000;
+ if (req->rate < 18000000)
+ req->rate = 12000000;
+ else if (req->rate < 36000000)
+ req->rate = 24000000;
+ else
+ req->rate = 48000000;
- return 48000000;
+ return 0;
}
static int x1000_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -161,7 +162,7 @@ static int x1000_usb_phy_is_enabled(struct clk_hw *hw)
static const struct clk_ops x1000_otg_phy_ops = {
.recalc_rate = x1000_otg_phy_recalc_rate,
- .round_rate = x1000_otg_phy_round_rate,
+ .determine_rate = x1000_otg_phy_determine_rate,
.set_rate = x1000_otg_phy_set_rate,
.enable = x1000_usb_phy_enable,
@@ -555,7 +556,7 @@ static void __init x1000_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c
index 0fd46e50a513..acf856e5009e 100644
--- a/drivers/clk/ingenic/x1830-cgu.c
+++ b/drivers/clk/ingenic/x1830-cgu.c
@@ -463,7 +463,7 @@ static void __init x1830_cgu_init(struct device_node *np)
return;
}
- ingenic_cgu_register_syscore_ops(cgu);
+ ingenic_cgu_register_syscore(cgu);
}
/*
* CGU has some children devices, this is useful for probing children devices
diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c
index c5894fc9395e..9d5071223f4c 100644
--- a/drivers/clk/keystone/sci-clk.c
+++ b/drivers/clk/keystone/sci-clk.c
@@ -480,13 +480,10 @@ static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
num_clks++;
}
- provider->clocks = devm_kmalloc_array(dev, num_clks, sizeof(sci_clk),
- GFP_KERNEL);
+ provider->clocks = devm_kmemdup_array(dev, clks, num_clks, sizeof(sci_clk), GFP_KERNEL);
if (!provider->clocks)
return -ENOMEM;
- memcpy(provider->clocks, clks, num_clks * sizeof(sci_clk));
-
provider->num_clocks = num_clks;
devm_kfree(dev, clks);
@@ -499,8 +496,8 @@ static int ti_sci_scan_clocks_from_fw(struct sci_clk_provider *provider)
static int _cmp_sci_clk_list(void *priv, const struct list_head *a,
const struct list_head *b)
{
- struct sci_clk *ca = container_of(a, struct sci_clk, node);
- struct sci_clk *cb = container_of(b, struct sci_clk, node);
+ const struct sci_clk *ca = container_of(a, struct sci_clk, node);
+ const struct sci_clk *cb = container_of(b, struct sci_clk, node);
return _cmp_sci_clk(ca, &cb);
}
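devm_kmemdup_array() folds the former allocate-plus-memcpy pair into a single call; its arguments mirror devm_kmalloc_array() with the source buffer added. The before/after shape, as a sketch:

	/* before: two steps, with manual size math repeated in the copy */
	dst = devm_kmalloc_array(dev, n, sizeof(*src), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;
	memcpy(dst, src, n * sizeof(*src));

	/* after: one step, same devres-managed lifetime */
	dst = devm_kmemdup_array(dev, src, n, sizeof(*src), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;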
diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c
index c509929da854..ecf180a7949c 100644
--- a/drivers/clk/keystone/syscon-clk.c
+++ b/drivers/clk/keystone/syscon-clk.c
@@ -129,7 +129,7 @@ static int ti_syscon_gate_clk_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- regmap = regmap_init_mmio(dev, base, &ti_syscon_regmap_cfg);
+ regmap = devm_regmap_init_mmio(dev, base, &ti_syscon_regmap_cfg);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
"failed to get regmap\n");
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 5f8e6d68fa14..0e8dd82aa84e 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -1002,6 +1002,77 @@ config COMMON_CLK_MT8195_VENCSYS
help
This driver supports MediaTek MT8195 vencsys clocks.
+config COMMON_CLK_MT8196
+ tristate "Clock driver for MediaTek MT8196"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8196 basic clocks.
+
+config COMMON_CLK_MT8196_IMP_IIC_WRAP
+ tristate "Clock driver for MediaTek MT8196 imp_iic_wrap"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 I2C clocks.
+
+config COMMON_CLK_MT8196_MCUSYS
+ tristate "Clock driver for MediaTek MT8196 mcusys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mcusys clocks.
+
+config COMMON_CLK_MT8196_MDPSYS
+ tristate "Clock driver for MediaTek MT8196 mdpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 mdpsys clocks.
+
+config COMMON_CLK_MT8196_MFGCFG
+ tristate "Clock driver for MediaTek MT8196 mfgcfg"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mfgcfg clocks.
+
+config COMMON_CLK_MT8196_MMSYS
+ tristate "Clock driver for MediaTek MT8196 mmsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 mmsys clocks.
+
+config COMMON_CLK_MT8196_PEXTPSYS
+ tristate "Clock driver for MediaTek MT8196 pextpsys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 pextpsys clocks.
+
+config COMMON_CLK_MT8196_UFSSYS
+ tristate "Clock driver for MediaTek MT8196 ufssys"
+ depends on COMMON_CLK_MT8196
+ default COMMON_CLK_MT8196
+ help
+ This driver supports MediaTek MT8196 ufssys clocks.
+
+config COMMON_CLK_MT8196_VDECSYS
+ tristate "Clock driver for MediaTek MT8196 vdecsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vdecsys clocks.
+
+config COMMON_CLK_MT8196_VENCSYS
+ tristate "Clock driver for MediaTek MT8196 vencsys"
+ depends on COMMON_CLK_MT8196
+ default m
+ help
+ This driver supports MediaTek MT8196 vencsys clocks.
+
config COMMON_CLK_MT8365
tristate "Clock driver for MediaTek MT8365"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 6efec95406bd..d8736a060dbd 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -150,6 +150,19 @@ obj-$(CONFIG_COMMON_CLK_MT8195_VDOSYS) += clk-mt8195-vdo0.o clk-mt8195-vdo1.o
obj-$(CONFIG_COMMON_CLK_MT8195_VENCSYS) += clk-mt8195-venc.o
obj-$(CONFIG_COMMON_CLK_MT8195_VPPSYS) += clk-mt8195-vpp0.o clk-mt8195-vpp1.o
obj-$(CONFIG_COMMON_CLK_MT8195_WPESYS) += clk-mt8195-wpe.o
+obj-$(CONFIG_COMMON_CLK_MT8196) += clk-mt8196-apmixedsys.o clk-mt8196-topckgen.o \
+ clk-mt8196-topckgen2.o clk-mt8196-vlpckgen.o \
+ clk-mt8196-peri_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_IMP_IIC_WRAP) += clk-mt8196-imp_iic_wrap.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MCUSYS) += clk-mt8196-mcu.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MDPSYS) += clk-mt8196-mdpsys.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MFGCFG) += clk-mt8196-mfg.o
+obj-$(CONFIG_COMMON_CLK_MT8196_MMSYS) += clk-mt8196-disp0.o clk-mt8196-disp1.o clk-mt8196-vdisp_ao.o \
+ clk-mt8196-ovl0.o clk-mt8196-ovl1.o
+obj-$(CONFIG_COMMON_CLK_MT8196_PEXTPSYS) += clk-mt8196-pextp.o
+obj-$(CONFIG_COMMON_CLK_MT8196_UFSSYS) += clk-mt8196-ufs_ao.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VDECSYS) += clk-mt8196-vdec.o
+obj-$(CONFIG_COMMON_CLK_MT8196_VENCSYS) += clk-mt8196-venc.o
obj-$(CONFIG_COMMON_CLK_MT8365) += clk-mt8365-apmixedsys.o clk-mt8365.o
obj-$(CONFIG_COMMON_CLK_MT8365_APU) += clk-mt8365-apu.o
obj-$(CONFIG_COMMON_CLK_MT8365_CAM) += clk-mt8365-cam.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index 67d9e741c5e7..f6b1429ff757 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -5,6 +5,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/dev_printk.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/printk.h>
@@ -12,15 +13,14 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include "clk-mtk.h"
#include "clk-gate.h"
struct mtk_clk_gate {
struct clk_hw hw;
struct regmap *regmap;
- int set_ofs;
- int clr_ofs;
- int sta_ofs;
- u8 bit;
+ struct regmap *regmap_hwv;
+ const struct mtk_gate *gate;
};
static inline struct mtk_clk_gate *to_mtk_clk_gate(struct clk_hw *hw)
@@ -33,9 +33,9 @@ static u32 mtk_get_clockgating(struct clk_hw *hw)
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
- regmap_read(cg->regmap, cg->sta_ofs, &val);
+ regmap_read(cg->regmap, cg->gate->regs->sta_ofs, &val);
- return val & BIT(cg->bit);
+ return val & BIT(cg->gate->shift);
}
static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
@@ -52,28 +52,30 @@ static void mtk_cg_set_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->set_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->set_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit));
+ regmap_write(cg->regmap, cg->gate->regs->clr_ofs, BIT(cg->gate->shift));
}
static void mtk_cg_set_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_set_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_set_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static void mtk_cg_clr_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- regmap_clear_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
+ regmap_clear_bits(cg->regmap, cg->gate->regs->sta_ofs,
+ BIT(cg->gate->shift));
}
static int mtk_cg_enable(struct clk_hw *hw)
@@ -100,6 +102,32 @@ static void mtk_cg_disable_inv(struct clk_hw *hw)
mtk_cg_clr_bit(hw);
}
+static int mtk_cg_hwv_set_en(struct clk_hw *hw, bool enable)
+{
+ struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
+ u32 val;
+
+ regmap_write(cg->regmap_hwv,
+ enable ? cg->gate->hwv_regs->set_ofs :
+ cg->gate->hwv_regs->clr_ofs,
+ BIT(cg->gate->shift));
+
+ return regmap_read_poll_timeout_atomic(cg->regmap_hwv,
+ cg->gate->hwv_regs->sta_ofs, val,
+ val & BIT(cg->gate->shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+}
+
+static int mtk_cg_hwv_enable(struct clk_hw *hw)
+{
+ return mtk_cg_hwv_set_en(hw, true);
+}
+
+static void mtk_cg_hwv_disable(struct clk_hw *hw)
+{
+ mtk_cg_hwv_set_en(hw, false);
+}
+
static int mtk_cg_enable_no_setclr(struct clk_hw *hw)
{
mtk_cg_clr_bit_no_setclr(hw);
@@ -124,6 +152,15 @@ static void mtk_cg_disable_inv_no_setclr(struct clk_hw *hw)
mtk_cg_clr_bit_no_setclr(hw);
}
+static bool mtk_cg_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_clk_gate_hwv_ops_setclr ||
+ ops == &mtk_clk_gate_hwv_ops_setclr_inv)
+ return true;
+
+ return false;
+}
+
const struct clk_ops mtk_clk_gate_ops_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable,
@@ -138,6 +175,20 @@ const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_setclr_inv);
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr = {
+ .is_enabled = mtk_cg_bit_is_cleared,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr);
+
+const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv = {
+ .is_enabled = mtk_cg_bit_is_set,
+ .enable = mtk_cg_hwv_enable,
+ .disable = mtk_cg_hwv_disable,
+};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_hwv_ops_setclr_inv);
+
const struct clk_ops mtk_clk_gate_ops_no_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable_no_setclr,
@@ -152,12 +203,10 @@ const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
};
EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
-static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name,
- const char *parent_name,
- struct regmap *regmap, int set_ofs,
- int clr_ofs, int sta_ofs, u8 bit,
- const struct clk_ops *ops,
- unsigned long flags)
+static struct clk_hw *mtk_clk_register_gate(struct device *dev,
+ const struct mtk_gate *gate,
+ struct regmap *regmap,
+ struct regmap *regmap_hwv)
{
struct mtk_clk_gate *cg;
int ret;
@@ -167,18 +216,19 @@ static struct clk_hw *mtk_clk_register_gate(struct device *dev, const char *name
if (!cg)
return ERR_PTR(-ENOMEM);
- init.name = name;
- init.flags = flags | CLK_SET_RATE_PARENT;
- init.parent_names = parent_name ? &parent_name : NULL;
- init.num_parents = parent_name ? 1 : 0;
- init.ops = ops;
+ init.name = gate->name;
+ init.flags = gate->flags | CLK_SET_RATE_PARENT;
+ init.parent_names = gate->parent_name ? &gate->parent_name : NULL;
+ init.num_parents = gate->parent_name ? 1 : 0;
+ init.ops = gate->ops;
+ if (mtk_cg_uses_hwv(init.ops) && !regmap_hwv)
+ return dev_err_ptr_probe(
+ dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
cg->regmap = regmap;
- cg->set_ofs = set_ofs;
- cg->clr_ofs = clr_ofs;
- cg->sta_ofs = sta_ofs;
- cg->bit = bit;
-
+ cg->regmap_hwv = regmap_hwv;
+ cg->gate = gate;
cg->hw.init = &init;
ret = clk_hw_register(dev, &cg->hw);
@@ -209,6 +259,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
int i;
struct clk_hw *hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
if (!clk_data)
return -ENOMEM;
@@ -219,6 +270,12 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
return PTR_ERR(regmap);
}
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_gate *gate = &clks[i];
@@ -228,13 +285,7 @@ int mtk_clk_register_gates(struct device *dev, struct device_node *node,
continue;
}
- hw = mtk_clk_register_gate(dev, gate->name, gate->parent_name,
- regmap,
- gate->regs->set_ofs,
- gate->regs->clr_ofs,
- gate->regs->sta_ofs,
- gate->shift, gate->ops,
- gate->flags);
+ hw = mtk_clk_register_gate(dev, gate, regmap, regmap_hwv);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", gate->name,
diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h
index 1a46b4c56fc5..4f05b9855dae 100644
--- a/drivers/clk/mediatek/clk-gate.h
+++ b/drivers/clk/mediatek/clk-gate.h
@@ -19,6 +19,8 @@ extern const struct clk_ops mtk_clk_gate_ops_setclr;
extern const struct clk_ops mtk_clk_gate_ops_setclr_inv;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr;
extern const struct clk_ops mtk_clk_gate_ops_no_setclr_inv;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr;
+extern const struct clk_ops mtk_clk_gate_hwv_ops_setclr_inv;
struct mtk_gate_regs {
u32 sta_ofs;
@@ -31,6 +33,7 @@ struct mtk_gate {
const char *name;
const char *parent_name;
const struct mtk_gate_regs *regs;
+ const struct mtk_gate_regs *hwv_regs;
int shift;
const struct clk_ops *ops;
unsigned long flags;
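A gate that should be controlled through the hardware voter supplies both register triples and picks the hwv ops; everything else looks like an ordinary gate. A hypothetical table entry:

	static const struct mtk_gate_regs foo_cg_regs = {
		.set_ofs = 0x104,
		.clr_ofs = 0x108,
		.sta_ofs = 0x100,
	};

	static const struct mtk_gate_regs foo_hwv_regs = {
		.set_ofs = 0x0000,
		.clr_ofs = 0x0004,
		.sta_ofs = 0x2c00,
	};

	static const struct mtk_gate foo_gates[] = {
		{
			.id = CLK_FOO,			/* hypothetical ID */
			.name = "foo",
			.parent_name = "foo_parent",
			.regs = &foo_cg_regs,
			.hwv_regs = &foo_hwv_regs,
			.shift = 0,
			.ops = &mtk_clk_gate_hwv_ops_setclr,
			.flags = CLK_OPS_PARENT_ENABLE,
		},
	};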
diff --git a/drivers/clk/mediatek/clk-mt7622-aud.c b/drivers/clk/mediatek/clk-mt7622-aud.c
index 931a0598e598..a4ea5e20efa2 100644
--- a/drivers/clk/mediatek/clk-mt7622-aud.c
+++ b/drivers/clk/mediatek/clk-mt7622-aud.c
@@ -75,6 +75,7 @@ static const struct mtk_gate audio_clks[] = {
GATE_AUDIO1(CLK_AUDIO_A1SYS, "audio_a1sys", "a1sys_hp_sel", 21),
GATE_AUDIO1(CLK_AUDIO_A2SYS, "audio_a2sys", "a2sys_hp_sel", 22),
GATE_AUDIO1(CLK_AUDIO_AFE_CONN, "audio_afe_conn", "a1sys_hp_sel", 23),
+ GATE_AUDIO1(CLK_AUDIO_AFE_MRGIF, "audio_afe_mrgif", "aud_mux1_sel", 25),
/* AUDIO2 */
GATE_AUDIO2(CLK_AUDIO_UL1, "audio_ul1", "a1sys_hp_sel", 0),
GATE_AUDIO2(CLK_AUDIO_UL2, "audio_ul2", "a1sys_hp_sel", 1),
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
index bb648a88e43a..ad47fdb23460 100644
--- a/drivers/clk/mediatek/clk-mt8195-infra_ao.c
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -103,7 +103,7 @@ static const struct mtk_gate infra_ao_clks[] = {
GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28),
GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
/* INFRA_AO1 */
- GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "top_hdmi_xtal", 0),
GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2),
GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
diff --git a/drivers/clk/mediatek/clk-mt8196-apmixedsys.c b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
new file mode 100644
index 000000000000..617f5449b88b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-apmixedsys.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+/* APMIXEDSYS PLL control register offsets */
+#define MAINPLL_CON0 0x250
+#define MAINPLL_CON1 0x254
+#define UNIVPLL_CON0 0x264
+#define UNIVPLL_CON1 0x268
+#define MSDCPLL_CON0 0x278
+#define MSDCPLL_CON1 0x27c
+#define ADSPPLL_CON0 0x28c
+#define ADSPPLL_CON1 0x290
+#define EMIPLL_CON0 0x2a0
+#define EMIPLL_CON1 0x2a4
+#define EMIPLL2_CON0 0x2b4
+#define EMIPLL2_CON1 0x2b8
+#define NET1PLL_CON0 0x2c8
+#define NET1PLL_CON1 0x2cc
+#define SGMIIPLL_CON0 0x2dc
+#define SGMIIPLL_CON1 0x2e0
+
+/* APMIXEDSYS_GP2 PLL control register offsets */
+#define MAINPLL2_CON0 0x250
+#define MAINPLL2_CON1 0x254
+#define UNIVPLL2_CON0 0x264
+#define UNIVPLL2_CON1 0x268
+#define MMPLL2_CON0 0x278
+#define MMPLL2_CON1 0x27c
+#define IMGPLL_CON0 0x28c
+#define IMGPLL_CON1 0x290
+#define TVDPLL1_CON0 0x2a0
+#define TVDPLL1_CON1 0x2a4
+#define TVDPLL2_CON0 0x2b4
+#define TVDPLL2_CON1 0x2b8
+#define TVDPLL3_CON0 0x2c8
+#define TVDPLL3_CON1 0x2cc
+
+#define PLLEN_ALL 0x080
+#define PLLEN_ALL_SET 0x084
+#define PLLEN_ALL_CLR 0x088
+
+#define FENC_STATUS_CON0 0x03c
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = PLLEN_ALL, \
+ .en_set_reg = PLLEN_ALL_SET, \
+ .en_clr_reg = PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
+struct mtk_pll_desc {
+ const struct mtk_pll_data *clks;
+ size_t num_clks;
+};
+
+static const struct mtk_pll_data apmixed_plls[] = {
+ PLL_FENC(CLK_APMIXED_MAINPLL, "mainpll", MAINPLL_CON0, FENC_STATUS_CON0,
+ 7, PLL_AO, MAINPLL_CON1, 24, MAINPLL_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED_UNIVPLL, "univpll", UNIVPLL_CON0, FENC_STATUS_CON0,
+ 6, 0, UNIVPLL_CON1, 24, UNIVPLL_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED_MSDCPLL, "msdcpll", MSDCPLL_CON0, FENC_STATUS_CON0,
+ 5, 0, MSDCPLL_CON1, 24, MSDCPLL_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED_ADSPPLL, "adsppll", ADSPPLL_CON0, FENC_STATUS_CON0,
+ 4, 0, ADSPPLL_CON1, 24, ADSPPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED_EMIPLL, "emipll", EMIPLL_CON0, FENC_STATUS_CON0, 3,
+ PLL_AO, EMIPLL_CON1, 24, EMIPLL_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED_EMIPLL2, "emipll2", EMIPLL2_CON0, FENC_STATUS_CON0,
+ 2, PLL_AO, EMIPLL2_CON1, 24, EMIPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED_NET1PLL, "net1pll", NET1PLL_CON0, FENC_STATUS_CON0,
+ 1, 0, NET1PLL_CON1, 24, NET1PLL_CON1, 0, 22, 6),
+ PLL_FENC(CLK_APMIXED_SGMIIPLL, "sgmiipll", SGMIIPLL_CON0, FENC_STATUS_CON0,
+ 0, 0, SGMIIPLL_CON1, 24, SGMIIPLL_CON1, 0, 22, 7),
+};
+
+static const struct mtk_pll_desc apmixed_desc = {
+ .clks = apmixed_plls,
+ .num_clks = ARRAY_SIZE(apmixed_plls),
+};
+
+static const struct mtk_pll_data apmixed2_plls[] = {
+ PLL_FENC(CLK_APMIXED2_MAINPLL2, "mainpll2", MAINPLL2_CON0, FENC_STATUS_CON0,
+ 6, 0, MAINPLL2_CON1, 24, MAINPLL2_CON1, 0, 22, 0),
+ PLL_FENC(CLK_APMIXED2_UNIVPLL2, "univpll2", UNIVPLL2_CON0, FENC_STATUS_CON0,
+ 5, 0, UNIVPLL2_CON1, 24, UNIVPLL2_CON1, 0, 22, 1),
+ PLL_FENC(CLK_APMIXED2_MMPLL2, "mmpll2", MMPLL2_CON0, FENC_STATUS_CON0,
+ 4, 0, MMPLL2_CON1, 24, MMPLL2_CON1, 0, 22, 2),
+ PLL_FENC(CLK_APMIXED2_IMGPLL, "imgpll", IMGPLL_CON0, FENC_STATUS_CON0,
+ 3, 0, IMGPLL_CON1, 24, IMGPLL_CON1, 0, 22, 3),
+ PLL_FENC(CLK_APMIXED2_TVDPLL1, "tvdpll1", TVDPLL1_CON0, FENC_STATUS_CON0,
+ 2, 0, TVDPLL1_CON1, 24, TVDPLL1_CON1, 0, 22, 4),
+ PLL_FENC(CLK_APMIXED2_TVDPLL2, "tvdpll2", TVDPLL2_CON0, FENC_STATUS_CON0,
+ 1, 0, TVDPLL2_CON1, 24, TVDPLL2_CON1, 0, 22, 5),
+ PLL_FENC(CLK_APMIXED2_TVDPLL3, "tvdpll3", TVDPLL3_CON0, FENC_STATUS_CON0,
+ 0, 0, TVDPLL3_CON1, 24, TVDPLL3_CON1, 0, 22, 6),
+};
+
+static const struct mtk_pll_desc apmixed2_desc = {
+ .clks = apmixed2_plls,
+ .num_clks = ARRAY_SIZE(apmixed2_plls),
+};
+
+static int clk_mt8196_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const struct mtk_pll_desc *mcd;
+ int r;
+
+ mcd = device_get_match_data(&pdev->dev);
+ if (!mcd)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(mcd->num_clks);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, mcd->clks, mcd->num_clks, clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static void clk_mt8196_apmixed_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_desc *mcd = device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(mcd->clks, mcd->num_clks, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_apmixed[] = {
+ { .compatible = "mediatek,mt8196-apmixedsys", .data = &apmixed_desc },
+ { .compatible = "mediatek,mt8196-apmixedsys-gp2",
+ .data = &apmixed2_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_apmixed);
+
+static struct platform_driver clk_mt8196_apmixed_drv = {
+ .probe = clk_mt8196_apmixed_probe,
+ .remove = clk_mt8196_apmixed_remove,
+ .driver = {
+ .name = "clk-mt8196-apmixed",
+ .of_match_table = of_match_clk_mt8196_apmixed,
+ },
+};
+module_platform_driver(clk_mt8196_apmixed_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 apmixedsys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-disp0.c b/drivers/clk/mediatek/clk-mt8196-disp0.c
new file mode 100644
index 000000000000..9474aad26e92
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp0.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm0_hwv_regs = {
+ .set_ofs = 0x0020,
+ .clr_ofs = 0x0024,
+ .sta_ofs = 0x2c10,
+};
+
+static const struct mtk_gate_regs mm1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm1_hwv_regs = {
+ .set_ofs = 0x0028,
+ .clr_ofs = 0x002c,
+ .sta_ofs = 0x2c14,
+};
+
+#define GATE_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm0_cg_regs, \
+ .hwv_regs = &mm0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE \
+ }
+
+#define GATE_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm1_cg_regs, \
+ .hwv_regs = &mm1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate mm_clks[] = {
+ /* MM0 */
+ GATE_HWV_MM0(CLK_MM_CONFIG, "mm_config", "disp", 0),
+ GATE_HWV_MM0(CLK_MM_DISP_MUTEX0, "mm_disp_mutex0", "disp", 1),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL0, "mm_disp_aal0", "disp", 2),
+ GATE_HWV_MM0(CLK_MM_DISP_AAL1, "mm_disp_aal1", "disp", 3),
+ GATE_MM0(CLK_MM_DISP_C3D0, "mm_disp_c3d0", "disp", 4),
+ GATE_MM0(CLK_MM_DISP_C3D1, "mm_disp_c3d1", "disp", 5),
+ GATE_MM0(CLK_MM_DISP_C3D2, "mm_disp_c3d2", "disp", 6),
+ GATE_MM0(CLK_MM_DISP_C3D3, "mm_disp_c3d3", "disp", 7),
+ GATE_MM0(CLK_MM_DISP_CCORR0, "mm_disp_ccorr0", "disp", 8),
+ GATE_MM0(CLK_MM_DISP_CCORR1, "mm_disp_ccorr1", "disp", 9),
+ GATE_MM0(CLK_MM_DISP_CCORR2, "mm_disp_ccorr2", "disp", 10),
+ GATE_MM0(CLK_MM_DISP_CCORR3, "mm_disp_ccorr3", "disp", 11),
+ GATE_MM0(CLK_MM_DISP_CHIST0, "mm_disp_chist0", "disp", 12),
+ GATE_MM0(CLK_MM_DISP_CHIST1, "mm_disp_chist1", "disp", 13),
+ GATE_MM0(CLK_MM_DISP_COLOR0, "mm_disp_color0", "disp", 14),
+ GATE_MM0(CLK_MM_DISP_COLOR1, "mm_disp_color1", "disp", 15),
+ GATE_MM0(CLK_MM_DISP_DITHER0, "mm_disp_dither0", "disp", 16),
+ GATE_MM0(CLK_MM_DISP_DITHER1, "mm_disp_dither1", "disp", 17),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC0, "mm_disp_dli_async0", "disp", 18),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC1, "mm_disp_dli_async1", "disp", 19),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC2, "mm_disp_dli_async2", "disp", 20),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC3, "mm_disp_dli_async3", "disp", 21),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC4, "mm_disp_dli_async4", "disp", 22),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC5, "mm_disp_dli_async5", "disp", 23),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC6, "mm_disp_dli_async6", "disp", 24),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC7, "mm_disp_dli_async7", "disp", 25),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC8, "mm_disp_dli_async8", "disp", 26),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC9, "mm_disp_dli_async9", "disp", 27),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC10, "mm_disp_dli_async10", "disp", 28),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC11, "mm_disp_dli_async11", "disp", 29),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC12, "mm_disp_dli_async12", "disp", 30),
+ GATE_HWV_MM0(CLK_MM_DISP_DLI_ASYNC13, "mm_disp_dli_async13", "disp", 31),
+ /* MM1 */
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC14, "mm_disp_dli_async14", "disp", 0),
+ GATE_HWV_MM1(CLK_MM_DISP_DLI_ASYNC15, "mm_disp_dli_async15", "disp", 1),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC0, "mm_disp_dlo_async0", "disp", 2),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC1, "mm_disp_dlo_async1", "disp", 3),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC2, "mm_disp_dlo_async2", "disp", 4),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC3, "mm_disp_dlo_async3", "disp", 5),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC4, "mm_disp_dlo_async4", "disp", 6),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC5, "mm_disp_dlo_async5", "disp", 7),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC6, "mm_disp_dlo_async6", "disp", 8),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC7, "mm_disp_dlo_async7", "disp", 9),
+ GATE_HWV_MM1(CLK_MM_DISP_DLO_ASYNC8, "mm_disp_dlo_async8", "disp", 10),
+ GATE_MM1(CLK_MM_DISP_GAMMA0, "mm_disp_gamma0", "disp", 11),
+ GATE_MM1(CLK_MM_DISP_GAMMA1, "mm_disp_gamma1", "disp", 12),
+ GATE_MM1(CLK_MM_MDP_AAL0, "mm_mdp_aal0", "disp", 13),
+ GATE_MM1(CLK_MM_MDP_AAL1, "mm_mdp_aal1", "disp", 14),
+ GATE_HWV_MM1(CLK_MM_MDP_RDMA0, "mm_mdp_rdma0", "disp", 15),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK0, "mm_disp_postmask0", "disp", 16),
+ GATE_HWV_MM1(CLK_MM_DISP_POSTMASK1, "mm_disp_postmask1", "disp", 17),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ0, "mm_mdp_rsz0", "disp", 18),
+ GATE_HWV_MM1(CLK_MM_MDP_RSZ1, "mm_mdp_rsz1", "disp", 19),
+ GATE_HWV_MM1(CLK_MM_DISP_SPR0, "mm_disp_spr0", "disp", 20),
+ GATE_MM1(CLK_MM_DISP_TDSHP0, "mm_disp_tdshp0", "disp", 21),
+ GATE_MM1(CLK_MM_DISP_TDSHP1, "mm_disp_tdshp1", "disp", 22),
+ GATE_HWV_MM1(CLK_MM_DISP_WDMA0, "mm_disp_wdma0", "disp", 23),
+ GATE_HWV_MM1(CLK_MM_DISP_Y2R0, "mm_disp_y2r0", "disp", 24),
+ GATE_HWV_MM1(CLK_MM_SMI_SUB_COMM0, "mm_ssc", "disp", 25),
+ GATE_HWV_MM1(CLK_MM_DISP_FAKE_ENG0, "mm_disp_fake_eng0", "disp", 26),
+};
+
+static const struct mtk_clk_desc mm_mcd = {
+ .clks = mm_clks,
+ .num_clks = ARRAY_SIZE(mm_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp0_id_table[] = {
+ { .name = "clk-mt8196-disp0", .driver_data = (kernel_ulong_t)&mm_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp0_id_table);
+
+static struct platform_driver clk_mt8196_disp0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp0",
+ },
+ .id_table = clk_mt8196_disp0_id_table,
+};
+module_platform_driver(clk_mt8196_disp0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp0 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-disp1.c b/drivers/clk/mediatek/clk-mt8196-disp1.c
new file mode 100644
index 000000000000..3bbec79a7010
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-disp1.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm10_hwv_regs = {
+ .set_ofs = 0x0010,
+ .clr_ofs = 0x0014,
+ .sta_ofs = 0x2c08,
+};
+
+static const struct mtk_gate_regs mm11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mm11_hwv_regs = {
+ .set_ofs = 0x0018,
+ .clr_ofs = 0x001c,
+ .sta_ofs = 0x2c0c,
+};
+
+#define GATE_MM10(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm10_cg_regs, \
+ .hwv_regs = &mm10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_MM11(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr,\
+ }
+
+#define GATE_HWV_MM11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm11_cg_regs, \
+ .hwv_regs = &mm11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ }
+
+static const struct mtk_gate mm1_clks[] = {
+ /* MM10 */
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_CONFIG, "mm1_dispsys1_config", "disp", 0),
+ GATE_HWV_MM10(CLK_MM1_DISPSYS1_S_CONFIG, "mm1_dispsys1_s_config", "disp", 1),
+ GATE_HWV_MM10(CLK_MM1_DISP_MUTEX0, "mm1_disp_mutex0", "disp", 2),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC20, "mm1_disp_dli_async20", "disp", 3),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC21, "mm1_disp_dli_async21", "disp", 4),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC22, "mm1_disp_dli_async22", "disp", 5),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC23, "mm1_disp_dli_async23", "disp", 6),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC24, "mm1_disp_dli_async24", "disp", 7),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC25, "mm1_disp_dli_async25", "disp", 8),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC26, "mm1_disp_dli_async26", "disp", 9),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC27, "mm1_disp_dli_async27", "disp", 10),
+ GATE_HWV_MM10(CLK_MM1_DISP_DLI_ASYNC28, "mm1_disp_dli_async28", "disp", 11),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY0, "mm1_disp_relay0", "disp", 12),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY1, "mm1_disp_relay1", "disp", 13),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY2, "mm1_disp_relay2", "disp", 14),
+ GATE_HWV_MM10(CLK_MM1_DISP_RELAY3, "mm1_disp_relay3", "disp", 15),
+ GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF0, "mm1_DP_CLK", "disp", 16),
+ GATE_HWV_MM10(CLK_MM1_DISP_DP_INTF1, "mm1_disp_dp_intf1", "disp", 17),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP0, "mm1_disp_dsc_wrap0", "disp", 18),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP1, "mm1_disp_dsc_wrap1", "disp", 19),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP2, "mm1_disp_dsc_wrap2", "disp", 20),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSC_WRAP3, "mm1_disp_dsc_wrap3", "disp", 21),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI0, "mm1_CLK0", "disp", 22),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI1, "mm1_CLK1", "disp", 23),
+ GATE_HWV_MM10(CLK_MM1_DISP_DSI2, "mm1_CLK2", "disp", 24),
+ GATE_HWV_MM10(CLK_MM1_DISP_DVO0, "mm1_disp_dvo0", "disp", 25),
+ GATE_HWV_MM10(CLK_MM1_DISP_GDMA0, "mm1_disp_gdma0", "disp", 26),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE0, "mm1_disp_merge0", "disp", 27),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE1, "mm1_disp_merge1", "disp", 28),
+ GATE_HWV_MM10(CLK_MM1_DISP_MERGE2, "mm1_disp_merge2", "disp", 29),
+ GATE_HWV_MM10(CLK_MM1_DISP_ODDMR0, "mm1_disp_oddmr0", "disp", 30),
+ GATE_HWV_MM10(CLK_MM1_DISP_POSTALIGN0, "mm1_disp_postalign0", "disp", 31),
+ /* MM11 */
+ GATE_HWV_MM11(CLK_MM1_DISP_DITHER2, "mm1_disp_dither2", "disp", 0),
+ GATE_HWV_MM11(CLK_MM1_DISP_R2Y0, "mm1_disp_r2y0", "disp", 1),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER0, "mm1_disp_splitter0", "disp", 2),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER1, "mm1_disp_splitter1", "disp", 3),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER2, "mm1_disp_splitter2", "disp", 4),
+ GATE_HWV_MM11(CLK_MM1_DISP_SPLITTER3, "mm1_disp_splitter3", "disp", 5),
+ GATE_HWV_MM11(CLK_MM1_DISP_VDCM0, "mm1_disp_vdcm0", "disp", 6),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA1, "mm1_disp_wdma1", "disp", 7),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA2, "mm1_disp_wdma2", "disp", 8),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA3, "mm1_disp_wdma3", "disp", 9),
+ GATE_HWV_MM11(CLK_MM1_DISP_WDMA4, "mm1_disp_wdma4", "disp", 10),
+ GATE_HWV_MM11(CLK_MM1_MDP_RDMA1, "mm1_mdp_rdma1", "disp", 11),
+ GATE_HWV_MM11(CLK_MM1_SMI_LARB0, "mm1_smi_larb0", "disp", 12),
+ GATE_HWV_MM11(CLK_MM1_MOD1, "mm1_mod1", "clk26m", 13),
+ GATE_HWV_MM11(CLK_MM1_MOD2, "mm1_mod2", "clk26m", 14),
+ GATE_HWV_MM11(CLK_MM1_MOD3, "mm1_mod3", "clk26m", 15),
+ GATE_HWV_MM11(CLK_MM1_MOD4, "mm1_mod4", "dp0", 16),
+ GATE_HWV_MM11(CLK_MM1_MOD5, "mm1_mod5", "dp1", 17),
+ GATE_HWV_MM11(CLK_MM1_MOD6, "mm1_mod6", "dp1", 18),
+ GATE_HWV_MM11(CLK_MM1_CG0, "mm1_cg0", "disp", 20),
+ GATE_HWV_MM11(CLK_MM1_CG1, "mm1_cg1", "disp", 21),
+ GATE_HWV_MM11(CLK_MM1_CG2, "mm1_cg2", "disp", 22),
+ GATE_HWV_MM11(CLK_MM1_CG3, "mm1_cg3", "disp", 23),
+ GATE_HWV_MM11(CLK_MM1_CG4, "mm1_cg4", "disp", 24),
+ GATE_HWV_MM11(CLK_MM1_CG5, "mm1_cg5", "disp", 25),
+ GATE_HWV_MM11(CLK_MM1_CG6, "mm1_cg6", "disp", 26),
+ GATE_HWV_MM11(CLK_MM1_CG7, "mm1_cg7", "disp", 27),
+ GATE_HWV_MM11(CLK_MM1_F26M, "mm1_f26m_ck", "clk26m", 28),
+};
+
+static const struct mtk_clk_desc mm1_mcd = {
+ .clks = mm1_clks,
+ .num_clks = ARRAY_SIZE(mm1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_disp1_id_table[] = {
+ { .name = "clk-mt8196-disp1", .driver_data = (kernel_ulong_t)&mm1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_disp1_id_table);
+
+static struct platform_driver clk_mt8196_disp1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-disp1",
+ },
+ .id_table = clk_mt8196_disp1_id_table,
+};
+module_platform_driver(clk_mt8196_disp1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 disp1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
new file mode 100644
index 000000000000..a63241671650
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-imp_iic_wrap.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs imp_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate impc_clks[] = {
+ GATE_IMP(CLK_IMPC_I2C11, "impc_i2c11", "i2c_p", 0),
+ GATE_IMP(CLK_IMPC_I2C12, "impc_i2c12", "i2c_p", 1),
+ GATE_IMP(CLK_IMPC_I2C13, "impc_i2c13", "i2c_p", 2),
+ GATE_IMP(CLK_IMPC_I2C14, "impc_i2c14", "i2c_p", 3),
+};
+
+static const struct mtk_clk_desc impc_mcd = {
+ .clks = impc_clks,
+ .num_clks = ARRAY_SIZE(impc_clks),
+};
+
+static const struct mtk_gate impe_clks[] = {
+ GATE_IMP(CLK_IMPE_I2C5, "impe_i2c5", "i2c_east", 0),
+};
+
+static const struct mtk_clk_desc impe_mcd = {
+ .clks = impe_clks,
+ .num_clks = ARRAY_SIZE(impe_clks),
+};
+
+static const struct mtk_gate_regs impn_hwv_regs = {
+ .set_ofs = 0x0000,
+ .clr_ofs = 0x0004,
+ .sta_ofs = 0x2c00,
+};
+
+#define GATE_HWV_IMPN(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &imp_cg_regs, \
+ .hwv_regs = &impn_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate impn_clks[] = {
+ GATE_IMP(CLK_IMPN_I2C1, "impn_i2c1", "i2c_north", 0),
+ GATE_IMP(CLK_IMPN_I2C2, "impn_i2c2", "i2c_north", 1),
+ GATE_IMP(CLK_IMPN_I2C4, "impn_i2c4", "i2c_north", 2),
+ GATE_HWV_IMPN(CLK_IMPN_I2C7, "impn_i2c7", "i2c_north", 3),
+ GATE_IMP(CLK_IMPN_I2C8, "impn_i2c8", "i2c_north", 4),
+ GATE_IMP(CLK_IMPN_I2C9, "impn_i2c9", "i2c_north", 5),
+};
+
+static const struct mtk_clk_desc impn_mcd = {
+ .clks = impn_clks,
+ .num_clks = ARRAY_SIZE(impn_clks),
+};
+
+static const struct mtk_gate impw_clks[] = {
+ GATE_IMP(CLK_IMPW_I2C0, "impw_i2c0", "i2c_west", 0),
+ GATE_IMP(CLK_IMPW_I2C3, "impw_i2c3", "i2c_west", 1),
+ GATE_IMP(CLK_IMPW_I2C6, "impw_i2c6", "i2c_west", 2),
+ GATE_IMP(CLK_IMPW_I2C10, "impw_i2c10", "i2c_west", 3),
+};
+
+static const struct mtk_clk_desc impw_mcd = {
+ .clks = impw_clks,
+ .num_clks = ARRAY_SIZE(impw_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_imp_iic_wrap[] = {
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-c", .data = &impc_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-e", .data = &impe_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-n", .data = &impn_mcd },
+ { .compatible = "mediatek,mt8196-imp-iic-wrap-w", .data = &impw_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_imp_iic_wrap);
+
+static struct platform_driver clk_mt8196_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8196_imp_iic_wrap,
+ },
+};
+module_platform_driver(clk_mt8196_imp_iic_wrap_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 I2C Wrapper clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mcu.c b/drivers/clk/mediatek/clk-mt8196-mcu.c
new file mode 100644
index 000000000000..5cbcc411ae73
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mcu.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define ARMPLL_LL_CON0 0x008
+#define ARMPLL_LL_CON1 0x00c
+#define ARMPLL_LL_CON2 0x010
+#define ARMPLL_LL_CON3 0x014
+#define ARMPLL_BL_CON0 0x008
+#define ARMPLL_BL_CON1 0x00c
+#define ARMPLL_BL_CON2 0x010
+#define ARMPLL_BL_CON3 0x014
+#define ARMPLL_B_CON0 0x008
+#define ARMPLL_B_CON1 0x00c
+#define ARMPLL_B_CON2 0x010
+#define ARMPLL_B_CON3 0x014
+#define CCIPLL_CON0 0x008
+#define CCIPLL_CON1 0x00c
+#define CCIPLL_CON2 0x010
+#define CCIPLL_CON3 0x014
+#define PTPPLL_CON0 0x008
+#define PTPPLL_CON1 0x00c
+#define PTPPLL_CON2 0x010
+#define PTPPLL_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
+static const struct mtk_pll_data cpu_bl_plls[] = {
+ PLL(CLK_CPBL_ARMPLL_BL, "armpll-bl", ARMPLL_BL_CON0, ARMPLL_BL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_BL_CON1, 24, 0, 0, 0, ARMPLL_BL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_b_plls[] = {
+ PLL(CLK_CPB_ARMPLL_B, "armpll-b", ARMPLL_B_CON0, ARMPLL_B_CON0, 0, 0,
+ PLL_AO, BIT(0), ARMPLL_B_CON1, 24, 0, 0, 0, ARMPLL_B_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cpu_ll_plls[] = {
+ PLL(CLK_CPLL_ARMPLL_LL, "armpll-ll", ARMPLL_LL_CON0, ARMPLL_LL_CON0, 0,
+ 0, PLL_AO, BIT(0), ARMPLL_LL_CON1, 24, 0, 0, 0, ARMPLL_LL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data cci_plls[] = {
+ PLL(CLK_CCIPLL, "ccipll", CCIPLL_CON0, CCIPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), CCIPLL_CON1, 24, 0, 0, 0, CCIPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data ptp_plls[] = {
+ PLL(CLK_PTPPLL, "ptppll", PTPPLL_CON0, PTPPLL_CON0, 0, 0, PLL_AO,
+ BIT(0), PTPPLL_CON1, 24, 0, 0, 0, PTPPLL_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mcu[] = {
+ { .compatible = "mediatek,mt8196-armpll-bl-pll-ctrl",
+ .data = &cpu_bl_plls },
+ { .compatible = "mediatek,mt8196-armpll-b-pll-ctrl",
+ .data = &cpu_b_plls },
+ { .compatible = "mediatek,mt8196-armpll-ll-pll-ctrl",
+ .data = &cpu_ll_plls },
+ { .compatible = "mediatek,mt8196-ccipll-pll-ctrl", .data = &cci_plls },
+ { .compatible = "mediatek,mt8196-ptppll-pll-ctrl", .data = &ptp_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mcu);
+
+static int clk_mt8196_mcu_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mcu_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mcu_drv = {
+ .probe = clk_mt8196_mcu_probe,
+ .remove = clk_mt8196_mcu_remove,
+ .driver = {
+ .name = "clk-mt8196-mcu",
+ .of_match_table = of_match_clk_mt8196_mcu,
+ },
+};
+module_platform_driver(clk_mt8196_mcu_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 mcusys clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mdpsys.c b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
new file mode 100644
index 000000000000..7667d88f0eb0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mdpsys.c
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mdp0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mdp1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs mdp2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
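+/*
+ * Each gate bank above is a write-1-to-set / write-1-to-clear register pair
+ * plus a raw status word; the GATE_MDP*() macros below bind a clock ID to
+ * one bit of a bank through the generic setclr gate ops.
+ */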
+#define GATE_MDP0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp0_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_MDP2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mdp2_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate mdp1_clks[] = {
+ /* MDP1-0 */
+ GATE_MDP0(CLK_MDP1_MDP_MUTEX0, "mdp1_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP1_SMI0, "mdp1_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP1_APB_BUS, "mdp1_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA0, "mdp1_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA1, "mdp1_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP1_MDP_RDMA2, "mdp1_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP1_MDP_BIRSZ0, "mdp1_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP1_MDP_HDR0, "mdp1_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP1_MDP_AAL0, "mdp1_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ0, "mdp1_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP1_MDP_RSZ2, "mdp1_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP1_MDP_TDSHP0, "mdp1_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP1_MDP_COLOR0, "mdp1_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP1_MDP_WROT0, "mdp1_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP1_MDP_WROT1, "mdp1_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP1_MDP_WROT2, "mdp1_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP1_MDP_FAKE_ENG0, "mdp1_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP1_APB_DB, "mdp1_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC0, "mdp1_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC1, "mdp1_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC0, "mdp1_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC1, "mdp1_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP1_MDP_DLI_ASYNC2, "mdp1_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC2, "mdp1_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC3, "mdp1_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP1_IMG_DL_ASYNC0, "mdp1_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP1_MDP_RROT0, "mdp1_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP1_MDP_MERGE0, "mdp1_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP1_MDP_C3D0, "mdp1_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP1_MDP_FG0, "mdp1_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP1_MDP_CLA2, "mdp1_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP1_MDP_DLO_ASYNC4, "mdp1_mdp_dlo_async4", "mdp", 31),
+ /* MDP1-1 */
+ GATE_MDP1(CLK_MDP1_VPP_RSZ0, "mdp1_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP1_VPP_RSZ1, "mdp1_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP1_MDP_DLO_ASYNC5, "mdp1_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP1_IMG0, "mdp1_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP1_F26M, "mdp1_f26m", "clk26m", 27),
+ /* MDP1-2 */
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY0, "mdp1_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP1_IMG_DL_RELAY1, "mdp1_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp1_mcd = {
+ .clks = mdp1_clks,
+ .num_clks = ARRAY_SIZE(mdp1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate mdp_clks[] = {
+ /* MDP0 */
+ GATE_MDP0(CLK_MDP_MDP_MUTEX0, "mdp_mdp_mutex0", "mdp", 0),
+ GATE_MDP0(CLK_MDP_SMI0, "mdp_smi0", "mdp", 1),
+ GATE_MDP0(CLK_MDP_APB_BUS, "mdp_apb_bus", "mdp", 2),
+ GATE_MDP0(CLK_MDP_MDP_RDMA0, "mdp_mdp_rdma0", "mdp", 3),
+ GATE_MDP0(CLK_MDP_MDP_RDMA1, "mdp_mdp_rdma1", "mdp", 4),
+ GATE_MDP0(CLK_MDP_MDP_RDMA2, "mdp_mdp_rdma2", "mdp", 5),
+ GATE_MDP0(CLK_MDP_MDP_BIRSZ0, "mdp_mdp_birsz0", "mdp", 6),
+ GATE_MDP0(CLK_MDP_MDP_HDR0, "mdp_mdp_hdr0", "mdp", 7),
+ GATE_MDP0(CLK_MDP_MDP_AAL0, "mdp_mdp_aal0", "mdp", 8),
+ GATE_MDP0(CLK_MDP_MDP_RSZ0, "mdp_mdp_rsz0", "mdp", 9),
+ GATE_MDP0(CLK_MDP_MDP_RSZ2, "mdp_mdp_rsz2", "mdp", 10),
+ GATE_MDP0(CLK_MDP_MDP_TDSHP0, "mdp_mdp_tdshp0", "mdp", 11),
+ GATE_MDP0(CLK_MDP_MDP_COLOR0, "mdp_mdp_color0", "mdp", 12),
+ GATE_MDP0(CLK_MDP_MDP_WROT0, "mdp_mdp_wrot0", "mdp", 13),
+ GATE_MDP0(CLK_MDP_MDP_WROT1, "mdp_mdp_wrot1", "mdp", 14),
+ GATE_MDP0(CLK_MDP_MDP_WROT2, "mdp_mdp_wrot2", "mdp", 15),
+ GATE_MDP0(CLK_MDP_MDP_FAKE_ENG0, "mdp_mdp_fake_eng0", "mdp", 16),
+ GATE_MDP0(CLK_MDP_APB_DB, "mdp_apb_db", "mdp", 17),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC0, "mdp_mdp_dli_async0", "mdp", 18),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC1, "mdp_mdp_dli_async1", "mdp", 19),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC0, "mdp_mdp_dlo_async0", "mdp", 20),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC1, "mdp_mdp_dlo_async1", "mdp", 21),
+ GATE_MDP0(CLK_MDP_MDP_DLI_ASYNC2, "mdp_mdp_dli_async2", "mdp", 22),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC2, "mdp_mdp_dlo_async2", "mdp", 23),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC3, "mdp_mdp_dlo_async3", "mdp", 24),
+ GATE_MDP0(CLK_MDP_IMG_DL_ASYNC0, "mdp_img_dl_async0", "mdp", 25),
+ GATE_MDP0(CLK_MDP_MDP_RROT0, "mdp_mdp_rrot0", "mdp", 26),
+ GATE_MDP0(CLK_MDP_MDP_MERGE0, "mdp_mdp_merge0", "mdp", 27),
+ GATE_MDP0(CLK_MDP_MDP_C3D0, "mdp_mdp_c3d0", "mdp", 28),
+ GATE_MDP0(CLK_MDP_MDP_FG0, "mdp_mdp_fg0", "mdp", 29),
+ GATE_MDP0(CLK_MDP_MDP_CLA2, "mdp_mdp_cla2", "mdp", 30),
+ GATE_MDP0(CLK_MDP_MDP_DLO_ASYNC4, "mdp_mdp_dlo_async4", "mdp", 31),
+ /* MDP1 */
+ GATE_MDP1(CLK_MDP_VPP_RSZ0, "mdp_vpp_rsz0", "mdp", 0),
+ GATE_MDP1(CLK_MDP_VPP_RSZ1, "mdp_vpp_rsz1", "mdp", 1),
+ GATE_MDP1(CLK_MDP_MDP_DLO_ASYNC5, "mdp_mdp_dlo_async5", "mdp", 2),
+ GATE_MDP1(CLK_MDP_IMG0, "mdp_img0", "mdp", 3),
+ GATE_MDP1(CLK_MDP_F26M, "mdp_f26m", "clk26m", 27),
+ /* MDP2 */
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY0, "mdp_img_dl_relay0", "mdp", 0),
+ GATE_MDP2(CLK_MDP_IMG_DL_RELAY1, "mdp_img_dl_relay1", "mdp", 8),
+};
+
+static const struct mtk_clk_desc mdp_mcd = {
+ .clks = mdp_clks,
+ .num_clks = ARRAY_SIZE(mdp_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_mdpsys[] = {
+ { .compatible = "mediatek,mt8196-mdpsys0", .data = &mdp_mcd },
+ { .compatible = "mediatek,mt8196-mdpsys1", .data = &mdp1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mdpsys);
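+/*
+ * Illustrative consumer usage (a sketch, not part of this patch): the
+ * onecell provider registered by mtk_clk_simple_probe() means a node
+ * matching one of the compatibles above uses #clock-cells = <1>, with
+ * consumers selecting gates by index, e.g.
+ *
+ *	clocks = <&mdpsys0 CLK_MDP_MDP_RDMA0>;
+ *
+ * where &mdpsys0 is a hypothetical label for the mdpsys0 node.
+ */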
+
+static struct platform_driver clk_mt8196_mdpsys_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-mdpsys",
+ .of_match_table = of_match_clk_mt8196_mdpsys,
+ },
+};
+module_platform_driver(clk_mt8196_mdpsys_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Multimedia Data Path clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-mfg.c b/drivers/clk/mediatek/clk-mt8196-mfg.c
new file mode 100644
index 000000000000..ae1eb9de79ae
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-mfg.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-pll.h"
+
+#define MFGPLL_CON0 0x008
+#define MFGPLL_CON1 0x00c
+#define MFGPLL_CON2 0x010
+#define MFGPLL_CON3 0x014
+#define MFGPLL_SC0_CON0 0x008
+#define MFGPLL_SC0_CON1 0x00c
+#define MFGPLL_SC0_CON2 0x010
+#define MFGPLL_SC0_CON3 0x014
+#define MFGPLL_SC1_CON0 0x008
+#define MFGPLL_SC1_CON1 0x00c
+#define MFGPLL_SC1_CON2 0x010
+#define MFGPLL_SC1_CON3 0x014
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
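+/*
+ * PLL() fills mtk_pll_data: pcw_reg/pcw_shift locate the feedback control
+ * word (pcwbits wide, MT8196_INTEGER_BITS of them integer), pd_* locate the
+ * post-divider, and fmin/fmax bound the output range. Roughly, per the
+ * common clk-pll.c math, rate = 26 MHz * pcw / 2^(pcwbits - pcwibits),
+ * further divided by 2^postdiv.
+ */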
+#define PLL(_id, _name, _reg, _en_reg, _en_mask, _pll_en_bit, \
+ _flags, _rst_bar_mask, \
+ _pd_reg, _pd_shift, _tuner_reg, \
+ _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcwbits) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .en_reg = _en_reg, \
+ .en_mask = _en_mask, \
+ .pll_en_bit = _pll_en_bit, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ }
+
+static const struct mtk_pll_data mfg_ao_plls[] = {
+ PLL(CLK_MFG_AO_MFGPLL, "mfgpll", MFGPLL_CON0, MFGPLL_CON0, 0, 0, 0,
+ BIT(0), MFGPLL_CON1, 24, 0, 0, 0,
+ MFGPLL_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc0_ao_plls[] = {
+ PLL(CLK_MFGSC0_AO_MFGPLL_SC0, "mfgpll-sc0", MFGPLL_SC0_CON0,
+ MFGPLL_SC0_CON0, 0, 0, 0, BIT(0), MFGPLL_SC0_CON1, 24, 0, 0, 0,
+ MFGPLL_SC0_CON1, 0, 22),
+};
+
+static const struct mtk_pll_data mfgsc1_ao_plls[] = {
+ PLL(CLK_MFGSC1_AO_MFGPLL_SC1, "mfgpll-sc1", MFGPLL_SC1_CON0,
+ MFGPLL_SC1_CON0, 0, 0, 0, BIT(0), MFGPLL_SC1_CON1, 24, 0, 0, 0,
+ MFGPLL_SC1_CON1, 0, 22),
+};
+
+static const struct of_device_id of_match_clk_mt8196_mfg[] = {
+ { .compatible = "mediatek,mt8196-mfgpll-pll-ctrl",
+ .data = &mfg_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc0-pll-ctrl",
+ .data = &mfgsc0_ao_plls },
+ { .compatible = "mediatek,mt8196-mfgpll-sc1-pll-ctrl",
+ .data = &mfgsc1_ao_plls },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_mfg);
+
+static int clk_mt8196_mfg_probe(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls;
+ struct clk_hw_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ const int num_plls = 1;
+ int r;
+
+ plls = of_device_get_match_data(&pdev->dev);
+ if (!plls)
+ return -EINVAL;
+
+ clk_data = mtk_alloc_clk_data(num_plls);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_plls(node, plls, num_plls, clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(plls, num_plls, clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_mfg_remove(struct platform_device *pdev)
+{
+ const struct mtk_pll_data *plls = of_device_get_match_data(&pdev->dev);
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(plls, 1, clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static struct platform_driver clk_mt8196_mfg_drv = {
+ .probe = clk_mt8196_mfg_probe,
+ .remove = clk_mt8196_mfg_remove,
+ .driver = {
+ .name = "clk-mt8196-mfg",
+ .of_match_table = of_match_clk_mt8196_mfg,
+ },
+};
+module_platform_driver(clk_mt8196_mfg_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 GPU mfg clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl0.c b/drivers/clk/mediatek/clk-mt8196-ovl0.c
new file mode 100644
index 000000000000..d4affd14d2c4
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl0.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs ovl0_hwv_regs = {
+ .set_ofs = 0x0060,
+ .clr_ofs = 0x0064,
+ .sta_ofs = 0x2c30,
+};
+
+static const struct mtk_gate_regs ovl1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl1_hwv_regs = {
+ .set_ofs = 0x0068,
+ .clr_ofs = 0x006c,
+ .sta_ofs = 0x2c34,
+};
+
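+/*
+ * These gates are enabled through the hardware voter: regs points at the
+ * ordinary CG bank, while hwv_regs points at this agent's vote SET/CLR pair
+ * plus the DONE status word polled by the hwv gate ops; the clock is kept
+ * running for as long as any voter holds its vote.
+ */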
+#define GATE_HWV_OVL0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl0_cg_regs, \
+ .hwv_regs = &ovl0_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl1_cg_regs, \
+ .hwv_regs = &ovl1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ovl_clks[] = {
+ /* OVL0 */
+ GATE_HWV_OVL0(CLK_OVLSYS_CONFIG, "ovlsys_config", "disp", 0),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG0, "ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL0(CLK_OVL_FAKE_ENG1, "ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL0(CLK_OVL_MUTEX0, "ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA0, "ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA1, "ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA2, "ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA3, "ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA4, "ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA5, "ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA6, "ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA7, "ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA8, "ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL0(CLK_OVL_EXDMA9, "ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER0, "ovl_blender0", "disp", 14),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER1, "ovl_blender1", "disp", 15),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER2, "ovl_blender2", "disp", 16),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER3, "ovl_blender3", "disp", 17),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER4, "ovl_blender4", "disp", 18),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER5, "ovl_blender5", "disp", 19),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER6, "ovl_blender6", "disp", 20),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER7, "ovl_blender7", "disp", 21),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER8, "ovl_blender8", "disp", 22),
+ GATE_HWV_OVL0(CLK_OVL_BLENDER9, "ovl_blender9", "disp", 23),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC0, "ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC1, "ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC2, "ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC3, "ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC4, "ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL0(CLK_OVL_OUTPROC5, "ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ0, "ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL0(CLK_OVL_MDP_RSZ1, "ovl_mdp_rsz1", "disp", 31),
+ /* OVL1 */
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA0, "ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL1(CLK_OVL_DISP_WDMA1, "ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL1(CLK_OVL_UFBC_WDMA0, "ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA0, "ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL1(CLK_OVL_MDP_RDMA1, "ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL1(CLK_OVL_BWM0, "ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL1(CLK_OVL_DLI0, "ovl_dli0", "disp", 6),
+ GATE_HWV_OVL1(CLK_OVL_DLI1, "ovl_dli1", "disp", 7),
+ GATE_HWV_OVL1(CLK_OVL_DLI2, "ovl_dli2", "disp", 8),
+ GATE_HWV_OVL1(CLK_OVL_DLI3, "ovl_dli3", "disp", 9),
+ GATE_HWV_OVL1(CLK_OVL_DLI4, "ovl_dli4", "disp", 10),
+ GATE_HWV_OVL1(CLK_OVL_DLI5, "ovl_dli5", "disp", 11),
+ GATE_HWV_OVL1(CLK_OVL_DLI6, "ovl_dli6", "disp", 12),
+ GATE_HWV_OVL1(CLK_OVL_DLI7, "ovl_dli7", "disp", 13),
+ GATE_HWV_OVL1(CLK_OVL_DLI8, "ovl_dli8", "disp", 14),
+ GATE_HWV_OVL1(CLK_OVL_DLO0, "ovl_dlo0", "disp", 15),
+ GATE_HWV_OVL1(CLK_OVL_DLO1, "ovl_dlo1", "disp", 16),
+ GATE_HWV_OVL1(CLK_OVL_DLO2, "ovl_dlo2", "disp", 17),
+ GATE_HWV_OVL1(CLK_OVL_DLO3, "ovl_dlo3", "disp", 18),
+ GATE_HWV_OVL1(CLK_OVL_DLO4, "ovl_dlo4", "disp", 19),
+ GATE_HWV_OVL1(CLK_OVL_DLO5, "ovl_dlo5", "disp", 20),
+ GATE_HWV_OVL1(CLK_OVL_DLO6, "ovl_dlo6", "disp", 21),
+ GATE_HWV_OVL1(CLK_OVL_DLO7, "ovl_dlo7", "disp", 22),
+ GATE_HWV_OVL1(CLK_OVL_DLO8, "ovl_dlo8", "disp", 23),
+ GATE_HWV_OVL1(CLK_OVL_DLO9, "ovl_dlo9", "disp", 24),
+ GATE_HWV_OVL1(CLK_OVL_DLO10, "ovl_dlo10", "disp", 25),
+ GATE_HWV_OVL1(CLK_OVL_DLO11, "ovl_dlo11", "disp", 26),
+ GATE_HWV_OVL1(CLK_OVL_DLO12, "ovl_dlo12", "disp", 27),
+ GATE_HWV_OVL1(CLK_OVLSYS_RELAY0, "ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL1(CLK_OVL_INLINEROT0, "ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL1(CLK_OVL_SMI, "ovl_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl_mcd = {
+ .clks = ovl_clks,
+ .num_clks = ARRAY_SIZE(ovl_clks),
+};
+
+static const struct platform_device_id clk_mt8196_ovl0_id_table[] = {
+ { .name = "clk-mt8196-ovl0", .driver_data = (kernel_ulong_t)&ovl_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl0_id_table);
+
+static struct platform_driver clk_mt8196_ovl0_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl0",
+ },
+ .id_table = clk_mt8196_ovl0_id_table,
+};
+module_platform_driver(clk_mt8196_ovl0_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl0 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ovl1.c b/drivers/clk/mediatek/clk-mt8196-ovl1.c
new file mode 100644
index 000000000000..c8843d0d3ede
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ovl1.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ovl10_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs ovl10_hwv_regs = {
+ .set_ofs = 0x0050,
+ .clr_ofs = 0x0054,
+ .sta_ofs = 0x2c28,
+};
+
+static const struct mtk_gate_regs ovl11_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs ovl11_hwv_regs = {
+ .set_ofs = 0x0058,
+ .clr_ofs = 0x005c,
+ .sta_ofs = 0x2c2c,
+};
+
+#define GATE_HWV_OVL10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl10_cg_regs, \
+ .hwv_regs = &ovl10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_OVL11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ovl11_cg_regs, \
+ .hwv_regs = &ovl11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ovl1_clks[] = {
+ /* OVL10 */
+ GATE_HWV_OVL10(CLK_OVL1_OVLSYS_CONFIG, "ovl1_ovlsys_config", "disp", 0),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG0, "ovl1_ovl_fake_eng0", "disp", 1),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_FAKE_ENG1, "ovl1_ovl_fake_eng1", "disp", 2),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MUTEX0, "ovl1_ovl_mutex0", "disp", 3),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA0, "ovl1_ovl_exdma0", "disp", 4),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA1, "ovl1_ovl_exdma1", "disp", 5),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA2, "ovl1_ovl_exdma2", "disp", 6),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA3, "ovl1_ovl_exdma3", "disp", 7),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA4, "ovl1_ovl_exdma4", "disp", 8),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA5, "ovl1_ovl_exdma5", "disp", 9),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA6, "ovl1_ovl_exdma6", "disp", 10),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA7, "ovl1_ovl_exdma7", "disp", 11),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA8, "ovl1_ovl_exdma8", "disp", 12),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_EXDMA9, "ovl1_ovl_exdma9", "disp", 13),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER0, "ovl1_ovl_blender0", "disp", 14),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER1, "ovl1_ovl_blender1", "disp", 15),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER2, "ovl1_ovl_blender2", "disp", 16),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER3, "ovl1_ovl_blender3", "disp", 17),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER4, "ovl1_ovl_blender4", "disp", 18),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER5, "ovl1_ovl_blender5", "disp", 19),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER6, "ovl1_ovl_blender6", "disp", 20),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER7, "ovl1_ovl_blender7", "disp", 21),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER8, "ovl1_ovl_blender8", "disp", 22),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_BLENDER9, "ovl1_ovl_blender9", "disp", 23),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC0, "ovl1_ovl_outproc0", "disp", 24),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC1, "ovl1_ovl_outproc1", "disp", 25),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC2, "ovl1_ovl_outproc2", "disp", 26),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC3, "ovl1_ovl_outproc3", "disp", 27),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC4, "ovl1_ovl_outproc4", "disp", 28),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_OUTPROC5, "ovl1_ovl_outproc5", "disp", 29),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ0, "ovl1_ovl_mdp_rsz0", "disp", 30),
+ GATE_HWV_OVL10(CLK_OVL1_OVL_MDP_RSZ1, "ovl1_ovl_mdp_rsz1", "disp", 31),
+ /* OVL11 */
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA0, "ovl1_ovl_disp_wdma0", "disp", 0),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_DISP_WDMA1, "ovl1_ovl_disp_wdma1", "disp", 1),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_UFBC_WDMA0, "ovl1_ovl_ufbc_wdma0", "disp", 2),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA0, "ovl1_ovl_mdp_rdma0", "disp", 3),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_MDP_RDMA1, "ovl1_ovl_mdp_rdma1", "disp", 4),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_BWM0, "ovl1_ovl_bwm0", "disp", 5),
+ GATE_HWV_OVL11(CLK_OVL1_DLI0, "ovl1_dli0", "disp", 6),
+ GATE_HWV_OVL11(CLK_OVL1_DLI1, "ovl1_dli1", "disp", 7),
+ GATE_HWV_OVL11(CLK_OVL1_DLI2, "ovl1_dli2", "disp", 8),
+ GATE_HWV_OVL11(CLK_OVL1_DLI3, "ovl1_dli3", "disp", 9),
+ GATE_HWV_OVL11(CLK_OVL1_DLI4, "ovl1_dli4", "disp", 10),
+ GATE_HWV_OVL11(CLK_OVL1_DLI5, "ovl1_dli5", "disp", 11),
+ GATE_HWV_OVL11(CLK_OVL1_DLI6, "ovl1_dli6", "disp", 12),
+ GATE_HWV_OVL11(CLK_OVL1_DLI7, "ovl1_dli7", "disp", 13),
+ GATE_HWV_OVL11(CLK_OVL1_DLI8, "ovl1_dli8", "disp", 14),
+ GATE_HWV_OVL11(CLK_OVL1_DLO0, "ovl1_dlo0", "disp", 15),
+ GATE_HWV_OVL11(CLK_OVL1_DLO1, "ovl1_dlo1", "disp", 16),
+ GATE_HWV_OVL11(CLK_OVL1_DLO2, "ovl1_dlo2", "disp", 17),
+ GATE_HWV_OVL11(CLK_OVL1_DLO3, "ovl1_dlo3", "disp", 18),
+ GATE_HWV_OVL11(CLK_OVL1_DLO4, "ovl1_dlo4", "disp", 19),
+ GATE_HWV_OVL11(CLK_OVL1_DLO5, "ovl1_dlo5", "disp", 20),
+ GATE_HWV_OVL11(CLK_OVL1_DLO6, "ovl1_dlo6", "disp", 21),
+ GATE_HWV_OVL11(CLK_OVL1_DLO7, "ovl1_dlo7", "disp", 22),
+ GATE_HWV_OVL11(CLK_OVL1_DLO8, "ovl1_dlo8", "disp", 23),
+ GATE_HWV_OVL11(CLK_OVL1_DLO9, "ovl1_dlo9", "disp", 24),
+ GATE_HWV_OVL11(CLK_OVL1_DLO10, "ovl1_dlo10", "disp", 25),
+ GATE_HWV_OVL11(CLK_OVL1_DLO11, "ovl1_dlo11", "disp", 26),
+ GATE_HWV_OVL11(CLK_OVL1_DLO12, "ovl1_dlo12", "disp", 27),
+ GATE_HWV_OVL11(CLK_OVL1_OVLSYS_RELAY0, "ovl1_ovlsys_relay0", "disp", 28),
+ GATE_HWV_OVL11(CLK_OVL1_OVL_INLINEROT0, "ovl1_ovl_inlinerot0", "disp", 29),
+ GATE_HWV_OVL11(CLK_OVL1_SMI, "ovl1_smi", "disp", 30),
+};
+
+static const struct mtk_clk_desc ovl1_mcd = {
+ .clks = ovl1_clks,
+ .num_clks = ARRAY_SIZE(ovl1_clks),
+};
+
+static const struct platform_device_id clk_mt8196_ovl1_id_table[] = {
+ { .name = "clk-mt8196-ovl1", .driver_data = (kernel_ulong_t)&ovl1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, clk_mt8196_ovl1_id_table);
+
+static struct platform_driver clk_mt8196_ovl1_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-ovl1",
+ },
+ .id_table = clk_mt8196_ovl1_id_table,
+};
+module_platform_driver(clk_mt8196_ovl1_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 ovl1 clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-peri_ao.c b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
new file mode 100644
index 000000000000..f227a86c5d60
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-peri_ao.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs peri_ao0_cg_regs = {
+ .set_ofs = 0x24,
+ .clr_ofs = 0x28,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs peri_ao1_cg_regs = {
+ .set_ofs = 0x2c,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x14,
+};
+
+static const struct mtk_gate_regs peri_ao1_hwv_regs = {
+ .set_ofs = 0x0008,
+ .clr_ofs = 0x000c,
+ .sta_ofs = 0x2c04,
+};
+
+static const struct mtk_gate_regs peri_ao2_cg_regs = {
+ .set_ofs = 0x34,
+ .clr_ofs = 0x38,
+ .sta_ofs = 0x18,
+};
+
+#define GATE_PERI_AO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_PERI_AO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_HWV_PERI_AO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao1_cg_regs, \
+ .hwv_regs = &peri_ao1_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ }
+
+#define GATE_PERI_AO2(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &peri_ao2_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate peri_ao_clks[] = {
+ /* PERI_AO0 */
+ GATE_PERI_AO0(CLK_PERI_AO_UART0_BCLK, "peri_ao_uart0_bclk", "uart", 0),
+ GATE_PERI_AO0(CLK_PERI_AO_UART1_BCLK, "peri_ao_uart1_bclk", "uart", 1),
+ GATE_PERI_AO0(CLK_PERI_AO_UART2_BCLK, "peri_ao_uart2_bclk", "uart", 2),
+ GATE_PERI_AO0(CLK_PERI_AO_UART3_BCLK, "peri_ao_uart3_bclk", "uart", 3),
+ GATE_PERI_AO0(CLK_PERI_AO_UART4_BCLK, "peri_ao_uart4_bclk", "uart", 4),
+ GATE_PERI_AO0(CLK_PERI_AO_UART5_BCLK, "peri_ao_uart5_bclk", "uart", 5),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_HCLK, "peri_ao_pwm_x16w", "p_axi", 12),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_X16W_BCLK, "peri_ao_pwm_x16w_bclk", "pwm", 13),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK0, "peri_ao_pwm_pwm_bclk0", "pwm", 14),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK1, "peri_ao_pwm_pwm_bclk1", "pwm", 15),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK2, "peri_ao_pwm_pwm_bclk2", "pwm", 16),
+ GATE_PERI_AO0(CLK_PERI_AO_PWM_PWM_BCLK3, "peri_ao_pwm_pwm_bclk3", "pwm", 17),
+ /* PERI_AO1 */
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI0_BCLK, "peri_ao_spi0_bclk", "spi0_b", 0),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI1_BCLK, "peri_ao_spi1_bclk", "spi1_b", 2),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI2_BCLK, "peri_ao_spi2_bclk", "spi2_b", 3),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI3_BCLK, "peri_ao_spi3_bclk", "spi3_b", 4),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI4_BCLK, "peri_ao_spi4_bclk", "spi4_b", 5),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI5_BCLK, "peri_ao_spi5_bclk", "spi5_b", 6),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI6_BCLK, "peri_ao_spi6_bclk", "spi6_b", 7),
+ GATE_HWV_PERI_AO1(CLK_PERI_AO_SPI7_BCLK, "peri_ao_spi7_bclk", "spi7_b", 8),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_FLASH, "peri_ao_flashif_flash", "peri_ao_flashif_27m",
+ 18),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_27M, "peri_ao_flashif_27m", "sflash", 19),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_DRAM, "peri_ao_flashif_dram", "p_axi", 20),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_AXI, "peri_ao_flashif_axi", "peri_ao_flashif_dram", 21),
+ GATE_PERI_AO1(CLK_PERI_AO_FLASHIF_BCLK, "peri_ao_flashif_bclk", "p_axi", 22),
+ GATE_PERI_AO1(CLK_PERI_AO_AP_DMA_X32W_BCLK, "peri_ao_ap_dma_x32w_bclk", "p_axi", 26),
+ /* PERI_AO2 */
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_MSDC_SRC, "peri_ao_msdc1_msdc_src", "msdc30_1", 1),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK, "peri_ao_msdc1", "peri_ao_msdc1_axi", 2),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_AXI, "peri_ao_msdc1_axi", "p_axi", 3),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC1_HCLK_WRAP, "peri_ao_msdc1_h_wrap", "peri_ao_msdc1", 4),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_MSDC_SRC, "peri_ao_msdc2_msdc_src", "msdc30_2", 10),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK, "peri_ao_msdc2", "peri_ao_msdc2_axi", 11),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_AXI, "peri_ao_msdc2_axi", "p_axi", 12),
+ GATE_PERI_AO2(CLK_PERI_AO_MSDC2_HCLK_WRAP, "peri_ao_msdc2_h_wrap", "peri_ao_msdc2", 13),
+};
+
+static const struct mtk_clk_desc peri_ao_mcd = {
+ .clks = peri_ao_clks,
+ .num_clks = ARRAY_SIZE(peri_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_peri_ao[] = {
+ { .compatible = "mediatek,mt8196-pericfg-ao", .data = &peri_ao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_peri_ao);
+
+static struct platform_driver clk_mt8196_peri_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-peri-ao",
+ .of_match_table = of_match_clk_mt8196_peri_ao,
+ },
+};
+module_platform_driver(clk_mt8196_peri_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 pericfg_ao clock controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-pextp.c b/drivers/clk/mediatek/clk-mt8196-pextp.c
new file mode 100644
index 000000000000..3e505ecc4b6e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-pextp.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "reset.h"
+
+#define MT8196_PEXTP_RST0_SET_OFFSET 0x8
+
+static const struct mtk_gate_regs pext_cg_regs = {
+ .set_ofs = 0x18,
+ .clr_ofs = 0x1c,
+ .sta_ofs = 0x14,
+};
+
+#define GATE_PEXT(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &pext_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate pext_clks[] = {
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_TL, "pext_pm0_tl", "tl", 0),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_REF, "pext_pm0_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_MCU_BUS, "pext_pp0_mcu_bus", "clk26m", 6),
+ GATE_PEXT(CLK_PEXT_PEXTP_PHY_P0_PEXTP_REF, "pext_pp0_pextp_ref", "clk26m", 7),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AXI_250, "pext_pm0_axi_250", "ufs_pexpt0_mem_sub", 12),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_AHB_APB, "pext_pm0_ahb_apb", "ufs_pextp0_axi", 13),
+ GATE_PEXT(CLK_PEXT_PEXTP_MAC_P0_PL_P, "pext_pm0_pl_p", "clk26m", 14),
+ GATE_PEXT(CLK_PEXT_PEXTP_VLP_AO_P0_LP, "pext_pextp_vlp_ao_p0_lp", "clk26m", 19),
+};
+
+static u16 pext_rst_ofs[] = { MT8196_PEXTP_RST0_SET_OFFSET };
+
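+/*
+ * rst_idx_map translates reset indices from the DT binding into bit
+ * positions within the single set/clear reset bank above.
+ */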
+static u16 pext_rst_idx_map[] = {
+ [MT8196_PEXTP0_RST0_PCIE0_MAC] = 0,
+ [MT8196_PEXTP0_RST0_PCIE0_PHY] = 1,
+};
+
+static const struct mtk_clk_rst_desc pext_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext_mcd = {
+ .clks = pext_clks,
+ .num_clks = ARRAY_SIZE(pext_clks),
+ .rst_desc = &pext_rst_desc,
+};
+
+static const struct mtk_gate pext1_clks[] = {
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_TL, "pext1_pm1_tl", "tl_p1", 0),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_REF, "pext1_pm1_ref", "clk26m", 1),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_TL, "pext1_pm2_tl", "tl_p2", 2),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_REF, "pext1_pm2_ref", "clk26m", 3),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_MCU_BUS, "pext1_pp1_mcu_bus", "clk26m", 8),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P1_PEXTP_REF, "pext1_pp1_pextp_ref", "clk26m", 9),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_MCU_BUS, "pext1_pp2_mcu_bus", "clk26m", 10),
+ GATE_PEXT(CLK_PEXT1_PEXTP_PHY_P2_PEXTP_REF, "pext1_pp2_pextp_ref", "clk26m", 11),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AXI_250, "pext1_pm1_axi_250",
+ "pextp1_usb_axi", 16),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_AHB_APB, "pext1_pm1_ahb_apb",
+ "pextp1_usb_mem_sub", 17),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P1_PL_P, "pext1_pm1_pl_p", "clk26m", 18),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AXI_250, "pext1_pm2_axi_250",
+ "pextp1_usb_axi", 19),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_AHB_APB, "pext1_pm2_ahb_apb",
+ "pextp1_usb_mem_sub", 20),
+ GATE_PEXT(CLK_PEXT1_PEXTP_MAC_P2_PL_P, "pext1_pm2_pl_p", "clk26m", 21),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P1_LP, "pext1_pextp_vlp_ao_p1_lp", "clk26m", 26),
+ GATE_PEXT(CLK_PEXT1_PEXTP_VLP_AO_P2_LP, "pext1_pextp_vlp_ao_p2_lp", "clk26m", 27),
+};
+
+static u16 pext1_rst_idx_map[] = {
+ [MT8196_PEXTP1_RST0_PCIE1_MAC] = 0,
+ [MT8196_PEXTP1_RST0_PCIE1_PHY] = 1,
+ [MT8196_PEXTP1_RST0_PCIE2_MAC] = 8,
+ [MT8196_PEXTP1_RST0_PCIE2_PHY] = 9,
+};
+
+static const struct mtk_clk_rst_desc pext1_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = pext_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(pext_rst_ofs),
+ .rst_idx_map = pext1_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(pext1_rst_idx_map),
+};
+
+static const struct mtk_clk_desc pext1_mcd = {
+ .clks = pext1_clks,
+ .num_clks = ARRAY_SIZE(pext1_clks),
+ .rst_desc = &pext1_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_pextp[] = {
+ { .compatible = "mediatek,mt8196-pextp0cfg-ao", .data = &pext_mcd },
+ { .compatible = "mediatek,mt8196-pextp1cfg-ao", .data = &pext1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_pextp);
+
+static struct platform_driver clk_mt8196_pextp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-pextp",
+ .of_match_table = of_match_clk_mt8196_pextp,
+ },
+};
+module_platform_driver(clk_mt8196_pextp_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 PCIe transmit PHY clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen.c b/drivers/clk/mediatek/clk-mt8196-topckgen.c
new file mode 100644
index 000000000000..6ace11ef6b69
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen.c
@@ -0,0 +1,985 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CLK_CFG_UPDATE 0x0004
+#define CLK_CFG_UPDATE1 0x0008
+#define CLK_CFG_UPDATE2 0x000c
+#define CLK_CFG_0 0x0010
+#define CLK_CFG_0_SET 0x0014
+#define CLK_CFG_0_CLR 0x0018
+#define CLK_CFG_1 0x0020
+#define CLK_CFG_1_SET 0x0024
+#define CLK_CFG_1_CLR 0x0028
+#define CLK_CFG_2 0x0030
+#define CLK_CFG_2_SET 0x0034
+#define CLK_CFG_2_CLR 0x0038
+#define CLK_CFG_3 0x0040
+#define CLK_CFG_3_SET 0x0044
+#define CLK_CFG_3_CLR 0x0048
+#define CLK_CFG_4 0x0050
+#define CLK_CFG_4_SET 0x0054
+#define CLK_CFG_4_CLR 0x0058
+#define CLK_CFG_5 0x0060
+#define CLK_CFG_5_SET 0x0064
+#define CLK_CFG_5_CLR 0x0068
+#define CLK_CFG_6 0x0070
+#define CLK_CFG_6_SET 0x0074
+#define CLK_CFG_6_CLR 0x0078
+#define CLK_CFG_7 0x0080
+#define CLK_CFG_7_SET 0x0084
+#define CLK_CFG_7_CLR 0x0088
+#define CLK_CFG_8 0x0090
+#define CLK_CFG_8_SET 0x0094
+#define CLK_CFG_8_CLR 0x0098
+#define CLK_CFG_9 0x00a0
+#define CLK_CFG_9_SET 0x00a4
+#define CLK_CFG_9_CLR 0x00a8
+#define CLK_CFG_10 0x00b0
+#define CLK_CFG_10_SET 0x00b4
+#define CLK_CFG_10_CLR 0x00b8
+#define CLK_CFG_11 0x00c0
+#define CLK_CFG_11_SET 0x00c4
+#define CLK_CFG_11_CLR 0x00c8
+#define CLK_CFG_12 0x00d0
+#define CLK_CFG_12_SET 0x00d4
+#define CLK_CFG_12_CLR 0x00d8
+#define CLK_CFG_13 0x00e0
+#define CLK_CFG_13_SET 0x00e4
+#define CLK_CFG_13_CLR 0x00e8
+#define CLK_CFG_14 0x00f0
+#define CLK_CFG_14_SET 0x00f4
+#define CLK_CFG_14_CLR 0x00f8
+#define CLK_CFG_15 0x0100
+#define CLK_CFG_15_SET 0x0104
+#define CLK_CFG_15_CLR 0x0108
+#define CLK_CFG_16 0x0110
+#define CLK_CFG_16_SET 0x0114
+#define CLK_CFG_16_CLR 0x0118
+#define CLK_CFG_17 0x0120
+#define CLK_CFG_17_SET 0x0124
+#define CLK_CFG_17_CLR 0x0128
+#define CLK_CFG_18 0x0130
+#define CLK_CFG_18_SET 0x0134
+#define CLK_CFG_18_CLR 0x0138
+#define CLK_CFG_19 0x0140
+#define CLK_CFG_19_SET 0x0144
+#define CLK_CFG_19_CLR 0x0148
+#define CLK_AUDDIV_0 0x020c
+#define CLK_FENC_STATUS_MON_0 0x0270
+#define CLK_FENC_STATUS_MON_1 0x0274
+#define CLK_FENC_STATUS_MON_2 0x0278
+
+/* MUX SHIFT */
+#define TOP_MUX_AXI_SHIFT 0
+#define TOP_MUX_MEM_SUB_SHIFT 1
+#define TOP_MUX_IO_NOC_SHIFT 2
+#define TOP_MUX_PERI_AXI_SHIFT 3
+#define TOP_MUX_UFS_PEXTP0_AXI_SHIFT 4
+#define TOP_MUX_PEXTP1_USB_AXI_SHIFT 5
+#define TOP_MUX_PERI_FMEM_SUB_SHIFT 6
+#define TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT 7
+#define TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT 8
+#define TOP_MUX_PERI_NOC_SHIFT 9
+#define TOP_MUX_EMI_N_SHIFT 10
+#define TOP_MUX_EMI_S_SHIFT 11
+#define TOP_MUX_AP2CONN_HOST_SHIFT 14
+#define TOP_MUX_ATB_SHIFT 15
+#define TOP_MUX_CIRQ_SHIFT 16
+#define TOP_MUX_PBUS_156M_SHIFT 17
+#define TOP_MUX_EFUSE_SHIFT 20
+#define TOP_MUX_MCU_L3GIC_SHIFT 21
+#define TOP_MUX_MCU_INFRA_SHIFT 22
+#define TOP_MUX_DSP_SHIFT 23
+#define TOP_MUX_MFG_REF_SHIFT 24
+#define TOP_MUX_MFG_EB_SHIFT 26
+#define TOP_MUX_UART_SHIFT 27
+#define TOP_MUX_SPI0_BCLK_SHIFT 28
+#define TOP_MUX_SPI1_BCLK_SHIFT 29
+#define TOP_MUX_SPI2_BCLK_SHIFT 30
+#define TOP_MUX_SPI3_BCLK_SHIFT 0
+#define TOP_MUX_SPI4_BCLK_SHIFT 1
+#define TOP_MUX_SPI5_BCLK_SHIFT 2
+#define TOP_MUX_SPI6_BCLK_SHIFT 3
+#define TOP_MUX_SPI7_BCLK_SHIFT 4
+#define TOP_MUX_MSDC30_1_SHIFT 7
+#define TOP_MUX_MSDC30_2_SHIFT 8
+#define TOP_MUX_DISP_PWM_SHIFT 9
+#define TOP_MUX_USB_TOP_1P_SHIFT 10
+#define TOP_MUX_SSUSB_XHCI_1P_SHIFT 11
+#define TOP_MUX_SSUSB_FMCNT_P1_SHIFT 12
+#define TOP_MUX_I2C_PERI_SHIFT 13
+#define TOP_MUX_I2C_EAST_SHIFT 14
+#define TOP_MUX_I2C_WEST_SHIFT 15
+#define TOP_MUX_I2C_NORTH_SHIFT 16
+#define TOP_MUX_AES_UFSFDE_SHIFT 17
+#define TOP_MUX_UFS_SHIFT 18
+#define TOP_MUX_AUD_1_SHIFT 21
+#define TOP_MUX_AUD_2_SHIFT 22
+#define TOP_MUX_ADSP_SHIFT 23
+#define TOP_MUX_ADSP_UARTHUB_B_SHIFT 24
+#define TOP_MUX_DPMAIF_MAIN_SHIFT 25
+#define TOP_MUX_PWM_SHIFT 26
+#define TOP_MUX_MCUPM_SHIFT 27
+#define TOP_MUX_SFLASH_SHIFT 28
+#define TOP_MUX_IPSEAST_SHIFT 29
+#define TOP_MUX_TL_SHIFT 0
+#define TOP_MUX_TL_P1_SHIFT 1
+#define TOP_MUX_TL_P2_SHIFT 2
+#define TOP_MUX_EMI_INTERFACE_546_SHIFT 3
+#define TOP_MUX_SDF_SHIFT 4
+#define TOP_MUX_UARTHUB_BCLK_SHIFT 5
+#define TOP_MUX_DPSW_CMP_26M_SHIFT 6
+#define TOP_MUX_SMAPCK_SHIFT 7
+#define TOP_MUX_SSR_PKA_SHIFT 8
+#define TOP_MUX_SSR_DMA_SHIFT 9
+#define TOP_MUX_SSR_KDF_SHIFT 10
+#define TOP_MUX_SSR_RNG_SHIFT 11
+#define TOP_MUX_SPU0_SHIFT 12
+#define TOP_MUX_SPU1_SHIFT 13
+#define TOP_MUX_DXCC_SHIFT 14
+
+/* CKSTA REG */
+#define CKSTA_REG 0x01c8
+#define CKSTA_REG1 0x01cc
+#define CKSTA_REG2 0x01d0
+
+/* DIVIDER REG */
+#define CLK_AUDDIV_2 0x0214
+#define CLK_AUDDIV_3 0x0220
+#define CLK_AUDDIV_4 0x0224
+#define CLK_AUDDIV_5 0x0228
+
+/* HW Voter REG */
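+/*
+ * Each voter channel is a SET/CLR/DONE register triplet: SET/CLR cast or
+ * withdraw this agent's vote for the corresponding clock group, and the
+ * DONE word reports when the request has taken effect in hardware.
+ */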
+#define HWV_CG_0_SET 0x0000
+#define HWV_CG_0_CLR 0x0004
+#define HWV_CG_0_DONE 0x2c00
+#define HWV_CG_1_SET 0x0008
+#define HWV_CG_1_CLR 0x000c
+#define HWV_CG_1_DONE 0x2c04
+#define HWV_CG_2_SET 0x0010
+#define HWV_CG_2_CLR 0x0014
+#define HWV_CG_2_DONE 0x2c08
+#define HWV_CG_3_SET 0x0018
+#define HWV_CG_3_CLR 0x001c
+#define HWV_CG_3_DONE 0x2c0c
+#define HWV_CG_4_SET 0x0020
+#define HWV_CG_4_CLR 0x0024
+#define HWV_CG_4_DONE 0x2c10
+#define HWV_CG_5_SET 0x0028
+#define HWV_CG_5_CLR 0x002c
+#define HWV_CG_5_DONE 0x2c14
+#define HWV_CG_6_SET 0x0030
+#define HWV_CG_6_CLR 0x0034
+#define HWV_CG_6_DONE 0x2c18
+#define HWV_CG_7_SET 0x0038
+#define HWV_CG_7_CLR 0x003c
+#define HWV_CG_7_DONE 0x2c1c
+#define HWV_CG_8_SET 0x0040
+#define HWV_CG_8_CLR 0x0044
+#define HWV_CG_8_DONE 0x2c20
+
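+/*
+ * Fixed-factor dividers derived from the PLL outputs: each FACTOR() entry
+ * divides its parent by a constant ratio, e.g. "mainpll_d4_d2" is
+ * mainpll / 4 / 2 = mainpll / 8.
+ */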
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll", 1, 16),
+ FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll", 1, 32),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll", 1, 10),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll", 1, 20),
+ FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll", 1, 40),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll", 1, 12),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll", 1, 14),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll", 1, 28),
+ FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll", 1, 56),
+ FACTOR(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll", 1, 32),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll", 1, 10),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll", 1, 20),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll", 1, 12),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll", 1, 24),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll", 1, 48),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll", 1, 96),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll", 1, 52),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll", 1, 104),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll", 1, 208),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll", 1, 416),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D10, "univpll_192m_d10", "univpll", 1, 130),
+ FACTOR(CLK_TOP_TVDPLL1_D2, "tvdpll1_d2", "tvdpll1", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_OSC_D2, "osc_d2", "ulposc", 1, 2),
+ FACTOR(CLK_TOP_OSC_D3, "osc_d3", "ulposc", 1, 3),
+ FACTOR(CLK_TOP_OSC_D4, "osc_d4", "ulposc", 1, 4),
+ FACTOR(CLK_TOP_OSC_D5, "osc_d5", "ulposc", 1, 5),
+ FACTOR(CLK_TOP_OSC_D7, "osc_d7", "ulposc", 1, 7),
+ FACTOR(CLK_TOP_OSC_D8, "osc_d8", "ulposc", 1, 8),
+ FACTOR(CLK_TOP_OSC_D10, "osc_d10", "ulposc", 1, 10),
+ FACTOR(CLK_TOP_OSC_D14, "osc_d14", "ulposc", 1, 14),
+ FACTOR(CLK_TOP_OSC_D20, "osc_d20", "ulposc", 1, 20),
+ FACTOR(CLK_TOP_OSC_D32, "osc_d32", "ulposc", 1, 32),
+ FACTOR(CLK_TOP_OSC_D40, "osc_d40", "ulposc", 1, 40),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mem_sub_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d4",
+ "univpll_d4_d4",
+ "osc_d3",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const io_noc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d8",
+ "osc_d4",
+ "mainpll_d6_d2",
+ "mainpll_d9"
+};
+
+static const char * const shared_axi_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "mainpll_d5_d8",
+ "osc_d8",
+ "mainpll_d7_d4",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const shared_sub_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4"
+};
+
+static const char * const p_noc_parents[] = {
+ "clk26m",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "osc_d4",
+ "univpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const emi_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d5_d8",
+ "mainpll_d5_d4",
+ "mainpll_d4_d4",
+ "emipll1_ck"
+};
+
+static const char * const ap2conn_host_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6"
+};
+
+static const char * const cirq_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4"
+};
+
+static const char * const pbus_156m_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "osc_d2",
+ "mainpll_d7"
+};
+
+static const char * const efuse_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const mcu_l3gic_parents[] = {
+ "clk26m",
+ "osc_d8",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const mcu_infra_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d9",
+ "mainpll_d6"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "osc_d5",
+ "osc_d4",
+ "osc_d3",
+ "univpll_d6_d2",
+ "osc_d2",
+ "univpll_d5",
+ "osc"
+};
+
+static const char * const mfg_ref_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const mfg_eb_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d6_d8",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const spi_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_192m",
+ "univpll_d6_d2"
+};
+
+static const char * const msdc30_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "msdcpll_d2"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "osc_d32",
+ "osc_d8",
+ "univpll_d6_d4",
+ "univpll_d5_d4",
+ "osc_d4",
+ "mainpll_d4_d4"
+};
+
+static const char * const usb_1p_parents[] = {
+ "clk26m",
+ "univpll_d5_d4"
+};
+
+static const char * const usb_fmcnt_p1_parents[] = {
+ "clk26m",
+ "univpll_192m_d4"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "univpll_d5_d4",
+ "mainpll_d4_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const aes_ufsfde_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d4"
+};
+
+static const char * const ufs_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const aud_1_parents[] = {
+ "clk26m",
+ "vlp_apll1"
+};
+
+static const char * const aud_2_parents[] = {
+ "clk26m",
+ "vlp_apll2"
+};
+
+static const char * const adsp_parents[] = {
+ "clk26m",
+ "adsppll"
+};
+
+static const char * const adsp_uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpmaif_main_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "univpll_d4_d8"
+};
+
+static const char * const mcupm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const ipseast_parents[] = {
+ "clk26m",
+ "mainpll_d6",
+ "mainpll_d5",
+ "mainpll_d4",
+ "mainpll_d3"
+};
+
+static const char * const tl_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2"
+};
+
+static const char * const md_emi_parents[] = {
+ "clk26m",
+ "mainpll_d4"
+};
+
+static const char * const sdf_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4",
+ "univpll_d4"
+};
+
+static const char * const uarthub_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2"
+};
+
+static const char * const dpsw_cmp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const smapparents[] = {
+ "clk26m",
+ "mainpll_d4_d8"
+};
+
+static const char * const ssr_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const ssr_kdf_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7"
+};
+
+static const char * const ssr_rng_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const spu_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2"
+};
+
+static const char * const apll_m_parents[] = {
+ "aud_1",
+ "aud_2"
+};
+
+static const char * const sflash_parents[] = {
+ "clk26m",
+ "mainpll_d7_d8",
+ "univpll_d6_d8"
+};
+
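+/*
+ * Top-level muxes. Each entry selects among one of the parent tables above
+ * through CLR/SET shadow registers plus an update-toggle bit; the _GATE_
+ * variants add a gate bit, the _HWV_ variants route enabling through the
+ * hardware voter, and the _FENC_ variants name a status bit in
+ * CLK_FENC_STATUS_MON_* used to verify that the clock is actually running.
+ */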
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_CLR_SET_UPD(CLK_TOP_AXI, "axi",
+ axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MEM_SUB, "mem_sub",
+ mem_sub_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_IO_NOC, "io_noc",
+ io_noc_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_IO_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_AXI, "p_axi",
+ shared_axi_parents, CLK_CFG_0, CLK_CFG_0_SET,
+ CLK_CFG_0_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_AXI_SHIFT),
+ /* CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_TOP_UFS_PEXTP0_AXI, "ufs_pextp0_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 0, 3,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXTP0_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_AXI, "pextp1_usb_axi",
+ shared_axi_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 8, 3,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_AXI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_FMEM_SUB, "p_fmem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 16, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_FMEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PEXPT0_MEM_SUB, "ufs_pexpt0_mem_sub",
+ shared_sub_parents, CLK_CFG_1, CLK_CFG_1_SET,
+ CLK_CFG_1_CLR, 24, 4,
+ CLK_CFG_UPDATE, TOP_MUX_UFS_PEXPT0_MEM_SUB_SHIFT),
+ /* CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_TOP_PEXTP1_USB_MEM_SUB, "pextp1_usb_mem_sub",
+ shared_sub_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 0, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PEXTP1_USB_MEM_SUB_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_P_NOC, "p_noc",
+ p_noc_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 8, 4,
+ CLK_CFG_UPDATE, TOP_MUX_PERI_NOC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_N, "emi_n",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_N_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_S, "emi_s",
+ emi_parents, CLK_CFG_2, CLK_CFG_2_SET,
+ CLK_CFG_2_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_EMI_S_SHIFT),
+ /* CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_TOP_AP2CONN_HOST, "ap2conn_host",
+ ap2conn_host_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 16, 1,
+ CLK_CFG_UPDATE, TOP_MUX_AP2CONN_HOST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_ATB, "atb",
+ atb_parents, CLK_CFG_3, CLK_CFG_3_SET,
+ CLK_CFG_3_CLR, 24, 2,
+ CLK_CFG_UPDATE, TOP_MUX_ATB_SHIFT),
+ /* CLK_CFG_4 */
+ MUX_CLR_SET_UPD(CLK_TOP_CIRQ, "cirq",
+ cirq_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 0, 2,
+ CLK_CFG_UPDATE, TOP_MUX_CIRQ_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_PBUS_156M, "pbus_156m",
+ pbus_156m_parents, CLK_CFG_4, CLK_CFG_4_SET,
+ CLK_CFG_4_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_PBUS_156M_SHIFT),
+ /* CLK_CFG_5 */
+ MUX_CLR_SET_UPD(CLK_TOP_EFUSE, "efuse",
+ efuse_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 0, 1,
+ CLK_CFG_UPDATE, TOP_MUX_EFUSE_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCL3GIC, "mcu_l3gic",
+ mcu_l3gic_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 8, 2,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_L3GIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_MCINFRA, "mcu_infra",
+ mcu_infra_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 16, 3,
+ CLK_CFG_UPDATE, TOP_MUX_MCU_INFRA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_DSP, "dsp",
+ dsp_parents, CLK_CFG_5, CLK_CFG_5_SET,
+ CLK_CFG_5_CLR, 24, 3,
+ CLK_CFG_UPDATE, TOP_MUX_DSP_SHIFT),
+ /* CLK_CFG_6 */
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(CLK_TOP_MFG_REF, "mfg_ref", mfg_ref_parents,
+ NULL, ARRAY_SIZE(mfg_ref_parents),
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE, TOP_MUX_MFG_REF_SHIFT,
+ CLK_FENC_STATUS_MON_0, 7, CLK_IGNORE_UNUSED),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_EB, "mfg_eb",
+ mfg_eb_parents, CLK_CFG_6, CLK_CFG_6_SET,
+ CLK_CFG_6_CLR, 16, 2,
+ 23, CLK_CFG_UPDATE, TOP_MUX_MFG_EB_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UART, "uart", uart_parents,
+ CLK_CFG_6, CLK_CFG_6_SET, CLK_CFG_6_CLR,
+ HWV_CG_3_DONE, HWV_CG_3_SET, HWV_CG_3_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE, TOP_MUX_UART_SHIFT,
+ CLK_FENC_STATUS_MON_0, 4),
+ /* CLK_CFG_7 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI0_BCLK, "spi0_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE, TOP_MUX_SPI0_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 3),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI1_BCLK, "spi1_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE, TOP_MUX_SPI1_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 2),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI2_BCLK, "spi2_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE, TOP_MUX_SPI2_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 1),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI3_BCLK, "spi3_b", spi_b_parents,
+ CLK_CFG_7, CLK_CFG_7_SET, CLK_CFG_7_CLR,
+ HWV_CG_4_DONE, HWV_CG_4_SET, HWV_CG_4_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI3_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_0, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI4_BCLK, "spi4_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_SPI4_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI5_BCLK, "spi5_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_SPI5_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI6_BCLK, "spi6_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_SPI6_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_SPI7_BCLK, "spi7_b", spi_b_parents,
+ CLK_CFG_8, CLK_CFG_8_SET, CLK_CFG_8_CLR,
+ HWV_CG_5_DONE, HWV_CG_5_SET, HWV_CG_5_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_SPI7_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_1, 28),
+ /* CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_1, "msdc30_1", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_MSDC30_2, "msdc30_2", msdc30_parents,
+ CLK_CFG_9, CLK_CFG_9_SET, CLK_CFG_9_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_MSDC30_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 24),
+ /* CLK_CFG_10 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DISP_PWM, "disp_pwm", disp_pwm_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_DISP_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 23),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_TOP_1P, "usb_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 22),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_XHCI_1P, "usb_xhci_1p", usb_1p_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_1P_SHIFT,
+ CLK_FENC_STATUS_MON_1, 21),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_USB_FMCNT_P1, "usb_fmcnt_p1", usb_fmcnt_p1_parents,
+ CLK_CFG_10, CLK_CFG_10_SET, CLK_CFG_10_CLR,
+ 24, 1, 31, CLK_CFG_UPDATE1, TOP_MUX_SSUSB_FMCNT_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 20),
+ /* CLK_CFG_11 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_P, "i2c_p", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_I2C_PERI_SHIFT,
+ CLK_FENC_STATUS_MON_1, 19),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_EAST, "i2c_east", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_I2C_EAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 18),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_I2C_WEST, "i2c_west", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ 16, 3, 23, CLK_CFG_UPDATE1, TOP_MUX_I2C_WEST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 17),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_I2C_NORTH, "i2c_north", i2c_parents,
+ CLK_CFG_11, CLK_CFG_11_SET, CLK_CFG_11_CLR,
+ HWV_CG_6_DONE, HWV_CG_6_SET, HWV_CG_6_CLR,
+ 24, 3, 31, CLK_CFG_UPDATE1, TOP_MUX_I2C_NORTH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 16),
+ /* CLK_CFG_12 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AES_UFSFDE, "aes_ufsfde", aes_ufsfde_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_AES_UFSFDE_SHIFT,
+ CLK_FENC_STATUS_MON_1, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_UFS, "ufs", ufs_parents,
+ CLK_CFG_12, CLK_CFG_12_SET, CLK_CFG_12_CLR,
+ 8, 3, 15, CLK_CFG_UPDATE1, TOP_MUX_UFS_SHIFT,
+ CLK_FENC_STATUS_MON_1, 14),
+ /* CLK_CFG_13 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_1, "aud_1", aud_1_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 0, 1, 7, CLK_CFG_UPDATE1, TOP_MUX_AUD_1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 11),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_AUD_2, "aud_2", aud_2_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 8, 1, 15, CLK_CFG_UPDATE1, TOP_MUX_AUD_2_SHIFT,
+ CLK_FENC_STATUS_MON_1, 10),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_ADSP, "adsp", adsp_parents,
+ CLK_CFG_13, CLK_CFG_13_SET, CLK_CFG_13_CLR,
+ 16, 1, 23, CLK_CFG_UPDATE1, TOP_MUX_ADSP_SHIFT,
+ CLK_FENC_STATUS_MON_1, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP_UARTHUB_B, "adsp_uarthub_b",
+ adsp_uarthub_b_parents, CLK_CFG_13, CLK_CFG_13_SET,
+ CLK_CFG_13_CLR, 24, 2, 31,
+ CLK_CFG_UPDATE1, TOP_MUX_ADSP_UARTHUB_B_SHIFT),
+ /* CLK_CFG_14 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_DPMAIF_MAIN, "dpmaif_main", dpmaif_main_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 0, 4, 7, CLK_CFG_UPDATE1, TOP_MUX_DPMAIF_MAIN_SHIFT,
+ CLK_FENC_STATUS_MON_1, 7),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_PWM, "pwm", pwm_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 8, 2, 15, CLK_CFG_UPDATE1, TOP_MUX_PWM_SHIFT,
+ CLK_FENC_STATUS_MON_1, 6),
+ MUX_CLR_SET_UPD(CLK_TOP_MCUPM, "mcupm",
+ mcupm_parents, CLK_CFG_14, CLK_CFG_14_SET,
+ CLK_CFG_14_CLR, 16, 3,
+ CLK_CFG_UPDATE1, TOP_MUX_MCUPM_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_SFLASH, "sflash", sflash_parents,
+ CLK_CFG_14, CLK_CFG_14_SET, CLK_CFG_14_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE1, TOP_MUX_SFLASH_SHIFT,
+ CLK_FENC_STATUS_MON_1, 4),
+ /* CLK_CFG_15 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_IPSEAST, "ipseast", ipseast_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 0, 3, 7, CLK_CFG_UPDATE1, TOP_MUX_IPSEAST_SHIFT,
+ CLK_FENC_STATUS_MON_1, 3),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL, "tl", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 16, 2, 23, CLK_CFG_UPDATE2, TOP_MUX_TL_SHIFT,
+ CLK_FENC_STATUS_MON_1, 1),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P1, "tl_p1", tl_parents,
+ CLK_CFG_15, CLK_CFG_15_SET, CLK_CFG_15_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_TL_P1_SHIFT,
+ CLK_FENC_STATUS_MON_1, 0),
+ /* CLK_CFG_16 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP_TL_P2, "tl_p2", tl_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ 0, 2, 7, CLK_CFG_UPDATE2, TOP_MUX_TL_P2_SHIFT,
+ CLK_FENC_STATUS_MON_2, 31),
+ MUX_CLR_SET_UPD(CLK_TOP_EMI_INTERFACE_546, "emi_interface_546",
+ md_emi_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_EMI_INTERFACE_546_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SDF, "sdf",
+ sdf_parents, CLK_CFG_16, CLK_CFG_16_SET,
+ CLK_CFG_16_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SDF_SHIFT),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP_UARTHUB_BCLK, "uarthub_b", uarthub_b_parents,
+ CLK_CFG_16, CLK_CFG_16_SET, CLK_CFG_16_CLR,
+ HWV_CG_7_DONE, HWV_CG_7_SET, HWV_CG_7_CLR,
+ 24, 2, 31, CLK_CFG_UPDATE2, TOP_MUX_UARTHUB_BCLK_SHIFT,
+ CLK_FENC_STATUS_MON_2, 28),
+ /* CLK_CFG_17 */
+ MUX_CLR_SET_UPD(CLK_TOP_DPSW_CMP_26M, "dpsw_cmp_26m",
+ dpsw_cmp_26m_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 0, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_DPSW_CMP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SMAP, "smap",
+ smap_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 8, 1,
+ CLK_CFG_UPDATE2, TOP_MUX_SMAPCK_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_PKA, "ssr_pka",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_PKA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_DMA, "ssr_dma",
+ ssr_parents, CLK_CFG_17, CLK_CFG_17_SET,
+ CLK_CFG_17_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_DMA_SHIFT),
+ /* CLK_CFG_18 */
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_KDF, "ssr_kdf",
+ ssr_kdf_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_KDF_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SSR_RNG, "ssr_rng",
+ ssr_rng_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 8, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_SSR_RNG_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU0, "spu0",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 16, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU0_SHIFT),
+ MUX_CLR_SET_UPD(CLK_TOP_SPU1, "spu1",
+ spu_parents, CLK_CFG_18, CLK_CFG_18_SET,
+ CLK_CFG_18_CLR, 24, 3,
+ CLK_CFG_UPDATE2, TOP_MUX_SPU1_SHIFT),
+ /* CLK_CFG_19 */
+ MUX_CLR_SET_UPD(CLK_TOP_DXCC, "dxcc",
+ dxcc_parents, CLK_CFG_19, CLK_CFG_19_SET,
+ CLK_CFG_19_CLR, 0, 2,
+ CLK_CFG_UPDATE2, TOP_MUX_DXCC_SHIFT),
+};
+
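+/*
+ * The composites below pack three controls per audio clock: a 1-bit parent
+ * mux and a gate bit in CLK_AUDDIV_0, plus an 8-bit rate-divider field in
+ * one of CLK_AUDDIV_2..5. Reading the table, "apll_i2sin0_m" muxes with
+ * CLK_AUDDIV_0 bit 16, divides through CLK_AUDDIV_2 bits [7:0] and gates
+ * with CLK_AUDDIV_0 bit 0; the remaining entries step those fields along
+ * in the same pattern.
+ */
+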
+static const struct mtk_composite top_aud_divs[] = {
+ /* CLK_AUDDIV_2 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN0, "apll_i2sin0_m", apll_m_parents,
+ CLK_AUDDIV_0, 16, 1, CLK_AUDDIV_2, 0, 8, CLK_AUDDIV_0, 0),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN1, "apll_i2sin1_m", apll_m_parents,
+ CLK_AUDDIV_0, 17, 1, CLK_AUDDIV_2, 8, 8, CLK_AUDDIV_0, 1),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN2, "apll_i2sin2_m", apll_m_parents,
+ CLK_AUDDIV_0, 18, 1, CLK_AUDDIV_2, 16, 8, CLK_AUDDIV_0, 2),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN3, "apll_i2sin3_m", apll_m_parents,
+ CLK_AUDDIV_0, 19, 1, CLK_AUDDIV_2, 24, 8, CLK_AUDDIV_0, 3),
+ /* CLK_AUDDIV_3 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN4, "apll_i2sin4_m", apll_m_parents,
+ CLK_AUDDIV_0, 20, 1, CLK_AUDDIV_3, 0, 8, CLK_AUDDIV_0, 4),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SIN6, "apll_i2sin6_m", apll_m_parents,
+ CLK_AUDDIV_0, 21, 1, CLK_AUDDIV_3, 8, 8, CLK_AUDDIV_0, 5),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT0, "apll_i2sout0_m", apll_m_parents,
+ CLK_AUDDIV_0, 22, 1, CLK_AUDDIV_3, 16, 8, CLK_AUDDIV_0, 6),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT1, "apll_i2sout1_m", apll_m_parents,
+ CLK_AUDDIV_0, 23, 1, CLK_AUDDIV_3, 24, 8, CLK_AUDDIV_0, 7),
+ /* CLK_AUDDIV_4 */
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT2, "apll_i2sout2_m", apll_m_parents,
+ CLK_AUDDIV_0, 24, 1, CLK_AUDDIV_4, 0, 8, CLK_AUDDIV_0, 8),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT3, "apll_i2sout3_m", apll_m_parents,
+ CLK_AUDDIV_0, 25, 1, CLK_AUDDIV_4, 8, 8, CLK_AUDDIV_0, 9),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT4, "apll_i2sout4_m", apll_m_parents,
+ CLK_AUDDIV_0, 26, 1, CLK_AUDDIV_4, 16, 8, CLK_AUDDIV_0, 10),
+ MUX_DIV_GATE(CLK_TOP_APLL_I2SOUT6, "apll_i2sout6_m", apll_m_parents,
+ CLK_AUDDIV_0, 27, 1, CLK_AUDDIV_4, 24, 8, CLK_AUDDIV_0, 11),
+ /* CLK_AUDDIV_5 */
+ MUX_DIV_GATE(CLK_TOP_APLL_FMI2S, "apll_fmi2s_m", apll_m_parents,
+ CLK_AUDDIV_0, 28, 1, CLK_AUDDIV_5, 0, 8, CLK_AUDDIV_0, 12),
+ MUX(CLK_TOP_APLL_TDMOUT, "apll_tdmout_m",
+ apll_m_parents, CLK_AUDDIV_0, 29, 1),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_M, "apll12_div_tdmout_m",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 13, CLK_AUDDIV_5, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV_TDMOUT_B, "apll12_div_tdmout_b",
+ "apll_tdmout_m", CLK_AUDDIV_0,
+ 14, CLK_AUDDIV_5, 8, 16),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+ .composite_clks = top_aud_divs,
+ .num_composite_clks = ARRAY_SIZE(top_aud_divs)
+};
+
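+/*
+ * Peripheral drivers consume these IDs through the standard clk bindings.
+ * As a purely hypothetical example (not part of this driver), a UART node
+ * carrying
+ *
+ *   clocks = <&topckgen CLK_TOP_UART>;
+ *   clock-names = "uart";
+ *
+ * would claim and ungate the mux from its probe with:
+ *
+ *   clk = devm_clk_get_enabled(&pdev->dev, "uart");
+ *   if (IS_ERR(clk))
+ *           return PTR_ERR(clk);
+ */
+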
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-topckgen2.c b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
new file mode 100644
index 000000000000..6df93d7fbf91
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-topckgen2.c
@@ -0,0 +1,568 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+/* MUX SEL REG */
+#define CKSYS2_CLK_CFG_UPDATE 0x0004
+#define CKSYS2_CLK_CFG_0 0x0010
+#define CKSYS2_CLK_CFG_0_SET 0x0014
+#define CKSYS2_CLK_CFG_0_CLR 0x0018
+#define CKSYS2_CLK_CFG_1 0x0020
+#define CKSYS2_CLK_CFG_1_SET 0x0024
+#define CKSYS2_CLK_CFG_1_CLR 0x0028
+#define CKSYS2_CLK_CFG_2 0x0030
+#define CKSYS2_CLK_CFG_2_SET 0x0034
+#define CKSYS2_CLK_CFG_2_CLR 0x0038
+#define CKSYS2_CLK_CFG_3 0x0040
+#define CKSYS2_CLK_CFG_3_SET 0x0044
+#define CKSYS2_CLK_CFG_3_CLR 0x0048
+#define CKSYS2_CLK_CFG_4 0x0050
+#define CKSYS2_CLK_CFG_4_SET 0x0054
+#define CKSYS2_CLK_CFG_4_CLR 0x0058
+#define CKSYS2_CLK_CFG_5 0x0060
+#define CKSYS2_CLK_CFG_5_SET 0x0064
+#define CKSYS2_CLK_CFG_5_CLR 0x0068
+#define CKSYS2_CLK_CFG_6 0x0070
+#define CKSYS2_CLK_CFG_6_SET 0x0074
+#define CKSYS2_CLK_CFG_6_CLR 0x0078
+#define CKSYS2_CLK_FENC_STATUS_MON_0 0x0174
+
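+/*
+ * Each CKSYS2_CLK_CFG_n register has write-only _SET/_CLR companions, and
+ * a mux change only takes effect once its bit in CKSYS2_CLK_CFG_UPDATE is
+ * pulsed. The common mtk mux ops handle this; as a rough sketch of the
+ * assumed sequence for a field in CFG_0:
+ *
+ *   writel(mask << mux_shift, base + CKSYS2_CLK_CFG_0_CLR);
+ *   writel(sel << mux_shift, base + CKSYS2_CLK_CFG_0_SET);
+ *   writel(BIT(upd_shift), base + CKSYS2_CLK_CFG_UPDATE);
+ */
+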
+/* MUX SHIFT */
+#define TOP_MUX_SENINF0_SHIFT 0
+#define TOP_MUX_SENINF1_SHIFT 1
+#define TOP_MUX_SENINF2_SHIFT 2
+#define TOP_MUX_SENINF3_SHIFT 3
+#define TOP_MUX_SENINF4_SHIFT 4
+#define TOP_MUX_SENINF5_SHIFT 5
+#define TOP_MUX_IMG1_SHIFT 6
+#define TOP_MUX_IPE_SHIFT 7
+#define TOP_MUX_CAM_SHIFT 8
+#define TOP_MUX_CAMTM_SHIFT 9
+#define TOP_MUX_DPE_SHIFT 10
+#define TOP_MUX_VDEC_SHIFT 11
+#define TOP_MUX_CCUSYS_SHIFT 12
+#define TOP_MUX_CCUTM_SHIFT 13
+#define TOP_MUX_VENC_SHIFT 14
+#define TOP_MUX_DVO_SHIFT 15
+#define TOP_MUX_DVO_FAVT_SHIFT 16
+#define TOP_MUX_DP1_SHIFT 17
+#define TOP_MUX_DP0_SHIFT 18
+#define TOP_MUX_DISP_SHIFT 19
+#define TOP_MUX_MDP_SHIFT 20
+#define TOP_MUX_MMINFRA_SHIFT 21
+#define TOP_MUX_MMINFRA_SNOC_SHIFT 22
+#define TOP_MUX_MMUP_SHIFT 23
+#define TOP_MUX_MMINFRA_AO_SHIFT 26
+
+/* HW Voter REG */
+#define HWV_CG_30_SET 0x0058
+#define HWV_CG_30_CLR 0x005c
+#define HWV_CG_30_DONE 0x2c2c
+
+#define MM_HWV_CG_30_SET 0x00f0
+#define MM_HWV_CG_30_CLR 0x00f4
+#define MM_HWV_CG_30_DONE 0x2c78
+#define MM_HWV_CG_31_SET 0x00f8
+#define MM_HWV_CG_31_CLR 0x00fc
+#define MM_HWV_CG_31_DONE 0x2c7c
+#define MM_HWV_CG_32_SET 0x0100
+#define MM_HWV_CG_32_CLR 0x0104
+#define MM_HWV_CG_32_DONE 0x2c80
+#define MM_HWV_CG_33_SET 0x0108
+#define MM_HWV_CG_33_CLR 0x010c
+#define MM_HWV_CG_33_DONE 0x2c84
+#define MM_HWV_CG_34_SET 0x0110
+#define MM_HWV_CG_34_CLR 0x0114
+#define MM_HWV_CG_34_DONE 0x2c88
+#define MM_HWV_CG_35_SET 0x0118
+#define MM_HWV_CG_35_CLR 0x011c
+#define MM_HWV_CG_35_DONE 0x2c8c
+#define MM_HWV_CG_36_SET 0x0120
+#define MM_HWV_CG_36_CLR 0x0124
+#define MM_HWV_CG_36_DONE 0x2c90
+#define MM_HWV_MUX_UPDATE_31_0 0x0240
+
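+/*
+ * The HWV_* and MM_HWV_* offsets belong to the hardware voter: rather than
+ * owning an enable bit outright, each agent casts a vote through a
+ * _SET/_CLR pair and then polls the matching _DONE register until the
+ * hardware has acted on the aggregate vote. The MUX_GATE_HWV_* entries
+ * below hand these three offsets to the common voter-aware gate ops.
+ */
+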
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP2_MAINPLL2_D2, "mainpll2_d2", "mainpll2", 1, 2),
+ FACTOR(CLK_TOP2_MAINPLL2_D3, "mainpll2_d3", "mainpll2", 1, 3),
+ FACTOR(CLK_TOP2_MAINPLL2_D4, "mainpll2_d4", "mainpll2", 1, 4),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D2, "mainpll2_d4_d2", "mainpll2", 1, 8),
+ FACTOR(CLK_TOP2_MAINPLL2_D4_D4, "mainpll2_d4_d4", "mainpll2", 1, 16),
+ FACTOR(CLK_TOP2_MAINPLL2_D5, "mainpll2_d5", "mainpll2", 1, 5),
+ FACTOR(CLK_TOP2_MAINPLL2_D5_D2, "mainpll2_d5_d2", "mainpll2", 1, 10),
+ FACTOR(CLK_TOP2_MAINPLL2_D6, "mainpll2_d6", "mainpll2", 1, 6),
+ FACTOR(CLK_TOP2_MAINPLL2_D6_D2, "mainpll2_d6_d2", "mainpll2", 1, 12),
+ FACTOR(CLK_TOP2_MAINPLL2_D7, "mainpll2_d7", "mainpll2", 1, 7),
+ FACTOR(CLK_TOP2_MAINPLL2_D7_D2, "mainpll2_d7_d2", "mainpll2", 1, 14),
+ FACTOR(CLK_TOP2_MAINPLL2_D9, "mainpll2_d9", "mainpll2", 1, 9),
+ FACTOR(CLK_TOP2_UNIVPLL2_D3, "univpll2_d3", "univpll2", 1, 3),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4, "univpll2_d4", "univpll2", 1, 4),
+ FACTOR(CLK_TOP2_UNIVPLL2_D4_D2, "univpll2_d4_d2", "univpll2", 1, 8),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5, "univpll2_d5", "univpll2", 1, 5),
+ FACTOR(CLK_TOP2_UNIVPLL2_D5_D2, "univpll2_d5_d2", "univpll2", 1, 10),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6, "univpll2_d6", "univpll2", 1, 6),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D2, "univpll2_d6_d2", "univpll2", 1, 12),
+ FACTOR(CLK_TOP2_UNIVPLL2_D6_D4, "univpll2_d6_d4", "univpll2", 1, 24),
+ FACTOR(CLK_TOP2_UNIVPLL2_D7, "univpll2_d7", "univpll2", 1, 7),
+ FACTOR(CLK_TOP2_IMGPLL_D2, "imgpll_d2", "imgpll", 1, 2),
+ FACTOR(CLK_TOP2_IMGPLL_D4, "imgpll_d4", "imgpll", 1, 4),
+ FACTOR(CLK_TOP2_IMGPLL_D5, "imgpll_d5", "imgpll", 1, 5),
+ FACTOR(CLK_TOP2_IMGPLL_D5_D2, "imgpll_d5_d2", "imgpll", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D3, "mmpll2_d3", "mmpll2", 1, 3),
+ FACTOR(CLK_TOP2_MMPLL2_D4, "mmpll2_d4", "mmpll2", 1, 4),
+ FACTOR(CLK_TOP2_MMPLL2_D4_D2, "mmpll2_d4_d2", "mmpll2", 1, 8),
+ FACTOR(CLK_TOP2_MMPLL2_D5, "mmpll2_d5", "mmpll2", 1, 5),
+ FACTOR(CLK_TOP2_MMPLL2_D5_D2, "mmpll2_d5_d2", "mmpll2", 1, 10),
+ FACTOR(CLK_TOP2_MMPLL2_D6, "mmpll2_d6", "mmpll2", 1, 6),
+ FACTOR(CLK_TOP2_MMPLL2_D6_D2, "mmpll2_d6_d2", "mmpll2", 1, 12),
+ FACTOR(CLK_TOP2_MMPLL2_D7, "mmpll2_d7", "mmpll2", 1, 7),
+ FACTOR(CLK_TOP2_MMPLL2_D9, "mmpll2_d9", "mmpll2", 1, 9),
+ FACTOR(CLK_TOP2_TVDPLL1_D4, "tvdpll1_d4", "tvdpll1", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL1_D8, "tvdpll1_d8", "tvdpll1", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL1_D16, "tvdpll1_d16", "tvdpll1", 1, 16),
+ FACTOR(CLK_TOP2_TVDPLL2_D2, "tvdpll2_d2", "tvdpll2", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL2_D4, "tvdpll2_d4", "tvdpll2", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL2_D8, "tvdpll2_d8", "tvdpll2", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL2_D16, "tvdpll2_d16", "tvdpll2", 1, 16),
+ FACTOR(CLK_TOP2_TVDPLL3_D2, "tvdpll3_d2", "tvdpll3", 1, 2),
+ FACTOR(CLK_TOP2_TVDPLL3_D4, "tvdpll3_d4", "tvdpll3", 1, 4),
+ FACTOR(CLK_TOP2_TVDPLL3_D8, "tvdpll3_d8", "tvdpll3", 1, 8),
+ FACTOR(CLK_TOP2_TVDPLL3_D16, "tvdpll3_d16", "tvdpll3", 1, 16),
+};
+
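+/*
+ * Every FACTOR above is a fixed-ratio child: rate = parent * mult / div,
+ * so "mainpll2_d4_d2" runs at mainpll2 / 8. An equivalent open-coded
+ * registration for that one entry would be (sketch only; the table form is
+ * what this driver actually uses):
+ *
+ *   clk_hw_register_fixed_factor(NULL, "mainpll2_d4_d2", "mainpll2",
+ *                                0, 1, 8);
+ */
+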
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d8",
+ "ck_osc_d5",
+ "ck_osc_d4",
+ "univpll2_d6_d2",
+ "mainpll2_d9",
+ "ck_osc_d2",
+ "mainpll2_d4_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "univpll2_d7",
+ "mainpll2_d6",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "univpll2_d5"
+};
+
+static const char * const img1_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "mmpll2_d6_d2",
+ "ck_osc_d2",
+ "imgpll_d5_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4"
+};
+
+static const char * const ipe_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "imgpll_d5",
+ "ck_mainpll_d4",
+ "mmpll2_d5",
+ "imgpll_d4"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "ck_osc_d10",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "univpll2_d7",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "univpll2_d4",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const dpe_parents[] = {
+ "clk26m",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mmpll2_d5",
+ "imgpll_d4",
+ "mmpll2_d4"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d4_d4",
+ "mainpll2_d7_d2",
+ "mainpll2_d6_d2",
+ "mainpll2_d5_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mainpll2_d4",
+ "imgpll_d2"
+};
+
+static const char * const ccusys_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "ck_osc_d2",
+ "mmpll2_d5_d2",
+ "univpll2_d4_d2",
+ "mmpll2_d7",
+ "univpll2_d6",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const ccutm_parents[] = {
+ "clk26m",
+ "univpll2_d6_d4",
+ "ck_osc_d4",
+ "ck_osc_d3",
+ "univpll2_d6_d2"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mainpll2_d5_d2",
+ "univpll2_d5_d2",
+ "mainpll2_d4_d2",
+ "mmpll2_d9",
+ "univpll2_d4_d2",
+ "mmpll2_d4_d2",
+ "mainpll2_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "univpll2_d3"
+};
+
+static const char * const dp1_parents[] = {
+ "clk26m",
+ "tvdpll2_d16",
+ "tvdpll2_d8",
+ "tvdpll2_d4",
+ "tvdpll2_d2"
+};
+
+static const char * const dp0_parents[] = {
+ "clk26m",
+ "tvdpll1_d16",
+ "tvdpll1_d8",
+ "tvdpll1_d4",
+ "ck_tvdpll1_d2"
+};
+
+static const char * const disp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d4_d2",
+ "ck_mainpll_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mdp_parents[] = {
+ "clk26m",
+ "ck_mainpll_d5_d2",
+ "mainpll2_d5_d2",
+ "mmpll2_d6_d2",
+ "mainpll2_d9",
+ "mainpll2_d4_d2",
+ "mainpll2_d7",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d5_d2",
+ "ck_mainpll_d9",
+ "mmpll2_d6_d2",
+ "mainpll2_d4_d2",
+ "ck_mainpll_d6",
+ "univpll2_d6",
+ "mainpll2_d5",
+ "mmpll2_d6",
+ "univpll2_d5",
+ "mainpll2_d4",
+ "univpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3"
+};
+
+static const char * const mminfra_snoc_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d7_d2",
+ "ck_mainpll_d9",
+ "ck_mainpll_d7",
+ "ck_mainpll_d6",
+ "mmpll2_d4_d2",
+ "ck_mainpll_d5",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mmpll2_d4",
+ "mainpll2_d3",
+ "univpll2_d3",
+ "mmpll2_d3",
+ "mainpll2_d2"
+};
+
+static const char * const mmup_parents[] = {
+ "clk26m",
+ "mainpll2_d6",
+ "mainpll2_d5",
+ "ck_osc_d2",
+ "ck_osc",
+ "ck_mainpll_d4",
+ "univpll2_d4",
+ "mainpll2_d3"
+};
+
+static const char * const mminfra_ao_parents[] = {
+ "clk26m",
+ "ck_osc_d4",
+ "ck_mainpll_d3"
+};
+
+static const char * const dvo_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "tvdpll3_d2"
+};
+
+static const char * const dvo_favt_parents[] = {
+ "clk26m",
+ "tvdpll3_d16",
+ "tvdpll3_d8",
+ "tvdpll3_d4",
+ "vlp_apll1",
+ "vlp_apll2",
+ "tvdpll3_d2"
+};
+
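+/*
+ * For the MUX_GATE_*_FENC_* entries below, the trailing pair
+ * (CKSYS2_CLK_FENC_STATUS_MON_0, bit) names the fence status bit that the
+ * common code consults to confirm the clock state, while the three small
+ * integers before the update arguments follow the pattern visible in this
+ * table: mux field shift, mux field width, and the gate bit at shift + 7
+ * in the same CFG register.
+ */
+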
+static const struct mtk_mux top_muxes[] = {
+ /* CKSYS2_CLK_CFG_0 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF0, "seninf0", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 31),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF1, "seninf1", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 30),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF2, "seninf2", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF2_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 29),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF3, "seninf3", seninf_parents,
+ CKSYS2_CLK_CFG_0, CKSYS2_CLK_CFG_0_SET, CKSYS2_CLK_CFG_0_CLR,
+ MM_HWV_CG_30_DONE, MM_HWV_CG_30_SET, MM_HWV_CG_30_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF3_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 28),
+ /* CKSYS2_CLK_CFG_1 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF4, "seninf4", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF4_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 27),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_SENINF5, "seninf5", seninf_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_SENINF5_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 26),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IMG1, "img1", img1_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IMG1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 25),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_IPE, "ipe", ipe_parents,
+ CKSYS2_CLK_CFG_1, CKSYS2_CLK_CFG_1_SET, CKSYS2_CLK_CFG_1_CLR,
+ MM_HWV_CG_31_DONE, MM_HWV_CG_31_SET, MM_HWV_CG_31_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_IPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 24),
+ /* CKSYS2_CLK_CFG_2 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAM, "cam", cam_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 23),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CAMTM, "camtm", camtm_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CAMTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 22),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DPE, "dpe", dpe_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DPE_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 21),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VDEC, "vdec", vdec_parents,
+ CKSYS2_CLK_CFG_2, CKSYS2_CLK_CFG_2_SET, CKSYS2_CLK_CFG_2_CLR,
+ MM_HWV_CG_32_DONE, MM_HWV_CG_32_SET, MM_HWV_CG_32_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VDEC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 20),
+ /* CKSYS2_CLK_CFG_3 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUSYS, "ccusys", ccusys_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUSYS_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 19),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_CCUTM, "ccutm", ccutm_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_CCUTM_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 18),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_VENC, "venc", venc_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ MM_HWV_CG_33_DONE, MM_HWV_CG_33_SET, MM_HWV_CG_33_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_VENC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 17),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO, "dvo", dvo_parents,
+ CKSYS2_CLK_CFG_3, CKSYS2_CLK_CFG_3_SET, CKSYS2_CLK_CFG_3_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 16),
+ /* CKSYS2_CLK_CFG_4 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DVO_FAVT, "dvo_favt", dvo_favt_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 0, 3, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DVO_FAVT_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 15),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP1, "dp1", dp1_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 8, 3, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP1_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 14),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_DP0, "dp0", dp0_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ 16, 3, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DP0_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_DISP, "disp", disp_parents,
+ CKSYS2_CLK_CFG_4, CKSYS2_CLK_CFG_4_SET, CKSYS2_CLK_CFG_4_CLR,
+ MM_HWV_CG_34_DONE, MM_HWV_CG_34_SET, MM_HWV_CG_34_CLR,
+ 24, 4, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_DISP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 12),
+ /* CKSYS2_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MDP, "mdp", mdp_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 0, 4, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MDP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA, "mminfra", mminfra_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 8, 4, 15, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_SNOC, "mminfra_snoc", mminfra_snoc_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ MM_HWV_CG_35_DONE, MM_HWV_CG_35_SET, MM_HWV_CG_35_CLR,
+ 16, 4, 23, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_SNOC_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 9),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_TOP2_MMUP, "mmup", mmup_parents,
+ CKSYS2_CLK_CFG_5, CKSYS2_CLK_CFG_5_SET, CKSYS2_CLK_CFG_5_CLR,
+ 24, 3, 31, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMUP_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 8),
+ /* CKSYS2_CLK_CFG_6 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_TOP2_MMINFRA_AO, "mminfra_ao", mminfra_ao_parents,
+ CKSYS2_CLK_CFG_6, CKSYS2_CLK_CFG_6_SET, CKSYS2_CLK_CFG_6_CLR,
+ MM_HWV_CG_36_DONE, MM_HWV_CG_36_SET, MM_HWV_CG_36_CLR,
+ 0, 2, 7, CKSYS2_CLK_CFG_UPDATE, TOP_MUX_MMINFRA_AO_SHIFT,
+ CKSYS2_CLK_FENC_STATUS_MON_0, 5),
+};
+
+static const struct mtk_clk_desc topck_desc = {
+ .factor_clks = top_divs,
+ .num_factor_clks = ARRAY_SIZE(top_divs),
+ .mux_clks = top_muxes,
+ .num_mux_clks = ARRAY_SIZE(top_muxes),
+};
+
+static const struct of_device_id of_match_clk_mt8196_ck[] = {
+ { .compatible = "mediatek,mt8196-topckgen-gp2", .data = &topck_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ck);
+
+static struct platform_driver clk_mt8196_topck_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-topck2",
+ .of_match_table = of_match_clk_mt8196_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 GP2 top clock generators driver");
+module_platform_driver(clk_mt8196_topck_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-ufs_ao.c b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
new file mode 100644
index 000000000000..0c04717b7b4b
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-ufs_ao.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+#include <dt-bindings/reset/mediatek,mt8196-resets.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#define MT8196_UFSAO_RST0_SET_OFFSET 0x48
+#define MT8196_UFSAO_RST1_SET_OFFSET 0x148
+
+static const struct mtk_gate_regs ufsao0_cg_regs = {
+ .set_ofs = 0x108,
+ .clr_ofs = 0x10c,
+ .sta_ofs = 0x104,
+};
+
+static const struct mtk_gate_regs ufsao1_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x4,
+};
+
+#define GATE_UFSAO0(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao0_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_UFSAO1(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ufsao1_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate ufsao_clks[] = {
+ /* UFSAO0 */
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_UFS, "ufsao_ufshci_ufs", "ufs", 0),
+ GATE_UFSAO0(CLK_UFSAO_UFSHCI_AES, "ufsao_ufshci_aes", "aes_ufsfde", 1),
+ /* UFSAO1 */
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_TX_SYM, "ufsao_unipro_tx_sym", "clk26m", 0),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM0, "ufsao_unipro_rx_sym0", "clk26m", 1),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_RX_SYM1, "ufsao_unipro_rx_sym1", "clk26m", 2),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SYS, "ufsao_unipro_sys", "ufs", 3),
+ GATE_UFSAO1(CLK_UFSAO_UNIPRO_SAP, "ufsao_unipro_sap", "clk26m", 4),
+ GATE_UFSAO1(CLK_UFSAO_PHY_SAP, "ufsao_phy_sap", "clk26m", 8),
+};
+
+static u16 ufsao_rst_ofs[] = {
+ MT8196_UFSAO_RST0_SET_OFFSET,
+ MT8196_UFSAO_RST1_SET_OFFSET
+};
+
+static u16 ufsao_rst_idx_map[] = {
+ [MT8196_UFSAO_RST0_UFS_MPHY] = 8,
+ [MT8196_UFSAO_RST1_UFS_UNIPRO] = 1 * RST_NR_PER_BANK + 0,
+ [MT8196_UFSAO_RST1_UFS_CRYPTO] = 1 * RST_NR_PER_BANK + 1,
+ [MT8196_UFSAO_RST1_UFSHCI] = 1 * RST_NR_PER_BANK + 2,
+};
+
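+/*
+ * The reset indices exposed by the DT binding are sparse, so rst_idx_map
+ * flattens each of them to bank * RST_NR_PER_BANK + bit across the two
+ * SET-offset banks above; MT8196_UFSAO_RST1_UFSHCI, for instance, resolves
+ * to bit 2 of the bank at MT8196_UFSAO_RST1_SET_OFFSET.
+ */
+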
+static const struct mtk_clk_rst_desc ufsao_rst_desc = {
+ .version = MTK_RST_SET_CLR,
+ .rst_bank_ofs = ufsao_rst_ofs,
+ .rst_bank_nr = ARRAY_SIZE(ufsao_rst_ofs),
+ .rst_idx_map = ufsao_rst_idx_map,
+ .rst_idx_map_nr = ARRAY_SIZE(ufsao_rst_idx_map),
+};
+
+static const struct mtk_clk_desc ufsao_mcd = {
+ .clks = ufsao_clks,
+ .num_clks = ARRAY_SIZE(ufsao_clks),
+ .rst_desc = &ufsao_rst_desc,
+};
+
+static const struct of_device_id of_match_clk_mt8196_ufs_ao[] = {
+ { .compatible = "mediatek,mt8196-ufscfg-ao", .data = &ufsao_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_ufs_ao);
+
+static struct platform_driver clk_mt8196_ufs_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-ufs-ao",
+ .of_match_table = of_match_clk_mt8196_ufs_ao,
+ },
+};
+
+module_platform_driver(clk_mt8196_ufs_ao_drv);
+MODULE_DESCRIPTION("MediaTek MT8196 ufs_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdec.c b/drivers/clk/mediatek/clk-mt8196-vdec.c
new file mode 100644
index 000000000000..f8dcd84a2b58
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdec.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs vde20_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vde20_hwv_regs = {
+ .set_ofs = 0x0088,
+ .clr_ofs = 0x008c,
+ .sta_ofs = 0x2c44,
+};
+
+static const struct mtk_gate_regs vde21_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde21_hwv_regs = {
+ .set_ofs = 0x0080,
+ .clr_ofs = 0x0084,
+ .sta_ofs = 0x2c40,
+};
+
+static const struct mtk_gate_regs vde22_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+static const struct mtk_gate_regs vde22_hwv_regs = {
+ .set_ofs = 0x0078,
+ .clr_ofs = 0x007c,
+ .sta_ofs = 0x2c3c,
+};
+
+#define GATE_HWV_VDE20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde21_cg_regs, \
+ .hwv_regs = &vde21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE22(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde22_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
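+/*
+ * Each GATE_HWV_* above pairs a plain CG bank (regs) with a hardware-voter
+ * bank (hwv_regs): enable requests are voted through the hwv set/clr
+ * offsets, and the _inv ops read the CG bit as "set means running". Note
+ * that vde20_cg_regs reads status back through the same 0x0 offset it sets
+ * through.
+ */
+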
+static const struct mtk_gate vde2_clks[] = {
+ /* VDE20 */
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN, "vde2_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_ACTIVE, "vde2_vdec_active", "vdec", 4),
+ GATE_HWV_VDE20(CLK_VDE2_VDEC_CKEN_ENG, "vde2_vdec_cken_eng", "vdec", 8),
+ /* VDE21 */
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN, "vde2_lat_cken", "vdec", 0),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_ACTIVE, "vde2_lat_active", "vdec", 4),
+ GATE_HWV_VDE21(CLK_VDE2_LAT_CKEN_ENG, "vde2_lat_cken_eng", "vdec", 8),
+ /* VDE22 */
+ GATE_HWV_VDE22(CLK_VDE2_LARB1_CKEN, "vde2_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde2_mcd = {
+ .clks = vde2_clks,
+ .num_clks = ARRAY_SIZE(vde2_clks),
+ .need_runtime_pm = true,
+};
+
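+/*
+ * need_runtime_pm tells mtk_clk_simple_probe to enable runtime PM on the
+ * provider and resume it around registration, so the gate registers above
+ * are only touched while the VDEC power domain is up.
+ */
+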
+static const struct mtk_gate_regs vde10_hwv_regs = {
+ .set_ofs = 0x00a0,
+ .clr_ofs = 0x00a4,
+ .sta_ofs = 0x2c50,
+};
+
+static const struct mtk_gate_regs vde11_cg_regs = {
+ .set_ofs = 0x1e0,
+ .clr_ofs = 0x1e0,
+ .sta_ofs = 0x1e0,
+};
+
+static const struct mtk_gate_regs vde11_hwv_regs = {
+ .set_ofs = 0x00b0,
+ .clr_ofs = 0x00b4,
+ .sta_ofs = 0x2c58,
+};
+
+static const struct mtk_gate_regs vde12_cg_regs = {
+ .set_ofs = 0x1ec,
+ .clr_ofs = 0x1ec,
+ .sta_ofs = 0x1ec,
+};
+
+static const struct mtk_gate_regs vde12_hwv_regs = {
+ .set_ofs = 0x00a8,
+ .clr_ofs = 0x00ac,
+ .sta_ofs = 0x2c54,
+};
+
+static const struct mtk_gate_regs vde13_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vde13_hwv_regs = {
+ .set_ofs = 0x0098,
+ .clr_ofs = 0x009c,
+ .sta_ofs = 0x2c4c,
+};
+
+static const struct mtk_gate_regs vde14_hwv_regs = {
+ .set_ofs = 0x0090,
+ .clr_ofs = 0x0094,
+ .sta_ofs = 0x2c48,
+};
+
+#define GATE_HWV_VDE10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde20_cg_regs, \
+ .hwv_regs = &vde10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde11_cg_regs, \
+ .hwv_regs = &vde11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE12(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde12_cg_regs, \
+ .hwv_regs = &vde12_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = CLK_OPS_PARENT_ENABLE \
+ }
+
+#define GATE_HWV_VDE13(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde13_cg_regs, \
+ .hwv_regs = &vde13_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VDE14(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &vde22_cg_regs, \
+ .hwv_regs = &vde14_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IGNORE_UNUSED, \
+ }
+
+static const struct mtk_gate vde1_clks[] = {
+ /* VDE10 */
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN, "vde1_vdec_cken", "vdec", 0),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_ACTIVE, "vde1_vdec_active", "vdec", 4),
+ GATE_HWV_VDE10(CLK_VDE1_VDEC_CKEN_ENG, "vde1_vdec_cken_eng", "vdec", 8),
+ /* VDE11 */
+ GATE_HWV_VDE11(CLK_VDE1_VDEC_SOC_IPS_EN, "vde1_vdec_soc_ips_en", "vdec", 0),
+ /* VDE12 */
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_EN, "vde1_aptv_en", "ck_tck_26m_mx9_ck", 0),
+ GATE_HWV_VDE12(CLK_VDE1_VDEC_SOC_APTV_TOP_EN, "vde1_aptv_topen", "ck_tck_26m_mx9_ck", 1),
+ /* VDE13 */
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN, "vde1_lat_cken", "vdec", 0),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_ACTIVE, "vde1_lat_active", "vdec", 4),
+ GATE_HWV_VDE13(CLK_VDE1_LAT_CKEN_ENG, "vde1_lat_cken_eng", "vdec", 8),
+ /* VDE14 */
+ GATE_HWV_VDE14(CLK_VDE1_LARB1_CKEN, "vde1_larb1_cken", "vdec", 0),
+};
+
+static const struct mtk_clk_desc vde1_mcd = {
+ .clks = vde1_clks,
+ .num_clks = ARRAY_SIZE(vde1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdec[] = {
+ { .compatible = "mediatek,mt8196-vdecsys", .data = &vde2_mcd },
+ { .compatible = "mediatek,mt8196-vdecsys-soc", .data = &vde1_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdec);
+
+static struct platform_driver clk_mt8196_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-vdec",
+ .of_match_table = of_match_clk_mt8196_vdec,
+ },
+};
+module_platform_driver(clk_mt8196_vdec_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Decoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
new file mode 100644
index 000000000000..fddb69d1c3eb
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vdisp_ao.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs mm_v_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs mm_v_hwv_regs = {
+ .set_ofs = 0x0030,
+ .clr_ofs = 0x0034,
+ .sta_ofs = 0x2c18,
+};
+
+#define GATE_MM_AO_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE | \
+ CLK_IS_CRITICAL, \
+ }
+
+#define GATE_HWV_MM_V(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &mm_v_cg_regs, \
+ .hwv_regs = &mm_v_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
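+/*
+ * GATE_MM_AO_V gates carry CLK_IS_CRITICAL: this always-on display path
+ * must keep clocking even with no software consumer, so the clk core is
+ * told never to disable them. The GATE_HWV_MM_V gates instead share the
+ * block with other agents through the hardware-voter bank above.
+ */
+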
+static const struct mtk_gate mm_v_clks[] = {
+ GATE_HWV_MM_V(CLK_MM_V_DISP_VDISP_AO_CONFIG, "mm_v_disp_vdisp_ao_config", "disp", 0),
+ GATE_HWV_MM_V(CLK_MM_V_DISP_DPC, "mm_v_disp_dpc", "disp", 16),
+ GATE_MM_AO_V(CLK_MM_V_SMI_SUB_SOMM0, "mm_v_smi_sub_somm0", "disp", 2),
+};
+
+static const struct mtk_clk_desc mm_v_mcd = {
+ .clks = mm_v_clks,
+ .num_clks = ARRAY_SIZE(mm_v_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8196_vdisp_ao[] = {
+ { .compatible = "mediatek,mt8196-vdisp-ao", .data = &mm_v_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vdisp_ao);
+
+static struct platform_driver clk_mt8196_vdisp_ao_drv = {
+ .probe = mtk_clk_pdev_probe,
+ .remove = mtk_clk_pdev_remove,
+ .driver = {
+ .name = "clk-mt8196-vdisp-ao",
+ .of_match_table = of_match_clk_mt8196_vdisp_ao,
+ },
+};
+module_platform_driver(clk_mt8196_vdisp_ao_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 vdisp_ao clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-venc.c b/drivers/clk/mediatek/clk-mt8196-venc.c
new file mode 100644
index 000000000000..13e2e36e945f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-venc.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+static const struct mtk_gate_regs ven10_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs ven10_hwv_regs = {
+ .set_ofs = 0x00b8,
+ .clr_ofs = 0x00bc,
+ .sta_ofs = 0x2c5c,
+};
+
+static const struct mtk_gate_regs ven11_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x10,
+};
+
+static const struct mtk_gate_regs ven11_hwv_regs = {
+ .set_ofs = 0x00c0,
+ .clr_ofs = 0x00c4,
+ .sta_ofs = 0x2c60,
+};
+
+#define GATE_VEN10(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven10_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv, \
+ .flags = (_flags) | \
+ CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN10(_id, _name, _parent, _shift) \
+ GATE_HWV_VEN10_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_HWV_VEN11(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven11_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE \
+ }
+
+static const struct mtk_gate ven1_clks[] = {
+ /* VEN10 */
+ GATE_HWV_VEN10(CLK_VEN1_CKE0_LARB, "ven1_larb", "venc", 0),
+ GATE_HWV_VEN10(CLK_VEN1_CKE1_VENC, "ven1_venc", "venc", 4),
+ GATE_VEN10(CLK_VEN1_CKE2_JPGENC, "ven1_jpgenc", "venc", 8),
+ GATE_VEN10(CLK_VEN1_CKE3_JPGDEC, "ven1_jpgdec", "venc", 12),
+ GATE_VEN10(CLK_VEN1_CKE4_JPGDEC_C1, "ven1_jpgdec_c1", "venc", 16),
+ GATE_HWV_VEN10(CLK_VEN1_CKE5_GALS, "ven1_gals", "venc", 28),
+ GATE_HWV_VEN10(CLK_VEN1_CKE29_VENC_ADAB_CTRL, "ven1_venc_adab_ctrl",
+ "venc", 29),
+ GATE_HWV_VEN10_FLAGS(CLK_VEN1_CKE29_VENC_XPC_CTRL,
+ "ven1_venc_xpc_ctrl", "venc", 30,
+ CLK_IGNORE_UNUSED),
+ GATE_HWV_VEN10(CLK_VEN1_CKE6_GALS_SRAM, "ven1_gals_sram", "venc", 31),
+ /* VEN11 */
+ GATE_HWV_VEN11(CLK_VEN1_RES_FLAT, "ven1_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven1_mcd = {
+ .clks = ven1_clks,
+ .num_clks = ARRAY_SIZE(ven1_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven20_hwv_regs = {
+ .set_ofs = 0x00c8,
+ .clr_ofs = 0x00cc,
+ .sta_ofs = 0x2c64,
+};
+
+static const struct mtk_gate_regs ven21_hwv_regs = {
+ .set_ofs = 0x00d0,
+ .clr_ofs = 0x00d4,
+ .sta_ofs = 0x2c68,
+};
+
+#define GATE_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .shift = _shift, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ .ops = &mtk_clk_gate_ops_setclr_inv, \
+ }
+
+#define GATE_HWV_VEN20(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN21(_id, _name, _parent, _shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE \
+ }
+
+static const struct mtk_gate ven2_clks[] = {
+ /* VEN20 */
+ GATE_HWV_VEN20(CLK_VEN2_CKE0_LARB, "ven2_larb", "venc", 0),
+ GATE_HWV_VEN20(CLK_VEN2_CKE1_VENC, "ven2_venc", "venc", 4),
+ GATE_VEN20(CLK_VEN2_CKE2_JPGENC, "ven2_jpgenc", "venc", 8),
+ GATE_VEN20(CLK_VEN2_CKE3_JPGDEC, "ven2_jpgdec", "venc", 12),
+ GATE_HWV_VEN20(CLK_VEN2_CKE5_GALS, "ven2_gals", "venc", 28),
+ GATE_HWV_VEN20(CLK_VEN2_CKE29_VENC_XPC_CTRL, "ven2_venc_xpc_ctrl", "venc", 30),
+ GATE_HWV_VEN20(CLK_VEN2_CKE6_GALS_SRAM, "ven2_gals_sram", "venc", 31),
+ /* VEN21 */
+ GATE_HWV_VEN21(CLK_VEN2_RES_FLAT, "ven2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven2_mcd = {
+ .clks = ven2_clks,
+ .num_clks = ARRAY_SIZE(ven2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct mtk_gate_regs ven_c20_hwv_regs = {
+ .set_ofs = 0x00d8,
+ .clr_ofs = 0x00dc,
+ .sta_ofs = 0x2c6c,
+};
+
+static const struct mtk_gate_regs ven_c21_hwv_regs = {
+ .set_ofs = 0x00e0,
+ .clr_ofs = 0x00e4,
+ .sta_ofs = 0x2c70,
+};
+
+#define GATE_HWV_VEN_C20(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven10_cg_regs, \
+ .hwv_regs = &ven_c20_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr_inv,\
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+#define GATE_HWV_VEN_C21(_id, _name, _parent, _shift) {\
+ .id = _id, \
+ .name = _name, \
+ .parent_name = _parent, \
+ .regs = &ven11_cg_regs, \
+ .hwv_regs = &ven_c21_hwv_regs, \
+ .shift = _shift, \
+ .ops = &mtk_clk_gate_hwv_ops_setclr, \
+ .flags = CLK_OPS_PARENT_ENABLE, \
+ }
+
+static const struct mtk_gate ven_c2_clks[] = {
+ /* VEN_C20 */
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE0_LARB, "ven_c2_larb", "venc", 0),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE1_VENC, "ven_c2_venc", "venc", 4),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE5_GALS, "ven_c2_gals", "venc", 28),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE29_VENC_XPC_CTRL, "ven_c2_venc_xpc_ctrl",
+ "venc", 30),
+ GATE_HWV_VEN_C20(CLK_VEN_C2_CKE6_GALS_SRAM, "ven_c2_gals_sram", "venc", 31),
+ /* VEN_C21 */
+ GATE_HWV_VEN_C21(CLK_VEN_C2_RES_FLAT, "ven_c2_res_flat", "venc", 0),
+};
+
+static const struct mtk_clk_desc ven_c2_mcd = {
+ .clks = ven_c2_clks,
+ .num_clks = ARRAY_SIZE(ven_c2_clks),
+ .need_runtime_pm = true,
+};
+
+static const struct of_device_id of_match_clk_mt8196_venc[] = {
+ { .compatible = "mediatek,mt8196-vencsys", .data = &ven1_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c1", .data = &ven2_mcd },
+ { .compatible = "mediatek,mt8196-vencsys-c2", .data = &ven_c2_mcd },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_venc);
+
+static struct platform_driver clk_mt8196_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .remove = mtk_clk_simple_remove,
+ .driver = {
+ .name = "clk-mt8196-venc",
+ .of_match_table = of_match_clk_mt8196_venc,
+ },
+};
+module_platform_driver(clk_mt8196_venc_drv);
+
+MODULE_DESCRIPTION("MediaTek MT8196 Video Encoders clocks driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8196-vlpckgen.c b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
new file mode 100644
index 000000000000..d59a8a9d9855
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8196-vlpckgen.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 MediaTek Inc.
+ * Guangjie Song <guangjie.song@mediatek.com>
+ * Copyright (c) 2025 Collabora Ltd.
+ * Laura Nao <laura.nao@collabora.com>
+ */
+#include <dt-bindings/clock/mediatek,mt8196-clock.h>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-mtk.h"
+#include "clk-mux.h"
+#include "clk-pll.h"
+
+/* MUX SEL REG */
+#define VLP_CLK_CFG_UPDATE 0x0004
+#define VLP_CLK_CFG_UPDATE1 0x0008
+#define VLP_CLK_CFG_0 0x0010
+#define VLP_CLK_CFG_0_SET 0x0014
+#define VLP_CLK_CFG_0_CLR 0x0018
+#define VLP_CLK_CFG_1 0x0020
+#define VLP_CLK_CFG_1_SET 0x0024
+#define VLP_CLK_CFG_1_CLR 0x0028
+#define VLP_CLK_CFG_2 0x0030
+#define VLP_CLK_CFG_2_SET 0x0034
+#define VLP_CLK_CFG_2_CLR 0x0038
+#define VLP_CLK_CFG_3 0x0040
+#define VLP_CLK_CFG_3_SET 0x0044
+#define VLP_CLK_CFG_3_CLR 0x0048
+#define VLP_CLK_CFG_4 0x0050
+#define VLP_CLK_CFG_4_SET 0x0054
+#define VLP_CLK_CFG_4_CLR 0x0058
+#define VLP_CLK_CFG_5 0x0060
+#define VLP_CLK_CFG_5_SET 0x0064
+#define VLP_CLK_CFG_5_CLR 0x0068
+#define VLP_CLK_CFG_6 0x0070
+#define VLP_CLK_CFG_6_SET 0x0074
+#define VLP_CLK_CFG_6_CLR 0x0078
+#define VLP_CLK_CFG_7 0x0080
+#define VLP_CLK_CFG_7_SET 0x0084
+#define VLP_CLK_CFG_7_CLR 0x0088
+#define VLP_CLK_CFG_8 0x0090
+#define VLP_CLK_CFG_8_SET 0x0094
+#define VLP_CLK_CFG_8_CLR 0x0098
+#define VLP_CLK_CFG_9 0x00a0
+#define VLP_CLK_CFG_9_SET 0x00a4
+#define VLP_CLK_CFG_9_CLR 0x00a8
+#define VLP_CLK_CFG_10 0x00b0
+#define VLP_CLK_CFG_10_SET 0x00b4
+#define VLP_CLK_CFG_10_CLR 0x00b8
+#define VLP_OCIC_FENC_STATUS_MON_0 0x039c
+#define VLP_OCIC_FENC_STATUS_MON_1 0x03a0
+
+/* MUX SHIFT */
+#define TOP_MUX_SCP_SHIFT 0
+#define TOP_MUX_SCP_SPI_SHIFT 1
+#define TOP_MUX_SCP_IIC_SHIFT 2
+#define TOP_MUX_SCP_IIC_HS_SHIFT 3
+#define TOP_MUX_PWRAP_ULPOSC_SHIFT 4
+#define TOP_MUX_SPMI_M_TIA_32K_SHIFT 5
+#define TOP_MUX_APXGPT_26M_B_SHIFT 6
+#define TOP_MUX_DPSW_SHIFT 7
+#define TOP_MUX_DPSW_CENTRAL_SHIFT 8
+#define TOP_MUX_SPMI_M_MST_SHIFT 9
+#define TOP_MUX_DVFSRC_SHIFT 10
+#define TOP_MUX_PWM_VLP_SHIFT 11
+#define TOP_MUX_AXI_VLP_SHIFT 12
+#define TOP_MUX_SYSTIMER_26M_SHIFT 13
+#define TOP_MUX_SSPM_SHIFT 14
+#define TOP_MUX_SRCK_SHIFT 15
+#define TOP_MUX_CAMTG0_SHIFT 16
+#define TOP_MUX_CAMTG1_SHIFT 17
+#define TOP_MUX_CAMTG2_SHIFT 18
+#define TOP_MUX_CAMTG3_SHIFT 19
+#define TOP_MUX_CAMTG4_SHIFT 20
+#define TOP_MUX_CAMTG5_SHIFT 21
+#define TOP_MUX_CAMTG6_SHIFT 22
+#define TOP_MUX_CAMTG7_SHIFT 23
+#define TOP_MUX_SSPM_26M_SHIFT 25
+#define TOP_MUX_ULPOSC_SSPM_SHIFT 26
+#define TOP_MUX_VLP_PBUS_26M_SHIFT 27
+#define TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT 28
+#define TOP_MUX_DPMSRDMA_SHIFT 29
+#define TOP_MUX_VLP_PBUS_156M_SHIFT 30
+#define TOP_MUX_SPM_SHIFT 0
+#define TOP_MUX_MMINFRA_VLP_SHIFT 1
+#define TOP_MUX_USB_TOP_SHIFT 2
+#define TOP_MUX_SSUSB_XHCI_SHIFT 3
+#define TOP_MUX_NOC_VLP_SHIFT 4
+#define TOP_MUX_AUDIO_H_SHIFT 5
+#define TOP_MUX_AUD_ENGEN1_SHIFT 6
+#define TOP_MUX_AUD_ENGEN2_SHIFT 7
+#define TOP_MUX_AUD_INTBUS_SHIFT 8
+#define TOP_MUX_SPU_VLP_26M_SHIFT 9
+#define TOP_MUX_SPU0_VLP_SHIFT 10
+#define TOP_MUX_SPU1_VLP_SHIFT 11
+
+/* CKSTA REG */
+#define VLP_CKSTA_REG0 0x0250
+#define VLP_CKSTA_REG1 0x0254
+
+/* HW Voter REG */
+#define HWV_CG_9_SET 0x0048
+#define HWV_CG_9_CLR 0x004c
+#define HWV_CG_9_DONE 0x2c24
+#define HWV_CG_10_SET 0x0050
+#define HWV_CG_10_CLR 0x0054
+#define HWV_CG_10_DONE 0x2c28
+
+/* PLL REG */
+#define VLP_AP_PLL_CON3 0x264
+#define VLP_APLL1_TUNER_CON0 0x2a4
+#define VLP_APLL2_TUNER_CON0 0x2a8
+#define VLP_APLL1_CON0 0x274
+#define VLP_APLL1_CON1 0x278
+#define VLP_APLL1_CON2 0x27c
+#define VLP_APLL1_CON3 0x280
+#define VLP_APLL2_CON0 0x28c
+#define VLP_APLL2_CON1 0x290
+#define VLP_APLL2_CON2 0x294
+#define VLP_APLL2_CON3 0x298
+
+/* vlp apll1 tuner default value*/
+#define VLP_APLL1_TUNER_CON0_VALUE 0x6f28bd4d
+/* vlp apll2 tuner default value + 1*/
+#define VLP_APLL2_TUNER_CON0_VALUE 0x78fd5265
+
+#define VLP_PLLEN_ALL 0x080
+#define VLP_PLLEN_ALL_SET 0x084
+#define VLP_PLLEN_ALL_CLR 0x088
+
+#define MT8196_PLL_FMAX (3800UL * MHZ)
+#define MT8196_PLL_FMIN (1500UL * MHZ)
+#define MT8196_INTEGER_BITS 8
+
+#define PLL_FENC(_id, _name, _reg, _fenc_sta_ofs, _fenc_sta_bit,\
+ _flags, _pd_reg, _pd_shift, \
+ _pcw_reg, _pcw_shift, _pcwbits, \
+ _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .fenc_sta_ofs = _fenc_sta_ofs, \
+ .fenc_sta_bit = _fenc_sta_bit, \
+ .flags = _flags, \
+ .fmax = MT8196_PLL_FMAX, \
+ .fmin = MT8196_PLL_FMIN, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8196_INTEGER_BITS, \
+ .en_reg = VLP_PLLEN_ALL, \
+ .en_set_reg = VLP_PLLEN_ALL_SET, \
+ .en_clr_reg = VLP_PLLEN_ALL_CLR, \
+ .pll_en_bit = _pll_en_bit, \
+ .ops = &mtk_pll_fenc_clr_set_ops, \
+}
+
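+/*
+ * PLL_FENC instances are enabled collectively through the VLP_PLLEN_ALL
+ * set/clear registers, one pll_en_bit per PLL, instead of a per-PLL enable
+ * in CON0; fenc_sta_ofs/fenc_sta_bit name the fence status bit that the
+ * fenc-aware PLL ops check before trusting the PLL state.
+ */
+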
+static DEFINE_SPINLOCK(mt8196_clk_vlp_lock);
+
+static const struct mtk_fixed_factor vlp_divs[] = {
+ FACTOR(CLK_VLP_CLK26M, "vlp_clk26m", "clk26m", 1, 1),
+ FACTOR(CLK_VLP_APLL1_D4, "apll1_d4", "vlp_apll1", 1, 4),
+ FACTOR(CLK_VLP_APLL1_D8, "apll1_d8", "vlp_apll1", 1, 8),
+ FACTOR(CLK_VLP_APLL2_D4, "apll2_d4", "vlp_apll2", 1, 4),
+ FACTOR(CLK_VLP_APLL2_D8, "apll2_d8", "vlp_apll2", 1, 8),
+};
+
+static const char * const vlp_scp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d6",
+ "mainpll_d4",
+ "mainpll_d3",
+ "vlp_apll1"
+};
+
+static const char * const vlp_scp_spi_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const vlp_scp_iic_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_scp_iic_hs_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d4",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_pwrap_ulposc_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_spmi_32k_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_apxgpt_26m_b_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpsw_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_dpsw_central_parents[] = {
+ "clk26m",
+ "osc_d10",
+ "osc_d7",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_spmi_m_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "osc_d14",
+ "osc_d10"
+};
+
+static const char * const vlp_dvfsrc_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_pwm_vlp_parents[] = {
+ "clk26m",
+ "clk32k",
+ "osc_d20",
+ "osc_d8",
+ "mainpll_d4_d8"
+};
+
+static const char * const vlp_axi_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d7_d4",
+ "osc_d4",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_systimer_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_sspm_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d5_d2",
+ "osc_d2",
+ "mainpll_d6"
+};
+
+static const char * const vlp_srck_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_camtg0_1_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "ulposc3",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_camtg2_7_parents[] = {
+ "clk26m",
+ "univpll_192m_d32",
+ "univpll_192m_d16",
+ "clk13m",
+ "osc_d40",
+ "osc_d32",
+ "univpll_192m_d10",
+ "univpll_192m_d8",
+ "univpll_d6_d16",
+ "osc_d20",
+ "ck2_tvdpll1_d16",
+ "univpll_d6_d8"
+};
+
+static const char * const vlp_sspm_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_ulposc_sspm_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d4_d2"
+};
+
+static const char * const vlp_vlp_pbus_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_debug_err_flag_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_dpmsrdma_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2"
+};
+
+static const char * const vlp_vlp_pbus_156m_parents[] = {
+ "clk26m",
+ "osc_d2",
+ "mainpll_d7_d2",
+ "mainpll_d7"
+};
+
+static const char * const vlp_spm_parents[] = {
+ "clk26m",
+ "mainpll_d7_d4"
+};
+
+static const char * const vlp_mminfra_parents[] = {
+ "clk26m",
+ "osc_d4",
+ "mainpll_d3"
+};
+
+static const char * const vlp_usb_parents[] = {
+ "clk26m",
+ "mainpll_d9"
+};
+
+static const char * const vlp_noc_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d9"
+};
+
+static const char * const vlp_audio_h_parents[] = {
+ "vlp_clk26m",
+ "vlp_apll1",
+ "vlp_apll2"
+};
+
+static const char * const vlp_aud_engen1_parents[] = {
+ "vlp_clk26m",
+ "apll1_d8",
+ "apll1_d4"
+};
+
+static const char * const vlp_aud_engen2_parents[] = {
+ "vlp_clk26m",
+ "apll2_d8",
+ "apll2_d4"
+};
+
+static const char * const vlp_aud_intbus_parents[] = {
+ "vlp_clk26m",
+ "mainpll_d7_d4",
+ "mainpll_d4_d4"
+};
+
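+/* The audio muxes map their three parents to hardware select values 1..3 */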
+static const u8 vlp_aud_parent_index[] = { 1, 2, 3 };
+
+static const char * const vlp_spvlp_26m_parents[] = {
+ "clk26m",
+ "osc_d20"
+};
+
+static const char * const vlp_spu0_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const char * const vlp_spu1_vlp_parents[] = {
+ "clk26m",
+ "osc_d20",
+ "mainpll_d4_d4",
+ "mainpll_d4_d2",
+ "mainpll_d7",
+ "mainpll_d6",
+ "mainpll_d5"
+};
+
+static const struct mtk_mux vlp_muxes[] = {
+ /* VLP_CLK_CFG_0 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_SCP, "vlp_scp", vlp_scp_parents,
+ VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET, VLP_CLK_CFG_0_CLR,
+ 0, 3, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 31),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_SPI, "vlp_scp_spi",
+ vlp_scp_spi_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_SPI_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC, "vlp_scp_iic",
+ vlp_scp_iic_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SCP_IIC_HS, "vlp_scp_iic_hs",
+ vlp_scp_iic_hs_parents, VLP_CLK_CFG_0, VLP_CLK_CFG_0_SET,
+ VLP_CLK_CFG_0_CLR, 24, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SCP_IIC_HS_SHIFT),
+ /* VLP_CLK_CFG_1 */
+ MUX_CLR_SET_UPD(CLK_VLP_PWRAP_ULPOSC, "vlp_pwrap_ulposc",
+ vlp_pwrap_ulposc_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_PWRAP_ULPOSC_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_TIA_32K, "vlp_spmi_32k",
+ vlp_spmi_32k_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_TIA_32K_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_APXGPT_26M_B, "vlp_apxgpt_26m_b",
+ vlp_apxgpt_26m_b_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_APXGPT_26M_B_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW, "vlp_dpsw",
+ vlp_dpsw_parents, VLP_CLK_CFG_1, VLP_CLK_CFG_1_SET,
+ VLP_CLK_CFG_1_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_SHIFT),
+ /* VLP_CLK_CFG_2 */
+ MUX_CLR_SET_UPD(CLK_VLP_DPSW_CENTRAL, "vlp_dpsw_central",
+ vlp_dpsw_central_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 0, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPSW_CENTRAL_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPMI_M_MST, "vlp_spmi_m",
+ vlp_spmi_m_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 8, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SPMI_M_MST_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DVFSRC, "vlp_dvfsrc",
+ vlp_dvfsrc_parents, VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET,
+ VLP_CLK_CFG_2_CLR, 16, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DVFSRC_SHIFT),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_PWM_VLP, "vlp_pwm_vlp", vlp_pwm_vlp_parents,
+ VLP_CLK_CFG_2, VLP_CLK_CFG_2_SET, VLP_CLK_CFG_2_CLR,
+ 24, 3, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_PWM_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 20),
+ /* VLP_CLK_CFG_3 */
+ MUX_CLR_SET_UPD(CLK_VLP_AXI_VLP, "vlp_axi_vlp",
+ vlp_axi_vlp_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 0, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_AXI_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SYSTIMER_26M, "vlp_systimer_26m",
+ vlp_systimer_26m_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SYSTIMER_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM, "vlp_sspm",
+ vlp_sspm_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SRCK, "vlp_srck",
+ vlp_srck_parents, VLP_CLK_CFG_3, VLP_CLK_CFG_3_SET,
+ VLP_CLK_CFG_3_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SRCK_SHIFT),
+ /* VLP_CLK_CFG_4 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG0, "vlp_camtg0", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG0_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 15),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG1, "vlp_camtg1", vlp_camtg0_1_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 14),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG2, "vlp_camtg2", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 13),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG3, "vlp_camtg3", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_4, VLP_CLK_CFG_4_SET, VLP_CLK_CFG_4_CLR,
+ HWV_CG_9_DONE, HWV_CG_9_SET, HWV_CG_9_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG3_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 12),
+ /* VLP_CLK_CFG_5 */
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG4, "vlp_camtg4", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 0, 4, 7, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG4_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 11),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG5, "vlp_camtg5", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 8, 4, 15, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG5_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 10),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG6, "vlp_camtg6", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 16, 4, 23, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG6_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 9),
+ MUX_GATE_HWV_FENC_CLR_SET_UPD(CLK_VLP_CAMTG7, "vlp_camtg7", vlp_camtg2_7_parents,
+ VLP_CLK_CFG_5, VLP_CLK_CFG_5_SET, VLP_CLK_CFG_5_CLR,
+ HWV_CG_10_DONE, HWV_CG_10_SET, HWV_CG_10_CLR,
+ 24, 4, 31, VLP_CLK_CFG_UPDATE, TOP_MUX_CAMTG7_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_0, 8),
+ /* VLP_CLK_CFG_6 */
+ MUX_CLR_SET_UPD(CLK_VLP_SSPM_26M, "vlp_sspm_26m",
+ vlp_sspm_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_SSPM_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_ULPOSC_SSPM, "vlp_ulposc_sspm",
+ vlp_ulposc_sspm_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_ULPOSC_SSPM_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_26M, "vlp_vlp_pbus_26m",
+ vlp_vlp_pbus_26m_parents, VLP_CLK_CFG_6, VLP_CLK_CFG_6_SET,
+ VLP_CLK_CFG_6_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_26M_SHIFT),
+ /* VLP_CLK_CFG_7 */
+ MUX_CLR_SET_UPD(CLK_VLP_DEBUG_ERR_FLAG, "vlp_debug_err_flag",
+ vlp_debug_err_flag_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DEBUG_ERR_FLAG_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_DPMSRDMA, "vlp_dpmsrdma",
+ vlp_dpmsrdma_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 8, 1,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_DPMSRDMA_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_VLP_PBUS_156M, "vlp_vlp_pbus_156m",
+ vlp_vlp_pbus_156m_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 16, 2,
+ VLP_CLK_CFG_UPDATE, TOP_MUX_VLP_PBUS_156M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPM, "vlp_spm",
+ vlp_spm_parents, VLP_CLK_CFG_7, VLP_CLK_CFG_7_SET,
+ VLP_CLK_CFG_7_CLR, 24, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPM_SHIFT),
+ /* VLP_CLK_CFG_8 */
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_MMINFRA, "vlp_mminfra", vlp_mminfra_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_MMINFRA_VLP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 31),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_TOP, "vlp_usb", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 8, 1, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_USB_TOP_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 30),
+ MUX_GATE_FENC_CLR_SET_UPD(CLK_VLP_USB_XHCI, "vlp_usb_xhci", vlp_usb_parents,
+ VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET, VLP_CLK_CFG_8_CLR,
+ 16, 1, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_SSUSB_XHCI_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 29),
+ MUX_CLR_SET_UPD(CLK_VLP_NOC_VLP, "vlp_noc_vlp",
+ vlp_noc_vlp_parents, VLP_CLK_CFG_8, VLP_CLK_CFG_8_SET,
+ VLP_CLK_CFG_8_CLR, 24, 2,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_NOC_VLP_SHIFT),
+ /* VLP_CLK_CFG_9 */
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUDIO_H, "vlp_audio_h",
+ vlp_audio_h_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 0, 2, 7, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUDIO_H_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 27),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN1, "vlp_aud_engen1",
+ vlp_aud_engen1_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 8, 2, 15, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN1_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 26),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_ENGEN2, "vlp_aud_engen2",
+ vlp_aud_engen2_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 16, 2, 23, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_ENGEN2_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 25),
+ MUX_GATE_FENC_CLR_SET_UPD_INDEXED(CLK_VLP_AUD_INTBUS, "vlp_aud_intbus",
+ vlp_aud_intbus_parents, vlp_aud_parent_index,
+ VLP_CLK_CFG_9, VLP_CLK_CFG_9_SET, VLP_CLK_CFG_9_CLR,
+ 24, 2, 31, VLP_CLK_CFG_UPDATE1, TOP_MUX_AUD_INTBUS_SHIFT,
+ VLP_OCIC_FENC_STATUS_MON_1, 24),
+ /* VLP_CLK_CFG_10 */
+ MUX_CLR_SET_UPD(CLK_VLP_SPVLP_26M, "vlp_spvlp_26m",
+ vlp_spvlp_26m_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 0, 1,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU_VLP_26M_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU0_VLP, "vlp_spu0_vlp",
+ vlp_spu0_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 8, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU0_VLP_SHIFT),
+ MUX_CLR_SET_UPD(CLK_VLP_SPU1_VLP, "vlp_spu1_vlp",
+ vlp_spu1_vlp_parents, VLP_CLK_CFG_10, VLP_CLK_CFG_10_SET,
+ VLP_CLK_CFG_10_CLR, 16, 3,
+ VLP_CLK_CFG_UPDATE1, TOP_MUX_SPU1_VLP_SHIFT),
+};
+
+static const struct mtk_pll_data vlp_plls[] = {
+ PLL_FENC(CLK_VLP_APLL1, "vlp_apll1", VLP_APLL1_CON0, 0x0358, 1, 0,
+ VLP_APLL1_CON1, 24, VLP_APLL1_CON2, 0, 32, 0),
+ PLL_FENC(CLK_VLP_APLL2, "vlp_apll2", VLP_APLL2_CON0, 0x0358, 0, 0,
+ VLP_APLL2_CON1, 24, VLP_APLL2_CON2, 0, 32, 1),
+};
+
+static const struct regmap_config vlpckgen_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x1000,
+ .fast_io = true,
+};
+
+static int clk_mt8196_vlp_probe(struct platform_device *pdev)
+{
+ void __iomem *base;
+ struct clk_hw_onecell_data *clk_data;
+ int r;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(vlp_muxes) +
+ ARRAY_SIZE(vlp_plls) +
+ ARRAY_SIZE(vlp_divs));
+ if (!clk_data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(dev, base, &vlpckgen_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ r = mtk_clk_register_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ if (r)
+ goto free_clk_data;
+
+ r = mtk_clk_register_muxes(&pdev->dev, vlp_muxes, ARRAY_SIZE(vlp_muxes),
+ node, &mt8196_clk_vlp_lock, clk_data);
+ if (r)
+ goto unregister_factors;
+
+ r = mtk_clk_register_plls(node, vlp_plls, ARRAY_SIZE(vlp_plls),
+ clk_data);
+ if (r)
+ goto unregister_muxes;
+
+ r = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
+ if (r)
+ goto unregister_plls;
+
+ platform_set_drvdata(pdev, clk_data);
+
+ /* Initialize APLL tuner registers */
+ regmap_write(regmap, VLP_APLL1_TUNER_CON0, VLP_APLL1_TUNER_CON0_VALUE);
+ regmap_write(regmap, VLP_APLL2_TUNER_CON0, VLP_APLL2_TUNER_CON0_VALUE);
+
+ return r;
+
+unregister_plls:
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+unregister_muxes:
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+unregister_factors:
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+free_clk_data:
+ mtk_free_clk_data(clk_data);
+
+ return r;
+}
+
+static void clk_mt8196_vlp_remove(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *clk_data = platform_get_drvdata(pdev);
+ struct device_node *node = pdev->dev.of_node;
+
+ of_clk_del_provider(node);
+ mtk_clk_unregister_plls(vlp_plls, ARRAY_SIZE(vlp_plls), clk_data);
+ mtk_clk_unregister_muxes(vlp_muxes, ARRAY_SIZE(vlp_muxes), clk_data);
+ mtk_clk_unregister_factors(vlp_divs, ARRAY_SIZE(vlp_divs), clk_data);
+ mtk_free_clk_data(clk_data);
+}
+
+static const struct of_device_id of_match_clk_mt8196_vlp_ck[] = {
+ { .compatible = "mediatek,mt8196-vlpckgen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_match_clk_mt8196_vlp_ck);
+
+static struct platform_driver clk_mt8196_vlp_drv = {
+ .probe = clk_mt8196_vlp_probe,
+ .remove = clk_mt8196_vlp_remove,
+ .driver = {
+ .name = "clk-mt8196-vlpck",
+ .of_match_table = of_match_clk_mt8196_vlp_ck,
+ },
+};
+
+MODULE_DESCRIPTION("MediaTek MT8196 VLP clock generator driver");
+module_platform_driver(clk_mt8196_vlp_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index ba1d1c495bc2..19cd27941747 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -685,4 +685,20 @@ void mtk_clk_simple_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(mtk_clk_simple_remove);
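+/*
+ * Look up the optional "mediatek,hardware-voter" phandle of @node and
+ * return the regmap of the voter block. Returns NULL when no voter is
+ * described, or an ERR_PTR from device_node_to_regmap() on failure.
+ */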
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node)
+{
+ struct device_node *hwv_node;
+ struct regmap *regmap_hwv;
+
+ hwv_node = of_parse_phandle(node, "mediatek,hardware-voter", 0);
+ if (!hwv_node)
+ return NULL;
+
+ regmap_hwv = device_node_to_regmap(hwv_node);
+ of_node_put(hwv_node);
+
+ return regmap_hwv;
+}
+EXPORT_SYMBOL_GPL(mtk_clk_get_hwv_regmap);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index c17fe1c2d732..5417b9264e6d 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -20,6 +20,8 @@
#define MHZ (1000 * 1000)
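+/* Timeout for the hardware voter to acknowledge a request, in microseconds */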
+#define MTK_WAIT_HWV_DONE_US 30
+
struct platform_device;
/*
@@ -173,6 +175,25 @@ struct mtk_composite {
.flags = 0, \
}
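+/* Composite clock combining a mux, a divider and a gate */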
+#define MUX_DIV_GATE(_id, _name, _parents, \
+ _mux_reg, _mux_shift, _mux_width, \
+ _div_reg, _div_shift, _div_width, \
+ _gate_reg, _gate_shift) { \
+ .id = _id, \
+ .name = _name, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .mux_reg = _mux_reg, \
+ .mux_shift = _mux_shift, \
+ .mux_width = _mux_width, \
+ .divider_reg = _div_reg, \
+ .divider_shift = _div_shift, \
+ .divider_width = _div_width, \
+ .gate_reg = _gate_reg, \
+ .gate_shift = _gate_shift, \
+ .flags = CLK_SET_RATE_PARENT, \
+ }
+
int mtk_clk_register_composites(struct device *dev,
const struct mtk_composite *mcs, int num,
void __iomem *base, spinlock_t *lock,
@@ -245,5 +266,6 @@ int mtk_clk_pdev_probe(struct platform_device *pdev);
void mtk_clk_pdev_remove(struct platform_device *pdev);
int mtk_clk_simple_probe(struct platform_device *pdev);
void mtk_clk_simple_remove(struct platform_device *pdev);
+struct regmap *mtk_clk_get_hwv_regmap(struct device_node *node);
#endif /* __DRV_CLK_MTK_H */
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 60990296450b..c5af6dc078a3 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -8,6 +8,7 @@
#include <linux/clk-provider.h>
#include <linux/compiler_types.h>
#include <linux/container_of.h>
+#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -15,11 +16,15 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include "clk-mtk.h"
#include "clk-mux.h"
+#define MTK_WAIT_FENC_DONE_US 30
+
struct mtk_clk_mux {
struct clk_hw hw;
struct regmap *regmap;
+ struct regmap *regmap_hwv;
const struct mtk_mux *data;
spinlock_t *lock;
bool reparent;
@@ -30,6 +35,33 @@ static inline struct mtk_clk_mux *to_mtk_clk_mux(struct clk_hw *hw)
return container_of(hw, struct mtk_clk_mux, hw);
}
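+/*
+ * Ungate the mux through its CLR register, then poll the FENC status
+ * monitor until the hardware reports the clock as enabled.
+ */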
+static int mtk_clk_mux_fenc_enable_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ if (mux->lock)
+ spin_lock_irqsave(mux->lock, flags);
+ else
+ __acquire(mux->lock);
+
+ regmap_write(mux->regmap, mux->data->clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ if (mux->lock)
+ spin_unlock_irqrestore(mux->lock, flags);
+ else
+ __release(mux->lock);
+
+ return ret;
+}
+
static int mtk_clk_mux_enable_setclr(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -70,6 +102,16 @@ static void mtk_clk_mux_disable_setclr(struct clk_hw *hw)
BIT(mux->data->gate_shift));
}
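+/* Read the enable state from the FENC status monitor, not the gate bit */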
+static int mtk_clk_mux_fenc_is_enabled(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_read(mux->regmap, mux->data->fenc_sta_mon_ofs, &val);
+
+ return !!(val & BIT(mux->data->fenc_shift));
+}
+
static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -80,6 +122,41 @@ static int mtk_clk_mux_is_enabled(struct clk_hw *hw)
return (val & BIT(mux->data->gate_shift)) == 0;
}
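+/*
+ * Cast an enable vote with the hardware voter, wait for the vote to
+ * complete, then poll the FENC status monitor until the clock is
+ * reported as enabled.
+ */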
+static int mtk_clk_mux_hwv_fenc_enable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+ int ret;
+
+ regmap_write(mux->regmap_hwv, mux->data->hwv_set_ofs,
+ BIT(mux->data->gate_shift));
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, val & BIT(mux->data->gate_shift), 0,
+ MTK_WAIT_HWV_DONE_US);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout_atomic(mux->regmap, mux->data->fenc_sta_mon_ofs,
+ val, val & BIT(mux->data->fenc_shift), 1,
+ MTK_WAIT_FENC_DONE_US);
+
+ return ret;
+}
+
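+/*
+ * Withdraw the enable vote and wait for the hardware voter to confirm
+ * that the request completed.
+ */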
+static void mtk_clk_mux_hwv_disable(struct clk_hw *hw)
+{
+ struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ u32 val;
+
+ regmap_write(mux->regmap_hwv, mux->data->hwv_clr_ofs,
+ BIT(mux->data->gate_shift));
+
+ regmap_read_poll_timeout_atomic(mux->regmap_hwv, mux->data->hwv_sta_ofs,
+ val, (val & BIT(mux->data->gate_shift)),
+ 0, MTK_WAIT_HWV_DONE_US);
+}
+
static u8 mtk_clk_mux_get_parent(struct clk_hw *hw)
{
struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
@@ -146,9 +223,15 @@ static int mtk_clk_mux_set_parent_setclr_lock(struct clk_hw *hw, u8 index)
static int mtk_clk_mux_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct mtk_clk_mux *mux = to_mtk_clk_mux(hw);
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static bool mtk_clk_mux_uses_hwv(const struct clk_ops *ops)
+{
+ if (ops == &mtk_mux_gate_hwv_fenc_clr_set_upd_ops)
+ return true;
- return clk_mux_determine_rate_flags(hw, req, mux->data->flags);
+ return false;
}
const struct clk_ops mtk_mux_clr_set_upd_ops = {
@@ -168,9 +251,30 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
};
EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
+const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_fenc_enable_setclr,
+ .disable = mtk_clk_mux_disable_setclr,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_fenc_clr_set_upd_ops);
+
+const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops = {
+ .enable = mtk_clk_mux_hwv_fenc_enable,
+ .disable = mtk_clk_mux_hwv_disable,
+ .is_enabled = mtk_clk_mux_fenc_is_enabled,
+ .get_parent = mtk_clk_mux_get_parent,
+ .set_parent = mtk_clk_mux_set_parent_setclr_lock,
+ .determine_rate = mtk_clk_mux_determine_rate,
+};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_hwv_fenc_clr_set_upd_ops);
+
static struct clk_hw *mtk_clk_register_mux(struct device *dev,
const struct mtk_mux *mux,
struct regmap *regmap,
+ struct regmap *regmap_hwv,
spinlock_t *lock)
{
struct mtk_clk_mux *clk_mux;
@@ -186,8 +290,13 @@ static struct clk_hw *mtk_clk_register_mux(struct device *dev,
init.parent_names = mux->parent_names;
init.num_parents = mux->num_parents;
init.ops = mux->ops;
+ if (mtk_clk_mux_uses_hwv(init.ops) && !regmap_hwv)
+ return dev_err_ptr_probe(
+ dev, -ENXIO,
+ "regmap not found for hardware voter clocks\n");
clk_mux->regmap = regmap;
+ clk_mux->regmap_hwv = regmap_hwv;
clk_mux->data = mux;
clk_mux->lock = lock;
clk_mux->hw.init = &init;
@@ -220,6 +329,7 @@ int mtk_clk_register_muxes(struct device *dev,
struct clk_hw_onecell_data *clk_data)
{
struct regmap *regmap;
+ struct regmap *regmap_hwv;
struct clk_hw *hw;
int i;
@@ -229,6 +339,12 @@ int mtk_clk_register_muxes(struct device *dev,
return PTR_ERR(regmap);
}
+ regmap_hwv = mtk_clk_get_hwv_regmap(node);
+ if (IS_ERR(regmap_hwv))
+ return dev_err_probe(
+ dev, PTR_ERR(regmap_hwv),
+ "Cannot find hardware voter regmap for %pOF\n", node);
+
for (i = 0; i < num; i++) {
const struct mtk_mux *mux = &muxes[i];
@@ -238,7 +354,7 @@ int mtk_clk_register_muxes(struct device *dev,
continue;
}
- hw = mtk_clk_register_mux(dev, mux, regmap, lock);
+ hw = mtk_clk_register_mux(dev, mux, regmap, regmap_hwv, lock);
if (IS_ERR(hw)) {
pr_err("Failed to register clk %s: %pe\n", mux->name,
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index 943ad1d7ce4b..151e56dcf884 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -29,10 +29,16 @@ struct mtk_mux {
u32 clr_ofs;
u32 upd_ofs;
+ u32 hwv_set_ofs;
+ u32 hwv_clr_ofs;
+ u32 hwv_sta_ofs;
+ u32 fenc_sta_mon_ofs;
+
u8 mux_shift;
u8 mux_width;
u8 gate_shift;
s8 upd_shift;
+ u8 fenc_shift;
const struct clk_ops *ops;
signed char num_parents;
@@ -77,6 +83,8 @@ struct mtk_mux {
extern const struct clk_ops mtk_mux_clr_set_upd_ops;
extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_fenc_clr_set_upd_ops;
+extern const struct clk_ops mtk_mux_gate_hwv_fenc_clr_set_upd_ops;
#define MUX_GATE_CLR_SET_UPD_FLAGS(_id, _name, _parents, _mux_ofs, \
_mux_set_ofs, _mux_clr_ofs, _shift, _width, \
@@ -118,6 +126,85 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
0, _upd_ofs, _upd, CLK_SET_RATE_PARENT, \
mtk_mux_clr_set_upd_ops)
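+/*
+ * Mux with gate, FENC status monitoring and hardware-voter controlled
+ * enable/disable.
+ */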
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .hwv_sta_ofs = _hwv_sta_ofs, \
+ .hwv_set_ofs = _hwv_set_ofs, \
+ .hwv_clr_ofs = _hwv_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .num_parents = ARRAY_SIZE(_parents), \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_hwv_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_HWV_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_HWV_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _hwv_sta_ofs, _hwv_set_ofs, _hwv_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ _num_parents, _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, _flags) { \
+ .id = _id, \
+ .name = _name, \
+ .mux_ofs = _mux_ofs, \
+ .set_ofs = _mux_set_ofs, \
+ .clr_ofs = _mux_clr_ofs, \
+ .upd_ofs = _upd_ofs, \
+ .fenc_sta_mon_ofs = _fenc_sta_mon_ofs, \
+ .mux_shift = _shift, \
+ .mux_width = _width, \
+ .gate_shift = _gate, \
+ .upd_shift = _upd, \
+ .fenc_shift = _fenc, \
+ .parent_names = _parents, \
+ .parent_index = _paridx, \
+ .num_parents = _num_parents, \
+ .flags = _flags, \
+ .ops = &mtk_mux_gate_fenc_clr_set_upd_ops, \
+ }
+
+#define MUX_GATE_FENC_CLR_SET_UPD(_id, _name, _parents, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, \
+ NULL, ARRAY_SIZE(_parents), _mux_ofs, \
+ _mux_set_ofs, _mux_clr_ofs, _shift, \
+ _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
+#define MUX_GATE_FENC_CLR_SET_UPD_INDEXED(_id, _name, _parents, _paridx, \
+ _mux_ofs, _mux_set_ofs, _mux_clr_ofs, \
+ _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc) \
+ MUX_GATE_FENC_CLR_SET_UPD_FLAGS(_id, _name, _parents, _paridx, \
+ ARRAY_SIZE(_paridx), _mux_ofs, _mux_set_ofs, \
+ _mux_clr_ofs, _shift, _width, _gate, _upd_ofs, _upd, \
+ _fenc_sta_mon_ofs, _fenc, 0)
+
int mtk_clk_register_muxes(struct device *dev,
const struct mtk_mux *muxes,
int num, struct device_node *node,
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index ce453e1718e5..cd2b6ce551c6 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -37,6 +37,13 @@ int mtk_pll_is_prepared(struct clk_hw *hw)
return (readl(pll->en_addr) & BIT(pll->data->pll_en_bit)) != 0;
}
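+/* Report prepare state from the PLL's FENC status bit */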
+static int mtk_pll_fenc_is_prepared(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ return !!(readl(pll->fenc_addr) & BIT(pll->data->fenc_sta_bit));
+}
+
static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin,
u32 pcw, int postdiv)
{
@@ -200,16 +207,19 @@ unsigned long mtk_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return __mtk_pll_recalc_rate(pll, parent_rate, pcw, postdiv);
}
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
u32 pcw = 0;
int postdiv;
- mtk_pll_calc_values(pll, &pcw, &postdiv, rate, *prate);
+ mtk_pll_calc_values(pll, &pcw, &postdiv, req->rate,
+ req->best_parent_rate);
+
+ req->rate = __mtk_pll_recalc_rate(pll, req->best_parent_rate, pcw,
+ postdiv);
- return __mtk_pll_recalc_rate(pll, *prate, pcw, postdiv);
+ return 0;
}
int mtk_pll_prepare(struct clk_hw *hw)
@@ -274,14 +284,43 @@ void mtk_pll_unprepare(struct clk_hw *hw)
writel(r, pll->pwr_addr);
}
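+/*
+ * Enable/disable the PLL through dedicated SET/CLR registers instead of a
+ * read-modify-write of the enable register.
+ */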
+static int mtk_pll_prepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_set_addr);
+
+ /* Wait 20us after enable for the PLL to stabilize */
+ udelay(20);
+
+ return 0;
+}
+
+static void mtk_pll_unprepare_setclr(struct clk_hw *hw)
+{
+ struct mtk_clk_pll *pll = to_mtk_clk_pll(hw);
+
+ writel(BIT(pll->data->pll_en_bit), pll->en_clr_addr);
+}
+
const struct clk_ops mtk_pll_ops = {
.is_prepared = mtk_pll_is_prepared,
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
+ .set_rate = mtk_pll_set_rate,
+};
+
+const struct clk_ops mtk_pll_fenc_clr_set_ops = {
+ .is_prepared = mtk_pll_fenc_is_prepared,
+ .prepare = mtk_pll_prepare_setclr,
+ .unprepare = mtk_pll_unprepare_setclr,
+ .recalc_rate = mtk_pll_recalc_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_pll_set_rate,
};
+EXPORT_SYMBOL_GPL(mtk_pll_fenc_clr_set_ops);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
@@ -308,9 +347,15 @@ struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
pll->en_addr = base + data->en_reg;
else
pll->en_addr = pll->base_addr + REG_CON0;
+ if (data->en_set_reg)
+ pll->en_set_addr = base + data->en_set_reg;
+ if (data->en_clr_reg)
+ pll->en_clr_addr = base + data->en_clr_reg;
pll->hw.init = &init;
pll->data = data;
+ pll->fenc_addr = base + data->fenc_sta_ofs;
+
init.name = data->name;
init.flags = (data->flags & PLL_AO) ? CLK_IS_CRITICAL : 0;
init.ops = pll_ops;
@@ -333,12 +378,13 @@ struct clk_hw *mtk_clk_register_pll(const struct mtk_pll_data *data,
{
struct mtk_clk_pll *pll;
struct clk_hw *hw;
+ const struct clk_ops *pll_ops = data->ops ? data->ops : &mtk_pll_ops;
pll = kzalloc(sizeof(*pll), GFP_KERNEL);
if (!pll)
return ERR_PTR(-ENOMEM);
- hw = mtk_clk_register_pll_ops(pll, data, base, &mtk_pll_ops);
+ hw = mtk_clk_register_pll_ops(pll, data, base, pll_ops);
if (IS_ERR(hw))
kfree(pll);
diff --git a/drivers/clk/mediatek/clk-pll.h b/drivers/clk/mediatek/clk-pll.h
index 285c8db958b3..d71c150ce83e 100644
--- a/drivers/clk/mediatek/clk-pll.h
+++ b/drivers/clk/mediatek/clk-pll.h
@@ -29,6 +29,7 @@ struct mtk_pll_data {
u32 reg;
u32 pwr_reg;
u32 en_mask;
+ u32 fenc_sta_ofs;
u32 pd_reg;
u32 tuner_reg;
u32 tuner_en_reg;
@@ -47,8 +48,11 @@ struct mtk_pll_data {
const struct mtk_pll_div_table *div_table;
const char *parent_name;
u32 en_reg;
+ u32 en_set_reg;
+ u32 en_clr_reg;
u8 pll_en_bit; /* Assume 0, indicates BIT(0) by default */
u8 pcw_chg_bit;
+ u8 fenc_sta_bit;
};
/*
@@ -68,6 +72,9 @@ struct mtk_clk_pll {
void __iomem *pcw_addr;
void __iomem *pcw_chg_addr;
void __iomem *en_addr;
+ void __iomem *en_set_addr;
+ void __iomem *en_clr_addr;
+ void __iomem *fenc_addr;
const struct mtk_pll_data *data;
};
@@ -78,6 +85,7 @@ void mtk_clk_unregister_plls(const struct mtk_pll_data *plls, int num_plls,
struct clk_hw_onecell_data *clk_data);
extern const struct clk_ops mtk_pll_ops;
+extern const struct clk_ops mtk_pll_fenc_clr_set_ops;
static inline struct mtk_clk_pll *to_mtk_clk_pll(struct clk_hw *hw)
{
@@ -96,8 +104,7 @@ void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv,
u32 freq, u32 fin);
int mtk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
-long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate);
+int mtk_pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);
struct clk_hw *mtk_clk_register_pll_ops(struct mtk_clk_pll *pll,
const struct mtk_pll_data *data,
diff --git a/drivers/clk/mediatek/clk-pllfh.c b/drivers/clk/mediatek/clk-pllfh.c
index 094ec8a26d66..83630ee07ee9 100644
--- a/drivers/clk/mediatek/clk-pllfh.c
+++ b/drivers/clk/mediatek/clk-pllfh.c
@@ -42,7 +42,7 @@ static const struct clk_ops mtk_pllfh_ops = {
.prepare = mtk_pll_prepare,
.unprepare = mtk_pll_unprepare,
.recalc_rate = mtk_pll_recalc_rate,
- .round_rate = mtk_pll_round_rate,
+ .determine_rate = mtk_pll_determine_rate,
.set_rate = mtk_fhctl_set_rate,
};
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 7197d23543b8..71481607a6d5 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -36,6 +36,8 @@ config COMMON_CLK_MESON_VCLK
select COMMON_CLK_MESON_REGMAP
config COMMON_CLK_MESON_CLKC_UTILS
+ select REGMAP
+ select MFD_SYSCON
tristate
config COMMON_CLK_MESON_AO_CLKC
@@ -44,11 +46,6 @@ config COMMON_CLK_MESON_AO_CLKC
select COMMON_CLK_MESON_CLKC_UTILS
select RESET_CONTROLLER
-config COMMON_CLK_MESON_EE_CLKC
- tristate
- select COMMON_CLK_MESON_REGMAP
- select COMMON_CLK_MESON_CLKC_UTILS
-
config COMMON_CLK_MESON_CPU_DYNDIV
tristate
select COMMON_CLK_MESON_REGMAP
@@ -73,12 +70,12 @@ config COMMON_CLK_GXBB
depends on ARM64
default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_VID_PLL_DIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic S905 devices, aka gxbb.
@@ -89,11 +86,11 @@ config COMMON_CLK_AXG
depends on ARM64
default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select MFD_SYSCON
help
Support for the clock controller on AmLogic A113D devices, aka axg.
@@ -167,11 +164,11 @@ config COMMON_CLK_G12A
depends on ARM64
default ARCH_MESON
select COMMON_CLK_MESON_REGMAP
+ select COMMON_CLK_MESON_CLKC_UTILS
select COMMON_CLK_MESON_DUALDIV
select COMMON_CLK_MESON_MPLL
select COMMON_CLK_MESON_PLL
select COMMON_CLK_MESON_AO_CLKC
- select COMMON_CLK_MESON_EE_CLKC
select COMMON_CLK_MESON_CPU_DYNDIV
select COMMON_CLK_MESON_VID_PLL_DIV
select COMMON_CLK_MESON_VCLK
diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile
index bc56a47931c1..c6998e752c68 100644
--- a/drivers/clk/meson/Makefile
+++ b/drivers/clk/meson/Makefile
@@ -5,7 +5,6 @@ obj-$(CONFIG_COMMON_CLK_MESON_CLKC_UTILS) += meson-clkc-utils.o
obj-$(CONFIG_COMMON_CLK_MESON_AO_CLKC) += meson-aoclk.o
obj-$(CONFIG_COMMON_CLK_MESON_CPU_DYNDIV) += clk-cpu-dyndiv.o
obj-$(CONFIG_COMMON_CLK_MESON_DUALDIV) += clk-dualdiv.o
-obj-$(CONFIG_COMMON_CLK_MESON_EE_CLKC) += meson-eeclk.o
obj-$(CONFIG_COMMON_CLK_MESON_MPLL) += clk-mpll.o
obj-$(CONFIG_COMMON_CLK_MESON_PHASE) += clk-phase.o
obj-$(CONFIG_COMMON_CLK_MESON_PLL) += clk-pll.o
diff --git a/drivers/clk/meson/a1-peripherals.c b/drivers/clk/meson/a1-peripherals.c
index 1f5d445d44fe..5e0d58c01405 100644
--- a/drivers/clk/meson/a1-peripherals.c
+++ b/drivers/clk/meson/a1-peripherals.c
@@ -46,7 +46,7 @@
#define PSRAM_CLK_CTRL 0xf4
#define DMC_CLK_CTRL 0xf8
-static struct clk_regmap xtal_in = {
+static struct clk_regmap a1_xtal_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 0,
@@ -61,7 +61,7 @@ static struct clk_regmap xtal_in = {
},
};
-static struct clk_regmap fixpll_in = {
+static struct clk_regmap a1_fixpll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 1,
@@ -76,7 +76,7 @@ static struct clk_regmap fixpll_in = {
},
};
-static struct clk_regmap usb_phy_in = {
+static struct clk_regmap a1_usb_phy_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 2,
@@ -91,7 +91,7 @@ static struct clk_regmap usb_phy_in = {
},
};
-static struct clk_regmap usb_ctrl_in = {
+static struct clk_regmap a1_usb_ctrl_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 3,
@@ -106,7 +106,7 @@ static struct clk_regmap usb_ctrl_in = {
},
};
-static struct clk_regmap hifipll_in = {
+static struct clk_regmap a1_hifipll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 4,
@@ -121,7 +121,7 @@ static struct clk_regmap hifipll_in = {
},
};
-static struct clk_regmap syspll_in = {
+static struct clk_regmap a1_syspll_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 5,
@@ -136,7 +136,7 @@ static struct clk_regmap syspll_in = {
},
};
-static struct clk_regmap dds_in = {
+static struct clk_regmap a1_dds_in = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_OSCIN_CTRL,
.bit_idx = 6,
@@ -151,7 +151,7 @@ static struct clk_regmap dds_in = {
},
};
-static struct clk_regmap rtc_32k_in = {
+static struct clk_regmap a1_rtc_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 31,
@@ -166,7 +166,7 @@ static struct clk_regmap rtc_32k_in = {
},
};
-static const struct meson_clk_dualdiv_param clk_32k_div_table[] = {
+static const struct meson_clk_dualdiv_param a1_32k_div_table[] = {
{
.dual = 1,
.n1 = 733,
@@ -177,7 +177,7 @@ static const struct meson_clk_dualdiv_param clk_32k_div_table[] = {
{}
};
-static struct clk_regmap rtc_32k_div = {
+static struct clk_regmap a1_rtc_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = RTC_BY_OSCIN_CTRL0,
@@ -204,19 +204,19 @@ static struct clk_regmap rtc_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "rtc_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_in.hw
+ &a1_rtc_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap rtc_32k_xtal = {
+static struct clk_regmap a1_rtc_32k_xtal = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL1,
.bit_idx = 24,
@@ -225,13 +225,13 @@ static struct clk_regmap rtc_32k_xtal = {
.name = "rtc_32k_xtal",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_in.hw
+ &a1_rtc_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap rtc_32k_sel = {
+static struct clk_regmap a1_rtc_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_CTRL,
.mask = 0x3,
@@ -242,15 +242,15 @@ static struct clk_regmap rtc_32k_sel = {
.name = "rtc_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_xtal.hw,
- &rtc_32k_div.hw,
+ &a1_rtc_32k_xtal.hw,
+ &a1_rtc_32k_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap rtc = {
+static struct clk_regmap a1_rtc = {
.data = &(struct clk_regmap_gate_data){
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 30,
@@ -259,38 +259,38 @@ static struct clk_regmap rtc = {
.name = "rtc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_sel.hw
+ &a1_rtc_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_sys[] = { 0, 1, 2, 3, 7 };
-static const struct clk_parent_data sys_parents[] = {
+static u32 a1_sys_parents_val_table[] = { 0, 1, 2, 3, 7 };
+static const struct clk_parent_data a1_sys_parents[] = {
{ .fw_name = "xtal" },
{ .fw_name = "fclk_div2" },
{ .fw_name = "fclk_div3" },
{ .fw_name = "fclk_div5" },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap sys_b_sel = {
+static struct clk_regmap a1_sys_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_sys,
+ .table = a1_sys_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sys_b_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_parents,
- .num_parents = ARRAY_SIZE(sys_parents),
+ .parent_data = a1_sys_parents,
+ .num_parents = ARRAY_SIZE(a1_sys_parents),
},
};
-static struct clk_regmap sys_b_div = {
+static struct clk_regmap a1_sys_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = SYS_CLK_CTRL0,
.shift = 16,
@@ -300,14 +300,14 @@ static struct clk_regmap sys_b_div = {
.name = "sys_b_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_b_sel.hw
+ &a1_sys_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_b = {
+static struct clk_regmap a1_sys_b = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_CLK_CTRL0,
.bit_idx = 29,
@@ -316,29 +316,29 @@ static struct clk_regmap sys_b = {
.name = "sys_b",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_b_div.hw
+ &a1_sys_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_a_sel = {
+static struct clk_regmap a1_sys_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_sys,
+ .table = a1_sys_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sys_a_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_parents,
- .num_parents = ARRAY_SIZE(sys_parents),
+ .parent_data = a1_sys_parents,
+ .num_parents = ARRAY_SIZE(a1_sys_parents),
},
};
-static struct clk_regmap sys_a_div = {
+static struct clk_regmap a1_sys_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = SYS_CLK_CTRL0,
.shift = 0,
@@ -348,14 +348,14 @@ static struct clk_regmap sys_a_div = {
.name = "sys_a_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a_sel.hw
+ &a1_sys_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys_a = {
+static struct clk_regmap a1_sys_a = {
.data = &(struct clk_regmap_gate_data){
.offset = SYS_CLK_CTRL0,
.bit_idx = 13,
@@ -364,14 +364,14 @@ static struct clk_regmap sys_a = {
.name = "sys_a",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a_div.hw
+ &a1_sys_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sys = {
+static struct clk_regmap a1_sys = {
.data = &(struct clk_regmap_mux_data){
.offset = SYS_CLK_CTRL0,
.mask = 0x1,
@@ -381,8 +381,8 @@ static struct clk_regmap sys = {
.name = "sys",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sys_a.hw,
- &sys_b.hw,
+ &a1_sys_a.hw,
+ &a1_sys_b.hw,
},
.num_parents = 2,
/*
@@ -398,32 +398,32 @@ static struct clk_regmap sys = {
},
};
-static u32 mux_table_dsp_ab[] = { 0, 1, 2, 3, 4, 7 };
-static const struct clk_parent_data dsp_ab_parent_data[] = {
+static u32 a1_dsp_parents_val_table[] = { 0, 1, 2, 3, 4, 7 };
+static const struct clk_parent_data a1_dsp_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "hifi_pll", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap dspa_a_sel = {
+static struct clk_regmap a1_dspa_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspa_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspa_a_div = {
+static struct clk_regmap a1_dspa_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPA_CLK_CTRL0,
.shift = 0,
@@ -433,14 +433,14 @@ static struct clk_regmap dspa_a_div = {
.name = "dspa_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a_sel.hw
+ &a1_dspa_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_a = {
+static struct clk_regmap a1_dspa_a = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_CTRL0,
.bit_idx = 13,
@@ -449,29 +449,29 @@ static struct clk_regmap dspa_a = {
.name = "dspa_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a_div.hw
+ &a1_dspa_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_b_sel = {
+static struct clk_regmap a1_dspa_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspa_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspa_b_div = {
+static struct clk_regmap a1_dspa_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPA_CLK_CTRL0,
.shift = 16,
@@ -481,14 +481,14 @@ static struct clk_regmap dspa_b_div = {
.name = "dspa_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_b_sel.hw
+ &a1_dspa_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_b = {
+static struct clk_regmap a1_dspa_b = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_CTRL0,
.bit_idx = 29,
@@ -497,14 +497,14 @@ static struct clk_regmap dspa_b = {
.name = "dspa_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_b_div.hw
+ &a1_dspa_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_sel = {
+static struct clk_regmap a1_dspa_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPA_CLK_CTRL0,
.mask = 0x1,
@@ -514,15 +514,15 @@ static struct clk_regmap dspa_sel = {
.name = "dspa_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_a.hw,
- &dspa_b.hw,
+ &a1_dspa_a.hw,
+ &a1_dspa_b.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_en = {
+static struct clk_regmap a1_dspa_en = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_EN,
.bit_idx = 1,
@@ -531,14 +531,14 @@ static struct clk_regmap dspa_en = {
.name = "dspa_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_sel.hw
+ &a1_dspa_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspa_en_nic = {
+static struct clk_regmap a1_dspa_en_nic = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPA_CLK_EN,
.bit_idx = 0,
@@ -547,29 +547,29 @@ static struct clk_regmap dspa_en_nic = {
.name = "dspa_en_nic",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspa_sel.hw
+ &a1_dspa_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_a_sel = {
+static struct clk_regmap a1_dspb_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspb_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspb_a_div = {
+static struct clk_regmap a1_dspb_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPB_CLK_CTRL0,
.shift = 0,
@@ -579,14 +579,14 @@ static struct clk_regmap dspb_a_div = {
.name = "dspb_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a_sel.hw
+ &a1_dspb_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_a = {
+static struct clk_regmap a1_dspb_a = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_CTRL0,
.bit_idx = 13,
@@ -595,29 +595,29 @@ static struct clk_regmap dspb_a = {
.name = "dspb_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a_div.hw
+ &a1_dspb_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_b_sel = {
+static struct clk_regmap a1_dspb_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_dsp_ab,
+ .table = a1_dsp_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "dspb_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = dsp_ab_parent_data,
- .num_parents = ARRAY_SIZE(dsp_ab_parent_data),
+ .parent_data = a1_dsp_parents,
+ .num_parents = ARRAY_SIZE(a1_dsp_parents),
},
};
-static struct clk_regmap dspb_b_div = {
+static struct clk_regmap a1_dspb_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = DSPB_CLK_CTRL0,
.shift = 16,
@@ -627,14 +627,14 @@ static struct clk_regmap dspb_b_div = {
.name = "dspb_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_b_sel.hw
+ &a1_dspb_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_b = {
+static struct clk_regmap a1_dspb_b = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_CTRL0,
.bit_idx = 29,
@@ -643,14 +643,14 @@ static struct clk_regmap dspb_b = {
.name = "dspb_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_b_div.hw
+ &a1_dspb_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_sel = {
+static struct clk_regmap a1_dspb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DSPB_CLK_CTRL0,
.mask = 0x1,
@@ -660,15 +660,15 @@ static struct clk_regmap dspb_sel = {
.name = "dspb_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_a.hw,
- &dspb_b.hw,
+ &a1_dspb_a.hw,
+ &a1_dspb_b.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_en = {
+static struct clk_regmap a1_dspb_en = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_EN,
.bit_idx = 1,
@@ -677,14 +677,14 @@ static struct clk_regmap dspb_en = {
.name = "dspb_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_sel.hw
+ &a1_dspb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dspb_en_nic = {
+static struct clk_regmap a1_dspb_en_nic = {
.data = &(struct clk_regmap_gate_data){
.offset = DSPB_CLK_EN,
.bit_idx = 0,
@@ -693,14 +693,14 @@ static struct clk_regmap dspb_en_nic = {
.name = "dspb_en_nic",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dspb_sel.hw
+ &a1_dspb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap clk_24m = {
+static struct clk_regmap a1_24m = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 11,
@@ -715,20 +715,20 @@ static struct clk_regmap clk_24m = {
},
};
-static struct clk_fixed_factor clk_24m_div2 = {
+static struct clk_fixed_factor a1_24m_div2 = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "24m_div2",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_24m.hw
+ &a1_24m.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap clk_12m = {
+static struct clk_regmap a1_12m = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 10,
@@ -737,13 +737,13 @@ static struct clk_regmap clk_12m = {
.name = "12m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_24m_div2.hw
+ &a1_24m_div2.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div2_divn_pre = {
+static struct clk_regmap a1_fclk_div2_divn_pre = {
.data = &(struct clk_regmap_div_data){
.offset = CLK12_24_CTRL,
.shift = 0,
@@ -759,7 +759,7 @@ static struct clk_regmap fclk_div2_divn_pre = {
},
};
-static struct clk_regmap fclk_div2_divn = {
+static struct clk_regmap a1_fclk_div2_divn = {
.data = &(struct clk_regmap_gate_data){
.offset = CLK12_24_CTRL,
.bit_idx = 12,
@@ -768,7 +768,7 @@ static struct clk_regmap fclk_div2_divn = {
.name = "fclk_div2_divn",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_divn_pre.hw
+ &a1_fclk_div2_divn_pre.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -779,10 +779,10 @@ static struct clk_regmap fclk_div2_divn = {
* the index 2 is sys_pll_div16, it will be implemented in the CPU clock driver,
* the index 4 is the clock measurement source, it's not supported yet
*/
-static u32 gen_table[] = { 0, 1, 3, 5, 6, 7, 8 };
-static const struct clk_parent_data gen_parent_data[] = {
+static u32 a1_gen_parents_val_table[] = { 0, 1, 3, 5, 6, 7, 8 };
+static const struct clk_parent_data a1_gen_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
{ .fw_name = "hifi_pll", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
@@ -790,18 +790,18 @@ static const struct clk_parent_data gen_parent_data[] = {
{ .fw_name = "fclk_div7", },
};
-static struct clk_regmap gen_sel = {
+static struct clk_regmap a1_gen_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = GEN_CLK_CTRL,
.mask = 0xf,
.shift = 12,
- .table = gen_table,
+ .table = a1_gen_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gen_parent_data,
- .num_parents = ARRAY_SIZE(gen_parent_data),
+ .parent_data = a1_gen_parents,
+ .num_parents = ARRAY_SIZE(a1_gen_parents),
/*
* The GEN clock can be connected to an external pad, so it
* may be set up directly from the device tree. Additionally,
@@ -813,7 +813,7 @@ static struct clk_regmap gen_sel = {
},
};
-static struct clk_regmap gen_div = {
+static struct clk_regmap a1_gen_div = {
.data = &(struct clk_regmap_div_data){
.offset = GEN_CLK_CTRL,
.shift = 0,
@@ -823,14 +823,14 @@ static struct clk_regmap gen_div = {
.name = "gen_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_sel.hw
+ &a1_gen_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap gen = {
+static struct clk_regmap a1_gen = {
.data = &(struct clk_regmap_gate_data){
.offset = GEN_CLK_CTRL,
.bit_idx = 11,
@@ -839,14 +839,14 @@ static struct clk_regmap gen = {
.name = "gen",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_div.hw
+ &a1_gen_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap saradc_sel = {
+static struct clk_regmap a1_saradc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SAR_ADC_CLK_CTRL,
.mask = 0x1,
@@ -857,13 +857,13 @@ static struct clk_regmap saradc_sel = {
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &sys.hw, },
+ { .hw = &a1_sys.hw, },
},
.num_parents = 2,
},
};
-static struct clk_regmap saradc_div = {
+static struct clk_regmap a1_saradc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SAR_ADC_CLK_CTRL,
.shift = 0,
@@ -873,14 +873,14 @@ static struct clk_regmap saradc_div = {
.name = "saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &saradc_sel.hw
+ &a1_saradc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap saradc = {
+static struct clk_regmap a1_saradc = {
.data = &(struct clk_regmap_gate_data){
.offset = SAR_ADC_CLK_CTRL,
.bit_idx = 8,
@@ -889,20 +889,20 @@ static struct clk_regmap saradc = {
.name = "saradc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &saradc_div.hw
+ &a1_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data pwm_abcd_parents[] = {
+static const struct clk_parent_data a1_pwm_abcd_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
- { .hw = &rtc.hw },
+ { .hw = &a1_sys.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap pwm_a_sel = {
+static struct clk_regmap a1_pwm_a_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_AB_CTRL,
.mask = 0x1,
@@ -911,12 +911,12 @@ static struct clk_regmap pwm_a_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_a_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_a_div = {
+static struct clk_regmap a1_pwm_a_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_AB_CTRL,
.shift = 0,
@@ -926,14 +926,14 @@ static struct clk_regmap pwm_a_div = {
.name = "pwm_a_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_a_sel.hw
+ &a1_pwm_a_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_a = {
+static struct clk_regmap a1_pwm_a = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_AB_CTRL,
.bit_idx = 8,
@@ -942,14 +942,14 @@ static struct clk_regmap pwm_a = {
.name = "pwm_a",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_a_div.hw
+ &a1_pwm_a_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_b_sel = {
+static struct clk_regmap a1_pwm_b_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_AB_CTRL,
.mask = 0x1,
@@ -958,12 +958,12 @@ static struct clk_regmap pwm_b_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_b_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_b_div = {
+static struct clk_regmap a1_pwm_b_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_AB_CTRL,
.shift = 16,
@@ -973,14 +973,14 @@ static struct clk_regmap pwm_b_div = {
.name = "pwm_b_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_b_sel.hw
+ &a1_pwm_b_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_b = {
+static struct clk_regmap a1_pwm_b = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_AB_CTRL,
.bit_idx = 24,
@@ -989,14 +989,14 @@ static struct clk_regmap pwm_b = {
.name = "pwm_b",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_b_div.hw
+ &a1_pwm_b_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_c_sel = {
+static struct clk_regmap a1_pwm_c_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_CD_CTRL,
.mask = 0x1,
@@ -1005,12 +1005,12 @@ static struct clk_regmap pwm_c_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_c_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_c_div = {
+static struct clk_regmap a1_pwm_c_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_CD_CTRL,
.shift = 0,
@@ -1020,14 +1020,14 @@ static struct clk_regmap pwm_c_div = {
.name = "pwm_c_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_c_sel.hw
+ &a1_pwm_c_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_c = {
+static struct clk_regmap a1_pwm_c = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_CD_CTRL,
.bit_idx = 8,
@@ -1036,14 +1036,14 @@ static struct clk_regmap pwm_c = {
.name = "pwm_c",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_c_div.hw
+ &a1_pwm_c_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_d_sel = {
+static struct clk_regmap a1_pwm_d_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_CD_CTRL,
.mask = 0x1,
@@ -1052,12 +1052,12 @@ static struct clk_regmap pwm_d_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_d_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_abcd_parents,
- .num_parents = ARRAY_SIZE(pwm_abcd_parents),
+ .parent_data = a1_pwm_abcd_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_abcd_parents),
},
};
-static struct clk_regmap pwm_d_div = {
+static struct clk_regmap a1_pwm_d_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_CD_CTRL,
.shift = 16,
@@ -1067,14 +1067,14 @@ static struct clk_regmap pwm_d_div = {
.name = "pwm_d_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_d_sel.hw
+ &a1_pwm_d_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_d = {
+static struct clk_regmap a1_pwm_d = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_CD_CTRL,
.bit_idx = 24,
@@ -1083,21 +1083,21 @@ static struct clk_regmap pwm_d = {
.name = "pwm_d",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_d_div.hw
+ &a1_pwm_d_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data pwm_ef_parents[] = {
+static const struct clk_parent_data a1_pwm_ef_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
+ { .hw = &a1_sys.hw },
{ .fw_name = "fclk_div5", },
- { .hw = &rtc.hw },
+ { .hw = &a1_rtc.hw },
};
-static struct clk_regmap pwm_e_sel = {
+static struct clk_regmap a1_pwm_e_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_EF_CTRL,
.mask = 0x3,
@@ -1106,12 +1106,12 @@ static struct clk_regmap pwm_e_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_e_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_ef_parents,
- .num_parents = ARRAY_SIZE(pwm_ef_parents),
+ .parent_data = a1_pwm_ef_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_ef_parents),
},
};
-static struct clk_regmap pwm_e_div = {
+static struct clk_regmap a1_pwm_e_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_EF_CTRL,
.shift = 0,
@@ -1121,14 +1121,14 @@ static struct clk_regmap pwm_e_div = {
.name = "pwm_e_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_e_sel.hw
+ &a1_pwm_e_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_e = {
+static struct clk_regmap a1_pwm_e = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_EF_CTRL,
.bit_idx = 8,
@@ -1137,14 +1137,14 @@ static struct clk_regmap pwm_e = {
.name = "pwm_e",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_e_div.hw
+ &a1_pwm_e_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_f_sel = {
+static struct clk_regmap a1_pwm_f_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PWM_CLK_EF_CTRL,
.mask = 0x3,
@@ -1153,12 +1153,12 @@ static struct clk_regmap pwm_f_sel = {
.hw.init = &(struct clk_init_data){
.name = "pwm_f_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = pwm_ef_parents,
- .num_parents = ARRAY_SIZE(pwm_ef_parents),
+ .parent_data = a1_pwm_ef_parents,
+ .num_parents = ARRAY_SIZE(a1_pwm_ef_parents),
},
};
-static struct clk_regmap pwm_f_div = {
+static struct clk_regmap a1_pwm_f_div = {
.data = &(struct clk_regmap_div_data){
.offset = PWM_CLK_EF_CTRL,
.shift = 16,
@@ -1168,14 +1168,14 @@ static struct clk_regmap pwm_f_div = {
.name = "pwm_f_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_f_sel.hw
+ &a1_pwm_f_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap pwm_f = {
+static struct clk_regmap a1_pwm_f = {
.data = &(struct clk_regmap_gate_data){
.offset = PWM_CLK_EF_CTRL,
.bit_idx = 24,
@@ -1184,7 +1184,7 @@ static struct clk_regmap pwm_f = {
.name = "pwm_f",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &pwm_f_div.hw
+ &a1_pwm_f_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1200,14 +1200,14 @@ static struct clk_regmap pwm_f = {
* --------------------|/
* 24M
*/
-static const struct clk_parent_data spicc_spifc_parents[] = {
+static const struct clk_parent_data a1_spi_parents[] = {
{ .fw_name = "fclk_div2"},
{ .fw_name = "fclk_div3"},
{ .fw_name = "fclk_div5"},
{ .fw_name = "hifi_pll" },
};
-static struct clk_regmap spicc_sel = {
+static struct clk_regmap a1_spicc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SPICC_CLK_CTRL,
.mask = 0x3,
@@ -1216,12 +1216,12 @@ static struct clk_regmap spicc_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_spifc_parents,
- .num_parents = ARRAY_SIZE(spicc_spifc_parents),
+ .parent_data = a1_spi_parents,
+ .num_parents = ARRAY_SIZE(a1_spi_parents),
},
};
-static struct clk_regmap spicc_div = {
+static struct clk_regmap a1_spicc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SPICC_CLK_CTRL,
.shift = 0,
@@ -1231,14 +1231,14 @@ static struct clk_regmap spicc_div = {
.name = "spicc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spicc_sel.hw
+ &a1_spicc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spicc_sel2 = {
+static struct clk_regmap a1_spicc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SPICC_CLK_CTRL,
.mask = 0x1,
@@ -1248,7 +1248,7 @@ static struct clk_regmap spicc_sel2 = {
.name = "spicc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &spicc_div.hw },
+ { .hw = &a1_spicc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1256,7 +1256,7 @@ static struct clk_regmap spicc_sel2 = {
},
};
-static struct clk_regmap spicc = {
+static struct clk_regmap a1_spicc = {
.data = &(struct clk_regmap_gate_data){
.offset = SPICC_CLK_CTRL,
.bit_idx = 8,
@@ -1265,14 +1265,14 @@ static struct clk_regmap spicc = {
.name = "spicc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spicc_sel2.hw
+ &a1_spicc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ts_div = {
+static struct clk_regmap a1_ts_div = {
.data = &(struct clk_regmap_div_data){
.offset = TS_CLK_CTRL,
.shift = 0,
@@ -1288,7 +1288,7 @@ static struct clk_regmap ts_div = {
},
};
-static struct clk_regmap ts = {
+static struct clk_regmap a1_ts = {
.data = &(struct clk_regmap_gate_data){
.offset = TS_CLK_CTRL,
.bit_idx = 8,
@@ -1297,14 +1297,14 @@ static struct clk_regmap ts = {
.name = "ts",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ts_div.hw
+ &a1_ts_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spifc_sel = {
+static struct clk_regmap a1_spifc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SPIFC_CLK_CTRL,
.mask = 0x3,
@@ -1313,12 +1313,12 @@ static struct clk_regmap spifc_sel = {
.hw.init = &(struct clk_init_data){
.name = "spifc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_spifc_parents,
- .num_parents = ARRAY_SIZE(spicc_spifc_parents),
+ .parent_data = a1_spi_parents,
+ .num_parents = ARRAY_SIZE(a1_spi_parents),
},
};
-static struct clk_regmap spifc_div = {
+static struct clk_regmap a1_spifc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SPIFC_CLK_CTRL,
.shift = 0,
@@ -1328,14 +1328,14 @@ static struct clk_regmap spifc_div = {
.name = "spifc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spifc_sel.hw
+ &a1_spifc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap spifc_sel2 = {
+static struct clk_regmap a1_spifc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SPIFC_CLK_CTRL,
.mask = 0x1,
@@ -1345,7 +1345,7 @@ static struct clk_regmap spifc_sel2 = {
.name = "spifc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &spifc_div.hw },
+ { .hw = &a1_spifc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1353,7 +1353,7 @@ static struct clk_regmap spifc_sel2 = {
},
};
-static struct clk_regmap spifc = {
+static struct clk_regmap a1_spifc = {
.data = &(struct clk_regmap_gate_data){
.offset = SPIFC_CLK_CTRL,
.bit_idx = 8,
@@ -1362,21 +1362,21 @@ static struct clk_regmap spifc = {
.name = "spifc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &spifc_sel2.hw
+ &a1_spifc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data usb_bus_parents[] = {
+static const struct clk_parent_data a1_usb_bus_parents[] = {
{ .fw_name = "xtal", },
- { .hw = &sys.hw },
+ { .hw = &a1_sys.hw },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap usb_bus_sel = {
+static struct clk_regmap a1_usb_bus_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = USB_BUSCLK_CTRL,
.mask = 0x3,
@@ -1385,13 +1385,13 @@ static struct clk_regmap usb_bus_sel = {
.hw.init = &(struct clk_init_data){
.name = "usb_bus_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = usb_bus_parents,
- .num_parents = ARRAY_SIZE(usb_bus_parents),
+ .parent_data = a1_usb_bus_parents,
+ .num_parents = ARRAY_SIZE(a1_usb_bus_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap usb_bus_div = {
+static struct clk_regmap a1_usb_bus_div = {
.data = &(struct clk_regmap_div_data){
.offset = USB_BUSCLK_CTRL,
.shift = 0,
@@ -1401,14 +1401,14 @@ static struct clk_regmap usb_bus_div = {
.name = "usb_bus_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &usb_bus_sel.hw
+ &a1_usb_bus_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap usb_bus = {
+static struct clk_regmap a1_usb_bus = {
.data = &(struct clk_regmap_gate_data){
.offset = USB_BUSCLK_CTRL,
.bit_idx = 8,
@@ -1417,21 +1417,21 @@ static struct clk_regmap usb_bus = {
.name = "usb_bus",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &usb_bus_div.hw
+ &a1_usb_bus_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data sd_emmc_psram_dmc_parents[] = {
+static const struct clk_parent_data a1_sd_emmc_parents[] = {
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "hifi_pll", },
};
-static struct clk_regmap sd_emmc_sel = {
+static struct clk_regmap a1_sd_emmc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = SD_EMMC_CLK_CTRL,
.mask = 0x3,
@@ -1440,12 +1440,12 @@ static struct clk_regmap sd_emmc_sel = {
.hw.init = &(struct clk_init_data){
.name = "sd_emmc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap sd_emmc_div = {
+static struct clk_regmap a1_sd_emmc_div = {
.data = &(struct clk_regmap_div_data){
.offset = SD_EMMC_CLK_CTRL,
.shift = 0,
@@ -1455,14 +1455,14 @@ static struct clk_regmap sd_emmc_div = {
.name = "sd_emmc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_sel.hw
+ &a1_sd_emmc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap sd_emmc_sel2 = {
+static struct clk_regmap a1_sd_emmc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = SD_EMMC_CLK_CTRL,
.mask = 0x1,
@@ -1472,7 +1472,7 @@ static struct clk_regmap sd_emmc_sel2 = {
.name = "sd_emmc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &sd_emmc_div.hw },
+ { .hw = &a1_sd_emmc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1480,7 +1480,7 @@ static struct clk_regmap sd_emmc_sel2 = {
},
};
-static struct clk_regmap sd_emmc = {
+static struct clk_regmap a1_sd_emmc = {
.data = &(struct clk_regmap_gate_data){
.offset = SD_EMMC_CLK_CTRL,
.bit_idx = 8,
@@ -1489,14 +1489,14 @@ static struct clk_regmap sd_emmc = {
.name = "sd_emmc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_sel2.hw
+ &a1_sd_emmc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap psram_sel = {
+static struct clk_regmap a1_psram_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = PSRAM_CLK_CTRL,
.mask = 0x3,
@@ -1505,12 +1505,12 @@ static struct clk_regmap psram_sel = {
.hw.init = &(struct clk_init_data){
.name = "psram_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap psram_div = {
+static struct clk_regmap a1_psram_div = {
.data = &(struct clk_regmap_div_data){
.offset = PSRAM_CLK_CTRL,
.shift = 0,
@@ -1520,14 +1520,14 @@ static struct clk_regmap psram_div = {
.name = "psram_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &psram_sel.hw
+ &a1_psram_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap psram_sel2 = {
+static struct clk_regmap a1_psram_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = PSRAM_CLK_CTRL,
.mask = 0x1,
@@ -1537,7 +1537,7 @@ static struct clk_regmap psram_sel2 = {
.name = "psram_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &psram_div.hw },
+ { .hw = &a1_psram_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1545,7 +1545,7 @@ static struct clk_regmap psram_sel2 = {
},
};
-static struct clk_regmap psram = {
+static struct clk_regmap a1_psram = {
.data = &(struct clk_regmap_gate_data){
.offset = PSRAM_CLK_CTRL,
.bit_idx = 8,
@@ -1554,14 +1554,14 @@ static struct clk_regmap psram = {
.name = "psram",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &psram_sel2.hw
+ &a1_psram_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dmc_sel = {
+static struct clk_regmap a1_dmc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = DMC_CLK_CTRL,
.mask = 0x3,
@@ -1570,12 +1570,12 @@ static struct clk_regmap dmc_sel = {
.hw.init = &(struct clk_init_data){
.name = "dmc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = sd_emmc_psram_dmc_parents,
- .num_parents = ARRAY_SIZE(sd_emmc_psram_dmc_parents),
+ .parent_data = a1_sd_emmc_parents,
+ .num_parents = ARRAY_SIZE(a1_sd_emmc_parents),
},
};
-static struct clk_regmap dmc_div = {
+static struct clk_regmap a1_dmc_div = {
.data = &(struct clk_regmap_div_data){
.offset = DMC_CLK_CTRL,
.shift = 0,
@@ -1585,14 +1585,14 @@ static struct clk_regmap dmc_div = {
.name = "dmc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dmc_sel.hw
+ &a1_dmc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap dmc_sel2 = {
+static struct clk_regmap a1_dmc_sel2 = {
.data = &(struct clk_regmap_mux_data){
.offset = DMC_CLK_CTRL,
.mask = 0x1,
@@ -1602,7 +1602,7 @@ static struct clk_regmap dmc_sel2 = {
.name = "dmc_sel2",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &dmc_div.hw },
+ { .hw = &a1_dmc_div.hw },
{ .fw_name = "xtal", },
},
.num_parents = 2,
@@ -1610,7 +1610,7 @@ static struct clk_regmap dmc_sel2 = {
},
};
-static struct clk_regmap dmc = {
+static struct clk_regmap a1_dmc = {
.data = &(struct clk_regmap_gate_data){
.offset = DMC_CLK_CTRL,
.bit_idx = 8,
@@ -1619,14 +1619,14 @@ static struct clk_regmap dmc = {
.name = "dmc",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &dmc_sel2.hw
+ &a1_dmc_sel2.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ceca_32k_in = {
+static struct clk_regmap a1_ceca_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = CECA_CLK_CTRL0,
.bit_idx = 31,
@@ -1641,7 +1641,7 @@ static struct clk_regmap ceca_32k_in = {
},
};
-static struct clk_regmap ceca_32k_div = {
+static struct clk_regmap a1_ceca_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = CECA_CLK_CTRL0,
@@ -1668,19 +1668,19 @@ static struct clk_regmap ceca_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "ceca_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_in.hw
+ &a1_ceca_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap ceca_32k_sel_pre = {
+static struct clk_regmap a1_ceca_32k_sel_pre = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECA_CLK_CTRL1,
.mask = 0x1,
@@ -1691,15 +1691,15 @@ static struct clk_regmap ceca_32k_sel_pre = {
.name = "ceca_32k_sel_pre",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_div.hw,
- &ceca_32k_in.hw,
+ &a1_ceca_32k_div.hw,
+ &a1_ceca_32k_in.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ceca_32k_sel = {
+static struct clk_regmap a1_ceca_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECA_CLK_CTRL1,
.mask = 0x1,
@@ -1710,14 +1710,14 @@ static struct clk_regmap ceca_32k_sel = {
.name = "ceca_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_sel_pre.hw,
- &rtc.hw,
+ &a1_ceca_32k_sel_pre.hw,
+ &a1_rtc.hw,
},
.num_parents = 2,
},
};
-static struct clk_regmap ceca_32k_out = {
+static struct clk_regmap a1_ceca_32k_out = {
.data = &(struct clk_regmap_gate_data){
.offset = CECA_CLK_CTRL0,
.bit_idx = 30,
@@ -1726,14 +1726,14 @@ static struct clk_regmap ceca_32k_out = {
.name = "ceca_32k_out",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ceca_32k_sel.hw
+ &a1_ceca_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap cecb_32k_in = {
+static struct clk_regmap a1_cecb_32k_in = {
.data = &(struct clk_regmap_gate_data){
.offset = CECB_CLK_CTRL0,
.bit_idx = 31,
@@ -1748,7 +1748,7 @@ static struct clk_regmap cecb_32k_in = {
},
};
-static struct clk_regmap cecb_32k_div = {
+static struct clk_regmap a1_cecb_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = CECB_CLK_CTRL0,
@@ -1775,19 +1775,19 @@ static struct clk_regmap cecb_32k_div = {
.shift = 28,
.width = 1,
},
- .table = clk_32k_div_table,
+ .table = a1_32k_div_table,
},
.hw.init = &(struct clk_init_data){
.name = "cecb_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_in.hw
+ &a1_cecb_32k_in.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap cecb_32k_sel_pre = {
+static struct clk_regmap a1_cecb_32k_sel_pre = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECB_CLK_CTRL1,
.mask = 0x1,
@@ -1798,15 +1798,15 @@ static struct clk_regmap cecb_32k_sel_pre = {
.name = "cecb_32k_sel_pre",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_div.hw,
- &cecb_32k_in.hw,
+ &a1_cecb_32k_div.hw,
+ &a1_cecb_32k_in.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap cecb_32k_sel = {
+static struct clk_regmap a1_cecb_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CECB_CLK_CTRL1,
.mask = 0x1,
@@ -1817,14 +1817,14 @@ static struct clk_regmap cecb_32k_sel = {
.name = "cecb_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_sel_pre.hw,
- &rtc.hw,
+ &a1_cecb_32k_sel_pre.hw,
+ &a1_rtc.hw,
},
.num_parents = 2,
},
};
-static struct clk_regmap cecb_32k_out = {
+static struct clk_regmap a1_cecb_32k_out = {
.data = &(struct clk_regmap_gate_data){
.offset = CECB_CLK_CTRL0,
.bit_idx = 30,
@@ -1833,282 +1833,265 @@ static struct clk_regmap cecb_32k_out = {
.name = "cecb_32k_out",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &cecb_32k_sel.hw
+ &a1_cecb_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &sys.hw)
-
-static MESON_GATE(clktree, SYS_CLK_EN0, 0);
-static MESON_GATE(reset_ctrl, SYS_CLK_EN0, 1);
-static MESON_GATE(analog_ctrl, SYS_CLK_EN0, 2);
-static MESON_GATE(pwr_ctrl, SYS_CLK_EN0, 3);
-static MESON_GATE(pad_ctrl, SYS_CLK_EN0, 4);
-static MESON_GATE(sys_ctrl, SYS_CLK_EN0, 5);
-static MESON_GATE(temp_sensor, SYS_CLK_EN0, 6);
-static MESON_GATE(am2axi_dev, SYS_CLK_EN0, 7);
-static MESON_GATE(spicc_b, SYS_CLK_EN0, 8);
-static MESON_GATE(spicc_a, SYS_CLK_EN0, 9);
-static MESON_GATE(msr, SYS_CLK_EN0, 10);
-static MESON_GATE(audio, SYS_CLK_EN0, 11);
-static MESON_GATE(jtag_ctrl, SYS_CLK_EN0, 12);
-static MESON_GATE(saradc_en, SYS_CLK_EN0, 13);
-static MESON_GATE(pwm_ef, SYS_CLK_EN0, 14);
-static MESON_GATE(pwm_cd, SYS_CLK_EN0, 15);
-static MESON_GATE(pwm_ab, SYS_CLK_EN0, 16);
-static MESON_GATE(cec, SYS_CLK_EN0, 17);
-static MESON_GATE(i2c_s, SYS_CLK_EN0, 18);
-static MESON_GATE(ir_ctrl, SYS_CLK_EN0, 19);
-static MESON_GATE(i2c_m_d, SYS_CLK_EN0, 20);
-static MESON_GATE(i2c_m_c, SYS_CLK_EN0, 21);
-static MESON_GATE(i2c_m_b, SYS_CLK_EN0, 22);
-static MESON_GATE(i2c_m_a, SYS_CLK_EN0, 23);
-static MESON_GATE(acodec, SYS_CLK_EN0, 24);
-static MESON_GATE(otp, SYS_CLK_EN0, 25);
-static MESON_GATE(sd_emmc_a, SYS_CLK_EN0, 26);
-static MESON_GATE(usb_phy, SYS_CLK_EN0, 27);
-static MESON_GATE(usb_ctrl, SYS_CLK_EN0, 28);
-static MESON_GATE(sys_dspb, SYS_CLK_EN0, 29);
-static MESON_GATE(sys_dspa, SYS_CLK_EN0, 30);
-static MESON_GATE(dma, SYS_CLK_EN0, 31);
-static MESON_GATE(irq_ctrl, SYS_CLK_EN1, 0);
-static MESON_GATE(nic, SYS_CLK_EN1, 1);
-static MESON_GATE(gic, SYS_CLK_EN1, 2);
-static MESON_GATE(uart_c, SYS_CLK_EN1, 3);
-static MESON_GATE(uart_b, SYS_CLK_EN1, 4);
-static MESON_GATE(uart_a, SYS_CLK_EN1, 5);
-static MESON_GATE(sys_psram, SYS_CLK_EN1, 6);
-static MESON_GATE(rsa, SYS_CLK_EN1, 8);
-static MESON_GATE(coresight, SYS_CLK_EN1, 9);
-static MESON_GATE(am2axi_vad, AXI_CLK_EN, 0);
-static MESON_GATE(audio_vad, AXI_CLK_EN, 1);
-static MESON_GATE(axi_dmc, AXI_CLK_EN, 3);
-static MESON_GATE(axi_psram, AXI_CLK_EN, 4);
-static MESON_GATE(ramb, AXI_CLK_EN, 5);
-static MESON_GATE(rama, AXI_CLK_EN, 6);
-static MESON_GATE(axi_spifc, AXI_CLK_EN, 7);
-static MESON_GATE(axi_nic, AXI_CLK_EN, 8);
-static MESON_GATE(axi_dma, AXI_CLK_EN, 9);
-static MESON_GATE(cpu_ctrl, AXI_CLK_EN, 10);
-static MESON_GATE(rom, AXI_CLK_EN, 11);
-static MESON_GATE(prod_i2c, AXI_CLK_EN, 12);
+static const struct clk_parent_data a1_pclk_parents = { .hw = &a1_sys.hw };
+
+#define A1_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(a1_##_name, _reg, _bit, &a1_pclk_parents, _flags)
+
+/*
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate, or
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock.
+ */
+static A1_PCLK(clktree, SYS_CLK_EN0, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(reset_ctrl, SYS_CLK_EN0, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(analog_ctrl, SYS_CLK_EN0, 2, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwr_ctrl, SYS_CLK_EN0, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(pad_ctrl, SYS_CLK_EN0, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_ctrl, SYS_CLK_EN0, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(temp_sensor, SYS_CLK_EN0, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(am2axi_dev, SYS_CLK_EN0, 7, CLK_IGNORE_UNUSED);
+static A1_PCLK(spicc_b, SYS_CLK_EN0, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(spicc_a, SYS_CLK_EN0, 9, CLK_IGNORE_UNUSED);
+static A1_PCLK(msr, SYS_CLK_EN0, 10, CLK_IGNORE_UNUSED);
+static A1_PCLK(audio, SYS_CLK_EN0, 11, CLK_IGNORE_UNUSED);
+static A1_PCLK(jtag_ctrl, SYS_CLK_EN0, 12, CLK_IGNORE_UNUSED);
+static A1_PCLK(saradc_en, SYS_CLK_EN0, 13, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_ef, SYS_CLK_EN0, 14, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_cd, SYS_CLK_EN0, 15, CLK_IGNORE_UNUSED);
+static A1_PCLK(pwm_ab, SYS_CLK_EN0, 16, CLK_IGNORE_UNUSED);
+static A1_PCLK(cec, SYS_CLK_EN0, 17, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_s, SYS_CLK_EN0, 18, CLK_IGNORE_UNUSED);
+static A1_PCLK(ir_ctrl, SYS_CLK_EN0, 19, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_d, SYS_CLK_EN0, 20, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_c, SYS_CLK_EN0, 21, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_b, SYS_CLK_EN0, 22, CLK_IGNORE_UNUSED);
+static A1_PCLK(i2c_m_a, SYS_CLK_EN0, 23, CLK_IGNORE_UNUSED);
+static A1_PCLK(acodec, SYS_CLK_EN0, 24, CLK_IGNORE_UNUSED);
+static A1_PCLK(otp, SYS_CLK_EN0, 25, CLK_IGNORE_UNUSED);
+static A1_PCLK(sd_emmc_a, SYS_CLK_EN0, 26, CLK_IGNORE_UNUSED);
+static A1_PCLK(usb_phy, SYS_CLK_EN0, 27, CLK_IGNORE_UNUSED);
+static A1_PCLK(usb_ctrl, SYS_CLK_EN0, 28, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_dspb, SYS_CLK_EN0, 29, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_dspa, SYS_CLK_EN0, 30, CLK_IGNORE_UNUSED);
+static A1_PCLK(dma, SYS_CLK_EN0, 31, CLK_IGNORE_UNUSED);
+
+static A1_PCLK(irq_ctrl, SYS_CLK_EN1, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(nic, SYS_CLK_EN1, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(gic, SYS_CLK_EN1, 2, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_c, SYS_CLK_EN1, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_b, SYS_CLK_EN1, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(uart_a, SYS_CLK_EN1, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(sys_psram, SYS_CLK_EN1, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(rsa, SYS_CLK_EN1, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(coresight, SYS_CLK_EN1, 9, CLK_IGNORE_UNUSED);
+
+static A1_PCLK(am2axi_vad, AXI_CLK_EN, 0, CLK_IGNORE_UNUSED);
+static A1_PCLK(audio_vad, AXI_CLK_EN, 1, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_dmc, AXI_CLK_EN, 3, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_psram, AXI_CLK_EN, 4, CLK_IGNORE_UNUSED);
+static A1_PCLK(ramb, AXI_CLK_EN, 5, CLK_IGNORE_UNUSED);
+static A1_PCLK(rama, AXI_CLK_EN, 6, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_spifc, AXI_CLK_EN, 7, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_nic, AXI_CLK_EN, 8, CLK_IGNORE_UNUSED);
+static A1_PCLK(axi_dma, AXI_CLK_EN, 9, CLK_IGNORE_UNUSED);
+static A1_PCLK(cpu_ctrl, AXI_CLK_EN, 10, CLK_IGNORE_UNUSED);
+static A1_PCLK(rom, AXI_CLK_EN, 11, CLK_IGNORE_UNUSED);
+static A1_PCLK(prod_i2c, AXI_CLK_EN, 12, CLK_IGNORE_UNUSED);
/* Array of all clocks registered by this provider */
-static struct clk_hw *a1_periphs_hw_clks[] = {
- [CLKID_XTAL_IN] = &xtal_in.hw,
- [CLKID_FIXPLL_IN] = &fixpll_in.hw,
- [CLKID_USB_PHY_IN] = &usb_phy_in.hw,
- [CLKID_USB_CTRL_IN] = &usb_ctrl_in.hw,
- [CLKID_HIFIPLL_IN] = &hifipll_in.hw,
- [CLKID_SYSPLL_IN] = &syspll_in.hw,
- [CLKID_DDS_IN] = &dds_in.hw,
- [CLKID_SYS] = &sys.hw,
- [CLKID_CLKTREE] = &clktree.hw,
- [CLKID_RESET_CTRL] = &reset_ctrl.hw,
- [CLKID_ANALOG_CTRL] = &analog_ctrl.hw,
- [CLKID_PWR_CTRL] = &pwr_ctrl.hw,
- [CLKID_PAD_CTRL] = &pad_ctrl.hw,
- [CLKID_SYS_CTRL] = &sys_ctrl.hw,
- [CLKID_TEMP_SENSOR] = &temp_sensor.hw,
- [CLKID_AM2AXI_DIV] = &am2axi_dev.hw,
- [CLKID_SPICC_B] = &spicc_b.hw,
- [CLKID_SPICC_A] = &spicc_a.hw,
- [CLKID_MSR] = &msr.hw,
- [CLKID_AUDIO] = &audio.hw,
- [CLKID_JTAG_CTRL] = &jtag_ctrl.hw,
- [CLKID_SARADC_EN] = &saradc_en.hw,
- [CLKID_PWM_EF] = &pwm_ef.hw,
- [CLKID_PWM_CD] = &pwm_cd.hw,
- [CLKID_PWM_AB] = &pwm_ab.hw,
- [CLKID_CEC] = &cec.hw,
- [CLKID_I2C_S] = &i2c_s.hw,
- [CLKID_IR_CTRL] = &ir_ctrl.hw,
- [CLKID_I2C_M_D] = &i2c_m_d.hw,
- [CLKID_I2C_M_C] = &i2c_m_c.hw,
- [CLKID_I2C_M_B] = &i2c_m_b.hw,
- [CLKID_I2C_M_A] = &i2c_m_a.hw,
- [CLKID_ACODEC] = &acodec.hw,
- [CLKID_OTP] = &otp.hw,
- [CLKID_SD_EMMC_A] = &sd_emmc_a.hw,
- [CLKID_USB_PHY] = &usb_phy.hw,
- [CLKID_USB_CTRL] = &usb_ctrl.hw,
- [CLKID_SYS_DSPB] = &sys_dspb.hw,
- [CLKID_SYS_DSPA] = &sys_dspa.hw,
- [CLKID_DMA] = &dma.hw,
- [CLKID_IRQ_CTRL] = &irq_ctrl.hw,
- [CLKID_NIC] = &nic.hw,
- [CLKID_GIC] = &gic.hw,
- [CLKID_UART_C] = &uart_c.hw,
- [CLKID_UART_B] = &uart_b.hw,
- [CLKID_UART_A] = &uart_a.hw,
- [CLKID_SYS_PSRAM] = &sys_psram.hw,
- [CLKID_RSA] = &rsa.hw,
- [CLKID_CORESIGHT] = &coresight.hw,
- [CLKID_AM2AXI_VAD] = &am2axi_vad.hw,
- [CLKID_AUDIO_VAD] = &audio_vad.hw,
- [CLKID_AXI_DMC] = &axi_dmc.hw,
- [CLKID_AXI_PSRAM] = &axi_psram.hw,
- [CLKID_RAMB] = &ramb.hw,
- [CLKID_RAMA] = &rama.hw,
- [CLKID_AXI_SPIFC] = &axi_spifc.hw,
- [CLKID_AXI_NIC] = &axi_nic.hw,
- [CLKID_AXI_DMA] = &axi_dma.hw,
- [CLKID_CPU_CTRL] = &cpu_ctrl.hw,
- [CLKID_ROM] = &rom.hw,
- [CLKID_PROC_I2C] = &prod_i2c.hw,
- [CLKID_DSPA_SEL] = &dspa_sel.hw,
- [CLKID_DSPB_SEL] = &dspb_sel.hw,
- [CLKID_DSPA_EN] = &dspa_en.hw,
- [CLKID_DSPA_EN_NIC] = &dspa_en_nic.hw,
- [CLKID_DSPB_EN] = &dspb_en.hw,
- [CLKID_DSPB_EN_NIC] = &dspb_en_nic.hw,
- [CLKID_RTC] = &rtc.hw,
- [CLKID_CECA_32K] = &ceca_32k_out.hw,
- [CLKID_CECB_32K] = &cecb_32k_out.hw,
- [CLKID_24M] = &clk_24m.hw,
- [CLKID_12M] = &clk_12m.hw,
- [CLKID_FCLK_DIV2_DIVN] = &fclk_div2_divn.hw,
- [CLKID_GEN] = &gen.hw,
- [CLKID_SARADC_SEL] = &saradc_sel.hw,
- [CLKID_SARADC] = &saradc.hw,
- [CLKID_PWM_A] = &pwm_a.hw,
- [CLKID_PWM_B] = &pwm_b.hw,
- [CLKID_PWM_C] = &pwm_c.hw,
- [CLKID_PWM_D] = &pwm_d.hw,
- [CLKID_PWM_E] = &pwm_e.hw,
- [CLKID_PWM_F] = &pwm_f.hw,
- [CLKID_SPICC] = &spicc.hw,
- [CLKID_TS] = &ts.hw,
- [CLKID_SPIFC] = &spifc.hw,
- [CLKID_USB_BUS] = &usb_bus.hw,
- [CLKID_SD_EMMC] = &sd_emmc.hw,
- [CLKID_PSRAM] = &psram.hw,
- [CLKID_DMC] = &dmc.hw,
- [CLKID_SYS_A_SEL] = &sys_a_sel.hw,
- [CLKID_SYS_A_DIV] = &sys_a_div.hw,
- [CLKID_SYS_A] = &sys_a.hw,
- [CLKID_SYS_B_SEL] = &sys_b_sel.hw,
- [CLKID_SYS_B_DIV] = &sys_b_div.hw,
- [CLKID_SYS_B] = &sys_b.hw,
- [CLKID_DSPA_A_SEL] = &dspa_a_sel.hw,
- [CLKID_DSPA_A_DIV] = &dspa_a_div.hw,
- [CLKID_DSPA_A] = &dspa_a.hw,
- [CLKID_DSPA_B_SEL] = &dspa_b_sel.hw,
- [CLKID_DSPA_B_DIV] = &dspa_b_div.hw,
- [CLKID_DSPA_B] = &dspa_b.hw,
- [CLKID_DSPB_A_SEL] = &dspb_a_sel.hw,
- [CLKID_DSPB_A_DIV] = &dspb_a_div.hw,
- [CLKID_DSPB_A] = &dspb_a.hw,
- [CLKID_DSPB_B_SEL] = &dspb_b_sel.hw,
- [CLKID_DSPB_B_DIV] = &dspb_b_div.hw,
- [CLKID_DSPB_B] = &dspb_b.hw,
- [CLKID_RTC_32K_IN] = &rtc_32k_in.hw,
- [CLKID_RTC_32K_DIV] = &rtc_32k_div.hw,
- [CLKID_RTC_32K_XTAL] = &rtc_32k_xtal.hw,
- [CLKID_RTC_32K_SEL] = &rtc_32k_sel.hw,
- [CLKID_CECB_32K_IN] = &cecb_32k_in.hw,
- [CLKID_CECB_32K_DIV] = &cecb_32k_div.hw,
- [CLKID_CECB_32K_SEL_PRE] = &cecb_32k_sel_pre.hw,
- [CLKID_CECB_32K_SEL] = &cecb_32k_sel.hw,
- [CLKID_CECA_32K_IN] = &ceca_32k_in.hw,
- [CLKID_CECA_32K_DIV] = &ceca_32k_div.hw,
- [CLKID_CECA_32K_SEL_PRE] = &ceca_32k_sel_pre.hw,
- [CLKID_CECA_32K_SEL] = &ceca_32k_sel.hw,
- [CLKID_DIV2_PRE] = &fclk_div2_divn_pre.hw,
- [CLKID_24M_DIV2] = &clk_24m_div2.hw,
- [CLKID_GEN_SEL] = &gen_sel.hw,
- [CLKID_GEN_DIV] = &gen_div.hw,
- [CLKID_SARADC_DIV] = &saradc_div.hw,
- [CLKID_PWM_A_SEL] = &pwm_a_sel.hw,
- [CLKID_PWM_A_DIV] = &pwm_a_div.hw,
- [CLKID_PWM_B_SEL] = &pwm_b_sel.hw,
- [CLKID_PWM_B_DIV] = &pwm_b_div.hw,
- [CLKID_PWM_C_SEL] = &pwm_c_sel.hw,
- [CLKID_PWM_C_DIV] = &pwm_c_div.hw,
- [CLKID_PWM_D_SEL] = &pwm_d_sel.hw,
- [CLKID_PWM_D_DIV] = &pwm_d_div.hw,
- [CLKID_PWM_E_SEL] = &pwm_e_sel.hw,
- [CLKID_PWM_E_DIV] = &pwm_e_div.hw,
- [CLKID_PWM_F_SEL] = &pwm_f_sel.hw,
- [CLKID_PWM_F_DIV] = &pwm_f_div.hw,
- [CLKID_SPICC_SEL] = &spicc_sel.hw,
- [CLKID_SPICC_DIV] = &spicc_div.hw,
- [CLKID_SPICC_SEL2] = &spicc_sel2.hw,
- [CLKID_TS_DIV] = &ts_div.hw,
- [CLKID_SPIFC_SEL] = &spifc_sel.hw,
- [CLKID_SPIFC_DIV] = &spifc_div.hw,
- [CLKID_SPIFC_SEL2] = &spifc_sel2.hw,
- [CLKID_USB_BUS_SEL] = &usb_bus_sel.hw,
- [CLKID_USB_BUS_DIV] = &usb_bus_div.hw,
- [CLKID_SD_EMMC_SEL] = &sd_emmc_sel.hw,
- [CLKID_SD_EMMC_DIV] = &sd_emmc_div.hw,
- [CLKID_SD_EMMC_SEL2] = &sd_emmc_sel2.hw,
- [CLKID_PSRAM_SEL] = &psram_sel.hw,
- [CLKID_PSRAM_DIV] = &psram_div.hw,
- [CLKID_PSRAM_SEL2] = &psram_sel2.hw,
- [CLKID_DMC_SEL] = &dmc_sel.hw,
- [CLKID_DMC_DIV] = &dmc_div.hw,
- [CLKID_DMC_SEL2] = &dmc_sel2.hw,
-};
-
-static const struct regmap_config a1_periphs_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = DMC_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data a1_periphs_clks = {
- .hws = a1_periphs_hw_clks,
- .num = ARRAY_SIZE(a1_periphs_hw_clks),
-};
-
-static int meson_a1_periphs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *base;
- struct regmap *map;
- int clkid, err;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- map = devm_regmap_init_mmio(dev, base, &a1_periphs_regmap_cfg);
- if (IS_ERR(map))
- return dev_err_probe(dev, PTR_ERR(map),
- "can't init regmap mmio region\n");
-
- for (clkid = 0; clkid < a1_periphs_clks.num; clkid++) {
- err = devm_clk_hw_register(dev, a1_periphs_clks.hws[clkid]);
- if (err)
- return dev_err_probe(dev, err,
- "clock[%d] registration failed\n",
- clkid);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, &a1_periphs_clks);
-}
-
-static const struct of_device_id a1_periphs_clkc_match_table[] = {
- { .compatible = "amlogic,a1-peripherals-clkc", },
+static struct clk_hw *a1_peripherals_hw_clks[] = {
+ [CLKID_XTAL_IN] = &a1_xtal_in.hw,
+ [CLKID_FIXPLL_IN] = &a1_fixpll_in.hw,
+ [CLKID_USB_PHY_IN] = &a1_usb_phy_in.hw,
+ [CLKID_USB_CTRL_IN] = &a1_usb_ctrl_in.hw,
+ [CLKID_HIFIPLL_IN] = &a1_hifipll_in.hw,
+ [CLKID_SYSPLL_IN] = &a1_syspll_in.hw,
+ [CLKID_DDS_IN] = &a1_dds_in.hw,
+ [CLKID_SYS] = &a1_sys.hw,
+ [CLKID_CLKTREE] = &a1_clktree.hw,
+ [CLKID_RESET_CTRL] = &a1_reset_ctrl.hw,
+ [CLKID_ANALOG_CTRL] = &a1_analog_ctrl.hw,
+ [CLKID_PWR_CTRL] = &a1_pwr_ctrl.hw,
+ [CLKID_PAD_CTRL] = &a1_pad_ctrl.hw,
+ [CLKID_SYS_CTRL] = &a1_sys_ctrl.hw,
+ [CLKID_TEMP_SENSOR] = &a1_temp_sensor.hw,
+ [CLKID_AM2AXI_DIV] = &a1_am2axi_dev.hw,
+ [CLKID_SPICC_B] = &a1_spicc_b.hw,
+ [CLKID_SPICC_A] = &a1_spicc_a.hw,
+ [CLKID_MSR] = &a1_msr.hw,
+ [CLKID_AUDIO] = &a1_audio.hw,
+ [CLKID_JTAG_CTRL] = &a1_jtag_ctrl.hw,
+ [CLKID_SARADC_EN] = &a1_saradc_en.hw,
+ [CLKID_PWM_EF] = &a1_pwm_ef.hw,
+ [CLKID_PWM_CD] = &a1_pwm_cd.hw,
+ [CLKID_PWM_AB] = &a1_pwm_ab.hw,
+ [CLKID_CEC] = &a1_cec.hw,
+ [CLKID_I2C_S] = &a1_i2c_s.hw,
+ [CLKID_IR_CTRL] = &a1_ir_ctrl.hw,
+ [CLKID_I2C_M_D] = &a1_i2c_m_d.hw,
+ [CLKID_I2C_M_C] = &a1_i2c_m_c.hw,
+ [CLKID_I2C_M_B] = &a1_i2c_m_b.hw,
+ [CLKID_I2C_M_A] = &a1_i2c_m_a.hw,
+ [CLKID_ACODEC] = &a1_acodec.hw,
+ [CLKID_OTP] = &a1_otp.hw,
+ [CLKID_SD_EMMC_A] = &a1_sd_emmc_a.hw,
+ [CLKID_USB_PHY] = &a1_usb_phy.hw,
+ [CLKID_USB_CTRL] = &a1_usb_ctrl.hw,
+ [CLKID_SYS_DSPB] = &a1_sys_dspb.hw,
+ [CLKID_SYS_DSPA] = &a1_sys_dspa.hw,
+ [CLKID_DMA] = &a1_dma.hw,
+ [CLKID_IRQ_CTRL] = &a1_irq_ctrl.hw,
+ [CLKID_NIC] = &a1_nic.hw,
+ [CLKID_GIC] = &a1_gic.hw,
+ [CLKID_UART_C] = &a1_uart_c.hw,
+ [CLKID_UART_B] = &a1_uart_b.hw,
+ [CLKID_UART_A] = &a1_uart_a.hw,
+ [CLKID_SYS_PSRAM] = &a1_sys_psram.hw,
+ [CLKID_RSA] = &a1_rsa.hw,
+ [CLKID_CORESIGHT] = &a1_coresight.hw,
+ [CLKID_AM2AXI_VAD] = &a1_am2axi_vad.hw,
+ [CLKID_AUDIO_VAD] = &a1_audio_vad.hw,
+ [CLKID_AXI_DMC] = &a1_axi_dmc.hw,
+ [CLKID_AXI_PSRAM] = &a1_axi_psram.hw,
+ [CLKID_RAMB] = &a1_ramb.hw,
+ [CLKID_RAMA] = &a1_rama.hw,
+ [CLKID_AXI_SPIFC] = &a1_axi_spifc.hw,
+ [CLKID_AXI_NIC] = &a1_axi_nic.hw,
+ [CLKID_AXI_DMA] = &a1_axi_dma.hw,
+ [CLKID_CPU_CTRL] = &a1_cpu_ctrl.hw,
+ [CLKID_ROM] = &a1_rom.hw,
+ [CLKID_PROC_I2C] = &a1_prod_i2c.hw,
+ [CLKID_DSPA_SEL] = &a1_dspa_sel.hw,
+ [CLKID_DSPB_SEL] = &a1_dspb_sel.hw,
+ [CLKID_DSPA_EN] = &a1_dspa_en.hw,
+ [CLKID_DSPA_EN_NIC] = &a1_dspa_en_nic.hw,
+ [CLKID_DSPB_EN] = &a1_dspb_en.hw,
+ [CLKID_DSPB_EN_NIC] = &a1_dspb_en_nic.hw,
+ [CLKID_RTC] = &a1_rtc.hw,
+ [CLKID_CECA_32K] = &a1_ceca_32k_out.hw,
+ [CLKID_CECB_32K] = &a1_cecb_32k_out.hw,
+ [CLKID_24M] = &a1_24m.hw,
+ [CLKID_12M] = &a1_12m.hw,
+ [CLKID_FCLK_DIV2_DIVN] = &a1_fclk_div2_divn.hw,
+ [CLKID_GEN] = &a1_gen.hw,
+ [CLKID_SARADC_SEL] = &a1_saradc_sel.hw,
+ [CLKID_SARADC] = &a1_saradc.hw,
+ [CLKID_PWM_A] = &a1_pwm_a.hw,
+ [CLKID_PWM_B] = &a1_pwm_b.hw,
+ [CLKID_PWM_C] = &a1_pwm_c.hw,
+ [CLKID_PWM_D] = &a1_pwm_d.hw,
+ [CLKID_PWM_E] = &a1_pwm_e.hw,
+ [CLKID_PWM_F] = &a1_pwm_f.hw,
+ [CLKID_SPICC] = &a1_spicc.hw,
+ [CLKID_TS] = &a1_ts.hw,
+ [CLKID_SPIFC] = &a1_spifc.hw,
+ [CLKID_USB_BUS] = &a1_usb_bus.hw,
+ [CLKID_SD_EMMC] = &a1_sd_emmc.hw,
+ [CLKID_PSRAM] = &a1_psram.hw,
+ [CLKID_DMC] = &a1_dmc.hw,
+ [CLKID_SYS_A_SEL] = &a1_sys_a_sel.hw,
+ [CLKID_SYS_A_DIV] = &a1_sys_a_div.hw,
+ [CLKID_SYS_A] = &a1_sys_a.hw,
+ [CLKID_SYS_B_SEL] = &a1_sys_b_sel.hw,
+ [CLKID_SYS_B_DIV] = &a1_sys_b_div.hw,
+ [CLKID_SYS_B] = &a1_sys_b.hw,
+ [CLKID_DSPA_A_SEL] = &a1_dspa_a_sel.hw,
+ [CLKID_DSPA_A_DIV] = &a1_dspa_a_div.hw,
+ [CLKID_DSPA_A] = &a1_dspa_a.hw,
+ [CLKID_DSPA_B_SEL] = &a1_dspa_b_sel.hw,
+ [CLKID_DSPA_B_DIV] = &a1_dspa_b_div.hw,
+ [CLKID_DSPA_B] = &a1_dspa_b.hw,
+ [CLKID_DSPB_A_SEL] = &a1_dspb_a_sel.hw,
+ [CLKID_DSPB_A_DIV] = &a1_dspb_a_div.hw,
+ [CLKID_DSPB_A] = &a1_dspb_a.hw,
+ [CLKID_DSPB_B_SEL] = &a1_dspb_b_sel.hw,
+ [CLKID_DSPB_B_DIV] = &a1_dspb_b_div.hw,
+ [CLKID_DSPB_B] = &a1_dspb_b.hw,
+ [CLKID_RTC_32K_IN] = &a1_rtc_32k_in.hw,
+ [CLKID_RTC_32K_DIV] = &a1_rtc_32k_div.hw,
+ [CLKID_RTC_32K_XTAL] = &a1_rtc_32k_xtal.hw,
+ [CLKID_RTC_32K_SEL] = &a1_rtc_32k_sel.hw,
+ [CLKID_CECB_32K_IN] = &a1_cecb_32k_in.hw,
+ [CLKID_CECB_32K_DIV] = &a1_cecb_32k_div.hw,
+ [CLKID_CECB_32K_SEL_PRE] = &a1_cecb_32k_sel_pre.hw,
+ [CLKID_CECB_32K_SEL] = &a1_cecb_32k_sel.hw,
+ [CLKID_CECA_32K_IN] = &a1_ceca_32k_in.hw,
+ [CLKID_CECA_32K_DIV] = &a1_ceca_32k_div.hw,
+ [CLKID_CECA_32K_SEL_PRE] = &a1_ceca_32k_sel_pre.hw,
+ [CLKID_CECA_32K_SEL] = &a1_ceca_32k_sel.hw,
+ [CLKID_DIV2_PRE] = &a1_fclk_div2_divn_pre.hw,
+ [CLKID_24M_DIV2] = &a1_24m_div2.hw,
+ [CLKID_GEN_SEL] = &a1_gen_sel.hw,
+ [CLKID_GEN_DIV] = &a1_gen_div.hw,
+ [CLKID_SARADC_DIV] = &a1_saradc_div.hw,
+ [CLKID_PWM_A_SEL] = &a1_pwm_a_sel.hw,
+ [CLKID_PWM_A_DIV] = &a1_pwm_a_div.hw,
+ [CLKID_PWM_B_SEL] = &a1_pwm_b_sel.hw,
+ [CLKID_PWM_B_DIV] = &a1_pwm_b_div.hw,
+ [CLKID_PWM_C_SEL] = &a1_pwm_c_sel.hw,
+ [CLKID_PWM_C_DIV] = &a1_pwm_c_div.hw,
+ [CLKID_PWM_D_SEL] = &a1_pwm_d_sel.hw,
+ [CLKID_PWM_D_DIV] = &a1_pwm_d_div.hw,
+ [CLKID_PWM_E_SEL] = &a1_pwm_e_sel.hw,
+ [CLKID_PWM_E_DIV] = &a1_pwm_e_div.hw,
+ [CLKID_PWM_F_SEL] = &a1_pwm_f_sel.hw,
+ [CLKID_PWM_F_DIV] = &a1_pwm_f_div.hw,
+ [CLKID_SPICC_SEL] = &a1_spicc_sel.hw,
+ [CLKID_SPICC_DIV] = &a1_spicc_div.hw,
+ [CLKID_SPICC_SEL2] = &a1_spicc_sel2.hw,
+ [CLKID_TS_DIV] = &a1_ts_div.hw,
+ [CLKID_SPIFC_SEL] = &a1_spifc_sel.hw,
+ [CLKID_SPIFC_DIV] = &a1_spifc_div.hw,
+ [CLKID_SPIFC_SEL2] = &a1_spifc_sel2.hw,
+ [CLKID_USB_BUS_SEL] = &a1_usb_bus_sel.hw,
+ [CLKID_USB_BUS_DIV] = &a1_usb_bus_div.hw,
+ [CLKID_SD_EMMC_SEL] = &a1_sd_emmc_sel.hw,
+ [CLKID_SD_EMMC_DIV] = &a1_sd_emmc_div.hw,
+ [CLKID_SD_EMMC_SEL2] = &a1_sd_emmc_sel2.hw,
+ [CLKID_PSRAM_SEL] = &a1_psram_sel.hw,
+ [CLKID_PSRAM_DIV] = &a1_psram_div.hw,
+ [CLKID_PSRAM_SEL2] = &a1_psram_sel2.hw,
+ [CLKID_DMC_SEL] = &a1_dmc_sel.hw,
+ [CLKID_DMC_DIV] = &a1_dmc_div.hw,
+ [CLKID_DMC_SEL2] = &a1_dmc_sel2.hw,
+};
+
+static const struct meson_clkc_data a1_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = a1_peripherals_hw_clks,
+ .num = ARRAY_SIZE(a1_peripherals_hw_clks),
+ },
+};
+
+static const struct of_device_id a1_peripherals_clkc_match_table[] = {
+ {
+ .compatible = "amlogic,a1-peripherals-clkc",
+ .data = &a1_peripherals_clkc_data,
+ },
{}
};
-MODULE_DEVICE_TABLE(of, a1_periphs_clkc_match_table);
+MODULE_DEVICE_TABLE(of, a1_peripherals_clkc_match_table);
-static struct platform_driver a1_periphs_clkc_driver = {
- .probe = meson_a1_periphs_probe,
+static struct platform_driver a1_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "a1-peripherals-clkc",
- .of_match_table = a1_periphs_clkc_match_table,
+ .of_match_table = a1_peripherals_clkc_match_table,
},
};
-module_platform_driver(a1_periphs_clkc_driver);
+module_platform_driver(a1_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic A1 Peripherals Clock Controller driver");
MODULE_AUTHOR("Jian Hu <jian.hu@amlogic.com>");
diff --git a/drivers/clk/meson/a1-pll.c b/drivers/clk/meson/a1-pll.c
index dabd4fad1f57..1f82e9c7c14e 100644
--- a/drivers/clk/meson/a1-pll.c
+++ b/drivers/clk/meson/a1-pll.c
@@ -26,7 +26,7 @@
#include <dt-bindings/clock/amlogic,a1-pll-clkc.h>
-static struct clk_regmap fixed_pll_dco = {
+static struct clk_regmap a1_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
.reg_off = ANACTRL_FIXPLL_CTRL0,
@@ -69,7 +69,7 @@ static struct clk_regmap fixed_pll_dco = {
},
};
-static struct clk_regmap fixed_pll = {
+static struct clk_regmap a1_fixed_pll = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 20,
@@ -78,18 +78,18 @@ static struct clk_regmap fixed_pll = {
.name = "fixed_pll",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll_dco.hw
+ &a1_fixed_pll_dco.hw
},
.num_parents = 1,
},
};
-static const struct pll_mult_range hifi_pll_mult_range = {
+static const struct pll_mult_range a1_hifi_pll_range = {
.min = 32,
.max = 64,
};
-static const struct reg_sequence hifi_init_regs[] = {
+static const struct reg_sequence a1_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL1, .def = 0x01800000 },
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00001100 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x100a1100 },
@@ -97,7 +97,7 @@ static const struct reg_sequence hifi_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL0, .def = 0x01f18000 },
};
-static struct clk_regmap hifi_pll = {
+static struct clk_regmap a1_hifi_pll = {
.data = &(struct meson_clk_pll_data){
.en = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
@@ -134,9 +134,9 @@ static struct clk_regmap hifi_pll = {
.shift = 6,
.width = 1,
},
- .range = &hifi_pll_mult_range,
- .init_regs = hifi_init_regs,
- .init_count = ARRAY_SIZE(hifi_init_regs),
+ .range = &a1_hifi_pll_range,
+ .init_regs = a1_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(a1_hifi_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "hifi_pll",
@@ -148,20 +148,20 @@ static struct clk_regmap hifi_pll = {
},
};
-static struct clk_fixed_factor fclk_div2_div = {
+static struct clk_fixed_factor a1_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
.name = "fclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div2 = {
+static struct clk_regmap a1_fclk_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 21,
@@ -170,7 +170,7 @@ static struct clk_regmap fclk_div2 = {
.name = "fclk_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_div.hw
+ &a1_fclk_div2_div.hw
},
.num_parents = 1,
/*
@@ -186,20 +186,20 @@ static struct clk_regmap fclk_div2 = {
},
};
-static struct clk_fixed_factor fclk_div3_div = {
+static struct clk_fixed_factor a1_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data){
.name = "fclk_div3_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div3 = {
+static struct clk_regmap a1_fclk_div3 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 22,
@@ -208,7 +208,7 @@ static struct clk_regmap fclk_div3 = {
.name = "fclk_div3",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div3_div.hw
+ &a1_fclk_div3_div.hw
},
.num_parents = 1,
/*
@@ -219,20 +219,20 @@ static struct clk_regmap fclk_div3 = {
},
};
-static struct clk_fixed_factor fclk_div5_div = {
+static struct clk_fixed_factor a1_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data){
.name = "fclk_div5_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div5 = {
+static struct clk_regmap a1_fclk_div5 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 23,
@@ -241,7 +241,7 @@ static struct clk_regmap fclk_div5 = {
.name = "fclk_div5",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div5_div.hw
+ &a1_fclk_div5_div.hw
},
.num_parents = 1,
/*
@@ -252,20 +252,20 @@ static struct clk_regmap fclk_div5 = {
},
};
-static struct clk_fixed_factor fclk_div7_div = {
+static struct clk_fixed_factor a1_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data){
.name = "fclk_div7_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fixed_pll.hw
+ &a1_fixed_pll.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap fclk_div7 = {
+static struct clk_regmap a1_fclk_div7 = {
.data = &(struct clk_regmap_gate_data){
.offset = ANACTRL_FIXPLL_CTRL0,
.bit_idx = 24,
@@ -274,7 +274,7 @@ static struct clk_regmap fclk_div7 = {
.name = "fclk_div7",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div7_div.hw
+ &a1_fclk_div7_div.hw
},
.num_parents = 1,
},
@@ -282,69 +282,37 @@ static struct clk_regmap fclk_div7 = {
/* Array of all clocks registered by this provider */
static struct clk_hw *a1_pll_hw_clks[] = {
- [CLKID_FIXED_PLL_DCO] = &fixed_pll_dco.hw,
- [CLKID_FIXED_PLL] = &fixed_pll.hw,
- [CLKID_FCLK_DIV2_DIV] = &fclk_div2_div.hw,
- [CLKID_FCLK_DIV3_DIV] = &fclk_div3_div.hw,
- [CLKID_FCLK_DIV5_DIV] = &fclk_div5_div.hw,
- [CLKID_FCLK_DIV7_DIV] = &fclk_div7_div.hw,
- [CLKID_FCLK_DIV2] = &fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &fclk_div3.hw,
- [CLKID_FCLK_DIV5] = &fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &fclk_div7.hw,
- [CLKID_HIFI_PLL] = &hifi_pll.hw,
+ [CLKID_FIXED_PLL_DCO] = &a1_fixed_pll_dco.hw,
+ [CLKID_FIXED_PLL] = &a1_fixed_pll.hw,
+ [CLKID_FCLK_DIV2_DIV] = &a1_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV3_DIV] = &a1_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV5_DIV] = &a1_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV7_DIV] = &a1_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV2] = &a1_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &a1_fclk_div3.hw,
+ [CLKID_FCLK_DIV5] = &a1_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &a1_fclk_div7.hw,
+ [CLKID_HIFI_PLL] = &a1_hifi_pll.hw,
};
-static const struct regmap_config a1_pll_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_HIFIPLL_STS,
-};
-
-static struct meson_clk_hw_data a1_pll_clks = {
- .hws = a1_pll_hw_clks,
- .num = ARRAY_SIZE(a1_pll_hw_clks),
+static const struct meson_clkc_data a1_pll_clkc_data = {
+ .hw_clks = {
+ .hws = a1_pll_hw_clks,
+ .num = ARRAY_SIZE(a1_pll_hw_clks),
+ },
};
-static int meson_a1_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- void __iomem *base;
- struct regmap *map;
- int clkid, err;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- map = devm_regmap_init_mmio(dev, base, &a1_pll_regmap_cfg);
- if (IS_ERR(map))
- return dev_err_probe(dev, PTR_ERR(map),
- "can't init regmap mmio region\n");
-
- /* Register clocks */
- for (clkid = 0; clkid < a1_pll_clks.num; clkid++) {
- err = devm_clk_hw_register(dev, a1_pll_clks.hws[clkid]);
- if (err)
- return dev_err_probe(dev, err,
- "clock[%d] registration failed\n",
- clkid);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &a1_pll_clks);
-}
-
static const struct of_device_id a1_pll_clkc_match_table[] = {
- { .compatible = "amlogic,a1-pll-clkc", },
+ {
+ .compatible = "amlogic,a1-pll-clkc",
+ .data = &a1_pll_clkc_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, a1_pll_clkc_match_table);
static struct platform_driver a1_pll_clkc_driver = {
- .probe = meson_a1_pll_probe,
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "a1-pll-clkc",
.of_match_table = a1_pll_clkc_match_table,
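
A sizing note on the renamed a1_hifi_pll_range: assuming the usual 24 MHz crystal on the A1 feeding hifipll_in, the min/max multipliers of 32 and 64 bound the PLL DCO to roughly 768-1536 MHz. A minimal, hypothetical consumer-side sketch (clock name, device wiring, and target rate are assumptions for illustration):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: pick a rate inside the 768-1536 MHz DCO window */
static int hifi_pll_set_audio_rate(struct device *dev)
{
	struct clk *pll = devm_clk_get(dev, "hifi_pll");	/* name assumed */

	if (IS_ERR(pll))
		return PTR_ERR(pll);

	/* 1228.8 MHz = 24 MHz x 51.2, comfortably inside the range */
	return clk_set_rate(pll, 1228800000);
}
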
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index cd5d0b5ebdb2..902fbd34039c 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -34,32 +34,21 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-#define AXG_AO_GATE(_name, _bit) \
-static struct clk_regmap axg_aoclk_##_name = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = (AO_RTI_GEN_CNTL_REG0), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "axg_ao_" #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static const struct clk_parent_data axg_ao_pclk_parents = { .fw_name = "mpeg-clk" };
-AXG_AO_GATE(remote, 0);
-AXG_AO_GATE(i2c_master, 1);
-AXG_AO_GATE(i2c_slave, 2);
-AXG_AO_GATE(uart1, 3);
-AXG_AO_GATE(uart2, 5);
-AXG_AO_GATE(ir_blaster, 6);
-AXG_AO_GATE(saradc, 7);
+#define AXG_AO_GATE(_name, _bit, _flags) \
+ MESON_PCLK(axg_ao_##_name, AO_RTI_GEN_CNTL_REG0, _bit, \
+ &axg_ao_pclk_parents, _flags)
-static struct clk_regmap axg_aoclk_cts_oscin = {
+static AXG_AO_GATE(remote, 0, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(i2c_master, 1, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(i2c_slave, 2, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(uart1, 3, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(uart2, 5, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(ir_blaster, 6, CLK_IGNORE_UNUSED);
+static AXG_AO_GATE(saradc, 7, CLK_IGNORE_UNUSED);
+
+static struct clk_regmap axg_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 14,
@@ -74,7 +63,7 @@ static struct clk_regmap axg_aoclk_cts_oscin = {
},
};
-static struct clk_regmap axg_aoclk_32k_pre = {
+static struct clk_regmap axg_ao_32k_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
@@ -83,7 +72,7 @@ static struct clk_regmap axg_aoclk_32k_pre = {
.name = "axg_ao_32k_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_cts_oscin.hw
+ &axg_ao_cts_oscin.hw
},
.num_parents = 1,
},
@@ -99,7 +88,7 @@ static const struct meson_clk_dualdiv_param axg_32k_div_table[] = {
}, {}
};
-static struct clk_regmap axg_aoclk_32k_div = {
+static struct clk_regmap axg_ao_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -132,13 +121,13 @@ static struct clk_regmap axg_aoclk_32k_div = {
.name = "axg_ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_pre.hw
+ &axg_ao_32k_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap axg_aoclk_32k_sel = {
+static struct clk_regmap axg_ao_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -149,15 +138,15 @@ static struct clk_regmap axg_aoclk_32k_sel = {
.name = "axg_ao_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_div.hw,
- &axg_aoclk_32k_pre.hw,
+ &axg_ao_32k_div.hw,
+ &axg_ao_32k_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_32k = {
+static struct clk_regmap axg_ao_32k = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
@@ -166,14 +155,14 @@ static struct clk_regmap axg_aoclk_32k = {
.name = "axg_ao_32k",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_32k_sel.hw
+ &axg_ao_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
+static struct clk_regmap axg_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -184,7 +173,7 @@ static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
.name = "axg_ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &axg_aoclk_32k.hw },
+ { .hw = &axg_ao_32k.hw },
{ .fw_name = "ext_32k-0", },
},
.num_parents = 2,
@@ -192,7 +181,7 @@ static struct clk_regmap axg_aoclk_cts_rtc_oscin = {
},
};
-static struct clk_regmap axg_aoclk_clk81 = {
+static struct clk_regmap axg_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -200,68 +189,74 @@ static struct clk_regmap axg_aoclk_clk81 = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
+ /*
+ * NOTE: this is one of the infamous clocks the pwm driver can
+ * request directly by its global name. This is wrong, but there
+ * is not much we can do about it until support for the old pwm
+ * bindings is dropped.
+ */
.name = "axg_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &axg_aoclk_cts_rtc_oscin.hw },
+ { .hw = &axg_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_saradc_mux = {
+static struct clk_regmap axg_ao_saradc_mux = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_SAR_CLK,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_mux",
+ .name = "ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &axg_aoclk_clk81.hw },
+ { .hw = &axg_ao_clk81.hw },
},
.num_parents = 2,
},
};
-static struct clk_regmap axg_aoclk_saradc_div = {
+static struct clk_regmap axg_ao_saradc_div = {
.data = &(struct clk_regmap_div_data) {
.offset = AO_SAR_CLK,
.shift = 0,
.width = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_div",
+ .name = "ao_saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_saradc_mux.hw
+ &axg_ao_saradc_mux.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap axg_aoclk_saradc_gate = {
+static struct clk_regmap axg_ao_saradc_gate = {
.data = &(struct clk_regmap_gate_data) {
.offset = AO_SAR_CLK,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "axg_ao_saradc_gate",
+ .name = "ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_aoclk_saradc_div.hw
+ &axg_ao_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int axg_aoclk_reset[] = {
+static const unsigned int axg_ao_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
[RESET_AO_I2C_SLAVE] = 19,
@@ -270,53 +265,55 @@ static const unsigned int axg_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_hw *axg_aoclk_hw_clks[] = {
- [CLKID_AO_REMOTE] = &axg_aoclk_remote.hw,
- [CLKID_AO_I2C_MASTER] = &axg_aoclk_i2c_master.hw,
- [CLKID_AO_I2C_SLAVE] = &axg_aoclk_i2c_slave.hw,
- [CLKID_AO_UART1] = &axg_aoclk_uart1.hw,
- [CLKID_AO_UART2] = &axg_aoclk_uart2.hw,
- [CLKID_AO_IR_BLASTER] = &axg_aoclk_ir_blaster.hw,
- [CLKID_AO_SAR_ADC] = &axg_aoclk_saradc.hw,
- [CLKID_AO_CLK81] = &axg_aoclk_clk81.hw,
- [CLKID_AO_SAR_ADC_SEL] = &axg_aoclk_saradc_mux.hw,
- [CLKID_AO_SAR_ADC_DIV] = &axg_aoclk_saradc_div.hw,
- [CLKID_AO_SAR_ADC_CLK] = &axg_aoclk_saradc_gate.hw,
- [CLKID_AO_CTS_OSCIN] = &axg_aoclk_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &axg_aoclk_32k_pre.hw,
- [CLKID_AO_32K_DIV] = &axg_aoclk_32k_div.hw,
- [CLKID_AO_32K_SEL] = &axg_aoclk_32k_sel.hw,
- [CLKID_AO_32K] = &axg_aoclk_32k.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &axg_aoclk_cts_rtc_oscin.hw,
+static struct clk_hw *axg_ao_hw_clks[] = {
+ [CLKID_AO_REMOTE] = &axg_ao_remote.hw,
+ [CLKID_AO_I2C_MASTER] = &axg_ao_i2c_master.hw,
+ [CLKID_AO_I2C_SLAVE] = &axg_ao_i2c_slave.hw,
+ [CLKID_AO_UART1] = &axg_ao_uart1.hw,
+ [CLKID_AO_UART2] = &axg_ao_uart2.hw,
+ [CLKID_AO_IR_BLASTER] = &axg_ao_ir_blaster.hw,
+ [CLKID_AO_SAR_ADC] = &axg_ao_saradc.hw,
+ [CLKID_AO_CLK81] = &axg_ao_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &axg_ao_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &axg_ao_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &axg_ao_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &axg_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &axg_ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &axg_ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &axg_ao_32k_sel.hw,
+ [CLKID_AO_32K] = &axg_ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &axg_ao_cts_rtc_oscin.hw,
};
-static const struct meson_aoclk_data axg_aoclkc_data = {
+static const struct meson_aoclk_data axg_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(axg_aoclk_reset),
- .reset = axg_aoclk_reset,
- .hw_clks = {
- .hws = axg_aoclk_hw_clks,
- .num = ARRAY_SIZE(axg_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(axg_ao_reset),
+ .reset = axg_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = axg_ao_hw_clks,
+ .num = ARRAY_SIZE(axg_ao_hw_clks),
+ },
},
};
-static const struct of_device_id axg_aoclkc_match_table[] = {
+static const struct of_device_id axg_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-axg-aoclkc",
- .data = &axg_aoclkc_data,
+ .data = &axg_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, axg_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, axg_ao_clkc_match_table);
-static struct platform_driver axg_aoclkc_driver = {
+static struct platform_driver axg_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
- .name = "axg-aoclkc",
- .of_match_table = axg_aoclkc_match_table,
+ .name = "axg-ao-clkc",
+ .of_match_table = axg_ao_clkc_match_table,
},
};
-module_platform_driver(axg_aoclkc_driver);
+module_platform_driver(axg_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
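/*
 * For context: with this change the AO controller data embeds the generic
 * clkc data, which is what the OF match entry now points at. A minimal
 * sketch of the implied layout; only the member names visible in the hunk
 * above are taken from the diff, the types are assumptions:
 *
 *	struct meson_clkc_data {
 *		struct { struct clk_hw **hws; unsigned int num; } hw_clks;
 *	};
 *
 *	struct meson_aoclk_data {
 *		unsigned int reset_reg;
 *		unsigned int num_reset;
 *		const unsigned int *reset;
 *		struct meson_clkc_data clkc_data;
 *	};
 */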
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 208833c3ee95..0a25c649ef1d 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -18,7 +18,7 @@
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include <dt-bindings/clock/axg-clkc.h>
@@ -333,7 +333,7 @@ static struct clk_regmap axg_gp0_pll = {
},
};
-static const struct reg_sequence axg_hifi_init_regs[] = {
+static const struct reg_sequence axg_hifi_pll_init_regs[] = {
{ .reg = HHI_HIFI_PLL_CNTL1, .def = 0xc084b000 },
{ .reg = HHI_HIFI_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_HIFI_PLL_CNTL3, .def = 0x0a6a3a88 },
@@ -374,8 +374,8 @@ static struct clk_regmap axg_hifi_pll_dco = {
.width = 1,
},
.table = axg_gp0_pll_params_table,
- .init_regs = axg_hifi_init_regs,
- .init_count = ARRAY_SIZE(axg_hifi_init_regs),
+ .init_regs = axg_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(axg_hifi_pll_init_regs),
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
@@ -780,7 +780,7 @@ static const struct pll_params_table axg_pcie_pll_params_table[] = {
{ /* sentinel */ },
};
-static const struct reg_sequence axg_pcie_init_regs[] = {
+static const struct reg_sequence axg_pcie_pll_init_regs[] = {
{ .reg = HHI_PCIE_PLL_CNTL1, .def = 0x0084a2aa },
{ .reg = HHI_PCIE_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_PCIE_PLL_CNTL3, .def = 0x0a47488e },
@@ -823,8 +823,8 @@ static struct clk_regmap axg_pcie_pll_dco = {
.width = 1,
},
.table = axg_pcie_pll_params_table,
- .init_regs = axg_pcie_init_regs,
- .init_count = ARRAY_SIZE(axg_pcie_init_regs),
+ .init_regs = axg_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(axg_pcie_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "pcie_pll_dco",
@@ -935,8 +935,9 @@ static struct clk_regmap axg_pcie_cml_en1 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div7.hw },
{ .hw = &axg_mpll1.hw },
@@ -946,32 +947,32 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &axg_fclk_div5.hw },
};
-static struct clk_regmap axg_mpeg_clk_sel = {
+static struct clk_regmap axg_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = clk81_parents,
+ .num_parents = ARRAY_SIZE(clk81_parents),
},
};
-static struct clk_regmap axg_mpeg_clk_div = {
+static struct clk_regmap axg_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_mpeg_clk_sel.hw
+ &axg_clk81_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -987,14 +988,14 @@ static struct clk_regmap axg_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &axg_mpeg_clk_div.hw
+ &axg_clk81_div.hw
},
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const struct clk_parent_data axg_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data axg_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div2.hw },
{ .hw = &axg_fclk_div3.hw },
@@ -1018,8 +1019,8 @@ static struct clk_regmap axg_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
+ .parent_data = axg_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1068,8 +1069,8 @@ static struct clk_regmap axg_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parent_data),
+ .parent_data = axg_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(axg_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1110,7 +1111,7 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
/* VPU Clock */
-static const struct clk_hw *axg_vpu_parent_hws[] = {
+static const struct clk_hw *axg_vpu_parents[] = {
&axg_fclk_div4.hw,
&axg_fclk_div3.hw,
&axg_fclk_div5.hw,
@@ -1126,8 +1127,8 @@ static struct clk_regmap axg_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
/* We need a specific parent for the VPU clock source; let it be set in DT */
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1175,8 +1176,8 @@ static struct clk_regmap axg_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
/* We need a specific parent for the VPU clock source; let it be set in DT */
.flags = CLK_SET_RATE_NO_REPARENT,
},
@@ -1244,8 +1245,8 @@ static struct clk_regmap axg_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1292,8 +1293,8 @@ static struct clk_regmap axg_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .parent_hws = axg_vpu_parents,
+ .num_parents = ARRAY_SIZE(axg_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1365,7 +1366,7 @@ static struct clk_regmap axg_vapb = {
/* Video Clocks */
-static const struct clk_hw *axg_vclk_parent_hws[] = {
+static const struct clk_hw *axg_vclk_parents[] = {
&axg_gp0_pll.hw,
&axg_fclk_div4.hw,
&axg_fclk_div3.hw,
@@ -1384,8 +1385,8 @@ static struct clk_regmap axg_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .parent_hws = axg_vclk_parents,
+ .num_parents = ARRAY_SIZE(axg_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1399,8 +1400,8 @@ static struct clk_regmap axg_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .parent_hws = axg_vclk_parents,
+ .num_parents = ARRAY_SIZE(axg_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1739,8 +1740,8 @@ static struct clk_fixed_factor axg_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *axg_cts_parent_hws[] = {
+static u32 axg_cts_encl_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *axg_cts_encl_parents[] = {
&axg_vclk_div1.hw,
&axg_vclk_div2.hw,
&axg_vclk_div4.hw,
@@ -1758,13 +1759,13 @@ static struct clk_regmap axg_cts_encl_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 12,
- .table = mux_table_cts_sel,
+ .table = axg_cts_encl_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = axg_cts_parent_hws,
- .num_parents = ARRAY_SIZE(axg_cts_parent_hws),
+ .parent_hws = axg_cts_encl_parents,
+ .num_parents = ARRAY_SIZE(axg_cts_encl_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1787,8 +1788,8 @@ static struct clk_regmap axg_cts_encl = {
/* MIPI DSI Host Clock */
-static u32 mux_table_axg_vdin_meas[] = { 0, 1, 2, 3, 6, 7 };
-static const struct clk_parent_data axg_vdin_meas_parent_data[] = {
+static u32 axg_vdin_meas_parents_val_table[] = { 0, 1, 2, 3, 6, 7 };
+static const struct clk_parent_data axg_vdin_meas_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_fclk_div4.hw },
{ .hw = &axg_fclk_div3.hw },
@@ -1803,13 +1804,13 @@ static struct clk_regmap axg_vdin_meas_sel = {
.mask = 0x7,
.shift = 21,
.flags = CLK_MUX_ROUND_CLOSEST,
- .table = mux_table_axg_vdin_meas,
+ .table = axg_vdin_meas_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "vdin_meas_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = axg_vdin_meas_parent_data,
- .num_parents = ARRAY_SIZE(axg_vdin_meas_parent_data),
+ .parent_data = axg_vdin_meas_parents,
+ .num_parents = ARRAY_SIZE(axg_vdin_meas_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1845,9 +1846,8 @@ static struct clk_regmap axg_vdin_meas = {
},
};
-static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
- 9, 10, 11, 13, 14, };
-static const struct clk_parent_data gen_clk_parent_data[] = {
+static u32 gen_clk_parents_val_table[] = { 0, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, };
+static const struct clk_parent_data gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &axg_hifi_pll.hw },
{ .hw = &axg_mpll0.hw },
@@ -1866,7 +1866,7 @@ static struct clk_regmap axg_gen_clk_sel = {
.offset = HHI_GEN_CLK_CNTL,
.mask = 0xf,
.shift = 12,
- .table = mux_table_gen_clk,
+ .table = gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
@@ -1877,8 +1877,8 @@ static struct clk_regmap axg_gen_clk_sel = {
* hifi_pll, mpll0, mpll1, mpll2, mpll3, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_data = gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(gen_clk_parent_data),
+ .parent_data = gen_clk_parents,
+ .num_parents = ARRAY_SIZE(gen_clk_parents),
},
};
@@ -1915,59 +1915,71 @@ static struct clk_regmap axg_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &axg_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(axg_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(axg_audio_locker, HHI_GCLK_MPEG0, 2);
-static MESON_GATE(axg_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
-static MESON_GATE(axg_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(axg_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(axg_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(axg_spicc_0, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(axg_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(axg_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(axg_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(axg_mipi_dsi_phy, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(axg_spicc_1, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(axg_pcie_a, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(axg_pcie_b, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(axg_hiu_reg, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(axg_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(axg_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(axg_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(axg_dma, HHI_GCLK_MPEG0, 27);
-static MESON_GATE(axg_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(axg_audio, HHI_GCLK_MPEG1, 0);
-static MESON_GATE(axg_eth_core, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(axg_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(axg_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(axg_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(axg_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(axg_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(axg_usb_general, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(axg_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(axg_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(axg_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(axg_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(axg_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(axg_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(axg_usb0_to_ddr, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(axg_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(axg_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(axg_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(axg_gic, HHI_GCLK_MPEG2, 30);
+static const struct clk_parent_data axg_pclk_parents = { .hw = &axg_clk81.hw };
+
+#define AXG_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(axg_##_name, _reg, _bit, &axg_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic reasons
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static AXG_PCLK(ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(audio_locker, HHI_GCLK_MPEG0, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mipi_dsi_host, HHI_GCLK_MPEG0, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static AXG_PCLK(periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spicc_0, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static AXG_PCLK(i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static AXG_PCLK(rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static AXG_PCLK(uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mipi_dsi_phy, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spicc_1, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pcie_a, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static AXG_PCLK(pcie_b, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static AXG_PCLK(hiu_reg, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static AXG_PCLK(assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static AXG_PCLK(emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static AXG_PCLK(emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(dma, HHI_GCLK_MPEG0, 27, CLK_IGNORE_UNUSED);
+static AXG_PCLK(spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static AXG_PCLK(audio, HHI_GCLK_MPEG1, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(eth_core, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static AXG_PCLK(g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static AXG_PCLK(reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb_general, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static AXG_PCLK(efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static AXG_PCLK(boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static AXG_PCLK(ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb1_to_ddr, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static AXG_PCLK(usb0_to_ddr, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static AXG_PCLK(mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static AXG_PCLK(vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static AXG_PCLK(sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static AXG_PCLK(gic, HHI_GCLK_MPEG2, 30, CLK_IGNORE_UNUSED);
/* Always On (AO) domain gates */
-static MESON_GATE(axg_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(axg_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(axg_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(axg_ao_iface, HHI_GCLK_AO, 3);
-static MESON_GATE(axg_ao_i2c, HHI_GCLK_AO, 4);
+static AXG_PCLK(ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
+static AXG_PCLK(ao_i2c, HHI_GCLK_AO, 4, CLK_IGNORE_UNUSED);
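/*
 * What an AXG_PCLK() line expands to, sketched from the open-coded gates
 * this series removes elsewhere (MESON_PCLK's actual definition lives in
 * the shared meson headers; the clock's string name here is an assumption):
 */
static struct clk_regmap axg_ddr_sketch = {
	.data = &(struct clk_regmap_gate_data) {
		.offset = HHI_GCLK_MPEG0,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "axg_ddr",		/* assumed naming */
		.ops = &clk_regmap_gate_ops,
		.parent_data = &axg_pclk_parents,
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
	},
};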
/* Array of all clocks provided by this provider */
@@ -1980,8 +1992,8 @@ static struct clk_hw *axg_hw_clks[] = {
[CLKID_FCLK_DIV5] = &axg_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &axg_fclk_div7.hw,
[CLKID_GP0_PLL] = &axg_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &axg_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &axg_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &axg_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &axg_clk81_div.hw,
[CLKID_CLK81] = &axg_clk81.hw,
[CLKID_MPLL0] = &axg_mpll0.hw,
[CLKID_MPLL1] = &axg_mpll1.hw,
@@ -2110,28 +2122,27 @@ static struct clk_hw *axg_hw_clks[] = {
[CLKID_VDIN_MEAS] = &axg_vdin_meas.hw,
};
-static const struct meson_eeclkc_data axg_clkc_data = {
+static const struct meson_clkc_data axg_clkc_data = {
.hw_clks = {
.hws = axg_hw_clks,
.num = ARRAY_SIZE(axg_hw_clks),
},
};
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id axg_clkc_match_table[] = {
{ .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, axg_clkc_match_table);
-static struct platform_driver axg_driver = {
- .probe = meson_eeclkc_probe,
+static struct platform_driver axg_clkc_driver = {
+ .probe = meson_clkc_syscon_probe,
.driver = {
.name = "axg-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = axg_clkc_match_table,
},
};
-module_platform_driver(axg_driver);
+module_platform_driver(axg_clkc_driver);
MODULE_DESCRIPTION("Amlogic AXG Main Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/c3-peripherals.c b/drivers/clk/meson/c3-peripherals.c
index a25e7d5dc669..b158756cfee4 100644
--- a/drivers/clk/meson/c3-peripherals.c
+++ b/drivers/clk/meson/c3-peripherals.c
@@ -48,7 +48,16 @@
#define SPIFC_CLK_CTRL 0x1a0
#define NNA_CLK_CTRL 0x220
-static struct clk_regmap rtc_xtal_clkin = {
+#define C3_COMP_SEL(_name, _reg, _shift, _mask, _pdata) \
+ MESON_COMP_SEL(c3_, _name, _reg, _shift, _mask, _pdata, NULL, 0, 0)
+
+#define C3_COMP_DIV(_name, _reg, _shift, _width) \
+ MESON_COMP_DIV(c3_, _name, _reg, _shift, _width, 0, CLK_SET_RATE_PARENT)
+
+#define C3_COMP_GATE(_name, _reg, _bit) \
+ MESON_COMP_GATE(c3_, _name, _reg, _bit, CLK_SET_RATE_PARENT)
+
+static struct clk_regmap c3_rtc_xtal_clkin = {
.data = &(struct clk_regmap_gate_data) {
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 31,
@@ -63,12 +72,12 @@ static struct clk_regmap rtc_xtal_clkin = {
},
};
-static const struct meson_clk_dualdiv_param rtc_32k_div_table[] = {
+static const struct meson_clk_dualdiv_param c3_rtc_32k_div_table[] = {
{ 733, 732, 8, 11, 1 },
{ /* sentinel */ }
};
-static struct clk_regmap rtc_32k_div = {
+static struct clk_regmap c3_rtc_32k_div = {
.data = &(struct meson_clk_dualdiv_data) {
.n1 = {
.reg_off = RTC_BY_OSCIN_CTRL0,
@@ -95,39 +104,39 @@ static struct clk_regmap rtc_32k_div = {
.shift = 28,
.width = 1,
},
- .table = rtc_32k_div_table,
+ .table = c3_rtc_32k_div_table,
},
.hw.init = &(struct clk_init_data) {
.name = "rtc_32k_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_xtal_clkin.hw
+ &c3_rtc_xtal_clkin.hw
},
.num_parents = 1,
},
};
-static const struct clk_parent_data rtc_32k_mux_parent_data[] = {
- { .hw = &rtc_32k_div.hw },
- { .hw = &rtc_xtal_clkin.hw }
+static const struct clk_parent_data c3_rtc_32k_parents[] = {
+ { .hw = &c3_rtc_32k_div.hw },
+ { .hw = &c3_rtc_xtal_clkin.hw }
};
-static struct clk_regmap rtc_32k_mux = {
+static struct clk_regmap c3_rtc_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_BY_OSCIN_CTRL1,
.mask = 0x1,
.shift = 24,
},
.hw.init = &(struct clk_init_data) {
- .name = "rtc_32k_mux",
+ .name = "rtc_32k_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = rtc_32k_mux_parent_data,
- .num_parents = ARRAY_SIZE(rtc_32k_mux_parent_data),
+ .parent_data = c3_rtc_32k_parents,
+ .num_parents = ARRAY_SIZE(c3_rtc_32k_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap rtc_32k = {
+static struct clk_regmap c3_rtc_32k = {
.data = &(struct clk_regmap_gate_data) {
.offset = RTC_BY_OSCIN_CTRL0,
.bit_idx = 30,
@@ -136,20 +145,20 @@ static struct clk_regmap rtc_32k = {
.name = "rtc_32k",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &rtc_32k_mux.hw
+ &c3_rtc_32k_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data rtc_clk_mux_parent_data[] = {
+static const struct clk_parent_data c3_rtc_clk_parents[] = {
{ .fw_name = "oscin" },
- { .hw = &rtc_32k.hw },
+ { .hw = &c3_rtc_32k.hw },
{ .fw_name = "pad_osc" }
};
-static struct clk_regmap rtc_clk = {
+static struct clk_regmap c3_rtc_clk = {
.data = &(struct clk_regmap_mux_data) {
.offset = RTC_CTRL,
.mask = 0x3,
@@ -158,62 +167,45 @@ static struct clk_regmap rtc_clk = {
.hw.init = &(struct clk_init_data) {
.name = "rtc_clk",
.ops = &clk_regmap_mux_ops,
- .parent_data = rtc_clk_mux_parent_data,
- .num_parents = ARRAY_SIZE(rtc_clk_mux_parent_data),
+ .parent_data = c3_rtc_clk_parents,
+ .num_parents = ARRAY_SIZE(c3_rtc_clk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-#define C3_CLK_GATE(_name, _reg, _bit, _fw_name, _ops, _flags) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = _ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = #_fw_name, \
- }, \
- .num_parents = 1, \
- .flags = (_flags), \
- }, \
-}
-
-#define C3_SYS_GATE(_name, _reg, _bit, _flags) \
- C3_CLK_GATE(_name, _reg, _bit, sysclk, \
- &clk_regmap_gate_ops, _flags)
-
-#define C3_SYS_GATE_RO(_name, _reg, _bit) \
- C3_CLK_GATE(_name, _reg, _bit, sysclk, \
- &clk_regmap_gate_ro_ops, 0)
-
-static C3_SYS_GATE(sys_reset_ctrl, SYS_CLK_EN0_REG0, 1, 0);
-static C3_SYS_GATE(sys_pwr_ctrl, SYS_CLK_EN0_REG0, 3, 0);
-static C3_SYS_GATE(sys_pad_ctrl, SYS_CLK_EN0_REG0, 4, 0);
-static C3_SYS_GATE(sys_ctrl, SYS_CLK_EN0_REG0, 5, 0);
-static C3_SYS_GATE(sys_ts_pll, SYS_CLK_EN0_REG0, 6, 0);
+static const struct clk_parent_data c3_sys_pclk_parents = { .fw_name = "sysclk" };
+
+#define C3_SYS_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(c3_##_name, _reg, _bit, &c3_sys_pclk_parents, _flags)
+
+#define C3_SYS_PCLK_RO(_name, _reg, _bit) \
+ MESON_PCLK_RO(c3_##_name, _reg, _bit, &c3_sys_pclk_parents, 0)
+
+static C3_SYS_PCLK(sys_reset_ctrl, SYS_CLK_EN0_REG0, 1, 0);
+static C3_SYS_PCLK(sys_pwr_ctrl, SYS_CLK_EN0_REG0, 3, 0);
+static C3_SYS_PCLK(sys_pad_ctrl, SYS_CLK_EN0_REG0, 4, 0);
+static C3_SYS_PCLK(sys_ctrl, SYS_CLK_EN0_REG0, 5, 0);
+static C3_SYS_PCLK(sys_ts_pll, SYS_CLK_EN0_REG0, 6, 0);
/*
* NOTE: sys_dev_arb provides the clock to the ETH and SPICC arbiters that
* access the AXI bus.
*/
-static C3_SYS_GATE(sys_dev_arb, SYS_CLK_EN0_REG0, 7, 0);
+static C3_SYS_PCLK(sys_dev_arb, SYS_CLK_EN0_REG0, 7, 0);
/*
* FIXME: sys_mmc_pclk provides the clock for the DDR PHY. The DDR will only be
* initialized in bl2, and this clock should not be touched in Linux.
*/
-static C3_SYS_GATE_RO(sys_mmc_pclk, SYS_CLK_EN0_REG0, 8);
+static C3_SYS_PCLK_RO(sys_mmc_pclk, SYS_CLK_EN0_REG0, 8);
/*
* NOTE: sys_cpu_ctrl provides the clock for the CPU controller. After the clock
* is disabled, cpu_clk and other key CPU-related configurations cannot take effect.
*/
-static C3_SYS_GATE(sys_cpu_ctrl, SYS_CLK_EN0_REG0, 11, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_jtag_ctrl, SYS_CLK_EN0_REG0, 12, 0);
-static C3_SYS_GATE(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
+static C3_SYS_PCLK(sys_cpu_ctrl, SYS_CLK_EN0_REG0, 11, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_jtag_ctrl, SYS_CLK_EN0_REG0, 12, 0);
+static C3_SYS_PCLK(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
/*
* NOTE: sys_irq_ctrl provides the clock for the IRQ controller. The IRQ controller
@@ -221,18 +213,18 @@ static C3_SYS_GATE(sys_ir_ctrl, SYS_CLK_EN0_REG0, 13, 0);
* AOCPU. If the clock is disabled, interrupt-related functions will cause
* an exception.
*/
-static C3_SYS_GATE(sys_irq_ctrl, SYS_CLK_EN0_REG0, 14, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_msr_clk, SYS_CLK_EN0_REG0, 15, 0);
-static C3_SYS_GATE(sys_rom, SYS_CLK_EN0_REG0, 16, 0);
-static C3_SYS_GATE(sys_uart_f, SYS_CLK_EN0_REG0, 17, 0);
-static C3_SYS_GATE(sys_cpu_apb, SYS_CLK_EN0_REG0, 18, 0);
-static C3_SYS_GATE(sys_rsa, SYS_CLK_EN0_REG0, 19, 0);
-static C3_SYS_GATE(sys_sar_adc, SYS_CLK_EN0_REG0, 20, 0);
-static C3_SYS_GATE(sys_startup, SYS_CLK_EN0_REG0, 21, 0);
-static C3_SYS_GATE(sys_secure, SYS_CLK_EN0_REG0, 22, 0);
-static C3_SYS_GATE(sys_spifc, SYS_CLK_EN0_REG0, 23, 0);
-static C3_SYS_GATE(sys_nna, SYS_CLK_EN0_REG0, 25, 0);
-static C3_SYS_GATE(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
+static C3_SYS_PCLK(sys_irq_ctrl, SYS_CLK_EN0_REG0, 14, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_msr_clk, SYS_CLK_EN0_REG0, 15, 0);
+static C3_SYS_PCLK(sys_rom, SYS_CLK_EN0_REG0, 16, 0);
+static C3_SYS_PCLK(sys_uart_f, SYS_CLK_EN0_REG0, 17, 0);
+static C3_SYS_PCLK(sys_cpu_apb, SYS_CLK_EN0_REG0, 18, 0);
+static C3_SYS_PCLK(sys_rsa, SYS_CLK_EN0_REG0, 19, 0);
+static C3_SYS_PCLK(sys_sar_adc, SYS_CLK_EN0_REG0, 20, 0);
+static C3_SYS_PCLK(sys_startup, SYS_CLK_EN0_REG0, 21, 0);
+static C3_SYS_PCLK(sys_secure, SYS_CLK_EN0_REG0, 22, 0);
+static C3_SYS_PCLK(sys_spifc, SYS_CLK_EN0_REG0, 23, 0);
+static C3_SYS_PCLK(sys_nna, SYS_CLK_EN0_REG0, 25, 0);
+static C3_SYS_PCLK(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
/*
* FIXME: sys_gic provides the clock for the GIC (Generic Interrupt Controller).
@@ -240,8 +232,8 @@ static C3_SYS_GATE(sys_eth_mac, SYS_CLK_EN0_REG0, 26, 0);
* used by our GIC is the public driver in the kernel, and there is no management
* clock in the driver.
*/
-static C3_SYS_GATE(sys_gic, SYS_CLK_EN0_REG0, 27, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
+static C3_SYS_PCLK(sys_gic, SYS_CLK_EN0_REG0, 27, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
/*
* NOTE: sys_big_nic provides the clock to the control bus of the NIC (Network
@@ -249,84 +241,85 @@ static C3_SYS_GATE(sys_rama, SYS_CLK_EN0_REG0, 28, 0);
* SPIFC, CAPU, JTAG, EMMC, SDIO, sec_top, USB, Audio, ETH, SPICC) in the
* system. After the clock is disabled, the NIC cannot work.
*/
-static C3_SYS_GATE(sys_big_nic, SYS_CLK_EN0_REG0, 29, CLK_IS_CRITICAL);
-static C3_SYS_GATE(sys_ramb, SYS_CLK_EN0_REG0, 30, 0);
-static C3_SYS_GATE(sys_audio_pclk, SYS_CLK_EN0_REG0, 31, 0);
-static C3_SYS_GATE(sys_pwm_kl, SYS_CLK_EN0_REG1, 0, 0);
-static C3_SYS_GATE(sys_pwm_ij, SYS_CLK_EN0_REG1, 1, 0);
-static C3_SYS_GATE(sys_usb, SYS_CLK_EN0_REG1, 2, 0);
-static C3_SYS_GATE(sys_sd_emmc_a, SYS_CLK_EN0_REG1, 3, 0);
-static C3_SYS_GATE(sys_sd_emmc_c, SYS_CLK_EN0_REG1, 4, 0);
-static C3_SYS_GATE(sys_pwm_ab, SYS_CLK_EN0_REG1, 5, 0);
-static C3_SYS_GATE(sys_pwm_cd, SYS_CLK_EN0_REG1, 6, 0);
-static C3_SYS_GATE(sys_pwm_ef, SYS_CLK_EN0_REG1, 7, 0);
-static C3_SYS_GATE(sys_pwm_gh, SYS_CLK_EN0_REG1, 8, 0);
-static C3_SYS_GATE(sys_spicc_1, SYS_CLK_EN0_REG1, 9, 0);
-static C3_SYS_GATE(sys_spicc_0, SYS_CLK_EN0_REG1, 10, 0);
-static C3_SYS_GATE(sys_uart_a, SYS_CLK_EN0_REG1, 11, 0);
-static C3_SYS_GATE(sys_uart_b, SYS_CLK_EN0_REG1, 12, 0);
-static C3_SYS_GATE(sys_uart_c, SYS_CLK_EN0_REG1, 13, 0);
-static C3_SYS_GATE(sys_uart_d, SYS_CLK_EN0_REG1, 14, 0);
-static C3_SYS_GATE(sys_uart_e, SYS_CLK_EN0_REG1, 15, 0);
-static C3_SYS_GATE(sys_i2c_m_a, SYS_CLK_EN0_REG1, 16, 0);
-static C3_SYS_GATE(sys_i2c_m_b, SYS_CLK_EN0_REG1, 17, 0);
-static C3_SYS_GATE(sys_i2c_m_c, SYS_CLK_EN0_REG1, 18, 0);
-static C3_SYS_GATE(sys_i2c_m_d, SYS_CLK_EN0_REG1, 19, 0);
-static C3_SYS_GATE(sys_i2c_s_a, SYS_CLK_EN0_REG1, 20, 0);
-static C3_SYS_GATE(sys_rtc, SYS_CLK_EN0_REG1, 21, 0);
-static C3_SYS_GATE(sys_ge2d, SYS_CLK_EN0_REG1, 22, 0);
-static C3_SYS_GATE(sys_isp, SYS_CLK_EN0_REG1, 23, 0);
-static C3_SYS_GATE(sys_gpv_isp_nic, SYS_CLK_EN0_REG1, 24, 0);
-static C3_SYS_GATE(sys_gpv_cve_nic, SYS_CLK_EN0_REG1, 25, 0);
-static C3_SYS_GATE(sys_mipi_dsi_host, SYS_CLK_EN0_REG1, 26, 0);
-static C3_SYS_GATE(sys_mipi_dsi_phy, SYS_CLK_EN0_REG1, 27, 0);
-static C3_SYS_GATE(sys_eth_phy, SYS_CLK_EN0_REG1, 28, 0);
-static C3_SYS_GATE(sys_acodec, SYS_CLK_EN0_REG1, 29, 0);
-static C3_SYS_GATE(sys_dwap, SYS_CLK_EN0_REG1, 30, 0);
-static C3_SYS_GATE(sys_dos, SYS_CLK_EN0_REG1, 31, 0);
-static C3_SYS_GATE(sys_cve, SYS_CLK_EN0_REG2, 0, 0);
-static C3_SYS_GATE(sys_vout, SYS_CLK_EN0_REG2, 1, 0);
-static C3_SYS_GATE(sys_vc9000e, SYS_CLK_EN0_REG2, 2, 0);
-static C3_SYS_GATE(sys_pwm_mn, SYS_CLK_EN0_REG2, 3, 0);
-static C3_SYS_GATE(sys_sd_emmc_b, SYS_CLK_EN0_REG2, 4, 0);
-
-#define C3_AXI_GATE(_name, _reg, _bit, _flags) \
- C3_CLK_GATE(_name, _reg, _bit, axiclk, \
- &clk_regmap_gate_ops, _flags)
+static C3_SYS_PCLK(sys_big_nic, SYS_CLK_EN0_REG0, 29, CLK_IS_CRITICAL);
+static C3_SYS_PCLK(sys_ramb, SYS_CLK_EN0_REG0, 30, 0);
+static C3_SYS_PCLK(sys_audio_pclk, SYS_CLK_EN0_REG0, 31, 0);
+static C3_SYS_PCLK(sys_pwm_kl, SYS_CLK_EN0_REG1, 0, 0);
+static C3_SYS_PCLK(sys_pwm_ij, SYS_CLK_EN0_REG1, 1, 0);
+static C3_SYS_PCLK(sys_usb, SYS_CLK_EN0_REG1, 2, 0);
+static C3_SYS_PCLK(sys_sd_emmc_a, SYS_CLK_EN0_REG1, 3, 0);
+static C3_SYS_PCLK(sys_sd_emmc_c, SYS_CLK_EN0_REG1, 4, 0);
+static C3_SYS_PCLK(sys_pwm_ab, SYS_CLK_EN0_REG1, 5, 0);
+static C3_SYS_PCLK(sys_pwm_cd, SYS_CLK_EN0_REG1, 6, 0);
+static C3_SYS_PCLK(sys_pwm_ef, SYS_CLK_EN0_REG1, 7, 0);
+static C3_SYS_PCLK(sys_pwm_gh, SYS_CLK_EN0_REG1, 8, 0);
+static C3_SYS_PCLK(sys_spicc_1, SYS_CLK_EN0_REG1, 9, 0);
+static C3_SYS_PCLK(sys_spicc_0, SYS_CLK_EN0_REG1, 10, 0);
+static C3_SYS_PCLK(sys_uart_a, SYS_CLK_EN0_REG1, 11, 0);
+static C3_SYS_PCLK(sys_uart_b, SYS_CLK_EN0_REG1, 12, 0);
+static C3_SYS_PCLK(sys_uart_c, SYS_CLK_EN0_REG1, 13, 0);
+static C3_SYS_PCLK(sys_uart_d, SYS_CLK_EN0_REG1, 14, 0);
+static C3_SYS_PCLK(sys_uart_e, SYS_CLK_EN0_REG1, 15, 0);
+static C3_SYS_PCLK(sys_i2c_m_a, SYS_CLK_EN0_REG1, 16, 0);
+static C3_SYS_PCLK(sys_i2c_m_b, SYS_CLK_EN0_REG1, 17, 0);
+static C3_SYS_PCLK(sys_i2c_m_c, SYS_CLK_EN0_REG1, 18, 0);
+static C3_SYS_PCLK(sys_i2c_m_d, SYS_CLK_EN0_REG1, 19, 0);
+static C3_SYS_PCLK(sys_i2c_s_a, SYS_CLK_EN0_REG1, 20, 0);
+static C3_SYS_PCLK(sys_rtc, SYS_CLK_EN0_REG1, 21, 0);
+static C3_SYS_PCLK(sys_ge2d, SYS_CLK_EN0_REG1, 22, 0);
+static C3_SYS_PCLK(sys_isp, SYS_CLK_EN0_REG1, 23, 0);
+static C3_SYS_PCLK(sys_gpv_isp_nic, SYS_CLK_EN0_REG1, 24, 0);
+static C3_SYS_PCLK(sys_gpv_cve_nic, SYS_CLK_EN0_REG1, 25, 0);
+static C3_SYS_PCLK(sys_mipi_dsi_host, SYS_CLK_EN0_REG1, 26, 0);
+static C3_SYS_PCLK(sys_mipi_dsi_phy, SYS_CLK_EN0_REG1, 27, 0);
+static C3_SYS_PCLK(sys_eth_phy, SYS_CLK_EN0_REG1, 28, 0);
+static C3_SYS_PCLK(sys_acodec, SYS_CLK_EN0_REG1, 29, 0);
+static C3_SYS_PCLK(sys_dwap, SYS_CLK_EN0_REG1, 30, 0);
+static C3_SYS_PCLK(sys_dos, SYS_CLK_EN0_REG1, 31, 0);
+static C3_SYS_PCLK(sys_cve, SYS_CLK_EN0_REG2, 0, 0);
+static C3_SYS_PCLK(sys_vout, SYS_CLK_EN0_REG2, 1, 0);
+static C3_SYS_PCLK(sys_vc9000e, SYS_CLK_EN0_REG2, 2, 0);
+static C3_SYS_PCLK(sys_pwm_mn, SYS_CLK_EN0_REG2, 3, 0);
+static C3_SYS_PCLK(sys_sd_emmc_b, SYS_CLK_EN0_REG2, 4, 0);
+
+static const struct clk_parent_data c3_axi_pclk_parents = { .fw_name = "axiclk" };
+
+#define C3_AXI_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(c3_##_name, _reg, _bit, &c3_axi_pclk_parents, _flags)
/*
* NOTE: axi_sys_nic provides the clock to the AXI bus of the system NIC. After
* the clock is disabled, the NIC cannot work.
*/
-static C3_AXI_GATE(axi_sys_nic, AXI_CLK_EN0, 2, CLK_IS_CRITICAL);
-static C3_AXI_GATE(axi_isp_nic, AXI_CLK_EN0, 3, 0);
-static C3_AXI_GATE(axi_cve_nic, AXI_CLK_EN0, 4, 0);
-static C3_AXI_GATE(axi_ramb, AXI_CLK_EN0, 5, 0);
-static C3_AXI_GATE(axi_rama, AXI_CLK_EN0, 6, 0);
+static C3_AXI_PCLK(axi_sys_nic, AXI_CLK_EN0, 2, CLK_IS_CRITICAL);
+static C3_AXI_PCLK(axi_isp_nic, AXI_CLK_EN0, 3, 0);
+static C3_AXI_PCLK(axi_cve_nic, AXI_CLK_EN0, 4, 0);
+static C3_AXI_PCLK(axi_ramb, AXI_CLK_EN0, 5, 0);
+static C3_AXI_PCLK(axi_rama, AXI_CLK_EN0, 6, 0);
/*
* NOTE: axi_cpu_dmc provides the clock to the AXI bus where the CPU accesses
* the DDR. After the clock is disabled, the CPU will not have access to the DDR.
*/
-static C3_AXI_GATE(axi_cpu_dmc, AXI_CLK_EN0, 7, CLK_IS_CRITICAL);
-static C3_AXI_GATE(axi_nic, AXI_CLK_EN0, 8, 0);
-static C3_AXI_GATE(axi_dma, AXI_CLK_EN0, 9, 0);
+static C3_AXI_PCLK(axi_cpu_dmc, AXI_CLK_EN0, 7, CLK_IS_CRITICAL);
+static C3_AXI_PCLK(axi_nic, AXI_CLK_EN0, 8, 0);
+static C3_AXI_PCLK(axi_dma, AXI_CLK_EN0, 9, 0);
/*
* NOTE: axi_mux_nic provides the clock to the NIC's AXI bus for the NN (Neural
* Network) and other devices (CPU, EMMC, SDIO, sec_top, USB, Audio, ETH, SPICC)
* to access RAM space.
*/
-static C3_AXI_GATE(axi_mux_nic, AXI_CLK_EN0, 10, 0);
-static C3_AXI_GATE(axi_cve, AXI_CLK_EN0, 12, 0);
+static C3_AXI_PCLK(axi_mux_nic, AXI_CLK_EN0, 10, 0);
+static C3_AXI_PCLK(axi_cve, AXI_CLK_EN0, 12, 0);
/*
* NOTE: axi_dev1_dmc provides the clock for the peripherals (EMMC, SDIO,
* sec_top, USB, Audio, ETH, SPICC) to access the AXI bus of the DDR.
*/
-static C3_AXI_GATE(axi_dev1_dmc, AXI_CLK_EN0, 13, 0);
-static C3_AXI_GATE(axi_dev0_dmc, AXI_CLK_EN0, 14, 0);
-static C3_AXI_GATE(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
+static C3_AXI_PCLK(axi_dev1_dmc, AXI_CLK_EN0, 13, 0);
+static C3_AXI_PCLK(axi_dev0_dmc, AXI_CLK_EN0, 14, 0);
+static C3_AXI_PCLK(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
/*
* clk_12_24m model
@@ -335,7 +328,7 @@ static C3_AXI_GATE(axi_dsp_dmc, AXI_CLK_EN0, 15, 0);
* xtal---->| gate |---->| div |------------>| pad |
* |------| |-----| |-----|
*/
-static struct clk_regmap clk_12_24m_in = {
+static struct clk_regmap c3_clk_12_24m_in = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLK12_24_CTRL,
.bit_idx = 11,
@@ -350,7 +343,7 @@ static struct clk_regmap clk_12_24m_in = {
},
};
-static struct clk_regmap clk_12_24m = {
+static struct clk_regmap c3_clk_12_24m = {
.data = &(struct clk_regmap_div_data) {
.offset = CLK12_24_CTRL,
.shift = 10,
@@ -360,14 +353,14 @@ static struct clk_regmap clk_12_24m = {
.name = "clk_12_24m",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &clk_12_24m_in.hw
+ &c3_clk_12_24m_in.hw
},
.num_parents = 1,
},
};
/* FIXME: setting the divider value to 0 divides by 2, same as value 1 */
-static struct clk_regmap fclk_25m_div = {
+static struct clk_regmap c3_fclk_25m_div = {
.data = &(struct clk_regmap_div_data) {
.offset = CLK12_24_CTRL,
.shift = 0,
@@ -383,7 +376,7 @@ static struct clk_regmap fclk_25m_div = {
},
};
-static struct clk_regmap fclk_25m = {
+static struct clk_regmap c3_fclk_25m = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLK12_24_CTRL,
.bit_idx = 12,
@@ -392,7 +385,7 @@ static struct clk_regmap fclk_25m = {
.name = "fclk_25m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_25m_div.hw
+ &c3_fclk_25m_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -404,11 +397,10 @@ static struct clk_regmap fclk_25m = {
* is managed by the clock measure module. Their hardware is outside the clock tree.
* Channels 4, 8, 9, 10, 11, 13, 14, 15, 16 and 18 are not connected.
*/
-static u32 gen_parent_table[] = { 0, 1, 2, 5, 6, 7, 17, 19, 20, 21, 22, 23, 24};
-
-static const struct clk_parent_data gen_parent_data[] = {
+static u32 c3_gen_parents_val_table[] = { 0, 1, 2, 5, 6, 7, 17, 19, 20, 21, 22, 23, 24};
+static const struct clk_parent_data c3_gen_parents[] = {
{ .fw_name = "oscin" },
- { .hw = &rtc_clk.hw },
+ { .hw = &c3_rtc_clk.hw },
{ .fw_name = "sysplldiv16" },
{ .fw_name = "gp0" },
{ .fw_name = "gp1" },
@@ -422,22 +414,22 @@ static const struct clk_parent_data gen_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap gen_sel = {
+static struct clk_regmap c3_gen_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = GEN_CLK_CTRL,
.mask = 0x1f,
.shift = 12,
- .table = gen_parent_table,
+ .table = c3_gen_parents_val_table,
},
.hw.init = &(struct clk_init_data) {
.name = "gen_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gen_parent_data,
- .num_parents = ARRAY_SIZE(gen_parent_data),
+ .parent_data = c3_gen_parents,
+ .num_parents = ARRAY_SIZE(c3_gen_parents),
},
};
-static struct clk_regmap gen_div = {
+static struct clk_regmap c3_gen_div = {
.data = &(struct clk_regmap_div_data) {
.offset = GEN_CLK_CTRL,
.shift = 0,
@@ -447,14 +439,14 @@ static struct clk_regmap gen_div = {
.name = "gen_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_sel.hw
+ &c3_gen_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap gen = {
+static struct clk_regmap c3_gen = {
.data = &(struct clk_regmap_gate_data) {
.offset = GEN_CLK_CTRL,
.bit_idx = 11,
@@ -463,214 +455,86 @@ static struct clk_regmap gen = {
.name = "gen",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gen_div.hw
+ &c3_gen_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data saradc_parent_data[] = {
+static const struct clk_parent_data c3_saradc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "sysclk" }
};
-static struct clk_regmap saradc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SAR_CLK_CTRL0,
- .mask = 0x1,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = saradc_parent_data,
- .num_parents = ARRAY_SIZE(saradc_parent_data),
- },
-};
-
-static struct clk_regmap saradc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SAR_CLK_CTRL0,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &saradc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap saradc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SAR_CLK_CTRL0,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "saradc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &saradc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(saradc, SAR_CLK_CTRL0, 9, 0x1, c3_saradc_parents);
+static C3_COMP_DIV(saradc, SAR_CLK_CTRL0, 0, 8);
+static C3_COMP_GATE(saradc, SAR_CLK_CTRL0, 8);
-static const struct clk_parent_data pwm_parent_data[] = {
+static const struct clk_parent_data c3_pwm_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "gp1" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" }
};
-#define AML_PWM_CLK_MUX(_name, _reg, _shift) { \
- .data = &(struct clk_regmap_mux_data) { \
- .offset = _reg, \
- .mask = 0x3, \
- .shift = _shift, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_sel", \
- .ops = &clk_regmap_mux_ops, \
- .parent_data = pwm_parent_data, \
- .num_parents = ARRAY_SIZE(pwm_parent_data), \
- }, \
-}
-
-#define AML_PWM_CLK_DIV(_name, _reg, _shift) { \
- .data = &(struct clk_regmap_div_data) { \
- .offset = _reg, \
- .shift = _shift, \
- .width = 8, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_div", \
- .ops = &clk_regmap_divider_ops, \
- .parent_names = (const char *[]) { #_name "_sel" },\
- .num_parents = 1, \
- .flags = CLK_SET_RATE_PARENT, \
- }, \
-}
-
-#define AML_PWM_CLK_GATE(_name, _reg, _bit) { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = _reg, \
- .bit_idx = _bit, \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_names = (const char *[]) { #_name "_div" },\
- .num_parents = 1, \
- .flags = CLK_SET_RATE_PARENT, \
- }, \
-}
-
-static struct clk_regmap pwm_a_sel =
- AML_PWM_CLK_MUX(pwm_a, PWM_CLK_AB_CTRL, 9);
-static struct clk_regmap pwm_a_div =
- AML_PWM_CLK_DIV(pwm_a, PWM_CLK_AB_CTRL, 0);
-static struct clk_regmap pwm_a =
- AML_PWM_CLK_GATE(pwm_a, PWM_CLK_AB_CTRL, 8);
-
-static struct clk_regmap pwm_b_sel =
- AML_PWM_CLK_MUX(pwm_b, PWM_CLK_AB_CTRL, 25);
-static struct clk_regmap pwm_b_div =
- AML_PWM_CLK_DIV(pwm_b, PWM_CLK_AB_CTRL, 16);
-static struct clk_regmap pwm_b =
- AML_PWM_CLK_GATE(pwm_b, PWM_CLK_AB_CTRL, 24);
-
-static struct clk_regmap pwm_c_sel =
- AML_PWM_CLK_MUX(pwm_c, PWM_CLK_CD_CTRL, 9);
-static struct clk_regmap pwm_c_div =
- AML_PWM_CLK_DIV(pwm_c, PWM_CLK_CD_CTRL, 0);
-static struct clk_regmap pwm_c =
- AML_PWM_CLK_GATE(pwm_c, PWM_CLK_CD_CTRL, 8);
-
-static struct clk_regmap pwm_d_sel =
- AML_PWM_CLK_MUX(pwm_d, PWM_CLK_CD_CTRL, 25);
-static struct clk_regmap pwm_d_div =
- AML_PWM_CLK_DIV(pwm_d, PWM_CLK_CD_CTRL, 16);
-static struct clk_regmap pwm_d =
- AML_PWM_CLK_GATE(pwm_d, PWM_CLK_CD_CTRL, 24);
-
-static struct clk_regmap pwm_e_sel =
- AML_PWM_CLK_MUX(pwm_e, PWM_CLK_EF_CTRL, 9);
-static struct clk_regmap pwm_e_div =
- AML_PWM_CLK_DIV(pwm_e, PWM_CLK_EF_CTRL, 0);
-static struct clk_regmap pwm_e =
- AML_PWM_CLK_GATE(pwm_e, PWM_CLK_EF_CTRL, 8);
-
-static struct clk_regmap pwm_f_sel =
- AML_PWM_CLK_MUX(pwm_f, PWM_CLK_EF_CTRL, 25);
-static struct clk_regmap pwm_f_div =
- AML_PWM_CLK_DIV(pwm_f, PWM_CLK_EF_CTRL, 16);
-static struct clk_regmap pwm_f =
- AML_PWM_CLK_GATE(pwm_f, PWM_CLK_EF_CTRL, 24);
-
-static struct clk_regmap pwm_g_sel =
- AML_PWM_CLK_MUX(pwm_g, PWM_CLK_GH_CTRL, 9);
-static struct clk_regmap pwm_g_div =
- AML_PWM_CLK_DIV(pwm_g, PWM_CLK_GH_CTRL, 0);
-static struct clk_regmap pwm_g =
- AML_PWM_CLK_GATE(pwm_g, PWM_CLK_GH_CTRL, 8);
-
-static struct clk_regmap pwm_h_sel =
- AML_PWM_CLK_MUX(pwm_h, PWM_CLK_GH_CTRL, 25);
-static struct clk_regmap pwm_h_div =
- AML_PWM_CLK_DIV(pwm_h, PWM_CLK_GH_CTRL, 16);
-static struct clk_regmap pwm_h =
- AML_PWM_CLK_GATE(pwm_h, PWM_CLK_GH_CTRL, 24);
-
-static struct clk_regmap pwm_i_sel =
- AML_PWM_CLK_MUX(pwm_i, PWM_CLK_IJ_CTRL, 9);
-static struct clk_regmap pwm_i_div =
- AML_PWM_CLK_DIV(pwm_i, PWM_CLK_IJ_CTRL, 0);
-static struct clk_regmap pwm_i =
- AML_PWM_CLK_GATE(pwm_i, PWM_CLK_IJ_CTRL, 8);
-
-static struct clk_regmap pwm_j_sel =
- AML_PWM_CLK_MUX(pwm_j, PWM_CLK_IJ_CTRL, 25);
-static struct clk_regmap pwm_j_div =
- AML_PWM_CLK_DIV(pwm_j, PWM_CLK_IJ_CTRL, 16);
-static struct clk_regmap pwm_j =
- AML_PWM_CLK_GATE(pwm_j, PWM_CLK_IJ_CTRL, 24);
-
-static struct clk_regmap pwm_k_sel =
- AML_PWM_CLK_MUX(pwm_k, PWM_CLK_KL_CTRL, 9);
-static struct clk_regmap pwm_k_div =
- AML_PWM_CLK_DIV(pwm_k, PWM_CLK_KL_CTRL, 0);
-static struct clk_regmap pwm_k =
- AML_PWM_CLK_GATE(pwm_k, PWM_CLK_KL_CTRL, 8);
-
-static struct clk_regmap pwm_l_sel =
- AML_PWM_CLK_MUX(pwm_l, PWM_CLK_KL_CTRL, 25);
-static struct clk_regmap pwm_l_div =
- AML_PWM_CLK_DIV(pwm_l, PWM_CLK_KL_CTRL, 16);
-static struct clk_regmap pwm_l =
- AML_PWM_CLK_GATE(pwm_l, PWM_CLK_KL_CTRL, 24);
-
-static struct clk_regmap pwm_m_sel =
- AML_PWM_CLK_MUX(pwm_m, PWM_CLK_MN_CTRL, 9);
-static struct clk_regmap pwm_m_div =
- AML_PWM_CLK_DIV(pwm_m, PWM_CLK_MN_CTRL, 0);
-static struct clk_regmap pwm_m =
- AML_PWM_CLK_GATE(pwm_m, PWM_CLK_MN_CTRL, 8);
-
-static struct clk_regmap pwm_n_sel =
- AML_PWM_CLK_MUX(pwm_n, PWM_CLK_MN_CTRL, 25);
-static struct clk_regmap pwm_n_div =
- AML_PWM_CLK_DIV(pwm_n, PWM_CLK_MN_CTRL, 16);
-static struct clk_regmap pwm_n =
- AML_PWM_CLK_GATE(pwm_n, PWM_CLK_MN_CTRL, 24);
-
-static const struct clk_parent_data spicc_parent_data[] = {
+static C3_COMP_SEL(pwm_a, PWM_CLK_AB_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_a, PWM_CLK_AB_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_a, PWM_CLK_AB_CTRL, 8);
+
+static C3_COMP_SEL(pwm_b, PWM_CLK_AB_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_b, PWM_CLK_AB_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_b, PWM_CLK_AB_CTRL, 24);
+
+static C3_COMP_SEL(pwm_c, PWM_CLK_CD_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_c, PWM_CLK_CD_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_c, PWM_CLK_CD_CTRL, 8);
+
+static C3_COMP_SEL(pwm_d, PWM_CLK_CD_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_d, PWM_CLK_CD_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_d, PWM_CLK_CD_CTRL, 24);
+
+static C3_COMP_SEL(pwm_e, PWM_CLK_EF_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_e, PWM_CLK_EF_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_e, PWM_CLK_EF_CTRL, 8);
+
+static C3_COMP_SEL(pwm_f, PWM_CLK_EF_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_f, PWM_CLK_EF_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_f, PWM_CLK_EF_CTRL, 24);
+
+static C3_COMP_SEL(pwm_g, PWM_CLK_GH_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_g, PWM_CLK_GH_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_g, PWM_CLK_GH_CTRL, 8);
+
+static C3_COMP_SEL(pwm_h, PWM_CLK_GH_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_h, PWM_CLK_GH_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_h, PWM_CLK_GH_CTRL, 24);
+
+static C3_COMP_SEL(pwm_i, PWM_CLK_IJ_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_i, PWM_CLK_IJ_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_i, PWM_CLK_IJ_CTRL, 8);
+
+static C3_COMP_SEL(pwm_j, PWM_CLK_IJ_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_j, PWM_CLK_IJ_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_j, PWM_CLK_IJ_CTRL, 24);
+
+static C3_COMP_SEL(pwm_k, PWM_CLK_KL_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_k, PWM_CLK_KL_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_k, PWM_CLK_KL_CTRL, 8);
+
+static C3_COMP_SEL(pwm_l, PWM_CLK_KL_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_l, PWM_CLK_KL_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_l, PWM_CLK_KL_CTRL, 24);
+
+static C3_COMP_SEL(pwm_m, PWM_CLK_MN_CTRL, 9, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_m, PWM_CLK_MN_CTRL, 0, 8);
+static C3_COMP_GATE(pwm_m, PWM_CLK_MN_CTRL, 8);
+
+static C3_COMP_SEL(pwm_n, PWM_CLK_MN_CTRL, 25, 0x3, c3_pwm_parents);
+static C3_COMP_DIV(pwm_n, PWM_CLK_MN_CTRL, 16, 8);
+static C3_COMP_GATE(pwm_n, PWM_CLK_MN_CTRL, 24);
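/*
 * Each C3_COMP_{SEL,DIV,GATE}() trio reproduces the mux -> divider -> gate
 * chain that the removed AML_PWM_CLK_* macros open-coded above. Sketching
 * the pwm_a trio against that removed code (the real MESON_COMP_* macros
 * live in the shared meson headers), the three lines above are roughly:
 *
 *	c3_pwm_a_sel:  mux  at PWM_CLK_AB_CTRL, mask 0x3, shift 9,
 *	               parents = c3_pwm_parents
 *	c3_pwm_a_div:  div  at PWM_CLK_AB_CTRL, shift 0, width 8,
 *	               parent = c3_pwm_a_sel, CLK_SET_RATE_PARENT
 *	c3_pwm_a:      gate at PWM_CLK_AB_CTRL, bit 8,
 *	               parent = c3_pwm_a_div, CLK_SET_RATE_PARENT
 */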
+
+static const struct clk_parent_data c3_spicc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "sysclk" },
{ .fw_name = "fdiv4" },
@@ -681,101 +545,15 @@ static const struct clk_parent_data spicc_parent_data[] = {
{ .fw_name = "gp1" }
};
-static struct clk_regmap spicc_a_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPICC_CLK_CTRL,
- .mask = 0x7,
- .shift = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spicc_parent_data,
- .num_parents = ARRAY_SIZE(spicc_parent_data),
- },
-};
-
-static struct clk_regmap spicc_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPICC_CLK_CTRL,
- .shift = 0,
- .width = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_a_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spicc_a, SPICC_CLK_CTRL, 7, 0x7, c3_spicc_parents);
+static C3_COMP_DIV(spicc_a, SPICC_CLK_CTRL, 0, 6);
+static C3_COMP_GATE(spicc_a, SPICC_CLK_CTRL, 6);
-static struct clk_regmap spicc_a = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPICC_CLK_CTRL,
- .bit_idx = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_a",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(spicc_b, SPICC_CLK_CTRL, 23, 0x7, c3_spicc_parents);
+static C3_COMP_DIV(spicc_b, SPICC_CLK_CTRL, 16, 6);
+static C3_COMP_GATE(spicc_b, SPICC_CLK_CTRL, 22);
-static struct clk_regmap spicc_b_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPICC_CLK_CTRL,
- .mask = 0x7,
- .shift = 23,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spicc_parent_data,
- .num_parents = ARRAY_SIZE(spicc_parent_data),
- },
-};
-
-static struct clk_regmap spicc_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPICC_CLK_CTRL,
- .shift = 16,
- .width = 6,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_b_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spicc_b = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPICC_CLK_CTRL,
- .bit_idx = 22,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spicc_b",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spicc_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data spifc_parent_data[] = {
+static const struct clk_parent_data c3_spifc_parents[] = {
{ .fw_name = "gp0" },
{ .fw_name = "fdiv2" },
{ .fw_name = "fdiv3" },
@@ -786,54 +564,11 @@ static const struct clk_parent_data spifc_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap spifc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SPIFC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = spifc_parent_data,
- .num_parents = ARRAY_SIZE(spifc_parent_data),
- },
-};
+static C3_COMP_SEL(spifc, SPIFC_CLK_CTRL, 9, 0x7, c3_spifc_parents);
+static C3_COMP_DIV(spifc, SPIFC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(spifc, SPIFC_CLK_CTRL, 8);
-static struct clk_regmap spifc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SPIFC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spifc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap spifc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SPIFC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "spifc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &spifc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data emmc_parent_data[] = {
+static const struct clk_parent_data c3_sd_emmc_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2" },
{ .fw_name = "fdiv3" },
@@ -844,148 +579,19 @@ static const struct clk_parent_data emmc_parent_data[] = {
{ .fw_name = "gp0" }
};
-static struct clk_regmap sd_emmc_a_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
-
-static struct clk_regmap sd_emmc_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_a_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_a = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .bit_idx = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_a",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_b_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
-
-static struct clk_regmap sd_emmc_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_b_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_b = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = SD_EMMC_CLK_CTRL,
- .bit_idx = 23,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_b",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sd_emmc_c_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = NAND_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = emmc_parent_data,
- .num_parents = ARRAY_SIZE(emmc_parent_data),
- },
-};
+static C3_COMP_SEL(sd_emmc_a, SD_EMMC_CLK_CTRL, 9, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_a, SD_EMMC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(sd_emmc_a, SD_EMMC_CLK_CTRL, 7);
-static struct clk_regmap sd_emmc_c_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = NAND_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_c_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(sd_emmc_b, SD_EMMC_CLK_CTRL, 25, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_b, SD_EMMC_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(sd_emmc_b, SD_EMMC_CLK_CTRL, 23);
-static struct clk_regmap sd_emmc_c = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = NAND_CLK_CTRL,
- .bit_idx = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "sd_emmc_c",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sd_emmc_c_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(sd_emmc_c, NAND_CLK_CTRL, 9, 0x7, c3_sd_emmc_parents);
+static C3_COMP_DIV(sd_emmc_c, NAND_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(sd_emmc_c, NAND_CLK_CTRL, 7);
-static struct clk_regmap ts_div = {
+static struct clk_regmap c3_ts_div = {
.data = &(struct clk_regmap_div_data) {
.offset = TS_CLK_CTRL,
.shift = 0,
@@ -1001,7 +607,7 @@ static struct clk_regmap ts_div = {
},
};
-static struct clk_regmap ts = {
+static struct clk_regmap c3_ts = {
.data = &(struct clk_regmap_gate_data) {
.offset = TS_CLK_CTRL,
.bit_idx = 8,
@@ -1010,29 +616,29 @@ static struct clk_regmap ts = {
.name = "ts",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ts_div.hw
+ &c3_ts_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data eth_parent = {
+static const struct clk_parent_data c3_eth_parents = {
.fw_name = "fdiv2",
};
-static struct clk_fixed_factor eth_125m_div = {
+static struct clk_fixed_factor c3_eth_125m_div = {
.mult = 1,
.div = 8,
.hw.init = &(struct clk_init_data) {
.name = "eth_125m_div",
.ops = &clk_fixed_factor_ops,
- .parent_data = &eth_parent,
+ .parent_data = &c3_eth_parents,
.num_parents = 1,
},
};
-static struct clk_regmap eth_125m = {
+static struct clk_regmap c3_eth_125m = {
.data = &(struct clk_regmap_gate_data) {
.offset = ETH_CLK_CTRL,
.bit_idx = 7,
@@ -1041,14 +647,14 @@ static struct clk_regmap eth_125m = {
.name = "eth_125m",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &eth_125m_div.hw
+ &c3_eth_125m_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap eth_rmii_div = {
+static struct clk_regmap c3_eth_rmii_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ETH_CLK_CTRL,
.shift = 0,
@@ -1057,12 +663,12 @@ static struct clk_regmap eth_rmii_div = {
.hw.init = &(struct clk_init_data) {
.name = "eth_rmii_div",
.ops = &clk_regmap_divider_ops,
- .parent_data = &eth_parent,
+ .parent_data = &c3_eth_parents,
.num_parents = 1,
},
};
-static struct clk_regmap eth_rmii = {
+static struct clk_regmap c3_eth_rmii = {
.data = &(struct clk_regmap_gate_data) {
.offset = ETH_CLK_CTRL,
.bit_idx = 8,
@@ -1071,14 +677,14 @@ static struct clk_regmap eth_rmii = {
.name = "eth_rmii",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &eth_rmii_div.hw
+ &c3_eth_rmii_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data mipi_dsi_meas_parent_data[] = {
+static const struct clk_parent_data c3_mipi_dsi_meas_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" },
@@ -1089,54 +695,11 @@ static const struct clk_parent_data mipi_dsi_meas_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap mipi_dsi_meas_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .mask = 0x7,
- .shift = 21,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = mipi_dsi_meas_parent_data,
- .num_parents = ARRAY_SIZE(mipi_dsi_meas_parent_data),
- },
-};
-
-static struct clk_regmap mipi_dsi_meas_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .shift = 12,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &mipi_dsi_meas_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 21, 0x7, c3_mipi_dsi_meas_parents);
+static C3_COMP_DIV(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 12, 7);
+static C3_COMP_GATE(mipi_dsi_meas, VDIN_MEAS_CLK_CTRL, 20);
-static struct clk_regmap mipi_dsi_meas = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDIN_MEAS_CLK_CTRL,
- .bit_idx = 20,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "mipi_dsi_meas",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &mipi_dsi_meas_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data dsi_phy_parent_data[] = {
+static const struct clk_parent_data c3_dsi_phy_parents[] = {
{ .fw_name = "gp1" },
{ .fw_name = "gp0" },
{ .fw_name = "hifi" },
@@ -1147,54 +710,11 @@ static const struct clk_parent_data dsi_phy_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap dsi_phy_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .mask = 0x7,
- .shift = 12,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = dsi_phy_parent_data,
- .num_parents = ARRAY_SIZE(dsi_phy_parent_data),
- },
-};
-
-static struct clk_regmap dsi_phy_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dsi_phy_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 12, 0x7, c3_dsi_phy_parents);
+static C3_COMP_DIV(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(dsi_phy, MIPIDSI_PHY_CLK_CTRL, 8);
-static struct clk_regmap dsi_phy = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = MIPIDSI_PHY_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dsi_phy",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dsi_phy_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vout_mclk_parent_data[] = {
+static const struct clk_parent_data c3_vout_mclk_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1205,54 +725,11 @@ static const struct clk_parent_data vout_mclk_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap vout_mclk_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VOUTENC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vout_mclk_parent_data,
- .num_parents = ARRAY_SIZE(vout_mclk_parent_data),
- },
-};
+static C3_COMP_SEL(vout_mclk, VOUTENC_CLK_CTRL, 9, 0x7, c3_vout_mclk_parents);
+static C3_COMP_DIV(vout_mclk, VOUTENC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vout_mclk, VOUTENC_CLK_CTRL, 8);
-static struct clk_regmap vout_mclk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VOUTENC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_mclk_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vout_mclk = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VOUTENC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_mclk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_mclk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vout_enc_parent_data[] = {
+static const struct clk_parent_data c3_vout_enc_parents[] = {
{ .fw_name = "gp1" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1263,54 +740,11 @@ static const struct clk_parent_data vout_enc_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap vout_enc_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VOUTENC_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vout_enc_parent_data,
- .num_parents = ARRAY_SIZE(vout_enc_parent_data),
- },
-};
-
-static struct clk_regmap vout_enc_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VOUTENC_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_enc_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vout_enc = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VOUTENC_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vout_enc",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vout_enc_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vout_enc, VOUTENC_CLK_CTRL, 25, 0x7, c3_vout_enc_parents);
+static C3_COMP_DIV(vout_enc, VOUTENC_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(vout_enc, VOUTENC_CLK_CTRL, 24);
-static const struct clk_parent_data hcodec_pre_parent_data[] = {
+static const struct clk_parent_data c3_hcodec_pre_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1321,106 +755,20 @@ static const struct clk_parent_data hcodec_pre_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap hcodec_0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDEC_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_pre_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_pre_parent_data),
- },
-};
+static C3_COMP_SEL(hcodec_0, VDEC_CLK_CTRL, 9, 0x7, c3_hcodec_pre_parents);
+static C3_COMP_DIV(hcodec_0, VDEC_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(hcodec_0, VDEC_CLK_CTRL, 8);
-static struct clk_regmap hcodec_0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDEC_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(hcodec_1, VDEC3_CLK_CTRL, 9, 0x7, c3_hcodec_pre_parents);
+static C3_COMP_DIV(hcodec_1, VDEC3_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(hcodec_1, VDEC3_CLK_CTRL, 8);
-static struct clk_regmap hcodec_0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDEC_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
+static const struct clk_parent_data c3_hcodec_parents[] = {
+ { .hw = &c3_hcodec_0.hw },
+ { .hw = &c3_hcodec_1.hw }
};
-static struct clk_regmap hcodec_1_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VDEC3_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_pre_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_pre_parent_data),
- },
-};
-
-static struct clk_regmap hcodec_1_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VDEC3_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_1_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap hcodec_1 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VDEC3_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "hcodec_1",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &hcodec_1_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data hcodec_parent_data[] = {
- { .hw = &hcodec_0.hw },
- { .hw = &hcodec_1.hw }
-};
-
-static struct clk_regmap hcodec = {
+static struct clk_regmap c3_hcodec = {
.data = &(struct clk_regmap_mux_data) {
.offset = VDEC3_CLK_CTRL,
.mask = 0x1,
@@ -1429,13 +777,13 @@ static struct clk_regmap hcodec = {
.hw.init = &(struct clk_init_data) {
.name = "hcodec",
.ops = &clk_regmap_mux_ops,
- .parent_data = hcodec_parent_data,
- .num_parents = ARRAY_SIZE(hcodec_parent_data),
+ .parent_data = c3_hcodec_parents,
+ .num_parents = ARRAY_SIZE(c3_hcodec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data vc9000e_parent_data[] = {
+static const struct clk_parent_data c3_vc9000e_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv4" },
{ .fw_name = "fdiv3" },
@@ -1446,101 +794,15 @@ static const struct clk_parent_data vc9000e_parent_data[] = {
{ .fw_name = "gp0" }
};
-static struct clk_regmap vc9000e_aclk_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VC9000E_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vc9000e_parent_data,
- .num_parents = ARRAY_SIZE(vc9000e_parent_data),
- },
-};
-
-static struct clk_regmap vc9000e_aclk_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VC9000E_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_aclk_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vc9000e_aclk = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VC9000E_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_aclk",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_aclk_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(vc9000e_aclk, VC9000E_CLK_CTRL, 9, 0x7, c3_vc9000e_parents);
+static C3_COMP_DIV(vc9000e_aclk, VC9000E_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vc9000e_aclk, VC9000E_CLK_CTRL, 8);
-static struct clk_regmap vc9000e_core_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VC9000E_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vc9000e_parent_data,
- .num_parents = ARRAY_SIZE(vc9000e_parent_data),
- },
-};
+static C3_COMP_SEL(vc9000e_core, VC9000E_CLK_CTRL, 25, 0x7, c3_vc9000e_parents);
+static C3_COMP_DIV(vc9000e_core, VC9000E_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(vc9000e_core, VC9000E_CLK_CTRL, 24);
-static struct clk_regmap vc9000e_core_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VC9000E_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_core_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vc9000e_core = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VC9000E_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vc9000e_core",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vc9000e_core_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data csi_phy_parent_data[] = {
+static const struct clk_parent_data c3_csi_phy_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1551,54 +813,11 @@ static const struct clk_parent_data csi_phy_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap csi_phy0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = ISP0_CLK_CTRL,
- .mask = 0x7,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = csi_phy_parent_data,
- .num_parents = ARRAY_SIZE(csi_phy_parent_data),
- },
-};
-
-static struct clk_regmap csi_phy0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = ISP0_CLK_CTRL,
- .shift = 16,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &csi_phy0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap csi_phy0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = ISP0_CLK_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "csi_phy0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &csi_phy0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(csi_phy0, ISP0_CLK_CTRL, 25, 0x7, c3_csi_phy_parents);
+static C3_COMP_DIV(csi_phy0, ISP0_CLK_CTRL, 16, 7);
+static C3_COMP_GATE(csi_phy0, ISP0_CLK_CTRL, 24);
-static const struct clk_parent_data dewarpa_parent_data[] = {
+static const struct clk_parent_data c3_dewarpa_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1609,54 +828,11 @@ static const struct clk_parent_data dewarpa_parent_data[] = {
{ .fw_name = "fdiv7" }
};
-static struct clk_regmap dewarpa_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = DEWARPA_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = dewarpa_parent_data,
- .num_parents = ARRAY_SIZE(dewarpa_parent_data),
- },
-};
-
-static struct clk_regmap dewarpa_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = DEWARPA_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dewarpa_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap dewarpa = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = DEWARPA_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "dewarpa",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &dewarpa_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(dewarpa, DEWARPA_CLK_CTRL, 9, 0x7, c3_dewarpa_parents);
+static C3_COMP_DIV(dewarpa, DEWARPA_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(dewarpa, DEWARPA_CLK_CTRL, 8);
-static const struct clk_parent_data isp_parent_data[] = {
+static const struct clk_parent_data c3_isp_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1667,54 +843,11 @@ static const struct clk_parent_data isp_parent_data[] = {
{ .fw_name = "oscin" }
};
-static struct clk_regmap isp0_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = ISP0_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = isp_parent_data,
- .num_parents = ARRAY_SIZE(isp_parent_data),
- },
-};
-
-static struct clk_regmap isp0_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = ISP0_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &isp0_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap isp0 = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = ISP0_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "isp0",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &isp0_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(isp0, ISP0_CLK_CTRL, 9, 0x7, c3_isp_parents);
+static C3_COMP_DIV(isp0, ISP0_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(isp0, ISP0_CLK_CTRL, 8);
-static const struct clk_parent_data nna_core_parent_data[] = {
+static const struct clk_parent_data c3_nna_core_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv4" },
@@ -1725,54 +858,11 @@ static const struct clk_parent_data nna_core_parent_data[] = {
{ .fw_name = "hifi" }
};
-static struct clk_regmap nna_core_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = NNA_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = nna_core_parent_data,
- .num_parents = ARRAY_SIZE(nna_core_parent_data),
- },
-};
+static C3_COMP_SEL(nna_core, NNA_CLK_CTRL, 9, 0x7, c3_nna_core_parents);
+static C3_COMP_DIV(nna_core, NNA_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(nna_core, NNA_CLK_CTRL, 8);
-static struct clk_regmap nna_core_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = NNA_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &nna_core_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap nna_core = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = NNA_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "nna_core",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &nna_core_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data ge2d_parent_data[] = {
+static const struct clk_parent_data c3_ge2d_parents[] = {
{ .fw_name = "oscin" },
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
@@ -1780,57 +870,14 @@ static const struct clk_parent_data ge2d_parent_data[] = {
{ .fw_name = "hifi" },
{ .fw_name = "fdiv5" },
{ .fw_name = "gp0" },
- { .hw = &rtc_clk.hw }
-};
-
-static struct clk_regmap ge2d_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = GE2D_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = ge2d_parent_data,
- .num_parents = ARRAY_SIZE(ge2d_parent_data),
- },
+ { .hw = &c3_rtc_clk.hw }
};
-static struct clk_regmap ge2d_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = GE2D_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &ge2d_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static C3_COMP_SEL(ge2d, GE2D_CLK_CTRL, 9, 0x7, c3_ge2d_parents);
+static C3_COMP_DIV(ge2d, GE2D_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(ge2d, GE2D_CLK_CTRL, 8);
-static struct clk_regmap ge2d = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = GE2D_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "ge2d",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &ge2d_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static const struct clk_parent_data vapb_parent_data[] = {
+static const struct clk_parent_data c3_vapb_parents[] = {
{ .fw_name = "fdiv2p5" },
{ .fw_name = "fdiv3" },
{ .fw_name = "fdiv4" },
@@ -1841,317 +888,239 @@ static const struct clk_parent_data vapb_parent_data[] = {
{ .fw_name = "oscin" },
};
-static struct clk_regmap vapb_sel = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = VAPB_CLK_CTRL,
- .mask = 0x7,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb_sel",
- .ops = &clk_regmap_mux_ops,
- .parent_data = vapb_parent_data,
- .num_parents = ARRAY_SIZE(vapb_parent_data),
+static C3_COMP_SEL(vapb, VAPB_CLK_CTRL, 9, 0x7, c3_vapb_parents);
+static C3_COMP_DIV(vapb, VAPB_CLK_CTRL, 0, 7);
+static C3_COMP_GATE(vapb, VAPB_CLK_CTRL, 8);
+
+static struct clk_hw *c3_peripherals_hw_clks[] = {
+ [CLKID_RTC_XTAL_CLKIN] = &c3_rtc_xtal_clkin.hw,
+ [CLKID_RTC_32K_DIV] = &c3_rtc_32k_div.hw,
+ [CLKID_RTC_32K_MUX] = &c3_rtc_32k_sel.hw,
+ [CLKID_RTC_32K] = &c3_rtc_32k.hw,
+ [CLKID_RTC_CLK] = &c3_rtc_clk.hw,
+ [CLKID_SYS_RESET_CTRL] = &c3_sys_reset_ctrl.hw,
+ [CLKID_SYS_PWR_CTRL] = &c3_sys_pwr_ctrl.hw,
+ [CLKID_SYS_PAD_CTRL] = &c3_sys_pad_ctrl.hw,
+ [CLKID_SYS_CTRL] = &c3_sys_ctrl.hw,
+ [CLKID_SYS_TS_PLL] = &c3_sys_ts_pll.hw,
+ [CLKID_SYS_DEV_ARB] = &c3_sys_dev_arb.hw,
+ [CLKID_SYS_MMC_PCLK] = &c3_sys_mmc_pclk.hw,
+ [CLKID_SYS_CPU_CTRL] = &c3_sys_cpu_ctrl.hw,
+ [CLKID_SYS_JTAG_CTRL] = &c3_sys_jtag_ctrl.hw,
+ [CLKID_SYS_IR_CTRL] = &c3_sys_ir_ctrl.hw,
+ [CLKID_SYS_IRQ_CTRL] = &c3_sys_irq_ctrl.hw,
+ [CLKID_SYS_MSR_CLK] = &c3_sys_msr_clk.hw,
+ [CLKID_SYS_ROM] = &c3_sys_rom.hw,
+ [CLKID_SYS_UART_F] = &c3_sys_uart_f.hw,
+ [CLKID_SYS_CPU_ARB] = &c3_sys_cpu_apb.hw,
+ [CLKID_SYS_RSA] = &c3_sys_rsa.hw,
+ [CLKID_SYS_SAR_ADC] = &c3_sys_sar_adc.hw,
+ [CLKID_SYS_STARTUP] = &c3_sys_startup.hw,
+ [CLKID_SYS_SECURE] = &c3_sys_secure.hw,
+ [CLKID_SYS_SPIFC] = &c3_sys_spifc.hw,
+ [CLKID_SYS_NNA] = &c3_sys_nna.hw,
+ [CLKID_SYS_ETH_MAC] = &c3_sys_eth_mac.hw,
+ [CLKID_SYS_GIC] = &c3_sys_gic.hw,
+ [CLKID_SYS_RAMA] = &c3_sys_rama.hw,
+ [CLKID_SYS_BIG_NIC] = &c3_sys_big_nic.hw,
+ [CLKID_SYS_RAMB] = &c3_sys_ramb.hw,
+ [CLKID_SYS_AUDIO_PCLK] = &c3_sys_audio_pclk.hw,
+ [CLKID_SYS_PWM_KL] = &c3_sys_pwm_kl.hw,
+ [CLKID_SYS_PWM_IJ] = &c3_sys_pwm_ij.hw,
+ [CLKID_SYS_USB] = &c3_sys_usb.hw,
+ [CLKID_SYS_SD_EMMC_A] = &c3_sys_sd_emmc_a.hw,
+ [CLKID_SYS_SD_EMMC_C] = &c3_sys_sd_emmc_c.hw,
+ [CLKID_SYS_PWM_AB] = &c3_sys_pwm_ab.hw,
+ [CLKID_SYS_PWM_CD] = &c3_sys_pwm_cd.hw,
+ [CLKID_SYS_PWM_EF] = &c3_sys_pwm_ef.hw,
+ [CLKID_SYS_PWM_GH] = &c3_sys_pwm_gh.hw,
+ [CLKID_SYS_SPICC_1] = &c3_sys_spicc_1.hw,
+ [CLKID_SYS_SPICC_0] = &c3_sys_spicc_0.hw,
+ [CLKID_SYS_UART_A] = &c3_sys_uart_a.hw,
+ [CLKID_SYS_UART_B] = &c3_sys_uart_b.hw,
+ [CLKID_SYS_UART_C] = &c3_sys_uart_c.hw,
+ [CLKID_SYS_UART_D] = &c3_sys_uart_d.hw,
+ [CLKID_SYS_UART_E] = &c3_sys_uart_e.hw,
+ [CLKID_SYS_I2C_M_A] = &c3_sys_i2c_m_a.hw,
+ [CLKID_SYS_I2C_M_B] = &c3_sys_i2c_m_b.hw,
+ [CLKID_SYS_I2C_M_C] = &c3_sys_i2c_m_c.hw,
+ [CLKID_SYS_I2C_M_D] = &c3_sys_i2c_m_d.hw,
+ [CLKID_SYS_I2S_S_A] = &c3_sys_i2c_s_a.hw,
+ [CLKID_SYS_RTC] = &c3_sys_rtc.hw,
+ [CLKID_SYS_GE2D] = &c3_sys_ge2d.hw,
+ [CLKID_SYS_ISP] = &c3_sys_isp.hw,
+ [CLKID_SYS_GPV_ISP_NIC] = &c3_sys_gpv_isp_nic.hw,
+ [CLKID_SYS_GPV_CVE_NIC] = &c3_sys_gpv_cve_nic.hw,
+ [CLKID_SYS_MIPI_DSI_HOST] = &c3_sys_mipi_dsi_host.hw,
+ [CLKID_SYS_MIPI_DSI_PHY] = &c3_sys_mipi_dsi_phy.hw,
+ [CLKID_SYS_ETH_PHY] = &c3_sys_eth_phy.hw,
+ [CLKID_SYS_ACODEC] = &c3_sys_acodec.hw,
+ [CLKID_SYS_DWAP] = &c3_sys_dwap.hw,
+ [CLKID_SYS_DOS] = &c3_sys_dos.hw,
+ [CLKID_SYS_CVE] = &c3_sys_cve.hw,
+ [CLKID_SYS_VOUT] = &c3_sys_vout.hw,
+ [CLKID_SYS_VC9000E] = &c3_sys_vc9000e.hw,
+ [CLKID_SYS_PWM_MN] = &c3_sys_pwm_mn.hw,
+ [CLKID_SYS_SD_EMMC_B] = &c3_sys_sd_emmc_b.hw,
+ [CLKID_AXI_SYS_NIC] = &c3_axi_sys_nic.hw,
+ [CLKID_AXI_ISP_NIC] = &c3_axi_isp_nic.hw,
+ [CLKID_AXI_CVE_NIC] = &c3_axi_cve_nic.hw,
+ [CLKID_AXI_RAMB] = &c3_axi_ramb.hw,
+ [CLKID_AXI_RAMA] = &c3_axi_rama.hw,
+ [CLKID_AXI_CPU_DMC] = &c3_axi_cpu_dmc.hw,
+ [CLKID_AXI_NIC] = &c3_axi_nic.hw,
+ [CLKID_AXI_DMA] = &c3_axi_dma.hw,
+ [CLKID_AXI_MUX_NIC] = &c3_axi_mux_nic.hw,
+ [CLKID_AXI_CVE] = &c3_axi_cve.hw,
+ [CLKID_AXI_DEV1_DMC] = &c3_axi_dev1_dmc.hw,
+ [CLKID_AXI_DEV0_DMC] = &c3_axi_dev0_dmc.hw,
+ [CLKID_AXI_DSP_DMC] = &c3_axi_dsp_dmc.hw,
+ [CLKID_12_24M_IN] = &c3_clk_12_24m_in.hw,
+ [CLKID_12M_24M] = &c3_clk_12_24m.hw,
+ [CLKID_FCLK_25M_DIV] = &c3_fclk_25m_div.hw,
+ [CLKID_FCLK_25M] = &c3_fclk_25m.hw,
+ [CLKID_GEN_SEL] = &c3_gen_sel.hw,
+ [CLKID_GEN_DIV] = &c3_gen_div.hw,
+ [CLKID_GEN] = &c3_gen.hw,
+ [CLKID_SARADC_SEL] = &c3_saradc_sel.hw,
+ [CLKID_SARADC_DIV] = &c3_saradc_div.hw,
+ [CLKID_SARADC] = &c3_saradc.hw,
+ [CLKID_PWM_A_SEL] = &c3_pwm_a_sel.hw,
+ [CLKID_PWM_A_DIV] = &c3_pwm_a_div.hw,
+ [CLKID_PWM_A] = &c3_pwm_a.hw,
+ [CLKID_PWM_B_SEL] = &c3_pwm_b_sel.hw,
+ [CLKID_PWM_B_DIV] = &c3_pwm_b_div.hw,
+ [CLKID_PWM_B] = &c3_pwm_b.hw,
+ [CLKID_PWM_C_SEL] = &c3_pwm_c_sel.hw,
+ [CLKID_PWM_C_DIV] = &c3_pwm_c_div.hw,
+ [CLKID_PWM_C] = &c3_pwm_c.hw,
+ [CLKID_PWM_D_SEL] = &c3_pwm_d_sel.hw,
+ [CLKID_PWM_D_DIV] = &c3_pwm_d_div.hw,
+ [CLKID_PWM_D] = &c3_pwm_d.hw,
+ [CLKID_PWM_E_SEL] = &c3_pwm_e_sel.hw,
+ [CLKID_PWM_E_DIV] = &c3_pwm_e_div.hw,
+ [CLKID_PWM_E] = &c3_pwm_e.hw,
+ [CLKID_PWM_F_SEL] = &c3_pwm_f_sel.hw,
+ [CLKID_PWM_F_DIV] = &c3_pwm_f_div.hw,
+ [CLKID_PWM_F] = &c3_pwm_f.hw,
+ [CLKID_PWM_G_SEL] = &c3_pwm_g_sel.hw,
+ [CLKID_PWM_G_DIV] = &c3_pwm_g_div.hw,
+ [CLKID_PWM_G] = &c3_pwm_g.hw,
+ [CLKID_PWM_H_SEL] = &c3_pwm_h_sel.hw,
+ [CLKID_PWM_H_DIV] = &c3_pwm_h_div.hw,
+ [CLKID_PWM_H] = &c3_pwm_h.hw,
+ [CLKID_PWM_I_SEL] = &c3_pwm_i_sel.hw,
+ [CLKID_PWM_I_DIV] = &c3_pwm_i_div.hw,
+ [CLKID_PWM_I] = &c3_pwm_i.hw,
+ [CLKID_PWM_J_SEL] = &c3_pwm_j_sel.hw,
+ [CLKID_PWM_J_DIV] = &c3_pwm_j_div.hw,
+ [CLKID_PWM_J] = &c3_pwm_j.hw,
+ [CLKID_PWM_K_SEL] = &c3_pwm_k_sel.hw,
+ [CLKID_PWM_K_DIV] = &c3_pwm_k_div.hw,
+ [CLKID_PWM_K] = &c3_pwm_k.hw,
+ [CLKID_PWM_L_SEL] = &c3_pwm_l_sel.hw,
+ [CLKID_PWM_L_DIV] = &c3_pwm_l_div.hw,
+ [CLKID_PWM_L] = &c3_pwm_l.hw,
+ [CLKID_PWM_M_SEL] = &c3_pwm_m_sel.hw,
+ [CLKID_PWM_M_DIV] = &c3_pwm_m_div.hw,
+ [CLKID_PWM_M] = &c3_pwm_m.hw,
+ [CLKID_PWM_N_SEL] = &c3_pwm_n_sel.hw,
+ [CLKID_PWM_N_DIV] = &c3_pwm_n_div.hw,
+ [CLKID_PWM_N] = &c3_pwm_n.hw,
+ [CLKID_SPICC_A_SEL] = &c3_spicc_a_sel.hw,
+ [CLKID_SPICC_A_DIV] = &c3_spicc_a_div.hw,
+ [CLKID_SPICC_A] = &c3_spicc_a.hw,
+ [CLKID_SPICC_B_SEL] = &c3_spicc_b_sel.hw,
+ [CLKID_SPICC_B_DIV] = &c3_spicc_b_div.hw,
+ [CLKID_SPICC_B] = &c3_spicc_b.hw,
+ [CLKID_SPIFC_SEL] = &c3_spifc_sel.hw,
+ [CLKID_SPIFC_DIV] = &c3_spifc_div.hw,
+ [CLKID_SPIFC] = &c3_spifc.hw,
+ [CLKID_SD_EMMC_A_SEL] = &c3_sd_emmc_a_sel.hw,
+ [CLKID_SD_EMMC_A_DIV] = &c3_sd_emmc_a_div.hw,
+ [CLKID_SD_EMMC_A] = &c3_sd_emmc_a.hw,
+ [CLKID_SD_EMMC_B_SEL] = &c3_sd_emmc_b_sel.hw,
+ [CLKID_SD_EMMC_B_DIV] = &c3_sd_emmc_b_div.hw,
+ [CLKID_SD_EMMC_B] = &c3_sd_emmc_b.hw,
+ [CLKID_SD_EMMC_C_SEL] = &c3_sd_emmc_c_sel.hw,
+ [CLKID_SD_EMMC_C_DIV] = &c3_sd_emmc_c_div.hw,
+ [CLKID_SD_EMMC_C] = &c3_sd_emmc_c.hw,
+ [CLKID_TS_DIV] = &c3_ts_div.hw,
+ [CLKID_TS] = &c3_ts.hw,
+ [CLKID_ETH_125M_DIV] = &c3_eth_125m_div.hw,
+ [CLKID_ETH_125M] = &c3_eth_125m.hw,
+ [CLKID_ETH_RMII_DIV] = &c3_eth_rmii_div.hw,
+ [CLKID_ETH_RMII] = &c3_eth_rmii.hw,
+ [CLKID_MIPI_DSI_MEAS_SEL] = &c3_mipi_dsi_meas_sel.hw,
+ [CLKID_MIPI_DSI_MEAS_DIV] = &c3_mipi_dsi_meas_div.hw,
+ [CLKID_MIPI_DSI_MEAS] = &c3_mipi_dsi_meas.hw,
+ [CLKID_DSI_PHY_SEL] = &c3_dsi_phy_sel.hw,
+ [CLKID_DSI_PHY_DIV] = &c3_dsi_phy_div.hw,
+ [CLKID_DSI_PHY] = &c3_dsi_phy.hw,
+ [CLKID_VOUT_MCLK_SEL] = &c3_vout_mclk_sel.hw,
+ [CLKID_VOUT_MCLK_DIV] = &c3_vout_mclk_div.hw,
+ [CLKID_VOUT_MCLK] = &c3_vout_mclk.hw,
+ [CLKID_VOUT_ENC_SEL] = &c3_vout_enc_sel.hw,
+ [CLKID_VOUT_ENC_DIV] = &c3_vout_enc_div.hw,
+ [CLKID_VOUT_ENC] = &c3_vout_enc.hw,
+ [CLKID_HCODEC_0_SEL] = &c3_hcodec_0_sel.hw,
+ [CLKID_HCODEC_0_DIV] = &c3_hcodec_0_div.hw,
+ [CLKID_HCODEC_0] = &c3_hcodec_0.hw,
+ [CLKID_HCODEC_1_SEL] = &c3_hcodec_1_sel.hw,
+ [CLKID_HCODEC_1_DIV] = &c3_hcodec_1_div.hw,
+ [CLKID_HCODEC_1] = &c3_hcodec_1.hw,
+ [CLKID_HCODEC] = &c3_hcodec.hw,
+ [CLKID_VC9000E_ACLK_SEL] = &c3_vc9000e_aclk_sel.hw,
+ [CLKID_VC9000E_ACLK_DIV] = &c3_vc9000e_aclk_div.hw,
+ [CLKID_VC9000E_ACLK] = &c3_vc9000e_aclk.hw,
+ [CLKID_VC9000E_CORE_SEL] = &c3_vc9000e_core_sel.hw,
+ [CLKID_VC9000E_CORE_DIV] = &c3_vc9000e_core_div.hw,
+ [CLKID_VC9000E_CORE] = &c3_vc9000e_core.hw,
+ [CLKID_CSI_PHY0_SEL] = &c3_csi_phy0_sel.hw,
+ [CLKID_CSI_PHY0_DIV] = &c3_csi_phy0_div.hw,
+ [CLKID_CSI_PHY0] = &c3_csi_phy0.hw,
+ [CLKID_DEWARPA_SEL] = &c3_dewarpa_sel.hw,
+ [CLKID_DEWARPA_DIV] = &c3_dewarpa_div.hw,
+ [CLKID_DEWARPA] = &c3_dewarpa.hw,
+ [CLKID_ISP0_SEL] = &c3_isp0_sel.hw,
+ [CLKID_ISP0_DIV] = &c3_isp0_div.hw,
+ [CLKID_ISP0] = &c3_isp0.hw,
+ [CLKID_NNA_CORE_SEL] = &c3_nna_core_sel.hw,
+ [CLKID_NNA_CORE_DIV] = &c3_nna_core_div.hw,
+ [CLKID_NNA_CORE] = &c3_nna_core.hw,
+ [CLKID_GE2D_SEL] = &c3_ge2d_sel.hw,
+ [CLKID_GE2D_DIV] = &c3_ge2d_div.hw,
+ [CLKID_GE2D] = &c3_ge2d.hw,
+ [CLKID_VAPB_SEL] = &c3_vapb_sel.hw,
+ [CLKID_VAPB_DIV] = &c3_vapb_div.hw,
+ [CLKID_VAPB] = &c3_vapb.hw,
+};
+
+static const struct meson_clkc_data c3_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = c3_peripherals_hw_clks,
+ .num = ARRAY_SIZE(c3_peripherals_hw_clks),
},
};
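The hw_clks table above is indexed by the CLKID_* constants from the dt-bindings header, so the provider callback can translate a consumer's clock-specifier cell straight into a clk_hw pointer. A minimal sketch of that lookup, assuming meson_clk_hw_get keeps its existing one-cell semantics (error reporting in the real helper may differ):

struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_data)
{
	const struct meson_clk_hw_data *data = clk_hw_data;
	unsigned int idx = clkspec->args[0];

	/* indices past the table end are invalid clock specifiers */
	if (idx >= data->num)
		return ERR_PTR(-EINVAL);

	return data->hws[idx];
}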
-static struct clk_regmap vapb_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = VAPB_CLK_CTRL,
- .shift = 0,
- .width = 7,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vapb_sel.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap vapb = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = VAPB_CLK_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data) {
- .name = "vapb",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &vapb_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_hw *c3_periphs_hw_clks[] = {
- [CLKID_RTC_XTAL_CLKIN] = &rtc_xtal_clkin.hw,
- [CLKID_RTC_32K_DIV] = &rtc_32k_div.hw,
- [CLKID_RTC_32K_MUX] = &rtc_32k_mux.hw,
- [CLKID_RTC_32K] = &rtc_32k.hw,
- [CLKID_RTC_CLK] = &rtc_clk.hw,
- [CLKID_SYS_RESET_CTRL] = &sys_reset_ctrl.hw,
- [CLKID_SYS_PWR_CTRL] = &sys_pwr_ctrl.hw,
- [CLKID_SYS_PAD_CTRL] = &sys_pad_ctrl.hw,
- [CLKID_SYS_CTRL] = &sys_ctrl.hw,
- [CLKID_SYS_TS_PLL] = &sys_ts_pll.hw,
- [CLKID_SYS_DEV_ARB] = &sys_dev_arb.hw,
- [CLKID_SYS_MMC_PCLK] = &sys_mmc_pclk.hw,
- [CLKID_SYS_CPU_CTRL] = &sys_cpu_ctrl.hw,
- [CLKID_SYS_JTAG_CTRL] = &sys_jtag_ctrl.hw,
- [CLKID_SYS_IR_CTRL] = &sys_ir_ctrl.hw,
- [CLKID_SYS_IRQ_CTRL] = &sys_irq_ctrl.hw,
- [CLKID_SYS_MSR_CLK] = &sys_msr_clk.hw,
- [CLKID_SYS_ROM] = &sys_rom.hw,
- [CLKID_SYS_UART_F] = &sys_uart_f.hw,
- [CLKID_SYS_CPU_ARB] = &sys_cpu_apb.hw,
- [CLKID_SYS_RSA] = &sys_rsa.hw,
- [CLKID_SYS_SAR_ADC] = &sys_sar_adc.hw,
- [CLKID_SYS_STARTUP] = &sys_startup.hw,
- [CLKID_SYS_SECURE] = &sys_secure.hw,
- [CLKID_SYS_SPIFC] = &sys_spifc.hw,
- [CLKID_SYS_NNA] = &sys_nna.hw,
- [CLKID_SYS_ETH_MAC] = &sys_eth_mac.hw,
- [CLKID_SYS_GIC] = &sys_gic.hw,
- [CLKID_SYS_RAMA] = &sys_rama.hw,
- [CLKID_SYS_BIG_NIC] = &sys_big_nic.hw,
- [CLKID_SYS_RAMB] = &sys_ramb.hw,
- [CLKID_SYS_AUDIO_PCLK] = &sys_audio_pclk.hw,
- [CLKID_SYS_PWM_KL] = &sys_pwm_kl.hw,
- [CLKID_SYS_PWM_IJ] = &sys_pwm_ij.hw,
- [CLKID_SYS_USB] = &sys_usb.hw,
- [CLKID_SYS_SD_EMMC_A] = &sys_sd_emmc_a.hw,
- [CLKID_SYS_SD_EMMC_C] = &sys_sd_emmc_c.hw,
- [CLKID_SYS_PWM_AB] = &sys_pwm_ab.hw,
- [CLKID_SYS_PWM_CD] = &sys_pwm_cd.hw,
- [CLKID_SYS_PWM_EF] = &sys_pwm_ef.hw,
- [CLKID_SYS_PWM_GH] = &sys_pwm_gh.hw,
- [CLKID_SYS_SPICC_1] = &sys_spicc_1.hw,
- [CLKID_SYS_SPICC_0] = &sys_spicc_0.hw,
- [CLKID_SYS_UART_A] = &sys_uart_a.hw,
- [CLKID_SYS_UART_B] = &sys_uart_b.hw,
- [CLKID_SYS_UART_C] = &sys_uart_c.hw,
- [CLKID_SYS_UART_D] = &sys_uart_d.hw,
- [CLKID_SYS_UART_E] = &sys_uart_e.hw,
- [CLKID_SYS_I2C_M_A] = &sys_i2c_m_a.hw,
- [CLKID_SYS_I2C_M_B] = &sys_i2c_m_b.hw,
- [CLKID_SYS_I2C_M_C] = &sys_i2c_m_c.hw,
- [CLKID_SYS_I2C_M_D] = &sys_i2c_m_d.hw,
- [CLKID_SYS_I2S_S_A] = &sys_i2c_s_a.hw,
- [CLKID_SYS_RTC] = &sys_rtc.hw,
- [CLKID_SYS_GE2D] = &sys_ge2d.hw,
- [CLKID_SYS_ISP] = &sys_isp.hw,
- [CLKID_SYS_GPV_ISP_NIC] = &sys_gpv_isp_nic.hw,
- [CLKID_SYS_GPV_CVE_NIC] = &sys_gpv_cve_nic.hw,
- [CLKID_SYS_MIPI_DSI_HOST] = &sys_mipi_dsi_host.hw,
- [CLKID_SYS_MIPI_DSI_PHY] = &sys_mipi_dsi_phy.hw,
- [CLKID_SYS_ETH_PHY] = &sys_eth_phy.hw,
- [CLKID_SYS_ACODEC] = &sys_acodec.hw,
- [CLKID_SYS_DWAP] = &sys_dwap.hw,
- [CLKID_SYS_DOS] = &sys_dos.hw,
- [CLKID_SYS_CVE] = &sys_cve.hw,
- [CLKID_SYS_VOUT] = &sys_vout.hw,
- [CLKID_SYS_VC9000E] = &sys_vc9000e.hw,
- [CLKID_SYS_PWM_MN] = &sys_pwm_mn.hw,
- [CLKID_SYS_SD_EMMC_B] = &sys_sd_emmc_b.hw,
- [CLKID_AXI_SYS_NIC] = &axi_sys_nic.hw,
- [CLKID_AXI_ISP_NIC] = &axi_isp_nic.hw,
- [CLKID_AXI_CVE_NIC] = &axi_cve_nic.hw,
- [CLKID_AXI_RAMB] = &axi_ramb.hw,
- [CLKID_AXI_RAMA] = &axi_rama.hw,
- [CLKID_AXI_CPU_DMC] = &axi_cpu_dmc.hw,
- [CLKID_AXI_NIC] = &axi_nic.hw,
- [CLKID_AXI_DMA] = &axi_dma.hw,
- [CLKID_AXI_MUX_NIC] = &axi_mux_nic.hw,
- [CLKID_AXI_CVE] = &axi_cve.hw,
- [CLKID_AXI_DEV1_DMC] = &axi_dev1_dmc.hw,
- [CLKID_AXI_DEV0_DMC] = &axi_dev0_dmc.hw,
- [CLKID_AXI_DSP_DMC] = &axi_dsp_dmc.hw,
- [CLKID_12_24M_IN] = &clk_12_24m_in.hw,
- [CLKID_12M_24M] = &clk_12_24m.hw,
- [CLKID_FCLK_25M_DIV] = &fclk_25m_div.hw,
- [CLKID_FCLK_25M] = &fclk_25m.hw,
- [CLKID_GEN_SEL] = &gen_sel.hw,
- [CLKID_GEN_DIV] = &gen_div.hw,
- [CLKID_GEN] = &gen.hw,
- [CLKID_SARADC_SEL] = &saradc_sel.hw,
- [CLKID_SARADC_DIV] = &saradc_div.hw,
- [CLKID_SARADC] = &saradc.hw,
- [CLKID_PWM_A_SEL] = &pwm_a_sel.hw,
- [CLKID_PWM_A_DIV] = &pwm_a_div.hw,
- [CLKID_PWM_A] = &pwm_a.hw,
- [CLKID_PWM_B_SEL] = &pwm_b_sel.hw,
- [CLKID_PWM_B_DIV] = &pwm_b_div.hw,
- [CLKID_PWM_B] = &pwm_b.hw,
- [CLKID_PWM_C_SEL] = &pwm_c_sel.hw,
- [CLKID_PWM_C_DIV] = &pwm_c_div.hw,
- [CLKID_PWM_C] = &pwm_c.hw,
- [CLKID_PWM_D_SEL] = &pwm_d_sel.hw,
- [CLKID_PWM_D_DIV] = &pwm_d_div.hw,
- [CLKID_PWM_D] = &pwm_d.hw,
- [CLKID_PWM_E_SEL] = &pwm_e_sel.hw,
- [CLKID_PWM_E_DIV] = &pwm_e_div.hw,
- [CLKID_PWM_E] = &pwm_e.hw,
- [CLKID_PWM_F_SEL] = &pwm_f_sel.hw,
- [CLKID_PWM_F_DIV] = &pwm_f_div.hw,
- [CLKID_PWM_F] = &pwm_f.hw,
- [CLKID_PWM_G_SEL] = &pwm_g_sel.hw,
- [CLKID_PWM_G_DIV] = &pwm_g_div.hw,
- [CLKID_PWM_G] = &pwm_g.hw,
- [CLKID_PWM_H_SEL] = &pwm_h_sel.hw,
- [CLKID_PWM_H_DIV] = &pwm_h_div.hw,
- [CLKID_PWM_H] = &pwm_h.hw,
- [CLKID_PWM_I_SEL] = &pwm_i_sel.hw,
- [CLKID_PWM_I_DIV] = &pwm_i_div.hw,
- [CLKID_PWM_I] = &pwm_i.hw,
- [CLKID_PWM_J_SEL] = &pwm_j_sel.hw,
- [CLKID_PWM_J_DIV] = &pwm_j_div.hw,
- [CLKID_PWM_J] = &pwm_j.hw,
- [CLKID_PWM_K_SEL] = &pwm_k_sel.hw,
- [CLKID_PWM_K_DIV] = &pwm_k_div.hw,
- [CLKID_PWM_K] = &pwm_k.hw,
- [CLKID_PWM_L_SEL] = &pwm_l_sel.hw,
- [CLKID_PWM_L_DIV] = &pwm_l_div.hw,
- [CLKID_PWM_L] = &pwm_l.hw,
- [CLKID_PWM_M_SEL] = &pwm_m_sel.hw,
- [CLKID_PWM_M_DIV] = &pwm_m_div.hw,
- [CLKID_PWM_M] = &pwm_m.hw,
- [CLKID_PWM_N_SEL] = &pwm_n_sel.hw,
- [CLKID_PWM_N_DIV] = &pwm_n_div.hw,
- [CLKID_PWM_N] = &pwm_n.hw,
- [CLKID_SPICC_A_SEL] = &spicc_a_sel.hw,
- [CLKID_SPICC_A_DIV] = &spicc_a_div.hw,
- [CLKID_SPICC_A] = &spicc_a.hw,
- [CLKID_SPICC_B_SEL] = &spicc_b_sel.hw,
- [CLKID_SPICC_B_DIV] = &spicc_b_div.hw,
- [CLKID_SPICC_B] = &spicc_b.hw,
- [CLKID_SPIFC_SEL] = &spifc_sel.hw,
- [CLKID_SPIFC_DIV] = &spifc_div.hw,
- [CLKID_SPIFC] = &spifc.hw,
- [CLKID_SD_EMMC_A_SEL] = &sd_emmc_a_sel.hw,
- [CLKID_SD_EMMC_A_DIV] = &sd_emmc_a_div.hw,
- [CLKID_SD_EMMC_A] = &sd_emmc_a.hw,
- [CLKID_SD_EMMC_B_SEL] = &sd_emmc_b_sel.hw,
- [CLKID_SD_EMMC_B_DIV] = &sd_emmc_b_div.hw,
- [CLKID_SD_EMMC_B] = &sd_emmc_b.hw,
- [CLKID_SD_EMMC_C_SEL] = &sd_emmc_c_sel.hw,
- [CLKID_SD_EMMC_C_DIV] = &sd_emmc_c_div.hw,
- [CLKID_SD_EMMC_C] = &sd_emmc_c.hw,
- [CLKID_TS_DIV] = &ts_div.hw,
- [CLKID_TS] = &ts.hw,
- [CLKID_ETH_125M_DIV] = &eth_125m_div.hw,
- [CLKID_ETH_125M] = &eth_125m.hw,
- [CLKID_ETH_RMII_DIV] = &eth_rmii_div.hw,
- [CLKID_ETH_RMII] = &eth_rmii.hw,
- [CLKID_MIPI_DSI_MEAS_SEL] = &mipi_dsi_meas_sel.hw,
- [CLKID_MIPI_DSI_MEAS_DIV] = &mipi_dsi_meas_div.hw,
- [CLKID_MIPI_DSI_MEAS] = &mipi_dsi_meas.hw,
- [CLKID_DSI_PHY_SEL] = &dsi_phy_sel.hw,
- [CLKID_DSI_PHY_DIV] = &dsi_phy_div.hw,
- [CLKID_DSI_PHY] = &dsi_phy.hw,
- [CLKID_VOUT_MCLK_SEL] = &vout_mclk_sel.hw,
- [CLKID_VOUT_MCLK_DIV] = &vout_mclk_div.hw,
- [CLKID_VOUT_MCLK] = &vout_mclk.hw,
- [CLKID_VOUT_ENC_SEL] = &vout_enc_sel.hw,
- [CLKID_VOUT_ENC_DIV] = &vout_enc_div.hw,
- [CLKID_VOUT_ENC] = &vout_enc.hw,
- [CLKID_HCODEC_0_SEL] = &hcodec_0_sel.hw,
- [CLKID_HCODEC_0_DIV] = &hcodec_0_div.hw,
- [CLKID_HCODEC_0] = &hcodec_0.hw,
- [CLKID_HCODEC_1_SEL] = &hcodec_1_sel.hw,
- [CLKID_HCODEC_1_DIV] = &hcodec_1_div.hw,
- [CLKID_HCODEC_1] = &hcodec_1.hw,
- [CLKID_HCODEC] = &hcodec.hw,
- [CLKID_VC9000E_ACLK_SEL] = &vc9000e_aclk_sel.hw,
- [CLKID_VC9000E_ACLK_DIV] = &vc9000e_aclk_div.hw,
- [CLKID_VC9000E_ACLK] = &vc9000e_aclk.hw,
- [CLKID_VC9000E_CORE_SEL] = &vc9000e_core_sel.hw,
- [CLKID_VC9000E_CORE_DIV] = &vc9000e_core_div.hw,
- [CLKID_VC9000E_CORE] = &vc9000e_core.hw,
- [CLKID_CSI_PHY0_SEL] = &csi_phy0_sel.hw,
- [CLKID_CSI_PHY0_DIV] = &csi_phy0_div.hw,
- [CLKID_CSI_PHY0] = &csi_phy0.hw,
- [CLKID_DEWARPA_SEL] = &dewarpa_sel.hw,
- [CLKID_DEWARPA_DIV] = &dewarpa_div.hw,
- [CLKID_DEWARPA] = &dewarpa.hw,
- [CLKID_ISP0_SEL] = &isp0_sel.hw,
- [CLKID_ISP0_DIV] = &isp0_div.hw,
- [CLKID_ISP0] = &isp0.hw,
- [CLKID_NNA_CORE_SEL] = &nna_core_sel.hw,
- [CLKID_NNA_CORE_DIV] = &nna_core_div.hw,
- [CLKID_NNA_CORE] = &nna_core.hw,
- [CLKID_GE2D_SEL] = &ge2d_sel.hw,
- [CLKID_GE2D_DIV] = &ge2d_div.hw,
- [CLKID_GE2D] = &ge2d.hw,
- [CLKID_VAPB_SEL] = &vapb_sel.hw,
- [CLKID_VAPB_DIV] = &vapb_div.hw,
- [CLKID_VAPB] = &vapb.hw,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = NNA_CLK_CTRL,
-};
-
-static struct meson_clk_hw_data c3_periphs_clks = {
- .hws = c3_periphs_hw_clks,
- .num = ARRAY_SIZE(c3_periphs_hw_clks),
-};
-
-static int c3_peripherals_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int clkid, ret;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- for (clkid = 0; clkid < c3_periphs_clks.num; clkid++) {
- /* array might be sparse */
- if (!c3_periphs_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, c3_periphs_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &c3_periphs_clks);
-}
-
static const struct of_device_id c3_peripherals_clkc_match_table[] = {
{
.compatible = "amlogic,c3-peripherals-clkc",
+ .data = &c3_peripherals_clkc_data,
},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, c3_peripherals_clkc_match_table);
-static struct platform_driver c3_peripherals_driver = {
- .probe = c3_peripherals_probe,
+static struct platform_driver c3_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "c3-peripherals-clkc",
.of_match_table = c3_peripherals_clkc_match_table,
},
};
-module_platform_driver(c3_peripherals_driver);
+module_platform_driver(c3_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic C3 Peripherals Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
diff --git a/drivers/clk/meson/c3-pll.c b/drivers/clk/meson/c3-pll.c
index 2c5594b8e49a..dd047d17488c 100644
--- a/drivers/clk/meson/c3-pll.c
+++ b/drivers/clk/meson/c3-pll.c
@@ -34,7 +34,7 @@
#define ANACTRL_MPLL_CTRL3 0x18c
#define ANACTRL_MPLL_CTRL4 0x190
-static struct clk_regmap fclk_50m_en = {
+static struct clk_regmap c3_fclk_50m_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 0,
@@ -49,20 +49,20 @@ static struct clk_regmap fclk_50m_en = {
},
};
-static struct clk_fixed_factor fclk_50m = {
+static struct clk_fixed_factor c3_fclk_50m = {
.mult = 1,
.div = 40,
.hw.init = &(struct clk_init_data) {
.name = "fclk_50m",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_50m_en.hw
+ &c3_fclk_50m_en.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div2_div = {
+static struct clk_fixed_factor c3_fclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data) {
@@ -75,7 +75,7 @@ static struct clk_fixed_factor fclk_div2_div = {
},
};
-static struct clk_regmap fclk_div2 = {
+static struct clk_regmap c3_fclk_div2 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 24,
@@ -84,13 +84,13 @@ static struct clk_regmap fclk_div2 = {
.name = "fclk_div2",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2_div.hw
+ &c3_fclk_div2_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div2p5_div = {
+static struct clk_fixed_factor c3_fclk_div2p5_div = {
.mult = 2,
.div = 5,
.hw.init = &(struct clk_init_data) {
@@ -103,7 +103,7 @@ static struct clk_fixed_factor fclk_div2p5_div = {
},
};
-static struct clk_regmap fclk_div2p5 = {
+static struct clk_regmap c3_fclk_div2p5 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 4,
@@ -112,13 +112,13 @@ static struct clk_regmap fclk_div2p5 = {
.name = "fclk_div2p5",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div2p5_div.hw
+ &c3_fclk_div2p5_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div3_div = {
+static struct clk_fixed_factor c3_fclk_div3_div = {
.mult = 1,
.div = 3,
.hw.init = &(struct clk_init_data) {
@@ -131,7 +131,7 @@ static struct clk_fixed_factor fclk_div3_div = {
},
};
-static struct clk_regmap fclk_div3 = {
+static struct clk_regmap c3_fclk_div3 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 20,
@@ -140,13 +140,13 @@ static struct clk_regmap fclk_div3 = {
.name = "fclk_div3",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div3_div.hw
+ &c3_fclk_div3_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div4_div = {
+static struct clk_fixed_factor c3_fclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data) {
@@ -159,7 +159,7 @@ static struct clk_fixed_factor fclk_div4_div = {
},
};
-static struct clk_regmap fclk_div4 = {
+static struct clk_regmap c3_fclk_div4 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 21,
@@ -168,13 +168,13 @@ static struct clk_regmap fclk_div4 = {
.name = "fclk_div4",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div4_div.hw
+ &c3_fclk_div4_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div5_div = {
+static struct clk_fixed_factor c3_fclk_div5_div = {
.mult = 1,
.div = 5,
.hw.init = &(struct clk_init_data) {
@@ -187,7 +187,7 @@ static struct clk_fixed_factor fclk_div5_div = {
},
};
-static struct clk_regmap fclk_div5 = {
+static struct clk_regmap c3_fclk_div5 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 22,
@@ -196,13 +196,13 @@ static struct clk_regmap fclk_div5 = {
.name = "fclk_div5",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div5_div.hw
+ &c3_fclk_div5_div.hw
},
.num_parents = 1,
},
};
-static struct clk_fixed_factor fclk_div7_div = {
+static struct clk_fixed_factor c3_fclk_div7_div = {
.mult = 1,
.div = 7,
.hw.init = &(struct clk_init_data) {
@@ -215,7 +215,7 @@ static struct clk_fixed_factor fclk_div7_div = {
},
};
-static struct clk_regmap fclk_div7 = {
+static struct clk_regmap c3_fclk_div7 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_FIXPLL_CTRL4,
.bit_idx = 23,
@@ -224,13 +224,13 @@ static struct clk_regmap fclk_div7 = {
.name = "fclk_div7",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &fclk_div7_div.hw
+ &c3_fclk_div7_div.hw
},
.num_parents = 1,
},
};
-static const struct reg_sequence c3_gp0_init_regs[] = {
+static const struct reg_sequence c3_gp0_pll_init_regs[] = {
{ .reg = ANACTRL_GP0PLL_CTRL2, .def = 0x0 },
{ .reg = ANACTRL_GP0PLL_CTRL3, .def = 0x48681c00 },
{ .reg = ANACTRL_GP0PLL_CTRL4, .def = 0x88770290 },
@@ -243,7 +243,7 @@ static const struct pll_mult_range c3_gp0_pll_mult_range = {
.max = 250,
};
-static struct clk_regmap gp0_pll_dco = {
+static struct clk_regmap c3_gp0_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_GP0PLL_CTRL0,
@@ -276,8 +276,8 @@ static struct clk_regmap gp0_pll_dco = {
.width = 1,
},
.range = &c3_gp0_pll_mult_range,
- .init_regs = c3_gp0_init_regs,
- .init_count = ARRAY_SIZE(c3_gp0_init_regs),
+ .init_regs = c3_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data) {
.name = "gp0_pll_dco",
@@ -300,7 +300,7 @@ static const struct clk_div_table c3_gp0_pll_od_table[] = {
{ /* sentinel */ }
};
-static struct clk_regmap gp0_pll = {
+static struct clk_regmap c3_gp0_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_GP0PLL_CTRL0,
.shift = 16,
@@ -311,14 +311,14 @@ static struct clk_regmap gp0_pll = {
.name = "gp0_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gp0_pll_dco.hw
+ &c3_gp0_pll_dco.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct reg_sequence c3_hifi_init_regs[] = {
+static const struct reg_sequence c3_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x0 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 },
{ .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 },
@@ -326,7 +326,7 @@ static const struct reg_sequence c3_hifi_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL6, .def = 0x56540000 },
};
-static struct clk_regmap hifi_pll_dco = {
+static struct clk_regmap c3_hifi_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_HIFIPLL_CTRL0,
@@ -359,8 +359,8 @@ static struct clk_regmap hifi_pll_dco = {
.width = 1,
},
.range = &c3_gp0_pll_mult_range,
- .init_regs = c3_hifi_init_regs,
- .init_count = ARRAY_SIZE(c3_hifi_init_regs),
+ .init_regs = c3_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_hifi_pll_init_regs),
.frac_max = 100000,
},
.hw.init = &(struct clk_init_data) {
@@ -373,7 +373,7 @@ static struct clk_regmap hifi_pll_dco = {
},
};
-static struct clk_regmap hifi_pll = {
+static struct clk_regmap c3_hifi_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_HIFIPLL_CTRL0,
.shift = 16,
@@ -384,14 +384,14 @@ static struct clk_regmap hifi_pll = {
.name = "hifi_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &hifi_pll_dco.hw
+ &c3_hifi_pll_dco.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct reg_sequence c3_mclk_init_regs[] = {
+static const struct reg_sequence c3_mclk_pll_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL1, .def = 0x1420500f },
{ .reg = ANACTRL_MPLL_CTRL2, .def = 0x00023041 },
{ .reg = ANACTRL_MPLL_CTRL3, .def = 0x18180000 },
@@ -403,7 +403,7 @@ static const struct pll_mult_range c3_mclk_pll_mult_range = {
.max = 133,
};
-static struct clk_regmap mclk_pll_dco = {
+static struct clk_regmap c3_mclk_pll_dco = {
.data = &(struct meson_clk_pll_data) {
.en = {
.reg_off = ANACTRL_MPLL_CTRL0,
@@ -431,8 +431,8 @@ static struct clk_regmap mclk_pll_dco = {
.width = 1,
},
.range = &c3_mclk_pll_mult_range,
- .init_regs = c3_mclk_init_regs,
- .init_count = ARRAY_SIZE(c3_mclk_init_regs),
+ .init_regs = c3_mclk_pll_init_regs,
+ .init_count = ARRAY_SIZE(c3_mclk_pll_init_regs),
},
.hw.init = &(struct clk_init_data) {
.name = "mclk_pll_dco",
@@ -444,7 +444,7 @@ static struct clk_regmap mclk_pll_dco = {
},
};
-static const struct clk_div_table c3_mpll_od_table[] = {
+static const struct clk_div_table c3_mpll_pll_od_table[] = {
{ 0, 1 },
{ 1, 2 },
{ 2, 4 },
@@ -453,25 +453,25 @@ static const struct clk_div_table c3_mpll_od_table[] = {
{ /* sentinel */ }
};
-static struct clk_regmap mclk_pll_od = {
+static struct clk_regmap c3_mclk_pll_od = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL0,
.shift = 12,
.width = 3,
- .table = c3_mpll_od_table,
+ .table = c3_mpll_pll_od_table,
},
.hw.init = &(struct clk_init_data) {
.name = "mclk_pll_od",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk_pll_dco.hw },
+ &c3_mclk_pll_dco.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
/* both values 0 and 1 divide the input rate by one */
-static struct clk_regmap mclk_pll = {
+static struct clk_regmap c3_mclk_pll = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 16,
@@ -482,20 +482,20 @@ static struct clk_regmap mclk_pll = {
.name = "mclk_pll",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk_pll_od.hw
+ &c3_mclk_pll_od.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const struct clk_parent_data mclk_parent[] = {
- { .hw = &mclk_pll.hw },
+static const struct clk_parent_data c3_mclk_parents[] = {
+ { .hw = &c3_mclk_pll.hw },
{ .fw_name = "mclk" },
- { .hw = &fclk_50m.hw }
+ { .hw = &c3_fclk_50m.hw }
};
-static struct clk_regmap mclk0_sel = {
+static struct clk_regmap c3_mclk0_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = ANACTRL_MPLL_CTRL4,
.mask = 0x3,
@@ -504,12 +504,12 @@ static struct clk_regmap mclk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "mclk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = mclk_parent,
- .num_parents = ARRAY_SIZE(mclk_parent),
+ .parent_data = c3_mclk_parents,
+ .num_parents = ARRAY_SIZE(c3_mclk_parents),
},
};
-static struct clk_regmap mclk0_div_en = {
+static struct clk_regmap c3_mclk0_div_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 1,
@@ -518,14 +518,14 @@ static struct clk_regmap mclk0_div_en = {
.name = "mclk0_div_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_sel.hw
+ &c3_mclk0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk0_div = {
+static struct clk_regmap c3_mclk0_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 2,
@@ -535,14 +535,14 @@ static struct clk_regmap mclk0_div = {
.name = "mclk0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_div_en.hw
+ &c3_mclk0_div_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk0 = {
+static struct clk_regmap c3_mclk0 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 0,
@@ -551,14 +551,14 @@ static struct clk_regmap mclk0 = {
.name = "mclk0",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk0_div.hw
+ &c3_mclk0_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1_sel = {
+static struct clk_regmap c3_mclk1_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = ANACTRL_MPLL_CTRL4,
.mask = 0x3,
@@ -567,12 +567,12 @@ static struct clk_regmap mclk1_sel = {
.hw.init = &(struct clk_init_data) {
.name = "mclk1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = mclk_parent,
- .num_parents = ARRAY_SIZE(mclk_parent),
+ .parent_data = c3_mclk_parents,
+ .num_parents = ARRAY_SIZE(c3_mclk_parents),
},
};
-static struct clk_regmap mclk1_div_en = {
+static struct clk_regmap c3_mclk1_div_en = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 9,
@@ -581,14 +581,14 @@ static struct clk_regmap mclk1_div_en = {
.name = "mclk1_div_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_sel.hw
+ &c3_mclk1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1_div = {
+static struct clk_regmap c3_mclk1_div = {
.data = &(struct clk_regmap_div_data) {
.offset = ANACTRL_MPLL_CTRL4,
.shift = 10,
@@ -598,14 +598,14 @@ static struct clk_regmap mclk1_div = {
.name = "mclk1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_div_en.hw
+ &c3_mclk1_div_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap mclk1 = {
+static struct clk_regmap c3_mclk1 = {
.data = &(struct clk_regmap_gate_data) {
.offset = ANACTRL_MPLL_CTRL4,
.bit_idx = 8,
@@ -614,7 +614,7 @@ static struct clk_regmap mclk1 = {
.name = "mclk1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &mclk1_div.hw
+ &c3_mclk1_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -622,96 +622,61 @@ static struct clk_regmap mclk1 = {
};
static struct clk_hw *c3_pll_hw_clks[] = {
- [CLKID_FCLK_50M_EN] = &fclk_50m_en.hw,
- [CLKID_FCLK_50M] = &fclk_50m.hw,
- [CLKID_FCLK_DIV2_DIV] = &fclk_div2_div.hw,
- [CLKID_FCLK_DIV2] = &fclk_div2.hw,
- [CLKID_FCLK_DIV2P5_DIV] = &fclk_div2p5_div.hw,
- [CLKID_FCLK_DIV2P5] = &fclk_div2p5.hw,
- [CLKID_FCLK_DIV3_DIV] = &fclk_div3_div.hw,
- [CLKID_FCLK_DIV3] = &fclk_div3.hw,
- [CLKID_FCLK_DIV4_DIV] = &fclk_div4_div.hw,
- [CLKID_FCLK_DIV4] = &fclk_div4.hw,
- [CLKID_FCLK_DIV5_DIV] = &fclk_div5_div.hw,
- [CLKID_FCLK_DIV5] = &fclk_div5.hw,
- [CLKID_FCLK_DIV7_DIV] = &fclk_div7_div.hw,
- [CLKID_FCLK_DIV7] = &fclk_div7.hw,
- [CLKID_GP0_PLL_DCO] = &gp0_pll_dco.hw,
- [CLKID_GP0_PLL] = &gp0_pll.hw,
- [CLKID_HIFI_PLL_DCO] = &hifi_pll_dco.hw,
- [CLKID_HIFI_PLL] = &hifi_pll.hw,
- [CLKID_MCLK_PLL_DCO] = &mclk_pll_dco.hw,
- [CLKID_MCLK_PLL_OD] = &mclk_pll_od.hw,
- [CLKID_MCLK_PLL] = &mclk_pll.hw,
- [CLKID_MCLK0_SEL] = &mclk0_sel.hw,
- [CLKID_MCLK0_SEL_EN] = &mclk0_div_en.hw,
- [CLKID_MCLK0_DIV] = &mclk0_div.hw,
- [CLKID_MCLK0] = &mclk0.hw,
- [CLKID_MCLK1_SEL] = &mclk1_sel.hw,
- [CLKID_MCLK1_SEL_EN] = &mclk1_div_en.hw,
- [CLKID_MCLK1_DIV] = &mclk1_div.hw,
- [CLKID_MCLK1] = &mclk1.hw
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_MPLL_CTRL4,
-};
-
-static struct meson_clk_hw_data c3_pll_clks = {
- .hws = c3_pll_hw_clks,
- .num = ARRAY_SIZE(c3_pll_hw_clks),
-};
-
-static int c3_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int clkid, ret;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- for (clkid = 0; clkid < c3_pll_clks.num; clkid++) {
- /* array might be sparse */
- if (!c3_pll_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, c3_pll_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &c3_pll_clks);
-}
+ [CLKID_FCLK_50M_EN] = &c3_fclk_50m_en.hw,
+ [CLKID_FCLK_50M] = &c3_fclk_50m.hw,
+ [CLKID_FCLK_DIV2_DIV] = &c3_fclk_div2_div.hw,
+ [CLKID_FCLK_DIV2] = &c3_fclk_div2.hw,
+ [CLKID_FCLK_DIV2P5_DIV] = &c3_fclk_div2p5_div.hw,
+ [CLKID_FCLK_DIV2P5] = &c3_fclk_div2p5.hw,
+ [CLKID_FCLK_DIV3_DIV] = &c3_fclk_div3_div.hw,
+ [CLKID_FCLK_DIV3] = &c3_fclk_div3.hw,
+ [CLKID_FCLK_DIV4_DIV] = &c3_fclk_div4_div.hw,
+ [CLKID_FCLK_DIV4] = &c3_fclk_div4.hw,
+ [CLKID_FCLK_DIV5_DIV] = &c3_fclk_div5_div.hw,
+ [CLKID_FCLK_DIV5] = &c3_fclk_div5.hw,
+ [CLKID_FCLK_DIV7_DIV] = &c3_fclk_div7_div.hw,
+ [CLKID_FCLK_DIV7] = &c3_fclk_div7.hw,
+ [CLKID_GP0_PLL_DCO] = &c3_gp0_pll_dco.hw,
+ [CLKID_GP0_PLL] = &c3_gp0_pll.hw,
+ [CLKID_HIFI_PLL_DCO] = &c3_hifi_pll_dco.hw,
+ [CLKID_HIFI_PLL] = &c3_hifi_pll.hw,
+ [CLKID_MCLK_PLL_DCO] = &c3_mclk_pll_dco.hw,
+ [CLKID_MCLK_PLL_OD] = &c3_mclk_pll_od.hw,
+ [CLKID_MCLK_PLL] = &c3_mclk_pll.hw,
+ [CLKID_MCLK0_SEL] = &c3_mclk0_sel.hw,
+ [CLKID_MCLK0_SEL_EN] = &c3_mclk0_div_en.hw,
+ [CLKID_MCLK0_DIV] = &c3_mclk0_div.hw,
+ [CLKID_MCLK0] = &c3_mclk0.hw,
+ [CLKID_MCLK1_SEL] = &c3_mclk1_sel.hw,
+ [CLKID_MCLK1_SEL_EN] = &c3_mclk1_div_en.hw,
+ [CLKID_MCLK1_DIV] = &c3_mclk1_div.hw,
+ [CLKID_MCLK1] = &c3_mclk1.hw
+};
+
+static const struct meson_clkc_data c3_pll_clkc_data = {
+ .hw_clks = {
+ .hws = c3_pll_hw_clks,
+ .num = ARRAY_SIZE(c3_pll_hw_clks),
+ },
+};
static const struct of_device_id c3_pll_clkc_match_table[] = {
{
.compatible = "amlogic,c3-pll-clkc",
+ .data = &c3_pll_clkc_data,
},
{}
};
MODULE_DEVICE_TABLE(of, c3_pll_clkc_match_table);
-static struct platform_driver c3_pll_driver = {
- .probe = c3_pll_probe,
+static struct platform_driver c3_pll_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "c3-pll-clkc",
.of_match_table = c3_pll_clkc_match_table,
},
};
-module_platform_driver(c3_pll_driver);
+module_platform_driver(c3_pll_clkc_driver);
MODULE_DESCRIPTION("Amlogic C3 PLL Clock Controller driver");
MODULE_AUTHOR("Chuan Liu <chuan.liu@amlogic.com>");
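The c3-pll driver gives up its private probe entirely: the MMIO regmap setup, the sparse hw_clks registration loop and the provider registration now happen behind the shared meson_clkc_mmio_probe(), keyed off the meson_clkc_data attached to the of_device_id entry. A minimal sketch of what that shared helper plausibly does, reconstructed from the probe removed above; the sketch_* names and the regmap_config handling are assumptions, and the real helper (presumably in meson-clkc-utils.c) may derive them differently:

/*
 * Sketch only, reconstructed from the per-driver probe removed above.
 * Needs linux/platform_device.h, linux/regmap.h, linux/of_device.h and
 * meson-clkc-utils.h; the config below is a placeholder, the real helper
 * may obtain it another way (e.g. from meson_clkc_data).
 */
static const struct regmap_config sketch_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int sketch_clkc_mmio_probe(struct platform_device *pdev)
{
	const struct meson_clkc_data *data;
	struct device *dev = &pdev->dev;
	struct regmap *regmap;
	void __iomem *base;
	int clkid, ret;

	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	regmap = devm_regmap_init_mmio(dev, base, &sketch_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	for (clkid = 0; clkid < data->hw_clks.num; clkid++) {
		/* array might be sparse */
		if (!data->hw_clks.hws[clkid])
			continue;

		ret = devm_clk_hw_register(dev, data->hw_clks.hws[clkid]);
		if (ret)
			return ret;
	}

	return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
					   (void *)&data->hw_clks);
}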
diff --git a/drivers/clk/meson/clk-regmap.h b/drivers/clk/meson/clk-regmap.h
index f8cac2df5755..8e5c39b023e1 100644
--- a/drivers/clk/meson/clk-regmap.h
+++ b/drivers/clk/meson/clk-regmap.h
@@ -118,24 +118,4 @@ clk_get_regmap_mux_data(struct clk_regmap *clk)
extern const struct clk_ops clk_regmap_mux_ops;
extern const struct clk_ops clk_regmap_mux_ro_ops;
-#define __MESON_PCLK(_name, _reg, _bit, _ops, _pname) \
-struct clk_regmap _name = { \
- .data = &(struct clk_regmap_gate_data){ \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name, \
- .ops = _ops, \
- .parent_hws = (const struct clk_hw *[]) { _pname }, \
- .num_parents = 1, \
- .flags = (CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED), \
- }, \
-}
-
-#define MESON_PCLK(_name, _reg, _bit, _pname) \
- __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ops, _pname)
-
-#define MESON_PCLK_RO(_name, _reg, _bit, _pname) \
- __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ro_ops, _pname)
#endif /* __CLK_REGMAP_H */
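With this removal, clk-regmap.h no longer provides the generic peripheral-gate helpers; the MESON_PCLK() wrapped by the g12a-aoclk.c hunk below is a relocated variant whose definition is not part of this diff. Judging from the call sites alone, it now takes an explicit clk_parent_data pointer plus per-gate flags instead of hard-coding a parent hw and CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED. A plausible shape, inferred from those call sites only and not authoritative:

#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags)			\
struct clk_regmap _name = {						\
	.data = &(struct clk_regmap_gate_data){				\
		.offset = (_reg),					\
		.bit_idx = (_bit),					\
	},								\
	.hw.init = &(struct clk_init_data) {				\
		.name = #_name,						\
		.ops = &clk_regmap_gate_ops,				\
		.parent_data = (_pdata),				\
		.num_parents = 1,					\
		.flags = (_flags),					\
	},								\
}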
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index 4095a1b2bb80..96981da271fa 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -37,46 +37,38 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
+static const struct clk_parent_data g12a_ao_pclk_parents = { .fw_name = "mpeg-clk" };
+
+#define G12A_AO_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(g12a_ao_##_name, _reg, _bit, &g12a_ao_pclk_parents, _flags)
+
/*
- * Like every other peripheral clock gate in Amlogic Clock drivers,
- * we are using CLK_IGNORE_UNUSED here, so we keep the state of the
- * bootloader. The goal is to remove this flag at some point.
- * Actually removing it will require some extensive test to be done safely.
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historic reasons.
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
*/
-#define AXG_AO_GATE(_name, _reg, _bit) \
-static struct clk_regmap g12a_aoclk_##_name = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = (_reg), \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = "g12a_ao_" #_name, \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static G12A_AO_PCLK(ahb, AO_CLK_GATE0, 0, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ir_in, AO_CLK_GATE0, 1, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(i2c_m0, AO_CLK_GATE0, 2, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(i2c_s0, AO_CLK_GATE0, 3, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(uart, AO_CLK_GATE0, 4, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(prod_i2c, AO_CLK_GATE0, 5, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(uart2, AO_CLK_GATE0, 6, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ir_out, AO_CLK_GATE0, 7, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(saradc, AO_CLK_GATE0, 8, CLK_IGNORE_UNUSED);
-AXG_AO_GATE(ahb, AO_CLK_GATE0, 0);
-AXG_AO_GATE(ir_in, AO_CLK_GATE0, 1);
-AXG_AO_GATE(i2c_m0, AO_CLK_GATE0, 2);
-AXG_AO_GATE(i2c_s0, AO_CLK_GATE0, 3);
-AXG_AO_GATE(uart, AO_CLK_GATE0, 4);
-AXG_AO_GATE(prod_i2c, AO_CLK_GATE0, 5);
-AXG_AO_GATE(uart2, AO_CLK_GATE0, 6);
-AXG_AO_GATE(ir_out, AO_CLK_GATE0, 7);
-AXG_AO_GATE(saradc, AO_CLK_GATE0, 8);
-AXG_AO_GATE(mailbox, AO_CLK_GATE0_SP, 0);
-AXG_AO_GATE(m3, AO_CLK_GATE0_SP, 1);
-AXG_AO_GATE(ahb_sram, AO_CLK_GATE0_SP, 2);
-AXG_AO_GATE(rti, AO_CLK_GATE0_SP, 3);
-AXG_AO_GATE(m4_fclk, AO_CLK_GATE0_SP, 4);
-AXG_AO_GATE(m4_hclk, AO_CLK_GATE0_SP, 5);
+static G12A_AO_PCLK(mailbox, AO_CLK_GATE0_SP, 0, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m3, AO_CLK_GATE0_SP, 1, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(ahb_sram, AO_CLK_GATE0_SP, 2, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(rti, AO_CLK_GATE0_SP, 3, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m4_fclk, AO_CLK_GATE0_SP, 4, CLK_IGNORE_UNUSED);
+static G12A_AO_PCLK(m4_hclk, AO_CLK_GATE0_SP, 5, CLK_IGNORE_UNUSED);
-static struct clk_regmap g12a_aoclk_cts_oscin = {
+static struct clk_regmap g12a_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 14,
@@ -103,22 +95,22 @@ static const struct meson_clk_dualdiv_param g12a_32k_div_table[] = {
/* 32k_by_oscin clock */
-static struct clk_regmap g12a_aoclk_32k_by_oscin_pre = {
+static struct clk_regmap g12a_ao_32k_by_oscin_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_pre",
+ .name = "ao_32k_by_oscin_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cts_oscin.hw
+ &g12a_ao_cts_oscin.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
+static struct clk_regmap g12a_ao_32k_by_oscin_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -148,16 +140,16 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_div = {
.table = g12a_32k_div_table,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_div",
+ .name = "ao_32k_by_oscin_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_pre.hw
+ &g12a_ao_32k_by_oscin_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
+static struct clk_regmap g12a_ao_32k_by_oscin_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -165,27 +157,27 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin_sel = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin_sel",
+ .name = "ao_32k_by_oscin_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_div.hw,
- &g12a_aoclk_32k_by_oscin_pre.hw,
+ &g12a_ao_32k_by_oscin_div.hw,
+ &g12a_ao_32k_by_oscin_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_32k_by_oscin = {
+static struct clk_regmap g12a_ao_32k_by_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_32k_by_oscin",
+ .name = "ao_32k_by_oscin",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_32k_by_oscin_sel.hw
+ &g12a_ao_32k_by_oscin_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -194,22 +186,22 @@ static struct clk_regmap g12a_aoclk_32k_by_oscin = {
/* cec clock */
-static struct clk_regmap g12a_aoclk_cec_pre = {
+static struct clk_regmap g12a_ao_cec_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_CEC_CLK_CNTL_REG0,
.bit_idx = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_pre",
+ .name = "ao_cec_pre",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cts_oscin.hw
+ &g12a_ao_cts_oscin.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_cec_div = {
+static struct clk_regmap g12a_ao_cec_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_CEC_CLK_CNTL_REG0,
@@ -239,16 +231,16 @@ static struct clk_regmap g12a_aoclk_cec_div = {
.table = g12a_32k_div_table,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_div",
+ .name = "ao_cec_div",
.ops = &meson_clk_dualdiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_pre.hw
+ &g12a_ao_cec_pre.hw
},
.num_parents = 1,
},
};
-static struct clk_regmap g12a_aoclk_cec_sel = {
+static struct clk_regmap g12a_ao_cec_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_CEC_CLK_CNTL_REG1,
.mask = 0x1,
@@ -256,34 +248,34 @@ static struct clk_regmap g12a_aoclk_cec_sel = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec_sel",
+ .name = "ao_cec_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_div.hw,
- &g12a_aoclk_cec_pre.hw,
+ &g12a_ao_cec_div.hw,
+ &g12a_ao_cec_pre.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_cec = {
+static struct clk_regmap g12a_ao_cec = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_CEC_CLK_CNTL_REG0,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cec",
+ .name = "ao_cec",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_cec_sel.hw
+ &g12a_ao_cec_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
+static struct clk_regmap g12a_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -291,10 +283,10 @@ static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_cts_rtc_oscin",
+ .name = "ao_cts_rtc_oscin",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
- { .hw = &g12a_aoclk_32k_by_oscin.hw },
+ { .hw = &g12a_ao_32k_by_oscin.hw },
{ .fw_name = "ext-32k-0", },
},
.num_parents = 2,
@@ -302,7 +294,7 @@ static struct clk_regmap g12a_aoclk_cts_rtc_oscin = {
},
};
-static struct clk_regmap g12a_aoclk_clk81 = {
+static struct clk_regmap g12a_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -310,68 +302,74 @@ static struct clk_regmap g12a_aoclk_clk81 = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
+ /*
+ * NOTE: this is one of the infamous clocks the pwm driver
+ * can request directly by its global name. It's wrong but
+ * there is not much we can do about it until support
+ * for the old pwm bindings is dropped.
+ */
.name = "g12a_ao_clk81",
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &g12a_aoclk_cts_rtc_oscin.hw },
+ { .hw = &g12a_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_saradc_mux = {
+static struct clk_regmap g12a_ao_saradc_mux = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_SAR_CLK,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_mux",
+ .name = "ao_saradc_mux",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &g12a_aoclk_clk81.hw },
+ { .hw = &g12a_ao_clk81.hw },
},
.num_parents = 2,
},
};
-static struct clk_regmap g12a_aoclk_saradc_div = {
+static struct clk_regmap g12a_ao_saradc_div = {
.data = &(struct clk_regmap_div_data) {
.offset = AO_SAR_CLK,
.shift = 0,
.width = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_div",
+ .name = "ao_saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_saradc_mux.hw
+ &g12a_ao_saradc_mux.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap g12a_aoclk_saradc_gate = {
+static struct clk_regmap g12a_ao_saradc_gate = {
.data = &(struct clk_regmap_gate_data) {
.offset = AO_SAR_CLK,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "g12a_ao_saradc_gate",
+ .name = "ao_saradc_gate",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_aoclk_saradc_div.hw
+ &g12a_ao_saradc_div.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int g12a_aoclk_reset[] = {
+static const unsigned int g12a_ao_reset[] = {
[RESET_AO_IR_IN] = 16,
[RESET_AO_UART] = 17,
[RESET_AO_I2C_M] = 18,
@@ -381,65 +379,67 @@ static const unsigned int g12a_aoclk_reset[] = {
[RESET_AO_IR_OUT] = 23,
};
-static struct clk_hw *g12a_aoclk_hw_clks[] = {
- [CLKID_AO_AHB] = &g12a_aoclk_ahb.hw,
- [CLKID_AO_IR_IN] = &g12a_aoclk_ir_in.hw,
- [CLKID_AO_I2C_M0] = &g12a_aoclk_i2c_m0.hw,
- [CLKID_AO_I2C_S0] = &g12a_aoclk_i2c_s0.hw,
- [CLKID_AO_UART] = &g12a_aoclk_uart.hw,
- [CLKID_AO_PROD_I2C] = &g12a_aoclk_prod_i2c.hw,
- [CLKID_AO_UART2] = &g12a_aoclk_uart2.hw,
- [CLKID_AO_IR_OUT] = &g12a_aoclk_ir_out.hw,
- [CLKID_AO_SAR_ADC] = &g12a_aoclk_saradc.hw,
- [CLKID_AO_MAILBOX] = &g12a_aoclk_mailbox.hw,
- [CLKID_AO_M3] = &g12a_aoclk_m3.hw,
- [CLKID_AO_AHB_SRAM] = &g12a_aoclk_ahb_sram.hw,
- [CLKID_AO_RTI] = &g12a_aoclk_rti.hw,
- [CLKID_AO_M4_FCLK] = &g12a_aoclk_m4_fclk.hw,
- [CLKID_AO_M4_HCLK] = &g12a_aoclk_m4_hclk.hw,
- [CLKID_AO_CLK81] = &g12a_aoclk_clk81.hw,
- [CLKID_AO_SAR_ADC_SEL] = &g12a_aoclk_saradc_mux.hw,
- [CLKID_AO_SAR_ADC_DIV] = &g12a_aoclk_saradc_div.hw,
- [CLKID_AO_SAR_ADC_CLK] = &g12a_aoclk_saradc_gate.hw,
- [CLKID_AO_CTS_OSCIN] = &g12a_aoclk_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &g12a_aoclk_32k_by_oscin_pre.hw,
- [CLKID_AO_32K_DIV] = &g12a_aoclk_32k_by_oscin_div.hw,
- [CLKID_AO_32K_SEL] = &g12a_aoclk_32k_by_oscin_sel.hw,
- [CLKID_AO_32K] = &g12a_aoclk_32k_by_oscin.hw,
- [CLKID_AO_CEC_PRE] = &g12a_aoclk_cec_pre.hw,
- [CLKID_AO_CEC_DIV] = &g12a_aoclk_cec_div.hw,
- [CLKID_AO_CEC_SEL] = &g12a_aoclk_cec_sel.hw,
- [CLKID_AO_CEC] = &g12a_aoclk_cec.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &g12a_aoclk_cts_rtc_oscin.hw,
+static struct clk_hw *g12a_ao_hw_clks[] = {
+ [CLKID_AO_AHB] = &g12a_ao_ahb.hw,
+ [CLKID_AO_IR_IN] = &g12a_ao_ir_in.hw,
+ [CLKID_AO_I2C_M0] = &g12a_ao_i2c_m0.hw,
+ [CLKID_AO_I2C_S0] = &g12a_ao_i2c_s0.hw,
+ [CLKID_AO_UART] = &g12a_ao_uart.hw,
+ [CLKID_AO_PROD_I2C] = &g12a_ao_prod_i2c.hw,
+ [CLKID_AO_UART2] = &g12a_ao_uart2.hw,
+ [CLKID_AO_IR_OUT] = &g12a_ao_ir_out.hw,
+ [CLKID_AO_SAR_ADC] = &g12a_ao_saradc.hw,
+ [CLKID_AO_MAILBOX] = &g12a_ao_mailbox.hw,
+ [CLKID_AO_M3] = &g12a_ao_m3.hw,
+ [CLKID_AO_AHB_SRAM] = &g12a_ao_ahb_sram.hw,
+ [CLKID_AO_RTI] = &g12a_ao_rti.hw,
+ [CLKID_AO_M4_FCLK] = &g12a_ao_m4_fclk.hw,
+ [CLKID_AO_M4_HCLK] = &g12a_ao_m4_hclk.hw,
+ [CLKID_AO_CLK81] = &g12a_ao_clk81.hw,
+ [CLKID_AO_SAR_ADC_SEL] = &g12a_ao_saradc_mux.hw,
+ [CLKID_AO_SAR_ADC_DIV] = &g12a_ao_saradc_div.hw,
+ [CLKID_AO_SAR_ADC_CLK] = &g12a_ao_saradc_gate.hw,
+ [CLKID_AO_CTS_OSCIN] = &g12a_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &g12a_ao_32k_by_oscin_pre.hw,
+ [CLKID_AO_32K_DIV] = &g12a_ao_32k_by_oscin_div.hw,
+ [CLKID_AO_32K_SEL] = &g12a_ao_32k_by_oscin_sel.hw,
+ [CLKID_AO_32K] = &g12a_ao_32k_by_oscin.hw,
+ [CLKID_AO_CEC_PRE] = &g12a_ao_cec_pre.hw,
+ [CLKID_AO_CEC_DIV] = &g12a_ao_cec_div.hw,
+ [CLKID_AO_CEC_SEL] = &g12a_ao_cec_sel.hw,
+ [CLKID_AO_CEC] = &g12a_ao_cec.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &g12a_ao_cts_rtc_oscin.hw,
};
-static const struct meson_aoclk_data g12a_aoclkc_data = {
+static const struct meson_aoclk_data g12a_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(g12a_aoclk_reset),
- .reset = g12a_aoclk_reset,
- .hw_clks = {
- .hws = g12a_aoclk_hw_clks,
- .num = ARRAY_SIZE(g12a_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(g12a_ao_reset),
+ .reset = g12a_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = g12a_ao_hw_clks,
+ .num = ARRAY_SIZE(g12a_ao_hw_clks),
+ },
},
};
-static const struct of_device_id g12a_aoclkc_match_table[] = {
+static const struct of_device_id g12a_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-g12a-aoclkc",
- .data = &g12a_aoclkc_data,
+ .data = &g12a_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, g12a_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, g12a_ao_clkc_match_table);
-static struct platform_driver g12a_aoclkc_driver = {
+static struct platform_driver g12a_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
.name = "g12a-aoclkc",
- .of_match_table = g12a_aoclkc_match_table,
+ .of_match_table = g12a_ao_clkc_match_table,
},
};
-module_platform_driver(g12a_aoclkc_driver);
+module_platform_driver(g12a_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic G12A Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
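The g12a.c diff below mostly moves definitions around: the PLL blocks migrate to the top of the file and the CPU/DSU clock muxes are renamed from the datasheet's premux/postmux terms to the dyn0/dyn1 scheme, without touching the hardware description. As a worked example of the PCIe PLL chain carried over there: with the usual 24 MHz xtal, the single-entry PLL_PARAMS(150, 1) table and the OD value of 9 that the init sequence programs into bits [20:16] of HHI_PCIE_PLL_CNTL0 (0x20090496), the tree lands exactly on the 100 MHz PHY reference:

  24 MHz * 150 / 1              = 3600 MHz   pcie_pll_dco
  3600 MHz / 2                  = 1800 MHz   pcie_pll_dco_div2
  1800 MHz / 9  (one-based OD)  =  200 MHz   pcie_pll_od
  200 MHz / 2                   =  100 MHz   pcie_pll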
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index 66f0e817e416..185b6348251d 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -23,7 +23,7 @@
#include "clk-cpu-dyndiv.h"
#include "vid-pll-div.h"
#include "vclk.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include <dt-bindings/clock/g12a-clkc.h>
@@ -386,6 +386,451 @@ static struct clk_fixed_factor g12b_sys1_pll_div16 = {
},
};
+static const struct pll_mult_range g12a_gp0_pll_mult_range = {
+ .min = 125,
+ .max = 255,
+};
+
+/*
+ * Internal gp0 pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_gp0_pll_init_regs[] = {
+ { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
+ { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
+ { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_gp0_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP0_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP0_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_gp0_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_gp0_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP0_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp0_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_gp0_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap sm1_gp1_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_GP1_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_GP1_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ /* This clock feeds the DSU, avoid disabling it */
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static struct clk_regmap sm1_gp1_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_GP1_PLL_CNTL0,
+ .shift = 16,
+ .width = 3,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "gp1_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &sm1_gp1_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+/*
+ * Internal hifi pll emulation configuration parameters
+ */
+static const struct reg_sequence g12a_hifi_pll_init_regs[] = {
+ { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
+ { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
+ { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
+ { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
+ { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
+};
+
+static struct clk_regmap g12a_hifi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HIFI_PLL_CNTL1,
+ .shift = 0,
+ .width = 17,
+ },
+ .l = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HIFI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .range = &g12a_gp0_pll_mult_range,
+ .init_regs = g12a_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_hifi_pll_init_regs),
+ .flags = CLK_MESON_PLL_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll_dco",
+ .ops = &meson_clk_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_hifi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HIFI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = (CLK_DIVIDER_POWER_OF_TWO |
+ CLK_DIVIDER_ROUND_CLOSEST),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hifi_pll",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hifi_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+/*
+ * The Meson G12A PCIE PLL is fine-tuned to deliver a very precise
+ * 100MHz reference clock for the PCIe Analog PHY, and thus requires
+ * a strict register sequence to enable the PLL.
+ */
+static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
+ { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
+ { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
+ { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
+ { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
+ { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
+ { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
+};
+
+/* Keep a single entry table for recalc/round_rate() ops */
+static const struct pll_params_table g12a_pcie_pll_table[] = {
+ PLL_PARAMS(150, 1),
+ {0, 0},
+};
+
+static struct clk_regmap g12a_pcie_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_PCIE_PLL_CNTL1,
+ .shift = 0,
+ .width = 12,
+ },
+ .l = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 31,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_PCIE_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ .table = g12a_pcie_pll_table,
+ .init_regs = g12a_pcie_pll_init_regs,
+ .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco",
+ .ops = &meson_clk_pcie_pll_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_dco_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_pcie_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_PCIE_PLL_CNTL0,
+ .shift = 16,
+ .width = 5,
+ .flags = CLK_DIVIDER_ROUND_CLOSEST |
+ CLK_DIVIDER_ONE_BASED |
+ CLK_DIVIDER_ALLOW_ZERO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_od",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_dco_div2.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_fixed_factor g12a_pcie_pll = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_pll_pll",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_pcie_pll_od.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_dco = {
+ .data = &(struct meson_clk_pll_data){
+ .en = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 28,
+ .width = 1,
+ },
+ .m = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 0,
+ .width = 8,
+ },
+ .n = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 10,
+ .width = 5,
+ },
+ .frac = {
+ .reg_off = HHI_HDMI_PLL_CNTL1,
+ .shift = 0,
+ .width = 16,
+ },
+ .l = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 30,
+ .width = 1,
+ },
+ .rst = {
+ .reg_off = HHI_HDMI_PLL_CNTL0,
+ .shift = 29,
+ .width = 1,
+ },
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_dco",
+ .ops = &meson_clk_pll_ro_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ },
+ .num_parents = 1,
+ /*
+ * The display driver directly handles the hdmi pll registers ATM; we
+ * need NOCACHE to keep our view of the clock as accurate as possible.
+ */
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 16,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_dco.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll_od2 = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 18,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_od2",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_hdmi_pll = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_HDMI_PLL_CNTL0,
+ .shift = 20,
+ .width = 2,
+ .flags = CLK_DIVIDER_POWER_OF_TWO,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll",
+ .ops = &clk_regmap_divider_ro_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_hdmi_pll_od2.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_fixed_factor g12a_fclk_div2_div = {
.mult = 1,
.div = 2,
@@ -459,36 +904,166 @@ static struct clk_regmap g12a_fclk_div3 = {
},
};
-/* Datasheet names this field as "premux0" */
-static struct clk_regmap g12a_cpu_clk_premux0 = {
+
+static struct clk_fixed_factor g12a_fclk_div4_div = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div4 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 21,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div4",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div4_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 22,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div5_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div7_div = {
+ .mult = 1,
+ .div = 7,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div7 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 23,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div7",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div7_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_fclk_div2p5_div = {
+ .mult = 1,
+ .div = 5,
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_fclk_div2p5 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_FIX_PLL_CNTL1,
+ .bit_idx = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "fclk_div2p5",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fclk_div2p5_div.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor g12a_mpll_50m_div = {
+ .mult = 1,
+ .div = 80,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_50m_div",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap g12a_mpll_50m = {
.data = &(struct clk_regmap_mux_data){
- .offset = HHI_SYS_CPU_CLK_CNTL0,
- .mask = 0x3,
- .shift = 0,
- .flags = CLK_MUX_ROUND_CLOSEST,
+ .offset = HHI_FIX_PLL_CNTL3,
+ .mask = 0x1,
+ .shift = 5,
},
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk_dyn0_sel",
- .ops = &clk_regmap_mux_ops,
+ .name = "mpll_50m",
+ .ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
- { .hw = &g12a_fclk_div2.hw },
- { .hw = &g12a_fclk_div3.hw },
+ { .hw = &g12a_mpll_50m_div.hw },
},
- .num_parents = 3,
- .flags = CLK_SET_RATE_PARENT,
+ .num_parents = 2,
},
};
-/* Datasheet names this field as "premux1" */
-static struct clk_regmap g12a_cpu_clk_premux1 = {
+static struct clk_fixed_factor g12a_mpll_prediv = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll_prediv",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_fixed_pll_dco.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+/* Datasheet names this field as "premux0" */
+static struct clk_regmap g12a_cpu_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
- .shift = 16,
+ .shift = 0,
+ .flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
- .name = "cpu_clk_dyn1_sel",
+ .name = "cpu_clk_dyn0_sel",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
@@ -496,13 +1071,12 @@ static struct clk_regmap g12a_cpu_clk_premux1 = {
{ .hw = &g12a_fclk_div3.hw },
},
.num_parents = 3,
- /* This sub-tree is used a parking clock */
- .flags = CLK_SET_RATE_NO_REPARENT
+ .flags = CLK_SET_RATE_PARENT,
},
};
/* Datasheet names this field as "mux0_divn_tcnt" */
-static struct clk_regmap g12a_cpu_clk_mux0_div = {
+static struct clk_regmap g12a_cpu_clk_dyn0_div = {
.data = &(struct meson_clk_cpu_dyndiv_data){
.div = {
.reg_off = HHI_SYS_CPU_CLK_CNTL0,
@@ -519,7 +1093,7 @@ static struct clk_regmap g12a_cpu_clk_mux0_div = {
.name = "cpu_clk_dyn0_div",
.ops = &meson_clk_cpu_dyndiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux0.hw
+ &g12a_cpu_clk_dyn0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -527,7 +1101,7 @@ static struct clk_regmap g12a_cpu_clk_mux0_div = {
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap g12a_cpu_clk_postmux0 = {
+static struct clk_regmap g12a_cpu_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
@@ -538,16 +1112,37 @@ static struct clk_regmap g12a_cpu_clk_postmux0 = {
.name = "cpu_clk_dyn0",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux0.hw,
- &g12a_cpu_clk_mux0_div.hw,
+ &g12a_cpu_clk_dyn0_sel.hw,
+ &g12a_cpu_clk_dyn0_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap g12a_cpu_clk_dyn1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL0,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cpu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ },
+ .num_parents = 3,
+ /* This sub-tree is used as a parking clock */
+ .flags = CLK_SET_RATE_NO_REPARENT
+ },
+};
+
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap g12a_cpu_clk_mux1_div = {
+static struct clk_regmap g12a_cpu_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.shift = 20,
@@ -557,14 +1152,14 @@ static struct clk_regmap g12a_cpu_clk_mux1_div = {
.name = "cpu_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux1.hw
+ &g12a_cpu_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap g12a_cpu_clk_postmux1 = {
+static struct clk_regmap g12a_cpu_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x1,
@@ -574,8 +1169,8 @@ static struct clk_regmap g12a_cpu_clk_postmux1 = {
.name = "cpu_clk_dyn1",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_premux1.hw,
- &g12a_cpu_clk_mux1_div.hw,
+ &g12a_cpu_clk_dyn1_sel.hw,
+ &g12a_cpu_clk_dyn1_div.hw,
},
.num_parents = 2,
/* This sub-tree is used as a parking clock */
@@ -595,8 +1190,8 @@ static struct clk_regmap g12a_cpu_clk_dyn = {
.name = "cpu_clk_dyn",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_cpu_clk_postmux0.hw,
- &g12a_cpu_clk_postmux1.hw,
+ &g12a_cpu_clk_dyn0.hw,
+ &g12a_cpu_clk_dyn1.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -644,7 +1239,7 @@ static struct clk_regmap g12b_cpu_clk = {
};
/* Datasheet names this field as "premux0" */
-static struct clk_regmap g12b_cpub_clk_premux0 = {
+static struct clk_regmap g12b_cpub_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x3,
@@ -665,7 +1260,7 @@ static struct clk_regmap g12b_cpub_clk_premux0 = {
};
/* Datasheet names this field as "mux0_divn_tcnt" */
-static struct clk_regmap g12b_cpub_clk_mux0_div = {
+static struct clk_regmap g12b_cpub_clk_dyn0_div = {
.data = &(struct meson_clk_cpu_dyndiv_data){
.div = {
.reg_off = HHI_SYS_CPUB_CLK_CNTL,
@@ -682,7 +1277,7 @@ static struct clk_regmap g12b_cpub_clk_mux0_div = {
.name = "cpub_clk_dyn0_div",
.ops = &meson_clk_cpu_dyndiv_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux0.hw
+ &g12b_cpub_clk_dyn0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -690,7 +1285,7 @@ static struct clk_regmap g12b_cpub_clk_mux0_div = {
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap g12b_cpub_clk_postmux0 = {
+static struct clk_regmap g12b_cpub_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
@@ -701,8 +1296,8 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
.name = "cpub_clk_dyn0",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux0.hw,
- &g12b_cpub_clk_mux0_div.hw
+ &g12b_cpub_clk_dyn0_sel.hw,
+ &g12b_cpub_clk_dyn0_div.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -710,7 +1305,7 @@ static struct clk_regmap g12b_cpub_clk_postmux0 = {
};
/* Datasheet names this field as "premux1" */
-static struct clk_regmap g12b_cpub_clk_premux1 = {
+static struct clk_regmap g12b_cpub_clk_dyn1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x3,
@@ -731,7 +1326,7 @@ static struct clk_regmap g12b_cpub_clk_premux1 = {
};
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap g12b_cpub_clk_mux1_div = {
+static struct clk_regmap g12b_cpub_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.shift = 20,
@@ -741,14 +1336,14 @@ static struct clk_regmap g12b_cpub_clk_mux1_div = {
.name = "cpub_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux1.hw
+ &g12b_cpub_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap g12b_cpub_clk_postmux1 = {
+static struct clk_regmap g12b_cpub_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL,
.mask = 0x1,
@@ -758,8 +1353,8 @@ static struct clk_regmap g12b_cpub_clk_postmux1 = {
.name = "cpub_clk_dyn1",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_premux1.hw,
- &g12b_cpub_clk_mux1_div.hw
+ &g12b_cpub_clk_dyn1_sel.hw,
+ &g12b_cpub_clk_dyn1_div.hw
},
.num_parents = 2,
/* This sub-tree is used as a parking clock */
@@ -779,8 +1374,8 @@ static struct clk_regmap g12b_cpub_clk_dyn = {
.name = "cpub_clk_dyn",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_postmux0.hw,
- &g12b_cpub_clk_postmux1.hw
+ &g12b_cpub_clk_dyn0.hw,
+ &g12b_cpub_clk_dyn1.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -807,10 +1402,8 @@ static struct clk_regmap g12b_cpub_clk = {
},
};
-static struct clk_regmap sm1_gp1_pll;
-
/* Datasheet names this field as "premux0" */
-static struct clk_regmap sm1_dsu_clk_premux0 = {
+static struct clk_regmap sm1_dsu_clk_dyn0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x3,
@@ -829,28 +1422,8 @@ static struct clk_regmap sm1_dsu_clk_premux0 = {
},
};
-/* Datasheet names this field as "premux1" */
-static struct clk_regmap sm1_dsu_clk_premux1 = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_SYS_CPU_CLK_CNTL5,
- .mask = 0x3,
- .shift = 16,
- },
- .hw.init = &(struct clk_init_data){
- .name = "dsu_clk_dyn1_sel",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .hw = &g12a_fclk_div2.hw },
- { .hw = &g12a_fclk_div3.hw },
- { .hw = &sm1_gp1_pll.hw },
- },
- .num_parents = 4,
- },
-};
-
/* Datasheet names this field as "Mux0_divn_tcnt" */
-static struct clk_regmap sm1_dsu_clk_mux0_div = {
+static struct clk_regmap sm1_dsu_clk_dyn0_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.shift = 4,
@@ -860,14 +1433,14 @@ static struct clk_regmap sm1_dsu_clk_mux0_div = {
.name = "dsu_clk_dyn0_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux0.hw
+ &sm1_dsu_clk_dyn0_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux0" */
-static struct clk_regmap sm1_dsu_clk_postmux0 = {
+static struct clk_regmap sm1_dsu_clk_dyn0 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x1,
@@ -877,15 +1450,35 @@ static struct clk_regmap sm1_dsu_clk_postmux0 = {
.name = "dsu_clk_dyn0",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux0.hw,
- &sm1_dsu_clk_mux0_div.hw,
+ &sm1_dsu_clk_dyn0_sel.hw,
+ &sm1_dsu_clk_dyn0_div.hw,
},
.num_parents = 2,
},
};
+/* Datasheet names this field as "premux1" */
+static struct clk_regmap sm1_dsu_clk_dyn1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_SYS_CPU_CLK_CNTL5,
+ .mask = 0x3,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "dsu_clk_dyn1_sel",
+ .ops = &clk_regmap_mux_ro_ops,
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xtal", },
+ { .hw = &g12a_fclk_div2.hw },
+ { .hw = &g12a_fclk_div3.hw },
+ { .hw = &sm1_gp1_pll.hw },
+ },
+ .num_parents = 4,
+ },
+};
+
/* Datasheet names this field as "Mux1_divn_tcnt" */
-static struct clk_regmap sm1_dsu_clk_mux1_div = {
+static struct clk_regmap sm1_dsu_clk_dyn1_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.shift = 20,
@@ -895,14 +1488,14 @@ static struct clk_regmap sm1_dsu_clk_mux1_div = {
.name = "dsu_clk_dyn1_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux1.hw
+ &sm1_dsu_clk_dyn1_sel.hw
},
.num_parents = 1,
},
};
/* Datasheet names this field as "postmux1" */
-static struct clk_regmap sm1_dsu_clk_postmux1 = {
+static struct clk_regmap sm1_dsu_clk_dyn1 = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL5,
.mask = 0x1,
@@ -912,8 +1505,8 @@ static struct clk_regmap sm1_dsu_clk_postmux1 = {
.name = "dsu_clk_dyn1",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_premux1.hw,
- &sm1_dsu_clk_mux1_div.hw,
+ &sm1_dsu_clk_dyn1_sel.hw,
+ &sm1_dsu_clk_dyn1_div.hw,
},
.num_parents = 2,
},
@@ -930,8 +1523,8 @@ static struct clk_regmap sm1_dsu_clk_dyn = {
.name = "dsu_clk_dyn",
.ops = &clk_regmap_mux_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &sm1_dsu_clk_postmux0.hw,
- &sm1_dsu_clk_postmux1.hw,
+ &sm1_dsu_clk_dyn0.hw,
+ &sm1_dsu_clk_dyn1.hw,
},
.num_parents = 2,
},
@@ -1043,7 +1636,7 @@ static struct notifier_block g12a_cpu_clk_mux_nb = {
.notifier_call = g12a_cpu_clk_mux_notifier_cb,
};
-struct g12a_cpu_clk_postmux_nb_data {
+struct g12a_cpu_clk_dyn_nb_data {
struct notifier_block nb;
struct clk_hw *xtal;
struct clk_hw *cpu_clk_dyn;
@@ -1052,33 +1645,33 @@ struct g12a_cpu_clk_postmux_nb_data {
struct clk_hw *cpu_clk_premux1;
};
-static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
- unsigned long event, void *data)
+static int g12a_cpu_clk_dyn_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
{
- struct g12a_cpu_clk_postmux_nb_data *nb_data =
- container_of(nb, struct g12a_cpu_clk_postmux_nb_data, nb);
+ struct g12a_cpu_clk_dyn_nb_data *nb_data =
+ container_of(nb, struct g12a_cpu_clk_dyn_nb_data, nb);
switch (event) {
case PRE_RATE_CHANGE:
/*
- * This notifier means cpu_clk_postmux0 clock will be changed
+ * This notifier means the cpu_clk_dyn0 clock will be changed
* to feed cpu_clk, this is the current path:
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux0
- * \- cpu_clk_muxX_div
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0
+ * \- cpu_clk_dyn0_div
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
* OR
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
*/
- /* Setup cpu_clk_premux1 to xtal */
+ /* Setup cpu_clk_dyn1_sel to xtal */
clk_hw_set_parent(nb_data->cpu_clk_premux1,
nb_data->xtal);
- /* Setup cpu_clk_postmux1 to bypass divider */
+ /* Setup cpu_clk_dyn1 to bypass divider */
clk_hw_set_parent(nb_data->cpu_clk_postmux1,
nb_data->cpu_clk_premux1);
@@ -1090,8 +1683,8 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
* Now, cpu_clk is 24MHz in the current path :
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux1
- * \- cpu_clk_premux1
+ * \- cpu_clk_dyn1
+ * \- cpu_clk_dyn1_sel
* \- xtal
*/
@@ -1101,8 +1694,8 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
case POST_RATE_CHANGE:
/*
- * The cpu_clk_postmux0 has ben updated, now switch back
- * cpu_clk_dyn to cpu_clk_postmux0 and take the changes
+ * The cpu_clk_dyn0 has been updated, now switch back
+ * cpu_clk_dyn to cpu_clk_dyn0 and take the changes
* into account.
*/
@@ -1114,12 +1707,12 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
* new path :
* cpu_clk
* \- cpu_clk_dyn
- * \- cpu_clk_postmux0
- * \- cpu_clk_muxX_div
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0
+ * \- cpu_clk_dyn0_div
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
* OR
- * \- cpu_clk_premux0
+ * \- cpu_clk_dyn0_sel
* \- fclk_div3 or fclk_div2
*/
@@ -1132,20 +1725,20 @@ static int g12a_cpu_clk_postmux_notifier_cb(struct notifier_block *nb,
}
}
-static struct g12a_cpu_clk_postmux_nb_data g12a_cpu_clk_postmux0_nb_data = {
+static struct g12a_cpu_clk_dyn_nb_data g12a_cpu_clk_dyn0_nb_data = {
.cpu_clk_dyn = &g12a_cpu_clk_dyn.hw,
- .cpu_clk_postmux0 = &g12a_cpu_clk_postmux0.hw,
- .cpu_clk_postmux1 = &g12a_cpu_clk_postmux1.hw,
- .cpu_clk_premux1 = &g12a_cpu_clk_premux1.hw,
- .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+ .cpu_clk_postmux0 = &g12a_cpu_clk_dyn0.hw,
+ .cpu_clk_postmux1 = &g12a_cpu_clk_dyn1.hw,
+ .cpu_clk_premux1 = &g12a_cpu_clk_dyn1_sel.hw,
+ .nb.notifier_call = g12a_cpu_clk_dyn_notifier_cb,
};
-static struct g12a_cpu_clk_postmux_nb_data g12b_cpub_clk_postmux0_nb_data = {
+static struct g12a_cpu_clk_dyn_nb_data g12b_cpub_clk_dyn0_nb_data = {
.cpu_clk_dyn = &g12b_cpub_clk_dyn.hw,
- .cpu_clk_postmux0 = &g12b_cpub_clk_postmux0.hw,
- .cpu_clk_postmux1 = &g12b_cpub_clk_postmux1.hw,
- .cpu_clk_premux1 = &g12b_cpub_clk_premux1.hw,
- .nb.notifier_call = g12a_cpu_clk_postmux_notifier_cb,
+ .cpu_clk_postmux0 = &g12b_cpub_clk_dyn0.hw,
+ .cpu_clk_postmux1 = &g12b_cpub_clk_dyn1.hw,
+ .cpu_clk_premux1 = &g12b_cpub_clk_dyn1_sel.hw,
+ .nb.notifier_call = g12a_cpu_clk_dyn_notifier_cb,
};
struct g12a_sys_pll_nb_data {
@@ -1551,27 +2144,29 @@ static struct clk_fixed_factor g12b_cpub_clk_div8 = {
},
};
-static u32 mux_table_cpub[] = { 1, 2, 3, 4, 5, 6, 7 };
+static u32 g12b_cpub_clk_if_parents_val_table[] = { 1, 2, 3, 4, 5, 6, 7 };
+static const struct clk_hw *g12b_cpub_clk_if_parents[] = {
+ &g12b_cpub_clk_div2.hw,
+ &g12b_cpub_clk_div3.hw,
+ &g12b_cpub_clk_div4.hw,
+ &g12b_cpub_clk_div5.hw,
+ &g12b_cpub_clk_div6.hw,
+ &g12b_cpub_clk_div7.hw,
+ &g12b_cpub_clk_div8.hw,
+};
+
static struct clk_regmap g12b_cpub_clk_apb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 3,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_apb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1600,21 +2195,13 @@ static struct clk_regmap g12b_cpub_clk_atb_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 6,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_atb_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1643,21 +2230,13 @@ static struct clk_regmap g12b_cpub_clk_axi_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 9,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_axi_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1686,21 +2265,13 @@ static struct clk_regmap g12b_cpub_clk_trace_sel = {
.offset = HHI_SYS_CPUB_CLK_CNTL1,
.mask = 7,
.shift = 20,
- .table = mux_table_cpub,
+ .table = g12b_cpub_clk_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpub_clk_trace_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12b_cpub_clk_div2.hw,
- &g12b_cpub_clk_div3.hw,
- &g12b_cpub_clk_div4.hw,
- &g12b_cpub_clk_div5.hw,
- &g12b_cpub_clk_div6.hw,
- &g12b_cpub_clk_div7.hw,
- &g12b_cpub_clk_div8.hw
- },
- .num_parents = 7,
+ .parent_hws = g12b_cpub_clk_if_parents,
+ .num_parents = ARRAY_SIZE(g12b_cpub_clk_if_parents),
},
};
@@ -1724,600 +2295,6 @@ static struct clk_regmap g12b_cpub_clk_trace = {
},
};
-static const struct pll_mult_range g12a_gp0_pll_mult_range = {
- .min = 125,
- .max = 255,
-};
-
-/*
- * Internal gp0 pll emulation configuration parameters
- */
-static const struct reg_sequence g12a_gp0_init_regs[] = {
- { .reg = HHI_GP0_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_GP0_PLL_CNTL2, .def = 0x00000000 },
- { .reg = HHI_GP0_PLL_CNTL3, .def = 0x48681c00 },
- { .reg = HHI_GP0_PLL_CNTL4, .def = 0x33771290 },
- { .reg = HHI_GP0_PLL_CNTL5, .def = 0x39272000 },
- { .reg = HHI_GP0_PLL_CNTL6, .def = 0x56540000 },
-};
-
-static struct clk_regmap g12a_gp0_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_GP0_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_GP0_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .range = &g12a_gp0_pll_mult_range,
- .init_regs = g12a_gp0_init_regs,
- .init_count = ARRAY_SIZE(g12a_gp0_init_regs),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp0_pll_dco",
- .ops = &meson_clk_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_gp0_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_GP0_PLL_CNTL0,
- .shift = 16,
- .width = 3,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp0_pll",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_gp0_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap sm1_gp1_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_GP1_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_GP1_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp1_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- /* This clock feeds the DSU, avoid disabling it */
- .flags = CLK_IS_CRITICAL,
- },
-};
-
-static struct clk_regmap sm1_gp1_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_GP1_PLL_CNTL0,
- .shift = 16,
- .width = 3,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "gp1_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &sm1_gp1_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-/*
- * Internal hifi pll emulation configuration parameters
- */
-static const struct reg_sequence g12a_hifi_init_regs[] = {
- { .reg = HHI_HIFI_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_HIFI_PLL_CNTL2, .def = 0x00000000 },
- { .reg = HHI_HIFI_PLL_CNTL3, .def = 0x6a285c00 },
- { .reg = HHI_HIFI_PLL_CNTL4, .def = 0x65771290 },
- { .reg = HHI_HIFI_PLL_CNTL5, .def = 0x39272000 },
- { .reg = HHI_HIFI_PLL_CNTL6, .def = 0x56540000 },
-};
-
-static struct clk_regmap g12a_hifi_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_HIFI_PLL_CNTL1,
- .shift = 0,
- .width = 17,
- },
- .l = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_HIFI_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .range = &g12a_gp0_pll_mult_range,
- .init_regs = g12a_hifi_init_regs,
- .init_count = ARRAY_SIZE(g12a_hifi_init_regs),
- .flags = CLK_MESON_PLL_ROUND_CLOSEST,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hifi_pll_dco",
- .ops = &meson_clk_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_hifi_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HIFI_PLL_CNTL0,
- .shift = 16,
- .width = 2,
- .flags = (CLK_DIVIDER_POWER_OF_TWO |
- CLK_DIVIDER_ROUND_CLOSEST),
- },
- .hw.init = &(struct clk_init_data){
- .name = "hifi_pll",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hifi_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-/*
- * The Meson G12A PCIE PLL is fined tuned to deliver a very precise
- * 100MHz reference clock for the PCIe Analog PHY, and thus requires
- * a strict register sequence to enable the PLL.
- */
-static const struct reg_sequence g12a_pcie_pll_init_regs[] = {
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x20090496 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x30090496 },
- { .reg = HHI_PCIE_PLL_CNTL1, .def = 0x00000000 },
- { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001100 },
- { .reg = HHI_PCIE_PLL_CNTL3, .def = 0x10058e00 },
- { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x000100c0 },
- { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000048 },
- { .reg = HHI_PCIE_PLL_CNTL5, .def = 0x68000068, .delay_us = 20 },
- { .reg = HHI_PCIE_PLL_CNTL4, .def = 0x008100c0, .delay_us = 10 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x34090496 },
- { .reg = HHI_PCIE_PLL_CNTL0, .def = 0x14090496, .delay_us = 10 },
- { .reg = HHI_PCIE_PLL_CNTL2, .def = 0x00001000 },
-};
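The delay_us fields in the sequence above are load-bearing: regmap applies the writes in order and waits the requested time before issuing the next one, which is how the strict bring-up timing is reproduced. A minimal sketch of how such a table is applied through regmap (hedged; the meson PLL core is assumed to do the equivalent when it consumes init_regs/init_count):

/*
 * Illustrative only; requires <linux/regmap.h>. regmap_multi_reg_write()
 * honors the .delay_us of each struct reg_sequence entry, preserving the
 * timing of the bring-up sequence.
 */
static int apply_pcie_pll_init(struct regmap *map)
{
	return regmap_multi_reg_write(map, g12a_pcie_pll_init_regs,
				      ARRAY_SIZE(g12a_pcie_pll_init_regs));
}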
-
-/* Keep a single entry table for recalc/round_rate() ops */
-static const struct pll_params_table g12a_pcie_pll_table[] = {
- PLL_PARAMS(150, 1),
- {0, 0},
-};
-
-static struct clk_regmap g12a_pcie_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_PCIE_PLL_CNTL1,
- .shift = 0,
- .width = 12,
- },
- .l = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 31,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_PCIE_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- .table = g12a_pcie_pll_table,
- .init_regs = g12a_pcie_pll_init_regs,
- .init_count = ARRAY_SIZE(g12a_pcie_pll_init_regs),
- },
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_dco",
- .ops = &meson_clk_pcie_pll_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_pcie_pll_dco_div2 = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_dco_div2",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_pcie_pll_od = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_PCIE_PLL_CNTL0,
- .shift = 16,
- .width = 5,
- .flags = CLK_DIVIDER_ROUND_CLOSEST |
- CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ALLOW_ZERO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_od",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_dco_div2.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_fixed_factor g12a_pcie_pll = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "pcie_pll_pll",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_pcie_pll_od.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
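Working the chain forward from a 24 MHz xtal shows where the 100 MHz comes from: the single table entry gives a DCO of 24 x 150 = 3600 MHz, the fixed /2 after the DCO yields 1800 MHz, the one-based od divider (programmed to 9 by the 0x14090496 init word, bits [20:16]) gives 200 MHz, and the final fixed /2 above lands on 100 MHz. A commented check of that arithmetic (the od value of 9 is inferred from the init sequence, not from the divider code itself):

/* Hedged sanity check of the PCIe PLL chain, assuming a 24 MHz xtal. */
static unsigned long pcie_pll_rate_check(void)
{
	unsigned long xtal = 24000000UL;
	unsigned long dco = xtal * 150;		/* PLL_PARAMS(150, 1) -> 3.6 GHz */
	unsigned long div2 = dco / 2;		/* pcie_pll_dco_div2  -> 1.8 GHz */
	unsigned long od = div2 / 9;		/* one-based od = 9   -> 200 MHz */

	return od / 2;				/* pcie_pll fixed /2  -> 100 MHz */
}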
-
-static struct clk_regmap g12a_hdmi_pll_dco = {
- .data = &(struct meson_clk_pll_data){
- .en = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 28,
- .width = 1,
- },
- .m = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 0,
- .width = 8,
- },
- .n = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 10,
- .width = 5,
- },
- .frac = {
- .reg_off = HHI_HDMI_PLL_CNTL1,
- .shift = 0,
- .width = 16,
- },
- .l = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 30,
- .width = 1,
- },
- .rst = {
- .reg_off = HHI_HDMI_PLL_CNTL0,
- .shift = 29,
- .width = 1,
- },
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- },
- .num_parents = 1,
- /*
- * The display driver directly handles the hdmi pll registers ATM; we need
- * NOCACHE to keep our view of the clock as accurate as possible
- */
- .flags = CLK_GET_RATE_NOCACHE,
- },
-};
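Since the display driver programs these registers behind the clock framework's back, CLK_GET_RATE_NOCACHE makes every rate query recalculate from hardware instead of trusting a cached value. From a consumer's point of view (illustrative handle name, not driver code):

/* Sketch: with NOCACHE set, each call below re-reads the PLL registers. */
static void report_hdmi_pll_rate(struct clk *hdmi_pll)	/* hypothetical handle */
{
	pr_info("hdmi_pll now at %lu Hz\n", clk_get_rate(hdmi_pll));
}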
-
-static struct clk_regmap g12a_hdmi_pll_od = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 16,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_od",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_dco.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll_od2 = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 18,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll_od2",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_od.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap g12a_hdmi_pll = {
- .data = &(struct clk_regmap_div_data){
- .offset = HHI_HDMI_PLL_CNTL0,
- .shift = 20,
- .width = 2,
- .flags = CLK_DIVIDER_POWER_OF_TWO,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_pll",
- .ops = &clk_regmap_divider_ro_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_hdmi_pll_od2.hw
- },
- .num_parents = 1,
- .flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div4_div = {
- .mult = 1,
- .div = 4,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div4_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div4 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 21,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div4",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div4_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div5_div = {
- .mult = 1,
- .div = 5,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div5_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div5 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 22,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div5",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div5_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div7_div = {
- .mult = 1,
- .div = 7,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div7_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) { &g12a_fixed_pll.hw },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div7 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 23,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div7",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div7_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_fclk_div2p5_div = {
- .mult = 1,
- .div = 5,
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2p5_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap g12a_fclk_div2p5 = {
- .data = &(struct clk_regmap_gate_data){
- .offset = HHI_FIX_PLL_CNTL1,
- .bit_idx = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "fclk_div2p5",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fclk_div2p5_div.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_fixed_factor g12a_mpll_50m_div = {
- .mult = 1,
- .div = 80,
- .hw.init = &(struct clk_init_data){
- .name = "mpll_50m_div",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
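The fclk_div2p5_div and mpll_50m_div factors above deliberately hang off the PLL DCO rather than the post-divider output; that is how non-integer ratios of fixed_pll are obtained. With the DCO at its customary 4 GHz on these SoCs (fixed_pll = 2 GHz after the /2 od), /5 gives 800 MHz, i.e. fixed_pll/2.5, and /80 gives the 50 MHz reference. A quick check, 4 GHz DCO assumed:

/* Hedged arithmetic check; the 4 GHz DCO rate is an assumption. */
static void g12a_dco_derived_rates(void)
{
	unsigned long dco = 4000000000UL;

	pr_info("fclk_div2p5: %lu Hz\n", dco / 5);	/* 800 MHz = fixed_pll / 2.5 */
	pr_info("mpll_50m:    %lu Hz\n", dco / 80);	/*  50 MHz                   */
}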
-
-static struct clk_regmap g12a_mpll_50m = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_FIX_PLL_CNTL3,
- .mask = 0x1,
- .shift = 5,
- },
- .hw.init = &(struct clk_init_data){
- .name = "mpll_50m",
- .ops = &clk_regmap_mux_ro_ops,
- .parent_data = (const struct clk_parent_data []) {
- { .fw_name = "xtal", },
- { .hw = &g12a_mpll_50m_div.hw },
- },
- .num_parents = 2,
- },
-};
-
-static struct clk_fixed_factor g12a_mpll_prediv = {
- .mult = 1,
- .div = 2,
- .hw.init = &(struct clk_init_data){
- .name = "mpll_prediv",
- .ops = &clk_fixed_factor_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &g12a_fixed_pll_dco.hw
- },
- .num_parents = 1,
- },
-};
-
static const struct reg_sequence g12a_mpll0_init_regs[] = {
{ .reg = HHI_MPLL_CNTL2, .def = 0x40000033 },
};
@@ -2530,8 +2507,9 @@ static struct clk_regmap g12a_mpll3 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 g12a_clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data g12a_clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div7.hw },
{ .hw = &g12a_mpll1.hw },
@@ -2541,32 +2519,32 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &g12a_fclk_div5.hw },
};
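The renamed value table makes the index-to-register mapping explicit: selecting parent i of g12a_clk81_parents writes g12a_clk81_parents_val_table[i] to the mux field, so index 1 (fclk_div7) becomes register value 2 and the unused hardware value 1 is skipped entirely. Conceptually (sketch; the actual translation is assumed to live in the shared clk_regmap mux ops):

/* Hedged sketch of the parent-index to register-value translation. */
static u32 clk81_sel_index_to_regval(unsigned int index)
{
	return g12a_clk81_parents_val_table[index];	/* e.g. 1 -> 2 */
}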
-static struct clk_regmap g12a_mpeg_clk_sel = {
+static struct clk_regmap g12a_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = g12a_clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = g12a_clk81_parents,
+ .num_parents = ARRAY_SIZE(g12a_clk81_parents),
},
};
-static struct clk_regmap g12a_mpeg_clk_div = {
+static struct clk_regmap g12a_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_mpeg_clk_sel.hw
+ &g12a_clk81_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2582,14 +2560,14 @@ static struct clk_regmap g12a_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &g12a_mpeg_clk_div.hw
+ &g12a_clk81_div.hw
},
.num_parents = 1,
.flags = (CLK_SET_RATE_PARENT | CLK_IS_CRITICAL),
},
};
-static const struct clk_parent_data g12a_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data g12a_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div3.hw },
@@ -2613,8 +2591,8 @@ static struct clk_regmap g12a_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2662,8 +2640,8 @@ static struct clk_regmap g12a_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2711,8 +2689,8 @@ static struct clk_regmap g12a_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parent_data),
+ .parent_data = g12a_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(g12a_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2774,7 +2752,7 @@ static struct clk_regmap g12a_vid_pll_div = {
},
};
-static const struct clk_hw *g12a_vid_pll_parent_hws[] = {
+static const struct clk_hw *g12a_vid_pll_parents[] = {
&g12a_vid_pll_div.hw,
&g12a_hdmi_pll.hw,
};
@@ -2792,8 +2770,8 @@ static struct clk_regmap g12a_vid_pll_sel = {
* bit 18 selects from 2 possible parents:
* vid_pll_div or hdmi_pll
*/
- .parent_hws = g12a_vid_pll_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vid_pll_parent_hws),
+ .parent_hws = g12a_vid_pll_parents,
+ .num_parents = ARRAY_SIZE(g12a_vid_pll_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2816,7 +2794,7 @@ static struct clk_regmap g12a_vid_pll = {
/* VPU Clock */
-static const struct clk_hw *g12a_vpu_parent_hws[] = {
+static const struct clk_hw *g12a_vpu_parents[] = {
&g12a_fclk_div3.hw,
&g12a_fclk_div4.hw,
&g12a_fclk_div5.hw,
@@ -2836,8 +2814,8 @@ static struct clk_regmap g12a_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
+ .parent_hws = g12a_vpu_parents,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2880,8 +2858,8 @@ static struct clk_regmap g12a_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vpu_parent_hws),
+ .parent_hws = g12a_vpu_parents,
+ .num_parents = ARRAY_SIZE(g12a_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -2939,7 +2917,7 @@ static struct clk_regmap g12a_vpu = {
/* VDEC clocks */
-static const struct clk_hw *g12a_vdec_parent_hws[] = {
+static const struct clk_hw *g12a_vdec_parents[] = {
&g12a_fclk_div2p5.hw,
&g12a_fclk_div3.hw,
&g12a_fclk_div4.hw,
@@ -2959,8 +2937,8 @@ static struct clk_regmap g12a_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3009,8 +2987,8 @@ static struct clk_regmap g12a_vdec_hevcf_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevcf_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3059,8 +3037,8 @@ static struct clk_regmap g12a_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vdec_parent_hws),
+ .parent_hws = g12a_vdec_parents,
+ .num_parents = ARRAY_SIZE(g12a_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3101,7 +3079,7 @@ static struct clk_regmap g12a_vdec_hevc = {
/* VAPB Clock */
-static const struct clk_hw *g12a_vapb_parent_hws[] = {
+static const struct clk_hw *g12a_vapb_parents[] = {
&g12a_fclk_div4.hw,
&g12a_fclk_div3.hw,
&g12a_fclk_div5.hw,
@@ -3121,8 +3099,8 @@ static struct clk_regmap g12a_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
+ .parent_hws = g12a_vapb_parents,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3169,8 +3147,8 @@ static struct clk_regmap g12a_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vapb_parent_hws),
+ .parent_hws = g12a_vapb_parents,
+ .num_parents = ARRAY_SIZE(g12a_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3244,7 +3222,7 @@ static struct clk_regmap g12a_vapb = {
},
};
-static const struct clk_hw *g12a_vclk_parent_hws[] = {
+static const struct clk_hw *g12a_vclk_parents[] = {
&g12a_vid_pll.hw,
&g12a_gp0_pll.hw,
&g12a_hifi_pll.hw,
@@ -3264,8 +3242,8 @@ static struct clk_regmap g12a_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
+ .parent_hws = g12a_vclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3279,8 +3257,8 @@ static struct clk_regmap g12a_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_vclk_parent_hws),
+ .parent_hws = g12a_vclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3643,8 +3621,8 @@ static struct clk_fixed_factor g12a_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *g12a_cts_parent_hws[] = {
+static u32 g12a_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *g12a_cts_parents[] = {
&g12a_vclk_div1.hw,
&g12a_vclk_div2.hw,
&g12a_vclk_div4.hw,
@@ -3662,13 +3640,13 @@ static struct clk_regmap g12a_cts_enci_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3678,13 +3656,13 @@ static struct clk_regmap g12a_cts_encp_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3694,13 +3672,13 @@ static struct clk_regmap g12a_cts_encl_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 12,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -3710,20 +3688,20 @@ static struct clk_regmap g12a_cts_vdac_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = g12a_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_parent_hws),
+ .parent_hws = g12a_cts_parents,
+ .num_parents = ARRAY_SIZE(g12a_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
/* TOFIX: add support for cts_tcon */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *g12a_cts_hdmi_tx_parent_hws[] = {
+static u32 g12a_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *g12a_hdmi_tx_parents[] = {
&g12a_vclk_div1.hw,
&g12a_vclk_div2.hw,
&g12a_vclk_div4.hw,
@@ -3741,13 +3719,13 @@ static struct clk_regmap g12a_hdmi_tx_sel = {
.offset = HHI_HDMI_CLK_CNTL,
.mask = 0xf,
.shift = 16,
- .table = mux_table_hdmi_tx_sel,
+ .table = g12a_hdmi_tx_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_cts_hdmi_tx_parent_hws),
+ .parent_hws = g12a_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_tx_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -3834,7 +3812,7 @@ static struct clk_regmap g12a_hdmi_tx = {
/* MIPI DSI Host Clocks */
-static const struct clk_hw *g12a_mipi_dsi_pxclk_parent_hws[] = {
+static const struct clk_hw *g12a_mipi_dsi_pxclk_parents[] = {
&g12a_vid_pll.hw,
&g12a_gp0_pll.hw,
&g12a_hifi_pll.hw,
@@ -3855,8 +3833,8 @@ static struct clk_regmap g12a_mipi_dsi_pxclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "mipi_dsi_pxclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_mipi_dsi_pxclk_parent_hws,
- .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parent_hws),
+ .parent_hws = g12a_mipi_dsi_pxclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT,
},
};
@@ -3907,7 +3885,7 @@ static struct clk_regmap g12a_mipi_dsi_pxclk = {
/* MIPI ISP Clocks */
-static const struct clk_parent_data g12b_mipi_isp_parent_data[] = {
+static const struct clk_parent_data g12b_mipi_isp_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw },
{ .hw = &g12a_hifi_pll.hw },
@@ -3927,8 +3905,8 @@ static struct clk_regmap g12b_mipi_isp_sel = {
.hw.init = &(struct clk_init_data){
.name = "mipi_isp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12b_mipi_isp_parent_data,
- .num_parents = ARRAY_SIZE(g12b_mipi_isp_parent_data),
+ .parent_data = g12b_mipi_isp_parents,
+ .num_parents = ARRAY_SIZE(g12b_mipi_isp_parents),
},
};
@@ -3967,7 +3945,7 @@ static struct clk_regmap g12b_mipi_isp = {
/* HDMI Clocks */
-static const struct clk_parent_data g12a_hdmi_parent_data[] = {
+static const struct clk_parent_data g12a_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
@@ -3984,8 +3962,8 @@ static struct clk_regmap g12a_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(g12a_hdmi_parent_data),
+ .parent_data = g12a_hdmi_parents,
+ .num_parents = ARRAY_SIZE(g12a_hdmi_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -4025,7 +4003,7 @@ static struct clk_regmap g12a_hdmi = {
 * mux because it does top-to-bottom updates of each clock tree and
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data g12a_mali_0_1_parent_data[] = {
+static const struct clk_parent_data g12a_mali_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw },
{ .hw = &g12a_hifi_pll.hw },
@@ -4045,8 +4023,8 @@ static struct clk_regmap g12a_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = g12a_mali_parents,
+ .num_parents = ARRAY_SIZE(g12a_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -4099,8 +4077,8 @@ static struct clk_regmap g12a_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = g12a_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = g12a_mali_parents,
+ .num_parents = ARRAY_SIZE(g12a_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -4144,11 +4122,6 @@ static struct clk_regmap g12a_mali_1 = {
},
};
-static const struct clk_hw *g12a_mali_parent_hws[] = {
- &g12a_mali_0.hw,
- &g12a_mali_1.hw,
-};
-
static struct clk_regmap g12a_mali = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
@@ -4158,7 +4131,10 @@ static struct clk_regmap g12a_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_hws = g12a_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mali_0.hw,
+ &g12a_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
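The two mali branches exist so this final mux can switch glitch-free: software reprograms whichever branch is idle, then flips the mux, as the comment above the parent table describes. A consumer-side sketch of that sequence (illustrative handles; in practice the CCF drives this through CLK_SET_RATE_GATE/CLK_SET_RATE_PARENT):

/* Hedged sketch of a glitch-free GPU rate change using the spare branch. */
static int mali_glitch_free_set_rate(struct clk *mali, struct clk *idle_branch,
				     unsigned long rate)
{
	int ret;

	ret = clk_set_rate(idle_branch, rate);	/* program the inactive tree */
	if (ret)
		return ret;

	return clk_set_parent(mali, idle_branch); /* then switch atomically */
}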
@@ -4197,7 +4173,7 @@ static struct clk_regmap g12a_ts = {
/* SPICC SCLK source clock */
-static const struct clk_parent_data spicc_sclk_parent_data[] = {
+static const struct clk_parent_data g12a_spicc_sclk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_clk81.hw },
{ .hw = &g12a_fclk_div4.hw },
@@ -4216,8 +4192,8 @@ static struct clk_regmap g12a_spicc0_sclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc0_sclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_sclk_parent_data,
- .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ .parent_data = g12a_spicc_sclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_spicc_sclk_parents),
},
};
@@ -4263,8 +4239,8 @@ static struct clk_regmap g12a_spicc1_sclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "spicc1_sclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = spicc_sclk_parent_data,
- .num_parents = ARRAY_SIZE(spicc_sclk_parent_data),
+ .parent_data = g12a_spicc_sclk_parents,
+ .num_parents = ARRAY_SIZE(g12a_spicc_sclk_parents),
},
};
@@ -4303,7 +4279,7 @@ static struct clk_regmap g12a_spicc1_sclk = {
/* Neural Network Accelerator source clock */
-static const struct clk_parent_data nna_clk_parent_data[] = {
+static const struct clk_parent_data sm1_nna_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &g12a_gp0_pll.hw, },
{ .hw = &g12a_hifi_pll.hw, },
@@ -4323,8 +4299,8 @@ static struct clk_regmap sm1_nna_axi_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "nna_axi_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = nna_clk_parent_data,
- .num_parents = ARRAY_SIZE(nna_clk_parent_data),
+ .parent_data = sm1_nna_clk_parents,
+ .num_parents = ARRAY_SIZE(sm1_nna_clk_parents),
},
};
@@ -4370,8 +4346,8 @@ static struct clk_regmap sm1_nna_core_clk_sel = {
.hw.init = &(struct clk_init_data){
.name = "nna_core_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = nna_clk_parent_data,
- .num_parents = ARRAY_SIZE(nna_clk_parent_data),
+ .parent_data = sm1_nna_clk_parents,
+ .num_parents = ARRAY_SIZE(sm1_nna_clk_parents),
},
};
@@ -4408,89 +4384,101 @@ static struct clk_regmap sm1_nna_core_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw)
-
-#define MESON_GATE_RO(_name, _reg, _bit) \
- MESON_PCLK_RO(_name, _reg, _bit, &g12a_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(g12a_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(g12a_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(g12a_audio_locker, HHI_GCLK_MPEG0, 2);
-static MESON_GATE(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3);
-static MESON_GATE(g12a_eth_phy, HHI_GCLK_MPEG0, 4);
-static MESON_GATE(g12a_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(g12a_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(g12a_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(g12a_spicc_0, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(g12a_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(g12a_sana, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(g12a_sd, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(g12a_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(g12a_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20);
-static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24);
-static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28);
-
-static MESON_GATE(g12a_audio, HHI_GCLK_MPEG1, 0);
-static MESON_GATE(g12a_eth_core, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(g12a_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(g12a_audio_ififo, HHI_GCLK_MPEG1, 11);
-static MESON_GATE(g12a_adc, HHI_GCLK_MPEG1, 13);
-static MESON_GATE(g12a_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(g12a_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(g12a_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(g12a_pcie_comb, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(g12a_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(g12a_usb_general, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(g12a_pcie_phy, HHI_GCLK_MPEG1, 27);
-static MESON_GATE(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29);
-
-static MESON_GATE(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(g12a_htx_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(g12a_bt656, HHI_GCLK_MPEG2, 6);
-static MESON_GATE(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(g12b_mipi_isp_gate, HHI_GCLK_MPEG2, 17);
-static MESON_GATE(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(g12a_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(g12a_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(g12b_csi_phy1, HHI_GCLK_MPEG2, 28);
-static MESON_GATE(g12b_csi_phy0, HHI_GCLK_MPEG2, 29);
-static MESON_GATE(g12a_gic, HHI_GCLK_MPEG2, 30);
-
-static MESON_GATE(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5);
-static MESON_GATE(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6);
-static MESON_GATE(g12a_vclk2_other, HHI_GCLK_OTHER, 7);
-static MESON_GATE(g12a_vclk2_enci, HHI_GCLK_OTHER, 8);
-static MESON_GATE(g12a_vclk2_encp, HHI_GCLK_OTHER, 9);
-static MESON_GATE(g12a_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(g12a_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(g12a_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(g12a_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(g12a_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(g12a_vclk2_enct, HHI_GCLK_OTHER, 22);
-static MESON_GATE(g12a_vclk2_encl, HHI_GCLK_OTHER, 23);
-static MESON_GATE(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(g12a_vclk2_other1, HHI_GCLK_OTHER, 26);
-
-static MESON_GATE_RO(g12a_dma, HHI_GCLK_OTHER2, 0);
-static MESON_GATE_RO(g12a_efuse, HHI_GCLK_OTHER2, 1);
-static MESON_GATE_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2);
-static MESON_GATE_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3);
-static MESON_GATE_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4);
+static const struct clk_parent_data g12a_pclk_parents = { .hw = &g12a_clk81.hw };
+
+#define G12A_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &g12a_pclk_parents, _flags)
+
+#define G12A_PCLK_RO(_name, _reg, _bit, _flags) \
+ MESON_PCLK_RO(_name, _reg, _bit, &g12a_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical reasons.
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
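Each G12A_PCLK() line below is assumed to expand, via the shared MESON_PCLK helper, to an ordinary clk81-fed gate. Roughly (the exact field layout and name stringification depend on the MESON_PCLK definition, which lives elsewhere):

/* Hedged expansion sketch of G12A_PCLK(g12a_ddr, HHI_GCLK_MPEG0, 0, ...). */
static struct clk_regmap g12a_ddr = {
	.data = &(struct clk_regmap_gate_data) {
		.offset = HHI_GCLK_MPEG0,
		.bit_idx = 0,
	},
	.hw.init = &(struct clk_init_data) {
		.name = "g12a_ddr",		/* naming assumed from #_name */
		.ops = &clk_regmap_gate_ops,
		.parent_data = &g12a_pclk_parents,
		.num_parents = 1,
		.flags = CLK_IGNORE_UNUSED,
	},
};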
+static G12A_PCLK(g12a_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_locker, HHI_GCLK_MPEG0, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mipi_dsi_host, HHI_GCLK_MPEG0, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_eth_phy, HHI_GCLK_MPEG0, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_spicc_0, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_sana, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_sd, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_spicc_1, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_hiu_reg, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_a, HHI_GCLK_MPEG0, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_codec, HHI_GCLK_MPEG0, 28, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_audio, HHI_GCLK_MPEG1, 0, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_eth_core, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_audio_ififo, HHI_GCLK_MPEG1, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_adc, HHI_GCLK_MPEG1, 13, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pcie_comb, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_usb_general, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_pcie_phy, HHI_GCLK_MPEG1, 27, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_htx_hdcp22, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_htx_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_bt656, HHI_GCLK_MPEG2, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_usb1_to_ddr, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_mipi_isp_gate, HHI_GCLK_MPEG2, 17, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_csi_phy1, HHI_GCLK_MPEG2, 28, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12b_csi_phy0, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_gic, HHI_GCLK_MPEG2, 30, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK(g12a_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venct0, HHI_GCLK_OTHER, 5, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venct1, HHI_GCLK_OTHER, 6, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_other, HHI_GCLK_OTHER, 7, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_enci, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_encp, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_enct, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_encl, HHI_GCLK_OTHER, 23, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_venclmmc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static G12A_PCLK(g12a_vclk2_other1, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+
+static G12A_PCLK_RO(g12a_dma, HHI_GCLK_OTHER2, 0, 0);
+static G12A_PCLK_RO(g12a_efuse, HHI_GCLK_OTHER2, 1, 0);
+static G12A_PCLK_RO(g12a_rom_boot, HHI_GCLK_OTHER2, 2, 0);
+static G12A_PCLK_RO(g12a_reset_sec, HHI_GCLK_OTHER2, 3, 0);
+static G12A_PCLK_RO(g12a_sec_ahb_apb3, HHI_GCLK_OTHER2, 4, 0);
/* Array of all clocks provided by this provider */
static struct clk_hw *g12a_hw_clks[] = {
@@ -4503,8 +4491,8 @@ static struct clk_hw *g12a_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -4676,12 +4664,12 @@ static struct clk_hw *g12a_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -4730,8 +4718,8 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -4903,12 +4891,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12b_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -4940,12 +4928,12 @@ static struct clk_hw *g12b_hw_clks[] = {
[CLKID_SYS1_PLL] = &g12b_sys1_pll.hw,
[CLKID_SYS1_PLL_DIV16_EN] = &g12b_sys1_pll_div16_en.hw,
[CLKID_SYS1_PLL_DIV16] = &g12b_sys1_pll_div16.hw,
- [CLKID_CPUB_CLK_DYN0_SEL] = &g12b_cpub_clk_premux0.hw,
- [CLKID_CPUB_CLK_DYN0_DIV] = &g12b_cpub_clk_mux0_div.hw,
- [CLKID_CPUB_CLK_DYN0] = &g12b_cpub_clk_postmux0.hw,
- [CLKID_CPUB_CLK_DYN1_SEL] = &g12b_cpub_clk_premux1.hw,
- [CLKID_CPUB_CLK_DYN1_DIV] = &g12b_cpub_clk_mux1_div.hw,
- [CLKID_CPUB_CLK_DYN1] = &g12b_cpub_clk_postmux1.hw,
+ [CLKID_CPUB_CLK_DYN0_SEL] = &g12b_cpub_clk_dyn0_sel.hw,
+ [CLKID_CPUB_CLK_DYN0_DIV] = &g12b_cpub_clk_dyn0_div.hw,
+ [CLKID_CPUB_CLK_DYN0] = &g12b_cpub_clk_dyn0.hw,
+ [CLKID_CPUB_CLK_DYN1_SEL] = &g12b_cpub_clk_dyn1_sel.hw,
+ [CLKID_CPUB_CLK_DYN1_DIV] = &g12b_cpub_clk_dyn1_div.hw,
+ [CLKID_CPUB_CLK_DYN1] = &g12b_cpub_clk_dyn1.hw,
[CLKID_CPUB_CLK_DYN] = &g12b_cpub_clk_dyn.hw,
[CLKID_CPUB_CLK] = &g12b_cpub_clk.hw,
[CLKID_CPUB_CLK_DIV16_EN] = &g12b_cpub_clk_div16_en.hw,
@@ -4998,8 +4986,8 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_FCLK_DIV7] = &g12a_fclk_div7.hw,
[CLKID_FCLK_DIV2P5] = &g12a_fclk_div2p5.hw,
[CLKID_GP0_PLL] = &g12a_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &g12a_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &g12a_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &g12a_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &g12a_clk81_div.hw,
[CLKID_CLK81] = &g12a_clk81.hw,
[CLKID_MPLL0] = &g12a_mpll0.hw,
[CLKID_MPLL1] = &g12a_mpll1.hw,
@@ -5171,12 +5159,12 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_MPLL_50M] = &g12a_mpll_50m.hw,
[CLKID_SYS_PLL_DIV16_EN] = &g12a_sys_pll_div16_en.hw,
[CLKID_SYS_PLL_DIV16] = &g12a_sys_pll_div16.hw,
- [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_premux0.hw,
- [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_mux0_div.hw,
- [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_postmux0.hw,
- [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_premux1.hw,
- [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_mux1_div.hw,
- [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_postmux1.hw,
+ [CLKID_CPU_CLK_DYN0_SEL] = &g12a_cpu_clk_dyn0_sel.hw,
+ [CLKID_CPU_CLK_DYN0_DIV] = &g12a_cpu_clk_dyn0_div.hw,
+ [CLKID_CPU_CLK_DYN0] = &g12a_cpu_clk_dyn0.hw,
+ [CLKID_CPU_CLK_DYN1_SEL] = &g12a_cpu_clk_dyn1_sel.hw,
+ [CLKID_CPU_CLK_DYN1_DIV] = &g12a_cpu_clk_dyn1_div.hw,
+ [CLKID_CPU_CLK_DYN1] = &g12a_cpu_clk_dyn1.hw,
[CLKID_CPU_CLK_DYN] = &g12a_cpu_clk_dyn.hw,
[CLKID_CPU_CLK] = &g12a_cpu_clk.hw,
[CLKID_CPU_CLK_DIV16_EN] = &g12a_cpu_clk_div16_en.hw,
@@ -5206,12 +5194,12 @@ static struct clk_hw *sm1_hw_clks[] = {
[CLKID_TS] = &g12a_ts.hw,
[CLKID_GP1_PLL_DCO] = &sm1_gp1_pll_dco.hw,
[CLKID_GP1_PLL] = &sm1_gp1_pll.hw,
- [CLKID_DSU_CLK_DYN0_SEL] = &sm1_dsu_clk_premux0.hw,
- [CLKID_DSU_CLK_DYN0_DIV] = &sm1_dsu_clk_premux1.hw,
- [CLKID_DSU_CLK_DYN0] = &sm1_dsu_clk_mux0_div.hw,
- [CLKID_DSU_CLK_DYN1_SEL] = &sm1_dsu_clk_postmux0.hw,
- [CLKID_DSU_CLK_DYN1_DIV] = &sm1_dsu_clk_mux1_div.hw,
- [CLKID_DSU_CLK_DYN1] = &sm1_dsu_clk_postmux1.hw,
+ [CLKID_DSU_CLK_DYN0_SEL] = &sm1_dsu_clk_dyn0_sel.hw,
+ [CLKID_DSU_CLK_DYN0_DIV] = &sm1_dsu_clk_dyn0_div.hw,
+ [CLKID_DSU_CLK_DYN0] = &sm1_dsu_clk_dyn0.hw,
+ [CLKID_DSU_CLK_DYN1_SEL] = &sm1_dsu_clk_dyn1_sel.hw,
+ [CLKID_DSU_CLK_DYN1_DIV] = &sm1_dsu_clk_dyn1_div.hw,
+ [CLKID_DSU_CLK_DYN1] = &sm1_dsu_clk_dyn1.hw,
[CLKID_DSU_CLK_DYN] = &sm1_dsu_clk_dyn.hw,
[CLKID_DSU_CLK_FINAL] = &sm1_dsu_final_clk.hw,
[CLKID_DSU_CLK] = &sm1_dsu_clk.hw,
@@ -5241,8 +5229,7 @@ static const struct reg_sequence g12a_init_regs[] = {
#define DVFS_CON_ID "dvfs"
-static int meson_g12a_dvfs_setup_common(struct device *dev,
- struct clk_hw **hws)
+static int g12a_dvfs_setup_common(struct device *dev, struct clk_hw **hws)
{
struct clk *notifier_clk;
struct clk_hw *xtal;
@@ -5251,13 +5238,13 @@ static int meson_g12a_dvfs_setup_common(struct device *dev,
xtal = clk_hw_get_parent_by_index(hws[CLKID_CPU_CLK_DYN1_SEL], 0);
/* Setup clock notifier for cpu_clk_postmux0 */
- g12a_cpu_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_postmux0.hw,
+ g12a_cpu_clk_dyn0_nb_data.xtal = xtal;
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_dyn0.hw,
DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
- &g12a_cpu_clk_postmux0_nb_data.nb);
+ &g12a_cpu_clk_dyn0_nb_data.nb);
if (ret) {
- dev_err(dev, "failed to register the cpu_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpu_clk_dyn0 notifier\n");
return ret;
}
@@ -5274,7 +5261,7 @@ static int meson_g12a_dvfs_setup_common(struct device *dev,
return 0;
}
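The notifiers registered above are what make CPU DVFS safe: before a PLL rate change the dyn0 callback is expected to park the CPU on the xtal bypass, and after it to route the CPU back. The callback side of such a notifier follows the standard CCF shape (sketch only; the real handlers live in the shared meson notifier code):

/* Hedged sketch of a CCF rate-change notifier callback. */
static int cpu_clk_dyn0_notify(struct notifier_block *nb, unsigned long event,
			       void *data)
{
	struct clk_notifier_data *ndata = data;

	switch (event) {
	case PRE_RATE_CHANGE:
		/* The real handler parks the CPU on the xtal bypass here */
		pr_debug("dyn0: %lu -> %lu Hz\n",
			 ndata->old_rate, ndata->new_rate);
		break;
	case POST_RATE_CHANGE:
		/* ... and routes the CPU back to the new parent here */
		break;
	}

	return notifier_from_errno(0);
}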
-static int meson_g12b_dvfs_setup(struct platform_device *pdev)
+static int g12b_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12b_hw_clks;
struct device *dev = &pdev->dev;
@@ -5282,7 +5269,7 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
struct clk_hw *xtal;
int ret;
- ret = meson_g12a_dvfs_setup_common(dev, hws);
+ ret = g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
@@ -5311,18 +5298,19 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
/* Add notifiers for the second CPU cluster */
/* Setup clock notifier for cpub_clk_postmux0 */
- g12b_cpub_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_postmux0.hw,
+ g12b_cpub_clk_dyn0_nb_data.xtal = xtal;
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn0.hw,
DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
- &g12b_cpub_clk_postmux0_nb_data.nb);
+ &g12b_cpub_clk_dyn0_nb_data.nb);
if (ret) {
- dev_err(dev, "failed to register the cpub_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpub_clk_dyn0 notifier\n");
return ret;
}
/* Setup clock notifier for cpub_clk_dyn mux */
- notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw, "dvfs");
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw,
+ DVFS_CON_ID);
ret = devm_clk_notifier_register(dev, notifier_clk,
&g12a_cpu_clk_mux_nb);
if (ret) {
@@ -5351,14 +5339,14 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
return 0;
}
-static int meson_g12a_dvfs_setup(struct platform_device *pdev)
+static int g12a_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12a_hw_clks;
struct device *dev = &pdev->dev;
struct clk *notifier_clk;
int ret;
- ret = meson_g12a_dvfs_setup_common(dev, hws);
+ ret = g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
@@ -5383,27 +5371,27 @@ static int meson_g12a_dvfs_setup(struct platform_device *pdev)
return 0;
}
-struct meson_g12a_data {
- const struct meson_eeclkc_data eeclkc_data;
+struct g12a_clkc_data {
+ const struct meson_clkc_data clkc_data;
int (*dvfs_setup)(struct platform_device *pdev);
};
-static int meson_g12a_probe(struct platform_device *pdev)
+static int g12a_clkc_probe(struct platform_device *pdev)
{
- const struct meson_eeclkc_data *eeclkc_data;
- const struct meson_g12a_data *g12a_data;
+ const struct meson_clkc_data *clkc_data;
+ const struct g12a_clkc_data *g12a_data;
int ret;
- eeclkc_data = of_device_get_match_data(&pdev->dev);
- if (!eeclkc_data)
+ clkc_data = of_device_get_match_data(&pdev->dev);
+ if (!clkc_data)
return -EINVAL;
- ret = meson_eeclkc_probe(pdev);
+ ret = meson_clkc_syscon_probe(pdev);
if (ret)
return ret;
- g12a_data = container_of(eeclkc_data, struct meson_g12a_data,
- eeclkc_data);
+ g12a_data = container_of(clkc_data, struct g12a_clkc_data,
+ clkc_data);
if (g12a_data->dvfs_setup)
return g12a_data->dvfs_setup(pdev);
@@ -5411,8 +5399,8 @@ static int meson_g12a_probe(struct platform_device *pdev)
return 0;
}
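The container_of() dance above lets several SoC variants share one probe: the OF match data points at the embedded clkc_data member, and the wrapper is recovered to reach the per-SoC dvfs_setup hook. The same pattern in miniature (generic names, for illustration only):

/* Hedged illustration of recovering a wrapper from embedded match data. */
struct wrapper {
	int base;			/* what the match table points at */
	void (*extra)(void);		/* per-variant hook */
};

static void use_match_data(const int *base_ptr)
{
	const struct wrapper *w = container_of(base_ptr, struct wrapper, base);

	if (w->extra)
		w->extra();
}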
-static const struct meson_g12a_data g12a_clkc_data = {
- .eeclkc_data = {
+static const struct g12a_clkc_data g12a_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = g12a_hw_clks,
.num = ARRAY_SIZE(g12a_hw_clks),
@@ -5420,54 +5408,54 @@ static const struct meson_g12a_data g12a_clkc_data = {
.init_regs = g12a_init_regs,
.init_count = ARRAY_SIZE(g12a_init_regs),
},
- .dvfs_setup = meson_g12a_dvfs_setup,
+ .dvfs_setup = g12a_dvfs_setup,
};
-static const struct meson_g12a_data g12b_clkc_data = {
- .eeclkc_data = {
+static const struct g12a_clkc_data g12b_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = g12b_hw_clks,
.num = ARRAY_SIZE(g12b_hw_clks),
},
},
- .dvfs_setup = meson_g12b_dvfs_setup,
+ .dvfs_setup = g12b_dvfs_setup,
};
-static const struct meson_g12a_data sm1_clkc_data = {
- .eeclkc_data = {
+static const struct g12a_clkc_data sm1_clkc_data = {
+ .clkc_data = {
.hw_clks = {
.hws = sm1_hw_clks,
.num = ARRAY_SIZE(sm1_hw_clks),
},
},
- .dvfs_setup = meson_g12a_dvfs_setup,
+ .dvfs_setup = g12a_dvfs_setup,
};
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id g12a_clkc_match_table[] = {
{
.compatible = "amlogic,g12a-clkc",
- .data = &g12a_clkc_data.eeclkc_data
+ .data = &g12a_clkc_data.clkc_data
},
{
.compatible = "amlogic,g12b-clkc",
- .data = &g12b_clkc_data.eeclkc_data
+ .data = &g12b_clkc_data.clkc_data
},
{
.compatible = "amlogic,sm1-clkc",
- .data = &sm1_clkc_data.eeclkc_data
+ .data = &sm1_clkc_data.clkc_data
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, g12a_clkc_match_table);
-static struct platform_driver g12a_driver = {
- .probe = meson_g12a_probe,
+static struct platform_driver g12a_clkc_driver = {
+ .probe = g12a_clkc_probe,
.driver = {
.name = "g12a-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = g12a_clkc_match_table,
},
};
-module_platform_driver(g12a_driver);
+module_platform_driver(g12a_clkc_driver);
MODULE_DESCRIPTION("Amlogic G12/SM1 Main Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index f075fbd450f3..c7dfb3a06cb5 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -23,31 +23,20 @@
#define AO_RTC_ALT_CLK_CNTL0 0x94
#define AO_RTC_ALT_CLK_CNTL1 0x98
-#define GXBB_AO_GATE(_name, _bit) \
-static struct clk_regmap _name##_ao = { \
- .data = &(struct clk_regmap_gate_data) { \
- .offset = AO_RTI_GEN_CNTL_REG0, \
- .bit_idx = (_bit), \
- }, \
- .hw.init = &(struct clk_init_data) { \
- .name = #_name "_ao", \
- .ops = &clk_regmap_gate_ops, \
- .parent_data = &(const struct clk_parent_data) { \
- .fw_name = "mpeg-clk", \
- }, \
- .num_parents = 1, \
- .flags = CLK_IGNORE_UNUSED, \
- }, \
-}
+static const struct clk_parent_data gxbb_ao_pclk_parents = { .fw_name = "mpeg-clk" };
-GXBB_AO_GATE(remote, 0);
-GXBB_AO_GATE(i2c_master, 1);
-GXBB_AO_GATE(i2c_slave, 2);
-GXBB_AO_GATE(uart1, 3);
-GXBB_AO_GATE(uart2, 5);
-GXBB_AO_GATE(ir_blaster, 6);
+#define GXBB_AO_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(gxbb_ao_##_name, AO_RTI_GEN_CNTL_REG0, _bit, \
+ &gxbb_ao_pclk_parents, _flags)
-static struct clk_regmap ao_cts_oscin = {
+static GXBB_AO_PCLK(remote, 0, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(i2c_master, 1, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(i2c_slave, 2, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(uart1, 3, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(uart2, 5, CLK_IGNORE_UNUSED);
+static GXBB_AO_PCLK(ir_blaster, 6, CLK_IGNORE_UNUSED);
+
+static struct clk_regmap gxbb_ao_cts_oscin = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTI_PWR_CNTL_REG0,
.bit_idx = 6,
@@ -62,7 +51,7 @@ static struct clk_regmap ao_cts_oscin = {
},
};
-static struct clk_regmap ao_32k_pre = {
+static struct clk_regmap gxbb_ao_32k_pre = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 31,
@@ -70,7 +59,7 @@ static struct clk_regmap ao_32k_pre = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_pre",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_cts_oscin.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_cts_oscin.hw },
.num_parents = 1,
},
};
@@ -85,7 +74,7 @@ static const struct meson_clk_dualdiv_param gxbb_32k_div_table[] = {
}, {}
};
-static struct clk_regmap ao_32k_div = {
+static struct clk_regmap gxbb_ao_32k_div = {
.data = &(struct meson_clk_dualdiv_data){
.n1 = {
.reg_off = AO_RTC_ALT_CLK_CNTL0,
@@ -117,12 +106,12 @@ static struct clk_regmap ao_32k_div = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k_div",
.ops = &meson_clk_dualdiv_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_32k_pre.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_32k_pre.hw },
.num_parents = 1,
},
};
-static struct clk_regmap ao_32k_sel = {
+static struct clk_regmap gxbb_ao_32k_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTC_ALT_CLK_CNTL1,
.mask = 0x1,
@@ -133,15 +122,15 @@ static struct clk_regmap ao_32k_sel = {
.name = "ao_32k_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &ao_32k_div.hw,
- &ao_32k_pre.hw
+ &gxbb_ao_32k_div.hw,
+ &gxbb_ao_32k_pre.hw
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_32k = {
+static struct clk_regmap gxbb_ao_32k = {
.data = &(struct clk_regmap_gate_data){
.offset = AO_RTC_ALT_CLK_CNTL0,
.bit_idx = 30,
@@ -149,13 +138,13 @@ static struct clk_regmap ao_32k = {
.hw.init = &(struct clk_init_data){
.name = "ao_32k",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &ao_32k_sel.hw },
+ .parent_hws = (const struct clk_hw *[]) { &gxbb_ao_32k_sel.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_cts_rtc_oscin = {
+static struct clk_regmap gxbb_ao_cts_rtc_oscin = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x7,
@@ -170,14 +159,14 @@ static struct clk_regmap ao_cts_rtc_oscin = {
{ .fw_name = "ext-32k-0", },
{ .fw_name = "ext-32k-1", },
{ .fw_name = "ext-32k-2", },
- { .hw = &ao_32k.hw },
+ { .hw = &gxbb_ao_32k.hw },
},
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_clk81 = {
+static struct clk_regmap gxbb_ao_clk81 = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_RTI_PWR_CNTL_REG0,
.mask = 0x1,
@@ -189,14 +178,14 @@ static struct clk_regmap ao_clk81 = {
.ops = &clk_regmap_mux_ro_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "mpeg-clk", },
- { .hw = &ao_cts_rtc_oscin.hw },
+ { .hw = &gxbb_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap ao_cts_cec = {
+static struct clk_regmap gxbb_ao_cts_cec = {
.data = &(struct clk_regmap_mux_data) {
.offset = AO_CRT_CLK_CNTL1,
.mask = 0x1,
@@ -221,14 +210,14 @@ static struct clk_regmap ao_cts_cec = {
*/
.parent_data = (const struct clk_parent_data []) {
{ .name = "fixme", .index = -1, },
- { .hw = &ao_cts_rtc_oscin.hw },
+ { .hw = &gxbb_ao_cts_rtc_oscin.hw },
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static const unsigned int gxbb_aoclk_reset[] = {
+static const unsigned int gxbb_ao_reset[] = {
[RESET_AO_REMOTE] = 16,
[RESET_AO_I2C_MASTER] = 18,
[RESET_AO_I2C_SLAVE] = 19,
@@ -237,50 +226,52 @@ static const unsigned int gxbb_aoclk_reset[] = {
[RESET_AO_IR_BLASTER] = 23,
};
-static struct clk_hw *gxbb_aoclk_hw_clks[] = {
- [CLKID_AO_REMOTE] = &remote_ao.hw,
- [CLKID_AO_I2C_MASTER] = &i2c_master_ao.hw,
- [CLKID_AO_I2C_SLAVE] = &i2c_slave_ao.hw,
- [CLKID_AO_UART1] = &uart1_ao.hw,
- [CLKID_AO_UART2] = &uart2_ao.hw,
- [CLKID_AO_IR_BLASTER] = &ir_blaster_ao.hw,
- [CLKID_AO_CEC_32K] = &ao_cts_cec.hw,
- [CLKID_AO_CTS_OSCIN] = &ao_cts_oscin.hw,
- [CLKID_AO_32K_PRE] = &ao_32k_pre.hw,
- [CLKID_AO_32K_DIV] = &ao_32k_div.hw,
- [CLKID_AO_32K_SEL] = &ao_32k_sel.hw,
- [CLKID_AO_32K] = &ao_32k.hw,
- [CLKID_AO_CTS_RTC_OSCIN] = &ao_cts_rtc_oscin.hw,
- [CLKID_AO_CLK81] = &ao_clk81.hw,
+static struct clk_hw *gxbb_ao_hw_clks[] = {
+ [CLKID_AO_REMOTE] = &gxbb_ao_remote.hw,
+ [CLKID_AO_I2C_MASTER] = &gxbb_ao_i2c_master.hw,
+ [CLKID_AO_I2C_SLAVE] = &gxbb_ao_i2c_slave.hw,
+ [CLKID_AO_UART1] = &gxbb_ao_uart1.hw,
+ [CLKID_AO_UART2] = &gxbb_ao_uart2.hw,
+ [CLKID_AO_IR_BLASTER] = &gxbb_ao_ir_blaster.hw,
+ [CLKID_AO_CEC_32K] = &gxbb_ao_cts_cec.hw,
+ [CLKID_AO_CTS_OSCIN] = &gxbb_ao_cts_oscin.hw,
+ [CLKID_AO_32K_PRE] = &gxbb_ao_32k_pre.hw,
+ [CLKID_AO_32K_DIV] = &gxbb_ao_32k_div.hw,
+ [CLKID_AO_32K_SEL] = &gxbb_ao_32k_sel.hw,
+ [CLKID_AO_32K] = &gxbb_ao_32k.hw,
+ [CLKID_AO_CTS_RTC_OSCIN] = &gxbb_ao_cts_rtc_oscin.hw,
+ [CLKID_AO_CLK81] = &gxbb_ao_clk81.hw,
};
-static const struct meson_aoclk_data gxbb_aoclkc_data = {
+static const struct meson_aoclk_data gxbb_ao_clkc_data = {
.reset_reg = AO_RTI_GEN_CNTL_REG0,
- .num_reset = ARRAY_SIZE(gxbb_aoclk_reset),
- .reset = gxbb_aoclk_reset,
- .hw_clks = {
- .hws = gxbb_aoclk_hw_clks,
- .num = ARRAY_SIZE(gxbb_aoclk_hw_clks),
+ .num_reset = ARRAY_SIZE(gxbb_ao_reset),
+ .reset = gxbb_ao_reset,
+ .clkc_data = {
+ .hw_clks = {
+ .hws = gxbb_ao_hw_clks,
+ .num = ARRAY_SIZE(gxbb_ao_hw_clks),
+ },
},
};
-static const struct of_device_id gxbb_aoclkc_match_table[] = {
+static const struct of_device_id gxbb_ao_clkc_match_table[] = {
{
.compatible = "amlogic,meson-gx-aoclkc",
- .data = &gxbb_aoclkc_data,
+ .data = &gxbb_ao_clkc_data.clkc_data,
},
{ }
};
-MODULE_DEVICE_TABLE(of, gxbb_aoclkc_match_table);
+MODULE_DEVICE_TABLE(of, gxbb_ao_clkc_match_table);
-static struct platform_driver gxbb_aoclkc_driver = {
+static struct platform_driver gxbb_ao_clkc_driver = {
.probe = meson_aoclkc_probe,
.driver = {
.name = "gxbb-aoclkc",
- .of_match_table = gxbb_aoclkc_match_table,
+ .of_match_table = gxbb_ao_clkc_match_table,
},
};
-module_platform_driver(gxbb_aoclkc_driver);
+module_platform_driver(gxbb_ao_clkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Always-ON Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 362d1b87ea5b..5a229c4ffae1 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -13,7 +13,7 @@
#include "clk-regmap.h"
#include "clk-pll.h"
#include "clk-mpll.h"
-#include "meson-eeclk.h"
+#include "meson-clkc-utils.h"
#include "vid-pll-div.h"
#include <dt-bindings/clock/gxbb-clkc.h>
@@ -116,70 +116,6 @@
#define HHI_BT656_CLK_CNTL 0x3d4
#define HHI_SAR_CLK_CNTL 0x3d8
-static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
- PLL_PARAMS(32, 1),
- PLL_PARAMS(33, 1),
- PLL_PARAMS(34, 1),
- PLL_PARAMS(35, 1),
- PLL_PARAMS(36, 1),
- PLL_PARAMS(37, 1),
- PLL_PARAMS(38, 1),
- PLL_PARAMS(39, 1),
- PLL_PARAMS(40, 1),
- PLL_PARAMS(41, 1),
- PLL_PARAMS(42, 1),
- PLL_PARAMS(43, 1),
- PLL_PARAMS(44, 1),
- PLL_PARAMS(45, 1),
- PLL_PARAMS(46, 1),
- PLL_PARAMS(47, 1),
- PLL_PARAMS(48, 1),
- PLL_PARAMS(49, 1),
- PLL_PARAMS(50, 1),
- PLL_PARAMS(51, 1),
- PLL_PARAMS(52, 1),
- PLL_PARAMS(53, 1),
- PLL_PARAMS(54, 1),
- PLL_PARAMS(55, 1),
- PLL_PARAMS(56, 1),
- PLL_PARAMS(57, 1),
- PLL_PARAMS(58, 1),
- PLL_PARAMS(59, 1),
- PLL_PARAMS(60, 1),
- PLL_PARAMS(61, 1),
- PLL_PARAMS(62, 1),
- { /* sentinel */ },
-};
-
-static const struct pll_params_table gxl_gp0_pll_params_table[] = {
- PLL_PARAMS(42, 1),
- PLL_PARAMS(43, 1),
- PLL_PARAMS(44, 1),
- PLL_PARAMS(45, 1),
- PLL_PARAMS(46, 1),
- PLL_PARAMS(47, 1),
- PLL_PARAMS(48, 1),
- PLL_PARAMS(49, 1),
- PLL_PARAMS(50, 1),
- PLL_PARAMS(51, 1),
- PLL_PARAMS(52, 1),
- PLL_PARAMS(53, 1),
- PLL_PARAMS(54, 1),
- PLL_PARAMS(55, 1),
- PLL_PARAMS(56, 1),
- PLL_PARAMS(57, 1),
- PLL_PARAMS(58, 1),
- PLL_PARAMS(59, 1),
- PLL_PARAMS(60, 1),
- PLL_PARAMS(61, 1),
- PLL_PARAMS(62, 1),
- PLL_PARAMS(63, 1),
- PLL_PARAMS(64, 1),
- PLL_PARAMS(65, 1),
- PLL_PARAMS(66, 1),
- { /* sentinel */ },
-};
-
static struct clk_regmap gxbb_fixed_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -523,7 +459,42 @@ static struct clk_regmap gxbb_sys_pll = {
},
};
-static const struct reg_sequence gxbb_gp0_init_regs[] = {
+static const struct pll_params_table gxbb_gp0_pll_params_table[] = {
+ PLL_PARAMS(32, 1),
+ PLL_PARAMS(33, 1),
+ PLL_PARAMS(34, 1),
+ PLL_PARAMS(35, 1),
+ PLL_PARAMS(36, 1),
+ PLL_PARAMS(37, 1),
+ PLL_PARAMS(38, 1),
+ PLL_PARAMS(39, 1),
+ PLL_PARAMS(40, 1),
+ PLL_PARAMS(41, 1),
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence gxbb_gp0_pll_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL2, .def = 0x69c80000 },
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a5590c4 },
{ .reg = HHI_GP0_PLL_CNTL4, .def = 0x0000500d },
@@ -557,8 +528,8 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
.width = 1,
},
.table = gxbb_gp0_pll_params_table,
- .init_regs = gxbb_gp0_init_regs,
- .init_count = ARRAY_SIZE(gxbb_gp0_init_regs),
+ .init_regs = gxbb_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(gxbb_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -570,7 +541,36 @@ static struct clk_regmap gxbb_gp0_pll_dco = {
},
};
-static const struct reg_sequence gxl_gp0_init_regs[] = {
+static const struct pll_params_table gxl_gp0_pll_params_table[] = {
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(43, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(46, 1),
+ PLL_PARAMS(47, 1),
+ PLL_PARAMS(48, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(50, 1),
+ PLL_PARAMS(51, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(53, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(55, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(57, 1),
+ PLL_PARAMS(58, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ PLL_PARAMS(63, 1),
+ PLL_PARAMS(64, 1),
+ PLL_PARAMS(65, 1),
+ PLL_PARAMS(66, 1),
+ { /* sentinel */ },
+};
+
+static const struct reg_sequence gxl_gp0_pll_init_regs[] = {
{ .reg = HHI_GP0_PLL_CNTL1, .def = 0xc084b000 },
{ .reg = HHI_GP0_PLL_CNTL2, .def = 0xb75020be },
{ .reg = HHI_GP0_PLL_CNTL3, .def = 0x0a59a288 },
@@ -611,8 +611,8 @@ static struct clk_regmap gxl_gp0_pll_dco = {
.width = 1,
},
.table = gxl_gp0_pll_params_table,
- .init_regs = gxl_gp0_init_regs,
- .init_count = ARRAY_SIZE(gxl_gp0_init_regs),
+ .init_regs = gxl_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(gxl_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -972,8 +972,9 @@ static struct clk_regmap gxbb_mpll2 = {
},
};
-static u32 mux_table_clk81[] = { 0, 2, 3, 4, 5, 6, 7 };
-static const struct clk_parent_data clk81_parent_data[] = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 clk81_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data clk81_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div7.hw },
{ .hw = &gxbb_mpll1.hw },
@@ -983,37 +984,37 @@ static const struct clk_parent_data clk81_parent_data[] = {
{ .hw = &gxbb_fclk_div5.hw },
};
-static struct clk_regmap gxbb_mpeg_clk_sel = {
+static struct clk_regmap gxbb_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
/*
* bits 14:12 selects from 8 possible parents:
* xtal, 1'b0 (wtf), fclk_div7, mpll_clkout1, mpll_clkout2,
* fclk_div4, fclk_div3, fclk_div5
*/
- .parent_data = clk81_parent_data,
- .num_parents = ARRAY_SIZE(clk81_parent_data),
+ .parent_data = clk81_parents,
+ .num_parents = ARRAY_SIZE(clk81_parents),
},
};
-static struct clk_regmap gxbb_mpeg_clk_div = {
+static struct clk_regmap gxbb_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpeg_clk_sel.hw
+ &gxbb_clk81_sel.hw
},
.num_parents = 1,
},
@@ -1029,7 +1030,7 @@ static struct clk_regmap gxbb_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpeg_clk_div.hw
+ &gxbb_clk81_div.hw
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
@@ -1094,7 +1095,7 @@ static struct clk_regmap gxbb_sar_adc_clk = {
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
-static const struct clk_parent_data gxbb_mali_0_1_parent_data[] = {
+static const struct clk_parent_data gxbb_mali_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_gp0_pll.hw },
{ .hw = &gxbb_mpll2.hw },
@@ -1114,8 +1115,8 @@ static struct clk_regmap gxbb_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = gxbb_mali_parents,
+ .num_parents = ARRAY_SIZE(gxbb_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1168,8 +1169,8 @@ static struct clk_regmap gxbb_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_mali_0_1_parent_data,
- .num_parents = 8,
+ .parent_data = gxbb_mali_parents,
+ .num_parents = ARRAY_SIZE(gxbb_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1213,11 +1214,6 @@ static struct clk_regmap gxbb_mali_1 = {
},
};
-static const struct clk_hw *gxbb_mali_parent_hws[] = {
- &gxbb_mali_0.hw,
- &gxbb_mali_1.hw,
-};
-
static struct clk_regmap gxbb_mali = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
@@ -1227,29 +1223,35 @@ static struct clk_regmap gxbb_mali = {
.hw.init = &(struct clk_init_data){
.name = "mali",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mali_0.hw,
+ &gxbb_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
+static u32 gxbb_cts_mclk_parents_val_table[] = { 1, 2, 3 };
+static const struct clk_hw *gxbb_cts_mclk_parents[] = {
+ &gxbb_mpll0.hw,
+ &gxbb_mpll1.hw,
+ &gxbb_mpll2.hw,
+};
+
static struct clk_regmap gxbb_cts_amclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL,
.mask = 0x3,
.shift = 9,
- .table = (u32[]){ 1, 2, 3 },
+ .table = gxbb_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpll0.hw,
- &gxbb_mpll1.hw,
- &gxbb_mpll2.hw,
- },
- .num_parents = 3,
+ .parent_hws = gxbb_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_mclk_parents),
},
};
@@ -1292,18 +1294,14 @@ static struct clk_regmap gxbb_cts_mclk_i958_sel = {
.offset = HHI_AUD_CLK_CNTL2,
.mask = 0x3,
.shift = 25,
- .table = (u32[]){ 1, 2, 3 },
+ .table = gxbb_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &gxbb_mpll0.hw,
- &gxbb_mpll1.hw,
- &gxbb_mpll2.hw,
- },
- .num_parents = 3,
+ .parent_hws = gxbb_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_mclk_parents),
},
};
@@ -1368,7 +1366,7 @@ static struct clk_regmap gxbb_cts_i958 = {
* This clock does not exist yet in this controller or the AO one
*/
static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 };
-static const struct clk_parent_data gxbb_32k_clk_parent_data[] = {
+static const struct clk_parent_data gxbb_32k_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div3.hw },
{ .hw = &gxbb_fclk_div5.hw },
@@ -1380,11 +1378,11 @@ static struct clk_regmap gxbb_32k_clk_sel = {
.mask = 0x3,
.shift = 16,
.table = gxbb_32k_clk_parents_val_table,
- },
+ },
.hw.init = &(struct clk_init_data){
.name = "32k_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_32k_clk_parent_data,
+ .parent_data = gxbb_32k_clk_parents,
.num_parents = 4,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1423,7 +1421,7 @@ static struct clk_regmap gxbb_32k_clk = {
},
};
-static const struct clk_parent_data gxbb_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data gxbb_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div2.hw },
{ .hw = &gxbb_fclk_div3.hw },
@@ -1447,8 +1445,8 @@ static struct clk_regmap gxbb_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1497,8 +1495,8 @@ static struct clk_regmap gxbb_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1547,8 +1545,8 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parent_data),
+ .parent_data = gxbb_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(gxbb_sd_emmc_clk0_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1589,7 +1587,7 @@ static struct clk_regmap gxbb_sd_emmc_c_clk0 = {
/* VPU Clock */
-static const struct clk_hw *gxbb_vpu_parent_hws[] = {
+static const struct clk_hw *gxbb_vpu_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -1609,8 +1607,8 @@ static struct clk_regmap gxbb_vpu_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
+ .parent_hws = gxbb_vpu_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1657,8 +1655,8 @@ static struct clk_regmap gxbb_vpu_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vpu_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vpu_parent_hws),
+ .parent_hws = gxbb_vpu_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vpu_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1716,7 +1714,7 @@ static struct clk_regmap gxbb_vpu = {
/* VAPB Clock */
-static const struct clk_hw *gxbb_vapb_parent_hws[] = {
+static const struct clk_hw *gxbb_vapb_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -1736,8 +1734,8 @@ static struct clk_regmap gxbb_vapb_0_sel = {
* bits 9:10 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
+ .parent_hws = gxbb_vapb_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1788,8 +1786,8 @@ static struct clk_regmap gxbb_vapb_1_sel = {
* bits 25:26 selects from 4 possible parents:
* fclk_div4, fclk_div3, fclk_div5, fclk_div7,
*/
- .parent_hws = gxbb_vapb_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vapb_parent_hws),
+ .parent_hws = gxbb_vapb_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vapb_parents),
.flags = CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1897,7 +1895,7 @@ static struct clk_regmap gxbb_vid_pll_div = {
},
};
-static const struct clk_parent_data gxbb_vid_pll_parent_data[] = {
+static const struct clk_parent_data gxbb_vid_pll_parents[] = {
{ .hw = &gxbb_vid_pll_div.hw },
/*
* Note:
@@ -1922,8 +1920,8 @@ static struct clk_regmap gxbb_vid_pll_sel = {
* bit 18 selects from 2 possible parents:
* vid_pll_div or hdmi_pll
*/
- .parent_data = gxbb_vid_pll_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_vid_pll_parent_data),
+ .parent_data = gxbb_vid_pll_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vid_pll_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1944,7 +1942,7 @@ static struct clk_regmap gxbb_vid_pll = {
},
};
-static const struct clk_hw *gxbb_vclk_parent_hws[] = {
+static const struct clk_hw *gxbb_vclk_parents[] = {
&gxbb_vid_pll.hw,
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
@@ -1968,8 +1966,8 @@ static struct clk_regmap gxbb_vclk_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_hws = gxbb_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
+ .parent_hws = gxbb_vclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -1988,8 +1986,8 @@ static struct clk_regmap gxbb_vclk2_sel = {
* vid_pll, fclk_div4, fclk_div3, fclk_div5,
* vid_pll, fclk_div7, mp1
*/
- .parent_hws = gxbb_vclk_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vclk_parent_hws),
+ .parent_hws = gxbb_vclk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vclk_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2328,8 +2326,8 @@ static struct clk_fixed_factor gxbb_vclk2_div12 = {
},
};
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *gxbb_cts_parent_hws[] = {
+static u32 gxbb_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *gxbb_cts_parents[] = {
&gxbb_vclk_div1.hw,
&gxbb_vclk_div2.hw,
&gxbb_vclk_div4.hw,
@@ -2347,13 +2345,13 @@ static struct clk_regmap gxbb_cts_enci_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2363,13 +2361,13 @@ static struct clk_regmap gxbb_cts_encp_sel = {
.offset = HHI_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2379,50 +2377,13 @@ static struct clk_regmap gxbb_cts_vdac_sel = {
.offset = HHI_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = gxbb_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_cts_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_parent_hws),
- .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
- },
-};
-
-/* TOFIX: add support for cts_tcon */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *gxbb_cts_hdmi_tx_parent_hws[] = {
- &gxbb_vclk_div1.hw,
- &gxbb_vclk_div2.hw,
- &gxbb_vclk_div4.hw,
- &gxbb_vclk_div6.hw,
- &gxbb_vclk_div12.hw,
- &gxbb_vclk2_div1.hw,
- &gxbb_vclk2_div2.hw,
- &gxbb_vclk2_div4.hw,
- &gxbb_vclk2_div6.hw,
- &gxbb_vclk2_div12.hw,
-};
-
-static struct clk_regmap gxbb_hdmi_tx_sel = {
- .data = &(struct clk_regmap_mux_data){
- .offset = HHI_HDMI_CLK_CNTL,
- .mask = 0xf,
- .shift = 16,
- .table = mux_table_hdmi_tx_sel,
- },
- .hw.init = &(struct clk_init_data){
- .name = "hdmi_tx_sel",
- .ops = &clk_regmap_mux_ops,
- /*
- * bits 31:28 selects from 12 possible parents:
- * vclk_div1, vclk_div2, vclk_div4, vclk_div6, vclk_div12
- * vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
- * cts_tcon
- */
- .parent_hws = gxbb_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_cts_hdmi_tx_parent_hws),
+ .parent_hws = gxbb_cts_parents,
+ .num_parents = ARRAY_SIZE(gxbb_cts_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2475,6 +2436,43 @@ static struct clk_regmap gxbb_cts_vdac = {
},
};
+/* TOFIX: add support for cts_tcon */
+static u32 gxbb_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *gxbb_hdmi_tx_parents[] = {
+ &gxbb_vclk_div1.hw,
+ &gxbb_vclk_div2.hw,
+ &gxbb_vclk_div4.hw,
+ &gxbb_vclk_div6.hw,
+ &gxbb_vclk_div12.hw,
+ &gxbb_vclk2_div1.hw,
+ &gxbb_vclk2_div2.hw,
+ &gxbb_vclk2_div4.hw,
+ &gxbb_vclk2_div6.hw,
+ &gxbb_vclk2_div12.hw,
+};
+
+static struct clk_regmap gxbb_hdmi_tx_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_HDMI_CLK_CNTL,
+ .mask = 0xf,
+ .shift = 16,
+ .table = gxbb_hdmi_tx_parents_val_table,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_tx_sel",
+ .ops = &clk_regmap_mux_ops,
+ /*
+ * bits 31:28 selects from 12 possible parents:
+ * vclk_div1, vclk_div2, vclk_div4, vclk_div6, vclk_div12
+ * vclk2_div1, vclk2_div2, vclk2_div4, vclk2_div6, vclk2_div12,
+ * cts_tcon
+ */
+ .parent_hws = gxbb_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_tx_parents),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
static struct clk_regmap gxbb_hdmi_tx = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL2,
@@ -2493,7 +2491,7 @@ static struct clk_regmap gxbb_hdmi_tx = {
/* HDMI Clocks */
-static const struct clk_parent_data gxbb_hdmi_parent_data[] = {
+static const struct clk_parent_data gxbb_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_fclk_div4.hw },
{ .hw = &gxbb_fclk_div3.hw },
@@ -2510,8 +2508,8 @@ static struct clk_regmap gxbb_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = gxbb_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(gxbb_hdmi_parent_data),
+ .parent_data = gxbb_hdmi_parents,
+ .num_parents = ARRAY_SIZE(gxbb_hdmi_parents),
.flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
},
};
@@ -2547,7 +2545,7 @@ static struct clk_regmap gxbb_hdmi = {
/* VDEC clocks */
-static const struct clk_hw *gxbb_vdec_parent_hws[] = {
+static const struct clk_hw *gxbb_vdec_parents[] = {
&gxbb_fclk_div4.hw,
&gxbb_fclk_div3.hw,
&gxbb_fclk_div5.hw,
@@ -2564,8 +2562,8 @@ static struct clk_regmap gxbb_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
+ .parent_hws = gxbb_vdec_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2614,8 +2612,8 @@ static struct clk_regmap gxbb_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = gxbb_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(gxbb_vdec_parent_hws),
+ .parent_hws = gxbb_vdec_parents,
+ .num_parents = ARRAY_SIZE(gxbb_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2654,9 +2652,8 @@ static struct clk_regmap gxbb_vdec_hevc = {
},
};
-static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
- 9, 10, 11, 13, 14, };
-static const struct clk_parent_data gen_clk_parent_data[] = {
+static u32 gxbb_gen_clk_parents_val_table[] = { 0, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, };
+static const struct clk_parent_data gxbb_gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &gxbb_vdec_1.hw },
{ .hw = &gxbb_vdec_hevc.hw },
@@ -2675,7 +2672,7 @@ static struct clk_regmap gxbb_gen_clk_sel = {
.offset = HHI_GEN_CLK_CNTL,
.mask = 0xf,
.shift = 12,
- .table = mux_table_gen_clk,
+ .table = gxbb_gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
@@ -2686,8 +2683,8 @@ static struct clk_regmap gxbb_gen_clk_sel = {
* vid_pll, vid2_pll (hevc), mpll0, mpll1, mpll2, fdiv4,
* fdiv3, fdiv5, [cts_msr_clk], fdiv7, gp0_pll
*/
- .parent_data = gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(gen_clk_parent_data),
+ .parent_data = gxbb_gen_clk_parents,
+ .num_parents = ARRAY_SIZE(gxbb_gen_clk_parents),
},
};
@@ -2724,100 +2721,118 @@ static struct clk_regmap gxbb_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &gxbb_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-static MESON_GATE(gxbb_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(gxbb_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(gxbb_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(gxbb_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(gxbb_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(gxbb_spicc, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(gxbb_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(gxbb_sana, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(gxbb_smart_card, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(gxbb_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(gxbb_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(gxbb_sdhc, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(gxbb_stream, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(gxbb_async_fifo, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(gxbb_sdio, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(gxbb_abuf, HHI_GCLK_MPEG0, 18);
-static MESON_GATE(gxbb_hiu_iface, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(gxbb_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(gxbb_emmc_a, HHI_GCLK_MPEG0, 24);
-static MESON_GATE(gxbb_emmc_b, HHI_GCLK_MPEG0, 25);
-static MESON_GATE(gxbb_emmc_c, HHI_GCLK_MPEG0, 26);
-static MESON_GATE(gxl_acodec, HHI_GCLK_MPEG0, 28);
-static MESON_GATE(gxbb_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2);
-static MESON_GATE(gxbb_eth, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(gxbb_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(gxbb_blkmv, HHI_GCLK_MPEG1, 14);
-static MESON_GATE(gxbb_aiu, HHI_GCLK_MPEG1, 15);
-static MESON_GATE(gxbb_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(gxbb_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(gxbb_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(gxbb_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(gxbb_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(gxbb_nand, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(gxbb_dos_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(gxbb_usb, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(gxbb_vdin1, HHI_GCLK_MPEG1, 28);
-static MESON_GATE(gxbb_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(gxbb_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(gxbb_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(gxbb_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(gxbb_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(gxbb_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(gxbb_hdmi_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(gxbb_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(gxbb_dvin, HHI_GCLK_MPEG2, 12);
-static MESON_GATE(gxbb_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(gxbb_sar_adc, HHI_GCLK_MPEG2, 22);
-static MESON_GATE(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29);
-
-static MESON_GATE(gxbb_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(gxbb_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(gxbb_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(gxbb_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(gxbb_gclk_venci_int0, HHI_GCLK_OTHER, 8);
-static MESON_GATE(gxbb_gclk_vencp_int, HHI_GCLK_OTHER, 9);
-static MESON_GATE(gxbb_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(gxbb_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(gxbb_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(gxbb_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(gxbb_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(gxbb_gclk_venci_int1, HHI_GCLK_OTHER, 22);
-static MESON_GATE(gxbb_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(gxbb_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(gxbb_vclk_other, HHI_GCLK_OTHER, 26);
-static MESON_GATE(gxbb_edp, HHI_GCLK_OTHER, 31);
+static const struct clk_parent_data gxbb_pclk_parents = { .hw = &gxbb_clk81.hw };
+
+#define GXBB_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &gxbb_pclk_parents, _flags)
+
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary,
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate, or
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static GXBB_PCLK(gxbb_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_spicc, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sana, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_smart_card, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sdhc, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_stream, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_async_fifo, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sdio, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_abuf, HHI_GCLK_MPEG0, 18, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hiu_iface, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_a, HHI_GCLK_MPEG0, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_b, HHI_GCLK_MPEG0, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_emmc_c, HHI_GCLK_MPEG0, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxl_acodec, HHI_GCLK_MPEG0, 28, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_eth, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_blkmv, HHI_GCLK_MPEG1, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_aiu, HHI_GCLK_MPEG1, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_nand, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dos_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vdin1, HHI_GCLK_MPEG1, 28, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hdmi_intr_sync, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_hdmi_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dvin, HHI_GCLK_MPEG2, 12, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sar_adc, HHI_GCLK_MPEG2, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_clk81_a53, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+
+static GXBB_PCLK(gxbb_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_venci_int0, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_vencp_int, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_gclk_venci_int1, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_venclmcc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_vclk_other, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_edp, HHI_GCLK_OTHER, 31, CLK_IGNORE_UNUSED);
/* Always On (AO) domain gates */
-static MESON_GATE(gxbb_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(gxbb_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(gxbb_ao_iface, HHI_GCLK_AO, 3);
-static MESON_GATE(gxbb_ao_i2c, HHI_GCLK_AO, 4);
+static GXBB_PCLK(gxbb_ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
+static GXBB_PCLK(gxbb_ao_i2c, HHI_GCLK_AO, 4, CLK_IGNORE_UNUSED);
/* AIU gates */
-static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu.hw);
-static MESON_PCLK(gxbb_iec958, HHI_GCLK_MPEG1, 7, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_i2s_out, HHI_GCLK_MPEG1, 8, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_amclk, HHI_GCLK_MPEG1, 9, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_aififo2, HHI_GCLK_MPEG1, 10, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_mixer, HHI_GCLK_MPEG1, 11, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12, &gxbb_aiu_glue.hw);
-static MESON_PCLK(gxbb_adc, HHI_GCLK_MPEG1, 13, &gxbb_aiu_glue.hw);
+static const struct clk_parent_data gxbb_aiu_glue_parents = { .hw = &gxbb_aiu.hw };
+static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu_glue_parents, CLK_IGNORE_UNUSED);
+
+static const struct clk_parent_data gxbb_aiu_pclk_parents = { .hw = &gxbb_aiu_glue.hw };
+#define GXBB_AIU_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(_name, HHI_GCLK_MPEG1, _bit, &gxbb_aiu_pclk_parents, _flags)
+
+static GXBB_AIU_PCLK(gxbb_iec958, 7, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_i2s_out, 8, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_amclk, 9, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_aififo2, 10, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_mixer, 11, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_mixer_iface, 12, CLK_IGNORE_UNUSED);
+static GXBB_AIU_PCLK(gxbb_adc, 13, CLK_IGNORE_UNUSED);
/* Array of all clocks provided by this provider */
@@ -2831,8 +2846,8 @@ static struct clk_hw *gxbb_hw_clks[] = {
[CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
[CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &gxbb_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &gxbb_clk81_div.hw,
[CLKID_CLK81] = &gxbb_clk81.hw,
[CLKID_MPLL0] = &gxbb_mpll0.hw,
[CLKID_MPLL1] = &gxbb_mpll1.hw,
@@ -3039,8 +3054,8 @@ static struct clk_hw *gxl_hw_clks[] = {
[CLKID_FCLK_DIV5] = &gxbb_fclk_div5.hw,
[CLKID_FCLK_DIV7] = &gxbb_fclk_div7.hw,
[CLKID_GP0_PLL] = &gxbb_gp0_pll.hw,
- [CLKID_MPEG_SEL] = &gxbb_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &gxbb_mpeg_clk_div.hw,
+ [CLKID_MPEG_SEL] = &gxbb_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &gxbb_clk81_div.hw,
[CLKID_CLK81] = &gxbb_clk81.hw,
[CLKID_MPLL0] = &gxbb_mpll0.hw,
[CLKID_MPLL1] = &gxbb_mpll1.hw,
@@ -3237,35 +3252,35 @@ static struct clk_hw *gxl_hw_clks[] = {
[CLKID_ACODEC] = &gxl_acodec.hw,
};
-static const struct meson_eeclkc_data gxbb_clkc_data = {
+static const struct meson_clkc_data gxbb_clkc_data = {
.hw_clks = {
.hws = gxbb_hw_clks,
.num = ARRAY_SIZE(gxbb_hw_clks),
},
};
-static const struct meson_eeclkc_data gxl_clkc_data = {
+static const struct meson_clkc_data gxl_clkc_data = {
.hw_clks = {
.hws = gxl_hw_clks,
.num = ARRAY_SIZE(gxl_hw_clks),
},
};
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id gxbb_clkc_match_table[] = {
{ .compatible = "amlogic,gxbb-clkc", .data = &gxbb_clkc_data },
{ .compatible = "amlogic,gxl-clkc", .data = &gxl_clkc_data },
{},
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, gxbb_clkc_match_table);
-static struct platform_driver gxbb_driver = {
- .probe = meson_eeclkc_probe,
+static struct platform_driver gxbb_clkc_driver = {
+ .probe = meson_clkc_syscon_probe,
.driver = {
.name = "gxbb-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = gxbb_clkc_match_table,
},
};
-module_platform_driver(gxbb_driver);
+module_platform_driver(gxbb_clkc_driver);
MODULE_DESCRIPTION("Amlogic GXBB Main Clock Controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 894c02fda072..8f6bdea18119 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -37,15 +37,23 @@ static const struct reset_control_ops meson_aoclk_reset_ops = {
int meson_aoclkc_probe(struct platform_device *pdev)
{
struct meson_aoclk_reset_controller *rstc;
- struct meson_aoclk_data *data;
+ const struct meson_clkc_data *clkc_data;
+ const struct meson_aoclk_data *data;
struct device *dev = &pdev->dev;
struct device_node *np;
struct regmap *regmap;
- int ret, clkid;
+ int ret;
- data = (struct meson_aoclk_data *) of_device_get_match_data(dev);
- if (!data)
- return -ENODEV;
+ clkc_data = of_device_get_match_data(dev);
+ if (!clkc_data)
+ return -EINVAL;
+
+ ret = meson_clkc_syscon_probe(pdev);
+ if (ret)
+ return ret;
+
+ data = container_of(clkc_data, struct meson_aoclk_data,
+ clkc_data);
rstc = devm_kzalloc(dev, sizeof(*rstc), GFP_KERNEL);
if (!rstc)
@@ -71,19 +79,7 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return ret;
}
- /* Register all clks */
- for (clkid = 0; clkid < data->hw_clks.num; clkid++) {
- if (!data->hw_clks.hws[clkid])
- continue;
-
- ret = devm_clk_hw_register(dev, data->hw_clks.hws[clkid]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
+ return 0;
}
EXPORT_SYMBOL_NS_GPL(meson_aoclkc_probe, "CLK_MESON");
diff --git a/drivers/clk/meson/meson-aoclk.h b/drivers/clk/meson/meson-aoclk.h
index ea5fc61308af..2c83e73d3a77 100644
--- a/drivers/clk/meson/meson-aoclk.h
+++ b/drivers/clk/meson/meson-aoclk.h
@@ -20,10 +20,10 @@
#include "meson-clkc-utils.h"
struct meson_aoclk_data {
+ const struct meson_clkc_data clkc_data;
const unsigned int reset_reg;
const int num_reset;
const unsigned int *reset;
- struct meson_clk_hw_data hw_clks;
};
struct meson_aoclk_reset_controller {
diff --git a/drivers/clk/meson/meson-clkc-utils.c b/drivers/clk/meson/meson-clkc-utils.c
index 6937d1482719..870f50548e26 100644
--- a/drivers/clk/meson/meson-clkc-utils.c
+++ b/drivers/clk/meson/meson-clkc-utils.c
@@ -3,9 +3,13 @@
* Copyright (c) 2023 Neil Armstrong <neil.armstrong@linaro.org>
*/
-#include <linux/of_device.h>
#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
#include "meson-clkc-utils.h"
struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_data)
@@ -22,6 +26,86 @@ struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_da
}
EXPORT_SYMBOL_NS_GPL(meson_clk_hw_get, "CLK_MESON");
+static int meson_clkc_init(struct device *dev, struct regmap *map)
+{
+ const struct meson_clkc_data *data;
+ struct clk_hw *hw;
+ int ret, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ if (data->init_count)
+ regmap_multi_reg_write(map, data->init_regs, data->init_count);
+
+ for (i = 0; i < data->hw_clks.num; i++) {
+ hw = data->hw_clks.hws[i];
+
+ /* array might be sparse */
+ if (!hw)
+ continue;
+
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret) {
+ dev_err(dev, "registering %s clock failed\n",
+ hw->init->name);
+ return ret;
+ }
+ }
+
+ return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
+}
+
+int meson_clkc_syscon_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct regmap *map;
+
+ np = of_get_parent(dev->of_node);
+ map = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR(map)) {
+ dev_err(dev, "failed to get parent syscon regmap\n");
+ return PTR_ERR(map);
+ }
+
+ return meson_clkc_init(dev, map);
+}
+EXPORT_SYMBOL_NS_GPL(meson_clkc_syscon_probe, "CLK_MESON");
+
+int meson_clkc_mmio_probe(struct platform_device *pdev)
+{
+ const struct meson_clkc_data *data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *map;
+ struct regmap_config regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap_cfg.max_register = resource_size(res) - regmap_cfg.reg_stride;
+
+ map = devm_regmap_init_mmio(dev, base, &regmap_cfg);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ return meson_clkc_init(dev, map);
+}
+EXPORT_SYMBOL_NS_GPL(meson_clkc_mmio_probe, "CLK_MESON");
+
MODULE_DESCRIPTION("Amlogic Clock Controller Utilities");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("CLK_MESON");
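For illustration only (not part of the diff): with the two probe helpers above exported, a controller driver no longer needs a probe function of its own. A hypothetical foo controller sitting under the HHI syscon, with a hypothetical foo_hw_clks array, reduces to:

	static const struct meson_clkc_data foo_clkc_data = {
		.hw_clks = {
			.hws = foo_hw_clks,	/* hypothetical clk_hw array */
			.num = ARRAY_SIZE(foo_hw_clks),
		},
	};

	static const struct of_device_id foo_clkc_match_table[] = {
		{ .compatible = "amlogic,foo-clkc", .data = &foo_clkc_data },
		{ /* sentinel */ }
	};

	static struct platform_driver foo_clkc_driver = {
		.probe = meson_clkc_syscon_probe,
		.driver = {
			.name = "foo-clkc",
			.of_match_table = foo_clkc_match_table,
		},
	};
	module_platform_driver(foo_clkc_driver);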
diff --git a/drivers/clk/meson/meson-clkc-utils.h b/drivers/clk/meson/meson-clkc-utils.h
index fe6f40728949..ddadf14b4923 100644
--- a/drivers/clk/meson/meson-clkc-utils.h
+++ b/drivers/clk/meson/meson-clkc-utils.h
@@ -9,6 +9,8 @@
#include <linux/of_device.h>
#include <linux/clk-provider.h>
+struct platform_device;
+
struct meson_clk_hw_data {
struct clk_hw **hws;
unsigned int num;
@@ -16,4 +18,91 @@ struct meson_clk_hw_data {
struct clk_hw *meson_clk_hw_get(struct of_phandle_args *clkspec, void *clk_hw_data);
+struct meson_clkc_data {
+ const struct reg_sequence *init_regs;
+ unsigned int init_count;
+ struct meson_clk_hw_data hw_clks;
+};
+
+int meson_clkc_syscon_probe(struct platform_device *pdev);
+int meson_clkc_mmio_probe(struct platform_device *pdev);
+
+#define __MESON_PCLK(_name, _reg, _bit, _ops, _pdata, _flags) \
+struct clk_regmap _name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = _ops, \
+ .parent_data = (_pdata), \
+ .num_parents = 1, \
+ .flags = (_flags), \
+ }, \
+}
+
+#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ops, _pdata, _flags)
+
+#define MESON_PCLK_RO(_name, _reg, _bit, _pdata, _flags) \
+ __MESON_PCLK(_name, _reg, _bit, &clk_regmap_gate_ro_ops, _pdata, _flags)
+
+/* Helpers for the usual sel/div/gate composite clocks */
+#define MESON_COMP_SEL(_prefix, _name, _reg, _shift, _mask, _pdata, \
+ _table, _dflags, _iflags) \
+struct clk_regmap _prefix##_name##_sel = { \
+ .data = &(struct clk_regmap_mux_data) { \
+ .offset = (_reg), \
+ .mask = (_mask), \
+ .shift = (_shift), \
+ .flags = (_dflags), \
+ .table = (_table), \
+ }, \
+ .hw.init = &(struct clk_init_data){ \
+ .name = #_name "_sel", \
+ .ops = &clk_regmap_mux_ops, \
+ .parent_data = _pdata, \
+ .num_parents = ARRAY_SIZE(_pdata), \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define MESON_COMP_DIV(_prefix, _name, _reg, _shift, _width, \
+ _dflags, _iflags) \
+struct clk_regmap _prefix##_name##_div = { \
+ .data = &(struct clk_regmap_div_data) { \
+ .offset = (_reg), \
+ .shift = (_shift), \
+ .width = (_width), \
+ .flags = (_dflags), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name "_div", \
+ .ops = &clk_regmap_divider_ops, \
+ .parent_hws = (const struct clk_hw *[]) { \
+ &_prefix##_name##_sel.hw \
+ }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
+#define MESON_COMP_GATE(_prefix, _name, _reg, _bit, _iflags) \
+struct clk_regmap _prefix##_name = { \
+ .data = &(struct clk_regmap_gate_data) { \
+ .offset = (_reg), \
+ .bit_idx = (_bit), \
+ }, \
+ .hw.init = &(struct clk_init_data) { \
+ .name = #_name, \
+ .ops = &clk_regmap_gate_ops, \
+ .parent_hws = (const struct clk_hw *[]) { \
+ &_prefix##_name##_div.hw \
+ }, \
+ .num_parents = 1, \
+ .flags = (_iflags), \
+ }, \
+}
+
#endif
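For illustration only (not part of the diff): a sketch of how the three composite helpers chain together, using hypothetical foo_* names and a hypothetical SPI_CLK_CNTL register. MESON_COMP_DIV() implicitly parents on the _sel clock of the same prefix/name, and MESON_COMP_GATE() parents on the matching _div:

	static const struct clk_parent_data foo_spi_parents[] = {
		{ .fw_name = "xtal", },
		{ .hw = &foo_some_pll.hw },	/* hypothetical parent */
	};

	static MESON_COMP_SEL(foo_, spi, SPI_CLK_CNTL, 24, 0x1,
			      foo_spi_parents, NULL, 0, 0);
	static MESON_COMP_DIV(foo_, spi, SPI_CLK_CNTL, 16, 8,
			      0, CLK_SET_RATE_PARENT);
	static MESON_COMP_GATE(foo_, spi, SPI_CLK_CNTL, 31, CLK_SET_RATE_PARENT);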
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
deleted file mode 100644
index 6236bf970d79..000000000000
--- a/drivers/clk/meson/meson-eeclk.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2019 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/regmap.h>
-#include <linux/module.h>
-
-#include "clk-regmap.h"
-#include "meson-eeclk.h"
-
-int meson_eeclkc_probe(struct platform_device *pdev)
-{
- const struct meson_eeclkc_data *data;
- struct device *dev = &pdev->dev;
- struct device_node *np;
- struct regmap *map;
- int ret, i;
-
- data = of_device_get_match_data(dev);
- if (!data)
- return -EINVAL;
-
- /* Get the hhi system controller node */
- np = of_get_parent(dev->of_node);
- map = syscon_node_to_regmap(np);
- of_node_put(np);
- if (IS_ERR(map)) {
- dev_err(dev,
- "failed to get HHI regmap\n");
- return PTR_ERR(map);
- }
-
- if (data->init_count)
- regmap_multi_reg_write(map, data->init_regs, data->init_count);
-
- for (i = 0; i < data->hw_clks.num; i++) {
- /* array might be sparse */
- if (!data->hw_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, data->hw_clks.hws[i]);
- if (ret) {
- dev_err(dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, (void *)&data->hw_clks);
-}
-EXPORT_SYMBOL_NS_GPL(meson_eeclkc_probe, "CLK_MESON");
-
-MODULE_DESCRIPTION("Amlogic Main Clock Controller Helpers");
-MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS("CLK_MESON");
diff --git a/drivers/clk/meson/meson-eeclk.h b/drivers/clk/meson/meson-eeclk.h
deleted file mode 100644
index 6a81d67b46b2..000000000000
--- a/drivers/clk/meson/meson-eeclk.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2019 BayLibre, SAS.
- * Author: Jerome Brunet <jbrunet@baylibre.com>
- */
-
-#ifndef __MESON_CLKC_H
-#define __MESON_CLKC_H
-
-#include <linux/clk-provider.h>
-#include "clk-regmap.h"
-#include "meson-clkc-utils.h"
-
-struct platform_device;
-
-struct meson_eeclkc_data {
- const struct reg_sequence *init_regs;
- unsigned int init_count;
- struct meson_clk_hw_data hw_clks;
-};
-
-int meson_eeclkc_probe(struct platform_device *pdev);
-
-#endif /* __MESON_CLKC_H */
diff --git a/drivers/clk/meson/meson8-ddr.c b/drivers/clk/meson/meson8-ddr.c
index 1975fc3987e2..0f93774f7371 100644
--- a/drivers/clk/meson/meson8-ddr.c
+++ b/drivers/clk/meson/meson8-ddr.c
@@ -12,6 +12,7 @@
#include "clk-regmap.h"
#include "clk-pll.h"
+#include "meson-clkc-utils.h"
#define AM_DDR_PLL_CNTL 0x00
#define AM_DDR_PLL_CNTL1 0x04
@@ -77,60 +78,31 @@ static struct clk_regmap meson8_ddr_pll = {
},
};
-static struct clk_hw_onecell_data meson8_ddr_clk_hw_onecell_data = {
- .hws = {
- [DDR_CLKID_DDR_PLL_DCO] = &meson8_ddr_pll_dco.hw,
- [DDR_CLKID_DDR_PLL] = &meson8_ddr_pll.hw,
- },
- .num = 2,
+static struct clk_hw *meson8_ddr_hw_clks[] = {
+ [DDR_CLKID_DDR_PLL_DCO] = &meson8_ddr_pll_dco.hw,
+ [DDR_CLKID_DDR_PLL] = &meson8_ddr_pll.hw,
};
-static const struct regmap_config meson8_ddr_clkc_regmap_config = {
- .reg_bits = 8,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = DDR_CLK_STS,
+static const struct meson_clkc_data meson8_ddr_clkc_data = {
+ .hw_clks = {
+ .hws = meson8_ddr_hw_clks,
+ .num = ARRAY_SIZE(meson8_ddr_hw_clks),
+ },
};
-static int meson8_ddr_clkc_probe(struct platform_device *pdev)
-{
- struct regmap *regmap;
- void __iomem *base;
- struct clk_hw *hw;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &meson8_ddr_clkc_regmap_config);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Register all clks */
- for (i = 0; i < meson8_ddr_clk_hw_onecell_data.num; i++) {
- hw = meson8_ddr_clk_hw_onecell_data.hws[i];
-
- ret = devm_clk_hw_register(&pdev->dev, hw);
- if (ret) {
- dev_err(&pdev->dev, "Clock registration failed\n");
- return ret;
- }
- }
-
- return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_onecell_get,
- &meson8_ddr_clk_hw_onecell_data);
-}
-
static const struct of_device_id meson8_ddr_clkc_match_table[] = {
- { .compatible = "amlogic,meson8-ddr-clkc" },
- { .compatible = "amlogic,meson8b-ddr-clkc" },
+ {
+ .compatible = "amlogic,meson8-ddr-clkc",
+ .data = &meson8_ddr_clkc_data,
+ }, {
+ .compatible = "amlogic,meson8b-ddr-clkc",
+ .data = &meson8_ddr_clkc_data,
+ },
{ /* sentinel */ }
};
static struct platform_driver meson8_ddr_clkc_driver = {
- .probe = meson8_ddr_clkc_probe,
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "meson8-ddr-clkc",
.of_match_table = meson8_ddr_clkc_match_table,
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 206538326614..95d0b9cbd904 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -214,7 +214,7 @@ static const struct reg_sequence meson8b_hdmi_pll_init_regs[] = {
{ .reg = HHI_VID2_PLL_CNTL2, .def = 0x0430a800 },
};
-static const struct pll_params_table hdmi_pll_params_table[] = {
+static const struct pll_params_table meson8b_hdmi_pll_params_table[] = {
PLL_PARAMS(40, 1),
PLL_PARAMS(42, 1),
PLL_PARAMS(44, 1),
@@ -267,7 +267,7 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
.shift = 29,
.width = 1,
},
- .table = hdmi_pll_params_table,
+ .table = meson8b_hdmi_pll_params_table,
.init_regs = meson8b_hdmi_pll_init_regs,
.init_count = ARRAY_SIZE(meson8b_hdmi_pll_init_regs),
},
@@ -670,16 +670,17 @@ static struct clk_regmap meson8b_mpll2 = {
},
};
-static u32 mux_table_clk81[] = { 6, 5, 7 };
-static struct clk_regmap meson8b_mpeg_clk_sel = {
+/* clk81 is often referred to as "mpeg_clk" */
+static u32 meson8b_clk81_parents_val_table[] = { 6, 5, 7 };
+static struct clk_regmap meson8b_clk81_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MPEG_CLK_CNTL,
.mask = 0x7,
.shift = 12,
- .table = mux_table_clk81,
+ .table = meson8b_clk81_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_sel",
+ .name = "clk81_sel",
.ops = &clk_regmap_mux_ro_ops,
/*
* FIXME bits 14:12 selects from 8 possible parents:
@@ -695,17 +696,17 @@ static struct clk_regmap meson8b_mpeg_clk_sel = {
},
};
-static struct clk_regmap meson8b_mpeg_clk_div = {
+static struct clk_regmap meson8b_clk81_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_MPEG_CLK_CNTL,
.shift = 0,
.width = 7,
},
.hw.init = &(struct clk_init_data){
- .name = "mpeg_clk_div",
+ .name = "clk81_div",
.ops = &clk_regmap_divider_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_mpeg_clk_sel.hw
+ &meson8b_clk81_sel.hw
},
.num_parents = 1,
},
@@ -720,7 +721,7 @@ static struct clk_regmap meson8b_clk81 = {
.name = "clk81",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_mpeg_clk_div.hw
+ &meson8b_clk81_div.hw
},
.num_parents = 1,
.flags = CLK_IS_CRITICAL,
@@ -774,7 +775,7 @@ static struct clk_fixed_factor meson8b_cpu_in_div3 = {
},
};
-static const struct clk_div_table cpu_scale_table[] = {
+static const struct clk_div_table meson8b_cpu_scale_div_table[] = {
{ .val = 1, .div = 4 },
{ .val = 2, .div = 6 },
{ .val = 3, .div = 8 },
@@ -791,7 +792,7 @@ static struct clk_regmap meson8b_cpu_scale_div = {
.offset = HHI_SYS_CPU_CLK_CNTL1,
.shift = 20,
.width = 10,
- .table = cpu_scale_table,
+ .table = meson8b_cpu_scale_div_table,
.flags = CLK_DIVIDER_ALLOW_ZERO,
},
.hw.init = &(struct clk_init_data){
@@ -805,13 +806,13 @@ static struct clk_regmap meson8b_cpu_scale_div = {
},
};
-static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
+static u32 meson8b_cpu_scale_out_parents_val_table[] = { 0, 1, 3 };
static struct clk_regmap meson8b_cpu_scale_out_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
.shift = 2,
- .table = mux_table_cpu_scale_out_sel,
+ .table = meson8b_cpu_scale_out_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_scale_out_sel",
@@ -893,13 +894,13 @@ static struct clk_regmap meson8b_nand_clk_div = {
},
};
-static struct clk_regmap meson8b_nand_clk_gate = {
+static struct clk_regmap meson8b_nand_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_NAND_CLK_CNTL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "nand_clk_gate",
+ .name = "nand_clk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_nand_clk_div.hw
@@ -1000,160 +1001,137 @@ static struct clk_fixed_factor meson8b_cpu_clk_div8 = {
},
};
-static u32 mux_table_apb[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_apb_clk_sel = {
+static u32 meson8b_cpu_if_parents_val_table[] = { 1, 2, 3, 4, 5, 6, 7 };
+static const struct clk_hw *meson8b_cpu_if_parents[] = {
+ &meson8b_cpu_clk_div2.hw,
+ &meson8b_cpu_clk_div3.hw,
+ &meson8b_cpu_clk_div4.hw,
+ &meson8b_cpu_clk_div5.hw,
+ &meson8b_cpu_clk_div6.hw,
+ &meson8b_cpu_clk_div7.hw,
+ &meson8b_cpu_clk_div8.hw,
+};
+
+static struct clk_regmap meson8b_apb_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 3,
- .table = mux_table_apb,
+ .table = meson8b_cpu_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "apb_clk_sel",
+ .name = "apb_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_apb_clk_gate = {
+static struct clk_regmap meson8b_apb = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 16,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "apb_clk_dis",
+ .name = "apb",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_apb_clk_sel.hw
+ &meson8b_apb_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_periph_clk_sel = {
+static struct clk_regmap meson8b_periph_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 6,
},
.hw.init = &(struct clk_init_data){
- .name = "periph_clk_sel",
+ .name = "periph_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_periph_clk_gate = {
+static struct clk_regmap meson8b_periph = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 17,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "periph_clk_dis",
+ .name = "periph",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_periph_clk_sel.hw
+ &meson8b_periph_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static u32 mux_table_axi[] = { 1, 2, 3, 4, 5, 6, 7 };
-static struct clk_regmap meson8b_axi_clk_sel = {
+static struct clk_regmap meson8b_axi_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 9,
- .table = mux_table_axi,
+ .table = meson8b_cpu_if_parents_val_table,
},
.hw.init = &(struct clk_init_data){
- .name = "axi_clk_sel",
+ .name = "axi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_axi_clk_gate = {
+static struct clk_regmap meson8b_axi = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 18,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "axi_clk_dis",
+ .name = "axi",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_axi_clk_sel.hw
+ &meson8b_axi_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_l2_dram_clk_sel = {
+static struct clk_regmap meson8b_l2_dram_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.mask = 0x7,
.shift = 12,
},
.hw.init = &(struct clk_init_data){
- .name = "l2_dram_clk_sel",
+ .name = "l2_dram_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &meson8b_cpu_clk_div2.hw,
- &meson8b_cpu_clk_div3.hw,
- &meson8b_cpu_clk_div4.hw,
- &meson8b_cpu_clk_div5.hw,
- &meson8b_cpu_clk_div6.hw,
- &meson8b_cpu_clk_div7.hw,
- &meson8b_cpu_clk_div8.hw,
- },
- .num_parents = 7,
+ .parent_hws = meson8b_cpu_if_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cpu_if_parents),
},
};
-static struct clk_regmap meson8b_l2_dram_clk_gate = {
+static struct clk_regmap meson8b_l2_dram = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.bit_idx = 19,
.flags = CLK_GATE_SET_TO_DISABLE,
},
.hw.init = &(struct clk_init_data){
- .name = "l2_dram_clk_dis",
+ .name = "l2_dram",
.ops = &clk_regmap_gate_ro_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_l2_dram_clk_sel.hw
+ &meson8b_l2_dram_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1286,7 +1264,7 @@ static struct clk_regmap meson8b_vid_pll_final_div = {
},
};
-static const struct clk_hw *meson8b_vclk_mux_parent_hws[] = {
+static const struct clk_hw *meson8b_vclk_parents[] = {
&meson8b_vid_pll_final_div.hw,
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
@@ -1305,8 +1283,8 @@ static struct clk_regmap meson8b_vclk_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
+ .parent_hws = meson8b_vclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1343,13 +1321,13 @@ static struct clk_regmap meson8b_vclk_en = {
},
};
-static struct clk_regmap meson8b_vclk_div1_gate = {
+static struct clk_regmap meson8b_vclk_div1 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div1_en",
+ .name = "vclk_div1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1363,7 +1341,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div2",
+ .name = "vclk_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1373,13 +1351,13 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = {
}
};
-static struct clk_regmap meson8b_vclk_div2_div_gate = {
+static struct clk_regmap meson8b_vclk_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div2_en",
+ .name = "vclk_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div2_div.hw
@@ -1393,7 +1371,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div4",
+ .name = "vclk_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1403,13 +1381,13 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = {
}
};
-static struct clk_regmap meson8b_vclk_div4_div_gate = {
+static struct clk_regmap meson8b_vclk_div4 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div4_en",
+ .name = "vclk_div4",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div4_div.hw
@@ -1423,7 +1401,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
.mult = 1,
.div = 6,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div6",
+ .name = "vclk_div6_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1433,13 +1411,13 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = {
}
};
-static struct clk_regmap meson8b_vclk_div6_div_gate = {
+static struct clk_regmap meson8b_vclk_div6 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div6_en",
+ .name = "vclk_div6",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div6_div.hw
@@ -1453,7 +1431,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
.mult = 1,
.div = 12,
.hw.init = &(struct clk_init_data){
- .name = "vclk_div12",
+ .name = "vclk_div12_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
@@ -1463,13 +1441,13 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = {
}
};
-static struct clk_regmap meson8b_vclk_div12_div_gate = {
+static struct clk_regmap meson8b_vclk_div12 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VID_CLK_CNTL,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk_div12_en",
+ .name = "vclk_div12",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div12_div.hw
@@ -1488,13 +1466,13 @@ static struct clk_regmap meson8b_vclk2_in_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
+ .parent_hws = meson8b_vclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_parents),
.flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
-static struct clk_regmap meson8b_vclk2_clk_in_en = {
+static struct clk_regmap meson8b_vclk2_in_en = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 16,
@@ -1510,7 +1488,7 @@ static struct clk_regmap meson8b_vclk2_clk_in_en = {
},
};
-static struct clk_regmap meson8b_vclk2_clk_en = {
+static struct clk_regmap meson8b_vclk2_en = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 19,
@@ -1519,23 +1497,23 @@ static struct clk_regmap meson8b_vclk2_clk_en = {
.name = "vclk2_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_in_en.hw
+ &meson8b_vclk2_in_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap meson8b_vclk2_div1_gate = {
+static struct clk_regmap meson8b_vclk2_div1 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 0,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div1_en",
+ .name = "vclk2_div1",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1546,23 +1524,23 @@ static struct clk_fixed_factor meson8b_vclk2_div2_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div2",
+ .name = "vclk2_div2_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div2_div_gate = {
+static struct clk_regmap meson8b_vclk2_div2 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 1,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div2_en",
+ .name = "vclk2_div2",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div2_div.hw
@@ -1576,23 +1554,23 @@ static struct clk_fixed_factor meson8b_vclk2_div4_div = {
.mult = 1,
.div = 4,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div4",
+ .name = "vclk2_div4_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div4_div_gate = {
+static struct clk_regmap meson8b_vclk2_div4 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 2,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div4_en",
+ .name = "vclk2_div4",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div4_div.hw
@@ -1606,23 +1584,23 @@ static struct clk_fixed_factor meson8b_vclk2_div6_div = {
.mult = 1,
.div = 6,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div6",
+ .name = "vclk2_div6_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div6_div_gate = {
+static struct clk_regmap meson8b_vclk2_div6 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 3,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div6_en",
+ .name = "vclk2_div6",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div6_div.hw
@@ -1636,23 +1614,23 @@ static struct clk_fixed_factor meson8b_vclk2_div12_div = {
.mult = 1,
.div = 12,
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div12",
+ .name = "vclk2_div12_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_vclk2_clk_en.hw
+ &meson8b_vclk2_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
}
};
-static struct clk_regmap meson8b_vclk2_div12_div_gate = {
+static struct clk_regmap meson8b_vclk2_div12 = {
.data = &(struct clk_regmap_gate_data){
.offset = HHI_VIID_CLK_DIV,
.bit_idx = 4,
},
.hw.init = &(struct clk_init_data){
- .name = "vclk2_div12_en",
+ .name = "vclk2_div12",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div12_div.hw
@@ -1662,12 +1640,12 @@ static struct clk_regmap meson8b_vclk2_div12_div_gate = {
},
};
-static const struct clk_hw *meson8b_vclk_enc_mux_parent_hws[] = {
- &meson8b_vclk_div1_gate.hw,
- &meson8b_vclk_div2_div_gate.hw,
- &meson8b_vclk_div4_div_gate.hw,
- &meson8b_vclk_div6_div_gate.hw,
- &meson8b_vclk_div12_div_gate.hw,
+static const struct clk_hw *meson8b_vclk_enc_parents[] = {
+ &meson8b_vclk_div1.hw,
+ &meson8b_vclk_div2.hw,
+ &meson8b_vclk_div4.hw,
+ &meson8b_vclk_div6.hw,
+ &meson8b_vclk_div12.hw,
};
static struct clk_regmap meson8b_cts_enct_sel = {
@@ -1679,8 +1657,8 @@ static struct clk_regmap meson8b_cts_enct_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enct_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1710,8 +1688,8 @@ static struct clk_regmap meson8b_cts_encp_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1741,8 +1719,8 @@ static struct clk_regmap meson8b_cts_enci_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1772,8 +1750,8 @@ static struct clk_regmap meson8b_hdmi_tx_pixel_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1794,14 +1772,6 @@ static struct clk_regmap meson8b_hdmi_tx_pixel = {
},
};
-static const struct clk_hw *meson8b_vclk2_enc_mux_parent_hws[] = {
- &meson8b_vclk2_div1_gate.hw,
- &meson8b_vclk2_div2_div_gate.hw,
- &meson8b_vclk2_div4_div_gate.hw,
- &meson8b_vclk2_div6_div_gate.hw,
- &meson8b_vclk2_div12_div_gate.hw,
-};
-
static struct clk_regmap meson8b_cts_encl_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VIID_CLK_DIV,
@@ -1811,8 +1781,8 @@ static struct clk_regmap meson8b_cts_encl_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1842,8 +1812,8 @@ static struct clk_regmap meson8b_cts_vdac0_sel = {
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vclk2_enc_mux_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
+ .parent_hws = meson8b_vclk_enc_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vclk_enc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1926,7 +1896,8 @@ static struct clk_regmap meson8b_hdmi_sys = {
* CLK_SET_RATE_GATE is set.
* Meson8 only has mali_0 and no glitch-free mux.
*/
-static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
+static u32 meson8b_mali_parents_val_table[] = { 0, 2, 3, 4, 5, 6, 7 };
+static const struct clk_parent_data meson8b_mali_parents[] = {
{ .fw_name = "xtal", .name = "xtal", .index = -1, },
{ .hw = &meson8b_mpll2.hw, },
{ .hw = &meson8b_mpll1.hw, },
@@ -1936,20 +1907,18 @@ static const struct clk_parent_data meson8b_mali_0_1_parent_data[] = {
{ .hw = &meson8b_fclk_div5.hw, },
};
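/*
 * Illustration of the mux table semantics: the register value programmed
 * for parent index i is table[i], so parent 0 (xtal) maps to value 0,
 * parent 1 (mpll2) to value 2, and so on. Register value 1 has no usable
 * parent and is skipped.
 */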
-static u32 meson8b_mali_0_1_mux_table[] = { 0, 2, 3, 4, 5, 6, 7 };
-
static struct clk_regmap meson8b_mali_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_MALI_CLK_CNTL,
.mask = 0x7,
.shift = 9,
- .table = meson8b_mali_0_1_mux_table,
+ .table = meson8b_mali_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = meson8b_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
+ .parent_data = meson8b_mali_parents,
+ .num_parents = ARRAY_SIZE(meson8b_mali_parents),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -1998,13 +1967,13 @@ static struct clk_regmap meson8b_mali_1_sel = {
.offset = HHI_MALI_CLK_CNTL,
.mask = 0x7,
.shift = 25,
- .table = meson8b_mali_0_1_mux_table,
+ .table = meson8b_mali_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = meson8b_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(meson8b_mali_0_1_parent_data),
+ .parent_data = meson8b_mali_parents,
+ .num_parents = ARRAY_SIZE(meson8b_mali_parents),
/*
* Don't propagate rate changes up because the only changeable
* parents are mpll1 and mpll2 but we need those for audio and
@@ -2139,20 +2108,13 @@ static struct clk_regmap meson8m2_gp_pll = {
},
};
-static const struct clk_hw *meson8b_vpu_0_1_parent_hws[] = {
+static const struct clk_hw *meson8b_vpu_parents[] = {
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
&meson8b_fclk_div5.hw,
&meson8b_fclk_div7.hw,
};
-static const struct clk_hw *mmeson8m2_vpu_0_1_parent_hws[] = {
- &meson8b_fclk_div4.hw,
- &meson8b_fclk_div3.hw,
- &meson8b_fclk_div5.hw,
- &meson8m2_gp_pll.hw,
-};
-
static struct clk_regmap meson8b_vpu_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2162,12 +2124,19 @@ static struct clk_regmap meson8b_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
+ .parent_hws = meson8b_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
+static const struct clk_hw *meson8m2_vpu_parents[] = {
+ &meson8b_fclk_div4.hw,
+ &meson8b_fclk_div3.hw,
+ &meson8b_fclk_div5.hw,
+ &meson8m2_gp_pll.hw,
+};
+
static struct clk_regmap meson8m2_vpu_0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VPU_CLK_CNTL,
@@ -2177,8 +2146,8 @@ static struct clk_regmap meson8m2_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
+ .parent_hws = meson8m2_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8m2_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2233,8 +2202,8 @@ static struct clk_regmap meson8b_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vpu_0_1_parent_hws),
+ .parent_hws = meson8b_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2248,8 +2217,8 @@ static struct clk_regmap meson8m2_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = mmeson8m2_vpu_0_1_parent_hws,
- .num_parents = ARRAY_SIZE(mmeson8m2_vpu_0_1_parent_hws),
+ .parent_hws = meson8m2_vpu_parents,
+ .num_parents = ARRAY_SIZE(meson8m2_vpu_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2321,7 +2290,7 @@ static struct clk_regmap meson8b_vpu = {
},
};
-static const struct clk_hw *meson8b_vdec_parent_hws[] = {
+static const struct clk_hw *meson8b_vdec_parents[] = {
&meson8b_fclk_div4.hw,
&meson8b_fclk_div3.hw,
&meson8b_fclk_div5.hw,
@@ -2340,8 +2309,8 @@ static struct clk_regmap meson8b_vdec_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2443,8 +2412,8 @@ static struct clk_regmap meson8b_vdec_hcodec_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hcodec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2493,8 +2462,8 @@ static struct clk_regmap meson8b_vdec_2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2543,8 +2512,8 @@ static struct clk_regmap meson8b_vdec_hevc_sel = {
.hw.init = &(struct clk_init_data){
.name = "vdec_hevc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_vdec_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_vdec_parent_hws),
+ .parent_hws = meson8b_vdec_parents,
+ .num_parents = ARRAY_SIZE(meson8b_vdec_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2603,27 +2572,26 @@ static struct clk_regmap meson8b_vdec_hevc = {
};
/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const struct clk_hw *meson8b_cts_amclk_parent_hws[] = {
+static u32 meson8b_cts_mclk_parents_val_table[] = { 1, 2, 3 };
+static const struct clk_hw *meson8b_cts_mclk_parents[] = {
&meson8b_mpll0.hw,
&meson8b_mpll1.hw,
&meson8b_mpll2.hw
};
-static u32 meson8b_cts_amclk_mux_table[] = { 1, 2, 3 };
-
static struct clk_regmap meson8b_cts_amclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL,
.mask = 0x3,
.shift = 9,
- .table = meson8b_cts_amclk_mux_table,
+ .table = meson8b_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data){
.name = "cts_amclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_cts_amclk_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_cts_amclk_parent_hws),
+ .parent_hws = meson8b_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cts_mclk_parents),
},
};
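/*
 * Note: CLK_MUX_ROUND_CLOSEST makes the mux select the parent that yields
 * the rate closest to the request (instead of the highest rate not above
 * it), which suits audio master clocks derived from the MPLLs.
 */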
@@ -2661,28 +2629,19 @@ static struct clk_regmap meson8b_cts_amclk = {
},
};
-/* TODO: the clock at index 0 is "DDR_PLL" which we don't support yet */
-static const struct clk_hw *meson8b_cts_mclk_i958_parent_hws[] = {
- &meson8b_mpll0.hw,
- &meson8b_mpll1.hw,
- &meson8b_mpll2.hw
-};
-
-static u32 meson8b_cts_mclk_i958_mux_table[] = { 1, 2, 3 };
-
static struct clk_regmap meson8b_cts_mclk_i958_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_AUD_CLK_CNTL2,
.mask = 0x3,
.shift = 25,
- .table = meson8b_cts_mclk_i958_mux_table,
+ .table = meson8b_cts_mclk_parents_val_table,
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
.name = "cts_mclk_i958_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = meson8b_cts_mclk_i958_parent_hws,
- .num_parents = ARRAY_SIZE(meson8b_cts_mclk_i958_parent_hws),
+ .parent_hws = meson8b_cts_mclk_parents,
+ .num_parents = ARRAY_SIZE(meson8b_cts_mclk_parents),
},
};
@@ -2742,113 +2701,128 @@ static struct clk_regmap meson8b_cts_i958 = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &meson8b_clk81.hw)
-
-/* Everything Else (EE) domain gates */
-
-static MESON_GATE(meson8b_ddr, HHI_GCLK_MPEG0, 0);
-static MESON_GATE(meson8b_dos, HHI_GCLK_MPEG0, 1);
-static MESON_GATE(meson8b_isa, HHI_GCLK_MPEG0, 5);
-static MESON_GATE(meson8b_pl301, HHI_GCLK_MPEG0, 6);
-static MESON_GATE(meson8b_periphs, HHI_GCLK_MPEG0, 7);
-static MESON_GATE(meson8b_spicc, HHI_GCLK_MPEG0, 8);
-static MESON_GATE(meson8b_i2c, HHI_GCLK_MPEG0, 9);
-static MESON_GATE(meson8b_sar_adc, HHI_GCLK_MPEG0, 10);
-static MESON_GATE(meson8b_smart_card, HHI_GCLK_MPEG0, 11);
-static MESON_GATE(meson8b_rng0, HHI_GCLK_MPEG0, 12);
-static MESON_GATE(meson8b_uart0, HHI_GCLK_MPEG0, 13);
-static MESON_GATE(meson8b_sdhc, HHI_GCLK_MPEG0, 14);
-static MESON_GATE(meson8b_stream, HHI_GCLK_MPEG0, 15);
-static MESON_GATE(meson8b_async_fifo, HHI_GCLK_MPEG0, 16);
-static MESON_GATE(meson8b_sdio, HHI_GCLK_MPEG0, 17);
-static MESON_GATE(meson8b_abuf, HHI_GCLK_MPEG0, 18);
-static MESON_GATE(meson8b_hiu_iface, HHI_GCLK_MPEG0, 19);
-static MESON_GATE(meson8b_assist_misc, HHI_GCLK_MPEG0, 23);
-static MESON_GATE(meson8b_spi, HHI_GCLK_MPEG0, 30);
-
-static MESON_GATE(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2);
-static MESON_GATE(meson8b_eth, HHI_GCLK_MPEG1, 3);
-static MESON_GATE(meson8b_demux, HHI_GCLK_MPEG1, 4);
-static MESON_GATE(meson8b_blkmv, HHI_GCLK_MPEG1, 14);
-static MESON_GATE(meson8b_aiu, HHI_GCLK_MPEG1, 15);
-static MESON_GATE(meson8b_uart1, HHI_GCLK_MPEG1, 16);
-static MESON_GATE(meson8b_g2d, HHI_GCLK_MPEG1, 20);
-static MESON_GATE(meson8b_usb0, HHI_GCLK_MPEG1, 21);
-static MESON_GATE(meson8b_usb1, HHI_GCLK_MPEG1, 22);
-static MESON_GATE(meson8b_reset, HHI_GCLK_MPEG1, 23);
-static MESON_GATE(meson8b_nand, HHI_GCLK_MPEG1, 24);
-static MESON_GATE(meson8b_dos_parser, HHI_GCLK_MPEG1, 25);
-static MESON_GATE(meson8b_usb, HHI_GCLK_MPEG1, 26);
-static MESON_GATE(meson8b_vdin1, HHI_GCLK_MPEG1, 28);
-static MESON_GATE(meson8b_ahb_arb0, HHI_GCLK_MPEG1, 29);
-static MESON_GATE(meson8b_efuse, HHI_GCLK_MPEG1, 30);
-static MESON_GATE(meson8b_boot_rom, HHI_GCLK_MPEG1, 31);
-
-static MESON_GATE(meson8b_ahb_data_bus, HHI_GCLK_MPEG2, 1);
-static MESON_GATE(meson8b_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2);
-static MESON_GATE(meson8b_hdmi_intr_sync, HHI_GCLK_MPEG2, 3);
-static MESON_GATE(meson8b_hdmi_pclk, HHI_GCLK_MPEG2, 4);
-static MESON_GATE(meson8b_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8);
-static MESON_GATE(meson8b_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9);
-static MESON_GATE(meson8b_mmc_pclk, HHI_GCLK_MPEG2, 11);
-static MESON_GATE(meson8b_dvin, HHI_GCLK_MPEG2, 12);
-static MESON_GATE(meson8b_uart2, HHI_GCLK_MPEG2, 15);
-static MESON_GATE(meson8b_sana, HHI_GCLK_MPEG2, 22);
-static MESON_GATE(meson8b_vpu_intr, HHI_GCLK_MPEG2, 25);
-static MESON_GATE(meson8b_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26);
-static MESON_GATE(meson8b_clk81_a9, HHI_GCLK_MPEG2, 29);
-
-static MESON_GATE(meson8b_vclk2_venci0, HHI_GCLK_OTHER, 1);
-static MESON_GATE(meson8b_vclk2_venci1, HHI_GCLK_OTHER, 2);
-static MESON_GATE(meson8b_vclk2_vencp0, HHI_GCLK_OTHER, 3);
-static MESON_GATE(meson8b_vclk2_vencp1, HHI_GCLK_OTHER, 4);
-static MESON_GATE(meson8b_gclk_venci_int, HHI_GCLK_OTHER, 8);
-static MESON_GATE(meson8b_gclk_vencp_int, HHI_GCLK_OTHER, 9);
-static MESON_GATE(meson8b_dac_clk, HHI_GCLK_OTHER, 10);
-static MESON_GATE(meson8b_aoclk_gate, HHI_GCLK_OTHER, 14);
-static MESON_GATE(meson8b_iec958_gate, HHI_GCLK_OTHER, 16);
-static MESON_GATE(meson8b_enc480p, HHI_GCLK_OTHER, 20);
-static MESON_GATE(meson8b_rng1, HHI_GCLK_OTHER, 21);
-static MESON_GATE(meson8b_gclk_vencl_int, HHI_GCLK_OTHER, 22);
-static MESON_GATE(meson8b_vclk2_venclmcc, HHI_GCLK_OTHER, 24);
-static MESON_GATE(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25);
-static MESON_GATE(meson8b_vclk2_other, HHI_GCLK_OTHER, 26);
-static MESON_GATE(meson8b_edp, HHI_GCLK_OTHER, 31);
+static const struct clk_parent_data meson8b_pclk_parents = { .hw = &meson8b_clk81.hw };
+
+#define MESON8B_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &meson8b_pclk_parents, _flags)
+
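/*
 * A MESON8B_PCLK() entry is expected to expand to roughly the following
 * (sketch inferred from how the gates are declared here; the real
 * MESON_PCLK helper lives in a shared header and may differ in detail):
 *
 *	static struct clk_regmap _name = {
 *		.data = &(struct clk_regmap_gate_data){
 *			.offset = _reg,
 *			.bit_idx = _bit,
 *		},
 *		.hw.init = &(struct clk_init_data){
 *			.name = #_name,
 *			.ops = &clk_regmap_gate_ops,
 *			.parent_data = &meson8b_pclk_parents,
 *			.num_parents = 1,
 *			.flags = _flags,
 *		},
 *	};
 */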
+/*
+ * Everything Else (EE) domain gates
+ *
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical
+ * reasons. Users are encouraged to test without it and submit changes to:
+ * - remove the flag if it is not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ *   if appropriate
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ *   for a particular clock
+ */
+static MESON8B_PCLK(meson8b_ddr, HHI_GCLK_MPEG0, 0, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dos, HHI_GCLK_MPEG0, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_isa, HHI_GCLK_MPEG0, 5, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_pl301, HHI_GCLK_MPEG0, 6, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_periphs, HHI_GCLK_MPEG0, 7, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_spicc, HHI_GCLK_MPEG0, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_i2c, HHI_GCLK_MPEG0, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sar_adc, HHI_GCLK_MPEG0, 10, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_smart_card, HHI_GCLK_MPEG0, 11, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_rng0, HHI_GCLK_MPEG0, 12, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart0, HHI_GCLK_MPEG0, 13, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sdhc, HHI_GCLK_MPEG0, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_stream, HHI_GCLK_MPEG0, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_async_fifo, HHI_GCLK_MPEG0, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sdio, HHI_GCLK_MPEG0, 17, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_abuf, HHI_GCLK_MPEG0, 18, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hiu_iface, HHI_GCLK_MPEG0, 19, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_assist_misc, HHI_GCLK_MPEG0, 23, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_spi, HHI_GCLK_MPEG0, 30, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_eth, HHI_GCLK_MPEG1, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_demux, HHI_GCLK_MPEG1, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_blkmv, HHI_GCLK_MPEG1, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_aiu, HHI_GCLK_MPEG1, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart1, HHI_GCLK_MPEG1, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_g2d, HHI_GCLK_MPEG1, 20, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb0, HHI_GCLK_MPEG1, 21, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb1, HHI_GCLK_MPEG1, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_reset, HHI_GCLK_MPEG1, 23, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_nand, HHI_GCLK_MPEG1, 24, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dos_parser, HHI_GCLK_MPEG1, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb, HHI_GCLK_MPEG1, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vdin1, HHI_GCLK_MPEG1, 28, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ahb_arb0, HHI_GCLK_MPEG1, 29, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_efuse, HHI_GCLK_MPEG1, 30, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_boot_rom, HHI_GCLK_MPEG1, 31, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_ahb_data_bus, HHI_GCLK_MPEG2, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ahb_ctrl_bus, HHI_GCLK_MPEG2, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hdmi_intr_sync, HHI_GCLK_MPEG2, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_hdmi_pclk, HHI_GCLK_MPEG2, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb1_ddr_bridge, HHI_GCLK_MPEG2, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_usb0_ddr_bridge, HHI_GCLK_MPEG2, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_mmc_pclk, HHI_GCLK_MPEG2, 11, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dvin, HHI_GCLK_MPEG2, 12, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_uart2, HHI_GCLK_MPEG2, 15, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sana, HHI_GCLK_MPEG2, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vpu_intr, HHI_GCLK_MPEG2, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_sec_ahb_ahb3_bridge, HHI_GCLK_MPEG2, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_clk81_a9, HHI_GCLK_MPEG2, 29, CLK_IGNORE_UNUSED);
+
+static MESON8B_PCLK(meson8b_vclk2_venci0, HHI_GCLK_OTHER, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_venci1, HHI_GCLK_OTHER, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencp0, HHI_GCLK_OTHER, 3, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencp1, HHI_GCLK_OTHER, 4, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_venci_int, HHI_GCLK_OTHER, 8, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_vencp_int, HHI_GCLK_OTHER, 9, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_dac_clk, HHI_GCLK_OTHER, 10, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_aoclk_gate, HHI_GCLK_OTHER, 14, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_iec958_gate, HHI_GCLK_OTHER, 16, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_enc480p, HHI_GCLK_OTHER, 20, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_rng1, HHI_GCLK_OTHER, 21, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_gclk_vencl_int, HHI_GCLK_OTHER, 22, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_venclmcc, HHI_GCLK_OTHER, 24, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_vclk2_other, HHI_GCLK_OTHER, 26, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_edp, HHI_GCLK_OTHER, 31, CLK_IGNORE_UNUSED);
/* AIU gates */
-#define MESON_AIU_GLUE_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &meson8b_aiu_glue.hw)
-
-static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6, &meson8b_aiu.hw);
-static MESON_AIU_GLUE_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7);
-static MESON_AIU_GLUE_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8);
-static MESON_AIU_GLUE_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9);
-static MESON_AIU_GLUE_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10);
-static MESON_AIU_GLUE_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11);
-static MESON_AIU_GLUE_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12);
-static MESON_AIU_GLUE_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13);
+static const struct clk_parent_data meson8b_aiu_glue_parents = { .hw = &meson8b_aiu.hw };
+static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6,
+ &meson8b_aiu_glue_parents, CLK_IGNORE_UNUSED);
+
+static const struct clk_parent_data meson8b_aiu_pclk_parents = { .hw = &meson8b_aiu_glue.hw };
+#define MESON8B_AIU_PCLK(_name, _bit, _flags) \
+ MESON_PCLK(_name, HHI_GCLK_MPEG1, _bit, &meson8b_aiu_pclk_parents, _flags)
+
+static MESON8B_AIU_PCLK(meson8b_iec958, 7, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_i2s_out, 8, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_amclk, 9, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_aififo2, 10, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_mixer, 11, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_mixer_iface, 12, CLK_IGNORE_UNUSED);
+static MESON8B_AIU_PCLK(meson8b_adc, 13, CLK_IGNORE_UNUSED);
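/*
 * Resulting AIU topology, for reference: clk81 -> aiu -> aiu_glue, with the
 * seven gates above (iec958, i2s_out, amclk, aififo2, mixer, mixer_iface
 * and adc) all parented to aiu_glue.
 */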
/* Always On (AO) domain gates */
-static MESON_GATE(meson8b_ao_media_cpu, HHI_GCLK_AO, 0);
-static MESON_GATE(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1);
-static MESON_GATE(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2);
-static MESON_GATE(meson8b_ao_iface, HHI_GCLK_AO, 3);
+static MESON8B_PCLK(meson8b_ao_media_cpu, HHI_GCLK_AO, 0, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_ahb_sram, HHI_GCLK_AO, 1, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_ahb_bus, HHI_GCLK_AO, 2, CLK_IGNORE_UNUSED);
+static MESON8B_PCLK(meson8b_ao_iface, HHI_GCLK_AO, 3, CLK_IGNORE_UNUSED);
static struct clk_hw *meson8_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -2945,7 +2919,7 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -2956,14 +2930,14 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -2974,27 +2948,27 @@ static struct clk_hw *meson8_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -3041,18 +3015,18 @@ static struct clk_hw *meson8_hw_clks[] = {
};
static struct clk_hw *meson8b_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -3149,7 +3123,7 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -3160,14 +3134,14 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -3178,27 +3152,27 @@ static struct clk_hw *meson8b_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
@@ -3256,18 +3230,18 @@ static struct clk_hw *meson8b_hw_clks[] = {
};
static struct clk_hw *meson8m2_hw_clks[] = {
- [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
- [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
- [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
- [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
- [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
- [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
- [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
- [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
- [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
- [CLKID_MPEG_SEL] = &meson8b_mpeg_clk_sel.hw,
- [CLKID_MPEG_DIV] = &meson8b_mpeg_clk_div.hw,
- [CLKID_CLK81] = &meson8b_clk81.hw,
+ [CLKID_PLL_FIXED] = &meson8b_fixed_pll.hw,
+ [CLKID_PLL_VID] = &meson8b_vid_pll.hw,
+ [CLKID_PLL_SYS] = &meson8b_sys_pll.hw,
+ [CLKID_FCLK_DIV2] = &meson8b_fclk_div2.hw,
+ [CLKID_FCLK_DIV3] = &meson8b_fclk_div3.hw,
+ [CLKID_FCLK_DIV4] = &meson8b_fclk_div4.hw,
+ [CLKID_FCLK_DIV5] = &meson8b_fclk_div5.hw,
+ [CLKID_FCLK_DIV7] = &meson8b_fclk_div7.hw,
+ [CLKID_CPUCLK] = &meson8b_cpu_clk.hw,
+ [CLKID_MPEG_SEL] = &meson8b_clk81_sel.hw,
+ [CLKID_MPEG_DIV] = &meson8b_clk81_div.hw,
+ [CLKID_CLK81] = &meson8b_clk81.hw,
[CLKID_DDR] = &meson8b_ddr.hw,
[CLKID_DOS] = &meson8b_dos.hw,
[CLKID_ISA] = &meson8b_isa.hw,
@@ -3364,7 +3338,7 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_FCLK_DIV7_DIV] = &meson8b_fclk_div7_div.hw,
[CLKID_NAND_SEL] = &meson8b_nand_clk_sel.hw,
[CLKID_NAND_DIV] = &meson8b_nand_clk_div.hw,
- [CLKID_NAND_CLK] = &meson8b_nand_clk_gate.hw,
+ [CLKID_NAND_CLK] = &meson8b_nand_clk.hw,
[CLKID_PLL_FIXED_DCO] = &meson8b_fixed_pll_dco.hw,
[CLKID_HDMI_PLL_DCO] = &meson8b_hdmi_pll_dco.hw,
[CLKID_PLL_SYS_DCO] = &meson8b_sys_pll_dco.hw,
@@ -3375,14 +3349,14 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_CPU_CLK_DIV6] = &meson8b_cpu_clk_div6.hw,
[CLKID_CPU_CLK_DIV7] = &meson8b_cpu_clk_div7.hw,
[CLKID_CPU_CLK_DIV8] = &meson8b_cpu_clk_div8.hw,
- [CLKID_APB_SEL] = &meson8b_apb_clk_sel.hw,
- [CLKID_APB] = &meson8b_apb_clk_gate.hw,
- [CLKID_PERIPH_SEL] = &meson8b_periph_clk_sel.hw,
- [CLKID_PERIPH] = &meson8b_periph_clk_gate.hw,
- [CLKID_AXI_SEL] = &meson8b_axi_clk_sel.hw,
- [CLKID_AXI] = &meson8b_axi_clk_gate.hw,
- [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_clk_sel.hw,
- [CLKID_L2_DRAM] = &meson8b_l2_dram_clk_gate.hw,
+ [CLKID_APB_SEL] = &meson8b_apb_sel.hw,
+ [CLKID_APB] = &meson8b_apb.hw,
+ [CLKID_PERIPH_SEL] = &meson8b_periph_sel.hw,
+ [CLKID_PERIPH] = &meson8b_periph.hw,
+ [CLKID_AXI_SEL] = &meson8b_axi_sel.hw,
+ [CLKID_AXI] = &meson8b_axi.hw,
+ [CLKID_L2_DRAM_SEL] = &meson8b_l2_dram_sel.hw,
+ [CLKID_L2_DRAM] = &meson8b_l2_dram.hw,
[CLKID_HDMI_PLL_LVDS_OUT] = &meson8b_hdmi_pll_lvds_out.hw,
[CLKID_HDMI_PLL_HDMI_OUT] = &meson8b_hdmi_pll_hdmi_out.hw,
[CLKID_VID_PLL_IN_SEL] = &meson8b_vid_pll_in_sel.hw,
@@ -3393,27 +3367,27 @@ static struct clk_hw *meson8m2_hw_clks[] = {
[CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw,
[CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw,
[CLKID_VCLK_EN] = &meson8b_vclk_en.hw,
- [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw,
+ [CLKID_VCLK_DIV1] = &meson8b_vclk_div1.hw,
[CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw,
- [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw,
+ [CLKID_VCLK_DIV2] = &meson8b_vclk_div2.hw,
[CLKID_VCLK_DIV4_DIV] = &meson8b_vclk_div4_div.hw,
- [CLKID_VCLK_DIV4] = &meson8b_vclk_div4_div_gate.hw,
+ [CLKID_VCLK_DIV4] = &meson8b_vclk_div4.hw,
[CLKID_VCLK_DIV6_DIV] = &meson8b_vclk_div6_div.hw,
- [CLKID_VCLK_DIV6] = &meson8b_vclk_div6_div_gate.hw,
+ [CLKID_VCLK_DIV6] = &meson8b_vclk_div6.hw,
[CLKID_VCLK_DIV12_DIV] = &meson8b_vclk_div12_div.hw,
- [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw,
+ [CLKID_VCLK_DIV12] = &meson8b_vclk_div12.hw,
[CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw,
- [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw,
- [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw,
- [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw,
+ [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_in_en.hw,
+ [CLKID_VCLK2_EN] = &meson8b_vclk2_en.hw,
+ [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1.hw,
[CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw,
- [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw,
+ [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2.hw,
[CLKID_VCLK2_DIV4_DIV] = &meson8b_vclk2_div4_div.hw,
- [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4_div_gate.hw,
+ [CLKID_VCLK2_DIV4] = &meson8b_vclk2_div4.hw,
[CLKID_VCLK2_DIV6_DIV] = &meson8b_vclk2_div6_div.hw,
- [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6_div_gate.hw,
+ [CLKID_VCLK2_DIV6] = &meson8b_vclk2_div6.hw,
[CLKID_VCLK2_DIV12_DIV] = &meson8b_vclk2_div12_div.hw,
- [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12_div_gate.hw,
+ [CLKID_VCLK2_DIV12] = &meson8b_vclk2_div12.hw,
[CLKID_CTS_ENCT_SEL] = &meson8b_cts_enct_sel.hw,
[CLKID_CTS_ENCT] = &meson8b_cts_enct.hw,
[CLKID_CTS_ENCP_SEL] = &meson8b_cts_encp_sel.hw,
diff --git a/drivers/clk/meson/s4-peripherals.c b/drivers/clk/meson/s4-peripherals.c
index c9400cf54c84..6d69b132d1e1 100644
--- a/drivers/clk/meson/s4-peripherals.c
+++ b/drivers/clk/meson/s4-peripherals.c
@@ -62,6 +62,15 @@
#define CLKCTRL_PWM_CLK_IJ_CTRL 0x190
#define CLKCTRL_DEMOD_CLK_CTRL 0x200
+#define S4_COMP_SEL(_name, _reg, _shift, _mask, _pdata) \
+ MESON_COMP_SEL(s4_, _name, _reg, _shift, _mask, _pdata, NULL, 0, 0)
+
+#define S4_COMP_DIV(_name, _reg, _shift, _width) \
+ MESON_COMP_DIV(s4_, _name, _reg, _shift, _width, 0, CLK_SET_RATE_PARENT)
+
+#define S4_COMP_GATE(_name, _reg, _bit) \
+ MESON_COMP_GATE(s4_, _name, _reg, _bit, CLK_SET_RATE_PARENT)
+
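/*
 * Sketch only: each MESON_COMP_* helper is assumed to emit one stage of the
 * usual sel -> div -> gate composite, following the naming convention used
 * below (e.g. "sc_clk_sel" -> "sc_clk_div" -> "sc_clk"). A hypothetical
 * clock "foo" controlled by REG would then be declared as:
 *
 *	static S4_COMP_SEL(foo, REG, 9, 0x7, s4_foo_parents);
 *	static S4_COMP_DIV(foo, REG, 0, 8);
 *	static S4_COMP_GATE(foo, REG, 8);
 *
 * with each stage parented to the previous one and CLK_SET_RATE_PARENT set
 * on the div and gate, as the macro arguments above suggest.
 */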
static struct clk_regmap s4_rtc_32k_by_oscin_clkin = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_RTC_BY_OSCIN_CTRL0,
@@ -182,8 +191,8 @@ static struct clk_regmap s4_rtc_clk = {
};
/* Index 5 is AXI_CLK, which is dedicated to the AXI bus, so it is skipped. */
-static u32 mux_table_sys_ab_clk_sel[] = { 0, 1, 2, 3, 4, 6, 7 };
-static const struct clk_parent_data sys_ab_clk_parent_data[] = {
+static u32 s4_sysclk_parents_val_table[] = { 0, 1, 2, 3, 4, 6, 7 };
+static const struct clk_parent_data s4_sysclk_parents[] = {
{ .fw_name = "xtal" },
{ .fw_name = "fclk_div2" },
{ .fw_name = "fclk_div3" },
@@ -205,13 +214,13 @@ static struct clk_regmap s4_sysclk_b_sel = {
.offset = CLKCTRL_SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 26,
- .table = mux_table_sys_ab_clk_sel,
+ .table = s4_sysclk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sysclk_b_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_ab_clk_parent_data,
- .num_parents = ARRAY_SIZE(sys_ab_clk_parent_data),
+ .parent_data = s4_sysclk_parents,
+ .num_parents = ARRAY_SIZE(s4_sysclk_parents),
},
};
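/*
 * Illustration: the shared table maps the seven usable parents to register
 * values { 0, 1, 2, 3, 4, 6, 7 }, so value 5 (the slot dedicated to
 * AXI_CLK) is never programmed by either sysclk mux.
 */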
@@ -251,13 +260,13 @@ static struct clk_regmap s4_sysclk_a_sel = {
.offset = CLKCTRL_SYS_CLK_CTRL0,
.mask = 0x7,
.shift = 10,
- .table = mux_table_sys_ab_clk_sel,
+ .table = s4_sysclk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "sysclk_a_sel",
.ops = &clk_regmap_mux_ro_ops,
- .parent_data = sys_ab_clk_parent_data,
- .num_parents = ARRAY_SIZE(sys_ab_clk_parent_data),
+ .parent_data = s4_sysclk_parents,
+ .num_parents = ARRAY_SIZE(s4_sysclk_parents),
},
};
@@ -523,24 +532,24 @@ static struct clk_regmap s4_cecb_32k_clkout = {
},
};
-static const struct clk_parent_data s4_sc_parent_data[] = {
+static const struct clk_parent_data s4_sc_clk_parents[] = {
{ .fw_name = "fclk_div4" },
{ .fw_name = "fclk_div3" },
{ .fw_name = "fclk_div5" },
{ .fw_name = "xtal", }
};
-static struct clk_regmap s4_sc_clk_mux = {
+static struct clk_regmap s4_sc_clk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_SC_CLK_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "sc_clk_mux",
+ .name = "sc_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sc_parent_data,
- .num_parents = ARRAY_SIZE(s4_sc_parent_data),
+ .parent_data = s4_sc_clk_parents,
+ .num_parents = ARRAY_SIZE(s4_sc_clk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -555,20 +564,20 @@ static struct clk_regmap s4_sc_clk_div = {
.name = "sc_clk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_sc_clk_mux.hw
+ &s4_sc_clk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_sc_clk_gate = {
+static struct clk_regmap s4_sc_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_SC_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "sc_clk_gate",
+ .name = "sc_clk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_sc_clk_div.hw
@@ -578,13 +587,13 @@ static struct clk_regmap s4_sc_clk_gate = {
},
};
-static struct clk_regmap s4_12_24M_clk_gate = {
+static struct clk_regmap s4_12_24M = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_CLK12_24_CTRL,
.bit_idx = 11,
},
.hw.init = &(struct clk_init_data) {
- .name = "12_24m_gate",
+ .name = "12_24M",
.ops = &clk_regmap_gate_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", }
@@ -593,32 +602,32 @@ static struct clk_regmap s4_12_24M_clk_gate = {
},
};
-static struct clk_fixed_factor s4_12M_clk_div = {
+static struct clk_fixed_factor s4_12M_div = {
.mult = 1,
.div = 2,
.hw.init = &(struct clk_init_data){
- .name = "12M",
+ .name = "12M_div",
.ops = &clk_fixed_factor_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_12_24M_clk_gate.hw
+ &s4_12_24M.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_12_24M_clk = {
+static struct clk_regmap s4_12_24M_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_CLK12_24_CTRL,
.mask = 0x1,
.shift = 10,
},
.hw.init = &(struct clk_init_data) {
- .name = "12_24m",
+ .name = "12_24M_sel",
.ops = &clk_regmap_mux_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_12_24M_clk_gate.hw,
- &s4_12M_clk_div.hw,
+ &s4_12_24M.hw,
+ &s4_12M_div.hw,
},
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
@@ -687,7 +696,7 @@ static struct clk_regmap s4_vid_pll = {
},
};
-static const struct clk_parent_data s4_vclk_parent_data[] = {
+static const struct clk_parent_data s4_vclk_parents[] = {
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "gp0_pll", },
{ .fw_name = "hifi_pll", },
@@ -707,8 +716,8 @@ static struct clk_regmap s4_vclk_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_vclk_parent_data),
+ .parent_data = s4_vclk_parents,
+ .num_parents = ARRAY_SIZE(s4_vclk_parents),
.flags = 0,
},
};
@@ -722,8 +731,8 @@ static struct clk_regmap s4_vclk2_sel = {
.hw.init = &(struct clk_init_data){
.name = "vclk2_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_vclk_parent_data),
+ .parent_data = s4_vclk_parents,
+ .num_parents = ARRAY_SIZE(s4_vclk_parents),
.flags = 0,
},
};
@@ -1071,8 +1080,8 @@ static struct clk_fixed_factor s4_vclk2_div12 = {
};
/* Indexes 5, 6 and 7 correspond to no real clock, so they are not used. */
-static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *s4_cts_parent_hws[] = {
+static u32 s4_cts_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *s4_cts_parents[] = {
&s4_vclk_div1.hw,
&s4_vclk_div2.hw,
&s4_vclk_div4.hw,
@@ -1090,13 +1099,13 @@ static struct clk_regmap s4_cts_enci_sel = {
.offset = CLKCTRL_VID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1106,13 +1115,13 @@ static struct clk_regmap s4_cts_encp_sel = {
.offset = CLKCTRL_VID_CLK_DIV,
.mask = 0xf,
.shift = 20,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1122,20 +1131,20 @@ static struct clk_regmap s4_cts_vdac_sel = {
.offset = CLKCTRL_VIID_CLK_DIV,
.mask = 0xf,
.shift = 28,
- .table = mux_table_cts_sel,
+ .table = s4_cts_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_parent_hws),
+ .parent_hws = s4_cts_parents,
+ .num_parents = ARRAY_SIZE(s4_cts_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
/* Indexes 5, 6 and 7 correspond to no real clock, so they are not used. */
-static u32 mux_table_hdmi_tx_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
-static const struct clk_hw *s4_cts_hdmi_tx_parent_hws[] = {
+static u32 s4_hdmi_tx_parents_val_table[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *s4_hdmi_tx_parents[] = {
&s4_vclk_div1.hw,
&s4_vclk_div2.hw,
&s4_vclk_div4.hw,
@@ -1153,13 +1162,13 @@ static struct clk_regmap s4_hdmi_tx_sel = {
.offset = CLKCTRL_HDMI_CLK_CTRL,
.mask = 0xf,
.shift = 16,
- .table = mux_table_hdmi_tx_sel,
+ .table = s4_hdmi_tx_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_cts_hdmi_tx_parent_hws,
- .num_parents = ARRAY_SIZE(s4_cts_hdmi_tx_parent_hws),
+ .parent_hws = s4_hdmi_tx_parents,
+ .num_parents = ARRAY_SIZE(s4_hdmi_tx_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1229,7 +1238,7 @@ static struct clk_regmap s4_hdmi_tx = {
};
/* HDMI Clocks */
-static const struct clk_parent_data s4_hdmi_parent_data[] = {
+static const struct clk_parent_data s4_hdmi_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
@@ -1246,8 +1255,8 @@ static struct clk_regmap s4_hdmi_sel = {
.hw.init = &(struct clk_init_data){
.name = "hdmi_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_hdmi_parent_data,
- .num_parents = ARRAY_SIZE(s4_hdmi_parent_data),
+ .parent_data = s4_hdmi_parents,
+ .num_parents = ARRAY_SIZE(s4_hdmi_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1298,7 +1307,7 @@ static struct clk_regmap s4_ts_clk_div = {
},
};
-static struct clk_regmap s4_ts_clk_gate = {
+static struct clk_regmap s4_ts_clk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_TS_CLK_CTRL,
.bit_idx = 8,
@@ -1320,7 +1329,7 @@ static struct clk_regmap s4_ts_clk_gate = {
 * mux because it does top-to-bottom updates of each clock tree and
* switches to the "inactive" one when CLK_SET_RATE_GATE is set.
*/
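/*
 * Hypothetical consumer sketch: because the inactive tree is reprogrammed
 * first and the glitch-free mux only then switches over, a plain rate
 * request never exposes the consumer to a half-configured divider ("dev"
 * and the "mali" clock name are placeholders, not taken from this driver):
 *
 *	struct clk *mali = devm_clk_get(dev, "mali");
 *
 *	if (!IS_ERR(mali)) {
 *		clk_prepare_enable(mali);
 *		clk_set_rate(mali, 500 * 1000 * 1000);
 *	}
 */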
-static const struct clk_parent_data s4_mali_0_1_parent_data[] = {
+static const struct clk_parent_data s4_mali_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "gp0_pll", },
{ .fw_name = "hifi_pll", },
@@ -1340,8 +1349,8 @@ static struct clk_regmap s4_mali_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(s4_mali_0_1_parent_data),
+ .parent_data = s4_mali_parents,
+ .num_parents = ARRAY_SIZE(s4_mali_parents),
/*
* Don't request the parent to change the rate because
* all GPU frequencies can be derived from the fclk_*
@@ -1394,8 +1403,8 @@ static struct clk_regmap s4_mali_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "mali_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_mali_0_1_parent_data,
- .num_parents = ARRAY_SIZE(s4_mali_0_1_parent_data),
+ .parent_data = s4_mali_parents,
+ .num_parents = ARRAY_SIZE(s4_mali_parents),
.flags = 0,
},
};
@@ -1433,28 +1442,26 @@ static struct clk_regmap s4_mali_1 = {
},
};
-static const struct clk_hw *s4_mali_parent_hws[] = {
- &s4_mali_0.hw,
- &s4_mali_1.hw
-};
-
-static struct clk_regmap s4_mali_mux = {
+static struct clk_regmap s4_mali_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_MALI_CLK_CTRL,
.mask = 1,
.shift = 31,
},
.hw.init = &(struct clk_init_data){
- .name = "mali",
+ .name = "mali_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_mali_parent_hws,
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_mali_0.hw,
+ &s4_mali_1.hw,
+ },
.num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
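Note the parent_hws style used here: the parent list is a C99 compound literal. At file scope a compound literal has static storage duration, so taking its address in a static initializer is valid and the small two-entry helper arrays removed above become unnecessary. A standalone illustration of the idiom (not kernel code):

static const int *const even_digits = (const int []){ 0, 2, 4, 6, 8 };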
/* VDEC clocks */
-static const struct clk_parent_data s4_dec_parent_data[] = {
+static const struct clk_parent_data s4_dec_parents[] = {
{ .fw_name = "fclk_div2p5", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div4", },
@@ -1465,7 +1472,7 @@ static const struct clk_parent_data s4_dec_parent_data[] = {
{ .fw_name = "xtal", }
};
-static struct clk_regmap s4_vdec_p0_mux = {
+static struct clk_regmap s4_vdec_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC_CLK_CTRL,
.mask = 0x7,
@@ -1473,10 +1480,10 @@ static struct clk_regmap s4_vdec_p0_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_p0_mux",
+ .name = "vdec_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1492,7 +1499,7 @@ static struct clk_regmap s4_vdec_p0_div = {
.name = "vdec_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdec_p0_mux.hw
+ &s4_vdec_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1515,7 +1522,7 @@ static struct clk_regmap s4_vdec_p0 = {
},
};
-static struct clk_regmap s4_vdec_p1_mux = {
+static struct clk_regmap s4_vdec_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC3_CLK_CTRL,
.mask = 0x7,
@@ -1523,10 +1530,10 @@ static struct clk_regmap s4_vdec_p1_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_p1_mux",
+ .name = "vdec_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1542,7 +1549,7 @@ static struct clk_regmap s4_vdec_p1_div = {
.name = "vdec_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdec_p1_mux.hw
+ &s4_vdec_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1565,27 +1572,25 @@ static struct clk_regmap s4_vdec_p1 = {
},
};
-static const struct clk_hw *s4_vdec_mux_parent_hws[] = {
- &s4_vdec_p0.hw,
- &s4_vdec_p1.hw
-};
-
-static struct clk_regmap s4_vdec_mux = {
+static struct clk_regmap s4_vdec_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC3_CLK_CTRL,
.mask = 0x1,
.shift = 15,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdec_mux",
+ .name = "vdec_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_vdec_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_vdec_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_vdec_p0.hw,
+ &s4_vdec_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hevcf_p0_mux = {
+static struct clk_regmap s4_hevcf_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC2_CLK_CTRL,
.mask = 0x7,
@@ -1593,10 +1598,10 @@ static struct clk_regmap s4_hevcf_p0_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf_p0_mux",
+ .name = "hevcf_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1612,7 +1617,7 @@ static struct clk_regmap s4_hevcf_p0_div = {
.name = "hevcf_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hevcf_p0_mux.hw
+ &s4_hevcf_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1625,7 +1630,7 @@ static struct clk_regmap s4_hevcf_p0 = {
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "hevcf_p0_gate",
+ .name = "hevcf_p0",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hevcf_p0_div.hw
@@ -1635,7 +1640,7 @@ static struct clk_regmap s4_hevcf_p0 = {
},
};
-static struct clk_regmap s4_hevcf_p1_mux = {
+static struct clk_regmap s4_hevcf_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC4_CLK_CTRL,
.mask = 0x7,
@@ -1643,10 +1648,10 @@ static struct clk_regmap s4_hevcf_p1_mux = {
.flags = CLK_MUX_ROUND_CLOSEST,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf_p1_mux",
+ .name = "hevcf_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_dec_parent_data,
- .num_parents = ARRAY_SIZE(s4_dec_parent_data),
+ .parent_data = s4_dec_parents,
+ .num_parents = ARRAY_SIZE(s4_dec_parents),
.flags = 0,
},
};
@@ -1662,7 +1667,7 @@ static struct clk_regmap s4_hevcf_p1_div = {
.name = "hevcf_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hevcf_p1_mux.hw
+ &s4_hevcf_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1685,28 +1690,26 @@ static struct clk_regmap s4_hevcf_p1 = {
},
};
-static const struct clk_hw *s4_hevcf_mux_parent_hws[] = {
- &s4_hevcf_p0.hw,
- &s4_hevcf_p1.hw
-};
-
-static struct clk_regmap s4_hevcf_mux = {
+static struct clk_regmap s4_hevcf_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDEC4_CLK_CTRL,
.mask = 0x1,
.shift = 15,
},
.hw.init = &(struct clk_init_data) {
- .name = "hevcf",
+ .name = "hevcf_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_hevcf_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_hevcf_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_hevcf_p0.hw,
+ &s4_hevcf_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VPU Clock */
-static const struct clk_parent_data s4_vpu_parent_data[] = {
+static const struct clk_parent_data s4_vpu_parents[] = {
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div5", },
@@ -1726,8 +1729,8 @@ static struct clk_regmap s4_vpu_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_parent_data),
+ .parent_data = s4_vpu_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_parents),
.flags = 0,
},
};
@@ -1770,8 +1773,8 @@ static struct clk_regmap s4_vpu_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vpu_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_parent_data),
+ .parent_data = s4_vpu_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_parents),
.flags = 0,
},
};
@@ -1823,24 +1826,24 @@ static struct clk_regmap s4_vpu = {
},
};
-static const struct clk_parent_data vpu_clkb_tmp_parent_data[] = {
+static const struct clk_parent_data vpu_clkb_tmp_parents[] = {
{ .hw = &s4_vpu.hw },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div5", },
{ .fw_name = "fclk_div7", }
};
-static struct clk_regmap s4_vpu_clkb_tmp_mux = {
+static struct clk_regmap s4_vpu_clkb_tmp_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKB_CTRL,
.mask = 0x3,
.shift = 20,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkb_tmp_mux",
+ .name = "vpu_clkb_tmp_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = vpu_clkb_tmp_parent_data,
- .num_parents = ARRAY_SIZE(vpu_clkb_tmp_parent_data),
+ .parent_data = vpu_clkb_tmp_parents,
+ .num_parents = ARRAY_SIZE(vpu_clkb_tmp_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -1855,7 +1858,7 @@ static struct clk_regmap s4_vpu_clkb_tmp_div = {
.name = "vpu_clkb_tmp_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkb_tmp_mux.hw
+ &s4_vpu_clkb_tmp_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1911,7 +1914,7 @@ static struct clk_regmap s4_vpu_clkb = {
},
};
-static const struct clk_parent_data s4_vpu_clkc_parent_data[] = {
+static const struct clk_parent_data s4_vpu_clkc_parents[] = {
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
@@ -1922,17 +1925,17 @@ static const struct clk_parent_data s4_vpu_clkc_parent_data[] = {
{ .fw_name = "gp0_pll", },
};
-static struct clk_regmap s4_vpu_clkc_p0_mux = {
+static struct clk_regmap s4_vpu_clkc_p0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x7,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_p0_mux",
+ .name = "vpu_clkc_p0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_clkc_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_clkc_parent_data),
+ .parent_data = s4_vpu_clkc_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_clkc_parents),
.flags = 0,
},
};
@@ -1947,7 +1950,7 @@ static struct clk_regmap s4_vpu_clkc_p0_div = {
.name = "vpu_clkc_p0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkc_p0_mux.hw
+ &s4_vpu_clkc_p0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1970,17 +1973,17 @@ static struct clk_regmap s4_vpu_clkc_p0 = {
},
};
-static struct clk_regmap s4_vpu_clkc_p1_mux = {
+static struct clk_regmap s4_vpu_clkc_p1_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x7,
.shift = 25,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_p1_mux",
+ .name = "vpu_clkc_p1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vpu_clkc_parent_data,
- .num_parents = ARRAY_SIZE(s4_vpu_clkc_parent_data),
+ .parent_data = s4_vpu_clkc_parents,
+ .num_parents = ARRAY_SIZE(s4_vpu_clkc_parents),
.flags = 0,
},
};
@@ -1995,7 +1998,7 @@ static struct clk_regmap s4_vpu_clkc_p1_div = {
.name = "vpu_clkc_p1_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vpu_clkc_p1_mux.hw
+ &s4_vpu_clkc_p1_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2018,28 +2021,26 @@ static struct clk_regmap s4_vpu_clkc_p1 = {
},
};
-static const struct clk_hw *s4_vpu_mux_parent_hws[] = {
- &s4_vpu_clkc_p0.hw,
- &s4_vpu_clkc_p1.hw
-};
-
-static struct clk_regmap s4_vpu_clkc_mux = {
+static struct clk_regmap s4_vpu_clkc_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VPU_CLKC_CTRL,
.mask = 0x1,
.shift = 31,
},
.hw.init = &(struct clk_init_data) {
- .name = "vpu_clkc_mux",
+ .name = "vpu_clkc_sel",
.ops = &clk_regmap_mux_ops,
- .parent_hws = s4_vpu_mux_parent_hws,
- .num_parents = ARRAY_SIZE(s4_vpu_mux_parent_hws),
+ .parent_hws = (const struct clk_hw *[]) {
+ &s4_vpu_clkc_p0.hw,
+ &s4_vpu_clkc_p1.hw,
+ },
+ .num_parents = 2,
.flags = CLK_SET_RATE_PARENT,
},
};
/* VAPB Clock */
-static const struct clk_parent_data s4_vapb_parent_data[] = {
+static const struct clk_parent_data s4_vapb_parents[] = {
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
@@ -2059,8 +2060,8 @@ static struct clk_regmap s4_vapb_0_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vapb_parent_data,
- .num_parents = ARRAY_SIZE(s4_vapb_parent_data),
+ .parent_data = s4_vapb_parents,
+ .num_parents = ARRAY_SIZE(s4_vapb_parents),
.flags = 0,
},
};
@@ -2107,8 +2108,8 @@ static struct clk_regmap s4_vapb_1_sel = {
.hw.init = &(struct clk_init_data){
.name = "vapb_1_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vapb_parent_data,
- .num_parents = ARRAY_SIZE(s4_vapb_parent_data),
+ .parent_data = s4_vapb_parents,
+ .num_parents = ARRAY_SIZE(s4_vapb_parents),
.flags = 0,
},
};
@@ -2164,13 +2165,13 @@ static struct clk_regmap s4_vapb = {
},
};
-static struct clk_regmap s4_ge2d_gate = {
+static struct clk_regmap s4_ge2d = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_VAPBCLK_CTRL,
.bit_idx = 30,
},
.hw.init = &(struct clk_init_data) {
- .name = "ge2d_clk",
+ .name = "ge2d",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) { &s4_vapb.hw },
.num_parents = 1,
@@ -2178,24 +2179,24 @@ static struct clk_regmap s4_ge2d_gate = {
},
};
-static const struct clk_parent_data s4_esmclk_parent_data[] = {
+static const struct clk_parent_data s4_hdcp22_esmclk_parents[] = {
{ .fw_name = "fclk_div7", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap s4_hdcp22_esmclk_mux = {
+static struct clk_regmap s4_hdcp22_esmclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_HDCP22_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "hdcp22_esmclk_mux",
+ .name = "hdcp22_esmclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_esmclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_esmclk_parent_data),
+ .parent_data = s4_hdcp22_esmclk_parents,
+ .num_parents = ARRAY_SIZE(s4_hdcp22_esmclk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2210,20 +2211,20 @@ static struct clk_regmap s4_hdcp22_esmclk_div = {
.name = "hdcp22_esmclk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hdcp22_esmclk_mux.hw
+ &s4_hdcp22_esmclk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hdcp22_esmclk_gate = {
+static struct clk_regmap s4_hdcp22_esmclk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_HDCP22_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "hdcp22_esmclk_gate",
+ .name = "hdcp22_esmclk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hdcp22_esmclk_div.hw
@@ -2233,24 +2234,24 @@ static struct clk_regmap s4_hdcp22_esmclk_gate = {
},
};
-static const struct clk_parent_data s4_skpclk_parent_data[] = {
+static const struct clk_parent_data s4_hdcp22_skpclk_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
{ .fw_name = "fclk_div5", },
};
-static struct clk_regmap s4_hdcp22_skpclk_mux = {
+static struct clk_regmap s4_hdcp22_skpclk_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_HDCP22_CTRL,
.mask = 0x3,
.shift = 25,
},
.hw.init = &(struct clk_init_data) {
- .name = "hdcp22_skpclk_mux",
+ .name = "hdcp22_skpclk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_skpclk_parent_data,
- .num_parents = ARRAY_SIZE(s4_skpclk_parent_data),
+ .parent_data = s4_hdcp22_skpclk_parents,
+ .num_parents = ARRAY_SIZE(s4_hdcp22_skpclk_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2265,20 +2266,20 @@ static struct clk_regmap s4_hdcp22_skpclk_div = {
.name = "hdcp22_skpclk_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_hdcp22_skpclk_mux.hw
+ &s4_hdcp22_skpclk_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_hdcp22_skpclk_gate = {
+static struct clk_regmap s4_hdcp22_skpclk = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_HDCP22_CTRL,
.bit_idx = 24,
},
.hw.init = &(struct clk_init_data){
- .name = "hdcp22_skpclk_gate",
+ .name = "hdcp22_skpclk",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_hdcp22_skpclk_div.hw
@@ -2288,7 +2289,7 @@ static struct clk_regmap s4_hdcp22_skpclk_gate = {
},
};
-static const struct clk_parent_data s4_vdin_parent_data[] = {
+static const struct clk_parent_data s4_vdin_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
@@ -2296,17 +2297,17 @@ static const struct clk_parent_data s4_vdin_parent_data[] = {
{ .hw = &s4_vid_pll.hw }
};
-static struct clk_regmap s4_vdin_meas_mux = {
+static struct clk_regmap s4_vdin_meas_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_VDIN_MEAS_CLK_CTRL,
.mask = 0x7,
.shift = 9,
},
.hw.init = &(struct clk_init_data) {
- .name = "vdin_meas_mux",
+ .name = "vdin_meas_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_vdin_parent_data,
- .num_parents = ARRAY_SIZE(s4_vdin_parent_data),
+ .parent_data = s4_vdin_parents,
+ .num_parents = ARRAY_SIZE(s4_vdin_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2321,20 +2322,20 @@ static struct clk_regmap s4_vdin_meas_div = {
.name = "vdin_meas_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_vdin_meas_mux.hw
+ &s4_vdin_meas_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_vdin_meas_gate = {
+static struct clk_regmap s4_vdin_meas = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_VDIN_MEAS_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "vdin_meas_gate",
+ .name = "vdin_meas",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_vdin_meas_div.hw
@@ -2345,7 +2346,7 @@ static struct clk_regmap s4_vdin_meas_gate = {
};
/* EMMC/NAND clock */
-static const struct clk_parent_data s4_sd_emmc_clk0_parent_data[] = {
+static const struct clk_parent_data s4_sd_emmc_clk0_parents[] = {
{ .fw_name = "xtal", },
{ .fw_name = "fclk_div2", },
{ .fw_name = "fclk_div3", },
@@ -2365,8 +2366,8 @@ static struct clk_regmap s4_sd_emmc_c_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_c_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2413,8 +2414,8 @@ static struct clk_regmap s4_sd_emmc_a_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_a_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2461,8 +2462,8 @@ static struct clk_regmap s4_sd_emmc_b_clk0_sel = {
.hw.init = &(struct clk_init_data) {
.name = "sd_emmc_b_clk0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_sd_emmc_clk0_parent_data,
- .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parent_data),
+ .parent_data = s4_sd_emmc_clk0_parents,
+ .num_parents = ARRAY_SIZE(s4_sd_emmc_clk0_parents),
.flags = 0,
},
};
@@ -2501,7 +2502,7 @@ static struct clk_regmap s4_sd_emmc_b_clk0 = {
};
/* SPICC Clock */
-static const struct clk_parent_data s4_spicc_parent_data[] = {
+static const struct clk_parent_data s4_spicc_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_sys_clk.hw },
{ .fw_name = "fclk_div4", },
@@ -2511,17 +2512,17 @@ static const struct clk_parent_data s4_spicc_parent_data[] = {
{ .fw_name = "fclk_div7", },
};
-static struct clk_regmap s4_spicc0_mux = {
+static struct clk_regmap s4_spicc0_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = CLKCTRL_SPICC_CLK_CTRL,
.mask = 0x7,
.shift = 7,
},
.hw.init = &(struct clk_init_data) {
- .name = "spicc0_mux",
+ .name = "spicc0_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_spicc_parent_data,
- .num_parents = ARRAY_SIZE(s4_spicc_parent_data),
+ .parent_data = s4_spicc_parents,
+ .num_parents = ARRAY_SIZE(s4_spicc_parents),
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2536,20 +2537,20 @@ static struct clk_regmap s4_spicc0_div = {
.name = "spicc0_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_spicc0_mux.hw
+ &s4_spicc0_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_spicc0_gate = {
+static struct clk_regmap s4_spicc0_en = {
.data = &(struct clk_regmap_gate_data){
.offset = CLKCTRL_SPICC_CLK_CTRL,
.bit_idx = 6,
},
.hw.init = &(struct clk_init_data){
- .name = "spicc0",
+ .name = "spicc0_en",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_spicc0_div.hw
@@ -2560,500 +2561,61 @@ static struct clk_regmap s4_spicc0_gate = {
};
/* PWM Clock */
-static const struct clk_parent_data s4_pwm_parent_data[] = {
+static const struct clk_parent_data s4_pwm_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "fclk_div4", },
{ .fw_name = "fclk_div3", },
};
-static struct clk_regmap s4_pwm_a_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_a_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_a_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_a_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_a_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_a_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_b_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_b_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_b_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_b_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_AB_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_b_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_b_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_c_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_c_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_c_mux.hw
- },
- .num_parents = 1,
- },
-};
-
-static struct clk_regmap s4_pwm_c_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_c_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_c_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_d_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_d_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_d_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_d_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_CD_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_d_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_d_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_e_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_e_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_e_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_e_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_e_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_e_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_f_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_a, CLKCTRL_PWM_CLK_AB_CTRL, 8);
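The S4_COMP_SEL/S4_COMP_DIV/S4_COMP_GATE macros are introduced elsewhere in this series and their definitions are not part of this hunk. Judging from the open-coded blocks being deleted around these invocations, S4_COMP_SEL plausibly expands along the following lines (an illustrative sketch under that assumption, not the actual definition; the call sites supply the leading static and the trailing semicolon):

#define S4_COMP_SEL(_name, _reg, _shift, _mask, _pdata) \
struct clk_regmap s4_ ## _name ## _sel = { \
	.data = &(struct clk_regmap_mux_data) { \
		.offset = (_reg), \
		.mask = (_mask), \
		.shift = (_shift), \
	}, \
	.hw.init = &(struct clk_init_data) { \
		.name = #_name "_sel", \
		.ops = &clk_regmap_mux_ops, \
		.parent_data = (_pdata), \
		.num_parents = ARRAY_SIZE(_pdata), \
	}, \
}

S4_COMP_DIV and S4_COMP_GATE would follow the same pattern for the divider and gate stages, each parented on the stage before it.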
-static struct clk_regmap s4_pwm_f_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_f_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_b, CLKCTRL_PWM_CLK_AB_CTRL, 24);
-static struct clk_regmap s4_pwm_f_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_EF_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_f_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_f_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_c, CLKCTRL_PWM_CLK_CD_CTRL, 8);
-static struct clk_regmap s4_pwm_g_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_d, CLKCTRL_PWM_CLK_CD_CTRL, 24);
-static struct clk_regmap s4_pwm_g_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_g_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_e, CLKCTRL_PWM_CLK_EF_CTRL, 8);
-static struct clk_regmap s4_pwm_g_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_g_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_g_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_f, CLKCTRL_PWM_CLK_EF_CTRL, 24);
-static struct clk_regmap s4_pwm_h_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_g, CLKCTRL_PWM_CLK_GH_CTRL, 8);
-static struct clk_regmap s4_pwm_h_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_h_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_h, CLKCTRL_PWM_CLK_GH_CTRL, 24);
-static struct clk_regmap s4_pwm_h_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_GH_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_h_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_h_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
+static S4_COMP_SEL(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 9, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 0, 8);
+static S4_COMP_GATE(pwm_i, CLKCTRL_PWM_CLK_IJ_CTRL, 8);
-static struct clk_regmap s4_pwm_i_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .mask = 0x3,
- .shift = 9,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
+static S4_COMP_SEL(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 25, 0x3, s4_pwm_parents);
+static S4_COMP_DIV(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 16, 8);
+static S4_COMP_GATE(pwm_j, CLKCTRL_PWM_CLK_IJ_CTRL, 24);
-static struct clk_regmap s4_pwm_i_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .shift = 0,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_i_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_i_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .bit_idx = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_i_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_i_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_j_mux = {
- .data = &(struct clk_regmap_mux_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .mask = 0x3,
- .shift = 25,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_mux",
- .ops = &clk_regmap_mux_ops,
- .parent_data = s4_pwm_parent_data,
- .num_parents = ARRAY_SIZE(s4_pwm_parent_data),
- .flags = 0,
- },
-};
-
-static struct clk_regmap s4_pwm_j_div = {
- .data = &(struct clk_regmap_div_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .shift = 16,
- .width = 8,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_div",
- .ops = &clk_regmap_divider_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_j_mux.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_pwm_j_gate = {
- .data = &(struct clk_regmap_gate_data) {
- .offset = CLKCTRL_PWM_CLK_IJ_CTRL,
- .bit_idx = 24,
- },
- .hw.init = &(struct clk_init_data){
- .name = "pwm_j_gate",
- .ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) {
- &s4_pwm_j_div.hw
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- },
-};
-
-static struct clk_regmap s4_saradc_mux = {
+static struct clk_regmap s4_saradc_sel = {
.data = &(struct clk_regmap_mux_data) {
.offset = CLKCTRL_SAR_CLK_CTRL,
.mask = 0x3,
.shift = 9,
},
.hw.init = &(struct clk_init_data){
- .name = "saradc_mux",
+ .name = "saradc_sel",
.ops = &clk_regmap_mux_ops,
.parent_data = (const struct clk_parent_data []) {
{ .fw_name = "xtal", },
@@ -3074,20 +2636,20 @@ static struct clk_regmap s4_saradc_div = {
.name = "saradc_div",
.ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
- &s4_saradc_mux.hw
+ &s4_saradc_sel.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
};
-static struct clk_regmap s4_saradc_gate = {
+static struct clk_regmap s4_saradc = {
.data = &(struct clk_regmap_gate_data) {
.offset = CLKCTRL_SAR_CLK_CTRL,
.bit_idx = 8,
},
.hw.init = &(struct clk_init_data){
- .name = "saradc_clk",
+ .name = "saradc",
.ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&s4_saradc_div.hw
@@ -3102,9 +2664,8 @@ static struct clk_regmap s4_saradc_gate = {
 * corresponding clock sources are not described in the clock tree and are internal
 * clocks used for debug, so they are skipped.
*/
-static u32 s4_gen_clk_mux_table[] = { 0, 4, 5, 7, 19, 21, 22,
- 23, 24, 25, 26, 27, 28 };
-static const struct clk_parent_data s4_gen_clk_parent_data[] = {
+static u32 s4_gen_clk_parents_val_table[] = { 0, 4, 5, 7, 19, 21, 22, 23, 24, 25, 26, 27, 28 };
+static const struct clk_parent_data s4_gen_clk_parents[] = {
{ .fw_name = "xtal", },
{ .hw = &s4_vid_pll.hw },
{ .fw_name = "gp0_pll", },
@@ -3125,13 +2686,13 @@ static struct clk_regmap s4_gen_clk_sel = {
.offset = CLKCTRL_GEN_CLK_CTRL,
.mask = 0x1f,
.shift = 12,
- .table = s4_gen_clk_mux_table,
+ .table = s4_gen_clk_parents_val_table,
},
.hw.init = &(struct clk_init_data){
.name = "gen_clk_sel",
.ops = &clk_regmap_mux_ops,
- .parent_data = s4_gen_clk_parent_data,
- .num_parents = ARRAY_SIZE(s4_gen_clk_parent_data),
+ .parent_data = s4_gen_clk_parents,
+ .num_parents = ARRAY_SIZE(s4_gen_clk_parents),
/*
* The GEN clock can be connected to an external pad
* and may be set up directly from the device tree. Don't
@@ -3174,61 +2735,75 @@ static struct clk_regmap s4_gen_clk = {
},
};
-#define MESON_GATE(_name, _reg, _bit) \
- MESON_PCLK(_name, _reg, _bit, &s4_sys_clk.hw)
-
-static MESON_GATE(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0);
-static MESON_GATE(s4_dos, CLKCTRL_SYS_CLK_EN0_REG0, 1);
-static MESON_GATE(s4_ethphy, CLKCTRL_SYS_CLK_EN0_REG0, 4);
-static MESON_GATE(s4_mali, CLKCTRL_SYS_CLK_EN0_REG0, 6);
-static MESON_GATE(s4_aocpu, CLKCTRL_SYS_CLK_EN0_REG0, 13);
-static MESON_GATE(s4_aucpu, CLKCTRL_SYS_CLK_EN0_REG0, 14);
-static MESON_GATE(s4_cec, CLKCTRL_SYS_CLK_EN0_REG0, 16);
-static MESON_GATE(s4_sdemmca, CLKCTRL_SYS_CLK_EN0_REG0, 24);
-static MESON_GATE(s4_sdemmcb, CLKCTRL_SYS_CLK_EN0_REG0, 25);
-static MESON_GATE(s4_nand, CLKCTRL_SYS_CLK_EN0_REG0, 26);
-static MESON_GATE(s4_smartcard, CLKCTRL_SYS_CLK_EN0_REG0, 27);
-static MESON_GATE(s4_acodec, CLKCTRL_SYS_CLK_EN0_REG0, 28);
-static MESON_GATE(s4_spifc, CLKCTRL_SYS_CLK_EN0_REG0, 29);
-static MESON_GATE(s4_msr_clk, CLKCTRL_SYS_CLK_EN0_REG0, 30);
-static MESON_GATE(s4_ir_ctrl, CLKCTRL_SYS_CLK_EN0_REG0, 31);
-static MESON_GATE(s4_audio, CLKCTRL_SYS_CLK_EN0_REG1, 0);
-static MESON_GATE(s4_eth, CLKCTRL_SYS_CLK_EN0_REG1, 3);
-static MESON_GATE(s4_uart_a, CLKCTRL_SYS_CLK_EN0_REG1, 5);
-static MESON_GATE(s4_uart_b, CLKCTRL_SYS_CLK_EN0_REG1, 6);
-static MESON_GATE(s4_uart_c, CLKCTRL_SYS_CLK_EN0_REG1, 7);
-static MESON_GATE(s4_uart_d, CLKCTRL_SYS_CLK_EN0_REG1, 8);
-static MESON_GATE(s4_uart_e, CLKCTRL_SYS_CLK_EN0_REG1, 9);
-static MESON_GATE(s4_aififo, CLKCTRL_SYS_CLK_EN0_REG1, 11);
-static MESON_GATE(s4_ts_ddr, CLKCTRL_SYS_CLK_EN0_REG1, 15);
-static MESON_GATE(s4_ts_pll, CLKCTRL_SYS_CLK_EN0_REG1, 16);
-static MESON_GATE(s4_g2d, CLKCTRL_SYS_CLK_EN0_REG1, 20);
-static MESON_GATE(s4_spicc0, CLKCTRL_SYS_CLK_EN0_REG1, 21);
-static MESON_GATE(s4_usb, CLKCTRL_SYS_CLK_EN0_REG1, 26);
-static MESON_GATE(s4_i2c_m_a, CLKCTRL_SYS_CLK_EN0_REG1, 30);
-static MESON_GATE(s4_i2c_m_b, CLKCTRL_SYS_CLK_EN0_REG1, 31);
-static MESON_GATE(s4_i2c_m_c, CLKCTRL_SYS_CLK_EN0_REG2, 0);
-static MESON_GATE(s4_i2c_m_d, CLKCTRL_SYS_CLK_EN0_REG2, 1);
-static MESON_GATE(s4_i2c_m_e, CLKCTRL_SYS_CLK_EN0_REG2, 2);
-static MESON_GATE(s4_hdmitx_apb, CLKCTRL_SYS_CLK_EN0_REG2, 4);
-static MESON_GATE(s4_i2c_s_a, CLKCTRL_SYS_CLK_EN0_REG2, 5);
-static MESON_GATE(s4_usb1_to_ddr, CLKCTRL_SYS_CLK_EN0_REG2, 8);
-static MESON_GATE(s4_hdcp22, CLKCTRL_SYS_CLK_EN0_REG2, 10);
-static MESON_GATE(s4_mmc_apb, CLKCTRL_SYS_CLK_EN0_REG2, 11);
-static MESON_GATE(s4_rsa, CLKCTRL_SYS_CLK_EN0_REG2, 18);
-static MESON_GATE(s4_cpu_debug, CLKCTRL_SYS_CLK_EN0_REG2, 19);
-static MESON_GATE(s4_vpu_intr, CLKCTRL_SYS_CLK_EN0_REG2, 25);
-static MESON_GATE(s4_demod, CLKCTRL_SYS_CLK_EN0_REG2, 27);
-static MESON_GATE(s4_sar_adc, CLKCTRL_SYS_CLK_EN0_REG2, 28);
-static MESON_GATE(s4_gic, CLKCTRL_SYS_CLK_EN0_REG2, 30);
-static MESON_GATE(s4_pwm_ab, CLKCTRL_SYS_CLK_EN0_REG3, 7);
-static MESON_GATE(s4_pwm_cd, CLKCTRL_SYS_CLK_EN0_REG3, 8);
-static MESON_GATE(s4_pwm_ef, CLKCTRL_SYS_CLK_EN0_REG3, 9);
-static MESON_GATE(s4_pwm_gh, CLKCTRL_SYS_CLK_EN0_REG3, 10);
-static MESON_GATE(s4_pwm_ij, CLKCTRL_SYS_CLK_EN0_REG3, 11);
+static const struct clk_parent_data s4_pclk_parents = { .hw = &s4_sys_clk.hw };
+
+#define S4_PCLK(_name, _reg, _bit, _flags) \
+ MESON_PCLK(_name, _reg, _bit, &s4_pclk_parents, _flags)
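MESON_PCLK comes from the shared Meson clk-regmap helpers rather than this file. Based on the MESON_GATE-based definitions deleted above, a plausible expansion is the following sketch (assumption: the helper describes a plain regmap gate; as with the composite macros, static and the semicolon come from the call site):

#define MESON_PCLK(_name, _reg, _bit, _pdata, _flags) \
struct clk_regmap _name = { \
	.data = &(struct clk_regmap_gate_data) { \
		.offset = (_reg), \
		.bit_idx = (_bit), \
	}, \
	.hw.init = &(struct clk_init_data) { \
		.name = #_name, \
		.ops = &clk_regmap_gate_ops, \
		.parent_data = (_pdata), \
		.num_parents = 1, \
		.flags = (_flags), \
	}, \
}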
+
+/*
+ * NOTE: The gates below are marked with CLK_IGNORE_UNUSED for historical reasons.
+ * Users are encouraged to test without it and submit changes to:
+ * - remove the flag if not necessary
+ * - replace the flag with something more adequate, such as CLK_IS_CRITICAL,
+ * if appropriate.
+ * - add a comment explaining why the use of CLK_IGNORE_UNUSED is desirable
+ * for a particular clock.
+ */
+static S4_PCLK(s4_ddr, CLKCTRL_SYS_CLK_EN0_REG0, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_dos, CLKCTRL_SYS_CLK_EN0_REG0, 1, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ethphy, CLKCTRL_SYS_CLK_EN0_REG0, 4, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_mali, CLKCTRL_SYS_CLK_EN0_REG0, 6, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aocpu, CLKCTRL_SYS_CLK_EN0_REG0, 13, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aucpu, CLKCTRL_SYS_CLK_EN0_REG0, 14, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_cec, CLKCTRL_SYS_CLK_EN0_REG0, 16, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sdemmca, CLKCTRL_SYS_CLK_EN0_REG0, 24, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sdemmcb, CLKCTRL_SYS_CLK_EN0_REG0, 25, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_nand, CLKCTRL_SYS_CLK_EN0_REG0, 26, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_smartcard, CLKCTRL_SYS_CLK_EN0_REG0, 27, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_acodec, CLKCTRL_SYS_CLK_EN0_REG0, 28, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_spifc, CLKCTRL_SYS_CLK_EN0_REG0, 29, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_msr_clk, CLKCTRL_SYS_CLK_EN0_REG0, 30, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ir_ctrl, CLKCTRL_SYS_CLK_EN0_REG0, 31, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_audio, CLKCTRL_SYS_CLK_EN0_REG1, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_eth, CLKCTRL_SYS_CLK_EN0_REG1, 3, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_a, CLKCTRL_SYS_CLK_EN0_REG1, 5, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_b, CLKCTRL_SYS_CLK_EN0_REG1, 6, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_c, CLKCTRL_SYS_CLK_EN0_REG1, 7, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_d, CLKCTRL_SYS_CLK_EN0_REG1, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_uart_e, CLKCTRL_SYS_CLK_EN0_REG1, 9, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_aififo, CLKCTRL_SYS_CLK_EN0_REG1, 11, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ts_ddr, CLKCTRL_SYS_CLK_EN0_REG1, 15, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_ts_pll, CLKCTRL_SYS_CLK_EN0_REG1, 16, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_g2d, CLKCTRL_SYS_CLK_EN0_REG1, 20, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_spicc0, CLKCTRL_SYS_CLK_EN0_REG1, 21, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_usb, CLKCTRL_SYS_CLK_EN0_REG1, 26, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_a, CLKCTRL_SYS_CLK_EN0_REG1, 30, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_b, CLKCTRL_SYS_CLK_EN0_REG1, 31, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_i2c_m_c, CLKCTRL_SYS_CLK_EN0_REG2, 0, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_d, CLKCTRL_SYS_CLK_EN0_REG2, 1, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_m_e, CLKCTRL_SYS_CLK_EN0_REG2, 2, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_hdmitx_apb, CLKCTRL_SYS_CLK_EN0_REG2, 4, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_i2c_s_a, CLKCTRL_SYS_CLK_EN0_REG2, 5, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_usb1_to_ddr, CLKCTRL_SYS_CLK_EN0_REG2, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_hdcp22, CLKCTRL_SYS_CLK_EN0_REG2, 10, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_mmc_apb, CLKCTRL_SYS_CLK_EN0_REG2, 11, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_rsa, CLKCTRL_SYS_CLK_EN0_REG2, 18, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_cpu_debug, CLKCTRL_SYS_CLK_EN0_REG2, 19, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_vpu_intr, CLKCTRL_SYS_CLK_EN0_REG2, 25, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_demod, CLKCTRL_SYS_CLK_EN0_REG2, 27, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_sar_adc, CLKCTRL_SYS_CLK_EN0_REG2, 28, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_gic, CLKCTRL_SYS_CLK_EN0_REG2, 30, CLK_IGNORE_UNUSED);
+
+static S4_PCLK(s4_pwm_ab, CLKCTRL_SYS_CLK_EN0_REG3, 7, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_cd, CLKCTRL_SYS_CLK_EN0_REG3, 8, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_ef, CLKCTRL_SYS_CLK_EN0_REG3, 9, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_gh, CLKCTRL_SYS_CLK_EN0_REG3, 10, CLK_IGNORE_UNUSED);
+static S4_PCLK(s4_pwm_ij, CLKCTRL_SYS_CLK_EN0_REG3, 11, CLK_IGNORE_UNUSED);
/* Array of all clocks provided by this provider */
-static struct clk_hw *s4_periphs_hw_clks[] = {
+static struct clk_hw *s4_peripherals_hw_clks[] = {
[CLKID_RTC_32K_CLKIN] = &s4_rtc_32k_by_oscin_clkin.hw,
[CLKID_RTC_32K_DIV] = &s4_rtc_32k_by_oscin_div.hw,
[CLKID_RTC_32K_SEL] = &s4_rtc_32k_by_oscin_sel.hw,
@@ -3251,12 +2826,12 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_CECB_32K_SEL_PRE] = &s4_cecb_32k_sel_pre.hw,
[CLKID_CECB_32K_SEL] = &s4_cecb_32k_sel.hw,
[CLKID_CECB_32K_CLKOUT] = &s4_cecb_32k_clkout.hw,
- [CLKID_SC_CLK_SEL] = &s4_sc_clk_mux.hw,
+ [CLKID_SC_CLK_SEL] = &s4_sc_clk_sel.hw,
[CLKID_SC_CLK_DIV] = &s4_sc_clk_div.hw,
- [CLKID_SC] = &s4_sc_clk_gate.hw,
- [CLKID_12_24M] = &s4_12_24M_clk_gate.hw,
- [CLKID_12M_CLK_DIV] = &s4_12M_clk_div.hw,
- [CLKID_12_24M_CLK_SEL] = &s4_12_24M_clk.hw,
+ [CLKID_SC] = &s4_sc_clk.hw,
+ [CLKID_12_24M] = &s4_12_24M.hw,
+ [CLKID_12M_CLK_DIV] = &s4_12M_div.hw,
+ [CLKID_12_24M_CLK_SEL] = &s4_12_24M_sel.hw,
[CLKID_VID_PLL_DIV] = &s4_vid_pll_div.hw,
[CLKID_VID_PLL_SEL] = &s4_vid_pll_sel.hw,
[CLKID_VID_PLL] = &s4_vid_pll.hw,
@@ -3298,28 +2873,28 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_HDMI_DIV] = &s4_hdmi_div.hw,
[CLKID_HDMI] = &s4_hdmi.hw,
[CLKID_TS_CLK_DIV] = &s4_ts_clk_div.hw,
- [CLKID_TS] = &s4_ts_clk_gate.hw,
+ [CLKID_TS] = &s4_ts_clk.hw,
[CLKID_MALI_0_SEL] = &s4_mali_0_sel.hw,
[CLKID_MALI_0_DIV] = &s4_mali_0_div.hw,
[CLKID_MALI_0] = &s4_mali_0.hw,
[CLKID_MALI_1_SEL] = &s4_mali_1_sel.hw,
[CLKID_MALI_1_DIV] = &s4_mali_1_div.hw,
[CLKID_MALI_1] = &s4_mali_1.hw,
- [CLKID_MALI_SEL] = &s4_mali_mux.hw,
- [CLKID_VDEC_P0_SEL] = &s4_vdec_p0_mux.hw,
+ [CLKID_MALI_SEL] = &s4_mali_sel.hw,
+ [CLKID_VDEC_P0_SEL] = &s4_vdec_p0_sel.hw,
[CLKID_VDEC_P0_DIV] = &s4_vdec_p0_div.hw,
[CLKID_VDEC_P0] = &s4_vdec_p0.hw,
- [CLKID_VDEC_P1_SEL] = &s4_vdec_p1_mux.hw,
+ [CLKID_VDEC_P1_SEL] = &s4_vdec_p1_sel.hw,
[CLKID_VDEC_P1_DIV] = &s4_vdec_p1_div.hw,
[CLKID_VDEC_P1] = &s4_vdec_p1.hw,
- [CLKID_VDEC_SEL] = &s4_vdec_mux.hw,
- [CLKID_HEVCF_P0_SEL] = &s4_hevcf_p0_mux.hw,
+ [CLKID_VDEC_SEL] = &s4_vdec_sel.hw,
+ [CLKID_HEVCF_P0_SEL] = &s4_hevcf_p0_sel.hw,
[CLKID_HEVCF_P0_DIV] = &s4_hevcf_p0_div.hw,
[CLKID_HEVCF_P0] = &s4_hevcf_p0.hw,
- [CLKID_HEVCF_P1_SEL] = &s4_hevcf_p1_mux.hw,
+ [CLKID_HEVCF_P1_SEL] = &s4_hevcf_p1_sel.hw,
[CLKID_HEVCF_P1_DIV] = &s4_hevcf_p1_div.hw,
[CLKID_HEVCF_P1] = &s4_hevcf_p1.hw,
- [CLKID_HEVCF_SEL] = &s4_hevcf_mux.hw,
+ [CLKID_HEVCF_SEL] = &s4_hevcf_sel.hw,
[CLKID_VPU_0_SEL] = &s4_vpu_0_sel.hw,
[CLKID_VPU_0_DIV] = &s4_vpu_0_div.hw,
[CLKID_VPU_0] = &s4_vpu_0.hw,
@@ -3327,18 +2902,18 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_VPU_1_DIV] = &s4_vpu_1_div.hw,
[CLKID_VPU_1] = &s4_vpu_1.hw,
[CLKID_VPU] = &s4_vpu.hw,
- [CLKID_VPU_CLKB_TMP_SEL] = &s4_vpu_clkb_tmp_mux.hw,
+ [CLKID_VPU_CLKB_TMP_SEL] = &s4_vpu_clkb_tmp_sel.hw,
[CLKID_VPU_CLKB_TMP_DIV] = &s4_vpu_clkb_tmp_div.hw,
[CLKID_VPU_CLKB_TMP] = &s4_vpu_clkb_tmp.hw,
[CLKID_VPU_CLKB_DIV] = &s4_vpu_clkb_div.hw,
[CLKID_VPU_CLKB] = &s4_vpu_clkb.hw,
- [CLKID_VPU_CLKC_P0_SEL] = &s4_vpu_clkc_p0_mux.hw,
+ [CLKID_VPU_CLKC_P0_SEL] = &s4_vpu_clkc_p0_sel.hw,
[CLKID_VPU_CLKC_P0_DIV] = &s4_vpu_clkc_p0_div.hw,
[CLKID_VPU_CLKC_P0] = &s4_vpu_clkc_p0.hw,
- [CLKID_VPU_CLKC_P1_SEL] = &s4_vpu_clkc_p1_mux.hw,
+ [CLKID_VPU_CLKC_P1_SEL] = &s4_vpu_clkc_p1_sel.hw,
[CLKID_VPU_CLKC_P1_DIV] = &s4_vpu_clkc_p1_div.hw,
[CLKID_VPU_CLKC_P1] = &s4_vpu_clkc_p1.hw,
- [CLKID_VPU_CLKC_SEL] = &s4_vpu_clkc_mux.hw,
+ [CLKID_VPU_CLKC_SEL] = &s4_vpu_clkc_sel.hw,
[CLKID_VAPB_0_SEL] = &s4_vapb_0_sel.hw,
[CLKID_VAPB_0_DIV] = &s4_vapb_0_div.hw,
[CLKID_VAPB_0] = &s4_vapb_0.hw,
@@ -3346,10 +2921,10 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_VAPB_1_DIV] = &s4_vapb_1_div.hw,
[CLKID_VAPB_1] = &s4_vapb_1.hw,
[CLKID_VAPB] = &s4_vapb.hw,
- [CLKID_GE2D] = &s4_ge2d_gate.hw,
- [CLKID_VDIN_MEAS_SEL] = &s4_vdin_meas_mux.hw,
+ [CLKID_GE2D] = &s4_ge2d.hw,
+ [CLKID_VDIN_MEAS_SEL] = &s4_vdin_meas_sel.hw,
[CLKID_VDIN_MEAS_DIV] = &s4_vdin_meas_div.hw,
- [CLKID_VDIN_MEAS] = &s4_vdin_meas_gate.hw,
+ [CLKID_VDIN_MEAS] = &s4_vdin_meas.hw,
[CLKID_SD_EMMC_C_CLK_SEL] = &s4_sd_emmc_c_clk0_sel.hw,
[CLKID_SD_EMMC_C_CLK_DIV] = &s4_sd_emmc_c_clk0_div.hw,
[CLKID_SD_EMMC_C] = &s4_sd_emmc_c_clk0.hw,
@@ -3359,42 +2934,42 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_SD_EMMC_B_CLK_SEL] = &s4_sd_emmc_b_clk0_sel.hw,
[CLKID_SD_EMMC_B_CLK_DIV] = &s4_sd_emmc_b_clk0_div.hw,
[CLKID_SD_EMMC_B] = &s4_sd_emmc_b_clk0.hw,
- [CLKID_SPICC0_SEL] = &s4_spicc0_mux.hw,
+ [CLKID_SPICC0_SEL] = &s4_spicc0_sel.hw,
[CLKID_SPICC0_DIV] = &s4_spicc0_div.hw,
- [CLKID_SPICC0_EN] = &s4_spicc0_gate.hw,
- [CLKID_PWM_A_SEL] = &s4_pwm_a_mux.hw,
+ [CLKID_SPICC0_EN] = &s4_spicc0_en.hw,
+ [CLKID_PWM_A_SEL] = &s4_pwm_a_sel.hw,
[CLKID_PWM_A_DIV] = &s4_pwm_a_div.hw,
- [CLKID_PWM_A] = &s4_pwm_a_gate.hw,
- [CLKID_PWM_B_SEL] = &s4_pwm_b_mux.hw,
+ [CLKID_PWM_A] = &s4_pwm_a.hw,
+ [CLKID_PWM_B_SEL] = &s4_pwm_b_sel.hw,
[CLKID_PWM_B_DIV] = &s4_pwm_b_div.hw,
- [CLKID_PWM_B] = &s4_pwm_b_gate.hw,
- [CLKID_PWM_C_SEL] = &s4_pwm_c_mux.hw,
+ [CLKID_PWM_B] = &s4_pwm_b.hw,
+ [CLKID_PWM_C_SEL] = &s4_pwm_c_sel.hw,
[CLKID_PWM_C_DIV] = &s4_pwm_c_div.hw,
- [CLKID_PWM_C] = &s4_pwm_c_gate.hw,
- [CLKID_PWM_D_SEL] = &s4_pwm_d_mux.hw,
+ [CLKID_PWM_C] = &s4_pwm_c.hw,
+ [CLKID_PWM_D_SEL] = &s4_pwm_d_sel.hw,
[CLKID_PWM_D_DIV] = &s4_pwm_d_div.hw,
- [CLKID_PWM_D] = &s4_pwm_d_gate.hw,
- [CLKID_PWM_E_SEL] = &s4_pwm_e_mux.hw,
+ [CLKID_PWM_D] = &s4_pwm_d.hw,
+ [CLKID_PWM_E_SEL] = &s4_pwm_e_sel.hw,
[CLKID_PWM_E_DIV] = &s4_pwm_e_div.hw,
- [CLKID_PWM_E] = &s4_pwm_e_gate.hw,
- [CLKID_PWM_F_SEL] = &s4_pwm_f_mux.hw,
+ [CLKID_PWM_E] = &s4_pwm_e.hw,
+ [CLKID_PWM_F_SEL] = &s4_pwm_f_sel.hw,
[CLKID_PWM_F_DIV] = &s4_pwm_f_div.hw,
- [CLKID_PWM_F] = &s4_pwm_f_gate.hw,
- [CLKID_PWM_G_SEL] = &s4_pwm_g_mux.hw,
+ [CLKID_PWM_F] = &s4_pwm_f.hw,
+ [CLKID_PWM_G_SEL] = &s4_pwm_g_sel.hw,
[CLKID_PWM_G_DIV] = &s4_pwm_g_div.hw,
- [CLKID_PWM_G] = &s4_pwm_g_gate.hw,
- [CLKID_PWM_H_SEL] = &s4_pwm_h_mux.hw,
+ [CLKID_PWM_G] = &s4_pwm_g.hw,
+ [CLKID_PWM_H_SEL] = &s4_pwm_h_sel.hw,
[CLKID_PWM_H_DIV] = &s4_pwm_h_div.hw,
- [CLKID_PWM_H] = &s4_pwm_h_gate.hw,
- [CLKID_PWM_I_SEL] = &s4_pwm_i_mux.hw,
+ [CLKID_PWM_H] = &s4_pwm_h.hw,
+ [CLKID_PWM_I_SEL] = &s4_pwm_i_sel.hw,
[CLKID_PWM_I_DIV] = &s4_pwm_i_div.hw,
- [CLKID_PWM_I] = &s4_pwm_i_gate.hw,
- [CLKID_PWM_J_SEL] = &s4_pwm_j_mux.hw,
+ [CLKID_PWM_I] = &s4_pwm_i.hw,
+ [CLKID_PWM_J_SEL] = &s4_pwm_j_sel.hw,
[CLKID_PWM_J_DIV] = &s4_pwm_j_div.hw,
- [CLKID_PWM_J] = &s4_pwm_j_gate.hw,
- [CLKID_SARADC_SEL] = &s4_saradc_mux.hw,
+ [CLKID_PWM_J] = &s4_pwm_j.hw,
+ [CLKID_SARADC_SEL] = &s4_saradc_sel.hw,
[CLKID_SARADC_DIV] = &s4_saradc_div.hw,
- [CLKID_SARADC] = &s4_saradc_gate.hw,
+ [CLKID_SARADC] = &s4_saradc.hw,
[CLKID_GEN_SEL] = &s4_gen_clk_sel.hw,
[CLKID_GEN_DIV] = &s4_gen_clk_div.hw,
[CLKID_GEN] = &s4_gen_clk.hw,
@@ -3447,73 +3022,38 @@ static struct clk_hw *s4_periphs_hw_clks[] = {
[CLKID_PWM_EF] = &s4_pwm_ef.hw,
[CLKID_PWM_GH] = &s4_pwm_gh.hw,
[CLKID_PWM_IJ] = &s4_pwm_ij.hw,
- [CLKID_HDCP22_ESMCLK_SEL] = &s4_hdcp22_esmclk_mux.hw,
+ [CLKID_HDCP22_ESMCLK_SEL] = &s4_hdcp22_esmclk_sel.hw,
[CLKID_HDCP22_ESMCLK_DIV] = &s4_hdcp22_esmclk_div.hw,
- [CLKID_HDCP22_ESMCLK] = &s4_hdcp22_esmclk_gate.hw,
- [CLKID_HDCP22_SKPCLK_SEL] = &s4_hdcp22_skpclk_mux.hw,
+ [CLKID_HDCP22_ESMCLK] = &s4_hdcp22_esmclk.hw,
+ [CLKID_HDCP22_SKPCLK_SEL] = &s4_hdcp22_skpclk_sel.hw,
[CLKID_HDCP22_SKPCLK_DIV] = &s4_hdcp22_skpclk_div.hw,
- [CLKID_HDCP22_SKPCLK] = &s4_hdcp22_skpclk_gate.hw,
-};
-
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = CLKCTRL_DEMOD_CLK_CTRL,
+ [CLKID_HDCP22_SKPCLK] = &s4_hdcp22_skpclk.hw,
};
-static struct meson_clk_hw_data s4_periphs_clks = {
- .hws = s4_periphs_hw_clks,
- .num = ARRAY_SIZE(s4_periphs_hw_clks),
+static const struct meson_clkc_data s4_peripherals_clkc_data = {
+ .hw_clks = {
+ .hws = s4_peripherals_hw_clks,
+ .num = ARRAY_SIZE(s4_peripherals_hw_clks),
+ },
};
-static int meson_s4_periphs_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "can't init regmap mmio region\n");
-
- for (i = 0; i < s4_periphs_clks.num; i++) {
- /* array might be sparse */
- if (!s4_periphs_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, s4_periphs_clks.hws[i]);
- if (ret)
- return dev_err_probe(dev, ret,
- "clock[%d] registration failed\n", i);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get, &s4_periphs_clks);
-}
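Both drivers in this patch now probe through the generic meson_clkc_mmio_probe(), keyed off the meson_clkc_data attached to the OF match entry. Its definition is outside this diff; reconstructed from the two probe bodies it replaces, it plausibly reads as follows (a sketch; the shared regmap config named here is an assumption):

static int meson_clkc_mmio_probe(struct platform_device *pdev)
{
	const struct meson_clkc_data *data;
	struct device *dev = &pdev->dev;
	struct regmap *regmap;
	void __iomem *base;
	int ret, i;

	data = of_device_get_match_data(dev);
	if (!data)
		return -EINVAL;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* assumed: one generic 32-bit MMIO config for all users */
	regmap = devm_regmap_init_mmio(dev, base, &meson_clkc_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* optional init sequence, e.g. s4_pll_init_regs in s4-pll.c below */
	if (data->init_count) {
		ret = regmap_multi_reg_write(regmap, data->init_regs,
					     data->init_count);
		if (ret)
			return ret;
	}

	for (i = 0; i < data->hw_clks.num; i++) {
		/* array might be sparse */
		if (!data->hw_clks.hws[i])
			continue;

		ret = devm_clk_hw_register(dev, data->hw_clks.hws[i]);
		if (ret)
			return ret;
	}

	return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
					   (void *)&data->hw_clks);
}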
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id s4_peripherals_clkc_match_table[] = {
{
.compatible = "amlogic,s4-peripherals-clkc",
+ .data = &s4_peripherals_clkc_data,
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, s4_peripherals_clkc_match_table);
-static struct platform_driver s4_driver = {
- .probe = meson_s4_periphs_probe,
+static struct platform_driver s4_peripherals_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
- .name = "s4-periphs-clkc",
- .of_match_table = clkc_match_table,
+ .name = "s4-peripherals-clkc",
+ .of_match_table = s4_peripherals_clkc_match_table,
},
};
-module_platform_driver(s4_driver);
+module_platform_driver(s4_peripherals_clkc_driver);
MODULE_DESCRIPTION("Amlogic S4 Peripherals Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
diff --git a/drivers/clk/meson/s4-pll.c b/drivers/clk/meson/s4-pll.c
index 3d689d2f003e..56ce6f566e53 100644
--- a/drivers/clk/meson/s4-pll.c
+++ b/drivers/clk/meson/s4-pll.c
@@ -281,7 +281,7 @@ static const struct pll_mult_range s4_gp0_pll_mult_range = {
/*
* Internal gp0 pll emulation configuration parameters
*/
-static const struct reg_sequence s4_gp0_init_regs[] = {
+static const struct reg_sequence s4_gp0_pll_init_regs[] = {
{ .reg = ANACTRL_GP0PLL_CTRL1, .def = 0x00000000 },
{ .reg = ANACTRL_GP0PLL_CTRL2, .def = 0x00000000 },
{ .reg = ANACTRL_GP0PLL_CTRL3, .def = 0x48681c00 },
@@ -318,8 +318,8 @@ static struct clk_regmap s4_gp0_pll_dco = {
.width = 1,
},
.range = &s4_gp0_pll_mult_range,
- .init_regs = s4_gp0_init_regs,
- .init_count = ARRAY_SIZE(s4_gp0_init_regs),
+ .init_regs = s4_gp0_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_gp0_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
.name = "gp0_pll_dco",
@@ -353,7 +353,7 @@ static struct clk_regmap s4_gp0_pll = {
/*
* Internal hifi pll emulation configuration parameters
*/
-static const struct reg_sequence s4_hifi_init_regs[] = {
+static const struct reg_sequence s4_hifi_pll_init_regs[] = {
{ .reg = ANACTRL_HIFIPLL_CTRL2, .def = 0x00000000 },
{ .reg = ANACTRL_HIFIPLL_CTRL3, .def = 0x6a285c00 },
{ .reg = ANACTRL_HIFIPLL_CTRL4, .def = 0x65771290 },
@@ -394,8 +394,8 @@ static struct clk_regmap s4_hifi_pll_dco = {
.width = 1,
},
.range = &s4_gp0_pll_mult_range,
- .init_regs = s4_hifi_init_regs,
- .init_count = ARRAY_SIZE(s4_hifi_init_regs),
+ .init_regs = s4_hifi_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_hifi_pll_init_regs),
.frac_max = 100000,
.flags = CLK_MESON_PLL_ROUND_CLOSEST,
},
@@ -794,76 +794,36 @@ static struct clk_hw *s4_pll_hw_clks[] = {
[CLKID_MPLL3] = &s4_mpll3.hw,
};
-static const struct reg_sequence s4_init_regs[] = {
+static const struct reg_sequence s4_pll_init_regs[] = {
{ .reg = ANACTRL_MPLL_CTRL0, .def = 0x00000543 },
};
-static const struct regmap_config clkc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .max_register = ANACTRL_HDMIPLL_CTRL0,
-};
-
-static struct meson_clk_hw_data s4_pll_clks = {
- .hws = s4_pll_hw_clks,
- .num = ARRAY_SIZE(s4_pll_hw_clks),
+static const struct meson_clkc_data s4_pll_clkc_data = {
+ .hw_clks = {
+ .hws = s4_pll_hw_clks,
+ .num = ARRAY_SIZE(s4_pll_hw_clks),
+ },
+ .init_regs = s4_pll_init_regs,
+ .init_count = ARRAY_SIZE(s4_pll_init_regs),
};
-static int meson_s4_pll_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *regmap;
- void __iomem *base;
- int ret, i;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return dev_err_probe(dev, PTR_ERR(base),
- "can't ioremap resource\n");
-
- regmap = devm_regmap_init_mmio(dev, base, &clkc_regmap_config);
- if (IS_ERR(regmap))
- return dev_err_probe(dev, PTR_ERR(regmap),
- "can't init regmap mmio region\n");
-
- ret = regmap_multi_reg_write(regmap, s4_init_regs, ARRAY_SIZE(s4_init_regs));
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to init registers\n");
-
- /* Register clocks */
- for (i = 0; i < s4_pll_clks.num; i++) {
- /* array might be sparse */
- if (!s4_pll_clks.hws[i])
- continue;
-
- ret = devm_clk_hw_register(dev, s4_pll_clks.hws[i]);
- if (ret)
- return dev_err_probe(dev, ret,
- "clock[%d] registration failed\n", i);
- }
-
- return devm_of_clk_add_hw_provider(dev, meson_clk_hw_get,
- &s4_pll_clks);
-}
-
-static const struct of_device_id clkc_match_table[] = {
+static const struct of_device_id s4_pll_clkc_match_table[] = {
{
.compatible = "amlogic,s4-pll-clkc",
+ .data = &s4_pll_clkc_data,
},
{}
};
-MODULE_DEVICE_TABLE(of, clkc_match_table);
+MODULE_DEVICE_TABLE(of, s4_pll_clkc_match_table);
-static struct platform_driver s4_driver = {
- .probe = meson_s4_pll_probe,
+static struct platform_driver s4_pll_clkc_driver = {
+ .probe = meson_clkc_mmio_probe,
.driver = {
.name = "s4-pll-clkc",
- .of_match_table = clkc_match_table,
+ .of_match_table = s4_pll_clkc_match_table,
},
};
-module_platform_driver(s4_driver);
+module_platform_driver(s4_pll_clkc_driver);
MODULE_DESCRIPTION("Amlogic S4 PLL Clock Controller driver");
MODULE_AUTHOR("Yu Tu <yu.tu@amlogic.com>");
diff --git a/drivers/clk/microchip/Kconfig b/drivers/clk/microchip/Kconfig
index 0724ce65898f..1b9e43eb5497 100644
--- a/drivers/clk/microchip/Kconfig
+++ b/drivers/clk/microchip/Kconfig
@@ -7,6 +7,8 @@ config MCHP_CLK_MPFS
bool "Clk driver for PolarFire SoC"
depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
default ARCH_MICROCHIP_POLARFIRE
+ depends on MFD_SYSCON
select AUXILIARY_BUS
+ select REGMAP_MMIO
help
Supports Clock Configuration for PolarFire SoC
diff --git a/drivers/clk/microchip/clk-core.c b/drivers/clk/microchip/clk-core.c
index 6fbc6dc50ca3..b34348d491f3 100644
--- a/drivers/clk/microchip/clk-core.c
+++ b/drivers/clk/microchip/clk-core.c
@@ -155,11 +155,13 @@ static unsigned long pbclk_recalc_rate(struct clk_hw *hw,
return parent_rate / pbclk_read_pbdiv(pb);
}
-static long pbclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int pbclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return calc_best_divided_rate(rate, *parent_rate,
- PB_DIV_MAX, PB_DIV_MIN);
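+ /* determine_rate reports the chosen rate via req->rate and returns 0 on success */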
+ req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
+ PB_DIV_MAX, PB_DIV_MIN);
+
+ return 0;
}
static int pbclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -207,7 +209,7 @@ const struct clk_ops pic32_pbclk_ops = {
.disable = pbclk_disable,
.is_enabled = pbclk_is_enabled,
.recalc_rate = pbclk_recalc_rate,
- .round_rate = pbclk_round_rate,
+ .determine_rate = pbclk_determine_rate,
.set_rate = pbclk_set_rate,
};
@@ -372,18 +374,6 @@ static unsigned long roclk_recalc_rate(struct clk_hw *hw,
return roclk_calc_rate(parent_rate, rodiv, rotrim);
}
-static long roclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
-{
- u32 rotrim, rodiv;
-
- /* calculate dividers for new rate */
- roclk_calc_div_trim(rate, *parent_rate, &rodiv, &rotrim);
-
- /* caclulate new rate (rounding) based on new rodiv & rotrim */
- return roclk_calc_rate(*parent_rate, rodiv, rotrim);
-}
-
static int roclk_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -394,6 +384,8 @@ static int roclk_determine_rate(struct clk_hw *hw,
/* find a parent which can generate nearest clkrate >= rate */
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ u32 rotrim, rodiv;
+
/* get parent */
parent_clk = clk_hw_get_parent_by_index(hw, i);
if (!parent_clk)
@@ -404,7 +396,12 @@ static int roclk_determine_rate(struct clk_hw *hw,
if (req->rate > parent_rate)
continue;
- nearest_rate = roclk_round_rate(hw, req->rate, &parent_rate);
+ /* calculate dividers for new rate */
+ roclk_calc_div_trim(req->rate, parent_rate, &rodiv, &rotrim);
+
+ /* calculate new rate (rounding) based on new rodiv & rotrim */
+ nearest_rate = roclk_calc_rate(parent_rate, rodiv, rotrim);
+
delta = abs(nearest_rate - req->rate);
if ((nearest_rate >= req->rate) && (delta < best_delta)) {
best_parent_clk = parent_clk;
@@ -665,12 +662,15 @@ static unsigned long spll_clk_recalc_rate(struct clk_hw *hw,
return rate64;
}
-static long spll_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int spll_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct pic32_sys_pll *pll = clkhw_to_spll(hw);
- return spll_calc_mult_div(pll, rate, *parent_rate, NULL, NULL);
+ req->rate = spll_calc_mult_div(pll, req->rate, req->best_parent_rate,
+ NULL, NULL);
+
+ return 0;
}
static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -725,7 +725,7 @@ static int spll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
/* SPLL clock operation */
const struct clk_ops pic32_spll_ops = {
.recalc_rate = spll_clk_recalc_rate,
- .round_rate = spll_clk_round_rate,
+ .determine_rate = spll_clk_determine_rate,
.set_rate = spll_clk_set_rate,
};
@@ -780,10 +780,13 @@ static unsigned long sclk_get_rate(struct clk_hw *hw, unsigned long parent_rate)
return parent_rate / div;
}
-static long sclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return calc_best_divided_rate(rate, *parent_rate, SLEW_SYSDIV, 1);
+ req->rate = calc_best_divided_rate(req->rate, req->best_parent_rate,
+ SLEW_SYSDIV, 1);
+
+ return 0;
}
static int sclk_set_rate(struct clk_hw *hw,
@@ -909,7 +912,7 @@ static int sclk_init(struct clk_hw *hw)
const struct clk_ops pic32_sclk_ops = {
.get_parent = sclk_get_parent,
.set_parent = sclk_set_parent,
- .round_rate = sclk_round_rate,
+ .determine_rate = sclk_determine_rate,
.set_rate = sclk_set_rate,
.recalc_rate = sclk_get_rate,
.init = sclk_init,
diff --git a/drivers/clk/microchip/clk-mpfs.c b/drivers/clk/microchip/clk-mpfs.c
index c22632a7439c..ee58304913ef 100644
--- a/drivers/clk/microchip/clk-mpfs.c
+++ b/drivers/clk/microchip/clk-mpfs.c
@@ -4,10 +4,13 @@
*
* Copyright (C) 2020-2022 Microchip Technology Inc. All rights reserved.
*/
+#include <linux/cleanup.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <dt-bindings/clock/microchip,mpfs-clock.h>
#include <soc/microchip/mpfs.h>
@@ -30,6 +33,14 @@
#define MSSPLL_POSTDIV_WIDTH 0x07u
#define MSSPLL_FIXED_DIV 4u
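+/*
+ * MMIO regmap layout for the clock block; only used by the old devicetree
+ * format fallback, where no shared syscon regmap is available.
+ */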
+static const struct regmap_config mpfs_clk_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .max_register = REG_SUBBLK_RESET_CR,
+};
+
/*
* This clock ID is defined here, rather than the binding headers, as it is an
* internal clock only, and therefore has no consumers in other peripheral
@@ -39,6 +50,7 @@
struct mpfs_clock_data {
struct device *dev;
+ struct regmap *regmap;
void __iomem *base;
void __iomem *msspll_base;
struct clk_hw_onecell_data hw_data;
@@ -67,21 +79,39 @@ struct mpfs_msspll_out_hw_clock {
#define to_mpfs_msspll_out_clk(_hw) container_of(_hw, struct mpfs_msspll_out_hw_clock, hw)
+struct mpfs_cfg_clock {
+ struct regmap *map;
+ const struct clk_div_table *table;
+ u8 map_offset;
+ u8 shift;
+ u8 width;
+ u8 flags;
+};
+
struct mpfs_cfg_hw_clock {
- struct clk_divider cfg;
- struct clk_init_data init;
+ struct clk_hw hw;
+ struct mpfs_cfg_clock cfg;
unsigned int id;
- u32 reg_offset;
+};
+
+#define to_mpfs_cfg_clk(_hw) container_of(_hw, struct mpfs_cfg_hw_clock, hw)
+
+struct mpfs_periph_clock {
+ struct regmap *map;
+ u8 map_offset;
+ u8 shift;
};
struct mpfs_periph_hw_clock {
- struct clk_gate periph;
+ struct clk_hw hw;
+ struct mpfs_periph_clock periph;
unsigned int id;
};
+#define to_mpfs_periph_clk(_hw) container_of(_hw, struct mpfs_periph_hw_clock, hw)
+
/*
- * mpfs_clk_lock prevents anything else from writing to the
- * mpfs clk block while a software locked register is being written.
+ * Protects the MSSPLL outputs, since there are two per register
*/
static DEFINE_SPINLOCK(mpfs_clk_lock);
@@ -219,16 +249,61 @@ static int mpfs_clk_register_msspll_outs(struct device *dev,
/*
* "CFG" clocks
*/
+static unsigned long mpfs_cfg_clk_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
+ struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
+ u32 val;
-#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \
- .id = _id, \
- .cfg.shift = _shift, \
- .cfg.width = _width, \
- .cfg.table = _table, \
- .reg_offset = _offset, \
- .cfg.flags = _flags, \
- .cfg.hw.init = CLK_HW_INIT(_name, _parent, &clk_divider_ops, 0), \
- .cfg.lock = &mpfs_clk_lock, \
+ regmap_read(cfg->map, cfg->map_offset, &val);
+ val >>= cfg->shift;
+ val &= clk_div_mask(cfg->width);
+
+ return divider_recalc_rate(hw, prate, val, cfg->table, cfg->flags, cfg->width);
+}
+
+static int mpfs_cfg_clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
+ struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
+
+ return divider_determine_rate(hw, req, cfg->table, cfg->width, 0);
+}
+
+static int mpfs_cfg_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate)
+{
+ struct mpfs_cfg_hw_clock *cfg_hw = to_mpfs_cfg_clk(hw);
+ struct mpfs_cfg_clock *cfg = &cfg_hw->cfg;
+ int divider_setting;
+ u32 val;
+ u32 mask;
+
+ divider_setting = divider_get_val(rate, prate, cfg->table, cfg->width, 0);
+
+ if (divider_setting < 0)
+ return divider_setting;
+
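+ /* update only this divider's field, leaving the other bits untouched */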
+ mask = clk_div_mask(cfg->width) << cfg->shift;
+ val = divider_setting << cfg->shift;
+ regmap_update_bits(cfg->map, cfg->map_offset, mask, val);
+
+ return 0;
+}
+
+static const struct clk_ops mpfs_clk_cfg_ops = {
+ .recalc_rate = mpfs_cfg_clk_recalc_rate,
+ .determine_rate = mpfs_cfg_clk_determine_rate,
+ .set_rate = mpfs_cfg_clk_set_rate,
+};
+
+#define CLK_CFG(_id, _name, _parent, _shift, _width, _table, _flags, _offset) { \
+ .id = _id, \
+ .cfg.shift = _shift, \
+ .cfg.width = _width, \
+ .cfg.table = _table, \
+ .cfg.map_offset = _offset, \
+ .cfg.flags = _flags, \
+ .hw.init = CLK_HW_INIT(_name, _parent, &mpfs_clk_cfg_ops, 0), \
}
#define CLK_CPU_OFFSET 0u
@@ -248,10 +323,10 @@ static struct mpfs_cfg_hw_clock mpfs_cfg_clks[] = {
.cfg.shift = 0,
.cfg.width = 12,
.cfg.table = mpfs_div_rtcref_table,
- .reg_offset = REG_RTC_CLOCK_CR,
+ .cfg.map_offset = REG_RTC_CLOCK_CR,
.cfg.flags = CLK_DIVIDER_ONE_BASED,
- .cfg.hw.init =
- CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &clk_divider_ops, 0),
+ .hw.init =
+ CLK_HW_INIT_PARENTS_DATA("clk_rtcref", mpfs_ext_ref, &mpfs_clk_cfg_ops, 0),
}
};
@@ -264,14 +339,14 @@ static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *
for (i = 0; i < num_clks; i++) {
struct mpfs_cfg_hw_clock *cfg_hw = &cfg_hws[i];
- cfg_hw->cfg.reg = data->base + cfg_hw->reg_offset;
- ret = devm_clk_hw_register(dev, &cfg_hw->cfg.hw);
+ cfg_hw->cfg.map = data->regmap;
+ ret = devm_clk_hw_register(dev, &cfg_hw->hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
cfg_hw->id);
id = cfg_hw->id;
- data->hw_data.hws[id] = &cfg_hw->cfg.hw;
+ data->hw_data.hws[id] = &cfg_hw->hw;
}
return 0;
@@ -281,15 +356,50 @@ static int mpfs_clk_register_cfgs(struct device *dev, struct mpfs_cfg_hw_clock *
* peripheral clocks - devices connected to axi or ahb buses.
*/
-#define CLK_PERIPH(_id, _name, _parent, _shift, _flags) { \
- .id = _id, \
- .periph.bit_idx = _shift, \
- .periph.hw.init = CLK_HW_INIT_HW(_name, _parent, &clk_gate_ops, \
- _flags), \
- .periph.lock = &mpfs_clk_lock, \
+static int mpfs_periph_clk_enable(struct clk_hw *hw)
+{
+ struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
+ struct mpfs_periph_clock *periph = &periph_hw->periph;
+
+ regmap_update_bits(periph->map, periph->map_offset,
+ BIT(periph->shift), BIT(periph->shift));
+
+ return 0;
}
-#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].cfg.hw)
+static void mpfs_periph_clk_disable(struct clk_hw *hw)
+{
+ struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
+ struct mpfs_periph_clock *periph = &periph_hw->periph;
+
+ regmap_update_bits(periph->map, periph->map_offset, BIT(periph->shift), 0);
+}
+
+static int mpfs_periph_clk_is_enabled(struct clk_hw *hw)
+{
+ struct mpfs_periph_hw_clock *periph_hw = to_mpfs_periph_clk(hw);
+ struct mpfs_periph_clock *periph = &periph_hw->periph;
+ u32 val;
+
+ regmap_read(periph->map, periph->map_offset, &val);
+
+ return !!(val & BIT(periph->shift));
+}
+
+static const struct clk_ops mpfs_periph_clk_ops = {
+ .enable = mpfs_periph_clk_enable,
+ .disable = mpfs_periph_clk_disable,
+ .is_enabled = mpfs_periph_clk_is_enabled,
+};
+
+#define CLK_PERIPH(_id, _name, _parent, _shift, _flags) { \
+ .id = _id, \
+ .periph.map_offset = REG_SUBBLK_CLOCK_CR, \
+ .periph.shift = _shift, \
+ .hw.init = CLK_HW_INIT_HW(_name, _parent, &mpfs_periph_clk_ops, _flags), \
+}
+
+#define PARENT_CLK(PARENT) (&mpfs_cfg_clks[CLK_##PARENT##_OFFSET].hw)
/*
* Critical clocks:
@@ -346,19 +456,55 @@ static int mpfs_clk_register_periphs(struct device *dev, struct mpfs_periph_hw_c
for (i = 0; i < num_clks; i++) {
struct mpfs_periph_hw_clock *periph_hw = &periph_hws[i];
- periph_hw->periph.reg = data->base + REG_SUBBLK_CLOCK_CR;
- ret = devm_clk_hw_register(dev, &periph_hw->periph.hw);
+ periph_hw->periph.map = data->regmap;
+ ret = devm_clk_hw_register(dev, &periph_hw->hw);
if (ret)
return dev_err_probe(dev, ret, "failed to register clock id: %d\n",
periph_hw->id);
id = periph_hws[i].id;
- data->hw_data.hws[id] = &periph_hw->periph.hw;
+ data->hw_data.hws[id] = &periph_hw->hw;
}
return 0;
}
+static inline int mpfs_clk_syscon_probe(struct mpfs_clock_data *clk_data,
+ struct platform_device *pdev)
+{
+ clk_data->regmap = syscon_regmap_lookup_by_compatible("microchip,mpfs-mss-top-sysreg");
+ if (IS_ERR(clk_data->regmap))
+ return PTR_ERR(clk_data->regmap);
+
+ clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clk_data->msspll_base))
+ return PTR_ERR(clk_data->msspll_base);
+
+ return 0;
+}
+
+static inline int mpfs_clk_old_format_probe(struct mpfs_clock_data *clk_data,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ dev_warn(dev, "falling back to old devicetree format\n");
+
+ clk_data->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(clk_data->base))
+ return PTR_ERR(clk_data->base);
+
+ clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(clk_data->msspll_base))
+ return PTR_ERR(clk_data->msspll_base);
+
+ clk_data->regmap = devm_regmap_init_mmio(dev, clk_data->base, &mpfs_clk_regmap_config);
+ if (IS_ERR(clk_data->regmap))
+ return PTR_ERR(clk_data->regmap);
+
+ return mpfs_reset_controller_register(dev, clk_data->regmap);
+}
+
static int mpfs_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -374,13 +520,12 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
- clk_data->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(clk_data->base))
- return PTR_ERR(clk_data->base);
-
- clk_data->msspll_base = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(clk_data->msspll_base))
- return PTR_ERR(clk_data->msspll_base);
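+ /* prefer the new syscon-based binding; fall back to the old two-region layout */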
+ ret = mpfs_clk_syscon_probe(clk_data, pdev);
+ if (ret) {
+ ret = mpfs_clk_old_format_probe(clk_data, pdev);
+ if (ret)
+ return ret;
+ }
clk_data->hw_data.num = num_clks;
clk_data->dev = dev;
@@ -406,11 +551,7 @@ static int mpfs_clk_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, &clk_data->hw_data);
- if (ret)
- return ret;
-
- return mpfs_reset_controller_register(dev, clk_data->base + REG_SUBBLK_RESET_CR);
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, &clk_data->hw_data);
}
static const struct of_device_id mpfs_clk_of_match_table[] = {
diff --git a/drivers/clk/mmp/Kconfig b/drivers/clk/mmp/Kconfig
new file mode 100644
index 000000000000..b0d2fea3cda5
--- /dev/null
+++ b/drivers/clk/mmp/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config COMMON_CLK_PXA1908
+ bool "Clock driver for Marvell PXA1908"
+ depends on ARCH_MMP || COMPILE_TEST
+ depends on OF
+ default y if ARCH_MMP && ARM64
+ select AUXILIARY_BUS
+ help
+ This driver supports the Marvell PXA1908 SoC clocks.
diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile
index 062cd87fa8dd..0a94f2f08563 100644
--- a/drivers/clk/mmp/Makefile
+++ b/drivers/clk/mmp/Makefile
@@ -11,4 +11,7 @@ obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o pwr-island.o
obj-$(CONFIG_COMMON_CLK_MMP2_AUDIO) += clk-audio.o
-obj-$(CONFIG_ARCH_MMP) += clk-of-pxa1928.o clk-pxa1908-apbc.o clk-pxa1908-apbcp.o clk-pxa1908-apmu.o clk-pxa1908-mpmu.o
+obj-$(CONFIG_COMMON_CLK_PXA1908) += clk-pxa1908-apbc.o clk-pxa1908-apbcp.o \
+ clk-pxa1908-mpmu.o clk-pxa1908-apmu.o
+
+obj-$(CONFIG_ARCH_MMP) += clk-of-pxa1928.o
diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c
index 88d798d510cd..ed27fc796c94 100644
--- a/drivers/clk/mmp/clk-audio.c
+++ b/drivers/clk/mmp/clk-audio.c
@@ -164,23 +164,23 @@ static unsigned long audio_pll_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int audio_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned int prediv;
unsigned int postdiv;
long rounded = 0;
for (prediv = 0; prediv < ARRAY_SIZE(predivs); prediv++) {
- if (predivs[prediv].parent_rate != *parent_rate)
+ if (predivs[prediv].parent_rate != req->best_parent_rate)
continue;
for (postdiv = 0; postdiv < ARRAY_SIZE(postdivs); postdiv++) {
long freq = predivs[prediv].freq_vco;
freq /= postdivs[postdiv].divisor;
- if (freq == rate)
- return rate;
- if (freq < rate)
+ if (freq == req->rate)
+ return 0;
+ if (freq < req->rate)
continue;
if (rounded && freq > rounded)
continue;
@@ -188,7 +188,9 @@ static long audio_pll_round_rate(struct clk_hw *hw, unsigned long rate,
}
}
- return rounded;
+ req->rate = rounded;
+
+ return 0;
}
static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -228,7 +230,7 @@ static int audio_pll_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops audio_pll_ops = {
.recalc_rate = audio_pll_recalc_rate,
- .round_rate = audio_pll_round_rate,
+ .determine_rate = audio_pll_determine_rate,
.set_rate = audio_pll_set_rate,
};
diff --git a/drivers/clk/mmp/clk-frac.c b/drivers/clk/mmp/clk-frac.c
index 6556f6ada2e8..0b1bb01346f0 100644
--- a/drivers/clk/mmp/clk-frac.c
+++ b/drivers/clk/mmp/clk-frac.c
@@ -21,8 +21,8 @@
#define to_clk_factor(hw) container_of(hw, struct mmp_clk_factor, hw)
-static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct mmp_clk_factor *factor = to_clk_factor(hw);
u64 rate = 0, prev_rate;
@@ -33,19 +33,20 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
d = &factor->ftbl[i];
prev_rate = rate;
- rate = (u64)(*prate) * d->denominator;
+ rate = (u64)(req->best_parent_rate) * d->denominator;
do_div(rate, d->numerator * factor->masks->factor);
- if (rate > drate)
+ if (rate > req->rate)
break;
}
- if ((i == 0) || (i == factor->ftbl_cnt)) {
- return rate;
- } else {
- if ((drate - prev_rate) > (rate - drate))
- return rate;
- else
- return prev_rate;
- }
+
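+ /* clamp to the table ends, otherwise pick the closer of the two neighbouring rates */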
+ if ((i == 0) || (i == factor->ftbl_cnt))
+ req->rate = rate;
+ else if ((req->rate - prev_rate) > (rate - req->rate))
+ req->rate = rate;
+ else
+ req->rate = prev_rate;
+
+ return 0;
}
static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
@@ -160,7 +161,7 @@ static int clk_factor_init(struct clk_hw *hw)
static const struct clk_ops clk_factor_ops = {
.recalc_rate = clk_factor_recalc_rate,
- .round_rate = clk_factor_round_rate,
+ .determine_rate = clk_factor_determine_rate,
.set_rate = clk_factor_set_rate,
.init = clk_factor_init,
};
diff --git a/drivers/clk/mmp/clk-pxa1908-apmu.c b/drivers/clk/mmp/clk-pxa1908-apmu.c
index d3a070687fc5..7594a495a009 100644
--- a/drivers/clk/mmp/clk-pxa1908-apmu.c
+++ b/drivers/clk/mmp/clk-pxa1908-apmu.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/auxiliary_bus.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -85,6 +86,7 @@ static void pxa1908_axi_periph_clk_init(struct pxa1908_clk_unit *pxa_unit)
static int pxa1908_apmu_probe(struct platform_device *pdev)
{
struct pxa1908_clk_unit *pxa_unit;
+ struct auxiliary_device *adev;
pxa_unit = devm_kzalloc(&pdev->dev, sizeof(*pxa_unit), GFP_KERNEL);
if (!pxa_unit)
@@ -94,6 +96,11 @@ static int pxa1908_apmu_probe(struct platform_device *pdev)
if (IS_ERR(pxa_unit->base))
return PTR_ERR(pxa_unit->base);
+ adev = devm_auxiliary_device_create(&pdev->dev, "power", NULL);
+ if (IS_ERR(adev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(adev),
+ "Failed to register power controller\n");
+
mmp_clk_init(pdev->dev.of_node, &pxa_unit->unit, APMU_NR_CLKS);
pxa1908_axi_periph_clk_init(pxa_unit);
diff --git a/drivers/clk/mstar/clk-msc313-cpupll.c b/drivers/clk/mstar/clk-msc313-cpupll.c
index a93e2dba09d3..3e643be02fe2 100644
--- a/drivers/clk/mstar/clk-msc313-cpupll.c
+++ b/drivers/clk/mstar/clk-msc313-cpupll.c
@@ -140,20 +140,22 @@ static unsigned long msc313_cpupll_recalc_rate(struct clk_hw *hw, unsigned long
parent_rate);
}
-static long msc313_cpupll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int msc313_cpupll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u32 reg = msc313_cpupll_regforfrequecy(rate, *parent_rate);
- long rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate);
+ u32 reg = msc313_cpupll_regforfrequecy(req->rate, req->best_parent_rate);
+ long rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate);
/*
* This is my poor attempt at making sure the resulting
* rate doesn't overshoot the requested rate.
*/
- for (; rounded >= rate && reg > 0; reg--)
- rounded = msc313_cpupll_frequencyforreg(reg, *parent_rate);
+ for (; rounded >= req->rate && reg > 0; reg--)
+ rounded = msc313_cpupll_frequencyforreg(reg, req->best_parent_rate);
- return rounded;
+ req->rate = rounded;
+
+ return 0;
}
static int msc313_cpupll_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -168,7 +170,7 @@ static int msc313_cpupll_set_rate(struct clk_hw *hw, unsigned long rate, unsigne
static const struct clk_ops msc313_cpupll_ops = {
.recalc_rate = msc313_cpupll_recalc_rate,
- .round_rate = msc313_cpupll_round_rate,
+ .determine_rate = msc313_cpupll_determine_rate,
.set_rate = msc313_cpupll_set_rate,
};
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index 677cc3514849..1e44ace7d951 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -210,19 +210,21 @@ static int ap_cpu_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long ap_cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ap_cpu_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int divider = *parent_rate / rate;
+ int divider = req->best_parent_rate / req->rate;
divider = min(divider, APN806_MAX_DIVIDER);
- return *parent_rate / divider;
+ req->rate = req->best_parent_rate / divider;
+
+ return 0;
}
static const struct clk_ops ap_cpu_clk_ops = {
.recalc_rate = ap_cpu_clk_recalc_rate,
- .round_rate = ap_cpu_clk_round_rate,
+ .determine_rate = ap_cpu_clk_determine_rate,
.set_rate = ap_cpu_clk_set_rate,
};
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 13906e31bef8..bd0bc8e7b1e7 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -454,12 +454,12 @@ static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}
-static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_pm_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
struct regmap *base = pm_cpu->nb_pm_base;
- unsigned int div = *parent_rate / rate;
+ unsigned int div = req->best_parent_rate / req->rate;
unsigned int load_level;
/* only available when DVFS is enabled */
if (!armada_3700_pm_dvfs_is_enabled(base))
@@ -474,13 +474,16 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
val >>= offset;
val &= ARMADA_37XX_NB_TBG_DIV_MASK;
- if (val == div)
+ if (val == div) {
/*
* We found a load level matching the target
* divider, switch to this load level and
* return.
*/
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
+ }
}
/* We didn't find any valid divider */
@@ -600,7 +603,7 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_pm_cpu_ops = {
.get_parent = clk_pm_cpu_get_parent,
- .round_rate = clk_pm_cpu_round_rate,
+ .determine_rate = clk_pm_cpu_determine_rate,
.set_rate = clk_pm_cpu_set_rate,
.recalc_rate = clk_pm_cpu_recalc_rate,
};
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
index 818b175391fa..628032341cbb 100644
--- a/drivers/clk/mvebu/clk-corediv.c
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -135,19 +135,21 @@ static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_corediv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* Valid ratio are 1:4, 1:5, 1:6 and 1:8 */
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
if (div < 4)
div = 4;
else if (div > 6)
div = 8;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -199,7 +201,7 @@ static const struct clk_corediv_soc_desc armada370_corediv_soc = {
.disable = clk_corediv_disable,
.is_enabled = clk_corediv_is_enabled,
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -215,7 +217,7 @@ static const struct clk_corediv_soc_desc armada380_corediv_soc = {
.disable = clk_corediv_disable,
.is_enabled = clk_corediv_is_enabled,
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -228,7 +230,7 @@ static const struct clk_corediv_soc_desc armada375_corediv_soc = {
.ndescs = ARRAY_SIZE(mvebu_corediv_desc),
.ops = {
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(8),
@@ -240,7 +242,7 @@ static const struct clk_corediv_soc_desc mv98dx3236_corediv_soc = {
.ndescs = ARRAY_SIZE(mv98dx3236_corediv_desc),
.ops = {
.recalc_rate = clk_corediv_recalc_rate,
- .round_rate = clk_corediv_round_rate,
+ .determine_rate = clk_corediv_determine_rate,
.set_rate = clk_corediv_set_rate,
},
.ratio_reload = BIT(10),
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index db2b38c21304..0de7660e73d2 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -56,19 +56,21 @@ static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
return parent_rate / div;
}
-static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_cpu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
/* Valid ratio are 1:1, 1:2 and 1:3 */
u32 div;
- div = *parent_rate / rate;
+ div = req->best_parent_rate / req->rate;
if (div == 0)
div = 1;
else if (div > 3)
div = 3;
- return *parent_rate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
@@ -159,7 +161,7 @@ static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
static const struct clk_ops cpu_ops = {
.recalc_rate = clk_cpu_recalc_rate,
- .round_rate = clk_cpu_round_rate,
+ .determine_rate = clk_cpu_determine_rate,
.set_rate = clk_cpu_set_rate,
};
diff --git a/drivers/clk/mvebu/common.c b/drivers/clk/mvebu/common.c
index 785dbede4835..5adbbd91a6db 100644
--- a/drivers/clk/mvebu/common.c
+++ b/drivers/clk/mvebu/common.c
@@ -215,22 +215,26 @@ static struct clk *clk_gating_get_src(
return ERR_PTR(-ENODEV);
}
-static int mvebu_clk_gating_suspend(void)
+static int mvebu_clk_gating_suspend(void *data)
{
ctrl->saved_reg = readl(ctrl->base);
return 0;
}
-static void mvebu_clk_gating_resume(void)
+static void mvebu_clk_gating_resume(void *data)
{
writel(ctrl->saved_reg, ctrl->base);
}
-static struct syscore_ops clk_gate_syscore_ops = {
+static const struct syscore_ops clk_gate_syscore_ops = {
.suspend = mvebu_clk_gating_suspend,
.resume = mvebu_clk_gating_resume,
};
+static struct syscore clk_gate_syscore = {
+ .ops = &clk_gate_syscore_ops,
+};
+
void __init mvebu_clk_gating_setup(struct device_node *np,
const struct clk_gating_soc_desc *desc)
{
@@ -284,7 +288,7 @@ void __init mvebu_clk_gating_setup(struct device_node *np,
of_clk_add_provider(np, clk_gating_get_src, ctrl);
- register_syscore_ops(&clk_gate_syscore_ops);
+ register_syscore(&clk_gate_syscore);
return;
gates_out:
diff --git a/drivers/clk/mvebu/cp110-system-controller.c b/drivers/clk/mvebu/cp110-system-controller.c
index 03c59bf22106..b47c86906046 100644
--- a/drivers/clk/mvebu/cp110-system-controller.c
+++ b/drivers/clk/mvebu/cp110-system-controller.c
@@ -110,6 +110,25 @@ static const char * const gate_base_names[] = {
[CP110_GATE_EIP197] = "eip197"
};
+static unsigned long gate_flags(const u8 bit_idx)
+{
+ switch (bit_idx) {
+ case CP110_GATE_PCIE_X1_0:
+ case CP110_GATE_PCIE_X1_1:
+ case CP110_GATE_PCIE_X4:
+ /*
+ * If a port had an active link at boot time, stopping
+ * the clock creates a failed state from which the
+ * controller driver cannot recover.
+ * Prevent stopping this clock until after a driver has
+ * taken ownership.
+ */
+ return CLK_IGNORE_UNUSED;
+ default:
+ return 0;
+ }
+}
+
struct cp110_gate_clk {
struct clk_hw hw;
struct regmap *regmap;
@@ -171,6 +190,7 @@ static struct clk_hw *cp110_register_gate(const char *name,
init.ops = &cp110_gate_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
+ init.flags = gate_flags(bit_idx);
gate->regmap = regmap;
gate->bit_idx = bit_idx;
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index 0a90452ee808..47cc49e4cd99 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -108,23 +108,23 @@ static unsigned long dove_recalc_rate(struct clk_hw *hw, unsigned long parent)
return rate;
}
-static long dove_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent)
+static int dove_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dove_clk *dc = to_dove_clk(hw);
- unsigned long parent_rate = *parent;
+ unsigned long parent_rate = req->best_parent_rate;
int divider;
- divider = dove_calc_divider(dc, rate, parent_rate, false);
+ divider = dove_calc_divider(dc, req->rate, parent_rate, false);
if (divider < 0)
return divider;
- rate = DIV_ROUND_CLOSEST(parent_rate, divider);
+ req->rate = DIV_ROUND_CLOSEST(parent_rate, divider);
pr_debug("%s(): %s divider=%u parent=%lu rate=%lu\n",
- __func__, dc->name, divider, parent_rate, rate);
+ __func__, dc->name, divider, parent_rate, req->rate);
- return rate;
+ return 0;
}
static int dove_set_clock(struct clk_hw *hw, unsigned long rate,
@@ -154,7 +154,7 @@ static int dove_set_clock(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops dove_divider_ops = {
.set_rate = dove_set_clock,
- .round_rate = dove_round_rate,
+ .determine_rate = dove_determine_rate,
.recalc_rate = dove_recalc_rate,
};
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
index 0a78ef380646..8afe1a9c1552 100644
--- a/drivers/clk/mxs/clk-div.c
+++ b/drivers/clk/mxs/clk-div.c
@@ -40,12 +40,12 @@ static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
return div->ops->recalc_rate(&div->divider.hw, parent_rate);
}
-static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_div *div = to_clk_div(hw);
- return div->ops->round_rate(&div->divider.hw, rate, prate);
+ return div->ops->determine_rate(&div->divider.hw, req);
}
static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -63,7 +63,7 @@ static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_div_ops = {
.recalc_rate = clk_div_recalc_rate,
- .round_rate = clk_div_round_rate,
+ .determine_rate = clk_div_determine_rate,
.set_rate = clk_div_set_rate,
};
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
index bba0d840dd76..73f514fb84ff 100644
--- a/drivers/clk/mxs/clk-frac.c
+++ b/drivers/clk/mxs/clk-frac.c
@@ -44,18 +44,18 @@ static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
return tmp_rate >> frac->width;
}
-static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_frac *frac = to_clk_frac(hw);
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u32 div;
u64 tmp, tmp_rate, result;
- if (rate > parent_rate)
+ if (req->rate > parent_rate)
return -EINVAL;
- tmp = rate;
+ tmp = req->rate;
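+ /* div = rate * 2^width / parent_rate, i.e. the fractional divider setting */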
tmp <<= frac->width;
do_div(tmp, parent_rate);
div = tmp;
@@ -67,7 +67,9 @@ static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
result = tmp_rate >> frac->width;
if ((result << frac->width) < tmp_rate)
result += 1;
- return result;
+ req->rate = result;
+
+ return 0;
}
static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -103,7 +105,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
- .round_rate = clk_frac_round_rate,
+ .determine_rate = clk_frac_determine_rate,
.set_rate = clk_frac_set_rate,
};
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
index 2297259da89a..a99ee4cd2ece 100644
--- a/drivers/clk/mxs/clk-ref.c
+++ b/drivers/clk/mxs/clk-ref.c
@@ -57,22 +57,24 @@ static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
return tmp;
}
-static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_ref_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long parent_rate = *prate;
+ unsigned long parent_rate = req->best_parent_rate;
u64 tmp = parent_rate;
u8 frac;
- tmp = tmp * 18 + rate / 2;
- do_div(tmp, rate);
+ tmp = tmp * 18 + req->rate / 2;
+ do_div(tmp, req->rate);
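+ /* frac = round(parent_rate * 18 / rate), clamped below to the valid 18..35 range */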
frac = clamp(tmp, 18, 35);
tmp = parent_rate;
tmp *= 18;
do_div(tmp, frac);
- return tmp;
+ req->rate = tmp;
+
+ return 0;
}
static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -104,7 +106,7 @@ static const struct clk_ops clk_ref_ops = {
.enable = clk_ref_enable,
.disable = clk_ref_disable,
.recalc_rate = clk_ref_recalc_rate,
- .round_rate = clk_ref_round_rate,
+ .determine_rate = clk_ref_determine_rate,
.set_rate = clk_ref_set_rate,
};
diff --git a/drivers/clk/nuvoton/clk-ma35d1-divider.c b/drivers/clk/nuvoton/clk-ma35d1-divider.c
index bb8c23d2b895..e39f53d5bf45 100644
--- a/drivers/clk/nuvoton/clk-ma35d1-divider.c
+++ b/drivers/clk/nuvoton/clk-ma35d1-divider.c
@@ -39,12 +39,16 @@ static unsigned long ma35d1_clkdiv_recalc_rate(struct clk_hw *hw, unsigned long
CLK_DIVIDER_ROUND_CLOSEST, dclk->width);
}
-static long ma35d1_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
+static int ma35d1_clkdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ma35d1_adc_clk_div *dclk = to_ma35d1_adc_clk_div(hw);
- return divider_round_rate(hw, rate, prate, dclk->table,
- dclk->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ dclk->table, dclk->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate)
@@ -71,7 +75,7 @@ static int ma35d1_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, unsigne
static const struct clk_ops ma35d1_adc_clkdiv_ops = {
.recalc_rate = ma35d1_clkdiv_recalc_rate,
- .round_rate = ma35d1_clkdiv_round_rate,
+ .determine_rate = ma35d1_clkdiv_determine_rate,
.set_rate = ma35d1_clkdiv_set_rate,
};
diff --git a/drivers/clk/nuvoton/clk-ma35d1-pll.c b/drivers/clk/nuvoton/clk-ma35d1-pll.c
index ff3fb8b87c24..4620acfe47e8 100644
--- a/drivers/clk/nuvoton/clk-ma35d1-pll.c
+++ b/drivers/clk/nuvoton/clk-ma35d1-pll.c
@@ -244,35 +244,43 @@ static unsigned long ma35d1_clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
return 0;
}
-static long ma35d1_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ma35d1_clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ma35d1_clk_pll *pll = to_ma35d1_clk_pll(hw);
u32 reg_ctl[3] = { 0 };
unsigned long pll_freq;
long ret;
- if (*parent_rate < PLL_FREF_MIN_FREQ || *parent_rate > PLL_FREF_MAX_FREQ)
+ if (req->best_parent_rate < PLL_FREF_MIN_FREQ || req->best_parent_rate > PLL_FREF_MAX_FREQ)
return -EINVAL;
- ret = ma35d1_pll_find_closest(pll, rate, *parent_rate, reg_ctl, &pll_freq);
+ ret = ma35d1_pll_find_closest(pll, req->rate, req->best_parent_rate,
+ reg_ctl, &pll_freq);
if (ret < 0)
return ret;
switch (pll->id) {
case CAPLL:
reg_ctl[0] = readl_relaxed(pll->ctl0_base);
- pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], *parent_rate);
- return pll_freq;
+ pll_freq = ma35d1_calc_smic_pll_freq(reg_ctl[0], req->best_parent_rate);
+ req->rate = pll_freq;
+
+ return 0;
case DDRPLL:
case APLL:
case EPLL:
case VPLL:
reg_ctl[0] = readl_relaxed(pll->ctl0_base);
reg_ctl[1] = readl_relaxed(pll->ctl1_base);
- pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, *parent_rate);
- return pll_freq;
+ pll_freq = ma35d1_calc_pll_freq(pll->mode, reg_ctl, req->best_parent_rate);
+ req->rate = pll_freq;
+
+ return 0;
}
+
+ req->rate = 0;
+
return 0;
}
@@ -311,12 +319,12 @@ static const struct clk_ops ma35d1_clk_pll_ops = {
.unprepare = ma35d1_clk_pll_unprepare,
.set_rate = ma35d1_clk_pll_set_rate,
.recalc_rate = ma35d1_clk_pll_recalc_rate,
- .round_rate = ma35d1_clk_pll_round_rate,
+ .determine_rate = ma35d1_clk_pll_determine_rate,
};
static const struct clk_ops ma35d1_clk_fixed_pll_ops = {
.recalc_rate = ma35d1_clk_pll_recalc_rate,
- .round_rate = ma35d1_clk_pll_round_rate,
+ .determine_rate = ma35d1_clk_pll_determine_rate,
};
struct clk_hw *ma35d1_reg_clk_pll(struct device *dev, u32 id, u8 u8mode, const char *name,
diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c
index 81efa885069b..b9e204d63a97 100644
--- a/drivers/clk/nxp/clk-lpc18xx-cgu.c
+++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c
@@ -370,23 +370,25 @@ static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lpc18xx_pll0_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long m;
- if (*prate < rate) {
+ if (req->best_parent_rate < req->rate) {
pr_warn("%s: pll dividers not supported\n", __func__);
return -EINVAL;
}
- m = DIV_ROUND_UP_ULL(*prate, rate * 2);
- if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
- pr_warn("%s: unable to support rate %lu\n", __func__, rate);
+ m = DIV_ROUND_UP_ULL(req->best_parent_rate, req->rate * 2);
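+ /* the multiplier M must lie in 1..LPC18XX_PLL0_MSEL_MAX; anything else is unsupported */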
+ if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
+ pr_warn("%s: unable to support rate %lu\n", __func__, req->rate);
return -EINVAL;
}
- return 2 * *prate * m;
+ req->rate = 2 * req->best_parent_rate * m;
+
+ return 0;
}
static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -402,7 +404,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
}
m = DIV_ROUND_UP_ULL(parent_rate, rate * 2);
- if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) {
+ if (m == 0 || m > LPC18XX_PLL0_MSEL_MAX) {
pr_warn("%s: unable to support rate %lu\n", __func__, rate);
return -EINVAL;
}
@@ -443,7 +445,7 @@ static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops lpc18xx_pll0_ops = {
.recalc_rate = lpc18xx_pll0_recalc_rate,
- .round_rate = lpc18xx_pll0_round_rate,
+ .determine_rate = lpc18xx_pll0_determine_rate,
.set_rate = lpc18xx_pll0_set_rate,
};
diff --git a/drivers/clk/nxp/clk-lpc32xx.c b/drivers/clk/nxp/clk-lpc32xx.c
index e00f270bc6aa..23f980cf6a2b 100644
--- a/drivers/clk/nxp/clk-lpc32xx.c
+++ b/drivers/clk/nxp/clk-lpc32xx.c
@@ -68,7 +68,6 @@ static const struct regmap_config lpc32xx_scb_regmap_config = {
.reg_stride = 4,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
.max_register = 0x114,
- .fast_io = true,
};
static struct regmap *clk_regmap;
@@ -579,17 +578,17 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return regmap_update_bits(clk_regmap, clk->reg, 0x1FFFF, val);
}
-static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_hclk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
- u64 m_i, o = rate, i = *parent_rate, d = (u64)rate << 6;
+ u64 m_i, o = req->rate, i = req->best_parent_rate, d = (u64)req->rate << 6;
u64 m = 0, n = 0, p = 0;
int p_i, n_i;
- pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
+ pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate, req->rate);
- if (rate > 266500000)
+ if (req->rate > 266500000)
return -EINVAL;
/* Have to check all 20 possibilities to find the minimal M */
@@ -614,9 +613,9 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
}
}
- if (d == (u64)rate << 6) {
+ if (d == (u64)req->rate << 6) {
pr_err("%s: %lu: no valid PLL parameters are found\n",
- clk_hw_get_name(hw), rate);
+ clk_hw_get_name(hw), req->rate);
return -EINVAL;
}
@@ -634,22 +633,25 @@ static long clk_hclk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
if (!d)
pr_debug("%s: %lu: found exact match: %llu/%llu/%llu\n",
- clk_hw_get_name(hw), rate, m, n, p);
+ clk_hw_get_name(hw), req->rate, m, n, p);
else
pr_debug("%s: %lu: found closest: %llu/%llu/%llu - %llu\n",
- clk_hw_get_name(hw), rate, m, n, p, o);
+ clk_hw_get_name(hw), req->rate, m, n, p, o);
- return o;
+ req->rate = o;
+
+ return 0;
}
-static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_usb_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_pll_clk *clk = to_lpc32xx_pll_clk(hw);
struct clk_hw *usb_div_hw, *osc_hw;
u64 d_i, n_i, m, o;
- pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), *parent_rate, rate);
+ pr_debug("%s: %lu/%lu\n", clk_hw_get_name(hw), req->best_parent_rate,
+ req->rate);
/*
* The only supported USB clock is 48MHz, with PLL internal constraints
@@ -657,7 +659,7 @@ static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
* and post-divider must be 4, this slightly simplifies calculation of
* USB divider, USB PLL N and M parameters.
*/
- if (rate != 48000000)
+ if (req->rate != 48000000)
return -EINVAL;
/* USB divider clock */
@@ -685,30 +687,30 @@ static long clk_usb_pll_round_rate(struct clk_hw *hw, unsigned long rate,
clk->m_div = m;
clk->p_div = 2;
clk->mode = PLL_NON_INTEGER;
- *parent_rate = div64_u64(o, d_i);
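+ /* hand the required parent (USB divider) rate back to the core via the request */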
+ req->best_parent_rate = div64_u64(o, d_i);
- return rate;
+ return 0;
}
}
return -EINVAL;
}
-#define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _rr) \
+#define LPC32XX_DEFINE_PLL_OPS(_name, _rc, _sr, _dr) \
static const struct clk_ops clk_ ##_name ## _ops = { \
.enable = clk_pll_enable, \
.disable = clk_pll_disable, \
.is_enabled = clk_pll_is_enabled, \
.recalc_rate = _rc, \
.set_rate = _sr, \
- .round_rate = _rr, \
+ .determine_rate = _dr, \
}
LPC32XX_DEFINE_PLL_OPS(pll_397x, clk_pll_397x_recalc_rate, NULL, NULL);
LPC32XX_DEFINE_PLL_OPS(hclk_pll, clk_pll_recalc_rate,
- clk_pll_set_rate, clk_hclk_pll_round_rate);
+ clk_pll_set_rate, clk_hclk_pll_determine_rate);
LPC32XX_DEFINE_PLL_OPS(usb_pll, clk_pll_recalc_rate,
- clk_pll_set_rate, clk_usb_pll_round_rate);
+ clk_pll_set_rate, clk_usb_pll_determine_rate);
static int clk_ddram_is_enabled(struct clk_hw *hw)
{
@@ -955,8 +957,8 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lpc32xx_clk_div *divider = to_lpc32xx_div(hw);
unsigned int bestdiv;
@@ -968,11 +970,15 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
bestdiv &= div_mask(divider->width);
bestdiv = _get_div(divider->table, bestdiv, divider->flags,
divider->width);
- return DIV_ROUND_UP(*prate, bestdiv);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, bestdiv);
+
+ return 0;
}
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ divider->table, divider->width, divider->flags);
+
+ return 0;
}
static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -991,7 +997,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops lpc32xx_clk_divider_ops = {
.recalc_rate = clk_divider_recalc_rate,
- .round_rate = clk_divider_round_rate,
+ .determine_rate = clk_divider_determine_rate,
.set_rate = clk_divider_set_rate,
};
diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c
index 025b9df76cdb..d05337915e2b 100644
--- a/drivers/clk/pistachio/clk-pll.c
+++ b/drivers/clk/pistachio/clk-pll.c
@@ -139,19 +139,23 @@ pll_get_params(struct pistachio_clk_pll *pll, unsigned long fref,
return NULL;
}
-static long pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int pll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct pistachio_clk_pll *pll = to_pistachio_pll(hw);
unsigned int i;
for (i = 0; i < pll->nr_rates; i++) {
- if (i > 0 && pll->rates[i].fref == *parent_rate &&
- pll->rates[i].fout <= rate)
- return pll->rates[i - 1].fout;
+ if (i > 0 && pll->rates[i].fref == req->best_parent_rate &&
+ pll->rates[i].fout <= req->rate) {
+ req->rate = pll->rates[i - 1].fout;
+
+ return 0;
+ }
}
- return pll->rates[0].fout;
+ req->rate = pll->rates[0].fout;
+
+ return 0;
}
static int pll_gf40lp_frac_enable(struct clk_hw *hw)
@@ -300,7 +304,7 @@ static const struct clk_ops pll_gf40lp_frac_ops = {
.disable = pll_gf40lp_frac_disable,
.is_enabled = pll_gf40lp_frac_is_enabled,
.recalc_rate = pll_gf40lp_frac_recalc_rate,
- .round_rate = pll_round_rate,
+ .determine_rate = pll_determine_rate,
.set_rate = pll_gf40lp_frac_set_rate,
};
@@ -432,7 +436,7 @@ static const struct clk_ops pll_gf40lp_laint_ops = {
.disable = pll_gf40lp_laint_disable,
.is_enabled = pll_gf40lp_laint_is_enabled,
.recalc_rate = pll_gf40lp_laint_recalc_rate,
- .round_rate = pll_round_rate,
+ .determine_rate = pll_determine_rate,
.set_rate = pll_gf40lp_laint_set_rate,
};
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 6cb6cd3e1778..a284ba040b78 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -19,6 +19,33 @@ menuconfig COMMON_CLK_QCOM
if COMMON_CLK_QCOM
+config CLK_GLYMUR_DISPCC
+ tristate "GLYMUR Display Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select CLK_GLYMUR_GCC
+ help
+ Support for the display clock controllers on Qualcomm
+ Technologies, Inc. GLYMUR devices.
+ Say Y if you want to support display devices and functionality such as
+ splash screen.
+
+config CLK_GLYMUR_GCC
+ tristate "GLYMUR Global Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on GLYMUR devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, UFS, SDCC, etc.
+
+config CLK_GLYMUR_TCSRCC
+ tristate "GLYMUR TCSR Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select QCOM_GDSC
+ help
+ Support for the TCSR clock controller on GLYMUR devices.
+ Say Y if you want to use peripheral devices such as USB/PCIe/EDP.
+
config CLK_X1E80100_CAMCC
tristate "X1E80100 Camera Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -187,8 +214,17 @@ config IPQ_APSS_PLL
Say Y if you want to support CPU frequency scaling on ipq based
devices.
+config IPQ_APSS_5424
+ tristate "IPQ5424 APSS Clock Controller"
+ select IPQ_APSS_PLL
+ default y if IPQ_GCC_5424
+ help
+ Support for APSS Clock controller on Qualcomm IPQ5424 platform.
+ Say Y if you want to support CPU frequency scaling on ipq based
+ devices.
+
config IPQ_APSS_6018
- tristate "IPQ APSS Clock Controller"
+ tristate "IPQ6018 APSS Clock Controller"
select IPQ_APSS_PLL
depends on QCOM_APCS_IPC || COMPILE_TEST
depends on QCOM_SMEM
@@ -281,6 +317,17 @@ config IPQ_GCC_9574
i2c, USB, SD/eMMC, etc. Select this for the root clock
of ipq9574.
+config IPQ_NSSCC_5424
+ tristate "IPQ5424 NSS Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ depends on IPQ_GCC_5424
+ help
+ Support for NSS clock controller on ipq5424 devices.
+ NSSCC receives the clock sources from GCC, CMN PLL and UNIPHY (PCS).
+ It in turn supplies the clocks and resets to the networking hardware.
+ Say Y or M if you want to enable networking function on the
+ IPQ5424 devices.
+
config IPQ_NSSCC_9574
tristate "IPQ9574 NSS Clock Controller"
depends on ARM64 || COMPILE_TEST
@@ -323,12 +370,12 @@ config MSM_GCC_8916
SD/eMMC, display, graphics, camera etc.
config MSM_GCC_8917
- tristate "MSM8917/QM215 Global Clock Controller"
+ tristate "MSM89(17/37)/QM215 Global Clock Controller"
depends on ARM64 || COMPILE_TEST
select QCOM_GDSC
help
- Support for the global clock controller on msm8917 and qm215
- devices.
+ Support for the global clock controller on msm8917, msm8937
+ and qm215 devices.
Say Y if you want to use devices such as UART, SPI i2c, USB,
SD/eMMC, display, graphics, camera etc.
@@ -495,7 +542,8 @@ config QCM_DISPCC_2290
config QCS_DISPCC_615
tristate "QCS615 Display Clock Controller"
- select QCM_GCC_615
+ depends on ARM64 || COMPILE_TEST
+ select QCS_GCC_615
help
Support for the display clock controller on Qualcomm Technologies, Inc
QCS615 devices.
@@ -550,6 +598,7 @@ config QCS_GCC_615
config QCS_GPUCC_615
tristate "QCS615 Graphics clock controller"
+ depends on ARM64 || COMPILE_TEST
select QCS_GCC_615
help
Support for the graphics clock controller on QCS615 devices.
@@ -558,6 +607,7 @@ config QCS_GPUCC_615
config QCS_VIDEOCC_615
tristate "QCS615 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select QCS_GCC_615
help
Support for the video clock controller on QCS615 devices.
@@ -1412,6 +1462,7 @@ config SA_VIDEOCC_8775P
config SM_VIDEOCC_6350
tristate "SM6350 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
select SM_GCC_6350
select QCOM_GDSC
help
@@ -1480,6 +1531,17 @@ config SM_VIDEOCC_8550
Say Y if you want to support video devices and functionality such as
video encode/decode.
+config SM_VIDEOCC_8750
+ tristate "SM8750 Video Clock Controller"
+ depends on ARM64 || COMPILE_TEST
+ select SM_GCC_8750
+ select QCOM_GDSC
+ help
+ Support for the video clock controller on Qualcomm Technologies, Inc.
+ SM8750 devices.
+ Say Y if you want to support video devices and functionality such as
+ video encode/decode.
+
config SPMI_PMIC_CLKDIV
tristate "SPMI PMIC clkdiv Support"
depends on SPMI || COMPILE_TEST
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index ddb7e06fae40..0ac8a9055a43 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -21,6 +21,9 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
+obj-$(CONFIG_CLK_GLYMUR_DISPCC) += dispcc-glymur.o
+obj-$(CONFIG_CLK_GLYMUR_GCC) += gcc-glymur.o
+obj-$(CONFIG_CLK_GLYMUR_TCSRCC) += tcsrcc-glymur.o
obj-$(CONFIG_CLK_X1E80100_CAMCC) += camcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_DISPCC) += dispcc-x1e80100.o
obj-$(CONFIG_CLK_X1E80100_GCC) += gcc-x1e80100.o
@@ -29,6 +32,7 @@ obj-$(CONFIG_CLK_X1E80100_TCSRCC) += tcsrcc-x1e80100.o
obj-$(CONFIG_CLK_X1P42100_GPUCC) += gpucc-x1p42100.o
obj-$(CONFIG_CLK_QCM2290_GPUCC) += gpucc-qcm2290.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
+obj-$(CONFIG_IPQ_APSS_5424) += apss-ipq5424.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
obj-$(CONFIG_IPQ_CMN_PLL) += ipq-cmn-pll.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
@@ -39,6 +43,7 @@ obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o
obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
+obj-$(CONFIG_IPQ_NSSCC_5424) += nsscc-ipq5424.o
obj-$(CONFIG_IPQ_NSSCC_9574) += nsscc-ipq9574.o
obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
obj-$(CONFIG_IPQ_NSSCC_QCA8K) += nsscc-qca8k.o
@@ -180,6 +185,7 @@ obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8350) += videocc-sm8350.o
obj-$(CONFIG_SM_VIDEOCC_8450) += videocc-sm8450.o
obj-$(CONFIG_SM_VIDEOCC_8550) += videocc-sm8550.o
+obj-$(CONFIG_SM_VIDEOCC_8750) += videocc-sm8750.o
obj-$(CONFIG_SM_VIDEOCC_MILOS) += videocc-milos.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index f43d455ab4b8..724a642311e5 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -33,7 +33,6 @@ static const struct regmap_config a53pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
- .fast_io = true,
};
static struct pll_freq_tbl *qcom_a53pll_get_freq_tbl(struct device *dev)
diff --git a/drivers/clk/qcom/a7-pll.c b/drivers/clk/qcom/a7-pll.c
index c4a53e5db229..04b5492a3c21 100644
--- a/drivers/clk/qcom/a7-pll.c
+++ b/drivers/clk/qcom/a7-pll.c
@@ -27,7 +27,7 @@ static struct clk_alpha_pll a7pll = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "a7pll",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
@@ -50,7 +50,6 @@ static const struct regmap_config a7pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1000,
- .fast_io = true,
};
static int qcom_a7pll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
index d6c1aea7e9e1..3a8987fe7008 100644
--- a/drivers/clk/qcom/apss-ipq-pll.c
+++ b/drivers/clk/qcom/apss-ipq-pll.c
@@ -169,7 +169,6 @@ static const struct regmap_config ipq_pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x40,
- .fast_io = true,
};
static int apss_ipq_pll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/apss-ipq5424.c b/drivers/clk/qcom/apss-ipq5424.c
new file mode 100644
index 000000000000..2d622c1fe5d0
--- /dev/null
+++ b/drivers/clk/qcom/apss-ipq5424.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/arm/qcom,ids.h>
+#include <dt-bindings/clock/qcom,apss-ipq.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+
+enum {
+ DT_XO,
+ DT_CLK_REF,
+};
+
+enum {
+ P_XO,
+ P_GPLL0,
+ P_APSS_PLL_EARLY,
+ P_L3_PLL,
+};
+
+static const struct alpha_pll_config apss_pll_config = {
+ .l = 0x3b,
+ .config_ctl_val = 0x08200920,
+ .config_ctl_hi_val = 0x05008001,
+ .config_ctl_hi1_val = 0x04000000,
+ .user_ctl_val = 0xf,
+};
+
+static struct clk_alpha_pll ipq5424_apss_pll = {
+ .offset = 0x0,
+ .config = &apss_pll_config,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_2290],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "apss_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data parents_apss_silver_clk_src[] = {
+ { .index = DT_XO },
+ { .index = DT_CLK_REF },
+ { .hw = &ipq5424_apss_pll.clkr.hw },
+};
+
+static const struct parent_map parents_apss_silver_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 4 },
+ { P_APSS_PLL_EARLY, 5 },
+};
+
+static const struct freq_tbl ftbl_apss_clk_src[] = {
+ F(816000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ F(1416000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ F(1800000000, P_APSS_PLL_EARLY, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_silver_clk_src = {
+ .cmd_rcgr = 0x0080,
+ .freq_tbl = ftbl_apss_clk_src,
+ .hid_width = 5,
+ .parent_map = parents_apss_silver_clk_src_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "apss_silver_clk_src",
+ .parent_data = parents_apss_silver_clk_src,
+ .num_parents = ARRAY_SIZE(parents_apss_silver_clk_src),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch apss_silver_core_clk = {
+ .halt_reg = 0x008c,
+ .clkr = {
+ .enable_reg = 0x008c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "apss_silver_core_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &apss_silver_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct alpha_pll_config l3_pll_config = {
+ .l = 0x29,
+ .config_ctl_val = 0x08200920,
+ .config_ctl_hi_val = 0x05008001,
+ .config_ctl_hi1_val = 0x04000000,
+ .user_ctl_val = 0xf,
+};
+
+static struct clk_alpha_pll ipq5424_l3_pll = {
+ .offset = 0x10000,
+ .config = &l3_pll_config,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_HUAYRA_2290],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "l3_pll",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_XO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_huayra_ops,
+ },
+ },
+};
+
+static const struct clk_parent_data parents_l3_clk_src[] = {
+ { .index = DT_XO },
+ { .index = DT_CLK_REF },
+ { .hw = &ipq5424_l3_pll.clkr.hw },
+};
+
+static const struct parent_map parents_l3_clk_src_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 4 },
+ { P_L3_PLL, 5 },
+};
+
+static const struct freq_tbl ftbl_l3_clk_src[] = {
+ F(816000000, P_L3_PLL, 1, 0, 0),
+ F(984000000, P_L3_PLL, 1, 0, 0),
+ F(1272000000, P_L3_PLL, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 l3_clk_src = {
+ .cmd_rcgr = 0x10080,
+ .freq_tbl = ftbl_l3_clk_src,
+ .hid_width = 5,
+ .parent_map = parents_l3_clk_src_map,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "l3_clk_src",
+ .parent_data = parents_l3_clk_src,
+ .num_parents = ARRAY_SIZE(parents_l3_clk_src),
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_branch l3_core_clk = {
+ .halt_reg = 0x1008c,
+ .clkr = {
+ .enable_reg = 0x1008c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "l3_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &l3_clk_src.clkr.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct regmap_config apss_ipq5424_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true,
+};
+
+static struct clk_regmap *apss_ipq5424_clks[] = {
+ [APSS_PLL_EARLY] = &ipq5424_apss_pll.clkr,
+ [APSS_SILVER_CLK_SRC] = &apss_silver_clk_src.clkr,
+ [APSS_SILVER_CORE_CLK] = &apss_silver_core_clk.clkr,
+ [L3_PLL] = &ipq5424_l3_pll.clkr,
+ [L3_CLK_SRC] = &l3_clk_src.clkr,
+ [L3_CORE_CLK] = &l3_core_clk.clkr,
+};
+
+static struct clk_alpha_pll *ipq5424_apss_plls[] = {
+ &ipq5424_l3_pll,
+ &ipq5424_apss_pll,
+};
+
+static struct qcom_cc_driver_data ipq5424_apss_driver_data = {
+ .alpha_plls = ipq5424_apss_plls,
+ .num_alpha_plls = ARRAY_SIZE(ipq5424_apss_plls),
+};
+
+#define IPQ_APPS_PLL_ID (5424 * 3) /* some unique value */
+
+static const struct qcom_icc_hws_data icc_ipq5424_cpu_l3[] = {
+ { MASTER_CPU, SLAVE_L3, L3_CORE_CLK },
+};
+
+static const struct qcom_cc_desc apss_ipq5424_desc = {
+ .config = &apss_ipq5424_regmap_config,
+ .clks = apss_ipq5424_clks,
+ .num_clks = ARRAY_SIZE(apss_ipq5424_clks),
+ .icc_hws = icc_ipq5424_cpu_l3,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_cpu_l3),
+ .icc_first_node_id = IPQ_APPS_PLL_ID,
+ .driver_data = &ipq5424_apss_driver_data,
+};
+
+static int apss_ipq5424_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &apss_ipq5424_desc);
+}
+
+static const struct of_device_id apss_ipq5424_match_table[] = {
+ { .compatible = "qcom,ipq5424-apss-clk" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, apss_ipq5424_match_table);
+
+static struct platform_driver apss_ipq5424_driver = {
+ .probe = apss_ipq5424_probe,
+ .driver = {
+ .name = "apss-ipq5424-clk",
+ .of_match_table = apss_ipq5424_match_table,
+ .sync_state = icc_sync_state,
+ },
+};
+
+module_platform_driver(apss_ipq5424_driver);
+
+MODULE_DESCRIPTION("QCOM APSS IPQ5424 CLK Driver");
+MODULE_LICENSE("GPL");
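For reference, the F() entries in ftbl_apss_clk_src and ftbl_l3_clk_src above are the usual qcom RCG frequency-table shorthand. A sketch of how one entry unpacks, assuming the F() definition from clk-rcg.h (where the pre-divider is stored in its 2*h - 1 hardware encoding):

/* #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) } */

/* So F(1800000000, P_APSS_PLL_EARLY, 1, 0, 0) is equivalent to: */
static const struct freq_tbl example_entry = {
	.freq		= 1800000000,
	.src		= P_APSS_PLL_EARLY,	/* resolved via parent_map */
	.pre_div	= 2 * 1 - 1,		/* divide-by-1, encoded */
	.m		= 0,			/* M/N counter unused */
	.n		= 0,
};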
diff --git a/drivers/clk/qcom/camcc-milos.c b/drivers/clk/qcom/camcc-milos.c
index 75bd939f7dd1..0077c9c9249f 100644
--- a/drivers/clk/qcom/camcc-milos.c
+++ b/drivers/clk/qcom/camcc-milos.c
@@ -2124,7 +2124,7 @@ static struct qcom_cc_driver_data cam_cc_milos_driver_data = {
.num_clk_cbcrs = ARRAY_SIZE(cam_cc_milos_critical_cbcrs),
};
-static struct qcom_cc_desc cam_cc_milos_desc = {
+static const struct qcom_cc_desc cam_cc_milos_desc = {
.config = &cam_cc_milos_regmap_config,
.clks = cam_cc_milos_clocks,
.num_clks = ARRAY_SIZE(cam_cc_milos_clocks),
diff --git a/drivers/clk/qcom/camcc-sdm845.c b/drivers/clk/qcom/camcc-sdm845.c
index cf60e8dd292a..fb313da7165b 100644
--- a/drivers/clk/qcom/camcc-sdm845.c
+++ b/drivers/clk/qcom/camcc-sdm845.c
@@ -1543,6 +1543,7 @@ static struct gdsc bps_gdsc = {
.name = "bps_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1552,6 +1553,7 @@ static struct gdsc ipe_0_gdsc = {
.name = "ipe_0_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1561,6 +1563,7 @@ static struct gdsc ipe_1_gdsc = {
.name = "ipe_1_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
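The .parent = &titan_top_gdsc.pd additions in this and the following camcc diffs hang the per-block camera GDSCs (BPS, IPE, IFE, SBI, SFE) under the top-level titan_top domain, so genpd powers the parent up before any of its children. A condensed sketch of what the framework does with the field at registration time (based on gdsc_register() in drivers/clk/qcom/gdsc.c):

#include <linux/pm_domain.h>
#include "gdsc.h"

static int example_gdsc_link(struct gdsc *sc)
{
	/* a GDSC with a parent becomes a genpd subdomain of it */
	if (sc->parent)
		return pm_genpd_add_subdomain(sc->parent, &sc->pd);

	return 0;
}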
diff --git a/drivers/clk/qcom/camcc-sm6350.c b/drivers/clk/qcom/camcc-sm6350.c
index 8aac97d29ce3..7df12c1311c6 100644
--- a/drivers/clk/qcom/camcc-sm6350.c
+++ b/drivers/clk/qcom/camcc-sm6350.c
@@ -145,15 +145,11 @@ static struct clk_alpha_pll_postdiv camcc_pll1_out_even = {
static const struct alpha_pll_config camcc_pll2_config = {
.l = 0x64,
.alpha = 0x0,
- .post_div_val = 0x3 << 8,
- .post_div_mask = 0x3 << 8,
- .aux_output_mask = BIT(1),
- .main_output_mask = BIT(0),
- .early_output_mask = BIT(3),
.config_ctl_val = 0x20000800,
.config_ctl_hi_val = 0x400003d2,
.test_ctl_val = 0x04000400,
.test_ctl_hi_val = 0x00004000,
+ .user_ctl_val = 0x0000030b,
};
static struct clk_alpha_pll camcc_pll2 = {
@@ -1693,6 +1689,8 @@ static struct clk_branch camcc_sys_tmr_clk = {
},
};
+static struct gdsc titan_top_gdsc;
+
static struct gdsc bps_gdsc = {
.gdscr = 0x6004,
.en_rest_wait_val = 0x2,
@@ -1702,6 +1700,7 @@ static struct gdsc bps_gdsc = {
.name = "bps_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
.flags = VOTABLE,
};
@@ -1714,6 +1713,7 @@ static struct gdsc ipe_0_gdsc = {
.name = "ipe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
.flags = VOTABLE,
};
@@ -1726,6 +1726,7 @@ static struct gdsc ife_0_gdsc = {
.name = "ife_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
};
static struct gdsc ife_1_gdsc = {
@@ -1737,6 +1738,7 @@ static struct gdsc ife_1_gdsc = {
.name = "ife_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
};
static struct gdsc ife_2_gdsc = {
@@ -1748,6 +1750,7 @@ static struct gdsc ife_2_gdsc = {
.name = "ife_2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &titan_top_gdsc.pd,
};
static struct gdsc titan_top_gdsc = {
diff --git a/drivers/clk/qcom/camcc-sm7150.c b/drivers/clk/qcom/camcc-sm7150.c
index 4a3baf5d8e85..ee963ed341c3 100644
--- a/drivers/clk/qcom/camcc-sm7150.c
+++ b/drivers/clk/qcom/camcc-sm7150.c
@@ -139,13 +139,9 @@ static struct clk_fixed_factor camcc_pll1_out_even = {
/* 1920MHz configuration */
static const struct alpha_pll_config camcc_pll2_config = {
.l = 0x64,
- .post_div_val = 0x3 << 8,
- .post_div_mask = 0x3 << 8,
- .early_output_mask = BIT(3),
- .aux_output_mask = BIT(1),
- .main_output_mask = BIT(0),
.config_ctl_hi_val = 0x400003d6,
.config_ctl_val = 0x20000954,
+ .user_ctl_val = 0x0000030b,
};
static struct clk_alpha_pll camcc_pll2 = {
@@ -1846,6 +1842,7 @@ static struct gdsc camcc_bps_gdsc = {
.name = "camcc_bps_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &camcc_titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1875,6 +1872,7 @@ static struct gdsc camcc_ipe_0_gdsc = {
.name = "camcc_ipe_0_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &camcc_titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1884,6 +1882,7 @@ static struct gdsc camcc_ipe_1_gdsc = {
.name = "camcc_ipe_1_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &camcc_titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -1896,7 +1895,7 @@ static struct gdsc camcc_titan_top_gdsc = {
.pwrsts = PWRSTS_OFF_ON,
};
-struct clk_hw *camcc_sm7150_hws[] = {
+static struct clk_hw *camcc_sm7150_hws[] = {
[CAMCC_PLL0_OUT_EVEN] = &camcc_pll0_out_even.hw,
[CAMCC_PLL0_OUT_ODD] = &camcc_pll0_out_odd.hw,
[CAMCC_PLL1_OUT_EVEN] = &camcc_pll1_out_even.hw,
diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c
index 6da89c49ba3d..c95a00628630 100644
--- a/drivers/clk/qcom/camcc-sm8250.c
+++ b/drivers/clk/qcom/camcc-sm8250.c
@@ -2213,6 +2213,7 @@ static struct gdsc bps_gdsc = {
.name = "bps_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2222,6 +2223,7 @@ static struct gdsc ipe_0_gdsc = {
.name = "ipe_0_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2231,6 +2233,7 @@ static struct gdsc sbi_gdsc = {
.name = "sbi_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/camcc-sm8450.c b/drivers/clk/qcom/camcc-sm8450.c
index 4dd8be8cc988..ef8cf54d0eed 100644
--- a/drivers/clk/qcom/camcc-sm8450.c
+++ b/drivers/clk/qcom/camcc-sm8450.c
@@ -2935,6 +2935,7 @@ static struct gdsc bps_gdsc = {
.name = "bps_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2944,6 +2945,7 @@ static struct gdsc ipe_0_gdsc = {
.name = "ipe_0_gdsc",
},
.flags = HW_CTRL | POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
@@ -2953,6 +2955,7 @@ static struct gdsc sbi_gdsc = {
.name = "sbi_gdsc",
},
.flags = POLL_CFG_GDSCR,
+ .parent = &titan_top_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
};
diff --git a/drivers/clk/qcom/camcc-sm8550.c b/drivers/clk/qcom/camcc-sm8550.c
index 63aed9e4c362..b8ece8a57a8a 100644
--- a/drivers/clk/qcom/camcc-sm8550.c
+++ b/drivers/clk/qcom/camcc-sm8550.c
@@ -3204,6 +3204,8 @@ static struct clk_branch cam_cc_sfe_1_fast_ahb_clk = {
},
};
+static struct gdsc cam_cc_titan_top_gdsc;
+
static struct gdsc cam_cc_bps_gdsc = {
.gdscr = 0x10004,
.en_rest_wait_val = 0x2,
@@ -3213,6 +3215,7 @@ static struct gdsc cam_cc_bps_gdsc = {
.name = "cam_cc_bps_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3225,6 +3228,7 @@ static struct gdsc cam_cc_ife_0_gdsc = {
.name = "cam_cc_ife_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3237,6 +3241,7 @@ static struct gdsc cam_cc_ife_1_gdsc = {
.name = "cam_cc_ife_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3249,6 +3254,7 @@ static struct gdsc cam_cc_ife_2_gdsc = {
.name = "cam_cc_ife_2_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3261,6 +3267,7 @@ static struct gdsc cam_cc_ipe_0_gdsc = {
.name = "cam_cc_ipe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3273,6 +3280,7 @@ static struct gdsc cam_cc_sbi_gdsc = {
.name = "cam_cc_sbi_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3285,6 +3293,7 @@ static struct gdsc cam_cc_sfe_0_gdsc = {
.name = "cam_cc_sfe_0_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
@@ -3297,6 +3306,7 @@ static struct gdsc cam_cc_sfe_1_gdsc = {
.name = "cam_cc_sfe_1_gdsc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .parent = &cam_cc_titan_top_gdsc.pd,
.flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
};
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index fec6eb376e27..6aeba40358c1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -66,7 +66,7 @@
#define GET_PLL_TYPE(pll) (((pll)->regs - clk_alpha_pll_regs[0]) / PLL_OFF_MAX_REGS)
const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -77,7 +77,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x10,
@@ -87,7 +87,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA_APSS] = {
[PLL_OFF_L_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL] = 0x10,
[PLL_OFF_USER_CTL] = 0x18,
@@ -97,7 +97,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
},
- [CLK_ALPHA_PLL_TYPE_HUAYRA_2290] = {
+ [CLK_ALPHA_PLL_TYPE_HUAYRA_2290] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -110,7 +110,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x28,
[PLL_OFF_STATUS] = 0x38,
},
- [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -119,7 +119,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x1c,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_USER_CTL] = 0x0c,
[PLL_OFF_USER_CTL_U] = 0x10,
@@ -147,7 +147,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x38,
[PLL_OFF_ALPHA_VAL] = 0x40,
},
- [CLK_ALPHA_PLL_TYPE_AGERA] = {
+ [CLK_ALPHA_PLL_TYPE_AGERA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -157,7 +157,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL_U] = 0x1c,
[PLL_OFF_STATUS] = 0x2c,
},
- [CLK_ALPHA_PLL_TYPE_ZONDA] = {
+ [CLK_ALPHA_PLL_TYPE_ZONDA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -243,7 +243,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x28,
[PLL_OFF_TEST_CTL_U] = 0x2c,
},
- [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT_EVO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -254,7 +254,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_CONFIG_CTL] = 0x20,
[PLL_OFF_STATUS] = 0x24,
},
- [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
+ [CLK_ALPHA_PLL_TYPE_BRAMMO_EVO] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_ALPHA_VAL_U] = 0x0c,
@@ -275,7 +275,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_TEST_CTL] = 0x30,
[PLL_OFF_TEST_CTL_U] = 0x34,
},
- [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
+ [CLK_ALPHA_PLL_TYPE_STROMER_PLUS] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_USER_CTL] = 0x08,
[PLL_OFF_USER_CTL_U] = 0x0c,
@@ -286,7 +286,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_ALPHA_VAL] = 0x24,
[PLL_OFF_ALPHA_VAL_U] = 0x28,
},
- [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
+ [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_USER_CTL] = 0x0c,
@@ -301,7 +301,7 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x30,
[PLL_OFF_STATUS] = 0x3c,
},
- [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
+ [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_ALPHA_VAL] = 0x08,
[PLL_OFF_TEST_CTL] = 0x0c,
@@ -849,22 +849,25 @@ static int clk_alpha_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
clk_alpha_pll_hwfsm_is_enabled);
}
-static long clk_alpha_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha_width = pll_alpha_width(pll);
u64 a;
unsigned long min_freq, max_freq;
- rate = alpha_pll_round_rate(rate, *prate, &l, &a, alpha_width);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ req->rate = alpha_pll_round_rate(req->rate, req->best_parent_rate, &l,
+ &a, alpha_width);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, req->rate))
+ return 0;
min_freq = pll->vco_table[0].min_freq;
max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
- return clamp(rate, min_freq, max_freq);
+ req->rate = clamp(req->rate, min_freq, max_freq);
+
+ return 0;
}
void clk_huayra_2290_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
@@ -1048,12 +1051,15 @@ static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long alpha_pll_huayra_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int alpha_pll_huayra_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 l, a;
- return alpha_huayra_pll_round_rate(rate, *prate, &l, &a);
+ req->rate = alpha_huayra_pll_round_rate(req->rate,
+ req->best_parent_rate, &l, &a);
+
+ return 0;
}
static int trion_pll_is_enabled(struct clk_alpha_pll *pll,
@@ -1175,7 +1181,7 @@ const struct clk_ops clk_alpha_pll_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
@@ -1185,7 +1191,7 @@ const struct clk_ops clk_alpha_pll_huayra_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_huayra_recalc_rate,
- .round_rate = alpha_pll_huayra_round_rate,
+ .determine_rate = alpha_pll_huayra_determine_rate,
.set_rate = alpha_pll_huayra_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_huayra_ops);
@@ -1195,7 +1201,7 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = {
.disable = clk_alpha_pll_hwfsm_disable,
.is_enabled = clk_alpha_pll_hwfsm_is_enabled,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_hwfsm_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
@@ -1205,7 +1211,7 @@ const struct clk_ops clk_alpha_pll_fixed_trion_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_trion_ops);
@@ -1240,9 +1246,8 @@ static const struct clk_div_table clk_alpha_2bit_div_table[] = {
{ }
};
-static long
-clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
const struct clk_div_table *table;
@@ -1252,13 +1257,15 @@ clk_alpha_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
else
table = clk_alpha_div_table;
- return divider_round_rate(hw, rate, prate, table,
- pll->width, CLK_DIVIDER_POWER_OF_TWO);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ table, pll->width,
+ CLK_DIVIDER_POWER_OF_TWO);
+
+ return 0;
}
-static long
-clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_alpha_pll_postdiv_ro_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
u32 ctl, div;
@@ -1270,9 +1277,12 @@ clk_alpha_pll_postdiv_round_ro_rate(struct clk_hw *hw, unsigned long rate,
div = 1 << fls(ctl);
if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), div * rate);
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ div * req->rate);
+
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, div);
- return DIV_ROUND_UP_ULL((u64)*prate, div);
+ return 0;
}
static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1291,13 +1301,13 @@ static int clk_alpha_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_postdiv_ops = {
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_determine_rate,
.set_rate = clk_alpha_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
const struct clk_ops clk_alpha_pll_postdiv_ro_ops = {
- .round_rate = clk_alpha_pll_postdiv_round_ro_rate,
+ .determine_rate = clk_alpha_pll_postdiv_ro_determine_rate,
.recalc_rate = clk_alpha_pll_postdiv_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ro_ops);
@@ -1542,7 +1552,7 @@ const struct clk_ops clk_alpha_pll_fabia_ops = {
.is_enabled = clk_alpha_pll_is_enabled,
.set_rate = alpha_pll_fabia_set_rate,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fabia_ops);
@@ -1551,7 +1561,7 @@ const struct clk_ops clk_alpha_pll_fixed_fabia_ops = {
.disable = alpha_pll_fabia_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_fabia_ops);
@@ -1602,14 +1612,16 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return (parent_rate / div);
}
-static long
-clk_trion_pll_postdiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_trion_pll_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- return divider_round_rate(hw, rate, prate, pll->post_div_table,
- pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
};
static int
@@ -1635,18 +1647,21 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops clk_alpha_pll_postdiv_trion_ops = {
.recalc_rate = clk_trion_pll_postdiv_recalc_rate,
- .round_rate = clk_trion_pll_postdiv_round_rate,
+ .determine_rate = clk_trion_pll_postdiv_determine_rate,
.set_rate = clk_trion_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_trion_ops);
-static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *prate)
+static int clk_alpha_pll_postdiv_fabia_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- return divider_round_rate(hw, rate, prate, pll->post_div_table,
- pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ pll->post_div_table,
+ pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
@@ -1681,7 +1696,7 @@ static int clk_alpha_pll_postdiv_fabia_set_rate(struct clk_hw *hw,
const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
@@ -1833,7 +1848,7 @@ const struct clk_ops clk_alpha_pll_trion_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_trion_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_trion_ops);
@@ -1844,14 +1859,14 @@ const struct clk_ops clk_alpha_pll_lucid_ops = {
.disable = clk_trion_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_trion_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops);
@@ -1903,7 +1918,7 @@ const struct clk_ops clk_alpha_pll_agera_ops = {
.disable = clk_alpha_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = alpha_pll_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_agera_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
@@ -2119,7 +2134,7 @@ const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_5lpe_ops);
@@ -2129,13 +2144,13 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_5lpe_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_5lpe_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_lucid_5lpe_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_5lpe_ops);
@@ -2304,7 +2319,7 @@ const struct clk_ops clk_alpha_pll_zonda_ops = {
.disable = clk_zonda_pll_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_zonda_ops);
@@ -2529,13 +2544,13 @@ const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops = {
.disable = alpha_pll_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_evo_ops);
const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops = {
.recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
- .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .determine_rate = clk_alpha_pll_postdiv_fabia_determine_rate,
.set_rate = clk_lucid_evo_pll_postdiv_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_evo_ops);
@@ -2546,7 +2561,7 @@ const struct clk_ops clk_alpha_pll_lucid_evo_ops = {
.disable = alpha_pll_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_evo_ops);
@@ -2557,7 +2572,7 @@ const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops = {
.disable = alpha_pll_reset_lucid_evo_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = alpha_pll_lucid_evo_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = alpha_pll_lucid_5lpe_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_reset_lucid_evo_ops);
@@ -2732,22 +2747,25 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw,
return parent_rate * l;
}
-static long clk_rivian_evo_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_rivian_evo_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
unsigned long min_freq, max_freq;
u32 l;
u64 a;
- rate = alpha_pll_round_rate(rate, *prate, &l, &a, 0);
- if (!pll->vco_table || alpha_pll_find_vco(pll, rate))
- return rate;
+ req->rate = alpha_pll_round_rate(req->rate, req->best_parent_rate, &l,
+ &a, 0);
+ if (!pll->vco_table || alpha_pll_find_vco(pll, req->rate))
+ return 0;
min_freq = pll->vco_table[0].min_freq;
max_freq = pll->vco_table[pll->num_vco - 1].max_freq;
- return clamp(rate, min_freq, max_freq);
+ req->rate = clamp(req->rate, min_freq, max_freq);
+
+ return 0;
}
const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
@@ -2755,7 +2773,7 @@ const struct clk_ops clk_alpha_pll_rivian_evo_ops = {
.disable = alpha_pll_lucid_5lpe_disable,
.is_enabled = clk_trion_pll_is_enabled,
.recalc_rate = clk_rivian_evo_pll_recalc_rate,
- .round_rate = clk_rivian_evo_pll_round_rate,
+ .determine_rate = clk_rivian_evo_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_rivian_evo_ops);
@@ -2964,7 +2982,7 @@ const struct clk_ops clk_alpha_pll_regera_ops = {
.disable = clk_zonda_pll_disable,
.is_enabled = clk_alpha_pll_is_enabled,
.recalc_rate = clk_trion_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regera_ops);
@@ -3169,7 +3187,7 @@ const struct clk_ops clk_alpha_pll_slew_ops = {
.enable = clk_alpha_pll_slew_enable,
.disable = clk_alpha_pll_disable,
.recalc_rate = clk_alpha_pll_recalc_rate,
- .round_rate = clk_alpha_pll_round_rate,
+ .determine_rate = clk_alpha_pll_determine_rate,
.set_rate = clk_alpha_pll_slew_set_rate,
};
EXPORT_SYMBOL(clk_alpha_pll_slew_ops);
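The bulk of the clk-alpha-pll.c churn above is the mechanical .round_rate to .determine_rate conversion: instead of returning the chosen rate and writing the parent rate back through a pointer, the op now fills in a struct clk_rate_request and returns a status code. The shape of the conversion on a generic op (foo_pick_rate is a hypothetical helper):

#include <linux/clk-provider.h>

/* Before: */
static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long *prate)
{
	return foo_pick_rate(rate, *prate);
}

/*
 * After: the rate and best_parent_rate live in the request, and the
 * return value is free to report errors.
 */
static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	req->rate = foo_pick_rate(req->rate, req->best_parent_rate);

	return 0;
}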
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index ff41aeab0ab9..0903a05b18cc 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -29,6 +29,7 @@ enum {
CLK_ALPHA_PLL_TYPE_LUCID_OLE,
CLK_ALPHA_PLL_TYPE_PONGO_ELU,
CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
+ CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T = CLK_ALPHA_PLL_TYPE_TAYCAN_ELU,
CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
CLK_ALPHA_PLL_TYPE_DEFAULT_EVO,
CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
@@ -192,14 +193,17 @@ extern const struct clk_ops clk_alpha_pll_zonda_ops;
extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
#define clk_alpha_pll_taycan_elu_ops clk_alpha_pll_lucid_evo_ops
+#define clk_alpha_pll_taycan_eko_t_ops clk_alpha_pll_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
#define clk_alpha_pll_reset_lucid_ole_ops clk_alpha_pll_reset_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
#define clk_alpha_pll_fixed_lucid_ole_ops clk_alpha_pll_fixed_lucid_evo_ops
#define clk_alpha_pll_fixed_taycan_elu_ops clk_alpha_pll_fixed_lucid_evo_ops
+#define clk_alpha_pll_fixed_taycan_eko_t_ops clk_alpha_pll_fixed_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
#define clk_alpha_pll_postdiv_lucid_ole_ops clk_alpha_pll_postdiv_lucid_evo_ops
#define clk_alpha_pll_postdiv_taycan_elu_ops clk_alpha_pll_postdiv_lucid_evo_ops
+#define clk_alpha_pll_postdiv_taycan_eko_t_ops clk_alpha_pll_postdiv_lucid_evo_ops
extern const struct clk_ops clk_alpha_pll_pongo_elu_ops;
extern const struct clk_ops clk_alpha_pll_rivian_evo_ops;
@@ -233,6 +237,8 @@ void clk_pongo_elu_pll_configure(struct clk_alpha_pll *pll, struct regmap *regma
const struct alpha_pll_config *config);
#define clk_taycan_elu_pll_configure(pll, regmap, config) \
clk_lucid_evo_pll_configure(pll, regmap, config)
+#define clk_taycan_eko_t_pll_configure(pll, regmap, config) \
+ clk_lucid_evo_pll_configure(pll, regmap, config)
void clk_rivian_evo_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 0f10090d4ae6..444e7d8648d4 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -142,8 +142,8 @@ static int clk_branch2_mem_enable(struct clk_hw *hw)
u32 val;
int ret;
- regmap_update_bits(branch.clkr.regmap, mem_br->mem_enable_reg,
- mem_br->mem_enable_ack_mask, mem_br->mem_enable_ack_mask);
+ regmap_assign_bits(branch.clkr.regmap, mem_br->mem_enable_reg,
+ mem_br->mem_enable_mask, !mem_br->mem_enable_invert);
ret = regmap_read_poll_timeout(branch.clkr.regmap, mem_br->mem_ack_reg,
val, val & mem_br->mem_enable_ack_mask, 0, 200);
@@ -159,8 +159,8 @@ static void clk_branch2_mem_disable(struct clk_hw *hw)
{
struct clk_mem_branch *mem_br = to_clk_mem_branch(hw);
- regmap_update_bits(mem_br->branch.clkr.regmap, mem_br->mem_enable_reg,
- mem_br->mem_enable_ack_mask, 0);
+ regmap_assign_bits(mem_br->branch.clkr.regmap, mem_br->mem_enable_reg,
+ mem_br->mem_enable_mask, mem_br->mem_enable_invert);
return clk_branch2_disable(hw);
}
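The clk-branch.c change above swaps open-coded regmap_update_bits() calls for regmap_assign_bits(), which sets or clears a mask according to a boolean; combined with the new mem_enable_invert flag, one call site now handles both polarities. Roughly, the helper behaves like this (sketch of the include/linux/regmap.h semantics):

#include <linux/regmap.h>

/* regmap_assign_bits(map, reg, bits, value) behaves like: */
static inline int example_assign_bits(struct regmap *map, unsigned int reg,
				      unsigned int bits, bool value)
{
	return regmap_update_bits(map, reg, bits, value ? bits : 0);
}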
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 292756435f53..6bc2ba2b5350 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -44,6 +44,8 @@ struct clk_branch {
* @mem_enable_reg: branch clock memory gating register
* @mem_ack_reg: branch clock memory ack register
* @mem_enable_ack_mask: branch clock memory enable and ack field in @mem_ack_reg
+ * @mem_enable_mask: branch clock memory enable mask
+ * @mem_enable_invert: branch clock memory enable/disable uses inverted logic
* @branch: branch clock gating handle
*
* Clock which can gate its memories.
@@ -52,6 +54,8 @@ struct clk_mem_branch {
u32 mem_enable_reg;
u32 mem_ack_reg;
u32 mem_enable_ack_mask;
+ u32 mem_enable_mask;
+ bool mem_enable_invert;
struct clk_branch branch;
};
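With the two new fields, a memory-gated branch whose enable logic is active-low could be described like this (offsets and masks purely hypothetical):

static struct clk_mem_branch example_mem_branch = {
	.mem_enable_reg		= 0x8114,	/* hypothetical */
	.mem_ack_reg		= 0x8118,	/* hypothetical */
	.mem_enable_ack_mask	= BIT(0),
	.mem_enable_mask	= BIT(1),
	.mem_enable_invert	= true,	/* write 0 to enable memories */
	.branch = {
		/* usual clk_branch gating description */
	},
};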
diff --git a/drivers/clk/qcom/clk-cbf-8996.c b/drivers/clk/qcom/clk-cbf-8996.c
index ce4efcd995ea..0b40ed601f9a 100644
--- a/drivers/clk/qcom/clk-cbf-8996.c
+++ b/drivers/clk/qcom/clk-cbf-8996.c
@@ -212,7 +212,6 @@ static const struct regmap_config cbf_msm8996_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x10000,
- .fast_io = true,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c
index 72689448a653..21d13c0841ed 100644
--- a/drivers/clk/qcom/clk-cpu-8996.c
+++ b/drivers/clk/qcom/clk-cpu-8996.c
@@ -411,7 +411,6 @@ static const struct regmap_config cpu_msm8996_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x80210,
- .fast_io = true,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 987141c91fe0..31f0650b48ba 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -423,7 +423,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
req->best_parent_rate = rate;
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 8001fd9faf9d..e18cb8807d73 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -201,7 +201,7 @@ __clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
m &= mask;
regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
- n = ~n;
+ n = ~n;
n &= mask;
n += m;
mode = cfg & CFG_MODE_MASK;
@@ -274,7 +274,7 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
req->best_parent_rate = rate;
@@ -311,7 +311,7 @@ __clk_rcg2_select_conf(struct clk_hw *hw, const struct freq_multi_tbl *f,
if (!p)
continue;
- parent_rate = clk_hw_get_rate(p);
+ parent_rate = clk_hw_get_rate(p);
rate = calc_rate(parent_rate, conf->n, conf->m, conf->n, conf->pre_div);
if (rate == req_rate) {
@@ -382,7 +382,7 @@ static int _freq_tbl_fm_determine_rate(struct clk_hw *hw, const struct freq_mult
rate = tmp;
}
} else {
- rate = clk_hw_get_rate(p);
+ rate = clk_hw_get_rate(p);
}
req->best_parent_hw = p;
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
index 63c9fca0d65d..4f5395f0ab6d 100644
--- a/drivers/clk/qcom/clk-regmap-divider.c
+++ b/drivers/clk/qcom/clk-regmap-divider.c
@@ -15,8 +15,8 @@ static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
}
-static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int div_ro_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
struct clk_regmap *clkr = &divider->clkr;
@@ -26,17 +26,24 @@ static long div_round_ro_rate(struct clk_hw *hw, unsigned long rate,
val >>= divider->shift;
val &= BIT(divider->width) - 1;
- return divider_ro_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST, val);
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate, NULL,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST, val);
+
+ return 0;
}
-static long div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int div_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_regmap_div *divider = to_clk_regmap_div(hw);
- return divider_round_rate(hw, rate, prate, NULL, divider->width,
- CLK_DIVIDER_ROUND_CLOSEST);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL,
+ divider->width,
+ CLK_DIVIDER_ROUND_CLOSEST);
+
+ return 0;
}
static int div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -70,14 +77,14 @@ static unsigned long div_recalc_rate(struct clk_hw *hw,
}
const struct clk_ops clk_regmap_div_ops = {
- .round_rate = div_round_rate,
+ .determine_rate = div_determine_rate,
.set_rate = div_set_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
const struct clk_ops clk_regmap_div_ro_ops = {
- .round_rate = div_round_ro_rate,
+ .determine_rate = div_ro_determine_rate,
.recalc_rate = div_recalc_rate,
};
EXPORT_SYMBOL_GPL(clk_regmap_div_ro_ops);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 1496fb3de4be..1a98b3a0c528 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -87,7 +87,7 @@ static DEFINE_MUTEX(rpmh_clk_lock);
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpmh_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -105,7 +105,7 @@ static DEFINE_MUTEX(rpmh_clk_lock);
.hw.init = &(struct clk_init_data){ \
.ops = &clk_rpmh_ops, \
.name = #_name "_ao", \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -182,7 +182,7 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c)
}
c->last_sent_aggr_state = c->aggr_state;
- c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
+ c->peer->last_sent_aggr_state = c->last_sent_aggr_state;
return 0;
}
@@ -390,6 +390,11 @@ DEFINE_CLK_RPMH_VRM(clk7, _a4, "clka7", 4);
DEFINE_CLK_RPMH_VRM(div_clk1, _div2, "divclka1", 2);
+DEFINE_CLK_RPMH_VRM(clk3, _a, "C3A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk4, _a, "C4A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk5, _a, "C5A_E0", 1);
+DEFINE_CLK_RPMH_VRM(clk8, _a, "C8A_E0", 1);
+
DEFINE_CLK_RPMH_BCM(ce, "CE0");
DEFINE_CLK_RPMH_BCM(hwkm, "HK0");
DEFINE_CLK_RPMH_BCM(ipa, "IP0");
@@ -850,6 +855,7 @@ static struct clk_hw *qcs615_rpmh_clocks[] = {
[RPMH_RF_CLK1_A] = &clk_rpmh_rf_clk1_a_ao.hw,
[RPMH_RF_CLK2] = &clk_rpmh_rf_clk2_a.hw,
[RPMH_RF_CLK2_A] = &clk_rpmh_rf_clk2_a_ao.hw,
+ [RPMH_IPA_CLK] = &clk_rpmh_ipa.hw,
};
static const struct clk_rpmh_desc clk_rpmh_qcs615 = {
@@ -879,6 +885,22 @@ static const struct clk_rpmh_desc clk_rpmh_sm8750 = {
.clka_optional = true,
};
+static struct clk_hw *glymur_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &clk_rpmh_bi_tcxo_div2.hw,
+ [RPMH_CXO_CLK_A] = &clk_rpmh_bi_tcxo_div2_ao.hw,
+ [RPMH_RF_CLK3] = &clk_rpmh_clk3_a.hw,
+ [RPMH_RF_CLK3_A] = &clk_rpmh_clk3_a_ao.hw,
+ [RPMH_RF_CLK4] = &clk_rpmh_clk4_a.hw,
+ [RPMH_RF_CLK4_A] = &clk_rpmh_clk4_a_ao.hw,
+ [RPMH_RF_CLK5] = &clk_rpmh_clk5_a.hw,
+ [RPMH_RF_CLK5_A] = &clk_rpmh_clk5_a_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_glymur = {
+ .clks = glymur_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(glymur_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -968,6 +990,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
}
static const struct of_device_id clk_rpmh_match_table[] = {
+ { .compatible = "qcom,glymur-rpmh-clk", .data = &clk_rpmh_glymur},
{ .compatible = "qcom,milos-rpmh-clk", .data = &clk_rpmh_milos},
{ .compatible = "qcom,qcs615-rpmh-clk", .data = &clk_rpmh_qcs615},
{ .compatible = "qcom,qdu1000-rpmh-clk", .data = &clk_rpmh_qdu1000},
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 3bf6df3884a5..103db984a40b 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -30,7 +30,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -47,7 +47,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -74,7 +74,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_name, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
@@ -92,7 +92,7 @@
.hw.init = &(struct clk_init_data){ \
.ops = &clk_smd_rpm_branch_ops, \
.name = #_active, \
- .parent_data = &(const struct clk_parent_data){ \
+ .parent_data = &(const struct clk_parent_data){ \
.fw_name = "xo", \
.name = "xo_board", \
}, \
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 37c3008e6c1b..121591886774 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -277,8 +277,8 @@ static int qcom_cc_icc_register(struct device *dev,
icd[i].slave_id = desc->icc_hws[i].slave_id;
hws = &desc->clks[desc->icc_hws[i].clk_id]->hw;
icd[i].clk = devm_clk_hw_get_clk(dev, hws, "icc");
- if (!icd[i].clk)
- return dev_err_probe(dev, -ENOENT,
+ if (IS_ERR(icd[i].clk))
+ return dev_err_probe(dev, PTR_ERR(icd[i].clk),
"(%d) clock entry is null\n", i);
icd[i].name = clk_hw_get_name(hws);
}
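The common.c hunk is a real bug fix: devm_clk_hw_get_clk() reports failure with an ERR_PTR()-encoded pointer, never NULL, so the old !icd[i].clk test could never fire and an error pointer would have been handed to the interconnect layer as a valid clk (the "is null" wording in the message is now slightly stale). The corrected idiom in isolation:

#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_icc_clk(struct device *dev, struct clk_hw *hws)
{
	struct clk *clk = devm_clk_hw_get_clk(dev, hws, "icc");

	/* ERR_PTR-returning API: check with IS_ERR(), not NULL */
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get icc clk\n");

	return 0;
}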
diff --git a/drivers/clk/qcom/dispcc-glymur.c b/drivers/clk/qcom/dispcc-glymur.c
new file mode 100644
index 000000000000..5203fa6383f6
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-glymur.c
@@ -0,0 +1,1982 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-dispcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_SLEEP_CLK,
+ DT_DP0_PHY_PLL_LINK_CLK,
+ DT_DP0_PHY_PLL_VCO_DIV_CLK,
+ DT_DP1_PHY_PLL_LINK_CLK,
+ DT_DP1_PHY_PLL_VCO_DIV_CLK,
+ DT_DP2_PHY_PLL_LINK_CLK,
+ DT_DP2_PHY_PLL_VCO_DIV_CLK,
+ DT_DP3_PHY_PLL_LINK_CLK,
+ DT_DP3_PHY_PLL_VCO_DIV_CLK,
+ DT_DSI0_PHY_PLL_OUT_BYTECLK,
+ DT_DSI0_PHY_PLL_OUT_DSICLK,
+ DT_DSI1_PHY_PLL_OUT_BYTECLK,
+ DT_DSI1_PHY_PLL_OUT_DSICLK,
+ DT_STANDALONE_PHY_PLL0_LINK_CLK,
+ DT_STANDALONE_PHY_PLL0_VCO_DIV_CLK,
+ DT_STANDALONE_PHY_PLL1_LINK_CLK,
+ DT_STANDALONE_PHY_PLL1_VCO_DIV_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DISP_CC_PLL1_OUT_EVEN,
+ P_DISP_CC_PLL1_OUT_MAIN,
+ P_DP0_PHY_PLL_LINK_CLK,
+ P_DP0_PHY_PLL_VCO_DIV_CLK,
+ P_DP1_PHY_PLL_LINK_CLK,
+ P_DP1_PHY_PLL_VCO_DIV_CLK,
+ P_DP2_PHY_PLL_LINK_CLK,
+ P_DP2_PHY_PLL_VCO_DIV_CLK,
+ P_DP3_PHY_PLL_LINK_CLK,
+ P_DP3_PHY_PLL_VCO_DIV_CLK,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_BYTECLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_STANDALONE_PHY_PLL0_LINK_CLK,
+ P_STANDALONE_PHY_PLL0_VCO_DIV_CLK,
+ P_STANDALONE_PHY_PLL1_LINK_CLK,
+ P_STANDALONE_PHY_PLL1_VCO_DIV_CLK,
+};
+
+static const struct pll_vco taycan_eko_t_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
+/* 257.142858 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0xd,
+ .alpha = 0x6492,
+ .config_ctl_val = 0x25c400e7,
+ .config_ctl_hi_val = 0x0a8060e0,
+ .config_ctl_hi1_val = 0xf51dea20,
+ .user_ctl_val = 0x00000008,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .config = &disp_cc_pll0_config,
+ .vco_table = taycan_eko_t_vco,
+ .num_vco = ARRAY_SIZE(taycan_eko_t_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_eko_t_ops,
+ },
+ },
+};
+
+/* 600.0 MHz Configuration */
+static const struct alpha_pll_config disp_cc_pll1_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x25c400e7,
+ .config_ctl_hi_val = 0x0a8060e0,
+ .config_ctl_hi1_val = 0xf51dea20,
+ .user_ctl_val = 0x00000008,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll disp_cc_pll1 = {
+ .offset = 0x1000,
+ .config = &disp_cc_pll1_config,
+ .vco_table = taycan_eko_t_vco,
+ .num_vco = ARRAY_SIZE(taycan_eko_t_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_pll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_STANDALONE_PHY_PLL0_VCO_DIV_CLK, 1 },
+ { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
+ { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
+ { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
+ { P_STANDALONE_PHY_PLL1_VCO_DIV_CLK, 5 },
+ { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_STANDALONE_PHY_PLL0_VCO_DIV_CLK },
+ { .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
+ { .index = DT_STANDALONE_PHY_PLL1_VCO_DIV_CLK },
+ { .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_DP0_PHY_PLL_LINK_CLK, 1 },
+ { P_DP1_PHY_PLL_LINK_CLK, 2 },
+ { P_DP2_PHY_PLL_LINK_CLK, 3 },
+ { P_DP3_PHY_PLL_LINK_CLK, 4 },
+ { P_STANDALONE_PHY_PLL1_LINK_CLK, 5 },
+ { P_STANDALONE_PHY_PLL0_LINK_CLK, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DP0_PHY_PLL_LINK_CLK },
+ { .index = DT_DP1_PHY_PLL_LINK_CLK },
+ { .index = DT_DP2_PHY_PLL_LINK_CLK },
+ { .index = DT_DP3_PHY_PLL_LINK_CLK },
+ { .index = DT_STANDALONE_PHY_PLL1_LINK_CLK },
+ { .index = DT_STANDALONE_PHY_PLL0_LINK_CLK },
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
+ { P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
+ { .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
+};
+
+static const struct parent_map disp_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_DISP_CC_PLL1_OUT_MAIN, 4 },
+ { P_DISP_CC_PLL1_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &disp_cc_pll0.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+ { .hw = &disp_cc_pll1.clkr.hw },
+};
+
+static const struct parent_map disp_cc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map disp_cc_parent_map_9[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data disp_cc_parent_data_9[] = {
+ { .index = DT_SLEEP_CLK },
+};
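A note on the parent tables above: each parent_map entry pairs a P_* software index with the hardware mux select value programmed into the RCG's CFG register, while the matching clk_parent_data array lists the parents in the same positional order. The lookup the framework performs is essentially this (condensed from qcom_find_src_index() in common.c; parent_map comes from clk-rcg.h):

#include <linux/errno.h>
#include "clk-rcg.h"

static int example_find_src_index(const struct parent_map *map,
				  int num_parents, u8 src)
{
	int i;

	for (i = 0; i < num_parents; i++)
		if (map[i].src == src)
			return i;	/* position in parent_data[] */

	return -ENOENT;
}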
+
+static const struct freq_tbl ftbl_disp_cc_esync0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_esync0_clk_src = {
+ .cmd_rcgr = 0x80c0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_esync1_clk_src = {
+ .cmd_rcgr = 0x80d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk_src",
+ .parent_data = disp_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
+ F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x8360,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_6,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_data = disp_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x8180,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
+ .cmd_rcgr = 0x819c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
+ .cmd_rcgr = 0x8234,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
+ .cmd_rcgr = 0x81e8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
+ .cmd_rcgr = 0x8204,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
+ .cmd_rcgr = 0x821c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
+ .cmd_rcgr = 0x8298,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
+ .cmd_rcgr = 0x827c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
+ .cmd_rcgr = 0x824c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
+ .cmd_rcgr = 0x8264,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
+ .cmd_rcgr = 0x82fc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
+ .cmd_rcgr = 0x82b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
+ .cmd_rcgr = 0x82cc,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
+ .cmd_rcgr = 0x82e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
+ .cmd_rcgr = 0x8348,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
+ .cmd_rcgr = 0x832c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk_src",
+ .parent_data = disp_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
+ .cmd_rcgr = 0x8314,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk_src",
+ .parent_data = disp_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_dp_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x81b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
+ .cmd_rcgr = 0x81d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk_src",
+ .parent_data = disp_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(156000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(205000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(337000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(417000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(532000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(600000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(660000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(717000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x8150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_7,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .hw_clk_ctrl = true,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_data = disp_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x8108,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
+ .cmd_rcgr = 0x8120,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk2_clk_src = {
+ .cmd_rcgr = 0x8138,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk_src",
+ .parent_data = disp_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x8168,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_osc_clk_src = {
+ .cmd_rcgr = 0x80f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_8,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk_src",
+ .parent_data = disp_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xe064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_9,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_sleep_clk_src",
+ .parent_data = disp_cc_parent_data_9,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0xe044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_esync0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_xo_clk_src",
+ .parent_data = disp_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x8198,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
+ .reg = 0x81b4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
+ .reg = 0x8200,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx0_link_dpin_div_clk_src = {
+ .reg = 0x838c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
+ .reg = 0x8294,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx1_link_dpin_div_clk_src = {
+ .reg = 0x8390,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
+ .reg = 0x82c8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx2_link_dpin_div_clk_src = {
+ .reg = 0x8394,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
+ .reg = 0x8344,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_dptx3_link_dpin_div_clk_src = {
+ .reg = 0x8398,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_dpin_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch disp_cc_esync0_clk = {
+ .halt_reg = 0x80b8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_esync1_clk = {
+ .halt_reg = 0x80bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_esync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_esync1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_accu_shift_clk = {
+ .halt_reg = 0xe060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xe060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_accu_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x80b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x8034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_byte1_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_byte1_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
+ .halt_reg = 0x8064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8064,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_dpin_clk = {
+ .halt_reg = 0x837c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x837c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
+ .halt_reg = 0x8054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8054,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
+ .halt_reg = 0x8060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8060,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
+ .halt_reg = 0x8050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8050,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
+ .halt_reg = 0x8080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
+ .halt_reg = 0x8070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_dpin_clk = {
+ .halt_reg = 0x8380,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8380,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
+ .halt_reg = 0x8078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
+ .halt_reg = 0x8068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8068,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
+ .halt_reg = 0x806c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x806c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
+ .halt_reg = 0x8074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
+ .halt_reg = 0x8098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8098,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_dpin_clk = {
+ .halt_reg = 0x8384,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8384,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
+ .halt_reg = 0x8090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8090,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
+ .halt_reg = 0x8084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
+ .halt_reg = 0x8088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_pixel1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx2_usb_router_link_intf_clk = {
+ .halt_reg = 0x8378,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8378,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx2_usb_router_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
+ .halt_reg = 0x80a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
+ .halt_reg = 0x80a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_dpin_clk = {
+ .halt_reg = 0x8388,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8388,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_dpin_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_dpin_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
+ .halt_reg = 0x80a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_link_intf_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
+ .halt_reg = 0x809c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x809c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_dptx3_pixel0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x8044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc1_clk = {
+ .halt_reg = 0x8048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp1_clk = {
+ .halt_reg = 0xa004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x8010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
+ .halt_reg = 0xa014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xa014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x8020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x8020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x8004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk1_clk = {
+ .halt_reg = 0x8008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk2_clk = {
+ .halt_reg = 0x800c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_pclk2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_pclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
+ .halt_reg = 0xc00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_ahb_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_rscc_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync1_clk = {
+ .halt_reg = 0xa024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x8030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_mdss_vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_osc_clk = {
+ .halt_reg = 0x80b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x80b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "disp_cc_osc_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &disp_cc_osc_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc disp_cc_mdss_core_gdsc = {
+ .gdscr = 0x9000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc disp_cc_mdss_core_int2_gdsc = {
+ .gdscr = 0xb000,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "disp_cc_mdss_core_int2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *disp_cc_glymur_clocks[] = {
+ [DISP_CC_ESYNC0_CLK] = &disp_cc_esync0_clk.clkr,
+ [DISP_CC_ESYNC0_CLK_SRC] = &disp_cc_esync0_clk_src.clkr,
+ [DISP_CC_ESYNC1_CLK] = &disp_cc_esync1_clk.clkr,
+ [DISP_CC_ESYNC1_CLK_SRC] = &disp_cc_esync1_clk_src.clkr,
+ [DISP_CC_MDSS_ACCU_SHIFT_CLK] = &disp_cc_mdss_accu_shift_clk.clkr,
+ [DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
+ [DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DPIN_CLK] = &disp_cc_mdss_dptx0_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DPIN_CLK] = &disp_cc_mdss_dptx1_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DPIN_CLK] = &disp_cc_mdss_dptx2_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
+ [DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK] =
+ &disp_cc_mdss_dptx2_usb_router_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DPIN_CLK] = &disp_cc_mdss_dptx3_link_dpin_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_DPIN_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_dpin_div_clk_src.clkr,
+ [DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
+ [DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
+ [DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
+ [DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
+ [DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK] = &disp_cc_mdss_pclk2_clk.clkr,
+ [DISP_CC_MDSS_PCLK2_CLK_SRC] = &disp_cc_mdss_pclk2_clk_src.clkr,
+ [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
+ [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_OSC_CLK] = &disp_cc_osc_clk.clkr,
+ [DISP_CC_OSC_CLK_SRC] = &disp_cc_osc_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL1] = &disp_cc_pll1.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *disp_cc_glymur_gdscs[] = {
+ [DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
+ [DISP_CC_MDSS_CORE_INT2_GDSC] = &disp_cc_mdss_core_int2_gdsc,
+};
+
+static const struct qcom_reset_map disp_cc_glymur_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
+};
+
+static struct clk_alpha_pll *disp_cc_glymur_plls[] = {
+ &disp_cc_pll0,
+ &disp_cc_pll1,
+};
+
+static u32 disp_cc_glymur_critical_cbcrs[] = {
+ 0xe07c, /* DISP_CC_SLEEP_CLK */
+ 0xe05c, /* DISP_CC_XO_CLK */
+};
+
+static const struct regmap_config disp_cc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11014,
+ .fast_io = true,
+};
+
+static struct qcom_cc_driver_data disp_cc_glymur_driver_data = {
+ .alpha_plls = disp_cc_glymur_plls,
+ .num_alpha_plls = ARRAY_SIZE(disp_cc_glymur_plls),
+ .clk_cbcrs = disp_cc_glymur_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(disp_cc_glymur_critical_cbcrs),
+};
+
+static const struct qcom_cc_desc disp_cc_glymur_desc = {
+ .config = &disp_cc_glymur_regmap_config,
+ .clks = disp_cc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_glymur_clocks),
+ .resets = disp_cc_glymur_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_glymur_resets),
+ .gdscs = disp_cc_glymur_gdscs,
+ .num_gdscs = ARRAY_SIZE(disp_cc_glymur_gdscs),
+ .use_rpm = true,
+ .driver_data = &disp_cc_glymur_driver_data,
+};
+
+static const struct of_device_id disp_cc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, disp_cc_glymur_match_table);
+
+static int disp_cc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &disp_cc_glymur_desc);
+}
+
+static struct platform_driver disp_cc_glymur_driver = {
+ .probe = disp_cc_glymur_probe,
+ .driver = {
+ .name = "dispcc-glymur",
+ .of_match_table = disp_cc_glymur_match_table,
+ },
+};
+
+module_platform_driver(disp_cc_glymur_driver);
+
+MODULE_DESCRIPTION("QTI DISPCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/dispcc-milos.c b/drivers/clk/qcom/dispcc-milos.c
index 602d3a498d33..95b6dd89d9ae 100644
--- a/drivers/clk/qcom/dispcc-milos.c
+++ b/drivers/clk/qcom/dispcc-milos.c
@@ -937,7 +937,7 @@ static struct qcom_cc_driver_data disp_cc_milos_driver_data = {
.clk_regs_configure = disp_cc_milos_clk_regs_configure,
};
-static struct qcom_cc_desc disp_cc_milos_desc = {
+static const struct qcom_cc_desc disp_cc_milos_desc = {
.config = &disp_cc_milos_regmap_config,
.clks = disp_cc_milos_clocks,
.num_clks = ARRAY_SIZE(disp_cc_milos_clocks),
diff --git a/drivers/clk/qcom/dispcc-sc7280.c b/drivers/clk/qcom/dispcc-sc7280.c
index 8bdf57734a3d..465dc06c8712 100644
--- a/drivers/clk/qcom/dispcc-sc7280.c
+++ b/drivers/clk/qcom/dispcc-sc7280.c
@@ -17,6 +17,7 @@
#include "clk-regmap-divider.h"
#include "common.h"
#include "gdsc.h"
+#include "reset.h"
enum {
P_BI_TCXO,
@@ -847,6 +848,11 @@ static struct gdsc *disp_cc_sc7280_gdscs[] = {
[DISP_CC_MDSS_CORE_GDSC] = &disp_cc_mdss_core_gdsc,
};
+static const struct qcom_reset_map disp_cc_sc7280_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x1000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x2000 },
+};
+
static const struct regmap_config disp_cc_sc7280_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -861,6 +867,8 @@ static const struct qcom_cc_desc disp_cc_sc7280_desc = {
.num_clks = ARRAY_SIZE(disp_cc_sc7280_clocks),
.gdscs = disp_cc_sc7280_gdscs,
.num_gdscs = ARRAY_SIZE(disp_cc_sc7280_gdscs),
+ .resets = disp_cc_sc7280_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sc7280_resets),
};
static const struct of_device_id disp_cc_sc7280_match_table[] = {
diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c
index b0bd163a449c..5b1d8f86515f 100644
--- a/drivers/clk/qcom/dispcc-sm6350.c
+++ b/drivers/clk/qcom/dispcc-sm6350.c
@@ -679,6 +679,11 @@ static struct clk_branch disp_cc_xo_clk = {
},
};
+static const struct qcom_reset_map disp_cc_sm6350_resets[] = {
+ [DISP_CC_MDSS_CORE_BCR] = { 0x1000 },
+ [DISP_CC_MDSS_RSCC_BCR] = { 0x2000 },
+};
+
static struct gdsc mdss_gdsc = {
.gdscr = 0x1004,
.en_rest_wait_val = 0x2,
@@ -746,6 +751,8 @@ static const struct qcom_cc_desc disp_cc_sm6350_desc = {
.num_clks = ARRAY_SIZE(disp_cc_sm6350_clocks),
.gdscs = disp_cc_sm6350_gdscs,
.num_gdscs = ARRAY_SIZE(disp_cc_sm6350_gdscs),
+ .resets = disp_cc_sm6350_resets,
+ .num_resets = ARRAY_SIZE(disp_cc_sm6350_resets),
};
static const struct of_device_id disp_cc_sm6350_match_table[] = {
diff --git a/drivers/clk/qcom/dispcc-sm7150.c b/drivers/clk/qcom/dispcc-sm7150.c
index bdfff246ed3f..811d380a8e9f 100644
--- a/drivers/clk/qcom/dispcc-sm7150.c
+++ b/drivers/clk/qcom/dispcc-sm7150.c
@@ -20,6 +20,7 @@
#include "clk-regmap-divider.h"
#include "common.h"
#include "gdsc.h"
+#include "reset.h"
enum {
DT_BI_TCXO,
@@ -356,7 +357,7 @@ static struct clk_rcg2 dispcc_mdss_pclk0_clk_src = {
.name = "dispcc_mdss_pclk0_clk_src",
.parent_data = dispcc_parent_data_4,
.num_parents = ARRAY_SIZE(dispcc_parent_data_4),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
.ops = &clk_pixel_ops,
},
};
@@ -951,6 +952,10 @@ static struct gdsc *dispcc_sm7150_gdscs[] = {
[MDSS_GDSC] = &mdss_gdsc,
};
+static const struct qcom_reset_map dispcc_sm7150_resets[] = {
+ [DISPCC_MDSS_CORE_BCR] = { 0x2000 },
+};
+
static const struct regmap_config dispcc_sm7150_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -965,6 +970,8 @@ static const struct qcom_cc_desc dispcc_sm7150_desc = {
.num_clks = ARRAY_SIZE(dispcc_sm7150_clocks),
.gdscs = dispcc_sm7150_gdscs,
.num_gdscs = ARRAY_SIZE(dispcc_sm7150_gdscs),
+ .resets = dispcc_sm7150_resets,
+ .num_resets = ARRAY_SIZE(dispcc_sm7150_resets),
};
static const struct of_device_id dispcc_sm7150_match_table[] = {
diff --git a/drivers/clk/qcom/dispcc-x1e80100.c b/drivers/clk/qcom/dispcc-x1e80100.c
index 40069eba41f2..aa7fd43969f9 100644
--- a/drivers/clk/qcom/dispcc-x1e80100.c
+++ b/drivers/clk/qcom/dispcc-x1e80100.c
@@ -1618,6 +1618,9 @@ static struct clk_regmap *disp_cc_x1e80100_clocks[] = {
static const struct qcom_reset_map disp_cc_x1e80100_resets[] = {
[DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
+ [DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK_ARES] = { .reg = 0x8044, .bit = 2 },
+ [DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK_ARES] = { .reg = 0x8068, .bit = 2 },
+ [DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK_ARES] = { .reg = 0x8088, .bit = 2 },
[DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
[DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
};
diff --git a/drivers/clk/qcom/ecpricc-qdu1000.c b/drivers/clk/qcom/ecpricc-qdu1000.c
index dbc11260479b..c2a16616ed64 100644
--- a/drivers/clk/qcom/ecpricc-qdu1000.c
+++ b/drivers/clk/qcom/ecpricc-qdu1000.c
@@ -920,6 +920,7 @@ static struct clk_branch ecpri_cc_eth_100g_c2c1_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_0_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x80b4,
@@ -943,6 +944,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_1_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x80bc,
@@ -966,6 +968,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_c2c_0_hm_ff_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_c2c_hm_macsec_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(4),
.mem_enable_ack_mask = BIT(4),
.branch = {
.halt_reg = 0x80ac,
@@ -989,6 +992,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_c2c_hm_macsec_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_clk = {
.mem_enable_reg = 0x8414,
.mem_ack_reg = 0x8428,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x80d8,
@@ -1012,6 +1016,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_dbg_c2c_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_dbg_c2c_hm_ff_1_clk = {
.mem_enable_reg = 0x8414,
.mem_ack_reg = 0x8428,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x80e0,
@@ -1053,6 +1058,7 @@ static struct clk_branch ecpri_cc_eth_100g_dbg_c2c_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_0_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x800c,
@@ -1076,6 +1082,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_1_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x8014,
@@ -1099,6 +1106,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_2_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(2),
.mem_enable_ack_mask = BIT(2),
.branch = {
.halt_reg = 0x801c,
@@ -1122,6 +1130,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_2_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_0_hm_ff_3_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(3),
.mem_enable_ack_mask = BIT(3),
.branch = {
.halt_reg = 0x8024,
@@ -1163,6 +1172,7 @@ static struct clk_branch ecpri_cc_eth_100g_fh_0_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_0_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x8044,
@@ -1186,6 +1196,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_1_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x804c,
@@ -1209,6 +1220,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_2_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(2),
.mem_enable_ack_mask = BIT(2),
.branch = {
.halt_reg = 0x8054,
@@ -1232,6 +1244,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_2_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_1_hm_ff_3_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(3),
.mem_enable_ack_mask = BIT(3),
.branch = {
.halt_reg = 0x805c,
@@ -1273,6 +1286,7 @@ static struct clk_branch ecpri_cc_eth_100g_fh_1_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_0_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(0),
.mem_enable_ack_mask = BIT(0),
.branch = {
.halt_reg = 0x807c,
@@ -1296,6 +1310,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_1_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(1),
.mem_enable_ack_mask = BIT(1),
.branch = {
.halt_reg = 0x8084,
@@ -1319,6 +1334,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_2_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(2),
.mem_enable_ack_mask = BIT(2),
.branch = {
.halt_reg = 0x808c,
@@ -1342,6 +1358,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_2_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_2_hm_ff_3_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(3),
.mem_enable_ack_mask = BIT(3),
.branch = {
.halt_reg = 0x8094,
@@ -1383,6 +1400,7 @@ static struct clk_branch ecpri_cc_eth_100g_fh_2_udp_fifo_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_0_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(4),
.mem_enable_ack_mask = BIT(4),
.branch = {
.halt_reg = 0x8004,
@@ -1406,6 +1424,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_0_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_1_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(4),
.mem_enable_ack_mask = BIT(4),
.branch = {
.halt_reg = 0x803c,
@@ -1429,6 +1448,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_1_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_2_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(4),
.mem_enable_ack_mask = BIT(4),
.branch = {
.halt_reg = 0x8074,
@@ -1452,6 +1472,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_fh_macsec_2_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_c2c_hm_ref_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x80c4,
@@ -1475,6 +1496,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_mac_c2c_hm_ref_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk = {
.mem_enable_reg = 0x8414,
.mem_ack_reg = 0x8428,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x80e8,
@@ -1498,6 +1520,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_mac_dbg_c2c_hm_ref_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh0_hm_ref_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x802c,
@@ -1521,6 +1544,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh0_hm_ref_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh1_hm_ref_clk = {
.mem_enable_reg = 0x8408,
.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x8064,
@@ -1544,6 +1568,7 @@ static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh1_hm_ref_clk = {
static struct clk_mem_branch ecpri_cc_eth_100g_mac_fh2_hm_ref_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(5),
.mem_enable_ack_mask = BIT(5),
.branch = {
.halt_reg = 0x809c,
@@ -1603,6 +1628,7 @@ static struct clk_branch ecpri_cc_eth_dbg_noc_axi_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_0_ock_sram_clk = {
.mem_enable_reg = 0x8404,
.mem_ack_reg = 0x8418,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd140,
@@ -1621,6 +1647,7 @@ static struct clk_mem_branch ecpri_cc_eth_phy_0_ock_sram_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_1_ock_sram_clk = {
.mem_enable_reg = 0x8408,
	.mem_ack_reg = 0x841c,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd148,
@@ -1639,6 +1666,7 @@ static struct clk_mem_branch ecpri_cc_eth_phy_1_ock_sram_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_2_ock_sram_clk = {
.mem_enable_reg = 0x840c,
.mem_ack_reg = 0x8420,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd150,
@@ -1657,6 +1685,7 @@ static struct clk_mem_branch ecpri_cc_eth_phy_2_ock_sram_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_3_ock_sram_clk = {
.mem_enable_reg = 0x8410,
.mem_ack_reg = 0x8424,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd158,
@@ -1675,6 +1704,7 @@ static struct clk_mem_branch ecpri_cc_eth_phy_3_ock_sram_clk = {
static struct clk_mem_branch ecpri_cc_eth_phy_4_ock_sram_clk = {
.mem_enable_reg = 0x8414,
.mem_ack_reg = 0x8428,
+ .mem_enable_mask = BIT(6),
.mem_enable_ack_mask = BIT(6),
.branch = {
.halt_reg = 0xd160,
diff --git a/drivers/clk/qcom/gcc-glymur.c b/drivers/clk/qcom/gcc-glymur.c
new file mode 100644
index 000000000000..deab819576d0
--- /dev/null
+++ b/drivers/clk/qcom/gcc-glymur.c
@@ -0,0 +1,8615 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-gcc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap-phy-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
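+/*
+ * Board clock inputs, indexed in the order of the "clocks" property in
+ * the qcom,glymur-gcc devicetree binding.
+ */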
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+ DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_PCIE_3A_PIPE_CLK,
+ DT_PCIE_3B_PIPE_CLK,
+ DT_PCIE_4_PIPE_CLK,
+ DT_PCIE_5_PIPE_CLK,
+ DT_PCIE_6_PIPE_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ DT_UFS_PHY_RX_SYMBOL_0_CLK,
+ DT_UFS_PHY_RX_SYMBOL_1_CLK,
+ DT_UFS_PHY_TX_SYMBOL_0_CLK,
+ DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+};
+
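+/* Parent identifiers used by the parent_map tables to name mux inputs */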
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL14_OUT_EVEN,
+ P_GCC_GPLL14_OUT_MAIN,
+ P_GCC_GPLL1_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL5_OUT_MAIN,
+ P_GCC_GPLL7_OUT_MAIN,
+ P_GCC_GPLL8_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC,
+ P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC,
+ P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_PCIE_3A_PIPE_CLK,
+ P_PCIE_3B_PIPE_CLK,
+ P_PCIE_4_PIPE_CLK,
+ P_PCIE_5_PIPE_CLK,
+ P_PCIE_6_PIPE_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK,
+ P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK,
+ P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+};
+
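+/*
+ * All GPLLs are enabled through the shared voting register at 0x62040;
+ * each PLL owns one bit (GPLL0 -> BIT(0), GPLL14 -> BIT(14), ...).
+ */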
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll0_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_eko_t_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll1",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll14 = {
+ .offset = 0xe000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll14",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll14_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gcc_gpll14_out_even = {
+ .offset = 0xe000,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll14_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll14_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll14_out_even",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll14.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_taycan_eko_t_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll5",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll7",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll8 = {
+ .offset = 0x8000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll8",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_EKO_T],
+ .clkr = {
+ .enable_reg = 0x62040,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_taycan_eko_t_ops,
+ },
+ },
+};
+
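+/*
+ * Forward declarations: these clocks feed parent_data tables that are
+ * defined before the clock structures themselves.
+ */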
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src;
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src;
+static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src;
+
+static struct clk_rcg2 gcc_usb4_1_phy_pll_pipe_clk_src;
+
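+/*
+ * Each parent_map entry pairs a parent identifier with the hardware mux
+ * field value that selects it; the matching clk_parent_data array lists
+ * the actual clock sources in the same order.
+ */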
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .index = DT_BI_TCXO },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL1_OUT_MAIN, 4 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll1.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL14_OUT_MAIN, 1 },
+ { P_GCC_GPLL14_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll14.clkr.hw },
+ { .hw = &gcc_gpll14_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL8_OUT_MAIN, 2 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_GCC_GPLL8_OUT_MAIN, 3 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .hw = &gcc_gpll8.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_17[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_18[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_19[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_19[] = {
+ { .index = DT_UFS_PHY_RX_SYMBOL_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_20[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_20[] = {
+ { .index = DT_UFS_PHY_TX_SYMBOL_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_21[] = {
+ { P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_21[] = {
+ { .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_22[] = {
+ { P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC, 2 },
+ { P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_22[] = {
+ { .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .hw = &gcc_usb4_1_phy_pll_pipe_clk_src.clkr.hw },
+ { .index = DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_23[] = {
+ { P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_23[] = {
+ { .hw = &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static const struct parent_map gcc_parent_map_24[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_24[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_25[] = {
+ { P_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_25[] = {
+ { .index = DT_USB3_UNI_PHY_MP_GCC_USB30_PIPE_1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_26[] = {
+ { P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_26[] = {
+ { .index = DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_27[] = {
+ { P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_27[] = {
+ { .index = DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_28[] = {
+ { P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_28[] = {
+ { .index = DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_29[] = {
+ { P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_29[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_30[] = {
+ { P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_30[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_31[] = {
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_31[] = {
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_32[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL7_OUT_MAIN, 2 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_32[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll7.clkr.hw },
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct parent_map gcc_parent_map_33[] = {
+ { P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_33[] = {
+ { .index = DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_34[] = {
+ { P_QUSB4PHY_0_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_34[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_35[] = {
+ { P_QUSB4PHY_0_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_35[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_36[] = {
+ { P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_36[] = {
+ { .index = DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_37[] = {
+ { P_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_37[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP0_GMUX_2_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_38[] = {
+ { P_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_38[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP1_GMUX_2_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_39[] = {
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_39[] = {
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_40[] = {
+ { P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_40[] = {
+ { .index = DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_41[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL5_OUT_MAIN, 3 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_41[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll5.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_42[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_42[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_43[] = {
+ { P_QUSB4PHY_1_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_43[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_44[] = {
+ { P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_44[] = {
+ { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_45[] = {
+ { P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_45[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_46[] = {
+ { P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_46[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_47[] = {
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_47[] = {
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_48[] = {
+ { P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_48[] = {
+ { .index = DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct parent_map gcc_parent_map_49[] = {
+ { P_QUSB4PHY_2_GCC_USB4_RX0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_49[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_50[] = {
+ { P_QUSB4PHY_2_GCC_USB4_RX1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_50[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct parent_map gcc_parent_map_51[] = {
+ { P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_51[] = {
+ { .index = DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
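+/*
+ * PCIe pipe clocks use the regmap PHY mux helper, which switches the
+ * source between the PHY pipe clock and a safe reference while the PHY
+ * is powered down.
+ */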
+static struct clk_regmap_phy_mux gcc_pcie_3a_pipe_clk_src = {
+ .reg = 0xdc088,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3A_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_3b_pipe_clk_src = {
+ .reg = 0x941b4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_3B_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_4_pipe_clk_src = {
+ .reg = 0x881a4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_4_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_5_pipe_clk_src = {
+ .reg = 0xc309c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_5_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_pcie_6_pipe_clk_src = {
+ .reg = 0x8a1a4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_PCIE_6_PIPE_CLK,
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x7706c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_18,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x770f0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_19,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_19,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_19),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x7705c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_20,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_20,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_20),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_prim_phy_pipe_clk_src = {
+ .reg = 0x2b0b8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_21,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_21,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_21),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_sec_phy_pipe_clk_src = {
+ .reg = 0x2d0c4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_22,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_22,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_22),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb34_tert_phy_pipe_clk_src = {
+ .reg = 0xe00bc,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_23,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_23,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_23),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_0_clk_src = {
+ .reg = 0x9a07c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_24,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk_src",
+ .parent_data = gcc_parent_data_24,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_24),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_mp_phy_pipe_1_clk_src = {
+ .reg = 0x9a084,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_25,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk_src",
+ .parent_data = gcc_parent_data_25,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_25),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x3f08c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_26,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_26,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_26),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
+ .reg = 0xe207c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_27,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_27,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_27),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src = {
+ .reg = 0xe107c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_28,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_28,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_28),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_dp0_clk_src = {
+ .reg = 0x2b080,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_29,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_29,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_29),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_dp1_clk_src = {
+ .reg = 0x2b134,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_30,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_30,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_30),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2b0f0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_31,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_31,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_31),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2b120,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_33,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_33,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_33),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_rx0_clk_src = {
+ .reg = 0x2b0c0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_34,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_34,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_34),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_rx1_clk_src = {
+ .reg = 0x2b0d4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_35,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_35,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_35),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_0_phy_sys_clk_src = {
+ .reg = 0x2b100,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_36,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_36,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_36),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_dp0_clk_src = {
+ .reg = 0x2d08c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_37,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_37,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_37),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_dp1_clk_src = {
+ .reg = 0x2d154,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_38,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_38,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_38),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2d114,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_39,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_39,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_39),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2d140,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_40,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_40,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_40),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx0_clk_src = {
+ .reg = 0x2d0e4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_42,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_42,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_42),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_rx1_clk_src = {
+ .reg = 0x2d0f8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_43,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_43,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_43),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_1_phy_sys_clk_src = {
+ .reg = 0x2d124,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_44,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_44,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_44),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_dp0_clk_src = {
+ .reg = 0xe0084,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_45,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_45,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_45),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_dp1_clk_src = {
+ .reg = 0xe013c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_46,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_46,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_46),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0xe00f4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_47,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_47,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_47),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0xe0124,
+ .shift = 0,
+ .width = 1,
+ .parent_map = gcc_parent_map_48,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_48,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_48),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_rx0_clk_src = {
+ .reg = 0xe00c4,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_49,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_49,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_49),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_rx1_clk_src = {
+ .reg = 0xe00d8,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_50,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_50,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_50),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb4_2_phy_sys_clk_src = {
+ .reg = 0xe0104,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_51,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_51,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_51),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
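+/* Frequency tables: F(rate, parent, divider, M, N) per the common F() macro */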
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x64004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x92004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x93004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0xc8168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc803c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x2e168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x2e03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_aux_clk_src = {
+ .cmd_rcgr = 0xc0168,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_2_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc003c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_aux_clk_src = {
+ .cmd_rcgr = 0xdc08c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3a_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xdc070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_aux_clk_src = {
+ .cmd_rcgr = 0x941b8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_3b_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x94088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_aux_clk_src = {
+ .cmd_rcgr = 0x881a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_4_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x88078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_aux_clk_src = {
+ .cmd_rcgr = 0xc30a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_5_phy_rchng_clk_src = {
+ .cmd_rcgr = 0xc3084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6_aux_clk_src = {
+ .cmd_rcgr = 0x8a1a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_6_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x8a078,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_3a_aux_clk_src = {
+ .cmd_rcgr = 0x6c01c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3a_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_3b_aux_clk_src = {
+ .cmd_rcgr = 0x7501c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3b_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_4_aux_clk_src = {
+ .cmd_rcgr = 0xd3018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_4_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_5_aux_clk_src = {
+ .cmd_rcgr = 0xd2018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_5_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_phy_6_aux_clk_src = {
+ .cmd_rcgr = 0xd4018,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_6_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x33010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_oob_qspi_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
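+/*
+ * QUP clock init data are kept in separate named structures, matching the
+ * pattern other qcom GCC drivers use to let DFS registration update them
+ * at probe time.
+ */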
+static struct clk_init_data gcc_qupv3_oob_qspi_s0_clk_src_init = {
+ .name = "gcc_qupv3_oob_qspi_s0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_oob_qspi_s0_clk_src = {
+ .cmd_rcgr = 0xe7044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_oob_qspi_s0_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_oob_qspi_s1_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_EVEN, 2, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_oob_qspi_s1_clk_src_init = {
+ .name = "gcc_qupv3_oob_qspi_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_oob_qspi_s1_clk_src = {
+ .cmd_rcgr = 0xe7170,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_oob_qspi_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s2_clk_src = {
+ .cmd_rcgr = 0x287a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s3_clk_src = {
+ .cmd_rcgr = 0x288d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_qspi_s6_clk_src = {
+ .cmd_rcgr = 0x2866c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_qspi_s6_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x28014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x28150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s4_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x282b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x283f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x28540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s2_clk_src = {
+ .cmd_rcgr = 0xb37a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s3_clk_src = {
+ .cmd_rcgr = 0xb38d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_qspi_s6_clk_src = {
+ .cmd_rcgr = 0xb366c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_qspi_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0xb3014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0xb3150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0xb32b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0xb33f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = {
+ .cmd_rcgr = 0xb3540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s7_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s2_clk_src = {
+ .cmd_rcgr = 0xb47a0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s3_clk_src = {
+ .cmd_rcgr = 0xb48d0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_qspi_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_qspi_s6_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_qspi_s6_clk_src = {
+ .cmd_rcgr = 0xb466c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_oob_qspi_s1_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_qspi_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0xb4014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0xb4150,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0xb42b4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0xb43f0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s7_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
+ .cmd_rcgr = 0xb4540,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s4_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
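+ /*
+ * The SDCC RCGs use clk_rcg2_floor_ops, which round a requested rate down to
+ * the nearest table entry so the card clock never runs faster than what the
+ * MMC core asked for.
+ */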
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0xb001c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_17,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0xdf01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x77038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(201500000, P_GCC_GPLL4_OUT_MAIN, 4, 0, 0),
+ F(403000000, P_GCC_GPLL4_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x77090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
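+ /*
+ * This and the aux/mock-UTMI/sb_if RCGs below reuse
+ * ftbl_gcc_pcie_0_aux_clk_src, defined earlier in the patch; on related SoCs
+ * that table carries only the 19.2 MHz TCXO entry, which is all these slow
+ * housekeeping clocks need (assumed; the table is not visible in this hunk).
+ */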
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x770c4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x770a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb20_master_clk_src[] = {
+ F(60000000, P_GCC_GPLL14_OUT_MAIN, 10, 0, 0),
+ F(120000000, P_GCC_GPLL14_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb20_master_clk_src = {
+ .cmd_rcgr = 0xbc030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_usb20_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb20_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xbc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mp_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
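+
+ /*
+ * Fractional pre-dividers such as 4.5 are fine here: the driver-local F()
+ * macro stores 2 * h - 1, so 4.5 is encoded as the integer 8 and no floating
+ * point survives to runtime (assuming the usual qcom F() definition at the
+ * top of the file).
+ */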
+
+static struct clk_rcg2 gcc_usb30_mp_master_clk_src = {
+ .cmd_rcgr = 0x9a03c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_mp_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x9a054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x3f04c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x3f064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_master_clk_src = {
+ .cmd_rcgr = 0xe203c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xe2054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_master_clk_src = {
+ .cmd_rcgr = 0xe103c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mp_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_tert_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xe1054,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_mp_phy_aux_clk_src = {
+ .cmd_rcgr = 0x9a088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x3f090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
+ .cmd_rcgr = 0xe2080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_tert_phy_aux_clk_src = {
+ .cmd_rcgr = 0xe1080,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_master_clk_src[] = {
+ F(177666750, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(355333500, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
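+
+ /*
+ * Both entries imply GPLL8 configured at roughly 710.667 MHz, since
+ * 177666750 * 4 == 355333500 * 2 == 710667000.
+ */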
+
+static struct clk_rcg2 gcc_usb4_0_master_clk_src = {
+ .cmd_rcgr = 0x2b02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(125000000, P_GCC_GPLL7_OUT_MAIN, 4, 0, 0),
+ F(250000000, P_GCC_GPLL7_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_0_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2b104,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_32,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_32,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_32),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_0_sb_if_clk_src = {
+ .cmd_rcgr = 0x2b0a0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_0_tmu_clk_src = {
+ .cmd_rcgr = 0x2b084,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_master_clk_src = {
+ .cmd_rcgr = 0x2d02c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(177666750, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
+ F(355333500, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0x2d128,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb4_1_phy_pll_pipe_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(311000000, P_GCC_GPLL5_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb4_1_phy_pll_pipe_clk_src = {
+ .cmd_rcgr = 0x2d0c8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_41,
+ .freq_tbl = ftbl_gcc_usb4_1_phy_pll_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pll_pipe_clk_src",
+ .parent_data = gcc_parent_data_41,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_41),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_sb_if_clk_src = {
+ .cmd_rcgr = 0x2d0ac,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_1_tmu_clk_src = {
+ .cmd_rcgr = 0x2d090,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_master_clk_src = {
+ .cmd_rcgr = 0xe002c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_usb4_0_master_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_phy_pcie_pipe_clk_src = {
+ .cmd_rcgr = 0xe0108,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_sb_if_clk_src = {
+ .cmd_rcgr = 0xe00a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb4_2_tmu_clk_src = {
+ .cmd_rcgr = 0xe0088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_usb4_0_phy_pcie_pipe_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_no_init_park_ops,
+ },
+};
+
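+ /*
+ * The pipe and postdiv dividers below use clk_regmap_div_ro_ops: the divider
+ * field is set by hardware or firmware, and the framework only reads it back
+ * to report the resulting rate.
+ */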
+static struct clk_regmap_div gcc_pcie_3b_pipe_div_clk_src = {
+ .reg = 0x94070,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_4_pipe_div_clk_src = {
+ .reg = 0x88060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_5_pipe_div_clk_src = {
+ .reg = 0xc306c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_pcie_6_pipe_div_clk_src = {
+ .reg = 0x8a060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_oob_s0_clk_src = {
+ .reg = 0xe7024,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_oob_s1_clk_src = {
+ .reg = 0xe7038,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s1_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s2_clk_src = {
+ .reg = 0x2828c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s3_clk_src = {
+ .reg = 0x282a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap0_s6_clk_src = {
+ .reg = 0x2852c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s2_clk_src = {
+ .reg = 0xb328c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s3_clk_src = {
+ .reg = 0xb32a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap1_s6_clk_src = {
+ .reg = 0xb352c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s2_clk_src = {
+ .reg = 0xb428c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s3_clk_src = {
+ .reg = 0xb42a0,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_qupv3_wrap2_s6_clk_src = {
+ .reg = 0xb452c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb20_mock_utmi_postdiv_clk_src = {
+ .reg = 0xbc174,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_mp_mock_utmi_postdiv_clk_src = {
+ .reg = 0x9a06c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x3f07c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = {
+ .reg = 0xe206c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_tert_mock_utmi_postdiv_clk_src = {
+ .reg = 0xe106c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_postdiv_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
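+ /*
+ * Branch (gate) clocks. BRANCH_HALT_VOTED branches are enabled through the
+ * shared vote registers (0x62008, 0x62010, ...), where other masters may
+ * also hold a vote, so halt status is not enforced on disable;
+ * BRANCH_HALT_SKIP branches (typically fed by an external PHY) skip halt
+ * polling entirely.
+ */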
+static struct clk_branch gcc_aggre_noc_pcie_3a_west_sf_axi_clk = {
+ .halt_reg = 0xdc0bc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_3a_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_3b_west_sf_axi_clk = {
+ .halt_reg = 0x941ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_3b_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_4_west_sf_axi_clk = {
+ .halt_reg = 0x881d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_4_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_5_east_sf_axi_clk = {
+ .halt_reg = 0xc30d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_5_east_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_noc_pcie_6_west_sf_axi_clk = {
+ .halt_reg = 0x8a1d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_noc_pcie_6_west_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x77000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb2_prim_axi_clk = {
+ .halt_reg = 0xbc17c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbc17c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbc17c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_mp_axi_clk = {
+ .halt_reg = 0x9a004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9a004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x3f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f00c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_sec_axi_clk = {
+ .halt_reg = 0xe2004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb3_tert_axi_clk = {
+ .halt_reg = 0xe1004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_0_axi_clk = {
+ .halt_reg = 0x2b000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_0_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_1_axi_clk = {
+ .halt_reg = 0x2d000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_1_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_usb4_2_axi_clk = {
+ .halt_reg = 0xe0000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe0000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe0000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_aggre_usb4_2_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_ahb_clk = {
+ .halt_reg = 0x9b02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_axi_clk = {
+ .halt_reg = 0x9b030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_av1e_xo_clk = {
+ .halt_reg = 0x9b044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_av1e_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x34038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x34038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x26014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x26028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x26028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x82004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x82004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_south_ahb_clk = {
+ .halt_reg = 0xba2ec,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2ec,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_pcie_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb2_prim_axi_clk = {
+ .halt_reg = 0xbc178,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xbc178,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xbc178,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb2_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_mp_axi_clk = {
+ .halt_reg = 0x9a000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9a000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9a000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_mp_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x3f000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
+ .halt_reg = 0xe2000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe2000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_sec_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_tert_axi_clk = {
+ .halt_reg = 0xe1000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe1000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb3_tert_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_ahb_clk = {
+ .halt_reg = 0x3f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb_anoc_south_ahb_clk = {
+ .halt_reg = 0x3f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_cfg_noc_usb_anoc_south_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
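+ /*
+ * CLK_IS_CRITICAL makes the framework enable this clock at registration and
+ * never gate it, presumably because the display can already be scanning out
+ * from the bootloader when this driver probes.
+ */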
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x27008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_disp_hf_axi_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_ahb_clk = {
+ .halt_reg = 0x9b004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0_clk = {
+ .halt_reg = 0x9b008,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9b008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_axi0c_clk = {
+ .halt_reg = 0x9b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eva_xo_clk = {
+ .halt_reg = 0x9b024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9b024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_eva_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x64000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x64000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x92000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x92000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x93000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x93000,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gemnoc_gfx_clk = {
+ .halt_reg = 0x71010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gemnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_reg = 0x71024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_reg = 0x7102c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7102c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0xc8018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0xba4a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0xba498,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba498,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0xc8038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0xc8028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0xba488,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba488,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0xba484,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x2e018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0xba480,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba480,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0xba470,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba470,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x2e038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x2e028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0xba460,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba460,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0xba45c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_aux_clk = {
+ .halt_reg = 0xc0018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_cfg_ahb_clk = {
+ .halt_reg = 0xba4d0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4d0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_mstr_axi_clk = {
+ .halt_reg = 0xba4c0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xba4c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_phy_rchng_clk = {
+ .halt_reg = 0xc0038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_2_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_pipe_clk = {
+ .halt_reg = 0xc0028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_axi_clk = {
+ .halt_reg = 0xba4b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = {
+ .halt_reg = 0xba4ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_2_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_aux_clk = {
+ .halt_reg = 0xdc04c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc04c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_cfg_ahb_clk = {
+ .halt_reg = 0xba4f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_mstr_axi_clk = {
+ .halt_reg = 0xdc038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xdc038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_phy_rchng_clk = {
+ .halt_reg = 0xdc06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_pipe_clk = {
+ .halt_reg = 0xdc05c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xdc05c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3a_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_axi_clk = {
+ .halt_reg = 0xdc024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3a_slv_q2a_axi_clk = {
+ .halt_reg = 0xdc01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3a_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_aux_clk = {
+ .halt_reg = 0x94050,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_cfg_ahb_clk = {
+ .halt_reg = 0xba4f4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_mstr_axi_clk = {
+ .halt_reg = 0x94038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x94038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_phy_rchng_clk = {
+ .halt_reg = 0x94084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_pipe_clk = {
+ .halt_reg = 0x94060,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_pipe_div2_clk = {
+ .halt_reg = 0x94074,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_3b_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_axi_clk = {
+ .halt_reg = 0x94024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x94024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_3b_slv_q2a_axi_clk = {
+ .halt_reg = 0x9401c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_3b_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_aux_clk = {
+ .halt_reg = 0x88040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_cfg_ahb_clk = {
+ .halt_reg = 0xba4fc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4fc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_mstr_axi_clk = {
+ .halt_reg = 0x88030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x88030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_phy_rchng_clk = {
+ .halt_reg = 0x88074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_clk = {
+ .halt_reg = 0x88050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_pipe_div2_clk = {
+ .halt_reg = 0x88064,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_4_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_axi_clk = {
+ .halt_reg = 0x88020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x88020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_4_slv_q2a_axi_clk = {
+ .halt_reg = 0x8801c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_4_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_aux_clk = {
+ .halt_reg = 0xc304c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_cfg_ahb_clk = {
+ .halt_reg = 0xba4f8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba4f8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_mstr_axi_clk = {
+ .halt_reg = 0xc3038,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xc3038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_phy_rchng_clk = {
+ .halt_reg = 0xc3080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipe_clk = {
+ .halt_reg = 0xc305c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_pipe_div2_clk = {
+ .halt_reg = 0xc3070,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_5_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_axi_clk = {
+ .halt_reg = 0xc3024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc3024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_5_slv_q2a_axi_clk = {
+ .halt_reg = 0xc301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_5_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_aux_clk = {
+ .halt_reg = 0x8a040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_cfg_ahb_clk = {
+ .halt_reg = 0xba500,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba500,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_mstr_axi_clk = {
+ .halt_reg = 0x8a030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x8a030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_phy_rchng_clk = {
+ .halt_reg = 0x8a074,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_phy_rchng_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_pipe_clk = {
+ .halt_reg = 0x8a050,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_pipe_div2_clk = {
+ .halt_reg = 0x8a064,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_pipe_div2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_6_pipe_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_slv_axi_clk = {
+ .halt_reg = 0x8a020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8a020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_6_slv_q2a_axi_clk = {
+ .halt_reg = 0x8a01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_6_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
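+/*
+ * PCIe NoC interface branches. Like the other BRANCH_HALT_VOTED clocks in
+ * this driver, they are enabled by voting a per-clock bit in a shared
+ * enable register (0x62008 here); on disable, only a delay is applied,
+ * since another agent may still be holding a vote for the clock.
+ */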
+static struct clk_branch gcc_pcie_noc_pwrctl_clk = {
+ .halt_reg = 0xba2ac,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_pwrctl_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_qosgen_extref_clk = {
+ .halt_reg = 0xba2a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_qosgen_extref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_sf_center_clk = {
+ .halt_reg = 0xba2b0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2b0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_sf_center_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_slave_sf_east_clk = {
+ .halt_reg = 0xba2b8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2b8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_slave_sf_east_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_slave_sf_west_clk = {
+ .halt_reg = 0xba2c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_slave_sf_west_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_noc_tsctr_clk = {
+ .halt_reg = 0xba2a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba2a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_noc_tsctr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
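+/*
+ * Per-controller PCIe PHY aux branches, each parented to its own aux RCG;
+ * CLK_SET_RATE_PARENT propagates rate requests up to that RCG.
+ */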
+static struct clk_branch gcc_pcie_phy_3a_aux_clk = {
+ .halt_reg = 0x6c038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x6c038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3a_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_3a_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_3b_aux_clk = {
+ .halt_reg = 0x75034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_3b_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_3b_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_4_aux_clk = {
+ .halt_reg = 0xd3030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_4_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_4_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_5_aux_clk = {
+ .halt_reg = 0xd2030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_5_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_5_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_phy_6_aux_clk = {
+ .halt_reg = 0xd4030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_phy_6_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pcie_phy_6_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_cfg_ahb_clk = {
+ .halt_reg = 0xb8004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xb8004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rscc_xo_clk = {
+ .halt_reg = 0xb8008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62038,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pcie_rscc_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
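+/*
+ * The PDM branches use self-contained CBCRs: enable_reg equals halt_reg
+ * and BIT(0) is the CBCR CLK_ENABLE bit. The AHB branch additionally
+ * supports hardware clock gating through hwcg_reg/hwcg_bit.
+ */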
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x33004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x33004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x33004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x33008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x33008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
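+/*
+ * QMIP AHB interface branches for the video/camera/GPU/PCIe subsystems;
+ * every one of them is hardware-clock-gateable (hwcg_reg/hwcg_bit).
+ */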
+static struct clk_branch gcc_qmip_av1e_ahb_clk = {
+ .halt_reg = 0x9b048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9b048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x9b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_av1e_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_cmd_ahb_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_cmd_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x26008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x71008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x71008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_3a_ahb_clk = {
+ .halt_reg = 0xdc018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xdc018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_3a_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_3b_ahb_clk = {
+ .halt_reg = 0x94018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x94018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62028,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_3b_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_4_ahb_clk = {
+ .halt_reg = 0x88018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x88018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_5_ahb_clk = {
+ .halt_reg = 0xc3018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc3018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_5_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_6_ahb_clk = {
+ .halt_reg = 0x8a018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8a018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62030,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_pcie_6_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x32018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x32008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32008,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x32014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec1_ahb_clk = {
+ .halt_reg = 0x32010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x32010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32010,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x3200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
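+/*
+ * QUPv3 OOB wrapper: shared core/core_2x, M/S AHB and TCXO branches plus
+ * one branch per serial engine (s0/s1) and per QSPI instance; the SE and
+ * QSPI branches follow their RCGs via CLK_SET_RATE_PARENT.
+ */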
+static struct clk_branch gcc_qupv3_oob_core_2x_clk = {
+ .halt_reg = 0xc5040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_core_clk = {
+ .halt_reg = 0xc502c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_m_ahb_clk = {
+ .halt_reg = 0xe7004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe7004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe7004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_qspi_s0_clk = {
+ .halt_reg = 0xe7040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_qspi_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_qspi_s1_clk = {
+ .halt_reg = 0xe729c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_qspi_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_qspi_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s0_clk = {
+ .halt_reg = 0xe7014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s1_clk = {
+ .halt_reg = 0xe7028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_oob_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_s_ahb_clk = {
+ .halt_reg = 0xc5028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5028,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_oob_tcxo_clk = {
+ .halt_reg = 0xe703c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_oob_tcxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
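+/*
+ * QUPv3 wrappers 0..2 all follow the same shape: shared core/core_2x and
+ * M/S AHB branches plus one branch per serial engine, each parented to
+ * its RCG with CLK_SET_RATE_PARENT so rate requests land on the RCG.
+ */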
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0xc5448,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0xc5434,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s2_clk = {
+ .halt_reg = 0x2879c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s3_clk = {
+ .halt_reg = 0x288cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_qspi_s6_clk = {
+ .halt_reg = 0x28798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x28140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x2827c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x28290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x282a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x283e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x2851c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x28530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0xc5198,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(14),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0xc5184,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s2_clk = {
+ .halt_reg = 0xb379c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(24),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s3_clk = {
+ .halt_reg = 0xb38cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(25),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_qspi_s6_clk = {
+ .halt_reg = 0xb3798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(23),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0xb3004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(15),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0xb3140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(16),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0xb327c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(17),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0xb3290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(18),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0xb32a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(19),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0xb33e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(20),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0xb351c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(21),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s7_clk = {
+ .halt_reg = 0xb3530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(22),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap1_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap1_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0xc52f0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(29),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0xc52dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(28),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s2_clk = {
+ .halt_reg = 0xb479c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(7),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s3_clk = {
+ .halt_reg = 0xb48cc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(8),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_qspi_s6_clk = {
+ .halt_reg = 0xb4798,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(6),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_qspi_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0xb4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(30),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0xb4140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(31),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0xb427c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0xb4290,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(1),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0xb42a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(2),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0xb43e0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(3),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0xb451c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(4),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s7_clk = {
+ .halt_reg = 0xb4530,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(5),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap2_s7_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_qupv3_wrap2_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0xc542c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc542c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(9),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0xc5430,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5430,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62020,
+ .enable_mask = BIT(10),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0xc517c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc517c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0xc5180,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc5180,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0xc52d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc52d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(26),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0xc52d8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xc52d8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(27),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
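+/*
+ * SDCC branches are plain BRANCH_HALT clocks with their own CBCR; the
+ * apps clocks track their RCGs through CLK_SET_RATE_PARENT.
+ */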
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0xb0014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0xb0004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0xdf014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xdf014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0xdf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xdf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
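+/*
+ * UFS PHY branches. The RX/TX symbol clocks are generated by the PHY
+ * itself, so no halt bit is polled for them: BRANCH_HALT_DELAY only
+ * inserts a fixed delay after the enable/disable write.
+ */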
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0xba504,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba504,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba504,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x7701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x77080,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x770c0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x770c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x770c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x77034,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x770dc,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x770dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x77030,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x77030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x77070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x77070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x77070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
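+/*
+ * USB controller branches (USB2.0 plus the USB3.0 MP/prim/sec instances).
+ * The master and mock UTMI clocks are parented to their RCG/postdiv
+ * sources with CLK_SET_RATE_PARENT; the sleep branches carry no parent
+ * here.
+ */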
+static struct clk_branch gcc_usb20_master_clk = {
+ .halt_reg = 0xbc018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc018,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_mock_utmi_clk = {
+ .halt_reg = 0xbc02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb20_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb20_sleep_clk = {
+ .halt_reg = 0xbc028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xbc028,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb20_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_master_clk = {
+ .halt_reg = 0x9a024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_mock_utmi_clk = {
+ .halt_reg = 0x9a038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mp_sleep_clk = {
+ .halt_reg = 0x9a034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_mp_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x3f030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x3f048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x3f044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+ .halt_reg = 0xe2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+ .halt_reg = 0xe2038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+ .halt_reg = 0xe2034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_sec_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_master_clk = {
+ .halt_reg = 0xe1024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1024,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_mock_utmi_clk = {
+ .halt_reg = 0xe1038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1038,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_tert_sleep_clk = {
+ .halt_reg = 0xe1034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1034,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb30_tert_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_aux_clk = {
+ .halt_reg = 0x9a070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_com_aux_clk = {
+ .halt_reg = 0x9a074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9a074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_0_clk = {
+ .halt_reg = 0x9a078,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x9a078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_pipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_mp_phy_pipe_1_clk = {
+ .halt_reg = 0x9a080,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x9a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_mp_phy_pipe_1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_mp_phy_pipe_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x3f080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f080,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x3f084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3f084,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x3f088,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x3f088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3f088,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_aux_clk = {
+ .halt_reg = 0xe2070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = {
+ .halt_reg = 0xe2074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe2074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_sec_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
+ .halt_reg = 0xe2078,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe2078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe2078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_sec_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_aux_clk = {
+ .halt_reg = 0xe1070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_com_aux_clk = {
+ .halt_reg = 0xe1074,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe1074,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb3_tert_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_tert_phy_pipe_clk = {
+ .halt_reg = 0xe1078,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe1078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb3_tert_phy_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_cfg_ahb_clk = {
+ .halt_reg = 0xba450,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba450,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba450,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp0_clk = {
+ .halt_reg = 0x2b070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_dp1_clk = {
+ .halt_reg = 0x2b124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b124,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_master_clk = {
+ .halt_reg = 0x2b01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2b0f4,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x2b0f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
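+/*
+ * The USB4 PHY PCIe pipe branches below are not gated through their own
+ * register block: each is enabled via a per-branch bit (BIT(11)..BIT(13))
+ * in what appears to be a shared vote register at 0x62010, and the halt
+ * check is skipped since the pipe clock is sourced from the PHY.
+ */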
+static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2b04c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(11),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx0_clk = {
+ .halt_reg = 0x2b0c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_rx1_clk = {
+ .halt_reg = 0x2b0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_phy_usb_pipe_clk = {
+ .halt_reg = 0x2b0bc,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2b0bc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b0bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sb_if_clk = {
+ .halt_reg = 0x2b048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_sys_clk = {
+ .halt_reg = 0x2b05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_tmu_clk = {
+ .halt_reg = 0x2b09c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b09c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2b09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_0_uc_hrr_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_cfg_ahb_clk = {
+ .halt_reg = 0xba454,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba454,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba454,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp0_clk = {
+ .halt_reg = 0x2d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_dp1_clk = {
+ .halt_reg = 0x2d144,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d144,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_master_clk = {
+ .halt_reg = 0x2d01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0x2d118,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x2d118,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
+ .halt_reg = 0x2d04c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(12),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
+ .halt_reg = 0x2d0e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d0e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_rx1_clk = {
+ .halt_reg = 0x2d0fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d0fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = {
+ .halt_reg = 0x2d0e0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x2d0e0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d0e0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sb_if_clk = {
+ .halt_reg = 0x2d048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_sys_clk = {
+ .halt_reg = 0x2d05c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_tmu_clk = {
+ .halt_reg = 0x2d0a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2d0a8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2d0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_1_uc_hrr_clk = {
+ .halt_reg = 0x2d06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_cfg_ahb_clk = {
+ .halt_reg = 0xba458,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xba458,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xba458,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp0_clk = {
+ .halt_reg = 0xe0070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0070,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_dp1_clk = {
+ .halt_reg = 0xe0128,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0128,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_master_clk = {
+ .halt_reg = 0xe001c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe001c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_master_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = {
+ .halt_reg = 0xe00f8,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0xe00f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = {
+ .halt_reg = 0xe004c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(13),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx0_clk = {
+ .halt_reg = 0xe00c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_rx1_clk = {
+ .halt_reg = 0xe00dc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe00dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_phy_usb_pipe_clk = {
+ .halt_reg = 0xe00c0,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0xe00c0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe00c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sb_if_clk = {
+ .halt_reg = 0xe0048,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sb_if_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_sb_if_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_sys_clk = {
+ .halt_reg = 0xe005c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe005c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_tmu_clk = {
+ .halt_reg = 0xe00a0,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0xe00a0,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe00a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_tmu_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_tmu_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb4_2_uc_hrr_clk = {
+ .halt_reg = 0xe006c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe006c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_uc_hrr_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x3201c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x3201c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0c_clk = {
+ .halt_reg = 0x32030,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32030,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi0c_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x32044,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x32044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x32044,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
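+/*
+ * GDSCs (globally distributed switch controllers) implement the power
+ * domains for the PCIe, UFS and USB controllers above. The *_wait_val
+ * fields set the enable/disable settling delays, POLL_CFG_GDSCR polls the
+ * CFG register for the power state, RETAIN_FF_ENABLE retains register
+ * contents across power collapse, and VOTABLE domains are enabled by
+ * voting rather than by direct control.
+ */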
+static struct gdsc gcc_pcie_0_tunnel_gdsc = {
+ .gdscr = 0xc8004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_0_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_1_tunnel_gdsc = {
+ .gdscr = 0x2e004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_1_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_2_tunnel_gdsc = {
+ .gdscr = 0xc0004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_2_tunnel_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3a_gdsc = {
+ .gdscr = 0xdc004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_3a_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3a_phy_gdsc = {
+ .gdscr = 0x6c004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_3a_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3b_gdsc = {
+ .gdscr = 0x94004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_3b_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_3b_phy_gdsc = {
+ .gdscr = 0x75004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_3b_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_gdsc = {
+ .gdscr = 0x88004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_4_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_4_phy_gdsc = {
+ .gdscr = 0xd3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_4_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_gdsc = {
+ .gdscr = 0xc3004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_5_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_5_phy_gdsc = {
+ .gdscr = 0xd2004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_5_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6_gdsc = {
+ .gdscr = 0x8a004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_pcie_6_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_pcie_6_phy_gdsc = {
+ .gdscr = 0xd4004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_pcie_6_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | VOTABLE,
+};
+
+static struct gdsc gcc_ufs_phy_gdsc = {
+ .gdscr = 0x77008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb20_prim_gdsc = {
+ .gdscr = 0xbc004,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb20_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_mp_gdsc = {
+ .gdscr = 0x9a010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_mp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x3f01c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_sec_gdsc = {
+ .gdscr = 0xe2010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_sec_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb30_tert_gdsc = {
+ .gdscr = 0xe1010,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb30_tert_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss0_phy_gdsc = {
+ .gdscr = 0x5400c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb3_mp_ss1_phy_gdsc = {
+ .gdscr = 0x5402c,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb3_mp_ss1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_0_gdsc = {
+ .gdscr = 0x2b008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_1_gdsc = {
+ .gdscr = 0x2d008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb4_2_gdsc = {
+ .gdscr = 0xe0008,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0xf,
+ .pd = {
+ .name = "gcc_usb4_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_0_phy_gdsc = {
+ .gdscr = 0xdb024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_0_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_1_phy_gdsc = {
+ .gdscr = 0x2c024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_1_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc gcc_usb_2_phy_gdsc = {
+ .gdscr = 0xbe024,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x2,
+ .pd = {
+ .name = "gcc_usb_2_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
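+/*
+ * Lookup table mapping the clock indices from the DT binding header to
+ * their clk_regmap instances; this array is handed to the qcom clock
+ * framework when the driver probes.
+ */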
+static struct clk_regmap *gcc_glymur_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_3A_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_3a_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_3B_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_3b_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_4_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_4_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_5_EAST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_5_east_sf_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_6_WEST_SF_AXI_CLK] = &gcc_aggre_noc_pcie_6_west_sf_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_USB2_PRIM_AXI_CLK] = &gcc_aggre_usb2_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_MP_AXI_CLK] = &gcc_aggre_usb3_mp_axi_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr,
+ [GCC_AGGRE_USB3_TERT_AXI_CLK] = &gcc_aggre_usb3_tert_axi_clk.clkr,
+ [GCC_AGGRE_USB4_0_AXI_CLK] = &gcc_aggre_usb4_0_axi_clk.clkr,
+ [GCC_AGGRE_USB4_1_AXI_CLK] = &gcc_aggre_usb4_1_axi_clk.clkr,
+ [GCC_AGGRE_USB4_2_AXI_CLK] = &gcc_aggre_usb4_2_axi_clk.clkr,
+ [GCC_AV1E_AHB_CLK] = &gcc_av1e_ahb_clk.clkr,
+ [GCC_AV1E_AXI_CLK] = &gcc_av1e_axi_clk.clkr,
+ [GCC_AV1E_XO_CLK] = &gcc_av1e_xo_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_south_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB2_PRIM_AXI_CLK] = &gcc_cfg_noc_usb2_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_MP_AXI_CLK] = &gcc_cfg_noc_usb3_mp_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
+ [GCC_CFG_NOC_USB3_TERT_AXI_CLK] = &gcc_cfg_noc_usb3_tert_axi_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_AHB_CLK] = &gcc_cfg_noc_usb_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB_ANOC_SOUTH_AHB_CLK] = &gcc_cfg_noc_usb_anoc_south_ahb_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_EVA_AHB_CLK] = &gcc_eva_ahb_clk.clkr,
+ [GCC_EVA_AXI0_CLK] = &gcc_eva_axi0_clk.clkr,
+ [GCC_EVA_AXI0C_CLK] = &gcc_eva_axi0c_clk.clkr,
+ [GCC_EVA_XO_CLK] = &gcc_eva_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL1] = &gcc_gpll1.clkr,
+ [GCC_GPLL14] = &gcc_gpll14.clkr,
+ [GCC_GPLL14_OUT_EVEN] = &gcc_gpll14_out_even.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL5] = &gcc_gpll5.clkr,
+ [GCC_GPLL7] = &gcc_gpll7.clkr,
+ [GCC_GPLL8] = &gcc_gpll8.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GEMNOC_GFX_CLK] = &gcc_gpu_gemnoc_gfx_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr,
+ [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr,
+ [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr,
+ [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK] = &gcc_pcie_2_phy_rchng_clk.clkr,
+ [GCC_PCIE_2_PHY_RCHNG_CLK_SRC] = &gcc_pcie_2_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr,
+ [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr,
+ [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK] = &gcc_pcie_3a_aux_clk.clkr,
+ [GCC_PCIE_3A_AUX_CLK_SRC] = &gcc_pcie_3a_aux_clk_src.clkr,
+ [GCC_PCIE_3A_CFG_AHB_CLK] = &gcc_pcie_3a_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3A_MSTR_AXI_CLK] = &gcc_pcie_3a_mstr_axi_clk.clkr,
+ [GCC_PCIE_3A_PHY_RCHNG_CLK] = &gcc_pcie_3a_phy_rchng_clk.clkr,
+ [GCC_PCIE_3A_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3a_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3A_PIPE_CLK] = &gcc_pcie_3a_pipe_clk.clkr,
+ [GCC_PCIE_3A_PIPE_CLK_SRC] = &gcc_pcie_3a_pipe_clk_src.clkr,
+ [GCC_PCIE_3A_SLV_AXI_CLK] = &gcc_pcie_3a_slv_axi_clk.clkr,
+ [GCC_PCIE_3A_SLV_Q2A_AXI_CLK] = &gcc_pcie_3a_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK] = &gcc_pcie_3b_aux_clk.clkr,
+ [GCC_PCIE_3B_AUX_CLK_SRC] = &gcc_pcie_3b_aux_clk_src.clkr,
+ [GCC_PCIE_3B_CFG_AHB_CLK] = &gcc_pcie_3b_cfg_ahb_clk.clkr,
+ [GCC_PCIE_3B_MSTR_AXI_CLK] = &gcc_pcie_3b_mstr_axi_clk.clkr,
+ [GCC_PCIE_3B_PHY_RCHNG_CLK] = &gcc_pcie_3b_phy_rchng_clk.clkr,
+ [GCC_PCIE_3B_PHY_RCHNG_CLK_SRC] = &gcc_pcie_3b_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_CLK] = &gcc_pcie_3b_pipe_clk.clkr,
+ [GCC_PCIE_3B_PIPE_CLK_SRC] = &gcc_pcie_3b_pipe_clk_src.clkr,
+ [GCC_PCIE_3B_PIPE_DIV2_CLK] = &gcc_pcie_3b_pipe_div2_clk.clkr,
+ [GCC_PCIE_3B_PIPE_DIV_CLK_SRC] = &gcc_pcie_3b_pipe_div_clk_src.clkr,
+ [GCC_PCIE_3B_SLV_AXI_CLK] = &gcc_pcie_3b_slv_axi_clk.clkr,
+ [GCC_PCIE_3B_SLV_Q2A_AXI_CLK] = &gcc_pcie_3b_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK] = &gcc_pcie_4_aux_clk.clkr,
+ [GCC_PCIE_4_AUX_CLK_SRC] = &gcc_pcie_4_aux_clk_src.clkr,
+ [GCC_PCIE_4_CFG_AHB_CLK] = &gcc_pcie_4_cfg_ahb_clk.clkr,
+ [GCC_PCIE_4_MSTR_AXI_CLK] = &gcc_pcie_4_mstr_axi_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK] = &gcc_pcie_4_phy_rchng_clk.clkr,
+ [GCC_PCIE_4_PHY_RCHNG_CLK_SRC] = &gcc_pcie_4_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_CLK] = &gcc_pcie_4_pipe_clk.clkr,
+ [GCC_PCIE_4_PIPE_CLK_SRC] = &gcc_pcie_4_pipe_clk_src.clkr,
+ [GCC_PCIE_4_PIPE_DIV2_CLK] = &gcc_pcie_4_pipe_div2_clk.clkr,
+ [GCC_PCIE_4_PIPE_DIV_CLK_SRC] = &gcc_pcie_4_pipe_div_clk_src.clkr,
+ [GCC_PCIE_4_SLV_AXI_CLK] = &gcc_pcie_4_slv_axi_clk.clkr,
+ [GCC_PCIE_4_SLV_Q2A_AXI_CLK] = &gcc_pcie_4_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK] = &gcc_pcie_5_aux_clk.clkr,
+ [GCC_PCIE_5_AUX_CLK_SRC] = &gcc_pcie_5_aux_clk_src.clkr,
+ [GCC_PCIE_5_CFG_AHB_CLK] = &gcc_pcie_5_cfg_ahb_clk.clkr,
+ [GCC_PCIE_5_MSTR_AXI_CLK] = &gcc_pcie_5_mstr_axi_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK] = &gcc_pcie_5_phy_rchng_clk.clkr,
+ [GCC_PCIE_5_PHY_RCHNG_CLK_SRC] = &gcc_pcie_5_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_CLK] = &gcc_pcie_5_pipe_clk.clkr,
+ [GCC_PCIE_5_PIPE_CLK_SRC] = &gcc_pcie_5_pipe_clk_src.clkr,
+ [GCC_PCIE_5_PIPE_DIV2_CLK] = &gcc_pcie_5_pipe_div2_clk.clkr,
+ [GCC_PCIE_5_PIPE_DIV_CLK_SRC] = &gcc_pcie_5_pipe_div_clk_src.clkr,
+ [GCC_PCIE_5_SLV_AXI_CLK] = &gcc_pcie_5_slv_axi_clk.clkr,
+ [GCC_PCIE_5_SLV_Q2A_AXI_CLK] = &gcc_pcie_5_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_6_AUX_CLK] = &gcc_pcie_6_aux_clk.clkr,
+ [GCC_PCIE_6_AUX_CLK_SRC] = &gcc_pcie_6_aux_clk_src.clkr,
+ [GCC_PCIE_6_CFG_AHB_CLK] = &gcc_pcie_6_cfg_ahb_clk.clkr,
+ [GCC_PCIE_6_MSTR_AXI_CLK] = &gcc_pcie_6_mstr_axi_clk.clkr,
+ [GCC_PCIE_6_PHY_RCHNG_CLK] = &gcc_pcie_6_phy_rchng_clk.clkr,
+ [GCC_PCIE_6_PHY_RCHNG_CLK_SRC] = &gcc_pcie_6_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_6_PIPE_CLK] = &gcc_pcie_6_pipe_clk.clkr,
+ [GCC_PCIE_6_PIPE_CLK_SRC] = &gcc_pcie_6_pipe_clk_src.clkr,
+ [GCC_PCIE_6_PIPE_DIV2_CLK] = &gcc_pcie_6_pipe_div2_clk.clkr,
+ [GCC_PCIE_6_PIPE_DIV_CLK_SRC] = &gcc_pcie_6_pipe_div_clk_src.clkr,
+ [GCC_PCIE_6_SLV_AXI_CLK] = &gcc_pcie_6_slv_axi_clk.clkr,
+ [GCC_PCIE_6_SLV_Q2A_AXI_CLK] = &gcc_pcie_6_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_NOC_PWRCTL_CLK] = &gcc_pcie_noc_pwrctl_clk.clkr,
+ [GCC_PCIE_NOC_QOSGEN_EXTREF_CLK] = &gcc_pcie_noc_qosgen_extref_clk.clkr,
+ [GCC_PCIE_NOC_SF_CENTER_CLK] = &gcc_pcie_noc_sf_center_clk.clkr,
+ [GCC_PCIE_NOC_SLAVE_SF_EAST_CLK] = &gcc_pcie_noc_slave_sf_east_clk.clkr,
+ [GCC_PCIE_NOC_SLAVE_SF_WEST_CLK] = &gcc_pcie_noc_slave_sf_west_clk.clkr,
+ [GCC_PCIE_NOC_TSCTR_CLK] = &gcc_pcie_noc_tsctr_clk.clkr,
+ [GCC_PCIE_PHY_3A_AUX_CLK] = &gcc_pcie_phy_3a_aux_clk.clkr,
+ [GCC_PCIE_PHY_3A_AUX_CLK_SRC] = &gcc_pcie_phy_3a_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_3B_AUX_CLK] = &gcc_pcie_phy_3b_aux_clk.clkr,
+ [GCC_PCIE_PHY_3B_AUX_CLK_SRC] = &gcc_pcie_phy_3b_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_4_AUX_CLK] = &gcc_pcie_phy_4_aux_clk.clkr,
+ [GCC_PCIE_PHY_4_AUX_CLK_SRC] = &gcc_pcie_phy_4_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_5_AUX_CLK] = &gcc_pcie_phy_5_aux_clk.clkr,
+ [GCC_PCIE_PHY_5_AUX_CLK_SRC] = &gcc_pcie_phy_5_aux_clk_src.clkr,
+ [GCC_PCIE_PHY_6_AUX_CLK] = &gcc_pcie_phy_6_aux_clk.clkr,
+ [GCC_PCIE_PHY_6_AUX_CLK_SRC] = &gcc_pcie_phy_6_aux_clk_src.clkr,
+ [GCC_PCIE_RSCC_CFG_AHB_CLK] = &gcc_pcie_rscc_cfg_ahb_clk.clkr,
+ [GCC_PCIE_RSCC_XO_CLK] = &gcc_pcie_rscc_xo_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_AV1E_AHB_CLK] = &gcc_qmip_av1e_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_CMD_AHB_CLK] = &gcc_qmip_camera_cmd_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_3A_AHB_CLK] = &gcc_qmip_pcie_3a_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_3B_AHB_CLK] = &gcc_qmip_pcie_3b_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_4_AHB_CLK] = &gcc_qmip_pcie_4_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_5_AHB_CLK] = &gcc_qmip_pcie_5_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_6_AHB_CLK] = &gcc_qmip_pcie_6_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC1_AHB_CLK] = &gcc_qmip_video_vcodec1_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_CORE_2X_CLK] = &gcc_qupv3_oob_core_2x_clk.clkr,
+ [GCC_QUPV3_OOB_CORE_CLK] = &gcc_qupv3_oob_core_clk.clkr,
+ [GCC_QUPV3_OOB_M_AHB_CLK] = &gcc_qupv3_oob_m_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S0_CLK] = &gcc_qupv3_oob_qspi_s0_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S0_CLK_SRC] = &gcc_qupv3_oob_qspi_s0_clk_src.clkr,
+ [GCC_QUPV3_OOB_QSPI_S1_CLK] = &gcc_qupv3_oob_qspi_s1_clk.clkr,
+ [GCC_QUPV3_OOB_QSPI_S1_CLK_SRC] = &gcc_qupv3_oob_qspi_s1_clk_src.clkr,
+ [GCC_QUPV3_OOB_S0_CLK] = &gcc_qupv3_oob_s0_clk.clkr,
+ [GCC_QUPV3_OOB_S0_CLK_SRC] = &gcc_qupv3_oob_s0_clk_src.clkr,
+ [GCC_QUPV3_OOB_S1_CLK] = &gcc_qupv3_oob_s1_clk.clkr,
+ [GCC_QUPV3_OOB_S1_CLK_SRC] = &gcc_qupv3_oob_s1_clk_src.clkr,
+ [GCC_QUPV3_OOB_S_AHB_CLK] = &gcc_qupv3_oob_s_ahb_clk.clkr,
+ [GCC_QUPV3_OOB_TCXO_CLK] = &gcc_qupv3_oob_tcxo_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S2_CLK] = &gcc_qupv3_wrap0_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S3_CLK] = &gcc_qupv3_wrap0_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S6_CLK] = &gcc_qupv3_wrap0_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap0_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S2_CLK] = &gcc_qupv3_wrap1_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S3_CLK] = &gcc_qupv3_wrap1_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S6_CLK] = &gcc_qupv3_wrap1_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap1_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK] = &gcc_qupv3_wrap1_s7_clk.clkr,
+ [GCC_QUPV3_WRAP1_S7_CLK_SRC] = &gcc_qupv3_wrap1_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S2_CLK] = &gcc_qupv3_wrap2_qspi_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S2_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S3_CLK] = &gcc_qupv3_wrap2_qspi_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S3_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S6_CLK] = &gcc_qupv3_wrap2_qspi_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_QSPI_S6_CLK_SRC] = &gcc_qupv3_wrap2_qspi_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK] = &gcc_qupv3_wrap2_s7_clk.clkr,
+ [GCC_QUPV3_WRAP2_S7_CLK_SRC] = &gcc_qupv3_wrap2_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB20_MASTER_CLK] = &gcc_usb20_master_clk.clkr,
+ [GCC_USB20_MASTER_CLK_SRC] = &gcc_usb20_master_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK] = &gcc_usb20_mock_utmi_clk.clkr,
+ [GCC_USB20_MOCK_UTMI_CLK_SRC] = &gcc_usb20_mock_utmi_clk_src.clkr,
+ [GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb20_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB20_SLEEP_CLK] = &gcc_usb20_sleep_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK] = &gcc_usb30_mp_master_clk.clkr,
+ [GCC_USB30_MP_MASTER_CLK_SRC] = &gcc_usb30_mp_master_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK] = &gcc_usb30_mp_mock_utmi_clk.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mp_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MP_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mp_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_MP_SLEEP_CLK] = &gcc_usb30_mp_sleep_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+ [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = &gcc_usb30_sec_mock_utmi_clk_src.clkr,
+ [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK] = &gcc_usb30_tert_master_clk.clkr,
+ [GCC_USB30_TERT_MASTER_CLK_SRC] = &gcc_usb30_tert_master_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK] = &gcc_usb30_tert_mock_utmi_clk.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_CLK_SRC] = &gcc_usb30_tert_mock_utmi_clk_src.clkr,
+ [GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_TERT_SLEEP_CLK] = &gcc_usb30_tert_sleep_clk.clkr,
+ [GCC_USB34_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb34_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB34_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb34_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB34_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb34_tert_phy_pipe_clk_src.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
+ [GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK] = &gcc_usb3_mp_phy_pipe_0_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_0_CLK_SRC] = &gcc_usb3_mp_phy_pipe_0_clk_src.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK] = &gcc_usb3_mp_phy_pipe_1_clk.clkr,
+ [GCC_USB3_MP_PHY_PIPE_1_CLK_SRC] = &gcc_usb3_mp_phy_pipe_1_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr,
+ [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr,
+ [GCC_USB3_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb3_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK] = &gcc_usb3_tert_phy_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_AUX_CLK_SRC] = &gcc_usb3_tert_phy_aux_clk_src.clkr,
+ [GCC_USB3_TERT_PHY_COM_AUX_CLK] = &gcc_usb3_tert_phy_com_aux_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK] = &gcc_usb3_tert_phy_pipe_clk.clkr,
+ [GCC_USB3_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb3_tert_phy_pipe_clk_src.clkr,
+ [GCC_USB4_0_CFG_AHB_CLK] = &gcc_usb4_0_cfg_ahb_clk.clkr,
+ [GCC_USB4_0_DP0_CLK] = &gcc_usb4_0_dp0_clk.clkr,
+ [GCC_USB4_0_DP1_CLK] = &gcc_usb4_0_dp1_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK] = &gcc_usb4_0_master_clk.clkr,
+ [GCC_USB4_0_MASTER_CLK_SRC] = &gcc_usb4_0_master_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP0_CLK_SRC] = &gcc_usb4_0_phy_dp0_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP1_CLK_SRC] = &gcc_usb4_0_phy_dp1_clk_src.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_0_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK] = &gcc_usb4_0_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK] = &gcc_usb4_0_phy_rx0_clk.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK_SRC] = &gcc_usb4_0_phy_rx0_clk_src.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK] = &gcc_usb4_0_phy_rx1_clk.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK_SRC] = &gcc_usb4_0_phy_rx1_clk_src.clkr,
+ [GCC_USB4_0_PHY_SYS_CLK_SRC] = &gcc_usb4_0_phy_sys_clk_src.clkr,
+ [GCC_USB4_0_PHY_USB_PIPE_CLK] = &gcc_usb4_0_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK] = &gcc_usb4_0_sb_if_clk.clkr,
+ [GCC_USB4_0_SB_IF_CLK_SRC] = &gcc_usb4_0_sb_if_clk_src.clkr,
+ [GCC_USB4_0_SYS_CLK] = &gcc_usb4_0_sys_clk.clkr,
+ [GCC_USB4_0_TMU_CLK] = &gcc_usb4_0_tmu_clk.clkr,
+ [GCC_USB4_0_TMU_CLK_SRC] = &gcc_usb4_0_tmu_clk_src.clkr,
+ [GCC_USB4_0_UC_HRR_CLK] = &gcc_usb4_0_uc_hrr_clk.clkr,
+ [GCC_USB4_1_CFG_AHB_CLK] = &gcc_usb4_1_cfg_ahb_clk.clkr,
+ [GCC_USB4_1_DP0_CLK] = &gcc_usb4_1_dp0_clk.clkr,
+ [GCC_USB4_1_DP1_CLK] = &gcc_usb4_1_dp1_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK] = &gcc_usb4_1_master_clk.clkr,
+ [GCC_USB4_1_MASTER_CLK_SRC] = &gcc_usb4_1_master_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP0_CLK_SRC] = &gcc_usb4_1_phy_dp0_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP1_CLK_SRC] = &gcc_usb4_1_phy_dp1_clk_src.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_1_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK] = &gcc_usb4_1_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_1_PHY_PLL_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pll_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK] = &gcc_usb4_1_phy_rx0_clk.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK_SRC] = &gcc_usb4_1_phy_rx0_clk_src.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK] = &gcc_usb4_1_phy_rx1_clk.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK_SRC] = &gcc_usb4_1_phy_rx1_clk_src.clkr,
+ [GCC_USB4_1_PHY_SYS_CLK_SRC] = &gcc_usb4_1_phy_sys_clk_src.clkr,
+ [GCC_USB4_1_PHY_USB_PIPE_CLK] = &gcc_usb4_1_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK] = &gcc_usb4_1_sb_if_clk.clkr,
+ [GCC_USB4_1_SB_IF_CLK_SRC] = &gcc_usb4_1_sb_if_clk_src.clkr,
+ [GCC_USB4_1_SYS_CLK] = &gcc_usb4_1_sys_clk.clkr,
+ [GCC_USB4_1_TMU_CLK] = &gcc_usb4_1_tmu_clk.clkr,
+ [GCC_USB4_1_TMU_CLK_SRC] = &gcc_usb4_1_tmu_clk_src.clkr,
+ [GCC_USB4_1_UC_HRR_CLK] = &gcc_usb4_1_uc_hrr_clk.clkr,
+ [GCC_USB4_2_CFG_AHB_CLK] = &gcc_usb4_2_cfg_ahb_clk.clkr,
+ [GCC_USB4_2_DP0_CLK] = &gcc_usb4_2_dp0_clk.clkr,
+ [GCC_USB4_2_DP1_CLK] = &gcc_usb4_2_dp1_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK] = &gcc_usb4_2_master_clk.clkr,
+ [GCC_USB4_2_MASTER_CLK_SRC] = &gcc_usb4_2_master_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP0_CLK_SRC] = &gcc_usb4_2_phy_dp0_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP1_CLK_SRC] = &gcc_usb4_2_phy_dp1_clk_src.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_2_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK] = &gcc_usb4_2_phy_pcie_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK] = &gcc_usb4_2_phy_rx0_clk.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK_SRC] = &gcc_usb4_2_phy_rx0_clk_src.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK] = &gcc_usb4_2_phy_rx1_clk.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK_SRC] = &gcc_usb4_2_phy_rx1_clk_src.clkr,
+ [GCC_USB4_2_PHY_SYS_CLK_SRC] = &gcc_usb4_2_phy_sys_clk_src.clkr,
+ [GCC_USB4_2_PHY_USB_PIPE_CLK] = &gcc_usb4_2_phy_usb_pipe_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK] = &gcc_usb4_2_sb_if_clk.clkr,
+ [GCC_USB4_2_SB_IF_CLK_SRC] = &gcc_usb4_2_sb_if_clk_src.clkr,
+ [GCC_USB4_2_SYS_CLK] = &gcc_usb4_2_sys_clk.clkr,
+ [GCC_USB4_2_TMU_CLK] = &gcc_usb4_2_tmu_clk.clkr,
+ [GCC_USB4_2_TMU_CLK_SRC] = &gcc_usb4_2_tmu_clk_src.clkr,
+ [GCC_USB4_2_UC_HRR_CLK] = &gcc_usb4_2_uc_hrr_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI0C_CLK] = &gcc_video_axi0c_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static struct gdsc *gcc_glymur_gdscs[] = {
+ [GCC_PCIE_0_TUNNEL_GDSC] = &gcc_pcie_0_tunnel_gdsc,
+ [GCC_PCIE_1_TUNNEL_GDSC] = &gcc_pcie_1_tunnel_gdsc,
+ [GCC_PCIE_2_TUNNEL_GDSC] = &gcc_pcie_2_tunnel_gdsc,
+ [GCC_PCIE_3A_GDSC] = &gcc_pcie_3a_gdsc,
+ [GCC_PCIE_3A_PHY_GDSC] = &gcc_pcie_3a_phy_gdsc,
+ [GCC_PCIE_3B_GDSC] = &gcc_pcie_3b_gdsc,
+ [GCC_PCIE_3B_PHY_GDSC] = &gcc_pcie_3b_phy_gdsc,
+ [GCC_PCIE_4_GDSC] = &gcc_pcie_4_gdsc,
+ [GCC_PCIE_4_PHY_GDSC] = &gcc_pcie_4_phy_gdsc,
+ [GCC_PCIE_5_GDSC] = &gcc_pcie_5_gdsc,
+ [GCC_PCIE_5_PHY_GDSC] = &gcc_pcie_5_phy_gdsc,
+ [GCC_PCIE_6_GDSC] = &gcc_pcie_6_gdsc,
+ [GCC_PCIE_6_PHY_GDSC] = &gcc_pcie_6_phy_gdsc,
+ [GCC_UFS_PHY_GDSC] = &gcc_ufs_phy_gdsc,
+ [GCC_USB20_PRIM_GDSC] = &gcc_usb20_prim_gdsc,
+ [GCC_USB30_MP_GDSC] = &gcc_usb30_mp_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_USB30_SEC_GDSC] = &gcc_usb30_sec_gdsc,
+ [GCC_USB30_TERT_GDSC] = &gcc_usb30_tert_gdsc,
+ [GCC_USB3_MP_SS0_PHY_GDSC] = &gcc_usb3_mp_ss0_phy_gdsc,
+ [GCC_USB3_MP_SS1_PHY_GDSC] = &gcc_usb3_mp_ss1_phy_gdsc,
+ [GCC_USB4_0_GDSC] = &gcc_usb4_0_gdsc,
+ [GCC_USB4_1_GDSC] = &gcc_usb4_1_gdsc,
+ [GCC_USB4_2_GDSC] = &gcc_usb4_2_gdsc,
+ [GCC_USB_0_PHY_GDSC] = &gcc_usb_0_phy_gdsc,
+ [GCC_USB_1_PHY_GDSC] = &gcc_usb_1_phy_gdsc,
+ [GCC_USB_2_PHY_GDSC] = &gcc_usb_2_phy_gdsc,
+};
+
+static const struct qcom_reset_map gcc_glymur_resets[] = {
+ [GCC_AV1E_BCR] = { 0x9b028 },
+ [GCC_CAMERA_BCR] = { 0x26000 },
+ [GCC_DISPLAY_BCR] = { 0x27000 },
+ [GCC_EVA_BCR] = { 0x9b000 },
+ [GCC_GPU_BCR] = { 0x71000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0xbc2d0 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0xbc2dc },
+ [GCC_PCIE_0_PHY_BCR] = { 0xbc2d8 },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0xbc2e0 },
+ [GCC_PCIE_0_TUNNEL_BCR] = { 0xc8000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x7f018 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x7f024 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x7f020 },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x7f028 },
+ [GCC_PCIE_1_TUNNEL_BCR] = { 0x2e000 },
+ [GCC_PCIE_2_LINK_DOWN_BCR] = { 0x281d0 },
+ [GCC_PCIE_2_NOCSR_COM_PHY_BCR] = { 0x281dc },
+ [GCC_PCIE_2_PHY_BCR] = { 0x281d8 },
+ [GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR] = { 0x281e0 },
+ [GCC_PCIE_2_TUNNEL_BCR] = { 0xc0000 },
+ [GCC_PCIE_3A_BCR] = { 0xdc000 },
+ [GCC_PCIE_3A_LINK_DOWN_BCR] = { 0x7b0a0 },
+ [GCC_PCIE_3A_NOCSR_COM_PHY_BCR] = { 0x7b0ac },
+ [GCC_PCIE_3A_PHY_BCR] = { 0x6c000 },
+ [GCC_PCIE_3A_PHY_NOCSR_COM_PHY_BCR] = { 0x7b0b0 },
+ [GCC_PCIE_3B_BCR] = { 0x94000 },
+ [GCC_PCIE_3B_LINK_DOWN_BCR] = { 0x7a0c0 },
+ [GCC_PCIE_3B_NOCSR_COM_PHY_BCR] = { 0x7a0cc },
+ [GCC_PCIE_3B_PHY_BCR] = { 0x75000 },
+ [GCC_PCIE_3B_PHY_NOCSR_COM_PHY_BCR] = { 0x7a0c8 },
+ [GCC_PCIE_4_BCR] = { 0x88000 },
+ [GCC_PCIE_4_LINK_DOWN_BCR] = { 0x980c0 },
+ [GCC_PCIE_4_NOCSR_COM_PHY_BCR] = { 0x980cc },
+ [GCC_PCIE_4_PHY_BCR] = { 0xd3000 },
+ [GCC_PCIE_4_PHY_NOCSR_COM_PHY_BCR] = { 0x980d0 },
+ [GCC_PCIE_5_BCR] = { 0xc3000 },
+ [GCC_PCIE_5_LINK_DOWN_BCR] = { 0x850c0 },
+ [GCC_PCIE_5_NOCSR_COM_PHY_BCR] = { 0x850cc },
+ [GCC_PCIE_5_PHY_BCR] = { 0xd2000 },
+ [GCC_PCIE_5_PHY_NOCSR_COM_PHY_BCR] = { 0x850d0 },
+ [GCC_PCIE_6_BCR] = { 0x8a000 },
+ [GCC_PCIE_6_LINK_DOWN_BCR] = { 0x3a0b0 },
+ [GCC_PCIE_6_NOCSR_COM_PHY_BCR] = { 0x3a0bc },
+ [GCC_PCIE_6_PHY_BCR] = { 0xd4000 },
+ [GCC_PCIE_6_PHY_NOCSR_COM_PHY_BCR] = { 0x3a0c0 },
+ [GCC_PCIE_NOC_BCR] = { 0xba294 },
+ [GCC_PCIE_PHY_BCR] = { 0x6f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 },
+ [GCC_PCIE_RSCC_BCR] = { 0xb8000 },
+ [GCC_PDM_BCR] = { 0x33000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x28000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0xb3000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0xb4000 },
+ [GCC_QUPV3_WRAPPER_OOB_BCR] = { 0xe7000 },
+ [GCC_QUSB2PHY_HS0_MP_BCR] = { 0xca000 },
+ [GCC_QUSB2PHY_HS1_MP_BCR] = { 0xe6000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0xad024 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0xae000 },
+ [GCC_QUSB2PHY_TERT_BCR] = { 0xc9000 },
+ [GCC_QUSB2PHY_USB20_HS_BCR] = { 0xe9000 },
+ [GCC_SDCC2_BCR] = { 0xb0000 },
+ [GCC_SDCC4_BCR] = { 0xdf000 },
+ [GCC_TCSR_PCIE_BCR] = { 0x281e4 },
+ [GCC_UFS_PHY_BCR] = { 0x77004 },
+ [GCC_USB20_PRIM_BCR] = { 0xbc000 },
+ [GCC_USB30_MP_BCR] = { 0x9a00c },
+ [GCC_USB30_PRIM_BCR] = { 0x3f018 },
+ [GCC_USB30_SEC_BCR] = { 0xe200c },
+ [GCC_USB30_TERT_BCR] = { 0xe100c },
+ [GCC_USB3_MP_SS0_PHY_BCR] = { 0x54008 },
+ [GCC_USB3_MP_SS1_PHY_BCR] = { 0x54028 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0xdb000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x2c000 },
+ [GCC_USB3_PHY_TERT_BCR] = { 0xbe000 },
+ [GCC_USB3_UNIPHY_MP0_BCR] = { 0x54000 },
+ [GCC_USB3_UNIPHY_MP1_BCR] = { 0x54020 },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0xdb004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x2c004 },
+ [GCC_USB3PHY_PHY_TERT_BCR] = { 0xbe004 },
+ [GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x54004 },
+ [GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x54024 },
+ [GCC_USB4_0_BCR] = { 0x2b004 },
+ [GCC_USB4_0_DP0_PHY_PRIM_BCR] = { 0xdb010 },
+ [GCC_USB4_1_BCR] = { 0x2d004 },
+ [GCC_USB4_2_BCR] = { 0xe0004 },
+ [GCC_USB_0_PHY_BCR] = { 0xdb020 },
+ [GCC_USB_1_PHY_BCR] = { 0x2c020 },
+ [GCC_USB_2_PHY_BCR] = { 0xbe020 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x3201c, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x32044, 2 },
+ [GCC_VIDEO_BCR] = { 0x32000 },
+};
+
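+/*
+ * QUP serial-engine RCGs with hardware Dynamic Frequency Switching:
+ * DEFINE_RCG_DFS() registers them so the common code can read the
+ * per-level DFS registers at probe and build each frequency table from
+ * hardware instead of a static freq_tbl.
+ */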
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_oob_qspi_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_qspi_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s7_clk_src),
+};
+
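+/*
+ * Branch registers that must stay always-on: the common probe code
+ * sets the enable bit of every CBCR listed here rather than exposing
+ * the branches as gateable clocks.
+ */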
+static u32 gcc_glymur_critical_cbcrs[] = {
+ 0x26004, /* GCC_CAMERA_AHB_CLK */
+ 0x26040, /* GCC_CAMERA_XO_CLK */
+ 0x27004, /* GCC_DISP_AHB_CLK */
+ 0x71004, /* GCC_GPU_CFG_AHB_CLK */
+ 0x32004, /* GCC_VIDEO_AHB_CLK */
+ 0x32058, /* GCC_VIDEO_XO_CLK */
+};
+
+static const struct regmap_config gcc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f8ff0,
+ .fast_io = true,
+};
+
+static void clk_glymur_regs_configure(struct device *dev, struct regmap *regmap)
+{
+ /* FORCE_MEM_CORE_ON for ufs phy ice core clocks */
+ qcom_branch_set_force_mem_core(regmap, gcc_ufs_phy_ice_core_clk, true);
+}
+
+static struct qcom_cc_driver_data gcc_glymur_driver_data = {
+ .clk_cbcrs = gcc_glymur_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(gcc_glymur_critical_cbcrs),
+ .dfs_rcgs = gcc_dfs_clocks,
+ .num_dfs_rcgs = ARRAY_SIZE(gcc_dfs_clocks),
+ .clk_regs_configure = clk_glymur_regs_configure,
+};
+
+static const struct qcom_cc_desc gcc_glymur_desc = {
+ .config = &gcc_glymur_regmap_config,
+ .clks = gcc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(gcc_glymur_clocks),
+ .resets = gcc_glymur_resets,
+ .num_resets = ARRAY_SIZE(gcc_glymur_resets),
+ .gdscs = gcc_glymur_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_glymur_gdscs),
+ .driver_data = &gcc_glymur_driver_data,
+};
+
+static const struct of_device_id gcc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_glymur_match_table);
+
+static int gcc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &gcc_glymur_desc);
+}
+
+static struct platform_driver gcc_glymur_driver = {
+ .probe = gcc_glymur_probe,
+ .driver = {
+ .name = "gcc-glymur",
+ .of_match_table = gcc_glymur_match_table,
+ },
+};
+
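+/*
+ * Register at subsys_initcall so the clock provider is available
+ * before the devices consuming these clocks start to probe.
+ */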
+static int __init gcc_glymur_init(void)
+{
+ return platform_driver_register(&gcc_glymur_driver);
+}
+subsys_initcall(gcc_glymur_init);
+
+static void __exit gcc_glymur_exit(void)
+{
+ platform_driver_unregister(&gcc_glymur_driver);
+}
+module_exit(gcc_glymur_exit);
+
+MODULE_DESCRIPTION("QTI GCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/gcc-ipq5424.c b/drivers/clk/qcom/gcc-ipq5424.c
index 3d42f3d85c7a..35af6ffeeb85 100644
--- a/drivers/clk/qcom/gcc-ipq5424.c
+++ b/drivers/clk/qcom/gcc-ipq5424.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018,2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/clk-provider.h>
@@ -79,6 +79,20 @@ static struct clk_fixed_factor gpll0_div2 = {
},
};
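+
+/*
+ * gpll0_out_aux is a read-only postdiv tap on GPLL0:
+ * clk_alpha_pll_postdiv_ro_ops only reports the divider programmed by
+ * boot firmware and never reprograms it.
+ */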
+static struct clk_alpha_pll_postdiv gpll0_out_aux = {
+ .offset = 0x20000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "gpll0_out_aux",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0.clkr.hw
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
static struct clk_alpha_pll gpll2 = {
.offset = 0x21000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_NSS_HUAYRA],
@@ -2934,6 +2948,7 @@ static struct clk_regmap *gcc_ipq5424_clocks[] = {
[GPLL2] = &gpll2.clkr,
[GPLL2_OUT_MAIN] = &gpll2_out_main.clkr,
[GPLL4] = &gpll4.clkr,
+ [GPLL0_OUT_AUX] = &gpll0_out_aux.clkr,
};
static const struct qcom_reset_map gcc_ipq5424_resets[] = {
@@ -3250,6 +3265,16 @@ static const struct qcom_icc_hws_data icc_ipq5424_hws[] = {
{ MASTER_ANOC_PCIE3, SLAVE_ANOC_PCIE3, GCC_ANOC_PCIE3_2LANE_M_CLK },
{ MASTER_CNOC_PCIE3, SLAVE_CNOC_PCIE3, GCC_CNOC_PCIE3_2LANE_S_CLK },
{ MASTER_CNOC_USB, SLAVE_CNOC_USB, GCC_CNOC_USB_CLK },
+ { MASTER_NSSNOC_NSSCC, SLAVE_NSSNOC_NSSCC, GCC_NSSNOC_NSSCC_CLK },
+ { MASTER_NSSNOC_SNOC_0, SLAVE_NSSNOC_SNOC_0, GCC_NSSNOC_SNOC_CLK },
+ { MASTER_NSSNOC_SNOC_1, SLAVE_NSSNOC_SNOC_1, GCC_NSSNOC_SNOC_1_CLK },
+ { MASTER_NSSNOC_PCNOC_1, SLAVE_NSSNOC_PCNOC_1, GCC_NSSNOC_PCNOC_1_CLK },
+ { MASTER_NSSNOC_QOSGEN_REF, SLAVE_NSSNOC_QOSGEN_REF, GCC_NSSNOC_QOSGEN_REF_CLK },
+ { MASTER_NSSNOC_TIMEOUT_REF, SLAVE_NSSNOC_TIMEOUT_REF, GCC_NSSNOC_TIMEOUT_REF_CLK },
+ { MASTER_NSSNOC_XO_DCD, SLAVE_NSSNOC_XO_DCD, GCC_NSSNOC_XO_DCD_CLK },
+ { MASTER_NSSNOC_ATB, SLAVE_NSSNOC_ATB, GCC_NSSNOC_ATB_CLK },
+ { MASTER_CNOC_LPASS_CFG, SLAVE_CNOC_LPASS_CFG, GCC_CNOC_LPASS_CFG_CLK },
+ { MASTER_SNOC_LPASS, SLAVE_SNOC_LPASS, GCC_SNOC_LPASS_CLK },
};
static const struct of_device_id gcc_ipq5424_match_table[] = {
@@ -3284,6 +3309,7 @@ static const struct qcom_cc_desc gcc_ipq5424_desc = {
.num_clk_hws = ARRAY_SIZE(gcc_ipq5424_hws),
.icc_hws = icc_ipq5424_hws,
.num_icc_hws = ARRAY_SIZE(icc_ipq5424_hws),
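+	/* First node ID handed to the interconnect-clk framework */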
+ .icc_first_node_id = IPQ_APPS_ID,
};
static int gcc_ipq5424_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/gcc-ipq6018.c b/drivers/clk/qcom/gcc-ipq6018.c
index d861191b0c85..d4fc491a18b2 100644
--- a/drivers/clk/qcom/gcc-ipq6018.c
+++ b/drivers/clk/qcom/gcc-ipq6018.c
@@ -511,15 +511,23 @@ static struct clk_rcg2 apss_ahb_clk_src = {
},
};
-static const struct freq_tbl ftbl_nss_port5_rx_clk_src[] = {
- F(24000000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_RX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_RX, 5, 0, 0),
- F(78125000, P_UNIPHY1_RX, 4, 0, 0),
- F(125000000, P_UNIPHY1_RX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_RX, 1, 0, 0),
- F(156250000, P_UNIPHY1_RX, 2, 0, 0),
- F(312500000, P_UNIPHY1_RX, 1, 0, 0),
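+/*
+ * 25 MHz and 125 MHz can be generated from either UNIPHY RX parent, so
+ * the single-parent freq_tbl becomes a freq_multi_tbl: FM() entries
+ * carry the alternative parent configurations and clk_rcg2_fm_ops
+ * picks one when the rate is set.
+ */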
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_25[] = {
+ C(P_UNIPHY1_RX, 12.5, 0, 0),
+ C(P_UNIPHY0_RX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_rx_clk_src_125[] = {
+ C(P_UNIPHY1_RX, 2.5, 0, 0),
+ C(P_UNIPHY0_RX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_rx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_RX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_RX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_RX, 1, 0, 0),
{ }
};
@@ -547,26 +555,34 @@ gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_rx_clk_src = {
.cmd_rcgr = 0x68060,
- .freq_tbl = ftbl_nss_port5_rx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_rx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_rx_clk_src",
.parent_data = gcc_xo_uniphy0_rx_tx_uniphy1_rx_tx_ubi32_bias,
.num_parents = 7,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
-static const struct freq_tbl ftbl_nss_port5_tx_clk_src[] = {
- F(24000000, P_XO, 1, 0, 0),
- F(25000000, P_UNIPHY1_TX, 12.5, 0, 0),
- F(25000000, P_UNIPHY0_TX, 5, 0, 0),
- F(78125000, P_UNIPHY1_TX, 4, 0, 0),
- F(125000000, P_UNIPHY1_TX, 2.5, 0, 0),
- F(125000000, P_UNIPHY0_TX, 1, 0, 0),
- F(156250000, P_UNIPHY1_TX, 2, 0, 0),
- F(312500000, P_UNIPHY1_TX, 1, 0, 0),
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_25[] = {
+ C(P_UNIPHY1_TX, 12.5, 0, 0),
+ C(P_UNIPHY0_TX, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_port5_tx_clk_src_125[] = {
+ C(P_UNIPHY1_TX, 2.5, 0, 0),
+ C(P_UNIPHY0_TX, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_port5_tx_clk_src[] = {
+ FMS(24000000, P_XO, 1, 0, 0),
+ FM(25000000, ftbl_nss_port5_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_TX, 4, 0, 0),
+ FM(125000000, ftbl_nss_port5_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_TX, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_TX, 1, 0, 0),
{ }
};
@@ -594,14 +610,14 @@ gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map[] = {
static struct clk_rcg2 nss_port5_tx_clk_src = {
.cmd_rcgr = 0x68068,
- .freq_tbl = ftbl_nss_port5_tx_clk_src,
+ .freq_multi_tbl = ftbl_nss_port5_tx_clk_src,
.hid_width = 5,
.parent_map = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias_map,
.clkr.hw.init = &(struct clk_init_data){
.name = "nss_port5_tx_clk_src",
.parent_data = gcc_xo_uniphy0_tx_rx_uniphy1_tx_rx_ubi32_bias,
.num_parents = 7,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_fm_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8917.c b/drivers/clk/qcom/gcc-msm8917.c
index 3e2a2ae2ee6e..0a1aa623cd49 100644
--- a/drivers/clk/qcom/gcc-msm8917.c
+++ b/drivers/clk/qcom/gcc-msm8917.c
@@ -37,6 +37,8 @@ enum {
DT_SLEEP_CLK,
DT_DSI0PLL,
DT_DSI0PLL_BYTE,
+ DT_DSI1PLL,
+ DT_DSI1PLL_BYTE,
};
enum {
@@ -48,6 +50,8 @@ enum {
P_GPLL6,
P_DSI0PLL,
P_DSI0PLL_BYTE,
+ P_DSI1PLL,
+ P_DSI1PLL_BYTE,
};
static struct clk_alpha_pll gpll0_sleep_clk_src = {
@@ -102,7 +106,11 @@ static const struct pll_vco gpll3_p_vco[] = {
{ 700000000, 1400000000, 0 },
};
-static const struct alpha_pll_config gpll3_early_config = {
+static const struct pll_vco gpll3_p_vco_msm8937[] = {
+ { 525000000, 1066000000, 0 },
+};
+
+static struct alpha_pll_config gpll3_early_config = {
.l = 63,
.config_ctl_val = 0x4001055b,
.early_output_mask = 0,
@@ -273,6 +281,19 @@ static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
{ }
};
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x0200c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.cmd_rcgr = 0x03000,
.hid_width = 5,
@@ -351,6 +372,19 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
}
};
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x18000,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
@@ -362,6 +396,20 @@ static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
{ }
};
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x02024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.cmd_rcgr = 0x03014,
.hid_width = 5,
@@ -446,6 +494,20 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
}
};
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
F(3686400, P_GPLL0, 1, 72, 15625),
F(7372800, P_GPLL0, 1, 144, 15625),
@@ -525,11 +587,19 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
static const struct parent_map gcc_byte0_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL_BYTE, 1 },
+ { P_DSI1PLL_BYTE, 3 },
+};
+
+static const struct parent_map gcc_byte1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 3 },
+ { P_DSI1PLL_BYTE, 1 },
};
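+
+/*
+ * BYTE0 and BYTE1 see the two DSI PLLs on swapped mux selects: each
+ * RCG has its own PLL on select 1 and the other controller's PLL on
+ * select 3, so either PLL can drive either interface in dual-DSI use.
+ */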
static const struct clk_parent_data gcc_byte_data[] = {
{ .index = DT_XO },
{ .index = DT_DSI0PLL_BYTE },
+ { .index = DT_DSI1PLL_BYTE },
};
static struct clk_rcg2 byte0_clk_src = {
@@ -545,6 +615,19 @@ static struct clk_rcg2 byte0_clk_src = {
}
};
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x4d0b0,
+ .hid_width = 5,
+ .parent_map = gcc_byte1_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = gcc_byte_data,
+ .num_parents = ARRAY_SIZE(gcc_byte_data),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct freq_tbl ftbl_camss_gp_clk_src[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
@@ -642,6 +725,17 @@ static const struct freq_tbl ftbl_cpp_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_cpp_clk_src_msm8937[] = {
+ F(133330000, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+	F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ F(308570000, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 cpp_clk_src = {
.cmd_rcgr = 0x58018,
.hid_width = 5,
@@ -655,6 +749,13 @@ static struct clk_rcg2 cpp_clk_src = {
}
};
+static struct clk_init_data vcodec0_clk_src_init_msm8937 = {
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_cpp_data,
+ .num_parents = ARRAY_SIZE(gcc_cpp_data),
+ .ops = &clk_rcg2_ops,
+};
+
static const struct freq_tbl ftbl_crypto_clk_src[] = {
F(50000000, P_GPLL0, 16, 0, 0),
F(80000000, P_GPLL0, 10, 0, 0),
@@ -730,6 +831,13 @@ static const struct freq_tbl ftbl_csi_phytimer_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_csi_phytimer_clk_src_msm8937[] = {
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
static struct clk_rcg2 csi0phytimer_clk_src = {
.cmd_rcgr = 0x4e000,
.hid_width = 5,
@@ -774,6 +882,19 @@ static struct clk_rcg2 esc0_clk_src = {
}
};
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x4d0a8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_xo_gpll0_out_aux_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = gcc_xo_gpll0_data,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_data),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static const struct parent_map gcc_gfx3d_map[] = {
{ P_XO, 0 },
{ P_GPLL0, 1 },
@@ -817,6 +938,25 @@ static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_gfx3d_clk_src_msm8937[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(216000000, P_GPLL6, 5, 0, 0),
+ F(228570000, P_GPLL0, 3.5, 0, 0),
+ F(240000000, P_GPLL6, 4.5, 0, 0),
+ F(266670000, P_GPLL0, 3, 0, 0),
+ F(300000000, P_GPLL3, 1, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(375000000, P_GPLL3, 1, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(450000000, P_GPLL3, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 gfx3d_clk_src = {
.cmd_rcgr = 0x59000,
.hid_width = 5,
@@ -973,21 +1113,29 @@ static struct clk_rcg2 mdp_clk_src = {
}
};
-static const struct parent_map gcc_pclk_map[] = {
+static const struct parent_map gcc_pclk0_map[] = {
{ P_XO, 0 },
{ P_DSI0PLL, 1 },
+ { P_DSI1PLL, 3 },
+};
+
+static const struct parent_map gcc_pclk1_map[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 3 },
+ { P_DSI1PLL, 1 },
};
static const struct clk_parent_data gcc_pclk_data[] = {
{ .index = DT_XO },
{ .index = DT_DSI0PLL },
+ { .index = DT_DSI1PLL },
};
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x4d000,
.hid_width = 5,
.mnd_width = 8,
- .parent_map = gcc_pclk_map,
+ .parent_map = gcc_pclk0_map,
.clkr.hw.init = &(struct clk_init_data) {
.name = "pclk0_clk_src",
.parent_data = gcc_pclk_data,
@@ -997,6 +1145,20 @@ static struct clk_rcg2 pclk0_clk_src = {
}
};
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x4d0b8,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_pclk1_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = gcc_pclk_data,
+ .num_parents = ARRAY_SIZE(gcc_pclk_data),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static const struct freq_tbl ftbl_pdm2_clk_src[] = {
F(64000000, P_GPLL0, 12.5, 0, 0),
{ }
@@ -1108,6 +1270,14 @@ static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_usb_hs_system_clk_src_msm8937[] = {
+ F(57142857, P_GPLL0, 14, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 usb_hs_system_clk_src = {
.cmd_rcgr = 0x41010,
.hid_width = 5,
@@ -1132,6 +1302,15 @@ static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_vcodec0_clk_src_msm8937[] = {
+ F(166150000, P_GPLL6, 6.5, 0, 0),
+ F(240000000, P_GPLL6, 4.5, 0, 0),
+ F(308571428, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ { }
+};
+
static struct clk_rcg2 vcodec0_clk_src = {
.cmd_rcgr = 0x4c000,
.hid_width = 5,
@@ -1160,6 +1339,23 @@ static const struct freq_tbl ftbl_vfe_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_vfe_clk_src_msm8937[] = {
+ F(50000000, P_GPLL0, 16, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(133333333, P_GPLL0, 6, 0, 0),
+ F(160000000, P_GPLL0, 5, 0, 0),
+ F(177777778, P_GPLL0, 4.5, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266666667, P_GPLL0, 3, 0, 0),
+ F(308571428, P_GPLL6, 3.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(360000000, P_GPLL6, 3, 0, 0),
+ F(400000000, P_GPLL0, 2, 0, 0),
+ F(432000000, P_GPLL6, 2.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 vfe0_clk_src = {
.cmd_rcgr = 0x58000,
.hid_width = 5,
@@ -1269,6 +1465,24 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
}
};
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x02008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.halt_reg = 0x03010,
.halt_check = BRANCH_HALT,
@@ -1377,6 +1591,42 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
}
};
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x18020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x02004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x02004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.halt_reg = 0x0300c,
.halt_check = BRANCH_HALT,
@@ -1485,6 +1735,24 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
}
};
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.halt_reg = 0x0203c,
.halt_check = BRANCH_HALT,
@@ -2521,6 +2789,24 @@ static struct clk_branch gcc_mdss_byte0_clk = {
}
};
+static struct clk_branch gcc_mdss_byte1_clk = {
+ .halt_reg = 0x4d0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_esc0_clk = {
.halt_reg = 0x4d098,
.halt_check = BRANCH_HALT,
@@ -2539,6 +2825,24 @@ static struct clk_branch gcc_mdss_esc0_clk = {
}
};
+static struct clk_branch gcc_mdss_esc1_clk = {
+ .halt_reg = 0x4d09c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_mdp_clk = {
.halt_reg = 0x4d088,
.halt_check = BRANCH_HALT,
@@ -2575,6 +2879,24 @@ static struct clk_branch gcc_mdss_pclk0_clk = {
}
};
+static struct clk_branch gcc_mdss_pclk1_clk = {
+ .halt_reg = 0x4d0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_mdss_vsync_clk = {
.halt_reg = 0x4d090,
.halt_check = BRANCH_HALT,
@@ -2632,6 +2954,24 @@ static struct clk_branch gcc_oxili_ahb_clk = {
}
};
+static struct clk_branch gcc_oxili_aon_clk = {
+ .halt_reg = 0x5904c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5904c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_aon_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+ },
+};
+
static struct clk_branch gcc_oxili_gfx3d_clk = {
.halt_reg = 0x59020,
.halt_check = BRANCH_HALT,
@@ -2650,6 +2990,19 @@ static struct clk_branch gcc_oxili_gfx3d_clk = {
}
};
+static struct clk_branch gcc_oxili_timer_clk = {
+ .halt_reg = 0x59040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_oxili_timer_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_pdm2_clk = {
.halt_reg = 0x4400c,
.halt_check = BRANCH_HALT,
@@ -3027,6 +3380,28 @@ static struct gdsc oxili_gx_gdsc = {
.flags = CLAMP_IO,
};
+static struct gdsc oxili_gx_gdsc_msm8937 = {
+ .gdscr = 0x5901c,
+ .clamp_io_ctrl = 0x5b00c,
+ .cxcs = (unsigned int []){ 0x59000 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = CLAMP_IO,
+};
+
+static struct gdsc oxili_cx_gdsc = {
+ .gdscr = 0x59044,
+ .cxcs = (unsigned int []){ 0x59020 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct gdsc cpp_gdsc = {
.gdscr = 0x58078,
.cxcs = (unsigned int []){ 0x5803c, 0x58064 },
@@ -3207,6 +3582,188 @@ static struct clk_regmap *gcc_msm8917_clocks[] = {
[GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
};
+static struct clk_regmap *gcc_msm8937_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_EARLY] = &gpll0_early.clkr,
+ [GPLL0_SLEEP_CLK_SRC] = &gpll0_sleep_clk_src.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_EARLY] = &gpll3_early.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_EARLY] = &gpll4_early.clkr,
+ [GPLL6] = &gpll6,
+ [GPLL6_EARLY] = &gpll6_early.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [MSM8937_BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [MSM8937_BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [MSM8937_BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [MSM8937_BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [MSM8937_BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [MSM8937_ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [MSM8937_PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [MSM8937_GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [MSM8937_GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [MSM8937_GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [MSM8937_GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI2PHY_CLK] = &gcc_camss_csi2phy_clk.clkr,
+ [GCC_CAMSS_CSI2PIX_CLK] = &gcc_camss_csi2pix_clk.clkr,
+ [GCC_CAMSS_CSI2RDI_CLK] = &gcc_camss_csi2rdi_clk.clkr,
+ [GCC_CAMSS_CSI2_AHB_CLK] = &gcc_camss_csi2_ahb_clk.clkr,
+ [GCC_CAMSS_CSI2_CLK] = &gcc_camss_csi2_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_CSI_VFE1_CLK] = &gcc_camss_csi_vfe1_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AHB_CLK] = &gcc_camss_vfe0_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_AXI_CLK] = &gcc_camss_vfe0_axi_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE1_AHB_CLK] = &gcc_camss_vfe1_ahb_clk.clkr,
+ [GCC_CAMSS_VFE1_AXI_CLK] = &gcc_camss_vfe1_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_CLK] = &gcc_camss_vfe1_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [MSM8937_GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [MSM8937_GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [MSM8937_GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [MSM8937_GCC_OXILI_AON_CLK] = &gcc_oxili_aon_clk.clkr,
+ [GCC_OXILI_GFX3D_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [MSM8937_GCC_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE1_TBU_CLK] = &gcc_vfe1_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+};
+
static const struct qcom_reset_map gcc_msm8917_resets[] = {
[GCC_CAMSS_MICRO_BCR] = { 0x56008 },
[GCC_MSS_BCR] = { 0x71000 },
@@ -3234,6 +3791,18 @@ static struct gdsc *gcc_msm8917_gdscs[] = {
[VFE1_GDSC] = &vfe1_gdsc,
};
+static struct gdsc *gcc_msm8937_gdscs[] = {
+ [CPP_GDSC] = &cpp_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [OXILI_GX_GDSC] = &oxili_gx_gdsc_msm8937,
+ [MSM8937_OXILI_CX_GDSC] = &oxili_cx_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_GDSC] = &venus_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+};
+
static const struct qcom_cc_desc gcc_msm8917_desc = {
.config = &gcc_msm8917_regmap_config,
.clks = gcc_msm8917_clocks,
@@ -3254,6 +3823,41 @@ static const struct qcom_cc_desc gcc_qm215_desc = {
.num_gdscs = ARRAY_SIZE(gcc_msm8917_gdscs),
};
+static const struct qcom_cc_desc gcc_msm8937_desc = {
+ .config = &gcc_msm8917_regmap_config,
+ .clks = gcc_msm8937_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8937_clocks),
+ .resets = gcc_msm8917_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8917_resets),
+ .gdscs = gcc_msm8937_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8937_gdscs),
+};
+
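+/*
+ * MSM8937 reuses the MSM8917 register layout, but a few PLL and RCG
+ * parameters differ; patch the static tables, which default to the
+ * MSM8917 values, before registration.
+ */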
+static void msm8937_clock_override(void)
+{
+ /* GPLL3 750MHz configuration */
+ gpll3_early_config.l = 47;
+ gpll3_early.vco_table = gpll3_p_vco_msm8937;
+ gpll3_early.num_vco = ARRAY_SIZE(gpll3_p_vco_msm8937);
+
+	/*
+	 * Switch vcodec0 to the MSM8937-specific parent map and init data.
+	 */
+ vcodec0_clk_src.parent_map = gcc_cpp_map;
+ vcodec0_clk_src.clkr.hw.init = &vcodec0_clk_src_init_msm8937;
+
+	/*
+	 * Switch the clocks below to their MSM8937-specific frequency tables.
+	 */
+ vfe0_clk_src.freq_tbl = ftbl_vfe_clk_src_msm8937;
+ vfe1_clk_src.freq_tbl = ftbl_vfe_clk_src_msm8937;
+ cpp_clk_src.freq_tbl = ftbl_cpp_clk_src_msm8937;
+ vcodec0_clk_src.freq_tbl = ftbl_vcodec0_clk_src_msm8937;
+ csi0phytimer_clk_src.freq_tbl = ftbl_csi_phytimer_clk_src_msm8937;
+ csi1phytimer_clk_src.freq_tbl = ftbl_csi_phytimer_clk_src_msm8937;
+ usb_hs_system_clk_src.freq_tbl = ftbl_usb_hs_system_clk_src_msm8937;
+}
+
static int gcc_msm8917_probe(struct platform_device *pdev)
{
struct regmap *regmap;
@@ -3261,8 +3865,12 @@ static int gcc_msm8917_probe(struct platform_device *pdev)
gcc_desc = of_device_get_match_data(&pdev->dev);
- if (gcc_desc == &gcc_qm215_desc)
+ if (gcc_desc == &gcc_qm215_desc) {
gfx3d_clk_src.parent_map = gcc_gfx3d_map_qm215;
+ } else if (gcc_desc == &gcc_msm8937_desc) {
+ msm8937_clock_override();
+ gfx3d_clk_src.freq_tbl = ftbl_gfx3d_clk_src_msm8937;
+ }
regmap = qcom_cc_map(pdev, gcc_desc);
if (IS_ERR(regmap))
@@ -3276,6 +3884,7 @@ static int gcc_msm8917_probe(struct platform_device *pdev)
static const struct of_device_id gcc_msm8917_match_table[] = {
{ .compatible = "qcom,gcc-msm8917", .data = &gcc_msm8917_desc },
{ .compatible = "qcom,gcc-qm215", .data = &gcc_qm215_desc },
+ { .compatible = "qcom,gcc-msm8937", .data = &gcc_msm8937_desc },
{},
};
MODULE_DEVICE_TABLE(of, gcc_msm8917_match_table);
diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c
index 5ca003c9bfba..efc75a3814ab 100644
--- a/drivers/clk/qcom/gcc-qcs404.c
+++ b/drivers/clk/qcom/gcc-qcs404.c
@@ -2754,7 +2754,7 @@ static struct clk_regmap *gcc_qcs404_clocks[] = {
[GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
[GCC_DCC_XO_CLK] = &gcc_dcc_xo_clk.clkr,
[GCC_WCSS_Q6_AHB_CLK] = &gcc_wdsp_q6ss_ahbs_clk.clkr,
- [GCC_WCSS_Q6_AXIM_CLK] = &gcc_wdsp_q6ss_axim_clk.clkr,
+ [GCC_WCSS_Q6_AXIM_CLK] = &gcc_wdsp_q6ss_axim_clk.clkr,
};
diff --git a/drivers/clk/qcom/gcc-qcs615.c b/drivers/clk/qcom/gcc-qcs615.c
index 9695446bc2a3..5b3b8dd4f114 100644
--- a/drivers/clk/qcom/gcc-qcs615.c
+++ b/drivers/clk/qcom/gcc-qcs615.c
@@ -784,7 +784,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
.name = "gcc_sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_1,
.num_parents = ARRAY_SIZE(gcc_parent_data_1),
- .ops = &clk_rcg2_floor_ops,
+ .ops = &clk_rcg2_shared_floor_ops,
},
};
@@ -806,7 +806,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
.name = "gcc_sdcc1_ice_core_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = ARRAY_SIZE(gcc_parent_data_0),
- .ops = &clk_rcg2_floor_ops,
+ .ops = &clk_rcg2_shared_floor_ops,
},
};
@@ -830,7 +830,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_8,
.num_parents = ARRAY_SIZE(gcc_parent_data_8),
- .ops = &clk_rcg2_floor_ops,
+ .ops = &clk_rcg2_shared_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sc8280xp.c b/drivers/clk/qcom/gcc-sc8280xp.c
index f27d0003f427..2ab111585d7f 100644
--- a/drivers/clk/qcom/gcc-sc8280xp.c
+++ b/drivers/clk/qcom/gcc-sc8280xp.c
@@ -2224,7 +2224,6 @@ static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = {
};
static const struct freq_tbl ftbl_gcc_usb4_1_master_clk_src[] = {
- F(85714286, P_GCC_GPLL0_OUT_EVEN, 3.5, 0, 0),
F(175000000, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
F(350000000, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
{ }
@@ -6775,10 +6774,6 @@ static struct gdsc pcie_1_tunnel_gdsc = {
.flags = VOTABLE | RETAIN_FF_ENABLE,
};
-/*
- * The Qualcomm PCIe driver does not yet implement suspend so to keep the
- * PCIe power domains always-on for now.
- */
static struct gdsc pcie_2a_gdsc = {
.gdscr = 0x9d004,
.collapse_ctrl = 0x52128,
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 01a76f1b5b4c..20253a06a583 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -2247,6 +2247,45 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
},
};
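+
+/*
+ * HLOS vote branches: with BRANCH_VOTED, disabling only drops this
+ * processor's vote, so the clock can keep running while other masters
+ * hold theirs and the halt bit is not polled on disable.
+ */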
+static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
+ .halt_reg = 0x7d014,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos1_vote_lpass_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos1_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7d048,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos1_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos2_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7e048,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x7e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "hlos2_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct gdsc ufs_gdsc = {
.gdscr = 0x75004,
.gds_hw_ctrl = 0x0,
@@ -2277,6 +2316,33 @@ static struct gdsc pcie_0_gdsc = {
.flags = VOTABLE,
};
+static struct gdsc hlos1_vote_turing_adsp_gdsc = {
+ .gdscr = 0x7d04c,
+ .pd = {
+ .name = "hlos1_vote_turing_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos2_vote_turing_adsp_gdsc = {
+ .gdscr = 0x7e04c,
+ .pd = {
+ .name = "hlos2_vote_turing_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_lpass_adsp_gdsc = {
+ .gdscr = 0x7d034,
+ .pd = {
+ .name = "hlos1_vote_lpass_adsp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
static struct clk_hw *gcc_sdm660_hws[] = {
&xo.hw,
&gpll0_early_div.hw,
@@ -2409,12 +2475,18 @@ static struct clk_regmap *gcc_sdm660_clocks[] = {
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
[USB3_PHY_AUX_CLK_SRC] = &usb3_phy_aux_clk_src.clkr,
+ [GCC_HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
+ [GCC_HLOS1_VOTE_TURING_ADSP_SMMU_CLK] = &hlos1_vote_turing_adsp_smmu_clk.clkr,
+ [GCC_HLOS2_VOTE_TURING_ADSP_SMMU_CLK] = &hlos2_vote_turing_adsp_smmu_clk.clkr,
};
static struct gdsc *gcc_sdm660_gdscs[] = {
[UFS_GDSC] = &ufs_gdsc,
[USB_30_GDSC] = &usb_30_gdsc,
[PCIE_0_GDSC] = &pcie_0_gdsc,
+ [HLOS1_VOTE_TURING_ADSP_GDSC] = &hlos1_vote_turing_adsp_gdsc,
+ [HLOS2_VOTE_TURING_ADSP_GDSC] = &hlos2_vote_turing_adsp_gdsc,
+ [HLOS1_VOTE_LPASS_ADSP_GDSC] = &hlos1_vote_lpass_adsp_gdsc,
};
static const struct qcom_reset_map gcc_sdm660_resets[] = {
diff --git a/drivers/clk/qcom/gcc-sm8750.c b/drivers/clk/qcom/gcc-sm8750.c
index 8092dd6b37b5..def86b71a3da 100644
--- a/drivers/clk/qcom/gcc-sm8750.c
+++ b/drivers/clk/qcom/gcc-sm8750.c
@@ -1012,6 +1012,7 @@ static struct clk_rcg2 gcc_qupv3_wrap2_s7_clk_src = {
static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(400000, P_BI_TCXO, 12, 1, 4),
F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
diff --git a/drivers/clk/qcom/gcc-x1e80100.c b/drivers/clk/qcom/gcc-x1e80100.c
index 301fc9fc32d8..b63c8abdd2fc 100644
--- a/drivers/clk/qcom/gcc-x1e80100.c
+++ b/drivers/clk/qcom/gcc-x1e80100.c
@@ -32,6 +32,33 @@ enum {
DT_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE,
DT_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE,
DT_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE,
+ DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ DT_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ DT_QUSB4PHY_2_GCC_USB4_RX1_CLK,
+ DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
};
enum {
@@ -42,10 +69,40 @@ enum {
P_GCC_GPLL7_OUT_MAIN,
P_GCC_GPLL8_OUT_MAIN,
P_GCC_GPLL9_OUT_MAIN,
+ P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC,
+ P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC,
+ P_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC,
+ P_QUSB4PHY_0_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_0_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_1_GCC_USB4_RX1_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX0_CLK,
+ P_QUSB4PHY_2_GCC_USB4_RX1_CLK,
P_SLEEP_CLK,
P_USB3_PHY_0_WRAPPER_GCC_USB30_PIPE_CLK,
P_USB3_PHY_1_WRAPPER_GCC_USB30_PIPE_CLK,
P_USB3_PHY_2_WRAPPER_GCC_USB30_PIPE_CLK,
+ P_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK,
+ P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK,
};
static struct clk_alpha_pll gcc_gpll0 = {
@@ -320,6 +377,342 @@ static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
{ }
};
+static const struct clk_parent_data gcc_parent_data_13[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_14[] = {
+ { .index = DT_GCC_USB4_0_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_15[] = {
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_16[] = {
+ { .index = DT_GCC_USB4_0_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_17[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_18[] = {
+ { .index = DT_QUSB4PHY_0_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_19[] = {
+ { .index = DT_GCC_USB4_0_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_0_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_20[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_21[] = {
+ { .index = DT_GCC_USB4_1_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_22[] = {
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_23[] = {
+ { .index = DT_GCC_USB4_1_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_24[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_25[] = {
+ { .index = DT_QUSB4PHY_1_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_26[] = {
+ { .index = DT_GCC_USB4_1_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_1_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_27[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP0_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_28[] = {
+ { .index = DT_GCC_USB4_2_PHY_DP1_GMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_29[] = {
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_30[] = {
+ { .index = DT_GCC_USB4_2_PHY_PCIE_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
+static const struct clk_parent_data gcc_parent_data_31[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX0_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_32[] = {
+ { .index = DT_QUSB4PHY_2_GCC_USB4_RX1_CLK },
+ { .index = DT_BI_TCXO },
+};
+
+static const struct clk_parent_data gcc_parent_data_33[] = {
+ { .index = DT_GCC_USB4_2_PHY_SYS_PIPEGMUX_CLK_SRC },
+ { .index = DT_USB4_2_PHY_GCC_USB4_PCIE_PIPE_CLK },
+};
+
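+/*
+ * The USB4 PHY clock sources below are two-input pipe muxes handled by
+ * clk_regmap_phy_mux_ops, selecting between the two inputs listed in the
+ * matching gcc_parent_data_* array (typically a gmux/PHY output and an
+ * alternate or always-running source such as TCXO).
+ */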
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_dp0_clk_src = {
+ .reg = 0x9f06c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_13,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_13),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_dp1_clk_src = {
+ .reg = 0x9f114,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_14,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_14),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x9f0d4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_15,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_15),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x9f104,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_16,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_16),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_rx0_clk_src = {
+ .reg = 0x9f0ac,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_17,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_17),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_rx1_clk_src = {
+ .reg = 0x9f0bc,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_18,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_18),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_0_phy_sys_clk_src = {
+ .reg = 0x9f0e4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_0_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_19,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_19),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_dp0_clk_src = {
+ .reg = 0x2b06c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_20,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_20),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_dp1_clk_src = {
+ .reg = 0x2b114,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_21,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_21),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x2b0d4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_22,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_22),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x2b104,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_23,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_23),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_rx0_clk_src = {
+ .reg = 0x2b0ac,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_24,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_24),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_rx1_clk_src = {
+ .reg = 0x2b0bc,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_25,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_25),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_1_phy_sys_clk_src = {
+ .reg = 0x2b0e4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_1_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_26,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_26),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_dp0_clk_src = {
+ .reg = 0x1106c,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp0_clk_src",
+ .parent_data = gcc_parent_data_27,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_27),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_dp1_clk_src = {
+ .reg = 0x11114,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_dp1_clk_src",
+ .parent_data = gcc_parent_data_28,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_28),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_p2rr2p_pipe_clk_src = {
+ .reg = 0x110d4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_p2rr2p_pipe_clk_src",
+ .parent_data = gcc_parent_data_29,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_29),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_pcie_pipe_mux_clk_src = {
+ .reg = 0x11104,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_pcie_pipe_mux_clk_src",
+ .parent_data = gcc_parent_data_30,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_30),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_rx0_clk_src = {
+ .reg = 0x110ac,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx0_clk_src",
+ .parent_data = gcc_parent_data_31,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_31),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_rx1_clk_src = {
+ .reg = 0x110bc,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_rx1_clk_src",
+ .parent_data = gcc_parent_data_32,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_32),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
+static struct clk_regmap_phy_mux gcc_usb4_2_phy_sys_clk_src = {
+ .reg = 0x110e4,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb4_2_phy_sys_clk_src",
+ .parent_data = gcc_parent_data_33,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_33),
+ .ops = &clk_regmap_phy_mux_ops,
+ },
+ },
+};
+
static struct clk_rcg2 gcc_gp1_clk_src = {
.cmd_rcgr = 0x64004,
.mnd_width = 16,
@@ -1456,7 +1849,6 @@ static struct clk_rcg2 gcc_usb3_tert_phy_aux_clk_src = {
};
static const struct freq_tbl ftbl_gcc_usb4_0_master_clk_src[] = {
- F(85714286, P_GCC_GPLL0_OUT_EVEN, 3.5, 0, 0),
F(175000000, P_GCC_GPLL8_OUT_MAIN, 4, 0, 0),
F(350000000, P_GCC_GPLL8_OUT_MAIN, 2, 0, 0),
{ }
@@ -2790,6 +3182,11 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.enable_mask = BIT(25),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_0_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2879,6 +3276,11 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
.enable_mask = BIT(30),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_1_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2968,6 +3370,11 @@ static struct clk_branch gcc_pcie_2_pipe_clk = {
.enable_mask = BIT(23),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pcie_2_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5156,6 +5563,33 @@ static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
},
};
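+/*
+ * The new gcc_usb34_*_phy_pipe_clk_src muxes pick between the USB3 PHY
+ * pipe clock, the USB4 router max pipe clock and the pipegmux input, and
+ * take over from the plain USB3 pipe muxes as parents of the USB3 pipe
+ * and USB4 USB pipe branches below.
+ */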
+static const struct parent_map gcc_parent_map_34[] = {
+ { P_GCC_USB3_PRIM_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_34[] = {
+ { .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_0_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_0_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static struct clk_regmap_mux gcc_usb34_prim_phy_pipe_clk_src = {
+ .reg = 0x39070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_34,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_34,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_34),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
.halt_reg = 0x39068,
.halt_check = BRANCH_HALT_SKIP,
@@ -5167,7 +5601,7 @@ static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb3_prim_phy_pipe_clk",
.parent_hws = (const struct clk_hw*[]) {
- &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -5227,6 +5661,33 @@ static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
},
};
+static const struct parent_map gcc_parent_map_35[] = {
+ { P_GCC_USB3_SEC_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_35[] = {
+ { .hw = &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_1_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_1_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static struct clk_regmap_mux gcc_usb34_sec_phy_pipe_clk_src = {
+ .reg = 0xa1070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_35,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_sec_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_35,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_35),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
.halt_reg = 0xa1068,
.halt_check = BRANCH_HALT_SKIP,
@@ -5238,7 +5699,7 @@ static struct clk_branch gcc_usb3_sec_phy_pipe_clk = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb3_sec_phy_pipe_clk",
.parent_hws = (const struct clk_hw*[]) {
- &gcc_usb3_sec_phy_pipe_clk_src.clkr.hw,
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -5298,6 +5759,33 @@ static struct clk_regmap_mux gcc_usb3_tert_phy_pipe_clk_src = {
},
};
+static const struct parent_map gcc_parent_map_36[] = {
+ { P_GCC_USB3_TERT_PHY_PIPE_CLK_SRC, 0 },
+ { P_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK, 1 },
+ { P_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_36[] = {
+ { .hw = &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw },
+ { .index = DT_USB4_2_PHY_GCC_USB4RTR_MAX_PIPE_CLK },
+ { .index = DT_GCC_USB4_2_PHY_PIPEGMUX_CLK_SRC },
+};
+
+static struct clk_regmap_mux gcc_usb34_tert_phy_pipe_clk_src = {
+ .reg = 0xa2070,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_36,
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "gcc_usb34_tert_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_36,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_36),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb3_tert_phy_pipe_clk = {
.halt_reg = 0xa2068,
.halt_check = BRANCH_HALT_SKIP,
@@ -5309,7 +5797,7 @@ static struct clk_branch gcc_usb3_tert_phy_pipe_clk = {
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb3_tert_phy_pipe_clk",
.parent_hws = (const struct clk_hw*[]) {
- &gcc_usb3_tert_phy_pipe_clk_src.clkr.hw,
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -5335,12 +5823,17 @@ static struct clk_branch gcc_usb4_0_cfg_ahb_clk = {
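+/*
+ * The USB4 DP, Rx and p2rr2p pipe branches are clocked by the PHY and can
+ * stop independently of their enable bits, hence BRANCH_HALT_SKIP; rate
+ * requests are forwarded to the new PHY mux parents.
+ */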
static struct clk_branch gcc_usb4_0_dp0_clk = {
.halt_reg = 0x9f060,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x9f060,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5348,12 +5841,17 @@ static struct clk_branch gcc_usb4_0_dp0_clk = {
static struct clk_branch gcc_usb4_0_dp1_clk = {
.halt_reg = 0x9f108,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x9f108,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5385,6 +5883,11 @@ static struct clk_branch gcc_usb4_0_phy_p2rr2p_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5398,6 +5901,11 @@ static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = {
.enable_mask = BIT(19),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5405,12 +5913,17 @@ static struct clk_branch gcc_usb4_0_phy_pcie_pipe_clk = {
static struct clk_branch gcc_usb4_0_phy_rx0_clk = {
.halt_reg = 0x9f0b0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x9f0b0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5418,12 +5931,17 @@ static struct clk_branch gcc_usb4_0_phy_rx0_clk = {
static struct clk_branch gcc_usb4_0_phy_rx1_clk = {
.halt_reg = 0x9f0c0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x9f0c0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5439,6 +5957,11 @@ static struct clk_branch gcc_usb4_0_phy_usb_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5470,6 +5993,11 @@ static struct clk_branch gcc_usb4_0_sys_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_0_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_0_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5512,12 +6040,17 @@ static struct clk_branch gcc_usb4_1_cfg_ahb_clk = {
static struct clk_branch gcc_usb4_1_dp0_clk = {
.halt_reg = 0x2b060,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x2b060,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5525,12 +6058,17 @@ static struct clk_branch gcc_usb4_1_dp0_clk = {
static struct clk_branch gcc_usb4_1_dp1_clk = {
.halt_reg = 0x2b108,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x2b108,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5562,6 +6100,11 @@ static struct clk_branch gcc_usb4_1_phy_p2rr2p_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5575,6 +6118,11 @@ static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5582,12 +6130,17 @@ static struct clk_branch gcc_usb4_1_phy_pcie_pipe_clk = {
static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
.halt_reg = 0x2b0b0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x2b0b0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5595,12 +6148,17 @@ static struct clk_branch gcc_usb4_1_phy_rx0_clk = {
static struct clk_branch gcc_usb4_1_phy_rx1_clk = {
.halt_reg = 0x2b0c0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x2b0c0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5616,6 +6174,11 @@ static struct clk_branch gcc_usb4_1_phy_usb_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_sec_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5647,6 +6210,11 @@ static struct clk_branch gcc_usb4_1_sys_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_1_sys_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_1_phy_sys_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5689,12 +6257,17 @@ static struct clk_branch gcc_usb4_2_cfg_ahb_clk = {
static struct clk_branch gcc_usb4_2_dp0_clk = {
.halt_reg = 0x11060,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x11060,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_dp0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5702,12 +6275,17 @@ static struct clk_branch gcc_usb4_2_dp0_clk = {
static struct clk_branch gcc_usb4_2_dp1_clk = {
.halt_reg = 0x11108,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x11108,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_dp1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_dp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5739,6 +6317,11 @@ static struct clk_branch gcc_usb4_2_phy_p2rr2p_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_p2rr2p_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5752,6 +6335,11 @@ static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = {
.enable_mask = BIT(1),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_pcie_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5759,12 +6347,17 @@ static struct clk_branch gcc_usb4_2_phy_pcie_pipe_clk = {
static struct clk_branch gcc_usb4_2_phy_rx0_clk = {
.halt_reg = 0x110b0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x110b0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_rx0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5772,12 +6365,17 @@ static struct clk_branch gcc_usb4_2_phy_rx0_clk = {
static struct clk_branch gcc_usb4_2_phy_rx1_clk = {
.halt_reg = 0x110c0,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x110c0,
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_rx1_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb4_2_phy_rx1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -5793,6 +6391,11 @@ static struct clk_branch gcc_usb4_2_phy_usb_pipe_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_usb4_2_phy_usb_pipe_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &gcc_usb34_tert_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -6483,6 +7086,9 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_USB30_TERT_MOCK_UTMI_CLK_SRC] = &gcc_usb30_tert_mock_utmi_clk_src.clkr,
[GCC_USB30_TERT_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_tert_mock_utmi_postdiv_clk_src.clkr,
[GCC_USB30_TERT_SLEEP_CLK] = &gcc_usb30_tert_sleep_clk.clkr,
+ [GCC_USB34_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb34_prim_phy_pipe_clk_src.clkr,
+ [GCC_USB34_SEC_PHY_PIPE_CLK_SRC] = &gcc_usb34_sec_phy_pipe_clk_src.clkr,
+ [GCC_USB34_TERT_PHY_PIPE_CLK_SRC] = &gcc_usb34_tert_phy_pipe_clk_src.clkr,
[GCC_USB3_MP_PHY_AUX_CLK] = &gcc_usb3_mp_phy_aux_clk.clkr,
[GCC_USB3_MP_PHY_AUX_CLK_SRC] = &gcc_usb3_mp_phy_aux_clk_src.clkr,
[GCC_USB3_MP_PHY_COM_AUX_CLK] = &gcc_usb3_mp_phy_com_aux_clk.clkr,
@@ -6508,11 +7114,18 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_USB4_0_DP1_CLK] = &gcc_usb4_0_dp1_clk.clkr,
[GCC_USB4_0_MASTER_CLK] = &gcc_usb4_0_master_clk.clkr,
[GCC_USB4_0_MASTER_CLK_SRC] = &gcc_usb4_0_master_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP0_CLK_SRC] = &gcc_usb4_0_phy_dp0_clk_src.clkr,
+ [GCC_USB4_0_PHY_DP1_CLK_SRC] = &gcc_usb4_0_phy_dp1_clk_src.clkr,
[GCC_USB4_0_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_0_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_0_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_0_phy_p2rr2p_pipe_clk_src.clkr,
[GCC_USB4_0_PHY_PCIE_PIPE_CLK] = &gcc_usb4_0_phy_pcie_pipe_clk.clkr,
[GCC_USB4_0_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_0_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_0_phy_pcie_pipe_mux_clk_src.clkr,
[GCC_USB4_0_PHY_RX0_CLK] = &gcc_usb4_0_phy_rx0_clk.clkr,
+ [GCC_USB4_0_PHY_RX0_CLK_SRC] = &gcc_usb4_0_phy_rx0_clk_src.clkr,
[GCC_USB4_0_PHY_RX1_CLK] = &gcc_usb4_0_phy_rx1_clk.clkr,
+ [GCC_USB4_0_PHY_RX1_CLK_SRC] = &gcc_usb4_0_phy_rx1_clk_src.clkr,
+ [GCC_USB4_0_PHY_SYS_CLK_SRC] = &gcc_usb4_0_phy_sys_clk_src.clkr,
[GCC_USB4_0_PHY_USB_PIPE_CLK] = &gcc_usb4_0_phy_usb_pipe_clk.clkr,
[GCC_USB4_0_SB_IF_CLK] = &gcc_usb4_0_sb_if_clk.clkr,
[GCC_USB4_0_SB_IF_CLK_SRC] = &gcc_usb4_0_sb_if_clk_src.clkr,
@@ -6524,11 +7137,18 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_USB4_1_DP1_CLK] = &gcc_usb4_1_dp1_clk.clkr,
[GCC_USB4_1_MASTER_CLK] = &gcc_usb4_1_master_clk.clkr,
[GCC_USB4_1_MASTER_CLK_SRC] = &gcc_usb4_1_master_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP0_CLK_SRC] = &gcc_usb4_1_phy_dp0_clk_src.clkr,
+ [GCC_USB4_1_PHY_DP1_CLK_SRC] = &gcc_usb4_1_phy_dp1_clk_src.clkr,
[GCC_USB4_1_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_1_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_1_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_1_phy_p2rr2p_pipe_clk_src.clkr,
[GCC_USB4_1_PHY_PCIE_PIPE_CLK] = &gcc_usb4_1_phy_pcie_pipe_clk.clkr,
[GCC_USB4_1_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_1_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_1_phy_pcie_pipe_mux_clk_src.clkr,
[GCC_USB4_1_PHY_RX0_CLK] = &gcc_usb4_1_phy_rx0_clk.clkr,
+ [GCC_USB4_1_PHY_RX0_CLK_SRC] = &gcc_usb4_1_phy_rx0_clk_src.clkr,
[GCC_USB4_1_PHY_RX1_CLK] = &gcc_usb4_1_phy_rx1_clk.clkr,
+ [GCC_USB4_1_PHY_RX1_CLK_SRC] = &gcc_usb4_1_phy_rx1_clk_src.clkr,
+ [GCC_USB4_1_PHY_SYS_CLK_SRC] = &gcc_usb4_1_phy_sys_clk_src.clkr,
[GCC_USB4_1_PHY_USB_PIPE_CLK] = &gcc_usb4_1_phy_usb_pipe_clk.clkr,
[GCC_USB4_1_SB_IF_CLK] = &gcc_usb4_1_sb_if_clk.clkr,
[GCC_USB4_1_SB_IF_CLK_SRC] = &gcc_usb4_1_sb_if_clk_src.clkr,
@@ -6540,11 +7160,18 @@ static struct clk_regmap *gcc_x1e80100_clocks[] = {
[GCC_USB4_2_DP1_CLK] = &gcc_usb4_2_dp1_clk.clkr,
[GCC_USB4_2_MASTER_CLK] = &gcc_usb4_2_master_clk.clkr,
[GCC_USB4_2_MASTER_CLK_SRC] = &gcc_usb4_2_master_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP0_CLK_SRC] = &gcc_usb4_2_phy_dp0_clk_src.clkr,
+ [GCC_USB4_2_PHY_DP1_CLK_SRC] = &gcc_usb4_2_phy_dp1_clk_src.clkr,
[GCC_USB4_2_PHY_P2RR2P_PIPE_CLK] = &gcc_usb4_2_phy_p2rr2p_pipe_clk.clkr,
+ [GCC_USB4_2_PHY_P2RR2P_PIPE_CLK_SRC] = &gcc_usb4_2_phy_p2rr2p_pipe_clk_src.clkr,
[GCC_USB4_2_PHY_PCIE_PIPE_CLK] = &gcc_usb4_2_phy_pcie_pipe_clk.clkr,
[GCC_USB4_2_PHY_PCIE_PIPE_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_clk_src.clkr,
+ [GCC_USB4_2_PHY_PCIE_PIPE_MUX_CLK_SRC] = &gcc_usb4_2_phy_pcie_pipe_mux_clk_src.clkr,
[GCC_USB4_2_PHY_RX0_CLK] = &gcc_usb4_2_phy_rx0_clk.clkr,
+ [GCC_USB4_2_PHY_RX0_CLK_SRC] = &gcc_usb4_2_phy_rx0_clk_src.clkr,
[GCC_USB4_2_PHY_RX1_CLK] = &gcc_usb4_2_phy_rx1_clk.clkr,
+ [GCC_USB4_2_PHY_RX1_CLK_SRC] = &gcc_usb4_2_phy_rx1_clk_src.clkr,
+ [GCC_USB4_2_PHY_SYS_CLK_SRC] = &gcc_usb4_2_phy_sys_clk_src.clkr,
[GCC_USB4_2_PHY_USB_PIPE_CLK] = &gcc_usb4_2_phy_usb_pipe_clk.clkr,
[GCC_USB4_2_SB_IF_CLK] = &gcc_usb4_2_sb_if_clk.clkr,
[GCC_USB4_2_SB_IF_CLK_SRC] = &gcc_usb4_2_sb_if_clk_src.clkr,
@@ -6660,16 +7287,52 @@ static const struct qcom_reset_map gcc_x1e80100_resets[] = {
[GCC_USB3_UNIPHY_MP0_BCR] = { 0x19000 },
[GCC_USB3_UNIPHY_MP1_BCR] = { 0x54000 },
[GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 },
+ [GCC_USB4PHY_PHY_PRIM_BCR] = { 0x5000c },
[GCC_USB3PHY_PHY_SEC_BCR] = { 0x2a004 },
+ [GCC_USB4PHY_PHY_SEC_BCR] = { 0x2a00c },
[GCC_USB3PHY_PHY_TERT_BCR] = { 0xa3004 },
+ [GCC_USB4PHY_PHY_TERT_BCR] = { 0xa300c },
[GCC_USB3UNIPHY_PHY_MP0_BCR] = { 0x19004 },
[GCC_USB3UNIPHY_PHY_MP1_BCR] = { 0x54004 },
[GCC_USB4_0_BCR] = { 0x9f000 },
[GCC_USB4_0_DP0_PHY_PRIM_BCR] = { 0x50010 },
- [GCC_USB4_1_DP0_PHY_SEC_BCR] = { 0x2a010 },
- [GCC_USB4_2_DP0_PHY_TERT_BCR] = { 0xa3010 },
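+ /* Per-controller USB4 MISC resets: one shared register, one BCR per bit */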
+ [GCC_USB4_0_MISC_USB4_SYS_BCR] = { .reg = 0xad0f8, .bit = 0 },
+ [GCC_USB4_0_MISC_RX_CLK_0_BCR] = { .reg = 0xad0f8, .bit = 1 },
+ [GCC_USB4_0_MISC_RX_CLK_1_BCR] = { .reg = 0xad0f8, .bit = 2 },
+ [GCC_USB4_0_MISC_USB_PIPE_BCR] = { .reg = 0xad0f8, .bit = 3 },
+ [GCC_USB4_0_MISC_PCIE_PIPE_BCR] = { .reg = 0xad0f8, .bit = 4 },
+ [GCC_USB4_0_MISC_TMU_BCR] = { .reg = 0xad0f8, .bit = 5 },
+ [GCC_USB4_0_MISC_SB_IF_BCR] = { .reg = 0xad0f8, .bit = 6 },
+ [GCC_USB4_0_MISC_HIA_MSTR_BCR] = { .reg = 0xad0f8, .bit = 7 },
+ [GCC_USB4_0_MISC_AHB_BCR] = { .reg = 0xad0f8, .bit = 8 },
+ [GCC_USB4_0_MISC_DP0_MAX_PCLK_BCR] = { .reg = 0xad0f8, .bit = 9 },
+ [GCC_USB4_0_MISC_DP1_MAX_PCLK_BCR] = { .reg = 0xad0f8, .bit = 10 },
[GCC_USB4_1_BCR] = { 0x2b000 },
+ [GCC_USB4_1_DP0_PHY_SEC_BCR] = { 0x2a010 },
+ [GCC_USB4_1_MISC_USB4_SYS_BCR] = { .reg = 0xae0f8, .bit = 0 },
+ [GCC_USB4_1_MISC_RX_CLK_0_BCR] = { .reg = 0xae0f8, .bit = 1 },
+ [GCC_USB4_1_MISC_RX_CLK_1_BCR] = { .reg = 0xae0f8, .bit = 2 },
+ [GCC_USB4_1_MISC_USB_PIPE_BCR] = { .reg = 0xae0f8, .bit = 3 },
+ [GCC_USB4_1_MISC_PCIE_PIPE_BCR] = { .reg = 0xae0f8, .bit = 4 },
+ [GCC_USB4_1_MISC_TMU_BCR] = { .reg = 0xae0f8, .bit = 5 },
+ [GCC_USB4_1_MISC_SB_IF_BCR] = { .reg = 0xae0f8, .bit = 6 },
+ [GCC_USB4_1_MISC_HIA_MSTR_BCR] = { .reg = 0xae0f8, .bit = 7 },
+ [GCC_USB4_1_MISC_AHB_BCR] = { .reg = 0xae0f8, .bit = 8 },
+ [GCC_USB4_1_MISC_DP0_MAX_PCLK_BCR] = { .reg = 0xae0f8, .bit = 9 },
+ [GCC_USB4_1_MISC_DP1_MAX_PCLK_BCR] = { .reg = 0xae0f8, .bit = 10 },
[GCC_USB4_2_BCR] = { 0x11000 },
+ [GCC_USB4_2_DP0_PHY_TERT_BCR] = { 0xa3010 },
+ [GCC_USB4_2_MISC_USB4_SYS_BCR] = { .reg = 0xaf0f8, .bit = 0 },
+ [GCC_USB4_2_MISC_RX_CLK_0_BCR] = { .reg = 0xaf0f8, .bit = 1 },
+ [GCC_USB4_2_MISC_RX_CLK_1_BCR] = { .reg = 0xaf0f8, .bit = 2 },
+ [GCC_USB4_2_MISC_USB_PIPE_BCR] = { .reg = 0xaf0f8, .bit = 3 },
+ [GCC_USB4_2_MISC_PCIE_PIPE_BCR] = { .reg = 0xaf0f8, .bit = 4 },
+ [GCC_USB4_2_MISC_TMU_BCR] = { .reg = 0xaf0f8, .bit = 5 },
+ [GCC_USB4_2_MISC_SB_IF_BCR] = { .reg = 0xaf0f8, .bit = 6 },
+ [GCC_USB4_2_MISC_HIA_MSTR_BCR] = { .reg = 0xaf0f8, .bit = 7 },
+ [GCC_USB4_2_MISC_AHB_BCR] = { .reg = 0xaf0f8, .bit = 8 },
+ [GCC_USB4_2_MISC_DP0_MAX_PCLK_BCR] = { .reg = 0xaf0f8, .bit = 9 },
+ [GCC_USB4_2_MISC_DP1_MAX_PCLK_BCR] = { .reg = 0xaf0f8, .bit = 10 },
[GCC_USB_0_PHY_BCR] = { 0x50020 },
[GCC_USB_1_PHY_BCR] = { 0x2a020 },
[GCC_USB_2_PHY_BCR] = { 0xa3020 },
diff --git a/drivers/clk/qcom/gpucc-sa8775p.c b/drivers/clk/qcom/gpucc-sa8775p.c
index 78cad622cb5a..25dcc5912f99 100644
--- a/drivers/clk/qcom/gpucc-sa8775p.c
+++ b/drivers/clk/qcom/gpucc-sa8775p.c
@@ -365,7 +365,7 @@ static struct clk_branch gpu_cc_cx_gmu_clk = {
&gpu_cc_gmu_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_aon_ops,
},
},
@@ -414,7 +414,7 @@ static struct clk_branch gpu_cc_cxo_clk = {
&gpu_cc_xo_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -499,7 +499,7 @@ static struct clk_branch gpu_cc_hub_cx_int_clk = {
&gpu_cc_hub_cx_int_div_clk_src.clkr.hw,
},
.num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_aon_ops,
},
},
diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c
index a7bf44544b95..97287488e05a 100644
--- a/drivers/clk/qcom/gpucc-sc7180.c
+++ b/drivers/clk/qcom/gpucc-sc7180.c
@@ -42,7 +42,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/gpucc-sm6350.c b/drivers/clk/qcom/gpucc-sm6350.c
index ee89c42413f8..efbee1518dd3 100644
--- a/drivers/clk/qcom/gpucc-sm6350.c
+++ b/drivers/clk/qcom/gpucc-sm6350.c
@@ -67,7 +67,7 @@ static struct clk_alpha_pll gpu_cc_pll0 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll0",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.index = DT_BI_TCXO,
.fw_name = "bi_tcxo",
},
@@ -111,7 +111,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.index = DT_BI_TCXO,
.fw_name = "bi_tcxo",
},
diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c
index 7ce91208c0bc..5701031c17f3 100644
--- a/drivers/clk/qcom/gpucc-sm8150.c
+++ b/drivers/clk/qcom/gpucc-sm8150.c
@@ -53,7 +53,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c
index ca0a1681d352..eee3208640cd 100644
--- a/drivers/clk/qcom/gpucc-sm8250.c
+++ b/drivers/clk/qcom/gpucc-sm8250.c
@@ -56,7 +56,7 @@ static struct clk_alpha_pll gpu_cc_pll1 = {
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
- .parent_data = &(const struct clk_parent_data){
+ .parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
index b0b0cb074b4a..385964196185 100644
--- a/drivers/clk/qcom/hfpll.c
+++ b/drivers/clk/qcom/hfpll.c
@@ -99,7 +99,6 @@ static const struct regmap_config hfpll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x30,
- .fast_io = true,
};
static int qcom_hfpll_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/ipq-cmn-pll.c b/drivers/clk/qcom/ipq-cmn-pll.c
index b3d7169c63e5..dafbf5732048 100644
--- a/drivers/clk/qcom/ipq-cmn-pll.c
+++ b/drivers/clk/qcom/ipq-cmn-pll.c
@@ -108,7 +108,6 @@ static const struct regmap_config ipq_cmn_pll_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x7fc,
- .fast_io = true,
};
static const struct cmn_pll_fixed_output_clk ipq5018_output_clks[] = {
diff --git a/drivers/clk/qcom/lpassaudiocc-sc7280.c b/drivers/clk/qcom/lpassaudiocc-sc7280.c
index 3ff123bffa11..7e2172969289 100644
--- a/drivers/clk/qcom/lpassaudiocc-sc7280.c
+++ b/drivers/clk/qcom/lpassaudiocc-sc7280.c
@@ -709,8 +709,8 @@ static const struct qcom_cc_desc lpass_audio_cc_sc7280_desc = {
};
static const struct qcom_reset_map lpass_audio_cc_sc7280_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
- [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_TX_CGCR] = { 0xa8, 1 },
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
};
diff --git a/drivers/clk/qcom/lpasscc-sc8280xp.c b/drivers/clk/qcom/lpasscc-sc8280xp.c
index 9fd9498d7dc8..ff839788c40e 100644
--- a/drivers/clk/qcom/lpasscc-sc8280xp.c
+++ b/drivers/clk/qcom/lpasscc-sc8280xp.c
@@ -18,9 +18,9 @@
#include "reset.h"
static const struct qcom_reset_map lpass_audiocc_sc8280xp_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { 0xa0, 1 },
[LPASS_AUDIO_SWR_WSA_CGCR] = { 0xb0, 1 },
- [LPASS_AUDIO_SWR_WSA2_CGCR] = { 0xd8, 1 },
+ [LPASS_AUDIO_SWR_WSA2_CGCR] = { 0xd8, 1 },
};
static const struct regmap_config lpass_audiocc_sc8280xp_regmap_config = {
diff --git a/drivers/clk/qcom/lpasscc-sm6115.c b/drivers/clk/qcom/lpasscc-sm6115.c
index 8ffdab71b948..ac6d219233b4 100644
--- a/drivers/clk/qcom/lpasscc-sm6115.c
+++ b/drivers/clk/qcom/lpasscc-sm6115.c
@@ -17,7 +17,7 @@
#include "reset.h"
static const struct qcom_reset_map lpass_audiocc_sm6115_resets[] = {
- [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
+ [LPASS_AUDIO_SWR_RX_CGCR] = { .reg = 0x98, .bit = 1, .udelay = 500 },
};
static struct regmap_config lpass_audiocc_sm6115_regmap_config = {
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 5937b071533b..5174bd3dcdc5 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -42,7 +42,7 @@ static const struct alpha_pll_config lpass_lpaaudio_dig_pll_config = {
};
static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
- [CLK_ALPHA_PLL_TYPE_FABIA] = {
+ [CLK_ALPHA_PLL_TYPE_FABIA] = {
[PLL_OFF_L_VAL] = 0x04,
[PLL_OFF_CAL_L_VAL] = 0x8,
[PLL_OFF_USER_CTL] = 0x0c,
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index e69fc65b13da..dbd3f561dc6d 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -74,7 +74,7 @@ static struct clk_alpha_pll mmpll0 = {
},
};
-static struct clk_alpha_pll mmpll6 = {
+static struct clk_alpha_pll mmpll6 = {
.offset = 0xf0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
@@ -2781,6 +2781,7 @@ static struct gdsc *mmcc_sdm660_gdscs[] = {
};
static const struct qcom_reset_map mmcc_660_resets[] = {
+ [MDSS_BCR] = { 0x2300 },
[CAMSS_MICRO_BCR] = { 0x3490 },
};
diff --git a/drivers/clk/qcom/nsscc-ipq5424.c b/drivers/clk/qcom/nsscc-ipq5424.c
new file mode 100644
index 000000000000..5893c7146180
--- /dev/null
+++ b/drivers/clk/qcom/nsscc-ipq5424.c
@@ -0,0 +1,1340 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/interconnect-provider.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,ipq5424-nsscc.h>
+#include <dt-bindings/interconnect/qcom,ipq5424.h>
+#include <dt-bindings/reset/qcom,ipq5424-nsscc.h>
+
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+
+/* Must match the order of clocks in the DT binding */
+enum {
+ DT_CMN_PLL_XO_CLK,
+ DT_CMN_PLL_NSS_300M_CLK,
+ DT_CMN_PLL_NSS_375M_CLK,
+ DT_GCC_GPLL0_OUT_AUX,
+ DT_UNIPHY0_NSS_RX_CLK,
+ DT_UNIPHY0_NSS_TX_CLK,
+ DT_UNIPHY1_NSS_RX_CLK,
+ DT_UNIPHY1_NSS_TX_CLK,
+ DT_UNIPHY2_NSS_RX_CLK,
+ DT_UNIPHY2_NSS_TX_CLK,
+};
+
+enum {
+ P_CMN_PLL_XO_CLK,
+ P_CMN_PLL_NSS_300M_CLK,
+ P_CMN_PLL_NSS_375M_CLK,
+ P_GCC_GPLL0_OUT_AUX,
+ P_UNIPHY0_NSS_RX_CLK,
+ P_UNIPHY0_NSS_TX_CLK,
+ P_UNIPHY1_NSS_RX_CLK,
+ P_UNIPHY1_NSS_TX_CLK,
+ P_UNIPHY2_NSS_RX_CLK,
+ P_UNIPHY2_NSS_TX_CLK,
+};
+
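+/*
+ * Each parent_map entry pairs a P_* parent with its hardware mux selector;
+ * the matching clk_parent_data array lists the same parents by DT index,
+ * in the same order.
+ */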
+static const struct parent_map nss_cc_parent_map_0[] = {
+ { P_CMN_PLL_XO_CLK, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_CMN_PLL_NSS_300M_CLK, 5 },
+ { P_CMN_PLL_NSS_375M_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_0[] = {
+ { .index = DT_CMN_PLL_XO_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_CMN_PLL_NSS_300M_CLK },
+ { .index = DT_CMN_PLL_NSS_375M_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_1[] = {
+ { P_CMN_PLL_XO_CLK, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_UNIPHY0_NSS_RX_CLK, 3 },
+ { P_UNIPHY0_NSS_TX_CLK, 4 },
+ { P_CMN_PLL_NSS_300M_CLK, 5 },
+ { P_CMN_PLL_NSS_375M_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_1[] = {
+ { .index = DT_CMN_PLL_XO_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_UNIPHY0_NSS_RX_CLK },
+ { .index = DT_UNIPHY0_NSS_TX_CLK },
+ { .index = DT_CMN_PLL_NSS_300M_CLK },
+ { .index = DT_CMN_PLL_NSS_375M_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_2[] = {
+ { P_CMN_PLL_XO_CLK, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_UNIPHY1_NSS_RX_CLK, 3 },
+ { P_UNIPHY1_NSS_TX_CLK, 4 },
+ { P_CMN_PLL_NSS_300M_CLK, 5 },
+ { P_CMN_PLL_NSS_375M_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_2[] = {
+ { .index = DT_CMN_PLL_XO_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_UNIPHY1_NSS_RX_CLK },
+ { .index = DT_UNIPHY1_NSS_TX_CLK },
+ { .index = DT_CMN_PLL_NSS_300M_CLK },
+ { .index = DT_CMN_PLL_NSS_375M_CLK },
+};
+
+static const struct parent_map nss_cc_parent_map_3[] = {
+ { P_CMN_PLL_XO_CLK, 0 },
+ { P_GCC_GPLL0_OUT_AUX, 2 },
+ { P_UNIPHY2_NSS_RX_CLK, 3 },
+ { P_UNIPHY2_NSS_TX_CLK, 4 },
+ { P_CMN_PLL_NSS_300M_CLK, 5 },
+ { P_CMN_PLL_NSS_375M_CLK, 6 },
+};
+
+static const struct clk_parent_data nss_cc_parent_data_3[] = {
+ { .index = DT_CMN_PLL_XO_CLK },
+ { .index = DT_GCC_GPLL0_OUT_AUX },
+ { .index = DT_UNIPHY2_NSS_RX_CLK },
+ { .index = DT_UNIPHY2_NSS_TX_CLK },
+ { .index = DT_CMN_PLL_NSS_300M_CLK },
+ { .index = DT_CMN_PLL_NSS_375M_CLK },
+};
+
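+/*
+ * Frequency table entries use the common F(rate, src, pre_div, m, n)
+ * macro from clk-rcg.h, which stores 2 * pre_div - 1, so half-integer
+ * dividers such as 2.5 or 12.5 can be written directly; m/n are the MND
+ * counter values, 0 when unused.
+ */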
+static const struct freq_tbl ftbl_nss_cc_ce_clk_src[] = {
+ F(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ F(375000000, P_CMN_PLL_NSS_375M_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_ce_clk_src = {
+ .cmd_rcgr = 0x5e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ce_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_cfg_clk_src[] = {
+ F(100000000, P_GCC_GPLL0_OUT_AUX, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_cfg_clk_src = {
+ .cmd_rcgr = 0x6a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_tbl = ftbl_nss_cc_cfg_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_cfg_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_nss_cc_eip_bfdcd_clk_src[] = {
+ F(300000000, P_CMN_PLL_NSS_300M_CLK, 1, 0, 0),
+ F(375000000, P_CMN_PLL_NSS_375M_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_eip_bfdcd_clk_src = {
+ .cmd_rcgr = 0x644,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_tbl = ftbl_nss_cc_eip_bfdcd_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_eip_bfdcd_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
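+/*
+ * The Ethernet port clocks can generate the same rate from different
+ * parent configurations depending on the negotiated link mode: FMS()
+ * describes a single fixed configuration, FM() points at an array of C()
+ * alternatives for one rate, and clk_rcg2_fm_ops picks the alternative
+ * that fits the current parent rates.
+ */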
+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_25[] = {
+ C(P_UNIPHY0_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_rx_clk_src_125[] = {
+ C(P_UNIPHY0_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1_rx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port1_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY0_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port1_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY0_NSS_RX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY0_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port1_rx_clk_src = {
+ .cmd_rcgr = 0x4b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_multi_tbl = ftbl_nss_cc_port1_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_rx_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_25[] = {
+ C(P_UNIPHY0_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port1_tx_clk_src_125[] = {
+ C(P_UNIPHY0_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port1_tx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port1_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY0_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port1_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY0_NSS_TX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY0_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port1_tx_clk_src = {
+ .cmd_rcgr = 0x4c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_1,
+ .freq_multi_tbl = ftbl_nss_cc_port1_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_tx_clk_src",
+ .parent_data = nss_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_1),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port2_rx_clk_src_25[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY1_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port2_rx_clk_src_125[] = {
+ C(P_UNIPHY1_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY1_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port2_rx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port2_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port2_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_NSS_RX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port2_rx_clk_src = {
+ .cmd_rcgr = 0x4cc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_multi_tbl = ftbl_nss_cc_port2_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_rx_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port2_tx_clk_src_25[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY1_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port2_tx_clk_src_125[] = {
+ C(P_UNIPHY1_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY1_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port2_tx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port2_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY1_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port2_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY1_NSS_TX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY1_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port2_tx_clk_src = {
+ .cmd_rcgr = 0x4d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_2,
+ .freq_multi_tbl = ftbl_nss_cc_port2_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_tx_clk_src",
+ .parent_data = nss_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_2),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port3_rx_clk_src_25[] = {
+ C(P_UNIPHY2_NSS_RX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY2_NSS_RX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port3_rx_clk_src_125[] = {
+ C(P_UNIPHY2_NSS_RX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port3_rx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port3_rx_clk_src_25),
+ FMS(78125000, P_UNIPHY2_NSS_RX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port3_rx_clk_src_125),
+ FMS(156250000, P_UNIPHY2_NSS_RX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_NSS_RX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port3_rx_clk_src = {
+ .cmd_rcgr = 0x4e4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_multi_tbl = ftbl_nss_cc_port3_rx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_rx_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static const struct freq_conf ftbl_nss_cc_port3_tx_clk_src_25[] = {
+ C(P_UNIPHY2_NSS_TX_CLK, 12.5, 0, 0),
+ C(P_UNIPHY2_NSS_TX_CLK, 5, 0, 0),
+};
+
+static const struct freq_conf ftbl_nss_cc_port3_tx_clk_src_125[] = {
+ C(P_UNIPHY2_NSS_TX_CLK, 2.5, 0, 0),
+ C(P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
+};
+
+static const struct freq_multi_tbl ftbl_nss_cc_port3_tx_clk_src[] = {
+ FMS(24000000, P_CMN_PLL_XO_CLK, 1, 0, 0),
+ FM(25000000, ftbl_nss_cc_port3_tx_clk_src_25),
+ FMS(78125000, P_UNIPHY2_NSS_TX_CLK, 4, 0, 0),
+ FM(125000000, ftbl_nss_cc_port3_tx_clk_src_125),
+ FMS(156250000, P_UNIPHY2_NSS_TX_CLK, 2, 0, 0),
+ FMS(312500000, P_UNIPHY2_NSS_TX_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 nss_cc_port3_tx_clk_src = {
+ .cmd_rcgr = 0x4f0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_3,
+ .freq_multi_tbl = ftbl_nss_cc_port3_tx_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_tx_clk_src",
+ .parent_data = nss_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_3),
+ .ops = &clk_rcg2_fm_ops,
+ },
+};
+
+static struct clk_rcg2 nss_cc_ppe_clk_src = {
+ .cmd_rcgr = 0x3ec,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = nss_cc_parent_map_0,
+ .freq_tbl = ftbl_nss_cc_ce_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_clk_src",
+ .parent_data = nss_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(nss_cc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
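+/*
+ * Regmap post-dividers on the port Rx/Tx clocks; the XGMAC PTP reference
+ * dividers further down use clk_regmap_div_ro_ops, so their ratio is only
+ * read back, never reprogrammed.
+ */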
+static struct clk_regmap_div nss_cc_port1_rx_div_clk_src = {
+ .reg = 0x4bc,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port1_tx_div_clk_src = {
+ .reg = 0x4c8,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port1_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port2_rx_div_clk_src = {
+ .reg = 0x4d4,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port2_tx_div_clk_src = {
+ .reg = 0x4e0,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port2_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port3_rx_div_clk_src = {
+ .reg = 0x4ec,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_rx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_port3_tx_div_clk_src = {
+ .reg = 0x4f8,
+ .shift = 0,
+ .width = 9,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_port3_tx_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_tx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac0_ptp_ref_div_clk_src = {
+ .reg = 0x3f4,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac0_ptp_ref_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac1_ptp_ref_div_clk_src = {
+ .reg = 0x3f8,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac1_ptp_ref_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div nss_cc_xgmac2_ptp_ref_div_clk_src = {
+ .reg = 0x3fc,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "nss_cc_xgmac2_ptp_ref_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch nss_cc_ce_apb_clk = {
+ .halt_reg = 0x5e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ce_apb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ce_axi_clk = {
+ .halt_reg = 0x5ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ce_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_debug_clk = {
+ .halt_reg = 0x70c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x70c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_debug_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_eip_clk = {
+ .halt_reg = 0x658,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x658,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_eip_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_eip_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nss_csr_clk = {
+ .halt_reg = 0x6b0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nss_csr_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_apb_clk = {
+ .halt_reg = 0x5f4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5f4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_ce_apb_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ce_axi_clk = {
+ .halt_reg = 0x5f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_ce_axi_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ce_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_eip_clk = {
+ .halt_reg = 0x660,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x660,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_eip_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_eip_bfdcd_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_nss_csr_clk = {
+ .halt_reg = 0x6b4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6b4,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_nss_csr_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_cfg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_cfg_clk = {
+ .halt_reg = 0x444,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x444,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_ppe_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_nssnoc_ppe_clk = {
+ .halt_reg = 0x440,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x440,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_nssnoc_ppe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_mac_clk = {
+ .halt_reg = 0x428,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x428,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_mac_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_rx_clk = {
+ .halt_reg = 0x4fc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4fc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port1_tx_clk = {
+ .halt_reg = 0x504,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x504,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port1_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_mac_clk = {
+ .halt_reg = 0x430,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x430,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_mac_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_rx_clk = {
+ .halt_reg = 0x50c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x50c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port2_tx_clk = {
+ .halt_reg = 0x514,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x514,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port2_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_mac_clk = {
+ .halt_reg = 0x438,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x438,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_mac_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_rx_clk = {
+ .halt_reg = 0x51c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_port3_tx_clk = {
+ .halt_reg = 0x524,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x524,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_port3_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_edma_cfg_clk = {
+ .halt_reg = 0x424,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x424,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_edma_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_edma_clk = {
+ .halt_reg = 0x41c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x41c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_edma_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_btq_clk = {
+ .halt_reg = 0x408,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x408,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_switch_btq_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_cfg_clk = {
+ .halt_reg = 0x418,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x418,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_switch_cfg_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_clk = {
+ .halt_reg = 0x410,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x410,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_switch_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_ppe_switch_ipe_clk = {
+ .halt_reg = 0x400,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x400,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_ppe_switch_ipe_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_ppe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port1_rx_clk = {
+ .halt_reg = 0x57c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port1_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port1_tx_clk = {
+ .halt_reg = 0x580,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x580,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port1_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port1_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port2_rx_clk = {
+ .halt_reg = 0x584,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x584,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port2_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port2_tx_clk = {
+ .halt_reg = 0x588,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x588,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port2_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port2_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port3_rx_clk = {
+ .halt_reg = 0x58c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port3_rx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_rx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_uniphy_port3_tx_clk = {
+ .halt_reg = 0x590,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x590,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_uniphy_port3_tx_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_port3_tx_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac0_ptp_ref_clk = {
+ .halt_reg = 0x448,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x448,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_xgmac0_ptp_ref_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac1_ptp_ref_clk = {
+ .halt_reg = 0x44c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_xgmac1_ptp_ref_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch nss_cc_xgmac2_ptp_ref_clk = {
+ .halt_reg = 0x450,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x450,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data){
+ .name = "nss_cc_xgmac2_ptp_ref_clk",
+ .parent_hws = (const struct clk_hw*[]){
+ &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *nss_cc_ipq5424_clocks[] = {
+ [NSS_CC_CE_APB_CLK] = &nss_cc_ce_apb_clk.clkr,
+ [NSS_CC_CE_AXI_CLK] = &nss_cc_ce_axi_clk.clkr,
+ [NSS_CC_CE_CLK_SRC] = &nss_cc_ce_clk_src.clkr,
+ [NSS_CC_CFG_CLK_SRC] = &nss_cc_cfg_clk_src.clkr,
+ [NSS_CC_DEBUG_CLK] = &nss_cc_debug_clk.clkr,
+ [NSS_CC_EIP_BFDCD_CLK_SRC] = &nss_cc_eip_bfdcd_clk_src.clkr,
+ [NSS_CC_EIP_CLK] = &nss_cc_eip_clk.clkr,
+ [NSS_CC_NSS_CSR_CLK] = &nss_cc_nss_csr_clk.clkr,
+ [NSS_CC_NSSNOC_CE_APB_CLK] = &nss_cc_nssnoc_ce_apb_clk.clkr,
+ [NSS_CC_NSSNOC_CE_AXI_CLK] = &nss_cc_nssnoc_ce_axi_clk.clkr,
+ [NSS_CC_NSSNOC_EIP_CLK] = &nss_cc_nssnoc_eip_clk.clkr,
+ [NSS_CC_NSSNOC_NSS_CSR_CLK] = &nss_cc_nssnoc_nss_csr_clk.clkr,
+ [NSS_CC_NSSNOC_PPE_CFG_CLK] = &nss_cc_nssnoc_ppe_cfg_clk.clkr,
+ [NSS_CC_NSSNOC_PPE_CLK] = &nss_cc_nssnoc_ppe_clk.clkr,
+ [NSS_CC_PORT1_MAC_CLK] = &nss_cc_port1_mac_clk.clkr,
+ [NSS_CC_PORT1_RX_CLK] = &nss_cc_port1_rx_clk.clkr,
+ [NSS_CC_PORT1_RX_CLK_SRC] = &nss_cc_port1_rx_clk_src.clkr,
+ [NSS_CC_PORT1_RX_DIV_CLK_SRC] = &nss_cc_port1_rx_div_clk_src.clkr,
+ [NSS_CC_PORT1_TX_CLK] = &nss_cc_port1_tx_clk.clkr,
+ [NSS_CC_PORT1_TX_CLK_SRC] = &nss_cc_port1_tx_clk_src.clkr,
+ [NSS_CC_PORT1_TX_DIV_CLK_SRC] = &nss_cc_port1_tx_div_clk_src.clkr,
+ [NSS_CC_PORT2_MAC_CLK] = &nss_cc_port2_mac_clk.clkr,
+ [NSS_CC_PORT2_RX_CLK] = &nss_cc_port2_rx_clk.clkr,
+ [NSS_CC_PORT2_RX_CLK_SRC] = &nss_cc_port2_rx_clk_src.clkr,
+ [NSS_CC_PORT2_RX_DIV_CLK_SRC] = &nss_cc_port2_rx_div_clk_src.clkr,
+ [NSS_CC_PORT2_TX_CLK] = &nss_cc_port2_tx_clk.clkr,
+ [NSS_CC_PORT2_TX_CLK_SRC] = &nss_cc_port2_tx_clk_src.clkr,
+ [NSS_CC_PORT2_TX_DIV_CLK_SRC] = &nss_cc_port2_tx_div_clk_src.clkr,
+ [NSS_CC_PORT3_MAC_CLK] = &nss_cc_port3_mac_clk.clkr,
+ [NSS_CC_PORT3_RX_CLK] = &nss_cc_port3_rx_clk.clkr,
+ [NSS_CC_PORT3_RX_CLK_SRC] = &nss_cc_port3_rx_clk_src.clkr,
+ [NSS_CC_PORT3_RX_DIV_CLK_SRC] = &nss_cc_port3_rx_div_clk_src.clkr,
+ [NSS_CC_PORT3_TX_CLK] = &nss_cc_port3_tx_clk.clkr,
+ [NSS_CC_PORT3_TX_CLK_SRC] = &nss_cc_port3_tx_clk_src.clkr,
+ [NSS_CC_PORT3_TX_DIV_CLK_SRC] = &nss_cc_port3_tx_div_clk_src.clkr,
+ [NSS_CC_PPE_CLK_SRC] = &nss_cc_ppe_clk_src.clkr,
+ [NSS_CC_PPE_EDMA_CFG_CLK] = &nss_cc_ppe_edma_cfg_clk.clkr,
+ [NSS_CC_PPE_EDMA_CLK] = &nss_cc_ppe_edma_clk.clkr,
+ [NSS_CC_PPE_SWITCH_BTQ_CLK] = &nss_cc_ppe_switch_btq_clk.clkr,
+ [NSS_CC_PPE_SWITCH_CFG_CLK] = &nss_cc_ppe_switch_cfg_clk.clkr,
+ [NSS_CC_PPE_SWITCH_CLK] = &nss_cc_ppe_switch_clk.clkr,
+ [NSS_CC_PPE_SWITCH_IPE_CLK] = &nss_cc_ppe_switch_ipe_clk.clkr,
+ [NSS_CC_UNIPHY_PORT1_RX_CLK] = &nss_cc_uniphy_port1_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT1_TX_CLK] = &nss_cc_uniphy_port1_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT2_RX_CLK] = &nss_cc_uniphy_port2_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT2_TX_CLK] = &nss_cc_uniphy_port2_tx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT3_RX_CLK] = &nss_cc_uniphy_port3_rx_clk.clkr,
+ [NSS_CC_UNIPHY_PORT3_TX_CLK] = &nss_cc_uniphy_port3_tx_clk.clkr,
+ [NSS_CC_XGMAC0_PTP_REF_CLK] = &nss_cc_xgmac0_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC0_PTP_REF_DIV_CLK_SRC] = &nss_cc_xgmac0_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC1_PTP_REF_CLK] = &nss_cc_xgmac1_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC1_PTP_REF_DIV_CLK_SRC] = &nss_cc_xgmac1_ptp_ref_div_clk_src.clkr,
+ [NSS_CC_XGMAC2_PTP_REF_CLK] = &nss_cc_xgmac2_ptp_ref_clk.clkr,
+ [NSS_CC_XGMAC2_PTP_REF_DIV_CLK_SRC] = &nss_cc_xgmac2_ptp_ref_div_clk_src.clkr,
+};
+
+static const struct qcom_reset_map nss_cc_ipq5424_resets[] = {
+ [NSS_CC_CE_APB_CLK_ARES] = { 0x5e8, 2 },
+ [NSS_CC_CE_AXI_CLK_ARES] = { 0x5ec, 2 },
+ [NSS_CC_DEBUG_CLK_ARES] = { 0x70c, 2 },
+ [NSS_CC_EIP_CLK_ARES] = { 0x658, 2 },
+ [NSS_CC_NSS_CSR_CLK_ARES] = { 0x6b0, 2 },
+ [NSS_CC_NSSNOC_CE_APB_CLK_ARES] = { 0x5f4, 2 },
+ [NSS_CC_NSSNOC_CE_AXI_CLK_ARES] = { 0x5f8, 2 },
+ [NSS_CC_NSSNOC_EIP_CLK_ARES] = { 0x660, 2 },
+ [NSS_CC_NSSNOC_NSS_CSR_CLK_ARES] = { 0x6b4, 2 },
+ [NSS_CC_NSSNOC_PPE_CLK_ARES] = { 0x440, 2 },
+ [NSS_CC_NSSNOC_PPE_CFG_CLK_ARES] = { 0x444, 2 },
+ [NSS_CC_PORT1_MAC_CLK_ARES] = { 0x428, 2 },
+ [NSS_CC_PORT1_RX_CLK_ARES] = { 0x4fc, 2 },
+ [NSS_CC_PORT1_TX_CLK_ARES] = { 0x504, 2 },
+ [NSS_CC_PORT2_MAC_CLK_ARES] = { 0x430, 2 },
+ [NSS_CC_PORT2_RX_CLK_ARES] = { 0x50c, 2 },
+ [NSS_CC_PORT2_TX_CLK_ARES] = { 0x514, 2 },
+ [NSS_CC_PORT3_MAC_CLK_ARES] = { 0x438, 2 },
+ [NSS_CC_PORT3_RX_CLK_ARES] = { 0x51c, 2 },
+ [NSS_CC_PORT3_TX_CLK_ARES] = { 0x524, 2 },
+ [NSS_CC_PPE_BCR] = { 0x3e8 },
+ [NSS_CC_PPE_EDMA_CLK_ARES] = { 0x41c, 2 },
+ [NSS_CC_PPE_EDMA_CFG_CLK_ARES] = { 0x424, 2 },
+ [NSS_CC_PPE_SWITCH_BTQ_CLK_ARES] = { 0x408, 2 },
+ [NSS_CC_PPE_SWITCH_CLK_ARES] = { 0x410, 2 },
+ [NSS_CC_PPE_SWITCH_CFG_CLK_ARES] = { 0x418, 2 },
+ [NSS_CC_PPE_SWITCH_IPE_CLK_ARES] = { 0x400, 2 },
+ [NSS_CC_UNIPHY_PORT1_RX_CLK_ARES] = { 0x57c, 2 },
+ [NSS_CC_UNIPHY_PORT1_TX_CLK_ARES] = { 0x580, 2 },
+ [NSS_CC_UNIPHY_PORT2_RX_CLK_ARES] = { 0x584, 2 },
+ [NSS_CC_UNIPHY_PORT2_TX_CLK_ARES] = { 0x588, 2 },
+ [NSS_CC_UNIPHY_PORT3_RX_CLK_ARES] = { 0x58c, 2 },
+ [NSS_CC_UNIPHY_PORT3_TX_CLK_ARES] = { 0x590, 2 },
+ [NSS_CC_XGMAC0_PTP_REF_CLK_ARES] = { 0x448, 2 },
+ [NSS_CC_XGMAC1_PTP_REF_CLK_ARES] = { 0x44c, 2 },
+ [NSS_CC_XGMAC2_PTP_REF_CLK_ARES] = { 0x450, 2 },
+};
+
+static const struct regmap_config nss_cc_ipq5424_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x800,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_hws_data icc_ipq5424_nss_hws[] = {
+ { MASTER_NSSNOC_PPE, SLAVE_NSSNOC_PPE, NSS_CC_NSSNOC_PPE_CLK },
+ { MASTER_NSSNOC_PPE_CFG, SLAVE_NSSNOC_PPE_CFG, NSS_CC_NSSNOC_PPE_CFG_CLK },
+ { MASTER_NSSNOC_NSS_CSR, SLAVE_NSSNOC_NSS_CSR, NSS_CC_NSSNOC_NSS_CSR_CLK },
+ { MASTER_NSSNOC_CE_AXI, SLAVE_NSSNOC_CE_AXI, NSS_CC_NSSNOC_CE_AXI_CLK },
+ { MASTER_NSSNOC_CE_APB, SLAVE_NSSNOC_CE_APB, NSS_CC_NSSNOC_CE_APB_CLK },
+ { MASTER_NSSNOC_EIP, SLAVE_NSSNOC_EIP, NSS_CC_NSSNOC_EIP_CLK },
+};
+
+#define IPQ_NSSCC_ID (5424 * 2) /* arbitrary but unique first icc node id for this controller */
+
+static const struct qcom_cc_desc nss_cc_ipq5424_desc = {
+ .config = &nss_cc_ipq5424_regmap_config,
+ .clks = nss_cc_ipq5424_clocks,
+ .num_clks = ARRAY_SIZE(nss_cc_ipq5424_clocks),
+ .resets = nss_cc_ipq5424_resets,
+ .num_resets = ARRAY_SIZE(nss_cc_ipq5424_resets),
+ .icc_hws = icc_ipq5424_nss_hws,
+ .num_icc_hws = ARRAY_SIZE(icc_ipq5424_nss_hws),
+ .icc_first_node_id = IPQ_NSSCC_ID,
+};
+
+static const struct dev_pm_ops nss_cc_ipq5424_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static const struct of_device_id nss_cc_ipq5424_match_table[] = {
+ { .compatible = "qcom,ipq5424-nsscc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nss_cc_ipq5424_match_table);
+
+static int nss_cc_ipq5424_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to enable runtime PM\n");
+
+ ret = devm_pm_clk_create(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to create PM clock\n");
+
+ ret = pm_clk_add(&pdev->dev, "bus");
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to add bus clock\n");
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Fail to resume\n");
+
+ ret = qcom_cc_probe(pdev, &nss_cc_ipq5424_desc);
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
+}
+
+static struct platform_driver nss_cc_ipq5424_driver = {
+ .probe = nss_cc_ipq5424_probe,
+ .driver = {
+ .name = "qcom,ipq5424-nsscc",
+ .of_match_table = nss_cc_ipq5424_match_table,
+ .pm = &nss_cc_ipq5424_pm_ops,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(nss_cc_ipq5424_driver);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. NSSCC IPQ5424 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/nsscc-ipq9574.c b/drivers/clk/qcom/nsscc-ipq9574.c
index 64c6b05ff066..c8b11b04a7c2 100644
--- a/drivers/clk/qcom/nsscc-ipq9574.c
+++ b/drivers/clk/qcom/nsscc-ipq9574.c
@@ -3016,7 +3016,7 @@ static const struct qcom_reset_map nss_cc_ipq9574_resets[] = {
[NSSPORT4_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(5, 4) },
[NSSPORT5_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(3, 2) },
[NSSPORT6_RESET] = { .reg = 0x28a24, .bitmask = GENMASK(1, 0) },
- [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
+ [EDMA_HW_RESET] = { .reg = 0x28a08, .bitmask = GENMASK(16, 15) },
};
static const struct regmap_config nss_cc_ipq9574_regmap_config = {
diff --git a/drivers/clk/qcom/tcsrcc-glymur.c b/drivers/clk/qcom/tcsrcc-glymur.c
new file mode 100644
index 000000000000..215bc2ac548d
--- /dev/null
+++ b/drivers/clk/qcom/tcsrcc-glymur.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,glymur-tcsr.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO_PAD,
+};
+
+static struct clk_branch tcsr_edp_clkref_en = {
+ .halt_reg = 0x60,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x60,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_edp_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_1_clkref_en = {
+ .halt_reg = 0x48,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x48,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_2_clkref_en = {
+ .halt_reg = 0x4c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x4c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_3_clkref_en = {
+ .halt_reg = 0x54,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x54,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_pcie_4_clkref_en = {
+ .halt_reg = 0x58,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x58,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_pcie_4_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_1_clkref_en = {
+ .halt_reg = 0x6c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x6c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_2_clkref_en = {
+ .halt_reg = 0x70,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x70,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_3_clkref_en = {
+ .halt_reg = 0x74,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x74,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_3_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb2_4_clkref_en = {
+ .halt_reg = 0x88,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x88,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb2_4_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_0_clkref_en = {
+ .halt_reg = 0x64,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x64,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_0_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb3_1_clkref_en = {
+ .halt_reg = 0x68,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x68,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb3_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_1_clkref_en = {
+ .halt_reg = 0x44,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x44,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb4_1_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch tcsr_usb4_2_clkref_en = {
+ .halt_reg = 0x5c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x5c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "tcsr_usb4_2_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *tcsr_cc_glymur_clocks[] = {
+ [TCSR_EDP_CLKREF_EN] = &tcsr_edp_clkref_en.clkr,
+ [TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
+ [TCSR_PCIE_2_CLKREF_EN] = &tcsr_pcie_2_clkref_en.clkr,
+ [TCSR_PCIE_3_CLKREF_EN] = &tcsr_pcie_3_clkref_en.clkr,
+ [TCSR_PCIE_4_CLKREF_EN] = &tcsr_pcie_4_clkref_en.clkr,
+ [TCSR_USB2_1_CLKREF_EN] = &tcsr_usb2_1_clkref_en.clkr,
+ [TCSR_USB2_2_CLKREF_EN] = &tcsr_usb2_2_clkref_en.clkr,
+ [TCSR_USB2_3_CLKREF_EN] = &tcsr_usb2_3_clkref_en.clkr,
+ [TCSR_USB2_4_CLKREF_EN] = &tcsr_usb2_4_clkref_en.clkr,
+ [TCSR_USB3_0_CLKREF_EN] = &tcsr_usb3_0_clkref_en.clkr,
+ [TCSR_USB3_1_CLKREF_EN] = &tcsr_usb3_1_clkref_en.clkr,
+ [TCSR_USB4_1_CLKREF_EN] = &tcsr_usb4_1_clkref_en.clkr,
+ [TCSR_USB4_2_CLKREF_EN] = &tcsr_usb4_2_clkref_en.clkr,
+};
+
+static const struct regmap_config tcsr_cc_glymur_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x94,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc tcsr_cc_glymur_desc = {
+ .config = &tcsr_cc_glymur_regmap_config,
+ .clks = tcsr_cc_glymur_clocks,
+ .num_clks = ARRAY_SIZE(tcsr_cc_glymur_clocks),
+};
+
+static const struct of_device_id tcsr_cc_glymur_match_table[] = {
+ { .compatible = "qcom,glymur-tcsr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tcsr_cc_glymur_match_table);
+
+static int tcsr_cc_glymur_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &tcsr_cc_glymur_desc);
+}
+
+static struct platform_driver tcsr_cc_glymur_driver = {
+ .probe = tcsr_cc_glymur_probe,
+ .driver = {
+ .name = "tcsrcc-glymur",
+ .of_match_table = tcsr_cc_glymur_match_table,
+ },
+};
+
+static int __init tcsr_cc_glymur_init(void)
+{
+ return platform_driver_register(&tcsr_cc_glymur_driver);
+}
+subsys_initcall(tcsr_cc_glymur_init);
+
+static void __exit tcsr_cc_glymur_exit(void)
+{
+ platform_driver_unregister(&tcsr_cc_glymur_driver);
+}
+module_exit(tcsr_cc_glymur_exit);
+
+MODULE_DESCRIPTION("QTI TCSRCC GLYMUR Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/qcom/tcsrcc-x1e80100.c b/drivers/clk/qcom/tcsrcc-x1e80100.c
index ff61769a0807..a367e1f55622 100644
--- a/drivers/clk/qcom/tcsrcc-x1e80100.c
+++ b/drivers/clk/qcom/tcsrcc-x1e80100.c
@@ -29,6 +29,10 @@ static struct clk_branch tcsr_edp_clkref_en = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "tcsr_edp_clkref_en",
+ .parent_data = &(const struct clk_parent_data){
+ .index = DT_BI_TCXO_PAD,
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/qcom/videocc-milos.c b/drivers/clk/qcom/videocc-milos.c
index 998301e0ba88..acc9df295d4f 100644
--- a/drivers/clk/qcom/videocc-milos.c
+++ b/drivers/clk/qcom/videocc-milos.c
@@ -366,7 +366,7 @@ static struct qcom_cc_driver_data video_cc_milos_driver_data = {
.num_clk_cbcrs = ARRAY_SIZE(video_cc_milos_critical_cbcrs),
};
-static struct qcom_cc_desc video_cc_milos_desc = {
+static const struct qcom_cc_desc video_cc_milos_desc = {
.config = &video_cc_milos_regmap_config,
.clks = video_cc_milos_clocks,
.num_clks = ARRAY_SIZE(video_cc_milos_clocks),
diff --git a/drivers/clk/qcom/videocc-sm8750.c b/drivers/clk/qcom/videocc-sm8750.c
new file mode 100644
index 000000000000..0acf3104d702
--- /dev/null
+++ b/drivers/clk/qcom/videocc-sm8750.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,sm8750-videocc.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ DT_BI_TCXO,
+ DT_BI_TCXO_AO,
+ DT_SLEEP_CLK,
+};
+
+enum {
+ P_BI_TCXO,
+ P_SLEEP_CLK,
+ P_VIDEO_CC_PLL0_OUT_MAIN,
+};
+
+static const struct pll_vco taycan_elu_vco[] = {
+ { 249600000, 2500000000, 0 },
+};
+
+static const struct alpha_pll_config video_cc_pll0_config = {
+ .l = 0x25,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x19660387,
+ .config_ctl_hi_val = 0x098060a0,
+ .config_ctl_hi1_val = 0xb416cb20,
+ .user_ctl_val = 0x00000000,
+ .user_ctl_hi_val = 0x00000002,
+};
+
+static struct clk_alpha_pll video_cc_pll0 = {
+ .offset = 0x0,
+ .config = &video_cc_pll0_config,
+ .vco_table = taycan_elu_vco,
+ .num_vco = ARRAY_SIZE(taycan_elu_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TAYCAN_ELU],
+ .clkr = {
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_pll0",
+ .parent_data = &(const struct clk_parent_data) {
+ .index = DT_BI_TCXO,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_taycan_elu_ops,
+ },
+ },
+};
+
+static const struct parent_map video_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_0_ao[] = {
+ { .index = DT_BI_TCXO_AO },
+};
+
+static const struct parent_map video_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_VIDEO_CC_PLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_1[] = {
+ { .index = DT_BI_TCXO },
+ { .hw = &video_cc_pll0.clkr.hw },
+};
+
+static const struct parent_map video_cc_parent_map_2[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data video_cc_parent_data_2_ao[] = {
+ { .index = DT_SLEEP_CLK },
+};
+
+static const struct freq_tbl ftbl_video_cc_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_ahb_clk_src = {
+ .cmd_rcgr = 0x8018,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_ahb_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
+ F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1260000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1600000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1710000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ F(1890000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_mvs0_clk_src = {
+ .cmd_rcgr = 0x8000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_1,
+ .freq_tbl = ftbl_video_cc_mvs0_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk_src",
+ .parent_data = video_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 video_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x80e0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_2,
+ .freq_tbl = ftbl_video_cc_sleep_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_sleep_clk_src",
+ .parent_data = video_cc_parent_data_2_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_2_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 video_cc_xo_clk_src = {
+ .cmd_rcgr = 0x80bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = video_cc_parent_map_0,
+ .freq_tbl = ftbl_video_cc_ahb_clk_src,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_xo_clk_src",
+ .parent_data = video_cc_parent_data_0_ao,
+ .num_parents = ARRAY_SIZE(video_cc_parent_data_0_ao),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
+ .reg = 0x809c,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
+ .reg = 0x8060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_div2_div_clk_src",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_branch video_cc_mvs0_clk = {
+ .halt_reg = 0x807c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x807c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x807c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_mem_branch video_cc_mvs0_freerun_clk = {
+ .mem_enable_reg = 0x8090,
+ .mem_ack_reg = 0x8090,
+ .mem_enable_mask = BIT(3),
+ .mem_enable_ack_mask = GENMASK(11, 10),
+ .mem_enable_invert = true,
+ .branch = {
+ .halt_reg = 0x808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_freerun_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_mem_ops,
+ },
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0_shift_clk = {
+ .halt_reg = 0x80d8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80d8,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_clk = {
+ .halt_reg = 0x804c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_freerun_clk = {
+ .halt_reg = 0x805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_freerun_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_mvs0c_div2_div_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch video_cc_mvs0c_shift_clk = {
+ .halt_reg = 0x80dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x80dc,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x80dc,
+ .enable_mask = BIT(0),
+ .hw.init = &(const struct clk_init_data) {
+ .name = "video_cc_mvs0c_shift_clk",
+ .parent_hws = (const struct clk_hw*[]) {
+ &video_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc video_cc_mvs0c_gdsc = {
+ .gdscr = 0x8034,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0c_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc video_cc_mvs0_gdsc = {
+ .gdscr = 0x8068,
+ .en_rest_wait_val = 0x2,
+ .en_few_wait_val = 0x2,
+ .clk_dis_wait_val = 0x6,
+ .pd = {
+ .name = "video_cc_mvs0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .parent = &video_cc_mvs0c_gdsc.pd,
+ .flags = POLL_CFG_GDSCR | RETAIN_FF_ENABLE | HW_CTRL_TRIGGER,
+};
+
+static struct clk_regmap *video_cc_sm8750_clocks[] = {
+ [VIDEO_CC_AHB_CLK_SRC] = &video_cc_ahb_clk_src.clkr,
+ [VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
+ [VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
+ [VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
+ [VIDEO_CC_MVS0_FREERUN_CLK] = &video_cc_mvs0_freerun_clk.branch.clkr,
+ [VIDEO_CC_MVS0_SHIFT_CLK] = &video_cc_mvs0_shift_clk.clkr,
+ [VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
+ [VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
+ [VIDEO_CC_MVS0C_FREERUN_CLK] = &video_cc_mvs0c_freerun_clk.clkr,
+ [VIDEO_CC_MVS0C_SHIFT_CLK] = &video_cc_mvs0c_shift_clk.clkr,
+ [VIDEO_CC_PLL0] = &video_cc_pll0.clkr,
+ [VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
+ [VIDEO_CC_XO_CLK_SRC] = &video_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *video_cc_sm8750_gdscs[] = {
+ [VIDEO_CC_MVS0_GDSC] = &video_cc_mvs0_gdsc,
+ [VIDEO_CC_MVS0C_GDSC] = &video_cc_mvs0c_gdsc,
+};
+
+static const struct qcom_reset_map video_cc_sm8750_resets[] = {
+ [VIDEO_CC_INTERFACE_BCR] = { 0x80a0 },
+ [VIDEO_CC_MVS0_BCR] = { 0x8064 },
+ [VIDEO_CC_MVS0C_CLK_ARES] = { 0x804c, 2 },
+ [VIDEO_CC_MVS0C_BCR] = { 0x8030 },
+ [VIDEO_CC_MVS0_FREERUN_CLK_ARES] = { 0x808c, 2 },
+ [VIDEO_CC_MVS0C_FREERUN_CLK_ARES] = { 0x805c, 2 },
+ [VIDEO_CC_XO_CLK_ARES] = { 0x80d4, 2 },
+};
+
+static const struct regmap_config video_cc_sm8750_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9f4c,
+ .fast_io = true,
+};
+
+static struct clk_alpha_pll *video_cc_sm8750_plls[] = {
+ &video_cc_pll0,
+};
+
+static u32 video_cc_sm8750_critical_cbcrs[] = {
+ 0x80a4, /* VIDEO_CC_AHB_CLK */
+ 0x80f8, /* VIDEO_CC_SLEEP_CLK */
+ 0x80d4, /* VIDEO_CC_XO_CLK */
+};
+
+static void clk_sm8750_regs_configure(struct device *dev, struct regmap *regmap)
+{
+ /* Update DLY_ACCU_RED_SHIFTER_DONE to 0xF for mvs0, mvs0c */
+ regmap_update_bits(regmap, 0x8074, GENMASK(25, 21), GENMASK(25, 21));
+ regmap_update_bits(regmap, 0x8040, GENMASK(25, 21), GENMASK(25, 21));
+
+ regmap_update_bits(regmap, 0x9f24, BIT(0), BIT(0));
+}
+
+static struct qcom_cc_driver_data video_cc_sm8750_driver_data = {
+ .alpha_plls = video_cc_sm8750_plls,
+ .num_alpha_plls = ARRAY_SIZE(video_cc_sm8750_plls),
+ .clk_cbcrs = video_cc_sm8750_critical_cbcrs,
+ .num_clk_cbcrs = ARRAY_SIZE(video_cc_sm8750_critical_cbcrs),
+ .clk_regs_configure = clk_sm8750_regs_configure,
+};
+
+static struct qcom_cc_desc video_cc_sm8750_desc = {
+ .config = &video_cc_sm8750_regmap_config,
+ .clks = video_cc_sm8750_clocks,
+ .num_clks = ARRAY_SIZE(video_cc_sm8750_clocks),
+ .resets = video_cc_sm8750_resets,
+ .num_resets = ARRAY_SIZE(video_cc_sm8750_resets),
+ .gdscs = video_cc_sm8750_gdscs,
+ .num_gdscs = ARRAY_SIZE(video_cc_sm8750_gdscs),
+ .use_rpm = true,
+ .driver_data = &video_cc_sm8750_driver_data,
+};
+
+static const struct of_device_id video_cc_sm8750_match_table[] = {
+ { .compatible = "qcom,sm8750-videocc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, video_cc_sm8750_match_table);
+
+static int video_cc_sm8750_probe(struct platform_device *pdev)
+{
+ return qcom_cc_probe(pdev, &video_cc_sm8750_desc);
+}
+
+static struct platform_driver video_cc_sm8750_driver = {
+ .probe = video_cc_sm8750_probe,
+ .driver = {
+ .name = "video_cc-sm8750",
+ .of_match_table = video_cc_sm8750_match_table,
+ },
+};
+
+static int __init video_cc_sm8750_init(void)
+{
+ return platform_driver_register(&video_cc_sm8750_driver);
+}
+subsys_initcall(video_cc_sm8750_init);
+
+static void __exit video_cc_sm8750_exit(void)
+{
+ platform_driver_unregister(&video_cc_sm8750_driver);
+}
+module_exit(video_cc_sm8750_exit);
+
+MODULE_DESCRIPTION("QTI VIDEO_CC SM8750 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/renesas/clk-div6.c b/drivers/clk/renesas/clk-div6.c
index 3abd6e5400ad..f7b827b5e9b2 100644
--- a/drivers/clk/renesas/clk-div6.c
+++ b/drivers/clk/renesas/clk-div6.c
@@ -7,6 +7,7 @@
* Contact: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -171,8 +172,7 @@ static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
if (clock->src_mask == 0)
return 0;
- hw_index = (readl(clock->reg) & clock->src_mask) >>
- __ffs(clock->src_mask);
+ hw_index = field_get(clock->src_mask, readl(clock->reg));
for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
if (clock->parents[i] == hw_index)
return i;
@@ -191,7 +191,7 @@ static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
if (index >= clk_hw_get_num_parents(hw))
return -EINVAL;
- src = clock->parents[index] << __ffs(clock->src_mask);
+ src = field_prep(clock->src_mask, clock->parents[index]);
writel((readl(clock->reg) & ~clock->src_mask) | src, clock->reg);
return 0;
}
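
The div6 change above swaps the open-coded shift-by-__ffs() for the lowercase field_get()/field_prep() helpers from <linux/bitfield.h>, which accept non-constant masks. For intuition, here is a user-space re-implementation; a sketch only, since the kernel versions add type and bounds checking. Note the mask must be non-zero, which the driver guarantees by bailing out early when src_mask == 0:

/* Toy field_get()/field_prep(): shift amount is the mask's lowest set bit. */
#include <stdint.h>
#include <stdio.h>

static uint32_t my_field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);	/* ctz == __ffs here */
}

static uint32_t my_field_prep(uint32_t mask, uint32_t val)
{
	return (val << __builtin_ctz(mask)) & mask;
}

int main(void)
{
	uint32_t src_mask = 0x7 << 6;	/* e.g. a 3-bit parent-select field */

	uint32_t reg = my_field_prep(src_mask, 5);
	printf("reg=0x%x field=%u\n", reg, my_field_get(src_mask, reg));
	return 0;
}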
diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c
index 5bc473c2adb3..2f65fe2c6bdf 100644
--- a/drivers/clk/renesas/clk-mstp.c
+++ b/drivers/clk/renesas/clk-mstp.c
@@ -303,6 +303,9 @@ void cpg_mstp_detach_dev(struct generic_pm_domain *unused, struct device *dev)
pm_clk_destroy(dev);
}
+static struct device_node *cpg_mstp_pd_np __initdata;
+static struct generic_pm_domain *cpg_mstp_pd_genpd __initdata;
+
void __init cpg_mstp_add_clk_domain(struct device_node *np)
{
struct generic_pm_domain *pd;
@@ -324,5 +327,20 @@ void __init cpg_mstp_add_clk_domain(struct device_node *np)
pd->detach_dev = cpg_mstp_detach_dev;
pm_genpd_init(pd, &pm_domain_always_on_gov, false);
- of_genpd_add_provider_simple(np, pd);
+ cpg_mstp_pd_np = of_node_get(np);
+ cpg_mstp_pd_genpd = pd;
+}
+
+static int __init cpg_mstp_pd_init_provider(void)
+{
+ int error;
+
+ if (!cpg_mstp_pd_np)
+ return -ENODEV;
+
+ error = of_genpd_add_provider_simple(cpg_mstp_pd_np, cpg_mstp_pd_genpd);
+
+ of_node_put(cpg_mstp_pd_np);
+ return error;
}
+postcore_initcall(cpg_mstp_pd_init_provider);
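
The clk-mstp change splits domain setup into two phases: the genpd is still initialized during early boot, but of_genpd_add_provider_simple() is deferred to a postcore initcall, so the provider only becomes visible to lookups once the genpd framework is ready for them. A toy two-phase sketch of the same publish-later idea, in user-space C with illustrative names (the phase boundaries here are stand-ins, not the kernel's initcall levels):

/* Toy model of "create early, publish late" provider registration. */
#include <stdio.h>

struct provider { const char *name; int published; };

static struct provider pd = { "cpg_mstp", 0 };
static struct provider *pending;	/* stashed by the early phase */

static void early_setup(void)		/* ~ cpg_mstp_add_clk_domain() */
{
	/* the domain exists and can power devices, but is not looked up yet */
	pending = &pd;
}

static int late_publish(void)		/* ~ the postcore_initcall above */
{
	if (!pending)
		return -1;
	pending->published = 1;		/* ~ of_genpd_add_provider_simple() */
	return 0;
}

int main(void)
{
	early_setup();
	late_publish();
	printf("%s published=%d\n", pd.name, pd.published);
	return 0;
}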
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index 1be7b9592aa6..d67dff05d9f4 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -26,7 +26,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R8A779A0_CLK_OSC,
+ LAST_DT_CORE_CLK = R8A779A0_CLK_ZG,
/* External Input Clocks */
CLK_EXTAL,
@@ -39,6 +39,7 @@ enum clk_ids {
CLK_PLL21,
CLK_PLL30,
CLK_PLL31,
+ CLK_PLL4,
CLK_PLL5,
CLK_PLL1_DIV2,
CLK_PLL20_DIV2,
@@ -65,6 +66,7 @@ enum clk_ids {
#define CPG_PLL21CR 0x0838 /* PLL21 Control Register */
#define CPG_PLL30CR 0x083c /* PLL30 Control Register */
#define CPG_PLL31CR 0x0840 /* PLL31 Control Register */
+#define CPG_PLL4CR 0x0844 /* PLL4 Control Register */
static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* External Clock Inputs */
@@ -79,6 +81,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_PLL(".pll21", CLK_PLL21, CPG_PLL21CR),
DEF_PLL(".pll30", CLK_PLL30, CPG_PLL30CR),
DEF_PLL(".pll31", CLK_PLL31, CPG_PLL31CR),
+ DEF_PLL(".pll4", CLK_PLL4, CPG_PLL4CR),
DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
DEF_FIXED(".pll20_div2", CLK_PLL20_DIV2, CLK_PLL20, 2, 1),
@@ -98,6 +101,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* Core Clock Outputs */
DEF_GEN4_Z("z0", R8A779A0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL20, 2, 0),
DEF_GEN4_Z("z1", R8A779A0_CLK_Z1, CLK_TYPE_GEN4_Z, CLK_PLL21, 2, 8),
+ DEF_GEN4_Z("zg", R8A779A0_CLK_ZG, CLK_TYPE_GEN4_Z, CLK_PLL4, 2, 88),
DEF_FIXED("zx", R8A779A0_CLK_ZX, CLK_PLL20_DIV2, 2, 1),
DEF_FIXED("s1d1", R8A779A0_CLK_S1D1, CLK_S1, 1, 1),
DEF_FIXED("s1d2", R8A779A0_CLK_S1D2, CLK_S1, 2, 1),
@@ -138,6 +142,7 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("3dge", 0, R8A779A0_CLK_ZG),
DEF_MOD("isp0", 16, R8A779A0_CLK_S1D1),
DEF_MOD("isp1", 17, R8A779A0_CLK_S1D1),
DEF_MOD("isp2", 18, R8A779A0_CLK_S1D1),
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index dcda19318b2a..0f5c91b5dfa9 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -1333,9 +1333,9 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
if (IS_ERR(mclk))
return PTR_ERR(mclk);
- clocks->reg = of_iomap(np, 0);
- if (WARN_ON(!clocks->reg))
- return -ENOMEM;
+ clocks->reg = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(clocks->reg))
+ return PTR_ERR(clocks->reg);
r9a06g032_init_h2mode(clocks);
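
The r9a06g032 change replaces of_iomap(), which returns NULL on failure, with devm_of_iomap(), which returns an ERR_PTR-encoded errno, hence the check flipping from !clocks->reg to IS_ERR(). For readers unfamiliar with that convention, here is a user-space model of the kernel's pointer-encoded errno trick; a sketch of include/linux/err.h, which reserves the top page of the address space for error values:

/* Toy ERR_PTR/IS_ERR/PTR_ERR: small negative errnos ride inside a pointer. */
#include <stdio.h>

#define MAX_ERRNO	4095

static void *ERR_PTR(long error)
{
	return (void *)error;
}

static long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *ok = &(int){ 0 };
	void *bad = ERR_PTR(-12);	/* -ENOMEM */

	printf("ok:  IS_ERR=%d\n", IS_ERR(ok));
	printf("bad: IS_ERR=%d err=%ld\n", IS_ERR(bad), PTR_ERR(bad));
	return 0;
}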
diff --git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c
index 02dc5cecfd8d..33e9a1223c72 100644
--- a/drivers/clk/renesas/r9a07g043-cpg.c
+++ b/drivers/clk/renesas/r9a07g043-cpg.c
@@ -164,143 +164,143 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
static const struct rzg2l_mod_clk r9a07g043_mod_clks[] = {
#ifdef CONFIG_ARM64
DEF_MOD("gic", R9A07G043_GIC600_GICCLK, R9A07G043_CLK_P1,
- 0x514, 0, 0),
+ 0x514, 0, MSTOP(BUS_REG1, BIT(7))),
DEF_MOD("ia55_pclk", R9A07G043_IA55_PCLK, R9A07G043_CLK_P2,
- 0x518, 0, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("ia55_clk", R9A07G043_IA55_CLK, R9A07G043_CLK_P1,
- 0x518, 1, 0),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
#endif
#ifdef CONFIG_RISCV
DEF_MOD("iax45_pclk", R9A07G043_IAX45_PCLK, R9A07G043_CLK_P2,
- 0x518, 0, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("iax45_clk", R9A07G043_IAX45_CLK, R9A07G043_CLK_P1,
- 0x518, 1, 0),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
#endif
DEF_MOD("dmac_aclk", R9A07G043_DMAC_ACLK, R9A07G043_CLK_P1,
- 0x52c, 0, 0),
+ 0x52c, 0, MSTOP(BUS_REG1, BIT(2))),
DEF_MOD("dmac_pclk", R9A07G043_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1, 0),
+ 0x52c, 1, MSTOP(BUS_REG1, BIT(3))),
DEF_MOD("ostm0_pclk", R9A07G043_OSTM0_PCLK, R9A07G043_CLK_P0,
- 0x534, 0, 0),
+ 0x534, 0, MSTOP(BUS_REG0, BIT(4))),
DEF_MOD("ostm1_pclk", R9A07G043_OSTM1_PCLK, R9A07G043_CLK_P0,
- 0x534, 1, 0),
+ 0x534, 1, MSTOP(BUS_REG0, BIT(5))),
DEF_MOD("ostm2_pclk", R9A07G043_OSTM2_PCLK, R9A07G043_CLK_P0,
- 0x534, 2, 0),
+ 0x534, 2, MSTOP(BUS_REG0, BIT(6))),
DEF_MOD("mtu_x_mck", R9A07G043_MTU_X_MCK_MTU3, R9A07G043_CLK_P0,
- 0x538, 0, 0),
+ 0x538, 0, MSTOP(BUS_MCPU1, BIT(2))),
DEF_MOD("wdt0_pclk", R9A07G043_WDT0_PCLK, R9A07G043_CLK_P0,
- 0x548, 0, 0),
+ 0x548, 0, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt0_clk", R9A07G043_WDT0_CLK, R9A07G043_OSCCLK,
- 0x548, 1, 0),
+ 0x548, 1, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("spi_clk2", R9A07G043_SPI_CLK2, R9A07G043_CLK_SPI1,
- 0x550, 0, 0),
+ 0x550, 0, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("spi_clk", R9A07G043_SPI_CLK, R9A07G043_CLK_SPI0,
- 0x550, 1, 0),
+ 0x550, 1, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("sdhi0_imclk", R9A07G043_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0, 0),
+ 0x554, 0, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_imclk2", R9A07G043_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1, 0),
+ 0x554, 1, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_clk_hs", R9A07G043_SDHI0_CLK_HS, R9A07G043_CLK_SD0,
- 0x554, 2, 0),
+ 0x554, 2, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_aclk", R9A07G043_SDHI0_ACLK, R9A07G043_CLK_P1,
- 0x554, 3, 0),
+ 0x554, 3, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi1_imclk", R9A07G043_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4, 0),
+ 0x554, 4, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_imclk2", R9A07G043_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5, 0),
+ 0x554, 5, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_clk_hs", R9A07G043_SDHI1_CLK_HS, R9A07G043_CLK_SD1,
- 0x554, 6, 0),
+ 0x554, 6, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_aclk", R9A07G043_SDHI1_ACLK, R9A07G043_CLK_P1,
- 0x554, 7, 0),
+ 0x554, 7, MSTOP(BUS_PERI_COM, BIT(1))),
#ifdef CONFIG_ARM64
- DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0, 0),
- DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
- 0x564, 1, 0),
- DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
- 0x564, 2, 0),
- DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
- 0x564, 3, 0),
+ DEF_MOD("cru_sysclk", R9A07G043_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_vclk", R9A07G043_CRU_VCLK, R9A07G043_CLK_M2,
+ 0x564, 1, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_pclk", R9A07G043_CRU_PCLK, R9A07G043_CLK_ZT,
+ 0x564, 2, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_aclk", R9A07G043_CRU_ACLK, R9A07G043_CLK_M0,
+ 0x564, 3, MSTOP(BUS_PERI_VIDEO, BIT(3))),
DEF_COUPLED("lcdc_clk_a", R9A07G043_LCDC_CLK_A, R9A07G043_CLK_M0,
- 0x56c, 0, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_COUPLED("lcdc_clk_p", R9A07G043_LCDC_CLK_P, R9A07G043_CLK_ZT,
- 0x56c, 0, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_MOD("lcdc_clk_d", R9A07G043_LCDC_CLK_D, R9A07G043_CLK_M3,
- 0x56c, 1, 0),
+ 0x56c, 1, MSTOP(BUS_PERI_VIDEO, BIT(9))),
#endif
DEF_MOD("ssi0_pclk", R9A07G043_SSI0_PCLK2, R9A07G043_CLK_P0,
- 0x570, 0, 0),
+ 0x570, 0, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi0_sfr", R9A07G043_SSI0_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 1, 0),
+ 0x570, 1, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi1_pclk", R9A07G043_SSI1_PCLK2, R9A07G043_CLK_P0,
- 0x570, 2, 0),
+ 0x570, 2, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi1_sfr", R9A07G043_SSI1_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 3, 0),
+ 0x570, 3, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi2_pclk", R9A07G043_SSI2_PCLK2, R9A07G043_CLK_P0,
- 0x570, 4, 0),
+ 0x570, 4, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi2_sfr", R9A07G043_SSI2_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 5, 0),
+ 0x570, 5, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi3_pclk", R9A07G043_SSI3_PCLK2, R9A07G043_CLK_P0,
- 0x570, 6, 0),
+ 0x570, 6, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("ssi3_sfr", R9A07G043_SSI3_PCLK_SFR, R9A07G043_CLK_P0,
- 0x570, 7, 0),
+ 0x570, 7, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("usb0_host", R9A07G043_USB_U2H0_HCLK, R9A07G043_CLK_P1,
- 0x578, 0, 0),
+ 0x578, 0, MSTOP(BUS_PERI_COM, BIT(5))),
DEF_MOD("usb1_host", R9A07G043_USB_U2H1_HCLK, R9A07G043_CLK_P1,
- 0x578, 1, 0),
+ 0x578, 1, MSTOP(BUS_PERI_COM, BIT(7))),
DEF_MOD("usb0_func", R9A07G043_USB_U2P_EXR_CPUCLK, R9A07G043_CLK_P1,
- 0x578, 2, 0),
+ 0x578, 2, MSTOP(BUS_PERI_COM, BIT(6))),
DEF_MOD("usb_pclk", R9A07G043_USB_PCLK, R9A07G043_CLK_P1,
- 0x578, 3, 0),
+ 0x578, 3, MSTOP(BUS_PERI_COM, BIT(4))),
DEF_COUPLED("eth0_axi", R9A07G043_ETH0_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 0, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth0_chi", R9A07G043_ETH0_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 0, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth1_axi", R9A07G043_ETH1_CLK_AXI, R9A07G043_CLK_M0,
- 0x57c, 1, 0),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_COUPLED("eth1_chi", R9A07G043_ETH1_CLK_CHI, R9A07G043_CLK_ZT,
- 0x57c, 1, 0),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_MOD("i2c0", R9A07G043_I2C0_PCLK, R9A07G043_CLK_P0,
- 0x580, 0, 0),
+ 0x580, 0, MSTOP(BUS_MCPU2, BIT(10))),
DEF_MOD("i2c1", R9A07G043_I2C1_PCLK, R9A07G043_CLK_P0,
- 0x580, 1, 0),
+ 0x580, 1, MSTOP(BUS_MCPU2, BIT(11))),
DEF_MOD("i2c2", R9A07G043_I2C2_PCLK, R9A07G043_CLK_P0,
- 0x580, 2, 0),
+ 0x580, 2, MSTOP(BUS_MCPU2, BIT(12))),
DEF_MOD("i2c3", R9A07G043_I2C3_PCLK, R9A07G043_CLK_P0,
- 0x580, 3, 0),
+ 0x580, 3, MSTOP(BUS_MCPU2, BIT(13))),
DEF_MOD("scif0", R9A07G043_SCIF0_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 0, 0),
+ 0x584, 0, MSTOP(BUS_MCPU2, BIT(1))),
DEF_MOD("scif1", R9A07G043_SCIF1_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 1, 0),
+ 0x584, 1, MSTOP(BUS_MCPU2, BIT(2))),
DEF_MOD("scif2", R9A07G043_SCIF2_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 2, 0),
+ 0x584, 2, MSTOP(BUS_MCPU2, BIT(3))),
DEF_MOD("scif3", R9A07G043_SCIF3_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 3, 0),
+ 0x584, 3, MSTOP(BUS_MCPU2, BIT(4))),
DEF_MOD("scif4", R9A07G043_SCIF4_CLK_PCK, R9A07G043_CLK_P0,
- 0x584, 4, 0),
+ 0x584, 4, MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("sci0", R9A07G043_SCI0_CLKP, R9A07G043_CLK_P0,
- 0x588, 0, 0),
+ 0x588, 0, MSTOP(BUS_MCPU2, BIT(7))),
DEF_MOD("sci1", R9A07G043_SCI1_CLKP, R9A07G043_CLK_P0,
- 0x588, 1, 0),
+ 0x588, 1, MSTOP(BUS_MCPU2, BIT(8))),
DEF_MOD("rspi0", R9A07G043_RSPI0_CLKB, R9A07G043_CLK_P0,
- 0x590, 0, 0),
+ 0x590, 0, MSTOP(BUS_MCPU1, BIT(14))),
DEF_MOD("rspi1", R9A07G043_RSPI1_CLKB, R9A07G043_CLK_P0,
- 0x590, 1, 0),
+ 0x590, 1, MSTOP(BUS_MCPU1, BIT(15))),
DEF_MOD("rspi2", R9A07G043_RSPI2_CLKB, R9A07G043_CLK_P0,
- 0x590, 2, 0),
+ 0x590, 2, MSTOP(BUS_MCPU2, BIT(0))),
DEF_MOD("canfd", R9A07G043_CANFD_PCLK, R9A07G043_CLK_P0,
- 0x594, 0, 0),
+ 0x594, 0, MSTOP(BUS_MCPU2, BIT(9))),
DEF_MOD("gpio", R9A07G043_GPIO_HCLK, R9A07G043_OSCCLK,
- 0x598, 0, 0),
+ 0x598, 0, MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A07G043_ADC_ADCLK, R9A07G043_CLK_TSU,
- 0x5a8, 0, 0),
+ 0x5a8, 0, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A07G043_ADC_PCLK, R9A07G043_CLK_P0,
- 0x5a8, 1, 0),
+ 0x5a8, 1, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A07G043_TSU_PCLK, R9A07G043_CLK_TSU,
- 0x5ac, 0, 0),
+ 0x5ac, 0, MSTOP(BUS_MCPU2, BIT(15))),
#ifdef CONFIG_RISCV
DEF_MOD("nceplic_aclk", R9A07G043_NCEPLIC_ACLK, R9A07G043_CLK_P1,
- 0x608, 0, 0),
+ 0x608, 0, MSTOP(BUS_REG1, BIT(7))),
#endif
};
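
Each module clock in the table above now carries an MSTOP() descriptor naming a bus register group and the bit(s) to toggle, so the CPG driver can deassert module standby alongside the clock gate. Assuming an encoding that packs the group index into the high half of a u32 and the bit mask into the low half (an assumption for illustration; the real macro and the BUS_* values live in drivers/clk/renesas/rzg2l-cpg.h), a pack/unpack sketch looks like this:

/* Toy MSTOP() packing: bus-register index in bits 31..16, mask in 15..0. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

#define MSTOP(idx, mask)	(((uint32_t)(idx) << 16) | (mask))
#define MSTOP_IDX(v)		((v) >> 16)
#define MSTOP_MASK(v)		((v) & 0xffff)

/* Hypothetical index values; the kernel defines its own numbering. */
enum { BUS_REG0, BUS_REG1, BUS_MCPU1, BUS_MCPU2, BUS_PERI_COM };

int main(void)
{
	uint32_t gic = MSTOP(BUS_REG1, BIT(7));	/* as in the "gic" entry above */

	printf("bus index %u, mask 0x%x\n", MSTOP_IDX(gic), MSTOP_MASK(gic));
	return 0;
}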
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index c851d4eeebbe..0dd264877b9a 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -242,163 +242,163 @@ static const struct {
} mod_clks = {
.common = {
DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1,
- 0x514, 0, 0),
+ 0x514, 0, MSTOP(BUS_REG1, BIT(7))),
DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2,
- 0x518, 0, 0),
+ 0x518, 0, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1,
- 0x518, 1, 0),
+ 0x518, 1, MSTOP(BUS_PERI_CPU, BIT(13))),
DEF_MOD("dmac_aclk", R9A07G044_DMAC_ACLK, R9A07G044_CLK_P1,
- 0x52c, 0, 0),
+ 0x52c, 0, MSTOP(BUS_REG1, BIT(2))),
DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
- 0x52c, 1, 0),
+ 0x52c, 1, MSTOP(BUS_REG1, BIT(3))),
DEF_MOD("ostm0_pclk", R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
- 0x534, 0, 0),
+ 0x534, 0, MSTOP(BUS_REG0, BIT(4))),
DEF_MOD("ostm1_pclk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
- 0x534, 1, 0),
+ 0x534, 1, MSTOP(BUS_REG0, BIT(5))),
DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
- 0x534, 2, 0),
+ 0x534, 2, MSTOP(BUS_REG0, BIT(6))),
DEF_MOD("mtu_x_mck", R9A07G044_MTU_X_MCK_MTU3, R9A07G044_CLK_P0,
- 0x538, 0, 0),
+ 0x538, 0, MSTOP(BUS_MCPU1, BIT(2))),
DEF_MOD("gpt_pclk", R9A07G044_GPT_PCLK, R9A07G044_CLK_P0,
- 0x540, 0, 0),
+ 0x540, 0, MSTOP(BUS_MCPU1, BIT(4))),
DEF_MOD("poeg_a_clkp", R9A07G044_POEG_A_CLKP, R9A07G044_CLK_P0,
- 0x544, 0, 0),
+ 0x544, 0, MSTOP(BUS_MCPU1, BIT(5))),
DEF_MOD("poeg_b_clkp", R9A07G044_POEG_B_CLKP, R9A07G044_CLK_P0,
- 0x544, 1, 0),
+ 0x544, 1, MSTOP(BUS_MCPU1, BIT(6))),
DEF_MOD("poeg_c_clkp", R9A07G044_POEG_C_CLKP, R9A07G044_CLK_P0,
- 0x544, 2, 0),
+ 0x544, 2, MSTOP(BUS_MCPU1, BIT(7))),
DEF_MOD("poeg_d_clkp", R9A07G044_POEG_D_CLKP, R9A07G044_CLK_P0,
- 0x544, 3, 0),
+ 0x544, 3, MSTOP(BUS_MCPU1, BIT(8))),
DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
- 0x548, 0, 0),
+ 0x548, 0, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
- 0x548, 1, 0),
+ 0x548, 1, MSTOP(BUS_REG0, BIT(2))),
DEF_MOD("wdt1_pclk", R9A07G044_WDT1_PCLK, R9A07G044_CLK_P0,
- 0x548, 2, 0),
+ 0x548, 2, MSTOP(BUS_REG0, BIT(3))),
DEF_MOD("wdt1_clk", R9A07G044_WDT1_CLK, R9A07G044_OSCCLK,
- 0x548, 3, 0),
+ 0x548, 3, MSTOP(BUS_REG0, BIT(3))),
DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
- 0x550, 0, 0),
+ 0x550, 0, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
- 0x550, 1, 0),
+ 0x550, 1, MSTOP(BUS_MCPU1, BIT(1))),
DEF_MOD("sdhi0_imclk", R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4,
- 0x554, 0, 0),
+ 0x554, 0, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4,
- 0x554, 1, 0),
+ 0x554, 1, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0,
- 0x554, 2, 0),
+ 0x554, 2, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi0_aclk", R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1,
- 0x554, 3, 0),
+ 0x554, 3, MSTOP(BUS_PERI_COM, BIT(0))),
DEF_MOD("sdhi1_imclk", R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4,
- 0x554, 4, 0),
+ 0x554, 4, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4,
- 0x554, 5, 0),
+ 0x554, 5, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1,
- 0x554, 6, 0),
+ 0x554, 6, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
- 0x554, 7, 0),
+ 0x554, 7, MSTOP(BUS_PERI_COM, BIT(1))),
DEF_MOD("gpu_clk", R9A07G044_GPU_CLK, R9A07G044_CLK_G,
- 0x558, 0, 0),
+ 0x558, 0, MSTOP(BUS_REG1, BIT(4))),
DEF_MOD("gpu_axi_clk", R9A07G044_GPU_AXI_CLK, R9A07G044_CLK_P1,
0x558, 1, 0),
DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
0x558, 2, 0),
- DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
- 0x564, 0, 0),
- DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
- 0x564, 1, 0),
- DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
- 0x564, 2, 0),
- DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
- 0x564, 3, 0),
+ DEF_MOD("cru_sysclk", R9A07G044_CRU_SYSCLK, CLK_M2_DIV2,
+ 0x564, 0, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_vclk", R9A07G044_CRU_VCLK, R9A07G044_CLK_M2,
+ 0x564, 1, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_pclk", R9A07G044_CRU_PCLK, R9A07G044_CLK_ZT,
+ 0x564, 2, MSTOP(BUS_PERI_VIDEO, BIT(3))),
+ DEF_MOD("cru_aclk", R9A07G044_CRU_ACLK, R9A07G044_CLK_M0,
+ 0x564, 3, MSTOP(BUS_PERI_VIDEO, BIT(3))),
DEF_MOD("dsi_pll_clk", R9A07G044_MIPI_DSI_PLLCLK, R9A07G044_CLK_M1,
- 0x568, 0, 0),
+ 0x568, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_sys_clk", R9A07G044_MIPI_DSI_SYSCLK, CLK_M2_DIV2,
- 0x568, 1, 0),
+ 0x568, 1, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_aclk", R9A07G044_MIPI_DSI_ACLK, R9A07G044_CLK_P1,
- 0x568, 2, 0),
+ 0x568, 2, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_pclk", R9A07G044_MIPI_DSI_PCLK, R9A07G044_CLK_P2,
- 0x568, 3, 0),
+ 0x568, 3, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_vclk", R9A07G044_MIPI_DSI_VCLK, R9A07G044_CLK_M3,
- 0x568, 4, 0),
+ 0x568, 4, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_MOD("dsi_lpclk", R9A07G044_MIPI_DSI_LPCLK, R9A07G044_CLK_M4,
- 0x568, 5, 0),
+ 0x568, 5, MSTOP(BUS_PERI_VIDEO, GENMASK(6, 5))),
DEF_COUPLED("lcdc_a", R9A07G044_LCDC_CLK_A, R9A07G044_CLK_M0,
- 0x56c, 0, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_COUPLED("lcdc_p", R9A07G044_LCDC_CLK_P, R9A07G044_CLK_ZT,
- 0x56c, 0, 0),
+ 0x56c, 0, MSTOP(BUS_PERI_VIDEO, GENMASK(8, 7))),
DEF_MOD("lcdc_clk_d", R9A07G044_LCDC_CLK_D, R9A07G044_CLK_M3,
- 0x56c, 1, 0),
+ 0x56c, 1, MSTOP(BUS_PERI_VIDEO, BIT(9))),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
- 0x570, 0, 0),
+ 0x570, 0, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 1, 0),
+ 0x570, 1, MSTOP(BUS_MCPU1, BIT(10))),
DEF_MOD("ssi1_pclk", R9A07G044_SSI1_PCLK2, R9A07G044_CLK_P0,
- 0x570, 2, 0),
+ 0x570, 2, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi1_sfr", R9A07G044_SSI1_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 3, 0),
+ 0x570, 3, MSTOP(BUS_MCPU1, BIT(11))),
DEF_MOD("ssi2_pclk", R9A07G044_SSI2_PCLK2, R9A07G044_CLK_P0,
- 0x570, 4, 0),
+ 0x570, 4, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi2_sfr", R9A07G044_SSI2_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 5, 0),
+ 0x570, 5, MSTOP(BUS_MCPU1, BIT(12))),
DEF_MOD("ssi3_pclk", R9A07G044_SSI3_PCLK2, R9A07G044_CLK_P0,
- 0x570, 6, 0),
+ 0x570, 6, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("ssi3_sfr", R9A07G044_SSI3_PCLK_SFR, R9A07G044_CLK_P0,
- 0x570, 7, 0),
+ 0x570, 7, MSTOP(BUS_MCPU1, BIT(13))),
DEF_MOD("usb0_host", R9A07G044_USB_U2H0_HCLK, R9A07G044_CLK_P1,
- 0x578, 0, 0),
+ 0x578, 0, MSTOP(BUS_PERI_COM, BIT(5))),
DEF_MOD("usb1_host", R9A07G044_USB_U2H1_HCLK, R9A07G044_CLK_P1,
- 0x578, 1, 0),
+ 0x578, 1, MSTOP(BUS_PERI_COM, BIT(7))),
DEF_MOD("usb0_func", R9A07G044_USB_U2P_EXR_CPUCLK, R9A07G044_CLK_P1,
- 0x578, 2, 0),
+ 0x578, 2, MSTOP(BUS_PERI_COM, BIT(6))),
DEF_MOD("usb_pclk", R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
- 0x578, 3, 0),
+ 0x578, 3, MSTOP(BUS_PERI_COM, BIT(4))),
DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 0, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 0, 0),
+ 0x57c, 0, MSTOP(BUS_PERI_COM, BIT(2))),
DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0,
- 0x57c, 1, 0),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT,
- 0x57c, 1, 0),
+ 0x57c, 1, MSTOP(BUS_PERI_COM, BIT(3))),
DEF_MOD("i2c0", R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
- 0x580, 0, 0),
+ 0x580, 0, MSTOP(BUS_MCPU2, BIT(10))),
DEF_MOD("i2c1", R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
- 0x580, 1, 0),
+ 0x580, 1, MSTOP(BUS_MCPU2, BIT(11))),
DEF_MOD("i2c2", R9A07G044_I2C2_PCLK, R9A07G044_CLK_P0,
- 0x580, 2, 0),
+ 0x580, 2, MSTOP(BUS_MCPU2, BIT(12))),
DEF_MOD("i2c3", R9A07G044_I2C3_PCLK, R9A07G044_CLK_P0,
- 0x580, 3, 0),
+ 0x580, 3, MSTOP(BUS_MCPU2, BIT(13))),
DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 0, 0),
+ 0x584, 0, MSTOP(BUS_MCPU2, BIT(1))),
DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 1, 0),
+ 0x584, 1, MSTOP(BUS_MCPU2, BIT(2))),
DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 2, 0),
+ 0x584, 2, MSTOP(BUS_MCPU2, BIT(3))),
DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 3, 0),
+ 0x584, 3, MSTOP(BUS_MCPU2, BIT(4))),
DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0,
- 0x584, 4, 0),
+ 0x584, 4, MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
- 0x588, 0, 0),
+ 0x588, 0, MSTOP(BUS_MCPU2, BIT(7))),
DEF_MOD("sci1", R9A07G044_SCI1_CLKP, R9A07G044_CLK_P0,
- 0x588, 1, 0),
+ 0x588, 1, MSTOP(BUS_MCPU2, BIT(8))),
DEF_MOD("rspi0", R9A07G044_RSPI0_CLKB, R9A07G044_CLK_P0,
- 0x590, 0, 0),
+ 0x590, 0, MSTOP(BUS_MCPU1, BIT(14))),
DEF_MOD("rspi1", R9A07G044_RSPI1_CLKB, R9A07G044_CLK_P0,
- 0x590, 1, 0),
+ 0x590, 1, MSTOP(BUS_MCPU1, BIT(15))),
DEF_MOD("rspi2", R9A07G044_RSPI2_CLKB, R9A07G044_CLK_P0,
- 0x590, 2, 0),
+ 0x590, 2, MSTOP(BUS_MCPU2, BIT(0))),
DEF_MOD("canfd", R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
- 0x594, 0, 0),
+ 0x594, 0, MSTOP(BUS_MCPU2, BIT(9))),
DEF_MOD("gpio", R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
- 0x598, 0, 0),
+ 0x598, 0, MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A07G044_ADC_ADCLK, R9A07G044_CLK_TSU,
- 0x5a8, 0, 0),
+ 0x5a8, 0, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
- 0x5a8, 1, 0),
+ 0x5a8, 1, MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A07G044_TSU_PCLK, R9A07G044_CLK_TSU,
- 0x5ac, 0, 0),
+ 0x5ac, 0, MSTOP(BUS_MCPU2, BIT(15))),
},
#ifdef CONFIG_CLK_R9A07G054
.drp = {
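The lcdc_a/lcdc_p and eth*_axi/eth*_chi pairs above are DEF_COUPLED entries: two clocks that share a single MSTPCR bit, so the bit may only be cleared once both users are off. A minimal user-space sketch of that shared-gate refcounting (names are illustrative, not the driver's own):

#include <stdbool.h>
#include <stdio.h>

struct coupled_gate {
	int enable_count;	/* how many of the coupled clocks are on */
	bool bit_set;		/* state of the shared MSTPCR bit */
};

static void coupled_enable(struct coupled_gate *g)
{
	if (g->enable_count++ == 0)
		g->bit_set = true;	/* first user switches the bit on */
}

static void coupled_disable(struct coupled_gate *g)
{
	if (--g->enable_count == 0)
		g->bit_set = false;	/* last user switches the bit off */
}

int main(void)
{
	struct coupled_gate lcdc = { 0, false };

	coupled_enable(&lcdc);	/* lcdc_a */
	coupled_enable(&lcdc);	/* lcdc_p */
	coupled_disable(&lcdc);	/* lcdc_a off, bit stays set for lcdc_p */
	printf("bit_set=%d count=%d\n", lcdc.bit_set, lcdc.enable_count);
	return 0;
}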
diff --git a/drivers/clk/renesas/r9a08g045-cpg.c b/drivers/clk/renesas/r9a08g045-cpg.c
index ed0661997928..79e7b19c7882 100644
--- a/drivers/clk/renesas/r9a08g045-cpg.c
+++ b/drivers/clk/renesas/r9a08g045-cpg.c
@@ -183,6 +183,7 @@ static const struct cpg_core_clk r9a08g045_core_clks[] __initconst = {
DEF_G3S_DIV("P3", R9A08G045_CLK_P3, CLK_PLL3_DIV2_4, DIVPL3C, G3S_DIVPL3C_STS,
dtable_1_32, 0, 0, 0, NULL),
DEF_FIXED("P3_DIV2", CLK_P3_DIV2, R9A08G045_CLK_P3, 1, 2),
+ DEF_FIXED("P5", R9A08G045_CLK_P5, CLK_PLL2_DIV2, 1, 4),
DEF_FIXED("ZT", R9A08G045_CLK_ZT, CLK_PLL3_DIV2_8, 1, 1),
DEF_FIXED("S0", R9A08G045_CLK_S0, CLK_SEL_PLL4, 1, 2),
DEF_FIXED("OSC", R9A08G045_OSCCLK, CLK_EXTAL, 1, 1),
@@ -284,13 +285,22 @@ static const struct rzg2l_mod_clk r9a08g045_mod_clks[] = {
MSTOP(BUS_MCPU2, BIT(5))),
DEF_MOD("scif5_clk_pck", R9A08G045_SCIF5_CLK_PCK, R9A08G045_CLK_P0, 0x584, 5,
MSTOP(BUS_MCPU3, BIT(4))),
- DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0, 0),
+ DEF_MOD("gpio_hclk", R9A08G045_GPIO_HCLK, R9A08G045_OSCCLK, 0x598, 0,
+ MSTOP(BUS_PERI_CPU, BIT(6))),
DEF_MOD("adc_adclk", R9A08G045_ADC_ADCLK, R9A08G045_CLK_TSU, 0x5a8, 0,
MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("adc_pclk", R9A08G045_ADC_PCLK, R9A08G045_CLK_TSU, 0x5a8, 1,
MSTOP(BUS_MCPU2, BIT(14))),
DEF_MOD("tsu_pclk", R9A08G045_TSU_PCLK, R9A08G045_CLK_TSU, 0x5ac, 0,
MSTOP(BUS_MCPU2, BIT(15))),
+ DEF_MOD("pci_aclk", R9A08G045_PCI_ACLK, R9A08G045_CLK_M0, 0x608, 0,
+ MSTOP(BUS_PERI_COM, BIT(10))),
+ DEF_MOD("pci_clkl1pm", R9A08G045_PCI_CLKL1PM, R9A08G045_CLK_ZT, 0x608, 1,
+ MSTOP(BUS_PERI_COM, BIT(10))),
+ DEF_MOD("i3c_pclk", R9A08G045_I3C_PCLK, R9A08G045_CLK_TSU, 0x610, 0,
+ MSTOP(BUS_MCPU3, BIT(10))),
+ DEF_MOD("i3c_tclk", R9A08G045_I3C_TCLK, R9A08G045_CLK_P5, 0x610, 1,
+ MSTOP(BUS_MCPU3, BIT(10))),
DEF_MOD("vbat_bclk", R9A08G045_VBAT_BCLK, R9A08G045_OSCCLK, 0x614, 0,
MSTOP(BUS_MCPU3, GENMASK(8, 7))),
};
@@ -331,6 +341,15 @@ static const struct rzg2l_reset r9a08g045_resets[] = {
DEF_RST(R9A08G045_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A08G045_ADC_ADRST_N, 0x8a8, 1),
DEF_RST(R9A08G045_TSU_PRESETN, 0x8ac, 0),
+ DEF_RST(R9A08G045_PCI_ARESETN, 0x908, 0),
+ DEF_RST(R9A08G045_PCI_RST_B, 0x908, 1),
+ DEF_RST(R9A08G045_PCI_RST_GP_B, 0x908, 2),
+ DEF_RST(R9A08G045_PCI_RST_PS_B, 0x908, 3),
+ DEF_RST(R9A08G045_PCI_RST_RSM_B, 0x908, 4),
+ DEF_RST(R9A08G045_PCI_RST_CFG_B, 0x908, 5),
+ DEF_RST(R9A08G045_PCI_RST_LOAD_B, 0x908, 6),
+ DEF_RST(R9A08G045_I3C_TRESETN, 0x910, 0),
+ DEF_RST(R9A08G045_I3C_PRESETN, 0x910, 1),
DEF_RST(R9A08G045_VBAT_BRESETN, 0x914, 0),
};
@@ -342,6 +361,10 @@ static const unsigned int r9a08g045_crit_mod_clks[] __initconst = {
MOD_CLK_BASE + R9A08G045_VBAT_BCLK,
};
+static const unsigned int r9a08g045_no_pm_mod_clks[] = {
+ MOD_CLK_BASE + R9A08G045_PCI_CLKL1PM,
+};
+
const struct rzg2l_cpg_info r9a08g045_cpg_info = {
/* Core Clocks */
.core_clks = r9a08g045_core_clks,
@@ -358,6 +381,10 @@ const struct rzg2l_cpg_info r9a08g045_cpg_info = {
.num_mod_clks = ARRAY_SIZE(r9a08g045_mod_clks),
.num_hw_mod_clks = R9A08G045_VBAT_BCLK + 1,
+ /* No PM Module Clocks */
+ .no_pm_mod_clks = r9a08g045_no_pm_mod_clks,
+ .num_no_pm_mod_clks = ARRAY_SIZE(r9a08g045_no_pm_mod_clks),
+
/* Resets */
.resets = r9a08g045_resets,
.num_resets = R9A08G045_VBAT_BRESETN + 1, /* Last reset ID + 1 */
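The new no_pm_mod_clks list keeps PCI_CLKL1PM out of the PM domain, so runtime PM never gates it behind the driver's back. A sketch of the kind of membership test a clock driver might apply before attaching a clock to the domain (the helper name and id are made up):

#include <stdbool.h>
#include <stddef.h>

static const unsigned int no_pm_mod_clks[] = {
	42,	/* stand-in for the PCI_CLKL1PM clock id */
};

/* Hypothetical helper: true if the clock must stay outside PM control. */
static bool clk_is_no_pm(unsigned int id)
{
	for (size_t i = 0; i < sizeof(no_pm_mod_clks) / sizeof(no_pm_mod_clks[0]); i++)
		if (no_pm_mod_clks[i] == id)
			return true;
	return false;
}

int main(void)
{
	return clk_is_no_pm(42) ? 0 : 1;
}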
diff --git a/drivers/clk/renesas/r9a09g047-cpg.c b/drivers/clk/renesas/r9a09g047-cpg.c
index 26e2be7667eb..1e9896742a06 100644
--- a/drivers/clk/renesas/r9a09g047-cpg.c
+++ b/drivers/clk/renesas/r9a09g047-cpg.c
@@ -16,7 +16,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G047_GBETH_1_CLK_PTP_REF_I,
+ LAST_DT_CORE_CLK = R9A09G047_USB2_0_CLK_CORE1,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -44,10 +44,15 @@ enum clk_ids {
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
CLK_PLLCLN_DIV20,
+ CLK_PLLCLN_DIV64,
+ CLK_PLLCLN_DIV256,
+ CLK_PLLCLN_DIV1024,
CLK_PLLDTY_ACPU,
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
CLK_PLLDTY_DIV8,
+ CLK_PLLDTY_RCPU,
+ CLK_PLLDTY_RCPU_DIV4,
CLK_PLLETH_DIV_250_FIX,
CLK_PLLETH_DIV_125_FIX,
CLK_CSDIV_PLLETH_GBE0,
@@ -140,6 +145,9 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
DEF_FIXED(".pllcln_div16", CLK_PLLCLN_DIV16, CLK_PLLCLN, 1, 16),
DEF_FIXED(".pllcln_div20", CLK_PLLCLN_DIV20, CLK_PLLCLN, 1, 20),
+ DEF_FIXED(".pllcln_div64", CLK_PLLCLN_DIV64, CLK_PLLCLN, 1, 64),
+ DEF_FIXED(".pllcln_div256", CLK_PLLCLN_DIV256, CLK_PLLCLN, 1, 256),
+ DEF_FIXED(".pllcln_div1024", CLK_PLLCLN_DIV1024, CLK_PLLCLN, 1, 1024),
DEF_DDIV(".plldty_acpu", CLK_PLLDTY_ACPU, CLK_PLLDTY, CDDIV0_DIVCTL2, dtable_2_64),
DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
@@ -157,6 +165,8 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+ DEF_DDIV(".plldty_rcpu", CLK_PLLDTY_RCPU, CLK_PLLDTY, CDDIV3_DIVCTL2, dtable_2_64),
+ DEF_FIXED(".plldty_rcpu_div4", CLK_PLLDTY_RCPU_DIV4, CLK_PLLDTY_RCPU, 1, 4),
DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
DEF_DDIV(".pllvdo_gpu", CLK_PLLVDO_GPU, CLK_PLLVDO, CDDIV3_DIVCTL1, dtable_2_64),
@@ -173,17 +183,35 @@ static const struct cpg_core_clk r9a09g047_core_clks[] __initconst = {
CDDIV1_DIVCTL3, dtable_1_8),
DEF_FIXED("iotop_0_shclk", R9A09G047_IOTOP_0_SHCLK, CLK_PLLCM33_DIV16, 1, 1),
DEF_FIXED("spi_clk_spi", R9A09G047_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2),
+ DEF_FIXED("usb2_0_clk_core0", R9A09G047_USB2_0_CLK_CORE0, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb2_0_clk_core1", R9A09G047_USB2_0_CLK_CORE1, CLK_QEXTAL, 1, 1),
DEF_FIXED("gbeth_0_clk_ptp_ref_i", R9A09G047_GBETH_0_CLK_PTP_REF_I,
CLK_PLLETH_DIV_125_FIX, 1, 1),
DEF_FIXED("gbeth_1_clk_ptp_ref_i", R9A09G047_GBETH_1_CLK_PTP_REF_I,
CLK_PLLETH_DIV_125_FIX, 1, 1),
+ DEF_FIXED("usb3_0_ref_alt_clk_p", R9A09G047_USB3_0_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_0_core_clk", R9A09G047_USB3_0_CLKCORE, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
+ DEF_MOD("dmac_0_aclk", CLK_PLLCM33_GEAR, 0, 0, 0, 0,
+ BUS_MSTOP(5, BIT(9))),
+ DEF_MOD("dmac_1_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 1, 0, 1,
+ BUS_MSTOP(3, BIT(2))),
+ DEF_MOD("dmac_2_aclk", CLK_PLLDTY_ACPU_DIV2, 0, 2, 0, 2,
+ BUS_MSTOP(3, BIT(3))),
+ DEF_MOD("dmac_3_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 3, 0, 3,
+ BUS_MSTOP(10, BIT(11))),
+ DEF_MOD("dmac_4_aclk", CLK_PLLDTY_RCPU_DIV4, 0, 4, 0, 4,
+ BUS_MSTOP(10, BIT(12))),
DEF_MOD_CRITICAL("icu_0_pclk_i", CLK_PLLCM33_DIV16, 0, 5, 0, 5,
BUS_MSTOP_NONE),
DEF_MOD_CRITICAL("gic_0_gicclk", CLK_PLLDTY_ACPU_DIV4, 1, 3, 0, 19,
BUS_MSTOP(3, BIT(5))),
+ DEF_MOD("gpt_0_pclk_sfr", CLK_PLLCLN_DIV8, 3, 1, 1, 17,
+ BUS_MSTOP(6, BIT(11))),
+ DEF_MOD("gpt_1_pclk_sfr", CLK_PLLCLN_DIV8, 3, 2, 1, 18,
+ BUS_MSTOP(6, BIT(12))),
DEF_MOD("wdt_1_clkp", CLK_PLLCLN_DIV16, 4, 13, 2, 13,
BUS_MSTOP(1, BIT(0))),
DEF_MOD("wdt_1_clk_loco", CLK_QEXTAL, 4, 14, 2, 14,
@@ -196,6 +224,106 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(5, BIT(13))),
DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("rsci0_pclk", CLK_PLLCLN_DIV16, 5, 13, 2, 29,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci0_tclk", CLK_PLLCLN_DIV16, 5, 14, 2, 30,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci0_ps_ps3_n", CLK_PLLCLN_DIV1024, 5, 15, 2, 31,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci0_ps_ps2_n", CLK_PLLCLN_DIV256, 6, 0, 3, 0,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci0_ps_ps1_n", CLK_PLLCLN_DIV64, 6, 1, 3, 1,
+ BUS_MSTOP(11, BIT(3))),
+ DEF_MOD("rsci1_pclk", CLK_PLLCLN_DIV16, 6, 2, 3, 2,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci1_tclk", CLK_PLLCLN_DIV16, 6, 3, 3, 3,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci1_ps_ps3_n", CLK_PLLCLN_DIV1024, 6, 4, 3, 4,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci1_ps_ps2_n", CLK_PLLCLN_DIV256, 6, 5, 3, 5,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci1_ps_ps1_n", CLK_PLLCLN_DIV64, 6, 6, 3, 6,
+ BUS_MSTOP(11, BIT(4))),
+ DEF_MOD("rsci2_pclk", CLK_PLLCLN_DIV16, 6, 7, 3, 7,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci2_tclk", CLK_PLLCLN_DIV16, 6, 8, 3, 8,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci2_ps_ps3_n", CLK_PLLCLN_DIV1024, 6, 9, 3, 9,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci2_ps_ps2_n", CLK_PLLCLN_DIV256, 6, 10, 3, 10,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci2_ps_ps1_n", CLK_PLLCLN_DIV64, 6, 11, 3, 11,
+ BUS_MSTOP(11, BIT(5))),
+ DEF_MOD("rsci3_pclk", CLK_PLLCLN_DIV16, 6, 12, 3, 12,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci3_tclk", CLK_PLLCLN_DIV16, 6, 13, 3, 13,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci3_ps_ps3_n", CLK_PLLCLN_DIV1024, 6, 14, 3, 14,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci3_ps_ps2_n", CLK_PLLCLN_DIV256, 6, 15, 3, 15,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci3_ps_ps1_n", CLK_PLLCLN_DIV64, 7, 0, 3, 16,
+ BUS_MSTOP(11, BIT(6))),
+ DEF_MOD("rsci4_pclk", CLK_PLLCLN_DIV16, 7, 1, 3, 17,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci4_tclk", CLK_PLLCLN_DIV16, 7, 2, 3, 18,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci4_ps_ps3_n", CLK_PLLCLN_DIV1024, 7, 3, 3, 19,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci4_ps_ps2_n", CLK_PLLCLN_DIV256, 7, 4, 3, 20,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci4_ps_ps1_n", CLK_PLLCLN_DIV64, 7, 5, 3, 21,
+ BUS_MSTOP(11, BIT(7))),
+ DEF_MOD("rsci5_pclk", CLK_PLLCLN_DIV16, 7, 6, 3, 22,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci5_tclk", CLK_PLLCLN_DIV16, 7, 7, 3, 23,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci5_ps_ps3_n", CLK_PLLCLN_DIV1024, 7, 8, 3, 24,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci5_ps_ps2_n", CLK_PLLCLN_DIV256, 7, 9, 3, 25,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci5_ps_ps1_n", CLK_PLLCLN_DIV64, 7, 10, 3, 26,
+ BUS_MSTOP(11, BIT(8))),
+ DEF_MOD("rsci6_pclk", CLK_PLLCLN_DIV16, 7, 11, 3, 27,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci6_tclk", CLK_PLLCLN_DIV16, 7, 12, 3, 28,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci6_ps_ps3_n", CLK_PLLCLN_DIV1024, 7, 13, 3, 29,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci6_ps_ps2_n", CLK_PLLCLN_DIV256, 7, 14, 3, 30,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci6_ps_ps1_n", CLK_PLLCLN_DIV64, 7, 15, 3, 31,
+ BUS_MSTOP(11, BIT(9))),
+ DEF_MOD("rsci7_pclk", CLK_PLLCLN_DIV16, 8, 0, 4, 0,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci7_tclk", CLK_PLLCLN_DIV16, 8, 1, 4, 1,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci7_ps_ps3_n", CLK_PLLCLN_DIV1024, 8, 2, 4, 2,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci7_ps_ps2_n", CLK_PLLCLN_DIV256, 8, 3, 4, 3,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci7_ps_ps1_n", CLK_PLLCLN_DIV64, 8, 4, 4, 4,
+ BUS_MSTOP(11, BIT(10))),
+ DEF_MOD("rsci8_pclk", CLK_PLLCLN_DIV16, 8, 5, 4, 5,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci8_tclk", CLK_PLLCLN_DIV16, 8, 6, 4, 6,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci8_ps_ps3_n", CLK_PLLCLN_DIV1024, 8, 7, 4, 7,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci8_ps_ps2_n", CLK_PLLCLN_DIV256, 8, 8, 4, 8,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci8_ps_ps1_n", CLK_PLLCLN_DIV64, 8, 9, 4, 9,
+ BUS_MSTOP(11, BIT(11))),
+ DEF_MOD("rsci9_pclk", CLK_PLLCLN_DIV16, 8, 10, 4, 10,
+ BUS_MSTOP(11, BIT(12))),
+ DEF_MOD("rsci9_tclk", CLK_PLLCLN_DIV16, 8, 11, 4, 11,
+ BUS_MSTOP(11, BIT(12))),
+ DEF_MOD("rsci9_ps_ps3_n", CLK_PLLCLN_DIV1024, 8, 12, 4, 12,
+ BUS_MSTOP(11, BIT(12))),
+ DEF_MOD("rsci9_ps_ps2_n", CLK_PLLCLN_DIV256, 8, 13, 4, 13,
+ BUS_MSTOP(11, BIT(12))),
+ DEF_MOD("rsci9_ps_ps1_n", CLK_PLLCLN_DIV64, 8, 14, 4, 14,
+ BUS_MSTOP(11, BIT(12))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
@@ -258,6 +386,20 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb3_0_aclk", CLK_PLLDTY_DIV8, 10, 15, 5, 15,
+ BUS_MSTOP(7, BIT(12))),
+ DEF_MOD("usb3_0_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 0, 5, 16,
+ BUS_MSTOP(7, BIT(14))),
+ DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
+ BUS_MSTOP(7, BIT(7))),
+ DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20,
+ BUS_MSTOP(7, BIT(8))),
+ DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
+ BUS_MSTOP(7, BIT(9))),
+ DEF_MOD("usb2_0_pclk_usbtst0", CLK_PLLDTY_ACPU_DIV4, 11, 6, 5, 22,
+ BUS_MSTOP(7, BIT(10))),
+ DEF_MOD("usb2_0_pclk_usbtst1", CLK_PLLDTY_ACPU_DIV4, 11, 7, 5, 23,
+ BUS_MSTOP(7, BIT(11))),
DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_tx_i", CLK_SMUX2_GBE0_TXCLK, 11, 8, 5, 24,
BUS_MSTOP(8, BIT(5)), 1),
DEF_MOD_MUX_EXTERNAL("gbeth_0_clk_rx_i", CLK_SMUX2_GBE0_RXCLK, 11, 9, 5, 25,
@@ -300,12 +442,41 @@ static const struct rzv2h_mod_clk r9a09g047_mod_clks[] __initconst = {
static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(3, 0, 1, 1), /* SYS_0_PRESETN */
+ DEF_RST(3, 1, 1, 2), /* DMAC_0_ARESETN */
+ DEF_RST(3, 2, 1, 3), /* DMAC_1_ARESETN */
+ DEF_RST(3, 3, 1, 4), /* DMAC_2_ARESETN */
+ DEF_RST(3, 4, 1, 5), /* DMAC_3_ARESETN */
+ DEF_RST(3, 5, 1, 6), /* DMAC_4_ARESETN */
DEF_RST(3, 6, 1, 7), /* ICU_0_PRESETN_I */
DEF_RST(3, 8, 1, 9), /* GIC_0_GICRESET_N */
DEF_RST(3, 9, 1, 10), /* GIC_0_DBG_GICRESET_N */
+ DEF_RST(5, 9, 2, 10), /* GPT_0_RST_P_REG */
+ DEF_RST(5, 10, 2, 11), /* GPT_0_RST_S_REG */
+ DEF_RST(5, 11, 2, 12), /* GPT_1_RST_P_REG */
+ DEF_RST(5, 12, 2, 13), /* GPT_1_RST_S_REG */
DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(8, 1, 3, 18), /* RSCI0_PRESETN */
+ DEF_RST(8, 2, 3, 19), /* RSCI0_TRESETN */
+ DEF_RST(8, 3, 3, 20), /* RSCI1_PRESETN */
+ DEF_RST(8, 4, 3, 21), /* RSCI1_TRESETN */
+ DEF_RST(8, 5, 3, 22), /* RSCI2_PRESETN */
+ DEF_RST(8, 6, 3, 23), /* RSCI2_TRESETN */
+ DEF_RST(8, 7, 3, 24), /* RSCI3_PRESETN */
+ DEF_RST(8, 8, 3, 25), /* RSCI3_TRESETN */
+ DEF_RST(8, 9, 3, 26), /* RSCI4_PRESETN */
+ DEF_RST(8, 10, 3, 27), /* RSCI4_TRESETN */
+ DEF_RST(8, 11, 3, 28), /* RSCI5_PRESETN */
+ DEF_RST(8, 12, 3, 29), /* RSCI5_TRESETN */
+ DEF_RST(8, 13, 3, 30), /* RSCI6_PRESETN */
+ DEF_RST(8, 14, 3, 31), /* RSCI6_TRESETN */
+ DEF_RST(8, 15, 4, 0), /* RSCI7_PRESETN */
+ DEF_RST(9, 0, 4, 1), /* RSCI7_TRESETN */
+ DEF_RST(9, 1, 4, 2), /* RSCI8_PRESETN */
+ DEF_RST(9, 2, 4, 3), /* RSCI8_TRESETN */
+ DEF_RST(9, 3, 4, 4), /* RSCI9_PRESETN */
+ DEF_RST(9, 4, 4, 5), /* RSCI9_TRESETN */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
@@ -325,6 +496,11 @@ static const struct rzv2h_reset r9a09g047_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 10, 4, 27), /* USB3_0_ARESETN */
+ DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
+ DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
+ DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
+ DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
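The DEF_FIXED entries in this file (for example .pllcln_div64 with mult 1, div 64) describe fixed-factor clocks whose rate is simply parent * mult / div. A small sketch of that arithmetic; the PLLCLN input rate below is assumed purely for illustration:

#include <stdio.h>

static unsigned long fixed_factor_rate(unsigned long parent,
				       unsigned int mult, unsigned int div)
{
	/* rate = parent * mult / div, computed in 64 bits to avoid overflow */
	return (unsigned long)(((unsigned long long)parent * mult) / div);
}

int main(void)
{
	unsigned long pllcln = 1600000000UL;	/* assumed rate, for the example */

	printf("pllcln_div64 = %lu Hz\n", fixed_factor_rate(pllcln, 1, 64));
	return 0;
}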
diff --git a/drivers/clk/renesas/r9a09g056-cpg.c b/drivers/clk/renesas/r9a09g056-cpg.c
index 437af86f49dd..f48a082e65d7 100644
--- a/drivers/clk/renesas/r9a09g056-cpg.c
+++ b/drivers/clk/renesas/r9a09g056-cpg.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/renesas.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -16,7 +17,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G056_SPI_CLK_SPI,
+ LAST_DT_CORE_CLK = R9A09G056_USB3_0_CLKCORE,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -28,7 +29,9 @@ enum clk_ids {
CLK_PLLCLN,
CLK_PLLDTY,
CLK_PLLCA55,
+ CLK_PLLVDO,
CLK_PLLETH,
+ CLK_PLLDSI,
CLK_PLLGPU,
/* Internal Core Clocks */
@@ -36,10 +39,10 @@ enum clk_ids {
CLK_PLLCM33_DIV4,
CLK_PLLCM33_DIV5,
CLK_PLLCM33_DIV16,
+ CLK_PLLCM33_GEAR,
CLK_SMUX2_XSPI_CLK0,
CLK_SMUX2_XSPI_CLK1,
CLK_PLLCM33_XSPI,
- CLK_PLLCM33_GEAR,
CLK_PLLCLN_DIV2,
CLK_PLLCLN_DIV8,
CLK_PLLCLN_DIV16,
@@ -47,6 +50,10 @@ enum clk_ids {
CLK_PLLDTY_ACPU_DIV2,
CLK_PLLDTY_ACPU_DIV4,
CLK_PLLDTY_DIV8,
+ CLK_PLLDTY_DIV16,
+ CLK_PLLVDO_CRU0,
+ CLK_PLLVDO_CRU1,
+ CLK_PLLVDO_ISP,
CLK_PLLETH_DIV_250_FIX,
CLK_PLLETH_DIV_125_FIX,
CLK_CSDIV_PLLETH_GBE0,
@@ -55,6 +62,9 @@ enum clk_ids {
CLK_SMUX2_GBE0_RXCLK,
CLK_SMUX2_GBE1_TXCLK,
CLK_SMUX2_GBE1_RXCLK,
+ CLK_CDIV4_PLLETH_LPCLK,
+ CLK_PLLETH_LPCLK_GEAR,
+ CLK_PLLDSI_GEAR,
CLK_PLLGPU_GEAR,
/* Module Clocks */
@@ -69,6 +79,12 @@ static const struct clk_div_table dtable_1_8[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_4[] = {
+ {0, 2},
+ {1, 4},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_16[] = {
{0, 2},
{1, 4},
@@ -77,6 +93,26 @@ static const struct clk_div_table dtable_2_16[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_32[] = {
+ {0, 2},
+ {1, 4},
+ {2, 6},
+ {3, 8},
+ {4, 10},
+ {5, 12},
+ {6, 14},
+ {7, 16},
+ {8, 18},
+ {9, 20},
+ {10, 22},
+ {11, 24},
+ {12, 26},
+ {13, 28},
+ {14, 30},
+ {15, 32},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -93,6 +129,17 @@ static const struct clk_div_table dtable_2_100[] = {
{0, 0},
};
+static const struct clk_div_table dtable_16_128[] = {
+ {0, 16},
+ {1, 32},
+ {2, 64},
+ {3, 128},
+ {0, 0},
+};
+
+RZV2H_CPG_PLL_DSI_LIMITS(rzv2n_cpg_pll_dsi_limits);
+#define PLLDSI PLL_PACK_LIMITS(0xc0, 1, 0, &rzv2n_cpg_pll_dsi_limits)
+
/* Mux clock tables */
static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
@@ -112,7 +159,9 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
DEF_FIXED(".pllcln", CLK_PLLCLN, CLK_QEXTAL, 200, 3),
DEF_FIXED(".plldty", CLK_PLLDTY, CLK_QEXTAL, 200, 3),
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
+ DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
+ DEF_PLLDSI(".plldsi", CLK_PLLDSI, CLK_QEXTAL, PLLDSI),
DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
@@ -120,11 +169,11 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
dtable_2_16),
- DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_FIXED(".pllcln_div2", CLK_PLLCLN_DIV2, CLK_PLLCLN, 1, 2),
DEF_FIXED(".pllcln_div8", CLK_PLLCLN_DIV8, CLK_PLLCLN, 1, 8),
@@ -134,6 +183,11 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
DEF_FIXED(".plldty_acpu_div2", CLK_PLLDTY_ACPU_DIV2, CLK_PLLDTY_ACPU, 1, 2),
DEF_FIXED(".plldty_acpu_div4", CLK_PLLDTY_ACPU_DIV4, CLK_PLLDTY_ACPU, 1, 4),
DEF_FIXED(".plldty_div8", CLK_PLLDTY_DIV8, CLK_PLLDTY, 1, 8),
+ DEF_FIXED(".plldty_div16", CLK_PLLDTY_DIV16, CLK_PLLDTY, 1, 16),
+
+ DEF_DDIV(".pllvdo_cru0", CLK_PLLVDO_CRU0, CLK_PLLVDO, CDDIV3_DIVCTL3, dtable_2_4),
+ DEF_DDIV(".pllvdo_cru1", CLK_PLLVDO_CRU1, CLK_PLLVDO, CDDIV4_DIVCTL0, dtable_2_4),
+ DEF_DDIV(".pllvdo_isp", CLK_PLLVDO_ISP, CLK_PLLVDO, CDDIV2_DIVCTL3, dtable_2_64),
DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
@@ -145,6 +199,12 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
+ DEF_FIXED(".cdiv4_plleth_lpclk", CLK_CDIV4_PLLETH_LPCLK, CLK_PLLETH, 1, 4),
+ DEF_CSDIV(".plleth_lpclk_gear", CLK_PLLETH_LPCLK_GEAR, CLK_CDIV4_PLLETH_LPCLK,
+ CSDIV0_DIVCTL2, dtable_16_128),
+
+ DEF_PLLDSI_DIV(".plldsi_gear", CLK_PLLDSI_GEAR, CLK_PLLDSI,
+ CSDIV1_DIVCTL2, dtable_2_32),
DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
@@ -166,6 +226,8 @@ static const struct cpg_core_clk r9a09g056_core_clks[] __initconst = {
CLK_PLLETH_DIV_125_FIX, 1, 1),
DEF_FIXED_MOD_STATUS("spi_clk_spi", R9A09G056_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2,
FIXED_MOD_CONF_XSPI),
+ DEF_FIXED("usb3_0_ref_alt_clk_p", R9A09G056_USB3_0_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_0_core_clk", R9A09G056_USB3_0_CLKCORE, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
@@ -205,6 +267,12 @@ static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
BUS_MSTOP(5, BIT(13))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
BUS_MSTOP(3, BIT(13))),
DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
@@ -253,6 +321,10 @@ static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb3_0_aclk", CLK_PLLDTY_DIV8, 10, 15, 5, 15,
+ BUS_MSTOP(7, BIT(12))),
+ DEF_MOD("usb3_0_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 0, 5, 16,
+ BUS_MSTOP(7, BIT(14))),
DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
BUS_MSTOP(7, BIT(7))),
DEF_MOD("usb2_0_u2p_exr_cpuclk", CLK_PLLDTY_ACPU_DIV4, 11, 5, 5, 21,
@@ -283,6 +355,42 @@ static const struct rzv2h_mod_clk r9a09g056_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(6))),
DEF_MOD("gbeth_1_aclk_i", CLK_PLLDTY_DIV8, 12, 3, 6, 3,
BUS_MSTOP(8, BIT(6))),
+ DEF_MOD("cru_0_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 2, 6, 18,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD_NO_PM("cru_0_vclk", CLK_PLLVDO_CRU0, 13, 3, 6, 19,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_0_pclk", CLK_PLLDTY_DIV16, 13, 4, 6, 20,
+ BUS_MSTOP(9, BIT(4))),
+ DEF_MOD("cru_1_aclk", CLK_PLLDTY_ACPU_DIV2, 13, 5, 6, 21,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD_NO_PM("cru_1_vclk", CLK_PLLVDO_CRU1, 13, 6, 6, 22,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("cru_1_pclk", CLK_PLLDTY_DIV16, 13, 7, 6, 23,
+ BUS_MSTOP(9, BIT(5))),
+ DEF_MOD("isp_0_reg_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 2, 7, 2,
+ BUS_MSTOP(9, BIT(8))),
+ DEF_MOD("isp_0_pclk", CLK_PLLDTY_DIV16, 14, 3, 7, 3,
+ BUS_MSTOP(9, BIT(8))),
+ DEF_MOD("isp_0_vin_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 4, 7, 4,
+ BUS_MSTOP(9, BIT(9))),
+ DEF_MOD("isp_0_isp_sclk", CLK_PLLVDO_ISP, 14, 5, 7, 5,
+ BUS_MSTOP(9, BIT(9))),
+ DEF_MOD("dsi_0_pclk", CLK_PLLDTY_DIV16, 14, 8, 7, 8,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 9, 7, 9,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_vclk1", CLK_PLLDSI_GEAR, 14, 10, 7, 10,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_lpclk", CLK_PLLETH_LPCLK_GEAR, 14, 11, 7, 11,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_pllref_clk", CLK_QEXTAL, 14, 12, 7, 12,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("lcdc_0_clk_a", CLK_PLLDTY_ACPU_DIV2, 14, 13, 7, 13,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
+ DEF_MOD("lcdc_0_clk_p", CLK_PLLDTY_DIV16, 14, 14, 7, 14,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
+ DEF_MOD("lcdc_0_clk_d", CLK_PLLDSI_GEAR, 14, 15, 7, 15,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
BUS_MSTOP(3, BIT(4))),
DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
@@ -308,6 +416,8 @@ static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
@@ -317,16 +427,30 @@ static const struct rzv2h_reset r9a09g056_resets[] __initconst = {
DEF_RST(9, 14, 4, 15), /* RIIC_6_MRST */
DEF_RST(9, 15, 4, 16), /* RIIC_7_MRST */
DEF_RST(10, 0, 4, 17), /* RIIC_8_MRST */
- DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
- DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
+ DEF_RST(10, 3, 4, 20), /* SPI_HRESETN */
+ DEF_RST(10, 4, 4, 21), /* SPI_ARESETN */
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 10, 4, 27), /* USB3_0_ARESETN */
DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
DEF_RST(10, 15, 5, 0), /* USB2_0_PRESETN */
DEF_RST(11, 0, 5, 1), /* GBETH_0_ARESETN_I */
DEF_RST(11, 1, 5, 2), /* GBETH_1_ARESETN_I */
+ DEF_RST(12, 5, 5, 22), /* CRU_0_PRESETN */
+ DEF_RST(12, 6, 5, 23), /* CRU_0_ARESETN */
+ DEF_RST(12, 7, 5, 24), /* CRU_0_S_RESETN */
+ DEF_RST(12, 8, 5, 25), /* CRU_1_PRESETN */
+ DEF_RST(12, 9, 5, 26), /* CRU_1_ARESETN */
+ DEF_RST(12, 10, 5, 27), /* CRU_1_S_RESETN */
+ DEF_RST(13, 1, 6, 2), /* ISP_0_VIN_ARESETN */
+ DEF_RST(13, 2, 6, 3), /* ISP_0_REG_ARESETN */
+ DEF_RST(13, 3, 6, 4), /* ISP_0_ISP_SRESETN */
+ DEF_RST(13, 4, 6, 5), /* ISP_0_PRESETN */
+ DEF_RST(13, 7, 6, 8), /* DSI_0_PRESETN */
+ DEF_RST(13, 8, 6, 9), /* DSI_0_ARESETN */
+ DEF_RST(13, 12, 6, 13), /* LCDC_0_RESET_N */
DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
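dtable_16_128 and the other divider tables added here follow the common clk_div_table convention: each entry maps a register field value to a divisor, and a {0, 0} entry terminates the table. A compact sketch of the lookup the divider code performs:

#include <stdio.h>

struct div_table { unsigned int val, div; };

static const struct div_table dtable_16_128[] = {
	{0, 16}, {1, 32}, {2, 64}, {3, 128}, {0, 0},
};

static unsigned int table_div(const struct div_table *t, unsigned int val)
{
	for (; t->div; t++)		/* div == 0 marks the terminator */
		if (t->val == val)
			return t->div;
	return 0;			/* value not programmed in the table */
}

int main(void)
{
	printf("field value 2 -> divide by %u\n", table_div(dtable_16_128, 2));
	return 0;
}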
diff --git a/drivers/clk/renesas/r9a09g057-cpg.c b/drivers/clk/renesas/r9a09g057-cpg.c
index f7de69a93de1..400d9e94f2e9 100644
--- a/drivers/clk/renesas/r9a09g057-cpg.c
+++ b/drivers/clk/renesas/r9a09g057-cpg.c
@@ -6,6 +6,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/clk/renesas.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -16,7 +17,7 @@
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G057_SPI_CLK_SPI,
+ LAST_DT_CORE_CLK = R9A09G057_USB3_1_CLKCORE,
/* External Input Clocks */
CLK_AUDIO_EXTAL,
@@ -30,6 +31,7 @@ enum clk_ids {
CLK_PLLCA55,
CLK_PLLVDO,
CLK_PLLETH,
+ CLK_PLLDSI,
CLK_PLLGPU,
/* Internal Core Clocks */
@@ -55,6 +57,7 @@ enum clk_ids {
CLK_PLLVDO_CRU1,
CLK_PLLVDO_CRU2,
CLK_PLLVDO_CRU3,
+ CLK_PLLVDO_ISP,
CLK_PLLETH_DIV_250_FIX,
CLK_PLLETH_DIV_125_FIX,
CLK_CSDIV_PLLETH_GBE0,
@@ -63,6 +66,9 @@ enum clk_ids {
CLK_SMUX2_GBE0_RXCLK,
CLK_SMUX2_GBE1_TXCLK,
CLK_SMUX2_GBE1_RXCLK,
+ CLK_CDIV4_PLLETH_LPCLK,
+ CLK_PLLETH_LPCLK_GEAR,
+ CLK_PLLDSI_GEAR,
CLK_PLLGPU_GEAR,
/* Module Clocks */
@@ -91,6 +97,26 @@ static const struct clk_div_table dtable_2_16[] = {
{0, 0},
};
+static const struct clk_div_table dtable_2_32[] = {
+ {0, 2},
+ {1, 4},
+ {2, 6},
+ {3, 8},
+ {4, 10},
+ {5, 12},
+ {6, 14},
+ {7, 16},
+ {8, 18},
+ {9, 20},
+ {10, 22},
+ {11, 24},
+ {12, 26},
+ {13, 28},
+ {14, 30},
+ {15, 32},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_2_64[] = {
{0, 2},
{1, 4},
@@ -107,6 +133,17 @@ static const struct clk_div_table dtable_2_100[] = {
{0, 0},
};
+static const struct clk_div_table dtable_16_128[] = {
+ {0, 16},
+ {1, 32},
+ {2, 64},
+ {3, 128},
+ {0, 0},
+};
+
+RZV2H_CPG_PLL_DSI_LIMITS(rzv2h_cpg_pll_dsi_limits);
+#define PLLDSI PLL_PACK_LIMITS(0xc0, 1, 0, &rzv2h_cpg_pll_dsi_limits)
+
/* Mux clock tables */
static const char * const smux2_gbe0_rxclk[] = { ".plleth_gbe0", "et0_rxclk" };
static const char * const smux2_gbe0_txclk[] = { ".plleth_gbe0", "et0_txclk" };
@@ -128,15 +165,15 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_PLL(".pllca55", CLK_PLLCA55, CLK_QEXTAL, PLLCA55),
DEF_FIXED(".pllvdo", CLK_PLLVDO, CLK_QEXTAL, 105, 2),
DEF_FIXED(".plleth", CLK_PLLETH, CLK_QEXTAL, 125, 3),
+ DEF_PLLDSI(".plldsi", CLK_PLLDSI, CLK_QEXTAL, PLLDSI),
DEF_PLL(".pllgpu", CLK_PLLGPU, CLK_QEXTAL, PLLGPU),
/* Internal Core Clocks */
DEF_FIXED(".pllcm33_div3", CLK_PLLCM33_DIV3, CLK_PLLCM33, 1, 3),
DEF_FIXED(".pllcm33_div4", CLK_PLLCM33_DIV4, CLK_PLLCM33, 1, 4),
DEF_FIXED(".pllcm33_div5", CLK_PLLCM33_DIV5, CLK_PLLCM33, 1, 5),
- DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR,
- CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_FIXED(".pllcm33_div16", CLK_PLLCM33_DIV16, CLK_PLLCM33, 1, 16),
+ DEF_DDIV(".pllcm33_gear", CLK_PLLCM33_GEAR, CLK_PLLCM33_DIV4, CDDIV0_DIVCTL1, dtable_2_64),
DEF_SMUX(".smux2_xspi_clk0", CLK_SMUX2_XSPI_CLK0, SSEL1_SELCTL2, smux2_xspi_clk0),
DEF_SMUX(".smux2_xspi_clk1", CLK_SMUX2_XSPI_CLK1, SSEL1_SELCTL3, smux2_xspi_clk1),
DEF_CSDIV(".pllcm33_xspi", CLK_PLLCM33_XSPI, CLK_SMUX2_XSPI_CLK1, CSDIV0_DIVCTL3,
@@ -158,6 +195,7 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_DDIV(".pllvdo_cru1", CLK_PLLVDO_CRU1, CLK_PLLVDO, CDDIV4_DIVCTL0, dtable_2_4),
DEF_DDIV(".pllvdo_cru2", CLK_PLLVDO_CRU2, CLK_PLLVDO, CDDIV4_DIVCTL1, dtable_2_4),
DEF_DDIV(".pllvdo_cru3", CLK_PLLVDO_CRU3, CLK_PLLVDO, CDDIV4_DIVCTL2, dtable_2_4),
+ DEF_DDIV(".pllvdo_isp", CLK_PLLVDO_ISP, CLK_PLLVDO, CDDIV2_DIVCTL3, dtable_2_64),
DEF_FIXED(".plleth_250_fix", CLK_PLLETH_DIV_250_FIX, CLK_PLLETH, 1, 4),
DEF_FIXED(".plleth_125_fix", CLK_PLLETH_DIV_125_FIX, CLK_PLLETH_DIV_250_FIX, 1, 2),
@@ -169,6 +207,12 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
DEF_SMUX(".smux2_gbe0_rxclk", CLK_SMUX2_GBE0_RXCLK, SSEL0_SELCTL3, smux2_gbe0_rxclk),
DEF_SMUX(".smux2_gbe1_txclk", CLK_SMUX2_GBE1_TXCLK, SSEL1_SELCTL0, smux2_gbe1_txclk),
DEF_SMUX(".smux2_gbe1_rxclk", CLK_SMUX2_GBE1_RXCLK, SSEL1_SELCTL1, smux2_gbe1_rxclk),
+ DEF_FIXED(".cdiv4_plleth_lpclk", CLK_CDIV4_PLLETH_LPCLK, CLK_PLLETH, 1, 4),
+ DEF_CSDIV(".plleth_lpclk_gear", CLK_PLLETH_LPCLK_GEAR, CLK_CDIV4_PLLETH_LPCLK,
+ CSDIV0_DIVCTL2, dtable_16_128),
+
+ DEF_PLLDSI_DIV(".plldsi_gear", CLK_PLLDSI_GEAR, CLK_PLLDSI,
+ CSDIV1_DIVCTL2, dtable_2_32),
DEF_DDIV(".pllgpu_gear", CLK_PLLGPU_GEAR, CLK_PLLGPU, CDDIV3_DIVCTL1, dtable_2_64),
@@ -191,6 +235,10 @@ static const struct cpg_core_clk r9a09g057_core_clks[] __initconst = {
CLK_PLLETH_DIV_125_FIX, 1, 1),
DEF_FIXED_MOD_STATUS("spi_clk_spi", R9A09G057_SPI_CLK_SPI, CLK_PLLCM33_XSPI, 1, 2,
FIXED_MOD_CONF_XSPI),
+ DEF_FIXED("usb3_0_ref_alt_clk_p", R9A09G057_USB3_0_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_0_core_clk", R9A09G057_USB3_0_CLKCORE, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_1_ref_alt_clk_p", R9A09G057_USB3_1_REF_ALT_CLK_P, CLK_QEXTAL, 1, 1),
+ DEF_FIXED("usb3_1_core_clk", R9A09G057_USB3_1_CLKCORE, CLK_QEXTAL, 1, 1),
};
static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
@@ -240,6 +288,8 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(5, BIT(13))),
DEF_MOD("wdt_3_clk_loco", CLK_QEXTAL, 5, 2, 2, 18,
BUS_MSTOP(5, BIT(13))),
+ DEF_MOD("rtc_0_clk_rtc", CLK_PLLCM33_DIV16, 5, 3, 2, 19,
+ BUS_MSTOP(3, BIT(11) | BIT(12))),
DEF_MOD("rspi_0_pclk", CLK_PLLCLN_DIV8, 5, 4, 2, 20,
BUS_MSTOP(11, BIT(0))),
DEF_MOD("rspi_0_pclk_sfr", CLK_PLLCLN_DIV8, 5, 5, 2, 21,
@@ -260,6 +310,12 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(11, BIT(2))),
DEF_MOD("scif_0_clk_pck", CLK_PLLCM33_DIV16, 8, 15, 4, 15,
BUS_MSTOP(3, BIT(14))),
+ DEF_MOD("i3c_0_pclkrw", CLK_PLLCLN_DIV16, 9, 0, 4, 16,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_pclk", CLK_PLLCLN_DIV16, 9, 1, 4, 17,
+ BUS_MSTOP(10, BIT(15))),
+ DEF_MOD("i3c_0_tclk", CLK_PLLCLN_DIV8, 9, 2, 4, 18,
+ BUS_MSTOP(10, BIT(15))),
DEF_MOD("riic_8_ckm", CLK_PLLCM33_DIV16, 9, 3, 4, 19,
BUS_MSTOP(3, BIT(13))),
DEF_MOD("riic_0_ckm", CLK_PLLCLN_DIV16, 9, 4, 4, 20,
@@ -308,6 +364,14 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(8, BIT(4))),
DEF_MOD("sdhi_2_aclk", CLK_PLLDTY_ACPU_DIV4, 10, 14, 5, 14,
BUS_MSTOP(8, BIT(4))),
+ DEF_MOD("usb3_0_aclk", CLK_PLLDTY_DIV8, 10, 15, 5, 15,
+ BUS_MSTOP(7, BIT(12))),
+ DEF_MOD("usb3_0_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 0, 5, 16,
+ BUS_MSTOP(7, BIT(14))),
+ DEF_MOD("usb3_1_aclk", CLK_PLLDTY_DIV8, 11, 1, 5, 17,
+ BUS_MSTOP(7, BIT(13))),
+ DEF_MOD("usb3_1_pclk_usbtst", CLK_PLLDTY_ACPU_DIV4, 11, 2, 5, 18,
+ BUS_MSTOP(7, BIT(15))),
DEF_MOD("usb2_0_u2h0_hclk", CLK_PLLDTY_DIV8, 11, 3, 5, 19,
BUS_MSTOP(7, BIT(7))),
DEF_MOD("usb2_0_u2h1_hclk", CLK_PLLDTY_DIV8, 11, 4, 5, 20,
@@ -366,12 +430,40 @@ static const struct rzv2h_mod_clk r9a09g057_mod_clks[] __initconst = {
BUS_MSTOP(9, BIT(7))),
DEF_MOD("cru_3_pclk", CLK_PLLDTY_DIV16, 13, 13, 6, 29,
BUS_MSTOP(9, BIT(7))),
+ DEF_MOD("isp_0_reg_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 2, 7, 2,
+ BUS_MSTOP(9, BIT(8))),
+ DEF_MOD("isp_0_pclk", CLK_PLLDTY_DIV16, 14, 3, 7, 3,
+ BUS_MSTOP(9, BIT(8))),
+ DEF_MOD("isp_0_vin_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 4, 7, 4,
+ BUS_MSTOP(9, BIT(9))),
+ DEF_MOD("isp_0_isp_sclk", CLK_PLLVDO_ISP, 14, 5, 7, 5,
+ BUS_MSTOP(9, BIT(9))),
+ DEF_MOD("dsi_0_pclk", CLK_PLLDTY_DIV16, 14, 8, 7, 8,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_aclk", CLK_PLLDTY_ACPU_DIV2, 14, 9, 7, 9,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_vclk1", CLK_PLLDSI_GEAR, 14, 10, 7, 10,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_lpclk", CLK_PLLETH_LPCLK_GEAR, 14, 11, 7, 11,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("dsi_0_pllref_clk", CLK_QEXTAL, 14, 12, 7, 12,
+ BUS_MSTOP(9, BIT(14) | BIT(15))),
+ DEF_MOD("lcdc_0_clk_a", CLK_PLLDTY_ACPU_DIV2, 14, 13, 7, 13,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
+ DEF_MOD("lcdc_0_clk_p", CLK_PLLDTY_DIV16, 14, 14, 7, 14,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
+ DEF_MOD("lcdc_0_clk_d", CLK_PLLDSI_GEAR, 14, 15, 7, 15,
+ BUS_MSTOP(10, BIT(1) | BIT(2) | BIT(3))),
DEF_MOD("gpu_0_clk", CLK_PLLGPU_GEAR, 15, 0, 7, 16,
BUS_MSTOP(3, BIT(4))),
DEF_MOD("gpu_0_axi_clk", CLK_PLLDTY_ACPU_DIV2, 15, 1, 7, 17,
BUS_MSTOP(3, BIT(4))),
DEF_MOD("gpu_0_ace_clk", CLK_PLLDTY_ACPU_DIV2, 15, 2, 7, 18,
BUS_MSTOP(3, BIT(4))),
+ DEF_MOD("tsu_0_pclk", CLK_QEXTAL, 16, 9, 8, 9,
+ BUS_MSTOP(5, BIT(2))),
+ DEF_MOD("tsu_1_pclk", CLK_QEXTAL, 16, 10, 8, 10,
+ BUS_MSTOP(2, BIT(15))),
};
static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
@@ -396,6 +488,8 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(7, 6, 3, 7), /* WDT_1_RESET */
DEF_RST(7, 7, 3, 8), /* WDT_2_RESET */
DEF_RST(7, 8, 3, 9), /* WDT_3_RESET */
+ DEF_RST(7, 9, 3, 10), /* RTC_0_RST_RTC */
+ DEF_RST(7, 10, 3, 11), /* RTC_0_RST_RTC_V */
DEF_RST(7, 11, 3, 12), /* RSPI_0_PRESETN */
DEF_RST(7, 12, 3, 13), /* RSPI_0_TRESETN */
DEF_RST(7, 13, 3, 14), /* RSPI_1_PRESETN */
@@ -403,6 +497,8 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(7, 15, 3, 16), /* RSPI_2_PRESETN */
DEF_RST(8, 0, 3, 17), /* RSPI_2_TRESETN */
DEF_RST(9, 5, 4, 6), /* SCIF_0_RST_SYSTEM_N */
+ DEF_RST(9, 6, 4, 7), /* I3C_0_PRESETN */
+ DEF_RST(9, 7, 4, 8), /* I3C_0_TRESETN */
DEF_RST(9, 8, 4, 9), /* RIIC_0_MRST */
DEF_RST(9, 9, 4, 10), /* RIIC_1_MRST */
DEF_RST(9, 10, 4, 11), /* RIIC_2_MRST */
@@ -417,6 +513,8 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(10, 7, 4, 24), /* SDHI_0_IXRST */
DEF_RST(10, 8, 4, 25), /* SDHI_1_IXRST */
DEF_RST(10, 9, 4, 26), /* SDHI_2_IXRST */
+ DEF_RST(10, 10, 4, 27), /* USB3_0_ARESETN */
+ DEF_RST(10, 11, 4, 28), /* USB3_1_ARESETN */
DEF_RST(10, 12, 4, 29), /* USB2_0_U2H0_HRESETN */
DEF_RST(10, 13, 4, 30), /* USB2_0_U2H1_HRESETN */
DEF_RST(10, 14, 4, 31), /* USB2_0_U2P_EXL_SYSRST */
@@ -435,9 +533,18 @@ static const struct rzv2h_reset r9a09g057_resets[] __initconst = {
DEF_RST(12, 14, 5, 31), /* CRU_3_PRESETN */
DEF_RST(12, 15, 6, 0), /* CRU_3_ARESETN */
DEF_RST(13, 0, 6, 1), /* CRU_3_S_RESETN */
+ DEF_RST(13, 1, 6, 2), /* ISP_0_VIN_ARESETN */
+ DEF_RST(13, 2, 6, 3), /* ISP_0_REG_ARESETN */
+ DEF_RST(13, 3, 6, 4), /* ISP_0_ISP_SRESETN */
+ DEF_RST(13, 4, 6, 5), /* ISP_0_PRESETN */
+ DEF_RST(13, 7, 6, 8), /* DSI_0_PRESETN */
+ DEF_RST(13, 8, 6, 9), /* DSI_0_ARESETN */
+ DEF_RST(13, 12, 6, 13), /* LCDC_0_RESET_N */
DEF_RST(13, 13, 6, 14), /* GPU_0_RESETN */
DEF_RST(13, 14, 6, 15), /* GPU_0_AXI_RESETN */
DEF_RST(13, 15, 6, 16), /* GPU_0_ACE_RESETN */
+ DEF_RST(15, 7, 7, 8), /* TSU_0_PRESETN */
+ DEF_RST(15, 8, 7, 9), /* TSU_1_PRESETN */
};
const struct rzv2h_cpg_info r9a09g057_cpg_info __initconst = {
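Several BUS_MSTOP masks in this file span more than one bit (BIT(14) | BIT(15) for DSI, BIT(1) | BIT(2) | BIT(3) for LCDC, and GENMASK(6, 5) in the RZ/G2L tables earlier); all bits in the mask are toggled together. A quick demonstration of the equivalent mask arithmetic:

#include <stdio.h>

/* Stand-ins for the kernel's BIT() and GENMASK() (32-bit flavour). */
#define SKETCH_BIT(n)		(1u << (n))
#define SKETCH_GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

int main(void)
{
	printf("GENMASK(6, 5)   = 0x%02x\n", SKETCH_GENMASK(6, 5));
	printf("BIT(14)|BIT(15) = 0x%04x\n", SKETCH_BIT(14) | SKETCH_BIT(15));
	return 0;
}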
diff --git a/drivers/clk/renesas/r9a09g077-cpg.c b/drivers/clk/renesas/r9a09g077-cpg.c
index c920d6a9707f..fb6cc94d08a1 100644
--- a/drivers/clk/renesas/r9a09g077-cpg.c
+++ b/drivers/clk/renesas/r9a09g077-cpg.c
@@ -46,12 +46,20 @@
#define DIVCA55C2 CONF_PACK(SCKCR2, 10, 1)
#define DIVCA55C3 CONF_PACK(SCKCR2, 11, 1)
#define DIVCA55S CONF_PACK(SCKCR2, 12, 1)
+#define DIVSPI3ASYNC CONF_PACK(SCKCR2, 16, 2)
+#define DIVSCI5ASYNC CONF_PACK(SCKCR2, 18, 2)
+#define DIVSPI0ASYNC CONF_PACK(SCKCR3, 0, 2)
+#define DIVSPI1ASYNC CONF_PACK(SCKCR3, 2, 2)
+#define DIVSPI2ASYNC CONF_PACK(SCKCR3, 4, 2)
#define DIVSCI0ASYNC CONF_PACK(SCKCR3, 6, 2)
+#define DIVSCI1ASYNC CONF_PACK(SCKCR3, 8, 2)
+#define DIVSCI2ASYNC CONF_PACK(SCKCR3, 10, 2)
+#define DIVSCI3ASYNC CONF_PACK(SCKCR3, 12, 2)
+#define DIVSCI4ASYNC CONF_PACK(SCKCR3, 14, 2)
#define SEL_PLL CONF_PACK(SCKCR, 22, 1)
-
enum rzt2h_clk_types {
CLK_TYPE_RZT2H_DIV = CLK_TYPE_CUSTOM, /* Clock with divider */
CLK_TYPE_RZT2H_MUX, /* Clock with clock source selector */
@@ -67,7 +75,7 @@ enum rzt2h_clk_types {
enum clk_ids {
/* Core Clock Outputs exported to DT */
- LAST_DT_CORE_CLK = R9A09G077_SDHI_CLKHS,
+ LAST_DT_CORE_CLK = R9A09G077_ETCLKE,
/* External Input Clocks */
CLK_EXTAL,
@@ -84,6 +92,15 @@ enum clk_ids {
CLK_SEL_CLK_PLL4,
CLK_PLL4D1,
CLK_SCI0ASYNC,
+ CLK_SCI1ASYNC,
+ CLK_SCI2ASYNC,
+ CLK_SCI3ASYNC,
+ CLK_SCI4ASYNC,
+ CLK_SCI5ASYNC,
+ CLK_SPI0ASYNC,
+ CLK_SPI1ASYNC,
+ CLK_SPI2ASYNC,
+ CLK_SPI3ASYNC,
/* Module Clocks */
MOD_CLK_BASE,
@@ -133,6 +150,25 @@ static const struct cpg_core_clk r9a09g077_core_clks[] __initconst = {
DEF_FIXED(".pll4d1", CLK_PLL4D1, CLK_SEL_CLK_PLL4, 1, 1),
DEF_DIV(".sci0async", CLK_SCI0ASYNC, CLK_PLL4D1, DIVSCI0ASYNC,
dtable_24_25_30_32),
+ DEF_DIV(".sci1async", CLK_SCI1ASYNC, CLK_PLL4D1, DIVSCI1ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci2async", CLK_SCI2ASYNC, CLK_PLL4D1, DIVSCI2ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci3async", CLK_SCI3ASYNC, CLK_PLL4D1, DIVSCI3ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci4async", CLK_SCI4ASYNC, CLK_PLL4D1, DIVSCI4ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".sci5async", CLK_SCI5ASYNC, CLK_PLL4D1, DIVSCI5ASYNC,
+ dtable_24_25_30_32),
+
+ DEF_DIV(".spi0async", CLK_SPI0ASYNC, CLK_PLL4D1, DIVSPI0ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".spi1async", CLK_SPI1ASYNC, CLK_PLL4D1, DIVSPI1ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".spi2async", CLK_SPI2ASYNC, CLK_PLL4D1, DIVSPI2ASYNC,
+ dtable_24_25_30_32),
+ DEF_DIV(".spi3async", CLK_SPI3ASYNC, CLK_PLL4D1, DIVSPI3ASYNC,
+ dtable_24_25_30_32),
/* Core output clk */
DEF_DIV("CA55C0", R9A09G077_CLK_CA55C0, CLK_SEL_CLK_PLL0, DIVCA55C0,
@@ -146,17 +182,44 @@ static const struct cpg_core_clk r9a09g077_core_clks[] __initconst = {
DEF_DIV("CA55S", R9A09G077_CLK_CA55S, CLK_SEL_CLK_PLL0, DIVCA55S,
dtable_1_2),
DEF_FIXED("PCLKGPTL", R9A09G077_CLK_PCLKGPTL, CLK_SEL_CLK_PLL1, 2, 1),
+ DEF_FIXED("PCLKH", R9A09G077_CLK_PCLKH, CLK_SEL_CLK_PLL1, 4, 1),
DEF_FIXED("PCLKM", R9A09G077_CLK_PCLKM, CLK_SEL_CLK_PLL1, 8, 1),
DEF_FIXED("PCLKL", R9A09G077_CLK_PCLKL, CLK_SEL_CLK_PLL1, 16, 1),
+ DEF_FIXED("PCLKAH", R9A09G077_CLK_PCLKAH, CLK_PLL4D1, 6, 1),
DEF_FIXED("PCLKAM", R9A09G077_CLK_PCLKAM, CLK_PLL4D1, 12, 1),
DEF_FIXED("SDHI_CLKHS", R9A09G077_SDHI_CLKHS, CLK_SEL_CLK_PLL2, 1, 1),
+ DEF_FIXED("USB_CLK", R9A09G077_USB_CLK, CLK_PLL4D1, 48, 1),
+ DEF_FIXED("ETCLKA", R9A09G077_ETCLKA, CLK_SEL_CLK_PLL1, 5, 1),
+ DEF_FIXED("ETCLKB", R9A09G077_ETCLKB, CLK_SEL_CLK_PLL1, 8, 1),
+ DEF_FIXED("ETCLKC", R9A09G077_ETCLKC, CLK_SEL_CLK_PLL1, 10, 1),
+ DEF_FIXED("ETCLKD", R9A09G077_ETCLKD, CLK_SEL_CLK_PLL1, 20, 1),
+ DEF_FIXED("ETCLKE", R9A09G077_ETCLKE, CLK_SEL_CLK_PLL1, 40, 1),
};
static const struct mssr_mod_clk r9a09g077_mod_clks[] __initconst = {
DEF_MOD("sci0fck", 8, CLK_SCI0ASYNC),
+ DEF_MOD("sci1fck", 9, CLK_SCI1ASYNC),
+ DEF_MOD("sci2fck", 10, CLK_SCI2ASYNC),
+ DEF_MOD("sci3fck", 11, CLK_SCI3ASYNC),
+ DEF_MOD("sci4fck", 12, CLK_SCI4ASYNC),
DEF_MOD("iic0", 100, R9A09G077_CLK_PCLKL),
DEF_MOD("iic1", 101, R9A09G077_CLK_PCLKL),
+ DEF_MOD("spi0", 104, CLK_SPI0ASYNC),
+ DEF_MOD("spi1", 105, CLK_SPI1ASYNC),
+ DEF_MOD("spi2", 106, CLK_SPI2ASYNC),
+ DEF_MOD("adc0", 206, R9A09G077_CLK_PCLKH),
+ DEF_MOD("adc1", 207, R9A09G077_CLK_PCLKH),
+ DEF_MOD("adc2", 225, R9A09G077_CLK_PCLKM),
+ DEF_MOD("tsu", 307, R9A09G077_CLK_PCLKL),
+ DEF_MOD("gmac0", 400, R9A09G077_CLK_PCLKM),
+ DEF_MOD("ethsw", 401, R9A09G077_CLK_PCLKM),
+ DEF_MOD("ethss", 403, R9A09G077_CLK_PCLKM),
+ DEF_MOD("usb", 408, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("gmac1", 416, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("gmac2", 417, R9A09G077_CLK_PCLKAM),
+ DEF_MOD("sci5fck", 600, CLK_SCI5ASYNC),
DEF_MOD("iic2", 601, R9A09G077_CLK_PCLKL),
+ DEF_MOD("spi3", 602, CLK_SPI3ASYNC),
DEF_MOD("sdhi0", 1212, R9A09G077_CLK_PCLKAM),
DEF_MOD("sdhi1", 1213, R9A09G077_CLK_PCLKAM),
};
@@ -177,27 +240,28 @@ r9a09g077_cpg_div_clk_register(struct device *dev,
parent_name = __clk_get_name(parent);
if (core->dtable)
- clk_hw = clk_hw_register_divider_table(dev, core->name,
- parent_name, 0,
- addr,
- GET_SHIFT(core->conf),
- GET_WIDTH(core->conf),
- core->flag,
- core->dtable,
- &pub->rmw_lock);
+ clk_hw = devm_clk_hw_register_divider_table(dev, core->name,
+ parent_name,
+ CLK_SET_RATE_PARENT,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->flag,
+ core->dtable,
+ &pub->rmw_lock);
else
- clk_hw = clk_hw_register_divider(dev, core->name,
- parent_name, 0,
- addr,
- GET_SHIFT(core->conf),
- GET_WIDTH(core->conf),
- core->flag, &pub->rmw_lock);
+ clk_hw = devm_clk_hw_register_divider(dev, core->name,
+ parent_name,
+ CLK_SET_RATE_PARENT,
+ addr,
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->flag, &pub->rmw_lock);
if (IS_ERR(clk_hw))
return ERR_CAST(clk_hw);
return clk_hw->clk;
-
}
static struct clk * __init
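Besides moving to the device-managed devm_clk_hw_register_divider_table()/devm_clk_hw_register_divider() helpers, the hunk above switches the divider flags from 0 to CLK_SET_RATE_PARENT, which lets a rate request propagate upward when the divider alone cannot satisfy it. A deliberately crude model of that propagation, not the clk framework's actual algorithm:

#include <stdio.h>

struct clk { unsigned long rate; struct clk *parent; int set_rate_parent; };

static void clk_set_rate_sketch(struct clk *c, unsigned long rate)
{
	unsigned int div = rate ? c->parent->rate / rate : 1;

	/* If no divisor hits the target exactly, ask the parent to move. */
	if ((div == 0 || c->parent->rate % rate) && c->set_rate_parent)
		c->parent->rate = rate * (div ? div : 1);
	c->rate = c->parent->rate / (div ? div : 1);
}

int main(void)
{
	struct clk pll = { 1000000000UL, NULL, 0 };
	struct clk div = { 0, &pll, 1 };	/* CLK_SET_RATE_PARENT set */

	clk_set_rate_sketch(&div, 300000000UL);	/* 1 GHz / 3 is not exact */
	printf("parent %lu, child %lu\n", pll.rate, div.rate);
	return 0;
}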
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
index a45f8e7e9ab6..7b271de7037a 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.c
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -35,7 +35,7 @@ void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
val |= set;
writel(val, reg);
spin_unlock_irqrestore(&cpg_lock, flags);
-};
+}
static int cpg_simple_notifier_call(struct notifier_block *nb,
unsigned long action, void *data)
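cpg_reg_modify(), whose stray trailing semicolon this hunk removes, is the lock-protected read-modify-write helper also used by the Z-clock code below. The same clear/set pattern in a self-contained user-space form (a mutex stands in for the driver's spinlock, a plain variable for the MMIO register):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t fake_reg;

static void reg_modify(uint32_t clear, uint32_t set)
{
	pthread_mutex_lock(&lock);
	fake_reg = (fake_reg & ~clear) | set;	/* read, clear, set, write */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	reg_modify(0, 0xf0);
	reg_modify(0x30, 0x01);
	printf("reg = 0x%02x\n", fake_reg);	/* prints 0xc1 */
	return 0;
}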
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 10ae20489df9..b954278ddd9d 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -54,10 +54,8 @@ static unsigned long cpg_pll_clk_recalc_rate(struct clk_hw *hw,
{
struct cpg_pll_clk *pll_clk = to_pll_clk(hw);
unsigned int mult;
- u32 val;
- val = readl(pll_clk->pllcr_reg) & CPG_PLLnCR_STC_MASK;
- mult = (val >> __ffs(CPG_PLLnCR_STC_MASK)) + 1;
+ mult = FIELD_GET(CPG_PLLnCR_STC_MASK, readl(pll_clk->pllcr_reg)) + 1;
return parent_rate * mult * pll_clk->fixed_mult;
}
@@ -94,7 +92,7 @@ static int cpg_pll_clk_set_rate(struct clk_hw *hw, unsigned long rate,
val = readl(pll_clk->pllcr_reg);
val &= ~CPG_PLLnCR_STC_MASK;
- val |= (mult - 1) << __ffs(CPG_PLLnCR_STC_MASK);
+ val |= FIELD_PREP(CPG_PLLnCR_STC_MASK, mult - 1);
writel(val, pll_clk->pllcr_reg);
for (i = 1000; i; i--) {
@@ -176,11 +174,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -231,7 +225,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
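The conversions above replace open-coded '(val & mask) >> __ffs(mask)' shifts with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>. The two forms are equivalent, as this small demonstration shows; the 7-bit STC mask value is assumed for the example:

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Models FIELD_GET(): isolate the field, shift down by the mask's LSB. */
static unsigned int field_get_sketch(unsigned int mask, unsigned int val)
{
	return (val & mask) >> (ffs(mask) - 1);
}

/* Models FIELD_PREP(): shift the value up into the field's position. */
static unsigned int field_prep_sketch(unsigned int mask, unsigned int val)
{
	return (val << (ffs(mask) - 1)) & mask;
}

int main(void)
{
	unsigned int stc_mask = 0x7f000000;	/* assumed STC field, bits 30:24 */

	printf("get  -> %u\n", field_get_sketch(stc_mask, 0x2a000000));	/* 42 */
	printf("prep -> 0x%08x\n", field_prep_sketch(stc_mask, 42));
	return 0;
}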
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
index fb9a876aaba5..ac2b5afec46d 100644
--- a/drivers/clk/renesas/rcar-gen4-cpg.c
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -257,7 +257,7 @@ static struct clk * __init cpg_pll_clk_register(const char *name,
}
/*
- * Z0 Clock & Z1 Clock
+ * Z0, Z1 and ZG Clock
*/
#define CPG_FRQCRB 0x00000804
#define CPG_FRQCRB_KICK BIT(31)
@@ -279,11 +279,7 @@ static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
+ unsigned int mult = 32 - field_get(zclk->mask, readl(zclk->reg));
return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
32 * zclk->fixed_div);
@@ -334,7 +330,8 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
return -EBUSY;
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+ cpg_reg_modify(zclk->reg, zclk->mask,
+ field_prep(zclk->mask, 32 - mult));
/*
* Set KICK bit in FRQCRB to update hardware setting and wait for
@@ -389,9 +386,14 @@ static struct clk * __init cpg_z_clk_register(const char *name,
if (offset < 32) {
zclk->reg = reg + CPG_FRQCRC0;
- } else {
+ } else if (offset < 64) {
zclk->reg = reg + CPG_FRQCRC1;
offset -= 32;
+ } else if (offset < 96) {
+ zclk->reg = reg + CPG_FRQCRB;
+ offset -= 64;
+ } else {
+ return ERR_PTR(-EINVAL);
}
zclk->kick_reg = reg + CPG_FRQCRB;
zclk->hw.init = &init;
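cpg_z_clk_register() now selects among three registers by bit offset: FRQCRC0 for offsets below 32, FRQCRC1 below 64, FRQCRB below 96, rebasing the offset into the chosen register and rejecting anything larger. A sketch of that bank selection; the offset constants are placeholders, not the Gen4 register map:

#include <stdio.h>

static int pick_bank(unsigned int offset, unsigned int *reg_off, unsigned int *bit)
{
	/* Illustrative stand-ins for FRQCRC0, FRQCRC1 and FRQCRB. */
	static const unsigned int banks[] = { 0xe0, 0xe4, 0x04 };

	if (offset >= 96)
		return -1;		/* out of range, like the -EINVAL path */
	*reg_off = banks[offset / 32];	/* one bank per 32 bits of offset */
	*bit = offset % 32;		/* rebased position inside the bank */
	return 0;
}

int main(void)
{
	unsigned int reg, bit;

	if (!pick_bank(70, &reg, &bit))
		printf("offset 70 -> reg 0x%02x bit %u\n", reg, bit);
	return 0;
}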
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 5ff6ee1f7d4b..7f9b7aa39790 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -40,8 +40,10 @@
#define WARN_DEBUG(x) do { } while (0)
#endif
+#define RZT2H_RESET_REG_READ_COUNT 7
+
/*
- * Module Standby and Software Reset register offets.
+ * Module Standby and Software Reset register offsets.
*
* If the registers exist, these are valid for SH-Mobile, R-Mobile,
* R-Car Gen2, R-Car Gen3, and RZ/G1.
@@ -137,6 +139,22 @@ static const u16 srcr_for_gen4[] = {
0x2C60, 0x2C64, 0x2C68, 0x2C6C, 0x2C70, 0x2C74,
};
+static const u16 mrcr_for_rzt2h[] = {
+ 0x240, /* MRCTLA */
+ 0x244, /* Reserved */
+ 0x248, /* Reserved */
+ 0x24C, /* Reserved */
+ 0x250, /* MRCTLE */
+ 0x254, /* Reserved */
+ 0x258, /* Reserved */
+ 0x25C, /* Reserved */
+ 0x260, /* MRCTLI */
+ 0x264, /* Reserved */
+ 0x268, /* Reserved */
+ 0x26C, /* Reserved */
+ 0x270, /* MRCTLM */
+};
+
/*
* Software Reset Clearing Register offsets
*/
@@ -290,9 +308,20 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable)
spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
- if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A ||
- priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
+ if (!enable || priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
+ return 0;
+
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ /*
+ * For the RZ/T2H case, it is necessary to perform a read-back after
+ * accessing the MSTPCRm register and to dummy-read any register of
+ * the IP at least seven times. Instead of memory-mapping the IP
+ * register, we simply add a delay after the read operation.
+ */
+ cpg_rzt2h_mstp_read(hw, priv->control_regs[reg]);
+ udelay(10);
return 0;
+ }
error = readl_poll_timeout_atomic(priv->pub.base0 + priv->status_regs[reg],
value, !(value & bitmask), 0, 10);
@@ -451,7 +480,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
break;
}
- if (IS_ERR_OR_NULL(clk))
+ if (IS_ERR(clk))
goto fail;
dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
@@ -676,64 +705,133 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
#define rcdev_to_priv(x) container_of(x, struct cpg_mssr_priv, rcdev)
-static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int cpg_mssr_reset_operate(struct reset_controller_dev *rcdev,
+ const char *func, bool set, unsigned long id)
{
struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = id / 32;
unsigned int bit = id % 32;
+ const u16 off = set ? priv->reset_regs[reg] : priv->reset_clear_regs[reg];
u32 bitmask = BIT(bit);
- dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
+ if (func)
+ dev_dbg(priv->dev, "%s %u%02u\n", func, reg, bit);
+
+ writel(bitmask, priv->pub.base0 + off);
+ readl(priv->pub.base0 + off);
+ barrier_data(priv->pub.base0 + off);
+
+ return 0;
+}
+
+static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
/* Reset module */
- writel(bitmask, priv->pub.base0 + priv->reset_regs[reg]);
+ cpg_mssr_reset_operate(rcdev, "reset", true, id);
- /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
- udelay(35);
+ /*
+ * On R-Car Gen4, the required delay after writing SRCR is 1 ms.
+ * On older SoCs, the required delay after writing SRCR is 35 us
+ * (one cycle of the RCLK clock @ ca. 32 kHz).
+ */
+ if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4)
+ usleep_range(1000, 2000);
+ else
+ usleep_range(35, 1000);
/* Release module from reset state */
- writel(bitmask, priv->pub.base0 + priv->reset_clear_regs[reg]);
-
- return 0;
+ return cpg_mssr_reset_operate(rcdev, NULL, false, id);
}
static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
+ return cpg_mssr_reset_operate(rcdev, "assert", true, id);
+}
+
+static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return cpg_mssr_reset_operate(rcdev, "deassert", false, id);
+}
+
+static int cpg_mssr_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
-
- writel(bitmask, priv->pub.base0 + priv->reset_regs[reg]);
- return 0;
+ return !!(readl(priv->pub.base0 + priv->reset_regs[reg]) & bitmask);
}
-static int cpg_mssr_deassert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int cpg_mrcr_set_reset_state(struct reset_controller_dev *rcdev,
+ unsigned long id, bool set)
{
struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
+ void __iomem *reg_addr;
+ unsigned long flags;
+ unsigned int i;
+ u32 val;
+
+ dev_dbg(priv->dev, "%s %u%02u\n", set ? "assert" : "deassert", reg, bit);
+
+ spin_lock_irqsave(&priv->pub.rmw_lock, flags);
+
+ reg_addr = priv->pub.base0 + priv->reset_regs[reg];
+ /* Read current value and modify */
+ val = readl(reg_addr);
+ if (set)
+ val |= bitmask;
+ else
+ val &= ~bitmask;
+ writel(val, reg_addr);
- dev_dbg(priv->dev, "deassert %u%02u\n", reg, bit);
+ /*
+ * To safely proceed after releasing a module from reset, multiple
+ * dummy reads of the same register must be performed.
+ */
+ for (i = 0; !set && i < RZT2H_RESET_REG_READ_COUNT; i++)
+ readl(reg_addr);
+
+ /* Verify the operation */
+ val = readl(reg_addr);
+ if (set == !(bitmask & val)) {
+ dev_err(priv->dev, "Reset register %u%02u operation failed\n", reg, bit);
+ spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
+ return -EIO;
+ }
+
+ spin_unlock_irqrestore(&priv->pub.rmw_lock, flags);
- writel(bitmask, priv->pub.base0 + priv->reset_clear_regs[reg]);
return 0;
}
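The verification at the end of cpg_mrcr_set_reset_state() is easier to read when spelled out: after the write (and dummy reads), the observed bit state must match the requested one. A stripped-down sketch:

	bool observed = !!(readl(reg_addr) & bitmask);

	if (observed != set)
		return -EIO;	/* the register did not take the new state */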
-static int cpg_mssr_status(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int cpg_mrcr_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
- struct cpg_mssr_priv *priv = rcdev_to_priv(rcdev);
- unsigned int reg = id / 32;
- unsigned int bit = id % 32;
- u32 bitmask = BIT(bit);
+ int ret;
- return !!(readl(priv->pub.base0 + priv->reset_regs[reg]) & bitmask);
+ ret = cpg_mrcr_set_reset_state(rcdev, id, true);
+ if (ret)
+ return ret;
+
+ return cpg_mrcr_set_reset_state(rcdev, id, false);
+}
+
+static int cpg_mrcr_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return cpg_mrcr_set_reset_state(rcdev, id, true);
+}
+
+static int cpg_mrcr_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return cpg_mrcr_set_reset_state(rcdev, id, false);
}
static const struct reset_control_ops cpg_mssr_reset_ops = {
@@ -743,6 +841,13 @@ static const struct reset_control_ops cpg_mssr_reset_ops = {
.status = cpg_mssr_status,
};
+static const struct reset_control_ops cpg_mrcr_reset_ops = {
+ .reset = cpg_mrcr_reset,
+ .assert = cpg_mrcr_assert,
+ .deassert = cpg_mrcr_deassert,
+ .status = cpg_mssr_status,
+};
+
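From a consumer's point of view these ops are reached through the generic reset API; a usage sketch (the reset API calls are standard, the probe context is hypothetical):

static int example_consumer_probe(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* -> .reset: assert, delay, deassert in one call */
	return reset_control_reset(rst);
}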
static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
@@ -760,11 +865,23 @@ static int cpg_mssr_reset_xlate(struct reset_controller_dev *rcdev,
static int cpg_mssr_reset_controller_register(struct cpg_mssr_priv *priv)
{
- priv->rcdev.ops = &cpg_mssr_reset_ops;
+ /*
+ * RZ/T2H (and family) has Module Reset Control Registers, which
+ * allow controlling the resets of certain modules.
+ * The number of resets is not equal to the number of module clocks.
+ */
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
+ priv->rcdev.ops = &cpg_mrcr_reset_ops;
+ priv->rcdev.nr_resets = ARRAY_SIZE(mrcr_for_rzt2h) * 32;
+ } else {
+ priv->rcdev.ops = &cpg_mssr_reset_ops;
+ priv->rcdev.nr_resets = priv->num_mod_clks;
+ }
+
priv->rcdev.of_node = priv->dev->of_node;
priv->rcdev.of_reset_n_cells = 1;
priv->rcdev.of_xlate = cpg_mssr_reset_xlate;
- priv->rcdev.nr_resets = priv->num_mod_clks;
+
return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
@@ -1082,6 +1199,7 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
of_for_each_phandle(&it, rc, node, "clocks", "#clock-cells", -1) {
int idx;
+ unsigned int *new_ids;
if (it.node != priv->np)
continue;
@@ -1092,11 +1210,13 @@ static int __init cpg_mssr_reserved_init(struct cpg_mssr_priv *priv,
if (args[0] != CPG_MOD)
continue;
- ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
- if (!ids) {
+ new_ids = krealloc_array(ids, (num + 1), sizeof(*ids), GFP_KERNEL);
+ if (!new_ids) {
of_node_put(it.node);
+ kfree(ids);
return -ENOMEM;
}
+ ids = new_ids;
if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
idx = MOD_CLK_PACK_10(args[1]); /* for DEF_MOD_STB() */
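The change above applies the usual safe-grow idiom for krealloc_array(): keep the old pointer until the new allocation is known to have succeeded, so the original buffer can still be freed on failure. In isolation:

	unsigned int *tmp;

	tmp = krealloc_array(ids, num + 1, sizeof(*ids), GFP_KERNEL);
	if (!tmp) {
		kfree(ids);	/* krealloc_array() left the old buffer intact */
		return -ENOMEM;
	}
	ids = tmp;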
@@ -1166,6 +1286,7 @@ static int __init cpg_mssr_common_init(struct device *dev,
priv->control_regs = stbcr;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H) {
priv->control_regs = mstpcr_for_rzt2h;
+ priv->reset_regs = mrcr_for_rzt2h;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
priv->status_regs = mstpsr_for_gen4;
priv->control_regs = mstpcr_for_gen4;
@@ -1262,8 +1383,7 @@ static int __init cpg_mssr_probe(struct platform_device *pdev)
goto reserve_exit;
/* Reset Controller not supported for Standby Control SoCs */
- if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A ||
- priv->reg_layout == CLK_REG_LAYOUT_RZ_T2H)
+ if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A)
goto reserve_exit;
error = cpg_mssr_reset_controller_register(priv);
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 187233302818..64d1ef6e4c94 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -824,11 +824,10 @@ static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw *hw,
return pll5_rate;
}
-static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int rzg2l_cpg_sipll5_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
@@ -902,7 +901,7 @@ static int rzg2l_cpg_sipll5_set_rate(struct clk_hw *hw,
static const struct clk_ops rzg2l_cpg_sipll5_ops = {
.recalc_rate = rzg2l_cpg_sipll5_recalc_rate,
- .round_rate = rzg2l_cpg_sipll5_round_rate,
+ .determine_rate = rzg2l_cpg_sipll5_determine_rate,
.set_rate = rzg2l_cpg_sipll5_set_rate,
};
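All the .round_rate to .determine_rate conversions in this series share one shape: struct clk_rate_request carries the requested rate in and the achievable rate out. The sipll5 case above is the trivial pass-through; a sketch of the general form:

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* A real clock would clamp or snap req->rate here; returning
	 * 0 with req->rate untouched accepts the request as-is. */
	return 0;
}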
@@ -1178,7 +1177,7 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
goto fail;
}
- if (IS_ERR_OR_NULL(clk))
+ if (IS_ERR(clk))
goto fail;
dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
@@ -1639,8 +1638,8 @@ fail:
#define rcdev_to_priv(x) container_of(x, struct rzg2l_cpg_priv, rcdev)
-static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
- unsigned long id)
+static int __rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id, bool assert)
{
struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
const struct rzg2l_cpg_info *info = priv->info;
@@ -1648,9 +1647,13 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
u32 mask = BIT(info->resets[id].bit);
s8 monbit = info->resets[id].monbit;
u32 value = mask << 16;
+ int ret;
- dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));
+ dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
+ assert ? "assert" : "deassert", id, CLK_RST_R(reg));
+ if (!assert)
+ value |= mask;
writel(value, priv->base + CLK_RST_R(reg));
if (info->has_clk_mon_regs) {
@@ -1664,38 +1667,26 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
return 0;
}
- return readl_poll_timeout_atomic(priv->base + reg, value,
- value & mask, 10, 200);
+ ret = readl_poll_timeout_atomic(priv->base + reg, value,
+ assert == !!(value & mask), 10, 200);
+ if (ret && !assert) {
+ value = mask << 16;
+ writel(value, priv->base + CLK_RST_R(info->resets[id].off));
+ }
+
+ return ret;
+}
+
+static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ return __rzg2l_cpg_assert(rcdev, id, true);
}
static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
- struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
- const struct rzg2l_cpg_info *info = priv->info;
- unsigned int reg = info->resets[id].off;
- u32 mask = BIT(info->resets[id].bit);
- s8 monbit = info->resets[id].monbit;
- u32 value = (mask << 16) | mask;
-
- dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
- CLK_RST_R(reg));
-
- writel(value, priv->base + CLK_RST_R(reg));
-
- if (info->has_clk_mon_regs) {
- reg = CLK_MRST_R(reg);
- } else if (monbit >= 0) {
- reg = CPG_RST_MON;
- mask = BIT(monbit);
- } else {
- /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
- udelay(35);
- return 0;
- }
-
- return readl_poll_timeout_atomic(priv->base + reg, value,
- !(value & mask), 10, 200);
+ return __rzg2l_cpg_assert(rcdev, id, false);
}
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index 0a71c5ec24b6..55e815be16c8 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -34,6 +34,7 @@
#define CPG_BUS_PERI_COM_MSTOP (0xB6C)
#define CPG_BUS_PERI_CPU_MSTOP (0xB70)
#define CPG_BUS_PERI_DDR_MSTOP (0xB74)
+#define CPG_BUS_PERI_VIDEO_MSTOP (0xB78)
#define CPG_BUS_REG0_MSTOP (0xB7C)
#define CPG_BUS_REG1_MSTOP (0xB80)
#define CPG_BUS_TZCDDR_MSTOP (0xB84)
diff --git a/drivers/clk/renesas/rzv2h-cpg.c b/drivers/clk/renesas/rzv2h-cpg.c
index f468afbb54e2..3f6299b9fec0 100644
--- a/drivers/clk/renesas/rzv2h-cpg.c
+++ b/drivers/clk/renesas/rzv2h-cpg.c
@@ -14,9 +14,14 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/clk/renesas.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
+#include <linux/limits.h>
+#include <linux/math.h>
+#include <linux/math64.h>
+#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -26,6 +31,7 @@
#include <linux/refcount.h>
#include <linux/reset-controller.h>
#include <linux/string_choices.h>
+#include <linux/units.h>
#include <dt-bindings/clock/renesas-cpg-mssr.h>
@@ -47,13 +53,15 @@
#define CPG_PLL_STBY(x) ((x))
#define CPG_PLL_STBY_RESETB BIT(0)
+#define CPG_PLL_STBY_SSC_EN BIT(2)
#define CPG_PLL_STBY_RESETB_WEN BIT(16)
+#define CPG_PLL_STBY_SSC_EN_WEN BIT(18)
#define CPG_PLL_CLK1(x) ((x) + 0x004)
-#define CPG_PLL_CLK1_KDIV(x) ((s16)FIELD_GET(GENMASK(31, 16), (x)))
-#define CPG_PLL_CLK1_MDIV(x) FIELD_GET(GENMASK(15, 6), (x))
-#define CPG_PLL_CLK1_PDIV(x) FIELD_GET(GENMASK(5, 0), (x))
+#define CPG_PLL_CLK1_KDIV GENMASK(31, 16)
+#define CPG_PLL_CLK1_MDIV GENMASK(15, 6)
+#define CPG_PLL_CLK1_PDIV GENMASK(5, 0)
#define CPG_PLL_CLK2(x) ((x) + 0x008)
-#define CPG_PLL_CLK2_SDIV(x) FIELD_GET(GENMASK(2, 0), (x))
+#define CPG_PLL_CLK2_SDIV GENMASK(2, 0)
#define CPG_PLL_MON(x) ((x) + 0x010)
#define CPG_PLL_MON_RESETB BIT(0)
#define CPG_PLL_MON_LOCK BIT(4)
@@ -65,6 +73,22 @@
#define CPG_CLKSTATUS0 (0x700)
+/* On RZ/G3E SoC we have two DSI PLLs */
+#define MAX_CPG_DSI_PLL 2
+
+/**
+ * struct rzv2h_pll_dsi_info - PLL DSI information, holds the limits and parameters
+ *
+ * @pll_dsi_limits: PLL DSI parameters limits
+ * @pll_dsi_parameters: Calculated PLL DSI parameters
+ * @req_pll_dsi_rate: Requested PLL DSI rate
+ */
+struct rzv2h_pll_dsi_info {
+ const struct rzv2h_pll_limits *pll_dsi_limits;
+ struct rzv2h_pll_div_pars pll_dsi_parameters;
+ unsigned long req_pll_dsi_rate;
+};
+
/**
* struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
*
@@ -80,6 +104,7 @@
* @ff_mod_status_ops: Fixed Factor Module Status Clock operations
* @mstop_count: Array of mstop values
* @rcdev: Reset controller entity
+ * @pll_dsi_info: Array of PLL DSI information, holds the limits and parameters
*/
struct rzv2h_cpg_priv {
struct device *dev;
@@ -98,6 +123,8 @@ struct rzv2h_cpg_priv {
atomic_t *mstop_count;
struct reset_controller_dev rcdev;
+
+ struct rzv2h_pll_dsi_info pll_dsi_info[MAX_CPG_DSI_PLL];
};
#define rcdev_to_priv(x) container_of(x, struct rzv2h_cpg_priv, rcdev)
@@ -168,6 +195,460 @@ struct rzv2h_ff_mod_status_clk {
#define to_rzv2h_ff_mod_status_clk(_hw) \
container_of(_hw, struct rzv2h_ff_mod_status_clk, fix.hw)
+/**
+ * struct rzv2h_plldsi_div_clk - PLL DSI DDIV clock
+ *
+ * @dtable: divider table
+ * @priv: CPG private data
+ * @hw: divider clk
+ * @ddiv: divider configuration
+ */
+struct rzv2h_plldsi_div_clk {
+ const struct clk_div_table *dtable;
+ struct rzv2h_cpg_priv *priv;
+ struct clk_hw hw;
+ struct ddiv ddiv;
+};
+
+#define to_plldsi_div_clk(_hw) \
+ container_of(_hw, struct rzv2h_plldsi_div_clk, hw)
+
+#define RZ_V2H_OSC_CLK_IN_MEGA (24 * MEGA)
+#define RZV2H_MAX_DIV_TABLES (16)
+
+/**
+ * rzv2h_get_pll_pars - Finds the best combination of PLL parameters
+ * for a given frequency.
+ *
+ * @limits: Pointer to the structure containing the limits for the PLL parameters
+ * @pars: Pointer to the structure where the best calculated PLL parameters values
+ * will be stored
+ * @freq_millihz: Target output frequency in millihertz
+ *
+ * This function calculates the best set of PLL parameters (M, K, P, S) to achieve
+ * the desired frequency.
+ * There is no direct formula to calculate the PLL parameters, as it's an open
+ * system of equations; this function therefore uses an iterative approach to
+ * determine the best solution. The best solution is one that minimizes the error
+ * (desired frequency - actual frequency).
+ *
+ * Return: true if a valid set of parameters values is found, false otherwise.
+ */
+bool rzv2h_get_pll_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_pars *pars, u64 freq_millihz)
+{
+ u64 fout_min_millihz = mul_u32_u32(limits->fout.min, MILLI);
+ u64 fout_max_millihz = mul_u32_u32(limits->fout.max, MILLI);
+ struct rzv2h_pll_pars p, best;
+
+ if (freq_millihz > fout_max_millihz ||
+ freq_millihz < fout_min_millihz)
+ return false;
+
+ /* Initialize best error to maximum possible value */
+ best.error_millihz = S64_MAX;
+
+ for (p.p = limits->p.min; p.p <= limits->p.max; p.p++) {
+ u32 fref = RZ_V2H_OSC_CLK_IN_MEGA / p.p;
+ u16 divider;
+
+ for (divider = 1 << limits->s.min, p.s = limits->s.min;
+ p.s <= limits->s.max; p.s++, divider <<= 1) {
+ for (p.m = limits->m.min; p.m <= limits->m.max; p.m++) {
+ u64 output_m, output_k_range;
+ s64 pll_k, output_k;
+ u64 fvco, output;
+
+ /*
+ * The frequency generated by the PLL + divider
+ * is calculated as follows:
+ *
+ * With:
+ * Freq = Ffout = Ffvco / 2^(pll_s)
+ * Ffvco = (pll_m + (pll_k / 65536)) * Ffref
+ * Ffref = 24MHz / pll_p
+ *
+ * Freq can also be rewritten as:
+ * Freq = Ffvco / 2^(pll_s)
+ * = ((pll_m + (pll_k / 65536)) * Ffref) / 2^(pll_s)
+ * = (pll_m * Ffref) / 2^(pll_s) + ((pll_k / 65536) * Ffref) / 2^(pll_s)
+ * = output_m + output_k
+ *
+ * Every parameter has been determined at this
+ * point, but pll_k.
+ *
+ * Considering that:
+ * limits->k.min <= pll_k <= limits->k.max
+ * Then:
+ * -0.5 <= (pll_k / 65536) < 0.5
+ * Therefore:
+ * -Ffref / (2 * 2^(pll_s)) <= output_k < Ffref / (2 * 2^(pll_s))
+ */
+
+ /* Compute output M component (in mHz) */
+ output_m = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(p.m, fref) * MILLI,
+ divider);
+ /* Compute range for output K (in mHz) */
+ output_k_range = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(fref, MILLI),
+ 2 * divider);
+ /*
+ * No point in continuing if we can't achieve
+ * the desired frequency
+ */
+ if (freq_millihz < (output_m - output_k_range) ||
+ freq_millihz >= (output_m + output_k_range)) {
+ continue;
+ }
+
+ /*
+ * Compute the K component
+ *
+ * Since:
+ * Freq = output_m + output_k
+ * Then:
+ * output_k = Freq - output_m
+ * = ((pll_k / 65536) * Ffref) / 2^(pll_s)
+ * Therefore:
+ * pll_k = (output_k * 65536 * 2^(pll_s)) / Ffref
+ */
+ output_k = freq_millihz - output_m;
+ pll_k = div_s64(output_k * 65536ULL * divider,
+ fref);
+ pll_k = DIV_S64_ROUND_CLOSEST(pll_k, MILLI);
+
+ /* Validate K value within allowed limits */
+ if (pll_k < limits->k.min ||
+ pll_k > limits->k.max)
+ continue;
+
+ p.k = pll_k;
+
+ /* Compute (Ffvco * 65536) */
+ fvco = mul_u32_u32(p.m * 65536 + p.k, fref);
+ if (fvco < mul_u32_u32(limits->fvco.min, 65536) ||
+ fvco > mul_u32_u32(limits->fvco.max, 65536))
+ continue;
+
+ /* PLL_M component of (output * 65536 * PLL_P) */
+ output = mul_u32_u32(p.m * 65536, RZ_V2H_OSC_CLK_IN_MEGA);
+ /* PLL_K component of (output * 65536 * PLL_P) */
+ output += p.k * RZ_V2H_OSC_CLK_IN_MEGA;
+ /* Make it in mHz */
+ output *= MILLI;
+ output = DIV_U64_ROUND_CLOSEST(output, 65536 * p.p * divider);
+
+ /* Check output frequency against limits */
+ if (output < fout_min_millihz ||
+ output > fout_max_millihz)
+ continue;
+
+ p.error_millihz = freq_millihz - output;
+ p.freq_millihz = output;
+
+ /* If an exact match is found, return immediately */
+ if (p.error_millihz == 0) {
+ *pars = p;
+ return true;
+ }
+
+ /* Update best match if error is smaller */
+ if (abs(best.error_millihz) > abs(p.error_millihz))
+ best = p;
+ }
+ }
+ }
+
+ /* If no valid parameters were found, return false */
+ if (best.error_millihz == S64_MAX)
+ return false;
+
+ *pars = best;
+ return true;
+}
+EXPORT_SYMBOL_NS_GPL(rzv2h_get_pll_pars, "RZV2H_CPG");
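To make the formula in the comment block concrete, a worked sketch (helper name and parameter values are illustrative only):

/* Fout = ((M + K/65536) * 24 MHz / P) / 2^S
 * e.g. M=100, K=0, P=1, S=1 -> Ffvco = 2400 MHz, Fout = 1200 MHz. */
static u64 example_pll_fout_hz(u32 m, s32 k, u32 p, u32 s)
{
	u64 fref = 24000000ULL / p;
	u64 fvco_x65536 = (u64)(m * 65536 + k) * fref;

	return div_u64(fvco_x65536 >> s, 65536);
}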
+
+/**
+ * rzv2h_get_pll_divs_pars - Finds the best combination of PLL parameters
+ * and divider value for a given frequency.
+ *
+ * @limits: Pointer to the structure containing the limits for the PLL parameters
+ * @pars: Pointer to the structure where the best calculated PLL parameters and
+ * divider values will be stored
+ * @table: Pointer to the array of valid divider values
+ * @table_size: Size of the divider values array
+ * @freq_millihz: Target output frequency in millihertz
+ *
+ * This function calculates the best set of PLL parameters (M, K, P, S) and divider
+ * value to achieve the desired frequency. See rzv2h_get_pll_pars() for more details
+ * on how the PLL parameters are calculated.
+ *
+ * freq_millihz is the desired frequency generated by the PLL followed
+ * by a gear.
+ *
+ * Return: true if a valid combination of parameters and divider is found,
+ * false otherwise.
+ */
+bool rzv2h_get_pll_divs_pars(const struct rzv2h_pll_limits *limits,
+ struct rzv2h_pll_div_pars *pars,
+ const u8 *table, u8 table_size, u64 freq_millihz)
+{
+ struct rzv2h_pll_div_pars p, best;
+
+ best.div.error_millihz = S64_MAX;
+ p.div.error_millihz = S64_MAX;
+ for (unsigned int i = 0; i < table_size; i++) {
+ if (!rzv2h_get_pll_pars(limits, &p.pll, freq_millihz * table[i]))
+ continue;
+
+ p.div.divider_value = table[i];
+ p.div.freq_millihz = DIV_U64_ROUND_CLOSEST(p.pll.freq_millihz, table[i]);
+ p.div.error_millihz = freq_millihz - p.div.freq_millihz;
+
+ if (p.div.error_millihz == 0) {
+ *pars = p;
+ return true;
+ }
+
+ if (abs(best.div.error_millihz) > abs(p.div.error_millihz))
+ best = p;
+ }
+
+ if (best.div.error_millihz == S64_MAX)
+ return false;
+
+ *pars = best;
+ return true;
+}
+EXPORT_SYMBOL_NS_GPL(rzv2h_get_pll_divs_pars, "RZV2H_CPG");
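A caller-side sketch (the limits pointer, dev, divider table, and target rate are hypothetical surrounding context):

	static const u8 example_divs[] = { 1, 2, 4, 8 };
	struct rzv2h_pll_div_pars pars;

	/* 148.5 MHz expressed in millihertz, as the API expects */
	if (rzv2h_get_pll_divs_pars(limits, &pars, example_divs,
				    ARRAY_SIZE(example_divs),
				    148500000ULL * MILLI))
		dev_dbg(dev, "PLL %llu mHz, divider %u\n",
			pars.pll.freq_millihz, pars.div.divider_value);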
+
+static unsigned long rzv2h_cpg_plldsi_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rzv2h_plldsi_div_clk *dsi_div = to_plldsi_div_clk(hw);
+ struct rzv2h_cpg_priv *priv = dsi_div->priv;
+ struct ddiv ddiv = dsi_div->ddiv;
+ u32 div;
+
+ div = readl(priv->base + ddiv.offset);
+ div >>= ddiv.shift;
+ div &= clk_div_mask(ddiv.width);
+ div = dsi_div->dtable[div].div;
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, div);
+}
+
+static int rzv2h_cpg_plldsi_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct rzv2h_plldsi_div_clk *dsi_div = to_plldsi_div_clk(hw);
+ struct pll_clk *pll_clk = to_pll(clk_hw_get_parent(hw));
+ struct rzv2h_cpg_priv *priv = dsi_div->priv;
+ u8 table[RZV2H_MAX_DIV_TABLES] = { 0 };
+ struct rzv2h_pll_div_pars *dsi_params;
+ struct rzv2h_pll_dsi_info *dsi_info;
+ const struct clk_div_table *div;
+ unsigned int i = 0;
+ u64 rate_millihz;
+
+ dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
+ dsi_params = &dsi_info->pll_dsi_parameters;
+
+ rate_millihz = mul_u32_u32(req->rate, MILLI);
+ if (rate_millihz == dsi_params->div.error_millihz + dsi_params->div.freq_millihz)
+ goto exit_determine_rate;
+
+ for (div = dsi_div->dtable; div->div; div++) {
+ if (i >= RZV2H_MAX_DIV_TABLES)
+ return -EINVAL;
+ table[i++] = div->div;
+ }
+
+ if (!rzv2h_get_pll_divs_pars(dsi_info->pll_dsi_limits, dsi_params, table, i,
+ rate_millihz)) {
+ dev_err(priv->dev, "failed to determine rate for req->rate: %lu\n",
+ req->rate);
+ return -EINVAL;
+ }
+
+exit_determine_rate:
+ req->rate = DIV_ROUND_CLOSEST_ULL(dsi_params->div.freq_millihz, MILLI);
+ req->best_parent_rate = req->rate * dsi_params->div.divider_value;
+ dsi_info->req_pll_dsi_rate = req->best_parent_rate;
+
+ return 0;
+}
+
+static int rzv2h_cpg_plldsi_div_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct rzv2h_plldsi_div_clk *dsi_div = to_plldsi_div_clk(hw);
+ struct pll_clk *pll_clk = to_pll(clk_hw_get_parent(hw));
+ struct rzv2h_cpg_priv *priv = dsi_div->priv;
+ struct rzv2h_pll_div_pars *dsi_params;
+ struct rzv2h_pll_dsi_info *dsi_info;
+ struct ddiv ddiv = dsi_div->ddiv;
+ const struct clk_div_table *clkt;
+ bool divider_found = false;
+ u32 val, shift;
+
+ dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
+ dsi_params = &dsi_info->pll_dsi_parameters;
+
+ for (clkt = dsi_div->dtable; clkt->div; clkt++) {
+ if (clkt->div == dsi_params->div.divider_value) {
+ divider_found = true;
+ break;
+ }
+ }
+
+ if (!divider_found)
+ return -EINVAL;
+
+ shift = ddiv.shift;
+ val = readl(priv->base + ddiv.offset) | DDIV_DIVCTL_WEN(shift);
+ val &= ~(clk_div_mask(ddiv.width) << shift);
+ val |= clkt->val << shift;
+ writel(val, priv->base + ddiv.offset);
+
+ return 0;
+}
+
+static const struct clk_ops rzv2h_cpg_plldsi_div_ops = {
+ .recalc_rate = rzv2h_cpg_plldsi_div_recalc_rate,
+ .determine_rate = rzv2h_cpg_plldsi_div_determine_rate,
+ .set_rate = rzv2h_cpg_plldsi_div_set_rate,
+};
+
+static struct clk * __init
+rzv2h_cpg_plldsi_div_clk_register(const struct cpg_core_clk *core,
+ struct rzv2h_cpg_priv *priv)
+{
+ struct rzv2h_plldsi_div_clk *clk_hw_data;
+ struct clk **clks = priv->clks;
+ struct clk_init_data init;
+ const struct clk *parent;
+ const char *parent_name;
+ struct clk_hw *clk_hw;
+ int ret;
+
+ parent = clks[core->parent];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
+ if (!clk_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw_data->priv = priv;
+ clk_hw_data->ddiv = core->cfg.ddiv;
+ clk_hw_data->dtable = core->dtable;
+
+ parent_name = __clk_get_name(parent);
+ init.name = core->name;
+ init.ops = &rzv2h_cpg_plldsi_div_ops;
+ init.flags = core->flag;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ clk_hw = &clk_hw_data->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(priv->dev, clk_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw->clk;
+}
+
+static int rzv2h_cpg_plldsi_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ struct rzv2h_pll_dsi_info *dsi_info;
+ u64 rate_millihz;
+
+ dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
+ /* check if the divider has already invoked the algorithm */
+ if (req->rate == dsi_info->req_pll_dsi_rate)
+ return 0;
+
+ /* If req->rate doesn't match, do the calculation assuming there is no divider */
+ rate_millihz = mul_u32_u32(req->rate, MILLI);
+ if (!rzv2h_get_pll_pars(dsi_info->pll_dsi_limits,
+ &dsi_info->pll_dsi_parameters.pll, rate_millihz)) {
+ dev_err(priv->dev,
+ "failed to determine rate for req->rate: %lu\n",
+ req->rate);
+ return -EINVAL;
+ }
+
+ req->rate = DIV_ROUND_CLOSEST_ULL(dsi_info->pll_dsi_parameters.pll.freq_millihz, MILLI);
+ dsi_info->req_pll_dsi_rate = req->rate;
+
+ return 0;
+}
+
+static int rzv2h_cpg_pll_set_rate(struct pll_clk *pll_clk,
+ struct rzv2h_pll_pars *params,
+ bool ssc_disable)
+{
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+ u16 offset = pll_clk->pll.offset;
+ u32 val;
+ int ret;
+
+ /* Put PLL into standby mode */
+ writel(CPG_PLL_STBY_RESETB_WEN, priv->base + CPG_PLL_STBY(offset));
+ ret = readl_poll_timeout_atomic(priv->base + CPG_PLL_MON(offset),
+ val, !(val & CPG_PLL_MON_LOCK),
+ 100, 2000);
+ if (ret) {
+ dev_err(priv->dev, "Failed to put PLLDSI into standby mode");
+ return ret;
+ }
+
+ /* Output clock setting 1 */
+ writel(FIELD_PREP(CPG_PLL_CLK1_KDIV, (u16)params->k) |
+ FIELD_PREP(CPG_PLL_CLK1_MDIV, params->m) |
+ FIELD_PREP(CPG_PLL_CLK1_PDIV, params->p),
+ priv->base + CPG_PLL_CLK1(offset));
+
+ /* Output clock setting 2 */
+ val = readl(priv->base + CPG_PLL_CLK2(offset));
+ writel((val & ~CPG_PLL_CLK2_SDIV) | FIELD_PREP(CPG_PLL_CLK2_SDIV, params->s),
+ priv->base + CPG_PLL_CLK2(offset));
+
+ /* Put PLL to normal mode */
+ if (ssc_disable)
+ val = CPG_PLL_STBY_SSC_EN_WEN;
+ else
+ val = CPG_PLL_STBY_SSC_EN_WEN | CPG_PLL_STBY_SSC_EN;
+ writel(val | CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB,
+ priv->base + CPG_PLL_STBY(offset));
+
+ /* PLL normal mode transition, output clock stability check */
+ ret = readl_poll_timeout_atomic(priv->base + CPG_PLL_MON(offset),
+ val, (val & CPG_PLL_MON_LOCK),
+ 100, 2000);
+ if (ret) {
+ dev_err(priv->dev, "Failed to put PLLDSI into normal mode");
+ return ret;
+ }
+
+ return 0;
+}
+
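The CPG_PLL_STBY writes above follow the paired write-enable convention: each control bit only changes if its _WEN companion is set in the same write. Isolated for clarity:

	/* RESETB <- 0: only RESETB changes, since only its WEN is set */
	writel(CPG_PLL_STBY_RESETB_WEN, base + CPG_PLL_STBY(offset));

	/* RESETB <- 1 with SSC kept disabled: WEN set for both fields */
	writel(CPG_PLL_STBY_RESETB_WEN | CPG_PLL_STBY_RESETB |
	       CPG_PLL_STBY_SSC_EN_WEN, base + CPG_PLL_STBY(offset));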
+static int rzv2h_cpg_plldsi_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct pll_clk *pll_clk = to_pll(hw);
+ struct rzv2h_pll_dsi_info *dsi_info;
+ struct rzv2h_cpg_priv *priv = pll_clk->priv;
+
+ dsi_info = &priv->pll_dsi_info[pll_clk->pll.instance];
+
+ return rzv2h_cpg_pll_set_rate(pll_clk, &dsi_info->pll_dsi_parameters.pll, true);
+}
+
static int rzv2h_cpg_pll_clk_is_enabled(struct clk_hw *hw)
{
struct pll_clk *pll_clk = to_pll(hw);
@@ -231,12 +712,19 @@ static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
clk1 = readl(priv->base + CPG_PLL_CLK1(pll.offset));
clk2 = readl(priv->base + CPG_PLL_CLK2(pll.offset));
- rate = mul_u64_u32_shr(parent_rate, (CPG_PLL_CLK1_MDIV(clk1) << 16) +
- CPG_PLL_CLK1_KDIV(clk1), 16 + CPG_PLL_CLK2_SDIV(clk2));
+ rate = mul_u64_u32_shr(parent_rate, (FIELD_GET(CPG_PLL_CLK1_MDIV, clk1) << 16) +
+ (s16)FIELD_GET(CPG_PLL_CLK1_KDIV, clk1),
+ 16 + FIELD_GET(CPG_PLL_CLK2_SDIV, clk2));
- return DIV_ROUND_CLOSEST_ULL(rate, CPG_PLL_CLK1_PDIV(clk1));
+ return DIV_ROUND_CLOSEST_ULL(rate, FIELD_GET(CPG_PLL_CLK1_PDIV, clk1));
}
+static const struct clk_ops rzv2h_cpg_plldsi_ops = {
+ .recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
+ .determine_rate = rzv2h_cpg_plldsi_determine_rate,
+ .set_rate = rzv2h_cpg_plldsi_set_rate,
+};
+
static const struct clk_ops rzv2h_cpg_pll_ops = {
.is_enabled = rzv2h_cpg_pll_clk_is_enabled,
.enable = rzv2h_cpg_pll_clk_enable,
@@ -263,6 +751,10 @@ rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
if (!pll_clk)
return ERR_PTR(-ENOMEM);
+ if (core->type == CLK_TYPE_PLLDSI)
+ priv->pll_dsi_info[core->cfg.pll.instance].pll_dsi_limits =
+ core->cfg.pll.limits;
+
parent_name = __clk_get_name(parent);
init.name = core->name;
init.ops = ops;
@@ -294,15 +786,6 @@ static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
-{
- struct clk_divider *divider = to_clk_divider(hw);
-
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
-}
-
static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -359,7 +842,6 @@ ddiv_timeout:
static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
.recalc_rate = rzv2h_ddiv_recalc_rate,
- .round_rate = rzv2h_ddiv_round_rate,
.determine_rate = rzv2h_ddiv_determine_rate,
.set_rate = rzv2h_ddiv_set_rate,
};
@@ -597,11 +1079,17 @@ rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
case CLK_TYPE_SMUX:
clk = rzv2h_cpg_mux_clk_register(core, priv);
break;
+ case CLK_TYPE_PLLDSI:
+ clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_plldsi_ops);
+ break;
+ case CLK_TYPE_PLLDSI_DIV:
+ clk = rzv2h_cpg_plldsi_div_clk_register(core, priv);
+ break;
default:
goto fail;
}
- if (IS_ERR_OR_NULL(clk))
+ if (IS_ERR(clk))
goto fail;
dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
@@ -864,6 +1352,7 @@ static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
u32 mask = BIT(priv->resets[id].reset_bit);
u8 monbit = priv->resets[id].mon_bit;
u32 value = mask << 16;
+ int ret;
dev_dbg(rcdev->dev, "%s id:%ld offset:0x%x\n",
assert ? "assert" : "deassert", id, reg);
@@ -875,9 +1364,14 @@ static int __rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
mask = BIT(monbit);
- return readl_poll_timeout_atomic(priv->base + reg, value,
- assert ? (value & mask) : !(value & mask),
- 10, 200);
+ ret = readl_poll_timeout_atomic(priv->base + reg, value,
+ assert == !!(value & mask), 10, 200);
+ if (ret && !assert) {
+ value = mask << 16;
+ writel(value, priv->base + GET_RST_OFFSET(priv->resets[id].reset_index));
+ }
+
+ return ret;
}
static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
diff --git a/drivers/clk/renesas/rzv2h-cpg.h b/drivers/clk/renesas/rzv2h-cpg.h
index 840eed25aeda..dc957bdaf5e9 100644
--- a/drivers/clk/renesas/rzv2h-cpg.h
+++ b/drivers/clk/renesas/rzv2h-cpg.h
@@ -16,20 +16,28 @@
*
* @offset: STBY register offset
* @has_clkn: Flag to indicate if CLK1/2 are accessible or not
+ * @instance: PLL instance number
*/
struct pll {
unsigned int offset:9;
unsigned int has_clkn:1;
+ unsigned int instance:2;
+ const struct rzv2h_pll_limits *limits;
};
-#define PLL_PACK(_offset, _has_clkn) \
+#define PLL_PACK_LIMITS(_offset, _has_clkn, _instance, _limits) \
((struct pll){ \
.offset = _offset, \
- .has_clkn = _has_clkn \
+ .has_clkn = _has_clkn, \
+ .instance = _instance, \
+ .limits = _limits \
})
-#define PLLCA55 PLL_PACK(0x60, 1)
-#define PLLGPU PLL_PACK(0x120, 1)
+#define PLL_PACK(_offset, _has_clkn, _instance) \
+ PLL_PACK_LIMITS(_offset, _has_clkn, _instance, NULL)
+
+#define PLLCA55 PLL_PACK(0x60, 1, 0)
+#define PLLGPU PLL_PACK(0x120, 1, 0)
/**
* struct ddiv - Structure for dynamic switching divider
@@ -115,9 +123,11 @@ struct fixed_mod_conf {
#define CPG_SSEL1 (0x304)
#define CPG_CDDIV0 (0x400)
#define CPG_CDDIV1 (0x404)
+#define CPG_CDDIV2 (0x408)
#define CPG_CDDIV3 (0x40C)
#define CPG_CDDIV4 (0x410)
#define CPG_CSDIV0 (0x500)
+#define CPG_CSDIV1 (0x504)
#define CDDIV0_DIVCTL1 DDIV_PACK(CPG_CDDIV0, 4, 3, 1)
#define CDDIV0_DIVCTL2 DDIV_PACK(CPG_CDDIV0, 8, 3, 2)
@@ -125,6 +135,7 @@ struct fixed_mod_conf {
#define CDDIV1_DIVCTL1 DDIV_PACK(CPG_CDDIV1, 4, 2, 5)
#define CDDIV1_DIVCTL2 DDIV_PACK(CPG_CDDIV1, 8, 2, 6)
#define CDDIV1_DIVCTL3 DDIV_PACK(CPG_CDDIV1, 12, 2, 7)
+#define CDDIV2_DIVCTL3 DDIV_PACK(CPG_CDDIV2, 12, 3, 11)
#define CDDIV3_DIVCTL1 DDIV_PACK(CPG_CDDIV3, 4, 3, 13)
#define CDDIV3_DIVCTL2 DDIV_PACK(CPG_CDDIV3, 8, 3, 14)
#define CDDIV3_DIVCTL3 DDIV_PACK(CPG_CDDIV3, 12, 1, 15)
@@ -134,7 +145,9 @@ struct fixed_mod_conf {
#define CSDIV0_DIVCTL0 DDIV_PACK(CPG_CSDIV0, 0, 2, CSDIV_NO_MON)
#define CSDIV0_DIVCTL1 DDIV_PACK(CPG_CSDIV0, 4, 2, CSDIV_NO_MON)
+#define CSDIV0_DIVCTL2 DDIV_PACK(CPG_CSDIV0, 8, 2, CSDIV_NO_MON)
#define CSDIV0_DIVCTL3 DDIV_PACK_NO_RMW(CPG_CSDIV0, 12, 2, CSDIV_NO_MON)
+#define CSDIV1_DIVCTL2 DDIV_PACK(CPG_CSDIV1, 8, 4, CSDIV_NO_MON)
#define SSEL0_SELCTL2 SMUX_PACK(CPG_SSEL0, 8, 1)
#define SSEL0_SELCTL3 SMUX_PACK(CPG_SSEL0, 12, 1)
@@ -188,6 +201,8 @@ enum clk_types {
CLK_TYPE_PLL,
CLK_TYPE_DDIV, /* Dynamic Switching Divider */
CLK_TYPE_SMUX, /* Static Mux */
+ CLK_TYPE_PLLDSI, /* PLLDSI */
+ CLK_TYPE_PLLDSI_DIV, /* PLLDSI divider */
};
#define DEF_TYPE(_name, _id, _type...) \
@@ -218,6 +233,14 @@ enum clk_types {
.num_parents = ARRAY_SIZE(_parent_names), \
.flag = CLK_SET_RATE_PARENT, \
.mux_flags = CLK_MUX_HIWORD_MASK)
+#define DEF_PLLDSI(_name, _id, _parent, _pll_packed) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLLDSI, .parent = _parent, .cfg.pll = _pll_packed)
+#define DEF_PLLDSI_DIV(_name, _id, _parent, _ddiv_packed, _dtable) \
+ DEF_TYPE(_name, _id, CLK_TYPE_PLLDSI_DIV, \
+ .cfg.ddiv = _ddiv_packed, \
+ .dtable = _dtable, \
+ .parent = _parent, \
+ .flag = CLK_SET_RATE_PARENT)
/**
* struct rzv2h_mod_clk - Module Clocks definitions
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index febb7944f34b..5cf1e0fd6fb3 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -30,6 +30,13 @@ config CLK_RV1126
help
Build the driver for RV1126 Clock Driver.
+config CLK_RV1126B
+ bool "Rockchip RV1126B clock controller support"
+ depends on ARM64 || COMPILE_TEST
+ default y
+ help
+ Build the driver for RV1126B Clock Driver.
+
config CLK_RK3036
bool "Rockchip RK3036 clock controller support"
depends on ARM || COMPILE_TEST
@@ -93,6 +100,13 @@ config CLK_RK3399
help
Build the driver for RK3399 Clock Driver.
+config CLK_RK3506
+ bool "Rockchip RK3506 clock controller support"
+ depends on ARM || COMPILE_TEST
+ default y
+ help
+ Build the driver for RK3506 Clock Driver.
+
config CLK_RK3528
bool "Rockchip RK3528 clock controller support"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/rockchip/Makefile b/drivers/clk/rockchip/Makefile
index c281a9738d9f..4d8cbb2044c7 100644
--- a/drivers/clk/rockchip/Makefile
+++ b/drivers/clk/rockchip/Makefile
@@ -20,6 +20,7 @@ clk-rockchip-$(CONFIG_RESET_CONTROLLER) += softrst.o
obj-$(CONFIG_CLK_PX30) += clk-px30.o
obj-$(CONFIG_CLK_RV110X) += clk-rv1108.o
obj-$(CONFIG_CLK_RV1126) += clk-rv1126.o
+obj-$(CONFIG_CLK_RV1126B) += clk-rv1126b.o rst-rv1126b.o
obj-$(CONFIG_CLK_RK3036) += clk-rk3036.o
obj-$(CONFIG_CLK_RK312X) += clk-rk3128.o
obj-$(CONFIG_CLK_RK3188) += clk-rk3188.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_CLK_RK3308) += clk-rk3308.o
obj-$(CONFIG_CLK_RK3328) += clk-rk3328.o
obj-$(CONFIG_CLK_RK3368) += clk-rk3368.o
obj-$(CONFIG_CLK_RK3399) += clk-rk3399.o
+obj-$(CONFIG_CLK_RK3506) += clk-rk3506.o rst-rk3506.o
obj-$(CONFIG_CLK_RK3528) += clk-rk3528.o rst-rk3528.o
obj-$(CONFIG_CLK_RK3562) += clk-rk3562.o rst-rk3562.o
obj-$(CONFIG_CLK_RK3568) += clk-rk3568.o
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index dcc9dcb597ae..6e91a3041a03 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -396,3 +396,168 @@ free_cpuclk:
kfree(cpuclk);
return ERR_PTR(ret);
}
+
+static int rockchip_cpuclk_multi_pll_pre_rate_change(struct rockchip_cpuclk *cpuclk,
+ struct clk_notifier_data *ndata)
+{
+ unsigned long new_rate = roundup(ndata->new_rate, 1000);
+ const struct rockchip_cpuclk_rate_table *rate;
+ unsigned long flags;
+
+ rate = rockchip_get_cpuclk_settings(cpuclk, new_rate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for cpuclk\n",
+ __func__, new_rate);
+ return -EINVAL;
+ }
+
+ if (new_rate > ndata->old_rate) {
+ spin_lock_irqsave(cpuclk->lock, flags);
+ rockchip_cpuclk_set_dividers(cpuclk, rate);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
+ }
+
+ return 0;
+}
+
+static int rockchip_cpuclk_multi_pll_post_rate_change(struct rockchip_cpuclk *cpuclk,
+ struct clk_notifier_data *ndata)
+{
+ unsigned long new_rate = roundup(ndata->new_rate, 1000);
+ const struct rockchip_cpuclk_rate_table *rate;
+ unsigned long flags;
+
+ rate = rockchip_get_cpuclk_settings(cpuclk, new_rate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for cpuclk\n",
+ __func__, new_rate);
+ return -EINVAL;
+ }
+
+ if (new_rate < ndata->old_rate) {
+ spin_lock_irqsave(cpuclk->lock, flags);
+ rockchip_cpuclk_set_dividers(cpuclk, rate);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
+ }
+
+ return 0;
+}
+
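The pre/post split above encodes the usual cpuclk ordering rule: divider settings for the target rate are applied before a frequency increase and after a decrease, so the derived aclk/pclk never overshoot their signed-off maximums. Condensed, with set_dividers() standing in for the locked rockchip_cpuclk_set_dividers() call and rate being the table entry looked up for new_rate:

	if (event == PRE_RATE_CHANGE && new_rate > ndata->old_rate)
		set_dividers(rate);	/* tighten before speeding up */
	else if (event == POST_RATE_CHANGE && new_rate < ndata->old_rate)
		set_dividers(rate);	/* relax after slowing down */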
+static int rockchip_cpuclk_multi_pll_notifier_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+ struct rockchip_cpuclk *cpuclk = to_rockchip_cpuclk_nb(nb);
+ int ret = 0;
+
+ pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
+ __func__, event, ndata->old_rate, ndata->new_rate);
+ if (event == PRE_RATE_CHANGE)
+ ret = rockchip_cpuclk_multi_pll_pre_rate_change(cpuclk, ndata);
+ else if (event == POST_RATE_CHANGE)
+ ret = rockchip_cpuclk_multi_pll_post_rate_change(cpuclk, ndata);
+
+ return notifier_from_errno(ret);
+}
+
+struct clk *rockchip_clk_register_cpuclk_multi_pll(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ int div_offset, u8 div_shift,
+ u8 div_width, u8 div_flags,
+ unsigned long flags, spinlock_t *lock,
+ const struct rockchip_cpuclk_rate_table *rates,
+ int nrates)
+{
+ struct rockchip_cpuclk *cpuclk;
+ struct clk_hw *hw;
+ struct clk_mux *mux = NULL;
+ struct clk_divider *div = NULL;
+ const struct clk_ops *mux_ops = NULL, *div_ops = NULL;
+ int ret;
+
+ if (num_parents > 1) {
+ mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->reg = base + muxdiv_offset;
+ mux->shift = mux_shift;
+ mux->mask = BIT(mux_width) - 1;
+ mux->flags = mux_flags;
+ mux->lock = lock;
+ mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
+ : &clk_mux_ops;
+ }
+
+ if (div_width > 0) {
+ div = kzalloc(sizeof(*div), GFP_KERNEL);
+ if (!div) {
+ ret = -ENOMEM;
+ goto free_mux;
+ }
+
+ div->flags = div_flags;
+ if (div_offset)
+ div->reg = base + div_offset;
+ else
+ div->reg = base + muxdiv_offset;
+ div->shift = div_shift;
+ div->width = div_width;
+ div->lock = lock;
+ div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
+ ? &clk_divider_ro_ops
+ : &clk_divider_ops;
+ }
+
+ hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
+ mux ? &mux->hw : NULL, mux_ops,
+ div ? &div->hw : NULL, div_ops,
+ NULL, NULL, flags);
+ if (IS_ERR(hw)) {
+ ret = PTR_ERR(hw);
+ goto free_div;
+ }
+
+ cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
+ if (!cpuclk) {
+ ret = -ENOMEM;
+ goto unregister_clk;
+ }
+
+ cpuclk->reg_base = base;
+ cpuclk->lock = lock;
+ cpuclk->clk_nb.notifier_call = rockchip_cpuclk_multi_pll_notifier_cb;
+ ret = clk_notifier_register(hw->clk, &cpuclk->clk_nb);
+ if (ret) {
+ pr_err("%s: failed to register clock notifier for %s\n",
+ __func__, name);
+ goto free_cpuclk;
+ }
+
+ if (nrates > 0) {
+ cpuclk->rate_count = nrates;
+ cpuclk->rate_table = kmemdup(rates,
+ sizeof(*rates) * nrates,
+ GFP_KERNEL);
+ if (!cpuclk->rate_table) {
+ ret = -ENOMEM;
+ goto free_cpuclk;
+ }
+ }
+
+ return hw->clk;
+
+free_cpuclk:
+ kfree(cpuclk);
+unregister_clk:
+ clk_hw_unregister_composite(hw);
+free_div:
+ kfree(div);
+free_mux:
+ kfree(mux);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/clk/rockchip/clk-ddr.c b/drivers/clk/rockchip/clk-ddr.c
index 86718c54e56b..8866a65982a0 100644
--- a/drivers/clk/rockchip/clk-ddr.c
+++ b/drivers/clk/rockchip/clk-ddr.c
@@ -55,17 +55,18 @@ rockchip_ddrclk_sip_recalc_rate(struct clk_hw *hw,
return res.a0;
}
-static long rockchip_ddrclk_sip_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int rockchip_ddrclk_sip_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct arm_smccc_res res;
- arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, rate, 0,
+ arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, req->rate, 0,
ROCKCHIP_SIP_CONFIG_DRAM_ROUND_RATE,
0, 0, 0, 0, &res);
- return res.a0;
+ req->rate = res.a0;
+
+ return 0;
}
static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
@@ -83,7 +84,7 @@ static u8 rockchip_ddrclk_get_parent(struct clk_hw *hw)
static const struct clk_ops rockchip_ddrclk_sip_ops = {
.recalc_rate = rockchip_ddrclk_sip_recalc_rate,
.set_rate = rockchip_ddrclk_sip_set_rate,
- .round_rate = rockchip_ddrclk_sip_round_rate,
+ .determine_rate = rockchip_ddrclk_sip_determine_rate,
.get_parent = rockchip_ddrclk_get_parent,
};
diff --git a/drivers/clk/rockchip/clk-half-divider.c b/drivers/clk/rockchip/clk-half-divider.c
index 64f7faad2148..fbc018e8afa4 100644
--- a/drivers/clk/rockchip/clk-half-divider.c
+++ b/drivers/clk/rockchip/clk-half-divider.c
@@ -92,17 +92,19 @@ static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
return bestdiv;
}
-static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_half_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_divider *divider = to_clk_divider(hw);
int div;
- div = clk_half_divider_bestdiv(hw, rate, prate,
+ div = clk_half_divider_bestdiv(hw, req->rate, &req->best_parent_rate,
divider->width,
divider->flags);
- return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
+ req->rate = DIV_ROUND_UP_ULL(((u64)req->best_parent_rate * 2), div * 2 + 3);
+
+ return 0;
}
static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -141,7 +143,7 @@ static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_half_divider_ops = {
.recalc_rate = clk_half_divider_recalc_rate,
- .round_rate = clk_half_divider_round_rate,
+ .determine_rate = clk_half_divider_determine_rate,
.set_rate = clk_half_divider_set_rate,
};
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
index c9d599c31923..86dba3826a77 100644
--- a/drivers/clk/rockchip/clk-pll.c
+++ b/drivers/clk/rockchip/clk-pll.c
@@ -61,8 +61,8 @@ static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
return NULL;
}
-static long rockchip_pll_round_rate(struct clk_hw *hw,
- unsigned long drate, unsigned long *prate)
+static int rockchip_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
@@ -70,12 +70,17 @@ static long rockchip_pll_round_rate(struct clk_hw *hw,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
- if (drate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
}
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
/*
@@ -352,7 +357,7 @@ static const struct clk_ops rockchip_rk3036_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3036_pll_clk_ops = {
.recalc_rate = rockchip_rk3036_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3036_pll_set_rate,
.enable = rockchip_rk3036_pll_enable,
.disable = rockchip_rk3036_pll_disable,
@@ -571,7 +576,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
.recalc_rate = rockchip_rk3066_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3066_pll_set_rate,
.enable = rockchip_rk3066_pll_enable,
.disable = rockchip_rk3066_pll_disable,
@@ -836,7 +841,7 @@ static const struct clk_ops rockchip_rk3399_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3399_pll_clk_ops = {
.recalc_rate = rockchip_rk3399_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3399_pll_set_rate,
.enable = rockchip_rk3399_pll_enable,
.disable = rockchip_rk3399_pll_disable,
@@ -1036,7 +1041,7 @@ static const struct clk_ops rockchip_rk3588_pll_clk_norate_ops = {
static const struct clk_ops rockchip_rk3588_pll_clk_ops = {
.recalc_rate = rockchip_rk3588_pll_recalc_rate,
- .round_rate = rockchip_pll_round_rate,
+ .determine_rate = rockchip_pll_determine_rate,
.set_rate = rockchip_rk3588_pll_set_rate,
.enable = rockchip_rk3588_pll_enable,
.disable = rockchip_rk3588_pll_disable,
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index 0a1e017df7c6..9cf3e1e43b78 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -871,7 +871,7 @@ static const int rk3288_saved_cru_reg_ids[] = {
static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
-static int rk3288_clk_suspend(void)
+static int rk3288_clk_suspend(void *data)
{
int i, reg_id;
@@ -906,7 +906,7 @@ static int rk3288_clk_suspend(void)
return 0;
}
-static void rk3288_clk_resume(void)
+static void rk3288_clk_resume(void *data)
{
int i, reg_id;
@@ -923,11 +923,15 @@ static void rk3288_clk_shutdown(void)
writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON);
}
-static struct syscore_ops rk3288_clk_syscore_ops = {
+static const struct syscore_ops rk3288_clk_syscore_ops = {
.suspend = rk3288_clk_suspend,
.resume = rk3288_clk_resume,
};
+static struct syscore rk3288_clk_syscore = {
+ .ops = &rk3288_clk_syscore_ops,
+};
+
static void __init rk3288_common_init(struct device_node *np,
enum rk3288_variant soc)
{
@@ -976,7 +980,7 @@ static void __init rk3288_common_init(struct device_node *np,
rockchip_register_restart_notifier(ctx, RK3288_GLB_SRST_FST,
rk3288_clk_shutdown);
- register_syscore_ops(&rk3288_clk_syscore_ops);
+ register_syscore(&rk3288_clk_syscore);
rockchip_clk_of_add_provider(np, ctx);
}
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 04391e4e2874..95e6996adbae 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -526,7 +526,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
RK3368_CLKGATE_CON(3), 1, GFLAGS),
- GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
+ GATE(SCLK_MIPIDSI_24M, "sclk_mipidsi_24m", "xin24m", 0, RK3368_CLKGATE_CON(4), 14, GFLAGS),
/*
* Clock-Architecture Diagram 4
diff --git a/drivers/clk/rockchip/clk-rk3506.c b/drivers/clk/rockchip/clk-rk3506.c
new file mode 100644
index 000000000000..dd59bd60382e
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rk3506.c
@@ -0,0 +1,869 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023-2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rockchip,rk3506-cru.h>
+#include "clk.h"
+
+#define PVTPLL_SRC_SEL_PVTPLL (BIT(7) | BIT(23))
+
+enum rk3506_plls {
+ gpll, v0pll, v1pll,
+};
+
+/*
+ * [FRAC PLL]: GPLL, V0PLL, V1PLL
+ * - VCO Frequency: 950MHz to 3800MHz
+ * - Output Frequency: 19MHz to 3800MHz
+ * - refdiv: 1 to 63 (Int Mode), 1 to 2 (Frac Mode)
+ * - fbdiv: 16 to 3800 (Int Mode), 20 to 380 (Frac Mode)
+ * - post1div: 1 to 7
+ * - post2div: 1 to 7
+ */
+static struct rockchip_pll_rate_table rk3506_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1896000000, 1, 79, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1800000000, 1, 75, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1704000000, 1, 71, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1608000000, 1, 67, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1512000000, 1, 63, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1416000000, 1, 59, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1350000000, 4, 225, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1296000000, 1, 54, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1200000000, 1, 50, 1, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1179648000, 1, 49, 1, 1, 0, 2550137),
+ RK3036_PLL_RATE(1008000000, 1, 84, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1000000000, 3, 125, 1, 1, 1, 0),
+ RK3036_PLL_RATE(993484800, 1, 41, 1, 1, 0, 6630355),
+ RK3036_PLL_RATE(983040000, 1, 40, 1, 1, 0, 16106127),
+ RK3036_PLL_RATE(960000000, 1, 80, 2, 1, 1, 0),
+ RK3036_PLL_RATE(912000000, 1, 76, 2, 1, 1, 0),
+ RK3036_PLL_RATE(903168000, 1, 75, 2, 1, 0, 4429185),
+ RK3036_PLL_RATE(816000000, 1, 68, 2, 1, 1, 0),
+ RK3036_PLL_RATE(800000000, 3, 200, 2, 1, 1, 0),
+ RK3036_PLL_RATE(600000000, 1, 50, 2, 1, 1, 0),
+ RK3036_PLL_RATE(594000000, 2, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(408000000, 1, 68, 2, 2, 1, 0),
+ RK3036_PLL_RATE(312000000, 1, 78, 6, 1, 1, 0),
+ RK3036_PLL_RATE(216000000, 1, 72, 4, 2, 1, 0),
+ RK3036_PLL_RATE(96000000, 1, 48, 6, 2, 1, 0),
+ { /* sentinel */ },
+};
+
+#define RK3506_DIV_ACLK_CORE_MASK 0xf
+#define RK3506_DIV_ACLK_CORE_SHIFT 9
+#define RK3506_DIV_PCLK_CORE_MASK 0xf
+#define RK3506_DIV_PCLK_CORE_SHIFT 0
+
+#define RK3506_CLKSEL15(_aclk_core_div) \
+{ \
+ .reg = RK3506_CLKSEL_CON(15), \
+ .val = HIWORD_UPDATE(_aclk_core_div, RK3506_DIV_ACLK_CORE_MASK, \
+ RK3506_DIV_ACLK_CORE_SHIFT), \
+}
+
+#define RK3506_CLKSEL16(_pclk_core_div) \
+{ \
+ .reg = RK3506_CLKSEL_CON(16), \
+ .val = HIWORD_UPDATE(_pclk_core_div, RK3506_DIV_PCLK_CORE_MASK, \
+ RK3506_DIV_PCLK_CORE_SHIFT), \
+}
+
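The RK3506_CLKSEL* helpers rely on the Rockchip hiword-mask register layout, where the upper 16 bits of a write act as a write-enable mask for the lower 16, so fields update without a read-modify-write cycle. Illustratively (not the exact macro text):

#define EXAMPLE_HIWORD_UPDATE(val, mask, shift) \
	(((val) << (shift)) | ((mask) << ((shift) + 16)))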
+/* SIGN-OFF: aclk_core: 500M, pclk_core: 125M */
+#define RK3506_CPUCLK_RATE(_prate, _aclk_core_div, _pclk_core_div) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ RK3506_CLKSEL15(_aclk_core_div), \
+ RK3506_CLKSEL16(_pclk_core_div), \
+ }, \
+}
+
+static struct rockchip_cpuclk_rate_table rk3506_cpuclk_rates[] __initdata = {
+ RK3506_CPUCLK_RATE(1608000000, 3, 12),
+ RK3506_CPUCLK_RATE(1512000000, 3, 12),
+ RK3506_CPUCLK_RATE(1416000000, 2, 11),
+ RK3506_CPUCLK_RATE(1296000000, 2, 10),
+ RK3506_CPUCLK_RATE(1200000000, 2, 9),
+ RK3506_CPUCLK_RATE(1179648000, 2, 9),
+ RK3506_CPUCLK_RATE(1008000000, 1, 7),
+ RK3506_CPUCLK_RATE(903168000, 1, 7),
+ RK3506_CPUCLK_RATE(800000000, 1, 6),
+ RK3506_CPUCLK_RATE(750000000, 1, 5),
+ RK3506_CPUCLK_RATE(589824000, 1, 4),
+ RK3506_CPUCLK_RATE(400000000, 1, 3),
+ RK3506_CPUCLK_RATE(200000000, 1, 1),
+};
+
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(gpll_v0pll_v1pll_parents_p) = { "gpll", "v0pll", "v1pll" };
+PNAME(gpll_v0pll_v1pll_g_parents_p) = { "clk_gpll_gate", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(gpll_v0pll_v1pll_div_parents_p) = { "clk_gpll_div", "clk_v0pll_div", "clk_v1pll_div" };
+PNAME(xin24m_gpll_v0pll_v1pll_g_parents_p) = { "xin24m", "clk_gpll_gate", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(xin24m_g_gpll_v0pll_v1pll_g_parents_p) = { "xin24m_gate", "clk_gpll_gate", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(xin24m_g_gpll_v0pll_v1pll_div_parents_p) = { "xin24m_gate", "clk_gpll_div", "clk_v0pll_div", "clk_v1pll_div" };
+PNAME(xin24m_400k_32k_parents_p) = { "xin24m", "clk_rc", "clk_32k" };
+PNAME(clk_frac_uart_matrix0_mux_parents_p) = { "xin24m", "gpll", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(clk_timer0_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "sai0_mclk_in", "sai0_sclk_in" };
+PNAME(clk_timer1_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "sai1_mclk_in", "sai1_sclk_in" };
+PNAME(clk_timer2_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "sai2_mclk_in", "sai2_sclk_in" };
+PNAME(clk_timer3_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "sai3_mclk_in", "sai3_sclk_in" };
+PNAME(clk_timer4_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "mclk_asrc0" };
+PNAME(clk_timer5_parents_p) = { "xin24m", "clk_gpll_div_100m", "clk_32k", "clk_core_pvtpll", "mclk_asrc1" };
+PNAME(sclk_uart_parents_p) = { "xin24m", "clk_gpll_gate", "clk_v0pll_gate", "clk_frac_uart_matrix0", "clk_frac_uart_matrix1",
+ "clk_frac_common_matrix0", "clk_frac_common_matrix1", "clk_frac_common_matrix2" };
+PNAME(clk_mac_ptp_root_parents_p) = { "gpll", "v0pll", "v1pll" };
+PNAME(clk_pwm_parents_p) = { "clk_rc", "sai0_mclk_in", "sai1_mclk_in", "sai2_mclk_in", "sai3_mclk_in", "sai0_sclk_in", "sai1_sclk_in",
+ "sai2_sclk_in", "sai3_sclk_in", "mclk_asrc0", "mclk_asrc1" };
+PNAME(clk_can_parents_p) = { "xin24m", "gpll", "clk_v0pll_gate", "clk_v1pll_gate", "clk_frac_voice_matrix1",
+ "clk_frac_common_matrix0", "clk_frac_common_matrix1", "clk_frac_common_matrix2" };
+PNAME(clk_pdm_parents_p) = { "xin24m_gate", "clk_int_voice_matrix0", "clk_int_voice_matrix1", "clk_int_voice_matrix2",
+ "clk_frac_voice_matrix0", "clk_frac_voice_matrix1", "clk_frac_common_matrix0", "clk_frac_common_matrix1",
+ "clk_frac_common_matrix2", "sai0_mclk_in", "sai1_mclk_in", "sai2_mclk_in", "sai3_mclk_in", "clk_gpll_div" };
+PNAME(mclk_sai_asrc_parents_p) = { "xin24m_gate", "clk_int_voice_matrix0", "clk_int_voice_matrix1", "clk_int_voice_matrix2",
+ "clk_frac_voice_matrix0", "clk_frac_voice_matrix1", "clk_frac_common_matrix0", "clk_frac_common_matrix1",
+ "clk_frac_common_matrix2", "sai0_mclk_in", "sai1_mclk_in", "sai2_mclk_in", "sai3_mclk_in" };
+PNAME(lrck_asrc_parents_p) = { "mclk_asrc0", "mclk_asrc1", "mclk_asrc2", "mclk_asrc3", "mclk_spdiftx", "clk_spdifrx_to_asrc", "clkout_pdm",
+ "sai0_fs", "sai1_fs", "sai2_fs", "sai3_fs", "sai4_fs" };
+PNAME(cclk_src_sdmmc_parents_p) = { "xin24m_gate", "gpll", "clk_v0pll_gate", "clk_v1pll_gate" };
+PNAME(dclk_vop_parents_p) = { "xin24m_gate", "clk_gpll_gate", "clk_v0pll_gate", "clk_v1pll_gate", "dummy_vop_dclk",
+ "dummy_vop_dclk", "dummy_vop_dclk", "dummy_vop_dclk" };
+PNAME(dbclk_gpio0_parents_p) = { "xin24m", "clk_rc", "clk_32k_pmu" };
+PNAME(clk_pmu_hp_timer_parents_p) = { "xin24m", "gpll_div_100m", "clk_core_pvtpll" };
+PNAME(clk_ref_out_parents_p) = { "xin24m", "gpll", "v0pll", "v1pll" };
+PNAME(clk_32k_frac_parents_p) = { "xin24m", "v0pll", "v1pll", "clk_rc" };
+PNAME(clk_32k_parents_p) = { "xin32k", "clk_32k_rc", "clk_32k_frac" };
+PNAME(clk_ref_phy_pmu_mux_parents_p) = { "xin24m", "clk_ref_phy_pll" };
+PNAME(clk_vpll_ref_parents_p) = { "xin24m", "clk_pll_ref_io" };
+PNAME(mux_armclk_p) = { "armclk_pll", "clk_core_pvtpll" };
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_pll_clock rk3506_pll_clks[] __initdata = {
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3506_PLL_CON(0),
+ RK3506_MODE_CON, 0, 2, 0, rk3506_pll_rates),
+ [v0pll] = PLL(pll_rk3328, PLL_V0PLL, "v0pll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3506_PLL_CON(8),
+ RK3506_MODE_CON, 2, 0, 0, rk3506_pll_rates),
+ [v1pll] = PLL(pll_rk3328, PLL_V1PLL, "v1pll", mux_pll_p,
+ CLK_IS_CRITICAL, RK3506_PLL_CON(16),
+ RK3506_MODE_CON, 4, 1, 0, rk3506_pll_rates),
+};
+
+static struct rockchip_clk_branch rk3506_armclk __initdata =
+ MUX(ARMCLK, "armclk", mux_armclk_p, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ RK3506_CLKSEL_CON(15), 8, 1, MFLAGS);
+
+static struct rockchip_clk_branch rk3506_clk_branches[] __initdata = {
+ /*
+ * CRU Clock-Architecture
+ */
+ /* top */
+ GATE(XIN24M_GATE, "xin24m_gate", "xin24m", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(CLK_GPLL_GATE, "clk_gpll_gate", "gpll", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(CLK_V0PLL_GATE, "clk_v0pll_gate", "v0pll", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(CLK_V1PLL_GATE, "clk_v1pll_gate", "v1pll", 0,
+ RK3506_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV, "clk_gpll_div", "clk_gpll_gate", CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(0), 6, 4, DFLAGS,
+ RK3506_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV_100M, "clk_gpll_div_100m", "clk_gpll_div", 0,
+ RK3506_CLKSEL_CON(0), 10, 4, DFLAGS,
+ RK3506_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE_NOMUX(CLK_V0PLL_DIV, "clk_v0pll_div", "clk_v0pll_gate", CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(1), 0, 4, DFLAGS,
+ RK3506_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_V1PLL_DIV, "clk_v1pll_div", "clk_v1pll_gate", 0,
+ RK3506_CLKSEL_CON(1), 4, 4, DFLAGS,
+ RK3506_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_INT_VOICE_MATRIX0, "clk_int_voice_matrix0", "clk_v0pll_gate", 0,
+ RK3506_CLKSEL_CON(1), 8, 5, DFLAGS,
+ RK3506_CLKGATE_CON(0), 9, GFLAGS),
+ COMPOSITE_NOMUX(CLK_INT_VOICE_MATRIX1, "clk_int_voice_matrix1", "clk_v1pll_gate", 0,
+ RK3506_CLKSEL_CON(2), 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE_NOMUX(CLK_INT_VOICE_MATRIX2, "clk_int_voice_matrix2", "clk_v0pll_gate", 0,
+ RK3506_CLKSEL_CON(2), 5, 5, DFLAGS,
+ RK3506_CLKGATE_CON(0), 11, GFLAGS),
+ MUX(CLK_FRAC_UART_MATRIX0_MUX, "clk_frac_uart_matrix0_mux", clk_frac_uart_matrix0_mux_parents_p, 0,
+ RK3506_CLKSEL_CON(3), 9, 2, MFLAGS),
+ MUX(CLK_FRAC_UART_MATRIX1_MUX, "clk_frac_uart_matrix1_mux", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(3), 11, 2, MFLAGS),
+ MUX(CLK_FRAC_VOICE_MATRIX0_MUX, "clk_frac_voice_matrix0_mux", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(3), 13, 2, MFLAGS),
+ MUX(CLK_FRAC_VOICE_MATRIX1_MUX, "clk_frac_voice_matrix1_mux", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(4), 0, 2, MFLAGS),
+ MUX(CLK_FRAC_COMMON_MATRIX0_MUX, "clk_frac_common_matrix0_mux", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(4), 2, 2, MFLAGS),
+ MUX(CLK_FRAC_COMMON_MATRIX1_MUX, "clk_frac_common_matrix1_mux", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(4), 4, 2, MFLAGS),
+ MUX(CLK_FRAC_COMMON_MATRIX2_MUX, "clk_frac_common_matrix2_mux", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(4), 6, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_UART_MATRIX0, "clk_frac_uart_matrix0", "clk_frac_uart_matrix0_mux", 0,
+ RK3506_CLKSEL_CON(5), 0,
+ RK3506_CLKGATE_CON(0), 13, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_UART_MATRIX1, "clk_frac_uart_matrix1", "clk_frac_uart_matrix1_mux", 0,
+ RK3506_CLKSEL_CON(6), 0,
+ RK3506_CLKGATE_CON(0), 14, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_VOICE_MATRIX0, "clk_frac_voice_matrix0", "clk_frac_voice_matrix0_mux", 0,
+ RK3506_CLKSEL_CON(7), 0,
+ RK3506_CLKGATE_CON(0), 15, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_VOICE_MATRIX1, "clk_frac_voice_matrix1", "clk_frac_voice_matrix1_mux", 0,
+ RK3506_CLKSEL_CON(9), 0,
+ RK3506_CLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_COMMON_MATRIX0, "clk_frac_common_matrix0", "clk_frac_common_matrix0_mux", 0,
+ RK3506_CLKSEL_CON(11), 0,
+ RK3506_CLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_COMMON_MATRIX1, "clk_frac_common_matrix1", "clk_frac_common_matrix1_mux", 0,
+ RK3506_CLKSEL_CON(12), 0,
+ RK3506_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_FRAC(CLK_FRAC_COMMON_MATRIX2, "clk_frac_common_matrix2", "clk_frac_common_matrix2_mux", 0,
+ RK3506_CLKSEL_CON(13), 0,
+ RK3506_CLKGATE_CON(1), 3, GFLAGS),
+ GATE(CLK_REF_USBPHY_TOP, "clk_ref_usbphy_top", "xin24m", 0,
+ RK3506_CLKGATE_CON(1), 4, GFLAGS),
+ GATE(CLK_REF_DPHY_TOP, "clk_ref_dphy_top", "xin24m", 0,
+ RK3506_CLKGATE_CON(1), 5, GFLAGS),
+
+ /* core */
+ COMPOSITE_NOGATE(0, "armclk_pll", gpll_v0pll_v1pll_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(15), 5, 2, MFLAGS, 0, 5, DFLAGS),
+ COMPOSITE_NOMUX(ACLK_CORE_ROOT, "aclk_core_root", "armclk", CLK_IGNORE_UNUSED,
+ RK3506_CLKSEL_CON(15), 9, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3506_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE_NOMUX(PCLK_CORE_ROOT, "pclk_core_root", "armclk", CLK_IGNORE_UNUSED,
+ RK3506_CLKSEL_CON(16), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
+ RK3506_CLKGATE_CON(2), 12, GFLAGS),
+ GATE(PCLK_DBG, "pclk_dbg", "pclk_core_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(3), 1, GFLAGS),
+ GATE(PCLK_CORE_GRF, "pclk_core_grf", "pclk_core_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(3), 4, GFLAGS),
+ GATE(PCLK_CORE_CRU, "pclk_core_cru", "pclk_core_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(3), 5, GFLAGS),
+ GATE(CLK_CORE_EMA_DETECT, "clk_core_ema_detect", "xin24m_gate", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(3), 6, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "aclk_core_root", 0,
+ RK3506_CLKGATE_CON(3), 8, GFLAGS),
+ GATE(DBCLK_GPIO1, "dbclk_gpio1", "xin24m_gate", 0,
+ RK3506_CLKGATE_CON(3), 9, GFLAGS),
+
+ /* core peri */
+ COMPOSITE(ACLK_CORE_PERI_ROOT, "aclk_core_peri_root", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(18), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(4), 0, GFLAGS),
+ GATE(HCLK_CORE_PERI_ROOT, "hclk_core_peri_root", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 1, GFLAGS),
+ GATE(PCLK_CORE_PERI_ROOT, "pclk_core_peri_root", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 2, GFLAGS),
+ COMPOSITE(CLK_DSMC, "clk_dsmc", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(18), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(4), 4, GFLAGS),
+ GATE(ACLK_DSMC, "aclk_dsmc", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(PCLK_DSMC, "pclk_dsmc", "pclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 6, GFLAGS),
+ COMPOSITE(CLK_FLEXBUS_TX, "clk_flexbus_tx", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(19), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(4), 7, GFLAGS),
+ COMPOSITE(CLK_FLEXBUS_RX, "clk_flexbus_rx", xin24m_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(19), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(4), 8, GFLAGS),
+ GATE(ACLK_FLEXBUS, "aclk_flexbus", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 9, GFLAGS),
+ GATE(HCLK_FLEXBUS, "hclk_flexbus", "hclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 10, GFLAGS),
+ GATE(ACLK_DSMC_SLV, "aclk_dsmc_slv", "aclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 11, GFLAGS),
+ GATE(HCLK_DSMC_SLV, "hclk_dsmc_slv", "hclk_core_peri_root", 0,
+ RK3506_CLKGATE_CON(4), 12, GFLAGS),
+
+ /* bus */
+ COMPOSITE(ACLK_BUS_ROOT, "aclk_bus_root", gpll_v0pll_v1pll_div_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(21), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(5), 0, GFLAGS),
+ COMPOSITE(HCLK_BUS_ROOT, "hclk_bus_root", gpll_v0pll_v1pll_div_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(21), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE(PCLK_BUS_ROOT, "pclk_bus_root", gpll_v0pll_v1pll_div_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(22), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(5), 2, GFLAGS),
+ GATE(ACLK_SYSRAM, "aclk_sysram", "aclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(5), 6, GFLAGS),
+ GATE(HCLK_SYSRAM, "hclk_sysram", "aclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(5), 7, GFLAGS),
+ GATE(ACLK_DMAC0, "aclk_dmac0", "aclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 8, GFLAGS),
+ GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 9, GFLAGS),
+ GATE(HCLK_M0, "hclk_m0", "aclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 10, GFLAGS),
+ GATE(ACLK_CRYPTO_NS, "aclk_crypto_ns", "aclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 14, GFLAGS),
+ GATE(HCLK_CRYPTO_NS, "hclk_crypto_ns", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(5), 15, GFLAGS),
+ GATE(HCLK_RNG, "hclk_rng", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 0, GFLAGS),
+ GATE(PCLK_BUS_GRF, "pclk_bus_grf", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(6), 1, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH0, "clk_timer0_ch0", clk_timer0_parents_p, 0,
+ RK3506_CLKSEL_CON(22), 7, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH1, "clk_timer0_ch1", clk_timer1_parents_p, 0,
+ RK3506_CLKSEL_CON(22), 10, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH2, "clk_timer0_ch2", clk_timer2_parents_p, 0,
+ RK3506_CLKSEL_CON(22), 13, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH3, "clk_timer0_ch3", clk_timer3_parents_p, 0,
+ RK3506_CLKSEL_CON(23), 0, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH4, "clk_timer0_ch4", clk_timer4_parents_p, 0,
+ RK3506_CLKSEL_CON(23), 3, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0_CH5, "clk_timer0_ch5", clk_timer5_parents_p, 0,
+ RK3506_CLKSEL_CON(23), 6, 3, MFLAGS,
+ RK3506_CLKGATE_CON(6), 8, GFLAGS),
+ GATE(PCLK_WDT0, "pclk_wdt0", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(TCLK_WDT0, "tclk_wdt0", "xin24m_gate", 0,
+ RK3506_CLKGATE_CON(6), 10, GFLAGS),
+ GATE(PCLK_WDT1, "pclk_wdt1", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 11, GFLAGS),
+ GATE(TCLK_WDT1, "tclk_wdt1", "xin24m_gate", 0,
+ RK3506_CLKGATE_CON(6), 12, GFLAGS),
+ GATE(PCLK_MAILBOX, "pclk_mailbox", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 13, GFLAGS),
+ GATE(PCLK_INTMUX, "pclk_intmux", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 14, GFLAGS),
+ GATE(PCLK_SPINLOCK, "pclk_spinlock", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(6), 15, GFLAGS),
+ GATE(PCLK_DDRC, "pclk_ddrc", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 0, GFLAGS),
+ GATE(HCLK_DDRPHY, "hclk_ddrphy", "hclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 1, GFLAGS),
+ GATE(PCLK_DDRMON, "pclk_ddrmon", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(CLK_DDRMON_OSC, "clk_ddrmon_osc", "xin24m_gate", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(PCLK_STDBY, "pclk_stdby", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_USBOTG0, "hclk_usbotg0", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(HCLK_USBOTG0_PMU, "hclk_usbotg0_pmu", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 6, GFLAGS),
+ GATE(CLK_USBOTG0_ADP, "clk_usbotg0_adp", "clk_32k", 0,
+ RK3506_CLKGATE_CON(7), 7, GFLAGS),
+ GATE(HCLK_USBOTG1, "hclk_usbotg1", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 8, GFLAGS),
+ GATE(HCLK_USBOTG1_PMU, "hclk_usbotg1_pmu", "hclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 9, GFLAGS),
+ GATE(CLK_USBOTG1_ADP, "clk_usbotg1_adp", "clk_32k", 0,
+ RK3506_CLKGATE_CON(7), 10, GFLAGS),
+ GATE(PCLK_USBPHY, "pclk_usbphy", "pclk_bus_root", 0,
+ RK3506_CLKGATE_CON(7), 11, GFLAGS),
+ GATE(ACLK_DMA2DDR, "aclk_dma2ddr", "aclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(8), 0, GFLAGS),
+ GATE(PCLK_DMA2DDR, "pclk_dma2ddr", "pclk_bus_root", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(8), 1, GFLAGS),
+ COMPOSITE_NOMUX(STCLK_M0, "stclk_m0", "xin24m_gate", 0,
+ RK3506_CLKSEL_CON(23), 9, 6, DFLAGS,
+ RK3506_CLKGATE_CON(8), 2, GFLAGS),
+ COMPOSITE(CLK_DDRPHY, "clk_ddrphy", gpll_v0pll_v1pll_parents_p, CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKSEL_CON(4), 4, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 10, GFLAGS),
+ FACTOR(CLK_DDRC_SRC, "clk_ddrc_src", "clk_ddrphy", 0, 1, 4),
+ GATE(ACLK_DDRC_0, "aclk_ddrc_0", "clk_ddrc_src", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(10), 0, GFLAGS),
+ GATE(ACLK_DDRC_1, "aclk_ddrc_1", "clk_ddrc_src", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(10), 1, GFLAGS),
+ GATE(CLK_DDRC, "clk_ddrc", "clk_ddrc_src", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(10), 3, GFLAGS),
+ GATE(CLK_DDRMON, "clk_ddrmon", "clk_ddrc_src", CLK_IGNORE_UNUSED,
+ RK3506_CLKGATE_CON(10), 4, GFLAGS),
+
+ /* ls peri */
+ COMPOSITE(HCLK_LSPERI_ROOT, "hclk_lsperi_root", gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(29), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 0, GFLAGS),
+ GATE(PCLK_LSPERI_ROOT, "pclk_lsperi_root", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 1, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 4, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 5, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 6, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 7, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 8, GFLAGS),
+ COMPOSITE(SCLK_UART0, "sclk_uart0", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(29), 12, 3, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 9, GFLAGS),
+ COMPOSITE(SCLK_UART1, "sclk_uart1", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(30), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 10, GFLAGS),
+ COMPOSITE(SCLK_UART2, "sclk_uart2", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(30), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 11, GFLAGS),
+ COMPOSITE(SCLK_UART3, "sclk_uart3", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(31), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 12, GFLAGS),
+ COMPOSITE(SCLK_UART4, "sclk_uart4", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(31), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RK3506_CLKGATE_CON(11), 13, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(11), 14, GFLAGS),
+ COMPOSITE(CLK_I2C0, "clk_i2c0", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(32), 4, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3506_CLKGATE_CON(11), 15, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 0, GFLAGS),
+ COMPOSITE(CLK_I2C1, "clk_i2c1", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(32), 10, 2, MFLAGS, 6, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 1, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 2, GFLAGS),
+ COMPOSITE(CLK_I2C2, "clk_i2c2", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(33), 4, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 3, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 4, GFLAGS),
+ COMPOSITE(CLK_PWM1, "clk_pwm1", gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(33), 10, 2, MFLAGS, 6, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 5, GFLAGS),
+ GATE(CLK_OSC_PWM1, "clk_osc_pwm1", "xin24m", 0,
+ RK3506_CLKGATE_CON(12), 6, GFLAGS),
+ GATE(CLK_RC_PWM1, "clk_rc_pwm1", "clk_rc", 0,
+ RK3506_CLKGATE_CON(12), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_FREQ_PWM1, "clk_freq_pwm1", clk_pwm_parents_p, 0,
+ RK3506_CLKSEL_CON(33), 12, 4, MFLAGS,
+ RK3506_CLKGATE_CON(12), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_COUNTER_PWM1, "clk_counter_pwm1", clk_pwm_parents_p, 0,
+ RK3506_CLKSEL_CON(34), 0, 4, MFLAGS,
+ RK3506_CLKGATE_CON(12), 9, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 10, GFLAGS),
+ COMPOSITE(CLK_SPI0, "clk_spi0", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(34), 8, 2, MFLAGS, 4, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 11, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 12, GFLAGS),
+ COMPOSITE(CLK_SPI1, "clk_spi1", xin24m_g_gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(34), 14, 2, MFLAGS, 10, 4, DFLAGS,
+ RK3506_CLKGATE_CON(12), 13, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(12), 14, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO2, "dbclk_gpio2", xin24m_400k_32k_parents_p, 0,
+ RK3506_CLKSEL_CON(35), 0, 2, MFLAGS,
+ RK3506_CLKGATE_CON(12), 15, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 0, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO3, "dbclk_gpio3", xin24m_400k_32k_parents_p, 0,
+ RK3506_CLKSEL_CON(35), 2, 2, MFLAGS,
+ RK3506_CLKGATE_CON(13), 1, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 2, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO4, "dbclk_gpio4", xin24m_400k_32k_parents_p, 0,
+ RK3506_CLKSEL_CON(35), 4, 2, MFLAGS,
+ RK3506_CLKGATE_CON(13), 3, GFLAGS),
+ GATE(HCLK_CAN0, "hclk_can0", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 4, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", clk_can_parents_p, 0,
+ RK3506_CLKSEL_CON(35), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 5, GFLAGS),
+ GATE(HCLK_CAN1, "hclk_can1", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 6, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", clk_can_parents_p, 0,
+ RK3506_CLKSEL_CON(36), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 7, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 8, GFLAGS),
+ COMPOSITE(MCLK_PDM, "mclk_pdm", clk_pdm_parents_p, 0,
+ RK3506_CLKSEL_CON(37), 5, 4, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 9, GFLAGS),
+ COMPOSITE(CLKOUT_PDM, "clkout_pdm", clk_pdm_parents_p, 0,
+ RK3506_CLKSEL_CON(38), 10, 4, MFLAGS, 0, 10, DFLAGS,
+ RK3506_CLKGATE_CON(13), 10, GFLAGS),
+ COMPOSITE(MCLK_SPDIFTX, "mclk_spdiftx", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(39), 5, 4, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 11, GFLAGS),
+ GATE(HCLK_SPDIFTX, "hclk_spdiftx", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 12, GFLAGS),
+ GATE(HCLK_SPDIFRX, "hclk_spdifrx", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(13), 13, GFLAGS),
+ COMPOSITE(MCLK_SPDIFRX, "mclk_spdifrx", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(39), 14, 2, MFLAGS, 9, 5, DFLAGS,
+ RK3506_CLKGATE_CON(13), 14, GFLAGS),
+ COMPOSITE(MCLK_SAI0, "mclk_sai0", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(40), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(13), 15, GFLAGS),
+ GATE(HCLK_SAI0, "hclk_sai0", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(14), 0, GFLAGS),
+ GATE(MCLK_OUT_SAI0, "mclk_out_sai0", "mclk_sai0", 0,
+ RK3506_CLKGATE_CON(14), 1, GFLAGS),
+ COMPOSITE(MCLK_SAI1, "mclk_sai1", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(41), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(14), 2, GFLAGS),
+ GATE(HCLK_SAI1, "hclk_sai1", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(14), 3, GFLAGS),
+ GATE(MCLK_OUT_SAI1, "mclk_out_sai1", "mclk_sai1", 0,
+ RK3506_CLKGATE_CON(14), 4, GFLAGS),
+ GATE(HCLK_ASRC0, "hclk_asrc0", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(14), 5, GFLAGS),
+ COMPOSITE(CLK_ASRC0, "clk_asrc0", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(42), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(14), 6, GFLAGS),
+ GATE(HCLK_ASRC1, "hclk_asrc1", "hclk_lsperi_root", 0,
+ RK3506_CLKGATE_CON(14), 7, GFLAGS),
+ COMPOSITE(CLK_ASRC1, "clk_asrc1", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(42), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(14), 8, GFLAGS),
+ GATE(PCLK_CRU, "pclk_cru", "pclk_lsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(14), 9, GFLAGS),
+ GATE(PCLK_PMU_ROOT, "pclk_pmu_root", "pclk_lsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(14), 10, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC0, "mclk_asrc0", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(46), 0, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 0, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC1, "mclk_asrc1", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(46), 4, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 1, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC2, "mclk_asrc2", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(46), 8, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 2, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC3, "mclk_asrc3", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(46), 12, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 3, GFLAGS),
+ COMPOSITE_NODIV(LRCK_ASRC0_SRC, "lrck_asrc0_src", lrck_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(47), 0, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 4, GFLAGS),
+ COMPOSITE_NODIV(LRCK_ASRC0_DST, "lrck_asrc0_dst", lrck_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(47), 4, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 5, GFLAGS),
+ COMPOSITE_NODIV(LRCK_ASRC1_SRC, "lrck_asrc1_src", lrck_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(47), 8, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 6, GFLAGS),
+ COMPOSITE_NODIV(LRCK_ASRC1_DST, "lrck_asrc1_dst", lrck_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(47), 12, 4, MFLAGS,
+ RK3506_CLKGATE_CON(16), 7, GFLAGS),
+
+ /* hs peri */
+ COMPOSITE(ACLK_HSPERI_ROOT, "aclk_hsperi_root", gpll_v0pll_v1pll_div_parents_p, CLK_IS_CRITICAL,
+ RK3506_CLKSEL_CON(49), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(17), 0, GFLAGS),
+ GATE(HCLK_HSPERI_ROOT, "hclk_hsperi_root", "aclk_hsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(17), 1, GFLAGS),
+ GATE(PCLK_HSPERI_ROOT, "pclk_hsperi_root", "hclk_hsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(17), 2, GFLAGS),
+ COMPOSITE(CCLK_SRC_SDMMC, "cclk_src_sdmmc", cclk_src_sdmmc_parents_p, 0,
+ RK3506_CLKSEL_CON(49), 13, 2, MFLAGS, 7, 6, DFLAGS,
+ RK3506_CLKGATE_CON(17), 6, GFLAGS),
+ GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 7, GFLAGS),
+ GATE(HCLK_FSPI, "hclk_fspi", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 8, GFLAGS),
+ COMPOSITE(SCLK_FSPI, "sclk_fspi", xin24m_g_gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(50), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(17), 9, GFLAGS),
+ GATE(PCLK_SPI2, "pclk_spi2", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 10, GFLAGS),
+ GATE(ACLK_MAC0, "aclk_mac0", "aclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 11, GFLAGS),
+ GATE(ACLK_MAC1, "aclk_mac1", "aclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 12, GFLAGS),
+ GATE(PCLK_MAC0, "pclk_mac0", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 13, GFLAGS),
+ GATE(PCLK_MAC1, "pclk_mac1", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(17), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MAC_ROOT, "clk_mac_root", "gpll", 0,
+ RK3506_CLKSEL_CON(50), 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(17), 15, GFLAGS),
+ GATE(CLK_MAC0, "clk_mac0", "clk_mac_root", 0,
+ RK3506_CLKGATE_CON(18), 0, GFLAGS),
+ GATE(CLK_MAC1, "clk_mac1", "clk_mac_root", 0,
+ RK3506_CLKGATE_CON(18), 1, GFLAGS),
+ COMPOSITE(MCLK_SAI2, "mclk_sai2", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(51), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(18), 2, GFLAGS),
+ GATE(HCLK_SAI2, "hclk_sai2", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 3, GFLAGS),
+ GATE(MCLK_OUT_SAI2, "mclk_out_sai2", "mclk_sai2", 0,
+ RK3506_CLKGATE_CON(18), 4, GFLAGS),
+ COMPOSITE(MCLK_SAI3_SRC, "mclk_sai3_src", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(52), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(18), 5, GFLAGS),
+ GATE(HCLK_SAI3, "hclk_sai3", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 6, GFLAGS),
+ GATE(MCLK_SAI3, "mclk_sai3", "mclk_sai3_src", 0,
+ RK3506_CLKGATE_CON(18), 7, GFLAGS),
+ GATE(MCLK_OUT_SAI3, "mclk_out_sai3", "mclk_sai3_src", 0,
+ RK3506_CLKGATE_CON(18), 8, GFLAGS),
+ COMPOSITE(MCLK_SAI4_SRC, "mclk_sai4_src", mclk_sai_asrc_parents_p, 0,
+ RK3506_CLKSEL_CON(53), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(18), 9, GFLAGS),
+ GATE(HCLK_SAI4, "hclk_sai4", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 10, GFLAGS),
+ GATE(MCLK_SAI4, "mclk_sai4", "mclk_sai4_src", 0,
+ RK3506_CLKGATE_CON(18), 11, GFLAGS),
+ GATE(HCLK_DSM, "hclk_dsm", "hclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 12, GFLAGS),
+ GATE(MCLK_DSM, "mclk_dsm", "mclk_sai3_src", 0,
+ RK3506_CLKGATE_CON(18), 13, GFLAGS),
+ GATE(PCLK_AUDIO_ADC, "pclk_audio_adc", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(18), 14, GFLAGS),
+ GATE(MCLK_AUDIO_ADC, "mclk_audio_adc", "mclk_sai4_src", 0,
+ RK3506_CLKGATE_CON(18), 15, GFLAGS),
+ FACTOR(MCLK_AUDIO_ADC_DIV4, "mclk_audio_adc_div4", "mclk_audio_adc", 0, 1, 4),
+ GATE(PCLK_SARADC, "pclk_saradc", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(19), 0, GFLAGS),
+ COMPOSITE(CLK_SARADC, "clk_saradc", xin24m_400k_32k_parents_p, 0,
+ RK3506_CLKSEL_CON(54), 4, 2, MFLAGS, 0, 4, DFLAGS,
+ RK3506_CLKGATE_CON(19), 1, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(19), 3, GFLAGS),
+ GATE(CLK_SBPI_OTPC_NS, "clk_sbpi_otpc_ns", "xin24m_gate", 0,
+ RK3506_CLKGATE_CON(19), 4, GFLAGS),
+ FACTOR(CLK_USER_OTPC_NS, "clk_user_otpc_ns", "clk_sbpi_otpc_ns", 0, 1, 2),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_hsperi_root", 0,
+ RK3506_CLKGATE_CON(19), 6, GFLAGS),
+ COMPOSITE(SCLK_UART5, "sclk_uart5", sclk_uart_parents_p, 0,
+ RK3506_CLKSEL_CON(54), 11, 3, MFLAGS, 6, 5, DFLAGS,
+ RK3506_CLKGATE_CON(19), 7, GFLAGS),
+ GATE(PCLK_GPIO234_IOC, "pclk_gpio234_ioc", "pclk_hsperi_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(19), 8, GFLAGS),
+ COMPOSITE(CLK_MAC_PTP_ROOT, "clk_mac_ptp_root", clk_mac_ptp_root_parents_p, 0,
+ RK3506_CLKSEL_CON(55), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(19), 9, GFLAGS),
+ GATE(CLK_MAC0_PTP, "clk_mac0_ptp", "clk_mac_ptp_root", 0,
+ RK3506_CLKGATE_CON(19), 10, GFLAGS),
+ GATE(CLK_MAC1_PTP, "clk_mac1_ptp", "clk_mac_ptp_root", 0,
+ RK3506_CLKGATE_CON(19), 11, GFLAGS),
+ COMPOSITE(ACLK_VIO_ROOT, "aclk_vio_root", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(58), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(21), 0, GFLAGS),
+ COMPOSITE(HCLK_VIO_ROOT, "hclk_vio_root", gpll_v0pll_v1pll_div_parents_p, 0,
+ RK3506_CLKSEL_CON(58), 12, 2, MFLAGS, 7, 5, DFLAGS,
+ RK3506_CLKGATE_CON(21), 1, GFLAGS),
+ GATE(PCLK_VIO_ROOT, "pclk_vio_root", "hclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 2, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 6, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 7, GFLAGS),
+ COMPOSITE(CLK_CORE_RGA, "clk_core_rga", gpll_v0pll_v1pll_g_parents_p, 0,
+ RK3506_CLKSEL_CON(59), 5, 2, MFLAGS, 0, 5, DFLAGS,
+ RK3506_CLKGATE_CON(21), 8, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 9, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 10, GFLAGS),
+ COMPOSITE(DCLK_VOP, "dclk_vop", dclk_vop_parents_p, 0,
+ RK3506_CLKSEL_CON(60), 8, 3, MFLAGS, 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(21), 11, GFLAGS),
+ GATE(PCLK_DPHY, "pclk_dphy", "pclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 12, GFLAGS),
+ GATE(PCLK_DSI_HOST, "pclk_dsi_host", "pclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 13, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_vio_root", 0,
+ RK3506_CLKGATE_CON(21), 14, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC, "clk_tsadc", "xin24m_gate", 0,
+ RK3506_CLKSEL_CON(61), 0, 8, DFLAGS,
+ RK3506_CLKGATE_CON(21), 15, GFLAGS),
+ COMPOSITE_NOMUX(CLK_TSADC_TSEN, "clk_tsadc_tsen", "xin24m_gate", 0,
+ RK3506_CLKSEL_CON(61), 8, 3, DFLAGS,
+ RK3506_CLKGATE_CON(22), 0, GFLAGS),
+ GATE(PCLK_GPIO1_IOC, "pclk_gpio1_ioc", "pclk_vio_root", CLK_IS_CRITICAL,
+ RK3506_CLKGATE_CON(22), 1, GFLAGS),
+
+ /* pmu */
+ GATE(CLK_PMU, "clk_pmu", "xin24m", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 1, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_PMU_CRU, "pclk_pmu_cru", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_PMU_GRF, "pclk_pmu_grf", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_GPIO0_IOC, "pclk_gpio0_ioc", "pclk_pmu_root", CLK_IS_CRITICAL,
+ RK3506_PMU_CLKGATE_CON(0), 7, GFLAGS),
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pmu_root", 0,
+ RK3506_PMU_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO0, "dbclk_gpio0", dbclk_gpio0_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(0), 0, 2, MFLAGS,
+ RK3506_PMU_CLKGATE_CON(0), 9, GFLAGS),
+ GATE(PCLK_GPIO1_SHADOW, "pclk_gpio1_shadow", "pclk_pmu_root", 0,
+ RK3506_PMU_CLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_GPIO1_SHADOW, "dbclk_gpio1_shadow", dbclk_gpio0_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(0), 2, 2, MFLAGS,
+ RK3506_PMU_CLKGATE_CON(0), 11, GFLAGS),
+ GATE(PCLK_PMU_HP_TIMER, "pclk_pmu_hp_timer", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 12, GFLAGS),
+ MUX(CLK_PMU_HP_TIMER, "clk_pmu_hp_timer", clk_pmu_hp_timer_parents_p, CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKSEL_CON(0), 4, 2, MFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_pmu_root", 0,
+ RK3506_PMU_CLKGATE_CON(0), 15, GFLAGS),
+ COMPOSITE_NOMUX(CLK_PWM0, "clk_pwm0", "clk_gpll_div_100m", 0,
+ RK3506_PMU_CLKSEL_CON(0), 6, 4, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 0, GFLAGS),
+ GATE(CLK_OSC_PWM0, "clk_osc_pwm0", "xin24m", 0,
+ RK3506_PMU_CLKGATE_CON(1), 1, GFLAGS),
+ GATE(CLK_RC_PWM0, "clk_rc_pwm0", "clk_rc", 0,
+ RK3506_PMU_CLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_MAC_OUT, "clk_mac_out", "gpll", 0,
+ RK3506_PMU_CLKSEL_CON(0), 10, 6, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 3, GFLAGS),
+ COMPOSITE(CLK_REF_OUT0, "clk_ref_out0", clk_ref_out_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(1), 6, 2, MFLAGS, 0, 6, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 4, GFLAGS),
+ COMPOSITE(CLK_REF_OUT1, "clk_ref_out1", clk_ref_out_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(1), 14, 2, MFLAGS, 8, 6, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 5, GFLAGS),
+ MUX(CLK_32K_FRAC_MUX, "clk_32k_frac_mux", clk_32k_frac_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(3), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_32K_FRAC, "clk_32k_frac", "clk_32k_frac_mux", 0,
+ RK3506_PMU_CLKSEL_CON(2), 0,
+ RK3506_PMU_CLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE_NOMUX(CLK_32K_RC, "clk_32k_rc", "clk_rc", CLK_IS_CRITICAL,
+ RK3506_PMU_CLKSEL_CON(3), 2, 5, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_32K, "clk_32k", clk_32k_parents_p, CLK_IS_CRITICAL,
+ RK3506_PMU_CLKSEL_CON(3), 7, 2, MFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_32K_PMU, "clk_32k_pmu", clk_32k_parents_p, CLK_IS_CRITICAL,
+ RK3506_PMU_CLKSEL_CON(3), 9, 2, MFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 9, GFLAGS),
+ GATE(CLK_PMU_32K, "clk_pmu_32k", "clk_32k_pmu", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 3, GFLAGS),
+ GATE(CLK_PMU_HP_TIMER_32K, "clk_pmu_hp_timer_32k", "clk_32k_pmu", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(0), 14, GFLAGS),
+ GATE(PCLK_TOUCH_KEY, "pclk_touch_key", "pclk_pmu_root", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(1), 12, GFLAGS),
+ GATE(CLK_TOUCH_KEY, "clk_touch_key", "xin24m", CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKGATE_CON(1), 13, GFLAGS),
+ COMPOSITE(CLK_REF_PHY_PLL, "clk_ref_phy_pll", gpll_v0pll_v1pll_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(4), 13, 2, MFLAGS, 6, 7, DFLAGS,
+ RK3506_PMU_CLKGATE_CON(1), 14, GFLAGS),
+ MUX(CLK_REF_PHY_PMU_MUX, "clk_ref_phy_pmu_mux", clk_ref_phy_pmu_mux_parents_p, 0,
+ RK3506_PMU_CLKSEL_CON(4), 15, 1, MFLAGS),
+ GATE(CLK_WIFI_OUT, "clk_wifi_out", "xin24m", 0,
+ RK3506_PMU_CLKGATE_CON(2), 0, GFLAGS),
+ MUX(CLK_V0PLL_REF, "clk_v0pll_ref", clk_vpll_ref_parents_p, CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKSEL_CON(6), 0, 1, MFLAGS),
+ MUX(CLK_V1PLL_REF, "clk_v1pll_ref", clk_vpll_ref_parents_p, CLK_IGNORE_UNUSED,
+ RK3506_PMU_CLKSEL_CON(6), 1, 1, MFLAGS),
+
+ /* secure ns */
+ GATE(CLK_CORE_CRYPTO_NS, "clk_core_crypto_ns", "clk_core_crypto", 0,
+ RK3506_CLKGATE_CON(5), 12, GFLAGS),
+ GATE(CLK_PKA_CRYPTO_NS, "clk_pka_crypto_ns", "clk_pka_crypto", 0,
+ RK3506_CLKGATE_CON(5), 13, GFLAGS),
+
+ /* io */
+ GATE(CLK_SPI2, "clk_spi2", "clk_spi2_io", 0,
+ RK3506_CLKGATE_CON(20), 0, GFLAGS),
+};
+
+static void __init rk3506_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
+ void __iomem *reg_base;
+
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3506_clk_branches,
+ ARRAY_SIZE(rk3506_clk_branches)) + 1;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rk3506_pll_clks,
+ ARRAY_SIZE(rk3506_pll_clks),
+ 0);
+
+ rockchip_clk_register_armclk_multi_pll(ctx, &rk3506_armclk,
+ rk3506_cpuclk_rates,
+ ARRAY_SIZE(rk3506_cpuclk_rates));
+
+ rockchip_clk_register_branches(ctx, rk3506_clk_branches,
+ ARRAY_SIZE(rk3506_clk_branches));
+
+ rk3506_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RK3506_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+
+ /* pvtpll src init: select the core pvtpll as the armclk source */
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RK3506_CLKSEL_CON(15));
+}
+
+CLK_OF_DECLARE(rk3506_cru, "rockchip,rk3506-cru", rk3506_clk_init);
+
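+/*
+ * of_clk_init() handles the CLK_OF_DECLARE entry above during early
+ * boot and marks the node OF_POPULATED; the platform driver below is
+ * only expected to bind when that early path has not already claimed
+ * the node.
+ */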
+struct clk_rk3506_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rk3506_inits clk_rk3506_cru_init = {
+ .inits = rk3506_clk_init,
+};
+
+static const struct of_device_id clk_rk3506_match_table[] = {
+ {
+ .compatible = "rockchip,rk3506-cru",
+ .data = &clk_rk3506_cru_init,
+ },
+ { }
+};
+
+static int clk_rk3506_probe(struct platform_device *pdev)
+{
+ const struct clk_rk3506_inits *init_data;
+ struct device *dev = &pdev->dev;
+
+ init_data = device_get_match_data(dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(dev->of_node);
+
+ return 0;
+}
+
+static struct platform_driver clk_rk3506_driver = {
+ .probe = clk_rk3506_probe,
+ .driver = {
+ .name = "clk-rk3506",
+ .of_match_table = clk_rk3506_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rk3506_driver, clk_rk3506_probe);
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 97d279399ae8..74eabf9b2ae2 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -1652,6 +1652,7 @@ CLK_OF_DECLARE(rk3568_cru_pmu, "rockchip,rk3568-pmucru", rk3568_pmu_clk_init);
static void __init rk3568_clk_init(struct device_node *np)
{
struct rockchip_clk_provider *ctx;
+ unsigned long clk_nr_clks;
void __iomem *reg_base;
reg_base = of_iomap(np, 0);
@@ -1660,7 +1661,9 @@ static void __init rk3568_clk_init(struct device_node *np)
return;
}
- ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rk3568_clk_branches,
+ ARRAY_SIZE(rk3568_clk_branches)) + 1;
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
if (IS_ERR(ctx)) {
pr_err("%s: rockchip clk init failed\n", __func__);
iounmap(reg_base);
diff --git a/drivers/clk/rockchip/clk-rv1126b.c b/drivers/clk/rockchip/clk-rv1126b.c
new file mode 100644
index 000000000000..3e27bfc14854
--- /dev/null
+++ b/drivers/clk/rockchip/clk-rv1126b.c
@@ -0,0 +1,1117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/syscore_ops.h>
+#include <dt-bindings/clock/rockchip,rv1126b-cru.h>
+#include "clk.h"
+
+#define RV1126B_FRAC_MAX_PRATE 1200000000
+
+#define PVTPLL_SRC_SEL_PVTPLL (BIT(0) | BIT(16))
+
+enum rv1126b_plls {
+ gpll, cpll, aupll, dpll
+};
+
+static struct rockchip_pll_rate_table rv1126b_pll_rates[] = {
+ /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
+ RK3036_PLL_RATE(1200000000, 1, 100, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1188000000, 1, 99, 2, 1, 1, 0),
+ RK3036_PLL_RATE(1179648000, 1, 49, 1, 1, 0, 2550137),
+ RK3036_PLL_RATE(1000000000, 3, 250, 2, 1, 1, 0),
+ RK3036_PLL_RATE(993484800, 1, 41, 1, 1, 0, 6630355),
+ RK3036_PLL_RATE(983040000, 1, 40, 1, 1, 0, 16106127),
+ RK3036_PLL_RATE(903168000, 1, 75, 2, 1, 0, 4429185),
+ { /* sentinel */ },
+};
+
+#define RV1126B_DIV_ACLK_CORE_MASK 0x1f
+#define RV1126B_DIV_ACLK_CORE_SHIFT 0
+#define RV1126B_DIV_PCLK_CORE_MASK 0x1f
+#define RV1126B_DIV_PCLK_CORE_SHIFT 8
+#define RV1126B_CORE_SEL_MASK 0x1
+#define RV1126B_CORE_SEL_SHIFT 1
+
+#define RV1126B_CLKSEL0(_aclk_core) \
+{ \
+ .reg = RV1126B_CORECLKSEL_CON(2), \
+ .val = HIWORD_UPDATE(_aclk_core - 1, RV1126B_DIV_ACLK_CORE_MASK, \
+ RV1126B_DIV_ACLK_CORE_SHIFT), \
+}
+
+#define RV1126B_CLKSEL1(_pclk_dbg) \
+{ \
+ .reg = RV1126B_CORECLKSEL_CON(2), \
+ .val = HIWORD_UPDATE(_pclk_dbg - 1, RV1126B_DIV_PCLK_CORE_MASK, \
+ RV1126B_DIV_PCLK_CORE_SHIFT), \
+}
+
+#define RV1126B_CPUCLK_RATE(_prate, _aclk_core, _pclk_dbg) \
+{ \
+ .prate = _prate, \
+ .divs = { \
+ RV1126B_CLKSEL0(_aclk_core), \
+ RV1126B_CLKSEL1(_pclk_dbg), \
+ }, \
+}
+
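+/*
+ * Each rate entry below programs the aclk_core and pclk (dbg)
+ * dividers in CORECLKSEL_CON(2) through hiword-mask writes when the
+ * cpu clock switches to the given prate.
+ */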
+static struct rockchip_cpuclk_rate_table rv1126b_cpuclk_rates[] __initdata = {
+ RV1126B_CPUCLK_RATE(1608000000, 4, 10),
+ RV1126B_CPUCLK_RATE(1512000000, 4, 10),
+ RV1126B_CPUCLK_RATE(1416000000, 4, 10),
+ RV1126B_CPUCLK_RATE(1296000000, 3, 10),
+ RV1126B_CPUCLK_RATE(1200000000, 3, 10),
+ RV1126B_CPUCLK_RATE(1188000000, 3, 8),
+ RV1126B_CPUCLK_RATE(1104000000, 2, 8),
+ RV1126B_CPUCLK_RATE(1008000000, 2, 8),
+ RV1126B_CPUCLK_RATE(816000000, 2, 6),
+ RV1126B_CPUCLK_RATE(600000000, 2, 4),
+ RV1126B_CPUCLK_RATE(594000000, 2, 4),
+ RV1126B_CPUCLK_RATE(408000000, 1, 3),
+ RV1126B_CPUCLK_RATE(396000000, 1, 3),
+};
+
+PNAME(mux_pll_p) = { "xin24m" };
+PNAME(mux_gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(mux_gpll_aupll_p) = { "gpll", "aupll" };
+PNAME(mux_gpll_aupll_cpll_p) = { "gpll", "aupll", "cpll" };
+PNAME(mux_gpll_cpll_24m_p) = { "gpll", "cpll", "xin24m" };
+PNAME(mux_cpll_24m_p) = { "cpll", "xin24m" };
+PNAME(mux_24m_gpll_aupll_cpll_p) = { "xin24m", "gpll", "aupll", "cpll" };
+PNAME(mux_24m_gpll_cpll_p) = { "xin24m", "gpll", "cpll" };
+PNAME(mux_24m_gpll_aupll_p) = { "xin24m", "gpll", "aupll" };
+PNAME(mux_sclk_uart_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_uart_frac0", "clk_uart_frac1" };
+PNAME(mclk_sai0_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_audio_frac0", "clk_audio_frac1",
+ "clk_audio_int0", "clk_audio_int1",
+ "mclk_sai0_from_io" };
+PNAME(mclk_sai1_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_audio_frac0", "clk_audio_frac1",
+ "clk_audio_int0", "clk_audio_int1",
+ "mclk_sai1_from_io" };
+PNAME(mclk_sai2_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_audio_frac0", "clk_audio_frac1",
+ "clk_audio_int0", "clk_audio_int1",
+ "mclk_sai2_from_io" };
+PNAME(mux_sai_src_p) = { "xin24m", "clk_cm_frac0", "clk_cm_frac1",
+ "clk_cm_frac2", "clk_audio_frac0", "clk_audio_frac1",
+ "clk_audio_int0", "clk_audio_int1", "mclk_sai0_from_io",
+ "mclk_sai1_from_io", "mclk_sai2_from_io"};
+PNAME(mux_100m_24m_p) = { "clk_cpll_div10", "xin24m" };
+PNAME(mux_200m_24m_p) = { "clk_gpll_div6", "xin24m" };
+PNAME(mux_500m_400m_200m_p) = { "clk_cpll_div2", "clk_gpll_div3", "clk_gpll_div6" };
+PNAME(mux_300m_200m_p) = { "clk_gpll_div4", "clk_gpll_div6" };
+PNAME(mux_500m_400m_300m_p) = { "clk_cpll_div2", "clk_gpll_div3", "clk_gpll_div4" };
+PNAME(mux_333m_200m_p) = { "clk_cpll_div3", "clk_gpll_div6" };
+PNAME(mux_600m_400m_200m_p) = { "clk_gpll_div2", "clk_gpll_div3", "clk_gpll_div6" };
+PNAME(mux_400m_300m_200m_p) = { "clk_gpll_div3", "clk_gpll_div4", "clk_gpll_div6" };
+PNAME(mux_200m_100m_p) = { "clk_gpll_div6", "clk_cpll_div10" };
+PNAME(mux_200m_100m_50m_24m_p) = { "clk_gpll_div6", "clk_cpll_div10", "clk_cpll_div20",
+ "xin24m" };
+PNAME(mux_600m_24m_p) = { "clk_gpll_div2", "xin24m" };
+PNAME(mux_armclk_p) = { "clk_core_pll", "clk_core_pvtpll" };
+PNAME(aclk_npu_root_p) = { "clk_npu_pll", "clk_npu_pvtpll" };
+PNAME(clk_saradc0_p) = { "clk_saradc0_src", "clk_saradc0_rcosc_io" };
+PNAME(clk_core_vepu_p) = { "clk_vepu_pll", "clk_vepu_pvtpll" };
+PNAME(clk_core_fec_p) = { "clk_core_fec_src", "clk_vcp_pvtpll" };
+PNAME(clk_core_aisp_p) = { "clk_aisp_pll", "clk_vcp_pvtpll" };
+PNAME(clk_core_isp_root_p) = { "clk_isp_pll", "clk_isp_pvtpll" };
+PNAME(clk_gmac_ptp_ref_p) = { "clk_gmac_ptp_ref_src", "clk_gmac_ptp_from_io" };
+PNAME(clk_saradc1_p) = { "clk_saradc1_src", "clk_saradc1_rcosc_io" };
+PNAME(clk_saradc2_p) = { "clk_saradc2_src", "clk_saradc2_rcosc_io" };
+PNAME(clk_rcosc_src_p) = { "xin24m", "clk_rcosc", "clk_rcosc_div2",
+ "clk_rcosc_div3", "clk_rcosc_div4" };
+PNAME(busclk_pmu_mux_p) = { "clk_cpll_div10", "clk_rcosc_src" };
+PNAME(clk_xin_rc_div_p) = { "xin24m", "clk_rcosc_src" };
+PNAME(clk_32k_p) = { "clk_xin_rc_div", "clk_32k_rtc", "clk_32k_io" };
+PNAME(mux_24m_32k_p) = { "xin24m", "clk_32k" };
+PNAME(mux_24m_rcosc_buspmu_p) = { "xin24m", "clk_rcosc_src", "busclk_pmu_src" };
+PNAME(mux_24m_rcosc_buspmu_32k_p) = { "xin24m", "clk_rcosc_src", "busclk_pmu_src",
+ "clk_32k" };
+PNAME(sclk_uart0_p) = { "sclk_uart0_src", "xin24m", "clk_rcosc_src" };
+PNAME(clk_osc_rcosc_ctrl_p) = { "clk_rcosc_src", "clk_testout_out" };
+PNAME(lrck_src_asrc_p) = { "mclk_asrc0", "mclk_asrc1", "mclk_asrc2", "mclk_asrc3",
+ "fs_inter_from_sai0", "fs_inter_from_sai1",
+ "fs_inter_from_sai2", "clkout_pdm"};
+PNAME(clk_ref_pipephy_p) = { "clk_ref_pipephy_cpll_src", "xin24m" };
+PNAME(clk_timer0_parents_p) = { "clk_timer_root", "mclk_sai0_from_io",
+ "sclk_sai0_from_io" };
+PNAME(clk_timer1_parents_p) = { "clk_timer_root", "mclk_sai1_from_io",
+ "sclk_sai1_from_io" };
+PNAME(clk_timer2_parents_p) = { "clk_timer_root", "mclk_sai2_from_io",
+ "sclk_sai2_from_io" };
+PNAME(clk_timer3_parents_p) = { "clk_timer_root", "mclk_asrc0", "mclk_asrc1" };
+PNAME(clk_timer4_parents_p) = { "clk_timer_root", "mclk_asrc2", "mclk_asrc3" };
+PNAME(clk_macphy_p) = { "xin24m", "clk_cpll_div20" };
+PNAME(clk_cpll_div10_p) = { "gpll", "clk_aisp_pll_src" };
+
+static struct rockchip_pll_clock rv1126b_pll_clks[] __initdata = {
+ [gpll] = PLL(pll_rk3328, PLL_GPLL, "gpll", mux_pll_p,
+ CLK_IS_CRITICAL, RV1126B_PLL_CON(8),
+ RV1126B_MODE_CON, 2, 10, 0, rv1126b_pll_rates),
+ [aupll] = PLL(pll_rk3328, PLL_AUPLL, "aupll", mux_pll_p,
+ CLK_IS_CRITICAL, RV1126B_PLL_CON(0),
+ RV1126B_MODE_CON, 0, 10, 0, rv1126b_pll_rates),
+ [cpll] = PLL(pll_rk3328, PLL_CPLL, "cpll", mux_pll_p,
+ CLK_IS_CRITICAL, RV1126B_PERIPLL_CON(0),
+ RV1126B_MODE_CON, 4, 10, 0, rv1126b_pll_rates),
+ [dpll] = PLL(pll_rk3328, 0, "dpll", mux_pll_p,
+ CLK_IS_CRITICAL, RV1126B_SUBDDRPLL_CON(0),
+ RV1126B_MODE_CON, 2, 10, 0, rv1126b_pll_rates),
+};
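+/*
+ * Note that dpll above is registered with a consumer id of 0, i.e. it
+ * is not exposed through the dt-bindings header; judging by the
+ * SUBDDRPLL register names it presumably only feeds the DDR subsystem.
+ */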
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
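+/*
+ * Child mux for the PMU xin24m/rcosc divider: CLK_32K selects between
+ * the divider output ("clk_xin_rc_div"), the RTC 32k clock and the
+ * 32k io pad (see clk_32k_p above).
+ */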
+static struct rockchip_clk_branch rv1126b_rcdiv_pmu_fracmux __initdata =
+ MUX(CLK_32K, "clk_32k", clk_32k_p, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
+ RV1126B_PMUCLKSEL_CON(2), 1, 2, MFLAGS);
+
+static struct rockchip_clk_branch rv1126b_clk_branches[] __initdata = {
+
+ FACTOR(0, "clk_rcosc_div2", "clk_rcosc", 0, 1, 2),
+ FACTOR(0, "clk_rcosc_div3", "clk_rcosc", 0, 1, 3),
+ FACTOR(0, "clk_rcosc_div4", "clk_rcosc", 0, 1, 4),
+
+ /* Clock Definition */
+ COMPOSITE_NODIV(CLK_AISP_PLL_SRC, "clk_aisp_pll_src", mux_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(62), 4, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(5), 4, GFLAGS),
+ DIV(CLK_AISP_PLL, "clk_aisp_pll", "clk_aisp_pll_src", 0,
+ RV1126B_CLKSEL_CON(62), 0, 3, DFLAGS),
+
+ COMPOSITE(CLK_CPLL_DIV10, "clk_cpll_div10", clk_cpll_div10_p, 0,
+ RV1126B_CLKSEL_CON(1), 15, 1, MFLAGS, 5, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 1, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPLL_DIV20, "clk_cpll_div20", "cpll", 0,
+ RV1126B_CLKSEL_CON(1), 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPLL_DIV8, "clk_cpll_div8", "cpll", 0,
+ RV1126B_CLKSEL_CON(1), 10, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 2, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV8, "clk_gpll_div8", "gpll", 0,
+ RV1126B_CLKSEL_CON(2), 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV6, "clk_gpll_div6", "gpll", 0,
+ RV1126B_CLKSEL_CON(2), 5, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 4, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV4, "clk_gpll_div4", "gpll", 0,
+ RV1126B_CLKSEL_CON(2), 10, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 5, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPLL_DIV3, "clk_cpll_div3", "cpll", 0,
+ RV1126B_CLKSEL_CON(3), 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 6, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV3, "clk_gpll_div3", "gpll", 0,
+ RV1126B_CLKSEL_CON(3), 5, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CPLL_DIV2, "clk_cpll_div2", "cpll", 0,
+ RV1126B_CLKSEL_CON(3), 10, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 8, GFLAGS),
+ COMPOSITE_NOMUX(CLK_GPLL_DIV2, "clk_gpll_div2", "gpll", 0,
+ RV1126B_CLKSEL_CON(4), 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(0), 9, GFLAGS),
+ MUX(CLK_CM_FRAC0_SRC, "clk_cm_frac0_src", mux_24m_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 0, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_CM_FRAC0, "clk_cm_frac0", "clk_cm_frac0_src", 0,
+ RV1126B_CLKSEL_CON(25), 0,
+ RV1126B_CLKGATE_CON(1), 0, GFLAGS),
+ MUX(CLK_CM_FRAC1_SRC, "clk_cm_frac1_src", mux_24m_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 2, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_CM_FRAC1, "clk_cm_frac1", "clk_cm_frac1_src", 0,
+ RV1126B_CLKSEL_CON(26), 0,
+ RV1126B_CLKGATE_CON(1), 1, GFLAGS),
+ MUX(CLK_CM_FRAC2_SRC, "clk_cm_frac2_src", mux_24m_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 4, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_CM_FRAC2, "clk_cm_frac2", "clk_cm_frac2_src", 0,
+ RV1126B_CLKSEL_CON(27), 0,
+ RV1126B_CLKGATE_CON(1), 2, GFLAGS),
+ MUX(CLK_UART_FRAC0_SRC, "clk_uart_frac0_src", mux_24m_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 6, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC0, "clk_uart_frac0", "clk_uart_frac0_src", 0,
+ RV1126B_CLKSEL_CON(28), 0,
+ RV1126B_CLKGATE_CON(1), 3, GFLAGS),
+ MUX(CLK_UART_FRAC1_SRC, "clk_uart_frac1_src", mux_24m_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(10), 8, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_UART_FRAC1, "clk_uart_frac1", "clk_uart_frac1_src", 0,
+ RV1126B_CLKSEL_CON(29), 0,
+ RV1126B_CLKGATE_CON(1), 4, GFLAGS),
+ MUX(CLK_AUDIO_FRAC0_SRC, "clk_audio_frac0_src", mux_24m_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(10), 10, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC0, "clk_audio_frac0", "clk_audio_frac0_src", 0,
+ RV1126B_CLKSEL_CON(30), 0,
+ RV1126B_CLKGATE_CON(1), 5, GFLAGS),
+ MUX(CLK_AUDIO_FRAC1_SRC, "clk_audio_frac1_src", mux_24m_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(10), 12, 2, MFLAGS),
+ COMPOSITE_FRAC(CLK_AUDIO_FRAC1, "clk_audio_frac1", "clk_audio_frac1_src", 0,
+ RV1126B_CLKSEL_CON(31), 0,
+ RV1126B_CLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE(CLK_AUDIO_INT0, "clk_audio_int0", mux_24m_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 7, GFLAGS),
+ COMPOSITE(CLK_AUDIO_INT1, "clk_audio_int1", mux_24m_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(11), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE(SCLK_UART0_SRC, "sclk_uart0_src", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(12), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 9, GFLAGS),
+ COMPOSITE(SCLK_UART1, "sclk_uart1", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(12), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 10, GFLAGS),
+ COMPOSITE(SCLK_UART2, "sclk_uart2", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(13), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 11, GFLAGS),
+ COMPOSITE(SCLK_UART3, "sclk_uart3", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(13), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 12, GFLAGS),
+ COMPOSITE(SCLK_UART4, "sclk_uart4", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(14), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 13, GFLAGS),
+ COMPOSITE(SCLK_UART5, "sclk_uart5", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(14), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(1), 14, GFLAGS),
+ COMPOSITE(SCLK_UART6, "sclk_uart6", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(15), 5, 3, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE(SCLK_UART7, "sclk_uart7", mux_sclk_uart_src_p, 0,
+ RV1126B_CLKSEL_CON(15), 13, 3, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 1, GFLAGS),
+ COMPOSITE(MCLK_SAI0, "mclk_sai0", mclk_sai0_src_p, 0,
+ RV1126B_CLKSEL_CON(16), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 2, GFLAGS),
+ COMPOSITE(MCLK_SAI1, "mclk_sai1", mclk_sai1_src_p, 0,
+ RV1126B_CLKSEL_CON(17), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 3, GFLAGS),
+ COMPOSITE(MCLK_SAI2, "mclk_sai2", mclk_sai2_src_p, 0,
+ RV1126B_CLKSEL_CON(18), 8, 4, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 4, GFLAGS),
+ COMPOSITE(MCLK_PDM, "mclk_pdm", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(19), 6, 4, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE_NOGATE(0, "clkout_pdm_src", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(20), 8, 4, MFLAGS, 0, 8, DFLAGS),
+ GATE(CLKOUT_PDM, "clkout_pdm", "clkout_pdm_src", 0,
+ RV1126B_CLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC0, "mclk_asrc0", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(16), 12, 4, MFLAGS,
+ RV1126B_CLKGATE_CON(2), 7, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC1, "mclk_asrc1", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(17), 12, 4, MFLAGS,
+ RV1126B_CLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC2, "mclk_asrc2", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(18), 12, 4, MFLAGS,
+ RV1126B_CLKGATE_CON(2), 9, GFLAGS),
+ COMPOSITE_NODIV(MCLK_ASRC3, "mclk_asrc3", mux_sai_src_p, 0,
+ RV1126B_CLKSEL_CON(19), 12, 4, MFLAGS,
+ RV1126B_CLKGATE_CON(2), 10, GFLAGS),
+ COMPOSITE(CLK_ASRC0, "clk_asrc0", mux_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(21), 6, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 11, GFLAGS),
+ COMPOSITE(CLK_ASRC1, "clk_asrc1", mux_gpll_aupll_p, 0,
+ RV1126B_CLKSEL_CON(21), 14, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(2), 12, GFLAGS),
+ COMPOSITE_NOMUX(CLK_CORE_PLL, "clk_core_pll", "gpll", CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(60), 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 0, GFLAGS),
+ COMPOSITE_NOMUX(CLK_NPU_PLL, "clk_npu_pll", "gpll", CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(60), 6, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 1, GFLAGS),
+ COMPOSITE(CLK_VEPU_PLL, "clk_vepu_pll", mux_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(61), 4, 2, MFLAGS, 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 2, GFLAGS),
+ COMPOSITE(CLK_ISP_PLL, "clk_isp_pll", mux_gpll_aupll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(61), 10, 2, MFLAGS, 6, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 3, GFLAGS),
+ COMPOSITE(CLK_SARADC0_SRC, "clk_saradc0_src", mux_200m_24m_p, 0,
+ RV1126B_CLKSEL_CON(63), 12, 1, MFLAGS, 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 6, GFLAGS),
+ COMPOSITE(CLK_SARADC1_SRC, "clk_saradc1_src", mux_200m_24m_p, 0,
+ RV1126B_CLKSEL_CON(63), 13, 1, MFLAGS, 4, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 7, GFLAGS),
+ COMPOSITE(CLK_SARADC2_SRC, "clk_saradc2_src", mux_200m_24m_p, 0,
+ RV1126B_CLKSEL_CON(63), 14, 1, MFLAGS, 8, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(5), 8, GFLAGS),
+ GATE(HCLK_RKNN, "hclk_rknn", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(5), 10, GFLAGS),
+ GATE(PCLK_NPU_ROOT, "pclk_npu_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(5), 11, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VEPU_ROOT, "aclk_vepu_root", mux_500m_400m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(40), 0, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(5), 12, GFLAGS),
+ GATE(HCLK_VEPU_ROOT, "hclk_vepu_root", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(PCLK_VEPU_ROOT, "pclk_vepu_root", "clk_cpll_div10", 0,
+ RV1126B_CLKGATE_CON(5), 14, GFLAGS),
+ COMPOSITE(CLK_CORE_RGA_SRC, "clk_core_rga_src", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(40), 5, 1, MFLAGS, 2, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 0, GFLAGS),
+ COMPOSITE_NODIV(ACLK_GMAC_ROOT, "aclk_gmac_root", mux_300m_200m_p, 0,
+ RV1126B_CLKSEL_CON(40), 6, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(6), 1, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VI_ROOT, "aclk_vi_root", mux_500m_400m_300m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(40), 7, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(6), 2, GFLAGS),
+ GATE(HCLK_VI_ROOT, "hclk_vi_root", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(6), 3, GFLAGS),
+ GATE(PCLK_VI_ROOT, "pclk_vi_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(6), 4, GFLAGS),
+ COMPOSITE_NODIV(DCLK_VICAP_ROOT, "dclk_vicap_root", mux_333m_200m_p, 0,
+ RV1126B_CLKSEL_CON(42), 5, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(6), 5, GFLAGS),
+ COMPOSITE(CLK_SYS_DSMC_ROOT, "clk_sys_dsmc_root", mux_24m_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(40), 14, 2, MFLAGS, 9, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 6, GFLAGS),
+ COMPOSITE(ACLK_VDO_ROOT, "aclk_vdo_root", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(42), 4, 1, MFLAGS, 0, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 7, GFLAGS),
+ COMPOSITE(ACLK_RKVDEC_ROOT, "aclk_rkvdec_root", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(42), 10, 1, MFLAGS, 6, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 8, GFLAGS),
+ GATE(HCLK_VDO_ROOT, "hclk_vdo_root", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(6), 9, GFLAGS),
+ GATE(PCLK_VDO_ROOT, "pclk_vdo_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(6), 10, GFLAGS),
+ COMPOSITE(DCLK_VOP, "dclk_vop", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(43), 8, 1, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 12, GFLAGS),
+ COMPOSITE(DCLK_OOC_SRC, "dclk_ooc_src", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(62), 7, 1, MFLAGS, 8, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(6), 13, GFLAGS),
+ GATE(DCLK_DECOM_SRC, "dclk_decom_src", "clk_gpll_div3", 0,
+ RV1126B_CLKGATE_CON(6), 14, GFLAGS),
+ GATE(PCLK_DDR_ROOT, "pclk_ddr_root", "clk_cpll_div10", 0,
+ RV1126B_CLKGATE_CON(7), 0, GFLAGS),
+ COMPOSITE(ACLK_SYSMEM, "aclk_sysmem", mux_gpll_cpll_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(44), 3, 1, MFLAGS, 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 1, GFLAGS),
+ COMPOSITE_NODIV(ACLK_TOP_ROOT, "aclk_top_root", mux_600m_400m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(44), 6, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(7), 3, GFLAGS),
+ COMPOSITE_NODIV(ACLK_BUS_ROOT, "aclk_bus_root", mux_400m_300m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(44), 8, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(7), 4, GFLAGS),
+ COMPOSITE_NODIV(HCLK_BUS_ROOT, "hclk_bus_root", mux_200m_100m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(44), 10, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(7), 5, GFLAGS),
+ GATE(PCLK_BUS_ROOT, "pclk_bus_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(7), 6, GFLAGS),
+ COMPOSITE(CCLK_SDMMC0, "cclk_sdmmc0", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(45), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 7, GFLAGS),
+ COMPOSITE(CCLK_SDMMC1, "cclk_sdmmc1", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 8, GFLAGS),
+ COMPOSITE(CCLK_EMMC, "cclk_emmc", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(47), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 9, GFLAGS),
+ COMPOSITE(SCLK_2X_FSPI0, "sclk_2x_fspi0", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(48), 8, 2, MFLAGS, 0, 8, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 10, GFLAGS),
+ COMPOSITE(CLK_GMAC_PTP_REF_SRC, "clk_gmac_ptp_ref_src", mux_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(45), 10, 1, MFLAGS, 11, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(7), 11, GFLAGS),
+ GATE(CLK_GMAC_125M, "clk_gmac_125m", "clk_cpll_div8", 0,
+ RV1126B_CLKGATE_CON(7), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER_ROOT, "clk_timer_root", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 11, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(7), 13, GFLAGS),
+ COMPOSITE_NODIV(TCLK_WDT_NS_SRC, "tclk_wdt_ns_src", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 12, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 0, GFLAGS),
+ COMPOSITE_NODIV(TCLK_WDT_S_SRC, "tclk_wdt_s_src", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 13, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 1, GFLAGS),
+ COMPOSITE_NODIV(TCLK_WDT_HPMCU, "tclk_wdt_hpmcu", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(46), 14, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 2, GFLAGS),
+ COMPOSITE(CLK_CAN0, "clk_can0", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(49), 6, 2, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(8), 4, GFLAGS),
+ COMPOSITE(CLK_CAN1, "clk_can1", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(49), 14, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(8), 5, GFLAGS),
+ COMPOSITE_NODIV(PCLK_PERI_ROOT, "pclk_peri_root", mux_100m_24m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(47), 12, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 6, GFLAGS),
+ COMPOSITE_NODIV(ACLK_PERI_ROOT, "aclk_peri_root", mux_200m_24m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(47), 13, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_I2C_BUS_SRC, "clk_i2c_bus_src", mux_200m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 1, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 9, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI0, "clk_spi0", mux_200m_100m_50m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 2, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 10, GFLAGS),
+ COMPOSITE_NODIV(CLK_SPI1, "clk_spi1", mux_200m_100m_50m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 4, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(8), 11, GFLAGS),
+ GATE(BUSCLK_PMU_SRC, "busclk_pmu_src", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(8), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM0, "clk_pwm0", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 8, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM2, "clk_pwm2", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 10, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_PWM3, "clk_pwm3", mux_100m_24m_p, 0,
+ RV1126B_CLKSEL_CON(50), 11, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 3, GFLAGS),
+ COMPOSITE_NODIV(CLK_PKA_RKCE_SRC, "clk_pka_rkce_src", mux_300m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(50), 12, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 4, GFLAGS),
+ COMPOSITE_NODIV(ACLK_RKCE_SRC, "aclk_rkce_src", mux_200m_24m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(50), 13, 1, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 5, GFLAGS),
+ COMPOSITE_NODIV(ACLK_VCP_ROOT, "aclk_vcp_root", mux_500m_400m_200m_p, CLK_IS_CRITICAL,
+ RV1126B_CLKSEL_CON(48), 12, 2, MFLAGS,
+ RV1126B_CLKGATE_CON(9), 6, GFLAGS),
+ GATE(HCLK_VCP_ROOT, "hclk_vcp_root", "clk_gpll_div8", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(9), 7, GFLAGS),
+ GATE(PCLK_VCP_ROOT, "pclk_vcp_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(9), 8, GFLAGS),
+ COMPOSITE(CLK_CORE_FEC_SRC, "clk_core_fec_src", mux_gpll_cpll_p, 0,
+ RV1126B_CLKSEL_CON(51), 3, 1, MFLAGS, 0, 3, DFLAGS,
+ RV1126B_CLKGATE_CON(9), 9, GFLAGS),
+ GATE(CLK_50M_GMAC_IOBUF_VI, "clk_50m_gmac_iobuf_vi", "clk_cpll_div20", 0,
+ RV1126B_CLKGATE_CON(9), 11, GFLAGS),
+ GATE(PCLK_TOP_ROOT, "pclk_top_root", "clk_cpll_div10", CLK_IS_CRITICAL,
+ RV1126B_CLKGATE_CON(15), 0, GFLAGS),
+ COMPOSITE(CLK_MIPI0_OUT2IO, "clk_mipi0_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(67), 11, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 3, GFLAGS),
+ COMPOSITE(CLK_MIPI1_OUT2IO, "clk_mipi1_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(67), 12, 1, MFLAGS, 6, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 4, GFLAGS),
+ COMPOSITE(CLK_MIPI2_OUT2IO, "clk_mipi2_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(68), 11, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 5, GFLAGS),
+ COMPOSITE(CLK_MIPI3_OUT2IO, "clk_mipi3_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(68), 12, 1, MFLAGS, 6, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 6, GFLAGS),
+ COMPOSITE(CLK_CIF_OUT2IO, "clk_cif_out2io", mux_600m_24m_p, 0,
+ RV1126B_CLKSEL_CON(69), 5, 1, MFLAGS, 0, 5, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 7, GFLAGS),
+ COMPOSITE(CLK_MAC_OUT2IO, "clk_mac_out2io", mux_gpll_cpll_24m_p, 0,
+ RV1126B_CLKSEL_CON(69), 6, 2, MFLAGS, 8, 7, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 8, GFLAGS),
+ COMPOSITE_NOMUX(MCLK_SAI0_OUT2IO, "mclk_sai0_out2io", "mclk_sai0", CLK_SET_RATE_PARENT,
+ RV1126B_CLKSEL_CON(70), 0, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 9, GFLAGS),
+ COMPOSITE_NOMUX(MCLK_SAI1_OUT2IO, "mclk_sai1_out2io", "mclk_sai1", CLK_SET_RATE_PARENT,
+ RV1126B_CLKSEL_CON(70), 5, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 10, GFLAGS),
+ COMPOSITE_NOMUX(MCLK_SAI2_OUT2IO, "mclk_sai2_out2io", "mclk_sai2", CLK_SET_RATE_PARENT,
+ RV1126B_CLKSEL_CON(70), 10, 4, DFLAGS,
+ RV1126B_CLKGATE_CON(15), 11, GFLAGS),
+
+ /* pd_npu */
+ MUX(ACLK_RKNN, "aclk_rknn", aclk_npu_root_p, CLK_SET_RATE_PARENT,
+ RV1126B_NPUCLKSEL_CON(0), 1, 1, MFLAGS),
+
+ /* pd_vepu */
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(0), 7, GFLAGS),
+ GATE(DBCLK_GPIO3, "dbclk_gpio3", "xin24m", 0,
+ RV1126B_VEPUCLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_IOC_VCCIO3, "pclk_ioc_vccio3", "pclk_vepu_root", CLK_IS_CRITICAL,
+ RV1126B_VEPUCLKGATE_CON(0), 9, GFLAGS),
+ GATE(PCLK_SARADC0, "pclk_saradc0", "pclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(0), 10, GFLAGS),
+ MUX(CLK_SARADC0, "clk_saradc0", clk_saradc0_p, CLK_SET_RATE_PARENT,
+ RV1126B_VEPUCLKSEL_CON(0), 2, 1, MFLAGS),
+ GATE(HCLK_SDMMC1, "hclk_sdmmc1", "hclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(0), 12, GFLAGS),
+ GATE(HCLK_VEPU, "hclk_vepu", "hclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(1), 1, GFLAGS),
+ GATE(ACLK_VEPU, "aclk_vepu", "aclk_vepu_root", 0,
+ RV1126B_VEPUCLKGATE_CON(1), 2, GFLAGS),
+ COMPOSITE_NODIV(CLK_CORE_VEPU, "clk_core_vepu", clk_core_vepu_p, CLK_SET_RATE_PARENT,
+ RV1126B_VEPUCLKSEL_CON(0), 1, 1, MFLAGS,
+ RV1126B_VEPUCLKGATE_CON(1), 3, GFLAGS),
+
+ /* pd_vcp */
+ GATE(HCLK_FEC, "hclk_fec", "hclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(1), 0, GFLAGS),
+ GATE(ACLK_FEC, "aclk_fec", "aclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(1), 1, GFLAGS),
+ COMPOSITE_NODIV(CLK_CORE_FEC, "clk_core_fec", clk_core_fec_p, CLK_SET_RATE_PARENT,
+ RV1126B_VCPCLKSEL_CON(0), 13, 1, MFLAGS,
+ RV1126B_VCPCLKGATE_CON(1), 2, GFLAGS),
+ GATE(HCLK_AVSP, "hclk_avsp", "hclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(1), 3, GFLAGS),
+ GATE(ACLK_AVSP, "aclk_avsp", "aclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(1), 4, GFLAGS),
+ GATE(HCLK_AISP, "hclk_aisp", "hclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(0), 11, GFLAGS),
+ GATE(ACLK_AISP, "aclk_aisp", "aclk_vcp_root", 0,
+ RV1126B_VCPCLKGATE_CON(0), 12, GFLAGS),
+ COMPOSITE_NODIV(CLK_CORE_AISP, "clk_core_aisp", clk_core_aisp_p, CLK_SET_RATE_PARENT,
+ RV1126B_VCPCLKSEL_CON(0), 15, 1, MFLAGS,
+ RV1126B_VCPCLKGATE_CON(0), 13, GFLAGS),
+
+ /* pd_vi */
+ MUX(CLK_CORE_ISP_ROOT, "clk_core_isp_root", clk_core_isp_root_p, CLK_SET_RATE_PARENT,
+ RV1126B_VICLKSEL_CON(0), 1, 1, MFLAGS),
+ GATE(PCLK_DSMC, "pclk_dsmc", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(0), 8, GFLAGS),
+ GATE(ACLK_DSMC, "aclk_dsmc", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(0), 9, GFLAGS),
+ GATE(HCLK_CAN0, "hclk_can0", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(0), 10, GFLAGS),
+ GATE(HCLK_CAN1, "hclk_can1", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(0), 11, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 0, GFLAGS),
+ GATE(DBCLK_GPIO2, "dbclk_gpio2", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 1, GFLAGS),
+ GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 2, GFLAGS),
+ GATE(DBCLK_GPIO4, "dbclk_gpio4", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 3, GFLAGS),
+ GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 4, GFLAGS),
+ GATE(DBCLK_GPIO5, "dbclk_gpio5", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 5, GFLAGS),
+ GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 6, GFLAGS),
+ GATE(DBCLK_GPIO6, "dbclk_gpio6", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 7, GFLAGS),
+ GATE(PCLK_GPIO7, "pclk_gpio7", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(1), 8, GFLAGS),
+ GATE(DBCLK_GPIO7, "dbclk_gpio7", "xin24m", 0,
+ RV1126B_VICLKGATE_CON(1), 9, GFLAGS),
+ GATE(PCLK_IOC_VCCIO2, "pclk_ioc_vccio2", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 10, GFLAGS),
+ GATE(PCLK_IOC_VCCIO4, "pclk_ioc_vccio4", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 11, GFLAGS),
+ GATE(PCLK_IOC_VCCIO5, "pclk_ioc_vccio5", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 12, GFLAGS),
+ GATE(PCLK_IOC_VCCIO6, "pclk_ioc_vccio6", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 13, GFLAGS),
+ GATE(PCLK_IOC_VCCIO7, "pclk_ioc_vccio7", "pclk_vi_root", CLK_IS_CRITICAL,
+ RV1126B_VICLKGATE_CON(1), 14, GFLAGS),
+ GATE(HCLK_ISP, "hclk_isp", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 0, GFLAGS),
+ GATE(ACLK_ISP, "aclk_isp", "aclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 1, GFLAGS),
+ GATE(CLK_CORE_ISP, "clk_core_isp", "clk_core_isp_root", 0,
+ RV1126B_VICLKGATE_CON(2), 2, GFLAGS),
+ GATE(HCLK_VICAP, "hclk_vicap", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 3, GFLAGS),
+ GATE(ACLK_VICAP, "aclk_vicap", "aclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 4, GFLAGS),
+ GATE(DCLK_VICAP, "dclk_vicap", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(2), 5, GFLAGS),
+ GATE(ISP0CLK_VICAP, "isp0clk_vicap", "clk_core_isp_root", 0,
+ RV1126B_VICLKGATE_CON(2), 6, GFLAGS),
+ GATE(HCLK_VPSS, "hclk_vpss", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 7, GFLAGS),
+ GATE(ACLK_VPSS, "aclk_vpss", "aclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 8, GFLAGS),
+ GATE(CLK_CORE_VPSS, "clk_core_vpss", "clk_core_isp_root", 0,
+ RV1126B_VICLKGATE_CON(2), 9, GFLAGS),
+ GATE(HCLK_VPSL, "hclk_vpsl", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 10, GFLAGS),
+ GATE(ACLK_VPSL, "aclk_vpsl", "aclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(2), 11, GFLAGS),
+ GATE(CLK_CORE_VPSL, "clk_core_vpsl", "clk_core_isp_root", 0,
+ RV1126B_VICLKGATE_CON(2), 12, GFLAGS),
+ GATE(PCLK_CSI2HOST0, "pclk_csi2host0", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 0, GFLAGS),
+ GATE(DCLK_CSI2HOST0, "dclk_csi2host0", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(3), 1, GFLAGS),
+ GATE(PCLK_CSI2HOST1, "pclk_csi2host1", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 2, GFLAGS),
+ GATE(DCLK_CSI2HOST1, "dclk_csi2host1", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(3), 3, GFLAGS),
+ GATE(PCLK_CSI2HOST2, "pclk_csi2host2", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 4, GFLAGS),
+ GATE(DCLK_CSI2HOST2, "dclk_csi2host2", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(3), 5, GFLAGS),
+ GATE(PCLK_CSI2HOST3, "pclk_csi2host3", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 6, GFLAGS),
+ GATE(DCLK_CSI2HOST3, "dclk_csi2host3", "dclk_vicap_root", 0,
+ RV1126B_VICLKGATE_CON(3), 7, GFLAGS),
+ GATE(HCLK_SDMMC0, "hclk_sdmmc0", "hclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 8, GFLAGS),
+ GATE(ACLK_GMAC, "aclk_gmac", "aclk_gmac_root", 0,
+ RV1126B_VICLKGATE_CON(3), 9, GFLAGS),
+ GATE(PCLK_GMAC, "pclk_gmac", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 10, GFLAGS),
+ MUX(CLK_GMAC_PTP_REF, "clk_gmac_ptp_ref", clk_gmac_ptp_ref_p, CLK_SET_RATE_PARENT,
+ RV1126B_VICLKSEL_CON(0), 14, 1, MFLAGS),
+ GATE(PCLK_CSIPHY0, "pclk_csiphy0", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 11, GFLAGS),
+ GATE(PCLK_CSIPHY1, "pclk_csiphy1", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 12, GFLAGS),
+ GATE(PCLK_MACPHY, "pclk_macphy", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(3), 13, GFLAGS),
+ GATE(PCLK_SARADC1, "pclk_saradc1", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(4), 0, GFLAGS),
+ MUX(CLK_SARADC1, "clk_saradc1", clk_saradc1_p, CLK_SET_RATE_PARENT,
+ RV1126B_VICLKSEL_CON(0), 2, 1, MFLAGS),
+ GATE(PCLK_SARADC2, "pclk_saradc2", "pclk_vi_root", 0,
+ RV1126B_VICLKGATE_CON(4), 2, GFLAGS),
+ MUX(CLK_SARADC2, "clk_saradc2", clk_saradc2_p, CLK_SET_RATE_PARENT,
+ RV1126B_VICLKSEL_CON(0), 3, 1, MFLAGS),
+ COMPOSITE_NODIV(CLK_MACPHY, "clk_macphy", clk_macphy_p, 0,
+ RV1126B_VICLKSEL_CON(1), 1, 1, MFLAGS,
+ RV1126B_VICLKGATE_CON(0), 12, GFLAGS),
+
+ /* pd_vdo */
+ GATE(ACLK_RKVDEC, "aclk_rkvdec", "aclk_rkvdec_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 7, GFLAGS),
+ GATE(HCLK_RKVDEC, "hclk_rkvdec", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 8, GFLAGS),
+ GATE(CLK_HEVC_CA_RKVDEC, "clk_hevc_ca_rkvdec", "aclk_rkvdec_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 9, GFLAGS),
+ GATE(ACLK_VOP, "aclk_vop", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 10, GFLAGS),
+ GATE(HCLK_VOP, "hclk_vop", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 11, GFLAGS),
+ GATE(ACLK_OOC, "aclk_ooc", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 13, GFLAGS),
+ GATE(HCLK_OOC, "hclk_ooc", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(0), 14, GFLAGS),
+ GATE(HCLK_RKJPEG, "hclk_rkjpeg", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 3, GFLAGS),
+ GATE(ACLK_RKJPEG, "aclk_rkjpeg", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 4, GFLAGS),
+ GATE(ACLK_RKMMU_DECOM, "aclk_rkmmu_decom", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 5, GFLAGS),
+ GATE(HCLK_RKMMU_DECOM, "hclk_rkmmu_decom", "hclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 6, GFLAGS),
+ GATE(DCLK_DECOM, "dclk_decom", "dclk_decom_src", 0,
+ RV1126B_VDOCLKGATE_CON(1), 8, GFLAGS),
+ GATE(ACLK_DECOM, "aclk_decom", "aclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 9, GFLAGS),
+ GATE(PCLK_DECOM, "pclk_decom", "pclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 10, GFLAGS),
+ GATE(PCLK_MIPI_DSI, "pclk_mipi_dsi", "pclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 12, GFLAGS),
+ GATE(PCLK_DSIPHY, "pclk_dsiphy", "pclk_vdo_root", 0,
+ RV1126B_VDOCLKGATE_CON(1), 13, GFLAGS),
+
+ /* pd_ddr */
+ GATE(PCLK_DDRC, "pclk_ddrc", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 2, GFLAGS),
+ GATE(PCLK_DDRMON, "pclk_ddrmon", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 3, GFLAGS),
+ GATE(CLK_TIMER_DDRMON, "clk_timer_ddrmon", "xin24m", 0,
+ RV1126B_DDRCLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_DFICTRL, "pclk_dfictrl", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_DDRPHY, "pclk_ddrphy", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_DMA2DDR, "pclk_dma2ddr", "pclk_ddr_root", CLK_IS_CRITICAL,
+ RV1126B_DDRCLKGATE_CON(0), 9, GFLAGS),
+
+ /* pd_pmu */
+ COMPOSITE_NODIV(CLK_RCOSC_SRC, "clk_rcosc_src", clk_rcosc_src_p, 0,
+ RV1126B_PMUCLKSEL_CON(1), 0, 3, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(0), 0, GFLAGS),
+ COMPOSITE_NOGATE(BUSCLK_PMU_MUX, "busclk_pmu_mux", busclk_pmu_mux_p, 0,
+ RV1126B_PMUCLKSEL_CON(1), 3, 1, MFLAGS, 4, 2, DFLAGS),
+ GATE(BUSCLK_PMU_ROOT, "busclk_pmu_root", "busclk_pmu_mux", 0,
+ RV1126B_PMUCLKGATE_CON(0), 1, GFLAGS),
+ GATE(BUSCLK_PMU1_ROOT, "busclk_pmu1_root", "busclk_pmu_mux", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(3), 11, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(0), 6, GFLAGS),
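+ /* id 0: not added to the clock lookup, only feeds clk_xin_rc_div's fracmux */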
+ MUX(0, "xin_rc_src", clk_xin_rc_div_p, 0,
+ RV1126B_PMUCLKSEL_CON(2), 0, 1, MFLAGS),
+ COMPOSITE_FRACMUX_NOGATE(CLK_XIN_RC_DIV, "clk_xin_rc_div", "xin_rc_src", CLK_SET_RATE_PARENT,
+ RV1126B_PMUCLKSEL_CON(8), 0,
+ &rv1126b_rcdiv_pmu_fracmux),
+ GATE(PCLK_PMU_GPIO0, "pclk_pmu_gpio0", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(0), 7, GFLAGS),
+ COMPOSITE_NODIV(DBCLK_PMU_GPIO0, "dbclk_pmu_gpio0", mux_24m_32k_p, 0,
+ RV1126B_PMUCLKSEL_CON(2), 4, 1, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_PMU_HP_TIMER, "pclk_pmu_hp_timer", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(0), 10, GFLAGS),
+ COMPOSITE(CLK_PMU_HP_TIMER, "clk_pmu_hp_timer", mux_cpll_24m_p, CLK_IS_CRITICAL,
+ RV1126B_PMUCLKSEL_CON(1), 13, 1, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_PMUCLKGATE_CON(0), 11, GFLAGS),
+ GATE(CLK_PMU_32K_HP_TIMER, "clk_pmu_32k_hp_timer", "clk_32k", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(0), 13, GFLAGS),
+ GATE(PCLK_PWM1, "pclk_pwm1", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(1), 0, GFLAGS),
+ COMPOSITE(CLK_PWM1, "clk_pwm1", mux_24m_rcosc_buspmu_p, 0,
+ RV1126B_PMUCLKSEL_CON(2), 8, 2, MFLAGS, 6, 2, DFLAGS,
+ RV1126B_PMUCLKGATE_CON(1), 1, GFLAGS),
+ GATE(CLK_OSC_PWM1, "clk_osc_pwm1", "xin24m", 0,
+ RV1126B_PMUCLKGATE_CON(1), 2, GFLAGS),
+ GATE(CLK_RC_PWM1, "clk_rc_pwm1", "clk_32k", 0,
+ RV1126B_PMUCLKGATE_CON(1), 3, GFLAGS),
+ GATE(PCLK_I2C2, "pclk_i2c2", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(1), 6, GFLAGS),
+ COMPOSITE(CLK_I2C2, "clk_i2c2", mux_24m_rcosc_buspmu_p, 0,
+ RV1126B_PMUCLKSEL_CON(2), 14, 2, MFLAGS, 12, 2, DFLAGS,
+ RV1126B_PMUCLKGATE_CON(1), 7, GFLAGS),
+ GATE(PCLK_UART0, "pclk_uart0", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(1), 8, GFLAGS),
+ COMPOSITE_NODIV(SCLK_UART0, "sclk_uart0", sclk_uart0_p, CLK_SET_RATE_PARENT,
+ RV1126B_PMUCLKSEL_CON(3), 0, 2, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(1), 11, GFLAGS),
+ GATE(PCLK_RCOSC_CTRL, "pclk_rcosc_ctrl", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(2), 0, GFLAGS),
+ COMPOSITE_NODIV(CLK_OSC_RCOSC_CTRL, "clk_osc_rcosc_ctrl", clk_osc_rcosc_ctrl_p, CLK_IS_CRITICAL,
+ RV1126B_PMUCLKSEL_CON(3), 2, 1, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(2), 1, GFLAGS),
+ GATE(CLK_REF_RCOSC_CTRL, "clk_ref_rcosc_ctrl", "xin24m", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(2), 2, GFLAGS),
+ GATE(PCLK_IOC_PMUIO0, "pclk_ioc_pmuio0", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(2), 3, GFLAGS),
+ GATE(CLK_REFOUT, "clk_refout", "xin24m", 0,
+ RV1126B_PMUCLKGATE_CON(2), 6, GFLAGS),
+ GATE(CLK_PREROLL, "clk_preroll", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(2), 7, GFLAGS),
+ GATE(CLK_PREROLL_32K, "clk_preroll_32k", "clk_32k", 0,
+ RV1126B_PMUCLKGATE_CON(2), 8, GFLAGS),
+ GATE(HCLK_PMU_SRAM, "hclk_pmu_sram", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMUCLKGATE_CON(2), 9, GFLAGS),
+ GATE(PCLK_WDT_LPMCU, "pclk_wdt_lpmcu", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(3), 0, GFLAGS),
+ COMPOSITE_NODIV(TCLK_WDT_LPMCU, "tclk_wdt_lpmcu", mux_24m_rcosc_buspmu_32k_p, 0,
+ RV1126B_PMUCLKSEL_CON(3), 6, 2, MFLAGS,
+ RV1126B_PMUCLKGATE_CON(3), 1, GFLAGS),
+ GATE(CLK_LPMCU, "clk_lpmcu", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(3), 2, GFLAGS),
+ GATE(CLK_LPMCU_RTC, "clk_lpmcu_rtc", "xin24m", 0,
+ RV1126B_PMUCLKGATE_CON(3), 3, GFLAGS),
+ GATE(PCLK_LPMCU_MAILBOX, "pclk_lpmcu_mailbox", "busclk_pmu_root", 0,
+ RV1126B_PMUCLKGATE_CON(3), 4, GFLAGS),
+
+ /* pd_pmu1 */
+ GATE(PCLK_SPI2AHB, "pclk_spi2ahb", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 0, GFLAGS),
+ GATE(HCLK_SPI2AHB, "hclk_spi2ahb", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 1, GFLAGS),
+ GATE(HCLK_FSPI1, "hclk_fspi1", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 2, GFLAGS),
+ GATE(HCLK_XIP_FSPI1, "hclk_xip_fspi1", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 3, GFLAGS),
+ COMPOSITE(SCLK_1X_FSPI1, "sclk_1x_fspi1", mux_24m_rcosc_buspmu_p, 0,
+ RV1126B_PMU1CLKSEL_CON(0), 0, 2, MFLAGS, 2, 3, DFLAGS,
+ RV1126B_PMU1CLKGATE_CON(0), 4, GFLAGS),
+ GATE(PCLK_IOC_PMUIO1, "pclk_ioc_pmuio1", "busclk_pmu_root", CLK_IS_CRITICAL,
+ RV1126B_PMU1CLKGATE_CON(0), 5, GFLAGS),
+ GATE(PCLK_AUDIO_ADC_PMU, "pclk_audio_adc_pmu", "busclk_pmu_root", 0,
+ RV1126B_PMU1CLKGATE_CON(0), 8, GFLAGS),
+
+ COMPOSITE(MCLK_LPSAI, "mclk_lpsai", mux_24m_rcosc_buspmu_p, 0,
+ RV1126B_PMU1CLKSEL_CON(0), 6, 2, MFLAGS, 8, 5, DFLAGS,
+ RV1126B_PMU1CLKGATE_CON(1), 3, GFLAGS),
+ GATE(MCLK_AUDIO_ADC_PMU, "mclk_audio_adc_pmu", "mclk_lpsai", CLK_IS_CRITICAL,
+ RV1126B_PMU1CLKGATE_CON(0), 9, GFLAGS),
+ FACTOR(MCLK_AUDIO_ADC_DIV4_PMU, "mclk_audio_adc_div4_pmu", "mclk_audio_adc_pmu", 0, 1, 4),
+
+ /* pd_bus */
+ GATE(ACLK_GIC400, "aclk_gic400", "hclk_bus_root", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(0), 8, GFLAGS),
+ GATE(PCLK_WDT_NS, "pclk_wdt_ns", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(0), 10, GFLAGS),
+ GATE(TCLK_WDT_NS, "tclk_wdt_ns", "tclk_wdt_ns_src", 0,
+ RV1126B_BUSCLKGATE_CON(0), 11, GFLAGS),
+ GATE(PCLK_WDT_HPMCU, "pclk_wdt_hpmcu", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 0, GFLAGS),
+ GATE(HCLK_CACHE, "hclk_cache", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_HPMCU_MAILBOX, "pclk_hpmcu_mailbox", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 3, GFLAGS),
+ GATE(PCLK_HPMCU_INTMUX, "pclk_hpmcu_intmux", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 4, GFLAGS),
+ GATE(CLK_HPMCU, "clk_hpmcu", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 5, GFLAGS),
+ GATE(CLK_HPMCU_RTC, "clk_hpmcu_rtc", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(1), 10, GFLAGS),
+ GATE(PCLK_RKDMA, "pclk_rkdma", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 11, GFLAGS),
+ GATE(ACLK_RKDMA, "aclk_rkdma", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(1), 12, GFLAGS),
+ GATE(PCLK_DCF, "pclk_dcf", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 0, GFLAGS),
+ GATE(ACLK_DCF, "aclk_dcf", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 1, GFLAGS),
+ GATE(HCLK_RGA, "hclk_rga", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 2, GFLAGS),
+ GATE(ACLK_RGA, "aclk_rga", "aclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 3, GFLAGS),
+ GATE(CLK_CORE_RGA, "clk_core_rga", "clk_core_rga_src", 0,
+ RV1126B_BUSCLKGATE_CON(2), 4, GFLAGS),
+ GATE(PCLK_TIMER, "pclk_timer", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 5, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER0, "clk_timer0", clk_timer0_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 0, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 6, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER1, "clk_timer1", clk_timer1_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 2, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 7, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER2, "clk_timer2", clk_timer2_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 4, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 8, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER3, "clk_timer3", clk_timer3_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 6, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 9, GFLAGS),
+ COMPOSITE_NODIV(CLK_TIMER4, "clk_timer4", clk_timer4_parents_p, 0,
+ RV1126B_BUSCLKSEL_CON(2), 8, 2, MFLAGS,
+ RV1126B_BUSCLKGATE_CON(2), 10, GFLAGS),
+ GATE(HCLK_RKRNG_S_NS, "hclk_rkrng_s_ns", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(2), 14, GFLAGS),
+ GATE(HCLK_RKRNG_NS, "hclk_rkrng_ns", "hclk_rkrng_s_ns", 0,
+ RV1126B_BUSCLKGATE_CON(2), 15, GFLAGS),
+ GATE(CLK_TIMER5, "clk_timer5", "clk_timer_root", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(2), 11, GFLAGS),
+ GATE(PCLK_I2C0, "pclk_i2c0", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 0, GFLAGS),
+ GATE(CLK_I2C0, "clk_i2c0", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 1, GFLAGS),
+ GATE(PCLK_I2C1, "pclk_i2c1", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 2, GFLAGS),
+ GATE(CLK_I2C1, "clk_i2c1", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 3, GFLAGS),
+ GATE(PCLK_I2C3, "pclk_i2c3", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 4, GFLAGS),
+ GATE(CLK_I2C3, "clk_i2c3", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 5, GFLAGS),
+ GATE(PCLK_I2C4, "pclk_i2c4", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 6, GFLAGS),
+ GATE(CLK_I2C4, "clk_i2c4", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 7, GFLAGS),
+ GATE(PCLK_I2C5, "pclk_i2c5", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 8, GFLAGS),
+ GATE(CLK_I2C5, "clk_i2c5", "clk_i2c_bus_src", 0,
+ RV1126B_BUSCLKGATE_CON(3), 9, GFLAGS),
+ GATE(PCLK_SPI0, "pclk_spi0", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 10, GFLAGS),
+ GATE(PCLK_SPI1, "pclk_spi1", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(3), 12, GFLAGS),
+ GATE(PCLK_PWM0, "pclk_pwm0", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 0, GFLAGS),
+ GATE(CLK_OSC_PWM0, "clk_osc_pwm0", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 1, GFLAGS),
+ GATE(CLK_RC_PWM0, "clk_rc_pwm0", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 2, GFLAGS),
+ GATE(PCLK_PWM2, "pclk_pwm2", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 3, GFLAGS),
+ GATE(CLK_OSC_PWM2, "clk_osc_pwm2", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 4, GFLAGS),
+ GATE(CLK_RC_PWM2, "clk_rc_pwm2", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 5, GFLAGS),
+ GATE(PCLK_PWM3, "pclk_pwm3", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 6, GFLAGS),
+ GATE(CLK_OSC_PWM3, "clk_osc_pwm3", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 7, GFLAGS),
+ GATE(CLK_RC_PWM3, "clk_rc_pwm3", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(4), 8, GFLAGS),
+ GATE(PCLK_UART1, "pclk_uart1", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 9, GFLAGS),
+ GATE(PCLK_UART2, "pclk_uart2", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 10, GFLAGS),
+ GATE(PCLK_UART3, "pclk_uart3", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 11, GFLAGS),
+ GATE(PCLK_UART4, "pclk_uart4", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 12, GFLAGS),
+ GATE(PCLK_UART5, "pclk_uart5", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 13, GFLAGS),
+ GATE(PCLK_UART6, "pclk_uart6", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 14, GFLAGS),
+ GATE(PCLK_UART7, "pclk_uart7", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(4), 15, GFLAGS),
+ GATE(PCLK_TSADC, "pclk_tsadc", "pclk_bus_root", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(5), 0, GFLAGS),
+ GATE(CLK_TSADC, "clk_tsadc", "xin24m", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(5), 1, GFLAGS),
+ GATE(HCLK_SAI0, "hclk_sai0", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 2, GFLAGS),
+ GATE(HCLK_SAI1, "hclk_sai1", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 4, GFLAGS),
+ GATE(HCLK_SAI2, "hclk_sai2", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 6, GFLAGS),
+ GATE(HCLK_RKDSM, "hclk_rkdsm", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 8, GFLAGS),
+ GATE(MCLK_RKDSM, "mclk_rkdsm", "mclk_sai2", 0,
+ RV1126B_BUSCLKGATE_CON(5), 9, GFLAGS),
+ GATE(HCLK_PDM, "hclk_pdm", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 10, GFLAGS),
+ GATE(HCLK_ASRC0, "hclk_asrc0", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 11, GFLAGS),
+ GATE(HCLK_ASRC1, "hclk_asrc1", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 12, GFLAGS),
+ GATE(PCLK_AUDIO_ADC_BUS, "pclk_audio_adc_bus", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(5), 13, GFLAGS),
+ GATE(MCLK_AUDIO_ADC_BUS, "mclk_audio_adc_bus", "mclk_sai2", 0,
+ RV1126B_BUSCLKGATE_CON(5), 14, GFLAGS),
+ FACTOR(MCLK_AUDIO_ADC_DIV4_BUS, "mclk_audio_adc_div4_bus", "mclk_audio_adc_bus", 0, 1, 4),
+ GATE(PCLK_RKCE, "pclk_rkce", "pclk_bus_root", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(6), 0, GFLAGS),
+ GATE(HCLK_NS_RKCE, "hclk_ns_rkce", "hclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(6), 1, GFLAGS),
+ GATE(PCLK_OTPC_NS, "pclk_otpc_ns", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(6), 2, GFLAGS),
+ GATE(CLK_SBPI_OTPC_NS, "clk_sbpi_otpc_ns", "xin24m", 0,
+ RV1126B_BUSCLKGATE_CON(6), 3, GFLAGS),
+ COMPOSITE_NOMUX(CLK_USER_OTPC_NS, "clk_user_otpc_ns", "xin24m", 0,
+ RV1126B_BUSCLKSEL_CON(2), 12, 3, DFLAGS,
+ RV1126B_BUSCLKGATE_CON(6), 4, GFLAGS),
+ GATE(PCLK_OTP_MASK, "pclk_otp_mask", "pclk_bus_root", 0,
+ RV1126B_BUSCLKGATE_CON(6), 6, GFLAGS),
+ GATE(CLK_TSADC_PHYCTRL, "clk_tsadc_phyctrl", "xin24m", CLK_IS_CRITICAL,
+ RV1126B_BUSCLKGATE_CON(6), 8, GFLAGS),
+ MUX(LRCK_SRC_ASRC0, "lrck_src_asrc0", lrck_src_asrc_p, 0,
+ RV1126B_BUSCLKSEL_CON(3), 0, 3, MFLAGS),
+ MUX(LRCK_DST_ASRC0, "lrck_dst_asrc0", lrck_src_asrc_p, 0,
+ RV1126B_BUSCLKSEL_CON(3), 4, 3, MFLAGS),
+ MUX(LRCK_SRC_ASRC1, "lrck_src_asrc1", lrck_src_asrc_p, 0,
+ RV1126B_BUSCLKSEL_CON(3), 8, 3, MFLAGS),
+ MUX(LRCK_DST_ASRC1, "lrck_dst_asrc1", lrck_src_asrc_p, 0,
+ RV1126B_BUSCLKSEL_CON(3), 12, 3, MFLAGS),
+ GATE(ACLK_NSRKCE, "aclk_nsrkce", "aclk_rkce_src", 0,
+ RV1126B_BUSCLKGATE_CON(2), 12, GFLAGS),
+ GATE(CLK_PKA_NSRKCE, "clk_pka_nsrkce", "clk_pka_rkce_src", 0,
+ RV1126B_BUSCLKGATE_CON(2), 13, GFLAGS),
+
+ /* pd_peri */
+ DIV(PCLK_RTC_ROOT, "pclk_rtc_root", "pclk_peri_root", 0,
+ RV1126B_PERICLKSEL_CON(0), 0, 2, DFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 5, GFLAGS),
+ GATE(DBCLK_GPIO1, "dbclk_gpio1", "xin24m", 0,
+ RV1126B_PERICLKGATE_CON(0), 6, GFLAGS),
+ GATE(PCLK_IOC_VCCIO1, "pclk_ioc_vccio1", "pclk_peri_root", CLK_IS_CRITICAL,
+ RV1126B_PERICLKGATE_CON(0), 7, GFLAGS),
+ GATE(ACLK_USB3OTG, "aclk_usb3otg", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 8, GFLAGS),
+ GATE(CLK_REF_USB3OTG, "clk_ref_usb3otg", "xin24m", 0,
+ RV1126B_PERICLKGATE_CON(0), 9, GFLAGS),
+ GATE(CLK_SUSPEND_USB3OTG, "clk_suspend_usb3otg", "xin24m", 0,
+ RV1126B_PERICLKGATE_CON(0), 10, GFLAGS),
+ GATE(HCLK_USB2HOST, "hclk_usb2host", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 11, GFLAGS),
+ GATE(HCLK_ARB_USB2HOST, "hclk_arb_usb2host", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 12, GFLAGS),
+ GATE(PCLK_RTC_TEST, "pclk_rtc_test", "pclk_rtc_root", 0,
+ RV1126B_PERICLKGATE_CON(0), 13, GFLAGS),
+ GATE(HCLK_EMMC, "hclk_emmc", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 0, GFLAGS),
+ GATE(HCLK_FSPI0, "hclk_fspi0", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 1, GFLAGS),
+ GATE(HCLK_XIP_FSPI0, "hclk_xip_fspi0", "aclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 2, GFLAGS),
+ GATE(PCLK_PIPEPHY, "pclk_pipephy", "pclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 8, GFLAGS),
+ GATE(PCLK_USB2PHY, "pclk_usb2phy", "pclk_peri_root", 0,
+ RV1126B_PERICLKGATE_CON(1), 10, GFLAGS),
+ COMPOSITE_NOMUX(CLK_REF_PIPEPHY_CPLL_SRC, "clk_ref_pipephy_cpll_src", "cpll", 0,
+ RV1126B_PERICLKSEL_CON(1), 0, 6, DFLAGS,
+ RV1126B_PERICLKGATE_CON(1), 14, GFLAGS),
+ MUX(CLK_REF_PIPEPHY, "clk_ref_pipephy", clk_ref_pipephy_p, 0,
+ RV1126B_PERICLKSEL_CON(1), 12, 1, MFLAGS),
+};
+
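+/*
+ * armclk is modelled as a plain mux; rate changes are handled by the
+ * multi-PLL cpuclk helper it is registered with in rv1126b_clk_init().
+ */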
+static struct rockchip_clk_branch rv1126b_armclk __initdata =
+ MUX(ARMCLK, "armclk", mux_armclk_p, CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ RV1126B_CORECLKSEL_CON(0), 1, 1, MFLAGS);
+
+static void __init rv1126b_clk_init(struct device_node *np)
+{
+ struct rockchip_clk_provider *ctx;
+ void __iomem *reg_base;
+ unsigned long clk_nr_clks;
+
+ clk_nr_clks = rockchip_clk_find_max_clk_id(rv1126b_clk_branches,
+ ARRAY_SIZE(rv1126b_clk_branches)) + 1;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base) {
+ pr_err("%s: could not map cru region\n", __func__);
+ return;
+ }
+
+ ctx = rockchip_clk_init(np, reg_base, clk_nr_clks);
+ if (IS_ERR(ctx)) {
+ pr_err("%s: rockchip clk init failed\n", __func__);
+ iounmap(reg_base);
+ return;
+ }
+
+ rockchip_clk_register_plls(ctx, rv1126b_pll_clks,
+ ARRAY_SIZE(rv1126b_pll_clks),
+ 0);
+
+ rockchip_clk_register_branches(ctx, rv1126b_clk_branches,
+ ARRAY_SIZE(rv1126b_clk_branches));
+
+ rockchip_clk_register_armclk_multi_pll(ctx, &rv1126b_armclk,
+ rv1126b_cpuclk_rates,
+ ARRAY_SIZE(rv1126b_cpuclk_rates));
+
+ rv1126b_rst_init(np, reg_base);
+
+ rockchip_register_restart_notifier(ctx, RV1126B_GLB_SRST_FST, NULL);
+
+ rockchip_clk_of_add_provider(np, ctx);
+
+ /*
+  * pvtpll src init: point the core/npu/vi/vepu/vcp source muxes at their
+  * PVTPLLs (hiword-mask writes, so only the select bits armed in
+  * PVTPLL_SRC_SEL_PVTPLL change).
+  */
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_CORECLKSEL_CON(0));
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_NPUCLKSEL_CON(0));
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_VICLKSEL_CON(0));
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_VEPUCLKSEL_CON(0));
+ writel_relaxed(PVTPLL_SRC_SEL_PVTPLL, reg_base + RV1126B_VCPCLKSEL_CON(0));
+}
+
+CLK_OF_DECLARE(rv1126b_cru, "rockchip,rv1126b-cru", rv1126b_clk_init);
+
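+/*
+ * The CRU is normally initialized early via CLK_OF_DECLARE above; the
+ * platform driver below serves as a fallback for configurations where
+ * of_clk_init() did not pick up the node.
+ */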
+struct clk_rv1126b_inits {
+ void (*inits)(struct device_node *np);
+};
+
+static const struct clk_rv1126b_inits clk_rv1126b_init = {
+ .inits = rv1126b_clk_init,
+};
+
+static const struct of_device_id clk_rv1126b_match_table[] = {
+ {
+ .compatible = "rockchip,rv1126b-cru",
+ .data = &clk_rv1126b_init,
+ },
+ { }
+};
+
+static int clk_rv1126b_probe(struct platform_device *pdev)
+{
+ const struct clk_rv1126b_inits *init_data;
+ struct device *dev = &pdev->dev;
+
+ init_data = device_get_match_data(dev);
+ if (!init_data)
+ return -EINVAL;
+
+ if (init_data->inits)
+ init_data->inits(dev->of_node);
+
+ return 0;
+}
+
+static struct platform_driver clk_rv1126b_driver = {
+ .driver = {
+ .name = "clk-rv1126b",
+ .of_match_table = clk_rv1126b_match_table,
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(clk_rv1126b_driver, clk_rv1126b_probe);
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 19caf26c991b..2601df3b1066 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -722,6 +722,30 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
}
EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk);
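+/*
+ * Variant of rockchip_clk_register_armclk() that registers armclk from a
+ * plain clk_branch mux description and delegates rate control to the
+ * multi-PLL cpuclk implementation.
+ */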
+void rockchip_clk_register_armclk_multi_pll(struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ const struct rockchip_cpuclk_rate_table *rates,
+ int nrates)
+{
+ struct clk *clk;
+
+ clk = rockchip_clk_register_cpuclk_multi_pll(list->name, list->parent_names,
+ list->num_parents, ctx->reg_base,
+ list->muxdiv_offset, list->mux_shift,
+ list->mux_width, list->mux_flags,
+ list->div_offset, list->div_shift,
+ list->div_width, list->div_flags,
+ list->flags, &ctx->lock, rates, nrates);
+ if (IS_ERR(clk)) {
+ pr_err("%s: failed to register clock %s: %ld\n",
+ __func__, list->name, PTR_ERR(clk));
+ return;
+ }
+
+ rockchip_clk_set_lookup(ctx, clk, list->id);
+}
+EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk_multi_pll);
+
void rockchip_clk_protect_critical(const char *const clocks[],
int nclocks)
{
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
index 7c5e74c7a2e2..b2fff1d13a4a 100644
--- a/drivers/clk/rockchip/clk.h
+++ b/drivers/clk/rockchip/clk.h
@@ -99,6 +99,73 @@ struct clk;
#define RV1126_EMMC_CON0 0x450
#define RV1126_EMMC_CON1 0x454
+#define RV1126B_TOPCRU_BASE 0x0
+#define RV1126B_BUSCRU_BASE 0x10000
+#define RV1126B_PERICRU_BASE 0x20000
+#define RV1126B_CORECRU_BASE 0x30000
+#define RV1126B_PMUCRU_BASE 0x40000
+#define RV1126B_PMU1CRU_BASE 0x50000
+#define RV1126B_DDRCRU_BASE 0x60000
+#define RV1126B_SUBDDRCRU_BASE 0x68000
+#define RV1126B_VICRU_BASE 0x70000
+#define RV1126B_VEPUCRU_BASE 0x80000
+#define RV1126B_NPUCRU_BASE 0x90000
+#define RV1126B_VDOCRU_BASE 0xA0000
+#define RV1126B_VCPCRU_BASE 0xB0000
+
+#define RV1126B_PLL_CON(x) ((x) * 0x4 + RV1126B_TOPCRU_BASE)
+#define RV1126B_MODE_CON (0x280 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_TOPCRU_BASE)
+#define RV1126B_SOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_TOPCRU_BASE)
+#define RV1126B_GLB_SRST_FST (0xc08 + RV1126B_TOPCRU_BASE)
+#define RV1126B_GLB_SRST_SND (0xc0c + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_CM_FRAC0_DIV_H (0xcc0 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_CM_FRAC1_DIV_H (0xcc4 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_CM_FRAC2_DIV_H (0xcc8 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_UART_FRAC0_DIV_H (0xccc + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_UART_FRAC1_DIV_H (0xcd0 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_AUDIO_FRAC0_DIV_H (0xcd4 + RV1126B_TOPCRU_BASE)
+#define RV1126B_CLK_AUDIO_FRAC1_DIV_H (0xcd8 + RV1126B_TOPCRU_BASE)
+#define RV1126B_BUSCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_BUSCRU_BASE)
+#define RV1126B_BUSCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_BUSCRU_BASE)
+#define RV1126B_BUSSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_BUSCRU_BASE)
+#define RV1126B_PERIPLL_CON(x) ((x) * 0x4 + RV1126B_PERICRU_BASE)
+#define RV1126B_PERICLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_PERICRU_BASE)
+#define RV1126B_PERICLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_PERICRU_BASE)
+#define RV1126B_PERISOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_PERICRU_BASE)
+#define RV1126B_CORECLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_CORECRU_BASE)
+#define RV1126B_CORECLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_CORECRU_BASE)
+#define RV1126B_CORESOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_CORECRU_BASE)
+#define RV1126B_PMUCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_PMUCRU_BASE)
+#define RV1126B_PMUCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_PMUCRU_BASE)
+#define RV1126B_PMUSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_PMUCRU_BASE)
+#define RV1126B_PMU1CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_PMU1CRU_BASE)
+#define RV1126B_PMU1CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_PMU1CRU_BASE)
+#define RV1126B_PMU1SOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_PMU1CRU_BASE)
+#define RV1126B_DDRCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_DDRCRU_BASE)
+#define RV1126B_DDRCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_DDRCRU_BASE)
+#define RV1126B_DDRSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_DDRCRU_BASE)
+#define RV1126B_SUBDDRPLL_CON(x) ((x) * 0x4 + RV1126B_SUBDDRCRU_BASE)
+#define RV1126B_SUBDDRCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_SUBDDRCRU_BASE)
+#define RV1126B_SUBDDRCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_SUBDDRCRU_BASE)
+#define RV1126B_SUBDDRSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_SUBDDRCRU_BASE)
+#define RV1126B_VICLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_VICRU_BASE)
+#define RV1126B_VICLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_VICRU_BASE)
+#define RV1126B_VISOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_VICRU_BASE)
+#define RV1126B_VEPUCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_VEPUCRU_BASE)
+#define RV1126B_VEPUCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_VEPUCRU_BASE)
+#define RV1126B_VEPUSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_VEPUCRU_BASE)
+#define RV1126B_NPUCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_NPUCRU_BASE)
+#define RV1126B_NPUCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_NPUCRU_BASE)
+#define RV1126B_NPUSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_NPUCRU_BASE)
+#define RV1126B_VDOCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_VDOCRU_BASE)
+#define RV1126B_VDOCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_VDOCRU_BASE)
+#define RV1126B_VDOSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_VDOCRU_BASE)
+#define RV1126B_VCPCLKSEL_CON(x) ((x) * 0x4 + 0x300 + RV1126B_VCPCRU_BASE)
+#define RV1126B_VCPCLKGATE_CON(x) ((x) * 0x4 + 0x800 + RV1126B_VCPCRU_BASE)
+#define RV1126B_VCPSOFTRST_CON(x) ((x) * 0x4 + 0xa00 + RV1126B_VCPCRU_BASE)
+
#define RK2928_PLL_CON(x) ((x) * 0x4)
#define RK2928_MODE_CON 0x40
#define RK2928_CLKSEL_CON(x) ((x) * 0x4 + 0x44)
@@ -208,6 +275,18 @@ struct clk;
#define RK3399_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x100)
#define RK3399_PMU_SOFTRST_CON(x) ((x) * 0x4 + 0x110)
+#define RK3506_PMU_CRU_BASE 0x10000
+#define RK3506_PLL_CON(x) ((x) * 0x4 + RK3506_PMU_CRU_BASE)
+#define RK3506_CLKSEL_CON(x) ((x) * 0x4 + 0x300)
+#define RK3506_CLKGATE_CON(x) ((x) * 0x4 + 0x800)
+#define RK3506_SOFTRST_CON(x) ((x) * 0x4 + 0xa00)
+#define RK3506_PMU_CLKSEL_CON(x) ((x) * 0x4 + 0x300 + RK3506_PMU_CRU_BASE)
+#define RK3506_PMU_CLKGATE_CON(x) ((x) * 0x4 + 0x800 + RK3506_PMU_CRU_BASE)
+#define RK3506_MODE_CON 0x280
+#define RK3506_GLB_CNT_TH 0xc00
+#define RK3506_GLB_SRST_FST 0xc08
+#define RK3506_GLB_SRST_SND 0xc0c
+
#define RK3528_PMU_CRU_BASE 0x10000
#define RK3528_PCIE_CRU_BASE 0x20000
#define RK3528_DDRPHY_CRU_BASE 0x28000
@@ -622,6 +701,17 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
const struct rockchip_cpuclk_rate_table *rates,
int nrates, void __iomem *reg_base, spinlock_t *lock);
+struct clk *rockchip_clk_register_cpuclk_multi_pll(const char *name,
+ const char *const *parent_names,
+ u8 num_parents, void __iomem *base,
+ int muxdiv_offset, u8 mux_shift,
+ u8 mux_width, u8 mux_flags,
+ int div_offset, u8 div_shift,
+ u8 div_width, u8 div_flags,
+ unsigned long flags, spinlock_t *lock,
+ const struct rockchip_cpuclk_rate_table *rates,
+ int nrates);
+
struct clk *rockchip_clk_register_mmc(const char *name,
const char *const *parent_names, u8 num_parents,
void __iomem *reg,
@@ -1208,6 +1298,10 @@ void rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
const struct rockchip_cpuclk_reg_data *reg_data,
const struct rockchip_cpuclk_rate_table *rates,
int nrates);
+void rockchip_clk_register_armclk_multi_pll(struct rockchip_clk_provider *ctx,
+ struct rockchip_clk_branch *list,
+ const struct rockchip_cpuclk_rate_table *rates,
+ int nrates);
void rockchip_clk_protect_critical(const char *const clocks[], int nclocks);
void rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
unsigned int reg, void (*cb)(void));
@@ -1246,6 +1340,8 @@ static inline void rockchip_register_softrst(struct device_node *np,
return rockchip_register_softrst_lut(np, NULL, num_regs, base, flags);
}
+void rv1126b_rst_init(struct device_node *np, void __iomem *reg_base);
+void rk3506_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3528_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3562_rst_init(struct device_node *np, void __iomem *reg_base);
void rk3576_rst_init(struct device_node *np, void __iomem *reg_base);
diff --git a/drivers/clk/rockchip/rst-rk3506.c b/drivers/clk/rockchip/rst-rk3506.c
new file mode 100644
index 000000000000..c3abde60f3c6
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rk3506.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ * Author: Finley Xiao <finley.xiao@rock-chips.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rk3506-cru.h>
+#include "clk.h"
+
+/* 0xFF9A0000 + 0x0A00 */
+#define RK3506_CRU_RESET_OFFSET(id, reg, bit) [id] = (0 + reg * 16 + bit)
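+/* each 4-byte SOFTRST_CON register carries 16 reset bits, hence reg * 16 + bit */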
+
+/* mapping table for reset ID to register offset */
+static const int rk3506_register_offset[] = {
+ /* CRU-->SOFTRST_CON00 */
+ RK3506_CRU_RESET_OFFSET(SRST_NCOREPORESET0_AC, 0, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_NCOREPORESET1_AC, 0, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_NCOREPORESET2_AC, 0, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_NCORESET0_AC, 0, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_NCORESET1_AC, 0, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_NCORESET2_AC, 0, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_NL2RESET_AC, 0, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_A_CORE_BIU_AC, 0, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_H_M0_AC, 0, 10),
+
+ /* CRU-->SOFTRST_CON02 */
+ RK3506_CRU_RESET_OFFSET(SRST_NDBGRESET, 2, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_P_CORE_BIU, 2, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_PMU, 2, 15),
+
+ /* CRU-->SOFTRST_CON03 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_DBG, 3, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_POT_DBG, 3, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_P_CORE_GRF, 3, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_CORE_EMA_DETECT, 3, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_REF_PVTPLL_CORE, 3, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO1, 3, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_DB_GPIO1, 3, 9),
+
+ /* CRU-->SOFTRST_CON04 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_CORE_PERI_BIU, 4, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DSMC, 4, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DSMC, 4, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_FLEXBUS, 4, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_A_FLEXBUS, 4, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_H_FLEXBUS, 4, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DSMC_SLV, 4, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_H_DSMC_SLV, 4, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_DSMC_SLV, 4, 13),
+
+ /* CRU-->SOFTRST_CON05 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_BUS_BIU, 5, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_BUS_BIU, 5, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_P_BUS_BIU, 5, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_A_SYSRAM, 5, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SYSRAM, 5, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DMAC0, 5, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DMAC1, 5, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_H_M0, 5, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_M0_JTAG, 5, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_H_CRYPTO, 5, 15),
+
+ /* CRU-->SOFTRST_CON06 */
+ RK3506_CRU_RESET_OFFSET(SRST_H_RNG, 6, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_P_BUS_GRF, 6, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_P_TIMER0, 6, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH0, 6, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH1, 6, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH2, 6, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH3, 6, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH4, 6, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_TIMER0_CH5, 6, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_P_WDT0, 6, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_T_WDT0, 6, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_P_WDT1, 6, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_T_WDT1, 6, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_P_MAILBOX, 6, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_INTMUX, 6, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_P_SPINLOCK, 6, 15),
+
+ /* CRU-->SOFTRST_CON07 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_DDRC, 7, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_H_DDRPHY, 7, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DDRMON, 7, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_DDRMON_OSC, 7, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DDR_LPC, 7, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_H_USBOTG0, 7, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_USBOTG0_ADP, 7, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_H_USBOTG1, 7, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_USBOTG1_ADP, 7, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_P_USBPHY, 7, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_USBPHY_POR, 7, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_USBPHY_OTG0, 7, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_USBPHY_OTG1, 7, 14),
+
+ /* CRU-->SOFTRST_CON08 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_DMA2DDR, 8, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DMA2DDR, 8, 1),
+
+ /* CRU-->SOFTRST_CON09 */
+ RK3506_CRU_RESET_OFFSET(SRST_USBOTG0_UTMI, 9, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_USBOTG1_UTMI, 9, 1),
+
+ /* CRU-->SOFTRST_CON10 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_DDRC_0, 10, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DDRC_1, 10, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_A_DDR_BIU, 10, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_DDRC, 10, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_DDRMON, 10, 4),
+
+ /* CRU-->SOFTRST_CON11 */
+ RK3506_CRU_RESET_OFFSET(SRST_H_LSPERI_BIU, 11, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART0, 11, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART1, 11, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART2, 11, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART3, 11, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART4, 11, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_UART0, 11, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_UART1, 11, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_UART2, 11, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_UART3, 11, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_UART4, 11, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_I2C0, 11, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_I2C0, 11, 15),
+
+ /* CRU-->SOFTRST_CON12 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_I2C1, 12, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_I2C1, 12, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_P_I2C2, 12, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_I2C2, 12, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_P_PWM1, 12, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_PWM1, 12, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_P_SPI0, 12, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_SPI0, 12, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_P_SPI1, 12, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_SPI1, 12, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO2, 12, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_DB_GPIO2, 12, 15),
+
+ /* CRU-->SOFTRST_CON13 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO3, 13, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_DB_GPIO3, 13, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO4, 13, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_DB_GPIO4, 13, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_CAN0, 13, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_CAN0, 13, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_H_CAN1, 13, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_CAN1, 13, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_H_PDM, 13, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_M_PDM, 13, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_PDM, 13, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_SPDIFTX, 13, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SPDIFTX, 13, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SPDIFRX, 13, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_SPDIFRX, 13, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI0, 13, 15),
+
+ /* CRU-->SOFTRST_CON14 */
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI0, 14, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI1, 14, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI1, 14, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_ASRC0, 14, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_ASRC0, 14, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_H_ASRC1, 14, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_ASRC1, 14, 8),
+
+ /* CRU-->SOFTRST_CON17 */
+ RK3506_CRU_RESET_OFFSET(SRST_H_HSPERI_BIU, 17, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SDMMC, 17, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_H_FSPI, 17, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_S_FSPI, 17, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_P_SPI2, 17, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_A_MAC0, 17, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_A_MAC1, 17, 12),
+
+ /* CRU-->SOFTRST_CON18 */
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI2, 18, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI2, 18, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI3, 18, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI3, 18, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_H_SAI4, 18, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_M_SAI4, 18, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_H_DSM, 18, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_M_DSM, 18, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_AUDIO_ADC, 18, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_M_AUDIO_ADC, 18, 15),
+
+ /* CRU-->SOFTRST_CON19 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_SARADC, 19, 0),
+ RK3506_CRU_RESET_OFFSET(SRST_SARADC, 19, 1),
+ RK3506_CRU_RESET_OFFSET(SRST_SARADC_PHY, 19, 2),
+ RK3506_CRU_RESET_OFFSET(SRST_P_OTPC_NS, 19, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_SBPI_OTPC_NS, 19, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_USER_OTPC_NS, 19, 5),
+ RK3506_CRU_RESET_OFFSET(SRST_P_UART5, 19, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_UART5, 19, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO234_IOC, 19, 8),
+
+ /* CRU-->SOFTRST_CON21 */
+ RK3506_CRU_RESET_OFFSET(SRST_A_VIO_BIU, 21, 3),
+ RK3506_CRU_RESET_OFFSET(SRST_H_VIO_BIU, 21, 4),
+ RK3506_CRU_RESET_OFFSET(SRST_H_RGA, 21, 6),
+ RK3506_CRU_RESET_OFFSET(SRST_A_RGA, 21, 7),
+ RK3506_CRU_RESET_OFFSET(SRST_CORE_RGA, 21, 8),
+ RK3506_CRU_RESET_OFFSET(SRST_A_VOP, 21, 9),
+ RK3506_CRU_RESET_OFFSET(SRST_H_VOP, 21, 10),
+ RK3506_CRU_RESET_OFFSET(SRST_VOP, 21, 11),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DPHY, 21, 12),
+ RK3506_CRU_RESET_OFFSET(SRST_P_DSI_HOST, 21, 13),
+ RK3506_CRU_RESET_OFFSET(SRST_P_TSADC, 21, 14),
+ RK3506_CRU_RESET_OFFSET(SRST_TSADC, 21, 15),
+
+ /* CRU-->SOFTRST_CON22 */
+ RK3506_CRU_RESET_OFFSET(SRST_P_GPIO1_IOC, 22, 1),
+};
+
+void rk3506_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rk3506_register_offset,
+ ARRAY_SIZE(rk3506_register_offset),
+ reg_base + RK3506_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
diff --git a/drivers/clk/rockchip/rst-rv1126b.c b/drivers/clk/rockchip/rst-rv1126b.c
new file mode 100644
index 000000000000..c75b0d885ca2
--- /dev/null
+++ b/drivers/clk/rockchip/rst-rv1126b.c
@@ -0,0 +1,443 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ * Author: Elaine Zhang <zhangqing@rock-chips.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <dt-bindings/reset/rockchip,rv1126b-cru.h>
+#include "clk.h"
+
+/* 0x20000000 + 0x0A00 */
+#define TOPCRU_RESET_OFFSET(id, reg, bit) [id] = (0x0 * 4 + reg * 16 + bit)
+/* 0x20010000 + 0x0A00 */
+#define BUSCRU_RESET_OFFSET(id, reg, bit) [id] = (0x10000 * 4 + reg * 16 + bit)
+/* 0x20020000 + 0x0A00 */
+#define PERICRU_RESET_OFFSET(id, reg, bit) [id] = (0x20000 * 4 + reg * 16 + bit)
+/* 0x20030000 + 0x0A00 */
+#define CORECRU_RESET_OFFSET(id, reg, bit) [id] = (0x30000 * 4 + reg * 16 + bit)
+/* 0x20040000 + 0x0A00 */
+#define PMUCRU_RESET_OFFSET(id, reg, bit) [id] = (0x40000 * 4 + reg * 16 + bit)
+/* 0x20050000 + 0x0A00 */
+#define PMU1CRU_RESET_OFFSET(id, reg, bit) [id] = (0x50000 * 4 + reg * 16 + bit)
+/* 0x20060000 + 0x0A00 */
+#define DDRCRU_RESET_OFFSET(id, reg, bit) [id] = (0x60000 * 4 + reg * 16 + bit)
+/* 0x20068000 + 0x0A00 */
+#define SUBDDRCRU_RESET_OFFSET(id, reg, bit) [id] = (0x68000 * 4 + reg * 16 + bit)
+/* 0x20070000 + 0x0A00 */
+#define VICRU_RESET_OFFSET(id, reg, bit) [id] = (0x70000 * 4 + reg * 16 + bit)
+/* 0x20080000 + 0x0A00 */
+#define VEPUCRU_RESET_OFFSET(id, reg, bit) [id] = (0x80000 * 4 + reg * 16 + bit)
+/* 0x20090000 + 0x0A00 */
+#define NPUCRU_RESET_OFFSET(id, reg, bit) [id] = (0x90000 * 4 + reg * 16 + bit)
+/* 0x200A0000 + 0x0A00 */
+#define VDOCRU_RESET_OFFSET(id, reg, bit) [id] = (0xA0000 * 4 + reg * 16 + bit)
+/* 0x200B0000 + 0x0A00 */
+#define VCPCRU_RESET_OFFSET(id, reg, bit) [id] = (0xB0000 * 4 + reg * 16 + bit)
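+
+/*
+ * A sub-CRU at byte offset "base" starts base / 4 registers into the map;
+ * with 16 reset bits per 4-byte SOFTRST register that is
+ * base / 4 * 16 == base * 4 lookup slots, hence the "base * 4" term above.
+ */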
+
+/* mapping table for reset ID to register offset */
+static const int rv1126b_register_offset[] = {
+ /* TOPCRU-->SOFTRST_CON00 */
+
+ /* TOPCRU-->SOFTRST_CON15 */
+ TOPCRU_RESET_OFFSET(SRST_P_CRU, 15, 1),
+ TOPCRU_RESET_OFFSET(SRST_P_CRU_BIU, 15, 2),
+
+ /* BUSCRU-->SOFTRST_CON00 */
+ BUSCRU_RESET_OFFSET(SRST_A_TOP_BIU, 0, 0),
+ BUSCRU_RESET_OFFSET(SRST_A_RKCE_BIU, 0, 1),
+ BUSCRU_RESET_OFFSET(SRST_A_BUS_BIU, 0, 2),
+ BUSCRU_RESET_OFFSET(SRST_H_BUS_BIU, 0, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_BUS_BIU, 0, 4),
+ BUSCRU_RESET_OFFSET(SRST_P_CRU_BUS, 0, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_SYS_GRF, 0, 6),
+ BUSCRU_RESET_OFFSET(SRST_H_BOOTROM, 0, 7),
+ BUSCRU_RESET_OFFSET(SRST_A_GIC400, 0, 8),
+ BUSCRU_RESET_OFFSET(SRST_A_SPINLOCK, 0, 9),
+ BUSCRU_RESET_OFFSET(SRST_P_WDT_NS, 0, 10),
+ BUSCRU_RESET_OFFSET(SRST_T_WDT_NS, 0, 11),
+
+ /* BUSCRU-->SOFTRST_CON01 */
+ BUSCRU_RESET_OFFSET(SRST_P_WDT_HPMCU, 1, 0),
+ BUSCRU_RESET_OFFSET(SRST_T_WDT_HPMCU, 1, 1),
+ BUSCRU_RESET_OFFSET(SRST_H_CACHE, 1, 2),
+ BUSCRU_RESET_OFFSET(SRST_P_HPMCU_MAILBOX, 1, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_HPMCU_INTMUX, 1, 4),
+ BUSCRU_RESET_OFFSET(SRST_HPMCU_FULL_CLUSTER, 1, 5),
+ BUSCRU_RESET_OFFSET(SRST_HPMCU_PWUP, 1, 6),
+ BUSCRU_RESET_OFFSET(SRST_HPMCU_ONLY_CORE, 1, 7),
+ BUSCRU_RESET_OFFSET(SRST_T_HPMCU_JTAG, 1, 8),
+ BUSCRU_RESET_OFFSET(SRST_P_RKDMA, 1, 11),
+ BUSCRU_RESET_OFFSET(SRST_A_RKDMA, 1, 12),
+
+ /* BUSCRU-->SOFTRST_CON02 */
+ BUSCRU_RESET_OFFSET(SRST_P_DCF, 2, 0),
+ BUSCRU_RESET_OFFSET(SRST_A_DCF, 2, 1),
+ BUSCRU_RESET_OFFSET(SRST_H_RGA, 2, 2),
+ BUSCRU_RESET_OFFSET(SRST_A_RGA, 2, 3),
+ BUSCRU_RESET_OFFSET(SRST_CORE_RGA, 2, 4),
+ BUSCRU_RESET_OFFSET(SRST_P_TIMER, 2, 5),
+ BUSCRU_RESET_OFFSET(SRST_TIMER0, 2, 6),
+ BUSCRU_RESET_OFFSET(SRST_TIMER1, 2, 7),
+ BUSCRU_RESET_OFFSET(SRST_TIMER2, 2, 8),
+ BUSCRU_RESET_OFFSET(SRST_TIMER3, 2, 9),
+ BUSCRU_RESET_OFFSET(SRST_TIMER4, 2, 10),
+ BUSCRU_RESET_OFFSET(SRST_TIMER5, 2, 11),
+ BUSCRU_RESET_OFFSET(SRST_A_RKCE, 2, 12),
+ BUSCRU_RESET_OFFSET(SRST_PKA_RKCE, 2, 13),
+ BUSCRU_RESET_OFFSET(SRST_H_RKRNG_S, 2, 14),
+ BUSCRU_RESET_OFFSET(SRST_H_RKRNG_NS, 2, 15),
+
+ /* BUSCRU-->SOFTRST_CON03 */
+ BUSCRU_RESET_OFFSET(SRST_P_I2C0, 3, 0),
+ BUSCRU_RESET_OFFSET(SRST_I2C0, 3, 1),
+ BUSCRU_RESET_OFFSET(SRST_P_I2C1, 3, 2),
+ BUSCRU_RESET_OFFSET(SRST_I2C1, 3, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_I2C3, 3, 4),
+ BUSCRU_RESET_OFFSET(SRST_I2C3, 3, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_I2C4, 3, 6),
+ BUSCRU_RESET_OFFSET(SRST_I2C4, 3, 7),
+ BUSCRU_RESET_OFFSET(SRST_P_I2C5, 3, 8),
+ BUSCRU_RESET_OFFSET(SRST_I2C5, 3, 9),
+ BUSCRU_RESET_OFFSET(SRST_P_SPI0, 3, 10),
+ BUSCRU_RESET_OFFSET(SRST_SPI0, 3, 11),
+ BUSCRU_RESET_OFFSET(SRST_P_SPI1, 3, 12),
+ BUSCRU_RESET_OFFSET(SRST_SPI1, 3, 13),
+
+ /* BUSCRU-->SOFTRST_CON04 */
+ BUSCRU_RESET_OFFSET(SRST_P_PWM0, 4, 0),
+ BUSCRU_RESET_OFFSET(SRST_PWM0, 4, 1),
+ BUSCRU_RESET_OFFSET(SRST_P_PWM2, 4, 4),
+ BUSCRU_RESET_OFFSET(SRST_PWM2, 4, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_PWM3, 4, 8),
+ BUSCRU_RESET_OFFSET(SRST_PWM3, 4, 9),
+
+ /* BUSCRU-->SOFTRST_CON05 */
+ BUSCRU_RESET_OFFSET(SRST_P_UART1, 5, 0),
+ BUSCRU_RESET_OFFSET(SRST_S_UART1, 5, 1),
+ BUSCRU_RESET_OFFSET(SRST_P_UART2, 5, 2),
+ BUSCRU_RESET_OFFSET(SRST_S_UART2, 5, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_UART3, 5, 4),
+ BUSCRU_RESET_OFFSET(SRST_S_UART3, 5, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_UART4, 5, 6),
+ BUSCRU_RESET_OFFSET(SRST_S_UART4, 5, 7),
+ BUSCRU_RESET_OFFSET(SRST_P_UART5, 5, 8),
+ BUSCRU_RESET_OFFSET(SRST_S_UART5, 5, 9),
+ BUSCRU_RESET_OFFSET(SRST_P_UART6, 5, 10),
+ BUSCRU_RESET_OFFSET(SRST_S_UART6, 5, 11),
+ BUSCRU_RESET_OFFSET(SRST_P_UART7, 5, 12),
+ BUSCRU_RESET_OFFSET(SRST_S_UART7, 5, 13),
+
+ /* BUSCRU-->SOFTRST_CON06 */
+ BUSCRU_RESET_OFFSET(SRST_P_TSADC, 6, 0),
+ BUSCRU_RESET_OFFSET(SRST_TSADC, 6, 1),
+ BUSCRU_RESET_OFFSET(SRST_H_SAI0, 6, 2),
+ BUSCRU_RESET_OFFSET(SRST_M_SAI0, 6, 3),
+ BUSCRU_RESET_OFFSET(SRST_H_SAI1, 6, 4),
+ BUSCRU_RESET_OFFSET(SRST_M_SAI1, 6, 5),
+ BUSCRU_RESET_OFFSET(SRST_H_SAI2, 6, 6),
+ BUSCRU_RESET_OFFSET(SRST_M_SAI2, 6, 7),
+ BUSCRU_RESET_OFFSET(SRST_H_RKDSM, 6, 8),
+ BUSCRU_RESET_OFFSET(SRST_M_RKDSM, 6, 9),
+ BUSCRU_RESET_OFFSET(SRST_H_PDM, 6, 10),
+ BUSCRU_RESET_OFFSET(SRST_M_PDM, 6, 11),
+ BUSCRU_RESET_OFFSET(SRST_PDM, 6, 12),
+
+ /* BUSCRU-->SOFTRST_CON07 */
+ BUSCRU_RESET_OFFSET(SRST_H_ASRC0, 7, 0),
+ BUSCRU_RESET_OFFSET(SRST_ASRC0, 7, 1),
+ BUSCRU_RESET_OFFSET(SRST_H_ASRC1, 7, 2),
+ BUSCRU_RESET_OFFSET(SRST_ASRC1, 7, 3),
+ BUSCRU_RESET_OFFSET(SRST_P_AUDIO_ADC_BUS, 7, 4),
+ BUSCRU_RESET_OFFSET(SRST_M_AUDIO_ADC_BUS, 7, 5),
+ BUSCRU_RESET_OFFSET(SRST_P_RKCE, 7, 6),
+ BUSCRU_RESET_OFFSET(SRST_H_NS_RKCE, 7, 7),
+ BUSCRU_RESET_OFFSET(SRST_P_OTPC_NS, 7, 8),
+ BUSCRU_RESET_OFFSET(SRST_SBPI_OTPC_NS, 7, 9),
+ BUSCRU_RESET_OFFSET(SRST_USER_OTPC_NS, 7, 10),
+ BUSCRU_RESET_OFFSET(SRST_OTPC_ARB, 7, 11),
+ BUSCRU_RESET_OFFSET(SRST_P_OTP_MASK, 7, 12),
+
+ /* PERICRU-->SOFTRST_CON00 */
+ PERICRU_RESET_OFFSET(SRST_A_PERI_BIU, 0, 0),
+ PERICRU_RESET_OFFSET(SRST_P_PERI_BIU, 0, 1),
+ PERICRU_RESET_OFFSET(SRST_P_RTC_BIU, 0, 2),
+ PERICRU_RESET_OFFSET(SRST_P_CRU_PERI, 0, 3),
+ PERICRU_RESET_OFFSET(SRST_P_PERI_GRF, 0, 4),
+ PERICRU_RESET_OFFSET(SRST_P_GPIO1, 0, 5),
+ PERICRU_RESET_OFFSET(SRST_DB_GPIO1, 0, 6),
+ PERICRU_RESET_OFFSET(SRST_P_IOC_VCCIO1, 0, 7),
+ PERICRU_RESET_OFFSET(SRST_A_USB3OTG, 0, 8),
+ PERICRU_RESET_OFFSET(SRST_H_USB2HOST, 0, 11),
+ PERICRU_RESET_OFFSET(SRST_H_ARB_USB2HOST, 0, 12),
+ PERICRU_RESET_OFFSET(SRST_P_RTC_TEST, 0, 13),
+
+ /* PERICRU-->SOFTRST_CON01 */
+ PERICRU_RESET_OFFSET(SRST_H_EMMC, 1, 0),
+ PERICRU_RESET_OFFSET(SRST_H_FSPI0, 1, 1),
+ PERICRU_RESET_OFFSET(SRST_H_XIP_FSPI0, 1, 2),
+ PERICRU_RESET_OFFSET(SRST_S_2X_FSPI0, 1, 3),
+ PERICRU_RESET_OFFSET(SRST_UTMI_USB2HOST, 1, 5),
+ PERICRU_RESET_OFFSET(SRST_REF_PIPEPHY, 1, 7),
+ PERICRU_RESET_OFFSET(SRST_P_PIPEPHY, 1, 8),
+ PERICRU_RESET_OFFSET(SRST_P_PIPEPHY_GRF, 1, 9),
+ PERICRU_RESET_OFFSET(SRST_P_USB2PHY, 1, 10),
+ PERICRU_RESET_OFFSET(SRST_POR_USB2PHY, 1, 11),
+ PERICRU_RESET_OFFSET(SRST_OTG_USB2PHY, 1, 12),
+ PERICRU_RESET_OFFSET(SRST_HOST_USB2PHY, 1, 13),
+
+ /* CORECRU-->SOFTRST_CON00 */
+ CORECRU_RESET_OFFSET(SRST_REF_PVTPLL_CORE, 0, 0),
+ CORECRU_RESET_OFFSET(SRST_NCOREPORESET0, 0, 1),
+ CORECRU_RESET_OFFSET(SRST_NCORESET0, 0, 2),
+ CORECRU_RESET_OFFSET(SRST_NCOREPORESET1, 0, 3),
+ CORECRU_RESET_OFFSET(SRST_NCORESET1, 0, 4),
+ CORECRU_RESET_OFFSET(SRST_NCOREPORESET2, 0, 5),
+ CORECRU_RESET_OFFSET(SRST_NCORESET2, 0, 6),
+ CORECRU_RESET_OFFSET(SRST_NCOREPORESET3, 0, 7),
+ CORECRU_RESET_OFFSET(SRST_NCORESET3, 0, 8),
+ CORECRU_RESET_OFFSET(SRST_NDBGRESET, 0, 9),
+ CORECRU_RESET_OFFSET(SRST_NL2RESET, 0, 10),
+
+ /* CORECRU-->SOFTRST_CON01 */
+ CORECRU_RESET_OFFSET(SRST_A_CORE_BIU, 1, 0),
+ CORECRU_RESET_OFFSET(SRST_P_CORE_BIU, 1, 1),
+ CORECRU_RESET_OFFSET(SRST_H_CORE_BIU, 1, 2),
+ CORECRU_RESET_OFFSET(SRST_P_DBG, 1, 3),
+ CORECRU_RESET_OFFSET(SRST_POT_DBG, 1, 4),
+ CORECRU_RESET_OFFSET(SRST_NT_DBG, 1, 5),
+ CORECRU_RESET_OFFSET(SRST_P_CORE_PVTPLL, 1, 6),
+ CORECRU_RESET_OFFSET(SRST_P_CRU_CORE, 1, 7),
+ CORECRU_RESET_OFFSET(SRST_P_CORE_GRF, 1, 8),
+ CORECRU_RESET_OFFSET(SRST_P_DFT2APB, 1, 10),
+
+ /* PMUCRU-->SOFTRST_CON00 */
+ PMUCRU_RESET_OFFSET(SRST_H_PMU_BIU, 0, 0),
+ PMUCRU_RESET_OFFSET(SRST_P_PMU_GPIO0, 0, 7),
+ PMUCRU_RESET_OFFSET(SRST_DB_PMU_GPIO0, 0, 8),
+ PMUCRU_RESET_OFFSET(SRST_P_PMU_HP_TIMER, 0, 10),
+ PMUCRU_RESET_OFFSET(SRST_PMU_HP_TIMER, 0, 11),
+ PMUCRU_RESET_OFFSET(SRST_PMU_32K_HP_TIMER, 0, 12),
+
+ /* PMUCRU-->SOFTRST_CON01 */
+ PMUCRU_RESET_OFFSET(SRST_P_PWM1, 1, 0),
+ PMUCRU_RESET_OFFSET(SRST_PWM1, 1, 1),
+ PMUCRU_RESET_OFFSET(SRST_P_I2C2, 1, 2),
+ PMUCRU_RESET_OFFSET(SRST_I2C2, 1, 3),
+ PMUCRU_RESET_OFFSET(SRST_P_UART0, 1, 4),
+ PMUCRU_RESET_OFFSET(SRST_S_UART0, 1, 5),
+
+ /* PMUCRU-->SOFTRST_CON02 */
+ PMUCRU_RESET_OFFSET(SRST_P_RCOSC_CTRL, 2, 0),
+ PMUCRU_RESET_OFFSET(SRST_REF_RCOSC_CTRL, 2, 2),
+ PMUCRU_RESET_OFFSET(SRST_P_IOC_PMUIO0, 2, 3),
+ PMUCRU_RESET_OFFSET(SRST_P_CRU_PMU, 2, 4),
+ PMUCRU_RESET_OFFSET(SRST_P_PMU_GRF, 2, 5),
+ PMUCRU_RESET_OFFSET(SRST_PREROLL, 2, 7),
+ PMUCRU_RESET_OFFSET(SRST_PREROLL_32K, 2, 8),
+ PMUCRU_RESET_OFFSET(SRST_H_PMU_SRAM, 2, 9),
+
+ /* PMUCRU-->SOFTRST_CON03 */
+ PMUCRU_RESET_OFFSET(SRST_P_WDT_LPMCU, 3, 0),
+ PMUCRU_RESET_OFFSET(SRST_T_WDT_LPMCU, 3, 1),
+ PMUCRU_RESET_OFFSET(SRST_LPMCU_FULL_CLUSTER, 3, 2),
+ PMUCRU_RESET_OFFSET(SRST_LPMCU_PWUP, 3, 3),
+ PMUCRU_RESET_OFFSET(SRST_LPMCU_ONLY_CORE, 3, 4),
+ PMUCRU_RESET_OFFSET(SRST_T_LPMCU_JTAG, 3, 5),
+ PMUCRU_RESET_OFFSET(SRST_P_LPMCU_MAILBOX, 3, 6),
+
+ /* PMU1CRU-->SOFTRST_CON00 */
+ PMU1CRU_RESET_OFFSET(SRST_P_SPI2AHB, 0, 0),
+ PMU1CRU_RESET_OFFSET(SRST_H_SPI2AHB, 0, 1),
+ PMU1CRU_RESET_OFFSET(SRST_H_FSPI1, 0, 2),
+ PMU1CRU_RESET_OFFSET(SRST_H_XIP_FSPI1, 0, 3),
+ PMU1CRU_RESET_OFFSET(SRST_S_1X_FSPI1, 0, 4),
+ PMU1CRU_RESET_OFFSET(SRST_P_IOC_PMUIO1, 0, 5),
+ PMU1CRU_RESET_OFFSET(SRST_P_CRU_PMU1, 0, 6),
+ PMU1CRU_RESET_OFFSET(SRST_P_AUDIO_ADC_PMU, 0, 7),
+ PMU1CRU_RESET_OFFSET(SRST_M_AUDIO_ADC_PMU, 0, 8),
+ PMU1CRU_RESET_OFFSET(SRST_H_PMU1_BIU, 0, 9),
+
+ /* PMU1CRU-->SOFTRST_CON01 */
+ PMU1CRU_RESET_OFFSET(SRST_P_LPDMA, 1, 0),
+ PMU1CRU_RESET_OFFSET(SRST_A_LPDMA, 1, 1),
+ PMU1CRU_RESET_OFFSET(SRST_H_LPSAI, 1, 2),
+ PMU1CRU_RESET_OFFSET(SRST_M_LPSAI, 1, 3),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_TDD, 1, 4),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_FE, 1, 5),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_AAD, 1, 6),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_APB, 1, 7),
+ PMU1CRU_RESET_OFFSET(SRST_P_AOA_SRAM, 1, 8),
+
+ /* DDRCRU-->SOFTRST_CON00 */
+ DDRCRU_RESET_OFFSET(SRST_P_DDR_BIU, 0, 1),
+ DDRCRU_RESET_OFFSET(SRST_P_DDRC, 0, 2),
+ DDRCRU_RESET_OFFSET(SRST_P_DDRMON, 0, 3),
+ DDRCRU_RESET_OFFSET(SRST_TIMER_DDRMON, 0, 4),
+ DDRCRU_RESET_OFFSET(SRST_P_DFICTRL, 0, 5),
+ DDRCRU_RESET_OFFSET(SRST_P_DDR_GRF, 0, 6),
+ DDRCRU_RESET_OFFSET(SRST_P_CRU_DDR, 0, 7),
+ DDRCRU_RESET_OFFSET(SRST_P_DDRPHY, 0, 8),
+ DDRCRU_RESET_OFFSET(SRST_P_DMA2DDR, 0, 9),
+
+ /* SUBDDRCRU-->SOFTRST_CON00 */
+ SUBDDRCRU_RESET_OFFSET(SRST_A_SYSMEM_BIU, 0, 0),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_SYSMEM, 0, 1),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDR_BIU, 0, 2),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDRSCH0_CPU, 0, 3),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDRSCH1_NPU, 0, 4),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDRSCH2_POE, 0, 5),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DDRSCH3_VI, 0, 6),
+ SUBDDRCRU_RESET_OFFSET(SRST_CORE_DDRC, 0, 7),
+ SUBDDRCRU_RESET_OFFSET(SRST_DDRMON, 0, 8),
+ SUBDDRCRU_RESET_OFFSET(SRST_DFICTRL, 0, 9),
+ SUBDDRCRU_RESET_OFFSET(SRST_RS, 0, 11),
+ SUBDDRCRU_RESET_OFFSET(SRST_A_DMA2DDR, 0, 12),
+ SUBDDRCRU_RESET_OFFSET(SRST_DDRPHY, 0, 13),
+
+ /* VICRU-->SOFTRST_CON00 */
+ VICRU_RESET_OFFSET(SRST_REF_PVTPLL_ISP, 0, 0),
+ VICRU_RESET_OFFSET(SRST_A_GMAC_BIU, 0, 1),
+ VICRU_RESET_OFFSET(SRST_A_VI_BIU, 0, 2),
+ VICRU_RESET_OFFSET(SRST_H_VI_BIU, 0, 3),
+ VICRU_RESET_OFFSET(SRST_P_VI_BIU, 0, 4),
+ VICRU_RESET_OFFSET(SRST_P_CRU_VI, 0, 5),
+ VICRU_RESET_OFFSET(SRST_P_VI_GRF, 0, 6),
+ VICRU_RESET_OFFSET(SRST_P_VI_PVTPLL, 0, 7),
+ VICRU_RESET_OFFSET(SRST_P_DSMC, 0, 8),
+ VICRU_RESET_OFFSET(SRST_A_DSMC, 0, 9),
+ VICRU_RESET_OFFSET(SRST_H_CAN0, 0, 10),
+ VICRU_RESET_OFFSET(SRST_CAN0, 0, 11),
+ VICRU_RESET_OFFSET(SRST_H_CAN1, 0, 12),
+ VICRU_RESET_OFFSET(SRST_CAN1, 0, 13),
+
+ /* VICRU-->SOFTRST_CON01 */
+ VICRU_RESET_OFFSET(SRST_P_GPIO2, 1, 0),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO2, 1, 1),
+ VICRU_RESET_OFFSET(SRST_P_GPIO4, 1, 2),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO4, 1, 3),
+ VICRU_RESET_OFFSET(SRST_P_GPIO5, 1, 4),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO5, 1, 5),
+ VICRU_RESET_OFFSET(SRST_P_GPIO6, 1, 6),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO6, 1, 7),
+ VICRU_RESET_OFFSET(SRST_P_GPIO7, 1, 8),
+ VICRU_RESET_OFFSET(SRST_DB_GPIO7, 1, 9),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO2, 1, 10),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO4, 1, 11),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO5, 1, 12),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO6, 1, 13),
+ VICRU_RESET_OFFSET(SRST_P_IOC_VCCIO7, 1, 14),
+
+ /* VICRU-->SOFTRST_CON02 */
+ VICRU_RESET_OFFSET(SRST_CORE_ISP, 2, 0),
+ VICRU_RESET_OFFSET(SRST_H_VICAP, 2, 1),
+ VICRU_RESET_OFFSET(SRST_A_VICAP, 2, 2),
+ VICRU_RESET_OFFSET(SRST_D_VICAP, 2, 3),
+ VICRU_RESET_OFFSET(SRST_ISP0_VICAP, 2, 4),
+ VICRU_RESET_OFFSET(SRST_CORE_VPSS, 2, 5),
+ VICRU_RESET_OFFSET(SRST_CORE_VPSL, 2, 6),
+ VICRU_RESET_OFFSET(SRST_P_CSI2HOST0, 2, 7),
+ VICRU_RESET_OFFSET(SRST_P_CSI2HOST1, 2, 8),
+ VICRU_RESET_OFFSET(SRST_P_CSI2HOST2, 2, 9),
+ VICRU_RESET_OFFSET(SRST_P_CSI2HOST3, 2, 10),
+ VICRU_RESET_OFFSET(SRST_H_SDMMC0, 2, 11),
+ VICRU_RESET_OFFSET(SRST_A_GMAC, 2, 12),
+ VICRU_RESET_OFFSET(SRST_P_CSIPHY0, 2, 13),
+ VICRU_RESET_OFFSET(SRST_P_CSIPHY1, 2, 14),
+
+ /* VICRU-->SOFTRST_CON03 */
+ VICRU_RESET_OFFSET(SRST_P_MACPHY, 3, 0),
+ VICRU_RESET_OFFSET(SRST_MACPHY, 3, 1),
+ VICRU_RESET_OFFSET(SRST_P_SARADC1, 3, 2),
+ VICRU_RESET_OFFSET(SRST_SARADC1, 3, 3),
+ VICRU_RESET_OFFSET(SRST_P_SARADC2, 3, 5),
+ VICRU_RESET_OFFSET(SRST_SARADC2, 3, 6),
+
+ /* VEPUCRU-->SOFTRST_CON00 */
+ VEPUCRU_RESET_OFFSET(SRST_REF_PVTPLL_VEPU, 0, 0),
+ VEPUCRU_RESET_OFFSET(SRST_A_VEPU_BIU, 0, 1),
+ VEPUCRU_RESET_OFFSET(SRST_H_VEPU_BIU, 0, 2),
+ VEPUCRU_RESET_OFFSET(SRST_P_VEPU_BIU, 0, 3),
+ VEPUCRU_RESET_OFFSET(SRST_P_CRU_VEPU, 0, 4),
+ VEPUCRU_RESET_OFFSET(SRST_P_VEPU_GRF, 0, 5),
+ VEPUCRU_RESET_OFFSET(SRST_P_GPIO3, 0, 7),
+ VEPUCRU_RESET_OFFSET(SRST_DB_GPIO3, 0, 8),
+ VEPUCRU_RESET_OFFSET(SRST_P_IOC_VCCIO3, 0, 9),
+ VEPUCRU_RESET_OFFSET(SRST_P_SARADC0, 0, 10),
+ VEPUCRU_RESET_OFFSET(SRST_SARADC0, 0, 11),
+ VEPUCRU_RESET_OFFSET(SRST_H_SDMMC1, 0, 13),
+
+ /* VEPUCRU-->SOFTRST_CON01 */
+ VEPUCRU_RESET_OFFSET(SRST_P_VEPU_PVTPLL, 1, 0),
+ VEPUCRU_RESET_OFFSET(SRST_H_VEPU, 1, 1),
+ VEPUCRU_RESET_OFFSET(SRST_A_VEPU, 1, 2),
+ VEPUCRU_RESET_OFFSET(SRST_CORE_VEPU, 1, 3),
+
+ /* NPUCRU-->SOFTRST_CON00 */
+ NPUCRU_RESET_OFFSET(SRST_REF_PVTPLL_NPU, 0, 0),
+ NPUCRU_RESET_OFFSET(SRST_A_NPU_BIU, 0, 2),
+ NPUCRU_RESET_OFFSET(SRST_H_NPU_BIU, 0, 3),
+ NPUCRU_RESET_OFFSET(SRST_P_NPU_BIU, 0, 4),
+ NPUCRU_RESET_OFFSET(SRST_P_CRU_NPU, 0, 5),
+ NPUCRU_RESET_OFFSET(SRST_P_NPU_GRF, 0, 6),
+ NPUCRU_RESET_OFFSET(SRST_P_NPU_PVTPLL, 0, 8),
+ NPUCRU_RESET_OFFSET(SRST_H_RKNN, 0, 9),
+ NPUCRU_RESET_OFFSET(SRST_A_RKNN, 0, 10),
+
+ /* VDOCRU-->SOFTRST_CON00 */
+ VDOCRU_RESET_OFFSET(SRST_A_RKVDEC_BIU, 0, 0),
+ VDOCRU_RESET_OFFSET(SRST_A_VDO_BIU, 0, 1),
+ VDOCRU_RESET_OFFSET(SRST_H_VDO_BIU, 0, 3),
+ VDOCRU_RESET_OFFSET(SRST_P_VDO_BIU, 0, 4),
+ VDOCRU_RESET_OFFSET(SRST_P_CRU_VDO, 0, 5),
+ VDOCRU_RESET_OFFSET(SRST_P_VDO_GRF, 0, 6),
+ VDOCRU_RESET_OFFSET(SRST_A_RKVDEC, 0, 7),
+ VDOCRU_RESET_OFFSET(SRST_H_RKVDEC, 0, 8),
+ VDOCRU_RESET_OFFSET(SRST_HEVC_CA_RKVDEC, 0, 9),
+ VDOCRU_RESET_OFFSET(SRST_A_VOP, 0, 10),
+ VDOCRU_RESET_OFFSET(SRST_H_VOP, 0, 11),
+ VDOCRU_RESET_OFFSET(SRST_D_VOP, 0, 12),
+ VDOCRU_RESET_OFFSET(SRST_A_OOC, 0, 13),
+ VDOCRU_RESET_OFFSET(SRST_H_OOC, 0, 14),
+ VDOCRU_RESET_OFFSET(SRST_D_OOC, 0, 15),
+
+ /* VDOCRU-->SOFTRST_CON01 */
+ VDOCRU_RESET_OFFSET(SRST_H_RKJPEG, 1, 3),
+ VDOCRU_RESET_OFFSET(SRST_A_RKJPEG, 1, 4),
+ VDOCRU_RESET_OFFSET(SRST_A_RKMMU_DECOM, 1, 5),
+ VDOCRU_RESET_OFFSET(SRST_H_RKMMU_DECOM, 1, 6),
+ VDOCRU_RESET_OFFSET(SRST_D_DECOM, 1, 8),
+ VDOCRU_RESET_OFFSET(SRST_A_DECOM, 1, 9),
+ VDOCRU_RESET_OFFSET(SRST_P_DECOM, 1, 10),
+ VDOCRU_RESET_OFFSET(SRST_P_MIPI_DSI, 1, 12),
+ VDOCRU_RESET_OFFSET(SRST_P_DSIPHY, 1, 13),
+
+ /* VCPCRU-->SOFTRST_CON00 */
+ VCPCRU_RESET_OFFSET(SRST_REF_PVTPLL_VCP, 0, 0),
+ VCPCRU_RESET_OFFSET(SRST_A_VCP_BIU, 0, 1),
+ VCPCRU_RESET_OFFSET(SRST_H_VCP_BIU, 0, 2),
+ VCPCRU_RESET_OFFSET(SRST_P_VCP_BIU, 0, 3),
+ VCPCRU_RESET_OFFSET(SRST_P_CRU_VCP, 0, 4),
+ VCPCRU_RESET_OFFSET(SRST_P_VCP_GRF, 0, 5),
+ VCPCRU_RESET_OFFSET(SRST_P_VCP_PVTPLL, 0, 7),
+ VCPCRU_RESET_OFFSET(SRST_A_AISP_BIU, 0, 8),
+ VCPCRU_RESET_OFFSET(SRST_H_AISP_BIU, 0, 9),
+ VCPCRU_RESET_OFFSET(SRST_CORE_AISP, 0, 13),
+
+ /* VCPCRU-->SOFTRST_CON01 */
+ VCPCRU_RESET_OFFSET(SRST_H_FEC, 1, 0),
+ VCPCRU_RESET_OFFSET(SRST_A_FEC, 1, 1),
+ VCPCRU_RESET_OFFSET(SRST_CORE_FEC, 1, 2),
+ VCPCRU_RESET_OFFSET(SRST_H_AVSP, 1, 3),
+ VCPCRU_RESET_OFFSET(SRST_A_AVSP, 1, 4),
+};
+
+void rv1126b_rst_init(struct device_node *np, void __iomem *reg_base)
+{
+ rockchip_register_softrst_lut(np,
+ rv1126b_register_offset,
+ ARRAY_SIZE(rv1126b_register_offset),
+ reg_base + RV1126B_SOFTRST_CON(0),
+ ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
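+
+/*
+ * Editorial sketch, not part of the original patch: a peripheral driver
+ * would consume one of the reset lines declared above through the
+ * generic reset framework (the lookup below assumes a single unnamed
+ * reset in the consumer's devicetree node):
+ *
+ *	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ *	if (IS_ERR(rst))
+ *		return PTR_ERR(rst);
+ *	reset_control_assert(rst);
+ *	usleep_range(10, 20);
+ *	reset_control_deassert(rst);
+ */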
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 76a494e95027..70a8b82a0136 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -95,6 +95,16 @@ config EXYNOS_CLKOUT
 status of certain clocks from the SoC, but it could also be tied to
other devices as an input clock.
+config EXYNOS_ACPM_CLK
+ tristate "Clock driver controlled via ACPM interface"
+ depends on EXYNOS_ACPM_PROTOCOL || (COMPILE_TEST && !EXYNOS_ACPM_PROTOCOL)
+ help
+ This driver provides support for clocks that are controlled by
+ firmware that implements the ACPM interface.
+
+	  It uses the ACPM interface to interact with the firmware
+	  providing all the clock controls.
+
config TESLA_FSD_COMMON_CLK
bool "Tesla FSD clock controller support" if COMPILE_TEST
depends on COMMON_CLK_SAMSUNG
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index b77fe288e4bb..f3657f2e1b98 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_EXYNOS_5260_COMMON_CLK) += clk-exynos5260.o
obj-$(CONFIG_EXYNOS_5410_COMMON_CLK) += clk-exynos5410.o
obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5420.o
obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5-subcmu.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-artpec8.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
@@ -27,6 +28,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos990.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov920.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-gs101.o
+obj-$(CONFIG_EXYNOS_ACPM_CLK) += clk-acpm.o
obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
obj-$(CONFIG_TESLA_FSD_COMMON_CLK) += clk-fsd.o
diff --git a/drivers/clk/samsung/clk-acpm.c b/drivers/clk/samsung/clk-acpm.c
new file mode 100644
index 000000000000..b90809ce3f88
--- /dev/null
+++ b/drivers/clk/samsung/clk-acpm.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung Exynos ACPM protocol based clock driver.
+ *
+ * Copyright 2025 Linaro Ltd.
+ */
+
+#include <linux/array_size.h>
+#include <linux/clk-provider.h>
+#include <linux/container_of.h>
+#include <linux/device/devres.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+struct acpm_clk {
+ u32 id;
+ struct clk_hw hw;
+ unsigned int mbox_chan_id;
+ const struct acpm_handle *handle;
+};
+
+struct acpm_clk_variant {
+ const char *name;
+};
+
+struct acpm_clk_driver_data {
+ const struct acpm_clk_variant *clks;
+ unsigned int nr_clks;
+ unsigned int mbox_chan_id;
+};
+
+#define to_acpm_clk(clk) container_of(clk, struct acpm_clk, hw)
+
+#define ACPM_CLK(cname) \
+ { \
+ .name = cname, \
+ }
+
+static const struct acpm_clk_variant gs101_acpm_clks[] = {
+ ACPM_CLK("mif"),
+ ACPM_CLK("int"),
+ ACPM_CLK("cpucl0"),
+ ACPM_CLK("cpucl1"),
+ ACPM_CLK("cpucl2"),
+ ACPM_CLK("g3d"),
+ ACPM_CLK("g3dl2"),
+ ACPM_CLK("tpu"),
+ ACPM_CLK("intcam"),
+ ACPM_CLK("tnr"),
+ ACPM_CLK("cam"),
+ ACPM_CLK("mfc"),
+ ACPM_CLK("disp"),
+ ACPM_CLK("bo"),
+};
+
+static const struct acpm_clk_driver_data acpm_clk_gs101 = {
+ .clks = gs101_acpm_clks,
+ .nr_clks = ARRAY_SIZE(gs101_acpm_clks),
+ .mbox_chan_id = 0,
+};
+
+static unsigned long acpm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct acpm_clk *clk = to_acpm_clk(hw);
+
+ return clk->handle->ops.dvfs_ops.get_rate(clk->handle,
+ clk->mbox_chan_id, clk->id);
+}
+
+static int acpm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ /*
+ * We can't figure out what rate it will be, so just return the
+ * rate back to the caller. acpm_clk_recalc_rate() will be called
+ * after the rate is set and we'll know what rate the clock is
+ * running at then.
+ */
+ return 0;
+}
+
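+/*
+ * Editorial note, added for illustration only: because determine_rate
+ * accepts whatever the caller requests and the firmware rounds the rate
+ * internally, a consumer should read the effective rate back after
+ * setting it:
+ *
+ *	err = clk_set_rate(clk, 1000000000);
+ *	rate = clk_get_rate(clk);	(re-read via acpm_clk_recalc_rate)
+ */
+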
+static int acpm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct acpm_clk *clk = to_acpm_clk(hw);
+
+ return clk->handle->ops.dvfs_ops.set_rate(clk->handle,
+ clk->mbox_chan_id, clk->id, rate);
+}
+
+static const struct clk_ops acpm_clk_ops = {
+ .recalc_rate = acpm_clk_recalc_rate,
+ .determine_rate = acpm_clk_determine_rate,
+ .set_rate = acpm_clk_set_rate,
+};
+
+static int acpm_clk_register(struct device *dev, struct acpm_clk *aclk,
+ const char *name)
+{
+ struct clk_init_data init = {};
+
+ init.name = name;
+ init.ops = &acpm_clk_ops;
+ aclk->hw.init = &init;
+
+ return devm_clk_hw_register(dev, &aclk->hw);
+}
+
+static int acpm_clk_probe(struct platform_device *pdev)
+{
+ const struct acpm_handle *acpm_handle;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **hws;
+ struct device *dev = &pdev->dev;
+ struct acpm_clk *aclks;
+ unsigned int mbox_chan_id;
+ int i, err, count;
+
+ acpm_handle = devm_acpm_get_by_node(dev, dev->parent->of_node);
+ if (IS_ERR(acpm_handle))
+ return dev_err_probe(dev, PTR_ERR(acpm_handle),
+ "Failed to get acpm handle\n");
+
+ count = acpm_clk_gs101.nr_clks;
+ mbox_chan_id = acpm_clk_gs101.mbox_chan_id;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, count),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = count;
+ hws = clk_data->hws;
+
+ aclks = devm_kcalloc(dev, count, sizeof(*aclks), GFP_KERNEL);
+ if (!aclks)
+ return -ENOMEM;
+
+ for (i = 0; i < count; i++) {
+ struct acpm_clk *aclk = &aclks[i];
+
+ /*
+ * The code assumes the clock IDs start from zero,
+ * are sequential and do not have gaps.
+ */
+ aclk->id = i;
+ aclk->handle = acpm_handle;
+ aclk->mbox_chan_id = mbox_chan_id;
+
+ hws[i] = &aclk->hw;
+
+ err = acpm_clk_register(dev, aclk,
+ acpm_clk_gs101.clks[i].name);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to register clock\n");
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ clk_data);
+}
+
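+/*
+ * Editorial sketch, an illustrative assumption: with
+ * of_clk_hw_onecell_get the clocks are addressed by their index in
+ * gs101_acpm_clks, so a devicetree consumer of the provider node would
+ * reference them as, e.g.:
+ *
+ *	clocks = <&acpm_clk 2>;		(index 2 = "cpucl0" above)
+ */
+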
+static const struct platform_device_id acpm_clk_id[] = {
+ { "gs101-acpm-clk" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, acpm_clk_id);
+
+static struct platform_driver acpm_clk_driver = {
+ .driver = {
+ .name = "acpm-clocks",
+ },
+ .probe = acpm_clk_probe,
+ .id_table = acpm_clk_id,
+};
+module_platform_driver(acpm_clk_driver);
+
+MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@linaro.org>");
+MODULE_DESCRIPTION("Samsung Exynos ACPM clock driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/samsung/clk-artpec8.c b/drivers/clk/samsung/clk-artpec8.c
new file mode 100644
index 000000000000..0ea7c8b58674
--- /dev/null
+++ b/drivers/clk/samsung/clk-artpec8.c
@@ -0,0 +1,1044 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Samsung Electronics Co., Ltd.
+ * https://www.samsung.com
+ * Copyright (c) 2025 Axis Communications AB.
+ * https://www.axis.com
+ *
+ * Common Clock Framework support for ARTPEC-8 SoC.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/axis,artpec8-clk.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* NOTE: Must be equal to the last clock ID increased by one */
+#define CMU_CMU_NR_CLK (CLK_DOUT_CMU_VPP_CORE + 1)
+#define CMU_BUS_NR_CLK (CLK_DOUT_BUS_PCLK + 1)
+#define CMU_CORE_NR_CLK (CLK_DOUT_CORE_PCLK + 1)
+#define CMU_CPUCL_NR_CLK (CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK + 1)
+#define CMU_FSYS_NR_CLK (CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK + 1)
+#define CMU_IMEM_NR_CLK (CLK_GOUT_IMEM_PCLK_TMU0_APBIF + 1)
+#define CMU_PERI_NR_CLK (CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK + 1)
+
+/* Register Offset definitions for CMU_CMU (0x12400000) */
+#define PLL_LOCKTIME_PLL_AUDIO 0x0000
+#define PLL_LOCKTIME_PLL_SHARED0 0x0004
+#define PLL_LOCKTIME_PLL_SHARED1 0x0008
+#define PLL_CON0_PLL_AUDIO 0x0100
+#define PLL_CON0_PLL_SHARED0 0x0120
+#define PLL_CON0_PLL_SHARED1 0x0140
+#define CLK_CON_MUX_CLKCMU_2D 0x1000
+#define CLK_CON_MUX_CLKCMU_3D 0x1004
+#define CLK_CON_MUX_CLKCMU_BUS 0x1008
+#define CLK_CON_MUX_CLKCMU_BUS_DLP 0x100c
+#define CLK_CON_MUX_CLKCMU_CDC_CORE 0x1010
+#define CLK_CON_MUX_CLKCMU_FSYS_SCAN0 0x1014
+#define CLK_CON_MUX_CLKCMU_FSYS_SCAN1 0x1018
+#define CLK_CON_MUX_CLKCMU_IMEM_JPEG 0x101c
+#define CLK_CON_MUX_CLKCMU_PERI_DISP 0x1020
+#define CLK_CON_MUX_CLKCMU_CORE_BUS 0x1024
+#define CLK_CON_MUX_CLKCMU_CORE_DLP 0x1028
+#define CLK_CON_MUX_CLKCMU_CPUCL_SWITCH 0x1030
+#define CLK_CON_MUX_CLKCMU_DLP_CORE 0x1034
+#define CLK_CON_MUX_CLKCMU_FSYS_BUS 0x1038
+#define CLK_CON_MUX_CLKCMU_FSYS_IP 0x103c
+#define CLK_CON_MUX_CLKCMU_IMEM_ACLK 0x1054
+#define CLK_CON_MUX_CLKCMU_MIF_BUSP 0x1080
+#define CLK_CON_MUX_CLKCMU_MIF_SWITCH 0x1084
+#define CLK_CON_MUX_CLKCMU_PERI_IP 0x1088
+#define CLK_CON_MUX_CLKCMU_RSP_CORE 0x108c
+#define CLK_CON_MUX_CLKCMU_TRFM_CORE 0x1090
+#define CLK_CON_MUX_CLKCMU_VCA_ACE 0x1094
+#define CLK_CON_MUX_CLKCMU_VCA_OD 0x1098
+#define CLK_CON_MUX_CLKCMU_VIO_CORE 0x109c
+#define CLK_CON_MUX_CLKCMU_VIP0_CORE 0x10a0
+#define CLK_CON_MUX_CLKCMU_VIP1_CORE 0x10a4
+#define CLK_CON_MUX_CLKCMU_VPP_CORE 0x10a8
+
+#define CLK_CON_DIV_CLKCMU_BUS 0x1800
+#define CLK_CON_DIV_CLKCMU_BUS_DLP 0x1804
+#define CLK_CON_DIV_CLKCMU_CDC_CORE 0x1808
+#define CLK_CON_DIV_CLKCMU_FSYS_SCAN0 0x180c
+#define CLK_CON_DIV_CLKCMU_FSYS_SCAN1 0x1810
+#define CLK_CON_DIV_CLKCMU_IMEM_JPEG 0x1814
+#define CLK_CON_DIV_CLKCMU_MIF_SWITCH 0x1818
+#define CLK_CON_DIV_CLKCMU_CORE_DLP 0x181c
+#define CLK_CON_DIV_CLKCMU_CORE_MAIN 0x1820
+#define CLK_CON_DIV_CLKCMU_PERI_DISP 0x1824
+#define CLK_CON_DIV_CLKCMU_CPUCL_SWITCH 0x1828
+#define CLK_CON_DIV_CLKCMU_DLP_CORE 0x182c
+#define CLK_CON_DIV_CLKCMU_FSYS_BUS 0x1830
+#define CLK_CON_DIV_CLKCMU_FSYS_IP 0x1834
+#define CLK_CON_DIV_CLKCMU_VIO_AUDIO 0x1838
+#define CLK_CON_DIV_CLKCMU_GPU_2D 0x1848
+#define CLK_CON_DIV_CLKCMU_GPU_3D 0x184c
+#define CLK_CON_DIV_CLKCMU_IMEM_ACLK 0x1854
+#define CLK_CON_DIV_CLKCMU_MIF_BUSP 0x1884
+#define CLK_CON_DIV_CLKCMU_PERI_AUDIO 0x1890
+#define CLK_CON_DIV_CLKCMU_PERI_IP 0x1894
+#define CLK_CON_DIV_CLKCMU_RSP_CORE 0x1898
+#define CLK_CON_DIV_CLKCMU_TRFM_CORE 0x189c
+#define CLK_CON_DIV_CLKCMU_VCA_ACE 0x18a0
+#define CLK_CON_DIV_CLKCMU_VCA_OD 0x18a4
+#define CLK_CON_DIV_CLKCMU_VIO_CORE 0x18ac
+#define CLK_CON_DIV_CLKCMU_VIP0_CORE 0x18b0
+#define CLK_CON_DIV_CLKCMU_VIP1_CORE 0x18b4
+#define CLK_CON_DIV_CLKCMU_VPP_CORE 0x18b8
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18bc
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18c0
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18c4
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x18c8
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x18cc
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18d0
+
+static const unsigned long cmu_cmu_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_AUDIO,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_CON0_PLL_AUDIO,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ CLK_CON_MUX_CLKCMU_2D,
+ CLK_CON_MUX_CLKCMU_3D,
+ CLK_CON_MUX_CLKCMU_BUS,
+ CLK_CON_MUX_CLKCMU_BUS_DLP,
+ CLK_CON_MUX_CLKCMU_CDC_CORE,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN0,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN1,
+ CLK_CON_MUX_CLKCMU_IMEM_JPEG,
+ CLK_CON_MUX_CLKCMU_PERI_DISP,
+ CLK_CON_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_CLKCMU_CORE_DLP,
+ CLK_CON_MUX_CLKCMU_CPUCL_SWITCH,
+ CLK_CON_MUX_CLKCMU_DLP_CORE,
+ CLK_CON_MUX_CLKCMU_FSYS_BUS,
+ CLK_CON_MUX_CLKCMU_FSYS_IP,
+ CLK_CON_MUX_CLKCMU_IMEM_ACLK,
+ CLK_CON_MUX_CLKCMU_MIF_BUSP,
+ CLK_CON_MUX_CLKCMU_MIF_SWITCH,
+ CLK_CON_MUX_CLKCMU_PERI_IP,
+ CLK_CON_MUX_CLKCMU_RSP_CORE,
+ CLK_CON_MUX_CLKCMU_TRFM_CORE,
+ CLK_CON_MUX_CLKCMU_VCA_ACE,
+ CLK_CON_MUX_CLKCMU_VCA_OD,
+ CLK_CON_MUX_CLKCMU_VIO_CORE,
+ CLK_CON_MUX_CLKCMU_VIP0_CORE,
+ CLK_CON_MUX_CLKCMU_VIP1_CORE,
+ CLK_CON_MUX_CLKCMU_VPP_CORE,
+ CLK_CON_DIV_CLKCMU_BUS,
+ CLK_CON_DIV_CLKCMU_BUS_DLP,
+ CLK_CON_DIV_CLKCMU_CDC_CORE,
+ CLK_CON_DIV_CLKCMU_FSYS_SCAN0,
+ CLK_CON_DIV_CLKCMU_FSYS_SCAN1,
+ CLK_CON_DIV_CLKCMU_IMEM_JPEG,
+ CLK_CON_DIV_CLKCMU_MIF_SWITCH,
+ CLK_CON_DIV_CLKCMU_CORE_DLP,
+ CLK_CON_DIV_CLKCMU_CORE_MAIN,
+ CLK_CON_DIV_CLKCMU_PERI_DISP,
+ CLK_CON_DIV_CLKCMU_CPUCL_SWITCH,
+ CLK_CON_DIV_CLKCMU_DLP_CORE,
+ CLK_CON_DIV_CLKCMU_FSYS_BUS,
+ CLK_CON_DIV_CLKCMU_FSYS_IP,
+ CLK_CON_DIV_CLKCMU_VIO_AUDIO,
+ CLK_CON_DIV_CLKCMU_GPU_2D,
+ CLK_CON_DIV_CLKCMU_GPU_3D,
+ CLK_CON_DIV_CLKCMU_IMEM_ACLK,
+ CLK_CON_DIV_CLKCMU_MIF_BUSP,
+ CLK_CON_DIV_CLKCMU_PERI_AUDIO,
+ CLK_CON_DIV_CLKCMU_PERI_IP,
+ CLK_CON_DIV_CLKCMU_RSP_CORE,
+ CLK_CON_DIV_CLKCMU_TRFM_CORE,
+ CLK_CON_DIV_CLKCMU_VCA_ACE,
+ CLK_CON_DIV_CLKCMU_VCA_OD,
+ CLK_CON_DIV_CLKCMU_VIO_CORE,
+ CLK_CON_DIV_CLKCMU_VIP0_CORE,
+ CLK_CON_DIV_CLKCMU_VIP1_CORE,
+ CLK_CON_DIV_CLKCMU_VPP_CORE,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+};
+
+static const struct samsung_pll_rate_table artpec8_pll_audio_rates[] __initconst = {
+ PLL_36XX_RATE(25 * MHZ, 589823913U, 47, 1, 1, 12184),
+ PLL_36XX_RATE(25 * MHZ, 393215942U, 47, 3, 0, 12184),
+ PLL_36XX_RATE(25 * MHZ, 294911956U, 47, 1, 2, 12184),
+ PLL_36XX_RATE(25 * MHZ, 100000000U, 32, 2, 2, 0),
+ PLL_36XX_RATE(25 * MHZ, 98303985U, 47, 3, 2, 12184),
+ PLL_36XX_RATE(25 * MHZ, 49151992U, 47, 3, 3, 12184),
+};
+
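+/*
+ * Editorial note, assuming the standard Samsung pll36xx rate formula:
+ * fout = fin * (mdiv * 65536 + kdiv) / (pdiv * 65536 * 2^sdiv).
+ * Worked check against the first entry above:
+ * 25 MHz * (47 * 65536 + 12184) / (1 * 65536 * 2) = 589823913 Hz.
+ */
+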
+static const struct samsung_pll_clock cmu_cmu_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_SHARED0_PLL, "fout_pll_shared0", "fin_pll",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON0_PLL_SHARED0, NULL),
+ PLL(pll_1017x, CLK_FOUT_SHARED1_PLL, "fout_pll_shared1", "fin_pll",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON0_PLL_SHARED1, NULL),
+ PLL(pll_1031x, CLK_FOUT_AUDIO_PLL, "fout_pll_audio", "fin_pll",
+ PLL_LOCKTIME_PLL_AUDIO, PLL_CON0_PLL_AUDIO, artpec8_pll_audio_rates),
+};
+
+PNAME(mout_clkcmu_bus_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_bus_dlp_p) = { "dout_pll_shared0_div2", "dout_pll_shared0_div4",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_core_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared0_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_core_dlp_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_cpucl_switch_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_fsys_bus_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_fsys_ip_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div3",
+ "dout_pll_shared1_div2", "dout_pll_shared0_div3" };
+PNAME(mout_clkcmu_fsys_scan0_p) = { "dout_pll_shared0_div4", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_fsys_scan1_p) = { "dout_pll_shared0_div4", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_imem_imem_p) = { "dout_pll_shared1_div4", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div2" };
+PNAME(mout_clkcmu_imem_jpeg_p) = { "dout_pll_shared0_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_cdc_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_dlp_core_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_3d_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_2d_p) = { "dout_pll_shared0_div2", "dout_pll_shared1_div2",
+ "dout_pll_shared0_div3", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_mif_switch_p) = { "dout_pll_shared0", "dout_pll_shared1",
+ "dout_pll_shared0_div2", "dout_pll_shared0_div3" };
+PNAME(mout_clkcmu_mif_busp_p) = { "dout_pll_shared0_div3", "dout_pll_shared1_div4",
+ "dout_pll_shared0_div4", "dout_pll_shared0_div2" };
+PNAME(mout_clkcmu_peri_disp_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div4", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_peri_ip_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div4",
+ "dout_pll_shared1_div4", "dout_pll_shared0_div2" };
+PNAME(mout_clkcmu_rsp_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_trfm_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vca_ace_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vca_od_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vio_core_p) = { "dout_pll_shared0_div3", "dout_pll_shared0_div2",
+ "dout_pll_shared1_div2", "dout_pll_shared1_div3" };
+PNAME(mout_clkcmu_vip0_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vip1_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_vpp_core_p) = { "dout_pll_shared1_div2", "dout_pll_shared0_div3",
+ "dout_pll_shared1_div3", "dout_pll_shared1_div4" };
+PNAME(mout_clkcmu_pll_shared0_p) = { "fin_pll", "fout_pll_shared0" };
+PNAME(mout_clkcmu_pll_shared1_p) = { "fin_pll", "fout_pll_shared1" };
+PNAME(mout_clkcmu_pll_audio_p) = { "fin_pll", "fout_pll_audio" };
+
+static const struct samsung_fixed_factor_clock cmu_fixed_factor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_OTP, "dout_clkcmu_otp", "fin_pll", 1, 8, 0),
+};
+
+static const struct samsung_mux_clock cmu_cmu_mux_clks[] __initconst = {
+ MUX(0, "mout_clkcmu_pll_shared0", mout_clkcmu_pll_shared0_p, PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(0, "mout_clkcmu_pll_shared1", mout_clkcmu_pll_shared1_p, PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(0, "mout_clkcmu_pll_audio", mout_clkcmu_pll_audio_p, PLL_CON0_PLL_AUDIO, 4, 1),
+ MUX(0, "mout_clkcmu_bus_bus", mout_clkcmu_bus_bus_p, CLK_CON_MUX_CLKCMU_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_bus_dlp", mout_clkcmu_bus_dlp_p, CLK_CON_MUX_CLKCMU_BUS_DLP, 0, 2),
+ MUX(0, "mout_clkcmu_core_bus", mout_clkcmu_core_bus_p, CLK_CON_MUX_CLKCMU_CORE_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_core_dlp", mout_clkcmu_core_dlp_p, CLK_CON_MUX_CLKCMU_CORE_DLP, 0, 2),
+ MUX(0, "mout_clkcmu_cpucl_switch", mout_clkcmu_cpucl_switch_p,
+ CLK_CON_MUX_CLKCMU_CPUCL_SWITCH, 0, 3),
+ MUX(0, "mout_clkcmu_fsys_bus", mout_clkcmu_fsys_bus_p, CLK_CON_MUX_CLKCMU_FSYS_BUS, 0, 2),
+ MUX(0, "mout_clkcmu_fsys_ip", mout_clkcmu_fsys_ip_p, CLK_CON_MUX_CLKCMU_FSYS_IP, 0, 2),
+ MUX(0, "mout_clkcmu_fsys_scan0", mout_clkcmu_fsys_scan0_p,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN0, 0, 1),
+ MUX(0, "mout_clkcmu_fsys_scan1", mout_clkcmu_fsys_scan1_p,
+ CLK_CON_MUX_CLKCMU_FSYS_SCAN1, 0, 1),
+ MUX(0, "mout_clkcmu_imem_imem", mout_clkcmu_imem_imem_p,
+ CLK_CON_MUX_CLKCMU_IMEM_ACLK, 0, 2),
+ MUX(0, "mout_clkcmu_imem_jpeg", mout_clkcmu_imem_jpeg_p,
+ CLK_CON_MUX_CLKCMU_IMEM_JPEG, 0, 2),
+ nMUX(0, "mout_clkcmu_cdc_core", mout_clkcmu_cdc_core_p, CLK_CON_MUX_CLKCMU_CDC_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_dlp_core", mout_clkcmu_dlp_core_p, CLK_CON_MUX_CLKCMU_DLP_CORE, 0, 2),
+ MUX(0, "mout_clkcmu_3d", mout_clkcmu_3d_p, CLK_CON_MUX_CLKCMU_3D, 0, 2),
+ MUX(0, "mout_clkcmu_2d", mout_clkcmu_2d_p, CLK_CON_MUX_CLKCMU_2D, 0, 2),
+ MUX(0, "mout_clkcmu_mif_switch", mout_clkcmu_mif_switch_p,
+ CLK_CON_MUX_CLKCMU_MIF_SWITCH, 0, 2),
+ MUX(0, "mout_clkcmu_mif_busp", mout_clkcmu_mif_busp_p, CLK_CON_MUX_CLKCMU_MIF_BUSP, 0, 2),
+ MUX(0, "mout_clkcmu_peri_disp", mout_clkcmu_peri_disp_p,
+ CLK_CON_MUX_CLKCMU_PERI_DISP, 0, 2),
+ MUX(0, "mout_clkcmu_peri_ip", mout_clkcmu_peri_ip_p, CLK_CON_MUX_CLKCMU_PERI_IP, 0, 2),
+ MUX(0, "mout_clkcmu_rsp_core", mout_clkcmu_rsp_core_p, CLK_CON_MUX_CLKCMU_RSP_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_trfm_core", mout_clkcmu_trfm_core_p,
+ CLK_CON_MUX_CLKCMU_TRFM_CORE, 0, 2),
+ MUX(0, "mout_clkcmu_vca_ace", mout_clkcmu_vca_ace_p, CLK_CON_MUX_CLKCMU_VCA_ACE, 0, 2),
+ MUX(0, "mout_clkcmu_vca_od", mout_clkcmu_vca_od_p, CLK_CON_MUX_CLKCMU_VCA_OD, 0, 2),
+ MUX(0, "mout_clkcmu_vio_core", mout_clkcmu_vio_core_p, CLK_CON_MUX_CLKCMU_VIO_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vip0_core", mout_clkcmu_vip0_core_p,
+ CLK_CON_MUX_CLKCMU_VIP0_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vip1_core", mout_clkcmu_vip1_core_p,
+ CLK_CON_MUX_CLKCMU_VIP1_CORE, 0, 2),
+ nMUX(0, "mout_clkcmu_vpp_core", mout_clkcmu_vpp_core_p, CLK_CON_MUX_CLKCMU_VPP_CORE, 0, 2),
+};
+
+static const struct samsung_div_clock cmu_cmu_div_clks[] __initconst = {
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_pll_shared0_div2",
+ "mout_clkcmu_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_pll_shared0_div3",
+ "mout_clkcmu_pll_shared0", CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED0_DIV4, "dout_pll_shared0_div4",
+ "dout_pll_shared0_div2", CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_pll_shared1_div2",
+ "mout_clkcmu_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_pll_shared1_div3",
+ "mout_clkcmu_pll_shared1", CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED1_DIV4, "dout_pll_shared1_div4",
+ "dout_pll_shared1_div2", CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+ DIV(CLK_DOUT_CMU_BUS, "dout_clkcmu_bus",
+ "mout_clkcmu_bus_bus", CLK_CON_DIV_CLKCMU_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_BUS_DLP, "dout_clkcmu_bus_dlp",
+ "mout_clkcmu_bus_dlp", CLK_CON_DIV_CLKCMU_BUS_DLP, 0, 4),
+ DIV(CLK_DOUT_CMU_CORE_MAIN, "dout_clkcmu_core_main",
+ "mout_clkcmu_core_bus", CLK_CON_DIV_CLKCMU_CORE_MAIN, 0, 4),
+ DIV(CLK_DOUT_CMU_CORE_DLP, "dout_clkcmu_core_dlp",
+ "mout_clkcmu_core_dlp", CLK_CON_DIV_CLKCMU_CORE_DLP, 0, 4),
+ DIV(CLK_DOUT_CMU_CPUCL_SWITCH, "dout_clkcmu_cpucl_switch",
+ "mout_clkcmu_cpucl_switch", CLK_CON_DIV_CLKCMU_CPUCL_SWITCH, 0, 3),
+ DIV(CLK_DOUT_CMU_FSYS_BUS, "dout_clkcmu_fsys_bus",
+ "mout_clkcmu_fsys_bus", CLK_CON_DIV_CLKCMU_FSYS_BUS, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS_IP, "dout_clkcmu_fsys_ip",
+ "mout_clkcmu_fsys_ip", CLK_CON_DIV_CLKCMU_FSYS_IP, 0, 9),
+ DIV(CLK_DOUT_CMU_FSYS_SCAN0, "dout_clkcmu_fsys_scan0",
+ "mout_clkcmu_fsys_scan0", CLK_CON_DIV_CLKCMU_FSYS_SCAN0, 0, 4),
+ DIV(CLK_DOUT_CMU_FSYS_SCAN1, "dout_clkcmu_fsys_scan1",
+ "mout_clkcmu_fsys_scan1", CLK_CON_DIV_CLKCMU_FSYS_SCAN1, 0, 4),
+ DIV(CLK_DOUT_CMU_IMEM_ACLK, "dout_clkcmu_imem_aclk",
+ "mout_clkcmu_imem_imem", CLK_CON_DIV_CLKCMU_IMEM_ACLK, 0, 4),
+ DIV(CLK_DOUT_CMU_IMEM_JPEG, "dout_clkcmu_imem_jpeg",
+ "mout_clkcmu_imem_jpeg", CLK_CON_DIV_CLKCMU_IMEM_JPEG, 0, 4),
+ DIV_F(CLK_DOUT_CMU_CDC_CORE, "dout_clkcmu_cdc_core",
+ "mout_clkcmu_cdc_core", CLK_CON_DIV_CLKCMU_CDC_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_DLP_CORE, "dout_clkcmu_dlp_core",
+ "mout_clkcmu_dlp_core", CLK_CON_DIV_CLKCMU_DLP_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DOUT_CMU_GPU_3D, "dout_clkcmu_gpu_3d",
+ "mout_clkcmu_3d", CLK_CON_DIV_CLKCMU_GPU_3D, 0, 3),
+ DIV(CLK_DOUT_CMU_GPU_2D, "dout_clkcmu_gpu_2d",
+ "mout_clkcmu_2d", CLK_CON_DIV_CLKCMU_GPU_2D, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_SWITCH, "dout_clkcmu_mif_switch",
+ "mout_clkcmu_mif_switch", CLK_CON_DIV_CLKCMU_MIF_SWITCH, 0, 4),
+ DIV(CLK_DOUT_CMU_MIF_BUSP, "dout_clkcmu_mif_busp",
+ "mout_clkcmu_mif_busp", CLK_CON_DIV_CLKCMU_MIF_BUSP, 0, 3),
+ DIV(CLK_DOUT_CMU_PERI_DISP, "dout_clkcmu_peri_disp",
+ "mout_clkcmu_peri_disp", CLK_CON_DIV_CLKCMU_PERI_DISP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERI_IP, "dout_clkcmu_peri_ip",
+ "mout_clkcmu_peri_ip", CLK_CON_DIV_CLKCMU_PERI_IP, 0, 4),
+ DIV(CLK_DOUT_CMU_PERI_AUDIO, "dout_clkcmu_peri_audio",
+ "mout_clkcmu_pll_audio", CLK_CON_DIV_CLKCMU_PERI_AUDIO, 0, 4),
+ DIV(CLK_DOUT_CMU_RSP_CORE, "dout_clkcmu_rsp_core",
+ "mout_clkcmu_rsp_core", CLK_CON_DIV_CLKCMU_RSP_CORE, 0, 4),
+ DIV_F(CLK_DOUT_CMU_TRFM_CORE, "dout_clkcmu_trfm_core",
+ "mout_clkcmu_trfm_core", CLK_CON_DIV_CLKCMU_TRFM_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(CLK_DOUT_CMU_VCA_ACE, "dout_clkcmu_vca_ace",
+ "mout_clkcmu_vca_ace", CLK_CON_DIV_CLKCMU_VCA_ACE, 0, 4),
+ DIV(CLK_DOUT_CMU_VCA_OD, "dout_clkcmu_vca_od",
+ "mout_clkcmu_vca_od", CLK_CON_DIV_CLKCMU_VCA_OD, 0, 4),
+ DIV(CLK_DOUT_CMU_VIO_CORE, "dout_clkcmu_vio_core",
+ "mout_clkcmu_vio_core", CLK_CON_DIV_CLKCMU_VIO_CORE, 0, 4),
+ DIV(CLK_DOUT_CMU_VIO_AUDIO, "dout_clkcmu_vio_audio",
+ "mout_clkcmu_pll_audio", CLK_CON_DIV_CLKCMU_VIO_AUDIO, 0, 4),
+ DIV_F(CLK_DOUT_CMU_VIP0_CORE, "dout_clkcmu_vip0_core",
+ "mout_clkcmu_vip0_core", CLK_CON_DIV_CLKCMU_VIP0_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_VIP1_CORE, "dout_clkcmu_vip1_core",
+ "mout_clkcmu_vip1_core", CLK_CON_DIV_CLKCMU_VIP1_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV_F(CLK_DOUT_CMU_VPP_CORE, "dout_clkcmu_vpp_core",
+ "mout_clkcmu_vpp_core", CLK_CON_DIV_CLKCMU_VPP_CORE, 0, 4, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_cmu_info cmu_cmu_info __initconst = {
+ .pll_clks = cmu_cmu_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_cmu_pll_clks),
+ .fixed_factor_clks = cmu_fixed_factor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cmu_fixed_factor_clks),
+ .mux_clks = cmu_cmu_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_cmu_mux_clks),
+ .div_clks = cmu_cmu_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_cmu_div_clks),
+ .nr_clk_ids = CMU_CMU_NR_CLK,
+ .clk_regs = cmu_cmu_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_cmu_clk_regs),
+};
+
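+/*
+ * Editorial note (assumption): except for CMU_IMEM, which is registered
+ * early below via CLK_OF_DECLARE, these samsung_cmu_info tables are
+ * presumably consumed by the shared probe helpers declared in
+ * clk-exynos-arm64.h, included above.
+ */
+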
+/* Register Offset definitions for CMU_BUS (0x12c10000) */
+#define PLL_CON0_MUX_CLK_BUS_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_BUS_DLP_USER 0x0120
+#define CLK_CON_DIV_CLK_BUS_PCLK 0x1800
+
+static const unsigned long cmu_bus_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_BUS_ACLK_USER,
+ PLL_CON0_MUX_CLK_BUS_DLP_USER,
+ CLK_CON_DIV_CLK_BUS_PCLK,
+};
+
+PNAME(mout_clk_bus_aclk_user_p) = { "fin_pll", "dout_clkcmu_bus" };
+PNAME(mout_clk_bus_dlp_user_p) = { "fin_pll", "dout_clkcmu_bus_dlp" };
+
+static const struct samsung_mux_clock cmu_bus_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_BUS_ACLK_USER, "mout_clk_bus_aclk_user",
+ mout_clk_bus_aclk_user_p, PLL_CON0_MUX_CLK_BUS_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_BUS_DLP_USER, "mout_clk_bus_dlp_user",
+ mout_clk_bus_dlp_user_p, PLL_CON0_MUX_CLK_BUS_DLP_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_bus_div_clks[] __initconst = {
+ DIV(CLK_DOUT_BUS_PCLK, "dout_clk_bus_pclk", "mout_clk_bus_aclk_user",
+ CLK_CON_DIV_CLK_BUS_PCLK, 0, 4),
+};
+
+static const struct samsung_cmu_info cmu_bus_info __initconst = {
+ .mux_clks = cmu_bus_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_bus_mux_clks),
+ .div_clks = cmu_bus_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_bus_div_clks),
+ .nr_clk_ids = CMU_BUS_NR_CLK,
+ .clk_regs = cmu_bus_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_bus_clk_regs),
+};
+
+/* Register Offset definitions for CMU_CORE (0x12410000) */
+#define PLL_CON0_MUX_CLK_CORE_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_CORE_DLP_USER 0x0120
+#define CLK_CON_DIV_CLK_CORE_PCLK 0x1800
+
+static const unsigned long cmu_core_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_CORE_ACLK_USER,
+ PLL_CON0_MUX_CLK_CORE_DLP_USER,
+ CLK_CON_DIV_CLK_CORE_PCLK,
+};
+
+PNAME(mout_clk_core_aclk_user_p) = { "fin_pll", "dout_clkcmu_core_main" };
+PNAME(mout_clk_core_dlp_user_p) = { "fin_pll", "dout_clkcmu_core_dlp" };
+
+static const struct samsung_mux_clock cmu_core_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CORE_ACLK_USER, "mout_clk_core_aclk_user",
+ mout_clk_core_aclk_user_p, PLL_CON0_MUX_CLK_CORE_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_DLP_USER, "mout_clk_core_dlp_user",
+ mout_clk_core_dlp_user_p, PLL_CON0_MUX_CLK_CORE_DLP_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_core_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CORE_PCLK, "dout_clk_core_pclk",
+ "mout_clk_core_aclk_user", CLK_CON_DIV_CLK_CORE_PCLK, 0, 4),
+};
+
+static const struct samsung_cmu_info cmu_core_info __initconst = {
+ .mux_clks = cmu_core_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_core_mux_clks),
+ .div_clks = cmu_core_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_core_div_clks),
+ .nr_clk_ids = CMU_CORE_NR_CLK,
+ .clk_regs = cmu_core_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_core_clk_regs),
+};
+
+/* Register Offset definitions for CMU_CPUCL (0x11410000) */
+#define PLL_LOCKTIME_PLL_CPUCL 0x0000
+#define PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER 0x0120
+#define PLL_CON0_PLL_CPUCL 0x0140
+#define CLK_CON_MUX_CLK_CPUCL_PLL 0x1000
+#define CLK_CON_DIV_CLK_CLUSTER_ACLK 0x1800
+#define CLK_CON_DIV_CLK_CLUSTER_CNTCLK 0x1804
+#define CLK_CON_DIV_CLK_CLUSTER_PCLKDBG 0x1808
+#define CLK_CON_DIV_CLK_CPUCL_CMUREF 0x180c
+#define CLK_CON_DIV_CLK_CPUCL_PCLK 0x1814
+#define CLK_CON_DIV_CLK_CLUSTER_ATCLK 0x1818
+#define CLK_CON_DIV_CLK_CPUCL_DBG 0x181c
+#define CLK_CON_DIV_CLK_CPUCL_PCLKDBG 0x1820
+#define CLK_CON_GAT_CLK_CLUSTER_CPU 0x2008
+#define CLK_CON_GAT_CLK_CPUCL_SHORTSTOP 0x200c
+#define CLK_CON_DMYQCH_CON_CSSYS_QCH 0x3008
+
+static const unsigned long cmu_cpucl_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_CPUCL,
+ PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER,
+ PLL_CON0_PLL_CPUCL,
+ CLK_CON_MUX_CLK_CPUCL_PLL,
+ CLK_CON_DIV_CLK_CLUSTER_ACLK,
+ CLK_CON_DIV_CLK_CLUSTER_CNTCLK,
+ CLK_CON_DIV_CLK_CLUSTER_PCLKDBG,
+ CLK_CON_DIV_CLK_CPUCL_CMUREF,
+ CLK_CON_DIV_CLK_CPUCL_PCLK,
+ CLK_CON_DIV_CLK_CLUSTER_ATCLK,
+ CLK_CON_DIV_CLK_CPUCL_DBG,
+ CLK_CON_DIV_CLK_CPUCL_PCLKDBG,
+ CLK_CON_GAT_CLK_CLUSTER_CPU,
+ CLK_CON_GAT_CLK_CPUCL_SHORTSTOP,
+ CLK_CON_DMYQCH_CON_CSSYS_QCH,
+};
+
+static const struct samsung_pll_clock cmu_cpucl_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_CPUCL_PLL, "fout_pll_cpucl", "fin_pll",
+ PLL_LOCKTIME_PLL_CPUCL, PLL_CON0_PLL_CPUCL, NULL),
+};
+
+PNAME(mout_clkcmu_cpucl_switch_user_p) = { "fin_pll", "dout_clkcmu_cpucl_switch" };
+PNAME(mout_pll_cpucl_p) = { "fin_pll", "fout_pll_cpucl" };
+PNAME(mout_clk_cpucl_pll_p) = { "mout_pll_cpucl", "mout_clkcmu_cpucl_switch_user" };
+
+static const struct samsung_mux_clock cmu_cpucl_mux_clks[] __initconst = {
+ MUX_F(0, "mout_pll_cpucl", mout_pll_cpucl_p, PLL_CON0_PLL_CPUCL, 4, 1,
+ CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0),
+ MUX(CLK_MOUT_CPUCL_SWITCH_USER, "mout_clkcmu_cpucl_switch_user",
+ mout_clkcmu_cpucl_switch_user_p, PLL_CON0_MUX_CLKCMU_CPUCL_SWITCH_USER, 4, 1),
+ MUX_F(CLK_MOUT_CPUCL_PLL, "mout_clk_cpucl_pll", mout_clk_cpucl_pll_p,
+ CLK_CON_MUX_CLK_CPUCL_PLL, 0, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static const struct samsung_fixed_factor_clock cpucl_ffactor_clks[] __initconst = {
+ FFACTOR(CLK_DOUT_CPUCL_CPU, "dout_clk_cpucl_cpu",
+ "mout_clk_cpucl_pll", 1, 1, CLK_SET_RATE_PARENT),
+};
+
+static const struct samsung_div_clock cmu_cpucl_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CPUCL_CLUSTER_ACLK, "dout_clk_cluster_aclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_ACLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_PCLKDBG, "dout_clk_cluster_pclkdbg",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_PCLKDBG, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_CNTCLK, "dout_clk_cluster_cntclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_CNTCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CLUSTER_ATCLK, "dout_clk_cluster_atclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CLUSTER_ATCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_PCLK, "dout_clk_cpucl_pclk",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_PCLK, 0, 4),
+ DIV(CLK_DOUT_CPUCL_CMUREF, "dout_clk_cpucl_cmuref",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_CMUREF, 0, 3),
+ DIV(CLK_DOUT_CPUCL_DBG, "dout_clk_cpucl_dbg",
+ "dout_clk_cpucl_cpu", CLK_CON_DIV_CLK_CPUCL_DBG, 0, 4),
+ DIV(CLK_DOUT_CPUCL_PCLKDBG, "dout_clk_cpucl_pclkdbg",
+ "dout_clk_cpucl_dbg", CLK_CON_DIV_CLK_CPUCL_PCLKDBG, 0, 4),
+};
+
+static const struct samsung_gate_clock cmu_cpucl_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CPUCL_CLUSTER_CPU, "clk_con_gat_clk_cluster_cpu",
+ "clk_con_gat_clk_cpucl_shortstop", CLK_CON_GAT_CLK_CLUSTER_CPU, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_SHORTSTOP, "clk_con_gat_clk_cpucl_shortstop",
+ "dout_clk_cpucl_cpu", CLK_CON_GAT_CLK_CPUCL_SHORTSTOP, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_PCLKDBG, "cssys_ipclkport_pclkdbg",
+ "dout_clk_cpucl_pclkdbg", CLK_CON_DMYQCH_CON_CSSYS_QCH, 1,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_CPUCL_CSSYS_IPCLKPORT_ATCLK, "cssys_ipclkport_atclk",
+ "dout_clk_cpucl_dbg", CLK_CON_DMYQCH_CON_CSSYS_QCH, 1,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_cpucl_info __initconst = {
+ .pll_clks = cmu_cpucl_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_cpucl_pll_clks),
+ .fixed_factor_clks = cpucl_ffactor_clks,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cpucl_ffactor_clks),
+ .mux_clks = cmu_cpucl_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_cpucl_mux_clks),
+ .div_clks = cmu_cpucl_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_cpucl_div_clks),
+ .gate_clks = cmu_cpucl_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_cpucl_gate_clks),
+ .nr_clk_ids = CMU_CPUCL_NR_CLK,
+ .clk_regs = cmu_cpucl_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_cpucl_clk_regs),
+};
+
+/* Register Offset definitions for CMU_FSYS (0x16c10000) */
+#define PLL_LOCKTIME_PLL_FSYS 0x0004
+#define PLL_CON0_MUX_CLK_FSYS_BUS_USER 0x0120
+#define PLL_CON0_MUX_CLK_FSYS_MMC_USER 0x0140
+#define PLL_CON0_MUX_CLK_FSYS_SCAN0_USER 0x0160
+#define PLL_CON0_MUX_CLK_FSYS_SCAN1_USER 0x0180
+#define PLL_CON0_PLL_FSYS 0x01c0
+#define CLK_CON_DIV_CLK_FSYS_ADC 0x1804
+#define CLK_CON_DIV_CLK_FSYS_BUS300 0x1808
+#define CLK_CON_DIV_CLK_FSYS_BUS_QSPI 0x180c
+#define CLK_CON_DIV_CLK_FSYS_EQOS_25 0x1810
+#define CLK_CON_DIV_CLK_FSYS_EQOS_2P5 0x1814
+#define CLK_CON_DIV_CLK_FSYS_EQOS_500 0x1818
+#define CLK_CON_DIV_CLK_FSYS_EQOS_INT125 0x181c
+#define CLK_CON_DIV_CLK_FSYS_MMC_CARD0 0x1820
+#define CLK_CON_DIV_CLK_FSYS_MMC_CARD1 0x1824
+#define CLK_CON_DIV_CLK_FSYS_OTP_MEM 0x1828
+#define CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL 0x182c
+#define CLK_CON_DIV_CLK_FSYS_QSPI 0x1830
+#define CLK_CON_DIV_CLK_FSYS_SCLK_UART 0x1834
+#define CLK_CON_DIV_CLK_FSYS_SFMC_NAND 0x1838
+#define CLK_CON_DIV_SCAN_CLK_FSYS_125 0x183c
+#define CLK_CON_DIV_SCAN_CLK_FSYS_MMC 0x1840
+#define CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE 0x1844
+#define CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK 0x2044
+#define CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK 0x204c
+#define CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART 0x2050
+#define CLK_CON_MMC0_IPCLKPORT_I_ACLK 0x2070
+#define CLK_CON_MMC1_IPCLKPORT_I_ACLK 0x2078
+#define CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG 0x208c
+#define CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG 0x2090
+#define CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG 0x2094
+#define CLK_CON_PWM_IPCLKPORT_I_PCLK_S0 0x20a0
+#define CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20 0x20bc
+#define CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY 0x20c0
+#define CLK_CON_XHB_AHBBR_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_XHB_USB_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK 0x201c
+#define CLK_CON_DMYQCH_CON_EQOS_TOP_QCH 0x3008
+#define CLK_CON_DMYQCH_CON_MMC0_QCH 0x300c
+#define CLK_CON_DMYQCH_CON_MMC1_QCH 0x3010
+#define CLK_CON_DMYQCH_CON_PCIE_TOP_QCH 0x3018
+#define CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF 0x301c
+#define CLK_CON_DMYQCH_CON_QSPI_QCH 0x3020
+#define CLK_CON_DMYQCH_CON_SFMC_QCH 0x3024
+
+static const unsigned long cmu_fsys_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_FSYS,
+ PLL_CON0_MUX_CLK_FSYS_BUS_USER,
+ PLL_CON0_MUX_CLK_FSYS_MMC_USER,
+ PLL_CON0_MUX_CLK_FSYS_SCAN0_USER,
+ PLL_CON0_MUX_CLK_FSYS_SCAN1_USER,
+ PLL_CON0_PLL_FSYS,
+ CLK_CON_DIV_CLK_FSYS_ADC,
+ CLK_CON_DIV_CLK_FSYS_BUS300,
+ CLK_CON_DIV_CLK_FSYS_BUS_QSPI,
+ CLK_CON_DIV_CLK_FSYS_EQOS_25,
+ CLK_CON_DIV_CLK_FSYS_EQOS_2P5,
+ CLK_CON_DIV_CLK_FSYS_EQOS_500,
+ CLK_CON_DIV_CLK_FSYS_EQOS_INT125,
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD0,
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD1,
+ CLK_CON_DIV_CLK_FSYS_OTP_MEM,
+ CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL,
+ CLK_CON_DIV_CLK_FSYS_QSPI,
+ CLK_CON_DIV_CLK_FSYS_SCLK_UART,
+ CLK_CON_DIV_CLK_FSYS_SFMC_NAND,
+ CLK_CON_DIV_SCAN_CLK_FSYS_125,
+ CLK_CON_DIV_SCAN_CLK_FSYS_MMC,
+ CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE,
+ CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK,
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_MMC0_IPCLKPORT_I_ACLK,
+ CLK_CON_MMC1_IPCLKPORT_I_ACLK,
+ CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG,
+ CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG,
+ CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG,
+ CLK_CON_PWM_IPCLKPORT_I_PCLK_S0,
+ CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20,
+ CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY,
+ CLK_CON_XHB_AHBBR_IPCLKPORT_CLK,
+ CLK_CON_XHB_USB_IPCLKPORT_CLK,
+ CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK,
+ CLK_CON_DMYQCH_CON_EQOS_TOP_QCH,
+ CLK_CON_DMYQCH_CON_MMC0_QCH,
+ CLK_CON_DMYQCH_CON_MMC1_QCH,
+ CLK_CON_DMYQCH_CON_PCIE_TOP_QCH,
+ CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF,
+ CLK_CON_DMYQCH_CON_QSPI_QCH,
+ CLK_CON_DMYQCH_CON_SFMC_QCH,
+};
+
+static const struct samsung_pll_clock cmu_fsys_pll_clks[] __initconst = {
+ PLL(pll_1017x, CLK_FOUT_FSYS_PLL, "fout_pll_fsys", "fin_pll",
+ PLL_LOCKTIME_PLL_FSYS, PLL_CON0_PLL_FSYS, NULL),
+};
+
+PNAME(mout_fsys_scan0_user_p) = { "fin_pll", "dout_clkcmu_fsys_scan0" };
+PNAME(mout_fsys_scan1_user_p) = { "fin_pll", "dout_clkcmu_fsys_scan1" };
+PNAME(mout_fsys_bus_user_p) = { "fin_pll", "dout_clkcmu_fsys_bus" };
+PNAME(mout_fsys_mmc_user_p) = { "fin_pll", "dout_clkcmu_fsys_ip" };
+PNAME(mout_fsys_pll_fsys_p) = { "fin_pll", "fout_pll_fsys" };
+
+static const struct samsung_mux_clock cmu_fsys_mux_clks[] __initconst = {
+ MUX(0, "mout_clk_pll_fsys", mout_fsys_pll_fsys_p, PLL_CON0_PLL_FSYS, 4, 1),
+ MUX(CLK_MOUT_FSYS_SCAN0_USER, "mout_fsys_scan0_user",
+ mout_fsys_scan0_user_p, PLL_CON0_MUX_CLK_FSYS_SCAN0_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_SCAN1_USER, "mout_fsys_scan1_user",
+ mout_fsys_scan1_user_p, PLL_CON0_MUX_CLK_FSYS_SCAN1_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_BUS_USER, "mout_fsys_bus_user",
+ mout_fsys_bus_user_p, PLL_CON0_MUX_CLK_FSYS_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_FSYS_MMC_USER, "mout_fsys_mmc_user",
+ mout_fsys_mmc_user_p, PLL_CON0_MUX_CLK_FSYS_MMC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock cmu_fsys_div_clks[] __initconst = {
+ DIV(CLK_DOUT_FSYS_PCIE_PIPE, "dout_fsys_pcie_pipe", "mout_clk_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_PCIE_PIPE, 0, 4),
+ DIV(CLK_DOUT_FSYS_ADC, "dout_fsys_adc", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_ADC, 0, 7),
+ DIV(CLK_DOUT_FSYS_PCIE_PHY_REFCLK_SYSPLL, "dout_fsys_pcie_phy_refclk_syspll",
+ "mout_clk_pll_fsys", CLK_CON_DIV_CLK_FSYS_PCIE_PHY_REFCLK_SYSPLL, 0, 8),
+ DIV(CLK_DOUT_FSYS_QSPI, "dout_fsys_qspi", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_QSPI, 0, 4),
+ DIV(CLK_DOUT_FSYS_EQOS_INT125, "dout_fsys_eqos_int125", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_EQOS_INT125, 0, 4),
+ DIV(CLK_DOUT_FSYS_OTP_MEM, "dout_fsys_otp_mem", "fin_pll",
+ CLK_CON_DIV_CLK_FSYS_OTP_MEM, 0, 9),
+ DIV(CLK_DOUT_FSYS_SCLK_UART, "dout_fsys_sclk_uart", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_SCLK_UART, 0, 10),
+ DIV(CLK_DOUT_FSYS_SFMC_NAND, "dout_fsys_sfmc_nand", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_SFMC_NAND, 0, 4),
+ DIV(CLK_DOUT_SCAN_CLK_FSYS_125, "dout_scan_clk_fsys_125", "mout_clk_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_125, 0, 4),
+ DIV(CLK_DOUT_FSYS_SCAN_CLK_MMC, "dout_scan_clk_fsys_mmc", "fout_pll_fsys",
+ CLK_CON_DIV_SCAN_CLK_FSYS_MMC, 0, 4),
+ DIV(CLK_DOUT_FSYS_EQOS_25, "dout_fsys_eqos_25", "dout_fsys_eqos_int125",
+ CLK_CON_DIV_CLK_FSYS_EQOS_25, 0, 4),
+ DIV_F(CLK_DOUT_FSYS_EQOS_2p5, "dout_fsys_eqos_2p5", "dout_fsys_eqos_25",
+ CLK_CON_DIV_CLK_FSYS_EQOS_2P5, 0, 4, CLK_SET_RATE_PARENT, 0),
+ DIV(0, "dout_fsys_eqos_500", "mout_clk_pll_fsys",
+ CLK_CON_DIV_CLK_FSYS_EQOS_500, 0, 4),
+ DIV(CLK_DOUT_FSYS_BUS300, "dout_fsys_bus300", "mout_fsys_bus_user",
+ CLK_CON_DIV_CLK_FSYS_BUS300, 0, 4),
+ DIV(CLK_DOUT_FSYS_BUS_QSPI, "dout_fsys_bus_qspi", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_BUS_QSPI, 0, 4),
+ DIV(CLK_DOUT_FSYS_MMC_CARD0, "dout_fsys_mmc_card0", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD0, 0, 10),
+ DIV(CLK_DOUT_FSYS_MMC_CARD1, "dout_fsys_mmc_card1", "mout_fsys_mmc_user",
+ CLK_CON_DIV_CLK_FSYS_MMC_CARD1, 0, 10),
+};
+
+static const struct samsung_gate_clock cmu_fsys_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_FSYS_PCIE_PHY_REFCLK_IN, "pcie_sub_ctrl_inst_0_phy_refclk_in",
+ "dout_fsys_pcie_phy_refclk_syspll", CLK_CON_DMYQCH_CON_PCIE_TOP_QCH_REF, 1,
+ CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_I_RGMII_TXCLK_2P5,
+ "eqos_top_ipclkport_i_rgmii_txclk_2p5",
+ "dout_fsys_eqos_2p5", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_ACLK_I, "eqos_top_ipclkport_aclk_i",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_EQOS_TOP_IPCLKPORT_CLK_CSR_I, "eqos_top_ipclkport_clk_csr_i",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_EQOS_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_PIPE_PAL_INST_0_I_APB_PCLK, "pipe_pal_inst_0_i_apb_pclk",
+ "dout_fsys_bus300", CLK_CON_DMYQCH_CON_PCIE_TOP_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_QSPI_IPCLKPORT_HCLK, "qspi_ipclkport_hclk",
+ "dout_fsys_bus_qspi", CLK_CON_DMYQCH_CON_QSPI_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_QSPI_IPCLKPORT_SSI_CLK, "qspi_ipclkport_ssi_clk",
+ "dout_fsys_qspi", CLK_CON_DMYQCH_CON_QSPI_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_IPCLKPORT_SDCLKIN, "mmc0_ipclkport_sdclkin",
+ "dout_fsys_mmc_card0", CLK_CON_DMYQCH_CON_MMC0_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_IPCLKPORT_SDCLKIN, "mmc1_ipclkport_sdclkin",
+ "dout_fsys_mmc_card1", CLK_CON_DMYQCH_CON_MMC1_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_SFMC_IPCLKPORT_I_ACLK_NAND, "sfmc_ipclkport_i_aclk_nand",
+ "dout_fsys_sfmc_nand", CLK_CON_DMYQCH_CON_SFMC_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_FSYS_UART0_SCLK_UART, "uart0_sclk", "dout_fsys_sclk_uart",
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, "dwc_pcie_ctl_inst_0_mstr_aclk_ug",
+ "mout_fsys_bus_user", CLK_CON_DWC_PCIE_CTL_INST_0_MSTR_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INXT_0_SLV_ACLK_UG, "dwc_pcie_ctl_inst_0_slv_aclk_ug",
+ "mout_fsys_bus_user", CLK_CON_DWC_PCIE_CTL_INST_0_SLV_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_I2C0_IPCLKPORT_I_PCLK, "fsys_i2c0_ipclkport_i_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_I2C0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_I2C1_IPCLKPORT_I_PCLK, "fsys_i2c1_ipclkport_i_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_I2C1_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_UART0_PCLK, "uart0_pclk", "dout_fsys_bus300",
+ CLK_CON_FSYS_UART0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_MMC0_IPCLKPORT_I_ACLK, "mmc0_ipclkport_i_aclk", "dout_fsys_bus300",
+ CLK_CON_MMC0_IPCLKPORT_I_ACLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_MMC1_IPCLKPORT_I_ACLK, "mmc1_ipclkport_i_aclk", "dout_fsys_bus300",
+ CLK_CON_MMC1_IPCLKPORT_I_ACLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, "dwc_pcie_ctl_inst_0_dbi_aclk_ug",
+ "dout_fsys_bus300", CLK_CON_DWC_PCIE_CTL_INST_0_DBI_ACLK_UG, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_PWM_IPCLKPORT_I_PCLK_S0, "pwm_ipclkport_i_pclk_s0", "dout_fsys_bus300",
+ CLK_CON_PWM_IPCLKPORT_I_PCLK_S0, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20, "usb20drd_ipclkport_aclk_phyctrl_20",
+ "dout_fsys_bus300", CLK_CON_USB20DRD_IPCLKPORT_ACLK_PHYCTRL_20, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_USB20DRD_IPCLKPORT_BUS_CLK_EARLY, "usb20drd_ipclkport_bus_clk_early",
+ "dout_fsys_bus300", CLK_CON_USB20DRD_IPCLKPORT_BUS_CLK_EARLY, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_XHB_AHBBR_IPCLKPORT_CLK, "xhb_ahbbr_ipclkport_clk", "dout_fsys_bus300",
+ CLK_CON_XHB_AHBBR_IPCLKPORT_CLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_XHB_USB_IPCLKPORT_CLK, "xhb_usb_ipclkport_clk", "dout_fsys_bus300",
+ CLK_CON_XHB_USB_IPCLKPORT_CLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_FSYS_BUS_QSPI, "bus_p_fsys_ipclkport_qspiclk", "dout_fsys_bus_qspi",
+ CLK_CON_BUS_P_FSYS_IPCLKPORT_QSPICLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_fsys_info __initconst = {
+ .pll_clks = cmu_fsys_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(cmu_fsys_pll_clks),
+ .mux_clks = cmu_fsys_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_fsys_mux_clks),
+ .div_clks = cmu_fsys_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_fsys_div_clks),
+ .gate_clks = cmu_fsys_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_fsys_gate_clks),
+ .nr_clk_ids = CMU_FSYS_NR_CLK,
+ .clk_regs = cmu_fsys_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_fsys_clk_regs),
+};
+
+/* Register Offset definitions for CMU_IMEM (0x10010000) */
+#define PLL_CON0_MUX_CLK_IMEM_ACLK_USER 0x0100
+#define PLL_CON0_MUX_CLK_IMEM_JPEG_USER 0x0120
+#define CLK_CON_MUX_CLK_IMEM_GIC_CA53 0x1000
+#define CLK_CON_MUX_CLK_IMEM_GIC_CA5 0x1008
+#define CLK_CON_MCT_IPCLKPORT_PCLK 0x2038
+#define CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK 0x2044
+
+static const unsigned long cmu_imem_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_IMEM_ACLK_USER,
+ PLL_CON0_MUX_CLK_IMEM_JPEG_USER,
+ CLK_CON_MUX_CLK_IMEM_GIC_CA53,
+ CLK_CON_MUX_CLK_IMEM_GIC_CA5,
+ CLK_CON_MCT_IPCLKPORT_PCLK,
+ CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK,
+};
+
+PNAME(mout_imem_aclk_user_p) = { "fin_pll", "dout_clkcmu_imem_aclk" };
+PNAME(mout_imem_gic_ca53_p) = { "mout_imem_aclk_user", "fin_pll" };
+PNAME(mout_imem_gic_ca5_p) = { "mout_imem_aclk_user", "fin_pll" };
+PNAME(mout_imem_jpeg_user_p) = { "fin_pll", "dout_clkcmu_imem_jpeg" };
+
+static const struct samsung_mux_clock cmu_imem_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_IMEM_ACLK_USER, "mout_imem_aclk_user",
+ mout_imem_aclk_user_p, PLL_CON0_MUX_CLK_IMEM_ACLK_USER, 4, 1),
+ MUX(CLK_MOUT_IMEM_GIC_CA53, "mout_imem_gic_ca53",
+ mout_imem_gic_ca53_p, CLK_CON_MUX_CLK_IMEM_GIC_CA53, 0, 1),
+ MUX(CLK_MOUT_IMEM_GIC_CA5, "mout_imem_gic_ca5",
+ mout_imem_gic_ca5_p, CLK_CON_MUX_CLK_IMEM_GIC_CA5, 0, 1),
+ MUX(CLK_MOUT_IMEM_JPEG_USER, "mout_imem_jpeg_user",
+ mout_imem_jpeg_user_p, PLL_CON0_MUX_CLK_IMEM_JPEG_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock cmu_imem_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_IMEM_MCT_PCLK, "mct_pclk", "mout_imem_aclk_user",
+ CLK_CON_MCT_IPCLKPORT_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_IMEM_PCLK_TMU0_APBIF, "sfrif_tmu_imem_ipclkport_pclk", "mout_imem_aclk_user",
+ CLK_CON_SFRIF_TMU_IMEM_IPCLKPORT_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_imem_info __initconst = {
+ .mux_clks = cmu_imem_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_imem_mux_clks),
+ .gate_clks = cmu_imem_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_imem_gate_clks),
+ .nr_clk_ids = CMU_IMEM_NR_CLK,
+ .clk_regs = cmu_imem_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_imem_clk_regs),
+};
+
+static void __init artpec8_clk_cmu_imem_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &cmu_imem_info);
+}
+
+CLK_OF_DECLARE(artpec8_clk_cmu_imem, "axis,artpec8-cmu-imem", artpec8_clk_cmu_imem_init);
+
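+/*
+ * Editorial note (assumption): CMU_IMEM is the one block registered
+ * early via CLK_OF_DECLARE, presumably because it gates mct_pclk and
+ * the MCT clocksource needs that clock before the platform bus probes;
+ * the remaining CMUs can go through the regular driver model.
+ */
+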
+/* Register Offset definitions for CMU_PERI (0x16410000) */
+#define PLL_CON0_MUX_CLK_PERI_AUDIO_USER 0x0100
+#define PLL_CON0_MUX_CLK_PERI_DISP_USER 0x0120
+#define PLL_CON0_MUX_CLK_PERI_IP_USER 0x0140
+#define CLK_CON_MUX_CLK_PERI_I2S0 0x1000
+#define CLK_CON_MUX_CLK_PERI_I2S1 0x1004
+#define CLK_CON_DIV_CLK_PERI_DSIM 0x1800
+#define CLK_CON_DIV_CLK_PERI_I2S0 0x1804
+#define CLK_CON_DIV_CLK_PERI_I2S1 0x1808
+#define CLK_CON_DIV_CLK_PERI_PCLK 0x180c
+#define CLK_CON_DIV_CLK_PERI_SPI 0x1810
+#define CLK_CON_DIV_CLK_PERI_UART1 0x1814
+#define CLK_CON_DIV_CLK_PERI_UART2 0x1818
+#define CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS 0x2004
+#define CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK 0x2030
+#define CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK 0x2034
+#define CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK 0x2048
+#define CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI 0x204c
+#define CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK 0x2050
+#define CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART 0x2054
+#define CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK 0x2058
+#define CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART 0x205c
+#define CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH 0x3000
+#define CLK_CON_DMYQCH_CON_DMA4DSIM_QCH 0x3004
+#define CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH 0x3008
+#define CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH 0x300c
+
+static const unsigned long cmu_peri_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLK_PERI_AUDIO_USER,
+ PLL_CON0_MUX_CLK_PERI_DISP_USER,
+ PLL_CON0_MUX_CLK_PERI_IP_USER,
+ CLK_CON_MUX_CLK_PERI_I2S0,
+ CLK_CON_MUX_CLK_PERI_I2S1,
+ CLK_CON_DIV_CLK_PERI_DSIM,
+ CLK_CON_DIV_CLK_PERI_I2S0,
+ CLK_CON_DIV_CLK_PERI_I2S1,
+ CLK_CON_DIV_CLK_PERI_PCLK,
+ CLK_CON_DIV_CLK_PERI_SPI,
+ CLK_CON_DIV_CLK_PERI_UART1,
+ CLK_CON_DIV_CLK_PERI_UART2,
+ CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS,
+ CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI,
+ CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK,
+ CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART,
+ CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH,
+ CLK_CON_DMYQCH_CON_DMA4DSIM_QCH,
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH,
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH,
+};
+
+static const struct samsung_fixed_rate_clock peri_fixed_clks[] __initconst = {
+ FRATE(0, "clk_peri_audio", NULL, 0, 100000000),
+};
+
+PNAME(mout_peri_ip_user_p) = { "fin_pll", "dout_clkcmu_peri_ip" };
+PNAME(mout_peri_audio_user_p) = { "fin_pll", "dout_clkcmu_peri_audio" };
+PNAME(mout_peri_disp_user_p) = { "fin_pll", "dout_clkcmu_peri_disp" };
+PNAME(mout_peri_i2s0_p) = { "dout_peri_i2s0", "clk_peri_audio" };
+PNAME(mout_peri_i2s1_p) = { "dout_peri_i2s1", "clk_peri_audio" };
+
+static const struct samsung_mux_clock cmu_peri_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERI_IP_USER, "mout_peri_ip_user", mout_peri_ip_user_p,
+ PLL_CON0_MUX_CLK_PERI_IP_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_AUDIO_USER, "mout_peri_audio_user",
+ mout_peri_audio_user_p, PLL_CON0_MUX_CLK_PERI_AUDIO_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_DISP_USER, "mout_peri_disp_user", mout_peri_disp_user_p,
+ PLL_CON0_MUX_CLK_PERI_DISP_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_I2S0, "mout_peri_i2s0", mout_peri_i2s0_p,
+ CLK_CON_MUX_CLK_PERI_I2S0, 0, 1),
+ MUX(CLK_MOUT_PERI_I2S1, "mout_peri_i2s1", mout_peri_i2s1_p,
+ CLK_CON_MUX_CLK_PERI_I2S1, 0, 1),
+};
+
+static const struct samsung_div_clock cmu_peri_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERI_SPI, "dout_peri_spi", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_SPI, 0, 10),
+ DIV(CLK_DOUT_PERI_UART1, "dout_peri_uart1", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_UART1, 0, 10),
+ DIV(CLK_DOUT_PERI_UART2, "dout_peri_uart2", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_UART2, 0, 10),
+ DIV(CLK_DOUT_PERI_PCLK, "dout_peri_pclk", "mout_peri_ip_user",
+ CLK_CON_DIV_CLK_PERI_PCLK, 0, 4),
+ DIV(CLK_DOUT_PERI_I2S0, "dout_peri_i2s0", "mout_peri_audio_user",
+ CLK_CON_DIV_CLK_PERI_I2S0, 0, 4),
+ DIV(CLK_DOUT_PERI_I2S1, "dout_peri_i2s1", "mout_peri_audio_user",
+ CLK_CON_DIV_CLK_PERI_I2S1, 0, 4),
+ DIV(CLK_DOUT_PERI_DSIM, "dout_peri_dsim", "mout_peri_disp_user",
+ CLK_CON_DIV_CLK_PERI_DSIM, 0, 4),
+};
+
+static const struct samsung_gate_clock cmu_peri_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_APB_CLK, "dma4dsim_ipclkport_clk_apb_clk",
+ "dout_peri_pclk", CLK_CON_DMYQCH_CON_DMA4DSIM_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK_HST, "i2ssc0_ipclkport_clk_hst", "dout_peri_pclk",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK_HST, "i2ssc1_ipclkport_clk_hst", "dout_peri_pclk",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_AUDIO_OUT_IPCLKPORT_CLK, "audio_out_ipclkport_clk",
+ "mout_peri_audio_user", CLK_CON_DMYQCH_CON_AUDIO_OUT_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_I2SSC0_IPCLKPORT_CLK, "peri_i2ssc0_ipclkport_clk", "mout_peri_i2s0",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC0_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_I2SSC1_IPCLKPORT_CLK, "peri_i2ssc1_ipclkport_clk", "mout_peri_i2s1",
+ CLK_CON_DMYQCH_CON_PERI_I2SSC1_QCH, 1, CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERI_DMA4DSIM_IPCLKPORT_CLK_AXI_CLK, "dma4dsim_ipclkport_clk_axi_clk",
+ "mout_peri_disp_user", CLK_CON_DMYQCH_CON_DMA4DSIM_QCH, 1, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_PERI_SPI0_SCLK_SPI, "peri_spi0_ipclkport_i_sclk_spi", "dout_peri_spi",
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_SCLK_SPI, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART1_SCLK_UART, "uart1_sclk", "dout_peri_uart1",
+ CLK_CON_PERI_UART1_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART2_SCLK_UART, "uart2_sclk", "dout_peri_uart2",
+ CLK_CON_PERI_UART2_IPCLKPORT_I_SCLK_UART, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_APB_ASYNC_DSIM_IPCLKPORT_PCLKS, "apb_async_dsim_ipclkport_pclks",
+ "dout_peri_pclk", CLK_CON_APB_ASYNC_DSIM_IPCLKPORT_PCLKS, 21,
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_I2C2_IPCLKPORT_I_PCLK, "peri_i2c2_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_I2C2_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_I2C3_IPCLKPORT_I_PCLK, "peri_i2c3_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_I2C3_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_SPI0_PCLK, "peri_spi0_ipclkport_i_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_SPI0_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART1_PCLK, "uart1_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_UART1_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERI_UART2_PCLK, "uart2_pclk", "dout_peri_pclk",
+ CLK_CON_PERI_UART2_IPCLKPORT_I_PCLK, 21, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info cmu_peri_info __initconst = {
+ .mux_clks = cmu_peri_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmu_peri_mux_clks),
+ .div_clks = cmu_peri_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmu_peri_div_clks),
+ .gate_clks = cmu_peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmu_peri_gate_clks),
+ .fixed_clks = peri_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(peri_fixed_clks),
+ .nr_clk_ids = CMU_PERI_NR_CLK,
+ .clk_regs = cmu_peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmu_peri_clk_regs),
+};
+
+/**
+ * artpec8_cmu_probe - Probe function for ARTPEC-8 platform clocks
+ * @pdev: Pointer to platform device
+ *
+ * Configure the clock hierarchy for the clock domains of the ARTPEC-8
+ * platform.
+ *
+ * Return: 0 always.
+ */
+static int __init artpec8_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id artpec8_cmu_of_match[] = {
+ {
+ .compatible = "axis,artpec8-cmu-cmu",
+ .data = &cmu_cmu_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-bus",
+ .data = &cmu_bus_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-core",
+ .data = &cmu_core_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-cpucl",
+ .data = &cmu_cpucl_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-fsys",
+ .data = &cmu_fsys_info,
+ }, {
+ .compatible = "axis,artpec8-cmu-peri",
+ .data = &cmu_peri_info,
+ }, {
+ },
+};
+
+static struct platform_driver artpec8_cmu_driver __refdata = {
+ .driver = {
+ .name = "artpec8-cmu",
+ .of_match_table = artpec8_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = artpec8_cmu_probe,
+};
+
+static int __init artpec8_cmu_init(void)
+{
+ return platform_driver_register(&artpec8_cmu_driver);
+}
+core_initcall(artpec8_cmu_init);
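With the driver registered at core_initcall time, the remaining CMUs probe through the regular driver model and peripheral drivers consume their outputs via the common clock framework. A minimal sketch, assuming a hypothetical UART platform driver whose DT node lists "pclk" in clock-names:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* Hypothetical consumer: "pclk" is an assumed clock-names entry. */
	static int example_uart_probe(struct platform_device *pdev)
	{
		struct clk *pclk;

		/* Gets and enables the clock; it is disabled and put
		 * automatically when the driver detaches. */
		pclk = devm_clk_get_enabled(&pdev->dev, "pclk");
		if (IS_ERR(pclk))
			return PTR_ERR(pclk);

		dev_info(&pdev->dev, "pclk runs at %lu Hz\n",
			 clk_get_rate(pclk));
		return 0;
	}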
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 4e1ebd8a30b1..300f8d5d3c48 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -567,12 +567,14 @@ static int exynos850_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
/* -------------------------------------------------------------------------- */
/* Common round rate callback usable for all types of CPU clocks */
-static long exynos_cpuclk_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int exynos_cpuclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_hw *parent = clk_hw_get_parent(hw);
- *prate = clk_hw_round_rate(parent, drate);
- return *prate;
+ req->best_parent_rate = clk_hw_round_rate(parent, req->rate);
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
/* Common recalc rate callback usable for all types of CPU clocks */
@@ -591,7 +593,7 @@ static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
static const struct clk_ops exynos_cpuclk_clk_ops = {
.recalc_rate = exynos_cpuclk_recalc_rate,
- .round_rate = exynos_cpuclk_round_rate,
+ .determine_rate = exynos_cpuclk_determine_rate,
};
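The hunk above is a mechanical conversion from the older .round_rate callback to .determine_rate: rather than returning the rounded rate and writing the parent rate through a pointer, the callback fills in a struct clk_rate_request. Consumers see no difference, since clk_round_rate() resolves through whichever of the two callbacks a driver provides. A minimal sketch of the unchanged consumer side, with the clock handle purely illustrative:

	#include <linux/clk.h>

	/* cpu_clk is an illustrative handle to one of these CPU clocks. */
	static long example_round(struct clk *cpu_clk)
	{
		/* Ends up in exynos_cpuclk_determine_rate() via the core */
		return clk_round_rate(cpu_clk, 1000000000UL);
	}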
/*
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 5f1a4f5e2e59..5b21025338bd 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -175,6 +175,7 @@ static int exynos_clkout_probe(struct platform_device *pdev)
clkout->mux.shift = EXYNOS_CLKOUT_MUX_SHIFT;
clkout->mux.lock = &clkout->slock;
+ clkout->data.num = EXYNOS_CLKOUT_NR_CLKS;
clkout->data.hws[0] = clk_hw_register_composite(NULL, "clkout",
parent_names, parent_count, &clkout->mux.hw,
&clk_mux_ops, NULL, NULL, &clkout->gate.hw,
@@ -185,7 +186,6 @@ static int exynos_clkout_probe(struct platform_device *pdev)
goto err_unmap;
}
- clkout->data.num = EXYNOS_CLKOUT_NR_CLKS;
ret = of_clk_add_hw_provider(clkout->np, of_clk_hw_onecell_get, &clkout->data);
if (ret)
goto err_clk_unreg;
diff --git a/drivers/clk/samsung/clk-exynos990.c b/drivers/clk/samsung/clk-exynos990.c
index 8d3f193d2b4d..6277dd557fab 100644
--- a/drivers/clk/samsung/clk-exynos990.c
+++ b/drivers/clk/samsung/clk-exynos990.c
@@ -17,8 +17,10 @@
#include "clk-pll.h"
/* NOTE: Must be equal to the last clock ID increased by one */
-#define CLKS_NR_TOP (CLK_GOUT_CMU_VRA_BUS + 1)
-#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_XIU_D_HSI0_ACLK + 1)
+#define CLKS_NR_TOP (CLK_DOUT_CMU_CLK_CMUREF + 1)
+#define CLKS_NR_HSI0 (CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK + 1)
+#define CLKS_NR_PERIC0 (CLK_GOUT_PERIC0_SYSREG_PCLK + 1)
+#define CLKS_NR_PERIC1 (CLK_GOUT_PERIC1_XIU_P_ACLK + 1)
#define CLKS_NR_PERIS (CLK_GOUT_PERIS_OTP_CON_TOP_OSCCLK + 1)
/* ---- CMU_TOP ------------------------------------------------------------- */
@@ -45,6 +47,7 @@
#define PLL_CON3_PLL_SHARED3 0x024c
#define PLL_CON0_PLL_SHARED4 0x0280
#define PLL_CON3_PLL_SHARED4 0x028c
+#define CLK_CON_MUX_CLKCMU_DPU_BUS 0x1000
#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1004
#define CLK_CON_MUX_MUX_CLKCMU_AUD_CPU 0x1008
#define CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS 0x100c
@@ -103,6 +106,8 @@
#define CLK_CON_MUX_MUX_CLKCMU_SSP_BUS 0x10e0
#define CLK_CON_MUX_MUX_CLKCMU_TNR_BUS 0x10e4
#define CLK_CON_MUX_MUX_CLKCMU_VRA_BUS 0x10e8
+#define CLK_CON_MUX_MUX_CLK_CMU_CMUREF 0x10f0
+#define CLK_CON_MUX_MUX_CMU_CMUREF 0x10f4
#define CLK_CON_DIV_CLKCMU_APM_BUS 0x1800
#define CLK_CON_DIV_CLKCMU_AUD_CPU 0x1804
#define CLK_CON_DIV_CLKCMU_BUS0_BUS 0x1808
@@ -162,6 +167,7 @@
#define CLK_CON_DIV_CLKCMU_VRA_BUS 0x18e0
#define CLK_CON_DIV_DIV_CLKCMU_DPU 0x18e8
#define CLK_CON_DIV_DIV_CLKCMU_DPU_ALT 0x18ec
+#define CLK_CON_DIV_DIV_CLK_CMU_CMUREF 0x18f0
#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x18f4
#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18f8
#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18fc
@@ -239,13 +245,21 @@ static const unsigned long top_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_SHARED2,
PLL_LOCKTIME_PLL_SHARED3,
PLL_LOCKTIME_PLL_SHARED4,
+ PLL_CON0_PLL_G3D,
PLL_CON3_PLL_G3D,
+ PLL_CON0_PLL_MMC,
PLL_CON3_PLL_MMC,
+ PLL_CON0_PLL_SHARED0,
PLL_CON3_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
PLL_CON3_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED2,
PLL_CON3_PLL_SHARED2,
+ PLL_CON0_PLL_SHARED3,
PLL_CON3_PLL_SHARED3,
+ PLL_CON0_PLL_SHARED4,
PLL_CON3_PLL_SHARED4,
+ CLK_CON_MUX_CLKCMU_DPU_BUS,
CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
CLK_CON_MUX_MUX_CLKCMU_AUD_CPU,
CLK_CON_MUX_MUX_CLKCMU_BUS0_BUS,
@@ -304,6 +318,8 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_SSP_BUS,
CLK_CON_MUX_MUX_CLKCMU_TNR_BUS,
CLK_CON_MUX_MUX_CLKCMU_VRA_BUS,
+ CLK_CON_MUX_MUX_CLK_CMU_CMUREF,
+ CLK_CON_MUX_MUX_CMU_CMUREF,
CLK_CON_DIV_CLKCMU_APM_BUS,
CLK_CON_DIV_CLKCMU_AUD_CPU,
CLK_CON_DIV_CLKCMU_BUS0_BUS,
@@ -363,6 +379,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_CLKCMU_VRA_BUS,
CLK_CON_DIV_DIV_CLKCMU_DPU,
CLK_CON_DIV_DIV_CLKCMU_DPU_ALT,
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF,
CLK_CON_DIV_PLL_SHARED0_DIV2,
CLK_CON_DIV_PLL_SHARED0_DIV3,
CLK_CON_DIV_PLL_SHARED0_DIV4,
@@ -458,6 +475,8 @@ PNAME(mout_pll_shared3_p) = { "oscclk", "fout_shared3_pll" };
PNAME(mout_pll_shared4_p) = { "oscclk", "fout_shared4_pll" };
PNAME(mout_pll_mmc_p) = { "oscclk", "fout_mmc_pll" };
PNAME(mout_pll_g3d_p) = { "oscclk", "fout_g3d_pll" };
+PNAME(mout_cmu_dpu_bus_p) = { "dout_cmu_dpu",
+ "dout_cmu_dpu_alt" };
PNAME(mout_cmu_apm_bus_p) = { "dout_cmu_shared0_div2",
"dout_cmu_shared2_div2" };
PNAME(mout_cmu_aud_cpu_p) = { "dout_cmu_shared0_div2",
@@ -672,6 +691,12 @@ PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
"dout_cmu_shared4_div2",
"dout_cmu_shared0_div4",
"dout_cmu_shared4_div3" };
+PNAME(mout_cmu_cmuref_p) = { "oscclk",
+ "dout_cmu_clk_cmuref" };
+PNAME(mout_cmu_clk_cmuref_p) = { "dout_cmu_shared0_div4",
+ "dout_cmu_shared1_div4",
+ "dout_cmu_shared2_div2",
+ "oscclk" };
/*
* Register name to clock name mangling strategy used in this file
@@ -689,19 +714,21 @@ PNAME(mout_cmu_vra_bus_p) = { "dout_cmu_shared0_div3",
static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_PLL_SHARED0, "mout_pll_shared0", mout_pll_shared0_p,
- PLL_CON3_PLL_SHARED0, 4, 1),
+ PLL_CON0_PLL_SHARED0, 4, 1),
MUX(CLK_MOUT_PLL_SHARED1, "mout_pll_shared1", mout_pll_shared1_p,
- PLL_CON3_PLL_SHARED1, 4, 1),
+ PLL_CON0_PLL_SHARED1, 4, 1),
MUX(CLK_MOUT_PLL_SHARED2, "mout_pll_shared2", mout_pll_shared2_p,
- PLL_CON3_PLL_SHARED2, 4, 1),
+ PLL_CON0_PLL_SHARED2, 4, 1),
MUX(CLK_MOUT_PLL_SHARED3, "mout_pll_shared3", mout_pll_shared3_p,
- PLL_CON3_PLL_SHARED3, 4, 1),
+ PLL_CON0_PLL_SHARED3, 4, 1),
MUX(CLK_MOUT_PLL_SHARED4, "mout_pll_shared4", mout_pll_shared4_p,
PLL_CON0_PLL_SHARED4, 4, 1),
MUX(CLK_MOUT_PLL_MMC, "mout_pll_mmc", mout_pll_mmc_p,
PLL_CON0_PLL_MMC, 4, 1),
MUX(CLK_MOUT_PLL_G3D, "mout_pll_g3d", mout_pll_g3d_p,
PLL_CON0_PLL_G3D, 4, 1),
+ MUX(CLK_MOUT_CMU_DPU_BUS, "mout_cmu_dpu_bus",
+ mout_cmu_dpu_bus_p, CLK_CON_MUX_CLKCMU_DPU_BUS, 0, 1),
MUX(CLK_MOUT_CMU_APM_BUS, "mout_cmu_apm_bus",
mout_cmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
MUX(CLK_MOUT_CMU_AUD_CPU, "mout_cmu_aud_cpu",
@@ -759,11 +786,11 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_CMU_DPU_ALT, "mout_cmu_dpu_alt",
mout_cmu_dpu_alt_p, CLK_CON_MUX_MUX_CLKCMU_DPU_ALT, 0, 2),
MUX(CLK_MOUT_CMU_DSP_BUS, "mout_cmu_dsp_bus",
- mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 2),
+ mout_cmu_dsp_bus_p, CLK_CON_MUX_MUX_CLKCMU_DSP_BUS, 0, 3),
MUX(CLK_MOUT_CMU_G2D_G2D, "mout_cmu_g2d_g2d",
mout_cmu_g2d_g2d_p, CLK_CON_MUX_MUX_CLKCMU_G2D_G2D, 0, 2),
MUX(CLK_MOUT_CMU_G2D_MSCL, "mout_cmu_g2d_mscl",
- mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 1),
+ mout_cmu_g2d_mscl_p, CLK_CON_MUX_MUX_CLKCMU_G2D_MSCL, 0, 2),
MUX(CLK_MOUT_CMU_HPM, "mout_cmu_hpm",
mout_cmu_hpm_p, CLK_CON_MUX_MUX_CLKCMU_HPM, 0, 2),
MUX(CLK_MOUT_CMU_HSI0_BUS, "mout_cmu_hsi0_bus",
@@ -775,7 +802,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
0, 2),
MUX(CLK_MOUT_CMU_HSI0_USBDP_DEBUG, "mout_cmu_hsi0_usbdp_debug",
mout_cmu_hsi0_usbdp_debug_p,
- CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 2),
+ CLK_CON_MUX_MUX_CLKCMU_HSI0_USBDP_DEBUG, 0, 1),
MUX(CLK_MOUT_CMU_HSI1_BUS, "mout_cmu_hsi1_bus",
mout_cmu_hsi1_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_BUS, 0, 3),
MUX(CLK_MOUT_CMU_HSI1_MMC_CARD, "mout_cmu_hsi1_mmc_card",
@@ -788,7 +815,7 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
0, 2),
MUX(CLK_MOUT_CMU_HSI1_UFS_EMBD, "mout_cmu_hsi1_ufs_embd",
mout_cmu_hsi1_ufs_embd_p, CLK_CON_MUX_MUX_CLKCMU_HSI1_UFS_EMBD,
- 0, 1),
+ 0, 2),
MUX(CLK_MOUT_CMU_HSI2_BUS, "mout_cmu_hsi2_bus",
mout_cmu_hsi2_bus_p, CLK_CON_MUX_MUX_CLKCMU_HSI2_BUS, 0, 1),
MUX(CLK_MOUT_CMU_HSI2_PCIE, "mout_cmu_hsi2_pcie",
@@ -830,6 +857,10 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
mout_cmu_tnr_bus_p, CLK_CON_MUX_MUX_CLKCMU_TNR_BUS, 0, 3),
MUX(CLK_MOUT_CMU_VRA_BUS, "mout_cmu_vra_bus",
mout_cmu_vra_bus_p, CLK_CON_MUX_MUX_CLKCMU_VRA_BUS, 0, 2),
+ MUX(CLK_MOUT_CMU_CMUREF, "mout_cmu_cmuref",
+ mout_cmu_cmuref_p, CLK_CON_MUX_MUX_CMU_CMUREF, 0, 1),
+ MUX(CLK_MOUT_CMU_CLK_CMUREF, "mout_cmu_clk_cmuref",
+ mout_cmu_clk_cmuref_p, CLK_CON_MUX_MUX_CLK_CMU_CMUREF, 0, 2),
};
static const struct samsung_div_clock top_div_clks[] __initconst = {
@@ -862,7 +893,7 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_PLL_SHARED4_DIV4, 0, 1),
DIV(CLK_DOUT_CMU_APM_BUS, "dout_cmu_apm_bus", "gout_cmu_apm_bus",
- CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+ CLK_CON_DIV_CLKCMU_APM_BUS, 0, 2),
DIV(CLK_DOUT_CMU_AUD_CPU, "dout_cmu_aud_cpu", "gout_cmu_aud_cpu",
CLK_CON_DIV_CLKCMU_AUD_CPU, 0, 3),
DIV(CLK_DOUT_CMU_BUS0_BUS, "dout_cmu_bus0_bus", "gout_cmu_bus0_bus",
@@ -887,9 +918,9 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_CMU_BOOST, 0, 2),
DIV(CLK_DOUT_CMU_CORE_BUS, "dout_cmu_core_bus", "gout_cmu_core_bus",
CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
- DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_debug",
+ DIV(CLK_DOUT_CMU_CPUCL0_DBG_BUS, "dout_cmu_cpucl0_dbg_bus",
"gout_cmu_cpucl0_dbg_bus", CLK_CON_DIV_CLKCMU_CPUCL0_DBG_BUS,
- 0, 3),
+ 0, 4),
DIV(CLK_DOUT_CMU_CPUCL0_SWITCH, "dout_cmu_cpucl0_switch",
"gout_cmu_cpucl0_switch", CLK_CON_DIV_CLKCMU_CPUCL0_SWITCH, 0, 3),
DIV(CLK_DOUT_CMU_CPUCL1_SWITCH, "dout_cmu_cpucl1_switch",
@@ -924,16 +955,11 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_HSI0_DPGTC, 0, 3),
DIV(CLK_DOUT_CMU_HSI0_USB31DRD, "dout_cmu_hsi0_usb31drd",
"gout_cmu_hsi0_usb31drd", CLK_CON_DIV_CLKCMU_HSI0_USB31DRD, 0, 4),
- DIV(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
- "gout_cmu_hsi0_usbdp_debug", CLK_CON_DIV_CLKCMU_HSI0_USBDP_DEBUG,
- 0, 4),
DIV(CLK_DOUT_CMU_HSI1_BUS, "dout_cmu_hsi1_bus", "gout_cmu_hsi1_bus",
CLK_CON_DIV_CLKCMU_HSI1_BUS, 0, 3),
DIV(CLK_DOUT_CMU_HSI1_MMC_CARD, "dout_cmu_hsi1_mmc_card",
"gout_cmu_hsi1_mmc_card", CLK_CON_DIV_CLKCMU_HSI1_MMC_CARD,
0, 9),
- DIV(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie", "gout_cmu_hsi1_pcie",
- CLK_CON_DIV_CLKCMU_HSI1_PCIE, 0, 7),
DIV(CLK_DOUT_CMU_HSI1_UFS_CARD, "dout_cmu_hsi1_ufs_card",
"gout_cmu_hsi1_ufs_card", CLK_CON_DIV_CLKCMU_HSI1_UFS_CARD,
0, 3),
@@ -942,8 +968,6 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
0, 3),
DIV(CLK_DOUT_CMU_HSI2_BUS, "dout_cmu_hsi2_bus", "gout_cmu_hsi2_bus",
CLK_CON_DIV_CLKCMU_HSI2_BUS, 0, 4),
- DIV(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie", "gout_cmu_hsi2_pcie",
- CLK_CON_DIV_CLKCMU_HSI2_PCIE, 0, 7),
DIV(CLK_DOUT_CMU_IPP_BUS, "dout_cmu_ipp_bus", "gout_cmu_ipp_bus",
CLK_CON_DIV_CLKCMU_IPP_BUS, 0, 4),
DIV(CLK_DOUT_CMU_ITP_BUS, "dout_cmu_itp_bus", "gout_cmu_itp_bus",
@@ -979,8 +1003,22 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
CLK_CON_DIV_CLKCMU_TNR_BUS, 0, 4),
DIV(CLK_DOUT_CMU_VRA_BUS, "dout_cmu_vra_bus", "gout_cmu_vra_bus",
CLK_CON_DIV_CLKCMU_VRA_BUS, 0, 4),
- DIV(CLK_DOUT_CMU_DPU, "dout_cmu_clkcmu_dpu", "gout_cmu_dpu",
- CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 4),
+ DIV(CLK_DOUT_CMU_DPU, "dout_cmu_dpu", "gout_cmu_dpu",
+ CLK_CON_DIV_DIV_CLKCMU_DPU, 0, 3),
+ DIV(CLK_DOUT_CMU_DPU_ALT, "dout_cmu_dpu_alt", "gout_cmu_dpu_bus",
+ CLK_CON_DIV_DIV_CLKCMU_DPU_ALT, 0, 4),
+ DIV(CLK_DOUT_CMU_CLK_CMUREF, "dout_cmu_clk_cmuref", "mout_cmu_clk_cmuref",
+ CLK_CON_DIV_DIV_CLK_CMU_CMUREF, 0, 2),
+};
+
+static const struct samsung_fixed_factor_clock cmu_top_ffactor[] __initconst = {
+ FFACTOR(CLK_DOUT_CMU_HSI1_PCIE, "dout_cmu_hsi1_pcie",
+ "gout_cmu_hsi1_pcie", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_OTP, "dout_cmu_otp", "oscclk", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_HSI0_USBDP_DEBUG, "dout_cmu_hsi0_usbdp_debug",
+ "gout_cmu_hsi0_usbdp_debug", 1, 8, 0),
+ FFACTOR(CLK_DOUT_CMU_HSI2_PCIE, "dout_cmu_hsi2_pcie",
+ "gout_cmu_hsi2_pcie", 1, 8, 0),
};
static const struct samsung_gate_clock top_gate_clks[] __initconst = {
@@ -1126,6 +1164,8 @@ static const struct samsung_cmu_info top_cmu_info __initconst = {
.nr_mux_clks = ARRAY_SIZE(top_mux_clks),
.div_clks = top_div_clks,
.nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .fixed_factor_clks = cmu_top_ffactor,
+ .nr_fixed_factor_clks = ARRAY_SIZE(cmu_top_ffactor),
.gate_clks = top_gate_clks,
.nr_gate_clks = ARRAY_SIZE(top_gate_clks),
.nr_clk_ids = CLKS_NR_TOP,
@@ -1186,6 +1226,8 @@ static const unsigned long hsi0_clk_regs[] __initconst = {
CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSMMU_USB_IPCLKPORT_CLK_S2,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_SYSREG_HSI0_IPCLKPORT_PCLK,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_ACLK_PHYCTRL,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USB31DRD_REF_CLK_40,
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_REF_SOC_PLL,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBDPPHY_SCL_APB_PCLK,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_I_USBPCS_APB_CLK,
CLK_CON_GAT_GOUT_BLK_HSI0_UID_USB31DRD_IPCLKPORT_BUS_CLK_EARLY,
@@ -1294,6 +1336,10 @@ static const struct samsung_gate_clock hsi0_gate_clks[] __initconst = {
"gout_hsi0_xiu_d_hsi0_aclk", "mout_hsi0_bus_user",
CLK_CON_GAT_GOUT_BLK_HSI0_UID_XIU_D_HSI0_IPCLKPORT_ACLK,
21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI0_LHS_ACEL_D_HSI0_CLK,
+ "gout_hsi0_lhs_acel_d_hsi0_clk", "mout_hsi0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_HSI0_UID_LHS_ACEL_D_HSI0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
};
static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
@@ -1307,6 +1353,1150 @@ static const struct samsung_cmu_info hsi0_cmu_info __initconst = {
.clk_name = "bus",
};
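The .clk_name = "bus" field names the clock that keeps this CMU's register interface ticking: exynos_arm64_register_cmu() looks up a clock of that name on the CMU's DT node and enables it before touching the block (the exact mechanics belong to the helper, so treat the following as an assumption). A rough sketch of that lookup pattern:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/of.h>

	/* Assumed behaviour of the "bus" clock handling, for illustration */
	static int example_enable_bus_clk(struct device_node *np)
	{
		struct clk *bus = of_clk_get_by_name(np, "bus");

		if (IS_ERR(bus))
			return PTR_ERR(bus);
		return clk_prepare_enable(bus);
	}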
+/* ---- CMU_PERIC0 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC0 (0x10400000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC0_UART_DBG 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI00_USI_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI01_USI_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI02_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI03_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI04_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI05_USI_USER 0x0674
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER 0x0680
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI13_USI_USER 0x0684
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0690
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER 0x0694
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER 0x06a0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI15_USI_USER 0x06a4
+#define PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER 0x06b0
+#define PLL_CON1_MUX_CLKCMU_PERIC0_USI_I2C_USER 0x06b4
+#define CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C 0x1828
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK 0x200c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6 0x2068
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0 0x2094
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6 0x20a8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK 0x20d4
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK 0x20e4
+
+static const unsigned long peric0_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG,
+ PLL_CON1_MUX_CLKCMU_PERIC0_UART_DBG,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+};
+
+/* Parent clock list for CMU_PERIC0 muxes */
+PNAME(mout_peric0_bus_user_p) = { "oscclk", "dout_cmu_peric0_bus" };
+PNAME(mout_peric0_uart_dbg_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi00_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi01_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi02_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi03_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi04_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi05_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi13_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi14_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi15_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+PNAME(mout_peric0_usi_i2c_user_p) = { "oscclk", "dout_cmu_peric0_ip" };
+
+static const struct samsung_mux_clock peric0_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC0_BUS_USER, "mout_peric0_bus_user",
+ mout_peric0_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_UART_DBG, "mout_peric0_uart_dbg",
+ mout_peric0_uart_dbg_p, PLL_CON0_MUX_CLKCMU_PERIC0_UART_DBG,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI00_USI_USER, "mout_peric0_usi00_usi_user",
+ mout_peric0_usi00_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI00_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI01_USI_USER, "mout_peric0_usi01_usi_user",
+ mout_peric0_usi01_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI01_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI02_USI_USER, "mout_peric0_usi02_usi_user",
+ mout_peric0_usi02_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI02_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI03_USI_USER, "mout_peric0_usi03_usi_user",
+ mout_peric0_usi03_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI03_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI04_USI_USER, "mout_peric0_usi04_usi_user",
+ mout_peric0_usi04_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI04_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI05_USI_USER, "mout_peric0_usi05_usi_user",
+ mout_peric0_usi05_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI05_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI13_USI_USER, "mout_peric0_usi13_usi_user",
+ mout_peric0_usi13_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI13_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI14_USI_USER, "mout_peric0_usi14_usi_user",
+ mout_peric0_usi14_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI14_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI15_USI_USER, "mout_peric0_usi15_usi_user",
+ mout_peric0_usi15_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI15_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC0_USI_I2C_USER, "mout_peric0_usi_i2c_user",
+ mout_peric0_usi_i2c_user_p, PLL_CON0_MUX_CLKCMU_PERIC0_USI_I2C_USER,
+ 4, 1),
+};
+
+static const struct samsung_div_clock peric0_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC0_UART_DBG, "dout_peric0_uart_dbg",
+ "mout_peric0_uart_dbg",
+ CLK_CON_DIV_DIV_CLK_PERIC0_UART_DBG,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI00_USI, "dout_peric0_usi00_usi",
+ "mout_peric0_usi00_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI00_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI01_USI, "dout_peric0_usi01_usi",
+ "mout_peric0_usi01_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI01_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI02_USI, "dout_peric0_usi02_usi",
+ "mout_peric0_usi02_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI02_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI03_USI, "dout_peric0_usi03_usi",
+ "mout_peric0_usi03_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI03_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI04_USI, "dout_peric0_usi04_usi",
+ "mout_peric0_usi04_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI04_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI05_USI, "dout_peric0_usi05_usi",
+ "mout_peric0_usi05_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI05_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI13_USI, "dout_peric0_usi13_usi",
+ "mout_peric0_usi13_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI13_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI14_USI, "dout_peric0_usi14_usi",
+ "mout_peric0_usi14_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI14_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI15_USI, "dout_peric0_usi15_usi",
+ "mout_peric0_usi15_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI15_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC0_USI_I2C, "dout_peric0_usi_i2c",
+ "mout_peric0_usi_i2c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC0_USI_I2C,
+ 0, 4),
+};
+
+static const struct samsung_gate_clock peric0_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC0_CMU_PCLK, "gout_peric0_cmu_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_PERIC0_CMU_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_OSCCLK_CLK, "gout_peric0_oscclk_clk",
+ "oscclk",
+ CLK_CON_GAT_CLK_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_D_TZPC_PCLK, "gout_peric0_d_tzpc_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_D_TZPC_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_GPIO_PCLK, "gout_peric0_gpio_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_GPIO_PERIC0_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC0_LHM_AXI_P_CLK, "gout_peric0_lhm_axi_p_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_LHM_AXI_P_PERIC0_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_10, "gout_peric0_top0_ipclk_10",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_11, "gout_peric0_top0_ipclk_11",
+ "dout_peric0_usi03_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_12, "gout_peric0_top0_ipclk_12",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_13, "gout_peric0_top0_ipclk_13",
+ "dout_peric0_usi04_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_14, "gout_peric0_top0_ipclk_14",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_15, "gout_peric0_top0_ipclk_15",
+ "dout_peric0_usi05_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_4, "gout_peric0_top0_ipclk_4",
+ "dout_peric0_uart_dbg",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_5, "gout_peric0_top0_ipclk_5",
+ "dout_peric0_usi00_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_6, "gout_peric0_top0_ipclk_6",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_7, "gout_peric0_top0_ipclk_7",
+ "dout_peric0_usi01_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_8, "gout_peric0_top0_ipclk_8",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_IPCLK_9, "gout_peric0_top0_ipclk_9",
+ "dout_peric0_usi02_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_10, "gout_peric0_top0_pclk_10",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_11, "gout_peric0_top0_pclk_11",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_12, "gout_peric0_top0_pclk_12",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_13, "gout_peric0_top0_pclk_13",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_14, "gout_peric0_top0_pclk_14",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_15, "gout_peric0_top0_pclk_15",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_4, "gout_peric0_top0_pclk_4",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_5, "gout_peric0_top0_pclk_5",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_6, "gout_peric0_top0_pclk_6",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_7, "gout_peric0_top0_pclk_7",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_8, "gout_peric0_top0_pclk_8",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP0_PCLK_9, "gout_peric0_top0_pclk_9",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP0_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_0, "gout_peric0_top1_ipclk_0",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_3, "gout_peric0_top1_ipclk_3",
+ "dout_peric0_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_4, "gout_peric0_top1_ipclk_4",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_5, "gout_peric0_top1_ipclk_5",
+ "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_6, "gout_peric0_top1_ipclk_6",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_7, "gout_peric0_top1_ipclk_7",
+ "dout_peric0_usi15_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_IPCLK_8, "gout_peric0_top1_ipclk_8",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_IPCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_0, "gout_peric0_top1_pclk_0",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_15, "gout_peric0_top1_pclk_15",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_3, "gout_peric0_top1_pclk_3",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_4, "gout_peric0_top1_pclk_4",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_5, "gout_peric0_top1_pclk_5",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_6, "gout_peric0_top1_pclk_6",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_7, "gout_peric0_top1_pclk_7",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_TOP1_PCLK_8, "gout_peric0_top1_pclk_8",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_PERIC0_TOP1_IPCLKPORT_PCLK_8,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_BUSP_CLK, "gout_peric0_busp_clk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_UART_DBG_CLK, "gout_peric0_uart_dbg_clk",
+ "dout_peric0_uart_dbg",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_UART_DBG_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI00_USI_CLK, "gout_peric0_usi00_usi_clk",
+ "dout_peric0_usi00_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI00_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI01_USI_CLK, "gout_peric0_usi01_usi_clk",
+ "dout_peric0_usi01_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI01_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI02_USI_CLK, "gout_peric0_usi02_usi_clk",
+ "dout_peric0_usi02_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI02_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI03_USI_CLK, "gout_peric0_usi03_usi_clk",
+ "dout_peric0_usi03_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI03_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI04_USI_CLK, "gout_peric0_usi04_usi_clk",
+ "dout_peric0_usi04_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI04_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI05_USI_CLK, "gout_peric0_usi05_usi_clk",
+ "dout_peric0_usi05_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI05_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI13_USI_CLK, "gout_peric0_usi13_usi_clk",
+ "dout_peric0_usi13_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI13_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI14_USI_CLK, "gout_peric0_usi14_usi_clk",
+ "dout_peric0_usi14_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI14_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI15_USI_CLK, "gout_peric0_usi15_usi_clk",
+ "dout_peric0_usi15_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI15_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_USI_I2C_CLK, "gout_peric0_usi_i2c_clk",
+ "dout_peric0_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_RSTNSYNC_CLK_PERIC0_USI_I2C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC0_SYSREG_PCLK, "gout_peric0_sysreg_pclk",
+ "mout_peric0_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC0_UID_SYSREG_PERIC0_IPCLKPORT_PCLK,
+ 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peric0_cmu_info __initconst = {
+ .mux_clks = peric0_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric0_mux_clks),
+ .div_clks = peric0_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric0_div_clks),
+ .gate_clks = peric0_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric0_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC0,
+ .clk_regs = peric0_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric0_clk_regs),
+ .clk_name = "bus",
+};
+
+/* ---- CMU_PERIC1 --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_PERIC1 (0x10700000) */
+#define PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER 0x0600
+#define PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER 0x0604
+#define PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0610
+#define PLL_CON1_MUX_CLKCMU_PERIC1_UART_BT_USER 0x0614
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER 0x0620
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI06_USI_USER 0x0624
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER 0x0630
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI07_USI_USER 0x0634
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER 0x0640
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI08_USI_USER 0x0644
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER 0x0650
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI09_USI_USER 0x0654
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0660
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER 0x0664
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0670
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER 0x0674
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0680
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER 0x0684
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER 0x0690
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI16_USI_USER 0x0694
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER 0x06a0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI17_USI_USER 0x06a4
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER 0x06b0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI18_USI_USER 0x06b4
+#define PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER 0x06c0
+#define PLL_CON1_MUX_CLKCMU_PERIC1_USI_I2C_USER 0x06c4
+#define CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI 0x180c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI 0x1810
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI 0x1814
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI 0x1818
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI 0x181c
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI 0x1820
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI 0x1824
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI 0x1828
+#define CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C 0x182c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK 0x2004
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK 0x2008
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK 0x200c
+#define CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK 0x2010
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK 0x2014
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK 0x201c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK 0x2020
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10 0x2024
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11 0x2028
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12 0x202c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13 0x2030
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14 0x2034
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15 0x2038
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4 0x203c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10 0x2040
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11 0x2044
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12 0x2048
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13 0x204c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14 0x2050
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15 0x2054
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4 0x2058
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0 0x205c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1 0x2060
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10 0x2064
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12 0x206c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13 0x2070
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14 0x2074
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15 0x2078
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2 0x207c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3 0x2080
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4 0x2084
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5 0x2088
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6 0x208c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7 0x2090
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9 0x2098
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0 0x209c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1 0x20a0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10 0x20a4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12 0x20ac
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13 0x20b0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14 0x20b4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15 0x20b8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2 0x20bc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3 0x20c0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4 0x20c4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5 0x20c8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6 0x20cc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7 0x20d0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9 0x20d8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK 0x20dc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK 0x20e0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK 0x20e4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK 0x20e8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK 0x20ec
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK 0x20f0
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK 0x20f4
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK 0x20f8
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK 0x20fc
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK 0x2100
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK 0x2104
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK 0x2108
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK 0x210c
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK 0x2110
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK 0x2114
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK 0x2118
+#define CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK 0x211c
+
+static const unsigned long peric1_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ PLL_CON1_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI,
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK,
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+};
+
+/* List of parent clocks for Muxes in CMU_PERIC1 */
+PNAME(mout_peric1_bus_user_p) = { "oscclk", "dout_cmu_peric1_bus" };
+PNAME(mout_peric1_uart_bt_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi06_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi07_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi08_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi09_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi10_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi11_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi12_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi18_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi16_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi17_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+PNAME(mout_peric1_usi_i2c_user_p) = { "oscclk", "dout_cmu_peric1_ip" };
+
+static const struct samsung_mux_clock peric1_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERIC1_BUS_USER, "mout_peric1_bus_user",
+ mout_peric1_bus_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_BUS_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_UART_BT_USER, "mout_peric1_uart_bt_user",
+ mout_peric1_uart_bt_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_UART_BT_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI06_USI_USER, "mout_peric1_usi06_usi_user",
+ mout_peric1_usi06_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI06_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI07_USI_USER, "mout_peric1_usi07_usi_user",
+ mout_peric1_usi07_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI07_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI08_USI_USER, "mout_peric1_usi08_usi_user",
+ mout_peric1_usi08_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI08_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI09_USI_USER, "mout_peric1_usi09_usi_user",
+ mout_peric1_usi09_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI09_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI10_USI_USER, "mout_peric1_usi10_usi_user",
+ mout_peric1_usi10_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI10_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI11_USI_USER, "mout_peric1_usi11_usi_user",
+ mout_peric1_usi11_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI11_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI12_USI_USER, "mout_peric1_usi12_usi_user",
+ mout_peric1_usi12_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI12_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI18_USI_USER, "mout_peric1_usi18_usi_user",
+ mout_peric1_usi18_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI18_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI16_USI_USER, "mout_peric1_usi16_usi_user",
+ mout_peric1_usi16_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI16_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI17_USI_USER, "mout_peric1_usi17_usi_user",
+ mout_peric1_usi17_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI17_USI_USER,
+ 4, 1),
+ MUX(CLK_MOUT_PERIC1_USI_I2C_USER, "mout_peric1_usi_i2c_user",
+ mout_peric1_usi_i2c_user_p, PLL_CON0_MUX_CLKCMU_PERIC1_USI_I2C_USER,
+ 4, 1),
+};
+
+static const struct samsung_div_clock peric1_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERIC1_UART_BT, "dout_peric1_uart_bt",
+ "mout_peric1_uart_bt_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_UART_BT,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI06_USI, "dout_peric1_usi06_usi",
+ "mout_peric1_usi06_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI06_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI07_USI, "dout_peric1_usi07_usi",
+ "mout_peric1_usi07_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI07_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI08_USI, "dout_peric1_usi08_usi",
+ "mout_peric1_usi08_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI08_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI18_USI, "dout_peric1_usi18_usi",
+ "mout_peric1_usi18_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI18_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI12_USI, "dout_peric1_usi12_usi",
+ "mout_peric1_usi12_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI12_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI09_USI, "dout_peric1_usi09_usi",
+ "mout_peric1_usi09_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI09_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI10_USI, "dout_peric1_usi10_usi",
+ "mout_peric1_usi10_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI10_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI11_USI, "dout_peric1_usi11_usi",
+ "mout_peric1_usi11_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI11_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI16_USI, "dout_peric1_usi16_usi",
+ "mout_peric1_usi16_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI16_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI17_USI, "dout_peric1_usi17_usi",
+ "mout_peric1_usi17_usi_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI17_USI,
+ 0, 4),
+ DIV(CLK_DOUT_PERIC1_USI_I2C, "dout_peric1_usi_i2c",
+ "mout_peric1_usi_i2c_user",
+ CLK_CON_DIV_DIV_CLK_PERIC1_USI_I2C,
+ 0, 4),
+};
+
+static const struct samsung_gate_clock peric1_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERIC1_CMU_PCLK, "gout_peric1_cmu_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_PERIC1_CMU_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_UART_BT_CLK, "gout_peric1_uart_bt_clk",
+ "dout_peric1_uart_bt",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_UART_BT_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI12_USI_CLK, "gout_peric1_usi12_usi_clk",
+ "dout_peric1_usi12_usi",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI12_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI18_USI_CLK, "gout_peric1_usi18_usi_clk",
+ "dout_peric1_usi18_usi",
+ CLK_CON_GAT_CLK_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI18_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_D_TZPC_PCLK, "gout_peric1_d_tzpc_pclk",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_D_TZPC_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_GPIO_PCLK, "gout_peric1_gpio_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_GPIO_PERIC1_IPCLKPORT_PCLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_CSIS_CLK, "gout_peric1_lhm_axi_p_csis_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_CSISPERIC1_IPCLKPORT_I_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_LHM_AXI_P_CLK, "gout_peric1_lhm_axi_p_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_LHM_AXI_P_PERIC1_IPCLKPORT_I_CLK,
+ 21, CLK_IS_CRITICAL, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_10, "gout_peric1_top0_ipclk_10",
+ "dout_peric1_usi06_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_11, "gout_peric1_top0_ipclk_11",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_12, "gout_peric1_top0_ipclk_12",
+ "dout_peric1_usi07_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_13, "gout_peric1_top0_ipclk_13",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_14, "gout_peric1_top0_ipclk_14",
+ "dout_peric1_usi08_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_15, "gout_peric1_top0_ipclk_15",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_IPCLK_4, "gout_peric1_top0_ipclk_4",
+ "dout_peric1_uart_bt",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_10, "gout_peric1_top0_pclk_10",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_11, "gout_peric1_top0_pclk_11",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_11,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_12, "gout_peric1_top0_pclk_12",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_13, "gout_peric1_top0_pclk_13",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_14, "gout_peric1_top0_pclk_14",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_15, "gout_peric1_top0_pclk_15",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP0_PCLK_4, "gout_peric1_top0_pclk_4",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP0_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_0, "gout_peric1_top1_ipclk_0",
+ "dout_peric1_usi09_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_1, "gout_peric1_top1_ipclk_1",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_10, "gout_peric1_top1_ipclk_10",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_12, "gout_peric1_top1_ipclk_12",
+ "dout_peric1_usi12_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_13, "gout_peric1_top1_ipclk_13",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_14, "gout_peric1_top1_ipclk_14",
+ "dout_peric1_usi18_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_15, "gout_peric1_top1_ipclk_15",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_2, "gout_peric1_top1_ipclk_2",
+ "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_3, "gout_peric1_top1_ipclk_3",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_4, "gout_peric1_top1_ipclk_4",
+ "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_5, "gout_peric1_top1_ipclk_5",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_6, "gout_peric1_top1_ipclk_6",
+ "dout_peric1_usi16_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_7, "gout_peric1_top1_ipclk_7",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_IPCLK_9, "gout_peric1_top1_ipclk_9",
+ "dout_peric1_usi17_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_IPCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_0, "gout_peric1_top1_pclk_0",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_0,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_1, "gout_peric1_top1_pclk_1",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_1,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_10, "gout_peric1_top1_pclk_10",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_10,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_12, "gout_peric1_top1_pclk_12",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_12,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_13, "gout_peric1_top1_pclk_13",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_13,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_14, "gout_peric1_top1_pclk_14",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_14,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_15, "gout_peric1_top1_pclk_15",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_15,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_2, "gout_peric1_top1_pclk_2",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_2,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_3, "gout_peric1_top1_pclk_3",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_3,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_4, "gout_peric1_top1_pclk_4",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_4,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_5, "gout_peric1_top1_pclk_5",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_5,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_6, "gout_peric1_top1_pclk_6",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_6,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_7, "gout_peric1_top1_pclk_7",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_7,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_TOP1_PCLK_9, "gout_peric1_top1_pclk_9",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_PERIC1_TOP1_IPCLKPORT_PCLK_9,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_BUSP_CLK, "gout_peric1_busp_clk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_BUSP_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_OSCCLK_CLK, "gout_peric1_oscclk_clk",
+ "oscclk",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_OSCCLK_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI06_USI_CLK, "gout_peric1_usi06_usi_clk",
+ "dout_peric1_usi06_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI06_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI07_USI_CLK, "gout_peric1_usi07_usi_clk",
+ "dout_peric1_usi07_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI07_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI08_USI_CLK, "gout_peric1_usi08_usi_clk",
+ "dout_peric1_usi08_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI08_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI09_USI_CLK, "gout_peric1_usi09_usi_clk",
+ "dout_peric1_usi09_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI09_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI10_USI_CLK, "gout_peric1_usi10_usi_clk",
+ "dout_peric1_usi10_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI10_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI11_USI_CLK, "gout_peric1_usi11_usi_clk",
+ "dout_peric1_usi11_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI11_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_USI_CLK, "gout_peric1_usi16_usi_clk",
+ "dout_peric1_usi16_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI16_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_USI_CLK, "gout_peric1_usi17_usi_clk",
+ "dout_peric1_usi17_usi",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI17_USI_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI_I2C_CLK, "gout_peric1_usi_i2c_clk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_RSTNSYNC_CLK_PERIC1_USI_I2C_IPCLKPORT_CLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_SYSREG_PCLK, "gout_peric1_sysreg_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_I3C_PCLK, "gout_peric1_usi16_i3c_pclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI16_I3C_SCLK, "gout_peric1_usi16_i3c_sclk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI16_I3C_IPCLKPORT_I_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_I3C_PCLK, "gout_peric1_usi17_i3c_pclk",
+ "dout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_PCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_USI17_I3C_SCLK, "gout_peric1_usi17_i3c_sclk",
+ "dout_peric1_usi_i2c",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_USI17_I3C_IPCLKPORT_I_SCLK,
+ 21, 0, 0),
+ GATE(CLK_GOUT_PERIC1_XIU_P_ACLK, "gout_peric1_xiu_p_aclk",
+ "mout_peric1_bus_user",
+ CLK_CON_GAT_GOUT_BLK_PERIC1_UID_XIU_P_PERIC1_IPCLKPORT_ACLK,
+ 21, CLK_IGNORE_UNUSED, 0),
+};
+
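For orientation when reading the table above: in the Samsung clock driver's GATE() helper, the arguments after the parent name are the control register offset, the enable-bit index, common clk framework flags, and driver-specific gate flags; bit 21 is the enable bit used by every CMU_PERIC1 gate here. One entry, annotated as a reading aid (not part of the patch):

	GATE(CLK_GOUT_PERIC1_SYSREG_PCLK,	/* clock ID from the DT bindings header */
	     "gout_peric1_sysreg_pclk",		/* clock name */
	     "mout_peric1_bus_user",		/* parent clock */
	     CLK_CON_GAT_GOUT_BLK_PERIC1_UID_SYSREG_PERIC1_IPCLKPORT_PCLK, /* gate register */
	     21, 0, 0),				/* enable bit, clk flags, gate flags */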
+static const struct samsung_cmu_info peric1_cmu_info __initconst = {
+ .mux_clks = peric1_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peric1_mux_clks),
+ .div_clks = peric1_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peric1_div_clks),
+ .gate_clks = peric1_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peric1_gate_clks),
+ .nr_clk_ids = CLKS_NR_PERIC1,
+ .clk_regs = peric1_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peric1_clk_regs),
+ .clk_name = "bus",
+};
+
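The samsung_cmu_info table above is consumed through the common Exynos ARM64 registration helper; the exynos990 probe updated later in this diff follows the usual shape. A minimal sketch of that path, with .clk_name ("bus" here) naming the parent bus clock the helper keeps enabled while the CMU registers are accessed:

	static int __init exynos990_cmu_probe(struct platform_device *pdev)
	{
		const struct samsung_cmu_info *info;
		struct device *dev = &pdev->dev;

		info = of_device_get_match_data(dev);
		exynos_arm64_register_cmu(dev, dev->of_node, info);

		return 0;
	}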
/* ---- CMU_PERIS ----------------------------------------------------------- */
/* Register Offset definitions for CMU_PERIS (0x10020000) */
@@ -1500,6 +2690,12 @@ static const struct of_device_id exynos990_cmu_of_match[] = {
{
.compatible = "samsung,exynos990-cmu-hsi0",
.data = &hsi0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos990-cmu-peric0",
+ .data = &peric0_cmu_info,
+ }, {
+ .compatible = "samsung,exynos990-cmu-peric1",
+ .data = &peric1_cmu_info,
},
{ },
};
diff --git a/drivers/clk/samsung/clk-exynosautov920.c b/drivers/clk/samsung/clk-exynosautov920.c
index 572b6ace14ac..b90b73c3518f 100644
--- a/drivers/clk/samsung/clk-exynosautov920.c
+++ b/drivers/clk/samsung/clk-exynosautov920.c
@@ -27,6 +27,8 @@
#define CLKS_NR_HSI0 (CLK_DOUT_HSI0_PCIE_APB + 1)
#define CLKS_NR_HSI1 (CLK_MOUT_HSI1_USBDRD + 1)
#define CLKS_NR_HSI2 (CLK_DOUT_HSI2_ETHERNET_PTP + 1)
+#define CLKS_NR_M2M (CLK_DOUT_M2M_NOCP + 1)
+#define CLKS_NR_MFC (CLK_DOUT_MFC_NOCP + 1)
/* ---- CMU_TOP ------------------------------------------------------------ */
@@ -1821,6 +1823,88 @@ static const struct samsung_cmu_info hsi2_cmu_info __initconst = {
.clk_name = "noc",
};
+/* ---- CMU_M2M --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_M2M (0x1a800000) */
+#define PLL_CON0_MUX_CLKCMU_M2M_JPEG_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_M2M_NOC_USER 0x610
+#define CLK_CON_DIV_DIV_CLK_M2M_NOCP 0x1800
+
+static const unsigned long m2m_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_M2M_JPEG_USER,
+ PLL_CON0_MUX_CLKCMU_M2M_NOC_USER,
+ CLK_CON_DIV_DIV_CLK_M2M_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_M2M */
+PNAME(mout_clkcmu_m2m_noc_user_p) = { "oscclk", "dout_clkcmu_m2m_noc" };
+PNAME(mout_clkcmu_m2m_jpeg_user_p) = { "oscclk", "dout_clkcmu_m2m_jpeg" };
+
+static const struct samsung_mux_clock m2m_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_M2M_JPEG_USER, "mout_clkcmu_m2m_jpeg_user",
+ mout_clkcmu_m2m_jpeg_user_p, PLL_CON0_MUX_CLKCMU_M2M_JPEG_USER, 4, 1),
+ MUX(CLK_MOUT_M2M_NOC_USER, "mout_clkcmu_m2m_noc_user",
+ mout_clkcmu_m2m_noc_user_p, PLL_CON0_MUX_CLKCMU_M2M_NOC_USER, 4, 1),
+};
+
+static const struct samsung_div_clock m2m_div_clks[] __initconst = {
+ DIV(CLK_DOUT_M2M_NOCP, "dout_m2m_nocp",
+ "mout_clkcmu_m2m_noc_user", CLK_CON_DIV_DIV_CLK_M2M_NOCP,
+ 0, 3),
+};
+
+static const struct samsung_cmu_info m2m_cmu_info __initconst = {
+ .mux_clks = m2m_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(m2m_mux_clks),
+ .div_clks = m2m_div_clks,
+ .nr_div_clks = ARRAY_SIZE(m2m_div_clks),
+ .nr_clk_ids = CLKS_NR_M2M,
+ .clk_regs = m2m_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(m2m_clk_regs),
+ .clk_name = "noc",
+};
+
+/* ---- CMU_MFC --------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_MFC (0x19c00000) */
+#define PLL_CON0_MUX_CLKCMU_MFC_MFC_USER 0x600
+#define PLL_CON0_MUX_CLKCMU_MFC_WFD_USER 0x610
+#define CLK_CON_DIV_DIV_CLK_MFC_NOCP 0x1800
+
+static const unsigned long mfc_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_MFC_MFC_USER,
+ PLL_CON0_MUX_CLKCMU_MFC_WFD_USER,
+ CLK_CON_DIV_DIV_CLK_MFC_NOCP,
+};
+
+/* List of parent clocks for Muxes in CMU_MFC */
+PNAME(mout_clkcmu_mfc_mfc_user_p) = { "oscclk", "dout_clkcmu_mfc_mfc" };
+PNAME(mout_clkcmu_mfc_wfd_user_p) = { "oscclk", "dout_clkcmu_mfc_wfd" };
+
+static const struct samsung_mux_clock mfc_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_MFC_MFC_USER, "mout_clkcmu_mfc_mfc_user",
+ mout_clkcmu_mfc_mfc_user_p, PLL_CON0_MUX_CLKCMU_MFC_MFC_USER, 4, 1),
+ MUX(CLK_MOUT_MFC_WFD_USER, "mout_clkcmu_mfc_wfd_user",
+ mout_clkcmu_mfc_wfd_user_p, PLL_CON0_MUX_CLKCMU_MFC_WFD_USER, 4, 1),
+};
+
+static const struct samsung_div_clock mfc_div_clks[] __initconst = {
+ DIV(CLK_DOUT_MFC_NOCP, "dout_mfc_nocp",
+ "mout_clkcmu_mfc_mfc_user", CLK_CON_DIV_DIV_CLK_MFC_NOCP,
+ 0, 3),
+};
+
+static const struct samsung_cmu_info mfc_cmu_info __initconst = {
+ .mux_clks = mfc_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(mfc_mux_clks),
+ .div_clks = mfc_div_clks,
+ .nr_div_clks = ARRAY_SIZE(mfc_div_clks),
+ .nr_clk_ids = CLKS_NR_MFC,
+ .clk_regs = mfc_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(mfc_clk_regs),
+ .clk_name = "noc",
+};
+
static int __init exynosautov920_cmu_probe(struct platform_device *pdev)
{
const struct samsung_cmu_info *info;
@@ -1851,6 +1935,12 @@ static const struct of_device_id exynosautov920_cmu_of_match[] = {
}, {
.compatible = "samsung,exynosautov920-cmu-hsi2",
.data = &hsi2_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-m2m",
+ .data = &m2m_cmu_info,
+ }, {
+ .compatible = "samsung,exynosautov920-cmu-mfc",
+ .data = &mfc_cmu_info,
},
{ }
};
diff --git a/drivers/clk/samsung/clk-fsd.c b/drivers/clk/samsung/clk-fsd.c
index 594931334574..4124d65e3d18 100644
--- a/drivers/clk/samsung/clk-fsd.c
+++ b/drivers/clk/samsung/clk-fsd.c
@@ -89,7 +89,7 @@
#define CLKS_NR_FSYS1 (PCIE_LINK1_IPCLKPORT_SLV_ACLK + 1)
#define CLKS_NR_IMEM (IMEM_TMU_GT_IPCLKPORT_I_CLK_TS + 1)
#define CLKS_NR_MFC (MFC_MFC_IPCLKPORT_ACLK + 1)
-#define CLKS_NR_CAM_CSI (CAM_CSI2_3_IPCLKPORT_I_ACLK + 1)
+#define CLKS_NR_CAM_CSI (CAM_CSI2_3_IPCLKPORT_I_PCLK + 1)
static const unsigned long cmu_clk_regs[] __initconst = {
PLL_LOCKTIME_PLL_SHARED0,
@@ -1646,7 +1646,7 @@ static const struct samsung_pll_rate_table pll_cam_csi_rate_table[] __initconst
};
static const struct samsung_pll_clock cam_csi_pll_clks[] __initconst = {
- PLL(pll_142xx, 0, "fout_pll_cam_csi", "fin_pll",
+ PLL(pll_142xx, CAM_CSI_PLL, "fout_pll_cam_csi", "fin_pll",
PLL_LOCKTIME_PLL_CAM_CSI, PLL_CON0_PLL_CAM_CSI, pll_cam_csi_rate_table),
};
@@ -1682,51 +1682,51 @@ static const struct samsung_gate_clock cam_csi_gate_clks[] __initconst = {
GAT_CAM_CSI_BUS_D_CAM_CSI_IPCLKPORT_CLK__SYSTEM__NOC, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_0_IPCLKPORT_I_ACLK, "cam_csi0_0_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_0_IPCLKPORT_I_PCLK, "cam_csi0_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_1_IPCLKPORT_I_ACLK, "cam_csi0_1_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_1_IPCLKPORT_I_PCLK, "cam_csi0_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_2_IPCLKPORT_I_ACLK, "cam_csi0_2_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_2_IPCLKPORT_I_PCLK, "cam_csi0_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI0_3_IPCLKPORT_I_ACLK, "cam_csi0_3_ipclkport_i_aclk", "dout_cam_csi0_aclk",
GAT_CAM_CSI0_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI0_3_IPCLKPORT_I_PCLK, "cam_csi0_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI0_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_0_IPCLKPORT_I_ACLK, "cam_csi1_0_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_0_IPCLKPORT_I_PCLK, "cam_csi1_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_1_IPCLKPORT_I_ACLK, "cam_csi1_1_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_1_IPCLKPORT_I_PCLK, "cam_csi1_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_2_IPCLKPORT_I_ACLK, "cam_csi1_2_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_2_IPCLKPORT_I_PCLK, "cam_csi1_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI1_3_IPCLKPORT_I_ACLK, "cam_csi1_3_ipclkport_i_aclk", "dout_cam_csi1_aclk",
GAT_CAM_CSI1_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI1_3_IPCLKPORT_I_PCLK, "cam_csi1_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI1_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_0_IPCLKPORT_I_ACLK, "cam_csi2_0_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_0_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_0_IPCLKPORT_I_PCLK, "cam_csi2_0_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_0_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_1_IPCLKPORT_I_ACLK, "cam_csi2_1_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_1_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_1_IPCLKPORT_I_PCLK, "cam_csi2_1_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_1_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_2_IPCLKPORT_I_ACLK, "cam_csi2_2_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_2_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_2_IPCLKPORT_I_PCLK, "cam_csi2_2_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_2_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CAM_CSI2_3_IPCLKPORT_I_ACLK, "cam_csi2_3_ipclkport_i_aclk", "dout_cam_csi2_aclk",
GAT_CAM_CSI2_3_IPCLKPORT_I_ACLK, 21, CLK_IGNORE_UNUSED, 0),
- GATE(0, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp",
+ GATE(CAM_CSI2_3_IPCLKPORT_I_PCLK, "cam_csi2_3_ipclkport_i_pclk", "dout_cam_csi_busp",
GAT_CAM_CSI2_3_IPCLKPORT_I_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(0, "cam_ns_brdg_cam_csi_ipclkport_clk__psoc_cam_csi__clk_cam_csi_d",
"dout_cam_csi_busd",
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index e4faf02b631e..0a8fc9649ae2 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -11,14 +11,12 @@
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/timekeeping.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include "clk.h"
#include "clk-pll.h"
-#define PLL_TIMEOUT_US 20000U
-#define PLL_TIMEOUT_LOOPS 1000000U
+#define PLL_TIMEOUT_LOOPS 20000U
struct samsung_clk_pll {
struct clk_hw hw;
@@ -49,8 +47,8 @@ static const struct samsung_pll_rate_table *samsung_get_pll_settings(
return NULL;
}
-static long samsung_pll_round_rate(struct clk_hw *hw,
- unsigned long drate, unsigned long *prate)
+static int samsung_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
const struct samsung_pll_rate_table *rate_table = pll->rate_table;
@@ -58,28 +56,24 @@ static long samsung_pll_round_rate(struct clk_hw *hw,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++) {
- if (drate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
}
/* return minimum supported value */
- return rate_table[i - 1].rate;
-}
+ req->rate = rate_table[i - 1].rate;
-static bool pll_early_timeout = true;
-
-static int __init samsung_pll_disable_early_timeout(void)
-{
- pll_early_timeout = false;
return 0;
}
-arch_initcall(samsung_pll_disable_early_timeout);
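The .round_rate to .determine_rate conversions made throughout this series are mechanical; a generic sketch of the equivalence (the foo_* names are placeholders, not symbols from this patch):

	/* Old-style callback: returns the rounded rate or a negative errno. */
	static long foo_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate);

	/* New-style callback: the same decision, reported via the request. */
	static int foo_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
	{
		long rounded = foo_round_rate(hw, req->rate,
					      &req->best_parent_rate);

		if (rounded < 0)
			return rounded;

		req->rate = rounded;
		return 0;
	}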
/* Wait until the PLL is locked */
static int samsung_pll_lock_wait(struct samsung_clk_pll *pll,
unsigned int reg_mask)
{
- int i, ret;
+ int ret;
u32 val;
/*
@@ -88,25 +82,15 @@ static int samsung_pll_lock_wait(struct samsung_clk_pll *pll,
* initialized, another when the timekeeping is suspended. udelay() also
* cannot be used when the clocksource is not running on arm64, since
* the current timer is used as cycle counter. So a simple busy loop
- * is used here in that special cases. The limit of iterations has been
- * derived from experimental measurements of various PLLs on multiple
- * Exynos SoC variants. Single register read time was usually in range
- * 0.4...1.5 us, never less than 0.4 us.
+ * is used here.
+ * The limit of iterations has been derived from experimental
+ * measurements of various PLLs on multiple Exynos SoC variants. Single
+ * register read time was usually in range 0.4...1.5 us, never less than
+ * 0.4 us.
*/
- if (pll_early_timeout || timekeeping_suspended) {
- i = PLL_TIMEOUT_LOOPS;
- while (i-- > 0) {
- if (readl_relaxed(pll->con_reg) & reg_mask)
- return 0;
-
- cpu_relax();
- }
- ret = -ETIMEDOUT;
- } else {
- ret = readl_relaxed_poll_timeout_atomic(pll->con_reg, val,
- val & reg_mask, 0, PLL_TIMEOUT_US);
- }
-
+ ret = readl_relaxed_poll_timeout_atomic(pll->con_reg, val,
+ val & reg_mask, 0,
+ PLL_TIMEOUT_LOOPS);
if (ret < 0)
pr_err("Could not lock PLL %s\n", clk_hw_get_name(&pll->hw));
@@ -273,7 +257,7 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
}
/* Set PLL lock time. */
- if (pll->type == pll_142xx)
+ if (pll->type == pll_142xx || pll->type == pll_1017x)
writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR,
pll->lock_reg);
else
@@ -298,7 +282,7 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll35xx_clk_ops = {
.recalc_rate = samsung_pll35xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll35xx_set_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
@@ -411,7 +395,7 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll36xx_clk_ops = {
.recalc_rate = samsung_pll36xx_recalc_rate,
.set_rate = samsung_pll36xx_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
};
@@ -514,7 +498,7 @@ static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll0822x_clk_ops = {
.recalc_rate = samsung_pll0822x_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll0822x_set_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
@@ -612,7 +596,7 @@ static int samsung_pll0831x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll0831x_clk_ops = {
.recalc_rate = samsung_pll0831x_recalc_rate,
.set_rate = samsung_pll0831x_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.enable = samsung_pll3xxx_enable,
.disable = samsung_pll3xxx_disable,
};
@@ -735,7 +719,7 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll45xx_clk_ops = {
.recalc_rate = samsung_pll45xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll45xx_set_rate,
};
@@ -880,7 +864,7 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll46xx_clk_ops = {
.recalc_rate = samsung_pll46xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll46xx_set_rate,
};
@@ -1093,7 +1077,7 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2550xx_clk_ops = {
.recalc_rate = samsung_pll2550xx_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll2550xx_set_rate,
};
@@ -1185,7 +1169,7 @@ static int samsung_pll2650x_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2650x_clk_ops = {
.recalc_rate = samsung_pll2650x_recalc_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
.set_rate = samsung_pll2650x_set_rate,
};
@@ -1277,7 +1261,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops samsung_pll2650xx_clk_ops = {
.recalc_rate = samsung_pll2650xx_recalc_rate,
.set_rate = samsung_pll2650xx_set_rate,
- .round_rate = samsung_pll_round_rate,
+ .determine_rate = samsung_pll_determine_rate,
};
static const struct clk_ops samsung_pll2650xx_clk_min_ops = {
@@ -1325,6 +1309,125 @@ static const struct clk_ops samsung_pll531x_clk_ops = {
.recalc_rate = samsung_pll531x_recalc_rate,
};
+/*
+ * PLL1031x Clock Type: integer M/P/S dividers plus a 16-bit fractional K
+ */
+#define PLL1031X_LOCK_FACTOR (500)
+
+#define PLL1031X_MDIV_MASK (0x3ff)
+#define PLL1031X_PDIV_MASK (0x3f)
+#define PLL1031X_SDIV_MASK (0x7)
+#define PLL1031X_MDIV_SHIFT (16)
+#define PLL1031X_PDIV_SHIFT (8)
+#define PLL1031X_SDIV_SHIFT (0)
+
+#define PLL1031X_KDIV_MASK (0xffff)
+#define PLL1031X_KDIV_SHIFT (0)
+#define PLL1031X_MFR_MASK (0x3f)
+#define PLL1031X_MRR_MASK (0x1f)
+#define PLL1031X_MFR_SHIFT (16)
+#define PLL1031X_MRR_SHIFT (24)
+
+static unsigned long samsung_pll1031x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con3;
+ u64 fvco = parent_rate;
+
+ pll_con0 = readl_relaxed(pll->con_reg);
+ pll_con3 = readl_relaxed(pll->con_reg + 0xc);
+ mdiv = (pll_con0 >> PLL1031X_MDIV_SHIFT) & PLL1031X_MDIV_MASK;
+ pdiv = (pll_con0 >> PLL1031X_PDIV_SHIFT) & PLL1031X_PDIV_MASK;
+ sdiv = (pll_con0 >> PLL1031X_SDIV_SHIFT) & PLL1031X_SDIV_MASK;
+ kdiv = (pll_con3 & PLL1031X_KDIV_MASK);
+
+ fvco *= (mdiv << PLL1031X_MDIV_SHIFT) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= PLL1031X_MDIV_SHIFT;
+
+ return (unsigned long)fvco;
+}
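In closed form, the computation above is the standard fractional-PLL relation; K is a 16-bit fraction, so the M/K sum is carried in 16.16 fixed point and the 16-bit MDIV shift doubles as the fraction width:

	f_out = f_in * (M + K / 2^16) / (P * 2^S)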
+
+static bool samsung_pll1031x_mpk_change(u32 pll_con0, u32 pll_con3,
+ const struct samsung_pll_rate_table *rate)
+{
+ u32 old_mdiv, old_pdiv, old_kdiv;
+
+ old_mdiv = (pll_con0 >> PLL1031X_MDIV_SHIFT) & PLL1031X_MDIV_MASK;
+ old_pdiv = (pll_con0 >> PLL1031X_PDIV_SHIFT) & PLL1031X_PDIV_MASK;
+ old_kdiv = (pll_con3 >> PLL1031X_KDIV_SHIFT) & PLL1031X_KDIV_MASK;
+
+ return (old_mdiv != rate->mdiv || old_pdiv != rate->pdiv ||
+ old_kdiv != rate->kdiv);
+}
+
+static int samsung_pll1031x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ const struct samsung_pll_rate_table *rate;
+ u32 con0, con3;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ con0 = readl_relaxed(pll->con_reg);
+ con3 = readl_relaxed(pll->con_reg + 0xc);
+
+ if (!samsung_pll1031x_mpk_change(con0, con3, rate)) {
+ /* Only the S divider changed, so just update the S value */
+ con0 &= ~(PLL1031X_SDIV_MASK << PLL1031X_SDIV_SHIFT);
+ con0 |= rate->sdiv << PLL1031X_SDIV_SHIFT;
+ writel_relaxed(con0, pll->con_reg);
+
+ return 0;
+ }
+
+ /* Set PLL lock time. */
+ writel_relaxed(rate->pdiv * PLL1031X_LOCK_FACTOR, pll->lock_reg);
+
+ /* Set PLL M, P, and S values. */
+ con0 &= ~((PLL1031X_MDIV_MASK << PLL1031X_MDIV_SHIFT) |
+ (PLL1031X_PDIV_MASK << PLL1031X_PDIV_SHIFT) |
+ (PLL1031X_SDIV_MASK << PLL1031X_SDIV_SHIFT));
+
+ con0 |= (rate->mdiv << PLL1031X_MDIV_SHIFT) |
+ (rate->pdiv << PLL1031X_PDIV_SHIFT) |
+ (rate->sdiv << PLL1031X_SDIV_SHIFT);
+
+ /* Set PLL K, MFR and MRR values. */
+ con3 = readl_relaxed(pll->con_reg + 0xc);
+ con3 &= ~((PLL1031X_KDIV_MASK << PLL1031X_KDIV_SHIFT) |
+ (PLL1031X_MFR_MASK << PLL1031X_MFR_SHIFT) |
+ (PLL1031X_MRR_MASK << PLL1031X_MRR_SHIFT));
+ con3 |= (rate->kdiv << PLL1031X_KDIV_SHIFT) |
+ (rate->mfr << PLL1031X_MFR_SHIFT) |
+ (rate->mrr << PLL1031X_MRR_SHIFT);
+
+ /* Write configuration to PLL */
+ writel_relaxed(con0, pll->con_reg);
+ writel_relaxed(con3, pll->con_reg + 0xc);
+
+ /* Wait for the PLL to lock */
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+}
+
+static const struct clk_ops samsung_pll1031x_clk_ops = {
+ .recalc_rate = samsung_pll1031x_recalc_rate,
+ .determine_rate = samsung_pll_determine_rate,
+ .set_rate = samsung_pll1031x_set_rate,
+};
+
+static const struct clk_ops samsung_pll1031x_clk_min_ops = {
+ .recalc_rate = samsung_pll1031x_recalc_rate,
+};
+
static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_clk)
{
@@ -1373,6 +1476,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_1451x:
case pll_1452x:
case pll_142xx:
+ case pll_1017x:
pll->enable_offs = PLL35XX_ENABLE_SHIFT;
pll->lock_offs = PLL35XX_LOCK_STAT_SHIFT;
if (!pll->rate_table)
@@ -1468,6 +1572,12 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
case pll_4311:
init.ops = &samsung_pll531x_clk_ops;
break;
+ case pll_1031x:
+ if (!pll->rate_table)
+ init.ops = &samsung_pll1031x_clk_min_ops;
+ else
+ init.ops = &samsung_pll1031x_clk_ops;
+ break;
default:
pr_warn("%s: Unknown pll type for pll clk %s\n",
__func__, pll_clk->name);
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index e9a5f8e0e0a3..6c8bb7f26da5 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -49,6 +49,8 @@ enum samsung_pll_type {
pll_0718x,
pll_0732x,
pll_4311,
+ pll_1017x,
+ pll_1031x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index b1fd8fac3a4c..c9fcb23de183 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -36,7 +36,7 @@ static unsigned long reg_save[][2] = {
{ASS_CLK_GATE, 0},
};
-static int s5pv210_audss_clk_suspend(void)
+static int s5pv210_audss_clk_suspend(void *data)
{
int i;
@@ -46,7 +46,7 @@ static int s5pv210_audss_clk_suspend(void)
return 0;
}
-static void s5pv210_audss_clk_resume(void)
+static void s5pv210_audss_clk_resume(void *data)
{
int i;
@@ -54,10 +54,14 @@ static void s5pv210_audss_clk_resume(void)
writel(reg_save[i][1], reg_base + reg_save[i][0]);
}
-static struct syscore_ops s5pv210_audss_clk_syscore_ops = {
+static const struct syscore_ops s5pv210_audss_clk_syscore_ops = {
.suspend = s5pv210_audss_clk_suspend,
.resume = s5pv210_audss_clk_resume,
};
+
+static struct syscore s5pv210_audss_clk_syscore = {
+ .ops = &s5pv210_audss_clk_syscore_ops,
+};
#endif /* CONFIG_PM_SLEEP */
/* register s5pv210_audss clocks */
@@ -175,7 +179,7 @@ static int s5pv210_audss_clk_probe(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
- register_syscore_ops(&s5pv210_audss_clk_syscore_ops);
+ register_syscore(&s5pv210_audss_clk_syscore);
#endif
return 0;
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index dbc9925ca8f4..c149ca6c2217 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -271,7 +271,7 @@ void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
}
#ifdef CONFIG_PM_SLEEP
-static int samsung_clk_suspend(void)
+static int samsung_clk_suspend(void *data)
{
struct samsung_clock_reg_cache *reg_cache;
@@ -284,7 +284,7 @@ static int samsung_clk_suspend(void)
return 0;
}
-static void samsung_clk_resume(void)
+static void samsung_clk_resume(void *data)
{
struct samsung_clock_reg_cache *reg_cache;
@@ -293,11 +293,15 @@ static void samsung_clk_resume(void)
reg_cache->rd_num);
}
-static struct syscore_ops samsung_clk_syscore_ops = {
+static const struct syscore_ops samsung_clk_syscore_ops = {
.suspend = samsung_clk_suspend,
.resume = samsung_clk_resume,
};
+static struct syscore samsung_clk_syscore = {
+ .ops = &samsung_clk_syscore_ops,
+};
+
void samsung_clk_extended_sleep_init(void __iomem *reg_base,
const unsigned long *rdump,
unsigned long nr_rdump,
@@ -316,7 +320,7 @@ void samsung_clk_extended_sleep_init(void __iomem *reg_base,
panic("could not allocate register dump storage.\n");
if (list_empty(&clock_reg_cache_list))
- register_syscore_ops(&samsung_clk_syscore_ops);
+ register_syscore(&samsung_clk_syscore);
reg_cache->reg_base = reg_base;
reg_cache->rd_num = nr_rdump;
diff --git a/drivers/clk/sifive/fu540-prci.h b/drivers/clk/sifive/fu540-prci.h
index e0173324f3c5..d45193c210b4 100644
--- a/drivers/clk/sifive/fu540-prci.h
+++ b/drivers/clk/sifive/fu540-prci.h
@@ -49,7 +49,7 @@ static struct __prci_wrpll_data sifive_fu540_prci_gemgxlpll_data = {
static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
- .round_rate = sifive_prci_wrpll_round_rate,
+ .determine_rate = sifive_prci_wrpll_determine_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable = sifive_prci_clock_enable,
.disable = sifive_prci_clock_disable,
diff --git a/drivers/clk/sifive/fu740-prci.h b/drivers/clk/sifive/fu740-prci.h
index f31cd30fc395..c605a899d97d 100644
--- a/drivers/clk/sifive/fu740-prci.h
+++ b/drivers/clk/sifive/fu740-prci.h
@@ -55,7 +55,7 @@ static struct __prci_wrpll_data sifive_fu740_prci_cltxpll_data = {
static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = {
.set_rate = sifive_prci_wrpll_set_rate,
- .round_rate = sifive_prci_wrpll_round_rate,
+ .determine_rate = sifive_prci_wrpll_determine_rate,
.recalc_rate = sifive_prci_wrpll_recalc_rate,
.enable = sifive_prci_clock_enable,
.disable = sifive_prci_clock_disable,
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
index caba0400f8a2..4d1cc7adb2b3 100644
--- a/drivers/clk/sifive/sifive-prci.c
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -183,9 +183,8 @@ unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
return wrpll_calc_output_rate(&pwd->c, parent_rate);
}
-long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+int sifive_prci_wrpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
struct __prci_wrpll_data *pwd = pc->pwd;
@@ -193,9 +192,11 @@ long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
memcpy(&c, &pwd->c, sizeof(c));
- wrpll_configure_for_rate(&c, rate, *parent_rate);
+ wrpll_configure_for_rate(&c, req->rate, req->best_parent_rate);
- return wrpll_calc_output_rate(&c, *parent_rate);
+ req->rate = wrpll_calc_output_rate(&c, req->best_parent_rate);
+
+ return 0;
}
int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
diff --git a/drivers/clk/sifive/sifive-prci.h b/drivers/clk/sifive/sifive-prci.h
index 91658a88af4e..d74b2bddd08a 100644
--- a/drivers/clk/sifive/sifive-prci.h
+++ b/drivers/clk/sifive/sifive-prci.h
@@ -291,8 +291,8 @@ void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd);
void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd);
/* Linux clock framework integration */
-long sifive_prci_wrpll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate);
+int sifive_prci_wrpll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req);
int sifive_prci_wrpll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
int sifive_clk_is_enabled(struct clk_hw *hw);
diff --git a/drivers/clk/socfpga/Kconfig b/drivers/clk/socfpga/Kconfig
index 0cf16b894efb..d88277e2a898 100644
--- a/drivers/clk/socfpga/Kconfig
+++ b/drivers/clk/socfpga/Kconfig
@@ -13,7 +13,7 @@ config CLK_INTEL_SOCFPGA32
default ARM && ARCH_INTEL_SOCFPGA
config CLK_INTEL_SOCFPGA64
- bool "Intel Stratix / Agilex / N5X clock controller support" if COMPILE_TEST && (!ARM64 || !ARCH_INTEL_SOCFPGA)
+ bool "Intel Stratix / Agilex / N5X / Agilex5 clock controller support" if COMPILE_TEST && (!ARM64 || !ARCH_INTEL_SOCFPGA)
default ARM64 && ARCH_INTEL_SOCFPGA
endif # CLK_INTEL_SOCFPGA
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
index e8dfce339c91..a1ea2b988eaf 100644
--- a/drivers/clk/socfpga/Makefile
+++ b/drivers/clk/socfpga/Makefile
@@ -3,4 +3,4 @@ obj-$(CONFIG_CLK_INTEL_SOCFPGA32) += clk.o clk-gate.o clk-pll.o clk-periph.o \
clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o
obj-$(CONFIG_CLK_INTEL_SOCFPGA64) += clk-s10.o \
clk-pll-s10.o clk-periph-s10.o clk-gate-s10.o \
- clk-agilex.o
+ clk-agilex.o clk-agilex5.o
diff --git a/drivers/clk/socfpga/clk-agilex5.c b/drivers/clk/socfpga/clk-agilex5.c
new file mode 100644
index 000000000000..f7f0ad884f64
--- /dev/null
+++ b/drivers/clk/socfpga/clk-agilex5.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2024, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
+ */
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/clock/intel,agilex5-clkmgr.h>
+#include "stratix10-clk.h"
+#include "clk.h"
+
+/* External parent clocks come from DT via fw_name */
+static const char * const boot_pll_parents[] = {
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+};
+
+static const char * const main_pll_parents[] = {
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const periph_pll_parents[] = {
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+};
+
+/* Core free muxes */
+static const char * const core0_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const core1_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const core2_free_mux[] = {
+ "main_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const core3_free_mux[] = {
+ "main_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const dsu_free_mux[] = {
+ "main_pll_c2",
+ "peri_pll_c0",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const noc_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c1",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const emac_ptp_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const emaca_free_mux[] = {
+ "main_pll_c2",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const emacb_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const gpio_db_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c1",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const psi_ref_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const usb31_free_mux[] = {
+ "main_pll_c3",
+ "peri_pll_c2",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const s2f_user0_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+static const char * const s2f_user1_free_mux[] = {
+ "main_pll_c1",
+ "peri_pll_c3",
+ "osc1",
+ "cb-intosc-hs-div2-clk",
+ "f2s-free-clk",
+};
+
+/* Secondary muxes between free_clk and boot_clk */
+static const char * const core0_mux[] = {
+ "core0_free_clk",
+ "boot_clk",
+};
+
+static const char * const core1_mux[] = {
+ "core1_free_clk",
+ "boot_clk",
+};
+
+static const char * const core2_mux[] = {
+ "core2_free_clk",
+ "boot_clk",
+};
+
+static const char * const core3_mux[] = {
+ "core3_free_clk",
+ "boot_clk",
+};
+
+static const char * const dsu_mux[] = {
+ "dsu_free_clk",
+ "boot_clk",
+};
+
+static const char * const noc_mux[] = {
+ "noc_free_clk",
+ "boot_clk",
+};
+
+static const char * const emac_mux[] = {
+ "emaca_free_clk",
+ "emacb_free_clk",
+ "boot_clk",
+};
+
+static const char * const s2f_user0_mux[] = {
+ "s2f_user0_free_clk",
+ "boot_clk",
+};
+
+static const char * const s2f_user1_mux[] = {
+ "s2f_user1_free_clk",
+ "boot_clk",
+};
+
+static const char * const psi_mux[] = {
+ "psi_ref_free_clk",
+ "boot_clk",
+};
+
+static const char * const gpio_db_mux[] = {
+ "gpio_db_free_clk",
+ "boot_clk",
+};
+
+static const char * const emac_ptp_mux[] = {
+ "emac_ptp_free_clk",
+ "boot_clk",
+};
+
+static const char * const usb31_mux[] = {
+ "usb31_free_clk",
+ "boot_clk",
+};
+
+static const struct agilex5_pll_clock agilex5_pll_clks[] = {
+ {
+ .id = AGILEX5_BOOT_CLK,
+ .name = "boot_clk",
+ .parent_names = boot_pll_parents,
+ .num_parents = ARRAY_SIZE(boot_pll_parents),
+ .flags = 0,
+ .offset = 0x0,
+ },
+ {
+ .id = AGILEX5_MAIN_PLL_CLK,
+ .name = "main_pll",
+ .parent_names = main_pll_parents,
+ .num_parents = ARRAY_SIZE(main_pll_parents),
+ .flags = 0,
+ .offset = 0x48,
+ },
+ {
+ .id = AGILEX5_PERIPH_PLL_CLK,
+ .name = "periph_pll",
+ .parent_names = periph_pll_parents,
+ .num_parents = ARRAY_SIZE(periph_pll_parents),
+ .flags = 0,
+ .offset = 0x9C,
+ },
+};
+
+/* Main PLL C0, C1, C2, C3 and Peri PLL C0, C1, C2, C3, with ping-pong counters. */
+static const struct stratix10_perip_c_clock agilex5_main_perip_c_clks[] = {
+ { AGILEX5_MAIN_PLL_C0_CLK, "main_pll_c0", "main_pll", NULL, 1, 0,
+ 0x5C },
+ { AGILEX5_MAIN_PLL_C1_CLK, "main_pll_c1", "main_pll", NULL, 1, 0,
+ 0x60 },
+ { AGILEX5_MAIN_PLL_C2_CLK, "main_pll_c2", "main_pll", NULL, 1, 0,
+ 0x64 },
+ { AGILEX5_MAIN_PLL_C3_CLK, "main_pll_c3", "main_pll", NULL, 1, 0,
+ 0x68 },
+ { AGILEX5_PERIPH_PLL_C0_CLK, "peri_pll_c0", "periph_pll", NULL, 1, 0,
+ 0xB0 },
+ { AGILEX5_PERIPH_PLL_C1_CLK, "peri_pll_c1", "periph_pll", NULL, 1, 0,
+ 0xB4 },
+ { AGILEX5_PERIPH_PLL_C2_CLK, "peri_pll_c2", "periph_pll", NULL, 1, 0,
+ 0xB8 },
+ { AGILEX5_PERIPH_PLL_C3_CLK, "peri_pll_c3", "periph_pll", NULL, 1, 0,
+ 0xBC },
+};
+
+/* Clocks enabled without a SW clock gate */
+static const struct agilex5_perip_cnt_clock agilex5_main_perip_cnt_clks[] = {
+ { AGILEX5_CORE0_FREE_CLK, "core0_free_clk", core0_free_mux,
+ ARRAY_SIZE(core0_free_mux), 0, 0x0100, 0, 0, 0},
+ { AGILEX5_CORE1_FREE_CLK, "core1_free_clk", core1_free_mux,
+ ARRAY_SIZE(core1_free_mux), 0, 0x0104, 0, 0, 0},
+ { AGILEX5_CORE2_FREE_CLK, "core2_free_clk", core2_free_mux,
+ ARRAY_SIZE(core2_free_mux), 0, 0x010C, 0, 0, 0},
+ { AGILEX5_CORE3_FREE_CLK, "core3_free_clk", core3_free_mux,
+ ARRAY_SIZE(core3_free_mux), 0, 0x0110, 0, 0, 0},
+ { AGILEX5_DSU_FREE_CLK, "dsu_free_clk", dsu_free_mux,
+ ARRAY_SIZE(dsu_free_mux), 0, 0xFC, 0, 0, 0 },
+ { AGILEX5_NOC_FREE_CLK, "noc_free_clk", noc_free_mux,
+ ARRAY_SIZE(noc_free_mux), 0, 0x40, 0, 0, 0 },
+ { AGILEX5_EMAC_A_FREE_CLK, "emaca_free_clk", emaca_free_mux,
+ ARRAY_SIZE(emaca_free_mux), 0, 0xD4, 0, 0x88, 0 },
+ { AGILEX5_EMAC_B_FREE_CLK, "emacb_free_clk", emacb_free_mux,
+ ARRAY_SIZE(emacb_free_mux), 0, 0xD8, 0, 0x88, 1 },
+ { AGILEX5_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", emac_ptp_free_mux,
+ ARRAY_SIZE(emac_ptp_free_mux), 0, 0xDC, 0, 0x88, 2 },
+ { AGILEX5_GPIO_DB_FREE_CLK, "gpio_db_free_clk", gpio_db_free_mux,
+ ARRAY_SIZE(gpio_db_free_mux), 0, 0xE0, 0, 0x88, 3 },
+ { AGILEX5_S2F_USER0_FREE_CLK, "s2f_user0_free_clk", s2f_user0_free_mux,
+ ARRAY_SIZE(s2f_user0_free_mux), 0, 0xE8, 0, 0x30, 2 },
+ { AGILEX5_S2F_USER1_FREE_CLK, "s2f_user1_free_clk", s2f_user1_free_mux,
+ ARRAY_SIZE(s2f_user1_free_mux), 0, 0xEC, 0, 0x88, 5 },
+ { AGILEX5_PSI_REF_FREE_CLK, "psi_ref_free_clk", psi_ref_free_mux,
+ ARRAY_SIZE(psi_ref_free_mux), 0, 0xF0, 0, 0x88, 6 },
+ { AGILEX5_USB31_FREE_CLK, "usb31_free_clk", usb31_free_mux,
+ ARRAY_SIZE(usb31_free_mux), 0, 0xF8, 0, 0x88, 7 },
+};
+
+static const char * const cs_pdbg_parents[] = { "cs_at_clk" };
+static const char * const usb31_bus_clk_early_parents[] = { "l4_main_clk" };
+static const char * const l4_mp_clk_parent[] = { "l4_mp_clk" };
+static const char * const l4_sp_clk_parent[] = { "l4_sp_clk" };
+static const char * const dfi_clk_parent[] = { "dfi_clk" };
+
+/* Clocks with a SW-controlled gate */
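+/*
+ * Entry layout per struct agilex5_gate_clock: { id, name, parent_names,
+ * num_parents, flags, gate_reg, gate_idx, div_reg, div_offset, div_width,
+ * bypass_reg, bypass_shift, fixed_div }
+ */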
+static const struct agilex5_gate_clock agilex5_gate_clks[] = {
+ { AGILEX5_CORE0_CLK, "core0_clk", core0_mux,
+ ARRAY_SIZE(core0_mux), 0, 0x24, 8, 0, 0, 0, 0x30, 5, 0 },
+ { AGILEX5_CORE1_CLK, "core1_clk", core1_mux,
+ ARRAY_SIZE(core1_mux), 0, 0x24, 9, 0, 0, 0, 0x30, 5, 0 },
+ { AGILEX5_CORE2_CLK, "core2_clk", core2_mux,
+ ARRAY_SIZE(core2_mux), 0, 0x24, 10, 0, 0, 0, 0x30, 6, 0 },
+ { AGILEX5_CORE3_CLK, "core3_clk", core3_mux,
+ ARRAY_SIZE(core3_mux), 0, 0x24, 11, 0, 0, 0, 0x30, 7, 0 },
+ { AGILEX5_MPU_CLK, "dsu_clk", dsu_mux, ARRAY_SIZE(dsu_mux), 0, 0, 0,
+ 0, 0, 0, 0x34, 4, 0 },
+ { AGILEX5_MPU_PERIPH_CLK, "mpu_periph_clk", dsu_mux,
+ ARRAY_SIZE(dsu_mux), 0, 0, 0, 0x44, 20, 2, 0x34, 4, 0 },
+ { AGILEX5_MPU_CCU_CLK, "mpu_ccu_clk", dsu_mux,
+ ARRAY_SIZE(dsu_mux), 0, 0, 0, 0x44, 18, 2, 0x34, 4, 0 },
+ { AGILEX5_L4_MAIN_CLK, "l4_main_clk", noc_mux, ARRAY_SIZE(noc_mux),
+ CLK_IS_CRITICAL, 0x24, 1, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_L4_MP_CLK, "l4_mp_clk", noc_mux, ARRAY_SIZE(noc_mux), 0,
+ 0x24, 2, 0x44, 4, 2, 0x30, 1, 0 },
+ { AGILEX5_L4_SYS_FREE_CLK, "l4_sys_free_clk", noc_mux,
+ ARRAY_SIZE(noc_mux), 0, 0, 0, 0x44, 2, 2, 0x30, 1, 0 },
+ { AGILEX5_L4_SP_CLK, "l4_sp_clk", noc_mux, ARRAY_SIZE(noc_mux),
+ CLK_IS_CRITICAL, 0x24, 3, 0x44, 6, 2, 0x30, 1, 0 },
+
+ /* CoreSight clocks */
+ { AGILEX5_CS_AT_CLK, "cs_at_clk", noc_mux, ARRAY_SIZE(noc_mux), 0,
+ 0x24, 4, 0x44, 24, 2, 0x30, 1, 0 },
+ { AGILEX5_CS_TRACE_CLK, "cs_trace_clk", noc_mux,
+ ARRAY_SIZE(noc_mux), 0, 0x24, 4, 0x44, 26, 2, 0x30, 1, 0 },
+ { AGILEX5_CS_PDBG_CLK, "cs_pdbg_clk", cs_pdbg_parents, 1, 0, 0x24, 4,
+ 0x44, 28, 1, 0, 0, 0 },
+
+ /* Main Peripheral PLL1 Begin */
+ { AGILEX5_EMAC0_CLK, "emac0_clk", emac_mux, ARRAY_SIZE(emac_mux),
+ 0, 0x7C, 0, 0, 0, 0, 0x94, 26, 0 },
+ { AGILEX5_EMAC1_CLK, "emac1_clk", emac_mux, ARRAY_SIZE(emac_mux),
+ 0, 0x7C, 1, 0, 0, 0, 0x94, 27, 0 },
+ { AGILEX5_EMAC2_CLK, "emac2_clk", emac_mux, ARRAY_SIZE(emac_mux),
+ 0, 0x7C, 2, 0, 0, 0, 0x94, 28, 0 },
+ { AGILEX5_EMAC_PTP_CLK, "emac_ptp_clk", emac_ptp_mux,
+ ARRAY_SIZE(emac_ptp_mux), 0, 0x7C, 3, 0, 0, 0, 0x88, 2, 0 },
+ { AGILEX5_GPIO_DB_CLK, "gpio_db_clk", gpio_db_mux,
+ ARRAY_SIZE(gpio_db_mux), 0, 0x7C, 4, 0x98, 0, 16, 0x88, 3, 1 },
+ /* Main Peripheral PLL1 End */
+
+ /* Peripheral clocks */
+ { AGILEX5_S2F_USER0_CLK, "s2f_user0_clk", s2f_user0_mux,
+ ARRAY_SIZE(s2f_user0_mux), 0, 0x24, 6, 0, 0, 0, 0x30, 2, 0 },
+ { AGILEX5_S2F_USER1_CLK, "s2f_user1_clk", s2f_user1_mux,
+ ARRAY_SIZE(s2f_user1_mux), 0, 0x7C, 6, 0, 0, 0, 0x88, 5, 0 },
+ { AGILEX5_PSI_REF_CLK, "psi_ref_clk", psi_mux,
+ ARRAY_SIZE(psi_mux), 0, 0x7C, 7, 0, 0, 0, 0x88, 6, 0 },
+ { AGILEX5_USB31_SUSPEND_CLK, "usb31_suspend_clk", usb31_mux,
+ ARRAY_SIZE(usb31_mux), 0, 0x7C, 25, 0, 0, 0, 0x88, 7, 0 },
+ { AGILEX5_USB31_BUS_CLK_EARLY, "usb31_bus_clk_early", usb31_bus_clk_early_parents,
+ 1, 0, 0x7C, 25, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_USB2OTG_HCLK, "usb2otg_hclk", l4_mp_clk_parent, 1, 0, 0x7C,
+ 8, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPIM_0_CLK, "spim_0_clk", l4_mp_clk_parent, 1, 0, 0x7C, 9,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPIM_1_CLK, "spim_1_clk", l4_mp_clk_parent, 1, 0, 0x7C, 11,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPIS_0_CLK, "spis_0_clk", l4_sp_clk_parent, 1, 0, 0x7C, 12,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPIS_1_CLK, "spis_1_clk", l4_sp_clk_parent, 1, 0, 0x7C, 13,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_DMA_CORE_CLK, "dma_core_clk", l4_mp_clk_parent, 1, 0, 0x7C,
+ 14, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_DMA_HS_CLK, "dma_hs_clk", l4_mp_clk_parent, 1, 0, 0x7C, 14,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I3C_0_CORE_CLK, "i3c_0_core_clk", l4_mp_clk_parent, 1, 0,
+ 0x7C, 18, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I3C_1_CORE_CLK, "i3c_1_core_clk", l4_mp_clk_parent, 1, 0,
+ 0x7C, 19, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_0_PCLK, "i2c_0_pclk", l4_sp_clk_parent, 1, 0, 0x7C, 15,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_1_PCLK, "i2c_1_pclk", l4_sp_clk_parent, 1, 0, 0x7C, 16,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_EMAC0_PCLK, "i2c_emac0_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 17, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_EMAC1_PCLK, "i2c_emac1_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 22, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_I2C_EMAC2_PCLK, "i2c_emac2_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 27, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_UART_0_PCLK, "uart_0_pclk", l4_sp_clk_parent, 1, 0, 0x7C, 20,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_UART_1_PCLK, "uart_1_pclk", l4_sp_clk_parent, 1, 0, 0x7C, 21,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPTIMER_0_PCLK, "sptimer_0_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 23, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SPTIMER_1_PCLK, "sptimer_1_pclk", l4_sp_clk_parent, 1, 0,
+ 0x7C, 24, 0, 0, 0, 0, 0, 0 },
+
+ /* NAND, SD/MMC and SoftPHY overall clocking */
+ { AGILEX5_DFI_CLK, "dfi_clk", l4_mp_clk_parent, 1, 0, 0, 0, 0x44, 16,
+ 2, 0, 0, 0 },
+ { AGILEX5_NAND_NF_CLK, "nand_nf_clk", dfi_clk_parent, 1, 0, 0x7C, 10,
+ 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_NAND_BCH_CLK, "nand_bch_clk", l4_mp_clk_parent, 1, 0, 0x7C,
+ 10, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SDMMC_SDPHY_REG_CLK, "sdmmc_sdphy_reg_clk", l4_mp_clk_parent,
+ 1, 0, 0x7C, 5, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SDMCLK, "sdmclk", dfi_clk_parent, 1, 0, 0x7C, 5, 0, 0, 0,
+ 0, 0, 0 },
+ { AGILEX5_SOFTPHY_REG_PCLK, "softphy_reg_pclk", l4_mp_clk_parent, 1, 0,
+ 0x7C, 26, 0, 0, 0, 0, 0, 0 },
+ { AGILEX5_SOFTPHY_PHY_CLK, "softphy_phy_clk", l4_mp_clk_parent, 1, 0,
+ 0x7C, 26, 0x44, 16, 2, 0, 0, 0 },
+ { AGILEX5_SOFTPHY_CTRL_CLK, "softphy_ctrl_clk", dfi_clk_parent, 1, 0,
+ 0x7C, 26, 0, 0, 0, 0, 0, 0 },
+};
+
+static int
+agilex5_clk_register_c_perip(const struct stratix10_perip_c_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk_hw *hw_clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ hw_clk = s10_register_periph(&clks[i], base);
+ if (IS_ERR(hw_clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ continue;
+ }
+ data->clk_data.hws[clks[i].id] = hw_clk;
+ }
+ return 0;
+}
+
+static int
+agilex5_clk_register_cnt_perip(const struct agilex5_perip_cnt_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk_hw *hw_clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ hw_clk = agilex5_register_cnt_periph(&clks[i], base);
+ if (IS_ERR(hw_clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ continue;
+ }
+ data->clk_data.hws[clks[i].id] = hw_clk;
+ }
+
+ return 0;
+}
+
+static int agilex5_clk_register_gate(const struct agilex5_gate_clock *clks,
+ int nums,
+ struct stratix10_clock_data *data)
+{
+ struct clk_hw *hw_clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ hw_clk = agilex5_register_gate(&clks[i], base);
+ if (IS_ERR(hw_clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ continue;
+ }
+ data->clk_data.hws[clks[i].id] = hw_clk;
+ }
+
+ return 0;
+}
+
+static int agilex5_clk_register_pll(const struct agilex5_pll_clock *clks,
+ int nums, struct stratix10_clock_data *data)
+{
+ struct clk_hw *hw_clk;
+ void __iomem *base = data->base;
+ int i;
+
+ for (i = 0; i < nums; i++) {
+ hw_clk = agilex5_register_pll(&clks[i], base);
+ if (IS_ERR(hw_clk)) {
+ pr_err("%s: failed to register clock %s\n", __func__,
+ clks[i].name);
+ continue;
+ }
+ data->clk_data.hws[clks[i].id] = hw_clk;
+ }
+
+ return 0;
+}
+
+static int agilex5_clkmgr_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct stratix10_clock_data *clk_data;
+ void __iomem *base;
+ int i, num_clks;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ num_clks = AGILEX5_NUM_CLKS;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, clk_data.hws,
+ num_clks), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->base = base;
+ clk_data->clk_data.num = num_clks;
+
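+ /* Pre-fill every slot with -ENOENT so unregistered IDs fail lookup cleanly */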
+ for (i = 0; i < num_clks; i++)
+ clk_data->clk_data.hws[i] = ERR_PTR(-ENOENT);
+
+ agilex5_clk_register_pll(agilex5_pll_clks, ARRAY_SIZE(agilex5_pll_clks),
+ clk_data);
+
+ /* Main PLL C0-C3 and peripheral PLL C0-C3 */
+ agilex5_clk_register_c_perip(agilex5_main_perip_c_clks,
+ ARRAY_SIZE(agilex5_main_perip_c_clks),
+ clk_data);
+
+ agilex5_clk_register_cnt_perip(agilex5_main_perip_cnt_clks,
+ ARRAY_SIZE(agilex5_main_perip_cnt_clks),
+ clk_data);
+
+ agilex5_clk_register_gate(agilex5_gate_clks,
+ ARRAY_SIZE(agilex5_gate_clks), clk_data);
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data->clk_data);
+}
+
+static int agilex5_clkmgr_probe(struct platform_device *pdev)
+{
+ int (*probe_func)(struct platform_device *pdev);
+
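+ /* The OF match data supplies the SoC-specific init routine */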
+ probe_func = of_device_get_match_data(&pdev->dev);
+ if (!probe_func)
+ return -ENODEV;
+ return probe_func(pdev);
+}
+
+static const struct of_device_id agilex5_clkmgr_match_table[] = {
+ { .compatible = "intel,agilex5-clkmgr", .data = agilex5_clkmgr_init },
+ {}
+};
+
+static struct platform_driver agilex5_clkmgr_driver = {
+ .probe = agilex5_clkmgr_probe,
+ .driver = {
+ .name = "agilex5-clkmgr",
+ .suppress_bind_attrs = true,
+ .of_match_table = agilex5_clkmgr_match_table,
+ },
+};
+
+static int __init agilex5_clk_init(void)
+{
+ return platform_driver_register(&agilex5_clkmgr_driver);
+}
+core_initcall(agilex5_clk_init);
diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c
index 3930d922efb4..dce3ef137bf3 100644
--- a/drivers/clk/socfpga/clk-gate-s10.c
+++ b/drivers/clk/socfpga/clk-gate-s10.c
@@ -239,3 +239,56 @@ struct clk_hw *agilex_register_gate(const struct stratix10_gate_clock *clks, voi
}
return hw_clk;
}
+
+struct clk_hw *agilex5_register_gate(const struct agilex5_gate_clock *clks, void __iomem *regbase)
+{
+ struct clk_hw *hw_clk;
+ struct socfpga_gate_clk *socfpga_clk;
+ struct clk_init_data init;
+ int ret;
+
+ socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL);
+ if (!socfpga_clk)
+ return ERR_PTR(-ENOMEM);
+
+ socfpga_clk->hw.reg = regbase + clks->gate_reg;
+ socfpga_clk->hw.bit_idx = clks->gate_idx;
+
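+ /* Populate the shared gate ops with the generic enable/disable helpers */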
+ gateclk_ops.enable = clk_gate_ops.enable;
+ gateclk_ops.disable = clk_gate_ops.disable;
+
+ socfpga_clk->fixed_div = clks->fixed_div;
+
+ if (clks->div_reg)
+ socfpga_clk->div_reg = regbase + clks->div_reg;
+ else
+ socfpga_clk->div_reg = NULL;
+
+ socfpga_clk->width = clks->div_width;
+ socfpga_clk->shift = clks->div_offset;
+
+ if (clks->bypass_reg)
+ socfpga_clk->bypass_reg = regbase + clks->bypass_reg;
+ else
+ socfpga_clk->bypass_reg = NULL;
+ socfpga_clk->bypass_shift = clks->bypass_shift;
+
+ if (streq(clks->name, "cs_pdbg_clk"))
+ init.ops = &dbgclk_ops;
+ else
+ init.ops = &agilex_gateclk_ops;
+
+ init.name = clks->name;
+ init.flags = clks->flags;
+ init.num_parents = clks->num_parents;
+ init.parent_names = clks->parent_names;
+ socfpga_clk->hw.hw.init = &init;
+ hw_clk = &socfpga_clk->hw.hw;
+
+ ret = clk_hw_register(NULL, &socfpga_clk->hw.hw);
+ if (ret) {
+ kfree(socfpga_clk);
+ return ERR_PTR(ret);
+ }
+ return hw_clk;
+}
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index f5c1ca42b668..f12ca43ffe7c 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -214,3 +214,44 @@ struct clk_hw *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *c
}
return hw_clk;
}
+
+struct clk_hw *agilex5_register_cnt_periph(const struct agilex5_perip_cnt_clock *clks,
+ void __iomem *regbase)
+{
+ struct clk_hw *hw_clk;
+ struct socfpga_periph_clk *periph_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+ int ret;
+
+ periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL);
+ if (WARN_ON(!periph_clk))
+ return ERR_PTR(-ENOMEM);
+
+ if (clks->offset)
+ periph_clk->hw.reg = regbase + clks->offset;
+ else
+ periph_clk->hw.reg = NULL;
+
+ if (clks->bypass_reg)
+ periph_clk->bypass_reg = regbase + clks->bypass_reg;
+ else
+ periph_clk->bypass_reg = NULL;
+ periph_clk->bypass_shift = clks->bypass_shift;
+ periph_clk->fixed_div = clks->fixed_divider;
+
+ init.name = name;
+ init.ops = &peri_cnt_clk_ops;
+ init.flags = clks->flags;
+ init.num_parents = clks->num_parents;
+ init.parent_names = clks->parent_names;
+ periph_clk->hw.hw.init = &init;
+ hw_clk = &periph_clk->hw.hw;
+
+ ret = clk_hw_register(NULL, hw_clk);
+ if (ret) {
+ kfree(periph_clk);
+ return ERR_PTR(ret);
+ }
+ return hw_clk;
+}
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index a88c212bda12..1be92827cd93 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -304,3 +304,39 @@ struct clk_hw *n5x_register_pll(const struct stratix10_pll_clock *clks,
}
return hw_clk;
}
+
+struct clk_hw *agilex5_register_pll(const struct agilex5_pll_clock *clks,
+ void __iomem *reg)
+{
+ struct clk_hw *hw_clk;
+ struct socfpga_pll *pll_clk;
+ struct clk_init_data init;
+ const char *name = clks->name;
+ int ret;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (WARN_ON(!pll_clk))
+ return ERR_PTR(-ENOMEM);
+
+ pll_clk->hw.reg = reg + clks->offset;
+
+ if (streq(name, SOCFPGA_BOOT_CLK))
+ init.ops = &clk_boot_ops;
+ else
+ init.ops = &agilex_clk_pll_ops;
+
+ init.name = name;
+ init.flags = clks->flags;
+ init.num_parents = clks->num_parents;
+ init.parent_names = clks->parent_names;
+ pll_clk->hw.hw.init = &init;
+ pll_clk->hw.bit_idx = SOCFPGA_PLL_POWER;
+ hw_clk = &pll_clk->hw.hw;
+
+ ret = clk_hw_register(NULL, hw_clk);
+ if (ret) {
+ kfree(pll_clk);
+ return ERR_PTR(ret);
+ }
+ return hw_clk;
+}
diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h
index 83fe4eb3133c..d1fe4578b3e0 100644
--- a/drivers/clk/socfpga/stratix10-clk.h
+++ b/drivers/clk/socfpga/stratix10-clk.h
@@ -73,12 +73,55 @@ struct stratix10_gate_clock {
u8 fixed_div;
};
+struct agilex5_pll_clock {
+ unsigned int id;
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+};
+
+struct agilex5_perip_cnt_clock {
+ unsigned int id;
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long offset;
+ u8 fixed_divider;
+ unsigned long bypass_reg;
+ unsigned long bypass_shift;
+};
+
+struct agilex5_gate_clock {
+ unsigned int id;
+ const char *name;
+ const char * const *parent_names;
+ u8 num_parents;
+ unsigned long flags;
+ unsigned long gate_reg;
+ u8 gate_idx;
+ unsigned long div_reg;
+ u8 div_offset;
+ u8 div_width;
+ unsigned long bypass_reg;
+ u8 bypass_shift;
+ u8 fixed_div;
+};
+
struct clk_hw *s10_register_pll(const struct stratix10_pll_clock *clks,
void __iomem *reg);
struct clk_hw *agilex_register_pll(const struct stratix10_pll_clock *clks,
void __iomem *reg);
struct clk_hw *n5x_register_pll(const struct stratix10_pll_clock *clks,
void __iomem *reg);
+struct clk_hw *agilex5_register_pll(const struct agilex5_pll_clock *clks,
+ void __iomem *reg);
+struct clk_hw *agilex5_register_cnt_periph(const struct agilex5_perip_cnt_clock *clks,
+ void __iomem *regbase);
+struct clk_hw *agilex5_register_gate(const struct agilex5_gate_clock *clks,
+ void __iomem *regbase);
struct clk_hw *s10_register_periph(const struct stratix10_perip_c_clock *clks,
void __iomem *reg);
struct clk_hw *n5x_register_periph(const struct n5x_perip_c_clock *clks,
diff --git a/drivers/clk/sophgo/clk-cv18xx-ip.c b/drivers/clk/sophgo/clk-cv18xx-ip.c
index b186e64d4813..c2b58faf0938 100644
--- a/drivers/clk/sophgo/clk-cv18xx-ip.c
+++ b/drivers/clk/sophgo/clk-cv18xx-ip.c
@@ -45,10 +45,12 @@ static unsigned long gate_recalc_rate(struct clk_hw *hw,
return parent_rate;
}
-static long gate_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int gate_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return *parent_rate;
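+ /* A gate cannot scale its input, so report the parent rate back */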
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int gate_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -63,7 +65,7 @@ const struct clk_ops cv1800_clk_gate_ops = {
.is_enabled = gate_is_enabled,
.recalc_rate = gate_recalc_rate,
- .round_rate = gate_round_rate,
+ .determine_rate = gate_determine_rate,
.set_rate = gate_set_rate,
};
diff --git a/drivers/clk/sophgo/clk-sg2042-clkgen.c b/drivers/clk/sophgo/clk-sg2042-clkgen.c
index 9e61288d34f3..683661b71787 100644
--- a/drivers/clk/sophgo/clk-sg2042-clkgen.c
+++ b/drivers/clk/sophgo/clk-sg2042-clkgen.c
@@ -176,9 +176,8 @@ static unsigned long sg2042_clk_divider_recalc_rate(struct clk_hw *hw,
return ret_rate;
}
-static long sg2042_clk_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int sg2042_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sg2042_divider_clock *divider = to_sg2042_clk_divider(hw);
unsigned long ret_rate;
@@ -192,15 +191,17 @@ static long sg2042_clk_divider_round_rate(struct clk_hw *hw,
bestdiv = readl(divider->reg) >> divider->shift;
bestdiv &= clk_div_mask(divider->width);
}
- ret_rate = DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ ret_rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv);
} else {
- ret_rate = divider_round_rate(hw, rate, prate, NULL,
+ ret_rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, NULL,
divider->width, divider->div_flags);
}
pr_debug("--> %s: divider_round_rate: val = %ld\n",
clk_hw_get_name(hw), ret_rate);
- return ret_rate;
+ req->rate = ret_rate;
+
+ return 0;
}
static int sg2042_clk_divider_set_rate(struct clk_hw *hw,
@@ -258,13 +259,13 @@ static int sg2042_clk_divider_set_rate(struct clk_hw *hw,
static const struct clk_ops sg2042_clk_divider_ops = {
.recalc_rate = sg2042_clk_divider_recalc_rate,
- .round_rate = sg2042_clk_divider_round_rate,
+ .determine_rate = sg2042_clk_divider_determine_rate,
.set_rate = sg2042_clk_divider_set_rate,
};
static const struct clk_ops sg2042_clk_divider_ro_ops = {
.recalc_rate = sg2042_clk_divider_recalc_rate,
- .round_rate = sg2042_clk_divider_round_rate,
+ .determine_rate = sg2042_clk_divider_determine_rate,
};
/*
diff --git a/drivers/clk/sophgo/clk-sg2042-pll.c b/drivers/clk/sophgo/clk-sg2042-pll.c
index e5fb0bb7ac4f..110b6ee06fe4 100644
--- a/drivers/clk/sophgo/clk-sg2042-pll.c
+++ b/drivers/clk/sophgo/clk-sg2042-pll.c
@@ -346,37 +346,30 @@ static unsigned long sg2042_clk_pll_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long sg2042_clk_pll_round_rate(struct clk_hw *hw,
- unsigned long req_rate,
- unsigned long *prate)
+static int sg2042_clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sg2042_pll_ctrl pctrl_table;
unsigned int value;
long proper_rate;
int ret;
- ret = sg2042_get_pll_ctl_setting(&pctrl_table, req_rate, *prate);
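+ /* Clamp the request to max_rate before computing the PLL settings */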
+ ret = sg2042_get_pll_ctl_setting(&pctrl_table,
+ min(req->rate, req->max_rate),
+ req->best_parent_rate);
if (ret) {
proper_rate = 0;
goto out;
}
value = sg2042_pll_ctrl_encode(&pctrl_table);
- proper_rate = (long)sg2042_pll_recalc_rate(value, *prate);
+ proper_rate = (long)sg2042_pll_recalc_rate(value, req->best_parent_rate);
out:
- pr_debug("--> %s: pll_round_rate: val = %ld\n",
+ pr_debug("--> %s: pll_determine_rate: val = %ld\n",
clk_hw_get_name(hw), proper_rate);
- return proper_rate;
-}
+ req->rate = proper_rate;
-static int sg2042_clk_pll_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- req->rate = sg2042_clk_pll_round_rate(hw, min(req->rate, req->max_rate),
- &req->best_parent_rate);
- pr_debug("--> %s: pll_determine_rate: val = %ld\n",
- clk_hw_get_name(hw), req->rate);
return 0;
}
@@ -417,14 +410,13 @@ out:
static const struct clk_ops sg2042_clk_pll_ops = {
.recalc_rate = sg2042_clk_pll_recalc_rate,
- .round_rate = sg2042_clk_pll_round_rate,
.determine_rate = sg2042_clk_pll_determine_rate,
.set_rate = sg2042_clk_pll_set_rate,
};
static const struct clk_ops sg2042_clk_pll_ro_ops = {
.recalc_rate = sg2042_clk_pll_recalc_rate,
- .round_rate = sg2042_clk_pll_round_rate,
+ .determine_rate = sg2042_clk_pll_determine_rate,
};
/*
diff --git a/drivers/clk/spacemit/ccu-k1.c b/drivers/clk/spacemit/ccu-k1.c
index 65e6de030717..4761bc1e3b6e 100644
--- a/drivers/clk/spacemit/ccu-k1.c
+++ b/drivers/clk/spacemit/ccu-k1.c
@@ -136,13 +136,33 @@ CCU_GATE_DEFINE(pll1_d3_819p2, CCU_PARENT_HW(pll1_d3), MPMU_ACGR, BIT(14), 0);
CCU_GATE_DEFINE(pll1_d2_1228p8, CCU_PARENT_HW(pll1_d2), MPMU_ACGR, BIT(16), 0);
CCU_GATE_DEFINE(slow_uart, CCU_PARENT_NAME(osc), MPMU_ACGR, BIT(1), CLK_IGNORE_UNUSED);
-CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 0);
-CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 0);
+CCU_DDN_DEFINE(slow_uart1_14p74, pll1_d16_153p6, MPMU_SUCCR, 16, 13, 0, 13, 2, 0);
+CCU_DDN_DEFINE(slow_uart2_48, pll1_d4_614p4, MPMU_SUCCR_1, 16, 13, 0, 13, 2, 0);
CCU_GATE_DEFINE(wdt_clk, CCU_PARENT_HW(pll1_d96_25p6), MPMU_WDTPCR, BIT(1), 0);
-CCU_FACTOR_GATE_DEFINE(i2s_sysclk, CCU_PARENT_HW(pll1_d16_153p6), MPMU_ISCCR, BIT(31), 50, 1);
-CCU_FACTOR_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_sysclk), MPMU_ISCCR, BIT(29), 1, 1);
+CCU_FACTOR_DEFINE(i2s_153p6, CCU_PARENT_HW(pll1_d8_307p2), 2, 1);
+
+static const struct clk_parent_data i2s_153p6_base_parents[] = {
+ CCU_PARENT_HW(i2s_153p6),
+ CCU_PARENT_HW(pll1_d8_307p2),
+};
+CCU_MUX_DEFINE(i2s_153p6_base, i2s_153p6_base_parents, MPMU_FCCR, 29, 1, 0);
+
+static const struct clk_parent_data i2s_sysclk_src_parents[] = {
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(i2s_153p6_base),
+};
+CCU_MUX_GATE_DEFINE(i2s_sysclk_src, i2s_sysclk_src_parents, MPMU_ISCCR, 30, 1, BIT(31), 0);
+
+CCU_DDN_DEFINE(i2s_sysclk, i2s_sysclk_src, MPMU_ISCCR, 0, 15, 15, 12, 1, 0);
+
+CCU_FACTOR_DEFINE(i2s_bclk_factor, CCU_PARENT_HW(i2s_sysclk), 2, 1);
+/*
+ * The divider of i2s_bclk always implies an extra 1/2 factor,
+ * which is modelled by i2s_bclk_factor.
+ */
+CCU_DIV_GATE_DEFINE(i2s_bclk, CCU_PARENT_HW(i2s_bclk_factor), MPMU_ISCCR, 27, 2, BIT(29), 0);
static const struct clk_parent_data apb_parents[] = {
CCU_PARENT_HW(pll1_d96_25p6),
@@ -247,7 +267,14 @@ CCU_GATE_DEFINE(aib_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_AIB_CLK_RST, BIT(1),
CCU_GATE_DEFINE(onewire_clk, CCU_PARENT_NAME(vctcxo_24m), APBC_ONEWIRE_CLK_RST, BIT(1), 0);
-static const struct clk_parent_data sspa_parents[] = {
+/*
+ * When i2s_bclk is selected as the parent clock of an SSPA,
+ * the hardware requires bit 3 to be set.
+ */
+CCU_GATE_DEFINE(sspa0_i2s_bclk, CCU_PARENT_HW(i2s_bclk), APBC_SSPA0_CLK_RST, BIT(3), 0);
+CCU_GATE_DEFINE(sspa1_i2s_bclk, CCU_PARENT_HW(i2s_bclk), APBC_SSPA1_CLK_RST, BIT(3), 0);
+
+static const struct clk_parent_data sspa0_parents[] = {
CCU_PARENT_HW(pll1_d384_6p4),
CCU_PARENT_HW(pll1_d192_12p8),
CCU_PARENT_HW(pll1_d96_25p6),
@@ -255,10 +282,22 @@ static const struct clk_parent_data sspa_parents[] = {
CCU_PARENT_HW(pll1_d768_3p2),
CCU_PARENT_HW(pll1_d1536_1p6),
CCU_PARENT_HW(pll1_d3072_0p8),
- CCU_PARENT_HW(i2s_bclk),
+ CCU_PARENT_HW(sspa0_i2s_bclk),
};
-CCU_MUX_GATE_DEFINE(sspa0_clk, sspa_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
-CCU_MUX_GATE_DEFINE(sspa1_clk, sspa_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+CCU_MUX_GATE_DEFINE(sspa0_clk, sspa0_parents, APBC_SSPA0_CLK_RST, 4, 3, BIT(1), 0);
+
+static const struct clk_parent_data sspa1_parents[] = {
+ CCU_PARENT_HW(pll1_d384_6p4),
+ CCU_PARENT_HW(pll1_d192_12p8),
+ CCU_PARENT_HW(pll1_d96_25p6),
+ CCU_PARENT_HW(pll1_d48_51p2),
+ CCU_PARENT_HW(pll1_d768_3p2),
+ CCU_PARENT_HW(pll1_d1536_1p6),
+ CCU_PARENT_HW(pll1_d3072_0p8),
+ CCU_PARENT_HW(sspa1_i2s_bclk),
+};
+CCU_MUX_GATE_DEFINE(sspa1_clk, sspa1_parents, APBC_SSPA1_CLK_RST, 4, 3, BIT(1), 0);
+
CCU_GATE_DEFINE(dro_clk, CCU_PARENT_HW(apb_clk), APBC_DRO_CLK_RST, BIT(1), 0);
CCU_GATE_DEFINE(ir_clk, CCU_PARENT_HW(apb_clk), APBC_IR_CLK_RST, BIT(1), 0);
CCU_GATE_DEFINE(tsen_clk, CCU_PARENT_HW(apb_clk), APBC_TSEN_CLK_RST, BIT(1), 0);
@@ -756,6 +795,10 @@ static struct clk_hw *k1_ccu_mpmu_hws[] = {
[CLK_I2S_BCLK] = &i2s_bclk.common.hw,
[CLK_APB] = &apb_clk.common.hw,
[CLK_WDT_BUS] = &wdt_bus_clk.common.hw,
+ [CLK_I2S_153P6] = &i2s_153p6.common.hw,
+ [CLK_I2S_153P6_BASE] = &i2s_153p6_base.common.hw,
+ [CLK_I2S_SYSCLK_SRC] = &i2s_sysclk_src.common.hw,
+ [CLK_I2S_BCLK_FACTOR] = &i2s_bclk_factor.common.hw,
};
static const struct spacemit_ccu_data k1_ccu_mpmu_data = {
@@ -865,6 +908,8 @@ static struct clk_hw *k1_ccu_apbc_hws[] = {
[CLK_SSPA1_BUS] = &sspa1_bus_clk.common.hw,
[CLK_TSEN_BUS] = &tsen_bus_clk.common.hw,
[CLK_IPC_AP2AUD_BUS] = &ipc_ap2aud_bus_clk.common.hw,
+ [CLK_SSPA0_I2S_BCLK] = &sspa0_i2s_bclk.common.hw,
+ [CLK_SSPA1_I2S_BCLK] = &sspa1_i2s_bclk.common.hw,
};
static const struct spacemit_ccu_data k1_ccu_apbc_data = {
@@ -973,6 +1018,8 @@ static int spacemit_ccu_register(struct device *dev,
if (!clk_data)
return -ENOMEM;
+ clk_data->num = data->num;
+
for (i = 0; i < data->num; i++) {
struct clk_hw *hw = data->hws[i];
struct ccu_common *common;
@@ -999,8 +1046,6 @@ static int spacemit_ccu_register(struct device *dev,
clk_data->hws[i] = hw;
}
- clk_data->num = data->num;
-
ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
if (ret)
dev_err(dev, "failed to add clock hardware provider (%d)\n", ret);
diff --git a/drivers/clk/spacemit/ccu_ddn.c b/drivers/clk/spacemit/ccu_ddn.c
index be311b045698..5b16e273bee5 100644
--- a/drivers/clk/spacemit/ccu_ddn.c
+++ b/drivers/clk/spacemit/ccu_ddn.c
@@ -22,30 +22,33 @@
#include "ccu_ddn.h"
-static unsigned long ccu_ddn_calc_rate(unsigned long prate,
- unsigned long num, unsigned long den)
+static unsigned long ccu_ddn_calc_rate(unsigned long prate, unsigned long num,
+ unsigned long den, unsigned int pre_div)
{
- return prate * den / 2 / num;
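+ /* pre_div replaces the fixed /2 that was previously hard-coded here */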
+ return prate * den / pre_div / num;
}
static unsigned long ccu_ddn_calc_best_rate(struct ccu_ddn *ddn,
unsigned long rate, unsigned long prate,
unsigned long *num, unsigned long *den)
{
- rational_best_approximation(rate, prate / 2,
+ rational_best_approximation(rate, prate / ddn->pre_div,
ddn->den_mask >> ddn->den_shift,
ddn->num_mask >> ddn->num_shift,
den, num);
- return ccu_ddn_calc_rate(prate, *num, *den);
+ return ccu_ddn_calc_rate(prate, *num, *den, ddn->pre_div);
}
-static long ccu_ddn_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_ddn_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_ddn *ddn = hw_to_ccu_ddn(hw);
unsigned long num, den;
- return ccu_ddn_calc_best_rate(ddn, rate, *prate, &num, &den);
+ req->rate = ccu_ddn_calc_best_rate(ddn, req->rate,
+ req->best_parent_rate, &num, &den);
+
+ return 0;
}
static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
@@ -58,7 +61,7 @@ static unsigned long ccu_ddn_recalc_rate(struct clk_hw *hw, unsigned long prate)
num = (val & ddn->num_mask) >> ddn->num_shift;
den = (val & ddn->den_mask) >> ddn->den_shift;
- return ccu_ddn_calc_rate(prate, num, den);
+ return ccu_ddn_calc_rate(prate, num, den, ddn->pre_div);
}
static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -78,6 +81,6 @@ static int ccu_ddn_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops spacemit_ccu_ddn_ops = {
.recalc_rate = ccu_ddn_recalc_rate,
- .round_rate = ccu_ddn_round_rate,
+ .determine_rate = ccu_ddn_determine_rate,
.set_rate = ccu_ddn_set_rate,
};
diff --git a/drivers/clk/spacemit/ccu_ddn.h b/drivers/clk/spacemit/ccu_ddn.h
index a52fabe77d62..4838414a8e8d 100644
--- a/drivers/clk/spacemit/ccu_ddn.h
+++ b/drivers/clk/spacemit/ccu_ddn.h
@@ -18,13 +18,14 @@ struct ccu_ddn {
unsigned int num_shift;
unsigned int den_mask;
unsigned int den_shift;
+ unsigned int pre_div;
};
#define CCU_DDN_INIT(_name, _parent, _flags) \
CLK_HW_INIT_HW(#_name, &_parent.common.hw, &spacemit_ccu_ddn_ops, _flags)
#define CCU_DDN_DEFINE(_name, _parent, _reg_ctrl, _num_shift, _num_width, \
- _den_shift, _den_width, _flags) \
+ _den_shift, _den_width, _pre_div, _flags) \
static struct ccu_ddn _name = { \
.common = { \
.reg_ctrl = _reg_ctrl, \
@@ -33,7 +34,8 @@ static struct ccu_ddn _name = { \
.num_mask = GENMASK(_num_shift + _num_width - 1, _num_shift), \
.num_shift = _num_shift, \
.den_mask = GENMASK(_den_shift + _den_width - 1, _den_shift), \
- .den_shift = _den_shift, \
+ .den_shift = _den_shift, \
+ .pre_div = _pre_div, \
}
static inline struct ccu_ddn *hw_to_ccu_ddn(struct clk_hw *hw)
diff --git a/drivers/clk/spacemit/ccu_mix.c b/drivers/clk/spacemit/ccu_mix.c
index 9b852aa61f78..7b7990875372 100644
--- a/drivers/clk/spacemit/ccu_mix.c
+++ b/drivers/clk/spacemit/ccu_mix.c
@@ -80,10 +80,12 @@ static int ccu_mix_trigger_fc(struct clk_hw *hw)
MIX_FC_TIMEOUT_US);
}
-static long ccu_factor_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_factor_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return ccu_factor_recalc_rate(hw, *prate);
+ req->rate = ccu_factor_recalc_rate(hw, req->best_parent_rate);
+
+ return 0;
}
static int ccu_factor_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -198,7 +200,7 @@ const struct clk_ops spacemit_ccu_gate_ops = {
};
const struct clk_ops spacemit_ccu_factor_ops = {
- .round_rate = ccu_factor_round_rate,
+ .determine_rate = ccu_factor_determine_rate,
.recalc_rate = ccu_factor_recalc_rate,
.set_rate = ccu_factor_set_rate,
};
@@ -220,7 +222,7 @@ const struct clk_ops spacemit_ccu_factor_gate_ops = {
.enable = ccu_gate_enable,
.is_enabled = ccu_gate_is_enabled,
- .round_rate = ccu_factor_round_rate,
+ .determine_rate = ccu_factor_determine_rate,
.recalc_rate = ccu_factor_recalc_rate,
.set_rate = ccu_factor_set_rate,
};
diff --git a/drivers/clk/spacemit/ccu_mix.h b/drivers/clk/spacemit/ccu_mix.h
index 54d40cd39b27..c406508e3504 100644
--- a/drivers/clk/spacemit/ccu_mix.h
+++ b/drivers/clk/spacemit/ccu_mix.h
@@ -220,4 +220,4 @@ extern const struct clk_ops spacemit_ccu_div_gate_ops;
extern const struct clk_ops spacemit_ccu_mux_gate_ops;
extern const struct clk_ops spacemit_ccu_mux_div_ops;
extern const struct clk_ops spacemit_ccu_mux_div_gate_ops;
-#endif /* _CCU_DIV_H_ */
+#endif /* _CCU_MIX_H_ */
diff --git a/drivers/clk/spacemit/ccu_pll.c b/drivers/clk/spacemit/ccu_pll.c
index 45f540073a65..d92f0dae65a4 100644
--- a/drivers/clk/spacemit/ccu_pll.c
+++ b/drivers/clk/spacemit/ccu_pll.c
@@ -125,12 +125,14 @@ static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
return entry ? entry->rate : 0;
}
-static long ccu_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ccu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ccu_pll *pll = hw_to_ccu_pll(hw);
- return ccu_pll_lookup_best_rate(pll, rate)->rate;
+ req->rate = ccu_pll_lookup_best_rate(pll, req->rate)->rate;
+
+ return 0;
}
static int ccu_pll_init(struct clk_hw *hw)
@@ -152,6 +154,6 @@ const struct clk_ops spacemit_ccu_pll_ops = {
.disable = ccu_pll_disable,
.set_rate = ccu_pll_set_rate,
.recalc_rate = ccu_pll_recalc_rate,
- .round_rate = ccu_pll_round_rate,
+ .determine_rate = ccu_pll_determine_rate,
.is_enabled = ccu_pll_is_enabled,
};
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index 637938e804f8..d0d063147af8 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -49,14 +49,16 @@ static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
(rtbl[index].yscale * eq)) * 10000;
}
-static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_aux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_aux *aux = to_clk_aux(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
- aux->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ aux_calc_rate, aux->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
@@ -127,7 +129,7 @@ static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_aux_ops = {
.recalc_rate = clk_aux_recalc_rate,
- .round_rate = clk_aux_round_rate,
+ .determine_rate = clk_aux_determine_rate,
.set_rate = clk_aux_set_rate,
};
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 2380df293a2c..150f051d28e0 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -52,14 +52,16 @@ static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
return prate;
}
-static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_frac *frac = to_clk_frac(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
- frac->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ frac_calc_rate, frac->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
@@ -115,7 +117,7 @@ static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_frac_ops = {
.recalc_rate = clk_frac_recalc_rate,
- .round_rate = clk_frac_round_rate,
+ .determine_rate = clk_frac_determine_rate,
.set_rate = clk_frac_set_rate,
};
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index 4ef747c2abbb..cf9659dc9073 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -39,14 +39,16 @@ static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
return prate;
}
-static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_gpt_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_gpt *gpt = to_clk_gpt(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
- gpt->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ gpt_calc_rate, gpt->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
@@ -104,7 +106,7 @@ static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_gpt_ops = {
.recalc_rate = clk_gpt_recalc_rate,
- .round_rate = clk_gpt_round_rate,
+ .determine_rate = clk_gpt_determine_rate,
.set_rate = clk_gpt_set_rate,
};
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index 348eeab0a906..723a6eb67754 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -110,12 +110,15 @@ static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
return rate;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int unused;
- return clk_pll_round_rate_index(hw, drate, prate, &unused);
+ req->rate = clk_pll_round_rate_index(hw, req->rate,
+ &req->best_parent_rate, &unused);
+
+ return 0;
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
@@ -164,7 +167,7 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -176,14 +179,16 @@ static inline unsigned long vco_calc_rate(struct clk_hw *hw,
return pll_calc_rate(vco->rtbl, prate, index, NULL);
}
-static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long *prate)
+static int clk_vco_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_vco *vco = to_clk_vco(hw);
int unused;
- return clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
- vco->rtbl_cnt, &unused);
+ req->rate = clk_round_rate_index(hw, req->rate, req->best_parent_rate,
+ vco_calc_rate, vco->rtbl_cnt, &unused);
+
+ return 0;
}
static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
@@ -265,7 +270,7 @@ static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
static const struct clk_ops clk_vco_ops = {
.recalc_rate = clk_vco_recalc_rate,
- .round_rate = clk_vco_round_rate,
+ .determine_rate = clk_vco_determine_rate,
.set_rate = clk_vco_set_rate,
};
diff --git a/drivers/clk/sprd/div.c b/drivers/clk/sprd/div.c
index 936782c24127..013423881968 100644
--- a/drivers/clk/sprd/div.c
+++ b/drivers/clk/sprd/div.c
@@ -9,13 +9,16 @@
#include "div.h"
-static long sprd_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sprd_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sprd_div *cd = hw_to_sprd_div(hw);
- return divider_round_rate(&cd->common.hw, rate, parent_rate, NULL,
- cd->div.width, 0);
+ req->rate = divider_round_rate(&cd->common.hw, req->rate,
+ &req->best_parent_rate,
+ NULL, cd->div.width, 0);
+
+ return 0;
}
unsigned long sprd_div_helper_recalc_rate(struct sprd_clk_common *common,
@@ -75,7 +78,7 @@ static int sprd_div_set_rate(struct clk_hw *hw, unsigned long rate,
const struct clk_ops sprd_div_ops = {
.recalc_rate = sprd_div_recalc_rate,
- .round_rate = sprd_div_round_rate,
+ .determine_rate = sprd_div_determine_rate,
.set_rate = sprd_div_set_rate,
};
EXPORT_SYMBOL_GPL(sprd_div_ops);
diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c
index 13a322b2535a..bc6610d5fcb7 100644
--- a/drivers/clk/sprd/pll.c
+++ b/drivers/clk/sprd/pll.c
@@ -254,16 +254,16 @@ static int sprd_pll_clk_prepare(struct clk_hw *hw)
return 0;
}
-static long sprd_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sprd_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
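+ /* Leave req->rate untouched: the PLL accepts the requested rate as-is */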
+ return 0;
}
const struct clk_ops sprd_pll_ops = {
.prepare = sprd_pll_clk_prepare,
.recalc_rate = sprd_pll_recalc_rate,
- .round_rate = sprd_pll_round_rate,
+ .determine_rate = sprd_pll_determine_rate,
.set_rate = sprd_pll_set_rate,
};
EXPORT_SYMBOL_GPL(sprd_pll_ops);
diff --git a/drivers/clk/sprd/sc9860-clk.c b/drivers/clk/sprd/sc9860-clk.c
index cc5ed2dd8267..d7fe924fbe97 100644
--- a/drivers/clk/sprd/sc9860-clk.c
+++ b/drivers/clk/sprd/sc9860-clk.c
@@ -2021,17 +2021,13 @@ MODULE_DEVICE_TABLE(of, sprd_sc9860_clk_ids);
static int sc9860_clk_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
const struct sprd_clk_desc *desc;
int ret;
- match = of_match_node(sprd_sc9860_clk_ids, pdev->dev.of_node);
- if (!match) {
- pr_err("%s: of_match_node() failed", __func__);
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
return -ENODEV;
- }
- desc = match->data;
ret = sprd_clk_regmap_init(pdev, desc);
if (ret)
return ret;
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
index 5292208c4dd8..e8e7626c76db 100644
--- a/drivers/clk/st/clk-flexgen.c
+++ b/drivers/clk/st/clk-flexgen.c
@@ -303,16 +303,6 @@ static const struct clkgen_data clkgen_video = {
.mode = 1,
};
-static const struct clkgen_clk_out clkgen_stih407_a0_clk_out[] = {
- /* This clk needs to be on so that memory interface is accessible */
- { .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
-};
-
-static const struct clkgen_data clkgen_stih407_a0 = {
- .outputs = clkgen_stih407_a0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_a0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_a0_clk_out[] = {
/* Those clks need to be on so that memory interface is accessible */
{ .name = "clk-ic-lmi0", .flags = CLK_IS_CRITICAL },
@@ -324,51 +314,6 @@ static const struct clkgen_data clkgen_stih410_a0 = {
.outputs_nb = ARRAY_SIZE(clkgen_stih410_a0_clk_out),
};
-static const struct clkgen_clk_out clkgen_stih407_c0_clk_out[] = {
- { .name = "clk-icn-gpu", },
- { .name = "clk-fdma", },
- { .name = "clk-nand", },
- { .name = "clk-hva", },
- { .name = "clk-proc-stfe", },
- { .name = "clk-proc-tp", },
- { .name = "clk-rx-icn-dmu", },
- { .name = "clk-rx-icn-hva", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-cpu", .flags = CLK_IS_CRITICAL },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-tx-icn-dmu", .flags = CLK_IS_CRITICAL },
- { .name = "clk-mmc-0", },
- { .name = "clk-mmc-1", },
- { .name = "clk-jpegdec", },
- /* This clk needs to be on to keep A9 running */
- { .name = "clk-ext2fa9", .flags = CLK_IS_CRITICAL },
- { .name = "clk-ic-bdisp-0", },
- { .name = "clk-ic-bdisp-1", },
- { .name = "clk-pp-dmu", },
- { .name = "clk-vid-dmu", },
- { .name = "clk-dss-lpc", },
- { .name = "clk-st231-aud-0", },
- { .name = "clk-st231-gp-1", },
- { .name = "clk-st231-dmu", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-lmi", .flags = CLK_IS_CRITICAL },
- { .name = "clk-tx-icn-disp-1", },
- /* This clk needs to be on to keep bus interconnect alive */
- { .name = "clk-icn-sbc", .flags = CLK_IS_CRITICAL },
- { .name = "clk-stfe-frc2", },
- { .name = "clk-eth-phy", },
- { .name = "clk-eth-ref-phyclk", },
- { .name = "clk-flash-promip", },
- { .name = "clk-main-disp", },
- { .name = "clk-aux-disp", },
- { .name = "clk-compo-dvp", },
-};
-
-static const struct clkgen_data clkgen_stih407_c0 = {
- .outputs = clkgen_stih407_c0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_c0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_c0_clk_out[] = {
{ .name = "clk-icn-gpu", },
{ .name = "clk-fdma", },
@@ -482,19 +427,6 @@ static const struct clkgen_data clkgen_stih418_c0 = {
.outputs_nb = ARRAY_SIZE(clkgen_stih418_c0_clk_out),
};
-static const struct clkgen_clk_out clkgen_stih407_d0_clk_out[] = {
- { .name = "clk-pcm-0", },
- { .name = "clk-pcm-1", },
- { .name = "clk-pcm-2", },
- { .name = "clk-spdiff", },
-};
-
-static const struct clkgen_data clkgen_stih407_d0 = {
- .flags = CLK_SET_RATE_PARENT,
- .outputs = clkgen_stih407_d0_clk_out,
- .outputs_nb = ARRAY_SIZE(clkgen_stih407_d0_clk_out),
-};
-
static const struct clkgen_clk_out clkgen_stih410_d0_clk_out[] = {
{ .name = "clk-pcm-0", },
{ .name = "clk-pcm-1", },
@@ -597,18 +529,10 @@ static const struct of_device_id flexgen_of_match[] = {
.data = &clkgen_video,
},
{
- .compatible = "st,flexgen-stih407-a0",
- .data = &clkgen_stih407_a0,
- },
- {
.compatible = "st,flexgen-stih410-a0",
.data = &clkgen_stih410_a0,
},
{
- .compatible = "st,flexgen-stih407-c0",
- .data = &clkgen_stih407_c0,
- },
- {
.compatible = "st,flexgen-stih410-c0",
.data = &clkgen_stih410_c0,
},
@@ -617,10 +541,6 @@ static const struct of_device_id flexgen_of_match[] = {
.data = &clkgen_stih418_c0,
},
{
- .compatible = "st,flexgen-stih407-d0",
- .data = &clkgen_stih407_d0,
- },
- {
.compatible = "st,flexgen-stih410-d0",
.data = &clkgen_stih410_d0,
},
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 40df1db102a7..e06e7e5cc1a5 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -375,22 +375,21 @@ static int clk_fs660c32_vco_get_params(unsigned long input,
return 0;
}
-static long quadfs_pll_fs660c32_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int quadfs_pll_fs660c32_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_fs params;
- if (clk_fs660c32_vco_get_params(*prate, rate, &params))
- return rate;
+ if (clk_fs660c32_vco_get_params(req->best_parent_rate, req->rate, &params))
+ return 0;
- clk_fs660c32_vco_get_rate(*prate, &params, &rate);
+ clk_fs660c32_vco_get_rate(req->best_parent_rate, &params, &req->rate);
pr_debug("%s: %s new rate %ld [ndiv=%u]\n",
__func__, clk_hw_get_name(hw),
- rate, (unsigned int)params.ndiv);
+ req->rate, (unsigned int)params.ndiv);
- return rate;
+ return 0;
}
static int quadfs_pll_fs660c32_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -436,7 +435,7 @@ static const struct clk_ops st_quadfs_pll_c32_ops = {
.disable = quadfs_pll_disable,
.is_enabled = quadfs_pll_is_enabled,
.recalc_rate = quadfs_pll_fs660c32_recalc_rate,
- .round_rate = quadfs_pll_fs660c32_round_rate,
+ .determine_rate = quadfs_pll_fs660c32_determine_rate,
.set_rate = quadfs_pll_fs660c32_set_rate,
};
@@ -814,19 +813,21 @@ static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long quadfs_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int quadfs_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_fs params;
- rate = quadfs_find_best_rate(hw, rate, *prate, &params);
+ req->rate = quadfs_find_best_rate(hw, req->rate,
+ req->best_parent_rate, &params);
pr_debug("%s: %s new rate %ld [sdiv=0x%x,md=0x%x,pe=0x%x,nsdiv3=%u]\n",
__func__, clk_hw_get_name(hw),
- rate, (unsigned int)params.sdiv, (unsigned int)params.mdiv,
- (unsigned int)params.pe, (unsigned int)params.nsdiv);
+ req->rate, (unsigned int)params.sdiv,
+ (unsigned int)params.mdiv,
+ (unsigned int)params.pe, (unsigned int)params.nsdiv);
- return rate;
+ return 0;
}
@@ -873,7 +874,7 @@ static const struct clk_ops st_quadfs_ops = {
.enable = quadfs_fsynth_enable,
.disable = quadfs_fsynth_disable,
.is_enabled = quadfs_fsynth_is_enabled,
- .round_rate = quadfs_round_rate,
+ .determine_rate = quadfs_determine_rate,
.set_rate = quadfs_set_rate,
.recalc_rate = quadfs_recalc_rate,
};
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index b36e4d803636..c258ff87a171 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -395,25 +395,28 @@ static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw,
return rate;
}
-static long round_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm_pll3200c32_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_pll params;
- if (!clk_pll3200c32_get_params(*prate, rate, &params))
- clk_pll3200c32_get_rate(*prate, &params, &rate);
+ if (!clk_pll3200c32_get_params(req->best_parent_rate, req->rate, &params))
+ clk_pll3200c32_get_rate(req->best_parent_rate, &params,
+ &req->rate);
else {
pr_debug("%s: %s rate %ld Invalid\n", __func__,
- __clk_get_name(hw->clk), rate);
+ __clk_get_name(hw->clk), req->rate);
+ req->rate = 0;
+
return 0;
}
pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
__func__, __clk_get_name(hw->clk),
- rate, (unsigned int)params.ndiv,
+ req->rate, (unsigned int)params.ndiv,
(unsigned int)params.idf);
- return rate;
+ return 0;
}
static int set_rate_stm_pll3200c32(struct clk_hw *hw, unsigned long rate,
@@ -549,25 +552,28 @@ static unsigned long recalc_stm_pll4600c28(struct clk_hw *hw,
return rate;
}
-static long round_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int stm_pll4600c28_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_pll params;
- if (!clk_pll4600c28_get_params(*prate, rate, &params)) {
- clk_pll4600c28_get_rate(*prate, &params, &rate);
+ if (!clk_pll4600c28_get_params(req->best_parent_rate, req->rate, &params)) {
+ clk_pll4600c28_get_rate(req->best_parent_rate, &params,
+ &req->rate);
} else {
pr_debug("%s: %s rate %ld Invalid\n", __func__,
- __clk_get_name(hw->clk), rate);
+ __clk_get_name(hw->clk), req->rate);
+ req->rate = 0;
+
return 0;
}
pr_debug("%s: %s new rate %ld [ndiv=%u] [idf=%u]\n",
__func__, __clk_get_name(hw->clk),
- rate, (unsigned int)params.ndiv,
+ req->rate, (unsigned int)params.ndiv,
(unsigned int)params.idf);
- return rate;
+ return 0;
}
static int set_rate_stm_pll4600c28(struct clk_hw *hw, unsigned long rate,
@@ -628,7 +634,7 @@ static const struct clk_ops stm_pll3200c32_a9_ops = {
.disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll3200c32,
- .round_rate = round_rate_stm_pll3200c32,
+ .determine_rate = stm_pll3200c32_determine_rate,
.set_rate = set_rate_stm_pll3200c32,
};
@@ -637,7 +643,7 @@ static const struct clk_ops stm_pll4600c28_ops = {
.disable = clkgen_pll_disable,
.is_enabled = clkgen_pll_is_enabled,
.recalc_rate = recalc_stm_pll4600c28,
- .round_rate = round_rate_stm_pll4600c28,
+ .determine_rate = stm_pll4600c28_determine_rate,
.set_rate = set_rate_stm_pll4600c28,
};
diff --git a/drivers/clk/stm32/Kconfig b/drivers/clk/stm32/Kconfig
index 4d2eb993ea08..5dbd75cde657 100644
--- a/drivers/clk/stm32/Kconfig
+++ b/drivers/clk/stm32/Kconfig
@@ -25,6 +25,13 @@ config COMMON_CLK_STM32MP157
help
Support for stm32mp15x SoC family clocks.
+config COMMON_CLK_STM32MP215
+ bool "Clock driver for stm32mp21x clocks"
+ depends on ARM || ARM64 || COMPILE_TEST
+ default y
+ help
Support for stm32mp21x SoC family clocks.
+
config COMMON_CLK_STM32MP257
bool "Clock driver for stm32mp25x clocks"
depends on ARM64 || COMPILE_TEST
diff --git a/drivers/clk/stm32/Makefile b/drivers/clk/stm32/Makefile
index 0a627164fcce..e04727b59449 100644
--- a/drivers/clk/stm32/Makefile
+++ b/drivers/clk/stm32/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_COMMON_CLK_STM32MP135) += clk-stm32mp13.o clk-stm32-core.o reset-stm32.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o reset-stm32.o
+obj-$(CONFIG_COMMON_CLK_STM32MP215) += clk-stm32mp21.o clk-stm32-core.o reset-stm32.o
obj-$(CONFIG_COMMON_CLK_STM32MP257) += clk-stm32mp25.o clk-stm32-core.o reset-stm32.o
diff --git a/drivers/clk/stm32/clk-stm32-core.c b/drivers/clk/stm32/clk-stm32-core.c
index 933e3cde0795..72825b9c36a4 100644
--- a/drivers/clk/stm32/clk-stm32-core.c
+++ b/drivers/clk/stm32/clk-stm32-core.c
@@ -351,14 +351,14 @@ static int clk_stm32_divider_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_stm32_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_stm32_div *div = to_clk_stm32_divider(hw);
const struct stm32_div_cfg *divider;
if (div->div_id == NO_STM32_DIV)
- return rate;
+ return 0;
divider = &div->clock_data->dividers[div->div_id];
@@ -369,14 +369,22 @@ static long clk_stm32_divider_round_rate(struct clk_hw *hw, unsigned long rate,
val = readl(div->base + divider->offset) >> divider->shift;
val &= clk_div_mask(divider->width);
- return divider_ro_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags,
- val);
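+ /* Read-only divider: round against the value currently programmed in HW */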
+ req->rate = divider_ro_round_rate(hw, req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width,
+ divider->flags, val);
+
+ return 0;
}
- return divider_round_rate_parent(hw, clk_hw_get_parent(hw),
- rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate_parent(hw, clk_hw_get_parent(hw),
+ req->rate,
+ &req->best_parent_rate,
+ divider->table,
+ divider->width, divider->flags);
+
+ return 0;
}
static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
@@ -392,7 +400,7 @@ static unsigned long clk_stm32_divider_recalc_rate(struct clk_hw *hw,
const struct clk_ops clk_stm32_divider_ops = {
.recalc_rate = clk_stm32_divider_recalc_rate,
- .round_rate = clk_stm32_divider_round_rate,
+ .determine_rate = clk_stm32_divider_determine_rate,
.set_rate = clk_stm32_divider_set_rate,
};
diff --git a/drivers/clk/stm32/clk-stm32mp1.c b/drivers/clk/stm32/clk-stm32mp1.c
index b8b45ed22f98..2d9ccd96ec98 100644
--- a/drivers/clk/stm32/clk-stm32mp1.c
+++ b/drivers/clk/stm32/clk-stm32mp1.c
@@ -970,12 +970,15 @@ static unsigned long __bestmult(struct clk_hw *hw, unsigned long rate,
return mult;
}
-static long timer_ker_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int timer_ker_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long factor = __bestmult(hw, rate, *parent_rate);
+ unsigned long factor = __bestmult(hw, req->rate,
+ req->best_parent_rate);
- return *parent_rate * factor;
+ req->rate = req->best_parent_rate * factor;
+
+ return 0;
}
static int timer_ker_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1026,7 +1029,7 @@ static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
static const struct clk_ops timer_ker_ops = {
.recalc_rate = timer_ker_recalc_rate,
- .round_rate = timer_ker_round_rate,
+ .determine_rate = timer_ker_determine_rate,
.set_rate = timer_ker_set_rate,
};
diff --git a/drivers/clk/stm32/clk-stm32mp21.c b/drivers/clk/stm32/clk-stm32mp21.c
new file mode 100644
index 000000000000..c8a37b716bd5
--- /dev/null
+++ b/drivers/clk/stm32/clk-stm32mp21.c
@@ -0,0 +1,1586 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) STMicroelectronics 2023 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#include <linux/bus/stm32_firewall_device.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "clk-stm32-core.h"
+#include "reset-stm32.h"
+#include "stm32mp21_rcc.h"
+
+#include <dt-bindings/clock/st,stm32mp21-rcc.h>
+#include <dt-bindings/reset/st,stm32mp21-rcc.h>
+
+/* Max clock binding value */
+#define STM32MP21_LAST_CLK CK_SCMI_KER_ETR
+
+/* Clock security definition */
+#define SECF_NONE -1
+
+#define RCC_REG_SIZE 32
+#define RCC_SECCFGR(x) (((x) / RCC_REG_SIZE) * 0x4 + RCC_SECCFGR0)
+#define RCC_CIDCFGR(x) ((x) * 0x8 + RCC_R0CIDCFGR)
+#define RCC_SEMCR(x) ((x) * 0x8 + RCC_R0SEMCR)
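+/* Per-resource CIDCFGR/SEMCR registers are laid out at an 8-byte stride */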
+#define RCC_CID1 1
+
+/* Register: RIFSC_CIDCFGR */
+#define RCC_CIDCFGR_CFEN BIT(0)
+#define RCC_CIDCFGR_SEM_EN BIT(1)
+#define RCC_CIDCFGR_SEMWLC1_EN BIT(17)
+#define RCC_CIDCFGR_SCID_MASK GENMASK(6, 4)
+
+/* Register: RIFSC_SEMCR */
+#define RCC_SEMCR_SEMCID_MASK GENMASK(6, 4)
+
+#define MP21_RIF_RCC_MCO1 108
+#define MP21_RIF_RCC_MCO2 109
+
+#define SEC_RIFSC_FLAG BIT(31)
+#define SEC_RIFSC(_id) ((_id) | SEC_RIFSC_FLAG)
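+/* Tag a clock with the RIFSC peripheral ID controlling access to it */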
+
+enum {
+ HSE,
+ HSI,
+ MSI,
+ LSE,
+ LSI,
+ HSE_DIV2,
+ ICN_HS_MCU,
+ ICN_LS_MCU,
+ ICN_SDMMC,
+ ICN_DDR,
+ ICN_DISPLAY,
+ ICN_HSL,
+ ICN_NIC,
+ FLEXGEN_07,
+ FLEXGEN_08,
+ FLEXGEN_09,
+ FLEXGEN_10,
+ FLEXGEN_11,
+ FLEXGEN_12,
+ FLEXGEN_13,
+ FLEXGEN_14,
+ FLEXGEN_16,
+ FLEXGEN_17,
+ FLEXGEN_18,
+ FLEXGEN_19,
+ FLEXGEN_20,
+ FLEXGEN_21,
+ FLEXGEN_22,
+ FLEXGEN_23,
+ FLEXGEN_24,
+ FLEXGEN_25,
+ FLEXGEN_26,
+ FLEXGEN_27,
+ FLEXGEN_29,
+ FLEXGEN_30,
+ FLEXGEN_31,
+ FLEXGEN_33,
+ FLEXGEN_36,
+ FLEXGEN_37,
+ FLEXGEN_38,
+ FLEXGEN_39,
+ FLEXGEN_40,
+ FLEXGEN_41,
+ FLEXGEN_42,
+ FLEXGEN_43,
+ FLEXGEN_44,
+ FLEXGEN_45,
+ FLEXGEN_46,
+ FLEXGEN_47,
+ FLEXGEN_48,
+ FLEXGEN_50,
+ FLEXGEN_51,
+ FLEXGEN_52,
+ FLEXGEN_53,
+ FLEXGEN_54,
+ FLEXGEN_55,
+ FLEXGEN_56,
+ FLEXGEN_57,
+ FLEXGEN_58,
+ FLEXGEN_61,
+ FLEXGEN_62,
+ FLEXGEN_63,
+ ICN_APB1,
+ ICN_APB2,
+ ICN_APB3,
+ ICN_APB4,
+ ICN_APB5,
+ ICN_APBDBG,
+ TIMG1,
+ TIMG2,
+};
+
+static const struct clk_parent_data adc1_src[] = {
+ { .index = FLEXGEN_46 },
+ { .index = ICN_LS_MCU },
+};
+
+static const struct clk_parent_data adc2_src[] = {
+ { .index = FLEXGEN_47 },
+ { .index = ICN_LS_MCU },
+ { .index = FLEXGEN_46 },
+};
+
+static const struct clk_parent_data usb2phy1_src[] = {
+ { .index = FLEXGEN_57 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data usb2phy2_src[] = {
+ { .index = FLEXGEN_58 },
+ { .index = HSE_DIV2 },
+};
+
+static const struct clk_parent_data dts_src[] = {
+ { .index = HSI },
+ { .index = HSE },
+ { .index = MSI },
+};
+
+static const struct clk_parent_data mco1_src[] = {
+ { .index = FLEXGEN_61 },
+};
+
+static const struct clk_parent_data mco2_src[] = {
+ { .index = FLEXGEN_62 },
+};
+
+enum enum_mux_cfg {
+ MUX_ADC1,
+ MUX_ADC2,
+ MUX_DTS,
+ MUX_MCO1,
+ MUX_MCO2,
+ MUX_USB2PHY1,
+ MUX_USB2PHY2,
+ MUX_NB
+};
+
+#define MUX_CFG(id, _offset, _shift, _width) \
+ [id] = { \
+ .offset = (_offset), \
+ .shift = (_shift), \
+ .width = (_width), \
+ }
+
+static const struct stm32_mux_cfg stm32mp21_muxes[MUX_NB] = {
+ MUX_CFG(MUX_ADC1, RCC_ADC1CFGR, 12, 1),
+ MUX_CFG(MUX_ADC2, RCC_ADC2CFGR, 12, 2),
+ MUX_CFG(MUX_DTS, RCC_DTSCFGR, 12, 2),
+ MUX_CFG(MUX_MCO1, RCC_MCO1CFGR, 0, 1),
+ MUX_CFG(MUX_MCO2, RCC_MCO2CFGR, 0, 1),
+ MUX_CFG(MUX_USB2PHY1, RCC_USB2PHY1CFGR, 15, 1),
+ MUX_CFG(MUX_USB2PHY2, RCC_USB2PHY2CFGR, 15, 1),
+};
+
+enum enum_gate_cfg {
+ GATE_ADC1,
+ GATE_ADC2,
+ GATE_CRC,
+ GATE_CRYP1,
+ GATE_CRYP2,
+ GATE_CSI,
+ GATE_DCMIPP,
+ GATE_DCMIPSSI,
+ GATE_DDRPERFM,
+ GATE_DTS,
+ GATE_ETH1,
+ GATE_ETH1MAC,
+ GATE_ETH1RX,
+ GATE_ETH1STP,
+ GATE_ETH1TX,
+ GATE_ETH2,
+ GATE_ETH2MAC,
+ GATE_ETH2RX,
+ GATE_ETH2STP,
+ GATE_ETH2TX,
+ GATE_FDCAN,
+ GATE_HASH1,
+ GATE_HASH2,
+ GATE_HDP,
+ GATE_I2C1,
+ GATE_I2C2,
+ GATE_I2C3,
+ GATE_I3C1,
+ GATE_I3C2,
+ GATE_I3C3,
+ GATE_IWDG1,
+ GATE_IWDG2,
+ GATE_IWDG3,
+ GATE_IWDG4,
+ GATE_LPTIM1,
+ GATE_LPTIM2,
+ GATE_LPTIM3,
+ GATE_LPTIM4,
+ GATE_LPTIM5,
+ GATE_LPUART1,
+ GATE_LTDC,
+ GATE_MCO1,
+ GATE_MCO2,
+ GATE_MDF1,
+ GATE_OTG,
+ GATE_PKA,
+ GATE_RNG1,
+ GATE_RNG2,
+ GATE_SAES,
+ GATE_SAI1,
+ GATE_SAI2,
+ GATE_SAI3,
+ GATE_SAI4,
+ GATE_SDMMC1,
+ GATE_SDMMC2,
+ GATE_SDMMC3,
+ GATE_SERC,
+ GATE_SPDIFRX,
+ GATE_SPI1,
+ GATE_SPI2,
+ GATE_SPI3,
+ GATE_SPI4,
+ GATE_SPI5,
+ GATE_SPI6,
+ GATE_TIM1,
+ GATE_TIM10,
+ GATE_TIM11,
+ GATE_TIM12,
+ GATE_TIM13,
+ GATE_TIM14,
+ GATE_TIM15,
+ GATE_TIM16,
+ GATE_TIM17,
+ GATE_TIM2,
+ GATE_TIM3,
+ GATE_TIM4,
+ GATE_TIM5,
+ GATE_TIM6,
+ GATE_TIM7,
+ GATE_TIM8,
+ GATE_UART4,
+ GATE_UART5,
+ GATE_UART7,
+ GATE_USART1,
+ GATE_USART2,
+ GATE_USART3,
+ GATE_USART6,
+ GATE_USB2PHY1,
+ GATE_USB2PHY2,
+ GATE_USBH,
+ GATE_VREF,
+ GATE_WWDG1,
+ GATE_NB
+};
+
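+/*
+ * All MP21 gates below use a read-modify-write enable bit
+ * (set_clr == 0); no separate SET/CLR register pairs are involved.
+ */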
+#define GATE_CFG(id, _offset, _bit_idx, _offset_clr) \
+ [id] = { \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_offset_clr), \
+ }
+
+static const struct stm32_gate_cfg stm32mp21_gates[GATE_NB] = {
+ GATE_CFG(GATE_ADC1, RCC_ADC1CFGR, 1, 0),
+ GATE_CFG(GATE_ADC2, RCC_ADC2CFGR, 1, 0),
+ GATE_CFG(GATE_CRC, RCC_CRCCFGR, 1, 0),
+ GATE_CFG(GATE_CRYP1, RCC_CRYP1CFGR, 1, 0),
+ GATE_CFG(GATE_CRYP2, RCC_CRYP2CFGR, 1, 0),
+ GATE_CFG(GATE_CSI, RCC_CSICFGR, 1, 0),
+ GATE_CFG(GATE_DCMIPP, RCC_DCMIPPCFGR, 1, 0),
+ GATE_CFG(GATE_DCMIPSSI, RCC_DCMIPSSICFGR, 1, 0),
+ GATE_CFG(GATE_DDRPERFM, RCC_DDRPERFMCFGR, 1, 0),
+ GATE_CFG(GATE_DTS, RCC_DTSCFGR, 1, 0),
+ GATE_CFG(GATE_ETH1, RCC_ETH1CFGR, 5, 0),
+ GATE_CFG(GATE_ETH1MAC, RCC_ETH1CFGR, 1, 0),
+ GATE_CFG(GATE_ETH1RX, RCC_ETH1CFGR, 10, 0),
+ GATE_CFG(GATE_ETH1STP, RCC_ETH1CFGR, 4, 0),
+ GATE_CFG(GATE_ETH1TX, RCC_ETH1CFGR, 8, 0),
+ GATE_CFG(GATE_ETH2, RCC_ETH2CFGR, 5, 0),
+ GATE_CFG(GATE_ETH2MAC, RCC_ETH2CFGR, 1, 0),
+ GATE_CFG(GATE_ETH2RX, RCC_ETH2CFGR, 10, 0),
+ GATE_CFG(GATE_ETH2STP, RCC_ETH2CFGR, 4, 0),
+ GATE_CFG(GATE_ETH2TX, RCC_ETH2CFGR, 8, 0),
+ GATE_CFG(GATE_FDCAN, RCC_FDCANCFGR, 1, 0),
+ GATE_CFG(GATE_HASH1, RCC_HASH1CFGR, 1, 0),
+ GATE_CFG(GATE_HASH2, RCC_HASH2CFGR, 1, 0),
+ GATE_CFG(GATE_HDP, RCC_HDPCFGR, 1, 0),
+ GATE_CFG(GATE_I2C1, RCC_I2C1CFGR, 1, 0),
+ GATE_CFG(GATE_I2C2, RCC_I2C2CFGR, 1, 0),
+ GATE_CFG(GATE_I2C3, RCC_I2C3CFGR, 1, 0),
+ GATE_CFG(GATE_I3C1, RCC_I3C1CFGR, 1, 0),
+ GATE_CFG(GATE_I3C2, RCC_I3C2CFGR, 1, 0),
+ GATE_CFG(GATE_I3C3, RCC_I3C3CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG1, RCC_IWDG1CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG2, RCC_IWDG2CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG3, RCC_IWDG3CFGR, 1, 0),
+ GATE_CFG(GATE_IWDG4, RCC_IWDG4CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM1, RCC_LPTIM1CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM2, RCC_LPTIM2CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM3, RCC_LPTIM3CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM4, RCC_LPTIM4CFGR, 1, 0),
+ GATE_CFG(GATE_LPTIM5, RCC_LPTIM5CFGR, 1, 0),
+ GATE_CFG(GATE_LPUART1, RCC_LPUART1CFGR, 1, 0),
+ GATE_CFG(GATE_LTDC, RCC_LTDCCFGR, 1, 0),
+ GATE_CFG(GATE_MCO1, RCC_MCO1CFGR, 8, 0),
+ GATE_CFG(GATE_MCO2, RCC_MCO2CFGR, 8, 0),
+ GATE_CFG(GATE_MDF1, RCC_MDF1CFGR, 1, 0),
+ GATE_CFG(GATE_OTG, RCC_OTGCFGR, 1, 0),
+ GATE_CFG(GATE_PKA, RCC_PKACFGR, 1, 0),
+ GATE_CFG(GATE_RNG1, RCC_RNG1CFGR, 1, 0),
+ GATE_CFG(GATE_RNG2, RCC_RNG2CFGR, 1, 0),
+ GATE_CFG(GATE_SAES, RCC_SAESCFGR, 1, 0),
+ GATE_CFG(GATE_SAI1, RCC_SAI1CFGR, 1, 0),
+ GATE_CFG(GATE_SAI2, RCC_SAI2CFGR, 1, 0),
+ GATE_CFG(GATE_SAI3, RCC_SAI3CFGR, 1, 0),
+ GATE_CFG(GATE_SAI4, RCC_SAI4CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC1, RCC_SDMMC1CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC2, RCC_SDMMC2CFGR, 1, 0),
+ GATE_CFG(GATE_SDMMC3, RCC_SDMMC3CFGR, 1, 0),
+ GATE_CFG(GATE_SERC, RCC_SERCCFGR, 1, 0),
+ GATE_CFG(GATE_SPDIFRX, RCC_SPDIFRXCFGR, 1, 0),
+ GATE_CFG(GATE_SPI1, RCC_SPI1CFGR, 1, 0),
+ GATE_CFG(GATE_SPI2, RCC_SPI2CFGR, 1, 0),
+ GATE_CFG(GATE_SPI3, RCC_SPI3CFGR, 1, 0),
+ GATE_CFG(GATE_SPI4, RCC_SPI4CFGR, 1, 0),
+ GATE_CFG(GATE_SPI5, RCC_SPI5CFGR, 1, 0),
+ GATE_CFG(GATE_SPI6, RCC_SPI6CFGR, 1, 0),
+ GATE_CFG(GATE_TIM1, RCC_TIM1CFGR, 1, 0),
+ GATE_CFG(GATE_TIM10, RCC_TIM10CFGR, 1, 0),
+ GATE_CFG(GATE_TIM11, RCC_TIM11CFGR, 1, 0),
+ GATE_CFG(GATE_TIM12, RCC_TIM12CFGR, 1, 0),
+ GATE_CFG(GATE_TIM13, RCC_TIM13CFGR, 1, 0),
+ GATE_CFG(GATE_TIM14, RCC_TIM14CFGR, 1, 0),
+ GATE_CFG(GATE_TIM15, RCC_TIM15CFGR, 1, 0),
+ GATE_CFG(GATE_TIM16, RCC_TIM16CFGR, 1, 0),
+ GATE_CFG(GATE_TIM17, RCC_TIM17CFGR, 1, 0),
+ GATE_CFG(GATE_TIM2, RCC_TIM2CFGR, 1, 0),
+ GATE_CFG(GATE_TIM3, RCC_TIM3CFGR, 1, 0),
+ GATE_CFG(GATE_TIM4, RCC_TIM4CFGR, 1, 0),
+ GATE_CFG(GATE_TIM5, RCC_TIM5CFGR, 1, 0),
+ GATE_CFG(GATE_TIM6, RCC_TIM6CFGR, 1, 0),
+ GATE_CFG(GATE_TIM7, RCC_TIM7CFGR, 1, 0),
+ GATE_CFG(GATE_TIM8, RCC_TIM8CFGR, 1, 0),
+ GATE_CFG(GATE_UART4, RCC_UART4CFGR, 1, 0),
+ GATE_CFG(GATE_UART5, RCC_UART5CFGR, 1, 0),
+ GATE_CFG(GATE_UART7, RCC_UART7CFGR, 1, 0),
+ GATE_CFG(GATE_USART1, RCC_USART1CFGR, 1, 0),
+ GATE_CFG(GATE_USART2, RCC_USART2CFGR, 1, 0),
+ GATE_CFG(GATE_USART3, RCC_USART3CFGR, 1, 0),
+ GATE_CFG(GATE_USART6, RCC_USART6CFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY1, RCC_USB2PHY1CFGR, 1, 0),
+ GATE_CFG(GATE_USB2PHY2, RCC_USB2PHY2CFGR, 1, 0),
+ GATE_CFG(GATE_USBH, RCC_USBHCFGR, 1, 0),
+ GATE_CFG(GATE_VREF, RCC_VREFCFGR, 1, 0),
+ GATE_CFG(GATE_WWDG1, RCC_WWDG1CFGR, 1, 0),
+};
+
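+/* Single-parent shorthand for CLK_HW_INIT_PARENTS_DATA, taking a parent index */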
+#define CLK_HW_INIT_INDEX(_name, _parent, _ops, _flags) \
+ (&(struct clk_init_data) { \
+ .flags = _flags, \
+ .name = _name, \
+ .parent_data = (const struct clk_parent_data[]) { \
+ { .index = _parent }, \
+ }, \
+ .num_parents = 1, \
+ .ops = _ops, \
+ })
+
+/* ADC */
+static struct clk_stm32_gate ck_icn_p_adc1 = {
+ .gate_id = GATE_ADC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc1 = {
+ .gate_id = GATE_ADC1,
+ .mux_id = MUX_ADC1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc1", adc1_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_adc2 = {
+ .gate_id = GATE_ADC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_adc2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_composite ck_ker_adc2 = {
+ .gate_id = GATE_ADC2,
+ .mux_id = MUX_ADC2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_adc2", adc2_src, &clk_stm32_composite_ops, 0),
+};
+
+/* CSI-HOST */
+static struct clk_stm32_gate ck_icn_p_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_csi", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csi = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csi", FLEXGEN_29, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_csitxesc = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csitxesc", FLEXGEN_30, &clk_stm32_gate_ops, 0),
+};
+
+/* CSI-PHY */
+static struct clk_stm32_gate ck_ker_csiphy = {
+ .gate_id = GATE_CSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_csiphy", FLEXGEN_31, &clk_stm32_gate_ops, 0),
+};
+
+/* DCMIPP */
+static struct clk_stm32_gate ck_icn_p_dcmipp = {
+ .gate_id = GATE_DCMIPP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipp", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_dcmipssi = {
+ .gate_id = GATE_DCMIPSSI,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_dcmipssi", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* DDRPERFM */
+static struct clk_stm32_gate ck_icn_p_ddrperfm = {
+ .gate_id = GATE_DDRPERFM,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ddrperfm", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+/* CRC */
+static struct clk_stm32_gate ck_icn_p_crc = {
+ .gate_id = GATE_CRC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_crc", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* CRYP */
+static struct clk_stm32_gate ck_icn_p_cryp1 = {
+ .gate_id = GATE_CRYP1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_cryp2 = {
+ .gate_id = GATE_CRYP2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_cryp2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* DBG & TRACE */
+/* Trace and debug clocks are managed by SCMI */
+
+/* LTDC */
+static struct clk_stm32_gate ck_icn_p_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_ltdc", ICN_APB4, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_ltdc = {
+ .gate_id = GATE_LTDC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_ltdc", FLEXGEN_27, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* DTS */
+static struct clk_stm32_composite ck_ker_dts = {
+ .gate_id = GATE_DTS,
+ .mux_id = MUX_DTS,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_dts", dts_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* ETHERNET */
+static struct clk_stm32_gate ck_icn_p_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1stp = {
+ .gate_id = GATE_ETH1STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1 = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1", FLEXGEN_54, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1ptp = {
+ .gate_id = GATE_ETH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1mac = {
+ .gate_id = GATE_ETH1MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1tx = {
+ .gate_id = GATE_ETH1TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth1rx = {
+ .gate_id = GATE_ETH1RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth1rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_eth2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2stp = {
+ .gate_id = GATE_ETH2STP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2stp", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2 = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2", FLEXGEN_55, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2ptp = {
+ .gate_id = GATE_ETH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2ptp", FLEXGEN_56, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2mac = {
+ .gate_id = GATE_ETH2MAC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2mac", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2tx = {
+ .gate_id = GATE_ETH2TX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2tx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_eth2rx = {
+ .gate_id = GATE_ETH2RX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_eth2rx", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* FDCAN */
+static struct clk_stm32_gate ck_icn_p_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_fdcan", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_fdcan = {
+ .gate_id = GATE_FDCAN,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_fdcan", FLEXGEN_26, &clk_stm32_gate_ops, 0),
+};
+
+/* HASH */
+static struct clk_stm32_gate ck_icn_p_hash1 = {
+ .gate_id = GATE_HASH1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_hash2 = {
+ .gate_id = GATE_HASH2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hash2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* HDP */
+static struct clk_stm32_gate ck_icn_p_hdp = {
+ .gate_id = GATE_HDP,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_hdp", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* I2C */
+static struct clk_stm32_gate ck_icn_p_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i2c3", ICN_APB5, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c1 = {
+ .gate_id = GATE_I2C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c1", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c2 = {
+ .gate_id = GATE_I2C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c2", FLEXGEN_13, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i2c3 = {
+ .gate_id = GATE_I2C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i2c3", FLEXGEN_38, &clk_stm32_gate_ops, 0),
+};
+
+/* I3C */
+static struct clk_stm32_gate ck_icn_p_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_i3c3", ICN_APB5, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c1 = {
+ .gate_id = GATE_I3C1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c1", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c2 = {
+ .gate_id = GATE_I3C2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c2", FLEXGEN_14, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_i3c3 = {
+ .gate_id = GATE_I3C3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_i3c3", FLEXGEN_36, &clk_stm32_gate_ops, 0),
+};
+
+/* IWDG */
+static struct clk_stm32_gate ck_icn_p_iwdg1 = {
+ .gate_id = GATE_IWDG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg1", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg2 = {
+ .gate_id = GATE_IWDG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg2", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg3 = {
+ .gate_id = GATE_IWDG3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg3", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_iwdg4 = {
+ .gate_id = GATE_IWDG4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_iwdg4", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* LPTIM */
+static struct clk_stm32_gate ck_icn_p_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim1", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim3", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim4", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lptim5", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim1 = {
+ .gate_id = GATE_LPTIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim1", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim2 = {
+ .gate_id = GATE_LPTIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim2", FLEXGEN_07, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim3 = {
+ .gate_id = GATE_LPTIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim3", FLEXGEN_40, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim4 = {
+ .gate_id = GATE_LPTIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim4", FLEXGEN_41, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lptim5 = {
+ .gate_id = GATE_LPTIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lptim5", FLEXGEN_42, &clk_stm32_gate_ops, 0),
+};
+
+/* LPUART */
+static struct clk_stm32_gate ck_icn_p_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_lpuart1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_lpuart1 = {
+ .gate_id = GATE_LPUART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_lpuart1", FLEXGEN_39, &clk_stm32_gate_ops, 0),
+};
+
+/* MCO1 & MCO2 */
+static struct clk_stm32_composite ck_mco1 = {
+ .gate_id = GATE_MCO1,
+ .mux_id = MUX_MCO1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco1", mco1_src, &clk_stm32_composite_ops, 0),
+};
+
+static struct clk_stm32_composite ck_mco2 = {
+ .gate_id = GATE_MCO2,
+ .mux_id = MUX_MCO2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_mco2", mco2_src, &clk_stm32_composite_ops, 0),
+};
+
+/* MDF */
+static struct clk_stm32_gate ck_icn_p_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_mdf1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_mdf1 = {
+ .gate_id = GATE_MDF1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_mdf1", FLEXGEN_21, &clk_stm32_gate_ops, 0),
+};
+
+/* OTG */
+static struct clk_stm32_gate ck_icn_m_otg = {
+ .gate_id = GATE_OTG,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_otg", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* PKA */
+static struct clk_stm32_gate ck_icn_p_pka = {
+ .gate_id = GATE_PKA,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_pka", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* RNG */
+static struct clk_stm32_gate ck_icn_p_rng1 = {
+ .gate_id = GATE_RNG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_rng1", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_rng2 = {
+ .gate_id = GATE_RNG2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_rng2", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* SAES */
+static struct clk_stm32_gate ck_icn_p_saes = {
+ .gate_id = GATE_SAES,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_saes", ICN_LS_MCU, &clk_stm32_gate_ops, 0),
+};
+
+/* SAI */
+static struct clk_stm32_gate ck_icn_p_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai2", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai3", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_sai4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sai1 = {
+ .gate_id = GATE_SAI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai1", FLEXGEN_22, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai2 = {
+ .gate_id = GATE_SAI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai2", FLEXGEN_23, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai3 = {
+ .gate_id = GATE_SAI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai3", FLEXGEN_24, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_sai4 = {
+ .gate_id = GATE_SAI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sai4", FLEXGEN_25, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+/* SDMMC */
+static struct clk_stm32_gate ck_icn_m_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc1", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc2", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_sdmmc3", ICN_SDMMC, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc1 = {
+ .gate_id = GATE_SDMMC1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc1", FLEXGEN_51, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc2 = {
+ .gate_id = GATE_SDMMC2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc2", FLEXGEN_52, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_sdmmc3 = {
+ .gate_id = GATE_SDMMC3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_sdmmc3", FLEXGEN_53, &clk_stm32_gate_ops, 0),
+};
+
+/* SERC */
+static struct clk_stm32_gate ck_icn_p_serc = {
+ .gate_id = GATE_SERC,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_serc", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* SPDIF */
+static struct clk_stm32_gate ck_icn_p_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spdifrx", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spdifrx = {
+ .gate_id = GATE_SPDIFRX,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spdifrx", FLEXGEN_12, &clk_stm32_gate_ops, 0),
+};
+
+/* SPI */
+static struct clk_stm32_gate ck_icn_p_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi4", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi5", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_spi6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi1 = {
+ .gate_id = GATE_SPI1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi1", FLEXGEN_16, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi2 = {
+ .gate_id = GATE_SPI2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi2", FLEXGEN_10, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi3 = {
+ .gate_id = GATE_SPI3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi3", FLEXGEN_11, &clk_stm32_gate_ops,
+ CLK_SET_RATE_PARENT),
+};
+
+static struct clk_stm32_gate ck_ker_spi4 = {
+ .gate_id = GATE_SPI4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi4", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi5 = {
+ .gate_id = GATE_SPI5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi5", FLEXGEN_17, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_spi6 = {
+ .gate_id = GATE_SPI6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_spi6", FLEXGEN_37, &clk_stm32_gate_ops, 0),
+};
+
+/* Timers */
+static struct clk_stm32_gate ck_icn_p_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim6", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim7", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim10", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim11", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim12", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim13", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim14", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim8", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim15", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim16", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_tim17", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim2 = {
+ .gate_id = GATE_TIM2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim2", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim3 = {
+ .gate_id = GATE_TIM3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim3", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim4 = {
+ .gate_id = GATE_TIM4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim4", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim5 = {
+ .gate_id = GATE_TIM5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim5", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim6 = {
+ .gate_id = GATE_TIM6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim6", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim7 = {
+ .gate_id = GATE_TIM7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim7", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim10 = {
+ .gate_id = GATE_TIM10,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim10", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim11 = {
+ .gate_id = GATE_TIM11,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim11", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim12 = {
+ .gate_id = GATE_TIM12,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim12", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim13 = {
+ .gate_id = GATE_TIM13,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim13", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim14 = {
+ .gate_id = GATE_TIM14,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim14", TIMG1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim1 = {
+ .gate_id = GATE_TIM1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim1", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim8 = {
+ .gate_id = GATE_TIM8,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim8", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim15 = {
+ .gate_id = GATE_TIM15,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim15", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim16 = {
+ .gate_id = GATE_TIM16,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim16", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_tim17 = {
+ .gate_id = GATE_TIM17,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_tim17", TIMG2, &clk_stm32_gate_ops, 0),
+};
+
+/* UART/USART */
+static struct clk_stm32_gate ck_icn_p_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart2", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart3", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart4", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart5", ICN_APB1, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart1", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_usart6", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_p_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_uart7", ICN_APB2, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart2 = {
+ .gate_id = GATE_USART2,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart2", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart4 = {
+ .gate_id = GATE_UART4,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart4", FLEXGEN_08, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart3 = {
+ .gate_id = GATE_USART3,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart3", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart5 = {
+ .gate_id = GATE_UART5,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart5", FLEXGEN_09, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart1 = {
+ .gate_id = GATE_USART1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart1", FLEXGEN_18, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_usart6 = {
+ .gate_id = GATE_USART6,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_usart6", FLEXGEN_19, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_ker_uart7 = {
+ .gate_id = GATE_UART7,
+ .hw.init = CLK_HW_INIT_INDEX("ck_ker_uart7", FLEXGEN_20, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY1 */
+static struct clk_stm32_composite ck_ker_usb2phy1 = {
+ .gate_id = GATE_USB2PHY1,
+ .mux_id = MUX_USB2PHY1,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy1", usb2phy1_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* USBH */
+static struct clk_stm32_gate ck_icn_m_usbhehci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usbhehci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+static struct clk_stm32_gate ck_icn_m_usbhohci = {
+ .gate_id = GATE_USBH,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_m_usbhohci", ICN_HSL, &clk_stm32_gate_ops, 0),
+};
+
+/* USB2PHY2 */
+static struct clk_stm32_composite ck_ker_usb2phy2_en = {
+ .gate_id = GATE_USB2PHY2,
+ .mux_id = MUX_USB2PHY2,
+ .div_id = NO_STM32_DIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("ck_ker_usb2phy2_en", usb2phy2_src,
+ &clk_stm32_composite_ops, 0),
+};
+
+/* VREF */
+static struct clk_stm32_gate ck_icn_p_vref = {
+ .gate_id = GATE_VREF,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_vref", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
+/* WWDG */
+static struct clk_stm32_gate ck_icn_p_wwdg1 = {
+ .gate_id = GATE_WWDG1,
+ .hw.init = CLK_HW_INIT_INDEX("ck_icn_p_wwdg1", ICN_APB3, &clk_stm32_gate_ops, 0),
+};
+
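+/*
+ * Grant access to a native RCC resource when it is non-secure and
+ * either CID filtering is disabled, the static CID is CID1, or
+ * semaphore mode pass-lists CID1 and the semaphore CID reads as CID1.
+ */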
+static int stm32_rcc_get_access(void __iomem *base, u32 index)
+{
+ u32 seccfgr, cidcfgr, semcr;
+ int bit, cid;
+
+ bit = index % RCC_REG_SIZE;
+
+ seccfgr = readl(base + RCC_SECCFGR(index));
+ if (seccfgr & BIT(bit))
+ return -EACCES;
+
+ cidcfgr = readl(base + RCC_CIDCFGR(index));
+ if (!(cidcfgr & RCC_CIDCFGR_CFEN))
+ /* CID filtering is turned off: access granted */
+ return 0;
+
+ if (!(cidcfgr & RCC_CIDCFGR_SEM_EN)) {
+ /* Static CID mode */
+ cid = FIELD_GET(RCC_CIDCFGR_SCID_MASK, cidcfgr);
+ if (cid != RCC_CID1)
+ return -EACCES;
+ return 0;
+ }
+
+ /* Pass-list with semaphore mode */
+ if (!(cidcfgr & RCC_CIDCFGR_SEMWLC1_EN))
+ return -EACCES;
+
+ semcr = readl(base + RCC_SEMCR(index));
+
+ cid = FIELD_GET(RCC_SEMCR_SEMCID_MASK, semcr);
+ if (cid != RCC_CID1)
+ return -EACCES;
+
+ return 0;
+}
+
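+/*
+ * A non-zero return from the security check makes the core skip
+ * registering the corresponding clock.
+ */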
+static int stm32mp21_check_security(struct device_node *np, void __iomem *base,
+ const struct clock_config *cfg)
+{
+ int ret = 0;
+
+ if (cfg->sec_id != SECF_NONE) {
+ struct stm32_firewall firewall;
+ u32 index = (u32)cfg->sec_id;
+
+ if (index & SEC_RIFSC_FLAG) {
+ ret = stm32_firewall_get_firewall(np, &firewall, 1);
+ if (ret)
+ return ret;
+ ret = stm32_firewall_grant_access_by_id(&firewall, index & ~SEC_RIFSC_FLAG);
+ } else {
+ ret = stm32_rcc_get_access(base, cfg->sec_id & ~SEC_RIFSC_FLAG);
+ }
+ }
+
+ return ret;
+}
+
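+/* SEC_RIFSC() takes the peripheral's RIFSC resource ID */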
+static const struct clock_config stm32mp21_clock_cfg[] = {
+ STM32_GATE_CFG(CK_BUS_ETH1, ck_icn_p_eth1, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_BUS_ETH2, ck_icn_p_eth2, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_BUS_ADC1, ck_icn_p_adc1, SEC_RIFSC(58)),
+ STM32_GATE_CFG(CK_BUS_ADC2, ck_icn_p_adc2, SEC_RIFSC(59)),
+ STM32_GATE_CFG(CK_BUS_CRC, ck_icn_p_crc, SEC_RIFSC(109)),
+ STM32_GATE_CFG(CK_BUS_MDF1, ck_icn_p_mdf1, SEC_RIFSC(54)),
+ STM32_GATE_CFG(CK_BUS_HASH1, ck_icn_p_hash1, SEC_RIFSC(96)),
+ STM32_GATE_CFG(CK_BUS_HASH2, ck_icn_p_hash2, SEC_RIFSC(97)),
+ STM32_GATE_CFG(CK_BUS_RNG1, ck_icn_p_rng1, SEC_RIFSC(92)),
+ STM32_GATE_CFG(CK_BUS_RNG2, ck_icn_p_rng2, SEC_RIFSC(93)),
+ STM32_GATE_CFG(CK_BUS_CRYP1, ck_icn_p_cryp1, SEC_RIFSC(98)),
+ STM32_GATE_CFG(CK_BUS_CRYP2, ck_icn_p_cryp2, SEC_RIFSC(99)),
+ STM32_GATE_CFG(CK_BUS_SAES, ck_icn_p_saes, SEC_RIFSC(95)),
+ STM32_GATE_CFG(CK_BUS_PKA, ck_icn_p_pka, SEC_RIFSC(94)),
+ STM32_GATE_CFG(CK_BUS_LPUART1, ck_icn_p_lpuart1, SEC_RIFSC(40)),
+ STM32_GATE_CFG(CK_BUS_LPTIM3, ck_icn_p_lptim3, SEC_RIFSC(19)),
+ STM32_GATE_CFG(CK_BUS_LPTIM4, ck_icn_p_lptim4, SEC_RIFSC(20)),
+ STM32_GATE_CFG(CK_BUS_LPTIM5, ck_icn_p_lptim5, SEC_RIFSC(21)),
+ STM32_GATE_CFG(CK_BUS_SDMMC1, ck_icn_m_sdmmc1, SEC_RIFSC(76)),
+ STM32_GATE_CFG(CK_BUS_SDMMC2, ck_icn_m_sdmmc2, SEC_RIFSC(77)),
+ STM32_GATE_CFG(CK_BUS_SDMMC3, ck_icn_m_sdmmc3, SEC_RIFSC(78)),
+ STM32_GATE_CFG(CK_BUS_USBHOHCI, ck_icn_m_usbhohci, SEC_RIFSC(63)),
+ STM32_GATE_CFG(CK_BUS_USBHEHCI, ck_icn_m_usbhehci, SEC_RIFSC(63)),
+ STM32_GATE_CFG(CK_BUS_OTG, ck_icn_m_otg, SEC_RIFSC(66)),
+ STM32_GATE_CFG(CK_BUS_TIM2, ck_icn_p_tim2, SEC_RIFSC(1)),
+ STM32_GATE_CFG(CK_BUS_TIM3, ck_icn_p_tim3, SEC_RIFSC(2)),
+ STM32_GATE_CFG(CK_BUS_TIM4, ck_icn_p_tim4, SEC_RIFSC(3)),
+ STM32_GATE_CFG(CK_BUS_TIM5, ck_icn_p_tim5, SEC_RIFSC(4)),
+ STM32_GATE_CFG(CK_BUS_TIM6, ck_icn_p_tim6, SEC_RIFSC(5)),
+ STM32_GATE_CFG(CK_BUS_TIM7, ck_icn_p_tim7, SEC_RIFSC(6)),
+ STM32_GATE_CFG(CK_BUS_TIM10, ck_icn_p_tim10, SEC_RIFSC(8)),
+ STM32_GATE_CFG(CK_BUS_TIM11, ck_icn_p_tim11, SEC_RIFSC(9)),
+ STM32_GATE_CFG(CK_BUS_TIM12, ck_icn_p_tim12, SEC_RIFSC(10)),
+ STM32_GATE_CFG(CK_BUS_TIM13, ck_icn_p_tim13, SEC_RIFSC(11)),
+ STM32_GATE_CFG(CK_BUS_TIM14, ck_icn_p_tim14, SEC_RIFSC(12)),
+ STM32_GATE_CFG(CK_BUS_LPTIM1, ck_icn_p_lptim1, SEC_RIFSC(17)),
+ STM32_GATE_CFG(CK_BUS_LPTIM2, ck_icn_p_lptim2, SEC_RIFSC(18)),
+ STM32_GATE_CFG(CK_BUS_SPI2, ck_icn_p_spi2, SEC_RIFSC(23)),
+ STM32_GATE_CFG(CK_BUS_SPI3, ck_icn_p_spi3, SEC_RIFSC(24)),
+ STM32_GATE_CFG(CK_BUS_SPDIFRX, ck_icn_p_spdifrx, SEC_RIFSC(30)),
+ STM32_GATE_CFG(CK_BUS_USART2, ck_icn_p_usart2, SEC_RIFSC(32)),
+ STM32_GATE_CFG(CK_BUS_USART3, ck_icn_p_usart3, SEC_RIFSC(33)),
+ STM32_GATE_CFG(CK_BUS_UART4, ck_icn_p_uart4, SEC_RIFSC(34)),
+ STM32_GATE_CFG(CK_BUS_UART5, ck_icn_p_uart5, SEC_RIFSC(35)),
+ STM32_GATE_CFG(CK_BUS_I2C1, ck_icn_p_i2c1, SEC_RIFSC(41)),
+ STM32_GATE_CFG(CK_BUS_I2C2, ck_icn_p_i2c2, SEC_RIFSC(42)),
+ STM32_GATE_CFG(CK_BUS_I2C3, ck_icn_p_i2c3, SEC_RIFSC(43)),
+ STM32_GATE_CFG(CK_BUS_I3C1, ck_icn_p_i3c1, SEC_RIFSC(114)),
+ STM32_GATE_CFG(CK_BUS_I3C2, ck_icn_p_i3c2, SEC_RIFSC(115)),
+ STM32_GATE_CFG(CK_BUS_I3C3, ck_icn_p_i3c3, SEC_RIFSC(116)),
+ STM32_GATE_CFG(CK_BUS_TIM1, ck_icn_p_tim1, SEC_RIFSC(0)),
+ STM32_GATE_CFG(CK_BUS_TIM8, ck_icn_p_tim8, SEC_RIFSC(7)),
+ STM32_GATE_CFG(CK_BUS_TIM15, ck_icn_p_tim15, SEC_RIFSC(13)),
+ STM32_GATE_CFG(CK_BUS_TIM16, ck_icn_p_tim16, SEC_RIFSC(14)),
+ STM32_GATE_CFG(CK_BUS_TIM17, ck_icn_p_tim17, SEC_RIFSC(15)),
+ STM32_GATE_CFG(CK_BUS_SAI1, ck_icn_p_sai1, SEC_RIFSC(49)),
+ STM32_GATE_CFG(CK_BUS_SAI2, ck_icn_p_sai2, SEC_RIFSC(50)),
+ STM32_GATE_CFG(CK_BUS_SAI3, ck_icn_p_sai3, SEC_RIFSC(51)),
+ STM32_GATE_CFG(CK_BUS_SAI4, ck_icn_p_sai4, SEC_RIFSC(52)),
+ STM32_GATE_CFG(CK_BUS_USART1, ck_icn_p_usart1, SEC_RIFSC(31)),
+ STM32_GATE_CFG(CK_BUS_USART6, ck_icn_p_usart6, SEC_RIFSC(36)),
+ STM32_GATE_CFG(CK_BUS_UART7, ck_icn_p_uart7, SEC_RIFSC(37)),
+ STM32_GATE_CFG(CK_BUS_FDCAN, ck_icn_p_fdcan, SEC_RIFSC(56)),
+ STM32_GATE_CFG(CK_BUS_SPI1, ck_icn_p_spi1, SEC_RIFSC(22)),
+ STM32_GATE_CFG(CK_BUS_SPI4, ck_icn_p_spi4, SEC_RIFSC(25)),
+ STM32_GATE_CFG(CK_BUS_SPI5, ck_icn_p_spi5, SEC_RIFSC(26)),
+ STM32_GATE_CFG(CK_BUS_SPI6, ck_icn_p_spi6, SEC_RIFSC(27)),
+ STM32_GATE_CFG(CK_BUS_IWDG1, ck_icn_p_iwdg1, SEC_RIFSC(100)),
+ STM32_GATE_CFG(CK_BUS_IWDG2, ck_icn_p_iwdg2, SEC_RIFSC(101)),
+ STM32_GATE_CFG(CK_BUS_IWDG3, ck_icn_p_iwdg3, SEC_RIFSC(102)),
+ STM32_GATE_CFG(CK_BUS_IWDG4, ck_icn_p_iwdg4, SEC_RIFSC(103)),
+ STM32_GATE_CFG(CK_BUS_WWDG1, ck_icn_p_wwdg1, SEC_RIFSC(104)),
+ STM32_GATE_CFG(CK_BUS_VREF, ck_icn_p_vref, SEC_RIFSC(106)),
+ STM32_GATE_CFG(CK_BUS_SERC, ck_icn_p_serc, SEC_RIFSC(110)),
+ STM32_GATE_CFG(CK_BUS_HDP, ck_icn_p_hdp, SEC_RIFSC(57)),
+ STM32_GATE_CFG(CK_BUS_LTDC, ck_icn_p_ltdc, SEC_RIFSC(80)),
+ STM32_GATE_CFG(CK_BUS_CSI, ck_icn_p_csi, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_BUS_DCMIPP, ck_icn_p_dcmipp, SEC_RIFSC(87)),
+ STM32_GATE_CFG(CK_BUS_DCMIPSSI, ck_icn_p_dcmipssi, SEC_RIFSC(88)),
+ STM32_GATE_CFG(CK_BUS_DDRPERFM, ck_icn_p_ddrperfm, SEC_RIFSC(67)),
+ STM32_GATE_CFG(CK_KER_TIM2, ck_ker_tim2, SEC_RIFSC(1)),
+ STM32_GATE_CFG(CK_KER_TIM3, ck_ker_tim3, SEC_RIFSC(2)),
+ STM32_GATE_CFG(CK_KER_TIM4, ck_ker_tim4, SEC_RIFSC(3)),
+ STM32_GATE_CFG(CK_KER_TIM5, ck_ker_tim5, SEC_RIFSC(4)),
+ STM32_GATE_CFG(CK_KER_TIM6, ck_ker_tim6, SEC_RIFSC(5)),
+ STM32_GATE_CFG(CK_KER_TIM7, ck_ker_tim7, SEC_RIFSC(6)),
+ STM32_GATE_CFG(CK_KER_TIM10, ck_ker_tim10, SEC_RIFSC(8)),
+ STM32_GATE_CFG(CK_KER_TIM11, ck_ker_tim11, SEC_RIFSC(9)),
+ STM32_GATE_CFG(CK_KER_TIM12, ck_ker_tim12, SEC_RIFSC(10)),
+ STM32_GATE_CFG(CK_KER_TIM13, ck_ker_tim13, SEC_RIFSC(11)),
+ STM32_GATE_CFG(CK_KER_TIM14, ck_ker_tim14, SEC_RIFSC(12)),
+ STM32_GATE_CFG(CK_KER_TIM1, ck_ker_tim1, SEC_RIFSC(0)),
+ STM32_GATE_CFG(CK_KER_TIM8, ck_ker_tim8, SEC_RIFSC(7)),
+ STM32_GATE_CFG(CK_KER_TIM15, ck_ker_tim15, SEC_RIFSC(13)),
+ STM32_GATE_CFG(CK_KER_TIM16, ck_ker_tim16, SEC_RIFSC(14)),
+ STM32_GATE_CFG(CK_KER_TIM17, ck_ker_tim17, SEC_RIFSC(15)),
+ STM32_GATE_CFG(CK_KER_LPTIM1, ck_ker_lptim1, SEC_RIFSC(17)),
+ STM32_GATE_CFG(CK_KER_LPTIM2, ck_ker_lptim2, SEC_RIFSC(18)),
+ STM32_GATE_CFG(CK_KER_USART2, ck_ker_usart2, SEC_RIFSC(32)),
+ STM32_GATE_CFG(CK_KER_UART4, ck_ker_uart4, SEC_RIFSC(34)),
+ STM32_GATE_CFG(CK_KER_USART3, ck_ker_usart3, SEC_RIFSC(33)),
+ STM32_GATE_CFG(CK_KER_UART5, ck_ker_uart5, SEC_RIFSC(35)),
+ STM32_GATE_CFG(CK_KER_SPI2, ck_ker_spi2, SEC_RIFSC(23)),
+ STM32_GATE_CFG(CK_KER_SPI3, ck_ker_spi3, SEC_RIFSC(24)),
+ STM32_GATE_CFG(CK_KER_SPDIFRX, ck_ker_spdifrx, SEC_RIFSC(30)),
+ STM32_GATE_CFG(CK_KER_I2C1, ck_ker_i2c1, SEC_RIFSC(41)),
+ STM32_GATE_CFG(CK_KER_I2C2, ck_ker_i2c2, SEC_RIFSC(42)),
+ STM32_GATE_CFG(CK_KER_I3C1, ck_ker_i3c1, SEC_RIFSC(114)),
+ STM32_GATE_CFG(CK_KER_I3C2, ck_ker_i3c2, SEC_RIFSC(115)),
+ STM32_GATE_CFG(CK_KER_I2C3, ck_ker_i2c3, SEC_RIFSC(43)),
+ STM32_GATE_CFG(CK_KER_I3C3, ck_ker_i3c3, SEC_RIFSC(116)),
+ STM32_GATE_CFG(CK_KER_SPI1, ck_ker_spi1, SEC_RIFSC(22)),
+ STM32_GATE_CFG(CK_KER_SPI4, ck_ker_spi4, SEC_RIFSC(25)),
+ STM32_GATE_CFG(CK_KER_SPI5, ck_ker_spi5, SEC_RIFSC(26)),
+ STM32_GATE_CFG(CK_KER_SPI6, ck_ker_spi6, SEC_RIFSC(27)),
+ STM32_GATE_CFG(CK_KER_USART1, ck_ker_usart1, SEC_RIFSC(31)),
+ STM32_GATE_CFG(CK_KER_USART6, ck_ker_usart6, SEC_RIFSC(36)),
+ STM32_GATE_CFG(CK_KER_UART7, ck_ker_uart7, SEC_RIFSC(37)),
+ STM32_GATE_CFG(CK_KER_MDF1, ck_ker_mdf1, SEC_RIFSC(54)),
+ STM32_GATE_CFG(CK_KER_SAI1, ck_ker_sai1, SEC_RIFSC(49)),
+ STM32_GATE_CFG(CK_KER_SAI2, ck_ker_sai2, SEC_RIFSC(50)),
+ STM32_GATE_CFG(CK_KER_SAI3, ck_ker_sai3, SEC_RIFSC(51)),
+ STM32_GATE_CFG(CK_KER_SAI4, ck_ker_sai4, SEC_RIFSC(52)),
+ STM32_GATE_CFG(CK_KER_FDCAN, ck_ker_fdcan, SEC_RIFSC(56)),
+ STM32_GATE_CFG(CK_KER_CSI, ck_ker_csi, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_CSITXESC, ck_ker_csitxesc, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_CSIPHY, ck_ker_csiphy, SEC_RIFSC(86)),
+ STM32_GATE_CFG(CK_KER_LPUART1, ck_ker_lpuart1, SEC_RIFSC(40)),
+ STM32_GATE_CFG(CK_KER_LPTIM3, ck_ker_lptim3, SEC_RIFSC(19)),
+ STM32_GATE_CFG(CK_KER_LPTIM4, ck_ker_lptim4, SEC_RIFSC(20)),
+ STM32_GATE_CFG(CK_KER_LPTIM5, ck_ker_lptim5, SEC_RIFSC(21)),
+ STM32_GATE_CFG(CK_KER_SDMMC1, ck_ker_sdmmc1, SEC_RIFSC(76)),
+ STM32_GATE_CFG(CK_KER_SDMMC2, ck_ker_sdmmc2, SEC_RIFSC(77)),
+ STM32_GATE_CFG(CK_KER_SDMMC3, ck_ker_sdmmc3, SEC_RIFSC(78)),
+ STM32_GATE_CFG(CK_KER_ETH1, ck_ker_eth1, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_STP, ck_ker_eth1stp, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_KER_ETH2, ck_ker_eth2, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_STP, ck_ker_eth2stp, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_KER_ETH1PTP, ck_ker_eth1ptp, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_KER_ETH2PTP, ck_ker_eth2ptp, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH1_MAC, ck_ker_eth1mac, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_TX, ck_ker_eth1tx, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH1_RX, ck_ker_eth1rx, SEC_RIFSC(60)),
+ STM32_GATE_CFG(CK_ETH2_MAC, ck_ker_eth2mac, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_TX, ck_ker_eth2tx, SEC_RIFSC(61)),
+ STM32_GATE_CFG(CK_ETH2_RX, ck_ker_eth2rx, SEC_RIFSC(61)),
+ STM32_COMPOSITE_CFG(CK_MCO1, ck_mco1, MP21_RIF_RCC_MCO1),
+ STM32_COMPOSITE_CFG(CK_MCO2, ck_mco2, MP21_RIF_RCC_MCO2),
+ STM32_COMPOSITE_CFG(CK_KER_ADC1, ck_ker_adc1, SEC_RIFSC(58)),
+ STM32_COMPOSITE_CFG(CK_KER_ADC2, ck_ker_adc2, SEC_RIFSC(59)),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY1, ck_ker_usb2phy1, SEC_RIFSC(63)),
+ STM32_COMPOSITE_CFG(CK_KER_USB2PHY2EN, ck_ker_usb2phy2_en, SEC_RIFSC(66)),
+ STM32_COMPOSITE_CFG(CK_KER_DTS, ck_ker_dts, SEC_RIFSC(107)),
+ STM32_GATE_CFG(CK_KER_LTDC, ck_ker_ltdc, SEC_RIFSC(80)),
+};
+
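+/*
+ * When _set_clr is 1, the line is asserted by writing the bit to the
+ * SETR register at _offset and deasserted through its CLRR twin at +4.
+ */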
+#define RESET_MP21(id, _offset, _bit_idx, _set_clr) \
+ [id] = &(struct stm32_reset_cfg){ \
+ .offset = (_offset), \
+ .bit_idx = (_bit_idx), \
+ .set_clr = (_set_clr), \
+ }
+
+static const struct stm32_reset_cfg *stm32mp21_reset_cfg[] = {
+ RESET_MP21(TIM1_R, RCC_TIM1CFGR, 0, 0),
+ RESET_MP21(TIM2_R, RCC_TIM2CFGR, 0, 0),
+ RESET_MP21(TIM3_R, RCC_TIM3CFGR, 0, 0),
+ RESET_MP21(TIM4_R, RCC_TIM4CFGR, 0, 0),
+ RESET_MP21(TIM5_R, RCC_TIM5CFGR, 0, 0),
+ RESET_MP21(TIM6_R, RCC_TIM6CFGR, 0, 0),
+ RESET_MP21(TIM7_R, RCC_TIM7CFGR, 0, 0),
+ RESET_MP21(TIM8_R, RCC_TIM8CFGR, 0, 0),
+ RESET_MP21(TIM10_R, RCC_TIM10CFGR, 0, 0),
+ RESET_MP21(TIM11_R, RCC_TIM11CFGR, 0, 0),
+ RESET_MP21(TIM12_R, RCC_TIM12CFGR, 0, 0),
+ RESET_MP21(TIM13_R, RCC_TIM13CFGR, 0, 0),
+ RESET_MP21(TIM14_R, RCC_TIM14CFGR, 0, 0),
+ RESET_MP21(TIM15_R, RCC_TIM15CFGR, 0, 0),
+ RESET_MP21(TIM16_R, RCC_TIM16CFGR, 0, 0),
+ RESET_MP21(TIM17_R, RCC_TIM17CFGR, 0, 0),
+ RESET_MP21(LPTIM1_R, RCC_LPTIM1CFGR, 0, 0),
+ RESET_MP21(LPTIM2_R, RCC_LPTIM2CFGR, 0, 0),
+ RESET_MP21(LPTIM3_R, RCC_LPTIM3CFGR, 0, 0),
+ RESET_MP21(LPTIM4_R, RCC_LPTIM4CFGR, 0, 0),
+ RESET_MP21(LPTIM5_R, RCC_LPTIM5CFGR, 0, 0),
+ RESET_MP21(SPI1_R, RCC_SPI1CFGR, 0, 0),
+ RESET_MP21(SPI2_R, RCC_SPI2CFGR, 0, 0),
+ RESET_MP21(SPI3_R, RCC_SPI3CFGR, 0, 0),
+ RESET_MP21(SPI4_R, RCC_SPI4CFGR, 0, 0),
+ RESET_MP21(SPI5_R, RCC_SPI5CFGR, 0, 0),
+ RESET_MP21(SPI6_R, RCC_SPI6CFGR, 0, 0),
+ RESET_MP21(SPDIFRX_R, RCC_SPDIFRXCFGR, 0, 0),
+ RESET_MP21(USART1_R, RCC_USART1CFGR, 0, 0),
+ RESET_MP21(USART2_R, RCC_USART2CFGR, 0, 0),
+ RESET_MP21(USART3_R, RCC_USART3CFGR, 0, 0),
+ RESET_MP21(UART4_R, RCC_UART4CFGR, 0, 0),
+ RESET_MP21(UART5_R, RCC_UART5CFGR, 0, 0),
+ RESET_MP21(USART6_R, RCC_USART6CFGR, 0, 0),
+ RESET_MP21(UART7_R, RCC_UART7CFGR, 0, 0),
+ RESET_MP21(LPUART1_R, RCC_LPUART1CFGR, 0, 0),
+ RESET_MP21(I2C1_R, RCC_I2C1CFGR, 0, 0),
+ RESET_MP21(I2C2_R, RCC_I2C2CFGR, 0, 0),
+ RESET_MP21(I2C3_R, RCC_I2C3CFGR, 0, 0),
+ RESET_MP21(SAI1_R, RCC_SAI1CFGR, 0, 0),
+ RESET_MP21(SAI2_R, RCC_SAI2CFGR, 0, 0),
+ RESET_MP21(SAI3_R, RCC_SAI3CFGR, 0, 0),
+ RESET_MP21(SAI4_R, RCC_SAI4CFGR, 0, 0),
+ RESET_MP21(MDF1_R, RCC_MDF1CFGR, 0, 0),
+ RESET_MP21(FDCAN_R, RCC_FDCANCFGR, 0, 0),
+ RESET_MP21(HDP_R, RCC_HDPCFGR, 0, 0),
+ RESET_MP21(ADC1_R, RCC_ADC1CFGR, 0, 0),
+ RESET_MP21(ADC2_R, RCC_ADC2CFGR, 0, 0),
+ RESET_MP21(ETH1_R, RCC_ETH1CFGR, 0, 0),
+ RESET_MP21(ETH2_R, RCC_ETH2CFGR, 0, 0),
+ RESET_MP21(OTG_R, RCC_OTGCFGR, 0, 0),
+ RESET_MP21(USBH_R, RCC_USBHCFGR, 0, 0),
+ RESET_MP21(USB2PHY1_R, RCC_USB2PHY1CFGR, 0, 0),
+ RESET_MP21(USB2PHY2_R, RCC_USB2PHY2CFGR, 0, 0),
+ RESET_MP21(SDMMC1_R, RCC_SDMMC1CFGR, 0, 0),
+ RESET_MP21(SDMMC1DLL_R, RCC_SDMMC1CFGR, 16, 0),
+ RESET_MP21(SDMMC2_R, RCC_SDMMC2CFGR, 0, 0),
+ RESET_MP21(SDMMC2DLL_R, RCC_SDMMC2CFGR, 16, 0),
+ RESET_MP21(SDMMC3_R, RCC_SDMMC3CFGR, 0, 0),
+ RESET_MP21(SDMMC3DLL_R, RCC_SDMMC3CFGR, 16, 0),
+ RESET_MP21(LTDC_R, RCC_LTDCCFGR, 0, 0),
+ RESET_MP21(CSI_R, RCC_CSICFGR, 0, 0),
+ RESET_MP21(DCMIPP_R, RCC_DCMIPPCFGR, 0, 0),
+ RESET_MP21(DCMIPSSI_R, RCC_DCMIPSSICFGR, 0, 0),
+ RESET_MP21(WWDG1_R, RCC_WWDG1CFGR, 0, 0),
+ RESET_MP21(VREF_R, RCC_VREFCFGR, 0, 0),
+ RESET_MP21(DTS_R, RCC_DTSCFGR, 0, 0),
+ RESET_MP21(CRC_R, RCC_CRCCFGR, 0, 0),
+ RESET_MP21(SERC_R, RCC_SERCCFGR, 0, 0),
+ RESET_MP21(I3C1_R, RCC_I3C1CFGR, 0, 0),
+ RESET_MP21(I3C2_R, RCC_I3C2CFGR, 0, 0),
+ RESET_MP21(IWDG2_KER_R, RCC_IWDGC1CFGSETR, 18, 1),
+ RESET_MP21(IWDG4_KER_R, RCC_IWDGC2CFGSETR, 18, 1),
+ RESET_MP21(RNG1_R, RCC_RNG1CFGR, 0, 0),
+ RESET_MP21(RNG2_R, RCC_RNG2CFGR, 0, 0),
+ RESET_MP21(PKA_R, RCC_PKACFGR, 0, 0),
+ RESET_MP21(SAES_R, RCC_SAESCFGR, 0, 0),
+ RESET_MP21(HASH1_R, RCC_HASH1CFGR, 0, 0),
+ RESET_MP21(HASH2_R, RCC_HASH2CFGR, 0, 0),
+ RESET_MP21(CRYP1_R, RCC_CRYP1CFGR, 0, 0),
+ RESET_MP21(CRYP2_R, RCC_CRYP2CFGR, 0, 0),
+};
+
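+/* Per-gate reference counters, for gates shared by several clocks */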
+static u16 stm32mp21_cpt_gate[GATE_NB];
+
+static struct clk_stm32_clock_data stm32mp21_clock_data = {
+ .gate_cpt = stm32mp21_cpt_gate,
+ .gates = stm32mp21_gates,
+ .muxes = stm32mp21_muxes,
+};
+
+static struct clk_stm32_reset_data stm32mp21_reset_data = {
+ .reset_lines = stm32mp21_reset_cfg,
+ .nr_lines = ARRAY_SIZE(stm32mp21_reset_cfg),
+};
+
+static const struct stm32_rcc_match_data stm32mp21_data = {
+ .tab_clocks = stm32mp21_clock_cfg,
+ .num_clocks = ARRAY_SIZE(stm32mp21_clock_cfg),
+ .maxbinding = STM32MP21_LAST_CLK,
+ .clock_data = &stm32mp21_clock_data,
+ .reset_data = &stm32mp21_reset_data,
+ .check_security = &stm32mp21_check_security,
+};
+
+static const struct of_device_id stm32mp21_match_data[] = {
+ { .compatible = "st,stm32mp21-rcc", .data = &stm32mp21_data, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, stm32mp21_match_data);
+
+static int stm32mp21_rcc_clocks_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ return stm32_rcc_init(dev, stm32mp21_match_data, base);
+}
+
+static struct platform_driver stm32mp21_rcc_clocks_driver = {
+ .driver = {
+ .name = "stm32mp21_rcc",
+ .of_match_table = stm32mp21_match_data,
+ },
+ .probe = stm32mp21_rcc_clocks_probe,
+};
+
+static int __init stm32mp21_clocks_init(void)
+{
+ return platform_driver_register(&stm32mp21_rcc_clocks_driver);
+}
+
+core_initcall(stm32mp21_clocks_init);
+
diff --git a/drivers/clk/stm32/stm32mp21_rcc.h b/drivers/clk/stm32/stm32mp21_rcc.h
new file mode 100644
index 000000000000..df3ea921ffba
--- /dev/null
+++ b/drivers/clk/stm32/stm32mp21_rcc.h
@@ -0,0 +1,651 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) STMicroelectronics 2025 - All Rights Reserved
+ * Author: Gabriel Fernandez <gabriel.fernandez@foss.st.com> for STMicroelectronics.
+ */
+
+#ifndef STM32MP21_RCC_H
+#define STM32MP21_RCC_H
+
+#define RCC_SECCFGR0 0x0
+#define RCC_SECCFGR1 0x4
+#define RCC_SECCFGR2 0x8
+#define RCC_SECCFGR3 0xC
+#define RCC_PRIVCFGR0 0x10
+#define RCC_PRIVCFGR1 0x14
+#define RCC_PRIVCFGR2 0x18
+#define RCC_PRIVCFGR3 0x1C
+#define RCC_RCFGLOCKR0 0x20
+#define RCC_RCFGLOCKR1 0x24
+#define RCC_RCFGLOCKR2 0x28
+#define RCC_RCFGLOCKR3 0x2C
+#define RCC_R0CIDCFGR 0x30
+#define RCC_R0SEMCR 0x34
+#define RCC_R1CIDCFGR 0x38
+#define RCC_R1SEMCR 0x3C
+#define RCC_R2CIDCFGR 0x40
+#define RCC_R2SEMCR 0x44
+#define RCC_R3CIDCFGR 0x48
+#define RCC_R3SEMCR 0x4C
+#define RCC_R4CIDCFGR 0x50
+#define RCC_R4SEMCR 0x54
+#define RCC_R5CIDCFGR 0x58
+#define RCC_R5SEMCR 0x5C
+#define RCC_R6CIDCFGR 0x60
+#define RCC_R6SEMCR 0x64
+#define RCC_R7CIDCFGR 0x68
+#define RCC_R7SEMCR 0x6C
+#define RCC_R8CIDCFGR 0x70
+#define RCC_R8SEMCR 0x74
+#define RCC_R9CIDCFGR 0x78
+#define RCC_R9SEMCR 0x7C
+#define RCC_R10CIDCFGR 0x80
+#define RCC_R10SEMCR 0x84
+#define RCC_R11CIDCFGR 0x88
+#define RCC_R11SEMCR 0x8C
+#define RCC_R12CIDCFGR 0x90
+#define RCC_R12SEMCR 0x94
+#define RCC_R13CIDCFGR 0x98
+#define RCC_R13SEMCR 0x9C
+#define RCC_R14CIDCFGR 0xA0
+#define RCC_R14SEMCR 0xA4
+#define RCC_R15CIDCFGR 0xA8
+#define RCC_R15SEMCR 0xAC
+#define RCC_R16CIDCFGR 0xB0
+#define RCC_R16SEMCR 0xB4
+#define RCC_R17CIDCFGR 0xB8
+#define RCC_R17SEMCR 0xBC
+#define RCC_R18CIDCFGR 0xC0
+#define RCC_R18SEMCR 0xC4
+#define RCC_R19CIDCFGR 0xC8
+#define RCC_R19SEMCR 0xCC
+#define RCC_R20CIDCFGR 0xD0
+#define RCC_R20SEMCR 0xD4
+#define RCC_R21CIDCFGR 0xD8
+#define RCC_R21SEMCR 0xDC
+#define RCC_R22CIDCFGR 0xE0
+#define RCC_R22SEMCR 0xE4
+#define RCC_R23CIDCFGR 0xE8
+#define RCC_R23SEMCR 0xEC
+#define RCC_R24CIDCFGR 0xF0
+#define RCC_R24SEMCR 0xF4
+#define RCC_R25CIDCFGR 0xF8
+#define RCC_R25SEMCR 0xFC
+#define RCC_R26CIDCFGR 0x100
+#define RCC_R26SEMCR 0x104
+#define RCC_R27CIDCFGR 0x108
+#define RCC_R27SEMCR 0x10C
+#define RCC_R28CIDCFGR 0x110
+#define RCC_R28SEMCR 0x114
+#define RCC_R29CIDCFGR 0x118
+#define RCC_R29SEMCR 0x11C
+#define RCC_R30CIDCFGR 0x120
+#define RCC_R30SEMCR 0x124
+#define RCC_R31CIDCFGR 0x128
+#define RCC_R31SEMCR 0x12C
+#define RCC_R32CIDCFGR 0x130
+#define RCC_R32SEMCR 0x134
+#define RCC_R33CIDCFGR 0x138
+#define RCC_R33SEMCR 0x13C
+#define RCC_R34CIDCFGR 0x140
+#define RCC_R34SEMCR 0x144
+#define RCC_R35CIDCFGR 0x148
+#define RCC_R35SEMCR 0x14C
+#define RCC_R36CIDCFGR 0x150
+#define RCC_R36SEMCR 0x154
+#define RCC_R37CIDCFGR 0x158
+#define RCC_R37SEMCR 0x15C
+#define RCC_R38CIDCFGR 0x160
+#define RCC_R38SEMCR 0x164
+#define RCC_R39CIDCFGR 0x168
+#define RCC_R39SEMCR 0x16C
+#define RCC_R40CIDCFGR 0x170
+#define RCC_R40SEMCR 0x174
+#define RCC_R41CIDCFGR 0x178
+#define RCC_R41SEMCR 0x17C
+#define RCC_R42CIDCFGR 0x180
+#define RCC_R42SEMCR 0x184
+#define RCC_R43CIDCFGR 0x188
+#define RCC_R43SEMCR 0x18C
+#define RCC_R44CIDCFGR 0x190
+#define RCC_R44SEMCR 0x194
+#define RCC_R45CIDCFGR 0x198
+#define RCC_R45SEMCR 0x19C
+#define RCC_R46CIDCFGR 0x1A0
+#define RCC_R46SEMCR 0x1A4
+#define RCC_R47CIDCFGR 0x1A8
+#define RCC_R47SEMCR 0x1AC
+#define RCC_R48CIDCFGR 0x1B0
+#define RCC_R48SEMCR 0x1B4
+#define RCC_R49CIDCFGR 0x1B8
+#define RCC_R49SEMCR 0x1BC
+#define RCC_R50CIDCFGR 0x1C0
+#define RCC_R50SEMCR 0x1C4
+#define RCC_R51CIDCFGR 0x1C8
+#define RCC_R51SEMCR 0x1CC
+#define RCC_R52CIDCFGR 0x1D0
+#define RCC_R52SEMCR 0x1D4
+#define RCC_R53CIDCFGR 0x1D8
+#define RCC_R53SEMCR 0x1DC
+#define RCC_R54CIDCFGR 0x1E0
+#define RCC_R54SEMCR 0x1E4
+#define RCC_R55CIDCFGR 0x1E8
+#define RCC_R55SEMCR 0x1EC
+#define RCC_R56CIDCFGR 0x1F0
+#define RCC_R56SEMCR 0x1F4
+#define RCC_R57CIDCFGR 0x1F8
+#define RCC_R57SEMCR 0x1FC
+#define RCC_R58CIDCFGR 0x200
+#define RCC_R58SEMCR 0x204
+#define RCC_R59CIDCFGR 0x208
+#define RCC_R59SEMCR 0x20C
+#define RCC_R60CIDCFGR 0x210
+#define RCC_R60SEMCR 0x214
+#define RCC_R61CIDCFGR 0x218
+#define RCC_R61SEMCR 0x21C
+#define RCC_R62CIDCFGR 0x220
+#define RCC_R62SEMCR 0x224
+#define RCC_R63CIDCFGR 0x228
+#define RCC_R63SEMCR 0x22C
+#define RCC_R64CIDCFGR 0x230
+#define RCC_R64SEMCR 0x234
+#define RCC_R65CIDCFGR 0x238
+#define RCC_R65SEMCR 0x23C
+#define RCC_R66CIDCFGR 0x240
+#define RCC_R66SEMCR 0x244
+#define RCC_R67CIDCFGR 0x248
+#define RCC_R67SEMCR 0x24C
+#define RCC_R68CIDCFGR 0x250
+#define RCC_R68SEMCR 0x254
+#define RCC_R69CIDCFGR 0x258
+#define RCC_R69SEMCR 0x25C
+#define RCC_R70CIDCFGR 0x260
+#define RCC_R70SEMCR 0x264
+#define RCC_R71CIDCFGR 0x268
+#define RCC_R71SEMCR 0x26C
+#define RCC_R73CIDCFGR 0x278
+#define RCC_R73SEMCR 0x27C
+#define RCC_R74CIDCFGR 0x280
+#define RCC_R74SEMCR 0x284
+#define RCC_R75CIDCFGR 0x288
+#define RCC_R75SEMCR 0x28C
+#define RCC_R76CIDCFGR 0x290
+#define RCC_R76SEMCR 0x294
+#define RCC_R77CIDCFGR 0x298
+#define RCC_R77SEMCR 0x29C
+#define RCC_R78CIDCFGR 0x2A0
+#define RCC_R78SEMCR 0x2A4
+#define RCC_R79CIDCFGR 0x2A8
+#define RCC_R79SEMCR 0x2AC
+#define RCC_R83CIDCFGR 0x2C8
+#define RCC_R83SEMCR 0x2CC
+#define RCC_R84CIDCFGR 0x2D0
+#define RCC_R84SEMCR 0x2D4
+#define RCC_R85CIDCFGR 0x2D8
+#define RCC_R85SEMCR 0x2DC
+#define RCC_R86CIDCFGR 0x2E0
+#define RCC_R86SEMCR 0x2E4
+#define RCC_R87CIDCFGR 0x2E8
+#define RCC_R87SEMCR 0x2EC
+#define RCC_R88CIDCFGR 0x2F0
+#define RCC_R88SEMCR 0x2F4
+#define RCC_R90CIDCFGR 0x300
+#define RCC_R90SEMCR 0x304
+#define RCC_R91CIDCFGR 0x308
+#define RCC_R91SEMCR 0x30C
+#define RCC_R92CIDCFGR 0x310
+#define RCC_R92SEMCR 0x314
+#define RCC_R93CIDCFGR 0x318
+#define RCC_R93SEMCR 0x31C
+#define RCC_R94CIDCFGR 0x320
+#define RCC_R94SEMCR 0x324
+#define RCC_R95CIDCFGR 0x328
+#define RCC_R95SEMCR 0x32C
+#define RCC_R96CIDCFGR 0x330
+#define RCC_R96SEMCR 0x334
+#define RCC_R97CIDCFGR 0x338
+#define RCC_R97SEMCR 0x33C
+#define RCC_R98CIDCFGR 0x340
+#define RCC_R98SEMCR 0x344
+#define RCC_R101CIDCFGR 0x358
+#define RCC_R101SEMCR 0x35C
+#define RCC_R102CIDCFGR 0x360
+#define RCC_R102SEMCR 0x364
+#define RCC_R103CIDCFGR 0x368
+#define RCC_R103SEMCR 0x36C
+#define RCC_R104CIDCFGR 0x370
+#define RCC_R104SEMCR 0x374
+#define RCC_R105CIDCFGR 0x378
+#define RCC_R105SEMCR 0x37C
+#define RCC_R106CIDCFGR 0x380
+#define RCC_R106SEMCR 0x384
+#define RCC_R108CIDCFGR 0x390
+#define RCC_R108SEMCR 0x394
+#define RCC_R109CIDCFGR 0x398
+#define RCC_R109SEMCR 0x39C
+#define RCC_R110CIDCFGR 0x3A0
+#define RCC_R110SEMCR 0x3A4
+#define RCC_R111CIDCFGR 0x3A8
+#define RCC_R111SEMCR 0x3AC
+#define RCC_R112CIDCFGR 0x3B0
+#define RCC_R112SEMCR 0x3B4
+#define RCC_R113CIDCFGR 0x3B8
+#define RCC_R113SEMCR 0x3BC
+#define RCC_GRSTCSETR 0x400
+#define RCC_C1RSTCSETR 0x404
+#define RCC_C2RSTCSETR 0x40C
+#define RCC_HWRSTSCLRR 0x410
+#define RCC_C1HWRSTSCLRR 0x414
+#define RCC_C2HWRSTSCLRR 0x418
+#define RCC_C1BOOTRSTSSETR 0x41C
+#define RCC_C1BOOTRSTSCLRR 0x420
+#define RCC_C2BOOTRSTSSETR 0x424
+#define RCC_C2BOOTRSTSCLRR 0x428
+#define RCC_C1SREQSETR 0x42C
+#define RCC_C1SREQCLRR 0x430
+#define RCC_CPUBOOTCR 0x434
+#define RCC_STBYBOOTCR 0x438
+#define RCC_LEGBOOTCR 0x43C
+#define RCC_BDCR 0x440
+#define RCC_RDCR 0x44C
+#define RCC_C1MSRDCR 0x450
+#define RCC_PWRLPDLYCR 0x454
+#define RCC_C1CIESETR 0x458
+#define RCC_C1CIFCLRR 0x45C
+#define RCC_C2CIESETR 0x460
+#define RCC_C2CIFCLRR 0x464
+#define RCC_IWDGC1FZSETR 0x468
+#define RCC_IWDGC1FZCLRR 0x46C
+#define RCC_IWDGC1CFGSETR 0x470
+#define RCC_IWDGC1CFGCLRR 0x474
+#define RCC_IWDGC2FZSETR 0x478
+#define RCC_IWDGC2FZCLRR 0x47C
+#define RCC_IWDGC2CFGSETR 0x480
+#define RCC_IWDGC2CFGCLRR 0x484
+#define RCC_MCO1CFGR 0x488
+#define RCC_MCO2CFGR 0x48C
+#define RCC_OCENSETR 0x490
+#define RCC_OCENCLRR 0x494
+#define RCC_OCRDYR 0x498
+#define RCC_HSICFGR 0x49C
+#define RCC_MSICFGR 0x4A0
+#define RCC_LSICR 0x4A4
+#define RCC_RTCDIVR 0x4A8
+#define RCC_APB1DIVR 0x4AC
+#define RCC_APB2DIVR 0x4B0
+#define RCC_APB3DIVR 0x4B4
+#define RCC_APB4DIVR 0x4B8
+#define RCC_APB5DIVR 0x4BC
+#define RCC_APBDBGDIVR 0x4C0
+#define RCC_TIMG1PRER 0x4C8
+#define RCC_TIMG2PRER 0x4CC
+#define RCC_LSMCUDIVR 0x4D0
+#define RCC_DDRCPCFGR 0x4D4
+#define RCC_DDRCAPBCFGR 0x4D8
+#define RCC_DDRPHYCAPBCFGR 0x4DC
+#define RCC_DDRPHYCCFGR 0x4E0
+#define RCC_DDRCFGR 0x4E4
+#define RCC_DDRITFCFGR 0x4E8
+#define RCC_SYSRAMCFGR 0x4F0
+#define RCC_SRAM1CFGR 0x4F8
+#define RCC_RETRAMCFGR 0x500
+#define RCC_BKPSRAMCFGR 0x504
+#define RCC_OSPI1CFGR 0x514
+#define RCC_FMCCFGR 0x51C
+#define RCC_DBGCFGR 0x520
+#define RCC_STMCFGR 0x524
+#define RCC_ETRCFGR 0x528
+#define RCC_GPIOACFGR 0x52C
+#define RCC_GPIOBCFGR 0x530
+#define RCC_GPIOCCFGR 0x534
+#define RCC_GPIODCFGR 0x538
+#define RCC_GPIOECFGR 0x53C
+#define RCC_GPIOFCFGR 0x540
+#define RCC_GPIOGCFGR 0x544
+#define RCC_GPIOHCFGR 0x548
+#define RCC_GPIOICFGR 0x54C
+#define RCC_GPIOZCFGR 0x558
+#define RCC_HPDMA1CFGR 0x55C
+#define RCC_HPDMA2CFGR 0x560
+#define RCC_HPDMA3CFGR 0x564
+#define RCC_IPCC1CFGR 0x570
+#define RCC_RTCCFGR 0x578
+#define RCC_SYSCPU1CFGR 0x580
+#define RCC_BSECCFGR 0x584
+#define RCC_PLL2CFGR1 0x590
+#define RCC_PLL2CFGR2 0x594
+#define RCC_PLL2CFGR3 0x598
+#define RCC_PLL2CFGR4 0x59C
+#define RCC_PLL2CFGR5 0x5A0
+#define RCC_PLL2CFGR6 0x5A8
+#define RCC_PLL2CFGR7 0x5AC
+#define RCC_HSIFMONCR 0x5E0
+#define RCC_HSIFVALR 0x5E4
+#define RCC_MSIFMONCR 0x5E8
+#define RCC_MSIFVALR 0x5EC
+#define RCC_TIM1CFGR 0x700
+#define RCC_TIM2CFGR 0x704
+#define RCC_TIM3CFGR 0x708
+#define RCC_TIM4CFGR 0x70C
+#define RCC_TIM5CFGR 0x710
+#define RCC_TIM6CFGR 0x714
+#define RCC_TIM7CFGR 0x718
+#define RCC_TIM8CFGR 0x71C
+#define RCC_TIM10CFGR 0x720
+#define RCC_TIM11CFGR 0x724
+#define RCC_TIM12CFGR 0x728
+#define RCC_TIM13CFGR 0x72C
+#define RCC_TIM14CFGR 0x730
+#define RCC_TIM15CFGR 0x734
+#define RCC_TIM16CFGR 0x738
+#define RCC_TIM17CFGR 0x73C
+#define RCC_LPTIM1CFGR 0x744
+#define RCC_LPTIM2CFGR 0x748
+#define RCC_LPTIM3CFGR 0x74C
+#define RCC_LPTIM4CFGR 0x750
+#define RCC_LPTIM5CFGR 0x754
+#define RCC_SPI1CFGR 0x758
+#define RCC_SPI2CFGR 0x75C
+#define RCC_SPI3CFGR 0x760
+#define RCC_SPI4CFGR 0x764
+#define RCC_SPI5CFGR 0x768
+#define RCC_SPI6CFGR 0x76C
+#define RCC_SPDIFRXCFGR 0x778
+#define RCC_USART1CFGR 0x77C
+#define RCC_USART2CFGR 0x780
+#define RCC_USART3CFGR 0x784
+#define RCC_UART4CFGR 0x788
+#define RCC_UART5CFGR 0x78C
+#define RCC_USART6CFGR 0x790
+#define RCC_UART7CFGR 0x794
+#define RCC_LPUART1CFGR 0x7A0
+#define RCC_I2C1CFGR 0x7A4
+#define RCC_I2C2CFGR 0x7A8
+#define RCC_I2C3CFGR 0x7AC
+#define RCC_SAI1CFGR 0x7C4
+#define RCC_SAI2CFGR 0x7C8
+#define RCC_SAI3CFGR 0x7CC
+#define RCC_SAI4CFGR 0x7D0
+#define RCC_MDF1CFGR 0x7D8
+#define RCC_FDCANCFGR 0x7E0
+#define RCC_HDPCFGR 0x7E4
+#define RCC_ADC1CFGR 0x7E8
+#define RCC_ADC2CFGR 0x7EC
+#define RCC_ETH1CFGR 0x7F0
+#define RCC_ETH2CFGR 0x7F4
+#define RCC_USBHCFGR 0x7FC
+#define RCC_USB2PHY1CFGR 0x800
+#define RCC_OTGCFGR 0x808
+#define RCC_USB2PHY2CFGR 0x80C
+#define RCC_STGENCFGR 0x824
+#define RCC_SDMMC1CFGR 0x830
+#define RCC_SDMMC2CFGR 0x834
+#define RCC_SDMMC3CFGR 0x838
+#define RCC_LTDCCFGR 0x840
+#define RCC_CSICFGR 0x858
+#define RCC_DCMIPPCFGR 0x85C
+#define RCC_DCMIPSSICFGR 0x860
+#define RCC_RNG1CFGR 0x870
+#define RCC_RNG2CFGR 0x874
+#define RCC_PKACFGR 0x878
+#define RCC_SAESCFGR 0x87C
+#define RCC_HASH1CFGR 0x880
+#define RCC_HASH2CFGR 0x884
+#define RCC_CRYP1CFGR 0x888
+#define RCC_CRYP2CFGR 0x88C
+#define RCC_IWDG1CFGR 0x894
+#define RCC_IWDG2CFGR 0x898
+#define RCC_IWDG3CFGR 0x89C
+#define RCC_IWDG4CFGR 0x8A0
+#define RCC_WWDG1CFGR 0x8A4
+#define RCC_VREFCFGR 0x8AC
+#define RCC_DTSCFGR 0x8B0
+#define RCC_CRCCFGR 0x8B4
+#define RCC_SERCCFGR 0x8B8
+#define RCC_DDRPERFMCFGR 0x8C0
+#define RCC_I3C1CFGR 0x8C8
+#define RCC_I3C2CFGR 0x8CC
+#define RCC_I3C3CFGR 0x8D0
+#define RCC_MUXSELCFGR 0x1000
+#define RCC_XBAR0CFGR 0x1018
+#define RCC_XBAR1CFGR 0x101C
+#define RCC_XBAR2CFGR 0x1020
+#define RCC_XBAR3CFGR 0x1024
+#define RCC_XBAR4CFGR 0x1028
+#define RCC_XBAR5CFGR 0x102C
+#define RCC_XBAR6CFGR 0x1030
+#define RCC_XBAR7CFGR 0x1034
+#define RCC_XBAR8CFGR 0x1038
+#define RCC_XBAR9CFGR 0x103C
+#define RCC_XBAR10CFGR 0x1040
+#define RCC_XBAR11CFGR 0x1044
+#define RCC_XBAR12CFGR 0x1048
+#define RCC_XBAR13CFGR 0x104C
+#define RCC_XBAR14CFGR 0x1050
+#define RCC_XBAR15CFGR 0x1054
+#define RCC_XBAR16CFGR 0x1058
+#define RCC_XBAR17CFGR 0x105C
+#define RCC_XBAR18CFGR 0x1060
+#define RCC_XBAR19CFGR 0x1064
+#define RCC_XBAR20CFGR 0x1068
+#define RCC_XBAR21CFGR 0x106C
+#define RCC_XBAR22CFGR 0x1070
+#define RCC_XBAR23CFGR 0x1074
+#define RCC_XBAR24CFGR 0x1078
+#define RCC_XBAR25CFGR 0x107C
+#define RCC_XBAR26CFGR 0x1080
+#define RCC_XBAR27CFGR 0x1084
+#define RCC_XBAR28CFGR 0x1088
+#define RCC_XBAR29CFGR 0x108C
+#define RCC_XBAR30CFGR 0x1090
+#define RCC_XBAR31CFGR 0x1094
+#define RCC_XBAR32CFGR 0x1098
+#define RCC_XBAR33CFGR 0x109C
+#define RCC_XBAR34CFGR 0x10A0
+#define RCC_XBAR35CFGR 0x10A4
+#define RCC_XBAR36CFGR 0x10A8
+#define RCC_XBAR37CFGR 0x10AC
+#define RCC_XBAR38CFGR 0x10B0
+#define RCC_XBAR39CFGR 0x10B4
+#define RCC_XBAR40CFGR 0x10B8
+#define RCC_XBAR41CFGR 0x10BC
+#define RCC_XBAR42CFGR 0x10C0
+#define RCC_XBAR43CFGR 0x10C4
+#define RCC_XBAR44CFGR 0x10C8
+#define RCC_XBAR45CFGR 0x10CC
+#define RCC_XBAR46CFGR 0x10D0
+#define RCC_XBAR47CFGR 0x10D4
+#define RCC_XBAR48CFGR 0x10D8
+#define RCC_XBAR49CFGR 0x10DC
+#define RCC_XBAR50CFGR 0x10E0
+#define RCC_XBAR51CFGR 0x10E4
+#define RCC_XBAR52CFGR 0x10E8
+#define RCC_XBAR53CFGR 0x10EC
+#define RCC_XBAR54CFGR 0x10F0
+#define RCC_XBAR55CFGR 0x10F4
+#define RCC_XBAR56CFGR 0x10F8
+#define RCC_XBAR57CFGR 0x10FC
+#define RCC_XBAR58CFGR 0x1100
+#define RCC_XBAR59CFGR 0x1104
+#define RCC_XBAR60CFGR 0x1108
+#define RCC_XBAR61CFGR 0x110C
+#define RCC_XBAR62CFGR 0x1110
+#define RCC_XBAR63CFGR 0x1114
+#define RCC_PREDIV0CFGR 0x1118
+#define RCC_PREDIV1CFGR 0x111C
+#define RCC_PREDIV2CFGR 0x1120
+#define RCC_PREDIV3CFGR 0x1124
+#define RCC_PREDIV4CFGR 0x1128
+#define RCC_PREDIV5CFGR 0x112C
+#define RCC_PREDIV6CFGR 0x1130
+#define RCC_PREDIV7CFGR 0x1134
+#define RCC_PREDIV8CFGR 0x1138
+#define RCC_PREDIV9CFGR 0x113C
+#define RCC_PREDIV10CFGR 0x1140
+#define RCC_PREDIV11CFGR 0x1144
+#define RCC_PREDIV12CFGR 0x1148
+#define RCC_PREDIV13CFGR 0x114C
+#define RCC_PREDIV14CFGR 0x1150
+#define RCC_PREDIV15CFGR 0x1154
+#define RCC_PREDIV16CFGR 0x1158
+#define RCC_PREDIV17CFGR 0x115C
+#define RCC_PREDIV18CFGR 0x1160
+#define RCC_PREDIV19CFGR 0x1164
+#define RCC_PREDIV20CFGR 0x1168
+#define RCC_PREDIV21CFGR 0x116C
+#define RCC_PREDIV22CFGR 0x1170
+#define RCC_PREDIV23CFGR 0x1174
+#define RCC_PREDIV24CFGR 0x1178
+#define RCC_PREDIV25CFGR 0x117C
+#define RCC_PREDIV26CFGR 0x1180
+#define RCC_PREDIV27CFGR 0x1184
+#define RCC_PREDIV28CFGR 0x1188
+#define RCC_PREDIV29CFGR 0x118C
+#define RCC_PREDIV30CFGR 0x1190
+#define RCC_PREDIV31CFGR 0x1194
+#define RCC_PREDIV32CFGR 0x1198
+#define RCC_PREDIV33CFGR 0x119C
+#define RCC_PREDIV34CFGR 0x11A0
+#define RCC_PREDIV35CFGR 0x11A4
+#define RCC_PREDIV36CFGR 0x11A8
+#define RCC_PREDIV37CFGR 0x11AC
+#define RCC_PREDIV38CFGR 0x11B0
+#define RCC_PREDIV39CFGR 0x11B4
+#define RCC_PREDIV40CFGR 0x11B8
+#define RCC_PREDIV41CFGR 0x11BC
+#define RCC_PREDIV42CFGR 0x11C0
+#define RCC_PREDIV43CFGR 0x11C4
+#define RCC_PREDIV44CFGR 0x11C8
+#define RCC_PREDIV45CFGR 0x11CC
+#define RCC_PREDIV46CFGR 0x11D0
+#define RCC_PREDIV47CFGR 0x11D4
+#define RCC_PREDIV48CFGR 0x11D8
+#define RCC_PREDIV49CFGR 0x11DC
+#define RCC_PREDIV50CFGR 0x11E0
+#define RCC_PREDIV51CFGR 0x11E4
+#define RCC_PREDIV52CFGR 0x11E8
+#define RCC_PREDIV53CFGR 0x11EC
+#define RCC_PREDIV54CFGR 0x11F0
+#define RCC_PREDIV55CFGR 0x11F4
+#define RCC_PREDIV56CFGR 0x11F8
+#define RCC_PREDIV57CFGR 0x11FC
+#define RCC_PREDIV58CFGR 0x1200
+#define RCC_PREDIV59CFGR 0x1204
+#define RCC_PREDIV60CFGR 0x1208
+#define RCC_PREDIV61CFGR 0x120C
+#define RCC_PREDIV62CFGR 0x1210
+#define RCC_PREDIV63CFGR 0x1214
+#define RCC_PREDIVSR1 0x1218
+#define RCC_PREDIVSR2 0x121C
+#define RCC_FINDIV0CFGR 0x1224
+#define RCC_FINDIV1CFGR 0x1228
+#define RCC_FINDIV2CFGR 0x122C
+#define RCC_FINDIV3CFGR 0x1230
+#define RCC_FINDIV4CFGR 0x1234
+#define RCC_FINDIV5CFGR 0x1238
+#define RCC_FINDIV6CFGR 0x123C
+#define RCC_FINDIV7CFGR 0x1240
+#define RCC_FINDIV8CFGR 0x1244
+#define RCC_FINDIV9CFGR 0x1248
+#define RCC_FINDIV10CFGR 0x124C
+#define RCC_FINDIV11CFGR 0x1250
+#define RCC_FINDIV12CFGR 0x1254
+#define RCC_FINDIV13CFGR 0x1258
+#define RCC_FINDIV14CFGR 0x125C
+#define RCC_FINDIV15CFGR 0x1260
+#define RCC_FINDIV16CFGR 0x1264
+#define RCC_FINDIV17CFGR 0x1268
+#define RCC_FINDIV18CFGR 0x126C
+#define RCC_FINDIV19CFGR 0x1270
+#define RCC_FINDIV20CFGR 0x1274
+#define RCC_FINDIV21CFGR 0x1278
+#define RCC_FINDIV22CFGR 0x127C
+#define RCC_FINDIV23CFGR 0x1280
+#define RCC_FINDIV24CFGR 0x1284
+#define RCC_FINDIV25CFGR 0x1288
+#define RCC_FINDIV26CFGR 0x128C
+#define RCC_FINDIV27CFGR 0x1290
+#define RCC_FINDIV28CFGR 0x1294
+#define RCC_FINDIV29CFGR 0x1298
+#define RCC_FINDIV30CFGR 0x129C
+#define RCC_FINDIV31CFGR 0x12A0
+#define RCC_FINDIV32CFGR 0x12A4
+#define RCC_FINDIV33CFGR 0x12A8
+#define RCC_FINDIV34CFGR 0x12AC
+#define RCC_FINDIV35CFGR 0x12B0
+#define RCC_FINDIV36CFGR 0x12B4
+#define RCC_FINDIV37CFGR 0x12B8
+#define RCC_FINDIV38CFGR 0x12BC
+#define RCC_FINDIV39CFGR 0x12C0
+#define RCC_FINDIV40CFGR 0x12C4
+#define RCC_FINDIV41CFGR 0x12C8
+#define RCC_FINDIV42CFGR 0x12CC
+#define RCC_FINDIV43CFGR 0x12D0
+#define RCC_FINDIV44CFGR 0x12D4
+#define RCC_FINDIV45CFGR 0x12D8
+#define RCC_FINDIV46CFGR 0x12DC
+#define RCC_FINDIV47CFGR 0x12E0
+#define RCC_FINDIV48CFGR 0x12E4
+#define RCC_FINDIV49CFGR 0x12E8
+#define RCC_FINDIV50CFGR 0x12EC
+#define RCC_FINDIV51CFGR 0x12F0
+#define RCC_FINDIV52CFGR 0x12F4
+#define RCC_FINDIV53CFGR 0x12F8
+#define RCC_FINDIV54CFGR 0x12FC
+#define RCC_FINDIV55CFGR 0x1300
+#define RCC_FINDIV56CFGR 0x1304
+#define RCC_FINDIV57CFGR 0x1308
+#define RCC_FINDIV58CFGR 0x130C
+#define RCC_FINDIV59CFGR 0x1310
+#define RCC_FINDIV60CFGR 0x1314
+#define RCC_FINDIV61CFGR 0x1318
+#define RCC_FINDIV62CFGR 0x131C
+#define RCC_FINDIV63CFGR 0x1320
+#define RCC_FINDIVSR1 0x1324
+#define RCC_FINDIVSR2 0x1328
+#define RCC_FCALCOBS0CFGR 0x1340
+#define RCC_FCALCOBS1CFGR 0x1344
+#define RCC_FCALCREFCFGR 0x1348
+#define RCC_FCALCCR1 0x134C
+#define RCC_FCALCCR2 0x1354
+#define RCC_FCALCSR 0x1358
+#define RCC_PLL4CFGR1 0x1360
+#define RCC_PLL4CFGR2 0x1364
+#define RCC_PLL4CFGR3 0x1368
+#define RCC_PLL4CFGR4 0x136C
+#define RCC_PLL4CFGR5 0x1370
+#define RCC_PLL4CFGR6 0x1378
+#define RCC_PLL4CFGR7 0x137C
+#define RCC_PLL5CFGR1 0x1388
+#define RCC_PLL5CFGR2 0x138C
+#define RCC_PLL5CFGR3 0x1390
+#define RCC_PLL5CFGR4 0x1394
+#define RCC_PLL5CFGR5 0x1398
+#define RCC_PLL5CFGR6 0x13A0
+#define RCC_PLL5CFGR7 0x13A4
+#define RCC_PLL6CFGR1 0x13B0
+#define RCC_PLL6CFGR2 0x13B4
+#define RCC_PLL6CFGR3 0x13B8
+#define RCC_PLL6CFGR4 0x13BC
+#define RCC_PLL6CFGR5 0x13C0
+#define RCC_PLL6CFGR6 0x13C8
+#define RCC_PLL6CFGR7 0x13CC
+#define RCC_PLL7CFGR1 0x13D8
+#define RCC_PLL7CFGR2 0x13DC
+#define RCC_PLL7CFGR3 0x13E0
+#define RCC_PLL7CFGR4 0x13E4
+#define RCC_PLL7CFGR5 0x13E8
+#define RCC_PLL7CFGR6 0x13F0
+#define RCC_PLL7CFGR7 0x13F4
+#define RCC_PLL8CFGR1 0x1400
+#define RCC_PLL8CFGR2 0x1404
+#define RCC_PLL8CFGR3 0x1408
+#define RCC_PLL8CFGR4 0x140C
+#define RCC_PLL8CFGR5 0x1410
+#define RCC_PLL8CFGR6 0x1418
+#define RCC_PLL8CFGR7 0x141C
+#define RCC_VERR 0xFFF4
+#define RCC_IDR 0xFFF8
+#define RCC_SIDR 0xFFFC
+
+#endif /* STM32MP21_RCC_H */
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index 8896fd052ef1..6af2d020e03e 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -57,6 +57,11 @@ config SUN55I_A523_CCU
default ARCH_SUNXI
depends on ARM64 || COMPILE_TEST
+config SUN55I_A523_MCU_CCU
+ tristate "Support for the Allwinner A523/T527 MCU CCU"
+ default ARCH_SUNXI
+ depends on ARM64 || COMPILE_TEST
+
config SUN55I_A523_R_CCU
tristate "Support for the Allwinner A523/T527 PRCM CCU"
default ARCH_SUNXI
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 82e471036de6..a1c4087d7241 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SUN50I_H6_CCU) += sun50i-h6-ccu.o
obj-$(CONFIG_SUN50I_H6_R_CCU) += sun50i-h6-r-ccu.o
obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
obj-$(CONFIG_SUN55I_A523_CCU) += sun55i-a523-ccu.o
+obj-$(CONFIG_SUN55I_A523_MCU_CCU) += sun55i-a523-mcu-ccu.o
obj-$(CONFIG_SUN55I_A523_R_CCU) += sun55i-a523-r-ccu.o
obj-$(CONFIG_SUN4I_A10_CCU) += sun4i-a10-ccu.o
obj-$(CONFIG_SUN5I_CCU) += sun5i-ccu.o
@@ -61,6 +62,7 @@ sun50i-h6-ccu-y += ccu-sun50i-h6.o
sun50i-h6-r-ccu-y += ccu-sun50i-h6-r.o
sun50i-h616-ccu-y += ccu-sun50i-h616.o
sun55i-a523-ccu-y += ccu-sun55i-a523.o
+sun55i-a523-mcu-ccu-y += ccu-sun55i-a523-mcu.o
sun55i-a523-r-ccu-y += ccu-sun55i-a523-r.o
sun4i-a10-ccu-y += ccu-sun4i-a10.o
sun5i-ccu-y += ccu-sun5i.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c
new file mode 100644
index 000000000000..197844f0fe4e
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-mcu.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2025 Chen-Yu Tsai <wens@csie.org>
+ *
+ * Based on the A523 CCU driver:
+ * Copyright (C) 2023-2024 Arm Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/sun55i-a523-mcu-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-mcu-ccu.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nm.h"
+
+static const struct clk_parent_data osc24M[] = {
+ { .fw_name = "hosc" }
+};
+
+static const struct clk_parent_data ahb[] = {
+ { .fw_name = "r-ahb" }
+};
+
+static const struct clk_parent_data apb[] = {
+ { .fw_name = "r-apb0" }
+};
+
+#define SUN55I_A523_PLL_AUDIO1_REG 0x00c
+static struct ccu_sdm_setting pll_audio1_sdm_table[] = {
+	{ .rate = 2167603200, .pattern = 0xa000a234, .m = 1, .n = 90 },	/* div2 -> 22.5792 MHz */
+	{ .rate = 2359296000, .pattern = 0xa0009ba6, .m = 1, .n = 98 },	/* div2 -> 24.576 MHz */
+	{ .rate = 1806336000, .pattern = 0xa000872b, .m = 1, .n = 75 },	/* div5 -> 22.5792 MHz */
+};
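+/*
+ * For reference: 2167603200 / 2 = 1083801600 = 48 * 22579200 and
+ * 1806336000 / 5 = 361267200 = 16 * 22579200, so those two entries
+ * serve the 22.5792 MHz audio family, while 2359296000 / 2 =
+ * 1179648000 = 48 * 24576000 serves the 24.576 MHz family.
+ */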
+
+static struct ccu_nm pll_audio1_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 11),
+ .m = _SUNXI_CCU_DIV(1, 1),
+ .sdm = _SUNXI_CCU_SDM(pll_audio1_sdm_table, BIT(24),
+ 0x010, BIT(31)),
+ .min_rate = 180000000U,
+ .max_rate = 3500000000U,
+ .common = {
+ .reg = 0x00c,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio1",
+ osc24M, &ccu_nm_ops,
+ CLK_SET_RATE_GATE),
+ },
+};
+
+/*
+ * The /2 and /5 dividers are actually programmable, but we just use the
+ * values from the BSP, since the audio PLL only needs to provide a
+ * couple of clock rates. This also matches the names given in the manual.
+ */
+static const struct clk_hw *pll_audio1_div_parents[] = { &pll_audio1_clk.common.hw };
+static CLK_FIXED_FACTOR_HWS(pll_audio1_div2_clk, "pll-audio1-div2",
+ pll_audio1_div_parents, 2, 1,
+ CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_audio1_div5_clk, "pll-audio1-div5",
+ pll_audio1_div_parents, 5, 1,
+ CLK_SET_RATE_PARENT);
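+/*
+ * Both fixed factors carry CLK_SET_RATE_PARENT, so rate requests on the
+ * div2/div5 outputs propagate up to pll-audio1, where the SDM table
+ * above selects the matching PLL rate.
+ */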
+
+static SUNXI_CCU_M_WITH_GATE(audio_out_clk, "audio-out",
+ "pll-audio1-div2", 0x01c,
+ 0, 5, BIT(31), CLK_SET_RATE_PARENT);
+
+static const struct clk_parent_data dsp_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ /*
+	 * The order of the following two parents is from the BSP code. It is
+ * the opposite in the manual. Testing with the DSP is required to
+ * figure out the real order.
+ */
+ { .hw = &pll_audio1_div5_clk.hw },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .fw_name = "dsp" },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(dsp_clk, "mcu-dsp", dsp_parents, 0x0020,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data i2s_parents[] = {
+ { .fw_name = "pll-audio0-4x" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s0_clk, "i2s0", i2s_parents, 0x02c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s1_clk, "i2s1", i2s_parents, 0x030,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s2_clk, "i2s2", i2s_parents, 0x034,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s3_clk, "i2s3", i2s_parents, 0x038,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static const struct clk_parent_data i2s3_asrc_parents[] = {
+ { .fw_name = "pll-periph0-300m" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(i2s3_asrc_clk, "i2s3-asrc",
+ i2s3_asrc_parents, 0x03c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_i2s0_clk, "bus-i2s0", apb, 0x040, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s1_clk, "bus-i2s1", apb, 0x040, BIT(1), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s2_clk, "bus-i2s2", apb, 0x040, BIT(2), 0);
+static SUNXI_CCU_GATE_DATA(bus_i2s3_clk, "bus-i2s3", apb, 0x040, BIT(3), 0);
+
+static const struct clk_parent_data audio_parents[] = {
+ { .fw_name = "pll-audio0-4x" },
+ { .hw = &pll_audio1_div2_clk.hw },
+ { .hw = &pll_audio1_div5_clk.hw },
+};
+static SUNXI_CCU_DUALDIV_MUX_GATE(spdif_tx_clk, "spdif-tx",
+ audio_parents, 0x044,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(spdif_rx_clk, "spdif-rx",
+ i2s3_asrc_parents, 0x048,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_spdif_clk, "bus-spdif", apb, 0x04c, BIT(0), 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(dmic_clk, "dmic", audio_parents, 0x050,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_dmic_clk, "bus-dmic", apb, 0x054, BIT(0), 0);
+
+static SUNXI_CCU_DUALDIV_MUX_GATE(audio_dac_clk, "audio-dac",
+ audio_parents, 0x058,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+static SUNXI_CCU_DUALDIV_MUX_GATE(audio_adc_clk, "audio-adc",
+ audio_parents, 0x05c,
+ 0, 5, /* M */
+ 5, 5, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_DATA(bus_audio_codec_clk, "bus-audio-codec",
+ apb, 0x060, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(bus_dsp_msgbox_clk, "bus-dsp-msgbox",
+ ahb, 0x068, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_dsp_cfg_clk, "bus-dsp-cfg",
+ apb, 0x06c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(bus_npu_hclk, "bus-npu-hclk", ahb, 0x070, BIT(1), 0);
+static SUNXI_CCU_GATE_DATA(bus_npu_aclk, "bus-npu-aclk", ahb, 0x070, BIT(2), 0);
+
+static const struct clk_parent_data timer_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "r-ahb" }
+};
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer0_clk, "mcu-timer0", timer_parents,
+ 0x074,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer1_clk, "mcu-timer1", timer_parents,
+ 0x078,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer2_clk, "mcu-timer2", timer_parents,
+ 0x07c,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer3_clk, "mcu-timer3", timer_parents,
+ 0x080,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer4_clk, "mcu-timer4", timer_parents,
+ 0x084,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_P_DATA_WITH_MUX_GATE(mcu_timer5_clk, "mcu-timer5", timer_parents,
+ 0x088,
+ 1, 3, /* P */
+ 4, 2, /* mux */
+ BIT(0), /* gate */
+ 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_timer_clk, "bus-mcu-timer", ahb, 0x08c, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_dma_clk, "bus-mcu-dma", ahb, 0x104, BIT(0), 0);
+/* The tzma* clocks are only found in the BSP code. */
+static SUNXI_CCU_GATE_DATA(tzma0_clk, "tzma0", ahb, 0x108, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(tzma1_clk, "tzma1", ahb, 0x10c, BIT(0), 0);
+/* The parent is a guess, as this block is not shown in the system bus tree diagram. */
+static SUNXI_CCU_GATE_DATA(bus_pubsram_clk, "bus-pubsram", ahb, 0x114, BIT(0), 0);
+
+/*
+ * The user manual has the "mbus" clock as the parent of both clocks
+ * below, but this arrangement makes more sense: the BSP MCU DMA
+ * controller references both of them, so it likely needs both enabled.
+ */
+static SUNXI_CCU_GATE_FW(mbus_mcu_clk, "mbus-mcu", "mbus", 0x11c, BIT(1), 0);
+static SUNXI_CCU_GATE_HW(mbus_mcu_dma_clk, "mbus-mcu-dma",
+ &mbus_mcu_clk.common.hw, 0x11c, BIT(0), 0);
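+/*
+ * With this parent link, enabling mbus-mcu-dma also enables mbus-mcu
+ * through the clk framework's parent handling, covering the "both
+ * enabled" expectation noted above.
+ */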
+
+static const struct clk_parent_data riscv_pwm_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+};
+
+static SUNXI_CCU_MUX_DATA_WITH_GATE(riscv_clk, "riscv",
+ riscv_pwm_parents, 0x120,
+ 27, 3, BIT(31), 0);
+/* Parents are guesses, as these two blocks are not shown in the system bus tree diagram. */
+static SUNXI_CCU_GATE_DATA(bus_riscv_cfg_clk, "bus-riscv-cfg", ahb,
+ 0x124, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA(bus_riscv_msgbox_clk, "bus-riscv-msgbox", ahb,
+ 0x128, BIT(0), 0);
+
+static SUNXI_CCU_MUX_DATA_WITH_GATE(mcu_pwm0_clk, "mcu-pwm0",
+ riscv_pwm_parents, 0x130,
+ 24, 3, BIT(31), 0);
+static SUNXI_CCU_GATE_DATA(bus_mcu_pwm0_clk, "bus-mcu-pwm0", apb,
+ 0x134, BIT(0), 0);
+
+/*
+ * This array contains all clocks that are controlled by a hardware
+ * register. Each has a (sunxi) .common member, which the common sunxi
+ * CCU code initialises with the MMIO base address and the shared lock.
+ */
+static struct ccu_common *sun55i_a523_mcu_ccu_clks[] = {
+ &pll_audio1_clk.common,
+ &audio_out_clk.common,
+ &dsp_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &i2s3_clk.common,
+ &i2s3_asrc_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &bus_i2s3_clk.common,
+ &spdif_tx_clk.common,
+ &spdif_rx_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_dac_clk.common,
+ &audio_adc_clk.common,
+ &bus_audio_codec_clk.common,
+ &bus_dsp_msgbox_clk.common,
+ &bus_dsp_cfg_clk.common,
+ &bus_npu_aclk.common,
+ &bus_npu_hclk.common,
+ &mcu_timer0_clk.common,
+ &mcu_timer1_clk.common,
+ &mcu_timer2_clk.common,
+ &mcu_timer3_clk.common,
+ &mcu_timer4_clk.common,
+ &mcu_timer5_clk.common,
+ &bus_mcu_timer_clk.common,
+ &bus_mcu_dma_clk.common,
+ &tzma0_clk.common,
+ &tzma1_clk.common,
+ &bus_pubsram_clk.common,
+ &mbus_mcu_dma_clk.common,
+ &mbus_mcu_clk.common,
+ &riscv_clk.common,
+ &bus_riscv_cfg_clk.common,
+ &bus_riscv_msgbox_clk.common,
+ &mcu_pwm0_clk.common,
+ &bus_mcu_pwm0_clk.common,
+};
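+/*
+ * A sketch of what the shared probe code does with this array (see
+ * ccu_common.c; exact details may differ): each listed ccu_common is
+ * handed the MMIO base and the driver-wide spinlock before its clk_hw
+ * is registered, which is why only the register offset and init data
+ * are filled in above.
+ */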
+
+static struct clk_hw_onecell_data sun55i_a523_mcu_hw_clks = {
+ .hws = {
+ [CLK_MCU_PLL_AUDIO1] = &pll_audio1_clk.common.hw,
+ [CLK_MCU_PLL_AUDIO1_DIV2] = &pll_audio1_div2_clk.hw,
+ [CLK_MCU_PLL_AUDIO1_DIV5] = &pll_audio1_div5_clk.hw,
+ [CLK_MCU_AUDIO_OUT] = &audio_out_clk.common.hw,
+ [CLK_MCU_DSP] = &dsp_clk.common.hw,
+ [CLK_MCU_I2S0] = &i2s0_clk.common.hw,
+ [CLK_MCU_I2S1] = &i2s1_clk.common.hw,
+ [CLK_MCU_I2S2] = &i2s2_clk.common.hw,
+ [CLK_MCU_I2S3] = &i2s3_clk.common.hw,
+ [CLK_MCU_I2S3_ASRC] = &i2s3_asrc_clk.common.hw,
+ [CLK_BUS_MCU_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_MCU_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_MCU_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_BUS_MCU_I2S3] = &bus_i2s3_clk.common.hw,
+ [CLK_MCU_SPDIF_TX] = &spdif_tx_clk.common.hw,
+ [CLK_MCU_SPDIF_RX] = &spdif_rx_clk.common.hw,
+ [CLK_BUS_MCU_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_MCU_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_MCU_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_MCU_AUDIO_CODEC_DAC] = &audio_dac_clk.common.hw,
+ [CLK_MCU_AUDIO_CODEC_ADC] = &audio_adc_clk.common.hw,
+ [CLK_BUS_MCU_AUDIO_CODEC] = &bus_audio_codec_clk.common.hw,
+ [CLK_BUS_MCU_DSP_MSGBOX] = &bus_dsp_msgbox_clk.common.hw,
+ [CLK_BUS_MCU_DSP_CFG] = &bus_dsp_cfg_clk.common.hw,
+ [CLK_BUS_MCU_NPU_HCLK] = &bus_npu_hclk.common.hw,
+ [CLK_BUS_MCU_NPU_ACLK] = &bus_npu_aclk.common.hw,
+ [CLK_MCU_TIMER0] = &mcu_timer0_clk.common.hw,
+ [CLK_MCU_TIMER1] = &mcu_timer1_clk.common.hw,
+ [CLK_MCU_TIMER2] = &mcu_timer2_clk.common.hw,
+ [CLK_MCU_TIMER3] = &mcu_timer3_clk.common.hw,
+ [CLK_MCU_TIMER4] = &mcu_timer4_clk.common.hw,
+ [CLK_MCU_TIMER5] = &mcu_timer5_clk.common.hw,
+ [CLK_BUS_MCU_TIMER] = &bus_mcu_timer_clk.common.hw,
+ [CLK_BUS_MCU_DMA] = &bus_mcu_dma_clk.common.hw,
+ [CLK_MCU_TZMA0] = &tzma0_clk.common.hw,
+ [CLK_MCU_TZMA1] = &tzma1_clk.common.hw,
+ [CLK_BUS_MCU_PUBSRAM] = &bus_pubsram_clk.common.hw,
+ [CLK_MCU_MBUS_DMA] = &mbus_mcu_dma_clk.common.hw,
+ [CLK_MCU_MBUS] = &mbus_mcu_clk.common.hw,
+ [CLK_MCU_RISCV] = &riscv_clk.common.hw,
+ [CLK_BUS_MCU_RISCV_CFG] = &bus_riscv_cfg_clk.common.hw,
+ [CLK_BUS_MCU_RISCV_MSGBOX] = &bus_riscv_msgbox_clk.common.hw,
+ [CLK_MCU_PWM0] = &mcu_pwm0_clk.common.hw,
+ [CLK_BUS_MCU_PWM0] = &bus_mcu_pwm0_clk.common.hw,
+ },
+ .num = CLK_BUS_MCU_PWM0 + 1,
+};
+
+static struct ccu_reset_map sun55i_a523_mcu_ccu_resets[] = {
+ [RST_BUS_MCU_I2S0] = { 0x0040, BIT(16) },
+ [RST_BUS_MCU_I2S1] = { 0x0040, BIT(17) },
+ [RST_BUS_MCU_I2S2] = { 0x0040, BIT(18) },
+ [RST_BUS_MCU_I2S3] = { 0x0040, BIT(19) },
+ [RST_BUS_MCU_SPDIF] = { 0x004c, BIT(16) },
+ [RST_BUS_MCU_DMIC] = { 0x0054, BIT(16) },
+ [RST_BUS_MCU_AUDIO_CODEC] = { 0x0060, BIT(16) },
+ [RST_BUS_MCU_DSP_MSGBOX] = { 0x0068, BIT(16) },
+ [RST_BUS_MCU_DSP_CFG] = { 0x006c, BIT(16) },
+ [RST_BUS_MCU_NPU] = { 0x0070, BIT(16) },
+ [RST_BUS_MCU_TIMER] = { 0x008c, BIT(16) },
+	/* The dsp and dsp_debug resets are only found in the BSP code. */
+ [RST_BUS_MCU_DSP_DEBUG] = { 0x0100, BIT(16) },
+ [RST_BUS_MCU_DSP] = { 0x0100, BIT(17) },
+ [RST_BUS_MCU_DMA] = { 0x0104, BIT(16) },
+ [RST_BUS_MCU_PUBSRAM] = { 0x0114, BIT(16) },
+ [RST_BUS_MCU_RISCV_CFG] = { 0x0124, BIT(16) },
+ [RST_BUS_MCU_RISCV_DEBUG] = { 0x0124, BIT(17) },
+ [RST_BUS_MCU_RISCV_CORE] = { 0x0124, BIT(18) },
+ [RST_BUS_MCU_RISCV_MSGBOX] = { 0x0128, BIT(16) },
+ [RST_BUS_MCU_PWM0] = { 0x0134, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun55i_a523_mcu_ccu_desc = {
+ .ccu_clks = sun55i_a523_mcu_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun55i_a523_mcu_ccu_clks),
+
+ .hw_clks = &sun55i_a523_mcu_hw_clks,
+
+ .resets = sun55i_a523_mcu_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun55i_a523_mcu_ccu_resets),
+};
+
+static int sun55i_a523_mcu_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ u32 val;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ val = readl(reg + SUN55I_A523_PLL_AUDIO1_REG);
+
+	/*
+	 * The PLL clock code does not model all bits; for instance, it does
+	 * not support separate enable and gate bits. We present the
+	 * gate bit (27) as the enable bit, but then have to set the
+	 * PLL Enable, LDO Enable, and Lock Enable bits on all PLLs here.
+	 */
+ val |= BIT(31) | BIT(30) | BIT(29);
+
+ /* Enforce p1 = 5, p0 = 2 (the default) for PLL_AUDIO1 */
+ val &= ~(GENMASK(22, 20) | GENMASK(18, 16));
+ val |= (4 << 20) | (1 << 16);
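+	/*
+	 * Derived from the values above: the p1 field at bits [22:20] and
+	 * the p0 field at bits [18:16] hold (divider - 1), so 4 selects /5
+	 * and 1 selects /2.
+	 */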
+
+ writel(val, reg + SUN55I_A523_PLL_AUDIO1_REG);
+
+	return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun55i_a523_mcu_ccu_desc);
+}
+
+static const struct of_device_id sun55i_a523_mcu_ccu_ids[] = {
+ { .compatible = "allwinner,sun55i-a523-mcu-ccu" },
+ { }
+};
+
+static struct platform_driver sun55i_a523_mcu_ccu_driver = {
+ .probe = sun55i_a523_mcu_ccu_probe,
+ .driver = {
+ .name = "sun55i-a523-mcu-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun55i_a523_mcu_ccu_ids,
+ },
+};
+module_platform_driver(sun55i_a523_mcu_ccu_driver);
+
+MODULE_IMPORT_NS("SUNXI_CCU");
+MODULE_DESCRIPTION("Support for the Allwinner A523 MCU CCU");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
index 70ce0ca0cb7d..0339c4af0fe5 100644
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523-r.c
@@ -121,11 +121,11 @@ static SUNXI_CCU_GATE_HW(bus_r_ir_rx_clk, "bus-r-ir-rx",
&r_apb0_clk.common.hw, 0x1cc, BIT(0), 0);
static SUNXI_CCU_GATE_HW(bus_r_dma_clk, "bus-r-dma",
- &r_apb0_clk.common.hw, 0x1dc, BIT(0), 0);
+ &r_apb0_clk.common.hw, 0x1dc, BIT(0), CLK_IS_CRITICAL);
static SUNXI_CCU_GATE_HW(bus_r_rtc_clk, "bus-r-rtc",
&r_apb0_clk.common.hw, 0x20c, BIT(0), 0);
static SUNXI_CCU_GATE_HW(bus_r_cpucfg_clk, "bus-r-cpucfg",
- &r_apb0_clk.common.hw, 0x22c, BIT(0), 0);
+ &r_apb0_clk.common.hw, 0x22c, BIT(0), CLK_IS_CRITICAL);
static struct ccu_common *sun55i_a523_r_ccu_clks[] = {
&r_ahb_clk.common,
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
index 1a9a1cb869e2..20dad06b37ca 100644
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
+++ b/drivers/clk/sunxi-ng/ccu-sun55i-a523.c
@@ -11,6 +11,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <dt-bindings/clock/sun55i-a523-ccu.h>
+#include <dt-bindings/reset/sun55i-a523-ccu.h>
+
#include "../clk.h"
#include "ccu_common.h"
@@ -25,8 +28,6 @@
#include "ccu_nkmp.h"
#include "ccu_nm.h"
-#include "ccu-sun55i-a523.h"
-
/*
* The 24 MHz oscillator, the root of most of the clock tree.
* .fw_name is the string used in the DT "clock-names" property, used to
@@ -299,7 +300,7 @@ static struct ccu_nm pll_audio0_4x_clk = {
.m = _SUNXI_CCU_DIV(16, 6),
.sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
0x178, BIT(31)),
- .min_rate = 180000000U,
+ .min_rate = 90000000U,
.max_rate = 3000000000U,
.common = {
.reg = 0x078,
@@ -486,6 +487,18 @@ static SUNXI_CCU_M_HW_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
static SUNXI_CCU_GATE_HWS(bus_ve_clk, "bus-ve", ahb_hws, 0x69c, BIT(0), 0);
+static const struct clk_hw *npu_parents[] = {
+ &pll_periph0_480M_clk.common.hw,
+ &pll_periph0_600M_clk.hw,
+ &pll_periph0_800M_clk.common.hw,
+ &pll_npu_2x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(npu_clk, "npu", npu_parents, 0x6e0,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
static SUNXI_CCU_GATE_HWS(bus_dma_clk, "bus-dma", ahb_hws, 0x70c, BIT(0), 0);
static SUNXI_CCU_GATE_HWS(bus_msgbox_clk, "bus-msgbox", ahb_hws, 0x71c,
@@ -1217,6 +1230,7 @@ static struct ccu_common *sun55i_a523_ccu_clks[] = {
&bus_ce_sys_clk.common,
&ve_clk.common,
&bus_ve_clk.common,
+ &npu_clk.common,
&bus_dma_clk.common,
&bus_msgbox_clk.common,
&bus_spinlock_clk.common,
@@ -1343,7 +1357,6 @@ static struct ccu_common *sun55i_a523_ccu_clks[] = {
};
static struct clk_hw_onecell_data sun55i_a523_hw_clks = {
- .num = CLK_NUMBER,
.hws = {
[CLK_PLL_DDR0] = &pll_ddr_clk.common.hw,
[CLK_PLL_PERIPH0_4X] = &pll_periph0_4x_clk.common.hw,
@@ -1524,7 +1537,9 @@ static struct clk_hw_onecell_data sun55i_a523_hw_clks = {
[CLK_FANOUT0] = &fanout0_clk.common.hw,
[CLK_FANOUT1] = &fanout1_clk.common.hw,
[CLK_FANOUT2] = &fanout2_clk.common.hw,
+ [CLK_NPU] = &npu_clk.common.hw,
},
+ .num = CLK_NPU + 1,
};
static struct ccu_reset_map sun55i_a523_ccu_resets[] = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun55i-a523.h b/drivers/clk/sunxi-ng/ccu-sun55i-a523.h
deleted file mode 100644
index fc8dd42f1b47..000000000000
--- a/drivers/clk/sunxi-ng/ccu-sun55i-a523.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2024 Arm Ltd.
- */
-
-#ifndef _CCU_SUN55I_A523_H
-#define _CCU_SUN55I_A523_H
-
-#include <dt-bindings/clock/sun55i-a523-ccu.h>
-#include <dt-bindings/reset/sun55i-a523-ccu.h>
-
-#define CLK_NUMBER (CLK_FANOUT2 + 1)
-
-#endif /* _CCU_SUN55I_A523_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
index 0536e880b80f..f6bfeba009e8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-rtc.c
@@ -325,6 +325,13 @@ static const struct sun6i_rtc_match_data sun50i_r329_rtc_ccu_data = {
.osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
};
+static const struct sun6i_rtc_match_data sun55i_a523_rtc_ccu_data = {
+ .have_ext_osc32k = true,
+ .have_iosc_calibration = true,
+ .osc32k_fanout_parents = sun50i_r329_osc32k_fanout_parents,
+ .osc32k_fanout_nparents = ARRAY_SIZE(sun50i_r329_osc32k_fanout_parents),
+};
+
static const struct of_device_id sun6i_rtc_ccu_match[] = {
{
.compatible = "allwinner,sun50i-h616-rtc",
@@ -334,6 +341,10 @@ static const struct of_device_id sun6i_rtc_ccu_match[] = {
.compatible = "allwinner,sun50i-r329-rtc",
.data = &sun50i_r329_rtc_ccu_data,
},
+ {
+ .compatible = "allwinner,sun55i-a523-rtc",
+ .data = &sun55i_a523_rtc_ccu_data,
+ },
{},
};
MODULE_DEVICE_TABLE(of, sun6i_rtc_ccu_match);
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 90d49ee8e0cc..be00b3277e97 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -274,6 +274,24 @@ struct ccu_div {
SUNXI_CCU_M_HWS_WITH_GATE(_struct, _name, _parent, _reg, \
_mshift, _mwidth, 0, _flags)
+#define SUNXI_CCU_P_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV_FLAGS(_mshift, _mwidth, \
+ CLK_DIVIDER_POWER_OF_TWO), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
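+/*
+ * Note: CLK_DIVIDER_POWER_OF_TWO means the divider field holds the
+ * log2 of the divider (rate = parent >> p), matching the MCU timer "P"
+ * dividers this macro was added for.
+ */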
+
static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 354c981943b6..4221b1888b38 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -185,7 +185,7 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
p &= (1 << cmp->p.width) - 1;
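+	/*
+	 * For dual dividers the register field encodes the divider minus
+	 * cmp->p.offset, so the offset has to be added back before
+	 * dividing; the old code divided by the bare field value.
+	 */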
if (cmp->common.features & CCU_FEATURE_DUAL_DIV)
- rate = (parent_rate / p) / m;
+ rate = (parent_rate / (p + cmp->p.offset)) / m;
else
rate = (parent_rate >> p) / m;
diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig
index 90df619dc087..62147a069606 100644
--- a/drivers/clk/tegra/Kconfig
+++ b/drivers/clk/tegra/Kconfig
@@ -4,7 +4,7 @@ config CLK_TEGRA_BPMP
depends on TEGRA_BPMP
config TEGRA_CLK_DFLL
- depends on ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
+ depends on ARCH_TEGRA_114_SOC || ARCH_TEGRA_124_SOC || ARCH_TEGRA_210_SOC
select PM_OPP
def_bool y
diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
index 2c4bb96eae16..468a4403f147 100644
--- a/drivers/clk/tegra/clk-audio-sync.c
+++ b/drivers/clk/tegra/clk-audio-sync.c
@@ -17,15 +17,15 @@ static unsigned long clk_sync_source_recalc_rate(struct clk_hw *hw,
return sync->rate;
}
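+/*
+ * Converted from .round_rate to .determine_rate (the clk framework is
+ * phasing out .round_rate): the request arrives as a struct
+ * clk_rate_request, the rounded rate is written back into req->rate,
+ * and the return value only carries success or an error code.
+ */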
-static long clk_sync_source_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_sync_source_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_sync_source *sync = to_clk_sync_source(hw);
- if (rate > sync->max_rate)
+ if (req->rate > sync->max_rate)
return -EINVAL;
else
- return rate;
+ return 0;
}
static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -38,7 +38,7 @@ static int clk_sync_source_set_rate(struct clk_hw *hw, unsigned long rate,
}
const struct clk_ops tegra_clk_sync_source_ops = {
- .round_rate = clk_sync_source_round_rate,
+ .determine_rate = clk_sync_source_determine_rate,
.set_rate = clk_sync_source_set_rate,
.recalc_rate = clk_sync_source_recalc_rate,
};
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index b2323cb8eddc..77a2586dbe00 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -635,7 +635,7 @@ static int tegra_bpmp_register_clocks(struct tegra_bpmp *bpmp,
bpmp->num_clocks = count;
- bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(struct tegra_bpmp_clk), GFP_KERNEL);
+ bpmp->clocks = devm_kcalloc(bpmp->dev, count, sizeof(*bpmp->clocks), GFP_KERNEL);
if (!bpmp->clocks)
return -ENOMEM;
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index 58fa5a59e0c7..22dc29432eff 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -882,7 +882,7 @@ static void dfll_set_frequency_request(struct tegra_dfll *td,
{
u32 val = 0;
int force_val;
- int coef = 128; /* FIXME: td->cg_scale? */;
+ int coef = 128; /* FIXME: td->cg_scale? */
force_val = (req->lut_index - td->lut_safe) * coef / td->cg;
force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 38daf483ddf1..37439fcb3ac0 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -58,23 +58,31 @@ static unsigned long clk_frac_div_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_frac_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_frac_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_frac_div *divider = to_clk_frac_div(hw);
int div, mul;
- unsigned long output_rate = *prate;
+ unsigned long output_rate = req->best_parent_rate;
- if (!rate)
- return output_rate;
+ if (!req->rate) {
+ req->rate = output_rate;
- div = get_div(divider, rate, output_rate);
- if (div < 0)
- return *prate;
+ return 0;
+ }
+
+ div = get_div(divider, req->rate, output_rate);
+ if (div < 0) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
mul = get_mul(divider);
- return DIV_ROUND_UP(output_rate * mul, div + mul);
+ req->rate = DIV_ROUND_UP(output_rate * mul, div + mul);
+
+ return 0;
}
static int clk_frac_div_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -127,7 +135,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
const struct clk_ops tegra_clk_frac_div_ops = {
.recalc_rate = clk_frac_div_recalc_rate,
.set_rate = clk_frac_div_set_rate,
- .round_rate = clk_frac_div_round_rate,
+ .determine_rate = clk_frac_div_determine_rate,
.restore_context = clk_divider_restore_context,
};
diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c
index fa0cd7bb8ee6..6ebeaa7cb656 100644
--- a/drivers/clk/tegra/clk-periph.c
+++ b/drivers/clk/tegra/clk-periph.c
@@ -51,16 +51,10 @@ static int clk_periph_determine_rate(struct clk_hw *hw,
struct tegra_clk_periph *periph = to_clk_periph(hw);
const struct clk_ops *div_ops = periph->div_ops;
struct clk_hw *div_hw = &periph->divider.hw;
- long rate;
__clk_hw_set_clk(div_hw, hw);
- rate = div_ops->round_rate(div_hw, req->rate, &req->best_parent_rate);
- if (rate < 0)
- return rate;
-
- req->rate = (unsigned long)rate;
- return 0;
+ return div_ops->determine_rate(div_hw, req);
}
static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index 100b5d9b7e26..591b9f0c155a 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -840,8 +840,8 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
@@ -849,15 +849,20 @@ static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
if (pll->params->flags & TEGRA_PLL_FIXED) {
/* PLLM/MB are used for memory; we do not change rate */
if (pll->params->flags & (TEGRA_PLLM | TEGRA_PLLMB))
- return clk_hw_get_rate(hw);
- return pll->params->fixed_rate;
+ req->rate = clk_hw_get_rate(hw);
+ else
+ req->rate = pll->params->fixed_rate;
+
+ return 0;
}
- if (_get_table_rate(hw, &cfg, rate, *prate) &&
- pll->params->calc_rate(hw, &cfg, rate, *prate))
+ if (_get_table_rate(hw, &cfg, req->rate, req->best_parent_rate) &&
+ pll->params->calc_rate(hw, &cfg, req->rate, req->best_parent_rate))
return -EINVAL;
- return cfg.output_rate;
+ req->rate = cfg.output_rate;
+
+ return 0;
}
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
@@ -1057,7 +1062,7 @@ const struct clk_ops tegra_clk_pll_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
.restore_context = tegra_clk_pll_restore_context,
};
@@ -1195,7 +1200,7 @@ static const struct clk_ops tegra_clk_pllu_ops = {
.enable = clk_pllu_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_round_rate,
+ .determine_rate = clk_pll_determine_rate,
.set_rate = clk_pll_set_rate,
};
@@ -1353,15 +1358,15 @@ static int clk_pllxc_set_rate(struct clk_hw *hw, unsigned long rate,
return ret;
}
-static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pll_ramp_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table cfg;
int ret, p_div;
- u64 output_rate = *prate;
+ u64 output_rate = req->best_parent_rate;
- ret = _pll_ramp_calc_pll(hw, &cfg, rate, *prate);
+ ret = _pll_ramp_calc_pll(hw, &cfg, req->rate, req->best_parent_rate);
if (ret < 0)
return ret;
@@ -1375,7 +1380,9 @@ static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
output_rate *= cfg.n;
do_div(output_rate, cfg.m * p_div);
- return output_rate;
+ req->rate = output_rate;
+
+ return 0;
}
static void _pllcx_strobe(struct tegra_clk_pll *pll)
@@ -1598,12 +1605,15 @@ static unsigned long clk_pllre_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long clk_pllre_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_pllre_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
- return _pllre_calc_rate(pll, NULL, rate, *prate);
+ req->rate = _pllre_calc_rate(pll, NULL, req->rate,
+ req->best_parent_rate);
+
+ return 0;
}
static int clk_plle_tegra114_enable(struct clk_hw *hw)
@@ -2003,7 +2013,7 @@ static const struct clk_ops tegra_clk_pllxc_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllxc_set_rate,
};
@@ -2012,7 +2022,7 @@ static const struct clk_ops tegra_clk_pllc_ops = {
.enable = clk_pllc_enable,
.disable = clk_pllc_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllc_set_rate,
};
@@ -2021,7 +2031,7 @@ static const struct clk_ops tegra_clk_pllre_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pllre_recalc_rate,
- .round_rate = clk_pllre_round_rate,
+ .determine_rate = clk_pllre_determine_rate,
.set_rate = clk_pllre_set_rate,
};
@@ -2321,7 +2331,7 @@ static const struct clk_ops tegra_clk_pllss_ops = {
.enable = clk_pll_enable,
.disable = clk_pll_disable,
.recalc_rate = clk_pll_recalc_rate,
- .round_rate = clk_pll_ramp_round_rate,
+ .determine_rate = clk_pll_ramp_determine_rate,
.set_rate = clk_pllxc_set_rate,
.restore_context = tegra_clk_pll_restore_context,
};
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 7ec47942720c..51fb356e770e 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -147,17 +147,10 @@ static int clk_super_determine_rate(struct clk_hw *hw,
{
struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
struct clk_hw *div_hw = &super->frac_div.hw;
- unsigned long rate;
__clk_hw_set_clk(div_hw, hw);
- rate = super->div_ops->round_rate(div_hw, req->rate,
- &req->best_parent_rate);
- if (rate < 0)
- return rate;
-
- req->rate = rate;
- return 0;
+ return super->div_ops->determine_rate(div_hw, req);
}
static unsigned long clk_super_recalc_rate(struct clk_hw *hw,
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 73303458e886..6c8e053311c3 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/clk/tegra.h>
#include <dt-bindings/clock/tegra114-car.h>
+#include <dt-bindings/reset/nvidia,tegra114-car.h>
#include "clk.h"
#include "clk-id.h"
@@ -1272,7 +1273,7 @@ EXPORT_SYMBOL(tegra114_clock_tune_cpu_trimmers_init);
*
* Assert the reset line of the DFLL's DVCO. No return value.
*/
-void tegra114_clock_assert_dfll_dvco_reset(void)
+static void tegra114_clock_assert_dfll_dvco_reset(void)
{
u32 v;
@@ -1281,7 +1282,6 @@ void tegra114_clock_assert_dfll_dvco_reset(void)
writel_relaxed(v, clk_base + RST_DFLL_DVCO);
tegra114_car_barrier();
}
-EXPORT_SYMBOL(tegra114_clock_assert_dfll_dvco_reset);
/**
* tegra114_clock_deassert_dfll_dvco_reset - deassert the DFLL's DVCO reset
@@ -1289,7 +1289,7 @@ EXPORT_SYMBOL(tegra114_clock_assert_dfll_dvco_reset);
* Deassert the reset line of the DFLL's DVCO, allowing the DVCO to
* operate. No return value.
*/
-void tegra114_clock_deassert_dfll_dvco_reset(void)
+static void tegra114_clock_deassert_dfll_dvco_reset(void)
{
u32 v;
@@ -1298,7 +1298,26 @@ void tegra114_clock_deassert_dfll_dvco_reset(void)
writel_relaxed(v, clk_base + RST_DFLL_DVCO);
tegra114_car_barrier();
}
-EXPORT_SYMBOL(tegra114_clock_deassert_dfll_dvco_reset);
+
+static int tegra114_reset_assert(unsigned long id)
+{
+ if (id == TEGRA114_RST_DFLL_DVCO)
+ tegra114_clock_assert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int tegra114_reset_deassert(unsigned long id)
+{
+ if (id == TEGRA114_RST_DFLL_DVCO)
+ tegra114_clock_deassert_dfll_dvco_reset();
+ else
+ return -EINVAL;
+
+ return 0;
+}
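+
+/*
+ * These hooks are wired up via tegra_init_special_resets() below, so
+ * the DVCO reset is exposed through the CAR reset controller as
+ * TEGRA114_RST_DFLL_DVCO.
+ */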
static void __init tegra114_clock_init(struct device_node *np)
{
@@ -1344,6 +1363,9 @@ static void __init tegra114_clock_init(struct device_node *np)
tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
&pll_x_params);
+ tegra_init_special_resets(1, tegra114_reset_assert,
+ tegra114_reset_deassert);
+
tegra_add_of_provider(np, of_clk_src_onecell_get);
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 0251618b82c8..457a77c5bb62 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -29,6 +29,99 @@ struct dfll_fcpu_data {
};
/* Maximum CPU frequency, indexed by CPU speedo id */
+static const unsigned long tegra114_cpu_max_freq_table[] = {
+ [0] = 2040000000UL,
+ [1] = 1810500000UL,
+ [2] = 1912500000UL,
+ [3] = 1810500000UL,
+};
+
+#define T114_CPU_CVB_TABLE \
+ .min_millivolts = 1000, \
+ .max_millivolts = 1320, \
+ .speedo_scale = 100, \
+ .voltage_scale = 1000, \
+ .entries = { \
+ { 306000000UL, { 2190643, -141851, 3576 } }, \
+ { 408000000UL, { 2250968, -144331, 3576 } }, \
+ { 510000000UL, { 2313333, -146811, 3576 } }, \
+ { 612000000UL, { 2377738, -149291, 3576 } }, \
+ { 714000000UL, { 2444183, -151771, 3576 } }, \
+ { 816000000UL, { 2512669, -154251, 3576 } }, \
+ { 918000000UL, { 2583194, -156731, 3576 } }, \
+ { 1020000000UL, { 2655759, -159211, 3576 } }, \
+ { 1122000000UL, { 2730365, -161691, 3576 } }, \
+ { 1224000000UL, { 2807010, -164171, 3576 } }, \
+ { 1326000000UL, { 2885696, -166651, 3576 } }, \
+ { 1428000000UL, { 2966422, -169131, 3576 } }, \
+ { 1530000000UL, { 3049183, -171601, 3576 } }, \
+ { 1606500000UL, { 3112179, -173451, 3576 } }, \
+ { 1708500000UL, { 3198504, -175931, 3576 } }, \
+ { 1810500000UL, { 3304747, -179126, 3576 } }, \
+ { 1912500000UL, { 3395401, -181606, 3576 } }, \
+ { 0UL, { 0, 0, 0 } }, \
+ }, \
+ .cpu_dfll_data = { \
+ .tune0_low = 0x00b0039d, \
+ .tune0_high = 0x00b0009d, \
+ .tune1 = 0x0000001f, \
+ .tune_high_min_millivolts = 1050, \
+ }
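+
+/*
+ * Sketch of how the DFLL code consumes these entries (see the cvb.c
+ * helpers; details may differ): for a given chip speedo value, the
+ * minimum voltage at each frequency is roughly
+ *	(c0 + c1 * speedo / speedo_scale + c2 * speedo^2 / speedo_scale^2)
+ *		/ voltage_scale
+ * millivolts.
+ */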
+
+static const struct cvb_table tegra114_cpu_cvb_tables[] = {
+ {
+ .speedo_id = 0,
+ .process_id = -1,
+ .min_millivolts = 1000,
+ .max_millivolts = 1250,
+ .speedo_scale = 100,
+ .voltage_scale = 100,
+ .entries = {
+ { 306000000UL, { 107330, -1569, 0 } },
+ { 408000000UL, { 111250, -1666, 0 } },
+ { 510000000UL, { 110000, -1460, 0 } },
+ { 612000000UL, { 117290, -1745, 0 } },
+ { 714000000UL, { 122700, -1910, 0 } },
+ { 816000000UL, { 125620, -1945, 0 } },
+ { 918000000UL, { 130560, -2076, 0 } },
+ { 1020000000UL, { 137280, -2303, 0 } },
+ { 1122000000UL, { 146440, -2660, 0 } },
+ { 1224000000UL, { 152190, -2825, 0 } },
+ { 1326000000UL, { 157520, -2953, 0 } },
+ { 1428000000UL, { 166100, -3261, 0 } },
+ { 1530000000UL, { 176410, -3647, 0 } },
+ { 1632000000UL, { 189620, -4186, 0 } },
+ { 1734000000UL, { 203190, -4725, 0 } },
+ { 1836000000UL, { 222670, -5573, 0 } },
+ { 1938000000UL, { 256210, -7165, 0 } },
+ { 2040000000UL, { 250050, -6544, 0 } },
+ { 0UL, { 0, 0, 0 } },
+ },
+ .cpu_dfll_data = {
+ .tune0_low = 0x00b0019d,
+ .tune0_high = 0x00b0019d,
+ .tune1 = 0x0000001f,
+ .tune_high_min_millivolts = 1000,
+ }
+ },
+ {
+ .speedo_id = 1,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+ {
+ .speedo_id = 2,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+ {
+ .speedo_id = 3,
+ .process_id = -1,
+ T114_CPU_CVB_TABLE
+ },
+};
+
+/* Maximum CPU frequency, indexed by CPU speedo id */
static const unsigned long tegra124_cpu_max_freq_table[] = {
[0] = 2014500000UL,
[1] = 2320500000UL,
@@ -93,7 +186,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
[10] = 1504500000UL,
};
-#define CPU_CVB_TABLE \
+#define TEGRA210_CPU_CVB_TABLE \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -120,7 +213,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_XA \
+#define TEGRA210_CPU_CVB_TABLE_XA \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -143,7 +236,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM1 \
+#define TEGRA210_CPU_CVB_TABLE_EUCM1 \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -166,7 +259,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM2 \
+#define TEGRA210_CPU_CVB_TABLE_EUCM2 \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -188,7 +281,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
+#define TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -209,7 +302,7 @@ static const unsigned long tegra210_cpu_max_freq_table[] = {
{ 0UL, { 0, 0, 0 } }, \
}
-#define CPU_CVB_TABLE_ODN \
+#define TEGRA210_CPU_CVB_TABLE_ODN \
.speedo_scale = 100, \
.voltage_scale = 1000, \
.entries = { \
@@ -238,7 +331,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 840,
.max_millivolts = 1120,
- CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -251,7 +344,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 840,
.max_millivolts = 1120,
- CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
+ TEGRA210_CPU_CVB_TABLE_EUCM2_JOINT_RAIL,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -264,7 +357,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 900,
.max_millivolts = 1162,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -276,7 +369,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 900,
.max_millivolts = 1162,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -288,7 +381,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 900,
.max_millivolts = 1195,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -300,7 +393,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 900,
.max_millivolts = 1195,
- CPU_CVB_TABLE_EUCM2,
+ TEGRA210_CPU_CVB_TABLE_EUCM2,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -312,7 +405,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 841,
.max_millivolts = 1227,
- CPU_CVB_TABLE_EUCM1,
+ TEGRA210_CPU_CVB_TABLE_EUCM1,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -325,7 +418,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 841,
.max_millivolts = 1227,
- CPU_CVB_TABLE_EUCM1,
+ TEGRA210_CPU_CVB_TABLE_EUCM1,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -338,7 +431,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 870,
.max_millivolts = 1150,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x20091d9,
@@ -349,7 +442,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 870,
.max_millivolts = 1150,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x25501d0,
@@ -360,7 +453,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 818,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -373,7 +466,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 818,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -386,7 +479,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = -1,
.min_millivolts = 918,
.max_millivolts = 1113,
- CPU_CVB_TABLE_XA,
+ TEGRA210_CPU_CVB_TABLE_XA,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x17711BD,
@@ -397,7 +490,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 825,
.max_millivolts = 1227,
- CPU_CVB_TABLE_ODN,
+ TEGRA210_CPU_CVB_TABLE_ODN,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -410,7 +503,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 825,
.max_millivolts = 1227,
- CPU_CVB_TABLE_ODN,
+ TEGRA210_CPU_CVB_TABLE_ODN,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -423,7 +516,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 870,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x20091d9,
@@ -434,7 +527,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 870,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune1 = 0x25501d0,
@@ -445,7 +538,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 837,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -458,7 +551,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 837,
.max_millivolts = 1227,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -471,7 +564,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 0,
.min_millivolts = 850,
.max_millivolts = 1170,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -484,7 +577,7 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
.process_id = 1,
.min_millivolts = 850,
.max_millivolts = 1170,
- CPU_CVB_TABLE,
+ TEGRA210_CPU_CVB_TABLE,
.cpu_dfll_data = {
.tune0_low = 0xffead0ff,
.tune0_high = 0xffead0ff,
@@ -494,6 +587,13 @@ static struct cvb_table tegra210_cpu_cvb_tables[] = {
},
};
+static const struct dfll_fcpu_data tegra114_dfll_fcpu_data = {
+ .cpu_max_freq_table = tegra114_cpu_max_freq_table,
+ .cpu_max_freq_table_size = ARRAY_SIZE(tegra114_cpu_max_freq_table),
+ .cpu_cvb_tables = tegra114_cpu_cvb_tables,
+ .cpu_cvb_tables_size = ARRAY_SIZE(tegra114_cpu_cvb_tables)
+};
+
static const struct dfll_fcpu_data tegra124_dfll_fcpu_data = {
.cpu_max_freq_table = tegra124_cpu_max_freq_table,
.cpu_max_freq_table_size = ARRAY_SIZE(tegra124_cpu_max_freq_table),
@@ -510,6 +610,10 @@ static const struct dfll_fcpu_data tegra210_dfll_fcpu_data = {
static const struct of_device_id tegra124_dfll_fcpu_of_match[] = {
{
+ .compatible = "nvidia,tegra114-dfll",
+ .data = &tegra114_dfll_fcpu_data,
+ },
+ {
.compatible = "nvidia,tegra124-dfll",
.data = &tegra124_dfll_fcpu_data,
},
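For reference, a minimal sketch (not part of this patch) of how the per-SoC data attached through .data above is typically consumed; the probe function name is illustrative only:

	static int dfll_fcpu_probe_sketch(struct platform_device *pdev)
	{
		const struct dfll_fcpu_data *fcpu_data;

		/* resolves to the tegra114/124/210 table picked by the match above */
		fcpu_data = device_get_match_data(&pdev->dev);
		if (!fcpu_data)
			return -ENODEV;

		return 0;
	}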
diff --git a/drivers/clk/tegra/clk-tegra210-emc.c b/drivers/clk/tegra/clk-tegra210-emc.c
index 672ca8c184d2..fbf3c894eb56 100644
--- a/drivers/clk/tegra/clk-tegra210-emc.c
+++ b/drivers/clk/tegra/clk-tegra210-emc.c
@@ -86,22 +86,30 @@ static unsigned long tegra210_clk_emc_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_UP(parent_rate * 2, div);
}
-static long tegra210_clk_emc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int tegra210_clk_emc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tegra210_clk_emc *emc = to_tegra210_clk_emc(hw);
struct tegra210_clk_emc_provider *provider = emc->provider;
unsigned int i;
- if (!provider || !provider->configs || provider->num_configs == 0)
- return clk_hw_get_rate(hw);
+ if (!provider || !provider->configs || provider->num_configs == 0) {
+ req->rate = clk_hw_get_rate(hw);
+
+ return 0;
+ }
for (i = 0; i < provider->num_configs; i++) {
- if (provider->configs[i].rate >= rate)
- return provider->configs[i].rate;
+ if (provider->configs[i].rate >= req->rate) {
+ req->rate = provider->configs[i].rate;
+
+ return 0;
+ }
}
- return provider->configs[i - 1].rate;
+ req->rate = provider->configs[i - 1].rate;
+
+ return 0;
}
static struct clk *tegra210_clk_emc_find_parent(struct tegra210_clk_emc *emc,
@@ -259,7 +267,7 @@ static int tegra210_clk_emc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops tegra210_clk_emc_ops = {
.get_parent = tegra210_clk_emc_get_parent,
.recalc_rate = tegra210_clk_emc_recalc_rate,
- .round_rate = tegra210_clk_emc_round_rate,
+ .determine_rate = tegra210_clk_emc_determine_rate,
.set_rate = tegra210_clk_emc_set_rate,
};
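All of the .round_rate to .determine_rate conversions in this series follow the contract the EMC change above illustrates; a minimal hedged sketch, with hypothetical names:

	static int example_determine_rate(struct clk_hw *hw,
					  struct clk_rate_request *req)
	{
		/*
		 * .round_rate returned the rounded rate (or a negative
		 * errno); .determine_rate instead stores the chosen rate
		 * in req->rate and returns 0 or a negative errno. The
		 * callback may also update req->best_parent_rate and
		 * req->best_parent_hw to ask for a reparent.
		 */
		req->rate = clamp(req->rate, req->min_rate, req->max_rate);

		return 0;
	}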
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 412902f573b5..504d0ea997a5 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -3444,7 +3444,7 @@ static void tegra210_disable_cpu_clock(u32 cpu)
static u32 spare_reg_ctx, misc_clk_enb_ctx, clk_msk_arm_ctx;
static u32 cpu_softrst_ctx[3];
-static int tegra210_clk_suspend(void)
+static int tegra210_clk_suspend(void *data)
{
unsigned int i;
@@ -3465,7 +3465,7 @@ static int tegra210_clk_suspend(void)
return 0;
}
-static void tegra210_clk_resume(void)
+static void tegra210_clk_resume(void *data)
{
unsigned int i;
@@ -3523,13 +3523,17 @@ static void tegra210_cpu_clock_resume(void)
}
#endif
-static struct syscore_ops tegra_clk_syscore_ops = {
+static const struct syscore_ops tegra_clk_syscore_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend = tegra210_clk_suspend,
.resume = tegra210_clk_resume,
#endif
};
+static struct syscore tegra_clk_syscore = {
+ .ops = &tegra_clk_syscore_ops,
+};
+
static struct tegra_cpu_car_ops tegra210_cpu_car_ops = {
.wait_for_reset = tegra210_wait_cpu_in_reset,
.disable_clock = tegra210_disable_cpu_clock,
@@ -3813,6 +3817,6 @@ static void __init tegra210_clock_init(struct device_node *np)
tegra_cpu_car_ops = &tegra210_cpu_car_ops;
- register_syscore_ops(&tegra_clk_syscore_ops);
+ register_syscore(&tegra_clk_syscore);
}
CLK_OF_DECLARE(tegra210, "nvidia,tegra210-car", tegra210_clock_init);
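The suspend/resume change above switches from the global register_syscore_ops() to an instance-based API. Assuming the struct syscore and register_syscore() interface exactly as it appears in this diff (the hunk does not show how the void *data cookie is supplied), a minimal user would look like:

	static int foo_suspend(void *data)
	{
		/* data is the per-instance cookie, unused here */
		return 0;
	}

	static void foo_resume(void *data)
	{
	}

	static const struct syscore_ops foo_syscore_ops = {
		.suspend = foo_suspend,
		.resume = foo_resume,
	};

	static struct syscore foo_syscore = {
		.ops = &foo_syscore_ops,
	};

	/* at init time: */
	register_syscore(&foo_syscore);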
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 82a8cb9545eb..e7ebb63970d3 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -53,6 +53,7 @@
#define SYSTEM_CLK_RATE 0x030
#define TEGRA30_CLK_PERIPH_BANKS 5
+#define TEGRA30_CLK_CLK_MAX 311
#define PLLC_BASE 0x80
#define PLLC_MISC 0x8c
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 9ea839af14bc..73efd2ff37c9 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -897,8 +897,6 @@ static inline bool tegra124_clk_emc_driver_available(struct clk_hw *emc_hw)
void tegra114_clock_tune_cpu_trimmers_high(void);
void tegra114_clock_tune_cpu_trimmers_low(void);
void tegra114_clock_tune_cpu_trimmers_init(void);
-void tegra114_clock_assert_dfll_dvco_reset(void);
-void tegra114_clock_deassert_dfll_dvco_reset(void);
typedef void (*tegra_clk_apply_init_table_func)(void);
extern tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
diff --git a/drivers/clk/thead/clk-th1520-ap.c b/drivers/clk/thead/clk-th1520-ap.c
index cf1bba58f641..71ad03a998e8 100644
--- a/drivers/clk/thead/clk-th1520-ap.c
+++ b/drivers/clk/thead/clk-th1520-ap.c
@@ -18,6 +18,7 @@
#define TH1520_PLL_FBDIV GENMASK(19, 8)
#define TH1520_PLL_REFDIV GENMASK(5, 0)
#define TH1520_PLL_BYPASS BIT(30)
+#define TH1520_PLL_VCO_RST BIT(29)
#define TH1520_PLL_DSMPD BIT(24)
#define TH1520_PLL_FRAC GENMASK(23, 0)
#define TH1520_PLL_FRAC_BITS 24
@@ -48,12 +49,14 @@ struct ccu_mux {
};
struct ccu_gate {
- u32 enable;
- struct ccu_common common;
+ int clkid;
+ u32 reg;
+ struct clk_gate gate;
};
struct ccu_div {
u32 enable;
+ u32 div_en;
struct ccu_div_internal div;
struct ccu_internal mux;
struct ccu_common common;
@@ -87,12 +90,12 @@ struct ccu_pll {
0), \
}
-#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _gate, _flags) \
+#define CCU_GATE(_clkid, _struct, _name, _parent, _reg, _bit, _flags) \
struct ccu_gate _struct = { \
- .enable = _gate, \
- .common = { \
- .clkid = _clkid, \
- .cfg0 = _reg, \
+ .clkid = _clkid, \
+ .reg = _reg, \
+ .gate = { \
+ .bit_idx = _bit, \
.hw.init = CLK_HW_INIT_PARENTS_DATA( \
_name, \
_parent, \
@@ -120,13 +123,6 @@ static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
return container_of(common, struct ccu_div, common);
}
-static inline struct ccu_gate *hw_to_ccu_gate(struct clk_hw *hw)
-{
- struct ccu_common *common = hw_to_ccu_common(hw);
-
- return container_of(common, struct ccu_gate, common);
-}
-
static u8 ccu_get_parent_helper(struct ccu_common *common,
struct ccu_internal *mux)
{
@@ -197,6 +193,55 @@ static unsigned long ccu_div_recalc_rate(struct clk_hw *hw,
return rate;
}
+static int ccu_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ unsigned int val;
+
+ if (cd->div_en)
+ return divider_determine_rate(hw, req, NULL,
+ cd->div.width, cd->div.flags);
+
+ regmap_read(cd->common.map, cd->common.cfg0, &val);
+ val = val >> cd->div.shift;
+ val &= GENMASK(cd->div.width - 1, 0);
+ return divider_ro_determine_rate(hw, req, NULL, cd->div.width,
+ cd->div.flags, val);
+}
+
+static int ccu_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct ccu_div *cd = hw_to_ccu_div(hw);
+ int val = divider_get_val(rate, parent_rate, NULL,
+ cd->div.width, cd->div.flags);
+ unsigned int curr_val, reg_val;
+
+ if (val < 0)
+ return val;
+
+ regmap_read(cd->common.map, cd->common.cfg0, &reg_val);
+ curr_val = reg_val >> cd->div.shift;
+ curr_val &= GENMASK(cd->div.width - 1, 0);
+
+ if (!cd->div_en && curr_val != val)
+ return -EINVAL;
+
+ reg_val &= ~cd->div_en;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+ udelay(1);
+
+ reg_val &= ~GENMASK(cd->div.width + cd->div.shift - 1, cd->div.shift);
+ reg_val |= val << cd->div.shift;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+
+ reg_val |= cd->div_en;
+ regmap_write(cd->common.map, cd->common.cfg0, reg_val);
+
+ return 0;
+}
+
static u8 ccu_div_get_parent(struct clk_hw *hw)
{
struct ccu_div *cd = hw_to_ccu_div(hw);
@@ -239,9 +284,34 @@ static const struct clk_ops ccu_div_ops = {
.get_parent = ccu_div_get_parent,
.set_parent = ccu_div_set_parent,
.recalc_rate = ccu_div_recalc_rate,
- .determine_rate = clk_hw_determine_rate_no_reparent,
+ .set_rate = ccu_div_set_rate,
+ .determine_rate = ccu_div_determine_rate,
};
+static void ccu_pll_disable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ regmap_set_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
+static int ccu_pll_enable(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return regmap_clear_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
+static int ccu_pll_is_enabled(struct clk_hw *hw)
+{
+ struct ccu_pll *pll = hw_to_ccu_pll(hw);
+
+ return !regmap_test_bits(pll->common.map, pll->common.cfg1,
+ TH1520_PLL_VCO_RST);
+}
+
static unsigned long th1520_pll_vco_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -299,6 +369,9 @@ static unsigned long ccu_pll_recalc_rate(struct clk_hw *hw,
}
static const struct clk_ops clk_pll_ops = {
+ .disable = ccu_pll_disable,
+ .enable = ccu_pll_enable,
+ .is_enabled = ccu_pll_is_enabled,
.recalc_rate = ccu_pll_recalc_rate,
};
@@ -314,7 +387,7 @@ static struct ccu_pll cpu_pll0_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("cpu-pll0",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -326,7 +399,7 @@ static struct ccu_pll cpu_pll1_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("cpu-pll1",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -338,7 +411,7 @@ static struct ccu_pll gmac_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("gmac-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -358,7 +431,7 @@ static struct ccu_pll video_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("video-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -410,7 +483,7 @@ static struct ccu_pll tee_pll_clk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("tee-pll",
osc_24m_clk,
&clk_pll_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -486,7 +559,7 @@ static struct ccu_div axi4_cpusys2_aclk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("axi4-cpusys2-aclk",
gmac_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -508,7 +581,7 @@ static struct ccu_div axi_aclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("axi-aclk",
axi_parents,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -657,7 +730,7 @@ static struct ccu_div apb_pclk = {
.hw.init = CLK_HW_INIT_PARENTS_DATA("apb-pclk",
apb_parents,
&ccu_div_ops,
- CLK_IGNORE_UNUSED),
+ CLK_IS_CRITICAL),
},
};
@@ -688,7 +761,7 @@ static struct ccu_div vi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vi",
video_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -713,7 +786,7 @@ static struct ccu_div vo_axi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vo-axi",
video_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_IS_CRITICAL),
},
};
@@ -738,7 +811,7 @@ static struct ccu_div vp_axi_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("vp-axi",
video_pll_clk_parent,
&ccu_div_ops,
- CLK_IGNORE_UNUSED),
+ CLK_IS_CRITICAL),
},
};
@@ -756,6 +829,7 @@ static struct ccu_div venc_clk = {
};
static struct ccu_div dpu0_clk = {
+ .div_en = BIT(8),
.div = TH_CCU_DIV_FLAGS(0, 8, CLK_DIVIDER_ONE_BASED),
.common = {
.clkid = CLK_DPU0,
@@ -763,11 +837,16 @@ static struct ccu_div dpu0_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("dpu0",
dpu0_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_SET_RATE_UNGATE),
},
};
+static const struct clk_parent_data dpu0_clk_pd[] = {
+ { .hw = &dpu0_clk.common.hw }
+};
+
static struct ccu_div dpu1_clk = {
+ .div_en = BIT(8),
.div = TH_CCU_DIV_FLAGS(0, 8, CLK_DIVIDER_ONE_BASED),
.common = {
.clkid = CLK_DPU1,
@@ -775,10 +854,14 @@ static struct ccu_div dpu1_clk = {
.hw.init = CLK_HW_INIT_PARENTS_HW("dpu1",
dpu1_pll_clk_parent,
&ccu_div_ops,
- 0),
+ CLK_SET_RATE_UNGATE),
},
};
+static const struct clk_parent_data dpu1_clk_pd[] = {
+ { .hw = &dpu1_clk.common.hw }
+};
+
static CLK_FIXED_FACTOR_HW(emmc_sdio_ref_clk, "emmc-sdio-ref",
&video_pll_clk.common.hw, 4, 1, 0);
@@ -786,128 +869,132 @@ static const struct clk_parent_data emmc_sdio_ref_clk_pd[] = {
{ .hw = &emmc_sdio_ref_clk.hw },
};
-static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, BIT(4), 0);
-static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, BIT(5), 0);
+static CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, 4, 0);
+static CCU_GATE(CLK_BMU, bmu_clk, "bmu", axi4_cpusys2_aclk_pd, 0x100, 5, 0);
static CCU_GATE(CLK_AON2CPU_A2X, aon2cpu_a2x_clk, "aon2cpu-a2x", axi4_cpusys2_aclk_pd,
- 0x134, BIT(8), 0);
+ 0x134, 8, CLK_IS_CRITICAL);
static CCU_GATE(CLK_X2X_CPUSYS, x2x_cpusys_clk, "x2x-cpusys", axi4_cpusys2_aclk_pd,
- 0x134, BIT(7), 0);
+ 0x134, 7, CLK_IS_CRITICAL);
static CCU_GATE(CLK_CPU2AON_X2H, cpu2aon_x2h_clk, "cpu2aon-x2h", axi_aclk_pd,
- 0x138, BIT(8), CLK_IGNORE_UNUSED);
+ 0x138, 8, CLK_IS_CRITICAL);
static CCU_GATE(CLK_CPU2PERI_X2H, cpu2peri_x2h_clk, "cpu2peri-x2h", axi4_cpusys2_aclk_pd,
- 0x140, BIT(9), CLK_IGNORE_UNUSED);
+ 0x140, 9, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB1_HCLK, perisys_apb1_hclk, "perisys-apb1-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(9), CLK_IGNORE_UNUSED);
+ 0x150, 9, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB2_HCLK, perisys_apb2_hclk, "perisys-apb2-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(10), CLK_IGNORE_UNUSED);
+ 0x150, 10, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB3_HCLK, perisys_apb3_hclk, "perisys-apb3-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(11), CLK_IGNORE_UNUSED);
+ 0x150, 11, CLK_IS_CRITICAL);
static CCU_GATE(CLK_PERISYS_APB4_HCLK, perisys_apb4_hclk, "perisys-apb4-hclk", perisys_ahb_hclk_pd,
- 0x150, BIT(12), 0);
-static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, BIT(5), 0);
-static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, BIT(13), 0);
-static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, BIT(30), 0);
-static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, BIT(26), 0);
-static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, BIT(24), 0);
-static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, BIT(23), 0);
-static CCU_GATE(CLK_PADCTRL0, padctrl0_clk, "padctrl0", perisys_apb_pclk_pd, 0x204, BIT(22), 0);
-static CCU_GATE(CLK_GMAC_AXI, gmac_axi_clk, "gmac-axi", axi4_cpusys2_aclk_pd, 0x204, BIT(21), 0);
-static CCU_GATE(CLK_GPIO3, gpio3_clk, "gpio3-clk", peri2sys_apb_pclk_pd, 0x204, BIT(20), 0);
-static CCU_GATE(CLK_GMAC0, gmac0_clk, "gmac0", gmac_pll_clk_pd, 0x204, BIT(19), 0);
-static CCU_GATE(CLK_PWM, pwm_clk, "pwm", perisys_apb_pclk_pd, 0x204, BIT(18), 0);
-static CCU_GATE(CLK_QSPI0, qspi0_clk, "qspi0", video_pll_clk_pd, 0x204, BIT(17), 0);
-static CCU_GATE(CLK_QSPI1, qspi1_clk, "qspi1", video_pll_clk_pd, 0x204, BIT(16), 0);
-static CCU_GATE(CLK_SPI, spi_clk, "spi", video_pll_clk_pd, 0x204, BIT(15), 0);
-static CCU_GATE(CLK_UART0_PCLK, uart0_pclk, "uart0-pclk", perisys_apb_pclk_pd, 0x204, BIT(14), 0);
-static CCU_GATE(CLK_UART1_PCLK, uart1_pclk, "uart1-pclk", perisys_apb_pclk_pd, 0x204, BIT(13), 0);
-static CCU_GATE(CLK_UART2_PCLK, uart2_pclk, "uart2-pclk", perisys_apb_pclk_pd, 0x204, BIT(12), 0);
-static CCU_GATE(CLK_UART3_PCLK, uart3_pclk, "uart3-pclk", perisys_apb_pclk_pd, 0x204, BIT(11), 0);
-static CCU_GATE(CLK_UART4_PCLK, uart4_pclk, "uart4-pclk", perisys_apb_pclk_pd, 0x204, BIT(10), 0);
-static CCU_GATE(CLK_UART5_PCLK, uart5_pclk, "uart5-pclk", perisys_apb_pclk_pd, 0x204, BIT(9), 0);
-static CCU_GATE(CLK_GPIO0, gpio0_clk, "gpio0-clk", perisys_apb_pclk_pd, 0x204, BIT(8), 0);
-static CCU_GATE(CLK_GPIO1, gpio1_clk, "gpio1-clk", perisys_apb_pclk_pd, 0x204, BIT(7), 0);
-static CCU_GATE(CLK_GPIO2, gpio2_clk, "gpio2-clk", peri2sys_apb_pclk_pd, 0x204, BIT(6), 0);
-static CCU_GATE(CLK_I2C0, i2c0_clk, "i2c0", perisys_apb_pclk_pd, 0x204, BIT(5), 0);
-static CCU_GATE(CLK_I2C1, i2c1_clk, "i2c1", perisys_apb_pclk_pd, 0x204, BIT(4), 0);
-static CCU_GATE(CLK_I2C2, i2c2_clk, "i2c2", perisys_apb_pclk_pd, 0x204, BIT(3), 0);
-static CCU_GATE(CLK_I2C3, i2c3_clk, "i2c3", perisys_apb_pclk_pd, 0x204, BIT(2), 0);
-static CCU_GATE(CLK_I2C4, i2c4_clk, "i2c4", perisys_apb_pclk_pd, 0x204, BIT(1), 0);
-static CCU_GATE(CLK_I2C5, i2c5_clk, "i2c5", perisys_apb_pclk_pd, 0x204, BIT(0), 0);
-static CCU_GATE(CLK_SPINLOCK, spinlock_clk, "spinlock", ahb2_cpusys_hclk_pd, 0x208, BIT(10), 0);
-static CCU_GATE(CLK_DMA, dma_clk, "dma", axi4_cpusys2_aclk_pd, 0x208, BIT(8), 0);
-static CCU_GATE(CLK_MBOX0, mbox0_clk, "mbox0", apb3_cpusys_pclk_pd, 0x208, BIT(7), 0);
-static CCU_GATE(CLK_MBOX1, mbox1_clk, "mbox1", apb3_cpusys_pclk_pd, 0x208, BIT(6), 0);
-static CCU_GATE(CLK_MBOX2, mbox2_clk, "mbox2", apb3_cpusys_pclk_pd, 0x208, BIT(5), 0);
-static CCU_GATE(CLK_MBOX3, mbox3_clk, "mbox3", apb3_cpusys_pclk_pd, 0x208, BIT(4), 0);
-static CCU_GATE(CLK_WDT0, wdt0_clk, "wdt0", apb3_cpusys_pclk_pd, 0x208, BIT(3), 0);
-static CCU_GATE(CLK_WDT1, wdt1_clk, "wdt1", apb3_cpusys_pclk_pd, 0x208, BIT(2), 0);
-static CCU_GATE(CLK_TIMER0, timer0_clk, "timer0", apb3_cpusys_pclk_pd, 0x208, BIT(1), 0);
-static CCU_GATE(CLK_TIMER1, timer1_clk, "timer1", apb3_cpusys_pclk_pd, 0x208, BIT(0), 0);
-static CCU_GATE(CLK_SRAM0, sram0_clk, "sram0", axi_aclk_pd, 0x20c, BIT(4), 0);
-static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, BIT(3), 0);
-static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, BIT(2), 0);
-static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, BIT(1), 0);
+ 0x150, 12, 0);
+static const struct clk_parent_data perisys_apb4_hclk_pd[] = {
+ { .hw = &perisys_apb4_hclk.gate.hw },
+};
+
+static CCU_GATE(CLK_NPU_AXI, npu_axi_clk, "npu-axi", axi_aclk_pd, 0x1c8, 5, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_CPU2VP, cpu2vp_clk, "cpu2vp", axi_aclk_pd, 0x1e0, 13, CLK_IS_CRITICAL);
+static CCU_GATE(CLK_EMMC_SDIO, emmc_sdio_clk, "emmc-sdio", emmc_sdio_ref_clk_pd, 0x204, 30, 0);
+static CCU_GATE(CLK_GMAC1, gmac1_clk, "gmac1", gmac_pll_clk_pd, 0x204, 26, 0);
+static CCU_GATE(CLK_PADCTRL1, padctrl1_clk, "padctrl1", perisys_apb_pclk_pd, 0x204, 24, 0);
+static CCU_GATE(CLK_DSMART, dsmart_clk, "dsmart", perisys_apb_pclk_pd, 0x204, 23, 0);
+static CCU_GATE(CLK_PADCTRL0, padctrl0_clk, "padctrl0", perisys_apb4_hclk_pd, 0x204, 22, 0);
+static CCU_GATE(CLK_GMAC_AXI, gmac_axi_clk, "gmac-axi", axi4_cpusys2_aclk_pd, 0x204, 21, 0);
+static CCU_GATE(CLK_GPIO3, gpio3_clk, "gpio3-clk", peri2sys_apb_pclk_pd, 0x204, 20, 0);
+static CCU_GATE(CLK_GMAC0, gmac0_clk, "gmac0", gmac_pll_clk_pd, 0x204, 19, 0);
+static CCU_GATE(CLK_PWM, pwm_clk, "pwm", perisys_apb_pclk_pd, 0x204, 18, 0);
+static CCU_GATE(CLK_QSPI0, qspi0_clk, "qspi0", video_pll_clk_pd, 0x204, 17, 0);
+static CCU_GATE(CLK_QSPI1, qspi1_clk, "qspi1", video_pll_clk_pd, 0x204, 16, 0);
+static CCU_GATE(CLK_SPI, spi_clk, "spi", video_pll_clk_pd, 0x204, 15, 0);
+static CCU_GATE(CLK_UART0_PCLK, uart0_pclk, "uart0-pclk", perisys_apb_pclk_pd, 0x204, 14, 0);
+static CCU_GATE(CLK_UART1_PCLK, uart1_pclk, "uart1-pclk", perisys_apb_pclk_pd, 0x204, 13, 0);
+static CCU_GATE(CLK_UART2_PCLK, uart2_pclk, "uart2-pclk", perisys_apb_pclk_pd, 0x204, 12, 0);
+static CCU_GATE(CLK_UART3_PCLK, uart3_pclk, "uart3-pclk", perisys_apb_pclk_pd, 0x204, 11, 0);
+static CCU_GATE(CLK_UART4_PCLK, uart4_pclk, "uart4-pclk", perisys_apb_pclk_pd, 0x204, 10, 0);
+static CCU_GATE(CLK_UART5_PCLK, uart5_pclk, "uart5-pclk", perisys_apb_pclk_pd, 0x204, 9, 0);
+static CCU_GATE(CLK_GPIO0, gpio0_clk, "gpio0-clk", perisys_apb_pclk_pd, 0x204, 8, 0);
+static CCU_GATE(CLK_GPIO1, gpio1_clk, "gpio1-clk", perisys_apb_pclk_pd, 0x204, 7, 0);
+static CCU_GATE(CLK_GPIO2, gpio2_clk, "gpio2-clk", peri2sys_apb_pclk_pd, 0x204, 6, 0);
+static CCU_GATE(CLK_I2C0, i2c0_clk, "i2c0", perisys_apb_pclk_pd, 0x204, 5, 0);
+static CCU_GATE(CLK_I2C1, i2c1_clk, "i2c1", perisys_apb_pclk_pd, 0x204, 4, 0);
+static CCU_GATE(CLK_I2C2, i2c2_clk, "i2c2", perisys_apb_pclk_pd, 0x204, 3, 0);
+static CCU_GATE(CLK_I2C3, i2c3_clk, "i2c3", perisys_apb_pclk_pd, 0x204, 2, 0);
+static CCU_GATE(CLK_I2C4, i2c4_clk, "i2c4", perisys_apb_pclk_pd, 0x204, 1, 0);
+static CCU_GATE(CLK_I2C5, i2c5_clk, "i2c5", perisys_apb_pclk_pd, 0x204, 0, 0);
+static CCU_GATE(CLK_SPINLOCK, spinlock_clk, "spinlock", ahb2_cpusys_hclk_pd, 0x208, 10, 0);
+static CCU_GATE(CLK_DMA, dma_clk, "dma", axi4_cpusys2_aclk_pd, 0x208, 8, 0);
+static CCU_GATE(CLK_MBOX0, mbox0_clk, "mbox0", apb3_cpusys_pclk_pd, 0x208, 7, 0);
+static CCU_GATE(CLK_MBOX1, mbox1_clk, "mbox1", apb3_cpusys_pclk_pd, 0x208, 6, 0);
+static CCU_GATE(CLK_MBOX2, mbox2_clk, "mbox2", apb3_cpusys_pclk_pd, 0x208, 5, 0);
+static CCU_GATE(CLK_MBOX3, mbox3_clk, "mbox3", apb3_cpusys_pclk_pd, 0x208, 4, 0);
+static CCU_GATE(CLK_WDT0, wdt0_clk, "wdt0", apb3_cpusys_pclk_pd, 0x208, 3, 0);
+static CCU_GATE(CLK_WDT1, wdt1_clk, "wdt1", apb3_cpusys_pclk_pd, 0x208, 2, 0);
+static CCU_GATE(CLK_TIMER0, timer0_clk, "timer0", apb3_cpusys_pclk_pd, 0x208, 1, 0);
+static CCU_GATE(CLK_TIMER1, timer1_clk, "timer1", apb3_cpusys_pclk_pd, 0x208, 0, 0);
+static CCU_GATE(CLK_SRAM0, sram0_clk, "sram0", axi_aclk_pd, 0x20c, 4, 0);
+static CCU_GATE(CLK_SRAM1, sram1_clk, "sram1", axi_aclk_pd, 0x20c, 3, 0);
+static CCU_GATE(CLK_SRAM2, sram2_clk, "sram2", axi_aclk_pd, 0x20c, 2, 0);
+static CCU_GATE(CLK_SRAM3, sram3_clk, "sram3", axi_aclk_pd, 0x20c, 1, 0);
static CCU_GATE(CLK_AXI4_VO_ACLK, axi4_vo_aclk, "axi4-vo-aclk",
- video_pll_clk_pd, 0x0, BIT(0), 0);
+ video_pll_clk_pd, 0x0, 0, CLK_IS_CRITICAL);
static CCU_GATE(CLK_GPU_CORE, gpu_core_clk, "gpu-core-clk", video_pll_clk_pd,
- 0x0, BIT(3), 0);
+ 0x0, 3, 0);
static CCU_GATE(CLK_GPU_CFG_ACLK, gpu_cfg_aclk, "gpu-cfg-aclk",
- video_pll_clk_pd, 0x0, BIT(4), 0);
+ video_pll_clk_pd, 0x0, 4, CLK_IS_CRITICAL);
static CCU_GATE(CLK_DPU_PIXELCLK0, dpu0_pixelclk, "dpu0-pixelclk",
- video_pll_clk_pd, 0x0, BIT(5), 0);
+ dpu0_clk_pd, 0x0, 5, CLK_SET_RATE_PARENT);
static CCU_GATE(CLK_DPU_PIXELCLK1, dpu1_pixelclk, "dpu1-pixelclk",
- video_pll_clk_pd, 0x0, BIT(6), 0);
+ dpu1_clk_pd, 0x0, 6, CLK_SET_RATE_PARENT);
static CCU_GATE(CLK_DPU_HCLK, dpu_hclk, "dpu-hclk", video_pll_clk_pd, 0x0,
- BIT(7), 0);
+ 7, 0);
static CCU_GATE(CLK_DPU_ACLK, dpu_aclk, "dpu-aclk", video_pll_clk_pd, 0x0,
- BIT(8), 0);
+ 8, 0);
static CCU_GATE(CLK_DPU_CCLK, dpu_cclk, "dpu-cclk", video_pll_clk_pd, 0x0,
- BIT(9), 0);
+ 9, 0);
static CCU_GATE(CLK_HDMI_SFR, hdmi_sfr_clk, "hdmi-sfr-clk", video_pll_clk_pd,
- 0x0, BIT(10), 0);
+ 0x0, 10, 0);
static CCU_GATE(CLK_HDMI_PCLK, hdmi_pclk, "hdmi-pclk", video_pll_clk_pd, 0x0,
- BIT(11), 0);
+ 11, 0);
static CCU_GATE(CLK_HDMI_CEC, hdmi_cec_clk, "hdmi-cec-clk", video_pll_clk_pd,
- 0x0, BIT(12), 0);
+ 0x0, 12, 0);
static CCU_GATE(CLK_MIPI_DSI0_PCLK, mipi_dsi0_pclk, "mipi-dsi0-pclk",
- video_pll_clk_pd, 0x0, BIT(13), 0);
+ video_pll_clk_pd, 0x0, 13, 0);
static CCU_GATE(CLK_MIPI_DSI1_PCLK, mipi_dsi1_pclk, "mipi-dsi1-pclk",
- video_pll_clk_pd, 0x0, BIT(14), 0);
+ video_pll_clk_pd, 0x0, 14, 0);
static CCU_GATE(CLK_MIPI_DSI0_CFG, mipi_dsi0_cfg_clk, "mipi-dsi0-cfg-clk",
- video_pll_clk_pd, 0x0, BIT(15), 0);
+ video_pll_clk_pd, 0x0, 15, 0);
static CCU_GATE(CLK_MIPI_DSI1_CFG, mipi_dsi1_cfg_clk, "mipi-dsi1-cfg-clk",
- video_pll_clk_pd, 0x0, BIT(16), 0);
+ video_pll_clk_pd, 0x0, 16, 0);
static CCU_GATE(CLK_MIPI_DSI0_REFCLK, mipi_dsi0_refclk, "mipi-dsi0-refclk",
- video_pll_clk_pd, 0x0, BIT(17), 0);
+ video_pll_clk_pd, 0x0, 17, 0);
static CCU_GATE(CLK_MIPI_DSI1_REFCLK, mipi_dsi1_refclk, "mipi-dsi1-refclk",
- video_pll_clk_pd, 0x0, BIT(18), 0);
+ video_pll_clk_pd, 0x0, 18, 0);
static CCU_GATE(CLK_HDMI_I2S, hdmi_i2s_clk, "hdmi-i2s-clk", video_pll_clk_pd,
- 0x0, BIT(19), 0);
+ 0x0, 19, 0);
static CCU_GATE(CLK_X2H_DPU1_ACLK, x2h_dpu1_aclk, "x2h-dpu1-aclk",
- video_pll_clk_pd, 0x0, BIT(20), 0);
+ video_pll_clk_pd, 0x0, 20, CLK_IS_CRITICAL);
static CCU_GATE(CLK_X2H_DPU_ACLK, x2h_dpu_aclk, "x2h-dpu-aclk",
- video_pll_clk_pd, 0x0, BIT(21), 0);
+ video_pll_clk_pd, 0x0, 21, CLK_IS_CRITICAL);
static CCU_GATE(CLK_AXI4_VO_PCLK, axi4_vo_pclk, "axi4-vo-pclk",
- video_pll_clk_pd, 0x0, BIT(22), 0);
+ video_pll_clk_pd, 0x0, 22, 0);
static CCU_GATE(CLK_IOPMP_VOSYS_DPU_PCLK, iopmp_vosys_dpu_pclk,
- "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, BIT(23), 0);
+ "iopmp-vosys-dpu-pclk", video_pll_clk_pd, 0x0, 23, 0);
static CCU_GATE(CLK_IOPMP_VOSYS_DPU1_PCLK, iopmp_vosys_dpu1_pclk,
- "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, BIT(24), 0);
+ "iopmp-vosys-dpu1-pclk", video_pll_clk_pd, 0x0, 24, 0);
static CCU_GATE(CLK_IOPMP_VOSYS_GPU_PCLK, iopmp_vosys_gpu_pclk,
- "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, BIT(25), 0);
+ "iopmp-vosys-gpu-pclk", video_pll_clk_pd, 0x0, 25, 0);
static CCU_GATE(CLK_IOPMP_DPU1_ACLK, iopmp_dpu1_aclk, "iopmp-dpu1-aclk",
- video_pll_clk_pd, 0x0, BIT(27), 0);
+ video_pll_clk_pd, 0x0, 27, CLK_IS_CRITICAL);
static CCU_GATE(CLK_IOPMP_DPU_ACLK, iopmp_dpu_aclk, "iopmp-dpu-aclk",
- video_pll_clk_pd, 0x0, BIT(28), 0);
+ video_pll_clk_pd, 0x0, 28, CLK_IS_CRITICAL);
static CCU_GATE(CLK_IOPMP_GPU_ACLK, iopmp_gpu_aclk, "iopmp-gpu-aclk",
- video_pll_clk_pd, 0x0, BIT(29), 0);
+ video_pll_clk_pd, 0x0, 29, CLK_IS_CRITICAL);
static CCU_GATE(CLK_MIPIDSI0_PIXCLK, mipi_dsi0_pixclk, "mipi-dsi0-pixclk",
- video_pll_clk_pd, 0x0, BIT(30), 0);
+ video_pll_clk_pd, 0x0, 30, 0);
static CCU_GATE(CLK_MIPIDSI1_PIXCLK, mipi_dsi1_pixclk, "mipi-dsi1-pixclk",
- video_pll_clk_pd, 0x0, BIT(31), 0);
+ video_pll_clk_pd, 0x0, 31, 0);
static CCU_GATE(CLK_HDMI_PIXCLK, hdmi_pixclk, "hdmi-pixclk", video_pll_clk_pd,
- 0x4, BIT(0), 0);
+ 0x4, 0, 0);
static CLK_FIXED_FACTOR_HW(gmac_pll_clk_100m, "gmac-pll-clk-100m",
&gmac_pll_clk.common.hw, 10, 1, 0);
@@ -963,107 +1050,106 @@ static struct ccu_mux *th1520_mux_clks[] = {
&uart_sclk,
};
-static struct ccu_common *th1520_gate_clks[] = {
- &emmc_sdio_clk.common,
- &aon2cpu_a2x_clk.common,
- &x2x_cpusys_clk.common,
- &brom_clk.common,
- &bmu_clk.common,
- &cpu2aon_x2h_clk.common,
- &cpu2peri_x2h_clk.common,
- &cpu2vp_clk.common,
- &perisys_apb1_hclk.common,
- &perisys_apb2_hclk.common,
- &perisys_apb3_hclk.common,
- &perisys_apb4_hclk.common,
- &npu_axi_clk.common,
- &gmac1_clk.common,
- &padctrl1_clk.common,
- &dsmart_clk.common,
- &padctrl0_clk.common,
- &gmac_axi_clk.common,
- &gpio3_clk.common,
- &gmac0_clk.common,
- &pwm_clk.common,
- &qspi0_clk.common,
- &qspi1_clk.common,
- &spi_clk.common,
- &uart0_pclk.common,
- &uart1_pclk.common,
- &uart2_pclk.common,
- &uart3_pclk.common,
- &uart4_pclk.common,
- &uart5_pclk.common,
- &gpio0_clk.common,
- &gpio1_clk.common,
- &gpio2_clk.common,
- &i2c0_clk.common,
- &i2c1_clk.common,
- &i2c2_clk.common,
- &i2c3_clk.common,
- &i2c4_clk.common,
- &i2c5_clk.common,
- &spinlock_clk.common,
- &dma_clk.common,
- &mbox0_clk.common,
- &mbox1_clk.common,
- &mbox2_clk.common,
- &mbox3_clk.common,
- &wdt0_clk.common,
- &wdt1_clk.common,
- &timer0_clk.common,
- &timer1_clk.common,
- &sram0_clk.common,
- &sram1_clk.common,
- &sram2_clk.common,
- &sram3_clk.common,
-};
-
-static struct ccu_common *th1520_vo_gate_clks[] = {
- &axi4_vo_aclk.common,
- &gpu_core_clk.common,
- &gpu_cfg_aclk.common,
- &dpu0_pixelclk.common,
- &dpu1_pixelclk.common,
- &dpu_hclk.common,
- &dpu_aclk.common,
- &dpu_cclk.common,
- &hdmi_sfr_clk.common,
- &hdmi_pclk.common,
- &hdmi_cec_clk.common,
- &mipi_dsi0_pclk.common,
- &mipi_dsi1_pclk.common,
- &mipi_dsi0_cfg_clk.common,
- &mipi_dsi1_cfg_clk.common,
- &mipi_dsi0_refclk.common,
- &mipi_dsi1_refclk.common,
- &hdmi_i2s_clk.common,
- &x2h_dpu1_aclk.common,
- &x2h_dpu_aclk.common,
- &axi4_vo_pclk.common,
- &iopmp_vosys_dpu_pclk.common,
- &iopmp_vosys_dpu1_pclk.common,
- &iopmp_vosys_gpu_pclk.common,
- &iopmp_dpu1_aclk.common,
- &iopmp_dpu_aclk.common,
- &iopmp_gpu_aclk.common,
- &mipi_dsi0_pixclk.common,
- &mipi_dsi1_pixclk.common,
- &hdmi_pixclk.common
+static struct ccu_gate *th1520_gate_clks[] = {
+ &emmc_sdio_clk,
+ &aon2cpu_a2x_clk,
+ &x2x_cpusys_clk,
+ &brom_clk,
+ &bmu_clk,
+ &cpu2aon_x2h_clk,
+ &cpu2peri_x2h_clk,
+ &cpu2vp_clk,
+ &perisys_apb1_hclk,
+ &perisys_apb2_hclk,
+ &perisys_apb3_hclk,
+ &perisys_apb4_hclk,
+ &npu_axi_clk,
+ &gmac1_clk,
+ &padctrl1_clk,
+ &dsmart_clk,
+ &padctrl0_clk,
+ &gmac_axi_clk,
+ &gpio3_clk,
+ &gmac0_clk,
+ &pwm_clk,
+ &qspi0_clk,
+ &qspi1_clk,
+ &spi_clk,
+ &uart0_pclk,
+ &uart1_pclk,
+ &uart2_pclk,
+ &uart3_pclk,
+ &uart4_pclk,
+ &uart5_pclk,
+ &gpio0_clk,
+ &gpio1_clk,
+ &gpio2_clk,
+ &i2c0_clk,
+ &i2c1_clk,
+ &i2c2_clk,
+ &i2c3_clk,
+ &i2c4_clk,
+ &i2c5_clk,
+ &spinlock_clk,
+ &dma_clk,
+ &mbox0_clk,
+ &mbox1_clk,
+ &mbox2_clk,
+ &mbox3_clk,
+ &wdt0_clk,
+ &wdt1_clk,
+ &timer0_clk,
+ &timer1_clk,
+ &sram0_clk,
+ &sram1_clk,
+ &sram2_clk,
+ &sram3_clk,
+};
+
+static struct ccu_gate *th1520_vo_gate_clks[] = {
+ &axi4_vo_aclk,
+ &gpu_core_clk,
+ &gpu_cfg_aclk,
+ &dpu0_pixelclk,
+ &dpu1_pixelclk,
+ &dpu_hclk,
+ &dpu_aclk,
+ &dpu_cclk,
+ &hdmi_sfr_clk,
+ &hdmi_pclk,
+ &hdmi_cec_clk,
+ &mipi_dsi0_pclk,
+ &mipi_dsi1_pclk,
+ &mipi_dsi0_cfg_clk,
+ &mipi_dsi1_cfg_clk,
+ &mipi_dsi0_refclk,
+ &mipi_dsi1_refclk,
+ &hdmi_i2s_clk,
+ &x2h_dpu1_aclk,
+ &x2h_dpu_aclk,
+ &axi4_vo_pclk,
+ &iopmp_vosys_dpu_pclk,
+ &iopmp_vosys_dpu1_pclk,
+ &iopmp_vosys_gpu_pclk,
+ &iopmp_dpu1_aclk,
+ &iopmp_dpu_aclk,
+ &iopmp_gpu_aclk,
+ &mipi_dsi0_pixclk,
+ &mipi_dsi1_pixclk,
+ &hdmi_pixclk
};
static const struct regmap_config th1520_clk_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
struct th1520_plat_data {
struct ccu_common **th1520_pll_clks;
struct ccu_common **th1520_div_clks;
struct ccu_mux **th1520_mux_clks;
- struct ccu_common **th1520_gate_clks;
+ struct ccu_gate **th1520_gate_clks;
int nr_clks;
int nr_pll_clks;
@@ -1102,7 +1188,6 @@ static int th1520_clk_probe(struct platform_device *pdev)
struct regmap *map;
void __iomem *base;
- struct clk_hw *hw;
int ret, i;
plat_data = device_get_match_data(&pdev->dev);
@@ -1161,20 +1246,15 @@ static int th1520_clk_probe(struct platform_device *pdev)
}
for (i = 0; i < plat_data->nr_gate_clks; i++) {
- struct ccu_gate *cg = hw_to_ccu_gate(&plat_data->th1520_gate_clks[i]->hw);
+ struct ccu_gate *cg = plat_data->th1520_gate_clks[i];
- plat_data->th1520_gate_clks[i]->map = map;
+ cg->gate.reg = base + cg->reg;
- hw = devm_clk_hw_register_gate_parent_data(dev,
- cg->common.hw.init->name,
- cg->common.hw.init->parent_data,
- cg->common.hw.init->flags,
- base + cg->common.cfg0,
- ffs(cg->enable) - 1, 0, NULL);
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ ret = devm_clk_hw_register(dev, &cg->gate.hw);
+ if (ret)
+ return ret;
- priv->hws[cg->common.clkid] = hw;
+ priv->hws[cg->clkid] = &cg->gate.hw;
}
if (plat_data == &th1520_ap_platdata) {
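To make the CCU_GATE rework concrete: a gate is now a plain struct clk_gate wrapped in a small ccu_gate carrier, and probe only patches in the MMIO address before registering. The clk_ops argument to CLK_HW_INIT_PARENTS_DATA falls outside the quoted hunk; &clk_gate_ops is assumed here. Roughly, CCU_GATE(CLK_BROM, brom_clk, "brom", ahb2_cpusys_hclk_pd, 0x100, 4, 0) expands to:

	struct ccu_gate brom_clk = {
		.clkid = CLK_BROM,
		.reg = 0x100,	/* offset; probe sets gate.reg = base + 0x100 */
		.gate = {
			.bit_idx = 4,
			.hw.init = CLK_HW_INIT_PARENTS_DATA("brom",
							    ahb2_cpusys_hclk_pd,
							    &clk_gate_ops, /* assumed */
							    0),
		},
	};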
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 85c50ea39e6d..9269e6a0db6a 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -258,6 +258,8 @@ static const char *enable_init_clks[] = {
"dpll_ddr_m2_ck",
"dpll_mpu_m2_ck",
"l3_gclk",
+ /* WKUP_DEBUGSS_CLKCTRL - disable fails, AM335x Errata Advisory 1.0.42 */
+ "l3-aon-clkctrl:0000:0",
/* AM3_L3_L3_MAIN_CLKCTRL, needed during suspend */
"l3-clkctrl:00bc:0",
"l4hs_gclk",
diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
index 0eab7f3e2eab..b02f84d49b96 100644
--- a/drivers/clk/ti/clk-dra7-atl.c
+++ b/drivers/clk/ti/clk-dra7-atl.c
@@ -120,16 +120,18 @@ static unsigned long atl_clk_recalc_rate(struct clk_hw *hw,
return parent_rate / cdesc->divider;
}
-static long atl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int atl_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned divider;
- divider = (*parent_rate + rate / 2) / rate;
+ divider = (req->best_parent_rate + req->rate / 2) / req->rate;
if (divider > DRA7_ATL_DIVIDER_MASK + 1)
divider = DRA7_ATL_DIVIDER_MASK + 1;
- return *parent_rate / divider;
+ req->rate = req->best_parent_rate / divider;
+
+ return 0;
}
static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -156,7 +158,7 @@ static const struct clk_ops atl_clk_ops = {
.disable = atl_clk_disable,
.is_enabled = atl_clk_is_enabled,
.recalc_rate = atl_clk_recalc_rate,
- .round_rate = atl_clk_round_rate,
+ .determine_rate = atl_clk_determine_rate,
.set_rate = atl_clk_set_rate,
};
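A worked example of the nearest-divider rounding above, with illustrative numbers:

	/*
	 * best_parent_rate = 90 MHz, requested rate = 11 MHz:
	 *	divider   = (90000000 + 11000000 / 2) / 11000000 = 8
	 *	req->rate = 90000000 / 8 = 11250000
	 * Requests implying a divider above DRA7_ATL_DIVIDER_MASK + 1
	 * are clamped to that maximum divider first.
	 */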
diff --git a/drivers/clk/ti/clkt_dpll.c b/drivers/clk/ti/clkt_dpll.c
index dfaa4d1f0b64..2ecd66968af4 100644
--- a/drivers/clk/ti/clkt_dpll.c
+++ b/drivers/clk/ti/clkt_dpll.c
@@ -268,20 +268,18 @@ unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
/* DPLL rate rounding code */
/**
- * omap2_dpll_round_rate - round a target rate for an OMAP DPLL
+ * omap2_dpll_determine_rate - round a target rate for an OMAP DPLL
* @hw: struct clk_hw containing the struct clk * for a DPLL
- * @target_rate: desired DPLL clock rate
- * @parent_rate: parent's DPLL clock rate
+ * @req: rate request
*
* Given a DPLL and a desired target rate, round the target rate to a
* possible, programmable rate for this DPLL. Attempts to select the
* minimum possible n. Stores the computed (m, n) in the DPLL's
* dpll_data structure so set_rate() will not need to call this
- * (expensive) function again. Returns ~0 if the target rate cannot
- * be rounded, or the rounded rate upon success.
+ * (expensive) function again. Returns -EINVAL if the target rate
+ * cannot be rounded, or 0 upon success with the rounded rate
+ * written back to @req.
*/
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate)
+int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
int m, n, r, scaled_max_m;
@@ -295,19 +293,19 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
const char *clk_name;
if (!clk || !clk->dpll_data)
- return ~0;
+ return -EINVAL;
dd = clk->dpll_data;
- if (dd->max_rate && target_rate > dd->max_rate)
- target_rate = dd->max_rate;
+ if (dd->max_rate && req->rate > dd->max_rate)
+ req->rate = dd->max_rate;
ref_rate = clk_hw_get_rate(dd->clk_ref);
clk_name = clk_hw_get_name(hw);
pr_debug("clock: %s: starting DPLL round_rate, target rate %lu\n",
- clk_name, target_rate);
+ clk_name, req->rate);
- scaled_rt_rp = target_rate / (ref_rate / DPLL_SCALE_FACTOR);
+ scaled_rt_rp = req->rate / (ref_rate / DPLL_SCALE_FACTOR);
scaled_max_m = dd->max_multiplier * DPLL_SCALE_FACTOR;
dd->last_rounded_rate = 0;
@@ -332,7 +330,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
if (m > scaled_max_m)
break;
- r = _dpll_test_mult(&m, n, &new_rate, target_rate,
+ r = _dpll_test_mult(&m, n, &new_rate, req->rate,
ref_rate);
/* m can't be set low enough for this n - try with a larger n */
@@ -340,7 +338,7 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
continue;
/* skip rates above our target rate */
- delta = target_rate - new_rate;
+ delta = req->rate - new_rate;
if (delta < 0)
continue;
@@ -359,13 +357,15 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
if (prev_min_delta == LONG_MAX) {
pr_debug("clock: %s: cannot round to rate %lu\n",
- clk_name, target_rate);
- return ~0;
+ clk_name, req->rate);
+ return -EINVAL;
}
dd->last_rounded_m = min_delta_m;
dd->last_rounded_n = min_delta_n;
- dd->last_rounded_rate = target_rate - prev_min_delta;
+ dd->last_rounded_rate = req->rate - prev_min_delta;
- return dd->last_rounded_rate;
+ req->rate = dd->last_rounded_rate;
+
+ return 0;
}
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 2de7acea1ea0..d5e24fe4ae3a 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -273,8 +273,7 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
u8 index);
int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
-long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
- unsigned long *parent_rate);
+int omap2_dpll_determine_rate(struct clk_hw *hw, struct clk_rate_request *req);
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long parent_rate);
@@ -296,9 +295,6 @@ void omap3_clk_lock_dpll5(void);
unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
unsigned long parent_rate);
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate);
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req);
int omap2_clk_for_each(int (*fn)(struct clk_hw_omap *hw));
diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c
index b85382c370f7..8cba259188d4 100644
--- a/drivers/clk/ti/composite.c
+++ b/drivers/clk/ti/composite.c
@@ -26,8 +26,8 @@ static unsigned long ti_composite_recalc_rate(struct clk_hw *hw,
return ti_clk_divider_ops.recalc_rate(hw, parent_rate);
}
-static long ti_composite_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ti_composite_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
return -EINVAL;
}
@@ -40,7 +40,7 @@ static int ti_composite_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ti_composite_divider_ops = {
.recalc_rate = &ti_composite_recalc_rate,
- .round_rate = &ti_composite_round_rate,
+ .determine_rate = &ti_composite_determine_rate,
.set_rate = &ti_composite_set_rate,
};
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ade99ab6cfa9..6f58a0f2e74a 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -223,13 +223,15 @@ static int ti_clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
return bestdiv;
}
-static long ti_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int ti_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
int div;
- div = ti_clk_divider_bestdiv(hw, rate, prate);
+ div = ti_clk_divider_bestdiv(hw, req->rate, &req->best_parent_rate);
- return DIV_ROUND_UP(*prate, div);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, div);
+
+ return 0;
}
static int ti_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -299,7 +301,7 @@ static void clk_divider_restore_context(struct clk_hw *hw)
const struct clk_ops ti_clk_divider_ops = {
.recalc_rate = ti_clk_divider_recalc_rate,
- .round_rate = ti_clk_divider_round_rate,
+ .determine_rate = ti_clk_divider_determine_rate,
.set_rate = ti_clk_divider_set_rate,
.save_context = clk_divider_save_context,
.restore_context = clk_divider_restore_context,
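The divider conversion keeps the selection logic in ti_clk_divider_bestdiv() and only changes the packaging; a worked example with illustrative numbers:

	/*
	 * parent = 192 MHz, requested rate = 48 MHz:
	 *	div = ti_clk_divider_bestdiv(hw, 48000000, &parent_rate)  -> 4
	 *	req->rate = DIV_ROUND_UP(192000000, 4)                    -> 48000000
	 */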
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index 3386bd1903df..971adafd9a8b 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -25,7 +25,6 @@ static const struct clk_ops dpll_m4xen_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
.recalc_rate = &omap4_dpll_regm4xen_recalc,
- .round_rate = &omap4_dpll_regm4xen_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -48,7 +47,6 @@ static const struct clk_ops dpll_ck_ops = {
.enable = &omap3_noncore_dpll_enable,
.disable = &omap3_noncore_dpll_disable,
.recalc_rate = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -61,7 +59,6 @@ static const struct clk_ops dpll_ck_ops = {
static const struct clk_ops dpll_no_gate_ck_ops = {
.recalc_rate = &omap3_dpll_recalc,
.get_parent = &omap2_init_dpll_parent,
- .round_rate = &omap2_dpll_round_rate,
.set_rate = &omap3_noncore_dpll_set_rate,
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
@@ -80,7 +77,7 @@ const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
static const struct clk_ops omap2_dpll_core_ck_ops = {
.get_parent = &omap2_init_dpll_parent,
.recalc_rate = &omap2_dpllcore_recalc,
- .round_rate = &omap2_dpll_round_rate,
+ .determine_rate = &omap2_dpll_determine_rate,
.set_rate = &omap2_reprogram_dpllcore,
};
#else
@@ -91,7 +88,7 @@ static const struct clk_ops omap2_dpll_core_ck_ops = {};
static const struct clk_ops omap3_dpll_core_ck_ops = {
.get_parent = &omap2_init_dpll_parent,
.recalc_rate = &omap3_dpll_recalc,
- .round_rate = &omap2_dpll_round_rate,
+ .determine_rate = &omap2_dpll_determine_rate,
};
static const struct clk_ops omap3_dpll_ck_ops = {
@@ -103,7 +100,6 @@ static const struct clk_ops omap3_dpll_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
static const struct clk_ops omap3_dpll5_ck_ops = {
@@ -115,7 +111,6 @@ static const struct clk_ops omap3_dpll5_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
static const struct clk_ops omap3_dpll_per_ck_ops = {
@@ -127,7 +122,6 @@ static const struct clk_ops omap3_dpll_per_ck_ops = {
.set_parent = &omap3_noncore_dpll_set_parent,
.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
.determine_rate = &omap3_noncore_dpll_determine_rate,
- .round_rate = &omap2_dpll_round_rate,
};
#endif
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 00680486b1bd..8c51b988a04f 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -587,6 +587,7 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
{
struct clk_hw_omap *clk = to_clk_hw_omap(hw);
struct dpll_data *dd;
+ int ret;
if (!req->rate)
return -EINVAL;
@@ -599,8 +600,10 @@ int omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = dd->clk_bypass;
} else {
- req->rate = omap2_dpll_round_rate(hw, req->rate,
- &req->best_parent_rate);
+ ret = omap2_dpll_determine_rate(hw, req);
+ if (ret != 0)
+ return ret;
+
req->best_parent_hw = dd->clk_ref;
}
diff --git a/drivers/clk/ti/dpll44xx.c b/drivers/clk/ti/dpll44xx.c
index 3fc2cab69a3f..08ed57f181b4 100644
--- a/drivers/clk/ti/dpll44xx.c
+++ b/drivers/clk/ti/dpll44xx.c
@@ -134,68 +134,13 @@ unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
}
/**
- * omap4_dpll_regm4xen_round_rate - round DPLL rate, considering REGM4XEN bit
- * @hw: struct hw_clk containing the struct clk * of the DPLL to round a rate for
- * @target_rate: the desired rate of the DPLL
- * @parent_rate: clock rate of the DPLL parent
- *
- * Compute the rate that would be programmed into the DPLL hardware
- * for @clk if set_rate() were to be provided with the rate
- * @target_rate. Takes the REGM4XEN bit into consideration, which is
- * needed for the OMAP4 ABE DPLL. Returns the rounded rate (before
- * M-dividers) upon success, -EINVAL if @clk is null or not a DPLL, or
- * ~0 if an error occurred in omap2_dpll_round_rate().
- */
-long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
- unsigned long target_rate,
- unsigned long *parent_rate)
-{
- struct clk_hw_omap *clk = to_clk_hw_omap(hw);
- struct dpll_data *dd;
- long r;
-
- if (!clk || !clk->dpll_data)
- return -EINVAL;
-
- dd = clk->dpll_data;
-
- dd->last_rounded_m4xen = 0;
-
- /*
- * First try to compute the DPLL configuration for
- * target rate without using the 4X multiplier.
- */
- r = omap2_dpll_round_rate(hw, target_rate, NULL);
- if (r != ~0)
- goto out;
-
- /*
- * If we did not find a valid DPLL configuration, try again, but
- * this time see if using the 4X multiplier can help. Enabling the
- * 4X multiplier is equivalent to dividing the target rate by 4.
- */
- r = omap2_dpll_round_rate(hw, target_rate / OMAP4430_REGM4XEN_MULT,
- NULL);
- if (r == ~0)
- return r;
-
- dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
- dd->last_rounded_m4xen = 1;
-
-out:
- omap4_dpll_lpmode_recalc(dd);
-
- return dd->last_rounded_rate;
-}
-
-/**
* omap4_dpll_regm4xen_determine_rate - determine rate for a DPLL
* @hw: pointer to the clock to determine rate for
* @req: target rate request
*
* Determines which DPLL mode to use for reaching a desired rate.
* Checks whether the DPLL shall be in bypass or locked mode, and if
- * locked, calculates the M,N values for the DPLL via round-rate.
+ * locked, calculates the M,N values for the DPLL.
* Returns 0 on success and a negative error value otherwise.
*/
int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
@@ -215,8 +160,36 @@ int omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
(dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
req->best_parent_hw = dd->clk_bypass;
} else {
- req->rate = omap4_dpll_regm4xen_round_rate(hw, req->rate,
- &req->best_parent_rate);
+ struct clk_rate_request tmp_req;
+ long r;
+
+ clk_hw_init_rate_request(hw, &tmp_req, req->rate);
+ dd->last_rounded_m4xen = 0;
+
+ /*
+ * First try to compute the DPLL configuration for
+ * target rate without using the 4X multiplier.
+ */
+
+ r = omap2_dpll_determine_rate(hw, &tmp_req);
+ if (r < 0) {
+ /*
+ * If we did not find a valid DPLL configuration, try again, but
+ * this time see if using the 4X multiplier can help. Enabling the
+ * 4X multiplier is equivalent to dividing the target rate by 4.
+ */
+ tmp_req.rate /= OMAP4430_REGM4XEN_MULT;
+ r = omap2_dpll_determine_rate(hw, &tmp_req);
+ if (r < 0)
+ return r;
+
+ dd->last_rounded_rate *= OMAP4430_REGM4XEN_MULT;
+ dd->last_rounded_m4xen = 1;
+ }
+
+ omap4_dpll_lpmode_recalc(dd);
+
+ req->rate = dd->last_rounded_rate;
req->best_parent_hw = dd->clk_ref;
}
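The dpll44xx rework above folds the old round-rate helper into determine_rate by probing candidates on a scratch request; the general pattern, as a sketch using only core clk-provider API (the callee name is hypothetical):

	struct clk_rate_request tmp_req;
	int ret;

	/* seed the scratch request from the live one so constraints carry over */
	clk_hw_init_rate_request(hw, &tmp_req, req->rate);

	ret = some_rate_helper(hw, &tmp_req);	/* hypothetical */
	if (ret < 0)
		return ret;

	/* commit only once the candidate is known to be valid */
	req->rate = tmp_req.rate;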
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 2db3fc4a443e..4f28138d2d8a 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -214,24 +214,27 @@ static int ti_fapll_set_div_mult(unsigned long rate,
return 0;
}
-static long ti_fapll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ti_fapll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 pre_div_p, mult_n;
int error;
- if (!rate)
+ if (!req->rate)
return -EINVAL;
- error = ti_fapll_set_div_mult(rate, *parent_rate,
+ error = ti_fapll_set_div_mult(req->rate, req->best_parent_rate,
&pre_div_p, &mult_n);
- if (error)
- return error;
+ if (error) {
+ req->rate = error;
- rate = *parent_rate / pre_div_p;
- rate *= mult_n;
+ return 0;
+ }
- return rate;
+ req->rate = req->best_parent_rate / pre_div_p;
+ req->rate *= mult_n;
+
+ return 0;
}
static int ti_fapll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -268,7 +271,7 @@ static const struct clk_ops ti_fapll_ops = {
.is_enabled = ti_fapll_is_enabled,
.recalc_rate = ti_fapll_recalc_rate,
.get_parent = ti_fapll_get_parent,
- .round_rate = ti_fapll_round_rate,
+ .determine_rate = ti_fapll_determine_rate,
.set_rate = ti_fapll_set_rate,
};
@@ -399,14 +402,14 @@ static u32 ti_fapll_synth_set_frac_rate(struct fapll_synth *synth,
return post_div_m;
}
-static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ti_fapll_synth_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct fapll_synth *synth = to_synth(hw);
struct fapll_data *fd = synth->fd;
unsigned long r;
- if (ti_fapll_clock_is_bypass(fd) || !synth->div || !rate)
+ if (ti_fapll_clock_is_bypass(fd) || !synth->div || !req->rate)
return -EINVAL;
/* Only post divider m available with no fractional divider? */
@@ -414,23 +417,26 @@ static long ti_fapll_synth_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long frac_rate;
u32 synth_post_div_m;
- frac_rate = ti_fapll_synth_get_frac_rate(hw, *parent_rate);
- synth_post_div_m = DIV_ROUND_UP(frac_rate, rate);
+ frac_rate = ti_fapll_synth_get_frac_rate(hw,
+ req->best_parent_rate);
+ synth_post_div_m = DIV_ROUND_UP(frac_rate, req->rate);
r = DIV_ROUND_UP(frac_rate, synth_post_div_m);
goto out;
}
- r = *parent_rate * SYNTH_PHASE_K;
- if (rate > r)
+ r = req->best_parent_rate * SYNTH_PHASE_K;
+ if (req->rate > r)
goto out;
r = DIV_ROUND_UP_ULL(r, SYNTH_MAX_INT_DIV * SYNTH_MAX_DIV_M);
- if (rate < r)
+ if (req->rate < r)
goto out;
- r = rate;
+ r = req->rate;
out:
- return r;
+ req->rate = r;
+
+ return 0;
}
static int ti_fapll_synth_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -477,7 +483,7 @@ static const struct clk_ops ti_fapll_synt_ops = {
.disable = ti_fapll_synth_disable,
.is_enabled = ti_fapll_synth_is_enabled,
.recalc_rate = ti_fapll_synth_recalc_rate,
- .round_rate = ti_fapll_synth_round_rate,
+ .determine_rate = ti_fapll_synth_determine_rate,
.set_rate = ti_fapll_synth_set_rate,
};
diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c
index 5cbf24c94606..f775e18acd46 100644
--- a/drivers/clk/ux500/clk-prcmu.c
+++ b/drivers/clk/ux500/clk-prcmu.c
@@ -53,11 +53,13 @@ static unsigned long clk_prcmu_recalc_rate(struct clk_hw *hw,
return prcmu_clock_rate(clk->cg_sel);
}
-static long clk_prcmu_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_prcmu_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_prcmu *clk = to_clk_prcmu(hw);
- return prcmu_round_clock_rate(clk->cg_sel, rate);
+ req->rate = prcmu_round_clock_rate(clk->cg_sel, req->rate);
+
+ return 0;
}
static int clk_prcmu_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -157,7 +159,7 @@ static const struct clk_ops clk_prcmu_scalable_ops = {
.prepare = clk_prcmu_prepare,
.unprepare = clk_prcmu_unprepare,
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
@@ -169,7 +171,7 @@ static const struct clk_ops clk_prcmu_gate_ops = {
static const struct clk_ops clk_prcmu_scalable_rate_ops = {
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
@@ -187,7 +189,7 @@ static const struct clk_ops clk_prcmu_opp_volt_scalable_ops = {
.prepare = clk_prcmu_opp_volt_prepare,
.unprepare = clk_prcmu_opp_volt_unprepare,
.recalc_rate = clk_prcmu_recalc_rate,
- .round_rate = clk_prcmu_round_rate,
+ .determine_rate = clk_prcmu_determine_rate,
.set_rate = clk_prcmu_set_rate,
};
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index b69c3fbdfbce..86ca04ad9fab 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -234,39 +234,51 @@ static unsigned long icst_recalc_rate(struct clk_hw *hw,
return icst->rate;
}
-static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int icst_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_icst *icst = to_icst(hw);
struct icst_vco vco;
if (icst->ctype == ICST_INTEGRATOR_AP_CM ||
icst->ctype == ICST_INTEGRATOR_CP_CM_CORE) {
- if (rate <= 12000000)
- return 12000000;
- if (rate >= 160000000)
- return 160000000;
- /* Slam to closest megahertz */
- return DIV_ROUND_CLOSEST(rate, 1000000) * 1000000;
+ if (req->rate <= 12000000)
+ req->rate = 12000000;
+ else if (req->rate >= 160000000)
+ req->rate = 160000000;
+ else {
+ /* Slam to closest megahertz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 1000000) * 1000000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_CP_CM_MEM) {
- if (rate <= 6000000)
- return 6000000;
- if (rate >= 66000000)
- return 66000000;
- /* Slam to closest 0.5 megahertz */
- return DIV_ROUND_CLOSEST(rate, 500000) * 500000;
+ if (req->rate <= 6000000)
+ req->rate = 6000000;
+ else if (req->rate >= 66000000)
+ req->rate = 66000000;
+ else {
+ /* Slam to closest 0.5 megahertz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 500000) * 500000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_AP_SYS) {
/* Divides between 3 and 50 MHz in steps of 0.25 MHz */
- if (rate <= 3000000)
- return 3000000;
- if (rate >= 50000000)
- return 5000000;
- /* Slam to closest 0.25 MHz */
- return DIV_ROUND_CLOSEST(rate, 250000) * 250000;
+ if (req->rate <= 3000000)
+ req->rate = 3000000;
+ else if (req->rate >= 50000000)
+ req->rate = 50000000;
+ else {
+ /* Slam to closest 0.25 MHz */
+ req->rate = DIV_ROUND_CLOSEST(req->rate, 250000) * 250000;
+ }
+
+ return 0;
}
if (icst->ctype == ICST_INTEGRATOR_AP_PCI) {
@@ -274,14 +286,20 @@ static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
* If we're below or less than halfway from 25 to 33 MHz
* select 25 MHz
*/
- if (rate <= 25000000 || rate < 29000000)
- return 25000000;
- /* Else just return the default frequency */
- return 33000000;
+ if (req->rate <= 25000000 || req->rate < 29000000)
+ req->rate = 25000000;
+ else {
+ /* Else just return the default frequency */
+ req->rate = 33000000;
+ }
+
+ return 0;
}
- vco = icst_hz_to_vco(icst->params, rate);
- return icst_hz(icst->params, vco);
+ vco = icst_hz_to_vco(icst->params, req->rate);
+ req->rate = icst_hz(icst->params, vco);
+
+ return 0;
}
static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -329,7 +347,7 @@ static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops icst_ops = {
.recalc_rate = icst_recalc_rate,
- .round_rate = icst_round_rate,
+ .determine_rate = icst_determine_rate,
.set_rate = icst_set_rate,
};
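A worked example of the megahertz slamming above, with an illustrative request:

	/*
	 * 14.7 MHz requested on an Integrator AP/CP core-module clock:
	 *	DIV_ROUND_CLOSEST(14700000, 1000000) * 1000000 = 15000000
	 * i.e. req->rate lands on the closest whole megahertz between
	 * the 12 MHz and 160 MHz clamps.
	 */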
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index c385ca2f4a74..9adbf5c33bd1 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -33,18 +33,18 @@ static unsigned long vexpress_osc_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long vexpress_osc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int vexpress_osc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vexpress_osc *osc = to_vexpress_osc(hw);
- if (osc->rate_min && rate < osc->rate_min)
- rate = osc->rate_min;
+ if (osc->rate_min && req->rate < osc->rate_min)
+ req->rate = osc->rate_min;
- if (osc->rate_max && rate > osc->rate_max)
- rate = osc->rate_max;
+ if (osc->rate_max && req->rate > osc->rate_max)
+ req->rate = osc->rate_max;
- return rate;
+ return 0;
}
static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -57,7 +57,7 @@ static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops vexpress_osc_ops = {
.recalc_rate = vexpress_osc_recalc_rate,
- .round_rate = vexpress_osc_round_rate,
+ .determine_rate = vexpress_osc_determine_rate,
.set_rate = vexpress_osc_set_rate,
};
diff --git a/drivers/clk/visconti/clkc-tmpv770x.c b/drivers/clk/visconti/clkc-tmpv770x.c
index 6c753b2cb558..1e2e8d6437fe 100644
--- a/drivers/clk/visconti/clkc-tmpv770x.c
+++ b/drivers/clk/visconti/clkc-tmpv770x.c
@@ -17,6 +17,10 @@
#include "clkc.h"
#include "reset.h"
+/* Must be equal to the last clock/reset ID increased by one */
+#define CLKS_NR (TMPV770X_CLK_VIIFBS1_PROC + 1)
+#define RESETS_NR (TMPV770X_RESET_VIIFBS1_L1ISP + 1)
+
static DEFINE_SPINLOCK(tmpv770x_clk_lock);
static DEFINE_SPINLOCK(tmpv770x_rst_lock);
@@ -28,6 +32,10 @@ static const struct clk_parent_data pietherplls_parent_data[] = {
{ .fw_name = "pietherpll", .name = "pietherpll", },
};
+static const struct clk_parent_data pidnnplls_parent_data[] = {
+ { .fw_name = "pidnnpll", .name = "pidnnpll", },
+};
+
static const struct visconti_fixed_clk fixed_clk_tables[] = {
/* PLL1 */
/* PICMPT0/1, PITSC, PIUWDT, PISWDT, PISBUS, PIPMU, PIGPMU, PITMU */
@@ -64,6 +72,41 @@ static const struct visconti_clk_gate_table pietherpll_clk_gate_tables[] = {
TMPV770X_RESET_PIETHER_125M, },
};
+static const struct visconti_clk_gate_table pidnnpll_clk_gate_tables[] = {
+ { TMPV770X_CLK_VIIFBS0, "viifbs0",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 1, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS0_PROC, "viifbs0_proc",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 18, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS0_L1ISP, "viifbs0_l1isp",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 17, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS0_L2ISP, "viifbs0_l2isp",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 16, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS1, "viifbs1",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 5, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS1_PROC, "viifbs1_proc",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 22, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS1_L1ISP, "viifbs1_l1isp",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 21, 1,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIFBS1_L2ISP, "viifbs1_l2isp",
+ pidnnplls_parent_data, ARRAY_SIZE(pidnnplls_parent_data),
+ 0, 0x58, 0x158, 20, 1,
+ NO_RESET, },
+};
+
static const struct visconti_clk_gate_table clk_gate_tables[] = {
{ TMPV770X_CLK_HOX, "hox",
clks_parent_data, ARRAY_SIZE(clks_parent_data),
@@ -185,6 +228,22 @@ static const struct visconti_clk_gate_table clk_gate_tables[] = {
clks_parent_data, ARRAY_SIZE(clks_parent_data),
0, 0x14, 0x114, 0, 4,
TMPV770X_RESET_SBUSCLK, },
+ { TMPV770X_CLK_VIIF0_CFGCLK, "csi2rx0cfg",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x58, 0x158, 0, 24,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIF0_APBCLK, "csi2rx0apb",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x58, 0x158, 2, 4,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIF1_CFGCLK, "csi2rx1cfg",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x58, 0x158, 4, 24,
+ NO_RESET, },
+ { TMPV770X_CLK_VIIF1_APBCLK, "csi2rx1apb",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x58, 0x158, 6, 4,
+ NO_RESET, },
};
static const struct visconti_reset_data clk_reset_data[] = {
@@ -220,6 +279,14 @@ static const struct visconti_reset_data clk_reset_data[] = {
[TMPV770X_RESET_PIPCMIF] = { 0x464, 0x564, 0, },
[TMPV770X_RESET_PICKMON] = { 0x410, 0x510, 8, },
[TMPV770X_RESET_SBUSCLK] = { 0x414, 0x514, 0, },
+ [TMPV770X_RESET_VIIFBS0] = { 0x458, 0x558, 0, },
+ [TMPV770X_RESET_VIIFBS0_APB] = { 0x458, 0x558, 1, },
+ [TMPV770X_RESET_VIIFBS0_L2ISP] = { 0x458, 0x558, 16, },
+ [TMPV770X_RESET_VIIFBS0_L1ISP] = { 0x458, 0x558, 17, },
+ [TMPV770X_RESET_VIIFBS1] = { 0x458, 0x558, 4, },
+ [TMPV770X_RESET_VIIFBS1_APB] = { 0x458, 0x558, 5, },
+ [TMPV770X_RESET_VIIFBS1_L2ISP] = { 0x458, 0x558, 20, },
+ [TMPV770X_RESET_VIIFBS1_L1ISP] = { 0x458, 0x558, 21, },
};
static int visconti_clk_probe(struct platform_device *pdev)
@@ -234,12 +301,12 @@ static int visconti_clk_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- ctx = visconti_init_clk(dev, regmap, TMPV770X_NR_CLK);
+ ctx = visconti_init_clk(dev, regmap, CLKS_NR);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
ret = visconti_register_reset_controller(dev, regmap, clk_reset_data,
- TMPV770X_NR_RESET,
+ RESETS_NR,
&visconti_reset_ops,
&tmpv770x_rst_lock);
if (ret) {
@@ -272,6 +339,14 @@ static int visconti_clk_probe(struct platform_device *pdev)
return ret;
}
+ ret = visconti_clk_register_gates(ctx, pidnnpll_clk_gate_tables,
+ ARRAY_SIZE(pidnnpll_clk_gate_tables),
+ clk_reset_data, &tmpv770x_clk_lock);
+ if (ret) {
+ dev_err(dev, "Failed to register pidnnpll clock gate: %d\n", ret);
+ return ret;
+ }
+
return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &ctx->clk_data);
}
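
The CLKS_NR/RESETS_NR defines introduced above size the provider arrays by the highest ID plus one, since of_clk_hw_onecell_get() indexes the array directly by clock ID. A small illustration, with a made-up last ID for the sketch:

#include <linux/clk-provider.h>

#define MY_CLK_LAST	41			/* made-up last clock ID */
#define CLKS_NR		(MY_CLK_LAST + 1)	/* slots for IDs 0..LAST */

/* of_clk_hw_onecell_get() indexes this array directly by clock ID */
static struct clk_hw *clk_hws[CLKS_NR];
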
diff --git a/drivers/clk/visconti/pll-tmpv770x.c b/drivers/clk/visconti/pll-tmpv770x.c
index 8360ccf88867..a2208c5fc12e 100644
--- a/drivers/clk/visconti/pll-tmpv770x.c
+++ b/drivers/clk/visconti/pll-tmpv770x.c
@@ -16,6 +16,9 @@
#include "pll.h"
+/* Must be equal to the last pll ID increased by one */
+#define PLLS_NR (TMPV770X_PLL_PIIMGERPLL + 1)
+
static DEFINE_SPINLOCK(tmpv770x_pll_lock);
static const struct visconti_pll_rate_table pipll0_rates[] __initconst = {
@@ -66,7 +69,7 @@ static void __init tmpv770x_setup_plls(struct device_node *np)
if (!reg_base)
return;
- ctx = visconti_init_pll(np, reg_base, TMPV770X_NR_PLL);
+ ctx = visconti_init_pll(np, reg_base, PLLS_NR);
if (IS_ERR(ctx)) {
iounmap(reg_base);
return;
diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
index 8ca1bad61864..681721d85032 100644
--- a/drivers/clk/visconti/pll.c
+++ b/drivers/clk/visconti/pll.c
@@ -100,8 +100,8 @@ static unsigned long visconti_get_pll_rate_from_data(struct visconti_pll *pll,
return rate_table[0].rate;
}
-static long visconti_pll_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *prate)
+static int visconti_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct visconti_pll *pll = to_visconti_pll(hw);
const struct visconti_pll_rate_table *rate_table = pll->rate_table;
@@ -109,11 +109,16 @@ static long visconti_pll_round_rate(struct clk_hw *hw,
/* Assuming rate_table is in descending order */
for (i = 0; i < pll->rate_count; i++)
- if (rate >= rate_table[i].rate)
- return rate_table[i].rate;
+ if (req->rate >= rate_table[i].rate) {
+ req->rate = rate_table[i].rate;
+
+ return 0;
+ }
/* return minimum supported value */
- return rate_table[i - 1].rate;
+ req->rate = rate_table[i - 1].rate;
+
+ return 0;
}
static unsigned long visconti_pll_recalc_rate(struct clk_hw *hw,
@@ -232,7 +237,7 @@ static const struct clk_ops visconti_pll_ops = {
.enable = visconti_pll_enable,
.disable = visconti_pll_disable,
.is_enabled = visconti_pll_is_enabled,
- .round_rate = visconti_pll_round_rate,
+ .determine_rate = visconti_pll_determine_rate,
.recalc_rate = visconti_pll_recalc_rate,
.set_rate = visconti_pll_set_rate,
};
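
These mechanical conversions are behaviour-preserving because the clk core only falls back to .round_rate when no .determine_rate is provided. A simplified sketch of that preference, not the actual drivers/clk/clk.c code:

static int clk_core_round_sketch(struct clk_hw *hw,
				 const struct clk_ops *ops,
				 struct clk_rate_request *req)
{
	if (ops->determine_rate)
		return ops->determine_rate(hw, req);

	if (ops->round_rate) {
		long rate = ops->round_rate(hw, req->rate,
					    &req->best_parent_rate);
		if (rate < 0)
			return rate;

		req->rate = rate;
		return 0;
	}

	return -EINVAL;
}
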
diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c
index 89b53f280aee..d099667355f8 100644
--- a/drivers/clk/x86/clk-cgu.c
+++ b/drivers/clk/x86/clk-cgu.c
@@ -132,14 +132,15 @@ lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
divider->flags, divider->width);
}
-static long
-lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lgm_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
- return divider_round_rate(hw, rate, prate, divider->table,
- divider->width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate, divider->table,
+ divider->width, divider->flags);
+
+ return 0;
}
static int
@@ -182,7 +183,7 @@ static void lgm_clk_divider_disable(struct clk_hw *hw)
static const struct clk_ops lgm_clk_divider_ops = {
.recalc_rate = lgm_clk_divider_recalc_rate,
- .round_rate = lgm_clk_divider_round_rate,
+ .determine_rate = lgm_clk_divider_determine_rate,
.set_rate = lgm_clk_divider_set_rate,
.enable = lgm_clk_divider_enable,
.disable = lgm_clk_divider_disable,
@@ -487,15 +488,14 @@ lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long
-lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int lgm_clk_ddiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
u32 div, ddiv1, ddiv2;
u64 rate64;
- div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);
+ div = DIV_ROUND_CLOSEST_ULL((u64)req->best_parent_rate, req->rate);
/* if predivide bit is enabled, modify div by factor of 2.5 */
if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
@@ -503,14 +503,17 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
}
- if (div <= 0)
- return *prate;
+ if (div <= 0) {
+ req->rate = req->best_parent_rate;
+
+ return 0;
+ }
if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
return -EINVAL;
- rate64 = *prate;
+ rate64 = req->best_parent_rate;
do_div(rate64, ddiv1);
do_div(rate64, ddiv2);
@@ -520,7 +523,9 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
}
- return rate64;
+ req->rate = rate64;
+
+ return 0;
}
static const struct clk_ops lgm_clk_ddiv_ops = {
@@ -528,7 +533,7 @@ static const struct clk_ops lgm_clk_ddiv_ops = {
.enable = lgm_clk_ddiv_enable,
.disable = lgm_clk_ddiv_disable,
.set_rate = lgm_clk_ddiv_set_rate,
- .round_rate = lgm_clk_ddiv_round_rate,
+ .determine_rate = lgm_clk_ddiv_determine_rate,
};
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
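
The ddiv path above keeps the 2.5 predivider in integer arithmetic by multiplying by 2 and then dividing by 5. The same step in isolation, purely illustrative:

/* div / 2.5 == (div * 2) / 5, rounded to the closest integer */
static u32 apply_2p5_prediv(u32 div)
{
	return DIV_ROUND_CLOSEST_ULL((u64)div * 2, 5);
}
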
diff --git a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
index 0295a13a811c..4a0136349f71 100644
--- a/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
+++ b/drivers/clk/xilinx/clk-xlnx-clock-wizard.c
@@ -322,8 +322,8 @@ err_reconfig:
return err;
}
-static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u8 div;
@@ -331,16 +331,18 @@ static long clk_wzrd_round_rate(struct clk_hw *hw, unsigned long rate,
* since we don't change parent rate we just round rate to closest
* achievable
*/
- div = DIV_ROUND_CLOSEST(*prate, rate);
+ div = DIV_ROUND_CLOSEST(req->best_parent_rate, req->rate);
- return *prate / div;
+ req->rate = req->best_parent_rate / div;
+
+ return 0;
}
static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
- u64 vco_freq, freq, diff, vcomin, vcomax;
+ u64 vco_freq, freq, diff, vcomin, vcomax, best_diff = -1ULL;
u32 m, d, o;
u32 mmin, mmax, dmin, dmax, omin, omax;
@@ -356,22 +358,26 @@ static int clk_wzrd_get_divisors_ver(struct clk_hw *hw, unsigned long rate,
for (m = mmin; m <= mmax; m++) {
for (d = dmin; d <= dmax; d++) {
vco_freq = DIV_ROUND_CLOSEST((parent_rate * m), d);
- if (vco_freq >= vcomin && vco_freq <= vcomax) {
- for (o = omin; o <= omax; o++) {
- freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
- diff = abs(freq - rate);
-
- if (diff < WZRD_MIN_ERR) {
- divider->m = m;
- divider->d = d;
- divider->o = o;
- return 0;
- }
- }
+ if (vco_freq < vcomin || vco_freq > vcomax)
+ continue;
+
+ o = DIV_ROUND_CLOSEST_ULL(vco_freq, rate);
+ if (o < omin || o > omax)
+ continue;
+ freq = DIV_ROUND_CLOSEST_ULL(vco_freq, o);
+ diff = abs(freq - rate);
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ divider->m = m;
+ divider->d = d;
+ divider->o = o;
+ if (!diff)
+ return 0;
}
}
}
- return -EBUSY;
+ return 0;
}
static int clk_wzrd_get_divisors(struct clk_hw *hw, unsigned long rate,
@@ -642,14 +648,14 @@ static unsigned long clk_wzrd_recalc_rate_all_ver(struct clk_hw *hw,
divider->flags, divider->width);
}
-static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate_all(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
u32 m, d, o;
int err;
- err = clk_wzrd_get_divisors(hw, rate, *prate);
+ err = clk_wzrd_get_divisors(hw, req->rate, req->best_parent_rate);
if (err)
return err;
@@ -657,19 +663,20 @@ static long clk_wzrd_round_rate_all(struct clk_hw *hw, unsigned long rate,
d = divider->d;
o = divider->o;
- rate = div_u64(*prate * (m * 1000 + divider->m_frac), d * (o * 1000 + divider->o_frac));
- return rate;
+ req->rate = div_u64(req->best_parent_rate * (m * 1000 + divider->m_frac),
+ d * (o * 1000 + divider->o_frac));
+ return 0;
}
-static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_ver_determine_rate_all(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_wzrd_divider *divider = to_clk_wzrd_divider(hw);
unsigned long int_freq;
u32 m, d, o, div, f;
int err;
- err = clk_wzrd_get_divisors_ver(hw, rate, *prate);
+ err = clk_wzrd_get_divisors_ver(hw, req->rate, req->best_parent_rate);
if (err)
return err;
@@ -678,36 +685,38 @@ static long clk_wzrd_ver_round_rate_all(struct clk_hw *hw, unsigned long rate,
o = divider->o;
div = d * o;
- int_freq = divider_recalc_rate(hw, *prate * m, div, divider->table,
+ int_freq = divider_recalc_rate(hw, req->best_parent_rate * m, div,
+ divider->table,
divider->flags, divider->width);
- if (rate > int_freq) {
- f = DIV_ROUND_CLOSEST_ULL(rate * WZRD_FRAC_POINTS, int_freq);
- rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
+ if (req->rate > int_freq) {
+ f = DIV_ROUND_CLOSEST_ULL(req->rate * WZRD_FRAC_POINTS,
+ int_freq);
+ req->rate = DIV_ROUND_CLOSEST(int_freq * f, WZRD_FRAC_POINTS);
}
- return rate;
+ return 0;
}
static const struct clk_ops clk_wzrd_ver_divider_ops = {
- .round_rate = clk_wzrd_round_rate,
+ .determine_rate = clk_wzrd_determine_rate,
.set_rate = clk_wzrd_ver_dynamic_reconfig,
.recalc_rate = clk_wzrd_recalc_rate_ver,
};
static const struct clk_ops clk_wzrd_ver_div_all_ops = {
- .round_rate = clk_wzrd_ver_round_rate_all,
+ .determine_rate = clk_wzrd_ver_determine_rate_all,
.set_rate = clk_wzrd_dynamic_all_ver,
.recalc_rate = clk_wzrd_recalc_rate_all_ver,
};
static const struct clk_ops clk_wzrd_clk_divider_ops = {
- .round_rate = clk_wzrd_round_rate,
+ .determine_rate = clk_wzrd_determine_rate,
.set_rate = clk_wzrd_dynamic_reconfig,
.recalc_rate = clk_wzrd_recalc_rate,
};
static const struct clk_ops clk_wzrd_clk_div_all_ops = {
- .round_rate = clk_wzrd_round_rate_all,
+ .determine_rate = clk_wzrd_determine_rate_all,
.set_rate = clk_wzrd_dynamic_all,
.recalc_rate = clk_wzrd_recalc_rate_all,
};
@@ -769,14 +778,14 @@ static int clk_wzrd_dynamic_reconfig_f(struct clk_hw *hw, unsigned long rate,
WZRD_USEC_POLL, WZRD_TIMEOUT_POLL);
}
-static long clk_wzrd_round_rate_f(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_wzrd_determine_rate_f(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- return rate;
+ return 0;
}
static const struct clk_ops clk_wzrd_clk_divider_ops_f = {
- .round_rate = clk_wzrd_round_rate_f,
+ .determine_rate = clk_wzrd_determine_rate_f,
.set_rate = clk_wzrd_dynamic_reconfig_f,
.recalc_rate = clk_wzrd_recalc_ratef,
};
@@ -1108,7 +1117,7 @@ static int clk_wzrd_register_output_clocks(struct device *dev, int nr_outputs)
(dev,
clkout_name, clk_name, 0,
clk_wzrd->base,
- (WZRD_CLK_CFG_REG(is_versal, 3) + i * 8),
+ (WZRD_CLK_CFG_REG(is_versal, 2) + i * 8),
WZRD_CLKOUT_DIVIDE_SHIFT,
WZRD_CLKOUT_DIVIDE_WIDTH,
CLK_DIVIDER_ONE_BASED |
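
Besides the determine_rate conversion, the divisor search above changes strategy: instead of scanning every output divider o and stopping at the first result within WZRD_MIN_ERR, it derives o directly from the target rate and keeps the closest candidate, so the function can no longer fail with -EBUSY. A reduced sketch of that strategy; passing the driver's bounds in as parameters is an assumption made for self-containment:

#include <linux/math.h>
#include <linux/math64.h>

static void pick_best_divisors(u64 parent_rate, u64 rate,
			       u32 mmin, u32 mmax, u32 dmin, u32 dmax,
			       u32 omin, u32 omax, u64 vcomin, u64 vcomax,
			       u32 *best_m, u32 *best_d, u32 *best_o)
{
	u64 best_diff = -1ULL;	/* unsigned: starts at the maximum */

	for (u32 m = mmin; m <= mmax; m++) {
		for (u32 d = dmin; d <= dmax; d++) {
			u64 vco = DIV_ROUND_CLOSEST_ULL(parent_rate * m, d);
			u64 diff;
			u32 o;

			if (vco < vcomin || vco > vcomax)
				continue;

			/* derive o from the target instead of scanning it */
			o = DIV_ROUND_CLOSEST_ULL(vco, rate);
			if (o < omin || o > omax)
				continue;

			diff = abs_diff(DIV_ROUND_CLOSEST_ULL(vco, o), rate);
			if (diff < best_diff) {
				best_diff = diff;
				*best_m = m;
				*best_d = d;
				*best_o = o;
				if (!diff)
					return;	/* exact match */
			}
		}
	}
}
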
diff --git a/drivers/clk/xilinx/xlnx_vcu.c b/drivers/clk/xilinx/xlnx_vcu.c
index 1ded67bee06c..02699bc0f82c 100644
--- a/drivers/clk/xilinx/xlnx_vcu.c
+++ b/drivers/clk/xilinx/xlnx_vcu.c
@@ -311,18 +311,21 @@ static int xvcu_pll_set_div(struct vcu_pll *pll, int div)
return 0;
}
-static long xvcu_pll_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int xvcu_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct vcu_pll *pll = to_vcu_pll(hw);
unsigned int feedback_div;
- rate = clamp_t(unsigned long, rate, pll->fvco_min, pll->fvco_max);
+ req->rate = clamp_t(unsigned long, req->rate, pll->fvco_min,
+ pll->fvco_max);
- feedback_div = DIV_ROUND_CLOSEST_ULL(rate, *parent_rate);
+ feedback_div = DIV_ROUND_CLOSEST_ULL(req->rate, req->best_parent_rate);
feedback_div = clamp_t(unsigned int, feedback_div, 25, 125);
- return *parent_rate * feedback_div;
+ req->rate = req->best_parent_rate * feedback_div;
+
+ return 0;
}
static unsigned long xvcu_pll_recalc_rate(struct clk_hw *hw,
@@ -394,7 +397,7 @@ static void xvcu_pll_disable(struct clk_hw *hw)
static const struct clk_ops vcu_pll_ops = {
.enable = xvcu_pll_enable,
.disable = xvcu_pll_disable,
- .round_rate = xvcu_pll_round_rate,
+ .determine_rate = xvcu_pll_determine_rate,
.recalc_rate = xvcu_pll_recalc_rate,
.set_rate = xvcu_pll_set_rate,
};
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index e5f8fb704df2..5eca1c14981a 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -48,18 +48,20 @@ struct zynq_pll {
* @prate: Clock frequency of parent clock
* Return: frequency closest to @rate the hardware can generate.
*/
-static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int zynq_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv;
- fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate);
if (fbdiv < PLL_FBDIV_MIN)
fbdiv = PLL_FBDIV_MIN;
else if (fbdiv > PLL_FBDIV_MAX)
fbdiv = PLL_FBDIV_MAX;
- return *prate * fbdiv;
+ req->rate = req->best_parent_rate * fbdiv;
+
+ return 0;
}
/**
@@ -167,7 +169,7 @@ static const struct clk_ops zynq_pll_ops = {
.enable = zynq_pll_enable,
.disable = zynq_pll_disable,
.is_enabled = zynq_pll_is_enabled,
- .round_rate = zynq_pll_round_rate,
+ .determine_rate = zynq_pll_determine_rate,
.recalc_rate = zynq_pll_recalc_rate
};
diff --git a/drivers/clk/zynqmp/divider.c b/drivers/clk/zynqmp/divider.c
index 5a00487ae408..c824eeacd8eb 100644
--- a/drivers/clk/zynqmp/divider.c
+++ b/drivers/clk/zynqmp/divider.c
@@ -118,9 +118,8 @@ static unsigned long zynqmp_clk_divider_recalc_rate(struct clk_hw *hw,
*
* Return: 0 on success else error+reason
*/
-static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int zynqmp_clk_divider_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct zynqmp_clk_divider *divider = to_zynqmp_clk_divider(hw);
const char *clk_name = clk_hw_get_name(hw);
@@ -145,17 +144,21 @@ static long zynqmp_clk_divider_round_rate(struct clk_hw *hw,
if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
bestdiv = 1 << bestdiv;
- return DIV_ROUND_UP_ULL((u64)*prate, bestdiv);
+ req->rate = DIV_ROUND_UP_ULL((u64)req->best_parent_rate, bestdiv);
+
+ return 0;
}
width = fls(divider->max_div);
- rate = divider_round_rate(hw, rate, prate, NULL, width, divider->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL, width, divider->flags);
- if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) && (rate % *prate))
- *prate = rate;
+ if (divider->is_frac && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
+ (req->rate % req->best_parent_rate))
+ req->best_parent_rate = req->rate;
- return rate;
+ return 0;
}
/**
@@ -199,13 +202,13 @@ static int zynqmp_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops zynqmp_clk_divider_ops = {
.recalc_rate = zynqmp_clk_divider_recalc_rate,
- .round_rate = zynqmp_clk_divider_round_rate,
+ .determine_rate = zynqmp_clk_divider_determine_rate,
.set_rate = zynqmp_clk_divider_set_rate,
};
static const struct clk_ops zynqmp_clk_divider_ro_ops = {
.recalc_rate = zynqmp_clk_divider_recalc_rate,
- .round_rate = zynqmp_clk_divider_round_rate,
+ .determine_rate = zynqmp_clk_divider_determine_rate,
};
/**
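
The zynqmp divider carries one behavioural quirk through the conversion: for fractional dividers with CLK_SET_RATE_PARENT, a rate that does not divide the parent evenly is pushed up to the parent instead. The same logic in isolation, with a hypothetical width and no divider table:

static int frac_div_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	/* a width of 8 and a NULL table are assumptions for the sketch */
	req->rate = divider_round_rate(hw, req->rate,
				       &req->best_parent_rate,
				       NULL, 8, 0);

	/*
	 * A fractional divider can hit the target exactly if the
	 * parent itself moves to the requested rate.
	 */
	if ((clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) &&
	    (req->rate % req->best_parent_rate))
		req->best_parent_rate = req->rate;

	return 0;
}
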
diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
index 7411a7fd50ac..630a3936c97c 100644
--- a/drivers/clk/zynqmp/pll.c
+++ b/drivers/clk/zynqmp/pll.c
@@ -98,29 +98,29 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
*
* Return: Frequency closest to @rate the hardware can generate
*/
-static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int zynqmp_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
u32 fbdiv;
u32 mult, div;
/* Let rate fall inside the range PS_PLL_VCO_MIN ~ PS_PLL_VCO_MAX */
- if (rate > PS_PLL_VCO_MAX) {
- div = DIV_ROUND_UP(rate, PS_PLL_VCO_MAX);
- rate = rate / div;
+ if (req->rate > PS_PLL_VCO_MAX) {
+ div = DIV_ROUND_UP(req->rate, PS_PLL_VCO_MAX);
+ req->rate = req->rate / div;
}
- if (rate < PS_PLL_VCO_MIN) {
- mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, rate);
- rate = rate * mult;
+ if (req->rate < PS_PLL_VCO_MIN) {
+ mult = DIV_ROUND_UP(PS_PLL_VCO_MIN, req->rate);
+ req->rate = req->rate * mult;
}
- fbdiv = DIV_ROUND_CLOSEST(rate, *prate);
+ fbdiv = DIV_ROUND_CLOSEST(req->rate, req->best_parent_rate);
if (fbdiv < PLL_FBDIV_MIN || fbdiv > PLL_FBDIV_MAX) {
fbdiv = clamp_t(u32, fbdiv, PLL_FBDIV_MIN, PLL_FBDIV_MAX);
- rate = *prate * fbdiv;
+ req->rate = req->best_parent_rate * fbdiv;
}
- return rate;
+ return 0;
}
/**
@@ -294,7 +294,7 @@ static const struct clk_ops zynqmp_pll_ops = {
.enable = zynqmp_pll_enable,
.disable = zynqmp_pll_disable,
.is_enabled = zynqmp_pll_is_enabled,
- .round_rate = zynqmp_pll_round_rate,
+ .determine_rate = zynqmp_pll_determine_rate,
.recalc_rate = zynqmp_pll_recalc_rate,
.set_rate = zynqmp_pll_set_rate,
};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 645f517a1ac2..aa59e5b13351 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -395,8 +395,7 @@ config ARM_GLOBAL_TIMER
config ARM_GT_INITIAL_PRESCALER_VAL
int "ARM global timer initial prescaler value"
- default 2 if ARCH_ZYNQ
- default 1
+ default 0
depends on ARM_GLOBAL_TIMER
help
When the ARM global timer initializes, its current rate is declared
@@ -406,6 +405,7 @@ config ARM_GT_INITIAL_PRESCALER_VAL
bounds about how much the parent clock is allowed to decrease or
increase wrt the initial clock value.
This affects CPU_FREQ max delta from the initial frequency.
+ Use 0 to enable auto-detection in the driver.
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module"
@@ -474,11 +474,14 @@ config FSL_FTM_TIMER
help
Support for Freescale FlexTimer Module (FTM) timer.
-config VF_PIT_TIMER
- bool
+config NXP_PIT_TIMER
+ bool "NXP Periodic Interrupt Timer" if COMPILE_TEST
select CLKSRC_MMIO
help
- Support for Periodic Interrupt Timer on Freescale Vybrid Family SoCs.
+ Support for Periodic Interrupt Timer on Freescale / NXP
+ SoCs. This periodic timer is found on the Vybrid Family and
+ the automotive S32G2/3 platforms. It contains 4 channels,
+ two of which can be coupled to form a 64-bit channel.
config SYS_SUPPORTS_SH_CMT
bool
@@ -779,4 +782,15 @@ config NXP_STM_TIMER
Enables the support for NXP System Timer Module found in the
s32g NXP platform series.
+config RTK_SYSTIMER
+ bool "Realtek SYSTIMER support"
+ depends on ARM || ARM64
+ depends on ARCH_REALTEK || COMPILE_TEST
+ select TIMER_OF
+ help
+ This option enables the driver that registers the global 1 MHz hardware
+ counter as a clock event device on Realtek SoCs. Make sure to enable
+ this option only when building for a Realtek platform or for compilation
+ testing.
+
endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 205bf3b0a8f3..b46376af6b49 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o
obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o
-obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o
+obj-$(CONFIG_NXP_PIT_TIMER) += timer-nxp-pit.o
obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
obj-$(CONFIG_MTK_CPUX_TIMER) += timer-mediatek-cpux.o
@@ -64,6 +64,7 @@ obj-$(CONFIG_REALTEK_OTTO_TIMER) += timer-rtl-otto.o
obj-$(CONFIG_ARC_TIMERS) += arc_timer.o
obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
+obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer_mmio.o
obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
@@ -94,3 +95,4 @@ obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o
obj-$(CONFIG_EP93XX_TIMER) += timer-ep93xx.o
obj-$(CONFIG_RALINK_TIMER) += timer-ralink.o
obj-$(CONFIG_NXP_STM_TIMER) += timer-nxp-stm.o
+obj-$(CONFIG_RTK_SYSTIMER) += timer-realtek.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 80ba6a54248c..90aeff44a276 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -34,42 +34,12 @@
#include <clocksource/arm_arch_timer.h>
-#define CNTTIDR 0x08
-#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
-
-#define CNTACR(n) (0x40 + ((n) * 4))
-#define CNTACR_RPCT BIT(0)
-#define CNTACR_RVCT BIT(1)
-#define CNTACR_RFRQ BIT(2)
-#define CNTACR_RVOFF BIT(3)
-#define CNTACR_RWVT BIT(4)
-#define CNTACR_RWPT BIT(5)
-
-#define CNTPCT_LO 0x00
-#define CNTVCT_LO 0x08
-#define CNTFRQ 0x10
-#define CNTP_CVAL_LO 0x20
-#define CNTP_CTL 0x2c
-#define CNTV_CVAL_LO 0x30
-#define CNTV_CTL 0x3c
-
/*
* The minimum amount of time a generic counter is guaranteed to not roll over
* (40 years)
*/
#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)
-static unsigned arch_timers_present __initdata;
-
-struct arch_timer {
- void __iomem *base;
- struct clock_event_device evt;
-};
-
-static struct arch_timer *arch_timer_mem __ro_after_init;
-
-#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
-
static u32 arch_timer_rate __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
@@ -85,7 +55,6 @@ static struct clock_event_device __percpu *arch_timer_evt;
static enum arch_timer_ppi_nr arch_timer_uses_ppi __ro_after_init = ARCH_TIMER_VIRT_PPI;
static bool arch_timer_c3stop __ro_after_init;
-static bool arch_timer_mem_use_virtual __ro_after_init;
static bool arch_counter_suspend_stop __ro_after_init;
#ifdef CONFIG_GENERIC_GETTIMEOFDAY
static enum vdso_clock_mode vdso_default = VDSO_CLOCKMODE_ARCHTIMER;
@@ -121,76 +90,6 @@ static int arch_counter_get_width(void)
/*
* Architected system timer support.
*/
-
-static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
- struct clock_event_device *clk)
-{
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTP_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /*
- * Not guaranteed to be atomic, so the timer
- * must be disabled at this point.
- */
- writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- writel_relaxed((u32)val, timer->base + CNTV_CTL);
- break;
- case ARCH_TIMER_REG_CVAL:
- /* Same restriction as above */
- writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- arch_timer_reg_write_cp15(access, reg, val);
- }
-}
-
-static __always_inline
-u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
- struct clock_event_device *clk)
-{
- u32 val;
-
- if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTP_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
- struct arch_timer *timer = to_arch_timer(clk);
- switch (reg) {
- case ARCH_TIMER_REG_CTRL:
- val = readl_relaxed(timer->base + CNTV_CTL);
- break;
- default:
- BUILD_BUG();
- }
- } else {
- val = arch_timer_reg_read_cp15(access, reg);
- }
-
- return val;
-}
-
static noinstr u64 raw_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
@@ -424,7 +323,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
unsigned long ctrl;
u64 cval;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -436,7 +335,7 @@ void erratum_set_next_event_generic(const int access, unsigned long evt,
write_sysreg(cval, cntv_cval_el0);
}
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
@@ -667,10 +566,10 @@ static __always_inline irqreturn_t timer_handler(const int access,
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -692,28 +591,14 @@ static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}
-static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
-}
-
-static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
-}
-
static __always_inline int arch_timer_shutdown(const int access,
struct clock_event_device *clk)
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
return 0;
}
@@ -728,23 +613,13 @@ static int arch_timer_shutdown_phys(struct clock_event_device *clk)
return arch_timer_shutdown(ARCH_TIMER_PHYS_ACCESS, clk);
}
-static int arch_timer_shutdown_virt_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_VIRT_ACCESS, clk);
-}
-
-static int arch_timer_shutdown_phys_mem(struct clock_event_device *clk)
-{
- return arch_timer_shutdown(ARCH_TIMER_MEM_PHYS_ACCESS, clk);
-}
-
static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
u64 cnt;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl = arch_timer_reg_read_cp15(access, ARCH_TIMER_REG_CTRL);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
@@ -753,8 +628,8 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
else
cnt = __arch_counter_get_cntvct();
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_reg_write_cp15(access, ARCH_TIMER_REG_CTRL, ctrl);
}
static int arch_timer_set_next_event_virt(unsigned long evt,
@@ -771,60 +646,6 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
return 0;
}
-static noinstr u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
-{
- u32 cnt_lo, cnt_hi, tmp_hi;
-
- do {
- cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
- tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
- } while (cnt_hi != tmp_hi);
-
- return ((u64) cnt_hi << 32) | cnt_lo;
-}
-
-static __always_inline void set_next_event_mem(const int access, unsigned long evt,
- struct clock_event_device *clk)
-{
- struct arch_timer *timer = to_arch_timer(clk);
- unsigned long ctrl;
- u64 cnt;
-
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
-
- /* Timer must be disabled before programming CVAL */
- if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
- ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
- }
-
- ctrl |= ARCH_TIMER_CTRL_ENABLE;
- ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
-
- if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
- cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
- else
- cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
-
- arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
-}
-
-static int arch_timer_set_next_event_virt_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
- return 0;
-}
-
-static int arch_timer_set_next_event_phys_mem(unsigned long evt,
- struct clock_event_device *clk)
-{
- set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
- return 0;
-}
-
static u64 __arch_timer_check_delta(void)
{
#ifdef CONFIG_ARM64
@@ -850,63 +671,41 @@ static u64 __arch_timer_check_delta(void)
return CLOCKSOURCE_MASK(arch_counter_get_width());
}
-static void __arch_timer_setup(unsigned type,
- struct clock_event_device *clk)
+static void __arch_timer_setup(struct clock_event_device *clk)
{
+ typeof(clk->set_next_event) sne;
u64 max_delta;
clk->features = CLOCK_EVT_FEAT_ONESHOT;
- if (type == ARCH_TIMER_TYPE_CP15) {
- typeof(clk->set_next_event) sne;
-
- arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
-
- if (arch_timer_c3stop)
- clk->features |= CLOCK_EVT_FEAT_C3STOP;
- clk->name = "arch_sys_timer";
- clk->rating = 450;
- clk->cpumask = cpumask_of(smp_processor_id());
- clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
- switch (arch_timer_uses_ppi) {
- case ARCH_TIMER_VIRT_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_virt;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
- sne = erratum_handler(set_next_event_virt);
- break;
- case ARCH_TIMER_PHYS_SECURE_PPI:
- case ARCH_TIMER_PHYS_NONSECURE_PPI:
- case ARCH_TIMER_HYP_PPI:
- clk->set_state_shutdown = arch_timer_shutdown_phys;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
- sne = erratum_handler(set_next_event_phys);
- break;
- default:
- BUG();
- }
+ arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
- clk->set_next_event = sne;
- max_delta = __arch_timer_check_delta();
- } else {
- clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
- clk->name = "arch_mem_timer";
- clk->rating = 400;
- clk->cpumask = cpu_possible_mask;
- if (arch_timer_mem_use_virtual) {
- clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
- clk->set_next_event =
- arch_timer_set_next_event_virt_mem;
- } else {
- clk->set_state_shutdown = arch_timer_shutdown_phys_mem;
- clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem;
- clk->set_next_event =
- arch_timer_set_next_event_phys_mem;
- }
-
- max_delta = CLOCKSOURCE_MASK(56);
+ if (arch_timer_c3stop)
+ clk->features |= CLOCK_EVT_FEAT_C3STOP;
+ clk->name = "arch_sys_timer";
+ clk->rating = 450;
+ clk->cpumask = cpumask_of(smp_processor_id());
+ clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case ARCH_TIMER_VIRT_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_virt;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
+ sne = erratum_handler(set_next_event_virt);
+ break;
+ case ARCH_TIMER_PHYS_SECURE_PPI:
+ case ARCH_TIMER_PHYS_NONSECURE_PPI:
+ case ARCH_TIMER_HYP_PPI:
+ clk->set_state_shutdown = arch_timer_shutdown_phys;
+ clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
+ sne = erratum_handler(set_next_event_phys);
+ break;
+ default:
+ BUG();
}
+ clk->set_next_event = sne;
+ max_delta = __arch_timer_check_delta();
+
clk->set_state_shutdown(clk);
clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
@@ -1029,7 +828,7 @@ static int arch_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
u32 flags;
- __arch_timer_setup(ARCH_TIMER_TYPE_CP15, clk);
+ __arch_timer_setup(clk);
flags = check_ppi_trigger(arch_timer_ppi[arch_timer_uses_ppi]);
enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
@@ -1075,22 +874,12 @@ static void __init arch_timer_of_configure_rate(u32 rate, struct device_node *np
pr_warn("frequency not available\n");
}
-static void __init arch_timer_banner(unsigned type)
+static void __init arch_timer_banner(void)
{
- pr_info("%s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
- type & ARCH_TIMER_TYPE_CP15 ? "cp15" : "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ?
- " and " : "",
- type & ARCH_TIMER_TYPE_MEM ? "mmio" : "",
+ pr_info("cp15 timer running at %lu.%02luMHz (%s).\n",
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
- type & ARCH_TIMER_TYPE_CP15 ?
- (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys" :
- "",
- type == (ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM) ? "/" : "",
- type & ARCH_TIMER_TYPE_MEM ?
- arch_timer_mem_use_virtual ? "virt" : "phys" :
- "");
+ (arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) ? "virt" : "phys");
}
u32 arch_timer_get_rate(void)
@@ -1108,11 +897,6 @@ bool arch_timer_evtstrm_available(void)
return cpumask_test_cpu(raw_smp_processor_id(), &evtstrm_available);
}
-static noinstr u64 arch_counter_get_cntvct_mem(void)
-{
- return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
-}
-
static struct arch_timer_kvm_info arch_timer_kvm_info;
struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
@@ -1120,42 +904,35 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
return &arch_timer_kvm_info;
}
-static void __init arch_counter_register(unsigned type)
+static void __init arch_counter_register(void)
{
u64 (*scr)(void);
+ u64 (*rd)(void);
u64 start_count;
int width;
- /* Register the CP15 based counter if we have one */
- if (type & ARCH_TIMER_TYPE_CP15) {
- u64 (*rd)(void);
-
- if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
- arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntvct_stable;
- scr = raw_counter_get_cntvct_stable;
- } else {
- rd = arch_counter_get_cntvct;
- scr = arch_counter_get_cntvct;
- }
+ if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
+ arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntvct_stable;
+ scr = raw_counter_get_cntvct_stable;
} else {
- if (arch_timer_counter_has_wa()) {
- rd = arch_counter_get_cntpct_stable;
- scr = raw_counter_get_cntpct_stable;
- } else {
- rd = arch_counter_get_cntpct;
- scr = arch_counter_get_cntpct;
- }
+ rd = arch_counter_get_cntvct;
+ scr = arch_counter_get_cntvct;
}
-
- arch_timer_read_counter = rd;
- clocksource_counter.vdso_clock_mode = vdso_default;
} else {
- arch_timer_read_counter = arch_counter_get_cntvct_mem;
- scr = arch_counter_get_cntvct_mem;
+ if (arch_timer_counter_has_wa()) {
+ rd = arch_counter_get_cntpct_stable;
+ scr = raw_counter_get_cntpct_stable;
+ } else {
+ rd = arch_counter_get_cntpct;
+ scr = arch_counter_get_cntpct;
+ }
}
+ arch_timer_read_counter = rd;
+ clocksource_counter.vdso_clock_mode = vdso_default;
+
width = arch_counter_get_width();
clocksource_counter.mask = CLOCKSOURCE_MASK(width);
cyclecounter.mask = CLOCKSOURCE_MASK(width);
@@ -1303,76 +1080,10 @@ out:
return err;
}
-static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
-{
- int ret;
- irq_handler_t func;
-
- arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
- if (!arch_timer_mem)
- return -ENOMEM;
-
- arch_timer_mem->base = base;
- arch_timer_mem->evt.irq = irq;
- __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);
-
- if (arch_timer_mem_use_virtual)
- func = arch_timer_handler_virt_mem;
- else
- func = arch_timer_handler_phys_mem;
-
- ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
- if (ret) {
- pr_err("Failed to request mem timer irq\n");
- kfree(arch_timer_mem);
- arch_timer_mem = NULL;
- }
-
- return ret;
-}
-
-static const struct of_device_id arch_timer_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer", },
- { .compatible = "arm,armv8-timer", },
- {},
-};
-
-static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
- { .compatible = "arm,armv7-timer-mem", },
- {},
-};
-
-static bool __init arch_timer_needs_of_probing(void)
-{
- struct device_node *dn;
- bool needs_probing = false;
- unsigned int mask = ARCH_TIMER_TYPE_CP15 | ARCH_TIMER_TYPE_MEM;
-
- /* We have two timers, and both device-tree nodes are probed. */
- if ((arch_timers_present & mask) == mask)
- return false;
-
- /*
- * Only one type of timer is probed,
- * check if we have another type of timer node in device-tree.
- */
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15)
- dn = of_find_matching_node(NULL, arch_timer_mem_of_match);
- else
- dn = of_find_matching_node(NULL, arch_timer_of_match);
-
- if (dn && of_device_is_available(dn))
- needs_probing = true;
-
- of_node_put(dn);
-
- return needs_probing;
-}
-
static int __init arch_timer_common_init(void)
{
- arch_timer_banner(arch_timers_present);
- arch_counter_register(arch_timers_present);
+ arch_timer_banner();
+ arch_counter_register();
return arch_timer_arch_init();
}
@@ -1421,13 +1132,11 @@ static int __init arch_timer_of_init(struct device_node *np)
u32 rate;
bool has_names;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("multiple nodes in dt, skipping\n");
return 0;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
has_names = of_property_present(np, "interrupt-names");
for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) {
@@ -1472,283 +1181,22 @@ static int __init arch_timer_of_init(struct device_node *np)
if (ret)
return ret;
- if (arch_timer_needs_of_probing())
- return 0;
-
return arch_timer_common_init();
}
TIMER_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
TIMER_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
-static u32 __init
-arch_timer_mem_frame_get_cntfrq(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- u32 rate;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Unable to map frame @ %pa\n", &frame->cntbase);
- return 0;
- }
-
- rate = readl_relaxed(base + CNTFRQ);
-
- iounmap(base);
-
- return rate;
-}
-
-static struct arch_timer_mem_frame * __init
-arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- void __iomem *cntctlbase;
- u32 cnttidr;
- int i;
-
- cntctlbase = ioremap(timer_mem->cntctlbase, timer_mem->size);
- if (!cntctlbase) {
- pr_err("Can't map CNTCTLBase @ %pa\n",
- &timer_mem->cntctlbase);
- return NULL;
- }
-
- cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
-
- /*
- * Try to find a virtual capable frame. Otherwise fall back to a
- * physical capable frame.
- */
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
- CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
-
- frame = &timer_mem->frame[i];
- if (!frame->valid)
- continue;
-
- /* Try enabling everything, and see what sticks */
- writel_relaxed(cntacr, cntctlbase + CNTACR(i));
- cntacr = readl_relaxed(cntctlbase + CNTACR(i));
-
- if ((cnttidr & CNTTIDR_VIRT(i)) &&
- !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) {
- best_frame = frame;
- arch_timer_mem_use_virtual = true;
- break;
- }
-
- if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT))
- continue;
-
- best_frame = frame;
- }
-
- iounmap(cntctlbase);
-
- return best_frame;
-}
-
-static int __init
-arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
-{
- void __iomem *base;
- int ret, irq;
-
- if (arch_timer_mem_use_virtual)
- irq = frame->virt_irq;
- else
- irq = frame->phys_irq;
-
- if (!irq) {
- pr_err("Frame missing %s irq.\n",
- arch_timer_mem_use_virtual ? "virt" : "phys");
- return -EINVAL;
- }
-
- if (!request_mem_region(frame->cntbase, frame->size,
- "arch_mem_timer"))
- return -EBUSY;
-
- base = ioremap(frame->cntbase, frame->size);
- if (!base) {
- pr_err("Can't map frame's registers\n");
- return -ENXIO;
- }
-
- ret = arch_timer_mem_register(base, irq);
- if (ret) {
- iounmap(base);
- return ret;
- }
-
- arch_timers_present |= ARCH_TIMER_TYPE_MEM;
-
- return 0;
-}
-
-static int __init arch_timer_mem_of_init(struct device_node *np)
-{
- struct arch_timer_mem *timer_mem;
- struct arch_timer_mem_frame *frame;
- struct resource res;
- int ret = -EINVAL;
- u32 rate;
-
- timer_mem = kzalloc(sizeof(*timer_mem), GFP_KERNEL);
- if (!timer_mem)
- return -ENOMEM;
-
- if (of_address_to_resource(np, 0, &res))
- goto out;
- timer_mem->cntctlbase = res.start;
- timer_mem->size = resource_size(&res);
-
- for_each_available_child_of_node_scoped(np, frame_node) {
- u32 n;
- struct arch_timer_mem_frame *frame;
-
- if (of_property_read_u32(frame_node, "frame-number", &n)) {
- pr_err(FW_BUG "Missing frame-number.\n");
- goto out;
- }
- if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
- pr_err(FW_BUG "Wrong frame-number, only 0-%u are permitted.\n",
- ARCH_TIMER_MEM_MAX_FRAMES - 1);
- goto out;
- }
- frame = &timer_mem->frame[n];
-
- if (frame->valid) {
- pr_err(FW_BUG "Duplicated frame-number.\n");
- goto out;
- }
-
- if (of_address_to_resource(frame_node, 0, &res))
- goto out;
-
- frame->cntbase = res.start;
- frame->size = resource_size(&res);
-
- frame->virt_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_VIRT_SPI);
- frame->phys_irq = irq_of_parse_and_map(frame_node,
- ARCH_TIMER_PHYS_SPI);
-
- frame->valid = true;
- }
-
- frame = arch_timer_mem_find_best_frame(timer_mem);
- if (!frame) {
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer_mem->cntctlbase);
- ret = -EINVAL;
- goto out;
- }
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- arch_timer_of_configure_rate(rate, np);
-
- ret = arch_timer_mem_frame_register(frame);
- if (!ret && !arch_timer_needs_of_probing())
- ret = arch_timer_common_init();
-out:
- kfree(timer_mem);
- return ret;
-}
-TIMER_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
- arch_timer_mem_of_init);
-
#ifdef CONFIG_ACPI_GTDT
-static int __init
-arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem)
-{
- struct arch_timer_mem_frame *frame;
- u32 rate;
- int i;
-
- for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
- frame = &timer_mem->frame[i];
-
- if (!frame->valid)
- continue;
-
- rate = arch_timer_mem_frame_get_cntfrq(frame);
- if (rate == arch_timer_rate)
- continue;
-
- pr_err(FW_BUG "CNTFRQ mismatch: frame @ %pa: (0x%08lx), CPU: (0x%08lx)\n",
- &frame->cntbase,
- (unsigned long)rate, (unsigned long)arch_timer_rate);
-
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __init arch_timer_mem_acpi_init(int platform_timer_count)
-{
- struct arch_timer_mem *timers, *timer;
- struct arch_timer_mem_frame *frame, *best_frame = NULL;
- int timer_count, i, ret = 0;
-
- timers = kcalloc(platform_timer_count, sizeof(*timers),
- GFP_KERNEL);
- if (!timers)
- return -ENOMEM;
-
- ret = acpi_arch_timer_mem_init(timers, &timer_count);
- if (ret || !timer_count)
- goto out;
-
- /*
- * While unlikely, it's theoretically possible that none of the frames
- * in a timer expose the combination of feature we want.
- */
- for (i = 0; i < timer_count; i++) {
- timer = &timers[i];
-
- frame = arch_timer_mem_find_best_frame(timer);
- if (!best_frame)
- best_frame = frame;
-
- ret = arch_timer_mem_verify_cntfrq(timer);
- if (ret) {
- pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n");
- goto out;
- }
-
- if (!best_frame) /* implies !frame */
- /*
- * Only complain about missing suitable frames if we
- * haven't already found one in a previous iteration.
- */
- pr_err("Unable to find a suitable frame in timer @ %pa\n",
- &timer->cntctlbase);
- }
-
- if (best_frame)
- ret = arch_timer_mem_frame_register(best_frame);
-out:
- kfree(timers);
- return ret;
-}
-
-/* Initialize per-processor generic timer and memory-mapped timer(if present) */
static int __init arch_timer_acpi_init(struct acpi_table_header *table)
{
- int ret, platform_timer_count;
+ int ret;
- if (arch_timers_present & ARCH_TIMER_TYPE_CP15) {
+ if (arch_timer_evt) {
pr_warn("already initialized, skipping\n");
return -EINVAL;
}
- arch_timers_present |= ARCH_TIMER_TYPE_CP15;
-
- ret = acpi_gtdt_init(table, &platform_timer_count);
+ ret = acpi_gtdt_init(table, NULL);
if (ret)
return ret;
@@ -1790,10 +1238,6 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
if (ret)
return ret;
- if (platform_timer_count &&
- arch_timer_mem_acpi_init(platform_timer_count))
- pr_err("Failed to initialize memory-mapped timer.\n");
-
return arch_timer_common_init();
}
TIMER_ACPI_DECLARE(arch_timer, ACPI_SIG_GTDT, arch_timer_acpi_init);
diff --git a/drivers/clocksource/arm_arch_timer_mmio.c b/drivers/clocksource/arm_arch_timer_mmio.c
new file mode 100644
index 000000000000..d10362692fdd
--- /dev/null
+++ b/drivers/clocksource/arm_arch_timer_mmio.c
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARM Generic Memory Mapped Timer support
+ *
+ * Split from drivers/clocksource/arm_arch_timer.c
+ *
+ * Copyright (C) 2011 ARM Ltd.
+ * All Rights Reserved
+ */
+
+#define pr_fmt(fmt) "arch_timer_mmio: " fmt
+
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <clocksource/arm_arch_timer.h>
+
+#define CNTTIDR 0x08
+#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
+
+#define CNTACR(n) (0x40 + ((n) * 4))
+#define CNTACR_RPCT BIT(0)
+#define CNTACR_RVCT BIT(1)
+#define CNTACR_RFRQ BIT(2)
+#define CNTACR_RVOFF BIT(3)
+#define CNTACR_RWVT BIT(4)
+#define CNTACR_RWPT BIT(5)
+
+#define CNTPCT_LO 0x00
+#define CNTVCT_LO 0x08
+#define CNTFRQ 0x10
+#define CNTP_CVAL_LO 0x20
+#define CNTP_CTL 0x2c
+#define CNTV_CVAL_LO 0x30
+#define CNTV_CTL 0x3c
+
+enum arch_timer_access {
+ PHYS_ACCESS,
+ VIRT_ACCESS,
+};
+
+struct arch_timer {
+ struct clock_event_device evt;
+ struct clocksource cs;
+ struct arch_timer_mem *gt_block;
+ void __iomem *base;
+ enum arch_timer_access access;
+ u32 rate;
+};
+
+#define evt_to_arch_timer(e) container_of(e, struct arch_timer, evt)
+#define cs_to_arch_timer(c) container_of(c, struct arch_timer, cs)
+
+static void arch_timer_mmio_write(struct arch_timer *timer,
+ enum arch_timer_reg reg, u64 val)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTP_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /*
+ * Not guaranteed to be atomic, so the timer
+ * must be disabled at this point.
+ */
+ writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
+ return;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed((u32)val, timer->base + CNTV_CTL);
+ return;
+ case ARCH_TIMER_REG_CVAL:
+ /* Same restriction as above */
+ writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
+ return;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+}
+
+static u32 arch_timer_mmio_read(struct arch_timer *timer, enum arch_timer_reg reg)
+{
+ switch (timer->access) {
+ case PHYS_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTP_CTL);
+ default:
+ break;
+ }
+ break;
+ case VIRT_ACCESS:
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ return readl_relaxed(timer->base + CNTV_CTL);
+ default:
+ break;
+ }
+ break;
+ }
+
+ /* Should never be here */
+ WARN_ON_ONCE(1);
+ return 0;
+}
+
+static noinstr u64 arch_counter_mmio_get_cnt(struct arch_timer *t)
+{
+ int offset_lo = t->access == VIRT_ACCESS ? CNTVCT_LO : CNTPCT_LO;
+ u32 cnt_lo, cnt_hi, tmp_hi;
+
+ do {
+ cnt_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ cnt_lo = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo));
+ tmp_hi = __le32_to_cpu((__le32 __force)__raw_readl(t->base + offset_lo + 4));
+ } while (cnt_hi != tmp_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static u64 arch_mmio_counter_read(struct clocksource *cs)
+{
+ struct arch_timer *at = cs_to_arch_timer(cs);
+
+ return arch_counter_mmio_get_cnt(at);
+}
+
+static int arch_timer_mmio_shutdown(struct clock_event_device *clk)
+{
+ struct arch_timer *at = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+
+ return 0;
+}
+
+static int arch_timer_mmio_set_next_event(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct arch_timer *timer = evt_to_arch_timer(clk);
+ unsigned long ctrl;
+ u64 cnt;
+
+ ctrl = arch_timer_mmio_read(timer, ARCH_TIMER_REG_CTRL);
+
+ /* Timer must be disabled before programming CVAL */
+ if (ctrl & ARCH_TIMER_CTRL_ENABLE) {
+ ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ }
+
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+ cnt = arch_counter_mmio_get_cnt(timer);
+
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CVAL, evt + cnt);
+ arch_timer_mmio_write(timer, ARCH_TIMER_REG_CTRL, ctrl);
+ return 0;
+}
+
+static irqreturn_t arch_timer_mmio_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ struct arch_timer *at = evt_to_arch_timer(evt);
+ unsigned long ctrl;
+
+ ctrl = arch_timer_mmio_read(at, ARCH_TIMER_REG_CTRL);
+ if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
+ ctrl |= ARCH_TIMER_CTRL_IT_MASK;
+ arch_timer_mmio_write(at, ARCH_TIMER_REG_CTRL, ctrl);
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static struct arch_timer_mem_frame *find_best_frame(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame, *best_frame = NULL;
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ void __iomem *cntctlbase;
+ u32 cnttidr;
+
+ cntctlbase = ioremap(at->gt_block->cntctlbase, at->gt_block->size);
+ if (!cntctlbase) {
+ dev_err(&pdev->dev, "Can't map CNTCTLBase @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return NULL;
+ }
+
+ cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
+
+ /*
+ * Try to find a virtual capable frame. Otherwise fall back to a
+ * physical capable frame.
+ */
+ for (int i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
+ u32 cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT |
+ CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT;
+
+ frame = &at->gt_block->frame[i];
+ if (!frame->valid)
+ continue;
+
+ /* Try enabling everything, and see what sticks */
+ writel_relaxed(cntacr, cntctlbase + CNTACR(i));
+ cntacr = readl_relaxed(cntctlbase + CNTACR(i));
+
+ /* Pick a suitable frame for which we have an IRQ */
+ if ((cnttidr & CNTTIDR_VIRT(i)) &&
+ !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT)) &&
+ frame->virt_irq) {
+ best_frame = frame;
+ at->access = VIRT_ACCESS;
+ break;
+ }
+
+ if ((~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) ||
+ !frame->phys_irq)
+ continue;
+
+ at->access = PHYS_ACCESS;
+ best_frame = frame;
+ }
+
+ iounmap(cntctlbase);
+
+ return best_frame;
+}
+
+static void arch_timer_mmio_setup(struct arch_timer *at, int irq)
+{
+ at->evt = (struct clock_event_device) {
+ .features = (CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ),
+ .name = "arch_mem_timer",
+ .rating = 400,
+ .cpumask = cpu_possible_mask,
+ .irq = irq,
+ .set_next_event = arch_timer_mmio_set_next_event,
+ .set_state_oneshot_stopped = arch_timer_mmio_shutdown,
+ .set_state_shutdown = arch_timer_mmio_shutdown,
+ };
+
+ at->evt.set_state_shutdown(&at->evt);
+
+ clockevents_config_and_register(&at->evt, at->rate, 0xf,
+ (unsigned long)CLOCKSOURCE_MASK(56));
+
+ enable_irq(at->evt.irq);
+
+ at->cs = (struct clocksource) {
+ .name = "arch_mmio_counter",
+ .rating = 300,
+ .read = arch_mmio_counter_read,
+ .mask = CLOCKSOURCE_MASK(56),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ };
+
+ clocksource_register_hz(&at->cs, at->rate);
+}
+
+static int arch_timer_mmio_frame_register(struct platform_device *pdev,
+ struct arch_timer_mem_frame *frame)
+{
+ struct arch_timer *at = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ int ret, irq;
+ u32 rate;
+
+ if (!devm_request_mem_region(&pdev->dev, frame->cntbase, frame->size,
+ "arch_mem_timer"))
+ return -EBUSY;
+
+ at->base = devm_ioremap(&pdev->dev, frame->cntbase, frame->size);
+ if (!at->base) {
+ dev_err(&pdev->dev, "Can't map frame's registers\n");
+ return -ENXIO;
+ }
+
+ /*
+ * Allow "clock-frequency" to override the probed rate. If neither
+ * leads to something useful, use the CPU timer frequency as the
+ * fallback. The nice thing about that last point is that we wouldn't
+ * have made it here without a valid frequency.
+ */
+ rate = readl_relaxed(at->base + CNTFRQ);
+
+ if (!np || of_property_read_u32(np, "clock-frequency", &at->rate))
+ at->rate = rate;
+
+ if (!at->rate)
+ at->rate = arch_timer_get_rate();
+
+ irq = at->access == VIRT_ACCESS ? frame->virt_irq : frame->phys_irq;
+ ret = devm_request_irq(&pdev->dev, irq, arch_timer_mmio_handler,
+ IRQF_TIMER | IRQF_NO_AUTOEN, "arch_mem_timer",
+ &at->evt);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request mem timer irq\n");
+ return ret;
+ }
+
+ /* After this point, we're not allowed to fail anymore */
+ arch_timer_mmio_setup(at, irq);
+ return 0;
+}
+
+static int of_populate_gt_block(struct platform_device *pdev,
+ struct arch_timer *at)
+{
+ struct resource res;
+
+ if (of_address_to_resource(pdev->dev.of_node, 0, &res))
+ return -EINVAL;
+
+ at->gt_block->cntctlbase = res.start;
+ at->gt_block->size = resource_size(&res);
+
+ for_each_available_child_of_node_scoped(pdev->dev.of_node, frame_node) {
+ struct arch_timer_mem_frame *frame;
+ u32 n;
+
+ if (of_property_read_u32(frame_node, "frame-number", &n)) {
+ dev_err(&pdev->dev, FW_BUG "Missing frame-number\n");
+ return -EINVAL;
+ }
+ if (n >= ARCH_TIMER_MEM_MAX_FRAMES) {
+ dev_err(&pdev->dev,
+ FW_BUG "Wrong frame-number, only 0-%u are permitted\n",
+ ARCH_TIMER_MEM_MAX_FRAMES - 1);
+ return -EINVAL;
+ }
+
+ frame = &at->gt_block->frame[n];
+
+ if (frame->valid) {
+ dev_err(&pdev->dev, FW_BUG "Duplicated frame-number\n");
+ return -EINVAL;
+ }
+
+ if (of_address_to_resource(frame_node, 0, &res))
+ return -EINVAL;
+
+ frame->cntbase = res.start;
+ frame->size = resource_size(&res);
+
+ frame->phys_irq = irq_of_parse_and_map(frame_node, 0);
+ frame->virt_irq = irq_of_parse_and_map(frame_node, 1);
+
+ frame->valid = true;
+ }
+
+ return 0;
+}
+
+static int arch_timer_mmio_probe(struct platform_device *pdev)
+{
+ struct arch_timer_mem_frame *frame;
+ struct arch_timer *at;
+ struct device_node *np;
+ int ret;
+
+ np = pdev->dev.of_node;
+
+ at = devm_kmalloc(&pdev->dev, sizeof(*at), GFP_KERNEL | __GFP_ZERO);
+ if (!at)
+ return -ENOMEM;
+
+ if (np) {
+ at->gt_block = devm_kmalloc(&pdev->dev, sizeof(*at->gt_block),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!at->gt_block)
+ return -ENOMEM;
+ ret = of_populate_gt_block(pdev, at);
+ if (ret)
+ return ret;
+ } else {
+ at->gt_block = dev_get_platdata(&pdev->dev);
+ }
+
+ platform_set_drvdata(pdev, at);
+
+ frame = find_best_frame(pdev);
+ if (!frame) {
+ dev_err(&pdev->dev,
+ "Unable to find a suitable frame in timer @ %pa\n",
+ &at->gt_block->cntctlbase);
+ return -EINVAL;
+ }
+
+ ret = arch_timer_mmio_frame_register(pdev, frame);
+ if (!ret)
+ dev_info(&pdev->dev,
+ "mmio timer running at %lu.%02luMHz (%s)\n",
+ (unsigned long)at->rate / 1000000,
+ (unsigned long)(at->rate / 10000) % 100,
+ at->access == VIRT_ACCESS ? "virt" : "phys");
+
+ return ret;
+}
+
+static const struct of_device_id arch_timer_mmio_of_table[] = {
+ { .compatible = "arm,armv7-timer-mem", },
+ {}
+};
+
+static struct platform_driver arch_timer_mmio_drv = {
+ .driver = {
+ .name = "arch-timer-mmio",
+ .of_match_table = arch_timer_mmio_of_table,
+ .suppress_bind_attrs = true,
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_drv);
+
+static struct platform_driver arch_timer_mmio_acpi_drv = {
+ .driver = {
+ .name = "gtdt-arm-mmio-timer",
+ .suppress_bind_attrs = true,
+ },
+ .probe = arch_timer_mmio_probe,
+};
+builtin_platform_driver(arch_timer_mmio_acpi_drv);
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 2d86bbc2764a..5e3d6bb7e437 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -263,14 +263,13 @@ static void __init gt_delay_timer_init(void)
register_current_timer_delay(&gt_delay_timer);
}
-static int __init gt_clocksource_init(void)
+static int __init gt_clocksource_init(unsigned int psv)
{
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
- writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK,
- CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) |
+ writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK, psv - 1) |
GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
@@ -338,11 +337,45 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
return NOTIFY_DONE;
}
+struct gt_prescaler_config {
+ const char *compatible;
+ unsigned long prescaler;
+};
+
+static const struct gt_prescaler_config gt_prescaler_configs[] = {
+ /*
+ * On am43 the global timer clock is a child of the clock used for CPU
+ * OPPs, so the initial prescaler has to be compatible with all OPPs,
+ * which are 300, 600, 720, 800 and 1000 MHz with a fixed divider of 2;
+ * this gives us a GCD of 10 MHz. The initial frequency is 1000 MHz, so
+ * the prescaler is 50.
+ */
+ { .compatible = "ti,am43", .prescaler = 50 },
+ { .compatible = "xlnx,zynq-7000", .prescaler = 2 },
+ { .compatible = NULL }
+};
+
+static unsigned long gt_get_initial_prescaler_value(struct device_node *np)
+{
+ const struct gt_prescaler_config *config;
+
+ if (CONFIG_ARM_GT_INITIAL_PRESCALER_VAL != 0)
+ return CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+
+ for (config = gt_prescaler_configs; config->compatible; config++) {
+ if (of_machine_is_compatible(config->compatible))
+ return config->prescaler;
+ }
+
+ return 1;
+}
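
A quick check of the am43 arithmetic in the comment above (all numbers taken
from that comment):

	global timer input = OPP / 2      -> 150, 300, 360, 400, 500 MHz
	GCD(150, 300, 360, 400, 500) MHz  =  10 MHz
	initial OPP 1000 MHz / 2          = 500 MHz
	prescaler                         = 500 / 10 = 50

so a prescaler of 50 pins the timer to a 10 MHz rate that divides evenly into
every OPP-derived input clock.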
+
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
static unsigned long gt_clk_rate;
int err;
+ unsigned long psv;
/*
* In A9 r2p0 the comparators for each processor with the global timer
@@ -378,8 +411,9 @@ static int __init global_timer_of_register(struct device_node *np)
goto out_unmap;
}
+ psv = gt_get_initial_prescaler_value(np);
gt_clk_rate = clk_get_rate(gt_clk);
- gt_target_rate = gt_clk_rate / CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+ gt_target_rate = gt_clk_rate / psv;
gt_clk_rate_change_nb.notifier_call =
gt_clk_rate_change_cb;
err = clk_notifier_register(gt_clk, &gt_clk_rate_change_nb);
@@ -404,7 +438,7 @@ static int __init global_timer_of_register(struct device_node *np)
}
/* Register and immediately configure the timer on the boot CPU */
- err = gt_clocksource_init();
+ err = gt_clocksource_init(psv);
if (err)
goto out_irq;
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index e95fdc49c226..bbceb0289d45 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -78,24 +78,33 @@ static int __init clps711x_timer_init(struct device_node *np)
unsigned int irq = irq_of_parse_and_map(np, 0);
struct clk *clock = of_clk_get(np, 0);
void __iomem *base = of_iomap(np, 0);
+ int ret = 0;
if (!base)
return -ENOMEM;
- if (!irq)
- return -EINVAL;
- if (IS_ERR(clock))
- return PTR_ERR(clock);
+ if (!irq) {
+ ret = -EINVAL;
+ goto unmap_io;
+ }
+ if (IS_ERR(clock)) {
+ ret = PTR_ERR(clock);
+ goto unmap_io;
+ }
switch (of_alias_get_id(np, "timer")) {
case CLPS711X_CLKSRC_CLOCKSOURCE:
clps711x_clksrc_init(clock, base);
break;
case CLPS711X_CLKSRC_CLOCKEVENT:
- return _clps711x_clkevt_init(clock, base, irq);
+ ret = _clps711x_clkevt_init(clock, base, irq);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
- return 0;
+unmap_io:
+ iounmap(base);
+ return ret;
}
TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
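
The clps711x change above is the standard kernel unwind idiom: once of_iomap()
has succeeded, every later failure must release the mapping through a single
exit path instead of returning directly. A generic sketch of the pattern
(do_setup() is a hypothetical stand-in for the later steps):

	static int __init example_timer_init(struct device_node *np)
	{
		void __iomem *base = of_iomap(np, 0);
		int ret;

		if (!base)
			return -ENOMEM;

		ret = do_setup(np);
		if (ret)
			goto unmap_io;	/* undo of_iomap() on failure */

		return 0;

	unmap_io:
		iounmap(base);
		return ret;
	}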
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 2edc13ca184e..10356d4ec55c 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -549,14 +549,22 @@ static void __init hv_init_tsc_clocksource(void)
union hv_reference_tsc_msr tsc_msr;
/*
+ * When running as a guest partition:
+ *
* If Hyper-V offers TSC_INVARIANT, then the virtualized TSC correctly
* handles frequency and offset changes due to live migration,
* pause/resume, and other VM management operations. So lower the
* Hyper-V Reference TSC rating, causing the generic TSC to be used.
* TSC_INVARIANT is not offered on ARM64, so the Hyper-V Reference
* TSC will be preferred over the virtualized ARM64 arch counter.
+ *
+ * When running as the root partition:
+ *
+ * There is no HV_ACCESS_TSC_INVARIANT feature. Always lower the rating
+ * of the Hyper-V Reference TSC.
*/
- if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+ if ((ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) ||
+ hv_root_partition()) {
hyperv_cs_tsc.rating = 250;
hyperv_cs_msr.rating = 245;
}
diff --git a/drivers/clocksource/ingenic-sysost.c b/drivers/clocksource/ingenic-sysost.c
index cb6fc2f152d4..e79cfb0b8e05 100644
--- a/drivers/clocksource/ingenic-sysost.c
+++ b/drivers/clocksource/ingenic-sysost.c
@@ -127,18 +127,23 @@ static u8 ingenic_ost_get_prescale(unsigned long rate, unsigned long req_rate)
return 2; /* /16 divider */
}
-static long ingenic_ost_round_rate(struct clk_hw *hw, unsigned long req_rate,
- unsigned long *parent_rate)
+static int ingenic_ost_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- unsigned long rate = *parent_rate;
+ unsigned long rate = req->best_parent_rate;
u8 prescale;
- if (req_rate > rate)
- return rate;
+ if (req->rate > rate) {
+ req->rate = rate;
- prescale = ingenic_ost_get_prescale(rate, req_rate);
+ return 0;
+ }
+
+ prescale = ingenic_ost_get_prescale(rate, req->rate);
- return rate >> (prescale * 2);
+ req->rate = rate >> (prescale * 2);
+
+ return 0;
}
static int ingenic_ost_percpu_timer_set_rate(struct clk_hw *hw, unsigned long req_rate,
@@ -175,14 +180,14 @@ static int ingenic_ost_global_timer_set_rate(struct clk_hw *hw, unsigned long re
static const struct clk_ops ingenic_ost_percpu_timer_ops = {
.recalc_rate = ingenic_ost_percpu_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_percpu_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_percpu_timer_set_rate,
};
static const struct clk_ops ingenic_ost_global_timer_ops = {
.recalc_rate = ingenic_ost_global_timer_recalc_rate,
- .round_rate = ingenic_ost_round_rate,
- .set_rate = ingenic_ost_global_timer_set_rate,
+ .determine_rate = ingenic_ost_determine_rate,
+ .set_rate = ingenic_ost_global_timer_set_rate,
};
static const char * const ingenic_ost_clk_parents[] = { "ext" };
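
The ingenic conversion above follows the generic clk framework migration from
.round_rate to .determine_rate: instead of returning the rounded rate, the
callback writes the achievable rate into the request and returns a status.
A minimal template of the new contract (sketch only, pick_closest_rate() is
hypothetical):

	static int example_determine_rate(struct clk_hw *hw,
					  struct clk_rate_request *req)
	{
		/* clamp to the parent rate, as the old round_rate did */
		if (req->rate > req->best_parent_rate)
			req->rate = req->best_parent_rate;
		else
			req->rate = pick_closest_rate(req);

		return 0;	/* negative errno on failure */
	}

For this driver the achievable rates are the parent rate shifted right by
twice the prescale exponent, i.e. dividers of 1, 4 and 16.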
diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c
index c3536fffbe9a..5a99801a1657 100644
--- a/drivers/clocksource/scx200_hrt.c
+++ b/drivers/clocksource/scx200_hrt.c
@@ -52,6 +52,7 @@ static struct clocksource cs_hrt = {
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
/* mult, shift are set based on mhz27 flag */
+ .owner = THIS_MODULE,
};
static int __init init_hrt_clocksource(void)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b72b36e0abed..791b298c995b 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -355,14 +355,6 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
dev_pm_syscore_device(&ch->cmt->pdev->dev, true);
- /* enable clock */
- ret = clk_enable(ch->cmt->clk);
- if (ret) {
- dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n",
- ch->index);
- goto err0;
- }
-
/* make sure channel is disabled */
sh_cmt_start_stop_ch(ch, 0);
@@ -384,19 +376,12 @@ static int sh_cmt_enable(struct sh_cmt_channel *ch)
if (ret || sh_cmt_read_cmcnt(ch)) {
dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n",
ch->index);
- ret = -ETIMEDOUT;
- goto err1;
+ return -ETIMEDOUT;
}
/* enable channel */
sh_cmt_start_stop_ch(ch, 1);
return 0;
- err1:
- /* stop clock */
- clk_disable(ch->cmt->clk);
-
- err0:
- return ret;
}
static void sh_cmt_disable(struct sh_cmt_channel *ch)
@@ -407,9 +392,6 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
/* disable interrupts in CMT block */
sh_cmt_write_cmcsr(ch, 0);
- /* stop clock */
- clk_disable(ch->cmt->clk);
-
dev_pm_syscore_device(&ch->cmt->pdev->dev, false);
}
@@ -578,37 +560,68 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
+static int sh_cmt_start_clocksource(struct sh_cmt_channel *ch)
{
int ret = 0;
unsigned long flags;
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
-
raw_spin_lock_irqsave(&ch->lock, flags);
- if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_get_sync(&ch->cmt->pdev->dev);
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(ch);
- }
if (ret)
goto out;
- ch->flags |= flag;
+
+ ch->flags |= FLAG_CLOCKSOURCE;
/* setup timeout if no clockevent */
- if (ch->cmt->num_channels == 1 &&
- flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
+ if (ch->cmt->num_channels == 1 && !(ch->flags & FLAG_CLOCKEVENT))
__sh_cmt_set_next(ch, ch->max_match_value);
+out:
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+
+ return ret;
+}
+
+static void sh_cmt_stop_clocksource(struct sh_cmt_channel *ch)
+{
+ unsigned long flags;
+ unsigned long f;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
+ f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
+
+ ch->flags &= ~FLAG_CLOCKSOURCE;
+
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ sh_cmt_disable(ch);
+
+ raw_spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+static int sh_cmt_start_clockevent(struct sh_cmt_channel *ch)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&ch->lock, flags);
+
+ if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
+ ret = sh_cmt_enable(ch);
+
+ if (ret)
+ goto out;
+
+ ch->flags |= FLAG_CLOCKEVENT;
out:
raw_spin_unlock_irqrestore(&ch->lock, flags);
return ret;
}
-static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
+static void sh_cmt_stop_clockevent(struct sh_cmt_channel *ch)
{
unsigned long flags;
unsigned long f;
@@ -616,22 +629,17 @@ static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag)
raw_spin_lock_irqsave(&ch->lock, flags);
f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
- ch->flags &= ~flag;
- if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) {
+ ch->flags &= ~FLAG_CLOCKEVENT;
+
+ if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
sh_cmt_disable(ch);
- if (flag & FLAG_CLOCKEVENT)
- pm_runtime_put(&ch->cmt->pdev->dev);
- }
/* adjust the timeout to maximum if only clocksource left */
- if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE))
+ if (ch->flags & FLAG_CLOCKSOURCE)
__sh_cmt_set_next(ch, ch->max_match_value);
raw_spin_unlock_irqrestore(&ch->lock, flags);
-
- if (flag & FLAG_CLOCKSOURCE)
- pm_runtime_put(&ch->cmt->pdev->dev);
}
static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
@@ -672,7 +680,7 @@ static int sh_cmt_clocksource_enable(struct clocksource *cs)
ch->total_cycles = 0;
- ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ ret = sh_cmt_start_clocksource(ch);
if (!ret)
ch->cs_enabled = true;
@@ -685,7 +693,7 @@ static void sh_cmt_clocksource_disable(struct clocksource *cs)
WARN_ON(!ch->cs_enabled);
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
ch->cs_enabled = false;
}
@@ -696,7 +704,7 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
if (!ch->cs_enabled)
return;
- sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_stop_clocksource(ch);
dev_pm_genpd_suspend(&ch->cmt->pdev->dev);
}
@@ -708,7 +716,7 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
return;
dev_pm_genpd_resume(&ch->cmt->pdev->dev);
- sh_cmt_start(ch, FLAG_CLOCKSOURCE);
+ sh_cmt_start_clocksource(ch);
}
static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
@@ -740,7 +748,7 @@ static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced)
static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic)
{
- sh_cmt_start(ch, FLAG_CLOCKEVENT);
+ sh_cmt_start_clockevent(ch);
if (periodic)
sh_cmt_set_next(ch, ((ch->cmt->rate + HZ/2) / HZ) - 1);
@@ -752,7 +760,7 @@ static int sh_cmt_clock_event_shutdown(struct clock_event_device *ced)
{
struct sh_cmt_channel *ch = ced_to_sh_cmt(ced);
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
return 0;
}
@@ -763,7 +771,7 @@ static int sh_cmt_clock_event_set_state(struct clock_event_device *ced,
/* deal with old setting first */
if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
- sh_cmt_stop(ch, FLAG_CLOCKEVENT);
+ sh_cmt_stop_clockevent(ch);
dev_info(&ch->cmt->pdev->dev, "ch%u: used for %s clock events\n",
ch->index, periodic ? "periodic" : "oneshot");
@@ -1100,8 +1108,6 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
mask &= ~(1 << hwidx);
}
- clk_disable(cmt->clk);
-
platform_set_drvdata(pdev, cmt);
return 0;
@@ -1149,8 +1155,6 @@ static int sh_cmt_probe(struct platform_device *pdev)
out:
if (cmt->has_clockevent || cmt->has_clocksource)
pm_runtime_irq_safe(&pdev->dev);
- else
- pm_runtime_idle(&pdev->dev);
return 0;
}
diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index 54284c1c0651..f2b4cc40db93 100644
--- a/drivers/clocksource/timer-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
@@ -207,14 +207,14 @@ static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
-static int armada_370_xp_timer_suspend(void)
+static int armada_370_xp_timer_suspend(void *data)
{
timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF);
timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF);
return 0;
}
-static void armada_370_xp_timer_resume(void)
+static void armada_370_xp_timer_resume(void *data)
{
writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
@@ -222,11 +222,15 @@ static void armada_370_xp_timer_resume(void)
writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
}
-static struct syscore_ops armada_370_xp_timer_syscore_ops = {
+static const struct syscore_ops armada_370_xp_timer_syscore_ops = {
.suspend = armada_370_xp_timer_suspend,
.resume = armada_370_xp_timer_resume,
};
+static struct syscore armada_370_xp_timer_syscore = {
+ .ops = &armada_370_xp_timer_syscore_ops,
+};
+
static unsigned long armada_370_delay_timer_read(void)
{
return ~readl(timer_base + TIMER0_VAL_OFF);
@@ -324,7 +328,7 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np)
return res;
}
- register_syscore_ops(&armada_370_xp_timer_syscore_ops);
+ register_syscore(&armada_370_xp_timer_syscore);
return 0;
}
diff --git a/drivers/clocksource/timer-cs5535.c b/drivers/clocksource/timer-cs5535.c
index d47acfe848ae..8af666c39890 100644
--- a/drivers/clocksource/timer-cs5535.c
+++ b/drivers/clocksource/timer-cs5535.c
@@ -101,6 +101,7 @@ static struct clock_event_device cs5535_clockevent = {
.tick_resume = mfgpt_shutdown,
.set_next_event = mfgpt_next_event,
.rating = 250,
+ .owner = THIS_MODULE,
};
static irqreturn_t mfgpt_tick(int irq, void *dev_id)
diff --git a/drivers/clocksource/timer-econet-en751221.c b/drivers/clocksource/timer-econet-en751221.c
index 3b449fdaafee..4008076b1a21 100644
--- a/drivers/clocksource/timer-econet-en751221.c
+++ b/drivers/clocksource/timer-econet-en751221.c
@@ -146,7 +146,7 @@ static int __init cevt_init(struct device_node *np)
for_each_possible_cpu(i) {
struct clock_event_device *cd = &per_cpu(econet_timer_pcpu, i);
- cd->rating = 310,
+ cd->rating = 310;
cd->features = CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
diff --git a/drivers/clocksource/timer-nxp-pit.c b/drivers/clocksource/timer-nxp-pit.c
new file mode 100644
index 000000000000..d1740f18f718
--- /dev/null
+++ b/drivers/clocksource/timer-nxp-pit.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2012-2013 Freescale Semiconductor, Inc.
+ * Copyright 2018,2021-2025 NXP
+ */
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/cpuhotplug.h>
+#include <linux/clk.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+#include <linux/platform_device.h>
+
+/*
+ * Each PIT channel takes 0x10 bytes of register space
+ */
+#define PIT0_OFFSET 0x100
+#define PIT_CH(n) (PIT0_OFFSET + 0x10 * (n))
+
+#define PITMCR(__base) (__base)
+
+#define PITMCR_FRZ BIT(0)
+#define PITMCR_MDIS BIT(1)
+
+#define PITLDVAL(__base) (__base)
+#define PITTCTRL(__base) ((__base) + 0x08)
+
+#define PITCVAL_OFFSET 0x04
+#define PITCVAL(__base) ((__base) + 0x04)
+
+#define PITTCTRL_TEN BIT(0)
+#define PITTCTRL_TIE BIT(1)
+
+#define PITTFLG(__base) ((__base) + 0x0c)
+
+#define PITTFLG_TIF BIT(0)
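
With PIT0_OFFSET at 0x100 and a 0x10 stride, the channels used by this driver
sit at fixed offsets from the module base; for example (derived from the
macros above):

	PIT_CH(2)          = 0x100 + 0x10 * 2 = 0x120	/* clocksource channel */
	PIT_CH(3)          = 0x100 + 0x10 * 3 = 0x130	/* clockevent channel  */
	PITCVAL of PIT_CH(2) = 0x120 + 0x04   = 0x124	/* its current counter */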
+
+struct pit_timer {
+ void __iomem *clksrc_base;
+ void __iomem *clkevt_base;
+ struct clock_event_device ced;
+ struct clocksource cs;
+ int rate;
+};
+
+struct pit_timer_data {
+ int max_pit_instances;
+};
+
+static DEFINE_PER_CPU(struct pit_timer *, pit_timers);
+
+/*
+ * Global state for initializing multiple PIT instances
+ */
+static int pit_instances;
+static int max_pit_instances = 1;
+
+static void __iomem *sched_clock_base;
+
+static inline struct pit_timer *ced_to_pit(struct clock_event_device *ced)
+{
+ return container_of(ced, struct pit_timer, ced);
+}
+
+static inline struct pit_timer *cs_to_pit(struct clocksource *cs)
+{
+ return container_of(cs, struct pit_timer, cs);
+}
+
+static inline void pit_module_enable(void __iomem *base)
+{
+ writel(0, PITMCR(base));
+}
+
+static inline void pit_module_disable(void __iomem *base)
+{
+ writel(PITMCR_MDIS, PITMCR(base));
+}
+
+static inline void pit_timer_enable(void __iomem *base, bool tie)
+{
+ u32 val = PITTCTRL_TEN | (tie ? PITTCTRL_TIE : 0);
+
+ writel(val, PITTCTRL(base));
+}
+
+static inline void pit_timer_disable(void __iomem *base)
+{
+ writel(0, PITTCTRL(base));
+}
+
+static inline void pit_timer_set_counter(void __iomem *base, unsigned int cnt)
+{
+ writel(cnt, PITLDVAL(base));
+}
+
+static inline void pit_timer_irqack(struct pit_timer *pit)
+{
+ writel(PITTFLG_TIF, PITTFLG(pit->clkevt_base));
+}
+
+static u64 notrace pit_read_sched_clock(void)
+{
+ return ~readl(sched_clock_base);
+}
+
+static u64 pit_timer_clocksource_read(struct clocksource *cs)
+{
+ struct pit_timer *pit = cs_to_pit(cs);
+
+ return (u64)~readl(PITCVAL(pit->clksrc_base));
+}
+
+static int pit_clocksource_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate)
+{
+ /*
+ * Channels 0 and 1 can be chained to build a 64-bit timer. Let's
+ * use channel 2 as a clocksource and leave channels 0 and 1 unused
+ * for anyone else who needs them.
+ */
+ pit->clksrc_base = base + PIT_CH(2);
+ pit->cs.name = name;
+ pit->cs.rating = 300;
+ pit->cs.read = pit_timer_clocksource_read;
+ pit->cs.mask = CLOCKSOURCE_MASK(32);
+ pit->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ /* set the max load value and start the clock source counter */
+ pit_timer_disable(pit->clksrc_base);
+ pit_timer_set_counter(pit->clksrc_base, ~0);
+ pit_timer_enable(pit->clksrc_base, 0);
+
+ sched_clock_base = pit->clksrc_base + PITCVAL_OFFSET;
+ sched_clock_register(pit_read_sched_clock, 32, rate);
+
+ return clocksource_register_hz(&pit->cs, rate);
+}
+
+static int pit_set_next_event(unsigned long delta, struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ /*
+ * Setting a new value in the PITLDVAL register will not restart the
+ * timer. To abort the current cycle and start a timer period with the
+ * new value, the timer must be disabled and enabled again. Also,
+ * PITLDVAL should be set to delta minus one, per the PIT hardware
+ * requirement.
+ */
+ pit_timer_disable(pit->clkevt_base);
+ pit_timer_set_counter(pit->clkevt_base, delta - 1);
+ pit_timer_enable(pit->clkevt_base, true);
+
+ return 0;
+}
+
+static int pit_shutdown(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_disable(pit->clkevt_base);
+
+ return 0;
+}
+
+static int pit_set_periodic(struct clock_event_device *ced)
+{
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_set_next_event(pit->rate / HZ, ced);
+
+ return 0;
+}
+
+static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *ced = dev_id;
+ struct pit_timer *pit = ced_to_pit(ced);
+
+ pit_timer_irqack(pit);
+
+ /*
+ * The PIT hardware doesn't support oneshot mode: it generates an
+ * interrupt, reloads the counter value from PITLDVAL when PITCVAL
+ * reaches zero, and starts counting again. So software needs to
+ * disable the timer to stop the counter loop in ONESHOT mode.
+ */
+ if (likely(clockevent_state_oneshot(ced)))
+ pit_timer_disable(pit->clkevt_base);
+
+ ced->event_handler(ced);
+
+ return IRQ_HANDLED;
+}
+
+static int pit_clockevent_per_cpu_init(struct pit_timer *pit, const char *name,
+ void __iomem *base, unsigned long rate,
+ int irq, unsigned int cpu)
+{
+ int ret;
+
+ /*
+ * Channels 0 and 1 can be chained to build a 64-bit timer. Let's
+ * use channel 3 as a clockevent and leave channels 0 and 1 unused
+ * for anyone else who needs them.
+ */
+ pit->clkevt_base = base + PIT_CH(3);
+ pit->rate = rate;
+
+ pit_timer_disable(pit->clkevt_base);
+
+ pit_timer_irqack(pit);
+
+ ret = request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_NOBALANCING,
+ name, &pit->ced);
+ if (ret)
+ return ret;
+
+ pit->ced.cpumask = cpumask_of(cpu);
+ pit->ced.irq = irq;
+
+ pit->ced.name = name;
+ pit->ced.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ pit->ced.set_state_shutdown = pit_shutdown;
+ pit->ced.set_state_periodic = pit_set_periodic;
+ pit->ced.set_next_event = pit_set_next_event;
+ pit->ced.rating = 300;
+
+ per_cpu(pit_timers, cpu) = pit;
+
+ return 0;
+}
+
+static void pit_clockevent_per_cpu_exit(struct pit_timer *pit, unsigned int cpu)
+{
+ pit_timer_disable(pit->clkevt_base);
+ free_irq(pit->ced.irq, &pit->ced);
+ per_cpu(pit_timers, cpu) = NULL;
+}
+
+static int pit_clockevent_starting_cpu(unsigned int cpu)
+{
+ struct pit_timer *pit = per_cpu(pit_timers, cpu);
+ int ret;
+
+ if (!pit)
+ return 0;
+
+ ret = irq_force_affinity(pit->ced.irq, cpumask_of(cpu));
+ if (ret) {
+ pit_clockevent_per_cpu_exit(pit, cpu);
+ return ret;
+ }
+
+ /*
+ * The value for the LDVAL register trigger is calculated as:
+ * LDVAL trigger = (period / clock period) - 1
+ * The pit is a 32-bit down count timer, when the counter value
+ * reaches 0, it will generate an interrupt, thus the minimal
+ * LDVAL trigger value is 1. And then the min_delta is
+ * minimal LDVAL trigger value + 1, and the max_delta is full 32-bit.
+ */
+ clockevents_config_and_register(&pit->ced, pit->rate, 2, 0xffffffff);
+
+ return 0;
+}
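
As a worked instance of the LDVAL formula in the comment above, assuming a
hypothetical 66 MHz PIT clock and HZ=100:

	LDVAL trigger = (period / clock period) - 1
	              = (66,000,000 / 100) - 1
	              = 659,999

The min_delta of 2 passed to clockevents_config_and_register() is the minimal
LDVAL trigger value (1) plus one, and the max_delta of 0xffffffff is the full
32-bit down counter.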
+
+static int pit_timer_init(struct device_node *np)
+{
+ struct pit_timer *pit;
+ struct clk *pit_clk;
+ void __iomem *timer_base;
+ const char *name = of_node_full_name(np);
+ unsigned long clk_rate;
+ int irq, ret;
+
+ pit = kzalloc(sizeof(*pit), GFP_KERNEL);
+ if (!pit)
+ return -ENOMEM;
+
+ ret = -ENXIO;
+ timer_base = of_iomap(np, 0);
+ if (!timer_base) {
+ pr_err("Failed to iomap\n");
+ goto out_kfree;
+ }
+
+ ret = -EINVAL;
+ irq = irq_of_parse_and_map(np, 0);
+ if (irq <= 0) {
+ pr_err("Failed to irq_of_parse_and_map\n");
+ goto out_iounmap;
+ }
+
+ pit_clk = of_clk_get(np, 0);
+ if (IS_ERR(pit_clk)) {
+ ret = PTR_ERR(pit_clk);
+ goto out_irq_dispose_mapping;
+ }
+
+ ret = clk_prepare_enable(pit_clk);
+ if (ret)
+ goto out_clk_put;
+
+ clk_rate = clk_get_rate(pit_clk);
+
+ pit_module_disable(timer_base);
+
+ ret = pit_clocksource_init(pit, name, timer_base, clk_rate);
+ if (ret) {
+ pr_err("Failed to initialize clocksource '%pOF'\n", np);
+ goto out_pit_module_disable;
+ }
+
+ ret = pit_clockevent_per_cpu_init(pit, name, timer_base, clk_rate, irq, pit_instances);
+ if (ret) {
+ pr_err("Failed to initialize clockevent '%pOF'\n", np);
+ goto out_pit_clocksource_unregister;
+ }
+
+ /* enable the pit module */
+ pit_module_enable(timer_base);
+
+ pit_instances++;
+
+ if (pit_instances == max_pit_instances) {
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "PIT timer:starting",
+ pit_clockevent_starting_cpu, NULL);
+ if (ret < 0)
+ goto out_pit_clocksource_unregister;
+ }
+
+ return 0;
+
+out_pit_clocksource_unregister:
+ clocksource_unregister(&pit->cs);
+out_pit_module_disable:
+ pit_module_disable(timer_base);
+ clk_disable_unprepare(pit_clk);
+out_clk_put:
+ clk_put(pit_clk);
+out_irq_dispose_mapping:
+ irq_dispose_mapping(irq);
+out_iounmap:
+ iounmap(timer_base);
+out_kfree:
+ kfree(pit);
+
+ return ret;
+}
+
+static int pit_timer_probe(struct platform_device *pdev)
+{
+ const struct pit_timer_data *pit_timer_data;
+
+ pit_timer_data = of_device_get_match_data(&pdev->dev);
+ if (pit_timer_data)
+ max_pit_instances = pit_timer_data->max_pit_instances;
+
+ return pit_timer_init(pdev->dev.of_node);
+}
+
+static struct pit_timer_data s32g2_data = { .max_pit_instances = 2 };
+
+static const struct of_device_id pit_timer_of_match[] = {
+ { .compatible = "nxp,s32g2-pit", .data = &s32g2_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pit_timer_of_match);
+
+static struct platform_driver nxp_pit_driver = {
+ .driver = {
+ .name = "nxp-pit",
+ .of_match_table = pit_timer_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pit_timer_probe,
+};
+builtin_platform_driver(nxp_pit_driver);
+
+TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
diff --git a/drivers/clocksource/timer-nxp-stm.c b/drivers/clocksource/timer-nxp-stm.c
index d7ccf9001729..1ab907233f48 100644
--- a/drivers/clocksource/timer-nxp-stm.c
+++ b/drivers/clocksource/timer-nxp-stm.c
@@ -177,15 +177,15 @@ static void nxp_stm_clocksource_resume(struct clocksource *cs)
nxp_stm_clocksource_enable(cs);
}
-static void __init devm_clocksource_unregister(void *data)
+static void devm_clocksource_unregister(void *data)
{
struct stm_timer *stm_timer = data;
clocksource_unregister(&stm_timer->cs);
}
-static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
- const char *name, void __iomem *base, struct clk *clk)
+static int nxp_stm_clocksource_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, struct clk *clk)
{
int ret;
@@ -201,16 +201,15 @@ static int __init nxp_stm_clocksource_init(struct device *dev, struct stm_timer
stm_timer->cs.resume = nxp_stm_clocksource_resume;
stm_timer->cs.mask = CLOCKSOURCE_MASK(32);
stm_timer->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ stm_timer->cs.owner = THIS_MODULE;
ret = clocksource_register_hz(&stm_timer->cs, stm_timer->rate);
if (ret)
return ret;
ret = devm_add_action_or_reset(dev, devm_clocksource_unregister, stm_timer);
- if (ret) {
- clocksource_unregister(&stm_timer->cs);
+ if (ret)
return ret;
- }
stm_sched_clock = stm_timer;
@@ -297,9 +296,9 @@ static void nxp_stm_clockevent_resume(struct clock_event_device *ced)
nxp_stm_module_get(stm_timer);
}
-static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
- const char *name, void __iomem *base, int irq,
- struct clk *clk, int cpu)
+static int nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm_timer *stm_timer,
+ const char *name, void __iomem *base, int irq,
+ struct clk *clk, int cpu)
{
stm_timer->base = base;
stm_timer->rate = clk_get_rate(clk);
@@ -314,6 +313,7 @@ static int __init nxp_stm_clockevent_per_cpu_init(struct device *dev, struct stm
stm_timer->ced.cpumask = cpumask_of(cpu);
stm_timer->ced.rating = 460;
stm_timer->ced.irq = irq;
+ stm_timer->ced.owner = THIS_MODULE;
per_cpu(stm_timers, cpu) = stm_timer;
@@ -386,7 +386,7 @@ static irqreturn_t nxp_stm_module_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init nxp_stm_timer_probe(struct platform_device *pdev)
+static int nxp_stm_timer_probe(struct platform_device *pdev)
{
struct stm_timer *stm_timer;
struct device *dev = &pdev->dev;
@@ -482,14 +482,15 @@ static const struct of_device_id nxp_stm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, nxp_stm_of_match);
-static struct platform_driver nxp_stm_probe = {
+static struct platform_driver nxp_stm_driver = {
.probe = nxp_stm_timer_probe,
.driver = {
.name = "nxp-stm",
.of_match_table = nxp_stm_of_match,
+ .suppress_bind_attrs = true,
},
};
-module_platform_driver(nxp_stm_probe);
+builtin_platform_driver(nxp_stm_driver);
MODULE_DESCRIPTION("NXP System Timer Module driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clocksource/timer-ralink.c b/drivers/clocksource/timer-ralink.c
index 6ecdb4228f76..68434d9ed910 100644
--- a/drivers/clocksource/timer-ralink.c
+++ b/drivers/clocksource/timer-ralink.c
@@ -130,14 +130,15 @@ static int __init ralink_systick_init(struct device_node *np)
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%pOFn: request_irq failed", np);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_iounmap;
}
ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
SYSTICK_FREQ, 301, 16,
clocksource_mmio_readl_up);
if (ret)
- return ret;
+ goto err_free_irq;
clockevents_register_device(&systick.dev);
@@ -145,6 +146,12 @@ static int __init ralink_systick_init(struct device_node *np)
np, systick.dev.mult, systick.dev.shift);
return 0;
+
+err_free_irq:
+ irq_dispose_mapping(systick.dev.irq);
+err_iounmap:
+ iounmap(systick.membase);
+ return ret;
}
TIMER_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/drivers/clocksource/timer-rda.c b/drivers/clocksource/timer-rda.c
index fd1199c189bf..0be8e05970e2 100644
--- a/drivers/clocksource/timer-rda.c
+++ b/drivers/clocksource/timer-rda.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/sched_clock.h>
#include "timer-of.h"
@@ -153,7 +154,7 @@ static struct timer_of rda_ostimer_of = {
},
};
-static u64 rda_hwtimer_read(struct clocksource *cs)
+static u64 rda_hwtimer_clocksource_read(void)
{
void __iomem *base = timer_of_base(&rda_ostimer_of);
u32 lo, hi;
@@ -167,6 +168,11 @@ static u64 rda_hwtimer_read(struct clocksource *cs)
return ((u64)hi << 32) | lo;
}
+static u64 rda_hwtimer_read(struct clocksource *cs)
+{
+ return rda_hwtimer_clocksource_read();
+}
+
static struct clocksource rda_hwtimer_clocksource = {
.name = "rda-timer",
.rating = 400,
@@ -185,6 +191,7 @@ static int __init rda_timer_init(struct device_node *np)
return ret;
clocksource_register_hz(&rda_hwtimer_clocksource, rate);
+ sched_clock_register(rda_hwtimer_clocksource_read, 64, rate);
clockevents_config_and_register(&rda_ostimer_of.clkevt, rate,
0x2, UINT_MAX);
diff --git a/drivers/clocksource/timer-realtek.c b/drivers/clocksource/timer-realtek.c
new file mode 100644
index 000000000000..4f0439de9939
--- /dev/null
+++ b/drivers/clocksource/timer-realtek.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Realtek Semiconductor Corp.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/irqflags.h>
+#include <linux/interrupt.h>
+#include "timer-of.h"
+
+#define ENBL 1
+#define DSBL 0
+
+#define SYSTIMER_RATE 1000000
+#define SYSTIMER_MIN_DELTA 0x64
+#define SYSTIMER_MAX_DELTA ULONG_MAX
+
+/* SYSTIMER Register Offset (RTK Internal Use) */
+#define TS_LW_OFST 0x0
+#define TS_HW_OFST 0x4
+#define TS_CMP_VAL_LW_OFST 0x8
+#define TS_CMP_VAL_HW_OFST 0xC
+#define TS_CMP_CTRL_OFST 0x10
+#define TS_CMP_STAT_OFST 0x14
+
+/* SYSTIMER CMP CTRL REG Mask */
+#define TS_CMP_EN_MASK 0x1
+#define TS_WR_EN0_MASK 0x2
+
+static void __iomem *systimer_base;
+
+static u64 rtk_ts64_read(void)
+{
+ u32 low, high;
+ u64 ts;
+
+ /* Caution: Read LSB word (TS_LW_OFST) first then MSB (TS_HW_OFST) */
+ low = readl(systimer_base + TS_LW_OFST);
+ high = readl(systimer_base + TS_HW_OFST);
+ ts = ((u64)high << 32) | low;
+
+ return ts;
+}
+
+static void rtk_cmp_value_write(u64 value)
+{
+ u32 high, low;
+
+ low = value & 0xFFFFFFFF;
+ high = value >> 32;
+
+ writel(high, systimer_base + TS_CMP_VAL_HW_OFST);
+ writel(low, systimer_base + TS_CMP_VAL_LW_OFST);
+}
+
+static inline void rtk_cmp_en_write(bool cmp_en)
+{
+ u32 val;
+
+ val = TS_WR_EN0_MASK;
+ if (cmp_en == ENBL)
+ val |= TS_CMP_EN_MASK;
+
+ writel(val, systimer_base + TS_CMP_CTRL_OFST);
+}
+
+static int rtk_syst_clkevt_next_event(unsigned long cycles, struct clock_event_device *clkevt)
+{
+ u64 cmp_val;
+
+ rtk_cmp_en_write(DSBL);
+ cmp_val = rtk_ts64_read();
+
+ /* Set the CMP value to the current timestamp plus the requested cycles */
+ rtk_cmp_value_write(cmp_val + cycles);
+ rtk_cmp_en_write(ENBL);
+ return 0;
+}
+
+static irqreturn_t rtk_ts_match_intr_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clkevt = dev_id;
+ void __iomem *reg_base;
+ u32 val;
+
+ /* Disable TS CMP Match */
+ rtk_cmp_en_write(DSBL);
+
+ /* Clear TS CMP INTR */
+ reg_base = systimer_base + TS_CMP_STAT_OFST;
+ val = readl(reg_base) & TS_CMP_EN_MASK;
+ writel(val | TS_CMP_EN_MASK, reg_base);
+ clkevt->event_handler(clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static int rtk_syst_shutdown(struct clock_event_device *clkevt)
+{
+ void __iomem *reg_base;
+ u64 cmp_val = 0;
+
+ /* Disable TS CMP Match */
+ rtk_cmp_en_write(DSBL);
+ /* Set compare value to 0 */
+ rtk_cmp_value_write(cmp_val);
+
+ /* Clear TS CMP INTR */
+ reg_base = systimer_base + TS_CMP_STAT_OFST;
+ writel(TS_CMP_EN_MASK, reg_base);
+ return 0;
+}
+
+static struct timer_of rtk_timer_to = {
+ .flags = TIMER_OF_IRQ | TIMER_OF_BASE,
+
+ .clkevt = {
+ .name = "rtk-clkevt",
+ .rating = 300,
+ .cpumask = cpu_possible_mask,
+ .features = CLOCK_EVT_FEAT_DYNIRQ |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = rtk_syst_clkevt_next_event,
+ .set_state_oneshot = rtk_syst_shutdown,
+ .set_state_shutdown = rtk_syst_shutdown,
+ },
+
+ .of_irq = {
+ .flags = IRQF_TIMER | IRQF_IRQPOLL,
+ .handler = rtk_ts_match_intr_handler,
+ },
+};
+
+static int __init rtk_systimer_init(struct device_node *node)
+{
+ int ret;
+
+ ret = timer_of_init(node, &rtk_timer_to);
+ if (ret)
+ return ret;
+
+ systimer_base = timer_of_base(&rtk_timer_to);
+ clockevents_config_and_register(&rtk_timer_to.clkevt, SYSTIMER_RATE,
+ SYSTIMER_MIN_DELTA, SYSTIMER_MAX_DELTA);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(rtk_systimer, "realtek,rtd1625-systimer", rtk_systimer_init);
diff --git a/drivers/clocksource/timer-rtl-otto.c b/drivers/clocksource/timer-rtl-otto.c
index 8a3068b36e75..6113d2fdd4de 100644
--- a/drivers/clocksource/timer-rtl-otto.c
+++ b/drivers/clocksource/timer-rtl-otto.c
@@ -38,14 +38,13 @@
#define RTTM_BIT_COUNT 28
#define RTTM_MIN_DELTA 8
#define RTTM_MAX_DELTA CLOCKSOURCE_MASK(28)
+#define RTTM_MAX_DIVISOR GENMASK(15, 0)
/*
- * Timers are derived from the LXB clock frequency. Usually this is a fixed
- * multiple of the 25 MHz oscillator. The 930X SOC is an exception from that.
- * Its LXB clock has only dividers and uses the switch PLL of 2.45 GHz as its
- * base. The only meaningful frequencies we can achieve from that are 175.000
- * MHz and 153.125 MHz. The greatest common divisor of all explained possible
- * speeds is 3125000. Pin the timers to this 3.125 MHz reference frequency.
+ * Timers are derived from the Lexra bus (LXB) clock frequency. This is 175 MHz
+ * on RTL930x and 200 MHz on the other platforms. Choose 3.125 MHz as a common
+ * divisor to get enough range and resolution, and to keep the different
+ * platforms comparable.
*/
#define RTTM_TICKS_PER_SEC 3125000
@@ -55,11 +54,6 @@ struct rttm_cs {
};
/* Simple internal register functions */
-static inline void rttm_set_counter(void __iomem *base, unsigned int counter)
-{
- iowrite32(counter, base + RTTM_CNT);
-}
-
static inline unsigned int rttm_get_counter(void __iomem *base)
{
return ioread32(base + RTTM_CNT);
@@ -112,6 +106,22 @@ static irqreturn_t rttm_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void rttm_bounce_timer(void __iomem *base, u32 mode)
+{
+ /*
+ * When a running timer has less than ~5us left, a stop/start sequence
+ * might fail. While the details are unknown, the most evident effect is
+ * that the subsequent interrupt will not be fired.
+ *
+ * As a workaround, issue an intermediate restart with a very slow
+ * frequency of ~3kHz while keeping the target counter (>= 8). The
+ * follow-up restart will then always be issued outside the critical window.
+ */
+
+ rttm_disable_timer(base);
+ rttm_enable_timer(base, mode, RTTM_MAX_DIVISOR);
+}
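
Plugging in numbers (assuming a 200 MHz LXB clock, with RTTM_MAX_DIVISOR =
GENMASK(15, 0) = 65535 from above): the intermediate restart runs the timer at
roughly 200,000,000 / 65,535 ~= 3,052 Hz, so a target counter of >= 8 leaves
at least 8 / 3,052 ~= 2.6 ms before expiry. The follow-up restart therefore
always happens far outside the ~5 us critical window.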
+
static void rttm_stop_timer(void __iomem *base)
{
rttm_disable_timer(base);
@@ -120,7 +130,6 @@ static void rttm_stop_timer(void __iomem *base)
static void rttm_start_timer(struct timer_of *to, u32 mode)
{
- rttm_set_counter(to->of_base.base, 0);
rttm_enable_timer(to->of_base.base, mode, to->of_clk.rate / RTTM_TICKS_PER_SEC);
}
@@ -129,7 +138,8 @@ static int rttm_next_event(unsigned long delta, struct clock_event_device *clkev
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, delta);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -141,7 +151,8 @@ static int rttm_state_oneshot(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_COUNTER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_COUNTER);
@@ -153,7 +164,8 @@ static int rttm_state_periodic(struct clock_event_device *clkevt)
struct timer_of *to = to_timer_of(clkevt);
RTTM_DEBUG(to->of_base.base);
- rttm_stop_timer(to->of_base.base);
+ rttm_bounce_timer(to->of_base.base, RTTM_CTRL_TIMER);
+ rttm_disable_timer(to->of_base.base);
rttm_set_period(to->of_base.base, RTTM_TICKS_PER_SEC / HZ);
rttm_start_timer(to, RTTM_CTRL_TIMER);
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index cd1916c05325..e82a95ea4724 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -21,6 +21,10 @@
#include <linux/of_irq.h>
#include <linux/sched_clock.h>
+#ifdef CONFIG_ARM
+#include <linux/delay.h>
+#endif
+
#include "timer-sp.h"
/* Hisilicon 64-bit timer(a variant of ARM SP804) */
@@ -102,6 +106,23 @@ static u64 notrace sp804_read(void)
return ~readl_relaxed(sched_clkevt->value);
}
+#ifdef CONFIG_ARM
+static struct delay_timer delay;
+static unsigned long sp804_read_delay_timer_read(void)
+{
+ return sp804_read();
+}
+
+static void sp804_register_delay_timer(int freq)
+{
+ delay.freq = freq;
+ delay.read_current_timer = sp804_read_delay_timer_read;
+ register_current_timer_delay(&delay);
+}
+#else
+static inline void sp804_register_delay_timer(int freq) {}
+#endif
+
static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
const char *name,
struct clk *clk,
@@ -114,6 +135,8 @@ static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
if (rate < 0)
return -EINVAL;
+ sp804_register_delay_timer(rate);
+
clkevt = sp804_clkevt_get(base);
writel(0, clkevt->ctrl);
@@ -318,6 +341,7 @@ static int __init sp804_of_init(struct device_node *np, struct sp804_timer *time
if (ret)
goto err;
}
+
initialized = true;
return 0;
diff --git a/drivers/clocksource/timer-sprd.c b/drivers/clocksource/timer-sprd.c
index 430cb99d8d79..2c07dd2af760 100644
--- a/drivers/clocksource/timer-sprd.c
+++ b/drivers/clocksource/timer-sprd.c
@@ -30,6 +30,7 @@
#define TIMER_VALUE_SHDW_HI 0x1c
#define TIMER_VALUE_LO_MASK GENMASK(31, 0)
+#define TIMER_VALUE_HI_MASK GENMASK(31, 0)
static void sprd_timer_enable(void __iomem *base, u32 flag)
{
@@ -162,15 +163,26 @@ static struct timer_of suspend_to = {
static u64 sprd_suspend_timer_read(struct clocksource *cs)
{
- return ~(u64)readl_relaxed(timer_of_base(&suspend_to) +
- TIMER_VALUE_SHDW_LO) & cs->mask;
+ u32 lo, hi;
+
+ do {
+ hi = readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_HI);
+ lo = readl_relaxed(timer_of_base(&suspend_to) +
+ TIMER_VALUE_SHDW_LO);
+ } while (hi != readl_relaxed(timer_of_base(&suspend_to) + TIMER_VALUE_SHDW_HI));
+
+ return ~(((u64)hi << 32) | lo);
}
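
The hi/lo/hi sequence above is the standard lock-free way to read a 64-bit
counter through two 32-bit shadow registers: if the high word changed while
the low word was being sampled, the pair may straddle a low-word wraparound
and must be retried. A generic sketch of the pattern:

	static u64 read_counter64(void __iomem *hi_reg, void __iomem *lo_reg)
	{
		u32 hi, lo;

		do {
			hi = readl_relaxed(hi_reg);
			lo = readl_relaxed(lo_reg);
			/* retry if the high word rolled over in between */
		} while (hi != readl_relaxed(hi_reg));

		return ((u64)hi << 32) | lo;
	}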
static int sprd_suspend_timer_enable(struct clocksource *cs)
{
- sprd_timer_update_counter(timer_of_base(&suspend_to),
- TIMER_VALUE_LO_MASK);
- sprd_timer_enable(timer_of_base(&suspend_to), TIMER_CTL_PERIOD_MODE);
+ writel_relaxed(TIMER_VALUE_LO_MASK,
+ timer_of_base(&suspend_to) + TIMER_LOAD_LO);
+ writel_relaxed(TIMER_VALUE_HI_MASK,
+ timer_of_base(&suspend_to) + TIMER_LOAD_HI);
+ sprd_timer_enable(timer_of_base(&suspend_to),
+ TIMER_CTL_PERIOD_MODE|TIMER_CTL_64BIT_WIDTH);
return 0;
}
@@ -186,7 +198,7 @@ static struct clocksource suspend_clocksource = {
.read = sprd_suspend_timer_read,
.enable = sprd_suspend_timer_enable,
.disable = sprd_suspend_timer_disable,
- .mask = CLOCKSOURCE_MASK(32),
+ .mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP,
};
diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c
index 6e7944ffd7c0..3d804128c765 100644
--- a/drivers/clocksource/timer-stm32-lp.c
+++ b/drivers/clocksource/timer-stm32-lp.c
@@ -211,6 +211,7 @@ static void stm32_clkevent_lp_init(struct stm32_lp_private *priv,
priv->clkevt.rating = STM32_LP_RATING;
priv->clkevt.suspend = stm32_clkevent_lp_suspend;
priv->clkevt.resume = stm32_clkevent_lp_resume;
+ priv->clkevt.owner = THIS_MODULE;
clockevents_config_and_register(&priv->clkevt, rate, 0x1,
STM32_LPTIM_MAX_ARR);
@@ -288,5 +289,4 @@ static struct platform_driver stm32_clkevent_lp_driver = {
};
module_platform_driver(stm32_clkevent_lp_driver);
-MODULE_ALIAS("platform:stm32-lptimer-timer");
MODULE_DESCRIPTION("STMicroelectronics STM32 clockevent low power driver");
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 6b48a9006444..f827d3f98f60 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -185,6 +185,7 @@ static int sun5i_setup_clocksource(struct platform_device *pdev,
cs->clksrc.read = sun5i_clksrc_read;
cs->clksrc.mask = CLOCKSOURCE_MASK(32);
cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ cs->clksrc.owner = THIS_MODULE;
ret = clocksource_register_hz(&cs->clksrc, rate);
if (ret) {
@@ -214,6 +215,7 @@ static int sun5i_setup_clockevent(struct platform_device *pdev,
ce->clkevt.rating = 340;
ce->clkevt.irq = irq;
ce->clkevt.cpumask = cpu_possible_mask;
+ ce->clkevt.owner = THIS_MODULE;
/* Enable timer0 interrupt */
val = readl(base + TIMER_IRQ_EN_REG);
diff --git a/drivers/clocksource/timer-tegra186.c b/drivers/clocksource/timer-tegra186.c
index e5394f98a02e..355558893e5f 100644
--- a/drivers/clocksource/timer-tegra186.c
+++ b/drivers/clocksource/timer-tegra186.c
@@ -159,7 +159,7 @@ static void tegra186_wdt_enable(struct tegra186_wdt *wdt)
tmr_writel(wdt->tmr, TMRCSSR_SRC_USEC, TMRCSSR);
/* configure timer (system reset happens on the fifth expiration) */
- value = TMRCR_PTV(wdt->base.timeout * USEC_PER_SEC / 5) |
+ value = TMRCR_PTV(wdt->base.timeout * (USEC_PER_SEC / 5)) |
TMRCR_PERIODIC | TMRCR_ENABLE;
tmr_writel(wdt->tmr, value, TMRCR);
@@ -231,7 +231,7 @@ static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
{
struct tegra186_wdt *wdt = to_tegra186_wdt(wdd);
u32 expiration, val;
- u64 timeleft;
+ u32 timeleft;
if (!watchdog_active(&wdt->base)) {
/* return zero if the watchdog timer is not activated. */
@@ -266,21 +266,26 @@ static unsigned int tegra186_wdt_get_timeleft(struct watchdog_device *wdd)
* Calculate the time remaining by adding the time for the
* counter value to the time of the counter expirations that
* remain.
+ * Note: Since wdt->base.timeout is bound to 255, the maximum
+ * value added to timeleft is
+ * 255 * (1,000,000 / 5) * 4
+ * = 255 * 200,000 * 4
+ * = 204,000,000
+ * TMRSR_PCV is a 29-bit field.
+ * Its maximum value is 0x1fffffff = 536,870,911.
+ * 204,000,000 + 536,870,911 = 740,870,911 = 0x2C28CAFF.
+ * timeleft can therefore not overflow, and 64-bit calculations
+ * are not necessary.
*/
- timeleft += (((u64)wdt->base.timeout * USEC_PER_SEC) / 5) * (4 - expiration);
+ timeleft += (wdt->base.timeout * (USEC_PER_SEC / 5)) * (4 - expiration);
/*
* Convert the current counter value to seconds,
- * rounding up to the nearest second. Cast u64 to
- * u32 under the assumption that no overflow happens
- * when coverting to seconds.
+ * rounding to the nearest second.
*/
- timeleft = DIV_ROUND_CLOSEST_ULL(timeleft, USEC_PER_SEC);
+ timeleft = DIV_ROUND_CLOSEST(timeleft, USEC_PER_SEC);
- if (WARN_ON_ONCE(timeleft > U32_MAX))
- return U32_MAX;
-
- return lower_32_bits(timeleft);
+ return timeleft;
}
static const struct watchdog_ops tegra186_wdt_ops = {
@@ -328,16 +333,12 @@ static struct tegra186_wdt *tegra186_wdt_create(struct tegra186_timer *tegra,
wdt->base.parent = tegra->dev;
err = watchdog_init_timeout(&wdt->base, 5, tegra->dev);
- if (err < 0) {
- dev_err(tegra->dev, "failed to initialize timeout: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
err = devm_watchdog_register_device(tegra->dev, &wdt->base);
- if (err < 0) {
- dev_err(tegra->dev, "failed to register WDT: %d\n", err);
+ if (err < 0)
return ERR_PTR(err);
- }
return wdt;
}
@@ -373,6 +374,7 @@ static int tegra186_timer_tsc_init(struct tegra186_timer *tegra)
tegra->tsc.read = tegra186_timer_tsc_read;
tegra->tsc.mask = CLOCKSOURCE_MASK(56);
tegra->tsc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->tsc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->tsc, 31250000);
}
@@ -392,6 +394,7 @@ static int tegra186_timer_osc_init(struct tegra186_timer *tegra)
tegra->osc.read = tegra186_timer_osc_read;
tegra->osc.mask = CLOCKSOURCE_MASK(32);
tegra->osc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->osc.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->osc, 38400000);
}
@@ -411,6 +414,7 @@ static int tegra186_timer_usec_init(struct tegra186_timer *tegra)
tegra->usec.read = tegra186_timer_usec_read;
tegra->usec.mask = CLOCKSOURCE_MASK(32);
tegra->usec.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+ tegra->usec.owner = THIS_MODULE;
return clocksource_register_hz(&tegra->usec, USEC_PER_SEC);
}
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index e9e32df6b566..793e7cdcb1b1 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -31,6 +31,7 @@
#include <linux/platform_data/dmtimer-omap.h>
#include <clocksource/timer-ti-dm.h>
+#include <linux/delay.h>
/*
* timer errata flags
@@ -836,6 +837,48 @@ static int omap_dm_timer_set_match(struct omap_dm_timer *cookie, int enable,
return 0;
}
+static int omap_dm_timer_set_cap(struct omap_dm_timer *cookie,
+ int autoreload, bool config_period)
+{
+ struct dmtimer *timer;
+ struct device *dev;
+ int rc;
+ u32 l;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer))
+ return -EINVAL;
+
+ dev = &timer->pdev->dev;
+ rc = pm_runtime_resume_and_get(dev);
+ if (rc)
+ return rc;
+ /*
+ * 1. Select autoreload mode: TIMER_TCLR[1] (AR bit).
+ * 2. TIMER_TCLR[14]: sets the functionality of the TIMER IO pin.
+ * 3. TIMER_TCLR[13]: capture mode select bit.
+ * 4. TIMER_TCLR[9-8]: select the transition capture mode.
+ */
+
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+
+ if (autoreload)
+ l |= OMAP_TIMER_CTRL_AR;
+
+ l |= OMAP_TIMER_CTRL_CAPTMODE | OMAP_TIMER_CTRL_GPOCFG;
+
+ if (config_period)
+ l |= OMAP_TIMER_CTRL_TCM_LOWTOHIGH; /* Time Period config */
+ else
+ l |= OMAP_TIMER_CTRL_TCM_BOTHEDGES; /* Duty Cycle config */
+
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ pm_runtime_put_sync(dev);
+
+ return 0;
+}
+
static int omap_dm_timer_set_pwm(struct omap_dm_timer *cookie, int def_on,
int toggle, int trigger, int autoreload)
{
@@ -1023,23 +1066,92 @@ static unsigned int omap_dm_timer_read_counter(struct omap_dm_timer *cookie)
return __omap_dm_timer_read_counter(timer);
}
+static inline unsigned int __omap_dm_timer_cap(struct dmtimer *timer, int idx)
+{
+ return idx == 0 ? dmtimer_read(timer, OMAP_TIMER_CAPTURE_REG) :
+ dmtimer_read(timer, OMAP_TIMER_CAPTURE2_REG);
+}
+
static int omap_dm_timer_write_counter(struct omap_dm_timer *cookie, unsigned int value)
{
struct dmtimer *timer;
+ struct device *dev;
timer = to_dmtimer(cookie);
- if (unlikely(!timer || !atomic_read(&timer->enabled))) {
- pr_err("%s: timer not available or enabled.\n", __func__);
+ if (unlikely(!timer)) {
+ pr_err("%s: timer not available.\n", __func__);
return -EINVAL;
}
+ dev = &timer->pdev->dev;
+
+ pm_runtime_resume_and_get(dev);
dmtimer_write(timer, OMAP_TIMER_COUNTER_REG, value);
+ pm_runtime_put_sync(dev);
/* Save the context */
timer->context.tcrr = value;
return 0;
}
+/**
+ * omap_dm_timer_cap_counter() - Calculate the high count or period count depending on the
+ * configuration.
+ * @cookie:Pointer to OMAP DM timer
+ * @is_period:Whether to configure timer in period or duty cycle mode
+ *
+ * Return high count or period count if timer is enabled else appropriate error.
+ */
+static unsigned int omap_dm_timer_cap_counter(struct omap_dm_timer *cookie, bool is_period)
+{
+ struct dmtimer *timer;
+ unsigned int cap1 = 0;
+ unsigned int cap2 = 0;
+ u32 l, ret;
+
+ timer = to_dmtimer(cookie);
+ if (unlikely(!timer || !atomic_read(&timer->enabled))) {
+ pr_err("%s:timer is not available or enabled.%p\n", __func__, (void *)timer);
+ return -EINVAL;
+ }
+
+ /* Stop the timer */
+ omap_dm_timer_stop(cookie);
+
+ /* Clear the timer counter value to 0 */
+ ret = omap_dm_timer_write_counter(cookie, 0);
+ if (ret)
+ return ret;
+
+ /* Sets the timer capture configuration for period/duty cycle calculation */
+ ret = omap_dm_timer_set_cap(cookie, true, is_period);
+ if (ret) {
+ pr_err("%s: Failed to set timer capture configuration.\n", __func__);
+ return ret;
+ }
+ /* Start the timer */
+ omap_dm_timer_start(cookie);
+
+ /*
+ * A 1 sec delay is given to provide enough time
+ * to capture low-frequency signals.
+ */
+ msleep(1000);
+
+ cap1 = __omap_dm_timer_cap(timer, 0);
+ cap2 = __omap_dm_timer_cap(timer, 1);
+
+ /*
+ * Clear the TCLR configuration. The start bit must be kept
+ * set to 1, as the timer is already in start mode.
+ */
+ l = dmtimer_read(timer, OMAP_TIMER_CTRL_REG);
+ l = (l & ~0xffff) | 0x1;
+ dmtimer_write(timer, OMAP_TIMER_CTRL_REG, l);
+
+ return cap2 - cap1;
+}
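
A caller can convert the returned capture delta into a signal frequency or a
duty cycle; a minimal sketch, assuming timer_rate is the rate of the clock
feeding the timer counter (all names hypothetical):

	/* period mode: cap2 - cap1 spans two consecutive rising edges */
	static unsigned long cap_to_freq_hz(unsigned long timer_rate,
					    unsigned int period_ticks)
	{
		return period_ticks ? timer_rate / period_ticks : 0;
	}

	/* duty-cycle mode: both-edges capture yields the width of one phase */
	static unsigned int cap_to_duty_pct(unsigned int high_ticks,
					    unsigned int period_ticks)
	{
		return period_ticks ? (high_ticks * 100U) / period_ticks : 0;
	}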
+
static int __maybe_unused omap_dm_timer_runtime_suspend(struct device *dev)
{
struct dmtimer *timer = dev_get_drvdata(dev);
@@ -1246,6 +1358,9 @@ static const struct omap_dm_timer_ops dmtimer_ops = {
.write_counter = omap_dm_timer_write_counter,
.read_status = omap_dm_timer_read_status,
.write_status = omap_dm_timer_write_status,
+ .set_cap = omap_dm_timer_set_cap,
+ .get_cap_status = omap_dm_timer_get_pwm_status,
+ .read_cap = omap_dm_timer_cap_counter,
};
static const struct dmtimer_platform_data omap3plus_pdata = {
diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c
deleted file mode 100644
index 911c92146eca..000000000000
--- a/drivers/clocksource/timer-vf-pit.c
+++ /dev/null
@@ -1,194 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2012-2013 Freescale Semiconductor, Inc.
- */
-
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/clk.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/sched_clock.h>
-
-/*
- * Each pit takes 0x10 Bytes register space
- */
-#define PITMCR 0x00
-#define PIT0_OFFSET 0x100
-#define PITn_OFFSET(n) (PIT0_OFFSET + 0x10 * (n))
-#define PITLDVAL 0x00
-#define PITCVAL 0x04
-#define PITTCTRL 0x08
-#define PITTFLG 0x0c
-
-#define PITMCR_MDIS (0x1 << 1)
-
-#define PITTCTRL_TEN (0x1 << 0)
-#define PITTCTRL_TIE (0x1 << 1)
-#define PITCTRL_CHN (0x1 << 2)
-
-#define PITTFLG_TIF 0x1
-
-static void __iomem *clksrc_base;
-static void __iomem *clkevt_base;
-static unsigned long cycle_per_jiffy;
-
-static inline void pit_timer_enable(void)
-{
- __raw_writel(PITTCTRL_TEN | PITTCTRL_TIE, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_timer_disable(void)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
-}
-
-static inline void pit_irq_acknowledge(void)
-{
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-}
-
-static u64 notrace pit_read_sched_clock(void)
-{
- return ~__raw_readl(clksrc_base + PITCVAL);
-}
-
-static int __init pit_clocksource_init(unsigned long rate)
-{
- /* set the max load value and start the clock source counter */
- __raw_writel(0, clksrc_base + PITTCTRL);
- __raw_writel(~0UL, clksrc_base + PITLDVAL);
- __raw_writel(PITTCTRL_TEN, clksrc_base + PITTCTRL);
-
- sched_clock_register(pit_read_sched_clock, 32, rate);
- return clocksource_mmio_init(clksrc_base + PITCVAL, "vf-pit", rate,
- 300, 32, clocksource_mmio_readl_down);
-}
-
-static int pit_set_next_event(unsigned long delta,
- struct clock_event_device *unused)
-{
- /*
- * set a new value to PITLDVAL register will not restart the timer,
- * to abort the current cycle and start a timer period with the new
- * value, the timer must be disabled and enabled again.
- * and the PITLAVAL should be set to delta minus one according to pit
- * hardware requirement.
- */
- pit_timer_disable();
- __raw_writel(delta - 1, clkevt_base + PITLDVAL);
- pit_timer_enable();
-
- return 0;
-}
-
-static int pit_shutdown(struct clock_event_device *evt)
-{
- pit_timer_disable();
- return 0;
-}
-
-static int pit_set_periodic(struct clock_event_device *evt)
-{
- pit_set_next_event(cycle_per_jiffy, evt);
- return 0;
-}
-
-static irqreturn_t pit_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
-
- pit_irq_acknowledge();
-
- /*
- * pit hardware doesn't support oneshot, it will generate an interrupt
- * and reload the counter value from PITLDVAL when PITCVAL reach zero,
- * and start the counter again. So software need to disable the timer
- * to stop the counter loop in ONESHOT mode.
- */
- if (likely(clockevent_state_oneshot(evt)))
- pit_timer_disable();
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct clock_event_device clockevent_pit = {
- .name = "VF pit timer",
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_state_shutdown = pit_shutdown,
- .set_state_periodic = pit_set_periodic,
- .set_next_event = pit_set_next_event,
- .rating = 300,
-};
-
-static int __init pit_clockevent_init(unsigned long rate, int irq)
-{
- __raw_writel(0, clkevt_base + PITTCTRL);
- __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG);
-
- BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL,
- "VF pit timer", &clockevent_pit));
-
- clockevent_pit.cpumask = cpumask_of(0);
- clockevent_pit.irq = irq;
- /*
- * The value for the LDVAL register trigger is calculated as:
- * LDVAL trigger = (period / clock period) - 1
- * The PIT is a 32-bit down-counting timer that generates an interrupt
- * when the counter value reaches 0, so the minimal LDVAL trigger
- * value is 1. The min_delta is therefore the minimal LDVAL trigger
- * value + 1, and the max_delta is the full 32-bit range.
- */
- clockevents_config_and_register(&clockevent_pit, rate, 2, 0xffffffff);
-
- return 0;
-}
-
-static int __init pit_timer_init(struct device_node *np)
-{
- struct clk *pit_clk;
- void __iomem *timer_base;
- unsigned long clk_rate;
- int irq, ret;
-
- timer_base = of_iomap(np, 0);
- if (!timer_base) {
- pr_err("Failed to iomap\n");
- return -ENXIO;
- }
-
- /*
- * PIT0 and PIT1 can be chained to build a 64-bit timer,
- * so choose PIT2 as clocksource, PIT3 as clockevent device,
- * and leave PIT0 and PIT1 unused for anyone else who needs them.
- */
- clksrc_base = timer_base + PITn_OFFSET(2);
- clkevt_base = timer_base + PITn_OFFSET(3);
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq <= 0)
- return -EINVAL;
-
- pit_clk = of_clk_get(np, 0);
- if (IS_ERR(pit_clk))
- return PTR_ERR(pit_clk);
-
- ret = clk_prepare_enable(pit_clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(pit_clk);
- cycle_per_jiffy = clk_rate / (HZ);
-
- /* enable the pit module */
- __raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
-
- ret = pit_clocksource_init(clk_rate);
- if (ret)
- return ret;
-
- return pit_clockevent_init(clk_rate, irq);
-}
-TIMER_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
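[Editor's illustration] The LDVAL arithmetic described in the removed clockevent code can be checked with a few lines of ordinary C. This standalone sketch assumes a hypothetical 66 MHz PIT input clock and HZ=100; neither value comes from the patch.

	#include <stdio.h>

	int main(void)
	{
		unsigned long rate = 66000000UL;	/* assumed PIT clock rate */
		unsigned long hz = 100;			/* assumed kernel HZ */
		unsigned long cycle_per_jiffy = rate / hz;

		/* LDVAL trigger = (period / clock period) - 1 */
		printf("periodic LDVAL = %lu\n", cycle_per_jiffy - 1);	/* 659999 */
		printf("min_delta = 2, max_delta = 0xffffffff\n");
		return 0;
	}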
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 93c68a40a17b..6dcc2567de6d 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -705,6 +705,15 @@ config COMEDI_ADL_PCI6208
To compile this driver as a module, choose M here: the module will be
called adl_pci6208.
+config COMEDI_ADL_PCI7250
+ tristate "ADLink PCI-7250 support"
+ help
+ Enable support for ADLink PCI-7250/LPCI-7250/LPCIe-7250 relay output
+ and isolated digital input boards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci7250.
+
config COMEDI_ADL_PCI7X3X
tristate "ADLink PCI-723X/743X isolated digital i/o board support"
depends on HAS_IOPORT
diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
index 002c0e76baff..785977b40a93 100644
--- a/drivers/comedi/comedi_buf.c
+++ b/drivers/comedi/comedi_buf.c
@@ -273,19 +273,8 @@ unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s)
return free_end - async->buf_write_count;
}
-/**
- * comedi_buf_write_alloc() - Reserve buffer space for writing
- * @s: COMEDI subdevice.
- * @nbytes: Maximum space to reserve in bytes.
- *
- * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
- * data buffer associated with the subdevice. The amount reserved is limited
- * by the space available.
- *
- * Return: The amount of space reserved in bytes.
- */
-unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
- unsigned int nbytes)
+unsigned int _comedi_buf_write_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int unalloc = comedi_buf_write_n_unalloc(s);
@@ -303,6 +292,29 @@ unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
return nbytes;
}
+
+/**
+ * comedi_buf_write_alloc() - Reserve buffer space for writing
+ * @s: COMEDI subdevice.
+ * @nbytes: Maximum space to reserve in bytes.
+ *
+ * Reserve up to @nbytes bytes of space to be written in the COMEDI acquisition
+ * data buffer associated with the subdevice. The amount reserved is limited
+ * by the space available.
+ *
+ * Return: The amount of space reserved in bytes.
+ */
+unsigned int comedi_buf_write_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_write_alloc(s, nbytes);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
+ return nbytes;
+}
EXPORT_SYMBOL_GPL(comedi_buf_write_alloc);
/*
@@ -317,7 +329,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
unsigned int count = 0;
const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
- if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
+ if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) {
async->munge_count += num_bytes;
return num_bytes;
}
@@ -362,6 +374,24 @@ unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
return async->buf_write_alloc_count - async->buf_write_count;
}
+unsigned int _comedi_buf_write_free(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ struct comedi_async *async = s->async;
+ unsigned int allocated = comedi_buf_write_n_allocated(s);
+
+ if (nbytes > allocated)
+ nbytes = allocated;
+
+ async->buf_write_count += nbytes;
+ async->buf_write_ptr += nbytes;
+ comedi_buf_munge(s, async->buf_write_count - async->munge_count);
+ if (async->buf_write_ptr >= async->prealloc_bufsz)
+ async->buf_write_ptr %= async->prealloc_bufsz;
+
+ return nbytes;
+}
+
/**
* comedi_buf_write_free() - Free buffer space after it is written
* @s: COMEDI subdevice.
@@ -380,21 +410,34 @@ unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s)
unsigned int comedi_buf_write_free(struct comedi_subdevice *s,
unsigned int nbytes)
{
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_write_free(s, nbytes);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
+ return nbytes;
+}
+EXPORT_SYMBOL_GPL(comedi_buf_write_free);
+
+unsigned int _comedi_buf_read_n_available(struct comedi_subdevice *s)
+{
struct comedi_async *async = s->async;
- unsigned int allocated = comedi_buf_write_n_allocated(s);
+ unsigned int num_bytes;
- if (nbytes > allocated)
- nbytes = allocated;
+ if (!async)
+ return 0;
- async->buf_write_count += nbytes;
- async->buf_write_ptr += nbytes;
- comedi_buf_munge(s, async->buf_write_count - async->munge_count);
- if (async->buf_write_ptr >= async->prealloc_bufsz)
- async->buf_write_ptr %= async->prealloc_bufsz;
+ num_bytes = async->munge_count - async->buf_read_count;
- return nbytes;
+ /*
+ * ensure the async buffer 'counts' are read before we
+ * attempt to read data from the buffer
+ */
+ smp_rmb();
+
+ return num_bytes;
}
-EXPORT_SYMBOL_GPL(comedi_buf_write_free);
/**
* comedi_buf_read_n_available() - Determine amount of readable buffer space
@@ -409,23 +452,38 @@ EXPORT_SYMBOL_GPL(comedi_buf_write_free);
*/
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
- struct comedi_async *async = s->async;
unsigned int num_bytes;
- if (!async)
- return 0;
+ if (comedi_get_is_subdevice_running(s)) {
+ num_bytes = _comedi_buf_read_n_available(s);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ num_bytes = 0;
+ }
+ return num_bytes;
+}
+EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
- num_bytes = async->munge_count - async->buf_read_count;
+unsigned int _comedi_buf_read_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ struct comedi_async *async = s->async;
+ unsigned int available;
+
+ available = async->munge_count - async->buf_read_alloc_count;
+ if (nbytes > available)
+ nbytes = available;
+
+ async->buf_read_alloc_count += nbytes;
/*
* ensure the async buffer 'counts' are read before we
- * attempt to read data from the buffer
+ * attempt to read data from the read-alloc'ed buffer space
*/
smp_rmb();
- return num_bytes;
+ return nbytes;
}
-EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
/**
* comedi_buf_read_alloc() - Reserve buffer space for reading
@@ -445,21 +503,12 @@ EXPORT_SYMBOL_GPL(comedi_buf_read_n_available);
unsigned int comedi_buf_read_alloc(struct comedi_subdevice *s,
unsigned int nbytes)
{
- struct comedi_async *async = s->async;
- unsigned int available;
-
- available = async->munge_count - async->buf_read_alloc_count;
- if (nbytes > available)
- nbytes = available;
-
- async->buf_read_alloc_count += nbytes;
-
- /*
- * ensure the async buffer 'counts' are read before we
- * attempt to read data from the read-alloc'ed buffer space
- */
- smp_rmb();
-
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_read_alloc(s, nbytes);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_alloc);
@@ -469,21 +518,8 @@ static unsigned int comedi_buf_read_n_allocated(struct comedi_async *async)
return async->buf_read_alloc_count - async->buf_read_count;
}
-/**
- * comedi_buf_read_free() - Free buffer space after it has been read
- * @s: COMEDI subdevice.
- * @nbytes: Maximum space to free in bytes.
- *
- * Free up to @nbytes bytes of buffer space previously reserved for reading in
- * the COMEDI acquisition data buffer associated with the subdevice. The
- * amount of space freed is limited to the amount that was reserved.
- *
- * The freed space becomes available for allocation by the writer.
- *
- * Return: The amount of space freed in bytes.
- */
-unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
- unsigned int nbytes)
+unsigned int _comedi_buf_read_free(struct comedi_subdevice *s,
+ unsigned int nbytes)
{
struct comedi_async *async = s->async;
unsigned int allocated;
@@ -503,6 +539,31 @@ unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
async->buf_read_ptr %= async->prealloc_bufsz;
return nbytes;
}
+
+/**
+ * comedi_buf_read_free() - Free buffer space after it has been read
+ * @s: COMEDI subdevice.
+ * @nbytes: Maximum space to free in bytes.
+ *
+ * Free up to @nbytes bytes of buffer space previously reserved for reading in
+ * the COMEDI acquisition data buffer associated with the subdevice. The
+ * amount of space freed is limited to the amount that was reserved.
+ *
+ * The freed space becomes available for allocation by the writer.
+ *
+ * Return: The amount of space freed in bytes.
+ */
+unsigned int comedi_buf_read_free(struct comedi_subdevice *s,
+ unsigned int nbytes)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_read_free(s, nbytes);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
+ return nbytes;
+}
EXPORT_SYMBOL_GPL(comedi_buf_read_free);
static void comedi_buf_memcpy_to(struct comedi_subdevice *s,
@@ -558,6 +619,38 @@ static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
}
}
+static unsigned int _comedi_buf_write_samples(struct comedi_subdevice *s,
+ const void *data,
+ unsigned int nsamples)
+{
+ unsigned int max_samples;
+ unsigned int nbytes;
+
+ /*
+ * Make sure there is enough room in the buffer for all the samples.
+ * If not, clamp the nsamples to the number that will fit, flag the
+ * buffer overrun and add the samples that fit.
+ */
+ max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
+ if (nsamples > max_samples) {
+ dev_warn(s->device->class_dev, "buffer overrun\n");
+ s->async->events |= COMEDI_CB_OVERFLOW;
+ nsamples = max_samples;
+ }
+
+ if (nsamples == 0)
+ return 0;
+
+ nbytes = comedi_samples_to_bytes(s, nsamples);
+ nbytes = _comedi_buf_write_alloc(s, nbytes);
+ comedi_buf_memcpy_to(s, data, nbytes);
+ _comedi_buf_write_free(s, nbytes);
+ _comedi_inc_scan_progress(s, nbytes);
+ s->async->events |= COMEDI_CB_BLOCK;
+
+ return nbytes;
+}
+
/**
* comedi_buf_write_samples() - Write sample data to COMEDI buffer
* @s: COMEDI subdevice.
@@ -578,34 +671,42 @@ static void comedi_buf_memcpy_from(struct comedi_subdevice *s,
unsigned int comedi_buf_write_samples(struct comedi_subdevice *s,
const void *data, unsigned int nsamples)
{
+ unsigned int nbytes;
+
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_write_samples(s, data, nsamples);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
+ return nbytes;
+}
+EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
+
+static unsigned int _comedi_buf_read_samples(struct comedi_subdevice *s,
+ void *data, unsigned int nsamples)
+{
unsigned int max_samples;
unsigned int nbytes;
- /*
- * Make sure there is enough room in the buffer for all the samples.
- * If not, clamp the nsamples to the number that will fit, flag the
- * buffer overrun and add the samples that fit.
- */
- max_samples = comedi_bytes_to_samples(s, comedi_buf_write_n_unalloc(s));
- if (nsamples > max_samples) {
- dev_warn(s->device->class_dev, "buffer overrun\n");
- s->async->events |= COMEDI_CB_OVERFLOW;
+ /* clamp nsamples to the number of full samples available */
+ max_samples = comedi_bytes_to_samples(s,
+ _comedi_buf_read_n_available(s));
+ if (nsamples > max_samples)
nsamples = max_samples;
- }
if (nsamples == 0)
return 0;
- nbytes = comedi_buf_write_alloc(s,
+ nbytes = _comedi_buf_read_alloc(s,
comedi_samples_to_bytes(s, nsamples));
- comedi_buf_memcpy_to(s, data, nbytes);
- comedi_buf_write_free(s, nbytes);
- comedi_inc_scan_progress(s, nbytes);
+ comedi_buf_memcpy_from(s, data, nbytes);
+ _comedi_buf_read_free(s, nbytes);
+ _comedi_inc_scan_progress(s, nbytes);
s->async->events |= COMEDI_CB_BLOCK;
return nbytes;
}
-EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
/**
* comedi_buf_read_samples() - Read sample data from COMEDI buffer
@@ -624,25 +725,14 @@ EXPORT_SYMBOL_GPL(comedi_buf_write_samples);
unsigned int comedi_buf_read_samples(struct comedi_subdevice *s,
void *data, unsigned int nsamples)
{
- unsigned int max_samples;
unsigned int nbytes;
- /* clamp nsamples to the number of full samples available */
- max_samples = comedi_bytes_to_samples(s,
- comedi_buf_read_n_available(s));
- if (nsamples > max_samples)
- nsamples = max_samples;
-
- if (nsamples == 0)
- return 0;
-
- nbytes = comedi_buf_read_alloc(s,
- comedi_samples_to_bytes(s, nsamples));
- comedi_buf_memcpy_from(s, data, nbytes);
- comedi_buf_read_free(s, nbytes);
- comedi_inc_scan_progress(s, nbytes);
- s->async->events |= COMEDI_CB_BLOCK;
-
+ if (comedi_get_is_subdevice_running(s)) {
+ nbytes = _comedi_buf_read_samples(s, data, nsamples);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nbytes = 0;
+ }
return nbytes;
}
EXPORT_SYMBOL_GPL(comedi_buf_read_samples);
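[Editor's illustration] Every exported buffer helper above now follows the same shape: pin the running command, delegate to the lock-free "_"-prefixed variant, unpin. A minimal sketch of that wrapper pattern; comedi_buf_example_op and its internal counterpart are hypothetical names, not part of the patch.

	unsigned int comedi_buf_example_op(struct comedi_subdevice *s,
					   unsigned int nbytes)
	{
		if (!comedi_get_is_subdevice_running(s))
			return 0;	/* no active command: report nothing done */
		nbytes = _comedi_buf_example_op(s, nbytes);	/* hypothetical */
		comedi_put_is_subdevice_running(s);
		return nbytes;
	}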
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 7e2f2b1a1c36..657c98cd723e 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -38,6 +38,7 @@
 * COMEDI_SRF_ERROR: indicates a COMEDI_CB_ERROR event has occurred
* since the last command was started
* COMEDI_SRF_RUNNING: command is running
+ * COMEDI_SRF_BUSY: command was started and subdevice still busy
* COMEDI_SRF_FREE_SPRIV: free s->private on detach
*
* COMEDI_SRF_BUSY_MASK: runflags that indicate the subdevice is "busy"
@@ -45,9 +46,11 @@
#define COMEDI_SRF_RT BIT(1)
#define COMEDI_SRF_ERROR BIT(2)
#define COMEDI_SRF_RUNNING BIT(27)
+#define COMEDI_SRF_BUSY BIT(28)
#define COMEDI_SRF_FREE_SPRIV BIT(31)
-#define COMEDI_SRF_BUSY_MASK (COMEDI_SRF_ERROR | COMEDI_SRF_RUNNING)
+#define COMEDI_SRF_BUSY_MASK \
+ (COMEDI_SRF_ERROR | COMEDI_SRF_RUNNING | COMEDI_SRF_BUSY)
/**
* struct comedi_file - Per-file private data for COMEDI device
@@ -665,6 +668,11 @@ static bool comedi_is_runflags_in_error(unsigned int runflags)
return runflags & COMEDI_SRF_ERROR;
}
+static bool comedi_is_runflags_busy(unsigned int runflags)
+{
+ return runflags & COMEDI_SRF_BUSY;
+}
+
/**
* comedi_is_subdevice_running() - Check if async command running on subdevice
* @s: COMEDI subdevice.
@@ -687,6 +695,46 @@ static bool __comedi_is_subdevice_running(struct comedi_subdevice *s)
return comedi_is_runflags_running(runflags);
}
+/**
+ * comedi_get_is_subdevice_running() - Get if async command running on subdevice
+ * @s: COMEDI subdevice.
+ *
+ * If an asynchronous COMEDI command is running on the subdevice, increment
+ * a reference counter. If the function return value indicates that a
+ * command is running, then the details of the command will not be destroyed
+ * before a matching call to comedi_put_is_subdevice_running().
+ *
+ * Return: %true if an asynchronous COMEDI command is active on the
+ * subdevice, else %false.
+ */
+bool comedi_get_is_subdevice_running(struct comedi_subdevice *s)
+{
+ unsigned long flags;
+ bool running;
+
+ spin_lock_irqsave(&s->spin_lock, flags);
+ running = __comedi_is_subdevice_running(s);
+ if (running)
+ refcount_inc(&s->async->run_active);
+ spin_unlock_irqrestore(&s->spin_lock, flags);
+ return running;
+}
+EXPORT_SYMBOL_GPL(comedi_get_is_subdevice_running);
+
+/**
+ * comedi_put_is_subdevice_running() - Put if async command running on subdevice
+ * @s: COMEDI subdevice.
+ *
+ * Decrements the reference counter that was incremented when
+ * comedi_get_is_subdevice_running() returned %true.
+ */
+void comedi_put_is_subdevice_running(struct comedi_subdevice *s)
+{
+ if (refcount_dec_and_test(&s->async->run_active))
+ complete_all(&s->async->run_complete);
+}
+EXPORT_SYMBOL_GPL(comedi_put_is_subdevice_running);
+
bool comedi_can_auto_free_spriv(struct comedi_subdevice *s)
{
unsigned int runflags = __comedi_get_subdevice_runflags(s);
@@ -736,20 +784,28 @@ static void do_become_nonbusy(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
+ unsigned int runflags;
+ unsigned long flags;
lockdep_assert_held(&dev->mutex);
- comedi_update_subdevice_runflags(s, COMEDI_SRF_RUNNING, 0);
- if (async) {
+ spin_lock_irqsave(&s->spin_lock, flags);
+ runflags = __comedi_get_subdevice_runflags(s);
+ __comedi_clear_subdevice_runflags(s, COMEDI_SRF_RUNNING |
+ COMEDI_SRF_BUSY);
+ spin_unlock_irqrestore(&s->spin_lock, flags);
+ if (comedi_is_runflags_busy(runflags)) {
+ /*
+ * "Run active" counter was set to 1 when setting up the
+ * command. Decrement it and wait for it to become 0.
+ */
+ comedi_put_is_subdevice_running(s);
+ wait_for_completion(&async->run_complete);
comedi_buf_reset(s);
async->inttrig = NULL;
kfree(async->cmd.chanlist);
async->cmd.chanlist = NULL;
s->busy = NULL;
wake_up_interruptible_all(&async->wait_head);
- } else {
- dev_err(dev->class_dev,
- "BUG: (?) %s called with async=NULL\n", __func__);
- s->busy = NULL;
}
}
@@ -1150,15 +1206,15 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
if (!(async->cmd.flags & CMDF_WRITE)) {
/* command was set up in "read" direction */
if (bi.bytes_read) {
- comedi_buf_read_alloc(s, bi.bytes_read);
- bi.bytes_read = comedi_buf_read_free(s, bi.bytes_read);
+ _comedi_buf_read_alloc(s, bi.bytes_read);
+ bi.bytes_read = _comedi_buf_read_free(s, bi.bytes_read);
}
/*
* If nothing left to read, and command has stopped, and
* {"read" position not updated or command stopped normally},
* then become non-busy.
*/
- if (comedi_buf_read_n_available(s) == 0 &&
+ if (_comedi_buf_read_n_available(s) == 0 &&
!comedi_is_runflags_running(runflags) &&
(bi.bytes_read == 0 ||
!comedi_is_runflags_in_error(runflags))) {
@@ -1175,9 +1231,9 @@ static int do_bufinfo_ioctl(struct comedi_device *dev,
if (comedi_is_runflags_in_error(runflags))
retval = -EPIPE;
} else if (bi.bytes_written) {
- comedi_buf_write_alloc(s, bi.bytes_written);
+ _comedi_buf_write_alloc(s, bi.bytes_written);
bi.bytes_written =
- comedi_buf_write_free(s, bi.bytes_written);
+ _comedi_buf_write_free(s, bi.bytes_written);
}
bi.bytes_read = 0;
}
@@ -1860,8 +1916,14 @@ static int do_cmd_ioctl(struct comedi_device *dev,
if (async->cmd.flags & CMDF_WAKE_EOS)
async->cb_mask |= COMEDI_CB_EOS;
+ /*
+ * Set the "run active" counter with an initial count of 1 that will
+ * complete the "safe to reset" event when it is decremented to 0.
+ */
+ refcount_set(&s->async->run_active, 1);
+ reinit_completion(&s->async->run_complete);
comedi_update_subdevice_runflags(s, COMEDI_SRF_BUSY_MASK,
- COMEDI_SRF_RUNNING);
+ COMEDI_SRF_RUNNING | COMEDI_SRF_BUSY);
/*
* Set s->busy _after_ setting COMEDI_SRF_RUNNING flag to avoid
@@ -2284,15 +2346,10 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
rc = check_insnlist_len(dev, insnlist.n_insns);
if (rc)
break;
- insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL);
- if (!insns) {
- rc = -ENOMEM;
- break;
- }
- if (copy_from_user(insns, insnlist.insns,
- sizeof(*insns) * insnlist.n_insns)) {
- rc = -EFAULT;
- kfree(insns);
+ insns = memdup_array_user(insnlist.insns, insnlist.n_insns,
+ sizeof(*insns));
+ if (IS_ERR(insns)) {
+ rc = PTR_ERR(insns);
break;
}
rc = do_insnlist_ioctl(dev, insns, insnlist.n_insns, file);
@@ -2512,7 +2569,7 @@ static __poll_t comedi_poll(struct file *file, poll_table *wait)
poll_wait(file, &s->async->wait_head, wait);
if (s->busy != file || !comedi_is_subdevice_running(s) ||
(s->async->cmd.flags & CMDF_WRITE) ||
- comedi_buf_read_n_available(s) > 0)
+ _comedi_buf_read_n_available(s) > 0)
mask |= EPOLLIN | EPOLLRDNORM;
}
@@ -2645,7 +2702,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
break;
/* Allocate all free buffer space. */
- comedi_buf_write_alloc(s, async->prealloc_bufsz);
+ _comedi_buf_write_alloc(s, async->prealloc_bufsz);
m = comedi_buf_write_n_allocated(s);
n = min_t(size_t, m, nbytes);
@@ -2673,7 +2730,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
n -= m;
retval = -EFAULT;
}
- comedi_buf_write_free(s, n);
+ _comedi_buf_write_free(s, n);
count += n;
nbytes -= n;
@@ -2759,7 +2816,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
while (count == 0 && !retval) {
set_current_state(TASK_INTERRUPTIBLE);
- m = comedi_buf_read_n_available(s);
+ m = _comedi_buf_read_n_available(s);
n = min_t(size_t, m, nbytes);
if (n == 0) {
@@ -2799,8 +2856,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
retval = -EFAULT;
}
- comedi_buf_read_alloc(s, n);
- comedi_buf_read_free(s, n);
+ _comedi_buf_read_alloc(s, n);
+ _comedi_buf_read_free(s, n);
count += n;
nbytes -= n;
@@ -2834,7 +2891,7 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
s == new_s && new_s->async == async && s->busy == file &&
!(async->cmd.flags & CMDF_WRITE) &&
!comedi_is_subdevice_running(s) &&
- comedi_buf_read_n_available(s) == 0)
+ _comedi_buf_read_n_available(s) == 0)
do_become_nonbusy(dev, s);
mutex_unlock(&dev->mutex);
}
@@ -3023,7 +3080,12 @@ static int compat_chaninfo(struct file *file, unsigned long arg)
chaninfo.rangelist = compat_ptr(chaninfo32.rangelist);
mutex_lock(&dev->mutex);
- err = do_chaninfo_ioctl(dev, &chaninfo);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ err = -ENODEV;
+ } else {
+ err = do_chaninfo_ioctl(dev, &chaninfo);
+ }
mutex_unlock(&dev->mutex);
return err;
}
@@ -3044,7 +3106,12 @@ static int compat_rangeinfo(struct file *file, unsigned long arg)
rangeinfo.range_ptr = compat_ptr(rangeinfo32.range_ptr);
mutex_lock(&dev->mutex);
- err = do_rangeinfo_ioctl(dev, &rangeinfo);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ err = -ENODEV;
+ } else {
+ err = do_rangeinfo_ioctl(dev, &rangeinfo);
+ }
mutex_unlock(&dev->mutex);
return err;
}
@@ -3120,7 +3187,12 @@ static int compat_cmd(struct file *file, unsigned long arg)
return rc;
mutex_lock(&dev->mutex);
- rc = do_cmd_ioctl(dev, &cmd, &copy, file);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ rc = -ENODEV;
+ } else {
+ rc = do_cmd_ioctl(dev, &cmd, &copy, file);
+ }
mutex_unlock(&dev->mutex);
if (copy) {
/* Special case: copy cmd back to user. */
@@ -3145,7 +3217,12 @@ static int compat_cmdtest(struct file *file, unsigned long arg)
return rc;
mutex_lock(&dev->mutex);
- rc = do_cmdtest_ioctl(dev, &cmd, &copy, file);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ rc = -ENODEV;
+ } else {
+ rc = do_cmdtest_ioctl(dev, &cmd, &copy, file);
+ }
mutex_unlock(&dev->mutex);
if (copy) {
err = put_compat_cmd(compat_ptr(arg), &cmd);
@@ -3205,7 +3282,12 @@ static int compat_insnlist(struct file *file, unsigned long arg)
}
mutex_lock(&dev->mutex);
- rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ rc = -ENODEV;
+ } else {
+ rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
+ }
mutex_unlock(&dev->mutex);
kfree(insns);
return rc;
@@ -3224,7 +3306,12 @@ static int compat_insn(struct file *file, unsigned long arg)
return rc;
mutex_lock(&dev->mutex);
- rc = do_insn_ioctl(dev, &insn, file);
+ if (!dev->attached) {
+ dev_dbg(dev->class_dev, "no driver attached\n");
+ rc = -ENODEV;
+ } else {
+ rc = do_insn_ioctl(dev, &insn, file);
+ }
mutex_unlock(&dev->mutex);
return rc;
}
@@ -3299,18 +3386,7 @@ static const struct file_operations comedi_fops = {
.llseek = noop_llseek,
};
-/**
- * comedi_event() - Handle events for asynchronous COMEDI command
- * @dev: COMEDI device.
- * @s: COMEDI subdevice.
- * Context: in_interrupt() (usually), @s->spin_lock spin-lock not held.
- *
- * If an asynchronous COMEDI command is active on the subdevice, process
- * any %COMEDI_CB_... event flags that have been set, usually by an
- * interrupt handler. These may change the run state of the asynchronous
- * command, wake a task, and/or send a %SIGIO signal.
- */
-void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
+void _comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
unsigned int events;
@@ -3346,6 +3422,25 @@ void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
if (si_code)
kill_fasync(&dev->async_queue, SIGIO, si_code);
}
+
+/**
+ * comedi_event() - Handle events for asynchronous COMEDI command
+ * @dev: COMEDI device.
+ * @s: COMEDI subdevice.
+ * Context: in_interrupt() (usually), @s->spin_lock spin-lock not held.
+ *
+ * If an asynchronous COMEDI command is active on the subdevice, process
+ * any %COMEDI_CB_... event flags that have been set, usually by an
+ * interrupt handler. These may change the run state of the asynchronous
+ * command, wake a task, and/or send a %SIGIO signal.
+ */
+void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+		_comedi_event(dev, s);
+ comedi_put_is_subdevice_running(s);
+ }
+}
EXPORT_SYMBOL_GPL(comedi_event);
/* Note: the ->mutex is pre-locked on successful return */
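[Editor's illustration] The run_active/run_complete pair added above is a classic refcount-plus-completion "drain" idiom. A condensed kernel-style sketch of just that idiom, assuming the structure was initialised once with init_completion() before first use; all names here are illustrative, not from the patch.

	struct drain_example {			/* hypothetical */
		refcount_t users;
		struct completion done;
	};

	static void drain_start(struct drain_example *d)
	{
		refcount_set(&d->users, 1);	/* the command's own reference */
		reinit_completion(&d->done);
	}

	static void drain_put(struct drain_example *d)
	{
		if (refcount_dec_and_test(&d->users))
			complete_all(&d->done);
	}

	static void drain_teardown(struct drain_example *d)
	{
		drain_put(d);			/* drop the command's reference */
		wait_for_completion(&d->done);	/* wait out transient users */
	}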
diff --git a/drivers/comedi/comedi_internal.h b/drivers/comedi/comedi_internal.h
index cf10ba016ebc..41a3b09f8f05 100644
--- a/drivers/comedi/comedi_internal.h
+++ b/drivers/comedi/comedi_internal.h
@@ -36,6 +36,18 @@ struct comedi_buf_map *
comedi_buf_map_from_subdev_get(struct comedi_subdevice *s);
unsigned int comedi_buf_write_n_available(struct comedi_subdevice *s);
unsigned int comedi_buf_write_n_allocated(struct comedi_subdevice *s);
+unsigned int _comedi_buf_write_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes);
+unsigned int _comedi_buf_write_free(struct comedi_subdevice *s,
+ unsigned int nbytes);
+unsigned int _comedi_buf_read_n_available(struct comedi_subdevice *s);
+unsigned int _comedi_buf_read_alloc(struct comedi_subdevice *s,
+ unsigned int nbytes);
+unsigned int _comedi_buf_read_free(struct comedi_subdevice *s,
+ unsigned int nbytes);
+void _comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes);
+void _comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
void comedi_device_cancel_all(struct comedi_device *dev);
bool comedi_can_auto_free_spriv(struct comedi_subdevice *s);
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index c9ebaadc5e82..69cd2a253c66 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -441,6 +441,13 @@ unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+static unsigned int _comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+ struct comedi_cmd *cmd = &s->async->cmd;
+
+ return comedi_bytes_per_scan_cmd(s, cmd);
+}
+
/**
* comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
* @s: COMEDI subdevice.
@@ -458,9 +465,16 @@ EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
*/
unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
{
- struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned int num_bytes;
- return comedi_bytes_per_scan_cmd(s, cmd);
+ if (comedi_get_is_subdevice_running(s)) {
+ num_bytes = _comedi_bytes_per_scan(s);
+ comedi_put_is_subdevice_running(s);
+ } else {
+		/* Use nominal, single-sample scan length. */
+ num_bytes = comedi_samples_to_bytes(s, 1);
+ }
+ return num_bytes;
}
EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
@@ -482,6 +496,17 @@ static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
return nscans;
}
+static unsigned int _comedi_nscans_left(struct comedi_subdevice *s,
+ unsigned int nscans)
+{
+ if (nscans == 0) {
+ unsigned int nbytes = _comedi_buf_read_n_available(s);
+
+ nscans = nbytes / _comedi_bytes_per_scan(s);
+ }
+ return __comedi_nscans_left(s, nscans);
+}
+
/**
* comedi_nscans_left() - Return the number of scans left in the command
* @s: COMEDI subdevice.
@@ -499,25 +524,18 @@ static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
unsigned int comedi_nscans_left(struct comedi_subdevice *s,
unsigned int nscans)
{
- if (nscans == 0) {
- unsigned int nbytes = comedi_buf_read_n_available(s);
-
- nscans = nbytes / comedi_bytes_per_scan(s);
+ if (comedi_get_is_subdevice_running(s)) {
+ nscans = _comedi_nscans_left(s, nscans);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nscans = 0;
}
- return __comedi_nscans_left(s, nscans);
+ return nscans;
}
EXPORT_SYMBOL_GPL(comedi_nscans_left);
-/**
- * comedi_nsamples_left() - Return the number of samples left in the command
- * @s: COMEDI subdevice.
- * @nsamples: The expected number of samples.
- *
- * Returns the number of samples remaining to complete the command, or the
- * specified expected number of samples (@nsamples), whichever is fewer.
- */
-unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
- unsigned int nsamples)
+static unsigned int _comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
@@ -538,24 +556,34 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
return samples_left;
return nsamples;
}
-EXPORT_SYMBOL_GPL(comedi_nsamples_left);
/**
- * comedi_inc_scan_progress() - Update scan progress in asynchronous command
+ * comedi_nsamples_left() - Return the number of samples left in the command
* @s: COMEDI subdevice.
- * @num_bytes: Amount of data in bytes to increment scan progress.
+ * @nsamples: The expected number of samples.
*
- * Increments the scan progress by the number of bytes specified by @num_bytes.
- * If the scan progress reaches or exceeds the scan length in bytes, reduce
- * it modulo the scan length in bytes and set the "end of scan" asynchronous
- * event flag (%COMEDI_CB_EOS) to be processed later.
+ * Returns the number of samples remaining to complete the command, or the
+ * specified expected number of samples (@nsamples), whichever is fewer.
*/
-void comedi_inc_scan_progress(struct comedi_subdevice *s,
- unsigned int num_bytes)
+unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
+ unsigned int nsamples)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+ nsamples = _comedi_nsamples_left(s, nsamples);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ nsamples = 0;
+ }
+ return nsamples;
+}
+EXPORT_SYMBOL_GPL(comedi_nsamples_left);
+
+void _comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes)
{
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
- unsigned int scan_length = comedi_bytes_per_scan(s);
+ unsigned int scan_length = _comedi_bytes_per_scan(s);
/* track the 'cur_chan' for non-SDF_PACKED subdevices */
if (!(s->subdev_flags & SDF_PACKED)) {
@@ -576,8 +604,43 @@ void comedi_inc_scan_progress(struct comedi_subdevice *s,
async->events |= COMEDI_CB_EOS;
}
}
+
+/**
+ * comedi_inc_scan_progress() - Update scan progress in asynchronous command
+ * @s: COMEDI subdevice.
+ * @num_bytes: Amount of data in bytes to increment scan progress.
+ *
+ * Increments the scan progress by the number of bytes specified by @num_bytes.
+ * If the scan progress reaches or exceeds the scan length in bytes, reduce
+ * it modulo the scan length in bytes and set the "end of scan" asynchronous
+ * event flag (%COMEDI_CB_EOS) to be processed later.
+ */
+void comedi_inc_scan_progress(struct comedi_subdevice *s,
+ unsigned int num_bytes)
+{
+ if (comedi_get_is_subdevice_running(s)) {
+ _comedi_inc_scan_progress(s, num_bytes);
+ comedi_put_is_subdevice_running(s);
+ }
+}
EXPORT_SYMBOL_GPL(comedi_inc_scan_progress);
+static unsigned int _comedi_handle_events(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ unsigned int events = s->async->events;
+
+ if (events == 0)
+ return events;
+
+ if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel)
+ s->cancel(dev, s);
+
+ _comedi_event(dev, s);
+
+ return events;
+}
+
/**
* comedi_handle_events() - Handle events and possibly stop acquisition
* @dev: COMEDI device.
@@ -597,16 +660,14 @@ EXPORT_SYMBOL_GPL(comedi_inc_scan_progress);
unsigned int comedi_handle_events(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- unsigned int events = s->async->events;
-
- if (events == 0)
- return events;
-
- if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel)
- s->cancel(dev, s);
-
- comedi_event(dev, s);
+ unsigned int events;
+ if (comedi_get_is_subdevice_running(s)) {
+ events = _comedi_handle_events(dev, s);
+ comedi_put_is_subdevice_running(s);
+ } else {
+ events = 0;
+ }
return events;
}
EXPORT_SYMBOL_GPL(comedi_handle_events);
@@ -677,6 +738,7 @@ static int __comedi_device_postconfig_async(struct comedi_device *dev,
return -ENOMEM;
init_waitqueue_head(&async->wait_head);
+ init_completion(&async->run_complete);
s->async = async;
async->max_bufsize = comedi_default_buf_maxsize_kb * 1024;
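[Editor's illustration] The nscans fallback in the reworked comedi_nscans_left() reduces to simple integer arithmetic when nscans == 0: available bytes divided by bytes per scan. A standalone computation with made-up numbers (none of these values appear in the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bytes_per_sample = 2;	/* assumed 16-bit samples */
		unsigned int chanlist_len = 8;		/* assumed scan length */
		unsigned int avail = 1000;		/* assumed readable bytes */
		unsigned int bytes_per_scan = bytes_per_sample * chanlist_len;

		printf("nscans = %u\n", avail / bytes_per_scan);	/* 62 */
		return 0;
	}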
diff --git a/drivers/comedi/drivers/8255.c b/drivers/comedi/drivers/8255.c
index f45f7bd1c61a..5f70938b4477 100644
--- a/drivers/comedi/drivers/8255.c
+++ b/drivers/comedi/drivers/8255.c
@@ -77,19 +77,17 @@ static int dev_8255_attach(struct comedi_device *dev,
* base address of the chip.
*/
ret = __comedi_request_region(dev, iobase, I8255_SIZE);
+ if (ret)
+ return ret;
+ ret = subdev_8255_io_init(dev, s, iobase);
if (ret) {
+ /*
+ * Release the I/O port region here, as the
+ * "detach" handler cannot find it.
+ */
+ release_region(iobase, I8255_SIZE);
s->type = COMEDI_SUBD_UNUSED;
- } else {
- ret = subdev_8255_io_init(dev, s, iobase);
- if (ret) {
- /*
- * Release the I/O port region here, as the
- * "detach" handler cannot find it.
- */
- release_region(iobase, I8255_SIZE);
- s->type = COMEDI_SUBD_UNUSED;
- return ret;
- }
+ return ret;
}
}
diff --git a/drivers/comedi/drivers/Makefile b/drivers/comedi/drivers/Makefile
index b24ac00cab73..7b99a431330d 100644
--- a/drivers/comedi/drivers/Makefile
+++ b/drivers/comedi/drivers/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_COMEDI_ADDI_APCI_3120) += addi_apci_3120.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3501) += addi_apci_3501.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3XXX) += addi_apci_3xxx.o
obj-$(CONFIG_COMEDI_ADL_PCI6208) += adl_pci6208.o
+obj-$(CONFIG_COMEDI_ADL_PCI7250) += adl_pci7250.o
obj-$(CONFIG_COMEDI_ADL_PCI7X3X) += adl_pci7x3x.o
obj-$(CONFIG_COMEDI_ADL_PCI8164) += adl_pci8164.o
obj-$(CONFIG_COMEDI_ADL_PCI9111) += adl_pci9111.o
diff --git a/drivers/comedi/drivers/adl_pci7250.c b/drivers/comedi/drivers/adl_pci7250.c
new file mode 100644
index 000000000000..78c85a402435
--- /dev/null
+++ b/drivers/comedi/drivers/adl_pci7250.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * adl_pci7250.c
+ *
+ * Comedi driver for ADLink PCI-7250 series cards.
+ *
+ * Copyright (C) 2015, 2025 Ian Abbott <abbotti@mev.co.uk>
+ */
+
+/*
+ * Driver: adl_pci7250
+ * Description: Driver for the ADLINK PCI-7250 relay output & digital input card
+ * Devices: [ADLINK] PCI-7250 (adl_pci7250) LPCI-7250 LPCIe-7250
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Status: works
+ * Updated: Mon, 02 Jun 2025 13:54:11 +0100
+ *
+ * The driver assumes that 3 PCI-7251 modules are fitted to the PCI-7250,
+ * giving 32 channels of relay outputs and 32 channels of isolated digital
+ * inputs. That is also the case for the LPCI-7250 and older LPCIe-7250
+ * cards although they do not physically support the PCI-7251 modules.
+ * Newer LPCIe-7250 cards have a different PCI subsystem device ID, so
+ * set the number of channels to 8 for these cards.
+ *
+ * Not fitting the PCI-7251 modules shouldn't do any harm, but the extra
+ * inputs and relay outputs won't work!
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
+
+#include <linux/module.h>
+#include <linux/comedi/comedi_pci.h>
+
+static unsigned char adl_pci7250_read8(struct comedi_device *dev,
+ unsigned int offset)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio)
+ return inb(dev->iobase + offset);
+#endif
+ return readb(dev->mmio + offset);
+}
+
+static void adl_pci7250_write8(struct comedi_device *dev, unsigned int offset,
+ unsigned char val)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio) {
+ outb(val, dev->iobase + offset);
+ return;
+ }
+#endif
+ writeb(val, dev->mmio + offset);
+}
+
+static int adl_pci7250_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = comedi_dio_update_state(s, data);
+
+ if (mask) {
+ unsigned int state = s->state;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ if ((mask & 0xffu) != 0) {
+ /* write relay data to even offset registers */
+ adl_pci7250_write8(dev, i * 2, state & 0xffu);
+ }
+ state >>= 8;
+ mask >>= 8;
+ }
+ }
+
+ data[1] = s->state;
+
+ return 2;
+}
+
+static int adl_pci7250_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int value = 0;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ /* read DI value from odd offset registers */
+ value |= (unsigned int)adl_pci7250_read8(dev, i * 2 + 1) <<
+ (i * 8);
+ }
+
+ data[1] = value;
+
+ return 2;
+}
+
+static int pci7250_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct comedi_subdevice *s;
+ unsigned int max_chans;
+ unsigned int i;
+ int ret;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ if (pci_resource_len(pcidev, 2) < 8)
+ return -ENXIO;
+
+ /*
+ * Newer LPCIe-7250 boards use MMIO. Older LPCIe-7250, LPCI-7250, and
+ * PCI-7250 boards use Port I/O.
+ */
+ if (pci_resource_flags(pcidev, 2) & IORESOURCE_MEM) {
+ dev->mmio = pci_ioremap_bar(pcidev, 2);
+ if (!dev->mmio)
+ return -ENOMEM;
+ } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ dev->iobase = pci_resource_start(pcidev, 2);
+ } else {
+ dev_err(dev->class_dev,
+ "error! need I/O port support\n");
+ return -ENXIO;
+ }
+
+ if (pcidev->subsystem_device == 0x7000) {
+ /*
+ * This is a newer LPCIe-7250 variant and cannot possibly
+ * have PCI-7251 modules fitted, so limit the number of
+ * channels to 8.
+ */
+ max_chans = 8;
+ } else {
+ /*
+ * It is unknown whether the board is a PCI-7250, an LPCI-7250,
+ * or an older LPCIe-7250 variant, so treat it as a PCI-7250
+ * and assume it can have PCI-7251 modules fitted to increase
+ * the number of channels to a maximum of 32.
+ */
+ max_chans = 32;
+ }
+
+ ret = comedi_alloc_subdevices(dev, 2);
+ if (ret)
+ return ret;
+
+ /* Relay digital output. */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_do_insn_bits;
+ /* Read initial state of relays from the even offset registers. */
+ s->state = 0;
+ for (i = 0; i * 8 < max_chans; i++) {
+ s->state |= (unsigned int)adl_pci7250_read8(dev, i * 2) <<
+ (i * 8);
+ }
+
+ /* Isolated digital input. */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_di_insn_bits;
+
+ return 0;
+}
+
+static struct comedi_driver adl_pci7250_driver = {
+ .driver_name = "adl_pci7250",
+ .module = THIS_MODULE,
+ .auto_attach = pci7250_auto_attach,
+ .detach = comedi_pci_detach,
+};
+
+static int adl_pci7250_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return comedi_pci_auto_config(dev, &adl_pci7250_driver,
+ id->driver_data);
+}
+
+static const struct pci_device_id adl_pci7250_pci_table[] = {
+#ifdef CONFIG_HAS_IOPORT
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7250) },
+#endif
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7000) }, /* newer LPCIe-7250 */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, adl_pci7250_pci_table);
+
+static struct pci_driver adl_pci7250_pci_driver = {
+ .name = "adl_pci7250",
+ .id_table = adl_pci7250_pci_table,
+ .probe = adl_pci7250_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
+};
+module_comedi_pci_driver(adl_pci7250_driver, adl_pci7250_pci_driver);
+
+MODULE_AUTHOR("Comedi https://www.comedi.org");
+MODULE_DESCRIPTION("Comedi driver for ADLink PCI-7250 series boards");
+MODULE_LICENSE("GPL");
diff --git a/drivers/comedi/drivers/c6xdigio.c b/drivers/comedi/drivers/c6xdigio.c
index 14b90d1c64dc..8a38d97d463b 100644
--- a/drivers/comedi/drivers/c6xdigio.c
+++ b/drivers/comedi/drivers/c6xdigio.c
@@ -249,9 +249,6 @@ static int c6xdigio_attach(struct comedi_device *dev,
if (ret)
return ret;
- /* Make sure that PnP ports get activated */
- pnp_register_driver(&c6xdigio_pnp_driver);
-
s = &dev->subdevices[0];
/* pwm output subdevice */
s->type = COMEDI_SUBD_PWM;
@@ -278,19 +275,46 @@ static int c6xdigio_attach(struct comedi_device *dev,
return 0;
}
-static void c6xdigio_detach(struct comedi_device *dev)
-{
- comedi_legacy_detach(dev);
- pnp_unregister_driver(&c6xdigio_pnp_driver);
-}
-
static struct comedi_driver c6xdigio_driver = {
.driver_name = "c6xdigio",
.module = THIS_MODULE,
.attach = c6xdigio_attach,
- .detach = c6xdigio_detach,
+ .detach = comedi_legacy_detach,
};
-module_comedi_driver(c6xdigio_driver);
+
+static bool c6xdigio_pnp_registered;
+
+static int __init c6xdigio_module_init(void)
+{
+ int ret;
+
+ ret = comedi_driver_register(&c6xdigio_driver);
+ if (ret)
+ return ret;
+
+ if (IS_ENABLED(CONFIG_PNP)) {
+ /* Try to activate the PnP ports */
+ ret = pnp_register_driver(&c6xdigio_pnp_driver);
+ if (ret) {
+ pr_warn("failed to register pnp driver - err %d\n",
+ ret);
+ ret = 0; /* ignore the error. */
+ } else {
+ c6xdigio_pnp_registered = true;
+ }
+ }
+
+ return 0;
+}
+module_init(c6xdigio_module_init);
+
+static void __exit c6xdigio_module_exit(void)
+{
+ if (c6xdigio_pnp_registered)
+ pnp_unregister_driver(&c6xdigio_pnp_driver);
+ comedi_driver_unregister(&c6xdigio_driver);
+}
+module_exit(c6xdigio_module_exit);
MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for the C6x_DIGIO DSP daughter card");
diff --git a/drivers/comedi/drivers/comedi_bond.c b/drivers/comedi/drivers/comedi_bond.c
index 78c39fa84177..30650fa36fff 100644
--- a/drivers/comedi/drivers/comedi_bond.c
+++ b/drivers/comedi/drivers/comedi_bond.c
@@ -205,7 +205,7 @@ static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
snprintf(file, sizeof(file), "/dev/comedi%d", minor);
file[sizeof(file) - 1] = 0;
- d = comedi_open(file);
+ d = comedi_open_from(file, dev->minor);
if (!d) {
dev_err(dev->class_dev,
@@ -326,7 +326,7 @@ static void bonding_detach(struct comedi_device *dev)
if (!bdev)
continue;
if (!test_and_set_bit(bdev->minor, devs_closed))
- comedi_close(bdev->dev);
+ comedi_close_from(bdev->dev, dev->minor);
kfree(bdev);
}
kfree(devpriv->devs);
diff --git a/drivers/comedi/drivers/multiq3.c b/drivers/comedi/drivers/multiq3.c
index 07ff5383da99..ac369e9a262d 100644
--- a/drivers/comedi/drivers/multiq3.c
+++ b/drivers/comedi/drivers/multiq3.c
@@ -67,6 +67,11 @@
#define MULTIQ3_TRSFRCNTR_OL 0x10 /* xfer CNTR to OL (x and y) */
#define MULTIQ3_EFLAG_RESET 0x06 /* reset E bit of flag reg */
+/*
+ * Limit on the number of optional encoder channels
+ */
+#define MULTIQ3_MAX_ENC_CHANS 8
+
static void multiq3_set_ctrl(struct comedi_device *dev, unsigned int bits)
{
/*
@@ -312,6 +317,10 @@ static int multiq3_attach(struct comedi_device *dev,
s->insn_read = multiq3_encoder_insn_read;
s->insn_config = multiq3_encoder_insn_config;
+ /* sanity check for number of encoder channels */
+ if (s->n_chan > MULTIQ3_MAX_ENC_CHANS)
+ s->n_chan = MULTIQ3_MAX_ENC_CHANS;
+
for (i = 0; i < s->n_chan; i++)
multiq3_encoder_reset(dev, i);
diff --git a/drivers/comedi/drivers/ni_670x.c b/drivers/comedi/drivers/ni_670x.c
index c875d251c230..563a9c790f12 100644
--- a/drivers/comedi/drivers/ni_670x.c
+++ b/drivers/comedi/drivers/ni_670x.c
@@ -199,7 +199,7 @@ static int ni_670x_auto_attach(struct comedi_device *dev,
const struct comedi_lrange **range_table_list;
range_table_list = kmalloc_array(32,
- sizeof(struct comedi_lrange *),
+ sizeof(*range_table_list),
GFP_KERNEL);
if (!range_table_list)
return -ENOMEM;
diff --git a/drivers/comedi/drivers/pcl818.c b/drivers/comedi/drivers/pcl818.c
index 4127adcfb229..06fe06396f23 100644
--- a/drivers/comedi/drivers/pcl818.c
+++ b/drivers/comedi/drivers/pcl818.c
@@ -1111,10 +1111,9 @@ static void pcl818_detach(struct comedi_device *dev)
{
struct pcl818_private *devpriv = dev->private;
- if (devpriv) {
- pcl818_ai_cancel(dev, dev->read_subdev);
+ if (devpriv)
pcl818_reset(dev);
- }
+
pcl818_free_dma(dev);
comedi_legacy_detach(dev);
}
diff --git a/drivers/comedi/kcomedilib/kcomedilib_main.c b/drivers/comedi/kcomedilib/kcomedilib_main.c
index 43fbe1a63b14..baa9eaaf97d4 100644
--- a/drivers/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/comedi/kcomedilib/kcomedilib_main.c
@@ -15,6 +15,7 @@
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/io.h>
+#include <linux/bitmap.h>
#include <linux/comedi.h>
#include <linux/comedi/comedidev.h>
@@ -24,7 +25,104 @@ MODULE_AUTHOR("David Schleef <ds@schleef.org>");
MODULE_DESCRIPTION("Comedi kernel library");
MODULE_LICENSE("GPL");
-struct comedi_device *comedi_open(const char *filename)
+static DEFINE_MUTEX(kcomedilib_to_from_lock);
+
+/*
+ * Row index is the "to" node, column index is the "from" node, element value
+ * is the number of links from the "from" node to the "to" node.
+ */
+static unsigned char
+ kcomedilib_to_from[COMEDI_NUM_BOARD_MINORS][COMEDI_NUM_BOARD_MINORS];
+
+static bool kcomedilib_set_link_from_to(unsigned int from, unsigned int to)
+{
+ DECLARE_BITMAP(destinations[2], COMEDI_NUM_BOARD_MINORS);
+ unsigned int cur = 0;
+ bool okay = true;
+
+ /*
+ * Allow "from" node to be out of range (no loop checking),
+ * but require "to" node to be in range.
+ */
+ if (to >= COMEDI_NUM_BOARD_MINORS)
+ return false;
+ if (from >= COMEDI_NUM_BOARD_MINORS)
+ return true;
+
+ /*
+ * Check that kcomedilib_to_from[to][from] can be made non-zero
+ * without creating a loop.
+ *
+ * Termination of the loop-testing code relies on the assumption that
+ * kcomedilib_to_from[][] does not contain any loops.
+ *
+	 * Start with a set of destinations containing "from" as the only
+ * element and work backwards looking for loops.
+ */
+ bitmap_zero(destinations[cur], COMEDI_NUM_BOARD_MINORS);
+ set_bit(from, destinations[cur]);
+ mutex_lock(&kcomedilib_to_from_lock);
+ do {
+ unsigned int next = 1 - cur;
+ unsigned int t = 0;
+
+ if (test_bit(to, destinations[cur])) {
+ /* Loop detected. */
+ okay = false;
+ break;
+ }
+ /* Create next set of destinations. */
+ bitmap_zero(destinations[next], COMEDI_NUM_BOARD_MINORS);
+ while ((t = find_next_bit(destinations[cur],
+ COMEDI_NUM_BOARD_MINORS,
+ t)) < COMEDI_NUM_BOARD_MINORS) {
+ unsigned int f;
+
+ for (f = 0; f < COMEDI_NUM_BOARD_MINORS; f++) {
+ if (kcomedilib_to_from[t][f])
+ set_bit(f, destinations[next]);
+ }
+ t++;
+ }
+ cur = next;
+ } while (!bitmap_empty(destinations[cur], COMEDI_NUM_BOARD_MINORS));
+ if (okay) {
+ /* Allow a maximum of 255 links from "from" to "to". */
+ if (kcomedilib_to_from[to][from] < 255)
+ kcomedilib_to_from[to][from]++;
+ else
+ okay = false;
+ }
+ mutex_unlock(&kcomedilib_to_from_lock);
+ return okay;
+}
+
+static void kcomedilib_clear_link_from_to(unsigned int from, unsigned int to)
+{
+ if (to < COMEDI_NUM_BOARD_MINORS && from < COMEDI_NUM_BOARD_MINORS) {
+ mutex_lock(&kcomedilib_to_from_lock);
+ if (kcomedilib_to_from[to][from])
+ kcomedilib_to_from[to][from]--;
+ mutex_unlock(&kcomedilib_to_from_lock);
+ }
+}
+
+/**
+ * comedi_open_from() - Open a COMEDI device from the kernel with loop checks
+ * @filename: Fake pathname of the form "/dev/comediN".
+ * @from: Device number it is being opened from (if in range).
+ *
+ * Converts @filename to a COMEDI device number and "opens" it if it exists
+ * and is attached to a low-level COMEDI driver.
+ *
+ * If @from is in range, refuse to open the device if doing so would form a
+ * loop of devices opening each other. There is also a limit of 255 on the
+ * number of concurrent opens from one device to another.
+ *
+ * Return: A pointer to the COMEDI device on success.
+ * Return %NULL on failure.
+ */
+struct comedi_device *comedi_open_from(const char *filename, int from)
{
struct comedi_device *dev, *retval = NULL;
unsigned int minor;
@@ -43,7 +141,7 @@ struct comedi_device *comedi_open(const char *filename)
return NULL;
down_read(&dev->attach_lock);
- if (dev->attached)
+ if (dev->attached && kcomedilib_set_link_from_to(from, minor))
retval = dev;
else
retval = NULL;
@@ -54,14 +152,26 @@ struct comedi_device *comedi_open(const char *filename)
return retval;
}
-EXPORT_SYMBOL_GPL(comedi_open);
+EXPORT_SYMBOL_GPL(comedi_open_from);
-int comedi_close(struct comedi_device *dev)
+/**
+ * comedi_close_from() - Close a COMEDI device from the kernel with loop checks
+ * @dev: COMEDI device.
+ * @from: Device number it was opened from (if in range).
+ *
+ * Closes a COMEDI device previously opened by comedi_open_from().
+ *
+ * If @from is in range, it should match the one used by comedi_open_from().
+ *
+ * Return: 0.
+ */
+int comedi_close_from(struct comedi_device *dev, int from)
{
+ kcomedilib_clear_link_from_to(from, dev->minor);
comedi_dev_put(dev);
return 0;
}
-EXPORT_SYMBOL_GPL(comedi_close);
+EXPORT_SYMBOL_GPL(comedi_close_from);
static int comedi_do_insn(struct comedi_device *dev,
struct comedi_insn *insn,
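[Editor's illustration] The loop check in kcomedilib_set_link_from_to() is a breadth-first walk over the [to][from] link-count matrix, alternating between two destination sets until the set empties or "to" is reached. A standalone sketch of the same algorithm on plain arrays; the matrix size and link data are fabricated.

	#include <stdio.h>
	#include <string.h>

	#define N 4

	static unsigned char to_from[N][N];	/* [to][from] link counts */

	static int would_loop(unsigned int from, unsigned int to)
	{
		unsigned char dst[2][N];
		unsigned int cur = 0, t, f, any;

		memset(dst[cur], 0, sizeof(dst[cur]));
		dst[cur][from] = 1;
		do {
			unsigned int next = 1 - cur;

			if (dst[cur][to])
				return 1;	/* adding from->to would close a cycle */
			/* Next set: every device that has opened a current member. */
			memset(dst[next], 0, sizeof(dst[next]));
			any = 0;
			for (t = 0; t < N; t++) {
				if (!dst[cur][t])
					continue;
				for (f = 0; f < N; f++)
					if (to_from[t][f])
						dst[next][f] = any = 1;
			}
			cur = next;
		} while (any);
		return 0;	/* set emptied without reaching "to": no cycle */
	}

	int main(void)
	{
		to_from[1][0] = 1;	/* device 0 has opened device 1 */
		printf("open 1 from 0 again: %s\n", would_loop(0, 1) ? "loop" : "ok");
		printf("open 0 from 1: %s\n", would_loop(1, 0) ? "loop" : "ok");
		return 0;
	}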
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 1a299d1f350b..19d457ae4c3b 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -451,7 +451,7 @@ static void mchp_tc_irq_remove(void *ptr)
static int mchp_tc_irq_enable(struct counter_device *const counter, int irq)
{
struct mchp_tc_data *const priv = counter_priv(counter);
- int ret = devm_request_irq(counter->parent, irq, mchp_tc_isr, 0,
+ int ret = devm_request_irq(counter->parent, irq, mchp_tc_isr, IRQF_SHARED,
dev_name(counter->parent), counter);
if (ret < 0)
diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
index 3faaf7f60539..3586a7ab9887 100644
--- a/drivers/counter/ti-ecap-capture.c
+++ b/drivers/counter/ti-ecap-capture.c
@@ -465,11 +465,6 @@ static irqreturn_t ecap_cnt_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void ecap_cnt_pm_disable(void *dev)
-{
- pm_runtime_disable(dev);
-}
-
static int ecap_cnt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -523,12 +518,9 @@ static int ecap_cnt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, counter_dev);
- pm_runtime_enable(dev);
-
- /* Register a cleanup callback to care for disabling PM */
- ret = devm_add_action_or_reset(dev, ecap_cnt_pm_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
- return dev_err_probe(dev, ret, "failed to add pm disable action\n");
+ return ret;
ret = devm_counter_add(dev, counter_dev);
if (ret)
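[Editor's illustration] The ti-ecap change above relies on devm_pm_runtime_enable() both enabling runtime PM and registering the matching disable action, replacing the manual pm_runtime_enable() plus devm_add_action_or_reset() pair. A kernel-context sketch of the resulting probe shape (example_probe is a hypothetical name):

	static int example_probe(struct platform_device *pdev)	/* hypothetical */
	{
		int ret;

		/* Enables runtime PM and registers the matching disable action. */
		ret = devm_pm_runtime_enable(&pdev->dev);
		if (ret)
			return ret;

		/* Rest of probe; no manual cleanup callback is needed. */
		return 0;
	}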
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4f7f9201598d..e73a66785d69 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -318,7 +318,6 @@ static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
return cmd.val;
}
-/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
struct drv_cmd *cmd = _cmd;
@@ -335,14 +334,8 @@ static void drv_write(struct acpi_cpufreq_data *data,
.val = val,
.func.write = data->cpu_freq_write,
};
- int this_cpu;
- this_cpu = get_cpu();
- if (cpumask_test_cpu(this_cpu, mask))
- do_drv_write(&cmd);
-
- smp_call_function_many(mask, do_drv_write, &cmd, 1);
- put_cpu();
+ on_each_cpu_mask(mask, do_drv_write, &cmd, true);
}
static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
@@ -402,7 +395,7 @@ static unsigned int check_freqs(struct cpufreq_policy *policy,
cur_freq = extract_freq(policy, get_cur_val(mask, data));
if (cur_freq == freq)
return 1;
- udelay(10);
+ usleep_range(10, 15);
}
return 0;
}
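[Editor's illustration] The acpi-cpufreq change collapses the open-coded get_cpu()/smp_call_function_many()/put_cpu() sequence into one on_each_cpu_mask() call, which runs the callback on every CPU in the mask (including the local CPU when applicable) and handles preemption internally. A kernel-context sketch with hypothetical names:

	static void example_write(void *arg)	/* hypothetical callback */
	{
		/* The per-CPU register write would go here. */
	}

	static void example_broadcast(const struct cpumask *mask, void *arg)
	{
		/* Runs example_write() on every CPU in @mask and waits. */
		on_each_cpu_mask(mask, example_write, arg, true);
	}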
diff --git a/drivers/cpufreq/airoha-cpufreq.c b/drivers/cpufreq/airoha-cpufreq.c
index 4fe39eadd163..b6b1cdc4d11d 100644
--- a/drivers/cpufreq/airoha-cpufreq.c
+++ b/drivers/cpufreq/airoha-cpufreq.c
@@ -107,6 +107,7 @@ static struct platform_driver airoha_cpufreq_driver = {
};
static const struct of_device_id airoha_cpufreq_match_list[] __initconst = {
+ { .compatible = "airoha,an7583" },
{ .compatible = "airoha,en7581" },
{},
};
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index b4c79fde1979..c45bc98721d2 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -65,13 +65,13 @@ static const char * const amd_pstate_mode_string[] = {
[AMD_PSTATE_PASSIVE] = "passive",
[AMD_PSTATE_ACTIVE] = "active",
[AMD_PSTATE_GUIDED] = "guided",
- NULL,
};
+static_assert(ARRAY_SIZE(amd_pstate_mode_string) == AMD_PSTATE_MAX);
const char *amd_pstate_get_mode_string(enum amd_pstate_mode mode)
{
- if (mode < 0 || mode >= AMD_PSTATE_MAX)
- return NULL;
+ if (mode < AMD_PSTATE_UNDEFINED || mode >= AMD_PSTATE_MAX)
+ mode = AMD_PSTATE_UNDEFINED;
return amd_pstate_mode_string[mode];
}
EXPORT_SYMBOL_GPL(amd_pstate_get_mode_string);
@@ -110,6 +110,7 @@ enum energy_perf_value_index {
EPP_INDEX_BALANCE_PERFORMANCE,
EPP_INDEX_BALANCE_POWERSAVE,
EPP_INDEX_POWERSAVE,
+ EPP_INDEX_MAX,
};
static const char * const energy_perf_strings[] = {
@@ -118,8 +119,8 @@ static const char * const energy_perf_strings[] = {
[EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
[EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
[EPP_INDEX_POWERSAVE] = "power",
- NULL
};
+static_assert(ARRAY_SIZE(energy_perf_strings) == EPP_INDEX_MAX);
static unsigned int epp_values[] = {
[EPP_INDEX_DEFAULT] = 0,
@@ -127,7 +128,8 @@ static unsigned int epp_values[] = {
[EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
[EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
[EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
- };
+};
+static_assert(ARRAY_SIZE(epp_values) == EPP_INDEX_MAX);
typedef int (*cppc_mode_transition_fn)(int);
@@ -183,7 +185,7 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
{
int i;
- for (i=0; i < AMD_PSTATE_MAX; i++) {
+ for (i = 0; i < AMD_PSTATE_MAX; i++) {
if (!strncmp(str, amd_pstate_mode_string[i], size))
return i;
}
@@ -872,10 +874,10 @@ static void amd_pstate_update_limits(struct cpufreq_policy *policy)
*/
static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
{
- u32 transition_delay_ns;
+ int transition_delay_ns;
transition_delay_ns = cppc_get_transition_latency(cpu);
- if (transition_delay_ns == CPUFREQ_ETERNAL) {
+ if (transition_delay_ns < 0) {
if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
else
@@ -891,10 +893,10 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
*/
static u32 amd_pstate_get_transition_latency(unsigned int cpu)
{
- u32 transition_latency;
+ int transition_latency;
transition_latency = cppc_get_transition_latency(cpu);
- if (transition_latency == CPUFREQ_ETERNAL)
+ if (transition_latency < 0)
return AMD_PSTATE_TRANSITION_LATENCY;
return transition_latency;
@@ -1137,16 +1139,15 @@ static ssize_t show_amd_pstate_hw_prefcore(struct cpufreq_policy *policy,
static ssize_t show_energy_performance_available_preferences(
struct cpufreq_policy *policy, char *buf)
{
- int i = 0;
- int offset = 0;
+ int offset = 0, i;
struct amd_cpudata *cpudata = policy->driver_data;
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
return sysfs_emit_at(buf, offset, "%s\n",
energy_perf_strings[EPP_INDEX_PERFORMANCE]);
- while (energy_perf_strings[i] != NULL)
- offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+ for (i = 0; i < ARRAY_SIZE(energy_perf_strings); i++)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i]);
offset += sysfs_emit_at(buf, offset, "\n");
@@ -1157,15 +1158,10 @@ static ssize_t store_energy_performance_preference(
struct cpufreq_policy *policy, const char *buf, size_t count)
{
struct amd_cpudata *cpudata = policy->driver_data;
- char str_preference[21];
ssize_t ret;
u8 epp;
- ret = sscanf(buf, "%20s", str_preference);
- if (ret != 1)
- return -EINVAL;
-
- ret = match_string(energy_perf_strings, -1, str_preference);
+ ret = sysfs_match_string(energy_perf_strings, buf);
if (ret < 0)
return -EINVAL;
@@ -1282,7 +1278,7 @@ static int amd_pstate_change_mode_without_dvr_change(int mode)
if (cpu_feature_enabled(X86_FEATURE_CPPC) || cppc_state == AMD_PSTATE_ACTIVE)
return 0;
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
cppc_set_auto_sel(cpu, (cppc_state == AMD_PSTATE_PASSIVE) ? 0 : 1);
}
@@ -1353,9 +1349,8 @@ int amd_pstate_update_status(const char *buf, size_t size)
return -EINVAL;
mode_idx = get_mode_idx_from_str(buf, size);
-
- if (mode_idx < 0 || mode_idx >= AMD_PSTATE_MAX)
- return -EINVAL;
+ if (mode_idx < 0)
+ return mode_idx;
if (mode_state_machine[cppc_state][mode_idx]) {
guard(mutex)(&amd_pstate_driver_lock);
@@ -1614,7 +1609,11 @@ static int amd_pstate_cpu_offline(struct cpufreq_policy *policy)
* min_perf value across kexec reboots. If this CPU is just onlined normally after this, the
* limits, epp and desired perf will get reset to the cached values in cpudata struct
*/
- return amd_pstate_update_perf(policy, perf.bios_min_perf, 0U, 0U, 0U, false);
+ return amd_pstate_update_perf(policy, perf.bios_min_perf,
+ FIELD_GET(AMD_CPPC_DES_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
+ FIELD_GET(AMD_CPPC_EPP_PERF_MASK, cpudata->cppc_req_cached),
+ false);
}
static int amd_pstate_suspend(struct cpufreq_policy *policy)
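The offline path above now repopulates the request from the cached register with FIELD_GET(); a minimal sketch of that helper, using an illustrative register layout rather than the real AMD_CPPC_*_MASK definitions:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>
#include <linux/types.h>

#define DEMO_MIN_PERF_MASK	GENMASK_ULL(7, 0)
#define DEMO_MAX_PERF_MASK	GENMASK_ULL(15, 8)

static void demo_unpack(u64 req)
{
	/* For req == 0x2a10: min_perf == 0x10, max_perf == 0x2a. */
	pr_debug("min %llu max %llu\n",
		 FIELD_GET(DEMO_MIN_PERF_MASK, req),
		 FIELD_GET(DEMO_MAX_PERF_MASK, req));
}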
diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
index f28a4435fba7..0efe403a5980 100644
--- a/drivers/cpufreq/armada-37xx-cpufreq.c
+++ b/drivers/cpufreq/armada-37xx-cpufreq.c
@@ -265,7 +265,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
*/
target_vm = avs_map[l0_vdd_min] - 100;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[1] = armada_37xx_avs_val_match(target_vm);
/*
@@ -273,7 +273,7 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
* be larger than 1000mv
*/
target_vm = avs_map[l0_vdd_min] - 150;
- target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
+ target_vm = max(target_vm, MIN_VOLT_MV);
dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
/*
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 5940d262374f..71450cca8e9f 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -480,7 +480,7 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct private_data *priv;
if (!policy)
@@ -488,8 +488,6 @@ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
priv = policy->driver_data;
- cpufreq_cpu_put(policy);
-
return brcm_avs_get_frequency(priv->base);
}
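cpufreq_cpu_get() returns a reference that previously had to be dropped by hand; __free() attaches the drop to scope exit instead. A minimal sketch of the same mechanism from <linux/cleanup.h>, using a hypothetical resource type:

#include <linux/cleanup.h>
#include <linux/slab.h>

struct demo_res { int val; };

DEFINE_FREE(demo_res_free, struct demo_res *, if (_T) kfree(_T))

static int demo_read_val(void)
{
	struct demo_res *r __free(demo_res_free) = kzalloc(sizeof(*r), GFP_KERNEL);

	if (!r)
		return -ENOMEM;

	return r->val;	/* kfree(r) runs automatically on every return */
}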
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 4a17162a392d..9eac77c4f294 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -50,8 +50,7 @@ struct cppc_freq_invariance {
static DEFINE_PER_CPU(struct cppc_freq_invariance, cppc_freq_inv);
static struct kthread_worker *kworker_fie;
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1);
/**
@@ -87,8 +86,7 @@ static void cppc_scale_freq_workfn(struct kthread_work *work)
return;
}
- perf = cppc_perf_from_fbctrs(cpu_data, &cppc_fi->prev_perf_fb_ctrs,
- &fb_ctrs);
+ perf = cppc_perf_from_fbctrs(&cppc_fi->prev_perf_fb_ctrs, &fb_ctrs);
if (!perf)
return;
@@ -144,16 +142,15 @@ static void cppc_cpufreq_cpu_fie_init(struct cpufreq_policy *policy)
init_irq_work(&cppc_fi->irq_work, cppc_irq_work);
ret = cppc_get_perf_ctrs(cpu, &cppc_fi->prev_perf_fb_ctrs);
- if (ret) {
- pr_warn("%s: failed to read perf counters for cpu:%d: %d\n",
- __func__, cpu, ret);
- /*
- * Don't abort if the CPU was offline while the driver
- * was getting registered.
- */
- if (cpu_online(cpu))
- return;
+ /*
+	 * Don't abort if the CPU was offline while the driver was

+ * getting registered.
+ */
+ if (ret && cpu_online(cpu)) {
+ pr_debug("%s: failed to read perf counters for cpu:%d: %d\n",
+ __func__, cpu, ret);
+ return;
}
}
@@ -310,6 +307,16 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
return 0;
}
+static unsigned int __cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
+{
+ int transition_latency_ns = cppc_get_transition_latency(cpu);
+
+ if (transition_latency_ns < 0)
+ return CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS / NSEC_PER_USEC;
+
+ return transition_latency_ns / NSEC_PER_USEC;
+}
+
/*
* The PCC subspace describes the rate at which platform can accept commands
* on the shared PCC channel (including READs which do not count towards freq
@@ -332,12 +339,12 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
return 10000;
}
}
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#else
static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
{
- return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+ return __cppc_cpufreq_get_transition_delay_us(cpu);
}
#endif
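cppc_get_transition_latency() now reports failure as a negative errno instead of the CPUFREQ_ETERNAL sentinel; a minimal sketch of the fallback logic the new wrapper applies:

#include <linux/cpufreq.h>
#include <linux/time64.h>

static unsigned int demo_transition_delay_us(int latency_ns)
{
	/* Negative means "unknown": substitute the default latency. */
	if (latency_ns < 0)
		latency_ns = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;

	return latency_ns / NSEC_PER_USEC;
}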
@@ -684,8 +691,7 @@ static inline u64 get_delta(u64 t1, u64 t0)
return (u32)t1 - (u32)t0;
}
-static int cppc_perf_from_fbctrs(struct cppc_cpudata *cpu_data,
- struct cppc_perf_fb_ctrs *fb_ctrs_t0,
+static int cppc_perf_from_fbctrs(struct cppc_perf_fb_ctrs *fb_ctrs_t0,
struct cppc_perf_fb_ctrs *fb_ctrs_t1)
{
u64 delta_reference, delta_delivered;
@@ -725,8 +731,8 @@ static int cppc_get_perf_ctrs_sample(int cpu,
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cppc_cpudata *cpu_data;
u64 delivered_perf;
int ret;
@@ -736,8 +742,6 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
cpu_data = policy->driver_data;
- cpufreq_cpu_put(policy);
-
ret = cppc_get_perf_ctrs_sample(cpu, &fb_ctrs_t0, &fb_ctrs_t1);
if (ret) {
if (ret == -EFAULT)
@@ -747,8 +751,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
return 0;
}
- delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
- &fb_ctrs_t1);
+ delivered_perf = cppc_perf_from_fbctrs(&fb_ctrs_t0, &fb_ctrs_t1);
if (!delivered_perf)
goto out_invalid_counters;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index 015dd393eaba..a1d11ecd1ac8 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -87,6 +87,7 @@ static const struct of_device_id allowlist[] __initconst = {
{ .compatible = "st-ericsson,u9540", },
{ .compatible = "starfive,jh7110", },
+ { .compatible = "starfive,jh7110s", },
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap4", },
@@ -103,6 +104,7 @@ static const struct of_device_id allowlist[] __initconst = {
* platforms using "operating-points-v2" property.
*/
static const struct of_device_id blocklist[] __initconst = {
+ { .compatible = "airoha,an7583", },
{ .compatible = "airoha,en7581", },
{ .compatible = "allwinner,sun50i-a100" },
@@ -188,9 +190,11 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "ti,omap3", },
{ .compatible = "ti,am625", },
{ .compatible = "ti,am62a7", },
+ { .compatible = "ti,am62d2", },
{ .compatible = "ti,am62p5", },
{ .compatible = "qcom,ipq5332", },
+ { .compatible = "qcom,ipq5424", },
{ .compatible = "qcom,ipq6018", },
{ .compatible = "qcom,ipq8064", },
{ .compatible = "qcom,ipq8074", },
@@ -215,20 +219,13 @@ static bool __init cpu0_node_has_opp_v2_prop(void)
static int __init cpufreq_dt_platdev_init(void)
{
- struct device_node *np __free(device_node) = of_find_node_by_path("/");
- const struct of_device_id *match;
- const void *data = NULL;
+ const void *data;
- if (!np)
- return -ENODEV;
-
- match = of_match_node(allowlist, np);
- if (match) {
- data = match->data;
+ data = of_machine_get_match_data(allowlist);
+ if (data)
goto create_pdev;
- }
- if (cpu0_node_has_opp_v2_prop() && !of_match_node(blocklist, np))
+ if (cpu0_node_has_opp_v2_prop() && !of_machine_device_match(blocklist))
goto create_pdev;
return -ENODEV;
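of_machine_get_match_data() folds the root-node lookup, table match, refcount drop, and data fetch into a single call; a minimal sketch (the compatible string is invented):

#include <linux/of.h>

static const struct of_device_id demo_allowlist[] __initconst = {
	{ .compatible = "vendor,demo-board", .data = (void *)1 },
	{ /* sentinel */ }
};

static int __init demo_platdev_init(void)
{
	const void *data = of_machine_get_match_data(demo_allowlist);

	return data ? 0 : -ENODEV;
}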
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 506437489b4d..7d5079fd1688 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -104,7 +104,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
if (!transition_latency)
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
cpumask_copy(policy->cpus, priv->cpus);
policy->driver_data = priv;
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index fedad1081973..fbbbe501cf2d 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -145,6 +145,8 @@ static unsigned int nforce2_fsb_read(int bootfsb)
pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb);
fsb /= 1000000;
+ pci_dev_put(nforce2_sub5);
+
/* Check if PLL register is already set */
pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp);
@@ -426,6 +428,7 @@ static int __init nforce2_init(void)
static void __exit nforce2_exit(void)
{
cpufreq_unregister_driver(&nforce2_driver);
+ pci_dev_put(nforce2_dev);
}
module_init(nforce2_init);
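Both hunks above balance references taken by pci_get_device(), which pins the struct pci_dev until pci_dev_put(). A minimal sketch of the rule (vendor/device IDs and config offset are placeholders):

#include <linux/pci.h>

static int demo_read_reg(void)
{
	struct pci_dev *dev = pci_get_device(0x10de, 0x01e0, NULL);
	u32 val;

	if (!dev)
		return -ENODEV;

	pci_read_config_dword(dev, 0x40, &val);
	pci_dev_put(dev);	/* drop the reference pci_get_device() took */

	return 0;
}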
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fc7eace8b65b..4472bb1ec83c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -664,10 +664,10 @@ unlock:
static unsigned int cpufreq_parse_policy(char *str_governor)
{
- if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "performance", strlen("performance")))
return CPUFREQ_POLICY_PERFORMANCE;
- if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
+ if (!strncasecmp(str_governor, "powersave", strlen("powersave")))
return CPUFREQ_POLICY_POWERSAVE;
return CPUFREQ_POLICY_UNKNOWN;
@@ -914,7 +914,7 @@ static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
const char *buf, size_t count)
{
unsigned int freq = 0;
- unsigned int ret;
+ int ret;
if (!policy->governor || !policy->governor->store_setspeed)
return -EINVAL;
@@ -1121,7 +1121,8 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
if (has_target()) {
/* Update policy governor to the one used before hotplug. */
- gov = get_governor(policy->last_governor);
+ if (policy->last_governor[0] != '\0')
+ gov = get_governor(policy->last_governor);
if (gov) {
pr_debug("Restoring governor %s for cpu %d\n",
gov->name, policy->cpu);
@@ -1420,9 +1421,12 @@ static int cpufreq_policy_online(struct cpufreq_policy *policy,
* If there is a problem with its frequency table, take it
* offline and drop it.
*/
- ret = cpufreq_table_validate_and_sort(policy);
- if (ret)
- goto out_offline_policy;
+ if (policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_ASCENDING &&
+ policy->freq_table_sorted != CPUFREQ_TABLE_SORTED_DESCENDING) {
+ ret = cpufreq_table_validate_and_sort(policy);
+ if (ret)
+ goto out_offline_policy;
+ }
/* related_cpus should at least include policy->cpus. */
cpumask_copy(policy->related_cpus, policy->cpus);
@@ -1844,7 +1848,6 @@ static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, b
*/
unsigned int cpufreq_quick_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy) = NULL;
unsigned long flags;
read_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1859,7 +1862,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cur;
@@ -1875,9 +1878,7 @@ EXPORT_SYMBOL(cpufreq_quick_get);
*/
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->max;
@@ -1893,9 +1894,7 @@ EXPORT_SYMBOL(cpufreq_quick_get_max);
*/
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (policy)
return policy->cpuinfo.max_freq;
@@ -1919,9 +1918,7 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -2556,7 +2553,7 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
for_each_inactive_policy(policy) {
if (!strcmp(policy->last_governor, governor->name)) {
policy->governor = NULL;
- strcpy(policy->last_governor, "\0");
+ policy->last_governor[0] = '\0';
}
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -2750,9 +2747,7 @@ static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
*/
void cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2769,9 +2764,7 @@ EXPORT_SYMBOL(cpufreq_update_policy);
*/
void cpufreq_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy)
return;
@@ -2792,7 +2785,7 @@ int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
if (!policy->freq_table)
return -ENXIO;
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret) {
pr_err("%s: Policy frequency update failed\n", __func__);
return ret;
@@ -2921,10 +2914,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
return -EPROBE_DEFER;
if (!driver_data || !driver_data->verify || !driver_data->init ||
- !(driver_data->setpolicy || driver_data->target_index ||
- driver_data->target) ||
- (driver_data->setpolicy && (driver_data->target_index ||
- driver_data->target)) ||
+ (driver_data->target_index && driver_data->target) ||
+ (!!driver_data->setpolicy == (driver_data->target_index || driver_data->target)) ||
(!driver_data->get_intermediate != !driver_data->target_intermediate) ||
(!driver_data->online != !driver_data->offline) ||
(driver_data->adjust_perf && !driver_data->fast_switch))
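The rewritten check condenses the old four-way condition into one invariant: a driver must supply exactly one of the two control interfaces. A minimal sketch of the truth table it encodes:

#include <linux/types.h>

/*
 *   setpolicy   target/target_index   accepted?
 *       0               0               no
 *       0               1               yes
 *       1               0               yes
 *       1               1               no
 */
static bool demo_callbacks_valid(bool has_setpolicy, bool has_target)
{
	return has_setpolicy != has_target;
}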
@@ -2953,6 +2944,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
goto err_null_driver;
}
+ /*
+ * Mark support for the scheduler's frequency invariance engine for
+ * drivers that implement target(), target_index() or fast_switch().
+ */
+ if (!cpufreq_driver->setpolicy) {
+ static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+ pr_debug("cpufreq: supports frequency invariance\n");
+ }
+
ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_boost_unreg;
@@ -2974,21 +2974,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
hp_online = ret;
ret = 0;
- /*
- * Mark support for the scheduler's frequency invariance engine for
- * drivers that implement target(), target_index() or fast_switch().
- */
- if (!cpufreq_driver->setpolicy) {
- static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
- pr_debug("supports frequency invariance");
- }
-
pr_debug("driver %s up and running\n", driver_data->name);
goto out;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
+ if (!cpufreq_driver->setpolicy)
+ static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
remove_boost_sysfs_file();
err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -3056,9 +3049,7 @@ static int __init cpufreq_core_init(void)
static bool cpufreq_policy_is_good_for_eas(unsigned int cpu)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
if (!policy) {
pr_debug("cpufreq policy not set for CPU: %d\n", cpu);
return false;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 56500b25d77c..cce6a8d113e1 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -152,9 +152,9 @@ static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set,
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ if (ret || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
return -EINVAL;
dbs_data->sampling_down_factor = input;
@@ -168,9 +168,9 @@ static ssize_t up_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
+ if (ret || input > 100 || input <= cs_tuners->down_threshold)
return -EINVAL;
dbs_data->up_threshold = input;
@@ -184,10 +184,10 @@ static ssize_t down_threshold_store(struct gov_attr_set *attr_set,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
/* cannot be lower than 1 otherwise freq will not fall */
- if (ret != 1 || input < 1 || input >= dbs_data->up_threshold)
+ if (ret || input < 1 || input >= dbs_data->up_threshold)
return -EINVAL;
cs_tuners->down_threshold = input;
@@ -201,9 +201,9 @@ static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set,
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
- if (ret != 1)
- return -EINVAL;
+ ret = kstrtouint(buf, 0, &input);
+ if (ret)
+ return ret;
if (input > 1)
input = 1;
@@ -226,10 +226,10 @@ static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf,
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
- ret = sscanf(buf, "%u", &input);
+ ret = kstrtouint(buf, 0, &input);
- if (ret != 1)
- return -EINVAL;
+ if (ret)
+ return ret;
if (input > 100)
input = 100;
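kstrtouint() rejects trailing junk and out-of-range values that sscanf("%u") silently tolerated, and it returns a distinct errno that can be passed straight back to the caller. A minimal sketch of the converted store pattern:

#include <linux/kernel.h>

static ssize_t demo_store(const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = kstrtouint(buf, 0, &input);	/* base 0: auto-detect */
	if (ret)
		return ret;			/* -EINVAL or -ERANGE */

	return count;
}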
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 0e65d37c9231..a6ecc203f7b7 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,29 +30,6 @@ static struct od_ops od_ops;
static unsigned int default_powersave_bias;
/*
- * Not all CPUs want IO time to be accounted as busy; this depends on how
- * efficient idling at a higher frequency/voltage is.
- * Pavel Machek says this is not so for various generations of AMD and old
- * Intel systems.
- * Mike Chan (android.com) claims this is also not true for ARM.
- * Because of this, whitelist specific known (series) of CPUs by default, and
- * leave all others up to the user.
- */
-static int should_io_be_busy(void)
-{
-#if defined(CONFIG_X86)
- /*
- * For Intel, Core 2 (model 15) and later have an efficient idle.
- */
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 15)
- return 1;
-#endif
- return 0;
-}
-
-/*
* Find right freq to be set now with powersave_bias on.
* Returns the freq_hi to be used right now and will set freq_hi_delay_us,
* freq_lo, and freq_lo_delay_us in percpu area for averaging freqs.
@@ -377,7 +354,7 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
dbs_data->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
- dbs_data->io_is_busy = should_io_be_busy();
+ dbs_data->io_is_busy = od_should_io_be_busy();
dbs_data->tuners = tuners;
return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.h b/drivers/cpufreq/cpufreq_ondemand.h
index 1af8e5c4b86f..2ca8f1aaf2e3 100644
--- a/drivers/cpufreq/cpufreq_ondemand.h
+++ b/drivers/cpufreq/cpufreq_ondemand.h
@@ -24,3 +24,26 @@ static inline struct od_policy_dbs_info *to_dbs_info(struct policy_dbs_info *pol
struct od_dbs_tuners {
unsigned int powersave_bias;
};
+
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+
+/*
+ * Not all CPUs want IO time to be accounted as busy; this depends on
+ * how efficient idling at a higher frequency/voltage is.
+ *
+ * Pavel Machek says this is not so for various generations of AMD and
+ * old Intel systems. Mike Chan (android.com) claims this is also not
+ * true for ARM.
+ *
+ * Because of this, select a known series of Intel CPUs (Family 6 and
+ * later) by default, and leave all others up to the user.
+ */
+static inline bool od_should_io_be_busy(void)
+{
+ return (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+ boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO);
+}
+#else
+static inline bool od_should_io_be_busy(void) { return false; }
+#endif
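The x86_vfm comparison works because vendor, family, and model are packed into a single integer (model in bits 0-7, family in bits 8-15, vendor in bits 16-23 in current kernels), so a single >= orders CPU generations within a vendor. A minimal sketch with an illustrative packing macro:

#include <linux/build_bug.h>

#define DEMO_VFM(vendor, family, model) \
	(((vendor) << 16) | ((family) << 8) | (model))

/* Pentium Pro (family 6, model 1) sorts below Core 2 (family 6, model 15). */
static_assert(DEMO_VFM(0, 6, 1) < DEMO_VFM(0, 6, 15));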
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 35de513af6c9..7f251daf03ce 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -28,22 +28,21 @@ static bool policy_has_boost_freq(struct cpufreq_policy *policy)
return false;
}
-int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int min_freq = ~0;
unsigned int max_freq = 0;
- unsigned int freq;
+ unsigned int freq, i;
- cpufreq_for_each_valid_entry(pos, table) {
+ cpufreq_for_each_valid_entry_idx(pos, table, i) {
freq = pos->frequency;
if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
- pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq);
+ pr_debug("table entry %u: %u kHz\n", i, freq);
if (freq < min_freq)
min_freq = freq;
if (freq > max_freq)
@@ -65,10 +64,9 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
return 0;
}
-int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
- struct cpufreq_frequency_table *table)
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy)
{
- struct cpufreq_frequency_table *pos;
+ struct cpufreq_frequency_table *pos, *table = policy->freq_table;
unsigned int freq, prev_smaller = 0;
bool found = false;
@@ -110,7 +108,7 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
if (!policy->freq_table)
return -ENODEV;
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);
@@ -128,7 +126,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
};
struct cpufreq_frequency_table *pos;
struct cpufreq_frequency_table *table = policy->freq_table;
- unsigned int freq, diff, i = 0;
+ unsigned int freq, diff, i;
int index;
pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
@@ -354,7 +352,7 @@ int cpufreq_table_validate_and_sort(struct cpufreq_policy *policy)
return 0;
}
- ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
+ ret = cpufreq_frequency_table_cpuinfo(policy);
if (ret)
return ret;
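cpufreq_for_each_valid_entry_idx() carries the table index alongside the cursor, replacing the pointer arithmetic (pos - table) that the old pr_debug() relied on. A minimal sketch of a walk using it:

#include <linux/cpufreq.h>

static unsigned int demo_max_freq_khz(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
	unsigned int i, best = 0;

	/* Skips CPUFREQ_ENTRY_INVALID slots; i tracks the raw index. */
	cpufreq_for_each_valid_entry_idx(pos, table, i)
		if (pos->frequency > best)
			best = pos->frequency;

	return best;
}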
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index db1c88e9d3f9..e93697d3edfd 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -442,7 +442,7 @@ soc_opp_out:
}
if (of_property_read_u32(np, "clock-latency", &transition_latency))
- transition_latency = CPUFREQ_ETERNAL;
+ transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
/*
* Calculate the ramp time for max voltage change in the
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0d5d283a5429..ec4abe374573 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -575,13 +575,18 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
int scaling = cpu->pstate.scaling;
int freq;
- pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
- pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
- pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
+ pr_debug("CPU%d: PERF_CTL max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
+ pr_debug("CPU%d: PERF_CTL turbo = %d\n", cpu->cpu, perf_ctl_turbo);
+ pr_debug("CPU%d: PERF_CTL scaling = %d\n", cpu->cpu, perf_ctl_scaling);
pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);
+ if (scaling == perf_ctl_scaling)
+ return;
+
+ hwp_is_hybrid = true;
+
cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
perf_ctl_scaling);
cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
@@ -603,9 +608,6 @@ static bool turbo_is_disabled(void)
{
u64 misc_en;
- if (!cpu_feature_enabled(X86_FEATURE_IDA))
- return true;
-
rdmsrq(MSR_IA32_MISC_ENABLE, misc_en);
return !!(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
@@ -620,24 +622,9 @@ static int min_perf_pct_min(void)
(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
}
-static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return (s16)ret;
-
- return (s16)(epb & 0x0f);
-}
-
static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
{
- s16 epp;
+ s16 epp = -EOPNOTSUPP;
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
@@ -651,34 +638,13 @@ static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
return epp;
}
epp = (hwp_req_data >> 24) & 0xff;
- } else {
- /* When there is no EPP present, HWP uses EPB settings */
- epp = intel_pstate_get_epb(cpu_data);
}
return epp;
}
-static int intel_pstate_set_epb(int cpu, s16 pref)
-{
- u64 epb;
- int ret;
-
- if (!boot_cpu_has(X86_FEATURE_EPB))
- return -ENXIO;
-
- ret = rdmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
- if (ret)
- return ret;
-
- epb = (epb & ~0x0f) | pref;
- wrmsrq_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
-
- return 0;
-}
-
/*
- * EPP/EPB display strings corresponding to EPP index in the
+ * EPP display strings corresponding to EPP index in the
* energy_perf_strings[]
* index String
*-------------------------------------
@@ -782,7 +748,7 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
u32 raw_epp)
{
int epp = -EINVAL;
- int ret;
+ int ret = -EOPNOTSUPP;
if (!pref_index)
epp = cpu_data->epp_default;
@@ -802,10 +768,6 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
return -EBUSY;
ret = intel_pstate_set_epp(cpu_data, epp);
- } else {
- if (epp == -EINVAL)
- epp = (pref_index - 1) << 2;
- ret = intel_pstate_set_epb(cpu_data->cpu, epp);
}
return ret;
@@ -937,13 +899,26 @@ static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
cpufreq_freq_attr_ro(base_frequency);
+enum hwp_cpufreq_attr_index {
+ HWP_BASE_FREQUENCY_INDEX = 0,
+ HWP_PERFORMANCE_PREFERENCE_INDEX,
+ HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX,
+ HWP_CPUFREQ_ATTR_COUNT,
+};
+
static struct freq_attr *hwp_cpufreq_attrs[] = {
- &energy_performance_preference,
- &energy_performance_available_preferences,
- &base_frequency,
- NULL,
+ [HWP_BASE_FREQUENCY_INDEX] = &base_frequency,
+ [HWP_PERFORMANCE_PREFERENCE_INDEX] = &energy_performance_preference,
+ [HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] =
+ &energy_performance_available_preferences,
+ [HWP_CPUFREQ_ATTR_COUNT] = NULL,
};
+static u8 hybrid_get_cpu_type(unsigned int cpu)
+{
+ return cpu_data(cpu).topo.intel_type;
+}
+
static bool no_cas __ro_after_init;
static struct cpudata *hybrid_max_perf_cpu __read_mostly;
@@ -960,11 +935,8 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
unsigned long *freq)
{
/*
- * Create "utilization bins" of 0-40%, 40%-60%, 60%-80%, and 80%-100%
- * of the maximum capacity such that two CPUs of the same type will be
- * regarded as equally attractive if the utilization of each of them
- * falls into the same bin, which should prevent tasks from being
- * migrated between them too often.
+ * Create four "states" corresponding to 40%, 60%, 80%, and 100% of the
+ * full capacity.
*
* For this purpose, return the "frequency" of 2 for the first
* performance level and otherwise leave the value set by the caller.
@@ -978,38 +950,40 @@ static int hybrid_active_power(struct device *dev, unsigned long *power,
return 0;
}
+static bool hybrid_has_l3(unsigned int cpu)
+{
+ struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
+ unsigned int i;
+
+ if (!cacheinfo)
+ return false;
+
+ for (i = 0; i < cacheinfo->num_leaves; i++) {
+ if (cacheinfo->info_list[i].level == 3)
+ return true;
+ }
+
+ return false;
+}
+
static int hybrid_get_cost(struct device *dev, unsigned long freq,
unsigned long *cost)
{
- struct pstate_data *pstate = &all_cpu_data[dev->id]->pstate;
- struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(dev->id);
-
+ /* Facilitate load balancing between CPUs of the same type. */
+ *cost = freq;
/*
- * The smaller the perf-to-frequency scaling factor, the larger the IPC
- * ratio between the given CPU and the least capable CPU in the system.
- * Regard that IPC ratio as the primary cost component and assume that
- * the scaling factors for different CPU types will differ by at least
- * 5% and they will not be above INTEL_PSTATE_CORE_SCALING.
+ * Adjust the cost depending on CPU type.
*
- * Add the freq value to the cost, so that the cost of running on CPUs
- * of the same type in different "utilization bins" is different.
- */
- *cost = div_u64(100ULL * INTEL_PSTATE_CORE_SCALING, pstate->scaling) + freq;
- /*
- * Increase the cost slightly for CPUs able to access L3 to avoid
- * touching it in case some other CPUs of the same type can do the work
- * without it.
+ * The idea is to start loading up LPE-cores before E-cores and start
+ * to populate E-cores when LPE-cores are utilized above 60% of the
+ * capacity. Similarly, P-cores start to be populated when E-cores are
+ * utilized above 60% of the capacity.
*/
- if (cacheinfo) {
- unsigned int i;
-
- /* Check if L3 cache is there. */
- for (i = 0; i < cacheinfo->num_leaves; i++) {
- if (cacheinfo->info_list[i].level == 3) {
- *cost += 2;
- break;
- }
- }
+ if (hybrid_get_cpu_type(dev->id) == INTEL_CPU_TYPE_ATOM) {
+ if (hybrid_has_l3(dev->id)) /* E-core */
+ *cost += 1;
+ } else { /* P-core */
+ *cost += 2;
}
return 0;
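The per-type offsets produce a strict interleaving of costs; a worked example, assuming the four EM states report the "frequencies" 2 through 5 set up in hybrid_active_power() (40/60/80/100% of capacity):

	LPE-core (Atom, no L3):  cost = freq + 0  ->  2 3 4 5
	E-core   (Atom, L3):     cost = freq + 1  ->  3 4 5 6
	P-core:                  cost = freq + 2  ->  4 5 6 7

An idle E-core (cost 3) only undercuts an LPE-core once the latter is past its 60% state (cost 4), and likewise for P-cores versus E-cores, matching the thresholds described in the comment.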
@@ -1072,9 +1046,9 @@ static void hybrid_set_cpu_capacity(struct cpudata *cpu)
topology_set_cpu_scale(cpu->cpu, arch_scale_cpu_capacity(cpu->cpu));
- pr_debug("CPU%d: perf = %u, max. perf = %u, base perf = %d\n", cpu->cpu,
- cpu->capacity_perf, hybrid_max_perf_cpu->capacity_perf,
- cpu->pstate.max_pstate_physical);
+ pr_debug("CPU%d: capacity perf = %u, base perf = %u, sys max perf = %u\n",
+ cpu->cpu, cpu->capacity_perf, cpu->pstate.max_pstate_physical,
+ hybrid_max_perf_cpu->capacity_perf);
}
static void hybrid_clear_cpu_capacity(unsigned int cpunum)
@@ -1337,9 +1311,8 @@ static void intel_pstate_hwp_set(unsigned int cpu)
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
value &= ~GENMASK_ULL(31, 24);
value |= (u64)epp << 24;
- } else {
- intel_pstate_set_epb(cpu, epp);
}
+
skip_epp:
WRITE_ONCE(cpu_data->hwp_req_cached, value);
wrmsrq_on_cpu(cpu, MSR_HWP_REQUEST, value);
@@ -1411,13 +1384,17 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
#define POWER_CTL_EE_ENABLE 1
#define POWER_CTL_EE_DISABLE 2
+/* Enable bit for Dynamic Efficiency Control (DEC) */
+#define POWER_CTL_DEC_ENABLE 27
+
static int power_ctl_ee_state;
static void set_power_ctl_ee_state(bool input)
{
u64 power_ctl;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
if (input) {
power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
@@ -1427,7 +1404,6 @@ static void set_power_ctl_ee_state(bool input)
power_ctl_ee_state = POWER_CTL_EE_DISABLE;
}
wrmsrq(MSR_IA32_POWER_CTL, power_ctl);
- mutex_unlock(&intel_pstate_driver_lock);
}
static void intel_pstate_hwp_enable(struct cpudata *cpudata);
@@ -1502,9 +1478,7 @@ static void __intel_pstate_update_max_freq(struct cpufreq_policy *policy,
static bool intel_pstate_update_max_freq(struct cpudata *cpudata)
{
- struct cpufreq_policy *policy __free(put_cpufreq_policy);
-
- policy = cpufreq_cpu_get(cpudata->cpu);
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpudata->cpu);
if (!policy)
return false;
@@ -1551,13 +1525,9 @@ static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- ssize_t ret;
+ guard(mutex)(&intel_pstate_driver_lock);
- mutex_lock(&intel_pstate_driver_lock);
- ret = intel_pstate_show_status(buf);
- mutex_unlock(&intel_pstate_driver_lock);
-
- return ret;
+ return intel_pstate_show_status(buf);
}
static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
@@ -1566,11 +1536,13 @@ static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
char *p = memchr(buf, '\n', count);
int ret;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
ret = intel_pstate_update_status(buf, p ? p - buf : count);
- mutex_unlock(&intel_pstate_driver_lock);
+ if (ret < 0)
+ return ret;
- return ret < 0 ? ret : count;
+ return count;
}
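guard(mutex)() releases the lock automatically on every exit path, which is what lets the sysfs handlers above shed their unlock labels. A minimal sketch:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static int demo_state;

static int demo_update(int val)
{
	guard(mutex)(&demo_lock);

	if (val < 0)
		return -EINVAL;	/* demo_lock is dropped here automatically */

	demo_state = val;
	return 0;		/* ...and here */
}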
static ssize_t show_turbo_pct(struct kobject *kobj,
@@ -1580,12 +1552,10 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
int total, no_turbo, turbo_pct;
uint32_t turbo_fp;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
cpu = all_cpu_data[0];
@@ -1594,8 +1564,6 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
- mutex_unlock(&intel_pstate_driver_lock);
-
return sprintf(buf, "%u\n", turbo_pct);
}
@@ -1605,38 +1573,26 @@ static ssize_t show_num_pstates(struct kobject *kobj,
struct cpudata *cpu;
int total;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
cpu = all_cpu_data[0];
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
- mutex_unlock(&intel_pstate_driver_lock);
-
return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- ssize_t ret;
+ guard(mutex)(&intel_pstate_driver_lock);
- mutex_lock(&intel_pstate_driver_lock);
-
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
-
- ret = sprintf(buf, "%u\n", global.no_turbo);
-
- mutex_unlock(&intel_pstate_driver_lock);
- return ret;
+ return sprintf(buf, "%u\n", global.no_turbo);
}
static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
@@ -1648,29 +1604,25 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
if (sscanf(buf, "%u", &input) != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- count = -EAGAIN;
- goto unlock_driver;
- }
+ if (!intel_pstate_driver)
+ return -EAGAIN;
no_turbo = !!clamp_t(int, input, 0, 1);
WRITE_ONCE(global.turbo_disabled, turbo_is_disabled());
if (global.turbo_disabled && !no_turbo) {
pr_notice("Turbo disabled by BIOS or unavailable on processor\n");
- count = -EPERM;
if (global.no_turbo)
- goto unlock_driver;
- else
- no_turbo = 1;
- }
+ return -EPERM;
- if (no_turbo == global.no_turbo) {
- goto unlock_driver;
+ no_turbo = 1;
}
+ if (no_turbo == global.no_turbo)
+ return count;
+
WRITE_ONCE(global.no_turbo, no_turbo);
mutex_lock(&intel_pstate_limits_lock);
@@ -1689,47 +1641,43 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
intel_pstate_update_limits_for_all();
arch_set_max_freq_ratio(no_turbo);
-unlock_driver:
- mutex_unlock(&intel_pstate_driver_lock);
-
return count;
}
-static void update_qos_request(enum freq_qos_req_type type)
+static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
{
+ struct cpudata *cpudata = all_cpu_data[cpu];
+ unsigned int freq = cpudata->pstate.turbo_freq;
struct freq_qos_request *req;
- struct cpufreq_policy *policy;
- int i;
-
- for_each_possible_cpu(i) {
- struct cpudata *cpu = all_cpu_data[i];
- unsigned int freq, perf_pct;
- policy = cpufreq_cpu_get(i);
- if (!policy)
- continue;
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return;
- req = policy->driver_data;
- cpufreq_cpu_put(policy);
+ req = policy->driver_data;
+ if (!req)
+ return;
- if (!req)
- continue;
+ if (hwp_active)
+ intel_pstate_get_hwp_cap(cpudata);
- if (hwp_active)
- intel_pstate_get_hwp_cap(cpu);
+ if (type == FREQ_QOS_MIN) {
+ freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100);
+ } else {
+ req++;
+ freq = (freq * global.max_perf_pct) / 100;
+ }
- if (type == FREQ_QOS_MIN) {
- perf_pct = global.min_perf_pct;
- } else {
- req++;
- perf_pct = global.max_perf_pct;
- }
+ if (freq_qos_update_request(req, freq) < 0)
+ pr_warn("Failed to update freq constraint: CPU%d\n", cpu);
+}
- freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
+static void update_qos_requests(enum freq_qos_req_type type)
+{
+ int i;
- if (freq_qos_update_request(req, freq) < 0)
- pr_warn("Failed to update freq constraint: CPU%d\n", i);
- }
+ for_each_possible_cpu(i)
+ update_cpu_qos_request(i, type);
}
static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
@@ -1742,12 +1690,10 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
mutex_lock(&intel_pstate_limits_lock);
@@ -1758,9 +1704,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MAX);
-
- mutex_unlock(&intel_pstate_driver_lock);
+ update_qos_requests(FREQ_QOS_MAX);
return count;
}
@@ -1775,12 +1719,10 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (ret != 1)
return -EINVAL;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
- if (!intel_pstate_driver) {
- mutex_unlock(&intel_pstate_driver_lock);
+ if (!intel_pstate_driver)
return -EAGAIN;
- }
mutex_lock(&intel_pstate_limits_lock);
@@ -1792,9 +1734,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
if (intel_pstate_driver == &intel_pstate)
intel_pstate_update_policies();
else
- update_qos_request(FREQ_QOS_MIN);
-
- mutex_unlock(&intel_pstate_driver_lock);
+ update_qos_requests(FREQ_QOS_MIN);
return count;
}
@@ -1816,10 +1756,10 @@ static ssize_t store_hwp_dynamic_boost(struct kobject *a,
if (ret)
return ret;
- mutex_lock(&intel_pstate_driver_lock);
+ guard(mutex)(&intel_pstate_driver_lock);
+
hwp_boost = !!input;
intel_pstate_update_policies();
- mutex_unlock(&intel_pstate_driver_lock);
return count;
}
@@ -2108,6 +2048,18 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
intel_pstate_update_epp_defaults(cpudata);
}
+static u64 get_perf_ctl_val(int pstate)
+{
+ u64 val;
+
+ val = (u64)pstate << 8;
+ if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled) &&
+ cpu_feature_enabled(X86_FEATURE_IDA))
+ val |= (u64)1 << 32;
+
+ return val;
+}
+
static int atom_get_min_pstate(int not_used)
{
u64 value;
@@ -2134,14 +2086,10 @@ static int atom_get_turbo_pstate(int not_used)
static u64 atom_get_val(struct cpudata *cpudata, int pstate)
{
- u64 val;
+ u64 val = get_perf_ctl_val(pstate);
int32_t vid_fp;
u32 vid;
- val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
- val |= (u64)1 << 32;
-
vid_fp = cpudata->vid.min + mul_fp(
int_tofp(pstate - cpudata->pstate.min_pstate),
cpudata->vid.ratio);
@@ -2301,13 +2249,7 @@ static int core_get_turbo_pstate(int cpu)
static u64 core_get_val(struct cpudata *cpudata, int pstate)
{
- u64 val;
-
- val = (u64)pstate << 8;
- if (READ_ONCE(global.no_turbo) && !READ_ONCE(global.turbo_disabled))
- val |= (u64)1 << 32;
-
- return val;
+ return get_perf_ctl_val(pstate);
}
static int knl_get_aperf_mperf_shift(void)
@@ -2331,18 +2273,14 @@ static int knl_get_turbo_pstate(int cpu)
static int hwp_get_cpu_scaling(int cpu)
{
if (hybrid_scaling_factor) {
- struct cpuinfo_x86 *c = &cpu_data(cpu);
- u8 cpu_type = c->topo.intel_type;
-
/*
* Return the hybrid scaling factor for P-cores and use the
* default core scaling for E-cores.
*/
- if (cpu_type == INTEL_CPU_TYPE_CORE)
+ if (hybrid_get_cpu_type(cpu) == INTEL_CPU_TYPE_CORE)
return hybrid_scaling_factor;
- if (cpu_type == INTEL_CPU_TYPE_ATOM)
- return core_get_scaling();
+ return core_get_scaling();
}
/* Use core scaling on non-hybrid systems. */
@@ -2377,11 +2315,10 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
- int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
int perf_ctl_scaling = pstate_funcs.get_scaling();
+ cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical(cpu->cpu);
cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
- cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;
if (hwp_active && !hwp_mode_bdw) {
@@ -2389,10 +2326,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
if (pstate_funcs.get_cpu_scaling) {
cpu->pstate.scaling = pstate_funcs.get_cpu_scaling(cpu->cpu);
- if (cpu->pstate.scaling != perf_ctl_scaling) {
- intel_pstate_hybrid_hwp_adjust(cpu);
- hwp_is_hybrid = true;
- }
+ intel_pstate_hybrid_hwp_adjust(cpu);
} else {
cpu->pstate.scaling = perf_ctl_scaling;
}
@@ -2575,7 +2509,7 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
* that sample.time will always be reset before setting the utilization
* update hook and make the caller skip the sample then.
*/
- if (cpu->last_sample_time) {
+ if (likely(cpu->last_sample_time)) {
intel_pstate_calc_avg_perf(cpu);
return true;
}
@@ -2794,6 +2728,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
+ X86_MATCH(INTEL_DIAMONDRAPIDS_X, core_funcs),
{}
};
#endif
@@ -3802,6 +3737,26 @@ static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
{}
};
+static bool hwp_check_epp(void)
+{
+ if (boot_cpu_has(X86_FEATURE_HWP_EPP))
+ return true;
+
+ /* Without EPP support, don't expose EPP-related sysfs attributes. */
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_PREFERENCE_INDEX] = NULL;
+ hwp_cpufreq_attrs[HWP_PERFORMANCE_AVAILABLE_PREFERENCES_INDEX] = NULL;
+
+ return false;
+}
+
+static bool hwp_check_dec(void)
+{
+ u64 power_ctl;
+
+ rdmsrq(MSR_IA32_POWER_CTL, power_ctl);
+ return !!(power_ctl & BIT(POWER_CTL_DEC_ENABLE));
+}
+
static int __init intel_pstate_init(void)
{
static struct cpudata **_all_cpu_data;
@@ -3822,23 +3777,32 @@ static int __init intel_pstate_init(void)
id = x86_match_cpu(hwp_support_ids);
if (id) {
- hwp_forced = intel_pstate_hwp_is_enabled();
+ bool epp_present = hwp_check_epp();
- if (hwp_forced)
+ /*
+ * If HWP is enabled already, there is no choice but to deal
+ * with it.
+ */
+ hwp_forced = intel_pstate_hwp_is_enabled();
+ if (hwp_forced) {
pr_info("HWP enabled by BIOS\n");
- else if (no_load)
+ no_hwp = 0;
+ } else if (no_load) {
return -ENODEV;
+ } else if (!epp_present && !hwp_check_dec()) {
+ /*
+ * Avoid enabling HWP for processors without EPP support
+ * unless the Dynamic Efficiency Control (DEC) enable
+			 * bit (MSR_IA32_POWER_CTL, bit 27) is set, because lacking
+			 * EPP means an incomplete HWP implementation, which is a
+			 * corner case that is generally problematic to support.
+ */
+ no_hwp = 1;
+ }
copy_cpu_funcs(&core_funcs);
- /*
- * Avoid enabling HWP for processors without EPP support,
- * because that means incomplete HWP implementation which is a
- * corner case and supporting it is generally problematic.
- *
- * If HWP is enabled already, though, there is no choice but to
- * deal with it.
- */
- if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
+
+ if (!no_hwp) {
hwp_active = true;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
@@ -3917,9 +3881,9 @@ hwp_cpu_matched:
}
- mutex_lock(&intel_pstate_driver_lock);
- rc = intel_pstate_register_driver(default_driver);
- mutex_unlock(&intel_pstate_driver_lock);
+ scoped_guard(mutex, &intel_pstate_driver_lock) {
+ rc = intel_pstate_register_driver(default_driver);
+ }
if (rc) {
intel_pstate_sysfs_remove();
return rc;
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index ba0e08c8486a..49e76b44468a 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -953,6 +953,9 @@ static void __exit longhaul_exit(void)
struct cpufreq_policy *policy = cpufreq_cpu_get(0);
int i;
+ if (unlikely(!policy))
+ return;
+
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
struct cpufreq_freqs freqs;
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 74f1b4c796e4..ae4500ab4891 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -24,6 +24,8 @@
#define POLL_USEC 1000
#define TIMEOUT_USEC 300000
+#define FDVFS_FDIV_HZ (26 * 1000)
+
enum {
REG_FREQ_LUT_TABLE,
REG_FREQ_ENABLE,
@@ -35,7 +37,14 @@ enum {
REG_ARRAY_SIZE,
};
-struct mtk_cpufreq_data {
+struct mtk_cpufreq_priv {
+ struct device *dev;
+ const struct mtk_cpufreq_variant *variant;
+ void __iomem *fdvfs;
+};
+
+struct mtk_cpufreq_domain {
+ struct mtk_cpufreq_priv *parent;
struct cpufreq_frequency_table *table;
void __iomem *reg_bases[REG_ARRAY_SIZE];
struct resource *res;
@@ -43,20 +52,51 @@ struct mtk_cpufreq_data {
int nr_opp;
};
-static const u16 cpufreq_mtk_offsets[REG_ARRAY_SIZE] = {
- [REG_FREQ_LUT_TABLE] = 0x0,
- [REG_FREQ_ENABLE] = 0x84,
- [REG_FREQ_PERF_STATE] = 0x88,
- [REG_FREQ_HW_STATE] = 0x8c,
- [REG_EM_POWER_TBL] = 0x90,
- [REG_FREQ_LATENCY] = 0x110,
+struct mtk_cpufreq_variant {
+ int (*init)(struct mtk_cpufreq_priv *priv);
+ const u16 reg_offsets[REG_ARRAY_SIZE];
+ const bool is_hybrid_dvfs;
+};
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_base_variant = {
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x110,
+ },
+};
+
+static int mtk_cpufreq_hw_mt8196_init(struct mtk_cpufreq_priv *priv)
+{
+ priv->fdvfs = devm_of_iomap(priv->dev, priv->dev->of_node, 0, NULL);
+ if (IS_ERR(priv->fdvfs))
+ return dev_err_probe(priv->dev, PTR_ERR(priv->fdvfs),
+ "failed to get fdvfs iomem\n");
+
+ return 0;
+}
+
+static const struct mtk_cpufreq_variant cpufreq_mtk_mt8196_variant = {
+ .init = mtk_cpufreq_hw_mt8196_init,
+ .reg_offsets = {
+ [REG_FREQ_LUT_TABLE] = 0x0,
+ [REG_FREQ_ENABLE] = 0x84,
+ [REG_FREQ_PERF_STATE] = 0x88,
+ [REG_FREQ_HW_STATE] = 0x8c,
+ [REG_EM_POWER_TBL] = 0x90,
+ [REG_FREQ_LATENCY] = 0x114,
+ },
+ .is_hybrid_dvfs = true,
};
static int __maybe_unused
mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
unsigned long *KHz)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
int i;
@@ -80,19 +120,38 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW,
return 0;
}
+static void mtk_cpufreq_hw_fdvfs_switch(unsigned int target_freq,
+ struct cpufreq_policy *policy)
+{
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ struct mtk_cpufreq_priv *priv = data->parent;
+ unsigned int cpu;
+
+ target_freq = DIV_ROUND_UP(target_freq, FDVFS_FDIV_HZ);
+ for_each_cpu(cpu, policy->real_cpus) {
+ writel_relaxed(target_freq, priv->fdvfs + cpu * 4);
+ }
+}
+
static int mtk_cpufreq_hw_target_index(struct cpufreq_policy *policy,
unsigned int index)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
-
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ struct mtk_cpufreq_domain *data = policy->driver_data;
+ unsigned int target_freq;
+
+ if (data->parent->fdvfs) {
+ target_freq = policy->freq_table[index].frequency;
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ } else {
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ }
return 0;
}
static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct cpufreq_policy *policy;
unsigned int index;
@@ -111,18 +170,21 @@ static unsigned int mtk_cpufreq_hw_get(unsigned int cpu)
static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
unsigned int index;
index = cpufreq_table_find_index_dl(policy, target_freq, false);
- writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
+ if (data->parent->fdvfs)
+ mtk_cpufreq_hw_fdvfs_switch(target_freq, policy);
+ else
+ writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
return policy->freq_table[index].frequency;
}
static int mtk_cpu_create_freq_table(struct platform_device *pdev,
- struct mtk_cpufreq_data *data)
+ struct mtk_cpufreq_domain *data)
{
struct device *dev = &pdev->dev;
u32 temp, i, freq, prev_freq = 0;
@@ -157,9 +219,9 @@ static int mtk_cpu_create_freq_table(struct platform_device *pdev,
static int mtk_cpu_resources_init(struct platform_device *pdev,
struct cpufreq_policy *policy,
- const u16 *offsets)
+ struct mtk_cpufreq_priv *priv)
{
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
struct device *dev = &pdev->dev;
struct resource *res;
struct of_phandle_args args;
@@ -180,6 +242,15 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
index = args.args[0];
of_node_put(args.np);
+ /*
+	 * On SoCs with hybrid DVFS, such as the MT8196, the first declared
+	 * register range is for FDVFS, followed by the frequency-domain MMIOs.
+ */
+ if (priv->variant->is_hybrid_dvfs)
+ index++;
+
+ data->parent = priv;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, index);
if (!res) {
dev_err(dev, "failed to get mem resource %d\n", index);
@@ -202,7 +273,7 @@ static int mtk_cpu_resources_init(struct platform_device *pdev,
data->res = res;
for (i = REG_FREQ_LUT_TABLE; i < REG_ARRAY_SIZE; i++)
- data->reg_bases[i] = base + offsets[i];
+ data->reg_bases[i] = base + priv->variant->reg_offsets[i];
ret = mtk_cpu_create_freq_table(pdev, data);
if (ret) {
@@ -223,7 +294,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
int sig, pwr_hw = CPUFREQ_HW_STATUS | SVS_HW_STATUS;
- struct mtk_cpufreq_data *data;
+ struct mtk_cpufreq_domain *data;
unsigned int latency;
int ret;
@@ -238,7 +309,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
latency = readl_relaxed(data->reg_bases[REG_FREQ_LATENCY]) * 1000;
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
policy->fast_switch_possible = true;
@@ -262,7 +333,7 @@ static int mtk_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
struct resource *res = data->res;
void __iomem *base = data->base;
@@ -275,7 +346,7 @@ static void mtk_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
static void mtk_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(mtk_cpufreq_get_cpu_power);
- struct mtk_cpufreq_data *data = policy->driver_data;
+ struct mtk_cpufreq_domain *data = policy->driver_data;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), data->nr_opp,
&em_cb, policy->cpus, true);
@@ -297,6 +368,7 @@ static struct cpufreq_driver cpufreq_mtk_hw_driver = {
static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
+ struct mtk_cpufreq_priv *priv;
const void *data;
int ret, cpu;
struct device *cpu_dev;
@@ -320,7 +392,20 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
if (!data)
return -EINVAL;
- platform_set_drvdata(pdev, (void *) data);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->variant = data;
+ priv->dev = &pdev->dev;
+
+ if (priv->variant->init) {
+ ret = priv->variant->init(priv);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
cpufreq_mtk_hw_driver.driver_data = pdev;
ret = cpufreq_register_driver(&cpufreq_mtk_hw_driver);
@@ -336,7 +421,8 @@ static void mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
- { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
+ { .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_base_variant },
+ { .compatible = "mediatek,mt8196-cpufreq-hw", .data = &cpufreq_mtk_mt8196_variant },
{}
};
MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
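On the FDVFS path the driver no longer writes a LUT index; it converts the requested kHz value into 26 MHz reference multiples and writes one per-CPU register. A minimal sketch of that conversion (register stride per the driver above):

#include <linux/io.h>
#include <linux/math.h>

#define DEMO_FDVFS_FDIV_HZ	(26 * 1000)

static void demo_fdvfs_request(void __iomem *fdvfs, unsigned int cpu,
			       unsigned int freq_khz)
{
	unsigned int mult = DIV_ROUND_UP(freq_khz, DEMO_FDVFS_FDIV_HZ);

	writel_relaxed(mult, fdvfs + cpu * 4);	/* one 32-bit slot per CPU */
}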
diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
index f3f02c4b6888..052ca7cd2f4f 100644
--- a/drivers/cpufreq/mediatek-cpufreq.c
+++ b/drivers/cpufreq/mediatek-cpufreq.c
@@ -123,7 +123,7 @@ static int mtk_cpufreq_voltage_tracking(struct mtk_cpu_dvfs_info *info,
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_vproc > new_vproc) {
+ } else {
vproc = max(new_vproc,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(proc_reg, vproc,
@@ -320,7 +320,6 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
struct dev_pm_opp *new_opp;
struct mtk_cpu_dvfs_info *info;
unsigned long freq, volt;
- struct cpufreq_policy *policy;
int ret = 0;
info = container_of(nb, struct mtk_cpu_dvfs_info, opp_nb);
@@ -353,12 +352,12 @@ static int mtk_cpufreq_opp_notifier(struct notifier_block *nb,
}
dev_pm_opp_put(new_opp);
- policy = cpufreq_cpu_get(info->opp_cpu);
- if (policy) {
+
+ struct cpufreq_policy *policy __free(put_cpufreq_policy)
+ = cpufreq_cpu_get(info->opp_cpu);
+ if (policy)
cpufreq_driver_target(policy, freq / 1000,
CPUFREQ_RELATION_L);
- cpufreq_cpu_put(policy);
- }
}
}
@@ -404,9 +403,11 @@ static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu)
}
info->cpu_clk = clk_get(cpu_dev, "cpu");
- if (IS_ERR(info->cpu_clk))
- return dev_err_probe(cpu_dev, PTR_ERR(info->cpu_clk),
- "cpu%d: failed to get cpu clk\n", cpu);
+ if (IS_ERR(info->cpu_clk)) {
+ ret = PTR_ERR(info->cpu_clk);
+ dev_err_probe(cpu_dev, ret, "cpu%d: failed to get cpu clk\n", cpu);
+ goto out_put_cci_dev;
+ }
info->inter_clk = clk_get(cpu_dev, "intermediate");
if (IS_ERR(info->inter_clk)) {
@@ -552,6 +553,10 @@ out_free_inter_clock:
out_free_mux_clock:
clk_put(info->cpu_clk);
+out_put_cci_dev:
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
+
return ret;
}
@@ -569,6 +574,8 @@ static void mtk_cpu_dvfs_info_release(struct mtk_cpu_dvfs_info *info)
clk_put(info->inter_clk);
dev_pm_opp_of_cpumask_remove_table(&info->cpus);
dev_pm_opp_unregister_notifier(info->cpu_dev, &info->opp_nb);
+ if (info->soc_data->ccifreq_supported)
+ put_device(info->cci_dev);
}
static int mtk_cpufreq_init(struct cpufreq_policy *policy)
@@ -757,22 +764,14 @@ MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
static int __init mtk_cpufreq_driver_init(void)
{
- struct device_node *np;
- const struct of_device_id *match;
const struct mtk_cpufreq_platform_data *data;
int err;
- np = of_find_node_by_path("/");
- if (!np)
- return -ENODEV;
-
- match = of_match_node(mtk_cpufreq_machines, np);
- of_node_put(np);
- if (!match) {
+ data = of_machine_get_match_data(mtk_cpufreq_machines);
+ if (!data) {
pr_debug("Machine is not compatible with mtk-cpufreq\n");
return -ENODEV;
}
- data = match->data;
err = platform_driver_register(&mtk_cpufreq_platdrv);
if (err)
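The same conversion pattern, shown standalone: of_machine_get_match_data() matches the root ("/") node against the table and returns the matching entry's .data pointer directly, replacing the open-coded of_find_node_by_path()/of_match_node()/of_node_put() sequence. All names below are illustrative:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>

struct example_pdata {
	unsigned int max_freq_khz;
};

static const struct example_pdata example_soc_data = {
	.max_freq_khz = 2000000,
};

static const struct of_device_id example_machines[] = {
	{ .compatible = "vendor,example-soc", .data = &example_soc_data },
	{ /* sentinel */ }
};

static int __init example_driver_init(void)
{
	const struct example_pdata *data;

	data = of_machine_get_match_data(example_machines);
	if (!data)
		return -ENODEV;	/* machine not compatible */

	return 0;
}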
diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
index 54f8117103c8..81e16b5a0245 100644
--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
+++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
@@ -200,6 +200,10 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
case QCOM_ID_IPQ9574:
drv->versions = 1 << (unsigned int)(*speedbin);
break;
+ case QCOM_ID_IPQ5424:
+ case QCOM_ID_IPQ5404:
+ drv->versions = (*speedbin == 0x3b) ? BIT(1) : BIT(0);
+ break;
case QCOM_ID_MSM8996SG:
case QCOM_ID_APQ8096SG:
drv->versions = 1 << ((unsigned int)(*speedbin) + 4);
@@ -252,13 +256,22 @@ len_error:
return ret;
}
+static const struct of_device_id qcom_cpufreq_ipq806x_match_list[] __maybe_unused = {
+ { .compatible = "qcom,ipq8062", .data = (const void *)QCOM_ID_IPQ8062 },
+ { .compatible = "qcom,ipq8064", .data = (const void *)QCOM_ID_IPQ8064 },
+ { .compatible = "qcom,ipq8065", .data = (const void *)QCOM_ID_IPQ8065 },
+ { .compatible = "qcom,ipq8066", .data = (const void *)QCOM_ID_IPQ8066 },
+ { .compatible = "qcom,ipq8068", .data = (const void *)QCOM_ID_IPQ8068 },
+ { .compatible = "qcom,ipq8069", .data = (const void *)QCOM_ID_IPQ8069 },
+};
+
static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
struct nvmem_cell *speedbin_nvmem,
char **pvs_name,
struct qcom_cpufreq_drv *drv)
{
+ int msm_id = -1, ret = 0;
int speed = 0, pvs = 0;
- int msm_id, ret = 0;
u8 *speedbin;
size_t len;
@@ -275,8 +288,30 @@ static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev,
get_krait_bin_format_a(cpu_dev, &speed, &pvs, speedbin);
ret = qcom_smem_get_soc_id(&msm_id);
- if (ret)
+ if (ret == -ENODEV) {
+ const struct of_device_id *match;
+ struct device_node *root;
+
+ root = of_find_node_by_path("/");
+ if (!root) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* Fall back to compatible matching when SMEM is not initialized */
+ match = of_match_node(qcom_cpufreq_ipq806x_match_list, root);
+ of_node_put(root);
+ if (!match) {
+ ret = -ENODEV;
+ goto exit;
+ }
+
+ /* We found a matching device; get the msm_id from its data entry */
+ msm_id = (int)(uintptr_t)match->data;
+ ret = 0;
+ } else if (ret) {
goto exit;
+ }
switch (msm_id) {
case QCOM_ID_IPQ8062:
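A standalone sketch of the technique the fallback table relies on: encoding a scalar SoC ID directly in of_device_id.data through uintptr_t casts, so no lookup structure has to be allocated. The compatible string and ID value are illustrative:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/types.h>

#define EXAMPLE_SOC_ID	123

static const struct of_device_id example_ids[] = {
	{ .compatible = "vendor,example-soc", .data = (const void *)EXAMPLE_SOC_ID },
	{ /* sentinel */ }
};

static int example_get_soc_id(struct device_node *root)
{
	const struct of_device_id *match = of_match_node(example_ids, root);

	if (!match)
		return -ENODEV;

	/* Recover the integer stashed in the .data pointer. */
	return (int)(uintptr_t)match->data;
}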
@@ -591,6 +626,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst __maybe_u
{ .compatible = "qcom,msm8996", .data = &match_data_kryo },
{ .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
{ .compatible = "qcom,ipq5332", .data = &match_data_kryo },
+ { .compatible = "qcom,ipq5424", .data = &match_data_kryo },
{ .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
{ .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
{ .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
index 7e1fbf9a091f..31e07f0279db 100644
--- a/drivers/cpufreq/rcpufreq_dt.rs
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -28,15 +28,11 @@ fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
/// Finds supply name for the CPU from DT.
fn find_supply_names(dev: &Device, cpu: cpu::CpuId) -> Option<KVec<CString>> {
// Try "cpu0" for older DTs, fallback to "cpu".
- let name = (cpu.as_u32() == 0)
+ (cpu.as_u32() == 0)
.then(|| find_supply_name_exact(dev, "cpu0"))
.flatten()
- .or_else(|| find_supply_name_exact(dev, "cpu"))?;
-
- let mut list = KVec::with_capacity(1, GFP_KERNEL).ok()?;
- list.push(name, GFP_KERNEL).ok()?;
-
- Some(list)
+ .or_else(|| find_supply_name_exact(dev, "cpu"))
+ .and_then(|name| kernel::kvec![name].ok())
}
/// Represents the cpufreq dt device.
@@ -123,7 +119,7 @@ impl cpufreq::Driver for CPUFreqDTDriver {
let mut transition_latency = opp_table.max_transition_latency_ns() as u32;
if transition_latency == 0 {
- transition_latency = cpufreq::ETERNAL_LATENCY_NS;
+ transition_latency = cpufreq::DEFAULT_TRANSITION_LATENCY_NS;
}
policy
@@ -211,9 +207,9 @@ impl platform::Driver for CPUFreqDTDriver {
fn probe(
pdev: &platform::Device<Core>,
_id_info: Option<&Self::IdInfo>,
- ) -> Result<Pin<KBox<Self>>> {
+ ) -> impl PinInit<Self, Error> {
cpufreq::Registration::<CPUFreqDTDriver>::new_foreign_owned(pdev.as_ref())?;
- Ok(KBox::new(Self {}, GFP_KERNEL)?.into())
+ Ok(Self {})
}
}
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index 76c888ed8d16..ba8a1c96427a 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -518,7 +518,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
if (policy->cpu != 0) {
ret = -EINVAL;
- goto out_dmc1;
+ goto out;
}
/*
@@ -530,7 +530,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
pr_err("CPUFreq doesn't support this memory type\n");
ret = -EINVAL;
- goto out_dmc1;
+ goto out;
}
/* Find current refresh counter and frequency each DMC */
@@ -544,6 +544,8 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
cpufreq_generic_init(policy, s5pv210_freq_table, 40000);
return 0;
+out:
+ clk_put(dmc1_clk);
out_dmc1:
clk_put(dmc0_clk);
out_dmc0:
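The relabelled error exits above restore the usual inverted goto ladder, in which each label releases exactly what was acquired before the jump, in reverse order; a generic sketch of that shape, with illustrative clock names:

#include <linux/clk.h>
#include <linux/err.h>

static int example_get_two_clocks(struct device *dev,
				  struct clk **a, struct clk **b)
{
	int ret;

	*a = clk_get(dev, "first");
	if (IS_ERR(*a))
		return PTR_ERR(*a);

	*b = clk_get(dev, "second");
	if (IS_ERR(*b)) {
		ret = PTR_ERR(*b);
		goto out_put_first;	/* undo only what succeeded */
	}

	return 0;

out_put_first:
	clk_put(*a);
	return ret;
}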
@@ -554,17 +556,15 @@ out_dmc0:
static int s5pv210_cpufreq_reboot_notifier_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(0);
int ret;
- struct cpufreq_policy *policy;
- policy = cpufreq_cpu_get(0);
if (!policy) {
pr_debug("cpufreq: get no policy for cpu0\n");
return NOTIFY_BAD;
}
ret = cpufreq_driver_target(policy, SLEEP_FREQ, 0);
- cpufreq_cpu_put(policy);
if (ret < 0)
return NOTIFY_BAD;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index ef078426bfd5..d2a110079f5f 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -15,6 +15,7 @@
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
@@ -293,7 +294,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
latency = perf_ops->transition_latency_get(ph, domain);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
@@ -424,6 +425,15 @@ static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
return true;
}
+ /*
+ * Older Broadcom STB chips had a "clocks" property for CPU node(s)
+ * that did not match the SCMI performance protocol node. If we get
+ * here, it means we have such an older Device Tree, so return true
+ * to preserve backwards compatibility.
+ */
+ if (of_machine_is_compatible("brcm,brcmstb"))
+ return true;
+
return false;
}
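A minimal sketch of the machine-level check used above: of_machine_is_compatible() tests the root node's compatible list, which is the right scope for board-family quirks like this one. The compatible string is illustrative:

#include <linux/of.h>

static bool example_needs_legacy_quirk(void)
{
	/* Nonzero when the root node lists this family. */
	return of_machine_is_compatible("vendor,legacy-family");
}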
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index dcbb0ae7dd47..e530345baddf 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -157,7 +157,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
latency = scpi_ops->get_transition_latency(cpu_dev);
if (!latency)
- latency = CPUFREQ_ETERNAL;
+ latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
policy->cpuinfo.transition_latency = latency;
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 9c0b01e00508..642ddb9ea217 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -89,11 +89,9 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
static int sh_cpufreq_verify(struct cpufreq_policy_data *policy)
{
struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
- struct cpufreq_frequency_table *freq_table;
- freq_table = cpuclk->nr_freqs ? cpuclk->freq_table : NULL;
- if (freq_table)
- return cpufreq_frequency_table_verify(policy, freq_table);
+ if (policy->freq_table)
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 707c71090cc3..2a1550e1aa21 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -182,7 +182,7 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "clock-latency",
&spear_cpufreq.transition_latency))
- spear_cpufreq.transition_latency = CPUFREQ_ETERNAL;
+ spear_cpufreq.transition_latency = CPUFREQ_DEFAULT_TRANSITION_LATENCY_NS;
cnt = of_property_count_u32_elems(np, "cpufreq_tbl");
if (cnt <= 0) {
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 0b66df4ed513..f8b42e981635 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -378,16 +378,16 @@ EXPORT_SYMBOL_GPL(speedstep_detect_processor);
* DETECT SPEEDSTEP SPEEDS *
*********************************************************************/
-unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state))
+int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state))
{
unsigned int prev_speed;
- unsigned int ret = 0;
unsigned long flags;
ktime_t tv1, tv2;
+ int ret = 0;
if ((!processor) || (!low_speed) || (!high_speed) || (!set_state))
return -EINVAL;
diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h
index dc762ea786be..48329647d4c4 100644
--- a/drivers/cpufreq/speedstep-lib.h
+++ b/drivers/cpufreq/speedstep-lib.h
@@ -41,8 +41,8 @@ extern unsigned int speedstep_get_frequency(enum speedstep_processor processor);
* SPEEDSTEP_LOW; the second argument is zero so that no
* cpufreq_notify_transition calls are initiated.
*/
-extern unsigned int speedstep_get_freqs(enum speedstep_processor processor,
- unsigned int *low_speed,
- unsigned int *high_speed,
- unsigned int *transition_latency,
- void (*set_state) (unsigned int state));
+extern int speedstep_get_freqs(enum speedstep_processor processor,
+ unsigned int *low_speed,
+ unsigned int *high_speed,
+ unsigned int *transition_latency,
+ void (*set_state)(unsigned int state));
diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
index 744312a44279..4fffc8e83692 100644
--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
+++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
@@ -332,13 +332,6 @@ static const struct of_device_id sun50i_cpufreq_match_list[] = {
};
MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
-static const struct of_device_id *sun50i_cpufreq_match_node(void)
-{
- struct device_node *np __free(device_node) = of_find_node_by_path("/");
-
- return of_match_node(sun50i_cpufreq_match_list, np);
-}
-
/*
* Since the driver depends on nvmem drivers, which may return EPROBE_DEFER,
* all the real activity is done in the probe, which may be deferred as well.
@@ -346,11 +339,9 @@ static const struct of_device_id *sun50i_cpufreq_match_node(void)
*/
static int __init sun50i_cpufreq_init(void)
{
- const struct of_device_id *match;
int ret;
- match = sun50i_cpufreq_match_node();
- if (!match)
+ if (!of_machine_device_match(sun50i_cpufreq_match_list))
return -ENODEV;
ret = platform_driver_register(&sun50i_cpufreq_driver);
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index cbabb726c664..34ed943c5f34 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/units.h>
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
@@ -58,7 +59,7 @@ static const struct tegra186_cpufreq_cpu tegra186_cpus[] = {
};
struct tegra186_cpufreq_cluster {
- struct cpufreq_frequency_table *table;
+ struct cpufreq_frequency_table *bpmp_lut;
u32 ref_clk_khz;
u32 div;
};
@@ -66,16 +67,119 @@ struct tegra186_cpufreq_cluster {
struct tegra186_cpufreq_data {
void __iomem *regs;
const struct tegra186_cpufreq_cpu *cpus;
+ bool icc_dram_bw_scaling;
struct tegra186_cpufreq_cluster clusters[];
};
+static int tegra_cpufreq_set_bw(struct cpufreq_policy *policy, unsigned long freq_khz)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct device *dev;
+ int ret;
+
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_exact(dev, freq_khz * HZ_PER_KHZ, true);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret)
+ data->icc_dram_bw_scaling = false;
+
+ return ret;
+}
+
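tegra_cpufreq_set_bw() above leans on scope-based OPP reference handling; a standalone sketch, assuming the put_opp cleanup class from <linux/pm_opp.h>:

#include <linux/cleanup.h>
#include <linux/pm_opp.h>
#include <linux/units.h>

static int example_apply_exact_opp(struct device *dev, unsigned long freq_khz)
{
	/* The lookup's reference is dropped automatically at scope exit. */
	struct dev_pm_opp *opp __free(put_opp) =
		dev_pm_opp_find_freq_exact(dev, freq_khz * HZ_PER_KHZ, true);

	if (IS_ERR(opp))
		return PTR_ERR(opp);

	/* Programs clocks, regulators and interconnect bandwidth. */
	return dev_pm_opp_set_opp(dev, opp);
}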
+static int tegra_cpufreq_init_cpufreq_table(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *bpmp_lut,
+ struct cpufreq_frequency_table **opp_table)
+{
+ struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
+ struct cpufreq_frequency_table *freq_table = NULL;
+ struct cpufreq_frequency_table *pos;
+ struct device *cpu_dev;
+ unsigned long rate;
+ int ret, max_opps;
+ int j = 0;
+
+ cpu_dev = get_cpu_device(policy->cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__, policy->cpu);
+ return -ENODEV;
+ }
+
+ /* Initialize the OPP table from the operating-points-v2 property in DT */
+ ret = dev_pm_opp_of_add_table_indexed(cpu_dev, 0);
+ if (ret) {
+ dev_err(cpu_dev, "Invalid or empty opp table in device tree\n");
+ data->icc_dram_bw_scaling = false;
+ return ret;
+ }
+
+ max_opps = dev_pm_opp_get_opp_count(cpu_dev);
+ if (max_opps <= 0) {
+ dev_err(cpu_dev, "Failed to add OPPs\n");
+ return max_opps;
+ }
+
+ /* Disable all OPPs and cross-validate against the LUT later */
+ for (rate = 0; ; rate++) {
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_disable(cpu_dev, rate);
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
+ if (!freq_table)
+ return -ENOMEM;
+
+ /*
+ * Cross-check the frequencies from the BPMP-FW LUT against the OPPs
+ * present in DT and enable only those DT OPPs that also appear in the LUT.
+ */
+ cpufreq_for_each_valid_entry(pos, bpmp_lut) {
+ struct dev_pm_opp *opp __free(put_opp) =
+ dev_pm_opp_find_freq_exact(cpu_dev, pos->frequency * HZ_PER_KHZ, false);
+ if (IS_ERR(opp))
+ continue;
+
+ ret = dev_pm_opp_enable(cpu_dev, pos->frequency * HZ_PER_KHZ);
+ if (ret < 0)
+ return ret;
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = pos->frequency;
+ j++;
+ }
+
+ freq_table[j].driver_data = pos->driver_data;
+ freq_table[j].frequency = CPUFREQ_TABLE_END;
+
+ *opp_table = &freq_table[0];
+
+ dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
+
+ /* Prime interconnect data */
+ tegra_cpufreq_set_bw(policy, freq_table[j - 1].frequency);
+
+ return ret;
+}
+
static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id;
+ struct cpufreq_frequency_table *freq_table;
+ struct cpufreq_frequency_table *bpmp_lut;
u32 cpu;
+ int ret;
- policy->freq_table = data->clusters[cluster].table;
policy->cpuinfo.transition_latency = 300 * 1000;
policy->driver_data = NULL;
@@ -85,6 +189,20 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy)
cpumask_set_cpu(cpu, policy->cpus);
}
+ bpmp_lut = data->clusters[cluster].bpmp_lut;
+
+ if (data->icc_dram_bw_scaling) {
+ ret = tegra_cpufreq_init_cpufreq_table(policy, bpmp_lut, &freq_table);
+ if (!ret) {
+ policy->freq_table = freq_table;
+ return 0;
+ }
+ }
+
+ data->icc_dram_bw_scaling = false;
+ policy->freq_table = bpmp_lut;
+ pr_info("OPP tables missing from DT, EMC frequency scaling disabled\n");
+
return 0;
}
@@ -93,23 +211,30 @@ static int tegra186_cpufreq_set_target(struct cpufreq_policy *policy,
{
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct cpufreq_frequency_table *tbl = policy->freq_table + index;
- unsigned int edvd_offset = data->cpus[policy->cpu].edvd_offset;
+ unsigned int edvd_offset;
u32 edvd_val = tbl->driver_data;
+ u32 cpu;
+
+ for_each_cpu(cpu, policy->cpus) {
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
+
+ if (data->icc_dram_bw_scaling)
+ tegra_cpufreq_set_bw(policy, tbl->frequency);
- writel(edvd_val, data->regs + edvd_offset);
return 0;
}
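A sketch of the per-CPU programming pattern introduced above: when a policy spans several CPUs, the selected operating point is written into every CPU's own register rather than only the policy owner's. The register layout (base + per-CPU offset) is illustrative:

#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/io.h>

static void example_write_policy_val(struct cpufreq_policy *policy,
				     void __iomem *regs,
				     const unsigned int *cpu_offset, u32 val)
{
	u32 cpu;

	/* Program every CPU that shares this policy. */
	for_each_cpu(cpu, policy->cpus)
		writel(val, regs + cpu_offset[cpu]);
}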
static unsigned int tegra186_cpufreq_get(unsigned int cpu)
{
+ struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
struct tegra186_cpufreq_data *data = cpufreq_get_driver_data();
struct tegra186_cpufreq_cluster *cluster;
- struct cpufreq_policy *policy;
unsigned int edvd_offset, cluster_id;
u32 ndiv;
- policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
@@ -117,7 +242,6 @@ static unsigned int tegra186_cpufreq_get(unsigned int cpu)
ndiv = readl(data->regs + edvd_offset) & EDVD_CORE_VOLT_FREQ_F_MASK;
cluster_id = data->cpus[policy->cpu].bpmp_cluster_id;
cluster = &data->clusters[cluster_id];
- cpufreq_cpu_put(policy);
return (cluster->ref_clk_khz * ndiv) / cluster->div;
}
@@ -132,15 +256,16 @@ static struct cpufreq_driver tegra186_cpufreq_driver = {
.init = tegra186_cpufreq_init,
};
-static struct cpufreq_frequency_table *init_vhint_table(
+static struct cpufreq_frequency_table *tegra_cpufreq_bpmp_read_lut(
struct platform_device *pdev, struct tegra_bpmp *bpmp,
- struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id)
+ struct tegra186_cpufreq_cluster *cluster, unsigned int cluster_id,
+ int *num_rates)
{
struct cpufreq_frequency_table *table;
struct mrq_cpu_vhint_request req;
struct tegra_bpmp_message msg;
struct cpu_vhint_data *data;
- int err, i, j, num_rates = 0;
+ int err, i, j;
dma_addr_t phys;
void *virt;
@@ -170,6 +295,7 @@ static struct cpufreq_frequency_table *init_vhint_table(
goto free;
}
+ *num_rates = 0;
for (i = data->vfloor; i <= data->vceil; i++) {
u16 ndiv = data->ndiv[i];
@@ -180,10 +306,10 @@ static struct cpufreq_frequency_table *init_vhint_table(
if (i > 0 && ndiv == data->ndiv[i - 1])
continue;
- num_rates++;
+ (*num_rates)++;
}
- table = devm_kcalloc(&pdev->dev, num_rates + 1, sizeof(*table),
+ table = devm_kcalloc(&pdev->dev, *num_rates + 1, sizeof(*table),
GFP_KERNEL);
if (!table) {
table = ERR_PTR(-ENOMEM);
@@ -225,7 +351,10 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
{
struct tegra186_cpufreq_data *data;
struct tegra_bpmp *bpmp;
- unsigned int i = 0, err;
+ struct device *cpu_dev;
+ unsigned int i = 0, err, edvd_offset;
+ int num_rates = 0;
+ u32 edvd_val, cpu;
data = devm_kzalloc(&pdev->dev,
struct_size(data, clusters, TEGRA186_NUM_CLUSTERS),
@@ -248,15 +377,39 @@ static int tegra186_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < TEGRA186_NUM_CLUSTERS; i++) {
struct tegra186_cpufreq_cluster *cluster = &data->clusters[i];
- cluster->table = init_vhint_table(pdev, bpmp, cluster, i);
- if (IS_ERR(cluster->table)) {
- err = PTR_ERR(cluster->table);
+ cluster->bpmp_lut = tegra_cpufreq_bpmp_read_lut(pdev, bpmp, cluster, i, &num_rates);
+ if (IS_ERR(cluster->bpmp_lut)) {
+ err = PTR_ERR(cluster->bpmp_lut);
+ goto put_bpmp;
+ } else if (!num_rates) {
+ err = -EINVAL;
goto put_bpmp;
}
+
+ for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) {
+ if (data->cpus[cpu].bpmp_cluster_id == i) {
+ edvd_val = cluster->bpmp_lut[num_rates - 1].driver_data;
+ edvd_offset = data->cpus[cpu].edvd_offset;
+ writel(edvd_val, data->regs + edvd_offset);
+ }
+ }
}
tegra186_cpufreq_driver.driver_data = data;
+ /* Check for optional OPPv2 and interconnect paths on CPU0 to enable ICC scaling */
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev) {
+ err = -EPROBE_DEFER;
+ goto put_bpmp;
+ }
+
+ if (dev_pm_opp_of_get_opp_desc_node(cpu_dev)) {
+ err = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
+ if (!err)
+ data->icc_dram_bw_scaling = true;
+ }
+
err = cpufreq_register_driver(&tegra186_cpufreq_driver);
put_bpmp:
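A standalone sketch of the optional interconnect-scaling detection above: DRAM bandwidth scaling is enabled only if the CPU node carries an operating-points-v2 table and interconnect paths that dev_pm_opp_of_find_icc_paths() can resolve. This version assumes CPU0 is representative and drops the node reference after the existence check:

#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/pm_opp.h>

static bool example_icc_scaling_available(void)
{
	struct device *cpu_dev = get_cpu_device(0);
	struct device_node *np;

	if (!cpu_dev)
		return false;

	np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
	if (!np)
		return false;
	of_node_put(np);	/* only needed the existence check */

	return dev_pm_opp_of_find_icc_paths(cpu_dev, NULL) == 0;
}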
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 9b4f516f313e..695599e1001f 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -750,7 +750,8 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
if (IS_ERR(bpmp))
return PTR_ERR(bpmp);
- read_counters_wq = alloc_workqueue("read_counters_wq", __WQ_LEGACY, 1);
+ read_counters_wq = alloc_workqueue("read_counters_wq",
+ __WQ_LEGACY | WQ_PERCPU, 1);
if (!read_counters_wq) {
dev_err(&pdev->dev, "fail to create_workqueue\n");
err = -EINVAL;
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index 5a5147277cd0..6ee76f5fe9c5 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -72,7 +72,9 @@ enum {
#define AM62P5_EFUSE_O_MPU_OPP 15
#define AM62P5_EFUSE_S_MPU_OPP 19
+#define AM62P5_EFUSE_T_MPU_OPP 20
#define AM62P5_EFUSE_U_MPU_OPP 21
+#define AM62P5_EFUSE_V_MPU_OPP 22
#define AM62P5_SUPPORT_O_MPU_OPP BIT(0)
#define AM62P5_SUPPORT_U_MPU_OPP BIT(2)
@@ -153,7 +155,9 @@ static unsigned long am62p5_efuse_xlate(struct ti_cpufreq_data *opp_data,
unsigned long calculated_efuse = AM62P5_SUPPORT_O_MPU_OPP;
switch (efuse) {
+ case AM62P5_EFUSE_V_MPU_OPP:
case AM62P5_EFUSE_U_MPU_OPP:
+ case AM62P5_EFUSE_T_MPU_OPP:
case AM62P5_EFUSE_S_MPU_OPP:
calculated_efuse |= AM62P5_SUPPORT_U_MPU_OPP;
fallthrough;
@@ -307,9 +311,10 @@ static struct ti_cpufreq_soc_data am3517_soc_data = {
};
static const struct soc_device_attribute k3_cpufreq_soc[] = {
- { .family = "AM62X", .revision = "SR1.0" },
- { .family = "AM62AX", .revision = "SR1.0" },
- { .family = "AM62PX", .revision = "SR1.0" },
+ { .family = "AM62X", },
+ { .family = "AM62AX", },
+ { .family = "AM62PX", },
+ { .family = "AM62DX", },
{ /* sentinel */ }
};
@@ -457,6 +462,7 @@ static const struct of_device_id ti_cpufreq_of_match[] __maybe_unused = {
{ .compatible = "ti,omap36xx", .data = &omap36xx_soc_data, },
{ .compatible = "ti,am625", .data = &am625_soc_data, },
{ .compatible = "ti,am62a7", .data = &am62a7_soc_data, },
+ { .compatible = "ti,am62d2", .data = &am62a7_soc_data, },
{ .compatible = "ti,am62p5", .data = &am62p5_soc_data, },
/* legacy */
{ .compatible = "ti,omap3430", .data = &omap34xx_soc_data, },
diff --git a/drivers/cpufreq/virtual-cpufreq.c b/drivers/cpufreq/virtual-cpufreq.c
index 7dd1b0c263c7..6ffa16d239b2 100644
--- a/drivers/cpufreq/virtual-cpufreq.c
+++ b/drivers/cpufreq/virtual-cpufreq.c
@@ -250,7 +250,7 @@ static int virt_cpufreq_offline(struct cpufreq_policy *policy)
static int virt_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
{
if (policy->freq_table)
- return cpufreq_frequency_table_verify(policy, policy->freq_table);
+ return cpufreq_frequency_table_verify(policy);
cpufreq_verify_within_cpu_limits(policy);
return 0;
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index 4abba42fcc31..08f6bf2f6409 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -166,20 +166,11 @@ static const struct of_device_id compatible_machine_match[] = {
static int __init bl_idle_init(void)
{
int ret;
- struct device_node *root = of_find_node_by_path("/");
- const struct of_device_id *match_id;
-
- if (!root)
- return -ENODEV;
/*
* Initialize the driver just for a compliant set of machines
*/
- match_id = of_match_node(compatible_machine_match, root);
-
- of_node_put(root);
-
- if (!match_id)
+ if (!of_machine_device_match(compatible_machine_match))
return -ENODEV;
if (!mcpm_is_available())
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index b19bc60cc627..dcf20ea5ef5e 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -177,26 +177,30 @@ static void psci_idle_syscore_switch(bool suspend)
}
}
-static int psci_idle_syscore_suspend(void)
+static int psci_idle_syscore_suspend(void *data)
{
psci_idle_syscore_switch(true);
return 0;
}
-static void psci_idle_syscore_resume(void)
+static void psci_idle_syscore_resume(void *data)
{
psci_idle_syscore_switch(false);
}
-static struct syscore_ops psci_idle_syscore_ops = {
+static const struct syscore_ops psci_idle_syscore_ops = {
.suspend = psci_idle_syscore_suspend,
.resume = psci_idle_syscore_resume,
};
+static struct syscore psci_idle_syscore = {
+ .ops = &psci_idle_syscore_ops,
+};
+
static void psci_idle_init_syscore(void)
{
if (psci_cpuidle_use_syscore)
- register_syscore_ops(&psci_idle_syscore_ops);
+ register_syscore(&psci_idle_syscore);
}
static void psci_idle_init_cpuhp(void)
@@ -382,8 +386,8 @@ static int psci_idle_init_cpu(struct device *dev, int cpu)
drv->states[0].exit_latency = 1;
drv->states[0].target_residency = 1;
drv->states[0].power_usage = UINT_MAX;
- strcpy(drv->states[0].name, "WFI");
- strcpy(drv->states[0].desc, "ARM WFI");
+ strscpy(drv->states[0].name, "WFI");
+ strscpy(drv->states[0].desc, "ARM WFI");
/*
* If no DT idle states are detected (ret == 0) let the driver
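The strcpy() to strscpy() conversions here use the two-argument form, which deduces the destination bound from the array type and guarantees NUL-termination; a minimal sketch with an illustrative buffer:

#include <linux/string.h>

struct example_state {
	char name[16];
};

static void example_set_name(struct example_state *s)
{
	/* Truncates safely if the literal ever outgrows the buffer. */
	strscpy(s->name, "WFI");
}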
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index 5f386761b156..7ab6f68b96a8 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -86,9 +86,9 @@ static const struct of_device_id qcom_idle_state_match[] = {
static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
{
- struct platform_device *pdev = NULL;
+ struct platform_device *pdev;
struct device_node *cpu_node, *saw_node;
- struct cpuidle_qcom_spm_data *data = NULL;
+ struct cpuidle_qcom_spm_data *data;
int ret;
cpu_node = of_cpu_device_node_get(cpu);
@@ -96,20 +96,23 @@ static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
return -ENODEV;
saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ of_node_put(cpu_node);
if (!saw_node)
return -ENODEV;
pdev = of_find_device_by_node(saw_node);
of_node_put(saw_node);
- of_node_put(cpu_node);
if (!pdev)
return -ENODEV;
data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
- if (!data)
+ if (!data) {
+ put_device(&pdev->dev);
return -ENOMEM;
+ }
data->spm = dev_get_drvdata(&pdev->dev);
+ put_device(&pdev->dev);
if (!data->spm)
return -EINVAL;
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index a360bc4d20b7..19be6475d356 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -303,8 +304,8 @@ static int sbi_cpuidle_init_cpu(struct device *dev, int cpu)
drv->states[0].exit_latency = 1;
drv->states[0].target_residency = 1;
drv->states[0].power_usage = UINT_MAX;
- strcpy(drv->states[0].name, "WFI");
- strcpy(drv->states[0].desc, "RISC-V WFI");
+ strscpy(drv->states[0].name, "WFI");
+ strscpy(drv->states[0].desc, "RISC-V WFI");
/*
* If no DT idle states are detected (ret == 0) let the driver
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 0835da449db8..c7876e9e024f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -184,20 +184,22 @@ static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
* cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
* @drv: cpuidle driver for the given CPU.
* @dev: cpuidle device for the given CPU.
+ * @latency_limit_ns: Idle state exit latency limit
*
* If there are states with the ->enter_s2idle callback, find the deepest of
* them and enter it with frozen tick.
*/
-int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{
int index;
/*
- * Find the deepest state with ->enter_s2idle present, which guarantees
- * that interrupts won't be enabled when it exits and allows the tick to
- * be frozen safely.
+ * Find the deepest state with ->enter_s2idle present that meets the
+ * specified latency limit, which guarantees that interrupts won't be
+ * enabled when it exits and allows the tick to be frozen safely.
*/
- index = find_deepest_state(drv, dev, U64_MAX, 0, true);
+ index = find_deepest_state(drv, dev, latency_limit_ns, 0, true);
if (index > 0) {
enter_s2idle_proper(drv, dev, index);
local_irq_enable();
@@ -635,8 +637,14 @@ static void __cpuidle_device_init(struct cpuidle_device *dev)
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+ unsigned int cpu = dev->cpu;
int i, ret;
+ if (per_cpu(cpuidle_devices, cpu)) {
+ pr_info("CPU%d: cpuidle device already registered\n", cpu);
+ return -EEXIST;
+ }
+
if (!try_module_get(drv->owner))
return -EINVAL;
@@ -648,7 +656,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
dev->states_usage[i].disable |= CPUIDLE_STATE_DISABLED_BY_USER;
}
- per_cpu(cpuidle_devices, dev->cpu) = dev;
+ per_cpu(cpuidle_devices, cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
ret = cpuidle_coupled_register_device(dev);
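A standalone sketch of the duplicate-registration guard added above: the per-CPU device pointer doubles as the "already registered" flag, so a second registration for the same CPU fails fast with -EEXIST instead of silently replacing the first device. Names are illustrative:

#include <linux/errno.h>
#include <linux/percpu.h>

struct example_dev {
	unsigned int cpu;
};

static DEFINE_PER_CPU(struct example_dev *, example_devices);

static int example_register_device(struct example_dev *dev)
{
	unsigned int cpu = dev->cpu;

	if (per_cpu(example_devices, cpu))
		return -EEXIST;		/* one device per CPU */

	per_cpu(example_devices, cpu) = dev;
	return 0;
}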
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 9bbfa594c442..370664c47e65 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -8,6 +8,8 @@
* This code is licenced under the GPL.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -193,6 +195,14 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
s->exit_latency_ns = 0;
else
s->exit_latency = div_u64(s->exit_latency_ns, NSEC_PER_USEC);
+
+ /*
+ * Warn if the exit latency of a CPU idle state exceeds its target
+ * residency, which multiple places in cpuidle assume can never happen.
+ */
+ if (s->exit_latency_ns > s->target_residency_ns)
+ pr_warn("Idle state %d target residency too low\n", i);
}
}
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 0d0f9751ff8f..5d0e7f78c6c5 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -111,6 +111,10 @@ s64 cpuidle_governor_latency_req(unsigned int cpu)
struct device *device = get_cpu_device(cpu);
int device_req = dev_pm_qos_raw_resume_latency(device);
int global_req = cpu_latency_qos_limit();
+ int global_wake_req = cpu_wakeup_latency_qos_limit();
+
+ if (global_req > global_wake_req)
+ global_req = global_wake_req;
if (device_req > global_req)
device_req = global_req;
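The clamping above is equivalent to taking the tightest of the three QoS constraints; a sketch (cpu_wakeup_latency_qos_limit() is the new global wakeup-latency limit this hunk introduces):

#include <linux/minmax.h>

static int example_effective_latency_limit(int device_req, int global_req,
					   int global_wake_req)
{
	/* The smallest (strictest) latency constraint wins. */
	return min3(device_req, global_req, global_wake_req);
}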
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index b2e3d0b0a116..64d6f7a1c776 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -188,20 +188,17 @@ again:
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
+ *
+ * However, if the number of remaining samples is too small to exclude
+ * any more outliers, allow the deepest available idle state to be
+ * selected because there are systems where the time spent by CPUs in
+ * deep idle states is correlated to the maximum frequency the CPUs
+ * can get to. On those systems, shallow idle states should be avoided
+ * unless there is a clear indication that the given CPU is most likely
+ * going to be woken up shortly.
*/
- if (divisor * 4 <= INTERVALS * 3) {
- /*
- * If there are sufficiently many data points still under
- * consideration after the outliers have been eliminated,
- * returning without a prediction would be a mistake because it
- * is likely that the next interval will not exceed the current
- * maximum, so return the latter in that case.
- */
- if (divisor >= INTERVALS / 2)
- return max;
-
+ if (divisor * 4 <= INTERVALS * 3)
return UINT_MAX;
- }
/* Update the thresholds for the next round. */
if (avg - min > max - avg)
@@ -314,45 +311,51 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (s->exit_latency_ns > latency_req)
break;
- if (s->target_residency_ns > predicted_ns) {
- /*
- * Use a physical idle state, not busy polling, unless
- * a timer is going to trigger soon enough.
- */
- if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
- s->target_residency_ns <= data->next_timer_ns) {
- predicted_ns = s->target_residency_ns;
- idx = i;
- break;
- }
- if (predicted_ns < TICK_NSEC)
- break;
-
- if (!tick_nohz_tick_stopped()) {
- /*
- * If the state selected so far is shallow,
- * waking up early won't hurt, so retain the
- * tick in that case and let the governor run
- * again in the next iteration of the loop.
- */
- predicted_ns = drv->states[idx].target_residency_ns;
- break;
- }
+ if (s->target_residency_ns <= predicted_ns) {
+ idx = i;
+ continue;
+ }
+ /*
+ * Use a physical idle state instead of busy polling so long as
+ * its target residency is below the residency threshold, its
+ * exit latency is not greater than the predicted idle duration,
+ * and the next timer doesn't expire soon.
+ */
+ if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+ s->target_residency_ns < RESIDENCY_THRESHOLD_NS &&
+ s->target_residency_ns <= data->next_timer_ns &&
+ s->exit_latency_ns <= predicted_ns) {
+ predicted_ns = s->target_residency_ns;
+ idx = i;
+ break;
+ }
+
+ if (predicted_ns < TICK_NSEC)
+ break;
+
+ if (!tick_nohz_tick_stopped()) {
/*
- * If the state selected so far is shallow and this
- * state's target residency matches the time till the
- * closest timer event, select this one to avoid getting
- * stuck in the shallow one for too long.
+ * If the state selected so far is shallow, waking up
+ * early won't hurt, so retain the tick in that case and
+ * let the governor run again in the next iteration of
+ * the idle loop.
*/
- if (drv->states[idx].target_residency_ns < TICK_NSEC &&
- s->target_residency_ns <= delta_tick)
- idx = i;
-
- return idx;
+ predicted_ns = drv->states[idx].target_residency_ns;
+ break;
}
- idx = i;
+ /*
+ * If the state selected so far is shallow and this state's
+ * target residency matches the time till the closest timer
+ * event, select this one to avoid getting stuck in the shallow
+ * one for too long.
+ */
+ if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+ s->target_residency_ns <= delta_tick)
+ idx = i;
+
+ return idx;
}
if (idx == -1)
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index bfa55c1eab5b..81ac5fd58a1c 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -76,7 +76,7 @@
* likely woken up by a non-timer wakeup source).
*
* 2. If the second sum computed in step 1 is greater than a half of the sum of
- * both metrics for the candidate state bin and all subsequent bins(if any),
+ * both metrics for the candidate state bin and all subsequent bins (if any),
* a shallower idle state is likely to be more suitable, so look for it.
*
* - Traverse the enabled idle states shallower than the candidate one in the
@@ -133,21 +133,33 @@ struct teo_bin {
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
* @total: Grand total of the "intercepts" and "hits" metrics for all bins.
+ * @total_tick: Wakeups by the scheduler tick.
* @tick_intercepts: "Intercepts" before TICK_NSEC.
* @short_idles: Wakeups after short idle periods.
- * @artificial_wakeup: Set if the wakeup has been triggered by a safety net.
+ * @tick_wakeup: Set if the last wakeup was by the scheduler tick.
*/
struct teo_cpu {
s64 sleep_length_ns;
struct teo_bin state_bins[CPUIDLE_STATE_MAX];
unsigned int total;
+ unsigned int total_tick;
unsigned int tick_intercepts;
unsigned int short_idles;
- bool artificial_wakeup;
+ bool tick_wakeup;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
+static void teo_decay(unsigned int *metric)
+{
+ unsigned int delta = *metric >> DECAY_SHIFT;
+
+ if (delta)
+ *metric -= delta;
+ else
+ *metric = 0;
+}
+
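teo_decay() implements an exponential decay with factor (1 - 2^-DECAY_SHIFT) per update; once the value falls below 2^DECAY_SHIFT the shifted delta becomes zero, so the helper snaps the metric straight to zero rather than letting a small residue persist forever. A standalone sketch with an illustrative shift:

#define EXAMPLE_DECAY_SHIFT	3	/* decay by 1/8 per round */

static void example_decay(unsigned int *metric)
{
	unsigned int delta = *metric >> EXAMPLE_DECAY_SHIFT;

	/* Below 2^EXAMPLE_DECAY_SHIFT, force the metric to zero. */
	*metric = delta ? *metric - delta : 0;
}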
/**
* teo_update - Update CPU metrics after wakeup.
* @drv: cpuidle driver containing state data.
@@ -155,21 +167,22 @@ static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
*/
static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
- struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ struct teo_cpu *cpu_data = this_cpu_ptr(&teo_cpus);
int i, idx_timer = 0, idx_duration = 0;
- s64 target_residency_ns;
- u64 measured_ns;
+ s64 target_residency_ns, measured_ns;
+ unsigned int total = 0;
- cpu_data->short_idles -= cpu_data->short_idles >> DECAY_SHIFT;
+ teo_decay(&cpu_data->short_idles);
- if (cpu_data->artificial_wakeup) {
+ if (dev->poll_time_limit) {
+ dev->poll_time_limit = false;
/*
- * If one of the safety nets has triggered, assume that this
+ * Polling state timeout has triggered, so assume that this
* might have been a long sleep.
*/
- measured_ns = U64_MAX;
+ measured_ns = S64_MAX;
} else {
- u64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
+ s64 lat_ns = drv->states[dev->last_state_idx].exit_latency_ns;
measured_ns = dev->last_residency_ns;
/*
@@ -196,8 +209,10 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
for (i = 0; i < drv->state_count; i++) {
struct teo_bin *bin = &cpu_data->state_bins[i];
- bin->hits -= bin->hits >> DECAY_SHIFT;
- bin->intercepts -= bin->intercepts >> DECAY_SHIFT;
+ teo_decay(&bin->hits);
+ total += bin->hits;
+ teo_decay(&bin->intercepts);
+ total += bin->intercepts;
target_residency_ns = drv->states[i].target_residency_ns;
@@ -208,7 +223,24 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
}
}
- cpu_data->tick_intercepts -= cpu_data->tick_intercepts >> DECAY_SHIFT;
+ cpu_data->total = total + PULSE;
+
+ teo_decay(&cpu_data->tick_intercepts);
+
+ teo_decay(&cpu_data->total_tick);
+ if (cpu_data->tick_wakeup) {
+ cpu_data->total_tick += PULSE;
+ /*
+ * If tick wakeups dominate the wakeup pattern, count this one
+ * as a hit on the deepest available idle state to increase the
+ * likelihood of stopping the tick.
+ */
+ if (3 * cpu_data->total_tick > 2 * cpu_data->total) {
+ cpu_data->state_bins[drv->state_count - 1].hits += PULSE;
+ return;
+ }
+ }
+
/*
* If the measured idle duration falls into the same bin as the sleep
* length, this is a "hit", so update the "hits" metric for that bin.
@@ -219,18 +251,9 @@ static void teo_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
cpu_data->state_bins[idx_timer].hits += PULSE;
} else {
cpu_data->state_bins[idx_duration].intercepts += PULSE;
- if (TICK_NSEC <= measured_ns)
+ if (measured_ns <= TICK_NSEC)
cpu_data->tick_intercepts += PULSE;
}
-
- cpu_data->total -= cpu_data->total >> DECAY_SHIFT;
- cpu_data->total += PULSE;
-}
-
-static bool teo_state_ok(int i, struct cpuidle_driver *drv)
-{
- return !tick_nohz_tick_stopped() ||
- drv->states[i].target_residency_ns >= TICK_NSEC;
}
/**
@@ -239,17 +262,15 @@ static bool teo_state_ok(int i, struct cpuidle_driver *drv)
* @dev: Target CPU.
* @state_idx: Index of the capping idle state.
* @duration_ns: Idle duration value to match.
- * @no_poll: Don't consider polling states.
*/
static int teo_find_shallower_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int state_idx,
- s64 duration_ns, bool no_poll)
+ s64 duration_ns)
{
int i;
for (i = state_idx - 1; i >= 0; i--) {
- if (dev->states_usage[i].disable ||
- (no_poll && drv->states[i].flags & CPUIDLE_FLAG_POLLING))
+ if (dev->states_usage[i].disable)
continue;
state_idx = i;
@@ -268,7 +289,7 @@ static int teo_find_shallower_state(struct cpuidle_driver *drv,
static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
- struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ struct teo_cpu *cpu_data = this_cpu_ptr(&teo_cpus);
s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
ktime_t delta_tick = TICK_NSEC / 2;
unsigned int idx_intercept_sum = 0;
@@ -356,7 +377,18 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* better choice.
*/
if (2 * idx_intercept_sum > cpu_data->total - idx_hit_sum) {
- int first_suitable_idx = idx;
+ int min_idx = idx0;
+
+ if (tick_nohz_tick_stopped()) {
+ /*
+ * Look for the shallowest idle state below the current
+ * candidate one whose target residency is at least
+ * equal to the tick period length.
+ */
+ while (min_idx < idx &&
+ drv->states[min_idx].target_residency_ns < TICK_NSEC)
+ min_idx++;
+ }
/*
* Look for the deepest idle state whose target residency had
@@ -366,49 +398,14 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* Take the possible duration limitation present if the tick
* has been stopped already into account.
*/
- intercept_sum = 0;
-
- for (i = idx - 1; i >= 0; i--) {
- struct teo_bin *bin = &cpu_data->state_bins[i];
-
- intercept_sum += bin->intercepts;
-
- if (2 * intercept_sum > idx_intercept_sum) {
- /*
- * Use the current state unless it is too
- * shallow or disabled, in which case take the
- * first enabled state that is deep enough.
- */
- if (teo_state_ok(i, drv) &&
- !dev->states_usage[i].disable) {
- idx = i;
- break;
- }
- idx = first_suitable_idx;
- break;
- }
+ for (i = idx - 1, intercept_sum = 0; i >= min_idx; i--) {
+ intercept_sum += cpu_data->state_bins[i].intercepts;
if (dev->states_usage[i].disable)
continue;
- if (teo_state_ok(i, drv)) {
- /*
- * The current state is deep enough, but still
- * there may be a better one.
- */
- first_suitable_idx = i;
- continue;
- }
-
- /*
- * The current state is too shallow, so if no suitable
- * states other than the initial candidate have been
- * found, give up (the remaining states to check are
- * shallower still), but otherwise the first suitable
- * state other than the initial candidate may turn out
- * to be preferable.
- */
- if (first_suitable_idx == idx)
+ idx = i;
+ if (2 * intercept_sum > idx_intercept_sum)
break;
}
}
@@ -458,11 +455,8 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* If the closest expected timer is before the target residency of the
* candidate state, a shallower one needs to be found.
*/
- if (drv->states[idx].target_residency_ns > duration_ns) {
- i = teo_find_shallower_state(drv, dev, idx, duration_ns, false);
- if (teo_state_ok(i, drv))
- idx = i;
- }
+ if (drv->states[idx].target_residency_ns > duration_ns)
+ idx = teo_find_shallower_state(drv, dev, idx, duration_ns);
/*
* If the selected state's target residency is below the tick length
@@ -490,7 +484,7 @@ end:
*/
if (idx > idx0 &&
drv->states[idx].target_residency_ns > delta_tick)
- idx = teo_find_shallower_state(drv, dev, idx, delta_tick, false);
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
out_tick:
*stop_tick = false;
@@ -504,20 +498,11 @@ out_tick:
*/
static void teo_reflect(struct cpuidle_device *dev, int state)
{
- struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ struct teo_cpu *cpu_data = this_cpu_ptr(&teo_cpus);
+
+ cpu_data->tick_wakeup = tick_nohz_idle_got_tick();
dev->last_state_idx = state;
- if (dev->poll_time_limit ||
- (tick_nohz_idle_got_tick() && cpu_data->sleep_length_ns > TICK_NSEC)) {
- /*
- * The wakeup was not "genuine", but triggered by one of the
- * safety nets.
- */
- dev->poll_time_limit = false;
- cpu_data->artificial_wakeup = true;
- } else {
- cpu_data->artificial_wakeup = false;
- }
}
/**
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index 9b6d90a72601..c7524e4c522a 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -4,9 +4,13 @@
*/
#include <linux/cpuidle.h>
+#include <linux/export.h>
+#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/idle.h>
+#include <linux/sprintf.h>
+#include <linux/types.h>
#define POLL_IDLE_RELAX_COUNT 200
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index d6f5da61cb7d..61de64817604 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -27,14 +27,14 @@ static ssize_t show_available_governors(struct device *dev,
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
- if (i >= (ssize_t) (PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
+ if (i >= (ssize_t)(PAGE_SIZE - (CPUIDLE_NAME_LEN + 2)))
goto out;
- i += scnprintf(&buf[i], CPUIDLE_NAME_LEN + 1, "%s ", tmp->name);
+ i += sysfs_emit_at(buf, i, "%.*s ", CPUIDLE_NAME_LEN, tmp->name);
}
out:
- i+= sprintf(&buf[i], "\n");
+ i += sysfs_emit_at(buf, i, "\n");
mutex_unlock(&cpuidle_lock);
return i;
}
@@ -49,9 +49,9 @@ static ssize_t show_current_driver(struct device *dev,
spin_lock(&cpuidle_driver_lock);
drv = cpuidle_get_driver();
if (drv)
- ret = sprintf(buf, "%s\n", drv->name);
+ ret = sysfs_emit(buf, "%s\n", drv->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
spin_unlock(&cpuidle_driver_lock);
return ret;
@@ -65,9 +65,9 @@ static ssize_t show_current_governor(struct device *dev,
mutex_lock(&cpuidle_lock);
if (cpuidle_curr_governor)
- ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
+ ret = sysfs_emit(buf, "%s\n", cpuidle_curr_governor->name);
else
- ret = sprintf(buf, "none\n");
+ ret = sysfs_emit(buf, "none\n");
mutex_unlock(&cpuidle_lock);
return ret;
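The sprintf() to sysfs_emit() conversions in this file buy PAGE_SIZE bounding and a buffer-alignment sanity check for free; a minimal show() callback sketch:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* Never writes past PAGE_SIZE; returns the bytes emitted. */
	return sysfs_emit(buf, "%s\n", "example");
}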
@@ -230,7 +230,7 @@ static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0644, show, store)
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
- return sprintf(buf, "%u\n", state->_name);\
+ return sysfs_emit(buf, "%u\n", state->_name);\
}
#define define_show_state_ull_function(_name) \
@@ -238,7 +238,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
@@ -247,8 +247,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
char *buf) \
{ \
if (state->_name[0] == '\0')\
- return sprintf(buf, "<null>\n");\
- return sprintf(buf, "%s\n", state->_name);\
+ return sysfs_emit(buf, "<null>\n");\
+ return sysfs_emit(buf, "%s\n", state->_name);\
}
#define define_show_state_time_function(_name) \
@@ -256,7 +256,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state->_name##_ns)); \
}
define_show_state_time_function(exit_latency)
@@ -273,14 +273,14 @@ static ssize_t show_state_time(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
+ return sysfs_emit(buf, "%llu\n", ktime_to_us(state_usage->time_ns));
}
static ssize_t show_state_disable(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
state_usage->disable & CPUIDLE_STATE_DISABLED_BY_USER);
}
@@ -310,7 +310,7 @@ static ssize_t show_state_default_status(struct cpuidle_state *state,
struct cpuidle_state_usage *state_usage,
char *buf)
{
- return sprintf(buf, "%s\n",
+ return sysfs_emit(buf, "%s\n",
state->flags & CPUIDLE_FLAG_OFF ? "disabled" : "enabled");
}
@@ -358,7 +358,7 @@ static ssize_t show_state_s2idle_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, \
char *buf) \
{ \
- return sprintf(buf, "%llu\n", state_usage->s2idle_##_name);\
+ return sysfs_emit(buf, "%llu\n", state_usage->s2idle_##_name);\
}
define_show_state_s2idle_ull_function(usage);
@@ -550,7 +550,7 @@ static ssize_t show_driver_name(struct cpuidle_driver *drv, char *buf)
ssize_t ret;
spin_lock(&cpuidle_driver_lock);
- ret = sprintf(buf, "%s\n", drv ? drv->name : "none");
+ ret = sysfs_emit(buf, "%s\n", drv ? drv->name : "none");
spin_unlock(&cpuidle_driver_lock);
return ret;
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 04b4c43b6bae..8d3b5d2890f8 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -439,7 +439,7 @@ config CRYPTO_DEV_ATMEL_AUTHENC
config CRYPTO_DEV_ATMEL_AES
tristate "Support for Atmel AES hw accelerator"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
select CRYPTO_AES
select CRYPTO_AEAD
select CRYPTO_SKCIPHER
@@ -725,6 +725,19 @@ config CRYPTO_DEV_TEGRA
Select this to enable Tegra Security Engine which accelerates various
AES encryption/decryption and HASH algorithms.
+config CRYPTO_DEV_XILINX_TRNG
+ tristate "Support for Xilinx True Random Generator"
+ depends on ZYNQMP_FIRMWARE || COMPILE_TEST
+ select CRYPTO_DF80090A
+ select CRYPTO_RNG
+ select HW_RANDOM
+ help
+ The Xilinx Versal SoC driver provides kernel-side support for the True Random
+ Number Generator and a pseudo-random number generator in CTR_DRBG mode, as
+ defined in NIST SP800-90A.
+
+ To compile this driver as a module, choose M here: the module
+ will be called xilinx-trng.
+
config CRYPTO_DEV_ZYNQMP_AES
tristate "Support for Xilinx ZynqMP AES hw accelerator"
depends on ZYNQMP_FIRMWARE || COMPILE_TEST
@@ -840,6 +853,7 @@ config CRYPTO_DEV_CCREE
If unsure say Y.
source "drivers/crypto/hisilicon/Kconfig"
+source "drivers/crypto/loongson/Kconfig"
source "drivers/crypto/amlogic/Kconfig"
@@ -863,5 +877,6 @@ config CRYPTO_DEV_SA2UL
source "drivers/crypto/aspeed/Kconfig"
source "drivers/crypto/starfive/Kconfig"
source "drivers/crypto/inside-secure/eip93/Kconfig"
+source "drivers/crypto/ti/Kconfig"
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 22eadcc8f4a2..322ae8854e3e 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -44,7 +44,9 @@ obj-y += inside-secure/
obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-y += xilinx/
obj-y += hisilicon/
+obj-y += loongson/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-y += intel/
obj-y += starfive/
obj-y += cavium/
+obj-y += ti/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
index 5663df49dd81..021614b65e39 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
@@ -111,7 +111,7 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
- struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct sun8i_ce_alg_template *algt;
algt = container_of(alg, struct sun8i_ce_alg_template,
alg.skcipher.base);
@@ -131,21 +131,19 @@ static int sun8i_ce_cipher_fallback(struct skcipher_request *areq)
return err;
}
-static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req)
+static int sun8i_ce_cipher_prepare(struct skcipher_request *areq,
+ struct ce_task *cet)
{
- struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
struct sun8i_ce_alg_template *algt;
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
struct scatterlist *sg;
unsigned int todo, len, offset, ivsize;
u32 common, sym;
- int flow, i;
+ int i;
int nr_sgs = 0;
int nr_sgd = 0;
int err = 0;
@@ -163,14 +161,9 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
algt->stat_req++;
- flow = rctx->flow;
-
- chan = &ce->chanlist[flow];
-
- cet = chan->tl;
memset(cet, 0, sizeof(struct ce_task));
- cet->t_id = cpu_to_le32(flow);
+ cet->t_id = cpu_to_le32(rctx->flow);
common = ce->variant->alg_cipher[algt->ce_algo_id];
common |= rctx->op_dir | CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
@@ -209,11 +202,11 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
if (areq->iv && ivsize > 0) {
if (rctx->op_dir & CE_DECRYPTION) {
offset = areq->cryptlen - ivsize;
- scatterwalk_map_and_copy(chan->backup_iv, areq->src,
+ scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
offset, ivsize, 0);
}
- memcpy(chan->bounce_iv, areq->iv, ivsize);
- rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, ivsize,
+ memcpy(rctx->bounce_iv, areq->iv, ivsize);
+ rctx->addr_iv = dma_map_single(ce->dev, rctx->bounce_iv, ivsize,
DMA_TO_DEVICE);
if (dma_mapping_error(ce->dev, rctx->addr_iv)) {
dev_err(ce->dev, "Cannot DMA MAP IV\n");
@@ -276,7 +269,6 @@ static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req
goto theend_sgs;
}
- chan->timeout = areq->cryptlen;
rctx->nr_sgs = ns;
rctx->nr_sgd = nd;
return 0;
@@ -300,13 +292,13 @@ theend_iv:
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, chan->backup_iv, ivsize);
- memzero_explicit(chan->backup_iv, ivsize);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- memzero_explicit(chan->bounce_iv, ivsize);
+ memzero_explicit(rctx->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
@@ -315,24 +307,17 @@ theend:
return err;
}
-static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
- void *async_req)
+static void sun8i_ce_cipher_unprepare(struct skcipher_request *areq,
+ struct ce_task *cet)
{
- struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
struct sun8i_ce_dev *ce = op->ce;
struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
unsigned int ivsize, offset;
int nr_sgs = rctx->nr_sgs;
int nr_sgd = rctx->nr_sgd;
- int flow;
- flow = rctx->flow;
- chan = &ce->chanlist[flow];
- cet = chan->tl;
ivsize = crypto_skcipher_ivsize(tfm);
if (areq->src == areq->dst) {
@@ -349,43 +334,43 @@ static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine,
DMA_TO_DEVICE);
offset = areq->cryptlen - ivsize;
if (rctx->op_dir & CE_DECRYPTION) {
- memcpy(areq->iv, chan->backup_iv, ivsize);
- memzero_explicit(chan->backup_iv, ivsize);
+ memcpy(areq->iv, rctx->backup_iv, ivsize);
+ memzero_explicit(rctx->backup_iv, ivsize);
} else {
scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
ivsize, 0);
}
- memzero_explicit(chan->bounce_iv, ivsize);
+ memzero_explicit(rctx->bounce_iv, ivsize);
}
dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE);
}
-static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq)
-{
- struct skcipher_request *breq = container_of(areq, struct skcipher_request, base);
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq);
- struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
- struct sun8i_ce_dev *ce = op->ce;
- struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq);
- int flow, err;
-
- flow = rctx->flow;
- err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm));
- sun8i_ce_cipher_unprepare(engine, areq);
- local_bh_disable();
- crypto_finalize_skcipher_request(engine, breq, err);
- local_bh_enable();
-}
-
int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq)
{
- int err = sun8i_ce_cipher_prepare(engine, areq);
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(req);
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct sun8i_cipher_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ struct sun8i_ce_flow *chan;
+ int err;
+
+ chan = &ce->chanlist[rctx->flow];
+ err = sun8i_ce_cipher_prepare(req, chan->tl);
if (err)
return err;
- sun8i_ce_cipher_run(engine, areq);
+ err = sun8i_ce_run_task(ce, rctx->flow,
+ crypto_tfm_alg_name(req->base.tfm));
+
+ sun8i_ce_cipher_unprepare(req, chan->tl);
+
+ local_bh_disable();
+ crypto_finalize_skcipher_request(engine, req, err);
+ local_bh_enable();
+
return 0;
}
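
Editor's note: the hunks above fold the old prepare/run/unprepare engine callbacks into a single do_one_request() handler. A minimal sketch of that pattern follows; example_prepare/example_run/example_unprepare are hypothetical helpers, while skcipher_request_cast() and crypto_finalize_skcipher_request() are the real crypto API calls used above.

static int example_do_one(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	int err;

	err = example_prepare(req);	/* map key, IV and src/dst SGs */
	if (err)
		return err;

	err = example_run(req);		/* kick the HW, wait for the IRQ */
	example_unprepare(req);		/* unmap everything, success or not */

	/* hand the finished request back to the engine */
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, req, err);
	local_bh_enable();
	return 0;
}
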
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 658f520cee0c..c16bb6ce6ee3 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -169,6 +169,12 @@ static const struct ce_variant ce_r40_variant = {
.trng = CE_ID_NOTSUPP,
};
+static void sun8i_ce_dump_task_descriptors(struct sun8i_ce_flow *chan)
+{
+ print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
+ chan->tl, sizeof(struct ce_task), false);
+}
+
/*
* sun8i_ce_get_engine_number() gets the next channel slot
* This is a simple round-robin way of getting the next channel
@@ -183,7 +189,6 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
{
u32 v;
int err = 0;
- struct ce_task *cet = ce->chanlist[flow].tl;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
ce->chanlist[flow].stat_req++;
@@ -210,11 +215,10 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
mutex_unlock(&ce->mlock);
wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete,
- msecs_to_jiffies(ce->chanlist[flow].timeout));
+ msecs_to_jiffies(CE_DMA_TIMEOUT_MS));
if (ce->chanlist[flow].status == 0) {
- dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name,
- ce->chanlist[flow].timeout, flow);
+ dev_err(ce->dev, "DMA timeout for %s on flow %d\n", name, flow);
err = -EFAULT;
}
/* No need to lock for this read, the channel is locked so
@@ -226,9 +230,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
/* Sadly, the error bit is not per flow */
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -245,9 +248,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
v &= 0xF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -261,9 +263,8 @@ int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name)
v &= 0xFF;
if (v) {
dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow);
+ sun8i_ce_dump_task_descriptors(&ce->chanlist[flow]);
err = -EFAULT;
- print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4,
- cet, sizeof(struct ce_task), false);
}
if (v & CE_ERR_ALGO_NOTSUP)
dev_err(ce->dev, "CE ERROR: algorithm not supported\n");
@@ -758,18 +759,6 @@ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce)
err = -ENOMEM;
goto error_engine;
}
- ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
- GFP_KERNEL | GFP_DMA);
- if (!ce->chanlist[i].bounce_iv) {
- err = -ENOMEM;
- goto error_engine;
- }
- ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE,
- GFP_KERNEL);
- if (!ce->chanlist[i].backup_iv) {
- err = -ENOMEM;
- goto error_engine;
- }
}
return 0;
error_engine:
@@ -1063,7 +1052,7 @@ static int sun8i_ce_probe(struct platform_device *pdev)
pm_runtime_put_sync(ce->dev);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct dentry *dbgfs_dir __maybe_unused;
+ struct dentry *dbgfs_dir;
struct dentry *dbgfs_stats __maybe_unused;
/* Ignore error of debugfs */
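
Editor's note: the timeout hunks above replace the per-request chan->timeout (derived from cryptlen/nbytes) with the fixed CE_DMA_TIMEOUT_MS. A short sketch of the resulting wait, to make the return-value convention explicit; chan is a sun8i_ce_flow as in the code above.

	long left;

	left = wait_for_completion_interruptible_timeout(&chan->complete,
				msecs_to_jiffies(CE_DMA_TIMEOUT_MS));
	/*
	 * left == 0 means the 3 s timeout expired, left < 0 means the wait
	 * was interrupted.  The driver instead keys off chan->status, which
	 * the interrupt handler sets to 1 once the task descriptor is done.
	 */
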
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
index 13bdfb8a2c62..d01594353d9a 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
@@ -26,7 +26,7 @@
static void sun8i_ce_hash_stat_fb_inc(struct crypto_ahash *tfm)
{
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) {
- struct sun8i_ce_alg_template *algt __maybe_unused;
+ struct sun8i_ce_alg_template *algt;
struct ahash_alg *alg = crypto_ahash_alg(tfm);
algt = container_of(alg, struct sun8i_ce_alg_template,
@@ -58,7 +58,8 @@ int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm)
crypto_ahash_set_reqsize(tfm,
sizeof(struct sun8i_ce_hash_reqctx) +
- crypto_ahash_reqsize(op->fallback_tfm));
+ crypto_ahash_reqsize(op->fallback_tfm) +
+ CRYPTO_DMA_PADDING);
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
memcpy(algt->fbname,
@@ -84,7 +85,7 @@ void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm)
int sun8i_ce_hash_init(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -100,7 +101,7 @@ int sun8i_ce_hash_init(struct ahash_request *areq)
int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -114,7 +115,7 @@ int sun8i_ce_hash_export(struct ahash_request *areq, void *out)
int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -128,7 +129,7 @@ int sun8i_ce_hash_import(struct ahash_request *areq, const void *in)
int sun8i_ce_hash_final(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -145,7 +146,7 @@ int sun8i_ce_hash_final(struct ahash_request *areq)
int sun8i_ce_hash_update(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -160,7 +161,7 @@ int sun8i_ce_hash_update(struct ahash_request *areq)
int sun8i_ce_hash_finup(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -178,7 +179,7 @@ int sun8i_ce_hash_finup(struct ahash_request *areq)
static int sun8i_ce_hash_digest_fb(struct ahash_request *areq)
{
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
@@ -238,19 +239,15 @@ static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq)
int sun8i_ce_hash_digest(struct ahash_request *areq)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
- struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
- struct sun8i_ce_alg_template *algt;
- struct sun8i_ce_dev *ce;
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
struct crypto_engine *engine;
int e;
if (sun8i_ce_hash_need_fallback(areq))
return sun8i_ce_hash_digest_fb(areq);
- algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
- ce = algt->ce;
-
e = sun8i_ce_get_engine_number(ce);
rctx->flow = e;
engine = ce->chanlist[e].engine;
@@ -316,28 +313,22 @@ static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count,
return j;
}
-int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
+static int sun8i_ce_hash_prepare(struct ahash_request *areq, struct ce_task *cet)
{
- struct ahash_request *areq = container_of(breq, struct ahash_request, base);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
- struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
struct sun8i_ce_alg_template *algt;
struct sun8i_ce_dev *ce;
- struct sun8i_ce_flow *chan;
- struct ce_task *cet;
struct scatterlist *sg;
- int nr_sgs, flow, err;
+ int nr_sgs, err;
unsigned int len;
u32 common;
u64 byte_count;
__le32 *bf;
- void *buf, *result;
int j, i, todo;
u64 bs;
int digestsize;
- dma_addr_t addr_res, addr_pad;
- int ns = sg_nents_for_len(areq->src, areq->nbytes);
algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base);
ce = algt->ce;
@@ -349,32 +340,16 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
if (digestsize == SHA384_DIGEST_SIZE)
digestsize = SHA512_DIGEST_SIZE;
- /* the padding could be up to two block. */
- buf = kcalloc(2, bs, GFP_KERNEL | GFP_DMA);
- if (!buf) {
- err = -ENOMEM;
- goto err_out;
- }
- bf = (__le32 *)buf;
-
- result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
- if (!result) {
- err = -ENOMEM;
- goto err_free_buf;
- }
-
- flow = rctx->flow;
- chan = &ce->chanlist[flow];
+ bf = (__le32 *)rctx->pad;
if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG))
algt->stat_req++;
dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes);
- cet = chan->tl;
memset(cet, 0, sizeof(struct ce_task));
- cet->t_id = cpu_to_le32(flow);
+ cet->t_id = cpu_to_le32(rctx->flow);
common = ce->variant->alg_hash[algt->ce_algo_id];
common |= CE_COMM_INT;
cet->t_common_ctl = cpu_to_le32(common);
@@ -382,11 +357,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
cet->t_sym_ctl = 0;
cet->t_asym_ctl = 0;
- nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ rctx->nr_sgs = sg_nents_for_len(areq->src, areq->nbytes);
+ nr_sgs = dma_map_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs);
err = -EINVAL;
- goto err_free_result;
+ goto err_out;
}
len = areq->nbytes;
@@ -401,10 +377,13 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
err = -EINVAL;
goto err_unmap_src;
}
- addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE);
- cet->t_dst[0].addr = desc_addr_val_le32(ce, addr_res);
- cet->t_dst[0].len = cpu_to_le32(digestsize / 4);
- if (dma_mapping_error(ce->dev, addr_res)) {
+
+ rctx->result_len = digestsize;
+ rctx->addr_res = dma_map_single(ce->dev, rctx->result, rctx->result_len,
+ DMA_FROM_DEVICE);
+ cet->t_dst[0].addr = desc_addr_val_le32(ce, rctx->addr_res);
+ cet->t_dst[0].len = cpu_to_le32(rctx->result_len / 4);
+ if (dma_mapping_error(ce->dev, rctx->addr_res)) {
dev_err(ce->dev, "DMA map dest\n");
err = -EINVAL;
goto err_unmap_src;
@@ -432,10 +411,12 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
goto err_unmap_result;
}
- addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE);
- cet->t_src[i].addr = desc_addr_val_le32(ce, addr_pad);
+ rctx->pad_len = j * 4;
+ rctx->addr_pad = dma_map_single(ce->dev, rctx->pad, rctx->pad_len,
+ DMA_TO_DEVICE);
+ cet->t_src[i].addr = desc_addr_val_le32(ce, rctx->addr_pad);
cet->t_src[i].len = cpu_to_le32(j);
- if (dma_mapping_error(ce->dev, addr_pad)) {
+ if (dma_mapping_error(ce->dev, rctx->addr_pad)) {
dev_err(ce->dev, "DMA error on padding SG\n");
err = -EINVAL;
goto err_unmap_result;
@@ -446,29 +427,59 @@ int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq)
else
cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j);
- chan->timeout = areq->nbytes;
-
- err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm));
-
- dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE);
+ return 0;
err_unmap_result:
- dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE);
- if (!err)
- memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
err_unmap_src:
- dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
-err_free_result:
- kfree(result);
+err_out:
+ return err;
+}
-err_free_buf:
- kfree(buf);
+static void sun8i_ce_hash_unprepare(struct ahash_request *areq,
+ struct ce_task *cet)
+{
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_dev *ce = ctx->ce;
+
+ dma_unmap_single(ce->dev, rctx->addr_pad, rctx->pad_len, DMA_TO_DEVICE);
+ dma_unmap_single(ce->dev, rctx->addr_res, rctx->result_len,
+ DMA_FROM_DEVICE);
+ dma_unmap_sg(ce->dev, areq->src, rctx->nr_sgs, DMA_TO_DEVICE);
+}
+
+int sun8i_ce_hash_run(struct crypto_engine *engine, void *async_req)
+{
+ struct ahash_request *areq = ahash_request_cast(async_req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun8i_ce_hash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx_dma(areq);
+ struct sun8i_ce_dev *ce = ctx->ce;
+ struct sun8i_ce_flow *chan;
+ int err;
+
+ chan = &ce->chanlist[rctx->flow];
+
+ err = sun8i_ce_hash_prepare(areq, chan->tl);
+ if (err)
+ return err;
+
+ err = sun8i_ce_run_task(ce, rctx->flow, crypto_ahash_alg_name(tfm));
+
+ sun8i_ce_hash_unprepare(areq, chan->tl);
+
+ if (!err)
+ memcpy(areq->result, rctx->result,
+ crypto_ahash_digestsize(tfm));
-err_out:
local_bh_disable();
- crypto_finalize_hash_request(engine, breq, err);
+ crypto_finalize_hash_request(engine, async_req, err);
local_bh_enable();
return 0;
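
Editor's note: the ahash_request_ctx_dma() conversion above depends on padding the request size so that the per-request context can be realigned for DMA. A minimal sketch of the pattern, with a hypothetical my_reqctx; crypto_ahash_set_reqsize(), ahash_request_ctx_dma(), CRYPTO_DMA_ALIGN and CRYPTO_DMA_PADDING are the real crypto API symbols used by these hunks.

struct my_reqctx {				/* hypothetical */
	dma_addr_t addr_res;
	u8 result[64] __aligned(CRYPTO_DMA_ALIGN);
};

static int my_init_tfm(struct crypto_ahash *tfm)
{
	/* reserve CRYPTO_DMA_PADDING so the ctx can be aligned up */
	crypto_ahash_set_reqsize(tfm, sizeof(struct my_reqctx) +
				      CRYPTO_DMA_PADDING);
	return 0;
}

static int my_digest(struct ahash_request *areq)
{
	/* returns the request ctx pointer rounded up to CRYPTO_DMA_ALIGN */
	struct my_reqctx *rctx = ahash_request_ctx_dma(areq);

	rctx->addr_res = 0;	/* ... map rctx->result for DMA, etc. ... */
	return 0;
}
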
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
index 762459867b6c..d0a1ac66738b 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
@@ -137,7 +137,6 @@ int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src,
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
- ce->chanlist[flow].timeout = 2000;
err = sun8i_ce_run_task(ce, 3, "PRNG");
mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
index e1e8bc15202e..244529bf0616 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
@@ -79,7 +79,6 @@ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wa
cet->t_dst[0].addr = desc_addr_val_le32(ce, dma_dst);
cet->t_dst[0].len = cpu_to_le32(todo / 4);
- ce->chanlist[flow].timeout = todo;
err = sun8i_ce_run_task(ce, 3, "TRNG");
mutex_unlock(&ce->rnglock);
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 0f9a89067016..71f5a0cd3d45 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -106,9 +106,13 @@
#define MAX_SG 8
#define CE_MAX_CLOCKS 4
+#define CE_DMA_TIMEOUT_MS 3000
#define MAXFLOW 4
+#define CE_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
+#define CE_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
/*
* struct ce_clock - Describe clocks used by sun8i-ce
* @name: Name of clock needed by this variant
@@ -187,8 +191,6 @@ struct ce_task {
* @status: set to 1 by interrupt if task is done
* @t_phy: Physical address of task
* @tl: pointer to the current ce_task for this flow
- * @backup_iv: buffer which contain the next IV to store
- * @bounce_iv: buffer which contain the IV
* @stat_req: number of request done by this flow
*/
struct sun8i_ce_flow {
@@ -196,10 +198,7 @@ struct sun8i_ce_flow {
struct completion complete;
int status;
dma_addr_t t_phy;
- int timeout;
struct ce_task *tl;
- void *backup_iv;
- void *bounce_iv;
#ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG
unsigned long stat_req;
#endif
@@ -264,6 +263,8 @@ static inline __le32 desc_addr_val_le32(struct sun8i_ce_dev *dev,
* @nr_sgd: The number of destination SG (as given by dma_map_sg())
* @addr_iv: The IV addr returned by dma_map_single, need to unmap later
* @addr_key: The key addr returned by dma_map_single, need to unmap later
+ * @bounce_iv: Current IV buffer
+ * @backup_iv: Next IV buffer
* @fallback_req: request struct for invoking the fallback skcipher TFM
*/
struct sun8i_cipher_req_ctx {
@@ -273,6 +274,8 @@ struct sun8i_cipher_req_ctx {
int nr_sgd;
dma_addr_t addr_iv;
dma_addr_t addr_key;
+ u8 bounce_iv[AES_BLOCK_SIZE] __aligned(sizeof(u32));
+ u8 backup_iv[AES_BLOCK_SIZE];
struct skcipher_request fallback_req; // keep at the end
};
@@ -304,9 +307,23 @@ struct sun8i_ce_hash_tfm_ctx {
* struct sun8i_ce_hash_reqctx - context for an ahash request
* @fallback_req: pre-allocated fallback request
* @flow: the flow to use for this request
+ * @nr_sgs: number of entries in the source scatterlist
+ * @result_len: result length in bytes
+ * @pad_len: padding length in bytes
+ * @addr_res: DMA address of the result buffer, returned by dma_map_single()
+ * @addr_pad: DMA address of the padding buffer, returned by dma_map_single()
+ * @result: per-request result buffer
+ * @pad: per-request padding buffer (up to 2 blocks)
*/
struct sun8i_ce_hash_reqctx {
int flow;
+ int nr_sgs;
+ size_t result_len;
+ size_t pad_len;
+ dma_addr_t addr_res;
+ dma_addr_t addr_pad;
+ u8 result[CE_MAX_HASH_DIGEST_SIZE] __aligned(CRYPTO_DMA_ALIGN);
+ u8 pad[2 * CE_MAX_HASH_BLOCK_SIZE];
struct ahash_request fallback_req; // keep at the end
};
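
Editor's note on the pad[2 * CE_MAX_HASH_BLOCK_SIZE] sizing above: Merkle-Damgard padding (one 0x80 byte, zeros, then the 64/128-bit message bit-length) can spill into a second block whenever the message tail leaves no room for the length field, so two blocks is the worst case. A small arithmetic sketch for SHA-256 (64-byte blocks, 8-byte length field):

/* bytes of padding appended to a msg_len-byte SHA-256 message */
static size_t sha256_pad_len(u64 msg_len)
{
	return (64 - ((msg_len + 1 + 8) % 64)) % 64 + 1 + 8;
}

/* e.g. msg_len = 55 -> 9 bytes of padding (fits the same block),
 *      msg_len = 56 -> 72 bytes (spills into a second block) */
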
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 8bc08089f044..36a1ebca2e70 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -502,6 +502,7 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
ss = algt->ss;
+ j = 0;
digestsize = crypto_ahash_digestsize(tfm);
if (digestsize == SHA224_DIGEST_SIZE)
@@ -536,7 +537,6 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
goto err_dma_result;
}
- j = 0;
len = areq->nbytes;
sg = areq->src;
i = 0;
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
index 8d1c79aaca07..5993bcba9716 100644
--- a/drivers/crypto/aspeed/aspeed-acry.c
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
err_engine_rsa_start:
crypto_engine_exit(acry_dev->crypt_engine_rsa);
clk_exit:
- clk_disable_unprepare(acry_dev->clk);
return rc;
}
@@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev)
aspeed_acry_unregister(acry_dev);
crypto_engine_exit(acry_dev->crypt_engine_rsa);
tasklet_kill(&acry_dev->done_task);
- clk_disable_unprepare(acry_dev->clk);
}
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
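
Editor's note: dropping clk_disable_unprepare() from both the probe error path and remove() only balances if the clock is devres-managed; presumably the full patch switches the acquisition to something like the sketch below (an assumption, the matching hunk is not in this excerpt).

	acry_dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(acry_dev->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(acry_dev->clk),
				     "failed to get and enable clock\n");
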
diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
index a72dfebc53ff..fa201dae1f81 100644
--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
+++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
@@ -346,7 +346,7 @@ free_req:
} else {
dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents,
- DMA_TO_DEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents,
DMA_TO_DEVICE);
}
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index a895e4289efa..9688d116d07e 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(atmel_i2c_probe);
static int __init atmel_i2c_init(void)
{
- atmel_wq = alloc_workqueue("atmel_wq", 0, 0);
+ atmel_wq = alloc_workqueue("atmel_wq", WQ_PERCPU, 0);
return atmel_wq ? 0 : -ENOMEM;
}
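
Editor's note: WQ_PERCPU here (and in the nitrox hunk further down) makes the historical per-CPU placement explicit, on the understanding that workqueue callers are now expected to choose per-CPU vs WQ_UNBOUND themselves. A minimal usage sketch:

	struct workqueue_struct *wq;

	wq = alloc_workqueue("example_wq", WQ_PERCPU, 0); /* 0 = default max_active */
	if (!wq)
		return -ENOMEM;
	/* ... queue_work(wq, &work) ... */
	destroy_workqueue(wq);
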
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 098f5532f389..3b2a92029b16 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -512,7 +512,7 @@ static int atmel_tdes_crypt_start(struct atmel_tdes_dev *dd)
if (err && (dd->flags & TDES_FLAGS_FAST)) {
dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
- dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+ dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE);
}
return err;
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 75ee065da1ec..b04d6379244a 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -252,7 +252,7 @@ struct artpec6_crypto_dma_descriptors {
};
enum artpec6_crypto_variant {
- ARTPEC6_CRYPTO,
+ ARTPEC6_CRYPTO = 1,
ARTPEC7_CRYPTO,
};
@@ -2842,7 +2842,6 @@ MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
static int artpec6_crypto_probe(struct platform_device *pdev)
{
- const struct of_device_id *match;
enum artpec6_crypto_variant variant;
struct artpec6_crypto *ac;
struct device *dev = &pdev->dev;
@@ -2853,12 +2852,10 @@ static int artpec6_crypto_probe(struct platform_device *pdev)
if (artpec6_crypto_dev)
return -ENODEV;
- match = of_match_node(artpec6_crypto_of_match, dev->of_node);
- if (!match)
+ variant = (enum artpec6_crypto_variant)of_device_get_match_data(dev);
+ if (!variant)
return -EINVAL;
- variant = (enum artpec6_crypto_variant)match->data;
-
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
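
Editor's note: of_device_get_match_data() returns the .data pointer of the matched of_device_id, which is why ARTPEC6_CRYPTO is moved off 0 above: a NULL return must stay distinguishable as "no match". A sketch of the match table this implies (compatible strings assumed from the driver):

static const struct of_device_id artpec6_crypto_of_match[] = {
	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
	{ /* sentinel */ }
};
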
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index 079a22cc9f02..c18dbac56493 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -2,13 +2,14 @@
/*
* Copyright (C) 2015 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
* Copyright (C) 2021 Pengutronix, Ahmad Fatoum <kernel@pengutronix.de>
- * Copyright 2024 NXP
+ * Copyright 2024-2025 NXP
*/
#define pr_fmt(fmt) "caam blob_gen: " fmt
#include <linux/bitfield.h>
#include <linux/device.h>
+#include <keys/trusted-type.h>
#include <soc/fsl/caam-blob.h>
#include "compat.h"
@@ -60,18 +61,27 @@ static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *con
complete(&res->completion);
}
+static u32 check_caam_state(struct device *jrdev)
+{
+ const struct caam_drv_private *ctrlpriv;
+
+ ctrlpriv = dev_get_drvdata(jrdev->parent);
+ return FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
+}
+
int caam_process_blob(struct caam_blob_priv *priv,
struct caam_blob_info *info, bool encap)
{
- const struct caam_drv_private *ctrlpriv;
struct caam_blob_job_result testres;
struct device *jrdev = &priv->jrdev;
dma_addr_t dma_in, dma_out;
int op = OP_PCLID_BLOB;
+ int hwbk_caam_ovhd = 0;
size_t output_len;
u32 *desc;
u32 moo;
int ret;
+ int len;
if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH)
return -EINVAL;
@@ -82,14 +92,29 @@ int caam_process_blob(struct caam_blob_priv *priv,
} else {
op |= OP_TYPE_DECAP_PROTOCOL;
output_len = info->input_len - CAAM_BLOB_OVERHEAD;
+ info->output_len = output_len;
+ }
+
+ if (encap && info->pkey_info.is_pkey) {
+ op |= OP_PCL_BLOB_BLACK;
+ if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
+ op |= OP_PCL_BLOB_EKT;
+ hwbk_caam_ovhd = CAAM_CCM_OVERHEAD;
+ }
+ if ((info->input_len + hwbk_caam_ovhd) > MAX_KEY_SIZE)
+ return -EINVAL;
+
+ len = info->input_len + hwbk_caam_ovhd;
+ } else {
+ len = info->input_len;
}
desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL);
if (!desc)
return -ENOMEM;
- dma_in = dma_map_single(jrdev, info->input, info->input_len,
- DMA_TO_DEVICE);
+ dma_in = dma_map_single(jrdev, info->input, len,
+ encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, dma_in)) {
dev_err(jrdev, "unable to map input DMA buffer\n");
ret = -ENOMEM;
@@ -104,8 +129,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
goto out_unmap_in;
}
- ctrlpriv = dev_get_drvdata(jrdev->parent);
- moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->jr[0]->perfmon.status));
+ moo = check_caam_state(jrdev);
if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
dev_warn(jrdev,
"using insecure test key, enable HAB to use unique device key!\n");
@@ -117,18 +141,48 @@ int caam_process_blob(struct caam_blob_priv *priv,
* Class 1 Context DWords 0+1+2+3. The random BK is stored in the
* Class 1 Key Register. Operation Mode is set to AES-CCM.
*/
-
init_job_desc(desc, 0);
+
+ if (encap && info->pkey_info.is_pkey) {
+ /* 1. KEY command to load the class 1 key register from the plain input key. */
+ append_key(desc, dma_in, info->input_len,
+ CLASS_1 | KEY_DEST_CLASS_REG);
+ /* 2. FIFO STORE to write the protected key out of the class 1 key register. */
+ if (info->pkey_info.key_enc_algo == CAAM_ENC_ALGO_CCM) {
+ append_fifo_store(desc, dma_in, info->input_len,
+ LDST_CLASS_1_CCB |
+ FIFOST_TYPE_KEY_CCM_JKEK);
+ } else {
+ append_fifo_store(desc, dma_in, info->input_len,
+ LDST_CLASS_1_CCB |
+ FIFOST_TYPE_KEY_KEK);
+ }
+ /*
+ * JUMP_OFFSET specifies the offset of the JUMP target from
+ * the JUMP command's address in the descriptor buffer.
+ */
+ append_jump(desc, JUMP_COND_NOP | (BIT(0) << JUMP_OFFSET_SHIFT));
+ }
+
+ /* 3. Load the class 2 key register with the key modifier. */
append_key_as_imm(desc, info->key_mod, info->key_mod_len,
- info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG);
- append_seq_in_ptr_intlen(desc, dma_in, info->input_len, 0);
- append_seq_out_ptr_intlen(desc, dma_out, output_len, 0);
+ info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG);
+
+ /* 4. SEQ IN PTR command. */
+ append_seq_in_ptr(desc, dma_in, info->input_len, 0);
+
+ /* 5. SEQ OUT PTR command. */
+ append_seq_out_ptr(desc, dma_out, output_len, 0);
+
+ /* 6. BLOB encapsulation/decapsulation PROTOCOL command. */
append_operation(desc, op);
- print_hex_dump_debug("data@"__stringify(__LINE__)": ",
+ print_hex_dump_debug("data@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 1, info->input,
- info->input_len, false);
- print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+ len, false);
+ print_hex_dump_debug("jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 1, desc,
desc_bytes(desc), false);
@@ -139,7 +193,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
if (ret == -EINPROGRESS) {
wait_for_completion(&testres.completion);
ret = testres.err;
- print_hex_dump_debug("output@"__stringify(__LINE__)": ",
+ print_hex_dump_debug("output@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 1, info->output,
output_len, false);
}
@@ -149,10 +203,10 @@ int caam_process_blob(struct caam_blob_priv *priv,
dma_unmap_single(jrdev, dma_out, output_len, DMA_FROM_DEVICE);
out_unmap_in:
- dma_unmap_single(jrdev, dma_in, info->input_len, DMA_TO_DEVICE);
+ dma_unmap_single(jrdev, dma_in, len,
+ encap ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
out_free:
kfree(desc);
-
return ret;
}
EXPORT_SYMBOL(caam_process_blob);
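
Editor's note: a hedged usage sketch for the protected-key encapsulation path added above. Field names follow the hunks; buffer allocation is elided, and the input buffer must leave room for CAAM_CCM_OVERHEAD, since the CCM case maps input_len plus that overhead bidirectionally.

	struct caam_blob_info info = {
		.input       = plain_key,		/* plain AES key bytes */
		.input_len   = plain_key_len,
		.output      = blob,
		.output_len  = plain_key_len + CAAM_BLOB_OVERHEAD,
		.key_mod     = "SECURE_KEY",
		.key_mod_len = sizeof("SECURE_KEY") - 1,
	};

	info.pkey_info.is_pkey = true;
	info.pkey_info.key_enc_algo = CAAM_ENC_ALGO_CCM;

	err = caam_process_blob(priv, &info, true);	/* true = encapsulate */
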
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 2cfb1b8d8c7c..32a6e6e15ee2 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -3,7 +3,7 @@
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016-2019, 2023 NXP
+ * Copyright 2016-2019, 2023, 2025 NXP
*
* Based on talitos crypto API driver.
*
@@ -61,13 +61,16 @@
#include <crypto/internal/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
+#include <keys/trusted-type.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/key-type.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <soc/fsl/caam-blob.h>
/*
* crypto alg
@@ -119,12 +122,15 @@ struct caam_ctx {
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
dma_addr_t key_dma;
+ u8 protected_key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t protected_key_dma;
enum dma_data_direction dir;
struct device *jrdev;
struct alginfo adata;
struct alginfo cdata;
unsigned int authsize;
bool xts_key_fallback;
+ bool is_blob;
struct crypto_skcipher *fallback;
};
@@ -751,9 +757,14 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
print_hex_dump_debug("key in @"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+ /* Here keylen is the actual key length */
ctx->cdata.keylen = keylen;
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
+ /* Here the protected-key length is the plain key length */
+ ctx->cdata.plain_keylen = keylen;
+ ctx->cdata.key_cmd_opt = 0;
+
/* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
@@ -772,6 +783,62 @@ static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
return 0;
}
+static int paes_skcipher_setkey(struct crypto_skcipher *skcipher,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_pkey_info *pkey_info = (struct caam_pkey_info *)key;
+ struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
+ struct device *jrdev = ctx->jrdev;
+ int err;
+
+ ctx->cdata.key_inline = false;
+
+ keylen -= CAAM_PKEY_HEADER;
+
+ /* Retrieve the length of the key */
+ ctx->cdata.plain_keylen = pkey_info->plain_key_sz;
+
+ /* Retrieve the length of the blob */
+ ctx->cdata.keylen = keylen;
+
+ /* Retrieve the address of the blob */
+ ctx->cdata.key_virt = pkey_info->key_buf;
+
+ /* Validate key length for AES algorithms */
+ err = aes_check_keylen(ctx->cdata.plain_keylen);
+ if (err) {
+ dev_err(jrdev, "bad key length\n");
+ return err;
+ }
+
+ /* set command option */
+ ctx->cdata.key_cmd_opt |= KEY_ENC;
+
+ /* check if the Protected-Key is CCM key */
+ if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+ ctx->cdata.key_cmd_opt |= KEY_EKT;
+
+ memcpy(ctx->key, ctx->cdata.key_virt, keylen);
+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+ ctx->cdata.key_dma = ctx->key_dma;
+
+ if (pkey_info->key_enc_algo == CAAM_ENC_ALGO_CCM)
+ ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+ ctx->cdata.plain_keylen +
+ CAAM_CCM_OVERHEAD,
+ DMA_FROM_DEVICE);
+ else
+ ctx->protected_key_dma = dma_map_single(jrdev, ctx->protected_key,
+ ctx->cdata.plain_keylen,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->protected_key_dma)) {
+ dev_err(jrdev, "unable to map protected key\n");
+ return -ENOMEM;
+ }
+
+ ctx->cdata.protected_key_dma = ctx->protected_key_dma;
+ ctx->is_blob = true;
+
+ return 0;
+}
+
static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
const u8 *key, unsigned int keylen)
{
@@ -1254,7 +1321,9 @@ static void init_skcipher_job(struct skcipher_request *req,
struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
struct device *jrdev = ctx->jrdev;
int ivsize = crypto_skcipher_ivsize(skcipher);
- u32 *desc = edesc->hw_desc;
+ u32 *desc = !ctx->is_blob ? edesc->hw_desc :
+ (u32 *)((u8 *)edesc->hw_desc + CAAM_DESC_BYTES_MAX);
+ dma_addr_t desc_dma;
u32 *sh_desc;
u32 in_options = 0, out_options = 0;
dma_addr_t src_dma, dst_dma, ptr;
@@ -1269,11 +1338,6 @@ static void init_skcipher_job(struct skcipher_request *req,
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
- sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
- ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
if (ivsize || edesc->mapped_src_nents > 1) {
src_dma = edesc->sec4_sg_dma;
@@ -1283,8 +1347,6 @@ static void init_skcipher_job(struct skcipher_request *req,
src_dma = sg_dma_address(req->src);
}
- append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
-
if (likely(req->src == req->dst)) {
dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry);
out_options = in_options;
@@ -1296,7 +1358,25 @@ static void init_skcipher_job(struct skcipher_request *req,
out_options = LDST_SGF;
}
- append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
+ if (ctx->is_blob) {
+ cnstr_desc_skcipher_enc_dec(desc, &ctx->cdata,
+ src_dma, dst_dma, req->cryptlen + ivsize,
+ in_options, out_options,
+ ivsize, encrypt);
+
+ desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), DMA_TO_DEVICE);
+
+ cnstr_desc_protected_blob_decap(edesc->hw_desc, &ctx->cdata, desc_dma);
+ } else {
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
+
+ len = desc_len(sh_desc);
+ init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+ append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options);
+
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options);
+ }
}
/*
@@ -1817,6 +1897,7 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
u32 *desc;
int ret = 0;
+ int len;
/*
* XTS is expected to return an error even for input length = 0
@@ -1842,8 +1923,12 @@ static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
crypto_skcipher_decrypt(&rctx->fallback_req);
}
+ len = DESC_JOB_IO_LEN * CAAM_CMD_SZ;
+ if (ctx->is_blob)
+ len += CAAM_DESC_BYTES_MAX;
+
/* allocate extended descriptor */
- edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, len);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
@@ -1888,6 +1973,27 @@ static struct caam_skcipher_alg driver_algs[] = {
{
.skcipher.base = {
.base = {
+ .cra_name = "cbc(paes)",
+ .cra_driver_name = "cbc-paes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = paes_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+ CAAM_PKEY_HEADER,
+ .max_keysize = AES_MAX_KEY_SIZE + CAAM_BLOB_OVERHEAD +
+ CAAM_PKEY_HEADER,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .skcipher.op = {
+ .do_one_request = skcipher_do_one_req,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher.base = {
+ .base = {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-caam",
.cra_blocksize = AES_BLOCK_SIZE,
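
Editor's note: the cbc(paes) min/max key sizes above imply that the setkey() blob is a small header followed by a CAAM blob. A hedged reconstruction of the layout paes_skcipher_setkey() assumes (struct caam_pkey_info itself is not shown in this excerpt):

struct caam_pkey_info {			/* reconstructed, CAAM_PKEY_HEADER bytes */
	u32 plain_key_sz;		/* length of the plain AES key */
	u32 key_enc_algo;		/* e.g. CAAM_ENC_ALGO_CCM */
	u8  key_buf[];			/* blob: plain_key_sz + CAAM_BLOB_OVERHEAD */
};
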
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index 7571e1ac913b..04c1105eb1f5 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -2,12 +2,13 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016-2019 NXP
+ * Copyright 2016-2019, 2025 NXP
*/
#include "compat.h"
#include "desc_constr.h"
#include "caamalg_desc.h"
+#include <soc/fsl/caam-blob.h>
/*
* For aead functions, read payload and write payload,
@@ -1364,6 +1365,84 @@ static inline void skcipher_append_src_dst(u32 *desc)
append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
+void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
+ unsigned int in_options, unsigned int out_options,
+ unsigned int ivsize, const bool encrypt)
+{
+ u32 options = cdata->algtype | OP_ALG_AS_INIT;
+
+ if (encrypt)
+ options |= OP_ALG_ENCRYPT;
+ else
+ options |= OP_ALG_DECRYPT;
+
+ init_job_desc(desc, 0);
+
+ append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL |
+ JUMP_COND_NOP | JUMP_TEST_ALL | 1);
+
+ append_key(desc, cdata->protected_key_dma, cdata->plain_keylen,
+ CLASS_1 | KEY_DEST_CLASS_REG | cdata->key_cmd_opt);
+
+ append_seq_in_ptr(desc, src, data_sz, in_options);
+
+ append_seq_out_ptr(desc, dst, data_sz, out_options);
+
+ /* Load IV, if there is one */
+ if (ivsize)
+ append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB);
+
+ append_operation(desc, options);
+
+ skcipher_append_src_dst(desc);
+
+ /* Store IV */
+ if (ivsize)
+ append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
+ LDST_CLASS_1_CCB);
+
+ print_hex_dump_debug("skcipher_enc_dec job desc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+}
+EXPORT_SYMBOL(cnstr_desc_skcipher_enc_dec);
+
+void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t next_desc_addr)
+{
+ u32 protected_store;
+
+ init_job_desc(desc, 0);
+
+ /* Load key modifier */
+ append_load_as_imm(desc, KEYMOD, sizeof(KEYMOD) - 1,
+ LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_KEY);
+
+ append_seq_in_ptr_intlen(desc, cdata->key_dma,
+ cdata->plain_keylen + CAAM_BLOB_OVERHEAD, 0);
+
+ append_seq_out_ptr_intlen(desc, cdata->protected_key_dma,
+ cdata->plain_keylen, 0);
+
+ protected_store = OP_PCLID_BLOB | OP_PCL_BLOB_BLACK;
+ if ((cdata->key_cmd_opt >> KEY_EKT_OFFSET) & 1)
+ protected_store |= OP_PCL_BLOB_EKT;
+
+ append_operation(desc, OP_TYPE_DECAP_PROTOCOL | protected_store);
+
+ if (next_desc_addr) {
+ append_jump(desc, JUMP_TYPE_NONLOCAL | JUMP_TEST_ALL);
+ append_ptr(desc, next_desc_addr);
+ }
+
+ print_hex_dump_debug("protected blob decap job desc@" __stringify(__LINE__) ":",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+}
+EXPORT_SYMBOL(cnstr_desc_protected_blob_decap);
+
/**
* cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
@@ -1391,7 +1470,8 @@ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
+ | cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
@@ -1466,7 +1546,8 @@ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
/* Load class1 key only */
append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
+ cdata->plain_keylen, CLASS_1 | KEY_DEST_CLASS_REG
+ | cdata->key_cmd_opt);
/* Load nonce into CONTEXT1 reg */
if (is_rfc3686) {
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index f2893393ba5e..323490a4a756 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -2,7 +2,7 @@
/*
* Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016 NXP
+ * Copyright 2016, 2025 NXP
*/
#ifndef _CAAMALG_DESC_H_
@@ -48,6 +48,9 @@
#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
16 * CAAM_CMD_SZ)
+/* Key modifier for CAAM Protected blobs */
+#define KEYMOD "SECURE_KEY"
+
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
unsigned int icvsize, int era);
@@ -113,4 +116,12 @@ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_desc_protected_blob_decap(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t next_desc);
+
+void cnstr_desc_skcipher_enc_dec(u32 * const desc, struct alginfo *cdata,
+ dma_addr_t src, dma_addr_t dst, unsigned int data_sz,
+ unsigned int in_options, unsigned int out_options,
+ unsigned int ivsize, const bool encrypt);
+
#endif /* _CAAMALG_DESC_H_ */
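
Editor's note: the two constructors above are chained at run time by init_skcipher_job(): the decap job runs first, and its trailing non-local JUMP transfers control to the cipher job placed CAAM_DESC_BYTES_MAX after hw_desc. Roughly:

/*
 *  hw_desc: cnstr_desc_protected_blob_decap()
 *      LOAD  "SECURE_KEY" key modifier into class 2
 *      SEQ IN  -> key blob        SEQ OUT -> ctx->protected_key
 *      OPERATION blob decap (OP_PCL_BLOB_BLACK [| OP_PCL_BLOB_EKT])
 *      JUMP nonlocal --------------------------------+
 *                                                    v
 *  hw_desc + CAAM_DESC_BYTES_MAX: cnstr_desc_skcipher_enc_dec()
 *      KEY  class 1 <- protected key (KEY_ENC [| KEY_EKT])
 *      SEQ IN/OUT, LOAD IV, OPERATION cipher, STORE IV
 */
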
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index b3d14a7f4dd1..0eb43c862516 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -181,7 +181,9 @@ static inline void test_len(struct hwrng *rng, size_t len, bool wait)
struct device *dev = ctx->ctrldev;
buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL);
-
+ if (!buf)
+ return;
+
while (len > 0) {
read_len = rng->read(rng, buf, len, wait);
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index a93be395c878..320be5d77737 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -592,9 +592,9 @@ static int init_clocks(struct device *dev, const struct caam_imx_data *data)
int ret;
ctrlpriv->num_clks = data->num_clks;
- ctrlpriv->clks = devm_kmemdup(dev, data->clks,
- data->num_clks * sizeof(data->clks[0]),
- GFP_KERNEL);
+ ctrlpriv->clks = devm_kmemdup_array(dev, data->clks,
+ data->num_clks, sizeof(*data->clks),
+ GFP_KERNEL);
if (!ctrlpriv->clks)
return -ENOMEM;
@@ -703,12 +703,12 @@ static int caam_ctrl_rng_init(struct device *dev)
*/
if (needs_entropy_delay_adjustment())
ent_delay = 12000;
- if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
+ if (!inst_handles) {
dev_info(dev,
"Entropy delay = %u\n",
ent_delay);
kick_trng(dev, ent_delay);
- ent_delay += 400;
+ ent_delay *= 2;
}
/*
* if instantiate_rng(...) fails, the loop will rerun
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index e13470901586..c28e94fcb8c7 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -4,7 +4,7 @@
* Definitions to support CAAM descriptor instruction generation
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2018 NXP
+ * Copyright 2018, 2025 NXP
*/
#ifndef DESC_H
@@ -162,6 +162,7 @@
* Enhanced Encryption of Key
*/
#define KEY_EKT 0x00100000
+#define KEY_EKT_OFFSET 20
/*
* Encrypted with Trusted Key
@@ -403,6 +404,7 @@
#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_CCM_JKEK (0x14 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_JKEK (0x20 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
@@ -1001,6 +1003,11 @@
#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+/* Blob protocol protinfo bits */
+
+#define OP_PCL_BLOB_BLACK 0x0004
+#define OP_PCL_BLOB_EKT 0x0100
+
/* For DTLS - OP_PCLID_DTLS */
#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 824c94d44f94..2a29dd2c9c8a 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -3,7 +3,7 @@
* caam descriptor construction helper functions
*
* Copyright 2008-2012 Freescale Semiconductor, Inc.
- * Copyright 2019 NXP
+ * Copyright 2019, 2025 NXP
*/
#ifndef DESC_CONSTR_H
@@ -498,17 +498,23 @@ do { \
* @keylen: length of the provided algorithm key, in bytes
* @keylen_pad: padded length of the provided algorithm key, in bytes
* @key_dma: dma (bus) address where algorithm key resides
+ * @protected_key_dma: dma (bus) address where protected key resides
* @key_virt: virtual address where algorithm key resides
* @key_inline: true - key can be inlined in the descriptor; false - key is
* referenced by the descriptor
+ * @plain_keylen: size of the key to be loaded by the CAAM
+ * @key_cmd_opt: optional parameters for KEY command
*/
struct alginfo {
u32 algtype;
unsigned int keylen;
unsigned int keylen_pad;
dma_addr_t key_dma;
+ dma_addr_t protected_key_dma;
const void *key_virt;
bool key_inline;
+ u32 plain_keylen;
+ u32 key_cmd_opt;
};
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_mbx.c b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
index d4e06999af9b..a6a76e50ba84 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_mbx.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_mbx.c
@@ -192,7 +192,7 @@ int nitrox_mbox_init(struct nitrox_device *ndev)
}
/* allocate pf2vf response workqueue */
- ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0);
+ ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", WQ_PERCPU, 0);
if (!ndev->iov.pf2vf_wq) {
kfree(ndev->iov.vfdev);
ndev->iov.vfdev = NULL;
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index f394e45e11ab..f16a0f611317 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -39,6 +39,7 @@ config CRYPTO_DEV_SP_PSP
bool "Platform Security Processor (PSP) device"
default y
depends on CRYPTO_DEV_CCP_DD && X86_64 && AMD_IOMMU
+ select PCI_TSM if PCI
help
Provide support for the AMD Platform Security Processor (PSP).
The PSP is a dedicated processor that provides support for key
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 394484929dae..0424e08561ef 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -13,7 +13,12 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
tee-dev.o \
platform-access.o \
dbc.o \
- hsti.o
+ hsti.o \
+ sfs.o
+
+ifeq ($(CONFIG_PCI_TSM),y)
+ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += sev-dev-tsm.o sev-dev-tio.o
+endif
obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c531d13d971f..246801912e1a 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -507,7 +507,7 @@ int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
u32 trng_value;
- int len = min_t(int, sizeof(trng_value), max);
+ int len = min(sizeof(trng_value), max);
/* Locking is provided by the caller so we can update device
* hwrng-related fields safely
diff --git a/drivers/crypto/ccp/hsti.c b/drivers/crypto/ccp/hsti.c
index 1b39a4fb55c0..c29c6a9c0f3f 100644
--- a/drivers/crypto/ccp/hsti.c
+++ b/drivers/crypto/ccp/hsti.c
@@ -74,7 +74,7 @@ struct attribute_group psp_security_attr_group = {
.is_visible = psp_security_is_visible,
};
-static int psp_poulate_hsti(struct psp_device *psp)
+static int psp_populate_hsti(struct psp_device *psp)
{
struct hsti_request *req;
int ret;
@@ -84,11 +84,11 @@ static int psp_poulate_hsti(struct psp_device *psp)
return 0;
/* Allocate command-response buffer */
- req = kzalloc(sizeof(*req), GFP_KERNEL | __GFP_ZERO);
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->header.payload_size = sizeof(req);
+ req->header.payload_size = sizeof(*req);
ret = psp_send_platform_access_msg(PSP_CMD_HSTI_QUERY, (struct psp_request *)req);
if (ret)
@@ -114,7 +114,7 @@ int psp_init_hsti(struct psp_device *psp)
int ret;
if (PSP_FEATURE(psp, HSTI)) {
- ret = psp_poulate_hsti(psp);
+ ret = psp_populate_hsti(psp);
if (ret)
return ret;
}
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 1c5a7189631e..9e21da0e298a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -17,6 +17,7 @@
#include "psp-dev.h"
#include "sev-dev.h"
#include "tee-dev.h"
+#include "sfs.h"
#include "platform-access.h"
#include "dbc.h"
#include "hsti.h"
@@ -182,6 +183,17 @@ static int psp_check_tee_support(struct psp_device *psp)
return 0;
}
+static int psp_check_sfs_support(struct psp_device *psp)
+{
+ /* Check if device supports SFS feature */
+ if (!psp->capability.sfs) {
+ dev_dbg(psp->dev, "psp does not support SFS\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
static int psp_init(struct psp_device *psp)
{
int ret;
@@ -198,6 +210,12 @@ static int psp_init(struct psp_device *psp)
return ret;
}
+ if (!psp_check_sfs_support(psp)) {
+ ret = sfs_dev_init(psp);
+ if (ret)
+ return ret;
+ }
+
if (psp->vdata->platform_access) {
ret = platform_access_dev_init(psp);
if (ret)
@@ -302,6 +320,8 @@ void psp_dev_destroy(struct sp_device *sp)
tee_dev_destroy(psp);
+ sfs_dev_destroy(psp);
+
dbc_dev_destroy(psp);
platform_access_dev_destroy(psp);
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index e43ce87ede76..268c83f298cb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -32,7 +32,8 @@ union psp_cap_register {
unsigned int sev :1,
tee :1,
dbc_thru_ext :1,
- rsvd1 :4,
+ sfs :1,
+ rsvd1 :3,
security_reporting :1,
fused_part :1,
rsvd2 :1,
@@ -68,6 +69,7 @@ struct psp_device {
void *tee_data;
void *platform_access_data;
void *dbc_data;
+ void *sfs_data;
union psp_cap_register capability;
};
@@ -118,12 +120,16 @@ struct psp_ext_request {
* @PSP_SUB_CMD_DBC_SET_UID: Set UID for DBC
* @PSP_SUB_CMD_DBC_GET_PARAMETER: Get parameter from DBC
* @PSP_SUB_CMD_DBC_SET_PARAMETER: Set parameter for DBC
+ * @PSP_SUB_CMD_SFS_GET_FW_VERS: Get firmware versions for ASP and other MP
+ * @PSP_SUB_CMD_SFS_UPDATE: Command to load, verify and execute SFS package
*/
enum psp_sub_cmd {
PSP_SUB_CMD_DBC_GET_NONCE = PSP_DYNAMIC_BOOST_GET_NONCE,
PSP_SUB_CMD_DBC_SET_UID = PSP_DYNAMIC_BOOST_SET_UID,
PSP_SUB_CMD_DBC_GET_PARAMETER = PSP_DYNAMIC_BOOST_GET_PARAMETER,
PSP_SUB_CMD_DBC_SET_PARAMETER = PSP_DYNAMIC_BOOST_SET_PARAMETER,
+ PSP_SUB_CMD_SFS_GET_FW_VERS = PSP_SFS_GET_FW_VERSIONS,
+ PSP_SUB_CMD_SFS_UPDATE = PSP_SFS_UPDATE,
};
int psp_extended_mailbox_cmd(struct psp_device *psp, unsigned int timeout_msecs,
diff --git a/drivers/crypto/ccp/sev-dev-tio.c b/drivers/crypto/ccp/sev-dev-tio.c
new file mode 100644
index 000000000000..9a98f98c20a7
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.c
@@ -0,0 +1,864 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to PSP for CCP/SEV-TIO/SNP-VM
+
+#include <linux/pci.h>
+#include <linux/tsm.h>
+#include <linux/psp.h>
+#include <linux/vmalloc.h>
+#include <linux/bitfield.h>
+#include <linux/pci-doe.h>
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+#include <asm/page.h>
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+#define to_tio_status(dev_data) \
+ (container_of((dev_data), struct tio_dsm, data)->sev->tio_status)
+
+#define SLA_PAGE_TYPE_DATA 0
+#define SLA_PAGE_TYPE_SCATTER 1
+#define SLA_PAGE_SIZE_4K 0
+#define SLA_PAGE_SIZE_2M 1
+#define SLA_SZ(s) ((s).page_size == SLA_PAGE_SIZE_2M ? SZ_2M : SZ_4K)
+#define SLA_SCATTER_LEN(s) (SLA_SZ(s) / sizeof(struct sla_addr_t))
+#define SLA_EOL ((struct sla_addr_t) { .pfn = ((1UL << 40) - 1) })
+#define SLA_NULL ((struct sla_addr_t) { 0 })
+#define IS_SLA_NULL(s) ((s).sla == SLA_NULL.sla)
+#define IS_SLA_EOL(s) ((s).sla == SLA_EOL.sla)
+
+static phys_addr_t sla_to_pa(struct sla_addr_t sla)
+{
+ u64 pfn = sla.pfn;
+ u64 pa = pfn << PAGE_SHIFT;
+
+ return pa;
+}
+
+static void *sla_to_va(struct sla_addr_t sla)
+{
+ void *va = __va(__sme_clr(sla_to_pa(sla)));
+
+ return va;
+}
+
+#define sla_to_pfn(sla) (__pa(sla_to_va(sla)) >> PAGE_SHIFT)
+#define sla_to_page(sla) virt_to_page(sla_to_va(sla))
+
+static struct sla_addr_t make_sla(struct page *pg, bool stp)
+{
+ u64 pa = __sme_set(page_to_phys(pg));
+ struct sla_addr_t ret = {
+ .pfn = pa >> PAGE_SHIFT,
+ .page_size = SLA_PAGE_SIZE_4K, /* 2M pages are not used for now */
+ .page_type = stp ? SLA_PAGE_TYPE_SCATTER : SLA_PAGE_TYPE_DATA
+ };
+
+ return ret;
+}
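
Editor's note (not part of the patch): a tiny round-trip sketch of the SLA encoding defined above, useful for sanity-checking the pfn/page_size/page_type packing; make_sla() applies __sme_set() and sla_to_va() strips it again, so the decode returns the original page.

	struct page *pg = alloc_page(GFP_KERNEL);
	struct sla_addr_t sla = make_sla(pg, false);	/* a 4K data page */

	WARN_ON(sla_to_page(sla) != pg);	/* decodes back to the page */
	WARN_ON(SLA_SZ(sla) != SZ_4K);		/* 2M SLAs are never produced */
	__free_page(pg);
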
+
+/* the BUFFER Structure */
+#define SLA_BUFFER_FLAG_ENCRYPTION BIT(0)
+
+/*
+ * struct sla_buffer_hdr - Scatter list address buffer header
+ *
+ * @capacity_sz: Total capacity of the buffer in bytes
+ * @payload_sz: Size of buffer payload in bytes, must be multiple of 32B
+ * @flags: Buffer flags (SLA_BUFFER_FLAG_ENCRYPTION: buffer is encrypted)
+ * @iv: Initialization vector used for encryption
+ * @authtag: Authentication tag for encrypted buffer
+ */
+struct sla_buffer_hdr {
+ u32 capacity_sz;
+ u32 payload_sz; /* Size of BUFFER_PAYLOAD in bytes; must be a multiple of 32B */
+ u32 flags;
+ u8 reserved1[4];
+ u8 iv[16]; /* IV used for the encryption of this buffer */
+ u8 authtag[16]; /* Authentication tag for this buffer */
+ u8 reserved2[16];
+} __packed;
+
+enum spdm_data_type_t {
+ DOBJ_DATA_TYPE_SPDM = 0x1,
+ DOBJ_DATA_TYPE_SECURE_SPDM = 0x2,
+};
+
+struct spdm_dobj_hdr_req {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_REQ */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+struct spdm_dobj_hdr_resp {
+ struct spdm_dobj_hdr hdr; /* hdr.id == SPDM_DOBJ_ID_RESP */
+ u8 data_type; /* spdm_data_type_t */
+ u8 reserved2[5];
+} __packed;
+
+/* Defined in sev-dev-tio.h so sev-dev-tsm.c can read types of blobs */
+struct spdm_dobj_hdr_cert;
+struct spdm_dobj_hdr_meas;
+struct spdm_dobj_hdr_report;
+
+/* Used in all SPDM-aware TIO commands */
+struct spdm_ctrl {
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+} __packed;
+
+static size_t sla_dobj_id_to_size(u8 id)
+{
+ size_t n;
+
+ BUILD_BUG_ON(sizeof(struct spdm_dobj_hdr_resp) != 0x10);
+ switch (id) {
+ case SPDM_DOBJ_ID_REQ:
+ n = sizeof(struct spdm_dobj_hdr_req);
+ break;
+ case SPDM_DOBJ_ID_RESP:
+ n = sizeof(struct spdm_dobj_hdr_resp);
+ break;
+ default:
+ WARN_ON(1);
+ n = 0;
+ break;
+ }
+
+ return n;
+}
+
+#define SPDM_DOBJ_HDR_SIZE(hdr) sla_dobj_id_to_size((hdr)->id)
+#define SPDM_DOBJ_DATA(hdr) ((u8 *)(hdr) + SPDM_DOBJ_HDR_SIZE(hdr))
+#define SPDM_DOBJ_LEN(hdr) ((hdr)->length - SPDM_DOBJ_HDR_SIZE(hdr))
+
+#define sla_to_dobj_resp_hdr(buf) ((struct spdm_dobj_hdr_resp *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_RESP))
+#define sla_to_dobj_req_hdr(buf) ((struct spdm_dobj_hdr_req *) \
+ sla_to_dobj_hdr_check((buf), SPDM_DOBJ_ID_REQ))
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr(struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return NULL;
+
+ return (struct spdm_dobj_hdr *) &buf[1];
+}
+
+static struct spdm_dobj_hdr *sla_to_dobj_hdr_check(struct sla_buffer_hdr *buf, u32 check_dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(!hdr))
+ return NULL;
+
+ if (hdr->id != check_dobjid) {
+ pr_err("! ERROR: expected %d, found %d\n", check_dobjid, hdr->id);
+ return NULL;
+ }
+
+ return hdr;
+}
+
+static void *sla_to_data(struct sla_buffer_hdr *buf, u32 dobjid)
+{
+ struct spdm_dobj_hdr *hdr = sla_to_dobj_hdr(buf);
+
+ if (WARN_ON_ONCE(dobjid != SPDM_DOBJ_ID_REQ && dobjid != SPDM_DOBJ_ID_RESP))
+ return NULL;
+
+ if (!hdr)
+ return NULL;
+
+ return (u8 *) hdr + sla_dobj_id_to_size(dobjid);
+}
+
+/*
+ * struct sev_data_tio_status - SEV_CMD_TIO_STATUS command
+ *
+ * @length: Length of this command buffer in bytes
+ * @status_paddr: System physical address of the TIO_STATUS structure
+ */
+struct sev_data_tio_status {
+ u32 length;
+ u8 reserved[4];
+ u64 status_paddr;
+} __packed;
+
+/* TIO_INIT */
+struct sev_data_tio_init {
+ u32 length;
+ u8 reserved[12];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_create - TIO_DEV_CREATE command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address pointing to a buffer to be used as a device context buffer
+ * @device_id: PCIe Routing Identifier of the device to connect to
+ * @root_port_id: PCIe Routing Identifier of the root port of the device
+ * @segment_id: PCIe Segment Identifier of the device to connect to
+ */
+struct sev_data_tio_dev_create {
+ u32 length;
+ u8 reserved1[4];
+ struct sla_addr_t dev_ctx_sla;
+ u16 device_id;
+ u16 root_port_id;
+ u8 segment_id;
+ u8 reserved2[11];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_connect - TIO_DEV_CONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @tc_mask: Bitmask of the traffic classes to initialize for SEV-TIO usage.
+ * Setting the kth bit of the TC_MASK to 1 indicates that the traffic
+ * class k will be initialized
+ * @cert_slot: Slot number of the certificate requested for constructing the SPDM session
+ * @ide_stream_id: IDE stream IDs to be associated with this device.
+ * Valid only if corresponding bit in TC_MASK is set
+ */
+struct sev_data_tio_dev_connect {
+ u32 length;
+ u8 reserved1[4];
+ struct spdm_ctrl spdm_ctrl;
+ u8 reserved2[8];
+ struct sla_addr_t dev_ctx_sla;
+ u8 tc_mask;
+ u8 cert_slot;
+ u8 reserved3[6];
+ u8 ide_stream_id[8];
+ u8 reserved4[8];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_disconnect - TIO_DEV_DISCONNECT command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_DISCONNECT_FLAG_FORCE: force disconnect)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+#define TIO_DEV_DISCONNECT_FLAG_FORCE BIT(0)
+
+struct sev_data_tio_dev_disconnect {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_meas - TIO_DEV_MEASUREMENTS command
+ *
+ * @length: Length in bytes of this command buffer
+ * @flags: Command flags (TIO_DEV_MEAS_FLAG_RAW_BITSTREAM: request raw measurements)
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ * @meas_nonce: Nonce for measurement freshness verification
+ */
+#define TIO_DEV_MEAS_FLAG_RAW_BITSTREAM BIT(0)
+
+struct sev_data_tio_dev_meas {
+ u32 length;
+ u32 flags;
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+ u8 meas_nonce[32];
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_certs - TIO_DEV_CERTIFICATES command
+ *
+ * @length: Length in bytes of this command buffer
+ * @spdm_ctrl: SPDM control structure defined in Section 5.1
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ */
+struct sev_data_tio_dev_certs {
+ u32 length;
+ u8 reserved[4];
+ struct spdm_ctrl spdm_ctrl;
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
+/*
+ * struct sev_data_tio_dev_reclaim - TIO_DEV_RECLAIM command
+ *
+ * @length: Length in bytes of this command buffer
+ * @dev_ctx_sla: Scatter list address of the device context buffer
+ *
+ * This command reclaims resources associated with a device context.
+ */
+struct sev_data_tio_dev_reclaim {
+ u32 length;
+ u8 reserved[4];
+ struct sla_addr_t dev_ctx_sla;
+} __packed;
+
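+/*
+ * Maps an SLA buffer into virtually contiguous kernel memory. A direct
+ * SLA maps its single 4K page; a scatter SLA walks the one-level scatter
+ * list up to the SLA_EOL terminator and maps all data pages at once with
+ * vm_map_ram().
+ */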
+static struct sla_buffer_hdr *sla_buffer_map(struct sla_addr_t sla)
+{
+ struct sla_buffer_hdr *buf;
+
+ BUILD_BUG_ON(sizeof(struct sla_buffer_hdr) != 0x40);
+ if (IS_SLA_NULL(sla))
+ return NULL;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (WARN_ON_ONCE(SLA_SZ(scatter[i]) > SZ_4K))
+ return NULL;
+
+ if (WARN_ON_ONCE(scatter[i].page_type == SLA_PAGE_TYPE_SCATTER))
+ return NULL;
+
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (WARN_ON_ONCE(!npages))
+ return NULL;
+
+ struct page **pp = kmalloc_array(npages, sizeof(pp[0]), GFP_KERNEL);
+
+ if (!pp)
+ return NULL;
+
+ for (i = 0; i < npages; ++i)
+ pp[i] = sla_to_page(scatter[i]);
+
+ buf = vm_map_ram(pp, npages, 0);
+ kfree(pp);
+ } else {
+ struct page *pg = sla_to_page(sla);
+
+ buf = vm_map_ram(&pg, 1, 0);
+ }
+
+ return buf;
+}
+
+static void sla_buffer_unmap(struct sla_addr_t sla, struct sla_buffer_hdr *buf)
+{
+ if (!buf)
+ return;
+
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ struct sla_addr_t *scatter = sla_to_va(sla);
+ unsigned int i, npages = 0;
+
+ for (i = 0; i < SLA_SCATTER_LEN(sla); ++i) {
+ if (IS_SLA_EOL(scatter[i])) {
+ npages = i;
+ break;
+ }
+ }
+ if (!npages)
+ return;
+
+ vm_unmap_ram(buf, npages);
+ } else {
+ vm_unmap_ram(buf, 1);
+ }
+}
+
+static void dobj_response_init(struct sla_buffer_hdr *buf)
+{
+ struct spdm_dobj_hdr *dobj = sla_to_dobj_hdr(buf);
+
+ dobj->id = SPDM_DOBJ_ID_RESP;
+ dobj->version.major = 0x1;
+ dobj->version.minor = 0;
+ dobj->length = 0;
+ buf->payload_sz = sla_dobj_id_to_size(dobj->id) + dobj->length;
+}
+
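+/*
+ * Frees an SLA buffer. If @firmware_state is set, every page is first
+ * reclaimed from the firmware via snp_reclaim_pages(); on reclaim failure
+ * the pages are deliberately leaked (WARN and return) rather than being
+ * handed back to the page allocator.
+ */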
+static void sla_free(struct sla_addr_t sla, size_t len, bool firmware_state)
+{
+ unsigned int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ int ret = 0, i;
+
+ if (IS_SLA_NULL(sla))
+ return;
+
+ if (firmware_state) {
+ if (sla.page_type == SLA_PAGE_TYPE_SCATTER) {
+ scatter = sla_to_va(sla);
+
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+
+ ret = snp_reclaim_pages(sla_to_pa(scatter[i]), 1, false);
+ if (ret)
+ break;
+ }
+ } else {
+ ret = snp_reclaim_pages(sla_to_pa(sla), 1, false);
+ }
+ }
+
+ if (WARN_ON(ret))
+ return;
+
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (IS_SLA_EOL(scatter[i]))
+ break;
+ free_page((unsigned long)sla_to_va(scatter[i]));
+ }
+ }
+
+ free_page((unsigned long)sla_to_va(sla));
+}
+
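+/*
+ * Allocates an SLA buffer of @len bytes from individual 4K pages. A
+ * single page is addressed directly; larger buffers get a one-page
+ * scatter list terminated with SLA_EOL. If @firmware_state is set, the
+ * data pages (but not the scatter list page itself) are transitioned to
+ * firmware-owned state in the RMP.
+ */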
+static struct sla_addr_t sla_alloc(size_t len, bool firmware_state)
+{
+ unsigned long i, npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
+ struct sla_addr_t *scatter = NULL;
+ struct sla_addr_t ret = SLA_NULL;
+ struct sla_buffer_hdr *buf;
+ struct page *pg;
+
+ if (npages == 0)
+ return ret;
+
+ /* One page of SLAs must fit all the entries plus the SLA_EOL terminator */
+ if (WARN_ON_ONCE(npages > ((PAGE_SIZE / sizeof(struct sla_addr_t)) - 1)))
+ return ret;
+
+ BUILD_BUG_ON(PAGE_SIZE < SZ_4K);
+
+ if (npages > 1) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, true);
+ scatter = page_to_virt(pg);
+ for (i = 0; i < npages; ++i) {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ goto no_reclaim_exit;
+
+ scatter[i] = make_sla(pg, false);
+ }
+ scatter[i] = SLA_EOL;
+ } else {
+ pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!pg)
+ return SLA_NULL;
+
+ ret = make_sla(pg, false);
+ }
+
+ buf = sla_buffer_map(ret);
+ if (!buf)
+ goto no_reclaim_exit;
+
+ buf->capacity_sz = (npages << PAGE_SHIFT);
+ sla_buffer_unmap(ret, buf);
+
+ if (firmware_state) {
+ if (scatter) {
+ for (i = 0; i < npages; ++i) {
+ if (rmp_make_private(sla_to_pfn(scatter[i]), 0,
+ PG_LEVEL_4K, 0, true))
+ goto free_exit;
+ }
+ } else {
+ if (rmp_make_private(sla_to_pfn(ret), 0, PG_LEVEL_4K, 0, true))
+ goto no_reclaim_exit;
+ }
+ }
+
+ return ret;
+
+no_reclaim_exit:
+ firmware_state = false;
+free_exit:
+ sla_free(ret, len, firmware_state);
+ return SLA_NULL;
+}
+
+/* Expands a buffer; only firmware-owned buffers are supported for now */
+static int sla_expand(struct sla_addr_t *sla, size_t *len)
+{
+ struct sla_buffer_hdr *oldbuf = sla_buffer_map(*sla), *newbuf;
+ struct sla_addr_t oldsla = *sla, newsla;
+ size_t oldlen = *len, newlen;
+
+ if (!oldbuf)
+ return -EFAULT;
+
+ newlen = oldbuf->capacity_sz;
+ if (oldbuf->capacity_sz == oldlen) {
+ /* This buffer does not require expansion, must be another buffer */
+ sla_buffer_unmap(oldsla, oldbuf);
+ return 1;
+ }
+
+ pr_notice("Expanding BUFFER from %ld to %ld bytes\n", oldlen, newlen);
+
+ newsla = sla_alloc(newlen, true);
+ if (IS_SLA_NULL(newsla))
+ return -ENOMEM;
+
+ newbuf = sla_buffer_map(newsla);
+ if (!newbuf) {
+ sla_free(newsla, newlen, true);
+ return -EFAULT;
+ }
+
+ memcpy(newbuf, oldbuf, oldlen);
+
+ sla_buffer_unmap(newsla, newbuf);
+ sla_free(oldsla, oldlen, true);
+ *sla = newsla;
+ *len = newlen;
+
+ return 0;
+}
+
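+/*
+ * Issues a TIO command, handling two special firmware responses:
+ * - SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST: grow the output and/or scratch
+ *   buffers to the firmware-advertised capacity and retry once;
+ * - SEV_RET_SPDM_REQUEST: the firmware needs an SPDM exchange with the
+ *   device; stash the command buffer in @dev_data and return the DOE
+ *   feature code so the caller can run the transport and re-issue the
+ *   command via sev_tio_continue().
+ */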
+static int sev_tio_do_cmd(int cmd, void *data, size_t data_len, int *psp_ret,
+ struct tsm_dsm_tio *dev_data)
+{
+ int rc;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+
+ if (WARN_ON(!rc && *psp_ret == SEV_RET_SPDM_REQUEST))
+ return -EIO;
+
+ if (rc == 0 && *psp_ret == SEV_RET_EXPAND_BUFFER_LENGTH_REQUEST) {
+ int rc1, rc2;
+
+ rc1 = sla_expand(&dev_data->output, &dev_data->output_len);
+ if (rc1 < 0)
+ return rc1;
+
+ rc2 = sla_expand(&dev_data->scratch, &dev_data->scratch_len);
+ if (rc2 < 0)
+ return rc2;
+
+ if (!rc1 && !rc2)
+ /* Neither buffer requires expansion, this is wrong */
+ return -EFAULT;
+
+ *psp_ret = 0;
+ rc = sev_do_cmd(cmd, data, psp_ret);
+ }
+
+ if ((rc == 0 || rc == -EIO) && *psp_ret == SEV_RET_SPDM_REQUEST) {
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ struct spdm_dobj_hdr_req *req_hdr;
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t resp_len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) + sizeof(struct sla_buffer_hdr));
+
+ if (!dev_data->cmd) {
+ if (WARN_ON_ONCE(!data_len || (data_len != *(u32 *) data)))
+ return -EINVAL;
+ if (WARN_ON(data_len > sizeof(dev_data->cmd_data)))
+ return -EFAULT;
+ memcpy(dev_data->cmd_data, data, data_len);
+ memset(&dev_data->cmd_data[data_len], 0xFF,
+ sizeof(dev_data->cmd_data) - data_len);
+ dev_data->cmd = cmd;
+ }
+
+ req_hdr = sla_to_dobj_req_hdr(dev_data->reqbuf);
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ switch (req_hdr->data_type) {
+ case DOBJ_DATA_TYPE_SPDM:
+ rc = PCI_DOE_FEATURE_CMA;
+ break;
+ case DOBJ_DATA_TYPE_SECURE_SPDM:
+ rc = PCI_DOE_FEATURE_SSESSION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ resp_hdr->data_type = req_hdr->data_type;
+ dev_data->spdm.req_len = req_hdr->hdr.length -
+ sla_dobj_id_to_size(SPDM_DOBJ_ID_REQ);
+ dev_data->spdm.rsp_len = resp_len;
+ } else if (dev_data && dev_data->cmd) {
+ /* For either error or success just stop the bouncing */
+ memset(dev_data->cmd_data, 0, sizeof(dev_data->cmd_data));
+ dev_data->cmd = 0;
+ }
+
+ return rc;
+}
+
+int sev_tio_continue(struct tsm_dsm_tio *dev_data)
+{
+ struct spdm_dobj_hdr_resp *resp_hdr;
+ int ret;
+
+ if (!dev_data || !dev_data->cmd)
+ return -EINVAL;
+
+ resp_hdr = sla_to_dobj_resp_hdr(dev_data->respbuf);
+ resp_hdr->hdr.length = ALIGN(sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ dev_data->spdm.rsp_len, 32);
+ dev_data->respbuf->payload_sz = resp_hdr->hdr.length;
+
+ ret = sev_tio_do_cmd(dev_data->cmd, dev_data->cmd_data, 0,
+ &dev_data->psp_ret, dev_data);
+ if (ret)
+ return ret;
+
+ if (dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void spdm_ctrl_init(struct spdm_ctrl *ctrl, struct tsm_dsm_tio *dev_data)
+{
+ ctrl->req = dev_data->req;
+ ctrl->resp = dev_data->resp;
+ ctrl->scratch = dev_data->scratch;
+ ctrl->output = dev_data->output;
+}
+
+static void spdm_ctrl_free(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ size_t len = tio_status->spdm_req_size_max -
+ (sla_dobj_id_to_size(SPDM_DOBJ_ID_RESP) +
+ sizeof(struct sla_buffer_hdr));
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ sla_buffer_unmap(dev_data->resp, dev_data->respbuf);
+ sla_buffer_unmap(dev_data->req, dev_data->reqbuf);
+ spdm->rsp = NULL;
+ spdm->req = NULL;
+ sla_free(dev_data->req, len, true);
+ sla_free(dev_data->resp, len, false);
+ sla_free(dev_data->scratch, tio_status->spdm_scratch_size_max, true);
+
+ dev_data->req.sla = 0;
+ dev_data->resp.sla = 0;
+ dev_data->scratch.sla = 0;
+ dev_data->respbuf = NULL;
+ dev_data->reqbuf = NULL;
+ sla_free(dev_data->output, tio_status->spdm_out_size_max, true);
+}
+
+static int spdm_ctrl_alloc(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct tsm_spdm *spdm = &dev_data->spdm;
+ int ret;
+
+ dev_data->req = sla_alloc(tio_status->spdm_req_size_max, true);
+ dev_data->resp = sla_alloc(tio_status->spdm_req_size_max, false);
+ dev_data->scratch_len = tio_status->spdm_scratch_size_max;
+ dev_data->scratch = sla_alloc(dev_data->scratch_len, true);
+ dev_data->output_len = tio_status->spdm_out_size_max;
+ dev_data->output = sla_alloc(dev_data->output_len, true);
+
+ if (IS_SLA_NULL(dev_data->req) || IS_SLA_NULL(dev_data->resp) ||
+     IS_SLA_NULL(dev_data->scratch) || IS_SLA_NULL(dev_data->output)) {
+ ret = -ENOMEM;
+ goto free_spdm_exit;
+ }
+
+ dev_data->reqbuf = sla_buffer_map(dev_data->req);
+ dev_data->respbuf = sla_buffer_map(dev_data->resp);
+ if (!dev_data->reqbuf || !dev_data->respbuf) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ spdm->req = sla_to_data(dev_data->reqbuf, SPDM_DOBJ_ID_REQ);
+ spdm->rsp = sla_to_data(dev_data->respbuf, SPDM_DOBJ_ID_RESP);
+ if (!spdm->req || !spdm->rsp) {
+ ret = -EFAULT;
+ goto free_spdm_exit;
+ }
+
+ dobj_response_init(dev_data->respbuf);
+
+ return 0;
+
+free_spdm_exit:
+ spdm_ctrl_free(dev_data);
+ return ret;
+}
+
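+/*
+ * Queries TIO_STATUS and, if the firmware reports SEV-TIO enabled but not
+ * yet initialized, issues TIO_INIT and re-reads the status into
+ * @tio_status_page.
+ */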
+int sev_tio_init_locked(void *tio_status_page)
+{
+ struct sev_tio_status *tio_status = tio_status_page;
+ struct sev_data_tio_status data_status = {
+ .length = sizeof(data_status),
+ };
+ int ret, psp_ret;
+
+ data_status.status_paddr = __psp_pa(tio_status_page);
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ if (tio_status->length < offsetofend(struct sev_tio_status, tdictx_size) ||
+ tio_status->reserved)
+ return -EFAULT;
+
+ if (!tio_status->tio_en && !tio_status->tio_init_done)
+ return -ENOENT;
+
+ if (tio_status->tio_init_done)
+ return -EBUSY;
+
+ struct sev_data_tio_init ti = { .length = sizeof(ti) };
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_INIT, &ti, &psp_ret);
+ if (ret)
+ return ret;
+
+ ret = __sev_do_cmd_locked(SEV_CMD_TIO_STATUS, &data_status, &psp_ret);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id,
+ u16 root_port_id, u8 segment_id)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_create create = {
+ .length = sizeof(create),
+ .device_id = device_id,
+ .root_port_id = root_port_id,
+ .segment_id = segment_id,
+ };
+ void *data_pg;
+ int ret;
+
+ dev_data->dev_ctx = sla_alloc(tio_status->devctx_size, true);
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return -ENOMEM;
+
+ data_pg = snp_alloc_firmware_page(GFP_KERNEL_ACCOUNT);
+ if (!data_pg) {
+ ret = -ENOMEM;
+ goto free_ctx_exit;
+ }
+
+ create.dev_ctx_sla = dev_data->dev_ctx;
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_CREATE, &create, &dev_data->psp_ret);
+ if (ret)
+ goto free_data_pg_exit;
+
+ dev_data->data_pg = data_pg;
+
+ return 0;
+
+free_data_pg_exit:
+ snp_free_firmware_page(data_pg);
+free_ctx_exit:
+ sla_free(create.dev_ctx_sla, tio_status->devctx_size, true);
+ return ret;
+}
+
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data)
+{
+ struct sev_tio_status *tio_status = to_tio_status(dev_data);
+ struct sev_data_tio_dev_reclaim r = {
+ .length = sizeof(r),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ };
+ int ret;
+
+ if (dev_data->data_pg) {
+ snp_free_firmware_page(dev_data->data_pg);
+ dev_data->data_pg = NULL;
+ }
+
+ if (IS_SLA_NULL(dev_data->dev_ctx))
+ return 0;
+
+ ret = sev_do_cmd(SEV_CMD_TIO_DEV_RECLAIM, &r, &dev_data->psp_ret);
+
+ sla_free(dev_data->dev_ctx, tio_status->devctx_size, true);
+ dev_data->dev_ctx = SLA_NULL;
+
+ spdm_ctrl_free(dev_data);
+
+ return ret;
+}
+
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot)
+{
+ struct sev_data_tio_dev_connect connect = {
+ .length = sizeof(connect),
+ .tc_mask = tc_mask,
+ .cert_slot = cert_slot,
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .ide_stream_id = {
+ ids[0], ids[1], ids[2], ids[3],
+ ids[4], ids[5], ids[6], ids[7]
+ },
+ };
+ int ret;
+
+ if (WARN_ON(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+ if (!(tc_mask & 1))
+ return -EINVAL;
+
+ ret = spdm_ctrl_alloc(dev_data);
+ if (ret)
+ return ret;
+
+ spdm_ctrl_init(&connect.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_CONNECT, &connect, sizeof(connect),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force)
+{
+ struct sev_data_tio_dev_disconnect dc = {
+ .length = sizeof(dc),
+ .dev_ctx_sla = dev_data->dev_ctx,
+ .flags = force ? TIO_DEV_DISCONNECT_FLAG_FORCE : 0,
+ };
+
+ if (WARN_ON_ONCE(IS_SLA_NULL(dev_data->dev_ctx)))
+ return -EFAULT;
+
+ spdm_ctrl_init(&dc.spdm_ctrl, dev_data);
+
+ return sev_tio_do_cmd(SEV_CMD_TIO_DEV_DISCONNECT, &dc, sizeof(dc),
+ &dev_data->psp_ret, dev_data);
+}
+
+int sev_tio_cmd_buffer_len(int cmd)
+{
+ switch (cmd) {
+ case SEV_CMD_TIO_STATUS: return sizeof(struct sev_data_tio_status);
+ case SEV_CMD_TIO_INIT: return sizeof(struct sev_data_tio_init);
+ case SEV_CMD_TIO_DEV_CREATE: return sizeof(struct sev_data_tio_dev_create);
+ case SEV_CMD_TIO_DEV_RECLAIM: return sizeof(struct sev_data_tio_dev_reclaim);
+ case SEV_CMD_TIO_DEV_CONNECT: return sizeof(struct sev_data_tio_dev_connect);
+ case SEV_CMD_TIO_DEV_DISCONNECT: return sizeof(struct sev_data_tio_dev_disconnect);
+ default: return 0;
+ }
+}
diff --git a/drivers/crypto/ccp/sev-dev-tio.h b/drivers/crypto/ccp/sev-dev-tio.h
new file mode 100644
index 000000000000..67512b3dbc53
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tio.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __PSP_SEV_TIO_H__
+#define __PSP_SEV_TIO_H__
+
+#include <linux/pci-tsm.h>
+#include <linux/pci-ide.h>
+#include <linux/tsm.h>
+#include <uapi/linux/psp-sev.h>
+
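+/* Scatter List Address: a 40-bit PFN plus page-type and page-size bits */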
+struct sla_addr_t {
+ union {
+ u64 sla;
+ struct {
+ u64 page_type :1,
+ page_size :1,
+ reserved1 :10,
+ pfn :40,
+ reserved2 :12;
+ };
+ };
+} __packed;
+
+#define SEV_TIO_MAX_COMMAND_LENGTH 128
+
+/* SPDM control structure for DOE */
+struct tsm_spdm {
+ unsigned long req_len;
+ void *req;
+ unsigned long rsp_len;
+ void *rsp;
+};
+
+/* Describes TIO device */
+struct tsm_dsm_tio {
+ u8 cert_slot;
+ struct sla_addr_t dev_ctx;
+ struct sla_addr_t req;
+ struct sla_addr_t resp;
+ struct sla_addr_t scratch;
+ struct sla_addr_t output;
+ size_t output_len;
+ size_t scratch_len;
+ struct tsm_spdm spdm;
+ struct sla_buffer_hdr *reqbuf; /* vmap'ed @req for DOE */
+ struct sla_buffer_hdr *respbuf; /* vmap'ed @resp for DOE */
+
+ int cmd;
+ int psp_ret;
+ u8 cmd_data[SEV_TIO_MAX_COMMAND_LENGTH];
+ void *data_pg; /* Data page for DEV_STATUS/TDI_STATUS/TDI_INFO/ASID_FENCE */
+
+#define TIO_IDE_MAX_TC 8
+ struct pci_ide *ide[TIO_IDE_MAX_TC];
+};
+
+/* Describes TSM structure for PF0 pointed by pci_dev->tsm */
+struct tio_dsm {
+ struct pci_tsm_pf0 tsm;
+ struct tsm_dsm_tio data;
+ struct sev_device *sev;
+};
+
+/* Data object IDs */
+#define SPDM_DOBJ_ID_NONE 0
+#define SPDM_DOBJ_ID_REQ 1
+#define SPDM_DOBJ_ID_RESP 2
+
+struct spdm_dobj_hdr {
+ u32 id; /* Data object type identifier */
+ u32 length; /* Length of the data object, INCLUDING THIS HEADER */
+ struct { /* Version of the data object structure */
+ u8 minor;
+ u8 major;
+ } version;
+} __packed;
+
+/**
+ * struct sev_tio_status - TIO_STATUS command's info_paddr buffer
+ *
+ * @length: Length of this structure in bytes
+ * @tio_en: Indicates that SNP_INIT_EX initialized the RMP for SEV-TIO
+ * @tio_init_done: Indicates TIO_INIT has been invoked
+ * @spdm_req_size_min: Minimum SPDM request buffer size in bytes
+ * @spdm_req_size_max: Maximum SPDM request buffer size in bytes
+ * @spdm_scratch_size_min: Minimum SPDM scratch buffer size in bytes
+ * @spdm_scratch_size_max: Maximum SPDM scratch buffer size in bytes
+ * @spdm_out_size_min: Minimum SPDM output buffer size in bytes
+ * @spdm_out_size_max: Maximum SPDM output buffer size in bytes
+ * @spdm_rsp_size_min: Minimum SPDM response buffer size in bytes
+ * @spdm_rsp_size_max: Maximum SPDM response buffer size in bytes
+ * @devctx_size: Size of a device context buffer in bytes
+ * @tdictx_size: Size of a TDI context buffer in bytes
+ * @tio_crypto_alg: TIO crypto algorithms supported
+ */
+struct sev_tio_status {
+ u32 length;
+ u32 tio_en :1,
+ tio_init_done :1,
+ reserved :30;
+ u32 spdm_req_size_min;
+ u32 spdm_req_size_max;
+ u32 spdm_scratch_size_min;
+ u32 spdm_scratch_size_max;
+ u32 spdm_out_size_min;
+ u32 spdm_out_size_max;
+ u32 spdm_rsp_size_min;
+ u32 spdm_rsp_size_max;
+ u32 devctx_size;
+ u32 tdictx_size;
+ u32 tio_crypto_alg;
+ u8 reserved2[12];
+} __packed;
+
+int sev_tio_init_locked(void *tio_status_page);
+int sev_tio_continue(struct tsm_dsm_tio *dev_data);
+
+int sev_tio_dev_create(struct tsm_dsm_tio *dev_data, u16 device_id, u16 root_port_id,
+ u8 segment_id);
+int sev_tio_dev_connect(struct tsm_dsm_tio *dev_data, u8 tc_mask, u8 ids[8], u8 cert_slot);
+int sev_tio_dev_disconnect(struct tsm_dsm_tio *dev_data, bool force);
+int sev_tio_dev_reclaim(struct tsm_dsm_tio *dev_data);
+
+#endif /* __PSP_SEV_TIO_H__ */
diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c
new file mode 100644
index 000000000000..ea29cd5d0ff9
--- /dev/null
+++ b/drivers/crypto/ccp/sev-dev-tsm.c
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+// Interface to CCP/SEV-TIO for generic PCIe TDISP module
+
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/tsm.h>
+#include <linux/iommu.h>
+#include <linux/pci-doe.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+
+#include <asm/sev-common.h>
+#include <asm/sev.h>
+
+#include "psp-dev.h"
+#include "sev-dev.h"
+#include "sev-dev-tio.h"
+
+MODULE_IMPORT_NS("PCI_IDE");
+
+#define TIO_DEFAULT_NR_IDE_STREAMS 1
+
+static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS;
+module_param_named(ide_nr, nr_ide_streams, uint, 0644);
+MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB");
+
+#define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev))
+#define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data))
+#define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data))
+#define tsm_dev_to_sev(tsmdev) dev_to_sev((tsmdev)->dev.parent)
+
+#define pdev_to_tio_dsm(pdev) (container_of((pdev)->tsm, struct tio_dsm, tsm.base_tsm))
+
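+/*
+ * Runs the DOE bounce loop: while the PSP keeps requesting an SPDM
+ * exchange, forward the request object to the device via pci_doe() and
+ * feed the response back with sev_tio_continue().
+ */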
+static int sev_tio_spdm_cmd(struct tio_dsm *dsm, int ret)
+{
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ struct tsm_spdm *spdm = &dev_data->spdm;
+
+ /* Check the main command handler response before entering the loop */
+ if (ret == 0 && dev_data->psp_ret != SEV_RET_SUCCESS)
+ return -EINVAL;
+
+ if (ret <= 0)
+ return ret;
+
+ /* ret > 0 means "SPDM requested" */
+ while (ret == PCI_DOE_FEATURE_CMA || ret == PCI_DOE_FEATURE_SSESSION) {
+ ret = pci_doe(dsm->tsm.doe_mb, PCI_VENDOR_ID_PCI_SIG, ret,
+ spdm->req, spdm->req_len, spdm->rsp, spdm->rsp_len);
+ if (ret < 0)
+ break;
+
+ WARN_ON_ONCE(ret == 0); /* The response should never be empty */
+ spdm->rsp_len = ret;
+ ret = sev_tio_continue(dev_data);
+ }
+
+ return ret;
+}
+
+static int stream_enable(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+ int ret;
+
+ ret = pci_ide_stream_enable(rp, ide);
+ if (ret)
+ return ret;
+
+ ret = pci_ide_stream_enable(ide->pdev, ide);
+ if (ret)
+ pci_ide_stream_disable(rp, ide);
+
+ return ret;
+}
+
+static int streams_enable(struct pci_ide **ide)
+{
+ int ret = 0;
+
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = stream_enable(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void stream_disable(struct pci_ide *ide)
+{
+ pci_ide_stream_disable(ide->pdev, ide);
+ pci_ide_stream_disable(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_disable(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ stream_disable(ide[i]);
+}
+
+static void stream_setup(struct pci_ide *ide)
+{
+ struct pci_dev *rp = pcie_find_root_port(ide->pdev);
+
+ ide->partner[PCI_IDE_EP].rid_start = 0;
+ ide->partner[PCI_IDE_EP].rid_end = 0xffff;
+ ide->partner[PCI_IDE_RP].rid_start = 0;
+ ide->partner[PCI_IDE_RP].rid_end = 0xffff;
+
+ ide->pdev->ide_cfg = 0;
+ ide->pdev->ide_tee_limit = 1;
+ rp->ide_cfg = 1;
+ rp->ide_tee_limit = 0;
+
+ pci_warn(ide->pdev, "Forcing CFG/TEE for %s", pci_name(rp));
+ pci_ide_stream_setup(ide->pdev, ide);
+ pci_ide_stream_setup(rp, ide);
+}
+
+static u8 streams_setup(struct pci_ide **ide, u8 *ids)
+{
+ bool def = false;
+ u8 tc_mask = 0;
+ int i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (!ide[i]) {
+ ids[i] = 0xFF;
+ continue;
+ }
+
+ tc_mask |= BIT(i);
+ ids[i] = ide[i]->stream_id;
+
+ if (!def) {
+ struct pci_ide_partner *settings;
+
+ settings = pci_ide_to_settings(ide[i]->pdev, ide[i]);
+ settings->default_stream = 1;
+ def = true;
+ }
+
+ stream_setup(ide[i]);
+ }
+
+ return tc_mask;
+}
+
+static int streams_register(struct pci_ide **ide)
+{
+ int ret = 0, i;
+
+ for (i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ ret = pci_ide_stream_register(ide[i]);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void streams_unregister(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i)
+ if (ide[i])
+ pci_ide_stream_unregister(ide[i]);
+}
+
+static void stream_teardown(struct pci_ide *ide)
+{
+ pci_ide_stream_teardown(ide->pdev, ide);
+ pci_ide_stream_teardown(pcie_find_root_port(ide->pdev), ide);
+}
+
+static void streams_teardown(struct pci_ide **ide)
+{
+ for (int i = 0; i < TIO_IDE_MAX_TC; ++i) {
+ if (ide[i]) {
+ stream_teardown(ide[i]);
+ pci_ide_stream_free(ide[i]);
+ ide[i] = NULL;
+ }
+ }
+}
+
+static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide,
+ unsigned int tc)
+{
+ struct pci_dev *rp = pcie_find_root_port(pdev);
+ struct pci_ide *ide1;
+
+ if (ide[tc]) {
+ pci_err(pdev, "Stream for class=%d already registered", tc);
+ return -EBUSY;
+ }
+
+ /* FIXME: find a better way */
+ if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS)
+ pci_notice(pdev, "Enable non-default %d streams", nr_ide_streams);
+ pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams);
+
+ ide1 = pci_ide_stream_alloc(pdev);
+ if (!ide1)
+ return -ENOMEM;
+
+ /* Blindly assign streamid=0 to TC=0, and so on */
+ ide1->stream_id = tc;
+
+ ide[tc] = ide1;
+
+ return 0;
+}
+
+static struct pci_tsm *tio_pf0_probe(struct pci_dev *pdev, struct sev_device *sev)
+{
+ struct tio_dsm *dsm __free(kfree) = kzalloc(sizeof(*dsm), GFP_KERNEL);
+ int rc;
+
+ if (!dsm)
+ return NULL;
+
+ rc = pci_tsm_pf0_constructor(pdev, &dsm->tsm, sev->tsmdev);
+ if (rc)
+ return NULL;
+
+ pci_dbg(pdev, "TSM enabled\n");
+ dsm->sev = sev;
+ return &no_free_ptr(dsm)->tsm.base_tsm;
+}
+
+static struct pci_tsm *dsm_probe(struct tsm_dev *tsmdev, struct pci_dev *pdev)
+{
+ struct sev_device *sev = tsm_dev_to_sev(tsmdev);
+
+ if (is_pci_tsm_pf0(pdev))
+ return tio_pf0_probe(pdev, sev);
+ return NULL;
+}
+
+static void dsm_remove(struct pci_tsm *tsm)
+{
+ struct pci_dev *pdev = tsm->pdev;
+
+ pci_dbg(pdev, "TSM disabled\n");
+
+ if (is_pci_tsm_pf0(pdev)) {
+ struct tio_dsm *dsm = container_of(tsm, struct tio_dsm, tsm.base_tsm);
+
+ pci_tsm_pf0_destructor(&dsm->tsm);
+ kfree(dsm);
+ }
+}
+
+static int dsm_create(struct tio_dsm *dsm)
+{
+ struct pci_dev *pdev = dsm->tsm.base_tsm.pdev;
+ u8 segment_id = pdev->bus ? pci_domain_nr(pdev->bus) : 0;
+ struct pci_dev *rootport = pcie_find_root_port(pdev);
+ u16 device_id = pci_dev_id(pdev);
+ u16 root_port_id;
+ u32 lnkcap = 0;
+
+ if (pci_read_config_dword(rootport, pci_pcie_cap(rootport) + PCI_EXP_LNKCAP,
+ &lnkcap))
+ return -ENODEV;
+
+ root_port_id = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
+
+ return sev_tio_dev_create(&dsm->data, device_id, root_port_id, segment_id);
+}
+
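+/*
+ * PCI TSM "connect" callback: allocates an IDE stream for traffic class 0,
+ * creates the PSP device context, runs TIO_DEV_CONNECT (including the
+ * SPDM/DOE exchange) and then enables and registers the IDE streams.
+ */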
+static int dsm_connect(struct pci_dev *pdev)
+{
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ u8 ids[TIO_IDE_MAX_TC];
+ u8 tc_mask;
+ int ret;
+
+ if (pci_find_doe_mailbox(pdev, PCI_VENDOR_ID_PCI_SIG,
+ PCI_DOE_FEATURE_SSESSION) != dsm->tsm.doe_mb) {
+ pci_err(pdev, "CMA DOE MB must support SSESSION\n");
+ return -EFAULT;
+ }
+
+ ret = stream_alloc(pdev, dev_data->ide, 0);
+ if (ret)
+ return ret;
+
+ ret = dsm_create(dsm);
+ if (ret)
+ goto ide_free_exit;
+
+ tc_mask = streams_setup(dev_data->ide, ids);
+
+ ret = sev_tio_dev_connect(dev_data, tc_mask, ids, dev_data->cert_slot);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret)
+ goto free_exit;
+
+ ret = streams_enable(dev_data->ide);
+ if (ret)
+ goto free_exit;
+
+ ret = streams_register(dev_data->ide);
+ if (ret)
+ goto free_exit;
+
+ return 0;
+
+free_exit:
+ sev_tio_dev_reclaim(dev_data);
+ streams_disable(dev_data->ide);
+
+ide_free_exit:
+ streams_teardown(dev_data->ide);
+
+ return ret;
+}
+
+static void dsm_disconnect(struct pci_dev *pdev)
+{
+ bool force = SYSTEM_HALT <= system_state && system_state <= SYSTEM_RESTART;
+ struct tio_dsm *dsm = pdev_to_tio_dsm(pdev);
+ struct tsm_dsm_tio *dev_data = &dsm->data;
+ int ret;
+
+ ret = sev_tio_dev_disconnect(dev_data, force);
+ ret = sev_tio_spdm_cmd(dsm, ret);
+ if (ret && !force) {
+ ret = sev_tio_dev_disconnect(dev_data, true);
+ sev_tio_spdm_cmd(dsm, ret);
+ }
+
+ sev_tio_dev_reclaim(dev_data);
+
+ streams_disable(dev_data->ide);
+ streams_unregister(dev_data->ide);
+ streams_teardown(dev_data->ide);
+}
+
+static struct pci_tsm_ops sev_tsm_ops = {
+ .probe = dsm_probe,
+ .remove = dsm_remove,
+ .connect = dsm_connect,
+ .disconnect = dsm_disconnect,
+};
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
+{
+ struct sev_tio_status *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ struct tsm_dev *tsmdev;
+ int ret;
+
+ WARN_ON(sev->tio_status);
+
+ if (!t)
+ return;
+
+ ret = sev_tio_init_locked(tio_status_page);
+ if (ret) {
+ pr_warn("SEV-TIO STATUS failed with %d\n", ret);
+ goto error_exit;
+ }
+
+ tsmdev = tsm_register(sev->dev, &sev_tsm_ops);
+ if (IS_ERR(tsmdev)) {
+ ret = PTR_ERR(tsmdev);
+ goto error_exit;
+ }
+
+ memcpy(t, tio_status_page, sizeof(*t));
+
+ pr_notice("SEV-TIO status: EN=%d INIT_DONE=%d rq=%d..%d rs=%d..%d "
+ "scr=%d..%d out=%d..%d dev=%d tdi=%d algos=%x\n",
+ t->tio_en, t->tio_init_done,
+ t->spdm_req_size_min, t->spdm_req_size_max,
+ t->spdm_rsp_size_min, t->spdm_rsp_size_max,
+ t->spdm_scratch_size_min, t->spdm_scratch_size_max,
+ t->spdm_out_size_min, t->spdm_out_size_max,
+ t->devctx_size, t->tdictx_size,
+ t->tio_crypto_alg);
+
+ sev->tsmdev = tsmdev;
+ sev->tio_status = t;
+
+ return;
+
+error_exit:
+ pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
+ ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
+ kfree(t);
+}
+
+void sev_tsm_uninit(struct sev_device *sev)
+{
+ if (sev->tsmdev)
+ tsm_unregister(sev->tsmdev);
+
+ sev->tsmdev = NULL;
+}
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index e058ba027792..956ea609d0cc 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -28,6 +28,7 @@
#include <linux/fs_struct.h>
#include <linux/psp.h>
#include <linux/amd-iommu.h>
+#include <linux/crash_dump.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
@@ -74,6 +75,14 @@ static bool psp_init_on_probe = true;
module_param(psp_init_on_probe, bool, 0444);
MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it");
+#if IS_ENABLED(CONFIG_PCI_TSM)
+static bool sev_tio_enabled = true;
+module_param_named(tio, sev_tio_enabled, bool, 0444);
+MODULE_PARM_DESC(tio, "Enables TIO in SNP_INIT_EX");
+#else
+static const bool sev_tio_enabled = false;
+#endif
+
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
@@ -82,6 +91,21 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
static bool psp_dead;
static int psp_timeout;
+enum snp_hv_fixed_pages_state {
+ ALLOCATED,
+ HV_FIXED,
+};
+
+struct snp_hv_fixed_pages_entry {
+ struct list_head list;
+ struct page *page;
+ unsigned int order;
+ bool free;
+ enum snp_hv_fixed_pages_state page_state;
+};
+
+static LIST_HEAD(snp_hv_fixed_pages);
+
/* Trusted Memory Region (TMR):
* The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
* to allocate the memory, which will return aligned memory for the specified
@@ -233,7 +257,9 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_SNP_GUEST_REQUEST: return sizeof(struct sev_data_snp_guest_request);
case SEV_CMD_SNP_CONFIG: return sizeof(struct sev_user_data_snp_config);
case SEV_CMD_SNP_COMMIT: return sizeof(struct sev_data_snp_commit);
- default: return 0;
+ case SEV_CMD_SNP_FEATURE_INFO: return sizeof(struct sev_data_snp_feature_info);
+ case SEV_CMD_SNP_VLEK_LOAD: return sizeof(struct sev_user_data_snp_vlek_load);
+ default: return sev_tio_cmd_buffer_len(cmd);
}
return 0;
@@ -241,27 +267,20 @@ static int sev_cmd_buffer_len(int cmd)
static struct file *open_file_as_root(const char *filename, int flags, umode_t mode)
{
- struct file *fp;
- struct path root;
- struct cred *cred;
- const struct cred *old_cred;
+ struct path root __free(path_put) = {};
task_lock(&init_task);
get_fs_root(init_task.fs, &root);
task_unlock(&init_task);
- cred = prepare_creds();
+ CLASS(prepare_creds, cred)();
if (!cred)
return ERR_PTR(-ENOMEM);
- cred->fsuid = GLOBAL_ROOT_UID;
- old_cred = override_creds(cred);
-
- fp = file_open_root(&root, filename, flags, mode);
- path_put(&root);
- put_cred(revert_creds(old_cred));
+ cred->fsuid = GLOBAL_ROOT_UID;
- return fp;
+ scoped_with_creds(cred)
+ return file_open_root(&root, filename, flags, mode);
}
static int sev_read_init_ex_file(void)
@@ -369,13 +388,7 @@ static int sev_write_init_ex_file_if_required(int cmd_id)
return sev_write_init_ex_file();
}
-/*
- * snp_reclaim_pages() needs __sev_do_cmd_locked(), and __sev_do_cmd_locked()
- * needs snp_reclaim_pages(), so a forward declaration is needed.
- */
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
-
-static int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
+int snp_reclaim_pages(unsigned long paddr, unsigned int npages, bool locked)
{
int ret, err, i;
@@ -409,6 +422,7 @@ cleanup:
snp_leak_pages(__phys_to_pfn(paddr), npages - i);
return ret;
}
+EXPORT_SYMBOL_GPL(snp_reclaim_pages);
static int rmp_mark_pages_firmware(unsigned long paddr, unsigned int npages, bool locked)
{
@@ -839,16 +853,17 @@ static int snp_reclaim_cmd_buf(int cmd, void *cmd_buf)
return 0;
}
-static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
{
struct cmd_buf_desc desc_list[CMD_BUF_DESC_MAX] = {0};
struct psp_device *psp = psp_master;
struct sev_device *sev;
unsigned int cmdbuff_hi, cmdbuff_lo;
unsigned int phys_lsb, phys_msb;
- unsigned int reg, ret = 0;
+ unsigned int reg;
void *cmd_buf;
int buf_len;
+ int ret = 0;
if (!psp || !psp->sev_data)
return -ENODEV;
@@ -1073,6 +1088,247 @@ static void snp_set_hsave_pa(void *arg)
wrmsrq(MSR_VM_HSAVE_PA, 0);
}
+/* Hypervisor Fixed pages API interface */
+static void snp_hv_fixed_pages_state_update(struct sev_device *sev,
+ enum snp_hv_fixed_pages_state page_state)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ entry->page_state = page_state;
+}
+
+/*
+ * Allocate HV_FIXED pages in 2MB aligned sizes to ensure the whole
+ * 2MB pages are marked as HV_FIXED.
+ */
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_device *sev;
+ unsigned int order;
+ struct page *page;
+
+ if (!psp_master || !psp_master->sev_data)
+ return NULL;
+
+ sev = psp_master->sev_data;
+
+ order = get_order(PMD_SIZE * num_2mb_pages);
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ /*
+ * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed
+ * page state, fail if SNP is already initialized.
+ */
+ if (sev->snp_initialized)
+ return NULL;
+
+ /* Re-use freed pages that match the request */
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ /* Hypervisor fixed page allocator implements exact fit policy */
+ if (entry->order == order && entry->free) {
+ entry->free = false;
+ memset(page_address(entry->page), 0,
+ (1 << entry->order) * PAGE_SIZE);
+ return entry->page;
+ }
+ }
+
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return NULL;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ __free_pages(page, order);
+ return NULL;
+ }
+
+ entry->page = page;
+ entry->order = order;
+ list_add_tail(&entry->list, &snp_hv_fixed_pages);
+
+ return page;
+}
+
+void snp_free_hv_fixed_pages(struct page *page)
+{
+ struct psp_device *psp_master = psp_get_master_device();
+ struct snp_hv_fixed_pages_entry *entry, *nentry;
+
+ if (!psp_master || !psp_master->sev_data)
+ return;
+
+ /*
+ * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+ * also needs to be protected using the same mutex.
+ */
+ guard(mutex)(&sev_cmd_mutex);
+
+ list_for_each_entry_safe(entry, nentry, &snp_hv_fixed_pages, list) {
+ if (entry->page != page)
+ continue;
+
+ /*
+ * HV_FIXED page state cannot be changed until reboot
+ * and they cannot be used by an SNP guest, so they cannot
+ * be returned back to the page allocator.
+ * Mark the pages as free internally to allow possible re-use.
+ */
+ if (entry->page_state == HV_FIXED) {
+ entry->free = true;
+ } else {
+ __free_pages(page, entry->order);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ return;
+ }
+}
+
+static void snp_add_hv_fixed_pages(struct sev_device *sev, struct sev_data_range_list *range_list)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+ struct sev_data_range *range;
+ int num_elements;
+
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ num_elements = list_count_nodes(&snp_hv_fixed_pages) +
+ range_list->num_elements;
+
+ /*
+ * Ensure the list of HV_FIXED pages that will be passed to firmware
+ * do not exceed the page-sized argument buffer.
+ */
+ if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
+ dev_warn(sev->dev, "Additional HV_Fixed pages cannot be accommodated, omitting\n");
+ return;
+ }
+
+ range = &range_list->ranges[range_list->num_elements];
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+ range->base = page_to_pfn(entry->page) << PAGE_SHIFT;
+ range->page_count = 1 << entry->order;
+ range++;
+ }
+ range_list->num_elements = num_elements;
+}
+
+static void snp_leak_hv_fixed_pages(void)
+{
+ struct snp_hv_fixed_pages_entry *entry;
+
+ /* List is protected by sev_cmd_mutex */
+ lockdep_assert_held(&sev_cmd_mutex);
+
+ if (list_empty(&snp_hv_fixed_pages))
+ return;
+
+ list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+ if (entry->page_state == HV_FIXED)
+ __snp_leak_pages(page_to_pfn(entry->page),
+ 1 << entry->order, false);
+}
+
+bool sev_is_snp_ciphertext_hiding_supported(void)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+
+ if (!psp || !psp->sev_data)
+ return false;
+
+ sev = psp->sev_data;
+
+ /*
+ * Feature information indicates if CipherTextHiding feature is
+ * supported by the SEV firmware and additionally platform status
+ * indicates if CipherTextHiding feature is enabled in the
+ * Platform BIOS.
+ */
+ return ((sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED) &&
+ sev->snp_plat_status.ciphertext_hiding_cap);
+}
+EXPORT_SYMBOL_GPL(sev_is_snp_ciphertext_hiding_supported);
+
+static int snp_get_platform_data(struct sev_device *sev, int *error)
+{
+ struct sev_data_snp_feature_info snp_feat_info;
+ struct snp_feature_info *feat_info;
+ struct sev_data_snp_addr buf;
+ struct page *page;
+ int rc;
+
+ /*
+ * This function is expected to be called before SNP is
+ * initialized.
+ */
+ if (sev->snp_initialized)
+ return -EINVAL;
+
+ buf.address = __psp_pa(&sev->snp_plat_status);
+ rc = sev_do_cmd(SEV_CMD_SNP_PLATFORM_STATUS, &buf, error);
+ if (rc) {
+ dev_err(sev->dev, "SNP PLATFORM_STATUS command failed, ret = %d, error = %#x\n",
+ rc, *error);
+ return rc;
+ }
+
+ sev->api_major = sev->snp_plat_status.api_major;
+ sev->api_minor = sev->snp_plat_status.api_minor;
+ sev->build = sev->snp_plat_status.build_id;
+
+ /*
+ * Do feature discovery of the currently loaded firmware,
+ * and cache feature information from CPUID 0x8000_0024,
+ * sub-function 0.
+ */
+ if (!sev->snp_plat_status.feature_info)
+ return 0;
+
+ /*
+ * Use dynamically allocated structure for the SNP_FEATURE_INFO
+ * command to ensure structure is 8-byte aligned, and does not
+ * cross a page boundary.
+ */
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ feat_info = page_address(page);
+ snp_feat_info.length = sizeof(snp_feat_info);
+ snp_feat_info.ecx_in = 0;
+ snp_feat_info.feature_info_paddr = __psp_pa(feat_info);
+
+ rc = sev_do_cmd(SEV_CMD_SNP_FEATURE_INFO, &snp_feat_info, error);
+ if (!rc)
+ sev->snp_feat_info_0 = *feat_info;
+ else
+ dev_err(sev->dev, "SNP FEATURE_INFO command failed, ret = %d, error = %#x\n",
+ rc, *error);
+
+ __free_page(page);
+
+ return rc;
+}
+
static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
{
struct sev_data_range_list *range_list = arg;
@@ -1103,7 +1359,7 @@ static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
return 0;
}
-static int __sev_snp_init_locked(int *error)
+static int __sev_snp_init_locked(int *error, unsigned int max_snp_asid)
{
struct psp_device *psp = psp_master;
struct sev_data_snp_init_ex data;
@@ -1139,6 +1395,8 @@ static int __sev_snp_init_locked(int *error)
*
*/
if (sev_version_greater_or_equal(SNP_MIN_API_MAJOR, 52)) {
+ bool tio_supp = !!(sev->snp_feat_info_0.ebx & SNP_SEV_TIO_SUPPORTED);
+
/*
* Firmware checks that the pages containing the ranges enumerated
* in the RANGES structure are either in the default page state or in the
@@ -1163,10 +1421,33 @@ static int __sev_snp_init_locked(int *error)
return rc;
}
+ /*
+ * Add HV_Fixed pages from other PSP sub-devices, such as SFS to the
+ * HV_Fixed page list.
+ */
+ snp_add_hv_fixed_pages(sev, snp_range_list);
+
memset(&data, 0, sizeof(data));
+
+ if (max_snp_asid) {
+ data.ciphertext_hiding_en = 1;
+ data.max_snp_asid = max_snp_asid;
+ }
+
data.init_rmp = 1;
data.list_paddr_en = 1;
data.list_paddr = __psp_pa(snp_range_list);
+
+ data.tio_en = tio_supp && sev_tio_enabled && amd_iommu_sev_tio_supported();
+
+ /*
+ * When psp_init_on_probe is disabled, userspace issuing SEV ioctls
+ * can inadvertently shut down SNP and SEV-TIO, causing unexpected
+ * state loss.
+ */
+ if (data.tio_en && !psp_init_on_probe)
+ dev_warn(sev->dev, "SEV-TIO is incompatible with psp_init_on_probe=0\n");
+
cmd = SEV_CMD_SNP_INIT_EX;
} else {
cmd = SEV_CMD_SNP_INIT;
@@ -1202,8 +1483,10 @@ static int __sev_snp_init_locked(int *error)
return rc;
}
+ snp_hv_fixed_pages_state_update(sev, HV_FIXED);
sev->snp_initialized = true;
- dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
+ dev_dbg(sev->dev, "SEV-SNP firmware initialized, SEV-TIO is %s\n",
+ data.tio_en ? "enabled" : "disabled");
dev_info(sev->dev, "SEV-SNP API:%d.%d build:%d\n", sev->api_major,
sev->api_minor, sev->build);
@@ -1211,6 +1494,23 @@ static int __sev_snp_init_locked(int *error)
atomic_notifier_chain_register(&panic_notifier_list,
&snp_panic_notifier);
+ if (data.tio_en) {
+ /*
+ * This executes with the sev_cmd_mutex held so down the stack
+ * snp_reclaim_pages(locked=false) might be needed (which is extremely
+ * unlikely) but will cause a deadlock.
+ * Instead of exporting __snp_alloc_firmware_pages(), allocate a page
+ * for this one call here.
+ */
+ void *tio_status = page_address(__snp_alloc_firmware_pages(
+ GFP_KERNEL_ACCOUNT | __GFP_ZERO, 0, true));
+
+ if (tio_status) {
+ sev_tsm_init_locked(sev, tio_status);
+ __snp_free_firmware_pages(virt_to_page(tio_status), 0, true);
+ }
+ }
+
sev_es_tmr_size = SNP_TMR_SIZE;
return 0;
@@ -1286,7 +1586,7 @@ static int __sev_platform_init_locked(int *error)
sev = psp_master->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
return 0;
__sev_platform_init_handle_tmr(sev);
@@ -1318,7 +1618,7 @@ static int __sev_platform_init_locked(int *error)
return rc;
}
- sev->state = SEV_STATE_INIT;
+ sev->sev_plat_status.state = SEV_STATE_INIT;
/* Prepare for first SEV guest launch after INIT */
wbinvd_on_all_cpus();
@@ -1345,12 +1645,21 @@ static int _sev_platform_init_locked(struct sev_platform_init_args *args)
if (!psp_master || !psp_master->sev_data)
return -ENODEV;
+ /*
+ * Skip SNP/SEV initialization under a kdump kernel as SEV/SNP
+ * may already be initialized in the previous kernel. Since no
+ * SNP/SEV guests are run under a kdump kernel, there is no
+ * need to initialize SNP or SEV during kdump boot.
+ */
+ if (is_kdump_kernel())
+ return 0;
+
sev = psp_master->sev_data;
- if (sev->state == SEV_STATE_INIT)
+ if (sev->sev_plat_status.state == SEV_STATE_INIT)
return 0;
- rc = __sev_snp_init_locked(&args->error);
+ rc = __sev_snp_init_locked(&args->error, args->max_snp_asid);
if (rc && rc != -ENODEV)
return rc;
@@ -1384,7 +1693,7 @@ static int __sev_platform_shutdown_locked(int *error)
sev = psp->sev_data;
- if (sev->state == SEV_STATE_UNINIT)
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT)
return 0;
ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
@@ -1394,7 +1703,7 @@ static int __sev_platform_shutdown_locked(int *error)
return ret;
}
- sev->state = SEV_STATE_UNINIT;
+ sev->sev_plat_status.state = SEV_STATE_UNINIT;
dev_dbg(sev->dev, "SEV firmware shutdown\n");
return ret;
@@ -1433,7 +1742,7 @@ static int snp_move_to_init_state(struct sev_issue_cmd *argp, bool *shutdown_req
{
int error, rc;
- rc = __sev_snp_init_locked(&error);
+ rc = __sev_snp_init_locked(&error, 0);
if (rc) {
argp->error = SEV_RET_INVALID_PLATFORM_STATE;
return rc;
@@ -1502,7 +1811,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool wr
if (!writable)
return -EPERM;
- if (sev->state == SEV_STATE_UNINIT) {
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
rc = sev_move_to_init_state(argp, &shutdown_required);
if (rc)
return rc;
@@ -1551,7 +1860,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
data.len = input.length;
cmd:
- if (sev->state == SEV_STATE_UNINIT) {
+ if (sev->sev_plat_status.state == SEV_STATE_UNINIT) {
ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_blob;
@@ -1599,6 +1908,16 @@ static int sev_get_api_version(void)
struct sev_user_data_status status;
int error = 0, ret;
+ /*
+ * Cache SNP platform status and SNP feature information
+ * if SNP is available.
+ */
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP)) {
+ ret = snp_get_platform_data(sev, &error);
+ if (ret)
+ return 1;
+ }
+
ret = sev_platform_status(&status, &error);
if (ret) {
dev_err(sev->dev,
@@ -1606,10 +1925,12 @@ static int sev_get_api_version(void)
return 1;
}
+ /* Cache SEV platform status */
+ sev->sev_plat_status = status;
+
sev->api_major = status.api_major;
sev->api_minor = status.api_minor;
sev->build = status.build;
- sev->state = status.state;
return 0;
}
@@ -1784,6 +2105,7 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
return ret;
}
+ snp_leak_hv_fixed_pages();
sev->snp_initialized = false;
dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
@@ -1837,7 +2159,7 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable)
data.oca_cert_len = input.oca_cert_len;
/* If platform is not in INIT state then transition it to INIT */
- if (sev->state != SEV_STATE_INIT) {
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
ret = sev_move_to_init_state(argp, &shutdown_required);
if (ret)
goto e_free_oca;
@@ -2008,7 +2330,7 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
cmd:
/* If platform is not in INIT state then transition it to INIT. */
- if (sev->state != SEV_STATE_INIT) {
+ if (sev->sev_plat_status.state != SEV_STATE_INIT) {
if (!writable) {
ret = -EPERM;
goto e_free_cert;
@@ -2430,7 +2752,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
{
int error;
- __sev_platform_shutdown_locked(NULL);
+ __sev_platform_shutdown_locked(&error);
if (sev_es_tmr) {
/*
@@ -2468,8 +2790,20 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
static void sev_firmware_shutdown(struct sev_device *sev)
{
+ /*
+ * Calling without sev_cmd_mutex held as TSM will likely try disconnecting
+ * IDE and this ends up calling sev_do_cmd() which locks sev_cmd_mutex.
+ */
+ if (sev->tio_status)
+ sev_tsm_uninit(sev);
+
mutex_lock(&sev_cmd_mutex);
+
__sev_firmware_shutdown(sev, false);
+
+ kfree(sev->tio_status);
+ sev->tio_status = NULL;
+
mutex_unlock(&sev_cmd_mutex);
}
@@ -2482,6 +2816,43 @@ void sev_platform_shutdown(void)
}
EXPORT_SYMBOL_GPL(sev_platform_shutdown);
+u64 sev_get_snp_policy_bits(void)
+{
+ struct psp_device *psp = psp_master;
+ struct sev_device *sev;
+ u64 policy_bits;
+
+ if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ return 0;
+
+ if (!psp || !psp->sev_data)
+ return 0;
+
+ sev = psp->sev_data;
+
+ policy_bits = SNP_POLICY_MASK_BASE;
+
+ if (sev->snp_plat_status.feature_info) {
+ if (sev->snp_feat_info_0.ecx & SNP_RAPL_DISABLE_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_RAPL_DIS;
+
+ if (sev->snp_feat_info_0.ecx & SNP_CIPHER_TEXT_HIDING_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_CIPHERTEXT_HIDING_DRAM;
+
+ if (sev->snp_feat_info_0.ecx & SNP_AES_256_XTS_POLICY_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_MEM_AES_256_XTS;
+
+ if (sev->snp_feat_info_0.ecx & SNP_CXL_ALLOW_POLICY_SUPPORTED)
+ policy_bits |= SNP_POLICY_MASK_CXL_ALLOW;
+
+ if (sev_version_greater_or_equal(1, 58))
+ policy_bits |= SNP_POLICY_MASK_PAGE_SWAP_DISABLE;
+ }
+
+ return policy_bits;
+}
+EXPORT_SYMBOL_GPL(sev_get_snp_policy_bits);
+
void sev_dev_destroy(struct psp_device *psp)
{
struct sev_device *sev = psp->sev_data;
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 3e4e5574e88a..b1cd556bbbf6 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -34,6 +34,8 @@ struct sev_misc_dev {
struct miscdevice misc;
};
+struct sev_tio_status;
+
struct sev_device {
struct device *dev;
struct psp_device *psp;
@@ -42,7 +44,6 @@ struct sev_device {
struct sev_vdata *vdata;
- int state;
unsigned int int_rcvd;
wait_queue_head_t int_queue;
struct sev_misc_dev *misc;
@@ -57,12 +58,29 @@ struct sev_device {
bool cmd_buf_backup_active;
bool snp_initialized;
+
+ struct sev_user_data_status sev_plat_status;
+
+ struct sev_user_data_snp_status snp_plat_status;
+ struct snp_feature_info snp_feat_info_0;
+
+ struct tsm_dev *tsmdev;
+ struct sev_tio_status *tio_status;
};
int sev_dev_init(struct psp_device *psp);
void sev_dev_destroy(struct psp_device *psp);
+int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret);
+
void sev_pci_init(void);
void sev_pci_exit(void);
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
+void snp_free_hv_fixed_pages(struct page *page);
+
+void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page);
+void sev_tsm_uninit(struct sev_device *sev);
+int sev_tio_cmd_buffer_len(int cmd);
+
#endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/ccp/sfs.c b/drivers/crypto/ccp/sfs.c
new file mode 100644
index 000000000000..2f4beaafe7ec
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Seamless Firmware Servicing support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "sfs.h"
+#include "sev-dev.h"
+
+#define SFS_DEFAULT_TIMEOUT (10 * MSEC_PER_SEC)
+#define SFS_MAX_PAYLOAD_SIZE (2 * 1024 * 1024)
+#define SFS_NUM_2MB_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PMD_SIZE)
+#define SFS_NUM_PAGES_CMDBUF (SFS_MAX_PAYLOAD_SIZE / PAGE_SIZE)
+
+static DEFINE_MUTEX(sfs_ioctl_mutex);
+
+static struct sfs_misc_dev *misc_dev;
+
+static int send_sfs_cmd(struct sfs_device *sfs_dev, int msg)
+{
+ int ret;
+
+ sfs_dev->command_buf->hdr.status = 0;
+ sfs_dev->command_buf->hdr.sub_cmd_id = msg;
+
+ ret = psp_extended_mailbox_cmd(sfs_dev->psp,
+ SFS_DEFAULT_TIMEOUT,
+ (struct psp_ext_request *)sfs_dev->command_buf);
+ if (ret == -EIO) {
+ dev_dbg(sfs_dev->dev,
+ "msg 0x%x failed with PSP error: 0x%x, extended status: 0x%x\n",
+ msg, sfs_dev->command_buf->hdr.status,
+ *(u32 *)sfs_dev->command_buf->buf);
+ }
+
+ return ret;
+}
+
+static int send_sfs_get_fw_versions(struct sfs_device *sfs_dev)
+{
+ /*
+ * SFS_GET_FW_VERSIONS command needs the output buffer to be
+ * initialized to 0xC7 in every byte.
+ */
+ memset(sfs_dev->command_buf->sfs_buffer, 0xc7, PAGE_SIZE);
+ sfs_dev->command_buf->hdr.payload_size = 2 * PAGE_SIZE;
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_GET_FW_VERSIONS);
+}
+
+static int send_sfs_update_package(struct sfs_device *sfs_dev, const char *payload_name)
+{
+ char payload_path[PAYLOAD_NAME_SIZE + sizeof("amd/")];
+ const struct firmware *firmware;
+ unsigned long package_size;
+ int ret;
+
+ /* Sanitize userspace provided payload name */
+ if (!strnchr(payload_name, PAYLOAD_NAME_SIZE, '\0'))
+ return -EINVAL;
+
+ snprintf(payload_path, sizeof(payload_path), "amd/%s", payload_name);
+
+ ret = firmware_request_nowarn(&firmware, payload_path, sfs_dev->dev);
+ if (ret < 0) {
+ dev_warn_ratelimited(sfs_dev->dev, "firmware request failed for %s (%d)\n",
+ payload_path, ret);
+ return -ENOENT;
+ }
+
+ /*
+ * SFS Update Package command's input buffer contains TEE_EXT_CMD_BUFFER
+ * followed by the Update Package and it should be 64KB aligned.
+ */
+ package_size = ALIGN(firmware->size + PAGE_SIZE, 0x10000U);
+
+ /*
+ * SFS command buffer is a pre-allocated 2MB buffer, fail update package
+ * if SFS payload is larger than the pre-allocated command buffer.
+ */
+ if (package_size > SFS_MAX_PAYLOAD_SIZE) {
+ dev_warn_ratelimited(sfs_dev->dev,
+ "SFS payload size %ld larger than maximum supported payload size of %u\n",
+ package_size, SFS_MAX_PAYLOAD_SIZE);
+ release_firmware(firmware);
+ return -E2BIG;
+ }
+
+ /*
+ * Copy firmware data to a HV_Fixed memory region.
+ */
+ memcpy(sfs_dev->command_buf->sfs_buffer, firmware->data, firmware->size);
+ sfs_dev->command_buf->hdr.payload_size = package_size;
+
+ release_firmware(firmware);
+
+ return send_sfs_cmd(sfs_dev, PSP_SFS_UPDATE);
+}
+
+static long sfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct sfs_user_get_fw_versions __user *sfs_get_fw_versions;
+ struct sfs_user_update_package __user *sfs_update_package;
+ struct psp_device *psp_master = psp_get_master_device();
+ char payload_name[PAYLOAD_NAME_SIZE];
+ struct sfs_device *sfs_dev;
+ int ret = 0;
+
+ if (!psp_master || !psp_master->sfs_data)
+ return -ENODEV;
+
+ sfs_dev = psp_master->sfs_data;
+
+ guard(mutex)(&sfs_ioctl_mutex);
+
+ switch (cmd) {
+ case SFSIOCFWVERS:
+ dev_dbg(sfs_dev->dev, "in SFSIOCFWVERS\n");
+
+ sfs_get_fw_versions = (struct sfs_user_get_fw_versions __user *)arg;
+
+ ret = send_sfs_get_fw_versions(sfs_dev);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+ * Return SFS status and extended status back to userspace
+ * if PSP status indicated success or command error.
+ */
+ if (copy_to_user(&sfs_get_fw_versions->blob, sfs_dev->command_buf->sfs_buffer,
+ PAGE_SIZE))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_get_fw_versions->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_get_fw_versions->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_get_fw_versions->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ case SFSIOCUPDATEPKG:
+ dev_dbg(sfs_dev->dev, "in SFSIOCUPDATEPKG\n");
+
+ sfs_update_package = (struct sfs_user_update_package __user *)arg;
+
+ if (copy_from_user(payload_name, sfs_update_package->payload_name,
+ PAYLOAD_NAME_SIZE))
+ return -EFAULT;
+
+ ret = send_sfs_update_package(sfs_dev, payload_name);
+ if (ret && ret != -EIO)
+ return ret;
+
+ /*
+ * Return SFS status and extended status back to userspace
+ * if PSP status indicated success or command error.
+ */
+ if (copy_to_user(&sfs_update_package->sfs_status,
+ &sfs_dev->command_buf->hdr.status,
+ sizeof(sfs_update_package->sfs_status)))
+ return -EFAULT;
+ if (copy_to_user(&sfs_update_package->sfs_extended_status,
+ &sfs_dev->command_buf->buf,
+ sizeof(sfs_update_package->sfs_extended_status)))
+ return -EFAULT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct file_operations sfs_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = sfs_ioctl,
+};
+
+static void sfs_exit(struct kref *ref)
+{
+ misc_deregister(&misc_dev->misc);
+ kfree(misc_dev);
+ misc_dev = NULL;
+}
+
+void sfs_dev_destroy(struct psp_device *psp)
+{
+ struct sfs_device *sfs_dev = psp->sfs_data;
+
+ if (!sfs_dev)
+ return;
+
+ /*
+ * Change SFS command buffer back to the default "Write-Back" type.
+ */
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+ snp_free_hv_fixed_pages(sfs_dev->page);
+
+ if (sfs_dev->misc)
+ kref_put(&misc_dev->refcount, sfs_exit);
+
+ psp->sfs_data = NULL;
+}
+
+/* Based on sev_misc_init() */
+static int sfs_misc_init(struct sfs_device *sfs)
+{
+ struct device *dev = sfs->dev;
+ int ret;
+
+ /*
+ * SFS feature support can be detected on multiple devices but the SFS
+ * FW commands must be issued on the master. During probe, we do not
+ * know the master hence we create /dev/sfs on the first device probe.
+ */
+ if (!misc_dev) {
+ struct miscdevice *misc;
+
+ misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
+ if (!misc_dev)
+ return -ENOMEM;
+
+ misc = &misc_dev->misc;
+ misc->minor = MISC_DYNAMIC_MINOR;
+ misc->name = "sfs";
+ misc->fops = &sfs_fops;
+ misc->mode = 0600;
+
+ ret = misc_register(misc);
+ if (ret)
+ return ret;
+
+ kref_init(&misc_dev->refcount);
+ } else {
+ kref_get(&misc_dev->refcount);
+ }
+
+ sfs->misc = misc_dev;
+ dev_dbg(dev, "registered SFS device\n");
+
+ return 0;
+}
+
+int sfs_dev_init(struct psp_device *psp)
+{
+ struct device *dev = psp->dev;
+ struct sfs_device *sfs_dev;
+ struct page *page;
+ int ret = -ENOMEM;
+
+ sfs_dev = devm_kzalloc(dev, sizeof(*sfs_dev), GFP_KERNEL);
+ if (!sfs_dev)
+ return -ENOMEM;
+
+ /*
+ * Pre-allocate 2MB command buffer for all SFS commands using
+ * SNP HV_Fixed page allocator which also transitions the
+ * SFS command buffer to HV_Fixed page state if SNP is enabled.
+ */
+ page = snp_alloc_hv_fixed_pages(SFS_NUM_2MB_PAGES_CMDBUF);
+ if (!page) {
+ dev_dbg(dev, "Command Buffer HV-Fixed page allocation failed\n");
+ goto cleanup_dev;
+ }
+ sfs_dev->page = page;
+ sfs_dev->command_buf = page_address(page);
+
+ dev_dbg(dev, "Command buffer 0x%px to be marked as HV_Fixed\n", sfs_dev->command_buf);
+
+ /*
+ * SFS command buffer must be mapped as non-cacheable.
+ */
+ ret = set_memory_uc((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+ if (ret) {
+ dev_dbg(dev, "Set memory uc failed\n");
+ goto cleanup_cmd_buf;
+ }
+
+ dev_dbg(dev, "Command buffer 0x%px marked uncacheable\n", sfs_dev->command_buf);
+
+ psp->sfs_data = sfs_dev;
+ sfs_dev->dev = dev;
+ sfs_dev->psp = psp;
+
+ ret = sfs_misc_init(sfs_dev);
+ if (ret)
+ goto cleanup_mem_attr;
+
+ dev_notice(sfs_dev->dev, "SFS support is available\n");
+
+ return 0;
+
+cleanup_mem_attr:
+ set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+cleanup_cmd_buf:
+ snp_free_hv_fixed_pages(page);
+
+cleanup_dev:
+ psp->sfs_data = NULL;
+ devm_kfree(dev, sfs_dev);
+
+ return ret;
+}
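The /dev/sfs node above is shared by every PSP instance that detects SFS support: the first probe registers the miscdevice and takes the initial reference, later probes only bump the kref, and the node is torn down when the last reference drops. A minimal user-space sketch of that singleton-with-refcount pattern (a plain counter stands in for struct kref and a string for the miscdevice; the names are illustrative, not the driver's):

/* Hedged sketch, not the driver code. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct shared_dev {
	int refcount;		/* stands in for struct kref */
	const char *name;	/* stands in for the miscdevice */
};

static struct shared_dev *singleton;

static struct shared_dev *get_dev(void)
{
	if (!singleton) {
		singleton = calloc(1, sizeof(*singleton));
		assert(singleton);
		singleton->name = "sfs";
		singleton->refcount = 1;	/* kref_init() */
	} else {
		singleton->refcount++;		/* kref_get() */
	}
	return singleton;
}

static void put_dev(void)
{
	if (--singleton->refcount == 0) {	/* kref_put() release fn */
		printf("deregistering %s\n", singleton->name);
		free(singleton);
		singleton = NULL;
	}
}

int main(void)
{
	get_dev();	/* first PSP device probes: node created */
	get_dev();	/* second device probes: reference only */
	put_dev();	/* one device removed: node stays */
	put_dev();	/* last device removed: node destroyed */
	return 0;
}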
diff --git a/drivers/crypto/ccp/sfs.h b/drivers/crypto/ccp/sfs.h
new file mode 100644
index 000000000000..97704c210efd
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Seamless Firmware (SFS) Support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#ifndef __SFS_H__
+#define __SFS_H__
+
+#include <uapi/linux/psp-sfs.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-platform-access.h>
+#include <linux/set_memory.h>
+
+#include "psp-dev.h"
+
+struct sfs_misc_dev {
+ struct kref refcount;
+ struct miscdevice misc;
+};
+
+struct sfs_command {
+ struct psp_ext_req_buffer_hdr hdr;
+ u8 buf[PAGE_SIZE - sizeof(struct psp_ext_req_buffer_hdr)];
+ u8 sfs_buffer[];
+} __packed;
+
+struct sfs_device {
+ struct device *dev;
+ struct psp_device *psp;
+
+ struct page *page;
+ struct sfs_command *command_buf;
+
+ struct sfs_misc_dev *misc;
+};
+
+void sfs_dev_destroy(struct psp_device *psp);
+int sfs_dev_init(struct psp_device *psp);
+
+#endif /* __SFS_H__ */
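struct sfs_command sizes buf[] so that the header plus buf fill exactly one page, with sfs_buffer[] as a flexible tail into the remaining HV_Fixed pages. A hedged compile-time check of that layout; PAGE_SIZE and the header fields here are stand-ins for the kernel definitions:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096			/* stand-in for the kernel constant */

struct psp_ext_req_buffer_hdr {		/* stand-in fields for the sketch */
	uint32_t payload_size;
	uint32_t status;
};

struct sfs_command {
	struct psp_ext_req_buffer_hdr hdr;
	uint8_t buf[PAGE_SIZE - sizeof(struct psp_ext_req_buffer_hdr)];
	uint8_t sfs_buffer[];		/* spills into the extra HV_Fixed pages */
} __attribute__((packed));

_Static_assert(offsetof(struct sfs_command, sfs_buffer) == PAGE_SIZE,
	       "hdr + buf must span exactly one page");

int main(void) { return 0; }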
diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
index 6f9d7063257d..1335a83fe052 100644
--- a/drivers/crypto/ccp/sp-dev.h
+++ b/drivers/crypto/ccp/sp-dev.h
@@ -95,7 +95,7 @@ struct sp_device {
struct device *dev;
- struct sp_dev_vdata *dev_vdata;
+ const struct sp_dev_vdata *dev_vdata;
unsigned int ord;
char name[SP_MAX_NAME_LEN];
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index e7bb803912a6..8891ceee1d7d 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -459,6 +459,17 @@ static const struct psp_vdata pspv6 = {
.intsts_reg = 0x10514, /* P2CMSG_INTSTS */
};
+static const struct psp_vdata pspv7 = {
+ .tee = &teev2,
+ .cmdresp_reg = 0x10944, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */
+ .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10510, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */
+};
+
#endif
static const struct sp_dev_vdata dev_vdata[] = {
@@ -525,6 +536,13 @@ static const struct sp_dev_vdata dev_vdata[] = {
.psp_vdata = &pspv6,
#endif
},
+ { /* 9 */
+ .bar = 2,
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+ .psp_vdata = &pspv7,
+#endif
+ },
+
};
static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] },
@@ -539,6 +557,7 @@ static const struct pci_device_id sp_pci_table[] = {
{ PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] },
{ PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] },
{ PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] },
+ { PCI_VDEVICE(AMD, 0x115A), (kernel_ulong_t)&dev_vdata[9] },
/* Last entry must be zero */
{ 0, }
};
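Each sp_pci_table[] entry smuggles a dev_vdata[] pointer through the kernel_ulong_t driver_data field, and probe casts it back. A small user-space analog of that table-lookup convention (the IDs and fields are illustrative):

/* Hedged sketch of the driver_data convention, not the driver itself. */
#include <stdint.h>
#include <stdio.h>

struct vdata { int bar; };

static const struct vdata dev_vdata[] = {
	[0] = { .bar = 2 },
	[9] = { .bar = 2 },	/* analog of the new pspv7 entry */
};

struct id_entry {
	uint16_t device;
	uintptr_t driver_data;	/* stands in for kernel_ulong_t */
};

static const struct id_entry table[] = {
	{ 0x1537, (uintptr_t)&dev_vdata[0] },
	{ 0x115A, (uintptr_t)&dev_vdata[9] },	/* device ID added by this patch */
	{ 0, 0 },				/* last entry must be zero */
};

int main(void)
{
	for (const struct id_entry *e = table; e->device; e++) {
		const struct vdata *v = (const struct vdata *)e->driver_data;

		printf("device %#x -> bar %d\n", e->device, v->bar);
	}
	return 0;
}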
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 3933cac1694d..3f9843fa7782 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -52,24 +52,13 @@ static const struct of_device_id sp_of_match[] = {
};
MODULE_DEVICE_TABLE(of, sp_of_match);
-static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
-{
- const struct of_device_id *match;
-
- match = of_match_node(sp_of_match, pdev->dev.of_node);
- if (match && match->data)
- return (struct sp_dev_vdata *)match->data;
-
- return NULL;
-}
-
-static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
+static const struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev)
{
const struct acpi_device_id *match;
match = acpi_match_device(sp_acpi_match, &pdev->dev);
if (match && match->driver_data)
- return (struct sp_dev_vdata *)match->driver_data;
+ return (const struct sp_dev_vdata *)match->driver_data;
return NULL;
}
@@ -123,7 +112,7 @@ static int sp_platform_probe(struct platform_device *pdev)
goto e_err;
sp->dev_specific = sp_platform;
- sp->dev_vdata = pdev->dev.of_node ? sp_get_of_version(pdev)
+ sp->dev_vdata = pdev->dev.of_node ? of_device_get_match_data(&pdev->dev)
: sp_get_acpi_version(pdev);
if (!sp->dev_vdata) {
ret = -ENODEV;
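The sp-platform hunk above drops the hand-rolled of_match_node() helper in favor of of_device_get_match_data(), which returns the matched of_device_id's .data (or NULL) in a single call. A hedged sketch of the resulting probe-side pattern; example_vdata and example_probe are stand-ins, not driver symbols:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_vdata { int bar; };	/* stand-in for sp_dev_vdata */

static int example_probe(struct platform_device *pdev)
{
	const struct example_vdata *vdata;

	/* NULL when the node matched no entry or the entry carries no .data */
	vdata = of_device_get_match_data(&pdev->dev);
	if (!vdata)
		return -ENODEV;

	return 0;
}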
diff --git a/drivers/crypto/ccree/cc_buffer_mgr.c b/drivers/crypto/ccree/cc_buffer_mgr.c
index 3963bb91321f..dc7e0cd51c25 100644
--- a/drivers/crypto/ccree/cc_buffer_mgr.c
+++ b/drivers/crypto/ccree/cc_buffer_mgr.c
@@ -1235,6 +1235,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
int rc = 0;
u32 dummy = 0;
u32 mapped_nents = 0;
+ int sg_nents;
dev_dbg(dev, " update params : curr_buff=%p curr_buff_cnt=0x%X nbytes=0x%X src=%p curr_index=%u\n",
curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
@@ -1248,7 +1249,10 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
if (total_in_len < block_size) {
dev_dbg(dev, " less than one block: curr_buff=%p *curr_buff_cnt=0x%X copy_to=%p\n",
curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
- areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
+ sg_nents = sg_nents_for_len(src, nbytes);
+ if (sg_nents < 0)
+ return sg_nents;
+ areq_ctx->in_nents = sg_nents;
sg_copy_to_buffer(src, areq_ctx->in_nents,
&curr_buff[*curr_buff_cnt], nbytes);
*curr_buff_cnt += nbytes;
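The ccree fix matters because sg_nents_for_len() returns a negative errno when the scatterlist is shorter than the requested length; stored straight into an unsigned nents field, -EINVAL would become a huge element count. A user-space analog of checking the signed return before the unsigned store (the helper and values are invented for illustration):

#include <errno.h>
#include <stdio.h>

struct req_ctx { unsigned int in_nents; };

/* Stand-in for sg_nents_for_len(): count or negative errno. */
static int nents_for_len(int available, int wanted)
{
	return wanted <= available ? wanted : -EINVAL;
}

int main(void)
{
	struct req_ctx ctx;
	int n = nents_for_len(2, 8);	/* list too short */

	if (n < 0) {
		fprintf(stderr, "error: %d\n", n);
		return 1;
	}
	ctx.in_nents = n;	/* safe: only non-negative values land here */
	printf("nents = %u\n", ctx.in_nents);
	return 0;
}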
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 5dd3f6a4781a..37294bb74003 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -4,9 +4,9 @@ config CRYPTO_DEV_CHELSIO
depends on CHELSIO_T4
select CRYPTO_LIB_AES
select CRYPTO_LIB_GF128MUL
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select CRYPTO_AUTHENC
help
The Chelsio Crypto Co-processor driver for T6 adapters.
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index be21e4e2016c..22cbc343198a 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -51,7 +51,6 @@
#include <crypto/aes.h>
#include <crypto/algapi.h>
-#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
@@ -277,88 +276,60 @@ static void get_aes_decrypt_key(unsigned char *dec_key,
}
}
-static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
+static int chcr_prepare_hmac_key(const u8 *raw_key, unsigned int raw_key_len,
+ int digestsize, void *istate, void *ostate)
{
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
-
- switch (ds) {
+ __be32 *istate32 = istate, *ostate32 = ostate;
+ __be64 *istate64 = istate, *ostate64 = ostate;
+ union {
+ struct hmac_sha1_key sha1;
+ struct hmac_sha224_key sha224;
+ struct hmac_sha256_key sha256;
+ struct hmac_sha384_key sha384;
+ struct hmac_sha512_key sha512;
+ } k;
+
+ switch (digestsize) {
case SHA1_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha1", 0, 0);
+ hmac_sha1_preparekey(&k.sha1, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha1.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha1.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha1.ostate.h[i]);
+ }
break;
case SHA224_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha224", 0, 0);
+ hmac_sha224_preparekey(&k.sha224, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha224.key.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha224.key.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha224.key.ostate.h[i]);
+ }
break;
case SHA256_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha256", 0, 0);
+ hmac_sha256_preparekey(&k.sha256, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha256.key.istate.h); i++) {
+ istate32[i] = cpu_to_be32(k.sha256.key.istate.h[i]);
+ ostate32[i] = cpu_to_be32(k.sha256.key.ostate.h[i]);
+ }
break;
case SHA384_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha384", 0, 0);
+ hmac_sha384_preparekey(&k.sha384, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha384.key.istate.h); i++) {
+ istate64[i] = cpu_to_be64(k.sha384.key.istate.h[i]);
+ ostate64[i] = cpu_to_be64(k.sha384.key.ostate.h[i]);
+ }
break;
case SHA512_DIGEST_SIZE:
- base_hash = crypto_alloc_shash("sha512", 0, 0);
+ hmac_sha512_preparekey(&k.sha512, raw_key, raw_key_len);
+ for (int i = 0; i < ARRAY_SIZE(k.sha512.key.istate.h); i++) {
+ istate64[i] = cpu_to_be64(k.sha512.key.istate.h[i]);
+ ostate64[i] = cpu_to_be64(k.sha512.key.ostate.h[i]);
+ }
break;
+ default:
+ return -EINVAL;
}
-
- return base_hash;
-}
-
-static int chcr_compute_partial_hash(struct shash_desc *desc,
- char *iopad, char *result_hash,
- int digest_size)
-{
- struct sha1_state sha1_st;
- struct sha256_state sha256_st;
- struct sha512_state sha512_st;
- int error;
-
- if (digest_size == SHA1_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha1_st);
- memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
- } else if (digest_size == SHA224_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
- } else if (digest_size == SHA256_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha256_st);
- memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
-
- } else if (digest_size == SHA384_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
-
- } else if (digest_size == SHA512_DIGEST_SIZE) {
- error = crypto_shash_init(desc) ?:
- crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
- crypto_shash_export_core(desc, &sha512_st);
- memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
- } else {
- error = -EINVAL;
- pr_err("Unknown digest size %d\n", digest_size);
- }
- return error;
-}
-
-static void chcr_change_order(char *buf, int ds)
-{
- int i;
-
- if (ds == SHA512_DIGEST_SIZE) {
- for (i = 0; i < (ds / sizeof(u64)); i++)
- *((__be64 *)buf + i) =
- cpu_to_be64(*((u64 *)buf + i));
- } else {
- for (i = 0; i < (ds / sizeof(u32)); i++)
- *((__be32 *)buf + i) =
- cpu_to_be32(*((u32 *)buf + i));
- }
+ memzero_explicit(&k, sizeof(k));
+ return 0;
}
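After chcr_prepare_hmac_key() obtains the precomputed inner/outer states from the HMAC library, the remaining work is byte order: the library hands back host-endian state words and the hardware wants them big-endian, hence the cpu_to_be32()/cpu_to_be64() loops above. A runnable sketch of just that conversion step, using htonl() in place of cpu_to_be32() and the well-known SHA-1 IV as sample data:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* SHA-1 initial state, for illustration only */
	uint32_t istate[5] = { 0x67452301, 0xefcdab89, 0x98badcfe,
			       0x10325476, 0xc3d2e1f0 };
	uint32_t wire[5];

	for (int i = 0; i < 5; i++)
		wire[i] = htonl(istate[i]);	/* cpu_to_be32() in the driver */

	printf("%08x -> %08x\n", istate[0], wire[0]);
	return 0;
}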
static inline int is_hmac(struct crypto_tfm *tfm)
@@ -1547,11 +1518,6 @@ static int get_alg_config(struct algo_param *params,
return 0;
}
-static inline void chcr_free_shash(struct crypto_shash *base_hash)
-{
- crypto_free_shash(base_hash);
-}
-
/**
* create_hash_wr - Create hash work request
* @req: Cipher req base
@@ -2202,53 +2168,13 @@ static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
- unsigned int digestsize = crypto_ahash_digestsize(tfm);
- unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
- unsigned int i, err = 0, updated_digestsize;
-
- SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
/* use the key to calculate the ipad and opad. ipad will sent with the
* first request's data. opad will be sent with the final hash result
* ipad in hmacctx->ipad and opad in hmacctx->opad location
*/
- shash->tfm = hmacctx->base_hash;
- if (keylen > bs) {
- err = crypto_shash_digest(shash, key, keylen,
- hmacctx->ipad);
- if (err)
- goto out;
- keylen = digestsize;
- } else {
- memcpy(hmacctx->ipad, key, keylen);
- }
- memset(hmacctx->ipad + keylen, 0, bs - keylen);
- unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs,
- "fortified memcpy causes -Wrestrict warning");
-
- for (i = 0; i < bs / sizeof(int); i++) {
- *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
- *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
- }
-
- updated_digestsize = digestsize;
- if (digestsize == SHA224_DIGEST_SIZE)
- updated_digestsize = SHA256_DIGEST_SIZE;
- else if (digestsize == SHA384_DIGEST_SIZE)
- updated_digestsize = SHA512_DIGEST_SIZE;
- err = chcr_compute_partial_hash(shash, hmacctx->ipad,
- hmacctx->ipad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->ipad, updated_digestsize);
-
- err = chcr_compute_partial_hash(shash, hmacctx->opad,
- hmacctx->opad, digestsize);
- if (err)
- goto out;
- chcr_change_order(hmacctx->opad, updated_digestsize);
-out:
- return err;
+ return chcr_prepare_hmac_key(key, keylen, crypto_ahash_digestsize(tfm),
+ hmacctx->ipad, hmacctx->opad);
}
static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
@@ -2344,30 +2270,11 @@ static int chcr_hmac_init(struct ahash_request *areq)
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
- unsigned int digestsize =
- crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
-
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct chcr_ahash_req_ctx));
- hmacctx->base_hash = chcr_alloc_shash(digestsize);
- if (IS_ERR(hmacctx->base_hash))
- return PTR_ERR(hmacctx->base_hash);
return chcr_device_init(crypto_tfm_ctx(tfm));
}
-static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
-{
- struct chcr_context *ctx = crypto_tfm_ctx(tfm);
- struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
-
- if (hmacctx->base_hash) {
- chcr_free_shash(hmacctx->base_hash);
- hmacctx->base_hash = NULL;
- }
-}
-
inline void chcr_aead_common_exit(struct aead_request *req)
{
struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req);
@@ -3557,15 +3464,12 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
/* it contains auth and cipher key both*/
struct crypto_authenc_keys keys;
- unsigned int bs, subtype;
+ unsigned int subtype;
unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
- int err = 0, i, key_ctx_len = 0;
+ int err = 0, key_ctx_len = 0;
unsigned char ck_size = 0;
- unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
- struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
struct algo_param param;
int align;
- u8 *o_ptr = NULL;
crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
@@ -3613,68 +3517,26 @@ static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
aeadctx->enckey_len << 3);
}
- base_hash = chcr_alloc_shash(max_authsize);
- if (IS_ERR(base_hash)) {
- pr_err("Base driver cannot be loaded\n");
+
+ align = KEYCTX_ALIGN_PAD(max_authsize);
+ err = chcr_prepare_hmac_key(keys.authkey, keys.authkeylen, max_authsize,
+ actx->h_iopad,
+ actx->h_iopad + param.result_size + align);
+ if (err)
goto out;
- }
- {
- SHASH_DESC_ON_STACK(shash, base_hash);
-
- shash->tfm = base_hash;
- bs = crypto_shash_blocksize(base_hash);
- align = KEYCTX_ALIGN_PAD(max_authsize);
- o_ptr = actx->h_iopad + param.result_size + align;
-
- if (keys.authkeylen > bs) {
- err = crypto_shash_digest(shash, keys.authkey,
- keys.authkeylen,
- o_ptr);
- if (err) {
- pr_err("Base driver cannot be loaded\n");
- goto out;
- }
- keys.authkeylen = max_authsize;
- } else
- memcpy(o_ptr, keys.authkey, keys.authkeylen);
-
- /* Compute the ipad-digest*/
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= IPAD_DATA;
-
- if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
- max_authsize))
- goto out;
- /* Compute the opad-digest */
- memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
- memcpy(pad, o_ptr, keys.authkeylen);
- for (i = 0; i < bs >> 2; i++)
- *((unsigned int *)pad + i) ^= OPAD_DATA;
- if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
- goto out;
+ key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16) +
+ (param.result_size + align) * 2;
+ aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 0, 1,
+ key_ctx_len >> 4);
+ actx->auth_mode = param.auth_mode;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return 0;
- /* convert the ipad and opad digest to network order */
- chcr_change_order(actx->h_iopad, param.result_size);
- chcr_change_order(o_ptr, param.result_size);
- key_ctx_len = sizeof(struct _key_ctx) +
- roundup(keys.enckeylen, 16) +
- (param.result_size + align) * 2;
- aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
- 0, 1, key_ctx_len >> 4);
- actx->auth_mode = param.auth_mode;
- chcr_free_shash(base_hash);
-
- memzero_explicit(&keys, sizeof(keys));
- return 0;
- }
out:
aeadctx->enckey_len = 0;
memzero_explicit(&keys, sizeof(keys));
- if (!IS_ERR(base_hash))
- chcr_free_shash(base_hash);
return -EINVAL;
}
@@ -4490,7 +4352,6 @@ static int chcr_register_alg(void)
if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
a_hash->halg.base.cra_init = chcr_hmac_cra_init;
- a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
a_hash->init = chcr_hmac_init;
a_hash->setkey = chcr_ahash_setkey;
a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 1d693b8436e6..e1e79e5f01e7 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -241,7 +241,6 @@ struct chcr_aead_ctx {
};
struct hmac_ctx {
- struct crypto_shash *base_hash;
u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
};
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 925991526745..edf36f6add52 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -913,11 +913,10 @@ static void hifn_init_pll(struct hifn_device *dev)
else
pllcfg |= HIFN_PLL_REF_CLK_HBI;
- if (hifn_pll_ref[3] != '\0')
- freq = simple_strtoul(hifn_pll_ref + 3, NULL, 10);
- else {
+ if (hifn_pll_ref[3] == '\0' ||
+ kstrtouint(hifn_pll_ref + 3, 10, &freq)) {
freq = 66;
- dev_info(&dev->pdev->dev, "assuming %uMHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
+ dev_info(&dev->pdev->dev, "assuming %u MHz clock speed, override with hifn_pll_ref=%.3s<frequency>\n",
freq, hifn_pll_ref);
}
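Unlike simple_strtoul(), kstrtouint() fails on trailing junk or overflow, so a malformed hifn_pll_ref module parameter now falls back to the 66 MHz default instead of being half-parsed. A hedged sketch of the pattern; parse_pll_freq() is an illustrative helper, not a driver function:

#include <linux/kernel.h>

static unsigned int parse_pll_freq(const char *param)
{
	unsigned int freq;

	/* param looks like "ext66": the digits start at offset 3 */
	if (param[3] == '\0' || kstrtouint(param + 3, 10, &freq))
		freq = 66;	/* default when absent or malformed */

	return freq;
}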
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 4137a8bf131f..4835bdebdbb3 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -69,7 +69,6 @@ config CRYPTO_DEV_HISI_HPRE
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
select CRYPTO_RSA
- select CRYPTO_CURVE25519
select CRYPTO_ECDH
help
Support for HiSilicon HPRE(High Performance RSA Engine)
diff --git a/drivers/crypto/hisilicon/debugfs.c b/drivers/crypto/hisilicon/debugfs.c
index 45e130b901eb..17eb236e9ee4 100644
--- a/drivers/crypto/hisilicon/debugfs.c
+++ b/drivers/crypto/hisilicon/debugfs.c
@@ -888,6 +888,7 @@ static int qm_diff_regs_init(struct hisi_qm *qm,
dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
ret = PTR_ERR(qm->debug.acc_diff_regs);
qm->debug.acc_diff_regs = NULL;
+ qm->debug.qm_diff_regs = NULL;
return ret;
}
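The one-line debugfs fix follows the usual rule for error paths: once a cached pointer's resource has been freed, the pointer itself must be cleared so later teardown cannot free or dereference it again. A tiny user-space analog (types and names invented for illustration):

#include <stdlib.h>

struct debug_state { int *qm_regs; int *acc_regs; };

static void init_fail_path(struct debug_state *d)
{
	free(d->qm_regs);
	d->qm_regs = NULL;	/* conceptually, the line this hunk adds */
	d->acc_regs = NULL;	/* already cleared by the existing code */
}

int main(void)
{
	struct debug_state d = { malloc(sizeof(int)), NULL };

	init_fail_path(&d);
	init_fail_path(&d);	/* safe now: free(NULL) is a no-op */
	return 0;
}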
diff --git a/drivers/crypto/hisilicon/hpre/hpre_crypto.c b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
index 1550c3818383..21ccf879f70c 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_crypto.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_crypto.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
-#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
@@ -106,16 +105,6 @@ struct hpre_ecdh_ctx {
dma_addr_t dma_g;
};
-struct hpre_curve25519_ctx {
- /* low address: p->a->k */
- unsigned char *p;
- dma_addr_t dma_p;
-
- /* gx coordinate */
- unsigned char *g;
- dma_addr_t dma_g;
-};
-
struct hpre_ctx {
struct hisi_qp *qp;
struct device *dev;
@@ -129,7 +118,6 @@ struct hpre_ctx {
struct hpre_rsa_ctx rsa;
struct hpre_dh_ctx dh;
struct hpre_ecdh_ctx ecdh;
- struct hpre_curve25519_ctx curve25519;
};
/* for ecc algorithms */
unsigned int curve_id;
@@ -146,7 +134,6 @@ struct hpre_asym_request {
struct akcipher_request *rsa;
struct kpp_request *dh;
struct kpp_request *ecdh;
- struct kpp_request *curve25519;
} areq;
int err;
int req_id;
@@ -1214,8 +1201,7 @@ static void hpre_key_to_big_end(u8 *data, int len)
}
}
-static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
- bool is_ecdh)
+static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
struct device *dev = ctx->dev;
unsigned int sz = ctx->key_sz;
@@ -1224,17 +1210,11 @@ static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
if (is_clear_all)
hisi_qm_stop_qp(ctx->qp);
- if (is_ecdh && ctx->ecdh.p) {
+ if (ctx->ecdh.p) {
/* ecdh: p->a->k->b */
memzero_explicit(ctx->ecdh.p + shift, sz);
dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
ctx->ecdh.p = NULL;
- } else if (!is_ecdh && ctx->curve25519.p) {
- /* curve25519: p->a->k */
- memzero_explicit(ctx->curve25519.p + shift, sz);
- dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
- ctx->curve25519.dma_p);
- ctx->curve25519.p = NULL;
}
hpre_ctx_clear(ctx, is_clear_all);
@@ -1432,7 +1412,7 @@ static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
return -EINVAL;
}
- hpre_ecc_clear_ctx(ctx, false, true);
+ hpre_ecc_clear_ctx(ctx, false);
ret = hpre_ecdh_set_param(ctx, &params);
if (ret < 0) {
@@ -1683,337 +1663,7 @@ static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_ecc_clear_ctx(ctx, true, true);
-}
-
-static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- u8 secret[CURVE25519_KEY_SIZE] = { 0 };
- unsigned int sz = ctx->key_sz;
- const struct ecc_curve *curve;
- unsigned int shift = sz << 1;
- void *p;
-
- /*
- * The key from 'buf' is in little-endian, we should preprocess it as
- * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
- * then convert it to big endian. Only in this way, the result can be
- * the same as the software curve-25519 that exists in crypto.
- */
- memcpy(secret, buf, len);
- curve25519_clamp_secret(secret);
- hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
-
- p = ctx->curve25519.p + sz - len;
-
- curve = ecc_get_curve25519();
-
- /* fill curve parameters */
- fill_curve_param(p, curve->p, len, curve->g.ndigits);
- fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
- memcpy(p + shift, secret, len);
- fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
- memzero_explicit(secret, CURVE25519_KEY_SIZE);
-}
-
-static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
-{
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- unsigned int shift = sz << 1;
-
- /* p->a->k->gx */
- if (!ctx->curve25519.p) {
- ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
- &ctx->curve25519.dma_p,
- GFP_KERNEL);
- if (!ctx->curve25519.p)
- return -ENOMEM;
- }
-
- ctx->curve25519.g = ctx->curve25519.p + shift + sz;
- ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
-
- hpre_curve25519_fill_curve(ctx, buf, len);
-
- return 0;
-}
-
-static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- int ret = -EINVAL;
-
- if (len != CURVE25519_KEY_SIZE ||
- !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "key is null or key len is not 32bytes!\n");
- return ret;
- }
-
- /* Free old secret if any */
- hpre_ecc_clear_ctx(ctx, false, false);
-
- ctx->key_sz = CURVE25519_KEY_SIZE;
- ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
- if (ret) {
- dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
- hpre_ecc_clear_ctx(ctx, false, false);
- return ret;
- }
-
- return 0;
-}
-
-static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
-{
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t dma;
-
- dma = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (src && req->src)
- dma_free_coherent(dev, ctx->key_sz, req->src, dma);
-
- dma = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
-
- if (req->dst)
- dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
- if (dst)
- dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
-}
-
-static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
-{
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req = NULL;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- int ret;
-
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.curve25519;
- areq->dst_len = ctx->key_sz;
-
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
-
- /* Do unmap before data processing */
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
-
- hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
-
- kpp_request_complete(areq, ret);
-
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
-}
-
-static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
- struct kpp_request *req)
-{
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
-
- if (unlikely(req->dst_len < ctx->key_sz)) {
- req->dst_len = ctx->key_sz;
- return -EINVAL;
- }
-
- tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, hpre_align_sz());
- h_req->cb = hpre_curve25519_cb;
- h_req->areq.curve25519 = req;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->key = cpu_to_le64(ctx->curve25519.dma_p);
-
- msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
-
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
-
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
-}
-
-static void hpre_curve25519_src_modulo_p(u8 *ptr)
-{
- int i;
-
- for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
- ptr[i] = 0;
-
- /* The modulus is ptr's last byte minus '0xed'(last byte of p) */
- ptr[i] -= 0xed;
-}
-
-static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- u8 p[CURVE25519_KEY_SIZE] = { 0 };
- const struct ecc_curve *curve;
- dma_addr_t dma = 0;
- u8 *ptr;
-
- if (len != CURVE25519_KEY_SIZE) {
- dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
- return -EINVAL;
- }
-
- ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
- if (unlikely(!ptr))
- return -ENOMEM;
-
- scatterwalk_map_and_copy(ptr, data, 0, len, 0);
-
- if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "gx is null!\n");
- goto err;
- }
-
- /*
- * Src_data(gx) is in little-endian order, MSB in the final byte should
- * be masked as described in RFC7748, then transform it to big-endian
- * form, then hisi_hpre can use the data.
- */
- ptr[31] &= 0x7f;
- hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
-
- curve = ecc_get_curve25519();
-
- fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
-
- /*
- * When src_data equals (2^255 - 19) ~ (2^255 - 1), it is out of p,
- * we get its modulus to p, and then use it.
- */
- if (memcmp(ptr, p, ctx->key_sz) == 0) {
- dev_err(dev, "gx is p!\n");
- goto err;
- } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
- hpre_curve25519_src_modulo_p(ptr);
- }
-
- hpre_req->src = ptr;
- msg->in = cpu_to_le64(dma);
- return 0;
-
-err:
- dma_free_coherent(dev, ctx->key_sz, ptr, dma);
- return -EINVAL;
-}
-
-static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
-{
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- dma_addr_t dma;
-
- if (!data || !sg_is_last(data) || len != ctx->key_sz) {
- dev_err(dev, "data or data length is illegal!\n");
- return -EINVAL;
- }
-
- hpre_req->dst = NULL;
- dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
-
- msg->out = cpu_to_le64(dma);
- return 0;
-}
-
-static int hpre_curve25519_compute_value(struct kpp_request *req)
-{
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
-
- ret = hpre_curve25519_msg_request_set(ctx, req);
- if (unlikely(ret)) {
- dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
- return ret;
- }
-
- if (req->src) {
- ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init src data, ret = %d!\n",
- ret);
- goto clear_all;
- }
- } else {
- msg->in = cpu_to_le64(ctx->curve25519.dma_g);
- }
-
- ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
- goto clear_all;
- }
-
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
-
-clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
-}
-
-static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- return ctx->key_sz;
-}
-
-static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
-
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
-}
-
-static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
-{
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
-
- hpre_ecc_clear_ctx(ctx, true, false);
+ hpre_ecc_clear_ctx(ctx, true);
}
static struct akcipher_alg rsa = {
@@ -2095,22 +1745,6 @@ static struct kpp_alg ecdh_curves[] = {
}
};
-static struct kpp_alg curve25519_alg = {
- .set_secret = hpre_curve25519_set_secret,
- .generate_public_key = hpre_curve25519_compute_value,
- .compute_shared_secret = hpre_curve25519_compute_value,
- .max_size = hpre_curve25519_max_size,
- .init = hpre_curve25519_init_tfm,
- .exit = hpre_curve25519_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "curve25519",
- .cra_driver_name = "hpre-curve25519",
- .cra_module = THIS_MODULE,
- },
-};
-
static int hpre_register_rsa(struct hisi_qm *qm)
{
int ret;
@@ -2192,28 +1826,6 @@ static void hpre_unregister_ecdh(struct hisi_qm *qm)
crypto_unregister_kpp(&ecdh_curves[i]);
}
-static int hpre_register_x25519(struct hisi_qm *qm)
-{
- int ret;
-
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return 0;
-
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
-
- return ret;
-}
-
-static void hpre_unregister_x25519(struct hisi_qm *qm)
-{
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return;
-
- crypto_unregister_kpp(&curve25519_alg);
-}
-
int hpre_algs_register(struct hisi_qm *qm)
{
int ret = 0;
@@ -2236,17 +1848,11 @@ int hpre_algs_register(struct hisi_qm *qm)
if (ret)
goto unreg_dh;
- ret = hpre_register_x25519(qm);
- if (ret)
- goto unreg_ecdh;
-
hpre_available_devs++;
mutex_unlock(&hpre_algs_lock);
return ret;
-unreg_ecdh:
- hpre_unregister_ecdh(qm);
unreg_dh:
hpre_unregister_dh(qm);
unreg_rsa:
@@ -2262,7 +1868,6 @@ void hpre_algs_unregister(struct hisi_qm *qm)
if (--hpre_available_devs)
goto unlock;
- hpre_unregister_x25519(qm);
hpre_unregister_ecdh(qm);
hpre_unregister_dh(qm);
hpre_unregister_rsa(qm);
diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c
index f5b47e5ff48a..b94fecd765ee 100644
--- a/drivers/crypto/hisilicon/hpre/hpre_main.c
+++ b/drivers/crypto/hisilicon/hpre/hpre_main.c
@@ -39,6 +39,7 @@
#define HPRE_HAC_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_FE_ENB 0x301418
#define HPRE_HAC_INT_SET 0x301500
+#define HPRE_AXI_ERROR_MASK GENMASK(21, 10)
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_RDCHN_INI_ST 0x301a00
@@ -78,6 +79,11 @@
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE BIT(30)
#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))
+#define HPRE_SVA_PREFTCH_DFX4 0x301144
+#define HPRE_WAIT_SVA_READY 500000
+#define HPRE_READ_SVA_STATUS_TIMES 3
+#define HPRE_WAIT_US_MIN 10
+#define HPRE_WAIT_US_MAX 20
/* clock gate */
#define HPRE_CLKGATE_CTL 0x301a10
@@ -466,6 +472,33 @@ struct hisi_qp *hpre_create_qp(u8 type)
return NULL;
}
+static int hpre_wait_sva_ready(struct hisi_qm *qm)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + HPRE_SVA_PREFTCH_DFX4);
+ if (val)
+ count = 0;
+ else if (++count == HPRE_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HPRE_WAIT_US_MIN, HPRE_WAIT_US_MAX);
+ } while (++try_times < HPRE_WAIT_SVA_READY);
+
+ if (try_times == HPRE_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
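hpre_wait_sva_ready() does not trust a single zero read: the DFX register must read 0 three consecutive times, with any nonzero read resetting the streak, before the SVA module counts as ready. A runnable user-space model of that debounced polling (fake_reg() simulates the register; the constants are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define READY_READS 3
#define MAX_TRIES   10

static int fake_reg(int t) { return t < 4 ? 1 : 0; }	/* busy, then idle */

static bool wait_ready(void)
{
	int count = 0;

	for (int t = 0; t < MAX_TRIES; t++) {
		if (fake_reg(t))
			count = 0;		/* any nonzero read resets the streak */
		else if (++count == READY_READS)
			return true;
	}
	return false;			/* analog of the -ETIMEDOUT path */
}

int main(void)
{
	printf("ready: %d\n", wait_ready());
	return 0;
}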
static void hpre_config_pasid(struct hisi_qm *qm)
{
u32 val1, val2;
@@ -563,7 +596,7 @@ static void disable_flr_of_bme(struct hisi_qm *qm)
writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}
-static void hpre_open_sva_prefetch(struct hisi_qm *qm)
+static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -571,20 +604,21 @@ static void hpre_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val &= HPRE_PREFETCH_ENABLE;
+ val |= HPRE_PREFETCH_DISABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
- val, !(val & HPRE_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
+ val, !(val & HPRE_SVA_DISABLE_READY),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hpre_wait_sva_ready(qm);
}
-static void hpre_close_sva_prefetch(struct hisi_qm *qm)
+static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -592,16 +626,24 @@ static void hpre_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
- val |= HPRE_PREFETCH_DISABLE;
+ val &= HPRE_PREFETCH_ENABLE;
writel(val, qm->io_base + HPRE_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
- val, !(val & HPRE_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
+ val, !(val & HPRE_PREFETCH_DISABLE),
HPRE_REG_RD_INTVRL_US,
HPRE_REG_RD_TMOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hpre_wait_sva_ready(qm);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hpre_close_sva_prefetch(qm);
}
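Both the open and close paths above now share one poll idiom: readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) re-reads addr into val until cond holds or the timeout elapses, returning -ETIMEDOUT on failure. A hedged generic wrapper showing the shape of the call, with the register and mask passed in rather than hard-coded:

#include <linux/iopoll.h>
#include <linux/types.h>

/* Poll @reg until the bits in @busy clear; returns 0 or -ETIMEDOUT. */
static int wait_bits_clear(void __iomem *reg, u32 busy,
			   unsigned long delay_us, unsigned long timeout_us)
{
	u32 val;

	return readl_relaxed_poll_timeout(reg, val, !(val & busy),
					  delay_us, timeout_us);
}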
static void hpre_enable_clock_gate(struct hisi_qm *qm)
@@ -721,6 +763,7 @@ static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
/* Config data buffer pasid needed by Kunpeng 920 */
hpre_config_pasid(qm);
+ hpre_open_sva_prefetch(qm);
hpre_enable_clock_gate(qm);
@@ -756,8 +799,7 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
if (enable) {
val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -771,38 +813,33 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hpre_hw_error_disable(struct hisi_qm *qm)
{
- u32 ce, nfe;
-
- ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* disable hpre hw error interrupts */
- writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
+ writel(err_mask, qm->io_base + HPRE_INT_MASK);
/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, false);
}
static void hpre_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe, err_en;
-
- ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* clear HPRE hw error source if having */
- writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
+ writel(err_mask, qm->io_base + HPRE_HAC_SOURCE_INT);
/* configure error type */
- writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
- writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
- writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
+ writel(dev_err->ce, qm->io_base + HPRE_RAS_CE_ENB);
+ writel(dev_err->nfe, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(dev_err->fe, qm->io_base + HPRE_RAS_FE_ENB);
/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
hpre_master_ooo_ctrl(qm, true);
/* enable hpre hw error interrupts */
- err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
- writel(~err_en, qm->io_base + HPRE_INT_MASK);
+ writel(~err_mask, qm->io_base + HPRE_INT_MASK);
}
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -1171,7 +1208,7 @@ static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(hpre_cap_query_info);
- hpre_cap = devm_kzalloc(dev, sizeof(*hpre_cap) * size, GFP_KERNEL);
+ hpre_cap = devm_kcalloc(dev, size, sizeof(*hpre_cap), GFP_KERNEL);
if (!hpre_cap)
return -ENOMEM;
@@ -1357,12 +1394,20 @@ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
- u32 nfe_mask;
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
- nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}
+static void hpre_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + HPRE_RAS_NFE_ENB);
+ writel(ce_mask, qm->io_base + HPRE_RAS_CE_ENB);
+}
+
static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 value;
@@ -1380,16 +1425,18 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
err_status = hpre_get_hw_err_status(qm);
if (err_status) {
- if (err_status & qm->err_info.ecc_2bits_mask)
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
hpre_log_hw_error(qm, err_status);
- if (err_status & qm->err_info.dev_reset_mask) {
+ if (err_status & qm->err_info.dev_err.reset_mask) {
/* Disable the same error reporting until device is recovered. */
hpre_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
}
hpre_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting, so re-enable it. */
+ hpre_enable_error_report(qm);
}
return ACC_ERR_RECOVERED;
@@ -1400,28 +1447,64 @@ static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
u32 err_status;
err_status = hpre_get_hw_err_status(qm);
- if (err_status & qm->err_info.dev_shutdown_mask)
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
return true;
return false;
}
+static void hpre_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+ u32 val;
+
+ val = ~(err_mask & (~HPRE_AXI_ERROR_MASK));
+ writel(val, qm->io_base + HPRE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~HPRE_AXI_ERROR_MASK),
+ qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
+static void hpre_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(HPRE_AXI_ERROR_MASK, qm->io_base + HPRE_HAC_SOURCE_INT);
+
+ writel(~err_mask, qm->io_base + HPRE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
+}
+
static void hpre_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+ qm_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+ dev_err->fe = HPRE_HAC_RAS_FE_ENABLE;
+ dev_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
+ HPRE_RESET_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
- err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
- err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
- HPRE_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HPRE_WR_MSI_PORT;
err_info->acpi_rst = "HRST";
}
@@ -1439,6 +1522,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
.err_info_init = hpre_err_info_init,
.get_err_result = hpre_get_err_result,
.dev_is_abnormal = hpre_dev_is_abnormal,
+ .disable_axi_error = hpre_disable_axi_error,
+ .enable_axi_error = hpre_enable_axi_error,
};
static int hpre_pf_probe_init(struct hpre *hpre)
@@ -1450,8 +1535,6 @@ static int hpre_pf_probe_init(struct hpre *hpre)
if (ret)
return ret;
- hpre_open_sva_prefetch(qm);
-
hisi_qm_dev_err_init(qm);
ret = hpre_show_last_regs_init(qm);
if (ret)
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 2e4ee7ecfdfb..f8bfff5dd0bd 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -45,6 +45,8 @@
#define QM_SQ_TYPE_MASK GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1)
+#define QM_SQC_DISABLE_QP (1U << 6)
+#define QM_XQC_RANDOM_DATA 0xaaaa
/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT 0
@@ -62,10 +64,10 @@
#define QM_EQE_AEQE_SIZE (2UL << 12)
#define QM_EQC_PHASE_SHIFT 16
-#define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
+#define QM_EQE_PHASE(dw0) (((dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK GENMASK(15, 0)
-#define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
+#define QM_AEQE_PHASE(dw0) (((dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT 17
#define QM_AEQE_TYPE_MASK 0xf
#define QM_AEQE_CQN_MASK GENMASK(15, 0)
@@ -145,9 +147,9 @@
#define QM_RAS_CE_TIMES_PER_IRQ 1
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
#define QM_AXI_RRESP_ERR BIT(0)
-#define QM_ECC_MBIT BIT(2)
#define QM_DB_TIMEOUT BIT(10)
#define QM_OF_FIFO_OF BIT(11)
+#define QM_RAS_AXI_ERROR (BIT(0) | BIT(1) | BIT(12))
#define QM_RESET_WAIT_TIMEOUT 400
#define QM_PEH_VENDOR_ID 0x1000d8
@@ -163,7 +165,6 @@
#define ACC_MASTER_TRANS_RETURN 0x300150
#define ACC_MASTER_GLOBAL_CTRL 0x300000
#define ACC_AM_CFG_PORT_WR_EN 0x30001c
-#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS 0x300104
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
#define QM_MSI_CAP_ENABLE BIT(16)
@@ -520,7 +521,7 @@ static bool qm_check_dev_error(struct hisi_qm *qm)
return false;
err_status = qm_get_hw_error_status(pf_qm);
- if (err_status & pf_qm->err_info.qm_shutdown_mask)
+ if (err_status & pf_qm->err_info.qm_err.shutdown_mask)
return true;
if (pf_qm->err_ini->dev_is_abnormal)
@@ -975,23 +976,23 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
{
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
struct hisi_qm_poll_data *poll_data = NULL;
+ u32 dw0 = le32_to_cpu(eqe->dw0);
u16 eq_depth = qm->eq_depth;
u16 cqn, eqe_num = 0;
- if (QM_EQE_PHASE(eqe) != qm->status.eqc_phase) {
+ if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) {
atomic64_inc(&qm->debug.dfx.err_irq_cnt);
qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
return;
}
- cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
+ cqn = dw0 & QM_EQE_CQN_MASK;
if (unlikely(cqn >= qm->qp_num))
return;
poll_data = &qm->poll_data[cqn];
- while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
- cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
- poll_data->qp_finish_id[eqe_num] = cqn;
+ while (QM_EQE_PHASE(dw0) == qm->status.eqc_phase) {
+ poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK;
eqe_num++;
if (qm->status.eq_head == eq_depth - 1) {
@@ -1005,6 +1006,8 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
if (eqe_num == (eq_depth >> 1) - 1)
break;
+
+ dw0 = le32_to_cpu(eqe->dw0);
}
poll_data->eqe_num = eqe_num;
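The reworked EQ loop reads dw0 once per entry and relies on the phase-bit convention: an entry is valid only while its phase bit matches the software-tracked phase, and the phase flips on every wrap of the ring. A small user-space model of consuming such a ring (the depth and bit layout are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define DEPTH 4

struct eqe { unsigned int dw0; };		/* bit 16 = phase */
#define PHASE(dw0) (((dw0) >> 16) & 0x1)

int main(void)
{
	struct eqe ring[DEPTH] = {
		{ 1u << 16 }, { 1u << 16 }, { 0 }, { 0 },	/* two valid entries */
	};
	unsigned int head = 0;
	bool phase = true;	/* software phase starts at 1 */

	while (PHASE(ring[head].dw0) == phase) {
		printf("consume eqe %u\n", head);
		if (head == DEPTH - 1) {
			phase = !phase;	/* wrapped: hardware flips its phase too */
			head = 0;
		} else {
			head++;
		}
	}
	return 0;
}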
@@ -1097,15 +1100,15 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
{
struct hisi_qm *qm = data;
struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
+ u32 dw0 = le32_to_cpu(aeqe->dw0);
u16 aeq_depth = qm->aeq_depth;
u32 type, qp_id;
atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
- while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
- type = (le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT) &
- QM_AEQE_TYPE_MASK;
- qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
+ while (QM_AEQE_PHASE(dw0) == qm->status.aeqc_phase) {
+ type = (dw0 >> QM_AEQE_TYPE_SHIFT) & QM_AEQE_TYPE_MASK;
+ qp_id = dw0 & QM_AEQE_CQN_MASK;
switch (type) {
case QM_EQ_OVERFLOW:
@@ -1133,6 +1136,7 @@ static irqreturn_t qm_aeq_thread(int irq, void *data)
aeqe++;
qm->status.aeq_head++;
}
+ dw0 = le32_to_cpu(aeqe->dw0);
}
qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
@@ -1282,6 +1286,13 @@ static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
(factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
}
break;
+ /*
+ * Note: The current logic only needs to handle the above three types.
+ * If new types are added, they must be handled here as well,
+ * otherwise undefined behavior may occur.
+ */
+ default:
+ break;
}
}
@@ -1395,17 +1406,17 @@ static void qm_hw_error_init_v1(struct hisi_qm *qm)
static void qm_hw_error_cfg(struct hisi_qm *qm)
{
- struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
- qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
+ qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe;
/* clear QM hw residual error source */
writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* configure error type */
- writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
- writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
+ writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->fe, qm->io_base + QM_RAS_FE_ENABLE);
}
static void qm_hw_error_init_v2(struct hisi_qm *qm)
@@ -1434,7 +1445,7 @@ static void qm_hw_error_init_v3(struct hisi_qm *qm)
qm_hw_error_cfg(qm);
/* enable close master ooo when hardware error happened */
- writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+ writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
irq_unmask = ~qm->error_mask;
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -1496,6 +1507,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
{
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
u32 error_status;
error_status = qm_get_hw_error_status(qm);
@@ -1504,17 +1516,16 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
qm->err_status.is_qm_ecc_mbit = true;
qm_log_hw_error(qm, error_status);
- if (error_status & qm->err_info.qm_reset_mask) {
+ if (error_status & qm_err->reset_mask) {
/* Disable the same error reporting until device is recovered. */
- writel(qm->err_info.nfe & (~error_status),
- qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->nfe & (~error_status), qm->io_base + QM_RAS_NFE_ENABLE);
return ACC_ERR_NEED_RESET;
}
/* Clear error source if not need reset. */
writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
- writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
- writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
+ writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
+ writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
}
return ACC_ERR_RECOVERED;
@@ -2651,10 +2662,10 @@ static int qm_hw_err_isolate(struct hisi_qm *qm)
}
}
list_add(&hw_err->list, &isolate->qm_hw_errs);
- mutex_unlock(&isolate->isolate_lock);
if (count >= isolate->err_threshold)
isolate->is_isolate = true;
+ mutex_unlock(&isolate->isolate_lock);
return 0;
}
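Moving mutex_unlock() below the threshold test closes a window where another CPU could grow the error list between the list update and the count >= err_threshold decision. A minimal pthread analog of keeping the update and the decision under one critical section:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int count;
static int is_isolate;

static void record_error(int threshold)
{
	pthread_mutex_lock(&lock);
	count++;			/* stands in for the list_add() */
	if (count >= threshold)		/* decision now inside the lock */
		is_isolate = 1;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	record_error(1);
	return is_isolate ? 0 : 1;
}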
@@ -2663,12 +2674,10 @@ static void qm_hw_err_destroy(struct hisi_qm *qm)
{
struct qm_hw_err *err, *tmp;
- mutex_lock(&qm->isolate_data.isolate_lock);
list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
list_del(&err->list);
kfree(err);
}
- mutex_unlock(&qm->isolate_data.isolate_lock);
}
static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
@@ -2696,10 +2705,12 @@ static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
if (qm->isolate_data.is_isolate)
return -EPERM;
+ mutex_lock(&qm->isolate_data.isolate_lock);
qm->isolate_data.err_threshold = num;
/* After the policy is updated, need to reset the hardware err list */
qm_hw_err_destroy(qm);
+ mutex_unlock(&qm->isolate_data.isolate_lock);
return 0;
}
@@ -2736,12 +2747,36 @@ static void qm_remove_uacce(struct hisi_qm *qm)
struct uacce_device *uacce = qm->uacce;
if (qm->use_sva) {
+ mutex_lock(&qm->isolate_data.isolate_lock);
qm_hw_err_destroy(qm);
+ mutex_unlock(&qm->isolate_data.isolate_lock);
+
uacce_remove(uacce);
qm->uacce = NULL;
}
}
+static void qm_uacce_api_ver_init(struct hisi_qm *qm)
+{
+ struct uacce_device *uacce = qm->uacce;
+
+ switch (qm->ver) {
+ case QM_HW_V1:
+ uacce->api_ver = HISI_QM_API_VER_BASE;
+ break;
+ case QM_HW_V2:
+ uacce->api_ver = HISI_QM_API_VER2_BASE;
+ break;
+ case QM_HW_V3:
+ case QM_HW_V4:
+ uacce->api_ver = HISI_QM_API_VER3_BASE;
+ break;
+ default:
+ uacce->api_ver = HISI_QM_API_VER5_BASE;
+ break;
+ }
+}
+
static int qm_alloc_uacce(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2776,13 +2811,6 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->priv = qm;
if (qm->ver == QM_HW_V1)
- uacce->api_ver = HISI_QM_API_VER_BASE;
- else if (qm->ver == QM_HW_V2)
- uacce->api_ver = HISI_QM_API_VER2_BASE;
- else
- uacce->api_ver = HISI_QM_API_VER3_BASE;
-
- if (qm->ver == QM_HW_V1)
mmio_page_nr = QM_DOORBELL_PAGE_NR;
else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps))
mmio_page_nr = QM_DOORBELL_PAGE_NR +
@@ -2801,6 +2829,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
qm->uacce = uacce;
+ qm_uacce_api_ver_init(qm);
INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
mutex_init(&qm->isolate_data.isolate_lock);
@@ -3003,11 +3032,36 @@ static void qm_put_pci_res(struct hisi_qm *qm)
pci_release_mem_regions(pdev);
}
+static void hisi_mig_region_clear(struct hisi_qm *qm)
+{
+ u32 val;
+
+ /* Clear migration region set of PF */
+ if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
+ val = readl(qm->io_base + QM_MIG_REGION_SEL);
+ val &= ~QM_MIG_REGION_EN;
+ writel(val, qm->io_base + QM_MIG_REGION_SEL);
+ }
+}
+
+static void hisi_mig_region_enable(struct hisi_qm *qm)
+{
+ u32 val;
+
+ /* Select migration region of PF */
+ if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V3) {
+ val = readl(qm->io_base + QM_MIG_REGION_SEL);
+ val |= QM_MIG_REGION_EN;
+ writel(val, qm->io_base + QM_MIG_REGION_SEL);
+ }
+}
+
static void hisi_qm_pci_uninit(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
pci_free_irq_vectors(pdev);
+ hisi_mig_region_clear(qm);
qm_put_pci_res(qm);
pci_disable_device(pdev);
}
@@ -3179,6 +3233,9 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
qm_init_eq_aeq_status(qm);
+ /* Before starting the device, clear the memory that will be configured for device use. */
+ memset(qm->qdma.va, 0, qm->qdma.size);
+
ret = qm_eq_ctx_cfg(qm);
if (ret) {
dev_err(dev, "Set eqc failed!\n");
@@ -3190,9 +3247,13 @@ static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
static int __hisi_qm_start(struct hisi_qm *qm)
{
+ struct device *dev = &qm->pdev->dev;
int ret;
- WARN_ON(!qm->qdma.va);
+ if (!qm->qdma.va) {
+ dev_err(dev, "qm qdma is NULL!\n");
+ return -EINVAL;
+ }
if (qm->fun_type == QM_HW_PF) {
ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
@@ -3266,7 +3327,7 @@ static int qm_restart(struct hisi_qm *qm)
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
- qp->is_resetting == true) {
+ qp->is_resetting == true && qp->is_in_kernel == true) {
ret = qm_start_qp_nolock(qp, 0);
if (ret < 0) {
dev_err(dev, "Failed to start qp%d!\n", i);
@@ -3298,24 +3359,44 @@ static void qm_stop_started_qp(struct hisi_qm *qm)
}
/**
- * qm_clear_queues() - Clear all queues memory in a qm.
- * @qm: The qm in which the queues will be cleared.
+ * qm_invalid_queues() - Invalidate all queues in use.
+ * @qm: The qm in which the queues will be invalidated.
*
- * This function clears all queues memory in a qm. Reset of accelerator can
- * use this to clear queues.
+ * This function invalidates all queues in use. If a doorbell command is
+ * sent from user space after the device has been reset, the device
+ * discards that doorbell command.
*/
-static void qm_clear_queues(struct hisi_qm *qm)
+static void qm_invalid_queues(struct hisi_qm *qm)
{
struct hisi_qp *qp;
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
int i;
+ /*
+ * Queues that were stopped normally are no longer used and do not
+ * need to be invalidated.
+ */
+ if (qm->status.stop_reason == QM_NORMAL)
+ return;
+
+ if (qm->status.stop_reason == QM_DOWN)
+ hisi_qm_cache_wb(qm);
+
for (i = 0; i < qm->qp_num; i++) {
qp = &qm->qp_array[i];
- if (qp->is_in_kernel && qp->is_resetting)
+ if (!qp->is_resetting)
+ continue;
+
+ /* Write random data and set the SQC disable bit to invalidate the queue. */
+ sqc = qm->sqc + i;
+ cqc = qm->cqc + i;
+ sqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+ sqc->w13 = cpu_to_le16(QM_SQC_DISABLE_QP);
+ cqc->w8 = cpu_to_le16(QM_XQC_RANDOM_DATA);
+ if (qp->is_in_kernel)
memset(qp->qdma.va, 0, qp->qdma.size);
}
-
- memset(qm->qdma.va, 0, qm->qdma.size);
}
/**
@@ -3372,7 +3453,7 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
}
}
- qm_clear_queues(qm);
+ qm_invalid_queues(qm);
qm->status.stop_reason = QM_NORMAL;
err_unlock:
@@ -3617,24 +3698,25 @@ static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
return 0;
}
-static int qm_clear_vft_config(struct hisi_qm *qm)
+static void qm_clear_vft_config(struct hisi_qm *qm)
{
- int ret;
u32 i;
- for (i = 1; i <= qm->vfs_num; i++) {
- ret = hisi_qm_set_vft(qm, i, 0, 0);
- if (ret)
- return ret;
- }
- qm->vfs_num = 0;
+ /*
+ * When disabling SR-IOV, clear the configuration of each VF in the hardware
+ * sequentially. Failure to clear a single VF should not affect the clearing
+ * operation of other VFs.
+ */
+ for (i = 1; i <= qm->vfs_num; i++)
+ (void)hisi_qm_set_vft(qm, i, 0, 0);
- return 0;
+ qm->vfs_num = 0;
}
static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
{
struct device *dev = &qm->pdev->dev;
+ struct qm_shaper_factor t_factor;
u32 ir = qos * QM_QOS_RATE;
int ret, total_vfs, i;
@@ -3642,6 +3724,7 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
if (fun_index > total_vfs)
return -EINVAL;
+ memcpy(&t_factor, &qm->factor[fun_index], sizeof(t_factor));
qm->factor[fun_index].func_qos = qos;
ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
@@ -3655,11 +3738,21 @@ static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
if (ret) {
dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
- return -EINVAL;
+ goto back_func_qos;
}
}
return 0;
+
+back_func_qos:
+ memcpy(&qm->factor[fun_index], &t_factor, sizeof(t_factor));
+ for (i--; i >= ALG_TYPE_0; i--) {
+ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
+ if (ret)
+ dev_err(dev, "failed to restore shaper vft during rollback!\n");
+ }
+
+ return -EINVAL;
}
static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
@@ -3826,8 +3919,14 @@ static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
}
pdev = container_of(dev, struct pci_dev, dev);
+ if (pci_physfn(pdev) != qm->pdev) {
+ pci_err(qm->pdev, "the pdev input does not match the pf!\n");
+ put_device(dev);
+ return -EINVAL;
+ }
*fun_index = pdev->devfn;
+ put_device(dev);
return 0;
}
@@ -3960,13 +4059,13 @@ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
goto err_put_sync;
}
+ qm->vfs_num = num_vfs;
ret = pci_enable_sriov(pdev, num_vfs);
if (ret) {
pci_err(pdev, "Can't enable VF!\n");
qm_clear_vft_config(qm);
goto err_put_sync;
}
- qm->vfs_num = num_vfs;
pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
@@ -4001,11 +4100,10 @@ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
}
pci_disable_sriov(pdev);
-
- qm->vfs_num = 0;
+ qm_clear_vft_config(qm);
qm_pm_put_sync(qm);
- return qm_clear_vft_config(qm);
+ return 0;
}
EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
@@ -4179,9 +4277,9 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
!qm->err_status.is_qm_ecc_mbit &&
!qm->err_ini->close_axi_master_ooo) {
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
- writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
+ writel(nfe_enb & ~qm->err_info.qm_err.ecc_2bits_mask,
qm->io_base + QM_RAS_NFE_ENABLE);
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
+ writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SET);
}
}
@@ -4447,9 +4545,6 @@ static void qm_restart_prepare(struct hisi_qm *qm)
{
u32 value;
- if (qm->err_ini->open_sva_prefetch)
- qm->err_ini->open_sva_prefetch(qm);
-
if (qm->ver >= QM_HW_V3)
return;
@@ -4463,12 +4558,12 @@ static void qm_restart_prepare(struct hisi_qm *qm)
qm->io_base + ACC_AM_CFG_PORT_WR_EN);
/* clear dev ecc 2bit error source if having */
- value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
+ value = qm_get_dev_err_status(qm) & qm->err_info.dev_err.ecc_2bits_mask;
if (value && qm->err_ini->clear_dev_hw_err_status)
qm->err_ini->clear_dev_hw_err_status(qm, value);
/* clear QM ecc mbit error source */
- writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+ writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
/* clear AM Reorder Buffer ecc mbit source */
writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
@@ -4495,6 +4590,34 @@ clear_flags:
qm->err_status.is_dev_ecc_mbit = false;
}
+static void qm_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
+ u32 val;
+
+ val = ~(qm->error_mask & (~QM_RAS_AXI_ERROR));
+ writel(val, qm->io_base + QM_ABNORMAL_INT_MASK);
+ if (qm->ver > QM_HW_V2)
+ writel(qm_err->shutdown_mask & (~QM_RAS_AXI_ERROR),
+ qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ if (qm->err_ini->disable_axi_error)
+ qm->err_ini->disable_axi_error(qm);
+}
+
+static void qm_enable_axi_error(struct hisi_qm *qm)
+{
+ /* clear axi error source */
+ writel(QM_RAS_AXI_ERROR, qm->io_base + QM_ABNORMAL_INT_SOURCE);
+
+ writel(~qm->error_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
+ if (qm->ver > QM_HW_V2)
+ writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
+
+ if (qm->err_ini->enable_axi_error)
+ qm->err_ini->enable_axi_error(qm);
+}
+
static int qm_controller_reset_done(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -4528,6 +4651,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
qm_restart_prepare(qm);
hisi_qm_dev_err_init(qm);
+ qm_disable_axi_error(qm);
if (qm->err_ini->open_axi_master_ooo)
qm->err_ini->open_axi_master_ooo(qm);
@@ -4550,7 +4674,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
ret = qm_wait_vf_prepare_finish(qm);
if (ret)
pci_err(pdev, "failed to start by vfs in soft reset!\n");
-
+ qm_enable_axi_error(qm);
qm_cmd_init(qm);
qm_restart_done(qm);
@@ -4731,6 +4855,15 @@ flr_done:
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
+static irqreturn_t qm_rsvd_irq(int irq, void *data)
+{
+ struct hisi_qm *qm = data;
+
+ dev_info(&qm->pdev->dev, "Reserved interrupt, ignore!\n");
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@@ -4760,8 +4893,6 @@ void hisi_qm_dev_shutdown(struct pci_dev *pdev)
ret = hisi_qm_stop(qm, QM_DOWN);
if (ret)
dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
-
- hisi_qm_cache_wb(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
@@ -5014,7 +5145,7 @@ static void qm_unregister_abnormal_irq(struct hisi_qm *qm)
struct pci_dev *pdev = qm->pdev;
u32 irq_vector, val;
- if (qm->fun_type == QM_HW_VF)
+ if (qm->fun_type == QM_HW_VF && qm->ver < QM_HW_V3)
return;
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
@@ -5031,17 +5162,28 @@ static int qm_register_abnormal_irq(struct hisi_qm *qm)
u32 irq_vector, val;
int ret;
- if (qm->fun_type == QM_HW_VF)
- return 0;
-
val = qm->cap_tables.qm_cap_table[QM_ABNORMAL_IRQ].cap_val;
if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK))
return 0;
-
irq_vector = val & QM_IRQ_VECTOR_MASK;
+
+ /* For VFs, this interrupt is reserved on V3 and later versions. */
+ if (qm->fun_type == QM_HW_VF) {
+ if (qm->ver < QM_HW_V3)
+ return 0;
+
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_rsvd_irq,
+ IRQF_NO_AUTOEN, qm->dev_name, qm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request reserved irq, ret = %d!\n", ret);
+ return ret;
+ }
+ return 0;
+ }
+
ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm);
if (ret)
- dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret);
+ dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d!\n", ret);
return ret;
}
@@ -5407,6 +5549,12 @@ static int hisi_qm_pci_init(struct hisi_qm *qm)
pci_set_master(pdev);
num_vec = qm_get_irq_num(qm);
+ if (!num_vec) {
+ dev_err(dev, "Device irq num is zero!\n");
+ ret = -EINVAL;
+ goto err_get_pci_res;
+ }
+ num_vec = roundup_pow_of_two(num_vec);
ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
if (ret < 0) {
dev_err(dev, "Failed to enable MSI vectors!\n");
@@ -5629,6 +5777,7 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_free_qm_memory;
qm_cmd_init(qm);
+ hisi_mig_region_enable(qm);
return 0;
@@ -5767,6 +5916,7 @@ static int qm_rebuild_for_resume(struct hisi_qm *qm)
}
qm_cmd_init(qm);
+ hisi_mig_region_enable(qm);
hisi_qm_dev_err_init(qm);
/* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */
writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG);
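The qm_func_shaper_enable() hunk above applies a snapshot-and-rollback pattern: the old shaper factor is copied aside before the QoS update, and if programming any VFT type fails, the software copy is restored and the already-programmed types are re-written from it. A minimal, self-contained C sketch of the same pattern, with invented types and a simulated hardware write (nothing below is driver API):

#include <stdio.h>

struct shaper_factor {
	unsigned int qos;
	unsigned int cir_b, cir_u, cir_s;
};

/* Pretend hardware write; fails for type 2 to simulate a partial update. */
static int hw_set_shaper(struct shaper_factor *f, int type)
{
	return (type == 2) ? -1 : 0;
}

static int shaper_update(struct shaper_factor *f, unsigned int new_qos,
			 int ntypes)
{
	struct shaper_factor snapshot = *f;	/* save old state */
	int i;

	f->qos = new_qos;
	for (i = 0; i < ntypes; i++) {
		if (hw_set_shaper(f, i))
			goto rollback;
	}
	return 0;

rollback:
	*f = snapshot;				/* restore software state */
	while (--i >= 0)			/* re-program completed steps */
		(void)hw_set_shaper(f, i);
	return -1;
}

int main(void)
{
	struct shaper_factor f = { .qos = 10 };

	if (shaper_update(&f, 50, 3))
		printf("update failed, qos rolled back to %u\n", f.qos);
	return 0;
}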
diff --git a/drivers/crypto/hisilicon/sec/sec_drv.c b/drivers/crypto/hisilicon/sec/sec_drv.c
index ef0cb733c92c..129cb6faa0b7 100644
--- a/drivers/crypto/hisilicon/sec/sec_drv.c
+++ b/drivers/crypto/hisilicon/sec/sec_drv.c
@@ -922,7 +922,8 @@ static int sec_hw_init(struct sec_dev_info *info)
struct iommu_domain *domain;
u32 sec_ipv4_mask = 0;
u32 sec_ipv6_mask[10] = {};
- u32 i, ret;
+ int ret;
+ u32 i;
domain = iommu_get_domain_for_dev(info->dev);
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index d044ded0f290..31590d01139a 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -1944,14 +1944,12 @@ static void sec_request_uninit(struct sec_req *req)
static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
struct sec_qp_ctx *qp_ctx;
- int i;
+ int i = 0;
- for (i = 0; i < ctx->sec->ctx_q_num; i++) {
+ do {
qp_ctx = &ctx->qp_ctx[i];
req->req_id = sec_alloc_req_id(req, qp_ctx);
- if (req->req_id >= 0)
- break;
- }
+ } while (req->req_id < 0 && ++i < ctx->sec->ctx_q_num);
req->qp_ctx = qp_ctx;
req->backlog = &qp_ctx->backlog;
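The sec_request_init() change above turns a for loop into a do/while so that qp_ctx is assigned on the first pass even when every context is exhausted; the caller then still knows which context was probed last. A reduced stand-alone sketch of that control flow (the structures and helper are invented):

#include <stdio.h>

#define NCTX 3

/* Simulated per-context id allocator: always exhausted here. */
static int alloc_id(int ctx)
{
	(void)ctx;
	return -1;
}

int main(void)
{
	int qp_ctx = -1;
	int req_id;
	int i = 0;

	do {
		qp_ctx = i;		/* assigned even on total failure */
		req_id = alloc_id(i);
	} while (req_id < 0 && ++i < NCTX);

	printf("req_id=%d, last context tried=%d\n", req_id, qp_ctx);
	return 0;
}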
diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
index 72cf48d1f3ab..5eb2d6820742 100644
--- a/drivers/crypto/hisilicon/sec2/sec_main.c
+++ b/drivers/crypto/hisilicon/sec2/sec_main.c
@@ -47,6 +47,8 @@
#define SEC_RAS_FE_ENB_MSK 0x0
#define SEC_OOO_SHUTDOWN_SEL 0x301014
#define SEC_RAS_DISABLE 0x0
+#define SEC_AXI_ERROR_MASK (BIT(0) | BIT(1))
+
#define SEC_MEM_START_INIT_REG 0x301100
#define SEC_MEM_INIT_DONE_REG 0x301104
@@ -93,6 +95,16 @@
#define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11)))
#define SEC_PREFETCH_DISABLE BIT(1)
#define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11))
+#define SEC_SVA_PREFETCH_INFO 0x301ED4
+#define SEC_SVA_STALL_NUM GENMASK(23, 8)
+#define SEC_SVA_PREFETCH_NUM GENMASK(2, 0)
+#define SEC_WAIT_SVA_READY 500000
+#define SEC_READ_SVA_STATUS_TIMES 3
+#define SEC_WAIT_US_MIN 10
+#define SEC_WAIT_US_MAX 20
+#define SEC_WAIT_QP_US_MIN 1000
+#define SEC_WAIT_QP_US_MAX 2000
+#define SEC_MAX_WAIT_TIMES 2000
#define SEC_DELAY_10_US 10
#define SEC_POLL_TIMEOUT_US 1000
@@ -464,6 +476,81 @@ static void sec_set_endian(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
+static int sec_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == SEC_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(SEC_WAIT_US_MIN, SEC_WAIT_US_MAX);
+ } while (++try_times < SEC_WAIT_SVA_READY);
+
+ if (try_times == SEC_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void sec_close_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val |= SEC_PREFETCH_DISABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
+ val, !(val & SEC_SVA_DISABLE_READY),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret)
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)sec_wait_sva_ready(qm, SEC_SVA_PREFETCH_INFO, SEC_SVA_STALL_NUM);
+}
+
+static void sec_open_sva_prefetch(struct hisi_qm *qm)
+{
+ u32 val;
+ int ret;
+
+ if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
+ return;
+
+ /* Enable prefetch */
+ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
+ val &= SEC_PREFETCH_ENABLE;
+ writel(val, qm->io_base + SEC_PREFETCH_CFG);
+
+ ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
+ val, !(val & SEC_PREFETCH_DISABLE),
+ SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ sec_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = sec_wait_sva_ready(qm, SEC_SVA_TRANS, SEC_SVA_PREFETCH_NUM);
+ if (ret)
+ sec_close_sva_prefetch(qm);
+}
+
static void sec_engine_sva_config(struct hisi_qm *qm)
{
u32 reg;
@@ -497,45 +584,7 @@ static void sec_engine_sva_config(struct hisi_qm *qm)
writel_relaxed(reg, qm->io_base +
SEC_INTERFACE_USER_CTRL1_REG);
}
-}
-
-static void sec_open_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- /* Enable prefetch */
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val &= SEC_PREFETCH_ENABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG,
- val, !(val & SEC_PREFETCH_DISABLE),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
-}
-
-static void sec_close_sva_prefetch(struct hisi_qm *qm)
-{
- u32 val;
- int ret;
-
- if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
- return;
-
- val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG);
- val |= SEC_PREFETCH_DISABLE;
- writel(val, qm->io_base + SEC_PREFETCH_CFG);
-
- ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS,
- val, !(val & SEC_SVA_DISABLE_READY),
- SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ sec_open_sva_prefetch(qm);
}
static void sec_enable_clock_gate(struct hisi_qm *qm)
@@ -666,8 +715,7 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + SEC_CONTROL_REG);
if (enable) {
val1 |= SEC_AXI_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= SEC_AXI_SHUTDOWN_DISABLE;
val2 = 0x0;
@@ -681,7 +729,8 @@ static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void sec_hw_error_enable(struct hisi_qm *qm)
{
- u32 ce, nfe;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
if (qm->ver == QM_HW_V1) {
writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK);
@@ -689,22 +738,19 @@ static void sec_hw_error_enable(struct hisi_qm *qm)
return;
}
- ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
- nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
-
/* clear SEC hw error source if having */
- writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE);
+ writel(err_mask, qm->io_base + SEC_CORE_INT_SOURCE);
/* enable RAS int */
- writel(ce, qm->io_base + SEC_RAS_CE_REG);
- writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG);
- writel(nfe, qm->io_base + SEC_RAS_NFE_REG);
+ writel(dev_err->ce, qm->io_base + SEC_RAS_CE_REG);
+ writel(dev_err->fe, qm->io_base + SEC_RAS_FE_REG);
+ writel(dev_err->nfe, qm->io_base + SEC_RAS_NFE_REG);
/* enable SEC block master OOO when nfe occurs on Kunpeng930 */
sec_master_ooo_ctrl(qm, true);
/* enable SEC hw error interrupts */
- writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK);
+ writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
}
static void sec_hw_error_disable(struct hisi_qm *qm)
@@ -1061,12 +1107,20 @@ static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
- u32 nfe_mask;
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
- nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG);
}
+static void sec_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + SEC_RAS_NFE_REG);
+ writel(ce_mask, qm->io_base + SEC_RAS_CE_REG);
+}
+
static void sec_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
@@ -1082,16 +1136,18 @@ static enum acc_err_result sec_get_err_result(struct hisi_qm *qm)
err_status = sec_get_hw_err_status(qm);
if (err_status) {
- if (err_status & qm->err_info.ecc_2bits_mask)
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
sec_log_hw_error(qm, err_status);
- if (err_status & qm->err_info.dev_reset_mask) {
+ if (err_status & qm->err_info.dev_err.reset_mask) {
/* Disable the same error reporting until device is recovered. */
sec_disable_error_report(qm, err_status);
return ACC_ERR_NEED_RESET;
}
sec_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ sec_enable_error_report(qm);
}
return ACC_ERR_RECOVERED;
@@ -1102,28 +1158,62 @@ static bool sec_dev_is_abnormal(struct hisi_qm *qm)
u32 err_status;
err_status = sec_get_hw_err_status(qm);
- if (err_status & qm->err_info.dev_shutdown_mask)
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
return true;
return false;
}
+static void sec_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ writel(err_mask & ~SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~SEC_AXI_ERROR_MASK),
+ qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
+static void sec_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(SEC_AXI_ERROR_MASK, qm->io_base + SEC_CORE_INT_SOURCE);
+
+ writel(err_mask, qm->io_base + SEC_CORE_INT_MASK);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + SEC_OOO_SHUTDOWN_SEL);
+}
+
static void sec_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = SEC_RAS_FE_ENB_MSK;
+ qm_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+
+ dev_err->fe = SEC_RAS_FE_ENB_MSK;
+ dev_err->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
+ SEC_RESET_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->fe = SEC_RAS_FE_ENB_MSK;
- err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC;
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info,
- SEC_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = BIT(0);
err_info->acpi_rst = "SRST";
}
@@ -1141,6 +1231,8 @@ static const struct hisi_qm_err_ini sec_err_ini = {
.err_info_init = sec_err_info_init,
.get_err_result = sec_get_err_result,
.dev_is_abnormal = sec_dev_is_abnormal,
+ .disable_axi_error = sec_disable_axi_error,
+ .enable_axi_error = sec_enable_axi_error,
};
static int sec_pf_probe_init(struct sec_dev *sec)
@@ -1152,7 +1244,6 @@ static int sec_pf_probe_init(struct sec_dev *sec)
if (ret)
return ret;
- sec_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
sec_debug_regs_clear(qm);
ret = sec_show_last_regs_init(qm);
@@ -1169,7 +1260,7 @@ static int sec_pre_store_cap_reg(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(sec_cap_query_info);
- sec_cap = devm_kzalloc(&pdev->dev, sizeof(*sec_cap) * size, GFP_KERNEL);
+ sec_cap = devm_kcalloc(&pdev->dev, size, sizeof(*sec_cap), GFP_KERNEL);
if (!sec_cap)
return -ENOMEM;
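sec_wait_sva_ready() above does not accept the first zero read as proof that the SVA module is idle: the busy field must read as zero several consecutive times, so a momentary gap between transactions is not mistaken for quiescence. A self-contained sketch of that debounce, with the register read replaced by a canned sequence (all names invented):

#include <stdio.h>

#define READY_READS	3	/* consecutive zero reads required */
#define MAX_TRIES	20

static unsigned int fake_status[] = { 4, 0, 2, 0, 0, 0, 0 };

static unsigned int read_status(unsigned int i)
{
	return i < sizeof(fake_status) / sizeof(fake_status[0]) ?
	       fake_status[i] : 0;
}

static int wait_idle(unsigned int busy_mask)
{
	unsigned int tries, count = 0;

	for (tries = 0; tries < MAX_TRIES; tries++) {
		if (read_status(tries) & busy_mask)
			count = 0;		/* still busy: restart */
		else if (++count == READY_READS)
			return 0;		/* stably idle */
		/* a real driver would usleep_range() here */
	}
	return -1;				/* timed out */
}

int main(void)
{
	printf("wait_idle: %s\n", wait_idle(0x7) ? "timeout" : "ready");
	return 0;
}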
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 7a9ef2a9972a..24c7b6ab285b 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -245,11 +245,6 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl,
}
curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
- if (IS_ERR(curr_hw_sgl)) {
- dev_err(dev, "Get SGL error!\n");
- ret = -ENOMEM;
- goto err_unmap;
- }
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
diff --git a/drivers/crypto/hisilicon/zip/dae_main.c b/drivers/crypto/hisilicon/zip/dae_main.c
index 6f22e4c36e49..68aebd02fc84 100644
--- a/drivers/crypto/hisilicon/zip/dae_main.c
+++ b/drivers/crypto/hisilicon/zip/dae_main.c
@@ -15,6 +15,7 @@
#define DAE_REG_RD_TMOUT_US USEC_PER_SEC
#define DAE_ALG_NAME "hashagg"
+#define DAE_V5_ALG_NAME "hashagg\nudma\nhashjoin\ngather"
/* error */
#define DAE_AXI_CFG_OFFSET 0x331000
@@ -82,6 +83,7 @@ int hisi_dae_set_user_domain(struct hisi_qm *qm)
int hisi_dae_set_alg(struct hisi_qm *qm)
{
+ const char *alg_name;
size_t len;
if (!dae_is_support(qm))
@@ -90,9 +92,14 @@ int hisi_dae_set_alg(struct hisi_qm *qm)
if (!qm->uacce)
return 0;
+ if (qm->ver >= QM_HW_V5)
+ alg_name = DAE_V5_ALG_NAME;
+ else
+ alg_name = DAE_ALG_NAME;
+
len = strlen(qm->uacce->algs);
/* A line break may be required */
- if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
+ if (len + strlen(alg_name) + 1 >= QM_DEV_ALG_MAX_LEN) {
pci_err(qm->pdev, "algorithm name is too long!\n");
return -EINVAL;
}
@@ -100,7 +107,7 @@ int hisi_dae_set_alg(struct hisi_qm *qm)
if (len)
strcat((char *)qm->uacce->algs, "\n");
- strcat((char *)qm->uacce->algs, DAE_ALG_NAME);
+ strcat((char *)qm->uacce->algs, alg_name);
return 0;
}
@@ -168,6 +175,12 @@ static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
}
+static void hisi_dae_enable_error_report(struct hisi_qm *qm)
+{
+ writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
+ writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
+}
+
static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
{
const struct hisi_dae_hw_error *err = dae_hw_error;
@@ -209,6 +222,8 @@ enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
return ACC_ERR_NEED_RESET;
}
hisi_dae_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ hisi_dae_enable_error_report(qm);
return ACC_ERR_RECOVERED;
}
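hisi_dae_set_alg() above appends the new algorithm name to a newline-separated list held in a fixed-size buffer, first checking that the name plus a possible separator still fits. A stand-alone sketch of that bounded append (buffer size and names here are invented):

#include <stdio.h>
#include <string.h>

#define ALG_MAX_LEN 32

static int append_alg(char *algs, const char *name)
{
	size_t len = strlen(algs);

	/* +1 for the separating '\n' that may be required */
	if (len + strlen(name) + 1 >= ALG_MAX_LEN)
		return -1;

	if (len)
		strcat(algs, "\n");
	strcat(algs, name);
	return 0;
}

int main(void)
{
	char algs[ALG_MAX_LEN] = "zlib";

	if (!append_alg(algs, "hashagg"))
		printf("algs:\n%s\n", algs);
	return 0;
}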
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index d8ba23b7cc7d..4fcbe6bada06 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -65,6 +65,7 @@
#define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16
#define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24
#define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0)
+#define HZIP_AXI_ERROR_MASK (BIT(2) | BIT(3))
#define HZIP_SQE_SIZE 128
#define HZIP_PF_DEF_Q_NUM 64
#define HZIP_PF_DEF_Q_BASE 0
@@ -80,6 +81,7 @@
#define HZIP_ALG_GZIP_BIT GENMASK(3, 2)
#define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4)
#define HZIP_ALG_LZ77_BIT GENMASK(7, 6)
+#define HZIP_ALG_LZ4_BIT GENMASK(9, 8)
#define HZIP_BUF_SIZE 22
#define HZIP_SQE_MASK_OFFSET 64
@@ -95,10 +97,16 @@
#define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0)))
#define HZIP_SVA_PREFETCH_DISABLE BIT(26)
#define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30))
+#define HZIP_SVA_PREFETCH_NUM GENMASK(18, 16)
+#define HZIP_SVA_STALL_NUM GENMASK(15, 0)
#define HZIP_SHAPER_RATE_COMPRESS 750
#define HZIP_SHAPER_RATE_DECOMPRESS 140
-#define HZIP_DELAY_1_US 1
-#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_DELAY_1_US 1
+#define HZIP_POLL_TIMEOUT_US 1000
+#define HZIP_WAIT_SVA_READY 500000
+#define HZIP_READ_SVA_STATUS_TIMES 3
+#define HZIP_WAIT_US_MIN 10
+#define HZIP_WAIT_US_MAX 20
/* clock gating */
#define HZIP_PEH_CFG_AUTO_GATE 0x3011A8
@@ -111,6 +119,9 @@
/* zip comp high performance */
#define HZIP_HIGH_PERF_OFFSET 0x301208
+#define HZIP_LIT_LEN_EN_OFFSET 0x301204
+#define HZIP_LIT_LEN_EN_EN BIT(4)
+
enum {
HZIP_HIGH_COMP_RATE,
HZIP_HIGH_COMP_PERF,
@@ -141,6 +152,12 @@ static const struct qm_dev_alg zip_dev_algs[] = { {
}, {
.alg_msk = HZIP_ALG_LZ77_BIT,
.alg = "lz77_zstd\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ77_BIT,
+ .alg = "lz77_only\n",
+ }, {
+ .alg_msk = HZIP_ALG_LZ4_BIT,
+ .alg = "lz4\n",
},
};
@@ -448,10 +465,23 @@ bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg)
return false;
}
-static int hisi_zip_set_high_perf(struct hisi_qm *qm)
+static void hisi_zip_literal_set(struct hisi_qm *qm)
+{
+ u32 val;
+
+ if (qm->ver < QM_HW_V3)
+ return;
+
+ val = readl_relaxed(qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
+ val &= ~HZIP_LIT_LEN_EN_EN;
+
+ /* enable literal length in stream mode compression */
+ writel(val, qm->io_base + HZIP_LIT_LEN_EN_OFFSET);
+}
+
+static void hisi_zip_set_high_perf(struct hisi_qm *qm)
{
u32 val;
- int ret;
val = readl_relaxed(qm->io_base + HZIP_HIGH_PERF_OFFSET);
if (perf_mode == HZIP_HIGH_COMP_PERF)
@@ -461,16 +491,36 @@ static int hisi_zip_set_high_perf(struct hisi_qm *qm)
/* Set perf mode */
writel(val, qm->io_base + HZIP_HIGH_PERF_OFFSET);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_HIGH_PERF_OFFSET,
- val, val == perf_mode, HZIP_DELAY_1_US,
- HZIP_POLL_TIMEOUT_US);
- if (ret)
- pci_err(qm->pdev, "failed to set perf mode\n");
+}
- return ret;
+static int hisi_zip_wait_sva_ready(struct hisi_qm *qm, __u32 offset, __u32 mask)
+{
+ u32 val, try_times = 0;
+ u8 count = 0;
+
+ /*
+ * Read the register value every 10-20us. If the value is 0 for three
+ * consecutive times, the SVA module is ready.
+ */
+ do {
+ val = readl(qm->io_base + offset);
+ if (val & mask)
+ count = 0;
+ else if (++count == HZIP_READ_SVA_STATUS_TIMES)
+ break;
+
+ usleep_range(HZIP_WAIT_US_MIN, HZIP_WAIT_US_MAX);
+ } while (++try_times < HZIP_WAIT_SVA_READY);
+
+ if (try_times == HZIP_WAIT_SVA_READY) {
+ pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
-static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -478,19 +528,20 @@ static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
- /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val &= HZIP_PREFETCH_ENABLE;
+ val |= HZIP_SVA_PREFETCH_DISABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
- val, !(val & HZIP_SVA_PREFETCH_DISABLE),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
+ val, !(val & HZIP_SVA_DISABLE_READY),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
if (ret)
- pci_err(qm->pdev, "failed to open sva prefetch\n");
+ pci_err(qm->pdev, "failed to close sva prefetch\n");
+
+ (void)hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_STALL_NUM);
}
-static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
+static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm)
{
u32 val;
int ret;
@@ -498,15 +549,23 @@ static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm)
if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
return;
+ /* Enable prefetch */
val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG);
- val |= HZIP_SVA_PREFETCH_DISABLE;
+ val &= HZIP_PREFETCH_ENABLE;
writel(val, qm->io_base + HZIP_PREFETCH_CFG);
- ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS,
- val, !(val & HZIP_SVA_DISABLE_READY),
+ ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG,
+ val, !(val & HZIP_SVA_PREFETCH_DISABLE),
HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US);
+ if (ret) {
+ pci_err(qm->pdev, "failed to open sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
+ return;
+ }
+
+ ret = hisi_zip_wait_sva_ready(qm, HZIP_SVA_TRANS, HZIP_SVA_PREFETCH_NUM);
if (ret)
- pci_err(qm->pdev, "failed to close sva prefetch\n");
+ hisi_zip_close_sva_prefetch(qm);
}
static void hisi_zip_enable_clock_gate(struct hisi_qm *qm)
@@ -530,6 +589,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
void __iomem *base = qm->io_base;
u32 dcomp_bm, comp_bm;
u32 zip_core_en;
+ int ret;
/* qm user domain */
writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1);
@@ -565,6 +625,7 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63);
writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63);
}
+ hisi_zip_open_sva_prefetch(qm);
/* let's open all compression/decompression cores */
@@ -580,9 +641,19 @@ static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm)
CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) |
FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL);
+ hisi_zip_set_high_perf(qm);
+ hisi_zip_literal_set(qm);
hisi_zip_enable_clock_gate(qm);
- return hisi_dae_set_user_domain(qm);
+ ret = hisi_dae_set_user_domain(qm);
+ if (ret)
+ goto close_sva_prefetch;
+
+ return 0;
+
+close_sva_prefetch:
+ hisi_zip_close_sva_prefetch(qm);
+ return ret;
}
static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
@@ -592,8 +663,7 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL);
if (enable) {
val1 |= HZIP_AXI_SHUTDOWN_ENABLE;
- val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ val2 = qm->err_info.dev_err.shutdown_mask;
} else {
val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE;
val2 = 0x0;
@@ -607,7 +677,8 @@ static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
{
- u32 nfe, ce;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
if (qm->ver == QM_HW_V1) {
writel(HZIP_CORE_INT_MASK_ALL,
@@ -616,33 +687,29 @@ static void hisi_zip_hw_error_enable(struct hisi_qm *qm)
return;
}
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
-
/* clear ZIP hw error source if having */
- writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+ writel(err_mask, qm->io_base + HZIP_CORE_INT_SOURCE);
/* configure error type */
- writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
- writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
- writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(dev_err->ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+ writel(dev_err->fe, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB);
+ writel(dev_err->nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
hisi_zip_master_ooo_ctrl(qm, true);
/* enable ZIP hw error interrupts */
- writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_dae_hw_error_enable(qm);
}
static void hisi_zip_hw_error_disable(struct hisi_qm *qm)
{
- u32 nfe, ce;
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
/* disable ZIP hw error interrupts */
- nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
- ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
- writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG);
+ writel(err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
hisi_zip_master_ooo_ctrl(qm, false);
@@ -1116,12 +1183,20 @@ static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
- u32 nfe_mask;
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
- nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
}
+static void hisi_zip_enable_error_report(struct hisi_qm *qm)
+{
+ u32 nfe_mask = qm->err_info.dev_err.nfe;
+ u32 ce_mask = qm->err_info.dev_err.ce;
+
+ writel(nfe_mask, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB);
+ writel(ce_mask, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB);
+}
+
static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm)
{
u32 val;
@@ -1160,16 +1235,18 @@ static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm)
/* Get device hardware new error status */
err_status = hisi_zip_get_hw_err_status(qm);
if (err_status) {
- if (err_status & qm->err_info.ecc_2bits_mask)
+ if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
qm->err_status.is_dev_ecc_mbit = true;
hisi_zip_log_hw_error(qm, err_status);
- if (err_status & qm->err_info.dev_reset_mask) {
+ if (err_status & qm->err_info.dev_err.reset_mask) {
/* Disable the same error reporting until device is recovered. */
hisi_zip_disable_error_report(qm, err_status);
- return ACC_ERR_NEED_RESET;
+ zip_result = ACC_ERR_NEED_RESET;
} else {
hisi_zip_clear_hw_err_status(qm, err_status);
+ /* The firmware may have disabled error reporting; re-enable it. */
+ hisi_zip_enable_error_report(qm);
}
}
@@ -1185,7 +1262,7 @@ static bool hisi_zip_dev_is_abnormal(struct hisi_qm *qm)
u32 err_status;
err_status = hisi_zip_get_hw_err_status(qm);
- if (err_status & qm->err_info.dev_shutdown_mask)
+ if (err_status & qm->err_info.dev_err.shutdown_mask)
return true;
return hisi_dae_dev_is_abnormal(qm);
@@ -1196,23 +1273,59 @@ static int hisi_zip_set_priv_status(struct hisi_qm *qm)
return hisi_dae_close_axi_master_ooo(qm);
}
+static void hisi_zip_disable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+ u32 val;
+
+ val = ~(err_mask & (~HZIP_AXI_ERROR_MASK));
+ writel(val, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask & (~HZIP_AXI_ERROR_MASK),
+ qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
+static void hisi_zip_enable_axi_error(struct hisi_qm *qm)
+{
+ struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
+ u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
+
+ /* clear axi error source */
+ writel(HZIP_AXI_ERROR_MASK, qm->io_base + HZIP_CORE_INT_SOURCE);
+
+ writel(~err_mask, qm->io_base + HZIP_CORE_INT_MASK_REG);
+
+ if (qm->ver > QM_HW_V2)
+ writel(dev_err->shutdown_mask, qm->io_base + HZIP_OOO_SHUTDOWN_SEL);
+}
+
static void hisi_zip_err_info_init(struct hisi_qm *qm)
{
struct hisi_qm_err_info *err_info = &qm->err_info;
+ struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
+ struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
+
+ qm_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ qm_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
+ qm_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
+ qm_err->ecc_2bits_mask = QM_ECC_MBIT;
+ qm_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
+ qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+
+ dev_err->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
+ dev_err->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver);
+ dev_err->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver);
+ dev_err->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
+ dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
+ dev_err->reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
+ ZIP_RESET_MASK_CAP, qm->cap_ver);
- err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK;
- err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver);
- err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_NFE_MASK_CAP, qm->cap_ver);
- err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC;
- err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
- err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_QM_RESET_MASK_CAP, qm->cap_ver);
- err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info,
- ZIP_RESET_MASK_CAP, qm->cap_ver);
err_info->msi_wr_port = HZIP_WR_PORT;
err_info->acpi_rst = "ZRST";
}
@@ -1232,6 +1345,8 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = {
.get_err_result = hisi_zip_get_err_result,
.set_priv_status = hisi_zip_set_priv_status,
.dev_is_abnormal = hisi_zip_dev_is_abnormal,
+ .disable_axi_error = hisi_zip_disable_axi_error,
+ .enable_axi_error = hisi_zip_enable_axi_error,
};
static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
@@ -1251,11 +1366,6 @@ static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip)
if (ret)
return ret;
- ret = hisi_zip_set_high_perf(qm);
- if (ret)
- return ret;
-
- hisi_zip_open_sva_prefetch(qm);
hisi_qm_dev_err_init(qm);
hisi_zip_debug_regs_clear(qm);
@@ -1273,7 +1383,7 @@ static int zip_pre_store_cap_reg(struct hisi_qm *qm)
size_t i, size;
size = ARRAY_SIZE(zip_cap_query_info);
- zip_cap = devm_kzalloc(&pdev->dev, sizeof(*zip_cap) * size, GFP_KERNEL);
+ zip_cap = devm_kcalloc(&pdev->dev, size, sizeof(*zip_cap), GFP_KERNEL);
if (!zip_cap)
return -ENOMEM;
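Both pre-store-capability hunks in this series swap devm_kzalloc(dev, sizeof(*cap) * size, ...) for devm_kcalloc(dev, size, sizeof(*cap), ...): the open-coded multiplication can wrap silently, while the calloc-style helper rejects an overflowing request. A userspace analogue of the check the helper performs (a sketch, not the kernel implementation):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

static void *checked_calloc(size_t n, size_t size)
{
	if (size && n > (size_t)-1 / size)	/* n * size would overflow */
		return NULL;
	return calloc(n, size);
}

int main(void)
{
	/* (SIZE_MAX / 2 + 1) * 2 wraps to 0 with a plain multiply. */
	void *p = checked_calloc((size_t)-1 / 2 + 1, 2);

	printf("overflowing request rejected: %s\n", p ? "no" : "yes");
	free(p);
	return 0;
}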
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 76b7ecb5624b..f22c12e36b56 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -700,7 +700,7 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
{
- return img_hash_cra_init(tfm, "md5-generic");
+ return img_hash_cra_init(tfm, "md5-lib");
}
static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 23f585219fb4..d0058757b000 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -805,7 +805,7 @@ static int save_iaa_wq(struct idxd_wq *wq)
if (!cpus_per_iaa)
cpus_per_iaa = 1;
out:
- return 0;
+ return ret;
}
static void remove_iaa_wq(struct idxd_wq *wq)
diff --git a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
index 8f9e21ced0fe..48281d882260 100644
--- a/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
+++ b/drivers/crypto/intel/keembay/keembay-ocs-hcu-core.c
@@ -232,7 +232,7 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
struct device *dev = rctx->hcu_dev->dev;
unsigned int remainder = 0;
unsigned int total;
- size_t nents;
+ int nents;
size_t count;
int rc;
int i;
@@ -253,6 +253,9 @@ static int kmb_ocs_dma_prepare(struct ahash_request *req)
/* Determine the number of scatter gather list entries to process. */
nents = sg_nents_for_len(req->src, rctx->sg_data_total - remainder);
+ if (nents < 0)
+ return nents;
+
/* If there are entries to process, map them. */
if (nents) {
rctx->sg_dma_nents = dma_map_sg(dev, req->src, nents,
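The kmb_ocs_dma_prepare() fix above changes nents from size_t to int because sg_nents_for_len() reports failure as a negative errno, and a negative value stored in an unsigned type can never satisfy a < 0 test. A compact illustration (the helper below is invented):

#include <stdio.h>

/* Stand-in for a helper that returns a count or a negative errno. */
static int count_entries(int fail)
{
	return fail ? -22 /* -EINVAL */ : 4;
}

int main(void)
{
	size_t wrong = count_entries(1);	/* -22 wraps to SIZE_MAX - 21 */
	int right = count_entries(1);

	/* The unsigned copy hides the error behind a huge "count". */
	printf("unsigned copy: %zu\n", wrong);
	if (right < 0)
		printf("signed copy reports errno %d\n", right);
	return 0;
}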
diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig
index 359c61f0c8a1..4b4861460dd4 100644
--- a/drivers/crypto/intel/qat/Kconfig
+++ b/drivers/crypto/intel/qat/Kconfig
@@ -6,12 +6,11 @@ config CRYPTO_DEV_QAT
select CRYPTO_SKCIPHER
select CRYPTO_AKCIPHER
select CRYPTO_DH
- select CRYPTO_HMAC
select CRYPTO_RSA
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- select CRYPTO_SHA512
select CRYPTO_LIB_AES
+ select CRYPTO_LIB_SHA1
+ select CRYPTO_LIB_SHA256
+ select CRYPTO_LIB_SHA512
select FW_LOADER
select CRC8
diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c
index 35679b21ff63..11728cf32653 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c
@@ -105,7 +105,6 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev)
accel_dev->accel_id);
hw_device->reset_device(accel_dev);
pci_restore_state(pdev);
- pci_save_state(pdev);
}
}
@@ -204,7 +203,6 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
if (!pdev->is_busmaster)
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
res = adf_dev_up(accel_dev, false);
if (res && res != -EALREADY)
return PCI_ERS_RESULT_DISCONNECT;
@@ -276,11 +274,11 @@ int adf_notify_fatal_error(struct adf_accel_dev *accel_dev)
int adf_init_aer(void)
{
device_reset_wq = alloc_workqueue("qat_device_reset_wq",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!device_reset_wq)
return -EFAULT;
- device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0);
+ device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", WQ_PERCPU, 0);
if (!device_sriov_wq) {
destroy_workqueue(device_reset_wq);
device_reset_wq = NULL;
diff --git a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
index 48c62a14a6a7..c2e6f0cb7480 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c
@@ -89,26 +89,14 @@ err_chrdev_unreg:
return -EFAULT;
}
-static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
- unsigned long arg)
+static struct adf_user_cfg_ctl_data *adf_ctl_alloc_resources(unsigned long arg)
{
struct adf_user_cfg_ctl_data *cfg_data;
- cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
- if (!cfg_data)
- return -ENOMEM;
-
- /* Initialize device id to NO DEVICE as 0 is a valid device id */
- cfg_data->device_id = ADF_CFG_NO_DEVICE;
-
- if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+ cfg_data = memdup_user((void __user *)arg, sizeof(*cfg_data));
+ if (IS_ERR(cfg_data))
pr_err("QAT: failed to copy from user cfg_data.\n");
- kfree(cfg_data);
- return -EIO;
- }
-
- *ctl_data = cfg_data;
- return 0;
+ return cfg_data;
}
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
@@ -188,13 +176,13 @@ out_err:
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
unsigned long arg)
{
- int ret;
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
+ int ret = 0;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
if (!accel_dev) {
@@ -267,9 +255,9 @@ static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
int ret;
struct adf_user_cfg_ctl_data *ctl_data;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
if (adf_devmgr_verify_id(ctl_data->device_id)) {
pr_err("QAT: Device %d not found\n", ctl_data->device_id);
@@ -301,9 +289,9 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
struct adf_user_cfg_ctl_data *ctl_data;
struct adf_accel_dev *accel_dev;
- ret = adf_ctl_alloc_resources(&ctl_data, arg);
- if (ret)
- return ret;
+ ctl_data = adf_ctl_alloc_resources(arg);
+ if (IS_ERR(ctl_data))
+ return PTR_ERR(ctl_data);
ret = -ENODEV;
accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
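The reworked adf_ctl_alloc_resources() above returns the buffer itself and signals failure through the pointer, the kernel's ERR_PTR/IS_ERR convention that memdup_user() already follows. A self-contained model of that convention; the real macros live in <linux/err.h>, and everything below is a simplified re-creation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO	4095

static inline void *err_ptr(long error)
{
	return (void *)error;	/* lives in the top page of address space */
}

static inline long ptr_err(const void *ptr)
{
	return (long)ptr;
}

static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Duplicate a caller-supplied buffer, or return an encoded errno. */
static void *dup_buffer(const void *src, size_t len)
{
	void *p = malloc(len);

	if (!p)
		return err_ptr(-12 /* -ENOMEM */);
	memcpy(p, src, len);
	return p;
}

int main(void)
{
	char msg[] = "config";
	void *p = dup_buffer(msg, sizeof(msg));

	if (is_err(p)) {
		printf("failed: %ld\n", ptr_err(p));
		return 1;
	}
	printf("copied: %s\n", (char *)p);
	free(p);
	return 0;
}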
diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
index cf804f95838a..faa60b04c406 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_gen6_tl.c
@@ -21,6 +21,25 @@
#define SLICE_IDX(sl) offsetof(struct icp_qat_fw_init_admin_slice_cnt, sl##_cnt)
+#define ADF_GEN6_TL_CMDQ_WAIT_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_wait_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_wait_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_EXEC_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_exec_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_exec_cnt, gen6))
+#define ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(_name) \
+ ADF_TL_COUNTER("cmdq_drain_" #_name, ADF_TL_SIMPLE_COUNT, \
+ ADF_TL_CMDQ_REG_OFF(_name, reg_tm_cmdq_drain_cnt, \
+ gen6))
+
+#define CPR_QUEUE_COUNT 5
+#define DCPR_QUEUE_COUNT 3
+#define PKE_QUEUE_COUNT 1
+#define WAT_QUEUE_COUNT 7
+#define WCP_QUEUE_COUNT 7
+#define USC_QUEUE_COUNT 3
+#define ATH_QUEUE_COUNT 2
+
/* Device level counters. */
static const struct adf_tl_dbg_counter dev_counters[] = {
/* PCIe partial transactions. */
@@ -57,6 +76,10 @@ static const struct adf_tl_dbg_counter dev_counters[] = {
/* Maximum uTLB used. */
ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT,
ADF_GEN6_TL_DEV_REG_OFF(reg_tl_at_max_utlb_used)),
+ /* Ring Empty average[ns] across all rings */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_DEV_REG_OFF(reg_tl_re_cnt)),
};
/* Accelerator utilization counters */
@@ -95,6 +118,80 @@ static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = {
[SLICE_IDX(ath)] = ADF_GEN6_TL_SL_EXEC_COUNTER(ath),
};
+static const struct adf_tl_dbg_counter cnv_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(cnv),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(cnv)
+};
+
+#define NUM_CMDQ_COUNTERS ARRAY_SIZE(cnv_cmdq_counters)
+
+static const struct adf_tl_dbg_counter dcprz_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(dcprz),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(dcprz)
+};
+
+static_assert(ARRAY_SIZE(dcprz_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter pke_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(pke),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(pke)
+};
+
+static_assert(ARRAY_SIZE(pke_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wat_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wat),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wat)
+};
+
+static_assert(ARRAY_SIZE(wat_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter wcp_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(wcp),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(wcp)
+};
+
+static_assert(ARRAY_SIZE(wcp_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ucs_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ucs),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ucs)
+};
+
+static_assert(ARRAY_SIZE(ucs_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+static const struct adf_tl_dbg_counter ath_cmdq_counters[] = {
+ ADF_GEN6_TL_CMDQ_WAIT_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_EXEC_COUNTER(ath),
+ ADF_GEN6_TL_CMDQ_DRAIN_COUNTER(ath)
+};
+
+static_assert(ARRAY_SIZE(ath_cmdq_counters) == NUM_CMDQ_COUNTERS);
+
+/* Per-slice-type command queue counter tables. */
+static const struct adf_tl_dbg_counter *cmdq_counters[ADF_TL_SL_CNT_COUNT] = {
+ /* Compression accelerator command queue counters. */
+ [SLICE_IDX(cpr)] = cnv_cmdq_counters,
+ /* Decompression accelerator command queue counters. */
+ [SLICE_IDX(dcpr)] = dcprz_cmdq_counters,
+ /* PKE command queue counters. */
+ [SLICE_IDX(pke)] = pke_cmdq_counters,
+ /* Wireless Authentication accelerator command queue counters. */
+ [SLICE_IDX(wat)] = wat_cmdq_counters,
+ /* Wireless Cipher accelerator command queue counters. */
+ [SLICE_IDX(wcp)] = wcp_cmdq_counters,
+ /* UCS accelerator command queue counters. */
+ [SLICE_IDX(ucs)] = ucs_cmdq_counters,
+ /* Authentication accelerator command queue counters. */
+ [SLICE_IDX(ath)] = ath_cmdq_counters,
+};
+
/* Ring pair counters. */
static const struct adf_tl_dbg_counter rp_counters[] = {
/* PCIe partial transactions. */
@@ -122,12 +219,17 @@ static const struct adf_tl_dbg_counter rp_counters[] = {
/* Payload DevTLB miss rate. */
ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT,
ADF_GEN6_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)),
+ /* Ring Empty average[ns]. */
+ ADF_TL_COUNTER_LATENCY(RE_ACC_NAME, ADF_TL_COUNTER_NS_AVG,
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_acc),
+ ADF_GEN6_TL_RP_REG_OFF(reg_tl_re_cnt)),
};
void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
{
tl_data->layout_sz = ADF_GEN6_TL_LAYOUT_SZ;
tl_data->slice_reg_sz = ADF_GEN6_TL_SLICE_REG_SZ;
+ tl_data->cmdq_reg_sz = ADF_GEN6_TL_CMDQ_REG_SZ;
tl_data->rp_reg_sz = ADF_GEN6_TL_RP_REG_SZ;
tl_data->num_hbuff = ADF_GEN6_TL_NUM_HIST_BUFFS;
tl_data->max_rp = ADF_GEN6_TL_MAX_RP_NUM;
@@ -139,8 +241,18 @@ void adf_gen6_init_tl_data(struct adf_tl_hw_data *tl_data)
tl_data->num_dev_counters = ARRAY_SIZE(dev_counters);
tl_data->sl_util_counters = sl_util_counters;
tl_data->sl_exec_counters = sl_exec_counters;
+ tl_data->cmdq_counters = cmdq_counters;
+ tl_data->num_cmdq_counters = NUM_CMDQ_COUNTERS;
tl_data->rp_counters = rp_counters;
tl_data->num_rp_counters = ARRAY_SIZE(rp_counters);
tl_data->max_sl_cnt = ADF_GEN6_TL_MAX_SLICES_PER_TYPE;
+
+ tl_data->multiplier.cpr_cnt = CPR_QUEUE_COUNT;
+ tl_data->multiplier.dcpr_cnt = DCPR_QUEUE_COUNT;
+ tl_data->multiplier.pke_cnt = PKE_QUEUE_COUNT;
+ tl_data->multiplier.wat_cnt = WAT_QUEUE_COUNT;
+ tl_data->multiplier.wcp_cnt = WCP_QUEUE_COUNT;
+ tl_data->multiplier.ucs_cnt = USC_QUEUE_COUNT;
+ tl_data->multiplier.ath_cnt = ATH_QUEUE_COUNT;
}
EXPORT_SYMBOL_GPL(adf_gen6_init_tl_data);
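The static_assert() lines in the hunk above pin every per-slice cmdq table to the length of cnv_cmdq_counters, so indexing any table with the shared NUM_CMDQ_COUNTERS bound is checked at compile time. A reduced sketch of that parallel-table guard (C11, with invented tables):

#include <assert.h>
#include <stdio.h>

static const char *cnv_ops[] = { "wait", "exec", "drain" };
static const char *pke_ops[] = { "wait", "exec", "drain" };

#define NUM_OPS (sizeof(cnv_ops) / sizeof(cnv_ops[0]))
static_assert(sizeof(pke_ops) / sizeof(pke_ops[0]) == NUM_OPS,
	      "parallel tables must match");

static const char **tables[] = { cnv_ops, pke_ops };

int main(void)
{
	for (size_t t = 0; t < 2; t++)
		for (size_t i = 0; i < NUM_OPS; i++)
			printf("table %zu: %s\n", t, tables[t][i]);
	return 0;
}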
diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c
index 12e565613661..4639d7fd93e6 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c
@@ -384,7 +384,8 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
*/
int __init adf_init_misc_wq(void)
{
- adf_misc_wq = alloc_workqueue("qat_misc_wq", WQ_MEM_RECLAIM, 0);
+ adf_misc_wq = alloc_workqueue("qat_misc_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
return !adf_misc_wq ? -ENOMEM : 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
index 69295a9ddf0a..4ccc94ed9493 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs_utils.c
@@ -1,18 +1,12 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2025 Intel Corporation */
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sprintf.h>
#include <linux/string_helpers.h>
#include "adf_pm_dbgfs_utils.h"
-/*
- * This is needed because a variable is used to index the mask at
- * pm_scnprint_table(), making it not compile time constant, so the compile
- * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled.
- */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
#define PM_INFO_MAX_KEY_LEN 21
static int pm_scnprint_table(char *buff, const struct pm_status_row *table,
diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
index 31d1ef0cb1f5..bb904ba4bf84 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_sriov_configure);
int __init adf_init_pf_wq(void)
{
/* Workqueue for PF2VF responses */
- pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq", WQ_MEM_RECLAIM, 0);
+ pf2vf_resp_wq = alloc_workqueue("qat_pf2vf_resp_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
return !pf2vf_resp_wq ? -ENOMEM : 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
index 74fb0c2ed241..b64142db1f0d 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c
@@ -212,6 +212,23 @@ int adf_tl_halt(struct adf_accel_dev *accel_dev)
return ret;
}
+static void adf_set_cmdq_cnt(struct adf_accel_dev *accel_dev,
+ struct adf_tl_hw_data *tl_data)
+{
+ struct icp_qat_fw_init_admin_slice_cnt *slice_cnt, *cmdq_cnt;
+
+ slice_cnt = &accel_dev->telemetry->slice_cnt;
+ cmdq_cnt = &accel_dev->telemetry->cmdq_cnt;
+
+ cmdq_cnt->cpr_cnt = slice_cnt->cpr_cnt * tl_data->multiplier.cpr_cnt;
+ cmdq_cnt->dcpr_cnt = slice_cnt->dcpr_cnt * tl_data->multiplier.dcpr_cnt;
+ cmdq_cnt->pke_cnt = slice_cnt->pke_cnt * tl_data->multiplier.pke_cnt;
+ cmdq_cnt->wat_cnt = slice_cnt->wat_cnt * tl_data->multiplier.wat_cnt;
+ cmdq_cnt->wcp_cnt = slice_cnt->wcp_cnt * tl_data->multiplier.wcp_cnt;
+ cmdq_cnt->ucs_cnt = slice_cnt->ucs_cnt * tl_data->multiplier.ucs_cnt;
+ cmdq_cnt->ath_cnt = slice_cnt->ath_cnt * tl_data->multiplier.ath_cnt;
+}
+
int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
{
struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
@@ -235,6 +252,8 @@ int adf_tl_run(struct adf_accel_dev *accel_dev, int state)
return ret;
}
+ adf_set_cmdq_cnt(accel_dev, tl_data);
+
telemetry->hbuffs = state;
atomic_set(&telemetry->state, state);
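adf_set_cmdq_cnt() above derives each command-queue count as the reported slice count times a fixed per-slice-type multiplier (the *_QUEUE_COUNT constants from adf_gen6_tl.c). A worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical device: 2 compression slices, 5 queues per slice. */
	unsigned int cpr_slices = 2;
	unsigned int cpr_queues_per_slice = 5;

	printf("compression cmdq count = %u\n",
	       cpr_slices * cpr_queues_per_slice);	/* prints 10 */
	return 0;
}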
diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
index e54a406cc1b4..02d75c3c214a 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h
@@ -28,19 +28,23 @@ struct dentry;
struct adf_tl_hw_data {
size_t layout_sz;
size_t slice_reg_sz;
+ size_t cmdq_reg_sz;
size_t rp_reg_sz;
size_t msg_cnt_off;
const struct adf_tl_dbg_counter *dev_counters;
const struct adf_tl_dbg_counter *sl_util_counters;
const struct adf_tl_dbg_counter *sl_exec_counters;
+ const struct adf_tl_dbg_counter **cmdq_counters;
const struct adf_tl_dbg_counter *rp_counters;
u8 num_hbuff;
u8 cpp_ns_per_cycle;
u8 bw_units_to_bytes;
u8 num_dev_counters;
u8 num_rp_counters;
+ u8 num_cmdq_counters;
u8 max_rp;
u8 max_sl_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt multiplier;
};
struct adf_telemetry {
@@ -69,6 +73,7 @@ struct adf_telemetry {
struct mutex wr_lock;
struct delayed_work work_ctx;
struct icp_qat_fw_init_admin_slice_cnt slice_cnt;
+ struct icp_qat_fw_init_admin_slice_cnt cmdq_cnt;
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
index a32db273842a..b81f70576683 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c
@@ -339,6 +339,48 @@ static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev,
return 0;
}
+static int tl_print_cmdq_counter(struct adf_telemetry *telemetry,
+ const struct adf_tl_dbg_counter *ctr,
+ struct seq_file *s, u8 cnt_id, u8 counter)
+{
+ size_t cmdq_regs_sz = GET_TL_DATA(telemetry->accel_dev).cmdq_reg_sz;
+ size_t offset_inc = cnt_id * cmdq_regs_sz;
+ struct adf_tl_dbg_counter slice_ctr;
+ char cnt_name[MAX_COUNT_NAME_SIZE];
+
+ slice_ctr = *(ctr + counter);
+ slice_ctr.offset1 += offset_inc;
+ snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", slice_ctr.name, cnt_id);
+
+ return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name);
+}
+
+static int tl_calc_and_print_cmdq_counters(struct adf_accel_dev *accel_dev,
+ struct seq_file *s, u8 cnt_type,
+ u8 cnt_id)
+{
+ struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);
+ struct adf_telemetry *telemetry = accel_dev->telemetry;
+ const struct adf_tl_dbg_counter **cmdq_tl_counters;
+ const struct adf_tl_dbg_counter *ctr;
+ u8 counter;
+ int ret;
+
+ cmdq_tl_counters = tl_data->cmdq_counters;
+ ctr = cmdq_tl_counters[cnt_type];
+
+ for (counter = 0; counter < tl_data->num_cmdq_counters; counter++) {
+ ret = tl_print_cmdq_counter(telemetry, ctr, s, cnt_id, counter);
+ if (ret) {
+ dev_notice(&GET_DEV(accel_dev),
+ "invalid slice utilization counter type\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt)
{
seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG);
@@ -352,6 +394,7 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
struct adf_telemetry *telemetry = accel_dev->telemetry;
const struct adf_tl_dbg_counter *dev_tl_counters;
u8 num_dev_counters = tl_data->num_dev_counters;
+ u8 *cmdq_cnt = (u8 *)&telemetry->cmdq_cnt;
u8 *sl_cnt = (u8 *)&telemetry->slice_cnt;
const struct adf_tl_dbg_counter *ctr;
unsigned int i;
+ unsigned int j;
@@ -387,6 +430,15 @@ static int tl_print_dev_data(struct adf_accel_dev *accel_dev,
}
}
+ /* Print per command queue telemetry. */
+ for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) {
+ for (j = 0; j < cmdq_cnt[i]; j++) {
+ ret = tl_calc_and_print_cmdq_counters(accel_dev, s, i, j);
+ if (ret)
+ return ret;
+ }
+ }
+
return 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
index 11cc9eae19b3..97c5eeaa1b17 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
+++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h
@@ -17,6 +17,7 @@ struct adf_accel_dev;
#define LAT_ACC_NAME "gp_lat_acc_avg"
#define BW_IN_NAME "bw_in"
#define BW_OUT_NAME "bw_out"
+#define RE_ACC_NAME "re_acc_avg"
#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg"
#define AT_TRANS_LAT_NAME "at_trans_lat_avg"
#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used"
@@ -43,6 +44,10 @@ struct adf_accel_dev;
(ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg))
+#define ADF_TL_CMDQ_REG_OFF(slice, reg, qat_gen) \
+ (ADF_TL_DEV_REG_OFF(slice##_cmdq[0], qat_gen) + \
+ offsetof(struct adf_##qat_gen##_tl_cmdq_data_regs, reg))
+
#define ADF_TL_RP_REG_OFF(reg, qat_gen) \
(ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \
offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg))
diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
index a4636ec9f9ca..d0fef20a3df4 100644
--- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq);
*/
int __init adf_init_vf_wq(void)
{
- adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq", WQ_MEM_RECLAIM, 0);
+ adf_vf_stop_wq = alloc_workqueue("adf_vf_stop_wq",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
return !adf_vf_stop_wq ? -EFAULT : 0;
}
diff --git a/drivers/crypto/intel/qat/qat_common/qat_algs.c b/drivers/crypto/intel/qat/qat_common/qat_algs.c
index 43e6dd9b77b7..7f638a62e3ad 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_algs.c
@@ -5,12 +5,10 @@
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
-#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
-#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
@@ -68,16 +66,10 @@ struct qat_alg_aead_ctx {
dma_addr_t dec_cd_paddr;
struct icp_qat_fw_la_bulk_req enc_fw_req;
struct icp_qat_fw_la_bulk_req dec_fw_req;
- struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
+ unsigned int hash_digestsize;
+ unsigned int hash_blocksize;
struct qat_crypto_instance *inst;
- union {
- struct sha1_state sha1;
- struct sha256_state sha256;
- struct sha512_state sha512;
- };
- char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
- char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_skcipher_ctx {
@@ -94,125 +86,57 @@ struct qat_alg_skcipher_ctx {
int mode;
};
-static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
- switch (qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- return ICP_QAT_HW_SHA1_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- return ICP_QAT_HW_SHA256_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- return ICP_QAT_HW_SHA512_STATE1_SZ;
- default:
- return -EFAULT;
- }
-}
-
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
struct qat_alg_aead_ctx *ctx,
const u8 *auth_key,
unsigned int auth_keylen)
{
- SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
- int block_size = crypto_shash_blocksize(ctx->hash_tfm);
- int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- __be32 *hash_state_out;
- __be64 *hash512_state_out;
- int i, offset;
-
- memset(ctx->ipad, 0, block_size);
- memset(ctx->opad, 0, block_size);
- shash->tfm = ctx->hash_tfm;
-
- if (auth_keylen > block_size) {
- int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, ctx->ipad);
- if (ret)
- return ret;
-
- memcpy(ctx->opad, ctx->ipad, digest_size);
- } else {
- memcpy(ctx->ipad, auth_key, auth_keylen);
- memcpy(ctx->opad, auth_key, auth_keylen);
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1: {
+ struct hmac_sha1_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ round_up(sizeof(key.istate.h), 8));
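+		/* The outer state follows the inner state at an 8-byte aligned
+		 * offset; SHA-1's 20-byte state is the only one that needs
+		 * rounding up.
+		 */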
+
+ hmac_sha1_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- for (i = 0; i < block_size; i++) {
- char *ipad_ptr = ctx->ipad + i;
- char *opad_ptr = ctx->opad + i;
- *ipad_ptr ^= HMAC_IPAD_VALUE;
- *opad_ptr ^= HMAC_OPAD_VALUE;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256: {
+ struct hmac_sha256_key key;
+ __be32 *istate = (__be32 *)hash->sha.state1;
+ __be32 *ostate = (__be32 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha256_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be32(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be32(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->ipad, block_size))
- return -EFAULT;
-
- hash_state_out = (__be32 *)hash->sha.state1;
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export_core(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export_core(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export_core(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
- default:
- return -EFAULT;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512: {
+ struct hmac_sha512_key key;
+ __be64 *istate = (__be64 *)hash->sha.state1;
+ __be64 *ostate = (__be64 *)(hash->sha.state1 +
+ sizeof(key.key.istate.h));
+
+ hmac_sha512_preparekey(&key, auth_key, auth_keylen);
+ for (int i = 0; i < ARRAY_SIZE(key.key.istate.h); i++) {
+ istate[i] = cpu_to_be64(key.key.istate.h[i]);
+ ostate[i] = cpu_to_be64(key.key.ostate.h[i]);
+ }
+ memzero_explicit(&key, sizeof(key));
+ return 0;
}
-
- if (crypto_shash_init(shash))
- return -EFAULT;
-
- if (crypto_shash_update(shash, ctx->opad, block_size))
- return -EFAULT;
-
- offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
- if (offset < 0)
- return -EFAULT;
-
- hash_state_out = (__be32 *)(hash->sha.state1 + offset);
- hash512_state_out = (__be64 *)hash_state_out;
-
- switch (ctx->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export_core(shash, &ctx->sha1))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export_core(shash, &ctx->sha256))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export_core(shash, &ctx->sha512))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
- break;
default:
return -EFAULT;
}
- memzero_explicit(ctx->ipad, block_size);
- memzero_explicit(ctx->opad, block_size);
- return 0;
}
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
@@ -259,7 +183,7 @@ static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
ctx->qat_hash_alg, digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -326,7 +250,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
struct icp_qat_hw_cipher_algo_blk *cipher =
(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+ roundup(ctx->hash_digestsize, 8) * 2);
struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
@@ -346,7 +270,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
ctx->qat_hash_alg,
digestsize);
hash->sha.inner_setup.auth_counter.counter =
- cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+ cpu_to_be32(ctx->hash_blocksize);
if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
return -EFAULT;
@@ -368,7 +292,7 @@ static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
cipher_cd_ctrl->cipher_cfg_offset =
(sizeof(struct icp_qat_hw_auth_setup) +
- roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+ roundup(ctx->hash_digestsize, 8) * 2) >> 3;
ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
@@ -1150,32 +1074,35 @@ static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
- enum icp_qat_hw_auth_algo hash,
- const char *hash_name)
+ enum icp_qat_hw_auth_algo hash_alg,
+ unsigned int hash_digestsize,
+ unsigned int hash_blocksize)
{
struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
- ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
- if (IS_ERR(ctx->hash_tfm))
- return PTR_ERR(ctx->hash_tfm);
- ctx->qat_hash_alg = hash;
+ ctx->qat_hash_alg = hash_alg;
+ ctx->hash_digestsize = hash_digestsize;
+ ctx->hash_blocksize = hash_blocksize;
crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1,
+ SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
}
static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256,
+ SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
}
static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
- return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512,
+ SHA512_DIGEST_SIZE, SHA512_BLOCK_SIZE);
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
@@ -1184,8 +1111,6 @@ static void qat_alg_aead_exit(struct crypto_aead *tfm)
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
- crypto_free_shash(ctx->hash_tfm);
-
if (!inst)
return;
diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
index 21d652a1c8ef..06d49cb781ae 100644
--- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c
@@ -200,20 +200,12 @@ qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
- char buf[16] = {0};
- unsigned long ae = 0;
- int i;
-
- strscpy(buf, str, sizeof(buf));
- for (i = 0; i < 16; i++) {
- if (!isdigit(buf[i])) {
- buf[i] = '\0';
- break;
- }
- }
- if ((kstrtoul(buf, 10, &ae)))
- return -EFAULT;
+ unsigned long long ae;
+ char *end;
+
+ ae = simple_strtoull(str, &end, 10);
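+ /* Reject values above UINT_MAX, strings with no digits, and strings
+ * with more digits than a u64 can represent.
+ */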
+ if (ae > UINT_MAX || str == end || (end - str) > 19)
+ return -EINVAL;
*num = (unsigned int)ae;
return 0;
}
@@ -1900,7 +1892,7 @@ static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
if (sobj_hdr)
sobj_chunk_num = sobj_hdr->num_chunks;
- mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) *
+ mobj_hdr = kcalloc(size_add(uobj_chunk_num, sobj_chunk_num),
sizeof(*mobj_hdr), GFP_KERNEL);
if (!mobj_hdr)
return -ENOMEM;
diff --git a/drivers/crypto/loongson/Kconfig b/drivers/crypto/loongson/Kconfig
new file mode 100644
index 000000000000..15475da8fc11
--- /dev/null
+++ b/drivers/crypto/loongson/Kconfig
@@ -0,0 +1,5 @@
+config CRYPTO_DEV_LOONGSON_RNG
+ tristate "Support for Loongson RNG Driver"
+ depends on MFD_LOONGSON_SE
+ help
+ Support for the hardware random number generator provided by the
+ Loongson Security Engine.
diff --git a/drivers/crypto/loongson/Makefile b/drivers/crypto/loongson/Makefile
new file mode 100644
index 000000000000..1ce5ec32b553
--- /dev/null
+++ b/drivers/crypto/loongson/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CRYPTO_DEV_LOONGSON_RNG) += loongson-rng.o
diff --git a/drivers/crypto/loongson/loongson-rng.c b/drivers/crypto/loongson/loongson-rng.c
new file mode 100644
index 000000000000..3a4940260f9e
--- /dev/null
+++ b/drivers/crypto/loongson/loongson-rng.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 HiSilicon Limited. */
+/* Copyright (c) 2025 Loongson Technology Corporation Limited. */
+
+#include <linux/crypto.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mfd/loongson-se.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/random.h>
+#include <crypto/internal/rng.h>
+
+#define SE_SEED_SIZE 32
+
+struct loongson_rng_list {
+ struct mutex lock;
+ struct list_head list;
+ int registered;
+};
+
+struct loongson_rng {
+ u32 used;
+ struct loongson_se_engine *engine;
+ struct list_head list;
+ struct mutex lock;
+};
+
+struct loongson_rng_ctx {
+ struct loongson_rng *rng;
+};
+
+struct loongson_rng_cmd {
+ u32 cmd_id;
+ union {
+ u32 len;
+ u32 ret;
+ } u;
+ u32 seed_off;
+ u32 out_off;
+ u32 pad[4];
+};
+
+static struct loongson_rng_list rng_devices = {
+ .lock = __MUTEX_INITIALIZER(rng_devices.lock),
+ .list = LIST_HEAD_INIT(rng_devices.list),
+};
+
+static int loongson_rng_generate(struct crypto_rng *tfm, const u8 *src,
+ unsigned int slen, u8 *dstn, unsigned int dlen)
+{
+ struct loongson_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct loongson_rng *rng = ctx->rng;
+ struct loongson_rng_cmd *cmd = rng->engine->command;
+ int err, len;
+
+ mutex_lock(&rng->lock);
+ cmd->seed_off = 0;
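+ /* Presumably a zero seed offset tells the firmware that no seed
+ * data accompanies this generate command (cf. loongson_rng_seed()).
+ */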
+ do {
+ len = min(dlen, rng->engine->buffer_size);
+ cmd = rng->engine->command;
+ cmd->u.len = len;
+ err = loongson_se_send_engine_cmd(rng->engine);
+ if (err)
+ break;
+
+ cmd = rng->engine->command_ret;
+ if (cmd->u.ret) {
+ err = -EIO;
+ break;
+ }
+
+ memcpy(dstn, rng->engine->data_buffer, len);
+ dlen -= len;
+ dstn += len;
+ } while (dlen > 0);
+ mutex_unlock(&rng->lock);
+
+ return err;
+}
+
+static int loongson_rng_init(struct crypto_tfm *tfm)
+{
+ struct loongson_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct loongson_rng *rng;
+ u32 min_used = U32_MAX;
+
+ mutex_lock(&rng_devices.lock);
+ list_for_each_entry(rng, &rng_devices.list, list) {
+ if (rng->used < min_used) {
+ ctx->rng = rng;
+ min_used = rng->used;
+ }
+ }
+ ctx->rng->used++;
+ mutex_unlock(&rng_devices.lock);
+
+ return 0;
+}
+
+static void loongson_rng_exit(struct crypto_tfm *tfm)
+{
+ struct loongson_rng_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ mutex_lock(&rng_devices.lock);
+ ctx->rng->used--;
+ mutex_unlock(&rng_devices.lock);
+}
+
+static int loongson_rng_seed(struct crypto_rng *tfm, const u8 *seed,
+ unsigned int slen)
+{
+ struct loongson_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ struct loongson_rng *rng = ctx->rng;
+ struct loongson_rng_cmd *cmd;
+ int err;
+
+ if (slen < SE_SEED_SIZE)
+ return -EINVAL;
+
+ slen = min(slen, rng->engine->buffer_size);
+
+ mutex_lock(&rng->lock);
+ cmd = rng->engine->command;
+ cmd->u.len = slen;
+ cmd->seed_off = rng->engine->buffer_off;
+ memcpy(rng->engine->data_buffer, seed, slen);
+ err = loongson_se_send_engine_cmd(rng->engine);
+ if (err)
+ goto out;
+
+ cmd = rng->engine->command_ret;
+ if (cmd->u.ret)
+ err = -EIO;
+out:
+ mutex_unlock(&rng->lock);
+
+ return err;
+}
+
+static struct rng_alg loongson_rng_alg = {
+ .generate = loongson_rng_generate,
+ .seed = loongson_rng_seed,
+ .seedsize = SE_SEED_SIZE,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "loongson_stdrng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct loongson_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = loongson_rng_init,
+ .cra_exit = loongson_rng_exit,
+ },
+};
+
+static int loongson_rng_probe(struct platform_device *pdev)
+{
+ struct loongson_rng_cmd *cmd;
+ struct loongson_rng *rng;
+ int ret = 0;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->engine = loongson_se_init_engine(pdev->dev.parent, SE_ENGINE_RNG);
+ if (!rng->engine)
+ return -ENODEV;
+ cmd = rng->engine->command;
+ cmd->cmd_id = SE_CMD_RNG;
+ cmd->out_off = rng->engine->buffer_off;
+ mutex_init(&rng->lock);
+
+ mutex_lock(&rng_devices.lock);
+
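+ /* The "stdrng" algorithm is registered once, on the first probed
+ * device; subsequent devices only join the list.
+ */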
+ if (!rng_devices.registered) {
+ ret = crypto_register_rng(&loongson_rng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register crypto(%d)\n", ret);
+ goto out;
+ }
+ rng_devices.registered = 1;
+ }
+
+ list_add_tail(&rng->list, &rng_devices.list);
+out:
+ mutex_unlock(&rng_devices.lock);
+
+ return ret;
+}
+
+static struct platform_driver loongson_rng_driver = {
+ .probe = loongson_rng_probe,
+ .driver = {
+ .name = "loongson-rng",
+ },
+};
+module_platform_driver(loongson_rng_driver);
+
+MODULE_ALIAS("platform:loongson-rng");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yinggang Gu <guyinggang@loongson.cn>");
+MODULE_AUTHOR("Qunqin Zhao <zhaoqunqin@loongson.cn>");
+MODULE_DESCRIPTION("Loongson Random Number Generator driver");
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 9c21f5d835d2..301bdf239e7d 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -420,7 +420,6 @@ static int mv_cesa_probe(struct platform_device *pdev)
{
const struct mv_cesa_caps *caps = &orion_caps;
const struct mbus_dram_target_info *dram;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct mv_cesa_dev *cesa;
struct mv_cesa_engine *engines;
@@ -433,11 +432,9 @@ static int mv_cesa_probe(struct platform_device *pdev)
}
if (dev->of_node) {
- match = of_match_node(mv_cesa_of_match_table, dev->of_node);
- if (!match || !match->data)
+ caps = of_device_get_match_data(dev);
+ if (!caps)
return -ENOTSUPP;
-
- caps = match->data;
}
cesa = devm_kzalloc(dev, sizeof(*cesa), GFP_KERNEL);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
index 215a1a8ba7e9..07a74f702c3a 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_devlink.c
@@ -24,7 +24,8 @@ static int otx2_cpt_dl_egrp_delete(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
ctx->val.vstr[0] = '\0';
@@ -32,7 +33,8 @@ static int otx2_cpt_dl_uc_info(struct devlink *dl, u32 id,
}
static int otx2_cpt_dl_t106_mode_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_cpt_devlink *cpt_dl = devlink_priv(dl);
struct otx2_cptpf_dev *cptpf = cpt_dl->cptpf;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index cc47e361089a..b5cc5401f704 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -3,6 +3,7 @@
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/string.h>
#include <linux/string_choices.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
@@ -458,13 +459,13 @@ static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
u16 rid)
{
char filename[OTX2_CPT_NAME_LENGTH];
- char eng_type[8] = {0};
+ char eng_type[8];
int ret, e, i;
INIT_LIST_HEAD(&fw_info->ucodes);
for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
- strcpy(eng_type, get_eng_type_str(e));
+ strscpy(eng_type, get_eng_type_str(e));
for (i = 0; i < strlen(eng_type); i++)
eng_type[i] = tolower(eng_type[i]);
@@ -1615,7 +1616,7 @@ int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
return -EINVAL;
}
err_msg = "Invalid engine group format";
- strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
+ strscpy(tmp_buf, ctx->val.vstr);
start = tmp_buf;
has_se = has_ie = has_ae = false;
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index fd0a98b2fb1b..0493041ea088 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -1043,8 +1043,10 @@ static struct scomp_alg nx842_powernv_alg = {
.base.cra_priority = 300,
.base.cra_module = THIS_MODULE,
- .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
- .free_ctx = nx842_crypto_free_ctx,
+ .streams = {
+ .alloc_ctx = nx842_powernv_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
.compress = nx842_crypto_compress,
.decompress = nx842_crypto_decompress,
};
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index f528e072494a..fc0222ebe807 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -1020,8 +1020,10 @@ static struct scomp_alg nx842_pseries_alg = {
.base.cra_priority = 300,
.base.cra_module = THIS_MODULE,
- .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
- .free_ctx = nx842_crypto_free_ctx,
+ .streams = {
+ .alloc_ctx = nx842_pseries_crypto_alloc_ctx,
+ .free_ctx = nx842_crypto_free_ctx,
+ },
.compress = nx842_crypto_compress,
.decompress = nx842_crypto_decompress,
};
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 244e24e52987..3cc802622dd5 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "omap-crypto.h"
#include "omap-aes.h"
@@ -221,7 +222,7 @@ static void omap_aes_dma_out_callback(void *data)
struct omap_aes_dev *dd = data;
/* dma_lch_out - completed */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -494,9 +495,9 @@ static void omap_aes_copy_ivout(struct omap_aes_dev *dd, u8 *ivbuf)
((u32 *)ivbuf)[i] = omap_aes_read(dd, AES_REG_IV(dd, i));
}
-static void omap_aes_done_task(unsigned long data)
+static void omap_aes_done_task(struct work_struct *t)
{
- struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+ struct omap_aes_dev *dd = from_work(dd, t, done_task);
pr_debug("enter done_task\n");
@@ -925,7 +926,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
if (!dd->total)
/* All bytes read! */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
else
/* Enable DATA_IN interrupt for next block */
omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
@@ -1140,7 +1141,7 @@ static int omap_aes_probe(struct platform_device *pdev)
(reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
- tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_aes_done_task);
err = omap_aes_dma_init(dd);
if (err == -EPROBE_DEFER) {
@@ -1229,7 +1230,7 @@ err_engine:
omap_aes_dma_cleanup(dd);
err_irq:
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
err_pm_disable:
pm_runtime_disable(dev);
err_res:
@@ -1264,7 +1265,7 @@ static void omap_aes_remove(struct platform_device *pdev)
crypto_engine_exit(dd->engine);
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
omap_aes_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index 41d67780fd45..99c36a777e97 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -159,7 +159,7 @@ struct omap_aes_dev {
unsigned long flags;
int err;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
struct aead_queue aead_queue;
spinlock_t lock;
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index 9c5538ae17db..149ebd77710b 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -32,6 +32,7 @@
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include "omap-crypto.h"
@@ -130,7 +131,7 @@ struct omap_des_dev {
unsigned long flags;
int err;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
struct skcipher_request *req;
struct crypto_engine *engine;
@@ -325,7 +326,7 @@ static void omap_des_dma_out_callback(void *data)
struct omap_des_dev *dd = data;
/* dma_lch_out - completed */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_des_dma_init(struct omap_des_dev *dd)
@@ -580,9 +581,9 @@ static int omap_des_crypt_req(struct crypto_engine *engine,
omap_des_crypt_dma_start(dd);
}
-static void omap_des_done_task(unsigned long data)
+static void omap_des_done_task(struct work_struct *t)
{
- struct omap_des_dev *dd = (struct omap_des_dev *)data;
+ struct omap_des_dev *dd = from_work(dd, t, done_task);
int i;
pr_debug("enter done_task\n");
@@ -890,7 +891,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
if (!dd->total)
/* All bytes read! */
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
else
/* Enable DATA_IN interrupt for next block */
omap_des_write(dd, DES_REG_IRQ_ENABLE(dd), 0x2);
@@ -986,7 +987,7 @@ static int omap_des_probe(struct platform_device *pdev)
(reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
(reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
- tasklet_init(&dd->done_task, omap_des_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_des_done_task);
err = omap_des_dma_init(dd);
if (err == -EPROBE_DEFER) {
@@ -1053,7 +1054,7 @@ err_engine:
omap_des_dma_cleanup(dd);
err_irq:
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
err_get:
pm_runtime_disable(dev);
err_res:
@@ -1077,7 +1078,7 @@ static void omap_des_remove(struct platform_device *pdev)
crypto_engine_unregister_skcipher(
&dd->pdata->algs_info[i].algs_list[j]);
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
omap_des_dma_cleanup(dd);
pm_runtime_disable(dd->dev);
}
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6328e8026b91..ff8aac02994a 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -37,6 +37,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#define MD5_DIGEST_SIZE 16
@@ -217,7 +218,7 @@ struct omap_sham_dev {
int irq;
int err;
struct dma_chan *dma_lch;
- struct tasklet_struct done_task;
+ struct work_struct done_task;
u8 polling_mode;
u8 xmit_buf[BUFLEN] OMAP_ALIGNED;
@@ -561,7 +562,7 @@ static void omap_sham_dma_callback(void *param)
struct omap_sham_dev *dd = param;
set_bit(FLAGS_DMA_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
}
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
@@ -1703,9 +1704,9 @@ static struct ahash_engine_alg algs_sha384_sha512[] = {
},
};
-static void omap_sham_done_task(unsigned long data)
+static void omap_sham_done_task(struct work_struct *t)
{
- struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ struct omap_sham_dev *dd = from_work(dd, t, done_task);
int err = 0;
dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
@@ -1739,7 +1740,7 @@ finish:
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
set_bit(FLAGS_OUTPUT_READY, &dd->flags);
- tasklet_schedule(&dd->done_task);
+ queue_work(system_bh_wq, &dd->done_task);
return IRQ_HANDLED;
}
@@ -2059,7 +2060,7 @@ static int omap_sham_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dd);
INIT_LIST_HEAD(&dd->list);
- tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
+ INIT_WORK(&dd->done_task, omap_sham_done_task);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
@@ -2194,7 +2195,7 @@ static void omap_sham_remove(struct platform_device *pdev)
&dd->pdata->algs_info[i].algs_list[j]);
dd->pdata->algs_info[i].registered--;
}
- tasklet_kill(&dd->done_task);
+ cancel_work_sync(&dd->done_task);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index e95e84486d9a..b966f3365b7d 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -21,7 +21,6 @@
#include "sha.h"
#include "aead.h"
-#define QCE_MAJOR_VERSION5 0x05
#define QCE_QUEUE_LENGTH 1
#define QCE_DEFAULT_MEM_BANDWIDTH 393600
@@ -161,7 +160,7 @@ static int qce_check_version(struct qce_device *qce)
* the driver does not support v5 with minor 0 because it has special
* alignment requirements.
*/
- if (major != QCE_MAJOR_VERSION5 || minor == 0)
+ if (major == 5 && minor == 0)
return -ENODEV;
qce->burst_size = QCE_BAM_BURST_SIZE;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 1dec7aea852d..68cafd4741ad 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -24,11 +24,13 @@ int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
dma->txchan = dma_request_chan(dev, "tx");
if (IS_ERR(dma->txchan))
- return PTR_ERR(dma->txchan);
+ return dev_err_probe(dev, PTR_ERR(dma->txchan),
+ "Failed to get TX DMA channel\n");
dma->rxchan = dma_request_chan(dev, "rx");
if (IS_ERR(dma->rxchan)) {
- ret = PTR_ERR(dma->rxchan);
+ ret = dev_err_probe(dev, PTR_ERR(dma->rxchan),
+ "Failed to get RX DMA channel\n");
goto error_rx;
}
diff --git a/drivers/crypto/rockchip/rk3288_crypto_ahash.c b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
index d6928ebe9526..b9f5a8b42e66 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -254,7 +254,7 @@ static void rk_hash_unprepare(struct crypto_engine *engine, void *breq)
struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
struct rk_crypto_info *rkc = rctx->dev;
- dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+ dma_unmap_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
}
static int rk_hash_run(struct crypto_engine *engine, void *breq)
diff --git a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
index 9393e10671c2..e80f9148c012 100644
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -321,8 +321,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
algt->stat_req++;
rkc->nreq++;
- ivsize = crypto_skcipher_ivsize(tfm);
- if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
+ if (areq->iv && ivsize > 0) {
if (rctx->mode & RK_CRYPTO_DEC) {
offset = areq->cryptlen - ivsize;
scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
diff --git a/drivers/crypto/starfive/jh7110-aes.c b/drivers/crypto/starfive/jh7110-aes.c
index 86a1a1fa9f8f..426b24889af8 100644
--- a/drivers/crypto/starfive/jh7110-aes.c
+++ b/drivers/crypto/starfive/jh7110-aes.c
@@ -511,8 +511,7 @@ static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
stsg = sg_next(stsg), dtsg = sg_next(dtsg)) {
src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_BIDIRECTIONAL);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg error\n");
+ return -ENOMEM;
dst_nents = src_nents;
len = min(sg_dma_len(stsg), remain);
@@ -528,13 +527,11 @@ static int starfive_aes_map_sg(struct starfive_cryp_dev *cryp,
for (stsg = src, dtsg = dst;;) {
src_nents = dma_map_sg(cryp->dev, stsg, 1, DMA_TO_DEVICE);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg src error\n");
+ return -ENOMEM;
dst_nents = dma_map_sg(cryp->dev, dtsg, 1, DMA_FROM_DEVICE);
if (dst_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg dst error\n");
+ return -ENOMEM;
len = min(sg_dma_len(stsg), sg_dma_len(dtsg));
len = min(len, remain);
@@ -669,8 +666,7 @@ static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq
if (cryp->assoclen) {
rctx->adata = kzalloc(cryp->assoclen + AES_BLOCK_SIZE, GFP_KERNEL);
if (!rctx->adata)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "Failed to alloc memory for adata");
+ return -ENOMEM;
if (sg_copy_to_buffer(req->src, sg_nents_for_len(req->src, cryp->assoclen),
rctx->adata, cryp->assoclen) != cryp->assoclen)
diff --git a/drivers/crypto/starfive/jh7110-hash.c b/drivers/crypto/starfive/jh7110-hash.c
index 6cfe0238f615..54b7af4a7aee 100644
--- a/drivers/crypto/starfive/jh7110-hash.c
+++ b/drivers/crypto/starfive/jh7110-hash.c
@@ -229,8 +229,7 @@ static int starfive_hash_one_request(struct crypto_engine *engine, void *areq)
for_each_sg(rctx->in_sg, tsg, rctx->in_sg_len, i) {
src_nents = dma_map_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
if (src_nents == 0)
- return dev_err_probe(cryp->dev, -ENOMEM,
- "dma_map_sg error\n");
+ return -ENOMEM;
ret = starfive_hash_dma_xfer(cryp, tsg);
dma_unmap_sg(cryp->dev, tsg, 1, DMA_TO_DEVICE);
@@ -326,6 +325,7 @@ static int starfive_hash_digest(struct ahash_request *req)
struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm);
struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req);
struct starfive_cryp_dev *cryp = ctx->cryp;
+ int sg_len;
memset(rctx, 0, sizeof(struct starfive_cryp_request_ctx));
@@ -334,7 +334,10 @@ static int starfive_hash_digest(struct ahash_request *req)
rctx->in_sg = req->src;
rctx->blksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
rctx->digsize = crypto_ahash_digestsize(tfm);
- rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+ sg_len = sg_nents_for_len(rctx->in_sg, rctx->total);
+ if (sg_len < 0)
+ return sg_len;
+ rctx->in_sg_len = sg_len;
ctx->rctx = rctx;
return crypto_transfer_hash_request_to_engine(cryp->engine, req);
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index a89b4c5d62a0..5e82e8a1f71a 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -2781,5 +2781,5 @@ static struct platform_driver stm32_cryp_driver = {
module_platform_driver(stm32_cryp_driver);
MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
-MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver");
+MODULE_DESCRIPTION("STMicroelectronics STM32 CRYP hardware driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/tegra/tegra-se-hash.c b/drivers/crypto/tegra/tegra-se-hash.c
index d09b4aaeecef..4a298ace6e9f 100644
--- a/drivers/crypto/tegra/tegra-se-hash.c
+++ b/drivers/crypto/tegra/tegra-se-hash.c
@@ -400,8 +400,9 @@ static int tegra_sha_do_update(struct ahash_request *req)
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct tegra_se *se = ctx->se;
- unsigned int nblks, nresidue, size, ret;
+ unsigned int nblks, nresidue, size;
u32 *cpuvaddr = se->cmdbuf->addr;
+ int ret;
nresidue = (req->nbytes + rctx->residue.size) % rctx->blk_size;
nblks = (req->nbytes + rctx->residue.size) / rctx->blk_size;
diff --git a/drivers/crypto/tegra/tegra-se-main.c b/drivers/crypto/tegra/tegra-se-main.c
index 1c94f1de0546..7237f14eaf5a 100644
--- a/drivers/crypto/tegra/tegra-se-main.c
+++ b/drivers/crypto/tegra/tegra-se-main.c
@@ -310,7 +310,7 @@ static int tegra_se_probe(struct platform_device *pdev)
se->engine = crypto_engine_alloc_init(dev, 0);
if (!se->engine)
- return dev_err_probe(dev, -ENOMEM, "failed to init crypto engine\n");
+ return -ENOMEM;
ret = crypto_engine_start(se->engine);
if (ret) {
diff --git a/drivers/crypto/ti/Kconfig b/drivers/crypto/ti/Kconfig
new file mode 100644
index 000000000000..a3692ceec49b
--- /dev/null
+++ b/drivers/crypto/ti/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config CRYPTO_DEV_TI_DTHEV2
+ tristate "Support for TI DTHE V2 cryptography engine"
+ depends on ARCH_K3 || COMPILE_TEST
+ select CRYPTO_ENGINE
+ select CRYPTO_SKCIPHER
+ select CRYPTO_ECB
+ select CRYPTO_CBC
+ select CRYPTO_XTS
+ help
+ This enables support for the TI DTHE V2 hardware cryptography
+ engine which can be found on TI K3 SoCs. Selecting this enables use
+ of hardware offloading for cryptographic algorithms on
+ these devices, providing enhanced resistance against side-channel
+ attacks.
diff --git a/drivers/crypto/ti/Makefile b/drivers/crypto/ti/Makefile
new file mode 100644
index 000000000000..b883078f203d
--- /dev/null
+++ b/drivers/crypto/ti/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_TI_DTHEV2) += dthev2.o
+dthev2-objs := dthev2-common.o dthev2-aes.o
diff --git a/drivers/crypto/ti/dthev2-aes.c b/drivers/crypto/ti/dthev2-aes.c
new file mode 100644
index 000000000000..156729ccc50e
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-aes.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "dthev2-common.h"
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+/* Registers */
+
+// AES Engine
+#define DTHE_P_AES_BASE 0x7000
+
+#define DTHE_P_AES_KEY1_0 0x0038
+#define DTHE_P_AES_KEY1_1 0x003C
+#define DTHE_P_AES_KEY1_2 0x0030
+#define DTHE_P_AES_KEY1_3 0x0034
+#define DTHE_P_AES_KEY1_4 0x0028
+#define DTHE_P_AES_KEY1_5 0x002C
+#define DTHE_P_AES_KEY1_6 0x0020
+#define DTHE_P_AES_KEY1_7 0x0024
+
+#define DTHE_P_AES_KEY2_0 0x0018
+#define DTHE_P_AES_KEY2_1 0x001C
+#define DTHE_P_AES_KEY2_2 0x0010
+#define DTHE_P_AES_KEY2_3 0x0014
+#define DTHE_P_AES_KEY2_4 0x0008
+#define DTHE_P_AES_KEY2_5 0x000C
+#define DTHE_P_AES_KEY2_6 0x0000
+#define DTHE_P_AES_KEY2_7 0x0004
+
+#define DTHE_P_AES_IV_IN_0 0x0040
+#define DTHE_P_AES_IV_IN_1 0x0044
+#define DTHE_P_AES_IV_IN_2 0x0048
+#define DTHE_P_AES_IV_IN_3 0x004C
+#define DTHE_P_AES_CTRL 0x0050
+#define DTHE_P_AES_C_LENGTH_0 0x0054
+#define DTHE_P_AES_C_LENGTH_1 0x0058
+#define DTHE_P_AES_AUTH_LENGTH 0x005C
+#define DTHE_P_AES_DATA_IN_OUT 0x0060
+
+#define DTHE_P_AES_SYSCONFIG 0x0084
+#define DTHE_P_AES_IRQSTATUS 0x008C
+#define DTHE_P_AES_IRQENABLE 0x0090
+
+/* Register write values and macros */
+
+enum aes_ctrl_mode_masks {
+ AES_CTRL_ECB_MASK = 0x00,
+ AES_CTRL_CBC_MASK = BIT(5),
+ AES_CTRL_XTS_MASK = BIT(12) | BIT(11),
+};
+
+#define DTHE_AES_CTRL_MODE_CLEAR_MASK (~GENMASK(28, 5))
+
+#define DTHE_AES_CTRL_DIR_ENC BIT(2)
+
+#define DTHE_AES_CTRL_KEYSIZE_16B BIT(3)
+#define DTHE_AES_CTRL_KEYSIZE_24B BIT(4)
+#define DTHE_AES_CTRL_KEYSIZE_32B (BIT(3) | BIT(4))
+
+#define DTHE_AES_CTRL_SAVE_CTX_SET BIT(29)
+
+#define DTHE_AES_CTRL_OUTPUT_READY BIT_MASK(0)
+#define DTHE_AES_CTRL_INPUT_READY BIT_MASK(1)
+#define DTHE_AES_CTRL_SAVED_CTX_READY BIT_MASK(30)
+#define DTHE_AES_CTRL_CTX_READY BIT_MASK(31)
+
+#define DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN GENMASK(6, 5)
+#define DTHE_AES_IRQENABLE_EN_ALL GENMASK(3, 0)
+
+/* Misc */
+#define AES_IV_SIZE AES_BLOCK_SIZE
+#define AES_BLOCK_WORDS (AES_BLOCK_SIZE / sizeof(u32))
+#define AES_IV_WORDS AES_BLOCK_WORDS
+
+static int dthe_cipher_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ ctx->dev_data = dev_data;
+ ctx->keylen = 0;
+
+ return 0;
+}
+
+static int dthe_cipher_xts_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+
+ ctx->dev_data = dev_data;
+ ctx->keylen = 0;
+
+ ctx->skcipher_fb = crypto_alloc_sync_skcipher("xts(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->skcipher_fb)) {
+ dev_err(dev_data->dev, "fallback driver xts(aes) couldn't be loaded\n");
+ return PTR_ERR(ctx->skcipher_fb);
+ }
+
+ return 0;
+}
+
+static void dthe_cipher_xts_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_sync_skcipher(ctx->skcipher_fb);
+}
+
+static int dthe_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ctx->keylen = keylen;
+ memcpy(ctx->key, key, keylen);
+
+ return 0;
+}
+
+static int dthe_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->aes_mode = DTHE_AES_ECB;
+
+ return dthe_aes_setkey(tfm, key, keylen);
+}
+
+static int dthe_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->aes_mode = DTHE_AES_CBC;
+
+ return dthe_aes_setkey(tfm, key, keylen);
+}
+
+static int dthe_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ if (keylen != 2 * AES_KEYSIZE_128 &&
+ keylen != 2 * AES_KEYSIZE_192 &&
+ keylen != 2 * AES_KEYSIZE_256)
+ return -EINVAL;
+
+ ctx->aes_mode = DTHE_AES_XTS;
+ ctx->keylen = keylen / 2;
+ memcpy(ctx->key, key, keylen);
+
+ crypto_sync_skcipher_clear_flags(ctx->skcipher_fb, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->skcipher_fb,
+ crypto_skcipher_get_flags(tfm) &
+ CRYPTO_TFM_REQ_MASK);
+
+ return crypto_sync_skcipher_setkey(ctx->skcipher_fb, key, keylen);
+}
+
+static void dthe_aes_set_ctrl_key(struct dthe_tfm_ctx *ctx,
+ struct dthe_aes_req_ctx *rctx,
+ u32 *iv_in)
+{
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+ u32 ctrl_val = 0;
+
+ writel_relaxed(ctx->key[0], aes_base_reg + DTHE_P_AES_KEY1_0);
+ writel_relaxed(ctx->key[1], aes_base_reg + DTHE_P_AES_KEY1_1);
+ writel_relaxed(ctx->key[2], aes_base_reg + DTHE_P_AES_KEY1_2);
+ writel_relaxed(ctx->key[3], aes_base_reg + DTHE_P_AES_KEY1_3);
+
+ if (ctx->keylen > AES_KEYSIZE_128) {
+ writel_relaxed(ctx->key[4], aes_base_reg + DTHE_P_AES_KEY1_4);
+ writel_relaxed(ctx->key[5], aes_base_reg + DTHE_P_AES_KEY1_5);
+ }
+ if (ctx->keylen == AES_KEYSIZE_256) {
+ writel_relaxed(ctx->key[6], aes_base_reg + DTHE_P_AES_KEY1_6);
+ writel_relaxed(ctx->key[7], aes_base_reg + DTHE_P_AES_KEY1_7);
+ }
+
+ if (ctx->aes_mode == DTHE_AES_XTS) {
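+		/* For XTS, the second half of the key material programs
+		 * KEY2, the tweak key.
+		 */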
+ size_t key2_offset = ctx->keylen / sizeof(u32);
+
+ writel_relaxed(ctx->key[key2_offset + 0], aes_base_reg + DTHE_P_AES_KEY2_0);
+ writel_relaxed(ctx->key[key2_offset + 1], aes_base_reg + DTHE_P_AES_KEY2_1);
+ writel_relaxed(ctx->key[key2_offset + 2], aes_base_reg + DTHE_P_AES_KEY2_2);
+ writel_relaxed(ctx->key[key2_offset + 3], aes_base_reg + DTHE_P_AES_KEY2_3);
+
+ if (ctx->keylen > AES_KEYSIZE_128) {
+ writel_relaxed(ctx->key[key2_offset + 4], aes_base_reg + DTHE_P_AES_KEY2_4);
+ writel_relaxed(ctx->key[key2_offset + 5], aes_base_reg + DTHE_P_AES_KEY2_5);
+ }
+ if (ctx->keylen == AES_KEYSIZE_256) {
+ writel_relaxed(ctx->key[key2_offset + 6], aes_base_reg + DTHE_P_AES_KEY2_6);
+ writel_relaxed(ctx->key[key2_offset + 7], aes_base_reg + DTHE_P_AES_KEY2_7);
+ }
+ }
+
+ if (rctx->enc)
+ ctrl_val |= DTHE_AES_CTRL_DIR_ENC;
+
+ if (ctx->keylen == AES_KEYSIZE_128)
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_16B;
+ else if (ctx->keylen == AES_KEYSIZE_192)
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_24B;
+ else
+ ctrl_val |= DTHE_AES_CTRL_KEYSIZE_32B;
+
+ // Write AES mode
+ ctrl_val &= DTHE_AES_CTRL_MODE_CLEAR_MASK;
+ switch (ctx->aes_mode) {
+ case DTHE_AES_ECB:
+ ctrl_val |= AES_CTRL_ECB_MASK;
+ break;
+ case DTHE_AES_CBC:
+ ctrl_val |= AES_CTRL_CBC_MASK;
+ break;
+ case DTHE_AES_XTS:
+ ctrl_val |= AES_CTRL_XTS_MASK;
+ break;
+ }
+
+ if (iv_in) {
+ ctrl_val |= DTHE_AES_CTRL_SAVE_CTX_SET;
+ for (int i = 0; i < AES_IV_WORDS; ++i)
+ writel_relaxed(iv_in[i],
+ aes_base_reg + DTHE_P_AES_IV_IN_0 + (DTHE_REG_SIZE * i));
+ }
+
+ writel_relaxed(ctrl_val, aes_base_reg + DTHE_P_AES_CTRL);
+}
+
+static void dthe_aes_dma_in_callback(void *data)
+{
+ struct skcipher_request *req = (struct skcipher_request *)data;
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ complete(&rctx->aes_compl);
+}
+
+static int dthe_aes_run(struct crypto_engine *engine, void *areq)
+{
+ struct skcipher_request *req = container_of(areq, struct skcipher_request, base);
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ unsigned int len = req->cryptlen;
+ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+
+ int src_nents = sg_nents_for_len(src, len);
+ int dst_nents;
+
+ int src_mapped_nents;
+ int dst_mapped_nents;
+
+ bool diff_dst;
+ enum dma_data_direction src_dir, dst_dir;
+
+ struct device *tx_dev, *rx_dev;
+ struct dma_async_tx_descriptor *desc_in, *desc_out;
+
+ int ret;
+
+ void __iomem *aes_base_reg = dev_data->regs + DTHE_P_AES_BASE;
+
+ u32 aes_irqenable_val = readl_relaxed(aes_base_reg + DTHE_P_AES_IRQENABLE);
+ u32 aes_sysconfig_val = readl_relaxed(aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_sysconfig_val |= DTHE_AES_SYSCONFIG_DMA_DATA_IN_OUT_EN;
+ writel_relaxed(aes_sysconfig_val, aes_base_reg + DTHE_P_AES_SYSCONFIG);
+
+ aes_irqenable_val |= DTHE_AES_IRQENABLE_EN_ALL;
+ writel_relaxed(aes_irqenable_val, aes_base_reg + DTHE_P_AES_IRQENABLE);
+
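+	/* In-place requests share a single bidirectional mapping;
+	 * otherwise src and dst are mapped separately.
+	 */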
+ if (src == dst) {
+ diff_dst = false;
+ src_dir = DMA_BIDIRECTIONAL;
+ dst_dir = DMA_BIDIRECTIONAL;
+ } else {
+ diff_dst = true;
+ src_dir = DMA_TO_DEVICE;
+ dst_dir = DMA_FROM_DEVICE;
+ }
+
+ tx_dev = dmaengine_get_dma_device(dev_data->dma_aes_tx);
+ rx_dev = dmaengine_get_dma_device(dev_data->dma_aes_rx);
+
+ src_mapped_nents = dma_map_sg(tx_dev, src, src_nents, src_dir);
+ if (src_mapped_nents == 0) {
+ ret = -EINVAL;
+ goto aes_err;
+ }
+
+ if (!diff_dst) {
+ dst_nents = src_nents;
+ dst_mapped_nents = src_mapped_nents;
+ } else {
+ dst_nents = sg_nents_for_len(dst, len);
+ dst_mapped_nents = dma_map_sg(rx_dev, dst, dst_nents, dst_dir);
+ if (dst_mapped_nents == 0) {
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+ ret = -EINVAL;
+ goto aes_err;
+ }
+ }
+
+ desc_in = dmaengine_prep_slave_sg(dev_data->dma_aes_rx, dst, dst_mapped_nents,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_in) {
+ dev_err(dev_data->dev, "IN prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aes_prep_err;
+ }
+
+ desc_out = dmaengine_prep_slave_sg(dev_data->dma_aes_tx, src, src_mapped_nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_out) {
+ dev_err(dev_data->dev, "OUT prep_slave_sg() failed\n");
+ ret = -EINVAL;
+ goto aes_prep_err;
+ }
+
+ desc_in->callback = dthe_aes_dma_in_callback;
+ desc_in->callback_param = req;
+
+ init_completion(&rctx->aes_compl);
+
+ if (ctx->aes_mode == DTHE_AES_ECB)
+ dthe_aes_set_ctrl_key(ctx, rctx, NULL);
+ else
+ dthe_aes_set_ctrl_key(ctx, rctx, (u32 *)req->iv);
+
+ writel_relaxed(lower_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_0);
+ writel_relaxed(upper_32_bits(req->cryptlen), aes_base_reg + DTHE_P_AES_C_LENGTH_1);
+
+ dmaengine_submit(desc_in);
+ dmaengine_submit(desc_out);
+
+ dma_async_issue_pending(dev_data->dma_aes_rx);
+ dma_async_issue_pending(dev_data->dma_aes_tx);
+
+ // Use a timeout so the request is still finalized if the DMA callback fails for any reason
+ ret = wait_for_completion_timeout(&rctx->aes_compl, msecs_to_jiffies(DTHE_DMA_TIMEOUT_MS));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ dmaengine_terminate_sync(dev_data->dma_aes_rx);
+ dmaengine_terminate_sync(dev_data->dma_aes_tx);
+
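+		/* Drain the engine's data output registers so a timed-out
+		 * transfer does not leave stale data behind.
+		 */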
+ for (int i = 0; i < AES_BLOCK_WORDS; ++i)
+ readl_relaxed(aes_base_reg + DTHE_P_AES_DATA_IN_OUT + (DTHE_REG_SIZE * i));
+ } else {
+ ret = 0;
+ }
+
+ // For modes other than ECB, read IV_OUT
+ if (ctx->aes_mode != DTHE_AES_ECB) {
+ u32 *iv_out = (u32 *)req->iv;
+
+ for (int i = 0; i < AES_IV_WORDS; ++i)
+ iv_out[i] = readl_relaxed(aes_base_reg +
+ DTHE_P_AES_IV_IN_0 +
+ (DTHE_REG_SIZE * i));
+ }
+
+aes_prep_err:
+ dma_unmap_sg(tx_dev, src, src_nents, src_dir);
+ if (dst_dir != DMA_BIDIRECTIONAL)
+ dma_unmap_sg(rx_dev, dst, dst_nents, dst_dir);
+
+aes_err:
+ local_bh_disable();
+ crypto_finalize_skcipher_request(dev_data->engine, req, ret);
+ local_bh_enable();
+ return 0;
+}
+
+static int dthe_aes_crypt(struct skcipher_request *req)
+{
+ struct dthe_tfm_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+ struct dthe_data *dev_data = dthe_get_dev(ctx);
+ struct crypto_engine *engine;
+
+ /*
+ * If data is not a multiple of AES_BLOCK_SIZE:
+ * - need to return -EINVAL for ECB, CBC as they are block ciphers
+ * - need to fallback to software as H/W doesn't support Ciphertext Stealing for XTS
+ */
+ if (req->cryptlen % AES_BLOCK_SIZE) {
+ if (ctx->aes_mode == DTHE_AES_XTS) {
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->skcipher_fb);
+
+ skcipher_request_set_callback(subreq, skcipher_request_flags(req),
+ req->base.complete, req->base.data);
+ skcipher_request_set_crypt(subreq, req->src, req->dst,
+ req->cryptlen, req->iv);
+
+ return rctx->enc ? crypto_skcipher_encrypt(subreq) :
+ crypto_skcipher_decrypt(subreq);
+ }
+ return -EINVAL;
+ }
+
+ /*
+	 * If the input data length is zero, no operation is needed,
+	 * except in XTS mode, where a zero-length input is invalid.
+ */
+ if (req->cryptlen == 0) {
+ if (ctx->aes_mode == DTHE_AES_XTS)
+ return -EINVAL;
+ return 0;
+ }
+
+ engine = dev_data->engine;
+ return crypto_transfer_skcipher_request_to_engine(engine, req);
+}
+
+static int dthe_aes_encrypt(struct skcipher_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ rctx->enc = 1;
+ return dthe_aes_crypt(req);
+}
+
+static int dthe_aes_decrypt(struct skcipher_request *req)
+{
+ struct dthe_aes_req_ctx *rctx = skcipher_request_ctx(req);
+
+ rctx->enc = 0;
+ return dthe_aes_crypt(req);
+}
+
+static struct skcipher_engine_alg cipher_algs[] = {
+ {
+ .base.init = dthe_cipher_init_tfm,
+ .base.setkey = dthe_aes_ecb_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.base = {
+ .cra_name = "ecb(aes)",
+ .cra_driver_name = "ecb-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* ECB AES */
+ {
+ .base.init = dthe_cipher_init_tfm,
+ .base.setkey = dthe_aes_cbc_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE,
+ .base.max_keysize = AES_MAX_KEY_SIZE,
+ .base.ivsize = AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* CBC AES */
+ {
+ .base.init = dthe_cipher_xts_init_tfm,
+ .base.exit = dthe_cipher_xts_exit_tfm,
+ .base.setkey = dthe_aes_xts_setkey,
+ .base.encrypt = dthe_aes_encrypt,
+ .base.decrypt = dthe_aes_decrypt,
+ .base.min_keysize = AES_MIN_KEY_SIZE * 2,
+ .base.max_keysize = AES_MAX_KEY_SIZE * 2,
+ .base.ivsize = AES_IV_SIZE,
+ .base.base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-dthev2",
+ .cra_priority = 299,
+ .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_alignmask = AES_BLOCK_SIZE - 1,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct dthe_tfm_ctx),
+ .cra_reqsize = sizeof(struct dthe_aes_req_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .op.do_one_request = dthe_aes_run,
+ }, /* XTS AES */
+};
+
+int dthe_register_aes_algs(void)
+{
+ return crypto_engine_register_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+}
+
+void dthe_unregister_aes_algs(void)
+{
+ crypto_engine_unregister_skciphers(cipher_algs, ARRAY_SIZE(cipher_algs));
+}
diff --git a/drivers/crypto/ti/dthev2-common.c b/drivers/crypto/ti/dthev2-common.c
new file mode 100644
index 000000000000..c39d37933b9e
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-common.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+
+#include "dthev2-common.h"
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+
+#define DRIVER_NAME "dthev2"
+
+static struct dthe_list dthe_dev_list = {
+ .dev_list = LIST_HEAD_INIT(dthe_dev_list.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(dthe_dev_list.lock),
+};
+
+struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx)
+{
+ struct dthe_data *dev_data;
+
+ if (ctx->dev_data)
+ return ctx->dev_data;
+
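+	/* Round-robin device selection: take the head of the list and
+	 * rotate it to the tail.
+	 */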
+ spin_lock_bh(&dthe_dev_list.lock);
+ dev_data = list_first_entry(&dthe_dev_list.dev_list, struct dthe_data, list);
+ if (dev_data)
+ list_move_tail(&dev_data->list, &dthe_dev_list.dev_list);
+ spin_unlock_bh(&dthe_dev_list.lock);
+
+ return dev_data;
+}
+
+static int dthe_dma_init(struct dthe_data *dev_data)
+{
+ int ret;
+ struct dma_slave_config cfg;
+
+ dev_data->dma_aes_rx = NULL;
+ dev_data->dma_aes_tx = NULL;
+ dev_data->dma_sha_tx = NULL;
+
+ dev_data->dma_aes_rx = dma_request_chan(dev_data->dev, "rx");
+ if (IS_ERR(dev_data->dma_aes_rx)) {
+ return dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_aes_rx),
+ "Unable to request rx DMA channel\n");
+ }
+
+ dev_data->dma_aes_tx = dma_request_chan(dev_data->dev, "tx1");
+ if (IS_ERR(dev_data->dma_aes_tx)) {
+ ret = dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_aes_tx),
+ "Unable to request tx1 DMA channel\n");
+ goto err_dma_aes_tx;
+ }
+
+ dev_data->dma_sha_tx = dma_request_chan(dev_data->dev, "tx2");
+ if (IS_ERR(dev_data->dma_sha_tx)) {
+ ret = dev_err_probe(dev_data->dev, PTR_ERR(dev_data->dma_sha_tx),
+ "Unable to request tx2 DMA channel\n");
+ goto err_dma_sha_tx;
+ }
+
+ memzero_explicit(&cfg, sizeof(cfg));
+
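+	/* The same slave config is reused: the src_* fields configure the
+	 * RX channel, the dst_* fields the TX channel.
+	 */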
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = 4;
+
+ ret = dmaengine_slave_config(dev_data->dma_aes_rx, &cfg);
+ if (ret) {
+ dev_err(dev_data->dev, "Can't configure IN dmaengine slave: %d\n", ret);
+ goto err_dma_config;
+ }
+
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_maxburst = 4;
+
+ ret = dmaengine_slave_config(dev_data->dma_aes_tx, &cfg);
+ if (ret) {
+ dev_err(dev_data->dev, "Can't configure OUT dmaengine slave: %d\n", ret);
+ goto err_dma_config;
+ }
+
+ return 0;
+
+err_dma_config:
+ dma_release_channel(dev_data->dma_sha_tx);
+err_dma_sha_tx:
+ dma_release_channel(dev_data->dma_aes_tx);
+err_dma_aes_tx:
+ dma_release_channel(dev_data->dma_aes_rx);
+
+ return ret;
+}
+
+static int dthe_register_algs(void)
+{
+ return dthe_register_aes_algs();
+}
+
+static void dthe_unregister_algs(void)
+{
+ dthe_unregister_aes_algs();
+}
+
+static int dthe_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dthe_data *dev_data;
+ int ret;
+
+ dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ dev_data->dev = dev;
+ dev_data->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dev_data->regs))
+ return PTR_ERR(dev_data->regs);
+
+ platform_set_drvdata(pdev, dev_data);
+
+ spin_lock(&dthe_dev_list.lock);
+ list_add(&dev_data->list, &dthe_dev_list.dev_list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ ret = dthe_dma_init(dev_data);
+ if (ret)
+ goto probe_dma_err;
+
+ dev_data->engine = crypto_engine_alloc_init(dev, 1);
+ if (!dev_data->engine) {
+ ret = -ENOMEM;
+ goto probe_engine_err;
+ }
+
+ ret = crypto_engine_start(dev_data->engine);
+ if (ret) {
+ dev_err(dev, "Failed to start crypto engine\n");
+ goto probe_engine_start_err;
+ }
+
+ ret = dthe_register_algs();
+ if (ret) {
+ dev_err(dev, "Failed to register algs\n");
+ goto probe_engine_start_err;
+ }
+
+ return 0;
+
+probe_engine_start_err:
+ crypto_engine_exit(dev_data->engine);
+probe_engine_err:
+ dma_release_channel(dev_data->dma_aes_rx);
+ dma_release_channel(dev_data->dma_aes_tx);
+ dma_release_channel(dev_data->dma_sha_tx);
+probe_dma_err:
+ spin_lock(&dthe_dev_list.lock);
+ list_del(&dev_data->list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ return ret;
+}
+
+static void dthe_remove(struct platform_device *pdev)
+{
+ struct dthe_data *dev_data = platform_get_drvdata(pdev);
+
+ spin_lock(&dthe_dev_list.lock);
+ list_del(&dev_data->list);
+ spin_unlock(&dthe_dev_list.lock);
+
+ dthe_unregister_algs();
+
+ crypto_engine_exit(dev_data->engine);
+
+ dma_release_channel(dev_data->dma_aes_rx);
+ dma_release_channel(dev_data->dma_aes_tx);
+ dma_release_channel(dev_data->dma_sha_tx);
+}
+
+static const struct of_device_id dthe_of_match[] = {
+ { .compatible = "ti,am62l-dthev2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dthe_of_match);
+
+static struct platform_driver dthe_driver = {
+ .probe = dthe_probe,
+ .remove = dthe_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dthe_of_match,
+ },
+};
+
+module_platform_driver(dthe_driver);
+
+MODULE_AUTHOR("T Pratham <t-pratham@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments DTHE V2 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ti/dthev2-common.h b/drivers/crypto/ti/dthev2-common.h
new file mode 100644
index 000000000000..c7a06a4c353f
--- /dev/null
+++ b/drivers/crypto/ti/dthev2-common.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * K3 DTHE V2 crypto accelerator driver
+ *
+ * Copyright (C) Texas Instruments 2025 - https://www.ti.com
+ * Author: T Pratham <t-pratham@ti.com>
+ */
+
+#ifndef __TI_DTHEV2_H__
+#define __TI_DTHEV2_H__
+
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/hash.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+
+#define DTHE_REG_SIZE 4
+#define DTHE_DMA_TIMEOUT_MS 2000
+/*
+ * Size of the largest possible key (across all algorithms) stored in dthe_tfm_ctx.
+ * This is currently the keysize of XTS-AES-256, which is 512 bits (64 bytes).
+ */
+#define DTHE_MAX_KEYSIZE (AES_MAX_KEY_SIZE * 2)
+
+enum dthe_aes_mode {
+ DTHE_AES_ECB = 0,
+ DTHE_AES_CBC,
+ DTHE_AES_XTS,
+};
+
+/* Driver specific struct definitions */
+
+/**
+ * struct dthe_data - DTHE_V2 driver instance data
+ * @dev: Device pointer
+ * @regs: Base address of the register space
+ * @list: list node for dev
+ * @engine: Crypto engine instance
+ * @dma_aes_rx: AES Rx DMA Channel
+ * @dma_aes_tx: AES Tx DMA Channel
+ * @dma_sha_tx: SHA Tx DMA Channel
+ */
+struct dthe_data {
+ struct device *dev;
+ void __iomem *regs;
+ struct list_head list;
+ struct crypto_engine *engine;
+
+ struct dma_chan *dma_aes_rx;
+ struct dma_chan *dma_aes_tx;
+
+ struct dma_chan *dma_sha_tx;
+};
+
+/**
+ * struct dthe_list - device data list head
+ * @dev_list: linked list head
+ * @lock: Spinlock protecting accesses to the list
+ */
+struct dthe_list {
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+/**
+ * struct dthe_tfm_ctx - Transform ctx struct containing ctx for all sub-components of DTHE V2
+ * @dev_data: Device data struct pointer
+ * @keylen: AES key length
+ * @key: AES key
+ * @aes_mode: AES mode
+ * @skcipher_fb: Fallback crypto skcipher handle for AES-XTS mode
+ */
+struct dthe_tfm_ctx {
+ struct dthe_data *dev_data;
+ unsigned int keylen;
+ u32 key[DTHE_MAX_KEYSIZE / sizeof(u32)];
+ enum dthe_aes_mode aes_mode;
+ struct crypto_sync_skcipher *skcipher_fb;
+};
+
+/**
+ * struct dthe_aes_req_ctx - AES engine req ctx struct
+ * @enc: flag indicating encryption or decryption operation
+ * @aes_compl: Completion variable for use in manual completion in case of DMA callback failure
+ */
+struct dthe_aes_req_ctx {
+ int enc;
+ struct completion aes_compl;
+};
+
+/* Struct definitions end */
+
+struct dthe_data *dthe_get_dev(struct dthe_tfm_ctx *ctx);
+
+int dthe_register_aes_algs(void);
+void dthe_unregister_aes_algs(void);
+
+#endif
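
A user-space sketch of the goto-unwind error handling used in dthe_dma_init() and dthe_probe() above: each acquisition gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. acquire()/free() stand in for dma_request_chan()/dma_release_channel(); names and the simulated failure are illustrative.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for dma_request_chan(); fail selects the error path */
static void *acquire(const char *name, int fail)
{
	if (fail)
		return NULL;
	printf("acquired %s\n", name);
	return malloc(1);
}

static int init(void)
{
	void *rx, *tx1, *tx2;

	rx = acquire("rx", 0);
	if (!rx)
		return -1;

	tx1 = acquire("tx1", 0);
	if (!tx1)
		goto err_rx;

	tx2 = acquire("tx2", 1); /* simulated failure */
	if (!tx2)
		goto err_tx1;

	/* success: a real driver keeps the channels; freed here only to keep the demo leak-free */
	free(tx2);
	free(tx1);
	free(rx);
	return 0;

err_tx1:
	free(tx1); /* release in reverse order of acquisition */
err_rx:
	free(rx);
	return -1;
}

int main(void)
{
	return init() ? 1 : 0;
}
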
diff --git a/drivers/crypto/xilinx/Makefile b/drivers/crypto/xilinx/Makefile
index 730feff5b5f2..9b51636ef75e 100644
--- a/drivers/crypto/xilinx/Makefile
+++ b/drivers/crypto/xilinx/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_XILINX_TRNG) += xilinx-trng.o
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += zynqmp-aes-gcm.o
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_SHA3) += zynqmp-sha.o
diff --git a/drivers/crypto/xilinx/xilinx-trng.c b/drivers/crypto/xilinx/xilinx-trng.c
new file mode 100644
index 000000000000..db0fbb28ff32
--- /dev/null
+++ b/drivers/crypto/xilinx/xilinx-trng.c
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Versal True Random Number Generator driver
+ * Copyright (c) 2024 - 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <crypto/aes.h>
+#include <crypto/df_sp80090a.h>
+#include <crypto/internal/drbg.h>
+#include <crypto/internal/cipher.h>
+#include <crypto/internal/rng.h>
+
+/* TRNG Registers Offsets */
+#define TRNG_STATUS_OFFSET 0x4U
+#define TRNG_CTRL_OFFSET 0x8U
+#define TRNG_EXT_SEED_OFFSET 0x40U
+#define TRNG_PER_STRNG_OFFSET 0x80U
+#define TRNG_CORE_OUTPUT_OFFSET 0xC0U
+#define TRNG_RESET_OFFSET 0xD0U
+#define TRNG_OSC_EN_OFFSET 0xD4U
+
+/* Mask values */
+#define TRNG_RESET_VAL_MASK BIT(0)
+#define TRNG_OSC_EN_VAL_MASK BIT(0)
+#define TRNG_CTRL_PRNGSRST_MASK BIT(0)
+#define TRNG_CTRL_EUMODE_MASK BIT(8)
+#define TRNG_CTRL_TRSSEN_MASK BIT(2)
+#define TRNG_CTRL_PRNGSTART_MASK BIT(5)
+#define TRNG_CTRL_PRNGXS_MASK BIT(3)
+#define TRNG_CTRL_PRNGMODE_MASK BIT(7)
+#define TRNG_STATUS_DONE_MASK BIT(0)
+#define TRNG_STATUS_QCNT_MASK GENMASK(11, 9)
+#define TRNG_STATUS_QCNT_16_BYTES 0x800
+
+/* Sizes in bytes */
+#define TRNG_SEED_LEN_BYTES 48U
+#define TRNG_ENTROPY_SEED_LEN_BYTES 64U
+#define TRNG_SEC_STRENGTH_SHIFT 5U
+#define TRNG_SEC_STRENGTH_BYTES BIT(TRNG_SEC_STRENGTH_SHIFT)
+#define TRNG_BYTES_PER_REG 4U
+#define TRNG_RESET_DELAY 10
+#define TRNG_NUM_INIT_REGS 12U
+#define TRNG_READ_4_WORD 4
+#define TRNG_DATA_READ_DELAY 8000
+
+struct xilinx_rng {
+ void __iomem *rng_base;
+ struct device *dev;
+ unsigned char *scratchpadbuf;
+ struct crypto_aes_ctx *aesctx;
+ struct mutex lock; /* Protect access to TRNG device */
+ struct hwrng trng;
+};
+
+struct xilinx_rng_ctx {
+ struct xilinx_rng *rng;
+};
+
+static struct xilinx_rng *xilinx_rng_dev;
+
+static void xtrng_readwrite32(void __iomem *addr, u32 mask, u8 value)
+{
+ u32 val;
+
+ val = ioread32(addr);
+ val = (val & (~mask)) | (mask & value);
+ iowrite32(val, addr);
+}
+
+static void xtrng_trng_reset(void __iomem *addr)
+{
+ xtrng_readwrite32(addr + TRNG_RESET_OFFSET, TRNG_RESET_VAL_MASK, TRNG_RESET_VAL_MASK);
+ udelay(TRNG_RESET_DELAY);
+ xtrng_readwrite32(addr + TRNG_RESET_OFFSET, TRNG_RESET_VAL_MASK, 0);
+}
+
+static void xtrng_hold_reset(void __iomem *addr)
+{
+ xtrng_readwrite32(addr + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK,
+ TRNG_CTRL_PRNGSRST_MASK);
+ iowrite32(TRNG_RESET_VAL_MASK, addr + TRNG_RESET_OFFSET);
+ udelay(TRNG_RESET_DELAY);
+}
+
+static void xtrng_softreset(struct xilinx_rng *rng)
+{
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK,
+ TRNG_CTRL_PRNGSRST_MASK);
+ udelay(TRNG_RESET_DELAY);
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSRST_MASK, 0);
+}
+
+/* Return no. of bytes read */
+static size_t xtrng_readblock32(void __iomem *rng_base, __be32 *buf, int blocks32, bool wait)
+{
+ int read = 0, ret;
+ int timeout = 1;
+ int i, idx;
+ u32 val;
+
+ if (wait)
+ timeout = TRNG_DATA_READ_DELAY;
+
+ for (i = 0; i < (blocks32 * 2); i++) {
+ /* The TRNG core generates data 16 bytes at a time; read twice to complete a 32-byte read */
+ ret = readl_poll_timeout(rng_base + TRNG_STATUS_OFFSET, val,
+ (val & TRNG_STATUS_QCNT_MASK) ==
+ TRNG_STATUS_QCNT_16_BYTES, !!wait, timeout);
+ if (ret)
+ break;
+
+ for (idx = 0; idx < TRNG_READ_4_WORD; idx++) {
+ *(buf + read) = cpu_to_be32(ioread32(rng_base + TRNG_CORE_OUTPUT_OFFSET));
+ read += 1;
+ }
+ }
+ return read * 4;
+}
+
+static int xtrng_collect_random_data(struct xilinx_rng *rng, u8 *rand_gen_buf,
+ int no_of_random_bytes, bool wait)
+{
+ u8 randbuf[TRNG_SEC_STRENGTH_BYTES];
+ int byteleft, blocks, count = 0;
+ int ret;
+
+ byteleft = no_of_random_bytes & (TRNG_SEC_STRENGTH_BYTES - 1);
+ blocks = no_of_random_bytes >> TRNG_SEC_STRENGTH_SHIFT;
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK,
+ TRNG_CTRL_PRNGSTART_MASK);
+ if (blocks) {
+ ret = xtrng_readblock32(rng->rng_base, (__be32 *)rand_gen_buf, blocks, wait);
+ if (!ret)
+ return 0;
+ count += ret;
+ }
+
+ if (byteleft) {
+ ret = xtrng_readblock32(rng->rng_base, (__be32 *)randbuf, 1, wait);
+ if (!ret)
+ return count;
+ memcpy(rand_gen_buf + (blocks * TRNG_SEC_STRENGTH_BYTES), randbuf, byteleft);
+ count += byteleft;
+ }
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGSTART_MASK, 0U);
+
+ return count;
+}
+
+static void xtrng_write_multiple_registers(void __iomem *base_addr, u32 *values, size_t n)
+{
+ void __iomem *reg_addr;
+ size_t i;
+
+ /* Write the seed value into the EXTERNAL_SEED registers in big-endian format */
+ for (i = 0; i < n; i++) {
+ reg_addr = (base_addr + ((n - 1 - i) * TRNG_BYTES_PER_REG));
+ iowrite32((u32 __force)(cpu_to_be32(values[i])), reg_addr);
+ }
+}
+
+static void xtrng_enable_entropy(struct xilinx_rng *rng)
+{
+ iowrite32(TRNG_OSC_EN_VAL_MASK, rng->rng_base + TRNG_OSC_EN_OFFSET);
+ xtrng_softreset(rng);
+ iowrite32(TRNG_CTRL_EUMODE_MASK | TRNG_CTRL_TRSSEN_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
+}
+
+static int xtrng_reseed_internal(struct xilinx_rng *rng)
+{
+ u8 entropy[TRNG_ENTROPY_SEED_LEN_BYTES];
+ struct drbg_string data;
+ LIST_HEAD(seedlist);
+ u32 val;
+ int ret;
+
+ drbg_string_fill(&data, entropy, TRNG_SEED_LEN_BYTES);
+ list_add_tail(&data.list, &seedlist);
+ memset(entropy, 0, sizeof(entropy));
+ xtrng_enable_entropy(rng);
+
+ /* collect random data to use it as entropy (input for DF) */
+ ret = xtrng_collect_random_data(rng, entropy, TRNG_SEED_LEN_BYTES, true);
+ if (ret != TRNG_SEED_LEN_BYTES)
+ return -EINVAL;
+ ret = crypto_drbg_ctr_df(rng->aesctx, rng->scratchpadbuf,
+ TRNG_SEED_LEN_BYTES, &seedlist, AES_BLOCK_SIZE,
+ TRNG_SEED_LEN_BYTES);
+ if (ret)
+ return ret;
+
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET,
+ (u32 *)rng->scratchpadbuf, TRNG_NUM_INIT_REGS);
+ /* select reseed operation */
+ iowrite32(TRNG_CTRL_PRNGXS_MASK, rng->rng_base + TRNG_CTRL_OFFSET);
+
+ /* Start the reseed operation with the above configuration and wait for the STATUS.Done
+ * bit to be set. Monitor the STATUS.CERTF bit: if set, the SP800-90B entropy health test
+ * has failed.
+ */
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK,
+ TRNG_CTRL_PRNGSTART_MASK);
+
+ ret = readl_poll_timeout(rng->rng_base + TRNG_STATUS_OFFSET, val,
+ (val & TRNG_STATUS_DONE_MASK) == TRNG_STATUS_DONE_MASK,
+ 1U, 15000U);
+ if (ret)
+ return ret;
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET, TRNG_CTRL_PRNGSTART_MASK, 0U);
+
+ return 0;
+}
+
+static int xtrng_random_bytes_generate(struct xilinx_rng *rng, u8 *rand_buf_ptr,
+ u32 rand_buf_size, int wait)
+{
+ int nbytes;
+ int ret;
+
+ xtrng_readwrite32(rng->rng_base + TRNG_CTRL_OFFSET,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGXS_MASK,
+ TRNG_CTRL_PRNGMODE_MASK | TRNG_CTRL_PRNGXS_MASK);
+ nbytes = xtrng_collect_random_data(rng, rand_buf_ptr, rand_buf_size, wait);
+
+ ret = xtrng_reseed_internal(rng);
+ if (ret) {
+ dev_err(rng->dev, "Re-seed fail\n");
+ return ret;
+ }
+
+ return nbytes;
+}
+
+static int xtrng_trng_generate(struct crypto_rng *tfm, const u8 *src, u32 slen,
+ u8 *dst, u32 dlen)
+{
+ struct xilinx_rng_ctx *ctx = crypto_rng_ctx(tfm);
+ int ret;
+
+ mutex_lock(&ctx->rng->lock);
+ ret = xtrng_random_bytes_generate(ctx->rng, dst, dlen, true);
+ mutex_unlock(&ctx->rng->lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int xtrng_trng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
+{
+ return 0;
+}
+
+static int xtrng_trng_init(struct crypto_tfm *rtfm)
+{
+ struct xilinx_rng_ctx *ctx = crypto_tfm_ctx(rtfm);
+
+ ctx->rng = xilinx_rng_dev;
+
+ return 0;
+}
+
+static struct rng_alg xtrng_trng_alg = {
+ .generate = xtrng_trng_generate,
+ .seed = xtrng_trng_seed,
+ .seedsize = 0,
+ .base = {
+ .cra_name = "stdrng",
+ .cra_driver_name = "xilinx-trng",
+ .cra_priority = 300,
+ .cra_ctxsize = sizeof(struct xilinx_rng_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_init = xtrng_trng_init,
+ },
+};
+
+static int xtrng_hwrng_trng_read(struct hwrng *hwrng, void *data, size_t max, bool wait)
+{
+ u8 buf[TRNG_SEC_STRENGTH_BYTES];
+ struct xilinx_rng *rng;
+ int ret = -EINVAL, i = 0;
+
+ rng = container_of(hwrng, struct xilinx_rng, trng);
+ /* Bail out if the lock is unavailable and the caller cannot wait. */
+ if (!mutex_trylock(&rng->lock)) {
+ if (!wait)
+ return 0;
+ mutex_lock(&rng->lock);
+ }
+
+ while (i < max) {
+ ret = xtrng_random_bytes_generate(rng, buf, TRNG_SEC_STRENGTH_BYTES, wait);
+ if (ret < 0)
+ break;
+
+ memcpy(data + i, buf, min_t(int, ret, (max - i)));
+ i += min_t(int, ret, (max - i));
+ }
+ mutex_unlock(&rng->lock);
+
+ return ret;
+}
+
+static int xtrng_hwrng_register(struct hwrng *trng)
+{
+ int ret;
+
+ trng->name = "Xilinx Versal Crypto Engine TRNG";
+ trng->read = xtrng_hwrng_trng_read;
+
+ ret = hwrng_register(trng);
+ if (ret)
+ pr_err("Fail to register the TRNG\n");
+
+ return ret;
+}
+
+static void xtrng_hwrng_unregister(struct hwrng *trng)
+{
+ hwrng_unregister(trng);
+}
+
+static int xtrng_probe(struct platform_device *pdev)
+{
+ struct xilinx_rng *rng;
+ size_t sb_size;
+ int ret;
+
+ rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL);
+ if (!rng)
+ return -ENOMEM;
+
+ rng->dev = &pdev->dev;
+ rng->rng_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rng->rng_base)) {
+ dev_err(&pdev->dev, "Failed to map resource %pe\n", rng->rng_base);
+ return PTR_ERR(rng->rng_base);
+ }
+
+ rng->aesctx = devm_kzalloc(&pdev->dev, sizeof(*rng->aesctx), GFP_KERNEL);
+ if (!rng->aesctx)
+ return -ENOMEM;
+
+ sb_size = crypto_drbg_ctr_df_datalen(TRNG_SEED_LEN_BYTES, AES_BLOCK_SIZE);
+ rng->scratchpadbuf = devm_kzalloc(&pdev->dev, sb_size, GFP_KERNEL);
+ if (!rng->scratchpadbuf) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ xtrng_trng_reset(rng->rng_base);
+ ret = xtrng_reseed_internal(rng);
+ if (ret) {
+ dev_err(&pdev->dev, "TRNG Seed fail\n");
+ goto end;
+ }
+
+ xilinx_rng_dev = rng;
+ mutex_init(&rng->lock);
+ ret = crypto_register_rng(&xtrng_trng_alg);
+ if (ret) {
+ dev_err(&pdev->dev, "Crypto Random device registration failed: %d\n", ret);
+ goto end;
+ }
+
+ ret = xtrng_hwrng_register(&rng->trng);
+ if (ret) {
+ dev_err(&pdev->dev, "HWRNG device registration failed: %d\n", ret);
+ goto crypto_rng_free;
+ }
+ platform_set_drvdata(pdev, rng);
+
+ return 0;
+
+crypto_rng_free:
+ crypto_unregister_rng(&xtrng_trng_alg);
+
+end:
+ return ret;
+}
+
+static void xtrng_remove(struct platform_device *pdev)
+{
+ struct xilinx_rng *rng;
+ u32 zero[TRNG_NUM_INIT_REGS] = { };
+
+ rng = platform_get_drvdata(pdev);
+ xtrng_hwrng_unregister(&rng->trng);
+ crypto_unregister_rng(&xtrng_trng_alg);
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_EXT_SEED_OFFSET, zero,
+ TRNG_NUM_INIT_REGS);
+ xtrng_write_multiple_registers(rng->rng_base + TRNG_PER_STRNG_OFFSET, zero,
+ TRNG_NUM_INIT_REGS);
+ xtrng_hold_reset(rng->rng_base);
+ xilinx_rng_dev = NULL;
+}
+
+static const struct of_device_id xtrng_of_match[] = {
+ { .compatible = "xlnx,versal-trng", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xtrng_of_match);
+
+static struct platform_driver xtrng_driver = {
+ .driver = {
+ .name = "xlnx,versal-trng",
+ .of_match_table = xtrng_of_match,
+ },
+ .probe = xtrng_probe,
+ .remove = xtrng_remove,
+};
+
+module_platform_driver(xtrng_driver);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harsh Jain <h.jain@amd.com>");
+MODULE_AUTHOR("Mounika Botcha <mounika.botcha@amd.com>");
+MODULE_DESCRIPTION("True Random Number Generator Driver");
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index 712624cba2b6..77ac940e3013 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -11,38 +11,52 @@
#include "cxlpci.h"
#include "cxl.h"
-struct cxl_cxims_data {
- int nr_maps;
- u64 xormaps[] __counted_by(nr_maps);
-};
-
static const guid_t acpi_cxl_qtg_id_guid =
GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
+#define HBIW_TO_NR_MAPS_SIZE (CXL_DECODER_MAX_INTERLEAVE + 1)
+static const int hbiw_to_nr_maps[HBIW_TO_NR_MAPS_SIZE] = {
+ [1] = 0, [2] = 1, [3] = 0, [4] = 2, [6] = 1, [8] = 3, [12] = 2, [16] = 4
+};
+
+static const int valid_hbiw[] = { 1, 2, 3, 4, 6, 8, 12, 16 };
-static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
+u64 cxl_do_xormap_calc(struct cxl_cxims_data *cximsd, u64 addr, int hbiw)
{
- struct cxl_cxims_data *cximsd = cxlrd->platform_data;
- int hbiw = cxlrd->cxlsd.nr_targets;
+ int nr_maps_to_apply = -1;
u64 val;
int pos;
- /* No xormaps for host bridge interleave ways of 1 or 3 */
- if (hbiw == 1 || hbiw == 3)
- return hpa;
+ /*
+ * Strictly validate hbiw: this function is also exercised by test
+ * code, so its parameters cannot be assumed to come from the trusted
+ * CXL Region Driver.
+ */
+ for (int i = 0; i < ARRAY_SIZE(valid_hbiw); i++) {
+ if (valid_hbiw[i] == hbiw) {
+ nr_maps_to_apply = hbiw_to_nr_maps[hbiw];
+ break;
+ }
+ }
+ if (nr_maps_to_apply == -1 || nr_maps_to_apply > cximsd->nr_maps)
+ return ULLONG_MAX;
/*
- * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) restore
- * the position bit to its value before the xormap was applied at
- * HPA->DPA translation.
+ * In regions using XOR interleave arithmetic the CXL HPA may not
+ * be the same as the SPA. This helper performs both the SPA->CXL HPA
+ * and the CXL HPA->SPA translations: because XOR is self-inverting,
+ * so is this function.
+ *
+ * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) applying the
+ * xormaps will toggle a position bit.
*
* pos is the lowest set bit in an XORMAP
- * val is the XORALLBITS(HPA & XORMAP)
+ * val is the XORALLBITS(addr & XORMAP)
*
* XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
* as an operation that outputs a single bit by XORing all the
- * bits in the input (hpa & xormap). Implement XORALLBITS using
+ * bits in the input (addr & xormap). Implement XORALLBITS using
* hweight64(). If the hamming weight is even the XOR of those
* bits results in val==0, if odd the XOR result is val==1.
*/
@@ -51,11 +65,19 @@ static u64 cxl_xor_hpa_to_spa(struct cxl_root_decoder *cxlrd, u64 hpa)
if (!cximsd->xormaps[i])
continue;
pos = __ffs(cximsd->xormaps[i]);
- val = (hweight64(hpa & cximsd->xormaps[i]) & 1);
- hpa = (hpa & ~(1ULL << pos)) | (val << pos);
+ val = (hweight64(addr & cximsd->xormaps[i]) & 1);
+ addr = (addr & ~(1ULL << pos)) | (val << pos);
}
- return hpa;
+ return addr;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_do_xormap_calc, "cxl_translate");
+
+static u64 cxl_apply_xor_maps(struct cxl_root_decoder *cxlrd, u64 addr)
+{
+ struct cxl_cxims_data *cximsd = cxlrd->platform_data;
+
+ return cxl_do_xormap_calc(cximsd, addr, cxlrd->cxlsd.nr_targets);
}
struct cxl_cxims_context {
@@ -113,9 +135,9 @@ static unsigned long cfmws_to_decoder_flags(int restrictions)
{
unsigned long flags = CXL_DECODER_F_ENABLE;
- if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE2)
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_DEVMEM)
flags |= CXL_DECODER_F_TYPE2;
- if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_TYPE3)
+ if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_HOSTONLYMEM)
flags |= CXL_DECODER_F_TYPE3;
if (restrictions & ACPI_CEDT_CFMWS_RESTRICT_VOLATILE)
flags |= CXL_DECODER_F_RAM;
@@ -345,12 +367,12 @@ static int cxl_acpi_set_cache_size(struct cxl_root_decoder *cxlrd)
struct resource res;
int nid, rc;
- res = DEFINE_RES(start, size, 0);
+ res = DEFINE_RES_MEM(start, size);
nid = phys_to_target_node(start);
rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
if (rc)
- return rc;
+ return 0;
/*
* The cache range is expected to be within the CFMWS.
@@ -375,21 +397,18 @@ static void cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
int rc;
rc = cxl_acpi_set_cache_size(cxlrd);
- if (!rc)
- return;
-
- if (rc != -EOPNOTSUPP) {
+ if (rc) {
/*
- * Failing to support extended linear cache region resize does not
+ * Failing to retrieve the extended linear cache size does not
* prevent the region from functioning; it only causes 'cxl list'
* to show an incorrect region size.
*/
dev_warn(cxlrd->cxlsd.cxld.dev.parent,
- "Extended linear cache calculation failed rc:%d\n", rc);
- }
+ "Extended linear cache retrieval failed rc:%d\n", rc);
- /* Ignoring return code */
- cxlrd->cache_size = 0;
+ /* Ignoring return code */
+ cxlrd->cache_size = 0;
+ }
}
DEFINE_FREE(put_cxlrd, struct cxl_root_decoder *,
@@ -398,7 +417,6 @@ DEFINE_FREE(del_cxl_resource, struct resource *, if (_T) del_cxl_resource(_T))
static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
struct cxl_cfmws_context *ctx)
{
- int target_map[CXL_DECODER_MAX_INTERLEAVE];
struct cxl_port *root_port = ctx->root_port;
struct cxl_cxims_context cxims_ctx;
struct device *dev = ctx->dev;
@@ -416,8 +434,6 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
rc = eig_to_granularity(cfmws->granularity, &ig);
if (rc)
return rc;
- for (i = 0; i < ways; i++)
- target_map[i] = cfmws->interleave_targets[i];
struct resource *res __free(del_cxl_resource) = alloc_cxl_resource(
cfmws->base_hpa, cfmws->window_size, ctx->id++);
@@ -443,6 +459,8 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
.end = cfmws->base_hpa + cfmws->window_size - 1,
};
cxld->interleave_ways = ways;
+ for (i = 0; i < ways; i++)
+ cxld->target_map[i] = cfmws->interleave_targets[i];
/*
* Minimize the x1 granularity to advertise support for any
* valid region granularity
@@ -451,8 +469,6 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
ig = CXL_DECODER_MIN_GRANULARITY;
cxld->interleave_granularity = ig;
- cxl_setup_extended_linear_cache(cxlrd);
-
if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR) {
if (ways != 1 && ways != 3) {
cxims_ctx = (struct cxl_cxims_context) {
@@ -468,14 +484,15 @@ static int __cxl_parse_cfmws(struct acpi_cedt_cfmws *cfmws,
return -EINVAL;
}
}
+ cxlrd->ops.hpa_to_spa = cxl_apply_xor_maps;
+ cxlrd->ops.spa_to_hpa = cxl_apply_xor_maps;
}
- cxlrd->qos_class = cfmws->qtg_id;
+ cxl_setup_extended_linear_cache(cxlrd);
- if (cfmws->interleave_arithmetic == ACPI_CEDT_CFMWS_ARITHMETIC_XOR)
- cxlrd->hpa_to_spa = cxl_xor_hpa_to_spa;
+ cxlrd->qos_class = cfmws->qtg_id;
- rc = cxl_decoder_add(cxld, target_map);
+ rc = cxl_decoder_add(cxld);
if (rc)
return rc;
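
A runnable user-space sketch of the XORALLBITS operation cxl_do_xormap_calc() implements above: for each xormap, compute the parity (XOR of all bits) of addr & xormap and write it into the map's lowest set bit. Applying the same maps a second time restores the original address, which is why one helper serves both translation directions. The map and address values below are made up for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t apply_xormaps(uint64_t addr, const uint64_t *maps, int n)
{
	for (int i = 0; i < n; i++) {
		if (!maps[i])
			continue;
		int pos = __builtin_ctzll(maps[i]);            /* __ffs() */
		uint64_t val = __builtin_popcountll(addr & maps[i]) & 1;
		addr = (addr & ~(1ULL << pos)) | (val << pos); /* write parity bit */
	}
	return addr;
}

int main(void)
{
	const uint64_t maps[] = { 0x10100ULL, 0x20200ULL }; /* hypothetical xormaps */
	uint64_t hpa = 0x10100ULL;
	uint64_t spa = apply_xormaps(hpa, maps, 2);

	/* self-inverting: a second application restores the input */
	assert(apply_xormaps(spa, maps, 2) == hpa);
	printf("hpa %#llx <-> spa %#llx\n",
	       (unsigned long long)hpa, (unsigned long long)spa);
	return 0;
}
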
diff --git a/drivers/cxl/core/cdat.c b/drivers/cxl/core/cdat.c
index c0af645425f4..7120b5f2e31f 100644
--- a/drivers/cxl/core/cdat.c
+++ b/drivers/cxl/core/cdat.c
@@ -338,7 +338,7 @@ static int match_cxlrd_hb(struct device *dev, void *data)
guard(rwsem_read)(&cxl_rwsem.region);
for (int i = 0; i < cxlsd->nr_targets; i++) {
- if (host_bridge == cxlsd->target[i]->dport_dev)
+ if (cxlsd->target[i] && host_bridge == cxlsd->target[i]->dport_dev)
return 1;
}
@@ -440,8 +440,8 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
} *tbl = (struct acpi_cdat_sslbis_table *)header;
int size = sizeof(header->cdat) + sizeof(tbl->sslbis_header);
struct acpi_cdat_sslbis *sslbis;
- struct cxl_port *port = arg;
- struct device *dev = &port->dev;
+ struct cxl_dport *dport = arg;
+ struct device *dev = &dport->port->dev;
int remain, entries, i;
u16 len;
@@ -467,8 +467,6 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
u16 y = le16_to_cpu((__force __le16)tbl->entries[i].porty_id);
__le64 le_base;
__le16 le_val;
- struct cxl_dport *dport;
- unsigned long index;
u16 dsp_id;
u64 val;
@@ -499,28 +497,27 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
sslbis->data_type);
- xa_for_each(&port->dports, index, dport) {
- if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
- dsp_id == dport->port_id) {
- cxl_access_coordinate_set(dport->coord,
- sslbis->data_type,
- val);
- }
+ if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
+ dsp_id == dport->port_id) {
+ cxl_access_coordinate_set(dport->coord,
+ sslbis->data_type, val);
+ return 0;
}
}
return 0;
}
-void cxl_switch_parse_cdat(struct cxl_port *port)
+void cxl_switch_parse_cdat(struct cxl_dport *dport)
{
+ struct cxl_port *port = dport->port;
int rc;
if (!port->cdat.table)
return;
rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
- port, port->cdat.table, port->cdat.length);
+ dport, port->cdat.table, port->cdat.length);
rc = cdat_table_parse_output(rc);
if (rc)
dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
@@ -829,7 +826,7 @@ static struct xarray *cxl_switch_gather_bandwidth(struct cxl_region *cxlr,
cxl_coordinates_combine(coords, coords, ctx->coord);
/*
- * Take the min of the calculated bandwdith and the upstream
+ * Take the min of the calculated bandwidth and the upstream
* switch SSLBIS bandwidth if there's a parent switch
*/
if (!is_root)
@@ -952,7 +949,7 @@ static struct xarray *cxl_hb_gather_bandwidth(struct xarray *xa)
/**
* cxl_region_update_bandwidth - Update the bandwidth access coordinates of a region
* @cxlr: The region being operated on
- * @input_xa: xarray holds cxl_perf_ctx wht calculated bandwidth per ACPI0017 instance
+ * @input_xa: xarray holds cxl_perf_ctx with calculated bandwidth per ACPI0017 instance
*/
static void cxl_region_update_bandwidth(struct cxl_region *cxlr,
struct xarray *input_xa)
@@ -1075,14 +1072,3 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
}
}
-
-int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
- enum access_coordinate_class access)
-{
- return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
-}
-
-bool cxl_need_node_perf_attrs_update(int nid)
-{
- return !acpi_node_backed_by_real_pxm(nid);
-}
diff --git a/drivers/cxl/core/core.h b/drivers/cxl/core/core.h
index 2669f251d677..1fb66132b777 100644
--- a/drivers/cxl/core/core.h
+++ b/drivers/cxl/core/core.h
@@ -135,11 +135,12 @@ enum cxl_poison_trace_type {
CXL_POISON_TRACE_CLEAR,
};
+enum poison_cmd_enabled_bits;
+bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
+ enum poison_cmd_enabled_bits cmd);
+
long cxl_pci_get_latency(struct pci_dev *pdev);
int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
-int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
- enum access_coordinate_class access);
-bool cxl_need_node_perf_attrs_update(int nid);
int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
struct access_coordinate *c);
@@ -147,6 +148,11 @@ int cxl_ras_init(void);
void cxl_ras_exit(void);
int cxl_gpf_port_setup(struct cxl_dport *dport);
+struct cxl_hdm;
+int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info);
+int cxl_port_get_possible_dports(struct cxl_port *port);
+
#ifdef CONFIG_CXL_FEATURES
struct cxl_feat_entry *
cxl_feature_info(struct cxl_features_state *cxlfs, const uuid_t *uuid);
diff --git a/drivers/cxl/core/features.c b/drivers/cxl/core/features.c
index 7c750599ea69..4bc484b46f43 100644
--- a/drivers/cxl/core/features.c
+++ b/drivers/cxl/core/features.c
@@ -371,6 +371,9 @@ cxl_feature_info(struct cxl_features_state *cxlfs,
{
struct cxl_feat_entry *feat;
+ if (!cxlfs || !cxlfs->entries)
+ return ERR_PTR(-EOPNOTSUPP);
+
for (int i = 0; i < cxlfs->entries->num_features; i++) {
feat = &cxlfs->entries->ent[i];
if (uuid_equal(uuid, &feat->uuid))
diff --git a/drivers/cxl/core/hdm.c b/drivers/cxl/core/hdm.c
index e9e1d555cec6..1c5d2022c87a 100644
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -21,12 +21,11 @@ struct cxl_rwsem cxl_rwsem = {
.dpa = __RWSEM_INITIALIZER(cxl_rwsem.dpa),
};
-static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map)
+static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld)
{
int rc;
- rc = cxl_decoder_add_locked(cxld, target_map);
+ rc = cxl_decoder_add_locked(cxld);
if (rc) {
put_device(&cxld->dev);
dev_err(&port->dev, "Failed to add decoder\n");
@@ -50,12 +49,9 @@ static int add_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
* are claimed and passed to the single dport. Disable the range until the first
* CXL region is enumerated / activated.
*/
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
+static int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
{
struct cxl_switch_decoder *cxlsd;
- struct cxl_dport *dport = NULL;
- int single_port_map[1];
- unsigned long index;
struct cxl_hdm *cxlhdm = dev_get_drvdata(&port->dev);
/*
@@ -71,13 +67,8 @@ int devm_cxl_add_passthrough_decoder(struct cxl_port *port)
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport)
- break;
- single_port_map[0] = dport->port_id;
-
- return add_hdm_decoder(port, &cxlsd->cxld, single_port_map);
+ return add_hdm_decoder(port, &cxlsd->cxld);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_add_passthrough_decoder, "CXL");
static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
{
@@ -147,8 +138,8 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
* @port: cxl_port to map
* @info: cached DVSEC range register info
*/
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info)
+static struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
+ struct cxl_endpoint_dvsec_info *info)
{
struct cxl_register_map *reg_map = &port->reg_map;
struct device *dev = &port->dev;
@@ -197,13 +188,12 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
*/
if (should_emulate_decoders(info)) {
dev_dbg(dev, "Fallback map %d range register%s\n", info->ranges,
- info->ranges > 1 ? "s" : "");
+ str_plural(info->ranges));
cxlhdm->decoder_count = info->ranges;
}
return cxlhdm;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_hdm, "CXL");
static void __cxl_dpa_debug(struct seq_file *file, struct resource *r, int depth)
{
@@ -915,6 +905,9 @@ static void cxl_decoder_reset(struct cxl_decoder *cxld)
if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
return;
+ if (test_bit(CXL_DECODER_F_LOCK, &cxld->flags))
+ return;
+
if (port->commit_end == id)
cxl_port_commit_reap(cxld);
else
@@ -984,7 +977,7 @@ static int cxl_setup_hdm_decoder_from_dvsec(
}
static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
- int *target_map, void __iomem *hdm, int which,
+ void __iomem *hdm, int which,
u64 *dpa_base, struct cxl_endpoint_dvsec_info *info)
{
struct cxl_endpoint_decoder *cxled = NULL;
@@ -1103,7 +1096,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
hi = readl(hdm + CXL_HDM_DECODER0_TL_HIGH(which));
target_list.value = (hi << 32) + lo;
for (i = 0; i < cxld->interleave_ways; i++)
- target_map[i] = target_list.target_id[i];
+ cxld->target_map[i] = target_list.target_id[i];
return 0;
}
@@ -1168,8 +1161,8 @@ static void cxl_settle_decoders(struct cxl_hdm *cxlhdm)
* @cxlhdm: Structure to populate with HDM capabilities
* @info: cached DVSEC range register info
*/
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info)
+static int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
+ struct cxl_endpoint_dvsec_info *info)
{
void __iomem *hdm = cxlhdm->regs.hdm_decoder;
struct cxl_port *port = cxlhdm->port;
@@ -1179,7 +1172,6 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxl_settle_decoders(cxlhdm);
for (i = 0; i < cxlhdm->decoder_count; i++) {
- int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
int rc, target_count = cxlhdm->target_count;
struct cxl_decoder *cxld;
@@ -1207,8 +1199,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
cxld = &cxlsd->cxld;
}
- rc = init_hdm_decoder(port, cxld, target_map, hdm, i,
- &dpa_base, info);
+ rc = init_hdm_decoder(port, cxld, hdm, i, &dpa_base, info);
if (rc) {
dev_warn(&port->dev,
"Failed to initialize decoder%d.%d\n",
@@ -1216,7 +1207,7 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
put_device(&cxld->dev);
return rc;
}
- rc = add_hdm_decoder(port, cxld, target_map);
+ rc = add_hdm_decoder(port, cxld);
if (rc) {
dev_warn(&port->dev,
"Failed to add decoder%d.%d\n", port->id, i);
@@ -1226,4 +1217,71 @@ int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
return 0;
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_enumerate_decoders, "CXL");
+
+/**
+ * __devm_cxl_switch_port_decoders_setup - allocate and setup switch decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_hdm *cxlhdm;
+
+ if (is_cxl_root(port) || is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ cxlhdm = devm_cxl_setup_hdm(port, NULL);
+ if (!IS_ERR(cxlhdm))
+ return devm_cxl_enumerate_decoders(cxlhdm, NULL);
+
+ if (PTR_ERR(cxlhdm) != -ENODEV) {
+ dev_err(&port->dev, "Failed to map HDM decoder capability\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ if (cxl_port_get_possible_dports(port) == 1) {
+ dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
+ return devm_cxl_add_passthrough_decoder(port);
+ }
+
+ dev_err(&port->dev, "HDM decoder capability not found\n");
+ return -ENXIO;
+}
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_switch_port_decoders_setup, "CXL");
+
+/**
+ * devm_cxl_endpoint_decoders_setup - allocate and setup endpoint decoders
+ * @port: CXL port context
+ *
+ * Return: 0 on success or -errno on error
+ */
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port)
+{
+ struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
+ struct cxl_endpoint_dvsec_info info = { .port = port };
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ struct cxl_hdm *cxlhdm;
+ int rc;
+
+ if (!is_cxl_endpoint(port))
+ return -EOPNOTSUPP;
+
+ rc = cxl_dvsec_rr_decode(cxlds, &info);
+ if (rc < 0)
+ return rc;
+
+ cxlhdm = devm_cxl_setup_hdm(port, &info);
+ if (IS_ERR(cxlhdm)) {
+ if (PTR_ERR(cxlhdm) == -ENODEV)
+ dev_err(&port->dev, "HDM decoder registers not found\n");
+ return PTR_ERR(cxlhdm);
+ }
+
+ rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
+ if (rc)
+ return rc;
+
+ return devm_cxl_enumerate_decoders(cxlhdm, &info);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_endpoint_decoders_setup, "CXL");
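
The decoder-setup paths above lean on the kernel's ERR_PTR()/IS_ERR() convention: a single pointer return encodes either a valid object or a small negative errno, which is what lets __devm_cxl_switch_port_decoders_setup() dispatch on -ENODEV to the passthrough fallback. A user-space re-implementation of the idiom, for illustration only:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* error codes live in the top MAX_ERRNO addresses */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct hdm { int decoders; };

static struct hdm *setup_hdm(int have_regs)
{
	static struct hdm h = { .decoders = 4 };

	if (!have_regs)
		return ERR_PTR(-ENODEV); /* no capability found */
	return &h;
}

int main(void)
{
	struct hdm *hdm = setup_hdm(0);

	if (IS_ERR(hdm)) {
		/* -ENODEV selects the passthrough fallback in the driver */
		printf("setup failed: %ld\n", PTR_ERR(hdm));
		hdm = setup_hdm(1);
	}
	if (!IS_ERR(hdm))
		printf("%d decoders\n", hdm->decoders);
	return 0;
}
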
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index c569e00a511f..e370d733e440 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -200,6 +200,14 @@ static ssize_t security_erase_store(struct device *dev,
static struct device_attribute dev_attr_security_erase =
__ATTR(erase, 0200, NULL, security_erase_store);
+bool cxl_memdev_has_poison_cmd(struct cxl_memdev *cxlmd,
+ enum poison_cmd_enabled_bits cmd)
+{
+ struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+
+ return test_bit(cmd, mds->poison.enabled_cmds);
+}
+
static int cxl_get_poison_by_memdev(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
@@ -276,7 +284,7 @@ static int cxl_validate_poison_dpa(struct cxl_memdev *cxlmd, u64 dpa)
return 0;
}
-int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_inject_poison inject;
@@ -288,13 +296,8 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
- if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
- return rc;
-
- ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
- if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
- return rc;
+ lockdep_assert_held(&cxl_rwsem.dpa);
+ lockdep_assert_held(&cxl_rwsem.region);
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
@@ -324,9 +327,24 @@ int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
return 0;
}
+
+int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ return cxl_inject_poison_locked(cxlmd, dpa);
+}
EXPORT_SYMBOL_NS_GPL(cxl_inject_poison, "CXL");
-int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa)
{
struct cxl_mailbox *cxl_mbox = &cxlmd->cxlds->cxl_mbox;
struct cxl_mbox_clear_poison clear;
@@ -338,13 +356,8 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
if (!IS_ENABLED(CONFIG_DEBUG_FS))
return 0;
- ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
- if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
- return rc;
-
- ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
- if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
- return rc;
+ lockdep_assert_held(&cxl_rwsem.dpa);
+ lockdep_assert_held(&cxl_rwsem.region);
rc = cxl_validate_poison_dpa(cxlmd, dpa);
if (rc)
@@ -383,6 +396,21 @@ int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
return 0;
}
+
+int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa)
+{
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ return cxl_clear_poison_locked(cxlmd, dpa);
+}
EXPORT_SYMBOL_NS_GPL(cxl_clear_poison, "CXL");
static struct attribute *cxl_memdev_attributes[] = {
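
A sketch of the *_locked split introduced above: the public entry point takes the locks, while callers that already hold them call the _locked variant, which only asserts the locking state. A pthread rwlock stands in for the CXL rwsems, and the assert plays the role of lockdep_assert_held(); the flag and names are illustrative.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t region_lock = PTHREAD_RWLOCK_INITIALIZER;
static int region_locked; /* debug-only stand-in for lockdep state */

static int inject_poison_locked(unsigned long long dpa)
{
	assert(region_locked); /* lockdep_assert_held() */
	printf("inject poison at %#llx\n", dpa);
	return 0;
}

static int inject_poison(unsigned long long dpa)
{
	int rc;

	pthread_rwlock_rdlock(&region_lock);
	region_locked = 1;
	rc = inject_poison_locked(dpa);
	region_locked = 0;
	pthread_rwlock_unlock(&region_lock);
	return rc;
}

int main(void)
{
	return inject_poison(0x1000);
}
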
diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c
index b50551601c2e..5b023a0178a4 100644
--- a/drivers/cxl/core/pci.c
+++ b/drivers/cxl/core/pci.c
@@ -24,84 +24,52 @@ static unsigned short media_ready_timeout = 60;
module_param(media_ready_timeout, ushort, 0644);
MODULE_PARM_DESC(media_ready_timeout, "seconds to wait for media ready");
-struct cxl_walk_context {
- struct pci_bus *bus;
- struct cxl_port *port;
+static int pci_get_port_num(struct pci_dev *pdev)
+{
+ u32 lnkcap;
int type;
- int error;
- int count;
-};
-static int match_add_dports(struct pci_dev *pdev, void *data)
-{
- struct cxl_walk_context *ctx = data;
- struct cxl_port *port = ctx->port;
- int type = pci_pcie_type(pdev);
- struct cxl_register_map map;
- struct cxl_dport *dport;
- u32 lnkcap, port_num;
- int rc;
+ type = pci_pcie_type(pdev);
+ if (type != PCI_EXP_TYPE_DOWNSTREAM && type != PCI_EXP_TYPE_ROOT_PORT)
+ return -EINVAL;
- if (pdev->bus != ctx->bus)
- return 0;
- if (!pci_is_pcie(pdev))
- return 0;
- if (type != ctx->type)
- return 0;
if (pci_read_config_dword(pdev, pci_pcie_cap(pdev) + PCI_EXP_LNKCAP,
&lnkcap))
- return 0;
-
- rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
- if (rc)
- dev_dbg(&port->dev, "failed to find component registers\n");
-
- port_num = FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
- dport = devm_cxl_add_dport(port, &pdev->dev, port_num, map.resource);
- if (IS_ERR(dport)) {
- ctx->error = PTR_ERR(dport);
- return PTR_ERR(dport);
- }
- ctx->count++;
+ return -ENXIO;
- return 0;
+ return FIELD_GET(PCI_EXP_LNKCAP_PN, lnkcap);
}
/**
- * devm_cxl_port_enumerate_dports - enumerate downstream ports of the upstream port
- * @port: cxl_port whose ->uport_dev is the upstream of dports to be enumerated
+ * __devm_cxl_add_dport_by_dev - allocate a dport by dport device
+ * @port: cxl_port that hosts the dport
+ * @dport_dev: 'struct device' of the dport
*
- * Returns a positive number of dports enumerated or a negative error
- * code.
+ * Returns the allocated dport on success or ERR_PTR() of -errno on error
*/
-int devm_cxl_port_enumerate_dports(struct cxl_port *port)
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev)
{
- struct pci_bus *bus = cxl_port_to_pci_bus(port);
- struct cxl_walk_context ctx;
- int type;
+ struct cxl_register_map map;
+ struct pci_dev *pdev;
+ int port_num, rc;
- if (!bus)
- return -ENXIO;
+ if (!dev_is_pci(dport_dev))
+ return ERR_PTR(-EINVAL);
- if (pci_is_root_bus(bus))
- type = PCI_EXP_TYPE_ROOT_PORT;
- else
- type = PCI_EXP_TYPE_DOWNSTREAM;
+ pdev = to_pci_dev(dport_dev);
+ port_num = pci_get_port_num(pdev);
+ if (port_num < 0)
+ return ERR_PTR(port_num);
- ctx = (struct cxl_walk_context) {
- .port = port,
- .bus = bus,
- .type = type,
- };
- pci_walk_bus(bus, match_add_dports, &ctx);
+ rc = cxl_find_regblock(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+ if (rc)
+ return ERR_PTR(rc);
- if (ctx.count == 0)
- return -ENODEV;
- if (ctx.error)
- return ctx.error;
- return ctx.count;
+ device_lock_assert(&port->dev);
+ return devm_cxl_add_dport(port, dport_dev, port_num, map.resource);
}
-EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, "CXL");
+EXPORT_SYMBOL_NS_GPL(__devm_cxl_add_dport_by_dev, "CXL");
static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
{
@@ -1169,3 +1137,53 @@ int cxl_gpf_port_setup(struct cxl_dport *dport)
return 0;
}
+
+struct cxl_walk_context {
+ struct pci_bus *bus;
+ struct cxl_port *port;
+ int type;
+ int error;
+ int count;
+};
+
+static int count_dports(struct pci_dev *pdev, void *data)
+{
+ struct cxl_walk_context *ctx = data;
+ int type = pci_pcie_type(pdev);
+
+ if (pdev->bus != ctx->bus)
+ return 0;
+ if (!pci_is_pcie(pdev))
+ return 0;
+ if (type != ctx->type)
+ return 0;
+
+ ctx->count++;
+ return 0;
+}
+
+int cxl_port_get_possible_dports(struct cxl_port *port)
+{
+ struct pci_bus *bus = cxl_port_to_pci_bus(port);
+ struct cxl_walk_context ctx;
+ int type;
+
+ if (!bus) {
+ dev_err(&port->dev, "No PCI bus found for port %s\n",
+ dev_name(&port->dev));
+ return -ENXIO;
+ }
+
+ if (pci_is_root_bus(bus))
+ type = PCI_EXP_TYPE_ROOT_PORT;
+ else
+ type = PCI_EXP_TYPE_DOWNSTREAM;
+
+ ctx = (struct cxl_walk_context) {
+ .bus = bus,
+ .type = type,
+ };
+ pci_walk_bus(bus, count_dports, &ctx);
+
+ return ctx.count;
+}
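
For reference, what pci_get_port_num() extracts above: the Port Number field occupies bits 31:24 of the PCIe Link Capabilities register (PCI_EXP_LNKCAP_PN is 0xff000000), and FIELD_GET() reduces to a mask and shift. The lnkcap value in this user-space sketch is made up.

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCAP_PN 0xff000000u

/* FIELD_GET(): shift the masked bits down to bit 0 */
static unsigned int field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t lnkcap = 0x03004c12; /* hypothetical register value */

	printf("port number = %u\n", field_get(PCI_EXP_LNKCAP_PN, lnkcap)); /* 3 */
	return 0;
}
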
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 29197376b18e..fef3aa0c6680 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -33,6 +33,15 @@
static DEFINE_IDA(cxl_port_ida);
static DEFINE_XARRAY(cxl_root_buses);
+/*
+ * The terminal device in PCI is NULL and @platform_bus
+ * for platform devices (for cxl_test)
+ */
+static bool is_cxl_host_bridge(struct device *dev)
+{
+ return (!dev || dev == &platform_bus);
+}
+
int cxl_num_decoders_committed(struct cxl_port *port)
{
lockdep_assert_held(&cxl_rwsem.region);
@@ -740,6 +749,7 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
xa_init(&port->dports);
xa_init(&port->endpoints);
xa_init(&port->regions);
+ port->component_reg_phys = CXL_RESOURCE_NONE;
device_initialize(dev);
lockdep_set_class_and_subclass(&dev->mutex, &cxl_port_key, port->depth);
@@ -858,9 +868,7 @@ static int cxl_port_add(struct cxl_port *port,
if (rc)
return rc;
- rc = cxl_port_setup_regs(port, component_reg_phys);
- if (rc)
- return rc;
+ port->component_reg_phys = component_reg_phys;
} else {
rc = dev_set_name(dev, "root%d", port->id);
if (rc)
@@ -1173,6 +1181,20 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (rc)
return ERR_PTR(rc);
+ /*
+ * Set up the port registers when the first dport shows up. Having
+ * a dport also means that there is at least one active link.
+ */
+ if (port->nr_dports == 1 &&
+ port->component_reg_phys != CXL_RESOURCE_NONE) {
+ rc = cxl_port_setup_regs(port, port->component_reg_phys);
+ if (rc) {
+ xa_erase(&port->dports, (unsigned long)dport->dport_dev);
+ return ERR_PTR(rc);
+ }
+ port->component_reg_phys = CXL_RESOURCE_NONE;
+ }
+
get_device(dport_dev);
rc = devm_add_action_or_reset(host, cxl_dport_remove, dport);
if (rc)
@@ -1348,21 +1370,6 @@ static struct cxl_port *find_cxl_port(struct device *dport_dev,
return port;
}
-static struct cxl_port *find_cxl_port_at(struct cxl_port *parent_port,
- struct device *dport_dev,
- struct cxl_dport **dport)
-{
- struct cxl_find_port_ctx ctx = {
- .dport_dev = dport_dev,
- .parent_port = parent_port,
- .dport = dport,
- };
- struct cxl_port *port;
-
- port = __find_cxl_port(&ctx);
- return port;
-}
-
/*
* All users of grandparent() are using it to walk PCIe-like switch port
* hierarchy. A PCIe switch is comprised of a bridge device representing the
@@ -1423,7 +1430,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_autoremove, "CXL");
* through ->remove(). This "bottom-up" removal selectively removes individual
* child ports manually. This depends on devm_cxl_add_port() to not change its
* devm action registration order, and for dports to have already been
- * destroyed by reap_dports().
+ * destroyed by del_dports().
*/
static void delete_switch_port(struct cxl_port *port)
{
@@ -1432,18 +1439,24 @@ static void delete_switch_port(struct cxl_port *port)
devm_release_action(port->dev.parent, unregister_port, port);
}
-static void reap_dports(struct cxl_port *port)
+static void del_dport(struct cxl_dport *dport)
+{
+ struct cxl_port *port = dport->port;
+
+ devm_release_action(&port->dev, cxl_dport_unlink, dport);
+ devm_release_action(&port->dev, cxl_dport_remove, dport);
+ devm_kfree(&port->dev, dport);
+}
+
+static void del_dports(struct cxl_port *port)
{
struct cxl_dport *dport;
unsigned long index;
device_lock_assert(&port->dev);
- xa_for_each(&port->dports, index, dport) {
- devm_release_action(&port->dev, cxl_dport_unlink, dport);
- devm_release_action(&port->dev, cxl_dport_remove, dport);
- devm_kfree(&port->dev, dport);
- }
+ xa_for_each(&port->dports, index, dport)
+ del_dport(dport);
}
struct detach_ctx {
@@ -1501,7 +1514,7 @@ static void cxl_detach_ep(void *data)
*/
died = true;
port->dead = true;
- reap_dports(port);
+ del_dports(port);
}
device_unlock(&port->dev);
@@ -1532,16 +1545,157 @@ static resource_size_t find_component_registers(struct device *dev)
return map.resource;
}
+static int match_port_by_uport(struct device *dev, const void *data)
+{
+ const struct device *uport_dev = data;
+ struct cxl_port *port;
+
+ if (!is_cxl_port(dev))
+ return 0;
+
+ port = to_cxl_port(dev);
+ return uport_dev == port->uport_dev;
+}
+
+/*
+ * Function takes a device reference on the port device. Caller should do a
+ * put_device() when done.
+ */
+static struct cxl_port *find_cxl_port_by_uport(struct device *uport_dev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&cxl_bus_type, NULL, uport_dev, match_port_by_uport);
+ if (dev)
+ return to_cxl_port(dev);
+ return NULL;
+}
+
+static int update_decoder_targets(struct device *dev, void *data)
+{
+ struct cxl_dport *dport = data;
+ struct cxl_switch_decoder *cxlsd;
+ struct cxl_decoder *cxld;
+ int i;
+
+ if (!is_switch_decoder(dev))
+ return 0;
+
+ cxlsd = to_cxl_switch_decoder(dev);
+ cxld = &cxlsd->cxld;
+ guard(rwsem_write)(&cxl_rwsem.region);
+
+ for (i = 0; i < cxld->interleave_ways; i++) {
+ if (cxld->target_map[i] == dport->port_id) {
+ cxlsd->target[i] = dport;
+ dev_dbg(dev, "dport%d found in target list, index %d\n",
+ dport->port_id, i);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+DEFINE_FREE(del_cxl_dport, struct cxl_dport *, if (!IS_ERR_OR_NULL(_T)) del_dport(_T))
+static struct cxl_dport *cxl_port_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+ int rc;
+
+ device_lock_assert(&port->dev);
+ if (!port->dev.driver)
+ return ERR_PTR(-ENXIO);
+
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (dport) {
+ dev_dbg(&port->dev, "dport%d:%s already exists\n",
+ dport->port_id, dev_name(dport_dev));
+ return ERR_PTR(-EBUSY);
+ }
+
+ struct cxl_dport *new_dport __free(del_cxl_dport) =
+ devm_cxl_add_dport_by_dev(port, dport_dev);
+ if (IS_ERR(new_dport))
+ return new_dport;
+
+ cxl_switch_parse_cdat(new_dport);
+
+ if (ida_is_empty(&port->decoder_ida)) {
+ rc = devm_cxl_switch_port_decoders_setup(port);
+ if (rc)
+ return ERR_PTR(rc);
+ dev_dbg(&port->dev, "first dport%d:%s added with decoders\n",
+ new_dport->port_id, dev_name(dport_dev));
+ return no_free_ptr(new_dport);
+ }
+
+ /* New dport added, update the decoder targets */
+ device_for_each_child(&port->dev, new_dport, update_decoder_targets);
+
+ dev_dbg(&port->dev, "dport%d:%s added\n", new_dport->port_id,
+ dev_name(dport_dev));
+
+ return no_free_ptr(new_dport);
+}
+
+static struct cxl_dport *devm_cxl_create_port(struct device *ep_dev,
+ struct cxl_port *parent_port,
+ struct cxl_dport *parent_dport,
+ struct device *uport_dev,
+ struct device *dport_dev)
+{
+ resource_size_t component_reg_phys;
+
+ device_lock_assert(&parent_port->dev);
+ if (!parent_port->dev.driver) {
+ dev_warn(ep_dev,
+ "port %s:%s:%s disabled, failed to enumerate CXL.mem\n",
+ dev_name(&parent_port->dev), dev_name(uport_dev),
+ dev_name(dport_dev));
+ }
+
+ struct cxl_port *port __free(put_cxl_port) =
+ find_cxl_port_by_uport(uport_dev);
+ if (!port) {
+ component_reg_phys = find_component_registers(uport_dev);
+ port = devm_cxl_add_port(&parent_port->dev, uport_dev,
+ component_reg_phys, parent_dport);
+ if (IS_ERR(port))
+ return ERR_CAST(port);
+
+ /*
+ * Retry to make sure a port is found; a port device
+ * reference is taken.
+ */
+ port = find_cxl_port_by_uport(uport_dev);
+ if (!port)
+ return ERR_PTR(-ENODEV);
+
+ dev_dbg(ep_dev, "created port %s:%s\n",
+ dev_name(&port->dev), dev_name(port->uport_dev));
+ } else {
+ /*
+ * The port was created right before this function was
+ * called. Signal the caller to deal with it.
+ */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ guard(device)(&port->dev);
+ return cxl_port_add_dport(port, dport_dev);
+}
+
static int add_port_attach_ep(struct cxl_memdev *cxlmd,
struct device *uport_dev,
struct device *dport_dev)
{
struct device *dparent = grandparent(dport_dev);
struct cxl_dport *dport, *parent_dport;
- resource_size_t component_reg_phys;
int rc;
- if (!dparent) {
+ if (is_cxl_host_bridge(dparent)) {
/*
* The iteration reached the topology root without finding the
* CXL-root 'cxl_port' on a previous iteration, fail for now to
@@ -1553,42 +1707,31 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
}
struct cxl_port *parent_port __free(put_cxl_port) =
- find_cxl_port(dparent, &parent_dport);
+ find_cxl_port_by_uport(dparent->parent);
if (!parent_port) {
/* iterate to create this parent_port */
return -EAGAIN;
}
- /*
- * Definition with __free() here to keep the sequence of
- * dereferencing the device of the port before the parent_port releasing.
- */
- struct cxl_port *port __free(put_cxl_port) = NULL;
scoped_guard(device, &parent_port->dev) {
- if (!parent_port->dev.driver) {
- dev_warn(&cxlmd->dev,
- "port %s:%s disabled, failed to enumerate CXL.mem\n",
- dev_name(&parent_port->dev), dev_name(uport_dev));
- return -ENXIO;
+ parent_dport = cxl_find_dport_by_dev(parent_port, dparent);
+ if (!parent_dport) {
+ parent_dport = cxl_port_add_dport(parent_port, dparent);
+ if (IS_ERR(parent_dport))
+ return PTR_ERR(parent_dport);
}
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port) {
- component_reg_phys = find_component_registers(uport_dev);
- port = devm_cxl_add_port(&parent_port->dev, uport_dev,
- component_reg_phys, parent_dport);
- if (IS_ERR(port))
- return PTR_ERR(port);
-
- /* retry find to pick up the new dport information */
- port = find_cxl_port_at(parent_port, dport_dev, &dport);
- if (!port)
- return -ENXIO;
+ dport = devm_cxl_create_port(&cxlmd->dev, parent_port,
+ parent_dport, uport_dev,
+ dport_dev);
+ if (IS_ERR(dport)) {
+ /* Port already exists, restart iteration */
+ if (PTR_ERR(dport) == -EAGAIN)
+ return 0;
+ return PTR_ERR(dport);
}
}
- dev_dbg(&cxlmd->dev, "add to new port %s:%s\n",
- dev_name(&port->dev), dev_name(port->uport_dev));
rc = cxl_add_ep(dport, &cxlmd->dev);
if (rc == -EBUSY) {
/*
@@ -1601,6 +1744,25 @@ static int add_port_attach_ep(struct cxl_memdev *cxlmd,
return rc;
}
+static struct cxl_dport *find_or_add_dport(struct cxl_port *port,
+ struct device *dport_dev)
+{
+ struct cxl_dport *dport;
+
+ device_lock_assert(&port->dev);
+ dport = cxl_find_dport_by_dev(port, dport_dev);
+ if (!dport) {
+ dport = cxl_port_add_dport(port, dport_dev);
+ if (IS_ERR(dport))
+ return dport;
+
+ /* New dport added, restart iteration */
+ return ERR_PTR(-EAGAIN);
+ }
+
+ return dport;
+}
+
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd)
{
struct device *dev = &cxlmd->dev;
@@ -1629,11 +1791,7 @@ retry:
struct device *uport_dev;
struct cxl_dport *dport;
- /*
- * The terminal "grandparent" in PCI is NULL and @platform_bus
- * for platform devices
- */
- if (!dport_dev || dport_dev == &platform_bus)
+ if (is_cxl_host_bridge(dport_dev))
return 0;
uport_dev = dport_dev->parent;
@@ -1647,12 +1805,26 @@ retry:
dev_name(iter), dev_name(dport_dev),
dev_name(uport_dev));
struct cxl_port *port __free(put_cxl_port) =
- find_cxl_port(dport_dev, &dport);
+ find_cxl_port_by_uport(uport_dev);
if (port) {
dev_dbg(&cxlmd->dev,
"found already registered port %s:%s\n",
dev_name(&port->dev),
dev_name(port->uport_dev));
+
+ /*
+ * A root port enumerated by cxl_acpi without a dport
+ * will have the dport added here.
+ */
+ scoped_guard(device, &port->dev) {
+ dport = find_or_add_dport(port, dport_dev);
+ if (IS_ERR(dport)) {
+ if (PTR_ERR(dport) == -EAGAIN)
+ goto retry;
+ return PTR_ERR(dport);
+ }
+ }
+
rc = cxl_add_ep(dport, &cxlmd->dev);
/*
@@ -1704,24 +1876,24 @@ struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, "CXL");
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
- struct cxl_port *port, int *target_map)
+ struct cxl_port *port)
{
+ struct cxl_decoder *cxld = &cxlsd->cxld;
int i;
- if (!target_map)
- return 0;
-
device_lock_assert(&port->dev);
if (xa_empty(&port->dports))
- return -EINVAL;
+ return 0;
guard(rwsem_write)(&cxl_rwsem.region);
for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
- struct cxl_dport *dport = find_dport(port, target_map[i]);
+ struct cxl_dport *dport = find_dport(port, cxld->target_map[i]);
- if (!dport)
- return -ENXIO;
+ if (!dport) {
+ /* dport may be activated later */
+ continue;
+ }
cxlsd->target[i] = dport;
}
@@ -1910,9 +2082,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
/**
* cxl_decoder_add_locked - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* Certain types of decoders may not have any targets. The main example of this
* is an endpoint device. A more awkward example is a hostbridge whose root
@@ -1926,7 +2095,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_endpoint_decoder_alloc, "CXL");
* Return: Negative error code if the decoder wasn't properly configured; else
* returns 0.
*/
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add_locked(struct cxl_decoder *cxld)
{
struct cxl_port *port;
struct device *dev;
@@ -1947,7 +2116,7 @@ int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map)
if (!is_endpoint_decoder(dev)) {
struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
- rc = decoder_populate_targets(cxlsd, port, target_map);
+ rc = decoder_populate_targets(cxlsd, port);
if (rc && (cxld->flags & CXL_DECODER_F_ENABLE)) {
dev_err(&port->dev,
"Failed to populate active decoder targets\n");
@@ -1966,9 +2135,6 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
/**
* cxl_decoder_add - Add a decoder with targets
* @cxld: The cxl decoder allocated by cxl_<type>_decoder_alloc()
- * @target_map: A list of downstream ports that this decoder can direct memory
- * traffic to. These numbers should correspond with the port number
- * in the PCIe Link Capabilities structure.
*
* This is the unlocked variant of cxl_decoder_add_locked().
* See cxl_decoder_add_locked().
@@ -1976,7 +2142,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_decoder_add_locked, "CXL");
* Context: Process context. Takes and releases the device lock of the port that
* owns the @cxld.
*/
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
+int cxl_decoder_add(struct cxl_decoder *cxld)
{
struct cxl_port *port;
@@ -1989,7 +2155,7 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
port = to_cxl_port(cxld->dev.parent);
guard(device)(&port->dev);
- return cxl_decoder_add_locked(cxld, target_map);
+ return cxl_decoder_add_locked(cxld);
}
EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, "CXL");
diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c
index 71cc42d05248..ae899f68551f 100644
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/memory.h>
@@ -10,6 +11,7 @@
#include <linux/sort.h>
#include <linux/idr.h>
#include <linux/memory-tiers.h>
+#include <linux/string_choices.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"
@@ -30,6 +32,12 @@
* 3. Decoder targets
*/
+/*
+ * Nodemask with a bit set per node once the access_coordinates for that
+ * node have been updated by the CXL memory hotplug notifier.
+ */
+static nodemask_t nodemask_region_seen = NODE_MASK_NONE;
+
static struct cxl_region *to_cxl_region(struct device *dev);
#define __ACCESS_ATTR_RO(_level, _name) { \
@@ -228,7 +236,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
return -ENXIO;
}
- cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+ if (!cxlr->params.res)
+ return -ENXIO;
+ cpu_cache_invalidate_memregion(cxlr->params.res->start,
+ resource_size(cxlr->params.res));
return 0;
}
@@ -237,6 +248,9 @@ static void cxl_region_decode_reset(struct cxl_region *cxlr, int count)
struct cxl_region_params *p = &cxlr->params;
int i;
+ if (test_bit(CXL_REGION_F_LOCK, &cxlr->flags))
+ return;
+
/*
* Before region teardown attempt to flush, evict any data cached for
* this region, or scream loudly about missing arch / platform support
@@ -411,6 +425,9 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
return len;
}
+ if (test_bit(CXL_REGION_F_LOCK, &cxlr->flags))
+ return -EPERM;
+
rc = queue_reset(cxlr);
if (rc)
return rc;
@@ -453,21 +470,6 @@ static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(commit);
-static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
- int n)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct cxl_region *cxlr = to_cxl_region(dev);
-
- /*
- * Support tooling that expects to find a 'uuid' attribute for all
- * regions regardless of mode.
- */
- if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_PARTMODE_PMEM)
- return 0444;
- return a->mode;
-}
-
static ssize_t interleave_ways_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -746,6 +748,21 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RW(size);
+static ssize_t extended_linear_cache_size_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cxl_region *cxlr = to_cxl_region(dev);
+ struct cxl_region_params *p = &cxlr->params;
+ ssize_t rc;
+
+ ACQUIRE(rwsem_read_intr, rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &rwsem)))
+ return rc;
+ return sysfs_emit(buf, "%#llx\n", p->cache_size);
+}
+static DEVICE_ATTR_RO(extended_linear_cache_size);
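A note on the locking idiom used here and in the poison helpers further down: ACQUIRE()/ACQUIRE_ERR() are the scope-based conditional-lock guards from the kernel's cleanup.h infrastructure. As used in this patch, ACQUIRE() attempts an interruptible down_read(), ACQUIRE_ERR() reports the acquisition error (such as -EINTR) if the lock was not taken, and a successfully taken rwsem is released automatically when the guard goes out of scope, which is why no explicit up_read() appears in these functions.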
+
static struct attribute *cxl_region_attrs[] = {
&dev_attr_uuid.attr,
&dev_attr_commit.attr,
@@ -754,9 +771,34 @@ static struct attribute *cxl_region_attrs[] = {
&dev_attr_resource.attr,
&dev_attr_size.attr,
&dev_attr_mode.attr,
+ &dev_attr_extended_linear_cache_size.attr,
NULL,
};
+static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct cxl_region *cxlr = to_cxl_region(dev);
+
+ /*
+ * Support tooling that expects to find a 'uuid' attribute for all
+ * regions regardless of mode.
+ */
+ if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_PARTMODE_PMEM)
+ return 0444;
+
+ /*
+ * Don't display extended linear cache attribute if there is no
+ * extended linear cache.
+ */
+ if (a == &dev_attr_extended_linear_cache_size.attr &&
+ cxlr->params.cache_size == 0)
+ return 0;
+
+ return a->mode;
+}
+
static const struct attribute_group cxl_region_group = {
.attrs = cxl_region_attrs,
.is_visible = cxl_region_visible,
@@ -830,16 +872,16 @@ static int match_free_decoder(struct device *dev, const void *data)
return 1;
}
-static bool region_res_match_cxl_range(const struct cxl_region_params *p,
- struct range *range)
+static bool spa_maps_hpa(const struct cxl_region_params *p,
+ const struct range *range)
{
if (!p->res)
return false;
/*
- * If an extended linear cache region then the CXL range is assumed
- * to be fronted by the DRAM range in current known implementation.
- * This assumption will be made until a variant implementation exists.
+ * The extended linear cache region is constructed by a 1:1 ratio
+ * where the SPA maps equal amounts of DRAM and CXL HPA capacity with
+ * CXL decoders at the high end of the SPA range.
*/
return p->res->start + p->cache_size == range->start &&
p->res->end == range->end;
@@ -857,7 +899,7 @@ static int match_auto_decoder(struct device *dev, const void *data)
cxld = to_cxl_decoder(dev);
r = &cxld->hpa_range;
- if (region_res_match_cxl_range(p, r))
+ if (spa_maps_hpa(p, r))
return 1;
return 0;
@@ -1051,6 +1093,16 @@ static int cxl_rr_assign_decoder(struct cxl_port *port, struct cxl_region *cxlr,
return 0;
}
+static void cxl_region_set_lock(struct cxl_region *cxlr,
+ struct cxl_decoder *cxld)
+{
+ if (!test_bit(CXL_DECODER_F_LOCK, &cxld->flags))
+ return;
+
+ set_bit(CXL_REGION_F_LOCK, &cxlr->flags);
+ clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
+}
+
/**
* cxl_port_attach_region() - track a region's interest in a port by endpoint
* @port: port to add a new region reference 'struct cxl_region_ref'
@@ -1162,6 +1214,8 @@ static int cxl_port_attach_region(struct cxl_port *port,
}
}
+ cxl_region_set_lock(cxlr, cxld);
+
rc = cxl_rr_ep_add(cxl_rr, cxled);
if (rc) {
dev_dbg(&cxlr->dev,
@@ -1320,7 +1374,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
struct cxl_endpoint_decoder *cxled)
{
struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
- int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
+ int parent_iw, parent_ig, ig, iw, rc, pos = cxled->pos;
struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
@@ -1457,7 +1511,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
if (cxld->interleave_ways != iw ||
(iw > 1 && cxld->interleave_granularity != ig) ||
- !region_res_match_cxl_range(p, &cxld->hpa_range) ||
+ !spa_maps_hpa(p, &cxld->hpa_range) ||
((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
dev_err(&cxlr->dev,
"%s:%s %s expected iw: %d ig: %d %pr\n",
@@ -1468,9 +1522,7 @@ static int cxl_port_setup_targets(struct cxl_port *port,
dev_name(port->uport_dev), dev_name(&port->dev),
__func__, cxld->interleave_ways,
cxld->interleave_granularity,
- (cxld->flags & CXL_DECODER_F_ENABLE) ?
- "enabled" :
- "disabled",
+ str_enabled_disabled(cxld->flags & CXL_DECODER_F_ENABLE),
cxld->hpa_range.start, cxld->hpa_range.end);
return -ENXIO;
}
@@ -1510,11 +1562,12 @@ add_target:
cxl_rr->nr_targets_set);
return -ENXIO;
}
- } else
+ } else {
cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
- inc = 1;
+ cxlsd->cxld.target_map[cxl_rr->nr_targets_set] = ep->dport->port_id;
+ }
+ cxl_rr->nr_targets_set++;
out_target_set:
- cxl_rr->nr_targets_set += inc;
dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
dev_name(port->uport_dev), dev_name(&port->dev),
cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
@@ -2431,6 +2484,7 @@ static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int i
dev->bus = &cxl_bus_type;
dev->type = &cxl_region_type;
cxlr->id = id;
+ cxl_region_set_lock(cxlr, &cxlrd->cxlsd.cxld);
return cxlr;
}
@@ -2442,14 +2496,8 @@ static bool cxl_region_update_coordinates(struct cxl_region *cxlr, int nid)
for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
if (cxlr->coord[i].read_bandwidth) {
- rc = 0;
- if (cxl_need_node_perf_attrs_update(nid))
- node_set_perf_attrs(nid, &cxlr->coord[i], i);
- else
- rc = cxl_update_hmat_access_coordinates(nid, cxlr, i);
-
- if (rc == 0)
- cset++;
+ node_update_perf_attrs(nid, &cxlr->coord[i], i);
+ cset++;
}
}
@@ -2487,6 +2535,10 @@ static int cxl_region_perf_attrs_callback(struct notifier_block *nb,
if (nid != region_nid)
return NOTIFY_DONE;
+ /* No action needed if node bit already set */
+ if (node_test_and_set(nid, nodemask_region_seen))
+ return NOTIFY_DONE;
+
if (!cxl_region_update_coordinates(cxlr, nid))
return NOTIFY_DONE;
@@ -2918,28 +2970,119 @@ static bool cxl_is_hpa_in_chunk(u64 hpa, struct cxl_region *cxlr, int pos)
return false;
}
-u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
- u64 dpa)
+#define CXL_POS_ZERO 0
+/**
+ * cxl_validate_translation_params
+ * @eiw: encoded interleave ways
+ * @eig: encoded interleave granularity
+ * @pos: position in interleave
+ *
+ * Callers pass CXL_POS_ZERO when no position parameter needs validating.
+ *
+ * Returns: 0 on success, -EINVAL on first invalid parameter
+ */
+int cxl_validate_translation_params(u8 eiw, u16 eig, int pos)
{
- struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
- u64 dpa_offset, hpa_offset, bits_upper, mask_upper, hpa;
- struct cxl_region_params *p = &cxlr->params;
- struct cxl_endpoint_decoder *cxled = NULL;
- u16 eig = 0;
- u8 eiw = 0;
- int pos;
+ int ways, gran;
- for (int i = 0; i < p->nr_targets; i++) {
- cxled = p->targets[i];
- if (cxlmd == cxled_to_memdev(cxled))
- break;
+ if (eiw_to_ways(eiw, &ways)) {
+ pr_debug("%s: invalid eiw=%u\n", __func__, eiw);
+ return -EINVAL;
+ }
+ if (eig_to_granularity(eig, &gran)) {
+ pr_debug("%s: invalid eig=%u\n", __func__, eig);
+ return -EINVAL;
+ }
+ if (pos < 0 || pos >= ways) {
+ pr_debug("%s: invalid pos=%d for ways=%u\n", __func__, pos,
+ ways);
+ return -EINVAL;
}
- if (!cxled || cxlmd != cxled_to_memdev(cxled))
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_validate_translation_params, "cxl_translate");
+
+u64 cxl_calculate_dpa_offset(u64 hpa_offset, u8 eiw, u16 eig)
+{
+ u64 dpa_offset, bits_lower, bits_upper, temp;
+ int ret;
+
+ ret = cxl_validate_translation_params(eiw, eig, CXL_POS_ZERO);
+ if (ret)
return ULLONG_MAX;
- pos = cxled->pos;
- ways_to_eiw(p->interleave_ways, &eiw);
- granularity_to_eig(p->interleave_granularity, &eig);
+ /*
+ * DPA offset: CXL Spec 3.2 Section 8.2.4.20.13
+ * Lower bits [IG+7:0] pass through unchanged
+ * (eiw < 8)
+ * Per spec: DPAOffset[51:IG+8] = (HPAOffset[51:IG+IW+8] >> IW)
+ * Clear the position bits to isolate upper section, then
+ * reverse the left shift by eiw that occurred during DPA->HPA
+ * (eiw >= 8)
+ * Per spec: DPAOffset[51:IG+8] = HPAOffset[51:IG+IW] / 3
+ * Extract upper bits from the correct bit range and divide by 3
+ * to recover the original DPA upper bits
+ */
+ bits_lower = hpa_offset & GENMASK_ULL(eig + 7, 0);
+ if (eiw < 8) {
+ temp = hpa_offset &= ~GENMASK_ULL(eig + eiw + 8 - 1, 0);
+ dpa_offset = temp >> eiw;
+ } else {
+ bits_upper = div64_u64(hpa_offset >> (eig + eiw), 3);
+ dpa_offset = bits_upper << (eig + 8);
+ }
+ dpa_offset |= bits_lower;
+
+ return dpa_offset;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_calculate_dpa_offset, "cxl_translate");
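For concreteness, the eiw < 8 branch above can be checked in isolation. The following is a minimal userspace sketch (GENMASK_ULL() expanded by hand, helper name hypothetical), not the kernel implementation:

#include <assert.h>
#include <stdint.h>

#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* eiw < 8 branch of cxl_calculate_dpa_offset(), hypothetical helper */
static uint64_t dpa_offset_from_hpa(uint64_t hpa_offset, uint8_t eiw, uint16_t eig)
{
	uint64_t lower = hpa_offset & GENMASK_ULL(eig + 7, 0);
	uint64_t upper = hpa_offset & ~GENMASK_ULL(eig + eiw + 8 - 1, 0);

	return (upper >> eiw) | lower;
}

int main(void)
{
	/*
	 * 2-way interleave (eiw = 1), 256B granularity (eig = 0):
	 * HPA offset 0x2534 splits into lower byte 0x34, position bit
	 * (0x2534 >> 8) & 1 == 1, and upper bits 0x2400 >> 1 == 0x1200,
	 * recovering DPA offset 0x1234.
	 */
	assert(dpa_offset_from_hpa(0x2534, 1, 0) == 0x1234);
	return 0;
}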
+
+int cxl_calculate_position(u64 hpa_offset, u8 eiw, u16 eig)
+{
+ unsigned int ways = 0;
+ u64 shifted, rem;
+ int pos, ret;
+
+ ret = cxl_validate_translation_params(eiw, eig, CXL_POS_ZERO);
+ if (ret)
+ return ret;
+
+ if (!eiw)
+ /* position is 0 if no interleaving */
+ return 0;
+
+ /*
+ * Interleave position: CXL Spec 3.2 Section 8.2.4.20.13
+ * eiw < 8
+ * Position is in the IW bits at HPA_OFFSET[IG+8+IW-1:IG+8].
+ * Per spec "remove IW bits starting with bit position IG+8"
+ * eiw >= 8
+ * Position is not explicitly stored in HPA_OFFSET bits. It is
+ * derived from the modulo operation of the upper bits using
+ * the total number of interleave ways.
+ */
+ if (eiw < 8) {
+ pos = (hpa_offset >> (eig + 8)) & GENMASK(eiw - 1, 0);
+ } else {
+ shifted = hpa_offset >> (eig + 8);
+ eiw_to_ways(eiw, &ways);
+ div64_u64_rem(shifted, ways, &rem);
+ pos = rem;
+ }
+
+ return pos;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_calculate_position, "cxl_translate");
+
+u64 cxl_calculate_hpa_offset(u64 dpa_offset, int pos, u8 eiw, u16 eig)
+{
+ u64 mask_upper, hpa_offset, bits_upper;
+ int ret;
+
+ ret = cxl_validate_translation_params(eiw, eig, pos);
+ if (ret)
+ return ULLONG_MAX;
/*
* The device position in the region interleave set was removed
@@ -2951,9 +3094,6 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
* 8.2.4.19.13 Implementation Note: Device Decode Logic
*/
- /* Remove the dpa base */
- dpa_offset = dpa - cxl_dpa_resource_start(cxled);
-
mask_upper = GENMASK_ULL(51, eig + 8);
if (eiw < 8) {
@@ -2968,12 +3108,43 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
/* The lower bits remain unchanged */
hpa_offset |= dpa_offset & GENMASK_ULL(eig + 7, 0);
+ return hpa_offset;
+}
+EXPORT_SYMBOL_FOR_MODULES(cxl_calculate_hpa_offset, "cxl_translate");
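Completing the round trip with the same example numbers: the forward direction for eiw < 8 shifts the upper DPA bits left by IW and inserts the interleave position at bit IG+8. A sketch under the same assumptions as the snippet above:

/* eiw < 8 forward path, mirroring cxl_calculate_hpa_offset() */
static uint64_t hpa_offset_from_dpa(uint64_t dpa_offset, int pos,
				    uint8_t eiw, uint16_t eig)
{
	uint64_t lower = dpa_offset & GENMASK_ULL(eig + 7, 0);
	uint64_t upper = dpa_offset & ~GENMASK_ULL(eig + 7, 0);

	return (upper << eiw) | ((uint64_t)pos << (eig + 8)) | lower;
}

/* hpa_offset_from_dpa(0x1234, 1, 1, 0) == 0x2534, inverting the check above */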
+
+u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
+ u64 dpa)
+{
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_endpoint_decoder *cxled = NULL;
+ u64 dpa_offset, hpa_offset, hpa;
+ u16 eig = 0;
+ u8 eiw = 0;
+ int pos;
+
+ for (int i = 0; i < p->nr_targets; i++) {
+ if (cxlmd == cxled_to_memdev(p->targets[i])) {
+ cxled = p->targets[i];
+ break;
+ }
+ }
+ if (!cxled)
+ return ULLONG_MAX;
+
+ pos = cxled->pos;
+ ways_to_eiw(p->interleave_ways, &eiw);
+ granularity_to_eig(p->interleave_granularity, &eig);
+
+ dpa_offset = dpa - cxl_dpa_resource_start(cxled);
+ hpa_offset = cxl_calculate_hpa_offset(dpa_offset, pos, eiw, eig);
+
/* Apply the hpa_offset to the region base address */
hpa = hpa_offset + p->res->start + p->cache_size;
/* Root decoder translation overrides typical modulo decode */
- if (cxlrd->hpa_to_spa)
- hpa = cxlrd->hpa_to_spa(cxlrd, hpa);
+ if (cxlrd->ops.hpa_to_spa)
+ hpa = cxlrd->ops.hpa_to_spa(cxlrd, hpa);
if (!cxl_resource_contains_addr(p->res, hpa)) {
dev_dbg(&cxlr->dev,
@@ -2982,12 +3153,70 @@ u64 cxl_dpa_to_hpa(struct cxl_region *cxlr, const struct cxl_memdev *cxlmd,
}
/* Simple chunk check, by pos & gran, only applies to modulo decodes */
- if (!cxlrd->hpa_to_spa && (!cxl_is_hpa_in_chunk(hpa, cxlr, pos)))
+ if (!cxlrd->ops.hpa_to_spa && !cxl_is_hpa_in_chunk(hpa, cxlr, pos))
return ULLONG_MAX;
return hpa;
}
+struct dpa_result {
+ struct cxl_memdev *cxlmd;
+ u64 dpa;
+};
+
+static int region_offset_to_dpa_result(struct cxl_region *cxlr, u64 offset,
+ struct dpa_result *result)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
+ struct cxl_endpoint_decoder *cxled;
+ u64 hpa, hpa_offset, dpa_offset;
+ u16 eig = 0;
+ u8 eiw = 0;
+ int pos;
+
+ lockdep_assert_held(&cxl_rwsem.region);
+ lockdep_assert_held(&cxl_rwsem.dpa);
+
+ /* Input validation ensures valid ways and gran */
+ granularity_to_eig(p->interleave_granularity, &eig);
+ ways_to_eiw(p->interleave_ways, &eiw);
+
+ /*
+	 * If the root decoder has an SPA to CXL HPA callback, use it. Otherwise
+ * CXL HPA is assumed to equal SPA.
+ */
+ if (cxlrd->ops.spa_to_hpa) {
+ hpa = cxlrd->ops.spa_to_hpa(cxlrd, p->res->start + offset);
+ hpa_offset = hpa - p->res->start;
+ } else {
+ hpa_offset = offset;
+ }
+
+ pos = cxl_calculate_position(hpa_offset, eiw, eig);
+ if (pos < 0 || pos >= p->nr_targets) {
+ dev_dbg(&cxlr->dev, "Invalid position %d for %d targets\n",
+ pos, p->nr_targets);
+ return -ENXIO;
+ }
+
+ dpa_offset = cxl_calculate_dpa_offset(hpa_offset, eiw, eig);
+
+	/* Look up and return the result: a memdev and a DPA */
+ for (int i = 0; i < p->nr_targets; i++) {
+ cxled = p->targets[i];
+ if (cxled->pos != pos)
+ continue;
+ result->cxlmd = cxled_to_memdev(cxled);
+ result->dpa = cxl_dpa_resource_start(cxled) + dpa_offset;
+
+ return 0;
+ }
+ dev_err(&cxlr->dev, "No device found for position %d\n", pos);
+
+ return -ENXIO;
+}
+
static struct lock_class_key cxl_pmem_region_key;
static int cxl_pmem_region_alloc(struct cxl_region *cxlr)
@@ -3287,10 +3516,7 @@ static int match_region_by_range(struct device *dev, const void *data)
p = &cxlr->params;
guard(rwsem_read)(&cxl_rwsem.region);
- if (p->res && p->res->start == r->start && p->res->end == r->end)
- return 1;
-
- return 0;
+ return spa_maps_hpa(p, r);
}
static int cxl_extended_linear_cache_resize(struct cxl_region *cxlr,
@@ -3371,6 +3597,10 @@ static int __construct_region(struct cxl_region *cxlr,
"Extended linear cache calculation failed rc:%d\n", rc);
}
+ rc = sysfs_update_group(&cxlr->dev.kobj, &cxl_region_group);
+ if (rc)
+ return rc;
+
rc = insert_resource(cxlrd->res, res);
if (rc) {
/*
@@ -3542,6 +3772,107 @@ static void shutdown_notifiers(void *_cxlr)
unregister_mt_adistance_algorithm(&cxlr->adist_notifier);
}
+static void remove_debugfs(void *dentry)
+{
+ debugfs_remove_recursive(dentry);
+}
+
+static int validate_region_offset(struct cxl_region *cxlr, u64 offset)
+{
+ struct cxl_region_params *p = &cxlr->params;
+ resource_size_t region_size;
+ u64 hpa;
+
+ if (offset < p->cache_size) {
+ dev_err(&cxlr->dev,
+ "Offset %#llx is within extended linear cache %pa\n",
+ offset, &p->cache_size);
+ return -EINVAL;
+ }
+
+ region_size = resource_size(p->res);
+ if (offset >= region_size) {
+ dev_err(&cxlr->dev, "Offset %#llx exceeds region size %pa\n",
+ offset, &region_size);
+ return -EINVAL;
+ }
+
+ hpa = p->res->start + offset;
+ if (hpa < p->res->start || hpa > p->res->end) {
+ dev_err(&cxlr->dev, "HPA %#llx not in region %pr\n", hpa,
+ p->res);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxl_region_debugfs_poison_inject(void *data, u64 offset)
+{
+ struct dpa_result result = { .dpa = ULLONG_MAX, .cxlmd = NULL };
+ struct cxl_region *cxlr = data;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ if (validate_region_offset(cxlr, offset))
+ return -EINVAL;
+
+ offset -= cxlr->params.cache_size;
+ rc = region_offset_to_dpa_result(cxlr, offset, &result);
+ if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev,
+ "Failed to resolve DPA for region offset %#llx rc %d\n",
+ offset, rc);
+
+ return rc ? rc : -EINVAL;
+ }
+
+ return cxl_inject_poison_locked(result.cxlmd, result.dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_inject_fops, NULL,
+ cxl_region_debugfs_poison_inject, "%llx\n");
+
+static int cxl_region_debugfs_poison_clear(void *data, u64 offset)
+{
+ struct dpa_result result = { .dpa = ULLONG_MAX, .cxlmd = NULL };
+ struct cxl_region *cxlr = data;
+ int rc;
+
+ ACQUIRE(rwsem_read_intr, region_rwsem)(&cxl_rwsem.region);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &region_rwsem)))
+ return rc;
+
+ ACQUIRE(rwsem_read_intr, dpa_rwsem)(&cxl_rwsem.dpa);
+ if ((rc = ACQUIRE_ERR(rwsem_read_intr, &dpa_rwsem)))
+ return rc;
+
+ if (validate_region_offset(cxlr, offset))
+ return -EINVAL;
+
+ offset -= cxlr->params.cache_size;
+ rc = region_offset_to_dpa_result(cxlr, offset, &result);
+ if (rc || !result.cxlmd || result.dpa == ULLONG_MAX) {
+ dev_dbg(&cxlr->dev,
+ "Failed to resolve DPA for region offset %#llx rc %d\n",
+ offset, rc);
+
+ return rc ? rc : -EINVAL;
+ }
+
+ return cxl_clear_poison_locked(result.cxlmd, result.dpa);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(cxl_poison_clear_fops, NULL,
+ cxl_region_debugfs_poison_clear, "%llx\n");
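With both attributes registered, the files accept a region offset from userspace (hex with a 0x prefix works), so poison can be exercised with something like `echo 0x1000 > /sys/kernel/debug/cxl/regionN/inject_poison` followed by the matching write to clear_poison. The directory name comes from dev_name() of the region and the cxl debugfs root, so treat the exact path as illustrative rather than guaranteed.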
+
static int cxl_region_can_probe(struct cxl_region *cxlr)
{
struct cxl_region_params *p = &cxlr->params;
@@ -3571,6 +3902,7 @@ static int cxl_region_probe(struct device *dev)
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region_params *p = &cxlr->params;
+ bool poison_supported = true;
int rc;
rc = cxl_region_can_probe(cxlr);
@@ -3594,6 +3926,31 @@ static int cxl_region_probe(struct device *dev)
if (rc)
return rc;
+ /* Create poison attributes if all memdevs support the capabilities */
+ for (int i = 0; i < p->nr_targets; i++) {
+ struct cxl_endpoint_decoder *cxled = p->targets[i];
+ struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+
+ if (!cxl_memdev_has_poison_cmd(cxlmd, CXL_POISON_ENABLED_INJECT) ||
+ !cxl_memdev_has_poison_cmd(cxlmd, CXL_POISON_ENABLED_CLEAR)) {
+ poison_supported = false;
+ break;
+ }
+ }
+
+ if (poison_supported) {
+ struct dentry *dentry;
+
+ dentry = cxl_debugfs_create_dir(dev_name(dev));
+ debugfs_create_file("inject_poison", 0200, dentry, cxlr,
+ &cxl_poison_inject_fops);
+ debugfs_create_file("clear_poison", 0200, dentry, cxlr,
+ &cxl_poison_clear_fops);
+ rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
+ if (rc)
+ return rc;
+ }
+
switch (cxlr->mode) {
case CXL_PARTMODE_PMEM:
rc = devm_cxl_region_edac_register(cxlr);
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index a53ec4798b12..a972e4ef1936 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -1068,7 +1068,7 @@ TRACE_EVENT(cxl_poison,
__entry->hpa = cxl_dpa_to_hpa(cxlr, cxlmd,
__entry->dpa);
if (__entry->hpa != ULLONG_MAX && cxlr->params.cache_size)
- __entry->hpa_alias0 = __entry->hpa +
+ __entry->hpa_alias0 = __entry->hpa -
cxlr->params.cache_size;
else
__entry->hpa_alias0 = ULLONG_MAX;
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 847e37be42c4..ba17fa86d249 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -357,6 +357,9 @@ enum cxl_decoder_type {
* @target_type: accelerator vs expander (type2 vs type3) selector
* @region: currently assigned region for this decoder
* @flags: memory type capabilities and locking
+ * @target_map: cached copy of hardware port-id list, available at init
+ *		before all @dport objects have been instantiated. While a
+ *		dport id is 8-bit, CFMWS interleave targets are 32-bit.
* @commit: device/decoder-type specific callback to commit settings to hw
* @reset: device/decoder-type specific callback to reset hw settings
*/
@@ -369,6 +372,7 @@ struct cxl_decoder {
enum cxl_decoder_type target_type;
struct cxl_region *region;
unsigned long flags;
+ u32 target_map[CXL_DECODER_MAX_INTERLEAVE];
int (*commit)(struct cxl_decoder *cxld);
void (*reset)(struct cxl_decoder *cxld);
};
@@ -419,27 +423,35 @@ struct cxl_switch_decoder {
};
struct cxl_root_decoder;
-typedef u64 (*cxl_hpa_to_spa_fn)(struct cxl_root_decoder *cxlrd, u64 hpa);
+/**
+ * struct cxl_rd_ops - CXL root decoder callback operations
+ * @hpa_to_spa: Convert host physical address to system physical address
+ * @spa_to_hpa: Convert system physical address to host physical address
+ */
+struct cxl_rd_ops {
+ u64 (*hpa_to_spa)(struct cxl_root_decoder *cxlrd, u64 hpa);
+ u64 (*spa_to_hpa)(struct cxl_root_decoder *cxlrd, u64 spa);
+};
/**
* struct cxl_root_decoder - Static platform CXL address decoder
* @res: host / parent resource for region allocations
* @cache_size: extended linear cache size if exists, otherwise zero.
* @region_id: region id for next region provisioning event
- * @hpa_to_spa: translate CXL host-physical-address to Platform system-physical-address
* @platform_data: platform specific configuration data
* @range_lock: sync region autodiscovery by address range
* @qos_class: QoS performance class cookie
+ * @ops: CXL root decoder operations
* @cxlsd: base cxl switch decoder
*/
struct cxl_root_decoder {
struct resource *res;
resource_size_t cache_size;
atomic_t region_id;
- cxl_hpa_to_spa_fn hpa_to_spa;
void *platform_data;
struct mutex range_lock;
int qos_class;
+ struct cxl_rd_ops ops;
struct cxl_switch_decoder cxlsd;
};
@@ -505,6 +517,14 @@ enum cxl_partition_mode {
*/
#define CXL_REGION_F_NEEDS_RESET 1
+/*
+ * Indicates whether this region is locked because one or more of its
+ * decoders are locked. The locked attribute is all-or-nothing:
+ * CXL_REGION_F_NEEDS_RESET should not be set while this flag is set.
+ */
+#define CXL_REGION_F_LOCK 2
+
/**
* struct cxl_region - CXL region
* @dev: This region's device
@@ -595,6 +615,7 @@ struct cxl_dax_region {
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
+ * @component_reg_phys: Physical address of component register
*/
struct cxl_port {
struct device dev;
@@ -618,6 +639,7 @@ struct cxl_port {
} cdat;
bool cdat_available;
long pci_latency;
+ resource_size_t component_reg_phys;
};
/**
@@ -724,6 +746,25 @@ static inline bool is_cxl_root(struct cxl_port *port)
return port->uport_dev == port->dev.parent;
}
+/* Address translation functions exported to cxl_translate test module only */
+int cxl_validate_translation_params(u8 eiw, u16 eig, int pos);
+u64 cxl_calculate_hpa_offset(u64 dpa_offset, int pos, u8 eiw, u16 eig);
+u64 cxl_calculate_dpa_offset(u64 hpa_offset, u8 eiw, u16 eig);
+int cxl_calculate_position(u64 hpa_offset, u8 eiw, u16 eig);
+struct cxl_cxims_data {
+ int nr_maps;
+ u64 xormaps[] __counted_by(nr_maps);
+};
+
+#if IS_ENABLED(CONFIG_CXL_ACPI)
+u64 cxl_do_xormap_calc(struct cxl_cxims_data *cximsd, u64 addr, int hbiw);
+#else
+static inline u64 cxl_do_xormap_calc(struct cxl_cxims_data *cximsd, u64 addr, int hbiw)
+{
+ return ULLONG_MAX;
+}
+#endif
+
int cxl_num_decoders_committed(struct cxl_port *port);
bool is_cxl_port(const struct device *dev);
struct cxl_port *to_cxl_port(const struct device *dev);
@@ -781,9 +822,9 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
unsigned int nr_targets);
-int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add(struct cxl_decoder *cxld);
struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
-int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
+int cxl_decoder_add_locked(struct cxl_decoder *cxld);
int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
static inline int cxl_root_decoder_autoremove(struct device *host,
struct cxl_root_decoder *cxlrd)
@@ -806,12 +847,10 @@ struct cxl_endpoint_dvsec_info {
struct range dvsec_range[2];
};
-struct cxl_hdm;
-struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
-int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
+int devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int __devm_cxl_switch_port_decoders_setup(struct cxl_port *port);
+int devm_cxl_endpoint_decoders_setup(struct cxl_port *port);
+
struct cxl_dev_state;
int cxl_dvsec_rr_decode(struct cxl_dev_state *cxlds,
struct cxl_endpoint_dvsec_info *info);
@@ -890,7 +929,7 @@ static inline u64 cxl_port_get_spa_cache_alias(struct cxl_port *endpoint,
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);
-void cxl_switch_parse_cdat(struct cxl_port *port);
+void cxl_switch_parse_cdat(struct cxl_dport *dport);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
@@ -905,6 +944,10 @@ void cxl_coordinates_combine(struct access_coordinate *out,
struct access_coordinate *c2);
bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
+struct cxl_dport *devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
+struct cxl_dport *__devm_cxl_add_dport_by_dev(struct cxl_port *port,
+ struct device *dport_dev);
/*
* Unit test builds overrides this to __weak, find the 'strong' version
@@ -915,4 +958,21 @@ bool cxl_endpoint_decoder_reset_detected(struct cxl_port *port);
#endif
u16 cxl_gpf_get_dvsec(struct device *dev);
+
+/*
+ * Declarations for functions that are mocked by cxl_test and called by
+ * cxl_core. The respective functions are defined as __foo() and called by
+ * cxl_core as foo(). The macros below ensure that those functions exist
+ * as foo(). See tools/testing/cxl/cxl_core_exports.c and
+ * tools/testing/cxl/exports.h for setting up the mock functions. This
+ * dance avoids a circular dependency where cxl_core calls a function that
+ * ends up being a mock function and goes to cxl_test, which in turn calls
+ * a cxl_core function.
+ */
+#ifndef CXL_TEST_ENABLE
+#define DECLARE_TESTABLE(x) __##x
+#define devm_cxl_add_dport_by_dev DECLARE_TESTABLE(devm_cxl_add_dport_by_dev)
+#define devm_cxl_switch_port_decoders_setup DECLARE_TESTABLE(devm_cxl_switch_port_decoders_setup)
+#endif
+
#endif /* __CXL_H__ */
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index 751478dfc410..434031a0c1f7 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -869,6 +869,8 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
int cxl_trigger_poison_list(struct cxl_memdev *cxlmd);
int cxl_inject_poison(struct cxl_memdev *cxlmd, u64 dpa);
int cxl_clear_poison(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_inject_poison_locked(struct cxl_memdev *cxlmd, u64 dpa);
+int cxl_clear_poison_locked(struct cxl_memdev *cxlmd, u64 dpa);
#ifdef CONFIG_CXL_EDAC_MEM_FEATURES
int devm_cxl_memdev_edac_register(struct cxl_memdev *cxlmd);
diff --git a/drivers/cxl/cxlpci.h b/drivers/cxl/cxlpci.h
index 54e219b0049e..1d526bea8431 100644
--- a/drivers/cxl/cxlpci.h
+++ b/drivers/cxl/cxlpci.h
@@ -127,10 +127,7 @@ static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
}
-int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
-int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
- struct cxl_endpoint_dvsec_info *info);
void read_cdat_data(struct cxl_port *port);
void cxl_cor_error_detected(struct pci_dev *pdev);
pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index bd100ac31672..0be4e508affe 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -136,7 +136,7 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
if (opcode == CXL_MBOX_OP_SANITIZE) {
mutex_lock(&cxl_mbox->mbox_mutex);
if (mds->security.sanitize_node)
- mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
+ mod_delayed_work(system_percpu_wq, &mds->security.poll_dwork, 0);
mutex_unlock(&cxl_mbox->mbox_mutex);
} else {
/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
diff --git a/drivers/cxl/port.c b/drivers/cxl/port.c
index cf32dc50b7a6..51c8f2f84717 100644
--- a/drivers/cxl/port.c
+++ b/drivers/cxl/port.c
@@ -59,55 +59,20 @@ static int discover_region(struct device *dev, void *unused)
static int cxl_switch_port_probe(struct cxl_port *port)
{
- struct cxl_hdm *cxlhdm;
- int rc;
+	/* Reset nr_dports in case the driver is rebound */
+ port->nr_dports = 0;
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
- rc = devm_cxl_port_enumerate_dports(port);
- if (rc < 0)
- return rc;
-
- cxl_switch_parse_cdat(port);
-
- cxlhdm = devm_cxl_setup_hdm(port, NULL);
- if (!IS_ERR(cxlhdm))
- return devm_cxl_enumerate_decoders(cxlhdm, NULL);
-
- if (PTR_ERR(cxlhdm) != -ENODEV) {
- dev_err(&port->dev, "Failed to map HDM decoder capability\n");
- return PTR_ERR(cxlhdm);
- }
-
- if (rc == 1) {
- dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
- return devm_cxl_add_passthrough_decoder(port);
- }
-
- dev_err(&port->dev, "HDM decoder capability not found\n");
- return -ENXIO;
+ return 0;
}
static int cxl_endpoint_port_probe(struct cxl_port *port)
{
- struct cxl_endpoint_dvsec_info info = { .port = port };
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
- struct cxl_dev_state *cxlds = cxlmd->cxlds;
- struct cxl_hdm *cxlhdm;
int rc;
- rc = cxl_dvsec_rr_decode(cxlds, &info);
- if (rc < 0)
- return rc;
-
- cxlhdm = devm_cxl_setup_hdm(port, &info);
- if (IS_ERR(cxlhdm)) {
- if (PTR_ERR(cxlhdm) == -ENODEV)
- dev_err(&port->dev, "HDM decoder registers not found\n");
- return PTR_ERR(cxlhdm);
- }
-
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
cxl_endpoint_parse_cdat(port);
@@ -117,11 +82,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
if (rc)
return rc;
- rc = cxl_hdm_decode_init(cxlds, cxlhdm, &info);
- if (rc)
- return rc;
-
- rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
+ rc = devm_cxl_endpoint_decoders_setup(port);
if (rc)
return rc;
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 2bb40a6060af..22999a402e02 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -13,8 +13,9 @@
#include "dax-private.h"
#include "bus.h"
-static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
- const char *func)
+static int __check_vma(struct dev_dax *dev_dax, vm_flags_t vm_flags,
+ unsigned long start, unsigned long end, struct file *file,
+ const char *func)
{
struct device *dev = &dev_dax->dev;
unsigned long mask;
@@ -23,7 +24,7 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
return -ENXIO;
/* prevent private mappings from being established */
- if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
+ if ((vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
dev_info_ratelimited(dev,
"%s: %s: fail, attempted private mapping\n",
current->comm, func);
@@ -31,15 +32,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
}
mask = dev_dax->align - 1;
- if (vma->vm_start & mask || vma->vm_end & mask) {
+ if (start & mask || end & mask) {
dev_info_ratelimited(dev,
"%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
- current->comm, func, vma->vm_start, vma->vm_end,
+ current->comm, func, start, end,
mask);
return -EINVAL;
}
- if (!vma_is_dax(vma)) {
+ if (!file_is_dax(file)) {
dev_info_ratelimited(dev,
"%s: %s: fail, vma is not DAX capable\n",
current->comm, func);
@@ -49,6 +50,13 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
return 0;
}
+static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
+ const char *func)
+{
+ return __check_vma(dev_dax, vma->vm_flags, vma->vm_start, vma->vm_end,
+ vma->vm_file, func);
+}
+
/* see "strong" declaration in tools/testing/nvdimm/dax-dev.c */
__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
unsigned long size)
@@ -285,8 +293,9 @@ static const struct vm_operations_struct dax_vm_ops = {
.pagesize = dev_dax_pagesize,
};
-static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
+static int dax_mmap_prepare(struct vm_area_desc *desc)
{
+ struct file *filp = desc->file;
struct dev_dax *dev_dax = filp->private_data;
int rc, id;
@@ -297,13 +306,14 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
* fault time.
*/
id = dax_read_lock();
- rc = check_vma(dev_dax, vma, __func__);
+ rc = __check_vma(dev_dax, desc->vm_flags, desc->start, desc->end, filp,
+ __func__);
dax_read_unlock(id);
if (rc)
return rc;
- vma->vm_ops = &dax_vm_ops;
- vm_flags_set(vma, VM_HUGEPAGE);
+ desc->vm_ops = &dax_vm_ops;
+ desc->vm_flags |= VM_HUGEPAGE;
return 0;
}
@@ -330,14 +340,13 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
if ((off + len_align) < off)
goto out;
- addr_align = mm_get_unmapped_area(current->mm, filp, addr, len_align,
- pgoff, flags);
+ addr_align = mm_get_unmapped_area(filp, addr, len_align, pgoff, flags);
if (!IS_ERR_VALUE(addr_align)) {
addr_align += (off - addr_align) & (align - 1);
return addr_align;
}
out:
- return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags);
+ return mm_get_unmapped_area(filp, addr, len, pgoff, flags);
}
static const struct address_space_operations dev_dax_aops = {
@@ -377,7 +386,7 @@ static const struct file_operations dax_fops = {
.open = dax_open,
.release = dax_release,
.get_unmapped_area = dax_get_unmapped_area,
- .mmap = dax_mmap,
+ .mmap_prepare = dax_mmap_prepare,
.fop_flags = FOP_MMAP_SYNC,
};
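The .mmap to .mmap_prepare conversion above moves validation from a live VMA to the pre-mapping descriptor. A minimal sketch of the pattern, using only the vm_area_desc fields exercised in this hunk (foo_vm_ops is a hypothetical ops table):

static int foo_mmap_prepare(struct vm_area_desc *desc)
{
	/* validate against the descriptor; no VMA exists yet */
	if ((desc->vm_flags & VM_MAYSHARE) != VM_MAYSHARE)
		return -EINVAL;

	desc->vm_ops = &foo_vm_ops;	/* hypothetical ops table */
	desc->vm_flags |= VM_HUGEPAGE;	/* set directly, no vm_flags_set() */
	return 0;
}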
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 54c480e874cb..c00b9dff4a06 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -388,7 +388,7 @@ static const struct super_operations dax_sops = {
.alloc_inode = dax_alloc_inode,
.destroy_inode = dax_destroy_inode,
.free_inode = dax_free_inode,
- .drop_inode = generic_delete_inode,
+ .drop_inode = inode_just_drop,
};
static int dax_init_fs_context(struct fs_context *fc)
@@ -433,7 +433,7 @@ static struct dax_device *dax_dev_get(dev_t devt)
return NULL;
dax_dev = to_dax_dev(inode);
- if (inode->i_state & I_NEW) {
+ if (inode_state_read_once(inode) & I_NEW) {
set_bit(DAXDEV_ALIVE, &dax_dev->flags);
inode->i_cdev = &dax_dev->cdev;
inode->i_mode = S_IFCHR;
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 2e8d01d47f69..00979f2e0e27 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -20,6 +20,7 @@
#include <linux/stat.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/list.h>
@@ -28,7 +29,6 @@
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/units.h>
-#include "governor.h"
#define CREATE_TRACE_POINTS
#include <trace/events/devfreq.h>
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 0470d7c175f4..5e6e7e900bda 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/bitfield.h>
+#include <linux/hw_bitfield.h>
#include <linux/bits.h>
#include <linux/perf_event.h>
@@ -30,19 +31,16 @@
#define DMC_MAX_CHANNELS 4
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
-
/* DDRMON_CTRL */
#define DDRMON_CTRL 0x04
+#define DDRMON_CTRL_LPDDR5 BIT(6)
#define DDRMON_CTRL_DDR4 BIT(5)
#define DDRMON_CTRL_LPDDR4 BIT(4)
#define DDRMON_CTRL_HARDWARE_EN BIT(3)
#define DDRMON_CTRL_LPDDR23 BIT(2)
#define DDRMON_CTRL_SOFTWARE_EN BIT(1)
#define DDRMON_CTRL_TIMER_CNT_EN BIT(0)
-#define DDRMON_CTRL_DDR_TYPE_MASK (DDRMON_CTRL_DDR4 | \
- DDRMON_CTRL_LPDDR4 | \
- DDRMON_CTRL_LPDDR23)
+#define DDRMON_CTRL_LP5_BANK_MODE_MASK GENMASK(8, 7)
#define DDRMON_CH0_WR_NUM 0x20
#define DDRMON_CH0_RD_NUM 0x24
@@ -116,12 +114,63 @@ struct rockchip_dfi {
int buswidth[DMC_MAX_CHANNELS];
int ddrmon_stride;
bool ddrmon_ctrl_single;
+ u32 lp5_bank_mode;
+ bool lp5_ckr; /* true if in 4:1 command-to-data clock ratio mode */
+ unsigned int count_multiplier; /* number of data clocks per count */
};
+static int rockchip_dfi_ddrtype_to_ctrl(struct rockchip_dfi *dfi, u32 *ctrl)
+{
+ u32 ddrmon_ver;
+
+ switch (dfi->ddr_type) {
+ case ROCKCHIP_DDRTYPE_LPDDR2:
+ case ROCKCHIP_DDRTYPE_LPDDR3:
+ *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 1) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0);
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR4:
+ case ROCKCHIP_DDRTYPE_LPDDR4X:
+ *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 1) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 0);
+ break;
+ case ROCKCHIP_DDRTYPE_LPDDR5:
+ ddrmon_ver = readl_relaxed(dfi->regs);
+ if (ddrmon_ver < 0x40) {
+ *ctrl = FIELD_PREP_WM16(DDRMON_CTRL_LPDDR23, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR4, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LPDDR5, 1) |
+ FIELD_PREP_WM16(DDRMON_CTRL_LP5_BANK_MODE_MASK,
+ dfi->lp5_bank_mode);
+ break;
+ }
+
+ /*
+ * As it is unknown whether the unpleasant special case
+ * behaviour used by the vendor kernel is needed for any
+ * shipping hardware, ask users to report if they have
+ * some of that hardware.
+ */
+ dev_err(&dfi->edev->dev,
+ "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
+ ddrmon_ver);
+ return -EOPNOTSUPP;
+ default:
+ dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
+ dfi->ddr_type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
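The FIELD_PREP_WM16() conversions in this file target Rockchip's write-masked registers, where bits [31:16] are a write-enable mask for bits [15:0] (compare the removed HIWORD_UPDATE() macro above). A rough equivalent in terms of the generic bitfield helpers, name hypothetical; the real helper lives in <linux/hw_bitfield.h>:

#include <linux/bitfield.h>

/* hypothetical stand-in for FIELD_PREP_WM16() */
#define HIWORD_FIELD_PREP(mask, val) \
	(FIELD_PREP((mask), (val)) | ((mask) << 16))

/*
 * HIWORD_FIELD_PREP(DDRMON_CTRL_SOFTWARE_EN, 1) yields 0x00020002:
 * bit 17 unlocks bit 1, so a single relaxed write updates one field
 * without a read-modify-write of the whole register.
 */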
static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
{
void __iomem *dfi_regs = dfi->regs;
int i, ret = 0;
+ u32 ctrl;
mutex_lock(&dfi->mutex);
@@ -135,36 +184,26 @@ static int rockchip_dfi_enable(struct rockchip_dfi *dfi)
goto out;
}
+ ret = rockchip_dfi_ddrtype_to_ctrl(dfi, &ctrl);
+ if (ret)
+ goto out;
+
for (i = 0; i < dfi->max_channels; i++) {
- u32 ctrl = 0;
if (!(dfi->channel_mask & BIT(i)))
continue;
/* clear DDRMON_CTRL setting */
- writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_TIMER_CNT_EN |
- DDRMON_CTRL_SOFTWARE_EN | DDRMON_CTRL_HARDWARE_EN),
+ writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_TIMER_CNT_EN, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0) |
+ FIELD_PREP_WM16(DDRMON_CTRL_HARDWARE_EN, 0),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
- /* set ddr type to dfi */
- switch (dfi->ddr_type) {
- case ROCKCHIP_DDRTYPE_LPDDR2:
- case ROCKCHIP_DDRTYPE_LPDDR3:
- ctrl = DDRMON_CTRL_LPDDR23;
- break;
- case ROCKCHIP_DDRTYPE_LPDDR4:
- case ROCKCHIP_DDRTYPE_LPDDR4X:
- ctrl = DDRMON_CTRL_LPDDR4;
- break;
- default:
- break;
- }
-
- writel_relaxed(HIWORD_UPDATE(ctrl, DDRMON_CTRL_DDR_TYPE_MASK),
- dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
+ writel_relaxed(ctrl, dfi_regs + i * dfi->ddrmon_stride +
+ DDRMON_CTRL);
/* enable count, use software mode */
- writel_relaxed(HIWORD_UPDATE(DDRMON_CTRL_SOFTWARE_EN, DDRMON_CTRL_SOFTWARE_EN),
+ writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 1),
dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
if (dfi->ddrmon_ctrl_single)
@@ -194,8 +233,8 @@ static void rockchip_dfi_disable(struct rockchip_dfi *dfi)
if (!(dfi->channel_mask & BIT(i)))
continue;
- writel_relaxed(HIWORD_UPDATE(0, DDRMON_CTRL_SOFTWARE_EN),
- dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
+ writel_relaxed(FIELD_PREP_WM16(DDRMON_CTRL_SOFTWARE_EN, 0),
+ dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
if (dfi->ddrmon_ctrl_single)
break;
@@ -435,7 +474,7 @@ static u64 rockchip_ddr_perf_event_get_count(struct perf_event *event)
switch (event->attr.config) {
case PERF_EVENT_CYCLES:
- count = total.c[0].clock_cycles;
+ count = total.c[0].clock_cycles * dfi->count_multiplier;
break;
case PERF_EVENT_READ_BYTES:
for (i = 0; i < dfi->max_channels; i++)
@@ -651,10 +690,14 @@ static int rockchip_ddr_perf_init(struct rockchip_dfi *dfi)
break;
case ROCKCHIP_DDRTYPE_LPDDR4:
case ROCKCHIP_DDRTYPE_LPDDR4X:
+ case ROCKCHIP_DDRTYPE_LPDDR5:
dfi->burst_len = 16;
break;
}
+ if (!dfi->count_multiplier)
+ dfi->count_multiplier = 1;
+
ret = perf_pmu_register(pmu, "rockchip_ddr", -1);
if (ret)
return ret;
@@ -726,7 +769,7 @@ static int rk3568_dfi_init(struct rockchip_dfi *dfi)
static int rk3588_dfi_init(struct rockchip_dfi *dfi)
{
struct regmap *regmap_pmu = dfi->regmap_pmu;
- u32 reg2, reg3, reg4;
+ u32 reg2, reg3, reg4, reg6;
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG2, &reg2);
regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG3, &reg3);
@@ -751,6 +794,15 @@ static int rk3588_dfi_init(struct rockchip_dfi *dfi)
dfi->max_channels = 4;
dfi->ddrmon_stride = 0x4000;
+ dfi->count_multiplier = 2;
+
+ if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
+ regmap_read(regmap_pmu, RK3588_PMUGRF_OS_REG6, &reg6);
+ dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7;
+ dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
+ if (dfi->lp5_ckr)
+ dfi->count_multiplier *= 2;
+ }
return 0;
};
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
deleted file mode 100644
index 0adfebc0467a..000000000000
--- a/drivers/devfreq/governor.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * governor.h - internal header for devfreq governors.
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This header is for devfreq governors in drivers/devfreq/
- */
-
-#ifndef _GOVERNOR_H
-#define _GOVERNOR_H
-
-#include <linux/devfreq.h>
-
-#define DEVFREQ_NAME_LEN 16
-
-#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
-
-/* Devfreq events */
-#define DEVFREQ_GOV_START 0x1
-#define DEVFREQ_GOV_STOP 0x2
-#define DEVFREQ_GOV_UPDATE_INTERVAL 0x3
-#define DEVFREQ_GOV_SUSPEND 0x4
-#define DEVFREQ_GOV_RESUME 0x5
-
-#define DEVFREQ_MIN_FREQ 0
-#define DEVFREQ_MAX_FREQ ULONG_MAX
-
-/*
- * Definition of the governor feature flags
- * - DEVFREQ_GOV_FLAG_IMMUTABLE
- * : This governor is never changeable to other governors.
- * - DEVFREQ_GOV_FLAG_IRQ_DRIVEN
- * : The devfreq won't schedule the work for this governor.
- */
-#define DEVFREQ_GOV_FLAG_IMMUTABLE BIT(0)
-#define DEVFREQ_GOV_FLAG_IRQ_DRIVEN BIT(1)
-
-/*
- * Definition of governor attribute flags except for common sysfs attributes
- * - DEVFREQ_GOV_ATTR_POLLING_INTERVAL
- * : Indicate polling_interval sysfs attribute
- * - DEVFREQ_GOV_ATTR_TIMER
- * : Indicate timer sysfs attribute
- */
-#define DEVFREQ_GOV_ATTR_POLLING_INTERVAL BIT(0)
-#define DEVFREQ_GOV_ATTR_TIMER BIT(1)
-
-/**
- * struct devfreq_cpu_data - Hold the per-cpu data
- * @node: list node
- * @dev: reference to cpu device.
- * @first_cpu: the cpumask of the first cpu of a policy.
- * @opp_table: reference to cpu opp table.
- * @cur_freq: the current frequency of the cpu.
- * @min_freq: the min frequency of the cpu.
- * @max_freq: the max frequency of the cpu.
- *
- * This structure stores the required cpu_data of a cpu.
- * This is auto-populated by the governor.
- */
-struct devfreq_cpu_data {
- struct list_head node;
-
- struct device *dev;
- unsigned int first_cpu;
-
- struct opp_table *opp_table;
- unsigned int cur_freq;
- unsigned int min_freq;
- unsigned int max_freq;
-};
-
-/**
- * struct devfreq_governor - Devfreq policy governor
- * @node: list node - contains registered devfreq governors
- * @name: Governor's name
- * @attrs: Governor's sysfs attribute flags
- * @flags: Governor's feature flags
- * @get_target_freq: Returns desired operating frequency for the device.
- * Basically, get_target_freq will run
- * devfreq_dev_profile.get_dev_status() to get the
- * status of the device (load = busy_time / total_time).
- * @event_handler: Callback for devfreq core framework to notify events
- * to governors. Events include per device governor
- * init and exit, opp changes out of devfreq, suspend
- * and resume of per device devfreq during device idle.
- *
- * Note that the callbacks are called with devfreq->lock locked by devfreq.
- */
-struct devfreq_governor {
- struct list_head node;
-
- const char name[DEVFREQ_NAME_LEN];
- const u64 attrs;
- const u64 flags;
- int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
- int (*event_handler)(struct devfreq *devfreq,
- unsigned int event, void *data);
-};
-
-void devfreq_monitor_start(struct devfreq *devfreq);
-void devfreq_monitor_stop(struct devfreq *devfreq);
-void devfreq_monitor_suspend(struct devfreq *devfreq);
-void devfreq_monitor_resume(struct devfreq *devfreq);
-void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay);
-
-int devfreq_add_governor(struct devfreq_governor *governor);
-int devfreq_remove_governor(struct devfreq_governor *governor);
-
-int devm_devfreq_add_governor(struct device *dev,
- struct devfreq_governor *governor);
-
-int devfreq_update_status(struct devfreq *devfreq, unsigned long freq);
-int devfreq_update_target(struct devfreq *devfreq, unsigned long freq);
-void devfreq_get_freq_range(struct devfreq *devfreq, unsigned long *min_freq,
- unsigned long *max_freq);
-
-static inline int devfreq_update_stats(struct devfreq *df)
-{
- if (!df->profile->get_dev_status)
- return -EINVAL;
-
- return df->profile->get_dev_status(df->dev.parent, &df->last_status);
-}
-#endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
index 953cf9a1e9f7..8cd6f9a59f64 100644
--- a/drivers/devfreq/governor_passive.c
+++ b/drivers/devfreq/governor_passive.c
@@ -14,8 +14,33 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/units.h>
-#include "governor.h"
+
+/**
+ * struct devfreq_cpu_data - Hold the per-cpu data
+ * @node: list node
+ * @dev: reference to cpu device.
+ * @first_cpu: the cpumask of the first cpu of a policy.
+ * @opp_table: reference to cpu opp table.
+ * @cur_freq: the current frequency of the cpu.
+ * @min_freq: the min frequency of the cpu.
+ * @max_freq: the max frequency of the cpu.
+ *
+ * This structure stores the required cpu_data of a cpu.
+ * This is auto-populated by the governor.
+ */
+struct devfreq_cpu_data {
+ struct list_head node;
+
+ struct device *dev;
+ unsigned int first_cpu;
+
+ struct opp_table *opp_table;
+ unsigned int cur_freq;
+ unsigned int min_freq;
+ unsigned int max_freq;
+};
static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index 2e4e981446fa..fdb22bf512cf 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -7,8 +7,8 @@
*/
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/module.h>
-#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
unsigned long *freq)
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index f059e8814804..ee2d6ec8a512 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -7,8 +7,8 @@
*/
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/module.h>
-#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
unsigned long *freq)
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index c23435736367..ac9c5e9e51a4 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -9,12 +9,12 @@
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/math64.h>
-#include "governor.h"
/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
#define DFSO_UPTHRESHOLD (90)
-#define DFSO_DOWNDIFFERENCTIAL (5)
+#define DFSO_DOWNDIFFERENTIAL (5)
static int devfreq_simple_ondemand_func(struct devfreq *df,
unsigned long *freq)
{
@@ -22,7 +22,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
struct devfreq_dev_status *stat;
unsigned long long a, b;
unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
- unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
+ unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENTIAL;
struct devfreq_simple_ondemand_data *data = df->data;
err = devfreq_update_stats(df);
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 175de0c0b50e..395174f93960 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -9,11 +9,11 @@
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/kstrtox.h>
#include <linux/pm.h>
#include <linux/mutex.h>
#include <linux/module.h>
-#include "governor.h"
struct userspace_data {
unsigned long user_frequency;
diff --git a/drivers/devfreq/hisi_uncore_freq.c b/drivers/devfreq/hisi_uncore_freq.c
index 96d1815059e3..4d00d813c8ac 100644
--- a/drivers/devfreq/hisi_uncore_freq.c
+++ b/drivers/devfreq/hisi_uncore_freq.c
@@ -9,6 +9,7 @@
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/device.h>
#include <linux/dev_printk.h>
#include <linux/errno.h>
@@ -26,8 +27,6 @@
#include <linux/units.h>
#include <acpi/pcc.h>
-#include "governor.h"
-
struct hisi_uncore_pcc_data {
u16 status;
u16 resv;
@@ -265,10 +264,11 @@ static int hisi_uncore_target(struct device *dev, unsigned long *freq,
dev_err(dev, "Failed to get opp for freq %lu hz\n", *freq);
return PTR_ERR(opp);
}
- dev_pm_opp_put(opp);
data = (u32)(dev_pm_opp_get_freq(opp) / HZ_PER_MHZ);
+ dev_pm_opp_put(opp);
+
return hisi_uncore_cmd_send(uncore, HUCF_PCC_CMD_SET_FREQ, &data);
}
diff --git a/drivers/devfreq/mtk-cci-devfreq.c b/drivers/devfreq/mtk-cci-devfreq.c
index 22fe9e631f8a..4c22be728f6a 100644
--- a/drivers/devfreq/mtk-cci-devfreq.c
+++ b/drivers/devfreq/mtk-cci-devfreq.c
@@ -86,7 +86,7 @@ static int mtk_ccifreq_set_voltage(struct mtk_ccifreq_drv *drv, int new_voltage)
soc_data->sram_max_volt);
return ret;
}
- } else if (pre_voltage > new_voltage) {
+ } else {
voltage = max(new_voltage,
pre_vsram - soc_data->max_volt_shift);
ret = regulator_set_voltage(drv->proc_reg, voltage,
@@ -386,7 +386,8 @@ out_disable_cci_clk:
out_free_resources:
if (regulator_is_enabled(drv->proc_reg))
regulator_disable(drv->proc_reg);
- if (drv->sram_reg && regulator_is_enabled(drv->sram_reg))
+ if (!IS_ERR_OR_NULL(drv->sram_reg) &&
+ regulator_is_enabled(drv->sram_reg))
regulator_disable(drv->sram_reg);
return ret;
diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c
index 4a4f0106ab9d..8b57194ac698 100644
--- a/drivers/devfreq/tegra30-devfreq.c
+++ b/drivers/devfreq/tegra30-devfreq.c
@@ -9,9 +9,11 @@
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/devfreq.h>
+#include <linux/devfreq-governor.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -21,8 +23,6 @@
#include <soc/tegra/fuse.h>
-#include "governor.h"
-
#define ACTMON_GLB_STATUS 0x0
#define ACTMON_GLB_PERIOD_CTRL 0x4
@@ -326,14 +326,9 @@ static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
unsigned int i;
const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
- for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
- if (cpu_freq >= ratio->cpu_freq) {
- if (ratio->emc_freq >= tegra->max_freq)
- return tegra->max_freq;
- else
- return ratio->emc_freq;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++)
+ if (cpu_freq >= ratio->cpu_freq)
+ return min(ratio->emc_freq, tegra->max_freq);
return 0;
}
diff --git a/drivers/dibs/Kconfig b/drivers/dibs/Kconfig
new file mode 100644
index 000000000000..5dc347b9b235
--- /dev/null
+++ b/drivers/dibs/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0
+config DIBS
+ tristate "DIBS support"
+ default n
+ help
+ Direct Internal Buffer Sharing (DIBS)
+ A communication method that uses common physical (internal) memory
+ for synchronous direct access into a remote buffer.
+
+ Select this option to provide the abstraction layer between
+ dibs devices and dibs clients like the SMC protocol.
+ The module name is dibs.
+
+config DIBS_LO
+ bool "intra-OS shortcut with dibs loopback"
+ depends on DIBS
+ default n
+ help
+ DIBS_LO enables the creation of a software-emulated dibs device
+ named lo which can be used for transferring data when communication
+ occurs within the same OS. This allows convenient testing of dibs
+ clients, since dibs loopback is independent of architecture and
+ hardware.
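
For reference, enabling the class module together with the loopback test device amounts to the following fragment; since DIBS_LO is a bool, the loopback code is linked into dibs.ko itself (see the Makefile below):

CONFIG_DIBS=m
CONFIG_DIBS_LO=y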
diff --git a/drivers/dibs/Makefile b/drivers/dibs/Makefile
new file mode 100644
index 000000000000..85805490c77f
--- /dev/null
+++ b/drivers/dibs/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# DIBS class module
+#
+
+dibs-y += dibs_main.o
+obj-$(CONFIG_DIBS) += dibs.o
+dibs-$(CONFIG_DIBS_LO) += dibs_loopback.o
\ No newline at end of file
diff --git a/drivers/dibs/dibs_loopback.c b/drivers/dibs/dibs_loopback.c
new file mode 100644
index 000000000000..aa029e29c6b2
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Functions for dibs loopback/loopback-ism device.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/dibs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "dibs_loopback.h"
+
+#define DIBS_LO_SUPPORT_NOCOPY 0x1
+#define DIBS_DMA_ADDR_INVALID (~(dma_addr_t)0)
+
+static const char dibs_lo_dev_name[] = "lo";
+/* global loopback device */
+static struct dibs_lo_dev *lo_dev;
+
+static u16 dibs_lo_get_fabric_id(struct dibs_dev *dibs)
+{
+ return DIBS_LOOPBACK_FABRIC;
+}
+
+static int dibs_lo_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
+ u32 vid_valid, u32 vid)
+{
+ /* rgid should be the same as lgid */
+ if (!uuid_equal(rgid, &dibs->gid))
+ return -ENETUNREACH;
+ return 0;
+}
+
+static int dibs_lo_max_dmbs(void)
+{
+ return DIBS_LO_MAX_DMBS;
+}
+
+static int dibs_lo_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
+ struct dibs_client *client)
+{
+ struct dibs_lo_dmb_node *dmb_node, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ struct folio *folio;
+ unsigned long flags;
+ int sba_idx, rc;
+
+ ldev = dibs->drv_priv;
+ sba_idx = dmb->idx;
+ /* check space for new dmb */
+ for_each_clear_bit(sba_idx, ldev->sba_idx_mask, DIBS_LO_MAX_DMBS) {
+ if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
+ break;
+ }
+ if (sba_idx == DIBS_LO_MAX_DMBS)
+ return -ENOSPC;
+
+ dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
+ if (!dmb_node) {
+ rc = -ENOMEM;
+ goto err_bit;
+ }
+
+ dmb_node->sba_idx = sba_idx;
+ dmb_node->len = dmb->dmb_len;
+
+ /* not critical; fail under memory pressure and fall back to TCP */
+ folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+ __GFP_NORETRY | __GFP_ZERO,
+ get_order(dmb_node->len));
+ if (!folio) {
+ rc = -ENOMEM;
+ goto err_node;
+ }
+ dmb_node->cpu_addr = folio_address(folio);
+ dmb_node->dma_addr = DIBS_DMA_ADDR_INVALID;
+ refcount_set(&dmb_node->refcnt, 1);
+
+again:
+ /* add new dmb into hash table */
+ get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
+ if (tmp_node->token == dmb_node->token) {
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ goto again;
+ }
+ }
+ hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+ atomic_inc(&ldev->dmb_cnt);
+
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[sba_idx] = client->id;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ return 0;
+
+err_node:
+ kfree(dmb_node);
+err_bit:
+ clear_bit(sba_idx, ldev->sba_idx_mask);
+ return rc;
+}
+
+static void __dibs_lo_unregister_dmb(struct dibs_lo_dev *ldev,
+ struct dibs_lo_dmb_node *dmb_node)
+{
+ /* remove dmb from hash table */
+ write_lock_bh(&ldev->dmb_ht_lock);
+ hash_del(&dmb_node->list);
+ write_unlock_bh(&ldev->dmb_ht_lock);
+
+ clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
+ folio_put(virt_to_folio(dmb_node->cpu_addr));
+ kfree(dmb_node);
+
+ if (atomic_dec_and_test(&ldev->dmb_cnt))
+ wake_up(&ldev->ldev_release);
+}
+
+static int dibs_lo_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ unsigned long flags;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb from hash table */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ if (!dmb_node)
+ return -EINVAL;
+
+ if (refcount_dec_and_test(&dmb_node->refcnt)) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->dmb_clientid_arr[dmb_node->sba_idx] = NO_DIBS_CLIENT;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ }
+ return 0;
+}
+
+static int dibs_lo_support_dmb_nocopy(struct dibs_dev *dibs)
+{
+ return DIBS_LO_SUPPORT_NOCOPY;
+}
+
+static int dibs_lo_attach_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
+ if (tmp_node->token == dmb->dmb_tok) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (!refcount_inc_not_zero(&dmb_node->refcnt))
+ /* the dmb is being unregistered, but has
+ * not been removed from the hash table.
+ */
+ return -EINVAL;
+
+ /* provide dmb information */
+ dmb->idx = dmb_node->sba_idx;
+ dmb->dmb_tok = dmb_node->token;
+ dmb->cpu_addr = dmb_node->cpu_addr;
+ dmb->dma_addr = dmb_node->dma_addr;
+ dmb->dmb_len = dmb_node->len;
+ return 0;
+}
+
+static int dibs_lo_detach_dmb(struct dibs_dev *dibs, u64 token)
+{
+ struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+
+ ldev = dibs->drv_priv;
+
+ /* find dmb_node according to dmb->dmb_tok */
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
+ if (tmp_node->token == token) {
+ dmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!dmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (refcount_dec_and_test(&dmb_node->refcnt))
+ __dibs_lo_unregister_dmb(ldev, dmb_node);
+ return 0;
+}
+
+static int dibs_lo_move_data(struct dibs_dev *dibs, u64 dmb_tok,
+ unsigned int idx, bool sf, unsigned int offset,
+ void *data, unsigned int size)
+{
+ struct dibs_lo_dmb_node *rmb_node = NULL, *tmp_node;
+ struct dibs_lo_dev *ldev;
+ u16 s_mask;
+ u8 client_id;
+ u32 sba_idx;
+
+ ldev = dibs->drv_priv;
+
+ read_lock_bh(&ldev->dmb_ht_lock);
+ hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
+ if (tmp_node->token == dmb_tok) {
+ rmb_node = tmp_node;
+ break;
+ }
+ }
+ if (!rmb_node) {
+ read_unlock_bh(&ldev->dmb_ht_lock);
+ return -EINVAL;
+ }
+ memcpy((char *)rmb_node->cpu_addr + offset, data, size);
+ sba_idx = rmb_node->sba_idx;
+ read_unlock_bh(&ldev->dmb_ht_lock);
+
+ if (!sf)
+ return 0;
+
+ spin_lock(&dibs->lock);
+ client_id = dibs->dmb_clientid_arr[sba_idx];
+ s_mask = ror16(0x1000, idx);
+ if (likely(client_id != NO_DIBS_CLIENT && dibs->subs[client_id]))
+ dibs->subs[client_id]->ops->handle_irq(dibs, sba_idx, s_mask);
+ spin_unlock(&dibs->lock);
+
+ return 0;
+}
+
+static const struct dibs_dev_ops dibs_lo_ops = {
+ .get_fabric_id = dibs_lo_get_fabric_id,
+ .query_remote_gid = dibs_lo_query_rgid,
+ .max_dmbs = dibs_lo_max_dmbs,
+ .register_dmb = dibs_lo_register_dmb,
+ .unregister_dmb = dibs_lo_unregister_dmb,
+ .move_data = dibs_lo_move_data,
+ .support_mmapped_rdmb = dibs_lo_support_dmb_nocopy,
+ .attach_dmb = dibs_lo_attach_dmb,
+ .detach_dmb = dibs_lo_detach_dmb,
+};
+
+static void dibs_lo_dev_init(struct dibs_lo_dev *ldev)
+{
+ rwlock_init(&ldev->dmb_ht_lock);
+ hash_init(ldev->dmb_ht);
+ atomic_set(&ldev->dmb_cnt, 0);
+ init_waitqueue_head(&ldev->ldev_release);
+}
+
+static void dibs_lo_dev_exit(struct dibs_lo_dev *ldev)
+{
+ if (atomic_read(&ldev->dmb_cnt))
+ wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
+}
+
+static int dibs_lo_dev_probe(void)
+{
+ struct dibs_lo_dev *ldev;
+ struct dibs_dev *dibs;
+ int ret;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return -ENOMEM;
+
+ dibs = dibs_dev_alloc();
+ if (!dibs) {
+ kfree(ldev);
+ return -ENOMEM;
+ }
+
+ ldev->dibs = dibs;
+ dibs->drv_priv = ldev;
+ dibs_lo_dev_init(ldev);
+ uuid_gen(&dibs->gid);
+ dibs->ops = &dibs_lo_ops;
+
+ dibs->dev.parent = NULL;
+ dev_set_name(&dibs->dev, "%s", dibs_lo_dev_name);
+
+ ret = dibs_dev_add(dibs);
+ if (ret)
+ goto err_reg;
+ lo_dev = ldev;
+ return 0;
+
+err_reg:
+ /* dmb_clientid_arr was already freed by dibs_dev_add() on failure */
+ /* pairs with dibs_dev_alloc() */
+ put_device(&dibs->dev);
+ kfree(ldev);
+
+ return ret;
+}
+
+static void dibs_lo_dev_remove(void)
+{
+ if (!lo_dev)
+ return;
+
+ dibs_dev_del(lo_dev->dibs);
+ dibs_lo_dev_exit(lo_dev);
+ /* pairs with dibs_dev_alloc() */
+ put_device(&lo_dev->dibs->dev);
+ kfree(lo_dev);
+ lo_dev = NULL;
+}
+
+int dibs_loopback_init(void)
+{
+ return dibs_lo_dev_probe();
+}
+
+void dibs_loopback_exit(void)
+{
+ dibs_lo_dev_remove();
+}
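
As a rough usage sketch of the ops implemented above: a client registers a DMB, writes into it with move_data() (which raises the receiver's signal bit when sf is set), and unregisters it. The layouts of struct dibs_dmb and struct dibs_client are assumed from <linux/dibs.h>, which is not part of this hunk:

static int example_echo(struct dibs_dev *dibs, struct dibs_client *client)
{
        struct dibs_dmb dmb = { .dmb_len = PAGE_SIZE };
        char msg[] = "hello";
        int rc;

        rc = dibs->ops->register_dmb(dibs, &dmb, client);
        if (rc)
                return rc;

        /* copy into the buffer and signal DMB index 0 of the receiver */
        rc = dibs->ops->move_data(dibs, dmb.dmb_tok, 0, true, 0,
                                  msg, sizeof(msg));

        dibs->ops->unregister_dmb(dibs, &dmb);
        return rc;
}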
diff --git a/drivers/dibs/dibs_loopback.h b/drivers/dibs/dibs_loopback.h
new file mode 100644
index 000000000000..0664f6a8e662
--- /dev/null
+++ b/drivers/dibs/dibs_loopback.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dibs loopback (aka loopback-ism) device structure definitions.
+ *
+ * Copyright (c) 2024, Alibaba Inc.
+ *
+ * Author: Wen Gu <guwen@linux.alibaba.com>
+ * Tony Lu <tonylu@linux.alibaba.com>
+ *
+ */
+
+#ifndef _DIBS_LOOPBACK_H
+#define _DIBS_LOOPBACK_H
+
+#include <linux/dibs.h>
+#include <linux/hashtable.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#if IS_ENABLED(CONFIG_DIBS_LO)
+#define DIBS_LO_DMBS_HASH_BITS 12
+#define DIBS_LO_MAX_DMBS 5000
+
+struct dibs_lo_dmb_node {
+ struct hlist_node list;
+ u64 token;
+ u32 len;
+ u32 sba_idx;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ refcount_t refcnt;
+};
+
+struct dibs_lo_dev {
+ struct dibs_dev *dibs;
+ atomic_t dmb_cnt;
+ rwlock_t dmb_ht_lock;
+ DECLARE_BITMAP(sba_idx_mask, DIBS_LO_MAX_DMBS);
+ DECLARE_HASHTABLE(dmb_ht, DIBS_LO_DMBS_HASH_BITS);
+ wait_queue_head_t ldev_release;
+};
+
+int dibs_loopback_init(void);
+void dibs_loopback_exit(void);
+#else
+static inline int dibs_loopback_init(void)
+{
+ return 0;
+}
+
+static inline void dibs_loopback_exit(void)
+{
+}
+#endif
+
+#endif /* _DIBS_LOOPBACK_H */
diff --git a/drivers/dibs/dibs_main.c b/drivers/dibs/dibs_main.c
new file mode 100644
index 000000000000..b8c16586706c
--- /dev/null
+++ b/drivers/dibs/dibs_main.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DIBS - Direct Internal Buffer Sharing
+ *
+ * Implementation of the DIBS class module
+ *
+ * Copyright IBM Corp. 2025
+ */
+#define pr_fmt(fmt) "dibs: " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dibs.h>
+
+#include "dibs_loopback.h"
+
+MODULE_DESCRIPTION("Direct Internal Buffer Sharing class");
+MODULE_LICENSE("GPL");
+
+static struct class *dibs_class;
+
+/* use an array rather than a list for fast mapping: */
+static struct dibs_client *clients[MAX_DIBS_CLIENTS];
+static u8 max_client;
+static DEFINE_MUTEX(clients_lock);
+struct dibs_dev_list {
+ struct list_head list;
+ struct mutex mutex; /* protects dibs device list */
+};
+
+static struct dibs_dev_list dibs_dev_list = {
+ .list = LIST_HEAD_INIT(dibs_dev_list.list),
+ .mutex = __MUTEX_INITIALIZER(dibs_dev_list.mutex),
+};
+
+static void dibs_setup_forwarding(struct dibs_client *client,
+ struct dibs_dev *dibs)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ dibs->subs[client->id] = client;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+}
+
+int dibs_register_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ int i, rc = -ENOSPC;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i) {
+ if (!clients[i]) {
+ clients[i] = client;
+ client->id = i;
+ if (i == max_client)
+ max_client++;
+ rc = 0;
+ break;
+ }
+ }
+ mutex_unlock(&clients_lock);
+
+ if (i < MAX_DIBS_CLIENTS) {
+ /* initialize with all devices that we got so far */
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ dibs->priv[i] = NULL;
+ client->ops->add_dev(dibs);
+ dibs_setup_forwarding(client, dibs);
+ }
+ }
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_register_client);
+
+int dibs_unregister_client(struct dibs_client *client)
+{
+ struct dibs_dev *dibs;
+ unsigned long flags;
+ int max_dmbs;
+ int rc = 0;
+
+ mutex_lock(&dibs_dev_list.mutex);
+ list_for_each_entry(dibs, &dibs_dev_list.list, list) {
+ spin_lock_irqsave(&dibs->lock, flags);
+ max_dmbs = dibs->ops->max_dmbs();
+ for (int i = 0; i < max_dmbs; ++i) {
+ if (dibs->dmb_clientid_arr[i] == client->id) {
+ WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
+ __func__, client->name);
+ rc = -EBUSY;
+ goto err_reg_dmb;
+ }
+ }
+ /* Stop forwarding IRQs and events */
+ dibs->subs[client->id] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ clients[client->id]->ops->del_dev(dibs);
+ dibs->priv[client->id] = NULL;
+ }
+
+ mutex_lock(&clients_lock);
+ clients[client->id] = NULL;
+ if (client->id + 1 == max_client)
+ max_client--;
+ mutex_unlock(&clients_lock);
+
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+
+err_reg_dmb:
+ spin_unlock_irqrestore(&dibs->lock, flags);
+ mutex_unlock(&dibs_dev_list.mutex);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(dibs_unregister_client);
+
+static void dibs_dev_release(struct device *dev)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ kfree(dibs);
+}
+
+struct dibs_dev *dibs_dev_alloc(void)
+{
+ struct dibs_dev *dibs;
+
+ dibs = kzalloc(sizeof(*dibs), GFP_KERNEL);
+ if (!dibs)
+ return dibs;
+ dibs->dev.release = dibs_dev_release;
+ dibs->dev.class = dibs_class;
+ device_initialize(&dibs->dev);
+
+ return dibs;
+}
+EXPORT_SYMBOL_GPL(dibs_dev_alloc);
+
+static ssize_t gid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+
+ return sysfs_emit(buf, "%pUb\n", &dibs->gid);
+}
+static DEVICE_ATTR_RO(gid);
+
+static ssize_t fabric_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct dibs_dev *dibs;
+ u16 fabric_id;
+
+ dibs = container_of(dev, struct dibs_dev, dev);
+ fabric_id = dibs->ops->get_fabric_id(dibs);
+
+ return sysfs_emit(buf, "0x%04x\n", fabric_id);
+}
+static DEVICE_ATTR_RO(fabric_id);
+
+static struct attribute *dibs_dev_attrs[] = {
+ &dev_attr_gid.attr,
+ &dev_attr_fabric_id.attr,
+ NULL,
+};
+
+static const struct attribute_group dibs_dev_attr_group = {
+ .attrs = dibs_dev_attrs,
+};
+
+int dibs_dev_add(struct dibs_dev *dibs)
+{
+ int max_dmbs;
+ int i, ret;
+
+ max_dmbs = dibs->ops->max_dmbs();
+ spin_lock_init(&dibs->lock);
+ dibs->dmb_clientid_arr = kzalloc(max_dmbs, GFP_KERNEL);
+ if (!dibs->dmb_clientid_arr)
+ return -ENOMEM;
+ memset(dibs->dmb_clientid_arr, NO_DIBS_CLIENT, max_dmbs);
+
+ ret = device_add(&dibs->dev);
+ if (ret)
+ goto free_client_arr;
+
+ ret = sysfs_create_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+ if (ret) {
+ dev_err(&dibs->dev, "sysfs_create_group failed for dibs_dev\n");
+ goto err_device_del;
+ }
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i]) {
+ clients[i]->ops->add_dev(dibs);
+ dibs_setup_forwarding(clients[i], dibs);
+ }
+ }
+ mutex_unlock(&clients_lock);
+ list_add(&dibs->list, &dibs_dev_list.list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ return 0;
+
+err_device_del:
+ device_del(&dibs->dev);
+free_client_arr:
+ kfree(dibs->dmb_clientid_arr);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dibs_dev_add);
+
+void dibs_dev_del(struct dibs_dev *dibs)
+{
+ unsigned long flags;
+ int i;
+
+ sysfs_remove_group(&dibs->dev.kobj, &dibs_dev_attr_group);
+
+ spin_lock_irqsave(&dibs->lock, flags);
+ for (i = 0; i < MAX_DIBS_CLIENTS; ++i)
+ dibs->subs[i] = NULL;
+ spin_unlock_irqrestore(&dibs->lock, flags);
+
+ mutex_lock(&dibs_dev_list.mutex);
+ mutex_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i) {
+ if (clients[i])
+ clients[i]->ops->del_dev(dibs);
+ }
+ mutex_unlock(&clients_lock);
+ list_del_init(&dibs->list);
+ mutex_unlock(&dibs_dev_list.mutex);
+
+ device_del(&dibs->dev);
+ kfree(dibs->dmb_clientid_arr);
+}
+EXPORT_SYMBOL_GPL(dibs_dev_del);
+
+static int __init dibs_init(void)
+{
+ int rc;
+
+ dibs_class = class_create("dibs");
+ if (IS_ERR(dibs_class))
+ return PTR_ERR(dibs_class);
+
+ rc = dibs_loopback_init();
+ if (rc)
+ pr_err("%s fails with %d\n", __func__, rc);
+
+ return rc;
+}
+
+static void __exit dibs_exit(void)
+{
+ dibs_loopback_exit();
+ class_destroy(dibs_class);
+}
+
+subsys_initcall(dibs_init);
+module_exit(dibs_exit);
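
A minimal client skeleton against this API, using only the hooks dibs_main.c invokes (add_dev, del_dev, handle_irq); the exact prototypes live in <linux/dibs.h> and are assumed here:

static void my_add_dev(struct dibs_dev *dibs)
{
        /* called with the device list locked, for each present and future device */
}

static void my_del_dev(struct dibs_dev *dibs)
{
}

static void my_handle_irq(struct dibs_dev *dibs, unsigned int idx, u16 mask)
{
        /* forwarded by the device once dibs->subs[client->id] is set */
}

static const struct dibs_client_ops my_ops = {
        .add_dev = my_add_dev,
        .del_dev = my_del_dev,
        .handle_irq = my_handle_irq,
};

static struct dibs_client my_client = {
        .name = "example",
        .ops = &my_ops,
};

/* pairs: dibs_register_client(&my_client) / dibs_unregister_client(&my_client) */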
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 70ec901edf2c..2008fb7481b3 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
- dma-fence-unwrap.o dma-resv.o
+ dma-fence-unwrap.o dma-resv.o dma-buf-mapping.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf-mapping.c b/drivers/dma-buf/dma-buf-mapping.c
new file mode 100644
index 000000000000..b7352e609fbd
--- /dev/null
+++ b/drivers/dma-buf/dma-buf-mapping.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DMA BUF Mapping Helpers
+ */
+#include <linux/dma-buf-mapping.h>
+#include <linux/dma-resv.h>
+
+static struct scatterlist *fill_sg_entry(struct scatterlist *sgl, size_t length,
+ dma_addr_t addr)
+{
+ unsigned int len, nents;
+ int i;
+
+ nents = DIV_ROUND_UP(length, UINT_MAX);
+ for (i = 0; i < nents; i++) {
+ len = min_t(size_t, length, UINT_MAX);
+ length -= len;
+ /*
+ * DMABUF abuses scatterlist to create a scatterlist
+ * that does not have any CPU list, only the DMA list.
+ * Always set the page related values to NULL to ensure
+ * importers can't use it. The phys_addr based DMA API
+ * does not require the CPU list for mapping or unmapping.
+ */
+ sg_set_page(sgl, NULL, 0, 0);
+ sg_dma_address(sgl) = addr + (dma_addr_t)i * UINT_MAX;
+ sg_dma_len(sgl) = len;
+ sgl = sg_next(sgl);
+ }
+
+ return sgl;
+}
+
+static unsigned int calc_sg_nents(struct dma_iova_state *state,
+ struct dma_buf_phys_vec *phys_vec,
+ size_t nr_ranges, size_t size)
+{
+ unsigned int nents = 0;
+ size_t i;
+
+ if (!state || !dma_use_iova(state)) {
+ for (i = 0; i < nr_ranges; i++)
+ nents += DIV_ROUND_UP(phys_vec[i].len, UINT_MAX);
+ } else {
+ /*
+ * In the IOVA case a single SG span covers the whole IOVA
+ * range, but each entry is capped by sg->length, so more
+ * than one entry may be needed.
+ */
+ nents = DIV_ROUND_UP(size, UINT_MAX);
+ }
+
+ return nents;
+}
+
+/**
+ * struct dma_buf_dma - holds DMA mapping information
+ * @sgt: Scatter-gather table
+ * @state: DMA IOVA state relevant in IOMMU-based DMA
+ * @size: Total size of DMA transfer
+ */
+struct dma_buf_dma {
+ struct sg_table sgt;
+ struct dma_iova_state *state;
+ size_t size;
+};
+
+/**
+ * dma_buf_phys_vec_to_sgt - Returns the scatterlist table of the attachment
+ * from arrays of physical vectors. This function is intended for MMIO memory
+ * only.
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @provider: [in] p2pdma provider
+ * @phys_vec: [in] array of physical vectors
+ * @nr_ranges: [in] number of entries in phys_vec array
+ * @size: [in] total size of phys_vec
+ * @dir: [in] direction of DMA transfer
+ *
+ * Returns the sg_table containing the scatterlist; returns ERR_PTR
+ * on error. May return -EINTR if it is interrupted by a signal.
+ *
+ * On success, the DMA addresses and lengths in the returned scatterlist are
+ * PAGE_SIZE aligned.
+ *
+ * A mapping must be unmapped by using dma_buf_free_sgt().
+ *
+ * NOTE: This function is intended for exporters. If direct traffic routing is
+ * mandatory, the exporter should call pci_p2pdma_map_type() before calling
+ * this function.
+ */
+struct sg_table *dma_buf_phys_vec_to_sgt(struct dma_buf_attachment *attach,
+ struct p2pdma_provider *provider,
+ struct dma_buf_phys_vec *phys_vec,
+ size_t nr_ranges, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned int nents, mapped_len = 0;
+ struct dma_buf_dma *dma;
+ struct scatterlist *sgl;
+ dma_addr_t addr;
+ size_t i;
+ int ret;
+
+ if (WARN_ON(!attach || !attach->dmabuf || !provider))
+ /* This function is supposed to work on MMIO memory only */
+ return ERR_PTR(-EINVAL);
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+ return ERR_PTR(-ENOMEM);
+
+ switch (pci_p2pdma_map_type(provider, attach->dev)) {
+ case PCI_P2PDMA_MAP_BUS_ADDR:
+ /*
+ * There is no need for IOVA at all in this flow.
+ */
+ break;
+ case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+ dma->state = kzalloc(sizeof(*dma->state), GFP_KERNEL);
+ if (!dma->state) {
+ ret = -ENOMEM;
+ goto err_free_dma;
+ }
+
+ dma_iova_try_alloc(attach->dev, dma->state, 0, size);
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_free_dma;
+ }
+
+ nents = calc_sg_nents(dma->state, phys_vec, nr_ranges, size);
+ ret = sg_alloc_table(&dma->sgt, nents, GFP_KERNEL | __GFP_ZERO);
+ if (ret)
+ goto err_free_state;
+
+ sgl = dma->sgt.sgl;
+
+ for (i = 0; i < nr_ranges; i++) {
+ if (!dma->state) {
+ addr = pci_p2pdma_bus_addr_map(provider,
+ phys_vec[i].paddr);
+ } else if (dma_use_iova(dma->state)) {
+ ret = dma_iova_link(attach->dev, dma->state,
+ phys_vec[i].paddr, 0,
+ phys_vec[i].len, dir,
+ DMA_ATTR_MMIO);
+ if (ret)
+ goto err_unmap_dma;
+
+ mapped_len += phys_vec[i].len;
+ } else {
+ addr = dma_map_phys(attach->dev, phys_vec[i].paddr,
+ phys_vec[i].len, dir,
+ DMA_ATTR_MMIO);
+ ret = dma_mapping_error(attach->dev, addr);
+ if (ret)
+ goto err_unmap_dma;
+ }
+
+ if (!dma->state || !dma_use_iova(dma->state))
+ sgl = fill_sg_entry(sgl, phys_vec[i].len, addr);
+ }
+
+ if (dma->state && dma_use_iova(dma->state)) {
+ WARN_ON_ONCE(mapped_len != size);
+ ret = dma_iova_sync(attach->dev, dma->state, 0, mapped_len);
+ if (ret)
+ goto err_unmap_dma;
+
+ sgl = fill_sg_entry(sgl, mapped_len, dma->state->addr);
+ }
+
+ dma->size = size;
+
+ /*
+ * No CPU list is included, so set orig_nents = 0; importers can
+ * detect this via the SG table and must use nents only.
+ */
+ dma->sgt.orig_nents = 0;
+
+ /*
+ * sgl must be NULL here: we consumed exactly the number of
+ * entries allocated in sg_alloc_table().
+ */
+ WARN_ON_ONCE(sgl);
+ return &dma->sgt;
+
+err_unmap_dma:
+ if (!i || !dma->state) {
+ ; /* Do nothing */
+ } else if (dma_use_iova(dma->state)) {
+ dma_iova_destroy(attach->dev, dma->state, mapped_len, dir,
+ DMA_ATTR_MMIO);
+ } else {
+ for_each_sgtable_dma_sg(&dma->sgt, sgl, i)
+ dma_unmap_phys(attach->dev, sg_dma_address(sgl),
+ sg_dma_len(sgl), dir, DMA_ATTR_MMIO);
+ }
+ sg_free_table(&dma->sgt);
+err_free_state:
+ kfree(dma->state);
+err_free_dma:
+ kfree(dma);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_phys_vec_to_sgt, "DMA_BUF");
+
+/**
+ * dma_buf_free_sgt - unmaps the buffer
+ * @attach: [in] attachment to unmap buffer from
+ * @sgt: [in] scatterlist info of the buffer to unmap
+ * @dir: [in] direction of DMA transfer
+ *
+ * This unmaps a DMA mapping for @attach obtained
+ * by dma_buf_phys_vec_to_sgt().
+ */
+void dma_buf_free_sgt(struct dma_buf_attachment *attach, struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct dma_buf_dma *dma = container_of(sgt, struct dma_buf_dma, sgt);
+ int i;
+
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ if (!dma->state) {
+ ; /* Do nothing */
+ } else if (dma_use_iova(dma->state)) {
+ dma_iova_destroy(attach->dev, dma->state, dma->size, dir,
+ DMA_ATTR_MMIO);
+ } else {
+ struct scatterlist *sgl;
+
+ for_each_sgtable_dma_sg(sgt, sgl, i)
+ dma_unmap_phys(attach->dev, sg_dma_address(sgl),
+ sg_dma_len(sgl), dir, DMA_ATTR_MMIO);
+ }
+
+ sg_free_table(sgt);
+ kfree(dma->state);
+ kfree(dma);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_free_sgt, "DMA_BUF");
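
To make the UINT_MAX splitting in calc_sg_nents()/fill_sg_entry() concrete, here is the arithmetic for one oversized span; sg_dma_len() is a u32, which is the only reason the IOVA case can need more than one entry:

size_t size = 8ULL << 30;                               /* 8 GiB span */
unsigned int nents = DIV_ROUND_UP(size, UINT_MAX);      /* = 3 */
/* resulting sg_dma_len() values: 4294967295, 4294967295, 2 */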
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 2bcf9ceca997..edaa9e4ee4ae 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -768,18 +768,10 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");
*/
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
- int fd;
-
if (!dmabuf || !dmabuf->file)
return -EINVAL;
- fd = get_unused_fd_flags(flags);
- if (fd < 0)
- return fd;
-
- fd_install(fd, dmabuf->file);
-
- return fd;
+ return FD_ADD(flags, dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 3f78c56b58dc..b4f5c8635276 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -121,29 +121,27 @@ static const struct dma_fence_ops dma_fence_stub_ops = {
.get_timeline_name = dma_fence_stub_get_name,
};
+static int __init dma_fence_init_stub(void)
+{
+ dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
+ &dma_fence_stub_lock, 0, 0);
+
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &dma_fence_stub.flags);
+
+ dma_fence_signal(&dma_fence_stub);
+ return 0;
+}
+subsys_initcall(dma_fence_init_stub);
+
/**
* dma_fence_get_stub - return a signaled fence
*
- * Return a stub fence which is already signaled. The fence's
- * timestamp corresponds to the first time after boot this
- * function is called.
+ * Return a stub fence which is already signaled. The fence's timestamp
+ * corresponds to the initialisation time of the Linux kernel.
*/
struct dma_fence *dma_fence_get_stub(void)
{
- spin_lock(&dma_fence_stub_lock);
- if (!dma_fence_stub.ops) {
- dma_fence_init(&dma_fence_stub,
- &dma_fence_stub_ops,
- &dma_fence_stub_lock,
- 0, 0);
-
- set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &dma_fence_stub.flags);
-
- dma_fence_signal_locked(&dma_fence_stub);
- }
- spin_unlock(&dma_fence_stub_lock);
-
return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);
@@ -999,19 +997,21 @@ EXPORT_SYMBOL(dma_fence_set_deadline);
*/
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
- const char __rcu *timeline;
- const char __rcu *driver;
+ const char __rcu *timeline = "";
+ const char __rcu *driver = "";
+ const char *signaled = "";
rcu_read_lock();
- timeline = dma_fence_timeline_name(fence);
- driver = dma_fence_driver_name(fence);
+ if (!dma_fence_is_signaled(fence)) {
+ timeline = dma_fence_timeline_name(fence);
+ driver = dma_fence_driver_name(fence);
+ signaled = "un";
+ }
- seq_printf(seq, "%s %s seq %llu %ssignalled\n",
- rcu_dereference(driver),
- rcu_dereference(timeline),
- fence->seqno,
- dma_fence_is_signaled(fence) ? "" : "un");
+ seq_printf(seq, "%llu:%llu %s %s %ssignalled\n",
+ fence->context, fence->seqno, timeline, driver,
+ signaled);
rcu_read_unlock();
}
@@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
"RCU protection is required for safe access to returned string");
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
- return fence->ops->get_driver_name(fence);
+ return fence->ops->get_timeline_name(fence);
else
return "signaled-timeline";
}
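
With the stub now initialised at boot, callers get a lock-free, already-signalled fence; the reference still has to be dropped as usual. A short usage sketch:

struct dma_fence *f = dma_fence_get_stub();

WARN_ON(!dma_fence_is_signaled(f));     /* always true for the stub */
dma_fence_put(f);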
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 3cbe87d4a464..8ab49924f8b7 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -11,6 +11,7 @@
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/nospec.h>
#include <linux/syscalls.h>
@@ -202,6 +203,7 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
{
return heap->priv;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_drvdata, "DMA_BUF_HEAP");
/**
* dma_heap_get_name - get heap name
@@ -214,6 +216,7 @@ const char *dma_heap_get_name(struct dma_heap *heap)
{
return heap->name;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_get_name, "DMA_BUF_HEAP");
/**
* dma_heap_add - adds a heap to dmabuf heaps
@@ -303,6 +306,7 @@ err0:
kfree(heap);
return err_ret;
}
+EXPORT_SYMBOL_NS_GPL(dma_heap_add, "DMA_BUF_HEAP");
static char *dma_heap_devnode(const struct device *dev, umode_t *mode)
{
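
Since the new exports live in the DMA_BUF_HEAP symbol namespace, a modular heap provider has to import that namespace explicitly. A minimal sketch under that assumption; example_allocate() is a placeholder, not a working allocator:

#include <linux/dma-heap.h>
#include <linux/module.h>

static struct dma_buf *example_allocate(struct dma_heap *heap,
                                        unsigned long len,
                                        u32 fd_flags, u64 heap_flags)
{
        return ERR_PTR(-ENOMEM);        /* placeholder */
}

static const struct dma_heap_ops example_heap_ops = {
        .allocate = example_allocate,
};

static const struct dma_heap_export_info exp_info = {
        .name = "example_heap",
        .ops = &example_heap_ops,
};

static int __init example_heap_init(void)
{
        return PTR_ERR_OR_ZERO(dma_heap_add(&exp_info));
}
module_init(example_heap_init);

MODULE_IMPORT_NS("DMA_BUF_HEAP");
MODULE_LICENSE("GPL");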
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index bb369b38b001..a5eef06c4226 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -12,13 +12,3 @@ config DMABUF_HEAPS_CMA
Choose this option to enable dma-buf CMA heap. This heap is backed
by the Contiguous Memory Allocator (CMA). If your system has these
regions, you should say Y here.
-
-config DMABUF_HEAPS_CMA_LEGACY
- bool "Legacy DMA-BUF CMA Heap"
- default y
- depends on DMABUF_HEAPS_CMA
- help
- Add a duplicate CMA-backed dma-buf heap with legacy naming derived
- from the CMA area's devicetree node, or "reserved" if the area is not
- defined in the devicetree. This uses the same underlying allocator as
- CONFIG_DMABUF_HEAPS_CMA.
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 0df007111975..42f88193eab9 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -14,6 +14,7 @@
#include <linux/cma.h>
#include <linux/dma-buf.h>
+#include <linux/dma-buf/heaps/cma.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
@@ -21,12 +22,27 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#define DEFAULT_CMA_NAME "default_cma_region"
+static struct cma *dma_areas[MAX_CMA_AREAS] __initdata;
+static unsigned int dma_areas_num __initdata;
+
+int __init dma_heap_cma_register_heap(struct cma *cma)
+{
+ if (dma_areas_num >= ARRAY_SIZE(dma_areas))
+ return -EINVAL;
+
+ dma_areas[dma_areas_num++] = cma;
+
+ return 0;
+}
+
struct cma_heap {
struct dma_heap *heap;
struct cma *cma;
@@ -395,33 +411,30 @@ static int __init __add_cma_heap(struct cma *cma, const char *name)
return 0;
}
-static int __init add_default_cma_heap(void)
+static int __init add_cma_heaps(void)
{
struct cma *default_cma = dev_get_cma_area(NULL);
- const char *legacy_cma_name;
+ unsigned int i;
int ret;
- if (!default_cma)
- return 0;
+ if (default_cma) {
+ ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
+ if (ret)
+ return ret;
+ }
- ret = __add_cma_heap(default_cma, DEFAULT_CMA_NAME);
- if (ret)
- return ret;
+ for (i = 0; i < dma_areas_num; i++) {
+ struct cma *cma = dma_areas[i];
- if (IS_ENABLED(CONFIG_DMABUF_HEAPS_CMA_LEGACY)) {
- legacy_cma_name = cma_get_name(default_cma);
- if (!strcmp(legacy_cma_name, DEFAULT_CMA_NAME)) {
- pr_warn("legacy name and default name are the same, skipping legacy heap\n");
- return 0;
+ ret = __add_cma_heap(cma, cma_get_name(cma));
+ if (ret) {
+ pr_warn("Failed to add CMA heap %s\n", cma_get_name(cma));
+ continue;
}
- ret = __add_cma_heap(default_cma, legacy_cma_name);
- if (ret)
- pr_warn("failed to add legacy heap: %pe\n",
- ERR_PTR(ret));
}
return 0;
}
-module_init(add_default_cma_heap);
+module_init(add_cma_heaps);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index bbe7881f1360..4c782fe33fd4 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -186,20 +186,35 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
struct system_heap_buffer *buffer = dmabuf->priv;
struct sg_table *table = &buffer->sg_table;
unsigned long addr = vma->vm_start;
- struct sg_page_iter piter;
- int ret;
+ unsigned long pgoff = vma->vm_pgoff;
+ struct scatterlist *sg;
+ int i, ret;
+
+ for_each_sgtable_sg(table, sg, i) {
+ unsigned long n = sg->length >> PAGE_SHIFT;
- for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
- struct page *page = sg_page_iter_page(&piter);
+ if (pgoff < n)
+ break;
+ pgoff -= n;
+ }
+
+ for (; sg && addr < vma->vm_end; sg = sg_next(sg)) {
+ unsigned long n = (sg->length >> PAGE_SHIFT) - pgoff;
+ struct page *page = sg_page(sg) + pgoff;
+ unsigned long size = n << PAGE_SHIFT;
+
+ if (addr + size > vma->vm_end)
+ size = vma->vm_end - addr;
- ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
- vma->vm_page_prot);
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page),
+ size, vma->vm_page_prot);
if (ret)
return ret;
- addr += PAGE_SIZE;
- if (addr >= vma->vm_end)
- return 0;
+
+ addr += size;
+ pgoff = 0;
}
+
return 0;
}
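
The userspace view of the path the reworked mmap serves: allocate a buffer from the system heap and map the resulting dma-buf fd. A sketch with error handling trimmed; the ioctl and struct are from the dma-heap uapi:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-heap.h>

int main(void)
{
        int heap = open("/dev/dma_heap/system", O_RDONLY);
        struct dma_heap_allocation_data alloc = {
                .len = 1 << 20,                 /* 1 MiB */
                .fd_flags = O_RDWR | O_CLOEXEC,
        };

        if (heap < 0 || ioctl(heap, DMA_HEAP_IOCTL_ALLOC, &alloc))
                return 1;

        void *p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE,
                       MAP_SHARED, alloc.fd, 0);
        return p == MAP_FAILED;
}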
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 3c20f1d31cf5..6f09d13be6b6 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -8,6 +8,7 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include <linux/panic.h>
#include <linux/slab.h>
#include <linux/sync_file.h>
@@ -349,6 +350,9 @@ static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
struct sync_file *sync_file;
struct sw_sync_create_fence_data data;
+ /* SW sync fence are inherently unsafe and can deadlock the kernel */
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+
if (fd < 0)
return fd;
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
index 67cd69551e42..9e5d662cd4e8 100644
--- a/drivers/dma-buf/sync_debug.c
+++ b/drivers/dma-buf/sync_debug.c
@@ -59,7 +59,7 @@ static void sync_print_fence(struct seq_file *s,
struct timespec64 ts64 =
ktime_to_timespec64(fence->timestamp);
- seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
+ seq_printf(s, "@%ptSp", &ts64);
}
seq_printf(s, ": %lld", fence->seqno);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 05c7c7d9e5a4..8bb0a119ecd4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -102,7 +102,7 @@ config ARM_DMA350
config AT_HDMAC
tristate "Atmel AHB DMA support"
- depends on ARCH_AT91
+ depends on ARCH_AT91 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -143,7 +143,7 @@ config BCM_SBA_RAID
config DMA_BCM2835
tristate "BCM2835 DMA engine support"
- depends on ARCH_BCM2835
+ depends on ARCH_BCM2835 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -450,7 +450,7 @@ config MILBEAUT_XDMAC
config MMP_PDMA
tristate "MMP PDMA support"
- depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
+ depends on ARCH_MMP || ARCH_PXA || ARCH_SPACEMIT || COMPILE_TEST
select DMA_ENGINE
help
Support the MMP PDMA engine for PXA and MMP platform.
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 2d147712cbc6..7d226453961f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -887,7 +887,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
first = xt->sgl;
dev_info(chan2dev(chan),
- "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+ "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
__func__, &xt->src_start, &xt->dst_start, xt->numf,
xt->frame_size, flags);
@@ -1174,7 +1174,7 @@ atc_prep_dma_memset_sg(struct dma_chan *chan,
int i;
int ret;
- dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
+ dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%x f0x%lx\n", __func__,
value, sg_len, flags);
if (unlikely(!sgl || !sg_len)) {
@@ -1503,7 +1503,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
unsigned int periods = buf_len / period_len;
unsigned int i;
- dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
+ dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%zu/%zu)\n",
direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
&buf_addr,
periods, buf_len, period_len);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 0117bb2e8591..321748e2983e 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -1060,7 +1060,6 @@ static struct platform_driver bcm2835_dma_driver = {
module_platform_driver(bcm2835_dma_driver);
-MODULE_ALIAS("platform:bcm2835-dma");
MODULE_DESCRIPTION("BCM2835 DMA engine driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index b43255f914f3..8e5f7defa6b6 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -584,6 +584,25 @@ dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
return dw_edma_device_transfer(&xfer);
}
+static void dw_hdma_set_callback_result(struct virt_dma_desc *vd,
+ enum dmaengine_tx_result result)
+{
+ u32 residue = 0;
+ struct dw_edma_desc *desc;
+ struct dmaengine_result *res;
+
+ if (!vd->tx.callback_result)
+ return;
+
+ desc = vd2dw_edma_desc(vd);
+ if (desc)
+ residue = desc->alloc_sz - desc->xfer_sz;
+
+ res = &vd->tx_result;
+ res->result = result;
+ res->residue = residue;
+}
+
static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
struct dw_edma_desc *desc;
@@ -597,6 +616,8 @@ static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
case EDMA_REQ_NONE:
desc = vd2dw_edma_desc(vd);
if (!desc->chunks_alloc) {
+ dw_hdma_set_callback_result(vd,
+ DMA_TRANS_NOERROR);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
@@ -633,6 +654,7 @@ static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
spin_lock_irqsave(&chan->vc.lock, flags);
vd = vchan_next_desc(&chan->vc);
if (vd) {
+ dw_hdma_set_callback_result(vd, DMA_TRANS_ABORTED);
list_del(&vd->node);
vchan_cookie_complete(vd);
}
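
On the consumer side, a client that sets tx->callback_result can now distinguish aborted transfers and read the residue filled in by dw_hdma_set_callback_result(). A sketch using the standard dmaengine completion API:

static void my_done(void *param, const struct dmaengine_result *res)
{
        if (res->result == DMA_TRANS_ABORTED)
                pr_warn("transfer aborted, %u bytes not transferred\n",
                        res->residue);
}

/* tx = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags); */
/* tx->callback_result = my_done;                              */
/* tx->callback_param = my_ctx;                                */
/* dmaengine_submit(tx);                                       */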
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index cee56cd31a61..c63fa52036d7 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -21,8 +21,6 @@
#include "internal.h"
-#define DRV_NAME "dw_dmac"
-
static int dw_probe(struct platform_device *pdev)
{
const struct dw_dma_chip_pdata *match;
@@ -190,7 +188,7 @@ static struct platform_driver dw_driver = {
.remove = dw_remove,
.shutdown = dw_shutdown,
.driver = {
- .name = DRV_NAME,
+ .name = "dw_dmac",
.pm = pm_sleep_ptr(&dw_dev_pm_ops),
.of_match_table = of_match_ptr(dw_dma_of_id_table),
.acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table),
@@ -211,4 +209,3 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller platform driver");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 4976d7dde080..a59212758029 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -206,15 +206,19 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
-static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth src_addr_width,
+ enum dma_slave_buswidth dst_addr_width)
{
- u32 val;
+ u32 src_val, dst_val;
- if (addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
- addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- val = ffs(addr_width) - 1;
- return val | (val << 8);
+ src_val = ffs(src_addr_width) - 1;
+ dst_val = ffs(dst_addr_width) - 1;
+ return dst_val | (src_val << 8);
}
void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
@@ -612,13 +616,19 @@ struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
dma_buf_next = dma_addr;
if (direction == DMA_MEM_TO_DEV) {
+ if (!fsl_chan->cfg.src_addr_width)
+ fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
fsl_chan->attr =
- fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+ fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.dst_addr_width *
fsl_chan->cfg.dst_maxburst;
} else {
+ if (!fsl_chan->cfg.dst_addr_width)
+ fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
fsl_chan->attr =
- fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+ fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.src_addr_width *
fsl_chan->cfg.src_maxburst;
}
@@ -689,13 +699,19 @@ struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
fsl_desc->dirn = direction;
if (direction == DMA_MEM_TO_DEV) {
+ if (!fsl_chan->cfg.src_addr_width)
+ fsl_chan->cfg.src_addr_width = fsl_chan->cfg.dst_addr_width;
fsl_chan->attr =
- fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+ fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.dst_addr_width *
fsl_chan->cfg.dst_maxburst;
} else {
+ if (!fsl_chan->cfg.dst_addr_width)
+ fsl_chan->cfg.dst_addr_width = fsl_chan->cfg.src_addr_width;
fsl_chan->attr =
- fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width,
+ fsl_chan->cfg.dst_addr_width);
nbytes = fsl_chan->cfg.src_addr_width *
fsl_chan->cfg.src_maxburst;
}
@@ -766,6 +782,10 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
struct fsl_edma_desc *fsl_desc;
+ u32 src_bus_width, dst_bus_width;
+
+ src_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_src) - 1));
+ dst_bus_width = min_t(u32, DMA_SLAVE_BUSWIDTH_32_BYTES, 1 << (ffs(dma_dst) - 1));
fsl_desc = fsl_edma_alloc_desc(fsl_chan, 1);
if (!fsl_desc)
@@ -778,8 +798,9 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
/* To match with copy_align and max_seg_size so 1 tcd is enough */
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
- fsl_edma_get_tcd_attr(DMA_SLAVE_BUSWIDTH_32_BYTES),
- 32, len, 0, 1, 1, 32, 0, true, true, false);
+ fsl_edma_get_tcd_attr(src_bus_width, dst_bus_width),
+ src_bus_width, len, 0, 1, 1, dst_bus_width, 0, true,
+ true, false);
return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
}
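
A worked example of the new attr encoding: each bus width is packed as ffs(width) - 1, destination in bits 2:0 and source in bits 10:8. In prep_memcpy the widths are now derived from address alignment, 1 << (ffs(addr) - 1), i.e. the largest power of two dividing the address, clamped to 32 bytes:

u32 src_val = ffs(DMA_SLAVE_BUSWIDTH_4_BYTES) - 1;      /* ffs(4) - 1 = 2 */
u32 dst_val = ffs(DMA_SLAVE_BUSWIDTH_2_BYTES) - 1;      /* ffs(2) - 1 = 1 */
u32 attr = dst_val | (src_val << 8);                    /* = 0x0201 */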
diff --git a/drivers/dma/fsl-edma-main.c b/drivers/dma/fsl-edma-main.c
index 97583c7d51a2..a753b7cbfa7a 100644
--- a/drivers/dma/fsl-edma-main.c
+++ b/drivers/dma/fsl-edma-main.c
@@ -999,6 +999,5 @@ static void __exit fsl_edma_exit(void)
}
module_exit(fsl_edma_exit);
-MODULE_ALIAS("platform:fsl-edma");
MODULE_DESCRIPTION("Freescale eDMA engine driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 21e13f1207cb..6ace5bf80c40 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1296,6 +1296,5 @@ static struct platform_driver fsl_qdma_driver = {
module_platform_driver(fsl_qdma_driver);
-MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");
diff --git a/drivers/dma/idxd/defaults.c b/drivers/dma/idxd/defaults.c
index c607ae8dd12c..2bbbcd02a0da 100644
--- a/drivers/dma/idxd/defaults.c
+++ b/drivers/dma/idxd/defaults.c
@@ -36,12 +36,10 @@ int idxd_load_iaa_device_defaults(struct idxd_device *idxd)
group->num_wqs++;
/* set name to "iaa_crypto" */
- memset(wq->name, 0, WQ_NAME_SIZE + 1);
- strscpy(wq->name, "iaa_crypto", WQ_NAME_SIZE + 1);
+ strscpy_pad(wq->name, "iaa_crypto");
/* set driver_name to "crypto" */
- memset(wq->driver_name, 0, DRIVER_NAME_SIZE + 1);
- strscpy(wq->driver_name, "crypto", DRIVER_NAME_SIZE + 1);
+ strscpy_pad(wq->driver_name, "crypto");
engine = idxd->engines[0];
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 5cf419fe6b46..c2cdf41b6e57 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -16,6 +16,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
u32 *status);
static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
+static int idxd_wq_config_write(struct idxd_wq *wq);
/* Interrupt control bits */
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
@@ -215,14 +216,28 @@ int idxd_wq_disable(struct idxd_wq *wq, bool reset_config)
return 0;
}
+ /*
+ * Disable WQ does not drain address translations. If WQ attributes
+ * are changed before translations are drained, pending translations
+ * can be issued using the updated WQ attributes, resulting in invalid
+ * translations being cached in the device translation cache.
+ *
+ * To make sure pending translations are drained before WQ
+ * attributes are changed, we use a WQ Drain followed by WQ Reset and
+ * then restore the WQ configuration.
+ */
+ idxd_wq_drain(wq);
+
operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
- idxd_cmd_exec(idxd, IDXD_CMD_DISABLE_WQ, operand, &status);
+ idxd_cmd_exec(idxd, IDXD_CMD_RESET_WQ, operand, &status);
if (status != IDXD_CMDSTS_SUCCESS) {
- dev_dbg(dev, "WQ disable failed: %#x\n", status);
+ dev_dbg(dev, "WQ reset failed: %#x\n", status);
return -ENXIO;
}
+ idxd_wq_config_write(wq);
+
if (reset_config)
idxd_wq_disable_cleanup(wq);
clear_bit(wq->id, idxd->wq_enable_map);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 8c4725ad1f64..2acc34b3daff 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -80,6 +80,8 @@ static struct pci_device_id idxd_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, IAA_DMR, &idxd_driver_data[IDXD_TYPE_IAX]) },
/* IAA PTL platforms */
{ PCI_DEVICE_DATA(INTEL, IAA_PTL, &idxd_driver_data[IDXD_TYPE_IAX]) },
+ /* IAA WCL platforms */
+ { PCI_DEVICE_DATA(INTEL, IAA_WCL, &idxd_driver_data[IDXD_TYPE_IAX]) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 9c1c546fe443..8dc2e8bca779 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -3,13 +3,18 @@
#ifndef _IDXD_REGISTERS_H_
#define _IDXD_REGISTERS_H_
+#ifdef __KERNEL__
#include <uapi/linux/idxd.h>
+#else
+#include <linux/idxd.h>
+#endif
/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb
#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212
#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216
#define PCI_DEVICE_ID_INTEL_IAA_PTL 0xb02d
+#define PCI_DEVICE_ID_INTEL_IAA_WCL 0xfd2d
#define DEVICE_VERSION_1 0x100
#define DEVICE_VERSION_2 0x200
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 02a85d6f1bea..ed9e56de5a9b 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -256,7 +256,7 @@ struct sdma_script_start_addrs {
/* End of v3 array */
union { s32 v3_end; s32 mcu_2_zqspi_addr; };
/* End of v4 array */
- s32 v4_end[0];
+ s32 v4_end[];
};
/*
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index a180171087a8..12a4a4860a74 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -19,6 +19,8 @@
#define IOAT_DMA_DCA_ANY_CPU ~0
+int system_has_dca_enabled(struct pci_dev *pdev);
+
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 79e4e4c09c18..0373c48520c9 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -63,9 +63,6 @@
#define IOAT_VER_3_3 0x33 /* Version 3.3 */
#define IOAT_VER_3_4 0x34 /* Version 3.4 */
-
-int system_has_dca_enabled(struct pci_dev *pdev);
-
#define IOAT_DESC_SZ 64
struct ioat_dma_descriptor {
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 02f68b328511..227398673b73 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1286,7 +1286,6 @@ static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index acc2983e28e0..0f9cd7815f88 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -1034,5 +1034,4 @@ static struct platform_driver k3_pdma_driver = {
module_platform_driver(k3_pdma_driver);
MODULE_DESCRIPTION("HiSilicon k3 DMA Driver");
-MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a95d31103d30..d07229a74886 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/of_dma.h>
#include <linux/of.h>
@@ -23,9 +25,12 @@
#define DCSR 0x0000
#define DALGN 0x00a0
#define DINT 0x00f0
-#define DDADR 0x0200
+#define DDADR(n) (0x0200 + ((n) << 4))
#define DSADR(n) (0x0204 + ((n) << 4))
#define DTADR(n) (0x0208 + ((n) << 4))
+#define DDADRH(n) (0x0300 + ((n) << 4))
+#define DSADRH(n) (0x0304 + ((n) << 4))
+#define DTADRH(n) (0x0308 + ((n) << 4))
#define DCMD 0x020c
#define DCSR_RUN BIT(31) /* Run Bit (read / write) */
@@ -42,6 +47,7 @@
#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
#define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_LPAEEN BIT(21) /* Long Physical Address Extension Enable */
#define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
#define DCSR_EORINTR BIT(9) /* The end of Receive */
@@ -74,6 +80,16 @@ struct mmp_pdma_desc_hw {
u32 dsadr; /* DSADR value for the current transfer */
u32 dtadr; /* DTADR value for the current transfer */
u32 dcmd; /* DCMD value for the current transfer */
+ /*
+ * The following 32-bit words are only used in the 64-bit, i.e.
+ * LPAE (Long Physical Address Extension) mode.
+ * They are used to specify the high 32 bits of the descriptor's
+ * addresses.
+ */
+ u32 ddadrh; /* High 32-bit of DDADR */
+ u32 dsadrh; /* High 32-bit of DSADR */
+ u32 dtadrh; /* High 32-bit of DTADR */
+ u32 rsvd; /* reserved */
} __aligned(32);
struct mmp_pdma_desc_sw {
@@ -118,12 +134,55 @@ struct mmp_pdma_phy {
struct mmp_pdma_chan *vchan;
};
+/**
+ * struct mmp_pdma_ops - Operations for the MMP PDMA controller
+ *
+ * Hardware Register Operations (read/write hardware registers):
+ * @write_next_addr: Function to program address of next descriptor into
+ * DDADR/DDADRH
+ * @read_src_addr: Function to read the source address from DSADR/DSADRH
+ * @read_dst_addr: Function to read the destination address from DTADR/DTADRH
+ *
+ * Descriptor Memory Operations (manipulate descriptor structs in memory):
+ * @set_desc_next_addr: Function to set next descriptor address in descriptor
+ * @set_desc_src_addr: Function to set the source address in descriptor
+ * @set_desc_dst_addr: Function to set the destination address in descriptor
+ * @get_desc_src_addr: Function to get the source address from descriptor
+ * @get_desc_dst_addr: Function to get the destination address from descriptor
+ *
+ * Controller Configuration:
+ * @run_bits: Control bits in DCSR register for channel start/stop
+ * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
+ * settings, or explicit mask like DMA_BIT_MASK(32/64)
+ */
+struct mmp_pdma_ops {
+ /* Hardware Register Operations */
+ void (*write_next_addr)(struct mmp_pdma_phy *phy, dma_addr_t addr);
+ u64 (*read_src_addr)(struct mmp_pdma_phy *phy);
+ u64 (*read_dst_addr)(struct mmp_pdma_phy *phy);
+
+ /* Descriptor Memory Operations */
+ void (*set_desc_next_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_src_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ void (*set_desc_dst_addr)(struct mmp_pdma_desc_hw *desc,
+ dma_addr_t addr);
+ u64 (*get_desc_src_addr)(const struct mmp_pdma_desc_hw *desc);
+ u64 (*get_desc_dst_addr)(const struct mmp_pdma_desc_hw *desc);
+
+ /* Controller Configuration */
+ u32 run_bits;
+ u64 dma_mask;
+};
+
struct mmp_pdma_device {
int dma_channels;
void __iomem *base;
struct device *dev;
struct dma_device device;
struct mmp_pdma_phy *phy;
+ const struct mmp_pdma_ops *ops;
spinlock_t phy_lock; /* protect alloc/free phy channels */
};
@@ -136,24 +195,112 @@ struct mmp_pdma_device {
#define to_mmp_pdma_dev(dmadev) \
container_of(dmadev, struct mmp_pdma_device, device)
-static int mmp_pdma_config_write(struct dma_chan *dchan,
- struct dma_slave_config *cfg,
- enum dma_transfer_direction direction);
+/* For 32-bit PDMA */
+static void write_next_addr_32(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(addr, phy->base + DDADR(phy->idx));
+}
+
+static u64 read_src_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DSADR(phy->idx));
+}
+
+static u64 read_dst_addr_32(struct mmp_pdma_phy *phy)
+{
+ return readl(phy->base + DTADR(phy->idx));
+}
+
+static void set_desc_next_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = addr;
+}
+
+static void set_desc_src_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = addr;
+}
+
+static void set_desc_dst_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = addr;
+}
+
+static u64 get_desc_src_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_32(const struct mmp_pdma_desc_hw *desc)
+{
+ return desc->dtadr;
+}
+
+/* For 64-bit PDMA */
+static void write_next_addr_64(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+ writel(lower_32_bits(addr), phy->base + DDADR(phy->idx));
+ writel(upper_32_bits(addr), phy->base + DDADRH(phy->idx));
+}
+
+static u64 read_src_addr_64(struct mmp_pdma_phy *phy)
+{
+ u32 low = readl(phy->base + DSADR(phy->idx));
+ u32 high = readl(phy->base + DSADRH(phy->idx));
+
+ return ((u64)high << 32) | low;
+}
-static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
+static u64 read_dst_addr_64(struct mmp_pdma_phy *phy)
{
- u32 reg = (phy->idx << 4) + DDADR;
+ u32 low = readl(phy->base + DTADR(phy->idx));
+ u32 high = readl(phy->base + DTADRH(phy->idx));
- writel(addr, phy->base + reg);
+ return ((u64)high << 32) | low;
}
+static void set_desc_next_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->ddadr = lower_32_bits(addr);
+ desc->ddadrh = upper_32_bits(addr);
+}
+
+static void set_desc_src_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dsadr = lower_32_bits(addr);
+ desc->dsadrh = upper_32_bits(addr);
+}
+
+static void set_desc_dst_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
+{
+ desc->dtadr = lower_32_bits(addr);
+ desc->dtadrh = upper_32_bits(addr);
+}
+
+static u64 get_desc_src_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dsadrh << 32) | desc->dsadr;
+}
+
+static u64 get_desc_dst_addr_64(const struct mmp_pdma_desc_hw *desc)
+{
+ return ((u64)desc->dtadrh << 32) | desc->dtadr;
+}
+
+static int mmp_pdma_config_write(struct dma_chan *dchan,
+ struct dma_slave_config *cfg,
+ enum dma_transfer_direction direction);
+
static void enable_chan(struct mmp_pdma_phy *phy)
{
u32 reg, dalgn;
+ struct mmp_pdma_device *pdev;
if (!phy->vchan)
return;
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+
reg = DRCMR(phy->vchan->drcmr);
writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
@@ -165,18 +312,29 @@ static void enable_chan(struct mmp_pdma_phy *phy)
writel(dalgn, phy->base + DALGN);
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
+ writel(readl(phy->base + reg) | pdev->ops->run_bits,
+ phy->base + reg);
}
static void disable_chan(struct mmp_pdma_phy *phy)
{
- u32 reg;
+ u32 reg, dcsr;
if (!phy)
return;
reg = (phy->idx << 2) + DCSR;
- writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
+ dcsr = readl(phy->base + reg);
+
+ if (phy->vchan) {
+ struct mmp_pdma_device *pdev;
+
+ pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
+ writel(dcsr & ~pdev->ops->run_bits, phy->base + reg);
+ } else {
+ /* If no vchan, just clear the RUN bit */
+ writel(dcsr & ~DCSR_RUN, phy->base + reg);
+ }
}
static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -295,6 +453,7 @@ static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
struct mmp_pdma_desc_sw *desc;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
/* still in running, irq will start the pending list */
if (!chan->idle) {
@@ -329,7 +488,7 @@ static void start_pending_queue(struct mmp_pdma_chan *chan)
* Program the descriptor's address into the DMA controller,
* then start the DMA transaction
*/
- set_desc(chan->phy, desc->async_tx.phys);
+ pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys);
enable_chan(chan->phy);
chan->idle = false;
}
@@ -445,15 +604,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
size_t len, unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
size_t copy = 0;
- if (!dchan)
- return NULL;
-
- if (!len)
+ if (!dchan || !len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
chan = to_mmp_pdma_chan(dchan);
chan->byte_align = false;
@@ -476,13 +634,14 @@ mmp_pdma_prep_memcpy(struct dma_chan *dchan,
chan->byte_align = true;
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -526,6 +685,7 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
unsigned long flags, void *context)
{
struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(dchan->device);
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
size_t len, avail;
struct scatterlist *sg;
@@ -557,17 +717,18 @@ mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
if (dir == DMA_MEM_TO_DEV) {
- new->desc.dsadr = addr;
+ pdev->ops->set_desc_src_addr(&new->desc, addr);
new->desc.dtadr = chan->dev_addr;
} else {
new->desc.dsadr = chan->dev_addr;
- new->desc.dtadr = addr;
+ pdev->ops->set_desc_dst_addr(&new->desc, addr);
}
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -607,12 +768,15 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
unsigned long flags)
{
struct mmp_pdma_chan *chan;
+ struct mmp_pdma_device *pdev;
struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
dma_addr_t dma_src, dma_dst;
if (!dchan || !len || !period_len)
return NULL;
+ pdev = to_mmp_pdma_dev(dchan->device);
+
/* the buffer length must be a multiple of period_len */
if (len % period_len != 0)
return NULL;
@@ -649,13 +813,14 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
(DCMD_LENGTH & period_len));
- new->desc.dsadr = dma_src;
- new->desc.dtadr = dma_dst;
+ pdev->ops->set_desc_src_addr(&new->desc, dma_src);
+ pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
if (!first)
first = new;
else
- prev->desc.ddadr = new->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&prev->desc,
+ new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -676,7 +841,7 @@ mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
first->async_tx.cookie = -EBUSY;
/* make the cyclic link */
- new->desc.ddadr = first->async_tx.phys;
+ pdev->ops->set_desc_next_addr(&new->desc, first->async_tx.phys);
chan->cyclic_first = first;
return &first->async_tx;
@@ -762,7 +927,9 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
dma_cookie_t cookie)
{
struct mmp_pdma_desc_sw *sw;
- u32 curr, residue = 0;
+ struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+ u64 curr;
+ u32 residue = 0;
bool passed = false;
bool cyclic = chan->cyclic_first != NULL;
@@ -774,17 +941,18 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
return 0;
if (chan->dir == DMA_DEV_TO_MEM)
- curr = readl(chan->phy->base + DTADR(chan->phy->idx));
+ curr = pdev->ops->read_dst_addr(chan->phy);
else
- curr = readl(chan->phy->base + DSADR(chan->phy->idx));
+ curr = pdev->ops->read_src_addr(chan->phy);
list_for_each_entry(sw, &chan->chain_running, node) {
- u32 start, end, len;
+ u64 start, end;
+ u32 len;
if (chan->dir == DMA_DEV_TO_MEM)
- start = sw->desc.dtadr;
+ start = pdev->ops->get_desc_dst_addr(&sw->desc);
else
- start = sw->desc.dsadr;
+ start = pdev->ops->get_desc_src_addr(&sw->desc);
len = sw->desc.dcmd & DCMD_LENGTH;
end = start + len;
@@ -800,7 +968,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
if (passed) {
residue += len;
} else if (curr >= start && curr <= end) {
- residue += end - curr;
+ residue += (u32)(end - curr);
passed = true;
}
@@ -994,9 +1162,42 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
return 0;
}
+static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
+ .write_next_addr = write_next_addr_32,
+ .read_src_addr = read_src_addr_32,
+ .read_dst_addr = read_dst_addr_32,
+ .set_desc_next_addr = set_desc_next_addr_32,
+ .set_desc_src_addr = set_desc_src_addr_32,
+ .set_desc_dst_addr = set_desc_dst_addr_32,
+ .get_desc_src_addr = get_desc_src_addr_32,
+ .get_desc_dst_addr = get_desc_dst_addr_32,
+ .run_bits = (DCSR_RUN),
+ .dma_mask = 0, /* let OF/platform set DMA mask */
+};
+
+static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
+ .write_next_addr = write_next_addr_64,
+ .read_src_addr = read_src_addr_64,
+ .read_dst_addr = read_dst_addr_64,
+ .set_desc_next_addr = set_desc_next_addr_64,
+ .set_desc_src_addr = set_desc_src_addr_64,
+ .set_desc_dst_addr = set_desc_dst_addr_64,
+ .get_desc_src_addr = get_desc_src_addr_64,
+ .get_desc_dst_addr = get_desc_dst_addr_64,
+ .run_bits = (DCSR_RUN | DCSR_LPAEEN),
+ .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
+};
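The 32-bit and 64-bit register layouts differ only in the accessors and the two ops tables above; the rest of the driver dispatches through pdev->ops. As an editor's sketch (not part of the patch, and assuming a 64-bit dma_addr_t), the 64-bit set/get pairs are expected to round-trip any address they are given:

static void demo_addr_roundtrip(void)
{
	/* Editor's sketch: split with lower_32_bits()/upper_32_bits(),
	 * recombine with ((u64)high << 32) | low; the pair must return
	 * the original address unchanged.
	 */
	struct mmp_pdma_desc_hw desc = { 0 };
	dma_addr_t addr = 0x1234567890ULL;	/* test address above 4 GiB */

	set_desc_src_addr_64(&desc, addr);
	WARN_ON(get_desc_src_addr_64(&desc) != addr);
}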
+
static const struct of_device_id mmp_pdma_dt_ids[] = {
- { .compatible = "marvell,pdma-1.0", },
- {}
+ {
+ .compatible = "marvell,pdma-1.0",
+ .data = &marvell_pdma_v1_ops
+ }, {
+ .compatible = "spacemit,k1-pdma",
+ .data = &spacemit_k1_pdma_ops
+ }, {
+ /* sentinel */
+ }
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
@@ -1019,6 +1220,8 @@ static int mmp_pdma_probe(struct platform_device *op)
{
struct mmp_pdma_device *pdev;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+ struct clk *clk;
+ struct reset_control *rst;
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
const enum dma_slave_buswidth widths =
@@ -1037,6 +1240,19 @@ static int mmp_pdma_probe(struct platform_device *op)
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
+ clk = devm_clk_get_optional_enabled(pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rst = devm_reset_control_get_optional_exclusive_deasserted(pdev->dev,
+ NULL);
+ if (IS_ERR(rst))
+ return PTR_ERR(rst);
+
+ pdev->ops = of_device_get_match_data(&op->dev);
+ if (!pdev->ops)
+ return -ENODEV;
+
if (pdev->dev->of_node) {
/* Parse new and deprecated dma-channels properties */
if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
@@ -1098,7 +1314,10 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- if (pdev->dev->coherent_dma_mask)
+ /* Set DMA mask based on ops->dma_mask, or OF/platform */
+ if (pdev->ops->dma_mask)
+ dma_set_mask(pdev->dev, pdev->ops->dma_mask);
+ else if (pdev->dev->coherent_dma_mask)
dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
else
dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index b7fb843c67a6..ba03321eeff7 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -554,8 +554,7 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static void mmp_tdma_remove(struct platform_device *pdev)
{
- if (pdev->dev.of_node)
- of_dma_controller_free(pdev->dev.of_node);
+ of_dma_controller_free(pdev->dev.of_node);
}
static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
@@ -743,6 +742,5 @@ module_platform_driver(mmp_tdma_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
-MODULE_ALIAS("platform:mmp-tdma");
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 1fdcb0f5c9e7..5e8386296046 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1013,7 +1013,7 @@ static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
dma_async_device_unregister(&mv_chan->dmadev);
- dma_free_coherent(dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
dma_unmap_single(dev, mv_chan->dummy_src_addr,
MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
@@ -1163,7 +1163,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
err_free_irq:
free_irq(mv_chan->irq, mv_chan);
err_free_dma:
- dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
+ dma_free_wc(&pdev->dev, MV_XOR_POOL_SIZE,
mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
err_unmap_dst:
dma_unmap_single(dma_dev->dev, mv_chan->dummy_dst_addr,
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 765462303de0..334425faac00 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1500,7 +1500,6 @@ static const struct platform_device_id nbpf_ids[] = {
};
MODULE_DEVICE_TABLE(platform, nbpf_ids);
-#ifdef CONFIG_PM
static int nbpf_runtime_suspend(struct device *dev)
{
struct nbpf_device *nbpf = dev_get_drvdata(dev);
@@ -1513,17 +1512,16 @@ static int nbpf_runtime_resume(struct device *dev)
struct nbpf_device *nbpf = dev_get_drvdata(dev);
return clk_prepare_enable(nbpf->clk);
}
-#endif
static const struct dev_pm_ops nbpf_pm_ops = {
- SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
+ RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
};
static struct platform_driver nbpf_driver = {
.driver = {
.name = "dma-nbpf",
.of_match_table = nbpf_match,
- .pm = &nbpf_pm_ops,
+ .pm = pm_ptr(&nbpf_pm_ops),
},
.id_table = nbpf_ids,
.probe = nbpf_probe,
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 9d2a5a967a99..61500ad7c850 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -874,7 +874,7 @@ static int ppc440spe_dma2_pq_slot_count(dma_addr_t *srcs,
pr_err("%s: src_cnt=%d, state=%d, addr_count=%d, order=%lld\n",
__func__, src_cnt, state, addr_count, order);
for (i = 0; i < src_cnt; i++)
- pr_err("\t[%d] 0x%llx \n", i, srcs[i]);
+ pr_err("\t[%d] 0x%llx\n", i, srcs[i]);
BUG();
}
@@ -3636,7 +3636,7 @@ static void ppc440spe_adma_issue_pending(struct dma_chan *chan)
ppc440spe_chan = to_ppc440spe_adma_chan(chan);
dev_dbg(ppc440spe_chan->device->common.dev,
- "ppc440spe adma%d: %s %d \n", ppc440spe_chan->device->id,
+ "ppc440spe adma%d: %s %d\n", ppc440spe_chan->device->id,
__func__, ppc440spe_chan->pending);
if (ppc440spe_chan->pending) {
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 8e87738086b2..66bfea1f156d 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -1619,7 +1619,8 @@ gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
}
static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
- struct scatterlist *sgl, enum dma_transfer_direction direction)
+ struct scatterlist *sgl, enum dma_transfer_direction direction,
+ unsigned long flags)
{
struct gpi_i2c_config *i2c = chan->config;
struct device *dev = chan->gpii->gpi_dev->dev;
@@ -1684,6 +1685,9 @@ static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+
+ if (!(flags & DMA_PREP_INTERRUPT))
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_BEI);
}
for (i = 0; i < tre_idx; i++)
@@ -1827,6 +1831,9 @@ gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
+ if (!(flags & DMA_PREP_INTERRUPT) && (nr - nr_tre < 2))
+ return NULL;
+
gpi_desc = kzalloc(sizeof(*gpi_desc), GFP_NOWAIT);
if (!gpi_desc)
return NULL;
@@ -1835,7 +1842,7 @@ gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (gchan->protocol == QCOM_GPI_SPI) {
i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction);
} else if (gchan->protocol == QCOM_GPI_I2C) {
- i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction);
+ i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction, flags);
} else {
dev_err(dev, "invalid peripheral: %d\n", gchan->protocol);
kfree(gpi_desc);
diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig
index 8184d475a49a..a16c7e83bd14 100644
--- a/drivers/dma/sh/Kconfig
+++ b/drivers/dma/sh/Kconfig
@@ -50,7 +50,7 @@ config RENESAS_USB_DMAC
config RZ_DMAC
tristate "Renesas RZ DMA Controller"
- depends on ARCH_R7S72100 || ARCH_RZG2L || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
select RENESAS_DMA
select DMA_VIRTUAL_CHANNELS
help
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 0c45ce8c74aa..475a347cae1b 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1728,19 +1728,12 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
-#ifdef CONFIG_PM
-static int rcar_dmac_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
static int rcar_dmac_runtime_resume(struct device *dev)
{
struct rcar_dmac *dmac = dev_get_drvdata(dev);
return rcar_dmac_init(dmac);
}
-#endif
static const struct dev_pm_ops rcar_dmac_pm = {
/*
@@ -1748,10 +1741,9 @@ static const struct dev_pm_ops rcar_dmac_pm = {
* - Wait for the current transfer to complete and stop the device,
* - Resume transfers, if any.
*/
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
- NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(NULL, rcar_dmac_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -2036,7 +2028,7 @@ MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
static struct platform_driver rcar_dmac_driver = {
.driver = {
- .pm = &rcar_dmac_pm,
+ .pm = pm_ptr(&rcar_dmac_pm),
.name = "rcar-dmac",
.of_match_table = rcar_dmac_of_ids,
},
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 6b4fce453c85..834741adadaa 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -129,12 +129,25 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
const struct shdma_ops *ops = sdev->ops;
dev_dbg(schan->dev, "Bring up channel %d\n",
schan->id);
- /*
- * TODO: .xfer_setup() might fail on some platforms.
- * Make it int then, on error remove chunks from the
- * queue again
- */
- ops->setup_xfer(schan, schan->slave_id);
+
+ ret = ops->setup_xfer(schan, schan->slave_id);
+ if (ret < 0) {
+ dev_err(schan->dev, "setup_xfer failed: %d\n", ret);
+
+ /* Remove chunks from the queue and mark them as idle */
+ list_for_each_entry_safe(chunk, c, &schan->ld_queue, node) {
+ if (chunk->cookie == cookie) {
+ chunk->mark = DESC_IDLE;
+ list_move(&chunk->node, &schan->ld_free);
+ }
+ }
+
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ ret = pm_runtime_put(schan->dev);
+
+ spin_unlock_irq(&schan->chan_lock);
+ return ret;
+ }
if (schan->pm_state == SHDMA_PM_PENDING)
shdma_chan_xfer_ld_queue(schan);
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 093e449e19ee..603e15102e45 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -300,21 +300,30 @@ static bool sh_dmae_channel_busy(struct shdma_chan *schan)
return dmae_is_busy(sh_chan);
}
-static void sh_dmae_setup_xfer(struct shdma_chan *schan,
- int slave_id)
+static int sh_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
shdma_chan);
+ int ret = 0;
if (slave_id >= 0) {
const struct sh_dmae_slave_config *cfg =
sh_chan->config;
- dmae_set_dmars(sh_chan, cfg->mid_rid);
- dmae_set_chcr(sh_chan, cfg->chcr);
+ ret = dmae_set_dmars(sh_chan, cfg->mid_rid);
+ if (ret < 0)
+ goto end;
+
+ ret = dmae_set_chcr(sh_chan, cfg->chcr);
+ if (ret < 0)
+ goto end;
} else {
dmae_init(sh_chan);
}
+
+end:
+ return ret;
}
/*
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 7e2b6c97fa2f..b42e5a66fd95 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -670,7 +670,6 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
-#ifdef CONFIG_PM
static int usb_dmac_runtime_suspend(struct device *dev)
{
struct usb_dmac *dmac = dev_get_drvdata(dev);
@@ -691,13 +690,11 @@ static int usb_dmac_runtime_resume(struct device *dev)
return usb_dmac_init(dmac);
}
-#endif /* CONFIG_PM */
static const struct dev_pm_ops usb_dmac_pm = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
- NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -894,7 +891,7 @@ MODULE_DEVICE_TABLE(of, usb_dmac_of_ids);
static struct platform_driver usb_dmac_driver = {
.driver = {
- .pm = &usb_dmac_pm,
+ .pm = pm_ptr(&usb_dmac_pm),
.name = "usb-dmac",
.of_match_table = usb_dmac_of_ids,
},
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 187a090463ce..6207e0b185e1 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -1311,4 +1311,3 @@ MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_AUTHOR("Eric Long <eric.long@spreadtrum.com>");
-MODULE_ALIAS("platform:sprd-dma");
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index c65ee0c7bfbd..dc2ab7d16cf2 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -866,4 +866,3 @@ MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index fad896ff29a2..d0e8bb27a03b 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -1230,7 +1230,6 @@ static struct platform_driver tegra_admac_driver = {
module_platform_driver(tegra_admac_driver);
-MODULE_ALIAS("platform:tegra210-adma");
MODULE_DESCRIPTION("NVIDIA Tegra ADMA driver");
MODULE_AUTHOR("Dara Ramesh <dramesh@nvidia.com>");
MODULE_AUTHOR("Jon Hunter <jonathanh@nvidia.com>");
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index a34d8f0ceed8..fabff602065f 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2173,6 +2173,99 @@ error:
}
/**
+ * xilinx_dma_prep_peripheral_dma_vec - prepare descriptors for a DMA_SLAVE
+ * transaction from DMA vectors
+ * @dchan: DMA channel
+ * @vecs: Array of DMA vectors that should be transferred
+ * @nb: number of entries in @vecs
+ * @direction: DMA direction
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_dma_prep_peripheral_dma_vec(
+ struct dma_chan *dchan, const struct dma_vec *vecs, size_t nb,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_axidma_tx_segment *segment, *head, *prev = NULL;
+ size_t copy;
+ size_t sg_used;
+ unsigned int i;
+
+ if (!is_slave_direction(direction) || direction != chan->direction)
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ /* Build transactions using information from DMA vectors */
+ for (i = 0; i < nb; i++) {
+ sg_used = 0;
+
+ /* Loop until the entire dma_vec entry is used */
+ while (sg_used < vecs[i].len) {
+ struct xilinx_axidma_desc_hw *hw;
+
+ /* Get a free segment */
+ segment = xilinx_axidma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ /*
+ * Calculate the maximum number of bytes to transfer,
+ * making sure it is less than the hw limit
+ */
+ copy = xilinx_dma_calc_copysize(chan, vecs[i].len,
+ sg_used);
+ hw = &segment->hw;
+
+ /* Fill in the descriptor */
+ xilinx_axidma_buf(chan, hw, vecs[i].addr, sg_used, 0);
+ hw->control = copy;
+
+ if (prev)
+ prev->hw.next_desc = segment->phys;
+
+ prev = segment;
+ sg_used += copy;
+
+ /*
+ * Insert the segment into the descriptor segments
+ * list.
+ */
+ list_add_tail(&segment->node, &desc->segments);
+ }
+ }
+
+ head = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node);
+ desc->async_tx.phys = head->phys;
+
+ /* For DMA_MEM_TO_DEV, set SOP on the first segment, EOP on the last */
+ if (chan->direction == DMA_MEM_TO_DEV) {
+ head->hw.control |= XILINX_DMA_BD_SOP;
+ segment = list_last_entry(&desc->segments,
+ struct xilinx_axidma_tx_segment,
+ node);
+ segment->hw.control |= XILINX_DMA_BD_EOP;
+ }
+
+ if (chan->xdev->has_axistream_connected)
+ desc->async_tx.metadata_ops = &xilinx_dma_metadata_ops;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
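For context, here is a hedged sketch of how a dmaengine client could exercise the new callback. It assumes the generic dmaengine_prep_peripheral_dma_vec() wrapper and struct dma_vec from <linux/dmaengine.h>; the channel, buffer addresses and buffer sizes are hypothetical:

static int demo_submit_vecs(struct dma_chan *chan, dma_addr_t buf0_dma,
			    dma_addr_t buf1_dma)
{
	/* Editor's sketch: two already-mapped 4 KiB buffers submitted as
	 * a single transaction; the driver above builds one segment
	 * chain from the vector array.
	 */
	struct dma_vec vecs[] = {
		{ .addr = buf0_dma, .len = SZ_4K },
		{ .addr = buf1_dma, .len = SZ_4K },
	};
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_peripheral_dma_vec(chan, vecs, ARRAY_SIZE(vecs),
						DMA_MEM_TO_DEV,
						DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);

	return 0;
}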
+
+/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -3180,6 +3273,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
xdev->common.device_config = xilinx_dma_device_config;
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
+ xdev->common.device_prep_peripheral_dma_vec = xilinx_dma_prep_peripheral_dma_vec;
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
xdev->common.device_prep_dma_cyclic =
xilinx_dma_prep_dma_cyclic;
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d05fc5fcc77d..f7e584de4335 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -1173,9 +1173,9 @@ static void zynqmp_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&zdev->common);
zynqmp_dma_chan_remove(zdev->chan);
- pm_runtime_disable(zdev->dev);
- if (!pm_runtime_enabled(zdev->dev))
+ if (pm_runtime_active(zdev->dev))
zynqmp_dma_runtime_suspend(zdev->dev);
+ pm_runtime_disable(zdev->dev);
}
static const struct of_device_id zynqmp_dma_of_match[] = {
@@ -1193,6 +1193,7 @@ static struct platform_driver zynqmp_dma_driver = {
},
.probe = zynqmp_dma_probe,
.remove = zynqmp_dma_remove,
+ .shutdown = zynqmp_dma_remove,
};
module_platform_driver(zynqmp_dma_driver);
diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
index 036f21cac0a9..64944f601ee5 100644
--- a/drivers/dpll/dpll_netlink.c
+++ b/drivers/dpll/dpll_netlink.c
@@ -165,6 +165,27 @@ dpll_msg_add_phase_offset_monitor(struct sk_buff *msg, struct dpll_device *dpll,
}
static int
+dpll_msg_add_phase_offset_avg_factor(struct sk_buff *msg,
+ struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor;
+ int ret;
+
+ if (ops->phase_offset_avg_factor_get) {
+ ret = ops->phase_offset_avg_factor_get(dpll, dpll_priv(dpll),
+ &factor, extack);
+ if (ret)
+ return ret;
+ if (nla_put_u32(msg, DPLL_A_PHASE_OFFSET_AVG_FACTOR, factor))
+ return -EMSGSIZE;
+ }
+
+ return 0;
+}
+
+static int
dpll_msg_add_lock_status(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
@@ -211,8 +232,8 @@ static int
dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
struct netlink_ext_ack *extack)
{
+ DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 };
const struct dpll_device_ops *ops = dpll_device_ops(dpll);
- DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 };
enum dpll_clock_quality_level ql;
int ret;
@@ -221,7 +242,7 @@ dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack);
if (ret)
return ret;
- for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX)
+ for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1)
if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql))
return -EMSGSIZE;
@@ -616,6 +637,10 @@ dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
ret = dpll_msg_add_pin_freq(msg, pin, ref, extack);
if (ret)
return ret;
+ if (prop->phase_gran &&
+ nla_put_u32(msg, DPLL_A_PIN_PHASE_ADJUST_GRAN,
+ prop->phase_gran))
+ return -EMSGSIZE;
if (nla_put_s32(msg, DPLL_A_PIN_PHASE_ADJUST_MIN,
prop->phase_range.min))
return -EMSGSIZE;
@@ -677,6 +702,9 @@ dpll_device_get_one(struct dpll_device *dpll, struct sk_buff *msg,
ret = dpll_msg_add_phase_offset_monitor(msg, dpll, extack);
if (ret)
return ret;
+ ret = dpll_msg_add_phase_offset_avg_factor(msg, dpll, extack);
+ if (ret)
+ return ret;
return 0;
}
@@ -840,6 +868,23 @@ dpll_phase_offset_monitor_set(struct dpll_device *dpll, struct nlattr *a,
}
static int
+dpll_phase_offset_avg_factor_set(struct dpll_device *dpll, struct nlattr *a,
+ struct netlink_ext_ack *extack)
+{
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+ u32 factor = nla_get_u32(a);
+
+ if (!ops->phase_offset_avg_factor_set) {
+ NL_SET_ERR_MSG_ATTR(extack, a,
+ "device not capable of changing phase offset average factor");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->phase_offset_avg_factor_set(dpll, dpll_priv(dpll), factor,
+ extack);
+}
+
+static int
dpll_pin_freq_set(struct dpll_pin *pin, struct nlattr *a,
struct netlink_ext_ack *extack)
{
@@ -1220,7 +1265,13 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
if (phase_adj > pin->prop.phase_range.max ||
phase_adj < pin->prop.phase_range.min) {
NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
- "phase adjust value not supported");
+ "phase adjust value of out range");
+ return -EINVAL;
+ }
+ if (pin->prop.phase_gran && phase_adj % (s32)pin->prop.phase_gran) {
+ NL_SET_ERR_MSG_ATTR_FMT(extack, phase_adj_attr,
+ "phase adjust value not multiple of %u",
+ pin->prop.phase_gran);
return -EINVAL;
}
@@ -1518,16 +1569,18 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -EMSGSIZE;
}
pin = dpll_pin_find_from_nlattr(info);
- if (!IS_ERR(pin)) {
- if (!dpll_pin_available(pin)) {
- nlmsg_free(msg);
- return -ENODEV;
- }
- ret = dpll_msg_add_pin_handle(msg, pin);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(pin)) {
+ nlmsg_free(msg);
+ return PTR_ERR(pin);
+ }
+ if (!dpll_pin_available(pin)) {
+ nlmsg_free(msg);
+ return -ENODEV;
+ }
+ ret = dpll_msg_add_pin_handle(msg, pin);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
}
genlmsg_end(msg, hdr);
@@ -1694,12 +1747,14 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}
dpll = dpll_device_find_from_nlattr(info);
- if (!IS_ERR(dpll)) {
- ret = dpll_msg_add_dev_handle(msg, dpll);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(dpll)) {
+ nlmsg_free(msg);
+ return PTR_ERR(dpll);
+ }
+ ret = dpll_msg_add_dev_handle(msg, dpll);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
}
genlmsg_end(msg, hdr);
@@ -1736,14 +1791,25 @@ int dpll_nl_device_get_doit(struct sk_buff *skb, struct genl_info *info)
static int
dpll_set_from_nlattr(struct dpll_device *dpll, struct genl_info *info)
{
- int ret;
-
- if (info->attrs[DPLL_A_PHASE_OFFSET_MONITOR]) {
- struct nlattr *a = info->attrs[DPLL_A_PHASE_OFFSET_MONITOR];
+ struct nlattr *a;
+ int rem, ret;
- ret = dpll_phase_offset_monitor_set(dpll, a, info->extack);
- if (ret)
- return ret;
+ nla_for_each_attr(a, genlmsg_data(info->genlhdr),
+ genlmsg_len(info->genlhdr), rem) {
+ switch (nla_type(a)) {
+ case DPLL_A_PHASE_OFFSET_MONITOR:
+ ret = dpll_phase_offset_monitor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ case DPLL_A_PHASE_OFFSET_AVG_FACTOR:
+ ret = dpll_phase_offset_avg_factor_set(dpll, a,
+ info->extack);
+ if (ret)
+ return ret;
+ break;
+ }
}
return 0;
diff --git a/drivers/dpll/dpll_nl.c b/drivers/dpll/dpll_nl.c
index 9f2efaf25268..36d11ff195df 100644
--- a/drivers/dpll/dpll_nl.c
+++ b/drivers/dpll/dpll_nl.c
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/dpll.yaml */
/* YNL-GEN kernel source */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#include <net/netlink.h>
#include <net/genetlink.h>
@@ -42,9 +43,10 @@ static const struct nla_policy dpll_device_get_nl_policy[DPLL_A_ID + 1] = {
};
/* DPLL_CMD_DEVICE_SET - do */
-static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_MONITOR + 1] = {
+static const struct nla_policy dpll_device_set_nl_policy[DPLL_A_PHASE_OFFSET_AVG_FACTOR + 1] = {
[DPLL_A_ID] = { .type = NLA_U32, },
[DPLL_A_PHASE_OFFSET_MONITOR] = NLA_POLICY_MAX(NLA_U32, 1),
+ [DPLL_A_PHASE_OFFSET_AVG_FACTOR] = { .type = NLA_U32, },
};
/* DPLL_CMD_PIN_ID_GET - do */
@@ -112,7 +114,7 @@ static const struct genl_split_ops dpll_nl_ops[] = {
.doit = dpll_nl_device_set_doit,
.post_doit = dpll_post_doit,
.policy = dpll_device_set_nl_policy,
- .maxattr = DPLL_A_PHASE_OFFSET_MONITOR,
+ .maxattr = DPLL_A_PHASE_OFFSET_AVG_FACTOR,
.flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
},
{
diff --git a/drivers/dpll/dpll_nl.h b/drivers/dpll/dpll_nl.h
index 3da10cfe9a6e..7419679b6977 100644
--- a/drivers/dpll/dpll_nl.h
+++ b/drivers/dpll/dpll_nl.h
@@ -2,6 +2,7 @@
/* Do not edit directly, auto-generated from: */
/* Documentation/netlink/specs/dpll.yaml */
/* YNL-GEN kernel header */
+/* To regenerate run: tools/net/ynl/ynl-regen.sh */
#ifndef _LINUX_DPLL_GEN_H
#define _LINUX_DPLL_GEN_H
diff --git a/drivers/dpll/zl3073x/Makefile b/drivers/dpll/zl3073x/Makefile
index c3e2f02f319d..bd324c7fe710 100644
--- a/drivers/dpll/zl3073x/Makefile
+++ b/drivers/dpll/zl3073x/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ZL3073X) += zl3073x.o
-zl3073x-objs := core.o devlink.o dpll.o prop.o
+zl3073x-objs := core.o devlink.o dpll.o flash.o fw.o \
+ out.o prop.o ref.o synth.o
obj-$(CONFIG_ZL3073X_I2C) += zl3073x_i2c.o
zl3073x_i2c-objs := i2c.o
diff --git a/drivers/dpll/zl3073x/core.c b/drivers/dpll/zl3073x/core.c
index 7ebcfc5ec1f0..383e2397dd03 100644
--- a/drivers/dpll/zl3073x/core.c
+++ b/drivers/dpll/zl3073x/core.c
@@ -95,9 +95,9 @@ EXPORT_SYMBOL_NS_GPL(zl30735_chip_info, "ZL3073X");
#define ZL_RANGE_OFFSET 0x80
#define ZL_PAGE_SIZE 0x80
-#define ZL_NUM_PAGES 15
+#define ZL_NUM_PAGES 256
#define ZL_PAGE_SEL 0x7F
-#define ZL_PAGE_SEL_MASK GENMASK(3, 0)
+#define ZL_PAGE_SEL_MASK GENMASK(7, 0)
#define ZL_NUM_REGS (ZL_NUM_PAGES * ZL_PAGE_SIZE)
/* Regmap range configuration */
@@ -129,54 +129,14 @@ const struct regmap_config zl3073x_regmap_config = {
};
EXPORT_SYMBOL_NS_GPL(zl3073x_regmap_config, "ZL3073X");
-/**
- * zl3073x_ref_freq_factorize - factorize given frequency
- * @freq: input frequency
- * @base: base frequency
- * @mult: multiplier
- *
- * Checks if the given frequency can be factorized using one of the
- * supported base frequencies. If so the base frequency and multiplier
- * are stored into appropriate parameters if they are not NULL.
- *
- * Return: 0 on success, -EINVAL if the frequency cannot be factorized
- */
-int
-zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult)
-{
- static const u16 base_freqs[] = {
- 1, 2, 4, 5, 8, 10, 16, 20, 25, 32, 40, 50, 64, 80, 100, 125,
- 128, 160, 200, 250, 256, 320, 400, 500, 625, 640, 800, 1000,
- 1250, 1280, 1600, 2000, 2500, 3125, 3200, 4000, 5000, 6250,
- 6400, 8000, 10000, 12500, 15625, 16000, 20000, 25000, 31250,
- 32000, 40000, 50000, 62500,
- };
- u32 div;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(base_freqs); i++) {
- div = freq / base_freqs[i];
-
- if (div <= U16_MAX && (freq % base_freqs[i]) == 0) {
- if (base)
- *base = base_freqs[i];
- if (mult)
- *mult = div;
-
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
static bool
zl3073x_check_reg(struct zl3073x_dev *zldev, unsigned int reg, size_t size)
{
/* Check that multiop lock is held when accessing registers
- * from page 10 and above.
+ * from page 10 and above, except page 255, which does not
+ * need this protection.
*/
- if (ZL_REG_PAGE(reg) >= 10)
+ if (ZL_REG_PAGE(reg) >= 10 && ZL_REG_PAGE(reg) < 255)
lockdep_assert_held(&zldev->multiop_lock);
/* Check the index is in valid range for indexed register */
@@ -447,185 +407,147 @@ int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
}
/**
- * zl3073x_ref_state_fetch - get input reference state
- * @zldev: pointer to zl3073x_dev structure
- * @index: input reference index to fetch state for
+ * zl3073x_do_hwreg_op - Perform HW register read/write operation
+ * @zldev: zl3073x device pointer
+ * @op: operation to perform
*
- * Function fetches information for the given input reference that are
- * invariant and stores them for later use.
+ * Performs requested operation and waits for its completion.
*
* Return: 0 on success, <0 on error
*/
static int
-zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index)
+zl3073x_do_hwreg_op(struct zl3073x_dev *zldev, u8 op)
{
- struct zl3073x_ref *input = &zldev->ref[index];
- u8 ref_config;
int rc;
- /* If the input is differential then the configuration for N-pin
- * reference is ignored and P-pin config is used for both.
- */
- if (zl3073x_is_n_pin(index) &&
- zl3073x_ref_is_diff(zldev, index - 1)) {
- input->enabled = zl3073x_ref_is_enabled(zldev, index - 1);
- input->diff = true;
-
- return 0;
- }
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read reference configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(index));
- if (rc)
- return rc;
-
- /* Read ref_config register */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_CONFIG, &ref_config);
+ /* Set requested operation and set pending bit */
+ rc = zl3073x_write_u8(zldev, ZL_REG_HWREG_OP, op | ZL_HWREG_OP_PENDING);
if (rc)
return rc;
- input->enabled = FIELD_GET(ZL_REF_CONFIG_ENABLE, ref_config);
- input->diff = FIELD_GET(ZL_REF_CONFIG_DIFF_EN, ref_config);
-
- dev_dbg(zldev->dev, "REF%u is %s and configured as %s\n", index,
- str_enabled_disabled(input->enabled),
- input->diff ? "differential" : "single-ended");
-
- return rc;
+ /* Poll for completion - pending bit cleared */
+ return zl3073x_poll_zero_u8(zldev, ZL_REG_HWREG_OP,
+ ZL_HWREG_OP_PENDING);
}
/**
- * zl3073x_out_state_fetch - get output state
- * @zldev: pointer to zl3073x_dev structure
- * @index: output index to fetch state for
+ * zl3073x_read_hwreg - Read HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value of the HW register
*
- * Function fetches information for the given output (not output pin)
- * that are invariant and stores them for later use.
+ * Reads HW register value and stores it into @value.
*
* Return: 0 on success, <0 on error
*/
-static int
-zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index)
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value)
{
- struct zl3073x_out *out = &zldev->out[index];
- u8 output_ctrl, output_mode;
int rc;
- /* Read output configuration */
- rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_CTRL(index), &output_ctrl);
+ /* Set address to read data from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
if (rc)
return rc;
- /* Store info about output enablement and synthesizer the output
- * is connected to.
- */
- out->enabled = FIELD_GET(ZL_OUTPUT_CTRL_EN, output_ctrl);
- out->synth = FIELD_GET(ZL_OUTPUT_CTRL_SYNTH_SEL, output_ctrl);
-
- dev_dbg(zldev->dev, "OUT%u is %s and connected to SYNTH%u\n", index,
- str_enabled_disabled(out->enabled), out->synth);
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ /* Perform the read operation */
+ rc = zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_READ);
if (rc)
return rc;
- /* Read output_mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
- if (rc)
- return rc;
-
- /* Extract and store output signal format */
- out->signal_format = FIELD_GET(ZL_OUTPUT_MODE_SIGNAL_FORMAT,
- output_mode);
-
- dev_dbg(zldev->dev, "OUT%u has signal format 0x%02x\n", index,
- out->signal_format);
-
- return rc;
+ /* Read the received data */
+ return zl3073x_read_u32(zldev, ZL_REG_HWREG_READ_DATA, value);
}
/**
- * zl3073x_synth_state_fetch - get synth state
- * @zldev: pointer to zl3073x_dev structure
- * @index: synth index to fetch state for
+ * zl3073x_write_hwreg - Write value to HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value to be written to HW register
*
- * Function fetches information for the given synthesizer that are
- * invariant and stores them for later use.
+ * Writes the requested value into the given HW register.
*
* Return: 0 on success, <0 on error
*/
-static int
-zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 index)
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value)
{
- struct zl3073x_synth *synth = &zldev->synth[index];
- u16 base, m, n;
- u8 synth_ctrl;
- u32 mult;
int rc;
- /* Read synth control register */
- rc = zl3073x_read_u8(zldev, ZL_REG_SYNTH_CTRL(index), &synth_ctrl);
+ /* Set address to write data to */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_ADDR, addr);
if (rc)
return rc;
- /* Store info about synth enablement and DPLL channel the synth is
- * driven by.
- */
- synth->enabled = FIELD_GET(ZL_SYNTH_CTRL_EN, synth_ctrl);
- synth->dpll = FIELD_GET(ZL_SYNTH_CTRL_DPLL_SEL, synth_ctrl);
-
- dev_dbg(zldev->dev, "SYNTH%u is %s and driven by DPLL%u\n", index,
- str_enabled_disabled(synth->enabled), synth->dpll);
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read synth configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_SYNTH_MB_SEM, ZL_SYNTH_MB_SEM_RD,
- ZL_REG_SYNTH_MB_MASK, BIT(index));
+ /* Set data to be written */
+ rc = zl3073x_write_u32(zldev, ZL_REG_HWREG_WRITE_DATA, value);
if (rc)
return rc;
- /* The output frequency is determined by the following formula:
- * base * multiplier * numerator / denominator
- *
- * Read registers with these values
- */
- rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_BASE, &base);
- if (rc)
- return rc;
+ /* Perform the write operation */
+ return zl3073x_do_hwreg_op(zldev, ZL_HWREG_OP_WRITE);
+}
- rc = zl3073x_read_u32(zldev, ZL_REG_SYNTH_FREQ_MULT, &mult);
- if (rc)
- return rc;
+/**
+ * zl3073x_update_hwreg - Update certain bits in HW register
+ * @zldev: zl3073x device pointer
+ * @addr: HW register address
+ * @value: Value to be written into HW register
+ * @mask: Bitmask indicating bits to be updated
+ *
+ * Reads the given HW register, updates the bits specified by @value and
+ * @mask, and writes the result back to the HW register.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask)
+{
+ u32 tmp;
+ int rc;
- rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_M, &m);
+ rc = zl3073x_read_hwreg(zldev, addr, &tmp);
if (rc)
return rc;
- rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_N, &n);
- if (rc)
- return rc;
+ tmp &= ~mask;
+ tmp |= value & mask;
- /* Check denominator for zero to avoid div by 0 */
- if (!n) {
- dev_err(zldev->dev,
- "Zero divisor for SYNTH%u retrieved from device\n",
- index);
- return -EINVAL;
- }
+ return zl3073x_write_hwreg(zldev, addr, tmp);
+}
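A short usage sketch for the read-modify-write helper (editor's illustration; the register address and bit field are invented):

static int demo_set_field(struct zl3073x_dev *zldev)
{
	/* Editor's sketch: program bits 5:4 of a hypothetical HW register
	 * at address 0x1000 to 0b10 while preserving the other bits;
	 * zl3073x_update_hwreg() performs the read, masked merge and
	 * write-back shown above.
	 */
	return zl3073x_update_hwreg(zldev, 0x1000, 0x2 << 4, GENMASK(5, 4));
}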
- /* Compute and store synth frequency */
- zldev->synth[index].freq = div_u64(mul_u32_u32(base * m, mult), n);
+/**
+ * zl3073x_write_hwreg_seq - Write HW registers sequence
+ * @zldev: pointer to device structure
+ * @seq: pointer to first sequence item
+ * @num_items: number of items in sequence
+ *
+ * Writes the given sequence of HW register operations.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_items; i++) {
+ dev_dbg(zldev->dev, "Write 0x%0x [0x%0x] to 0x%0x",
+ seq[i].value, seq[i].mask, seq[i].addr);
+
+ if (seq[i].mask == U32_MAX)
+ /* Write value directly */
+ rc = zl3073x_write_hwreg(zldev, seq[i].addr,
+ seq[i].value);
+ else
+ /* Update only bits specified by the mask */
+ rc = zl3073x_update_hwreg(zldev, seq[i].addr,
+ seq[i].value, seq[i].mask);
+ if (rc)
+ return rc;
- dev_dbg(zldev->dev, "SYNTH%u frequency: %u Hz\n", index,
- zldev->synth[index].freq);
+ if (seq[i].wait)
+ msleep(seq[i].wait);
+ }
return rc;
}
@@ -669,6 +591,21 @@ zl3073x_dev_state_fetch(struct zl3073x_dev *zldev)
return rc;
}
+static void
+zl3073x_dev_ref_status_update(struct zl3073x_dev *zldev)
+{
+ int i, rc;
+
+ for (i = 0; i < ZL3073X_NUM_REFS; i++) {
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(i),
+ &zldev->ref[i].mon_status);
+ if (rc)
+ dev_warn(zldev->dev,
+ "Failed to get REF%u status: %pe\n", i,
+ ERR_PTR(rc));
+ }
+}
+
/**
* zl3073x_ref_phase_offsets_update - update reference phase offsets
* @zldev: pointer to zl3073x_dev structure
@@ -788,6 +725,9 @@ zl3073x_dev_periodic_work(struct kthread_work *work)
struct zl3073x_dpll *zldpll;
int rc;
+ /* Update input references status */
+ zl3073x_dev_ref_status_update(zldev);
+
/* Update DPLL-to-connected-ref phase offsets registers */
rc = zl3073x_ref_phase_offsets_update(zldev, -1);
if (rc)
@@ -809,21 +749,190 @@ zl3073x_dev_periodic_work(struct kthread_work *work)
msecs_to_jiffies(500));
}
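+/**
+ * zl3073x_dev_phase_avg_factor_set - set phase offset averaging factor
+ * @zldev: zl3073x device pointer
+ * @factor: requested averaging factor
+ *
+ * Programs the phase measurement averaging factor into the DPLL
+ * measurement control register and caches the new value.
+ *
+ * Return: 0 on success, <0 on error
+ */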
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor)
+{
+ u8 dpll_meas_ctrl, value;
+ int rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Convert requested factor to register value */
+ value = (factor + 1) & 0x0f;
+
+ /* Update phase measurement control register */
+ dpll_meas_ctrl &= ~ZL_DPLL_MEAS_CTRL_AVG_FACTOR;
+ dpll_meas_ctrl |= FIELD_PREP(ZL_DPLL_MEAS_CTRL_AVG_FACTOR, value);
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Save the new factor */
+ zldev->phase_avg_factor = factor;
+
+ return 0;
+}
+
+/**
+ * zl3073x_dev_phase_meas_setup - setup phase offset measurement
+ * @zldev: pointer to zl3073x_dev structure
+ *
+ * Enable phase offset measurement block, set measurement averaging factor
+ * and enable DPLL-to-its-ref phase measurement for all DPLLs.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+ u8 dpll_meas_ctrl, mask = 0;
+ int rc;
+
+ /* Setup phase measurement averaging factor */
+ rc = zl3073x_dev_phase_avg_factor_set(zldev, zldev->phase_avg_factor);
+ if (rc)
+ return rc;
+
+ /* Read DPLL phase measurement control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL measurement block */
+ dpll_meas_ctrl |= ZL_DPLL_MEAS_CTRL_EN;
+
+ /* Update phase measurement control register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
+ if (rc)
+ return rc;
+
+ /* Enable DPLL-to-connected-ref measurement for each channel */
+ list_for_each_entry(zldpll, &zldev->dplls, list)
+ mask |= BIT(zldpll->id);
+
+ return zl3073x_write_u8(zldev, ZL_REG_DPLL_PHASE_ERR_READ_MASK, mask);
+}
+
+/**
+ * zl3073x_dev_start - Start normal operation
+ * @zldev: zl3073x device pointer
+ * @full: perform full initialization
+ *
+ * The function starts normal operation, which means registering all DPLLs and
+ * their pins, and starting monitoring. If full initialization is requested,
+ * the function additionally initializes the phase offset measurement block and
+ * fetches hardware-invariant parameters.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full)
+{
+ struct zl3073x_dpll *zldpll;
+ u8 info;
+ int rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_INFO, &info);
+ if (rc) {
+ dev_err(zldev->dev, "Failed to read device status info\n");
+ return rc;
+ }
+
+ if (!FIELD_GET(ZL_INFO_READY, info)) {
+ /* The ready bit indicates that the firmware was successfully
+ * configured and is ready for normal operation. If it is
+ * cleared then the configuration stored in flash is wrong
+ * or missing. In this situation the driver will expose only
+ * the devlink interface to give an opportunity to flash the
+ * correct config.
+ */
+ dev_info(zldev->dev,
+ "FW not fully ready - missing or corrupted config\n");
+
+ return 0;
+ }
+
+ if (full) {
+ /* Fetch device state */
+ rc = zl3073x_dev_state_fetch(zldev);
+ if (rc)
+ return rc;
+
+ /* Setup phase offset measurement block */
+ rc = zl3073x_dev_phase_meas_setup(zldev);
+ if (rc) {
+ dev_err(zldev->dev,
+ "Failed to setup phase measurement\n");
+ return rc;
+ }
+ }
+
+ /* Register all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ rc = zl3073x_dpll_register(zldpll);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to register DPLL%u\n",
+ zldpll->id);
+ return rc;
+ }
+ }
+
+ /* Perform initial firmware fine phase correction */
+ rc = zl3073x_dpll_init_fine_phase_adjust(zldev);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc,
+ "Failed to init fine phase correction\n");
+ return rc;
+ }
+
+ /* Start monitoring */
+ kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
+
+ return 0;
+}
+
+/**
+ * zl3073x_dev_stop - Stop normal operation
+ * @zldev: zl3073x device pointer
+ *
+ * The function stops normal operation, which means unregistering all
+ * DPLLs and their pins and stopping the monitoring work.
+ */
+void zl3073x_dev_stop(struct zl3073x_dev *zldev)
+{
+ struct zl3073x_dpll *zldpll;
+
+ /* Stop monitoring */
+ kthread_cancel_delayed_work_sync(&zldev->work);
+
+ /* Unregister all DPLLs */
+ list_for_each_entry(zldpll, &zldev->dplls, list) {
+ if (zldpll->dpll_dev)
+ zl3073x_dpll_unregister(zldpll);
+ }
+}
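zl3073x_dev_stop() and zl3073x_dev_start() are exported so that code which invalidates the running configuration (for instance the new flash/fw objects added to the Makefile) can bracket itself with them. An editor's sketch of the pairing; the flash step is a hypothetical placeholder, not an API added by this patch:

static int demo_reload(struct zl3073x_dev *zldev)
{
	int rc;

	zl3073x_dev_stop(zldev);		/* unregister DPLLs, stop monitoring */

	rc = demo_flash_new_config(zldev);	/* hypothetical placeholder */
	if (rc)
		return rc;

	/* Full start: re-fetch invariants and re-init phase measurement */
	return zl3073x_dev_start(zldev, true);
}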
+
static void zl3073x_dev_dpll_fini(void *ptr)
{
struct zl3073x_dpll *zldpll, *next;
struct zl3073x_dev *zldev = ptr;
- /* Stop monitoring thread */
+ /* Stop monitoring and unregister DPLLs */
+ zl3073x_dev_stop(zldev);
+
+ /* Destroy monitoring thread */
if (zldev->kworker) {
- kthread_cancel_delayed_work_sync(&zldev->work);
kthread_destroy_worker(zldev->kworker);
zldev->kworker = NULL;
}
- /* Release DPLLs */
+ /* Free all DPLLs */
list_for_each_entry_safe(zldpll, next, &zldev->dplls, list) {
- zl3073x_dpll_unregister(zldpll);
list_del(&zldpll->list);
zl3073x_dpll_free(zldpll);
}
@@ -839,7 +948,7 @@ zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
INIT_LIST_HEAD(&zldev->dplls);
- /* Initialize all DPLLs */
+ /* Allocate all DPLLs */
for (i = 0; i < num_dplls; i++) {
zldpll = zl3073x_dpll_alloc(zldev, i);
if (IS_ERR(zldpll)) {
@@ -849,25 +958,9 @@ zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
goto error;
}
- rc = zl3073x_dpll_register(zldpll);
- if (rc) {
- dev_err_probe(zldev->dev, rc,
- "Failed to register DPLL%u\n", i);
- zl3073x_dpll_free(zldpll);
- goto error;
- }
-
list_add_tail(&zldpll->list, &zldev->dplls);
}
- /* Perform initial firmware fine phase correction */
- rc = zl3073x_dpll_init_fine_phase_adjust(zldev);
- if (rc) {
- dev_err_probe(zldev->dev, rc,
- "Failed to init fine phase correction\n");
- goto error;
- }
-
/* Initialize monitoring thread */
kthread_init_delayed_work(&zldev->work, zl3073x_dev_periodic_work);
kworker = kthread_run_worker(0, "zl3073x-%s", dev_name(zldev->dev));
@@ -875,9 +968,14 @@ zl3073x_devm_dpll_init(struct zl3073x_dev *zldev, u8 num_dplls)
rc = PTR_ERR(kworker);
goto error;
}
-
zldev->kworker = kworker;
- kthread_queue_delayed_work(zldev->kworker, &zldev->work, 0);
+
+ /* Start normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc) {
+ dev_err_probe(zldev->dev, rc, "Failed to start device\n");
+ goto error;
+ }
/* Add devres action to release DPLL related resources */
rc = devm_add_action_or_reset(zldev->dev, zl3073x_dev_dpll_fini, zldev);
@@ -893,46 +991,6 @@ error:
}
/**
- * zl3073x_dev_phase_meas_setup - setup phase offset measurement
- * @zldev: pointer to zl3073x_dev structure
- * @num_channels: number of DPLL channels
- *
- * Enable phase offset measurement block, set measurement averaging factor
- * and enable DPLL-to-its-ref phase measurement for all DPLLs.
- *
- * Returns: 0 on success, <0 on error
- */
-static int
-zl3073x_dev_phase_meas_setup(struct zl3073x_dev *zldev, int num_channels)
-{
- u8 dpll_meas_ctrl, mask;
- int i, rc;
-
- /* Read DPLL phase measurement control register */
- rc = zl3073x_read_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, &dpll_meas_ctrl);
- if (rc)
- return rc;
-
- /* Setup phase measurement averaging factor */
- dpll_meas_ctrl &= ~ZL_DPLL_MEAS_CTRL_AVG_FACTOR;
- dpll_meas_ctrl |= FIELD_PREP(ZL_DPLL_MEAS_CTRL_AVG_FACTOR, 3);
-
- /* Enable DPLL measurement block */
- dpll_meas_ctrl |= ZL_DPLL_MEAS_CTRL_EN;
-
- /* Update phase measurement control register */
- rc = zl3073x_write_u8(zldev, ZL_REG_DPLL_MEAS_CTRL, dpll_meas_ctrl);
- if (rc)
- return rc;
-
- /* Enable DPLL-to-connected-ref measurement for each channel */
- for (i = 0, mask = 0; i < num_channels; i++)
- mask |= BIT(i);
-
- return zl3073x_write_u8(zldev, ZL_REG_DPLL_PHASE_ERR_READ_MASK, mask);
-}
-
-/**
* zl3073x_dev_probe - initialize zl3073x device
* @zldev: pointer to zl3073x device
* @chip_info: chip info based on compatible
@@ -991,6 +1049,9 @@ int zl3073x_dev_probe(struct zl3073x_dev *zldev,
*/
zldev->clock_id = get_random_u64();
+ /* Default phase offset averaging factor */
+ zldev->phase_avg_factor = 2;
+
/* Initialize mutex for operations where multiple reads, writes
* and/or polls are required to be done atomically.
*/
@@ -999,17 +1060,6 @@ int zl3073x_dev_probe(struct zl3073x_dev *zldev,
return dev_err_probe(zldev->dev, rc,
"Failed to initialize mutex\n");
- /* Fetch device state */
- rc = zl3073x_dev_state_fetch(zldev);
- if (rc)
- return rc;
-
- /* Setup phase offset measurement block */
- rc = zl3073x_dev_phase_meas_setup(zldev, chip_info->num_channels);
- if (rc)
- return dev_err_probe(zldev->dev, rc,
- "Failed to setup phase measurement\n");
-
/* Register DPLL channels */
rc = zl3073x_devm_dpll_init(zldev, chip_info->num_channels);
if (rc)
diff --git a/drivers/dpll/zl3073x/core.h b/drivers/dpll/zl3073x/core.h
index 71af2c800110..09bca2d0926d 100644
--- a/drivers/dpll/zl3073x/core.h
+++ b/drivers/dpll/zl3073x/core.h
@@ -3,12 +3,16 @@
#ifndef _ZL3073X_CORE_H
#define _ZL3073X_CORE_H
+#include <linux/bitfield.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>
+#include "out.h"
+#include "ref.h"
#include "regs.h"
+#include "synth.h"
struct device;
struct regmap;
@@ -27,59 +31,23 @@ struct zl3073x_dpll;
ZL3073X_NUM_OUTPUT_PINS)
/**
- * struct zl3073x_ref - input reference invariant info
- * @enabled: input reference is enabled or disabled
- * @diff: true if input reference is differential
- * @ffo: current fractional frequency offset
- */
-struct zl3073x_ref {
- bool enabled;
- bool diff;
- s64 ffo;
-};
-
-/**
- * struct zl3073x_out - output invariant info
- * @enabled: out is enabled or disabled
- * @synth: synthesizer the out is connected to
- * @signal_format: out signal format
- */
-struct zl3073x_out {
- bool enabled;
- u8 synth;
- u8 signal_format;
-};
-
-/**
- * struct zl3073x_synth - synthesizer invariant info
- * @freq: synthesizer frequency
- * @dpll: ID of DPLL the synthesizer is driven by
- * @enabled: synth is enabled or disabled
- */
-struct zl3073x_synth {
- u32 freq;
- u8 dpll;
- bool enabled;
-};
-
-/**
* struct zl3073x_dev - zl3073x device
* @dev: pointer to device
* @regmap: regmap to access device registers
* @multiop_lock: to serialize multiple register operations
- * @clock_id: clock id of the device
* @ref: array of input references' invariants
* @out: array of outs' invariants
* @synth: array of synths' invariants
* @dplls: list of DPLLs
* @kworker: thread for periodic work
* @work: periodic work
+ * @clock_id: clock id of the device
+ * @phase_avg_factor: phase offset measurement averaging factor
*/
struct zl3073x_dev {
struct device *dev;
struct regmap *regmap;
struct mutex multiop_lock;
- u64 clock_id;
/* Invariants */
struct zl3073x_ref ref[ZL3073X_NUM_REFS];
@@ -92,6 +60,10 @@ struct zl3073x_dev {
/* Monitor */
struct kthread_worker *kworker;
struct kthread_delayed_work work;
+
+ /* Devlink parameters */
+ u64 clock_id;
+ u8 phase_avg_factor;
};
struct zl3073x_chip_info {
@@ -111,10 +83,42 @@ struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
int zl3073x_dev_probe(struct zl3073x_dev *zldev,
const struct zl3073x_chip_info *chip_info);
+int zl3073x_dev_start(struct zl3073x_dev *zldev, bool full);
+void zl3073x_dev_stop(struct zl3073x_dev *zldev);
+
+static inline u8 zl3073x_dev_phase_avg_factor_get(struct zl3073x_dev *zldev)
+{
+ return zldev->phase_avg_factor;
+}
+
+int zl3073x_dev_phase_avg_factor_set(struct zl3073x_dev *zldev, u8 factor);
+
/**********************
* Registers operations
**********************/
+/**
+ * struct zl3073x_hwreg_seq_item - HW register write sequence item
+ * @addr: HW register to be written
+ * @value: value to be written to HW register
+ * @mask: bitmask indicating bits to be updated
+ * @wait: number of ms to wait after register write
+ */
+struct zl3073x_hwreg_seq_item {
+ u32 addr;
+ u32 value;
+ u32 mask;
+ u32 wait;
+};
+
+#define HWREG_SEQ_ITEM(_addr, _value, _mask, _wait) \
+{ \
+ .addr = _addr, \
+ .value = FIELD_PREP_CONST(_mask, _value), \
+ .mask = _mask, \
+ .wait = _wait, \
+}
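A hedged example of defining and applying a sequence with the item macro above (editor's illustration; the addresses, values and the 10 ms delay are invented):

static int demo_apply_seq(struct zl3073x_dev *zldev)
{
	/* Editor's sketch: the first item writes a whole register (mask
	 * U32_MAX takes the direct-write path in zl3073x_write_hwreg_seq()),
	 * the second updates only bit 7 and then waits 10 ms.
	 */
	static const struct zl3073x_hwreg_seq_item demo_seq[] = {
		HWREG_SEQ_ITEM(0x2010, 0x12345678, U32_MAX, 0),
		HWREG_SEQ_ITEM(0x2014, 1, BIT(7), 10),
	};

	return zl3073x_write_hwreg_seq(zldev, demo_seq, ARRAY_SIZE(demo_seq));
}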
+
int zl3073x_mb_op(struct zl3073x_dev *zldev, unsigned int op_reg, u8 op_val,
unsigned int mask_reg, u16 mask_val);
int zl3073x_poll_zero_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 mask);
@@ -126,12 +130,18 @@ int zl3073x_write_u8(struct zl3073x_dev *zldev, unsigned int reg, u8 val);
int zl3073x_write_u16(struct zl3073x_dev *zldev, unsigned int reg, u16 val);
int zl3073x_write_u32(struct zl3073x_dev *zldev, unsigned int reg, u32 val);
int zl3073x_write_u48(struct zl3073x_dev *zldev, unsigned int reg, u64 val);
+int zl3073x_read_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 *value);
+int zl3073x_write_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value);
+int zl3073x_update_hwreg(struct zl3073x_dev *zldev, u32 addr, u32 value,
+ u32 mask);
+int zl3073x_write_hwreg_seq(struct zl3073x_dev *zldev,
+ const struct zl3073x_hwreg_seq_item *seq,
+ size_t num_items);
/*****************
* Misc operations
*****************/
-int zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult);
int zl3073x_ref_phase_offsets_update(struct zl3073x_dev *zldev, int channel);
static inline bool
@@ -173,172 +183,141 @@ zl3073x_output_pin_out_get(u8 id)
}
/**
- * zl3073x_ref_ffo_get - get current fractional frequency offset
+ * zl3073x_dev_ref_freq_get - get input reference frequency
* @zldev: pointer to zl3073x device
* @index: input reference index
*
- * Return: the latest measured fractional frequency offset
+ * Return: frequency of given input reference
*/
-static inline s64
-zl3073x_ref_ffo_get(struct zl3073x_dev *zldev, u8 index)
+static inline u32
+zl3073x_dev_ref_freq_get(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->ref[index].ffo;
+ const struct zl3073x_ref *ref = zl3073x_ref_state_get(zldev, index);
+
+ return zl3073x_ref_freq_get(ref);
}
/**
- * zl3073x_ref_is_diff - check if the given input reference is differential
+ * zl3073x_dev_ref_is_diff - check if the given input reference is differential
* @zldev: pointer to zl3073x device
* @index: input reference index
*
* Return: true if reference is differential, false if reference is single-ended
*/
static inline bool
-zl3073x_ref_is_diff(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_ref_is_diff(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->ref[index].diff;
+ const struct zl3073x_ref *ref = zl3073x_ref_state_get(zldev, index);
+
+ return zl3073x_ref_is_diff(ref);
}
-/**
- * zl3073x_ref_is_enabled - check if the given input reference is enabled
+/**
+ * zl3073x_dev_ref_is_status_ok - check the given input reference status
* @zldev: pointer to zl3073x device
* @index: input reference index
*
- * Return: true if input refernce is enabled, false otherwise
+ * Return: true if the status is ok, false otherwise
*/
static inline bool
-zl3073x_ref_is_enabled(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_ref_is_status_ok(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->ref[index].enabled;
-}
+ const struct zl3073x_ref *ref = zl3073x_ref_state_get(zldev, index);
-/**
- * zl3073x_synth_dpll_get - get DPLL ID the synth is driven by
- * @zldev: pointer to zl3073x device
- * @index: synth index
- *
- * Return: ID of DPLL the given synthetizer is driven by
- */
-static inline u8
-zl3073x_synth_dpll_get(struct zl3073x_dev *zldev, u8 index)
-{
- return zldev->synth[index].dpll;
+ return zl3073x_ref_is_status_ok(ref);
}
/**
- * zl3073x_synth_freq_get - get synth current freq
+ * zl3073x_dev_synth_freq_get - get synth current freq
* @zldev: pointer to zl3073x device
* @index: synth index
*
* Return: frequency of given synthesizer
*/
static inline u32
-zl3073x_synth_freq_get(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_synth_freq_get(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->synth[index].freq;
-}
+ const struct zl3073x_synth *synth;
-/**
- * zl3073x_synth_is_enabled - check if the given synth is enabled
- * @zldev: pointer to zl3073x device
- * @index: synth index
- *
- * Return: true if synth is enabled, false otherwise
- */
-static inline bool
-zl3073x_synth_is_enabled(struct zl3073x_dev *zldev, u8 index)
-{
- return zldev->synth[index].enabled;
+ synth = zl3073x_synth_state_get(zldev, index);
+ return zl3073x_synth_freq_get(synth);
}
/**
- * zl3073x_out_synth_get - get synth connected to given output
+ * zl3073x_dev_out_synth_get - get synth connected to given output
* @zldev: pointer to zl3073x device
* @index: output index
*
* Return: index of synth connected to given output.
*/
static inline u8
-zl3073x_out_synth_get(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_out_synth_get(struct zl3073x_dev *zldev, u8 index)
{
- return zldev->out[index].synth;
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+
+ return zl3073x_out_synth_get(out);
}
/**
- * zl3073x_out_is_enabled - check if the given output is enabled
+ * zl3073x_dev_out_is_enabled - check if the given output is enabled
* @zldev: pointer to zl3073x device
* @index: output index
*
* Return: true if the output is enabled, false otherwise
*/
static inline bool
-zl3073x_out_is_enabled(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_out_is_enabled(struct zl3073x_dev *zldev, u8 index)
{
- u8 synth;
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+ const struct zl3073x_synth *synth;
+ u8 synth_id;
/* Output is enabled only if associated synth is enabled */
- synth = zl3073x_out_synth_get(zldev, index);
- if (zl3073x_synth_is_enabled(zldev, synth))
- return zldev->out[index].enabled;
+ synth_id = zl3073x_out_synth_get(out);
+ synth = zl3073x_synth_state_get(zldev, synth_id);
- return false;
+ return zl3073x_synth_is_enabled(synth) && zl3073x_out_is_enabled(out);
}
/**
- * zl3073x_out_signal_format_get - get output signal format
- * @zldev: pointer to zl3073x device
- * @index: output index
- *
- * Return: signal format of given output
- */
-static inline u8
-zl3073x_out_signal_format_get(struct zl3073x_dev *zldev, u8 index)
-{
- return zldev->out[index].signal_format;
-}
-
-/**
- * zl3073x_out_dpll_get - get DPLL ID the output is driven by
+ * zl3073x_dev_out_dpll_get - get DPLL ID the output is driven by
* @zldev: pointer to zl3073x device
* @index: output index
*
* Return: ID of DPLL the given output is driven by
*/
static inline
-u8 zl3073x_out_dpll_get(struct zl3073x_dev *zldev, u8 index)
+u8 zl3073x_dev_out_dpll_get(struct zl3073x_dev *zldev, u8 index)
{
- u8 synth;
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
+ const struct zl3073x_synth *synth;
+ u8 synth_id;
/* Get synthesizer connected to given output */
- synth = zl3073x_out_synth_get(zldev, index);
+ synth_id = zl3073x_out_synth_get(out);
+ synth = zl3073x_synth_state_get(zldev, synth_id);
/* Return DPLL that drives the synth */
- return zl3073x_synth_dpll_get(zldev, synth);
+ return zl3073x_synth_dpll_get(synth);
}
/**
- * zl3073x_out_is_diff - check if the given output is differential
+ * zl3073x_dev_out_is_diff - check if the given output is differential
* @zldev: pointer to zl3073x device
* @index: output index
*
* Return: true if output is differential, false if output is single-ended
*/
static inline bool
-zl3073x_out_is_diff(struct zl3073x_dev *zldev, u8 index)
+zl3073x_dev_out_is_diff(struct zl3073x_dev *zldev, u8 index)
{
- switch (zl3073x_out_signal_format_get(zldev, index)) {
- case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS:
- case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF:
- case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM:
- return true;
- default:
- break;
- }
+ const struct zl3073x_out *out = zl3073x_out_state_get(zldev, index);
- return false;
+ return zl3073x_out_is_diff(out);
}
/**
- * zl3073x_output_pin_is_enabled - check if the given output pin is enabled
+ * zl3073x_dev_output_pin_is_enabled - check if the given output pin is enabled
* @zldev: pointer to zl3073x device
* @id: output pin id
*
@@ -348,16 +327,21 @@ zl3073x_out_is_diff(struct zl3073x_dev *zldev, u8 index)
* Return: true if output pin is enabled, false if output pin is disabled
*/
static inline bool
-zl3073x_output_pin_is_enabled(struct zl3073x_dev *zldev, u8 id)
+zl3073x_dev_output_pin_is_enabled(struct zl3073x_dev *zldev, u8 id)
{
- u8 output = zl3073x_output_pin_out_get(id);
+ u8 out_id = zl3073x_output_pin_out_get(id);
+ const struct zl3073x_out *out;
+
+ out = zl3073x_out_state_get(zldev, out_id);
- /* Check if the whole output is enabled */
- if (!zl3073x_out_is_enabled(zldev, output))
+ /* Check if the output is enabled - use the _dev_ helper that
+ * additionally checks that the attached synth is enabled.
+ */
+ if (!zl3073x_dev_out_is_enabled(zldev, out_id))
return false;
/* Check signal format */
- switch (zl3073x_out_signal_format_get(zldev, output)) {
+ switch (zl3073x_out_signal_format_get(out)) {
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DISABLED:
/* Both output pins are disabled by signal format */
return false;
diff --git a/drivers/dpll/zl3073x/devlink.c b/drivers/dpll/zl3073x/devlink.c
index 7e7fe726ee37..ccc22332b346 100644
--- a/drivers/dpll/zl3073x/devlink.c
+++ b/drivers/dpll/zl3073x/devlink.c
@@ -9,6 +9,8 @@
#include "core.h"
#include "devlink.h"
#include "dpll.h"
+#include "flash.h"
+#include "fw.h"
#include "regs.h"
/**
@@ -86,14 +88,12 @@ zl3073x_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct zl3073x_dev *zldev = devlink_priv(devlink);
- struct zl3073x_dpll *zldpll;
if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
return -EOPNOTSUPP;
- /* Unregister all DPLLs */
- list_for_each_entry(zldpll, &zldev->dplls, list)
- zl3073x_dpll_unregister(zldpll);
+ /* Stop normal operation */
+ zl3073x_dev_stop(zldev);
return 0;
}
@@ -107,7 +107,6 @@ zl3073x_devlink_reload_up(struct devlink *devlink,
{
struct zl3073x_dev *zldev = devlink_priv(devlink);
union devlink_param_value val;
- struct zl3073x_dpll *zldpll;
int rc;
if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
@@ -125,24 +124,156 @@ zl3073x_devlink_reload_up(struct devlink *devlink,
zldev->clock_id = val.vu64;
}
- /* Re-register all DPLLs */
- list_for_each_entry(zldpll, &zldev->dplls, list) {
- rc = zl3073x_dpll_register(zldpll);
- if (rc)
- dev_warn(zldev->dev,
- "Failed to re-register DPLL%u\n", zldpll->id);
- }
+ /* Restart normal operation */
+ rc = zl3073x_dev_start(zldev, false);
+ if (rc)
+ dev_warn(zldev->dev, "Failed to re-start normal operation\n");
*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
return 0;
}
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total)
+{
+ struct devlink *devlink = priv_to_devlink(zldev);
+
+ devlink_flash_update_status_notify(devlink, msg, component, done,
+ total);
+}
+
+/**
+ * zl3073x_devlink_flash_prepare - Prepare and enter flash mode
+ * @zldev: zl3073x device pointer
+ * @zlfw: pointer to loaded firmware
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function stops normal operation and switches the device to flash mode.
+ * If an error occurs, normal operation is resumed.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_prepare(struct zl3073x_dev *zldev,
+ struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *util;
+ int rc;
+
+ util = zlfw->component[ZL_FW_COMPONENT_UTIL];
+ if (!util) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Utility is missing in firmware",
+ NULL, 0, 0);
+ return -ENOEXEC;
+ }
+
+ /* Stop normal operation prior entering flash mode */
+ zl3073x_dev_stop(zldev);
+
+ rc = zl3073x_flash_mode_enter(zldev, util->data, util->size, extack);
+ if (rc) {
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to enter flash mode",
+ NULL, 0, 0);
+
+ /* Resume normal operation */
+ zl3073x_dev_start(zldev, true);
+
+ return rc;
+ }
+
+ return 0;
+}
+
+/**
+ * zl3073x_devlink_flash_finish - Leave flash mode and resume normal operation
+ * @zldev: zl3073x device pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function switches the device back to standard mode and resumes normal
+ * operation.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_finish(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Reset device CPU to normal mode */
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ /* Resume normal operation */
+ rc = zl3073x_dev_start(zldev, true);
+ if (rc)
+ zl3073x_devlink_flash_notify(zldev,
+ "Failed to start normal operation",
+ NULL, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_devlink_flash_update - Devlink flash update callback
+ * @devlink: devlink structure pointer
+ * @params: flashing parameters pointer
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dev *zldev = devlink_priv(devlink);
+ struct zl3073x_fw *zlfw;
+ int rc = 0;
+
+ zlfw = zl3073x_fw_load(zldev, params->fw->data, params->fw->size,
+ extack);
+ if (IS_ERR(zlfw)) {
+ zl3073x_devlink_flash_notify(zldev, "Failed to load firmware",
+ NULL, 0, 0);
+ rc = PTR_ERR(zlfw);
+ goto finish;
+ }
+
+ /* Stop normal operation and enter flash mode */
+ rc = zl3073x_devlink_flash_prepare(zldev, zlfw, extack);
+ if (rc)
+ goto finish;
+
+ rc = zl3073x_fw_flash(zldev, zlfw, extack);
+ if (rc) {
+ zl3073x_devlink_flash_finish(zldev, extack);
+ goto finish;
+ }
+
+ /* Resume normal mode */
+ rc = zl3073x_devlink_flash_finish(zldev, extack);
+
+finish:
+ if (!IS_ERR(zlfw))
+ zl3073x_fw_free(zlfw);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ NULL, 0, 0);
+
+ return rc;
+}
+
static const struct devlink_ops zl3073x_devlink_ops = {
.info_get = zl3073x_devlink_info_get,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = zl3073x_devlink_reload_down,
.reload_up = zl3073x_devlink_reload_up,
+ .flash_update = zl3073x_devlink_flash_update,
};
static void
diff --git a/drivers/dpll/zl3073x/devlink.h b/drivers/dpll/zl3073x/devlink.h
index 037720db204f..63dfd6fa1cd6 100644
--- a/drivers/dpll/zl3073x/devlink.h
+++ b/drivers/dpll/zl3073x/devlink.h
@@ -9,4 +9,7 @@ struct zl3073x_dev *zl3073x_devm_alloc(struct device *dev);
int zl3073x_devlink_register(struct zl3073x_dev *zldev);
+void zl3073x_devlink_flash_notify(struct zl3073x_dev *zldev, const char *msg,
+ const char *component, u32 done, u32 total);
+
#endif /* _ZL3073X_DEVLINK_H */
diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c
index 3e42e9e7fd27..9879d85d29af 100644
--- a/drivers/dpll/zl3073x/dpll.c
+++ b/drivers/dpll/zl3073x/dpll.c
@@ -35,6 +35,7 @@
* @prio: pin priority <0, 14>
* @selectable: pin is selectable in automatic mode
* @esync_control: embedded sync is controllable
+ * @phase_gran: phase adjustment granularity
* @pin_state: last saved pin state
* @phase_offset: last saved pin phase offset
* @freq_offset: last saved fractional frequency offset
@@ -49,6 +50,7 @@ struct zl3073x_dpll_pin {
u8 prio;
bool selectable;
bool esync_control;
+ s32 phase_gran;
enum dpll_pin_state pin_state;
s64 phase_offset;
s64 freq_offset;
@@ -98,60 +100,6 @@ zl3073x_dpll_pin_direction_get(const struct dpll_pin *dpll_pin, void *pin_priv,
return 0;
}
-/**
- * zl3073x_dpll_input_ref_frequency_get - get input reference frequency
- * @zldpll: pointer to zl3073x_dpll
- * @ref_id: reference id
- * @frequency: pointer to variable to store frequency
- *
- * Reads frequency of given input reference.
- *
- * Return: 0 on success, <0 on error
- */
-static int
-zl3073x_dpll_input_ref_frequency_get(struct zl3073x_dpll *zldpll, u8 ref_id,
- u32 *frequency)
-{
- struct zl3073x_dev *zldev = zldpll->dev;
- u16 base, mult, num, denom;
- int rc;
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read reference configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref_id));
- if (rc)
- return rc;
-
- /* Read registers to compute resulting frequency */
- rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_BASE, &base);
- if (rc)
- return rc;
- rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_MULT, &mult);
- if (rc)
- return rc;
- rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_M, &num);
- if (rc)
- return rc;
- rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_N, &denom);
- if (rc)
- return rc;
-
- /* Sanity check that HW has not returned zero denominator */
- if (!denom) {
- dev_err(zldev->dev,
- "Zero divisor for ref %u frequency got from device\n",
- ref_id);
- return -EINVAL;
- }
-
- /* Compute the frequency */
- *frequency = mul_u64_u32_div(base * mult, num, denom);
-
- return rc;
-}
-
static int
zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
void *pin_priv,
@@ -163,39 +111,15 @@ zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u8 ref, ref_sync_ctrl, sync_mode;
- u32 esync_div, ref_freq;
- int rc;
-
- /* Get reference frequency */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_dpll_input_ref_frequency_get(zldpll, pin->id, &ref_freq);
- if (rc)
- return rc;
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read reference configuration into mailbox */
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
- if (rc)
- return rc;
+ const struct zl3073x_ref *ref;
+ u8 ref_id;
- /* Get ref sync mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref_sync_ctrl);
- if (rc)
- return rc;
-
- /* Get esync divisor */
- rc = zl3073x_read_u32(zldev, ZL_REG_REF_ESYNC_DIV, &esync_div);
- if (rc)
- return rc;
-
- sync_mode = FIELD_GET(ZL_REF_SYNC_CTRL_MODE, ref_sync_ctrl);
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
- switch (sync_mode) {
+ switch (FIELD_GET(ZL_REF_SYNC_CTRL_MODE, ref->sync_ctrl)) {
case ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75:
- esync->freq = (esync_div == ZL_REF_ESYNC_DIV_1HZ) ? 1 : 0;
+ esync->freq = ref->esync_n_div == ZL_REF_ESYNC_DIV_1HZ ? 1 : 0;
esync->pulse = 25;
break;
default:
@@ -207,7 +131,7 @@ zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
/* If the pin supports esync control expose its range but only
* if the current reference frequency is > 1 Hz.
*/
- if (pin->esync_control && ref_freq > 1) {
+ if (pin->esync_control && zl3073x_ref_freq_get(ref) > 1) {
esync->range = esync_freq_ranges;
esync->range_num = ARRAY_SIZE(esync_freq_ranges);
} else {
@@ -215,7 +139,7 @@ zl3073x_dpll_input_pin_esync_get(const struct dpll_pin *dpll_pin,
esync->range_num = 0;
}
- return rc;
+ return 0;
}
static int
@@ -228,22 +152,11 @@ zl3073x_dpll_input_pin_esync_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u8 ref, ref_sync_ctrl, sync_mode;
- int rc;
-
- guard(mutex)(&zldev->multiop_lock);
+ struct zl3073x_ref ref;
+ u8 ref_id, sync_mode;
- /* Read reference configuration into mailbox */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
- if (rc)
- return rc;
-
- /* Get ref sync mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref_sync_ctrl);
- if (rc)
- return rc;
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = *zl3073x_ref_state_get(zldev, ref_id);
/* Use freq == 0 to disable esync */
if (!freq)
@@ -251,25 +164,16 @@ zl3073x_dpll_input_pin_esync_set(const struct dpll_pin *dpll_pin,
else
sync_mode = ZL_REF_SYNC_CTRL_MODE_50_50_ESYNC_25_75;
- ref_sync_ctrl &= ~ZL_REF_SYNC_CTRL_MODE;
- ref_sync_ctrl |= FIELD_PREP(ZL_REF_SYNC_CTRL_MODE, sync_mode);
-
- /* Update ref sync control register */
- rc = zl3073x_write_u8(zldev, ZL_REG_REF_SYNC_CTRL, ref_sync_ctrl);
- if (rc)
- return rc;
+ ref.sync_ctrl &= ~ZL_REF_SYNC_CTRL_MODE;
+ ref.sync_ctrl |= FIELD_PREP(ZL_REF_SYNC_CTRL_MODE, sync_mode);
if (freq) {
- /* 1 Hz is only supported frequnecy currently */
- rc = zl3073x_write_u32(zldev, ZL_REG_REF_ESYNC_DIV,
- ZL_REF_ESYNC_DIV_1HZ);
- if (rc)
- return rc;
+ /* 1 Hz is the only supported frequency for now */
+ ref.esync_n_div = ZL_REF_ESYNC_DIV_1HZ;
}
- /* Commit reference configuration */
- return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
- ZL_REG_REF_MB_MASK, BIT(ref));
+ /* Update reference configuration */
+ return zl3073x_ref_state_set(zldev, ref_id, &ref);
}
static int
@@ -293,17 +197,12 @@ zl3073x_dpll_input_pin_frequency_get(const struct dpll_pin *dpll_pin,
{
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dpll_pin *pin = pin_priv;
- u32 ref_freq;
- u8 ref;
- int rc;
+ u8 ref_id;
- /* Read and return ref frequency */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref, &ref_freq);
- if (!rc)
- *frequency = ref_freq;
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ *frequency = zl3073x_dev_ref_freq_get(zldpll->dev, ref_id);
- return rc;
+ return 0;
}
static int
@@ -316,39 +215,18 @@ zl3073x_dpll_input_pin_frequency_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u16 base, mult;
- u8 ref;
- int rc;
+ struct zl3073x_ref ref;
+ u8 ref_id;
- /* Get base frequency and multiplier for the requested frequency */
- rc = zl3073x_ref_freq_factorize(frequency, &base, &mult);
- if (rc)
- return rc;
+ /* Get reference state */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = *zl3073x_ref_state_get(zldev, ref_id);
- guard(mutex)(&zldev->multiop_lock);
+ /* Update frequency */
+ zl3073x_ref_freq_set(&ref, frequency);
- /* Load reference configuration */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
-
- /* Update base frequency, multiplier, numerator & denominator */
- rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_BASE, base);
- if (rc)
- return rc;
- rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_MULT, mult);
- if (rc)
- return rc;
- rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_M, 1);
- if (rc)
- return rc;
- rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_N, 1);
- if (rc)
- return rc;
-
- /* Commit reference configuration */
- return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
- ZL_REG_REF_MB_MASK, BIT(ref));
+ /* Commit reference state */
+ return zl3073x_ref_state_set(zldev, ref_id, &ref);
}
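For context, the encoding behind zl3073x_ref_freq_get()/zl3073x_ref_freq_set() is the one the removed zl3073x_dpll_input_ref_frequency_get() computed from the mailbox registers: freq = base * mult * M / N, with all four factors held in 16-bit fields. A minimal sketch with hypothetical values:

/* 10 MHz encoded as base = 10000, mult = 1000, M = N = 1 */
u16 base = 10000, mult = 1000, num = 1, denom = 1;
u32 freq = mul_u64_u32_div((u64)base * mult, num, denom); /* 10000000 Hz */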
/**
@@ -495,19 +373,10 @@ zl3073x_dpll_connected_ref_get(struct zl3073x_dpll *zldpll, u8 *ref)
if (rc)
return rc;
- if (ZL3073X_DPLL_REF_IS_VALID(*ref)) {
- u8 ref_status;
-
- /* Read the reference monitor status */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(*ref),
- &ref_status);
- if (rc)
- return rc;
-
- /* If the monitor indicates an error nothing is connected */
- if (ref_status != ZL_REF_MON_STATUS_OK)
- *ref = ZL3073X_DPLL_REF_NONE;
- }
+ /* If the monitor indicates an error nothing is connected */
+ if (ZL3073X_DPLL_REF_IS_VALID(*ref) &&
+ !zl3073x_dev_ref_is_status_ok(zldev, *ref))
+ *ref = ZL3073X_DPLL_REF_NONE;
return 0;
}
@@ -522,34 +391,25 @@ zl3073x_dpll_input_pin_phase_offset_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u8 conn_ref, ref, ref_status;
+ const struct zl3073x_ref *ref;
+ u8 conn_id, ref_id;
s64 ref_phase;
int rc;
/* Get currently connected reference */
- rc = zl3073x_dpll_connected_ref_get(zldpll, &conn_ref);
+ rc = zl3073x_dpll_connected_ref_get(zldpll, &conn_id);
if (rc)
return rc;
/* Report phase offset only for currently connected pin if the phase
- * monitor feature is disabled.
+ * monitor feature is disabled and only if the input pin signal is
+ * present.
*/
- ref = zl3073x_input_pin_ref_get(pin->id);
- if (!zldpll->phase_monitor && ref != conn_ref) {
- *phase_offset = 0;
-
- return 0;
- }
-
- /* Get this pin monitor status */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref), &ref_status);
- if (rc)
- return rc;
-
- /* Report phase offset only if the input pin signal is present */
- if (ref_status != ZL_REF_MON_STATUS_OK) {
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
+ if ((!zldpll->phase_monitor && ref_id != conn_id) ||
+ !zl3073x_ref_is_status_ok(ref)) {
*phase_offset = 0;
-
return 0;
}
@@ -559,20 +419,12 @@ zl3073x_dpll_input_pin_phase_offset_get(const struct dpll_pin *dpll_pin,
* the phase offset is modded to the period of the signal
* the dpll is locked to.
*/
- if (ZL3073X_DPLL_REF_IS_VALID(conn_ref) && conn_ref != ref) {
+ if (ZL3073X_DPLL_REF_IS_VALID(conn_id) && conn_id != ref_id) {
u32 conn_freq, ref_freq;
- /* Get frequency of connected ref */
- rc = zl3073x_dpll_input_ref_frequency_get(zldpll, conn_ref,
- &conn_freq);
- if (rc)
- return rc;
-
- /* Get frequency of given ref */
- rc = zl3073x_dpll_input_ref_frequency_get(zldpll, ref,
- &ref_freq);
- if (rc)
- return rc;
+ /* Get frequency of connected and given ref */
+ conn_freq = zl3073x_dev_ref_freq_get(zldev, conn_id);
+ ref_freq = zl3073x_ref_freq_get(ref);
if (conn_freq > ref_freq) {
s64 conn_period, div_factor;
@@ -599,33 +451,23 @@ zl3073x_dpll_input_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
+ const struct zl3073x_ref *ref;
s64 phase_comp;
- u8 ref;
- int rc;
-
- guard(mutex)(&zldev->multiop_lock);
+ u8 ref_id;
/* Read reference configuration */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
- if (rc)
- return rc;
-
- /* Read current phase offset compensation */
- rc = zl3073x_read_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, &phase_comp);
- if (rc)
- return rc;
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
/* Perform sign extension for 48bit signed value */
- phase_comp = sign_extend64(phase_comp, 47);
+ phase_comp = sign_extend64(ref->phase_comp, 47);
/* Reverse two's complement negation applied during set and convert
* to 32bit signed int
*/
*phase_adjust = (s32)-phase_comp;
- return rc;
+ return 0;
}
static int
@@ -639,32 +481,20 @@ zl3073x_dpll_input_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- s64 phase_comp;
- u8 ref;
- int rc;
+ struct zl3073x_ref ref;
+ u8 ref_id;
+
+ /* Read reference configuration */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = *zl3073x_ref_state_get(zldev, ref_id);
/* The value in the register is stored as two's complement negation
* of requested value.
*/
- phase_comp = -phase_adjust;
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read reference configuration */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
- ZL_REG_REF_MB_MASK, BIT(ref));
- if (rc)
- return rc;
-
- /* Write the requested value into the compensation register */
- rc = zl3073x_write_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP, phase_comp);
- if (rc)
- return rc;
+ ref.phase_comp = -phase_adjust;
- /* Commit reference configuration */
- return zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
- ZL_REG_REF_MB_MASK, BIT(ref));
+ /* Update reference configuration */
+ return zl3073x_ref_state_set(zldev, ref_id, &ref);
}
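A worked round trip of the negation convention above, assuming a requested adjustment of +1500 ps: the setter stores phase_comp = -1500, and the getter sign-extends the 48-bit register value and negates it back. A minimal sketch:

s64 stored = -1500;	/* written by the _set() callback */
s64 comp = sign_extend64(stored & GENMASK_ULL(47, 0), 47);	/* -1500 */
s32 adjust = (s32)-comp;	/* +1500, as originally requested */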
/**
@@ -775,7 +605,7 @@ zl3073x_dpll_ref_state_get(struct zl3073x_dpll_pin *pin,
{
struct zl3073x_dpll *zldpll = pin->dpll;
struct zl3073x_dev *zldev = zldpll->dev;
- u8 ref, ref_conn, status;
+ u8 ref, ref_conn;
int rc;
ref = zl3073x_input_pin_ref_get(pin->id);
@@ -795,20 +625,9 @@ zl3073x_dpll_ref_state_get(struct zl3073x_dpll_pin *pin,
* pin as selectable.
*/
if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_AUTO &&
- pin->selectable) {
- /* Read reference monitor status */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref),
- &status);
- if (rc)
- return rc;
-
- /* If the monitor indicates errors report the reference
- * as disconnected
- */
- if (status == ZL_REF_MON_STATUS_OK) {
- *state = DPLL_PIN_STATE_SELECTABLE;
- return 0;
- }
+ zl3073x_dev_ref_is_status_ok(zldev, ref) && pin->selectable) {
+ *state = DPLL_PIN_STATE_SELECTABLE;
+ return 0;
}
/* Otherwise report the pin as disconnected */
@@ -951,21 +770,19 @@ zl3073x_dpll_output_pin_esync_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- struct device *dev = zldev->dev;
- u32 esync_period, esync_width;
- u8 clock_type, synth;
- u8 out, output_mode;
- u32 output_div;
+ const struct zl3073x_synth *synth;
+ const struct zl3073x_out *out;
+ u8 clock_type, out_id;
u32 synth_freq;
- int rc;
- out = zl3073x_output_pin_out_get(pin->id);
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = zl3073x_out_state_get(zldev, out_id);
/* If N-division is enabled, esync is not supported. The register used
* for N-division is also used for the esync divider so both cannot
* be used.
*/
- switch (zl3073x_out_signal_format_get(zldev, out)) {
+ switch (zl3073x_out_signal_format_get(out)) {
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
return -EOPNOTSUPP;
@@ -973,38 +790,11 @@ zl3073x_dpll_output_pin_esync_get(const struct dpll_pin *dpll_pin,
break;
}
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration into mailbox */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
+ /* Get attached synth frequency */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(out));
+ synth_freq = zl3073x_synth_freq_get(synth);
- /* Read output mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
- if (rc)
- return rc;
-
- /* Read output divisor */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
- if (rc)
- return rc;
-
- /* Check output divisor for zero */
- if (!output_div) {
- dev_err(dev, "Zero divisor for OUTPUT%u got from device\n",
- out);
- return -EINVAL;
- }
-
- /* Get synth attached to output pin */
- synth = zl3073x_out_synth_get(zldev, out);
-
- /* Get synth frequency */
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
-
- clock_type = FIELD_GET(ZL_OUTPUT_MODE_CLOCK_TYPE, output_mode);
+ clock_type = FIELD_GET(ZL_OUTPUT_MODE_CLOCK_TYPE, out->mode);
if (clock_type != ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC) {
/* No need to read esync data if it is not enabled */
esync->freq = 0;
@@ -1013,38 +803,21 @@ zl3073x_dpll_output_pin_esync_get(const struct dpll_pin *dpll_pin,
goto finish;
}
- /* Read esync period */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, &esync_period);
- if (rc)
- return rc;
-
- /* Check esync divisor for zero */
- if (!esync_period) {
- dev_err(dev, "Zero esync divisor for OUTPUT%u got from device\n",
- out);
- return -EINVAL;
- }
-
- /* Get esync pulse width in units of half synth cycles */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, &esync_width);
- if (rc)
- return rc;
-
/* Compute esync frequency */
- esync->freq = synth_freq / output_div / esync_period;
+ esync->freq = synth_freq / out->div / out->esync_n_period;
/* By comparing the esync_pulse_width to the half of the pulse width
* the esync pulse percentage can be determined.
* Note that half pulse width is in units of half synth cycles, which
* is why it reduces down to be output_div.
*/
- esync->pulse = (50 * esync_width) / output_div;
+ esync->pulse = (50 * out->esync_n_width) / out->div;
finish:
/* Set supported esync ranges if the pin supports esync control and
* if the output frequency is > 1 Hz.
*/
- if (pin->esync_control && (synth_freq / output_div) > 1) {
+ if (pin->esync_control && (synth_freq / out->div) > 1) {
esync->range = esync_freq_ranges;
esync->range_num = ARRAY_SIZE(esync_freq_ranges);
} else {
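Plugging hypothetical numbers into the computation above: with synth_freq = 10 MHz, out->div = 10, out->esync_n_period = 1000000 and out->esync_n_width = 5, the code yields esync->freq = 10000000 / 10 / 1000000 = 1 Hz and esync->pulse = (50 * 5) / 10 = 25, i.e. the 1 Hz / 25% pulse case that the esync setter below configures.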
@@ -1062,21 +835,22 @@ zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
void *dpll_priv, u64 freq,
struct netlink_ext_ack *extack)
{
- u32 esync_period, esync_width, output_div;
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u8 clock_type, out, output_mode, synth;
+ const struct zl3073x_synth *synth;
+ struct zl3073x_out out;
+ u8 clock_type, out_id;
u32 synth_freq;
- int rc;
- out = zl3073x_output_pin_out_get(pin->id);
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = *zl3073x_out_state_get(zldev, out_id);
/* If N-division is enabled, esync is not supported. The register used
* for N-division is also used for the esync divider so both cannot
* be used.
*/
- switch (zl3073x_out_signal_format_get(zldev, out)) {
+ switch (zl3073x_out_signal_format_get(&out)) {
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
return -EOPNOTSUPP;
@@ -1084,19 +858,6 @@ zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
break;
}
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration into mailbox */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
-
- /* Read output mode */
- rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &output_mode);
- if (rc)
- return rc;
-
/* Select clock type */
if (freq)
clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_ESYNC;
@@ -1104,38 +865,19 @@ zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
clock_type = ZL_OUTPUT_MODE_CLOCK_TYPE_NORMAL;
/* Update clock type in output mode */
- output_mode &= ~ZL_OUTPUT_MODE_CLOCK_TYPE;
- output_mode |= FIELD_PREP(ZL_OUTPUT_MODE_CLOCK_TYPE, clock_type);
- rc = zl3073x_write_u8(zldev, ZL_REG_OUTPUT_MODE, output_mode);
- if (rc)
- return rc;
+ out.mode &= ~ZL_OUTPUT_MODE_CLOCK_TYPE;
+ out.mode |= FIELD_PREP(ZL_OUTPUT_MODE_CLOCK_TYPE, clock_type);
/* If esync is being disabled just write mailbox and finish */
if (!freq)
goto write_mailbox;
- /* Get synth attached to output pin */
- synth = zl3073x_out_synth_get(zldev, out);
-
- /* Get synth frequency */
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
-
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
- if (rc)
- return rc;
-
- /* Check output divisor for zero */
- if (!output_div) {
- dev_err(zldev->dev,
- "Zero divisor for OUTPUT%u got from device\n", out);
- return -EINVAL;
- }
+ /* Get attached synth frequency */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(&out));
+ synth_freq = zl3073x_synth_freq_get(synth);
/* Compute and update esync period */
- esync_period = synth_freq / (u32)freq / output_div;
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, esync_period);
- if (rc)
- return rc;
+ out.esync_n_period = synth_freq / (u32)freq / out.div;
/* Half of the period in units of 1/2 synth cycle can be represented by
* the output_div. To get the supported esync pulse width of 25% of the
@@ -1143,15 +885,11 @@ zl3073x_dpll_output_pin_esync_set(const struct dpll_pin *dpll_pin,
* assumes that output_div is even, otherwise some resolution will be
* lost.
*/
- esync_width = output_div / 2;
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, esync_width);
- if (rc)
- return rc;
+ out.esync_n_width = out.div / 2;
write_mailbox:
/* Commit output configuration */
- return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ return zl3073x_out_state_set(zldev, out_id, &out);
}
static int
@@ -1164,83 +902,46 @@ zl3073x_dpll_output_pin_frequency_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- struct device *dev = zldev->dev;
- u8 out, signal_format, synth;
- u32 output_div, synth_freq;
- int rc;
-
- out = zl3073x_output_pin_out_get(pin->id);
- synth = zl3073x_out_synth_get(zldev, out);
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration into mailbox */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
-
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &output_div);
- if (rc)
- return rc;
+ const struct zl3073x_synth *synth;
+ const struct zl3073x_out *out;
+ u32 synth_freq;
+ u8 out_id;
- /* Check output divisor for zero */
- if (!output_div) {
- dev_err(dev, "Zero divisor for output %u got from device\n",
- out);
- return -EINVAL;
- }
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = zl3073x_out_state_get(zldev, out_id);
- /* Read used signal format for the given output */
- signal_format = zl3073x_out_signal_format_get(zldev, out);
+ /* Get attached synth frequency */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(out));
+ synth_freq = zl3073x_synth_freq_get(synth);
- switch (signal_format) {
+ switch (zl3073x_out_signal_format_get(out)) {
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV:
case ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV_INV:
/* In case of divided format we have to distinguish between
* the given output pin types.
+ *
+ * For P-pin the resulting frequency is computed as simple
+ * division of synth frequency and output divisor.
+ *
+ * For N-pin we have to divide additionally by divisor stored
+ * in esync_n_period output mailbox register that is used as
+ * N-pin divisor for these modes.
*/
- if (zl3073x_dpll_is_p_pin(pin)) {
- /* For P-pin the resulting frequency is computed as
- * simple division of synth frequency and output
- * divisor.
- */
- *frequency = synth_freq / output_div;
- } else {
- /* For N-pin we have to divide additionally by
- * divisor stored in esync_period output mailbox
- * register that is used as N-pin divisor for these
- * modes.
- */
- u32 ndiv;
-
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
- &ndiv);
- if (rc)
- return rc;
+ *frequency = synth_freq / out->div;
- /* Check N-pin divisor for zero */
- if (!ndiv) {
- dev_err(dev,
- "Zero N-pin divisor for output %u got from device\n",
- out);
- return -EINVAL;
- }
+ if (!zl3073x_dpll_is_p_pin(pin))
+ *frequency = (u32)*frequency / out->esync_n_period;
- /* Compute final divisor for N-pin */
- *frequency = synth_freq / output_div / ndiv;
- }
break;
default:
/* In other modes the resulting frequency is computed as
* division of synth frequency and output divisor.
*/
- *frequency = synth_freq / output_div;
+ *frequency = synth_freq / out->div;
break;
}
- return rc;
+ return 0;
}
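A numeric sketch of the two branches (hypothetical values): with a 10 MHz synth and out->div = 10, the P-pin (and any non-N-divided format) runs at 10000000 / 10 = 1 MHz; in the N-divided formats the N-pin additionally divides by out->esync_n_period, e.g. 1000000 / 4 = 250 kHz when esync_n_period is 4.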
static int
@@ -1253,28 +954,21 @@ zl3073x_dpll_output_pin_frequency_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- struct device *dev = zldev->dev;
- u32 output_n_freq, output_p_freq;
- u8 out, signal_format, synth;
- u32 cur_div, new_div, ndiv;
- u32 synth_freq;
- int rc;
+ const struct zl3073x_synth *synth;
+ u8 out_id, signal_format;
+ u32 new_div, synth_freq;
+ struct zl3073x_out out;
+
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = *zl3073x_out_state_get(zldev, out_id);
- out = zl3073x_output_pin_out_get(pin->id);
- synth = zl3073x_out_synth_get(zldev, out);
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
+ /* Get attached synth frequency and compute new divisor */
+ synth = zl3073x_synth_state_get(zldev, zl3073x_out_synth_get(&out));
+ synth_freq = zl3073x_synth_freq_get(synth);
new_div = synth_freq / (u32)frequency;
/* Get used signal format for the given output */
- signal_format = zl3073x_out_signal_format_get(zldev, out);
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Load output configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
+ signal_format = zl3073x_out_signal_format_get(&out);
/* Check signal format */
if (signal_format != ZL_OUTPUT_MODE_SIGNAL_FORMAT_2_NDIV &&
@@ -1282,99 +976,50 @@ zl3073x_dpll_output_pin_frequency_set(const struct dpll_pin *dpll_pin,
/* For non N-divided signal formats the frequency is computed
* as division of synth frequency and output divisor.
*/
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, new_div);
- if (rc)
- return rc;
+ out.div = new_div;
/* For 50/50 duty cycle the divisor is equal to width */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, new_div);
- if (rc)
- return rc;
+ out.width = new_div;
/* Commit output configuration */
- return zl3073x_mb_op(zldev,
- ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- }
-
- /* For N-divided signal format get current divisor */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &cur_div);
- if (rc)
- return rc;
-
- /* Check output divisor for zero */
- if (!cur_div) {
- dev_err(dev, "Zero divisor for output %u got from device\n",
- out);
- return -EINVAL;
+ return zl3073x_out_state_set(zldev, out_id, &out);
}
- /* Get N-pin divisor (shares the same register with esync */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, &ndiv);
- if (rc)
- return rc;
-
- /* Check N-pin divisor for zero */
- if (!ndiv) {
- dev_err(dev,
- "Zero N-pin divisor for output %u got from device\n",
- out);
- return -EINVAL;
- }
-
- /* Compute current output frequency for P-pin */
- output_p_freq = synth_freq / cur_div;
-
- /* Compute current N-pin frequency */
- output_n_freq = output_p_freq / ndiv;
-
if (zl3073x_dpll_is_p_pin(pin)) {
/* We are going to change output frequency for P-pin but
* if the requested frequency is less than current N-pin
* frequency then indicate a failure as we are not able
* to compute N-pin divisor to keep its frequency unchanged.
+ *
+ * Otherwise update the N-pin divisor so that its frequency stays unchanged.
*/
- if (frequency <= output_n_freq)
+ out.esync_n_period = (out.esync_n_period * out.div) / new_div;
+ if (!out.esync_n_period)
return -EINVAL;
/* Update the output divisor */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, new_div);
- if (rc)
- return rc;
+ out.div = new_div;
/* For 50/50 duty cycle the divisor is equal to width */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, new_div);
- if (rc)
- return rc;
-
- /* Compute new divisor for N-pin */
- ndiv = (u32)frequency / output_n_freq;
+ out.width = out.div;
} else {
/* We are going to change frequency of N-pin but if
* the requested freq is greater or equal than freq of P-pin
* in the output pair we cannot compute divisor for the N-pin.
* In this case indicate a failure.
+ *
+ * Otherwise update the N-pin divisor.
*/
- if (output_p_freq <= frequency)
+ out.esync_n_period = div64_u64(synth_freq, frequency * out.div);
+ if (!out.esync_n_period)
return -EINVAL;
-
- /* Compute new divisor for N-pin */
- ndiv = output_p_freq / (u32)frequency;
}
- /* Update divisor for the N-pin */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD, ndiv);
- if (rc)
- return rc;
-
/* For 50/50 duty cycle the divisor is equal to width */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH, ndiv);
- if (rc)
- return rc;
+ out.esync_n_width = out.esync_n_period;
/* Commit output configuration */
- return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ return zl3073x_out_state_set(zldev, out_id, &out);
}
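Checking the P-pin branch with hypothetical numbers: a 10 MHz synth, out.div = 10 and out.esync_n_period = 4 put the N-pin at 10 MHz / 10 / 4 = 250 kHz. Retuning the P-pin to 2 MHz gives new_div = 5 and esync_n_period = (4 * 10) / 5 = 8, so the N-pin stays at 10 MHz / 5 / 8 = 250 kHz; had the scaled period truncated to zero, the -EINVAL above would have rejected the request.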
static int
@@ -1388,42 +1033,18 @@ zl3073x_dpll_output_pin_phase_adjust_get(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- u32 synth_freq;
- s32 phase_comp;
- u8 out, synth;
- int rc;
-
- out = zl3073x_output_pin_out_get(pin->id);
- synth = zl3073x_out_synth_get(zldev, out);
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
-
- /* Check synth freq for zero */
- if (!synth_freq) {
- dev_err(zldev->dev, "Got zero synth frequency for output %u\n",
- out);
- return -EINVAL;
- }
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
-
- /* Read current output phase compensation */
- rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP, &phase_comp);
- if (rc)
- return rc;
+ const struct zl3073x_out *out;
+ u8 out_id;
- /* Value in register is expressed in half synth clock cycles */
- phase_comp *= (int)div_u64(PSEC_PER_SEC, 2 * synth_freq);
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = zl3073x_out_state_get(zldev, out_id);
- /* Reverse two's complement negation applied during 'set' */
- *phase_adjust = -phase_comp;
+ /* Convert value to ps and reverse two's complement negation applied
+ * during 'set'
+ */
+ *phase_adjust = -out->phase_comp * pin->phase_gran;
- return rc;
+ return 0;
}
static int
@@ -1437,52 +1058,19 @@ zl3073x_dpll_output_pin_phase_adjust_set(const struct dpll_pin *dpll_pin,
struct zl3073x_dpll *zldpll = dpll_priv;
struct zl3073x_dev *zldev = zldpll->dev;
struct zl3073x_dpll_pin *pin = pin_priv;
- int half_synth_cycle;
- u32 synth_freq;
- u8 out, synth;
- int rc;
-
- /* Get attached synth */
- out = zl3073x_output_pin_out_get(pin->id);
- synth = zl3073x_out_synth_get(zldev, out);
-
- /* Get synth's frequency */
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
-
- /* Value in register is expressed in half synth clock cycles so
- * the given phase adjustment a multiple of half synth clock.
- */
- half_synth_cycle = (int)div_u64(PSEC_PER_SEC, 2 * synth_freq);
+ struct zl3073x_out out;
+ u8 out_id;
- if ((phase_adjust % half_synth_cycle) != 0) {
- NL_SET_ERR_MSG_FMT(extack,
- "Phase adjustment value has to be multiple of %d",
- half_synth_cycle);
- return -EINVAL;
- }
- phase_adjust /= half_synth_cycle;
+ out_id = zl3073x_output_pin_out_get(pin->id);
+ out = *zl3073x_out_state_get(zldev, out_id);
/* The value in the register is stored as two's complement negation
- * of requested value.
+ * of requested value and expressed in half synth clock cycles.
*/
- phase_adjust = -phase_adjust;
-
- guard(mutex)(&zldev->multiop_lock);
-
- /* Read output configuration */
- rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
- if (rc)
- return rc;
-
- /* Write the requested value into the compensation register */
- rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP, phase_adjust);
- if (rc)
- return rc;
+ out.phase_comp = -phase_adjust / pin->phase_gran;
/* Update output configuration from mailbox */
- return zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
- ZL_REG_OUTPUT_MB_MASK, BIT(out));
+ return zl3073x_out_state_set(zldev, out_id, &out);
}
static int
@@ -1577,6 +1165,59 @@ zl3073x_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
}
static int
+zl3073x_dpll_phase_offset_avg_factor_get(const struct dpll_device *dpll,
+ void *dpll_priv, u32 *factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *zldpll = dpll_priv;
+
+ *factor = zl3073x_dev_phase_avg_factor_get(zldpll->dev);
+
+ return 0;
+}
+
+static void
+zl3073x_dpll_change_work(struct work_struct *work)
+{
+ struct zl3073x_dpll *zldpll;
+
+ zldpll = container_of(work, struct zl3073x_dpll, change_work);
+ dpll_device_change_ntf(zldpll->dpll_dev);
+}
+
+static int
+zl3073x_dpll_phase_offset_avg_factor_set(const struct dpll_device *dpll,
+ void *dpll_priv, u32 factor,
+ struct netlink_ext_ack *extack)
+{
+ struct zl3073x_dpll *item, *zldpll = dpll_priv;
+ int rc;
+
+ if (factor > 15) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Phase offset average factor has to be from range <0,15>");
+ return -EINVAL;
+ }
+
+ rc = zl3073x_dev_phase_avg_factor_set(zldpll->dev, factor);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Failed to set phase offset averaging factor");
+ return rc;
+ }
+
+ /* The averaging factor is common to all DPLL channels so after a change
+ * we have to send a notification to the other DPLL devices.
+ */
+ list_for_each_entry(item, &zldpll->dev->dplls, list) {
+ if (item != zldpll)
+ schedule_work(&item->change_work);
+ }
+
+ return 0;
+}
+
+static int
zl3073x_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
void *dpll_priv,
enum dpll_feature_state *state,
@@ -1635,6 +1276,8 @@ static const struct dpll_pin_ops zl3073x_dpll_output_pin_ops = {
static const struct dpll_device_ops zl3073x_dpll_device_ops = {
.lock_status_get = zl3073x_dpll_lock_status_get,
.mode_get = zl3073x_dpll_mode_get,
+ .phase_offset_avg_factor_get = zl3073x_dpll_phase_offset_avg_factor_get,
+ .phase_offset_avg_factor_set = zl3073x_dpll_phase_offset_avg_factor_set,
.phase_offset_monitor_get = zl3073x_dpll_phase_offset_monitor_get,
.phase_offset_monitor_set = zl3073x_dpll_phase_offset_monitor_set,
};
@@ -1703,9 +1346,10 @@ zl3073x_dpll_pin_register(struct zl3073x_dpll_pin *pin, u32 index)
if (IS_ERR(props))
return PTR_ERR(props);
- /* Save package label & esync capability */
+ /* Save package label, esync capability and phase adjust granularity */
strscpy(pin->label, props->package_label);
pin->esync_control = props->esync_control;
+ pin->phase_gran = props->dpll_props.phase_gran;
if (zl3073x_dpll_is_input_pin(pin)) {
rc = zl3073x_dpll_ref_prio_get(pin, &pin->prio);
@@ -1823,33 +1467,32 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
const char *name;
if (dir == DPLL_PIN_DIRECTION_INPUT) {
- u8 ref = zl3073x_input_pin_ref_get(index);
-
- name = "REF";
+ u8 ref_id = zl3073x_input_pin_ref_get(index);
+ const struct zl3073x_ref *ref;
/* Skip the pin if the DPLL is running in NCO mode */
if (zldpll->refsel_mode == ZL_DPLL_MODE_REFSEL_MODE_NCO)
return false;
- is_diff = zl3073x_ref_is_diff(zldev, ref);
- is_enabled = zl3073x_ref_is_enabled(zldev, ref);
+ name = "REF";
+ ref = zl3073x_ref_state_get(zldev, ref_id);
+ is_diff = zl3073x_ref_is_diff(ref);
+ is_enabled = zl3073x_ref_is_enabled(ref);
} else {
/* Output P&N pair shares single HW output */
u8 out = zl3073x_output_pin_out_get(index);
- name = "OUT";
-
/* Skip the pin if it is connected to different DPLL channel */
- if (zl3073x_out_dpll_get(zldev, out) != zldpll->id) {
+ if (zl3073x_dev_out_dpll_get(zldev, out) != zldpll->id) {
dev_dbg(zldev->dev,
- "%s%u is driven by different DPLL\n", name,
- out);
+ "OUT%u is driven by different DPLL\n", out);
return false;
}
- is_diff = zl3073x_out_is_diff(zldev, out);
- is_enabled = zl3073x_out_is_enabled(zldev, out);
+ name = "OUT";
+ is_diff = zl3073x_dev_out_is_diff(zldev, out);
+ is_enabled = zl3073x_dev_output_pin_is_enabled(zldev, index);
}
/* Skip N-pin if the corresponding input/output is differential */
@@ -1983,6 +1626,8 @@ zl3073x_dpll_device_unregister(struct zl3073x_dpll *zldpll)
{
WARN(!zldpll->dpll_dev, "DPLL device is not registered\n");
+ cancel_work_sync(&zldpll->change_work);
+
dpll_device_unregister(zldpll->dpll_dev, &zl3073x_dpll_device_ops,
zldpll);
dpll_device_put(zldpll->dpll_dev);
@@ -2004,42 +1649,26 @@ zl3073x_dpll_pin_phase_offset_check(struct zl3073x_dpll_pin *pin)
struct zl3073x_dev *zldev = zldpll->dev;
unsigned int reg;
s64 phase_offset;
- u8 ref;
+ u8 ref_id;
int rc;
- ref = zl3073x_input_pin_ref_get(pin->id);
+ /* No phase offset if the ref monitor reports signal errors */
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ if (!zl3073x_dev_ref_is_status_ok(zldev, ref_id))
+ return false;
/* Select register to read phase offset value depending on pin and
* phase monitor state:
* 1) For connected pin use dpll_phase_err_data register
* 2) For other pins use appropriate ref_phase register if the phase
- * monitor feature is enabled and reference monitor does not
- * report signal errors for given input pin
+ * monitor feature is enabled.
*/
- if (pin->pin_state == DPLL_PIN_STATE_CONNECTED) {
+ if (pin->pin_state == DPLL_PIN_STATE_CONNECTED)
reg = ZL_REG_DPLL_PHASE_ERR_DATA(zldpll->id);
- } else if (zldpll->phase_monitor) {
- u8 status;
-
- /* Get reference monitor status */
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref),
- &status);
- if (rc) {
- dev_err(zldev->dev,
- "Failed to read %s refmon status: %pe\n",
- pin->label, ERR_PTR(rc));
-
- return false;
- }
-
- if (status != ZL_REF_MON_STATUS_OK)
- return false;
-
- reg = ZL_REG_REF_PHASE(ref);
- } else {
- /* The pin is not connected or phase monitor disabled */
+ else if (zldpll->phase_monitor)
+ reg = ZL_REG_REF_PHASE(ref_id);
+ else
return false;
- }
/* Read measured phase offset value */
rc = zl3073x_read_u48(zldev, reg, &phase_offset);
@@ -2078,32 +1707,22 @@ zl3073x_dpll_pin_ffo_check(struct zl3073x_dpll_pin *pin)
{
struct zl3073x_dpll *zldpll = pin->dpll;
struct zl3073x_dev *zldev = zldpll->dev;
- u8 ref, status;
- s64 ffo;
- int rc;
+ const struct zl3073x_ref *ref;
+ u8 ref_id;
/* Get reference monitor status */
- ref = zl3073x_input_pin_ref_get(pin->id);
- rc = zl3073x_read_u8(zldev, ZL_REG_REF_MON_STATUS(ref), &status);
- if (rc) {
- dev_err(zldev->dev, "Failed to read %s refmon status: %pe\n",
- pin->label, ERR_PTR(rc));
-
- return false;
- }
+ ref_id = zl3073x_input_pin_ref_get(pin->id);
+ ref = zl3073x_ref_state_get(zldev, ref_id);
/* Do not report ffo changes if the reference monitor reports errors */
- if (status != ZL_REF_MON_STATUS_OK)
+ if (!zl3073x_ref_is_status_ok(ref))
return false;
- /* Get the latest measured ref's ffo */
- ffo = zl3073x_ref_ffo_get(zldev, ref);
-
/* Compare with previous value */
- if (pin->freq_offset != ffo) {
+ if (pin->freq_offset != ref->ffo) {
dev_dbg(zldev->dev, "%s freq offset changed: %lld -> %lld\n",
- pin->label, pin->freq_offset, ffo);
- pin->freq_offset = ffo;
+ pin->label, pin->freq_offset, ref->ffo);
+ pin->freq_offset = ref->ffo;
return true;
}
@@ -2258,6 +1877,7 @@ zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch)
zldpll->dev = zldev;
zldpll->id = ch;
INIT_LIST_HEAD(&zldpll->pins);
+ INIT_WORK(&zldpll->change_work, zl3073x_dpll_change_work);
return zldpll;
}
diff --git a/drivers/dpll/zl3073x/dpll.h b/drivers/dpll/zl3073x/dpll.h
index 304910ffc9c0..e8c39b44b356 100644
--- a/drivers/dpll/zl3073x/dpll.h
+++ b/drivers/dpll/zl3073x/dpll.h
@@ -20,6 +20,7 @@
* @dpll_dev: pointer to registered DPLL device
* @lock_status: last saved DPLL lock status
* @pins: list of pins
+ * @change_work: device change notification work
*/
struct zl3073x_dpll {
struct list_head list;
@@ -32,6 +33,7 @@ struct zl3073x_dpll {
struct dpll_device *dpll_dev;
enum dpll_lock_status lock_status;
struct list_head pins;
+ struct work_struct change_work;
};
struct zl3073x_dpll *zl3073x_dpll_alloc(struct zl3073x_dev *zldev, u8 ch);
diff --git a/drivers/dpll/zl3073x/flash.c b/drivers/dpll/zl3073x/flash.c
new file mode 100644
index 000000000000..83452a77e3e9
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/minmax.h>
+#include <linux/netlink.h>
+#include <linux/sched/signal.h>
+#include <linux/sizes.h>
+#include <linux/sprintf.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <net/devlink.h>
+
+#include "core.h"
+#include "devlink.h"
+#include "flash.h"
+
+#define ZL_FLASH_ERR_PFX "FW update failed: "
+#define ZL_FLASH_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL_FLASH_ERR_PFX _msg, \
+ ## __VA_ARGS__)
+
+/**
+ * zl3073x_flash_download - Download image block to device memory
+ * @zldev: zl3073x device structure
+ * @component: name of the component to be downloaded
+ * @addr: device memory target address
+ * @data: pointer to data to download
+ * @size: size of data to download
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_download(struct zl3073x_dev *zldev, const char *component,
+ u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_CHECK_DELAY 5000 /* Check for interrupt every 5 seconds */
+ unsigned long check_time;
+ const void *ptr, *end;
+ int rc = 0;
+
+ dev_dbg(zldev->dev, "Downloading %zu bytes to device memory at 0x%0x\n",
+ size, addr);
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += 4, addr += 4) {
+ /* Write current word to HW memory */
+ rc = zl3073x_write_hwreg(zldev, addr,
+ get_unaligned((u32 *)ptr));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack,
+ "failed to write to memory at 0x%0x",
+ addr);
+ return rc;
+ }
+
+ if (time_is_before_jiffies(check_time)) {
+ if (signal_pending(current)) {
+ ZL_FLASH_ERR_MSG(extack,
+ "Flashing interrupted");
+ return -EINTR;
+ }
+
+ check_time = jiffies + msecs_to_jiffies(ZL_CHECK_DELAY);
+ }
+
+ /* Report status after each 1 kB block */
+ if ((ptr - data) % 1024 == 0)
+ zl3073x_devlink_flash_notify(zldev, "Downloading image",
+ component, ptr - data,
+ size);
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Downloading image", component,
+ ptr - data, size);
+
+ dev_dbg(zldev->dev, "%zu bytes downloaded to device memory\n", size);
+
+ return rc;
+}
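As a sense check of the cadence (hypothetical image size): a 1 MiB download issues 262144 u32 writes, emits a progress notification after every 1 kB (every 256 words) plus the final one, and tests for a pending signal whenever the 5 s check period has elapsed.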
+
+/**
+ * zl3073x_flash_error_check - Check for flash utility errors
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function checks for errors detected by the flash utility and
+ * reports them if any were found.
+ *
+ * Return: 0 on success, -EIO when errors are detected
+ */
+static int
+zl3073x_flash_error_check(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ u32 count, cause;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_COUNT, &count);
+ if (rc)
+ return rc;
+ else if (!count)
+ return 0; /* No error */
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_ERROR_CAUSE, &cause);
+ if (rc)
+ return rc;
+
+ /* Report errors */
+ ZL_FLASH_ERR_MSG(extack,
+ "utility error occurred: count=%u cause=0x%x", count,
+ cause);
+
+ return -EIO;
+}
+
+/**
+ * zl3073x_flash_wait_ready - Check or wait for utility to be ready to flash
+ * @zldev: zl3073x device structure
+ * @timeout_ms: timeout for the waiting
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_wait_ready(struct zl3073x_dev *zldev, unsigned int timeout_ms)
+{
+#define ZL_FLASH_POLL_DELAY_MS 100
+ unsigned long timeout;
+ int rc, i;
+
+ dev_dbg(zldev->dev, "Waiting for flashing to be ready\n");
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ for (i = 0; time_is_after_jiffies(timeout); i++) {
+ u8 value;
+
+ /* Check for interrupt every ~1 s */
+ if (i > 9) {
+ if (signal_pending(current))
+ return -EINTR;
+ i = 0;
+ }
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value = FIELD_GET(ZL_WRITE_FLASH_OP, value);
+
+ if (value == ZL_WRITE_FLASH_OP_DONE)
+ return 0; /* Successfully done */
+
+ msleep(ZL_FLASH_POLL_DELAY_MS);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * zl3073x_flash_cmd_wait - Perform flash operation and wait for finish
+ * @zldev: zl3073x device structure
+ * @operation: operation to perform
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_cmd_wait(struct zl3073x_dev *zldev, u32 operation,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_PHASE1_TIMEOUT_MS 60000 /* up to 1 minute */
+#define ZL_FLASH_PHASE2_TIMEOUT_MS 120000 /* up to 2 minutes */
+ u8 value;
+ int rc;
+
+ dev_dbg(zldev->dev, "Sending flash command: 0x%x\n", operation);
+
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE1_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ /* Issue the requested operation */
+ rc = zl3073x_read_u8(zldev, ZL_REG_WRITE_FLASH, &value);
+ if (rc)
+ return rc;
+
+ value &= ~ZL_WRITE_FLASH_OP;
+ value |= FIELD_PREP(ZL_WRITE_FLASH_OP, operation);
+
+ rc = zl3073x_write_u8(zldev, ZL_REG_WRITE_FLASH, value);
+ if (rc)
+ return rc;
+
+ /* Wait for command completion */
+ rc = zl3073x_flash_wait_ready(zldev, ZL_FLASH_PHASE2_TIMEOUT_MS);
+ if (rc)
+ return rc;
+
+ return zl3073x_flash_error_check(zldev, extack);
+}
+
+/**
+ * zl3073x_flash_get_sector_size - Get flash sector size
+ * @zldev: zl3073x device structure
+ * @sector_size: sector size returned by the function
+ *
+ * The function reads the flash sector size detected by flash utility and
+ * stores it into @sector_size.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_get_sector_size(struct zl3073x_dev *zldev, size_t *sector_size)
+{
+ u8 flash_info;
+ int rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_INFO, &flash_info);
+ if (rc)
+ return rc;
+
+ switch (FIELD_GET(ZL_FLASH_INFO_SECTOR_SIZE, flash_info)) {
+ case ZL_FLASH_INFO_SECTOR_4K:
+ *sector_size = SZ_4K;
+ break;
+ case ZL_FLASH_INFO_SECTOR_64K:
+ *sector_size = SZ_64K;
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_block - Download and flash memory block
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @operation: flash operation to perform
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function downloads the memory block given by the @data pointer and
+ * the size @size and flashes it into internal memory on flash page @page.
+ * The internal flash operation performed by the firmware is specified by
+ * the @operation parameter.
+ *
+ * Return: 0 on success, <0 on error
+ */
+static int
+zl3073x_flash_block(struct zl3073x_dev *zldev, const char *component,
+ u32 operation, u32 page, u32 addr, const void *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Download block to device memory */
+ rc = zl3073x_flash_download(zldev, component, addr, data, size, extack);
+ if (rc)
+ return rc;
+
+ /* Set address to flash from */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_START_ADDR, addr);
+ if (rc)
+ return rc;
+
+ /* Set size of block to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_IMAGE_SIZE, size);
+ if (rc)
+ return rc;
+
+ /* Set destination page to flash */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, page);
+ if (rc)
+ return rc;
+
+ /* Set filling pattern */
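+	/* All-ones presumably matches the erased flash state so that pad
+	 * bytes beyond the image are left unprogrammed.
+	 */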
+ rc = zl3073x_write_u32(zldev, ZL_REG_FILL_PATTERN, U32_MAX);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, 0,
+ size);
+
+ dev_dbg(zldev->dev, "Flashing %zu bytes to page %u\n", size, page);
+
+ /* Execute sectors flash operation */
+ rc = zl3073x_flash_cmd_wait(zldev, operation, extack);
+ if (rc)
+ return rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Flashing image", component, size,
+ size);
+
+ return 0;
+}
+
+/**
+ * zl3073x_flash_sectors - Flash sectors
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes the given @data of size @size to the internal flash
+ * memory block starting from page @page. It uses the sector flash method
+ * and has to take into account the flash sector size reported by the
+ * flash utility. The input data are split into blocks according to this
+ * sector size and each block is flashed separately.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+#define ZL_FLASH_MAX_BLOCK_SIZE 0x0001E000
+#define ZL_FLASH_PAGE_SIZE 256
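+	/* The max block size is presumably bounded by the utility download
+	 * buffer; it is aligned down below so that each flashed block covers
+	 * a whole number of flash sectors.
+	 */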
+ size_t max_block_size, block_size, sector_size;
+ const void *ptr, *end;
+ int rc;
+
+ /* Get flash sector size */
+ rc = zl3073x_flash_get_sector_size(zldev, &sector_size);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "Failed to get flash sector size");
+ return rc;
+ }
+
+ /* Determine max block size depending on sector size */
+ max_block_size = ALIGN_DOWN(ZL_FLASH_MAX_BLOCK_SIZE, sector_size);
+
+ for (ptr = data, end = data + size; ptr < end; ptr += block_size) {
+ char comp_str[32];
+
+ block_size = min_t(size_t, max_block_size, end - ptr);
+
+ /* Add suffix '-partN' if the requested component size is
+ * greater than max_block_size.
+ */
+ if (max_block_size < size)
+ snprintf(comp_str, sizeof(comp_str), "%s-part%zu",
+ component, (ptr - data) / max_block_size + 1);
+ else
+ strscpy(comp_str, component);
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, comp_str,
+ ZL_WRITE_FLASH_OP_SECTORS, page, addr,
+ ptr, block_size, extack);
+ if (rc)
+ goto finish;
+
+ /* Move to next page */
+ page += block_size / ZL_FLASH_PAGE_SIZE;
+ }
+
+finish:
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_page - Flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @page: destination flash page
+ * @addr: device memory address to load data
+ * @data: pointer to data to be flashed
+ * @size: size of data
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function flashes given @data with size of @size to the internal flash
+ * memory block starting with page @page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Flash the memory block */
+ rc = zl3073x_flash_block(zldev, component, ZL_WRITE_FLASH_OP_PAGE, page,
+ addr, data, size, extack);
+
+ zl3073x_devlink_flash_notify(zldev,
+ rc ? "Flashing failed" : "Flashing done",
+ component, 0, 0);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_page_copy - Copy flash page
+ * @zldev: zl3073x device structure
+ * @component: component name
+ * @src_page: source page to copy
+ * @dst_page: destination page
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function copies one flash page specified by @src_page into the flash
+ * page specified by @dst_page.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack)
+{
+ int rc;
+
+ /* Set source page to be copied */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_READ, src_page);
+ if (rc)
+ return rc;
+
+ /* Set destination page for the copy */
+ rc = zl3073x_write_u32(zldev, ZL_REG_FLASH_INDEX_WRITE, dst_page);
+ if (rc)
+ return rc;
+
+ /* Perform copy operation */
+ rc = zl3073x_flash_cmd_wait(zldev, ZL_WRITE_FLASH_OP_COPY_PAGE, extack);
+ if (rc)
+ ZL_FLASH_ERR_MSG(extack, "Failed to copy page %u to page %u",
+ src_page, dst_page);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_mode_verify - Check flash utility
+ * @zldev: zl3073x device structure
+ *
+ * Return: 0 if the flash utility is ready, <0 on error
+ */
+static int
+zl3073x_flash_mode_verify(struct zl3073x_dev *zldev)
+{
+ u8 family, release;
+ u32 hash;
+ int rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_FLASH_HASH, &hash);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_FAMILY, &family);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u8(zldev, ZL_REG_FLASH_RELEASE, &release);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev,
+ "Flash utility check: hash 0x%08x, fam 0x%02x, rel 0x%02x\n",
+ hash, family, release);
+
+ /* Return success for correct family */
+ return (family == 0x21) ? 0 : -ENODEV;
+}
+
+static int
+zl3073x_flash_host_ctrl_enable(struct zl3073x_dev *zldev)
+{
+ u8 host_ctrl;
+ int rc;
+
+ /* Enable host control */
+ rc = zl3073x_read_u8(zldev, ZL_REG_HOST_CONTROL, &host_ctrl);
+ if (rc)
+ return rc;
+
+ host_ctrl |= ZL_HOST_CONTROL_ENABLE;
+
+ return zl3073x_write_u8(zldev, ZL_REG_HOST_CONTROL, host_ctrl);
+}
+
+/**
+ * zl3073x_flash_mode_enter - Switch the device to flash mode
+ * @zldev: zl3073x device structure
+ * @util_ptr: buffer with flash utility
+ * @util_size: size of buffer with flash utility
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function prepares and switches the device into flash mode.
+ *
+ * The procedure:
+ * 1) Stop device CPU by specific HW register sequence
+ * 2) Download flash utility to device memory
+ * 3) Resume device CPU by specific HW register sequence
+ * 4) Check communication with flash utility
+ * 5) Enable host control necessary to access flash API
+ * 6) Check for potential error detected by the utility
+ *
+ * The API provided by normal firmware is not available in flash mode
+ * so the caller has to ensure that this API is not used in this mode.
+ *
+ * After performing the flash operation the caller should call
+ * zl3073x_flash_mode_leave() to return to normal operation.
+ *
+ * Return: 0 on success, <0 on error.
+ */
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack)
+{
+	/* Sequence to be written prior to the utility download */
+ static const struct zl3073x_hwreg_seq_item pre_seq[] = {
+ HWREG_SEQ_ITEM(0x80000400, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80206340, 1, BIT(4), 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(2), 0),
+ HWREG_SEQ_ITEM(0x10000024, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000001, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, BIT(10), 1000),
+ };
+	/* Sequence to be written after the utility download */
+ static const struct zl3073x_hwreg_seq_item post_seq[] = {
+ HWREG_SEQ_ITEM(0x10400004, 0x000000C0, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400008, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400010, 0x20000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10400014, 0x20000004, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 1, GENMASK(10, 9), 0),
+ HWREG_SEQ_ITEM(0x10000020, 0x00000000, U32_MAX, 0),
+ HWREG_SEQ_ITEM(0x10000000, 0, BIT(0), 1000),
+ };
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Prepare flash mode", "utility",
+ 0, 0);
+
+	/* Execute pre-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, pre_seq, ARRAY_SIZE(pre_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute pre-load sequence");
+ goto error;
+ }
+
+ /* Download utility image to device memory */
+ rc = zl3073x_flash_download(zldev, "utility", 0x20000000, util_ptr,
+ util_size, extack);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot download flash utility");
+ goto error;
+ }
+
+ /* Execute post-load sequence */
+ rc = zl3073x_write_hwreg_seq(zldev, post_seq, ARRAY_SIZE(post_seq));
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot execute post-load sequence");
+ goto error;
+ }
+
+ /* Check that utility identifies itself correctly */
+ rc = zl3073x_flash_mode_verify(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "flash utility check failed");
+ goto error;
+ }
+
+ /* Enable host control */
+ rc = zl3073x_flash_host_ctrl_enable(zldev);
+ if (rc) {
+ ZL_FLASH_ERR_MSG(extack, "cannot enable host control");
+ goto error;
+ }
+
+ zl3073x_devlink_flash_notify(zldev, "Flash mode enabled", "utility",
+ 0, 0);
+
+ return 0;
+
+error:
+ zl3073x_flash_mode_leave(zldev, extack);
+
+ return rc;
+}
+
+/**
+ * zl3073x_flash_mode_leave - Leave flash mode
+ * @zldev: zl3073x device structure
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function instructs the device to leave the flash mode and
+ * to return to normal operation.
+ *
+ * The procedure:
+ * 1) Set reset flag
+ * 2) Reset the device CPU by specific HW register sequence
+ * 3) Wait for the device to be ready
+ * 4) Check the reset flag was cleared
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack)
+{
+ /* Sequence to be written after flash */
+ static const struct zl3073x_hwreg_seq_item fw_reset_seq[] = {
+ HWREG_SEQ_ITEM(0x80000404, 1, BIT(0), 0),
+ HWREG_SEQ_ITEM(0x80000410, 1, BIT(0), 0),
+ };
+ u8 reset_status;
+ int rc;
+
+ zl3073x_devlink_flash_notify(zldev, "Leaving flash mode", "utility",
+ 0, 0);
+
+ /* Read reset status register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Set reset bit */
+ reset_status |= ZL_REG_RESET_STATUS_RESET;
+
+ /* Update reset status register */
+ rc = zl3073x_write_u8(zldev, ZL_REG_RESET_STATUS, reset_status);
+ if (rc)
+ return rc;
+
+	/* We do not check the return value here as the sequence resets
+	 * the device CPU and the last write always returns an error.
+	 */
+ zl3073x_write_hwreg_seq(zldev, fw_reset_seq, ARRAY_SIZE(fw_reset_seq));
+
+ /* Wait for the device to be ready */
+ msleep(500);
+
+ /* Read again the reset status register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_RESET_STATUS, &reset_status);
+ if (rc)
+ return rc;
+
+ /* Check the reset bit was cleared */
+ if (reset_status & ZL_REG_RESET_STATUS_RESET) {
+ dev_err(zldev->dev,
+ "Reset not confirmed after switch to normal mode\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/dpll/zl3073x/flash.h b/drivers/dpll/zl3073x/flash.h
new file mode 100644
index 000000000000..effe1b16b359
--- /dev/null
+++ b/drivers/dpll/zl3073x/flash.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef __ZL3073X_FLASH_H
+#define __ZL3073X_FLASH_H
+
+#include <linux/types.h>
+
+struct netlink_ext_ack;
+struct zl3073x_dev;
+
+int zl3073x_flash_mode_enter(struct zl3073x_dev *zldev, const void *util_ptr,
+ size_t util_size, struct netlink_ext_ack *extack);
+
+int zl3073x_flash_mode_leave(struct zl3073x_dev *zldev,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_page_copy(struct zl3073x_dev *zldev, const char *component,
+ u32 src_page, u32 dst_page,
+ struct netlink_ext_ack *extack);
+
+int zl3073x_flash_sectors(struct zl3073x_dev *zldev, const char *component,
+ u32 page, u32 addr, const void *data, size_t size,
+ struct netlink_ext_ack *extack);
+
+#endif /* __ZL3073X_FLASH_H */
diff --git a/drivers/dpll/zl3073x/fw.c b/drivers/dpll/zl3073x/fw.c
new file mode 100644
index 000000000000..55b638247f4b
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/array_size.h>
+#include <linux/build_bug.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "core.h"
+#include "flash.h"
+#include "fw.h"
+
+#define ZL3073X_FW_ERR_PFX "FW load failed: "
+#define ZL3073X_FW_ERR_MSG(_extack, _msg, ...) \
+ NL_SET_ERR_MSG_FMT_MOD((_extack), ZL3073X_FW_ERR_PFX _msg, \
+ ## __VA_ARGS__)
+
+enum zl3073x_flash_type {
+ ZL3073X_FLASH_TYPE_NONE = 0,
+ ZL3073X_FLASH_TYPE_SECTORS,
+ ZL3073X_FLASH_TYPE_PAGE,
+ ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+};
+
+struct zl3073x_fw_component_info {
+ const char *name;
+ size_t max_size;
+ enum zl3073x_flash_type flash_type;
+ u32 load_addr;
+ u32 dest_page;
+ u32 copy_page;
+};
+
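+/* Flash layout of the individual components: @load_addr is the device
+ * memory address used to stage the image, @dest_page and @copy_page are
+ * indices of 256-byte flash pages (see ZL_FLASH_PAGE_SIZE in flash.c).
+ */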
+static const struct zl3073x_fw_component_info component_info[] = {
+ [ZL_FW_COMPONENT_UTIL] = {
+ .name = "utility",
+ .max_size = 0x4000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_NONE,
+ },
+ [ZL_FW_COMPONENT_FW1] = {
+ .name = "firmware1",
+ .max_size = 0x35000,
+ .load_addr = 0x20002000,
+ .flash_type = ZL3073X_FLASH_TYPE_SECTORS,
+ .dest_page = 0x020,
+ },
+ [ZL_FW_COMPONENT_FW2] = {
+ .name = "firmware2",
+ .max_size = 0x0040,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e0,
+ .copy_page = 0x000,
+ },
+ [ZL_FW_COMPONENT_FW3] = {
+ .name = "firmware3",
+ .max_size = 0x0248,
+ .load_addr = 0x20000400,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE_AND_COPY,
+ .dest_page = 0x3e4,
+ .copy_page = 0x004,
+ },
+ [ZL_FW_COMPONENT_CFG0] = {
+ .name = "config0",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3d0,
+ },
+ [ZL_FW_COMPONENT_CFG1] = {
+ .name = "config1",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3c0,
+ },
+ [ZL_FW_COMPONENT_CFG2] = {
+ .name = "config2",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3b0,
+ },
+ [ZL_FW_COMPONENT_CFG3] = {
+ .name = "config3",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x3a0,
+ },
+ [ZL_FW_COMPONENT_CFG4] = {
+ .name = "config4",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x390,
+ },
+ [ZL_FW_COMPONENT_CFG5] = {
+ .name = "config5",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x380,
+ },
+ [ZL_FW_COMPONENT_CFG6] = {
+ .name = "config6",
+ .max_size = 0x1000,
+ .load_addr = 0x20000000,
+ .flash_type = ZL3073X_FLASH_TYPE_PAGE,
+ .dest_page = 0x370,
+ },
+};
+
+/* Sanity check */
+static_assert(ARRAY_SIZE(component_info) == ZL_FW_NUM_COMPONENTS);
+
+/**
+ * zl3073x_fw_component_alloc - Alloc structure to hold firmware component
+ * @size: size of buffer to store data
+ *
+ * Return: pointer to allocated component structure or NULL on error.
+ */
+static struct zl3073x_fw_component *
+zl3073x_fw_component_alloc(size_t size)
+{
+ struct zl3073x_fw_component *comp;
+
+ comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return NULL;
+
+ comp->size = size;
+ comp->data = kzalloc(size, GFP_KERNEL);
+ if (!comp->data) {
+ kfree(comp);
+ return NULL;
+ }
+
+ return comp;
+}
+
+/**
+ * zl3073x_fw_component_free - Free allocated component structure
+ * @comp: pointer to allocated component
+ */
+static void
+zl3073x_fw_component_free(struct zl3073x_fw_component *comp)
+{
+ if (comp)
+ kfree(comp->data);
+
+ kfree(comp);
+}
+
+/**
+ * zl3073x_fw_component_id_get - Get ID for firmware component name
+ * @name: input firmware component name
+ *
+ * Return:
+ * - ZL_FW_COMPONENT_* ID for a known component name
+ * - ZL_FW_COMPONENT_INVALID if the given name is unknown
+ */
+static enum zl3073x_fw_component_id
+zl3073x_fw_component_id_get(const char *name)
+{
+ enum zl3073x_fw_component_id id;
+
+ for (id = 0; id < ZL_FW_NUM_COMPONENTS; id++)
+ if (!strcasecmp(name, component_info[id].name))
+ return id;
+
+ return ZL_FW_COMPONENT_INVALID;
+}
+
+/**
+ * zl3073x_fw_component_load - Load component from firmware source
+ * @zldev: zl3073x device structure
+ * @pcomp: pointer to loaded component
+ * @psrc: data pointer to load component from
+ * @psize: remaining bytes in buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a single firmware component and loads the data from
+ * the buffer specified by @psrc and @psize. A pointer to the allocated
+ * component is stored in the output @pcomp. The source data pointer @psrc
+ * and the remaining byte count @psize are updated accordingly.
+ *
+ * Return:
+ * * 1 when component was allocated and loaded
+ * * 0 when there is no component to load
+ * * <0 on error
+ */
+static ssize_t
+zl3073x_fw_component_load(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component **pcomp,
+ const char **psrc, size_t *psize,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ struct zl3073x_fw_component *comp = NULL;
+ struct device *dev = zldev->dev;
+ enum zl3073x_fw_component_id id;
+ char buf[32], name[16];
+ u32 count, size, *dest;
+ int pos, rc;
+
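+	/* The firmware source is text: each component is introduced by its
+	 * name and 32-bit word count, followed by that many hexadecimal
+	 * words, all of which are parsed below.
+	 */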
+ /* Fetch image name and size from input */
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%15s %u %n", name, &count, &pos);
+ if (!rc) {
+ /* No more data */
+ return 0;
+ } else if (rc == 1 || count > U32_MAX / sizeof(u32)) {
+ ZL3073X_FW_ERR_MSG(extack, "invalid component size");
+ return -EINVAL;
+ }
+ *psrc += pos;
+ *psize -= pos;
+
+ dev_dbg(dev, "Firmware component '%s' found\n", name);
+
+ id = zl3073x_fw_component_id_get(name);
+ if (id == ZL_FW_COMPONENT_INVALID) {
+ ZL3073X_FW_ERR_MSG(extack, "unknown component type '%s'", name);
+ return -EINVAL;
+ }
+
+ info = &component_info[id];
+ size = count * sizeof(u32); /* get size in bytes */
+
+ /* Check image size validity */
+ if (size > component_info[id].max_size) {
+		ZL3073X_FW_ERR_MSG(extack,
+				   "[%s] component is too big (%u bytes)",
+				   info->name, size);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Indicated component image size: %u bytes\n", size);
+
+ /* Alloc component */
+ comp = zl3073x_fw_component_alloc(size);
+ if (!comp) {
+ ZL3073X_FW_ERR_MSG(extack, "failed to alloc memory");
+ return -ENOMEM;
+ }
+ comp->id = id;
+
+ /* Load component data from firmware source */
+ for (dest = comp->data; count; count--, dest++) {
+ strscpy(buf, *psrc, min(sizeof(buf), *psize));
+ rc = sscanf(buf, "%x %n", dest, &pos);
+ if (!rc)
+ goto err_data;
+
+ *psrc += pos;
+ *psize -= pos;
+ }
+
+ *pcomp = comp;
+
+ return 1;
+
+err_data:
+ ZL3073X_FW_ERR_MSG(extack, "[%s] invalid or missing data", info->name);
+
+ zl3073x_fw_component_free(comp);
+
+ return -ENODATA;
+}
+
+/**
+ * zl3073x_fw_free - Free allocated firmware
+ * @fw: firmware pointer
+ *
+ * The function frees existing firmware allocated by zl3073x_fw_load().
+ */
+void zl3073x_fw_free(struct zl3073x_fw *fw)
+{
+ size_t i;
+
+ if (!fw)
+ return;
+
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++)
+ zl3073x_fw_component_free(fw->component[i]);
+
+ kfree(fw);
+}
+
+/**
+ * zl3073x_fw_load - Load all components from source
+ * @zldev: zl3073x device structure
+ * @data: source buffer pointer
+ * @size: size of source buffer
+ * @extack: netlink extack pointer to report errors
+ *
+ * The function allocates a firmware structure and loads all components from
+ * the buffer specified by @data and @size.
+ *
+ * Return: pointer to firmware on success, error pointer on error
+ */
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack)
+{
+ struct zl3073x_fw_component *comp;
+ enum zl3073x_fw_component_id id;
+ struct zl3073x_fw *fw;
+ ssize_t rc;
+
+ /* Allocate firmware structure */
+ fw = kzalloc(sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ /* Load single component */
+ rc = zl3073x_fw_component_load(zldev, &comp, &data, &size,
+ extack);
+ if (rc <= 0)
+ /* Everything was read or error occurred */
+ break;
+
+ id = comp->id;
+
+ /* Report error if the given component is present twice
+ * or more.
+ */
+ if (fw->component[id]) {
+ ZL3073X_FW_ERR_MSG(extack,
+ "duplicate component '%s' detected",
+ component_info[id].name);
+ zl3073x_fw_component_free(comp);
+ rc = -EINVAL;
+ break;
+ }
+
+ fw->component[id] = comp;
+ } while (true);
+
+ if (rc) {
+ /* Free allocated firmware in case of error */
+ zl3073x_fw_free(fw);
+ return ERR_PTR(rc);
+ }
+
+ return fw;
+}
+
+/**
+ * zl3073x_fw_component_flash - Flash a single firmware component
+ * @zldev: zl3073x device structure
+ * @comp: pointer to the component to be flashed
+ * @extack: netlink extack pointer to report errors
+ *
+ * Return: 0 in case of success or negative number otherwise.
+ */
+static int
+zl3073x_fw_component_flash(struct zl3073x_dev *zldev,
+ struct zl3073x_fw_component *comp,
+ struct netlink_ext_ack *extack)
+{
+ const struct zl3073x_fw_component_info *info;
+ int rc;
+
+ info = &component_info[comp->id];
+
+ switch (info->flash_type) {
+ case ZL3073X_FLASH_TYPE_NONE:
+ /* Non-flashable component - used for utility */
+ return 0;
+ case ZL3073X_FLASH_TYPE_SECTORS:
+ rc = zl3073x_flash_sectors(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data,
+ comp->size, extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ break;
+ case ZL3073X_FLASH_TYPE_PAGE_AND_COPY:
+ rc = zl3073x_flash_page(zldev, info->name, info->dest_page,
+ info->load_addr, comp->data, comp->size,
+ extack);
+ if (!rc)
+ rc = zl3073x_flash_page_copy(zldev, info->name,
+ info->dest_page,
+ info->copy_page, extack);
+ break;
+ }
+ if (rc)
+ ZL3073X_FW_ERR_MSG(extack, "Failed to flash component '%s'",
+ info->name);
+
+ return rc;
+}
+
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < ZL_FW_NUM_COMPONENTS; i++) {
+ if (!zlfw->component[i])
+ continue; /* Component is not present */
+
+ rc = zl3073x_fw_component_flash(zldev, zlfw->component[i],
+ extack);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/dpll/zl3073x/fw.h b/drivers/dpll/zl3073x/fw.h
new file mode 100644
index 000000000000..fcaa89ab075e
--- /dev/null
+++ b/drivers/dpll/zl3073x/fw.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_FW_H
+#define _ZL3073X_FW_H
+
+/*
+ * enum zl3073x_fw_component_id - Identifiers for possible flash components
+ */
+enum zl3073x_fw_component_id {
+ ZL_FW_COMPONENT_INVALID = -1,
+ ZL_FW_COMPONENT_UTIL = 0,
+ ZL_FW_COMPONENT_FW1,
+ ZL_FW_COMPONENT_FW2,
+ ZL_FW_COMPONENT_FW3,
+ ZL_FW_COMPONENT_CFG0,
+ ZL_FW_COMPONENT_CFG1,
+ ZL_FW_COMPONENT_CFG2,
+ ZL_FW_COMPONENT_CFG3,
+ ZL_FW_COMPONENT_CFG4,
+ ZL_FW_COMPONENT_CFG5,
+ ZL_FW_COMPONENT_CFG6,
+ ZL_FW_NUM_COMPONENTS
+};
+
+/**
+ * struct zl3073x_fw_component - Firmware component
+ * @id: Flash component ID
+ * @size: Size of the buffer
+ * @data: Pointer to buffer with component data
+ */
+struct zl3073x_fw_component {
+ enum zl3073x_fw_component_id id;
+ size_t size;
+ void *data;
+};
+
+/**
+ * struct zl3073x_fw - Firmware bundle
+ * @component: firmware components array
+ */
+struct zl3073x_fw {
+ struct zl3073x_fw_component *component[ZL_FW_NUM_COMPONENTS];
+};
+
+struct zl3073x_fw *zl3073x_fw_load(struct zl3073x_dev *zldev, const char *data,
+ size_t size, struct netlink_ext_ack *extack);
+void zl3073x_fw_free(struct zl3073x_fw *fw);
+
+int zl3073x_fw_flash(struct zl3073x_dev *zldev, struct zl3073x_fw *zlfw,
+ struct netlink_ext_ack *extack);
+
+#endif /* _ZL3073X_FW_H */
diff --git a/drivers/dpll/zl3073x/out.c b/drivers/dpll/zl3073x/out.c
new file mode 100644
index 000000000000..86829a0c1c02
--- /dev/null
+++ b/drivers/dpll/zl3073x/out.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "out.h"
+
+/**
+ * zl3073x_out_state_fetch - fetch output state from hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: output index to fetch state for
+ *
+ * The function fetches the state of the given output from the hardware
+ * and stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_out *out = &zldev->out[index];
+ int rc;
+
+	/* Read output control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_CTRL(index), &out->ctrl);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "OUT%u is %s and connected to SYNTH%u\n", index,
+ str_enabled_disabled(zl3073x_out_is_enabled(out)),
+ zl3073x_out_synth_get(out));
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read output mode */
+ rc = zl3073x_read_u8(zldev, ZL_REG_OUTPUT_MODE, &out->mode);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "OUT%u has signal format 0x%02x\n", index,
+ zl3073x_out_signal_format_get(out));
+
+ /* Read output divisor */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_DIV, &out->div);
+ if (rc)
+ return rc;
+
+ if (!out->div) {
+ dev_err(zldev->dev, "Zero divisor for OUT%u got from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ dev_dbg(zldev->dev, "OUT%u divisor: %u\n", index, out->div);
+
+ /* Read output width */
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_WIDTH, &out->width);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
+ &out->esync_n_period);
+ if (rc)
+ return rc;
+
+ if (!out->esync_n_period) {
+ dev_err(zldev->dev,
+ "Zero esync divisor for OUT%u got from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH,
+ &out->esync_n_width);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP,
+ &out->phase_comp);
+ if (rc)
+ return rc;
+
+ return rc;
+}
+
+/**
+ * zl3073x_out_state_get - get current output state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: output index to get state for
+ *
+ * Return: pointer to given output state
+ */
+const struct zl3073x_out *zl3073x_out_state_get(struct zl3073x_dev *zldev,
+ u8 index)
+{
+ return &zldev->out[index];
+}
+
+int zl3073x_out_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_out *out)
+{
+ struct zl3073x_out *dout = &zldev->out[index];
+ int rc;
+
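+	/* Read-modify-write through the device mailbox: load the current
+	 * output configuration, update only the fields that differ from
+	 * the cached state and commit the result back to the device.
+	 */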
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read output configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_RD,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Update mailbox with changed values */
+ if (dout->div != out->div)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_DIV, out->div);
+ if (!rc && dout->width != out->width)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_WIDTH, out->width);
+ if (!rc && dout->esync_n_period != out->esync_n_period)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_PERIOD,
+ out->esync_n_period);
+ if (!rc && dout->esync_n_width != out->esync_n_width)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_ESYNC_WIDTH,
+ out->esync_n_width);
+ if (!rc && dout->mode != out->mode)
+ rc = zl3073x_write_u8(zldev, ZL_REG_OUTPUT_MODE, out->mode);
+ if (!rc && dout->phase_comp != out->phase_comp)
+ rc = zl3073x_write_u32(zldev, ZL_REG_OUTPUT_PHASE_COMP,
+ out->phase_comp);
+ if (rc)
+ return rc;
+
+ /* Commit output configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_OUTPUT_MB_SEM, ZL_OUTPUT_MB_SEM_WR,
+ ZL_REG_OUTPUT_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* After successful commit store new state */
+ dout->div = out->div;
+ dout->width = out->width;
+ dout->esync_n_period = out->esync_n_period;
+ dout->esync_n_width = out->esync_n_width;
+ dout->mode = out->mode;
+ dout->phase_comp = out->phase_comp;
+
+ return 0;
+}
diff --git a/drivers/dpll/zl3073x/out.h b/drivers/dpll/zl3073x/out.h
new file mode 100644
index 000000000000..e8ea7a0e0f07
--- /dev/null
+++ b/drivers/dpll/zl3073x/out.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_OUT_H
+#define _ZL3073X_OUT_H
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct zl3073x_dev;
+
+/**
+ * struct zl3073x_out - output state
+ * @div: output divisor
+ * @width: output pulse width
+ * @esync_n_period: embedded sync or n-pin period (for n-div formats)
+ * @esync_n_width: embedded sync or n-pin pulse width
+ * @phase_comp: phase compensation
+ * @ctrl: output control
+ * @mode: output mode
+ */
+struct zl3073x_out {
+ u32 div;
+ u32 width;
+ u32 esync_n_period;
+ u32 esync_n_width;
+ s32 phase_comp;
+ u8 ctrl;
+ u8 mode;
+};
+
+int zl3073x_out_state_fetch(struct zl3073x_dev *zldev, u8 index);
+const struct zl3073x_out *zl3073x_out_state_get(struct zl3073x_dev *zldev,
+ u8 index);
+
+int zl3073x_out_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_out *out);
+
+/**
+ * zl3073x_out_signal_format_get - get output signal format
+ * @out: pointer to out state
+ *
+ * Return: signal format of given output
+ */
+static inline u8 zl3073x_out_signal_format_get(const struct zl3073x_out *out)
+{
+ return FIELD_GET(ZL_OUTPUT_MODE_SIGNAL_FORMAT, out->mode);
+}
+
+/**
+ * zl3073x_out_is_diff - check if the given output is differential
+ * @out: pointer to out state
+ *
+ * Return: true if output is differential, false if output is single-ended
+ */
+static inline bool zl3073x_out_is_diff(const struct zl3073x_out *out)
+{
+ switch (zl3073x_out_signal_format_get(out)) {
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LVDS:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_DIFF:
+ case ZL_OUTPUT_MODE_SIGNAL_FORMAT_LOWVCM:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
+ * zl3073x_out_is_enabled - check if the given output is enabled
+ * @out: pointer to out state
+ *
+ * Return: true if output is enabled, false if output is disabled
+ */
+static inline bool zl3073x_out_is_enabled(const struct zl3073x_out *out)
+{
+ return !!FIELD_GET(ZL_OUTPUT_CTRL_EN, out->ctrl);
+}
+
+/**
+ * zl3073x_out_synth_get - get synth connected to given output
+ * @out: pointer to out state
+ *
+ * Return: index of synth connected to given output.
+ */
+static inline u8 zl3073x_out_synth_get(const struct zl3073x_out *out)
+{
+ return FIELD_GET(ZL_OUTPUT_CTRL_SYNTH_SEL, out->ctrl);
+}
+
+#endif /* _ZL3073X_OUT_H */
diff --git a/drivers/dpll/zl3073x/prop.c b/drivers/dpll/zl3073x/prop.c
index 4cf7e8aefcb3..4ed153087570 100644
--- a/drivers/dpll/zl3073x/prop.c
+++ b/drivers/dpll/zl3073x/prop.c
@@ -46,10 +46,10 @@ zl3073x_pin_check_freq(struct zl3073x_dev *zldev, enum dpll_pin_direction dir,
/* Get output pin synthesizer */
out = zl3073x_output_pin_out_get(id);
- synth = zl3073x_out_synth_get(zldev, out);
+ synth = zl3073x_dev_out_synth_get(zldev, out);
/* Get synth frequency */
- synth_freq = zl3073x_synth_freq_get(zldev, synth);
+ synth_freq = zl3073x_dev_synth_freq_get(zldev, synth);
/* Check the frequency divides synth frequency */
if (synth_freq % (u32)freq)
@@ -93,13 +93,13 @@ zl3073x_prop_pin_package_label_set(struct zl3073x_dev *zldev,
prefix = "REF";
ref = zl3073x_input_pin_ref_get(id);
- is_diff = zl3073x_ref_is_diff(zldev, ref);
+ is_diff = zl3073x_dev_ref_is_diff(zldev, ref);
} else {
u8 out;
prefix = "OUT";
out = zl3073x_output_pin_out_get(id);
- is_diff = zl3073x_out_is_diff(zldev, out);
+ is_diff = zl3073x_dev_out_is_diff(zldev, out);
}
if (!is_diff)
@@ -208,7 +208,18 @@ struct zl3073x_pin_props *zl3073x_pin_props_get(struct zl3073x_dev *zldev,
DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
} else {
+ u8 out, synth;
+ u32 f;
+
props->dpll_props.type = DPLL_PIN_TYPE_GNSS;
+
+		/* The output pin phase adjustment granularity equals half
+		 * of the synth clock period.
+		 */
+ out = zl3073x_output_pin_out_get(index);
+ synth = zl3073x_dev_out_synth_get(zldev, out);
+ f = 2 * zl3073x_dev_synth_freq_get(zldev, synth);
+ props->dpll_props.phase_gran = f ? div_u64(PSEC_PER_SEC, f) : 1;
}
props->dpll_props.phase_range.min = S32_MIN;
diff --git a/drivers/dpll/zl3073x/ref.c b/drivers/dpll/zl3073x/ref.c
new file mode 100644
index 000000000000..aa2de13effa8
--- /dev/null
+++ b/drivers/dpll/zl3073x/ref.c
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "ref.h"
+
+/**
+ * zl3073x_ref_freq_factorize - factorize given frequency
+ * @freq: input frequency
+ * @base: base frequency
+ * @mult: multiplier
+ *
+ * Checks if the given frequency can be factorized using one of the
+ * supported base frequencies. If so, the base frequency and multiplier
+ * are stored into the respective output parameters if they are not NULL.
+ *
+ * Return: 0 on success, -EINVAL if the frequency cannot be factorized
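+ *
+ * For example, 10 MHz is factorized as base 160 and multiplier 62500
+ * (160 * 62500 = 10000000), the first base in the table for which the
+ * multiplier fits into a u16.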
+ */
+int
+zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult)
+{
+ static const u16 base_freqs[] = {
+ 1, 2, 4, 5, 8, 10, 16, 20, 25, 32, 40, 50, 64, 80, 100, 125,
+ 128, 160, 200, 250, 256, 320, 400, 500, 625, 640, 800, 1000,
+ 1250, 1280, 1600, 2000, 2500, 3125, 3200, 4000, 5000, 6250,
+ 6400, 8000, 10000, 12500, 15625, 16000, 20000, 25000, 31250,
+ 32000, 40000, 50000, 62500,
+ };
+ u32 div;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(base_freqs); i++) {
+ div = freq / base_freqs[i];
+
+ if (div <= U16_MAX && (freq % base_freqs[i]) == 0) {
+ if (base)
+ *base = base_freqs[i];
+ if (mult)
+ *mult = div;
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * zl3073x_ref_state_fetch - fetch input reference state from hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: input reference index to fetch state for
+ *
+ * The function fetches the state of the given input reference from the
+ * hardware and stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_ref *ref = &zldev->ref[index];
+ int rc;
+
+ /* For differential type inputs the N-pin reference shares
+ * part of the configuration with the P-pin counterpart.
+ */
+ if (zl3073x_is_n_pin(index) && zl3073x_ref_is_diff(ref - 1)) {
+		struct zl3073x_ref *p_ref = ref - 1; /* P-pin counterpart */
+
+ /* Copy the shared items from the P-pin */
+ ref->config = p_ref->config;
+ ref->esync_n_div = p_ref->esync_n_div;
+ ref->freq_base = p_ref->freq_base;
+ ref->freq_mult = p_ref->freq_mult;
+ ref->freq_ratio_m = p_ref->freq_ratio_m;
+ ref->freq_ratio_n = p_ref->freq_ratio_n;
+ ref->phase_comp = p_ref->phase_comp;
+ ref->sync_ctrl = p_ref->sync_ctrl;
+
+ return 0; /* Finish - no non-shared items for now */
+ }
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Read ref_config register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_CONFIG, &ref->config);
+ if (rc)
+ return rc;
+
+ /* Read frequency related registers */
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_BASE, &ref->freq_base);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_FREQ_MULT, &ref->freq_mult);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_M, &ref->freq_ratio_m);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u16(zldev, ZL_REG_REF_RATIO_N, &ref->freq_ratio_n);
+ if (rc)
+ return rc;
+
+ /* Read eSync and N-div rated registers */
+ rc = zl3073x_read_u32(zldev, ZL_REG_REF_ESYNC_DIV, &ref->esync_n_div);
+ if (rc)
+ return rc;
+ rc = zl3073x_read_u8(zldev, ZL_REG_REF_SYNC_CTRL, &ref->sync_ctrl);
+ if (rc)
+ return rc;
+
+ /* Read phase compensation register */
+ rc = zl3073x_read_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP,
+ &ref->phase_comp);
+ if (rc)
+ return rc;
+
+ dev_dbg(zldev->dev, "REF%u is %s and configured as %s\n", index,
+ str_enabled_disabled(zl3073x_ref_is_enabled(ref)),
+ zl3073x_ref_is_diff(ref) ? "differential" : "single-ended");
+
+ return rc;
+}
+
+/**
+ * zl3073x_ref_state_get - get current input reference state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: input reference index to get state for
+ *
+ * Return: pointer to given input reference state
+ */
+const struct zl3073x_ref *
+zl3073x_ref_state_get(struct zl3073x_dev *zldev, u8 index)
+{
+ return &zldev->ref[index];
+}
+
+int zl3073x_ref_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_ref *ref)
+{
+ struct zl3073x_ref *dref = &zldev->ref[index];
+ int rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read reference configuration into mailbox */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_RD,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* Update mailbox with changed values */
+ if (dref->freq_base != ref->freq_base)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_BASE,
+ ref->freq_base);
+ if (!rc && dref->freq_mult != ref->freq_mult)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_FREQ_MULT,
+ ref->freq_mult);
+ if (!rc && dref->freq_ratio_m != ref->freq_ratio_m)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_M,
+ ref->freq_ratio_m);
+ if (!rc && dref->freq_ratio_n != ref->freq_ratio_n)
+ rc = zl3073x_write_u16(zldev, ZL_REG_REF_RATIO_N,
+ ref->freq_ratio_n);
+ if (!rc && dref->esync_n_div != ref->esync_n_div)
+ rc = zl3073x_write_u32(zldev, ZL_REG_REF_ESYNC_DIV,
+ ref->esync_n_div);
+ if (!rc && dref->sync_ctrl != ref->sync_ctrl)
+ rc = zl3073x_write_u8(zldev, ZL_REG_REF_SYNC_CTRL,
+ ref->sync_ctrl);
+ if (!rc && dref->phase_comp != ref->phase_comp)
+ rc = zl3073x_write_u48(zldev, ZL_REG_REF_PHASE_OFFSET_COMP,
+ ref->phase_comp);
+ if (rc)
+ return rc;
+
+ /* Commit reference configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_REF_MB_SEM, ZL_REF_MB_SEM_WR,
+ ZL_REG_REF_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* After successful commit store new state */
+ dref->freq_base = ref->freq_base;
+ dref->freq_mult = ref->freq_mult;
+ dref->freq_ratio_m = ref->freq_ratio_m;
+ dref->freq_ratio_n = ref->freq_ratio_n;
+ dref->esync_n_div = ref->esync_n_div;
+ dref->sync_ctrl = ref->sync_ctrl;
+ dref->phase_comp = ref->phase_comp;
+
+ return 0;
+}
diff --git a/drivers/dpll/zl3073x/ref.h b/drivers/dpll/zl3073x/ref.h
new file mode 100644
index 000000000000..efc7f59cd9f9
--- /dev/null
+++ b/drivers/dpll/zl3073x/ref.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_REF_H
+#define _ZL3073X_REF_H
+
+#include <linux/bitfield.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct zl3073x_dev;
+
+/**
+ * struct zl3073x_ref - input reference state
+ * @ffo: current fractional frequency offset
+ * @phase_comp: phase compensation
+ * @esync_n_div: divisor for embedded sync or n-divided signal formats
+ * @freq_base: frequency base
+ * @freq_mult: frequency multiplier
+ * @freq_ratio_m: FEC mode multiplier
+ * @freq_ratio_n: FEC mode divisor
+ * @config: reference config
+ * @sync_ctrl: reference sync control
+ * @mon_status: reference monitor status
+ */
+struct zl3073x_ref {
+ s64 ffo;
+ u64 phase_comp;
+ u32 esync_n_div;
+ u16 freq_base;
+ u16 freq_mult;
+ u16 freq_ratio_m;
+ u16 freq_ratio_n;
+ u8 config;
+ u8 sync_ctrl;
+ u8 mon_status;
+};
+
+int zl3073x_ref_state_fetch(struct zl3073x_dev *zldev, u8 index);
+
+const struct zl3073x_ref *zl3073x_ref_state_get(struct zl3073x_dev *zldev,
+ u8 index);
+
+int zl3073x_ref_state_set(struct zl3073x_dev *zldev, u8 index,
+ const struct zl3073x_ref *ref);
+
+int zl3073x_ref_freq_factorize(u32 freq, u16 *base, u16 *mult);
+
+/**
+ * zl3073x_ref_ffo_get - get current fractional frequency offset
+ * @ref: pointer to ref state
+ *
+ * Return: the latest measured fractional frequency offset
+ */
+static inline s64
+zl3073x_ref_ffo_get(const struct zl3073x_ref *ref)
+{
+ return ref->ffo;
+}
+
+/**
+ * zl3073x_ref_freq_get - get given input reference frequency
+ * @ref: pointer to ref state
+ *
+ * Return: frequency of the given input reference
+ */
+static inline u32
+zl3073x_ref_freq_get(const struct zl3073x_ref *ref)
+{
+ return mul_u64_u32_div(ref->freq_base * ref->freq_mult,
+ ref->freq_ratio_m, ref->freq_ratio_n);
+}
+
+/**
+ * zl3073x_ref_freq_set - set given input reference frequency
+ * @ref: pointer to ref state
+ * @freq: frequency to be set
+ *
+ * Return: 0 on success, <0 when frequency cannot be factorized
+ */
+static inline int
+zl3073x_ref_freq_set(struct zl3073x_ref *ref, u32 freq)
+{
+ u16 base, mult;
+ int rc;
+
+ rc = zl3073x_ref_freq_factorize(freq, &base, &mult);
+ if (rc)
+ return rc;
+
+ ref->freq_base = base;
+ ref->freq_mult = mult;
+
+ return 0;
+}
+
+/**
+ * zl3073x_ref_is_diff - check if the given input reference is differential
+ * @ref: pointer to ref state
+ *
+ * Return: true if reference is differential, false if reference is single-ended
+ */
+static inline bool
+zl3073x_ref_is_diff(const struct zl3073x_ref *ref)
+{
+ return !!FIELD_GET(ZL_REF_CONFIG_DIFF_EN, ref->config);
+}
+
+/**
+ * zl3073x_ref_is_enabled - check if the given input reference is enabled
+ * @ref: pointer to ref state
+ *
+ * Return: true if the input reference is enabled, false otherwise
+ */
+static inline bool
+zl3073x_ref_is_enabled(const struct zl3073x_ref *ref)
+{
+ return !!FIELD_GET(ZL_REF_CONFIG_ENABLE, ref->config);
+}
+
+/**
+ * zl3073x_ref_is_status_ok - check the given input reference status
+ * @ref: pointer to ref state
+ *
+ * Return: true if the status is ok, false otherwise
+ */
+static inline bool
+zl3073x_ref_is_status_ok(const struct zl3073x_ref *ref)
+{
+ return ref->mon_status == ZL_REF_MON_STATUS_OK;
+}
+
+#endif /* _ZL3073X_REF_H */
diff --git a/drivers/dpll/zl3073x/regs.h b/drivers/dpll/zl3073x/regs.h
index 614e33128a5c..d837bee72b17 100644
--- a/drivers/dpll/zl3073x/regs.h
+++ b/drivers/dpll/zl3073x/regs.h
@@ -67,11 +67,17 @@
* Register Page 0, General
**************************/
+#define ZL_REG_INFO ZL_REG(0, 0x00, 1)
+#define ZL_INFO_READY BIT(7)
+
#define ZL_REG_ID ZL_REG(0, 0x01, 2)
#define ZL_REG_REVISION ZL_REG(0, 0x03, 2)
#define ZL_REG_FW_VER ZL_REG(0, 0x05, 2)
#define ZL_REG_CUSTOM_CONFIG_VER ZL_REG(0, 0x07, 4)
+#define ZL_REG_RESET_STATUS ZL_REG(0, 0x18, 1)
+#define ZL_REG_RESET_STATUS_RESET BIT(0)
+
/*************************
* Register Page 2, Status
*************************/
@@ -260,4 +266,52 @@
#define ZL_REG_OUTPUT_ESYNC_WIDTH ZL_REG(14, 0x18, 4)
#define ZL_REG_OUTPUT_PHASE_COMP ZL_REG(14, 0x20, 4)
+/*
+ * Register Page 255 - HW registers access
+ */
+#define ZL_REG_HWREG_OP ZL_REG(0xff, 0x00, 1)
+#define ZL_HWREG_OP_WRITE 0x28
+#define ZL_HWREG_OP_READ 0x29
+#define ZL_HWREG_OP_PENDING BIT(1)
+
+#define ZL_REG_HWREG_ADDR ZL_REG(0xff, 0x04, 4)
+#define ZL_REG_HWREG_WRITE_DATA ZL_REG(0xff, 0x08, 4)
+#define ZL_REG_HWREG_READ_DATA ZL_REG(0xff, 0x0c, 4)
+
+/*
+ * Registers available in flash mode
+ */
+#define ZL_REG_FLASH_HASH ZL_REG(0, 0x78, 4)
+#define ZL_REG_FLASH_FAMILY ZL_REG(0, 0x7c, 1)
+#define ZL_REG_FLASH_RELEASE ZL_REG(0, 0x7d, 1)
+
+#define ZL_REG_HOST_CONTROL ZL_REG(1, 0x02, 1)
+#define ZL_HOST_CONTROL_ENABLE BIT(0)
+
+#define ZL_REG_IMAGE_START_ADDR ZL_REG(1, 0x04, 4)
+#define ZL_REG_IMAGE_SIZE ZL_REG(1, 0x08, 4)
+#define ZL_REG_FLASH_INDEX_READ ZL_REG(1, 0x0c, 4)
+#define ZL_REG_FLASH_INDEX_WRITE ZL_REG(1, 0x10, 4)
+#define ZL_REG_FILL_PATTERN ZL_REG(1, 0x14, 4)
+
+#define ZL_REG_WRITE_FLASH ZL_REG(1, 0x18, 1)
+#define ZL_WRITE_FLASH_OP GENMASK(2, 0)
+#define ZL_WRITE_FLASH_OP_DONE 0x0
+#define ZL_WRITE_FLASH_OP_SECTORS 0x2
+#define ZL_WRITE_FLASH_OP_PAGE 0x3
+#define ZL_WRITE_FLASH_OP_COPY_PAGE 0x4
+
+#define ZL_REG_FLASH_INFO ZL_REG(2, 0x00, 1)
+#define ZL_FLASH_INFO_SECTOR_SIZE GENMASK(3, 0)
+#define ZL_FLASH_INFO_SECTOR_4K 0
+#define ZL_FLASH_INFO_SECTOR_64K 1
+
+#define ZL_REG_ERROR_COUNT ZL_REG(2, 0x04, 4)
+#define ZL_REG_ERROR_CAUSE ZL_REG(2, 0x08, 4)
+
+#define ZL_REG_OP_STATE ZL_REG(2, 0x14, 1)
+#define ZL_OP_STATE_NO_COMMAND 0
+#define ZL_OP_STATE_PENDING 1
+#define ZL_OP_STATE_DONE 2
+
#endif /* _ZL3073X_REGS_H */
diff --git a/drivers/dpll/zl3073x/synth.c b/drivers/dpll/zl3073x/synth.c
new file mode 100644
index 000000000000..da839572dab2
--- /dev/null
+++ b/drivers/dpll/zl3073x/synth.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/dev_printk.h>
+#include <linux/string.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "core.h"
+#include "synth.h"
+
+/**
+ * zl3073x_synth_state_fetch - fetch synth state from hardware
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: synth index to fetch state for
+ *
+ * The function fetches the state of the given synthesizer from the
+ * hardware and stores it for later use.
+ *
+ * Return: 0 on success, <0 on error
+ */
+int zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 index)
+{
+ struct zl3073x_synth *synth = &zldev->synth[index];
+ int rc;
+
+ /* Read synth control register */
+ rc = zl3073x_read_u8(zldev, ZL_REG_SYNTH_CTRL(index), &synth->ctrl);
+ if (rc)
+ return rc;
+
+ guard(mutex)(&zldev->multiop_lock);
+
+ /* Read synth configuration */
+ rc = zl3073x_mb_op(zldev, ZL_REG_SYNTH_MB_SEM, ZL_SYNTH_MB_SEM_RD,
+ ZL_REG_SYNTH_MB_MASK, BIT(index));
+ if (rc)
+ return rc;
+
+ /* The output frequency is determined by the following formula:
+ * base * multiplier * numerator / denominator
+ *
+ * Read registers with these values
+ */
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_BASE, &synth->freq_base);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u32(zldev, ZL_REG_SYNTH_FREQ_MULT, &synth->freq_mult);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_M, &synth->freq_m);
+ if (rc)
+ return rc;
+
+ rc = zl3073x_read_u16(zldev, ZL_REG_SYNTH_FREQ_N, &synth->freq_n);
+ if (rc)
+ return rc;
+
+ /* Check denominator for zero to avoid div by 0 */
+ if (!synth->freq_n) {
+ dev_err(zldev->dev,
+ "Zero divisor for SYNTH%u retrieved from device\n",
+ index);
+ return -EINVAL;
+ }
+
+ dev_dbg(zldev->dev, "SYNTH%u frequency: %u Hz\n", index,
+ zl3073x_synth_freq_get(synth));
+
+ return rc;
+}
+
+/**
+ * zl3073x_synth_state_get - get current synth state
+ * @zldev: pointer to zl3073x_dev structure
+ * @index: synth index to get state for
+ *
+ * Return: pointer to given synth state
+ */
+const struct zl3073x_synth *zl3073x_synth_state_get(struct zl3073x_dev *zldev,
+ u8 index)
+{
+ return &zldev->synth[index];
+}
diff --git a/drivers/dpll/zl3073x/synth.h b/drivers/dpll/zl3073x/synth.h
new file mode 100644
index 000000000000..6c55eb8a888c
--- /dev/null
+++ b/drivers/dpll/zl3073x/synth.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ZL3073X_SYNTH_H
+#define _ZL3073X_SYNTH_H
+
+#include <linux/bitfield.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
+#include "regs.h"
+
+struct zl3073x_dev;
+
+/**
+ * struct zl3073x_synth - synthesizer state
+ * @freq_mult: frequency multiplier
+ * @freq_base: frequency base
+ * @freq_m: frequency numerator
+ * @freq_n: frequency denominator
+ * @ctrl: synth control
+ */
+struct zl3073x_synth {
+ u32 freq_mult;
+ u16 freq_base;
+ u16 freq_m;
+ u16 freq_n;
+ u8 ctrl;
+};
+
+int zl3073x_synth_state_fetch(struct zl3073x_dev *zldev, u8 synth_id);
+
+const struct zl3073x_synth *zl3073x_synth_state_get(struct zl3073x_dev *zldev,
+ u8 synth_id);
+
+int zl3073x_synth_state_set(struct zl3073x_dev *zldev, u8 synth_id,
+ const struct zl3073x_synth *synth);
+
+/**
+ * zl3073x_synth_dpll_get - get DPLL ID the synth is driven by
+ * @synth: pointer to synth state
+ *
+ * Return: ID of the DPLL the given synthesizer is driven by
+ */
+static inline u8 zl3073x_synth_dpll_get(const struct zl3073x_synth *synth)
+{
+ return FIELD_GET(ZL_SYNTH_CTRL_DPLL_SEL, synth->ctrl);
+}
+
+/**
+ * zl3073x_synth_freq_get - get synth current freq
+ * @synth: pointer to synth state
+ *
+ * Return: frequency of the given synthesizer
+ */
+static inline u32 zl3073x_synth_freq_get(const struct zl3073x_synth *synth)
+{
+ return mul_u64_u32_div(synth->freq_base * synth->freq_m,
+ synth->freq_mult, synth->freq_n);
+}
+
+/**
+ * zl3073x_synth_is_enabled - check if the given synth is enabled
+ * @synth: pointer to synth state
+ *
+ * Return: true if synth is enabled, false otherwise
+ */
+static inline bool zl3073x_synth_is_enabled(const struct zl3073x_synth *synth)
+{
+ return FIELD_GET(ZL_SYNTH_CTRL_EN, synth->ctrl);
+}
+
+#endif /* _ZL3073X_SYNTH_H */
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 19ad3c3b675d..81e40543ffd8 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -23,14 +23,6 @@ menuconfig EDAC
if EDAC
-config EDAC_LEGACY_SYSFS
- bool "EDAC legacy sysfs"
- default y
- help
- Enable the compatibility sysfs nodes.
- Use 'Y' if your edac utilities aren't ported to work with the newer
- structures.
-
config EDAC_DEBUG
bool "Debugging"
select DEBUG_FS
@@ -291,6 +283,18 @@ config EDAC_I10NM
system has non-volatile DIMMs you should also manually
select CONFIG_ACPI_NFIT.
+config EDAC_IMH
+ tristate "Intel Integrated Memory/IO Hub MC"
+ depends on X86_64 && X86_MCE_INTEL && ACPI
+ depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_IMH can't be y
+ select DMI
+ select ACPI_ADXL
+ help
+	  Support for error detection and correction on the Intel
+ Integrated Memory/IO Hub Memory Controller. This MC IP is
+ first used on the Diamond Rapids servers but may appear on
+ others in the future.
+
config EDAC_PND2
tristate "Intel Pondicherry2"
depends on PCI && X86_64 && X86_MCE_INTEL
@@ -576,4 +580,20 @@ config EDAC_LOONGSON
errors (CE) only. Loongson-3A5000/3C5000/3D5000/3A6000/3C6000
are compatible.
+config EDAC_CORTEX_A72
+ tristate "ARM Cortex A72"
+ depends on ARM64
+ help
+	  Support for L1/L2 cache error detection for the ARM Cortex-A72
+	  processor. The detected errors are reported by reading the CPU/L2
+	  memory error syndrome registers.
+
+config EDAC_VERSALNET
+ tristate "AMD VersalNET DDR Controller"
+ depends on CDX_CONTROLLER && ARCH_ZYNQMP
+ help
+	  Support for single-bit error correction, double-bit error detection
+	  and other system errors from various IP subsystems like RPU, NoCs,
+	  HNICX and PL on the AMD Versal NET DDR memory controller.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index a8f2d8f6c894..8429b1e856bc 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -65,6 +65,9 @@ obj-$(CONFIG_EDAC_SKX) += skx_edac.o skx_edac_common.o
i10nm_edac-y := i10nm_base.o
obj-$(CONFIG_EDAC_I10NM) += i10nm_edac.o skx_edac_common.o
+imh_edac-y := imh_base.o
+obj-$(CONFIG_EDAC_IMH) += imh_edac.o skx_edac_common.o
+
obj-$(CONFIG_EDAC_HIGHBANK_MC) += highbank_mc_edac.o
obj-$(CONFIG_EDAC_HIGHBANK_L2) += highbank_l2_edac.o
@@ -88,3 +91,5 @@ obj-$(CONFIG_EDAC_NPCM) += npcm_edac.o
obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
obj-$(CONFIG_EDAC_VERSAL) += versal_edac.o
obj-$(CONFIG_EDAC_LOONGSON) += loongson_edac.o
+obj-$(CONFIG_EDAC_VERSALNET) += versalnet_edac.o
+obj-$(CONFIG_EDAC_CORTEX_A72) += a72_edac.o
diff --git a/drivers/edac/a72_edac.c b/drivers/edac/a72_edac.c
new file mode 100644
index 000000000000..9262d75c3855
--- /dev/null
+++ b/drivers/edac/a72_edac.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cortex A72 EDAC L1 and L2 cache error detection
+ *
+ * Copyright (c) 2020 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright (c) 2025 Microsoft Corporation, <vijayb@linux.microsoft.com>
+ *
+ * Based on Code from:
+ * Copyright (c) 2018, NXP Semiconductor
+ * Author: York Sun <york.sun@nxp.com>
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/bitfield.h>
+#include <asm/smp_plat.h>
+
+#include "edac_module.h"
+
+#define DRVNAME "a72-edac"
+
+#define SYS_CPUMERRSR_EL1 sys_reg(3, 1, 15, 2, 2)
+#define SYS_L2MERRSR_EL1 sys_reg(3, 1, 15, 2, 3)
+
+#define CPUMERRSR_EL1_RAMID GENMASK(30, 24)
+#define L2MERRSR_EL1_CPUID_WAY GENMASK(21, 18)
+
+#define CPUMERRSR_EL1_VALID BIT(31)
+#define CPUMERRSR_EL1_FATAL BIT(63)
+#define L2MERRSR_EL1_VALID BIT(31)
+#define L2MERRSR_EL1_FATAL BIT(63)
+
+#define L1_I_TAG_RAM 0x00
+#define L1_I_DATA_RAM 0x01
+#define L1_D_TAG_RAM 0x08
+#define L1_D_DATA_RAM 0x09
+#define TLB_RAM 0x18
+
+#define MESSAGE_SIZE 64
+
+struct mem_err_synd_reg {
+ u64 cpu_mesr;
+ u64 l2_mesr;
+};
+
+static struct cpumask compat_mask;
+
+static void report_errors(struct edac_device_ctl_info *edac_ctl, int cpu,
+ struct mem_err_synd_reg *mesr)
+{
+ u64 cpu_mesr = mesr->cpu_mesr;
+ u64 l2_mesr = mesr->l2_mesr;
+ char msg[MESSAGE_SIZE];
+
+ if (cpu_mesr & CPUMERRSR_EL1_VALID) {
+ const char *str;
+ bool fatal = cpu_mesr & CPUMERRSR_EL1_FATAL;
+
+ switch (FIELD_GET(CPUMERRSR_EL1_RAMID, cpu_mesr)) {
+ case L1_I_TAG_RAM:
+ str = "L1-I Tag RAM";
+ break;
+ case L1_I_DATA_RAM:
+ str = "L1-I Data RAM";
+ break;
+ case L1_D_TAG_RAM:
+ str = "L1-D Tag RAM";
+ break;
+ case L1_D_DATA_RAM:
+ str = "L1-D Data RAM";
+ break;
+ case TLB_RAM:
+ str = "TLB RAM";
+ break;
+ default:
+ str = "Unspecified";
+ break;
+ }
+
+ snprintf(msg, MESSAGE_SIZE, "%s %s error(s) on CPU %d",
+ str, fatal ? "fatal" : "correctable", cpu);
+
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 0, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 0, msg);
+ }
+
+ if (l2_mesr & L2MERRSR_EL1_VALID) {
+ bool fatal = l2_mesr & L2MERRSR_EL1_FATAL;
+
+ snprintf(msg, MESSAGE_SIZE, "L2 %s error(s) on CPU %d CPUID/WAY 0x%lx",
+ fatal ? "fatal" : "correctable", cpu,
+ FIELD_GET(L2MERRSR_EL1_CPUID_WAY, l2_mesr));
+ if (fatal)
+ edac_device_handle_ue(edac_ctl, cpu, 1, msg);
+ else
+ edac_device_handle_ce(edac_ctl, cpu, 1, msg);
+ }
+}
+
+static void read_errors(void *data)
+{
+ struct mem_err_synd_reg *mesr = data;
+
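+	/* Read each syndrome register and, when a valid error is latched,
+	 * clear it by writing zero so the next error can be captured.
+	 */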
+ mesr->cpu_mesr = read_sysreg_s(SYS_CPUMERRSR_EL1);
+ if (mesr->cpu_mesr & CPUMERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_CPUMERRSR_EL1);
+ isb();
+ }
+ mesr->l2_mesr = read_sysreg_s(SYS_L2MERRSR_EL1);
+ if (mesr->l2_mesr & L2MERRSR_EL1_VALID) {
+ write_sysreg_s(0, SYS_L2MERRSR_EL1);
+ isb();
+ }
+}
+
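+/* Polling callback invoked periodically by the EDAC core. */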
+static void a72_edac_check(struct edac_device_ctl_info *edac_ctl)
+{
+ struct mem_err_synd_reg mesr;
+ int cpu;
+
+ cpus_read_lock();
+ for_each_cpu_and(cpu, cpu_online_mask, &compat_mask) {
+ smp_call_function_single(cpu, read_errors, &mesr, true);
+ report_errors(edac_ctl, cpu, &mesr);
+ }
+ cpus_read_unlock();
+}
+
+static int a72_edac_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl;
+ struct device *dev = &pdev->dev;
+ int rc;
+
+ edac_ctl = edac_device_alloc_ctl_info(0, "cpu",
+ num_possible_cpus(), "L", 2, 1,
+ edac_device_alloc_index());
+ if (!edac_ctl)
+ return -ENOMEM;
+
+ edac_ctl->edac_check = a72_edac_check;
+ edac_ctl->dev = dev;
+ edac_ctl->mod_name = dev_name(dev);
+ edac_ctl->dev_name = dev_name(dev);
+ edac_ctl->ctl_name = DRVNAME;
+ dev_set_drvdata(dev, edac_ctl);
+
+ rc = edac_device_add_device(edac_ctl);
+ if (rc)
+ goto out_dev;
+
+ return 0;
+
+out_dev:
+ edac_device_free_ctl_info(edac_ctl);
+
+ return rc;
+}
+
+static void a72_edac_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *edac_ctl = dev_get_drvdata(&pdev->dev);
+
+ edac_device_del_device(edac_ctl->dev);
+ edac_device_free_ctl_info(edac_ctl);
+}
+
+static const struct of_device_id cortex_arm64_edac_of_match[] = {
+ { .compatible = "arm,cortex-a72" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cortex_arm64_edac_of_match);
+
+static struct platform_driver a72_edac_driver = {
+ .probe = a72_edac_probe,
+ .remove = a72_edac_remove,
+ .driver = {
+ .name = DRVNAME,
+ },
+};
+
+static struct platform_device *a72_pdev;
+
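+/*
+ * Collect the Cortex-A72 CPUs that opt in via the "edac-enabled" device
+ * tree property; the platform device and driver are only registered when
+ * at least one such CPU is present.
+ */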
+static int __init a72_edac_driver_init(void)
+{
+	int cpu, rc;
+
+ for_each_possible_cpu(cpu) {
+ struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu);
+ if (np) {
+ if (of_match_node(cortex_arm64_edac_of_match, np) &&
+ of_property_read_bool(np, "edac-enabled")) {
+ cpumask_set_cpu(cpu, &compat_mask);
+ }
+ } else {
+ pr_warn("failed to find device node for CPU %d\n", cpu);
+ }
+ }
+
+ if (cpumask_empty(&compat_mask))
+ return 0;
+
+ a72_pdev = platform_device_register_simple(DRVNAME, -1, NULL, 0);
+ if (IS_ERR(a72_pdev)) {
+ pr_err("failed to register A72 EDAC device\n");
+ return PTR_ERR(a72_pdev);
+ }
+
+	rc = platform_driver_register(&a72_edac_driver);
+	if (rc)
+		platform_device_unregister(a72_pdev);
+
+	return rc;
+}
+
+static void __exit a72_edac_driver_exit(void)
+{
+ platform_device_unregister(a72_pdev);
+ platform_driver_unregister(&a72_edac_driver);
+}
+
+module_init(a72_edac_driver_init);
+module_exit(a72_edac_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_DESCRIPTION("Cortex A72 L1 and L2 cache EDAC driver");
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 7685a8550d4b..0c5b94e64ea1 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1184,10 +1184,22 @@ altr_check_ocram_deps_init(struct altr_edac_device_dev *device)
if (ret)
return ret;
- /* Verify OCRAM has been initialized */
+ /*
+ * Verify that OCRAM has been initialized.
+ * During a warm reset, OCRAM contents are retained, but the control
+ * and status registers are reset to their default values. Therefore,
+ * ECC must be explicitly re-enabled in the control register.
+ * Error condition: if INITCOMPLETEA is clear and ECC_EN is already set.
+ */
if (!ecc_test_bits(ALTR_A10_ECC_INITCOMPLETEA,
- (base + ALTR_A10_ECC_INITSTAT_OFST)))
- return -ENODEV;
+ (base + ALTR_A10_ECC_INITSTAT_OFST))) {
+ if (!ecc_test_bits(ALTR_A10_ECC_EN,
+ (base + ALTR_A10_ECC_CTRL_OFST)))
+ ecc_set_bits(ALTR_A10_ECC_EN,
+ (base + ALTR_A10_ECC_CTRL_OFST));
+ else
+ return -ENODEV;
+ }
/* Enable IRQ on Single Bit Error */
writel(ALTR_A10_ECC_SERRINTEN, (base + ALTR_A10_ECC_ERRINTENS_OFST));
@@ -1357,7 +1369,7 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject2_fops,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_ETHERNET */
@@ -1447,7 +1459,7 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject2_fops,
+ .inject_fops = &altr_edac_a10_device_inject_fops,
};
#endif /* CONFIG_EDAC_ALTERA_USB */
@@ -2130,8 +2142,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
edac->irq_chip.name = pdev->dev.of_node->name;
edac->irq_chip.irq_mask = a10_eccmgr_irq_mask;
edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask;
- edac->domain = irq_domain_create_linear(of_fwnode_handle(pdev->dev.of_node),
- 64, &a10_eccmgr_ic_ops, edac);
+ edac->domain = irq_domain_create_linear(dev_fwnode(&pdev->dev), 64, &a10_eccmgr_ic_ops,
+ edac);
if (!edac->domain) {
dev_err(&pdev->dev, "Error adding IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 07f1e9dc1ca7..2391f3469961 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3732,6 +3732,7 @@ static void hw_info_put(struct amd64_pvt *pvt)
pci_dev_put(pvt->F1);
pci_dev_put(pvt->F2);
kfree(pvt->umc);
+ kfree(pvt->csels);
}
static struct low_ops umc_ops = {
@@ -3766,6 +3767,7 @@ static int per_family_init(struct amd64_pvt *pvt)
pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
pvt->fam = boot_cpu_data.x86;
+	const char *tmp_name = NULL;
pvt->max_mcs = 2;
/*
@@ -3779,7 +3781,7 @@ static int per_family_init(struct amd64_pvt *pvt)
switch (pvt->fam) {
case 0xf:
- pvt->ctl_name = (pvt->ext_model >= K8_REV_F) ?
+ tmp_name = (pvt->ext_model >= K8_REV_F) ?
"K8 revF or later" : "K8 revE or earlier";
pvt->f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP;
pvt->f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL;
@@ -3788,7 +3790,6 @@ static int per_family_init(struct amd64_pvt *pvt)
break;
case 0x10:
- pvt->ctl_name = "F10h";
pvt->f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP;
pvt->f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM;
pvt->ops->dbam_to_cs = f10_dbam_to_chip_select;
@@ -3797,12 +3798,10 @@ static int per_family_init(struct amd64_pvt *pvt)
case 0x15:
switch (pvt->model) {
case 0x30:
- pvt->ctl_name = "F15h_M30h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2;
break;
case 0x60:
- pvt->ctl_name = "F15h_M60h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2;
pvt->ops->dbam_to_cs = f15_m60h_dbam_to_chip_select;
@@ -3811,7 +3810,6 @@ static int per_family_init(struct amd64_pvt *pvt)
/* Richland is only client */
return -ENODEV;
default:
- pvt->ctl_name = "F15h";
pvt->f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2;
pvt->ops->dbam_to_cs = f15_dbam_to_chip_select;
@@ -3822,12 +3820,10 @@ static int per_family_init(struct amd64_pvt *pvt)
case 0x16:
switch (pvt->model) {
case 0x30:
- pvt->ctl_name = "F16h_M30h";
pvt->f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2;
break;
default:
- pvt->ctl_name = "F16h";
pvt->f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1;
pvt->f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2;
break;
@@ -3836,76 +3832,51 @@ static int per_family_init(struct amd64_pvt *pvt)
case 0x17:
switch (pvt->model) {
- case 0x10 ... 0x2f:
- pvt->ctl_name = "F17h_M10h";
- break;
case 0x30 ... 0x3f:
- pvt->ctl_name = "F17h_M30h";
pvt->max_mcs = 8;
break;
- case 0x60 ... 0x6f:
- pvt->ctl_name = "F17h_M60h";
- break;
- case 0x70 ... 0x7f:
- pvt->ctl_name = "F17h_M70h";
- break;
default:
- pvt->ctl_name = "F17h";
break;
}
break;
case 0x18:
- pvt->ctl_name = "F18h";
break;
case 0x19:
switch (pvt->model) {
case 0x00 ... 0x0f:
- pvt->ctl_name = "F19h";
pvt->max_mcs = 8;
break;
case 0x10 ... 0x1f:
- pvt->ctl_name = "F19h_M10h";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
- case 0x20 ... 0x2f:
- pvt->ctl_name = "F19h_M20h";
- break;
case 0x30 ... 0x3f:
if (pvt->F3->device == PCI_DEVICE_ID_AMD_MI200_DF_F3) {
- pvt->ctl_name = "MI200";
+ tmp_name = "MI200";
pvt->max_mcs = 4;
pvt->dram_type = MEM_HBM2;
pvt->gpu_umc_base = 0x50000;
pvt->ops = &gpu_ops;
} else {
- pvt->ctl_name = "F19h_M30h";
pvt->max_mcs = 8;
}
break;
- case 0x50 ... 0x5f:
- pvt->ctl_name = "F19h_M50h";
- break;
case 0x60 ... 0x6f:
- pvt->ctl_name = "F19h_M60h";
pvt->flags.zn_regs_v2 = 1;
break;
case 0x70 ... 0x7f:
- pvt->ctl_name = "F19h_M70h";
pvt->max_mcs = 4;
pvt->flags.zn_regs_v2 = 1;
break;
case 0x90 ... 0x9f:
- pvt->ctl_name = "F19h_M90h";
pvt->max_mcs = 4;
pvt->dram_type = MEM_HBM3;
pvt->gpu_umc_base = 0x90000;
pvt->ops = &gpu_ops;
break;
case 0xa0 ... 0xaf:
- pvt->ctl_name = "F19h_MA0h";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
@@ -3915,12 +3886,20 @@ static int per_family_init(struct amd64_pvt *pvt)
case 0x1A:
switch (pvt->model) {
case 0x00 ... 0x1f:
- pvt->ctl_name = "F1Ah";
pvt->max_mcs = 12;
pvt->flags.zn_regs_v2 = 1;
break;
case 0x40 ... 0x4f:
- pvt->ctl_name = "F1Ah_M40h";
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x50 ... 0x57:
+ case 0xc0 ... 0xc7:
+ pvt->max_mcs = 16;
+ pvt->flags.zn_regs_v2 = 1;
+ break;
+ case 0x90 ... 0x9f:
+ case 0xa0 ... 0xaf:
+ pvt->max_mcs = 8;
pvt->flags.zn_regs_v2 = 1;
break;
}
@@ -3931,6 +3910,16 @@ static int per_family_init(struct amd64_pvt *pvt)
return -ENODEV;
}
+ if (tmp_name)
+		scnprintf(pvt->ctl_name, sizeof(pvt->ctl_name), "%s", tmp_name);
+ else
+ scnprintf(pvt->ctl_name, sizeof(pvt->ctl_name), "F%02Xh_M%02Xh",
+ pvt->fam, pvt->model);
+
+ pvt->csels = kcalloc(pvt->max_mcs, sizeof(*pvt->csels), GFP_KERNEL);
+ if (!pvt->csels)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 17228d07de4c..1757c1b99fc8 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -96,11 +96,12 @@
/* Hardware limit on ChipSelect rows per MC and processors per system */
#define NUM_CHIPSELECTS 8
#define DRAM_RANGES 8
-#define NUM_CONTROLLERS 12
#define ON true
#define OFF false
+#define MAX_CTL_NAMELEN 19
+
/*
* PCI-defined configuration space registers
*/
@@ -346,7 +347,7 @@ struct amd64_pvt {
u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */
/* one for each DCT/UMC */
- struct chip_select csels[NUM_CONTROLLERS];
+ struct chip_select *csels;
/* DRAM base and limit pairs F1x[78,70,68,60,58,50,48,40] */
struct dram_range ranges[DRAM_RANGES];
@@ -362,7 +363,7 @@ struct amd64_pvt {
/* x4, x8, or x16 syndromes in use */
u8 ecc_sym_sz;
- const char *ctl_name;
+ char ctl_name[MAX_CTL_NAMELEN];
u16 f1_id, f2_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
diff --git a/drivers/edac/ecs.c b/drivers/edac/ecs.c
index 51c451c7f0f0..51c451c7f0f0 100755..100644
--- a/drivers/edac/ecs.c
+++ b/drivers/edac/ecs.c
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 0f338adf7d93..091cc6aae8a9 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -115,377 +115,6 @@ static const char * const edac_caps[] = {
[EDAC_S16ECD16ED] = "S16ECD16ED"
};
-#ifdef CONFIG_EDAC_LEGACY_SYSFS
-/*
- * EDAC sysfs CSROW data structures and methods
- */
-
-#define to_csrow(k) container_of(k, struct csrow_info, dev)
-
-/*
- * We need it to avoid namespace conflicts between the legacy API
- * and the per-dimm/per-rank one
- */
-#define DEVICE_ATTR_LEGACY(_name, _mode, _show, _store) \
- static struct device_attribute dev_attr_legacy_##_name = __ATTR(_name, _mode, _show, _store)
-
-struct dev_ch_attribute {
- struct device_attribute attr;
- unsigned int channel;
-};
-
-#define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
- static struct dev_ch_attribute dev_attr_legacy_##_name = \
- { __ATTR(_name, _mode, _show, _store), (_var) }
-
-#define to_channel(k) (container_of(k, struct dev_ch_attribute, attr)->channel)
-
-/* Set of more default csrow<id> attribute show/store functions */
-static ssize_t csrow_ue_count_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%u\n", csrow->ue_count);
-}
-
-static ssize_t csrow_ce_count_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%u\n", csrow->ce_count);
-}
-
-static ssize_t csrow_size_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
- int i;
- u32 nr_pages = 0;
-
- for (i = 0; i < csrow->nr_channels; i++)
- nr_pages += csrow->channels[i]->dimm->nr_pages;
- return sysfs_emit(data, "%u\n", PAGES_TO_MiB(nr_pages));
-}
-
-static ssize_t csrow_mem_type_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%s\n", edac_mem_types[csrow->channels[0]->dimm->mtype]);
-}
-
-static ssize_t csrow_dev_type_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%s\n", dev_types[csrow->channels[0]->dimm->dtype]);
-}
-
-static ssize_t csrow_edac_mode_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
-
- return sysfs_emit(data, "%s\n", edac_caps[csrow->channels[0]->dimm->edac_mode]);
-}
-
-/* show/store functions for DIMM Label attributes */
-static ssize_t channel_dimm_label_show(struct device *dev,
- struct device_attribute *mattr,
- char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
- unsigned int chan = to_channel(mattr);
- struct rank_info *rank = csrow->channels[chan];
-
- /* if field has not been initialized, there is nothing to send */
- if (!rank->dimm->label[0])
- return 0;
-
- return sysfs_emit(data, "%s\n", rank->dimm->label);
-}
-
-static ssize_t channel_dimm_label_store(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
-{
- struct csrow_info *csrow = to_csrow(dev);
- unsigned int chan = to_channel(mattr);
- struct rank_info *rank = csrow->channels[chan];
- size_t copy_count = count;
-
- if (count == 0)
- return -EINVAL;
-
- if (data[count - 1] == '\0' || data[count - 1] == '\n')
- copy_count -= 1;
-
- if (copy_count == 0 || copy_count >= sizeof(rank->dimm->label))
- return -EINVAL;
-
- memcpy(rank->dimm->label, data, copy_count);
- rank->dimm->label[copy_count] = '\0';
-
- return count;
-}
-
-/* show function for dynamic chX_ce_count attribute */
-static ssize_t channel_ce_count_show(struct device *dev,
- struct device_attribute *mattr, char *data)
-{
- struct csrow_info *csrow = to_csrow(dev);
- unsigned int chan = to_channel(mattr);
- struct rank_info *rank = csrow->channels[chan];
-
- return sysfs_emit(data, "%u\n", rank->ce_count);
-}
-
-/* cwrow<id>/attribute files */
-DEVICE_ATTR_LEGACY(size_mb, S_IRUGO, csrow_size_show, NULL);
-DEVICE_ATTR_LEGACY(dev_type, S_IRUGO, csrow_dev_type_show, NULL);
-DEVICE_ATTR_LEGACY(mem_type, S_IRUGO, csrow_mem_type_show, NULL);
-DEVICE_ATTR_LEGACY(edac_mode, S_IRUGO, csrow_edac_mode_show, NULL);
-DEVICE_ATTR_LEGACY(ue_count, S_IRUGO, csrow_ue_count_show, NULL);
-DEVICE_ATTR_LEGACY(ce_count, S_IRUGO, csrow_ce_count_show, NULL);
-
-/* default attributes of the CSROW<id> object */
-static struct attribute *csrow_attrs[] = {
- &dev_attr_legacy_dev_type.attr,
- &dev_attr_legacy_mem_type.attr,
- &dev_attr_legacy_edac_mode.attr,
- &dev_attr_legacy_size_mb.attr,
- &dev_attr_legacy_ue_count.attr,
- &dev_attr_legacy_ce_count.attr,
- NULL,
-};
-
-static const struct attribute_group csrow_attr_grp = {
- .attrs = csrow_attrs,
-};
-
-static const struct attribute_group *csrow_attr_groups[] = {
- &csrow_attr_grp,
- NULL
-};
-
-static const struct device_type csrow_attr_type = {
- .groups = csrow_attr_groups,
-};
-
-/*
- * possible dynamic channel DIMM Label attribute files
- *
- */
-DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 0);
-DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 1);
-DEVICE_CHANNEL(ch2_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 2);
-DEVICE_CHANNEL(ch3_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 3);
-DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 4);
-DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 5);
-DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 6);
-DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 7);
-DEVICE_CHANNEL(ch8_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 8);
-DEVICE_CHANNEL(ch9_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 9);
-DEVICE_CHANNEL(ch10_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 10);
-DEVICE_CHANNEL(ch11_dimm_label, S_IRUGO | S_IWUSR,
- channel_dimm_label_show, channel_dimm_label_store, 11);
-
-/* Total possible dynamic DIMM Label attribute file table */
-static struct attribute *dynamic_csrow_dimm_attr[] = {
- &dev_attr_legacy_ch0_dimm_label.attr.attr,
- &dev_attr_legacy_ch1_dimm_label.attr.attr,
- &dev_attr_legacy_ch2_dimm_label.attr.attr,
- &dev_attr_legacy_ch3_dimm_label.attr.attr,
- &dev_attr_legacy_ch4_dimm_label.attr.attr,
- &dev_attr_legacy_ch5_dimm_label.attr.attr,
- &dev_attr_legacy_ch6_dimm_label.attr.attr,
- &dev_attr_legacy_ch7_dimm_label.attr.attr,
- &dev_attr_legacy_ch8_dimm_label.attr.attr,
- &dev_attr_legacy_ch9_dimm_label.attr.attr,
- &dev_attr_legacy_ch10_dimm_label.attr.attr,
- &dev_attr_legacy_ch11_dimm_label.attr.attr,
- NULL
-};
-
-/* possible dynamic channel ce_count attribute files */
-DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 0);
-DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 1);
-DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 2);
-DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 3);
-DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 4);
-DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 5);
-DEVICE_CHANNEL(ch6_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 6);
-DEVICE_CHANNEL(ch7_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 7);
-DEVICE_CHANNEL(ch8_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 8);
-DEVICE_CHANNEL(ch9_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 9);
-DEVICE_CHANNEL(ch10_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 10);
-DEVICE_CHANNEL(ch11_ce_count, S_IRUGO,
- channel_ce_count_show, NULL, 11);
-
-/* Total possible dynamic ce_count attribute file table */
-static struct attribute *dynamic_csrow_ce_count_attr[] = {
- &dev_attr_legacy_ch0_ce_count.attr.attr,
- &dev_attr_legacy_ch1_ce_count.attr.attr,
- &dev_attr_legacy_ch2_ce_count.attr.attr,
- &dev_attr_legacy_ch3_ce_count.attr.attr,
- &dev_attr_legacy_ch4_ce_count.attr.attr,
- &dev_attr_legacy_ch5_ce_count.attr.attr,
- &dev_attr_legacy_ch6_ce_count.attr.attr,
- &dev_attr_legacy_ch7_ce_count.attr.attr,
- &dev_attr_legacy_ch8_ce_count.attr.attr,
- &dev_attr_legacy_ch9_ce_count.attr.attr,
- &dev_attr_legacy_ch10_ce_count.attr.attr,
- &dev_attr_legacy_ch11_ce_count.attr.attr,
- NULL
-};
-
-static umode_t csrow_dev_is_visible(struct kobject *kobj,
- struct attribute *attr, int idx)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
-
- if (idx >= csrow->nr_channels)
- return 0;
-
- if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) {
- WARN_ONCE(1, "idx: %d\n", idx);
- return 0;
- }
-
- /* Only expose populated DIMMs */
- if (!csrow->channels[idx]->dimm->nr_pages)
- return 0;
-
- return attr->mode;
-}
-
-
-static const struct attribute_group csrow_dev_dimm_group = {
- .attrs = dynamic_csrow_dimm_attr,
- .is_visible = csrow_dev_is_visible,
-};
-
-static const struct attribute_group csrow_dev_ce_count_group = {
- .attrs = dynamic_csrow_ce_count_attr,
- .is_visible = csrow_dev_is_visible,
-};
-
-static const struct attribute_group *csrow_dev_groups[] = {
- &csrow_dev_dimm_group,
- &csrow_dev_ce_count_group,
- NULL
-};
-
-static void csrow_release(struct device *dev)
-{
- /*
- * Nothing to do, just unregister sysfs here. The mci
- * device owns the data and will also release it.
- */
-}
-
-static inline int nr_pages_per_csrow(struct csrow_info *csrow)
-{
- int chan, nr_pages = 0;
-
- for (chan = 0; chan < csrow->nr_channels; chan++)
- nr_pages += csrow->channels[chan]->dimm->nr_pages;
-
- return nr_pages;
-}
-
-/* Create a CSROW object under specified edac_mc_device */
-static int edac_create_csrow_object(struct mem_ctl_info *mci,
- struct csrow_info *csrow, int index)
-{
- int err;
-
- csrow->dev.type = &csrow_attr_type;
- csrow->dev.groups = csrow_dev_groups;
- csrow->dev.release = csrow_release;
- device_initialize(&csrow->dev);
- csrow->dev.parent = &mci->dev;
- csrow->mci = mci;
- dev_set_name(&csrow->dev, "csrow%d", index);
- dev_set_drvdata(&csrow->dev, csrow);
-
- err = device_add(&csrow->dev);
- if (err) {
- edac_dbg(1, "failure: create device %s\n", dev_name(&csrow->dev));
- put_device(&csrow->dev);
- return err;
- }
-
- edac_dbg(0, "device %s created\n", dev_name(&csrow->dev));
-
- return 0;
-}
-
-/* Create a CSROW object under specified edac_mc_device */
-static int edac_create_csrow_objects(struct mem_ctl_info *mci)
-{
- int err, i;
- struct csrow_info *csrow;
-
- for (i = 0; i < mci->nr_csrows; i++) {
- csrow = mci->csrows[i];
- if (!nr_pages_per_csrow(csrow))
- continue;
- err = edac_create_csrow_object(mci, mci->csrows[i], i);
- if (err < 0)
- goto error;
- }
- return 0;
-
-error:
- for (--i; i >= 0; i--) {
- if (device_is_registered(&mci->csrows[i]->dev))
- device_unregister(&mci->csrows[i]->dev);
- }
-
- return err;
-}
-
-static void edac_delete_csrow_objects(struct mem_ctl_info *mci)
-{
- int i;
-
- for (i = 0; i < mci->nr_csrows; i++) {
- if (device_is_registered(&mci->csrows[i]->dev))
- device_unregister(&mci->csrows[i]->dev);
- }
-}
-
-#endif
-
/*
* Per-dimm (or per-rank) devices
*/
@@ -965,12 +594,6 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
goto fail;
}
-#ifdef CONFIG_EDAC_LEGACY_SYSFS
- err = edac_create_csrow_objects(mci);
- if (err < 0)
- goto fail;
-#endif
-
edac_create_debugfs_nodes(mci);
return 0;
@@ -995,9 +618,6 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
#ifdef CONFIG_EDAC_DEBUG
edac_debugfs_remove_recursive(mci->debugfs);
#endif
-#ifdef CONFIG_EDAC_LEGACY_SYSFS
- edac_delete_csrow_objects(mci);
-#endif
mci_for_each_dimm(mci, dimm) {
if (!device_is_registered(&dimm->dev))
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 1eb0136c6fbd..d80c88818691 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -15,6 +15,7 @@
#include "edac_module.h"
#include <ras/ras_event.h>
#include <linux/notifier.h>
+#include <linux/string.h>
#define OTHER_DETAIL_LEN 400
@@ -332,7 +333,7 @@ static int ghes_edac_report_mem_error(struct notifier_block *nb,
p = pvt->msg;
p += snprintf(p, sizeof(pvt->msg), "%s", cper_mem_err_type_str(etype));
} else {
- strcpy(pvt->msg, "unknown error");
+ strscpy(pvt->msg, "unknown error");
}
/* Error address */
@@ -357,14 +358,14 @@ static int ghes_edac_report_mem_error(struct notifier_block *nb,
dimm = find_dimm_by_handle(mci, mem_err->mem_dev_handle);
if (dimm) {
e->top_layer = dimm->idx;
- strcpy(e->label, dimm->label);
+ strscpy(e->label, dimm->label);
}
}
if (p > e->location)
*(p - 1) = '\0';
if (!*e->label)
- strcpy(e->label, "unknown memory");
+ strscpy(e->label, "unknown memory");
/* All other fields are mapped on e->other_detail */
p = pvt->other_detail;
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index bf4171ac191d..89b3e8cc38b1 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -468,17 +468,18 @@ static int i10nm_get_imc_num(struct res_config *cfg)
return -ENODEV;
}
- if (imc_num > I10NM_NUM_DDR_IMC) {
- i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
- return -EINVAL;
- }
-
if (cfg->ddr_imc_num != imc_num) {
/*
- * Store the number of present DDR memory controllers.
+ * Update the configuration data to reflect the number of
+ * present DDR memory controllers.
*/
cfg->ddr_imc_num = imc_num;
edac_dbg(2, "Set DDR MC number: %d", imc_num);
+
+		/* Release and reallocate the skx_dev list with the updated number. */
+ skx_remove();
+ if (skx_get_all_bus_mappings(cfg, &i10nm_edac_list) <= 0)
+ return -ENODEV;
}
return 0;
@@ -1057,6 +1058,15 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
return !!GET_BITFIELD(mcmtr, 2, 2);
}
+static bool i10nm_channel_disabled(struct skx_imc *imc, int chan)
+{
+ u32 mcmtr = I10NM_GET_MCMTR(imc, chan);
+
+ edac_dbg(1, "mc%d ch%d mcmtr reg %x\n", imc->mc, chan, mcmtr);
+
+ return (mcmtr == ~0 || GET_BITFIELD(mcmtr, 18, 18));
+}
+
static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
struct res_config *cfg)
{
@@ -1070,6 +1080,11 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
if (!imc->mbase)
continue;
+ if (i10nm_channel_disabled(imc, i)) {
+ edac_dbg(1, "mc%d ch%d is disabled.\n", imc->mc, i);
+ continue;
+ }
+
ndimms = 0;
if (res_cfg->type != GNR)
@@ -1183,7 +1198,8 @@ static int __init i10nm_init(void)
d->imc[i].num_dimms = cfg->ddr_dimm_num;
}
- rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
+ rc = skx_register_mci(&d->imc[i], &d->imc[i].mdev->dev,
+ pci_name(d->imc[i].mdev),
"Intel_10nm Socket", EDAC_MOD_STR,
i10nm_get_dimm_config, cfg);
if (rc < 0)
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
index 5c1fa1c0d12e..eaab6af143e1 100644
--- a/drivers/edac/ie31200_edac.c
+++ b/drivers/edac/ie31200_edac.c
@@ -44,6 +44,7 @@
* but lo_hi_readq() ensures that we are safe across all e3-1200 processors.
*/
+#include <linux/bitfield.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -99,6 +100,8 @@
/* Alder Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1 0x4660
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2 0x4668 /* 8P+4E, e.g. i7-12700K */
+#define PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3 0x4648 /* 6P+4E, e.g. i5-12600K */
/* Bartlett Lake-S */
#define PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1 0x4639
@@ -137,9 +140,6 @@
#define IE31200_CAPID0_DDPCD BIT(6)
#define IE31200_CAPID0_ECC BIT(1)
-/* Non-constant mask variant of FIELD_GET() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-
static int nr_channels;
static struct pci_dev *mci_pdev;
static int ie31200_registered = 1;
@@ -524,6 +524,7 @@ static int ie31200_register_mci(struct pci_dev *pdev, struct res_config *cfg, in
ie31200_pvt.priv[mc] = priv;
return 0;
fail_unmap:
+ put_device(&priv->dev);
iounmap(window);
fail_free:
edac_mc_free(mci);
@@ -596,6 +597,7 @@ static void ie31200_unregister_mcis(void)
mci = priv->mci;
edac_mc_del_mc(mci->pdev);
iounmap(priv->window);
+ put_device(&priv->dev);
edac_mc_free(mci);
}
}
@@ -761,6 +763,8 @@ static const struct pci_device_id ie31200_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_S_6), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_RPL_HX_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_1), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_2), (kernel_ulong_t)&rpl_s_cfg},
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_ADL_S_3), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_1), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_2), (kernel_ulong_t)&rpl_s_cfg},
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IE31200_BTL_S_3), (kernel_ulong_t)&rpl_s_cfg},
diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c
index 2fc59f9eed69..553c31a2d922 100644
--- a/drivers/edac/igen6_edac.c
+++ b/drivers/edac/igen6_edac.c
@@ -1300,6 +1300,7 @@ static int igen6_register_mci(int mc, void __iomem *window, struct pci_dev *pdev
imc->mci = mci;
return 0;
fail3:
+ put_device(&imc->dev);
mci->pvt_info = NULL;
kfree(mci->ctl_name);
fail2:
@@ -1326,6 +1327,7 @@ static void igen6_unregister_mcis(void)
kfree(mci->ctl_name);
mci->pvt_info = NULL;
edac_mc_free(mci);
+ put_device(&imc->dev);
iounmap(imc->window);
}
}
diff --git a/drivers/edac/imh_base.c b/drivers/edac/imh_base.c
new file mode 100644
index 000000000000..4348b3883b45
--- /dev/null
+++ b/drivers/edac/imh_base.c
@@ -0,0 +1,622 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Intel(R) servers with Integrated Memory/IO Hub-based memory controller.
+ * Copyright (c) 2025, Intel Corporation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/mce.h>
+#include <asm/cpu.h>
+#include "edac_module.h"
+#include "skx_common.h"
+
+#define IMH_REVISION "v0.0.1"
+#define EDAC_MOD_STR "imh_edac"
+
+/* Debug macros */
+#define imh_printk(level, fmt, arg...) \
+ edac_printk(level, "imh", fmt, ##arg)
+
+/* Configuration Agent (Ubox) */
+#define MMIO_BASE_H(reg) (((u64)GET_BITFIELD(reg, 0, 29)) << 23)
+#define SOCKET_ID(reg) GET_BITFIELD(reg, 0, 3)
+
+/* PUNIT */
+#define DDR_IMC_BITMAP(reg) GET_BITFIELD(reg, 23, 30)
+
+/* Memory Controller */
+#define ECC_ENABLED(reg) GET_BITFIELD(reg, 2, 2)
+#define DIMM_POPULATED(reg) GET_BITFIELD(reg, 15, 15)
+
+/* System Cache Agent (SCA) */
+#define TOLM(reg) (((u64)GET_BITFIELD(reg, 16, 31)) << 16)
+#define TOHM(reg) (((u64)GET_BITFIELD(reg, 16, 51)) << 16)
+
+/* Home Agent (HA) */
+#define NMCACHING(reg) GET_BITFIELD(reg, 8, 8)
+
+/**
+ * struct local_reg - A register as described in the local package view.
+ *
+ * @pkg: (input) The package where the register is located.
+ * @pbase: (input) The IP MMIO base physical address in the local package view.
+ * @size: (input) The IP MMIO size.
+ * @offset: (input) The register offset from the IP MMIO base @pbase.
+ * @width: (input) The register width in bytes.
+ * @vbase: (internal) The IP MMIO base virtual address.
+ * @val: (output) The register value.
+ */
+struct local_reg {
+ int pkg;
+ u64 pbase;
+ u32 size;
+ u32 offset;
+ u8 width;
+ void __iomem *vbase;
+ u64 val;
+};
+
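+/*
+ * Build a struct local_reg for instance @ip_idx of IP block @ip_name in
+ * package @package, pulling the per-IP base, size, register offset and
+ * register width from the res_config @cfg.
+ */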
+#define DEFINE_LOCAL_REG(name, cfg, package, north, ip_name, ip_idx, reg_name) \
+ struct local_reg name = { \
+ .pkg = package, \
+ .pbase = (north ? (cfg)->mmio_base_l_north : \
+ (cfg)->mmio_base_l_south) + \
+ (cfg)->ip_name##_base + \
+ (cfg)->ip_name##_size * (ip_idx), \
+ .size = (cfg)->ip_name##_size, \
+ .offset = (cfg)->ip_name##_reg_##reg_name##_offset, \
+ .width = (cfg)->ip_name##_reg_##reg_name##_width, \
+ }
+
+static u64 readx(void __iomem *addr, u8 width)
+{
+ switch (width) {
+ case 1:
+ return readb(addr);
+ case 2:
+ return readw(addr);
+ case 4:
+ return readl(addr);
+ case 8:
+ return readq(addr);
+ default:
+ imh_printk(KERN_ERR, "Invalid reg 0x%p width %d\n", addr, width);
+ return 0;
+ }
+}
+
+static void __read_local_reg(void *reg)
+{
+ struct local_reg *r = (struct local_reg *)reg;
+
+ r->val = readx(r->vbase + r->offset, r->width);
+}
+
+/*
+ * Read a local-view register: the MMIO window is package-local, so map it
+ * and have an online CPU inside the target package perform the read.
+ */
+static bool read_local_reg(struct local_reg *reg)
+{
+ int cpu;
+
+ /* Get the target CPU in the package @reg->pkg. */
+ for_each_online_cpu(cpu) {
+ if (reg->pkg == topology_physical_package_id(cpu))
+ break;
+ }
+
+ if (cpu >= nr_cpu_ids)
+ return false;
+
+ reg->vbase = ioremap(reg->pbase, reg->size);
+ if (!reg->vbase) {
+ imh_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", reg->pbase);
+ return false;
+ }
+
+ /* Get the target CPU to read the register. */
+ smp_call_function_single(cpu, __read_local_reg, reg, 1);
+ iounmap(reg->vbase);
+
+ return true;
+}
+
+/* Get the bitmap of memory controller instances in package @pkg. */
+static u32 get_imc_bitmap(struct res_config *cfg, int pkg, bool north)
+{
+ DEFINE_LOCAL_REG(reg, cfg, pkg, north, pcu, 0, capid3);
+
+ if (!read_local_reg(&reg))
+ return 0;
+
+ edac_dbg(2, "Pkg%d %s mc instances bitmap 0x%llx (reg 0x%llx)\n",
+ pkg, north ? "north" : "south",
+ DDR_IMC_BITMAP(reg.val), reg.val);
+
+ return DDR_IMC_BITMAP(reg.val);
+}
+
+static void imc_release(struct device *dev)
+{
+ edac_dbg(2, "imc device %s released\n", dev_name(dev));
+ kfree(dev);
+}
+
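+/*
+ * Enumerate the DDR memory controllers present in the north or south IMH
+ * of package @d->pkg: map each controller's MMIO, create its imc device
+ * instance, and record the physical-to-logical controller index mapping.
+ * Returns the next free logical index, or a negative error code.
+ */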
+static int __get_ddr_munits(struct res_config *cfg, struct skx_dev *d,
+ bool north, int lmc)
+{
+ unsigned long size = cfg->ddr_chan_mmio_sz * cfg->ddr_chan_num;
+ unsigned long bitmap = get_imc_bitmap(cfg, d->pkg, north);
+ void __iomem *mbase;
+ struct device *dev;
+ int i, rc, pmc;
+ u64 base;
+
+ for_each_set_bit(i, &bitmap, sizeof(bitmap) * 8) {
+ base = north ? d->mmio_base_h_north : d->mmio_base_h_south;
+ base += cfg->ddr_imc_base + size * i;
+
+ edac_dbg(2, "Pkg%d mc%d mmio base 0x%llx size 0x%lx\n",
+ d->pkg, lmc, base, size);
+
+ /* Set up the imc MMIO. */
+ mbase = ioremap(base, size);
+ if (!mbase) {
+ imh_printk(KERN_ERR, "Failed to ioremap 0x%llx\n", base);
+ return -ENOMEM;
+ }
+
+ d->imc[lmc].mbase = mbase;
+ d->imc[lmc].lmc = lmc;
+
+ /* Create the imc device instance. */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->release = imc_release;
+ device_initialize(dev);
+ rc = dev_set_name(dev, "0x%llx", base);
+ if (rc) {
+ imh_printk(KERN_ERR, "Failed to set dev name\n");
+ put_device(dev);
+ return rc;
+ }
+
+ d->imc[lmc].dev = dev;
+
+ /* Set up the imc index mapping. */
+ pmc = north ? i : 8 + i;
+ skx_set_mc_mapping(d, pmc, lmc);
+
+ lmc++;
+ }
+
+ return lmc;
+}
+
+static bool get_ddr_munits(struct res_config *cfg, struct skx_dev *d)
+{
+ int lmc = __get_ddr_munits(cfg, d, true, 0);
+
+ if (lmc < 0)
+ return false;
+
+ lmc = __get_ddr_munits(cfg, d, false, lmc);
+ if (lmc <= 0)
+ return false;
+
+ return true;
+}
+
+static bool get_socket_id(struct res_config *cfg, struct skx_dev *d)
+{
+ DEFINE_LOCAL_REG(reg, cfg, d->pkg, true, ubox, 0, socket_id);
+ u8 src_id;
+ int i;
+
+ if (!read_local_reg(&reg))
+ return false;
+
+ src_id = SOCKET_ID(reg.val);
+ edac_dbg(2, "socket id 0x%x (reg 0x%llx)\n", src_id, reg.val);
+
+ for (i = 0; i < cfg->ddr_imc_num; i++)
+ d->imc[i].src_id = src_id;
+
+ return true;
+}
+
+/* Get TOLM (Top Of Low Memory) and TOHM (Top Of High Memory) parameters. */
+static bool imh_get_tolm_tohm(struct res_config *cfg, u64 *tolm, u64 *tohm)
+{
+ DEFINE_LOCAL_REG(reg, cfg, 0, true, sca, 0, tolm);
+
+ if (!read_local_reg(&reg))
+ return false;
+
+ *tolm = TOLM(reg.val);
+ edac_dbg(2, "tolm 0x%llx (reg 0x%llx)\n", *tolm, reg.val);
+
+ DEFINE_LOCAL_REG(reg2, cfg, 0, true, sca, 0, tohm);
+
+ if (!read_local_reg(&reg2))
+ return false;
+
+ *tohm = TOHM(reg2.val);
+ edac_dbg(2, "tohm 0x%llx (reg 0x%llx)\n", *tohm, reg2.val);
+
+ return true;
+}
+
+/* Get the system-view MMIO_BASE_H for {north,south}-IMH. */
+static int imh_get_all_mmio_base_h(struct res_config *cfg, struct list_head *edac_list)
+{
+ int i, n = topology_max_packages(), imc_num = cfg->ddr_imc_num + cfg->hbm_imc_num;
+ struct skx_dev *d;
+
+ for (i = 0; i < n; i++) {
+ d = kzalloc(struct_size(d, imc, imc_num), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ DEFINE_LOCAL_REG(reg, cfg, i, true, ubox, 0, mmio_base);
+
+ /* Get MMIO_BASE_H for the north-IMH. */
+ if (!read_local_reg(&reg) || !reg.val) {
+ kfree(d);
+ imh_printk(KERN_ERR, "Pkg%d has no north mmio_base_h\n", i);
+ return -ENODEV;
+ }
+
+ d->mmio_base_h_north = MMIO_BASE_H(reg.val);
+ edac_dbg(2, "Pkg%d north mmio_base_h 0x%llx (reg 0x%llx)\n",
+ i, d->mmio_base_h_north, reg.val);
+
+ /* Get MMIO_BASE_H for the south-IMH (optional). */
+ DEFINE_LOCAL_REG(reg2, cfg, i, false, ubox, 0, mmio_base);
+
+ if (read_local_reg(&reg2)) {
+ d->mmio_base_h_south = MMIO_BASE_H(reg2.val);
+ edac_dbg(2, "Pkg%d south mmio_base_h 0x%llx (reg 0x%llx)\n",
+ i, d->mmio_base_h_south, reg2.val);
+ }
+
+ d->pkg = i;
+ d->num_imc = imc_num;
+ skx_init_mc_mapping(d);
+ list_add_tail(&d->list, edac_list);
+ }
+
+ return 0;
+}
+
+/* Get the number of per-package memory controllers. */
+static int imh_get_imc_num(struct res_config *cfg)
+{
+ int imc_num = hweight32(get_imc_bitmap(cfg, 0, true)) +
+ hweight32(get_imc_bitmap(cfg, 0, false));
+
+ if (!imc_num) {
+ imh_printk(KERN_ERR, "Invalid mc number\n");
+ return -ENODEV;
+ }
+
+ if (cfg->ddr_imc_num != imc_num) {
+ /*
+ * Update the configuration data to reflect the number of
+ * present DDR memory controllers.
+ */
+ cfg->ddr_imc_num = imc_num;
+ edac_dbg(2, "Set ddr mc number %d\n", imc_num);
+ }
+
+ return 0;
+}
+
+/* Get all memory controllers' parameters. */
+static int imh_get_munits(struct res_config *cfg, struct list_head *edac_list)
+{
+ struct skx_imc *imc;
+ struct skx_dev *d;
+ u8 mc = 0;
+ int i;
+
+ list_for_each_entry(d, edac_list, list) {
+ if (!get_ddr_munits(cfg, d)) {
+ imh_printk(KERN_ERR, "No mc found\n");
+ return -ENODEV;
+ }
+
+ if (!get_socket_id(cfg, d)) {
+ imh_printk(KERN_ERR, "Failed to get socket id\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cfg->ddr_imc_num; i++) {
+ imc = &d->imc[i];
+ if (!imc->mbase)
+ continue;
+
+ imc->chan_mmio_sz = cfg->ddr_chan_mmio_sz;
+ imc->num_channels = cfg->ddr_chan_num;
+ imc->num_dimms = cfg->ddr_dimm_num;
+ imc->mc = mc++;
+ }
+ }
+
+ return 0;
+}
+
+static bool check_2lm_enabled(struct res_config *cfg, struct skx_dev *d, int ha_idx)
+{
+ DEFINE_LOCAL_REG(reg, cfg, d->pkg, true, ha, ha_idx, mode);
+
+ if (!read_local_reg(&reg))
+ return false;
+
+ if (!NMCACHING(reg.val))
+ return false;
+
+ edac_dbg(2, "2-level memory configuration (reg 0x%llx, ha idx %d)\n", reg.val, ha_idx);
+ return true;
+}
+
+/* Check whether the system has a 2-level memory configuration. */
+static bool imh_2lm_enabled(struct res_config *cfg, struct list_head *head)
+{
+ struct skx_dev *d;
+ int i;
+
+ list_for_each_entry(d, head, list) {
+ for (i = 0; i < cfg->ddr_imc_num; i++)
+ if (check_2lm_enabled(cfg, d, i))
+ return true;
+ }
+
+ return false;
+}
+
+/* Helpers to read memory controller registers */
+static u64 read_imc_reg(struct skx_imc *imc, int chan, u32 offset, u8 width)
+{
+ return readx(imc->mbase + imc->chan_mmio_sz * chan + offset, width);
+}
+
+static u32 read_imc_mcmtr(struct res_config *cfg, struct skx_imc *imc, int chan)
+{
+ return (u32)read_imc_reg(imc, chan, cfg->ddr_reg_mcmtr_offset, cfg->ddr_reg_mcmtr_width);
+}
+
+static u32 read_imc_dimmmtr(struct res_config *cfg, struct skx_imc *imc, int chan, int dimm)
+{
+ return (u32)read_imc_reg(imc, chan, cfg->ddr_reg_dimmmtr_offset +
+ cfg->ddr_reg_dimmmtr_width * dimm,
+ cfg->ddr_reg_dimmmtr_width);
+}
+
+static bool ecc_enabled(u32 mcmtr)
+{
+ return (bool)ECC_ENABLED(mcmtr);
+}
+
+static bool dimm_populated(u32 dimmmtr)
+{
+ return (bool)DIMM_POPULATED(dimmmtr);
+}
+
+/* Get each DIMM's configurations of the memory controller @mci. */
+static int imh_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
+{
+ struct skx_pvt *pvt = mci->pvt_info;
+ struct skx_imc *imc = pvt->imc;
+ struct dimm_info *dimm;
+ u32 mcmtr, dimmmtr;
+ int i, j, ndimms;
+
+ for (i = 0; i < imc->num_channels; i++) {
+ if (!imc->mbase)
+ continue;
+
+ mcmtr = read_imc_mcmtr(cfg, imc, i);
+
+ for (ndimms = 0, j = 0; j < imc->num_dimms; j++) {
+ dimmmtr = read_imc_dimmmtr(cfg, imc, i, j);
+ edac_dbg(1, "mcmtr 0x%x dimmmtr 0x%x (mc%d ch%d dimm%d)\n",
+ mcmtr, dimmmtr, imc->mc, i, j);
+
+ if (!dimm_populated(dimmmtr))
+ continue;
+
+ dimm = edac_get_dimm(mci, i, j, 0);
+ ndimms += skx_get_dimm_info(dimmmtr, 0, 0, dimm,
+ imc, i, j, cfg);
+ }
+
+ if (ndimms && !ecc_enabled(mcmtr)) {
+ imh_printk(KERN_ERR, "ECC is disabled on mc%d ch%d\n",
+ imc->mc, i);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/* Register all memory controllers to the EDAC core. */
+static int imh_register_mci(struct res_config *cfg, struct list_head *edac_list)
+{
+ struct skx_imc *imc;
+ struct skx_dev *d;
+ int i, rc;
+
+ list_for_each_entry(d, edac_list, list) {
+ for (i = 0; i < cfg->ddr_imc_num; i++) {
+ imc = &d->imc[i];
+ if (!imc->mbase)
+ continue;
+
+ rc = skx_register_mci(imc, imc->dev,
+ dev_name(imc->dev),
+ "Intel IMH-based Socket",
+ EDAC_MOD_STR,
+ imh_get_dimm_config, cfg);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
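+/* Resource/layout description for Diamond Rapids (DMR) servers. */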
+static struct res_config dmr_cfg = {
+ .type = DMR,
+ .support_ddr5 = true,
+ .mmio_base_l_north = 0xf6800000,
+ .mmio_base_l_south = 0xf6000000,
+ .ddr_chan_num = 1,
+ .ddr_dimm_num = 2,
+ .ddr_imc_base = 0x39b000,
+ .ddr_chan_mmio_sz = 0x8000,
+ .ddr_reg_mcmtr_offset = 0x360,
+ .ddr_reg_mcmtr_width = 4,
+ .ddr_reg_dimmmtr_offset = 0x370,
+ .ddr_reg_dimmmtr_width = 4,
+ .ubox_base = 0x0,
+ .ubox_size = 0x2000,
+ .ubox_reg_mmio_base_offset = 0x580,
+ .ubox_reg_mmio_base_width = 4,
+ .ubox_reg_socket_id_offset = 0x1080,
+ .ubox_reg_socket_id_width = 4,
+ .pcu_base = 0x3000,
+ .pcu_size = 0x10000,
+ .pcu_reg_capid3_offset = 0x290,
+ .pcu_reg_capid3_width = 4,
+ .sca_base = 0x24c000,
+ .sca_size = 0x2500,
+ .sca_reg_tolm_offset = 0x2100,
+ .sca_reg_tolm_width = 8,
+ .sca_reg_tohm_offset = 0x2108,
+ .sca_reg_tohm_width = 8,
+ .ha_base = 0x3eb000,
+ .ha_size = 0x1000,
+ .ha_reg_mode_offset = 0x4a0,
+ .ha_reg_mode_width = 4,
+};
+
+static const struct x86_cpu_id imh_cpuids[] = {
+ X86_MATCH_VFM(INTEL_DIAMONDRAPIDS_X, &dmr_cfg),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, imh_cpuids);
+
+static struct notifier_block imh_mce_dec = {
+ .notifier_call = skx_mce_check_error,
+ .priority = MCE_PRIO_EDAC,
+};
+
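+/*
+ * Probe sequence: match the CPU model, read TOLM/TOHM, discover the
+ * per-package memory controllers, register them with the EDAC core,
+ * and finally hook into the MCE decode chain.
+ */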
+static int __init imh_init(void)
+{
+ const struct x86_cpu_id *id;
+ struct list_head *edac_list;
+ struct res_config *cfg;
+ const char *owner;
+ u64 tolm, tohm;
+ int rc;
+
+ edac_dbg(2, "\n");
+
+ if (ghes_get_devices())
+ return -EBUSY;
+
+ owner = edac_get_owner();
+ if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
+ return -EBUSY;
+
+ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+ return -ENODEV;
+
+ id = x86_match_cpu(imh_cpuids);
+ if (!id)
+ return -ENODEV;
+ cfg = (struct res_config *)id->driver_data;
+ skx_set_res_cfg(cfg);
+
+ if (!imh_get_tolm_tohm(cfg, &tolm, &tohm))
+ return -ENODEV;
+
+ skx_set_hi_lo(tolm, tohm);
+
+ rc = imh_get_imc_num(cfg);
+ if (rc < 0)
+ goto fail;
+
+ edac_list = skx_get_edac_list();
+
+ rc = imh_get_all_mmio_base_h(cfg, edac_list);
+ if (rc)
+ goto fail;
+
+ rc = imh_get_munits(cfg, edac_list);
+ if (rc)
+ goto fail;
+
+ skx_set_mem_cfg(imh_2lm_enabled(cfg, edac_list));
+
+ rc = imh_register_mci(cfg, edac_list);
+ if (rc)
+ goto fail;
+
+ rc = skx_adxl_get();
+ if (rc)
+ goto fail;
+
+ opstate_init();
+ mce_register_decode_chain(&imh_mce_dec);
+ skx_setup_debug("imh_test");
+
+ imh_printk(KERN_INFO, "%s\n", IMH_REVISION);
+
+ return 0;
+fail:
+ skx_remove();
+ return rc;
+}
+
+static void __exit imh_exit(void)
+{
+ edac_dbg(2, "\n");
+
+ skx_teardown_debug();
+ mce_unregister_decode_chain(&imh_mce_dec);
+ skx_adxl_put();
+ skx_remove();
+}
+
+module_init(imh_init);
+module_exit(imh_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Qiuxu Zhuo");
+MODULE_DESCRIPTION("MC Driver for Intel servers using IMH-based memory controller");
diff --git a/drivers/edac/mem_repair.c b/drivers/edac/mem_repair.c
index 108d69209146..108d69209146 100755..100644
--- a/drivers/edac/mem_repair.c
+++ b/drivers/edac/mem_repair.c
diff --git a/drivers/edac/scrub.c b/drivers/edac/scrub.c
index f9d02af2fc3a..f9d02af2fc3a 100755..100644
--- a/drivers/edac/scrub.c
+++ b/drivers/edac/scrub.c
diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c
index 29897b21fb8e..aa6593ccda2d 100644
--- a/drivers/edac/skx_base.c
+++ b/drivers/edac/skx_base.c
@@ -33,6 +33,15 @@ static unsigned int nvdimm_count;
#define MASK26 0x3FFFFFF /* Mask for 2^26 */
#define MASK29 0x1FFFFFFF /* Mask for 2^29 */
+static struct res_config skx_cfg = {
+ .type = SKX,
+ .decs_did = 0x2016,
+ .busno_cfg_offset = 0xcc,
+ .ddr_imc_num = 2,
+ .ddr_chan_num = 3,
+ .ddr_dimm_num = 2,
+};
+
static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
struct skx_dev *d;
@@ -52,7 +61,7 @@ enum munittype {
struct munit {
u16 did;
- u16 devfn[SKX_NUM_IMC];
+ u16 devfn[2];
u8 busidx;
u8 per_socket;
enum munittype mtype;
@@ -89,11 +98,11 @@ static int get_all_munits(const struct munit *m)
if (!pdev)
break;
ndev++;
- if (m->per_socket == SKX_NUM_IMC) {
- for (i = 0; i < SKX_NUM_IMC; i++)
+ if (m->per_socket == skx_cfg.ddr_imc_num) {
+ for (i = 0; i < skx_cfg.ddr_imc_num; i++)
if (m->devfn[i] == pdev->devfn)
break;
- if (i == SKX_NUM_IMC)
+ if (i == skx_cfg.ddr_imc_num)
goto fail;
}
d = get_skx_dev(pdev->bus, m->busidx);
@@ -157,12 +166,6 @@ fail:
return -ENODEV;
}
-static struct res_config skx_cfg = {
- .type = SKX,
- .decs_did = 0x2016,
- .busno_cfg_offset = 0xcc,
-};
-
static const struct x86_cpu_id skx_cpuids[] = {
X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
{ }
@@ -186,11 +189,11 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci, struct res_config *cfg)
/* Only the mcmtr on the first channel is effective */
pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
- for (i = 0; i < SKX_NUM_CHANNELS; i++) {
+ for (i = 0; i < cfg->ddr_chan_num; i++) {
ndimms = 0;
pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
- for (j = 0; j < SKX_NUM_DIMMS; j++) {
+ for (j = 0; j < cfg->ddr_dimm_num; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
pci_read_config_dword(imc->chan[i].cdev,
0x80 + 4 * j, &mtr);
@@ -620,6 +623,7 @@ static int __init skx_init(void)
return -ENODEV;
cfg = (struct res_config *)id->driver_data;
+ skx_set_res_cfg(cfg);
rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
if (rc)
@@ -652,11 +656,14 @@ static int __init skx_init(void)
goto fail;
edac_dbg(2, "src_id = %d\n", src_id);
- for (i = 0; i < SKX_NUM_IMC; i++) {
+ for (i = 0; i < cfg->ddr_imc_num; i++) {
d->imc[i].mc = mc++;
d->imc[i].lmc = i;
d->imc[i].src_id = src_id;
- rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
+ d->imc[i].num_channels = cfg->ddr_chan_num;
+ d->imc[i].num_dimms = cfg->ddr_dimm_num;
+ rc = skx_register_mci(&d->imc[i], &d->imc[i].chan[0].cdev->dev,
+ pci_name(d->imc[i].chan[0].cdev),
"Skylake Socket", EDAC_MOD_STR,
skx_get_dimm_config, cfg);
if (rc < 0)
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index 39c733dbc5b9..3276afe43922 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -14,9 +14,11 @@
* Copyright (c) 2018, Intel Corporation.
*/
+#include <linux/topology.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/adxl.h>
+#include <linux/overflow.h>
#include <acpi/nfit.h>
#include <asm/mce.h>
#include <asm/uv/uv.h>
@@ -122,7 +124,7 @@ void skx_adxl_put(void)
}
EXPORT_SYMBOL_GPL(skx_adxl_put);
-static void skx_init_mc_mapping(struct skx_dev *d)
+void skx_init_mc_mapping(struct skx_dev *d)
{
/*
* By default, the BIOS presents all memory controllers within each
@@ -130,31 +132,38 @@ static void skx_init_mc_mapping(struct skx_dev *d)
* the logical indices of the memory controllers enumerated by the
* EDAC driver.
*/
- for (int i = 0; i < NUM_IMC; i++)
- d->mc_mapping[i] = i;
+ for (int i = 0; i < d->num_imc; i++)
+ d->imc[i].mc_mapping = i;
}
+EXPORT_SYMBOL_GPL(skx_init_mc_mapping);
void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc)
{
edac_dbg(0, "Set the mapping of mc phy idx to logical idx: %02d -> %02d\n",
pmc, lmc);
- d->mc_mapping[pmc] = lmc;
+ d->imc[lmc].mc_mapping = pmc;
}
EXPORT_SYMBOL_GPL(skx_set_mc_mapping);
-static u8 skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
+static int skx_get_mc_mapping(struct skx_dev *d, u8 pmc)
{
- edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
- pmc, d->mc_mapping[pmc]);
+ for (int lmc = 0; lmc < d->num_imc; lmc++) {
+ if (d->imc[lmc].mc_mapping == pmc) {
+ edac_dbg(0, "Get the mapping of mc phy idx to logical idx: %02d -> %02d\n",
+ pmc, lmc);
- return d->mc_mapping[pmc];
+ return lmc;
+ }
+ }
+
+ return -1;
}
static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
{
+ int i, lmc, len = 0;
struct skx_dev *d;
- int i, len = 0;
if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
res->addr < BIT_ULL(32))) {
@@ -200,7 +209,7 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
res->cs = (int)adxl_values[component_indices[INDEX_CS]];
}
- if (res->imc > NUM_IMC - 1 || res->imc < 0) {
+ if (res->imc < 0) {
skx_printk(KERN_ERR, "Bad imc %d\n", res->imc);
return false;
}
@@ -218,7 +227,13 @@ static bool skx_adxl_decode(struct decoded_addr *res, enum error_source err_src)
return false;
}
- res->imc = skx_get_mc_mapping(d, res->imc);
+ lmc = skx_get_mc_mapping(d, res->imc);
+ if (lmc < 0) {
+ skx_printk(KERN_ERR, "No lmc for imc %d\n", res->imc);
+ return false;
+ }
+
+ res->imc = lmc;
for (i = 0; i < adxl_component_count; i++) {
if (adxl_values[i] == ~0x0ull)
@@ -265,7 +280,7 @@ static int skx_get_pkg_id(struct skx_dev *d, u8 *id)
struct cpuinfo_x86 *c = &cpu_data(cpu);
if (c->initialized && cpu_to_node(cpu) == node) {
- *id = c->topo.pkg_id;
+ *id = topology_physical_package_id(cpu);
return 0;
}
}
@@ -320,10 +335,10 @@ static int get_width(u32 mtr)
*/
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
{
+ int ndev = 0, imc_num = cfg->ddr_imc_num + cfg->hbm_imc_num;
struct pci_dev *pdev, *prev;
struct skx_dev *d;
u32 reg;
- int ndev = 0;
prev = NULL;
for (;;) {
@@ -331,7 +346,7 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
if (!pdev)
break;
ndev++;
- d = kzalloc(sizeof(*d), GFP_KERNEL);
+ d = kzalloc(struct_size(d, imc, imc_num), GFP_KERNEL);
if (!d) {
pci_dev_put(pdev);
return -ENOMEM;
@@ -354,8 +369,10 @@ int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list)
d->seg = GET_BITFIELD(reg, 16, 23);
}
- edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x\n",
- d->bus[0], d->bus[1], d->bus[2], d->bus[3]);
+ d->num_imc = imc_num;
+
+ edac_dbg(2, "busses: 0x%x, 0x%x, 0x%x, 0x%x, imcs %d\n",
+ d->bus[0], d->bus[1], d->bus[2], d->bus[3], imc_num);
list_add_tail(&d->list, &dev_edac_list);
prev = pdev;
@@ -368,6 +385,13 @@
}
EXPORT_SYMBOL_GPL(skx_get_all_bus_mappings);
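+/* Expose the shared skx_dev list so that imh_edac can populate it directly. */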
+struct list_head *skx_get_edac_list(void)
+{
+ return &dev_edac_list;
+}
+EXPORT_SYMBOL_GPL(skx_get_edac_list);
+
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
{
struct pci_dev *pdev;
@@ -408,6 +431,13 @@ fail:
}
EXPORT_SYMBOL_GPL(skx_get_hi_lo);
+void skx_set_hi_lo(u64 tolm, u64 tohm)
+{
+ skx_tolm = tolm;
+ skx_tohm = tohm;
+}
+EXPORT_SYMBOL_GPL(skx_set_hi_lo);
+
static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
int minval, int maxval, const char *name)
{
@@ -421,7 +451,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add,
}
#define numrank(reg) skx_get_dimm_attr(reg, 12, 13, 0, 0, 2, "ranks")
-#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows")
+#define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 7, "rows")
#define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols")
int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
@@ -529,9 +559,9 @@ unknown_size:
}
EXPORT_SYMBOL_GPL(skx_get_nvdimm_info);
-int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
- const char *ctl_name, const char *mod_str,
- get_dimm_config_f get_dimm_config,
+int skx_register_mci(struct skx_imc *imc, struct device *dev,
+ const char *dev_name, const char *ctl_name,
+ const char *mod_str, get_dimm_config_f get_dimm_config,
struct res_config *cfg)
{
struct mem_ctl_info *mci;
@@ -541,10 +571,10 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
/* Allocate a new MC control structure */
layers[0].type = EDAC_MC_LAYER_CHANNEL;
- layers[0].size = NUM_CHANNELS;
+ layers[0].size = imc->num_channels;
layers[0].is_virt_csrow = false;
layers[1].type = EDAC_MC_LAYER_SLOT;
- layers[1].size = NUM_DIMMS;
+ layers[1].size = imc->num_dimms;
layers[1].is_virt_csrow = true;
mci = edac_mc_alloc(imc->mc, ARRAY_SIZE(layers), layers,
sizeof(struct skx_pvt));
@@ -572,7 +602,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
mci->edac_ctl_cap = EDAC_FLAG_NONE;
mci->edac_cap = EDAC_FLAG_NONE;
mci->mod_name = mod_str;
- mci->dev_name = pci_name(pdev);
+ mci->dev_name = dev_name;
mci->ctl_page_to_phys = NULL;
rc = get_dimm_config(mci, cfg);
@@ -580,7 +610,7 @@ int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
goto fail;
/* Record ptr to the generic device */
- mci->pdev = &pdev->dev;
+ mci->pdev = dev;
/* Add this new MC control structure to EDAC's list of MCs */
if (unlikely(edac_mc_add_mc(mci))) {
@@ -784,7 +814,7 @@ void skx_remove(void)
list_for_each_entry_safe(d, tmp, &dev_edac_list, list) {
list_del(&d->list);
- for (i = 0; i < NUM_IMC; i++) {
+ for (i = 0; i < d->num_imc; i++) {
if (d->imc[i].mci)
skx_unregister_mci(&d->imc[i]);
@@ -794,7 +824,10 @@ void skx_remove(void)
if (d->imc[i].mbase)
iounmap(d->imc[i].mbase);
- for (j = 0; j < NUM_CHANNELS; j++) {
+ if (d->imc[i].dev)
+ put_device(d->imc[i].dev);
+
+ for (j = 0; j < d->imc[i].num_channels; j++) {
if (d->imc[i].chan[j].cdev)
pci_dev_put(d->imc[i].chan[j].cdev);
}
@@ -817,7 +850,7 @@ EXPORT_SYMBOL_GPL(skx_remove);
/*
* Debug feature.
* Exercise the address decode logic by writing an address to
- * /sys/kernel/debug/edac/{skx,i10nm}_test/addr.
+ * /sys/kernel/debug/edac/{skx,i10nm,imh}_test/addr.
*/
static struct dentry *skx_test;
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index ec4966f7ea40..f88038e5b18c 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -29,23 +29,18 @@
#define GET_BITFIELD(v, lo, hi) \
(((v) & GENMASK_ULL((hi), (lo))) >> (lo))
-#define SKX_NUM_IMC 2 /* Memory controllers per socket */
#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */
#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */
-#define I10NM_NUM_DDR_IMC 12
#define I10NM_NUM_DDR_CHANNELS 2
#define I10NM_NUM_DDR_DIMMS 2
-#define I10NM_NUM_HBM_IMC 16
#define I10NM_NUM_HBM_CHANNELS 2
#define I10NM_NUM_HBM_DIMMS 1
-#define I10NM_NUM_IMC (I10NM_NUM_DDR_IMC + I10NM_NUM_HBM_IMC)
#define I10NM_NUM_CHANNELS MAX(I10NM_NUM_DDR_CHANNELS, I10NM_NUM_HBM_CHANNELS)
#define I10NM_NUM_DIMMS MAX(I10NM_NUM_DDR_DIMMS, I10NM_NUM_HBM_DIMMS)
-#define NUM_IMC MAX(SKX_NUM_IMC, I10NM_NUM_IMC)
#define NUM_CHANNELS MAX(SKX_NUM_CHANNELS, I10NM_NUM_CHANNELS)
#define NUM_DIMMS MAX(SKX_NUM_DIMMS, I10NM_NUM_DIMMS)
@@ -126,35 +121,49 @@ struct reg_rrl {
* memory controllers on the die.
*/
struct skx_dev {
- struct list_head list;
+ /* {skx,i10nm}_edac */
u8 bus[4];
int seg;
struct pci_dev *sad_all;
struct pci_dev *util_all;
- struct pci_dev *uracu; /* for i10nm CPU */
- struct pci_dev *pcu_cr3; /* for HBM memory detection */
+ struct pci_dev *uracu;
+ struct pci_dev *pcu_cr3;
u32 mcroute;
- /*
- * Some server BIOS may hide certain memory controllers, and the
- * EDAC driver skips those hidden memory controllers. However, the
- * ADXL still decodes memory error address using physical memory
- * controller indices. The mapping table is used to convert the
- * physical indices (reported by ADXL) to the logical indices
- * (used the EDAC driver) of present memory controllers during the
- * error handling process.
- */
- u8 mc_mapping[NUM_IMC];
+
+ /* imh_edac */
+ /* System-view MMIO base physical addresses. */
+ u64 mmio_base_h_north;
+ u64 mmio_base_h_south;
+ int pkg;
+
+ int num_imc;
+ struct list_head list;
struct skx_imc {
+ /* i10nm_edac */
+ struct pci_dev *mdev;
+
+ /* imh_edac */
+ struct device *dev;
+
struct mem_ctl_info *mci;
- struct pci_dev *mdev; /* for i10nm CPU */
- void __iomem *mbase; /* for i10nm CPU */
- int chan_mmio_sz; /* for i10nm CPU */
+ void __iomem *mbase;
+ int chan_mmio_sz;
int num_channels; /* channels per memory controller */
int num_dimms; /* dimms per channel */
bool hbm_mc;
u8 mc; /* system wide mc# */
u8 lmc; /* socket relative mc# */
u8 src_id;
+	/*
+	 * Some server BIOSes may hide certain memory controllers, and the
+	 * EDAC driver skips those hidden memory controllers. However, the
+	 * ADXL still decodes memory error addresses using physical memory
+	 * controller indices. This mapping is used to convert the
+	 * physical indices (reported by ADXL) to the logical indices
+	 * (used by the EDAC driver) of present memory controllers during
+	 * the error handling process.
+	 */
+ u8 mc_mapping;
struct skx_channel {
struct pci_dev *cdev;
struct pci_dev *edev;
@@ -171,7 +180,7 @@ struct skx_dev {
u8 colbits;
} dimms[NUM_DIMMS];
} chan[NUM_CHANNELS];
- } imc[NUM_IMC];
+ } imc[];
};
struct skx_pvt {
@@ -182,7 +191,8 @@ enum type {
SKX,
I10NM,
SPR,
- GNR
+ GNR,
+ DMR,
};
enum {
@@ -241,10 +251,6 @@ struct pci_bdf {
struct res_config {
enum type type;
- /* Configuration agent device ID */
- unsigned int decs_did;
- /* Default bus number configuration register offset */
- int busno_cfg_offset;
/* DDR memory controllers per socket */
int ddr_imc_num;
/* DDR channels per DDR memory controller */
@@ -262,23 +268,57 @@ struct res_config {
/* Per HBM channel memory-mapped I/O size */
int hbm_chan_mmio_sz;
bool support_ddr5;
- /* SAD device BDF */
- struct pci_bdf sad_all_bdf;
- /* PCU device BDF */
- struct pci_bdf pcu_cr3_bdf;
- /* UTIL device BDF */
- struct pci_bdf util_all_bdf;
- /* URACU device BDF */
- struct pci_bdf uracu_bdf;
- /* DDR mdev device BDF */
- struct pci_bdf ddr_mdev_bdf;
- /* HBM mdev device BDF */
- struct pci_bdf hbm_mdev_bdf;
- int sad_all_offset;
/* RRL register sets per DDR channel */
struct reg_rrl *reg_rrl_ddr;
/* RRL register sets per HBM channel */
struct reg_rrl *reg_rrl_hbm[2];
+ union {
+ /* {skx,i10nm}_edac */
+ struct {
+ /* Configuration agent device ID */
+ unsigned int decs_did;
+ /* Default bus number configuration register offset */
+ int busno_cfg_offset;
+ struct pci_bdf sad_all_bdf;
+ struct pci_bdf pcu_cr3_bdf;
+ struct pci_bdf util_all_bdf;
+ struct pci_bdf uracu_bdf;
+ struct pci_bdf ddr_mdev_bdf;
+ struct pci_bdf hbm_mdev_bdf;
+ int sad_all_offset;
+ };
+ /* imh_edac */
+ struct {
+ /* MMIO base physical address in local package view */
+ u64 mmio_base_l_north;
+ u64 mmio_base_l_south;
+ u64 ddr_imc_base;
+ u64 ddr_reg_mcmtr_offset;
+ u8 ddr_reg_mcmtr_width;
+ u64 ddr_reg_dimmmtr_offset;
+ u8 ddr_reg_dimmmtr_width;
+ u64 ubox_base;
+ u32 ubox_size;
+ u32 ubox_reg_mmio_base_offset;
+ u8 ubox_reg_mmio_base_width;
+ u32 ubox_reg_socket_id_offset;
+ u8 ubox_reg_socket_id_width;
+ u64 pcu_base;
+ u32 pcu_size;
+ u32 pcu_reg_capid3_offset;
+ u8 pcu_reg_capid3_width;
+ u64 sca_base;
+ u32 sca_size;
+ u32 sca_reg_tolm_offset;
+ u8 sca_reg_tolm_width;
+ u32 sca_reg_tohm_offset;
+ u8 sca_reg_tohm_width;
+ u64 ha_base;
+ u32 ha_size;
+ u32 ha_reg_mode_offset;
+ u8 ha_reg_mode_width;
+ };
+ };
};
typedef int (*get_dimm_config_f)(struct mem_ctl_info *mci,
@@ -291,13 +331,17 @@ void skx_adxl_put(void);
void skx_set_decode(skx_decode_f decode, skx_show_retry_log_f show_retry_log);
void skx_set_mem_cfg(bool mem_cfg_2lm);
void skx_set_res_cfg(struct res_config *cfg);
+void skx_init_mc_mapping(struct skx_dev *d);
void skx_set_mc_mapping(struct skx_dev *d, u8 pmc, u8 lmc);
int skx_get_src_id(struct skx_dev *d, int off, u8 *id);
int skx_get_all_bus_mappings(struct res_config *cfg, struct list_head **list);
+struct list_head *skx_get_edac_list(void);
+
int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm);
+void skx_set_hi_lo(u64 tolm, u64 tohm);
int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
struct skx_imc *imc, int chan, int dimmno,
@@ -306,7 +350,7 @@ int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm,
int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
int chan, int dimmno, const char *mod_str);
-int skx_register_mci(struct skx_imc *imc, struct pci_dev *pdev,
+int skx_register_mci(struct skx_imc *imc, struct device *dev, const char *dev_name,
const char *ctl_name, const char *mod_str,
get_dimm_config_f get_dimm_config,
struct res_config *cfg);
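
A short sketch of what the two struct changes above enable: imc[] is now a flexible array member sized by num_imc, and mc_mapping lives in each skx_imc. The helper names below (skx_alloc_dev, skx_pmc_to_lmc) are hypothetical, not part of the patch:

#include <linux/overflow.h>
#include <linux/slab.h>

/* Allocate a skx_dev sized for exactly the controllers a platform has. */
static struct skx_dev *skx_alloc_dev(int num_imc)
{
	struct skx_dev *d = kzalloc(struct_size(d, imc, num_imc), GFP_KERNEL);

	if (d)
		d->num_imc = num_imc;
	return d;
}

/* Translate an ADXL physical controller index to the logical EDAC index. */
static int skx_pmc_to_lmc(struct skx_dev *d, u8 pmc)
{
	int lmc;

	for (lmc = 0; lmc < d->num_imc; lmc++)
		if (d->imc[lmc].mc_mapping == pmc)
			return lmc;

	return -ENOENT;	/* hidden or absent memory controller */
}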
diff --git a/drivers/edac/versalnet_edac.c b/drivers/edac/versalnet_edac.c
new file mode 100644
index 000000000000..1a1092793092
--- /dev/null
+++ b/drivers/edac/versalnet_edac.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD Versal NET memory controller driver
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/cdx/edac_cdx_pcol.h>
+#include <linux/edac.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/ras.h>
+#include <linux/remoteproc.h>
+#include <linux/rpmsg.h>
+#include <linux/sizes.h>
+#include <ras/ras_event.h>
+
+#include "edac_module.h"
+
+/* Granularity of reported error in bytes */
+#define MC5_ERR_GRAIN 1
+#define MC_GET_DDR_CONFIG_IN_LEN 4
+
+#define MC5_IRQ_CE_MASK GENMASK(18, 15)
+#define MC5_IRQ_UE_MASK GENMASK(14, 11)
+
+#define MC5_RANK_1_MASK GENMASK(11, 6)
+#define MASK_24 GENMASK(29, 24)
+#define MASK_0 GENMASK(5, 0)
+
+#define MC5_LRANK_1_MASK GENMASK(11, 6)
+#define MC5_LRANK_2_MASK GENMASK(17, 12)
+#define MC5_BANK1_MASK GENMASK(11, 6)
+#define MC5_GRP_0_MASK GENMASK(17, 12)
+#define MC5_GRP_1_MASK GENMASK(23, 18)
+
+#define MC5_REGHI_ROW 7
+#define MC5_EACHBIT 1
+#define MC5_ERR_TYPE_CE 0
+#define MC5_ERR_TYPE_UE 1
+#define MC5_HIGH_MEM_EN BIT(20)
+#define MC5_MEM_MASK GENMASK(19, 0)
+#define MC5_X16_BASE 256
+#define MC5_X16_ECC 32
+#define MC5_X16_SIZE (MC5_X16_BASE + MC5_X16_ECC)
+#define MC5_X32_SIZE 576
+#define MC5_HIMEM_BASE (256 * SZ_1M)
+#define MC5_ILC_HIMEM_EN BIT(28)
+#define MC5_ILC_MEM GENMASK(27, 0)
+#define MC5_INTERLEAVE_SEL GENMASK(3, 0)
+#define MC5_BUS_WIDTH_MASK GENMASK(19, 18)
+#define MC5_NUM_CHANS_MASK BIT(17)
+#define MC5_RANK_MASK GENMASK(15, 14)
+
+#define ERROR_LEVEL 2
+#define ERROR_ID 3
+#define TOTAL_ERR_LENGTH 5
+#define MSG_ERR_OFFSET 8
+#define MSG_ERR_LENGTH 9
+#define ERROR_DATA 10
+#define MCDI_RESPONSE 0xFF
+
+#define REG_MAX 152
+#define ADEC_MAX 152
+#define NUM_CONTROLLERS 8
+#define REGS_PER_CONTROLLER 19
+#define ADEC_NUM 19
+#define BUFFER_SZ 80
+
+#define XDDR5_BUS_WIDTH_64 0
+#define XDDR5_BUS_WIDTH_32 1
+#define XDDR5_BUS_WIDTH_16 2
+
+/**
+ * union ecc_error_info - ECC error log information.
+ * @burstpos: Burst position.
+ * @lrank: Logical Rank number.
+ * @rank: Rank number.
+ * @group: Group number.
+ * @bank: Bank number.
+ * @col: Column number.
+ * @row: Row number.
+ * @rowhi: Row number higher bits.
+ * @i: Combined ECC error vector containing the encoded burst position,
+ * logical rank, rank, group, bank, column, and row information.
+ */
+union ecc_error_info {
+ struct {
+ u32 burstpos:3;
+ u32 lrank:4;
+ u32 rank:2;
+ u32 group:3;
+ u32 bank:2;
+ u32 col:11;
+ u32 row:7;
+ u32 rowhi;
+ };
+ u64 i;
+} __packed;
+
+/* Row and column bit positions in the address decoder (ADEC) registers. */
+union row_col_mapping {
+ struct {
+ u32 row0:6;
+ u32 row1:6;
+ u32 row2:6;
+ u32 row3:6;
+ u32 row4:6;
+ u32 reserved:2;
+ };
+ struct {
+ u32 col1:6;
+ u32 col2:6;
+ u32 col3:6;
+ u32 col4:6;
+ u32 col5:6;
+ u32 reservedcol:2;
+ };
+ u32 i;
+} __packed;
+
+/**
+ * struct ecc_status - ECC status information to report.
+ * @ceinfo: Correctable errors.
+ * @ueinfo: Uncorrected errors.
+ * @channel: Channel number.
+ * @error_type: Error type.
+ */
+struct ecc_status {
+ union ecc_error_info ceinfo[2];
+ union ecc_error_info ueinfo[2];
+ u8 channel;
+ u8 error_type;
+};
+
+/**
+ * struct mc_priv - DDR memory controller private instance data.
+ * @message: Buffer for framing the event specific info.
+ * @stat: ECC status information.
+ * @error_id: The error id.
+ * @error_level: The error level.
+ * @dwidth: Width of data bus excluding ECC bits.
+ * @part_len: Length of the partial message received so far.
+ * @regs: The error registers received over rpmsg.
+ * @adec: Address decode registers.
+ * @mci: Memory controller interface.
+ * @ept: rpmsg endpoint.
+ * @mcdi: The mcdi handle.
+ */
+struct mc_priv {
+ char message[256];
+ struct ecc_status stat;
+ u32 error_id;
+ u32 error_level;
+ u32 dwidth;
+ u32 part_len;
+ u32 regs[REG_MAX];
+ u32 adec[ADEC_MAX];
+ struct mem_ctl_info *mci[NUM_CONTROLLERS];
+ struct rpmsg_endpoint *ept;
+ struct cdx_mcdi *mcdi;
+};
+
+/*
+ * Address decoder (ADEC) registers to match the order in which the register
+ * information is received from the firmware.
+ */
+enum adec_info {
+ CONF = 0,
+ ADEC0,
+ ADEC1,
+ ADEC2,
+ ADEC3,
+ ADEC4,
+ ADEC5,
+ ADEC6,
+ ADEC7,
+ ADEC8,
+ ADEC9,
+ ADEC10,
+ ADEC11,
+ ADEC12,
+ ADEC13,
+ ADEC14,
+ ADEC15,
+ ADEC16,
+ ADECILC,
+};
+
+enum reg_info {
+ ISR = 0,
+ IMR,
+ ECCR0_ERR_STATUS,
+ ECCR0_ADDR_LO,
+ ECCR0_ADDR_HI,
+ ECCR0_DATA_LO,
+ ECCR0_DATA_HI,
+ ECCR0_PAR,
+ ECCR1_ERR_STATUS,
+ ECCR1_ADDR_LO,
+ ECCR1_ADDR_HI,
+ ECCR1_DATA_LO,
+ ECCR1_DATA_HI,
+ ECCR1_PAR,
+ XMPU_ERR,
+ XMPU_ERR_ADDR_L0,
+ XMPU_ERR_ADDR_HI,
+ XMPU_ERR_AXI_ID,
+ ADEC_CHK_ERR_LOG,
+};
+
+static bool get_ddr_info(u32 *error_data, struct mc_priv *priv)
+{
+ u32 reglo, reghi, parity, eccr0_val, eccr1_val, isr;
+ struct ecc_status *p;
+
+ isr = error_data[ISR];
+
+ if (!(isr & (MC5_IRQ_UE_MASK | MC5_IRQ_CE_MASK)))
+ return false;
+
+ eccr0_val = error_data[ECCR0_ERR_STATUS];
+ eccr1_val = error_data[ECCR1_ERR_STATUS];
+
+ if (!eccr0_val && !eccr1_val)
+ return false;
+
+ p = &priv->stat;
+
+ if (!eccr0_val)
+ p->channel = 1;
+ else
+ p->channel = 0;
+
+ reglo = error_data[ECCR0_ADDR_LO];
+ reghi = error_data[ECCR0_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[0].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[0].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR0_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ reglo = error_data[ECCR1_ADDR_LO];
+ reghi = error_data[ECCR1_ADDR_HI];
+ if (isr & MC5_IRQ_CE_MASK)
+ p->ceinfo[1].i = reglo | (u64)reghi << 32;
+ else if (isr & MC5_IRQ_UE_MASK)
+ p->ueinfo[1].i = reglo | (u64)reghi << 32;
+
+ parity = error_data[ECCR1_PAR];
+ edac_dbg(2, "ERR DATA: 0x%08X%08X PARITY: 0x%08X\n",
+ reghi, reglo, parity);
+
+ return true;
+}
+
+/**
+ * convert_to_physical - Convert @error_data to a physical address.
+ * @priv: DDR memory controller private instance data.
+ * @pinf: ECC error info structure.
+ * @controller: Controller number of the MC5.
+ * @error_data: The DDRMC5 ADEC address decoder register data.
+ *
+ * Return: physical address of the DDR memory.
+ */
+static unsigned long convert_to_physical(struct mc_priv *priv,
+ union ecc_error_info pinf,
+ int controller, int *error_data)
+{
+ u32 row, blk, rsh_req_addr, interleave, ilc_base_ctrl_add, ilc_himem_en, reg, offset;
+ u64 high_mem_base, high_mem_offset, low_mem_offset, ilcmem_base;
+ unsigned long err_addr = 0, addr;
+ union row_col_mapping cols;
+ union row_col_mapping rows;
+ u32 col_bit_0;
+
+ row = pinf.rowhi << MC5_REGHI_ROW | pinf.row;
+ offset = controller * ADEC_NUM;
+
+ reg = error_data[ADEC6];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC7];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+ row >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC8];
+ rows.i = reg;
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row3;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row4;
+
+ reg = error_data[ADEC9];
+ rows.i = reg;
+
+ err_addr |= (row & BIT(0)) << rows.row0;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row1;
+ row >>= MC5_EACHBIT;
+ err_addr |= (row & BIT(0)) << rows.row2;
+ row >>= MC5_EACHBIT;
+
+ col_bit_0 = FIELD_GET(MASK_24, error_data[ADEC9]);
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << col_bit_0;
+
+ cols.i = error_data[ADEC10];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ cols.i = error_data[ADEC11];
+ err_addr |= (pinf.col & 1) << cols.col1;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col2;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col3;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col4;
+ pinf.col >>= 1;
+ err_addr |= (pinf.col & 1) << cols.col5;
+ pinf.col >>= 1;
+
+ reg = error_data[ADEC12];
+ err_addr |= (pinf.bank & BIT(0)) << (reg & MASK_0);
+ pinf.bank >>= MC5_EACHBIT;
+ err_addr |= (pinf.bank & BIT(0)) << FIELD_GET(MC5_BANK1_MASK, reg);
+ pinf.bank >>= MC5_EACHBIT;
+
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_0_MASK, reg);
+ pinf.group >>= MC5_EACHBIT;
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MC5_GRP_1_MASK, reg);
+ pinf.group >>= MC5_EACHBIT;
+ err_addr |= (pinf.group & BIT(0)) << FIELD_GET(MASK_24, reg);
+ pinf.group >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC4];
+ err_addr |= (pinf.rank & BIT(0)) << (reg & MASK_0);
+ pinf.rank >>= MC5_EACHBIT;
+ err_addr |= (pinf.rank & BIT(0)) << FIELD_GET(MC5_RANK_1_MASK, reg);
+ pinf.rank >>= MC5_EACHBIT;
+
+ reg = error_data[ADEC5];
+ err_addr |= (pinf.lrank & BIT(0)) << (reg & MASK_0);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_1_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MC5_LRANK_2_MASK, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+ err_addr |= (pinf.lrank & BIT(0)) << FIELD_GET(MASK_24, reg);
+ pinf.lrank >>= MC5_EACHBIT;
+
+ high_mem_base = (priv->adec[ADEC2 + offset] & MC5_MEM_MASK) * MC5_HIMEM_BASE;
+ interleave = priv->adec[ADEC13 + offset] & MC5_INTERLEAVE_SEL;
+
+ high_mem_offset = priv->adec[ADEC3 + offset] & MC5_MEM_MASK;
+ low_mem_offset = priv->adec[ADEC1 + offset] & MC5_MEM_MASK;
+ reg = priv->adec[ADEC14 + offset];
+ ilc_himem_en = !!(reg & MC5_ILC_HIMEM_EN);
+ ilcmem_base = (reg & MC5_ILC_MEM) * SZ_1M;
+ if (ilc_himem_en)
+ ilc_base_ctrl_add = ilcmem_base - high_mem_offset;
+ else
+ ilc_base_ctrl_add = ilcmem_base - low_mem_offset;
+
+ if (priv->dwidth == DEV_X16) {
+ blk = err_addr / MC5_X16_SIZE;
+ rsh_req_addr = (blk << 8) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ } else {
+ blk = err_addr / MC5_X32_SIZE;
+ rsh_req_addr = (blk << 9) + ilc_base_ctrl_add;
+ err_addr = rsh_req_addr * interleave * 2;
+ }
+
+ if ((priv->adec[ADEC2 + offset] & MC5_HIGH_MEM_EN) && err_addr >= high_mem_base)
+ addr = err_addr - high_mem_offset;
+ else
+ addr = err_addr - low_mem_offset;
+
+ return addr;
+}
+
+/**
+ * handle_error - Handle errors.
+ * @priv: DDR memory controller private instance data.
+ * @stat: ECC status structure.
+ * @ctl_num: Controller number of the MC5.
+ * @error_data: The MC5 ADEC address decoder register data.
+ *
+ * Handles ECC correctable and uncorrectable errors.
+ */
+static void handle_error(struct mc_priv *priv, struct ecc_status *stat,
+ int ctl_num, int *error_data)
+{
+ union ecc_error_info pinf;
+ struct mem_ctl_info *mci;
+ unsigned long pa;
+ phys_addr_t pfn;
+ int err;
+
+ if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))
+ return;
+
+ mci = priv->mci[ctl_num];
+
+ if (stat->error_type == MC5_ERR_TYPE_CE) {
+ pinf = stat->ceinfo[stat->channel];
+ snprintf(priv->message, sizeof(priv->message),
+ "Error type:%s Controller %d Addr at %lx\n",
+ "CE", ctl_num, convert_to_physical(priv, pinf, ctl_num, error_data));
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ }
+
+ if (stat->error_type == MC5_ERR_TYPE_UE) {
+ pinf = stat->ueinfo[stat->channel];
+ pa = convert_to_physical(priv, pinf, ctl_num, error_data);
+ snprintf(priv->message, sizeof(priv->message),
+ "Error type:%s controller %d Addr at %lx\n",
+ "UE", ctl_num, pa);
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 1, 0, 0, 0, 0, 0, -1,
+ priv->message, "");
+ pfn = PHYS_PFN(pa);
+
+ if (IS_ENABLED(CONFIG_MEMORY_FAILURE)) {
+ err = memory_failure(pfn, MF_ACTION_REQUIRED);
+ if (err)
+ edac_dbg(2, "memory_failure() error: %d", err);
+ else
+ edac_dbg(2, "Poison page at PA 0x%lx\n", pa);
+ }
+ }
+}
+
+static void mc_init(struct mem_ctl_info *mci, struct device *dev)
+{
+ struct mc_priv *priv = mci->pvt_info;
+ struct csrow_info *csi;
+ struct dimm_info *dimm;
+ u32 row;
+ int ch;
+
+ /* Initialize controller capabilities and configuration */
+ mci->mtype_cap = MEM_FLAG_DDR5;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->scrub_cap = SCRUB_HW_SRC;
+ mci->scrub_mode = SCRUB_NONE;
+
+ mci->edac_cap = EDAC_FLAG_SECDED;
+ mci->ctl_name = "VersalNET DDR5";
+ mci->dev_name = dev_name(dev);
+ mci->mod_name = "versalnet_edac";
+
+ edac_op_state = EDAC_OPSTATE_INT;
+
+ for (row = 0; row < mci->nr_csrows; row++) {
+ csi = mci->csrows[row];
+ for (ch = 0; ch < csi->nr_channels; ch++) {
+ dimm = csi->channels[ch]->dimm;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->mtype = MEM_DDR5;
+ dimm->grain = MC5_ERR_GRAIN;
+ dimm->dtype = priv->dwidth;
+ }
+ }
+}
+
+#define to_mci(k) container_of(k, struct mem_ctl_info, dev)
+
+static unsigned int mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
+{
+ return MCDI_RPC_TIMEOUT;
+}
+
+static void mcdi_request(struct cdx_mcdi *cdx,
+ const struct cdx_dword *hdr, size_t hdr_len,
+ const struct cdx_dword *sdu, size_t sdu_len)
+{
+ void *send_buf;
+ int ret;
+
+ send_buf = kzalloc(hdr_len + sdu_len, GFP_KERNEL);
+ if (!send_buf)
+ return;
+
+ memcpy(send_buf, hdr, hdr_len);
+ memcpy(send_buf + hdr_len, sdu, sdu_len);
+
+ ret = rpmsg_send(cdx->ept, send_buf, hdr_len + sdu_len);
+ if (ret)
+ dev_err(&cdx->rpdev->dev, "Failed to send rpmsg data: %d\n", ret);
+
+ kfree(send_buf);
+}
+
+static const struct cdx_mcdi_ops mcdi_ops = {
+ .mcdi_rpc_timeout = mcdi_rpc_timeout,
+ .mcdi_request = mcdi_request,
+};
+
+static void get_ddr_config(u32 index, u32 *buffer, struct cdx_mcdi *amd_mcdi)
+{
+ size_t outlen;
+ int ret;
+
+ MCDI_DECLARE_BUF(inbuf, MC_GET_DDR_CONFIG_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, BUFFER_SZ);
+
+ MCDI_SET_DWORD(inbuf, EDAC_GET_DDR_CONFIG_IN_CONTROLLER_INDEX, index);
+
+ ret = cdx_mcdi_rpc(amd_mcdi, MC_CMD_EDAC_GET_DDR_CONFIG, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (!ret)
+ memcpy(buffer, MCDI_PTR(outbuf, GET_DDR_CONFIG),
+ (ADEC_NUM * 4));
+}
+
+static int setup_mcdi(struct mc_priv *mc_priv)
+{
+ struct cdx_mcdi *amd_mcdi;
+ int ret, i;
+
+ amd_mcdi = kzalloc(sizeof(*amd_mcdi), GFP_KERNEL);
+ if (!amd_mcdi)
+ return -ENOMEM;
+
+ amd_mcdi->mcdi_ops = &mcdi_ops;
+ ret = cdx_mcdi_init(amd_mcdi);
+ if (ret) {
+ kfree(amd_mcdi);
+ return ret;
+ }
+
+ amd_mcdi->ept = mc_priv->ept;
+ mc_priv->mcdi = amd_mcdi;
+
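+ /* adec[] holds ADEC_NUM (19) decode words per controller, back to back. */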
+ for (i = 0; i < NUM_CONTROLLERS; i++)
+ get_ddr_config(i, &mc_priv->adec[ADEC_NUM * i], amd_mcdi);
+
+ return 0;
+}
+
+static const guid_t amd_versalnet_guid = GUID_INIT(0x82678888, 0xa556, 0x44f2,
+ 0xb8, 0xb4, 0x45, 0x56, 0x2e,
+ 0x8c, 0x5b, 0xec);
+
+static int rpmsg_cb(struct rpmsg_device *rpdev, void *data,
+ int len, void *priv, u32 src)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+ const guid_t *sec_type = &guid_null;
+ u32 length, offset, error_id;
+ u32 *result = (u32 *)data;
+ struct ecc_status *p;
+ int i, j, k, sec_sev;
+ const char *err_str;
+ u32 *adec_data;
+
+ if (*(u8 *)data == MCDI_RESPONSE) {
+ cdx_mcdi_process_cmd(mc_priv->mcdi, (struct cdx_dword *)data, len);
+ return 0;
+ }
+
+ sec_sev = result[ERROR_LEVEL];
+ error_id = result[ERROR_ID];
+ length = result[MSG_ERR_LENGTH];
+ offset = result[MSG_ERR_OFFSET];
+
+ /*
+ * The data can arrive split across two messages. Reassemble regs[]
+ * from both: the offset indicates where in regs[] this chunk of
+ * data belongs.
+ */
+ for (i = 0; i < length; i++) {
+ k = offset + i;
+ j = ERROR_DATA + i;
+ mc_priv->regs[k] = result[j];
+ }
+
+ if (result[TOTAL_ERR_LENGTH] > length) {
+ mc_priv->part_len += length;
+
+ if (mc_priv->part_len < result[TOTAL_ERR_LENGTH])
+ return 0;
+ mc_priv->part_len = 0;
+ }
+
+ mc_priv->error_id = error_id;
+ mc_priv->error_level = result[ERROR_LEVEL];
+
+ switch (error_id) {
+ case 5: err_str = "General Software Non-Correctable error"; break;
+ case 6: err_str = "CFU error"; break;
+ case 7: err_str = "CFRAME error"; break;
+ case 10: err_str = "DDRMC Microblaze Correctable ECC error"; break;
+ case 11: err_str = "DDRMC Microblaze Non-Correctable ECC error"; break;
+ case 15: err_str = "MMCM error"; break;
+ case 16: err_str = "HNICX Correctable error"; break;
+ case 17: err_str = "HNICX Non-Correctable error"; break;
+
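+ /* Error IDs 18 (correctable) and 19 (uncorrectable) carry raw DDRMC5 register dumps. */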
+ case 18:
+ case 19:
+ p = &mc_priv->stat;
+ memset(p, 0, sizeof(struct ecc_status));
+ p->error_type = (error_id == 18) ? MC5_ERR_TYPE_CE : MC5_ERR_TYPE_UE;
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ if (get_ddr_info(&mc_priv->regs[i * REGS_PER_CONTROLLER], mc_priv)) {
+ adec_data = mc_priv->adec + ADEC_NUM * i;
+ handle_error(mc_priv, &mc_priv->stat, i, adec_data);
+ }
+ }
+ return 0;
+
+ case 21: err_str = "GT Non-Correctable error"; break;
+ case 22: err_str = "PL Sysmon Correctable error"; break;
+ case 23: err_str = "PL Sysmon Non-Correctable error"; break;
+ case 111: err_str = "LPX unexpected dfx activation error"; break;
+ case 114: err_str = "INT_LPD Non-Correctable error"; break;
+ case 116: err_str = "INT_OCM Non-Correctable error"; break;
+ case 117: err_str = "INT_FPD Correctable error"; break;
+ case 118: err_str = "INT_FPD Non-Correctable error"; break;
+ case 120: err_str = "INT_IOU Non-Correctable error"; break;
+ case 123: err_str = "err_int_irq from APU GIC Distributor"; break;
+ case 124: err_str = "fault_int_irq from APU GIC Distributor"; break;
+ case 132 ... 139: err_str = "FPX SPLITTER error"; break;
+ case 140: err_str = "APU Cluster 0 error"; break;
+ case 141: err_str = "APU Cluster 1 error"; break;
+ case 142: err_str = "APU Cluster 2 error"; break;
+ case 143: err_str = "APU Cluster 3 error"; break;
+ case 145: err_str = "WWDT1 LPX error"; break;
+ case 147: err_str = "IPI error"; break;
+ case 152 ... 153: err_str = "AFIFS error"; break;
+ case 154 ... 155: err_str = "LPX glitch error"; break;
+ case 185 ... 186: err_str = "FPX AFIFS error"; break;
+ case 195 ... 199: err_str = "AFIFM error"; break;
+ case 108: err_str = "PSM Correctable error"; break;
+ case 59: err_str = "PMC correctable error"; break;
+ case 60: err_str = "PMC Uncorrectable error"; break;
+ case 43 ... 47: err_str = "PMC Sysmon error"; break;
+ case 163 ... 184: err_str = "RPU error"; break;
+ case 148: err_str = "OCM0 correctable error"; break;
+ case 149: err_str = "OCM1 correctable error"; break;
+ case 150: err_str = "OCM0 Un-correctable error"; break;
+ case 151: err_str = "OCM1 Un-correctable error"; break;
+ case 189: err_str = "PSX_CMN_3 PD block consolidated error"; break;
+ case 191: err_str = "FPD_INT_WRAP PD block consolidated error"; break;
+ case 232: err_str = "CRAM Un-Correctable error"; break;
+ default: err_str = "Unknown error"; break;
+ }
+
+ snprintf(mc_priv->message,
+ sizeof(mc_priv->message),
+ "[VERSAL_EDAC_ERR_ID: %d] Error type: %s", error_id, err_str);
+
+ /* Convert to bytes */
+ length = result[TOTAL_ERR_LENGTH] * 4;
+ log_non_standard_event(sec_type, &amd_versalnet_guid, mc_priv->message,
+ sec_sev, (void *)&mc_priv->regs, length);
+
+ return 0;
+}
+
+static struct rpmsg_device_id amd_rpmsg_id_table[] = {
+ { .name = "error_ipc" },
+ { },
+};
+MODULE_DEVICE_TABLE(rpmsg, amd_rpmsg_id_table);
+
+static int rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ struct rpmsg_channel_info chinfo;
+ struct mc_priv *pg;
+
+ pg = (struct mc_priv *)amd_rpmsg_id_table[0].driver_data;
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = rpdev->dst;
+ strscpy(chinfo.name, amd_rpmsg_id_table[0].name, sizeof(chinfo.name));
+
+ pg->ept = rpmsg_create_ept(rpdev, rpmsg_cb, NULL, chinfo);
+ if (!pg->ept)
+ return dev_err_probe(&rpdev->dev, -ENXIO, "Failed to create ept for channel %s\n",
+ chinfo.name);
+
+ dev_set_drvdata(&rpdev->dev, pg);
+
+ return 0;
+}
+
+static void rpmsg_remove(struct rpmsg_device *rpdev)
+{
+ struct mc_priv *mc_priv = dev_get_drvdata(&rpdev->dev);
+
+ rpmsg_destroy_ept(mc_priv->ept);
+ dev_set_drvdata(&rpdev->dev, NULL);
+}
+
+static struct rpmsg_driver amd_rpmsg_driver = {
+ .drv.name = KBUILD_MODNAME,
+ .probe = rpmsg_probe,
+ .remove = rpmsg_remove,
+ .callback = rpmsg_cb,
+ .id_table = amd_rpmsg_id_table,
+};
+
+static void versal_edac_release(struct device *dev)
+{
+ kfree(dev);
+}
+
+static int init_versalnet(struct mc_priv *priv, struct platform_device *pdev)
+{
+ u32 num_chans, rank, dwidth, config;
+ struct edac_mc_layer layers[2];
+ struct mem_ctl_info *mci;
+ struct device *dev;
+ enum dev_type dt;
+ char *name;
+ int rc, i;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ config = priv->adec[CONF + i * ADEC_NUM];
+ num_chans = FIELD_GET(MC5_NUM_CHANS_MASK, config);
+ rank = 1 << FIELD_GET(MC5_RANK_MASK, config);
+ dwidth = FIELD_GET(MC5_BUS_WIDTH_MASK, config);
+
+ switch (dwidth) {
+ case XDDR5_BUS_WIDTH_16:
+ dt = DEV_X16;
+ break;
+ case XDDR5_BUS_WIDTH_32:
+ dt = DEV_X32;
+ break;
+ case XDDR5_BUS_WIDTH_64:
+ dt = DEV_X64;
+ break;
+ default:
+ dt = DEV_UNKNOWN;
+ }
+
+ if (dt == DEV_UNKNOWN)
+ continue;
+
+ /* Find the first enabled device and register that one. */
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = rank;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+
+ rc = -ENOMEM;
+ mci = edac_mc_alloc(i, ARRAY_SIZE(layers), layers,
+ sizeof(struct mc_priv));
+ if (!mci) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed memory allocation for MC%d\n", i);
+ goto err_alloc;
+ }
+
+ priv->mci[i] = mci;
+ priv->dwidth = dt;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ goto err_alloc;
+ dev->release = versal_edac_release;
+ name = kasprintf(GFP_KERNEL, "versal-net-ddrmc5-edac-%d", i);
+ if (!name) {
+ kfree(dev);
+ goto err_alloc;
+ }
+ dev->init_name = name;
+ rc = device_register(dev);
+ if (rc)
+ goto err_alloc;
+
+ mci->pdev = dev;
+
+ platform_set_drvdata(pdev, priv);
+
+ mc_init(mci, dev);
+ rc = edac_mc_add_mc(mci);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register MC%d with EDAC core\n", i);
+ goto err_alloc;
+ }
+ }
+ return 0;
+
+err_alloc:
+ while (i--) {
+ mci = priv->mci[i];
+ if (!mci)
+ continue;
+
+ if (mci->pdev) {
+ edac_mc_del_mc(mci->pdev);
+ device_unregister(mci->pdev);
+ }
+
+ edac_mc_free(mci);
+ }
+
+ return rc;
+}
+
+static void remove_versalnet(struct mc_priv *priv)
+{
+ struct mem_ctl_info *mci;
+ int i;
+
+ for (i = 0; i < NUM_CONTROLLERS; i++) {
+ if (!priv->mci[i])
+ continue;
+
+ mci = edac_mc_del_mc(priv->mci[i]->pdev);
+ device_unregister(priv->mci[i]->pdev);
+ if (mci)
+ edac_mc_free(mci);
+ }
+}
+
+static int mc_probe(struct platform_device *pdev)
+{
+ struct device_node *r5_core_node;
+ struct mc_priv *priv;
+ struct rproc *rp;
+ int rc;
+
+ r5_core_node = of_parse_phandle(pdev->dev.of_node, "amd,rproc", 0);
+ if (!r5_core_node) {
+ dev_err(&pdev->dev, "amd,rproc: invalid phandle\n");
+ return -EINVAL;
+ }
+
+ rp = rproc_get_by_phandle(r5_core_node->phandle);
+ of_node_put(r5_core_node);
+ if (!rp)
+ return -EPROBE_DEFER;
+
+ rc = rproc_boot(rp);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to attach to remote processor\n");
+ goto err_rproc_boot;
+ }
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ amd_rpmsg_id_table[0].driver_data = (kernel_ulong_t)priv;
+
+ rc = register_rpmsg_driver(&amd_rpmsg_driver);
+ if (rc) {
+ edac_printk(KERN_ERR, EDAC_MC, "Failed to register RPMsg driver: %d\n", rc);
+ goto err_alloc;
+ }
+
+ rc = setup_mcdi(priv);
+ if (rc)
+ goto err_unreg;
+
+ priv->mcdi->r5_rproc = rp;
+
+ rc = init_versalnet(priv, pdev);
+ if (rc)
+ goto err_init;
+
+ return 0;
+
+err_init:
+ cdx_mcdi_finish(priv->mcdi);
+
+err_unreg:
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+
+err_alloc:
+ rproc_shutdown(rp);
+
+err_rproc_boot:
+ rproc_put(rp);
+
+ return rc;
+}
+
+static void mc_remove(struct platform_device *pdev)
+{
+ struct mc_priv *priv = platform_get_drvdata(pdev);
+
+ unregister_rpmsg_driver(&amd_rpmsg_driver);
+ remove_versalnet(priv);
+ rproc_shutdown(priv->mcdi->r5_rproc);
+ cdx_mcdi_finish(priv->mcdi);
+}
+
+static const struct of_device_id amd_edac_match[] = {
+ { .compatible = "xlnx,versal-net-ddrmc5", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, amd_edac_match);
+
+static struct platform_driver amd_ddr_edac_mc_driver = {
+ .driver = {
+ .name = "versal-net-edac",
+ .of_match_table = amd_edac_match,
+ },
+ .probe = mc_probe,
+ .remove = mc_remove,
+};
+
+module_platform_driver(amd_ddr_edac_mc_driver);
+
+MODULE_AUTHOR("AMD Inc");
+MODULE_DESCRIPTION("Versal NET EDAC driver");
+MODULE_LICENSE("GPL");
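
A sketch for convert_to_physical() above: each ADEC register packs up to five 6-bit destination bit positions (row0..row4, col1..col5), so the repeated OR-and-shift blocks all follow one pattern. A hypothetical loop-based equivalent, not part of the driver:

/* Scatter the low nbits of *src into err_addr at the 6-bit positions
 * packed into adec_reg, consuming *src as it goes. */
static u64 adec_scatter(u64 err_addr, u32 *src, u32 adec_reg, int nbits)
{
	int i;

	for (i = 0; i < nbits; i++) {
		u32 pos = (adec_reg >> (i * 6)) & 0x3f;

		err_addr |= (u64)(*src & 1) << pos;
		*src >>= 1;
	}
	return err_addr;
}

With such a helper, the ADEC6 row block would reduce to err_addr = adec_scatter(err_addr, &row, error_data[ADEC6], 5), and similarly for ADEC7, ADEC8 and the column registers.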
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index edceea083b98..bd76d599109c 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -135,7 +135,7 @@ static int eisa_bus_uevent(const struct device *dev, struct kobj_uevent_env *env
return 0;
}
-struct bus_type eisa_bus_type = {
+const struct bus_type eisa_bus_type = {
.name = "eisa",
.match = eisa_bus_match,
.uevent = eisa_bus_uevent,
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index a6f6d467aacf..aec46bf03302 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -134,6 +134,19 @@ config EXTCON_MAX8997
Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
detector and switch.
+config EXTCON_MAX14526
+ tristate "Maxim MAX14526 EXTCON Support"
+ depends on I2C
+ select IRQ_DOMAIN
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Maxim MAX14526
+ MUIC device. The MAX14526 MUIC is a USB port accessory
+ detector and switch. The MAX14526 is designed to simplify
+ interface requirements on portable devices by multiplexing
+ common inputs (USB, UART, Microphone, Stereo Audio and
+ Composite Video) on a single micro/mini USB connector.
+
config EXTCON_PALMAS
tristate "Palmas USB EXTCON support"
depends on MFD_PALMAS
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0d6d23faf748..6482f2bfd661 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
+obj-$(CONFIG_EXTCON_MAX14526) += extcon-max14526.o
obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o
obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 46c40d85c2ac..7e3c9f38297b 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -164,6 +164,8 @@ static void adc_jack_remove(struct platform_device *pdev)
{
struct adc_jack_data *data = platform_get_drvdata(pdev);
+ if (data->wakeup_source)
+ device_init_wakeup(&pdev->dev, false);
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
}
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index d3bcbe839c09..19856dddade6 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -470,7 +470,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- device_init_wakeup(dev, true);
+ devm_device_init_wakeup(dev);
platform_set_drvdata(pdev, info);
return 0;
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index b11b43171063..a031eb0914a0 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -317,7 +317,7 @@ static int fsa9480_probe(struct i2c_client *client)
return ret;
}
- device_init_wakeup(info->dev, true);
+ devm_device_init_wakeup(info->dev);
fsa9480_detect_dev(info);
return 0;
diff --git a/drivers/extcon/extcon-max14526.c b/drivers/extcon/extcon-max14526.c
new file mode 100644
index 000000000000..3750a5c20612
--- /dev/null
+++ b/drivers/extcon/extcon-max14526.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/extcon-provider.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+/* MUIC internal register addresses */
+#define MAX14526_DEVICE_ID 0x00
+#define MAX14526_ID 0x02
+
+/* CONTROL_1 register masks */
+#define MAX14526_CONTROL_1 0x01
+#define ID_2P2 BIT(6)
+#define ID_620 BIT(5)
+#define ID_200 BIT(4)
+#define VLDO BIT(3)
+#define SEMREN BIT(2)
+#define ADC_EN BIT(1)
+#define CP_EN BIT(0)
+
+/* CONTROL_2 register masks */
+#define MAX14526_CONTROL_2 0x02
+#define INTPOL BIT(7)
+#define INT_EN BIT(6)
+#define MIC_LP BIT(5)
+#define CP_AUD BIT(4)
+#define CHG_TYPE BIT(1)
+#define USB_DET_DIS BIT(0)
+
+/* SW_CONTROL register masks */
+#define MAX14526_SW_CONTROL 0x03
+#define SW_DATA 0x00
+#define SW_UART 0x01
+#define SW_AUDIO 0x02
+#define SW_OPEN 0x07
+
+/* INT_STATUS register masks */
+#define MAX14526_INT_STAT 0x04
+#define CHGDET BIT(7)
+#define MR_COMP BIT(6)
+#define SENDEND BIT(5)
+#define V_VBUS BIT(4)
+
+/* STATUS register masks */
+#define MAX14526_STATUS 0x05
+#define CPORT BIT(7)
+#define CHPORT BIT(6)
+#define C1COMP BIT(0)
+
+enum max14526_idno_resistance {
+ MAX14526_GND,
+ MAX14526_24KOHM,
+ MAX14526_56KOHM,
+ MAX14526_100KOHM,
+ MAX14526_130KOHM,
+ MAX14526_180KOHM,
+ MAX14526_240KOHM,
+ MAX14526_330KOHM,
+ MAX14526_430KOHM,
+ MAX14526_620KOHM,
+ MAX14526_910KOHM,
+ MAX14526_OPEN
+};
+
+enum max14526_field_idx {
+ VENDOR_ID, CHIP_REV, /* DEVID */
+ DM, DP, /* SW_CONTROL */
+ MAX14526_N_REGMAP_FIELDS
+};
+
+static const struct reg_field max14526_reg_field[MAX14526_N_REGMAP_FIELDS] = {
+ [VENDOR_ID] = REG_FIELD(MAX14526_DEVICE_ID, 4, 7),
+ [CHIP_REV] = REG_FIELD(MAX14526_DEVICE_ID, 0, 3),
+ [DM] = REG_FIELD(MAX14526_SW_CONTROL, 0, 2),
+ [DP] = REG_FIELD(MAX14526_SW_CONTROL, 3, 5),
+};
+
+struct max14526_data {
+ struct i2c_client *client;
+ struct extcon_dev *edev;
+
+ struct regmap *regmap;
+ struct regmap_field *rfield[MAX14526_N_REGMAP_FIELDS];
+
+ int last_state;
+ int cable;
+};
+
+enum max14526_muic_modes {
+ MAX14526_OTG = MAX14526_GND, /* no power */
+ MAX14526_MHL = MAX14526_56KOHM, /* no power */
+ MAX14526_OTG_Y = MAX14526_GND | V_VBUS,
+ MAX14526_MHL_CHG = MAX14526_GND | V_VBUS | CHGDET,
+ MAX14526_NONE = MAX14526_OPEN,
+ MAX14526_USB = MAX14526_OPEN | V_VBUS,
+ MAX14526_CHG = MAX14526_OPEN | V_VBUS | CHGDET,
+};
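+
+/*
+ * Example: a plain USB cable reads back from INT_STAT as
+ * MAX14526_OPEN | V_VBUS (ID pin open with VBUS present).
+ */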
+
+static const unsigned int max14526_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_FAST,
+ EXTCON_DISP_MHL,
+ EXTCON_NONE,
+};
+
+static int max14526_ap_usb_mode(struct max14526_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ /* Enable USB Path */
+ ret = regmap_field_write(priv->rfield[DM], SW_DATA);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(priv->rfield[DP], SW_DATA);
+ if (ret)
+ return ret;
+
+ /* Enable 200K, Charger Pump and ADC */
+ ret = regmap_write(priv->regmap, MAX14526_CONTROL_1,
+ ID_200 | ADC_EN | CP_EN);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "AP USB mode set\n");
+
+ return 0;
+}
+
+static irqreturn_t max14526_interrupt(int irq, void *dev_id)
+{
+ struct max14526_data *priv = dev_id;
+ struct device *dev = &priv->client->dev;
+ int state, ret;
+
+ /*
+ * Upon an MUIC IRQ (MUIC_INT_N falls), wait at least 70ms
+ * before reading INT_STAT and STATUS. After the reads,
+ * MUIC_INT_N returns to high (but the INT_STAT and STATUS
+ * contents will be held).
+ */
+ msleep(100);
+
+ ret = regmap_read(priv->regmap, MAX14526_INT_STAT, &state);
+ if (ret) {
+ dev_err(dev, "failed to read MUIC state: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+
+ if (state == priv->last_state)
+ return IRQ_HANDLED;
+
+ /* Detach previous device */
+ extcon_set_state_sync(priv->edev, priv->cable, false);
+
+ switch (state) {
+ case MAX14526_USB:
+ priv->cable = EXTCON_USB;
+ break;
+
+ case MAX14526_CHG:
+ priv->cable = EXTCON_CHG_USB_FAST;
+ break;
+
+ case MAX14526_OTG:
+ case MAX14526_OTG_Y:
+ priv->cable = EXTCON_USB_HOST;
+ break;
+
+ case MAX14526_MHL:
+ case MAX14526_MHL_CHG:
+ priv->cable = EXTCON_DISP_MHL;
+ break;
+
+ case MAX14526_NONE:
+ default:
+ priv->cable = EXTCON_NONE;
+ break;
+ }
+
+ extcon_set_state_sync(priv->edev, priv->cable, true);
+
+ priv->last_state = state;
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config max14526_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX14526_STATUS,
+};
+
+static int max14526_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max14526_data *priv;
+ int ret, dev_id, rev, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->regmap = devm_regmap_init_i2c(client, &max14526_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap), "cannot allocate regmap\n");
+
+ for (i = 0; i < MAX14526_N_REGMAP_FIELDS; i++) {
+ priv->rfield[i] = devm_regmap_field_alloc(dev, priv->regmap,
+ max14526_reg_field[i]);
+ if (IS_ERR(priv->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(priv->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
+
+ /* Detect if MUIC version is supported */
+ ret = regmap_field_read(priv->rfield[VENDOR_ID], &dev_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read MUIC ID\n");
+
+ regmap_field_read(priv->rfield[CHIP_REV], &rev);
+
+ if (dev_id != MAX14526_ID)
+ return dev_err_probe(dev, -EINVAL, "MUIC vendor id 0x%X is not recognized\n", dev_id);
+
+ dev_info(dev, "detected MAX14526 MUIC with id 0x%x, rev 0x%x\n", dev_id, rev);
+
+ priv->edev = devm_extcon_dev_allocate(dev, max14526_extcon_cable);
+ if (IS_ERR(priv->edev))
+ return dev_err_probe(dev, PTR_ERR(priv->edev),
+ "failed to allocate extcon device\n");
+
+ ret = devm_extcon_dev_register(dev, priv->edev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register extcon device\n");
+
+ ret = max14526_ap_usb_mode(priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to set AP USB mode\n");
+
+ regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, INT_EN, INT_EN);
+ regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, USB_DET_DIS, 0);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL, &max14526_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, client->name, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register IRQ\n");
+
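+ /* Kick the IRQ thread once so the boot-time cable state is read and reported. */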
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static int max14526_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max14526_data *priv = i2c_get_clientdata(client);
+
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(max14526_pm_ops, NULL, max14526_resume);
+
+static const struct of_device_id max14526_match[] = {
+ { .compatible = "maxim,max14526" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max14526_match);
+
+static const struct i2c_device_id max14526_id[] = {
+ { "max14526" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max14526_id);
+
+static struct i2c_driver max14526_driver = {
+ .driver = {
+ .name = "max14526",
+ .of_match_table = max14526_match,
+ .pm = &max14526_pm_ops,
+ },
+ .probe = max14526_probe,
+ .id_table = max14526_id,
+};
+module_i2c_driver(max14526_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("MAX14526 extcon driver to support MUIC");
+MODULE_LICENSE("GPL");
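
For context, the consumer side of this driver is plain extcon. extcon_get_state() and extcon_register_notifier() are existing kernel APIs; the notifier callback below is a hypothetical sketch:

#include <linux/extcon.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Hypothetical consumer: react to USB cable events from the MUIC. */
static int usb_cable_notifier(struct notifier_block *nb,
			      unsigned long attached, void *data)
{
	pr_info("USB cable %s\n", attached ? "attached" : "detached");
	return NOTIFY_OK;
}

static int watch_usb_cable(struct extcon_dev *edev, struct notifier_block *nb)
{
	/* >0 attached, 0 detached, <0 error */
	int state = extcon_get_state(edev, EXTCON_USB);

	if (state < 0)
		return state;
	return extcon_register_notifier(edev, EXTCON_USB, nb);
}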
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index 53de581a393a..afaba5685c3d 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -155,7 +155,7 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(dev, 1);
+ devm_device_init_wakeup(dev);
/* Perform initial detection */
qcom_usb_extcon_detect_cable(&info->wq_detcable.work);
diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
index aae774e7a5c3..0462d7b9e547 100644
--- a/drivers/firewire/core-card.c
+++ b/drivers/firewire/core-card.c
@@ -86,8 +86,6 @@ static size_t config_rom_length = 1 + 4 + 1 + 1;
*/
#define DEFAULT_SPLIT_TIMEOUT (2 * 8000)
-#define CANON_OUI 0x000085
-
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
struct fw_descriptor *desc;
@@ -229,8 +227,7 @@ void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
/* Use an arbitrary short delay to combine multiple reset requests. */
fw_card_get(card);
- if (!queue_delayed_work(fw_workqueue, &card->br_work,
- delayed ? DIV_ROUND_UP(HZ, 100) : 0))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, delayed ? msecs_to_jiffies(10) : 0))
fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
@@ -241,10 +238,10 @@ static void br_work(struct work_struct *work)
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
- time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
+ time_is_after_jiffies64(card->reset_jiffies + secs_to_jiffies(2))) {
trace_bus_reset_postpone(card->index, card->generation, card->br_short);
- if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
+ if (!queue_delayed_work(fw_workqueue, &card->br_work, secs_to_jiffies(2)))
fw_card_put(card);
return;
}
@@ -280,225 +277,249 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
fw_card_put(card);
}
-static void bm_work(struct work_struct *work)
+enum bm_contention_outcome {
+ // The bus management contention window is not expired.
+ BM_CONTENTION_OUTCOME_WITHIN_WINDOW = 0,
+ // The IRM node has link off.
+ BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF,
+ // The IRM node complies IEEE 1394:1994 only.
+ BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY,
+ // Another bus reset, BM work has been rescheduled.
+ BM_CONTENTION_OUTCOME_AT_NEW_GENERATION,
+ // We have been unable to send the lock request to IRM node due to some local problem.
+ BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION,
+ // The lock request failed, maybe the IRM isn't really IRM capable after all.
+ BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM,
+ // Somebody else is BM.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM,
+ // The local node succeeds after contending for bus manager.
+ BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM,
+};
+
+static enum bm_contention_outcome contend_for_bm(struct fw_card *card)
+__must_hold(&card->lock)
{
- static const char gap_count_table[] = {
- 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ int generation = card->generation;
+ int local_id = card->local_node->node_id;
+ __be32 data[2] = {
+ cpu_to_be32(BUS_MANAGER_ID_NOT_REGISTERED),
+ cpu_to_be32(local_id),
};
- struct fw_card *card = from_work(card, work, bm_work.work);
- struct fw_device *root_device, *irm_device;
- struct fw_node *root_node;
- int root_id, new_root_id, irm_id, bm_id, local_id;
- int gap_count, generation, grace, rcode;
- bool do_reset = false;
- bool root_device_is_running;
- bool root_device_is_cmc;
- bool irm_is_1394_1995_only;
- bool keep_this_irm;
- __be32 transaction_data[2];
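+ // grace: true once at least 125 ms have elapsed since the last bus reset.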
+ bool grace = time_is_before_jiffies64(card->reset_jiffies + msecs_to_jiffies(125));
+ struct fw_node *irm_node;
+ struct fw_device *irm_device;
+ int irm_node_id, irm_device_quirks = 0;
+ int rcode;
- spin_lock_irq(&card->lock);
+ lockdep_assert_held(&card->lock);
- if (card->local_node == NULL) {
- spin_unlock_irq(&card->lock);
- goto out_put_card;
+ if (!grace) {
+ if (!is_next_generation(generation, card->bm_generation) || card->bm_abdicate)
+ return BM_CONTENTION_OUTCOME_WITHIN_WINDOW;
}
- generation = card->generation;
-
- root_node = card->root_node;
- fw_node_get(root_node);
- root_device = root_node->data;
- root_device_is_running = root_device &&
- atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
- root_device_is_cmc = root_device && root_device->cmc;
+ irm_node = card->irm_node;
+ if (!irm_node->link_on) {
+ fw_notice(card, "IRM has link off, making local node (%02x) root\n", local_id);
+ return BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF;
+ }
- irm_device = card->irm_node->data;
- irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
- (irm_device->config_rom[2] & 0x000000f0) == 0;
+ // NOTE: It is likely that the quirk detection for the IRM device has not been done yet.
+ irm_device = fw_node_get_device(irm_node);
+ if (irm_device)
+ irm_device_quirks = READ_ONCE(irm_device->quirks);
+ if ((irm_device_quirks & FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY) &&
+ !(irm_device_quirks & FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER)) {
+ fw_notice(card, "IRM is not 1394a compliant, making local node (%02x) root\n",
+ local_id);
+ return BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY;
+ }
- /* Canon MV5i works unreliably if it is not root node. */
- keep_this_irm = irm_device && irm_device->config_rom &&
- irm_device->config_rom[3] >> 8 == CANON_OUI;
+ irm_node_id = irm_node->node_id;
- root_id = root_node->node_id;
- irm_id = card->irm_node->node_id;
- local_id = card->local_node->node_id;
+ spin_unlock_irq(&card->lock);
- grace = time_after64(get_jiffies_64(),
- card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
+ rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP, irm_node_id, generation,
+ SCODE_100, CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID, data,
+ sizeof(data));
- if ((is_next_generation(generation, card->bm_generation) &&
- !card->bm_abdicate) ||
- (card->bm_generation != generation && grace)) {
- /*
- * This first step is to figure out who is IRM and
- * then try to become bus manager. If the IRM is not
- * well defined (e.g. does not have an active link
- * layer or does not responds to our lock request, we
- * will have to do a little vigilante bus management.
- * In that case, we do a goto into the gap count logic
- * so that when we do the reset, we still optimize the
- * gap count. That could well save a reset in the
- * next generation.
- */
+ spin_lock_irq(&card->lock);
- if (!card->irm_node->link_on) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM has link off", new_root_id);
- goto pick_me;
+ switch (rcode) {
+ case RCODE_GENERATION:
+ return BM_CONTENTION_OUTCOME_AT_NEW_GENERATION;
+ case RCODE_SEND_ERROR:
+ return BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION;
+ case RCODE_COMPLETE:
+ {
+ int bm_id = be32_to_cpu(data[0]);
+
+ // Used by cdev layer for "struct fw_cdev_event_bus_reset".
+ if (bm_id != BUS_MANAGER_ID_NOT_REGISTERED) {
+ card->bm_node_id = 0xffc0 | bm_id;
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM;
+ }
+
+ card->bm_node_id = local_id;
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
+ }
+ default:
+ if (irm_device_quirks & FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER) {
+ // Keep this IRM; just act as BM ourselves without a reset.
+ return BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM;
+ }
+
+ fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
+ fw_rcode_string(rcode), local_id);
+ return BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM;
+ }
+}
- if (irm_is_1394_1995_only && !keep_this_irm) {
- new_root_id = local_id;
- fw_notice(card, "%s, making local node (%02x) root\n",
- "IRM is not 1394a compliant", new_root_id);
- goto pick_me;
- }
+DEFINE_FREE(node_unref, struct fw_node *, if (_T) fw_node_put(_T))
+DEFINE_FREE(card_unref, struct fw_card *, if (_T) fw_card_put(_T))
- transaction_data[0] = cpu_to_be32(0x3f);
- transaction_data[1] = cpu_to_be32(local_id);
+static void bm_work(struct work_struct *work)
+{
+ static const char gap_count_table[] = {
+ 63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
+ };
+ struct fw_card *card __free(card_unref) = from_work(card, work, bm_work.work);
+ struct fw_node *root_node __free(node_unref) = NULL;
+ int root_id, new_root_id, irm_id, local_id;
+ int expected_gap_count, generation;
+ bool stand_for_root = false;
+
+ spin_lock_irq(&card->lock);
+ if (card->local_node == NULL) {
spin_unlock_irq(&card->lock);
+ return;
+ }
- rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
- irm_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
- transaction_data, 8);
+ generation = card->generation;
- if (rcode == RCODE_GENERATION)
- /* Another bus reset, BM work has been rescheduled. */
- goto out;
+ root_node = fw_node_get(card->root_node);
- bm_id = be32_to_cpu(transaction_data[0]);
+ root_id = root_node->node_id;
+ irm_id = card->irm_node->node_id;
+ local_id = card->local_node->node_id;
- scoped_guard(spinlock_irq, &card->lock) {
- if (rcode == RCODE_COMPLETE && generation == card->generation)
- card->bm_node_id =
- bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
- }
+ if (card->bm_generation != generation) {
+ enum bm_contention_outcome result = contend_for_bm(card);
- if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
- /* Somebody else is BM. Only act as IRM. */
- if (local_id == irm_id)
+ switch (result) {
+ case BM_CONTENTION_OUTCOME_WITHIN_WINDOW:
+ spin_unlock_irq(&card->lock);
+ fw_schedule_bm_work(card, msecs_to_jiffies(125));
+ return;
+ case BM_CONTENTION_OUTCOME_IRM_HAS_LINK_OFF:
+ case BM_CONTENTION_OUTCOME_IRM_COMPLIES_1394_1995_ONLY:
+ case BM_CONTENTION_OUTCOME_IRM_IS_NOT_CAPABLE_FOR_IRM:
+ // Pick the local node as root (and thus IRM) via the reset logic below.
+ stand_for_root = true;
+ break;
+ case BM_CONTENTION_OUTCOME_AT_NEW_GENERATION:
+ // BM work has been rescheduled.
+ spin_unlock_irq(&card->lock);
+ return;
+ case BM_CONTENTION_OUTCOME_LOCAL_PROBLEM_AT_TRANSACTION:
+ // Let's try again later and hope that the local problem has gone away by then.
+ spin_unlock_irq(&card->lock);
+ return;
+ break;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_ANOTHER_NODE_AS_BM:
+ if (local_id == irm_id) {
+ // Only acts as IRM.
+ spin_unlock_irq(&card->lock);
allocate_broadcast_channel(card, generation);
-
- goto out;
- }
-
- if (rcode == RCODE_SEND_ERROR) {
- /*
- * We have been unable to send the lock request due to
- * some local problem. Let's try again later and hope
- * that the problem has gone away by then.
- */
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
+ spin_lock_irq(&card->lock);
+ }
+ fallthrough;
+ case BM_CONTENTION_OUTCOME_IRM_HOLDS_LOCAL_NODE_AS_BM:
+ default:
+ card->bm_generation = generation;
+ break;
}
+ }
- spin_lock_irq(&card->lock);
-
- if (rcode != RCODE_COMPLETE && !keep_this_irm) {
- /*
- * The lock request failed, maybe the IRM
- * isn't really IRM capable after all. Let's
- * do a bus reset and pick the local node as
- * root, and thus, IRM.
- */
- new_root_id = local_id;
- fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
- fw_rcode_string(rcode), new_root_id);
- goto pick_me;
+ // We're bus manager for this generation, so next step is to make sure we have an active
+ // cycle master and do gap count optimization.
+ if (!stand_for_root) {
+ if (card->gap_count == GAP_COUNT_MISMATCHED) {
+ // If self IDs have inconsistent gap counts, do a
+ // bus reset ASAP. The config rom read might never
+ // complete, so don't wait for it. However, still
+ // send a PHY configuration packet prior to the
+ // bus reset. The PHY configuration packet might
+ // fail, but 1394-2008 8.4.5.2 explicitly permits
+ // it in this case, so it should be safe to try.
+ stand_for_root = true;
+
+ // We must always send a bus reset if the gap count
+ // is inconsistent, so bypass the 5-reset limit.
+ card->bm_retries = 0;
+ } else {
+ // Now investigate root node.
+ struct fw_device *root_device = fw_node_get_device(root_node);
+
+ if (root_device == NULL) {
+ // Either link_on is false, or we failed to read the
+ // config rom. In either case, pick another root.
+ stand_for_root = true;
+ } else {
+ bool root_device_is_running =
+ atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
+
+ if (!root_device_is_running) {
+ // If we haven't probed this device yet, bail out now
+ // and let's try again once that's done.
+ spin_unlock_irq(&card->lock);
+ return;
+ } else if (!root_device->cmc) {
+ // Current root has an active link layer and we
+ // successfully read the config rom, but it's not
+ // cycle master capable.
+ stand_for_root = true;
+ }
+ }
}
- } else if (card->bm_generation != generation) {
- /*
- * We weren't BM in the last generation, and the last
- * bus reset is less than 125ms ago. Reschedule this job.
- */
- spin_unlock_irq(&card->lock);
- fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
- goto out;
}
- /*
- * We're bus manager for this generation, so next step is to
- * make sure we have an active cycle master and do gap count
- * optimization.
- */
- card->bm_generation = generation;
-
- if (card->gap_count == 0) {
- /*
- * If self IDs have inconsistent gap counts, do a
- * bus reset ASAP. The config rom read might never
- * complete, so don't wait for it. However, still
- * send a PHY configuration packet prior to the
- * bus reset. The PHY configuration packet might
- * fail, but 1394-2008 8.4.5.2 explicitly permits
- * it in this case, so it should be safe to try.
- */
- new_root_id = local_id;
- /*
- * We must always send a bus reset if the gap count
- * is inconsistent, so bypass the 5-reset limit.
- */
- card->bm_retries = 0;
- } else if (root_device == NULL) {
- /*
- * Either link_on is false, or we failed to read the
- * config rom. In either case, pick another root.
- */
+ if (stand_for_root) {
new_root_id = local_id;
- } else if (!root_device_is_running) {
- /*
- * If we haven't probed this device yet, bail out now
- * and let's try again once that's done.
- */
- spin_unlock_irq(&card->lock);
- goto out;
- } else if (root_device_is_cmc) {
- /*
- * We will send out a force root packet for this
- * node as part of the gap count optimization.
- */
- new_root_id = root_id;
} else {
- /*
- * Current root has an active link layer and we
- * successfully read the config rom, but it's not
- * cycle master capable.
- */
- new_root_id = local_id;
+ // We will send out a force root packet for this node as part of the gap
+ // count optimization.
+ new_root_id = root_id;
}
- pick_me:
/*
* Pick a gap count from 1394a table E-1. The table doesn't cover
* the typically much larger 1394b beta repeater delays though.
*/
if (!card->beta_repeaters_present &&
root_node->max_hops < ARRAY_SIZE(gap_count_table))
- gap_count = gap_count_table[root_node->max_hops];
+ expected_gap_count = gap_count_table[root_node->max_hops];
else
- gap_count = 63;
+ expected_gap_count = 63;
- /*
- * Finally, figure out if we should do a reset or not. If we have
- * done less than 5 resets with the same physical topology and we
- * have either a new root or a new gap count setting, let's do it.
- */
+ // Finally, figure out if we should do a reset or not. If we have done less than 5 resets
+ // with the same physical topology and we have either a new root or a new gap count
+ // setting, let's do it.
+ if (card->bm_retries++ < 5 && (card->gap_count != expected_gap_count || new_root_id != root_id)) {
+ int card_gap_count = card->gap_count;
- if (card->bm_retries++ < 5 &&
- (card->gap_count != gap_count || new_root_id != root_id))
- do_reset = true;
-
- spin_unlock_irq(&card->lock);
+ spin_unlock_irq(&card->lock);
- if (do_reset) {
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
- new_root_id, gap_count);
- fw_send_phy_config(card, new_root_id, generation, gap_count);
+ new_root_id, expected_gap_count);
+ fw_send_phy_config(card, new_root_id, generation, expected_gap_count);
/*
* Where possible, use a short bus reset to minimize
* disruption to isochronous transfers. But in the event
@@ -511,31 +532,27 @@ static void bm_work(struct work_struct *work)
* may treat it as two, causing a gap count inconsistency
* again. Using a long bus reset prevents this.
*/
- reset_bus(card, card->gap_count != 0);
+ reset_bus(card, card_gap_count != 0);
/* Will allocate broadcast channel after the reset. */
- goto out;
- }
+ } else {
+ struct fw_device *root_device = fw_node_get_device(root_node);
- if (root_device_is_cmc) {
- /*
- * Make sure that the cycle master sends cycle start packets.
- */
- transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
- rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
- root_id, generation, SCODE_100,
- CSR_REGISTER_BASE + CSR_STATE_SET,
- transaction_data, 4);
- if (rcode == RCODE_GENERATION)
- goto out;
- }
+ spin_unlock_irq(&card->lock);
- if (local_id == irm_id)
- allocate_broadcast_channel(card, generation);
+ if (root_device && root_device->cmc) {
+ // Make sure that the cycle master sends cycle start packets.
+ __be32 data = cpu_to_be32(CSR_STATE_BIT_CMSTR);
+ int rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
+ root_id, generation, SCODE_100,
+ CSR_REGISTER_BASE + CSR_STATE_SET,
+ &data, sizeof(data));
+ if (rcode == RCODE_GENERATION)
+ return;
+ }
- out:
- fw_node_put(root_node);
- out_put_card:
- fw_card_put(card);
+ if (local_id == irm_id)
+ allocate_broadcast_channel(card, generation);
+ }
}
void fw_card_initialize(struct fw_card *card,
@@ -547,20 +564,26 @@ void fw_card_initialize(struct fw_card *card,
card->index = atomic_inc_return(&index);
card->driver = driver;
card->device = device;
- card->current_tlabel = 0;
- card->tlabel_mask = 0;
- card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
- card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
- card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
- card->split_timeout_jiffies =
- DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
+
+ card->transactions.current_tlabel = 0;
+ card->transactions.tlabel_mask = 0;
+ INIT_LIST_HEAD(&card->transactions.list);
+ spin_lock_init(&card->transactions.lock);
+
+ spin_lock_init(&card->topology_map.lock);
+
+ card->split_timeout.hi = DEFAULT_SPLIT_TIMEOUT / 8000;
+ card->split_timeout.lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
+ card->split_timeout.cycles = DEFAULT_SPLIT_TIMEOUT;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(DEFAULT_SPLIT_TIMEOUT);
+ spin_lock_init(&card->split_timeout.lock);
+
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
kref_init(&card->kref);
init_completion(&card->done);
- INIT_LIST_HEAD(&card->transaction_list);
- INIT_LIST_HEAD(&card->phy_receiver_list);
+
spin_lock_init(&card->lock);
card->local_node = NULL;
@@ -570,9 +593,13 @@ void fw_card_initialize(struct fw_card *card,
}
EXPORT_SYMBOL(fw_card_initialize);
+DEFINE_FREE(workqueue_destroy, struct workqueue_struct *, if (_T) destroy_workqueue(_T))
+
int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
unsigned int supported_isoc_contexts)
{
+ struct workqueue_struct *isoc_wq __free(workqueue_destroy) = NULL;
+ struct workqueue_struct *async_wq __free(workqueue_destroy) = NULL;
int ret;
// This workqueue should be:
@@ -587,10 +614,10 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
// * == WQ_SYSFS Parameters are available via sysfs.
// * max_active == n_it + n_ir A hardIRQ could notify events for multiple isochronous
// contexts if they are scheduled to the same cycle.
- card->isoc_wq = alloc_workqueue("firewire-isoc-card%u",
- WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
- supported_isoc_contexts, card->index);
- if (!card->isoc_wq)
+ isoc_wq = alloc_workqueue("firewire-isoc-card%u",
+ WQ_UNBOUND | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ supported_isoc_contexts, card->index);
+ if (!isoc_wq)
return -ENOMEM;
// This workqueue should be:
@@ -602,14 +629,14 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
// * == WQ_SYSFS Parameters are available via sysfs.
// * max_active == 4 A hardIRQ could notify events for a pair of requests and
// response AR/AT contexts.
- card->async_wq = alloc_workqueue("firewire-async-card%u",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
- 4, card->index);
- if (!card->async_wq) {
- ret = -ENOMEM;
- goto err_isoc;
- }
+ async_wq = alloc_workqueue("firewire-async-card%u",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_HIGHPRI | WQ_SYSFS,
+ 4, card->index);
+ if (!async_wq)
+ return -ENOMEM;
+ card->isoc_wq = isoc_wq;
+ card->async_wq = async_wq;
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
@@ -617,18 +644,18 @@ int fw_card_add(struct fw_card *card, u32 max_receive, u32 link_speed, u64 guid,
scoped_guard(mutex, &card_mutex) {
generate_config_rom(card, tmp_config_rom);
ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
- if (ret < 0)
- goto err_async;
+ if (ret < 0) {
+ card->isoc_wq = NULL;
+ card->async_wq = NULL;
+ return ret;
+ }
+ retain_and_null_ptr(isoc_wq);
+ retain_and_null_ptr(async_wq);
list_add_tail(&card->link, &card_list);
}
return 0;
-err_async:
- destroy_workqueue(card->async_wq);
-err_isoc:
- destroy_workqueue(card->isoc_wq);
- return ret;
}
EXPORT_SYMBOL(fw_card_add);
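
Note: the error paths in fw_card_add() now rely on scope-based cleanup from linux/cleanup.h instead of goto labels. A minimal sketch of the idiom, with hypothetical my_obj_* names standing in for the workqueue calls above:

	DEFINE_FREE(my_obj_free, struct my_obj *, if (_T) my_obj_destroy(_T))

	int register_two_objects(struct parent *p)
	{
		struct my_obj *a __free(my_obj_free) = my_obj_create();
		struct my_obj *b __free(my_obj_free) = NULL;

		if (!a)
			return -ENOMEM;

		b = my_obj_create();
		if (!b)
			return -ENOMEM;	/* 'a' is destroyed automatically */

		p->a = a;
		p->b = b;
		/* Ownership has moved to 'p'; suppress the automatic cleanup. */
		retain_and_null_ptr(a);
		retain_and_null_ptr(b);
		return 0;
	}

Success is the only path that disarms the cleanup, so every early return frees exactly what has been allocated so far.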
@@ -759,9 +786,13 @@ void fw_core_remove_card(struct fw_card *card)
/* Switch off most of the card driver interface. */
dummy_driver.free_iso_context = card->driver->free_iso_context;
dummy_driver.stop_iso = card->driver->stop_iso;
+ dummy_driver.disable = card->driver->disable;
card->driver = &dummy_driver;
+
drain_workqueue(card->isoc_wq);
drain_workqueue(card->async_wq);
+ card->driver->disable(card);
+ fw_cancel_pending_transactions(card);
scoped_guard(spinlock_irqsave, &card->lock)
fw_destroy_nodes(card);
@@ -773,7 +804,7 @@ void fw_core_remove_card(struct fw_card *card)
destroy_workqueue(card->isoc_wq);
destroy_workqueue(card->async_wq);
- WARN_ON(!list_empty(&card->transaction_list));
+ WARN_ON(!list_empty(&card->transactions.list));
}
EXPORT_SYMBOL(fw_core_remove_card);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 78b10c6ef7fe..49dc1612c691 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -41,12 +41,15 @@
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
-#define FW_CDEV_KERNEL_VERSION 5
+#define FW_CDEV_KERNEL_VERSION 6
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6
+static DEFINE_SPINLOCK(phy_receiver_list_lock);
+static LIST_HEAD(phy_receiver_list);
+
struct client {
u32 version;
struct fw_device *device;
@@ -937,11 +940,12 @@ static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
if (a->length > 256)
return -EINVAL;
- r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
+ r = kmalloc(struct_size(r, data, a->length), GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
- if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
+ if (copy_from_user(r->data, u64_to_uptr(a->data),
+ flex_array_size(r, data, a->length))) {
ret = -EFAULT;
goto failed;
}
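
Note: the sizes above now go through the overflow-checked helpers from linux/overflow.h. A hedged sketch of what they compute ('struct rec' and rec_alloc() are illustrative names):

	struct rec {
		size_t len;
		u32 data[] __counted_by(len);
	};

	struct rec *rec_alloc(size_t n)
	{
		/* struct_size(r, data, n) == sizeof(*r) + n * sizeof(r->data[0]),
		 * saturating to SIZE_MAX on overflow so kmalloc() fails cleanly */
		struct rec *r = kmalloc(struct_size(r, data, n), GFP_KERNEL);

		if (r)
			r->len = n;
		return r;
	}

flex_array_size(r, data, n) computes just the n * sizeof(r->data[0]) part with the same saturation, which is why it replaces the open-coded a->length * 4 in the copy_from_user() call.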
@@ -1324,8 +1328,8 @@ static void iso_resource_work(struct work_struct *work)
todo = r->todo;
// Allow 1000ms grace period for other reallocations.
if (todo == ISO_RES_ALLOC &&
- time_before64(get_jiffies_64(), client->device->card->reset_jiffies + HZ)) {
- schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
+ time_is_after_jiffies64(client->device->card->reset_jiffies + secs_to_jiffies(1))) {
+ schedule_iso_resource(r, msecs_to_jiffies(333));
skip = true;
} else {
// We could be called twice within the same generation.
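
Note: time_is_after_jiffies64(x) is the readable spelling of time_before64(get_jiffies_64(), x), and the named converters replace open-coded HZ arithmetic. A condensed view of the rewritten check:

	/* still within the 1000 ms grace period after the bus reset? */
	if (time_is_after_jiffies64(card->reset_jiffies + secs_to_jiffies(1)))
		schedule_iso_resource(r, msecs_to_jiffies(333));	/* retry in ~1/3 s */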
@@ -1669,15 +1673,16 @@ static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
- struct fw_card *card = client->device->card;
/* Access policy: Allow this ioctl only on local nodes' device files. */
if (!client->device->is_local)
return -ENOSYS;
- guard(spinlock_irq)(&card->lock);
+ // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
+ list_move_tail(&client->phy_receiver_link, &phy_receiver_list);
- list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
client->phy_receiver_closure = a->closure;
return 0;
@@ -1687,10 +1692,17 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
struct client *client;
- guard(spinlock_irqsave)(&card->lock);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ guard(spinlock_irqsave)(&phy_receiver_list_lock);
+
+ list_for_each_entry(client, &phy_receiver_list, phy_receiver_link) {
+ struct inbound_phy_packet_event *e;
+
+ if (client->device->card != card)
+ continue;
- list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
- struct inbound_phy_packet_event *e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
+ e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
if (e == NULL)
break;
@@ -1857,7 +1869,9 @@ static int fw_device_op_release(struct inode *inode, struct file *file)
struct client_resource *resource;
unsigned long index;
- scoped_guard(spinlock_irq, &client->device->card->lock)
+ // NOTE: This can be without irq when we can guarantee that __fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irq, &phy_receiver_list_lock)
list_del(&client->phy_receiver_link);
scoped_guard(mutex, &client->device->client_list_mutex)
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index aeacd4cfd694..9b0080397154 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -542,8 +542,83 @@ static struct device_attribute fw_device_attributes[] = {
__ATTR_NULL,
};
-static int read_rom(struct fw_device *device,
- int generation, int index, u32 *data)
+#define CANON_OUI 0x000085
+
+static int detect_quirks_by_bus_information_block(const u32 *bus_information_block)
+{
+ int quirks = 0;
+
+ if ((bus_information_block[2] & 0x000000f0) == 0)
+ quirks |= FW_DEVICE_QUIRK_IRM_IS_1394_1995_ONLY;
+
+ if ((bus_information_block[3] >> 8) == CANON_OUI)
+ quirks |= FW_DEVICE_QUIRK_IRM_IGNORES_BUS_MANAGER;
+
+ return quirks;
+}
+
+struct entry_match {
+ unsigned int index;
+ u32 value;
+};
+
+static const struct entry_match motu_audio_express_matches[] = {
+ { 1, 0x030001f2 },
+ { 3, 0xd1000002 },
+ { 4, 0x8d000005 },
+ { 6, 0x120001f2 },
+ { 7, 0x13000033 },
+ { 8, 0x17104800 },
+};
+
+static const struct entry_match tascam_fw_series_matches[] = {
+ { 1, 0x0300022e },
+ { 3, 0x8d000006 },
+ { 4, 0xd1000001 },
+ { 6, 0x1200022e },
+ { 8, 0xd4000004 },
+};
+
+static int detect_quirks_by_root_directory(const u32 *root_directory, unsigned int length)
+{
+ static const struct {
+ enum fw_device_quirk quirk;
+ const struct entry_match *matches;
+ unsigned int match_count;
+ } *entry, entries[] = {
+ {
+ .quirk = FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE,
+ .matches = motu_audio_express_matches,
+ .match_count = ARRAY_SIZE(motu_audio_express_matches),
+ },
+ {
+ .quirk = FW_DEVICE_QUIRK_UNSTABLE_AT_S400,
+ .matches = tascam_fw_series_matches,
+ .match_count = ARRAY_SIZE(tascam_fw_series_matches),
+ },
+ };
+ int quirks = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i) {
+ int j;
+
+ entry = entries + i;
+ for (j = 0; j < entry->match_count; ++j) {
+ unsigned int index = entry->matches[j].index;
+ unsigned int value = entry->matches[j].value;
+
+			if ((index >= length) || (root_directory[index] != value))
+ break;
+ }
+ if (j == entry->match_count)
+ quirks |= entry->quirk;
+ }
+
+ return quirks;
+}
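
A hedged usage sketch of the matcher: each table pins (index, expected value) pairs in the config ROM root directory, and a quirk is set only if every listed quadlet is present and equal. This mirrors the call site later in read_config_rom():

	int quirks = detect_quirks_by_root_directory(rom + ROOT_DIR_OFFSET,
						     length - ROOT_DIR_OFFSET);
	if (quirks & FW_DEVICE_QUIRK_UNSTABLE_AT_S400)
		speed = SCODE_200;	/* cap the initial speed for this device */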
+
+static int read_rom(struct fw_device *device, int generation, int speed, int index, u32 *data)
{
u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
int i, rcode;
@@ -554,7 +629,7 @@ static int read_rom(struct fw_device *device,
for (i = 10; i < 100; i += 10) {
rcode = fw_run_transaction(device->card,
TCODE_READ_QUADLET_REQUEST, device->node_id,
- generation, device->max_speed, offset, data, 4);
+ generation, speed, offset, data, 4);
if (rcode != RCODE_BUSY)
break;
msleep(i);
@@ -578,10 +653,11 @@ static int read_rom(struct fw_device *device,
static int read_config_rom(struct fw_device *device, int generation)
{
struct fw_card *card = device->card;
- const u32 *old_rom, *new_rom;
- u32 *rom, *stack;
+ const u32 *new_rom, *old_rom __free(kfree) = NULL;
+ u32 *stack, *rom __free(kfree) = NULL;
u32 sp, key;
- int i, end, length, ret;
+ int i, end, length, ret, speed;
+ int quirks;
rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
@@ -591,13 +667,13 @@ static int read_config_rom(struct fw_device *device, int generation)
stack = &rom[MAX_CONFIG_ROM_SIZE];
memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
- device->max_speed = SCODE_100;
+ speed = SCODE_100;
/* First read the bus info block. */
for (i = 0; i < 5; i++) {
- ret = read_rom(device, generation, i, &rom[i]);
+ ret = read_rom(device, generation, speed, i, &rom[i]);
if (ret != RCODE_COMPLETE)
- goto out;
+ return ret;
/*
* As per IEEE1212 7.2, during initialization, devices can
* reply with a 0 for the first quadlet of the config
@@ -606,39 +682,14 @@ static int read_config_rom(struct fw_device *device, int generation)
* harddisk). In that case we just fail, and the
* retry mechanism will try again later.
*/
- if (i == 0 && rom[i] == 0) {
- ret = RCODE_BUSY;
- goto out;
- }
+ if (i == 0 && rom[i] == 0)
+ return RCODE_BUSY;
}
- device->max_speed = device->node->max_speed;
-
- /*
- * Determine the speed of
- * - devices with link speed less than PHY speed,
- * - devices with 1394b PHY (unless only connected to 1394a PHYs),
- * - all devices if there are 1394b repeaters.
- * Note, we cannot use the bus info block's link_spd as starting point
- * because some buggy firmwares set it lower than necessary and because
- * 1394-1995 nodes do not have the field.
- */
- if ((rom[2] & 0x7) < device->max_speed ||
- device->max_speed == SCODE_BETA ||
- card->beta_repeaters_present) {
- u32 dummy;
-
- /* for S1600 and S3200 */
- if (device->max_speed == SCODE_BETA)
- device->max_speed = card->link_speed;
+ quirks = detect_quirks_by_bus_information_block(rom);
- while (device->max_speed > SCODE_100) {
- if (read_rom(device, generation, 0, &dummy) ==
- RCODE_COMPLETE)
- break;
- device->max_speed--;
- }
- }
+	// Prevent torn reads/writes of the quirks field.
+ WRITE_ONCE(device->quirks, quirks);
/*
* Now parse the config rom. The config rom is a recursive
@@ -659,15 +710,13 @@ static int read_config_rom(struct fw_device *device, int generation)
*/
key = stack[--sp];
i = key & 0xffffff;
- if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) {
- ret = -ENXIO;
- goto out;
- }
+ if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
+ return -ENXIO;
/* Read header quadlet for the block to get the length. */
- ret = read_rom(device, generation, i, &rom[i]);
+ ret = read_rom(device, generation, speed, i, &rom[i]);
if (ret != RCODE_COMPLETE)
- goto out;
+ return ret;
end = i + (rom[i] >> 16) + 1;
if (end > MAX_CONFIG_ROM_SIZE) {
/*
@@ -689,9 +738,9 @@ static int read_config_rom(struct fw_device *device, int generation)
* it references another block, and push it in that case.
*/
for (; i < end; i++) {
- ret = read_rom(device, generation, i, &rom[i]);
+ ret = read_rom(device, generation, speed, i, &rom[i]);
if (ret != RCODE_COMPLETE)
- goto out;
+ return ret;
if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
continue;
@@ -716,27 +765,54 @@ static int read_config_rom(struct fw_device *device, int generation)
length = i;
}
+ quirks |= detect_quirks_by_root_directory(rom + ROOT_DIR_OFFSET, length - ROOT_DIR_OFFSET);
+
+	// Prevent torn reads/writes of the quirks field.
+ WRITE_ONCE(device->quirks, quirks);
+
+ if (unlikely(quirks & FW_DEVICE_QUIRK_UNSTABLE_AT_S400))
+ speed = SCODE_200;
+ else
+ speed = device->node->max_speed;
+
+ // Determine the speed of
+ // - devices with link speed less than PHY speed,
+ // - devices with 1394b PHY (unless only connected to 1394a PHYs),
+ // - all devices if there are 1394b repeaters.
+ // Note, we cannot use the bus info block's link_spd as starting point because some buggy
+ // firmwares set it lower than necessary and because 1394-1995 nodes do not have the field.
+ if ((rom[2] & 0x7) < speed || speed == SCODE_BETA || card->beta_repeaters_present) {
+ u32 dummy;
+
+ // for S1600 and S3200.
+ if (speed == SCODE_BETA)
+ speed = card->link_speed;
+
+ while (speed > SCODE_100) {
+ if (read_rom(device, generation, speed, 0, &dummy) ==
+ RCODE_COMPLETE)
+ break;
+ --speed;
+ }
+ }
+
+ device->max_speed = speed;
+
old_rom = device->config_rom;
new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
- if (new_rom == NULL) {
- ret = -ENOMEM;
- goto out;
- }
+ if (new_rom == NULL)
+ return -ENOMEM;
scoped_guard(rwsem_write, &fw_device_rwsem) {
device->config_rom = new_rom;
device->config_rom_length = length;
}
- kfree(old_rom);
- ret = RCODE_COMPLETE;
device->max_rec = rom[2] >> 12 & 0xf;
device->cmc = rom[2] >> 30 & 1;
device->irmc = rom[2] >> 31 & 1;
- out:
- kfree(rom);
- return ret;
+ return RCODE_COMPLETE;
}
static void fw_unit_release(struct device *dev)
@@ -847,16 +923,15 @@ static void fw_schedule_device_work(struct fw_device *device,
*/
#define MAX_RETRIES 10
-#define RETRY_DELAY (3 * HZ)
-#define INITIAL_DELAY (HZ / 2)
-#define SHUTDOWN_DELAY (2 * HZ)
+#define RETRY_DELAY secs_to_jiffies(3)
+#define INITIAL_DELAY msecs_to_jiffies(500)
+#define SHUTDOWN_DELAY secs_to_jiffies(2)
static void fw_device_shutdown(struct work_struct *work)
{
struct fw_device *device = from_work(device, work, work.work);
- if (time_before64(get_jiffies_64(),
- device->card->reset_jiffies + SHUTDOWN_DELAY)
+ if (time_is_after_jiffies64(device->card->reset_jiffies + SHUTDOWN_DELAY)
&& !list_empty(&device->card->link)) {
fw_schedule_device_work(device, SHUTDOWN_DELAY);
return;
@@ -887,7 +962,7 @@ static void fw_device_release(struct device *dev)
* bus manager work looks at this node.
*/
scoped_guard(spinlock_irqsave, &card->lock)
- device->node->data = NULL;
+ fw_node_set_device(device->node, NULL);
fw_node_put(device->node);
kfree(device->config_rom);
@@ -1007,7 +1082,7 @@ static void fw_device_init(struct work_struct *work)
int ret;
/*
- * All failure paths here set node->data to NULL, so that we
+ * All failure paths here call fw_node_set_device(node, NULL), so that we
* don't try to do device_for_each_child() on a kfree()'d
* device.
*/
@@ -1051,9 +1126,9 @@ static void fw_device_init(struct work_struct *work)
struct fw_node *obsolete_node = reused->node;
device->node = obsolete_node;
- device->node->data = device;
+ fw_node_set_device(device->node, device);
reused->node = current_node;
- reused->node->data = reused;
+ fw_node_set_device(reused->node, reused);
reused->max_speed = device->max_speed;
reused->node_id = current_node->node_id;
@@ -1123,10 +1198,10 @@ static void fw_device_init(struct work_struct *work)
device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
} else {
- fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
+ fw_notice(card, "created device %s: GUID %08x%08x, S%d00, quirks %08x\n",
dev_name(&device->device),
device->config_rom[3], device->config_rom[4],
- 1 << device->max_speed);
+ 1 << device->max_speed, device->quirks);
device->config_rom_retries = 0;
set_broadcast_channel(device, device->generation);
@@ -1161,7 +1236,7 @@ static int reread_config_rom(struct fw_device *device, int generation,
int i, rcode;
for (i = 0; i < 6; i++) {
- rcode = read_rom(device, generation, i, &q);
+ rcode = read_rom(device, generation, device->max_speed, i, &q);
if (rcode != RCODE_COMPLETE)
return rcode;
@@ -1292,7 +1367,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* FW_NODE_UPDATED callbacks can update the node_id
* and generation for the device.
*/
- node->data = device;
+ fw_node_set_device(node, device);
/*
* Many devices are slow to respond after bus resets,
@@ -1307,7 +1382,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_INITIATED_RESET:
case FW_NODE_LINK_ON:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
goto create;
@@ -1324,7 +1399,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
break;
case FW_NODE_UPDATED:
- device = node->data;
+ device = fw_node_get_device(node);
if (device == NULL)
break;
@@ -1339,7 +1414,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
case FW_NODE_DESTROYED:
case FW_NODE_LINK_OFF:
- if (!node->data)
+ if (!fw_node_get_device(node))
break;
/*
@@ -1354,7 +1429,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* the device in shutdown state to have that code fail
* to create the device.
*/
- device = node->data;
+ device = fw_node_get_device(node);
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
device->workfn = fw_device_shutdown;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 74a6aa7d8cc9..ed3ae8cdb0cd 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -241,7 +241,7 @@ static struct fw_node *build_tree(struct fw_card *card, const u32 *sid, int self
// If PHYs report different gap counts, set an invalid count which will force a gap
// count reconfiguration and a reset.
if (phy_packet_self_id_zero_get_gap_count(self_id_sequence[0]) != gap_count)
- gap_count = 0;
+ gap_count = GAP_COUNT_MISMATCHED;
update_hop_count(node);
@@ -325,9 +325,11 @@ static void report_found_node(struct fw_card *card,
card->bm_retries = 0;
}
-/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
+__must_hold(&card->lock)
{
+ lockdep_assert_held(&card->lock);
+
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
@@ -435,20 +437,23 @@ static void update_tree(struct fw_card *card, struct fw_node *root)
}
}
-static void update_topology_map(struct fw_card *card,
- u32 *self_ids, int self_id_count)
+static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_node_id,
+ const u32 *self_ids, int self_id_count)
{
- int node_count = (card->root_node->node_id & 0x3f) + 1;
- __be32 *map = card->topology_map;
+ __be32 *map = buffer;
+ u32 next_generation = be32_to_cpu(buffer[1]) + 1;
+ int node_count = (root_node_id & 0x3f) + 1;
+
+ memset(map, 0, buffer_size);
*map++ = cpu_to_be32((self_id_count + 2) << 16);
- *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
+ *map++ = cpu_to_be32(next_generation);
*map++ = cpu_to_be32((node_count << 16) | self_id_count);
while (self_id_count--)
*map++ = cpu_to_be32p(self_ids++);
- fw_compute_block_crc(card->topology_map);
+ fw_compute_block_crc(buffer);
}
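
For reference, the TOPOLOGY_MAP block assembled above has this quadlet layout (the CRC is filled in afterwards by fw_compute_block_crc()):

	/*
	 *   map[0]: (self_id_count + 2) << 16, CRC in the low 16 bits
	 *   map[1]: generation, incremented on every rebuild
	 *   map[2]: node_count << 16 | self_id_count
	 *   map[3+]: the raw self-ID quadlets, big-endian
	 */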
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
@@ -458,46 +463,45 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
trace_bus_reset_handle(card->index, generation, node_id, bm_abdicate, self_ids, self_id_count);
- guard(spinlock_irqsave)(&card->lock);
-
- /*
- * If the selfID buffer is not the immediate successor of the
- * previously processed one, we cannot reliably compare the
- * old and new topologies.
- */
- if (!is_next_generation(generation, card->generation) &&
- card->local_node != NULL) {
- fw_destroy_nodes(card);
- card->bm_retries = 0;
+ scoped_guard(spinlock, &card->lock) {
+ // If the selfID buffer is not the immediate successor of the
+ // previously processed one, we cannot reliably compare the
+ // old and new topologies.
+ if (!is_next_generation(generation, card->generation) && card->local_node != NULL) {
+ fw_destroy_nodes(card);
+ card->bm_retries = 0;
+ }
+ card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
+ card->node_id = node_id;
+ // Update node_id before generation to prevent anybody from using
+ // a stale node_id together with a current generation.
+ smp_wmb();
+ card->generation = generation;
+ card->reset_jiffies = get_jiffies_64();
+ card->bm_node_id = 0xffff;
+ card->bm_abdicate = bm_abdicate;
+
+ local_node = build_tree(card, self_ids, self_id_count, generation);
+
+ card->color++;
+
+ if (local_node == NULL) {
+ fw_err(card, "topology build failed\n");
+ // FIXME: We need to issue a bus reset in this case.
+ } else if (card->local_node == NULL) {
+ card->local_node = local_node;
+ for_each_fw_node(card, local_node, report_found_node);
+ } else {
+ update_tree(card, local_node);
+ }
}
- card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
- card->node_id = node_id;
- /*
- * Update node_id before generation to prevent anybody from using
- * a stale node_id together with a current generation.
- */
- smp_wmb();
- card->generation = generation;
- card->reset_jiffies = get_jiffies_64();
- card->bm_node_id = 0xffff;
- card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
- local_node = build_tree(card, self_ids, self_id_count, generation);
-
- update_topology_map(card, self_ids, self_id_count);
-
- card->color++;
-
- if (local_node == NULL) {
- fw_err(card, "topology build failed\n");
- /* FIXME: We need to issue a bus reset in this case. */
- } else if (card->local_node == NULL) {
- card->local_node = local_node;
- for_each_fw_node(card, local_node, report_found_node);
- } else {
- update_tree(card, local_node);
+	// Used only by the transaction layer.
+ scoped_guard(spinlock, &card->topology_map.lock) {
+ update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer),
+ card->root_node->node_id, self_ids, self_id_count);
}
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 1d1c2d8f85ae..7fea11a5e359 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -44,26 +44,68 @@ static int try_cancel_split_timeout(struct fw_transaction *t)
return 1;
}
-static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
- u32 response_tstamp)
+// card->transactions.lock must be acquired in advance.
+static void remove_transaction_entry(struct fw_card *card, struct fw_transaction *entry)
{
- struct fw_transaction *t = NULL, *iter;
+ list_del_init(&entry->link);
+ card->transactions.tlabel_mask &= ~(1ULL << entry->tlabel);
+}
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
- if (iter == transaction) {
- if (try_cancel_split_timeout(iter)) {
- list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
- t = iter;
- }
- break;
- }
+// Must be called without holding card->transactions.lock.
+void fw_cancel_pending_transactions(struct fw_card *card)
+{
+ struct fw_transaction *t, *tmp;
+ LIST_HEAD(pending_list);
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ list_for_each_entry_safe(t, tmp, &card->transactions.list, link) {
+ if (try_cancel_split_timeout(t))
+ list_move(&t->link, &pending_list);
+ }
+ }
+
+ list_for_each_entry_safe(t, tmp, &pending_list, link) {
+ list_del(&t->link);
+
+ if (!t->with_tstamp) {
+ t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0,
+ t->callback_data);
+ } else {
+ t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp, 0,
+ NULL, 0, t->callback_data);
}
}
+}
+
+// card->transactions.lock must be acquired in advance.
+#define find_and_pop_transaction_entry(card, condition) \
+({ \
+ struct fw_transaction *iter, *t = NULL; \
+ list_for_each_entry(iter, &card->transactions.list, link) { \
+ if (condition) { \
+ t = iter; \
+ break; \
+ } \
+ } \
+ if (t && try_cancel_split_timeout(t)) \
+ remove_transaction_entry(card, t); \
+ t; \
+})
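
find_and_pop_transaction_entry() is a GNU statement expression: it binds the cursor 'iter' for the caller-supplied condition, and the block's last expression ('t') becomes its value. A hypothetical lookup by tlabel alone would read:

	t = find_and_pop_transaction_entry(card, iter->tlabel == tlabel);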
- if (!t)
- return -ENOENT;
+static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
+ u32 response_tstamp)
+{
+ struct fw_transaction *t;
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ t = find_and_pop_transaction_entry(card, iter == transaction);
+ if (!t)
+ return -ENOENT;
+ }
if (!t->with_tstamp) {
t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
@@ -117,11 +159,10 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
struct fw_transaction *t = timer_container_of(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
- scoped_guard(spinlock_irqsave, &card->lock) {
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
if (list_empty(&t->link))
return;
- list_del(&t->link);
- card->tlabel_mask &= ~(1ULL << t->tlabel);
+ remove_transaction_entry(card, t);
}
if (!t->with_tstamp) {
@@ -135,14 +176,18 @@ static void split_transaction_timeout_callback(struct timer_list *timer)
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
{
- guard(spinlock_irqsave)(&card->lock);
+ unsigned long delta;
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction))
return;
t->is_split_transaction = true;
- mod_timer(&t->split_timeout_timer,
- jiffies + card->split_timeout_jiffies);
+
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ delta = card->split_timeout.jiffies;
+ mod_timer(&t->split_timeout_timer, jiffies + delta);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
@@ -162,8 +207,12 @@ static void transmit_complete_callback(struct fw_packet *packet,
break;
case ACK_PENDING:
{
- t->split_timeout_cycle =
- compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ t->split_timeout_cycle =
+ compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
+ }
start_split_transaction_timeout(t, card);
break;
}
@@ -259,18 +308,21 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
}
static int allocate_tlabel(struct fw_card *card)
+__must_hold(&card->transactions.lock)
{
int tlabel;
- tlabel = card->current_tlabel;
- while (card->tlabel_mask & (1ULL << tlabel)) {
+ lockdep_assert_held(&card->transactions.lock);
+
+ tlabel = card->transactions.current_tlabel;
+ while (card->transactions.tlabel_mask & (1ULL << tlabel)) {
tlabel = (tlabel + 1) & 0x3f;
- if (tlabel == card->current_tlabel)
+ if (tlabel == card->transactions.current_tlabel)
return -EBUSY;
}
- card->current_tlabel = (tlabel + 1) & 0x3f;
- card->tlabel_mask |= 1ULL << tlabel;
+ card->transactions.current_tlabel = (tlabel + 1) & 0x3f;
+ card->transactions.tlabel_mask |= 1ULL << tlabel;
return tlabel;
}
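
A hedged userspace model of the allocator above, useful for seeing the round-robin search and the 64-entry busy mask in isolation:

	#include <stdint.h>

	static int alloc_tlabel(uint64_t *busy_mask, unsigned int *current_tlabel)
	{
		unsigned int tlabel = *current_tlabel;

		while (*busy_mask & (1ULL << tlabel)) {
			tlabel = (tlabel + 1) & 0x3f;	/* wrap at 64 labels */
			if (tlabel == *current_tlabel)
				return -1;		/* every label is in flight */
		}
		*current_tlabel = (tlabel + 1) & 0x3f;
		*busy_mask |= 1ULL << tlabel;
		return tlabel;
	}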
@@ -331,7 +383,6 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
void *payload, size_t length, union fw_transaction_callback callback,
bool with_tstamp, void *callback_data)
{
- unsigned long flags;
int tlabel;
/*
@@ -339,11 +390,11 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
* the list while holding the card spinlock.
*/
- spin_lock_irqsave(&card->lock, flags);
-
- tlabel = allocate_tlabel(card);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ tlabel = allocate_tlabel(card);
if (tlabel < 0) {
- spin_unlock_irqrestore(&card->lock, flags);
if (!with_tstamp) {
callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
} else {
@@ -368,15 +419,22 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode
t->callback = callback;
t->with_tstamp = with_tstamp;
t->callback_data = callback_data;
-
- fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
- speed, offset, payload, length);
t->packet.callback = transmit_complete_callback;
- list_add_tail(&t->link, &card->transaction_list);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->lock) {
+ // The node_id field of fw_card can be updated when handling SelfIDComplete.
+ fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id,
+ generation, speed, offset, payload, length);
+ }
- spin_unlock_irqrestore(&card->lock, flags);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock)
+ list_add_tail(&t->link, &card->transactions.list);
+ // Safe with no lock, since the index field of fw_card is immutable once assigned.
trace_async_request_outbound_initiate((uintptr_t)t, card->index, generation, speed,
t->packet.header, payload,
tcode_is_read_request(tcode) ? 0 : length / 4);
@@ -458,7 +516,7 @@ static struct fw_packet phy_config_packet = {
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
- long timeout = DIV_ROUND_UP(HZ, 10);
+ long timeout = msecs_to_jiffies(100);
u32 data = 0;
phy_packet_set_packet_identifier(&data, PHY_PACKET_PACKET_IDENTIFIER_PHY_CONFIG);
@@ -779,11 +837,14 @@ EXPORT_SYMBOL(fw_fill_response);
static u32 compute_split_timeout_timestamp(struct fw_card *card,
u32 request_timestamp)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
u32 timestamp;
- cycles = card->split_timeout_cycles;
+ lockdep_assert_held(&card->split_timeout.lock);
+
+ cycles = card->split_timeout.cycles;
cycles += request_timestamp & 0x1fff;
timestamp = request_timestamp & ~0x1fff;
@@ -834,9 +895,12 @@ static struct fw_request *allocate_request(struct fw_card *card,
return NULL;
kref_init(&request->kref);
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock)
+ request->response.timestamp = compute_split_timeout_timestamp(card, p->timestamp);
+
request->response.speed = p->speed;
- request->response.timestamp =
- compute_split_timeout_timestamp(card, p->timestamp);
request->response.generation = p->generation;
request->response.ack = 0;
request->response.callback = free_response_callback;
@@ -1072,7 +1136,7 @@ EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
- struct fw_transaction *t = NULL, *iter;
+ struct fw_transaction *t = NULL;
u32 *data;
size_t data_length;
int tcode, tlabel, source, rcode;
@@ -1111,17 +1175,11 @@ void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
break;
}
- scoped_guard(spinlock_irqsave, &card->lock) {
- list_for_each_entry(iter, &card->transaction_list, link) {
- if (iter->node_id == source && iter->tlabel == tlabel) {
- if (try_cancel_split_timeout(iter)) {
- list_del_init(&iter->link);
- card->tlabel_mask &= ~(1ULL << iter->tlabel);
- t = iter;
- }
- break;
- }
- }
+ // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for
+ // local destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) {
+ t = find_and_pop_transaction_entry(card,
+ iter->node_id == source && iter->tlabel == tlabel);
}
trace_async_response_inbound((uintptr_t)t, card->index, p->generation, p->speed, p->ack,
@@ -1196,7 +1254,11 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request
}
start = (offset - topology_map_region.start) / 4;
- memcpy(payload, &card->topology_map[start], length);
+
+ // NOTE: This can be without irqsave when we can guarantee that fw_send_request() for local
+ // destination never runs in any type of IRQ context.
+ scoped_guard(spinlock_irqsave, &card->topology_map.lock)
+ memcpy(payload, &card->topology_map.buffer[start], length);
fw_send_response(card, request, RCODE_COMPLETE);
}
@@ -1211,16 +1273,17 @@ static const struct fw_address_region registers_region =
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void update_split_timeout(struct fw_card *card)
+__must_hold(&card->split_timeout.lock)
{
unsigned int cycles;
- cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
+ cycles = card->split_timeout.hi * 8000 + (card->split_timeout.lo >> 19);
/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
cycles = clamp(cycles, 800u, 3u * 8000u);
- card->split_timeout_cycles = cycles;
- card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
+ card->split_timeout.cycles = cycles;
+ card->split_timeout.jiffies = isoc_cycles_to_jiffies(cycles);
}
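
A worked example of the SPLIT_TIMEOUT encoding handled here: the hi register holds whole seconds in its low 3 bits, the top 13 bits of lo hold 1/8000-second cycles, and 8000 isochronous cycles make one second (the same ratio isoc_cycles_to_jiffies() assumes):

	unsigned int hi = 1;		/* 1 second */
	unsigned int lo = 0x40000000;	/* 0x800 << 19, i.e. 2048 cycles */
	unsigned int cycles = hi * 8000 + (lo >> 19);	/* 8000 + 2048 = 10048 */
	/* then clamped to [800, 24000]: at least 100 ms per IEEE 1394, at
	 * most 3 s so the OHCI timestamp arithmetic cannot overflow */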
static void handle_registers(struct fw_card *card, struct fw_request *request,
@@ -1270,12 +1333,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_HI:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_hi);
+ *data = cpu_to_be32(card->split_timeout.hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_hi = be32_to_cpu(*data) & 7;
- update_split_timeout(card);
+ // NOTE: This can be without irqsave when we can guarantee that
+ // __fw_send_request() for local destination never runs in any type of IRQ
+ // context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.hi = be32_to_cpu(*data) & 7;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1283,12 +1349,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_SPLIT_TIMEOUT_LO:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
- *data = cpu_to_be32(card->split_timeout_lo);
+ *data = cpu_to_be32(card->split_timeout.lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
- guard(spinlock_irqsave)(&card->lock);
-
- card->split_timeout_lo = be32_to_cpu(*data) & 0xfff80000;
- update_split_timeout(card);
+ // NOTE: This can be without irqsave when we can guarantee that
+ // __fw_send_request() for local destination never runs in any type of IRQ
+ // context.
+ scoped_guard(spinlock_irqsave, &card->split_timeout.lock) {
+ card->split_timeout.lo = be32_to_cpu(*data) & 0xfff80000;
+ update_split_timeout(card);
+ }
} else {
rcode = RCODE_TYPE_ERROR;
}
@@ -1399,7 +1468,8 @@ static int __init fw_core_init(void)
{
int ret;
- fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
+ fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 0);
if (!fw_workqueue)
return -ENOMEM;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 9b298af1cac0..41fb39d9a4e6 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -27,6 +27,11 @@ struct fw_packet;
/* -card */
+// This is the invalid value we use to indicate a mismatched gap count; it forces reconfiguration.
+#define GAP_COUNT_MISMATCHED 0
+
+#define isoc_cycles_to_jiffies(cycles) usecs_to_jiffies((u32)div_u64((u64)cycles * USEC_PER_SEC, 8000))
+
extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
@@ -60,6 +65,9 @@ struct fw_card_driver {
int (*enable)(struct fw_card *card,
const __be32 *config_rom, size_t length);
+	// After this call returns, no driver callback is invoked anymore to handle hardware events.
+ void (*disable)(struct fw_card *card);
+
int (*read_phy_reg)(struct fw_card *card, int address);
int (*update_phy_reg)(struct fw_card *card, int address,
int clear_bits, int set_bits);
@@ -167,6 +175,9 @@ static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_fun
/* -topology */
+// The initial value of the BUS_MANAGER_ID register, indicating that no bus manager is registered.
+#define BUS_MANAGER_ID_NOT_REGISTERED 0x3f
+
enum {
FW_NODE_CREATED,
FW_NODE_UPDATED,
@@ -194,8 +205,8 @@ struct fw_node {
/* For serializing node topology into a list. */
struct list_head link;
- /* Upper layer specific data. */
- void *data;
+	// The associated device, or NULL if none is bound yet.
+ struct fw_device *device;
struct fw_node *ports[] __counted_by(port_count);
};
@@ -219,6 +230,16 @@ static inline void fw_node_put(struct fw_node *node)
kref_put(&node->kref, release_node);
}
+static inline struct fw_device *fw_node_get_device(struct fw_node *node)
+{
+ return node->device;
+}
+
+static inline void fw_node_set_device(struct fw_node *node, struct fw_device *device)
+{
+ node->device = device;
+}
+
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
@@ -266,6 +287,8 @@ void fw_fill_response(struct fw_packet *response, u32 *request_header,
void fw_request_get(struct fw_request *request);
void fw_request_put(struct fw_request *request);
+void fw_cancel_pending_transactions(struct fw_card *card);
+
// Convert the value of IEEE 1394 CYCLE_TIME register to the format of timeStamp field in
// descriptors of 1394 OHCI.
static inline u32 cycle_time_to_ohci_tstamp(u32 tstamp)
diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c
index 48b879e9e831..121f0c2f6401 100644
--- a/drivers/firewire/init_ohci1394_dma.c
+++ b/drivers/firewire/init_ohci1394_dma.c
@@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci)
/**
* init_ohci1394_wait_for_busresets - wait until bus resets are completed
+ * @ohci: Pointer to the OHCI-1394 controller structure
*
* OHCI1394 initialization itself and any device going on- or offline
* and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec
@@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
/**
* init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This enables remote DMA access over IEEE1394 from every host for the low
* 4GB of address space. DMA accesses above 4GB are not available currently.
*/
@@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
/**
* init_ohci1394_reset_and_init_dma - init controller and enable DMA
+ * @ohci: Pointer to the OHCI-1394 controller structure
+ *
* This initializes the given controller and enables physical DMA engine in it.
*/
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
@@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
/**
* init_ohci1394_controller - Map the registers of the controller and init DMA
+ * @num: PCI bus number
+ * @slot: PCI device number
+ * @func: PCI function number
+ *
* This maps the registers of the specified controller and initializes it
*/
static inline void __init init_ohci1394_controller(int num, int slot, int func)
@@ -284,6 +293,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
/**
* setup_ohci1394_dma - enables early OHCI1394 DMA initialization
+ * @opt: Kernel command line parameter string
*/
static int __init setup_ohci1394_dma(char *opt)
{
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 5d8301b0f3aa..e3e78dc42530 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -228,13 +228,10 @@ struct fw_ohci {
__le32 *self_id;
dma_addr_t self_id_bus;
- struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
-static struct workqueue_struct *selfid_workqueue;
-
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
return container_of(card, struct fw_ohci, card);
@@ -393,225 +390,10 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
")");
-#define OHCI_PARAM_DEBUG_AT_AR 1
-#define OHCI_PARAM_DEBUG_SELFIDS 2
-#define OHCI_PARAM_DEBUG_IRQS 4
-
-static int param_debug;
-module_param_named(debug, param_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Verbose logging, deprecated in v6.11 kernel or later. (default = 0"
- ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
- ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
- ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
- ", or a combination, or all = -1)");
-
static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
-static void log_irqs(struct fw_ohci *ohci, u32 evt)
-{
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
- return;
-
- ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
- evt & OHCI1394_selfIDComplete ? " selfID" : "",
- evt & OHCI1394_RQPkt ? " AR_req" : "",
- evt & OHCI1394_RSPkt ? " AR_resp" : "",
- evt & OHCI1394_reqTxComplete ? " AT_req" : "",
- evt & OHCI1394_respTxComplete ? " AT_resp" : "",
- evt & OHCI1394_isochRx ? " IR" : "",
- evt & OHCI1394_isochTx ? " IT" : "",
- evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
- evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
- evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
- evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
- evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
- evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
- evt & OHCI1394_busReset ? " busReset" : "",
- evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
- OHCI1394_RSPkt | OHCI1394_reqTxComplete |
- OHCI1394_respTxComplete | OHCI1394_isochRx |
- OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent |
- OHCI1394_regAccessFail | OHCI1394_busReset)
- ? " ?" : "");
-}
-
-static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
-{
- static const char *const speed[] = {
- [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
- };
- static const char *const power[] = {
- [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
- [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
- };
- static const char port[] = {
- [PHY_PACKET_SELF_ID_PORT_STATUS_NONE] = '.',
- [PHY_PACKET_SELF_ID_PORT_STATUS_NCONN] = '-',
- [PHY_PACKET_SELF_ID_PORT_STATUS_PARENT] = 'p',
- [PHY_PACKET_SELF_ID_PORT_STATUS_CHILD] = 'c',
- };
- struct self_id_sequence_enumerator enumerator = {
- .cursor = ohci->self_id_buffer,
- .quadlet_count = self_id_count,
- };
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
- return;
-
- ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
- self_id_count, generation, ohci->node_id);
-
- while (enumerator.quadlet_count > 0) {
- unsigned int quadlet_count;
- unsigned int port_index;
- const u32 *s;
- int i;
-
- s = self_id_sequence_enumerator_next(&enumerator, &quadlet_count);
- if (IS_ERR(s))
- break;
-
- ohci_notice(ohci,
- "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
- *s,
- phy_packet_self_id_get_phy_id(*s),
- port[self_id_sequence_get_port_status(s, quadlet_count, 0)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, 2)],
- speed[*s >> 14 & 3], *s >> 16 & 63,
- power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
- *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
-
- port_index = 3;
- for (i = 1; i < quadlet_count; ++i) {
- ohci_notice(ohci,
- "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
- s[i],
- phy_packet_self_id_get_phy_id(s[i]),
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 1)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 2)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 3)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 4)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 5)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 6)],
- port[self_id_sequence_get_port_status(s, quadlet_count, port_index + 7)]
- );
-
- port_index += 8;
- }
- }
-}
-
-static const char *evts[] = {
- [0x00] = "evt_no_status", [0x01] = "-reserved-",
- [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
- [0x04] = "evt_underrun", [0x05] = "evt_overrun",
- [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
- [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
- [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
- [0x0c] = "-reserved-", [0x0d] = "-reserved-",
- [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
- [0x10] = "-reserved-", [0x11] = "ack_complete",
- [0x12] = "ack_pending ", [0x13] = "-reserved-",
- [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
- [0x16] = "ack_busy_B", [0x17] = "-reserved-",
- [0x18] = "-reserved-", [0x19] = "-reserved-",
- [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
- [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
- [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
- [0x20] = "pending/cancelled",
-};
-
-static void log_ar_at_event(struct fw_ohci *ohci,
- char dir, int speed, u32 *header, int evt)
-{
- static const char *const tcodes[] = {
- [TCODE_WRITE_QUADLET_REQUEST] = "QW req",
- [TCODE_WRITE_BLOCK_REQUEST] = "BW req",
- [TCODE_WRITE_RESPONSE] = "W resp",
- [0x3] = "-reserved-",
- [TCODE_READ_QUADLET_REQUEST] = "QR req",
- [TCODE_READ_BLOCK_REQUEST] = "BR req",
- [TCODE_READ_QUADLET_RESPONSE] = "QR resp",
- [TCODE_READ_BLOCK_RESPONSE] = "BR resp",
- [TCODE_CYCLE_START] = "cycle start",
- [TCODE_LOCK_REQUEST] = "Lk req",
- [TCODE_STREAM_DATA] = "async stream packet",
- [TCODE_LOCK_RESPONSE] = "Lk resp",
- [0xc] = "-reserved-",
- [0xd] = "-reserved-",
- [TCODE_LINK_INTERNAL] = "link internal",
- [0xf] = "-reserved-",
- };
- int tcode = async_header_get_tcode(header);
- char specific[12];
-
- if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
- return;
-
- if (unlikely(evt >= ARRAY_SIZE(evts)))
- evt = 0x1f;
-
- if (evt == OHCI1394_evt_bus_reset) {
- ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
- dir, (header[2] >> 16) & 0xff);
- return;
- }
-
- switch (tcode) {
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_READ_QUADLET_RESPONSE:
- case TCODE_CYCLE_START:
- snprintf(specific, sizeof(specific), " = %08x",
- be32_to_cpu((__force __be32)header[3]));
- break;
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_READ_BLOCK_RESPONSE:
- case TCODE_LOCK_REQUEST:
- case TCODE_LOCK_RESPONSE:
- snprintf(specific, sizeof(specific), " %x,%x",
- async_header_get_data_length(header),
- async_header_get_extended_tcode(header));
- break;
- default:
- specific[0] = '\0';
- }
-
- switch (tcode) {
- case TCODE_STREAM_DATA:
- ohci_notice(ohci, "A%c %s, %s\n",
- dir, evts[evt], tcodes[tcode]);
- break;
- case TCODE_LINK_INTERNAL:
- ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
- dir, evts[evt], header[1], header[2]);
- break;
- case TCODE_WRITE_QUADLET_REQUEST:
- case TCODE_WRITE_BLOCK_REQUEST:
- case TCODE_READ_QUADLET_REQUEST:
- case TCODE_READ_BLOCK_REQUEST:
- case TCODE_LOCK_REQUEST:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
- break;
- default:
- ohci_notice(ohci,
- "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
- dir, speed, async_header_get_tlabel(header),
- async_header_get_source(header), async_header_get_destination(header),
- evts[evt], tcodes[tcode], specific);
- }
-}
-
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
writel(data, ohci->registers + offset);
@@ -957,8 +739,6 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
p.timestamp = status & 0xffff;
p.generation = ohci->request_generation;
- log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
-
/*
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
@@ -977,7 +757,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
- * at a slightly incorrect time (in bus_reset_work).
+ * at a slightly incorrect time (in handle_selfid_complete_event).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!(ohci->quirks & QUIRK_RESET_PACKET))
@@ -1539,6 +1319,14 @@ static void at_context_flush(struct at_context *ctx)
enable_work(&ctx->work);
}
+static int find_fw_device(struct device *dev, const void *data)
+{
+ struct fw_device *device = fw_device(dev);
+ const u32 *params = data;
+
+ return (device->generation == params[0]) && (device->node_id == params[1]);
+}
+
static int handle_at_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -1566,8 +1354,6 @@ static int handle_at_packet(struct context *context,
evt = le16_to_cpu(last->transfer_status) & 0x1f;
packet->timestamp = le16_to_cpu(last->res_count);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
-
switch (evt) {
case OHCI1394_evt_timeout:
/* Async response transmit timed out. */
@@ -1612,6 +1398,27 @@ static int handle_at_packet(struct context *context,
fallthrough;
default:
+ if (unlikely(evt == 0x10)) {
+ u32 params[2] = {
+ packet->generation,
+ async_header_get_destination(packet->header),
+ };
+ struct device *dev;
+
+ fw_card_get(&ohci->card);
+ dev = device_find_child(ohci->card.device, (const void *)params, find_fw_device);
+ fw_card_put(&ohci->card);
+ if (dev) {
+ struct fw_device *device = fw_device(dev);
+ int quirks = READ_ONCE(device->quirks);
+
+ put_device(dev);
+ if (quirks & FW_DEVICE_QUIRK_ACK_PACKET_WITH_INVALID_PENDING_CODE) {
+ packet->ack = ACK_PENDING;
+ break;
+ }
+ }
+ }
packet->ack = RCODE_SEND_ERROR;
break;
}
@@ -1772,6 +1579,25 @@ static void at_context_transmit(struct at_context *ctx, struct fw_packet *packet
static void detect_dead_context(struct fw_ohci *ohci,
const char *name, unsigned int regs)
{
+ static const char *const evts[] = {
+ [0x00] = "evt_no_status", [0x01] = "-reserved-",
+ [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
+ [0x04] = "evt_underrun", [0x05] = "evt_overrun",
+ [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
+ [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
+ [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
+ [0x0c] = "-reserved-", [0x0d] = "-reserved-",
+ [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
+ [0x10] = "-reserved-", [0x11] = "ack_complete",
+ [0x12] = "ack_pending ", [0x13] = "-reserved-",
+ [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
+ [0x16] = "ack_busy_B", [0x17] = "-reserved-",
+ [0x18] = "-reserved-", [0x19] = "-reserved-",
+ [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
+ [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
+ [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
+ [0x20] = "pending/cancelled",
+ };
u32 ctl;
ctl = reg_read(ohci, CONTROL_SET(regs));
@@ -2030,9 +1856,9 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
return self_id_count;
}
-static void bus_reset_work(struct work_struct *work)
+static irqreturn_t handle_selfid_complete_event(int irq, void *data)
{
- struct fw_ohci *ohci = from_work(ohci, work, bus_reset_work);
+ struct fw_ohci *ohci = data;
int self_id_count, generation, new_generation, i, j;
u32 reg, quadlet;
void *free_rom = NULL;
@@ -2043,11 +1869,11 @@ static void bus_reset_work(struct work_struct *work)
if (!(reg & OHCI1394_NodeID_idValid)) {
ohci_notice(ohci,
"node ID not valid, new bus reset in progress\n");
- return;
+ goto end;
}
if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
ohci_notice(ohci, "malconfigured bus\n");
- return;
+ goto end;
}
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
@@ -2061,8 +1887,11 @@ static void bus_reset_work(struct work_struct *work)
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (ohci1394_self_id_count_is_error(reg)) {
ohci_notice(ohci, "self ID receive error\n");
- return;
+ goto end;
}
+
+ trace_self_id_complete(ohci->card.index, reg, ohci->self_id, has_be_header_quirk(ohci));
+
/*
* The count in the SelfIDCount register is the number of
* bytes in the self ID receive buffer. Since we also receive
@@ -2073,7 +1902,7 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count > 252) {
ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
- return;
+ goto end;
}
quadlet = cond_le32_to_cpu(ohci->self_id[0], has_be_header_quirk(ohci));
@@ -2100,7 +1929,7 @@ static void bus_reset_work(struct work_struct *work)
ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
j, self_id_count, id, id2);
- return;
+ goto end;
}
ohci->self_id_buffer[j] = id;
}
@@ -2110,13 +1939,13 @@ static void bus_reset_work(struct work_struct *work)
if (self_id_count < 0) {
ohci_notice(ohci,
"could not construct local self ID\n");
- return;
+ goto end;
}
}
if (self_id_count == 0) {
ohci_notice(ohci, "no self IDs\n");
- return;
+ goto end;
}
rmb();
@@ -2138,7 +1967,7 @@ static void bus_reset_work(struct work_struct *work)
new_generation = ohci1394_self_id_count_get_generation(reg);
if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n");
- return;
+ goto end;
}
// FIXME: Document how the locking works.
@@ -2195,12 +2024,12 @@ static void bus_reset_work(struct work_struct *work)
if (free_rom)
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
- log_selfids(ohci, generation, self_id_count);
-
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
self_id_count, ohci->self_id_buffer,
ohci->csr_state_setclear_abdicate);
ohci->csr_state_setclear_abdicate = false;
+end:
+ return IRQ_HANDLED;
}
static irqreturn_t irq_handler(int irq, void *data)
@@ -2214,11 +2043,6 @@ static irqreturn_t irq_handler(int irq, void *data)
if (!event || !~event)
return IRQ_NONE;
- if (unlikely(param_debug > 0)) {
- dev_notice_ratelimited(ohci->card.device,
- "The debug parameter is superseded by tracepoints events, and deprecated.");
- }
-
/*
* busReset and postedWriteErr events must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
@@ -2226,21 +2050,11 @@ static irqreturn_t irq_handler(int irq, void *data)
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
trace_irqs(ohci->card.index, event);
- log_irqs(ohci, event);
- // The flag is masked again at bus_reset_work() scheduled by selfID event.
+
+ // The flag is masked again at handle_selfid_complete_event(), scheduled by the selfID event.
if (event & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
- if (event & OHCI1394_selfIDComplete) {
- if (trace_self_id_complete_enabled()) {
- u32 reg = reg_read(ohci, OHCI1394_SelfIDCount);
-
- trace_self_id_complete(ohci->card.index, reg, ohci->self_id,
- has_be_header_quirk(ohci));
- }
- queue_work(selfid_workqueue, &ohci->bus_reset_work);
- }
-
if (event & OHCI1394_RQPkt)
queue_work(ohci->card.async_wq, &ohci->ar_request_ctx.work);
@@ -2311,7 +2125,10 @@ static irqreturn_t irq_handler(int irq, void *data)
} else
flush_writes(ohci);
- return IRQ_HANDLED;
+ if (event & OHCI1394_selfIDComplete)
+ return IRQ_WAKE_THREAD;
+ else
+ return IRQ_HANDLED;
}
static int software_reset(struct fw_ohci *ohci)
@@ -2591,6 +2408,41 @@ static int ohci_enable(struct fw_card *card,
return 0;
}
+static void ohci_disable(struct fw_card *card)
+{
+ struct pci_dev *pdev = to_pci_dev(card->device);
+ struct fw_ohci *ohci = pci_get_drvdata(pdev);
+ int i, irq = pci_irq_vector(pdev, 0);
+
+ // If the removal is happening from the suspend state, LPS won't be enabled and host
+ // registers (e.g., IntMaskClear) won't be accessible.
+ if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS))
+ return;
+
+ reg_write(ohci, OHCI1394_IntMaskClear, ~0);
+ flush_writes(ohci);
+
+ if (irq >= 0)
+ synchronize_irq(irq);
+
+ flush_work(&ohci->ar_request_ctx.work);
+ flush_work(&ohci->ar_response_ctx.work);
+ flush_work(&ohci->at_request_ctx.work);
+ flush_work(&ohci->at_response_ctx.work);
+
+ for (i = 0; i < ohci->n_ir; ++i) {
+ if (!(ohci->ir_context_mask & BIT(i)))
+ flush_work(&ohci->ir_context_list[i].base.work);
+ }
+ for (i = 0; i < ohci->n_it; ++i) {
+ if (!(ohci->it_context_mask & BIT(i)))
+ flush_work(&ohci->it_context_list[i].base.work);
+ }
+
+ at_context_flush(&ohci->at_request_ctx);
+ at_context_flush(&ohci->at_response_ctx);
+}
+
static int ohci_set_config_rom(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
@@ -2624,7 +2476,7 @@ static int ohci_set_config_rom(struct fw_card *card,
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
- * ohci->next_config_rom to NULL (see bus_reset_work).
+ * ohci->next_config_rom to NULL (see handle_selfid_complete_event).
*/
next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
@@ -2705,7 +2557,6 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
- log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
@@ -3626,6 +3477,7 @@ static int ohci_flush_iso_completions(struct fw_iso_context *base)
static const struct fw_card_driver ohci_driver = {
.enable = ohci_enable,
+ .disable = ohci_disable,
.read_phy_reg = ohci_read_phy_reg,
.update_phy_reg = ohci_update_phy_reg,
.set_config_rom = ohci_set_config_rom,
@@ -3695,7 +3547,6 @@ static int pci_probe(struct pci_dev *dev,
u32 bus_options, max_receive, link_speed, version;
u64 guid;
int i, flags, irq, err;
- size_t size;
if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
@@ -3722,8 +3573,6 @@ static int pci_probe(struct pci_dev *dev,
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
- INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
-
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
ohci_err(ohci, "invalid MMIO resource\n");
@@ -3791,8 +3640,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
ohci->ir_context_mask = ohci->ir_context_support;
ohci->n_ir = hweight32(ohci->ir_context_mask);
- size = sizeof(struct iso_context) * ohci->n_ir;
- ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->ir_context_list = devm_kcalloc(&dev->dev, ohci->n_ir, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->ir_context_list)
return -ENOMEM;
@@ -3806,8 +3654,7 @@ static int pci_probe(struct pci_dev *dev,
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
ohci->it_context_mask = ohci->it_context_support;
ohci->n_it = hweight32(ohci->it_context_mask);
- size = sizeof(struct iso_context) * ohci->n_it;
- ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
+ ohci->it_context_list = devm_kcalloc(&dev->dev, ohci->n_it, sizeof(struct iso_context), GFP_KERNEL);
if (!ohci->it_context_list)
return -ENOMEM;
@@ -3832,7 +3679,9 @@ static int pci_probe(struct pci_dev *dev,
goto fail_msi;
}
- err = request_threaded_irq(irq, irq_handler, NULL,
+ // IRQF_ONESHOT is not applied so that other events can still be handled by the hardIRQ
+ // handler while the threaded IRQ handler runs for the SelfIDComplete event.
+ err = request_threaded_irq(irq, irq_handler, handle_selfid_complete_event,
pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
ohci);
if (err < 0) {
@@ -3868,22 +3717,8 @@ static void pci_remove(struct pci_dev *dev)
struct fw_ohci *ohci = pci_get_drvdata(dev);
int irq;
- /*
- * If the removal is happening from the suspend state, LPS won't be
- * enabled and host registers (eg., IntMaskClear) won't be accessible.
- */
- if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
- reg_write(ohci, OHCI1394_IntMaskClear, ~0);
- flush_writes(ohci);
- }
- cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
- /*
- * FIXME: Fail all pending packets here, now that the upper
- * layers can't queue any more.
- */
-
software_reset(ohci);
irq = pci_irq_vector(dev, 0);
@@ -3949,17 +3784,12 @@ static struct pci_driver fw_ohci_pci_driver = {
static int __init fw_ohci_init(void)
{
- selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
- if (!selfid_workqueue)
- return -ENOMEM;
-
return pci_register_driver(&fw_ohci_pci_driver);
}
static void __exit fw_ohci_cleanup(void)
{
pci_unregister_driver(&fw_ohci_pci_driver);
- destroy_workqueue(selfid_workqueue);
}
module_init(fw_ohci_init);
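For reference, the conversion above follows the standard threaded-IRQ pattern: the hard handler acknowledges and services cheap events, and returns IRQ_WAKE_THREAD only when the expensive self-ID processing is needed. A minimal sketch of that pattern; all names here are illustrative, not the driver's actual symbols.

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/io.h>

#define DEMO_INT_EVENT	0x00
#define DEMO_INT_CLEAR	0x04
#define DEMO_EVENT_SLOW	BIT(0)

struct demo_dev {
	void __iomem *regs;
};

static irqreturn_t demo_hard_handler(int irq, void *data)
{
	struct demo_dev *dev = data;
	u32 event = readl(dev->regs + DEMO_INT_EVENT);

	if (!event)
		return IRQ_NONE;

	writel(event, dev->regs + DEMO_INT_CLEAR);

	/* Cheap events are handled right here, in hardIRQ context. */

	if (event & DEMO_EVENT_SLOW)
		return IRQ_WAKE_THREAD;	/* defer to demo_thread_handler() */

	return IRQ_HANDLED;
}

static irqreturn_t demo_thread_handler(int irq, void *data)
{
	/* Process context: may sleep, take mutexes, allocate memory. */
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct demo_dev *dev, int irq)
{
	/* No IRQF_ONESHOT: the line stays enabled while the thread runs. */
	return request_threaded_irq(irq, demo_hard_handler, demo_thread_handler,
				    0, "demo", dev);
}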
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 65bf1685350a..c72ee4756585 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -649,6 +649,26 @@ static u16 ffa_memory_attributes_get(u32 func_id)
return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}
+static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src)
+{
+ struct ffa_mem_region_attributes *ep_mem_access;
+
+ if (FFA_EMAD_HAS_IMPDEF_FIELD(version))
+ memcpy(dst, src, sizeof(ep_mem_access->impdef_val));
+}
+
+static void
+ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region)
+{
+ if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) {
+ mem_region->ep_mem_size = 0;
+ } else {
+ mem_region->ep_mem_size = ffa_emad_size_get(version);
+ mem_region->ep_mem_offset = sizeof(*mem_region);
+ memset(mem_region->reserved, 0, 12);
+ }
+}
+
static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
struct ffa_mem_ops_args *args)
@@ -667,27 +687,24 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
mem_region->flags = args->flags;
mem_region->sender_id = drv_info->vm_id;
mem_region->attributes = ffa_memory_attributes_get(func_id);
- ep_mem_access = buffer +
- ffa_mem_desc_offset(buffer, 0, drv_info->version);
composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
drv_info->version);
- for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
+ for (idx = 0; idx < args->nattrs; idx++) {
+ ep_mem_access = buffer +
+ ffa_mem_desc_offset(buffer, idx, drv_info->version);
ep_mem_access->receiver = args->attrs[idx].receiver;
ep_mem_access->attrs = args->attrs[idx].attrs;
ep_mem_access->composite_off = composite_offset;
ep_mem_access->flag = 0;
ep_mem_access->reserved = 0;
+ ffa_emad_impdef_value_init(drv_info->version,
+ ep_mem_access->impdef_val,
+ args->attrs[idx].impdef_val);
}
mem_region->handle = 0;
mem_region->ep_count = args->nattrs;
- if (drv_info->version <= FFA_VERSION_1_0) {
- mem_region->ep_mem_size = 0;
- } else {
- mem_region->ep_mem_size = sizeof(*ep_mem_access);
- mem_region->ep_mem_offset = sizeof(*mem_region);
- memset(mem_region->reserved, 0, 12);
- }
+ ffa_mem_region_additional_setup(drv_info->version, mem_region);
composite = buffer + composite_offset;
composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
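The loop change above matters because the endpoint memory-access descriptor grows between FF-A ABI versions (the impdef field is added later), so a plain pointer increment walks with the wrong stride. A stand-alone sketch of version-dependent indexing, with assumed, illustrative layouts rather than the spec's exact ones:

#include <linux/types.h>

struct emad_old {
	u32 receiver_attrs;
	u32 composite_off;
};

struct emad_new {
	u32 receiver_attrs;
	u32 composite_off;
	u8 impdef_val[16];
};

static size_t emad_size(u32 version, u32 version_with_impdef)
{
	return version >= version_with_impdef ?
		sizeof(struct emad_new) : sizeof(struct emad_old);
}

/* Element i sits at a version-dependent stride past the region header. */
static void *emad_at(void *buffer, size_t hdr_size, u32 version,
		     u32 version_with_impdef, unsigned int i)
{
	return (u8 *)buffer + hdr_size + i * emad_size(version, version_with_impdef);
}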
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 24e59ddf85e7..c7698cfaa4e8 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -401,8 +401,8 @@ static void scmi_device_release(struct device *dev)
static void __scmi_device_destroy(struct scmi_device *scmi_dev)
{
- pr_debug("(%s) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(scmi_dev->dev.parent->of_node),
+ pr_debug("(%pOF) Destroying SCMI device '%s' for protocol 0x%x (%s)\n",
+ scmi_dev->dev.parent->of_node,
dev_name(&scmi_dev->dev), scmi_dev->protocol_id,
scmi_dev->name);
@@ -474,9 +474,8 @@ __scmi_device_create(struct device_node *np, struct device *parent,
if (retval)
goto put_dev;
- pr_debug("(%s) Created SCMI device '%s' for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node),
- dev_name(&scmi_dev->dev), protocol, name);
+ pr_debug("(%pOF) Created SCMI device '%s' for protocol 0x%x (%s)\n",
+ parent->of_node, dev_name(&scmi_dev->dev), protocol, name);
return scmi_dev;
put_dev:
@@ -493,8 +492,8 @@ _scmi_device_create(struct device_node *np, struct device *parent,
sdev = __scmi_device_create(np, parent, protocol, name);
if (!sdev)
- pr_err("(%s) Failed to create device for protocol 0x%x (%s)\n",
- of_node_full_name(parent->of_node), protocol, name);
+ pr_err("(%pOF) Failed to create device for protocol 0x%x (%s)\n",
+ parent->of_node, protocol, name);
return sdev;
}
diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h
index 07b9e629276d..7c35c95fddba 100644
--- a/drivers/firmware/arm_scmi/common.h
+++ b/drivers/firmware/arm_scmi/common.h
@@ -309,16 +309,36 @@ enum debug_counters {
SCMI_DEBUG_COUNTERS_LAST
};
-static inline void scmi_inc_count(atomic_t *arr, int stat)
+/**
+ * struct scmi_debug_info - Debug common info
+ * @top_dentry: A reference to the top debugfs dentry
+ * @name: Name of this SCMI instance
+ * @type: Type of this SCMI instance
+ * @is_atomic: Flag to state if the transport of this instance is atomic
+ * @counters: An array of atomic_t's used for tracking statistics (if enabled)
+ */
+struct scmi_debug_info {
+ struct dentry *top_dentry;
+ const char *name;
+ const char *type;
+ bool is_atomic;
+ atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
+};
+
+static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat)
{
- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
- atomic_inc(&arr[stat]);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_inc(&dbg->counters[stat]);
+ }
}
-static inline void scmi_dec_count(atomic_t *arr, int stat)
+static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat)
{
- if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS))
- atomic_dec(&arr[stat]);
+ if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
+ if (dbg)
+ atomic_dec(&dbg->counters[stat]);
+ }
}
enum scmi_bad_msg {
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index bd56a877fdfc..5caa9191a8d1 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -116,22 +116,6 @@ struct scmi_protocol_instance {
#define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph)
/**
- * struct scmi_debug_info - Debug common info
- * @top_dentry: A reference to the top debugfs dentry
- * @name: Name of this SCMI instance
- * @type: Type of this SCMI instance
- * @is_atomic: Flag to state if the transport of this instance is atomic
- * @counters: An array of atomic_c's used for tracking statistics (if enabled)
- */
-struct scmi_debug_info {
- struct dentry *top_dentry;
- const char *name;
- const char *type;
- bool is_atomic;
- atomic_t counters[SCMI_DEBUG_COUNTERS_LAST];
-};
-
-/**
* struct scmi_info - Structure representing a SCMI instance
*
* @id: A sequence number starting from zero identifying this instance
@@ -610,7 +594,7 @@ scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer,
/* Set in-flight */
set_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq);
- scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT);
+ scmi_inc_count(info->dbg, XFERS_INFLIGHT);
xfer->pending = true;
}
@@ -819,8 +803,9 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
hash_del(&xfer->node);
xfer->pending = false;
- scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT);
+ scmi_dec_count(info->dbg, XFERS_INFLIGHT);
}
+ xfer->flags = 0;
hlist_add_head(&xfer->node, &minfo->free_xfers);
}
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
@@ -839,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
struct scmi_info *info = handle_to_scmi_info(handle);
- xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW;
- xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET;
return __scmi_xfer_put(&info->tx_minfo, xfer);
}
@@ -1034,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
spin_unlock_irqrestore(&minfo->xfer_lock, flags);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED);
- scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED);
+ scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED);
return xfer;
}
@@ -1062,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr)
msg_type, xfer_id, msg_hdr, xfer->state);
scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID);
- scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID);
+ scmi_inc_count(info->dbg, ERR_MSG_INVALID);
/* On error the refcount incremented above has to be dropped */
__scmi_xfer_put(minfo, xfer);
@@ -1107,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
PTR_ERR(xfer));
scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM);
- scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM);
+ scmi_inc_count(info->dbg, ERR_MSG_NOMEM);
scmi_clear_channel(info, cinfo);
return;
@@ -1123,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo,
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "NOTI", xfer->hdr.seq,
xfer->hdr.status, xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, NOTIFICATION_OK);
+ scmi_inc_count(info->dbg, NOTIFICATION_OK);
scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);
@@ -1183,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo,
if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) {
scmi_clear_channel(info, cinfo);
complete(xfer->async_done);
- scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK);
+ scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK);
} else {
complete(&xfer->done);
- scmi_inc_count(info->dbg->counters, RESPONSE_OK);
+ scmi_inc_count(info->dbg, RESPONSE_OK);
}
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
@@ -1296,7 +1279,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"timed out in resp(caller: %pS) - polling\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT);
}
}
@@ -1321,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
"RESP" : "resp",
xfer->hdr.seq, xfer->hdr.status,
xfer->rx.buf, xfer->rx.len);
- scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK);
+ scmi_inc_count(info->dbg, RESPONSE_POLLED_OK);
if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
scmi_raw_message_report(info->raw, xfer,
@@ -1336,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc,
dev_err(dev, "timed out in resp(caller: %pS)\n",
(void *)_RET_IP_);
ret = -ETIMEDOUT;
- scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT);
+ scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT);
}
}
@@ -1420,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
!is_transport_polling_capable(info->desc)) {
dev_warn_once(dev,
"Polling mode is not supported by transport.\n");
- scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED);
+ scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED);
return -EINVAL;
}
cinfo = idr_find(&info->tx_idr, pi->proto->id);
if (unlikely(!cinfo)) {
- scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND);
+ scmi_inc_count(info->dbg, SENT_FAIL_CHANNEL_NOT_FOUND);
return -EINVAL;
}
/* True ONLY if also supported by transport. */
@@ -1461,19 +1444,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
ret = info->desc->ops->send_message(cinfo, xfer);
if (ret < 0) {
dev_dbg(dev, "Failed to send message %d\n", ret);
- scmi_inc_count(info->dbg->counters, SENT_FAIL);
+ scmi_inc_count(info->dbg, SENT_FAIL);
return ret;
}
trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id,
xfer->hdr.id, "CMND", xfer->hdr.seq,
xfer->hdr.status, xfer->tx.buf, xfer->tx.len);
- scmi_inc_count(info->dbg->counters, SENT_OK);
+ scmi_inc_count(info->dbg, SENT_OK);
ret = scmi_wait_for_message_response(cinfo, xfer);
if (!ret && xfer->hdr.status) {
ret = scmi_to_linux_errno(xfer->hdr.status);
- scmi_inc_count(info->dbg->counters, ERR_PROTOCOL);
+ scmi_inc_count(info->dbg, ERR_PROTOCOL);
}
if (info->desc->ops->mark_txdone)
@@ -3044,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info)
u8 channels[SCMI_MAX_CHANNELS] = {};
DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {};
- if (!info->dbg)
- return -EINVAL;
-
/* Enumerate all channels to collect their ids */
idr_for_each_entry(&info->tx_idr, cinfo, id) {
/*
@@ -3218,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev)
if (!info->dbg)
dev_warn(dev, "Failed to setup SCMI debugfs.\n");
- if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
+ if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) {
ret = scmi_debugfs_raw_mode_setup(info);
if (!coex) {
if (ret)
@@ -3423,6 +3403,9 @@ int scmi_inflight_count(const struct scmi_handle *handle)
if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) {
struct scmi_info *info = handle_to_scmi_info(handle);
+ if (!info->dbg)
+ return 0;
+
return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]);
} else {
return 0;
diff --git a/drivers/firmware/arm_scmi/quirks.c b/drivers/firmware/arm_scmi/quirks.c
index 03960aca3610..03848283c2a0 100644
--- a/drivers/firmware/arm_scmi/quirks.c
+++ b/drivers/firmware/arm_scmi/quirks.c
@@ -71,6 +71,7 @@
*/
#include <linux/ctype.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/hashtable.h>
@@ -89,9 +90,9 @@
struct scmi_quirk {
bool enabled;
const char *name;
- char *vendor;
- char *sub_vendor_id;
- char *impl_ver_range;
+ const char *vendor;
+ const char *sub_vendor_id;
+ const char *impl_ver_range;
u32 start_range;
u32 end_range;
struct static_key_false *key;
@@ -217,7 +218,7 @@ static unsigned int scmi_quirk_signature(const char *vend, const char *sub_vend)
static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
{
- const char *last, *first = quirk->impl_ver_range;
+ const char *last, *first __free(kfree) = NULL;
size_t len;
char *sep;
int ret;
@@ -228,8 +229,12 @@ static int scmi_quirk_range_parse(struct scmi_quirk *quirk)
if (!len)
return 0;
+ first = kmemdup(quirk->impl_ver_range, len + 1, GFP_KERNEL);
+ if (!first)
+ return -ENOMEM;
+
last = first + len - 1;
- sep = strchr(quirk->impl_ver_range, '-');
+ sep = strchr(first, '-');
if (sep)
*sep = '\0';
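The quirks change makes the table entries const and parses the version range in a heap copy instead. A minimal sketch of that idiom, assuming <linux/cleanup.h>: duplicate the const string so strchr()/'\0' can legally split it, and let __free(kfree) release the copy on every return path. The function name is hypothetical.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int parse_range_demo(const char *range)
{
	char *copy __free(kfree) = kstrdup(range, GFP_KERNEL);
	char *sep;

	if (!copy)
		return -ENOMEM;

	sep = strchr(copy, '-');
	if (sep)
		*sep = '\0';	/* split "A-B" inside the mutable copy */

	/* ... parse "copy" (and sep + 1, if present) with kstrtou32() ... */
	return 0;	/* "copy" is kfree()d automatically on return */
}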
diff --git a/drivers/firmware/arm_scmi/transports/mailbox.c b/drivers/firmware/arm_scmi/transports/mailbox.c
index bd041c99b92b..ae0f67e6cc45 100644
--- a/drivers/firmware/arm_scmi/transports/mailbox.c
+++ b/drivers/firmware/arm_scmi/transports/mailbox.c
@@ -127,8 +127,8 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
(num_mb == 1 && num_sh != 1) || (num_mb == 3 && num_sh != 2) ||
(num_mb == 4 && num_sh != 2)) {
dev_warn(cdev,
- "Invalid channel descriptor for '%s' - mbs:%d shm:%d\n",
- of_node_full_name(np), num_mb, num_sh);
+ "Invalid channel descriptor for '%pOF' - mbs:%d shm:%d\n",
+ np, num_mb, num_sh);
return -EINVAL;
}
@@ -140,8 +140,7 @@ static int mailbox_chan_validate(struct device *cdev, int *a2p_rx_chan,
of_parse_phandle(np, "shmem", 1);
if (!np_tx || !np_rx || np_tx == np_rx) {
- dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
- of_node_full_name(np));
+ dev_warn(cdev, "Invalid shmem descriptor for '%pOF'\n", np);
ret = -EINVAL;
}
}
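The conversions in this file (and in bus.c above) lean on the %pOF printk extension, which prints a struct device_node's full path directly and removes the need for of_node_full_name(). A minimal usage sketch, with an illustrative function name:

#include <linux/of.h>
#include <linux/printk.h>

static void report_bad_channel(struct device_node *np, int num_mb, int num_sh)
{
	/* %pOF expands to the node's full device-tree path */
	pr_warn("Invalid channel descriptor for '%pOF' - mbs:%d shm:%d\n",
		np, num_mb, num_sh);
}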
diff --git a/drivers/firmware/arm_scmi/transports/optee.c b/drivers/firmware/arm_scmi/transports/optee.c
index 3949a877e17d..dc0f46340153 100644
--- a/drivers/firmware/arm_scmi/transports/optee.c
+++ b/drivers/firmware/arm_scmi/transports/optee.c
@@ -498,7 +498,7 @@ static void scmi_optee_mark_txdone(struct scmi_chan_info *cinfo, int ret,
mutex_unlock(&channel->mu);
}
-static struct scmi_transport_ops scmi_optee_ops = {
+static const struct scmi_transport_ops scmi_optee_ops = {
.chan_available = scmi_optee_chan_available,
.chan_setup = scmi_optee_chan_setup,
.chan_free = scmi_optee_chan_free,
diff --git a/drivers/firmware/arm_scmi/transports/virtio.c b/drivers/firmware/arm_scmi/transports/virtio.c
index cb934db9b2b4..326c4a93e44b 100644
--- a/drivers/firmware/arm_scmi/transports/virtio.c
+++ b/drivers/firmware/arm_scmi/transports/virtio.c
@@ -871,6 +871,9 @@ static int scmi_vio_probe(struct virtio_device *vdev)
/* Ensure initialized scmi_vdev is visible */
smp_store_mb(scmi_vdev, vdev);
+ /* Set device ready */
+ virtio_device_ready(vdev);
+
ret = platform_driver_register(&scmi_virtio_driver);
if (ret) {
vdev->priv = NULL;
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index a8915d3b4df5..700a3f24f4ef 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -25,7 +25,10 @@
enum scmi_imx_misc_protocol_cmd {
SCMI_IMX_MISC_CTRL_SET = 0x3,
SCMI_IMX_MISC_CTRL_GET = 0x4,
+ SCMI_IMX_MISC_DISCOVER_BUILD_INFO = 0x6,
SCMI_IMX_MISC_CTRL_NOTIFY = 0x8,
+ SCMI_IMX_MISC_CFG_INFO_GET = 0xC,
+ SCMI_IMX_MISC_BOARD_INFO = 0xE,
};
struct scmi_imx_misc_info {
@@ -65,6 +68,27 @@ struct scmi_imx_misc_ctrl_get_out {
__le32 val[];
};
+struct scmi_imx_misc_buildinfo_out {
+ __le32 buildnum;
+ __le32 buildcommit;
+#define MISC_MAX_BUILDDATE 16
+ u8 builddate[MISC_MAX_BUILDDATE];
+#define MISC_MAX_BUILDTIME 16
+ u8 buildtime[MISC_MAX_BUILDTIME];
+};
+
+struct scmi_imx_misc_board_info_out {
+ __le32 attributes;
+#define MISC_MAX_BRDNAME 16
+ u8 brdname[MISC_MAX_BRDNAME];
+};
+
+struct scmi_imx_misc_cfg_info_out {
+ __le32 msel;
+#define MISC_MAX_CFGNAME 16
+ u8 cfgname[MISC_MAX_CFGNAME];
+};
+
static int scmi_imx_misc_attributes_get(const struct scmi_protocol_handle *ph,
struct scmi_imx_misc_info *mi)
{
@@ -272,6 +296,81 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
return ret;
}
+static int scmi_imx_misc_build_info_discover(const struct scmi_protocol_handle *ph)
+{
+ char date[MISC_MAX_BUILDDATE], time[MISC_MAX_BUILDTIME];
+ struct scmi_imx_misc_buildinfo_out *out;
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_DISCOVER_BUILD_INFO, 0,
+ sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(date, out->builddate, MISC_MAX_BUILDDATE);
+ strscpy(time, out->buildtime, MISC_MAX_BUILDTIME);
+ dev_info(ph->dev, "SM Version\t= Build %u, Commit %08x %s %s\n",
+ le32_to_cpu(out->buildnum), le32_to_cpu(out->buildcommit),
+ date, time);
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_board_info(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_board_info_out *out;
+ char name[MISC_MAX_BRDNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_BOARD_INFO, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->brdname, MISC_MAX_BRDNAME);
+ dev_info(ph->dev, "Board\t\t= %s, attr=0x%08x\n",
+ name, le32_to_cpu(out->attributes));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
+static int scmi_imx_misc_cfg_info_get(const struct scmi_protocol_handle *ph)
+{
+ struct scmi_imx_misc_cfg_info_out *out;
+ char name[MISC_MAX_CFGNAME];
+ struct scmi_xfer *t;
+ int ret;
+
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CFG_INFO_GET, 0, sizeof(*out), &t);
+ if (ret)
+ return ret;
+
+ ret = ph->xops->do_xfer(ph, t);
+ if (!ret) {
+ out = t->rx.buf;
+ strscpy(name, out->cfgname, MISC_MAX_CFGNAME);
+ dev_info(ph->dev, "SM Config\t= %s, mSel = %u\n",
+ name, le32_to_cpu(out->msel));
+ }
+
+ ph->xops->xfer_put(ph, t);
+
+ return ret;
+}
+
static const struct scmi_imx_misc_proto_ops scmi_imx_misc_proto_ops = {
.misc_ctrl_set = scmi_imx_misc_ctrl_set,
.misc_ctrl_get = scmi_imx_misc_ctrl_get,
@@ -299,6 +398,18 @@ static int scmi_imx_misc_protocol_init(const struct scmi_protocol_handle *ph)
if (ret)
return ret;
+ ret = scmi_imx_misc_build_info_discover(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_board_info(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ ret = scmi_imx_misc_cfg_info_get(ph);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
return ph->set_priv(ph, minfo, version);
}
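All three queries above copy the firmware's fixed-size name fields into local buffers before printing: the replies are not guaranteed to be NUL-terminated, and strscpy() yields a bounded, always-terminated copy. A stand-alone sketch (field size and names illustrative):

#include <linux/printk.h>
#include <linux/string.h>

#define DEMO_NAME_LEN 16

static void print_fw_string(const char raw[DEMO_NAME_LEN])
{
	char name[DEMO_NAME_LEN];

	strscpy(name, raw, sizeof(name));	/* truncates and NUL-terminates */
	pr_info("name = %s\n", name);
}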
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
index 4e246a78a042..741f4eace350 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx95.rst
@@ -1660,6 +1660,7 @@ protocol_id: 0x84
|Name |Description |
+--------------------+---------------------------------------------------------+
|int32 status |SUCCESS: system log return |
+| |NOT_SUPPORTED: system log not available |
+--------------------+---------------------------------------------------------+
|uint32 numLogflags |Descriptor for the log data returned by this call. |
| |Bits[31:20] Number of remaining log words. |
@@ -1670,6 +1671,30 @@ protocol_id: 0x84
|uint32 syslog[N] |Log data array, N is defined in bits[11:0] of numLogflags|
+--------------------+---------------------------------------------------------+
+MISC_BOARD_INFO
+~~~~~~~~~~~~~~~
+
+message_id: 0xE
+protocol_id: 0x84
+
++--------------------+---------------------------------------------------------+
+|Return values |
++--------------------+---------------------------------------------------------+
+|Name |Description |
++--------------------+---------------------------------------------------------+
+|int32 status |SUCCESS: board info return |
+| |NOT_SUPPORTED: board info not available |
++--------------------+---------------------------------------------------------+
+|uint32 attributes |Board-specific attributes reserved for future expansion |
+| |without breaking backwards compatibility. The firmware |
+| |sets the value to 0 |
++--------------------+---------------------------------------------------------+
+|uint8 boardname[16] |Board name. NULL terminated ASCII string, up to 16 bytes |
+| |in length. This is System Manager (SM) firmware-exported |
+| |board-name and may not align with the board name in the |
+| |device tree. |
++--------------------+---------------------------------------------------------+
+
NEGOTIATE_PROTOCOL_VERSION
~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c
index fda6a1573609..17127880e10a 100644
--- a/drivers/firmware/arm_scmi/voltage.c
+++ b/drivers/firmware/arm_scmi/voltage.c
@@ -393,7 +393,7 @@ static int scmi_voltage_domains_num_get(const struct scmi_protocol_handle *ph)
return vinfo->num_domains;
}
-static struct scmi_voltage_proto_ops voltage_proto_ops = {
+static const struct scmi_voltage_proto_ops voltage_proto_ops = {
.num_domains_get = scmi_voltage_domains_num_get,
.info_get = scmi_voltage_info_get,
.config_set = scmi_voltage_config_set,
diff --git a/drivers/firmware/broadcom/bcm47xx_sprom.c b/drivers/firmware/broadcom/bcm47xx_sprom.c
index 14fbcd11657c..fdcd3a07abcd 100644
--- a/drivers/firmware/broadcom/bcm47xx_sprom.c
+++ b/drivers/firmware/broadcom/bcm47xx_sprom.c
@@ -404,7 +404,7 @@ static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
}
-#undef ENTRY /* It's specififc, uses local variable, don't use it (again). */
+#undef ENTRY /* It's specific, uses local variable, don't use it (again). */
static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
const char *prefix, bool fallback)
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index f51047d8ea64..525ac0f0a75d 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -9,9 +9,11 @@
* Cirrus Logic International Semiconductor Ltd.
*/
+#include <linux/cleanup.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -316,44 +318,6 @@ struct cs_dsp_alg_region_list_item {
struct cs_dsp_alg_region alg_region;
};
-struct cs_dsp_buf {
- struct list_head list;
- void *buf;
-};
-
-static struct cs_dsp_buf *cs_dsp_buf_alloc(const void *src, size_t len,
- struct list_head *list)
-{
- struct cs_dsp_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
-
- if (buf == NULL)
- return NULL;
-
- buf->buf = vmalloc(len);
- if (!buf->buf) {
- kfree(buf);
- return NULL;
- }
- memcpy(buf->buf, src, len);
-
- if (list)
- list_add_tail(&buf->list, list);
-
- return buf;
-}
-
-static void cs_dsp_buf_free(struct list_head *list)
-{
- while (!list_empty(list)) {
- struct cs_dsp_buf *buf = list_first_entry(list,
- struct cs_dsp_buf,
- list);
- list_del(&buf->list);
- vfree(buf->buf);
- kfree(buf);
- }
-}
-
/**
* cs_dsp_mem_region_name() - Return a name string for a memory type
* @type: the memory type to match
@@ -388,18 +352,14 @@ EXPORT_SYMBOL_NS_GPL(cs_dsp_mem_region_name, "FW_CS_DSP");
#ifdef CONFIG_DEBUG_FS
static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s)
{
- char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
-
kfree(dsp->wmfw_file_name);
- dsp->wmfw_file_name = tmp;
+ dsp->wmfw_file_name = kstrdup(s, GFP_KERNEL);
}
static void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp, const char *s)
{
- char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
-
kfree(dsp->bin_file_name);
- dsp->bin_file_name = tmp;
+ dsp->bin_file_name = kstrdup(s, GFP_KERNEL);
}
static void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
@@ -410,24 +370,33 @@ static void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
dsp->bin_file_name = NULL;
}
+static ssize_t cs_dsp_debugfs_string_read(struct cs_dsp *dsp,
+ char __user *user_buf,
+ size_t count, loff_t *ppos,
+ const char **pstr)
+{
+ const char *str __free(kfree) = NULL;
+
+ scoped_guard(mutex, &dsp->pwr_lock) {
+ if (!*pstr)
+ return 0;
+
+ str = kasprintf(GFP_KERNEL, "%s\n", *pstr);
+ if (!str)
+ return -ENOMEM;
+
+ return simple_read_from_buffer(user_buf, count, ppos, str, strlen(str));
+ }
+}
+
static ssize_t cs_dsp_debugfs_wmfw_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct cs_dsp *dsp = file->private_data;
- ssize_t ret;
-
- mutex_lock(&dsp->pwr_lock);
- if (!dsp->wmfw_file_name || !dsp->booted)
- ret = 0;
- else
- ret = simple_read_from_buffer(user_buf, count, ppos,
- dsp->wmfw_file_name,
- strlen(dsp->wmfw_file_name));
-
- mutex_unlock(&dsp->pwr_lock);
- return ret;
+ return cs_dsp_debugfs_string_read(dsp, user_buf, count, ppos,
+ &dsp->wmfw_file_name);
}
static ssize_t cs_dsp_debugfs_bin_read(struct file *file,
@@ -435,19 +404,9 @@ static ssize_t cs_dsp_debugfs_bin_read(struct file *file,
size_t count, loff_t *ppos)
{
struct cs_dsp *dsp = file->private_data;
- ssize_t ret;
-
- mutex_lock(&dsp->pwr_lock);
- if (!dsp->bin_file_name || !dsp->booted)
- ret = 0;
- else
- ret = simple_read_from_buffer(user_buf, count, ppos,
- dsp->bin_file_name,
- strlen(dsp->bin_file_name));
-
- mutex_unlock(&dsp->pwr_lock);
- return ret;
+ return cs_dsp_debugfs_string_read(dsp, user_buf, count, ppos,
+ &dsp->bin_file_name);
}
static const struct {
@@ -479,9 +438,11 @@ static int cs_dsp_debugfs_read_controls_show(struct seq_file *s, void *ignored)
struct cs_dsp_coeff_ctl *ctl;
unsigned int reg;
+ guard(mutex)(&dsp->pwr_lock);
+
list_for_each_entry(ctl, &dsp->ctl_list, list) {
cs_dsp_coeff_base_reg(ctl, &reg, 0);
- seq_printf(s, "%22.*s: %#8zx %s:%08x %#8x %s %#8x %#4x %c%c%c%c %s %s\n",
+ seq_printf(s, "%22.*s: %#8x %s:%08x %#8x %s %#8x %#4x %c%c%c%c %s %s\n",
ctl->subname_len, ctl->subname, ctl->len,
cs_dsp_mem_region_name(ctl->alg_region.type),
ctl->offset, reg, ctl->fw_name, ctl->alg_region.alg, ctl->type,
@@ -1028,7 +989,7 @@ static void cs_dsp_signal_event_controls(struct cs_dsp *dsp,
static void cs_dsp_free_ctl_blk(struct cs_dsp_coeff_ctl *ctl)
{
- kfree(ctl->cache);
+ kvfree(ctl->cache);
kfree(ctl->subname);
kfree(ctl);
}
@@ -1078,7 +1039,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
ctl->type = type;
ctl->offset = offset;
ctl->len = len;
- ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
+ ctl->cache = kvzalloc(ctl->len, GFP_KERNEL);
if (!ctl->cache) {
ret = -ENOMEM;
goto err_ctl_subname;
@@ -1096,7 +1057,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
err_list_del:
list_del(&ctl->list);
- kfree(ctl->cache);
+ kvfree(ctl->cache);
err_ctl_subname:
kfree(ctl->subname);
err_ctl:
@@ -1485,7 +1446,9 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
const struct wmfw_region *region;
const struct cs_dsp_region *mem;
const char *region_name;
- struct cs_dsp_buf *buf;
+ u8 *buf __free(kfree) = NULL;
+ size_t buf_len = 0;
+ size_t region_len;
unsigned int reg;
int regions = 0;
int ret, offset, type;
@@ -1605,23 +1568,23 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
region_name);
if (reg) {
- buf = cs_dsp_buf_alloc(region->data,
- le32_to_cpu(region->len),
- &buf_list);
- if (!buf) {
- cs_dsp_err(dsp, "Out of memory\n");
- ret = -ENOMEM;
- goto out_fw;
+ region_len = le32_to_cpu(region->len);
+ if (region_len > buf_len) {
+ buf_len = round_up(region_len, PAGE_SIZE);
+ kfree(buf);
+ buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_fw;
+ }
}
- ret = regmap_raw_write(regmap, reg, buf->buf,
- le32_to_cpu(region->len));
+ memcpy(buf, region->data, region_len);
+ ret = regmap_raw_write(regmap, reg, buf, region_len);
if (ret != 0) {
cs_dsp_err(dsp,
- "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
- file, regions,
- le32_to_cpu(region->len), offset,
- region_name, ret);
+ "%s.%d: Failed to write %zu bytes at %d in %s: %d\n",
+ file, regions, region_len, offset, region_name, ret);
goto out_fw;
}
}
@@ -1638,8 +1601,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
ret = 0;
out_fw:
- cs_dsp_buf_free(&buf_list);
-
if (ret == -EOVERFLOW)
cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
@@ -2171,7 +2132,9 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
struct cs_dsp_alg_region *alg_region;
const char *region_name;
int ret, pos, blocks, type, offset, reg, version;
- struct cs_dsp_buf *buf;
+ u8 *buf __free(kfree) = NULL;
+ size_t buf_len = 0;
+ size_t region_len;
if (!firmware)
return 0;
@@ -2313,20 +2276,22 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
}
if (reg) {
- buf = cs_dsp_buf_alloc(blk->data,
- le32_to_cpu(blk->len),
- &buf_list);
- if (!buf) {
- cs_dsp_err(dsp, "Out of memory\n");
- ret = -ENOMEM;
- goto out_fw;
+ region_len = le32_to_cpu(blk->len);
+ if (region_len > buf_len) {
+ buf_len = round_up(region_len, PAGE_SIZE);
+ kfree(buf);
+ buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_fw;
+ }
}
- cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
- file, blocks, le32_to_cpu(blk->len),
- reg);
- ret = regmap_raw_write(regmap, reg, buf->buf,
- le32_to_cpu(blk->len));
+ memcpy(buf, blk->data, region_len);
+
+ cs_dsp_dbg(dsp, "%s.%d: Writing %zu bytes at %x\n",
+ file, blocks, region_len, reg);
+ ret = regmap_raw_write(regmap, reg, buf, region_len);
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write to %x in %s: %d\n",
@@ -2346,8 +2311,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
ret = 0;
out_fw:
- cs_dsp_buf_free(&buf_list);
-
if (ret == -EOVERFLOW)
cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
@@ -2366,6 +2329,9 @@ static int cs_dsp_create_name(struct cs_dsp *dsp)
return 0;
}
+static const struct cs_dsp_client_ops cs_dsp_default_client_ops = {
+};
+
static int cs_dsp_common_init(struct cs_dsp *dsp)
{
int ret;
@@ -2379,6 +2345,9 @@ static int cs_dsp_common_init(struct cs_dsp *dsp)
mutex_init(&dsp->pwr_lock);
+ if (!dsp->client_ops)
+ dsp->client_ops = &cs_dsp_default_client_ops;
+
#ifdef CONFIG_DEBUG_FS
/* Ensure this is invalid if client never provides a debugfs root */
dsp->debugfs_root = ERR_PTR(-ENODEV);
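The cs_dsp_buf list removed above is replaced by a single scratch buffer that grows to the largest block seen, rounded up to a page, and is released by __free(kfree) on any exit path. A condensed sketch of that strategy; struct demo_region and the surrounding names are hypothetical:

#include <linux/cleanup.h>
#include <linux/math.h>
#include <linux/mm.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_region {
	unsigned int reg;
	size_t len;
	const void *data;
};

static int demo_write_regions(struct regmap *regmap,
			      const struct demo_region *r, int nregions)
{
	u8 *buf __free(kfree) = NULL;
	size_t buf_len = 0;
	int ret;

	for (int i = 0; i < nregions; i++, r++) {
		if (r->len > buf_len) {
			/* grow once per high-water mark, in page-sized steps */
			buf_len = round_up(r->len, PAGE_SIZE);
			kfree(buf);
			buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA);
			if (!buf)
				return -ENOMEM;
		}
		memcpy(buf, r->data, r->len);	/* source may be vmalloc'ed firmware */
		ret = regmap_raw_write(regmap, r->reg, buf, r->len);
		if (ret)
			return ret;
	}
	return 0;
}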
diff --git a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
index 8a9b66a3b7d3..e5a389808e5f 100644
--- a/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
+++ b/drivers/firmware/cirrus/test/cs_dsp_test_callbacks.c
@@ -600,6 +600,7 @@ KUNIT_ARRAY_PARAM(cs_dsp_callbacks_ops,
static const struct cs_dsp_callbacks_test_param cs_dsp_no_callbacks_cases[] = {
{ .ops = &cs_dsp_callback_test_empty_client_ops, .case_name = "empty ops" },
+ { .ops = NULL, .case_name = "NULL ops" },
};
KUNIT_ARRAY_PARAM(cs_dsp_no_callbacks,
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d528c94c5859..29e0729299f5 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -267,9 +267,10 @@ config OVMF_DEBUG_LOG
bool "Expose OVMF firmware debug log via sysfs"
depends on EFI
help
- Recent OVMF versions (edk2-stable202508 + newer) can write
- their debug log to a memory buffer. This driver exposes the
- log content via sysfs (/sys/firmware/efi/ovmf_debug_log).
+ Recent versions of the Open Virtual Machine Firmware
+ (edk2-stable202508 + newer) can write their debug log to a memory
+ buffer. This driver exposes the log content via sysfs
+ (/sys/firmware/efi/ovmf_debug_log).
config UNACCEPTED_MEMORY
bool
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 83092d93f36a..53a5336cde5a 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -12,18 +12,18 @@
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
+#include <linux/pgalloc.h>
+#include <linux/pgtable.h>
#include <linux/preempt.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/mmu.h>
-#include <asm/pgalloc.h>
#if defined(CONFIG_PTDUMP_DEBUGFS) || defined(CONFIG_ARM_PTDUMP_DEBUGFS)
#include <asm/ptdump.h>
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index f0a63d09d3c4..76542a53e202 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -93,15 +93,11 @@ static void cper_print_arm_err_info(const char *pfx, u32 type,
bool proc_context_corrupt, corrected, precise_pc, restartable_pc;
bool time_out, access_mode;
- /* If the type is unknown, bail. */
- if (type > CPER_ARM_MAX_TYPE)
- return;
-
/*
* Vendor type errors have error information values that are vendor
* specific.
*/
- if (type == CPER_ARM_VENDOR_ERROR)
+ if (type & CPER_ARM_VENDOR_ERROR)
return;
if (error_info & CPER_ARM_ERR_VALID_TRANSACTION_TYPE) {
@@ -116,43 +112,38 @@ static void cper_print_arm_err_info(const char *pfx, u32 type,
if (error_info & CPER_ARM_ERR_VALID_OPERATION_TYPE) {
op_type = ((error_info >> CPER_ARM_ERR_OPERATION_SHIFT)
& CPER_ARM_ERR_OPERATION_MASK);
- switch (type) {
- case CPER_ARM_CACHE_ERROR:
+ if (type & CPER_ARM_CACHE_ERROR) {
if (op_type < ARRAY_SIZE(arm_cache_err_op_strs)) {
- printk("%soperation type: %s\n", pfx,
+ printk("%scache error, operation type: %s\n", pfx,
arm_cache_err_op_strs[op_type]);
}
- break;
- case CPER_ARM_TLB_ERROR:
+ }
+ if (type & CPER_ARM_TLB_ERROR) {
if (op_type < ARRAY_SIZE(arm_tlb_err_op_strs)) {
- printk("%soperation type: %s\n", pfx,
+ printk("%sTLB error, operation type: %s\n", pfx,
arm_tlb_err_op_strs[op_type]);
}
- break;
- case CPER_ARM_BUS_ERROR:
+ }
+ if (type & CPER_ARM_BUS_ERROR) {
if (op_type < ARRAY_SIZE(arm_bus_err_op_strs)) {
- printk("%soperation type: %s\n", pfx,
+ printk("%sbus error, operation type: %s\n", pfx,
arm_bus_err_op_strs[op_type]);
}
- break;
}
}
if (error_info & CPER_ARM_ERR_VALID_LEVEL) {
level = ((error_info >> CPER_ARM_ERR_LEVEL_SHIFT)
& CPER_ARM_ERR_LEVEL_MASK);
- switch (type) {
- case CPER_ARM_CACHE_ERROR:
+ if (type & CPER_ARM_CACHE_ERROR)
printk("%scache level: %d\n", pfx, level);
- break;
- case CPER_ARM_TLB_ERROR:
+
+ if (type & CPER_ARM_TLB_ERROR)
printk("%sTLB level: %d\n", pfx, level);
- break;
- case CPER_ARM_BUS_ERROR:
+
+ if (type & CPER_ARM_BUS_ERROR)
printk("%saffinity level at which the bus error occurred: %d\n",
pfx, level);
- break;
- }
}
if (error_info & CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT) {
@@ -240,7 +231,8 @@ void cper_print_proc_arm(const char *pfx,
int i, len, max_ctx_type;
struct cper_arm_err_info *err_info;
struct cper_arm_ctx_info *ctx_info;
- char newpfx[64], infopfx[64];
+ char newpfx[64], infopfx[ARRAY_SIZE(newpfx) + 1];
+ char error_type[120];
printk("%sMIDR: 0x%016llx\n", pfx, proc->midr);
@@ -289,9 +281,15 @@ void cper_print_proc_arm(const char *pfx,
newpfx);
}
- printk("%serror_type: %d, %s\n", newpfx, err_info->type,
- err_info->type < ARRAY_SIZE(cper_proc_error_type_strs) ?
- cper_proc_error_type_strs[err_info->type] : "unknown");
+ cper_bits_to_str(error_type, sizeof(error_type),
+ FIELD_GET(CPER_ARM_ERR_TYPE_MASK, err_info->type),
+ cper_proc_error_type_strs,
+ ARRAY_SIZE(cper_proc_error_type_strs));
+
+ printk("%serror_type: 0x%02x: %s%s\n", newpfx, err_info->type,
+ error_type,
+ (err_info->type & ~CPER_ARM_ERR_TYPE_MASK) ? " with reserved bit(s)" : "");
+
if (err_info->validation_bits & CPER_ARM_INFO_VALID_ERR_INFO) {
printk("%serror_info: 0x%016llx\n", newpfx,
err_info->error_info);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 928409199a1a..0232bd040f61 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -12,6 +12,7 @@
* Specification version 2.4.
*/
+#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/time.h>
@@ -69,7 +70,7 @@ const char *cper_severity_str(unsigned int severity)
}
EXPORT_SYMBOL_GPL(cper_severity_str);
-/*
+/**
* cper_print_bits - print strings for set bits
* @pfx: prefix for each line, including log level and prefix string
* @bits: bit mask
@@ -106,6 +107,65 @@ void cper_print_bits(const char *pfx, unsigned int bits,
printk("%s\n", buf);
}
+/**
+ * cper_bits_to_str - return a string for set bits
+ * @buf: buffer to store the output string
+ * @buf_size: size of the output string buffer
+ * @bits: bit mask
+ * @strs: string array, indexed by bit position
+ * @strs_size: number of entries in @strs
+ *
+ * For each set bit in @bits, add the corresponding string describing the
+ * bit in @strs to @buf, separating consecutive entries with '|'.
+ *
+ * A typical example is::
+ *
+ * const char * const bits[] = {
+ * "bit 3 name",
+ * "bit 4 name",
+ * "bit 5 name",
+ * };
+ * char str[120];
+ * unsigned int bitmask = BIT(3) | BIT(5);
+ * #define MASK GENMASK(5,3)
+ *
+ * cper_bits_to_str(str, sizeof(str), FIELD_GET(MASK, bitmask),
+ * bits, ARRAY_SIZE(bits));
+ *
+ * The above code fills the string ``str`` with ``bit 3 name|bit 5 name``.
+ *
+ * Return: number of bytes stored, or a negative error code.
+ */
+int cper_bits_to_str(char *buf, int buf_size, unsigned long bits,
+ const char * const strs[], unsigned int strs_size)
+{
+ int len = buf_size;
+ char *str = buf;
+ int i, size;
+
+ *buf = '\0';
+
+ for_each_set_bit(i, &bits, strs_size) {
+ if (*buf && len > 0) {
+ *str = '|';
+ len--;
+ str++;
+ }
+
+ size = strscpy(str, strs[i], len);
+ if (size < 0)
+ return size;
+
+ len -= size;
+ str += size;
+ }
+ return buf_size - len;
+}
+EXPORT_SYMBOL_GPL(cper_bits_to_str);
+
static const char * const proc_type_strs[] = {
"IA32/X64",
"IA64",
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index a00e07b853f2..a65c2d5b9e7b 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -12,6 +12,7 @@
#include <linux/efi.h>
#include <linux/fwnode.h>
#include <linux/init.h>
+#include <linux/kexec_handover.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/of.h>
@@ -164,12 +165,32 @@ static __init void reserve_regions(void)
pr_info("Processing EFI memory map:\n");
/*
- * Discard memblocks discovered so far: if there are any at this
- * point, they originate from memory nodes in the DT, and UEFI
- * uses its own memory map instead.
+ * Discard memblocks discovered so far except for KHO scratch
+ * regions. Most memblocks at this point originate from memory nodes
+ * in the DT and UEFI uses its own memory map instead. However, if
+ * KHO is enabled, scratch regions, which are known good memory,
+ * must be preserved.
*/
memblock_dump_all();
- memblock_remove(0, PHYS_ADDR_MAX);
+
+ if (is_kho_boot()) {
+ struct memblock_region *r;
+
+ /* Remove all non-KHO regions */
+ for_each_mem_region(r) {
+ if (!memblock_is_kho_scratch(r)) {
+ memblock_remove(r->base, r->size);
+ r--;
+ }
+ }
+ } else {
+ /*
+ * KHO is disabled. Discard memblocks discovered so far:
+ * if there are any at this point, they originate from memory
+ * nodes in the DT, and UEFI uses its own memory map instead.
+ */
+ memblock_remove(0, PHYS_ADDR_MAX);
+ }
for_each_efi_memory_desc(md) {
paddr = md->phys_addr;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 1ce428e2ac8a..a9070d00b833 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -74,6 +74,9 @@ struct mm_struct efi_mm = {
.page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
.cpu_bitmap = { [BITS_TO_LONGS(NR_CPUS)] = 0},
+#ifdef CONFIG_SCHED_MM_CID
+ .mm_cid.lock = __RAW_SPIN_LOCK_UNLOCKED(efi_mm.mm_cid.lock),
+#endif
};
struct workqueue_struct *efi_rts_wq;
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index 94b05e4451dd..7d15a85d579f 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -11,12 +11,12 @@ cflags-y := $(KBUILD_CFLAGS)
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
-cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \
+cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 -fms-extensions \
-fPIC -fno-strict-aliasing -mno-red-zone \
-mno-mmx -mno-sse -fshort-wchar \
-Wno-pointer-sign \
$(call cc-disable-warning, address-of-packed-member) \
- $(call cc-disable-warning, gnu) \
+ $(if $(CONFIG_CC_IS_CLANG),-Wno-gnu -Wno-microsoft-anon-tag) \
-fno-asynchronous-unwind-tables \
$(CLANG_FLAGS)
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 874f63b4a383..9cb814c5ba1b 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -56,7 +56,7 @@ static struct screen_info *setup_graphics(void)
{
struct screen_info *si, tmp = {};
- if (efi_setup_gop(&tmp) != EFI_SUCCESS)
+ if (efi_setup_graphics(&tmp, NULL) != EFI_SUCCESS)
return NULL;
si = alloc_screen_info();
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index f5ba032863a9..b2fb0c3fa721 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -34,6 +34,9 @@
#define EFI_ALLOC_LIMIT ULONG_MAX
#endif
+struct edid_info;
+struct screen_info;
+
extern bool efi_no5lvl;
extern bool efi_nochunk;
extern bool efi_nokaslr;
@@ -578,6 +581,32 @@ union efi_graphics_output_protocol {
} mixed_mode;
};
+typedef union efi_edid_discovered_protocol efi_edid_discovered_protocol_t;
+
+union efi_edid_discovered_protocol {
+ struct {
+ u32 size_of_edid;
+ u8 *edid;
+ };
+ struct {
+ u32 size_of_edid;
+ u32 edid;
+ } mixed_mode;
+};
+
+typedef union efi_edid_active_protocol efi_edid_active_protocol_t;
+
+union efi_edid_active_protocol {
+ struct {
+ u32 size_of_edid;
+ u8 *edid;
+ };
+ struct {
+ u32 size_of_edid;
+ u32 edid;
+ } mixed_mode;
+};
+
typedef union {
struct {
u32 revision;
@@ -1085,7 +1114,7 @@ efi_status_t efi_parse_options(char const *cmdline);
void efi_parse_option_graphics(char *option);
-efi_status_t efi_setup_gop(struct screen_info *si);
+efi_status_t efi_setup_graphics(struct screen_info *si, struct edid_info *edid);
efi_status_t handle_cmdline_files(efi_loaded_image_t *image,
const efi_char16_t *optstr,
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
index 3785fb4986b4..72d74436a7a4 100644
--- a/drivers/firmware/efi/libstub/gop.c
+++ b/drivers/firmware/efi/libstub/gop.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <asm/efi.h>
#include <asm/setup.h>
+#include <video/edid.h>
#include "efistub.h"
@@ -367,24 +368,31 @@ static void find_bits(u32 mask, u8 *pos, u8 *size)
*size = __fls(mask) - *pos + 1;
}
-static void
-setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
- efi_pixel_bitmask_t pixel_info, int pixel_format)
+static void setup_screen_info(struct screen_info *si, const efi_graphics_output_protocol_t *gop)
{
- if (pixel_format == PIXEL_BIT_MASK) {
- find_bits(pixel_info.red_mask,
- &si->red_pos, &si->red_size);
- find_bits(pixel_info.green_mask,
- &si->green_pos, &si->green_size);
- find_bits(pixel_info.blue_mask,
- &si->blue_pos, &si->blue_size);
- find_bits(pixel_info.reserved_mask,
- &si->rsvd_pos, &si->rsvd_size);
- si->lfb_depth = si->red_size + si->green_size +
- si->blue_size + si->rsvd_size;
- si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
+ const efi_graphics_output_protocol_mode_t *mode = efi_table_attr(gop, mode);
+ const efi_graphics_output_mode_info_t *info = efi_table_attr(mode, info);
+
+ si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+ si->lfb_width = info->horizontal_resolution;
+ si->lfb_height = info->vertical_resolution;
+
+ efi_set_u64_split(efi_table_attr(mode, frame_buffer_base),
+ &si->lfb_base, &si->ext_lfb_base);
+ if (si->ext_lfb_base)
+ si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ si->pages = 1;
+
+ if (info->pixel_format == PIXEL_BIT_MASK) {
+ find_bits(info->pixel_information.red_mask, &si->red_pos, &si->red_size);
+ find_bits(info->pixel_information.green_mask, &si->green_pos, &si->green_size);
+ find_bits(info->pixel_information.blue_mask, &si->blue_pos, &si->blue_size);
+ find_bits(info->pixel_information.reserved_mask, &si->rsvd_pos, &si->rsvd_size);
+ si->lfb_depth = si->red_size + si->green_size + si->blue_size + si->rsvd_size;
+ si->lfb_linelength = (info->pixels_per_scan_line * si->lfb_depth) / 8;
} else {
- if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+ if (info->pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
si->red_pos = 0;
si->blue_pos = 16;
} else /* PIXEL_BGR_RESERVED_8BIT_PER_COLOR */ {
@@ -394,20 +402,33 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
si->green_pos = 8;
si->rsvd_pos = 24;
- si->red_size = si->green_size =
- si->blue_size = si->rsvd_size = 8;
-
+ si->red_size = 8;
+ si->green_size = 8;
+ si->blue_size = 8;
+ si->rsvd_size = 8;
si->lfb_depth = 32;
- si->lfb_linelength = pixels_per_scan_line * 4;
+ si->lfb_linelength = info->pixels_per_scan_line * 4;
}
+
+ si->lfb_size = si->lfb_linelength * si->lfb_height;
+ si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
}
-static efi_graphics_output_protocol_t *find_gop(unsigned long num,
- const efi_handle_t handles[])
+static void setup_edid_info(struct edid_info *edid, u32 gop_size_of_edid, u8 *gop_edid)
+{
+ if (!gop_edid || gop_size_of_edid < 128)
+ memset(edid->dummy, 0, sizeof(edid->dummy));
+ else
+ memcpy(edid->dummy, gop_edid, min(gop_size_of_edid, sizeof(edid->dummy)));
+}
+
+static efi_handle_t find_handle_with_primary_gop(unsigned long num, const efi_handle_t handles[],
+ efi_graphics_output_protocol_t **found_gop)
{
efi_graphics_output_protocol_t *first_gop;
- efi_handle_t h;
+ efi_handle_t h, first_gop_handle;
+ first_gop_handle = NULL;
first_gop = NULL;
for_each_efi_handle(h, handles, num) {
@@ -442,21 +463,25 @@ static efi_graphics_output_protocol_t *find_gop(unsigned long num,
*/
status = efi_bs_call(handle_protocol, h,
&EFI_CONSOLE_OUT_DEVICE_GUID, &dummy);
- if (status == EFI_SUCCESS)
- return gop;
-
- if (!first_gop)
+ if (status == EFI_SUCCESS) {
+ if (found_gop)
+ *found_gop = gop;
+ return h;
+ } else if (!first_gop_handle) {
+ first_gop_handle = h;
first_gop = gop;
+ }
}
- return first_gop;
+ if (found_gop)
+ *found_gop = first_gop;
+ return first_gop_handle;
}
-efi_status_t efi_setup_gop(struct screen_info *si)
+efi_status_t efi_setup_graphics(struct screen_info *si, struct edid_info *edid)
{
efi_handle_t *handles __free(efi_pool) = NULL;
- efi_graphics_output_protocol_mode_t *mode;
- efi_graphics_output_mode_info_t *info;
+ efi_handle_t handle;
efi_graphics_output_protocol_t *gop;
efi_status_t status;
unsigned long num;
@@ -467,35 +492,41 @@ efi_status_t efi_setup_gop(struct screen_info *si)
if (status != EFI_SUCCESS)
return status;
- gop = find_gop(num, handles);
- if (!gop)
+ handle = find_handle_with_primary_gop(num, handles, &gop);
+ if (!handle)
return EFI_NOT_FOUND;
/* Change mode if requested */
set_mode(gop);
/* EFI framebuffer */
- mode = efi_table_attr(gop, mode);
- info = efi_table_attr(mode, info);
-
- si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
- si->lfb_width = info->horizontal_resolution;
- si->lfb_height = info->vertical_resolution;
-
- efi_set_u64_split(efi_table_attr(mode, frame_buffer_base),
- &si->lfb_base, &si->ext_lfb_base);
- if (si->ext_lfb_base)
- si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
-
- si->pages = 1;
-
- setup_pixel_info(si, info->pixels_per_scan_line,
- info->pixel_information, info->pixel_format);
-
- si->lfb_size = si->lfb_linelength * si->lfb_height;
+ if (si)
+ setup_screen_info(si, gop);
+
+ /* Display EDID for primary GOP */
+ if (edid) {
+ efi_edid_discovered_protocol_t *discovered_edid;
+ efi_edid_active_protocol_t *active_edid;
+ u32 gop_size_of_edid = 0;
+ u8 *gop_edid = NULL;
+
+ status = efi_bs_call(handle_protocol, handle, &EFI_EDID_ACTIVE_PROTOCOL_GUID,
+ (void **)&active_edid);
+ if (status == EFI_SUCCESS) {
+ gop_size_of_edid = active_edid->size_of_edid;
+ gop_edid = active_edid->edid;
+ } else {
+ status = efi_bs_call(handle_protocol, handle,
+ &EFI_EDID_DISCOVERED_PROTOCOL_GUID,
+ (void **)&discovered_edid);
+ if (status == EFI_SUCCESS) {
+ gop_size_of_edid = discovered_edid->size_of_edid;
+ gop_edid = discovered_edid->edid;
+ }
+ }
- si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+ setup_edid_info(edid, gop_size_of_edid, gop_edid);
+ }
return EFI_SUCCESS;
}
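The EDID bytes copied into edid->dummy above follow the fixed EDID 1.x base-block layout, so a consumer can sanity-check them cheaply. A minimal sketch (hypothetical helper, not part of this patch; it assumes only the standard 8-byte header and block checksum defined by the EDID spec):

static bool edid_block_valid(const u8 *edid)
{
	static const u8 header[8] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
	};
	u8 sum = 0;

	/* Every EDID 1.x base block starts with this fixed header. */
	if (memcmp(edid, header, sizeof(header)) != 0)
		return false;

	/* All 128 bytes of the block must sum to 0 modulo 256. */
	for (int i = 0; i < 128; i++)
		sum += edid[i];

	return sum == 0;
}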
diff --git a/drivers/firmware/efi/libstub/x86-5lvl.c b/drivers/firmware/efi/libstub/x86-5lvl.c
index f1c5fb45d5f7..c00d0ae7ed5d 100644
--- a/drivers/firmware/efi/libstub/x86-5lvl.c
+++ b/drivers/firmware/efi/libstub/x86-5lvl.c
@@ -66,7 +66,7 @@ void efi_5level_switch(void)
bool have_la57 = native_read_cr4() & X86_CR4_LA57;
bool need_toggle = want_la57 ^ have_la57;
u64 *pgt = (void *)la57_toggle + PAGE_SIZE;
- u64 *cr3 = (u64 *)__native_read_cr3();
+ pgd_t *cr3 = (pgd_t *)native_read_cr3_pa();
u64 *new_cr3;
if (!la57_toggle || !need_toggle)
@@ -82,7 +82,7 @@ void efi_5level_switch(void)
new_cr3[0] = (u64)cr3 | _PAGE_TABLE_NOENC;
} else {
/* take the new root table pointer from the current entry #0 */
- new_cr3 = (u64 *)(cr3[0] & PAGE_MASK);
+ new_cr3 = (u64 *)(native_pgd_val(cr3[0]) & PTE_PFN_MASK);
/* copy the new root table if it is not 32-bit addressable */
if ((u64)new_cr3 > U32_MAX)
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index cafc90d4caaf..cef32e2c82d8 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -203,6 +203,104 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params)
}
}
+struct smbios_entry_point {
+ u8 anchor[4];
+ u8 ep_checksum;
+ u8 ep_length;
+ u8 major_version;
+ u8 minor_version;
+ u16 max_size_entry;
+ u8 ep_rev;
+ u8 reserved[5];
+
+ struct __packed {
+ u8 anchor[5];
+ u8 checksum;
+ u16 st_length;
+ u32 st_address;
+ u16 number_of_entries;
+ u8 bcd_rev;
+ } intm;
+};
+
+static bool verify_ep_checksum(const void *ptr, int length)
+{
+ u8 sum = 0;
+
+ for (int i = 0; i < length; i++)
+ sum += ((u8 *)ptr)[i];
+
+ return sum == 0;
+}
+
+static bool verify_ep_integrity(const struct smbios_entry_point *ep)
+{
+ if (memcmp(ep->anchor, "_SM_", sizeof(ep->anchor)) != 0)
+ return false;
+
+ if (memcmp(ep->intm.anchor, "_DMI_", sizeof(ep->intm.anchor)) != 0)
+ return false;
+
+ if (!verify_ep_checksum(ep, ep->ep_length) ||
+ !verify_ep_checksum(&ep->intm, sizeof(ep->intm)))
+ return false;
+
+ return true;
+}
+
+static const struct efi_smbios_record *search_record(void *table, u32 length,
+ u8 type)
+{
+ const u8 *p, *end;
+
+ p = (u8 *)table;
+ end = p + length;
+
+ while (p + sizeof(struct efi_smbios_record) < end) {
+ const struct efi_smbios_record *hdr =
+ (struct efi_smbios_record *)p;
+ const u8 *next;
+
+ if (hdr->type == type)
+ return hdr;
+
+ /* Type 127 = End-of-Table */
+ if (hdr->type == 0x7F)
+ return NULL;
+
+		/* Jump to the unformatted section (string-set) */
+ next = p + hdr->length;
+
+		/* The unformatted section ends with a double NUL (0000h) */
+ while ((next[0] != 0 || next[1] != 0) && next + 1 < end)
+ next++;
+
+ next += 2;
+ p = next;
+ }
+
+ return NULL;
+}
+
+static const struct efi_smbios_record *get_table_record(u8 type)
+{
+ const struct smbios_entry_point *ep;
+
+ /*
+ * Locate the legacy 32-bit SMBIOS entrypoint in memory, and parse it
+ * directly. Needed by some Macs that do not implement the EFI protocol.
+ */
+ ep = get_efi_config_table(SMBIOS_TABLE_GUID);
+ if (!ep)
+ return NULL;
+
+ if (!verify_ep_integrity(ep))
+ return NULL;
+
+ return search_record((void *)(unsigned long)ep->intm.st_address,
+ ep->intm.st_length, type);
+}
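search_record() above skips each structure's formatted area (hdr->length bytes) and then scans the trailing string-set for its double-NUL terminator. The same layout lets a caller fetch individual strings from a matched record; a hedged sketch (helper name hypothetical, reusing the patch's struct efi_smbios_record):

static const char *smbios_record_string(const struct efi_smbios_record *hdr,
					u8 index)
{
	/* Strings live right after the formatted area; indices are 1-based. */
	const char *s = (const char *)hdr + hdr->length;

	if (index == 0)		/* 0 means "no string" */
		return NULL;

	while (--index && *s)
		s += strlen(s) + 1;

	return *s ? s : NULL;
}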
+
static bool apple_match_product_name(void)
{
static const char type1_product_matches[][15] = {
@@ -218,7 +316,8 @@ static bool apple_match_product_name(void)
const struct efi_smbios_type1_record *record;
const u8 *product;
- record = (struct efi_smbios_type1_record *)efi_get_smbios_record(1);
+ record = (struct efi_smbios_type1_record *)
+ (efi_get_smbios_record(1) ?: get_table_record(1));
if (!record)
return false;
@@ -300,7 +399,7 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
return EFI_SUCCESS;
/*
- * Don't modify memory region attributes, they are
+	 * Don't modify memory region attributes if they are
* already suitable, to lower the possibility to
* encounter firmware bugs.
*/
@@ -315,11 +414,13 @@ efi_status_t efi_adjust_memory_range_protection(unsigned long start,
next = desc.base_address + desc.length;
/*
- * Only system memory is suitable for trampoline/kernel image placement,
- * so only this type of memory needs its attributes to be modified.
+		 * Only system memory and "more reliable" memory are suitable
+		 * for trampoline/kernel image placement, so only those memory
+		 * types may need their attributes modified.
*/
- if (desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory ||
+ if ((desc.gcd_memory_type != EfiGcdMemoryTypeSystemMemory &&
+ desc.gcd_memory_type != EfiGcdMemoryTypeMoreReliable) ||
(desc.attributes & (EFI_MEMORY_RO | EFI_MEMORY_XP)) == 0)
continue;
@@ -386,8 +487,9 @@ static void setup_quirks(struct boot_params *boot_params)
static void setup_graphics(struct boot_params *boot_params)
{
struct screen_info *si = memset(&boot_params->screen_info, 0, sizeof(*si));
+ struct edid_info *edid = memset(&boot_params->edid_info, 0, sizeof(*edid));
- efi_setup_gop(si);
+ efi_setup_graphics(si, edid);
}
static void __noreturn efi_exit(efi_handle_t handle, efi_status_t status)
@@ -788,7 +890,9 @@ static efi_status_t efi_decompress_kernel(unsigned long *kernel_entry,
*kernel_entry = addr + entry;
- return efi_adjust_memory_range_protection(addr, kernel_text_size);
+ return efi_adjust_memory_range_protection(addr, kernel_text_size) ?:
+ efi_adjust_memory_range_protection(addr + kernel_inittext_offset,
+ kernel_inittext_size);
}
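The chained return above leans on the GNU a ?: b extension: EFI_SUCCESS is 0, so the second efi_adjust_memory_range_protection() call only runs when the first one succeeded, and the first failing status is what gets propagated. In isolation (values purely illustrative):

	efi_status_t first = EFI_SUCCESS;
	efi_status_t second = EFI_OUT_OF_RESOURCES;
	efi_status_t status = first ?: second;	/* first == 0, so status == second */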
static void __noreturn enter_kernel(unsigned long kernel_addr,
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index c38b1a335590..e727cc5909cb 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -19,19 +19,19 @@ unsigned long __ro_after_init efi_mem_attr_table = EFI_INVALID_TABLE_ADDR;
* Reserve the memory associated with the Memory Attributes configuration
* table, if it exists.
*/
-int __init efi_memattr_init(void)
+void __init efi_memattr_init(void)
{
efi_memory_attributes_table_t *tbl;
unsigned long size;
if (efi_mem_attr_table == EFI_INVALID_TABLE_ADDR)
- return 0;
+ return;
tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl));
if (!tbl) {
pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
efi_mem_attr_table);
- return -ENOMEM;
+ return;
}
if (tbl->version > 2) {
@@ -61,7 +61,6 @@ int __init efi_memattr_init(void)
unmap:
early_memunmap(tbl, sizeof(*tbl));
- return 0;
}
/*
diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c
index fa71cd898120..66f584a228d0 100644
--- a/drivers/firmware/efi/riscv-runtime.c
+++ b/drivers/firmware/efi/riscv-runtime.c
@@ -14,18 +14,18 @@
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
+#include <linux/pgalloc.h>
+#include <linux/pgtable.h>
#include <linux/preempt.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/efi.h>
#include <asm/mmu.h>
-#include <asm/pgalloc.h>
static bool __init efi_virtmap_init(void)
{
@@ -36,20 +36,12 @@ static bool __init efi_virtmap_init(void)
init_new_context(NULL, &efi_mm);
for_each_efi_memory_desc(md) {
- phys_addr_t phys = md->phys_addr;
- int ret;
-
if (!(md->attribute & EFI_MEMORY_RUNTIME))
continue;
if (md->virt_addr == U64_MAX)
return false;
- ret = efi_create_mapping(&efi_mm, md);
- if (ret) {
- pr_warn(" EFI remap %pa: failed to create mapping (%d)\n",
- &phys, ret);
- return false;
- }
+ efi_create_mapping(&efi_mm, md);
}
if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 708b777857d3..da8d29621644 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -202,6 +202,8 @@ void efi_call_virt_check_flags(unsigned long flags, const void *caller)
*/
static DEFINE_SEMAPHORE(efi_runtime_lock, 1);
+static struct task_struct *efi_runtime_lock_owner;
+
/*
* Expose the EFI runtime lock to the UV platform
*/
@@ -219,6 +221,8 @@ static void __nocfi efi_call_rts(struct work_struct *work)
efi_status_t status = EFI_NOT_FOUND;
unsigned long flags;
+ efi_runtime_lock_owner = current;
+
arch_efi_call_virt_setup();
flags = efi_call_virt_save_flags();
@@ -310,6 +314,7 @@ static void __nocfi efi_call_rts(struct work_struct *work)
efi_rts_work.status = status;
complete(&efi_rts_work.efi_rts_comp);
+ efi_runtime_lock_owner = NULL;
}
static efi_status_t __efi_queue_work(enum efi_rts_ids id,
@@ -444,8 +449,10 @@ virt_efi_set_variable_nb(efi_char16_t *name, efi_guid_t *vendor, u32 attr,
if (down_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
+ efi_runtime_lock_owner = current;
status = efi_call_virt_pointer(efi.runtime, set_variable, name, vendor,
attr, data_size, data);
+ efi_runtime_lock_owner = NULL;
up(&efi_runtime_lock);
return status;
}
@@ -481,9 +488,11 @@ virt_efi_query_variable_info_nb(u32 attr, u64 *storage_space,
if (down_trylock(&efi_runtime_lock))
return EFI_NOT_READY;
+ efi_runtime_lock_owner = current;
status = efi_call_virt_pointer(efi.runtime, query_variable_info, attr,
storage_space, remaining_space,
max_variable_size);
+ efi_runtime_lock_owner = NULL;
up(&efi_runtime_lock);
return status;
}
@@ -509,12 +518,13 @@ virt_efi_reset_system(int reset_type, efi_status_t status,
return;
}
+ efi_runtime_lock_owner = current;
arch_efi_call_virt_setup();
efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
arch_efi_call_virt(efi.runtime, reset_system, reset_type, status,
data_size, data);
arch_efi_call_virt_teardown();
-
+ efi_runtime_lock_owner = NULL;
up(&efi_runtime_lock);
}
@@ -587,3 +597,8 @@ efi_call_acpi_prm_handler(efi_status_t (__efiapi *handler_addr)(u64, void *),
}
#endif
+
+void efi_runtime_assert_lock_held(void)
+{
+ WARN_ON(efi_runtime_lock_owner != current);
+}
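With the owner tracking added above, any helper that must only execute while efi_runtime_lock is held can assert that fact. A minimal sketch (hypothetical caller, not part of this patch):

static void efi_rts_touch_protected_state(void)
{
	/* WARNs if current is not the task that took efi_runtime_lock */
	efi_runtime_assert_lock_held();

	/* ... access state guarded by efi_runtime_lock ... */
}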
diff --git a/drivers/firmware/efi/stmm/mm_communication.h b/drivers/firmware/efi/stmm/mm_communication.h
index 52a1f32cd1eb..06e7663f96dc 100644
--- a/drivers/firmware/efi/stmm/mm_communication.h
+++ b/drivers/firmware/efi/stmm/mm_communication.h
@@ -32,7 +32,7 @@
/**
* struct efi_mm_communicate_header - Header used for SMM variable communication
-
+ *
* @header_guid: header use for disambiguation of content
* @message_len: length of the message. Does not include the size of the
* header
@@ -111,7 +111,7 @@ struct efi_mm_communicate_header {
/**
* struct smm_variable_communicate_header - Used for SMM variable communication
-
+ *
* @function: function to call in Smm.
* @ret_status: return status
* @data: payload
@@ -128,7 +128,7 @@ struct smm_variable_communicate_header {
/**
* struct smm_variable_access - Used to communicate with StMM by
* SetVariable and GetVariable.
-
+ *
* @guid: vendor GUID
* @data_size: size of EFI variable data
* @name_size: size of EFI name
diff --git a/drivers/firmware/imx/imx-scu-irq.c b/drivers/firmware/imx/imx-scu-irq.c
index 6125cccc9ba7..a68d38f89254 100644
--- a/drivers/firmware/imx/imx-scu-irq.c
+++ b/drivers/firmware/imx/imx-scu-irq.c
@@ -203,6 +203,18 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
struct mbox_chan *ch;
int ret = 0, i = 0;
+ if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
+ "#mbox-cells", 0, &spec)) {
+ i = of_alias_get_id(spec.np, "mu");
+ of_node_put(spec.np);
+ }
+
+	/* fall back to MU1 as the general MU IRQ channel on failure */
+ if (i < 0)
+ i = 1;
+
+ mu_resource_id = IMX_SC_R_MU_0A + i;
+
ret = imx_scu_get_handle(&imx_sc_irq_ipc_handle);
if (ret)
return ret;
@@ -214,27 +226,16 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
cl->dev = dev;
cl->rx_callback = imx_scu_irq_callback;
+ INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
+
/* SCU general IRQ uses general interrupt channel 3 */
ch = mbox_request_channel_byname(cl, "gip3");
if (IS_ERR(ch)) {
ret = PTR_ERR(ch);
dev_err(dev, "failed to request mbox chan gip3, ret %d\n", ret);
- devm_kfree(dev, cl);
- return ret;
+ goto free_cl;
}
- INIT_WORK(&imx_sc_irq_work, imx_scu_irq_work_handler);
-
- if (!of_parse_phandle_with_args(dev->of_node, "mboxes",
- "#mbox-cells", 0, &spec))
- i = of_alias_get_id(spec.np, "mu");
-
- /* use mu1 as general mu irq channel if failed */
- if (i < 0)
- i = 1;
-
- mu_resource_id = IMX_SC_R_MU_0A + i;
-
/* Create directory under /sysfs/firmware */
wakeup_obj = kobject_create_and_add("scu_wakeup_source", firmware_kobj);
if (!wakeup_obj) {
@@ -253,7 +254,8 @@ int imx_scu_enable_general_irq_channel(struct device *dev)
free_ch:
mbox_free_channel(ch);
+free_cl:
+ devm_kfree(dev, cl);
return ret;
}
-EXPORT_SYMBOL(imx_scu_enable_general_irq_channel);
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 8c28e25ddc8a..67b267a7408a 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -73,9 +73,9 @@ static int imx_sc_linux_errmap[IMX_SC_ERR_LAST] = {
-EACCES, /* IMX_SC_ERR_NOACCESS */
-EACCES, /* IMX_SC_ERR_LOCKED */
-ERANGE, /* IMX_SC_ERR_UNAVAILABLE */
- -EEXIST, /* IMX_SC_ERR_NOTFOUND */
- -EPERM, /* IMX_SC_ERR_NOPOWER */
- -EPIPE, /* IMX_SC_ERR_IPC */
+ -ENOENT, /* IMX_SC_ERR_NOTFOUND */
+ -ENODEV, /* IMX_SC_ERR_NOPOWER */
+ -ECOMM, /* IMX_SC_ERR_IPC */
-EBUSY, /* IMX_SC_ERR_BUSY */
-EIO, /* IMX_SC_ERR_FAIL */
};
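The table is indexed by the firmware's IMX_SC_ERR_* status code; a bounds-checked lookup along these lines (helper name hypothetical, the driver's actual translation function may differ) converts it to an errno:

static int imx_sc_to_errno(u8 sc_err)
{
	if (sc_err < ARRAY_SIZE(imx_sc_linux_errmap))
		return imx_sc_linux_errmap[sc_err];
	return -EIO;	/* unknown firmware codes become a generic I/O error */
}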
@@ -324,7 +324,9 @@ static int imx_scu_probe(struct platform_device *pdev)
}
sc_ipc->dev = dev;
- mutex_init(&sc_ipc->lock);
+ ret = devm_mutex_init(dev, &sc_ipc->lock);
+ if (ret)
+ return ret;
init_completion(&sc_ipc->done);
imx_sc_ipc_handle = sc_ipc;
@@ -352,6 +354,7 @@ static struct platform_driver imx_scu_driver = {
.driver = {
.name = "imx-scu",
.of_match_table = imx_scu_match,
+ .suppress_bind_attrs = true,
},
.probe = imx_scu_probe,
};
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index f2fdd3756648..179f5d46d8dd 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -5,7 +5,7 @@
config MESON_SM
tristate "Amlogic Secure Monitor driver"
depends on ARCH_MESON || COMPILE_TEST
- default y
+ default ARCH_MESON
depends on ARM64_4K_PAGES
help
Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index f25a9746249b..3ab67aaa9e5d 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -232,11 +232,16 @@ EXPORT_SYMBOL(meson_sm_call_write);
struct meson_sm_firmware *meson_sm_get(struct device_node *sm_node)
{
struct platform_device *pdev = of_find_device_by_node(sm_node);
+ struct meson_sm_firmware *fw;
if (!pdev)
return NULL;
- return platform_get_drvdata(pdev);
+ fw = platform_get_drvdata(pdev);
+
+ put_device(&pdev->dev);
+
+ return fw;
}
EXPORT_SYMBOL_GPL(meson_sm_get);
diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index 26cd0458aacd..1a6f85e463e0 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -1119,7 +1119,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
if (ret) {
dev_err(__scm->dev,
"Assign memory protection call failed %d\n", ret);
- return -EINVAL;
+ return ret;
}
*srcvm = next_vm;
@@ -1994,11 +1994,14 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "asus,vivobook-s15" },
{ .compatible = "asus,zenbook-a14-ux3407qa" },
{ .compatible = "asus,zenbook-a14-ux3407ra" },
+ { .compatible = "dell,inspiron-14-plus-7441" },
+ { .compatible = "dell,latitude-7455" },
{ .compatible = "dell,xps13-9345" },
{ .compatible = "hp,elitebook-ultra-g1q" },
{ .compatible = "hp,omnibook-x14" },
{ .compatible = "huawei,gaokun3" },
{ .compatible = "lenovo,flex-5g" },
+ { .compatible = "lenovo,thinkbook-16" },
{ .compatible = "lenovo,thinkpad-t14s" },
{ .compatible = "lenovo,thinkpad-x13s", },
{ .compatible = "lenovo,yoga-slim7x" },
@@ -2006,6 +2009,7 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ .compatible = "microsoft,blackrock" },
{ .compatible = "microsoft,romulus13", },
{ .compatible = "microsoft,romulus15", },
+ { .compatible = "qcom,hamoa-iot-evk" },
{ .compatible = "qcom,sc8180x-primus" },
{ .compatible = "qcom,x1e001de-devkit" },
{ .compatible = "qcom,x1e80100-crd" },
@@ -2014,21 +2018,6 @@ static const struct of_device_id qcom_scm_qseecom_allowlist[] __maybe_unused = {
{ }
};
-static bool qcom_scm_qseecom_machine_is_allowed(void)
-{
- struct device_node *np;
- bool match;
-
- np = of_find_node_by_path("/");
- if (!np)
- return false;
-
- match = of_match_node(qcom_scm_qseecom_allowlist, np);
- of_node_put(np);
-
- return match;
-}
-
static void qcom_scm_qseecom_free(void *data)
{
struct platform_device *qseecom_dev = data;
@@ -2060,7 +2049,7 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
dev_info(scm->dev, "qseecom: found qseecom with version 0x%x\n", version);
- if (!qcom_scm_qseecom_machine_is_allowed()) {
+ if (!of_machine_device_match(qcom_scm_qseecom_allowlist)) {
dev_info(scm->dev, "qseecom: untested machine, skipping\n");
return 0;
}
@@ -2094,6 +2083,122 @@ static int qcom_scm_qseecom_init(struct qcom_scm *scm)
#endif /* CONFIG_QCOM_QSEECOM */
/**
+ * qcom_scm_qtee_invoke_smc() - Invoke a QTEE object.
+ * @inbuf: start address of memory area used for inbound buffer.
+ * @inbuf_size: size of the memory area used for inbound buffer.
+ * @outbuf: start address of memory area used for outbound buffer.
+ * @outbuf_size: size of the memory area used for outbound buffer.
+ * @result: result of QTEE object invocation.
+ * @response_type: response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @inbuf and @outbuf
+ * should be processed.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int qcom_scm_qtee_invoke_smc(phys_addr_t inbuf, size_t inbuf_size,
+ phys_addr_t outbuf, size_t outbuf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_INVOKE,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = inbuf,
+ .args[1] = inbuf_size,
+ .args[2] = outbuf,
+ .args[3] = outbuf_size,
+ .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_RW, QCOM_SCM_VAL,
+ QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_invoke_smc);
+
+/**
+ * qcom_scm_qtee_callback_response() - Submit response for callback request.
+ * @buf: start address of memory area used for outbound buffer.
+ * @buf_size: size of the memory area used for outbound buffer.
+ * @result: Result of QTEE object invocation.
+ * @response_type: Response type returned by QTEE.
+ *
+ * @response_type determines how the contents of @buf should be processed.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int qcom_scm_qtee_callback_response(phys_addr_t buf, size_t buf_size,
+ u64 *result, u64 *response_type)
+{
+ struct qcom_scm_desc desc = {
+ .svc = QCOM_SCM_SVC_SMCINVOKE,
+ .cmd = QCOM_SCM_SMCINVOKE_CB_RSP,
+ .owner = ARM_SMCCC_OWNER_TRUSTED_OS,
+ .args[0] = buf,
+ .args[1] = buf_size,
+ .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL),
+ };
+ struct qcom_scm_res res;
+ int ret;
+
+ ret = qcom_scm_call(__scm->dev, &desc, &res);
+ if (ret)
+ return ret;
+
+ if (response_type)
+ *response_type = res.result[0];
+
+ if (result)
+ *result = res.result[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_scm_qtee_callback_response);
+
+static void qcom_scm_qtee_free(void *data)
+{
+ struct platform_device *qtee_dev = data;
+
+ platform_device_unregister(qtee_dev);
+}
+
+static void qcom_scm_qtee_init(struct qcom_scm *scm)
+{
+ struct platform_device *qtee_dev;
+ u64 result, response_type;
+ int ret;
+
+ /*
+	 * Probe for smcinvoke support. The call fails because of the invalid
+	 * buffers, but only after the QTEE syscall handler has checked whether
+	 * it supports the call at all; if it does not, -EIO is returned.
+ */
+ ret = qcom_scm_qtee_invoke_smc(0, 0, 0, 0, &result, &response_type);
+ if (ret == -EIO)
+ return;
+
+ /* Setup QTEE interface device. */
+ qtee_dev = platform_device_register_data(scm->dev, "qcomtee",
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(qtee_dev))
+ return;
+
+ devm_add_action_or_reset(scm->dev, qcom_scm_qtee_free, qtee_dev);
+}
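Taken together, a hedged sketch of how the new exports are intended to be driven (buffer addresses and sizes are placeholders the caller must provide):

static int qtee_invoke_example(phys_addr_t in, size_t in_sz,
			       phys_addr_t out, size_t out_sz)
{
	u64 result, response_type;
	int ret;

	ret = qcom_scm_qtee_invoke_smc(in, in_sz, out, out_sz,
				       &result, &response_type);
	if (ret)
		return ret;	/* SMC transport failure */

	/* Interpret the in/out buffers according to response_type here. */
	return 0;
}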
+
+/**
* qcom_scm_is_available() - Checks if SCM is available
*/
bool qcom_scm_is_available(void)
@@ -2325,6 +2430,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
ret = qcom_scm_qseecom_init(scm);
WARN(ret < 0, "failed to initialize qseecom: %d\n", ret);
+ /* Initialize the QTEE object interface. */
+ qcom_scm_qtee_init(scm);
+
return 0;
}
diff --git a/drivers/firmware/qcom/qcom_scm.h b/drivers/firmware/qcom/qcom_scm.h
index 0e8dd838099e..a56c8212cc0c 100644
--- a/drivers/firmware/qcom/qcom_scm.h
+++ b/drivers/firmware/qcom/qcom_scm.h
@@ -156,6 +156,13 @@ int qcom_scm_shm_bridge_enable(struct device *scm_dev);
#define QCOM_SCM_SVC_GPU 0x28
#define QCOM_SCM_SVC_GPU_INIT_REGS 0x01
+/* ARM_SMCCC_OWNER_TRUSTED_OS calls */
+
+#define QCOM_SCM_SVC_SMCINVOKE 0x06
+#define QCOM_SCM_SMCINVOKE_INVOKE_LEGACY 0x00
+#define QCOM_SCM_SMCINVOKE_CB_RSP 0x01
+#define QCOM_SCM_SMCINVOKE_INVOKE 0x02
+
/* common error codes */
#define QCOM_SCM_V2_EBUSY -12
#define QCOM_SCM_ENOMEM -5
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
index ea0a35355657..9f232e53115e 100644
--- a/drivers/firmware/qcom/qcom_tzmem.c
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -77,6 +77,7 @@ static bool qcom_tzmem_using_shm_bridge;
/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
+ "qcom,sc7180", /* hang in rmtfs memory assignment */
"qcom,sc8180x",
"qcom,sdm670", /* failure in GPU firmware loading */
"qcom,sdm845", /* reset in rmtfs memory assignment */
@@ -109,7 +110,19 @@ notsupp:
return 0;
}
-static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+/**
+ * qcom_tzmem_shm_bridge_create() - Create a SHM bridge.
+ * @paddr: Physical address of the memory to share.
+ * @size: Size of the memory to share.
+ * @handle: Handle to the SHM bridge.
+ *
+ * On platforms that support SHM bridge, this function creates a SHM bridge
+ * for the given memory region with QTEE. The handle returned by this function
+ * must be passed to qcom_tzmem_shm_bridge_delete() to free the SHM bridge.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle)
{
u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
int ret;
@@ -117,17 +130,49 @@ static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
if (!qcom_tzmem_using_shm_bridge)
return 0;
- pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
- size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+ pfn_and_ns_perm = paddr | QCOM_SCM_PERM_RW;
+ ipfn_and_s_perm = paddr | QCOM_SCM_PERM_RW;
+ size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);
+
+ ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
+ size_and_flags, QCOM_SCM_VMID_HLOS,
+ handle);
+ if (ret) {
+ dev_err(qcom_tzmem_dev,
+ "SHM Bridge failed: ret %d paddr 0x%pa, size %zu\n",
+ ret, &paddr, size);
+
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_create);
+
+/**
+ * qcom_tzmem_shm_bridge_delete() - Delete a SHM bridge.
+ * @handle: Handle to the SHM bridge.
+ *
+ * On platforms that support SHM bridge, this function deletes the SHM bridge
+ * for the given memory region. The handle must be the same as the one
+ * returned by qcom_tzmem_shm_bridge_create().
+ */
+void qcom_tzmem_shm_bridge_delete(u64 handle)
+{
+ if (qcom_tzmem_using_shm_bridge)
+ qcom_scm_shm_bridge_delete(handle);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_delete);
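The create/delete pair is symmetric, and both degrade to no-ops on platforms without SHM bridge support. A minimal usage sketch (the memory region is a placeholder):

static int shm_bridge_example(phys_addr_t paddr, size_t size)
{
	u64 handle;
	int ret;

	ret = qcom_tzmem_shm_bridge_create(paddr, size, &handle);
	if (ret)
		return ret;

	/* ... the secure world may now access [paddr, paddr + size) ... */

	qcom_tzmem_shm_bridge_delete(handle);
	return 0;
}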
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+ int ret;
u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
- ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
- size_and_flags, QCOM_SCM_VMID_HLOS,
- handle);
+ ret = qcom_tzmem_shm_bridge_create(area->paddr, area->size, handle);
if (ret)
return ret;
@@ -140,10 +185,7 @@ static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
u64 *handle = area->priv;
- if (!qcom_tzmem_using_shm_bridge)
- return;
-
- qcom_scm_shm_bridge_delete(*handle);
+ qcom_tzmem_shm_bridge_delete(*handle);
kfree(handle);
}
diff --git a/drivers/firmware/samsung/Makefile b/drivers/firmware/samsung/Makefile
index 7b4c9f6f34f5..80d4f89b33a9 100644
--- a/drivers/firmware/samsung/Makefile
+++ b/drivers/firmware/samsung/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-acpm-protocol-objs := exynos-acpm.o exynos-acpm-pmic.o
+acpm-protocol-objs := exynos-acpm.o
+acpm-protocol-objs += exynos-acpm-pmic.o
+acpm-protocol-objs += exynos-acpm-dvfs.o
obj-$(CONFIG_EXYNOS_ACPM_PROTOCOL) += acpm-protocol.o
diff --git a/drivers/firmware/samsung/exynos-acpm-dvfs.c b/drivers/firmware/samsung/exynos-acpm-dvfs.c
new file mode 100644
index 000000000000..1c5b2b143bcc
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-dvfs.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2025 Linaro Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/firmware/samsung/exynos-acpm-protocol.h>
+#include <linux/ktime.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include "exynos-acpm.h"
+#include "exynos-acpm-dvfs.h"
+
+#define ACPM_DVFS_ID GENMASK(11, 0)
+#define ACPM_DVFS_REQ_TYPE GENMASK(15, 0)
+
+#define ACPM_DVFS_FREQ_REQ 0
+#define ACPM_DVFS_FREQ_GET 1
+
+static void acpm_dvfs_set_xfer(struct acpm_xfer *xfer, u32 *cmd, size_t cmdlen,
+ unsigned int acpm_chan_id, bool response)
+{
+ xfer->acpm_chan_id = acpm_chan_id;
+ xfer->txd = cmd;
+ xfer->txlen = cmdlen;
+
+ if (response) {
+ xfer->rxd = cmd;
+ xfer->rxlen = cmdlen;
+ }
+}
+
+static void acpm_dvfs_init_set_rate_cmd(u32 cmd[4], unsigned int clk_id,
+ unsigned long rate)
+{
+ cmd[0] = FIELD_PREP(ACPM_DVFS_ID, clk_id);
+ cmd[1] = rate / HZ_PER_KHZ;
+ cmd[2] = FIELD_PREP(ACPM_DVFS_REQ_TYPE, ACPM_DVFS_FREQ_REQ);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+int acpm_dvfs_set_rate(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, unsigned int clk_id,
+ unsigned long rate)
+{
+ struct acpm_xfer xfer = {0};
+ u32 cmd[4];
+
+ acpm_dvfs_init_set_rate_cmd(cmd, clk_id, rate);
+ acpm_dvfs_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id, false);
+
+ return acpm_do_xfer(handle, &xfer);
+}
+
+static void acpm_dvfs_init_get_rate_cmd(u32 cmd[4], unsigned int clk_id)
+{
+ cmd[0] = FIELD_PREP(ACPM_DVFS_ID, clk_id);
+ cmd[2] = FIELD_PREP(ACPM_DVFS_REQ_TYPE, ACPM_DVFS_FREQ_GET);
+ cmd[3] = ktime_to_ms(ktime_get());
+}
+
+unsigned long acpm_dvfs_get_rate(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, unsigned int clk_id)
+{
+ struct acpm_xfer xfer;
+	u32 cmd[4] = {0};
+ int ret;
+
+ acpm_dvfs_init_get_rate_cmd(cmd, clk_id);
+ acpm_dvfs_set_xfer(&xfer, cmd, sizeof(cmd), acpm_chan_id, true);
+
+ ret = acpm_do_xfer(handle, &xfer);
+ if (ret)
+ return 0;
+
+ return xfer.rxd[1] * HZ_PER_KHZ;
+}
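A hedged round trip through the new DVFS helpers (the channel and clock IDs below are placeholders; real callers obtain them from their ACPM bindings):

static int acpm_dvfs_example(const struct acpm_handle *handle)
{
	unsigned long rate;
	int ret;

	ret = acpm_dvfs_set_rate(handle, 2, 0, 1066 * HZ_PER_MHZ);
	if (ret)
		return ret;

	rate = acpm_dvfs_get_rate(handle, 2, 0);	/* returns 0 on failure */
	return rate ? 0 : -EIO;
}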
diff --git a/drivers/firmware/samsung/exynos-acpm-dvfs.h b/drivers/firmware/samsung/exynos-acpm-dvfs.h
new file mode 100644
index 000000000000..9f2778e649c9
--- /dev/null
+++ b/drivers/firmware/samsung/exynos-acpm-dvfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2020 Samsung Electronics Co., Ltd.
+ * Copyright 2020 Google LLC.
+ * Copyright 2025 Linaro Ltd.
+ */
+#ifndef __EXYNOS_ACPM_DVFS_H__
+#define __EXYNOS_ACPM_DVFS_H__
+
+#include <linux/types.h>
+
+struct acpm_handle;
+
+int acpm_dvfs_set_rate(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id, unsigned int id,
+ unsigned long rate);
+unsigned long acpm_dvfs_get_rate(const struct acpm_handle *handle,
+ unsigned int acpm_chan_id,
+ unsigned int clk_id);
+
+#endif /* __EXYNOS_ACPM_DVFS_H__ */
diff --git a/drivers/firmware/samsung/exynos-acpm-pmic.c b/drivers/firmware/samsung/exynos-acpm-pmic.c
index 39b33a356ebd..961d7599e422 100644
--- a/drivers/firmware/samsung/exynos-acpm-pmic.c
+++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
@@ -4,7 +4,9 @@
* Copyright 2020 Google LLC.
* Copyright 2024 Linaro Ltd.
*/
+#include <linux/array_size.h>
#include <linux/bitfield.h>
+#include <linux/errno.h>
#include <linux/firmware/samsung/exynos-acpm-protocol.h>
#include <linux/ktime.h>
#include <linux/types.h>
@@ -33,6 +35,19 @@ enum exynos_acpm_pmic_func {
ACPM_PMIC_BULK_WRITE,
};
+static const int acpm_pmic_linux_errmap[] = {
+ [0] = 0, /* ACPM_PMIC_SUCCESS */
+	[1] = -EACCES,	/* Register could not be accessed for reading. */
+	[2] = -EACCES,	/* Register could not be accessed for writing. */
+};
+
+static int acpm_pmic_to_linux_err(int err)
+{
+ if (err >= 0 && err < ARRAY_SIZE(acpm_pmic_linux_errmap))
+ return acpm_pmic_linux_errmap[err];
+ return -EIO;
+}
+
static inline u32 acpm_pmic_set_bulk(u32 data, unsigned int i)
{
return (data & ACPM_PMIC_BULK_MASK) << (ACPM_PMIC_BULK_SHIFT * i);
@@ -79,7 +94,7 @@ int acpm_pmic_read_reg(const struct acpm_handle *handle,
*buf = FIELD_GET(ACPM_PMIC_VALUE, xfer.rxd[1]);
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_bulk_read_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -110,7 +125,7 @@ int acpm_pmic_bulk_read(const struct acpm_handle *handle,
if (ret)
return ret;
- ret = FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ ret = acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
if (ret)
return ret;
@@ -150,7 +165,7 @@ int acpm_pmic_write_reg(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_bulk_write_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -190,7 +205,7 @@ int acpm_pmic_bulk_write(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
static void acpm_pmic_init_update_cmd(u32 cmd[4], u8 type, u8 reg, u8 chan,
@@ -220,5 +235,5 @@ int acpm_pmic_update_reg(const struct acpm_handle *handle,
if (ret)
return ret;
- return FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]);
+ return acpm_pmic_to_linux_err(FIELD_GET(ACPM_PMIC_RETURN, xfer.rxd[1]));
}
diff --git a/drivers/firmware/samsung/exynos-acpm.c b/drivers/firmware/samsung/exynos-acpm.c
index 3a69fe3234c7..0cb269c70460 100644
--- a/drivers/firmware/samsung/exynos-acpm.c
+++ b/drivers/firmware/samsung/exynos-acpm.c
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include "exynos-acpm.h"
+#include "exynos-acpm-dvfs.h"
#include "exynos-acpm-pmic.h"
#define ACPM_PROTOCOL_SEQNUM GENMASK(21, 16)
@@ -176,9 +177,11 @@ struct acpm_info {
/**
* struct acpm_match_data - of_device_id data.
* @initdata_base: offset in SRAM where the channels configuration resides.
+ * @acpm_clk_dev_name: name of the ACPM clocks platform device to register.
*/
struct acpm_match_data {
loff_t initdata_base;
+ const char *acpm_clk_dev_name;
};
#define client_to_acpm_chan(c) container_of(c, struct acpm_chan, cl)
@@ -590,8 +593,12 @@ static int acpm_channels_init(struct acpm_info *acpm)
*/
static void acpm_setup_ops(struct acpm_info *acpm)
{
+ struct acpm_dvfs_ops *dvfs_ops = &acpm->handle.ops.dvfs_ops;
struct acpm_pmic_ops *pmic_ops = &acpm->handle.ops.pmic_ops;
+ dvfs_ops->set_rate = acpm_dvfs_set_rate;
+ dvfs_ops->get_rate = acpm_dvfs_get_rate;
+
pmic_ops->read_reg = acpm_pmic_read_reg;
pmic_ops->bulk_read = acpm_pmic_bulk_read;
pmic_ops->write_reg = acpm_pmic_write_reg;
@@ -599,9 +606,15 @@ static void acpm_setup_ops(struct acpm_info *acpm)
pmic_ops->update_reg = acpm_pmic_update_reg;
}
+static void acpm_clk_pdev_unregister(void *data)
+{
+ platform_device_unregister(data);
+}
+
static int acpm_probe(struct platform_device *pdev)
{
const struct acpm_match_data *match_data;
+ struct platform_device *acpm_clk_pdev;
struct device *dev = &pdev->dev;
struct device_node *shmem;
struct acpm_info *acpm;
@@ -642,6 +655,18 @@ static int acpm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, acpm);
+ acpm_clk_pdev = platform_device_register_data(dev,
+ match_data->acpm_clk_dev_name,
+ PLATFORM_DEVID_NONE, NULL, 0);
+ if (IS_ERR(acpm_clk_pdev))
+ return dev_err_probe(dev, PTR_ERR(acpm_clk_pdev),
+ "Failed to register ACPM clocks device.\n");
+
+ ret = devm_add_action_or_reset(dev, acpm_clk_pdev_unregister,
+ acpm_clk_pdev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add devm action.\n");
+
return devm_of_platform_populate(dev);
}
@@ -741,6 +766,7 @@ EXPORT_SYMBOL_GPL(devm_acpm_get_by_node);
static const struct acpm_match_data acpm_gs101 = {
.initdata_base = ACPM_GS101_INITDATA_BASE,
+ .acpm_clk_dev_name = "gs101-acpm-clk",
};
static const struct of_device_id acpm_match[] = {
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index 1ea39a0a76c7..41da07c445a6 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2019, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
*/
#include <linux/arm-smccc.h>
@@ -14,11 +15,9 @@
#include <linux/firmware/intel/stratix10-svc-client.h>
#include <linux/string.h>
#include <linux/sysfs.h>
+#include <linux/delay.h>
-#define RSU_STATE_MASK GENMASK_ULL(31, 0)
-#define RSU_VERSION_MASK GENMASK_ULL(63, 32)
-#define RSU_ERROR_LOCATION_MASK GENMASK_ULL(31, 0)
-#define RSU_ERROR_DETAIL_MASK GENMASK_ULL(63, 32)
+#define RSU_ERASE_SIZE_MASK GENMASK_ULL(63, 32)
#define RSU_DCMF0_MASK GENMASK_ULL(31, 0)
#define RSU_DCMF1_MASK GENMASK_ULL(63, 32)
#define RSU_DCMF2_MASK GENMASK_ULL(31, 0)
@@ -35,7 +34,8 @@
#define INVALID_DCMF_STATUS 0xFFFFFFFF
#define INVALID_SPT_ADDRESS 0x0
-#define RSU_GET_SPT_CMD 0x5A
+#define RSU_RETRY_SLEEP_MS (1U)
+#define RSU_ASYNC_MSG_RETRY (3U)
#define RSU_GET_SPT_RESP_LEN (4 * sizeof(unsigned int))
typedef void (*rsu_callback)(struct stratix10_svc_client *client,
@@ -64,7 +64,6 @@ typedef void (*rsu_callback)(struct stratix10_svc_client *client,
* @max_retry: the preset max retry value
* @spt0_address: address of spt0
* @spt1_address: address of spt1
- * @get_spt_response_buf: response from sdm for get_spt command
*/
struct stratix10_rsu_priv {
struct stratix10_svc_chan *chan;
@@ -99,47 +98,32 @@ struct stratix10_rsu_priv {
unsigned long spt0_address;
unsigned long spt1_address;
-
- unsigned int *get_spt_response_buf;
};
+typedef void (*rsu_async_callback)(struct device *dev,
+ struct stratix10_rsu_priv *priv, struct stratix10_svc_cb_data *data);
+
/**
- * rsu_status_callback() - Status callback from Intel Service Layer
- * @client: pointer to service client
+ * rsu_async_status_callback() - Status callback from rsu_send_async_msg()
+ * @dev: pointer to device object
+ * @priv: pointer to priv object
* @data: pointer to callback data structure
*
- * Callback from Intel service layer for RSU status request. Status is
- * only updated after a system reboot, so a get updated status call is
- * made during driver probe.
+ * Callback from rsu_send_async_msg() to get the system RSU error status.
*/
-static void rsu_status_callback(struct stratix10_svc_client *client,
- struct stratix10_svc_cb_data *data)
+static void rsu_async_status_callback(struct device *dev,
+ struct stratix10_rsu_priv *priv,
+ struct stratix10_svc_cb_data *data)
{
- struct stratix10_rsu_priv *priv = client->priv;
- struct arm_smccc_res *res = (struct arm_smccc_res *)data->kaddr1;
-
- if (data->status == BIT(SVC_STATUS_OK)) {
- priv->status.version = FIELD_GET(RSU_VERSION_MASK,
- res->a2);
- priv->status.state = FIELD_GET(RSU_STATE_MASK, res->a2);
- priv->status.fail_image = res->a1;
- priv->status.current_image = res->a0;
- priv->status.error_location =
- FIELD_GET(RSU_ERROR_LOCATION_MASK, res->a3);
- priv->status.error_details =
- FIELD_GET(RSU_ERROR_DETAIL_MASK, res->a3);
- } else {
- dev_err(client->dev, "COMMAND_RSU_STATUS returned 0x%lX\n",
- res->a0);
- priv->status.version = 0;
- priv->status.state = 0;
- priv->status.fail_image = 0;
- priv->status.current_image = 0;
- priv->status.error_location = 0;
- priv->status.error_details = 0;
- }
-
- complete(&priv->completion);
+ struct arm_smccc_1_2_regs *res = (struct arm_smccc_1_2_regs *)data->kaddr1;
+
+ priv->status.current_image = res->a2;
+ priv->status.fail_image = res->a3;
+ priv->status.state = res->a4;
+ priv->status.version = res->a5;
+ priv->status.error_location = res->a7;
+ priv->status.error_details = res->a8;
+ priv->retry_counter = res->a9;
}
/**
@@ -163,32 +147,6 @@ static void rsu_command_callback(struct stratix10_svc_client *client,
complete(&priv->completion);
}
-/**
- * rsu_retry_callback() - Callback from Intel service layer for getting
- * the current image's retry counter from the firmware
- * @client: pointer to client
- * @data: pointer to callback data structure
- *
- * Callback from Intel service layer for retry counter, which is used by
- * user to know how many times the images is still allowed to reload
- * itself before giving up and starting RSU fail-over flow.
- */
-static void rsu_retry_callback(struct stratix10_svc_client *client,
- struct stratix10_svc_cb_data *data)
-{
- struct stratix10_rsu_priv *priv = client->priv;
- unsigned int *counter = (unsigned int *)data->kaddr1;
-
- if (data->status == BIT(SVC_STATUS_OK))
- priv->retry_counter = *counter;
- else if (data->status == BIT(SVC_STATUS_NO_SUPPORT))
- dev_warn(client->dev, "Secure FW doesn't support retry\n");
- else
- dev_err(client->dev, "Failed to get retry counter %lu\n",
- BIT(data->status));
-
- complete(&priv->completion);
-}
/**
* rsu_max_retry_callback() - Callback from Intel service layer for getting
@@ -270,34 +228,19 @@ static void rsu_dcmf_status_callback(struct stratix10_svc_client *client,
complete(&priv->completion);
}
-static void rsu_get_spt_callback(struct stratix10_svc_client *client,
- struct stratix10_svc_cb_data *data)
+/**
+ * rsu_async_get_spt_table_callback() - Callback used by rsu_send_async_msg()
+ * to retrieve the SPT table information.
+ * @dev: pointer to device object
+ * @priv: pointer to priv object
+ * @data: pointer to callback data structure
+ */
+static void rsu_async_get_spt_table_callback(struct device *dev,
+ struct stratix10_rsu_priv *priv,
+ struct stratix10_svc_cb_data *data)
{
- struct stratix10_rsu_priv *priv = client->priv;
- unsigned long *mbox_err = (unsigned long *)data->kaddr1;
- unsigned long *resp_len = (unsigned long *)data->kaddr2;
-
- if (data->status != BIT(SVC_STATUS_OK) || (*mbox_err) ||
- (*resp_len != RSU_GET_SPT_RESP_LEN))
- goto error;
-
- priv->spt0_address = priv->get_spt_response_buf[0];
- priv->spt0_address <<= 32;
- priv->spt0_address |= priv->get_spt_response_buf[1];
-
- priv->spt1_address = priv->get_spt_response_buf[2];
- priv->spt1_address <<= 32;
- priv->spt1_address |= priv->get_spt_response_buf[3];
-
- goto complete;
-
-error:
- dev_err(client->dev, "failed to get SPTs\n");
-
-complete:
- stratix10_svc_free_memory(priv->chan, priv->get_spt_response_buf);
- priv->get_spt_response_buf = NULL;
- complete(&priv->completion);
+ priv->spt0_address = *((unsigned long *)data->kaddr1);
+ priv->spt1_address = *((unsigned long *)data->kaddr2);
}
/**
@@ -329,14 +272,6 @@ static int rsu_send_msg(struct stratix10_rsu_priv *priv,
if (arg)
msg.arg[0] = arg;
- if (command == COMMAND_MBOX_SEND_CMD) {
- msg.arg[1] = 0;
- msg.payload = NULL;
- msg.payload_length = 0;
- msg.payload_output = priv->get_spt_response_buf;
- msg.payload_length_output = RSU_GET_SPT_RESP_LEN;
- }
-
ret = stratix10_svc_send(priv->chan, &msg);
if (ret < 0)
goto status_done;
@@ -362,6 +297,95 @@ status_done:
return ret;
}
+/**
+ * soc64_async_callback() - Callback from Intel service layer for async requests
+ * @ptr: pointer to the completion object
+ */
+static void soc64_async_callback(void *ptr)
+{
+ if (ptr)
+ complete(ptr);
+}
+
+/**
+ * rsu_send_async_msg() - send an async message to Intel service layer
+ * @dev: pointer to device object
+ * @priv: pointer to rsu private data
+ * @command: RSU status or update command
+ * @arg: the request argument, notify status
+ * @callback: function pointer for the callback (status or update)
+ */
+static int rsu_send_async_msg(struct device *dev, struct stratix10_rsu_priv *priv,
+ enum stratix10_svc_command_code command,
+ unsigned long arg,
+ rsu_async_callback callback)
+{
+ struct stratix10_svc_client_msg msg = {0};
+ struct stratix10_svc_cb_data data = {0};
+ struct completion completion;
+ int status, index, ret;
+ void *handle = NULL;
+
+ msg.command = command;
+ msg.arg[0] = arg;
+
+ init_completion(&completion);
+
+ for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) {
+ status = stratix10_svc_async_send(priv->chan, &msg,
+ &handle, soc64_async_callback,
+ &completion);
+ if (status == 0)
+ break;
+ dev_warn(dev, "Failed to send async message\n");
+ msleep(RSU_RETRY_SLEEP_MS);
+ }
+
+ if (status && !handle) {
+ dev_err(dev, "Failed to send async message\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = wait_for_completion_io_timeout(&completion, RSU_TIMEOUT);
+ if (ret > 0)
+ dev_dbg(dev, "Received async interrupt\n");
+ else if (ret == 0)
+ dev_dbg(dev, "Timeout occurred. Trying to poll the response\n");
+
+ for (index = 0; index < RSU_ASYNC_MSG_RETRY; index++) {
+ status = stratix10_svc_async_poll(priv->chan, handle, &data);
+ if (status == -EAGAIN) {
+ dev_dbg(dev, "Async message is still in progress\n");
+ } else if (status < 0) {
+ dev_alert(dev, "Failed to poll async message\n");
+ ret = -ETIMEDOUT;
+ } else if (status == 0) {
+ ret = 0;
+ break;
+ }
+ msleep(RSU_RETRY_SLEEP_MS);
+ }
+
+ if (ret) {
+ dev_err(dev, "Failed to get async response\n");
+ goto status_done;
+ }
+
+ if (data.status == 0) {
+ ret = 0;
+ if (callback)
+ callback(dev, priv, &data);
+ } else {
+ dev_err(dev, "%s returned 0x%x from SDM\n", __func__,
+ data.status);
+ ret = -EFAULT;
+ }
+
+status_done:
+ stratix10_svc_async_done(priv->chan, handle);
+ return ret;
+}
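In short, the async round trip implemented above is: stratix10_svc_async_send() queues the SMC request and returns a handle; wait_for_completion_io_timeout() waits for the interrupt-driven callback; stratix10_svc_async_poll() fetches the response, retrying while it returns -EAGAIN; and stratix10_svc_async_done() releases the handle and its job ID. Both the send and the poll retry up to RSU_ASYNC_MSG_RETRY times with an RSU_RETRY_SLEEP_MS pause between attempts.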
+
/*
* This driver exposes some optional features of the Intel Stratix 10 SoC FPGA.
* The sysfs interfaces exposed here are FPGA Remote System Update (RSU)
@@ -454,8 +478,7 @@ static ssize_t max_retry_show(struct device *dev,
if (!priv)
return -ENODEV;
- return scnprintf(buf, sizeof(priv->max_retry),
- "0x%08x\n", priv->max_retry);
+ return sysfs_emit(buf, "0x%08x\n", priv->max_retry);
}
static ssize_t dcmf0_show(struct device *dev,
@@ -597,27 +620,20 @@ static ssize_t notify_store(struct device *dev,
if (ret)
return ret;
- ret = rsu_send_msg(priv, COMMAND_RSU_NOTIFY,
- status, rsu_command_callback);
+ ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_NOTIFY, status, NULL);
if (ret) {
dev_err(dev, "Error, RSU notify returned %i\n", ret);
return ret;
}
/* to get the updated state */
- ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
- 0, rsu_status_callback);
+ ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0,
+ rsu_async_status_callback);
if (ret) {
dev_err(dev, "Error, getting RSU status %i\n", ret);
return ret;
}
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
- if (ret) {
- dev_err(dev, "Error, getting RSU retry %i\n", ret);
- return ret;
- }
-
return count;
}
@@ -632,7 +648,7 @@ static ssize_t spt0_address_show(struct device *dev,
if (priv->spt0_address == INVALID_SPT_ADDRESS)
return -EIO;
- return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt0_address);
+ return sysfs_emit(buf, "0x%08lx\n", priv->spt0_address);
}
static ssize_t spt1_address_show(struct device *dev,
@@ -646,7 +662,7 @@ static ssize_t spt1_address_show(struct device *dev,
if (priv->spt1_address == INVALID_SPT_ADDRESS)
return -EIO;
- return scnprintf(buf, PAGE_SIZE, "0x%08lx\n", priv->spt1_address);
+ return sysfs_emit(buf, "0x%08lx\n", priv->spt1_address);
}
static DEVICE_ATTR_RO(current_image);
@@ -737,12 +753,19 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
return PTR_ERR(priv->chan);
}
+ ret = stratix10_svc_add_async_client(priv->chan, false);
+ if (ret) {
+ dev_err(dev, "failed to add async client\n");
+ stratix10_svc_free_channel(priv->chan);
+ return ret;
+ }
+
init_completion(&priv->completion);
platform_set_drvdata(pdev, priv);
/* get the initial state from firmware */
- ret = rsu_send_msg(priv, COMMAND_RSU_STATUS,
- 0, rsu_status_callback);
+ ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_STATUS, 0,
+ rsu_async_status_callback);
if (ret) {
dev_err(dev, "Error, getting RSU status %i\n", ret);
stratix10_svc_free_channel(priv->chan);
@@ -763,12 +786,6 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
- ret = rsu_send_msg(priv, COMMAND_RSU_RETRY, 0, rsu_retry_callback);
- if (ret) {
- dev_err(dev, "Error, getting RSU retry %i\n", ret);
- stratix10_svc_free_channel(priv->chan);
- }
-
ret = rsu_send_msg(priv, COMMAND_RSU_MAX_RETRY, 0,
rsu_max_retry_callback);
if (ret) {
@@ -776,18 +793,12 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
stratix10_svc_free_channel(priv->chan);
}
- priv->get_spt_response_buf =
- stratix10_svc_allocate_memory(priv->chan, RSU_GET_SPT_RESP_LEN);
- if (IS_ERR(priv->get_spt_response_buf)) {
- dev_err(dev, "failed to allocate get spt buffer\n");
- } else {
- ret = rsu_send_msg(priv, COMMAND_MBOX_SEND_CMD,
- RSU_GET_SPT_CMD, rsu_get_spt_callback);
- if (ret) {
- dev_err(dev, "Error, getting SPT table %i\n", ret);
- stratix10_svc_free_channel(priv->chan);
- }
+ ret = rsu_send_async_msg(dev, priv, COMMAND_RSU_GET_SPT_TABLE, 0,
+ rsu_async_get_spt_table_callback);
+ if (ret) {
+ dev_err(dev, "Error, getting SPT table %i\n", ret);
+ stratix10_svc_free_channel(priv->chan);
}
return ret;
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index e3f990d888d7..515b948ff320 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1,11 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017-2018, Intel Corporation
+ * Copyright (C) 2025, Altera Corporation
*/
+#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
+#include <linux/hashtable.h>
+#include <linux/idr.h>
#include <linux/io.h>
#include <linux/kfifo.h>
#include <linux/kthread.h>
@@ -34,7 +38,7 @@
* timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC.
*/
#define SVC_NUM_DATA_IN_FIFO 32
-#define SVC_NUM_CHANNEL 3
+#define SVC_NUM_CHANNEL 4
#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
#define BYTE_TO_WORD_SIZE 4
@@ -43,6 +47,55 @@
#define STRATIX10_RSU "stratix10-rsu"
#define INTEL_FCS "intel-fcs"
+/* Maximum number of SDM client IDs. */
+#define MAX_SDM_CLIENT_IDS 16
+/* Client ID for SIP Service Version 1. */
+#define SIP_SVC_V1_CLIENT_ID 0x1
+/* Maximum number of SDM job IDs. */
+#define MAX_SDM_JOB_IDS 16
+/* Number of bits used for asynchronous transaction hashing. */
+#define ASYNC_TRX_HASH_BITS 3
+/*
+ * Total number of transaction IDs, which is a combination of
+ * client ID and job ID.
+ */
+#define TOTAL_TRANSACTION_IDS \
+ (MAX_SDM_CLIENT_IDS * MAX_SDM_JOB_IDS)
+
+/* Minimum major version of the ATF for Asynchronous transactions. */
+#define ASYNC_ATF_MINIMUM_MAJOR_VERSION 0x3
+/* Minimum minor version of the ATF for Asynchronous transactions.*/
+#define ASYNC_ATF_MINIMUM_MINOR_VERSION 0x0
+
+/* Job ID field in the transaction ID */
+#define STRATIX10_JOB_FIELD GENMASK(3, 0)
+/* Client ID field in the transaction ID */
+#define STRATIX10_CLIENT_FIELD GENMASK(7, 4)
+/* Transaction ID mask for Stratix10 service layer */
+#define STRATIX10_TRANS_ID_FIELD GENMASK(7, 0)
+
+/* Macro to extract the job ID from a transaction ID. */
+#define STRATIX10_GET_JOBID(transaction_id) \
+ (FIELD_GET(STRATIX10_JOB_FIELD, transaction_id))
+/* Macro to set the job ID in a transaction ID. */
+#define STRATIX10_SET_JOBID(jobid) \
+ (FIELD_PREP(STRATIX10_JOB_FIELD, jobid))
+/* Macro to set the client ID in a transaction ID. */
+#define STRATIX10_SET_CLIENTID(clientid) \
+ (FIELD_PREP(STRATIX10_CLIENT_FIELD, clientid))
+/* Macro to set a transaction ID using a client ID and a job ID. */
+#define STRATIX10_SET_TRANSACTIONID(clientid, jobid) \
+ (STRATIX10_SET_CLIENTID(clientid) | STRATIX10_SET_JOBID(jobid))
+/* Macro to set a transaction ID for SIP SMC Async transactions */
+#define STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(transaction_id) \
+ (FIELD_PREP(STRATIX10_TRANS_ID_FIELD, transaction_id))
+
+/* 10-bit mask for extracting the SDM status code */
+#define STRATIX10_SDM_STATUS_MASK GENMASK(9, 0)
+/* Macro to get the SDM mailbox error status */
+#define STRATIX10_GET_SDM_STATUS_CODE(status) \
+ (FIELD_GET(STRATIX10_SDM_STATUS_MASK, status))
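The field macros pack a 4-bit client ID and a 4-bit job ID into one transaction byte; a worked example (values purely illustrative):

	u8 tid = STRATIX10_SET_TRANSACTIONID(0x3, 0x5);	/* 0x30 | 0x05 == 0x35 */
	u8 job = STRATIX10_GET_JOBID(tid);		/* 0x5 */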
+
typedef void (svc_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
@@ -52,6 +105,7 @@ struct stratix10_svc_chan;
/**
* struct stratix10_svc - svc private data
* @stratix10_svc_rsu: pointer to stratix10 RSU device
+ * @intel_svc_fcs: pointer to the FCS device
*/
struct stratix10_svc {
struct platform_device *stratix10_svc_rsu;
@@ -63,7 +117,7 @@ struct stratix10_svc {
* @sync_complete: state for a completion
* @addr: physical address of shared memory block
* @size: size of shared memory block
- * @invoke_fn: function to issue secure monitor or hypervisor call
+ * @invoke_fn: function used by service clients to issue secure monitor or hypervisor calls
*
* This struct is used to save physical address and size of shared memory
* block. The shared memory blocked is allocated by secure monitor software
@@ -122,6 +176,74 @@ struct stratix10_svc_data {
};
/**
+ * struct stratix10_svc_async_handler - Asynchronous handler for Stratix10
+ * service layer
+ * @transaction_id: Unique identifier for the transaction
+ * @achan: Pointer to the asynchronous channel structure
+ * @cb_arg: Argument to be passed to the callback function
+ * @cb: Callback function to be called upon completion
+ * @msg: Pointer to the client message structure
+ * @next: Node in the hash list
+ * @res: Response structure to store result from the secure firmware
+ *
+ * This structure is used to handle asynchronous transactions in the
+ * Stratix10 service layer. It maintains the necessary information
+ * for processing and completing asynchronous requests.
+ */
+struct stratix10_svc_async_handler {
+ u8 transaction_id;
+ struct stratix10_async_chan *achan;
+ void *cb_arg;
+ async_callback_t cb;
+ struct stratix10_svc_client_msg *msg;
+ struct hlist_node next;
+ struct arm_smccc_1_2_regs res;
+};
+
+/**
+ * struct stratix10_async_chan - Structure representing an asynchronous channel
+ * @async_client_id: Unique client identifier for the asynchronous operation
+ * @job_id_pool: Pointer to the job ID pool associated with this channel
+ */
+struct stratix10_async_chan {
+ unsigned long async_client_id;
+ struct ida job_id_pool;
+};
+
+/**
+ * struct stratix10_async_ctrl - Control structure for Stratix10
+ * asynchronous operations
+ * @initialized: Flag indicating whether the control structure has
+ * been initialized
+ * @invoke_fn: Function pointer for invoking Stratix10 service calls
+ * to EL3 secure firmware
+ * @async_id_pool: Pointer to the ID pool used for asynchronous
+ * operations
+ * @common_achan_refcount: Atomic reference count for the common
+ * asynchronous channel usage
+ * @common_async_chan: Pointer to the common asynchronous channel
+ * structure
+ * @trx_list_lock: Spinlock for protecting the transaction list
+ * operations
+ * @trx_list: Hash table for managing asynchronous transactions
+ */
+struct stratix10_async_ctrl {
+ bool initialized;
+ void (*invoke_fn)(struct stratix10_async_ctrl *actrl,
+ const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res);
+ struct ida async_id_pool;
+ atomic_t common_achan_refcount;
+ struct stratix10_async_chan *common_async_chan;
+ /* spinlock to protect trx_list hash table */
+ spinlock_t trx_list_lock;
+ DECLARE_HASHTABLE(trx_list, ASYNC_TRX_HASH_BITS);
+};
+
+/**
* struct stratix10_svc_controller - service controller
* @dev: device
* @chans: array of service channels
@@ -134,6 +256,8 @@ struct stratix10_svc_data {
* @complete_status: state for completion
* @svc_fifo_lock: protect access to service message data queue
* @invoke_fn: function to issue secure monitor call or hypervisor call
+ * @svc: manages the list of client svc drivers
+ * @actrl: async control structure
*
* This struct is used to create communication channels for service clients, to
* handle secure monitor or hypervisor call.
@@ -150,6 +274,8 @@ struct stratix10_svc_controller {
struct completion complete_status;
spinlock_t svc_fifo_lock;
svc_invoke_fn *invoke_fn;
+ struct stratix10_svc *svc;
+ struct stratix10_async_ctrl actrl;
};
/**
@@ -158,20 +284,28 @@ struct stratix10_svc_controller {
* @scl: pointer to service client which owns the channel
* @name: service client name associated with the channel
* @lock: protect access to the channel
+ * @async_chan: reference to asynchronous channel object for this channel
*
- * This struct is used by service client to communicate with service layer, each
- * service client has its own channel created by service controller.
+ * This struct is used by service client to communicate with service layer.
+ * Each service client has its own channel created by service controller.
*/
struct stratix10_svc_chan {
struct stratix10_svc_controller *ctrl;
struct stratix10_svc_client *scl;
char *name;
spinlock_t lock;
+ struct stratix10_async_chan *async_chan;
};
static LIST_HEAD(svc_ctrl);
static LIST_HEAD(svc_data_mem);
+/*
+ * svc_mem_lock protects access to the svc_data_mem list for
+ * concurrent multi-client operations
+ */
+static DEFINE_MUTEX(svc_mem_lock);
+
/**
* svc_pa_to_va() - translate physical address to virtual address
* @addr: to be translated physical address
@@ -184,6 +318,7 @@ static void *svc_pa_to_va(unsigned long addr)
struct stratix10_svc_data_mem *pmem;
pr_debug("claim back P-addr=0x%016x\n", (unsigned int)addr);
+ guard(mutex)(&svc_mem_lock);
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->paddr == addr)
return pmem->vaddr;
@@ -341,6 +476,8 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
case COMMAND_RSU_MAX_RETRY:
case COMMAND_RSU_DCMF_STATUS:
case COMMAND_FIRMWARE_VERSION:
+ case COMMAND_HWMON_READTEMP:
+ case COMMAND_HWMON_READVOLT:
cb_data->status = BIT(SVC_STATUS_OK);
cb_data->kaddr1 = &res.a1;
break;
@@ -525,7 +662,17 @@ static int svc_normal_to_secure_thread(void *data)
a1 = (unsigned long)pdata->paddr;
a2 = 0;
break;
-
+ /* for HWMON */
+ case COMMAND_HWMON_READTEMP:
+ a0 = INTEL_SIP_SMC_HWMON_READTEMP;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
+ case COMMAND_HWMON_READVOLT:
+ a0 = INTEL_SIP_SMC_HWMON_READVOLT;
+ a1 = pdata->arg[0];
+ a2 = 0;
+ break;
/* for polling */
case COMMAND_POLL_SERVICE_STATUS:
a0 = INTEL_SIP_SMC_SERVICE_COMPLETED;
@@ -923,6 +1070,591 @@ struct stratix10_svc_chan *stratix10_svc_request_channel_byname(
EXPORT_SYMBOL_GPL(stratix10_svc_request_channel_byname);
/**
+ * stratix10_svc_add_async_client - Add an asynchronous client to the
+ * Stratix10 service channel.
+ * @chan: Pointer to the Stratix10 service channel structure.
+ * @use_unique_clientid: Boolean flag indicating whether to use a
+ * unique client ID.
+ *
+ * This function adds an asynchronous client to the specified
+ * Stratix10 service channel. If the `use_unique_clientid` flag is
+ * set to true, a unique client ID is allocated for the asynchronous
+ * channel. Otherwise, a common asynchronous channel is used.
+ *
+ * Return: 0 on success, or a negative error code on failure:
+ * -EINVAL if the channel is NULL or the async controller is
+ * not initialized.
+ * -EALREADY if the async channel is already allocated.
+ * -ENOMEM if memory allocation fails.
+ * Other negative values if ID allocation fails.
+ */
+int stratix10_svc_add_async_client(struct stratix10_svc_chan *chan,
+ bool use_unique_clientid)
+{
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_ctrl *actrl;
+ struct stratix10_async_chan *achan;
+ int ret = 0;
+
+ if (!chan)
+ return -EINVAL;
+
+ ctrl = chan->ctrl;
+ actrl = &ctrl->actrl;
+
+ if (!actrl->initialized) {
+ dev_err(ctrl->dev, "Async controller not initialized\n");
+ return -EINVAL;
+ }
+
+ if (chan->async_chan) {
+ dev_err(ctrl->dev, "async channel already allocated\n");
+ return -EALREADY;
+ }
+
 +	/* Share the common channel unless a unique client ID was requested */
 +	if (!use_unique_clientid &&
 +	    atomic_read(&actrl->common_achan_refcount) > 0) {
 +		chan->async_chan = actrl->common_async_chan;
 +		atomic_inc(&actrl->common_achan_refcount);
 +		return 0;
 +	}
+
+ achan = kzalloc(sizeof(*achan), GFP_KERNEL);
+ if (!achan)
+ return -ENOMEM;
+
+ ida_init(&achan->job_id_pool);
+
+ ret = ida_alloc_max(&actrl->async_id_pool, MAX_SDM_CLIENT_IDS,
+ GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(ctrl->dev,
+ "Failed to allocate async client id\n");
+ ida_destroy(&achan->job_id_pool);
+ kfree(achan);
+ return ret;
+ }
+
+ achan->async_client_id = ret;
+ chan->async_chan = achan;
+
 +	/* First shared-channel user: publish this channel as the common one */
 +	if (!use_unique_clientid &&
 +	    atomic_read(&actrl->common_achan_refcount) == 0) {
 +		actrl->common_async_chan = achan;
 +		atomic_inc(&actrl->common_achan_refcount);
 +	}
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_add_async_client);
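 +
 +/*
 + * Illustrative usage sketch, not part of this driver: a client attaches
 + * an async client after requesting its channel ("my_client" and the use
 + * of SVC_CLIENT_RSU are assumptions for the example). Passing false
 + * shares the common async channel instead of allocating a unique ID.
 + *
 + *	chan = stratix10_svc_request_channel_byname(&my_client,
 + *						    SVC_CLIENT_RSU);
 + *	if (IS_ERR(chan))
 + *		return PTR_ERR(chan);
 + *
 + *	ret = stratix10_svc_add_async_client(chan, false);
 + *	if (ret) {
 + *		stratix10_svc_free_channel(chan);
 + *		return ret;
 + *	}
 + */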
+
+/**
+ * stratix10_svc_remove_async_client - Remove an asynchronous client
+ * from the Stratix10 service
+ * channel.
+ * @chan: Pointer to the Stratix10 service channel structure.
+ *
+ * This function removes an asynchronous client associated with the
+ * given service channel. It checks if the channel and the
+ * asynchronous channel are valid, and then proceeds to decrement
+ * the reference count for the common asynchronous channel if
+ * applicable. If the reference count reaches zero, it destroys the
+ * job ID pool and deallocates the asynchronous client ID. For
+ * non-common asynchronous channels, it directly destroys the job ID
+ * pool, deallocates the asynchronous client ID, and frees the
+ * memory allocated for the asynchronous channel.
+ *
+ * Return: 0 on success, -EINVAL if the channel or asynchronous
+ * channel is invalid.
+ */
+int stratix10_svc_remove_async_client(struct stratix10_svc_chan *chan)
+{
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_ctrl *actrl;
+ struct stratix10_async_chan *achan;
+
+ if (!chan)
+ return -EINVAL;
+
+ ctrl = chan->ctrl;
+ actrl = &ctrl->actrl;
+ achan = chan->async_chan;
+
+ if (!achan) {
+ dev_err(ctrl->dev, "async channel not allocated\n");
+ return -EINVAL;
+ }
+
+ if (achan == actrl->common_async_chan) {
+ atomic_dec(&actrl->common_achan_refcount);
+ if (atomic_read(&actrl->common_achan_refcount) == 0) {
+ ida_destroy(&achan->job_id_pool);
+ ida_free(&actrl->async_id_pool,
+ achan->async_client_id);
+ kfree(achan);
+ actrl->common_async_chan = NULL;
+ }
+ } else {
+ ida_destroy(&achan->job_id_pool);
+ ida_free(&actrl->async_id_pool, achan->async_client_id);
+ kfree(achan);
+ }
+ chan->async_chan = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_remove_async_client);
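 +
 +/*
 + * Matching teardown for the sketch above (illustrative only): remove the
 + * async client before freeing the channel, e.g. in the client's remove path.
 + *
 + *	stratix10_svc_remove_async_client(chan);
 + *	stratix10_svc_free_channel(chan);
 + */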
+
+/**
+ * stratix10_svc_async_send - Send an asynchronous message to the
+ * Stratix10 service
+ * @chan: Pointer to the service channel structure
+ * @msg: Pointer to the message to be sent
+ * @handler: Pointer to the handler for the asynchronous message
+ * used by caller for later reference.
+ * @cb: Callback function to be called upon completion
+ * @cb_arg: Argument to be passed to the callback function
+ *
+ * This function sends an asynchronous message to the SDM mailbox in
+ * EL3 secure firmware. It performs various checks and setups,
+ * including allocating a job ID, setting up the transaction ID and
 + * packaging the message for EL3 firmware. The function handles different
+ * commands by setting up the appropriate arguments for the SMC call.
+ * If the SMC call is successful, the handler is set up and the
+ * function returns 0. If the SMC call fails, appropriate error
+ * handling is performed along with cleanup of resources.
+ *
+ * Return: 0 on success, -EINVAL for invalid argument, -ENOMEM if
+ * memory is not available, -EAGAIN if EL3 firmware is busy, -EBADF
+ * if the message is rejected by EL3 firmware and -EIO on other
+ * errors from EL3 firmware.
+ */
+int stratix10_svc_async_send(struct stratix10_svc_chan *chan, void *msg,
+ void **handler, async_callback_t cb, void *cb_arg)
+{
+ struct arm_smccc_1_2_regs args = { 0 }, res = { 0 };
+ struct stratix10_svc_async_handler *handle = NULL;
+ struct stratix10_svc_client_msg *p_msg =
+ (struct stratix10_svc_client_msg *)msg;
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_ctrl *actrl;
+ struct stratix10_async_chan *achan;
+ int ret = 0;
+
+ if (!chan || !msg || !handler)
+ return -EINVAL;
+
+ achan = chan->async_chan;
+ ctrl = chan->ctrl;
+ actrl = &ctrl->actrl;
+
+ if (!actrl->initialized) {
+ dev_err(ctrl->dev, "Async controller not initialized\n");
+ return -EINVAL;
+ }
+
+ if (!achan) {
+ dev_err(ctrl->dev, "Async channel not allocated\n");
+ return -EINVAL;
+ }
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ ret = ida_alloc_max(&achan->job_id_pool, MAX_SDM_JOB_IDS,
+ GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "Failed to allocate job id\n");
+ kfree(handle);
 +		return ret;
+ }
+
+ handle->transaction_id =
+ STRATIX10_SET_TRANSACTIONID(achan->async_client_id, ret);
+ handle->cb = cb;
+ handle->msg = p_msg;
+ handle->cb_arg = cb_arg;
+ handle->achan = achan;
+
 +	/* set the transaction job ID in args.a1 */
+ args.a1 =
+ STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id);
+
+ switch (p_msg->command) {
+ case COMMAND_RSU_GET_SPT_TABLE:
+ args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_SPT;
+ break;
+ case COMMAND_RSU_STATUS:
+ args.a0 = INTEL_SIP_SMC_ASYNC_RSU_GET_ERROR_STATUS;
+ break;
+ case COMMAND_RSU_NOTIFY:
+ args.a0 = INTEL_SIP_SMC_ASYNC_RSU_NOTIFY;
+ args.a2 = p_msg->arg[0];
+ break;
+ default:
 +		dev_err(ctrl->dev, "Invalid command %d\n", p_msg->command);
+ ret = -EINVAL;
+ goto deallocate_id;
+ }
+
 +	/*
 +	 * While async_send() executes on one core, the completion
 +	 * interrupt may arrive on another core; to mitigate this, add
 +	 * the handle to the transaction list before issuing the SMC
 +	 * call. If the SMC call is rejected or reports busy, remove
 +	 * the handle again so the client can retry.
 +	 */
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ hash_add(actrl->trx_list, &handle->next,
+ handle->transaction_id);
+ }
+
+ actrl->invoke_fn(actrl, &args, &res);
+
+ switch (res.a0) {
+ case INTEL_SIP_SMC_STATUS_OK:
+ dev_dbg(ctrl->dev,
+ "Async message sent with transaction_id 0x%02x\n",
+ handle->transaction_id);
+ *handler = handle;
+ return 0;
+ case INTEL_SIP_SMC_STATUS_BUSY:
+ dev_warn(ctrl->dev, "Mailbox is busy, try after some time\n");
+ ret = -EAGAIN;
+ break;
+ case INTEL_SIP_SMC_STATUS_REJECTED:
+ dev_err(ctrl->dev, "Async message rejected\n");
+ ret = -EBADF;
+ break;
+ default:
+ dev_err(ctrl->dev,
 +			"Failed to send async message, got status %ld\n",
+ res.a0);
+ ret = -EIO;
+ }
+
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ hash_del(&handle->next);
+ }
+
+deallocate_id:
+ ida_free(&achan->job_id_pool,
+ STRATIX10_GET_JOBID(handle->transaction_id));
+ kfree(handle);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_async_send);
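 +
 +/*
 + * Illustrative send sketch, not part of this driver. The callback runs
 + * when the response is delivered; completing a completion from it is one
 + * plausible pattern. "my_callback", "done" and "value" are assumptions
 + * for the example.
 + *
 + *	struct stratix10_svc_client_msg msg = {
 + *		.command = COMMAND_RSU_NOTIFY,
 + *		.arg[0] = value,
 + *	};
 + *	void *handle;
 + *
 + *	ret = stratix10_svc_async_send(chan, &msg, &handle,
 + *				       my_callback, &done);
 + *	if (ret == -EAGAIN)
 + *		return ret;	and retry later: the EL3 mailbox was busy
 + */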
+
+/**
+ * stratix10_svc_async_prepare_response - Prepare the response data for
+ * an asynchronous transaction.
+ * @chan: Pointer to the service channel structure.
+ * @handle: Pointer to the asynchronous handler structure.
+ * @data: Pointer to the callback data structure.
+ *
+ * This function prepares the response data for an asynchronous transaction. It
+ * extracts the response data from the SMC response structure and stores it in
+ * the callback data structure. The function also logs the completion of the
+ * asynchronous transaction.
+ *
+ * Return: 0 on success, -ENOENT if the command is invalid
+ */
+static int stratix10_svc_async_prepare_response(struct stratix10_svc_chan *chan,
+ struct stratix10_svc_async_handler *handle,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_svc_client_msg *p_msg =
+ (struct stratix10_svc_client_msg *)handle->msg;
+ struct stratix10_svc_controller *ctrl = chan->ctrl;
+
+ data->status = STRATIX10_GET_SDM_STATUS_CODE(handle->res.a1);
+
+ switch (p_msg->command) {
+ case COMMAND_RSU_NOTIFY:
+ break;
+ case COMMAND_RSU_GET_SPT_TABLE:
+ data->kaddr1 = (void *)&handle->res.a2;
+ data->kaddr2 = (void *)&handle->res.a3;
+ break;
+ case COMMAND_RSU_STATUS:
 +		/*
 +		 * COMMAND_RSU_STATUS returns more elements than cb_data
 +		 * can accommodate, so pass the whole response structure
 +		 * for the client to unpack before it issues the done
 +		 * command.
 +		 */
+ data->kaddr1 = (void *)&handle->res;
+ break;
+
+ default:
 +		dev_alert(ctrl->dev, "Invalid command %d\n", p_msg->command);
+ return -ENOENT;
+ }
+ dev_dbg(ctrl->dev, "Async message completed transaction_id 0x%02x\n",
+ handle->transaction_id);
+ return 0;
+}
+
+/**
+ * stratix10_svc_async_poll - Polls the status of an asynchronous
+ * transaction.
+ * @chan: Pointer to the service channel structure.
+ * @tx_handle: Handle to the transaction being polled.
+ * @data: Pointer to the callback data structure.
+ *
+ * This function polls the status of an asynchronous transaction
+ * identified by the given transaction handle. It ensures that the
+ * necessary structures are initialized and valid before proceeding
+ * with the poll operation. The function sets up the necessary
+ * arguments for the SMC call, invokes the call, and prepares the
+ * response data if the call is successful. If the call fails, the
+ * function returns the error mapped to the SVC status error.
+ *
+ * Return: 0 on success, -EINVAL if any input parameter is invalid,
+ * -EAGAIN if the transaction is still in progress,
+ * -EPERM if the command is invalid, or other negative
+ * error codes on failure.
+ */
+int stratix10_svc_async_poll(struct stratix10_svc_chan *chan,
+ void *tx_handle,
+ struct stratix10_svc_cb_data *data)
+{
+ struct stratix10_svc_async_handler *handle;
+ struct arm_smccc_1_2_regs args = { 0 };
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_ctrl *actrl;
+ struct stratix10_async_chan *achan;
+ int ret;
+
+ if (!chan || !tx_handle || !data)
+ return -EINVAL;
+
+ ctrl = chan->ctrl;
+ actrl = &ctrl->actrl;
+ achan = chan->async_chan;
+
+ if (!achan) {
+ dev_err(ctrl->dev, "Async channel not allocated\n");
+ return -EINVAL;
+ }
+
+ handle = (struct stratix10_svc_async_handler *)tx_handle;
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ if (!hash_hashed(&handle->next)) {
 +			dev_err(ctrl->dev, "Invalid transaction handle\n");
+ return -EINVAL;
+ }
+ }
+
+ args.a0 = INTEL_SIP_SMC_ASYNC_POLL;
+ args.a1 =
+ STRATIX10_SIP_SMC_SET_TRANSACTIONID_X1(handle->transaction_id);
+
+ actrl->invoke_fn(actrl, &args, &handle->res);
+
 +	/* clear data for response */
+ memset(data, 0, sizeof(*data));
+
+ if (handle->res.a0 == INTEL_SIP_SMC_STATUS_OK) {
+ ret = stratix10_svc_async_prepare_response(chan, handle, data);
+ if (ret) {
 +			dev_err(ctrl->dev, "Error in preparation of response, %d\n", ret);
+ WARN_ON_ONCE(1);
+ }
+ return 0;
+ } else if (handle->res.a0 == INTEL_SIP_SMC_STATUS_BUSY) {
+ dev_dbg(ctrl->dev, "async message is still in progress\n");
+ return -EAGAIN;
+ }
+
+ dev_err(ctrl->dev,
 +		"Failed to poll async message, got status %ld\n",
+ handle->res.a0);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_async_poll);
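 +
 +/*
 + * Illustrative poll loop, not part of this driver: poll until the
 + * transaction completes, backing off while EL3 reports busy. A real
 + * client would bound the number of retries.
 + *
 + *	do {
 + *		ret = stratix10_svc_async_poll(chan, handle, &cb_data);
 + *		if (ret == -EAGAIN)
 + *			usleep_range(1000, 2000);
 + *	} while (ret == -EAGAIN);
 + */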
+
+/**
+ * stratix10_svc_async_done - Completes an asynchronous transaction.
+ * @chan: Pointer to the service channel structure.
+ * @tx_handle: Handle to the transaction being completed.
+ *
+ * This function completes an asynchronous transaction identified by
+ * the given transaction handle. It ensures that the necessary
+ * structures are initialized and valid before proceeding with the
+ * completion operation. The function deallocates the transaction ID,
+ * frees the memory allocated for the handler, and removes the handler
+ * from the transaction list.
+ *
+ * Return: 0 on success, -EINVAL if any input parameter is invalid,
+ * or other negative error codes on failure.
+ */
+int stratix10_svc_async_done(struct stratix10_svc_chan *chan, void *tx_handle)
+{
+ struct stratix10_svc_async_handler *handle;
+ struct stratix10_svc_controller *ctrl;
+ struct stratix10_async_chan *achan;
+ struct stratix10_async_ctrl *actrl;
+
+ if (!chan || !tx_handle)
+ return -EINVAL;
+
+ ctrl = chan->ctrl;
+ achan = chan->async_chan;
+ actrl = &ctrl->actrl;
+
+ if (!achan) {
+ dev_err(ctrl->dev, "async channel not allocated\n");
+ return -EINVAL;
+ }
+
+ handle = (struct stratix10_svc_async_handler *)tx_handle;
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ if (!hash_hashed(&handle->next)) {
 +			dev_err(ctrl->dev, "Invalid transaction handle\n");
+ return -EINVAL;
+ }
+ hash_del(&handle->next);
+ }
+ ida_free(&achan->job_id_pool,
+ STRATIX10_GET_JOBID(handle->transaction_id));
+ kfree(handle);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stratix10_svc_async_done);
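 +
 +/*
 + * Once stratix10_svc_async_poll() has returned 0 and the response has
 + * been consumed, the transaction must be released exactly once
 + * (illustrative, not part of this driver):
 + *
 + *	ret = stratix10_svc_async_done(chan, handle);
 + *	if (ret)
 + *		dev_warn(dev, "failed to release async handle %d\n", ret);
 + */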
+
+static inline void stratix10_smc_1_2(struct stratix10_async_ctrl *actrl,
+ const struct arm_smccc_1_2_regs *args,
+ struct arm_smccc_1_2_regs *res)
+{
+ arm_smccc_1_2_smc(args, res);
+}
+
+/**
+ * stratix10_svc_async_init - Initialize the Stratix10 service
+ * controller for asynchronous operations.
+ * @controller: Pointer to the Stratix10 service controller structure.
+ *
+ * This function initializes the asynchronous service controller by
+ * setting up the necessary data structures and initializing the
+ * transaction list.
+ *
+ * Return: 0 on success, -EINVAL if the controller is NULL or already
+ * initialized, -ENOMEM if memory allocation fails,
+ * -EADDRINUSE if the client ID is already reserved, or other
+ * negative error codes on failure.
+ */
+static int stratix10_svc_async_init(struct stratix10_svc_controller *controller)
+{
+ struct stratix10_async_ctrl *actrl;
+ struct arm_smccc_res res;
+ struct device *dev;
+ int ret;
+
+ if (!controller)
+ return -EINVAL;
+
+ actrl = &controller->actrl;
+
+ if (actrl->initialized)
+ return -EINVAL;
+
+ dev = controller->dev;
+
+ controller->invoke_fn(INTEL_SIP_SMC_SVC_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != INTEL_SIP_SMC_STATUS_OK ||
+ !(res.a1 > ASYNC_ATF_MINIMUM_MAJOR_VERSION ||
+ (res.a1 == ASYNC_ATF_MINIMUM_MAJOR_VERSION &&
+ res.a2 >= ASYNC_ATF_MINIMUM_MINOR_VERSION))) {
+ dev_err(dev,
+ "Intel Service Layer Driver: ATF version is not compatible for async operation\n");
+ return -EINVAL;
+ }
+
+ actrl->invoke_fn = stratix10_smc_1_2;
+
+ ida_init(&actrl->async_id_pool);
+
 +	/*
 +	 * SIP_SVC_V1_CLIENT_ID is used by V1/stratix10_svc_send() clients
 +	 * for communicating with SDM synchronously. Reserve it here so
 +	 * V3/stratix10_svc_async_send() never uses it, letting EL3
 +	 * firmware distinguish V1 messages from V3 messages.
 +	 */
+ ret = ida_alloc_range(&actrl->async_id_pool, SIP_SVC_V1_CLIENT_ID,
+ SIP_SVC_V1_CLIENT_ID, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(dev,
+ "Intel Service Layer Driver: Error on reserving SIP_SVC_V1_CLIENT_ID\n");
+ ida_destroy(&actrl->async_id_pool);
+ actrl->invoke_fn = NULL;
+ return -EADDRINUSE;
+ }
+
+ spin_lock_init(&actrl->trx_list_lock);
+ hash_init(actrl->trx_list);
+ atomic_set(&actrl->common_achan_refcount, 0);
+
+ actrl->initialized = true;
+ return 0;
+}
+
+/**
+ * stratix10_svc_async_exit - Clean up and exit the asynchronous
+ * service controller
+ * @ctrl: Pointer to the stratix10_svc_controller structure
+ *
+ * This function performs the necessary cleanup for the asynchronous
+ * service controller. It checks if the controller is valid and if it
+ * has been initialized. It then locks the transaction list and safely
+ * removes and deallocates each handler in the list. The function also
+ * removes any asynchronous clients associated with the controller's
 + * channels and destroys the asynchronous ID pool. Finally, it resets
 + * the invoke function pointer to NULL.
+ *
+ * Return: 0 on success, -EINVAL if the controller is invalid or not
+ * initialized.
+ */
+static int stratix10_svc_async_exit(struct stratix10_svc_controller *ctrl)
+{
+ struct stratix10_svc_async_handler *handler;
+ struct stratix10_async_ctrl *actrl;
+ struct hlist_node *tmp;
+ int i;
+
+ if (!ctrl)
+ return -EINVAL;
+
+ actrl = &ctrl->actrl;
+
+ if (!actrl->initialized)
+ return -EINVAL;
+
+ actrl->initialized = false;
+
+ scoped_guard(spinlock_bh, &actrl->trx_list_lock) {
+ hash_for_each_safe(actrl->trx_list, i, tmp, handler, next) {
+ ida_free(&handler->achan->job_id_pool,
+ STRATIX10_GET_JOBID(handler->transaction_id));
+ hash_del(&handler->next);
+ kfree(handler);
+ }
+ }
+
+ for (i = 0; i < SVC_NUM_CHANNEL; i++) {
+ if (ctrl->chans[i].async_chan) {
+ stratix10_svc_remove_async_client(&ctrl->chans[i]);
+ ctrl->chans[i].async_chan = NULL;
+ }
+ }
+
+ ida_destroy(&actrl->async_id_pool);
+ actrl->invoke_fn = NULL;
+
+ return 0;
+}
+
+/**
* stratix10_svc_free_channel() - free service channel
* @chan: service channel to be freed
*
@@ -990,6 +1722,7 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
p_data->flag = ct->flags;
}
} else {
+ guard(mutex)(&svc_mem_lock);
list_for_each_entry(p_mem, &svc_data_mem, node)
if (p_mem->vaddr == p_msg->payload) {
p_data->paddr = p_mem->paddr;
@@ -1072,6 +1805,7 @@ void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
if (!pmem)
return ERR_PTR(-ENOMEM);
+ guard(mutex)(&svc_mem_lock);
va = gen_pool_alloc(genpool, s);
if (!va)
return ERR_PTR(-ENOMEM);
@@ -1100,6 +1834,7 @@ EXPORT_SYMBOL_GPL(stratix10_svc_allocate_memory);
void stratix10_svc_free_memory(struct stratix10_svc_chan *chan, void *kaddr)
{
struct stratix10_svc_data_mem *pmem;
+ guard(mutex)(&svc_mem_lock);
list_for_each_entry(pmem, &svc_data_mem, node)
if (pmem->vaddr == kaddr) {
@@ -1174,11 +1909,18 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
controller->invoke_fn = invoke_fn;
init_completion(&controller->complete_status);
+ ret = stratix10_svc_async_init(controller);
+ if (ret) {
 +		dev_err(dev, "Intel Service Layer Driver: Error on stratix10_svc_async_init %d\n",
 +			ret);
+ goto err_destroy_pool;
+ }
+
fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
if (ret) {
dev_err(dev, "failed to allocate FIFO\n");
- goto err_destroy_pool;
+ goto err_async_exit;
}
spin_lock_init(&controller->svc_fifo_lock);
@@ -1197,6 +1939,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
chans[2].name = SVC_CLIENT_FCS;
spin_lock_init(&chans[2].lock);
+ chans[3].scl = NULL;
+ chans[3].ctrl = controller;
+ chans[3].name = SVC_CLIENT_HWMON;
+ spin_lock_init(&chans[3].lock);
+
list_add_tail(&controller->node, &svc_ctrl);
platform_set_drvdata(pdev, controller);
@@ -1206,6 +1953,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err_free_kfifo;
}
+ controller->svc = svc;
svc->stratix10_svc_rsu = platform_device_alloc(STRATIX10_RSU, 0);
if (!svc->stratix10_svc_rsu) {
@@ -1237,8 +1985,6 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_fcs_dev;
- dev_set_drvdata(dev, svc);
-
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
@@ -1249,6 +1995,8 @@ err_unregister_rsu_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
+err_async_exit:
+ stratix10_svc_async_exit(controller);
err_destroy_pool:
gen_pool_destroy(genpool);
return ret;
@@ -1256,8 +2004,10 @@ err_destroy_pool:
static void stratix10_svc_drv_remove(struct platform_device *pdev)
{
- struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev);
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
+ struct stratix10_svc *svc = ctrl->svc;
+
+ stratix10_svc_async_exit(ctrl);
of_platform_depopulate(ctrl->dev);
diff --git a/drivers/firmware/tegra/bpmp-tegra186.c b/drivers/firmware/tegra/bpmp-tegra186.c
index 7cfc5fdfa49d..64863db7a715 100644
--- a/drivers/firmware/tegra/bpmp-tegra186.c
+++ b/drivers/firmware/tegra/bpmp-tegra186.c
@@ -198,7 +198,10 @@ static int tegra186_bpmp_dram_init(struct tegra_bpmp *bpmp)
err = of_reserved_mem_region_to_resource(bpmp->dev->of_node, 0, &res);
if (err < 0) {
- dev_warn(bpmp->dev, "failed to parse memory region: %d\n", err);
+ if (err != -ENODEV)
+ dev_warn(bpmp->dev,
+ "failed to parse memory region: %d\n", err);
+
return err;
}
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index ae5fd1936ad3..e027a2bd8f26 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -398,6 +398,9 @@ static void ti_sci_put_one_xfer(struct ti_sci_xfers_info *minfo,
static inline int ti_sci_do_xfer(struct ti_sci_info *info,
struct ti_sci_xfer *xfer)
{
+ struct ti_sci_msg_hdr *hdr = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
+ bool response_expected = !!(hdr->flags & (TI_SCI_FLAG_REQ_ACK_ON_PROCESSED |
+ TI_SCI_FLAG_REQ_ACK_ON_RECEIVED));
int ret;
int timeout;
struct device *dev = info->dev;
@@ -409,12 +412,12 @@ static inline int ti_sci_do_xfer(struct ti_sci_info *info,
ret = 0;
- if (system_state <= SYSTEM_RUNNING) {
+ if (response_expected && system_state <= SYSTEM_RUNNING) {
/* And we wait for the response. */
timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
if (!wait_for_completion_timeout(&xfer->done, timeout))
ret = -ETIMEDOUT;
- } else {
+ } else if (response_expected) {
/*
* If we are !running, we cannot use wait_for_completion_timeout
* during noirq phase, so we must manually poll the completion.
@@ -1670,6 +1673,9 @@ fail:
static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
u32 ctx_lo, u32 ctx_hi, u32 debug_flags)
{
+ u32 msg_flags = mode == TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO ?
+ TI_SCI_FLAG_REQ_GENERIC_NORESPONSE :
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED;
struct ti_sci_info *info;
struct ti_sci_msg_req_prepare_sleep *req;
struct ti_sci_msg_hdr *resp;
@@ -1686,7 +1692,7 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
dev = info->dev;
xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PREPARE_SLEEP,
- TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ msg_flags,
sizeof(*req), sizeof(*resp));
if (IS_ERR(xfer)) {
ret = PTR_ERR(xfer);
@@ -1706,11 +1712,12 @@ static int ti_sci_cmd_prepare_sleep(const struct ti_sci_handle *handle, u8 mode,
goto fail;
}
- resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
-
- if (!ti_sci_is_response_ack(resp)) {
- dev_err(dev, "Failed to prepare sleep\n");
- ret = -ENODEV;
+ if (msg_flags == TI_SCI_FLAG_REQ_ACK_ON_PROCESSED) {
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+ if (!ti_sci_is_response_ack(resp)) {
+ dev_err(dev, "Failed to prepare sleep\n");
+ ret = -ENODEV;
+ }
}
fail:
@@ -2015,6 +2022,47 @@ fail:
return ret;
}
+/**
 + * ti_sci_cmd_lpm_abort() - Abort LPM entry by clearing the selected low-power mode
+ * @dev: Device pointer corresponding to the SCI entity
+ *
+ * Return: 0 if all went well, else returns appropriate error value.
+ */
+static int ti_sci_cmd_lpm_abort(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+ struct ti_sci_msg_hdr *req;
+ struct ti_sci_msg_hdr *resp;
+ struct ti_sci_xfer *xfer;
+ int ret = 0;
+
+ xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_LPM_ABORT,
+ TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
+ sizeof(*req), sizeof(*resp));
+ if (IS_ERR(xfer)) {
+ ret = PTR_ERR(xfer);
+ dev_err(dev, "Message alloc failed(%d)\n", ret);
+ return ret;
+ }
+ req = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ ret = ti_sci_do_xfer(info, xfer);
+ if (ret) {
+ dev_err(dev, "Mbox send fail %d\n", ret);
+ goto fail;
+ }
+
+ resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
+
+ if (!ti_sci_is_response_ack(resp))
+ ret = -ENODEV;
+
+fail:
+ ti_sci_put_one_xfer(&info->minfo, xfer);
+
+ return ret;
+}
+
static int ti_sci_cmd_core_reboot(const struct ti_sci_handle *handle)
{
struct ti_sci_info *info;
@@ -3623,6 +3671,78 @@ devm_ti_sci_get_resource(const struct ti_sci_handle *handle, struct device *dev,
}
EXPORT_SYMBOL_GPL(devm_ti_sci_get_resource);
+/*
+ * Iterate all device nodes that have a wakeup-source property and check if one
+ * of the possible phandles points to a Partial-IO system state. If it
 + * does, resolve the device node to an actual device and check if wakeup is
+ * enabled.
+ */
+static bool ti_sci_partial_io_wakeup_enabled(struct ti_sci_info *info)
+{
+ struct device_node *wakeup_node = NULL;
+
+ for_each_node_with_property(wakeup_node, "wakeup-source") {
+ struct of_phandle_iterator it;
+ int err;
+
+ of_for_each_phandle(&it, err, wakeup_node, "wakeup-source", NULL, 0) {
+ struct platform_device *pdev;
+ bool may_wakeup;
+
+ /*
+ * Continue if idle-state-name is not off-wake. Return
+ * value is the index of the string which should be 0 if
+ * off-wake is present.
+ */
+ if (of_property_match_string(it.node, "idle-state-name", "off-wake"))
+ continue;
+
+ pdev = of_find_device_by_node(wakeup_node);
+ if (!pdev)
+ continue;
+
+ may_wakeup = device_may_wakeup(&pdev->dev);
+ put_device(&pdev->dev);
+
+ if (may_wakeup) {
+ dev_dbg(info->dev, "%pOF identified as wakeup source for Partial-IO\n",
+ wakeup_node);
+ of_node_put(it.node);
+ of_node_put(wakeup_node);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static int ti_sci_sys_off_handler(struct sys_off_data *data)
+{
+ struct ti_sci_info *info = data->cb_data;
+ const struct ti_sci_handle *handle = &info->handle;
+ bool enter_partial_io = ti_sci_partial_io_wakeup_enabled(info);
+ int ret;
+
+ if (!enter_partial_io)
+ return NOTIFY_DONE;
+
+ dev_info(info->dev, "Entering Partial-IO because a powered wakeup-enabled device was found.\n");
+
+ ret = ti_sci_cmd_prepare_sleep(handle, TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO, 0, 0, 0);
+ if (ret) {
+ dev_err(info->dev,
+ "Failed to enter Partial-IO %pe, trying to do an emergency restart\n",
+ ERR_PTR(ret));
+ emergency_restart();
+ }
+
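 +	/*
 +	 * Allow the Partial-IO power-off to take effect; if we are
 +	 * still running after 5 s, fall back to an emergency restart.
 +	 */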
+ mdelay(5000);
+ emergency_restart();
+
+ return NOTIFY_DONE;
+}
+
static int tisci_reboot_handler(struct sys_off_data *data)
{
struct ti_sci_info *info = data->cb_data;
@@ -3665,7 +3785,7 @@ static int ti_sci_prepare_system_suspend(struct ti_sci_info *info)
}
}
-static int __maybe_unused ti_sci_suspend(struct device *dev)
+static int ti_sci_suspend(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
struct device *cpu_dev, *cpu_dev_max = NULL;
@@ -3705,19 +3825,21 @@ static int __maybe_unused ti_sci_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ti_sci_suspend_noirq(struct device *dev)
+static int ti_sci_suspend_noirq(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
int ret = 0;
- ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
- if (ret)
- return ret;
+ if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_ENABLE);
+ if (ret)
+ return ret;
+ }
return 0;
}
-static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
+static int ti_sci_resume_noirq(struct device *dev)
{
struct ti_sci_info *info = dev_get_drvdata(dev);
int ret = 0;
@@ -3726,9 +3848,11 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
u8 pin;
u8 mode;
- ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
- if (ret)
- return ret;
+ if (info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION) {
+ ret = ti_sci_cmd_set_io_isolation(&info->handle, TISCI_MSG_VALUE_IO_DISABLE);
+ if (ret)
+ return ret;
+ }
ret = ti_sci_msg_cmd_lpm_wake_reason(&info->handle, &source, &time, &pin, &mode);
/* Do not fail to resume on error as the wake reason is not critical */
@@ -3739,12 +3863,21 @@ static int __maybe_unused ti_sci_resume_noirq(struct device *dev)
return 0;
}
+static void ti_sci_pm_complete(struct device *dev)
+{
+ struct ti_sci_info *info = dev_get_drvdata(dev);
+
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT) {
+ if (ti_sci_cmd_lpm_abort(dev))
+ dev_err(dev, "LPM clear selection failed.\n");
+ }
+}
+
static const struct dev_pm_ops ti_sci_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .suspend = ti_sci_suspend,
- .suspend_noirq = ti_sci_suspend_noirq,
- .resume_noirq = ti_sci_resume_noirq,
-#endif
+ .suspend = pm_sleep_ptr(ti_sci_suspend),
+ .suspend_noirq = pm_sleep_ptr(ti_sci_suspend_noirq),
+ .resume_noirq = pm_sleep_ptr(ti_sci_resume_noirq),
+ .complete = pm_sleep_ptr(ti_sci_pm_complete),
};
/* Description for K2G */
@@ -3876,10 +4009,12 @@ static int ti_sci_probe(struct platform_device *pdev)
}
ti_sci_msg_cmd_query_fw_caps(&info->handle, &info->fw_caps);
- dev_dbg(dev, "Detected firmware capabilities: %s%s%s\n",
+ dev_dbg(dev, "Detected firmware capabilities: %s%s%s%s%s\n",
info->fw_caps & MSG_FLAG_CAPS_GENERIC ? "Generic" : "",
info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO ? " Partial-IO" : "",
- info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : ""
+ info->fw_caps & MSG_FLAG_CAPS_LPM_DM_MANAGED ? " DM-Managed" : "",
+ info->fw_caps & MSG_FLAG_CAPS_LPM_ABORT ? " LPM-Abort" : "",
+ info->fw_caps & MSG_FLAG_CAPS_IO_ISOLATION ? " IO-Isolation" : ""
);
ti_sci_setup_ops(info);
@@ -3890,6 +4025,19 @@ static int ti_sci_probe(struct platform_device *pdev)
goto out;
}
+ if (info->fw_caps & MSG_FLAG_CAPS_LPM_PARTIAL_IO) {
+ ret = devm_register_sys_off_handler(dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_FIRMWARE,
+ ti_sci_sys_off_handler,
+ info);
+ if (ret) {
+ dev_err(dev, "Failed to register sys_off_handler %pe\n",
+ ERR_PTR(ret));
+ goto out;
+ }
+ }
+
dev_info(dev, "ABI: %d.%d (firmware rev 0x%04x '%s')\n",
info->handle.version.abi_major, info->handle.version.abi_minor,
info->handle.version.firmware_revision,
@@ -3899,7 +4047,13 @@ static int ti_sci_probe(struct platform_device *pdev)
list_add_tail(&info->node, &ti_sci_list);
mutex_unlock(&ti_sci_list_mutex);
- return of_platform_populate(dev->of_node, NULL, NULL, dev);
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret) {
+ dev_err(dev, "platform_populate failed %pe\n", ERR_PTR(ret));
+ goto out;
+ }
+ return 0;
+
out:
if (!IS_ERR(info->chan_tx))
mbox_free_channel(info->chan_tx);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 053387d7baa0..91f234550c43 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -42,6 +42,7 @@
#define TI_SCI_MSG_SET_IO_ISOLATION 0x0307
#define TI_SCI_MSG_LPM_SET_DEVICE_CONSTRAINT 0x0309
#define TI_SCI_MSG_LPM_SET_LATENCY_CONSTRAINT 0x030A
+#define TI_SCI_MSG_LPM_ABORT 0x0311
/* Resource Management Requests */
#define TI_SCI_MSG_GET_RESOURCE_RANGE 0x1500
@@ -147,6 +148,8 @@ struct ti_sci_msg_req_reboot {
* MSG_FLAG_CAPS_GENERIC: Generic capability (LPM not supported)
* MSG_FLAG_CAPS_LPM_PARTIAL_IO: Partial IO in LPM
* MSG_FLAG_CAPS_LPM_DM_MANAGED: LPM can be managed by DM
 + * MSG_FLAG_CAPS_IO_ISOLATION: IO Isolation support
 + * MSG_FLAG_CAPS_LPM_ABORT: Abort entry to LPM
*
* Response to a generic message with message type TI_SCI_MSG_QUERY_FW_CAPS
* providing currently available SOC/firmware capabilities. SoC that don't
@@ -157,6 +160,8 @@ struct ti_sci_msg_resp_query_fw_caps {
#define MSG_FLAG_CAPS_GENERIC TI_SCI_MSG_FLAG(0)
#define MSG_FLAG_CAPS_LPM_PARTIAL_IO TI_SCI_MSG_FLAG(4)
#define MSG_FLAG_CAPS_LPM_DM_MANAGED TI_SCI_MSG_FLAG(5)
 +#define MSG_FLAG_CAPS_IO_ISOLATION	TI_SCI_MSG_FLAG(7)
 +#define MSG_FLAG_CAPS_LPM_ABORT		TI_SCI_MSG_FLAG(9)
#define MSG_MASK_CAPS_LPM GENMASK_ULL(4, 1)
u64 fw_caps;
} __packed;
@@ -592,6 +597,11 @@ struct ti_sci_msg_resp_get_clock_freq {
struct ti_sci_msg_req_prepare_sleep {
struct ti_sci_msg_hdr hdr;
+/*
+ * When sending prepare_sleep with MODE_PARTIAL_IO no response will be sent,
+ * no further steps are required.
+ */
+#define TISCI_MSG_VALUE_SLEEP_MODE_PARTIAL_IO 0x03
#define TISCI_MSG_VALUE_SLEEP_MODE_DM_MANAGED 0xfd
u8 mode;
u32 ctx_lo;
diff --git a/drivers/firmware/xilinx/Makefile b/drivers/firmware/xilinx/Makefile
index 875a53703c82..70f8f02f14a3 100644
--- a/drivers/firmware/xilinx/Makefile
+++ b/drivers/firmware/xilinx/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Xilinx firmwares
-obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o
+obj-$(CONFIG_ZYNQMP_FIRMWARE) += zynqmp.o zynqmp-ufs.o
obj-$(CONFIG_ZYNQMP_FIRMWARE_DEBUG) += zynqmp-debug.o
diff --git a/drivers/firmware/xilinx/zynqmp-debug.c b/drivers/firmware/xilinx/zynqmp-debug.c
index 22853ae0efdf..36efb827f3da 100644
--- a/drivers/firmware/xilinx/zynqmp-debug.c
+++ b/drivers/firmware/xilinx/zynqmp-debug.c
@@ -3,6 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer for debugfs APIs
*
* Copyright (C) 2014-2018 Xilinx, Inc.
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -38,6 +39,7 @@ static struct pm_api_info pm_api_list[] = {
PM_API(PM_RELEASE_NODE),
PM_API(PM_SET_REQUIREMENT),
PM_API(PM_GET_API_VERSION),
+ PM_API(PM_GET_NODE_STATUS),
PM_API(PM_REGISTER_NOTIFIER),
PM_API(PM_RESET_ASSERT),
PM_API(PM_RESET_GET_STATUS),
@@ -167,6 +169,17 @@ static int process_api_request(u32 pm_id, u64 *pm_api_arg, u32 *pm_api_ret)
pm_api_arg[3] ? pm_api_arg[3] :
ZYNQMP_PM_REQUEST_ACK_BLOCKING);
break;
+ case PM_GET_NODE_STATUS:
+ ret = zynqmp_pm_get_node_status(pm_api_arg[0],
+ &pm_api_ret[0],
+ &pm_api_ret[1],
+ &pm_api_ret[2]);
+ if (!ret)
+ sprintf(debugfs_buf,
+ "GET_NODE_STATUS:\n\tNodeId: %llu\n\tStatus: %u\n\tRequirements: %u\n\tUsage: %u\n",
+ pm_api_arg[0], pm_api_ret[0],
+ pm_api_ret[1], pm_api_ret[2]);
+ break;
case PM_REGISTER_NOTIFIER:
ret = zynqmp_pm_register_notifier(pm_api_arg[0],
pm_api_arg[1] ?
diff --git a/drivers/firmware/xilinx/zynqmp-ufs.c b/drivers/firmware/xilinx/zynqmp-ufs.c
new file mode 100644
index 000000000000..85da8a822f3a
--- /dev/null
+++ b/drivers/firmware/xilinx/zynqmp-ufs.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Firmware Layer for UFS APIs
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/module.h>
+
+/* Register Node IDs */
+#define PM_REGNODE_PMC_IOU_SLCR 0x30000002 /* PMC IOU SLCR */
+#define PM_REGNODE_EFUSE_CACHE 0x30000003 /* EFUSE Cache */
+
+/* Register Offsets for PMC IOU SLCR */
+#define SRAM_CSR_OFFSET 0x104C /* SRAM Control and Status */
+#define TXRX_CFGRDY_OFFSET 0x1054 /* M-PHY TX-RX Config ready */
+
+/* Masks for SRAM Control and Status Register */
+#define SRAM_CSR_INIT_DONE_MASK BIT(0) /* SRAM initialization done */
+#define SRAM_CSR_EXT_LD_DONE_MASK BIT(1) /* SRAM External load done */
+#define SRAM_CSR_BYPASS_MASK BIT(2) /* Bypass SRAM interface */
+
+/* Mask to check M-PHY TX-RX configuration readiness */
+#define TX_RX_CFG_RDY_MASK GENMASK(3, 0)
+
+/* Register Offsets for EFUSE Cache */
+#define UFS_CAL_1_OFFSET 0xBE8 /* UFS Calibration Value */
+
+/**
+ * zynqmp_pm_is_mphy_tx_rx_config_ready - check M-PHY TX-RX config readiness
+ * @is_ready: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_mphy_tx_rx_config_ready(bool *is_ready)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_ready)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, TXRX_CFGRDY_OFFSET, &regval);
+ if (ret)
+ return ret;
+
 +	*is_ready = !!(regval & TX_RX_CFG_RDY_MASK);
 +
 +	return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_mphy_tx_rx_config_ready);
+
+/**
+ * zynqmp_pm_is_sram_init_done - check SRAM initialization
+ * @is_done: Store output status (true/false)
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_is_sram_init_done(bool *is_done)
+{
+ u32 regval;
+ int ret;
+
+ if (!is_done)
+ return -EINVAL;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &regval);
+ if (ret)
+ return ret;
+
 +	*is_done = !!(regval & SRAM_CSR_INIT_DONE_MASK);
 +
 +	return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_is_sram_init_done);
+
+/**
+ * zynqmp_pm_set_sram_bypass - Set SRAM bypass Control
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sram_bypass(void)
+{
+ u32 sram_csr;
+ int ret;
+
+ ret = zynqmp_pm_sec_read_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET, &sram_csr);
+ if (ret)
+ return ret;
+
+ sram_csr &= ~SRAM_CSR_EXT_LD_DONE_MASK;
+ sram_csr |= SRAM_CSR_BYPASS_MASK;
+
+ return zynqmp_pm_sec_mask_write_reg(PM_REGNODE_PMC_IOU_SLCR, SRAM_CSR_OFFSET,
+ GENMASK(2, 1), sram_csr);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sram_bypass);
+
+/**
+ * zynqmp_pm_get_ufs_calibration_values - Read UFS calibration values
+ * @val: Store the calibration value
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_get_ufs_calibration_values(u32 *val)
+{
+ return zynqmp_pm_sec_read_reg(PM_REGNODE_EFUSE_CACHE, UFS_CAL_1_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_ufs_calibration_values);
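 +
 +/*
 + * Illustrative sequencing for a UFS host driver, not part of this file;
 + * the exact flow and the -EBUSY fallback are assumptions for the example.
 + *
 + *	bool ready, done;
 + *	u32 cal;
 + *
 + *	ret = zynqmp_pm_is_mphy_tx_rx_config_ready(&ready);
 + *	if (ret || !ready)
 + *		return ret ? ret : -EBUSY;
 + *
 + *	ret = zynqmp_pm_is_sram_init_done(&done);
 + *	if (!ret && !done)
 + *		ret = zynqmp_pm_set_sram_bypass();
 + *	if (ret)
 + *		return ret;
 + *
 + *	return zynqmp_pm_get_ufs_calibration_values(&cal);
 + */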
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 02da3e48bc8f..ad811f40e059 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,7 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
- * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc.
+ * Copyright (C) 2022 - 2025 Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -72,6 +72,15 @@ struct pm_api_feature_data {
struct hlist_node hentry;
};
+struct platform_fw_data {
+ /*
+ * Family code for platform.
+ */
+ const u32 family_code;
+};
+
+static struct platform_fw_data *active_platform_fw_data;
+
static const struct mfd_cell firmware_devs[] = {
{
.name = "zynqmp_power_controller",
@@ -464,8 +473,6 @@ int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...)
static u32 pm_api_version;
static u32 pm_tz_version;
-static u32 pm_family_code;
-static u32 pm_sub_family_code;
int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset)
{
@@ -532,32 +539,18 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid);
/**
* zynqmp_pm_get_family_info() - Get family info of platform
* @family: Returned family code value
- * @subfamily: Returned sub-family code value
*
* Return: Returns status, either success or error+reason
*/
-int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily)
+int zynqmp_pm_get_family_info(u32 *family)
{
- u32 ret_payload[PAYLOAD_ARG_CNT];
- u32 idcode;
- int ret;
+ if (!active_platform_fw_data)
+ return -ENODEV;
- /* Check is family or sub-family code already received */
- if (pm_family_code && pm_sub_family_code) {
- *family = pm_family_code;
- *subfamily = pm_sub_family_code;
- return 0;
- }
-
- ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0);
- if (ret < 0)
- return ret;
+ if (!family)
+ return -EINVAL;
- idcode = ret_payload[1];
- pm_family_code = FIELD_GET(FAMILY_CODE_MASK, idcode);
- pm_sub_family_code = FIELD_GET(SUB_FAMILY_CODE_MASK, idcode);
- *family = pm_family_code;
- *subfamily = pm_sub_family_code;
+ *family = active_platform_fw_data->family_code;
return 0;
}
@@ -1238,8 +1231,13 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
u32 value)
{
int ret;
+ u32 pm_family_code;
+
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
+ if (ret)
+ return ret;
- if (pm_family_code == ZYNQMP_FAMILY_CODE &&
+ if (pm_family_code == PM_ZYNQMP_FAMILY_CODE &&
param == PM_PINCTRL_CONFIG_TRI_STATE) {
ret = zynqmp_pm_feature(PM_PINCTRL_CONFIG_PARAM_SET);
if (ret < PM_PINCTRL_PARAM_SET_VERSION) {
@@ -1414,6 +1412,45 @@ int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config);
/**
+ * zynqmp_pm_get_node_status - PM call to request a node's current power state
+ * @node: ID of the component or sub-system in question
+ * @status: Current operating state of the requested node
+ * @requirements: Current requirements asserted on the node,
+ * used for slave nodes only.
+ * @usage: Usage information, used for slave nodes only:
+ * PM_USAGE_NO_MASTER - No master is currently using
+ * the node
+ * PM_USAGE_CURRENT_MASTER - Only requesting master is
+ * currently using the node
+ * PM_USAGE_OTHER_MASTER - Only other masters are
+ * currently using the node
+ * PM_USAGE_BOTH_MASTERS - Both the current and at least
+ * one other master is currently
+ * using the node
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_get_node_status(const u32 node, u32 *const status,
+ u32 *const requirements, u32 *const usage)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!status || !requirements || !usage)
+ return -EINVAL;
+
 +	ret = zynqmp_pm_invoke_fn(PM_GET_NODE_STATUS, ret_payload, 1, node);
 +	if (!ret && ret_payload[0] == XST_PM_SUCCESS) {
+ *status = ret_payload[1];
+ *requirements = ret_payload[2];
+ *usage = ret_payload[3];
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_get_node_status);
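 +
 +/*
 + * Illustrative caller sketch, not part of this file ("node_id" and the
 + * dev_dbg consumer are assumptions for the example):
 + *
 + *	u32 status, requirements, usage;
 + *
 + *	ret = zynqmp_pm_get_node_status(node_id, &status, &requirements,
 + *					&usage);
 + *	if (!ret)
 + *		dev_dbg(dev, "node %u: status %u req %u usage %u\n",
 + *			node_id, status, requirements, usage);
 + */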
+
+/**
* zynqmp_pm_force_pwrdwn - PM call to request for another PU or subsystem to
* be powered down forcefully
* @node: Node ID of the targeted PU or subsystem
@@ -1617,6 +1654,52 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
}
/**
+ * zynqmp_pm_sec_read_reg - PM call to securely read from given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @ret_value: Output data read from the given offset after
+ * firmware access policy is successfully enforced
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_read_reg(u32 node_id, u32 offset, u32 *ret_value)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ u32 count = 1;
+ int ret;
+
+ if (!ret_value)
+ return -EINVAL;
+
 +	ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 4, node_id, IOCTL_READ_REG,
 +				  offset, count);
 +	if (ret)
 +		return ret;
 +
 +	*ret_value = ret_payload[1];
 +
 +	return 0;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_read_reg);
+
+/**
+ * zynqmp_pm_sec_mask_write_reg - PM call to securely write to given offset
+ * of the node
+ * @node_id: Node Id of the device
+ * @offset: Offset to be used (20-bit)
+ * @mask: Mask to be used
+ * @value: Value to be written
+ *
+ * Return: Returns 0 on success or error value on failure
+ */
+int zynqmp_pm_sec_mask_write_reg(const u32 node_id, const u32 offset, u32 mask,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 5, node_id, IOCTL_MASK_WRITE_REG,
+ offset, mask, value);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_sec_mask_write_reg);
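 +
 +/*
 + * Together these two helpers give a secure read-modify-write, as the UFS
 + * helper zynqmp_pm_set_sram_bypass() earlier in this series demonstrates.
 + * A generic caller sketch ("node", "offset", "mask" and "bits" are
 + * placeholders, not part of this file):
 + *
 + *	u32 val;
 + *
 + *	ret = zynqmp_pm_sec_read_reg(node, offset, &val);
 + *	if (!ret)
 + *		ret = zynqmp_pm_sec_mask_write_reg(node, offset, mask,
 + *						   (val & ~mask) | bits);
 + */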
+
+/**
* zynqmp_pm_set_sd_config - PM call to set value of SD config registers
* @node: SD node ID
* @config: The config type of SD registers
@@ -2007,12 +2090,18 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct zynqmp_devinfo *devinfo;
+ u32 pm_family_code;
int ret;
ret = get_set_conduit_method(dev->of_node);
if (ret)
return ret;
+ /* Get platform-specific firmware data from device tree match */
+ active_platform_fw_data = (struct platform_fw_data *)device_get_match_data(dev);
+ if (!active_platform_fw_data)
+ return -EINVAL;
+
/* Get SiP SVC version number */
ret = zynqmp_pm_get_sip_svc_version(&sip_svc_version);
if (ret)
@@ -2045,8 +2134,8 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
pr_info("%s Platform Management API v%d.%d\n", __func__,
pm_api_version >> 16, pm_api_version & 0xFFFF);
- /* Get the Family code and sub family code of platform */
- ret = zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code);
+ /* Get the Family code of platform */
+ ret = zynqmp_pm_get_family_info(&pm_family_code);
if (ret < 0)
return ret;
@@ -2073,7 +2162,7 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
- if (pm_family_code == VERSAL_FAMILY_CODE) {
+ if (pm_family_code != PM_ZYNQMP_FAMILY_CODE) {
em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
-1, NULL, 0);
if (IS_ERR(em_dev))
@@ -2113,9 +2202,22 @@ static void zynqmp_firmware_sync_state(struct device *dev)
dev_warn(dev, "failed to release power management to firmware\n");
}
+static const struct platform_fw_data platform_fw_data_versal = {
+ .family_code = PM_VERSAL_FAMILY_CODE,
+};
+
+static const struct platform_fw_data platform_fw_data_versal_net = {
+ .family_code = PM_VERSAL_NET_FAMILY_CODE,
+};
+
+static const struct platform_fw_data platform_fw_data_zynqmp = {
+ .family_code = PM_ZYNQMP_FAMILY_CODE,
+};
+
static const struct of_device_id zynqmp_firmware_of_match[] = {
- {.compatible = "xlnx,zynqmp-firmware"},
- {.compatible = "xlnx,versal-firmware"},
+ {.compatible = "xlnx,zynqmp-firmware", .data = &platform_fw_data_zynqmp},
+ {.compatible = "xlnx,versal-firmware", .data = &platform_fw_data_versal},
+ {.compatible = "xlnx,versal-net-firmware", .data = &platform_fw_data_versal_net},
{},
};
MODULE_DEVICE_TABLE(of, zynqmp_firmware_of_match);
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 5af0bd33890c..44badfd11e1b 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -22,9 +22,6 @@
#define TIMEOUT_US 2000 /* CVP STATUS timeout for USERMODE polling */
/* Vendor Specific Extended Capability Registers */
-#define VSE_PCIE_EXT_CAP_ID 0x0
-#define VSE_PCIE_EXT_CAP_ID_VAL 0x000b /* 16bit */
-
#define VSE_CVP_STATUS 0x1c /* 32bit */
#define VSE_CVP_STATUS_CFG_RDY BIT(18) /* CVP_CONFIG_READY */
#define VSE_CVP_STATUS_CFG_ERR BIT(19) /* CVP_CONFIG_ERROR */
@@ -577,25 +574,18 @@ static int altera_cvp_probe(struct pci_dev *pdev,
{
struct altera_cvp_conf *conf;
struct fpga_manager *mgr;
- int ret, offset;
- u16 cmd, val;
+ u16 cmd, offset;
u32 regval;
-
- /* Discover the Vendor Specific Offset for this device */
- offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
- if (!offset) {
- dev_err(&pdev->dev, "No Vendor Specific Offset.\n");
- return -ENODEV;
- }
+ int ret;
/*
* First check if this is the expected FPGA device. PCI config
* space access works without enabling the PCI device, memory
* space access is enabled further down.
*/
- pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
- if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
- dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
+ offset = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_ALTERA, 0x1172);
+ if (!offset) {
 +		dev_err(&pdev->dev, "No Altera VSEC capability found\n");
return -ENODEV;
}
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 8756504340de..e294e3a6cc03 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -57,6 +57,12 @@ static int xilinx_spi_probe(struct spi_device *spi)
return xilinx_core_probe(core);
}
+static const struct spi_device_id xilinx_spi_ids[] = {
+ { "fpga-slave-serial" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, xilinx_spi_ids);
+
#ifdef CONFIG_OF
static const struct of_device_id xlnx_spi_of_match[] = {
{
@@ -73,6 +79,7 @@ static struct spi_driver xilinx_slave_spi_driver = {
.of_match_table = of_match_ptr(xlnx_spi_of_match),
},
.probe = xilinx_spi_probe,
+ .id_table = xilinx_spi_ids,
};
module_spi_driver(xilinx_slave_spi_driver)
diff --git a/drivers/fsi/fsi-occ.c b/drivers/fsi/fsi-occ.c
index d3e6bf37878a..e41ef12fa095 100644
--- a/drivers/fsi/fsi-occ.c
+++ b/drivers/fsi/fsi-occ.c
@@ -22,9 +22,9 @@
#include <linux/uaccess.h>
#include <linux/unaligned.h>
-#define OCC_SRAM_BYTES 4096
-#define OCC_CMD_DATA_BYTES 4090
-#define OCC_RESP_DATA_BYTES 4089
+#define OCC_SRAM_BYTES 8192
+#define OCC_CMD_DATA_BYTES 8186
+#define OCC_RESP_DATA_BYTES 8185
#define OCC_P9_SRAM_CMD_ADDR 0xFFFBE000
#define OCC_P9_SRAM_RSP_ADDR 0xFFFBF000
@@ -86,7 +86,7 @@ static int occ_open(struct inode *inode, struct file *file)
if (!client)
return -ENOMEM;
- client->buffer = (u8 *)__get_free_page(GFP_KERNEL);
+ client->buffer = kvmalloc(OCC_SRAM_BYTES, GFP_KERNEL);
if (!client->buffer) {
kfree(client);
return -ENOMEM;
@@ -97,10 +97,6 @@ static int occ_open(struct inode *inode, struct file *file)
file->private_data = client;
get_device(occ->dev);
- /* We allocate a 1-page buffer, make sure it all fits */
- BUILD_BUG_ON((OCC_CMD_DATA_BYTES + 3) > PAGE_SIZE);
- BUILD_BUG_ON((OCC_RESP_DATA_BYTES + 7) > PAGE_SIZE);
-
return 0;
}
@@ -176,7 +172,7 @@ static ssize_t occ_write(struct file *file, const char __user *buf,
}
/* Submit command; 4 bytes before the data and 2 bytes after */
- rlen = PAGE_SIZE;
+ rlen = OCC_SRAM_BYTES;
rc = fsi_occ_submit(client->occ->dev, cmd, data_length + 6, cmd,
&rlen);
if (rc)
@@ -200,7 +196,7 @@ static int occ_release(struct inode *inode, struct file *file)
struct occ_client *client = file->private_data;
put_device(client->occ->dev);
- free_page((unsigned long)client->buffer);
+ kvfree(client->buffer);
kfree(client);
return 0;
diff --git a/drivers/fwctl/mlx5/main.c b/drivers/fwctl/mlx5/main.c
index f93aa0cecdb9..3dacccf7855c 100644
--- a/drivers/fwctl/mlx5/main.c
+++ b/drivers/fwctl/mlx5/main.c
@@ -58,6 +58,9 @@ enum {
MLX5_CMD_OP_QUERY_DC_CNAK_TRACE = 0x716,
MLX5_CMD_OP_QUERY_NVMF_BACKEND_CONTROLLER = 0x722,
MLX5_CMD_OP_QUERY_NVMF_NAMESPACE_CONTEXT = 0x728,
+ MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID = 0x730,
+ MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT = 0x731,
+ MLX5_CMD_OP_QUERY_DELEGATED_VHCA = 0x732,
MLX5_CMD_OP_QUERY_BURST_SIZE = 0x813,
MLX5_CMD_OP_QUERY_DIAGNOSTIC_PARAMS = 0x819,
MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS = 0x820,
@@ -188,6 +191,7 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
* filter commands manually for now.
*/
switch (opcode) {
+ case MLX5_CMD_OP_MODIFY_CONG_STATUS:
case MLX5_CMD_OP_POSTPONE_CONNECTED_QP_TIMEOUT:
case MLX5_CMD_OP_QUERY_ADAPTER:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
@@ -196,6 +200,7 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OPCODE_QUERY_VUID:
+ case MLX5_CMD_OP_DELEGATE_VHCA_MANAGEMENT:
/*
* FW limits SET_HCA_CAP on the tools UID to only the other function
* mode which is used for function pre-configuration
@@ -281,6 +286,8 @@ static bool mlx5ctl_validate_rpc(const void *in, enum fwctl_rpc_scope scope)
case MLX5_CMD_OP_QUERY_XRQ:
case MLX5_CMD_OP_USER_QUERY_XRQ_DC_PARAMS_ENTRY:
case MLX5_CMD_OP_USER_QUERY_XRQ_ERROR_PARAMS:
+ case MLX5_CMD_OP_QUERY_ADJACENT_FUNCTIONS_ID:
+ case MLX5_CMD_OP_QUERY_DELEGATED_VHCA:
return scope >= FWCTL_RPC_DEBUG_READ_ONLY;
case MLX5_CMD_OP_SET_DIAGNOSTIC_PARAMS:
@@ -345,7 +352,7 @@ static void *mlx5ctl_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
*/
if (ret && ret != -EREMOTEIO) {
if (rpc_out != rpc_in)
- kfree(rpc_out);
+ kvfree(rpc_out);
return ERR_PTR(ret);
}
return rpc_out;
diff --git a/drivers/fwctl/pds/main.c b/drivers/fwctl/pds/main.c
index 9b9d1f6b5556..1809853f6353 100644
--- a/drivers/fwctl/pds/main.c
+++ b/drivers/fwctl/pds/main.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/bitfield.h>
+#include <linux/string.h>
#include <uapi/fwctl/fwctl.h>
#include <uapi/fwctl/pds.h>
@@ -366,18 +367,10 @@ static void *pdsfc_fw_rpc(struct fwctl_uctx *uctx, enum fwctl_rpc_scope scope,
return ERR_PTR(err);
if (rpc->in.len > 0) {
- in_payload = kzalloc(rpc->in.len, GFP_KERNEL);
- if (!in_payload) {
- dev_err(dev, "Failed to allocate in_payload\n");
- err = -ENOMEM;
- goto err_out;
- }
-
- if (copy_from_user(in_payload, u64_to_user_ptr(rpc->in.payload),
- rpc->in.len)) {
+ in_payload = memdup_user(u64_to_user_ptr(rpc->in.payload), rpc->in.len);
+ if (IS_ERR(in_payload)) {
dev_dbg(dev, "Failed to copy in_payload from user\n");
- err = -EFAULT;
- goto err_in_payload;
+ return in_payload;
}
in_payload_dma_addr = dma_map_single(dev->parent, in_payload,
@@ -453,7 +446,6 @@ err_out_payload:
rpc->in.len, DMA_TO_DEVICE);
err_in_payload:
kfree(in_payload);
-err_out:
if (err)
return ERR_PTR(err);
@@ -481,7 +473,7 @@ static int pdsfc_probe(struct auxiliary_device *adev,
pdsfc = fwctl_alloc_device(&padev->vf_pdev->dev, &pdsfc_ops,
struct pdsfc_dev, fwctl);
if (!pdsfc)
- return dev_err_probe(dev, -ENOMEM, "Failed to allocate fwctl device struct\n");
+ return -ENOMEM;
pdsfc->padev = padev;
err = pdsfc_identify(pdsfc);
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
index 92402f6082c4..23894ff75ff9 100644
--- a/drivers/gnss/ubx.c
+++ b/drivers/gnss/ubx.c
@@ -66,6 +66,7 @@ static const struct gnss_serial_ops ubx_gserial_ops = {
static int ubx_probe(struct serdev_device *serdev)
{
struct gnss_serial *gserial;
+ struct gpio_desc *safeboot;
struct gpio_desc *reset;
struct ubx_data *data;
int ret;
@@ -92,6 +93,13 @@ static int ubx_probe(struct serdev_device *serdev)
if (ret < 0 && ret != -ENODEV)
goto err_free_gserial;
+ /* Deassert safeboot */
+ safeboot = devm_gpiod_get_optional(&serdev->dev, "safeboot", GPIOD_OUT_LOW);
+ if (IS_ERR(safeboot)) {
+ ret = PTR_ERR(safeboot);
+ goto err_free_gserial;
+ }
+
/* Deassert reset */
reset = devm_gpiod_get_optional(&serdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(reset)) {
diff --git a/drivers/staging/gpib/Kconfig b/drivers/gpib/Kconfig
index aa01538d5beb..eeb50956ce85 100644
--- a/drivers/staging/gpib/Kconfig
+++ b/drivers/gpib/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig GPIB
- tristate "Linux GPIB drivers"
+ tristate "GPIB drivers"
help
- Enable support for GPIB cards and dongles for Linux. GPIB
- is the General Purpose Interface Bus which conforms to the
- IEEE488 standard.
+ Enable support for GPIB cards and dongles. GPIB is the
+ General Purpose Interface Bus which conforms to the IEEE488
+ standard.
This set of drivers can be used with the corresponding user
space library that can be found on Sourceforge under linux-gpib.
diff --git a/drivers/staging/gpib/Makefile b/drivers/gpib/Makefile
index d0e88f5c0844..2d44fed2a743 100644
--- a/drivers/staging/gpib/Makefile
+++ b/drivers/gpib/Makefile
@@ -1,5 +1,5 @@
-subdir-ccflags-y += -I$(src)/include -I$(src)/uapi
+subdir-ccflags-y += -I$(src)/include
obj-$(CONFIG_GPIB_AGILENT_82350B) += agilent_82350b/
obj-$(CONFIG_GPIB_AGILENT_82357A) += agilent_82357a/
diff --git a/drivers/staging/gpib/TODO b/drivers/gpib/TODO
index ab41a7f9ca5b..ac07dd90b4ef 100644
--- a/drivers/staging/gpib/TODO
+++ b/drivers/gpib/TODO
@@ -4,20 +4,6 @@ TODO:
CHECK:ALLOC_SIZEOF_STRUCT: Prefer kmalloc(sizeof(*board->private_data)...) over kmalloc(sizeof(struct xxx_priv)...)
./gpio/gpib_bitbang.c:50: ERROR:COMPLEX_MACRO: Macros with complex values should be enclosed in parenthese
This warning will be addressed later: WARNING:UNDOCUMENTED_DT_STRING: DT compatible string
-- tidy-up comments:
- - there are some "//comments" and "// comments" scattered around
- - sometimes they are misaligned
- - sometimes "// comments" are interleaved with "/* comments */"
- - multiline comments should start with initial almost-blank line:
- /*
- * Good
- * multiline
- * comment
- */
- /* Bad
- * multiline
- * comment
- */
- resolve XXX notes where possible
- fix FIXME notes
- clean-up commented-out code
diff --git a/drivers/staging/gpib/agilent_82350b/Makefile b/drivers/gpib/agilent_82350b/Makefile
index f24e1e713a63..f24e1e713a63 100644
--- a/drivers/staging/gpib/agilent_82350b/Makefile
+++ b/drivers/gpib/agilent_82350b/Makefile
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/gpib/agilent_82350b/agilent_82350b.c
index 94bbb3b6576d..01a5bb43cd2d 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c
+++ b/drivers/gpib/agilent_82350b/agilent_82350b.c
@@ -182,10 +182,12 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
return retval;
#endif
- retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
- *bytes_written += num_bytes;
- if (retval < 0)
- return retval;
+ if (fifotransferlength > 0) {
+ retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes);
+ *bytes_written += num_bytes;
+ if (retval < 0)
+ return retval;
+ }
write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BOIE, IMR0);
for (i = 1; i < fifotransferlength;) {
@@ -217,7 +219,7 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer,
break;
}
write_byte(tms_priv, tms_priv->imr0_bits, IMR0);
- if (retval)
+ if (retval < 0)
return retval;
if (send_eoi) {
diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h b/drivers/gpib/agilent_82350b/agilent_82350b.h
index ef841957297f..ef841957297f 100644
--- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.h
+++ b/drivers/gpib/agilent_82350b/agilent_82350b.h
diff --git a/drivers/staging/gpib/agilent_82357a/Makefile b/drivers/gpib/agilent_82357a/Makefile
index 81a55c257a6e..81a55c257a6e 100644
--- a/drivers/staging/gpib/agilent_82357a/Makefile
+++ b/drivers/gpib/agilent_82357a/Makefile
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c b/drivers/gpib/agilent_82357a/agilent_82357a.c
index b923dc606d1d..77c8e549b208 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.c
+++ b/drivers/gpib/agilent_82357a/agilent_82357a.c
@@ -449,8 +449,8 @@ static int agilent_82357a_read(struct gpib_board *board, u8 *buffer, size_t leng
if (!out_data)
return -ENOMEM;
out_data[i++] = DATA_PIPE_CMD_READ;
- out_data[i++] = 0; //primary address when ARF_NO_ADDR is not set
- out_data[i++] = 0; //secondary address when ARF_NO_ADDR is not set
+ out_data[i++] = 0; // primary address when ARF_NO_ADDR is not set
+ out_data[i++] = 0; // secondary address when ARF_NO_ADDR is not set
out_data[i] = ARF_NO_ADDRESS | ARF_END_ON_EOI;
if (a_priv->eos_mode & REOS)
out_data[i] |= ARF_END_ON_EOS_CHAR;
@@ -532,7 +532,7 @@ static int agilent_82357a_read(struct gpib_board *board, u8 *buffer, size_t leng
*/
agilent_82357a_take_control_internal(board, 0);
- //FIXME check trailing flags for error
+ // FIXME check trailing flags for error
return retval;
}
@@ -966,7 +966,7 @@ static int agilent_82357a_parallel_poll(struct gpib_board *board, u8 *result)
dev_err(&usb_dev->dev, "write_registers() returned error\n");
return retval;
}
- udelay(2); //silly, since usb write will take way longer
+ udelay(2); // silly, since usb write will take way longer
read.address = CPTR;
retval = agilent_82357a_read_registers(a_priv, &read, 1, 1);
if (retval) {
@@ -989,31 +989,31 @@ static int agilent_82357a_parallel_poll(struct gpib_board *board, u8 *result)
static void agilent_82357a_parallel_poll_configure(struct gpib_board *board, u8 config)
{
- //board can only be system controller
+ // board can only be system controller
return;// 0;
}
static void agilent_82357a_parallel_poll_response(struct gpib_board *board, int ist)
{
- //board can only be system controller
+ // board can only be system controller
return;// 0;
}
static void agilent_82357a_serial_poll_response(struct gpib_board *board, u8 status)
{
- //board can only be system controller
+ // board can only be system controller
return;// 0;
}
static u8 agilent_82357a_serial_poll_status(struct gpib_board *board)
{
- //board can only be system controller
+ // board can only be system controller
return 0;
}
static void agilent_82357a_return_to_local(struct gpib_board *board)
{
- //board can only be system controller
+ // board can only be system controller
return;// 0;
}
diff --git a/drivers/staging/gpib/agilent_82357a/agilent_82357a.h b/drivers/gpib/agilent_82357a/agilent_82357a.h
index 23aa4799eb86..33ac558e5552 100644
--- a/drivers/staging/gpib/agilent_82357a/agilent_82357a.h
+++ b/drivers/gpib/agilent_82357a/agilent_82357a.h
@@ -20,7 +20,7 @@ enum usb_vendor_ids {
enum usb_device_ids {
USB_DEVICE_ID_AGILENT_82357A = 0x0107,
USB_DEVICE_ID_AGILENT_82357A_PREINIT = 0x0007, // device id before firmware is loaded
- USB_DEVICE_ID_AGILENT_82357B = 0x0718, // device id before firmware is loaded
+ USB_DEVICE_ID_AGILENT_82357B = 0x0718,         // device id after firmware is loaded
USB_DEVICE_ID_AGILENT_82357B_PREINIT = 0x0518, // device id before firmware is loaded
};
@@ -129,10 +129,10 @@ struct agilent_82357a_priv {
struct urb *bulk_urb;
struct urb *interrupt_urb;
u8 *interrupt_buffer;
- struct mutex bulk_transfer_lock; // bulk transfer lock
- struct mutex bulk_alloc_lock; // bulk transfer allocation lock
- struct mutex interrupt_alloc_lock; // interrupt allocation lock
- struct mutex control_alloc_lock; // control message allocation lock
+ struct mutex bulk_transfer_lock; // bulk transfer lock
+ struct mutex bulk_alloc_lock; // bulk transfer allocation lock
+ struct mutex interrupt_alloc_lock; // interrupt allocation lock
+ struct mutex control_alloc_lock; // control message allocation lock
struct timer_list bulk_timer;
struct agilent_82357a_urb_ctx context;
unsigned int bulk_out_endpoint;
diff --git a/drivers/staging/gpib/cb7210/Makefile b/drivers/gpib/cb7210/Makefile
index d239ae80b415..d239ae80b415 100644
--- a/drivers/staging/gpib/cb7210/Makefile
+++ b/drivers/gpib/cb7210/Makefile
diff --git a/drivers/staging/gpib/cb7210/cb7210.c b/drivers/gpib/cb7210/cb7210.c
index 3e2397898a9b..24c61b151071 100644
--- a/drivers/staging/gpib/cb7210/cb7210.c
+++ b/drivers/gpib/cb7210/cb7210.c
@@ -1290,26 +1290,14 @@ static void cb_gpib_release(struct pcmcia_device *link)
static int cb_gpib_suspend(struct pcmcia_device *link)
{
- //struct local_info *info = link->priv;
- //struct struct gpib_board *dev = info->dev;
-
if (link->open)
dev_warn(&link->dev, "Device still open\n");
- //netif_device_detach(dev);
return 0;
}
static int cb_gpib_resume(struct pcmcia_device *link)
{
- //struct local_info *info = link->priv;
- //struct struct gpib_board *dev = info->dev;
-
- /*if (link->open) {
- * ni_gpib_probe(dev); / really?
- * //netif_device_attach(dev);
- *
- */
return cb_gpib_config(link);
}
diff --git a/drivers/staging/gpib/cb7210/cb7210.h b/drivers/gpib/cb7210/cb7210.h
index 13f127563ab3..ddc841ff87ae 100644
--- a/drivers/staging/gpib/cb7210/cb7210.h
+++ b/drivers/gpib/cb7210/cb7210.h
@@ -56,10 +56,10 @@ enum cb7210_page_in {
};
enum hs_regs {
- //write registers
+ // write registers
HS_MODE = 0x8, /* HS_MODE register */
HS_INT_LEVEL = 0x9, /* HS_INT_LEVEL register */
- //read registers
+ // read registers
HS_STATUS = 0x8, /* HS_STATUS register */
};
diff --git a/drivers/staging/gpib/cec/Makefile b/drivers/gpib/cec/Makefile
index b7141e23d4e0..b7141e23d4e0 100644
--- a/drivers/staging/gpib/cec/Makefile
+++ b/drivers/gpib/cec/Makefile
diff --git a/drivers/staging/gpib/cec/cec.h b/drivers/gpib/cec/cec.h
index 3ce2869c7429..3ce2869c7429 100644
--- a/drivers/staging/gpib/cec/cec.h
+++ b/drivers/gpib/cec/cec.h
diff --git a/drivers/staging/gpib/cec/cec_gpib.c b/drivers/gpib/cec/cec_gpib.c
index 0c9d10ee7cd2..dbf9b95baabc 100644
--- a/drivers/staging/gpib/cec/cec_gpib.c
+++ b/drivers/gpib/cec/cec_gpib.c
@@ -206,7 +206,7 @@ static struct gpib_interface cec_pci_interface = {
.parallel_poll_configure = cec_parallel_poll_configure,
.parallel_poll_response = cec_parallel_poll_response,
.local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL, //XXX
+ .line_status = NULL, // XXX
.update_status = cec_update_status,
.primary_address = cec_primary_address,
.secondary_address = cec_secondary_address,
diff --git a/drivers/staging/gpib/common/Makefile b/drivers/gpib/common/Makefile
index 460586edb574..460586edb574 100644
--- a/drivers/staging/gpib/common/Makefile
+++ b/drivers/gpib/common/Makefile
diff --git a/drivers/staging/gpib/common/gpib_os.c b/drivers/gpib/common/gpib_os.c
index 2a0465ce16c4..9dbbac8b8436 100644
--- a/drivers/staging/gpib/common/gpib_os.c
+++ b/drivers/gpib/common/gpib_os.c
@@ -326,7 +326,7 @@ static int setup_serial_poll(struct gpib_board *board, unsigned int usec_timeout
cmd_string[i++] = MLA(board->pad); /* controller's listen address */
if (board->sad >= 0)
cmd_string[i++] = MSA(board->sad);
- cmd_string[i++] = SPE; //serial poll enable
+ cmd_string[i++] = SPE; // serial poll enable
ret = board->interface->command(board, cmd_string, i, &bytes_written);
if (ret < 0 || bytes_written < i) {
diff --git a/drivers/staging/gpib/common/iblib.c b/drivers/gpib/common/iblib.c
index 549280d9a6e9..7cbb6a467177 100644
--- a/drivers/staging/gpib/common/iblib.c
+++ b/drivers/gpib/common/iblib.c
@@ -608,7 +608,7 @@ static int wait_satisfied(struct wait_info *winfo, struct gpib_status_queue *sta
*status = temp_status;
return 1;
}
-//XXX does wait for END work?
+// XXX does wait for END work?
return 0;
}
diff --git a/drivers/staging/gpib/common/ibsys.h b/drivers/gpib/common/ibsys.h
index e5a148f513a8..e5a148f513a8 100644
--- a/drivers/staging/gpib/common/ibsys.h
+++ b/drivers/gpib/common/ibsys.h
diff --git a/drivers/staging/gpib/eastwood/Makefile b/drivers/gpib/eastwood/Makefile
index 384825195f77..384825195f77 100644
--- a/drivers/staging/gpib/eastwood/Makefile
+++ b/drivers/gpib/eastwood/Makefile
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.c b/drivers/gpib/eastwood/fluke_gpib.c
index 491356433249..3ae848e3f738 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.c
+++ b/drivers/gpib/eastwood/fluke_gpib.c
@@ -507,7 +507,7 @@ static int fluke_accel_write(struct gpib_board *board, u8 *buffer, size_t length
}
if (retval < 0)
return retval;
- //handle sending of last byte with eoi
+ // handle sending of last byte with eoi
if (send_eoi) {
size_t num_bytes;
diff --git a/drivers/staging/gpib/eastwood/fluke_gpib.h b/drivers/gpib/eastwood/fluke_gpib.h
index 493c200d0bbf..493c200d0bbf 100644
--- a/drivers/staging/gpib/eastwood/fluke_gpib.h
+++ b/drivers/gpib/eastwood/fluke_gpib.h
diff --git a/drivers/staging/gpib/fmh_gpib/Makefile b/drivers/gpib/fmh_gpib/Makefile
index cc4d9e7cd5cd..cc4d9e7cd5cd 100644
--- a/drivers/staging/gpib/fmh_gpib/Makefile
+++ b/drivers/gpib/fmh_gpib/Makefile
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/gpib/fmh_gpib/fmh_gpib.c
index 4138f3d2bae7..f7bfb4a8e553 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c
+++ b/drivers/gpib/fmh_gpib/fmh_gpib.c
@@ -523,7 +523,7 @@ static int fmh_gpib_accel_write(struct gpib_board *board, u8 *buffer,
}
if (retval < 0)
return retval;
- //handle sending of last byte with eoi
+ // handle sending of last byte with eoi
if (send_eoi) {
size_t num_bytes;
@@ -1517,6 +1517,11 @@ void fmh_gpib_detach(struct gpib_board *board)
resource_size(e_priv->gpib_iomem_res));
}
fmh_gpib_generic_detach(board);
+
+ if (board->dev) {
+ put_device(board->dev);
+ board->dev = NULL;
+ }
}
static int fmh_gpib_pci_attach_impl(struct gpib_board *board,
diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h b/drivers/gpib/fmh_gpib/fmh_gpib.h
index e7602d7e1401..e7602d7e1401 100644
--- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.h
+++ b/drivers/gpib/fmh_gpib/fmh_gpib.h
diff --git a/drivers/staging/gpib/gpio/Makefile b/drivers/gpib/gpio/Makefile
index 00ea52abdda7..00ea52abdda7 100644
--- a/drivers/staging/gpib/gpio/Makefile
+++ b/drivers/gpib/gpio/Makefile
diff --git a/drivers/staging/gpib/gpio/gpib_bitbang.c b/drivers/gpib/gpio/gpib_bitbang.c
index 17884810fd69..374cd61355e9 100644
--- a/drivers/staging/gpib/gpio/gpib_bitbang.c
+++ b/drivers/gpib/gpio/gpib_bitbang.c
@@ -277,8 +277,8 @@ struct bb_priv {
int ndac_mode; /* nrfd interrupt mode 0/1 -> edge/levels */
int dav_tx; /* keep track of DAV status while sending */
int dav_rx; /* keep track of DAV status while receiving */
- u8 eos; // eos character
- short eos_flags; // eos mode
+ u8 eos; /* eos character */
+ short eos_flags; /* eos mode */
short eos_check; /* eos check required in current operation ... */
short eos_check_8; /* ... with byte comparison */
short eos_mask_7; /* ... with 7 bit masked character */
@@ -290,14 +290,14 @@ struct bb_priv {
u8 *rbuf;
u8 *wbuf;
int end_flag;
- int r_busy; /* 0==idle 1==busy */
+ int r_busy; /* 0==idle 1==busy */
int w_busy;
int write_done;
- int cmd; /* 1 = cmd write in progress */
+ int cmd; /* 1 = cmd write in progress */
size_t w_cnt;
size_t length;
u8 *w_buf;
- spinlock_t rw_lock; // protect mods to rw_lock
+ spinlock_t rw_lock; /* protect mods to rw_lock */
int phase;
int ndac_idle;
int ndac_seq;
@@ -726,7 +726,7 @@ static irqreturn_t bb_SRQ_interrupt(int irq, void *arg)
static int bb_command(struct gpib_board *board, u8 *buffer,
size_t length, size_t *bytes_written)
{
- size_t ret;
+ int ret;
struct bb_priv *priv = board->private_data;
int i;
@@ -1462,8 +1462,8 @@ static inline void SET_DIR_READ(struct bb_priv *priv)
gpiod_set_value(TE, 0); /* set NDAC and NRFD to transmit and DAV to receive */
}
- gpiod_direction_output(NRFD, 0); // hold off the talker
- gpiod_direction_output(NDAC, 0); // data not accepted
+ gpiod_direction_output(NRFD, 0); /* hold off the talker */
+ gpiod_direction_output(NDAC, 0); /* data not accepted */
priv->direction = DIR_READ;
}
diff --git a/drivers/staging/gpib/hp_82335/Makefile b/drivers/gpib/hp_82335/Makefile
index 305ce44ee48a..305ce44ee48a 100644
--- a/drivers/staging/gpib/hp_82335/Makefile
+++ b/drivers/gpib/hp_82335/Makefile
diff --git a/drivers/staging/gpib/hp_82335/hp82335.c b/drivers/gpib/hp_82335/hp82335.c
index d0e47ef77c87..d0e47ef77c87 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.c
+++ b/drivers/gpib/hp_82335/hp82335.c
diff --git a/drivers/staging/gpib/hp_82335/hp82335.h b/drivers/gpib/hp_82335/hp82335.h
index 0c252a712ec9..0c252a712ec9 100644
--- a/drivers/staging/gpib/hp_82335/hp82335.h
+++ b/drivers/gpib/hp_82335/hp82335.h
diff --git a/drivers/staging/gpib/hp_82341/Makefile b/drivers/gpib/hp_82341/Makefile
index 21367310a17e..21367310a17e 100644
--- a/drivers/staging/gpib/hp_82341/Makefile
+++ b/drivers/gpib/hp_82341/Makefile
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.c b/drivers/gpib/hp_82341/hp_82341.c
index e5c1997ce7d9..1a2ad0560e14 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.c
+++ b/drivers/gpib/hp_82341/hp_82341.c
@@ -38,7 +38,7 @@ static int hp_82341_accel_read(struct gpib_board *board, u8 *buffer, size_t leng
unsigned short event_status;
int i;
int num_fifo_bytes;
- //hardware doesn't support checking for end-of-string character when using fifo
+ // hardware doesn't support checking for end-of-string character when using fifo
if (tms_priv->eos_flags & REOS)
return tms9914_read(board, tms_priv, buffer, length, end, bytes_read);
@@ -49,7 +49,7 @@ static int hp_82341_accel_read(struct gpib_board *board, u8 *buffer, size_t leng
*bytes_read = 0;
if (length == 0)
return 0;
- //disable fifo for the moment
+ // disable fifo for the moment
outb(DIRECTION_GPIB_TO_HOST_BIT, hp_priv->iobase[3] + BUFFER_CONTROL_REG);
/*
* Handle corner case of board not in holdoff and one byte has slipped in already.
@@ -154,7 +154,7 @@ static int restart_write_fifo(struct gpib_board *board, struct hp_82341_priv *hp
while (1) {
int status;
- //restart doesn't work if data holdoff is in effect
+ // restart doesn't work if data holdoff is in effect
status = tms9914_line_status(board, tms_priv);
if ((status & BUS_NRFD) == 0) {
outb(RESTART_STREAM_BIT, hp_priv->iobase[0] + STREAM_STATUS_REG);
@@ -764,7 +764,7 @@ static int hp_82341_attach(struct gpib_board *board, const struct gpib_board_con
ENABLE_TI_INTERRUPT_EVENT_BIT, hp_priv->iobase[0] + EVENT_ENABLE_REG);
outb(ENABLE_BUFFER_END_INTERRUPT_BIT | ENABLE_TERMINAL_COUNT_INTERRUPT_BIT |
ENABLE_TI_INTERRUPT_BIT, hp_priv->iobase[0] + INTERRUPT_ENABLE_REG);
- //write clear event register
+ // write clear event register
outb((TI_INTERRUPT_EVENT_BIT | POINTERS_EQUAL_EVENT_BIT |
BUFFER_END_EVENT_BIT | TERMINAL_COUNT_EVENT_BIT),
hp_priv->iobase[0] + EVENT_STATUS_REG);
@@ -867,7 +867,7 @@ static irqreturn_t hp_82341_interrupt(int irq, void *arg)
event_status = inb(hp_priv->iobase[0] + EVENT_STATUS_REG);
if (event_status & INTERRUPT_PENDING_EVENT_BIT)
retval = IRQ_HANDLED;
- //write-clear status bits
+ // write-clear status bits
if (event_status & (TI_INTERRUPT_EVENT_BIT | POINTERS_EQUAL_EVENT_BIT |
BUFFER_END_EVENT_BIT | TERMINAL_COUNT_EVENT_BIT)) {
outb(event_status & (TI_INTERRUPT_EVENT_BIT | POINTERS_EQUAL_EVENT_BIT |
@@ -901,7 +901,7 @@ static void set_transfer_counter(struct hp_82341_priv *hp_priv, int count)
outb(complement & 0xff, hp_priv->iobase[1] + TRANSFER_COUNT_LOW_REG);
outb((complement >> 8) & 0xff, hp_priv->iobase[1] + TRANSFER_COUNT_MID_REG);
- //I don't think the hi count reg is even used, but oh well
+ // I don't think the hi count reg is even used, but oh well
outb((complement >> 16) & 0xf, hp_priv->iobase[1] + TRANSFER_COUNT_HIGH_REG);
}
diff --git a/drivers/staging/gpib/hp_82341/hp_82341.h b/drivers/gpib/hp_82341/hp_82341.h
index 370a3d4576eb..859ef2899acb 100644
--- a/drivers/staging/gpib/hp_82341/hp_82341.h
+++ b/drivers/gpib/hp_82341/hp_82341.h
@@ -65,7 +65,7 @@ enum config_control_status_bits {
IRQ_SELECT_MASK = 0x7,
DMA_CONFIG_MASK = 0x18,
ENABLE_DMA_CONFIG_BIT = 0x20,
- XILINX_READY_BIT = 0x40, //read only
+ XILINX_READY_BIT = 0x40, // read only
DONE_PGL_BIT = 0x80
};
@@ -94,7 +94,7 @@ static inline unsigned int IRQ_SELECT_BITS(int irq)
};
enum mode_control_status_bits {
- SLOT8_BIT = 0x1, // read only
+ SLOT8_BIT = 0x1, // read only
ACTIVE_CONTROLLER_BIT = 0x2, // read only
ENABLE_DMA_BIT = 0x4,
SYSTEM_CONTROLLER_BIT = 0x8,
@@ -106,12 +106,12 @@ enum mode_control_status_bits {
enum monitor_bits {
MONITOR_INTERRUPT_PENDING_BIT = 0x1, // read only
MONITOR_CLEAR_HOLDOFF_BIT = 0x2, // write only
- MONITOR_PPOLL_BIT = 0x4, // write clear
- MONITOR_SRQ_BIT = 0x8, // write clear
- MONITOR_IFC_BIT = 0x10, // write clear
- MONITOR_REN_BIT = 0x20, // write clear
- MONITOR_END_BIT = 0x40, // write clear
- MONITOR_DAV_BIT = 0x80 // write clear
+ MONITOR_PPOLL_BIT = 0x4, // write clear
+ MONITOR_SRQ_BIT = 0x8, // write clear
+ MONITOR_IFC_BIT = 0x10, // write clear
+ MONITOR_REN_BIT = 0x20, // write clear
+ MONITOR_END_BIT = 0x40, // write clear
+ MONITOR_DAV_BIT = 0x80 // write clear
};
enum interrupt_enable_bits {
@@ -123,36 +123,36 @@ enum interrupt_enable_bits {
};
enum event_status_bits {
- TI_INTERRUPT_EVENT_BIT = 0x1, //write clear
+ TI_INTERRUPT_EVENT_BIT = 0x1, // write clear
INTERRUPT_PENDING_EVENT_BIT = 0x2, // read only
- POINTERS_EQUAL_EVENT_BIT = 0x4, //write clear
- BUFFER_END_EVENT_BIT = 0x10, //write clear
+ POINTERS_EQUAL_EVENT_BIT = 0x4, // write clear
+ BUFFER_END_EVENT_BIT = 0x10, // write clear
TERMINAL_COUNT_EVENT_BIT = 0x20, // write clear
DMA_TERMINAL_COUNT_EVENT_BIT = 0x80, // write clear
};
enum event_enable_bits {
- ENABLE_TI_INTERRUPT_EVENT_BIT = 0x1, //write clear
- ENABLE_POINTERS_EQUAL_EVENT_BIT = 0x4, //write clear
- ENABLE_BUFFER_END_EVENT_BIT = 0x10, //write clear
- ENABLE_TERMINAL_COUNT_EVENT_BIT = 0x20, // write clear
+ ENABLE_TI_INTERRUPT_EVENT_BIT = 0x1, // write clear
+ ENABLE_POINTERS_EQUAL_EVENT_BIT = 0x4, // write clear
+ ENABLE_BUFFER_END_EVENT_BIT = 0x10, // write clear
+ ENABLE_TERMINAL_COUNT_EVENT_BIT = 0x20, // write clear
ENABLE_DMA_TERMINAL_COUNT_EVENT_BIT = 0x80, // write clear
};
enum stream_status_bits {
- HALTED_STATUS_BIT = 0x1, //read
- RESTART_STREAM_BIT = 0x1 //write
+ HALTED_STATUS_BIT = 0x1, // read
+ RESTART_STREAM_BIT = 0x1 // write
};
enum buffer_control_bits {
DIRECTION_GPIB_TO_HOST_BIT = 0x20, // transfer direction (set for gpib to host)
- ENABLE_TI_BUFFER_BIT = 0x40, //enable fifo
- FAST_WR_EN_BIT = 0x80, // 350 ns t1 delay?
+ ENABLE_TI_BUFFER_BIT = 0x40, // enable fifo
+ FAST_WR_EN_BIT = 0x80, // 350 ns t1 delay?
};
// registers accessible through isapnp chip on 82341d
enum hp_82341d_pnp_registers {
- PIO_DATA_REG = 0x20, //read/write pio data lines
+ PIO_DATA_REG = 0x20, // read/write pio data lines
PIO_DIRECTION_REG = 0x21, // set pio data line directions (set for input)
};
diff --git a/drivers/staging/gpib/include/amcc5920.h b/drivers/gpib/include/amcc5920.h
index 7a88bd282feb..7a88bd282feb 100644
--- a/drivers/staging/gpib/include/amcc5920.h
+++ b/drivers/gpib/include/amcc5920.h
diff --git a/drivers/staging/gpib/include/amccs5933.h b/drivers/gpib/include/amccs5933.h
index 4de0f6797458..d7f63c795096 100644
--- a/drivers/staging/gpib/include/amccs5933.h
+++ b/drivers/gpib/include/amccs5933.h
@@ -24,7 +24,7 @@ extern inline int INCOMING_MAILBOX_REG(unsigned int mailbox)
enum {
OUTBOX_EMPTY_INTR_BIT = 0x10, // enable outbox empty interrupt
INBOX_FULL_INTR_BIT = 0x1000, // enable inbox full interrupt
- INBOX_INTR_CS_BIT = 0x20000, // read, or write clear inbox full interrupt
+ INBOX_INTR_CS_BIT = 0x20000, // read, or write clear inbox full interrupt
INTR_ASSERTED_BIT = 0x800000, // read only, interrupt asserted
};
@@ -52,7 +52,7 @@ extern inline int OUTBOX_SELECT_BITS(unsigned int mailbox)
return (mailbox & 0x3) << 2;
};
-//BMCSR bits
+// BMCSR bits
enum {
MBOX_FLAGS_RESET_BIT = 0x08000000, // resets mailbox empty/full flags
};
diff --git a/drivers/staging/gpib/include/gpibP.h b/drivers/gpib/include/gpibP.h
index 1b27f37e0ba0..e3938ada3e0d 100644
--- a/drivers/staging/gpib/include/gpibP.h
+++ b/drivers/gpib/include/gpibP.h
@@ -12,8 +12,8 @@
#include "gpib_types.h"
#include "gpib_proto.h"
#include "gpib_cmd.h"
-#include "gpib.h"
-#include "gpib_ioctl.h"
+#include <linux/gpib.h>
+#include <linux/gpib_ioctl.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
diff --git a/drivers/staging/gpib/include/gpib_cmd.h b/drivers/gpib/include/gpib_cmd.h
index 9e96a3bfa22d..9e96a3bfa22d 100644
--- a/drivers/staging/gpib/include/gpib_cmd.h
+++ b/drivers/gpib/include/gpib_cmd.h
diff --git a/drivers/staging/gpib/include/gpib_pci_ids.h b/drivers/gpib/include/gpib_pci_ids.h
index 52dcab07a7d1..52dcab07a7d1 100644
--- a/drivers/staging/gpib/include/gpib_pci_ids.h
+++ b/drivers/gpib/include/gpib_pci_ids.h
diff --git a/drivers/staging/gpib/include/gpib_proto.h b/drivers/gpib/include/gpib_proto.h
index 42e736e3b7cd..42e736e3b7cd 100644
--- a/drivers/staging/gpib/include/gpib_proto.h
+++ b/drivers/gpib/include/gpib_proto.h
diff --git a/drivers/staging/gpib/include/gpib_state_machines.h b/drivers/gpib/include/gpib_state_machines.h
index 7488c00f191e..7488c00f191e 100644
--- a/drivers/staging/gpib/include/gpib_state_machines.h
+++ b/drivers/gpib/include/gpib_state_machines.h
diff --git a/drivers/staging/gpib/include/gpib_types.h b/drivers/gpib/include/gpib_types.h
index db040c80d778..5a0978ae27e7 100644
--- a/drivers/staging/gpib/include/gpib_types.h
+++ b/drivers/gpib/include/gpib_types.h
@@ -8,7 +8,7 @@
#define _GPIB_TYPES_H
#ifdef __KERNEL__
-#include "gpib.h"
+#include <linux/gpib.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/mutex.h>
@@ -273,7 +273,8 @@ struct gpib_board {
struct mutex big_gpib_mutex;
/* pid of last process to lock the board mutex */
pid_t locking_pid;
- spinlock_t locking_pid_spinlock; // lock for setting locking pid
+ /* lock for setting locking pid */
+ spinlock_t locking_pid_spinlock;
/* Spin lock for dealing with races with the interrupt handler */
spinlock_t spinlock;
/* Watchdog timer to enable timeouts */
diff --git a/drivers/staging/gpib/include/nec7210.h b/drivers/gpib/include/nec7210.h
index 312217b4580e..9835aa5ef4ff 100644
--- a/drivers/staging/gpib/include/nec7210.h
+++ b/drivers/gpib/include/nec7210.h
@@ -22,18 +22,18 @@ struct nec7210_priv {
u32 iobase;
#endif
void __iomem *mmiobase;
- unsigned int offset; // offset between successive nec7210 io addresses
+ unsigned int offset; // offset between successive nec7210 io addresses
unsigned int dma_channel;
u8 *dma_buffer;
unsigned int dma_buffer_length; // length of dma buffer
dma_addr_t dma_buffer_addr; // bus address of board->buffer for use with dma
// software copy of bits written to registers
u8 reg_bits[8];
- u8 auxa_bits; // bits written to auxiliary register A
- u8 auxb_bits; // bits written to auxiliary register B
+ u8 auxa_bits; // bits written to auxiliary register A
+ u8 auxb_bits; // bits written to auxiliary register B
// used to keep track of board's state, bit definitions given below
unsigned long state;
- /* lock for chips that extend the nec7210 registers by paging in alternate regs */
+ // lock for chips that extend the nec7210 registers by paging in alternate regs
spinlock_t register_page_lock;
// wrappers for outb, inb, readb, or writeb
u8 (*read_byte)(struct nec7210_priv *priv, unsigned int register_number);
@@ -64,17 +64,17 @@ static inline void write_byte(struct nec7210_priv *priv, u8 byte, unsigned int r
// struct nec7210_priv.state bit numbers
enum {
- PIO_IN_PROGRESS_BN, // pio transfer in progress
+ PIO_IN_PROGRESS_BN, // pio transfer in progress
DMA_READ_IN_PROGRESS_BN, // dma read transfer in progress
DMA_WRITE_IN_PROGRESS_BN, // dma write transfer in progress
- READ_READY_BN, // board has data byte available to read
- WRITE_READY_BN, // board is ready to send a data byte
- COMMAND_READY_BN, // board is ready to send a command byte
- RECEIVED_END_BN, // received END
- BUS_ERROR_BN, // output error has occurred
- RFD_HOLDOFF_BN, // rfd holdoff in effect
- DEV_CLEAR_BN, // device clear received
- ADR_CHANGE_BN, // address state change occurred
+ READ_READY_BN, // board has data byte available to read
+ WRITE_READY_BN, // board is ready to send a data byte
+ COMMAND_READY_BN, // board is ready to send a command byte
+ RECEIVED_END_BN, // received END
+ BUS_ERROR_BN, // output error has occurred
+ RFD_HOLDOFF_BN, // rfd holdoff in effect
+ DEV_CLEAR_BN, // device clear received
+ ADR_CHANGE_BN, // address state change occurred
};
// interface functions
diff --git a/drivers/staging/gpib/include/nec7210_registers.h b/drivers/gpib/include/nec7210_registers.h
index 97c53ac8e805..067983d7a07f 100644
--- a/drivers/staging/gpib/include/nec7210_registers.h
+++ b/drivers/gpib/include/nec7210_registers.h
@@ -11,7 +11,7 @@ enum nec7210_chipset {
NEC7210, // The original
TNT4882, // NI
NAT4882, // NI
- CB7210, // measurement computing
+ CB7210, // measurement computing
IOT7210, // iotech
IGPIB7210, // Ines
TNT5004, // NI (minor differences to TNT4882)
@@ -48,7 +48,7 @@ enum nec7210_read_regs {
ADR1, // address 2
};
-//bit definitions common to nec-7210 compatible registers
+// bit definitions common to nec-7210 compatible registers
// ISR1: interrupt status register 1
enum isr1_bits {
diff --git a/drivers/staging/gpib/include/plx9050.h b/drivers/gpib/include/plx9050.h
index 66c56335f5c0..c911b285a0ca 100644
--- a/drivers/staging/gpib/include/plx9050.h
+++ b/drivers/gpib/include/plx9050.h
@@ -23,10 +23,10 @@ enum plx9050_intcsr_bits {
PLX9050_LINTR2_STATUS_BIT = 0x20,
PLX9050_PCI_INTR_EN_BIT = 0x40,
PLX9050_SOFT_INTR_BIT = 0x80,
- PLX9050_LINTR1_SELECT_ENABLE_BIT = 0x100, //9052 extension
- PLX9050_LINTR2_SELECT_ENABLE_BIT = 0x200, //9052 extension
- PLX9050_LINTR1_EDGE_CLEAR_BIT = 0x400, //9052 extension
- PLX9050_LINTR2_EDGE_CLEAR_BIT = 0x800, //9052 extension
+ PLX9050_LINTR1_SELECT_ENABLE_BIT = 0x100, // 9052 extension
+ PLX9050_LINTR2_SELECT_ENABLE_BIT = 0x200, // 9052 extension
+ PLX9050_LINTR1_EDGE_CLEAR_BIT = 0x400, // 9052 extension
+ PLX9050_LINTR2_EDGE_CLEAR_BIT = 0x800, // 9052 extension
};
enum plx9050_cntrl_bits {
diff --git a/drivers/staging/gpib/include/quancom_pci.h b/drivers/gpib/include/quancom_pci.h
index cdaf0d056be9..cdaf0d056be9 100644
--- a/drivers/staging/gpib/include/quancom_pci.h
+++ b/drivers/gpib/include/quancom_pci.h
diff --git a/drivers/staging/gpib/include/tms9914.h b/drivers/gpib/include/tms9914.h
index 50a9d3b22619..e66b75e0fda8 100644
--- a/drivers/staging/gpib/include/tms9914.h
+++ b/drivers/gpib/include/tms9914.h
@@ -30,10 +30,10 @@ struct tms9914_priv {
u8 imr0_bits, imr1_bits;
// bits written to address mode register
u8 admr_bits;
- u8 auxa_bits; // bits written to auxiliary register A
+ u8 auxa_bits; // bits written to auxiliary register A
// used to keep track of board's state, bit definitions given below
unsigned long state;
- u8 eos; // eos character
+ u8 eos; // eos character
short eos_flags;
u8 spoll_status;
enum tms9914_holdoff_mode holdoff_mode;
@@ -67,15 +67,15 @@ static inline void write_byte(struct tms9914_priv *priv, u8 byte, unsigned int r
// struct tms9914_priv.state bit numbers
enum {
- PIO_IN_PROGRESS_BN, // pio transfer in progress
+ PIO_IN_PROGRESS_BN, // pio transfer in progress
DMA_READ_IN_PROGRESS_BN, // dma read transfer in progress
DMA_WRITE_IN_PROGRESS_BN, // dma write transfer in progress
- READ_READY_BN, // board has data byte available to read
- WRITE_READY_BN, // board is ready to send a data byte
- COMMAND_READY_BN, // board is ready to send a command byte
- RECEIVED_END_BN, // received END
- BUS_ERROR_BN, // bus error
- DEV_CLEAR_BN, // device clear received
+ READ_READY_BN, // board has data byte available to read
+ WRITE_READY_BN, // board is ready to send a data byte
+ COMMAND_READY_BN, // board is ready to send a command byte
+ RECEIVED_END_BN, // received END
+ BUS_ERROR_BN, // bus error
+ DEV_CLEAR_BN, // device clear received
};
// interface functions
@@ -150,23 +150,23 @@ enum {
IMR0 = 0, /* interrupt mask 0 */
IMR1 = 1, /* interrupt mask 1 */
AUXCR = 3, /* auxiliary command */
- ADR = 4, // address register
- SPMR = 5, // serial poll mode register
+ ADR = 4, /* address register */
+ SPMR = 5, /* serial poll mode register */
PPR = 6, /* parallel poll */
CDOR = 7, /* data out register */
};
// read registers
enum {
- ISR0 = 0, /* interrupt status 0 */
- ISR1 = 1, /* interrupt status 1 */
- ADSR = 2, /* address status */
- BSR = 3, /* bus status */
- CPTR = 6, /* command pass thru */
- DIR = 7, /* data in register */
+ ISR0 = 0, /* interrupt status 0 */
+ ISR1 = 1, /* interrupt status 1 */
+ ADSR = 2, /* address status */
+ BSR = 3, /* bus status */
+ CPTR = 6, /* command pass thru */
+ DIR = 7, /* data in register */
};
-//bit definitions common to tms9914 compatible registers
+// bit definitions common to tms9914 compatible registers
/* ISR0 - Register bits */
enum isr0_bits {
@@ -248,33 +248,33 @@ enum bus_status_bits {
/*---------------------------------------------------------*/
enum aux_cmd_bits {
- AUX_CS = 0x80, /* set bit instead of clearing it, used with commands marked 'd' below */
- AUX_CHIP_RESET = 0x0, /* d Chip reset */
- AUX_INVAL = 0x1, // release dac holdoff, invalid command byte
- AUX_VAL = (AUX_INVAL | AUX_CS), // release dac holdoff, valid command byte
- AUX_RHDF = 0x2, /* X Release RFD holdoff */
- AUX_HLDA = 0x3, /* d holdoff on all data */
- AUX_HLDE = 0x4, /* d holdoff on EOI only */
- AUX_NBAF = 0x5, /* X Set new byte available false */
- AUX_FGET = 0x6, /* d force GET */
- AUX_RTL = 0x7, /* d return to local */
- AUX_SEOI = 0x8, /* X send EOI with next byte */
- AUX_LON = 0x9, /* d Listen only */
- AUX_TON = 0xa, /* d Talk only */
- AUX_GTS = 0xb, /* X goto standby */
- AUX_TCA = 0xc, /* X take control asynchronously */
- AUX_TCS = 0xd, /* X take " synchronously */
- AUX_RPP = 0xe, /* d Request parallel poll */
- AUX_SIC = 0xf, /* d send interface clear */
- AUX_SRE = 0x10, /* d send remote enable */
- AUX_RQC = 0x11, /* X request control */
- AUX_RLC = 0x12, /* X release control */
- AUX_DAI = 0x13, /* d disable all interrupts */
- AUX_PTS = 0x14, /* X pass through next secondary */
- AUX_STDL = 0x15, /* d short T1 delay */
- AUX_SHDW = 0x16, /* d shadow handshake */
- AUX_VSTDL = 0x17, /* d very short T1 delay (smj9914 extension) */
- AUX_RSV2 = 0x18, /* d request service bit 2 (smj9914 extension) */
+ AUX_CS = 0x80, /* set bit instead of clearing it, used with commands marked 'd' below */
+ AUX_CHIP_RESET = 0x0, /* d Chip reset */
+ AUX_INVAL = 0x1, /* release dac holdoff, invalid command byte */
+ AUX_VAL = (AUX_INVAL | AUX_CS), /* release dac holdoff, valid command byte */
+ AUX_RHDF = 0x2, /* X Release RFD holdoff */
+ AUX_HLDA = 0x3, /* d holdoff on all data */
+ AUX_HLDE = 0x4, /* d holdoff on EOI only */
+ AUX_NBAF = 0x5, /* X Set new byte available false */
+ AUX_FGET = 0x6, /* d force GET */
+ AUX_RTL = 0x7, /* d return to local */
+ AUX_SEOI = 0x8, /* X send EOI with next byte */
+ AUX_LON = 0x9, /* d Listen only */
+ AUX_TON = 0xa, /* d Talk only */
+ AUX_GTS = 0xb, /* X goto standby */
+ AUX_TCA = 0xc, /* X take control asynchronously */
+ AUX_TCS = 0xd, /* X take " synchronously */
+ AUX_RPP = 0xe, /* d Request parallel poll */
+ AUX_SIC = 0xf, /* d send interface clear */
+ AUX_SRE = 0x10, /* d send remote enable */
+ AUX_RQC = 0x11, /* X request control */
+ AUX_RLC = 0x12, /* X release control */
+ AUX_DAI = 0x13, /* d disable all interrupts */
+ AUX_PTS = 0x14, /* X pass through next secondary */
+ AUX_STDL = 0x15, /* d short T1 delay */
+ AUX_SHDW = 0x16, /* d shadow handshake */
+ AUX_VSTDL = 0x17, /* d very short T1 delay (smj9914 extension) */
+ AUX_RSV2 = 0x18, /* d request service bit 2 (smj9914 extension) */
};
#endif //_TMS9914_H
diff --git a/drivers/staging/gpib/include/tnt4882_registers.h b/drivers/gpib/include/tnt4882_registers.h
index 1b1441cd03d5..d54c4cc61168 100644
--- a/drivers/staging/gpib/include/tnt4882_registers.h
+++ b/drivers/gpib/include/tnt4882_registers.h
@@ -32,11 +32,11 @@ enum {
CMDR = 0x1c, // command register
TIMER = 0x1e, // timer register
- STS1 = 0x10, /* T488 Status Register 1 */
- STS2 = 0x1c, /* T488 Status Register 2 */
+ STS1 = 0x10, // T488 Status Register 1
+ STS2 = 0x1c, // T488 Status Register 2
ISR0 = IMR0,
- ISR3 = 0x1a, /* T488 Interrupt Status Register 3 */
- BCR = 0x1f, /* bus control/status register */
+ ISR3 = 0x1a, // T488 Interrupt Status Register 3
+ BCR = 0x1f, // bus control/status register
BSR = BCR,
};
@@ -107,11 +107,11 @@ enum imr0_bits {
/* ISR0 -- Interrupt Status Register 0 */
enum isr0_bits {
- TNT_SYNC_BIT = 0x1, /* handshake sync */
- TNT_TO_BIT = 0x2, /* timeout */
- TNT_ATNI_BIT = 0x4, /* ATN interrupt */
+ TNT_SYNC_BIT = 0x1, /* handshake sync */
+ TNT_TO_BIT = 0x2, /* timeout */
+ TNT_ATNI_BIT = 0x4, /* ATN interrupt */
TNT_IFCI_BIT = 0x8, /* interface clear interrupt */
- TNT_EOS_BIT = 0x10, /* end of string */
+ TNT_EOS_BIT = 0x10, /* end of string */
TNT_NL_BIT = 0x20, /* new line receive */
TNT_STBO_BIT = 0x40, /* status byte out */
TNT_NBA_BIT = 0x80, /* new byte available */
@@ -129,7 +129,7 @@ enum isr3_bits {
};
enum keyreg_bits {
- MSTD = 0x20, // enable 350ns T1 delay
+ MSTD = 0x20, /* enable 350ns T1 delay */
};
/* STS1 -- Status Register 1 (read only) */
@@ -157,7 +157,7 @@ enum tnt4882_aux_cmds {
AUX_9914 = 0x15, // switch to 9914 mode
AUX_REQT = 0x18,
AUX_REQF = 0x19,
- AUX_PAGEIN = 0x50, /* page in alternate registers */
+ AUX_PAGEIN = 0x50, // page in alternate registers
AUX_HLDI = 0x51, // rfd holdoff immediately
AUX_CLEAR_END = 0x55,
AUX_7210 = 0x99, // switch to 7210 mode
@@ -183,7 +183,7 @@ enum auxi_bits {
enum sasr_bits {
ACRDY_BIT = 0x4, /* acceptor ready state */
- ADHS_BIT = 0x8, /* acceptor data holdoff state */
+ ADHS_BIT = 0x8, /* acceptor data holdoff state */
ANHS2_BIT = 0x10, /* acceptor not ready holdoff immediately state */
ANHS1_BIT = 0x20, /* acceptor not ready holdoff state */
AEHS_BIT = 0x40, /* acceptor end holdoff state */
diff --git a/drivers/staging/gpib/ines/Makefile b/drivers/gpib/ines/Makefile
index 88241f15ecea..88241f15ecea 100644
--- a/drivers/staging/gpib/ines/Makefile
+++ b/drivers/gpib/ines/Makefile
diff --git a/drivers/staging/gpib/ines/ines.h b/drivers/gpib/ines/ines.h
index f0210ce2470d..6ad57e9a1216 100644
--- a/drivers/staging/gpib/ines/ines.h
+++ b/drivers/gpib/ines/ines.h
@@ -97,9 +97,9 @@ enum extend_mode_bits {
TR3_TRIG_ENABLE_BIT = 0x1, // enable generation of trigger pulse T/R3 pin
// clear message available status bit when chip writes byte with EOI true
MAV_ENABLE_BIT = 0x2,
- EOS1_ENABLE_BIT = 0x4, // enable eos register 1
- EOS2_ENABLE_BIT = 0x8, // enable eos register 2
- EOIDIS_BIT = 0x10, // disable EOI interrupt when doing rfd holdoff on end?
+ EOS1_ENABLE_BIT = 0x4, // enable eos register 1
+ EOS2_ENABLE_BIT = 0x8, // enable eos register 2
+ EOIDIS_BIT = 0x10, // disable EOI interrupt when doing rfd holdoff on end?
XFER_COUNTER_ENABLE_BIT = 0x20,
XFER_COUNTER_OUTPUT_BIT = 0x40, // use counter for output, clear for input
// when xfer counter hits 0, assert EOI on write or RFD holdoff on read
@@ -121,10 +121,10 @@ enum ines_admr_bits {
};
enum xdma_control_bits {
- DMA_OUTPUT_BIT = 0x1, // use dma for output, clear for input
+ DMA_OUTPUT_BIT = 0x1, // use dma for output, clear for input
ENABLE_SYNC_DMA_BIT = 0x2,
- DMA_ACCESS_EVERY_CYCLE = 0x4,// dma accesses fifo every cycle, clear for every other cycle
- DMA_16BIT = 0x8, // clear for 8 bit transfers
+ DMA_ACCESS_EVERY_CYCLE = 0x4, // dma accesses fifo every cycle, clear for every other cycle
+ DMA_16BIT = 0x8, // clear for 8 bit transfers
};
enum bus_control_monitor_bits {
diff --git a/drivers/staging/gpib/ines/ines_gpib.c b/drivers/gpib/ines/ines_gpib.c
index c851fd014f48..a3cf846fd0f9 100644
--- a/drivers/staging/gpib/ines/ines_gpib.c
+++ b/drivers/gpib/ines/ines_gpib.c
@@ -152,7 +152,7 @@ static int ines_accel_read(struct gpib_board *board, u8 *buffer,
write_byte(nec_priv, INES_RFD_HLD_IMMEDIATE, AUXMR);
- //clear in fifo
+ // clear in fifo
nec7210_set_reg_bits(nec_priv, ADMR, IN_FIFO_ENABLE_BIT, 0);
nec7210_set_reg_bits(nec_priv, ADMR, IN_FIFO_ENABLE_BIT, IN_FIFO_ENABLE_BIT);
@@ -225,7 +225,7 @@ static int ines_accel_write(struct gpib_board *board, u8 *buffer, size_t length,
unsigned int num_bytes, i;
*bytes_written = 0;
- //clear out fifo
+ // clear out fifo
nec7210_set_reg_bits(nec_priv, ADMR, OUT_FIFO_ENABLE_BIT, 0);
nec7210_set_reg_bits(nec_priv, ADMR, OUT_FIFO_ENABLE_BIT, OUT_FIFO_ENABLE_BIT);
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/Makefile b/drivers/gpib/lpvo_usb_gpib/Makefile
index 360553488e6d..360553488e6d 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/Makefile
+++ b/drivers/gpib/lpvo_usb_gpib/Makefile
diff --git a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index dd68c4843490..dd68c4843490 100644
--- a/drivers/staging/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
diff --git a/drivers/staging/gpib/nec7210/Makefile b/drivers/gpib/nec7210/Makefile
index 64330f2e89d1..64330f2e89d1 100644
--- a/drivers/staging/gpib/nec7210/Makefile
+++ b/drivers/gpib/nec7210/Makefile
diff --git a/drivers/staging/gpib/nec7210/board.h b/drivers/gpib/nec7210/board.h
index ac3fe38ade57..ac3fe38ade57 100644
--- a/drivers/staging/gpib/nec7210/board.h
+++ b/drivers/gpib/nec7210/board.h
diff --git a/drivers/staging/gpib/nec7210/nec7210.c b/drivers/gpib/nec7210/nec7210.c
index 34a1cae4f486..bbf39367f5e4 100644
--- a/drivers/staging/gpib/nec7210/nec7210.c
+++ b/drivers/gpib/nec7210/nec7210.c
@@ -779,10 +779,10 @@ int nec7210_write(struct gpib_board *board, struct nec7210_priv *priv,
*bytes_written = 0;
- clear_bit(DEV_CLEAR_BN, &priv->state); //XXX
+ clear_bit(DEV_CLEAR_BN, &priv->state); // XXX
if (send_eoi)
- length-- ; /* save the last byte for sending EOI */
+ length-- ; // save the last byte for sending EOI
if (length > 0) {
// isa dma transfer
@@ -1005,7 +1005,7 @@ void nec7210_board_online(struct nec7210_priv *priv, const struct gpib_board *bo
nec7210_primary_address(board, priv, board->pad);
nec7210_secondary_address(board, priv, board->sad, board->sad >= 0);
- // enable interrupts
+ /* enable interrupts */
priv->reg_bits[IMR1] = HR_ERRIE | HR_DECIE | HR_ENDIE |
HR_DETIE | HR_CPTIE | HR_DOIE | HR_DIIE;
priv->reg_bits[IMR2] = IMR2_ENABLE_INTR_MASK;
diff --git a/drivers/staging/gpib/ni_usb/Makefile b/drivers/gpib/ni_usb/Makefile
index 469c5d16add3..469c5d16add3 100644
--- a/drivers/staging/gpib/ni_usb/Makefile
+++ b/drivers/gpib/ni_usb/Makefile
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/gpib/ni_usb/ni_usb_gpib.c
index 73ea72f34c0a..1f8412de9fa3 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c
+++ b/drivers/gpib/ni_usb/ni_usb_gpib.c
@@ -29,7 +29,7 @@ static void ni_usb_stop(struct ni_usb_priv *ni_priv);
static DEFINE_MUTEX(ni_usb_hotplug_lock);
-//calculates a reasonable timeout in that can be passed to usb functions
+// calculates a reasonable timeout in msecs that can be passed to usb functions
static inline unsigned long ni_usb_timeout_msecs(unsigned int usec)
{
if (usec == 0)
@@ -327,7 +327,10 @@ static void ni_usb_soft_update_status(struct gpib_board *board, unsigned int ni_
board->status &= ~clear_mask;
board->status &= ~ni_usb_ibsta_mask;
board->status |= ni_usb_ibsta & ni_usb_ibsta_mask;
- //FIXME should generate events on DTAS and DCAS
+ if (ni_usb_ibsta & DCAS)
+ push_gpib_event(board, EVENT_DEV_CLR);
+ if (ni_usb_ibsta & DTAS)
+ push_gpib_event(board, EVENT_DEV_TRG);
spin_lock_irqsave(&board->spinlock, flags);
/* remove set status bits from monitored set why ?***/
@@ -569,7 +572,7 @@ static int ni_usb_write_registers(struct ni_usb_priv *ni_priv,
mutex_unlock(&ni_priv->addressed_transfer_lock);
ni_usb_parse_reg_write_status_block(in_data, &status, &reg_writes_completed);
- //FIXME parse extra 09 status bits and termination
+ // FIXME parse extra 09 status bits and termination
kfree(in_data);
if (status.id != NIUSB_REG_WRITE_ID) {
dev_err(&usb_dev->dev, "parse error, id=0x%x != NIUSB_REG_WRITE_ID\n", status.id);
@@ -694,8 +697,12 @@ static int ni_usb_read(struct gpib_board *board, u8 *buffer, size_t length,
*/
break;
case NIUSB_ATN_STATE_ERROR:
- retval = -EIO;
- dev_err(&usb_dev->dev, "read when ATN set\n");
+ if (status.ibsta & DCAS) {
+ retval = -EINTR;
+ } else {
+ retval = -EIO;
+ dev_dbg(&usb_dev->dev, "read when ATN set stat: 0x%06x\n", status.ibsta);
+ }
break;
case NIUSB_ADDRESSING_ERROR:
retval = -EIO;
@@ -1106,7 +1113,7 @@ static int ni_usb_request_system_control(struct gpib_board *board, int request_c
return 0;
}
-//FIXME maybe the interface should have a "pulse interface clear" function that can return an error?
+// FIXME maybe the interface should have a "pulse interface clear" function that can return an error?
static void ni_usb_interface_clear(struct gpib_board *board, int assert)
{
int retval;
@@ -1363,7 +1370,7 @@ static int ni_usb_parallel_poll(struct gpib_board *board, u8 *result)
return -ENOMEM;
out_data[i++] = NIUSB_IBRPP_ID;
- out_data[i++] = 0xf0; //FIXME: this should be the parallel poll timeout code
+ out_data[i++] = 0xf0; // FIXME: this should be the parallel poll timeout code
out_data[i++] = 0x0;
out_data[i++] = 0x0;
i += ni_usb_bulk_termination(&out_data[i]);
diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h b/drivers/gpib/ni_usb/ni_usb_gpib.h
index b011e131201c..688f5e08792f 100644
--- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.h
+++ b/drivers/gpib/ni_usb/ni_usb_gpib.h
@@ -72,10 +72,10 @@ struct ni_usb_priv {
struct urb *bulk_urb;
struct urb *interrupt_urb;
u8 interrupt_buffer[0x11];
- struct mutex addressed_transfer_lock; // protect transfer lock
- struct mutex bulk_transfer_lock; // protect bulk message sends
- struct mutex control_transfer_lock; // protect control messages
- struct mutex interrupt_transfer_lock; // protect interrupt messages
+ struct mutex addressed_transfer_lock; // protect transfer lock
+ struct mutex bulk_transfer_lock; // protect bulk message sends
+ struct mutex control_transfer_lock; // protect control messages
+ struct mutex interrupt_transfer_lock; // protect interrupt messages
struct timer_list bulk_timer;
struct ni_usb_urb_ctx context;
int product_id;
@@ -145,7 +145,7 @@ enum ni_usb_error_codes {
* CIC with no listener
*/
NIUSB_NO_LISTENER_ERROR = 8,
- // get NIUSB_TIMEOUT_ERROR on board read/write timeout
+ /* get NIUSB_TIMEOUT_ERROR on board read/write timeout */
NIUSB_TIMEOUT_ERROR = 10,
};
diff --git a/drivers/staging/gpib/pc2/Makefile b/drivers/gpib/pc2/Makefile
index 481ee4296e1b..481ee4296e1b 100644
--- a/drivers/staging/gpib/pc2/Makefile
+++ b/drivers/gpib/pc2/Makefile
diff --git a/drivers/staging/gpib/pc2/pc2_gpib.c b/drivers/gpib/pc2/pc2_gpib.c
index 2282492025b7..9f3943d1df66 100644
--- a/drivers/staging/gpib/pc2/pc2_gpib.c
+++ b/drivers/gpib/pc2/pc2_gpib.c
@@ -36,7 +36,7 @@ static const int pc2_2a_iosize = 16;
static const int pc2a_reg_offset = 0x400;
static const int pc2_reg_offset = 1;
-//interrupt service routine
+// interrupt service routine
static irqreturn_t pc2_interrupt(int irq, void *arg);
static irqreturn_t pc2a_interrupt(int irq, void *arg);
@@ -593,7 +593,7 @@ static struct gpib_interface pc2a_cb7210_interface = {
.parallel_poll_configure = pc2_parallel_poll_configure,
.parallel_poll_response = pc2_parallel_poll_response,
.local_parallel_poll_mode = NULL, // XXX
- .line_status = NULL, //XXX
+ .line_status = NULL, // XXX
.update_status = pc2_update_status,
.primary_address = pc2_primary_address,
.secondary_address = pc2_secondary_address,
diff --git a/drivers/staging/gpib/tms9914/Makefile b/drivers/gpib/tms9914/Makefile
index 4705ab07f413..4705ab07f413 100644
--- a/drivers/staging/gpib/tms9914/Makefile
+++ b/drivers/gpib/tms9914/Makefile
diff --git a/drivers/staging/gpib/tms9914/tms9914.c b/drivers/gpib/tms9914/tms9914.c
index 04d57108efc7..72a11596a35e 100644
--- a/drivers/staging/gpib/tms9914/tms9914.c
+++ b/drivers/gpib/tms9914/tms9914.c
@@ -535,7 +535,7 @@ int tms9914_read(struct gpib_board *board, struct tms9914_priv *priv, u8 *buffer
buffer += num_bytes;
length -= num_bytes;
}
- // read last bytes if we havn't received an END yet
+ // read last bytes if we haven't received an END yet
if (*end == 0) {
// make sure we holdoff after last byte read
tms9914_set_holdoff_mode(priv, TMS9914_HOLDOFF_ALL);
@@ -647,7 +647,7 @@ static void check_my_address_state(struct gpib_board *board,
} else if (cmd_byte == MTA(board->pad)) {
priv->primary_talk_addressed = 1;
if (board->sad < 0)
- //make active talker
+ // make active talker
write_byte(priv, AUX_TON | AUX_CS, AUXCR);
} else if (board->sad >= 0 && priv->primary_talk_addressed &&
cmd_byte == MSA(board->sad)) {
@@ -730,7 +730,7 @@ irqreturn_t tms9914_interrupt_have_status(struct gpib_board *board, struct tms99
if (status0 & HR_SPAS) {
priv->spoll_status &= ~request_service_bit;
write_byte(priv, priv->spoll_status, SPMR);
- //FIXME: set SPOLL status bit
+ // FIXME: set SPOLL status bit
}
// record service request in status
if (status1 & HR_SRQ)
@@ -841,7 +841,7 @@ void tms9914_board_reset(struct tms9914_priv *priv)
/* parallel poll unconfigure */
write_byte(priv, 0, PPR);
- // request for data holdoff
+ /* request for data holdoff */
tms9914_set_holdoff_mode(priv, TMS9914_HOLDOFF_ALL);
}
EXPORT_SYMBOL_GPL(tms9914_board_reset);
@@ -852,7 +852,7 @@ void tms9914_online(struct gpib_board *board, struct tms9914_priv *priv)
tms9914_primary_address(board, priv, board->pad);
tms9914_secondary_address(board, priv, board->sad, board->sad >= 0);
- // enable tms9914 interrupts
+ /* enable tms9914 interrupts */
priv->imr0_bits |= HR_MACIE | HR_RLCIE | HR_ENDIE | HR_BOIE | HR_BIIE |
HR_SPASIE;
priv->imr1_bits |= HR_MAIE | HR_SRQIE | HR_UNCIE | HR_ERRIE | HR_IFCIE |
@@ -861,7 +861,7 @@ void tms9914_online(struct gpib_board *board, struct tms9914_priv *priv)
write_byte(priv, priv->imr1_bits, IMR1);
write_byte(priv, AUX_DAI, AUXCR);
- // turn off reset state
+ /* turn off reset state */
write_byte(priv, AUX_CHIP_RESET, AUXCR);
}
EXPORT_SYMBOL_GPL(tms9914_online);
diff --git a/drivers/staging/gpib/tnt4882/Makefile b/drivers/gpib/tnt4882/Makefile
index fa1687ad0d1b..fa1687ad0d1b 100644
--- a/drivers/staging/gpib/tnt4882/Makefile
+++ b/drivers/gpib/tnt4882/Makefile
diff --git a/drivers/staging/gpib/tnt4882/mite.c b/drivers/gpib/tnt4882/mite.c
index 847b96f411bd..847b96f411bd 100644
--- a/drivers/staging/gpib/tnt4882/mite.c
+++ b/drivers/gpib/tnt4882/mite.c
diff --git a/drivers/staging/gpib/tnt4882/mite.h b/drivers/gpib/tnt4882/mite.h
index 522d6b56cb7d..a1fdba9672a0 100644
--- a/drivers/staging/gpib/tnt4882/mite.h
+++ b/drivers/gpib/tnt4882/mite.h
@@ -219,15 +219,15 @@ void mite_list_devices(void);
#define MITE_AMHOST_A24_BLOCK 0x3b
enum mite_registers {
- MITE_IODWBSR = 0xc0, //IO Device Window Base Size Register
- MITE_CSIGR = 0x460, //chip signature
- MITE_IODWBSR_1 = 0xc4, // IO Device Window Base Size Register 1 (used by 6602 boards)
+ MITE_IODWBSR = 0xc0, // IO Device Window Base Size Register
+ MITE_CSIGR = 0x460, // chip signature
+ MITE_IODWBSR_1 = 0xc4, // IO Device Window Base Size Register 1 (used by 6602 boards)
MITE_IODWCR_1 = 0xf4
};
enum MITE_IODWBSR_bits {
- WENAB = 0x80, // window enable
- WENAB_6602 = 0x8c // window enable for 6602 boards
+ WENAB = 0x80, // window enable
+ WENAB_6602 = 0x8c // window enable for 6602 boards
};
#endif
diff --git a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c b/drivers/gpib/tnt4882/tnt4882_gpib.c
index a17b69e34986..c03a976b7380 100644
--- a/drivers/staging/gpib/tnt4882/tnt4882_gpib.c
+++ b/drivers/gpib/tnt4882/tnt4882_gpib.c
@@ -570,7 +570,7 @@ static irqreturn_t tnt4882_internal_interrupt(struct gpib_board *board)
if (isr0_bits & TNT_IFCI_BIT)
push_gpib_event(board, EVENT_IFC);
- //XXX don't need this wakeup, one below should do?
+ // XXX don't need this wakeup, one below should do?
// wake_up_interruptible(&board->wait);
if (isr3_bits & HR_NFF)
@@ -730,7 +730,7 @@ static int tnt4882_parallel_poll(struct gpib_board *board, u8 *result)
if (tnt_priv->nec7210_priv.type != NEC7210) {
tnt_priv->auxg_bits |= RPP2_BIT;
write_byte(&tnt_priv->nec7210_priv, tnt_priv->auxg_bits, AUXMR);
- udelay(2); //FIXME use parallel poll timeout
+ udelay(2); // FIXME use parallel poll timeout
*result = read_byte(&tnt_priv->nec7210_priv, CPTR);
tnt_priv->auxg_bits &= ~RPP2_BIT;
write_byte(&tnt_priv->nec7210_priv, tnt_priv->auxg_bits, AUXMR);
@@ -1522,7 +1522,6 @@ static void __exit tnt4882_exit_module(void)
#include <linux/moduleparam.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
-#include <linux/ioport.h>
#include <linux/io.h>
#include <pcmcia/cistpl.h>
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index d8ac40d0eb6f..c74da29253e8 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -6,6 +6,9 @@
config GPIOLIB_LEGACY
def_bool y
+config HAVE_SHARED_GPIOS
+ bool
+
menuconfig GPIOLIB
bool "GPIO Support"
help
@@ -42,13 +45,10 @@ config GPIOLIB_IRQCHIP
select IRQ_DOMAIN
bool
-config OF_GPIO_MM_GPIOCHIP
- bool
- help
- This adds support for the legacy 'struct of_mm_gpio_chip' interface
- from PowerPC. Existing drivers using this interface need to select
- this symbol, but new drivers should use the generic gpio-regmap
- infrastructure instead.
+config GPIO_SHARED
+ def_bool y
+ depends on HAVE_SHARED_GPIOS || COMPILE_TEST
+ select AUXILIARY_BUS
config DEBUG_GPIO
bool "Debug GPIO calls"
@@ -303,7 +303,7 @@ config GPIO_EN7523
config GPIO_EP93XX
def_bool y
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
@@ -408,8 +408,7 @@ config GPIO_IMX_SCU
config GPIO_IXP4XX
bool "Intel IXP4xx GPIO"
- depends on ARCH_IXP4XX
- depends on OF
+ depends on (ARCH_IXP4XX && OF) || COMPILE_TEST
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
@@ -437,6 +436,7 @@ config GPIO_LOONGSON_64BIT
depends on LOONGARCH || COMPILE_TEST
depends on OF_GPIO
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say yes here to support the GPIO functionality of a number of
Loongson series of chips. The Loongson GPIO controller supports
@@ -476,7 +476,6 @@ config GPIO_MENZ127
config GPIO_MM_LANTIQ
bool "Lantiq Memory mapped GPIOs"
depends on LANTIQ && SOC_XWAY
- select OF_GPIO_MM_GPIOCHIP
help
This enables support for memory mapped GPIOs on the External Bus Unit
(EBU) found on Lantiq SoCs. The GPIOs are output only as they are
@@ -485,7 +484,6 @@ config GPIO_MM_LANTIQ
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
- select OF_GPIO_MM_GPIOCHIP
config GPIO_MPC8XXX
bool "MPC512x/MPC8xxx/QorIQ GPIO support"
@@ -735,7 +733,8 @@ config GPIO_TANGIER
If built as a module its name will be gpio-tangier.
config GPIO_TB10X
- bool
+ bool "Abilis Systems TB10x GPIO controller"
+ depends on ARC_PLAT_TB10X || COMPILE_TEST
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
select OF_GPIO
@@ -884,7 +883,7 @@ config GPIO_ZYNQMP_MODEPIN
config GPIO_LOONGSON1
tristate "Loongson1 GPIO support"
- depends on MACH_LOONGSON32
+ depends on MACH_LOONGSON32 || COMPILE_TEST
select GPIO_GENERIC
help
Say Y or M here to support GPIO on Loongson1 SoCs.
@@ -1194,14 +1193,18 @@ config GPIO_PCA953X
4 bits: pca9536, pca9537
8 bits: max7310, max7315, pca6107, pca9534, pca9538, pca9554,
- pca9556, pca9557, pca9574, tca6408, tca9554, xra1202
+ pca9556, pca9557, pca9574, tca6408, tca9554, xra1202,
+ pcal6408, pcal9554b, tca9538
16 bits: max7312, max7313, pca9535, pca9539, pca9555, pca9575,
- tca6416
+ tca6416, pca6416, pcal6416, pcal9535, pcal9555a, max7318,
+ tca9539
- 24 bits: tca6424
+ 18 bits: tca6418
- 40 bits: pca9505, pca9698
+ 24 bits: tca6424, pcal6524
+
+ 40 bits: pca9505, pca9698, pca9506
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
@@ -1409,7 +1412,7 @@ config HTC_EGPIO
config GPIO_ELKHARTLAKE
tristate "Intel Elkhart Lake PSE GPIO support"
- depends on X86 || COMPILE_TEST
+ depends on INTEL_EHL_PSE_IO
select GPIO_TANGIER
help
Select this option to enable GPIO support for Intel Elkhart Lake
@@ -1492,6 +1495,18 @@ config GPIO_MADERA
help
Support for GPIOs on Cirrus Logic Madera class codecs.
+config GPIO_MAX7360
+ tristate "MAX7360 GPIO support"
+ depends on MFD_MAX7360
+ select GPIO_REGMAP
+ select REGMAP_IRQ
+ help
+ Allows using MAX7360 I/O Expander PWM lines as GPIO and keypad COL
+ lines as GPO.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-max7360.
+
config GPIO_MAX77620
tristate "GPIO support for PMIC MAX77620 and MAX20024"
depends on MFD_MAX77620
@@ -1522,6 +1537,18 @@ config GPIO_MAX77759
This driver can also be built as a module. If so, the module will be
called gpio-max77759.
+config GPIO_NCT6694
+ tristate "Nuvoton NCT6694 GPIO controller support"
+ depends on MFD_NCT6694
+ select GENERIC_IRQ_CHIP
+ select GPIOLIB_IRQCHIP
+ help
+ This driver supports 8 GPIO pins per bank that can all be interrupt
+ sources.
+
+ This driver can also be built as a module. If so, the module will be
+ called gpio-nct6694.
+
config GPIO_PALMAS
tristate "TI PALMAS series PMICs GPIO"
depends on MFD_PALMAS
@@ -1537,6 +1564,15 @@ config GPIO_PMIC_EIC_SPRD
help
Say yes here to support Spreadtrum PMIC EIC device.
+config GPIO_QIXIS_FPGA
+ tristate "NXP QIXIS FPGA GPIO support"
+ depends on MFD_SIMPLE_MFD_I2C || COMPILE_TEST
+ select GPIO_REGMAP
+ help
+ This enables support for the GPIOs found in the QIXIS FPGA which is
+ integrated on some NXP Layerscape boards such as LX2160ARDB and
+ LS1046AQDS.
+
config GPIO_RC5T583
bool "RICOH RC5T583 GPIO"
depends on MFD_RC5T583
@@ -1559,7 +1595,7 @@ config GPIO_SL28CPLD
called gpio-sl28cpld.
config GPIO_STMPE
- bool "STMPE GPIOs"
+ tristate "STMPE GPIOs"
depends on MFD_STMPE
depends on OF_GPIO
select GPIOLIB_IRQCHIP
@@ -1923,6 +1959,17 @@ config GPIO_MPSSE
GPIO driver for FTDI's MPSSE interface. These can do input and
output. Each MPSSE provides 16 IO pins.
+config GPIO_USBIO
+ tristate "Intel USBIO GPIO support"
+ depends on USB_USBIO
+ default USB_USBIO
+ help
+ Select this option to enable the GPIO driver for the Intel
+ USBIO driver stack.
+
+ This driver can also be built as a module. If so, the module
+ will be called gpio_usbio.
+
endmenu
menu "Virtual GPIO drivers"
@@ -1978,6 +2025,15 @@ config GPIO_SIM
This enables the GPIO simulator - a configfs-based GPIO testing
driver.
+config GPIO_SHARED_PROXY
+ tristate "Proxy driver for non-exclusive GPIOs"
+ default m
+ depends on GPIO_SHARED || COMPILE_TEST
+ select AUXILIARY_BUS
+ help
+ This enables the GPIO shared proxy driver - an abstraction layer
+ for GPIO pins that are shared by multiple devices.
+
endmenu
menu "GPIO Debugging utilities"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 379f55e9ed1e..2421a8fd3733 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_GPIO_SYSFS) += gpiolib-sysfs.o
obj-$(CONFIG_GPIO_ACPI) += gpiolib-acpi.o
gpiolib-acpi-y := gpiolib-acpi-core.o gpiolib-acpi-quirks.o
obj-$(CONFIG_GPIOLIB) += gpiolib-swnode.o
+obj-$(CONFIG_GPIO_SHARED) += gpiolib-shared.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_REGMAP) += gpio-regmap.o
@@ -106,6 +107,7 @@ obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
+obj-$(CONFIG_GPIO_MAX7360) += gpio-max7360.o
obj-$(CONFIG_GPIO_MAX77620) += gpio-max77620.o
obj-$(CONFIG_GPIO_MAX77650) += gpio-max77650.o
obj-$(CONFIG_GPIO_MAX77759) += gpio-max77759.o
@@ -128,6 +130,7 @@ obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_GPIO_NCT6694) += gpio-nct6694.o
obj-$(CONFIG_GPIO_NOMADIK) += gpio-nomadik.o
obj-$(CONFIG_GPIO_NPCM_SGPIO) += gpio-npcm-sgpio.o
obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o
@@ -144,6 +147,7 @@ obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o
obj-$(CONFIG_GPIO_POLARFIRE_SOC) += gpio-mpfs.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_QIXIS_FPGA) += gpio-qixis-fpga.o
obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
@@ -157,6 +161,7 @@ obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o
obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o
obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
+obj-$(CONFIG_GPIO_SHARED_PROXY) += gpio-shared-proxy.o
obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o
obj-$(CONFIG_GPIO_SIM) += gpio-sim.o
obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o
@@ -192,6 +197,7 @@ obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
+obj-$(CONFIG_GPIO_USBIO) += gpio-usbio.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
obj-$(CONFIG_GPIO_VIRTUSER) += gpio-virtuser.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 7a09a4f58551..5acaeab029ec 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -86,17 +86,6 @@ Work items:
-------------------------------------------------------------------------------
-Get rid of <linux/gpio/legacy-of-mm-gpiochip.h>
-
-Work items:
-
-- Get rid of struct of_mm_gpio_chip altogether: use the generic MMIO
- GPIO for all current users (see below). Delete struct of_mm_gpio_chip,
- to_of_mm_gpio_chip(), of_mm_gpiochip_add_data(), of_mm_gpiochip_remove(),
- CONFIG_OF_GPIO_MM_GPIOCHIP from the kernel.
-
--------------------------------------------------------------------------------
-
Collect drivers
Collect GPIO drivers from arch/* and other places that should be placed
@@ -131,11 +120,6 @@ Work items:
helpers (x86 inb()/outb()) and convert port-mapped I/O drivers to use
this with dry-coding and sending to maintainers to test
-- Move the MMIO GPIO specific fields out of struct gpio_chip into a
- dedicated structure. Currently every GPIO chip has them if gpio-mmio is
- enabled in Kconfig even if it itself doesn't register with the helper
- library.
-
-------------------------------------------------------------------------------
Generic regmap GPIO
@@ -176,18 +160,6 @@ cannot be converted yet, but watch this space!
-------------------------------------------------------------------------------
-Convert all GPIO chips to using the new, value returning line setters
-
-struct gpio_chip's set() and set_multiple() callbacks are now deprecated. They
-return void and thus do not allow drivers to indicate failure to set the line
-value back to the caller.
-
-We've now added new variants - set_rv() and set_multiple_rv() that return an
-integer. Let's convert all GPIO drivers treewide to use the new callbacks,
-remove the old ones and finally rename the new ones back to the old names.
-
--------------------------------------------------------------------------------
-
Remove legacy sysfs features
We have two parallel per-chip class devices and per-exported-line attribute
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index ffe7e1cb6b23..fe5c10cd5c32 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x5,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
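Editor's note: a minimal sketch (not part of the patch) of what setting .max_register buys — it bounds the regmap's valid address window and its debugfs register dump. The struct name and .reg_bits value are assumptions; the other fields mirror this hunk.

#include <linux/regmap.h>

/* Hedged sketch; mirrors the 0x0-0x5 register window declared above. */
static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,		/* assumed address width */
	.reg_stride = 1,
	.val_bits = 8,
	.io_port = true,
	.max_register = 0x5,	/* last valid register offset */
};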
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index af9d8b3a711d..416f265d09d0 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -12,6 +12,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -28,6 +29,7 @@
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/forwarder.h>
#include <linux/gpio/machine.h>
#include "dev-sync-probe.h"
@@ -244,18 +246,34 @@ struct gpiochip_fwd {
spinlock_t slock; /* protects tmp[] if !can_sleep */
};
struct gpiochip_fwd_timing *delay_timings;
+ void *data;
+ unsigned long *valid_mask;
unsigned long tmp[]; /* values and descs for multiple ops */
};
-#define fwd_tmp_values(fwd) &(fwd)->tmp[0]
-#define fwd_tmp_descs(fwd) (void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)]
+#define fwd_tmp_values(fwd) (&(fwd)->tmp[0])
+#define fwd_tmp_descs(fwd) ((void *)&(fwd)->tmp[BITS_TO_LONGS((fwd)->chip.ngpio)])
#define fwd_tmp_size(ngpios) (BITS_TO_LONGS((ngpios)) + (ngpios))
+static int gpio_fwd_request(struct gpio_chip *chip, unsigned int offset)
+{
+ struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+
+ return test_bit(offset, fwd->valid_mask) ? 0 : -ENODEV;
+}
+
static int gpio_fwd_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
+ /*
+ * get_direction() is called during gpiochip registration; return
+ * -ENODEV if there is no GPIO desc for the line.
+ */
+ if (!test_bit(offset, fwd->valid_mask))
+ return -ENODEV;
+
return gpiod_get_direction(fwd->descs[offset]);
}
@@ -453,10 +471,11 @@ static int gpiochip_fwd_delay_of_xlate(struct gpio_chip *chip,
return line;
}
-static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
- struct gpiochip_fwd *fwd)
+static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
- fwd->delay_timings = devm_kcalloc(dev, chip->ngpio,
+ struct gpio_chip *chip = &fwd->chip;
+
+ fwd->delay_timings = devm_kcalloc(chip->parent, chip->ngpio,
sizeof(*fwd->delay_timings),
GFP_KERNEL);
if (!fwd->delay_timings)
@@ -468,67 +487,235 @@ static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *c
return 0;
}
#else
-static int gpiochip_fwd_setup_delay_line(struct device *dev, struct gpio_chip *chip,
- struct gpiochip_fwd *fwd)
+static int gpiochip_fwd_setup_delay_line(struct gpiochip_fwd *fwd)
{
return 0;
}
#endif /* !CONFIG_OF_GPIO */
/**
- * gpiochip_fwd_create() - Create a new GPIO forwarder
- * @dev: Parent device pointer
- * @ngpios: Number of GPIOs in the forwarder.
- * @descs: Array containing the GPIO descriptors to forward to.
- * This array must contain @ngpios entries, and must not be deallocated
- * before the forwarder has been destroyed again.
- * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
+ * gpiochip_fwd_get_gpiochip - Get the GPIO chip for the GPIO forwarder
+ * @fwd: GPIO forwarder
*
- * This function creates a new gpiochip, which forwards all GPIO operations to
- * the passed GPIO descriptors.
+ * Returns: The GPIO chip for the GPIO forwarder
+ */
+struct gpio_chip *gpiochip_fwd_get_gpiochip(struct gpiochip_fwd *fwd)
+{
+ return &fwd->chip;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_gpiochip, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_get_data - Get driver-private data for the GPIO forwarder
+ * @fwd: GPIO forwarder
*
- * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
- * code on failure.
+ * Returns: The driver-private data for the GPIO forwarder
*/
-static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
- unsigned int ngpios,
- struct gpio_desc *descs[],
- unsigned long features)
+void *gpiochip_fwd_get_data(struct gpiochip_fwd *fwd)
+{
+ return fwd->data;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_get_data, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_request - Request a line of the GPIO forwarder
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line to request
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_request(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_request(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_request, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get_direction - Return the current direction of a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: 0 for output, 1 for input, or an error code in case of error.
+ */
+int gpiochip_fwd_gpio_get_direction(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get_direction(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_direction, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_direction_output - Set a GPIO forwarder line direction to
+ * output
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @value: value to set
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_direction_output(struct gpiochip_fwd *fwd, unsigned int offset,
+ int value)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_direction_output(gc, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_output, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_direction_input - Set a GPIO forwarder line direction to input
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_direction_input(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_direction_input(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_direction_input, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get - Return a GPIO forwarder line's value
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: The GPIO's logical value, i.e. taking the ACTIVE_LOW status into
+ * account, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_get(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_get_multiple - Get values for multiple GPIO forwarder lines
+ * @fwd: GPIO forwarder
+ * @mask: bit mask array; one bit per line; BITS_PER_LONG bits per word defines
+ * which lines are to be read
+ * @bits: bit value array; one bit per line; BITS_PER_LONG bits per word will
+ * contain the read values for the lines specified by mask
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_get_multiple_locked(gc, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_get_multiple, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set - Assign value to a GPIO forwarder line.
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @value: value to set
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_set(struct gpiochip_fwd *fwd, unsigned int offset, int value)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set(gc, offset, value);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set_multiple - Assign values to multiple GPIO forwarder lines
+ * @fwd: GPIO forwarder
+ * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word
+ * defines which outputs are to be changed
+ * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word
+ * defines the values the outputs specified by mask are to be set to
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_gpio_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set_multiple_locked(gc, mask, bits);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_multiple, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_set_config - Set @config for a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ * @config: Same packed config format as generic pinconf
+ *
+ * Returns: 0 on success, %-ENOTSUPP if the controller doesn't support setting
+ * the configuration.
+ */
+int gpiochip_fwd_gpio_set_config(struct gpiochip_fwd *fwd, unsigned int offset,
+ unsigned long config)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_set_config(gc, offset, config);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_set_config, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_gpio_to_irq - Return the IRQ corresponding to a GPIO forwarder line
+ * @fwd: GPIO forwarder
+ * @offset: the offset of the line
+ *
+ * Returns: The Linux IRQ corresponding to the passed line, or an error code in
+ * case of error.
+ */
+int gpiochip_fwd_gpio_to_irq(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ struct gpio_chip *gc = gpiochip_fwd_get_gpiochip(fwd);
+
+ return gpio_fwd_to_irq(gc, offset);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_gpio_to_irq, "GPIO_FORWARDER");
+
+/**
+ * devm_gpiochip_fwd_alloc - Allocate and initialize a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder
+ *
+ * Returns: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+struct gpiochip_fwd *devm_gpiochip_fwd_alloc(struct device *dev,
+ unsigned int ngpios)
{
- const char *label = dev_name(dev);
struct gpiochip_fwd *fwd;
struct gpio_chip *chip;
- unsigned int i;
- int error;
- fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)),
- GFP_KERNEL);
+ fwd = devm_kzalloc(dev, struct_size(fwd, tmp, fwd_tmp_size(ngpios)), GFP_KERNEL);
if (!fwd)
return ERR_PTR(-ENOMEM);
- chip = &fwd->chip;
-
- /*
- * If any of the GPIO lines are sleeping, then the entire forwarder
- * will be sleeping.
- * If any of the chips support .set_config(), then the forwarder will
- * support setting configs.
- */
- for (i = 0; i < ngpios; i++) {
- struct gpio_chip *parent = gpiod_to_chip(descs[i]);
+ fwd->descs = devm_kcalloc(dev, ngpios, sizeof(*fwd->descs), GFP_KERNEL);
+ if (!fwd->descs)
+ return ERR_PTR(-ENOMEM);
- dev_dbg(dev, "%u => gpio %d irq %d\n", i,
- desc_to_gpio(descs[i]), gpiod_to_irq(descs[i]));
+ fwd->valid_mask = devm_bitmap_zalloc(dev, ngpios, GFP_KERNEL);
+ if (!fwd->valid_mask)
+ return ERR_PTR(-ENOMEM);
- if (gpiod_cansleep(descs[i]))
- chip->can_sleep = true;
- if (parent && parent->set_config)
- chip->set_config = gpio_fwd_set_config;
- }
+ chip = &fwd->chip;
- chip->label = label;
+ chip->label = dev_name(dev);
chip->parent = dev;
chip->owner = THIS_MODULE;
+ chip->request = gpio_fwd_request;
chip->get_direction = gpio_fwd_get_direction;
chip->direction_input = gpio_fwd_direction_input;
chip->direction_output = gpio_fwd_direction_output;
@@ -536,23 +723,132 @@ static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
chip->get_multiple = gpio_fwd_get_multiple_locked;
chip->set = gpio_fwd_set;
chip->set_multiple = gpio_fwd_set_multiple_locked;
+ chip->set_config = gpio_fwd_set_config;
chip->to_irq = gpio_fwd_to_irq;
chip->base = -1;
chip->ngpio = ngpios;
- fwd->descs = descs;
+
+ return fwd;
+}
+EXPORT_SYMBOL_NS_GPL(devm_gpiochip_fwd_alloc, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_desc_add - Add a GPIO desc to the forwarder
+ * @fwd: GPIO forwarder
+ * @desc: GPIO descriptor to register
+ * @offset: offset for the GPIO in the forwarder
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_desc_add(struct gpiochip_fwd *fwd, struct gpio_desc *desc,
+ unsigned int offset)
+{
+ struct gpio_chip *chip = &fwd->chip;
+
+ if (offset >= chip->ngpio)
+ return -EINVAL;
+
+ if (test_and_set_bit(offset, fwd->valid_mask))
+ return -EEXIST;
+
+ /*
+ * If any of the GPIO lines are sleeping, then the entire forwarder
+ * will be sleeping.
+ */
+ if (gpiod_cansleep(desc))
+ chip->can_sleep = true;
+
+ fwd->descs[offset] = desc;
+
+ dev_dbg(chip->parent, "%u => gpio %d irq %d\n", offset,
+ desc_to_gpio(desc), gpiod_to_irq(desc));
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_add, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_desc_free - Remove a GPIO desc from the forwarder
+ * @fwd: GPIO forwarder
+ * @offset: offset of GPIO desc to remove
+ */
+void gpiochip_fwd_desc_free(struct gpiochip_fwd *fwd, unsigned int offset)
+{
+ if (test_and_clear_bit(offset, fwd->valid_mask))
+ gpiod_put(fwd->descs[offset]);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_desc_free, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_register - Register a GPIO forwarder
+ * @fwd: GPIO forwarder
+ * @data: driver-private data associated with this forwarder
+ *
+ * Returns: 0 on success, or negative errno on failure.
+ */
+int gpiochip_fwd_register(struct gpiochip_fwd *fwd, void *data)
+{
+ struct gpio_chip *chip = &fwd->chip;
+
+ /*
+ * Some GPIO descs may not have been registered yet. They can still be
+ * added at runtime, so we have to assume they can sleep.
+ */
+ if (!bitmap_full(fwd->valid_mask, chip->ngpio))
+ chip->can_sleep = true;
if (chip->can_sleep)
mutex_init(&fwd->mlock);
else
spin_lock_init(&fwd->slock);
+ fwd->data = data;
+
+ return devm_gpiochip_add_data(chip->parent, chip, fwd);
+}
+EXPORT_SYMBOL_NS_GPL(gpiochip_fwd_register, "GPIO_FORWARDER");
+
+/**
+ * gpiochip_fwd_create() - Create a new GPIO forwarder
+ * @dev: Parent device pointer
+ * @ngpios: Number of GPIOs in the forwarder.
+ * @descs: Array containing the GPIO descriptors to forward to.
+ * This array must contain @ngpios entries, and may be deallocated
+ * afterwards, as the forwarder keeps its own copy.
+ * @features: Bitwise ORed features as defined with FWD_FEATURE_*.
+ *
+ * This function creates a new gpiochip, which forwards all GPIO operations to
+ * the passed GPIO descriptors.
+ *
+ * Return: An opaque object pointer, or an ERR_PTR()-encoded negative error
+ * code on failure.
+ */
+static struct gpiochip_fwd *gpiochip_fwd_create(struct device *dev,
+ unsigned int ngpios,
+ struct gpio_desc *descs[],
+ unsigned long features)
+{
+ struct gpiochip_fwd *fwd;
+ unsigned int i;
+ int error;
+
+ fwd = devm_gpiochip_fwd_alloc(dev, ngpios);
+ if (IS_ERR(fwd))
+ return fwd;
+
+ for (i = 0; i < ngpios; i++) {
+ error = gpiochip_fwd_desc_add(fwd, descs[i], i);
+ if (error)
+ return ERR_PTR(error);
+ }
+
if (features & FWD_FEATURE_DELAY) {
- error = gpiochip_fwd_setup_delay_line(dev, chip, fwd);
+ error = gpiochip_fwd_setup_delay_line(fwd);
if (error)
return ERR_PTR(error);
}
- error = devm_gpiochip_add_data(dev, chip, fwd);
+ error = gpiochip_fwd_register(fwd, NULL);
if (error)
return ERR_PTR(error);
@@ -1334,6 +1630,7 @@ static int gpio_aggregator_probe(struct platform_device *pdev)
return PTR_ERR(fwd);
platform_set_drvdata(pdev, fwd);
+ devm_kfree(dev, descs);
return 0;
}
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index b70036587d9c..8458a6949c65 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/spinlock.h>
#include <linux/acpi.h>
#include <linux/platform_device.h>
@@ -24,54 +25,50 @@
#define PT_SYNC_REG 0x28
struct pt_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
};
static int pt_gpio_request(struct gpio_chip *gc, unsigned offset)
{
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 using_pins;
dev_dbg(gc->parent, "pt_gpio_request offset=%x\n", offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
if (using_pins & BIT(offset)) {
dev_warn(gc->parent, "PT GPIO pin %x reconfigured\n",
offset);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return -EINVAL;
}
writel(using_pins | BIT(offset), pt_gpio->reg_base + PT_SYNC_REG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
return 0;
}
static void pt_gpio_free(struct gpio_chip *gc, unsigned offset)
{
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct pt_gpio_chip *pt_gpio = gpiochip_get_data(gc);
- unsigned long flags;
u32 using_pins;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
using_pins = readl(pt_gpio->reg_base + PT_SYNC_REG);
using_pins &= ~BIT(offset);
writel(using_pins, pt_gpio->reg_base + PT_SYNC_REG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
dev_dbg(gc->parent, "pt_gpio_free offset=%x\n", offset);
}
static int pt_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct pt_gpio_chip *pt_gpio;
int ret = 0;
@@ -91,22 +88,27 @@ static int pt_gpio_probe(struct platform_device *pdev)
return PTR_ERR(pt_gpio->reg_base);
}
- ret = bgpio_init(&pt_gpio->gc, dev, 4,
- pt_gpio->reg_base + PT_INPUTDATA_REG,
- pt_gpio->reg_base + PT_OUTPUTDATA_REG, NULL,
- pt_gpio->reg_base + PT_DIRECTION_REG, NULL,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = pt_gpio->reg_base + PT_INPUTDATA_REG,
+ .set = pt_gpio->reg_base + PT_OUTPUTDATA_REG,
+ .dirout = pt_gpio->reg_base + PT_DIRECTION_REG,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&pt_gpio->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
- pt_gpio->gc.owner = THIS_MODULE;
- pt_gpio->gc.request = pt_gpio_request;
- pt_gpio->gc.free = pt_gpio_free;
- pt_gpio->gc.ngpio = (uintptr_t)device_get_match_data(dev);
+ pt_gpio->chip.gc.owner = THIS_MODULE;
+ pt_gpio->chip.gc.request = pt_gpio_request;
+ pt_gpio->chip.gc.free = pt_gpio_free;
+ pt_gpio->chip.gc.ngpio = (uintptr_t)device_get_match_data(dev);
- ret = devm_gpiochip_add_data(dev, &pt_gpio->gc, pt_gpio);
+ ret = devm_gpiochip_add_data(dev, &pt_gpio->chip.gc, pt_gpio);
if (ret) {
dev_err(dev, "Failed to register GPIO lib\n");
return ret;
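Editor's note: a sketch (not from the patch) of the scope-based locking pattern this series applies everywhere: guard() from <linux/cleanup.h> drops the generic-chip lock automatically when the scope ends, so early returns such as the -EINVAL above no longer need a manual unlock. The helper name and register usage are assumptions.

#include <linux/cleanup.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>

static int example_rmw(struct gpio_generic_chip *gen_gc,
		       void __iomem *reg, u32 set)	/* hypothetical helper */
{
	u32 val;

	guard(gpio_generic_lock_irqsave)(gen_gc);

	val = readl(reg);
	if (val & set)
		return -EBUSY;		/* lock released automatically */
	writel(val | set, reg);

	return 0;			/* ...and here as well */
}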
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 7953a9c4e36d..cbdf781994dc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -5,6 +5,7 @@
* Joel Stanley <joel@jms.id.au>
*/
+#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/gpio/aspeed.h>
@@ -24,16 +25,11 @@
/*
- * These two headers aren't meant to be used by GPIO drivers. We need
- * them in order to access gpio_chip_hwgpio() which we need to implement
+ * This header isn't meant to be used by GPIO drivers. We need it in
+ * order to access gpiod_hwgpio() which we need to implement
* the aspeed specific API which allows the coprocessor to request
* access to some GPIOs and to arbitrate between coprocessor and ARM.
*/
#include <linux/gpio/consumer.h>
-#include "gpiolib.h"
-
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
#define GPIO_G7_IRQ_STS_BASE 0x100
#define GPIO_G7_IRQ_STS_OFFSET(x) (GPIO_G7_IRQ_STS_BASE + (x) * 0x4)
@@ -942,7 +938,7 @@ int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
{
struct gpio_chip *chip = gpiod_to_chip(desc);
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ int rc = 0, bindex, offset = gpiod_hwgpio(desc);
const struct aspeed_gpio_bank *bank = to_bank(offset);
if (!aspeed_gpio_support_copro(gpio))
@@ -987,7 +983,7 @@ int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc)
{
struct gpio_chip *chip = gpiod_to_chip(desc);
struct aspeed_gpio *gpio = gpiochip_get_data(chip);
- int rc = 0, bindex, offset = gpio_chip_hwgpio(desc);
+ int rc = 0, bindex, offset = gpiod_hwgpio(desc);
if (!aspeed_gpio_support_copro(gpio))
return -EOPNOTSUPP;
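Editor's note: the driver's local field_get()/field_prep() helpers are dropped in favour of <linux/bitfield.h>. A minimal illustration (assumptions only; mask and field are made up) of the constant-mask FIELD_GET()/FIELD_PREP() macros from that header:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_MODE_MASK	GENMASK(5, 4)	/* hypothetical field */

static u32 example_set_mode(u32 reg, u32 mode)
{
	reg &= ~EXAMPLE_MODE_MASK;
	return reg | FIELD_PREP(EXAMPLE_MODE_MASK, mode);
}

static u32 example_get_mode(u32 reg)
{
	return FIELD_GET(EXAMPLE_MODE_MASK, reg);
}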
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index de4cc12e5e03..2ad9f6ac6636 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
@@ -28,17 +29,17 @@
#define AR71XX_GPIO_REG_INT_MASK 0x24
struct ath79_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
- raw_spinlock_t lock;
unsigned long both_edges;
};
static struct ath79_gpio_ctrl *irq_data_to_ath79_gpio(struct irq_data *data)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
- return container_of(gc, struct ath79_gpio_ctrl, gc);
+ return container_of(gen_gc, struct ath79_gpio_ctrl, chip);
}
static u32 ath79_gpio_read(struct ath79_gpio_ctrl *ctrl, unsigned reg)
@@ -70,48 +71,43 @@ static void ath79_gpio_irq_unmask(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- gpiochip_enable_irq(&ctrl->gc, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ gpiochip_enable_irq(&ctrl->chip.gc, irqd_to_hwirq(data));
+
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
+
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_mask(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
- ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
- gpiochip_disable_irq(&ctrl->gc, irqd_to_hwirq(data));
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip)
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
+
+ gpiochip_disable_irq(&ctrl->chip.gc, irqd_to_hwirq(data));
}
static void ath79_gpio_irq_enable(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static void ath79_gpio_irq_disable(struct irq_data *data)
{
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
- unsigned long flags;
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_MASK, mask, 0);
ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, 0);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
static int ath79_gpio_irq_set_type(struct irq_data *data,
@@ -120,7 +116,6 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
struct ath79_gpio_ctrl *ctrl = irq_data_to_ath79_gpio(data);
u32 mask = BIT(irqd_to_hwirq(data));
u32 type = 0, polarity = 0;
- unsigned long flags;
bool disabled;
switch (flow_type) {
@@ -142,7 +137,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
return -EINVAL;
}
- raw_spin_lock_irqsave(&ctrl->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
if (flow_type == IRQ_TYPE_EDGE_BOTH) {
ctrl->both_edges |= mask;
@@ -167,8 +162,6 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
ath79_gpio_update_bits(
ctrl, AR71XX_GPIO_REG_INT_ENABLE, mask, mask);
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
-
return 0;
}
@@ -187,28 +180,27 @@ static void ath79_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct ath79_gpio_ctrl *ctrl =
- container_of(gc, struct ath79_gpio_ctrl, gc);
- unsigned long flags, pending;
+ container_of(gen_gc, struct ath79_gpio_ctrl, chip);
+ unsigned long pending;
u32 both_edges, state;
int irq;
chained_irq_enter(irqchip, desc);
- raw_spin_lock_irqsave(&ctrl->lock, flags);
-
- pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip) {
+ pending = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_INT_PENDING);
- /* Update the polarity of the both edges irqs */
- both_edges = ctrl->both_edges & pending;
- if (both_edges) {
- state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
- ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
- both_edges, ~state);
+ /* Update the polarity of the both-edges irqs */
+ both_edges = ctrl->both_edges & pending;
+ if (both_edges) {
+ state = ath79_gpio_read(ctrl, AR71XX_GPIO_REG_IN);
+ ath79_gpio_update_bits(ctrl, AR71XX_GPIO_REG_INT_POLARITY,
+ both_edges, ~state);
+ }
}
- raw_spin_unlock_irqrestore(&ctrl->lock, flags);
-
for_each_set_bit(irq, &pending, gc->ngpio)
generic_handle_domain_irq(gc->irq.domain, irq);
@@ -224,6 +216,7 @@ MODULE_DEVICE_TABLE(of, ath79_gpio_of_match);
static int ath79_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ath79_gpio_ctrl *ctrl;
struct gpio_irq_chip *girq;
@@ -252,22 +245,25 @@ static int ath79_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->base))
return PTR_ERR(ctrl->base);
- raw_spin_lock_init(&ctrl->lock);
- err = bgpio_init(&ctrl->gc, dev, 4,
- ctrl->base + AR71XX_GPIO_REG_IN,
- ctrl->base + AR71XX_GPIO_REG_SET,
- ctrl->base + AR71XX_GPIO_REG_CLEAR,
- oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
- oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ctrl->base + AR71XX_GPIO_REG_IN,
+ .set = ctrl->base + AR71XX_GPIO_REG_SET,
+ .clr = ctrl->base + AR71XX_GPIO_REG_CLEAR,
+ .dirout = oe_inverted ? NULL : ctrl->base + AR71XX_GPIO_REG_OE,
+ .dirin = oe_inverted ? ctrl->base + AR71XX_GPIO_REG_OE : NULL,
+ };
+
+ err = gpio_generic_chip_init(&ctrl->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
return err;
}
/* Optional interrupt setup */
if (device_property_read_bool(dev, "interrupt-controller")) {
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ath79_gpio_irqchip);
girq->parent_handler = ath79_gpio_irq_handler;
girq->num_parents = 1;
@@ -280,7 +276,7 @@ static int ath79_gpio_probe(struct platform_device *pdev)
girq->handler = handle_simple_irq;
}
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->chip.gc, ctrl);
}
static struct platform_driver ath79_gpio_driver = {
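Editor's note: a sketch (not from the patch) of the recurring bgpio_init() to gpio_generic_chip_init() conversion: the positional register arguments become named members of gpio_generic_chip_config. The register offsets and helper name here are illustrative assumptions.

#include <linux/gpio/generic.h>

static int example_init(struct gpio_generic_chip *chip, struct device *dev,
			void __iomem *base)	/* hypothetical MMIO block */
{
	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,			/* 32-bit registers */
		.dat = base + 0x00,		/* input value */
		.set = base + 0x04,		/* output value */
		.dirout = base + 0x08,		/* 1 = output */
	};

	return gpio_generic_chip_init(chip, &config);
}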
diff --git a/drivers/gpio/gpio-blzp1600.c b/drivers/gpio/gpio-blzp1600.c
index 055cb296ae54..0f8c826ba876 100644
--- a/drivers/gpio/gpio-blzp1600.c
+++ b/drivers/gpio/gpio-blzp1600.c
@@ -6,6 +6,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -36,7 +37,7 @@
struct blzp1600_gpio {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
int irq;
};
@@ -76,7 +77,7 @@ static void blzp1600_gpio_irq_mask(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 1);
}
@@ -84,7 +85,7 @@ static void blzp1600_gpio_irq_unmask(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_IM_REG, BIT(d->hwirq), 0);
}
@@ -99,9 +100,9 @@ static void blzp1600_gpio_irq_enable(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- gpiochip_enable_irq(&chip->gc, irqd_to_hwirq(d));
+ gpiochip_enable_irq(&chip->gen_gc.gc, irqd_to_hwirq(d));
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_DIR_REG, BIT(d->hwirq), 0);
blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 1);
}
@@ -110,9 +111,9 @@ static void blzp1600_gpio_irq_disable(struct irq_data *d)
{
struct blzp1600_gpio *chip = get_blzp1600_gpio_from_irq_data(d);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_IEN_REG, BIT(d->hwirq), 0);
- gpiochip_disable_irq(&chip->gc, irqd_to_hwirq(d));
+ gpiochip_disable_irq(&chip->gen_gc.gc, irqd_to_hwirq(d));
}
static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
@@ -121,7 +122,7 @@ static int blzp1600_gpio_irq_set_type(struct irq_data *d, u32 type)
u32 edge_level, single_both, fall_rise;
int mask = BIT(d->hwirq);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
edge_level = blzp1600_gpio_read(chip, GPIO_IS_REG);
single_both = blzp1600_gpio_read(chip, GPIO_IBE_REG);
fall_rise = blzp1600_gpio_read(chip, GPIO_IEV_REG);
@@ -186,8 +187,8 @@ static void blzp1600_gpio_irqhandler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
irq_status = blzp1600_gpio_read(gpio, GPIO_RIS_REG);
- for_each_set_bit(hwirq, &irq_status, gpio->gc.ngpio)
- generic_handle_domain_irq(gpio->gc.irq.domain, hwirq);
+ for_each_set_bit(hwirq, &irq_status, gpio->gen_gc.gc.ngpio)
+ generic_handle_domain_irq(gpio->gen_gc.gc.irq.domain, hwirq);
chained_irq_exit(irqchip, desc);
}
@@ -197,7 +198,7 @@ static int blzp1600_gpio_set_debounce(struct gpio_chip *gc, unsigned int offset,
{
struct blzp1600_gpio *chip = gpiochip_get_data(gc);
- guard(raw_spinlock_irqsave)(&chip->gc.bgpio_lock);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
blzp1600_gpio_rmw(chip->base + GPIO_DB_REG, BIT(offset), debounce);
return 0;
@@ -216,6 +217,7 @@ static int blzp1600_gpio_set_config(struct gpio_chip *gc, unsigned int offset, u
static int blzp1600_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct blzp1600_gpio *chip;
struct gpio_chip *gc;
int ret;
@@ -228,14 +230,21 @@ static int blzp1600_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- ret = bgpio_init(&chip->gc, &pdev->dev, 4, chip->base + GPIO_IDATA_REG,
- chip->base + GPIO_SET_REG, chip->base + GPIO_CLR_REG,
- chip->base + GPIO_DIR_REG, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = chip->base + GPIO_IDATA_REG,
+ .set = chip->base + GPIO_SET_REG,
+ .clr = chip->base + GPIO_CLR_REG,
+ .dirout = chip->base + GPIO_DIR_REG,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Failed to register generic gpio\n");
/* configure the gpio chip */
- gc = &chip->gc;
+ gc = &chip->gen_gc.gc;
gc->set_config = blzp1600_gpio_set_config;
if (device_property_present(&pdev->dev, "interrupt-controller")) {
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index e29a9589b3cc..af9287ff5dc4 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/irqdomain.h>
@@ -37,7 +38,7 @@ enum gio_reg_index {
struct brcmstb_gpio_bank {
struct list_head node;
int id;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct brcmstb_gpio_priv *parent_priv;
u32 width;
u32 wake_active;
@@ -72,19 +73,18 @@ __brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
{
void __iomem *reg_base = bank->parent_priv->reg_base;
- return bank->gc.read_reg(reg_base + GIO_STAT(bank->id)) &
- bank->gc.read_reg(reg_base + GIO_MASK(bank->id));
+ return gpio_generic_read_reg(&bank->chip, reg_base + GIO_STAT(bank->id)) &
+ gpio_generic_read_reg(&bank->chip, reg_base + GIO_MASK(bank->id));
}
static unsigned long
brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
{
unsigned long status;
- unsigned long flags;
- raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
status = __brcmstb_gpio_get_active_irqs(bank);
- raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
return status;
}
@@ -92,26 +92,26 @@ brcmstb_gpio_get_active_irqs(struct brcmstb_gpio_bank *bank)
static int brcmstb_gpio_hwirq_to_offset(irq_hw_number_t hwirq,
struct brcmstb_gpio_bank *bank)
{
- return hwirq - bank->gc.offset;
+ return hwirq - bank->chip.gc.offset;
}
static void brcmstb_gpio_set_imask(struct brcmstb_gpio_bank *bank,
unsigned int hwirq, bool enable)
{
- struct gpio_chip *gc = &bank->gc;
struct brcmstb_gpio_priv *priv = bank->parent_priv;
u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(hwirq, bank));
u32 imask;
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- imask = gc->read_reg(priv->reg_base + GIO_MASK(bank->id));
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
+ imask = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id));
if (enable)
imask |= mask;
else
imask &= ~mask;
- gc->write_reg(priv->reg_base + GIO_MASK(bank->id), imask);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id), imask);
}
static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -150,7 +150,8 @@ static void brcmstb_gpio_irq_ack(struct irq_data *d)
struct brcmstb_gpio_priv *priv = bank->parent_priv;
u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
- gc->write_reg(priv->reg_base + GIO_STAT(bank->id), mask);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_STAT(bank->id), mask);
}
static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -162,7 +163,6 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
u32 edge_insensitive, iedge_insensitive;
u32 edge_config, iedge_config;
u32 level, ilevel;
- unsigned long flags;
switch (type) {
case IRQ_TYPE_LEVEL_LOW:
@@ -194,23 +194,25 @@ static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&bank->gc.bgpio_lock, flags);
-
- iedge_config = bank->gc.read_reg(priv->reg_base +
- GIO_EC(bank->id)) & ~mask;
- iedge_insensitive = bank->gc.read_reg(priv->reg_base +
- GIO_EI(bank->id)) & ~mask;
- ilevel = bank->gc.read_reg(priv->reg_base +
- GIO_LEVEL(bank->id)) & ~mask;
+ guard(gpio_generic_lock_irqsave)(&bank->chip);
+
+ iedge_config = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_EC(bank->id)) & ~mask;
+ iedge_insensitive = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_EI(bank->id)) & ~mask;
+ ilevel = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_LEVEL(bank->id)) & ~mask;
+
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_EC(bank->id),
+ iedge_config | edge_config);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_EI(bank->id),
+ iedge_insensitive | edge_insensitive);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_LEVEL(bank->id),
+ ilevel | level);
- bank->gc.write_reg(priv->reg_base + GIO_EC(bank->id),
- iedge_config | edge_config);
- bank->gc.write_reg(priv->reg_base + GIO_EI(bank->id),
- iedge_insensitive | edge_insensitive);
- bank->gc.write_reg(priv->reg_base + GIO_LEVEL(bank->id),
- ilevel | level);
-
- raw_spin_unlock_irqrestore(&bank->gc.bgpio_lock, flags);
return 0;
}
@@ -263,7 +265,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
{
struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct irq_domain *domain = priv->irq_domain;
- int hwbase = bank->gc.offset;
+ int hwbase = bank->chip.gc.offset;
unsigned long status;
while ((status = brcmstb_gpio_get_active_irqs(bank))) {
@@ -303,7 +305,7 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
/* banks are in descending order */
list_for_each_entry_reverse(bank, &priv->bank_list, node) {
- i += bank->gc.ngpio;
+ i += bank->chip.gc.ngpio;
if (hwirq < i)
return bank;
}
@@ -332,7 +334,7 @@ static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
dev_dbg(&pdev->dev, "Mapping irq %d for gpio line %d (bank %d)\n",
irq, (int)hwirq, bank->id);
- ret = irq_set_chip_data(irq, &bank->gc);
+ ret = irq_set_chip_data(irq, &bank->chip.gc);
if (ret < 0)
return ret;
irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class,
@@ -394,7 +396,7 @@ static void brcmstb_gpio_remove(struct platform_device *pdev)
* more important to actually perform all of the steps.
*/
list_for_each_entry(bank, &priv->bank_list, node)
- gpiochip_remove(&bank->gc);
+ gpiochip_remove(&bank->chip.gc);
}
static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
@@ -412,7 +414,7 @@ static int brcmstb_gpio_of_xlate(struct gpio_chip *gc,
if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
return -EINVAL;
- offset = gpiospec->args[0] - bank->gc.offset;
+ offset = gpiospec->args[0] - bank->chip.gc.offset;
if (offset >= gc->ngpio || offset < 0)
return -EINVAL;
@@ -493,19 +495,17 @@ out_free_domain:
static void brcmstb_gpio_bank_save(struct brcmstb_gpio_priv *priv,
struct brcmstb_gpio_bank *bank)
{
- struct gpio_chip *gc = &bank->gc;
unsigned int i;
for (i = 0; i < GIO_REG_STAT; i++)
- bank->saved_regs[i] = gc->read_reg(priv->reg_base +
- GIO_BANK_OFF(bank->id, i));
+ bank->saved_regs[i] = gpio_generic_read_reg(&bank->chip,
+ priv->reg_base + GIO_BANK_OFF(bank->id, i));
}
static void brcmstb_gpio_quiesce(struct device *dev, bool save)
{
struct brcmstb_gpio_priv *priv = dev_get_drvdata(dev);
struct brcmstb_gpio_bank *bank;
- struct gpio_chip *gc;
u32 imask;
/* disable non-wake interrupt */
@@ -513,8 +513,6 @@ static void brcmstb_gpio_quiesce(struct device *dev, bool save)
disable_irq(priv->parent_irq);
list_for_each_entry(bank, &priv->bank_list, node) {
- gc = &bank->gc;
-
if (save)
brcmstb_gpio_bank_save(priv, bank);
@@ -523,8 +521,9 @@ static void brcmstb_gpio_quiesce(struct device *dev, bool save)
imask = bank->wake_active;
else
imask = 0;
- gc->write_reg(priv->reg_base + GIO_MASK(bank->id),
- imask);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_MASK(bank->id),
+ imask);
}
}
@@ -534,16 +533,15 @@ static void brcmstb_gpio_shutdown(struct platform_device *pdev)
brcmstb_gpio_quiesce(&pdev->dev, false);
}
-#ifdef CONFIG_PM_SLEEP
static void brcmstb_gpio_bank_restore(struct brcmstb_gpio_priv *priv,
struct brcmstb_gpio_bank *bank)
{
- struct gpio_chip *gc = &bank->gc;
unsigned int i;
for (i = 0; i < GIO_REG_STAT; i++)
- gc->write_reg(priv->reg_base + GIO_BANK_OFF(bank->id, i),
- bank->saved_regs[i]);
+ gpio_generic_write_reg(&bank->chip,
+ priv->reg_base + GIO_BANK_OFF(bank->id, i),
+ bank->saved_regs[i]);
}
static int brcmstb_gpio_suspend(struct device *dev)
@@ -573,18 +571,14 @@ static int brcmstb_gpio_resume(struct device *dev)
return 0;
}
-#else
-#define brcmstb_gpio_suspend NULL
-#define brcmstb_gpio_resume NULL
-#endif /* CONFIG_PM_SLEEP */
-
static const struct dev_pm_ops brcmstb_gpio_pm_ops = {
- .suspend_noirq = brcmstb_gpio_suspend,
- .resume_noirq = brcmstb_gpio_resume,
+ .suspend_noirq = pm_sleep_ptr(brcmstb_gpio_suspend),
+ .resume_noirq = pm_sleep_ptr(brcmstb_gpio_resume),
};
static int brcmstb_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
void __iomem *reg_base;
@@ -630,7 +624,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
* else leave I/O in little endian mode.
*/
#if defined(CONFIG_MIPS) && defined(__BIG_ENDIAN)
- flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
#endif
of_property_for_each_u32(np, "brcm,gpio-bank-widths", bank_width) {
@@ -665,17 +659,24 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
bank->width = bank_width;
}
+ gc = &bank->chip.gc;
+
/*
* Regs are 4 bytes wide, have data reg, no set/clear regs,
* and direction bits have 0 = output and 1 = input
*/
- gc = &bank->gc;
- err = bgpio_init(gc, dev, 4,
- reg_base + GIO_DATA(bank->id),
- NULL, NULL, NULL,
- reg_base + GIO_IODIR(bank->id), flags);
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = reg_base + GIO_DATA(bank->id),
+ .dirin = reg_base + GIO_IODIR(bank->id),
+ .flags = flags,
+ };
+
+ err = gpio_generic_chip_init(&bank->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
goto fail;
}
@@ -700,7 +701,8 @@ static int brcmstb_gpio_probe(struct platform_device *pdev)
* be retained from S5 cold boot
*/
need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank);
- gc->write_reg(reg_base + GIO_MASK(bank->id), 0);
+ gpio_generic_write_reg(&bank->chip,
+ reg_base + GIO_MASK(bank->id), 0);
err = gpiochip_add_data(gc, bank);
if (err) {
@@ -747,7 +749,7 @@ static struct platform_driver brcmstb_gpio_driver = {
.driver = {
.name = "brcmstb-gpio",
.of_match_table = brcmstb_gpio_of_match,
- .pm = &brcmstb_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&brcmstb_gpio_pm_ops),
},
.probe = brcmstb_gpio_probe,
.remove = brcmstb_gpio_remove,
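Editor's note: a sketch (not from the patch) of the accessor conversion used above: direct gc->read_reg()/write_reg() calls become gpio_generic_read_reg()/gpio_generic_write_reg(), keeping the byte-order handling chosen at init time behind the API. The helper and its signature are assumptions inferred from usage in this series.

#include <linux/gpio/generic.h>

static unsigned long example_set_bits(struct gpio_generic_chip *chip,
				      void __iomem *reg, unsigned long bits)
{
	unsigned long val = gpio_generic_read_reg(chip, reg) | bits;

	gpio_generic_write_reg(chip, reg, val);
	return val;
}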
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index 05401da03ca3..324eeb77dbd5 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -52,10 +52,8 @@ struct bt8xxgpio {
struct pci_dev *pdev;
struct gpio_chip gpio;
-#ifdef CONFIG_PM
u32 saved_outen;
u32 saved_data;
-#endif
};
#define bgwrite(dat, adr) writel((dat), bg->mmio+(adr))
@@ -224,9 +222,10 @@ static void bt8xxgpio_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifdef CONFIG_PM
-static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
+
+static int bt8xxgpio_suspend(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
scoped_guard(spinlock_irqsave, &bg->lock) {
@@ -238,23 +237,13 @@ static int bt8xxgpio_suspend(struct pci_dev *pdev, pm_message_t state)
bgwrite(0x0, BT848_GPIO_OUT_EN);
}
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
return 0;
}
-static int bt8xxgpio_resume(struct pci_dev *pdev)
+static int bt8xxgpio_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct bt8xxgpio *bg = pci_get_drvdata(pdev);
- int err;
-
- pci_set_power_state(pdev, PCI_D0);
- err = pci_enable_device(pdev);
- if (err)
- return err;
- pci_restore_state(pdev);
guard(spinlock_irqsave)(&bg->lock);
@@ -267,10 +256,8 @@ static int bt8xxgpio_resume(struct pci_dev *pdev)
return 0;
}
-#else
-#define bt8xxgpio_suspend NULL
-#define bt8xxgpio_resume NULL
-#endif /* CONFIG_PM */
+
+static DEFINE_SIMPLE_DEV_PM_OPS(bt8xxgpio_pm_ops, bt8xxgpio_suspend, bt8xxgpio_resume);
static const struct pci_device_id bt8xxgpio_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROOKTREE, PCI_DEVICE_ID_BT848) },
@@ -286,8 +273,7 @@ static struct pci_driver bt8xxgpio_pci_driver = {
.id_table = bt8xxgpio_pci_tbl,
.probe = bt8xxgpio_probe,
.remove = bt8xxgpio_remove,
- .suspend = bt8xxgpio_suspend,
- .resume = bt8xxgpio_resume,
+ .driver.pm = &bt8xxgpio_pm_ops,
};
module_pci_driver(bt8xxgpio_pci_driver);
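Editor's note: a sketch (not from the patch) of the PM conversion pattern applied here: generic dev_pm_ops replace the legacy PCI .suspend/.resume hooks, and DEFINE_SIMPLE_DEV_PM_OPS()/pm_sleep_ptr() let the callbacks be discarded when CONFIG_PM_SLEEP is off without #ifdef blocks. The callback bodies are placeholders.

#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* Save device state; the PCI core now manages power states. */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* Restore the state saved in example_suspend(). */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend,
				example_resume);
/* Wired up via .driver.pm = &example_pm_ops, as in the hunk above. */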
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index c647953521c7..b75734ca22dd 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -181,7 +181,7 @@ static int cdns_gpio_probe(struct platform_device *pdev)
config.dat = cgpio->regs + CDNS_GPIO_INPUT_VALUE;
config.set = cgpio->regs + CDNS_GPIO_OUTPUT_VALUE;
config.dirin = cgpio->regs + CDNS_GPIO_DIRECTION_MODE;
- config.flags = BGPIOF_READ_OUTPUT_REG_SET;
+ config.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET;
ret = gpio_generic_chip_init(&cgpio->gen_gc, &config);
if (ret) {
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 43b667b41f5d..4986c465c9a8 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -78,7 +79,6 @@ struct dwapb_platform_data {
unsigned int nports;
};
-#ifdef CONFIG_PM_SLEEP
/* Store GPIO context across system-wide suspend/resume transitions */
struct dwapb_context {
u32 data;
@@ -91,7 +91,6 @@ struct dwapb_context {
u32 int_deb;
u32 wake_en;
};
-#endif
struct dwapb_gpio_port_irqchip {
unsigned int nr_irqs;
@@ -99,16 +98,18 @@ struct dwapb_gpio_port_irqchip {
};
struct dwapb_gpio_port {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct dwapb_gpio_port_irqchip *pirq;
struct dwapb_gpio *gpio;
-#ifdef CONFIG_PM_SLEEP
struct dwapb_context *ctx;
-#endif
unsigned int idx;
};
-#define to_dwapb_gpio(_gc) \
- (container_of(_gc, struct dwapb_gpio_port, gc)->gpio)
+
+static inline struct dwapb_gpio *to_dwapb_gpio(struct gpio_chip *gc)
+{
+ return container_of(to_gpio_generic_chip(gc),
+ struct dwapb_gpio_port, chip)->gpio;
+}
struct dwapb_gpio {
struct device *dev;
@@ -148,19 +149,19 @@ static inline u32 gpio_reg_convert(struct dwapb_gpio *gpio, unsigned int offset)
static inline u32 dwapb_read(struct dwapb_gpio *gpio, unsigned int offset)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
- void __iomem *reg_base = gpio->regs;
+ struct gpio_generic_chip *chip = &gpio->ports[0].chip;
+ void __iomem *reg_base = gpio->regs;
- return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset));
+ return gpio_generic_read_reg(chip, reg_base + gpio_reg_convert(gpio, offset));
}
static inline void dwapb_write(struct dwapb_gpio *gpio, unsigned int offset,
u32 val)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
- void __iomem *reg_base = gpio->regs;
+ struct gpio_generic_chip *chip = &gpio->ports[0].chip;
+ void __iomem *reg_base = gpio->regs;
- gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val);
+ gpio_generic_write_reg(chip, reg_base + gpio_reg_convert(gpio, offset), val);
}
static struct dwapb_gpio_port *dwapb_offs_to_port(struct dwapb_gpio *gpio, unsigned int offs)
@@ -186,7 +187,7 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
if (!port)
return;
- gc = &port->gc;
+ gc = &port->chip.gc;
pol = dwapb_read(gpio, GPIO_INT_POLARITY);
/* Just read the current value right out of the data register */
@@ -201,13 +202,13 @@ static void dwapb_toggle_trigger(struct dwapb_gpio *gpio, unsigned int offs)
static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
{
- struct gpio_chip *gc = &gpio->ports[0].gc;
+ struct gpio_generic_chip *gen_gc = &gpio->ports[0].chip;
unsigned long irq_status;
irq_hw_number_t hwirq;
irq_status = dwapb_read(gpio, GPIO_INTSTATUS);
for_each_set_bit(hwirq, &irq_status, DWAPB_MAX_GPIOS) {
- int gpio_irq = irq_find_mapping(gc->irq.domain, hwirq);
+ int gpio_irq = irq_find_mapping(gen_gc->gc.irq.domain, hwirq);
u32 irq_type = irq_get_trigger_type(gpio_irq);
generic_handle_irq(gpio_irq);
@@ -237,27 +238,27 @@ static irqreturn_t dwapb_irq_handler_mfd(int irq, void *dev_id)
static void dwapb_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
u32 val = BIT(irqd_to_hwirq(d));
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
dwapb_write(gpio, GPIO_PORTA_EOI, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
- dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, gen_gc) {
+ val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
+ dwapb_write(gpio, GPIO_INTMASK, val);
+ }
gpiochip_disable_irq(gc, hwirq);
}
@@ -265,59 +266,61 @@ static void dwapb_irq_mask(struct irq_data *d)
static void dwapb_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, hwirq);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static void dwapb_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int dwapb_irq_set_type(struct irq_data *d, u32 type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t bit = irqd_to_hwirq(d);
- unsigned long level, polarity, flags;
+ unsigned long level, polarity;
+
+ guard(gpio_generic_lock_irqsave)(gen_gc);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
level = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
polarity = dwapb_read(gpio, GPIO_INT_POLARITY);
@@ -352,12 +355,10 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type)
dwapb_write(gpio, GPIO_INTTYPE_LEVEL, level);
if (type != IRQ_TYPE_EDGE_BOTH)
dwapb_write(gpio, GPIO_INT_POLARITY, polarity);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -372,9 +373,6 @@ static int dwapb_irq_set_wake(struct irq_data *d, unsigned int enable)
return 0;
}
-#else
-#define dwapb_irq_set_wake NULL
-#endif
static const struct irq_chip dwapb_irq_chip = {
.name = DWAPB_DRIVER_NAME,
@@ -384,7 +382,7 @@ static const struct irq_chip dwapb_irq_chip = {
.irq_set_type = dwapb_irq_set_type,
.irq_enable = dwapb_irq_enable,
.irq_disable = dwapb_irq_disable,
- .irq_set_wake = dwapb_irq_set_wake,
+ .irq_set_wake = pm_sleep_ptr(dwapb_irq_set_wake),
.flags = IRQCHIP_IMMUTABLE,
GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
@@ -393,11 +391,12 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
unsigned offset, unsigned debounce)
{
struct dwapb_gpio_port *port = gpiochip_get_data(gc);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
struct dwapb_gpio *gpio = port->gpio;
- unsigned long flags, val_deb;
+ unsigned long val_deb;
unsigned long mask = BIT(offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
val_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
if (debounce)
@@ -406,8 +405,6 @@ static int dwapb_gpio_set_debounce(struct gpio_chip *gc,
val_deb &= ~mask;
dwapb_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
-
return 0;
}
@@ -445,7 +442,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio,
struct dwapb_port_property *pp)
{
struct dwapb_gpio_port_irqchip *pirq;
- struct gpio_chip *gc = &port->gc;
+ struct gpio_chip *gc = &port->chip.gc;
struct gpio_irq_chip *girq;
int err;
@@ -501,6 +498,7 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
struct dwapb_port_property *pp,
unsigned int offs)
{
+ struct gpio_generic_chip_config config;
struct dwapb_gpio_port *port;
void __iomem *dat, *set, *dirout;
int err;
@@ -519,32 +517,39 @@ static int dwapb_gpio_add_port(struct dwapb_gpio *gpio,
set = gpio->regs + GPIO_SWPORTA_DR + pp->idx * GPIO_SWPORT_DR_STRIDE;
dirout = gpio->regs + GPIO_SWPORTA_DDR + pp->idx * GPIO_SWPORT_DDR_STRIDE;
+ config = (struct gpio_generic_chip_config) {
+ .dev = gpio->dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .dirout = dirout,
+ };
+
/* This registers 32 GPIO lines per port */
- err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
- NULL, 0);
+ err = gpio_generic_chip_init(&port->chip, &config);
if (err) {
dev_err(gpio->dev, "failed to init gpio chip for port%d\n",
port->idx);
return err;
}
- port->gc.fwnode = pp->fwnode;
- port->gc.ngpio = pp->ngpio;
- port->gc.base = pp->gpio_base;
- port->gc.request = gpiochip_generic_request;
- port->gc.free = gpiochip_generic_free;
+ port->chip.gc.fwnode = pp->fwnode;
+ port->chip.gc.ngpio = pp->ngpio;
+ port->chip.gc.base = pp->gpio_base;
+ port->chip.gc.request = gpiochip_generic_request;
+ port->chip.gc.free = gpiochip_generic_free;
/* Only port A supports debounce */
if (pp->idx == 0)
- port->gc.set_config = dwapb_gpio_set_config;
+ port->chip.gc.set_config = dwapb_gpio_set_config;
else
- port->gc.set_config = gpiochip_generic_config;
+ port->chip.gc.set_config = gpiochip_generic_config;
/* Only port A can provide interrupts in all configurations of the IP */
if (pp->idx == 0)
dwapb_configure_irqs(gpio, port, pp);
- err = devm_gpiochip_add_data(gpio->dev, &port->gc, port);
+ err = devm_gpiochip_add_data(gpio->dev, &port->chip.gc, port);
if (err) {
dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
port->idx);
@@ -746,42 +751,40 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int dwapb_gpio_suspend(struct device *dev)
{
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
- struct gpio_chip *gc = &gpio->ports[0].gc;
- unsigned long flags;
+ struct gpio_generic_chip *gen_gc = &gpio->ports[0].chip;
int i;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- for (i = 0; i < gpio->nr_ports; i++) {
- unsigned int offset;
- unsigned int idx = gpio->ports[i].idx;
- struct dwapb_context *ctx = gpio->ports[i].ctx;
+ scoped_guard(gpio_generic_lock_irqsave, gen_gc) {
+ for (i = 0; i < gpio->nr_ports; i++) {
+ unsigned int offset;
+ unsigned int idx = gpio->ports[i].idx;
+ struct dwapb_context *ctx = gpio->ports[i].ctx;
- offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
- ctx->dir = dwapb_read(gpio, offset);
+ offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
+ ctx->dir = dwapb_read(gpio, offset);
- offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
- ctx->data = dwapb_read(gpio, offset);
+ offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
+ ctx->data = dwapb_read(gpio, offset);
- offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
- ctx->ext = dwapb_read(gpio, offset);
+ offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
+ ctx->ext = dwapb_read(gpio, offset);
- /* Only port A can provide interrupts */
- if (idx == 0) {
- ctx->int_mask = dwapb_read(gpio, GPIO_INTMASK);
- ctx->int_en = dwapb_read(gpio, GPIO_INTEN);
- ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY);
- ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
- ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
-
- /* Mask out interrupts */
- dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
+ /* Only port A can provide interrupts */
+ if (idx == 0) {
+ ctx->int_mask = dwapb_read(gpio, GPIO_INTMASK);
+ ctx->int_en = dwapb_read(gpio, GPIO_INTEN);
+ ctx->int_pol = dwapb_read(gpio, GPIO_INT_POLARITY);
+ ctx->int_type = dwapb_read(gpio, GPIO_INTTYPE_LEVEL);
+ ctx->int_deb = dwapb_read(gpio, GPIO_PORTA_DEBOUNCE);
+
+ /* Mask out interrupts */
+ dwapb_write(gpio, GPIO_INTMASK, ~ctx->wake_en);
+ }
}
}
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
clk_bulk_disable_unprepare(DWAPB_NR_CLOCKS, gpio->clks);
@@ -791,8 +794,8 @@ static int dwapb_gpio_suspend(struct device *dev)
static int dwapb_gpio_resume(struct device *dev)
{
struct dwapb_gpio *gpio = dev_get_drvdata(dev);
- struct gpio_chip *gc = &gpio->ports[0].gc;
- unsigned long flags;
+ struct gpio_chip *gc = &gpio->ports[0].chip.gc;
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
int i, err;
err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks);
@@ -801,7 +804,8 @@ static int dwapb_gpio_resume(struct device *dev)
return err;
}
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(gen_gc);
+
for (i = 0; i < gpio->nr_ports; i++) {
unsigned int offset;
unsigned int idx = gpio->ports[i].idx;
@@ -828,19 +832,17 @@ static int dwapb_gpio_resume(struct device *dev)
dwapb_write(gpio, GPIO_PORTA_EOI, 0xffffffff);
}
}
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
-#endif
-static SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops, dwapb_gpio_suspend,
- dwapb_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(dwapb_gpio_pm_ops,
+ dwapb_gpio_suspend, dwapb_gpio_resume);
static struct platform_driver dwapb_gpio_driver = {
.driver = {
.name = DWAPB_DRIVER_NAME,
- .pm = &dwapb_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&dwapb_gpio_pm_ops),
.of_match_table = dwapb_of_match,
.acpi_match_table = dwapb_acpi_match,
},
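The dwapb conversion above shows the two patterns repeated throughout this series: raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs on gc->bgpio_lock become scope-based guards on the generic chip lock, and CONFIG_PM_SLEEP #ifdefs become pm_sleep_ptr() wrappers. A minimal sketch of the resulting shape, for a hypothetical driver "foo" (all foo_* names and the FOO_INTTYPE_LEVEL offset are placeholders, not part of this patch):

#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/irq.h>

struct foo_priv {
	void __iomem *regs;
};

#define FOO_INTTYPE_LEVEL	0x38	/* placeholder register offset */

static int foo_irq_set_type(struct irq_data *d, u32 type)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(gc);
	struct foo_priv *priv = gpiochip_get_data(gc);
	u32 level;

	/* The lock is dropped automatically on every return path below. */
	guard(gpio_generic_lock_irqsave)(gen_gc);

	level = readl(priv->regs + FOO_INTTYPE_LEVEL);
	if (type & IRQ_TYPE_LEVEL_MASK)
		level |= BIT(irqd_to_hwirq(d));
	else
		level &= ~BIT(irqd_to_hwirq(d));
	writel(level, priv->regs + FOO_INTTYPE_LEVEL);

	return 0;
}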
diff --git a/drivers/gpio/gpio-elkhartlake.c b/drivers/gpio/gpio-elkhartlake.c
index 95de52d2cc63..b96e7928b6e5 100644
--- a/drivers/gpio/gpio-elkhartlake.c
+++ b/drivers/gpio/gpio-elkhartlake.c
@@ -2,43 +2,46 @@
/*
* Intel Elkhart Lake PSE GPIO driver
*
- * Copyright (c) 2023 Intel Corporation.
+ * Copyright (c) 2023, 2025 Intel Corporation.
*
* Authors: Pandith N <pandith.n@intel.com>
* Raag Jadav <raag.jadav@intel.com>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/ehl_pse_io_aux.h>
+
#include "gpio-tangier.h"
/* Each Intel EHL PSE GPIO Controller has 30 GPIO pins */
#define EHL_PSE_NGPIO 30
-static int ehl_gpio_probe(struct platform_device *pdev)
+static int ehl_gpio_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
{
- struct device *dev = &pdev->dev;
+ struct device *dev = &adev->dev;
+ struct ehl_pse_io_data *data;
struct tng_gpio *priv;
- int irq, ret;
+ int ret;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ data = dev_get_platdata(dev);
+ if (!data)
+ return -ENODATA;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ priv->reg_base = devm_ioremap_resource(dev, &data->mem);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
priv->dev = dev;
- priv->irq = irq;
+ priv->irq = data->irq;
priv->info.base = -1;
priv->info.ngpio = EHL_PSE_NGPIO;
@@ -51,25 +54,24 @@ static int ehl_gpio_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "tng_gpio_probe error\n");
- platform_set_drvdata(pdev, priv);
+ auxiliary_set_drvdata(adev, priv);
return 0;
}
-static const struct platform_device_id ehl_gpio_ids[] = {
- { "gpio-elkhartlake" },
+static const struct auxiliary_device_id ehl_gpio_ids[] = {
+ { EHL_PSE_IO_NAME "." EHL_PSE_GPIO_NAME },
{ }
};
-MODULE_DEVICE_TABLE(platform, ehl_gpio_ids);
+MODULE_DEVICE_TABLE(auxiliary, ehl_gpio_ids);
-static struct platform_driver ehl_gpio_driver = {
+static struct auxiliary_driver ehl_gpio_driver = {
.driver = {
- .name = "gpio-elkhartlake",
.pm = pm_sleep_ptr(&tng_gpio_pm_ops),
},
.probe = ehl_gpio_probe,
.id_table = ehl_gpio_ids,
};
-module_platform_driver(ehl_gpio_driver);
+module_auxiliary_driver(ehl_gpio_driver);
MODULE_AUTHOR("Pandith N <pandith.n@intel.com>");
MODULE_AUTHOR("Raag Jadav <raag.jadav@intel.com>");
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 58d2464c07bc..1f56e44ffc9a 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -9,16 +9,17 @@
* linux/arch/arm/mach-ep93xx/core.c
*/
+#include <linux/bitops.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/gpio/driver.h>
-#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
struct ep93xx_gpio_irq_chip {
void __iomem *base;
@@ -31,11 +32,14 @@ struct ep93xx_gpio_irq_chip {
struct ep93xx_gpio_chip {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct ep93xx_gpio_irq_chip *eic;
};
-#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
+static struct ep93xx_gpio_chip *to_ep93xx_gpio_chip(struct gpio_chip *gc)
+{
+ return container_of(to_gpio_generic_chip(gc), struct ep93xx_gpio_chip, chip);
+}
static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
{
@@ -267,7 +271,7 @@ static const struct irq_chip gpio_eic_irq_chip = {
static int ep93xx_setup_irqs(struct platform_device *pdev,
struct ep93xx_gpio_chip *egc)
{
- struct gpio_chip *gc = &egc->gc;
+ struct gpio_chip *gc = &egc->chip.gc;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq = &gc->irq;
int ret, irq, i;
@@ -327,6 +331,7 @@ static int ep93xx_setup_irqs(struct platform_device *pdev,
static int ep93xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct ep93xx_gpio_chip *egc;
struct gpio_chip *gc;
void __iomem *data;
@@ -345,8 +350,16 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
if (IS_ERR(dir))
return PTR_ERR(dir);
- gc = &egc->gc;
- ret = bgpio_init(gc, &pdev->dev, 1, data, NULL, NULL, dir, NULL, 0);
+ gc = &egc->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 1,
+ .dat = data,
+ .dirout = dir,
+ };
+
+ ret = gpio_generic_chip_init(&egc->chip, &config);
if (ret)
return dev_err_probe(&pdev->dev, ret, "unable to init generic GPIO\n");
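Every bgpio_init() call site in this series converts the same way: the seven positional arguments (width, data-in, set, clear, dirout, dirin, flags) become named fields of struct gpio_generic_chip_config, which lets the unused NULL slots disappear. Reduced to its core, with placeholder register offsets:

#include <linux/gpio/generic.h>

static int foo_chip_init(struct device *dev, struct gpio_generic_chip *chip,
			 void __iomem *base)
{
	struct gpio_generic_chip_config config = {
		.dev = dev,
		.sz = 4,		/* register width in bytes */
		.dat = base + 0x00,	/* input value (placeholder offset) */
		.set = base + 0x04,	/* output value */
		.dirout = base + 0x08,	/* direction, 1 = output */
	};

	/* Fills in chip->gc accessors and ngpio. */
	return gpio_generic_chip_init(chip, &config);
}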
diff --git a/drivers/gpio/gpio-ftgpio010.c b/drivers/gpio/gpio-ftgpio010.c
index c35eaa2851d8..11e6907c3b54 100644
--- a/drivers/gpio/gpio-ftgpio010.c
+++ b/drivers/gpio/gpio-ftgpio010.c
@@ -10,12 +10,14 @@
* MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
* Copyright 2008 Juergen Beisert, kernel@pengutronix.de
*/
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/gpio/driver.h>
-#include <linux/io.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
-#include <linux/clk.h>
/* GPIO registers definition */
#define GPIO_DATA_OUT 0x00
@@ -40,13 +42,13 @@
/**
* struct ftgpio_gpio - Gemini GPIO state container
* @dev: containing device for this instance
- * @gc: gpiochip for this instance
+ * @chip: generic GPIO chip for this instance
* @base: remapped I/O-memory base
* @clk: silicon clock
*/
struct ftgpio_gpio {
struct device *dev;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
struct clk *clk;
};
@@ -233,6 +235,7 @@ static const struct irq_chip ftgpio_irq_chip = {
static int ftgpio_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ftgpio_gpio *g;
struct gpio_irq_chip *girq;
@@ -261,27 +264,30 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
*/
return PTR_ERR(g->clk);
- ret = bgpio_init(&g->gc, dev, 4,
- g->base + GPIO_DATA_IN,
- g->base + GPIO_DATA_SET,
- g->base + GPIO_DATA_CLR,
- g->base + GPIO_DIR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = g->base + GPIO_DATA_IN,
+ .set = g->base + GPIO_DATA_SET,
+ .clr = g->base + GPIO_DATA_CLR,
+ .dirout = g->base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&g->chip, &config);
if (ret)
return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
- g->gc.label = dev_name(dev);
- g->gc.base = -1;
- g->gc.parent = dev;
- g->gc.owner = THIS_MODULE;
- /* ngpio is set by bgpio_init() */
+ g->chip.gc.label = dev_name(dev);
+ g->chip.gc.base = -1;
+ g->chip.gc.parent = dev;
+ g->chip.gc.owner = THIS_MODULE;
+ /* ngpio is set by gpio_generic_chip_init() */
/* We need a silicon clock to do debounce */
if (!IS_ERR(g->clk))
- g->gc.set_config = ftgpio_gpio_set_config;
+ g->chip.gc.set_config = ftgpio_gpio_set_config;
- girq = &g->gc.irq;
+ girq = &g->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ftgpio_irq_chip);
girq->parent_handler = ftgpio_gpio_irq_handler;
girq->num_parents = 1;
@@ -302,7 +308,7 @@ static int ftgpio_gpio_probe(struct platform_device *pdev)
/* Clear any use of debounce */
writel(0x0, g->base + GPIO_DEBOUNCE_EN);
- return devm_gpiochip_add_data(dev, &g->gc, g);
+ return devm_gpiochip_add_data(dev, &g->chip.gc, g);
}
static const struct of_device_id ftgpio_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-fxl6408.c b/drivers/gpio/gpio-fxl6408.c
index 86ebc66b1104..afc1b8461dab 100644
--- a/drivers/gpio/gpio-fxl6408.c
+++ b/drivers/gpio/gpio-fxl6408.c
@@ -123,6 +123,8 @@ static int fxl6408_probe(struct i2c_client *client)
if (ret)
return ret;
+ i2c_set_clientdata(client, gpio_config.regmap);
+
/* Disable High-Z of outputs, so that our OUTPUT updates actually take effect. */
ret = regmap_write(gpio_config.regmap, FXL6408_REG_OUTPUT_HIGH_Z, 0);
if (ret)
@@ -131,6 +133,16 @@ static int fxl6408_probe(struct i2c_client *client)
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
+static int fxl6408_resume(struct device *dev)
+{
+ struct regmap *regmap = dev_get_drvdata(dev);
+
+ regcache_mark_dirty(regmap);
+ return regcache_sync(regmap);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(fxl6408_pm_ops, NULL, fxl6408_resume);
+
static const __maybe_unused struct of_device_id fxl6408_dt_ids[] = {
{ .compatible = "fcs,fxl6408" },
{ }
@@ -146,6 +158,7 @@ MODULE_DEVICE_TABLE(i2c, fxl6408_id);
static struct i2c_driver fxl6408_driver = {
.driver = {
.name = "fxl6408",
+ .pm = pm_sleep_ptr(&fxl6408_pm_ops),
.of_match_table = fxl6408_dt_ids,
},
.probe = fxl6408_probe,
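The fxl6408 hunks add a suspend-free resume path for a regmap-backed expander: the regmap is stashed as drvdata at probe time, and on resume the whole register cache is marked dirty and synced so every cached value is written back to the re-powered chip. The pattern in isolation (foo_* names are hypothetical):

#include <linux/pm.h>
#include <linux/regmap.h>

static int foo_resume(struct device *dev)
{
	struct regmap *regmap = dev_get_drvdata(dev);

	/* Invalidate the cache, then rewrite all cached registers. */
	regcache_mark_dirty(regmap);
	return regcache_sync(regmap);
}

/* No suspend hook needed: the regcache itself is the saved state. */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);

With pm_sleep_ptr() wrapping the ops in the driver struct, the whole thing compiles away when CONFIG_PM_SLEEP is disabled.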
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index 5dc49648d8e3..66bdff36eb61 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -16,6 +16,7 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
@@ -51,24 +52,36 @@ MODULE_DEVICE_TABLE(of, gef_gpio_ids);
static int __init gef_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
+ struct gpio_generic_chip *chip;
struct gpio_chip *gc;
void __iomem *regs;
int ret;
- gc = devm_kzalloc(dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
return -ENOMEM;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
- ret = bgpio_init(gc, dev, 4, regs + GEF_GPIO_IN, regs + GEF_GPIO_OUT,
- NULL, NULL, regs + GEF_GPIO_DIRECT,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = regs + GEF_GPIO_IN,
+ .set = regs + GEF_GPIO_OUT,
+ .dirin = regs + GEF_GPIO_DIRECT,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ ret = gpio_generic_chip_init(chip, &config);
if (ret)
- return dev_err_probe(dev, ret, "bgpio_init failed\n");
+ return dev_err_probe(dev, ret,
+ "failed to initialize the generic GPIO chip\n");
+
+ gc = &chip->gc;
/* Setup pointers to chip functions */
gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pfw", dev_fwnode(dev));
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index f3f8bab62f94..e4fa84e22726 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -19,6 +19,7 @@
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -45,7 +46,7 @@
/* Structure for an irq of the core - called an underlying irq */
struct grgpio_uirq {
- u8 refcnt; /* Reference counter to manage requesting/freeing of uirq */
+ atomic_t refcnt; /* Reference counter to manage requesting/freeing of uirq */
u8 uirq; /* Underlying irq of the gpio driver */
};
@@ -59,7 +60,7 @@ struct grgpio_lirq {
};
struct grgpio_priv {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
struct device *dev;
@@ -91,13 +92,12 @@ struct grgpio_priv {
static void grgpio_set_imask(struct grgpio_priv *priv, unsigned int offset,
int val)
{
- struct gpio_chip *gc = &priv->gc;
-
if (val)
priv->imask |= BIT(offset);
else
priv->imask &= ~BIT(offset);
- gc->write_reg(priv->regs + GRGPIO_IMASK, priv->imask);
+
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IMASK, priv->imask);
}
static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -118,7 +118,6 @@ static int grgpio_to_irq(struct gpio_chip *gc, unsigned offset)
static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
- unsigned long flags;
u32 mask = BIT(d->hwirq);
u32 ipol;
u32 iedge;
@@ -146,15 +145,13 @@ static int grgpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
- ipol = priv->gc.read_reg(priv->regs + GRGPIO_IPOL) & ~mask;
- iedge = priv->gc.read_reg(priv->regs + GRGPIO_IEDGE) & ~mask;
+ ipol = gpio_generic_read_reg(&priv->chip, priv->regs + GRGPIO_IPOL) & ~mask;
+ iedge = gpio_generic_read_reg(&priv->chip, priv->regs + GRGPIO_IEDGE) & ~mask;
- priv->gc.write_reg(priv->regs + GRGPIO_IPOL, ipol | pol);
- priv->gc.write_reg(priv->regs + GRGPIO_IEDGE, iedge | edge);
-
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IPOL, ipol | pol);
+ gpio_generic_write_reg(&priv->chip, priv->regs + GRGPIO_IEDGE, iedge | edge);
return 0;
}
@@ -163,29 +160,23 @@ static void grgpio_irq_mask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
- unsigned long flags;
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &priv->chip)
+ grgpio_set_imask(priv, offset, 0);
- grgpio_set_imask(priv, offset, 0);
-
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
- gpiochip_disable_irq(&priv->gc, d->hwirq);
+ gpiochip_disable_irq(&priv->chip.gc, d->hwirq);
}
static void grgpio_irq_unmask(struct irq_data *d)
{
struct grgpio_priv *priv = irq_data_get_irq_chip_data(d);
int offset = d->hwirq;
- unsigned long flags;
- gpiochip_enable_irq(&priv->gc, d->hwirq);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpiochip_enable_irq(&priv->chip.gc, d->hwirq);
- grgpio_set_imask(priv, offset, 1);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ grgpio_set_imask(priv, offset, 1);
}
static const struct irq_chip grgpio_irq_chip = {
@@ -200,12 +191,11 @@ static const struct irq_chip grgpio_irq_chip = {
static irqreturn_t grgpio_irq_handler(int irq, void *dev)
{
struct grgpio_priv *priv = dev;
- int ngpio = priv->gc.ngpio;
- unsigned long flags;
+ int ngpio = priv->chip.gc.ngpio;
int i;
int match = 0;
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&priv->chip);
/*
* For each gpio line, call its interrupt handler if its underlying
@@ -221,8 +211,6 @@ static irqreturn_t grgpio_irq_handler(int irq, void *dev)
}
}
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
-
if (!match)
dev_warn(priv->dev, "No gpio line matched irq %d\n", irq);
@@ -253,26 +241,23 @@ static int grgpio_irq_map(struct irq_domain *d, unsigned int irq,
dev_dbg(priv->dev, "Mapping irq %d for gpio line %d\n",
irq, offset);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
-
- /* Request underlying irq if not already requested */
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
lirq->irq = irq;
uirq = &priv->uirqs[lirq->index];
- if (uirq->refcnt == 0) {
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
+
+ /* Request underlying irq if not already requested */
+ if (atomic_fetch_add(1, &uirq->refcnt) == 0) {
ret = request_irq(uirq->uirq, grgpio_irq_handler, 0,
dev_name(priv->dev), priv);
if (ret) {
dev_err(priv->dev,
"Could not request underlying irq %d\n",
uirq->uirq);
+ atomic_dec(&uirq->refcnt); /* rollback */
return ret;
}
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
}
- uirq->refcnt++;
-
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
/* Setup irq */
irq_set_chip_data(irq, priv);
@@ -290,13 +275,13 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
struct grgpio_lirq *lirq;
struct grgpio_uirq *uirq;
unsigned long flags;
- int ngpio = priv->gc.ngpio;
+ int ngpio = priv->chip.gc.ngpio;
int i;
irq_set_chip_and_handler(irq, NULL, NULL);
irq_set_chip_data(irq, NULL);
- raw_spin_lock_irqsave(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_lock_irqsave(&priv->chip, flags);
/* Free underlying irq if last user unmapped */
index = -1;
@@ -313,15 +298,14 @@ static void grgpio_irq_unmap(struct irq_domain *d, unsigned int irq)
if (index >= 0) {
uirq = &priv->uirqs[lirq->index];
- uirq->refcnt--;
- if (uirq->refcnt == 0) {
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ if (atomic_dec_and_test(&uirq->refcnt)) {
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
free_irq(uirq->uirq, priv);
return;
}
}
- raw_spin_unlock_irqrestore(&priv->gc.bgpio_lock, flags);
+ gpio_generic_chip_unlock_irqrestore(&priv->chip, flags);
}
static void grgpio_irq_domain_remove(void *data)
@@ -341,6 +325,7 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = {
static int grgpio_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
+ struct gpio_generic_chip_config config;
struct device *dev = &ofdev->dev;
void __iomem *regs;
struct gpio_chip *gc;
@@ -359,17 +344,24 @@ static int grgpio_probe(struct platform_device *ofdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- gc = &priv->gc;
- err = bgpio_init(gc, dev, 4, regs + GRGPIO_DATA,
- regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = regs + GRGPIO_DATA,
+ .set = regs + GRGPIO_OUTPUT,
+ .dirout = regs + GRGPIO_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ gc = &priv->chip.gc;
+ err = gpio_generic_chip_init(&priv->chip, &config);
if (err) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return err;
}
priv->regs = regs;
- priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
+ priv->imask = gpio_generic_read_reg(&priv->chip, regs + GRGPIO_IMASK);
priv->dev = dev;
gc->owner = THIS_MODULE;
@@ -433,6 +425,7 @@ static int grgpio_probe(struct platform_device *ofdev)
continue;
}
priv->uirqs[lirq->index].uirq = ret;
+ atomic_set(&priv->uirqs[lirq->index].refcnt, 0);
}
}
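The grgpio rework above replaces a u8 reference counter protected by the chip spinlock with an atomic_t, so that request_irq() and free_irq() (both of which can sleep) no longer need the raw spinlock dropped and retaken around them. The core of the pattern, with hypothetical foo_* names:

#include <linux/atomic.h>
#include <linux/interrupt.h>

struct foo_uirq {
	atomic_t refcnt;
	unsigned int irq;	/* underlying (parent) interrupt */
};

static int foo_map_uirq(struct foo_uirq *u, irq_handler_t handler, void *data)
{
	int ret;

	/* The first mapper requests the shared underlying IRQ... */
	if (atomic_fetch_add(1, &u->refcnt) == 0) {
		ret = request_irq(u->irq, handler, 0, "foo", data);
		if (ret) {
			atomic_dec(&u->refcnt);	/* roll back on failure */
			return ret;
		}
	}
	return 0;
}

static void foo_unmap_uirq(struct foo_uirq *u, void *data)
{
	/* ...and the last unmapper frees it. */
	if (atomic_dec_and_test(&u->refcnt))
		free_irq(u->irq, data);
}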
diff --git a/drivers/gpio/gpio-hisi.c b/drivers/gpio/gpio-hisi.c
index ef5cc654a24e..d26298c8351b 100644
--- a/drivers/gpio/gpio-hisi.c
+++ b/drivers/gpio/gpio-hisi.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2020 HiSilicon Limited. */
+
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
@@ -33,7 +35,7 @@
#define HISI_GPIO_DRIVER_NAME "gpio-hisi"
struct hisi_gpio {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
struct device *dev;
void __iomem *reg_base;
unsigned int line_num;
@@ -43,8 +45,8 @@ struct hisi_gpio {
static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
unsigned int off)
{
- struct hisi_gpio *hisi_gpio =
- container_of(chip, struct hisi_gpio, chip);
+ struct hisi_gpio *hisi_gpio = container_of(to_gpio_generic_chip(chip),
+ struct hisi_gpio, chip);
void __iomem *reg = hisi_gpio->reg_base + off;
return readl(reg);
@@ -53,8 +55,8 @@ static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
static inline void hisi_gpio_write_reg(struct gpio_chip *chip,
unsigned int off, u32 val)
{
- struct hisi_gpio *hisi_gpio =
- container_of(chip, struct hisi_gpio, chip);
+ struct hisi_gpio *hisi_gpio = container_of(to_gpio_generic_chip(chip),
+ struct hisi_gpio, chip);
void __iomem *reg = hisi_gpio->reg_base + off;
writel(val, reg);
@@ -180,14 +182,14 @@ static void hisi_gpio_irq_disable(struct irq_data *d)
static void hisi_gpio_irq_handler(struct irq_desc *desc)
{
struct hisi_gpio *hisi_gpio = irq_desc_get_handler_data(desc);
- unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip,
+ unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip.gc,
HISI_GPIO_INTSTATUS_WX);
struct irq_chip *irq_c = irq_desc_get_chip(desc);
int hwirq;
chained_irq_enter(irq_c, desc);
for_each_set_bit(hwirq, &irq_msk, HISI_GPIO_LINE_NUM_MAX)
- generic_handle_domain_irq(hisi_gpio->chip.irq.domain,
+ generic_handle_domain_irq(hisi_gpio->chip.gc.irq.domain,
hwirq);
chained_irq_exit(irq_c, desc);
}
@@ -206,7 +208,7 @@ static const struct irq_chip hisi_gpio_irq_chip = {
static void hisi_gpio_init_irq(struct hisi_gpio *hisi_gpio)
{
- struct gpio_chip *chip = &hisi_gpio->chip;
+ struct gpio_chip *chip = &hisi_gpio->chip.gc;
struct gpio_irq_chip *girq_chip = &chip->irq;
gpio_irq_chip_set_chip(girq_chip, &hisi_gpio_irq_chip);
@@ -264,6 +266,7 @@ static void hisi_gpio_get_pdata(struct device *dev,
static int hisi_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct hisi_gpio *hisi_gpio;
int port_num;
@@ -289,27 +292,32 @@ static int hisi_gpio_probe(struct platform_device *pdev)
hisi_gpio->dev = dev;
- ret = bgpio_init(&hisi_gpio->chip, hisi_gpio->dev, 0x4,
- hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
- hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
- BGPIOF_NO_SET_ON_INPUT);
+ config = (struct gpio_generic_chip_config) {
+ .dev = hisi_gpio->dev,
+ .sz = 4,
+ .dat = hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
+ .set = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
+ .clr = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
+ .dirout = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
+ .dirin = hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
+ .flags = GPIO_GENERIC_NO_SET_ON_INPUT |
+ GPIO_GENERIC_UNREADABLE_REG_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&hisi_gpio->chip, &config);
if (ret) {
dev_err(dev, "failed to init, ret = %d\n", ret);
return ret;
}
- hisi_gpio->chip.set_config = hisi_gpio_set_config;
- hisi_gpio->chip.ngpio = hisi_gpio->line_num;
- hisi_gpio->chip.bgpio_dir_unreadable = 1;
- hisi_gpio->chip.base = -1;
+ hisi_gpio->chip.gc.set_config = hisi_gpio_set_config;
+ hisi_gpio->chip.gc.ngpio = hisi_gpio->line_num;
+ hisi_gpio->chip.gc.base = -1;
if (hisi_gpio->irq > 0)
hisi_gpio_init_irq(hisi_gpio);
- ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip, hisi_gpio);
+ ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip.gc, hisi_gpio);
if (ret) {
dev_err(dev, "failed to register gpiochip, ret = %d\n", ret);
return ret;
diff --git a/drivers/gpio/gpio-hlwd.c b/drivers/gpio/gpio-hlwd.c
index 0580f6712bea..043ce5ef3b07 100644
--- a/drivers/gpio/gpio-hlwd.c
+++ b/drivers/gpio/gpio-hlwd.c
@@ -6,6 +6,7 @@
// Nintendo Wii (Hollywood) GPIO driver
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -48,7 +49,7 @@
#define HW_GPIO_OWNER 0x3c
struct hlwd_gpio {
- struct gpio_chip gpioc;
+ struct gpio_generic_chip gpioc;
struct device *dev;
void __iomem *regs;
int irq;
@@ -61,45 +62,44 @@ static void hlwd_gpio_irqhandler(struct irq_desc *desc)
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_desc_get_handler_data(desc));
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned long flags;
unsigned long pending;
int hwirq;
u32 emulated_pending;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
- pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG);
- pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
+ scoped_guard(gpio_generic_lock_irqsave, &hlwd->gpioc) {
+ pending = ioread32be(hlwd->regs + HW_GPIOB_INTFLAG);
+ pending &= ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
- /* Treat interrupts due to edge trigger emulation separately */
- emulated_pending = hlwd->edge_emulation & pending;
- pending &= ~emulated_pending;
- if (emulated_pending) {
- u32 level, rising, falling;
+ /* Treat interrupts due to edge trigger emulation separately */
+ emulated_pending = hlwd->edge_emulation & pending;
+ pending &= ~emulated_pending;
+ if (emulated_pending) {
+ u32 level, rising, falling;
- level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
- rising = level & emulated_pending;
- falling = ~level & emulated_pending;
+ level = ioread32be(hlwd->regs + HW_GPIOB_INTLVL);
+ rising = level & emulated_pending;
+ falling = ~level & emulated_pending;
- /* Invert the levels */
- iowrite32be(level ^ emulated_pending,
- hlwd->regs + HW_GPIOB_INTLVL);
+ /* Invert the levels */
+ iowrite32be(level ^ emulated_pending,
+ hlwd->regs + HW_GPIOB_INTLVL);
- /* Ack all emulated-edge interrupts */
- iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG);
+ /* Ack all emulated-edge interrupts */
+ iowrite32be(emulated_pending, hlwd->regs + HW_GPIOB_INTFLAG);
- /* Signal interrupts only on the correct edge */
- rising &= hlwd->rising_edge;
- falling &= hlwd->falling_edge;
+ /* Signal interrupts only on the correct edge */
+ rising &= hlwd->rising_edge;
+ falling &= hlwd->falling_edge;
- /* Mark emulated interrupts as pending */
- pending |= rising | falling;
+ /* Mark emulated interrupts as pending */
+ pending |= rising | falling;
+ }
}
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
chained_irq_enter(chip, desc);
for_each_set_bit(hwirq, &pending, 32)
- generic_handle_domain_irq(hlwd->gpioc.irq.domain, hwirq);
+ generic_handle_domain_irq(hlwd->gpioc.gc.irq.domain, hwirq);
chained_irq_exit(chip, desc);
}
@@ -116,30 +116,29 @@ static void hlwd_gpio_irq_mask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 mask;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
- mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
- mask &= ~BIT(data->hwirq);
- iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
- gpiochip_disable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
+ scoped_guard(gpio_generic_lock_irqsave, &hlwd->gpioc) {
+ mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
+ mask &= ~BIT(data->hwirq);
+ iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
+ }
+ gpiochip_disable_irq(&hlwd->gpioc.gc, irqd_to_hwirq(data));
}
static void hlwd_gpio_irq_unmask(struct irq_data *data)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 mask;
- gpiochip_enable_irq(&hlwd->gpioc, irqd_to_hwirq(data));
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
+ gpiochip_enable_irq(&hlwd->gpioc.gc, irqd_to_hwirq(data));
+
+ guard(gpio_generic_lock_irqsave)(&hlwd->gpioc);
+
mask = ioread32be(hlwd->regs + HW_GPIOB_INTMASK);
mask |= BIT(data->hwirq);
iowrite32be(mask, hlwd->regs + HW_GPIOB_INTMASK);
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
}
static void hlwd_gpio_irq_enable(struct irq_data *data)
@@ -173,10 +172,9 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
{
struct hlwd_gpio *hlwd =
gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned long flags;
u32 level;
- raw_spin_lock_irqsave(&hlwd->gpioc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&hlwd->gpioc);
hlwd->edge_emulation &= ~BIT(data->hwirq);
@@ -197,11 +195,9 @@ static int hlwd_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
hlwd_gpio_irq_setup_emulation(hlwd, data->hwirq, flow_type);
break;
default:
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return -EINVAL;
}
- raw_spin_unlock_irqrestore(&hlwd->gpioc.bgpio_lock, flags);
return 0;
}
@@ -225,6 +221,7 @@ static const struct irq_chip hlwd_gpio_irq_chip = {
static int hlwd_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct hlwd_gpio *hlwd;
u32 ngpios;
int res;
@@ -244,25 +241,31 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
* systems where the AHBPROT memory firewall hasn't been configured to
* permit PPC access to HW_GPIO_*.
*
- * Note that this has to happen before bgpio_init reads the
- * HW_GPIOB_OUT and HW_GPIOB_DIR, because otherwise it reads the wrong
- * values.
+ * Note that this has to happen before gpio_generic_chip_init() reads
+ * the HW_GPIOB_OUT and HW_GPIOB_DIR, because otherwise it reads the
+ * wrong values.
*/
iowrite32be(0xffffffff, hlwd->regs + HW_GPIO_OWNER);
- res = bgpio_init(&hlwd->gpioc, &pdev->dev, 4,
- hlwd->regs + HW_GPIOB_IN, hlwd->regs + HW_GPIOB_OUT,
- NULL, hlwd->regs + HW_GPIOB_DIR, NULL,
- BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = hlwd->regs + HW_GPIOB_IN,
+ .set = hlwd->regs + HW_GPIOB_OUT,
+ .dirout = hlwd->regs + HW_GPIOB_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER,
+ };
+
+ res = gpio_generic_chip_init(&hlwd->gpioc, &config);
if (res < 0) {
- dev_warn(&pdev->dev, "bgpio_init failed: %d\n", res);
+ dev_warn(&pdev->dev, "failed to initialize generic GPIO chip: %d\n", res);
return res;
}
res = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios);
if (res)
ngpios = 32;
- hlwd->gpioc.ngpio = ngpios;
+ hlwd->gpioc.gc.ngpio = ngpios;
/* Mask and ack all interrupts */
iowrite32be(0, hlwd->regs + HW_GPIOB_INTMASK);
@@ -282,7 +285,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
return hlwd->irq;
}
- girq = &hlwd->gpioc.irq;
+ girq = &hlwd->gpioc.gc.irq;
gpio_irq_chip_set_chip(girq, &hlwd_gpio_irq_chip);
girq->parent_handler = hlwd_gpio_irqhandler;
girq->num_parents = 1;
@@ -296,7 +299,7 @@ static int hlwd_gpio_probe(struct platform_device *pdev)
girq->handler = handle_level_irq;
}
- return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc, hlwd);
+ return devm_gpiochip_add_data(&pdev->dev, &hlwd->gpioc.gc, hlwd);
}
static const struct of_device_id hlwd_gpio_match[] = {
diff --git a/drivers/gpio/gpio-htc-egpio.c b/drivers/gpio/gpio-htc-egpio.c
index 2eaed83214d8..72935d6dbebf 100644
--- a/drivers/gpio/gpio-htc-egpio.c
+++ b/drivers/gpio/gpio-htc-egpio.c
@@ -364,21 +364,20 @@ static int __init egpio_probe(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int egpio_suspend(struct platform_device *pdev, pm_message_t state)
+static int egpio_suspend(struct device *dev)
{
- struct egpio_info *ei = platform_get_drvdata(pdev);
+ struct egpio_info *ei = dev_get_drvdata(dev);
- if (ei->chained_irq && device_may_wakeup(&pdev->dev))
+ if (ei->chained_irq && device_may_wakeup(dev))
enable_irq_wake(ei->chained_irq);
return 0;
}
-static int egpio_resume(struct platform_device *pdev)
+static int egpio_resume(struct device *dev)
{
- struct egpio_info *ei = platform_get_drvdata(pdev);
+ struct egpio_info *ei = dev_get_drvdata(dev);
- if (ei->chained_irq && device_may_wakeup(&pdev->dev))
+ if (ei->chained_irq && device_may_wakeup(dev))
disable_irq_wake(ei->chained_irq);
/* Update registers from the cache, in case
@@ -386,19 +385,15 @@ static int egpio_resume(struct platform_device *pdev)
egpio_write_cache(ei);
return 0;
}
-#else
-#define egpio_suspend NULL
-#define egpio_resume NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(egpio_pm_ops, egpio_suspend, egpio_resume);
static struct platform_driver egpio_driver = {
.driver = {
.name = "htc-egpio",
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&egpio_pm_ops),
},
- .suspend = egpio_suspend,
- .resume = egpio_resume,
};
static int __init egpio_init(void)
diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c
index 0103be977c66..4fbae6f6a497 100644
--- a/drivers/gpio/gpio-idio-16.c
+++ b/drivers/gpio/gpio-idio-16.c
@@ -6,6 +6,7 @@
#define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16"
+#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -107,6 +108,7 @@ int devm_idio_16_regmap_register(struct device *const dev,
struct idio_16_data *data;
struct regmap_irq_chip *chip;
struct regmap_irq_chip_data *chip_data;
+ DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO);
if (!config->parent)
return -EINVAL;
@@ -164,6 +166,9 @@ int devm_idio_16_regmap_register(struct device *const dev,
gpio_config.irq_domain = regmap_irq_get_domain(chip_data);
gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate;
+ bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0));
+ gpio_config.fixed_direction_output = fixed_direction_output;
+
return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register);
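The idio-16 hunk hands gpio-regmap a fixed-direction map: all sixteen lines of the output bank are hard-wired outputs, expressed as a bitmap built from a u64 mask. Stripped of the surrounding registration code (the size and the static lifetime here are illustrative):

#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/gpio/regmap.h>

#define FOO_NGPIO 16

static void foo_set_fixed_outputs(struct gpio_regmap_config *cfg)
{
	static DECLARE_BITMAP(fixed_out, FOO_NGPIO);

	/* Lines 0..15 can never be switched to inputs. */
	bitmap_from_u64(fixed_out, GENMASK_U64(15, 0));
	cfg->fixed_direction_output = fixed_out;
}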
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 535f25514455..56f1f1e57b69 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -18,7 +19,7 @@
#define IDT_GPIO_ISTAT 0x0C
struct idt_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *pic;
void __iomem *gpio;
u32 mask_cache;
@@ -50,14 +51,13 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
unsigned int sense = flow_type & IRQ_TYPE_SENSE_MASK;
- unsigned long flags;
u32 ilevel;
/* hardware only supports level triggered */
if (sense == IRQ_TYPE_NONE || (sense & IRQ_TYPE_EDGE_BOTH))
return -EINVAL;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ilevel = readl(ctrl->gpio + IDT_GPIO_ILEVEL);
if (sense & IRQ_TYPE_LEVEL_HIGH)
@@ -68,7 +68,6 @@ static int idt_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
writel(ilevel, ctrl->gpio + IDT_GPIO_ILEVEL);
irq_set_handler_locked(d, handle_level_irq);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -84,14 +83,11 @@ static void idt_gpio_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- ctrl->mask_cache |= BIT(d->hwirq);
- writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &ctrl->chip) {
+ ctrl->mask_cache |= BIT(d->hwirq);
+ writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
+ }
gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}
@@ -100,15 +96,13 @@ static void idt_gpio_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct idt_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned long flags;
gpiochip_enable_irq(gc, irqd_to_hwirq(d));
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+
+ guard(gpio_generic_lock_irqsave)(&ctrl->chip);
ctrl->mask_cache &= ~BIT(d->hwirq);
writel(ctrl->mask_cache, ctrl->pic + IDT_PIC_IRQ_MASK);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
static int idt_gpio_irq_init_hw(struct gpio_chip *gc)
@@ -134,6 +128,7 @@ static const struct irq_chip idt_gpio_irqchip = {
static int idt_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct idt_gpio_ctrl *ctrl;
@@ -150,18 +145,24 @@ static int idt_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ctrl->gpio))
return PTR_ERR(ctrl->gpio);
- ctrl->gc.parent = dev;
+ ctrl->chip.gc.parent = dev;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = ctrl->gpio + IDT_GPIO_DATA,
+ .dirout = ctrl->gpio + IDT_GPIO_DIR,
+ };
- ret = bgpio_init(&ctrl->gc, &pdev->dev, 4, ctrl->gpio + IDT_GPIO_DATA,
- NULL, NULL, ctrl->gpio + IDT_GPIO_DIR, NULL, 0);
+ ret = gpio_generic_chip_init(&ctrl->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
ret = device_property_read_u32(dev, "ngpios", &ngpios);
if (!ret)
- ctrl->gc.ngpio = ngpios;
+ ctrl->chip.gc.ngpio = ngpios;
if (device_property_read_bool(dev, "interrupt-controller")) {
ctrl->pic = devm_platform_ioremap_resource_byname(pdev, "pic");
@@ -172,7 +173,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
if (parent_irq < 0)
return parent_irq;
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &idt_gpio_irqchip);
girq->init_hw = idt_gpio_irq_init_hw;
girq->parent_handler = idt_gpio_dispatch;
@@ -188,7 +189,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
}
- return devm_gpiochip_add_data(&pdev->dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(&pdev->dev, &ctrl->chip.gc, ctrl);
}
static const struct of_device_id idt_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 28a8a6a8f05f..f34d87869c8b 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -53,14 +54,14 @@
/**
* struct ixp4xx_gpio - IXP4 GPIO state container
+ * @chip: generic GPIO chip for this instance
* @dev: containing device for this instance
- * @gc: gpiochip for this instance
* @base: remapped I/O-memory base
* @irq_edge: Each bit represents an IRQ: 1: edge-triggered,
* 0: level triggered
*/
struct ixp4xx_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct device *dev;
void __iomem *base;
unsigned long long irq_edge;
@@ -100,7 +101,6 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct ixp4xx_gpio *g = gpiochip_get_data(gc);
int line = d->hwirq;
- unsigned long flags;
u32 int_style;
u32 int_reg;
u32 val;
@@ -144,26 +144,24 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
int_reg = IXP4XX_REG_GPIT1;
}
- raw_spin_lock_irqsave(&g->gc.bgpio_lock, flags);
-
- /* Clear the style for the appropriate pin */
- val = __raw_readl(g->base + int_reg);
- val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
- __raw_writel(val, g->base + int_reg);
-
- __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
+ scoped_guard(gpio_generic_lock_irqsave, &g->chip) {
+ /* Clear the style for the appropriate pin */
+ val = __raw_readl(g->base + int_reg);
+ val &= ~(IXP4XX_GPIO_STYLE_MASK << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
- /* Set the new style */
- val = __raw_readl(g->base + int_reg);
- val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
- __raw_writel(val, g->base + int_reg);
+ __raw_writel(BIT(line), g->base + IXP4XX_REG_GPIS);
- /* Force-configure this line as an input */
- val = __raw_readl(g->base + IXP4XX_REG_GPOE);
- val |= BIT(d->hwirq);
- __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+ /* Set the new style */
+ val = __raw_readl(g->base + int_reg);
+ val |= (int_style << (line * IXP4XX_GPIO_STYLE_SIZE));
+ __raw_writel(val, g->base + int_reg);
- raw_spin_unlock_irqrestore(&g->gc.bgpio_lock, flags);
+ /* Force-configure this line as an input */
+ val = __raw_readl(g->base + IXP4XX_REG_GPOE);
+ val |= BIT(d->hwirq);
+ __raw_writel(val, g->base + IXP4XX_REG_GPOE);
+ }
/* This parent only accepts level high (asserted) */
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
@@ -206,6 +204,7 @@ static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
static int ixp4xx_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
unsigned long flags;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -290,35 +289,38 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
* for big endian.
*/
#if defined(CONFIG_CPU_BIG_ENDIAN)
- flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
#else
flags = 0;
#endif
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = g->base + IXP4XX_REG_GPIN,
+ .set = g->base + IXP4XX_REG_GPOUT,
+ .dirin = g->base + IXP4XX_REG_GPOE,
+ .flags = flags,
+ };
+
/* Populate and register gpio chip */
- ret = bgpio_init(&g->gc, dev, 4,
- g->base + IXP4XX_REG_GPIN,
- g->base + IXP4XX_REG_GPOUT,
- NULL,
- NULL,
- g->base + IXP4XX_REG_GPOE,
- flags);
+ ret = gpio_generic_chip_init(&g->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- g->gc.ngpio = 16;
- g->gc.label = "IXP4XX_GPIO_CHIP";
+ g->chip.gc.ngpio = 16;
+ g->chip.gc.label = "IXP4XX_GPIO_CHIP";
/*
* TODO: when we have migrated to device tree and all GPIOs
* are fetched using phandles, set this to -1 to get rid of
* the fixed gpiochip base.
*/
- g->gc.base = 0;
- g->gc.parent = &pdev->dev;
- g->gc.owner = THIS_MODULE;
+ g->chip.gc.base = 0;
+ g->chip.gc.parent = &pdev->dev;
+ g->chip.gc.owner = THIS_MODULE;
- girq = &g->gc.irq;
+ girq = &g->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -326,7 +328,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
- ret = devm_gpiochip_add_data(dev, &g->gc, g);
+ ret = devm_gpiochip_add_data(dev, &g->chip.gc, g);
if (ret) {
dev_err(dev, "failed to add SoC gpiochip\n");
return ret;
diff --git a/drivers/gpio/gpio-latch.c b/drivers/gpio/gpio-latch.c
index c64aaa896766..452a9ce61488 100644
--- a/drivers/gpio/gpio-latch.c
+++ b/drivers/gpio/gpio-latch.c
@@ -48,8 +48,6 @@
#include <linux/property.h>
#include <linux/delay.h>
-#include "gpiolib.h"
-
struct gpio_latch_priv {
struct gpio_chip gc;
struct gpio_descs *clk_gpios;
diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c
index 3b4f8830c741..f32d1d237795 100644
--- a/drivers/gpio/gpio-ljca.c
+++ b/drivers/gpio/gpio-ljca.c
@@ -286,22 +286,14 @@ static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data,
{
const struct ljca_gpio_packet *packet = evt_data;
struct ljca_gpio_dev *ljca_gpio = context;
- int i, irq;
+ int i;
if (cmd != LJCA_GPIO_INT_EVENT)
return;
for (i = 0; i < packet->num; i++) {
- irq = irq_find_mapping(ljca_gpio->gc.irq.domain,
- packet->item[i].index);
- if (!irq) {
- dev_err(ljca_gpio->gc.parent,
- "gpio_id %u does not mapped to IRQ yet\n",
- packet->item[i].index);
- return;
- }
-
- generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq);
+ generic_handle_domain_irq(ljca_gpio->gc.irq.domain,
+ packet->item[i].index);
set_bit(packet->item[i].index, ljca_gpio->reenable_irqs);
}
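The ljca simplification works because generic_handle_domain_irq() takes a domain-local hwirq and performs the mapping lookup itself, returning an error rather than crashing when no virq is mapped, so the explicit irq_find_mapping() round-trip was redundant. In short:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static void foo_dispatch(struct irq_domain *domain, unsigned int hwirq)
{
	/* Resolves hwirq to its virq internally; a no-op if unmapped. */
	generic_handle_domain_irq(domain, hwirq);
}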
diff --git a/drivers/gpio/gpio-loongson-64bit.c b/drivers/gpio/gpio-loongson-64bit.c
index 818c606fbc51..77d07e31366f 100644
--- a/drivers/gpio/gpio-loongson-64bit.c
+++ b/drivers/gpio/gpio-loongson-64bit.c
@@ -7,12 +7,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
#include <linux/bitops.h>
+#include <linux/reset.h>
#include <asm/types.h>
enum loongson_gpio_mode {
@@ -27,10 +31,18 @@ struct loongson_gpio_chip_data {
unsigned int out_offset;
unsigned int in_offset;
unsigned int inten_offset;
+ unsigned int intpol_offset;
+ unsigned int intedge_offset;
+ unsigned int intclr_offset;
+ unsigned int intsts_offset;
+ unsigned int intdual_offset;
+ unsigned int intr_num;
+ irq_flow_handler_t irq_handler;
+ const struct irq_chip *girqchip;
};
struct loongson_gpio_chip {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
spinlock_t lock;
void __iomem *reg_base;
const struct loongson_gpio_chip_data *chip_data;
@@ -38,7 +50,8 @@ struct loongson_gpio_chip {
static inline struct loongson_gpio_chip *to_loongson_gpio_chip(struct gpio_chip *chip)
{
- return container_of(chip, struct loongson_gpio_chip, chip);
+ return container_of(to_gpio_generic_chip(chip),
+ struct loongson_gpio_chip, chip);
}
static inline void loongson_commit_direction(struct loongson_gpio_chip *lgpio, unsigned int pin,
@@ -135,39 +148,185 @@ static int loongson_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
return platform_get_irq(pdev, offset);
}
-static int loongson_gpio_init(struct device *dev, struct loongson_gpio_chip *lgpio,
+static void loongson_gpio_irq_ack(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x1, lgpio->reg_base + lgpio->chip_data->intclr_offset + hwirq);
+}
+
+static void loongson_gpio_irq_mask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x0, lgpio->reg_base + lgpio->chip_data->inten_offset + hwirq);
+}
+
+static void loongson_gpio_irq_unmask(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+
+ writeb(0x1, lgpio->reg_base + lgpio->chip_data->inten_offset + hwirq);
+}
+
+static int loongson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct loongson_gpio_chip *lgpio = to_loongson_gpio_chip(chip);
+ irq_hw_number_t hwirq = irqd_to_hwirq(data);
+ u8 pol = 0, edge = 0, dual = 0;
+
+ if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+ edge = 1;
+ dual = 1;
+ irq_set_handler_locked(data, handle_edge_irq);
+ } else {
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ pol = 1;
+ fallthrough;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler_locked(data, handle_level_irq);
+ break;
+
+ case IRQ_TYPE_EDGE_RISING:
+ pol = 1;
+ fallthrough;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = 1;
+ irq_set_handler_locked(data, handle_edge_irq);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ writeb(pol, lgpio->reg_base + lgpio->chip_data->intpol_offset + hwirq);
+ writeb(edge, lgpio->reg_base + lgpio->chip_data->intedge_offset + hwirq);
+ writeb(dual, lgpio->reg_base + lgpio->chip_data->intdual_offset + hwirq);
+
+ return 0;
+}
+
+static void loongson_gpio_ls2k0300_irq_handler(struct irq_desc *desc)
+{
+ struct loongson_gpio_chip *lgpio = irq_desc_get_handler_data(desc);
+ struct irq_chip *girqchip = irq_desc_get_chip(desc);
+ int i;
+
+ chained_irq_enter(girqchip, desc);
+
+ for (i = 0; i < lgpio->chip.gc.ngpio; i++) {
+ /*
+ * For the GPIO controller of LS2K0300, interrupts status bits
+ * may be wrongly set even if the corresponding interrupt is
+ * disabled. Thus interrupt enable bits are checked along with
+ * status bits to detect interrupts reliably.
+ */
+ if (readb(lgpio->reg_base + lgpio->chip_data->intsts_offset + i) &&
+ readb(lgpio->reg_base + lgpio->chip_data->inten_offset + i))
+ generic_handle_domain_irq(lgpio->chip.gc.irq.domain, i);
+ }
+
+ chained_irq_exit(girqchip, desc);
+}
+
+static const struct irq_chip loongson_gpio_ls2k0300_irqchip = {
+ .irq_ack = loongson_gpio_irq_ack,
+ .irq_mask = loongson_gpio_irq_mask,
+ .irq_unmask = loongson_gpio_irq_unmask,
+ .irq_set_type = loongson_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SKIP_SET_WAKE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int loongson_gpio_init_irqchip(struct platform_device *pdev,
+ struct loongson_gpio_chip *lgpio)
+{
+ const struct loongson_gpio_chip_data *data = lgpio->chip_data;
+ struct gpio_chip *chip = &lgpio->chip.gc;
+ int i;
+
+ chip->irq.default_type = IRQ_TYPE_NONE;
+ chip->irq.handler = handle_bad_irq;
+ chip->irq.parent_handler = data->irq_handler;
+ chip->irq.parent_handler_data = lgpio;
+ gpio_irq_chip_set_chip(&chip->irq, data->girqchip);
+
+ chip->irq.num_parents = data->intr_num;
+ chip->irq.parents = devm_kcalloc(&pdev->dev, data->intr_num,
+ sizeof(*chip->irq.parents), GFP_KERNEL);
+ if (!chip->irq.parents)
+ return -ENOMEM;
+
+ for (i = 0; i < data->intr_num; i++) {
+ int ret;
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to get IRQ %d\n", i);
+ chip->irq.parents[i] = ret;
+ }
+
+ for (i = 0; i < data->intr_num; i++) {
+ writeb(0x0, lgpio->reg_base + data->inten_offset + i);
+ writeb(0x1, lgpio->reg_base + data->intclr_offset + i);
+ }
+
+ return 0;
+}
+
+static int loongson_gpio_init(struct platform_device *pdev, struct loongson_gpio_chip *lgpio,
void __iomem *reg_base)
{
+ struct gpio_generic_chip_config config;
int ret;
lgpio->reg_base = reg_base;
if (lgpio->chip_data->mode == BIT_CTRL_MODE) {
- ret = bgpio_init(&lgpio->chip, dev, 8,
- lgpio->reg_base + lgpio->chip_data->in_offset,
- lgpio->reg_base + lgpio->chip_data->out_offset,
- NULL, NULL,
- lgpio->reg_base + lgpio->chip_data->conf_offset,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 8,
+ .dat = lgpio->reg_base + lgpio->chip_data->in_offset,
+ .set = lgpio->reg_base + lgpio->chip_data->out_offset,
+ .dirin = lgpio->reg_base + lgpio->chip_data->conf_offset,
+ };
+
+ ret = gpio_generic_chip_init(&lgpio->chip, &config);
if (ret) {
- dev_err(dev, "unable to init generic GPIO\n");
+ dev_err(&pdev->dev, "unable to init generic GPIO\n");
return ret;
}
} else {
- lgpio->chip.direction_input = loongson_gpio_direction_input;
- lgpio->chip.get = loongson_gpio_get;
- lgpio->chip.get_direction = loongson_gpio_get_direction;
- lgpio->chip.direction_output = loongson_gpio_direction_output;
- lgpio->chip.set = loongson_gpio_set;
- lgpio->chip.parent = dev;
+ lgpio->chip.gc.direction_input = loongson_gpio_direction_input;
+ lgpio->chip.gc.get = loongson_gpio_get;
+ lgpio->chip.gc.get_direction = loongson_gpio_get_direction;
+ lgpio->chip.gc.direction_output = loongson_gpio_direction_output;
+ lgpio->chip.gc.set = loongson_gpio_set;
+ lgpio->chip.gc.parent = &pdev->dev;
+ lgpio->chip.gc.base = -1;
spin_lock_init(&lgpio->lock);
}
- lgpio->chip.label = lgpio->chip_data->label;
- lgpio->chip.can_sleep = false;
- if (lgpio->chip_data->inten_offset)
- lgpio->chip.to_irq = loongson_gpio_to_irq;
+ lgpio->chip.gc.label = lgpio->chip_data->label;
+ lgpio->chip.gc.can_sleep = false;
+ if (lgpio->chip_data->girqchip) {
+ ret = loongson_gpio_init_irqchip(pdev, lgpio);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to initialize irqchip\n");
+ } else if (lgpio->chip_data->inten_offset) {
+ lgpio->chip.gc.to_irq = loongson_gpio_to_irq;
+ }
- return devm_gpiochip_add_data(dev, &lgpio->chip, lgpio);
+ return devm_gpiochip_add_data(&pdev->dev, &lgpio->chip.gc, lgpio);
}
static int loongson_gpio_probe(struct platform_device *pdev)
@@ -175,6 +334,7 @@ static int loongson_gpio_probe(struct platform_device *pdev)
void __iomem *reg_base;
struct loongson_gpio_chip *lgpio;
struct device *dev = &pdev->dev;
+ struct reset_control *rst;
lgpio = devm_kzalloc(dev, sizeof(*lgpio), GFP_KERNEL);
if (!lgpio)
@@ -186,7 +346,11 @@ static int loongson_gpio_probe(struct platform_device *pdev)
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
- return loongson_gpio_init(dev, lgpio, reg_base);
+ rst = devm_reset_control_get_optional_exclusive_deasserted(&pdev->dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(rst), "failed to get reset control\n");
+
+ return loongson_gpio_init(pdev, lgpio, reg_base);
}
static const struct loongson_gpio_chip_data loongson_gpio_ls2k_data = {
@@ -198,6 +362,23 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k_data = {
.inten_offset = 0x30,
};
+static const struct loongson_gpio_chip_data loongson_gpio_ls2k0300_data = {
+ .label = "ls2k0300_gpio",
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+ .inten_offset = 0xb00,
+ .intpol_offset = 0xc00,
+ .intedge_offset = 0xd00,
+ .intclr_offset = 0xe00,
+ .intsts_offset = 0xf00,
+ .intdual_offset = 0xf80,
+ .intr_num = 7,
+ .irq_handler = loongson_gpio_ls2k0300_irq_handler,
+ .girqchip = &loongson_gpio_ls2k0300_irqchip,
+};
+
static const struct loongson_gpio_chip_data loongson_gpio_ls2k0500_data0 = {
.label = "ls2k0500_gpio",
.mode = BIT_CTRL_MODE,
@@ -227,11 +408,11 @@ static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data0 = {
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data1 = {
.label = "ls2k2000_gpio",
- .mode = BIT_CTRL_MODE,
- .conf_offset = 0x0,
- .in_offset = 0x20,
- .out_offset = 0x10,
- .inten_offset = 0x30,
+ .mode = BYTE_CTRL_MODE,
+ .conf_offset = 0x800,
+ .in_offset = 0xa00,
+ .out_offset = 0x900,
+ .inten_offset = 0xb00,
};
static const struct loongson_gpio_chip_data loongson_gpio_ls2k2000_data2 = {
@@ -295,6 +476,10 @@ static const struct of_device_id loongson_gpio_of_match[] = {
.data = &loongson_gpio_ls2k_data,
},
{
+ .compatible = "loongson,ls2k0300-gpio",
+ .data = &loongson_gpio_ls2k0300_data,
+ },
+ {
.compatible = "loongson,ls2k0500-gpio0",
.data = &loongson_gpio_ls2k0500_data0,
},
diff --git a/drivers/gpio/gpio-loongson1.c b/drivers/gpio/gpio-loongson1.c
index 6ca3b969db4d..9750a7a17508 100644
--- a/drivers/gpio/gpio-loongson1.c
+++ b/drivers/gpio/gpio-loongson1.c
@@ -5,10 +5,11 @@
* Copyright (C) 2015-2023 Keguang Zhang <keguang.zhang@gmail.com>
*/
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/platform_device.h>
-#include <linux/bitops.h>
/* Loongson 1 GPIO Register Definitions */
#define GPIO_CFG 0x0
@@ -17,19 +18,18 @@
#define GPIO_OUTPUT 0x30
struct ls1x_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
};
static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
{
struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);
+
__raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) | BIT(offset),
ls1x_gc->reg_base + GPIO_CFG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
return 0;
}
@@ -37,16 +37,16 @@ static int ls1x_gpio_request(struct gpio_chip *gc, unsigned int offset)
static void ls1x_gpio_free(struct gpio_chip *gc, unsigned int offset)
{
struct ls1x_gpio_chip *ls1x_gc = gpiochip_get_data(gc);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);
+
__raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) & ~BIT(offset),
ls1x_gc->reg_base + GPIO_CFG);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
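
The two conversions above replace open-coded raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() pairs with the scope-based guard() helper from <linux/cleanup.h>. A minimal sketch of the idiom, assuming the gpio_generic_lock_irqsave guard class used throughout this series:

/*
 * Sketch only: the guard takes the chip lock (IRQ-safe) when declared
 * and releases it automatically when the enclosing scope ends, so every
 * return path is covered without an explicit unlock call.
 */
static int example_cfg_rmw(struct ls1x_gpio_chip *ls1x_gc, unsigned int offset)
{
	guard(gpio_generic_lock_irqsave)(&ls1x_gc->chip);

	__raw_writel(__raw_readl(ls1x_gc->reg_base + GPIO_CFG) | BIT(offset),
		     ls1x_gc->reg_base + GPIO_CFG);

	return 0; /* lock dropped here */
}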
static int ls1x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct ls1x_gpio_chip *ls1x_gc;
int ret;
@@ -59,29 +59,35 @@ static int ls1x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(ls1x_gc->reg_base))
return PTR_ERR(ls1x_gc->reg_base);
- ret = bgpio_init(&ls1x_gc->gc, dev, 4, ls1x_gc->reg_base + GPIO_DATA,
- ls1x_gc->reg_base + GPIO_OUTPUT, NULL,
- NULL, ls1x_gc->reg_base + GPIO_DIR, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ls1x_gc->reg_base + GPIO_DATA,
+ .set = ls1x_gc->reg_base + GPIO_OUTPUT,
+ .dirin = ls1x_gc->reg_base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&ls1x_gc->chip, &config);
if (ret)
goto err;
- ls1x_gc->gc.owner = THIS_MODULE;
- ls1x_gc->gc.request = ls1x_gpio_request;
- ls1x_gc->gc.free = ls1x_gpio_free;
+ ls1x_gc->chip.gc.owner = THIS_MODULE;
+ ls1x_gc->chip.gc.request = ls1x_gpio_request;
+ ls1x_gc->chip.gc.free = ls1x_gpio_free;
/*
* Clear ngpio to let gpiolib get the correct number
* by reading the ngpios property
*/
- ls1x_gc->gc.ngpio = 0;
+ ls1x_gc->chip.gc.ngpio = 0;
- ret = devm_gpiochip_add_data(dev, &ls1x_gc->gc, ls1x_gc);
+ ret = devm_gpiochip_add_data(dev, &ls1x_gc->chip.gc, ls1x_gc);
if (ret)
goto err;
platform_set_drvdata(pdev, ls1x_gc);
dev_info(dev, "GPIO controller registered with %d pins\n",
- ls1x_gc->gc.ngpio);
+ ls1x_gc->chip.gc.ngpio);
return 0;
err:
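
The probe conversion above illustrates the series-wide migration from bgpio_init()'s long positional argument list to a named config struct. A condensed sketch of the new pattern, with the register semantics spelled out (field meanings inferred from the bgpio_init() kernel-doc reproduced later in this diff):

struct gpio_generic_chip_config config = {
	.dev   = dev,
	.sz    = 4,                  /* register width in bytes */
	.dat   = base + GPIO_DATA,   /* read line values here */
	.set   = base + GPIO_OUTPUT, /* write 1 to drive a line high */
	.dirin = base + GPIO_DIR,    /* 1 = line configured as input */
};

ret = gpio_generic_chip_init(&ls1x_gc->chip, &config);
/* on success, the embedded struct gpio_chip lives at ls1x_gc->chip.gc */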
diff --git a/drivers/gpio/gpio-max7360.c b/drivers/gpio/gpio-max7360.c
new file mode 100644
index 000000000000..db92a43776a9
--- /dev/null
+++ b/drivers/gpio/gpio-max7360.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Kamel BOUHARA <kamel.bouhara@bootlin.com>
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max7360.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define MAX7360_GPIO_PORT 1
+#define MAX7360_GPIO_COL 2
+
+struct max7360_gpio_plat_data {
+ unsigned int function;
+};
+
+static struct max7360_gpio_plat_data max7360_gpio_port_plat = { .function = MAX7360_GPIO_PORT };
+static struct max7360_gpio_plat_data max7360_gpio_col_plat = { .function = MAX7360_GPIO_COL };
+
+static int max7360_get_available_gpos(struct device *dev, unsigned int *available_gpios)
+{
+ u32 columns;
+ int ret;
+
+ ret = device_property_read_u32(dev->parent, "keypad,num-columns", &columns);
+ if (ret) {
+ dev_err(dev, "Failed to read columns count\n");
+ return ret;
+ }
+
+ *available_gpios = min(MAX7360_MAX_GPO, MAX7360_MAX_KEY_COLS - columns);
+
+ return 0;
+}
+
+static int max7360_gpo_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ unsigned int available_gpios;
+ int ret;
+
+ ret = max7360_get_available_gpos(gc->parent, &available_gpios);
+ if (ret)
+ return ret;
+
+ bitmap_clear(valid_mask, 0, MAX7360_MAX_KEY_COLS - available_gpios);
+
+ return 0;
+}
+
+static int max7360_set_gpos_count(struct device *dev, struct regmap *regmap)
+{
+ /*
+ * MAX7360 COL0 to COL7 pins can be used either as keypad columns,
+ * general-purpose outputs, or a mix of both.
+ * By default, all pins are used for the keypad; here we update this
+ * configuration to allow using some of them as GPIOs.
+ */
+ unsigned int available_gpios;
+ unsigned int val;
+ int ret;
+
+ ret = max7360_get_available_gpos(dev, &available_gpios);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure which GPIOs will be used for keypad.
+ * MAX7360_REG_DEBOUNCE contains configuration both for keypad debounce
+ * timings and the GPO/keypad column split. Only the latter is
+ * modified here.
+ */
+ val = FIELD_PREP(MAX7360_PORTS, available_gpios);
+ ret = regmap_write_bits(regmap, MAX7360_REG_DEBOUNCE, MAX7360_PORTS, val);
+ if (ret)
+ dev_err(dev, "Failed to write max7360 columns/gpos configuration\n");
+
+ return ret;
+}
+
+static int max7360_gpio_reg_mask_xlate(struct gpio_regmap *gpio,
+ unsigned int base, unsigned int offset,
+ unsigned int *reg, unsigned int *mask)
+{
+ if (base == MAX7360_REG_PWMBASE) {
+ /*
+ * GPIO output is using PWM duty cycle registers: one register
+ * per line, with value being either 0 or 255.
+ */
+ *reg = base + offset;
+ *mask = GENMASK(7, 0);
+ } else {
+ *reg = base;
+ *mask = BIT(offset);
+ }
+
+ return 0;
+}
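
A worked example of the translation above (offset value chosen for illustration):

/*
 * With base == MAX7360_REG_PWMBASE and offset == 3 (PWM-backed outputs):
 *   reg  = MAX7360_REG_PWMBASE + 3;  one duty-cycle register per line
 *   mask = GENMASK(7, 0);            value written is 0 (low) or 255 (high)
 *
 * With any other base, e.g. MAX7360_REG_GPIOIN and offset == 3:
 *   reg  = MAX7360_REG_GPIOIN;       shared register
 *   mask = BIT(3);                   one bit per line
 */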
+
+static const struct regmap_irq max7360_regmap_irqs[MAX7360_MAX_GPIO] = {
+ REGMAP_IRQ_REG(0, 0, BIT(0)),
+ REGMAP_IRQ_REG(1, 0, BIT(1)),
+ REGMAP_IRQ_REG(2, 0, BIT(2)),
+ REGMAP_IRQ_REG(3, 0, BIT(3)),
+ REGMAP_IRQ_REG(4, 0, BIT(4)),
+ REGMAP_IRQ_REG(5, 0, BIT(5)),
+ REGMAP_IRQ_REG(6, 0, BIT(6)),
+ REGMAP_IRQ_REG(7, 0, BIT(7)),
+};
+
+static int max7360_handle_mask_sync(const int index,
+ const unsigned int mask_buf_def,
+ const unsigned int mask_buf,
+ void *const irq_drv_data)
+{
+ struct regmap *regmap = irq_drv_data;
+ int ret;
+
+ for (unsigned int i = 0; i < MAX7360_MAX_GPIO; i++) {
+ ret = regmap_assign_bits(regmap, MAX7360_REG_PWMCFG(i),
+ MAX7360_PORT_CFG_INTERRUPT_MASK, mask_buf & BIT(i));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int max7360_gpio_probe(struct platform_device *pdev)
+{
+ const struct max7360_gpio_plat_data *plat_data;
+ struct gpio_regmap_config gpio_config = { };
+ struct regmap_irq_chip *irq_chip;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ unsigned int outconf;
+ int ret;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "could not get parent regmap\n");
+
+ plat_data = device_get_match_data(dev);
+ if (plat_data->function == MAX7360_GPIO_PORT) {
+ if (device_property_read_bool(dev, "interrupt-controller")) {
+ /*
+ * Port GPIOs with interrupt-controller property: add IRQ
+ * controller.
+ */
+ gpio_config.regmap_irq_flags = IRQF_ONESHOT | IRQF_SHARED;
+ gpio_config.regmap_irq_line =
+ fwnode_irq_get_byname(dev_fwnode(dev->parent), "inti");
+ if (gpio_config.regmap_irq_line < 0)
+ return dev_err_probe(dev, gpio_config.regmap_irq_line,
+ "Failed to get IRQ\n");
+
+ /* Create custom IRQ configuration. */
+ irq_chip = devm_kzalloc(dev, sizeof(*irq_chip), GFP_KERNEL);
+ if (!irq_chip)
+ return -ENOMEM;
+ gpio_config.regmap_irq_chip = irq_chip;
+
+ irq_chip->name = dev_name(dev);
+ irq_chip->status_base = MAX7360_REG_GPIOIN;
+ irq_chip->status_is_level = true;
+ irq_chip->num_regs = 1;
+ irq_chip->num_irqs = MAX7360_MAX_GPIO;
+ irq_chip->irqs = max7360_regmap_irqs;
+ irq_chip->handle_mask_sync = max7360_handle_mask_sync;
+ irq_chip->irq_drv_data = regmap;
+
+ for (unsigned int i = 0; i < MAX7360_MAX_GPIO; i++) {
+ ret = regmap_write_bits(regmap, MAX7360_REG_PWMCFG(i),
+ MAX7360_PORT_CFG_INTERRUPT_EDGES,
+ MAX7360_PORT_CFG_INTERRUPT_EDGES);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable interrupts\n");
+ }
+ }
+
+ /*
+ * Port GPIOs: set output mode configuration (constant-current or not).
+ * This property is optional.
+ */
+ ret = device_property_read_u32(dev, "maxim,constant-current-disable", &outconf);
+ if (!ret) {
+ ret = regmap_write(regmap, MAX7360_REG_GPIOOUTM, outconf);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set constant-current configuration\n");
+ }
+ }
+
+ /* Add gpio device. */
+ gpio_config.parent = dev;
+ gpio_config.regmap = regmap;
+ if (plat_data->function == MAX7360_GPIO_PORT) {
+ gpio_config.ngpio = MAX7360_MAX_GPIO;
+ gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(MAX7360_REG_GPIOIN);
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(MAX7360_REG_PWMBASE);
+ gpio_config.reg_dir_out_base = GPIO_REGMAP_ADDR(MAX7360_REG_GPIOCTRL);
+ gpio_config.ngpio_per_reg = MAX7360_MAX_GPIO;
+ gpio_config.reg_mask_xlate = max7360_gpio_reg_mask_xlate;
+ } else {
+ ret = max7360_set_gpos_count(dev, regmap);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set GPOS pin count\n");
+
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(MAX7360_REG_PORTS);
+ gpio_config.ngpio = MAX7360_MAX_KEY_COLS;
+ gpio_config.init_valid_mask = max7360_gpo_init_valid_mask;
+ }
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
+}
+
+static const struct of_device_id max7360_gpio_of_match[] = {
+ {
+ .compatible = "maxim,max7360-gpo",
+ .data = &max7360_gpio_col_plat
+ }, {
+ .compatible = "maxim,max7360-gpio",
+ .data = &max7360_gpio_port_plat
+ }, {
+ }
+};
+MODULE_DEVICE_TABLE(of, max7360_gpio_of_match);
+
+static struct platform_driver max7360_gpio_driver = {
+ .driver = {
+ .name = "max7360-gpio",
+ .of_match_table = max7360_gpio_of_match,
+ },
+ .probe = max7360_gpio_probe,
+};
+module_platform_driver(max7360_gpio_driver);
+
+MODULE_DESCRIPTION("MAX7360 GPIO driver");
+MODULE_AUTHOR("Kamel BOUHARA <kamel.bouhara@bootlin.com>");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
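
To make the column/GPO arithmetic above concrete, a hypothetical walk-through of max7360_get_available_gpos() and the valid mask (the value 8 for both limits is an assumption for illustration, not taken from this diff):

/*
 * Hypothetical: with "keypad,num-columns" = 5 and assuming
 * MAX7360_MAX_GPO == MAX7360_MAX_KEY_COLS == 8:
 *
 *   available_gpios = min(8, 8 - 5) = 3;
 *
 * max7360_gpo_init_valid_mask() then clears bits 0..4, leaving only
 * COL5..COL7 exposed as GPO lines.
 */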
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index ebe5da4933bc..52b13c6ae496 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -12,6 +12,7 @@
#include <linux/mcb.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#define MEN_Z127_CTRL 0x00
#define MEN_Z127_PSR 0x04
@@ -23,6 +24,11 @@
#define MEN_Z127_ODER 0x1C
#define GPIO_TO_DBCNT_REG(gpio) ((gpio * 4) + 0x80)
+/* MEN Z127 supported model IDs */
+#define MEN_Z127_ID 0x7f
+#define MEN_Z034_ID 0x22
+#define MEN_Z037_ID 0x25
+
#define MEN_Z127_DB_MIN_US 50
/* 16 bit compare register. Each bit represents 50us */
#define MEN_Z127_DB_MAX_US (0xffff * MEN_Z127_DB_MIN_US)
@@ -30,7 +36,7 @@
(db <= MEN_Z127_DB_MAX_US))
struct men_z127_gpio {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *reg_base;
struct resource *mem;
};
@@ -64,7 +70,7 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
debounce /= 50;
}
- raw_spin_lock(&gc->bgpio_lock);
+ guard(gpio_generic_lock)(&priv->chip);
db_en = readl(priv->reg_base + MEN_Z127_DBER);
@@ -79,8 +85,6 @@ static int men_z127_debounce(struct gpio_chip *gc, unsigned gpio,
writel(db_en, priv->reg_base + MEN_Z127_DBER);
writel(db_cnt, priv->reg_base + GPIO_TO_DBCNT_REG(gpio));
- raw_spin_unlock(&gc->bgpio_lock);
-
return 0;
}
@@ -91,7 +95,8 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
struct men_z127_gpio *priv = gpiochip_get_data(gc);
u32 od_en;
- raw_spin_lock(&gc->bgpio_lock);
+ guard(gpio_generic_lock)(&priv->chip);
+
od_en = readl(priv->reg_base + MEN_Z127_ODER);
if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN)
@@ -101,7 +106,6 @@ static int men_z127_set_single_ended(struct gpio_chip *gc,
od_en &= ~BIT(offset);
writel(od_en, priv->reg_base + MEN_Z127_ODER);
- raw_spin_unlock(&gc->bgpio_lock);
return 0;
}
@@ -137,9 +141,11 @@ static void men_z127_release_mem(void *data)
static int men_z127_probe(struct mcb_device *mdev,
const struct mcb_device_id *id)
{
+ struct gpio_generic_chip_config config;
struct men_z127_gpio *men_z127_gpio;
struct device *dev = &mdev->dev;
int ret;
+ unsigned long sz;
men_z127_gpio = devm_kzalloc(dev, sizeof(struct men_z127_gpio),
GFP_KERNEL);
@@ -163,18 +169,33 @@ static int men_z127_probe(struct mcb_device *mdev,
mcb_set_drvdata(mdev, men_z127_gpio);
- ret = bgpio_init(&men_z127_gpio->gc, &mdev->dev, 4,
- men_z127_gpio->reg_base + MEN_Z127_PSR,
- men_z127_gpio->reg_base + MEN_Z127_CTRL,
- NULL,
- men_z127_gpio->reg_base + MEN_Z127_GPIODR,
- NULL, 0);
+ switch (mdev->id) {
+ case MEN_Z127_ID:
+ sz = 4;
+ break;
+ case MEN_Z034_ID:
+ case MEN_Z037_ID:
+ sz = 1;
+ break;
+ default:
+ return dev_err_probe(&mdev->dev, -EINVAL, "no size found for id %d\n", mdev->id);
+ }
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = &mdev->dev,
+ .sz = sz,
+ .dat = men_z127_gpio->reg_base + MEN_Z127_PSR,
+ .set = men_z127_gpio->reg_base + MEN_Z127_CTRL,
+ .dirout = men_z127_gpio->reg_base + MEN_Z127_GPIODR,
+ };
+
+ ret = gpio_generic_chip_init(&men_z127_gpio->chip, &config);
if (ret)
return ret;
- men_z127_gpio->gc.set_config = men_z127_set_config;
+ men_z127_gpio->chip.gc.set_config = men_z127_set_config;
- ret = devm_gpiochip_add_data(dev, &men_z127_gpio->gc, men_z127_gpio);
+ ret = devm_gpiochip_add_data(dev, &men_z127_gpio->chip.gc, men_z127_gpio);
if (ret)
return dev_err_probe(dev, ret,
"failed to register MEN 16Z127 GPIO controller");
@@ -183,7 +204,9 @@ static int men_z127_probe(struct mcb_device *mdev,
}
static const struct mcb_device_id men_z127_ids[] = {
- { .device = 0x7f },
+ { .device = MEN_Z127_ID },
+ { .device = MEN_Z034_ID },
+ { .device = MEN_Z037_ID },
{ }
};
MODULE_DEVICE_TABLE(mcb, men_z127_ids);
@@ -198,7 +221,7 @@ static struct mcb_driver men_z127_driver = {
module_mcb_driver(men_z127_driver);
MODULE_AUTHOR("Andreas Werner <andreas.werner@men.de>");
-MODULE_DESCRIPTION("MEN 16z127 GPIO Controller");
+MODULE_DESCRIPTION("MEN GPIO Controller");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("mcb:16z127");
MODULE_IMPORT_NS("MCB");
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index f6af81bf2b13..6576e5dcb0ee 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -160,7 +160,7 @@ static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
/*
* Save register configuration and disable interrupts.
*/
-static void __maybe_unused ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
+static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
{
int i;
@@ -186,7 +186,7 @@ static void __maybe_unused ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
/*
* This function restores the register configuration of the GPIO device.
*/
-static void __maybe_unused ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
+static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
{
int i;
@@ -479,7 +479,7 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
return 0;
}
-static int __maybe_unused ioh_gpio_suspend(struct device *dev)
+static int ioh_gpio_suspend(struct device *dev)
{
struct ioh_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
@@ -491,7 +491,7 @@ static int __maybe_unused ioh_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused ioh_gpio_resume(struct device *dev)
+static int ioh_gpio_resume(struct device *dev)
{
struct ioh_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
@@ -505,7 +505,7 @@ static int __maybe_unused ioh_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(ioh_gpio_pm_ops, ioh_gpio_suspend, ioh_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ioh_gpio_pm_ops, ioh_gpio_suspend, ioh_gpio_resume);
static const struct pci_device_id ioh_gpio_pcidev_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
@@ -518,7 +518,7 @@ static struct pci_driver ioh_gpio_driver = {
.id_table = ioh_gpio_pcidev_id,
.probe = ioh_gpio_probe,
.driver = {
- .pm = &ioh_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&ioh_gpio_pm_ops),
},
};
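
The ml-ioh conversion above follows the kernel's modern PM idiom: DEFINE_SIMPLE_DEV_PM_OPS() always references its callbacks (so __maybe_unused can be dropped without warnings), while pm_sleep_ptr() evaluates to NULL when CONFIG_PM_SLEEP is disabled, letting the unused ops be discarded. A generic sketch with hypothetical names:

static int foo_suspend(struct device *dev)
{
	/* save controller state */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* restore controller state */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.driver = {
		.pm = pm_sleep_ptr(&foo_pm_ops), /* NULL if !CONFIG_PM_SLEEP */
	},
};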
diff --git a/drivers/gpio/gpio-mlxbf.c b/drivers/gpio/gpio-mlxbf.c
index 1fa9973f55b9..a18fedbc463e 100644
--- a/drivers/gpio/gpio-mlxbf.c
+++ b/drivers/gpio/gpio-mlxbf.c
@@ -4,6 +4,7 @@
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -37,7 +38,7 @@ struct mlxbf_gpio_context_save_regs {
/* Device state structure. */
struct mlxbf_gpio_state {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* Memory Address */
void __iomem *base;
@@ -49,6 +50,7 @@ struct mlxbf_gpio_state {
static int mlxbf_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct mlxbf_gpio_state *gs;
struct device *dev = &pdev->dev;
struct gpio_chip *gc;
@@ -62,21 +64,24 @@ static int mlxbf_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gs->base))
return PTR_ERR(gs->base);
- gc = &gs->gc;
- ret = bgpio_init(gc, dev, 8,
- gs->base + MLXBF_GPIO_PIN_STATE,
- NULL,
- NULL,
- gs->base + MLXBF_GPIO_PIN_DIR_O,
- gs->base + MLXBF_GPIO_PIN_DIR_I,
- 0);
+ gc = &gs->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 8,
+ .dat = gs->base + MLXBF_GPIO_PIN_STATE,
+ .dirout = gs->base + MLXBF_GPIO_PIN_DIR_O,
+ .dirin = gs->base + MLXBF_GPIO_PIN_DIR_I,
+ };
+
+ ret = gpio_generic_chip_init(&gs->chip, &config);
if (ret)
return -ENODEV;
gc->owner = THIS_MODULE;
gc->ngpio = MLXBF_GPIO_NR;
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ ret = devm_gpiochip_add_data(dev, &gs->chip.gc, gs);
if (ret) {
dev_err(&pdev->dev, "Failed adding memory mapped gpiochip\n");
return ret;
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 390f2e74a9d8..6668686a28ff 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -6,8 +6,10 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
@@ -65,7 +67,7 @@ struct mlxbf2_gpio_context_save_regs {
/* BlueField-2 gpio block context structure. */
struct mlxbf2_gpio_context {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* YU GPIO blocks address */
void __iomem *gpio_io;
@@ -132,7 +134,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
u32 arm_gpio_lock_val;
mutex_lock(yu_arm_gpio_lock_param.lock);
- raw_spin_lock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_lock(&gs->chip);
arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io);
@@ -140,7 +142,7 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
* When lock active bit[31] is set, ModeX is write enabled
*/
if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) {
- raw_spin_unlock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_unlock(&gs->chip);
mutex_unlock(yu_arm_gpio_lock_param.lock);
return -EINVAL;
}
@@ -154,11 +156,11 @@ static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs)
* Release the YU arm_gpio_lock after changing the direction mode.
*/
static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs)
- __releases(&gs->gc.bgpio_lock)
+ __releases(&gs->chip.lock)
__releases(yu_arm_gpio_lock_param.lock)
{
writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io);
- raw_spin_unlock(&gs->gc.bgpio_lock);
+ gpio_generic_chip_unlock(&gs->chip);
mutex_unlock(yu_arm_gpio_lock_param.lock);
}
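
Note the asymmetric locking here: the chip lock is taken in mlxbf2_gpio_lock_acquire() and only dropped in mlxbf2_gpio_lock_release(), so the scope-based guard() used elsewhere in this series does not fit; the explicit lock/unlock helpers plus the __releases() sparse annotations document the hand-off instead. The caller-visible contract, as a sketch:

/*
 * mlxbf2_gpio_lock_acquire(gs);   // takes yu mutex + chip lock
 * ... update direction registers ...
 * mlxbf2_gpio_lock_release(gs);   // drops chip lock + yu mutex
 */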
@@ -235,11 +237,10 @@ static void mlxbf2_gpio_irq_enable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, irqd_to_hwirq(irqd));
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
@@ -247,7 +248,6 @@ static void mlxbf2_gpio_irq_enable(struct irq_data *irqd)
val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
}
static void mlxbf2_gpio_irq_disable(struct irq_data *irqd)
@@ -255,21 +255,21 @@ static void mlxbf2_gpio_irq_disable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
- val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- val &= ~BIT(offset);
- writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ val = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~BIT(offset);
+ writel(val, gs->gpio_io + YU_GPIO_CAUSE_OR_EVTEN0);
+ }
+
gpiochip_disable_irq(gc, irqd_to_hwirq(irqd));
}
static irqreturn_t mlxbf2_gpio_irq_handler(int irq, void *ptr)
{
struct mlxbf2_gpio_context *gs = ptr;
- struct gpio_chip *gc = &gs->gc;
+ struct gpio_chip *gc = &gs->chip.gc;
unsigned long pending;
u32 level;
@@ -288,7 +288,6 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf2_gpio_context *gs = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(irqd);
- unsigned long flags;
bool fall = false;
bool rise = false;
u32 val;
@@ -308,7 +307,8 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
return -EINVAL;
}
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
+
if (fall) {
val = readl(gs->gpio_io + YU_GPIO_CAUSE_FALL_EN);
val |= BIT(offset);
@@ -320,7 +320,6 @@ mlxbf2_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
val |= BIT(offset);
writel(val, gs->gpio_io + YU_GPIO_CAUSE_RISE_EN);
}
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
return 0;
}
@@ -347,6 +346,7 @@ static const struct irq_chip mlxbf2_gpio_irq_chip = {
static int
mlxbf2_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct mlxbf2_gpio_context *gs;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
@@ -369,28 +369,25 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gs->gpio_io);
ret = mlxbf2_gpio_get_lock_res(pdev);
- if (ret) {
- dev_err(dev, "Failed to get yu_arm_gpio_lock resource\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get yu_arm_gpio_lock resource\n");
if (device_property_read_u32(dev, "npins", &npins))
npins = MLXBF2_GPIO_MAX_PINS_PER_BLOCK;
- gc = &gs->gc;
+ gc = &gs->chip.gc;
- ret = bgpio_init(gc, dev, 4,
- gs->gpio_io + YU_GPIO_DATAIN,
- gs->gpio_io + YU_GPIO_DATASET,
- gs->gpio_io + YU_GPIO_DATACLEAR,
- NULL,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = gs->gpio_io + YU_GPIO_DATAIN,
+ .set = gs->gpio_io + YU_GPIO_DATASET,
+ .clr = gs->gpio_io + YU_GPIO_DATACLEAR,
+ };
- if (ret) {
- dev_err(dev, "bgpio_init failed\n");
- return ret;
- }
+ ret = gpio_generic_chip_init(&gs->chip, &config);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize the generic GPIO chip\n");
gc->direction_input = mlxbf2_gpio_direction_input;
gc->direction_output = mlxbf2_gpio_direction_output;
@@ -399,7 +396,7 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
- girq = &gs->gc.irq;
+ girq = &gs->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &mlxbf2_gpio_irq_chip);
girq->handler = handle_simple_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -414,24 +411,20 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
*/
ret = devm_request_irq(dev, irq, mlxbf2_gpio_irq_handler,
IRQF_SHARED, name, gs);
- if (ret) {
- dev_err(dev, "failed to request IRQ");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request IRQ");
}
platform_set_drvdata(pdev, gs);
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
- if (ret) {
- dev_err(dev, "Failed adding memory mapped gpiochip\n");
- return ret;
- }
+ ret = devm_gpiochip_add_data(dev, &gs->chip.gc, gs);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed adding memory mapped gpiochip\n");
return 0;
}
-static int __maybe_unused mlxbf2_gpio_suspend(struct device *dev)
+static int mlxbf2_gpio_suspend(struct device *dev)
{
struct mlxbf2_gpio_context *gs = dev_get_drvdata(dev);
@@ -443,7 +436,7 @@ static int __maybe_unused mlxbf2_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused mlxbf2_gpio_resume(struct device *dev)
+static int mlxbf2_gpio_resume(struct device *dev)
{
struct mlxbf2_gpio_context *gs = dev_get_drvdata(dev);
@@ -454,7 +447,7 @@ static int __maybe_unused mlxbf2_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(mlxbf2_pm_ops, mlxbf2_gpio_suspend, mlxbf2_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mlxbf2_pm_ops, mlxbf2_gpio_suspend, mlxbf2_gpio_resume);
static const struct acpi_device_id __maybe_unused mlxbf2_gpio_acpi_match[] = {
{ "MLNXBF22", 0 },
@@ -466,7 +459,7 @@ static struct platform_driver mlxbf2_gpio_driver = {
.driver = {
.name = "mlxbf2_gpio",
.acpi_match_table = mlxbf2_gpio_acpi_match,
- .pm = &mlxbf2_pm_ops,
+ .pm = pm_sleep_ptr(&mlxbf2_pm_ops),
},
.probe = mlxbf2_gpio_probe,
};
diff --git a/drivers/gpio/gpio-mlxbf3.c b/drivers/gpio/gpio-mlxbf3.c
index ed29b07d16c1..4770578269ba 100644
--- a/drivers/gpio/gpio-mlxbf3.c
+++ b/drivers/gpio/gpio-mlxbf3.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -42,7 +43,7 @@
#define MLXBF_GPIO_CLR_ALL_INTS GENMASK(31, 0)
struct mlxbf3_gpio_context {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
/* YU GPIO block address */
void __iomem *gpio_set_io;
@@ -58,18 +59,17 @@ static void mlxbf3_gpio_irq_enable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
gpiochip_enable_irq(gc, offset);
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&gs->chip);
+
writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
val |= BIT(offset);
writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
}
static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
@@ -77,16 +77,15 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
- val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- val &= ~BIT(offset);
- writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ val = readl(gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
+ val &= ~BIT(offset);
+ writel(val, gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_EVTEN0);
- writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
+ writel(BIT(offset), gs->gpio_cause_io + MLXBF_GPIO_CAUSE_OR_CLRCAUSE);
+ }
gpiochip_disable_irq(gc, offset);
}
@@ -94,7 +93,7 @@ static void mlxbf3_gpio_irq_disable(struct irq_data *irqd)
static irqreturn_t mlxbf3_gpio_irq_handler(int irq, void *ptr)
{
struct mlxbf3_gpio_context *gs = ptr;
- struct gpio_chip *gc = &gs->gc;
+ struct gpio_chip *gc = &gs->chip.gc;
unsigned long pending;
u32 level;
@@ -113,37 +112,33 @@ mlxbf3_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct mlxbf3_gpio_context *gs = gpiochip_get_data(gc);
irq_hw_number_t offset = irqd_to_hwirq(irqd);
- unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&gs->gc.bgpio_lock, flags);
-
- switch (type & IRQ_TYPE_SENSE_MASK) {
- case IRQ_TYPE_EDGE_BOTH:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- break;
- case IRQ_TYPE_EDGE_RISING:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
- break;
- case IRQ_TYPE_EDGE_FALLING:
- val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- val |= BIT(offset);
- writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
- break;
- default:
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
- return -EINVAL;
+ scoped_guard(gpio_generic_lock_irqsave, &gs->chip) {
+ switch (type & IRQ_TYPE_SENSE_MASK) {
+ case IRQ_TYPE_EDGE_BOTH:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_RISE_EN);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val = readl(gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ val |= BIT(offset);
+ writel(val, gs->gpio_io + MLXBF_GPIO_CAUSE_FALL_EN);
+ break;
+ default:
+ return -EINVAL;
+ }
}
- raw_spin_unlock_irqrestore(&gs->gc.bgpio_lock, flags);
-
irq_set_handler_locked(irqd, handle_edge_irq);
return 0;
@@ -186,6 +181,7 @@ static int mlxbf3_gpio_add_pin_ranges(struct gpio_chip *chip)
static int mlxbf3_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
@@ -211,16 +207,23 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gs->gpio_clr_io = devm_platform_ioremap_resource(pdev, 3);
if (IS_ERR(gs->gpio_clr_io))
return PTR_ERR(gs->gpio_clr_io);
- gc = &gs->gc;
-
- ret = bgpio_init(gc, dev, 4,
- gs->gpio_io + MLXBF_GPIO_READ_DATA_IN,
- gs->gpio_set_io + MLXBF_GPIO_FW_DATA_OUT_SET,
- gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
- gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
- gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR, 0);
+ gc = &gs->chip.gc;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = gs->gpio_io + MLXBF_GPIO_READ_DATA_IN,
+ .set = gs->gpio_set_io + MLXBF_GPIO_FW_DATA_OUT_SET,
+ .clr = gs->gpio_clr_io + MLXBF_GPIO_FW_DATA_OUT_CLEAR,
+ .dirout = gs->gpio_set_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_SET,
+ .dirin = gs->gpio_clr_io + MLXBF_GPIO_FW_OUTPUT_ENABLE_CLEAR,
+ };
+
+ ret = gpio_generic_chip_init(&gs->chip, &config);
if (ret)
- return dev_err_probe(dev, ret, "%s: bgpio_init() failed", __func__);
+ return dev_err_probe(dev, ret,
+ "%s: failed to initialize the generic GPIO chip",
+ __func__);
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
@@ -229,7 +232,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
- girq = &gs->gc.irq;
+ girq = &gs->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
girq->default_type = IRQ_TYPE_NONE;
/* This will let us handle the parent IRQ in the driver */
@@ -250,7 +253,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, gs);
- ret = devm_gpiochip_add_data(dev, &gs->gc, gs);
+ ret = devm_gpiochip_add_data(dev, gc, gs);
if (ret)
dev_err_probe(dev, ret, "Failed adding memory mapped gpiochip\n");
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
index 8f1405733d98..1bd98c50a459 100644
--- a/drivers/gpio/gpio-mm-lantiq.c
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -10,7 +10,6 @@
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/gpio/driver.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -27,7 +26,8 @@
#define LTQ_EBU_WP 0x80000000 /* write protect bit */
struct ltq_mm {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
u16 shadow; /* shadow the latches state */
};
@@ -44,7 +44,7 @@ static void ltq_mm_apply(struct ltq_mm *chip)
spin_lock_irqsave(&ebu_lock, flags);
ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
- __raw_writew(chip->shadow, chip->mmchip.regs);
+ __raw_writew(chip->shadow, chip->regs);
ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
spin_unlock_irqrestore(&ebu_lock, flags);
}
@@ -52,8 +52,8 @@ static void ltq_mm_apply(struct ltq_mm *chip)
/**
* ltq_mm_set() - gpio_chip->set - set gpios.
* @gc: Pointer to gpio_chip device structure.
- * @gpio: GPIO signal number.
- * @val: Value to be written to specified signal.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to specified signal.
*
* Set the shadow value and call ltq_mm_apply. Always returns 0.
*/
@@ -73,8 +73,8 @@ static int ltq_mm_set(struct gpio_chip *gc, unsigned int offset, int value)
/**
* ltq_mm_dir_out() - gpio_chip->dir_out - set gpio direction.
* @gc: Pointer to gpio_chip device structure.
- * @gpio: GPIO signal number.
- * @val: Value to be written to specified signal.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to specified signal.
*
* Same as ltq_mm_set, always returns 0.
*/
@@ -85,21 +85,21 @@ static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
/**
* ltq_mm_save_regs() - Set initial values of GPIO pins
- * @mm_gc: pointer to memory mapped GPIO chip structure
+ * @chip: Pointer to our private data structure.
*/
-static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
+static void ltq_mm_save_regs(struct ltq_mm *chip)
{
- struct ltq_mm *chip =
- container_of(mm_gc, struct ltq_mm, mmchip);
-
/* tell the ebu controller which memory address we will be using */
- ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
+ ltq_ebu_w32(CPHYSADDR((__force void *)chip->regs) | 0x1, LTQ_EBU_ADDRSEL1);
ltq_mm_apply(chip);
}
static int ltq_mm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct gpio_chip *gc;
struct ltq_mm *chip;
u32 shadow;
@@ -107,25 +107,29 @@ static int ltq_mm_probe(struct platform_device *pdev)
if (!chip)
return -ENOMEM;
- platform_set_drvdata(pdev, chip);
+ gc = &chip->gc;
+
+ gc->base = -1;
+ gc->ngpio = 16;
+ gc->direction_output = ltq_mm_dir_out;
+ gc->set = ltq_mm_set;
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
- chip->mmchip.gc.ngpio = 16;
- chip->mmchip.gc.direction_output = ltq_mm_dir_out;
- chip->mmchip.gc.set = ltq_mm_set;
- chip->mmchip.save_regs = ltq_mm_save_regs;
+ ltq_mm_save_regs(chip);
/* store the shadow value if one was passed by the devicetree */
if (!of_property_read_u32(pdev->dev.of_node, "lantiq,shadow", &shadow))
chip->shadow = shadow;
- return of_mm_gpiochip_add_data(pdev->dev.of_node, &chip->mmchip, chip);
-}
-
-static void ltq_mm_remove(struct platform_device *pdev)
-{
- struct ltq_mm *chip = platform_get_drvdata(pdev);
-
- of_mm_gpiochip_remove(&chip->mmchip);
+ return devm_gpiochip_add_data(dev, gc, chip);
}
static const struct of_device_id ltq_mm_match[] = {
@@ -136,7 +140,6 @@ MODULE_DEVICE_TABLE(of, ltq_mm_match);
static struct platform_driver ltq_mm_driver = {
.probe = ltq_mm_probe,
- .remove = ltq_mm_remove,
.driver = {
.name = "gpio-mm-ltq",
.of_match_table = ltq_mm_match,
diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c
index 021ad62778c2..b3a26a06260b 100644
--- a/drivers/gpio/gpio-mmio.c
+++ b/drivers/gpio/gpio-mmio.c
@@ -41,6 +41,7 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -57,136 +58,145 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.`
#include <linux/types.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include "gpiolib.h"
-static void bgpio_write8(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write8(void __iomem *reg, unsigned long data)
{
writeb(data, reg);
}
-static unsigned long bgpio_read8(void __iomem *reg)
+static unsigned long gpio_mmio_read8(void __iomem *reg)
{
return readb(reg);
}
-static void bgpio_write16(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write16(void __iomem *reg, unsigned long data)
{
writew(data, reg);
}
-static unsigned long bgpio_read16(void __iomem *reg)
+static unsigned long gpio_mmio_read16(void __iomem *reg)
{
return readw(reg);
}
-static void bgpio_write32(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write32(void __iomem *reg, unsigned long data)
{
writel(data, reg);
}
-static unsigned long bgpio_read32(void __iomem *reg)
+static unsigned long gpio_mmio_read32(void __iomem *reg)
{
return readl(reg);
}
#if BITS_PER_LONG >= 64
-static void bgpio_write64(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write64(void __iomem *reg, unsigned long data)
{
writeq(data, reg);
}
-static unsigned long bgpio_read64(void __iomem *reg)
+static unsigned long gpio_mmio_read64(void __iomem *reg)
{
return readq(reg);
}
#endif /* BITS_PER_LONG >= 64 */
-static void bgpio_write16be(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write16be(void __iomem *reg, unsigned long data)
{
iowrite16be(data, reg);
}
-static unsigned long bgpio_read16be(void __iomem *reg)
+static unsigned long gpio_mmio_read16be(void __iomem *reg)
{
return ioread16be(reg);
}
-static void bgpio_write32be(void __iomem *reg, unsigned long data)
+static void gpio_mmio_write32be(void __iomem *reg, unsigned long data)
{
iowrite32be(data, reg);
}
-static unsigned long bgpio_read32be(void __iomem *reg)
+static unsigned long gpio_mmio_read32be(void __iomem *reg)
{
return ioread32be(reg);
}
-static unsigned long bgpio_line2mask(struct gpio_chip *gc, unsigned int line)
+static unsigned long gpio_mmio_line2mask(struct gpio_chip *gc, unsigned int line)
{
- if (gc->be_bits)
- return BIT(gc->bgpio_bits - 1 - line);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (chip->be_bits)
+ return BIT(chip->bits - 1 - line);
return BIT(line);
}
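
For the mirrored big-endian case handled above, a brief illustration (32-bit register assumed):

/*
 * With chip->be_bits set and chip->bits == 32:
 *   gpio_mmio_line2mask(gc, 0)  == BIT(31)
 *   gpio_mmio_line2mask(gc, 31) == BIT(0)
 * In native bit order, line n is simply BIT(n).
 */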
-static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_get_set(struct gpio_chip *gc, unsigned int gpio)
{
- unsigned long pinmask = bgpio_line2mask(gc, gpio);
- bool dir = !!(gc->bgpio_dir & pinmask);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long pinmask = gpio_mmio_line2mask(gc, gpio);
+ bool dir = !!(chip->sdir & pinmask);
if (dir)
- return !!(gc->read_reg(gc->reg_set) & pinmask);
- else
- return !!(gc->read_reg(gc->reg_dat) & pinmask);
+ return !!(chip->read_reg(chip->reg_set) & pinmask);
+
+ return !!(chip->read_reg(chip->reg_dat) & pinmask);
}
/*
* This assumes that the bits in the GPIO register are in native endianness.
* We only assign this function pointer when that is the case.
*/
-static int bgpio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_get_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
- unsigned long get_mask = 0;
- unsigned long set_mask = 0;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long get_mask = 0, set_mask = 0;
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- set_mask = *mask & gc->bgpio_dir;
- get_mask = *mask & ~gc->bgpio_dir;
+ set_mask = *mask & chip->sdir;
+ get_mask = *mask & ~chip->sdir;
if (set_mask)
- *bits |= gc->read_reg(gc->reg_set) & set_mask;
+ *bits |= chip->read_reg(chip->reg_set) & set_mask;
if (get_mask)
- *bits |= gc->read_reg(gc->reg_dat) & get_mask;
+ *bits |= chip->read_reg(chip->reg_dat) & get_mask;
return 0;
}
-static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_get(struct gpio_chip *gc, unsigned int gpio)
{
- return !!(gc->read_reg(gc->reg_dat) & bgpio_line2mask(gc, gpio));
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ return !!(chip->read_reg(chip->reg_dat) & gpio_mmio_line2mask(gc, gpio));
}
/*
* This only works if the bits in the GPIO register are in native endianness.
*/
-static int bgpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
/* Make sure we first clear any bits that are zero when we read the register */
*bits &= ~*mask;
- *bits |= gc->read_reg(gc->reg_dat) & *mask;
+ *bits |= chip->read_reg(chip->reg_dat) & *mask;
return 0;
}
/*
* With big endian mirrored bit order it becomes more tedious.
*/
-static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long readmask = 0;
unsigned long val;
int bit;
@@ -196,150 +206,155 @@ static int bgpio_get_multiple_be(struct gpio_chip *gc, unsigned long *mask,
/* Create a mirrored mask */
for_each_set_bit(bit, mask, gc->ngpio)
- readmask |= bgpio_line2mask(gc, bit);
+ readmask |= gpio_mmio_line2mask(gc, bit);
/* Read the register */
- val = gc->read_reg(gc->reg_dat) & readmask;
+ val = chip->read_reg(chip->reg_dat) & readmask;
/*
* Mirror the result into the "bits" result; this will give line 0
* in bit 0 ... line 31 in bit 31 for a 32-bit register.
*/
for_each_set_bit(bit, &val, gc->ngpio)
- *bits |= bgpio_line2mask(gc, bit);
+ *bits |= gpio_mmio_line2mask(gc, bit);
return 0;
}
-static int bgpio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
+static int gpio_mmio_set_none(struct gpio_chip *gc, unsigned int gpio, int val)
{
return 0;
}
-static int bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int gpio_mmio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = bgpio_line2mask(gc, gpio);
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long mask = gpio_mmio_line2mask(gc, gpio);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(raw_spinlock)(&chip->lock);
if (val)
- gc->bgpio_data |= mask;
+ chip->sdata |= mask;
else
- gc->bgpio_data &= ~mask;
+ chip->sdata &= ~mask;
- gc->write_reg(gc->reg_dat, gc->bgpio_data);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ chip->write_reg(chip->reg_dat, chip->sdata);
return 0;
}
-static int bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
- unsigned long mask = bgpio_line2mask(gc, gpio);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long mask = gpio_mmio_line2mask(gc, gpio);
if (val)
- gc->write_reg(gc->reg_set, mask);
+ chip->write_reg(chip->reg_set, mask);
else
- gc->write_reg(gc->reg_clr, mask);
+ chip->write_reg(chip->reg_clr, mask);
return 0;
}
-static int bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
+static int gpio_mmio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long mask = bgpio_line2mask(gc, gpio);
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+ unsigned long mask = gpio_mmio_line2mask(gc, gpio);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(raw_spinlock)(&chip->lock);
if (val)
- gc->bgpio_data |= mask;
+ chip->sdata |= mask;
else
- gc->bgpio_data &= ~mask;
-
- gc->write_reg(gc->reg_set, gc->bgpio_data);
+ chip->sdata &= ~mask;
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ chip->write_reg(chip->reg_set, chip->sdata);
return 0;
}
-static void bgpio_multiple_get_masks(struct gpio_chip *gc,
- unsigned long *mask, unsigned long *bits,
- unsigned long *set_mask,
- unsigned long *clear_mask)
+static void gpio_mmio_multiple_get_masks(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits,
+ unsigned long *set_mask,
+ unsigned long *clear_mask)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
int i;
*set_mask = 0;
*clear_mask = 0;
- for_each_set_bit(i, mask, gc->bgpio_bits) {
+ for_each_set_bit(i, mask, chip->bits) {
if (test_bit(i, bits))
- *set_mask |= bgpio_line2mask(gc, i);
+ *set_mask |= gpio_mmio_line2mask(gc, i);
else
- *clear_mask |= bgpio_line2mask(gc, i);
+ *clear_mask |= gpio_mmio_line2mask(gc, i);
}
}
-static void bgpio_set_multiple_single_reg(struct gpio_chip *gc,
- unsigned long *mask,
- unsigned long *bits,
- void __iomem *reg)
+static void gpio_mmio_set_multiple_single_reg(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits,
+ void __iomem *reg)
{
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long set_mask, clear_mask;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
-
- bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
+ guard(raw_spinlock)(&chip->lock);
- gc->bgpio_data |= set_mask;
- gc->bgpio_data &= ~clear_mask;
+ gpio_mmio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
- gc->write_reg(reg, gc->bgpio_data);
+ chip->sdata |= set_mask;
+ chip->sdata &= ~clear_mask;
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ chip->write_reg(reg, chip->sdata);
}
-static int bgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
- bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_dat);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ gpio_mmio_set_multiple_single_reg(gc, mask, bits, chip->reg_dat);
return 0;
}
-static int bgpio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_set_multiple_set(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
{
- bgpio_set_multiple_single_reg(gc, mask, bits, gc->reg_set);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ gpio_mmio_set_multiple_single_reg(gc, mask, bits, chip->reg_set);
return 0;
}
-static int bgpio_set_multiple_with_clear(struct gpio_chip *gc,
- unsigned long *mask,
- unsigned long *bits)
+static int gpio_mmio_set_multiple_with_clear(struct gpio_chip *gc,
+ unsigned long *mask,
+ unsigned long *bits)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
unsigned long set_mask, clear_mask;
- bgpio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
+ gpio_mmio_multiple_get_masks(gc, mask, bits, &set_mask, &clear_mask);
if (set_mask)
- gc->write_reg(gc->reg_set, set_mask);
+ chip->write_reg(chip->reg_set, set_mask);
if (clear_mask)
- gc->write_reg(gc->reg_clr, clear_mask);
+ chip->write_reg(chip->reg_clr, clear_mask);
return 0;
}
-static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_out)
+static int gpio_mmio_dir_return(struct gpio_chip *gc, unsigned int gpio,
+ bool dir_out)
{
- if (!gc->bgpio_pinctrl)
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (!chip->pinctrl)
return 0;
if (dir_out)
@@ -348,128 +363,125 @@ static int bgpio_dir_return(struct gpio_chip *gc, unsigned int gpio, bool dir_ou
return pinctrl_gpio_direction_input(gc, gpio);
}
-static int bgpio_dir_in_err(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_dir_in_err(struct gpio_chip *gc, unsigned int gpio)
{
return -EINVAL;
}
-static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- return bgpio_dir_return(gc, gpio, false);
+ return gpio_mmio_dir_return(gc, gpio, false);
}
-static int bgpio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_dir_out_err(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
return -EINVAL;
}
-static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
gc->set(gc, gpio, val);
- return bgpio_dir_return(gc, gpio, true);
+ return gpio_mmio_dir_return(gc, gpio, true);
}
-static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
- gc->bgpio_dir &= ~bgpio_line2mask(gc, gpio);
+ scoped_guard(raw_spinlock, &chip->lock) {
+ chip->sdir &= ~gpio_mmio_line2mask(gc, gpio);
- if (gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
- if (gc->reg_dir_out)
- gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ if (chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
+ if (chip->reg_dir_out)
+ chip->write_reg(chip->reg_dir_out, chip->sdir);
+ }
- return bgpio_dir_return(gc, gpio, false);
+ return gpio_mmio_dir_return(gc, gpio, false);
}
-static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
+static int gpio_mmio_get_dir(struct gpio_chip *gc, unsigned int gpio)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
/* Return 0 if output, 1 if input */
- if (gc->bgpio_dir_unreadable) {
- if (gc->bgpio_dir & bgpio_line2mask(gc, gpio))
+ if (chip->dir_unreadable) {
+ if (chip->sdir & gpio_mmio_line2mask(gc, gpio))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
- if (gc->reg_dir_out) {
- if (gc->read_reg(gc->reg_dir_out) & bgpio_line2mask(gc, gpio))
+ if (chip->reg_dir_out) {
+ if (chip->read_reg(chip->reg_dir_out) & gpio_mmio_line2mask(gc, gpio))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
- if (gc->reg_dir_in)
- if (!(gc->read_reg(gc->reg_dir_in) & bgpio_line2mask(gc, gpio)))
+ if (chip->reg_dir_in)
+ if (!(chip->read_reg(chip->reg_dir_in) & gpio_mmio_line2mask(gc, gpio)))
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
}
-static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+static void gpio_mmio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- unsigned long flags;
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
+ guard(raw_spinlock)(&chip->lock);
- gc->bgpio_dir |= bgpio_line2mask(gc, gpio);
+ chip->sdir |= gpio_mmio_line2mask(gc, gpio);
- if (gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
- if (gc->reg_dir_out)
- gc->write_reg(gc->reg_dir_out, gc->bgpio_dir);
-
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ if (chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
+ if (chip->reg_dir_out)
+ chip->write_reg(chip->reg_dir_out, chip->sdir);
}
-static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
- bgpio_dir_out(gc, gpio, val);
+ gpio_mmio_dir_out(gc, gpio, val);
gc->set(gc, gpio, val);
- return bgpio_dir_return(gc, gpio, true);
+ return gpio_mmio_dir_return(gc, gpio, true);
}
-static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
- int val)
+static int gpio_mmio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio,
+ int val)
{
gc->set(gc, gpio, val);
- bgpio_dir_out(gc, gpio, val);
- return bgpio_dir_return(gc, gpio, true);
+ gpio_mmio_dir_out(gc, gpio, val);
+ return gpio_mmio_dir_return(gc, gpio, true);
}
-static int bgpio_setup_accessors(struct device *dev,
- struct gpio_chip *gc,
- bool byte_be)
+static int gpio_mmio_setup_accessors(struct device *dev,
+ struct gpio_generic_chip *chip,
+ bool byte_be)
{
-
- switch (gc->bgpio_bits) {
+ switch (chip->bits) {
case 8:
- gc->read_reg = bgpio_read8;
- gc->write_reg = bgpio_write8;
+ chip->read_reg = gpio_mmio_read8;
+ chip->write_reg = gpio_mmio_write8;
break;
case 16:
if (byte_be) {
- gc->read_reg = bgpio_read16be;
- gc->write_reg = bgpio_write16be;
+ chip->read_reg = gpio_mmio_read16be;
+ chip->write_reg = gpio_mmio_write16be;
} else {
- gc->read_reg = bgpio_read16;
- gc->write_reg = bgpio_write16;
+ chip->read_reg = gpio_mmio_read16;
+ chip->write_reg = gpio_mmio_write16;
}
break;
case 32:
if (byte_be) {
- gc->read_reg = bgpio_read32be;
- gc->write_reg = bgpio_write32be;
+ chip->read_reg = gpio_mmio_read32be;
+ chip->write_reg = gpio_mmio_write32be;
} else {
- gc->read_reg = bgpio_read32;
- gc->write_reg = bgpio_write32;
+ chip->read_reg = gpio_mmio_read32;
+ chip->write_reg = gpio_mmio_write32;
}
break;
#if BITS_PER_LONG >= 64
@@ -479,13 +491,13 @@ static int bgpio_setup_accessors(struct device *dev,
"64 bit big endian byte order unsupported\n");
return -EINVAL;
} else {
- gc->read_reg = bgpio_read64;
- gc->write_reg = bgpio_write64;
+ chip->read_reg = gpio_mmio_read64;
+ chip->write_reg = gpio_mmio_write64;
}
break;
#endif /* BITS_PER_LONG >= 64 */
default:
- dev_err(dev, "unsupported data width %u bits\n", gc->bgpio_bits);
+ dev_err(dev, "unsupported data width %u bits\n", chip->bits);
return -EINVAL;
}
@@ -514,39 +526,37 @@ static int bgpio_setup_accessors(struct device *dev,
* - an input direction register (named "dirin") where a 1 bit indicates
* the GPIO is an input.
*/
-static int bgpio_setup_io(struct gpio_chip *gc,
- void __iomem *dat,
- void __iomem *set,
- void __iomem *clr,
- unsigned long flags)
+static int gpio_mmio_setup_io(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
+ struct gpio_chip *gc = &chip->gc;
- gc->reg_dat = dat;
- if (!gc->reg_dat)
+ chip->reg_dat = cfg->dat;
+ if (!chip->reg_dat)
return -EINVAL;
- if (set && clr) {
- gc->reg_set = set;
- gc->reg_clr = clr;
- gc->set = bgpio_set_with_clear;
- gc->set_multiple = bgpio_set_multiple_with_clear;
- } else if (set && !clr) {
- gc->reg_set = set;
- gc->set = bgpio_set_set;
- gc->set_multiple = bgpio_set_multiple_set;
- } else if (flags & BGPIOF_NO_OUTPUT) {
- gc->set = bgpio_set_none;
+ if (cfg->set && cfg->clr) {
+ chip->reg_set = cfg->set;
+ chip->reg_clr = cfg->clr;
+ gc->set = gpio_mmio_set_with_clear;
+ gc->set_multiple = gpio_mmio_set_multiple_with_clear;
+ } else if (cfg->set && !cfg->clr) {
+ chip->reg_set = cfg->set;
+ gc->set = gpio_mmio_set_set;
+ gc->set_multiple = gpio_mmio_set_multiple_set;
+ } else if (cfg->flags & GPIO_GENERIC_NO_OUTPUT) {
+ gc->set = gpio_mmio_set_none;
gc->set_multiple = NULL;
} else {
- gc->set = bgpio_set;
- gc->set_multiple = bgpio_set_multiple;
+ gc->set = gpio_mmio_set;
+ gc->set_multiple = gpio_mmio_set_multiple;
}
- if (!(flags & BGPIOF_UNREADABLE_REG_SET) &&
- (flags & BGPIOF_READ_OUTPUT_REG_SET)) {
- gc->get = bgpio_get_set;
- if (!gc->be_bits)
- gc->get_multiple = bgpio_get_set_multiple;
+ if (!(cfg->flags & GPIO_GENERIC_UNREADABLE_REG_SET) &&
+ (cfg->flags & GPIO_GENERIC_READ_OUTPUT_REG_SET)) {
+ gc->get = gpio_mmio_get_set;
+ if (!chip->be_bits)
+ gc->get_multiple = gpio_mmio_get_set_multiple;
/*
* We deliberately avoid assigning the ->get_multiple() call
* for big endian mirrored registers which are ALSO reflecting
@@ -555,162 +565,145 @@ static int bgpio_setup_io(struct gpio_chip *gc,
* reading each line individually in that fringe case.
*/
} else {
- gc->get = bgpio_get;
- if (gc->be_bits)
- gc->get_multiple = bgpio_get_multiple_be;
+ gc->get = gpio_mmio_get;
+ if (chip->be_bits)
+ gc->get_multiple = gpio_mmio_get_multiple_be;
else
- gc->get_multiple = bgpio_get_multiple;
+ gc->get_multiple = gpio_mmio_get_multiple;
}
return 0;
}
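
The ladder above is effectively a four-way decision table on the available output registers. A condensed sketch of what each configuration buys, assuming a hypothetical 32-bit bank (offsets are illustrative, not from this patch):

/*
 *   .set + .clr  -> gpio_mmio_set_with_clear(): write-1-to-set and
 *                   write-1-to-clear, no read-modify-write needed
 *   .set only    -> gpio_mmio_set_set(): RMW on the set register
 *   NO_OUTPUT    -> gpio_mmio_set_none(): setting a line is a no-op
 *   data only    -> gpio_mmio_set(): RMW on the data register itself
 */
cfg.dat = base + 0x00;	/* hypothetical register offsets */
cfg.set = base + 0x04;
cfg.clr = base + 0x08;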
-static int bgpio_setup_direction(struct gpio_chip *gc,
- void __iomem *dirout,
- void __iomem *dirin,
- unsigned long flags)
+static int gpio_mmio_setup_direction(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
- if (dirout || dirin) {
- gc->reg_dir_out = dirout;
- gc->reg_dir_in = dirin;
- if (flags & BGPIOF_NO_SET_ON_INPUT)
- gc->direction_output = bgpio_dir_out_dir_first;
+ struct gpio_chip *gc = &chip->gc;
+
+ if (cfg->dirout || cfg->dirin) {
+ chip->reg_dir_out = cfg->dirout;
+ chip->reg_dir_in = cfg->dirin;
+ if (cfg->flags & GPIO_GENERIC_NO_SET_ON_INPUT)
+ gc->direction_output = gpio_mmio_dir_out_dir_first;
else
- gc->direction_output = bgpio_dir_out_val_first;
- gc->direction_input = bgpio_dir_in;
- gc->get_direction = bgpio_get_dir;
+ gc->direction_output = gpio_mmio_dir_out_val_first;
+ gc->direction_input = gpio_mmio_dir_in;
+ gc->get_direction = gpio_mmio_get_dir;
} else {
- if (flags & BGPIOF_NO_OUTPUT)
- gc->direction_output = bgpio_dir_out_err;
+ if (cfg->flags & GPIO_GENERIC_NO_OUTPUT)
+ gc->direction_output = gpio_mmio_dir_out_err;
else
- gc->direction_output = bgpio_simple_dir_out;
+ gc->direction_output = gpio_mmio_simple_dir_out;
- if (flags & BGPIOF_NO_INPUT)
- gc->direction_input = bgpio_dir_in_err;
+ if (cfg->flags & GPIO_GENERIC_NO_INPUT)
+ gc->direction_input = gpio_mmio_dir_in_err;
else
- gc->direction_input = bgpio_simple_dir_in;
+ gc->direction_input = gpio_mmio_simple_dir_in;
}
return 0;
}
-static int bgpio_request(struct gpio_chip *chip, unsigned gpio_pin)
+static int gpio_mmio_request(struct gpio_chip *gc, unsigned int gpio_pin)
{
- if (gpio_pin >= chip->ngpio)
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
+
+ if (gpio_pin >= gc->ngpio)
return -EINVAL;
- if (chip->bgpio_pinctrl)
- return gpiochip_generic_request(chip, gpio_pin);
+ if (chip->pinctrl)
+ return gpiochip_generic_request(gc, gpio_pin);
return 0;
}
/**
- * bgpio_init() - Initialize generic GPIO accessor functions
- * @gc: the GPIO chip to set up
- * @dev: the parent device of the new GPIO chip (compulsory)
- * @sz: the size (width) of the MMIO registers in bytes, typically 1, 2 or 4
- * @dat: MMIO address for the register to READ the value of the GPIO lines, it
- * is expected that a 1 in the corresponding bit in this register means the
- * line is asserted
- * @set: MMIO address for the register to SET the value of the GPIO lines, it is
- * expected that we write the line with 1 in this register to drive the GPIO line
- * high.
- * @clr: MMIO address for the register to CLEAR the value of the GPIO lines, it is
- * expected that we write the line with 1 in this register to drive the GPIO line
- * low. It is allowed to leave this address as NULL, in that case the SET register
- * will be assumed to also clear the GPIO lines, by actively writing the line
- * with 0.
- * @dirout: MMIO address for the register to set the line as OUTPUT. It is assumed
- * that setting a line to 1 in this register will turn that line into an
- * output line. Conversely, setting the line to 0 will turn that line into
- * an input.
- * @dirin: MMIO address for the register to set this line as INPUT. It is assumed
- * that setting a line to 1 in this register will turn that line into an
- * input line. Conversely, setting the line to 0 will turn that line into
- * an output.
- * @flags: Different flags that will affect the behaviour of the device, such as
- * endianness etc.
+ * gpio_generic_chip_init() - Initialize a generic GPIO chip.
+ * @chip: Generic GPIO chip to set up.
+ * @cfg: Generic GPIO chip configuration.
+ *
+ * Returns: 0 on success, negative error number on failure.
*/
-int bgpio_init(struct gpio_chip *gc, struct device *dev,
- unsigned long sz, void __iomem *dat, void __iomem *set,
- void __iomem *clr, void __iomem *dirout, void __iomem *dirin,
- unsigned long flags)
+int gpio_generic_chip_init(struct gpio_generic_chip *chip,
+ const struct gpio_generic_chip_config *cfg)
{
+ struct gpio_chip *gc = &chip->gc;
+ unsigned long flags = cfg->flags;
+ struct device *dev = cfg->dev;
int ret;
- if (!is_power_of_2(sz))
+ if (!is_power_of_2(cfg->sz))
return -EINVAL;
- gc->bgpio_bits = sz * 8;
- if (gc->bgpio_bits > BITS_PER_LONG)
+ chip->bits = cfg->sz * 8;
+ if (chip->bits > BITS_PER_LONG)
return -EINVAL;
- raw_spin_lock_init(&gc->bgpio_lock);
+ raw_spin_lock_init(&chip->lock);
gc->parent = dev;
gc->label = dev_name(dev);
gc->base = -1;
- gc->request = bgpio_request;
- gc->be_bits = !!(flags & BGPIOF_BIG_ENDIAN);
+ gc->request = gpio_mmio_request;
+ chip->be_bits = !!(flags & GPIO_GENERIC_BIG_ENDIAN);
ret = gpiochip_get_ngpios(gc, dev);
if (ret)
- gc->ngpio = gc->bgpio_bits;
+ gc->ngpio = chip->bits;
- ret = bgpio_setup_io(gc, dat, set, clr, flags);
+ ret = gpio_mmio_setup_io(chip, cfg);
if (ret)
return ret;
- ret = bgpio_setup_accessors(dev, gc, flags & BGPIOF_BIG_ENDIAN_BYTE_ORDER);
+ ret = gpio_mmio_setup_accessors(dev, chip,
+ flags & GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER);
if (ret)
return ret;
- ret = bgpio_setup_direction(gc, dirout, dirin, flags);
+ ret = gpio_mmio_setup_direction(chip, cfg);
if (ret)
return ret;
- if (flags & BGPIOF_PINCTRL_BACKEND) {
- gc->bgpio_pinctrl = true;
+ if (flags & GPIO_GENERIC_PINCTRL_BACKEND) {
+ chip->pinctrl = true;
/* Currently this callback is only used for pin control */
gc->free = gpiochip_generic_free;
}
- gc->bgpio_data = gc->read_reg(gc->reg_dat);
- if (gc->set == bgpio_set_set &&
- !(flags & BGPIOF_UNREADABLE_REG_SET))
- gc->bgpio_data = gc->read_reg(gc->reg_set);
+ chip->sdata = chip->read_reg(chip->reg_dat);
+ if (gc->set == gpio_mmio_set_set &&
+ !(flags & GPIO_GENERIC_UNREADABLE_REG_SET))
+ chip->sdata = chip->read_reg(chip->reg_set);
- if (flags & BGPIOF_UNREADABLE_REG_DIR)
- gc->bgpio_dir_unreadable = true;
+ if (flags & GPIO_GENERIC_UNREADABLE_REG_DIR)
+ chip->dir_unreadable = true;
/*
* Inspect hardware to find initial direction setting.
*/
- if ((gc->reg_dir_out || gc->reg_dir_in) &&
- !(flags & BGPIOF_UNREADABLE_REG_DIR)) {
- if (gc->reg_dir_out)
- gc->bgpio_dir = gc->read_reg(gc->reg_dir_out);
- else if (gc->reg_dir_in)
- gc->bgpio_dir = ~gc->read_reg(gc->reg_dir_in);
+ if ((chip->reg_dir_out || chip->reg_dir_in) &&
+ !(flags & GPIO_GENERIC_UNREADABLE_REG_DIR)) {
+ if (chip->reg_dir_out)
+ chip->sdir = chip->read_reg(chip->reg_dir_out);
+ else if (chip->reg_dir_in)
+ chip->sdir = ~chip->read_reg(chip->reg_dir_in);
/*
* If we have two direction registers, synchronise
* input setting to output setting, the library
* can not handle a line being input and output at
* the same time.
*/
- if (gc->reg_dir_out && gc->reg_dir_in)
- gc->write_reg(gc->reg_dir_in, ~gc->bgpio_dir);
+ if (chip->reg_dir_out && chip->reg_dir_in)
+ chip->write_reg(chip->reg_dir_in, ~chip->sdir);
}
return ret;
}
-EXPORT_SYMBOL_GPL(bgpio_init);
+EXPORT_SYMBOL_GPL(gpio_generic_chip_init);
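
For drivers converting away from bgpio_init(), the expected call sequence is a config struct filled in probe and passed to gpio_generic_chip_init(). A minimal probe sketch under assumed names (my_gpio, the register offsets and the resource index are hypothetical):

struct my_gpio {
	struct gpio_generic_chip chip;
};

static int my_gpio_probe(struct platform_device *pdev)
{
	struct gpio_generic_chip_config config;
	struct my_gpio *mg;
	void __iomem *base;
	int ret;

	mg = devm_kzalloc(&pdev->dev, sizeof(*mg), GFP_KERNEL);
	if (!mg)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	config = (struct gpio_generic_chip_config) {
		.dev = &pdev->dev,
		.sz = 4,		/* 32-bit registers */
		.dat = base + 0x00,	/* offsets are made up */
		.set = base + 0x04,
		.dirout = base + 0x08,
	};

	ret = gpio_generic_chip_init(&mg->chip, &config);
	if (ret)
		return ret;

	return devm_gpiochip_add_data(&pdev->dev, &mg->chip.gc, mg);
}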
#if IS_ENABLED(CONFIG_GPIO_GENERIC_PLATFORM)
-static void __iomem *bgpio_map(struct platform_device *pdev,
- const char *name,
- resource_size_t sane_sz)
+static void __iomem *gpio_mmio_map(struct platform_device *pdev,
+ const char *name, resource_size_t sane_sz)
{
struct resource *r;
resource_size_t sz;
@@ -726,16 +719,19 @@ static void __iomem *bgpio_map(struct platform_device *pdev,
return devm_ioremap_resource(&pdev->dev, r);
}
-static const struct of_device_id bgpio_of_match[] = {
+static const struct of_device_id gpio_mmio_of_match[] = {
{ .compatible = "brcm,bcm6345-gpio" },
{ .compatible = "wd,mbl-gpio" },
{ .compatible = "ni,169445-nand-gpio" },
+ { .compatible = "intel,ixp4xx-expansion-bus-mmio-gpio" },
{ }
};
-MODULE_DEVICE_TABLE(of, bgpio_of_match);
+MODULE_DEVICE_TABLE(of, gpio_mmio_of_match);
-static int bgpio_pdev_probe(struct platform_device *pdev)
+static int gpio_mmio_pdev_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
+ struct gpio_generic_chip *gen_gc;
struct device *dev = &pdev->dev;
struct resource *r;
void __iomem *dat;
@@ -747,7 +743,6 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
unsigned long flags = 0;
unsigned int base;
int err;
- struct gpio_chip *gc;
const char *label;
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
@@ -756,43 +751,54 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
sz = resource_size(r);
- dat = bgpio_map(pdev, "dat", sz);
+ dat = gpio_mmio_map(pdev, "dat", sz);
if (IS_ERR(dat))
return PTR_ERR(dat);
- set = bgpio_map(pdev, "set", sz);
+ set = gpio_mmio_map(pdev, "set", sz);
if (IS_ERR(set))
return PTR_ERR(set);
- clr = bgpio_map(pdev, "clr", sz);
+ clr = gpio_mmio_map(pdev, "clr", sz);
if (IS_ERR(clr))
return PTR_ERR(clr);
- dirout = bgpio_map(pdev, "dirout", sz);
+ dirout = gpio_mmio_map(pdev, "dirout", sz);
if (IS_ERR(dirout))
return PTR_ERR(dirout);
- dirin = bgpio_map(pdev, "dirin", sz);
+ dirin = gpio_mmio_map(pdev, "dirin", sz);
if (IS_ERR(dirin))
return PTR_ERR(dirin);
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
+ gen_gc = devm_kzalloc(&pdev->dev, sizeof(*gen_gc), GFP_KERNEL);
+ if (!gen_gc)
return -ENOMEM;
if (device_is_big_endian(dev))
- flags |= BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ flags |= GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
if (device_property_read_bool(dev, "no-output"))
- flags |= BGPIOF_NO_OUTPUT;
-
- err = bgpio_init(gc, dev, sz, dat, set, clr, dirout, dirin, flags);
+ flags |= GPIO_GENERIC_NO_OUTPUT;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = sz,
+ .dat = dat,
+ .set = set,
+ .clr = clr,
+ .dirout = dirout,
+ .dirin = dirin,
+ .flags = flags,
+ };
+
+ err = gpio_generic_chip_init(gen_gc, &config);
if (err)
return err;
err = device_property_read_string(dev, "label", &label);
if (!err)
- gc->label = label;
+ gen_gc->gc.label = label;
/*
* This property *must not* be used in device-tree sources, it's only
@@ -800,32 +806,32 @@ static int bgpio_pdev_probe(struct platform_device *pdev)
*/
err = device_property_read_u32(dev, "gpio-mmio,base", &base);
if (!err && base <= INT_MAX)
- gc->base = base;
+ gen_gc->gc.base = base;
- platform_set_drvdata(pdev, gc);
+ platform_set_drvdata(pdev, &gen_gc->gc);
- return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
+ return devm_gpiochip_add_data(&pdev->dev, &gen_gc->gc, NULL);
}
-static const struct platform_device_id bgpio_id_table[] = {
+static const struct platform_device_id gpio_mmio_id_table[] = {
{
.name = "basic-mmio-gpio",
.driver_data = 0,
},
{ }
};
-MODULE_DEVICE_TABLE(platform, bgpio_id_table);
+MODULE_DEVICE_TABLE(platform, gpio_mmio_id_table);
-static struct platform_driver bgpio_driver = {
+static struct platform_driver gpio_mmio_driver = {
.driver = {
.name = "basic-mmio-gpio",
- .of_match_table = bgpio_of_match,
+ .of_match_table = gpio_mmio_of_match,
},
- .id_table = bgpio_id_table,
- .probe = bgpio_pdev_probe,
+ .id_table = gpio_mmio_id_table,
+ .probe = gpio_mmio_pdev_probe,
};
-module_platform_driver(bgpio_driver);
+module_platform_driver(gpio_mmio_driver);
#endif /* CONFIG_GPIO_GENERIC_PLATFORM */
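
Callbacks installed by the core still receive a struct gpio_chip pointer; a driver that wraps struct gpio_generic_chip in a larger state structure recovers it via to_gpio_generic_chip() plus container_of(), the same way gpio-mt7621 does further down. Minimal sketch (struct and field names are illustrative):

struct my_gpio {
	struct gpio_generic_chip chip;	/* the embedded generic chip */
	u32 shadow;			/* arbitrary private state */
};

static struct my_gpio *to_my_gpio(struct gpio_chip *gc)
{
	struct gpio_generic_chip *gen = to_gpio_generic_chip(gc);

	return container_of(gen, struct my_gpio, chip);
}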
diff --git a/drivers/gpio/gpio-mpc5200.c b/drivers/gpio/gpio-mpc5200.c
index dad0eca1ca2e..00f209157fd0 100644
--- a/drivers/gpio/gpio-mpc5200.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -8,7 +8,7 @@
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
+#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -19,7 +19,8 @@
static DEFINE_SPINLOCK(gpio_lock);
struct mpc52xx_gpiochip {
- struct of_mm_gpio_chip mmchip;
+ struct gpio_chip gc;
+ void __iomem *regs;
unsigned int shadow_dvo;
unsigned int shadow_gpioe;
unsigned int shadow_ddr;
@@ -43,8 +44,8 @@ struct mpc52xx_gpiochip {
*/
static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
@@ -57,9 +58,8 @@ static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (7 - gpio);
@@ -87,9 +87,8 @@ mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -110,9 +109,8 @@ static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio_wkup __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -136,30 +134,41 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct mpc52xx_gpio_wkup __iomem *regs;
struct gpio_chip *gc;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 8;
gc->direction_input = mpc52xx_wkup_gpio_dir_in;
gc->direction_output = mpc52xx_wkup_gpio_dir_out;
gc->get = mpc52xx_wkup_gpio_get;
gc->set = mpc52xx_wkup_gpio_set;
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
chip->shadow_ddr = in_8(&regs->wkup_ddr);
chip->shadow_dvo = in_8(&regs->wkup_dvo);
@@ -167,13 +176,6 @@ static int mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev)
return 0;
}
-static void mpc52xx_gpiochip_remove(struct platform_device *ofdev)
-{
- struct mpc52xx_gpiochip *chip = platform_get_drvdata(ofdev);
-
- of_mm_gpiochip_remove(&chip->mmchip);
-}
-
static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
{ .compatible = "fsl,mpc5200-gpio-wkup", },
{}
@@ -185,7 +187,6 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
.of_match_table = mpc52xx_wkup_gpiochip_match,
},
.probe = mpc52xx_wkup_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
};
/*
@@ -207,8 +208,8 @@ static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
*/
static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned int ret;
ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
@@ -219,9 +220,8 @@ static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
static inline void
__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
if (val)
chip->shadow_dvo |= 1 << (31 - gpio);
@@ -248,9 +248,8 @@ mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -271,9 +270,8 @@ static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
static int
mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
- struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct mpc52xx_gpiochip *chip = gpiochip_get_data(gc);
- struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+ struct mpc52xx_gpio __iomem *regs = chip->regs;
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -298,30 +296,41 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct mpc52xx_gpiochip *chip;
struct gpio_chip *gc;
struct mpc52xx_gpio __iomem *regs;
int ret;
- chip = devm_kzalloc(&ofdev->dev, sizeof(*chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
platform_set_drvdata(ofdev, chip);
- gc = &chip->mmchip.gc;
+ gc = &chip->gc;
+ gc->base = -1;
gc->ngpio = 32;
gc->direction_input = mpc52xx_simple_gpio_dir_in;
gc->direction_output = mpc52xx_simple_gpio_dir_out;
gc->get = mpc52xx_simple_gpio_get;
gc->set = mpc52xx_simple_gpio_set;
- ret = of_mm_gpiochip_add_data(ofdev->dev.of_node, &chip->mmchip, chip);
+ gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+ if (!gc->label)
+ return -ENOMEM;
+
+ chip->regs = devm_of_iomap(dev, np, 0, NULL);
+ if (IS_ERR(chip->regs))
+ return PTR_ERR(chip->regs);
+
+ ret = devm_gpiochip_add_data(dev, gc, chip);
if (ret)
return ret;
- regs = chip->mmchip.regs;
+ regs = chip->regs;
chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
chip->shadow_ddr = in_be32(&regs->simple_ddr);
chip->shadow_dvo = in_be32(&regs->simple_dvo);
@@ -340,7 +349,6 @@ static struct platform_driver mpc52xx_simple_gpiochip_driver = {
.of_match_table = mpc52xx_simple_gpiochip_match,
},
.probe = mpc52xx_simple_gpiochip_probe,
- .remove = mpc52xx_gpiochip_remove,
};
static struct platform_driver * const drivers[] = {
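
A condensed replay of the pattern both probe functions above now follow: of_mm_gpiochip_add_data() bundled label allocation, mapping and registration with a mandatory remove path, while the devm-based equivalent is fully device-managed, which is why mpc52xx_gpiochip_remove() could be deleted outright. Sketch, using the same dev/np/gc/chip names as the probe context:

gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
if (!gc->label)
	return -ENOMEM;

chip->regs = devm_of_iomap(dev, np, 0, NULL);
if (IS_ERR(chip->regs))
	return PTR_ERR(chip->regs);

/* Unregistration and unmapping now happen automatically on unbind. */
return devm_gpiochip_add_data(dev, gc, chip);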
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 121efdd71e45..bfe828734ee1 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -9,6 +9,7 @@
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -34,7 +35,7 @@
#define GPIO_IBE 0x18
struct mpc8xxx_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
raw_spinlock_t lock;
@@ -66,9 +67,11 @@ static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio)
struct mpc8xxx_gpio_chip *mpc8xxx_gc = gpiochip_get_data(gc);
u32 out_mask, out_shadow;
- out_mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
- val = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
- out_shadow = gc->bgpio_data & out_mask;
+ out_mask = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DIR);
+ val = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DAT) & ~out_mask;
+ out_shadow = mpc8xxx_gc->chip.sdata & out_mask;
return !!((val | out_shadow) & mpc_pin2mask(gpio));
}
@@ -108,12 +111,13 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
static irqreturn_t mpc8xxx_gpio_irq_cascade(int irq, void *data)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = data;
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long mask;
int i;
- mask = gc->read_reg(mpc8xxx_gc->regs + GPIO_IER)
- & gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR);
+ mask = gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IER) &
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR);
for_each_set_bit(i, &mask, 32)
generic_handle_domain_irq(mpc8xxx_gc->irq, 31 - i);
@@ -124,15 +128,17 @@ static void mpc8xxx_irq_unmask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->chip.gc;
unsigned long flags;
gpiochip_enable_irq(gc, hwirq);
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR)
| mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
@@ -142,13 +148,14 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
+ struct gpio_chip *gc = &mpc8xxx_gc->chip.gc;
unsigned long flags;
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_IMR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, mpc8xxx_gc->regs + GPIO_IMR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
@@ -159,32 +166,34 @@ static void mpc8xxx_irq_mask(struct irq_data *d)
static void mpc8xxx_irq_ack(struct irq_data *d)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IER,
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, mpc8xxx_gc->regs + GPIO_IER,
mpc_pin2mask(irqd_to_hwirq(d)));
}
static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long flags;
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR)
| mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
- gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR,
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_ICR)
& ~mpc_pin2mask(irqd_to_hwirq(d)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -199,7 +208,6 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d);
- struct gpio_chip *gc = &mpc8xxx_gc->gc;
unsigned long gpio = irqd_to_hwirq(d);
void __iomem *reg;
unsigned int shift;
@@ -217,7 +225,9 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift))
| (2 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -225,14 +235,18 @@ static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type)
case IRQ_TYPE_EDGE_RISING:
case IRQ_TYPE_LEVEL_HIGH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift))
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift))
| (1 << shift));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
case IRQ_TYPE_EDGE_BOTH:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
- gc->write_reg(reg, (gc->read_reg(reg) & ~(3 << shift)));
+ gpio_generic_write_reg(&mpc8xxx_gc->chip, reg,
+ (gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ reg) & ~(3 << shift)));
raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
break;
@@ -309,6 +323,7 @@ static const struct of_device_id mpc8xxx_gpio_ids[] = {
static int mpc8xxx_probe(struct platform_device *pdev)
{
const struct mpc8xxx_gpio_devtype *devtype = NULL;
+ struct gpio_generic_chip_config config;
struct mpc8xxx_gpio_chip *mpc8xxx_gc;
struct device *dev = &pdev->dev;
struct fwnode_handle *fwnode;
@@ -327,26 +342,28 @@ static int mpc8xxx_probe(struct platform_device *pdev)
if (IS_ERR(mpc8xxx_gc->regs))
return PTR_ERR(mpc8xxx_gc->regs);
- gc = &mpc8xxx_gc->gc;
+ gc = &mpc8xxx_gc->chip.gc;
gc->parent = dev;
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = mpc8xxx_gc->regs + GPIO_DAT,
+ .dirout = mpc8xxx_gc->regs + GPIO_DIR,
+ .flags = GPIO_GENERIC_BIG_ENDIAN
+ };
+
if (device_property_read_bool(dev, "little-endian")) {
- ret = bgpio_init(gc, dev, 4, mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL, mpc8xxx_gc->regs + GPIO_DIR,
- NULL, BGPIOF_BIG_ENDIAN);
- if (ret)
- return ret;
dev_dbg(dev, "GPIO registers are LITTLE endian\n");
} else {
- ret = bgpio_init(gc, dev, 4, mpc8xxx_gc->regs + GPIO_DAT,
- NULL, NULL, mpc8xxx_gc->regs + GPIO_DIR,
- NULL, BGPIOF_BIG_ENDIAN
- | BGPIOF_BIG_ENDIAN_BYTE_ORDER);
- if (ret)
- return ret;
+ config.flags |= GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
dev_dbg(dev, "GPIO registers are BIG endian\n");
}
+ ret = gpio_generic_chip_init(&mpc8xxx_gc->chip, &config);
+ if (ret)
+ return ret;
+
mpc8xxx_gc->direction_output = gc->direction_output;
devtype = device_get_match_data(dev);
@@ -379,10 +396,14 @@ static int mpc8xxx_probe(struct platform_device *pdev)
device_is_compatible(dev, "fsl,ls1028a-gpio") ||
device_is_compatible(dev, "fsl,ls1088a-gpio") ||
is_acpi_node(fwnode)) {
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IBE, 0xffffffff);
/* Also, latch state of GPIOs configured as output by bootloader. */
- gc->bgpio_data = gc->read_reg(mpc8xxx_gc->regs + GPIO_DAT) &
- gc->read_reg(mpc8xxx_gc->regs + GPIO_DIR);
+ mpc8xxx_gc->chip.sdata =
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DAT) &
+ gpio_generic_read_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_DIR);
}
ret = devm_gpiochip_add_data(dev, gc, mpc8xxx_gc);
@@ -405,8 +426,10 @@ static int mpc8xxx_probe(struct platform_device *pdev)
return 0;
/* ack and mask all irqs */
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
- gc->write_reg(mpc8xxx_gc->regs + GPIO_IMR, 0);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IER, 0xffffffff);
+ gpio_generic_write_reg(&mpc8xxx_gc->chip,
+ mpc8xxx_gc->regs + GPIO_IMR, 0);
ret = devm_request_irq(dev, mpc8xxx_gc->irqn,
mpc8xxx_gpio_irq_cascade,
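
All register traffic in this driver now funnels through the generic chip's accessors. The recurring read-modify-write under the driver's own raw spinlock, seen in the mask/unmask/set_type paths above, condenses to this sketch (the helper name is illustrative):

static void my_rmw_set(struct mpc8xxx_gpio_chip *mpc8xxx_gc,
		       void __iomem *reg, u32 bit)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
	val = gpio_generic_read_reg(&mpc8xxx_gc->chip, reg);
	gpio_generic_write_reg(&mpc8xxx_gc->chip, reg, val | bit);
	raw_spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
}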
diff --git a/drivers/gpio/gpio-mpfs.c b/drivers/gpio/gpio-mpfs.c
index 82d557a7e5d8..9468795b9634 100644
--- a/drivers/gpio/gpio-mpfs.c
+++ b/drivers/gpio/gpio-mpfs.c
@@ -69,7 +69,7 @@ static int mpfs_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio_in
struct mpfs_gpio_chip *mpfs_gpio = gpiochip_get_data(gc);
regmap_update_bits(mpfs_gpio->regs, MPFS_GPIO_CTRL(gpio_index),
- MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_IN);
+ MPFS_GPIO_DIR_MASK, MPFS_GPIO_EN_OUT | MPFS_GPIO_EN_OUT_BUF);
regmap_update_bits(mpfs_gpio->regs, mpfs_gpio->offsets->outp, BIT(gpio_index),
value << gpio_index);
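
The one-line fix above matters because regmap_update_bits() only rewrites bits inside the mask, and the old call programmed the input-enable value into the direction field. An open-coded equivalent of the corrected update, for illustration only (the driver itself goes through regmap, not raw MMIO):

u32 old = readl(ctrl);	/* ctrl stands in for MPFS_GPIO_CTRL(gpio_index) */
u32 val = (old & ~MPFS_GPIO_DIR_MASK) |
	  ((MPFS_GPIO_EN_OUT | MPFS_GPIO_EN_OUT_BUF) & MPFS_GPIO_DIR_MASK);
writel(val, ctrl);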
diff --git a/drivers/gpio/gpio-mpsse.c b/drivers/gpio/gpio-mpsse.c
index 9f42bb30b4ec..ace652ba4df1 100644
--- a/drivers/gpio/gpio-mpsse.c
+++ b/drivers/gpio/gpio-mpsse.c
@@ -10,6 +10,7 @@
#include <linux/cleanup.h>
#include <linux/gpio/driver.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/usb.h>
struct mpsse_priv {
@@ -17,8 +18,10 @@ struct mpsse_priv {
struct usb_device *udev; /* USB device encompassing all MPSSEs */
struct usb_interface *intf; /* USB interface for this MPSSE */
u8 intf_id; /* USB interface number for this MPSSE */
- struct work_struct irq_work; /* polling work thread */
+ struct list_head workers; /* polling work threads */
struct mutex irq_mutex; /* lock over irq_data */
+ struct mutex irq_race; /* arbitrates polling worker teardown */
+ raw_spinlock_t irq_spin; /* protects worker list */
atomic_t irq_type[16]; /* pin -> edge detection type */
atomic_t irq_enabled;
int id;
@@ -26,6 +29,9 @@ struct mpsse_priv {
u8 gpio_outputs[2]; /* Output states for GPIOs [L, H] */
u8 gpio_dir[2]; /* Directions for GPIOs [L, H] */
+ unsigned long dir_in; /* Bitmask of valid input pins */
+ unsigned long dir_out; /* Bitmask of valid output pins */
+
u8 *bulk_in_buf; /* Extra recv buffer to grab status bytes */
struct usb_endpoint_descriptor *bulk_in;
@@ -34,6 +40,14 @@ struct mpsse_priv {
struct mutex io_mutex; /* sync I/O with disconnect */
};
+struct mpsse_worker {
+ struct mpsse_priv *priv;
+ struct work_struct work;
+ atomic_t cancelled;
+ struct list_head list; /* linked list */
+ struct list_head destroy; /* teardown linked list */
+};
+
struct bulk_desc {
bool tx; /* direction of bulk transfer */
u8 *data; /* input (tx) or output (rx) */
@@ -43,8 +57,27 @@ struct bulk_desc {
int timeout;
};
+#define MPSSE_NGPIO 16
+
+struct mpsse_quirk {
+ const char *names[MPSSE_NGPIO]; /* Pin names, if applicable */
+ unsigned long dir_in; /* Bitmask of valid input pins */
+ unsigned long dir_out; /* Bitmask of valid output pins */
+};
+
+static struct mpsse_quirk bryx_brik_quirk = {
+ .names = {
+ [3] = "Push to Talk",
+ [5] = "Channel Activity",
+ },
+ .dir_out = BIT(3), /* Push to Talk */
+ .dir_in = BIT(5), /* Channel Activity */
+};
+
static const struct usb_device_id gpio_mpsse_table[] = {
{ USB_DEVICE(0x0c52, 0xa064) }, /* SeaLevel Systems, Inc. */
+ { USB_DEVICE(0x0403, 0x6988), /* FTDI, assigned to Bryx */
+ .driver_info = (kernel_ulong_t)&bryx_brik_quirk},
{ } /* Terminating entry */
};
@@ -160,6 +193,32 @@ static int gpio_mpsse_get_bank(struct mpsse_priv *priv, u8 bank)
return buf;
}
+static int mpsse_ensure_supported(struct gpio_chip *chip,
+ unsigned long mask, int direction)
+{
+ unsigned long supported, unsupported;
+ const char *type = "input";
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ supported = priv->dir_in;
+ if (direction == GPIO_LINE_DIRECTION_OUT) {
+ supported = priv->dir_out;
+ type = "output";
+ }
+
+ /* An invalid bit was in the provided mask */
+ unsupported = mask & ~supported;
+ if (unsupported) {
+ dev_err(&priv->udev->dev,
+ "mpsse: GPIO %lu doesn't support %s\n",
+ find_first_bit(&unsupported, sizeof(unsupported) * 8),
+ type);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
unsigned long *bits)
{
@@ -167,6 +226,10 @@ static int gpio_mpsse_set_multiple(struct gpio_chip *chip, unsigned long *mask,
int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
+ ret = mpsse_ensure_supported(chip, *mask, GPIO_LINE_DIRECTION_OUT);
+ if (ret)
+ return ret;
+
guard(mutex)(&priv->io_mutex);
for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
bank = i / 8;
@@ -194,6 +257,10 @@ static int gpio_mpsse_get_multiple(struct gpio_chip *chip, unsigned long *mask,
int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
+ ret = mpsse_ensure_supported(chip, *mask, GPIO_LINE_DIRECTION_IN);
+ if (ret)
+ return ret;
+
guard(mutex)(&priv->io_mutex);
for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
bank = i / 8;
@@ -242,10 +309,15 @@ static int gpio_mpsse_gpio_set(struct gpio_chip *chip, unsigned int offset,
static int gpio_mpsse_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
+ int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
int bank = (offset & 8) >> 3;
int bank_offset = offset & 7;
+ ret = mpsse_ensure_supported(chip, BIT(offset), GPIO_LINE_DIRECTION_OUT);
+ if (ret)
+ return ret;
+
scoped_guard(mutex, &priv->io_mutex)
priv->gpio_dir[bank] |= BIT(bank_offset);
@@ -255,15 +327,19 @@ static int gpio_mpsse_direction_output(struct gpio_chip *chip,
static int gpio_mpsse_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
+ int ret;
struct mpsse_priv *priv = gpiochip_get_data(chip);
int bank = (offset & 8) >> 3;
int bank_offset = offset & 7;
+ ret = mpsse_ensure_supported(chip, BIT(offset), GPIO_LINE_DIRECTION_IN);
+ if (ret)
+ return ret;
+
guard(mutex)(&priv->io_mutex);
priv->gpio_dir[bank] &= ~BIT(bank_offset);
- gpio_mpsse_set_bank(priv, bank);
- return 0;
+ return gpio_mpsse_set_bank(priv, bank);
}
static int gpio_mpsse_get_direction(struct gpio_chip *chip,
@@ -284,18 +360,62 @@ static int gpio_mpsse_get_direction(struct gpio_chip *chip,
return ret;
}
-static void gpio_mpsse_poll(struct work_struct *work)
+/*
+ * Stops all workers except @my_worker.
+ * Safe to call only while holding irq_race.
+ */
+static void gpio_mpsse_stop_all_except(struct mpsse_priv *priv,
+ struct mpsse_worker *my_worker)
+{
+ struct mpsse_worker *worker, *worker_tmp;
+ struct list_head destructors = LIST_HEAD_INIT(destructors);
+
+ scoped_guard(raw_spinlock_irqsave, &priv->irq_spin) {
+ list_for_each_entry_safe(worker, worker_tmp,
+ &priv->workers, list) {
+ /* Don't stop ourselves */
+ if (worker == my_worker)
+ continue;
+
+ list_del(&worker->list);
+
+ /* Give worker a chance to terminate itself */
+ atomic_set(&worker->cancelled, 1);
+ /* Keep track of stuff to cancel */
+ INIT_LIST_HEAD(&worker->destroy);
+ list_add(&worker->destroy, &destructors);
+ }
+ }
+
+ list_for_each_entry_safe(worker, worker_tmp,
+ &destructors, destroy) {
+ list_del(&worker->destroy);
+ cancel_work_sync(&worker->work);
+ kfree(worker);
+ }
+}
+
+static void gpio_mpsse_poll(struct work_struct *my_work)
{
unsigned long pin_mask, pin_states, flags;
int irq_enabled, offset, err, value, fire_irq,
irq, old_value[16], irq_type[16];
- struct mpsse_priv *priv = container_of(work, struct mpsse_priv,
- irq_work);
+ struct mpsse_worker *my_worker = container_of(my_work, struct mpsse_worker, work);
+ struct mpsse_priv *priv = my_worker->priv;
for (offset = 0; offset < priv->gpio.ngpio; ++offset)
old_value[offset] = -1;
- while ((irq_enabled = atomic_read(&priv->irq_enabled))) {
+ /*
+ * We only want one worker. Workers race to acquire irq_race and tear
+ * down all other workers. This is a cond guard so that we don't deadlock
+ * trying to cancel a worker.
+ */
+ scoped_cond_guard(mutex_try, return, &priv->irq_race)
+ gpio_mpsse_stop_all_except(priv, my_worker);
+
+ while ((irq_enabled = atomic_read(&priv->irq_enabled)) &&
+ !atomic_read(&my_worker->cancelled)) {
usleep_range(MPSSE_POLL_INTERVAL, MPSSE_POLL_INTERVAL + 1000);
/* The guard releases irq_mutex at the end of each loop iteration */
guard(mutex)(&priv->irq_mutex);
@@ -370,21 +490,45 @@ static int gpio_mpsse_set_irq_type(struct irq_data *irqd, unsigned int type)
static void gpio_mpsse_irq_disable(struct irq_data *irqd)
{
+ struct mpsse_worker *worker;
struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
atomic_and(~BIT(irqd->hwirq), &priv->irq_enabled);
gpiochip_disable_irq(&priv->gpio, irqd->hwirq);
+
+ /*
+ * Can't actually do teardown in IRQ context (it blocks).
+ * As a result, these workers stick around until the IRQ is re-enabled
+ * or the device is disconnected.
+ */
+ scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
+ list_for_each_entry(worker, &priv->workers, list)
+ atomic_set(&worker->cancelled, 1);
}
static void gpio_mpsse_irq_enable(struct irq_data *irqd)
{
+ struct mpsse_worker *worker;
struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
gpiochip_enable_irq(&priv->gpio, irqd->hwirq);
/* If no-one else was using the IRQ, enable it */
if (!atomic_fetch_or(BIT(irqd->hwirq), &priv->irq_enabled)) {
- INIT_WORK(&priv->irq_work, gpio_mpsse_poll);
- schedule_work(&priv->irq_work);
+ /*
+ * Can't use devm here: devres takes a non-raw spinlock, which is
+ * illegal in this context (our caller holds a raw spinlock).
+ */
+ worker = kzalloc(sizeof(*worker), GFP_NOWAIT);
+ if (!worker)
+ return;
+
+ worker->priv = priv;
+ INIT_LIST_HEAD(&worker->list);
+ INIT_WORK(&worker->work, gpio_mpsse_poll);
+ schedule_work(&worker->work);
+
+ scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
+ list_add(&worker->list, &priv->workers);
}
}
@@ -404,18 +548,49 @@ static void gpio_mpsse_ida_remove(void *data)
ida_free(&gpio_mpsse_ida, priv->id);
}
+static int mpsse_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ if (WARN_ON(priv == NULL))
+ return -ENODEV;
+
+ *valid_mask = priv->dir_in | priv->dir_out;
+
+ return 0;
+}
+
+static void mpsse_irq_init_valid_mask(struct gpio_chip *chip,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct mpsse_priv *priv = gpiochip_get_data(chip);
+
+ if (WARN_ON(priv == NULL))
+ return;
+
+ /* Can only use IRQ on input capable pins */
+ *valid_mask = priv->dir_in;
+}
+
static int gpio_mpsse_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
struct mpsse_priv *priv;
struct device *dev;
+ char *serial;
int err;
+ struct mpsse_quirk *quirk = (void *)id->driver_info;
dev = &interface->dev;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ INIT_LIST_HEAD(&priv->workers);
+
priv->udev = usb_get_dev(interface_to_usbdev(interface));
priv->intf = interface;
priv->intf_id = interface->cur_altsetting->desc.bInterfaceNumber;
@@ -436,9 +611,21 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
if (err)
return err;
+ err = devm_mutex_init(dev, &priv->irq_race);
+ if (err)
+ return err;
+
+ raw_spin_lock_init(&priv->irq_spin);
+
+ serial = priv->udev->serial;
+ if (!serial)
+ serial = "NONE";
+
priv->gpio.label = devm_kasprintf(dev, GFP_KERNEL,
- "gpio-mpsse.%d.%d",
- priv->id, priv->intf_id);
+ "MPSSE%04x:%04x.%d.%d.%s",
+ id->idVendor, id->idProduct,
+ priv->intf_id, priv->id,
+ serial);
if (!priv->gpio.label)
return -ENOMEM;
@@ -452,10 +639,20 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.get_multiple = gpio_mpsse_get_multiple;
priv->gpio.set_multiple = gpio_mpsse_set_multiple;
priv->gpio.base = -1;
- priv->gpio.ngpio = 16;
+ priv->gpio.ngpio = MPSSE_NGPIO;
priv->gpio.offset = priv->intf_id * priv->gpio.ngpio;
priv->gpio.can_sleep = 1;
+ if (quirk) {
+ priv->dir_out = quirk->dir_out;
+ priv->dir_in = quirk->dir_in;
+ priv->gpio.names = quirk->names;
+ priv->gpio.init_valid_mask = mpsse_init_valid_mask;
+ } else {
+ priv->dir_in = U16_MAX;
+ priv->dir_out = U16_MAX;
+ }
+
err = usb_find_common_endpoints(interface->cur_altsetting,
&priv->bulk_in, &priv->bulk_out,
NULL, NULL);
@@ -494,6 +691,7 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
priv->gpio.irq.parents = NULL;
priv->gpio.irq.default_type = IRQ_TYPE_NONE;
priv->gpio.irq.handler = handle_simple_irq;
+ priv->gpio.irq.init_valid_mask = mpsse_irq_init_valid_mask;
err = devm_gpiochip_add_data(dev, &priv->gpio, priv);
if (err)
@@ -506,6 +704,13 @@ static void gpio_mpsse_disconnect(struct usb_interface *intf)
{
struct mpsse_priv *priv = usb_get_intfdata(intf);
+ /*
+ * The lock prevents a double-free of workers between this path and
+ * the teardown step at the beginning of gpio_mpsse_poll().
+ */
+ scoped_guard(mutex, &priv->irq_race)
+ gpio_mpsse_stop_all_except(priv, NULL);
+
priv->intf = NULL;
usb_set_intfdata(intf, NULL);
usb_put_dev(priv->udev);
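
The worker lifecycle added in this patch reduces to: every irq_enable may spawn a poller, pollers race on irq_race, and the winner cancels all others, so at most one poll loop survives. A stripped-down sketch of that flow (error handling omitted):

static void poll_worker(struct work_struct *work)
{
	struct mpsse_worker *me = container_of(work, struct mpsse_worker, work);
	struct mpsse_priv *priv = me->priv;

	/* Losers of the race bail out instead of blocking on the mutex,
	 * which avoids deadlocking against cancel_work_sync(). */
	scoped_cond_guard(mutex_try, return, &priv->irq_race)
		gpio_mpsse_stop_all_except(priv, me);

	while (atomic_read(&priv->irq_enabled) &&
	       !atomic_read(&me->cancelled)) {
		/* ... poll the banks and fire nested interrupts ... */
	}
}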
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index b0cccd856840..7345afdc78de 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -694,7 +694,7 @@ static const struct of_device_id msc313_gpio_of_match[] = {
* SoC goes into suspend to memory mode so we need to save some
* of the register bits before suspending and put it back when resuming
*/
-static int __maybe_unused msc313_gpio_suspend(struct device *dev)
+static int msc313_gpio_suspend(struct device *dev)
{
struct msc313_gpio *gpio = dev_get_drvdata(dev);
int i;
@@ -705,7 +705,7 @@ static int __maybe_unused msc313_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused msc313_gpio_resume(struct device *dev)
+static int msc313_gpio_resume(struct device *dev)
{
struct msc313_gpio *gpio = dev_get_drvdata(dev);
int i;
@@ -716,13 +716,13 @@ static int __maybe_unused msc313_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(msc313_gpio_ops, msc313_gpio_suspend, msc313_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(msc313_gpio_ops, msc313_gpio_suspend, msc313_gpio_resume);
static struct platform_driver msc313_gpio_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = msc313_gpio_of_match,
- .pm = &msc313_gpio_ops,
+ .pm = pm_sleep_ptr(&msc313_gpio_ops),
},
.probe = msc313_gpio_probe,
};
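
The msc313 hunks switch to the modern PM idiom: DEFINE_SIMPLE_DEV_PM_OPS() keeps the callbacks always compiled (so __maybe_unused can go), while pm_sleep_ptr() evaluates to NULL when CONFIG_PM_SLEEP is off, letting the compiler discard the dead ops. Generic form of the pattern, with hypothetical names:

static int my_suspend(struct device *dev) { /* save state */ return 0; }
static int my_resume(struct device *dev)  { /* restore state */ return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

static struct platform_driver my_driver = {
	.driver = {
		.name	= "my-gpio",
		.pm	= pm_sleep_ptr(&my_pm_ops),
	},
};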
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index 93facbebb80e..91230be51587 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -6,11 +6,11 @@
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#define MTK_BANK_CNT 3
#define MTK_BANK_WIDTH 32
@@ -30,8 +30,7 @@
struct mtk_gc {
struct irq_chip irq_chip;
- struct gpio_chip chip;
- spinlock_t lock;
+ struct gpio_generic_chip chip;
int bank;
u32 rising;
u32 falling;
@@ -59,27 +58,29 @@ struct mtk {
static inline struct mtk_gc *
to_mediatek_gpio(struct gpio_chip *chip)
{
- return container_of(chip, struct mtk_gc, chip);
+ struct gpio_generic_chip *gen_gc = to_gpio_generic_chip(chip);
+
+ return container_of(gen_gc, struct mtk_gc, chip);
}
static inline void
mtk_gpio_w32(struct mtk_gc *rg, u32 offset, u32 val)
{
- struct gpio_chip *gc = &rg->chip;
+ struct gpio_chip *gc = &rg->chip.gc;
struct mtk *mtk = gpiochip_get_data(gc);
offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
- gc->write_reg(mtk->base + offset, val);
+ gpio_generic_write_reg(&rg->chip, mtk->base + offset, val);
}
static inline u32
mtk_gpio_r32(struct mtk_gc *rg, u32 offset)
{
- struct gpio_chip *gc = &rg->chip;
+ struct gpio_chip *gc = &rg->chip.gc;
struct mtk *mtk = gpiochip_get_data(gc);
offset = (rg->bank * GPIO_BANK_STRIDE) + offset;
- return gc->read_reg(mtk->base + offset);
+ return gpio_generic_read_reg(&rg->chip, mtk->base + offset);
}
static irqreturn_t
@@ -108,12 +109,12 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct mtk_gc *rg = to_mediatek_gpio(gc);
int pin = d->hwirq;
- unsigned long flags;
u32 rise, fall, high, low;
gpiochip_enable_irq(gc, d->hwirq);
- spin_lock_irqsave(&rg->lock, flags);
+ guard(gpio_generic_lock_irqsave)(&rg->chip);
+
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
@@ -122,7 +123,6 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall | (BIT(pin) & rg->falling));
mtk_gpio_w32(rg, GPIO_REG_HLVL, high | (BIT(pin) & rg->hlevel));
mtk_gpio_w32(rg, GPIO_REG_LLVL, low | (BIT(pin) & rg->llevel));
- spin_unlock_irqrestore(&rg->lock, flags);
}
static void
@@ -131,19 +131,18 @@ mediatek_gpio_irq_mask(struct irq_data *d)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct mtk_gc *rg = to_mediatek_gpio(gc);
int pin = d->hwirq;
- unsigned long flags;
u32 rise, fall, high, low;
- spin_lock_irqsave(&rg->lock, flags);
- rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
- fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
- high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
- low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
- mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
- mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
- spin_unlock_irqrestore(&rg->lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &rg->chip) {
+ rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
+ fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
+ high = mtk_gpio_r32(rg, GPIO_REG_HLVL);
+ low = mtk_gpio_r32(rg, GPIO_REG_LLVL);
+ mtk_gpio_w32(rg, GPIO_REG_FEDGE, fall & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_REDGE, rise & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
+ mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
+ }
gpiochip_disable_irq(gc, d->hwirq);
}
@@ -220,6 +219,7 @@ static const struct irq_chip mt7621_irq_chip = {
static int
mediatek_gpio_bank_probe(struct device *dev, int bank)
{
+ struct gpio_generic_chip_config config;
struct mtk *mtk = dev_get_drvdata(dev);
struct mtk_gc *rg;
void __iomem *dat, *set, *ctrl, *diro;
@@ -228,7 +228,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
rg = &mtk->gc_map[bank];
memset(rg, 0, sizeof(*rg));
- spin_lock_init(&rg->lock);
rg->bank = bank;
dat = mtk->base + GPIO_REG_DATA + (rg->bank * GPIO_BANK_STRIDE);
@@ -236,21 +235,30 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE);
diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE);
- ret = bgpio_init(&rg->chip, dev, 4, dat, set, ctrl, diro, NULL,
- BGPIOF_NO_SET_ON_INPUT);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .clr = ctrl,
+ .dirout = diro,
+ .flags = GPIO_GENERIC_NO_SET_ON_INPUT,
+ };
+
+ ret = gpio_generic_chip_init(&rg->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init() failed\n");
+ dev_err(dev, "failed to initialize generic GPIO chip\n");
return ret;
}
- rg->chip.of_gpio_n_cells = 2;
- rg->chip.of_xlate = mediatek_gpio_xlate;
- rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
+ rg->chip.gc.of_gpio_n_cells = 2;
+ rg->chip.gc.of_xlate = mediatek_gpio_xlate;
+ rg->chip.gc.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
dev_name(dev), bank);
- if (!rg->chip.label)
+ if (!rg->chip.gc.label)
return -ENOMEM;
- rg->chip.offset = bank * MTK_BANK_WIDTH;
+ rg->chip.gc.offset = bank * MTK_BANK_WIDTH;
if (mtk->gpio_irq) {
struct gpio_irq_chip *girq;
@@ -261,7 +269,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
*/
ret = devm_request_irq(dev, mtk->gpio_irq,
mediatek_gpio_irq_handler, IRQF_SHARED,
- rg->chip.label, &rg->chip);
+ rg->chip.gc.label, &rg->chip.gc);
if (ret) {
dev_err(dev, "Error requesting IRQ %d: %d\n",
@@ -269,7 +277,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
return ret;
}
- girq = &rg->chip.irq;
+ girq = &rg->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
@@ -279,17 +287,17 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
girq->handler = handle_simple_irq;
}
- ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
+ ret = devm_gpiochip_add_data(dev, &rg->chip.gc, mtk);
if (ret < 0) {
dev_err(dev, "Could not register gpio %d, ret=%d\n",
- rg->chip.ngpio, ret);
+ rg->chip.gc.ngpio, ret);
return ret;
}
/* set polarity to low for all gpios */
mtk_gpio_w32(rg, GPIO_REG_POL, 0);
- dev_info(dev, "registering %d gpios\n", rg->chip.ngpio);
+ dev_info(dev, "registering %d gpios\n", rg->chip.gc.ngpio);
return 0;
}
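
With the private spinlock gone, serialization piggybacks on the generic chip's internal lock via the new guard types. A sketch of the two idioms used above, on the same rg/pin names:

/* Hold the lock until the end of the enclosing function: */
guard(gpio_generic_lock_irqsave)(&rg->chip);

/* Or bound the critical section to a block: */
scoped_guard(gpio_generic_lock_irqsave, &rg->chip) {
	u32 v = mtk_gpio_r32(rg, GPIO_REG_REDGE);
	mtk_gpio_w32(rg, GPIO_REG_REDGE, v | BIT(pin));
}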
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 5e3f54cb8bc4..22c36b79e249 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -573,11 +573,10 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
for (i = 0; i < mvchip->chip.ngpio; i++) {
int irq;
- irq = irq_find_mapping(mvchip->domain, i);
-
if (!(cause & BIT(i)))
continue;
+ irq = irq_find_mapping(mvchip->domain, i);
type = irq_get_trigger_type(irq);
if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
/* Swap polarity (race with GPIO line) */
@@ -602,7 +601,6 @@ static const struct regmap_config mvebu_gpio_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
};
/*
@@ -899,7 +897,7 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
msk = BIT(i);
is_out = !(io_conf & msk);
- seq_printf(s, " gpio-%-3d (%-20.20s)", chip->base + i, label);
+ seq_printf(s, " gpio-%-3d (%-20.20s)", i, label);
if (is_out) {
seq_printf(s, " out %s %s\n",
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 433cbadc3a4c..d7666fe9dbf8 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -481,7 +481,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
config.dat = port->base + GPIO_PSR;
config.set = port->base + GPIO_DR;
config.dirout = port->base + GPIO_GDIR;
- config.flags = BGPIOF_READ_OUTPUT_REG_SET;
+ config.flags = GPIO_GENERIC_READ_OUTPUT_REG_SET;
err = gpio_generic_chip_init(&port->gen_gc, &config);
if (err)
@@ -667,7 +667,7 @@ static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
RUNTIME_PM_OPS(mxc_gpio_runtime_suspend, mxc_gpio_runtime_resume, NULL)
};
-static int mxc_gpio_syscore_suspend(void)
+static int mxc_gpio_syscore_suspend(void *data)
{
struct mxc_gpio_port *port;
int ret;
@@ -684,7 +684,7 @@ static int mxc_gpio_syscore_suspend(void)
return 0;
}
-static void mxc_gpio_syscore_resume(void)
+static void mxc_gpio_syscore_resume(void *data)
{
struct mxc_gpio_port *port;
int ret;
@@ -701,11 +701,15 @@ static void mxc_gpio_syscore_resume(void)
}
}
-static struct syscore_ops mxc_gpio_syscore_ops = {
+static const struct syscore_ops mxc_gpio_syscore_ops = {
.suspend = mxc_gpio_syscore_suspend,
.resume = mxc_gpio_syscore_resume,
};
+static struct syscore mxc_gpio_syscore = {
+ .ops = &mxc_gpio_syscore_ops,
+};
+
static struct platform_driver mxc_gpio_driver = {
.driver = {
.name = "gpio-mxc",
@@ -718,7 +722,7 @@ static struct platform_driver mxc_gpio_driver = {
static int __init gpio_mxc_init(void)
{
- register_syscore_ops(&mxc_gpio_syscore_ops);
+ register_syscore(&mxc_gpio_syscore);
return platform_driver_register(&mxc_gpio_driver);
}
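
These hunks track the syscore interface this series is written against: the ops table becomes const, the callbacks gain a void *data cookie, and registration goes through a struct syscore instance. Skeleton of that pattern (driver names hypothetical):

static int my_syscore_suspend(void *data)
{
	/* runs late in suspend, with interrupts disabled */
	return 0;
}

static void my_syscore_resume(void *data)
{
	/* runs early in resume, with interrupts disabled */
}

static const struct syscore_ops my_syscore_ops = {
	.suspend = my_syscore_suspend,
	.resume	 = my_syscore_resume,
};

static struct syscore my_syscore = {
	.ops = &my_syscore_ops,
};

/* at init time: register_syscore(&my_syscore); */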
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 0ea46f3d04e1..5635694bf9f4 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -7,17 +7,18 @@
// Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio/driver.h>
-#include <linux/module.h>
#define MXS_SET 0x4
#define MXS_CLR 0x8
@@ -48,7 +49,7 @@ struct mxs_gpio_port {
int id;
int irq;
struct irq_domain *domain;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct device *dev;
enum mxs_gpio_id devid;
u32 both_edges;
@@ -258,6 +259,7 @@ MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
static int mxs_gpio_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
+ struct gpio_generic_chip_config config;
struct device_node *parent;
static void __iomem *base;
struct mxs_gpio_port *port;
@@ -319,19 +321,24 @@ static int mxs_gpio_probe(struct platform_device *pdev)
irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler,
port);
- err = bgpio_init(&port->gc, &pdev->dev, 4,
- port->base + PINCTRL_DIN(port),
- port->base + PINCTRL_DOUT(port) + MXS_SET,
- port->base + PINCTRL_DOUT(port) + MXS_CLR,
- port->base + PINCTRL_DOE(port), NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = port->base + PINCTRL_DIN(port),
+ .set = port->base + PINCTRL_DOUT(port) + MXS_SET,
+ .clr = port->base + PINCTRL_DOUT(port) + MXS_CLR,
+ .dirout = port->base + PINCTRL_DOE(port),
+ };
+
+ err = gpio_generic_chip_init(&port->chip, &config);
if (err)
goto out_irqdomain_remove;
- port->gc.to_irq = mxs_gpio_to_irq;
- port->gc.get_direction = mxs_gpio_get_direction;
- port->gc.base = port->id * 32;
+ port->chip.gc.to_irq = mxs_gpio_to_irq;
+ port->chip.gc.get_direction = mxs_gpio_get_direction;
+ port->chip.gc.base = port->id * 32;
- err = gpiochip_add_data(&port->gc, port);
+ err = gpiochip_add_data(&port->chip.gc, port);
if (err)
goto out_irqdomain_remove;
diff --git a/drivers/gpio/gpio-nct6694.c b/drivers/gpio/gpio-nct6694.c
new file mode 100644
index 000000000000..a8607f0d9915
--- /dev/null
+++ b/drivers/gpio/gpio-nct6694.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NCT6694 GPIO controller driver based on USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/driver.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/*
+ * Module type used in USB commands addressed to the NCT6694 GPIO
+ * controller.
+ */
+#define NCT6694_GPIO_MOD 0xFF
+
+#define NCT6694_GPIO_VER 0x90
+#define NCT6694_GPIO_VALID 0x110
+#define NCT6694_GPI_DATA 0x120
+#define NCT6694_GPO_DIR 0x170
+#define NCT6694_GPO_TYPE 0x180
+#define NCT6694_GPO_DATA 0x190
+
+#define NCT6694_GPI_STS 0x130
+#define NCT6694_GPI_CLR 0x140
+#define NCT6694_GPI_FALLING 0x150
+#define NCT6694_GPI_RISING 0x160
+
+#define NCT6694_NR_GPIO 8
+
+struct nct6694_gpio_data {
+ struct nct6694 *nct6694;
+ struct gpio_chip gpio;
+ struct mutex lock;
+ /* Protect irq operation */
+ struct mutex irq_lock;
+
+ unsigned char reg_val;
+ unsigned char irq_trig_falling;
+ unsigned char irq_trig_rising;
+
+ /* Current gpio group */
+ unsigned char group;
+ int irq;
+};
+
+static int nct6694_get_direction(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !(BIT(offset) & data->reg_val);
+}
+
+static int nct6694_direction_input(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_direction_output(struct gpio_chip *gpio,
+ unsigned int offset, int val)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Set direction to output */
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ data->reg_val |= BIT(offset);
+ ret = nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ /* Then set output level */
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPO_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ data->reg_val |= BIT(offset);
+ else
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_get_value(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DIR + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (BIT(offset) & data->reg_val) {
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPO_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !!(BIT(offset) & data->reg_val);
+ }
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_DATA + data->group);
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ return !!(BIT(offset) & data->reg_val);
+}
+
+static int nct6694_set_value(struct gpio_chip *gpio, unsigned int offset,
+ int val)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_DATA + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ data->reg_val |= BIT(offset);
+ else
+ data->reg_val &= ~BIT(offset);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_set_config(struct gpio_chip *gpio, unsigned int offset,
+ unsigned long config)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPO_TYPE + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ data->reg_val |= BIT(offset);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ data->reg_val &= ~BIT(offset);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+}
+
+static int nct6694_init_valid_mask(struct gpio_chip *gpio,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPIO_VALID + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret < 0)
+ return ret;
+
+ *valid_mask = data->reg_val;
+
+ return 0;
+}
+
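+/*
+ * Threaded handler for the shared parent IRQ: read the GPI status
+ * register, dispatch a nested interrupt for every pending bit and
+ * acknowledge each one by writing it back to the clear register.
+ */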
+static irqreturn_t nct6694_irq_handler(int irq, void *priv)
+{
+ struct nct6694_gpio_data *data = priv;
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_STS + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ unsigned char status;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ if (ret)
+ return IRQ_NONE;
+
+ status = data->reg_val;
+
+ while (status) {
+ int bit = __ffs(status);
+
+ data->reg_val = BIT(bit);
+ handle_nested_irq(irq_find_mapping(data->gpio.irq.domain, bit));
+ status &= ~BIT(bit);
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_CLR + data->group);
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->reg_val);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int nct6694_get_irq_trig(struct nct6694_gpio_data *data)
+{
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_FALLING + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd, &data->irq_trig_falling);
+ if (ret)
+ return ret;
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_RISING + data->group);
+ return nct6694_read_msg(data->nct6694, &cmd_hd, &data->irq_trig_rising);
+}
+
+static void nct6694_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_disable_irq(gpio, hwirq);
+}
+
+static void nct6694_irq_unmask(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ gpiochip_enable_irq(gpio, hwirq);
+}
+
+static int nct6694_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+
+ guard(mutex)(&data->lock);
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ data->irq_trig_rising |= BIT(hwirq);
+ break;
+
+ case IRQ_TYPE_EDGE_FALLING:
+ data->irq_trig_falling |= BIT(hwirq);
+ break;
+
+ case IRQ_TYPE_EDGE_BOTH:
+ data->irq_trig_rising |= BIT(hwirq);
+ data->irq_trig_falling |= BIT(hwirq);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void nct6694_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+
+ mutex_lock(&data->irq_lock);
+}
+
+static void nct6694_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gpio = irq_data_get_irq_chip_data(d);
+ struct nct6694_gpio_data *data = gpiochip_get_data(gpio);
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_GPIO_MOD,
+ .offset = cpu_to_le16(NCT6694_GPI_FALLING + data->group),
+ .len = cpu_to_le16(sizeof(data->reg_val))
+ };
+
+ scoped_guard(mutex, &data->lock) {
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->irq_trig_falling);
+
+ cmd_hd.offset = cpu_to_le16(NCT6694_GPI_RISING + data->group);
+ nct6694_write_msg(data->nct6694, &cmd_hd, &data->irq_trig_rising);
+ }
+
+ mutex_unlock(&data->irq_lock);
+}
+
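+/*
+ * mask/unmask only toggle the gpiolib-side bookkeeping; the device is
+ * reconfigured from the bus_lock/bus_sync_unlock pair, which batches
+ * trigger updates into USB transfers.
+ */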
+static const struct irq_chip nct6694_irq_chip = {
+ .name = "gpio-nct6694",
+ .irq_mask = nct6694_irq_mask,
+ .irq_unmask = nct6694_irq_unmask,
+ .irq_set_type = nct6694_irq_set_type,
+ .irq_bus_lock = nct6694_irq_bus_lock,
+ .irq_bus_sync_unlock = nct6694_irq_bus_sync_unlock,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void nct6694_irq_dispose_mapping(void *d)
+{
+ struct nct6694_gpio_data *data = d;
+
+ irq_dispose_mapping(data->irq);
+}
+
+static void nct6694_gpio_ida_free(void *d)
+{
+ struct nct6694_gpio_data *data = d;
+ struct nct6694 *nct6694 = data->nct6694;
+
+ ida_free(&nct6694->gpio_ida, data->group);
+}
+
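+/*
+ * Each cell instance services one 8-line group: the group index comes
+ * from the parent's IDA and selects both the register bank and the
+ * parent IRQ line.
+ */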
+static int nct6694_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nct6694 *nct6694 = dev_get_drvdata(dev->parent);
+ struct nct6694_gpio_data *data;
+ struct gpio_irq_chip *girq;
+ int ret, i;
+ char **names;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->nct6694 = nct6694;
+
+ ret = ida_alloc(&nct6694->gpio_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ data->group = ret;
+
+ ret = devm_add_action_or_reset(dev, nct6694_gpio_ida_free, data);
+ if (ret)
+ return ret;
+
+ names = devm_kcalloc(dev, NCT6694_NR_GPIO, sizeof(char *),
+ GFP_KERNEL);
+ if (!names)
+ return -ENOMEM;
+
+ for (i = 0; i < NCT6694_NR_GPIO; i++) {
+ names[i] = devm_kasprintf(dev, GFP_KERNEL, "GPIO%X%d",
+ data->group, i);
+ if (!names[i])
+ return -ENOMEM;
+ }
+
+ data->irq = irq_create_mapping(nct6694->domain,
+ NCT6694_IRQ_GPIO0 + data->group);
+ if (!data->irq)
+ return -EINVAL;
+
+ ret = devm_add_action_or_reset(dev, nct6694_irq_dispose_mapping, data);
+ if (ret)
+ return ret;
+
+ data->gpio.names = (const char * const *)names;
+ data->gpio.label = pdev->name;
+ data->gpio.direction_input = nct6694_direction_input;
+ data->gpio.get = nct6694_get_value;
+ data->gpio.direction_output = nct6694_direction_output;
+ data->gpio.set = nct6694_set_value;
+ data->gpio.get_direction = nct6694_get_direction;
+ data->gpio.set_config = nct6694_set_config;
+ data->gpio.init_valid_mask = nct6694_init_valid_mask;
+ data->gpio.base = -1;
+ data->gpio.can_sleep = false;
+ data->gpio.owner = THIS_MODULE;
+ data->gpio.ngpio = NCT6694_NR_GPIO;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(dev, &data->irq_lock);
+ if (ret)
+ return ret;
+
+ ret = nct6694_get_irq_trig(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get irq trigger type\n");
+
+ girq = &data->gpio.irq;
+ gpio_irq_chip_set_chip(girq, &nct6694_irq_chip);
+ girq->parent_handler = NULL;
+ girq->num_parents = 0;
+ girq->parents = NULL;
+ girq->default_type = IRQ_TYPE_NONE;
+ girq->handler = handle_level_irq;
+ girq->threaded = true;
+
+ ret = devm_request_threaded_irq(dev, data->irq, NULL, nct6694_irq_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "gpio-nct6694", data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+
+ return devm_gpiochip_add_data(dev, &data->gpio, data);
+}
+
+static struct platform_driver nct6694_gpio_driver = {
+ .driver = {
+ .name = "nct6694-gpio",
+ },
+ .probe = nct6694_gpio_probe,
+};
+
+module_platform_driver(nct6694_gpio_driver);
+
+MODULE_DESCRIPTION("USB-GPIO controller driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nct6694-gpio");
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index bcf4b07dd458..97c5cd33279d 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -20,6 +20,7 @@
*/
#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -396,10 +397,12 @@ static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
}
void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
- struct gpio_chip *chip, unsigned int offset,
- unsigned int gpio)
+ struct gpio_chip *chip, unsigned int offset)
{
struct nmk_gpio_chip *nmk_chip = gpiochip_get_data(chip);
+#ifdef CONFIG_PINCTRL_NOMADIK
+ struct gpio_desc *desc;
+#endif
int mode;
bool is_out;
bool data_out;
@@ -425,15 +428,15 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
data_out = !!(readl(nmk_chip->addr + NMK_GPIO_DAT) & BIT(offset));
mode = nmk_gpio_get_mode(nmk_chip, offset);
#ifdef CONFIG_PINCTRL_NOMADIK
- if (mode == NMK_GPIO_ALT_C && pctldev)
- mode = nmk_prcm_gpiocr_get_mode(pctldev, gpio);
+ if (mode == NMK_GPIO_ALT_C && pctldev) {
+ desc = gpio_device_get_desc(chip->gpiodev, offset);
+ mode = nmk_prcm_gpiocr_get_mode(pctldev, desc_to_gpio(desc));
+ }
#endif
if (is_out) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s %s",
- gpio,
- label ?: "(none)",
- str_hi_lo(data_out),
+ offset, label ?: "(none)", str_hi_lo(data_out),
(mode < 0) ? "unknown" : modes[mode]);
} else {
int irq = chip->to_irq(chip, offset);
@@ -445,9 +448,7 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
};
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s",
- gpio,
- label ?: "(none)",
- pulls[pullidx],
+ offset, label ?: "(none)", pulls[pullidx],
(mode < 0) ? "unknown" : modes[mode]);
val = nmk_gpio_get_input(chip, offset);
@@ -479,10 +480,10 @@ void nmk_gpio_dbg_show_one(struct seq_file *s, struct pinctrl_dev *pctldev,
static void nmk_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
- unsigned int i, gpio = chip->base;
+ unsigned int i;
- for (i = 0; i < chip->ngpio; i++, gpio++) {
- nmk_gpio_dbg_show_one(s, NULL, chip, i, gpio);
+ for (i = 0; i < chip->ngpio; i++) {
+ nmk_gpio_dbg_show_one(s, NULL, chip, i);
seq_puts(s, "\n");
}
}
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index a268c76bdca6..e136e81794df 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1503,7 +1503,7 @@ static void omap_gpio_remove(struct platform_device *pdev)
clk_unprepare(bank->dbck);
}
-static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
+static int omap_gpio_runtime_suspend(struct device *dev)
{
struct gpio_bank *bank = dev_get_drvdata(dev);
unsigned long flags;
@@ -1516,7 +1516,7 @@ static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
+static int omap_gpio_runtime_resume(struct device *dev)
{
struct gpio_bank *bank = dev_get_drvdata(dev);
unsigned long flags;
@@ -1529,7 +1529,7 @@ static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
return 0;
}
-static int __maybe_unused omap_gpio_suspend(struct device *dev)
+static int omap_gpio_suspend(struct device *dev)
{
struct gpio_bank *bank = dev_get_drvdata(dev);
@@ -1541,7 +1541,7 @@ static int __maybe_unused omap_gpio_suspend(struct device *dev)
return omap_gpio_runtime_suspend(dev);
}
-static int __maybe_unused omap_gpio_resume(struct device *dev)
+static int omap_gpio_resume(struct device *dev)
{
struct gpio_bank *bank = dev_get_drvdata(dev);
@@ -1554,9 +1554,8 @@ static int __maybe_unused omap_gpio_resume(struct device *dev)
}
static const struct dev_pm_ops gpio_pm_ops = {
- SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
- NULL)
- SET_LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
+ RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume, NULL)
+ LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
};
static struct platform_driver omap_gpio_driver = {
@@ -1564,7 +1563,7 @@ static struct platform_driver omap_gpio_driver = {
.remove = omap_gpio_remove,
.driver = {
.name = "omap_gpio",
- .pm = &gpio_pm_ops,
+ .pm = pm_ptr(&gpio_pm_ops),
.of_match_table = omap_gpio_match,
},
};
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index b46927f55038..0a3916cc2772 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -306,7 +306,7 @@ static inline u8 pca953x_get_bit_mask(struct pca953x_chip *chip, unsigned int of
* Interrupt mask register 0x40 + 5 * bank_size RW
* Interrupt status register 0x40 + 6 * bank_size R
*
- * - Registers with bit 0x80 set, the AI bit
+ * - Registers with bit 0x80 set, the AI bit (auto increment)
* The bit is cleared and the registers fall into one of the
* categories above.
*/
@@ -854,10 +854,13 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
int level;
if (chip->driver_data & PCA_PCAL) {
+ DECLARE_BITMAP(latched_inputs, MAX_LINE);
guard(mutex)(&chip->i2c_lock);
- /* Enable latch on interrupt-enabled inputs */
- pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
+ /* Enable latch on edge-triggered interrupt-enabled inputs */
+ bitmap_or(latched_inputs, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+ bitmap_and(latched_inputs, latched_inputs, chip->irq_mask, gc->ngpio);
+ pca953x_write_regs(chip, PCAL953X_IN_LATCH, latched_inputs);
bitmap_complement(irq_mask, chip->irq_mask, gc->ngpio);
@@ -1203,10 +1206,10 @@ static int pca953x_probe(struct i2c_client *client)
pca953x_setup_gpio(chip, chip->driver_data & PCA_GPIO_MASK);
if (NBANK(chip) > 2 || PCA_CHIP_TYPE(chip->driver_data) == PCA957X_TYPE) {
- dev_info(dev, "using AI\n");
+ dev_info(dev, "using auto increment\n");
regmap_config = &pca953x_ai_i2c_regmap;
} else {
- dev_info(dev, "using no AI\n");
+ dev_info(dev, "using no auto increment\n");
regmap_config = &pca953x_i2c_regmap;
}
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index 9925687e05fb..4ffa0955a9e3 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -171,7 +171,7 @@ static int pch_gpio_direction_input(struct gpio_chip *gpio, unsigned int nr)
/*
* Save register configuration and disable interrupts.
*/
-static void __maybe_unused pch_gpio_save_reg_conf(struct pch_gpio *chip)
+static void pch_gpio_save_reg_conf(struct pch_gpio *chip)
{
chip->pch_gpio_reg.ien_reg = ioread32(&chip->reg->ien);
chip->pch_gpio_reg.imask_reg = ioread32(&chip->reg->imask);
@@ -187,7 +187,7 @@ static void __maybe_unused pch_gpio_save_reg_conf(struct pch_gpio *chip)
/*
* This function restores the register configuration of the GPIO device.
*/
-static void __maybe_unused pch_gpio_restore_reg_conf(struct pch_gpio *chip)
+static void pch_gpio_restore_reg_conf(struct pch_gpio *chip)
{
iowrite32(chip->pch_gpio_reg.ien_reg, &chip->reg->ien);
iowrite32(chip->pch_gpio_reg.imask_reg, &chip->reg->imask);
@@ -402,7 +402,7 @@ static int pch_gpio_probe(struct pci_dev *pdev,
return pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]);
}
-static int __maybe_unused pch_gpio_suspend(struct device *dev)
+static int pch_gpio_suspend(struct device *dev)
{
struct pch_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
@@ -414,7 +414,7 @@ static int __maybe_unused pch_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused pch_gpio_resume(struct device *dev)
+static int pch_gpio_resume(struct device *dev)
{
struct pch_gpio *chip = dev_get_drvdata(dev);
unsigned long flags;
@@ -428,7 +428,7 @@ static int __maybe_unused pch_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(pch_gpio_pm_ops, pch_gpio_suspend, pch_gpio_resume);
static const struct pci_device_id pch_gpio_pcidev_id[] = {
{ PCI_DEVICE_DATA(INTEL, EG20T_PCH, INTEL_EG20T_PCH) },
@@ -444,7 +444,7 @@ static struct pci_driver pch_gpio_driver = {
.id_table = pch_gpio_pcidev_id,
.probe = pch_gpio_probe,
.driver = {
- .pm = &pch_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&pch_gpio_pm_ops),
},
};
diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c
index 476cea1b5ed7..9d28ca8e1d6f 100644
--- a/drivers/gpio/gpio-pci-idio-16.c
+++ b/drivers/gpio/gpio-pci-idio-16.c
@@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = {
.reg_stride = 1,
.val_bits = 8,
.io_port = true,
+ .max_register = 0x7,
.wr_table = &idio_16_wr_table,
.rd_table = &idio_16_rd_table,
.volatile_table = &idio_16_rd_table,
diff --git a/drivers/gpio/gpio-pisosr.c b/drivers/gpio/gpio-pisosr.c
index a69b74866a13..7ec6a46ed600 100644
--- a/drivers/gpio/gpio-pisosr.c
+++ b/drivers/gpio/gpio-pisosr.c
@@ -108,11 +108,6 @@ static const struct gpio_chip template_chip = {
.can_sleep = true,
};
-static void pisosr_mutex_destroy(void *lock)
-{
- mutex_destroy(lock);
-}
-
static int pisosr_gpio_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
@@ -139,8 +134,7 @@ static int pisosr_gpio_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(gpio->load_gpio),
"Unable to allocate load GPIO\n");
- mutex_init(&gpio->lock);
- ret = devm_add_action_or_reset(dev, pisosr_mutex_destroy, &gpio->lock);
+ ret = devm_mutex_init(dev, &gpio->lock);
if (ret)
return ret;
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 02e4ffcf5a6f..919cf86fd590 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -37,7 +37,6 @@
#define PL061_GPIO_NR 8
-#ifdef CONFIG_PM
struct pl061_context_save_regs {
u8 gpio_data;
u8 gpio_dir;
@@ -46,7 +45,6 @@ struct pl061_context_save_regs {
u8 gpio_iev;
u8 gpio_ie;
};
-#endif
struct pl061 {
raw_spinlock_t lock;
@@ -55,9 +53,7 @@ struct pl061 {
struct gpio_chip gc;
int parent_irq;
-#ifdef CONFIG_PM
struct pl061_context_save_regs csave_regs;
-#endif
};
static int pl061_get_direction(struct gpio_chip *gc, unsigned offset)
@@ -367,7 +363,6 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-#ifdef CONFIG_PM
static int pl061_suspend(struct device *dev)
{
struct pl061 *pl061 = dev_get_drvdata(dev);
@@ -411,13 +406,7 @@ static int pl061_resume(struct device *dev)
return 0;
}
-static const struct dev_pm_ops pl061_dev_pm_ops = {
- .suspend = pl061_suspend,
- .resume = pl061_resume,
- .freeze = pl061_suspend,
- .restore = pl061_resume,
-};
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(pl061_dev_pm_ops, pl061_suspend, pl061_resume);
static const struct amba_id pl061_ids[] = {
{
@@ -431,9 +420,7 @@ MODULE_DEVICE_TABLE(amba, pl061_ids);
static struct amba_driver pl061_gpio_driver = {
.drv = {
.name = "pl061_gpio",
-#ifdef CONFIG_PM
- .pm = &pl061_dev_pm_ops,
-#endif
+ .pm = pm_sleep_ptr(&pl061_dev_pm_ops),
},
.id_table = pl061_ids,
.probe = pl061_probe,
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index fa22f3faa163..664cf1eef494 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -747,7 +747,7 @@ static int __init pxa_gpio_dt_init(void)
device_initcall(pxa_gpio_dt_init);
#ifdef CONFIG_PM
-static int pxa_gpio_suspend(void)
+static int pxa_gpio_suspend(void *data)
{
struct pxa_gpio_chip *pchip = pxa_gpio_chip;
struct pxa_gpio_bank *c;
@@ -768,7 +768,7 @@ static int pxa_gpio_suspend(void)
return 0;
}
-static void pxa_gpio_resume(void)
+static void pxa_gpio_resume(void *data)
{
struct pxa_gpio_chip *pchip = pxa_gpio_chip;
struct pxa_gpio_bank *c;
@@ -792,14 +792,18 @@ static void pxa_gpio_resume(void)
#define pxa_gpio_resume NULL
#endif
-static struct syscore_ops pxa_gpio_syscore_ops = {
+static const struct syscore_ops pxa_gpio_syscore_ops = {
.suspend = pxa_gpio_suspend,
.resume = pxa_gpio_resume,
};
+static struct syscore pxa_gpio_syscore = {
+ .ops = &pxa_gpio_syscore_ops,
+};
+
static int __init pxa_gpio_sysinit(void)
{
- register_syscore_ops(&pxa_gpio_syscore_ops);
+ register_syscore(&pxa_gpio_syscore);
return 0;
}
postcore_initcall(pxa_gpio_sysinit);
diff --git a/drivers/gpio/gpio-qixis-fpga.c b/drivers/gpio/gpio-qixis-fpga.c
new file mode 100644
index 000000000000..6e67f43ac0bd
--- /dev/null
+++ b/drivers/gpio/gpio-qixis-fpga.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Layerscape GPIO QIXIS FPGA driver
+ *
+ * Copyright 2025 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/regmap.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
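+/* Per-board description of which lines are hardwired as outputs. */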
+struct qixis_cpld_gpio_config {
+ u64 output_lines;
+};
+
+static const struct qixis_cpld_gpio_config lx2160ardb_sfp_cfg = {
+ .output_lines = BIT(0),
+};
+
+static const struct qixis_cpld_gpio_config ls1046aqds_stat_pres2_cfg = {
+ .output_lines = 0x0,
+};
+
+static const struct regmap_config regmap_config_8r_8v = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int qixis_cpld_gpio_probe(struct platform_device *pdev)
+{
+ DECLARE_BITMAP(fixed_direction_output, 8);
+ const struct qixis_cpld_gpio_config *cfg;
+ struct gpio_regmap_config config = {0};
+ struct regmap *regmap;
+ void __iomem *reg;
+ u32 base;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ cfg = device_get_match_data(&pdev->dev);
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &base);
+ if (ret)
+ return ret;
+
+ regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!regmap) {
+ /*
+ * In case there is no regmap configured by the parent device,
+ * create our own from the MMIO space.
+ */
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, reg, &regmap_config_8r_8v);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * In this case, the offset of our register is 0 inside the
+ * regmap area that we just created.
+ */
+ base = 0;
+ }
+ config.reg_dat_base = GPIO_REGMAP_ADDR(base);
+ config.reg_set_base = GPIO_REGMAP_ADDR(base);
+
+ config.drvdata = (void *)cfg;
+ config.regmap = regmap;
+ config.parent = &pdev->dev;
+ config.ngpio_per_reg = 8;
+ config.ngpio = 8;
+
+ bitmap_from_u64(fixed_direction_output, cfg->output_lines);
+ config.fixed_direction_output = fixed_direction_output;
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(&pdev->dev, &config));
+}
+
+static const struct of_device_id qixis_cpld_gpio_of_match[] = {
+ {
+ .compatible = "fsl,lx2160ardb-fpga-gpio-sfp",
+ .data = &lx2160ardb_sfp_cfg,
+ },
+ {
+ .compatible = "fsl,ls1046aqds-fpga-gpio-stat-pres2",
+ .data = &ls1046aqds_stat_pres2_cfg,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qixis_cpld_gpio_of_match);
+
+static struct platform_driver qixis_cpld_gpio_driver = {
+ .probe = qixis_cpld_gpio_probe,
+ .driver = {
+ .name = "gpio-qixis-cpld",
+ .of_match_table = qixis_cpld_gpio_of_match,
+ },
+};
+module_platform_driver(qixis_cpld_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ioana Ciornei <ioana.ciornei@nxp.com>");
+MODULE_DESCRIPTION("Layerscape GPIO QIXIS FPGA driver");
diff --git a/drivers/gpio/gpio-rda.c b/drivers/gpio/gpio-rda.c
index cb2f63eee2aa..7bbc6f0ce4c8 100644
--- a/drivers/gpio/gpio-rda.c
+++ b/drivers/gpio/gpio-rda.c
@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -35,7 +36,7 @@
#define RDA_GPIO_BANK_NR 32
struct rda_gpio {
- struct gpio_chip chip;
+ struct gpio_generic_chip chip;
void __iomem *base;
spinlock_t lock;
int irq;
@@ -208,6 +209,7 @@ static const struct irq_chip rda_gpio_irq_chip = {
static int rda_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct rda_gpio *rda_gpio;
@@ -235,24 +237,29 @@ static int rda_gpio_probe(struct platform_device *pdev)
spin_lock_init(&rda_gpio->lock);
- ret = bgpio_init(&rda_gpio->chip, dev, 4,
- rda_gpio->base + RDA_GPIO_VAL,
- rda_gpio->base + RDA_GPIO_SET,
- rda_gpio->base + RDA_GPIO_CLR,
- rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
- rda_gpio->base + RDA_GPIO_OEN_SET_IN,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = rda_gpio->base + RDA_GPIO_VAL,
+ .set = rda_gpio->base + RDA_GPIO_SET,
+ .clr = rda_gpio->base + RDA_GPIO_CLR,
+ .dirout = rda_gpio->base + RDA_GPIO_OEN_SET_OUT,
+ .dirin = rda_gpio->base + RDA_GPIO_OEN_SET_IN,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&rda_gpio->chip, &config);
if (ret) {
- dev_err(dev, "bgpio_init failed\n");
+ dev_err(dev, "failed to initialize the generic GPIO chip\n");
return ret;
}
- rda_gpio->chip.label = dev_name(dev);
- rda_gpio->chip.ngpio = ngpios;
- rda_gpio->chip.base = -1;
+ rda_gpio->chip.gc.label = dev_name(dev);
+ rda_gpio->chip.gc.ngpio = ngpios;
+ rda_gpio->chip.gc.base = -1;
if (rda_gpio->irq >= 0) {
- girq = &rda_gpio->chip.irq;
+ girq = &rda_gpio->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &rda_gpio_irq_chip);
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
@@ -269,7 +276,7 @@ static int rda_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rda_gpio);
- return devm_gpiochip_add_data(dev, &rda_gpio->chip, rda_gpio);
+ return devm_gpiochip_add_data(dev, &rda_gpio->chip.gc, rda_gpio);
}
static const struct of_device_id rda_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
index d6418f89d3f6..de527f4fc6c2 100644
--- a/drivers/gpio/gpio-realtek-otto.c
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/gpio/driver.h>
#include <linux/cpumask.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/irq.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
@@ -41,7 +42,7 @@
/**
* realtek_gpio_ctrl - Realtek Otto GPIO driver data
*
- * @gc: Associated gpio_chip instance
+ * @chip: Associated gpio_generic_chip instance
* @base: Base address of the register block for a GPIO bank
* @lock: Lock for accessing the IRQ registers and values
* @intr_mask: Mask for interrupts lines
@@ -64,7 +65,7 @@
* IMR on changes.
*/
struct realtek_gpio_ctrl {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
void __iomem *cpumask_base;
struct cpumask cpu_irq_maskable;
@@ -101,7 +102,7 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
- return container_of(gc, struct realtek_gpio_ctrl, gc);
+ return container_of(to_gpio_generic_chip(gc), struct realtek_gpio_ctrl, chip);
}
/*
@@ -194,7 +195,7 @@ static void realtek_gpio_irq_unmask(struct irq_data *data)
unsigned int line = irqd_to_hwirq(data);
unsigned long flags;
- gpiochip_enable_irq(&ctrl->gc, line);
+ gpiochip_enable_irq(&ctrl->chip.gc, line);
raw_spin_lock_irqsave(&ctrl->lock, flags);
ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
@@ -213,7 +214,7 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
- gpiochip_disable_irq(&ctrl->gc, line);
+ gpiochip_disable_irq(&ctrl->chip.gc, line);
}
static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type)
@@ -356,8 +357,9 @@ MODULE_DEVICE_TABLE(of, realtek_gpio_of_match);
static int realtek_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
- unsigned long bgpio_flags;
+ unsigned long gen_gc_flags;
unsigned int dev_flags;
struct gpio_irq_chip *girq;
struct realtek_gpio_ctrl *ctrl;
@@ -388,32 +390,37 @@ static int realtek_gpio_probe(struct platform_device *pdev)
raw_spin_lock_init(&ctrl->lock);
if (dev_flags & GPIO_PORTS_REVERSED) {
- bgpio_flags = 0;
+ gen_gc_flags = 0;
ctrl->bank_read = realtek_gpio_bank_read;
ctrl->bank_write = realtek_gpio_bank_write;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
} else {
- bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
+ gen_gc_flags = GPIO_GENERIC_BIG_ENDIAN_BYTE_ORDER;
ctrl->bank_read = realtek_gpio_bank_read_swapped;
ctrl->bank_write = realtek_gpio_bank_write_swapped;
ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
}
- err = bgpio_init(&ctrl->gc, dev, 4,
- ctrl->base + REALTEK_GPIO_REG_DATA, NULL, NULL,
- ctrl->base + REALTEK_GPIO_REG_DIR, NULL,
- bgpio_flags);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = ctrl->base + REALTEK_GPIO_REG_DATA,
+ .dirout = ctrl->base + REALTEK_GPIO_REG_DIR,
+ .flags = gen_gc_flags,
+ };
+
+ err = gpio_generic_chip_init(&ctrl->chip, &config);
if (err) {
dev_err(dev, "unable to init generic GPIO");
return err;
}
- ctrl->gc.ngpio = ngpios;
- ctrl->gc.owner = THIS_MODULE;
+ ctrl->chip.gc.ngpio = ngpios;
+ ctrl->chip.gc.owner = THIS_MODULE;
irq = platform_get_irq_optional(pdev, 0);
if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) {
- girq = &ctrl->gc.irq;
+ girq = &ctrl->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &realtek_gpio_irq_chip);
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
@@ -442,7 +449,7 @@ static int realtek_gpio_probe(struct platform_device *pdev)
cpumask_set_cpu(cpu, &ctrl->cpu_irq_maskable);
}
- return devm_gpiochip_add_data(dev, &ctrl->gc, ctrl);
+ return devm_gpiochip_add_data(dev, &ctrl->chip.gc, ctrl);
}
static struct platform_driver realtek_gpio_driver = {
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index e8a32dfebdcb..e5ba38e65c10 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -31,6 +31,12 @@ struct gpio_regmap {
unsigned int reg_clr_base;
unsigned int reg_dir_in_base;
unsigned int reg_dir_out_base;
+ unsigned long *fixed_direction_output;
+
+#ifdef CONFIG_REGMAP_IRQ
+ int regmap_irq_line;
+ struct regmap_irq_chip_data *irq_chip_data;
+#endif
int (*reg_mask_xlate)(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
@@ -76,7 +82,11 @@ static int gpio_regmap_get(struct gpio_chip *chip, unsigned int offset)
if (ret)
return ret;
- ret = regmap_read(gpio->regmap, reg, &val);
+ /* ensure we don't spoil any register cache with pin input values */
+ if (gpio->reg_dat_base == gpio->reg_set_base)
+ ret = regmap_read_bypassed(gpio->regmap, reg, &val);
+ else
+ ret = regmap_read(gpio->regmap, reg, &val);
if (ret)
return ret;
@@ -88,7 +98,7 @@ static int gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
{
struct gpio_regmap *gpio = gpiochip_get_data(chip);
unsigned int base = gpio_regmap_addr(gpio->reg_set_base);
- unsigned int reg, mask;
+ unsigned int reg, mask, mask_val;
int ret;
ret = gpio->reg_mask_xlate(gpio, base, offset, &reg, &mask);
@@ -96,9 +106,15 @@ static int gpio_regmap_set(struct gpio_chip *chip, unsigned int offset,
return ret;
if (val)
- ret = regmap_update_bits(gpio->regmap, reg, mask, mask);
+ mask_val = mask;
else
- ret = regmap_update_bits(gpio->regmap, reg, mask, 0);
+ mask_val = 0;
+
+ /* ignore input values which shadow the old output value */
+ if (gpio->reg_dat_base == gpio->reg_set_base)
+ ret = regmap_write_bits(gpio->regmap, reg, mask, mask_val);
+ else
+ ret = regmap_update_bits(gpio->regmap, reg, mask, mask_val);
return ret;
}
@@ -129,6 +145,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
unsigned int base, val, reg, mask;
int invert, ret;
+ if (gpio->fixed_direction_output) {
+ if (test_bit(offset, gpio->fixed_direction_output))
+ return GPIO_LINE_DIRECTION_OUT;
+ else
+ return GPIO_LINE_DIRECTION_IN;
+ }
+
if (gpio->reg_dat_base && !gpio->reg_set_base)
return GPIO_LINE_DIRECTION_IN;
if (gpio->reg_set_base && !gpio->reg_dat_base)
@@ -215,6 +238,7 @@ EXPORT_SYMBOL_GPL(gpio_regmap_get_drvdata);
*/
struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config)
{
+ struct irq_domain *irq_domain;
struct gpio_regmap *gpio;
struct gpio_chip *chip;
int ret;
@@ -255,6 +279,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
chip->can_sleep = regmap_might_sleep(config->regmap);
+ chip->init_valid_mask = config->init_valid_mask;
chip->request = gpiochip_generic_request;
chip->free = gpiochip_generic_free;
@@ -274,7 +299,18 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
if (!chip->ngpio) {
ret = gpiochip_get_ngpios(chip, chip->parent);
if (ret)
- return ERR_PTR(ret);
+ goto err_free_gpio;
+ }
+
+ if (config->fixed_direction_output) {
+ gpio->fixed_direction_output = bitmap_alloc(chip->ngpio,
+ GFP_KERNEL);
+ if (!gpio->fixed_direction_output) {
+ ret = -ENOMEM;
+ goto err_free_gpio;
+ }
+ bitmap_copy(gpio->fixed_direction_output,
+ config->fixed_direction_output, chip->ngpio);
}
/* if not set, assume there is only one register */
@@ -293,10 +329,24 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
ret = gpiochip_add_data(chip, gpio);
if (ret < 0)
- goto err_free_gpio;
+ goto err_free_bitmap;
+
+#ifdef CONFIG_REGMAP_IRQ
+ if (config->regmap_irq_chip) {
+ gpio->regmap_irq_line = config->regmap_irq_line;
+ ret = regmap_add_irq_chip_fwnode(dev_fwnode(config->parent), config->regmap,
+ config->regmap_irq_line, config->regmap_irq_flags,
+ 0, config->regmap_irq_chip, &gpio->irq_chip_data);
+ if (ret)
+ goto err_free_bitmap;
+
+ irq_domain = regmap_irq_get_domain(gpio->irq_chip_data);
+ } else
+#endif
+ irq_domain = config->irq_domain;
- if (config->irq_domain) {
- ret = gpiochip_irqchip_add_domain(chip, config->irq_domain);
+ if (irq_domain) {
+ ret = gpiochip_irqchip_add_domain(chip, irq_domain);
if (ret)
goto err_remove_gpiochip;
}
@@ -305,6 +355,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
err_remove_gpiochip:
gpiochip_remove(chip);
+err_free_bitmap:
+ bitmap_free(gpio->fixed_direction_output);
err_free_gpio:
kfree(gpio);
return ERR_PTR(ret);
@@ -317,7 +369,13 @@ EXPORT_SYMBOL_GPL(gpio_regmap_register);
*/
void gpio_regmap_unregister(struct gpio_regmap *gpio)
{
+#ifdef CONFIG_REGMAP_IRQ
+ if (gpio->irq_chip_data)
+ regmap_del_irq_chip(gpio->regmap_irq_line, gpio->irq_chip_data);
+#endif
+
gpiochip_remove(&gpio->gpio_chip);
+ bitmap_free(gpio->fixed_direction_output);
kfree(gpio);
}
EXPORT_SYMBOL_GPL(gpio_regmap_unregister);
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index bcfc323a8315..47174eb3ba76 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -769,7 +769,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
list_del(&cfg->head);
switch (cfg->param) {
- case PIN_CONFIG_OUTPUT:
+ case PIN_CONFIG_LEVEL:
ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
if (ret)
dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin,
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 7f6a62f5d1ee..1938ffa2f4f3 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -256,7 +256,7 @@ static void sa1100_gpio_handler(struct irq_desc *desc)
} while (mask);
}
-static int sa1100_gpio_suspend(void)
+static int sa1100_gpio_suspend(void *data)
{
struct sa1100_gpio_chip *sgc = &sa1100_gpio_chip;
@@ -275,19 +275,23 @@ static int sa1100_gpio_suspend(void)
return 0;
}
-static void sa1100_gpio_resume(void)
+static void sa1100_gpio_resume(void *data)
{
sa1100_update_edge_regs(&sa1100_gpio_chip);
}
-static struct syscore_ops sa1100_gpio_syscore_ops = {
+static const struct syscore_ops sa1100_gpio_syscore_ops = {
.suspend = sa1100_gpio_suspend,
.resume = sa1100_gpio_resume,
};
+static struct syscore sa1100_gpio_syscore = {
+ .ops = &sa1100_gpio_syscore_ops,
+};
+
static int __init sa1100_gpio_init_devicefs(void)
{
- register_syscore_ops(&sa1100_gpio_syscore_ops);
+ register_syscore(&sa1100_gpio_syscore);
return 0;
}
diff --git a/drivers/gpio/gpio-shared-proxy.c b/drivers/gpio/gpio-shared-proxy.c
new file mode 100644
index 000000000000..29d7d2e4dfc0
--- /dev/null
+++ b/drivers/gpio/gpio-shared-proxy.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Linaro Ltd.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/string_choices.h>
+#include <linux/types.h>
+
+#include "gpiolib-shared.h"
+
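+/*
+ * A proxy instance wraps one shared GPIO descriptor. Requests to drive
+ * the line high are reference-counted as votes: the physical line only
+ * drops low again once the last user has voted for low.
+ */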
+struct gpio_shared_proxy_data {
+ struct gpio_chip gc;
+ struct gpio_shared_desc *shared_desc;
+ struct device *dev;
+ bool voted_high;
+};
+
+static int
+gpio_shared_proxy_set_unlocked(struct gpio_shared_proxy_data *proxy,
+ int (*set_func)(struct gpio_desc *desc, int value),
+ int value)
+{
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+ struct gpio_desc *desc = shared_desc->desc;
+ int ret = 0;
+
+ gpio_shared_lockdep_assert(shared_desc);
+
+ if (value) {
+ /* User wants to set value to high. */
+ if (proxy->voted_high)
+ /* Already voted for high, nothing to do. */
+ goto out;
+
+ /* Haven't voted for high yet. */
+ if (!shared_desc->highcnt) {
+ /*
+ * Current value is low, need to actually set value
+ * to high.
+ */
+ ret = set_func(desc, 1);
+ if (ret)
+ goto out;
+ }
+
+ shared_desc->highcnt++;
+ proxy->voted_high = true;
+
+ goto out;
+ }
+
+ /* Desired value is low. */
+ if (!proxy->voted_high)
+ /* We didn't vote for high, nothing to do. */
+ goto out;
+
+ /* We previously voted for high. */
+ if (shared_desc->highcnt == 1) {
+ /* This is the last remaining vote for high, set value to low. */
+ ret = set_func(desc, 0);
+ if (ret)
+ goto out;
+ }
+
+ shared_desc->highcnt--;
+ proxy->voted_high = false;
+
+out:
+ if (shared_desc->highcnt)
+ dev_dbg(proxy->dev,
+ "Voted for value '%s', effective value is 'high', number of votes for 'high': %u\n",
+ str_high_low(value), shared_desc->highcnt);
+ else
+ dev_dbg(proxy->dev, "Voted for value 'low', effective value is 'low'\n");
+
+ return ret;
+}
+
+static int gpio_shared_proxy_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ proxy->shared_desc->usecnt++;
+
+ dev_dbg(proxy->dev, "Shared GPIO requested, number of users: %u\n",
+ proxy->shared_desc->usecnt);
+
+ return 0;
+}
+
+static void gpio_shared_proxy_free(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ proxy->shared_desc->usecnt--;
+
+ dev_dbg(proxy->dev, "Shared GPIO freed, number of users: %u\n",
+ proxy->shared_desc->usecnt);
+}
+
+static int gpio_shared_proxy_set_config(struct gpio_chip *gc,
+ unsigned int offset, unsigned long cfg)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+ struct gpio_desc *desc = shared_desc->desc;
+ int ret;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ if (shared_desc->usecnt > 1) {
+ if (shared_desc->cfg != cfg) {
+ dev_dbg(proxy->dev,
+ "Shared GPIO's configuration already set, accepting changes but users may conflict!!\n");
+ } else {
+ dev_dbg(proxy->dev, "Equal config requested, nothing to do\n");
+ return 0;
+ }
+ }
+
+ ret = gpiod_set_config(desc, cfg);
+ if (ret && ret != -ENOTSUPP)
+ return ret;
+
+ shared_desc->cfg = cfg;
+ return 0;
+}
+
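+/*
+ * Direction changes are only honoured while a single user holds the
+ * line; with multiple users the established direction is enforced.
+ */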
+static int gpio_shared_proxy_direction_input(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+ struct gpio_desc *desc = shared_desc->desc;
+ int dir;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ if (shared_desc->usecnt == 1) {
+ dev_dbg(proxy->dev,
+ "Only one user of this shared GPIO, allowing to set direction to input\n");
+
+ return gpiod_direction_input(desc);
+ }
+
+ dir = gpiod_get_direction(desc);
+ if (dir < 0)
+ return dir;
+
+ if (dir == GPIO_LINE_DIRECTION_OUT) {
+ dev_dbg(proxy->dev,
+ "Shared GPIO's direction already set to output, refusing to change\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int gpio_shared_proxy_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+ struct gpio_shared_desc *shared_desc = proxy->shared_desc;
+ struct gpio_desc *desc = shared_desc->desc;
+ int ret, dir;
+
+ guard(gpio_shared_desc_lock)(shared_desc);
+
+ if (shared_desc->usecnt == 1) {
+ dev_dbg(proxy->dev,
+ "Only one user of this shared GPIO, allowing to set direction to output with value '%s'\n",
+ str_high_low(value));
+
+ ret = gpiod_direction_output(desc, value);
+ if (ret)
+ return ret;
+
+ if (value) {
+ proxy->voted_high = true;
+ shared_desc->highcnt = 1;
+ } else {
+ proxy->voted_high = false;
+ shared_desc->highcnt = 0;
+ }
+
+ return 0;
+ }
+
+ dir = gpiod_get_direction(desc);
+ if (dir < 0)
+ return dir;
+
+ if (dir == GPIO_LINE_DIRECTION_IN) {
+ dev_dbg(proxy->dev,
+ "Shared GPIO's direction already set to input, refusing to change\n");
+ return -EPERM;
+ }
+
+ return gpio_shared_proxy_set_unlocked(proxy, gpiod_direction_output, value);
+}
+
+static int gpio_shared_proxy_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpiod_get_value(proxy->shared_desc->desc);
+}
+
+static int gpio_shared_proxy_get_cansleep(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpiod_get_value_cansleep(proxy->shared_desc->desc);
+}
+
+static int gpio_shared_proxy_do_set(struct gpio_shared_proxy_data *proxy,
+ int (*set_func)(struct gpio_desc *desc, int value),
+ int value)
+{
+ guard(gpio_shared_desc_lock)(proxy->shared_desc);
+
+ return gpio_shared_proxy_set_unlocked(proxy, set_func, value);
+}
+
+static int gpio_shared_proxy_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpio_shared_proxy_do_set(proxy, gpiod_set_value, value);
+}
+
+static int gpio_shared_proxy_set_cansleep(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpio_shared_proxy_do_set(proxy, gpiod_set_value_cansleep, value);
+}
+
+static int gpio_shared_proxy_get_direction(struct gpio_chip *gc,
+ unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpiod_get_direction(proxy->shared_desc->desc);
+}
+
+static int gpio_shared_proxy_to_irq(struct gpio_chip *gc, unsigned int offset)
+{
+ struct gpio_shared_proxy_data *proxy = gpiochip_get_data(gc);
+
+ return gpiod_to_irq(proxy->shared_desc->desc);
+}
+
+static int gpio_shared_proxy_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct gpio_shared_proxy_data *proxy;
+ struct gpio_shared_desc *shared_desc;
+ struct device *dev = &adev->dev;
+ struct gpio_chip *gc;
+
+ shared_desc = devm_gpiod_shared_get(dev);
+ if (IS_ERR(shared_desc))
+ return PTR_ERR(shared_desc);
+
+ proxy = devm_kzalloc(dev, sizeof(*proxy), GFP_KERNEL);
+ if (!proxy)
+ return -ENOMEM;
+
+ proxy->shared_desc = shared_desc;
+ proxy->dev = dev;
+
+ gc = &proxy->gc;
+ gc->base = -1;
+ gc->ngpio = 1;
+ gc->label = dev_name(dev);
+ gc->parent = dev;
+ gc->owner = THIS_MODULE;
+ gc->can_sleep = shared_desc->can_sleep;
+
+ gc->request = gpio_shared_proxy_request;
+ gc->free = gpio_shared_proxy_free;
+ gc->set_config = gpio_shared_proxy_set_config;
+ gc->direction_input = gpio_shared_proxy_direction_input;
+ gc->direction_output = gpio_shared_proxy_direction_output;
+ if (gc->can_sleep) {
+ gc->set = gpio_shared_proxy_set_cansleep;
+ gc->get = gpio_shared_proxy_get_cansleep;
+ } else {
+ gc->set = gpio_shared_proxy_set;
+ gc->get = gpio_shared_proxy_get;
+ }
+ gc->get_direction = gpio_shared_proxy_get_direction;
+ gc->to_irq = gpio_shared_proxy_to_irq;
+
+ return devm_gpiochip_add_data(dev, &proxy->gc, proxy);
+}
+
+static const struct auxiliary_device_id gpio_shared_proxy_id_table[] = {
+ { .name = "gpiolib_shared.proxy" },
+ {},
+};
+MODULE_DEVICE_TABLE(auxiliary, gpio_shared_proxy_id_table);
+
+static struct auxiliary_driver gpio_shared_proxy_driver = {
+ .driver = {
+ .name = "gpio-shared-proxy",
+ .suppress_bind_attrs = true,
+ },
+ .probe = gpio_shared_proxy_probe,
+ .id_table = gpio_shared_proxy_id_table,
+};
+module_auxiliary_driver(gpio_shared_proxy_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
+MODULE_DESCRIPTION("Shared GPIO mux driver.");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 067c8edb62e2..94ef2efbd14f 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -32,7 +33,7 @@
struct sifive_gpio {
void __iomem *base;
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
struct regmap *regs;
unsigned long irq_state;
unsigned int trigger[SIFIVE_GPIO_MAX];
@@ -41,10 +42,10 @@ struct sifive_gpio {
static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
{
- unsigned long flags;
unsigned int trigger;
- raw_spin_lock_irqsave(&chip->gc.bgpio_lock, flags);
+ guard(gpio_generic_lock_irqsave)(&chip->gen_gc);
+
trigger = (chip->irq_state & BIT(offset)) ? chip->trigger[offset] : 0;
regmap_update_bits(chip->regs, SIFIVE_GPIO_RISE_IE, BIT(offset),
(trigger & IRQ_TYPE_EDGE_RISING) ? BIT(offset) : 0);
@@ -54,7 +55,6 @@ static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
(trigger & IRQ_TYPE_LEVEL_HIGH) ? BIT(offset) : 0);
regmap_update_bits(chip->regs, SIFIVE_GPIO_LOW_IE, BIT(offset),
(trigger & IRQ_TYPE_LEVEL_LOW) ? BIT(offset) : 0);
- raw_spin_unlock_irqrestore(&chip->gc.bgpio_lock, flags);
}
static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
@@ -72,13 +72,12 @@ static int sifive_gpio_irq_set_type(struct irq_data *d, unsigned int trigger)
}
static void sifive_gpio_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct sifive_gpio *chip = gpiochip_get_data(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
int offset = hwirq % SIFIVE_GPIO_MAX;
u32 bit = BIT(offset);
- unsigned long flags;
gpiochip_enable_irq(gc, hwirq);
irq_chip_enable_parent(d);
@@ -86,13 +85,13 @@ static void sifive_gpio_irq_enable(struct irq_data *d)
/* Switch to input */
gc->direction_input(gc, offset);
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- /* Clear any sticky pending interrupts */
- regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &chip->gen_gc) {
+ /* Clear any sticky pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ }
/* Enable interrupts */
assign_bit(offset, &chip->irq_state, 1);
@@ -118,15 +117,14 @@ static void sifive_gpio_irq_eoi(struct irq_data *d)
struct sifive_gpio *chip = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
u32 bit = BIT(offset);
- unsigned long flags;
- raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
- /* Clear all pending interrupts */
- regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
- regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
- raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+ scoped_guard(gpio_generic_lock_irqsave, &chip->gen_gc) {
+ /* Clear all pending interrupts */
+ regmap_write(chip->regs, SIFIVE_GPIO_RISE_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_FALL_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_HIGH_IP, bit);
+ regmap_write(chip->regs, SIFIVE_GPIO_LOW_IP, bit);
+ }
irq_chip_eoi_parent(d);
}
@@ -174,12 +172,12 @@ static const struct regmap_config sifive_gpio_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .fast_io = true,
.disable_locking = true,
};
static int sifive_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct irq_domain *parent;
struct gpio_irq_chip *girq;
@@ -218,13 +216,17 @@ static int sifive_gpio_probe(struct platform_device *pdev)
*/
parent = irq_get_irq_data(chip->irq_number[0])->domain;
- ret = bgpio_init(&chip->gc, dev, 4,
- chip->base + SIFIVE_GPIO_INPUT_VAL,
- chip->base + SIFIVE_GPIO_OUTPUT_VAL,
- NULL,
- chip->base + SIFIVE_GPIO_OUTPUT_EN,
- chip->base + SIFIVE_GPIO_INPUT_EN,
- BGPIOF_READ_OUTPUT_REG_SET);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = chip->base + SIFIVE_GPIO_INPUT_VAL,
+ .set = chip->base + SIFIVE_GPIO_OUTPUT_VAL,
+ .dirout = chip->base + SIFIVE_GPIO_OUTPUT_EN,
+ .dirin = chip->base + SIFIVE_GPIO_INPUT_EN,
+ .flags = GPIO_GENERIC_READ_OUTPUT_REG_SET,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
@@ -237,12 +239,12 @@ static int sifive_gpio_probe(struct platform_device *pdev)
regmap_write(chip->regs, SIFIVE_GPIO_LOW_IE, 0);
chip->irq_state = 0;
- chip->gc.base = -1;
- chip->gc.ngpio = ngpio;
- chip->gc.label = dev_name(dev);
- chip->gc.parent = dev;
- chip->gc.owner = THIS_MODULE;
- girq = &chip->gc.irq;
+ chip->gen_gc.gc.base = -1;
+ chip->gen_gc.gc.ngpio = ngpio;
+ chip->gen_gc.gc.label = dev_name(dev);
+ chip->gen_gc.gc.parent = dev;
+ chip->gen_gc.gc.owner = THIS_MODULE;
+ girq = &chip->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &sifive_gpio_irqchip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -250,7 +252,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
girq->handler = handle_bad_irq;
girq->default_type = IRQ_TYPE_NONE;
- return gpiochip_add_data(&chip->gc, chip);
+ return gpiochip_add_data(&chip->gen_gc.gc, chip);
}
static const struct of_device_id sifive_gpio_match[] = {
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 050092583f79..a83f5238427c 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -262,8 +262,7 @@ static void gpio_sim_dbg_show(struct seq_file *seq, struct gpio_chip *gc)
guard(mutex)(&chip->lock);
for_each_hwgpio(gc, i, label)
- seq_printf(seq, " gpio-%-3d (%s) %s,%s\n",
- gc->base + i,
+ seq_printf(seq, " gpio-%-3d (%s) %s,%s\n", i,
label ?: "<unused>",
test_bit(i, chip->direction_map) ? "input" :
test_bit(i, chip->value_map) ? "output-high" :
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index abd13c79ace0..37c133837729 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -39,7 +40,7 @@ struct sdv_gpio_chip_data {
void __iomem *gpio_pub_base;
struct irq_domain *id;
struct irq_chip_generic *gc;
- struct gpio_chip chip;
+ struct gpio_generic_chip gen_gc;
};
static int sdv_gpio_pub_set_type(struct irq_data *d, unsigned int type)
@@ -180,6 +181,7 @@ static int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
static int sdv_gpio_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
+ struct gpio_generic_chip_config config;
struct sdv_gpio_chip_data *sd;
int ret;
u32 mux_val;
@@ -206,15 +208,21 @@ static int sdv_gpio_probe(struct pci_dev *pdev,
if (!ret)
writel(mux_val, sd->gpio_pub_base + GPMUXCTL);
- ret = bgpio_init(&sd->chip, &pdev->dev, 4,
- sd->gpio_pub_base + GPINR, sd->gpio_pub_base + GPOUTR,
- NULL, sd->gpio_pub_base + GPOER, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = sd->gpio_pub_base + GPINR,
+ .set = sd->gpio_pub_base + GPOUTR,
+ .dirout = sd->gpio_pub_base + GPOER,
+ };
+
+ ret = gpio_generic_chip_init(&sd->gen_gc, &config);
if (ret)
return ret;
- sd->chip.ngpio = SDV_NUM_PUB_GPIOS;
+ sd->gen_gc.gc.ngpio = SDV_NUM_PUB_GPIOS;
- ret = devm_gpiochip_add_data(&pdev->dev, &sd->chip, sd);
+ ret = devm_gpiochip_add_data(&pdev->dev, &sd->gen_gc.gc, sd);
if (ret < 0) {
dev_err(&pdev->dev, "gpiochip_add() failed.\n");
return ret;
diff --git a/drivers/gpio/gpio-spacemit-k1.c b/drivers/gpio/gpio-spacemit-k1.c
index 3cc75c701ec4..eb66a15c002f 100644
--- a/drivers/gpio/gpio-spacemit-k1.c
+++ b/drivers/gpio/gpio-spacemit-k1.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -38,7 +39,7 @@
struct spacemit_gpio;
struct spacemit_gpio_bank {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
struct spacemit_gpio *sg;
void __iomem *base;
u32 irq_mask;
@@ -72,7 +73,7 @@ static irqreturn_t spacemit_gpio_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
for_each_set_bit(n, &pending, BITS_PER_LONG)
- handle_nested_irq(irq_find_mapping(gb->gc.irq.domain, n));
+ handle_nested_irq(irq_find_mapping(gb->chip.gc.irq.domain, n));
return IRQ_HANDLED;
}
@@ -143,7 +144,7 @@ static void spacemit_gpio_irq_print_chip(struct irq_data *data, struct seq_file
{
struct spacemit_gpio_bank *gb = irq_data_get_irq_chip_data(data);
- seq_printf(p, "%s-%d", dev_name(gb->gc.parent), spacemit_gpio_bank_index(gb));
+ seq_printf(p, "%s-%d", dev_name(gb->chip.gc.parent), spacemit_gpio_bank_index(gb));
}
static struct irq_chip spacemit_gpio_chip = {
@@ -165,7 +166,7 @@ static bool spacemit_of_node_instance_match(struct gpio_chip *gc, unsigned int i
if (i >= SPACEMIT_NR_BANKS)
return false;
- return (gc == &sg->sgb[i].gc);
+ return (gc == &sg->sgb[i].chip.gc);
}
static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
@@ -173,7 +174,8 @@ static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
int index, int irq)
{
struct spacemit_gpio_bank *gb = &sg->sgb[index];
- struct gpio_chip *gc = &gb->gc;
+ struct gpio_generic_chip_config config;
+ struct gpio_chip *gc = &gb->chip.gc;
struct device *dev = sg->dev;
struct gpio_irq_chip *girq;
void __iomem *dat, *set, *clr, *dirin, *dirout;
@@ -187,9 +189,20 @@ static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
dirin = gb->base + SPACEMIT_GCDR;
dirout = gb->base + SPACEMIT_GSDR;
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = dat,
+ .set = set,
+ .clr = clr,
+ .dirout = dirout,
+ .dirin = dirin,
+ .flags = GPIO_GENERIC_UNREADABLE_REG_SET |
+ GPIO_GENERIC_UNREADABLE_REG_DIR,
+ };
+
/* This registers 32 GPIO lines per bank */
- ret = bgpio_init(gc, dev, 4, dat, set, clr, dirout, dirin,
- BGPIOF_UNREADABLE_REG_SET | BGPIOF_UNREADABLE_REG_DIR);
+ ret = gpio_generic_chip_init(&gb->chip, &config);
if (ret)
return dev_err_probe(dev, ret, "failed to init gpio chip\n");
@@ -221,7 +234,7 @@ static int spacemit_gpio_add_bank(struct spacemit_gpio *sg,
ret = devm_request_threaded_irq(dev, irq, NULL,
spacemit_gpio_irq_handler,
IRQF_ONESHOT | IRQF_SHARED,
- gb->gc.label, gb);
+ gb->chip.gc.label, gb);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to register IRQ\n");
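
The shared, threaded handler above demultiplexes one bank interrupt into per-line nested virqs; it runs as the thread function of a threaded IRQ (devm_request_threaded_irq() with IRQF_ONESHOT, as in the spacemit probe), which is why handle_nested_irq() is the right dispatch call. A distilled sketch of the loop, assuming a bank struct with an embedded generic chip and a hypothetical pending-status register:

#include <linux/bitops.h>
#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

struct bar_gpio_bank {
	struct gpio_generic_chip chip;
	void __iomem *status;	/* assumed pending-interrupt register */
};

static irqreturn_t bar_gpio_irq_handler(int irq, void *dev_id)
{
	struct bar_gpio_bank *gb = dev_id;
	unsigned long pending = readl(gb->status);
	int n;

	if (!pending)
		return IRQ_NONE;	/* shared line, interrupt wasn't ours */

	/* One nested virq per set status bit, looked up in the irqdomain. */
	for_each_set_bit(n, &pending, BITS_PER_LONG)
		handle_nested_irq(irq_find_mapping(gb->chip.gc.irq.domain, n));

	return IRQ_HANDLED;
}
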
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 5dd4c21a8e60..6faf30347a36 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -262,9 +262,8 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
stmpe_gpio->regs[REG_IE][regoffset] |= mask;
}
-static void stmpe_dbg_show_one(struct seq_file *s,
- struct gpio_chip *gc,
- unsigned offset, unsigned gpio)
+static void stmpe_dbg_show_one(struct seq_file *s, struct gpio_chip *gc,
+ unsigned int offset)
{
struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
struct stmpe *stmpe = stmpe_gpio->stmpe;
@@ -286,7 +285,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
if (dir) {
seq_printf(s, " gpio-%-3d (%-20.20s) out %s",
- gpio, label ?: "(none)", str_hi_lo(val));
+ offset, label ?: "(none)", str_hi_lo(val));
} else {
u8 edge_det_reg;
u8 rise_reg;
@@ -354,7 +353,7 @@ static void stmpe_dbg_show_one(struct seq_file *s,
irqen = !!(ret & mask);
seq_printf(s, " gpio-%-3d (%-20.20s) in %s %13s %13s %25s %25s",
- gpio, label ?: "(none)",
+ offset, label ?: "(none)",
str_hi_lo(val),
edge_det_values[edge_det],
irqen ? "IRQ-enabled" : "IRQ-disabled",
@@ -366,10 +365,9 @@ static void stmpe_dbg_show_one(struct seq_file *s,
static void stmpe_dbg_show(struct seq_file *s, struct gpio_chip *gc)
{
unsigned i;
- unsigned gpio = gc->base;
- for (i = 0; i < gc->ngpio; i++, gpio++) {
- stmpe_dbg_show_one(s, gc, i, gpio);
+ for (i = 0; i < gc->ngpio; i++) {
+ stmpe_dbg_show_one(s, gc, i);
seq_putc(s, '\n');
}
}
@@ -534,10 +532,16 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(dev, &stmpe_gpio->chip, stmpe_gpio);
}
+static const struct of_device_id stmpe_gpio_of_matches[] = {
+ { .compatible = "st,stmpe-gpio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stmpe_gpio_of_matches);
+
static struct platform_driver stmpe_gpio_driver = {
.driver = {
- .suppress_bind_attrs = true,
- .name = "stmpe-gpio",
+ .name = "stmpe-gpio",
+ .of_match_table = stmpe_gpio_of_matches,
},
.probe = stmpe_gpio_probe,
};
@@ -547,3 +551,13 @@ static int __init stmpe_gpio_init(void)
return platform_driver_register(&stmpe_gpio_driver);
}
subsys_initcall(stmpe_gpio_init);
+
+static void __exit stmpe_gpio_exit(void)
+{
+ platform_driver_unregister(&stmpe_gpio_driver);
+}
+module_exit(stmpe_gpio_exit);
+
+MODULE_DESCRIPTION("STMPE expander GPIO");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+MODULE_LICENSE("GPL");
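
The stmpe hunk turns a built-in-only driver into one that also works as a module: an OF match table (with MODULE_DEVICE_TABLE for modalias-based autoloading), an __exit path, and the MODULE_* tags. The skeleton of that shape, with baz naming and the compatible string assumed:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int baz_gpio_probe(struct platform_device *pdev)
{
	return 0;	/* stub for the sketch */
}

static const struct of_device_id baz_gpio_of_matches[] = {
	{ .compatible = "vendor,baz-gpio", },	/* assumed compatible */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, baz_gpio_of_matches);

static struct platform_driver baz_gpio_driver = {
	.driver = {
		.name = "baz-gpio",
		.of_match_table = baz_gpio_of_matches,
	},
	.probe = baz_gpio_probe,
};

/* subsys_initcall keeps built-in registration early for dependent users. */
static int __init baz_gpio_init(void)
{
	return platform_driver_register(&baz_gpio_driver);
}
subsys_initcall(baz_gpio_init);

static void __exit baz_gpio_exit(void)
{
	platform_driver_unregister(&baz_gpio_driver);
}
module_exit(baz_gpio_exit);

MODULE_DESCRIPTION("Example GPIO expander skeleton");
MODULE_LICENSE("GPL");
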
diff --git a/drivers/gpio/gpio-tb10x.c b/drivers/gpio/gpio-tb10x.c
index 1869ee7f9423..3c8fd322a713 100644
--- a/drivers/gpio/gpio-tb10x.c
+++ b/drivers/gpio/gpio-tb10x.c
@@ -7,20 +7,20 @@
* Christian Ruppert <christian.ruppert@abilis.com>
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
+#include <linux/bitops.h>
#include <linux/gpio/driver.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
+#include <linux/gpio/generic.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/spinlock.h>
-#include <linux/bitops.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#define TB10X_GPIO_DIR_IN (0x00000000)
#define TB10X_GPIO_DIR_OUT (0x00000001)
@@ -36,13 +36,13 @@
* @base: register base address
* @domain: IRQ domain of GPIO generated interrupts managed by this controller
* @irq: Interrupt line of parent interrupt controller
- * @gc: gpio_chip structure associated to this GPIO controller
+ * @chip: Generic GPIO chip structure associated with this GPIO controller
*/
struct tb10x_gpio {
void __iomem *base;
struct irq_domain *domain;
int irq;
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
};
static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
@@ -50,28 +50,6 @@ static inline u32 tb10x_reg_read(struct tb10x_gpio *gpio, unsigned int offs)
return ioread32(gpio->base + offs);
}
-static inline void tb10x_reg_write(struct tb10x_gpio *gpio, unsigned int offs,
- u32 val)
-{
- iowrite32(val, gpio->base + offs);
-}
-
-static inline void tb10x_set_bits(struct tb10x_gpio *gpio, unsigned int offs,
- u32 mask, u32 val)
-{
- u32 r;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&gpio->gc.bgpio_lock, flags);
-
- r = tb10x_reg_read(gpio, offs);
- r = (r & ~mask) | (val & mask);
-
- tb10x_reg_write(gpio, offs, r);
-
- raw_spin_unlock_irqrestore(&gpio->gc.bgpio_lock, flags);
-}
-
static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct tb10x_gpio *tb10x_gpio = gpiochip_get_data(chip);
@@ -107,6 +85,7 @@ static irqreturn_t tb10x_gpio_irq_cascade(int irq, void *data)
static int tb10x_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct tb10x_gpio *tb10x_gpio;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@@ -127,9 +106,9 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (IS_ERR(tb10x_gpio->base))
return PTR_ERR(tb10x_gpio->base);
- tb10x_gpio->gc.label =
+ tb10x_gpio->chip.gc.label =
devm_kasprintf(dev, GFP_KERNEL, "%pOF", pdev->dev.of_node);
- if (!tb10x_gpio->gc.label)
+ if (!tb10x_gpio->chip.gc.label)
return -ENOMEM;
/*
@@ -137,29 +116,30 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
* the lines, no special set or clear registers and a data direction register
* where 1 means "output".
*/
- ret = bgpio_init(&tb10x_gpio->gc, dev, 4,
- tb10x_gpio->base + OFFSET_TO_REG_DATA,
- NULL,
- NULL,
- tb10x_gpio->base + OFFSET_TO_REG_DDR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = tb10x_gpio->base + OFFSET_TO_REG_DATA,
+ .dirout = tb10x_gpio->base + OFFSET_TO_REG_DDR,
+ };
+
+ ret = gpio_generic_chip_init(&tb10x_gpio->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- tb10x_gpio->gc.base = -1;
- tb10x_gpio->gc.parent = dev;
- tb10x_gpio->gc.owner = THIS_MODULE;
+ tb10x_gpio->chip.gc.base = -1;
+ tb10x_gpio->chip.gc.parent = dev;
+ tb10x_gpio->chip.gc.owner = THIS_MODULE;
/*
- * ngpio is set by bgpio_init() but we override it, this .request()
- * callback also overrides the one set up by generic GPIO.
+ * ngpio is set by gpio_generic_chip_init() but we override it, this
+ * .request() callback also overrides the one set up by generic GPIO.
*/
- tb10x_gpio->gc.ngpio = ngpio;
- tb10x_gpio->gc.request = gpiochip_generic_request;
- tb10x_gpio->gc.free = gpiochip_generic_free;
+ tb10x_gpio->chip.gc.ngpio = ngpio;
+ tb10x_gpio->chip.gc.request = gpiochip_generic_request;
+ tb10x_gpio->chip.gc.free = gpiochip_generic_free;
- ret = devm_gpiochip_add_data(dev, &tb10x_gpio->gc, tb10x_gpio);
+ ret = devm_gpiochip_add_data(dev, &tb10x_gpio->chip.gc, tb10x_gpio);
if (ret < 0) {
dev_err(dev, "Could not add gpiochip.\n");
return ret;
@@ -174,7 +154,7 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- tb10x_gpio->gc.to_irq = tb10x_gpio_to_irq;
+ tb10x_gpio->chip.gc.to_irq = tb10x_gpio_to_irq;
tb10x_gpio->irq = ret;
ret = devm_request_irq(dev, ret, tb10x_gpio_irq_cascade,
@@ -183,14 +163,15 @@ static int tb10x_gpio_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- tb10x_gpio->domain = irq_domain_create_linear(dev_fwnode(dev), tb10x_gpio->gc.ngpio,
+ tb10x_gpio->domain = irq_domain_create_linear(dev_fwnode(dev),
+ tb10x_gpio->chip.gc.ngpio,
&irq_generic_chip_ops, NULL);
if (!tb10x_gpio->domain) {
return -ENOMEM;
}
ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain,
- tb10x_gpio->gc.ngpio, 1, tb10x_gpio->gc.label,
+ tb10x_gpio->chip.gc.ngpio, 1, tb10x_gpio->chip.gc.label,
handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
IRQ_GC_INIT_MASK_CACHE);
if (ret)
@@ -218,9 +199,9 @@ static void tb10x_gpio_remove(struct platform_device *pdev)
{
struct tb10x_gpio *tb10x_gpio = platform_get_drvdata(pdev);
- if (tb10x_gpio->gc.to_irq) {
+ if (tb10x_gpio->chip.gc.to_irq) {
irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0],
- BIT(tb10x_gpio->gc.ngpio) - 1, 0, 0);
+ BIT(tb10x_gpio->chip.gc.ngpio) - 1, 0, 0);
kfree(tb10x_gpio->domain->gc);
irq_domain_remove(tb10x_gpio->domain);
}
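
tb10x keeps its hand-rolled generic-irqchip setup; the domain plumbing visible in the probe and remove paths above reduces to roughly the following helper (qux naming assumed; teardown of the allocated chips is left to the caller, mirroring the driver's remove path):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/property.h>

static struct irq_domain *qux_gpio_irq_domain_setup(struct device *dev,
						    unsigned int ngpio,
						    const char *label)
{
	struct irq_domain *domain;
	int ret;

	domain = irq_domain_create_linear(dev_fwnode(dev), ngpio,
					  &irq_generic_chip_ops, NULL);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	/* One generic chip covering all hwirqs, edge handling by default. */
	ret = irq_alloc_domain_generic_chips(domain, ngpio, 1, label,
					     handle_edge_irq, IRQ_NOREQUEST,
					     IRQ_NOPROBE, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		irq_domain_remove(domain);
		return ERR_PTR(ret);
	}

	return domain;
}
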
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 5fd3ec3e2c53..b1498b59a921 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2016-2022 NVIDIA Corporation
+ * Copyright (c) 2016-2025 NVIDIA Corporation
*
* Author: Thierry Reding <treding@nvidia.com>
* Dipen Patel <dpatel@nvidia.com>
@@ -20,6 +20,7 @@
#include <dt-bindings/gpio/tegra194-gpio.h>
#include <dt-bindings/gpio/tegra234-gpio.h>
#include <dt-bindings/gpio/tegra241-gpio.h>
+#include <dt-bindings/gpio/tegra256-gpio.h>
/* security registers */
#define TEGRA186_GPIO_CTL_SCR 0x0c
@@ -68,6 +69,30 @@
#define TEGRA186_GPIO_INTERRUPT_STATUS(x) (0x100 + (x) * 4)
+/* Tegra410 GPIOs implemented by the COMPUTE GPIO controller */
+#define TEGRA410_COMPUTE_GPIO_PORT_A 0
+#define TEGRA410_COMPUTE_GPIO_PORT_B 1
+#define TEGRA410_COMPUTE_GPIO_PORT_C 2
+#define TEGRA410_COMPUTE_GPIO_PORT_D 3
+#define TEGRA410_COMPUTE_GPIO_PORT_E 4
+
+/* Tegra410 GPIOs implemented by the SYSTEM GPIO controller */
+#define TEGRA410_SYSTEM_GPIO_PORT_A 0
+#define TEGRA410_SYSTEM_GPIO_PORT_B 1
+#define TEGRA410_SYSTEM_GPIO_PORT_C 2
+#define TEGRA410_SYSTEM_GPIO_PORT_D 3
+#define TEGRA410_SYSTEM_GPIO_PORT_E 4
+#define TEGRA410_SYSTEM_GPIO_PORT_I 5
+#define TEGRA410_SYSTEM_GPIO_PORT_J 6
+#define TEGRA410_SYSTEM_GPIO_PORT_K 7
+#define TEGRA410_SYSTEM_GPIO_PORT_L 8
+#define TEGRA410_SYSTEM_GPIO_PORT_M 9
+#define TEGRA410_SYSTEM_GPIO_PORT_N 10
+#define TEGRA410_SYSTEM_GPIO_PORT_P 11
+#define TEGRA410_SYSTEM_GPIO_PORT_Q 12
+#define TEGRA410_SYSTEM_GPIO_PORT_R 13
+#define TEGRA410_SYSTEM_GPIO_PORT_V 14
+
struct tegra_gpio_port {
const char *name;
unsigned int bank;
@@ -84,6 +109,7 @@ struct tegra_gpio_soc {
const struct tegra_gpio_port *ports;
unsigned int num_ports;
const char *name;
+ const char *prefix;
unsigned int instance;
unsigned int num_irqs_per_bank;
@@ -915,8 +941,12 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
char *name;
for (j = 0; j < port->pins; j++) {
- name = devm_kasprintf(gpio->gpio.parent, GFP_KERNEL,
- "P%s.%02x", port->name, j);
+ if (gpio->soc->prefix)
+ name = devm_kasprintf(gpio->gpio.parent, GFP_KERNEL, "%s-P%s.%02x",
+ gpio->soc->prefix, port->name, j);
+ else
+ name = devm_kasprintf(gpio->gpio.parent, GFP_KERNEL, "P%s.%02x",
+ port->name, j);
if (!name)
return -ENOMEM;
@@ -1001,14 +1031,17 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(&pdev->dev, &gpio->gpio, gpio);
}
-#define TEGRA186_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA186_MAIN_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
+#define TEGRA_GPIO_PORT(_prefix, _name, _bank, _port, _pins) \
+ [_prefix##_GPIO_PORT_##_name] = { \
+ .name = #_name, \
+ .bank = _bank, \
+ .port = _port, \
+ .pins = _pins, \
}
+#define TEGRA186_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA186_MAIN, _name, _bank, _port, _pins)
+
static const struct tegra_gpio_port tegra186_main_ports[] = {
TEGRA186_MAIN_GPIO_PORT( A, 2, 0, 7),
TEGRA186_MAIN_GPIO_PORT( B, 3, 0, 7),
@@ -1044,13 +1077,8 @@ static const struct tegra_gpio_soc tegra186_main_soc = {
.has_vm_support = false,
};
-#define TEGRA186_AON_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA186_AON_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA186_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA186_AON, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra186_aon_ports[] = {
TEGRA186_AON_GPIO_PORT( S, 0, 1, 5),
@@ -1072,13 +1100,8 @@ static const struct tegra_gpio_soc tegra186_aon_soc = {
.has_vm_support = false,
};
-#define TEGRA194_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA194_MAIN_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA194_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA194_MAIN, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra194_main_ports[] = {
TEGRA194_MAIN_GPIO_PORT( A, 1, 2, 8),
@@ -1128,13 +1151,8 @@ static const struct tegra_gpio_soc tegra194_main_soc = {
.has_vm_support = true,
};
-#define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA194_AON_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA194_AON, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra194_aon_ports[] = {
TEGRA194_AON_GPIO_PORT(AA, 0, 3, 8),
@@ -1154,13 +1172,8 @@ static const struct tegra_gpio_soc tegra194_aon_soc = {
.has_vm_support = false,
};
-#define TEGRA234_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA234_MAIN_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA234_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA234_MAIN, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra234_main_ports[] = {
TEGRA234_MAIN_GPIO_PORT( A, 0, 0, 8),
@@ -1199,13 +1212,8 @@ static const struct tegra_gpio_soc tegra234_main_soc = {
.has_vm_support = true,
};
-#define TEGRA234_AON_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA234_AON_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA234_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA234_AON, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra234_aon_ports[] = {
TEGRA234_AON_GPIO_PORT(AA, 0, 4, 8),
@@ -1226,13 +1234,8 @@ static const struct tegra_gpio_soc tegra234_aon_soc = {
.has_vm_support = false,
};
-#define TEGRA241_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA241_MAIN_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA241_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA241_MAIN, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra241_main_ports[] = {
TEGRA241_MAIN_GPIO_PORT(A, 0, 0, 8),
@@ -1257,13 +1260,8 @@ static const struct tegra_gpio_soc tegra241_main_soc = {
.has_vm_support = false,
};
-#define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \
- [TEGRA241_AON_GPIO_PORT_##_name] = { \
- .name = #_name, \
- .bank = _bank, \
- .port = _port, \
- .pins = _pins, \
- }
+#define TEGRA241_AON_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA241_AON, _name, _bank, _port, _pins)
static const struct tegra_gpio_port tegra241_aon_ports[] = {
TEGRA241_AON_GPIO_PORT(AA, 0, 0, 8),
@@ -1279,6 +1277,75 @@ static const struct tegra_gpio_soc tegra241_aon_soc = {
.has_vm_support = false,
};
+#define TEGRA256_MAIN_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA256_MAIN, _name, _bank, _port, _pins)
+
+static const struct tegra_gpio_port tegra256_main_ports[] = {
+ TEGRA256_MAIN_GPIO_PORT(A, 0, 0, 8),
+ TEGRA256_MAIN_GPIO_PORT(B, 0, 1, 8),
+ TEGRA256_MAIN_GPIO_PORT(C, 0, 2, 8),
+ TEGRA256_MAIN_GPIO_PORT(D, 0, 3, 8),
+};
+
+static const struct tegra_gpio_soc tegra256_main_soc = {
+ .num_ports = ARRAY_SIZE(tegra256_main_ports),
+ .ports = tegra256_main_ports,
+ .name = "tegra256-gpio-main",
+ .instance = 1,
+ .num_irqs_per_bank = 8,
+ .has_vm_support = true,
+};
+
+#define TEGRA410_COMPUTE_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA410_COMPUTE, _name, _bank, _port, _pins)
+
+static const struct tegra_gpio_port tegra410_compute_ports[] = {
+ TEGRA410_COMPUTE_GPIO_PORT(A, 0, 0, 3),
+ TEGRA410_COMPUTE_GPIO_PORT(B, 1, 0, 8),
+ TEGRA410_COMPUTE_GPIO_PORT(C, 1, 1, 3),
+ TEGRA410_COMPUTE_GPIO_PORT(D, 2, 0, 8),
+ TEGRA410_COMPUTE_GPIO_PORT(E, 2, 1, 8),
+};
+
+static const struct tegra_gpio_soc tegra410_compute_soc = {
+ .num_ports = ARRAY_SIZE(tegra410_compute_ports),
+ .ports = tegra410_compute_ports,
+ .name = "tegra410-gpio-compute",
+ .prefix = "COMPUTE",
+ .num_irqs_per_bank = 8,
+ .instance = 0,
+};
+
+#define TEGRA410_SYSTEM_GPIO_PORT(_name, _bank, _port, _pins) \
+ TEGRA_GPIO_PORT(TEGRA410_SYSTEM, _name, _bank, _port, _pins)
+
+static const struct tegra_gpio_port tegra410_system_ports[] = {
+ TEGRA410_SYSTEM_GPIO_PORT(A, 0, 0, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(B, 0, 1, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(C, 0, 2, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(D, 0, 3, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(E, 0, 4, 6),
+ TEGRA410_SYSTEM_GPIO_PORT(I, 1, 0, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(J, 1, 1, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(K, 1, 2, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(L, 1, 3, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(M, 2, 0, 7),
+ TEGRA410_SYSTEM_GPIO_PORT(N, 2, 1, 6),
+ TEGRA410_SYSTEM_GPIO_PORT(P, 2, 2, 8),
+ TEGRA410_SYSTEM_GPIO_PORT(Q, 2, 3, 3),
+ TEGRA410_SYSTEM_GPIO_PORT(R, 2, 4, 2),
+ TEGRA410_SYSTEM_GPIO_PORT(V, 1, 4, 2),
+};
+
+static const struct tegra_gpio_soc tegra410_system_soc = {
+ .num_ports = ARRAY_SIZE(tegra410_system_ports),
+ .ports = tegra410_system_ports,
+ .name = "tegra410-gpio-system",
+ .prefix = "SYSTEM",
+ .num_irqs_per_bank = 8,
+ .instance = 0,
+};
+
static const struct of_device_id tegra186_gpio_of_match[] = {
{
.compatible = "nvidia,tegra186-gpio",
@@ -1299,6 +1366,9 @@ static const struct of_device_id tegra186_gpio_of_match[] = {
.compatible = "nvidia,tegra234-gpio-aon",
.data = &tegra234_aon_soc
}, {
+ .compatible = "nvidia,tegra256-gpio",
+ .data = &tegra256_main_soc
+ }, {
/* sentinel */
}
};
@@ -1311,6 +1381,8 @@ static const struct acpi_device_id tegra186_gpio_acpi_match[] = {
{ .id = "NVDA0408", .driver_data = (kernel_ulong_t)&tegra194_aon_soc },
{ .id = "NVDA0508", .driver_data = (kernel_ulong_t)&tegra241_main_soc },
{ .id = "NVDA0608", .driver_data = (kernel_ulong_t)&tegra241_aon_soc },
+ { .id = "NVDA0708", .driver_data = (kernel_ulong_t)&tegra410_compute_soc },
+ { .id = "NVDA0808", .driver_data = (kernel_ulong_t)&tegra410_system_soc },
{}
};
MODULE_DEVICE_TABLE(acpi, tegra186_gpio_acpi_match);
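
The repeated per-SoC port macros collapse into a single TEGRA_GPIO_PORT() via token pasting. As a worked expansion (comments only, not compilable on its own), the Tegra256 port A entry goes through these steps:

/* TEGRA256_MAIN_GPIO_PORT(A, 0, 0, 8) first expands to: */
TEGRA_GPIO_PORT(TEGRA256_MAIN, A, 0, 0, 8)

/* ...and [_prefix##_GPIO_PORT_##_name] then pastes the index token: */
[TEGRA256_MAIN_GPIO_PORT_A] = {
	.name = "A",	/* #_name stringizes the port letter */
	.bank = 0,
	.port = 0,
	.pins = 8,
}
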
diff --git a/drivers/gpio/gpio-tqmx86.c b/drivers/gpio/gpio-tqmx86.c
index 27dd09273292..eedfc0e371e3 100644
--- a/drivers/gpio/gpio-tqmx86.c
+++ b/drivers/gpio/gpio-tqmx86.c
@@ -279,19 +279,18 @@ static void tqmx86_gpio_irq_handler(struct irq_desc *desc)
}
/* Minimal runtime PM is needed by the IRQ subsystem */
-static int __maybe_unused tqmx86_gpio_runtime_suspend(struct device *dev)
+static int tqmx86_gpio_runtime_suspend(struct device *dev)
{
return 0;
}
-static int __maybe_unused tqmx86_gpio_runtime_resume(struct device *dev)
+static int tqmx86_gpio_runtime_resume(struct device *dev)
{
return 0;
}
static const struct dev_pm_ops tqmx86_gpio_dev_pm_ops = {
- SET_RUNTIME_PM_OPS(tqmx86_gpio_runtime_suspend,
- tqmx86_gpio_runtime_resume, NULL)
+ RUNTIME_PM_OPS(tqmx86_gpio_runtime_suspend, tqmx86_gpio_runtime_resume, NULL)
};
static void tqmx86_init_irq_valid_mask(struct gpio_chip *chip,
@@ -425,7 +424,7 @@ out_pm_dis:
static struct platform_driver tqmx86_gpio_driver = {
.driver = {
.name = "tqmx86-gpio",
- .pm = &tqmx86_gpio_dev_pm_ops,
+ .pm = pm_ptr(&tqmx86_gpio_dev_pm_ops),
},
.probe = tqmx86_gpio_probe,
};
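
The tqmx86 change is the recurring PM modernization in this series: the non-SET_ macros (RUNTIME_PM_OPS(), SYSTEM_SLEEP_PM_OPS(), and friends) always reference their callbacks, so the __maybe_unused annotations become unnecessary, and pm_ptr() turns the ops pointer into NULL on !CONFIG_PM builds, letting the compiler and linker discard the callbacks entirely. A minimal sketch with quux naming assumed:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int quux_runtime_suspend(struct device *dev)
{
	return 0;	/* stub: runtime PM needed only by the IRQ core */
}

static int quux_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops quux_pm_ops = {
	RUNTIME_PM_OPS(quux_runtime_suspend, quux_runtime_resume, NULL)
};

static int quux_probe(struct platform_device *pdev)
{
	return 0;	/* stub for the sketch */
}

static struct platform_driver quux_driver = {
	.driver = {
		.name = "quux-gpio",
		/* NULL (and callbacks dropped) when CONFIG_PM is disabled */
		.pm = pm_ptr(&quux_pm_ops),
	},
	.probe = quux_probe,
};
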
diff --git a/drivers/gpio/gpio-ts4800.c b/drivers/gpio/gpio-ts4800.c
index 4748e3d47106..992ee231db9f 100644
--- a/drivers/gpio/gpio-ts4800.c
+++ b/drivers/gpio/gpio-ts4800.c
@@ -6,9 +6,10 @@
*/
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#define DEFAULT_PIN_NUMBER 16
#define INPUT_REG_OFFSET 0x00
@@ -17,13 +18,14 @@
static int ts4800_gpio_probe(struct platform_device *pdev)
{
- struct device_node *node;
- struct gpio_chip *chip;
+ struct gpio_generic_chip_config config;
+ struct device *dev = &pdev->dev;
+ struct gpio_generic_chip *chip;
void __iomem *base_addr;
int retval;
u32 ngpios;
- chip = devm_kzalloc(&pdev->dev, sizeof(struct gpio_chip), GFP_KERNEL);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
@@ -31,29 +33,28 @@ static int ts4800_gpio_probe(struct platform_device *pdev)
if (IS_ERR(base_addr))
return PTR_ERR(base_addr);
- node = pdev->dev.of_node;
- if (!node)
- return -EINVAL;
-
- retval = of_property_read_u32(node, "ngpios", &ngpios);
+ retval = device_property_read_u32(dev, "ngpios", &ngpios);
if (retval == -EINVAL)
ngpios = DEFAULT_PIN_NUMBER;
else if (retval)
return retval;
- retval = bgpio_init(chip, &pdev->dev, 2, base_addr + INPUT_REG_OFFSET,
- base_addr + OUTPUT_REG_OFFSET, NULL,
- base_addr + DIRECTION_REG_OFFSET, NULL, 0);
- if (retval) {
- dev_err(&pdev->dev, "bgpio_init failed\n");
- return retval;
- }
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 2,
+ .dat = base_addr + INPUT_REG_OFFSET,
+ .set = base_addr + OUTPUT_REG_OFFSET,
+ .dirout = base_addr + DIRECTION_REG_OFFSET,
+ };
- chip->ngpio = ngpios;
+ retval = gpio_generic_chip_init(chip, &config);
+ if (retval)
+ return dev_err_probe(dev, retval,
+ "failed to initialize the generic GPIO chip\n");
- platform_set_drvdata(pdev, chip);
+ chip->gc.ngpio = ngpios;
- return devm_gpiochip_add_data(&pdev->dev, chip, NULL);
+ return devm_gpiochip_add_data(dev, &chip->gc, NULL);
}
static const struct of_device_id ts4800_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index a33dc7c7e7a0..a851702befde 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -597,9 +597,7 @@ no_irqs:
ret = devm_add_action_or_reset(&pdev->dev, gpio_twl4030_power_off_action, d);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "failed to install power off handler\n");
-
+ return ret;
}
return 0;
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 197bb1d22b3c..0574dde5b5bb 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -426,7 +426,7 @@ static void uniphier_gpio_remove(struct platform_device *pdev)
irq_domain_remove(priv->domain);
}
-static int __maybe_unused uniphier_gpio_suspend(struct device *dev)
+static int uniphier_gpio_suspend(struct device *dev)
{
struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
@@ -448,7 +448,7 @@ static int __maybe_unused uniphier_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused uniphier_gpio_resume(struct device *dev)
+static int uniphier_gpio_resume(struct device *dev)
{
struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
@@ -473,8 +473,7 @@ static int __maybe_unused uniphier_gpio_resume(struct device *dev)
}
static const struct dev_pm_ops uniphier_gpio_pm_ops = {
- SET_LATE_SYSTEM_SLEEP_PM_OPS(uniphier_gpio_suspend,
- uniphier_gpio_resume)
+ LATE_SYSTEM_SLEEP_PM_OPS(uniphier_gpio_suspend, uniphier_gpio_resume)
};
static const struct of_device_id uniphier_gpio_match[] = {
@@ -489,7 +488,7 @@ static struct platform_driver uniphier_gpio_driver = {
.driver = {
.name = "uniphier-gpio",
.of_match_table = uniphier_gpio_match,
- .pm = &uniphier_gpio_pm_ops,
+ .pm = pm_sleep_ptr(&uniphier_gpio_pm_ops),
},
};
module_platform_driver(uniphier_gpio_driver);
diff --git a/drivers/gpio/gpio-usbio.c b/drivers/gpio/gpio-usbio.c
new file mode 100644
index 000000000000..34d42c743d5b
--- /dev/null
+++ b/drivers/gpio/gpio-usbio.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Intel Corporation.
+ * Copyright (c) 2025 Red Hat, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/gpio/driver.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/usb/usbio.h>
+
+struct usbio_gpio_bank {
+ u8 config[USBIO_GPIOSPERBANK];
+ u32 bitmap;
+};
+
+struct usbio_gpio {
+ struct mutex config_mutex; /* Protects banks[x].config */
+ struct usbio_gpio_bank banks[USBIO_MAX_GPIOBANKS];
+ struct gpio_chip gc;
+ struct auxiliary_device *adev;
+};
+
+static const struct acpi_device_id usbio_gpio_acpi_hids[] = {
+ { "INTC1007" }, /* MTL */
+ { "INTC10B2" }, /* ARL */
+ { "INTC10B5" }, /* LNL */
+ { "INTC10D1" }, /* MTL-CVF */
+ { "INTC10E2" }, /* PTL */
+ { }
+};
+
+static void usbio_gpio_get_bank_and_pin(struct gpio_chip *gc, unsigned int offset,
+ struct usbio_gpio_bank **bank_ret,
+ unsigned int *pin_ret)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct device *dev = &gpio->adev->dev;
+ struct usbio_gpio_bank *bank;
+ unsigned int pin;
+
+ bank = &gpio->banks[offset / USBIO_GPIOSPERBANK];
+ pin = offset % USBIO_GPIOSPERBANK;
+ if (~bank->bitmap & BIT(pin)) {
+		/* The FW bitmap is sometimes invalid, warn and continue */
+ dev_warn_once(dev, FW_BUG "GPIO %u is not in FW pins bitmap\n", offset);
+ }
+
+ *bank_ret = bank;
+ *pin_ret = pin;
+}
+
+static int usbio_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usbio_gpio_bank *bank;
+ unsigned int pin;
+ u8 cfg;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ cfg = bank->config[pin] & USBIO_GPIO_PINMOD_MASK;
+
+ return (cfg == USBIO_GPIO_PINMOD_OUTPUT) ?
+ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int usbio_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_rw gbuf;
+ unsigned int pin;
+ int ret;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+
+ ret = usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_READ,
+ &gbuf, sizeof(gbuf) - sizeof(gbuf.value),
+ &gbuf, sizeof(gbuf));
+ if (ret != sizeof(gbuf))
+ return (ret < 0) ? ret : -EPROTO;
+
+ return (le32_to_cpu(gbuf.value) >> pin) & 1;
+}
+
+static int usbio_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_rw gbuf;
+ unsigned int pin;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+ gbuf.value = cpu_to_le32(value << pin);
+
+ return usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_WRITE,
+ &gbuf, sizeof(gbuf), NULL, 0);
+}
+
+static int usbio_gpio_update_config(struct gpio_chip *gc, unsigned int offset,
+ u8 mask, u8 value)
+{
+ struct usbio_gpio *gpio = gpiochip_get_data(gc);
+ struct usbio_gpio_bank *bank;
+ struct usbio_gpio_init gbuf;
+ unsigned int pin;
+
+ usbio_gpio_get_bank_and_pin(gc, offset, &bank, &pin);
+
+ guard(mutex)(&gpio->config_mutex);
+
+ bank->config[pin] &= ~mask;
+ bank->config[pin] |= value;
+
+ gbuf.bankid = offset / USBIO_GPIOSPERBANK;
+ gbuf.config = bank->config[pin];
+ gbuf.pincount = 1;
+ gbuf.pin = pin;
+
+ return usbio_control_msg(gpio->adev, USBIO_PKTTYPE_GPIO, USBIO_GPIOCMD_INIT,
+ &gbuf, sizeof(gbuf), NULL, 0);
+}
+
+static int usbio_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
+{
+ return usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINMOD_MASK,
+ USBIO_GPIO_SET_PINMOD(USBIO_GPIO_PINMOD_INPUT));
+}
+
+static int usbio_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int offset, int value)
+{
+ int ret;
+
+ ret = usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINMOD_MASK,
+ USBIO_GPIO_SET_PINMOD(USBIO_GPIO_PINMOD_OUTPUT));
+ if (ret)
+ return ret;
+
+ return usbio_gpio_set(gc, offset, value);
+}
+
+static int usbio_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
+{
+ u8 value;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_DEFAULT);
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PULLUP);
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PULLDOWN);
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ value = USBIO_GPIO_SET_PINCFG(USBIO_GPIO_PINCFG_PUSHPULL);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return usbio_gpio_update_config(gc, offset, USBIO_GPIO_PINCFG_MASK, value);
+}
+
+static int usbio_gpio_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *adev_id)
+{
+ struct usbio_gpio_bank_desc *bank_desc;
+ struct device *dev = &adev->dev;
+ struct usbio_gpio *gpio;
+ int bank, ret;
+
+ bank_desc = dev_get_platdata(dev);
+ if (!bank_desc)
+ return -EINVAL;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &gpio->config_mutex);
+ if (ret)
+ return ret;
+
+ gpio->adev = adev;
+
+ usbio_acpi_bind(gpio->adev, usbio_gpio_acpi_hids);
+
+ for (bank = 0; bank < USBIO_MAX_GPIOBANKS && bank_desc[bank].bmap; bank++)
+ gpio->banks[bank].bitmap = le32_to_cpu(bank_desc[bank].bmap);
+
+ gpio->gc.label = ACPI_COMPANION(dev) ?
+ acpi_dev_name(ACPI_COMPANION(dev)) : dev_name(dev);
+ gpio->gc.parent = dev;
+ gpio->gc.owner = THIS_MODULE;
+ gpio->gc.get_direction = usbio_gpio_get_direction;
+ gpio->gc.direction_input = usbio_gpio_direction_input;
+ gpio->gc.direction_output = usbio_gpio_direction_output;
+ gpio->gc.get = usbio_gpio_get;
+ gpio->gc.set = usbio_gpio_set;
+ gpio->gc.set_config = usbio_gpio_set_config;
+ gpio->gc.base = -1;
+ gpio->gc.ngpio = bank * USBIO_GPIOSPERBANK;
+ gpio->gc.can_sleep = true;
+
+ ret = devm_gpiochip_add_data(dev, &gpio->gc, gpio);
+ if (ret)
+ return ret;
+
+ if (has_acpi_companion(dev))
+ acpi_dev_clear_dependencies(ACPI_COMPANION(dev));
+
+ return 0;
+}
+
+static const struct auxiliary_device_id usbio_gpio_id_table[] = {
+ { "usbio.usbio-gpio" },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, usbio_gpio_id_table);
+
+static struct auxiliary_driver usbio_gpio_driver = {
+ .name = USBIO_GPIO_CLIENT,
+ .probe = usbio_gpio_probe,
+ .id_table = usbio_gpio_id_table
+};
+module_auxiliary_driver(usbio_gpio_driver);
+
+MODULE_DESCRIPTION("Intel USBIO GPIO driver");
+MODULE_AUTHOR("Israel Cepeda <israel.a.cepeda.lopez@intel.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("USBIO");
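
usbio_gpio_update_config() above relies on guard(mutex)() from <linux/cleanup.h>: the mutex is unlocked automatically when the enclosing scope ends, so every return after the guard line is implicitly safe. A tiny illustration (the counted struct is an assumption):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct counted {
	struct mutex lock;
	unsigned int count;
};

static int counted_bump(struct counted *c, unsigned int max)
{
	guard(mutex)(&c->lock);	/* released on any exit from this scope */

	if (c->count >= max)
		return -ENOSPC;	/* no explicit mutex_unlock() needed */

	c->count++;
	return 0;
}
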
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 7de0d5b53d56..aa8586d8a787 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -28,7 +29,7 @@ struct fsl_gpio_soc_data {
};
struct vf610_gpio_port {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -108,7 +109,7 @@ static void vf610_gpio_irq_handler(struct irq_desc *desc)
for_each_set_bit(pin, &irq_isfr, VF610_GPIO_PER_PORT) {
vf610_gpio_writel(BIT(pin), port->base + PORT_ISFR);
- generic_handle_domain_irq(port->gc.irq.domain, pin);
+ generic_handle_domain_irq(port->chip.gc.irq.domain, pin);
}
chained_irq_exit(chip, desc);
@@ -214,6 +215,7 @@ static void vf610_gpio_disable_clk(void *data)
static int vf610_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct vf610_gpio_port *port;
struct gpio_chip *gc;
@@ -293,22 +295,27 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return ret;
}
- gc = &port->gc;
- flags = BGPIOF_PINCTRL_BACKEND;
+ gc = &port->chip.gc;
+ flags = GPIO_GENERIC_PINCTRL_BACKEND;
/*
* We only read the output register for current value on output
* lines if the direction register is available so we can switch
* direction.
*/
if (port->sdata->have_paddr)
- flags |= BGPIOF_READ_OUTPUT_REG_SET;
- ret = bgpio_init(gc, dev, 4,
- port->gpio_base + GPIO_PDIR,
- port->gpio_base + GPIO_PDOR,
- NULL,
- port->sdata->have_paddr ? port->gpio_base + GPIO_PDDR : NULL,
- NULL,
- flags);
+ flags |= GPIO_GENERIC_READ_OUTPUT_REG_SET;
+
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = port->gpio_base + GPIO_PDIR,
+ .set = port->gpio_base + GPIO_PDOR,
+ .dirout = port->sdata->have_paddr ?
+ port->gpio_base + GPIO_PDDR : NULL,
+ .flags = flags,
+ };
+
+ ret = gpio_generic_chip_init(&port->chip, &config);
if (ret)
return dev_err_probe(dev, ret, "unable to init generic GPIO\n");
gc->label = dev_name(dev);
diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c
index a10eab7d2617..37f2ce20f1ae 100644
--- a/drivers/gpio/gpio-virtuser.c
+++ b/drivers/gpio/gpio-virtuser.c
@@ -500,9 +500,7 @@ static int gpio_virtuser_value_set(void *data, u64 val)
if (val > 1)
return -EINVAL;
- gpiod_set_value_cansleep(ld->ad.desc, (int)val);
-
- return 0;
+ return gpiod_set_value_cansleep(ld->ad.desc, (int)val);
}
DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_value_fops,
@@ -543,7 +541,7 @@ static void gpio_virtuser_set_value_atomic(struct irq_work *work)
struct gpio_virtuser_irq_work_context *ctx =
to_gpio_virtuser_irq_work_context(work);
- gpiod_set_value(ctx->desc, ctx->val);
+ ctx->ret = gpiod_set_value(ctx->desc, ctx->val);
complete(&ctx->work_completion);
}
@@ -562,7 +560,7 @@ static int gpio_virtuser_value_atomic_set(void *data, u64 val)
gpio_virtuser_irq_work_queue_sync(&ctx);
- return 0;
+ return ctx.ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(gpio_virtuser_value_atomic_fops,
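
The virtuser hunks stop discarding the setter's result: the hard-irq work now stores the gpiod_set_value() return code in its context and the caller propagates it. The synchronous irq_work shape looks roughly like this, with the struct layout assumed from the fields visible above:

#include <linux/completion.h>
#include <linux/container_of.h>
#include <linux/gpio/consumer.h>
#include <linux/irq_work.h>

struct set_value_ctx {
	struct irq_work work;
	struct completion done;
	struct gpio_desc *desc;
	int val;
	int ret;	/* carries the gpiod_set_value() result back out */
};

static void set_value_hard_irq(struct irq_work *work)
{
	struct set_value_ctx *ctx =
		container_of(work, struct set_value_ctx, work);

	/* Hard-irq context, hence the non-sleeping setter variant. */
	ctx->ret = gpiod_set_value(ctx->desc, ctx->val);
	complete(&ctx->done);
}

static int set_value_atomic_sync(struct gpio_desc *desc, int val)
{
	struct set_value_ctx ctx = {
		.work = IRQ_WORK_INIT_HARD(set_value_hard_irq),
		.desc = desc,
		.val = val,
	};

	init_completion(&ctx.done);
	irq_work_queue(&ctx.work);
	wait_for_completion(&ctx.done);

	return ctx.ret;	/* previously this path returned 0 unconditionally */
}
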
diff --git a/drivers/gpio/gpio-visconti.c b/drivers/gpio/gpio-visconti.c
index 5bd965c18a46..6d5d829634ad 100644
--- a/drivers/gpio/gpio-visconti.c
+++ b/drivers/gpio/gpio-visconti.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -32,7 +33,7 @@
struct visconti_gpio {
void __iomem *base;
spinlock_t lock; /* protect gpio register */
- struct gpio_chip gpio_chip;
+ struct gpio_generic_chip chip;
struct device *dev;
};
@@ -158,6 +159,7 @@ static const struct irq_chip visconti_gpio_irq_chip = {
static int visconti_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct visconti_gpio *priv;
struct gpio_irq_chip *girq;
@@ -189,19 +191,22 @@ static int visconti_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = bgpio_init(&priv->gpio_chip, dev, 4,
- priv->base + GPIO_IDATA,
- priv->base + GPIO_OSET,
- priv->base + GPIO_OCLR,
- priv->base + GPIO_DIR,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = priv->base + GPIO_IDATA,
+ .set = priv->base + GPIO_OSET,
+ .clr = priv->base + GPIO_OCLR,
+ .dirout = priv->base + GPIO_DIR,
+ };
+
+ ret = gpio_generic_chip_init(&priv->chip, &config);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
}
- girq = &priv->gpio_chip.irq;
+ girq = &priv->chip.gc.irq;
gpio_irq_chip_set_chip(girq, &visconti_gpio_irq_chip);
girq->fwnode = dev_fwnode(dev);
girq->parent_domain = parent;
@@ -210,7 +215,7 @@ static int visconti_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- return devm_gpiochip_add_data(dev, &priv->gpio_chip, priv);
+ return devm_gpiochip_add_data(dev, &priv->chip.gc, priv);
}
static const struct of_device_id visconti_gpio_of_match[] = {
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 4af504c23e6f..572b85e77370 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -103,7 +103,7 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
chip->label = dev_name(dev);
- chip->can_sleep = false;
+ chip->can_sleep = true;
return devm_gpiochip_add_data(dev, chip, data);
}
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index f03c0e808fab..489479d6f32b 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -159,7 +159,6 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int i, tristated;
for (i = 0; i < chip->ngpio; i++) {
- int gpio = i + chip->base;
int reg;
const char *pull, *powerdomain;
@@ -175,13 +174,13 @@ static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
}
seq_printf(s, " gpio-%-3d (%-20.20s) ",
- gpio, label ?: "Unrequested");
+ i, label ?: "Unrequested");
reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i);
if (reg < 0) {
dev_err(wm831x->dev,
"GPIO control %d read failed: %d\n",
- gpio, reg);
+ i, reg);
seq_putc(s, '\n');
continue;
}
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index df47a27f508d..a0665cf3ff2f 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -194,7 +194,6 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
int i;
for (i = 0; i < chip->ngpio; i++) {
- int gpio = i + chip->base;
int reg;
/* We report the GPIO even if it's not requested since
@@ -208,14 +207,13 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue;
}
- seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio,
+ seq_printf(s, " gpio-%-3d (%-20.20s) ", i,
label ?: "Unrequested");
reg = wm8994_reg_read(wm8994, WM8994_GPIO_1 + i);
if (reg < 0) {
dev_err(wm8994->dev,
- "GPIO control %d read failed: %d\n",
- gpio, reg);
+ "GPIO control %d read failed: %d\n", i, reg);
seq_printf(s, "\n");
continue;
}
diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c
index b51b1fa726bb..661259f026e1 100644
--- a/drivers/gpio/gpio-xgene-sb.c
+++ b/drivers/gpio/gpio-xgene-sb.c
@@ -21,6 +21,7 @@
#include <linux/types.h>
#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include "gpiolib-acpi.h"
@@ -40,7 +41,7 @@
/**
* struct xgene_gpio_sb - GPIO-Standby private data structure.
- * @gc: memory-mapped GPIO controllers.
+ * @chip: Generic GPIO chip data
* @regs: GPIO register base offset
* @irq_domain: GPIO interrupt domain
* @irq_start: first GPIO pin with interrupt support
@@ -48,7 +49,7 @@
* @parent_irq_base: Start parent HWIRQ
*/
struct xgene_gpio_sb {
- struct gpio_chip gc;
+ struct gpio_generic_chip chip;
void __iomem *regs;
struct irq_domain *irq_domain;
u16 irq_start;
@@ -62,14 +63,15 @@ struct xgene_gpio_sb {
static void xgene_gpio_set_bit(struct gpio_chip *gc,
void __iomem *reg, u32 gpio, int val)
{
+ struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
u32 data;
- data = gc->read_reg(reg);
+ data = gpio_generic_read_reg(chip, reg);
if (val)
data |= GPIO_MASK(gpio);
else
data &= ~GPIO_MASK(gpio);
- gc->write_reg(reg, data);
+ gpio_generic_write_reg(chip, reg, data);
}
static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
@@ -91,9 +93,9 @@ static int xgene_gpio_sb_irq_set_type(struct irq_data *d, unsigned int type)
break;
}
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_INT_LVL,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_INT_LVL,
d->hwirq, lvl_type);
/* Propagate IRQ type setting to parent */
@@ -109,14 +111,14 @@ static void xgene_gpio_sb_irq_mask(struct irq_data *d)
irq_chip_mask_parent(d);
- gpiochip_disable_irq(&priv->gc, d->hwirq);
+ gpiochip_disable_irq(&priv->chip.gc, d->hwirq);
}
static void xgene_gpio_sb_irq_unmask(struct irq_data *d)
{
struct xgene_gpio_sb *priv = irq_data_get_irq_chip_data(d);
- gpiochip_enable_irq(&priv->gc, d->hwirq);
+ gpiochip_enable_irq(&priv->chip.gc, d->hwirq);
irq_chip_unmask_parent(d);
}
@@ -155,15 +157,15 @@ static int xgene_gpio_sb_domain_activate(struct irq_domain *d,
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
int ret;
- ret = gpiochip_lock_as_irq(&priv->gc, gpio);
+ ret = gpiochip_lock_as_irq(&priv->chip.gc, gpio);
if (ret) {
- dev_err(priv->gc.parent,
+ dev_err(priv->chip.gc.parent,
"Unable to configure XGene GPIO standby pin %d as IRQ\n",
gpio);
return ret;
}
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 1);
return 0;
}
@@ -174,8 +176,8 @@ static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d,
struct xgene_gpio_sb *priv = d->host_data;
u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq);
- gpiochip_unlock_as_irq(&priv->gc, gpio);
- xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO,
+ gpiochip_unlock_as_irq(&priv->chip.gc, gpio);
+ xgene_gpio_set_bit(&priv->chip.gc, priv->regs + MPA_GPIO_SEL_LO,
gpio * 2, 0);
}
@@ -237,6 +239,7 @@ static const struct irq_domain_ops xgene_gpio_sb_domain_ops = {
static int xgene_gpio_sb_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct xgene_gpio_sb *priv;
int ret;
void __iomem *regs;
@@ -263,14 +266,19 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = bgpio_init(&priv->gc, &pdev->dev, 4,
- regs + MPA_GPIO_IN_ADDR,
- regs + MPA_GPIO_OUT_ADDR, NULL,
- regs + MPA_GPIO_OE_ADDR, NULL, 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = &pdev->dev,
+ .sz = 4,
+ .dat = regs + MPA_GPIO_IN_ADDR,
+ .set = regs + MPA_GPIO_OUT_ADDR,
+ .dirout = regs + MPA_GPIO_OE_ADDR,
+ };
+
+ ret = gpio_generic_chip_init(&priv->chip, &config);
if (ret)
return ret;
- priv->gc.to_irq = xgene_gpio_sb_to_irq;
+ priv->chip.gc.to_irq = xgene_gpio_sb_to_irq;
/* Retrieve the start IRQ pin, use default if property not found */
priv->irq_start = XGENE_DFLT_IRQ_START_PIN;
@@ -283,12 +291,12 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
priv->nirq = val32;
/* Retrieve the number of GPIOs, use default if property not found */
- priv->gc.ngpio = XGENE_DFLT_MAX_NGPIO;
+ priv->chip.gc.ngpio = XGENE_DFLT_MAX_NGPIO;
if (!device_property_read_u32(&pdev->dev, "apm,nr-gpios", &val32))
- priv->gc.ngpio = val32;
+ priv->chip.gc.ngpio = val32;
dev_info(&pdev->dev, "Support %d gpios, %d irqs start from pin %d\n",
- priv->gc.ngpio, priv->nirq, priv->irq_start);
+ priv->chip.gc.ngpio, priv->nirq, priv->irq_start);
platform_set_drvdata(pdev, priv);
@@ -298,9 +306,9 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
if (!priv->irq_domain)
return -ENODEV;
- priv->gc.irq.domain = priv->irq_domain;
+ priv->chip.gc.irq.domain = priv->irq_domain;
- ret = devm_gpiochip_add_data(&pdev->dev, &priv->gc, priv);
+ ret = devm_gpiochip_add_data(&pdev->dev, &priv->chip.gc, priv);
if (ret) {
dev_err(&pdev->dev,
"failed to register X-Gene GPIO Standby driver\n");
@@ -311,7 +319,7 @@ static int xgene_gpio_sb_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "X-Gene GPIO Standby driver registered\n");
/* Register interrupt handlers for GPIO signaled ACPI Events */
- acpi_gpiochip_request_interrupts(&priv->gc);
+ acpi_gpiochip_request_interrupts(&priv->chip.gc);
return ret;
}
@@ -320,7 +328,7 @@ static void xgene_gpio_sb_remove(struct platform_device *pdev)
{
struct xgene_gpio_sb *priv = platform_get_drvdata(pdev);
- acpi_gpiochip_free_interrupts(&priv->gc);
+ acpi_gpiochip_free_interrupts(&priv->chip.gc);
irq_domain_remove(priv->irq_domain);
}
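
With the bgpio fields gone from struct gpio_chip, raw register access goes through the gpio_generic_read_reg()/gpio_generic_write_reg() accessors on the embedded generic chip, as xgene_gpio_set_bit() above shows. The read-modify-write core, distilled (locking is the caller's responsibility here, matching the original helper):

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>

static void generic_set_bit_sketch(struct gpio_chip *gc, void __iomem *reg,
				   u32 bit, int val)
{
	struct gpio_generic_chip *chip = to_gpio_generic_chip(gc);
	u32 data = gpio_generic_read_reg(chip, reg);

	if (val)
		data |= BIT(bit);
	else
		data &= ~BIT(bit);

	gpio_generic_write_reg(chip, reg, data);
}
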
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c
index 4f627de3f56c..809668449dbe 100644
--- a/drivers/gpio/gpio-xgene.c
+++ b/drivers/gpio/gpio-xgene.c
@@ -130,7 +130,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc,
return 0;
}
-static __maybe_unused int xgene_gpio_suspend(struct device *dev)
+static int xgene_gpio_suspend(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;
@@ -143,7 +143,7 @@ static __maybe_unused int xgene_gpio_suspend(struct device *dev)
return 0;
}
-static __maybe_unused int xgene_gpio_resume(struct device *dev)
+static int xgene_gpio_resume(struct device *dev)
{
struct xgene_gpio *gpio = dev_get_drvdata(dev);
unsigned long bank_offset;
@@ -156,7 +156,7 @@ static __maybe_unused int xgene_gpio_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume);
static int xgene_gpio_probe(struct platform_device *pdev)
{
@@ -204,7 +204,7 @@ static struct platform_driver xgene_gpio_driver = {
.name = "xgene-gpio",
.of_match_table = xgene_gpio_of_match,
.acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match),
- .pm = &xgene_gpio_pm,
+ .pm = pm_sleep_ptr(&xgene_gpio_pm),
},
.probe = xgene_gpio_probe,
};
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index 93544e98ccbd..77eb29dcc217 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -3,11 +3,12 @@
* Copyright (C) 2017 Broadcom
*/
-#include <linux/gpio/driver.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/generic.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -28,7 +29,7 @@
#define IPROC_GPIO_CCA_INT_EDGE 0x24
struct iproc_gpio_chip {
- struct gpio_chip gc;
+ struct gpio_generic_chip gen_gc;
spinlock_t lock;
struct device *dev;
void __iomem *base;
@@ -38,7 +39,7 @@ struct iproc_gpio_chip {
static inline struct iproc_gpio_chip *
to_iproc_gpio(struct gpio_chip *gc)
{
- return container_of(gc, struct iproc_gpio_chip, gc);
+ return container_of(to_gpio_generic_chip(gc), struct iproc_gpio_chip, gen_gc);
}
static void iproc_gpio_irq_ack(struct irq_data *d)
@@ -213,6 +214,7 @@ static const struct irq_chip iproc_gpio_irq_chip = {
static int iproc_gpio_probe(struct platform_device *pdev)
{
+ struct gpio_generic_chip_config config;
struct device *dev = &pdev->dev;
struct device_node *dn = pdev->dev.of_node;
struct iproc_gpio_chip *chip;
@@ -231,21 +233,23 @@ static int iproc_gpio_probe(struct platform_device *pdev)
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
- ret = bgpio_init(&chip->gc, dev, 4,
- chip->base + IPROC_GPIO_CCA_DIN,
- chip->base + IPROC_GPIO_CCA_DOUT,
- NULL,
- chip->base + IPROC_GPIO_CCA_OUT_EN,
- NULL,
- 0);
+ config = (struct gpio_generic_chip_config) {
+ .dev = dev,
+ .sz = 4,
+ .dat = chip->base + IPROC_GPIO_CCA_DIN,
+ .set = chip->base + IPROC_GPIO_CCA_DOUT,
+ .dirout = chip->base + IPROC_GPIO_CCA_OUT_EN,
+ };
+
+ ret = gpio_generic_chip_init(&chip->gen_gc, &config);
if (ret) {
dev_err(dev, "unable to init GPIO chip\n");
return ret;
}
- chip->gc.label = dev_name(dev);
+ chip->gen_gc.gc.label = dev_name(dev);
if (!of_property_read_u32(dn, "ngpios", &num_gpios))
- chip->gc.ngpio = num_gpios;
+ chip->gen_gc.gc.ngpio = num_gpios;
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
@@ -266,13 +270,13 @@ static int iproc_gpio_probe(struct platform_device *pdev)
* a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, iproc_gpio_irq_handler,
- IRQF_SHARED, chip->gc.label, &chip->gc);
+ IRQF_SHARED, chip->gen_gc.gc.label, &chip->gen_gc.gc);
if (ret) {
dev_err(dev, "Fail to request IRQ%d: %d\n", irq, ret);
return ret;
}
- girq = &chip->gc.irq;
+ girq = &chip->gen_gc.gc.irq;
gpio_irq_chip_set_chip(girq, &iproc_gpio_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
@@ -282,7 +286,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
girq->handler = handle_simple_irq;
}
- ret = devm_gpiochip_add_data(dev, &chip->gc, chip);
+ ret = devm_gpiochip_add_data(dev, &chip->gen_gc.gc, chip);
if (ret) {
dev_err(dev, "unable to add GPIO chip\n");
return ret;
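
Because the iproc driver struct now embeds struct gpio_generic_chip rather than a bare gpio_chip, the downcast is two-stage: to_gpio_generic_chip() maps the gpio_chip back to its generic wrapper, then container_of() recovers the enclosing driver data. The shape, with mychip naming assumed:

#include <linux/container_of.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/generic.h>

struct mychip {
	struct gpio_generic_chip gen_gc;	/* the gpio_chip is gen_gc.gc */
	void __iomem *base;
};

static inline struct mychip *to_mychip(struct gpio_chip *gc)
{
	/* gpio_chip -> gpio_generic_chip -> enclosing driver struct */
	return container_of(to_gpio_generic_chip(gc), struct mychip, gen_gc);
}
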
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 83675ac81077..be4b4d730547 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -286,7 +286,7 @@ static void xgpio_free(struct gpio_chip *chip, unsigned int offset)
pm_runtime_put(chip->parent);
}
-static int __maybe_unused xgpio_suspend(struct device *dev)
+static int xgpio_suspend(struct device *dev)
{
struct xgpio_instance *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
@@ -327,7 +327,7 @@ static void xgpio_irq_ack(struct irq_data *irq_data)
{
}
-static int __maybe_unused xgpio_resume(struct device *dev)
+static int xgpio_resume(struct device *dev)
{
struct xgpio_instance *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
@@ -343,7 +343,7 @@ static int __maybe_unused xgpio_resume(struct device *dev)
return 0;
}
-static int __maybe_unused xgpio_runtime_suspend(struct device *dev)
+static int xgpio_runtime_suspend(struct device *dev)
{
struct xgpio_instance *gpio = dev_get_drvdata(dev);
@@ -352,7 +352,7 @@ static int __maybe_unused xgpio_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused xgpio_runtime_resume(struct device *dev)
+static int xgpio_runtime_resume(struct device *dev)
{
struct xgpio_instance *gpio = dev_get_drvdata(dev);
@@ -360,9 +360,8 @@ static int __maybe_unused xgpio_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops xgpio_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume)
- SET_RUNTIME_PM_OPS(xgpio_runtime_suspend,
- xgpio_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(xgpio_suspend, xgpio_resume)
+ RUNTIME_PM_OPS(xgpio_runtime_suspend, xgpio_runtime_resume, NULL)
};
/**
@@ -682,7 +681,7 @@ static struct platform_driver xgpio_plat_driver = {
.driver = {
.name = "gpio-xilinx",
.of_match_table = xgpio_of_match,
- .pm = &xgpio_dev_pm_ops,
+ .pm = pm_ptr(&xgpio_dev_pm_ops),
},
};
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index faadcb4b0b2d..7f3c98f9f902 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -135,8 +135,7 @@ static void xra1403_dbg_show(struct seq_file *s, struct gpio_chip *chip)
gcr = value[XRA_GCR + 1] << 8 | value[XRA_GCR];
gsr = value[XRA_GSR + 1] << 8 | value[XRA_GSR];
for_each_requested_gpio(chip, i, label) {
- seq_printf(s, " gpio-%-3d (%-12s) %s %s\n",
- chip->base + i, label,
+ seq_printf(s, " gpio-%-3d (%-12s) %s %s\n", i, label,
(gcr & BIT(i)) ? "in" : "out",
str_hi_lo(gsr & BIT(i)));
}
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 0ffd76e8951f..97780c57ab56 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -735,7 +735,7 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
}
}
-static int __maybe_unused zynq_gpio_suspend(struct device *dev)
+static int zynq_gpio_suspend(struct device *dev)
{
struct zynq_gpio *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
@@ -756,7 +756,7 @@ static int __maybe_unused zynq_gpio_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused zynq_gpio_resume(struct device *dev)
+static int zynq_gpio_resume(struct device *dev)
{
struct zynq_gpio *gpio = dev_get_drvdata(dev);
struct irq_data *data = irq_get_irq_data(gpio->irq);
@@ -779,7 +779,7 @@ static int __maybe_unused zynq_gpio_resume(struct device *dev)
return 0;
}
-static int __maybe_unused zynq_gpio_runtime_suspend(struct device *dev)
+static int zynq_gpio_runtime_suspend(struct device *dev)
{
struct zynq_gpio *gpio = dev_get_drvdata(dev);
@@ -788,7 +788,7 @@ static int __maybe_unused zynq_gpio_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused zynq_gpio_runtime_resume(struct device *dev)
+static int zynq_gpio_runtime_resume(struct device *dev)
{
struct zynq_gpio *gpio = dev_get_drvdata(dev);
@@ -814,9 +814,8 @@ static void zynq_gpio_free(struct gpio_chip *chip, unsigned int offset)
}
static const struct dev_pm_ops zynq_gpio_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume)
- SET_RUNTIME_PM_OPS(zynq_gpio_runtime_suspend,
- zynq_gpio_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(zynq_gpio_suspend, zynq_gpio_resume)
+ RUNTIME_PM_OPS(zynq_gpio_runtime_suspend, zynq_gpio_runtime_resume, NULL)
};
static const struct zynq_platform_data versal_gpio_def = {
@@ -1022,7 +1021,7 @@ static void zynq_gpio_remove(struct platform_device *pdev)
static struct platform_driver zynq_gpio_driver = {
.driver = {
.name = DRIVER_NAME,
- .pm = &zynq_gpio_dev_pm_ops,
+ .pm = pm_ptr(&zynq_gpio_dev_pm_ops),
.of_match_table = zynq_gpio_of_match,
},
.probe = zynq_gpio_probe,
diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c
index 12b24a717e43..83dd227dbbec 100644
--- a/drivers/gpio/gpiolib-acpi-core.c
+++ b/drivers/gpio/gpiolib-acpi-core.c
@@ -291,6 +291,19 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
return GPIOD_ASIS;
}
+static void acpi_gpio_set_debounce_timeout(struct gpio_desc *desc,
+ unsigned int acpi_debounce)
+{
+ int ret;
+
+ /* ACPI uses hundredths of milliseconds units */
+ acpi_debounce *= 10;
+ ret = gpio_set_debounce_timeout(desc, acpi_debounce);
+ if (ret)
+ gpiod_warn(desc, "Failed to set debounce-timeout %u: %d\n",
+ acpi_debounce, ret);
+}
+
static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
struct acpi_resource_gpio *agpio,
unsigned int index,
@@ -300,18 +313,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
unsigned int pin = agpio->pin_table[index];
struct gpio_desc *desc;
- int ret;
desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
if (IS_ERR(desc))
return desc;
- /* ACPI uses hundredths of milliseconds units */
- ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10);
- if (ret)
- dev_warn(chip->parent,
- "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
- pin, ret);
+ acpi_gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
return desc;
}
@@ -375,8 +382,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->parent,
- "Failed to request GPIO for pin 0x%04X, err %ld\n",
- pin, PTR_ERR(desc));
+ "Failed to request GPIO for pin 0x%04X, err %pe\n",
+ pin, desc);
return AE_OK;
}
@@ -942,7 +949,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
{
struct acpi_device *adev = to_acpi_device_node(fwnode);
bool can_fallback = acpi_can_fallback_to_crs(adev, con_id);
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info);
@@ -957,6 +964,9 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode,
acpi_gpio_update_gpiod_flags(dflags, &info);
acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info);
+
+ acpi_gpio_set_debounce_timeout(desc, info.debounce);
+
return desc;
}
@@ -992,7 +1002,7 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id,
int ret;
for (i = 0, idx = 0; idx <= index; i++) {
- struct acpi_gpio_info info;
+ struct acpi_gpio_info info = {};
struct gpio_desc *desc;
/* Ignore -EPROBE_DEFER, it only matters if idx matches */
@@ -1089,7 +1099,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
return AE_BAD_PARAMETER;
}
- length = min_t(u16, agpio->pin_table_length, pin_index + bits);
+ length = min(agpio->pin_table_length, pin_index + bits);
for (i = pin_index; i < length; ++i) {
unsigned int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
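
Two idioms recur in the gpiolib-acpi-core hunk: zero-initializing struct acpi_gpio_info so the newly consumed debounce field never carries stack garbage, and printing error pointers with %pe, which renders a symbolic errno rather than a raw number. A small sketch of the %pe pattern:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int report_bad_desc(struct device *dev, struct gpio_desc *desc,
			   unsigned int pin)
{
	if (!IS_ERR(desc))
		return 0;

	/* %pe decodes an ERR_PTR into its errno name, e.g. -EPROBE_DEFER */
	dev_err(dev, "Failed to request GPIO for pin 0x%04X, err %pe\n",
		pin, desc);

	return PTR_ERR(desc);
}
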
diff --git a/drivers/gpio/gpiolib-acpi-quirks.c b/drivers/gpio/gpiolib-acpi-quirks.c
index bfb04e67c4bc..7b95d1b03361 100644
--- a/drivers/gpio/gpiolib-acpi-quirks.c
+++ b/drivers/gpio/gpiolib-acpi-quirks.c
@@ -319,6 +319,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
},
{
/*
+ * Same as G1619-04. New model.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "PNP0C50:00@8",
+ },
+ },
+ {
+ /*
* Spurious wakeups from GPIO 11
* Found in BIOS 1.04
* https://gitlab.freedesktop.org/drm/amd/-/issues/3954
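
Quirk entries like the new G1619-05 one are plain dmi_system_id records whose driver_data points at an acpi_gpiolib_dmi_quirk. A table of this shape is typically resolved once at init time with dmi_first_match(), roughly as below (the helper name is an assumption; the types are the ones used above):

#include <linux/dmi.h>

static const struct acpi_gpiolib_dmi_quirk *
find_gpio_quirk_sketch(const struct dmi_system_id *quirks)
{
	const struct dmi_system_id *id;

	id = dmi_first_match(quirks);	/* NULL when no entry matches */
	if (!id)
		return NULL;

	return id->driver_data;	/* e.g. ->ignore_wake = "PNP0C50:00@8" */
}
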
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index e6a289fa0f8f..3735c9fe1502 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -144,17 +144,17 @@ static void linehandle_flags_to_desc_flags(u32 lflags, unsigned long *flagsp)
{
unsigned long flags = READ_ONCE(*flagsp);
- assign_bit(FLAG_ACTIVE_LOW, &flags,
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW);
- assign_bit(FLAG_OPEN_DRAIN, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, &flags,
+ assign_bit(GPIOD_FLAG_PULL_UP, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, &flags,
+ assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, &flags,
+ assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
lflags & GPIOHANDLE_REQUEST_BIAS_DISABLE);
WRITE_ONCE(*flagsp, flags);
@@ -238,7 +238,7 @@ static long linehandle_ioctl(struct file *file, unsigned int cmd,
* All line descriptors were created at once with the same
* flags so just check if the first one is really output.
*/
- if (!test_bit(FLAG_IS_OUT, &lh->descs[0]->flags))
+ if (!test_bit(GPIOD_FLAG_IS_OUT, &lh->descs[0]->flags))
return -EPERM;
if (copy_from_user(&ghd, ip, sizeof(ghd)))
@@ -298,12 +298,13 @@ static const struct file_operations linehandle_fileops = {
#endif
};
+DEFINE_FREE(linehandle_free, struct linehandle_state *, if (!IS_ERR_OR_NULL(_T)) linehandle_free(_T))
+
static int linehandle_create(struct gpio_device *gdev, void __user *ip)
{
struct gpiohandle_request handlereq;
- struct linehandle_state *lh;
- struct file *file;
- int fd, i, ret;
+ struct linehandle_state *lh __free(linehandle_free) = NULL;
+ int i, ret;
u32 lflags;
if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
@@ -327,10 +328,8 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
lh->label = kstrndup(handlereq.consumer_label,
sizeof(handlereq.consumer_label) - 1,
GFP_KERNEL);
- if (!lh->label) {
- ret = -ENOMEM;
- goto out_free_lh;
- }
+ if (!lh->label)
+ return -ENOMEM;
}
lh->num_descs = handlereq.lines;
@@ -340,20 +339,18 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
u32 offset = handlereq.lineoffsets[i];
struct gpio_desc *desc = gpio_device_get_desc(gdev, offset);
- if (IS_ERR(desc)) {
- ret = PTR_ERR(desc);
- goto out_free_lh;
- }
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
ret = gpiod_request_user(desc, lh->label);
if (ret)
- goto out_free_lh;
+ return ret;
lh->descs[i] = desc;
linehandle_flags_to_desc_flags(handlereq.flags, &desc->flags);
ret = gpiod_set_transitory(desc, false);
if (ret < 0)
- goto out_free_lh;
+ return ret;
/*
* Lines have to be requested explicitly for input
@@ -364,11 +361,11 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
ret = gpiod_direction_output_nonotify(desc, val);
if (ret)
- goto out_free_lh;
+ return ret;
} else if (lflags & GPIOHANDLE_REQUEST_INPUT) {
ret = gpiod_direction_input_nonotify(desc);
if (ret)
- goto out_free_lh;
+ return ret;
}
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
@@ -377,44 +374,23 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
offset);
}
- fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- ret = fd;
- goto out_free_lh;
- }
-
- file = anon_inode_getfile("gpio-linehandle",
- &linehandle_fileops,
- lh,
- O_RDONLY | O_CLOEXEC);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto out_put_unused_fd;
- }
+ FD_PREPARE(fdf, O_RDONLY | O_CLOEXEC,
+ anon_inode_getfile("gpio-linehandle", &linehandle_fileops,
+ lh, O_RDONLY | O_CLOEXEC));
+ if (fdf.err)
+ return fdf.err;
+ retain_and_null_ptr(lh);
- handlereq.fd = fd;
- if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
- /*
- * fput() will trigger the release() callback, so do not go onto
- * the regular error cleanup path here.
- */
- fput(file);
- put_unused_fd(fd);
+ handlereq.fd = fd_prepare_fd(fdf);
+ if (copy_to_user(ip, &handlereq, sizeof(handlereq)))
return -EFAULT;
- }
- fd_install(fd, file);
+ fd_publish(fdf);
dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
lh->num_descs);
return 0;
-
-out_put_unused_fd:
- put_unused_fd(fd);
-out_free_lh:
- linehandle_free(lh);
- return ret;
}
#endif /* CONFIG_GPIO_CDEV_V1 */
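The linehandle_create() rewrite above combines two cleanup idioms: a DEFINE_FREE() guard with retain_and_null_ptr() to hand ownership of lh to the file once it exists, and an fd-preparation helper that rolls back automatically on failure. A minimal sketch of the latter, with the FD_PREPARE()/fd_prepare_fd()/fd_publish() semantics inferred solely from this hunk (example_state and example_fileops are hypothetical names):

#include <linux/anon_inodes.h>
#include <linux/fcntl.h>

static int example_expose_fd(struct example_state *st)
{
	/* Reserve an fd and create the backing file in one step. */
	FD_PREPARE(fdf, O_RDONLY | O_CLOEXEC,
		   anon_inode_getfile("example", &example_fileops, st,
				      O_RDONLY | O_CLOEXEC));
	if (fdf.err)
		return fdf.err;	/* fd reservation and file are rolled back */

	st->fd = fd_prepare_fd(fdf);	/* the number userspace will see */

	fd_publish(fdf);	/* point of no return: installs the file */
	return 0;
}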
@@ -599,10 +575,10 @@ static void linereq_put_event(struct linereq *lr,
static u64 line_event_timestamp(struct line *line)
{
- if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
+ if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
return ktime_get_real_ns();
else if (IS_ENABLED(CONFIG_HTE) &&
- test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
+ test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))
return line->timestamp_ns;
return ktime_get_ns();
@@ -676,7 +652,7 @@ static enum hte_return process_hw_ts_thread(void *p)
}
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
- le.offset = gpio_chip_hwgpio(line->desc);
+ le.offset = gpiod_hwgpio(line->desc);
linereq_put_event(lr, &le);
@@ -700,7 +676,7 @@ static enum hte_return process_hw_ts(struct hte_ts_data *ts, void *p)
if (READ_ONCE(line->sw_debounced)) {
line->total_discard_seq++;
line->last_seqno = ts->seq;
- mod_delayed_work(system_wq, &line->work,
+ mod_delayed_work(system_percpu_wq, &line->work,
usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
} else {
if (unlikely(ts->seq < line->line_seqno))
@@ -725,11 +701,11 @@ static int hte_edge_setup(struct line *line, u64 eflags)
struct hte_ts_desc *hdesc = &line->hdesc;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_FALLING_EDGE_TS :
HTE_RISING_EDGE_TS;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- flags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
HTE_RISING_EDGE_TS :
HTE_FALLING_EDGE_TS;
@@ -793,7 +769,7 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
line->line_seqno++;
le.line_seqno = line->line_seqno;
le.seqno = (lr->num_lines == 1) ? le.line_seqno : line->req_seqno;
- le.offset = gpio_chip_hwgpio(line->desc);
+ le.offset = gpiod_hwgpio(line->desc);
linereq_put_event(lr, &le);
@@ -831,7 +807,7 @@ static bool debounced_value(struct line *line)
*/
value = READ_ONCE(line->level);
- if (test_bit(FLAG_ACTIVE_LOW, &line->desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags))
value = !value;
return value;
@@ -841,7 +817,7 @@ static irqreturn_t debounce_irq_handler(int irq, void *p)
{
struct line *line = p;
- mod_delayed_work(system_wq, &line->work,
+ mod_delayed_work(system_percpu_wq, &line->work,
usecs_to_jiffies(READ_ONCE(line->desc->debounce_period_us)));
return IRQ_HANDLED;
@@ -891,7 +867,7 @@ static void debounce_work_func(struct work_struct *work)
lr = line->req;
le.timestamp_ns = line_event_timestamp(line);
- le.offset = gpio_chip_hwgpio(line->desc);
+ le.offset = gpiod_hwgpio(line->desc);
#ifdef CONFIG_HTE
if (edflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE) {
/* discard events except the last one */
@@ -939,7 +915,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
return level;
if (!(IS_ENABLED(CONFIG_HTE) &&
- test_bit(FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
+ test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &line->desc->flags))) {
irq = gpiod_to_irq(line->desc);
if (irq < 0)
return -ENXIO;
@@ -1061,10 +1037,10 @@ static int edge_detector_setup(struct line *line,
return -ENXIO;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_RISING)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &line->desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &line->desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
@@ -1237,34 +1213,34 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 lflags,
{
unsigned long flags = READ_ONCE(*flagsp);
- assign_bit(FLAG_ACTIVE_LOW, &flags,
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &flags,
lflags & GPIO_V2_LINE_FLAG_ACTIVE_LOW);
if (lflags & GPIO_V2_LINE_FLAG_OUTPUT)
- set_bit(FLAG_IS_OUT, &flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &flags);
else if (lflags & GPIO_V2_LINE_FLAG_INPUT)
- clear_bit(FLAG_IS_OUT, &flags);
+ clear_bit(GPIOD_FLAG_IS_OUT, &flags);
- assign_bit(FLAG_EDGE_RISING, &flags,
+ assign_bit(GPIOD_FLAG_EDGE_RISING, &flags,
lflags & GPIO_V2_LINE_FLAG_EDGE_RISING);
- assign_bit(FLAG_EDGE_FALLING, &flags,
+ assign_bit(GPIOD_FLAG_EDGE_FALLING, &flags,
lflags & GPIO_V2_LINE_FLAG_EDGE_FALLING);
- assign_bit(FLAG_OPEN_DRAIN, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_DRAIN, &flags,
lflags & GPIO_V2_LINE_FLAG_OPEN_DRAIN);
- assign_bit(FLAG_OPEN_SOURCE, &flags,
+ assign_bit(GPIOD_FLAG_OPEN_SOURCE, &flags,
lflags & GPIO_V2_LINE_FLAG_OPEN_SOURCE);
- assign_bit(FLAG_PULL_UP, &flags,
+ assign_bit(GPIOD_FLAG_PULL_UP, &flags,
lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_UP);
- assign_bit(FLAG_PULL_DOWN, &flags,
+ assign_bit(GPIOD_FLAG_PULL_DOWN, &flags,
lflags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
- assign_bit(FLAG_BIAS_DISABLE, &flags,
+ assign_bit(GPIOD_FLAG_BIAS_DISABLE, &flags,
lflags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
- assign_bit(FLAG_EVENT_CLOCK_REALTIME, &flags,
+ assign_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &flags,
lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
- assign_bit(FLAG_EVENT_CLOCK_HTE, &flags,
+ assign_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &flags,
lflags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE);
WRITE_ONCE(*flagsp, flags);
@@ -1591,7 +1567,7 @@ static void linereq_show_fdinfo(struct seq_file *out, struct file *file)
for (i = 0; i < lr->num_lines; i++)
seq_printf(out, "gpio-line:\t%d\n",
- gpio_chip_hwgpio(lr->lines[i].desc));
+ gpiod_hwgpio(lr->lines[i].desc));
}
#endif
@@ -2115,10 +2091,10 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irqflags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
@@ -2244,7 +2220,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
return;
memset(info, 0, sizeof(*info));
- info->offset = gpio_chip_hwgpio(desc);
+ info->offset = gpiod_hwgpio(desc);
if (desc->name)
strscpy(info->name, desc->name, sizeof(info->name));
@@ -2253,7 +2229,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
scoped_guard(srcu, &desc->gdev->desc_srcu) {
label = gpiod_get_label(desc);
- if (label && test_bit(FLAG_REQUESTED, &dflags))
+ if (label && test_bit(GPIOD_FLAG_REQUESTED, &dflags))
strscpy(info->consumer, label,
sizeof(info->consumer));
}
@@ -2270,10 +2246,10 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
* The definitive test that a line is available to userspace is to
* request it.
*/
- if (test_bit(FLAG_REQUESTED, &dflags) ||
- test_bit(FLAG_IS_HOGGED, &dflags) ||
- test_bit(FLAG_EXPORT, &dflags) ||
- test_bit(FLAG_SYSFS, &dflags) ||
+ if (test_bit(GPIOD_FLAG_REQUESTED, &dflags) ||
+ test_bit(GPIOD_FLAG_IS_HOGGED, &dflags) ||
+ test_bit(GPIOD_FLAG_EXPORT, &dflags) ||
+ test_bit(GPIOD_FLAG_SYSFS, &dflags) ||
!gpiochip_line_is_valid(guard.gc, info->offset)) {
info->flags |= GPIO_V2_LINE_FLAG_USED;
} else if (!atomic) {
@@ -2281,34 +2257,34 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
info->flags |= GPIO_V2_LINE_FLAG_USED;
}
- if (test_bit(FLAG_IS_OUT, &dflags))
+ if (test_bit(GPIOD_FLAG_IS_OUT, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OUTPUT;
else
info->flags |= GPIO_V2_LINE_FLAG_INPUT;
- if (test_bit(FLAG_ACTIVE_LOW, &dflags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_ACTIVE_LOW;
- if (test_bit(FLAG_OPEN_DRAIN, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
- if (test_bit(FLAG_OPEN_SOURCE, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
- if (test_bit(FLAG_BIAS_DISABLE, &dflags))
+ if (test_bit(GPIOD_FLAG_BIAS_DISABLE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_DISABLED;
- if (test_bit(FLAG_PULL_DOWN, &dflags))
+ if (test_bit(GPIOD_FLAG_PULL_DOWN, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN;
- if (test_bit(FLAG_PULL_UP, &dflags))
+ if (test_bit(GPIOD_FLAG_PULL_UP, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_BIAS_PULL_UP;
- if (test_bit(FLAG_EDGE_RISING, &dflags))
+ if (test_bit(GPIOD_FLAG_EDGE_RISING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
- if (test_bit(FLAG_EDGE_FALLING, &dflags))
+ if (test_bit(GPIOD_FLAG_EDGE_FALLING, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
- if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &dflags))
+ if (test_bit(GPIOD_FLAG_EVENT_CLOCK_REALTIME, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
- else if (test_bit(FLAG_EVENT_CLOCK_HTE, &dflags))
+ else if (test_bit(GPIOD_FLAG_EVENT_CLOCK_HTE, &dflags))
info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
debounce_period_us = READ_ONCE(desc->debounce_period_us);
@@ -2548,8 +2524,15 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
struct lineinfo_changed_ctx *ctx;
struct gpio_desc *desc = data;
+ struct file *fp;
+
+ if (!test_bit(gpiod_hwgpio(desc), cdev->watched_lines))
+ return NOTIFY_DONE;
- if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
+ /* Keep the file descriptor alive for the duration of the notification. */
+ fp = get_file_active(&cdev->fp);
+ if (!fp)
+ /* Chardev file descriptor was or is being released. */
return NOTIFY_DONE;
/*
@@ -2575,8 +2558,6 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
/* Keep the GPIO device alive until we emit the event. */
ctx->gdev = gpio_device_get(desc->gdev);
ctx->cdev = cdev;
- /* Keep the file descriptor alive too. */
- get_file(ctx->cdev->fp);
INIT_WORK(&ctx->work, lineinfo_changed_func);
queue_work(ctx->gdev->line_state_wq, &ctx->work);
@@ -2823,7 +2804,7 @@ int gpiolib_cdev_register(struct gpio_device *gdev, dev_t devt)
if (!gc)
return -ENODEV;
- chip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
+ gpiochip_dbg(gc, "added GPIO chardev (%d:%d)\n", MAJOR(devt), gdev->id);
return 0;
}
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 3bc93ccadb5b..ef3f2ef30cf2 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -34,30 +34,20 @@ EXPORT_SYMBOL_GPL(gpio_free);
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
- struct gpio_desc *desc;
int err;
- /* Compatibility: assume unavailable "valid" GPIOs will appear later */
- desc = gpio_to_desc(gpio);
- if (!desc)
- return -EPROBE_DEFER;
-
- err = gpiod_request(desc, label);
+ err = gpio_request(gpio, label);
if (err)
return err;
if (flags & GPIOF_IN)
- err = gpiod_direction_input(desc);
+ err = gpio_direction_input(gpio);
else
- err = gpiod_direction_output_raw(desc, !!(flags & GPIOF_OUT_INIT_HIGH));
+ err = gpio_direction_output(gpio, !!(flags & GPIOF_OUT_INIT_HIGH));
if (err)
- goto free_gpio;
-
- return 0;
+ gpio_free(gpio);
- free_gpio:
- gpiod_free(desc);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
@@ -78,11 +68,9 @@ int gpio_request(unsigned gpio, const char *label)
}
EXPORT_SYMBOL_GPL(gpio_request);
-static void devm_gpio_release(struct device *dev, void *res)
+static void devm_gpio_release(void *gpio)
{
- unsigned *gpio = res;
-
- gpio_free(*gpio);
+ gpio_free((unsigned)(unsigned long)gpio);
}
/**
@@ -100,22 +88,22 @@ static void devm_gpio_release(struct device *dev, void *res)
int devm_gpio_request_one(struct device *dev, unsigned gpio,
unsigned long flags, const char *label)
{
- unsigned *dr;
int rc;
- dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
- if (!dr)
- return -ENOMEM;
+ rc = gpio_request(gpio, label);
+ if (rc)
+ return rc;
+
+ if (flags & GPIOF_IN)
+ rc = gpio_direction_input(gpio);
+ else
+ rc = gpio_direction_output(gpio, !!(flags & GPIOF_OUT_INIT_HIGH));
- rc = gpio_request_one(gpio, flags, label);
if (rc) {
- devres_free(dr);
+ gpio_free(gpio);
return rc;
}
- *dr = gpio;
- devres_add(dev, dr);
-
- return 0;
+ return devm_add_action_or_reset(dev, devm_gpio_release, (void *)(unsigned long)gpio);
}
EXPORT_SYMBOL_GPL(devm_gpio_request_one);
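The devm_gpio_request_one() rewrite drops the dedicated devres record and instead encodes the GPIO number directly in the action cookie passed to devm_add_action_or_reset(). The same idiom in isolation (acquire_scalar() and release_scalar() are made-up names for illustration):

#include <linux/device.h>
#include <linux/printk.h>

static void release_scalar(void *cookie)
{
	unsigned int value = (unsigned int)(unsigned long)cookie;

	/* Undo whatever was done for 'value' on acquisition. */
	pr_debug("releasing %u\n", value);
}

static int acquire_scalar(struct device *dev, unsigned int value)
{
	/*
	 * On allocation failure devm_add_action_or_reset() invokes
	 * release_scalar() itself before returning the error.
	 */
	return devm_add_action_or_reset(dev, release_scalar,
					(void *)(unsigned long)value);
}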
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 37ab78243fab..8657379e9165 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -878,7 +878,7 @@ static void of_gpiochip_remove_hog(struct gpio_chip *chip,
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(chip, desc, FLAG_IS_HOGGED)
+ for_each_gpio_desc_with_flag(chip, desc, GPIOD_FLAG_IS_HOGGED)
if (READ_ONCE(desc->hog) == hog)
gpiochip_free_own_desc(desc);
}
@@ -1031,85 +1031,6 @@ static int of_gpio_threecell_xlate(struct gpio_chip *gc,
return gpiospec->args[1];
}
-#if IS_ENABLED(CONFIG_OF_GPIO_MM_GPIOCHIP)
-#include <linux/gpio/legacy-of-mm-gpiochip.h>
-/**
- * of_mm_gpiochip_add_data - Add memory mapped GPIO chip (bank)
- * @np: device node of the GPIO chip
- * @mm_gc: pointer to the of_mm_gpio_chip allocated structure
- * @data: driver data to store in the struct gpio_chip
- *
- * To use this function you should allocate and fill mm_gc with:
- *
- * 1) In the gpio_chip structure:
- * - all the callbacks
- * - of_gpio_n_cells
- * - of_xlate callback (optional)
- *
- * 3) In the of_mm_gpio_chip structure:
- * - save_regs callback (optional)
- *
- * If succeeded, this function will map bank's memory and will
- * do all necessary work for you. Then you'll able to use .regs
- * to manage GPIOs from the callbacks.
- *
- * Returns:
- * 0 on success, or negative errno on failure.
- */
-int of_mm_gpiochip_add_data(struct device_node *np,
- struct of_mm_gpio_chip *mm_gc,
- void *data)
-{
- int ret = -ENOMEM;
- struct gpio_chip *gc = &mm_gc->gc;
-
- gc->label = kasprintf(GFP_KERNEL, "%pOF", np);
- if (!gc->label)
- goto err0;
-
- mm_gc->regs = of_iomap(np, 0);
- if (!mm_gc->regs)
- goto err1;
-
- gc->base = -1;
-
- if (mm_gc->save_regs)
- mm_gc->save_regs(mm_gc);
-
- fwnode_handle_put(mm_gc->gc.fwnode);
- mm_gc->gc.fwnode = fwnode_handle_get(of_fwnode_handle(np));
-
- ret = gpiochip_add_data(gc, data);
- if (ret)
- goto err2;
-
- return 0;
-err2:
- of_node_put(np);
- iounmap(mm_gc->regs);
-err1:
- kfree(gc->label);
-err0:
- pr_err("%pOF: GPIO chip registration failed with status %d\n", np, ret);
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_mm_gpiochip_add_data);
-
-/**
- * of_mm_gpiochip_remove - Remove memory mapped GPIO chip (bank)
- * @mm_gc: pointer to the of_mm_gpio_chip allocated structure
- */
-void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc)
-{
- struct gpio_chip *gc = &mm_gc->gc;
-
- gpiochip_remove(gc);
- iounmap(mm_gc->regs);
- kfree(gc->label);
-}
-EXPORT_SYMBOL_GPL(of_mm_gpiochip_remove);
-#endif
-
#ifdef CONFIG_PINCTRL
static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
{
diff --git a/drivers/gpio/gpiolib-shared.c b/drivers/gpio/gpiolib-shared.c
new file mode 100644
index 000000000000..8bdd107b1ad1
--- /dev/null
+++ b/drivers/gpio/gpiolib-shared.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Linaro Ltd.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/auxiliary_bus.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/overflow.h>
+#include <linux/printk.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "gpiolib.h"
+#include "gpiolib-shared.h"
+
+/* Represents a single reference to a GPIO pin. */
+struct gpio_shared_ref {
+ struct list_head list;
+ /* Firmware node associated with this GPIO's consumer. */
+ struct fwnode_handle *fwnode;
+ /* GPIO flags this consumer uses for the request. */
+ enum gpiod_flags flags;
+ char *con_id;
+ int dev_id;
+ struct auxiliary_device adev;
+ struct gpiod_lookup_table *lookup;
+};
+
+/* Represents a single GPIO pin. */
+struct gpio_shared_entry {
+ struct list_head list;
+ /* Firmware node associated with the GPIO controller. */
+ struct fwnode_handle *fwnode;
+ /* Hardware offset of the GPIO within its chip. */
+ unsigned int offset;
+ /* Index in the property value array. */
+ size_t index;
+ struct mutex lock;
+ struct gpio_shared_desc *shared_desc;
+ struct kref ref;
+ struct list_head refs;
+};
+
+static LIST_HEAD(gpio_shared_list);
+static DEFINE_MUTEX(gpio_shared_lock);
+static DEFINE_IDA(gpio_shared_ida);
+
+#if IS_ENABLED(CONFIG_OF)
+static struct gpio_shared_entry *
+gpio_shared_find_entry(struct fwnode_handle *controller_node,
+ unsigned int offset)
+{
+ struct gpio_shared_entry *entry;
+
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ if (entry->fwnode == controller_node && entry->offset == offset)
+ return entry;
+ }
+
+ return NULL;
+}
+
+/* Check whether this is one of the special nodes that we should ignore. */
+static bool gpio_shared_of_node_ignore(struct device_node *node)
+{
+ /*
+ * __symbols__ is a special, internal node and should not be considered
+ * when scanning for shared GPIOs.
+ */
+ if (of_node_name_eq(node, "__symbols__"))
+ return true;
+
+ /*
+ * GPIO hogs have a "gpios" property which is not a phandle and can't
+ * possibly refer to a shared GPIO.
+ */
+ if (of_property_present(node, "gpio-hog"))
+ return true;
+
+ return false;
+}
+
+static int gpio_shared_of_traverse(struct device_node *curr)
+{
+ struct gpio_shared_entry *entry;
+ size_t con_id_len, suffix_len;
+ struct fwnode_handle *fwnode;
+ struct of_phandle_args args;
+ struct property *prop;
+ unsigned int offset;
+ const char *suffix;
+ int ret, count, i;
+
+ if (gpio_shared_of_node_ignore(curr))
+ return 0;
+
+ for_each_property_of_node(curr, prop) {
+ /*
+ * The standard name for a GPIO property is "foo-gpios"
+ * or "foo-gpio". Some bindings also use "gpios" or "gpio".
+ * There are some legacy device-trees which have a different
+ * naming convention and for which we have rename quirks in
+ * place in gpiolib-of.c. I don't think any of them require
+ * support for shared GPIOs so for now let's just ignore
+ * them. We can always just export the quirk list and
+ * iterate over it here.
+ */
+ if (!strends(prop->name, "-gpios") &&
+ !strends(prop->name, "-gpio") &&
+ strcmp(prop->name, "gpios") != 0 &&
+ strcmp(prop->name, "gpio") != 0)
+ continue;
+
+ count = of_count_phandle_with_args(curr, prop->name,
+ "#gpio-cells");
+ if (count <= 0)
+ continue;
+
+ for (i = 0; i < count; i++) {
+ struct device_node *np __free(device_node) = NULL;
+
+ ret = of_parse_phandle_with_args(curr, prop->name,
+ "#gpio-cells", i,
+ &args);
+ if (ret)
+ continue;
+
+ np = args.np;
+
+ if (!of_property_present(np, "gpio-controller"))
+ continue;
+
+ /*
+ * We support 1, 2 and 3 cell GPIO bindings in the
+ * kernel currently. There's only one old MIPS dts that
+ * has a one-cell binding but there's no associated
+ * consumer so it may as well be an error. There don't
+ * seem to be any 3-cell users of non-exclusive GPIOs,
+ * so we can skip this as well. Let's occupy ourselves
+ * with the predominant 2-cell binding with the first
+ * cell indicating the hardware offset of the GPIO and
+ * the second defining the GPIO flags of the request.
+ */
+ if (args.args_count != 2)
+ continue;
+
+ fwnode = of_fwnode_handle(args.np);
+ offset = args.args[0];
+
+ entry = gpio_shared_find_entry(fwnode, offset);
+ if (!entry) {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->fwnode = fwnode_handle_get(fwnode);
+ entry->offset = offset;
+ entry->index = i;
+ INIT_LIST_HEAD(&entry->refs);
+ mutex_init(&entry->lock);
+
+ list_add_tail(&entry->list, &gpio_shared_list);
+ }
+
+ struct gpio_shared_ref *ref __free(kfree) =
+ kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref)
+ return -ENOMEM;
+
+ ref->fwnode = fwnode_handle_get(of_fwnode_handle(curr));
+ ref->flags = args.args[1];
+
+ if (strends(prop->name, "gpios"))
+ suffix = "-gpios";
+ else if (strends(prop->name, "gpio"))
+ suffix = "-gpio";
+ else
+ suffix = NULL;
+ if (!suffix)
+ continue;
+
+ /* We only set con_id if there's actually one. */
+ if (strcmp(prop->name, "gpios") && strcmp(prop->name, "gpio")) {
+ ref->con_id = kstrdup(prop->name, GFP_KERNEL);
+ if (!ref->con_id) {
+ fwnode_handle_put(ref->fwnode);
+ return -ENOMEM;
+ }
+
+ con_id_len = strlen(ref->con_id);
+ suffix_len = strlen(suffix);
+
+ ref->con_id[con_id_len - suffix_len] = '\0';
+ }
+
+ ref->dev_id = ida_alloc(&gpio_shared_ida, GFP_KERNEL);
+ if (ref->dev_id < 0) {
+ fwnode_handle_put(ref->fwnode);
+ kfree(ref->con_id);
+ return -ENOMEM;
+ }
+
+ if (!list_empty(&entry->refs))
+ pr_debug("GPIO %u at %s is shared by multiple firmware nodes\n",
+ entry->offset, fwnode_get_name(entry->fwnode));
+
+ list_add_tail(&no_free_ptr(ref)->list, &entry->refs);
+ }
+ }
+
+ for_each_child_of_node_scoped(curr, child) {
+ ret = gpio_shared_of_traverse(child);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int gpio_shared_of_scan(void)
+{
+ if (of_root)
+ return gpio_shared_of_traverse(of_root);
+
+ return 0;
+}
+#else
+static int gpio_shared_of_scan(void)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static void gpio_shared_adev_release(struct device *dev)
+{
+}
+
+static int gpio_shared_make_adev(struct gpio_device *gdev,
+ struct gpio_shared_entry *entry,
+ struct gpio_shared_ref *ref)
+{
+ struct auxiliary_device *adev = &ref->adev;
+ int ret;
+
+ lockdep_assert_held(&gpio_shared_lock);
+
+ memset(adev, 0, sizeof(*adev));
+
+ adev->id = ref->dev_id;
+ adev->name = "proxy";
+ adev->dev.parent = gdev->dev.parent;
+ adev->dev.platform_data = entry;
+ adev->dev.release = gpio_shared_adev_release;
+
+ ret = auxiliary_device_init(adev);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ pr_debug("Created an auxiliary GPIO proxy %s for GPIO device %s\n",
+ dev_name(&adev->dev), gpio_device_get_label(gdev));
+
+ return 0;
+}
+
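The "proxy" auxiliary devices created by gpio_shared_make_adev() need a matching driver on the other end; that driver is not part of this hunk. A hypothetical registration sketch follows; the "gpiolib_shared.proxy" match string assumes the KBUILD_MODNAME of this file, following the auxiliary bus "modname.devname" convention:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

/* Probe is where the proxy would register its 1-line forwarding chip. */
static int gpio_proxy_probe(struct auxiliary_device *adev,
			    const struct auxiliary_device_id *id)
{
	return 0;	/* placeholder */
}

static const struct auxiliary_device_id gpio_proxy_id_table[] = {
	{ .name = "gpiolib_shared.proxy" },	/* assumed modname prefix */
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, gpio_proxy_id_table);

static struct auxiliary_driver gpio_proxy_driver = {
	.probe = gpio_proxy_probe,
	.id_table = gpio_proxy_id_table,
};
module_auxiliary_driver(gpio_proxy_driver);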
+#if IS_ENABLED(CONFIG_RESET_GPIO)
+/*
+ * Special case: reset-gpio is an auxiliary device that's created dynamically
+ * and put in between the GPIO controller and consumers of shared GPIOs
+ * referred to by the "reset-gpios" property.
+ *
+ * If the supposed consumer of a shared GPIO didn't match any of the mappings
+ * we created when scanning the firmware nodes, it's still possible that it's
+ * the reset-gpio device which didn't exist at the time of the scan.
+ *
+ * This function verifies this and returns true if that is the case.
+ */
+static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
+ struct gpio_shared_entry *entry,
+ struct gpio_shared_ref *ref)
+{
+ struct fwnode_handle *reset_fwnode = dev_fwnode(consumer);
+ struct fwnode_reference_args ref_args, aux_args;
+ struct device *parent = consumer->parent;
+ bool match;
+ int ret;
+
+ /* The reset-gpio device must have a parent AND a firmware node. */
+ if (!parent || !reset_fwnode)
+ return false;
+
+ /*
+ * FIXME: use device_is_compatible() once the reset-gpio driver gains
+ * a compatible string which it currently does not have.
+ */
+ if (!strstarts(dev_name(consumer), "reset.gpio."))
+ return false;
+
+ /*
+ * Parent of the reset-gpio auxiliary device is the GPIO chip whose
+ * fwnode we stored in the entry structure.
+ */
+ if (!device_match_fwnode(parent, entry->fwnode))
+ return false;
+
+ /*
+ * The device associated with the shared reference's firmware node is
+ * the consumer of the reset control exposed by the reset-gpio device.
+ * It must have a "reset-gpios" property that's referencing the entry's
+ * firmware node.
+ *
+ * The reference args must agree between the real consumer and the
+ * auxiliary reset-gpio device.
+ */
+ ret = fwnode_property_get_reference_args(ref->fwnode, "reset-gpios",
+ NULL, 2, 0, &ref_args);
+ if (ret)
+ return false;
+
+ ret = fwnode_property_get_reference_args(reset_fwnode, "reset-gpios",
+ NULL, 2, 0, &aux_args);
+ if (ret) {
+ fwnode_handle_put(ref_args.fwnode);
+ return false;
+ }
+
+ match = ((ref_args.fwnode == entry->fwnode) &&
+ (aux_args.fwnode == entry->fwnode) &&
+ (ref_args.args[0] == aux_args.args[0]));
+
+ fwnode_handle_put(ref_args.fwnode);
+ fwnode_handle_put(aux_args.fwnode);
+ return match;
+}
+#else
+static bool gpio_shared_dev_is_reset_gpio(struct device *consumer,
+ struct gpio_shared_entry *entry,
+ struct gpio_shared_ref *ref)
+{
+ return false;
+}
+#endif /* CONFIG_RESET_GPIO */
+
+int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags)
+{
+ const char *dev_id = dev_name(consumer);
+ struct gpio_shared_entry *entry;
+ struct gpio_shared_ref *ref;
+
+ struct gpiod_lookup_table *lookup __free(kfree) =
+ kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
+ if (!lookup)
+ return -ENOMEM;
+
+ guard(mutex)(&gpio_shared_lock);
+
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ list_for_each_entry(ref, &entry->refs, list) {
+ if (!device_match_fwnode(consumer, ref->fwnode) &&
+ !gpio_shared_dev_is_reset_gpio(consumer, entry, ref))
+ continue;
+
+ /* We've already done that on a previous request. */
+ if (ref->lookup)
+ return 0;
+
+ char *key __free(kfree) =
+ kasprintf(GFP_KERNEL,
+ KBUILD_MODNAME ".proxy.%u",
+ ref->adev.id);
+ if (!key)
+ return -ENOMEM;
+
+ pr_debug("Adding machine lookup entry for a shared GPIO for consumer %s, with key '%s' and con_id '%s'\n",
+ dev_id, key, ref->con_id ?: "none");
+
+ lookup->dev_id = dev_id;
+ lookup->table[0] = GPIO_LOOKUP(no_free_ptr(key), 0,
+ ref->con_id, lflags);
+
+ ref->lookup = no_free_ptr(lookup);
+ gpiod_add_lookup_table(ref->lookup);
+
+ return 0;
+ }
+ }
+
+ /* We warn here because this can only happen due to a programmer error. */
+ WARN_ON(1);
+ return -ENOENT;
+}
+
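For illustration, the lookup registered by gpio_shared_add_proxy_lookup() is equivalent to a static machine table of the following shape; the key string shows what the kasprintf() format would produce, but the exact KBUILD_MODNAME prefix, IDA id, dev_id and con_id values are assumptions:

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table example_proxy_lookup = {
	.dev_id = "some-consumer-device",	/* dev_name(consumer) */
	.table = {
		/* proxy chip label, line 0 of the 1-line proxy, con_id, flags */
		GPIO_LOOKUP("gpiolib_shared.proxy.0", 0, "reset",
			    GPIO_ACTIVE_HIGH),
		{ }	/* terminator, hence struct_size(lookup, table, 2) */
	},
};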
+static void gpio_shared_remove_adev(struct auxiliary_device *adev)
+{
+ lockdep_assert_held(&gpio_shared_lock);
+
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
+
+int gpio_device_setup_shared(struct gpio_device *gdev)
+{
+ struct gpio_shared_entry *entry;
+ struct gpio_shared_ref *ref;
+ unsigned long *flags;
+ int ret;
+
+ guard(mutex)(&gpio_shared_lock);
+
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ list_for_each_entry(ref, &entry->refs, list) {
+ if (gdev->dev.parent == &ref->adev.dev) {
+ /*
+ * This is a shared GPIO proxy. Mark its
+ * descriptor as such and return here.
+ */
+ __set_bit(GPIOD_FLAG_SHARED_PROXY,
+ &gdev->descs[0].flags);
+ return 0;
+ }
+ }
+ }
+
+ /*
+ * This is not a shared GPIO proxy but it still may be the device
+ * exposing shared pins. Find them and create the proxy devices.
+ */
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ if (!device_match_fwnode(&gdev->dev, entry->fwnode))
+ continue;
+
+ if (list_count_nodes(&entry->refs) <= 1)
+ continue;
+
+ flags = &gdev->descs[entry->offset].flags;
+
+ __set_bit(GPIOD_FLAG_SHARED, flags);
+ /*
+ * Shared GPIOs are not requested via the normal path. Make
+ * them inaccessible to anyone even before we register the
+ * chip.
+ */
+ __set_bit(GPIOD_FLAG_REQUESTED, flags);
+
+ pr_debug("GPIO %u owned by %s is shared by multiple consumers\n",
+ entry->offset, gpio_device_get_label(gdev));
+
+ list_for_each_entry(ref, &entry->refs, list) {
+ pr_debug("Setting up a shared GPIO entry for %s\n",
+ fwnode_get_name(ref->fwnode));
+
+ ret = gpio_shared_make_adev(gdev, entry, ref);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void gpio_device_teardown_shared(struct gpio_device *gdev)
+{
+ struct gpio_shared_entry *entry;
+ struct gpio_shared_ref *ref;
+
+ guard(mutex)(&gpio_shared_lock);
+
+ list_for_each_entry(entry, &gpio_shared_list, list) {
+ if (!device_match_fwnode(&gdev->dev, entry->fwnode))
+ continue;
+
+ list_for_each_entry(ref, &entry->refs, list) {
+ if (ref->lookup) {
+ gpiod_remove_lookup_table(ref->lookup);
+ kfree(ref->lookup->table[0].key);
+ kfree(ref->lookup);
+ ref->lookup = NULL;
+ }
+ gpio_shared_remove_adev(&ref->adev);
+ }
+ }
+}
+
+static void gpio_shared_release(struct kref *kref)
+{
+ struct gpio_shared_entry *entry =
+ container_of(kref, struct gpio_shared_entry, ref);
+ struct gpio_shared_desc *shared_desc;
+
+ guard(mutex)(&entry->lock);
+
+ shared_desc = entry->shared_desc;
+ gpio_device_put(shared_desc->desc->gdev);
+ if (shared_desc->can_sleep)
+ mutex_destroy(&shared_desc->mutex);
+ kfree(shared_desc);
+ entry->shared_desc = NULL;
+}
+
+static void gpiod_shared_put(void *data)
+{
+ struct gpio_shared_entry *entry = data;
+
+ lockdep_assert_not_held(&gpio_shared_lock);
+
+ kref_put(&entry->ref, gpio_shared_release);
+}
+
+static struct gpio_shared_desc *
+gpiod_shared_desc_create(struct gpio_shared_entry *entry)
+{
+ struct gpio_shared_desc *shared_desc;
+ struct gpio_device *gdev;
+
+ lockdep_assert_held(&entry->lock);
+
+ shared_desc = kzalloc(sizeof(*shared_desc), GFP_KERNEL);
+ if (!shared_desc)
+ return ERR_PTR(-ENOMEM);
+
+ gdev = gpio_device_find_by_fwnode(entry->fwnode);
+ if (!gdev) {
+ kfree(shared_desc);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ shared_desc->desc = &gdev->descs[entry->offset];
+ shared_desc->can_sleep = gpiod_cansleep(shared_desc->desc);
+ if (shared_desc->can_sleep)
+ mutex_init(&shared_desc->mutex);
+ else
+ spin_lock_init(&shared_desc->spinlock);
+
+ return shared_desc;
+}
+
+struct gpio_shared_desc *devm_gpiod_shared_get(struct device *dev)
+{
+ struct gpio_shared_desc *shared_desc;
+ struct gpio_shared_entry *entry;
+ int ret;
+
+ lockdep_assert_not_held(&gpio_shared_lock);
+
+ entry = dev_get_platdata(dev);
+ if (WARN_ON(!entry))
+ /* Programmer bug */
+ return ERR_PTR(-ENOENT);
+
+ scoped_guard(mutex, &entry->lock) {
+ if (entry->shared_desc) {
+ kref_get(&entry->ref);
+ shared_desc = entry->shared_desc;
+ } else {
+ shared_desc = gpiod_shared_desc_create(entry);
+ if (IS_ERR(shared_desc))
+ return ERR_CAST(shared_desc);
+
+ kref_init(&entry->ref);
+ entry->shared_desc = shared_desc;
+ }
+
+ pr_debug("Device %s acquired a reference to the shared GPIO %u owned by %s\n",
+ dev_name(dev), gpiod_hwgpio(shared_desc->desc),
+ gpio_device_get_label(shared_desc->desc->gdev));
+ }
+
+ ret = devm_add_action_or_reset(dev, gpiod_shared_put, entry);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return shared_desc;
+}
+EXPORT_SYMBOL_GPL(devm_gpiod_shared_get);
+
+static void gpio_shared_drop_ref(struct gpio_shared_ref *ref)
+{
+ list_del(&ref->list);
+ kfree(ref->con_id);
+ ida_free(&gpio_shared_ida, ref->dev_id);
+ fwnode_handle_put(ref->fwnode);
+ kfree(ref);
+}
+
+static void gpio_shared_drop_entry(struct gpio_shared_entry *entry)
+{
+ list_del(&entry->list);
+ mutex_destroy(&entry->lock);
+ fwnode_handle_put(entry->fwnode);
+ kfree(entry);
+}
+
+/*
+ * This is only called if gpio_shared_init() fails, so it's in fact __init and
+ * not __exit.
+ */
+static void __init gpio_shared_teardown(void)
+{
+ struct gpio_shared_entry *entry, *epos;
+ struct gpio_shared_ref *ref, *rpos;
+
+ list_for_each_entry_safe(entry, epos, &gpio_shared_list, list) {
+ list_for_each_entry_safe(ref, rpos, &entry->refs, list)
+ gpio_shared_drop_ref(ref);
+
+ gpio_shared_drop_entry(entry);
+ }
+}
+
+static void gpio_shared_free_exclusive(void)
+{
+ struct gpio_shared_entry *entry, *epos;
+
+ list_for_each_entry_safe(entry, epos, &gpio_shared_list, list) {
+ if (list_count_nodes(&entry->refs) > 1)
+ continue;
+
+ gpio_shared_drop_ref(list_first_entry(&entry->refs,
+ struct gpio_shared_ref,
+ list));
+ gpio_shared_drop_entry(entry);
+ }
+}
+
+static int __init gpio_shared_init(void)
+{
+ int ret;
+
+ /* Right now, we only support OF-based systems. */
+ ret = gpio_shared_of_scan();
+ if (ret) {
+ gpio_shared_teardown();
+ pr_err("Failed to scan OF nodes for shared GPIOs: %d\n", ret);
+ return ret;
+ }
+
+ gpio_shared_free_exclusive();
+
+ pr_debug("Finished scanning firmware nodes for shared GPIOs\n");
+ return 0;
+}
+postcore_initcall(gpio_shared_init);
diff --git a/drivers/gpio/gpiolib-shared.h b/drivers/gpio/gpiolib-shared.h
new file mode 100644
index 000000000000..667dbdff3585
--- /dev/null
+++ b/drivers/gpio/gpiolib-shared.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LINUX_GPIO_SHARED_H
+#define __LINUX_GPIO_SHARED_H
+
+#include <linux/cleanup.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+
+struct gpio_device;
+struct gpio_desc;
+struct device;
+
+#if IS_ENABLED(CONFIG_GPIO_SHARED)
+
+int gpio_device_setup_shared(struct gpio_device *gdev);
+void gpio_device_teardown_shared(struct gpio_device *gdev);
+int gpio_shared_add_proxy_lookup(struct device *consumer, unsigned long lflags);
+
+#else
+
+static inline int gpio_device_setup_shared(struct gpio_device *gdev)
+{
+ return 0;
+}
+
+static inline void gpio_device_teardown_shared(struct gpio_device *gdev) { }
+
+static inline int gpio_shared_add_proxy_lookup(struct device *consumer,
+ unsigned long lflags)
+{
+ return 0;
+}
+
+#endif /* CONFIG_GPIO_SHARED */
+
+struct gpio_shared_desc {
+ struct gpio_desc *desc;
+ bool can_sleep;
+ unsigned long cfg;
+ unsigned int usecnt;
+ unsigned int highcnt;
+ union {
+ struct mutex mutex;
+ spinlock_t spinlock;
+ };
+};
+
+struct gpio_shared_desc *devm_gpiod_shared_get(struct device *dev);
+
+DEFINE_LOCK_GUARD_1(gpio_shared_desc_lock, struct gpio_shared_desc,
+ if (_T->lock->can_sleep)
+ mutex_lock(&_T->lock->mutex);
+ else
+ spin_lock_irqsave(&_T->lock->spinlock, _T->flags),
+ if (_T->lock->can_sleep)
+ mutex_unlock(&_T->lock->mutex);
+ else
+ spin_unlock_irqrestore(&_T->lock->spinlock, _T->flags),
+ unsigned long flags)
+
+static inline void gpio_shared_lockdep_assert(struct gpio_shared_desc *shared_desc)
+{
+ if (shared_desc->can_sleep)
+ lockdep_assert_held(&shared_desc->mutex);
+ else
+ lockdep_assert_held(&shared_desc->spinlock);
+}
+
+#endif /* __LINUX_GPIO_SHARED_H */
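A hypothetical consumer-side sketch of the pieces defined above: a proxy driver takes a devres-managed reference with devm_gpiod_shared_get() and serializes access through the gpio_shared_desc_lock guard, which transparently picks the mutex or the irqsave spinlock based on can_sleep. The probe function is illustrative, not part of this patch:

#include <linux/auxiliary_bus.h>
#include <linux/cleanup.h>
#include <linux/err.h>

static int proxy_probe_example(struct auxiliary_device *adev,
			       const struct auxiliary_device_id *id)
{
	struct gpio_shared_desc *shared_desc;

	shared_desc = devm_gpiod_shared_get(&adev->dev);
	if (IS_ERR(shared_desc))
		return PTR_ERR(shared_desc);

	/* usecnt/highcnt bookkeeping must happen under the guard */
	scoped_guard(gpio_shared_desc_lock, shared_desc)
		shared_desc->usecnt++;

	return 0;
}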
diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c
index f21dbc28cf2c..b44f35d68459 100644
--- a/drivers/gpio/gpiolib-swnode.c
+++ b/drivers/gpio/gpiolib-swnode.c
@@ -31,7 +31,7 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
gdev_node = to_software_node(fwnode);
if (!gdev_node || !gdev_node->name)
- return ERR_PTR(-EINVAL);
+ goto fwnode_lookup;
/*
* Check for a special node that identifies undefined GPIOs, this is
@@ -41,7 +41,8 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode)
!strcmp(gdev_node->name, GPIOLIB_SWNODE_UNDEFINED_NAME))
return ERR_PTR(-ENOENT);
- gdev = gpio_device_find_by_label(gdev_node->name);
+fwnode_lookup:
+ gdev = gpio_device_find_by_fwnode(fwnode);
return gdev ?: ERR_PTR(-EPROBE_DEFER);
}
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index b64106f1cb7b..cd553acf3055 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -131,7 +131,7 @@ static ssize_t direction_show(struct device *dev,
scoped_guard(mutex, &data->mutex) {
gpiod_get_direction(desc);
- value = !!test_bit(FLAG_IS_OUT, &desc->flags);
+ value = !!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
}
return sysfs_emit(buf, "%s\n", value ? "out" : "in");
@@ -226,14 +226,14 @@ static int gpio_sysfs_request_irq(struct gpiod_data *data, unsigned char flags)
irq_flags = IRQF_SHARED;
if (flags & GPIO_IRQF_TRIGGER_FALLING) {
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irq_flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
- set_bit(FLAG_EDGE_FALLING, &desc->flags);
+ set_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
}
if (flags & GPIO_IRQF_TRIGGER_RISING) {
- irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ irq_flags |= test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
- set_bit(FLAG_EDGE_RISING, &desc->flags);
+ set_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
}
/*
@@ -244,7 +244,7 @@ static int gpio_sysfs_request_irq(struct gpiod_data *data, unsigned char flags)
* Remove this redundant call (along with the corresponding unlock)
* when those drivers have been fixed.
*/
- ret = gpiochip_lock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
+ ret = gpiochip_lock_as_irq(guard.gc, gpiod_hwgpio(desc));
if (ret < 0)
goto err_clr_bits;
@@ -258,10 +258,10 @@ static int gpio_sysfs_request_irq(struct gpiod_data *data, unsigned char flags)
return 0;
err_unlock:
- gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
+ gpiochip_unlock_as_irq(guard.gc, gpiod_hwgpio(desc));
err_clr_bits:
- clear_bit(FLAG_EDGE_RISING, &desc->flags);
- clear_bit(FLAG_EDGE_FALLING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
return ret;
}
@@ -280,9 +280,9 @@ static void gpio_sysfs_free_irq(struct gpiod_data *data)
data->irq_flags = 0;
free_irq(data->irq, data);
- gpiochip_unlock_as_irq(guard.gc, gpio_chip_hwgpio(desc));
- clear_bit(FLAG_EDGE_RISING, &desc->flags);
- clear_bit(FLAG_EDGE_FALLING, &desc->flags);
+ gpiochip_unlock_as_irq(guard.gc, gpiod_hwgpio(desc));
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &desc->flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &desc->flags);
}
static const char *const trigger_names[] = {
@@ -347,10 +347,10 @@ static int gpio_sysfs_set_active_low(struct gpiod_data *data, int value)
struct gpio_desc *desc = data->desc;
int status = 0;
- if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
+ if (!!test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags) == !!value)
return 0;
- assign_bit(FLAG_ACTIVE_LOW, &desc->flags, value);
+ assign_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags, value);
/* reconfigure poll(2) support if enabled on one edge only */
if (flags == GPIO_IRQF_TRIGGER_FALLING ||
@@ -373,7 +373,7 @@ static ssize_t active_low_show(struct device *dev,
int value;
scoped_guard(mutex, &data->mutex)
- value = !!test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ value = !!test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
return sysfs_emit(buf, "%d\n", value);
}
@@ -418,7 +418,7 @@ static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
mode = 0;
if (!data->direction_can_change &&
- test_bit(FLAG_IS_OUT, &data->desc->flags))
+ test_bit(GPIOD_FLAG_IS_OUT, &data->desc->flags))
mode = 0;
#endif /* CONFIG_GPIO_SYSFS_LEGACY */
}
@@ -478,15 +478,15 @@ static int export_gpio_desc(struct gpio_desc *desc)
if (!guard.gc)
return -ENODEV;
- offset = gpio_chip_hwgpio(desc);
+ offset = gpiod_hwgpio(desc);
if (!gpiochip_line_is_valid(guard.gc, offset)) {
pr_debug_ratelimited("%s: GPIO %d masked\n", __func__,
- gpio_chip_hwgpio(desc));
+ gpiod_hwgpio(desc));
return -EINVAL;
}
/*
- * No extra locking here; FLAG_SYSFS just signifies that the
+ * No extra locking here; GPIOD_FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
* they may be undone on its behalf too.
*/
@@ -505,7 +505,7 @@ static int export_gpio_desc(struct gpio_desc *desc)
if (ret < 0) {
gpiod_free(desc);
} else {
- set_bit(FLAG_SYSFS, &desc->flags);
+ set_bit(GPIOD_FLAG_SYSFS, &desc->flags);
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_REQUESTED);
}
@@ -515,11 +515,11 @@ static int export_gpio_desc(struct gpio_desc *desc)
static int unexport_gpio_desc(struct gpio_desc *desc)
{
/*
- * No extra locking here; FLAG_SYSFS just signifies that the
+ * No extra locking here; GPIOD_FLAG_SYSFS just signifies that the
* request and export were done on behalf of userspace, so
* they may be undone on its behalf too.
*/
- if (!test_and_clear_bit(FLAG_SYSFS, &desc->flags))
+ if (!test_and_clear_bit(GPIOD_FLAG_SYSFS, &desc->flags))
return -EINVAL;
gpiod_unexport(desc);
@@ -748,14 +748,14 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_EXPORT, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_EXPORT, &desc->flags))
return -EPERM;
gdev = desc->gdev;
guard(mutex)(&sysfs_lock);
- if (!test_bit(FLAG_REQUESTED, &desc->flags)) {
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &desc->flags)) {
gpiod_dbg(desc, "%s: unavailable (not requested)\n", __func__);
status = -EPERM;
goto err_clear_bit;
@@ -823,7 +823,7 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
}
desc_data->chip_attr_group.name = kasprintf(GFP_KERNEL, "gpio%u",
- gpio_chip_hwgpio(desc));
+ gpiod_hwgpio(desc));
if (!desc_data->chip_attr_group.name) {
status = -ENOMEM;
goto err_put_dirent;
@@ -843,7 +843,7 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
if (status)
goto err_free_name;
- path = kasprintf(GFP_KERNEL, "gpio%u/value", gpio_chip_hwgpio(desc));
+ path = kasprintf(GFP_KERNEL, "gpio%u/value", gpiod_hwgpio(desc));
if (!path) {
status = -ENOMEM;
goto err_remove_groups;
@@ -866,7 +866,7 @@ err_free_data:
#endif /* CONFIG_GPIO_SYSFS_LEGACY */
kfree(desc_data);
err_clear_bit:
- clear_bit(FLAG_EXPORT, &desc->flags);
+ clear_bit(GPIOD_FLAG_EXPORT, &desc->flags);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
return status;
}
@@ -937,7 +937,7 @@ void gpiod_unexport(struct gpio_desc *desc)
}
scoped_guard(mutex, &sysfs_lock) {
- if (!test_bit(FLAG_EXPORT, &desc->flags))
+ if (!test_bit(GPIOD_FLAG_EXPORT, &desc->flags))
return;
gdev = gpiod_to_gpio_device(desc);
@@ -956,7 +956,7 @@ void gpiod_unexport(struct gpio_desc *desc)
return;
list_del(&desc_data->list);
- clear_bit(FLAG_EXPORT, &desc->flags);
+ clear_bit(GPIOD_FLAG_EXPORT, &desc->flags);
#if IS_ENABLED(CONFIG_GPIO_SYSFS_LEGACY)
sysfs_put(desc_data->value_kn);
device_unregister(desc_data->dev);
@@ -1073,7 +1073,7 @@ void gpiochip_sysfs_unregister(struct gpio_device *gdev)
return;
/* unregister gpiod class devices owned by sysfs */
- for_each_gpio_desc_with_flag(chip, desc, FLAG_SYSFS) {
+ for_each_gpio_desc_with_flag(chip, desc, GPIOD_FLAG_SYSFS) {
gpiod_unexport(desc);
gpiod_free(desc);
}
@@ -1091,7 +1091,7 @@ static int gpiofind_sysfs_register(struct gpio_chip *gc, const void *data)
ret = gpiochip_sysfs_register(gdev);
if (ret)
- chip_err(gc, "failed to register the sysfs entry: %d\n", ret);
+ gpiochip_err(gc, "failed to register the sysfs entry: %d\n", ret);
return 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 0d2b470a252e..91e0c384f34a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -37,6 +37,7 @@
#include "gpiolib-acpi.h"
#include "gpiolib-cdev.h"
#include "gpiolib-of.h"
+#include "gpiolib-shared.h"
#include "gpiolib-swnode.h"
#include "gpiolib-sysfs.h"
#include "gpiolib.h"
@@ -127,10 +128,10 @@ const char *gpiod_get_label(struct gpio_desc *desc)
label = srcu_dereference_check(desc->label, &desc->gdev->desc_srcu,
srcu_read_lock_held(&desc->gdev->desc_srcu));
- if (test_bit(FLAG_USED_AS_IRQ, &flags))
+ if (test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags))
return label ? label->str : "interrupt";
- if (!test_bit(FLAG_REQUESTED, &flags))
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &flags))
return NULL;
return label ? label->str : NULL;
@@ -235,6 +236,19 @@ int desc_to_gpio(const struct gpio_desc *desc)
}
EXPORT_SYMBOL_GPL(desc_to_gpio);
+/**
+ * gpiod_hwgpio - Return the GPIO number of the passed descriptor relative to
+ * its chip.
+ * @desc: GPIO descriptor
+ *
+ * Returns:
+ * Hardware offset of the GPIO represented by the descriptor.
+ */
+int gpiod_hwgpio(const struct gpio_desc *desc)
+{
+ return desc - &desc->gdev->descs[0];
+}
+EXPORT_SYMBOL_GPL(gpiod_hwgpio);
/**
* gpiod_to_chip - Return the GPIO chip to which a GPIO descriptor belongs
@@ -443,15 +457,15 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (!guard.gc)
return -ENODEV;
- offset = gpio_chip_hwgpio(desc);
+ offset = gpiod_hwgpio(desc);
flags = READ_ONCE(desc->flags);
/*
* Open drain emulation using input mode may incorrectly report
* input here, fix that up.
*/
- if (test_bit(FLAG_OPEN_DRAIN, &flags) &&
- test_bit(FLAG_IS_OUT, &flags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &flags) &&
+ test_bit(GPIOD_FLAG_IS_OUT, &flags))
return 0;
if (!guard.gc->get_direction)
@@ -468,7 +482,7 @@ int gpiod_get_direction(struct gpio_desc *desc)
if (ret > 0)
ret = 1;
- assign_bit(FLAG_IS_OUT, &flags, !ret);
+ assign_bit(GPIOD_FLAG_IS_OUT, &flags, !ret);
WRITE_ONCE(desc->flags, flags);
return ret;
@@ -846,7 +860,7 @@ static void gpiochip_free_remaining_irqs(struct gpio_chip *gc)
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(gc, desc, FLAG_USED_AS_IRQ)
+ for_each_gpio_desc_with_flag(gc, desc, GPIOD_FLAG_USED_AS_IRQ)
gpiod_free_irqs(desc);
}
@@ -921,8 +935,8 @@ static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog)
desc = gpiochip_get_desc(gc, hog->chip_hwnum);
if (IS_ERR(desc)) {
- chip_err(gc, "%s: unable to get GPIO desc: %ld\n", __func__,
- PTR_ERR(desc));
+ gpiochip_err(gc, "%s: unable to get GPIO desc: %ld\n",
+ __func__, PTR_ERR(desc));
return;
}
@@ -1124,7 +1138,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
ret = gpiodev_add_to_list_unlocked(gdev);
if (ret) {
- chip_err(gc, "GPIO integer space overlap, cannot add chip\n");
+ gpiochip_err(gc, "GPIO integer space overlap, cannot add chip\n");
goto err_free_label;
}
}
@@ -1169,10 +1183,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
* lock here.
*/
if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index))
- assign_bit(FLAG_IS_OUT, &desc->flags,
+ assign_bit(GPIOD_FLAG_IS_OUT, &desc->flags,
!gc->get_direction(gc, desc_index));
else
- assign_bit(FLAG_IS_OUT,
+ assign_bit(GPIOD_FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
}
@@ -1200,6 +1214,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (ret)
goto err_remove_irqchip_mask;
+ ret = gpio_device_setup_shared(gdev);
+ if (ret)
+ goto err_remove_irqchip;
+
/*
* By first adding the chardev, and then adding the device,
* we get a device node entry in sysfs under
@@ -1211,10 +1229,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
if (gpiolib_initialized) {
ret = gpiochip_setup_dev(gdev);
if (ret)
- goto err_remove_irqchip;
+ goto err_teardown_shared;
}
+
return 0;
+err_teardown_shared:
+ gpio_device_teardown_shared(gdev);
err_remove_irqchip:
gpiochip_irqchip_remove(gc);
err_remove_irqchip_mask:
@@ -1283,6 +1304,7 @@ void gpiochip_remove(struct gpio_chip *gc)
/* Numb the device, cancelling all outstanding operations */
rcu_assign_pointer(gdev->chip, NULL);
synchronize_srcu(&gdev->srcu);
+ gpio_device_teardown_shared(gdev);
gpiochip_irqchip_remove(gc);
acpi_gpiochip_remove(gc);
of_gpiochip_remove(gc);
@@ -1528,8 +1550,7 @@ static void gpiochip_set_hierarchical_irqchip(struct gpio_chip *gc,
&parent_hwirq,
&parent_type);
if (ret) {
- chip_err(gc, "skip set-up on hwirq %d\n",
- i);
+ gpiochip_err(gc, "skip set-up on hwirq %d\n", i);
continue;
}
@@ -1542,15 +1563,14 @@ static void gpiochip_set_hierarchical_irqchip(struct gpio_chip *gc,
ret = irq_domain_alloc_irqs(gc->irq.domain, 1,
NUMA_NO_NODE, &fwspec);
if (ret < 0) {
- chip_err(gc,
- "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
- i, parent_hwirq,
- ret);
+ gpiochip_err(gc,
+ "can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
+ i, parent_hwirq, ret);
}
}
}
- chip_err(gc, "%s unknown fwnode type proceed anyway\n", __func__);
+ gpiochip_err(gc, "%s: unknown fwnode type, proceeding anyway\n", __func__);
return;
}
@@ -1602,15 +1622,15 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (ret)
return ret;
- chip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
+ gpiochip_dbg(gc, "allocate IRQ %d, hwirq %lu\n", irq, hwirq);
ret = girq->child_to_parent_hwirq(gc, hwirq, type,
&parent_hwirq, &parent_type);
if (ret) {
- chip_err(gc, "can't look up hwirq %lu\n", hwirq);
+ gpiochip_err(gc, "can't look up hwirq %lu\n", hwirq);
return ret;
}
- chip_dbg(gc, "found parent hwirq %u\n", parent_hwirq);
+ gpiochip_dbg(gc, "found parent hwirq %u\n", parent_hwirq);
/*
* We set handle_bad_irq because the .set_type() should
@@ -1631,8 +1651,8 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (ret)
return ret;
- chip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
- irq, parent_hwirq);
+ gpiochip_dbg(gc, "alloc_irqs_parent for %d parent hwirq %d\n",
+ irq, parent_hwirq);
irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key);
ret = irq_domain_alloc_irqs_parent(d, irq, 1, &gpio_parent_fwspec);
/*
@@ -1642,9 +1662,9 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d,
if (irq_domain_is_msi(d->parent) && (ret == -EEXIST))
ret = 0;
if (ret)
- chip_err(gc,
- "failed to allocate parent hwirq %d for hwirq %lu\n",
- parent_hwirq, hwirq);
+ gpiochip_err(gc,
+ "failed to allocate parent hwirq %d for hwirq %lu\n",
+ parent_hwirq, hwirq);
return ret;
}
@@ -1720,7 +1740,7 @@ static struct irq_domain *gpiochip_hierarchy_create_domain(struct gpio_chip *gc)
if (!gc->irq.child_to_parent_hwirq ||
!gc->irq.fwnode) {
- chip_err(gc, "missing irqdomain vital data\n");
+ gpiochip_err(gc, "missing irqdomain vital data\n");
return ERR_PTR(-EINVAL);
}
@@ -1993,7 +2013,7 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
if (irqchip->flags & IRQCHIP_IMMUTABLE)
return;
- chip_warn(gc, "not an immutable chip, please consider fixing it!\n");
+ gpiochip_warn(gc, "not an immutable chip, please consider fixing it!\n");
if (!irqchip->irq_request_resources &&
!irqchip->irq_release_resources) {
@@ -2009,8 +2029,8 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
* ...and if so, give a gentle warning that this is bad
* practice.
*/
- chip_info(gc,
- "detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
+ gpiochip_info(gc,
+ "detected irqchip that is shared with multiple gpiochips: please fix the driver.\n");
return;
}
@@ -2039,7 +2059,8 @@ static int gpiochip_irqchip_add_allocated_domain(struct gpio_chip *gc,
return -EINVAL;
if (gc->to_irq)
- chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);
+ gpiochip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n",
+ __func__);
gc->to_irq = gpiochip_to_irq;
gc->irq.domain = domain;
@@ -2080,7 +2101,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
return 0;
if (gc->irq.parent_handler && gc->can_sleep) {
- chip_err(gc, "you cannot have chained interrupts on a chip that may sleep\n");
+ gpiochip_err(gc, "you cannot have chained interrupts on a chip that may sleep\n");
return -EINVAL;
}
@@ -2316,10 +2337,8 @@ int gpiochip_add_pingroup_range(struct gpio_chip *gc,
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
- if (!pin_range) {
- chip_err(gc, "failed to allocate pin ranges\n");
+ if (!pin_range)
return -ENOMEM;
- }
/* Use local offset as range ID */
pin_range->range.id = gpio_offset;
@@ -2338,7 +2357,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *gc,
pinctrl_add_gpio_range(pctldev, &pin_range->range);
- chip_dbg(gc, "created GPIO range %d->%d ==> %s PINGRP %s\n",
+ gpiochip_dbg(gc, "created GPIO range %d->%d ==> %s PINGRP %s\n",
gpio_offset, gpio_offset + pin_range->range.npins - 1,
pinctrl_dev_get_devname(pctldev), pin_group);
@@ -2349,11 +2368,13 @@ int gpiochip_add_pingroup_range(struct gpio_chip *gc,
EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
/**
- * gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping
+ * gpiochip_add_pin_range_with_pins() - add a range for GPIO <-> pin mapping
* @gc: the gpiochip to add the range for
* @pinctl_name: the dev_name() of the pin controller to map to
* @gpio_offset: the start offset in the current gpio_chip number space
* @pin_offset: the start offset in the pin controller number space
+ * @pins: the list of non-consecutive pins to accumulate in this range (if not
+ * NULL, pin_offset is ignored by pinctrl core)
* @npins: the number of pins from the offset of each pin space (GPIO and
* pin controller) to accumulate in this range
*
@@ -2365,19 +2386,20 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range);
* Returns:
* 0 on success, or a negative errno on failure.
*/
-int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
- unsigned int gpio_offset, unsigned int pin_offset,
- unsigned int npins)
+int gpiochip_add_pin_range_with_pins(struct gpio_chip *gc,
+ const char *pinctl_name,
+ unsigned int gpio_offset,
+ unsigned int pin_offset,
+ unsigned int const *pins,
+ unsigned int npins)
{
struct gpio_pin_range *pin_range;
struct gpio_device *gdev = gc->gpiodev;
int ret;
pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL);
- if (!pin_range) {
- chip_err(gc, "failed to allocate pin ranges\n");
+ if (!pin_range)
return -ENOMEM;
- }
/* Use local offset as range ID */
pin_range->range.id = gpio_offset;
@@ -2385,25 +2407,30 @@ int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name,
pin_range->range.name = gc->label;
pin_range->range.base = gdev->base + gpio_offset;
pin_range->range.pin_base = pin_offset;
+ pin_range->range.pins = pins;
pin_range->range.npins = npins;
pin_range->pctldev = pinctrl_find_and_add_gpio_range(pinctl_name,
&pin_range->range);
if (IS_ERR(pin_range->pctldev)) {
ret = PTR_ERR(pin_range->pctldev);
- chip_err(gc, "could not create pin range\n");
+ gpiochip_err(gc, "could not create pin range\n");
kfree(pin_range);
return ret;
}
- chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
- gpio_offset, gpio_offset + npins - 1,
- pinctl_name,
- pin_offset, pin_offset + npins - 1);
+ if (pin_range->range.pins)
+ gpiochip_dbg(gc, "created GPIO range %d->%d ==> %s %d sparse PIN range { %d, ... }",
+ gpio_offset, gpio_offset + npins - 1,
+ pinctl_name, npins, pins[0]);
+ else
+ gpiochip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n",
+ gpio_offset, gpio_offset + npins - 1, pinctl_name,
+ pin_offset, pin_offset + npins - 1);
list_add_tail(&pin_range->node, &gdev->pin_ranges);
return 0;
}
-EXPORT_SYMBOL_GPL(gpiochip_add_pin_range);
+EXPORT_SYMBOL_GPL(gpiochip_add_pin_range_with_pins);
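
For illustration, a minimal caller of the new API might look like the sketch below; the "mychip" names and the "pinctrl-mychip" device name are invented, and the pin array must outlive the range because pinctrl keeps a pointer to it (the patch stores it in pin_range->range.pins):

/* Hypothetical driver snippet: map GPIOs 0..3 onto a sparse pin list. */
static const unsigned int mychip_pins[] = { 4, 7, 9, 12 };

static int mychip_add_ranges(struct gpio_chip *gc)
{
	/* pin_offset is ignored here because an explicit pin list is given */
	return gpiochip_add_pin_range_with_pins(gc, "pinctrl-mychip",
						0, 0, mychip_pins,
						ARRAY_SIZE(mychip_pins));
}
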
/**
* gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings
@@ -2438,10 +2465,10 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_REQUESTED, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_REQUESTED, &desc->flags))
return -EBUSY;
- offset = gpio_chip_hwgpio(desc);
+ offset = gpiod_hwgpio(desc);
if (!gpiochip_line_is_valid(guard.gc, offset))
return -EINVAL;
@@ -2467,7 +2494,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
return 0;
out_clear_bit:
- clear_bit(FLAG_REQUESTED, &desc->flags);
+ clear_bit(GPIOD_FLAG_REQUESTED, &desc->flags);
return ret;
}
@@ -2501,20 +2528,20 @@ static void gpiod_free_commit(struct gpio_desc *desc)
flags = READ_ONCE(desc->flags);
- if (guard.gc && test_bit(FLAG_REQUESTED, &flags)) {
+ if (guard.gc && test_bit(GPIOD_FLAG_REQUESTED, &flags)) {
if (guard.gc->free)
- guard.gc->free(guard.gc, gpio_chip_hwgpio(desc));
-
- clear_bit(FLAG_ACTIVE_LOW, &flags);
- clear_bit(FLAG_REQUESTED, &flags);
- clear_bit(FLAG_OPEN_DRAIN, &flags);
- clear_bit(FLAG_OPEN_SOURCE, &flags);
- clear_bit(FLAG_PULL_UP, &flags);
- clear_bit(FLAG_PULL_DOWN, &flags);
- clear_bit(FLAG_BIAS_DISABLE, &flags);
- clear_bit(FLAG_EDGE_RISING, &flags);
- clear_bit(FLAG_EDGE_FALLING, &flags);
- clear_bit(FLAG_IS_HOGGED, &flags);
+ guard.gc->free(guard.gc, gpiod_hwgpio(desc));
+
+ clear_bit(GPIOD_FLAG_ACTIVE_LOW, &flags);
+ clear_bit(GPIOD_FLAG_REQUESTED, &flags);
+ clear_bit(GPIOD_FLAG_OPEN_DRAIN, &flags);
+ clear_bit(GPIOD_FLAG_OPEN_SOURCE, &flags);
+ clear_bit(GPIOD_FLAG_PULL_UP, &flags);
+ clear_bit(GPIOD_FLAG_PULL_DOWN, &flags);
+ clear_bit(GPIOD_FLAG_BIAS_DISABLE, &flags);
+ clear_bit(GPIOD_FLAG_EDGE_RISING, &flags);
+ clear_bit(GPIOD_FLAG_EDGE_FALLING, &flags);
+ clear_bit(GPIOD_FLAG_IS_HOGGED, &flags);
#ifdef CONFIG_OF_DYNAMIC
WRITE_ONCE(desc->hog, NULL);
#endif
@@ -2557,7 +2584,7 @@ char *gpiochip_dup_line_label(struct gpio_chip *gc, unsigned int offset)
if (IS_ERR(desc))
return NULL;
- if (!test_bit(FLAG_REQUESTED, &desc->flags))
+ if (!test_bit(GPIOD_FLAG_REQUESTED, &desc->flags))
return NULL;
guard(srcu)(&desc->gdev->desc_srcu);
@@ -2607,7 +2634,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
int ret;
if (IS_ERR(desc)) {
- chip_err(gc, "failed to get GPIO %s descriptor\n", name);
+ gpiochip_err(gc, "failed to get GPIO %s descriptor\n", name);
return desc;
}
@@ -2618,7 +2645,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret) {
gpiod_free_commit(desc);
- chip_err(gc, "setup of own GPIO %s failed\n", name);
+ gpiochip_err(gc, "setup of own GPIO %s failed\n", name);
return ERR_PTR(ret);
}
@@ -2663,7 +2690,7 @@ int gpio_do_set_config(struct gpio_desc *desc, unsigned long config)
if (!guard.gc->set_config)
return -ENOTSUPP;
- ret = guard.gc->set_config(guard.gc, gpio_chip_hwgpio(desc), config);
+ ret = guard.gc->set_config(guard.gc, gpiod_hwgpio(desc), config);
if (ret > 0)
ret = -EBADE;
@@ -2694,7 +2721,7 @@ static int gpio_set_config_with_argument_optional(struct gpio_desc *desc,
u32 argument)
{
struct device *dev = &desc->gdev->dev;
- int gpio = gpio_chip_hwgpio(desc);
+ int gpio = gpiod_hwgpio(desc);
int ret;
ret = gpio_set_config_with_argument(desc, mode, argument);
@@ -2725,11 +2752,11 @@ static int gpio_set_bias(struct gpio_desc *desc)
flags = READ_ONCE(desc->flags);
- if (test_bit(FLAG_BIAS_DISABLE, &flags))
+ if (test_bit(GPIOD_FLAG_BIAS_DISABLE, &flags))
bias = PIN_CONFIG_BIAS_DISABLE;
- else if (test_bit(FLAG_PULL_UP, &flags))
+ else if (test_bit(GPIOD_FLAG_PULL_UP, &flags))
bias = PIN_CONFIG_BIAS_PULL_UP;
- else if (test_bit(FLAG_PULL_DOWN, &flags))
+ else if (test_bit(GPIOD_FLAG_PULL_DOWN, &flags))
bias = PIN_CONFIG_BIAS_PULL_DOWN;
else
return 0;
@@ -2857,9 +2884,9 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
*/
if (guard.gc->direction_input) {
ret = gpiochip_direction_input(guard.gc,
- gpio_chip_hwgpio(desc));
+ gpiod_hwgpio(desc));
} else if (guard.gc->get_direction) {
- dir = gpiochip_get_direction(guard.gc, gpio_chip_hwgpio(desc));
+ dir = gpiochip_get_direction(guard.gc, gpiod_hwgpio(desc));
if (dir < 0)
return dir;
@@ -2871,7 +2898,7 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
}
}
if (ret == 0) {
- clear_bit(FLAG_IS_OUT, &desc->flags);
+ clear_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
ret = gpio_set_bias(desc);
}
@@ -2918,12 +2945,12 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
if (guard.gc->direction_output) {
ret = gpiochip_direction_output(guard.gc,
- gpio_chip_hwgpio(desc), val);
+ gpiod_hwgpio(desc), val);
} else {
/* Check that we are in output mode if we can */
if (guard.gc->get_direction) {
dir = gpiochip_get_direction(guard.gc,
- gpio_chip_hwgpio(desc));
+ gpiod_hwgpio(desc));
if (dir < 0)
return dir;
@@ -2938,13 +2965,13 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
* If we can't actively set the direction, we are some
* output-only chip, so just drive the output as desired.
*/
- ret = gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), val);
+ ret = gpiochip_set(guard.gc, gpiod_hwgpio(desc), val);
if (ret)
return ret;
}
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
trace_gpio_value(desc_to_gpio(desc), 0, val);
trace_gpio_direction(desc_to_gpio(desc), 0, ret);
return ret;
@@ -3010,21 +3037,21 @@ int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
flags = READ_ONCE(desc->flags);
- if (test_bit(FLAG_ACTIVE_LOW, &flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &flags))
value = !value;
else
value = !!value;
/* GPIOs used for enabled IRQs shall not be set as output */
- if (test_bit(FLAG_USED_AS_IRQ, &flags) &&
- test_bit(FLAG_IRQ_IS_ENABLED, &flags)) {
+ if (test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags) &&
+ test_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &flags)) {
gpiod_err(desc,
"%s: tried to set a GPIO tied to an IRQ as output\n",
__func__);
return -EIO;
}
- if (test_bit(FLAG_OPEN_DRAIN, &flags)) {
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &flags)) {
/* First see if we can enable open drain in hardware */
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_DRAIN);
if (!ret)
@@ -3032,7 +3059,7 @@ int gpiod_direction_output_nonotify(struct gpio_desc *desc, int value)
/* Emulate open drain by not actively driving the line high */
if (value)
goto set_output_flag;
- } else if (test_bit(FLAG_OPEN_SOURCE, &flags)) {
+ } else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &flags)) {
ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE);
if (!ret)
goto set_output_value;
@@ -3059,7 +3086,7 @@ set_output_flag:
* set the IS_OUT flag or otherwise we won't be able to set the line
* value anymore.
*/
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
return 0;
}
@@ -3089,7 +3116,7 @@ int gpiod_enable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
}
ret = guard.gc->en_hw_timestamp(guard.gc,
- gpio_chip_hwgpio(desc), flags);
+ gpiod_hwgpio(desc), flags);
if (ret)
gpiod_warn(desc, "%s: hw ts request failed\n", __func__);
@@ -3121,7 +3148,7 @@ int gpiod_disable_hw_timestamp_ns(struct gpio_desc *desc, unsigned long flags)
return -ENOTSUPP;
}
- ret = guard.gc->dis_hw_timestamp(guard.gc, gpio_chip_hwgpio(desc),
+ ret = guard.gc->dis_hw_timestamp(guard.gc, gpiod_hwgpio(desc),
flags);
if (ret)
gpiod_warn(desc, "%s: hw ts release failed\n", __func__);
@@ -3199,10 +3226,10 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
VALIDATE_DESC(desc);
/*
- * Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
+ * Handle GPIOD_FLAG_TRANSITORY first, enabling queries to gpiolib for
* persistence state.
*/
- assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
+ assign_bit(GPIOD_FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
return gpio_set_config_with_argument_optional(desc,
@@ -3220,7 +3247,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
int gpiod_is_active_low(const struct gpio_desc *desc)
{
VALIDATE_DESC(desc);
- return test_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ return test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiod_is_active_low);
@@ -3231,7 +3258,7 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
void gpiod_toggle_active_low(struct gpio_desc *desc)
{
VALIDATE_DESC_VOID(desc);
- change_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ change_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_CONFIG);
}
EXPORT_SYMBOL_GPL(gpiod_toggle_active_low);
@@ -3252,7 +3279,7 @@ static int gpiochip_get(struct gpio_chip *gc, unsigned int offset)
static int gpio_chip_get_value(struct gpio_chip *gc, const struct gpio_desc *desc)
{
- return gc->get ? gpiochip_get(gc, gpio_chip_hwgpio(desc)) : -EIO;
+ return gc->get ? gpiochip_get(gc, gpiod_hwgpio(desc)) : -EIO;
}
/* I/O calls are only valid after configuration completed; the relevant
@@ -3412,7 +3439,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
first = i;
do {
const struct gpio_desc *desc = desc_array[i];
- int hwgpio = gpio_chip_hwgpio(desc);
+ int hwgpio = gpiod_hwgpio(desc);
__set_bit(hwgpio, mask);
i++;
@@ -3434,10 +3461,10 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
for (j = first; j < i; ) {
const struct gpio_desc *desc = desc_array[j];
- int hwgpio = gpio_chip_hwgpio(desc);
+ int hwgpio = gpiod_hwgpio(desc);
int value = test_bit(hwgpio, bits);
- if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (!raw && test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
__assign_bit(j, value_bitmap, value);
trace_gpio_value(desc_to_gpio(desc), 1, value);
@@ -3499,7 +3526,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
if (value < 0)
return value;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return value;
@@ -3571,7 +3598,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value);
*/
static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
{
- int ret = 0, offset = gpio_chip_hwgpio(desc);
+ int ret = 0, offset = gpiod_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -3582,7 +3609,7 @@ static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
} else {
ret = gpiochip_direction_output(guard.gc, offset, 0);
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
}
trace_gpio_direction(desc_to_gpio(desc), value, ret);
if (ret < 0)
@@ -3600,7 +3627,7 @@ static int gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
*/
static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
{
- int ret = 0, offset = gpio_chip_hwgpio(desc);
+ int ret = 0, offset = gpiod_hwgpio(desc);
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -3609,7 +3636,7 @@ static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
if (value) {
ret = gpiochip_direction_output(guard.gc, offset, 1);
if (!ret)
- set_bit(FLAG_IS_OUT, &desc->flags);
+ set_bit(GPIOD_FLAG_IS_OUT, &desc->flags);
} else {
ret = gpiochip_direction_input(guard.gc, offset);
}
@@ -3624,7 +3651,7 @@ static int gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value)
static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
{
- if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags)))
return -EPERM;
CLASS(gpio_chip_guard, guard)(desc);
@@ -3632,7 +3659,7 @@ static int gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
return -ENODEV;
trace_gpio_value(desc_to_gpio(desc), 0, value);
- return gpiochip_set(guard.gc, gpio_chip_hwgpio(desc), value);
+ return gpiochip_set(guard.gc, gpiod_hwgpio(desc), value);
}
/*
@@ -3694,7 +3721,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
WARN_ON(array_info->gdev->can_sleep);
for (i = 0; i < array_size; i++) {
- if (unlikely(!test_bit(FLAG_IS_OUT,
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT,
&desc_array[i]->flags)))
return -EPERM;
}
@@ -3755,10 +3782,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
do {
struct gpio_desc *desc = desc_array[i];
- int hwgpio = gpio_chip_hwgpio(desc);
+ int hwgpio = gpiod_hwgpio(desc);
int value = test_bit(i, value_bitmap);
- if (unlikely(!test_bit(FLAG_IS_OUT, &desc->flags)))
+ if (unlikely(!test_bit(GPIOD_FLAG_IS_OUT, &desc->flags)))
return -EPERM;
/*
@@ -3768,16 +3795,16 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
*/
if (!raw && !(array_info &&
test_bit(i, array_info->invert_mask)) &&
- test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
trace_gpio_value(desc_to_gpio(desc), 0, value);
/*
* collect all normal outputs belonging to the same chip
* open drain and open source outputs are set individually
*/
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags) && !raw) {
gpio_set_open_drain_value_commit(desc, value);
- } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
+ } else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags) && !raw) {
gpio_set_open_source_value_commit(desc, value);
} else {
__set_bit(hwgpio, mask);
@@ -3843,12 +3870,12 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
*/
static int gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
{
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
- if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags))
return gpio_set_open_drain_value_commit(desc, value);
- else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+ else if (test_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags))
return gpio_set_open_source_value_commit(desc, value);
return gpiod_set_raw_value_commit(desc, value);
@@ -3971,6 +3998,26 @@ int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
/**
+ * gpiod_is_shared() - check if this GPIO can be shared by multiple consumers
+ * @desc: GPIO to inspect
+ *
+ * Returns:
+ * True if this GPIO can be shared by multiple consumers at once. False if it's
+ * a regular, exclusive GPIO.
+ *
+ * Note:
+ * This function returning true does not mean that this GPIO is currently
+ * being shared. It means the firmware configuration marks the line as
+ * shareable by multiple consumers and that the GPIO core is in charge of
+ * arbitrating the access.
+ */
+bool gpiod_is_shared(const struct gpio_desc *desc)
+{
+ return test_bit(GPIOD_FLAG_SHARED_PROXY, &desc->flags);
+}
+EXPORT_SYMBOL_GPL(gpiod_is_shared);
+
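
A hedged consumer-side sketch of gpiod_is_shared(); the "reset" con_id and my_probe() are invented for the example:

static int my_probe(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* True means the core arbitrates access to the line, not that
	 * another consumer currently holds it. */
	if (gpiod_is_shared(reset))
		dev_dbg(dev, "reset GPIO is a shared line\n");

	return 0;
}
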
+/**
* gpiod_to_irq() - return the IRQ corresponding to a GPIO
* @desc: gpio whose IRQ will be returned (already requested)
*
@@ -3995,7 +4042,7 @@ int gpiod_to_irq(const struct gpio_desc *desc)
if (!gc)
return -ENODEV;
- offset = gpio_chip_hwgpio(desc);
+ offset = gpiod_hwgpio(desc);
if (gc->to_irq) {
ret = gc->to_irq(gc, offset);
if (ret)
@@ -4045,23 +4092,23 @@ int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset)
int dir = gpiod_get_direction(desc);
if (dir < 0) {
- chip_err(gc, "%s: cannot get GPIO direction\n",
- __func__);
+ gpiochip_err(gc, "%s: cannot get GPIO direction\n",
+ __func__);
return dir;
}
}
/* To be valid for IRQ the line needs to be input or open drain */
- if (test_bit(FLAG_IS_OUT, &desc->flags) &&
- !test_bit(FLAG_OPEN_DRAIN, &desc->flags)) {
- chip_err(gc,
- "%s: tried to flag a GPIO set as output for IRQ\n",
- __func__);
+ if (test_bit(GPIOD_FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags)) {
+ gpiochip_err(gc,
+ "%s: tried to flag a GPIO set as output for IRQ\n",
+ __func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &desc->flags);
- set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ set_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags);
+ set_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
return 0;
}
@@ -4083,8 +4130,8 @@ void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset)
if (IS_ERR(desc))
return;
- clear_bit(FLAG_USED_AS_IRQ, &desc->flags);
- clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ clear_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags);
+ clear_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
@@ -4093,8 +4140,8 @@ void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset)
struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
- !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags)))
- clear_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ !WARN_ON(!test_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags)))
+ clear_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
EXPORT_SYMBOL_GPL(gpiochip_disable_irq);
@@ -4103,14 +4150,14 @@ void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset)
struct gpio_desc *desc = gpiochip_get_desc(gc, offset);
if (!IS_ERR(desc) &&
- !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) {
+ !WARN_ON(!test_bit(GPIOD_FLAG_USED_AS_IRQ, &desc->flags))) {
/*
* We must not be output when using IRQ UNLESS we are
* open drain.
*/
- WARN_ON(test_bit(FLAG_IS_OUT, &desc->flags) &&
- !test_bit(FLAG_OPEN_DRAIN, &desc->flags));
- set_bit(FLAG_IRQ_IS_ENABLED, &desc->flags);
+ WARN_ON(test_bit(GPIOD_FLAG_IS_OUT, &desc->flags) &&
+ !test_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags));
+ set_bit(GPIOD_FLAG_IRQ_IS_ENABLED, &desc->flags);
}
}
EXPORT_SYMBOL_GPL(gpiochip_enable_irq);
@@ -4120,7 +4167,7 @@ bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_irq);
@@ -4133,7 +4180,7 @@ int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset)
ret = gpiochip_lock_as_irq(gc, offset);
if (ret) {
- chip_err(gc, "unable to lock HW IRQ %u for IRQ\n", offset);
+ gpiochip_err(gc, "unable to lock HW IRQ %u for IRQ\n", offset);
module_put(gc->gpiodev->owner);
return ret;
}
@@ -4153,7 +4200,7 @@ bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain);
@@ -4162,7 +4209,7 @@ bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return test_bit(FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
+ return test_bit(GPIOD_FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source);
@@ -4171,7 +4218,7 @@ bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset)
if (offset >= gc->ngpio)
return false;
- return !test_bit(FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
+ return !test_bit(GPIOD_FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags);
}
EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent);
@@ -4213,7 +4260,7 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
if (value < 0)
return value;
- if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+ if (test_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
return value;
@@ -4604,6 +4651,23 @@ static struct gpio_desc *gpiod_find_by_fwnode(struct fwnode_handle *fwnode,
return desc;
}
+static struct gpio_desc *gpiod_fwnode_lookup(struct fwnode_handle *fwnode,
+ struct device *consumer,
+ const char *con_id,
+ unsigned int idx,
+ enum gpiod_flags *flags,
+ unsigned long *lookupflags)
+{
+ struct gpio_desc *desc;
+
+ desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx, flags, lookupflags);
+ if (gpiod_not_found(desc) && !IS_ERR_OR_NULL(fwnode))
+ desc = gpiod_find_by_fwnode(fwnode->secondary, consumer, con_id,
+ idx, flags, lookupflags);
+
+ return desc;
+}
+
struct gpio_desc *gpiod_find_and_request(struct device *consumer,
struct fwnode_handle *fwnode,
const char *con_id,
@@ -4622,13 +4686,31 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
int ret = 0;
scoped_guard(srcu, &gpio_devices_srcu) {
- desc = gpiod_find_by_fwnode(fwnode, consumer, con_id, idx,
- &flags, &lookupflags);
+ desc = gpiod_fwnode_lookup(fwnode, consumer, con_id, idx,
+ &flags, &lookupflags);
+ if (!IS_ERR_OR_NULL(desc) &&
+ test_bit(GPIOD_FLAG_SHARED, &desc->flags)) {
+ /*
+ * We're dealing with a GPIO shared by multiple
+ * consumers. This is the moment to add the machine
+ * lookup table for the proxy device as previously
+ * we only knew the consumer's fwnode.
+ */
+ ret = gpio_shared_add_proxy_lookup(consumer, lookupflags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* Trigger platform lookup for shared GPIO proxy. */
+ desc = ERR_PTR(-ENOENT);
+ /* Trigger it even for fwnode-only gpiod_get(). */
+ platform_lookup_allowed = true;
+ }
+
if (gpiod_not_found(desc) && platform_lookup_allowed) {
/*
* Either we are not using DT or ACPI, or their lookup
- * did not return a result. In that case, use platform
- * lookup as a fallback.
+ * did not return a result or this is a shared GPIO. In
+ * that case, use platform lookup as a fallback.
*/
dev_dbg(consumer,
"using lookup tables for GPIO lookup\n");
@@ -4651,14 +4733,19 @@ struct gpio_desc *gpiod_find_and_request(struct device *consumer,
return ERR_PTR(ret);
/*
- * This happens when there are several consumers for
- * the same GPIO line: we just return here without
- * further initialization. It is a bit of a hack.
- * This is necessary to support fixed regulators.
+ * This happens when there are several consumers for the same
+ * GPIO line: we just return here without further
+ * initialization. It's a hack introduced long ago to support
+ * fixed regulators. We now have a better solution with
+ * automated scanning where affected platforms just need to
+ * select the provided Kconfig option.
*
- * FIXME: Make this more sane and safe.
+ * FIXME: Remove the GPIOD_FLAGS_BIT_NONEXCLUSIVE flag after
+ * making sure all platforms use the new mechanism.
*/
- dev_info(consumer, "nonexclusive access to GPIO for %s\n", name);
+ dev_info(consumer,
+ "nonexclusive access to GPIO for %s, consider updating your code to using gpio-shared-proxy\n",
+ name);
return desc;
}
@@ -4795,10 +4882,10 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
int ret;
if (lflags & GPIO_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ set_bit(GPIOD_FLAG_ACTIVE_LOW, &desc->flags);
if (lflags & GPIO_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags);
else if (dflags & GPIOD_FLAGS_BIT_OPEN_DRAIN) {
/*
* This enforces open drain mode from the consumer side.
@@ -4806,13 +4893,13 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
* should *REALLY* have specified them as open drain in the
* first place, so print a little warning here.
*/
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_DRAIN, &desc->flags);
gpiod_warn(desc,
"enforced open drain please flag it properly in DT/ACPI DSDT/board file\n");
}
if (lflags & GPIO_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+ set_bit(GPIOD_FLAG_OPEN_SOURCE, &desc->flags);
if (((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DOWN)) ||
((lflags & GPIO_PULL_UP) && (lflags & GPIO_PULL_DISABLE)) ||
@@ -4823,11 +4910,11 @@ int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
}
if (lflags & GPIO_PULL_UP)
- set_bit(FLAG_PULL_UP, &desc->flags);
+ set_bit(GPIOD_FLAG_PULL_UP, &desc->flags);
else if (lflags & GPIO_PULL_DOWN)
- set_bit(FLAG_PULL_DOWN, &desc->flags);
+ set_bit(GPIOD_FLAG_PULL_DOWN, &desc->flags);
else if (lflags & GPIO_PULL_DISABLE)
- set_bit(FLAG_BIAS_DISABLE, &desc->flags);
+ set_bit(GPIOD_FLAG_BIAS_DISABLE, &desc->flags);
ret = gpiod_set_transitory(desc, (lflags & GPIO_TRANSITORY));
if (ret < 0)
@@ -4932,15 +5019,15 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
if (!guard.gc)
return -ENODEV;
- if (test_and_set_bit(FLAG_IS_HOGGED, &desc->flags))
+ if (test_and_set_bit(GPIOD_FLAG_IS_HOGGED, &desc->flags))
return 0;
- hwnum = gpio_chip_hwgpio(desc);
+ hwnum = gpiod_hwgpio(desc);
local_desc = gpiochip_request_own_desc(guard.gc, hwnum, name,
lflags, dflags);
if (IS_ERR(local_desc)) {
- clear_bit(FLAG_IS_HOGGED, &desc->flags);
+ clear_bit(GPIOD_FLAG_IS_HOGGED, &desc->flags);
ret = PTR_ERR(local_desc);
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n",
name, gdev->label, hwnum, ret);
@@ -4963,7 +5050,7 @@ static void gpiochip_free_hogs(struct gpio_chip *gc)
{
struct gpio_desc *desc;
- for_each_gpio_desc_with_flag(gc, desc, FLAG_IS_HOGGED)
+ for_each_gpio_desc_with_flag(gc, desc, GPIOD_FLAG_IS_HOGGED)
gpiochip_free_own_desc(desc);
}
@@ -5016,7 +5103,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
* If pin hardware number of array member 0 is also 0, select
* its chip as a candidate for fast bitmap processing path.
*/
- if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
+ if (descs->ndescs == 0 && gpiod_hwgpio(desc) == 0) {
struct gpio_descs *array;
bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ?
@@ -5061,7 +5148,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
* Detect array members which belong to the 'fast' chip
* but their pins are not in hardware order.
*/
- else if (gpio_chip_hwgpio(desc) != descs->ndescs) {
+ else if (gpiod_hwgpio(desc) != descs->ndescs) {
/*
* Don't use fast path if all array members processed so
* far belong to the same chip as this one but its pin
@@ -5078,8 +5165,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
} else {
dflags = READ_ONCE(desc->flags);
/* Exclude open drain or open source from fast output */
- if (test_bit(FLAG_OPEN_DRAIN, &dflags) ||
- test_bit(FLAG_OPEN_SOURCE, &dflags))
+ if (test_bit(GPIOD_FLAG_OPEN_DRAIN, &dflags) ||
+ test_bit(GPIOD_FLAG_OPEN_SOURCE, &dflags))
__clear_bit(descs->ndescs,
array_info->set_mask);
/* Identify 'fast' pins which require invertion */
@@ -5237,12 +5324,12 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev)
for_each_gpio_desc(gc, desc) {
guard(srcu)(&desc->gdev->desc_srcu);
flags = READ_ONCE(desc->flags);
- is_irq = test_bit(FLAG_USED_AS_IRQ, &flags);
- if (is_irq || test_bit(FLAG_REQUESTED, &flags)) {
+ is_irq = test_bit(GPIOD_FLAG_USED_AS_IRQ, &flags);
+ if (is_irq || test_bit(GPIOD_FLAG_REQUESTED, &flags)) {
gpiod_get_direction(desc);
- is_out = test_bit(FLAG_IS_OUT, &flags);
+ is_out = test_bit(GPIOD_FLAG_IS_OUT, &flags);
value = gpio_chip_get_value(gc, desc);
- active_low = test_bit(FLAG_ACTIVE_LOW, &flags);
+ active_low = test_bit(GPIOD_FLAG_ACTIVE_LOW, &flags);
seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n",
gpio, desc->name ?: "", gpiod_get_label(desc),
is_out ? "out" : "in ",
@@ -5268,6 +5355,8 @@ static void *gpiolib_seq_start(struct seq_file *s, loff_t *pos)
struct gpio_device *gdev;
loff_t index = *pos;
+ s->private = NULL;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
@@ -5301,7 +5390,11 @@ static void *gpiolib_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void gpiolib_seq_stop(struct seq_file *s, void *v)
{
- struct gpiolib_seq_priv *priv = s->private;
+ struct gpiolib_seq_priv *priv;
+
+ priv = s->private;
+ if (!priv)
+ return;
srcu_read_unlock(&gpio_devices_srcu, priv->idx);
kfree(priv);
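
The two hunks above harden the seq_file callbacks: the core invokes ->stop() even when ->start() returned NULL, and a stale s->private left over from a previous traversal must not be freed again. A minimal sketch of that contract, with invented "my_" names:

struct my_priv { int srcu_idx; };

static void *my_seq_start(struct seq_file *s, loff_t *pos)
{
	struct my_priv *priv;

	s->private = NULL;	/* clear stale state from a prior run */

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return NULL;	/* ->stop() is still called after this */

	s->private = priv;
	return priv;
}

static void my_seq_stop(struct seq_file *s, void *v)
{
	struct my_priv *priv = s->private;

	if (!priv)		/* ->start() failed before setup */
		return;

	kfree(priv);
}
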
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 9b74738a9ca5..77f6f2936dc2 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -186,24 +186,26 @@ struct gpio_desc {
struct gpio_device *gdev;
unsigned long flags;
/* flag symbols are bit numbers */
-#define FLAG_REQUESTED 0
-#define FLAG_IS_OUT 1
-#define FLAG_EXPORT 2 /* protected by sysfs_lock */
-#define FLAG_SYSFS 3 /* exported via /sys/class/gpio/control */
-#define FLAG_ACTIVE_LOW 6 /* value has active low */
-#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
-#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
-#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
-#define FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
-#define FLAG_IS_HOGGED 11 /* GPIO is hogged */
-#define FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
-#define FLAG_PULL_UP 13 /* GPIO has pull up enabled */
-#define FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
-#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
-#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
-#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
-#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
-#define FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
+#define GPIOD_FLAG_REQUESTED 0 /* GPIO is in use */
+#define GPIOD_FLAG_IS_OUT 1 /* GPIO is in output mode */
+#define GPIOD_FLAG_EXPORT 2 /* GPIO is exported to user-space */
+#define GPIOD_FLAG_SYSFS 3 /* GPIO is exported via /sys/class/gpio */
+#define GPIOD_FLAG_ACTIVE_LOW 6 /* GPIO is active-low */
+#define GPIOD_FLAG_OPEN_DRAIN 7 /* GPIO is open drain type */
+#define GPIOD_FLAG_OPEN_SOURCE 8 /* GPIO is open source type */
+#define GPIOD_FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define GPIOD_FLAG_IRQ_IS_ENABLED 10 /* GPIO is connected to an enabled IRQ */
+#define GPIOD_FLAG_IS_HOGGED 11 /* GPIO is hogged */
+#define GPIOD_FLAG_TRANSITORY 12 /* GPIO may lose value in sleep or reset */
+#define GPIOD_FLAG_PULL_UP 13 /* GPIO has pull up enabled */
+#define GPIOD_FLAG_PULL_DOWN 14 /* GPIO has pull down enabled */
+#define GPIOD_FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
+#define GPIOD_FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
+#define GPIOD_FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
+#define GPIOD_FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
+#define GPIOD_FLAG_EVENT_CLOCK_HTE 19 /* GPIO CDEV reports hardware timestamps in events */
+#define GPIOD_FLAG_SHARED 20 /* GPIO is shared by multiple consumers */
+#define GPIOD_FLAG_SHARED_PROXY 21 /* GPIO is a virtual proxy to a physically shared pin. */
/* Connection label */
struct gpio_desc_label __rcu *label;
@@ -273,49 +275,30 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev);
struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
const char *gpiod_get_label(struct gpio_desc *desc);
-/*
- * Return the GPIO number of the passed descriptor relative to its chip
- */
-static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
-{
- return desc - &desc->gdev->descs[0];
-}
-
/* With descriptor prefix */
-#define gpiod_err(desc, fmt, ...) \
+#define __gpiod_pr(level, desc, fmt, ...) \
do { \
scoped_guard(srcu, &desc->gdev->desc_srcu) { \
- pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
- gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
+ pr_##level("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
+ gpiod_get_label(desc) ?: "?", ##__VA_ARGS__); \
} \
} while (0)
-#define gpiod_warn(desc, fmt, ...) \
-do { \
- scoped_guard(srcu, &desc->gdev->desc_srcu) { \
- pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
- gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
- } \
-} while (0)
+#define gpiod_err(desc, fmt, ...) __gpiod_pr(err, desc, fmt, ##__VA_ARGS__)
+#define gpiod_warn(desc, fmt, ...) __gpiod_pr(warn, desc, fmt, ##__VA_ARGS__)
+#define gpiod_dbg(desc, fmt, ...) __gpiod_pr(debug, desc, fmt, ##__VA_ARGS__)
+
+/* With chip prefix */
-#define gpiod_dbg(desc, fmt, ...) \
+#define __gpiochip_pr(level, gc, fmt, ...) \
do { \
- scoped_guard(srcu, &desc->gdev->desc_srcu) { \
- pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), \
- gpiod_get_label(desc) ? : "?", ##__VA_ARGS__); \
- } \
+ dev_##level(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__); \
} while (0)
-/* With chip prefix */
-
-#define chip_err(gc, fmt, ...) \
- dev_err(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
-#define chip_warn(gc, fmt, ...) \
- dev_warn(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
-#define chip_info(gc, fmt, ...) \
- dev_info(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
-#define chip_dbg(gc, fmt, ...) \
- dev_dbg(&gc->gpiodev->dev, "(%s): " fmt, gc->label, ##__VA_ARGS__)
+#define gpiochip_err(gc, fmt, ...) __gpiochip_pr(err, gc, fmt, ##__VA_ARGS__)
+#define gpiochip_warn(gc, fmt, ...) __gpiochip_pr(warn, gc, fmt, ##__VA_ARGS__)
+#define gpiochip_info(gc, fmt, ...) __gpiochip_pr(info, gc, fmt, ##__VA_ARGS__)
+#define gpiochip_dbg(gc, fmt, ...) __gpiochip_pr(dbg, gc, fmt, ##__VA_ARGS__)
#endif /* GPIOLIB_H */
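
The refactor above folds the four per-level logging macros into one via token pasting: __gpiod_pr(err, ...) expands to pr_err(...), and __gpiochip_pr pastes onto dev_##level (hence gpiochip_dbg passes "dbg" to form dev_dbg, while gpiod_dbg passes "debug" to form pr_debug). A standalone sketch of the pattern; my_pr() is not part of the patch:

#define my_pr(level, fmt, ...) \
	pr_##level("mydrv: " fmt, ##__VA_ARGS__)

static void my_report(int val)
{
	my_pr(warn, "value %d out of range\n", val);	/* -> pr_warn() */
	my_pr(debug, "probe OK\n");			/* -> pr_debug() */
}
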
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f7ea8e895c0c..7e6bc0b3a589 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -396,9 +396,11 @@ source "drivers/gpu/drm/sprd/Kconfig"
source "drivers/gpu/drm/imagination/Kconfig"
+source "drivers/gpu/drm/tyr/Kconfig"
+
config DRM_HYPERV
tristate "DRM Support for Hyper-V synthetic video device"
- depends on DRM && PCI && HYPERV
+ depends on DRM && PCI && HYPERV_VMBUS
select DRM_CLIENT_SELECTION
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4dafbdc8f86a..0e1c668b46d2 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -6,7 +6,7 @@
CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
# Unconditionally enable W=1 warnings locally
-# --- begin copy-paste W=1 warnings from scripts/Makefile.extrawarn
+# --- begin copy-paste W=1 warnings from scripts/Makefile.warn
subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
subdir-ccflags-y += $(call cc-option, -Wrestrict)
subdir-ccflags-y += -Wmissing-format-attribute
@@ -41,6 +41,7 @@ drm-y := \
drm_bridge.o \
drm_cache.o \
drm_color_mgmt.o \
+ drm_colorop.o \
drm_connector.o \
drm_crtc.o \
drm_displayid.o \
@@ -76,7 +77,8 @@ drm-y := \
drm-$(CONFIG_DRM_CLIENT) += \
drm_client.o \
drm_client_event.o \
- drm_client_modeset.o
+ drm_client_modeset.o \
+ drm_client_sysrq.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
@@ -150,7 +152,8 @@ drm_kms_helper-y := \
drm_plane_helper.o \
drm_probe_helper.o \
drm_self_refresh_helper.o \
- drm_simple_kms_helper.o
+ drm_simple_kms_helper.o \
+ drm_vblank_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
@@ -220,6 +223,7 @@ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_PANTHOR) += panthor/
+obj-$(CONFIG_DRM_TYR) += tyr/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
obj-$(CONFIG_DRM_TIDSS) += tidss/
@@ -244,7 +248,7 @@ always-$(CONFIG_DRM_HEADER_TEST) += \
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
cmd_hdrtest = \
$(CC) $(c_flags) -fsyntax-only -x c /dev/null -include $< -include $<; \
- PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
+ PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(if $(CONFIG_WERROR)$(CONFIG_DRM_WERROR),-Werror) $<; \
touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
diff --git a/drivers/gpu/drm/adp/adp_drv.c b/drivers/gpu/drm/adp/adp_drv.c
index 54cde090c3f4..4554cf75565e 100644
--- a/drivers/gpu/drm/adp/adp_drv.c
+++ b/drivers/gpu/drm/adp/adp_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 1acfed2f92ef..7f515be5185d 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -43,14 +43,16 @@ config DRM_AMDGPU_SI
bool "Enable amdgpu support for SI parts"
depends on DRM_AMDGPU
help
- Choose this option if you want to enable experimental support
+ Choose this option if you want to enable support
for SI (Southern Islands) asics.
- SI is already supported in radeon. Experimental support for SI
- in amdgpu will be disabled by default and is still provided by
- radeon. Use module options to override this:
+ SI (Southern Islands) parts are first-generation GCN GPUs,
+ supported by both drivers: radeon (old) and amdgpu (new).
+ By default, SI dedicated GPUs are supported by amdgpu.
- radeon.si_support=0 amdgpu.si_support=1
+ Use module options to override this:
+ To use radeon for SI,
+ radeon.si_support=1 amdgpu.si_support=0
config DRM_AMDGPU_CIK
bool "Enable amdgpu support for CIK parts"
@@ -59,11 +61,17 @@ config DRM_AMDGPU_CIK
Choose this option if you want to enable support for CIK (Sea
Islands) asics.
- CIK is already supported in radeon. Support for CIK in amdgpu
- will be disabled by default and is still provided by radeon.
- Use module options to override this:
+ CIK (Sea Islands) parts are second-generation GCN GPUs,
+ supported by both drivers: radeon (old) and amdgpu (new).
+ By default, CIK dedicated GPUs are supported by amdgpu
+ and CIK APUs are supported by radeon.
+ Use module options to override this:
+ To use amdgpu for CIK,
radeon.cik_support=0 amdgpu.cik_support=1
+ To use radeon for CIK,
+ radeon.cik_support=1 amdgpu.cik_support=0
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 930de203d533..c88760fb52ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -37,7 +37,8 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_DISPLAY_PATH)/modules/inc \
-I$(FULL_AMD_DISPLAY_PATH)/dc \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
- -I$(FULL_AMD_PATH)/amdkfd
+ -I$(FULL_AMD_PATH)/amdkfd \
+ -I$(FULL_AMD_PATH)/ras/ras_mgr
# Locally disable W=1 warnings enabled in drm subsystem Makefile
subdir-ccflags-y += -Wno-override-init
@@ -77,14 +78,15 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \
dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \
- uvd_v3_1.o
+ uvd_v3_1.o vce_v1_0.o
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
- nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
+ cyan_skillfish_reg_init.o
# add DF block
amdgpu-y += \
@@ -137,7 +139,6 @@ amdgpu-y += \
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o \
amdgpu_vkms.o
# add GFX block
@@ -324,4 +325,9 @@ amdgpu-y += \
isp_v4_1_1.o
endif
+AMD_GPU_RAS_PATH := ../ras
+AMD_GPU_RAS_FULL_PATH := $(FULL_AMD_PATH)/ras
+include $(AMD_GPU_RAS_FULL_PATH)/Makefile
+amdgpu-y += $(AMD_GPU_RAS_FILES)
+
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 9569dc16dd3d..daa7b23bc775 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -88,6 +88,10 @@ static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
uint32_t ip_block;
int r, i;
+ /* Skip suspend of SDMA IP versions >= 4.4.2. They are multi-AID. */
+ if (adev->aid_mask)
+ ip_block_mask &= ~BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index ef3af170dda4..9f9774f58ce1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -63,6 +63,7 @@
#include "kgd_pp_interface.h"
#include "amd_shared.h"
+#include "amdgpu_utils.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
@@ -371,13 +372,15 @@ void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
u64 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type);
int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
-#define AMDGPU_MAX_IP_NUM 16
+#define AMDGPU_MAX_IP_NUM AMD_IP_BLOCK_TYPE_NUM
struct amdgpu_ip_block_status {
bool valid;
@@ -434,7 +437,6 @@ struct amdgpu_clock {
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
- uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
@@ -545,7 +547,7 @@ struct amdgpu_wb {
* this value can be accessed directly by using the offset as an index.
* For the GPU address, it is necessary to use gpu_addr and the offset.
*/
- volatile uint32_t *wb;
+ uint32_t *wb;
/**
* @gpu_addr:
@@ -721,7 +723,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
- volatile uint32_t *ptr;
+ uint32_t *ptr;
u64 gpu_addr;
};
@@ -752,6 +754,7 @@ typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, u
struct amdgpu_mmio_remap {
u32 reg_offset;
resource_size_t bus_addr;
+ struct amdgpu_bo *bo;
};
/* Define the HW IP blocks will be used in driver , add more if necessary */
@@ -819,13 +822,25 @@ struct amdgpu_ip_map_info {
uint32_t mask);
};
+enum amdgpu_uid_type {
+ AMDGPU_UID_TYPE_XCD,
+ AMDGPU_UID_TYPE_AID,
+ AMDGPU_UID_TYPE_SOC,
+ AMDGPU_UID_TYPE_MAX
+};
+
+#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */
+
+struct amdgpu_uid {
+ uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX];
+ struct amdgpu_device *adev;
+};
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
};
-struct ip_discovery_top;
-
/* polaris10 kickers */
#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \
((rid == 0xE3) || \
@@ -896,6 +911,9 @@ struct amdgpu_pcie_reset_ctx {
bool in_link_reset;
bool occurs_dpc;
bool audio_suspended;
+ struct pci_dev *swus;
+ struct pci_saved_state *swus_pcistate;
+ struct pci_saved_state *swds_pcistate;
};
/*
@@ -929,12 +947,6 @@ enum amdgpu_enforce_isolation_mode {
AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
};
-
-/*
- * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
- */
-#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
-
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -960,8 +972,7 @@ struct amdgpu_device {
struct notifier_block acpi_nb;
struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
- struct debugfs_blob_wrapper debugfs_vbios_blob;
- struct debugfs_blob_wrapper debugfs_discovery_blob;
+ struct debugfs_blob_wrapper debugfs_vbios_blob;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
@@ -1051,6 +1062,9 @@ struct amdgpu_device {
u32 log2_max_MBps;
} mm_stats;
+ /* discovery*/
+ struct amdgpu_discovery_info discovery;
+
/* display */
bool enable_virtual_display;
struct amdgpu_vkms_output *amdgpu_vkms_output;
@@ -1138,9 +1152,6 @@ struct amdgpu_device {
/* for userq and VM fences */
struct amdgpu_seq64 seq64;
- /* KFD */
- struct amdgpu_kfd_dev kfd;
-
/* UMC */
struct amdgpu_umc umc;
@@ -1165,6 +1176,12 @@ struct amdgpu_device {
* queue fence.
*/
struct xarray userq_xa;
+ /**
+ * @userq_doorbell_xa: Global user queue map (doorbell index → queue)
+ * Key: doorbell_index (unique global identifier for the queue)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_doorbell_xa;
/* df */
struct amdgpu_df df;
@@ -1256,8 +1273,6 @@ struct amdgpu_device {
struct list_head ras_list;
- struct ip_discovery_top *ip_top;
-
struct amdgpu_reset_domain *reset_domain;
struct mutex benchmark_mutex;
@@ -1281,6 +1296,7 @@ struct amdgpu_device {
bool debug_disable_gpu_ring_reset;
bool debug_vm_userptr;
bool debug_disable_ce_logs;
+ bool debug_enable_ce_cs;
/* Protection for the following isolation structure */
struct mutex enforce_isolation_mutex;
@@ -1299,9 +1315,14 @@ struct amdgpu_device {
*/
bool apu_prefer_gtt;
- struct list_head userq_mgr_list;
- struct mutex userq_mutex;
bool userq_halt_for_enforce_isolation;
+ struct work_struct userq_reset_work;
+ struct amdgpu_uid *uid_info;
+
+ /* KFD
+ * Must be last --ends in a flexible-array member.
+ */
+ struct amdgpu_kfd_dev kfd;
};
static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
@@ -1519,11 +1540,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_asic_flush_hdp(adev, r) \
- ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
-#define amdgpu_asic_invalidate_hdp(adev, r) \
- ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
- ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
@@ -1622,7 +1638,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
void amdgpu_driver_release_kms(struct drm_device *dev);
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_prepare(struct drm_device *dev);
void amdgpu_device_complete(struct drm_device *dev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
@@ -1785,4 +1800,9 @@ static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid);
+uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst);
#endif
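
The UID table declared above is a fixed two-dimensional array indexed by type and instance, so the amdgpu_device_set_uid()/amdgpu_device_get_uid() accessors are presumably thin bounds-checked wrappers. A sketch of what the setter might look like; the actual implementation lives elsewhere in the patch set:

void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
			   enum amdgpu_uid_type type, uint8_t inst,
			   uint64_t uid)
{
	/* Reject out-of-range indices rather than corrupting the table. */
	if (!uid_info || type >= AMDGPU_UID_TYPE_MAX ||
	    inst >= AMDGPU_UID_INST_MAX)
		return;

	uid_info->uid[type][inst] = uid;
}
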
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index cbc40cad581b..9b3180449150 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -76,6 +76,7 @@ static void aca_banks_release(struct aca_banks *banks)
list_for_each_entry_safe(node, tmp, &banks->list, node) {
list_del(&node->node);
kvfree(node);
+ banks->nr_banks--;
}
}
@@ -130,6 +131,27 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
}
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
int start, int count,
struct aca_banks *banks, struct ras_query_context *qctx)
@@ -168,6 +190,15 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
bank.smu_err_type = type;
+ /*
+ * Poison consumption can be reported when a UE is injected
+ * while background workloads are running; such banks are
+ * unexpected here, so skip them.
+ */
+ if (type == ACA_SMU_TYPE_UE &&
+ ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) &&
+ !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC))
+ continue;
+
aca_smu_bank_dump(adev, i, count, &bank, qctx);
ret = aca_banks_add_bank(banks, &bank);
@@ -178,27 +209,6 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_
return 0;
}
-static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
-{
-
- struct aca_hwip *hwip;
- int hwid, mcatype;
- u64 ipid;
-
- if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
- return false;
-
- hwip = &aca_hwid_mcatypes[type];
- if (!hwip->hwid)
- return false;
-
- ipid = bank->regs[ACA_REG_IDX_IPID];
- hwid = ACA_REG__IPID__HARDWAREID(ipid);
- mcatype = ACA_REG__IPID__MCATYPE(ipid);
-
- return hwip->hwid == hwid && hwip->mcatype == mcatype;
-}
-
static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
{
const struct aca_bank_ops *bank_ops = handle->bank_ops;
@@ -229,6 +239,7 @@ static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_
mutex_lock(&aerr->lock);
list_add_tail(&bank_error->node, &aerr->list);
+ aerr->nr_errors++;
mutex_unlock(&aerr->lock);
return bank_error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 4926996f94da..381ef205b0df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -302,17 +302,19 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].id = 0;
adev->acp.acp_cell[0].num_resources = 3;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].id = 1;
adev->acp.acp_cell[1].num_resources = 1;
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
- r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
+ r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, 2, NULL, 0, NULL);
if (r)
goto failure;
r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
@@ -410,30 +412,34 @@ static int acp_hw_init(struct amdgpu_ip_block *ip_block)
adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].id = 0;
adev->acp.acp_cell[0].num_resources = 5;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].id = 1;
adev->acp.acp_cell[1].num_resources = 1;
adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[2].name = "designware-i2s";
+ adev->acp.acp_cell[2].id = 2;
adev->acp.acp_cell[2].num_resources = 1;
adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
adev->acp.acp_cell[3].name = "designware-i2s";
+ adev->acp.acp_cell[3].id = 3;
adev->acp.acp_cell[3].num_resources = 1;
adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
- r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
+ r = mfd_add_devices(adev->acp.parent, 0, adev->acp.acp_cell, ACP_DEVS, NULL, 0, NULL);
if (r)
goto failure;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 6c62e27b9800..d31460a9e958 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -507,7 +507,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
pm_runtime_get_sync(adev_to_drm(adev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(adev_to_drm(adev));
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fbe7616555c8..a2879d2b7c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -250,16 +250,24 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
{
- if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+ else
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ }
}
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
{
int r = 0;
- if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+ else
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 33eb4826b58b..8bdfcde2029b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -71,7 +71,7 @@ struct kgd_mem {
struct mutex lock;
struct amdgpu_bo *bo;
struct dma_buf *dmabuf;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct list_head attachments;
/* protected by amdkfd_process_info.lock */
struct list_head validate_list;
@@ -107,11 +107,13 @@ struct amdgpu_kfd_dev {
bool init_complete;
struct work_struct reset_work;
- /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
- struct dev_pagemap pgmap;
-
/* Client for KFD BO GEM handle allocations */
struct drm_client_dev client;
+
+ /* HMM page migration MEMORY_DEVICE_PRIVATE mapping
+ * Must be last -- ends in a flexible-array member.
+ */
+ struct dev_pagemap pgmap;
};
enum kgd_engine_type {
@@ -426,7 +428,9 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
bool retry_fault);
@@ -516,11 +520,21 @@ static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return 0;
}
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
return 0;
}
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 04ef0ca10541..0239114fb6c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -352,7 +352,7 @@ static int kgd_hqd_dump(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -449,7 +449,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
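
Note: in these dump helpers the out-parameter has type uint32_t (**dump)[2], so sizeof(**dump) is one {offset, value} pair (2 * sizeof(uint32_t)); kmalloc_array() then expresses "HQD_N_REGS pairs" directly and rejects a count * size product that would overflow, which the open-coded multiply could not. Minimal sketch of the call shape:

uint32_t (*dump)[2];	/* HQD_N_REGS pairs of {offset, value} */

dump = kmalloc_array(HQD_N_REGS, sizeof(*dump), GFP_KERNEL);
if (!dump)
	return -ENOMEM;
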
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
index 6d08bc2781a3..f2278a0937ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -338,7 +338,7 @@ static int hqd_dump_v10_3(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -435,7 +435,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
index e0e6a6a49d90..aaccf0b9947d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -323,7 +323,7 @@ static int hqd_dump_v11(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -420,7 +420,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (7+11+1+12+12)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
index 6f0dc23c901b..e0ceab400b2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
@@ -115,7 +115,7 @@ static int hqd_dump_v12(struct amdgpu_device *adev,
(*dump)[i++][1] = RREG32(addr); \
} while (0)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
@@ -146,7 +146,7 @@ static int hqd_sdma_dump_v12(struct amdgpu_device *adev,
#undef HQD_N_REGS
#define HQD_N_REGS (last_reg - first_reg + 1)
- *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
if (*dump == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index b16cce7c22c3..b1c24c8fa686 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -510,7 +510,8 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
}
-static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct kgd_mem *mem)
{
uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
AMDGPU_VM_MTYPE_DEFAULT;
@@ -520,7 +521,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
- return amdgpu_gem_va_map_flags(adev, mapping_flags);
+ return mapping_flags;
}
/**
@@ -977,7 +978,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
goto unwind;
}
attachment[i]->va = va;
- attachment[i]->pte_flags = get_pte_flags(adev, mem);
+ attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
attachment[i]->adev = adev;
list_add(&attachment[i]->list, &mem->attachments);
@@ -1056,7 +1057,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
struct amdkfd_process_info *process_info = mem->process_info;
struct amdgpu_bo *bo = mem->bo;
struct ttm_operation_ctx ctx = { true, false };
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
int ret = 0;
mutex_lock(&process_info->lock);
@@ -1088,8 +1089,15 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
return 0;
}
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range)) {
+ ret = -ENOMEM;
+ goto unregister_out;
+ }
+
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
if (ret) {
+ amdgpu_hmm_range_free(range);
if (ret == -EAGAIN)
pr_debug("Failed to get user pages, try again\n");
else
@@ -1102,6 +1110,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -1109,7 +1120,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
amdgpu_bo_unreserve(bo);
release_out:
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+ amdgpu_hmm_range_free(range);
unregister_out:
if (ret)
amdgpu_hmm_unregister(bo);
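
Note: a condensed sketch of the userptr lifecycle with the amdgpu_hmm_range helpers this series introduces (names and signatures taken from the hunks in this patch, not from mainline):

struct amdgpu_hmm_range *range;
int r;

range = amdgpu_hmm_range_alloc(NULL);		/* owns the pfn array */
if (unlikely(!range))
	return -ENOMEM;

r = amdgpu_ttm_tt_get_user_pages(bo, range);	/* hmm_range_fault() walk */
if (r) {
	amdgpu_hmm_range_free(range);
	return r;
}

amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range); /* populate ttm->pages */
/* ... reserve, validate, map; amdgpu_hmm_range_valid() checks for a
 * racing invalidation before the snapshot is trusted ...
 */
amdgpu_hmm_range_free(range);
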
@@ -1263,6 +1274,10 @@ static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
(void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+ /* VM entity is stopped if the process was killed; don't clear freed PT BOs */
+ if (!amdgpu_vm_ready(vm))
+ return 0;
+
(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
(void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
@@ -1912,7 +1927,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
amdgpu_hmm_unregister(mem->bo);
mutex_lock(&process_info->notifier_lock);
- amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mutex_unlock(&process_info->notifier_lock);
}
@@ -1950,9 +1965,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
*/
if (size) {
if (!is_imported &&
- (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
- (adev->apu_prefer_gtt &&
- mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
*size = bo_size;
else
*size = 0;
@@ -2325,10 +2338,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
struct kfd_vm_fault_info *mem)
{
- if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
*mem = *adev->gmc.vm_fault_info;
- mb(); /* make sure read happened */
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
}
return 0;
}
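
Note: the conversion above folds the explicit mb() into acquire/release atomics. The producer side (the GMC fault handler, assumed here and not shown in this hunk) publishes the record before the flag; the consumer's acquire read then guarantees it observes a complete record. Sketch of the pairing:

/* producer (fault handler, assumed): */
*adev->gmc.vm_fault_info = info;			/* 1: write record */
atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);	/* 2: publish */

/* consumer (amdgpu_amdkfd_gpuvm_get_vm_fault_info): */
if (atomic_read_acquire(&adev->gmc.vm_fault_info_updated) == 1) {
	*mem = *adev->gmc.vm_fault_info;	/* cannot be hoisted above
						 * the acquire load */
	atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
}
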
@@ -2539,7 +2551,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo;
- amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
/* BO reservations and getting user pages (hmm_range_fault)
@@ -2563,10 +2575,14 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
}
}
+ mem->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!mem->range))
+ return -ENOMEM;
/* Get updated user pages */
- ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &mem->range);
+ ret = amdgpu_ttm_tt_get_user_pages(bo, mem->range);
if (ret) {
+ amdgpu_hmm_range_free(mem->range);
+ mem->range = NULL;
pr_debug("Failed %d to get user pages\n", ret);
/* Return -EFAULT bad address error as success. It will
@@ -2583,17 +2599,24 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
* from the KFD, trigger a segmentation fault in VM debug mode.
*/
if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ struct kfd_process *p;
+
pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
pid_nr(process_info->pid), mem->va);
// Send GPU VM fault to user space
- kfd_signal_vm_fault_event_with_userptr(kfd_lookup_process_by_pid(process_info->pid),
- mem->va);
+ p = kfd_lookup_process_by_pid(process_info->pid);
+ if (p) {
+ kfd_signal_vm_fault_event_with_userptr(p, mem->va);
+ kfd_unref_process(p);
+ }
}
ret = 0;
}
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range);
+
mutex_lock(&process_info->notifier_lock);
/* Mark the BO as valid unless it was invalidated
@@ -2732,8 +2755,8 @@ static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_i
continue;
/* Only check mem with hmm range associated */
- valid = amdgpu_ttm_tt_get_user_pages_done(
- mem->bo->tbo.ttm, mem->range);
+ valid = amdgpu_hmm_range_valid(mem->range);
+ amdgpu_hmm_range_free(mem->range);
mem->range = NULL;
if (!valid) {
@@ -2989,9 +3012,22 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
struct amdgpu_device *adev = amdgpu_ttm_adev(
peer_vm->root.bo->tbo.bdev);
+ struct amdgpu_fpriv *fpriv =
+ container_of(peer_vm, struct amdgpu_fpriv, vm);
+
+ ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+ if (ret) {
+ dev_dbg(adev->dev,
+ "Memory eviction: handle PRT moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
+ goto validate_map_fail;
+ }
+
ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
if (ret) {
- pr_debug("Memory eviction: handle moved failed. Try again\n");
+ dev_dbg(adev->dev,
+ "Memory eviction: handle moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
goto validate_map_fail;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index e476e45b996a..763f2b8dcf13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -706,7 +706,6 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
- adev->clock.current_dispclk = adev->clock.default_dispclk;
adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
if (adev->clock.max_pixel_clock == 0)
@@ -1816,16 +1815,43 @@ static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}
+static ssize_t amdgpu_atombios_get_vbios_build(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return sysfs_emit(buf, "%s\n", ctx->build_num);
+}
+
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
NULL);
+static DEVICE_ATTR(vbios_build, 0444, amdgpu_atombios_get_vbios_build, NULL);
static struct attribute *amdgpu_vbios_version_attrs[] = {
- &dev_attr_vbios_version.attr,
- NULL
+ &dev_attr_vbios_version.attr,
+ &dev_attr_vbios_build.attr,
+ NULL
};
+static umode_t amdgpu_vbios_version_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ if (attr == &dev_attr_vbios_build.attr && !strlen(ctx->build_num))
+ return 0;
+
+ return attr->mode;
+}
+
const struct attribute_group amdgpu_vbios_version_attr_group = {
- .attrs = amdgpu_vbios_version_attrs
+ .attrs = amdgpu_vbios_version_attrs,
+ .is_visible = amdgpu_vbios_version_attrs_is_visible,
};
int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
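
Note: .is_visible is the standard sysfs hook for conditionally exposing attributes in a group; returning 0 hides the file and returning attr->mode keeps it, exactly as vbios_build is hidden above when the build string is empty. Generic sketch of the pattern (dev_attr_example and example_data_present() are hypothetical):

static umode_t example_attrs_is_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	/* hide the attribute when its backing data is absent */
	if (attr == &dev_attr_example.attr && !example_data_present())
		return 0;

	return attr->mode;
}
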
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index c7d32fb216e4..636385c80f64 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -181,19 +181,22 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
u8 frev, crev;
int usage_bytes = 0;
- if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
- if (frev == 2 && crev == 1) {
- fw_usage_v2_1 =
- (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_1(adev,
- fw_usage_v2_1,
- &usage_bytes);
- } else if (frev >= 2 && crev >= 2) {
- fw_usage_v2_2 =
- (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
- amdgpu_atomfirmware_allocate_fb_v2_2(adev,
- fw_usage_v2_2,
- &usage_bytes);
+ /* Skip atomfirmware allocation for SRIOV VFs when the dynamic critical region is enabled */
+ if (!(amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled)) {
+ if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
+ if (frev == 2 && crev == 1) {
+ fw_usage_v2_1 =
+ (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_1(adev,
+ fw_usage_v2_1,
+ &usage_bytes);
+ } else if (frev >= 2 && crev >= 2) {
+ fw_usage_v2_2 =
+ (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_2(adev,
+ fw_usage_v2_2,
+ &usage_bytes);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 00e96419fcda..35d04e69aec0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -96,13 +96,14 @@ void amdgpu_bios_release(struct amdgpu_device *adev)
* part of the system bios. On boot, the system bios puts a
* copy of the igp rom at the start of vram if a discrete card is
* present.
- * For SR-IOV, the vbios image is also put in VRAM in the VF.
+ * For SR-IOV, if dynamic critical region is not enabled,
+ * the vbios image is also put at the start of VRAM in the VF.
*/
static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
+ uint8_t __iomem *bios = NULL;
resource_size_t vram_base;
- resource_size_t size = 256 * 1024; /* ??? */
+ u32 size = 256U * 1024U; /* ??? */
if (!(adev->flags & AMD_IS_APU))
if (amdgpu_device_need_post(adev))
@@ -114,18 +115,33 @@ static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
- bios = ioremap_wc(vram_base, size);
- if (!bios)
- return false;
adev->bios = kmalloc(size, GFP_KERNEL);
- if (!adev->bios) {
- iounmap(bios);
+ if (!adev->bios)
return false;
+
+ /* For SRIOV, when the dynamic critical region is enabled,
+ * the vbios image is put at a dynamic offset of VRAM in the VF.
+ * If the dynamic critical region is disabled, follow the existing bare-metal logic.
+ */
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID, adev->bios, &size)) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+ } else {
+ bios = ioremap_wc(vram_base, size);
+ if (!bios) {
+ amdgpu_bios_release(adev);
+ return false;
+ }
+
+ memcpy_fromio(adev->bios, bios, size);
+ iounmap(bios);
}
+
adev->bios_size = size;
- memcpy_fromio(adev->bios, bios, size);
- iounmap(bios);
if (!check_atom_bios(adev, size)) {
amdgpu_bios_release(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 702f6610d024..66fb37b64388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
struct drm_amdgpu_bo_list_entry **info_param)
{
- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t bo_info_size = in->bo_info_size;
+ const uint32_t bo_number = in->bo_number;
struct drm_amdgpu_bo_list_entry *info;
- int r;
-
- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
/* copy the handle array from userspace to a kernel buffer */
- r = -EFAULT;
- if (likely(info_size == in->bo_info_size)) {
- unsigned long bytes = in->bo_number *
- in->bo_info_size;
-
- if (copy_from_user(info, uptr, bytes))
- goto error_free;
-
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
} else {
- unsigned long bytes = min(in->bo_info_size, info_size);
+ const uint32_t bytes = min(bo_info_size, info_size);
unsigned i;
- memset(info, 0, in->bo_number * info_size);
- for (i = 0; i < in->bo_number; ++i) {
- if (copy_from_user(&info[i], uptr, bytes))
- goto error_free;
+ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- uptr += in->bo_info_size;
+ memset(info, 0, bo_number * info_size);
+ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
+ if (copy_from_user(&info[i], uptr, bytes)) {
+ kvfree(info);
+ return -EFAULT;
+ }
}
}
*info_param = info;
return 0;
-
-error_free:
- kvfree(info);
- return r;
}
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index 555cd6d877c3..2b5e7c46a39d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -38,8 +38,7 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo *bo;
struct amdgpu_bo_va *bo_va;
uint32_t priority;
- struct page **user_pages;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
bool user_invalidated;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 5e375e9c4f5d..9f96d568acf2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -398,30 +398,28 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
- static const struct mode_size {
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
int w;
int h;
- } common_modes[17] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
};
- for (i = 0; i < 17; i++) {
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
if (common_modes[i].w > 1024 ||
common_modes[i].h > 768)
@@ -434,12 +432,11 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
common_modes[i].h == native_mode->vdisplay))
continue;
}
- if (common_modes[i].w < 320 || common_modes[i].h < 200)
- continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
if (!mode)
return;
+ strscpy(mode->name, common_modes[i].name, DRM_DISPLAY_MODE_LEN);
drm_mode_probed_add(connector, mode);
}
@@ -737,10 +734,8 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -922,10 +917,8 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1149,10 +1142,8 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1195,29 +1186,69 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector)
amdgpu_connector->use_digital = true;
}
+/**
+ * amdgpu_max_hdmi_pixel_clock - Return max supported HDMI (TMDS) pixel clock
+ * @adev: pointer to amdgpu_device
+ *
+ * Return: maximum supported HDMI (TMDS) pixel clock in kHz.
+ */
+static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev)
+{
+ if (adev->asic_type >= CHIP_POLARIS10)
+ return 600000;
+ else if (adev->asic_type >= CHIP_TONGA)
+ return 300000;
+ else
+ return 297000;
+}
+
+/**
+ * amdgpu_connector_dvi_mode_valid - Validate a mode on DVI/HDMI connectors
+ * @connector: DRM connector to validate the mode on
+ * @mode: display mode to validate
+ *
+ * Validate the given display mode on DVI and HDMI connectors, including
+ * analog signals on DVI-I.
+ *
+ * Return: drm_mode_status indicating whether the mode is valid.
+ */
static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev);
+ const int max_dvi_single_link_pixel_clock = 165000;
+ int max_digital_pixel_clock_khz;
/* XXX check mode bandwidth */
- if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
- if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
- return MODE_OK;
- } else if (connector->display_info.is_hdmi) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else {
- return MODE_CLOCK_HIGH;
+ if (amdgpu_connector->use_digital) {
+ switch (amdgpu_connector->connector_object_id) {
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_B:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2;
+ break;
+ default:
+ /* unknown object id: be conservative, assume single link */
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock;
+ break;
}
+
+ /* When the display EDID claims that it's an HDMI display,
+ * we use the HDMI encoder mode of the display HW,
+ * so we should verify against the max HDMI clock here.
+ */
+ if (connector->display_info.is_hdmi)
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+
+ if (mode->clock > max_digital_pixel_clock_khz)
+ return MODE_CLOCK_HIGH;
}
/* check against the max pixel clock */
@@ -1449,10 +1480,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 25252231a68a..425a3e564360 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
@@ -68,7 +68,6 @@ void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
hdr->error_severity = sev;
hdr->valid_bits.platform_id = 1;
- hdr->valid_bits.partition_id = 1;
hdr->valid_bits.timestamp = 1;
amdgpu_cper_get_timestamp(&hdr->timestamp);
@@ -174,7 +173,7 @@ int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
struct cper_sec_nonstd_err *section;
bool poison;
- poison = (sev == CPER_SEV_NON_FATAL_CORRECTED) ? false : true;
+ poison = sev != CPER_SEV_NON_FATAL_CORRECTED;
section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
@@ -206,6 +205,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
{
struct cper_sec_desc *section_desc;
struct cper_sec_nonstd_err *section;
+ uint32_t socket_id;
section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
@@ -219,11 +219,17 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
section->hdr.valid_bits.err_context_cnt = 1;
section->info.error_type = RUNTIME;
+ section->info.valid_bits.ms_chk = 1;
section->info.ms_chk_bits.err_type_valid = 1;
+ section->info.ms_chk_bits.err_type = 1;
+ section->info.ms_chk_bits.pcc = 1;
section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
/* Hardcoded Reg dump for bad page threshold CPER */
+ socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0;
section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
@@ -234,8 +240,8 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = 0x0;
- section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x96;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 | (((socket_id % 4) & 0x3) << 12);
section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
index bcb97d245673..353421807387 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d3f220be2ef9..ecdfe6cb36cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -40,6 +40,7 @@
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"
+#include "amdgpu_hmm.h"
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
struct amdgpu_device *adev,
@@ -178,25 +179,17 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
- uint64_t *chunk_array_user;
uint64_t *chunk_array;
uint32_t uf_offset = 0;
size_t size;
int ret;
int i;
- chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
- GFP_KERNEL);
- if (!chunk_array)
- return -ENOMEM;
-
- /* get chunks */
- chunk_array_user = u64_to_user_ptr(cs->in.chunks);
- if (copy_from_user(chunk_array, chunk_array_user,
- sizeof(uint64_t)*cs->in.num_chunks)) {
- ret = -EFAULT;
- goto free_chunk;
- }
+ chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
+ cs->in.num_chunks,
+ sizeof(uint64_t));
+ if (IS_ERR(chunk_array))
+ return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
@@ -209,7 +202,6 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->nchunks; i++) {
struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
- uint32_t __user *cdata;
chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
@@ -222,20 +214,16 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = u64_to_user_ptr(user_chunk.chunk_data);
- p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
- GFP_KERNEL);
- if (p->chunks[i].kdata == NULL) {
- ret = -ENOMEM;
+ p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
+ size,
+ sizeof(uint32_t));
+ if (IS_ERR(p->chunks[i].kdata)) {
+ ret = PTR_ERR(p->chunks[i].kdata);
i--;
goto free_partial_kdata;
}
size *= sizeof(uint32_t);
- if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- ret = -EFAULT;
- goto free_partial_kdata;
- }
/* Assume the worst on the following checks */
ret = -EINVAL;
@@ -286,7 +274,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
}
- if (!p->gang_size) {
+ if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
ret = -EINVAL;
goto free_all_kdata;
}
@@ -376,6 +364,12 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
if (p->uf_bo && ring->funcs->no_user_fence)
return -EINVAL;
+ if (!p->adev->debug_enable_ce_cs &&
+ chunk_ib->flags & AMDGPU_IB_FLAG_CE) {
+ dev_err_ratelimited(p->adev->dev, "CE CS is blocked, use debug=0x400 to override\n");
+ return -EINVAL;
+ }
+
if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
@@ -396,7 +390,7 @@ static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
chunk_ib->ib_bytes : 0,
AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
- DRM_ERROR("Failed to get ib !\n");
+ drm_err(adev_to_drm(p->adev), "Failed to get ib!\n");
return r;
}
@@ -468,7 +462,7 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
if (r) {
- DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
handle, point, r);
return r;
}
@@ -714,7 +708,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
*/
const s64 us_upper_bound = 200000;
- if (!adev->mm_stats.log2_max_MBps) {
+ if (!adev->mm_stats.log2_max_MBps ||
+     !ttm_resource_manager_used(&adev->mman.vram_mgr.manager)) {
*max_bytes = 0;
*max_vis_bytes = 0;
return;
@@ -896,26 +890,18 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
bool userpage_invalidated = false;
struct amdgpu_bo *bo = e->bo;
int i;

- e->user_pages = kvcalloc(bo->tbo.ttm->num_pages,
- sizeof(struct page *),
- GFP_KERNEL);
- if (!e->user_pages) {
- DRM_ERROR("kvmalloc_array failure\n");
- r = -ENOMEM;
- goto out_free_user_pages;
- }
- r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
- if (r) {
- kvfree(e->user_pages);
- e->user_pages = NULL;
+ e->range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!e->range))
+ return -ENOMEM;
+
+ r = amdgpu_ttm_tt_get_user_pages(bo, e->range);
+ if (r)
goto out_free_user_pages;
- }
for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
- if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
+ if (bo->tbo.ttm->pages[i] !=
+ hmm_pfn_to_page(e->range->hmm_range.hmm_pfns[i])) {
userpage_invalidated = true;
break;
}
@@ -959,7 +945,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
- e->user_invalidated && e->user_pages) {
+ e->user_invalidated) {
amdgpu_bo_placement_from_domain(e->bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
@@ -968,11 +954,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out_free_user_pages;
amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
- e->user_pages);
+ e->range);
}
-
- kvfree(e->user_pages);
- e->user_pages = NULL;
}
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
@@ -983,7 +966,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate() failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
goto out_free_user_pages;
}
@@ -1012,13 +995,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
out_free_user_pages:
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- struct amdgpu_bo *bo = e->bo;
-
- if (!e->user_pages)
- continue;
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
- kvfree(e->user_pages);
- e->user_pages = NULL;
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
mutex_unlock(&p->bo_list->bo_list_mutex);
@@ -1061,13 +1038,13 @@ static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
- DRM_ERROR("IB va_start is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
return r;
}
if ((va_start + ib->length_dw * 4) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
+ drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@@ -1238,7 +1215,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
if (r) {
if (r != -ERESTARTSYS)
- DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+ drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
return r;
}
@@ -1349,8 +1326,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
*/
r = 0;
amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
- r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
- e->range);
+ r |= !amdgpu_hmm_range_valid(e->range);
+ amdgpu_hmm_range_free(e->range);
e->range = NULL;
}
if (r) {
@@ -1451,7 +1428,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
- DRM_ERROR_RATELIMITED("Failed to initialize parser %d!\n", r);
+ drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
return r;
}
@@ -1466,9 +1443,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
+ drm_err(dev, "Not enough memory for command submission!\n");
else if (r != -ERESTARTSYS && r != -EAGAIN)
- DRM_DEBUG("Failed to process the buffer list %d!\n", r);
+ drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
goto error_fini;
}
@@ -1767,30 +1744,21 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
{
struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
- uint32_t fence_count = wait->in.fence_count;
- struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
/* Get the fences from userspace */
- fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
- GFP_KERNEL);
- if (fences == NULL)
- return -ENOMEM;
-
- fences_user = u64_to_user_ptr(wait->in.fences);
- if (copy_from_user(fences, fences_user,
- sizeof(struct drm_amdgpu_fence) * fence_count)) {
- r = -EFAULT;
- goto err_free_fences;
- }
+ fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
+ wait->in.fence_count,
+ sizeof(struct drm_amdgpu_fence));
+ if (IS_ERR(fences))
+ return PTR_ERR(fences);
if (wait->in.wait_all)
r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
else
r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
-err_free_fences:
kfree(fences);
return r;
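
Note: both conversions in this file use the memdup_array_user() family from linux/string.h: it overflow-checks count * element size, allocates, copies from userspace, and returns an ERR_PTR on failure, collapsing the alloc/copy/unwind boilerplate. memdup_array_user() is kmalloc-backed (free with kfree()); vmemdup_array_user() is kvmalloc-backed (free with kvfree()). Minimal usage sketch:

struct drm_amdgpu_fence *fences;

fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
			   wait->in.fence_count, sizeof(*fences));
if (IS_ERR(fences))
	return PTR_ERR(fences);	/* -EFAULT or -ENOMEM, no manual unwind */

/* ... use fences ... */
kfree(fences);
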
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index f5d5c45ddc0d..afedea02188d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -236,7 +236,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
&num_scheds, &scheds);
if (r)
- goto cleanup_entity;
+ goto error_free_entity;
}
/* disable load balance if the hw engine retains context among dependent jobs */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 0e6e2e2acf5b..62d43b8cbe58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -129,7 +129,6 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
@@ -179,7 +178,6 @@ end:
if (pm_pg_lock)
mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -255,7 +253,6 @@ static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 off
if (rd->id.use_grbm) {
if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
(rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
mutex_unlock(&rd->lock);
@@ -310,7 +307,6 @@ end:
mutex_unlock(&rd->lock);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
@@ -446,7 +442,6 @@ static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, siz
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -557,7 +552,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -617,7 +611,6 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -676,7 +669,6 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -736,7 +728,6 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -795,7 +786,6 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -855,7 +845,6 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
amdgpu_virt_disable_access_debugfs(adev);
return r;
@@ -1003,7 +992,6 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (r) {
@@ -1094,7 +1082,6 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
if (!x) {
@@ -1192,7 +1179,6 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
mutex_unlock(&adev->grbm_idx_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
while (size) {
@@ -1266,7 +1252,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1315,7 +1300,6 @@ static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1365,7 +1349,6 @@ static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1414,7 +1397,6 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1460,7 +1442,6 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1501,7 +1482,6 @@ static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *bu
r = result;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -1701,7 +1681,6 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
up_write(&adev->reset_domain->sem);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1721,7 +1700,6 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1742,7 +1720,6 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
*val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -1762,7 +1739,6 @@ static int amdgpu_debugfs_benchmark(void *data, u64 val)
r = amdgpu_benchmark(adev, val);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
@@ -1902,7 +1878,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
- if (preempted && (&job->hw_fence.base) == fence)
+ if (preempted && (&job->hw_fence->base) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
@@ -2014,7 +1990,6 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
ret = -EINVAL;
out:
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return ret;
@@ -2123,10 +2098,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
debugfs_create_blob("amdgpu_vbios", 0444, root,
&adev->debugfs_vbios_blob);
- adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
- adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
- debugfs_create_blob("amdgpu_discovery", 0444, root,
- &adev->debugfs_discovery_blob);
+ if (adev->discovery.debugfs_blob.size)
+ debugfs_create_blob("amdgpu_discovery", 0444, root,
+ &adev->discovery.debugfs_blob);
return 0;
}
@@ -2136,12 +2110,14 @@ static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
struct drm_file *file;
struct amdgpu_fpriv *fpriv;
struct amdgpu_bo *root_bo;
+ struct amdgpu_device *adev;
int r;
file = m->private;
if (!file)
return -EINVAL;
+ adev = drm_to_adev(file->minor->dev);
fpriv = file->driver_priv;
if (!fpriv || !fpriv->vm.root.bo)
return -ENODEV;
@@ -2153,7 +2129,11 @@ static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
return -EINVAL;
}
- seq_printf(m, "gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(fpriv->vm.root.bo));
+ seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo));
+ seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn);
+ seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level);
+ seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size);
+ seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size);
amdgpu_bo_unreserve(root_bo);
amdgpu_bo_unref(&root_bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 8a026bc9ea44..4e2fe6674db8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -217,8 +217,7 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
- drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
- coredump->reset_time.tv_nsec);
+ drm_printf(&p, "time: %ptSp\n", &coredump->reset_time);
if (coredump->reset_task_info.task.pid)
drm_printf(&p, "process_name: %s PID: %d\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 01d234cf8156..58c3ffe707d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -71,6 +71,7 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
+#include "amdgpu_ras_mgr.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_reset.h"
@@ -95,6 +96,7 @@ MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
#define AMDGPU_RESUME_MS 2000
#define AMDGPU_MAX_RETRY_LIMIT 2
@@ -178,6 +180,12 @@ struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
BIT(AMD_IP_BLOCK_TYPE_PSP)
};
+static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev);
+static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev);
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev);
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
+
static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
enum amd_ip_block_type block)
{
@@ -1670,9 +1678,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
struct pci_bus *root;
struct resource *res;
+ int max_size, r;
unsigned int i;
u16 cmd;
- int r;
if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
return 0;
@@ -1718,30 +1726,28 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
return 0;
/* Limit the BAR size to what is available */
- rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
- rbar_size);
+ max_size = pci_rebar_get_max_size(adev->pdev, 0);
+ if (max_size < 0)
+ return 0;
+ rbar_size = min(max_size, rbar_size);
/* Disable memory decoding while we change the BAR addresses and size */
pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
pci_write_config_word(adev->pdev, PCI_COMMAND,
cmd & ~PCI_COMMAND_MEMORY);
- /* Free the VRAM and doorbell BAR, we most likely need to move both. */
+ /* Tear down doorbell as resizing will release BARs */
amdgpu_doorbell_fini(adev);
- if (adev->asic_type >= CHIP_BONAIRE)
- pci_release_resource(adev->pdev, 2);
-
- pci_release_resource(adev->pdev, 0);
- r = pci_resize_resource(adev->pdev, 0, rbar_size);
+ r = pci_resize_resource(adev->pdev, 0, rbar_size,
+ (adev->asic_type >= CHIP_BONAIRE) ? 1 << 5
+ : 1 << 2);
if (r == -ENOSPC)
dev_info(adev->dev,
"Not enough PCI address space for a large BAR.");
else if (r && r != -ENOTSUPP)
dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
- pci_assign_unassigned_bus_resources(adev->pdev->bus);
-
/* When the doorbell or fb BAR isn't available we have no chance of
* using the device.
*/
@@ -1879,6 +1885,13 @@ static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device
static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
{
+ /* Enabling ASPM causes random hangs on Tahiti and Oland on Zen4.
+ * It's unclear if this is a platform-specific or GPU-specific issue.
+ * Disable ASPM on SI for the time being.
+ */
+ if (adev->family == AMDGPU_FAMILY_SI)
+ return true;
+
#if IS_ENABLED(CONFIG_X86)
struct cpuinfo_x86 *c = &cpu_data(0);
@@ -2377,7 +2390,7 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
}
/**
- * amdgpu_device_ip_is_valid - is the hardware IP enabled
+ * amdgpu_device_ip_is_hw - is the hardware IP enabled
*
* @adev: amdgpu_device pointer
* @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
@@ -2385,6 +2398,27 @@ int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
* Check if the hardware IP is enabled or not.
* Returns true if the IP is enabled, false if not.
*/
+bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
+{
+ int i;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == block_type)
+ return adev->ip_blocks[i].status.hw;
+ }
+ return false;
+}
+
+/**
+ * amdgpu_device_ip_is_valid - is the hardware IP valid
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Check if the hardware IP is valid or not.
+ * Returns true if the IP is valid, false if not.
+ */
bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
enum amd_ip_block_type block_type)
{
@@ -2445,6 +2479,34 @@ int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static const char *ip_block_names[] = {
+ [AMD_IP_BLOCK_TYPE_COMMON] = "common",
+ [AMD_IP_BLOCK_TYPE_GMC] = "gmc",
+ [AMD_IP_BLOCK_TYPE_IH] = "ih",
+ [AMD_IP_BLOCK_TYPE_SMC] = "smu",
+ [AMD_IP_BLOCK_TYPE_PSP] = "psp",
+ [AMD_IP_BLOCK_TYPE_DCE] = "dce",
+ [AMD_IP_BLOCK_TYPE_GFX] = "gfx",
+ [AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
+ [AMD_IP_BLOCK_TYPE_UVD] = "uvd",
+ [AMD_IP_BLOCK_TYPE_VCE] = "vce",
+ [AMD_IP_BLOCK_TYPE_ACP] = "acp",
+ [AMD_IP_BLOCK_TYPE_VCN] = "vcn",
+ [AMD_IP_BLOCK_TYPE_MES] = "mes",
+ [AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
+ [AMD_IP_BLOCK_TYPE_VPE] = "vpe",
+ [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
+ [AMD_IP_BLOCK_TYPE_ISP] = "isp",
+ [AMD_IP_BLOCK_TYPE_RAS] = "ras",
+};
+
+static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
+{
+ int idx = (int)type;
+
+ return idx < ARRAY_SIZE(ip_block_names) ? ip_block_names[idx] : "unknown";
+}
+
/**
* amdgpu_device_ip_block_add
*
@@ -2473,8 +2535,13 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
break;
}
- dev_info(adev->dev, "detected ip block number %d <%s>\n",
- adev->num_ip_blocks, ip_block_version->funcs->name);
+ dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
+ adev->num_ip_blocks,
+ ip_block_name(adev, ip_block_version->type),
+ ip_block_version->major,
+ ip_block_version->minor,
+ ip_block_version->rev,
+ ip_block_version->funcs->name);
adev->ip_blocks[adev->num_ip_blocks].adev = adev;
@@ -2591,10 +2658,15 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "arcturus";
break;
case CHIP_NAVI12:
- if (adev->mman.discovery_bin)
+ if (adev->discovery.bin)
return 0;
chip_name = "navi12";
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->discovery.bin)
+ return 0;
+ chip_name = "cyan_skillfish";
+ break;
}
err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
@@ -2674,6 +2746,24 @@ out:
return err;
}
+static void amdgpu_uid_init(struct amdgpu_device *adev)
+{
+ /* Initialize the UID for the device */
+ adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
+ if (!adev->uid_info) {
+ dev_warn(adev->dev, "Failed to allocate memory for UID\n");
+ return;
+ }
+ adev->uid_info->adev = adev;
+}
+
+static void amdgpu_uid_fini(struct amdgpu_device *adev)
+{
+ /* Free the UID memory */
+ kfree(adev->uid_info);
+ adev->uid_info = NULL;
+}
+
/**
* amdgpu_device_ip_early_init - run early init for hardware IPs
*
@@ -2698,6 +2788,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return r;
+
+ r = amdgpu_virt_init_critical_region(adev);
+ if (r)
+ return r;
}
switch (adev->asic_type) {
@@ -2857,6 +2951,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (adev->gmc.xgmi.supported)
amdgpu_xgmi_early_init(adev);
+ if (amdgpu_is_multi_aid(adev))
+ amdgpu_uid_init(adev);
ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
if (ip_block->status.valid != false)
amdgpu_amdkfd_device_probe(adev);
@@ -3349,10 +3445,11 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
- /* skip CG for VCE/UVD, it's handled specially */
+ /* skip PG for VCE/UVD/VPE, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
adev->ip_blocks[i].version->funcs->set_powergating_state) {
/* enable powergating to save power */
@@ -3389,7 +3486,7 @@ static int amdgpu_device_enable_mgpu_fan_boost(void)
for (i = 0; i < mgpu_info.num_dgpu; i++) {
gpu_ins = &(mgpu_info.gpu_ins[i]);
adev = gpu_ins->adev;
- if (!(adev->flags & AMD_IS_APU) &&
+ if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
!gpu_ins->mgpu_fan_enabled) {
ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
if (ret)
@@ -3584,6 +3681,20 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
"failed to release exclusive mode on fini\n");
}
+ /*
+ * Driver reload on the APU can fail due to firmware validation because
+ * the PSP is always running, as it is shared across the whole SoC.
+ * This same issue does not occur on dGPU because it has a mechanism
+ * that checks whether the PSP is running. A solution for those issues
+ * in the APU is to trigger a GPU reset, but this should be done during
+ * the unload phase to avoid adding boot latency and screen flicker.
+ */
+ if ((adev->flags & AMD_IS_APU) && !adev->gmc.is_app_apu) {
+ r = amdgpu_asic_reset(adev);
+ if (r)
+ dev_err(adev->dev, "asic reset on %s failed\n", __func__);
+ }
+
return 0;
}
@@ -3648,6 +3759,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
}
amdgpu_ras_fini(adev);
+ amdgpu_uid_fini(adev);
return 0;
}
@@ -3693,7 +3805,7 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
*/
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, rec;
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
@@ -3714,13 +3826,25 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
if (r)
- return r;
+ goto unwind;
}
return 0;
+unwind:
+ rec = amdgpu_device_ip_resume_phase3(adev);
+ if (rec)
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase3 failed during unwind: %d\n",
+ rec);
+
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+
+ return r;
}
/**
@@ -3736,7 +3860,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
*/
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
- int i, r;
+ int i, r, rec;
if (adev->in_s0ix)
amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
@@ -3797,9 +3921,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
- /* XXX handle errors */
r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
- adev->ip_blocks[i].status.hw = false;
+ if (r)
+ goto unwind;
/* handle putting the SMC in the appropriate state */
if (!amdgpu_sriov_vf(adev)) {
@@ -3809,13 +3933,40 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
dev_err(adev->dev,
"SMC failed to set mp1 state %d, %d\n",
adev->mp1_state, r);
- return r;
+ goto unwind;
}
}
}
}
return 0;
+unwind:
+ /* suspend phase 2 = resume phase 1 + resume phase 2 */
+ rec = amdgpu_device_ip_resume_phase1(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase1 failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ rec = amdgpu_device_fw_loading(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_fw_loading failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ rec = amdgpu_device_ip_resume_phase2(adev);
+ if (rec) {
+ dev_err(adev->dev,
+ "amdgpu_device_ip_resume_phase2 failed during unwind: %d\n",
+ rec);
+ return r;
+ }
+
+ return r;
}
/**
@@ -3829,7 +3980,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int r;
@@ -4113,25 +4264,13 @@ bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
- /*
- * We have systems in the wild with these ASICs that require
- * LVDS and VGA support which is not supported with DC.
- *
- * Fallback to the non-DC driver here by default so as not to
- * cause regressions.
- */
-#if defined(CONFIG_DRM_AMD_DC_SI)
- return amdgpu_dc > 0;
-#else
- return false;
-#endif
- case CHIP_BONAIRE:
+ return amdgpu_dc != 0 && IS_ENABLED(CONFIG_DRM_AMD_DC_SI);
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
/*
* We have systems in the wild with these ASICs that require
- * VGA support which is not supported with DC.
+ * TRAVIS and NUTMEG support, which DC does not provide.
*
* Fallback to the non-DC driver here by default so as not to
* cause regressions.
@@ -4219,58 +4358,53 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
long timeout;
int ret = 0;
- /*
- * By default timeout for jobs is 10 sec
- */
- adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ /* By default, the timeout for all queues is 2 seconds */
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = msecs_to_jiffies(2000);
- if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- while ((timeout_setting = strsep(&input, ",")) &&
- strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
- ret = kstrtol(timeout_setting, 0, &timeout);
- if (ret)
- return ret;
+ if (!strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH))
+ return 0;
- if (timeout == 0) {
- index++;
- continue;
- } else if (timeout < 0) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- dev_warn(adev->dev, "lockup timeout disabled");
- add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
- } else {
- timeout = msecs_to_jiffies(timeout);
- }
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
- switch (index++) {
- case 0:
- adev->gfx_timeout = timeout;
- break;
- case 1:
- adev->compute_timeout = timeout;
- break;
- case 2:
- adev->sdma_timeout = timeout;
- break;
- case 3:
- adev->video_timeout = timeout;
- break;
- default:
- break;
- }
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ dev_warn(adev->dev, "lockup timeout disabled");
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+ } else {
+ timeout = msecs_to_jiffies(timeout);
}
- /*
- * There is only one value specified and
- * it should apply to all non-compute jobs.
- */
- if (index == 1) {
- adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
- if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
- adev->compute_timeout = adev->gfx_timeout;
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
}
}
+ /* When only one value is specified, apply it to all queues. */
+ if (index == 1)
+ adev->gfx_timeout = adev->compute_timeout = adev->sdma_timeout =
+ adev->video_timeout = timeout;
+
return ret;
}
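The string parsed here comes from the amdgpu.lockup_timeout module parameter. A hedged illustration of the accepted forms under the new defaults (values are in milliseconds; the option name is the existing parameter, unchanged by this patch):

/*
 * amdgpu.lockup_timeout=<gfx>[,<compute>,<sdma>,<video>]
 *
 * amdgpu.lockup_timeout=5000        - 5 s for every queue type
 * amdgpu.lockup_timeout=5000,0,-1   - 5 s gfx, compute left at the
 *                                     2 s default, SDMA disabled
 *
 * 0 skips a slot (keeps the default); a negative value maps to
 * MAX_SCHEDULE_TIMEOUT and taints the kernel with TAINT_SOFTLOCKUP.
 */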
@@ -4325,6 +4459,55 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
dev_info(adev->dev, "MCBP is enabled\n");
}
+static int amdgpu_device_sys_interface_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_atombios_sysfs_init(adev);
+ if (r)
+ drm_err(&adev->ddev,
+ "registering atombios sysfs failed (%d).\n", r);
+
+ r = amdgpu_pm_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
+
+ r = amdgpu_ucode_sysfs_init(adev);
+ if (r) {
+ adev->ucode_sysfs_en = false;
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
+ } else
+ adev->ucode_sysfs_en = true;
+
+ r = amdgpu_device_attr_sysfs_init(adev);
+ if (r)
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
+
+ r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
+ if (r)
+ dev_err(adev->dev,
+ "Could not create amdgpu board attributes\n");
+
+ amdgpu_fru_sysfs_init(adev);
+ amdgpu_reg_state_sysfs_init(adev);
+ amdgpu_xcp_sysfs_init(adev);
+
+ return r;
+}
+
+static void amdgpu_device_sys_interface_fini(struct amdgpu_device *adev)
+{
+ if (adev->pm.sysfs_initialized)
+ amdgpu_pm_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
+ amdgpu_device_attr_sysfs_fini(adev);
+ amdgpu_fru_sysfs_fini(adev);
+
+ amdgpu_reg_state_sysfs_fini(adev);
+ amdgpu_xcp_sysfs_fini(adev);
+}
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -4424,7 +4607,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->gfx.userq_sch_mutex);
mutex_init(&adev->gfx.workload_profile_mutex);
mutex_init(&adev->vcn.workload_profile_mutex);
- mutex_init(&adev->userq_mutex);
amdgpu_device_init_apu_flags(adev);
@@ -4452,7 +4634,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->pm.od_kobj_list);
- INIT_LIST_HEAD(&adev->userq_mgr_list);
+ xa_init(&adev->userq_doorbell_xa);
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
@@ -4475,6 +4657,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
}
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
+ INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);
adev->gfx.gfx_off_req_count = 1;
adev->gfx.gfx_off_residency = 0;
@@ -4748,39 +4931,14 @@ fence_driver_init:
flush_delayed_work(&adev->delayed_init_work);
}
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ amdgpu_xgmi_reset_on_init(adev);
/*
* Place those sysfs registering after `late_init`. As some of those
* operations performed in `late_init` might affect the sysfs
* interfaces creating.
*/
- r = amdgpu_atombios_sysfs_init(adev);
- if (r)
- drm_err(&adev->ddev,
- "registering atombios sysfs failed (%d).\n", r);
-
- r = amdgpu_pm_sysfs_init(adev);
- if (r)
- dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
-
- r = amdgpu_ucode_sysfs_init(adev);
- if (r) {
- adev->ucode_sysfs_en = false;
- dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
- } else
- adev->ucode_sysfs_en = true;
-
- r = amdgpu_device_attr_sysfs_init(adev);
- if (r)
- dev_err(adev->dev, "Could not create amdgpu device attr\n");
-
- r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
- if (r)
- dev_err(adev->dev,
- "Could not create amdgpu board attributes\n");
-
- amdgpu_fru_sysfs_init(adev);
- amdgpu_reg_state_sysfs_init(adev);
- amdgpu_xcp_sysfs_init(adev);
+ r = amdgpu_device_sys_interface_init(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
r = amdgpu_pmu_init(adev);
@@ -4808,9 +4966,6 @@ fence_driver_init:
if (px)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
- amdgpu_xgmi_reset_on_init(adev);
-
amdgpu_device_check_iommu_direct_map(adev);
adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
@@ -4902,15 +5057,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->pm.sysfs_initialized)
- amdgpu_pm_sysfs_fini(adev);
- if (adev->ucode_sysfs_en)
- amdgpu_ucode_sysfs_fini(adev);
- amdgpu_device_attr_sysfs_fini(adev);
- amdgpu_fru_sysfs_fini(adev);
-
- amdgpu_reg_state_sysfs_fini(adev);
- amdgpu_xcp_sysfs_fini(adev);
+ amdgpu_device_sys_interface_fini(adev);
/* disable ras feature must before hw fini */
amdgpu_ras_pre_fini(adev);
@@ -4985,14 +5132,15 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- if (adev->mman.discovery_bin)
+ if (adev->discovery.bin)
amdgpu_discovery_fini(adev);
amdgpu_reset_put_reset_domain(adev->reset_domain);
adev->reset_domain = NULL;
kfree(adev->pci_state);
-
+ kfree(adev->pcie_reset_ctx.swds_pcistate);
+ kfree(adev->pcie_reset_ctx.swus_pcistate);
}
/**
@@ -5012,6 +5160,10 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
return 0;
+ /* No need to evict when going to S5 through S4 callbacks */
+ if (system_state == SYSTEM_POWER_OFF)
+ return 0;
+
ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
if (ret) {
dev_warn(adev->dev, "evicting device resources failed\n");
@@ -5128,7 +5280,7 @@ void amdgpu_device_complete(struct drm_device *dev)
int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- int r = 0;
+ int r, rec;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -5136,7 +5288,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
adev->in_suspend = true;
if (amdgpu_sriov_vf(adev)) {
- if (!adev->in_s0ix && !adev->in_runpm)
+ if (!adev->in_runpm)
amdgpu_amdkfd_suspend_process(adev);
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_virt_request_full_gpu(adev, false);
@@ -5144,41 +5296,92 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
return r;
}
- if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3))
- dev_warn(adev->dev, "smart shift update failed\n");
+ r = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3);
+ if (r)
+ goto unwind_sriov;
if (notify_clients)
- drm_client_dev_suspend(adev_to_drm(adev), false);
+ drm_client_dev_suspend(adev_to_drm(adev));
cancel_delayed_work_sync(&adev->delayed_init_work);
amdgpu_ras_suspend(adev);
- amdgpu_device_ip_suspend_phase1(adev);
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ goto unwind_smartshift;
- if (!adev->in_s0ix) {
- amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- amdgpu_userq_suspend(adev);
- }
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ r = amdgpu_userq_suspend(adev);
+ if (r)
+ goto unwind_ip_phase1;
r = amdgpu_device_evict_resources(adev);
if (r)
- return r;
+ goto unwind_userq;
amdgpu_ttm_set_buffer_funcs_status(adev, false);
amdgpu_fence_driver_hw_fini(adev);
- amdgpu_device_ip_suspend_phase2(adev);
+ r = amdgpu_device_ip_suspend_phase2(adev);
+ if (r)
+ goto unwind_evict;
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
- r = amdgpu_dpm_notify_rlc_state(adev, false);
- if (r)
+ return 0;
+
+unwind_evict:
+ if (adev->mman.buffer_funcs_ring->sched.ready)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ amdgpu_fence_driver_hw_init(adev);
+
+unwind_userq:
+ rec = amdgpu_userq_resume(adev);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize user queues: %d\n", rec);
+ return r;
+ }
+ rec = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize kfd: %d\n", rec);
return r;
+ }
- return 0;
+unwind_ip_phase1:
+ /* suspend phase 1 = resume phase 3 */
+ rec = amdgpu_device_ip_resume_phase3(adev);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-initialize IPs phase1: %d\n", rec);
+ return r;
+ }
+
+unwind_smartshift:
+ rec = amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0);
+ if (rec) {
+ dev_warn(adev->dev, "failed to re-update smart shift: %d\n", rec);
+ return r;
+ }
+
+ if (notify_clients)
+ drm_client_dev_resume(adev_to_drm(adev));
+
+ amdgpu_ras_resume(adev);
+
+unwind_sriov:
+ if (amdgpu_sriov_vf(adev)) {
+ rec = amdgpu_virt_request_full_gpu(adev, true);
+ if (rec) {
+ dev_warn(adev->dev, "failed to reinitialize sriov: %d\n", rec);
+ return r;
+ }
+ }
+
+ adev->in_suspend = adev->in_s0ix = adev->in_s3 = false;
+
+ return r;
}
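The unwind paths above and in the two suspend-phase helpers rely on a suspend/resume phase correspondence that the code states only in passing comments; summarized (derived from the unwind labels in this patch):

/*
 * suspend phase 1 (DCE)       <-> resume phase 3
 * suspend phase 2 (other IPs) <-> resume phase 1 + fw loading
 *                                  + resume phase 2
 */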
static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
@@ -5254,15 +5457,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
goto exit;
}
- if (!adev->in_s0ix) {
- r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
- if (r)
- goto exit;
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
- r = amdgpu_userq_resume(adev);
- if (r)
- goto exit;
- }
+ r = amdgpu_userq_resume(adev);
+ if (r)
+ goto exit;
r = amdgpu_device_ip_late_init(adev);
if (r)
@@ -5275,7 +5476,7 @@ exit:
amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
- if (!adev->in_s0ix && !r && !adev->in_runpm)
+ if (!r && !adev->in_runpm)
r = amdgpu_amdkfd_resume_process(adev);
}
@@ -5286,7 +5487,7 @@ exit:
flush_delayed_work(&adev->delayed_init_work);
if (notify_clients)
- drm_client_dev_resume(adev_to_drm(adev), false);
+ drm_client_dev_resume(adev_to_drm(adev));
amdgpu_ras_resume(adev);
@@ -5701,7 +5902,7 @@ int amdgpu_device_link_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU link reset\n");
- if (!adev->pcie_reset_ctx.occurs_dpc)
+ if (!amdgpu_reset_in_dpc(adev))
ret = amdgpu_dpm_link_reset(adev);
if (ret)
@@ -5742,11 +5943,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!amdgpu_ring_sched_ready(ring))
continue;
- /* Clear job fence from fence drv to avoid force_completion
- * leave NULL and vm flush fence in fence drv
- */
- amdgpu_fence_driver_clear_job_fences(ring);
-
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
@@ -5830,6 +6026,7 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
amdgpu_set_init_level(tmp_adev, init_level);
if (full_reset) {
/* post card */
+ amdgpu_reset_set_dpc_status(tmp_adev, false);
amdgpu_ras_clear_err_state(tmp_adev);
r = amdgpu_device_asic_init(tmp_adev);
if (r) {
@@ -5890,7 +6087,11 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
if (r)
goto out;
- drm_client_dev_resume(adev_to_drm(tmp_adev), false);
+ r = amdgpu_userq_post_reset(tmp_adev, vram_lost);
+ if (r)
+ goto out;
+
+ drm_client_dev_resume(adev_to_drm(tmp_adev));
/*
* The GPU enters bad state once faulty pages
@@ -6112,6 +6313,7 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev))
cancel_work(&adev->reset_work);
#endif
+ cancel_work(&adev->userq_reset_work);
if (adev->kfd.dev)
cancel_work(&adev->kfd.reset_work);
@@ -6136,12 +6338,11 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
return ret;
}
-static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
struct list_head *device_list,
struct amdgpu_hive_info *hive)
{
struct amdgpu_device *tmp_adev = NULL;
- int r;
/*
* Build list of devices to reset.
@@ -6153,7 +6354,7 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
list_add_tail(&tmp_adev->reset_list, device_list);
if (adev->shutdown)
tmp_adev->shutdown = true;
- if (adev->pcie_reset_ctx.occurs_dpc)
+ if (amdgpu_reset_in_dpc(adev))
tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
if (!list_is_first(&adev->reset_list, device_list))
@@ -6161,14 +6362,6 @@ static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
} else {
list_add_tail(&adev->reset_list, device_list);
}
-
- if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
- r = amdgpu_device_health_check(device_list);
- if (r)
- return r;
- }
-
- return 0;
}
static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
@@ -6234,14 +6427,15 @@ static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
+ drm_client_dev_suspend(adev_to_drm(tmp_adev));
/* disable ras on ALL IPs */
- if (!need_emergency_restart &&
- (!adev->pcie_reset_ctx.occurs_dpc) &&
- amdgpu_device_ip_need_full_reset(tmp_adev))
+ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
amdgpu_ras_suspend(tmp_adev);
+ amdgpu_userq_pre_reset(tmp_adev);
+
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -6267,11 +6461,7 @@ static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list, reset_list) {
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = true;
r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
- if (adev->pcie_reset_ctx.occurs_dpc)
- tmp_adev->no_hw_access = false;
/*TODO Should we stop ?*/
if (r) {
dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
@@ -6342,23 +6532,28 @@ static int amdgpu_device_sched_resume(struct list_head *device_list,
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
- if (tmp_adev->asic_reset_res)
- r = tmp_adev->asic_reset_res;
-
- tmp_adev->asic_reset_res = 0;
-
- if (r) {
+ if (tmp_adev->asic_reset_res) {
/* bad news, how to tell it to userspace ?
* for ras error, we should report GPU bad status instead of
* reset failure
*/
if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
!amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
- dev_info(tmp_adev->dev, "GPU reset(%d) failed\n",
- atomic_read(&tmp_adev->gpu_reset_counter));
- amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
+ dev_info(tmp_adev->dev,
+ "GPU reset(%d) failed with error %d\n",
+ atomic_read(&tmp_adev->gpu_reset_counter),
+ tmp_adev->asic_reset_res);
+ amdgpu_vf_error_put(tmp_adev,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
+ tmp_adev->asic_reset_res);
+ if (!r)
+ r = tmp_adev->asic_reset_res;
+ tmp_adev->asic_reset_res = 0;
} else {
- dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
if (amdgpu_acpi_smart_shift_update(tmp_adev,
AMDGPU_SS_DEV_D0))
dev_warn(tmp_adev->dev,
@@ -6449,8 +6644,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
emergency_restart();
}
- dev_info(adev->dev, "GPU %s begin!\n",
- need_emergency_restart ? "jobs stop":"reset");
+ dev_info(adev->dev, "GPU %s begin!. Source: %d\n",
+ need_emergency_restart ? "jobs stop" : "reset",
+ reset_context->src);
if (!amdgpu_sriov_vf(adev))
hive = amdgpu_get_xgmi_hive(adev);
@@ -6461,8 +6657,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
reset_context->hive = hive;
INIT_LIST_HEAD(&device_list);
- if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
- goto end_reset;
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(&device_list);
+ if (r)
+ goto end_reset;
+ }
+
+ /* Must be called before the reset domain is locked */
+ amdgpu_ras_pre_reset(adev, &device_list);
/* We need to lock reset domain only once both for XGMI and single device */
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
@@ -6477,7 +6681,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*
* job->base holds a reference to parent fence
*/
- if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
+ if (job && dma_fence_is_signaled(&job->hw_fence->base)) {
job_signaled = true;
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
@@ -6494,6 +6698,7 @@ skip_sched_resume:
amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
reset_unlock:
amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+ amdgpu_ras_post_reset(adev, &device_list);
end_reset:
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -6884,17 +7089,13 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
+ amdgpu_get_xgmi_hive(adev);
struct amdgpu_reset_context reset_context;
struct list_head device_list;
dev_info(adev->dev, "PCI error: detected callback!!\n");
- if (!amdgpu_dpm_is_link_reset_supported(adev)) {
- dev_warn(adev->dev, "No support for XGMI hive yet...\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
adev->pci_channel_state = state;
switch (state) {
@@ -6904,10 +7105,23 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
case pci_channel_io_frozen:
/* Fatal error, prepare for slot reset */
dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+ if (hive) {
+ /* Hive devices must support FW-based link reset
+ * of the other devices; if not, bail out.
+ */
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev,
+ "No support for XGMI hive yet...\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ /* Set the DPC status only if the device is part of a hive.
+ * Non-hive devices should be able to recover after a
+ * link reset.
+ */
+ amdgpu_reset_set_dpc_status(adev, true);
- if (hive)
mutex_lock(&hive->hive_lock);
- adev->pcie_reset_ctx.occurs_dpc = true;
+ }
memset(&reset_context, 0, sizeof(reset_context));
INIT_LIST_HEAD(&device_list);
@@ -6915,10 +7129,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
amdgpu_device_recovery_get_reset_lock(adev, &device_list);
amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
hive, false);
- if (hive) {
+ if (hive)
mutex_unlock(&hive->hive_lock);
- amdgpu_put_xgmi_hive(hive);
- }
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
@@ -6966,22 +7178,34 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
struct amdgpu_device *tmp_adev;
struct amdgpu_hive_info *hive;
struct list_head device_list;
- int r = 0, i;
+ struct pci_dev *link_dev;
+ int r = 0, i, timeout;
u32 memsize;
-
- /* PCI error slot reset should be skipped During RAS recovery */
- if ((amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
- amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) &&
- amdgpu_ras_in_recovery(adev))
- return PCI_ERS_RESULT_RECOVERED;
+ u16 status;
dev_info(adev->dev, "PCI error: slot reset callback!!\n");
memset(&reset_context, 0, sizeof(reset_context));
- /* wait for asic to come out of reset */
- msleep(700);
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+ else
+ link_dev = adev->pdev;
+ /* wait for asic to come out of reset, timeout = 10s */
+ timeout = 10000;
+ do {
+ usleep_range(10000, 10500);
+ r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
+ timeout -= 10;
+ } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
+ (status != PCI_VENDOR_ID_AMD));
+
+ if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
+ r = -ETIME;
+ goto out;
+ }
+ amdgpu_device_load_switch_state(adev);
/* Restore PCI confspace */
amdgpu_device_load_pci_state(pdev);
@@ -7076,7 +7300,6 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
amdgpu_device_sched_resume(&device_list, NULL, NULL);
amdgpu_device_gpu_resume(adev, &device_list, false);
amdgpu_device_recovery_put_reset_lock(adev, &device_list);
- adev->pcie_reset_ctx.occurs_dpc = false;
if (hive) {
mutex_unlock(&hive->hive_lock);
@@ -7084,6 +7307,65 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
}
}
+static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *swus, *swds;
+ int r;
+
+ swds = pci_upstream_bridge(adev->pdev);
+ if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
+ pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+ swus = pci_upstream_bridge(swds);
+ if (!swus ||
+ (swus->vendor != PCI_VENDOR_ID_ATI &&
+ swus->vendor != PCI_VENDOR_ID_AMD) ||
+ pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
+ return;
+
+ /* If already saved, return */
+ if (adev->pcie_reset_ctx.swus)
+ return;
+ /* Upstream bridge is ATI, assume it's SWUS/DS architecture */
+ r = pci_save_state(swds);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
+
+ r = pci_save_state(swus);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
+
+ adev->pcie_reset_ctx.swus = swus;
+}
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
+{
+ struct pci_dev *pdev;
+ int r;
+
+ if (!adev->pcie_reset_ctx.swds_pcistate ||
+ !adev->pcie_reset_ctx.swus_pcistate)
+ return;
+
+ pdev = adev->pcie_reset_ctx.swus;
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
+ return;
+ }
+
+ pdev = pci_upstream_bridge(adev->pdev);
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
+ if (!r)
+ pci_restore_state(pdev);
+ else
+ dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
+}
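A sketch of the PCIe hierarchy that amdgpu_device_cache_switch_state() walks with its two pci_upstream_bridge() calls, and the restore order used above (labels are descriptive, not taken from the patch):

/*
 * root port
 *   └── SWUS (switch upstream port, ATI/AMD vendor ID)    - saved second
 *         └── SWDS (switch downstream port, ATI vendor ID) - saved first
 *               └── adev->pdev (the GPU endpoint itself)
 *
 * A DPC-triggered link reset wipes the config space of both switch
 * ports, so amdgpu_device_load_switch_state() restores them top-down,
 * SWUS before SWDS, before the GPU's own PCI state is reloaded in
 * amdgpu_pci_slot_reset().
 */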
+
bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -7108,6 +7390,8 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
return false;
}
+ amdgpu_device_cache_switch_state(adev);
+
return true;
}
@@ -7142,10 +7426,17 @@ void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
if (adev->gmc.xgmi.connected_to_cpu)
return;
- if (ring && ring->funcs->emit_hdp_flush)
+ if (ring && ring->funcs->emit_hdp_flush) {
amdgpu_ring_emit_hdp_flush(ring);
- else
- amdgpu_asic_flush_hdp(adev, ring);
+ return;
+ }
+
+ if (!ring && amdgpu_sriov_runtime(adev)) {
+ if (!amdgpu_kiq_hdp_flush(adev))
+ return;
+ }
+
+ amdgpu_hdp_flush(adev, ring);
}
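The rewritten flush tries three paths in order; a compact summary of the fallback chain (names as used in the hunk; the return convention of amdgpu_kiq_hdp_flush() is inferred from the code, nonzero meaning failure):

/*
 * 1. ring->funcs->emit_hdp_flush  - flush via the given ring, if any
 * 2. amdgpu_kiq_hdp_flush()       - no ring + SR-IOV runtime: ask KIQ
 * 3. amdgpu_hdp_flush()           - direct HDP register fallback
 */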
void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
@@ -7158,7 +7449,7 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
if (adev->gmc.xgmi.connected_to_cpu)
return;
- amdgpu_asic_invalidate_hdp(adev, ring);
+ amdgpu_hdp_invalidate(adev, ring);
}
int amdgpu_in_reset(struct amdgpu_device *adev)
@@ -7494,3 +7785,53 @@ ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
size += sysfs_emit_at(buf, size, "\n");
return size;
}
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid)
+{
+ if (!uid_info)
+ return;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return;
+ }
+
+ if (uid_info->uid[type][inst] != 0) {
+ dev_warn_once(uid_info->adev->dev,
+ "Overwriting existing UID %llu for type %d instance %d\n",
+ uid_info->uid[type][inst], type, inst);
+ }
+
+ uid_info->uid[type][inst] = uid;
+}
+
+u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst)
+{
+ if (!uid_info)
+ return 0;
+
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return 0;
+ }
+
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return 0;
+ }
+
+ return uid_info->uid[type][inst];
+}
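A minimal usage sketch for the new helpers; the enum value and the caller below are illustrative assumptions, only the two helper functions themselves come from this patch:

/* hypothetical caller, e.g. an IP block that read a per-instance fuse */
static void example_record_uid(struct amdgpu_device *adev,
			       uint8_t inst, uint64_t fuse)
{
	/* silently a no-op when uid_info was not allocated */
	amdgpu_device_set_uid(adev->uid_info, AMDGPU_UID_TYPE_XCD,
			      inst, fuse);
}

Both helpers tolerate a NULL uid_info, so callers on parts where amdgpu_is_multi_aid() is false need no extra guard.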
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index efe0058b48ca..fa2a22dfa048 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -107,6 +107,7 @@
#include "vcn_v5_0_1.h"
#include "jpeg_v5_0_0.h"
#include "jpeg_v5_0_1.h"
+#include "amdgpu_ras_mgr.h"
#include "amdgpu_vpe.h"
#if defined(CONFIG_DRM_AMD_ISP)
@@ -254,9 +255,9 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
/* This region is read-only and reserved from system use */
- discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
if (discv_regn) {
- memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memcpy(binary, discv_regn, adev->discovery.size);
memunmap(discv_regn);
return 0;
}
@@ -298,10 +299,31 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
else
vram_size <<= 20;
+ /*
+ * If the discovery table is in VRAM, the TMR is marked for reservation.
+ * If it is in system memory, no reservation is required.
+ */
if (sz_valid) {
- uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->mman.discovery_tmr_size, false);
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ /* For SRIOV VFs with the dynamic critical region enabled,
+ * fetch the IPD binary via the call below. If the dynamic
+ * critical region is disabled, fall through to the normal
+ * sequence.
+ */
+ if (amdgpu_virt_get_dynamic_data_info(adev,
+ AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
+ &adev->discovery.size)) {
+ dev_err(adev->dev,
+ "failed to read discovery info from dynamic critical region.");
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->discovery.size, false);
+ adev->discovery.reserve_tmr = true;
+ }
} else {
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
@@ -310,7 +332,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
dev_err(adev->dev,
"failed to read discovery info from memory, vram size read: %llx",
vram_size);
-
+exit:
return ret;
}
@@ -389,6 +411,7 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
struct binary_header *bhdr)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct table_info *info;
uint16_t checksum;
uint16_t offset;
@@ -398,14 +421,14 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
checksum = le16_to_cpu(info->checksum);
struct nps_info_header *nhdr =
- (struct nps_info_header *)(adev->mman.discovery_bin + offset);
+ (struct nps_info_header *)(discovery_bin + offset);
if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
return -EINVAL;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
le32_to_cpu(nhdr->size_bytes),
checksum)) {
dev_dbg(adev->dev, "invalid nps info data table checksum\n");
@@ -417,8 +440,11 @@ static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
{
- if (amdgpu_discovery == 2)
+ if (amdgpu_discovery == 2) {
+ /* Assume there is a valid discovery TMR in VRAM even if the binary is sideloaded */
+ adev->discovery.reserve_tmr = true;
return "amdgpu/ip_discovery.bin";
+ }
switch (adev->asic_type) {
case CHIP_VEGA10:
@@ -447,49 +473,53 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
+ uint8_t *discovery_bin;
const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
int r;
- adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
- adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
- if (!adev->mman.discovery_bin)
+ adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
+ if (!adev->discovery.bin)
return -ENOMEM;
+ adev->discovery.size = DISCOVERY_TMR_SIZE;
+ adev->discovery.debugfs_blob.data = adev->discovery.bin;
+ adev->discovery.debugfs_blob.size = adev->discovery.size;
+ discovery_bin = adev->discovery.bin;
/* Read from file if it is the preferred option */
fw_name = amdgpu_discovery_get_fw_name(adev);
if (fw_name != NULL) {
drm_dbg(&adev->ddev, "use ip discovery information from file");
- r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
+ r = amdgpu_discovery_read_binary_from_file(adev, discovery_bin,
+ fw_name);
if (r)
goto out;
} else {
drm_dbg(&adev->ddev, "use ip discovery information from memory");
- r = amdgpu_discovery_read_binary_from_mem(
- adev, adev->mman.discovery_bin);
+ r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
if (r)
goto out;
}
/* check the ip discovery binary signature */
- if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ if (!amdgpu_discovery_verify_binary_signature(discovery_bin)) {
dev_err(adev->dev,
"get invalid ip discovery binary signature\n");
r = -EINVAL;
goto out;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = offsetof(struct binary_header, binary_checksum) +
sizeof(bhdr->binary_checksum);
size = le16_to_cpu(bhdr->binary_size) - offset;
checksum = le16_to_cpu(bhdr->binary_checksum);
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- size, checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset, size,
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery binary checksum\n");
r = -EINVAL;
goto out;
@@ -501,15 +531,16 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct ip_discovery_header *ihdr =
- (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
+ (struct ip_discovery_header *)(discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery data table signature\n");
r = -EINVAL;
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le16_to_cpu(ihdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le16_to_cpu(ihdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid ip discovery data table checksum\n");
r = -EINVAL;
goto out;
@@ -522,7 +553,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct gpu_info_header *ghdr =
- (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
+ (struct gpu_info_header *)(discovery_bin + offset);
if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery gc table id\n");
@@ -530,8 +561,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(ghdr->size), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le32_to_cpu(ghdr->size),
+ checksum)) {
dev_err(adev->dev, "invalid gc data table checksum\n");
r = -EINVAL;
goto out;
@@ -544,7 +576,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct harvest_info_header *hhdr =
- (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
+ (struct harvest_info_header *)(discovery_bin + offset);
if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
@@ -552,8 +584,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- sizeof(struct harvest_table), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ sizeof(struct harvest_table), checksum)) {
dev_err(adev->dev, "invalid harvest data table checksum\n");
r = -EINVAL;
goto out;
@@ -566,7 +599,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (offset) {
struct vcn_info_header *vhdr =
- (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
+ (struct vcn_info_header *)(discovery_bin + offset);
if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery vcn table id\n");
@@ -574,8 +607,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(vhdr->size_bytes), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le32_to_cpu(vhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid vcn data table checksum\n");
r = -EINVAL;
goto out;
@@ -588,7 +622,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (0 && offset) {
struct mall_info_header *mhdr =
- (struct mall_info_header *)(adev->mman.discovery_bin + offset);
+ (struct mall_info_header *)(discovery_bin + offset);
if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
dev_err(adev->dev, "invalid ip discovery mall table id\n");
@@ -596,8 +630,9 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
- le32_to_cpu(mhdr->size_bytes), checksum)) {
+ if (!amdgpu_discovery_verify_checksum(discovery_bin + offset,
+ le32_to_cpu(mhdr->size_bytes), checksum)) {
dev_err(adev->dev, "invalid mall data table checksum\n");
r = -EINVAL;
goto out;
@@ -607,8 +642,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
if ((amdgpu_discovery != 2) &&
(RREG32(mmIP_DISCOVERY_VERSION) == 4))
amdgpu_ras_query_boot_status(adev, 4);
@@ -620,8 +655,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
amdgpu_discovery_sysfs_fini(adev);
- kfree(adev->mman.discovery_bin);
- adev->mman.discovery_bin = NULL;
+ kfree(adev->discovery.bin);
+ adev->discovery.bin = NULL;
}
static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
@@ -646,6 +681,7 @@ static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
@@ -655,21 +691,21 @@ static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
uint8_t inst;
int i, j;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
/* scan harvest bit of all IP data structures */
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->mman.discovery_bin +
- ip_offset);
+ ip = (struct ip *)(discovery_bin + ip_offset);
inst = ip->number_instance;
hw_id = le16_to_cpu(ip->hw_id);
if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
@@ -711,13 +747,14 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
uint32_t *vcn_harvest_count,
uint32_t *umc_harvest_count)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct harvest_table *harvest_info;
u16 offset;
int i;
uint32_t umc_harvest_config = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
if (!offset) {
@@ -725,7 +762,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
return;
}
- harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
+ harvest_info = (struct harvest_table *)(discovery_bin + offset);
for (i = 0; i < 32; i++) {
if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
@@ -1021,8 +1058,8 @@ static void ip_disc_release(struct kobject *kobj)
kobj);
struct amdgpu_device *adev = ip_top->adev;
- adev->ip_top = NULL;
kfree(ip_top);
+ adev->discovery.ip_top = NULL;
}
static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
@@ -1033,7 +1070,9 @@ static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
/* Until a uniform way is figured, get mask based on hwid */
switch (hw_id) {
case VCN_HWID:
- harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ /* VCN vs UVD+VCE */
+ if (!amdgpu_ip_version(adev, VCE_HWIP, 0))
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
break;
case DMU_HWID:
if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
@@ -1060,6 +1099,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
const size_t _ip_offset, const int num_ips,
bool reg_base_64)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
int ii, jj, kk, res;
uint16_t hw_id;
uint8_t inst;
@@ -1077,7 +1117,7 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
struct ip_v4 *ip;
struct ip_hw_instance *ip_hw_instance;
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
inst = ip->instance_number;
hw_id = le16_to_cpu(ip->hw_id);
if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
@@ -1164,17 +1204,20 @@ next_ip:
static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
- struct kset *die_kset = &adev->ip_top->die_kset;
+ struct kset *die_kset = &ip_top->die_kset;
u16 num_dies, die_offset, num_ips;
size_t ip_offset;
int ii, res;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
@@ -1183,7 +1226,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
struct ip_die_entry *ip_die_entry;
die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1217,30 +1260,32 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
+ struct ip_discovery_top *ip_top;
struct kset *die_kset;
int res, ii;
- if (!adev->mman.discovery_bin)
+ if (!discovery_bin)
return -EINVAL;
- adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
- if (!adev->ip_top)
+ ip_top = kzalloc(sizeof(*ip_top), GFP_KERNEL);
+ if (!ip_top)
return -ENOMEM;
- adev->ip_top->adev = adev;
-
- res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ ip_top->adev = adev;
+ adev->discovery.ip_top = ip_top;
+ res = kobject_init_and_add(&ip_top->kobj, &ip_discovery_ktype,
&adev->dev->kobj, "ip_discovery");
if (res) {
DRM_ERROR("Couldn't init and add ip_discovery/");
goto Err;
}
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
kobject_set_name(&die_kset->kobj, "%s", "die");
- die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.parent = &ip_top->kobj;
die_kset->kobj.ktype = &die_kobj_ktype;
- res = kset_register(&adev->ip_top->die_kset);
+ res = kset_register(&ip_top->die_kset);
if (res) {
DRM_ERROR("Couldn't register die_kset");
goto Err;
@@ -1254,7 +1299,7 @@ static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
return res;
Err:
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->kobj);
return res;
}
@@ -1299,10 +1344,11 @@ static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
struct list_head *el, *tmp;
struct kset *die_kset;
- die_kset = &adev->ip_top->die_kset;
+ die_kset = &ip_top->die_kset;
spin_lock(&die_kset->list_lock);
list_for_each_prev_safe(el, tmp, &die_kset->list) {
list_del_init(el);
@@ -1311,8 +1357,8 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
spin_lock(&die_kset->list_lock);
}
spin_unlock(&die_kset->list_lock);
- kobject_put(&adev->ip_top->die_kset.kobj);
- kobject_put(&adev->ip_top->kobj);
+ kobject_put(&ip_top->die_kset.kobj);
+ kobject_put(&ip_top->kobj);
}
/* ================================================== */
@@ -1323,6 +1369,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
+ uint8_t *discovery_bin;
struct ip_v4 *ip;
uint16_t die_offset;
uint16_t ip_offset;
@@ -1338,22 +1385,23 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
r = amdgpu_discovery_init(adev);
if (r)
return r;
-
+ discovery_bin = adev->discovery.bin;
wafl_ver = 0;
adev->gfx.xcc_mask = 0;
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
adev->jpeg.inst_mask = 0;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ bhdr = (struct binary_header *)discovery_bin;
+ ihdr = (struct ip_discovery_header *)(discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ dhdr = (struct die_header *)(discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -1367,7 +1415,7 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ ip = (struct ip_v4 *)(discovery_bin + ip_offset);
inst = ip->instance_number;
hw_id = le16_to_cpu(ip->hw_id);
@@ -1517,16 +1565,16 @@ next_ip:
static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct ip_discovery_header *ihdr;
struct binary_header *bhdr;
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
uint16_t offset, ihdr_ver;
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
- ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
- offset);
+ ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
ihdr_ver = le16_to_cpu(ihdr->version);
/*
* Harvest table does not fit Navi1x and legacy GPUs,
@@ -1573,22 +1621,23 @@ union gc_info {
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union gc_info *gc_info;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[GC].offset);
if (!offset)
return 0;
- gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
+ gc_info = (union gc_info *)(discovery_bin + offset);
switch (le16_to_cpu(gc_info->v1.header.version_major)) {
case 1:
@@ -1681,24 +1730,25 @@ union mall_info {
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union mall_info *mall_info;
u32 u, mall_size_per_umc, m_s_present, half_use;
u64 mall_size;
u16 offset;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
if (!offset)
return 0;
- mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
+ mall_info = (union mall_info *)(discovery_bin + offset);
switch (le16_to_cpu(mall_info->v1.header.version_major)) {
case 1:
@@ -1737,12 +1787,13 @@ union vcn_info {
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct binary_header *bhdr;
union vcn_info *vcn_info;
u16 offset;
int v;
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
@@ -1757,13 +1808,13 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
if (!offset)
return 0;
- vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
+ vcn_info = (union vcn_info *)(discovery_bin + offset);
switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
case 1:
@@ -1823,6 +1874,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
struct amdgpu_gmc_memrange **ranges,
int *range_cnt, bool refresh)
{
+ uint8_t *discovery_bin = adev->discovery.bin;
struct amdgpu_gmc_memrange *mem_ranges;
struct binary_header *bhdr;
union nps_info *nps_info;
@@ -1839,13 +1891,13 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
return r;
nps_info = &nps_data;
} else {
- if (!adev->mman.discovery_bin) {
+ if (!discovery_bin) {
dev_err(adev->dev,
"fetch mem range failed, ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ bhdr = (struct binary_header *)discovery_bin;
offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
if (!offset)
@@ -1855,8 +1907,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
return -ENOENT;
- nps_info =
- (union nps_info *)(adev->mman.discovery_bin + offset);
+ nps_info = (union nps_info *)(discovery_bin + offset);
}
switch (le16_to_cpu(nps_info->v1.header.version_major)) {
@@ -2124,7 +2175,6 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
case IP_VERSION(11, 0, 7):
- case IP_VERSION(11, 0, 8):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
@@ -2132,6 +2182,10 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(11, 5, 2):
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
break;
+ case IP_VERSION(11, 0, 8):
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
case IP_VERSION(12, 0, 0):
case IP_VERSION(12, 0, 1):
amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
@@ -2356,6 +2410,21 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_version(adev, SDMA0_HWIP, 0));
return -EINVAL;
}
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_ras_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ amdgpu_device_ip_block_add(adev, &ras_v1_0_ip_block);
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -2562,7 +2631,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
@@ -2589,7 +2660,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
@@ -2616,8 +2689,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega10_reg_base_init(adev);
adev->sdma.num_instances = 1;
+ adev->sdma.sdma_mask = 1;
adev->vcn.num_vcn_inst = 1;
adev->gmc.num_umc = 2;
+ adev->gfx.xcc_mask = 1;
if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
@@ -2662,7 +2737,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
vega20_reg_base_init(adev);
adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
@@ -2690,8 +2767,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
arct_reg_base_init(adev);
adev->sdma.num_instances = 8;
+ adev->sdma.sdma_mask = 0xff;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 8;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
@@ -2723,8 +2802,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_discovery_init(adev);
aldebaran_reg_base_init(adev);
adev->sdma.num_instances = 5;
+ adev->sdma.sdma_mask = 0x1f;
adev->vcn.num_vcn_inst = 2;
adev->gmc.num_umc = 4;
+ adev->gfx.xcc_mask = 1;
adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
@@ -2746,6 +2827,38 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ } else {
+ cyan_skillfish_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->sdma.sdma_mask = 3;
+ adev->gfx.xcc_mask = 1;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
+ }
+ break;
default:
r = amdgpu_discovery_reg_base_init(adev);
if (r) {
@@ -3092,6 +3205,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
if (r)
return r;
+ r = amdgpu_discovery_set_ras_ip_blocks(adev);
+ if (r)
+ return r;
+
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev)) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index b44d56465c5b..4ce04486cc31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -24,9 +24,21 @@
#ifndef __AMDGPU_DISCOVERY__
#define __AMDGPU_DISCOVERY__
+#include <linux/debugfs.h>
+
#define DISCOVERY_TMR_SIZE (10 << 10)
#define DISCOVERY_TMR_OFFSET (64 << 10)
+struct ip_discovery_top;
+
+struct amdgpu_discovery_info {
+ struct debugfs_blob_wrapper debugfs_blob;
+ struct ip_discovery_top *ip_top;
+ uint32_t size;
+ uint8_t *bin;
+ bool reserve_tmr;
+};
+
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 51bab32fd8c6..b5d34797d606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -332,8 +332,6 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
if (crtc->enabled)
active = true;
- pm_runtime_mark_last_busy(dev->dev);
-
adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
* take the current one
@@ -1365,6 +1363,64 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
+/**
+ * DOC: property for adaptive backlight modulation
+ *
+ * The 'adaptive backlight modulation' property is used for the compositor to
+ * directly control the adaptive backlight modulation power savings feature
+ * that is part of DCN hardware.
+ *
+ * The property will be attached specifically to eDP panels that support it.
+ *
+ * The property defaults to 'sysfs', which lets the sysfs file
+ * 'panel_power_savings' control it.
+ * If set to 'off' the compositor will ensure it stays off.
+ * The other values 'min', 'bias min', 'bias max', and 'max' will control the
+ * intensity of the power savings.
+ *
+ * Modifying this value can have implications for color accuracy, so tread
+ * carefully.
+ */
+static int amdgpu_display_setup_abm_prop(struct amdgpu_device *adev)
+{
+ const struct drm_prop_enum_list props[] = {
+ { ABM_SYSFS_CONTROL, "sysfs" },
+ { ABM_LEVEL_OFF, "off" },
+ { ABM_LEVEL_MIN, "min" },
+ { ABM_LEVEL_BIAS_MIN, "bias min" },
+ { ABM_LEVEL_BIAS_MAX, "bias max" },
+ { ABM_LEVEL_MAX, "max" },
+ };
+ struct drm_property *prop;
+ int i;
+
+ if (!adev->dc_enabled)
+ return 0;
+
+ prop = drm_property_create(adev_to_drm(adev), DRM_MODE_PROP_ENUM,
+ "adaptive backlight modulation",
+ 6);
+ if (!prop)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ ret = drm_property_add_enum(prop, props[i].type,
+ props[i].name);
+
+ if (ret) {
+ drm_property_destroy(adev_to_drm(adev), prop);
+
+ return ret;
+ }
+ }
+
+ adev->mode_info.abm_level_property = prop;
+
+ return 0;
+}
+
int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
int sz;
@@ -1411,7 +1467,7 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
"dither",
amdgpu_dither_enum_list, sz);
- return 0;
+ return amdgpu_display_setup_abm_prop(adev);
}
void amdgpu_display_update_priority(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
index 930c171473b4..49a29bf47a37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -55,4 +55,11 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev);
int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
struct drm_scanout_buffer *sb);
+#define ABM_SYSFS_CONTROL -1
+#define ABM_LEVEL_OFF 0
+#define ABM_LEVEL_MIN 1
+#define ABM_LEVEL_BIAS_MIN 2
+#define ABM_LEVEL_BIAS_MAX 3
+#define ABM_LEVEL_MAX 4
+
#endif
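The new connector property is intended to be driven directly by a compositor. A hedged userspace sketch of setting it via libdrm follows; the property name comes from the patch above, while the device path and connector id are placeholders:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Set "adaptive backlight modulation" on a connector to an enum value. */
static int set_abm(int fd, uint32_t connector_id, uint64_t value)
{
	drmModeObjectPropertiesPtr props;
	int ret = -1;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	if (!props)
		return -1;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyPtr prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "adaptive backlight modulation"))
			ret = drmModeObjectSetProperty(fd, connector_id,
						       DRM_MODE_OBJECT_CONNECTOR,
						       prop->prop_id, value);
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
	return ret;
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* placeholder path */

	if (fd < 0)
		return 1;
	/* 42 is a placeholder connector id; value 0 maps to "off" above. */
	return set_abm(fd, 42, 0) ? 1 : 0;
}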
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ce27cb5bb05e..e22cfa7c6d32 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -81,13 +81,44 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct drm_gem_object *obj = dmabuf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ int r;
+
+ /*
+ * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+.
+ * Such buffers cannot be safely accessed over P2P due to the
+ * device-local compression metadata, so fall back to the
+ * system-memory path instead. This applies when:
+ * - the device is GFX12 (GC 12.x or newer), and
+ * - the BO was created with the AMDGPU_GEM_CREATE_GFX12_DCC flag.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
+ bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
+ attach->peer2peer = false;
+
if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
attach->peer2peer = false;
+ r = dma_resv_lock(bo->tbo.base.resv, NULL);
+ if (r)
+ return r;
+
amdgpu_vm_bo_update_shared(bo);
+ dma_resv_unlock(bo->tbo.base.resv);
+
return 0;
}
@@ -343,11 +374,23 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ /* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&bo->tbo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
buf = drm_gem_prime_export(gobj, flags);
if (!IS_ERR(buf))
buf->ops = &amdgpu_dmabuf_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 395c6be901ce..2dfbddcef9ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -144,7 +144,8 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
AMDGPU_DEBUG_SMU_POOL = BIT(7),
AMDGPU_DEBUG_VM_USERPTR = BIT(8),
- AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9)
+ AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9),
+ AMDGPU_DEBUG_ENABLE_CE_CS = BIT(10)
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -311,7 +312,7 @@ module_param_named(moverate, amdgpu_moverate, int, 0600);
* DOC: audio (int)
* Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (enabled); set 0 to disable it.
*/
-MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
+MODULE_PARM_DESC(audio, "HDMI/DP Audio enable for non DC displays (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);
/**
@@ -353,22 +354,16 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint
* DOC: lockup_timeout (string)
* Set GPU scheduler timeout value in ms.
*
- * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or
- * multiple values specified. 0 and negative values are invalidated. They will be adjusted
- * to the default timeout.
+ * The format can be [single value] for setting all timeouts at once or
+ * [GFX,Compute,SDMA,Video] to set individual timeouts.
+ * Negative values mean infinity.
*
- * - With one value specified, the setting will apply to all non-compute jobs.
- * - With multiple values specified, the first one will be for GFX.
- * The second one is for Compute. The third and fourth ones are
- * for SDMA and Video.
- *
- * By default(with no lockup_timeout settings), the timeout for all jobs is 10000.
+ * By default (with no lockup_timeout settings), the timeout for all queues is 2000 ms.
*/
MODULE_PARM_DESC(lockup_timeout,
- "GPU lockup timeout in ms (default: 10000 for all jobs. "
- "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
- "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
-module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
+ "GPU lockup timeout in ms (default: 2000. 0: keep default value. negative: infinity timeout), format: [single value for all] or [GFX,Compute,SDMA,Video].");
+module_param_string(lockup_timeout, amdgpu_lockup_timeout,
+ sizeof(amdgpu_lockup_timeout), 0444);
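The parameter accepts either one value for every queue type or a comma-separated GFX,Compute,SDMA,Video quadruple. A small userspace model of that parsing, assuming strsep-style splitting; the driver's real parser lives in amdgpu_device.c and may differ in detail:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Order matches the documented format: GFX, Compute, SDMA, Video. */
static const char *names[4] = { "GFX", "Compute", "SDMA", "Video" };

int main(void)
{
	char buf[] = "10000,20000,30000,40000";	/* sample module argument */
	long timeout[4] = { 2000, 2000, 2000, 2000 };	/* defaults per the doc */
	char *s = buf, *tok;
	int n = 0;

	while (n < 4 && (tok = strsep(&s, ",")))
		timeout[n++] = strtol(tok, NULL, 10);

	/* A single value applies to all queue types. */
	if (n == 1)
		timeout[1] = timeout[2] = timeout[3] = timeout[0];

	for (n = 0; n < 4; n++) {
		if (timeout[n] < 0)
			printf("%s: infinity\n", names[n]);
		else
			printf("%s: %ld ms\n", names[n], timeout[n]);
	}
	return 0;
}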
/**
* DOC: dpm (int)
@@ -623,39 +618,39 @@ module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
/**
* DOC: si_support (int)
- * Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
- * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
- * otherwise using amdgpu driver.
- */
+ * 1 = enabled, 0 = disabled, -1 = default
+ *
+ * SI (Southern Islands) are first generation GCN GPUs, supported by both
+ * drivers: radeon (old) and amdgpu (new). This parameter controls whether
+ * amdgpu should support SI.
+ * By default, SI dedicated GPUs are supported by amdgpu.
+ * Only relevant when CONFIG_DRM_AMDGPU_SI is enabled, which builds SI support into amdgpu.
+ * See also radeon.si_support which should be disabled when amdgpu.si_support is
+ * enabled, and vice versa.
+ */
+int amdgpu_si_support = -1;
#ifdef CONFIG_DRM_AMDGPU_SI
-
-#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_si_support;
-MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
-#else
-int amdgpu_si_support = 1;
-MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
-#endif
-
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif
/**
* DOC: cik_support (int)
- * Set CIK support driver. This parameter works after set config CONFIG_DRM_AMDGPU_CIK. For CIK asic, when radeon driver is enabled,
- * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
- * otherwise using amdgpu driver.
- */
+ * 1 = enabled, 0 = disabled, -1 = default
+ *
+ * CIK (Sea Islands) are second generation GCN GPUs, supported by both
+ * drivers: radeon (old) and amdgpu (new). This parameter controls whether
+ * amdgpu should support CIK.
+ * By default:
+ * - CIK dedicated GPUs are supported by amdgpu.
+ * - CIK APUs are supported by radeon (except when radeon is not built).
+ * Only relevant when CONFIG_DRM_AMDGPU_CIK is enabled, which builds CIK support into amdgpu.
+ * See also radeon.cik_support which should be disabled when amdgpu.cik_support is
+ * enabled, and vice versa.
+ */
+int amdgpu_cik_support = -1;
#ifdef CONFIG_DRM_AMDGPU_CIK
-
-#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
-int amdgpu_cik_support;
-MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
-#else
-int amdgpu_cik_support = 1;
-MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
-#endif
-
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
@@ -886,7 +881,7 @@ module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
* DOC: dcdebugmask (uint)
- * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * Display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
*/
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
@@ -960,7 +955,7 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);
*/
MODULE_PARM_DESC(
freesync_video,
- "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+ "Adds additional modes via VRR for refresh changes without a full modeset (0 = off (default), 1 = on)");
module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
/**
@@ -2172,6 +2167,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
/* CYAN_SKILLFISH */
+ {0x1002, 0x13DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13F9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
{0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
@@ -2228,7 +2228,6 @@ static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
adev->pdev->bus->number, i);
if (p) {
pm_runtime_get_sync(&p->dev);
- pm_runtime_mark_last_busy(&p->dev);
pm_runtime_put_autosuspend(&p->dev);
pci_dev_put(p);
}
@@ -2284,6 +2283,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: disable kernel logs of correctable errors\n");
adev->debug_disable_ce_logs = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_CE_CS) {
+ pr_info("debug: allowing command submission to CE engine\n");
+ adev->debug_enable_ce_cs = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
@@ -2302,6 +2306,72 @@ static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long fl
return flags;
}
+static bool amdgpu_support_enabled(struct device *dev,
+ const enum amd_asic_type family)
+{
+ const char *gen;
+ const char *param;
+ int module_param = -1;
+ bool radeon_support_built = IS_ENABLED(CONFIG_DRM_RADEON);
+ bool amdgpu_support_built = false;
+ bool support_by_default = false;
+
+ switch (family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ gen = "SI";
+ param = "si_support";
+ module_param = amdgpu_si_support;
+ amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_SI);
+ support_by_default = true;
+ break;
+
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ support_by_default = true;
+ fallthrough;
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ gen = "CIK";
+ param = "cik_support";
+ module_param = amdgpu_cik_support;
+ amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU_CIK);
+ break;
+
+ default:
+ /* All other chips are supported by amdgpu only */
+ return true;
+ }
+
+ if (!amdgpu_support_built) {
+ dev_info(dev, "amdgpu built without %s support\n", gen);
+ return false;
+ }
+
+ if ((module_param == -1 && (support_by_default || !radeon_support_built)) ||
+ module_param == 1) {
+ if (radeon_support_built)
+ dev_info(dev, "%s support provided by amdgpu.\n"
+ "Use radeon.%s=1 amdgpu.%s=0 to override.\n",
+ gen, param, param);
+
+ return true;
+ }
+
+ if (radeon_support_built)
+ dev_info(dev, "%s support provided by radeon.\n"
+ "Use radeon.%s=0 amdgpu.%s=1 to override.\n",
+ gen, param, param);
+ else if (module_param == 0)
+ dev_info(dev, "%s support disabled by module param\n", gen);
+
+ return false;
+}
+
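The helper folds the old #ifdef maze into one decision. A compact userspace model of that decision as a pure function, assuming the generation's support is compiled into amdgpu (amdgpu_support_built is true):

#include <stdbool.h>
#include <stdio.h>

/*
 * Mirrors amdgpu_support_enabled()'s core test: claim the device when the
 * user asked for it (param == 1), or when the param is left at -1 and either
 * the generation defaults to amdgpu or radeon is not built at all.
 */
static bool claim(int param, bool radeon_built, bool by_default)
{
	return param == 1 ||
	       (param == -1 && (by_default || !radeon_built));
}

int main(void)
{
	/* SI dGPU (by_default = true), radeon built: amdgpu claims it. */
	printf("%d\n", claim(-1, true, true));		/* 1 */
	/* CIK APU (by_default = false), radeon built: radeon keeps it. */
	printf("%d\n", claim(-1, true, false));		/* 0 */
	/* CIK APU, radeon not built: amdgpu claims it. */
	printf("%d\n", claim(-1, false, false));	/* 1 */
	/* Explicitly disabled: never claimed. */
	printf("%d\n", claim(0, true, true));		/* 0 */
	return 0;
}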
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -2349,48 +2419,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENOTSUPP;
}
- switch (flags & AMD_ASIC_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
-#ifdef CONFIG_DRM_AMDGPU_SI
- if (!amdgpu_si_support) {
- dev_info(&pdev->dev,
- "SI support provided by radeon.\n");
- dev_info(&pdev->dev,
- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
- );
- return -ENODEV;
- }
- break;
-#else
- dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
+ if (!amdgpu_support_enabled(&pdev->dev, flags & AMD_ASIC_MASK))
return -ENODEV;
-#endif
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
-#ifdef CONFIG_DRM_AMDGPU_CIK
- if (!amdgpu_cik_support) {
- dev_info(&pdev->dev,
- "CIK support provided by radeon.\n");
- dev_info(&pdev->dev,
- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
- );
- return -ENODEV;
- }
- break;
-#else
- dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
- return -ENODEV;
-#endif
- default:
- break;
- }
adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
if (IS_ERR(adev))
@@ -2469,7 +2499,6 @@ retry_init:
pm_runtime_allow(ddev->dev);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
pci_wake_from_d3(pdev, TRUE);
@@ -2553,7 +2582,8 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
- amdgpu_device_ip_suspend(adev);
+ amdgpu_device_prepare(dev);
+ amdgpu_device_suspend(dev, true);
adev->mp1_state = PP_MP1_STATE_NONE;
}
@@ -2597,6 +2627,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
else if (amdgpu_acpi_is_s3_active(adev))
adev->in_s3 = true;
if (!adev->in_s0ix && !adev->in_s3) {
+#if IS_ENABLED(CONFIG_SUSPEND)
/* don't allow going deep first time followed by s2idle the next time */
if (adev->last_suspend_state != PM_SUSPEND_ON &&
adev->last_suspend_state != pm_suspend_target_state) {
@@ -2604,11 +2635,14 @@ static int amdgpu_pmops_suspend(struct device *dev)
pm_suspend_target_state);
return -EINVAL;
}
+#endif
return 0;
}
+#if IS_ENABLED(CONFIG_SUSPEND)
/* cache the state last used for suspend */
adev->last_suspend_state = pm_suspend_target_state;
+#endif
return amdgpu_device_suspend(drm_dev, true);
}
@@ -2617,9 +2651,14 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- if (amdgpu_acpi_should_gpu_reset(adev))
- return amdgpu_asic_reset(adev);
+ if (amdgpu_acpi_should_gpu_reset(adev)) {
+ amdgpu_device_lock_reset_domain(adev->reset_domain);
+ r = amdgpu_asic_reset(adev);
+ amdgpu_device_unlock_reset_domain(adev->reset_domain);
+ return r;
+ }
return 0;
}
@@ -2665,7 +2704,7 @@ static int amdgpu_pmops_thaw(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
/* do not resume device if it's normal hibernation */
- if (!pm_hibernate_is_recovering())
+ if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
return 0;
return amdgpu_device_resume(drm_dev, true);
@@ -2762,22 +2801,8 @@ static int amdgpu_runtime_idle_check_userq(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
- struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0;
-
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- ret = -EBUSY;
- goto done;
- }
- }
-done:
- mutex_unlock(&adev->userq_mutex);
- return ret;
+ return xa_empty(&adev->userq_doorbell_xa) ? 0 : -EBUSY;
}
static int amdgpu_pmops_runtime_suspend(struct device *dev)
@@ -2924,7 +2949,6 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
ret = amdgpu_runtime_idle_check_userq(dev);
done:
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
return ret;
}
@@ -2933,11 +2957,14 @@ static int amdgpu_drm_release(struct inode *inode, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct drm_device *dev = file_priv->minor->dev;
+ int idx;
- if (fpriv) {
+ if (fpriv && drm_dev_enter(dev, &idx)) {
fpriv->evf_mgr.fd_closing = true;
amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ drm_dev_exit(idx);
}
return drm_release(inode, filp);
@@ -2957,22 +2984,21 @@ long amdgpu_drm_ioctl(struct file *filp,
ret = drm_ioctl(filp, cmd, arg);
- pm_runtime_mark_last_busy(dev->dev);
out:
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static const struct dev_pm_ops amdgpu_pm_ops = {
- .prepare = amdgpu_pmops_prepare,
- .complete = amdgpu_pmops_complete,
- .suspend = amdgpu_pmops_suspend,
- .suspend_noirq = amdgpu_pmops_suspend_noirq,
- .resume = amdgpu_pmops_resume,
- .freeze = amdgpu_pmops_freeze,
- .thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_poweroff,
- .restore = amdgpu_pmops_restore,
+ .prepare = pm_sleep_ptr(amdgpu_pmops_prepare),
+ .complete = pm_sleep_ptr(amdgpu_pmops_complete),
+ .suspend = pm_sleep_ptr(amdgpu_pmops_suspend),
+ .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq),
+ .resume = pm_sleep_ptr(amdgpu_pmops_resume),
+ .freeze = pm_sleep_ptr(amdgpu_pmops_freeze),
+ .thaw = pm_sleep_ptr(amdgpu_pmops_thaw),
+ .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff),
+ .restore = pm_sleep_ptr(amdgpu_pmops_restore),
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
@@ -3044,6 +3070,7 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct drm_driver amdgpu_kms_driver = {
@@ -3117,7 +3144,7 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
- .driver.pm = &amdgpu_pm_ops,
+ .driver.pm = pm_ptr(&amdgpu_pm_ops),
.err_handler = &amdgpu_pci_err_handler,
.dev_groups = amdgpu_sysfs_groups,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
index 91d638098889..b349bb3676d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -70,6 +70,7 @@ void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
[AMDGPU_PL_GWS] = "gws",
[AMDGPU_PL_OA] = "oa",
[AMDGPU_PL_DOORBELL] = "doorbell",
+ [AMDGPU_PL_MMIO_REMAP] = "mmioremap",
};
unsigned int hw_ip, i;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 9e7506965cab..c7843e336310 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -45,16 +45,11 @@
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
-static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
- if (__f->base.ops == &amdgpu_fence_ops ||
- __f->base.ops == &amdgpu_job_fence_ops)
- return __f;
-
- return NULL;
+ return __f;
}
/**
@@ -98,52 +93,32 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
* amdgpu_fence_emit - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
- * @f: resulting fence object
* @af: amdgpu fence input
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
- struct amdgpu_fence *af, unsigned int flags)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+ unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
- struct amdgpu_fence *am_fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;
- if (!af) {
- /* create a separate hw fence */
- am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
- if (!am_fence)
- return -ENOMEM;
- am_fence->context = 0;
- } else {
- am_fence = af;
- }
- fence = &am_fence->base;
- am_fence->ring = ring;
+ fence = &af->base;
+ af->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- am_fence->seq = seq;
- if (af) {
- dma_fence_init(fence, &amdgpu_job_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- /* Against remove in amdgpu_job_{free, free_cb} */
- dma_fence_get(fence);
- } else {
- dma_fence_init(fence, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx, seq);
- }
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
- amdgpu_fence_save_wptr(fence);
+ amdgpu_fence_save_wptr(af);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
if (unlikely(rcu_dereference_protected(*ptr, 1))) {
@@ -168,8 +143,6 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
*/
rcu_assign_pointer(*ptr, dma_fence_get(fence));
- *f = fence;
-
return 0;
}
@@ -277,7 +250,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
drv->signalled_wptr = am_fence->wptr;
dma_fence_signal(fence);
dma_fence_put(fence);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
} while (last_seq != seq);
@@ -671,36 +643,6 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
- *
- * @ring: fence of the ring to be cleared
- *
- */
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
-{
- int i;
- struct dma_fence *old, **ptr;
-
- for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
- ptr = &ring->fence_drv.fences[i];
- old = rcu_dereference_protected(*ptr, 1);
- if (old && old->ops == &amdgpu_job_fence_ops) {
- struct amdgpu_job *job;
-
- /* For non-scheduler bad job, i.e. failed ib test, we need to signal
- * it right here or we won't be able to track them in fence_drv
- * and they will remain unsignaled during sa_bo free.
- */
- job = container_of(old, struct amdgpu_job, hw_fence.base);
- if (!job->base.s_fence && !dma_fence_is_signaled(old))
- dma_fence_signal(old);
- RCU_INIT_POINTER(*ptr, NULL);
- dma_fence_put(old);
- }
- }
-}
-
-/**
* amdgpu_fence_driver_set_error - set error code on fences
* @ring: the ring which contains the fences
* @error: the error code to set
@@ -738,7 +680,7 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
}
-/**
+/*
* Kernel queue reset handling
*
* The driver can reset individual queues for most engines, but those queues
@@ -756,21 +698,50 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
/**
* amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
*
- * @fence: fence of the ring to signal
+ * @af: fence of the ring to signal
*
*/
-void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af)
{
- dma_fence_set_error(&fence->base, -ETIME);
- amdgpu_fence_write(fence->ring, fence->seq);
- amdgpu_fence_process(fence->ring);
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ struct amdgpu_ring *ring = af->ring;
+ unsigned long flags;
+ u32 seq, last_seq;
+
+ last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+ seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
+
+ /* mark all fences from the guilty context with an error */
+ spin_lock_irqsave(&ring->fence_drv.lock, flags);
+ do {
+ last_seq++;
+ last_seq &= ring->fence_drv.num_fences_mask;
+
+ ptr = &ring->fence_drv.fences[last_seq];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled_locked(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
+
+ if (fence == af)
+ dma_fence_set_error(&fence->base, -ETIME);
+ else if (fence->context == af->context)
+ dma_fence_set_error(&fence->base, -ECANCELED);
+ }
+ rcu_read_unlock();
+ } while (last_seq != seq);
+ spin_unlock_irqrestore(&ring->fence_drv.lock, flags);
+ /* signal the guilty fence */
+ amdgpu_fence_write(ring, (u32)af->base.seqno);
+ amdgpu_fence_process(ring);
}
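The walk above advances from the last processed slot to the newest one, wrapping through the power-of-two fence array via the mask. A standalone model of that index arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t num_fences = 8, mask = num_fences - 1;
	/* e.g. hardware has processed seq 13, driver has emitted up to seq 18 */
	uint32_t last_seq = 13 & mask;	/* slot 5 */
	uint32_t seq = 18 & mask;	/* slot 2: the walk wraps around */

	do {
		last_seq++;
		last_seq &= mask;
		printf("visit slot %u\n", last_seq);	/* 6 7 0 1 2 */
	} while (last_seq != seq);
	return 0;
}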
-void amdgpu_fence_save_wptr(struct dma_fence *fence)
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af)
{
- struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);
-
- am_fence->wptr = am_fence->ring->wptr;
+ af->wptr = af->ring->wptr;
}
static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
@@ -791,14 +762,19 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
struct dma_fence *unprocessed;
struct dma_fence __rcu **ptr;
struct amdgpu_fence *fence;
- u64 wptr, i, seqno;
+ u64 wptr;
+ u32 seq, last_seq;
- seqno = amdgpu_fence_read(ring);
+ last_seq = amdgpu_fence_read(ring) & ring->fence_drv.num_fences_mask;
+ seq = ring->fence_drv.sync_seq & ring->fence_drv.num_fences_mask;
wptr = ring->fence_drv.signalled_wptr;
ring->ring_backup_entries_to_copy = 0;
- for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
- ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
+ do {
+ last_seq++;
+ last_seq &= ring->fence_drv.num_fences_mask;
+
+ ptr = &ring->fence_drv.fences[last_seq];
rcu_read_lock();
unprocessed = rcu_dereference(*ptr);
@@ -814,7 +790,7 @@ void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
wptr = fence->wptr;
}
rcu_read_unlock();
- }
+ } while (last_seq != seq);
}
/*
@@ -831,13 +807,6 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
return (const char *)to_amdgpu_fence(f)->ring->name;
}
-static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
-
- return (const char *)to_amdgpu_ring(job->base.sched)->name;
-}
-
/**
* amdgpu_fence_enable_signaling - enable signalling on fence
* @f: fence
@@ -855,23 +824,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
}
/**
- * amdgpu_job_fence_enable_signaling - enable signalling on job fence
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_enable_signaling above, it
- * only handles the job embedded fence.
- */
-static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
-{
- struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
-
- if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
-
- return true;
-}
-
-/**
* amdgpu_fence_free - free up the fence memory
*
* @rcu: RCU callback head
@@ -887,21 +839,6 @@ static void amdgpu_fence_free(struct rcu_head *rcu)
}
/**
- * amdgpu_job_fence_free - free up the job with embedded fence
- *
- * @rcu: RCU callback head
- *
- * Free up the job with embedded fence after the RCU grace period.
- */
-static void amdgpu_job_fence_free(struct rcu_head *rcu)
-{
- struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
-
- /* free job if fence has a parent job */
- kfree(container_of(f, struct amdgpu_job, hw_fence.base));
-}
-
-/**
* amdgpu_fence_release - callback that fence can be freed
*
* @f: fence
@@ -914,19 +851,6 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
-/**
- * amdgpu_job_fence_release - callback that job embedded fence can be freed
- *
- * @f: fence
- *
- * This is the simliar function with amdgpu_fence_release above, it
- * only handles the job embedded fence.
- */
-static void amdgpu_job_fence_release(struct dma_fence *f)
-{
- call_rcu(&f->rcu, amdgpu_job_fence_free);
-}
-
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
@@ -934,13 +858,6 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
-static const struct dma_fence_ops amdgpu_job_fence_ops = {
- .get_driver_name = amdgpu_fence_get_driver_name,
- .get_timeline_name = amdgpu_job_fence_get_timeline_name,
- .enable_signaling = amdgpu_job_fence_enable_signaling,
- .release = amdgpu_job_fence_release,
-};
-
/*
* Fence debugfs
*/
@@ -1010,7 +927,6 @@ static int gpu_recover_get(void *data, u64 *val)
*val = atomic_read(&adev->reset_domain->reset_res);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index b2033f8352f5..d2237ce9da70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -302,7 +302,6 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
{
unsigned t;
- unsigned p;
int i, j;
u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */
@@ -316,8 +315,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- for (i = 0; i < pages; i++, p++) {
+ for (i = 0; i < pages; i++) {
page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
@@ -370,6 +368,42 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
}
/**
+ * amdgpu_gart_map_vram_range - map VRAM pages into the GART page table
+ *
+ * @adev: amdgpu_device pointer
+ * @pa: physical address of the first page to be mapped
+ * @start_page: first page to map in the GART aperture
+ * @num_pages: number of pages to be mapped
+ * @flags: page table entry flags
+ * @dst: CPU address of the GART table
+ *
+ * Binds a BO that is allocated in VRAM to the GART page table
+ * (all ASICs).
+ *
+ * Useful when a kernel BO is located in VRAM but
+ * needs to be accessed from the GART address space.
+ */
+void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+ uint64_t start_page, uint64_t num_pages,
+ uint64_t flags, void *dst)
+{
+ u32 i;
+ int idx;
+
+ /* The SYSTEM flag indicates the pages aren't in VRAM. */
+ WARN_ON_ONCE(flags & AMDGPU_PTE_SYSTEM);
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
+
+ for (i = 0; i < num_pages; ++i) {
+ amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ start_page + i, pa + AMDGPU_GPU_PAGE_SIZE * i, flags);
+ }
+
+ drm_dev_exit(idx);
+}
+
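Each loop iteration writes one PTE covering one GPU page. A small model of the address arithmetic, assuming the usual 4 KiB AMDGPU_GPU_PAGE_SIZE; the addresses are samples only:

#include <stdio.h>
#include <stdint.h>

#define GPU_PAGE_SIZE 4096ULL	/* assumed value of AMDGPU_GPU_PAGE_SIZE */

int main(void)
{
	uint64_t pa = 0x100000;		/* first VRAM page to map (sample) */
	uint64_t start_page = 512;	/* first GART PTE index (sample) */
	uint64_t num_pages = 4;

	for (uint64_t i = 0; i < num_pages; i++)
		printf("PTE %llu -> VRAM 0x%llx\n",
		       (unsigned long long)(start_page + i),
		       (unsigned long long)(pa + GPU_PAGE_SIZE * i));
	return 0;
}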
+/**
* amdgpu_gart_bind - bind pages into the gart page table
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index 7cc980bf4725..d3118275ddae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -64,5 +64,8 @@ void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
void *dst);
void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, dma_addr_t *dma_addr, uint64_t flags);
+void amdgpu_gart_map_vram_range(struct amdgpu_device *adev, uint64_t pa,
+ uint64_t start_page, uint64_t num_pages,
+ uint64_t flags, void *dst);
void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d1ccbfcf21fa..3e38c5db2987 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -198,7 +198,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
amdgpu_hmm_unregister(aobj);
- ttm_bo_put(&aobj->tbo);
+ ttm_bo_fini(&aobj->tbo);
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
@@ -443,15 +443,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
int r;
/* reject invalid gem flags */
- if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
- AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
- AMDGPU_GEM_CREATE_ENCRYPTED |
- AMDGPU_GEM_CREATE_GFX12_DCC |
- AMDGPU_GEM_CREATE_DISCARDABLE))
+ if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
return -EINVAL;
/* reject invalid gem domains */
@@ -466,6 +458,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* always clear VRAM */
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+ if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+ return -EINVAL;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -536,7 +531,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_amdgpu_gem_userptr *args = data;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_gem_object *gobj;
- struct hmm_range *range;
+ struct amdgpu_hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
@@ -577,15 +572,20 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
- &range);
- if (r)
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (unlikely(!range)) {
+ r = -ENOMEM;
+ goto release_object;
+ }
+ r = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (r) {
+ amdgpu_hmm_range_free(range);
goto release_object;
-
+ }
r = amdgpu_bo_reserve(bo, true);
if (r)
goto user_pages_done;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
@@ -601,8 +601,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
user_pages_done:
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
-
+ amdgpu_hmm_range_free(range);
release_object:
drm_gem_object_put(gobj);
@@ -791,36 +790,6 @@ error:
return fence;
}
-/**
- * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
- *
- * @adev: amdgpu_device pointer
- * @flags: GEM UAPI flags
- *
- * Returns the GEM UAPI flags mapped into hardware for the ASIC.
- */
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
-{
- uint64_t pte_flag = 0;
-
- if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
- pte_flag |= AMDGPU_PTE_EXECUTABLE;
- if (flags & AMDGPU_VM_PAGE_READABLE)
- pte_flag |= AMDGPU_PTE_READABLE;
- if (flags & AMDGPU_VM_PAGE_WRITEABLE)
- pte_flag |= AMDGPU_PTE_WRITEABLE;
- if (flags & AMDGPU_VM_PAGE_PRT)
- pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
- if (flags & AMDGPU_VM_PAGE_NOALLOC)
- pte_flag |= AMDGPU_PTE_NOALLOC;
-
- if (adev->gmc.gmc_funcs->map_mtype)
- pte_flag |= amdgpu_gmc_map_mtype(adev,
- flags & AMDGPU_VM_MTYPE_MASK);
-
- return pte_flag;
-}
-
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -841,7 +810,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct dma_fence_chain *timeline_chain = NULL;
struct dma_fence *fence;
struct drm_exec exec;
- uint64_t va_flags;
uint64_t vm_size;
int r = 0;
@@ -945,10 +913,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
case AMDGPU_VA_OP_UNMAP:
r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
@@ -960,10 +927,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
default:
break;
@@ -997,17 +963,34 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
+ struct drm_exec exec;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
int r;
+ if (args->padding)
+ return -EINVAL;
+
gobj = drm_gem_object_lookup(filp, args->handle);
if (!gobj)
return -ENOENT;
robj = gem_to_amdgpu_bo(gobj);
- r = amdgpu_bo_reserve(robj, false);
- if (unlikely(r))
- goto out;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+
+ if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+ }
+ }
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
@@ -1018,7 +1001,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
if (copy_to_user(out, &info, sizeof(info)))
r = -EFAULT;
break;
@@ -1027,20 +1010,17 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (drm_gem_is_imported(&robj->tbo.base) &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
for (base = robj->vm_bo; base; base = base->next)
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- goto out;
+ goto out_exec;
}
@@ -1053,17 +1033,146 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
amdgpu_vm_bo_invalidate(robj, true);
+ drm_exec_fini(&exec);
+ break;
+ case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+ struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+ struct drm_amdgpu_gem_vm_entry *vm_entries;
+ struct amdgpu_bo_va_mapping *mapping;
+ int num_mappings = 0;
+ /*
+ * On input, num_entries is the size of the user-allocated array of
+ * drm_amdgpu_gem_vm_entry stored at args->value.
+ * On output, num_entries is the number of mappings the BO has.
+ * If that number is larger than the array size, the ioctl must
+ * be retried.
+ */
+ vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+ if (!vm_entries) {
+ r = -ENOMEM;
+ goto out_exec;
+ }
+
+ amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
+
+ if (num_mappings > 0 && num_mappings <= args->num_entries)
+ if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries)))
+ r = -EFAULT;
+
+ args->num_entries = num_mappings;
+
+ kvfree(vm_entries);
break;
+ }
default:
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
r = -EINVAL;
}
-out:
drm_gem_object_put(gobj);
return r;
+out_exec:
+ drm_exec_fini(&exec);
+ drm_gem_object_put(gobj);
+ return r;
+}
+
+/**
+ * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_gem_list_handles
+ * @filp: drm file pointer
+ *
+ * On input, num_entries is the size of the entries array.
+ * On output, num_entries is the number of BOs in the process.
+ * If that number is larger than the array size, the ioctl must
+ * be retried.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_amdgpu_gem_list_handles *args = data;
+ struct drm_amdgpu_gem_list_handles_entry *bo_entries;
+ struct drm_gem_object *gobj;
+ int id, ret = 0;
+ int bo_index = 0;
+ int num_bos = 0;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id)
+ num_bos += 1;
+ spin_unlock(&filp->table_lock);
+
+ if (args->num_entries < num_bos) {
+ args->num_entries = num_bos;
+ return 0;
+ }
+
+ if (num_bos == 0) {
+ args->num_entries = 0;
+ return 0;
+ }
+
+ bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
+ if (!bo_entries)
+ return -ENOMEM;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct drm_amdgpu_gem_list_handles_entry *bo_entry;
+
+ if (bo_index >= num_bos) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ bo_entry = &bo_entries[bo_index];
+
+ bo_entry->size = amdgpu_bo_size(bo);
+ bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
+ bo_entry->preferred_domains = bo->preferred_domains;
+ bo_entry->gem_handle = id;
+ bo_entry->alignment = bo->tbo.page_alignment;
+
+ if (bo->tbo.base.import_attach)
+ bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
+
+ bo_index += 1;
+ }
+ spin_unlock(&filp->table_lock);
+
+ args->num_entries = bo_index;
+
+ if (!ret)
+ if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)))
+ ret = -EFAULT;
+
+ kvfree(bo_entries);
+
+ return ret;
}
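Userspace is expected to use the usual two-call pattern: probe with a zero-sized array to learn the count, allocate, then call again and retry if the count grew in between. A hedged sketch; the command index and struct names are taken from this patch's UAPI additions and should be treated as assumptions:

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"	/* assumed to carry the new list-handles UAPI */

/*
 * Returns a malloc'ed array of entries and the count in *count, or NULL
 * (NULL with *count == 0 means "no BOs" rather than an error).
 */
static struct drm_amdgpu_gem_list_handles_entry *
list_handles(int fd, uint32_t *count)
{
	struct drm_amdgpu_gem_list_handles args = { 0 };
	struct drm_amdgpu_gem_list_handles_entry *e = NULL;
	uint32_t cap = 0;

	*count = 0;
	for (;;) {
		args.num_entries = cap;
		args.entries = (uint64_t)(uintptr_t)e;
		if (drmCommandWriteRead(fd, DRM_AMDGPU_GEM_LIST_HANDLES,
					&args, sizeof(args)))
			break;			/* ioctl error */
		if (args.num_entries <= cap) {	/* everything fit */
			*count = args.num_entries;
			return e;
		}
		cap = args.num_entries;		/* array too small: grow, retry */
		free(e);
		e = calloc(cap, sizeof(*e));
		if (!e)
			break;
	}
	free(e);
	return NULL;
}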
static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index 3a8f57900a3a..b558336bc4c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -63,13 +63,28 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+#define AMDGPU_GEM_CREATE_SETTABLE_MASK (AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS | \
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC | \
+ AMDGPU_GEM_CREATE_VRAM_CLEARED | \
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | \
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC | \
+ AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE | \
+ AMDGPU_GEM_CREATE_ENCRYPTED | \
+ AMDGPU_GEM_CREATE_GFX12_DCC | \
+ AMDGPU_GEM_CREATE_DISCARDABLE | \
+ AMDGPU_GEM_CREATE_COHERENT | \
+ AMDGPU_GEM_CREATE_UNCACHED | \
+ AMDGPU_GEM_CREATE_EXT_COHERENT)
+
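With the settable bits collected in one mask, the create-ioctl validation collapses to a single test. A trivial illustration of the idiom with stand-in flag values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_A (1u << 0)
#define FLAG_B (1u << 1)
#define SETTABLE_MASK (FLAG_A | FLAG_B)	/* stand-ins for the GEM flags */

static bool flags_valid(uint32_t flags)
{
	/* Any bit outside the settable mask rejects the request. */
	return !(flags & ~SETTABLE_MASK);
}

int main(void)
{
	printf("%d %d\n", flags_valid(FLAG_A), flags_valid(1u << 5));	/* 1 0 */
	return 0;
}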
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c80c8f543532..8b118c53f351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -33,6 +33,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_xcp.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_mes.h"
#include "nvd.h"
/* delay 0.1 second to enable gfx off feature */
@@ -1102,6 +1103,9 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_read;
+
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
@@ -1171,6 +1175,8 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint3
might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_write;
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
@@ -1189,6 +1195,75 @@ failed_kiq_write:
dev_err(adev->dev, "failed to write reg:%x\n", reg);
}
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (adev->enable_mes_kiq && adev->mes.ring[0].sched.ready)
+ return amdgpu_mes_hdp_flush(adev);
+
+ if (!ring->funcs->emit_hdp_flush) {
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
+ amdgpu_ring_emit_hdp_flush(ring);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't wait any longer in the GPU reset case because doing so may
+ * block the gpu_recover() routine forever, e.g. when this flush is
+ * triggered from TTM and ttm_bo_lock_delayed_workqueue() never
+ * returns while we keep waiting here, which makes gpu_recover()
+ * hang.
+ *
+ * Also don't wait any longer in IRQ context.
+ */
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+ goto failed_kiq_hdp_flush;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_hdp_flush;
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
+ dev_err(adev->dev, "failed to flush HDP via KIQ timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq_hdp_flush:
+ dev_err(adev->dev, "failed to flush HDP via KIQ\n");
+ return r < 0 ? r : -EIO;
+}
+
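The timeout test after the polling loop relies on the post-increment: cnt can exceed MAX_KIQ_REG_TRY only when every retry was consumed without success. A minimal model of the pattern:

#include <stdio.h>

#define MAX_TRY 5

/* Pretend poll: succeeds (returns 1) once 'now' reaches 'done_at'. */
static int poll_once(int now, int done_at)
{
	return now >= done_at ? 1 : 0;
}

int main(void)
{
	int r = 0, cnt = 0, done_at = 100;	/* never completes in time */

	while (r < 1 && cnt++ < MAX_TRY)
		r = poll_once(cnt, done_at);

	/* cnt is MAX_TRY + 1 here only if every attempt failed. */
	if (cnt > MAX_TRY)
		printf("timeout after %d tries\n", MAX_TRY);
	else
		printf("completed on try %d\n", cnt);
	return 0;
}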
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
if (amdgpu_num_kcq == -1) {
@@ -1474,7 +1549,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
owner = (void *)(unsigned long)atomic_inc_return(&counter);
r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
- 64, 0, &job);
+ 64, 0, &job,
+ AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
if (r)
goto err;
@@ -1594,7 +1670,6 @@ static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
ret = amdgpu_gfx_run_cleaner_shader(adev, value);
- pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
if (ret)
@@ -2279,7 +2354,7 @@ void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
* Return:
* return the latest index.
*/
-u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
{
u32 count = 0;
@@ -2303,7 +2378,7 @@ u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer)
* Return:
* return the latest index.
*/
-u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count)
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
{
const struct cs_section_def *sect = NULL;
const struct cs_extent_def *ext = NULL;
@@ -2330,7 +2405,7 @@ u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer,
* @buffer: Output buffer that receives the PACKET3 preamble end.
* @count: Index at which to set the preamble end.
*/
-void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count)
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
{
buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
@@ -2479,3 +2554,4 @@ void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
&amdgpu_debugfs_compute_sched_mask_fops);
#endif
}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 08f268dab8f5..efd61a1ccc66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -615,6 +615,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
+int amdgpu_kiq_hdp_flush(struct amdgpu_device *adev);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
@@ -642,9 +643,9 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
-u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer);
-u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count);
-void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count);
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count);
void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 97b562a79ea8..869bceb0fe2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -597,6 +597,9 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
/* reserve engine 5 for firmware */
if (adev->enable_mes)
vm_inv_engs[i] &= ~(1 << 5);
+ /* reserve engine 6 for uni mes */
+ if (adev->enable_uni_mes)
+ vm_inv_engs[i] &= ~(1 << 6);
/* reserve mmhub engine 3 for firmware */
if (adev->enable_umsch_mm)
vm_inv_engs[i] &= ~(1 << 3);
@@ -690,7 +693,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
if (r)
goto error_alloc;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 397c6ccdb903..727342689d4b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -86,6 +86,11 @@ enum amdgpu_memory_partition {
#define AMDGPU_MAX_MEM_RANGES 8
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY 0x80
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_READ 0x40
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20
+#define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE 0x10
+
/*
* GMC page fault information
*/
@@ -154,15 +159,15 @@ struct amdgpu_gmc_funcs {
unsigned pasid);
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* map mtype to hardware flags */
- uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
- /* get the pte flags to use for a BO VA mapping */
+ /* get the pte flags to use for PTEs */
void (*get_vm_pte)(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t *flags);
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
+ uint64_t *pte_flags);
/* override per-page pte flags */
void (*override_vm_pte_flags)(struct amdgpu_device *dev,
struct amdgpu_vm *vm,
@@ -356,9 +361,10 @@ struct amdgpu_gmc {
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
-#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
+ ((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
+ (pte_flags)))
#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
(adev)->gmc.gmc_funcs->override_vm_pte_flags \
((adev), (vm), (addr), (pte_flags))
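With the reworked hook, PTE flags are derived from the VM, the BO, and caller-supplied vm_flags rather than from a bo_va mapping. A hedged call-site sketch of the macro above (the wrapper function is hypothetical):

static uint64_t example_compute_pte_flags(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm,
					  struct amdgpu_bo *bo,
					  uint32_t vm_flags)
{
	uint64_t pte_flags = 0;

	/* the per-ASIC hook fills in MTYPE/system/executable bits */
	amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, &pte_flags);
	return pte_flags;
}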
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 0760e70402ec..895c1e4c6747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -284,6 +284,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
+ start += amdgpu_vce_required_gart_pages(adev);
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
index 6e02fb9ac2f6..5a60d69a3e1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -66,3 +66,19 @@ void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
0);
}
}
+
+void amdgpu_hdp_invalidate(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->asic_funcs && adev->asic_funcs->invalidate_hdp)
+ adev->asic_funcs->invalidate_hdp(adev, ring);
+ else if (adev->hdp.funcs && adev->hdp.funcs->invalidate_hdp)
+ adev->hdp.funcs->invalidate_hdp(adev, ring);
+}
+
+void amdgpu_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->asic_funcs && adev->asic_funcs->flush_hdp)
+ adev->asic_funcs->flush_hdp(adev, ring);
+ else if (adev->hdp.funcs && adev->hdp.funcs->flush_hdp)
+ adev->hdp.funcs->flush_hdp(adev, ring);
+}
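The two wrappers centralize the dispatch order: an ASIC-level callback wins, otherwise the HDP IP callback is used, and either may be absent. A hedged usage sketch (whether ring may be NULL for the MMIO path follows the existing callback contracts and is an assumption here):

static void example_hdp_sync(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
	/* make CPU writes visible to the GPU before it reads */
	amdgpu_hdp_flush(adev, ring);

	/* ... GPU work ... */

	/* make GPU writes visible to the CPU afterwards */
	amdgpu_hdp_invalidate(adev, ring);
}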
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
index 4cfd932b7e91..d9f488fa76b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -46,4 +46,8 @@ struct amdgpu_hdp {
int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+void amdgpu_hdp_invalidate(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_hdp_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index e36fede7f74c..90d26d820bac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -167,19 +167,14 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
- struct hmm_range **phmm_range)
+ void *owner,
+ struct amdgpu_hmm_range *range)
{
- struct hmm_range *hmm_range;
unsigned long end;
unsigned long timeout;
- unsigned long i;
unsigned long *pfns;
int r = 0;
-
- hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
- if (unlikely(!hmm_range))
- return -ENOMEM;
+ struct hmm_range *hmm_range = &range->hmm_range;
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
if (unlikely(!pfns)) {
@@ -222,36 +217,77 @@ retry:
hmm_range->start = start;
hmm_range->hmm_pfns = pfns;
- /*
- * Due to default_flags, all pages are HMM_PFN_VALID or
- * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
- * the notifier_lock, and mmu_interval_read_retry() must be done first.
- */
- for (i = 0; pages && i < npages; i++)
- pages[i] = hmm_pfn_to_page(pfns[i]);
-
- *phmm_range = hmm_range;
-
return 0;
out_free_pfns:
kvfree(pfns);
+ hmm_range->hmm_pfns = NULL;
out_free_range:
- kfree(hmm_range);
-
if (r == -EBUSY)
r = -EAGAIN;
return r;
}
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+/**
+ * amdgpu_hmm_range_valid - check if an HMM range is still valid
+ * @range: pointer to the &struct amdgpu_hmm_range to validate
+ *
+ * Determines whether the given HMM range @range is still valid by
+ * checking for invalidations via the MMU notifier sequence. This is
+ * typically used to verify that the range has not been invalidated
+ * by concurrent address space updates before it is accessed.
+ *
+ * Return:
+ * * true if @range is valid and can be used safely
+ * * false if @range is NULL or has been invalidated
+ */
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
{
- bool r;
+ if (!range)
+ return false;
- r = mmu_interval_read_retry(hmm_range->notifier,
- hmm_range->notifier_seq);
- kvfree(hmm_range->hmm_pfns);
- kfree(hmm_range);
+ return !mmu_interval_read_retry(range->hmm_range.notifier,
+ range->hmm_range.notifier_seq);
+}
- return r;
+/**
+ * amdgpu_hmm_range_alloc - allocate and initialize an AMDGPU HMM range
+ * @bo: optional buffer object to associate with this HMM range
+ *
+ * Allocates memory for amdgpu_hmm_range and associates it with the @bo passed.
+ * The reference count of the @bo is incremented.
+ *
+ * Return:
+ * Pointer to a newly allocated struct amdgpu_hmm_range on success,
+ * or NULL if memory allocation fails.
+ */
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ struct amdgpu_hmm_range *range;
+
+ range = kzalloc(sizeof(*range), GFP_KERNEL);
+ if (!range)
+ return NULL;
+
+ range->bo = amdgpu_bo_ref(bo);
+ return range;
+}
+
+/**
+ * amdgpu_hmm_range_free - release an AMDGPU HMM range
+ * @range: pointer to the range object to free
+ *
+ * Releases all resources held by @range, including the associated
+ * hmm_pfns, and dropping the reference on the associated bo, if any.
+ *
+ * Return: void
+ */
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range)
+{
+ if (!range)
+ return;
+
+ kvfree(range->hmm_range.hmm_pfns);
+ amdgpu_bo_unref(&range->bo);
+ kfree(range);
}
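Taken together, these helpers give an HMM range a clear lifecycle: allocate (taking a bo reference), fault the pages, validate under the MMU notifier, and free. A minimal caller sketch, assuming bo->notifier was registered via amdgpu_hmm_register() and the usual -EAGAIN retry loop around it:

static int example_hmm_walk(struct amdgpu_bo *bo, uint64_t start,
			    uint64_t npages, void *owner)
{
	struct amdgpu_hmm_range *range;
	int r;

	range = amdgpu_hmm_range_alloc(bo);	/* grabs a bo reference */
	if (!range)
		return -ENOMEM;

	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, npages,
				       false, owner, range);
	if (r)
		goto out_free;

	/* consume range->hmm_range.hmm_pfns under the notifier lock */

	if (!amdgpu_hmm_range_valid(range))
		r = -EAGAIN;	/* invalidated concurrently, retry */

out_free:
	amdgpu_hmm_range_free(range);	/* frees pfns, drops the bo ref */
	return r;
}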
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
index e2edcd010ccc..140bc9cd57b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -31,13 +31,20 @@
#include <linux/interval_tree.h>
#include <linux/mmu_notifier.h>
+struct amdgpu_hmm_range {
+ struct hmm_range hmm_range;
+ struct amdgpu_bo *bo;
+};
+
int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
uint64_t start, uint64_t npages, bool readonly,
- void *owner, struct page **pages,
- struct hmm_range **phmm_range);
-bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+ void *owner,
+ struct amdgpu_hmm_range *range);
#if defined(CONFIG_HMM_MIRROR)
+bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range);
+struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo);
+void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range);
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
#else
@@ -47,7 +54,20 @@ static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
"add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
return -ENODEV;
}
+
static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
+
+static inline bool amdgpu_hmm_range_valid(struct amdgpu_hmm_range *range)
+{
+ return false;
+}
+
+static inline struct amdgpu_hmm_range *amdgpu_hmm_range_alloc(struct amdgpu_bo *bo)
+{
+ return NULL;
+}
+
+static inline void amdgpu_hmm_range_free(struct amdgpu_hmm_range *range) {}
#endif
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 57101d24422f..9cb72f0c5277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -184,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter);
if (ret)
goto out_free;
} else {
@@ -215,15 +215,6 @@ out_free:
}
-void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
-{
- if (!i2c)
- return;
- WARN_ON(i2c->has_aux);
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
-}
-
void amdgpu_i2c_init(struct amdgpu_device *adev)
{
if (!adev->is_atom_fw) {
@@ -248,12 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (adev->i2c_bus[i]) {
- amdgpu_i2c_destroy(adev->i2c_bus[i]);
+ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++)
+ if (adev->i2c_bus[i])
adev->i2c_bus[i] = NULL;
- }
- }
}
/* looks up bus based on id */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7d9bcb72e8dd..586a58facca1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -149,17 +149,19 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
if (job) {
vm = job->vm;
fence_ctx = job->base.s_fence ?
- job->base.s_fence->scheduled.context : 0;
+ job->base.s_fence->finished.context : 0;
shadow_va = job->shadow_va;
csa_va = job->csa_va;
gds_va = job->gds_va;
init_shadow = job->init_shadow;
- af = &job->hw_fence;
+ af = job->hw_fence;
/* Save the context of the job for reset handling.
* The driver needs this so it can skip the ring
* contents for guilty contexts.
*/
- af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
+ af->context = fence_ctx;
+ /* the vm fence is also part of the job's context */
+ job->hw_vm_fence->context = fence_ctx;
} else {
vm = NULL;
fence_ctx = 0;
@@ -167,23 +169,28 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
csa_va = 0;
gds_va = 0;
init_shadow = false;
- af = NULL;
+ af = kzalloc(sizeof(*af), GFP_ATOMIC);
+ if (!af)
+ return -ENOMEM;
}
if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
(!ring->funcs->secure_submission_supported)) {
dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
- return -EINVAL;
+ r = -EINVAL;
+ goto free_fence;
}
alloc_size = ring->funcs->emit_frame_size + num_ibs *
@@ -192,7 +199,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
r = amdgpu_ring_alloc(ring, alloc_size);
if (r) {
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
- return r;
+ goto free_fence;
}
need_ctx_switch = ring->current_ctx != fence_ctx;
@@ -289,7 +296,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
}
- r = amdgpu_fence_emit(ring, f, af, fence_flags);
+ r = amdgpu_fence_emit(ring, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)
@@ -297,6 +304,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
amdgpu_ring_undo(ring);
return r;
}
+ *f = &af->base;
+ /* get a ref for the job */
+ if (job)
+ dma_fence_get(*f);
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
@@ -317,12 +328,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
* fence so we know what rings contents to backup
* after we reset the queue.
*/
- amdgpu_fence_save_wptr(*f);
+ amdgpu_fence_save_wptr(af);
amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
return 0;
+
+free_fence:
+ if (!job)
+ kfree(af);
+ return r;
}
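A short summary of the fence ownership contract this hunk establishes in amdgpu_ib_schedule() (descriptive only, no new behavior):

/*
 * job != NULL: af points at job->hw_fence, preallocated by
 *		amdgpu_job_alloc(); after a successful emit an extra
 *		reference is taken for the job via dma_fence_get().
 * job == NULL: af is allocated here with GFP_ATOMIC and must be
 *		kfree()d on every pre-emission error path, which is
 *		exactly what the free_fence label does.
 */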
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 5dd78a9cb12d..9cab36322c16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -201,58 +201,34 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct dma_fence **fences;
- unsigned i;
+ /* If anybody is waiting for a VMID, let everybody wait for fairness */
if (!dma_fence_is_signaled(ring->vmid_wait)) {
*fence = dma_fence_get(ring->vmid_wait);
return 0;
}
- fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
- if (!fences)
- return -ENOMEM;
-
/* Check if we have an idle VMID */
- i = 0;
- list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+ list_for_each_entry_reverse((*idle), &id_mgr->ids_lru, list) {
/* Don't use per engine and per process VMID at the same time */
struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
NULL : ring;
- fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
- if (!fences[i])
- break;
- ++i;
+ *fence = amdgpu_sync_peek_fence(&(*idle)->active, r);
+ if (!(*fence))
+ return 0;
}
- /* If we can't find a idle VMID to use, wait till one becomes available */
- if (&(*idle)->list == &id_mgr->ids_lru) {
- u64 fence_context = adev->vm_manager.fence_context + ring->idx;
- unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct dma_fence_array *array;
- unsigned j;
-
- *idle = NULL;
- for (j = 0; j < i; ++j)
- dma_fence_get(fences[j]);
-
- array = dma_fence_array_create(i, fences, fence_context,
- seqno, true);
- if (!array) {
- for (j = 0; j < i; ++j)
- dma_fence_put(fences[j]);
- kfree(fences);
- return -ENOMEM;
- }
-
- *fence = dma_fence_get(&array->base);
- dma_fence_put(ring->vmid_wait);
- ring->vmid_wait = &array->base;
- return 0;
- }
- kfree(fences);
+ /*
+ * If we can't find an idle VMID to use, wait on a fence from the least
+ * recently used in the hope that it will be available soon.
+ */
+ *idle = NULL;
+ dma_fence_put(ring->vmid_wait);
+ ring->vmid_wait = dma_fence_get(*fence);
+ /* This is the reference we return */
+ dma_fence_get(*fence);
return 0;
}
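The rewritten grab path drops the dma_fence_array of all busy VMIDs in favor of a single wait. A hedged summary of the new invariant:

/*
 * The reverse LRU walk returns the first VMID whose ->active sync has
 * no blocking fence (amdgpu_sync_peek_fence() returns NULL) via *idle.
 * If every VMID is busy, the fence left over from the walk is stored
 * as ring->vmid_wait and returned with an extra reference, so later
 * callers line up behind the same wait instead of each allocating a
 * fence array as the old code did.
 */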
@@ -275,13 +251,12 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->vm_hub;
- struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
bool needs_flush = vm->use_cpu_for_update;
uint64_t updates = amdgpu_vm_tlb_seq(vm);
int r;
- *id = id_mgr->reserved;
+ *id = vm->reserved_vmid[vmhub];
if ((*id)->owner != vm->immediate.fence_context ||
!amdgpu_vmid_compatible(*id, job) ||
(*id)->flushed_updates < updates ||
@@ -314,7 +289,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
* user of the VMID.
*/
r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
- GFP_NOWAIT);
+ GFP_ATOMIC);
if (r)
return r;
@@ -374,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
*/
r = amdgpu_sync_fence(&(*id)->active,
&job->base.s_fence->finished,
- GFP_NOWAIT);
+ GFP_ATOMIC);
if (r)
return r;
@@ -427,7 +402,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(&id->active,
&job->base.s_fence->finished,
- GFP_NOWAIT);
+ GFP_ATOMIC);
if (r)
goto error;
@@ -474,40 +449,61 @@ bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
return vm->reserved_vmid[vmhub];
}
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
+ * @adev: amdgpu device structure
+ * @vm: the VM to reserve an ID for
+ * @vmhub: the VMHUB which should be used
+ *
+ * Mostly used to have a reserved VMID for debugging and SPM.
+ *
+ * Returns: 0 for success, -ENOENT if an ID is already reserved.
+ */
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id;
+ int r = 0;
mutex_lock(&id_mgr->lock);
-
- ++id_mgr->reserved_use_count;
- if (!id_mgr->reserved) {
- struct amdgpu_vmid *id;
-
- id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
- list);
- /* Remove from normal round robin handling */
- list_del_init(&id->list);
- id_mgr->reserved = id;
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (id_mgr->reserved_vmid) {
+ r = -ENOENT;
+ goto unlock;
}
-
+ /* Remove from normal round robin handling */
+ id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&id->list);
+ vm->reserved_vmid[vmhub] = id;
+ id_mgr->reserved_vmid = true;
mutex_unlock(&id_mgr->lock);
+
return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
}
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
+/*
+ * amdgpu_vmid_free_reserved - free up a reserved VMID again
+ * @adev: amdgpu device structure
+ * @vm: the VM with the reserved ID
+ * @vmhub: the VMHUB which should be used
+ */
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned vmhub)
{
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
mutex_lock(&id_mgr->lock);
- if (!--id_mgr->reserved_use_count) {
- /* give the reserved ID back to normal round robin */
- list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
- id_mgr->reserved = NULL;
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ id_mgr->reserved_vmid = false;
}
-
mutex_unlock(&id_mgr->lock);
}
@@ -574,7 +570,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
mutex_init(&id_mgr->lock);
INIT_LIST_HEAD(&id_mgr->ids_lru);
- id_mgr->reserved_use_count = 0;
/* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
@@ -594,11 +589,6 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
}
}
- /* alloc a default reserved vmid to enforce isolation */
- for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
- if (adev->enforce_isolation[i] != AMDGPU_ENFORCE_ISOLATION_DISABLE)
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
index 240fa6751260..b3649cd3af56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -67,8 +67,7 @@ struct amdgpu_vmid_mgr {
unsigned num_ids;
struct list_head ids_lru;
struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
- struct amdgpu_vmid *reserved;
- unsigned int reserved_use_count;
+ bool reserved_vmid;
};
int amdgpu_pasid_alloc(unsigned int bits);
@@ -79,10 +78,10 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id);
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
-int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
-void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
- unsigned vmhub);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_job *job, struct dma_fence **fence);
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 7f7ea046e209..f58b6be7fccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -56,14 +56,14 @@ struct amdgpu_ih_ring {
bool use_bus_addr;
struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
+ uint32_t *ring;
uint64_t gpu_addr;
uint64_t wptr_addr;
- volatile uint32_t *wptr_cpu;
+ uint32_t *wptr_cpu;
uint64_t rptr_addr;
- volatile uint32_t *rptr_cpu;
+ uint32_t *rptr_cpu;
bool enabled;
unsigned rptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
index 9cddbf50442a..37270c4dab8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -280,6 +280,8 @@ int isp_kernel_buffer_alloc(struct device *dev, u64 size,
if (ret)
return ret;
+ /* Ensure *bo is NULL so a new BO will be created */
+ *bo = NULL;
ret = amdgpu_bo_create_kernel(adev,
size,
ISP_MC_ADDR_ALIGN,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 9b1c55115921..0a0dcbf0798d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -130,14 +130,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
}
/* attempt a per ring reset */
- if (unlikely(adev->debug_disable_gpu_ring_reset)) {
- dev_err(adev->dev, "Ring reset disabled by debug mask\n");
- } else if (amdgpu_gpu_recovery &&
- amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
- ring->funcs->reset) {
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
+ ring->funcs->reset) {
dev_err(adev->dev, "Starting %s ring reset\n",
s_job->sched->name);
- r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
+ r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
if (!r) {
atomic_inc(&ring->adev->gpu_reset_counter);
dev_err(adev->dev, "Ring %s reset succeeded\n",
@@ -186,6 +184,9 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned int num_ibs, struct amdgpu_job **job,
u64 drm_client_id)
{
+ struct amdgpu_fence *af;
+ int r;
+
if (num_ibs == 0)
return -EINVAL;
@@ -193,6 +194,20 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!*job)
return -ENOMEM;
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_job;
+ }
+ (*job)->hw_fence = af;
+
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_fence;
+ }
+ (*job)->hw_vm_fence = af;
+
(*job)->vm = vm;
amdgpu_sync_create(&(*job)->explicit_sync);
@@ -204,16 +219,25 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return drm_sched_job_init(&(*job)->base, entity, 1, owner,
drm_client_id);
+
+err_fence:
+ kfree((*job)->hw_fence);
+err_job:
+ kfree(*job);
+ *job = NULL;
+
+ return r;
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job)
+ struct amdgpu_job **job, u64 k_job_id)
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
+ k_job_id);
if (r)
return r;
@@ -222,7 +246,10 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
if (r) {
if (entity)
drm_sched_job_cleanup(&(*job)->base);
+ kfree((*job)->hw_vm_fence);
+ kfree((*job)->hw_fence);
kfree(*job);
+ *job = NULL;
}
return r;
@@ -250,11 +277,11 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
struct dma_fence *f;
unsigned i;
- /* Check if any fences where initialized */
+ /* Check if any fences were initialized */
if (job->base.s_fence && job->base.s_fence->finished.ops)
f = &job->base.s_fence->finished;
- else if (job->hw_fence.base.ops)
- f = &job->hw_fence.base;
+ else if (job->hw_fence && job->hw_fence->base.ops)
+ f = &job->hw_fence->base;
else
f = NULL;
@@ -270,11 +297,16 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
amdgpu_sync_free(&job->explicit_sync);
- /* only put the hw fence if has embedded fence */
- if (!job->hw_fence.base.ops)
- kfree(job);
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
+ else
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
else
- dma_fence_put(&job->hw_fence.base);
+ kfree(job->hw_vm_fence);
+
+ kfree(job);
}
void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
@@ -303,10 +335,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
if (job->gang_submit != &job->base.s_fence->scheduled)
dma_fence_put(job->gang_submit);
- if (!job->hw_fence.base.ops)
- kfree(job);
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
+ else
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
else
- dma_fence_put(&job->hw_fence.base);
+ kfree(job->hw_vm_fence);
+
+ kfree(job);
}
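Both release paths above now follow the same rule for the two preallocated fences; summarized for reference:

/*
 * A fence that was emitted has base.ops set and is released through
 * its refcount with dma_fence_put(); one that never reached the ring
 * is still a bare allocation and is kfree()d. The job itself is always
 * kfree()d separately now that the hw fence is no longer embedded.
 */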
struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 2f302266662b..7abf069d17d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -44,11 +44,28 @@
struct amdgpu_fence;
enum amdgpu_ib_pool_type;
+/* Internal kernel job ids (decreasing values, starting from U64_MAX). */
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL)
+#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL)
+#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL)
+#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL)
+#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL)
+
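The decimal literals above are U64_MAX counting down. An equivalent, arguably more readable spelling (a sketch; keeping the explicit literals is a style choice the patch makes):

#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE		(U64_MAX - 0)
#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES	(U64_MAX - 1)
#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE	(U64_MAX - 2)
/* ... */
#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST	(U64_MAX - 13)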
struct amdgpu_job {
struct drm_sched_job base;
struct amdgpu_vm *vm;
struct amdgpu_sync explicit_sync;
- struct amdgpu_fence hw_fence;
+ struct amdgpu_fence *hw_fence;
+ struct amdgpu_fence *hw_vm_fence;
struct dma_fence *gang_submit;
uint32_t preamble_status;
uint32_t preemption_status;
@@ -96,7 +113,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
- struct amdgpu_job **job);
+ struct amdgpu_job **job,
+ u64 k_job_id);
void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
struct amdgpu_bo *gws, struct amdgpu_bo *oa);
void amdgpu_job_free_resources(struct amdgpu_job *job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 82d58ac7afb0..63ee6ba6a931 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -121,10 +121,12 @@ static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
}
- if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt))
+ if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) {
+ mutex_lock(&adev->jpeg.jpeg_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
AMD_PG_STATE_GATE);
- else
+ mutex_unlock(&adev->jpeg.jpeg_pg_lock);
+ } else
schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
}
@@ -194,7 +196,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -368,7 +371,7 @@ static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
ring = &adev->jpeg.inst[i].ring_dec[j];
- if (val & (1 << ((i * adev->jpeg.num_jpeg_rings) + j)))
+ if (val & (BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j)))
ring->sched.ready = true;
else
ring->sched.ready = false;
@@ -537,3 +540,68 @@ void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_pri
drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
}
}
+
+static inline bool amdgpu_jpeg_reg_valid(u32 reg)
+{
+ if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
+ (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
+ return false;
+ else
+ return true;
+}
+
+/**
+ * amdgpu_jpeg_dec_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
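Worked examples of the register window check above, using the JPEG_* bounds added to amdgpu_jpeg.h later in this patch:

/*
 *   reg = 0x4000 -> valid   (JPEG_REG_RANGE_START)
 *   reg = 0x411f -> valid   (just below the atomic carve-out)
 *   reg = 0x4120 -> invalid (JPEG_ATOMIC_RANGE_START)
 *   reg = 0x412a -> invalid (JPEG_ATOMIC_RANGE_END)
 *   reg = 0x41c2 -> valid   (JPEG_REG_RANGE_END)
 *   reg = 0x41c3 -> invalid (past the JPEG register window)
 */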
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index 4f0775e39b54..346ae0ab09d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -25,11 +25,18 @@
#define __AMDGPU_JPEG_H__
#include "amdgpu_ras.h"
+#include "amdgpu_cs.h"
#define AMDGPU_MAX_JPEG_INSTANCES 4
#define AMDGPU_MAX_JPEG_RINGS 10
#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+#define JPEG_ATOMIC_RANGE_START 0x4120
+#define JPEG_ATOMIC_RANGE_END 0x412A
+
#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
@@ -170,5 +177,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
const struct amdgpu_hwip_reg_entry *reg, u32 count);
void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8a76960803c6..6ee77f431d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -758,7 +758,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
+ ui64 = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) : 0;
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
@@ -804,8 +805,8 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
mem.vram.usable_heap_size = adev->gmc.real_vram_size -
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
- mem.vram.heap_usage =
- ttm_resource_manager_usage(vram_man);
+ mem.vram.heap_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(vram_man) : 0;
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -939,6 +940,10 @@ out:
if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
+ /* Gang submit is not supported under SRIOV currently */
+ if (!amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT;
+
if (amdgpu_passthrough(adev))
dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
AMDGPU_IDS_FLAGS_MODE_SHIFT) &
@@ -1417,14 +1422,10 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_debugfs_vm_init(file_priv);
- r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
+ r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
if (r)
goto error_pasid;
- r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
- if (r)
- goto error_vm;
-
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
r = -ENOMEM;
@@ -1464,15 +1465,12 @@ error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
- if (pasid) {
+ if (pasid)
amdgpu_pasid_free(pasid);
- amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
- }
kfree(fpriv);
out_suspend:
- pm_runtime_mark_last_busy(dev->dev);
pm_put:
pm_runtime_put_autosuspend(dev->dev);
@@ -1540,7 +1538,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 135598502c8d..9c182ce501af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -105,8 +105,8 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
spin_lock_init(&adev->mes.ring_lock[i]);
adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
- adev->mes.vmid_mask_mmhub = 0xffffff00;
- adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+ adev->mes.vmid_mask_mmhub = 0xFF00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xFFFE : 0xFF00;
num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
@@ -191,6 +191,20 @@ int amdgpu_mes_init(struct amdgpu_device *adev)
if (r)
goto error_doorbell;
+ if (adev->mes.hung_queue_db_array_size) {
+ r = amdgpu_bo_create_kernel(adev,
+ adev->mes.hung_queue_db_array_size * sizeof(u32),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)\n", r);
+ goto error_doorbell;
+ }
+ }
+
return 0;
error_doorbell:
@@ -216,6 +230,10 @@ void amdgpu_mes_fini(struct amdgpu_device *adev)
{
int i;
+ amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+
amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
&adev->mes.event_log_gpu_addr,
&adev->mes.event_log_cpu_addr);
@@ -366,6 +384,58 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
return r;
}
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev)
+{
+ return adev->mes.hung_queue_db_array_size;
+}
+
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array)
+{
+ struct mes_detect_and_reset_queue_input input;
+ u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr;
+ int r, i;
+
+ if (!hung_db_num || !hung_db_array)
+ return -EINVAL;
+
+ if ((queue_type != AMDGPU_RING_TYPE_GFX) &&
+ (queue_type != AMDGPU_RING_TYPE_COMPUTE) &&
+ (queue_type != AMDGPU_RING_TYPE_SDMA))
+ return -EINVAL;
+
+ /* Clear the doorbell array before detection */
+ memset(adev->mes.hung_queue_db_array_cpu_addr, AMDGPU_MES_INVALID_DB_OFFSET,
+ adev->mes.hung_queue_db_array_size * sizeof(u32));
+ input.queue_type = queue_type;
+ input.detect_only = detect_only;
+
+ r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes,
+ &input);
+ if (r) {
+ dev_err(adev->dev, "failed to detect and reset\n");
+ } else {
+ *hung_db_num = 0;
+ for (i = 0; i < adev->mes.hung_queue_hqd_info_offset; i++) {
+ if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
+ hung_db_array[i] = db_array[i];
+ *hung_db_num += 1;
+ }
+ }
+
+ /*
+ * TODO: for MES-scheduled user compute queue resets, also return the
+ * HQD info stored in hung_db_array from the hqd info offset to the
+ * end of the array
+ */
+ }
+
+ return r;
+}
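A hedged caller sketch for the detection helper above (function name and locals are hypothetical; the array is sized via amdgpu_mes_get_hung_queue_db_array_size()):

static int example_check_hung_compute_queues(struct amdgpu_device *adev)
{
	unsigned int hung_db_num = 0;
	u32 *hung_db_array;
	int size, r;

	size = amdgpu_mes_get_hung_queue_db_array_size(adev);
	hung_db_array = kcalloc(size, sizeof(u32), GFP_KERNEL);
	if (!hung_db_array)
		return -ENOMEM;

	r = amdgpu_mes_detect_and_reset_hung_queues(adev,
						    AMDGPU_RING_TYPE_COMPUTE,
						    true, /* detect only */
						    &hung_db_num,
						    hung_db_array);
	if (!r && hung_db_num)
		dev_info(adev->dev, "%u hung queue doorbell(s) found\n",
			 hung_db_num);

	kfree(hung_db_array);
	return r;
}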
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
{
struct mes_misc_op_input op_input;
@@ -458,6 +528,18 @@ error:
return r;
}
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev)
+{
+ uint32_t hdp_flush_req_offset, hdp_flush_done_offset, ref_and_mask;
+
+ hdp_flush_req_offset = adev->nbio.funcs->get_hdp_flush_req_offset(adev);
+ hdp_flush_done_offset = adev->nbio.funcs->get_hdp_flush_done_offset(adev);
+ ref_and_mask = adev->nbio.hdp_flush_reg->ref_and_mask_cp0;
+
+ return amdgpu_mes_reg_write_reg_wait(adev, hdp_flush_req_offset, hdp_flush_done_offset,
+ ref_and_mask, ref_and_mask);
+}
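For reference, the helper above reuses the generic reg-write-reg-wait MES op to run the standard HDP handshake; summarized:

/*
 * Write the NBIO HDP flush request register, then poll the flush done
 * register until the CP0 ref_and_mask bits read back, i.e. the same
 * request/ack sequence the ring emit paths encode as packets.
 */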
+
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
@@ -621,14 +703,11 @@ out:
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
{
uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
- bool is_supported = false;
-
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
- amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
- mes_rev >= 0x63)
- is_supported = true;
- return is_supported;
+ return ((amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
+ mes_rev >= 0x63) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0));
}
/* FIXME: node_id will be used to identify the correct MES instance in the future */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index c0d2c195fe2e..e989225b354b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -41,6 +41,7 @@
#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
+#define AMDGPU_MES_INVALID_DB_OFFSET 0xffffffff
enum amdgpu_mes_priority_level {
AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
@@ -147,6 +148,11 @@ struct amdgpu_mes {
uint64_t resource_1_gpu_addr[AMDGPU_MAX_MES_PIPES];
void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
+ int hung_queue_db_array_size;
+ int hung_queue_hqd_info_offset;
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj;
+ uint64_t hung_queue_db_array_gpu_addr;
+ void *hung_queue_db_array_cpu_addr;
};
struct amdgpu_mes_gang {
@@ -233,6 +239,7 @@ struct mes_add_queue_input {
struct mes_remove_queue_input {
uint32_t doorbell_offset;
uint64_t gang_context_addr;
+ bool remove_queue_after_reset;
};
struct mes_map_legacy_queue_input {
@@ -280,6 +287,18 @@ struct mes_reset_queue_input {
bool is_kq;
};
+struct mes_detect_and_reset_queue_input {
+ uint32_t queue_type;
+ bool detect_only;
+};
+
+struct mes_inv_tlbs_pasid_input {
+ uint32_t xcc_id;
+ uint16_t pasid;
+ uint8_t hub_id;
+ uint8_t flush_type;
+};
+
enum mes_misc_opcode {
MES_MISC_OP_WRITE_REG,
MES_MISC_OP_READ_REG,
@@ -367,6 +386,13 @@ struct amdgpu_mes_funcs {
int (*reset_hw_queue)(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input);
+
+ int (*detect_and_reset_hung_queues)(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input);
+
+ int (*invalidate_tlbs_pasid)(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input);
};
#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
@@ -390,12 +416,20 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
unsigned int vmid,
bool use_mmio);
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev);
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array);
+
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t val);
int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
+int amdgpu_mes_hdp_flush(struct amdgpu_device *adev);
int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
uint64_t process_context_addr,
uint32_t spi_gdbg_per_vmid_cntl,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 6da4f946cac0..dc8d2f52c7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -326,6 +326,8 @@ struct amdgpu_mode_info {
struct drm_property *audio_property;
/* FMT dithering */
struct drm_property *dither_property;
+ /* Adaptive Backlight Modulation (power feature) */
+ struct drm_property *abm_level_property;
/* hardcoded DFP edid from BIOS */
const struct drm_edid *bios_hardcoded_edid;
@@ -496,8 +498,6 @@ struct amdgpu_crtc {
struct drm_connector *connector;
/* for dpm */
u32 line_time;
- u32 wm_low;
- u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
/* for virtual dce */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 122a88294883..e08f58de4b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -153,6 +153,14 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].mem_type = AMDGPU_PL_MMIO_REMAP;
+ places[c].flags = 0;
+ c++;
+ }
+
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
@@ -1313,7 +1321,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (r)
goto out;
- r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true);
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
+ AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
if (WARN_ON(r))
goto out;
@@ -1545,6 +1554,8 @@ uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
return AMDGPU_PL_OA;
case AMDGPU_GEM_DOMAIN_DOORBELL:
return AMDGPU_PL_DOORBELL;
+ case AMDGPU_GEM_DOMAIN_MMIO_REMAP:
+ return AMDGPU_PL_MMIO_REMAP;
default:
return TTM_PL_SYSTEM;
}
@@ -1628,6 +1639,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
case AMDGPU_PL_DOORBELL:
placement = "DOORBELL";
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ placement = "MMIO REMAP";
+ break;
case TTM_PL_SYSTEM:
default:
placement = "CPU";
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index c316920f3450..52c2d1731aab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -69,7 +69,7 @@ struct amdgpu_bo_va_mapping {
uint64_t last;
uint64_t __subtree_last;
uint64_t offset;
- uint64_t flags;
+ uint32_t flags;
};
/* User space allocated BO in a VM */
@@ -96,6 +96,7 @@ struct amdgpu_bo_va {
* if non-zero, cannot unmap from GPU because user queues may still access it
*/
unsigned int queue_refcount;
+ atomic_t userq_va_mapped;
};
struct amdgpu_bo {
@@ -167,6 +168,8 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_OA;
case AMDGPU_PL_DOORBELL:
return AMDGPU_GEM_DOMAIN_DOORBELL;
+ case AMDGPU_PL_MMIO_REMAP:
+ return AMDGPU_GEM_DOMAIN_MMIO_REMAP;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 693357caa9a8..0b10497d487c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -666,6 +666,10 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
return "FB_FW_RESERV_ADDR";
case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
return "FB_FW_RESERV_EXT_ADDR";
+ case GFX_CMD_ID_SRIOV_SPATIAL_PART:
+ return "SPATIAL_PARTITION";
+ case GFX_CMD_ID_FB_NPS_MODE:
+ return "NPS_MODE_CHANGE";
default:
return "UNKNOWN CMD";
}
@@ -877,9 +881,7 @@ static int psp_tmr_init(struct psp_context *psp)
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
PSP_TMR_ALIGNMENT,
- AMDGPU_HAS_VRAM(psp->adev) ?
- AMDGPU_GEM_DOMAIN_VRAM :
- AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr,
pptr);
}
@@ -1537,6 +1539,7 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
uint64_t dst_node_id = node_info.node_id;
uint8_t dst_num_hops = node_info.num_hops;
+ uint8_t dst_is_sharing_enabled = node_info.is_sharing_enabled;
uint8_t dst_num_links = node_info.num_links;
hive = amdgpu_get_xgmi_hive(psp->adev);
@@ -1556,13 +1559,20 @@ static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
continue;
mirror_top_info->nodes[j].num_hops = dst_num_hops;
- /*
- * prevent 0 num_links value re-reflection since reflection
+ mirror_top_info->nodes[j].is_sharing_enabled = dst_is_sharing_enabled;
+ /* prevent 0 num_links value re-reflection since reflection
* criteria is based on num_hops (direct or indirect).
- *
*/
- if (dst_num_links)
+ if (dst_num_links) {
mirror_top_info->nodes[j].num_links = dst_num_links;
+ /* swap src and dst due to the change of frame of reference */
+ for (int k = 0; k < dst_num_links; k++) {
+ mirror_top_info->nodes[j].port_num[k].src_xgmi_port_num =
+ node_info.port_num[k].dst_xgmi_port_num;
+ mirror_top_info->nodes[j].port_num[k].dst_xgmi_port_num =
+ node_info.port_num[k].src_xgmi_port_num;
+ }
+ }
break;
}
@@ -1637,9 +1647,10 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
IP_VERSION(13, 0, 6) ||
amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
- IP_VERSION(13, 0, 14);
- bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
- psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
+ IP_VERSION(13, 0, 14) ||
+ amdgpu_sriov_vf(psp->adev);
+ bool ta_port_num_support = psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG ||
+ amdgpu_sriov_xgmi_ta_ext_peer_link_en(psp->adev);
/* populate the shared output buffer rather than the cmd input buffer
* with node_ids as the input for GET_PEER_LINKS command execution.
@@ -2350,11 +2361,14 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_ta_load(psp, &psp->securedisplay_context.context);
- if (!ret) {
+ if (!ret && !psp->securedisplay_context.context.resp_status) {
psp->securedisplay_context.context.initialized = true;
mutex_init(&psp->securedisplay_context.mutex);
- } else
+ } else {
+ /* don't try again */
+ psp->securedisplay_context.context.bin_desc.size_bytes = 0;
return ret;
+ }
mutex_lock(&psp->securedisplay_context.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index 38face981c3e..6e8aad91bcd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -171,13 +171,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
copy_pos += sizeof(uint32_t);
- ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
- if (!ta_bin)
- return -ENOMEM;
- if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) {
- ret = -EFAULT;
- goto err_free_bin;
- }
+ ta_bin = memdup_user(&buf[copy_pos], ta_bin_len);
+ if (IS_ERR(ta_bin))
+ return PTR_ERR(ta_bin);
/* Set TA context and functions */
set_ta_context_funcs(psp, ta_type, &context);
@@ -327,13 +323,9 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
return -EFAULT;
copy_pos += sizeof(uint32_t);
- shared_buf = kzalloc(shared_buf_len, GFP_KERNEL);
- if (!shared_buf)
- return -ENOMEM;
- if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) {
- ret = -EFAULT;
- goto err_free_shared_buf;
- }
+ shared_buf = memdup_user(&buf[copy_pos], shared_buf_len);
+ if (IS_ERR(shared_buf))
+ return PTR_ERR(shared_buf);
set_ta_context_funcs(psp, ta_type, &context);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 123bcf5c2bb1..bacf888735db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -101,7 +101,6 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
}
amdgpu_gfx_off_ctrl(adev, true);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 540817e296da..2a6cf7963dde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -41,6 +41,7 @@
#include "atom.h"
#include "amdgpu_reset.h"
#include "amdgpu_psp.h"
+#include "amdgpu_ras_mgr.h"
#ifdef CONFIG_X86_MCE_AMD
#include <asm/mce.h>
@@ -122,12 +123,15 @@ const char *get_ras_block_str(struct ras_common_if *ras_block)
/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
-#define MAX_UMC_POISON_POLLING_TIME_ASYNC 300 //ms
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 10
#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
#define MAX_FLUSH_RETIRE_DWORK_TIMES 100
+#define BYPASS_ALLOCATED_ADDRESS 0x0
+#define BYPASS_INITIALIZATION_ADDRESS 0x1
+
enum amdgpu_ras_retire_page_reservation {
AMDGPU_RAS_RETIRE_PAGE_RESERVED,
AMDGPU_RAS_RETIRE_PAGE_PENDING,
@@ -136,12 +140,18 @@ enum amdgpu_ras_retire_page_reservation {
atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr);
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
+
#ifdef CONFIG_X86_MCE_AMD
static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
+static void
+amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev);
struct mce_notifier_adev_list {
struct amdgpu_device *devs[MAX_GPU_INSTANCE];
int num_gpu;
@@ -169,18 +179,16 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
struct eeprom_table_record err_rec;
int ret;
- if ((address >= adev->gmc.mc_vram_size) ||
- (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ ret = amdgpu_ras_check_bad_page(adev, address);
+ if (ret == -EINVAL) {
dev_warn(adev->dev,
- "RAS WARN: input address 0x%llx is invalid.\n",
- address);
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
return -EINVAL;
- }
-
- if (amdgpu_ras_check_bad_page(adev, address)) {
+ } else if (ret == 1) {
dev_warn(adev->dev,
- "RAS WARN: 0x%llx has already been marked as bad page!\n",
- address);
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
+ address);
return 0;
}
@@ -207,6 +215,56 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre
return 0;
}
+static int amdgpu_check_address_validity(struct amdgpu_device *adev,
+ uint64_t address, uint64_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_block_info blk_info;
+ uint64_t page_pfns[32] = {0};
+ int i, ret, count;
+ bool hit = false;
+
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
+ return 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
+ return -EPERM;
+ return hit ? -EACCES : 0;
+ }
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EFAULT;
+
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
+ address, page_pfns, ARRAY_SIZE(page_pfns));
+ if (count <= 0)
+ return -EPERM;
+
+ for (i = 0; i < count; i++) {
+ memset(&blk_info, 0, sizeof(blk_info));
+ ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
+ page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
+ if (!ret) {
+ /* With BYPASS_ALLOCATED_ADDRESS the input address is expected to
+ * be allocated by the calling process itself, so the caller's own
+ * allocations are excluded from the check.
+ */
+ if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
+ ((blk_info.task.pid != task_pid_nr(current)) ||
+ strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
+ return -EACCES;
+ else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
+ (blk_info.task.pid == con->init_task_pid) &&
+ !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
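A note on how the two BYPASS_* flags above behave once wired to debugfs op 4 ("check_address") later in this patch:

/*
 * flags == BYPASS_ALLOCATED_ADDRESS (0x0): return -EACCES when the
 *	page is allocated by a process other than the caller.
 * flags == BYPASS_INITIALIZATION_ADDRESS (0x1): return -EACCES when
 *	the page was allocated by the driver init task recorded in
 *	con->init_task_pid / con->init_task_comm.
 */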
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -297,6 +355,8 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
op = 2;
else if (strstr(str, "retire_page") != NULL)
op = 3;
+ else if (strstr(str, "check_address") != NULL)
+ op = 4;
else if (str[0] && str[1] && str[2] && str[3])
/* ascii string, but commands are not matched. */
return -EINVAL;
@@ -311,6 +371,15 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
data->inject.address = address;
return 0;
+ } else if (op == 4) {
+ if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
+ sscanf(str, "%*s %llu %llu", &address, &value) != 2)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+ data->inject.value = value;
+ return 0;
}
if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
@@ -500,6 +569,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
else
return ret;
+ } else if (data.op == 4) {
+ ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
+ return ret ? ret : size;
}
if (!amdgpu_ras_is_supported(adev, data.head.block))
@@ -513,22 +585,16 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
break;
case 2:
- if ((data.inject.address >= adev->gmc.mc_vram_size &&
- adev->gmc.mc_vram_size) ||
- (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
- dev_warn(adev->dev, "RAS WARN: input address "
- "0x%llx is invalid.",
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
+ ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
+ if (ret == -EINVAL) {
+ dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
data.inject.address);
- ret = -EINVAL;
break;
- }
-
- /* umc ce/ue error injection for a bad page is not allowed */
- if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
- amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has "
- "already been marked as bad!\n",
- data.inject.address);
+ } else if (ret == 1) {
+ dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ data.inject.address);
break;
}
@@ -548,6 +614,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
return size;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev);
+
/**
* DOC: AMDGPU RAS debugfs EEPROM table reset interface
*
@@ -572,6 +640,11 @@ static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
(struct amdgpu_device *)file_inode(f)->i_private;
int ret;
+ if (amdgpu_uniras_enabled(adev)) {
+ ret = amdgpu_uniras_clear_badpages_info(adev);
+ return ret ? ret : size;
+ }
+
ret = amdgpu_ras_eeprom_reset_table(
&(amdgpu_ras_get_context(adev)->eeprom_control));
@@ -1479,9 +1552,51 @@ out_fini_err_data:
return ret;
}
+static int amdgpu_uniras_clear_badpages_info(struct amdgpu_device *adev)
+{
+ struct ras_cmd_dev_handle req = {0};
+ int ret;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__CLEAR_BAD_PAGE_INFO,
+ &req, sizeof(req), NULL, 0);
+ if (ret) {
+ dev_err(adev->dev, "Failed to clear bad pages info, ret: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amdgpu_uniras_query_block_ecc(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ struct ras_cmd_block_ecc_info_req req = {0};
+ struct ras_cmd_block_ecc_info_rsp rsp = {0};
+ int ret;
+
+ if (!info)
+ return -EINVAL;
+
+ req.block_id = info->head.block;
+ req.subblock_id = info->head.sub_block_index;
+
+ ret = amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BLOCK_ECC_STATUS,
+ &req, sizeof(req), &rsp, sizeof(rsp));
+ if (!ret) {
+ info->ce_count = rsp.ce_count;
+ info->ue_count = rsp.ue_count;
+ info->de_count = rsp.de_count;
+ }
+
+ return ret;
+}
+
int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
{
- return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_query_block_ecc(adev, info);
+ else
+ return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
}
int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
@@ -1533,6 +1648,27 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_uniras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info)
+{
+ struct ras_cmd_inject_error_req inject_req;
+ struct ras_cmd_inject_error_rsp rsp;
+
+ if (!info)
+ return -EINVAL;
+
+ memset(&inject_req, 0, sizeof(inject_req));
+ inject_req.block_id = info->head.block;
+ inject_req.subblock_id = info->head.sub_block_index;
+ inject_req.address = info->address;
+ inject_req.error_type = info->head.type;
+ inject_req.instance_mask = info->instance_mask;
+ inject_req.method = info->value;
+
+ return amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__INJECT_ERROR,
+ &inject_req, sizeof(inject_req), &rsp, sizeof(rsp));
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -1550,6 +1686,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
info->head.block,
info->head.sub_block_index);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_uniras_error_inject(adev, info);
+
/* inject on guest isn't allowed, return success directly */
if (amdgpu_sriov_vf(adev))
return 0;
@@ -1694,7 +1833,9 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
/* sysfs begin */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count);
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start);
static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
{
@@ -1752,19 +1893,50 @@ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
unsigned int end = div64_ul(ppos + count - 1, element_size);
ssize_t s = 0;
struct ras_badpage *bps = NULL;
- unsigned int bps_count = 0;
+ int bps_count = 0, i, status;
+ uint64_t address;
memset(buf, 0, count);
- if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
+ bps_count = end - start;
+ bps = kcalloc(bps_count, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
return 0;
- for (; start < end && start < bps_count; start++)
+ if (amdgpu_uniras_enabled(adev))
+ bps_count = amdgpu_uniras_badpages_read(adev, bps, bps_count, start);
+ else
+ bps_count = amdgpu_ras_badpages_read(adev, bps, bps_count, start);
+
+ if (bps_count <= 0) {
+ kfree(bps);
+ return 0;
+ }
+
+ for (i = 0; i < bps_count; i++) {
+ address = ((uint64_t)bps[i].bp) << AMDGPU_GPU_PAGE_SHIFT;
+ if (amdgpu_ras_check_critical_address(adev, address))
+ continue;
+
+ bps[i].size = AMDGPU_GPU_PAGE_SIZE;
+
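+ /* Translate the VRAM manager page status into retire-page flags:
+ * -EBUSY means retirement is pending, -ENOENT is a fault, anything
+ * else is treated as reserved.
+ */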
+ status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
+ address);
+ if (status == -EBUSY)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (status == -ENOENT)
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+ else
+ bps[i].flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED;
+
s += scnprintf(&buf[s], element_size + 1,
"0x%08x : 0x%08x : %1s\n",
- bps[start].bp,
- bps[start].size,
- amdgpu_ras_badpage_flags_str(bps[start].flags));
+ bps[i].bp,
+ bps[i].size,
+ amdgpu_ras_badpage_flags_str(bps[i].flags));
+ }
kfree(bps);
@@ -1780,12 +1952,42 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
}
+static bool amdgpu_ras_get_version_info(struct amdgpu_device *adev, u32 *major,
+ u32 *minor, u32 *rev)
+{
+ int i;
+
+ if (!adev || !major || !minor || !rev || !amdgpu_uniras_enabled(adev))
+ return false;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_RAS) {
+ *major = adev->ip_blocks[i].version->major;
+ *minor = adev->ip_blocks[i].version->minor;
+ *rev = adev->ip_blocks[i].version->rev;
+ return true;
+ }
+ }
+
+ return false;
+}
+
static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct amdgpu_ras *con =
container_of(attr, struct amdgpu_ras, version_attr);
- return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
+ u32 major, minor, rev;
+ ssize_t size = 0;
+
+ size += sysfs_emit_at(buf, size, "table version: 0x%x\n",
+ con->eeprom_control.tbl_hdr.version);
+
+ if (amdgpu_ras_get_version_info(con->adev, &major, &minor, &rev))
+ size += sysfs_emit_at(buf, size, "ras version: %u.%u.%u\n",
+ major, minor, rev);
+
+ return size;
}
static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
@@ -2178,6 +2380,11 @@ void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
return;
+ if (amdgpu_uniras_enabled(adev)) {
+ amdgpu_ras_mgr_handle_fatal_interrupt(adev, NULL);
+ return;
+ }
+
if (adev->nbio.ras &&
adev->nbio.ras->handle_ras_controller_intr_no_bifring)
adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
@@ -2348,6 +2555,16 @@ int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_manager *obj;
struct ras_ih_data *data;
+ if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info;
+
+ memset(&ih_info, 0, sizeof(ih_info));
+ ih_info.block = info->head.block;
+ memcpy(&ih_info.iv_entry, info->entry, sizeof(struct amdgpu_iv_entry));
+
+ return amdgpu_ras_mgr_handle_controller_interrupt(adev, &ih_info);
+ }
+
obj = amdgpu_ras_find_obj(adev, &info->head);
if (!obj)
return -EINVAL;
@@ -2542,54 +2759,83 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
}
}
-/* recovery begin */
-
-/* return 0 on success.
- * caller need free bps.
- */
static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
- struct ras_badpage **bps, unsigned int *count)
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
- int i = 0;
- int ret = 0, status;
+ int r = 0;
+ uint32_t i;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
mutex_lock(&con->recovery_lock);
data = con->eh_data;
- if (!data || data->count == 0) {
- *bps = NULL;
- ret = -EINVAL;
- goto out;
+ if (start < data->count) {
+ for (i = start; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
+
+ bps[r].bp = data->bps[i].retired_page;
+ r++;
+ if (r >= count)
+ break;
+ }
}
+ mutex_unlock(&con->recovery_lock);
- *bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
- if (!*bps) {
- ret = -ENOMEM;
- goto out;
- }
+ return r;
+}
- for (; i < data->count; i++) {
- (*bps)[i] = (struct ras_badpage){
- .bp = data->bps[i].retired_page,
- .size = AMDGPU_GPU_PAGE_SIZE,
- .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
- };
- status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
- data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
- if (status == -EBUSY)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (status == -ENOENT)
- (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+static int amdgpu_uniras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage *bps, uint32_t count, uint32_t start)
+{
+ struct ras_cmd_bad_pages_info_req cmd_input;
+ struct ras_cmd_bad_pages_info_rsp *output;
+ uint32_t group, start_group, end_group;
+ uint32_t pos, pos_in_group;
+ int r = 0, i;
+
+ if (!bps || !count)
+ return -EINVAL;
+
+ output = kmalloc(sizeof(*output), GFP_KERNEL);
+ if (!output)
+ return -ENOMEM;
+
+ memset(&cmd_input, 0, sizeof(cmd_input));
+
+ start_group = start / RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ end_group = (start + count + RAS_CMD_MAX_BAD_PAGES_PER_GROUP - 1) /
+ RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+
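+ /* Bad-page records are fetched from the RAS manager in fixed-size
+ * groups; walk every group overlapping [start, start + count).
+ */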
+ pos = start;
+ for (group = start_group; group < end_group; group++) {
+ memset(output, 0, sizeof(*output));
+ cmd_input.group_index = group;
+ if (amdgpu_ras_mgr_handle_ras_cmd(adev, RAS_CMD__GET_BAD_PAGES,
+ &cmd_input, sizeof(cmd_input), output, sizeof(*output)))
+ goto out;
+
+ if (pos >= output->bp_total_cnt)
+ goto out;
+
+ pos_in_group = pos - group * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ for (i = pos_in_group; i < output->bp_in_group; i++, pos++) {
+ if (!output->records[i].ts)
+ continue;
+
+ bps[r].bp = output->records[i].retired_page;
+ r++;
+ if (r >= count)
+ goto out;
+ }
}
- *count = data->count;
out:
- mutex_unlock(&con->recovery_lock);
- return ret;
+ kfree(output);
+ return r;
}
static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
@@ -2638,6 +2884,7 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_device *adev = ras->adev;
struct list_head device_list, *device_list_handle = NULL;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ unsigned int error_query_mode;
enum ras_event_type type;
if (hive) {
@@ -2666,11 +2913,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
device_list_handle = &device_list;
}
+ if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
+ if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
+ /* wait 500ms to ensure pmfw polling mca bank info done */
+ msleep(500);
+ }
+ }
+
type = amdgpu_ras_get_fatal_error_event(adev);
list_for_each_entry(remote_adev,
device_list_handle, gmc.xgmi.head) {
- amdgpu_ras_query_err_status(remote_adev);
- amdgpu_ras_log_on_err_counter(remote_adev, type);
+ if (amdgpu_uniras_enabled(remote_adev)) {
+ amdgpu_ras_mgr_update_ras_ecc(remote_adev);
+ } else {
+ amdgpu_ras_query_err_status(remote_adev);
+ amdgpu_ras_log_on_err_counter(remote_adev, type);
+ }
}
}
@@ -2722,7 +2980,7 @@ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
unsigned int old_space = data->count + data->space_left;
unsigned int new_space = old_space + pages;
unsigned int align_space = ALIGN(new_space, 512);
- void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+ void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
if (!bps) {
return -ENOMEM;
@@ -2758,8 +3016,13 @@ static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
addr_in.ma.err_addr = bps->address;
addr_in.ma.socket_id = socket;
addr_in.ma.ch_inst = bps->mem_channel;
- /* tell RAS TA the node instance is not used */
- addr_in.ma.node_inst = TA_RAS_INV_NODE;
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ /* tell RAS TA the node instance is not used */
+ addr_in.ma.node_inst = TA_RAS_INV_NODE;
+ } else {
+ addr_in.ma.umc_inst = bps->mcumc_id;
+ addr_in.ma.node_inst = bps->cu;
+ }
if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
@@ -2814,8 +3077,11 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
for (j = 0; j < count; j++) {
if (amdgpu_ras_check_bad_page_unlock(con,
- bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ data->count++;
+ data->space_left--;
continue;
+ }
if (!data->space_left &&
amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
@@ -2828,6 +3094,7 @@ static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
sizeof(struct eeprom_table_record));
data->count++;
data->space_left--;
+ con->bad_page_num++;
}
return 0;
@@ -2898,8 +3165,16 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
int i = 0;
enum amdgpu_memory_partition save_nps;
- save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
- bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+ bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+ } else {
+ /* If PMFW manages the EEPROM, save_nps is not stored there; the MCA
+ * address must always be converted into a physical address, so make
+ * save_nps different from nps.
+ */
+ save_nps = nps + 1;
+ }
if (save_nps == nps) {
if (amdgpu_umc_pages_in_a_row(adev, err_data,
@@ -2965,7 +3240,8 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
if (from_rom) {
/* there is no pa recs in V3, so skip pa recs processing */
- if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
+ !amdgpu_ras_smu_eeprom_supported(adev)) {
for (i = 0; i < pages; i++) {
if (control->ras_num_recs - i >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
@@ -2974,7 +3250,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
i += (adev->umc.retire_unit - 1);
} else {
break;
@@ -2988,8 +3264,10 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
ret = __amdgpu_ras_convert_rec_from_rom(adev,
&bps[i], &err_data, nps);
if (ret)
- control->ras_num_bad_pages -= adev->umc.retire_unit;
+ con->bad_page_num -= adev->umc.retire_unit;
}
+
+ con->eh_data->count_saved = con->eh_data->count;
} else {
ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
}
@@ -3012,7 +3290,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
struct amdgpu_ras_eeprom_control *control;
- int save_count, unit_num, bad_page_num, i;
+ int save_count, unit_num, i;
if (!con || !con->eh_data) {
if (new_cnt)
@@ -3033,27 +3311,32 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
mutex_lock(&con->recovery_lock);
control = &con->eeprom_control;
data = con->eh_data;
- bad_page_num = control->ras_num_bad_pages;
- save_count = data->count - bad_page_num;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ unit_num = control->ras_num_recs -
+ control->ras_num_recs_old;
+ else
+ unit_num = data->count / adev->umc.retire_unit -
+ control->ras_num_recs;
+
+ save_count = con->bad_page_num - control->ras_num_bad_pages;
mutex_unlock(&con->recovery_lock);
- unit_num = save_count / adev->umc.retire_unit;
if (new_cnt)
*new_cnt = unit_num;
/* only new entries are saved */
- if (save_count > 0) {
+ if (unit_num && save_count) {
/* old ASICs only save PA to EEPROM, like before */
if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num], save_count)) {
+ &data->bps[data->count_saved], unit_num)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
} else {
for (i = 0; i < unit_num; i++) {
if (amdgpu_ras_eeprom_append(control,
- &data->bps[bad_page_num +
+ &data->bps[data->count_saved +
i * adev->umc.retire_unit], 1)) {
dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
@@ -3062,6 +3345,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
}
dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ data->count_saved = data->count;
}
return 0;
@@ -3094,7 +3378,8 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
/* In V3 there are no PA recs, and some cases (when address == 0) may be
parsed as PA recs, so add a version check to avoid it.
*/
- if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ if ((control->tbl_hdr.version < RAS_TABLE_VER_V3) &&
+ !amdgpu_ras_smu_eeprom_supported(adev)) {
for (i = 0; i < control->ras_num_recs; i++) {
if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
if ((bps[i].address == bps[i + 1].address) &&
@@ -3116,17 +3401,17 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
}
}
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ if (ret)
+ goto out;
+
ret = amdgpu_ras_eeprom_check(control);
if (ret)
goto out;
/* HW not usable */
- if (amdgpu_ras_is_rma(adev)) {
+ if (amdgpu_ras_is_rma(adev))
ret = -EHWPOISON;
- goto out;
- }
-
- ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
}
out:
@@ -3134,18 +3419,24 @@ out:
return ret;
}
-static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
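+/* Returns 1 if the address is already a retired page, 0 if it is not,
+ * and -EINVAL if the address is out of range.
+ */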
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr)
{
struct ras_err_handler_data *data = con->eh_data;
+ struct amdgpu_device *adev = con->adev;
int i;
+ if ((addr >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EINVAL;
+
addr >>= AMDGPU_GPU_PAGE_SHIFT;
for (i = 0; i < data->count; i++)
if (addr == data->bps[i].retired_page)
- return true;
+ return 1;
- return false;
+ return 0;
}
/*
@@ -3153,11 +3444,11 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
*
* Note: this check is only for umc block
*/
-static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool ret = false;
+ int ret = 0;
if (!con || !con->eh_data)
return ret;
@@ -3241,7 +3532,7 @@ static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
@@ -3261,7 +3552,7 @@ static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
mutex_destroy(&ecc_log->lock);
ecc_log->de_queried_count = 0;
- ecc_log->prev_de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
}
static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
@@ -3287,7 +3578,6 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
page_retirement_dwork.work);
struct amdgpu_device *adev = con->adev;
struct ras_err_data err_data;
- unsigned long err_cnt;
/* If gpu reset is ongoing, delay retiring the bad pages */
if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
@@ -3299,13 +3589,9 @@ static void amdgpu_ras_do_page_retirement(struct work_struct *work)
amdgpu_ras_error_data_init(&err_data);
amdgpu_umc_handle_bad_pages(adev, &err_data);
- err_cnt = err_data.err_addr_cnt;
amdgpu_ras_error_data_fini(&err_data);
- if (err_cnt && amdgpu_ras_is_rma(adev))
- amdgpu_ras_reset_gpu(adev);
-
amdgpu_ras_schedule_retirement_dwork(con,
AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
}
@@ -3316,49 +3602,39 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
int ret = 0;
struct ras_ecc_log_info *ecc_log;
struct ras_query_if info;
- uint32_t timeout = 0;
+ u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint64_t de_queried_count;
- uint32_t new_detect_count, total_detect_count;
- uint32_t need_query_count = poison_creation_count;
+ u64 de_queried_count;
+ u64 consumption_q_count;
enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
memset(&info, 0, sizeof(info));
info.head.block = AMDGPU_RAS_BLOCK__UMC;
ecc_log = &ras->umc_ecc_log;
- total_detect_count = 0;
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+
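+ /* Poll until at least one deferred error has been queried and one
+ * consumption event has been seen, or until the timeout expires.
+ */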
do {
ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
if (ret)
return ret;
de_queried_count = ecc_log->de_queried_count;
- if (de_queried_count > ecc_log->prev_de_queried_count) {
- new_detect_count = de_queried_count - ecc_log->prev_de_queried_count;
- ecc_log->prev_de_queried_count = de_queried_count;
- timeout = 0;
- } else {
- new_detect_count = 0;
- }
+ consumption_q_count = ecc_log->consumption_q_count;
- if (new_detect_count) {
- total_detect_count += new_detect_count;
- } else {
- if (!timeout && need_query_count)
- timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
+ if (de_queried_count && consumption_q_count)
+ break;
- if (timeout) {
- if (!--timeout)
- break;
- msleep(1);
- }
- }
- } while (total_detect_count < need_query_count);
+ msleep(100);
+ } while (--timeout);
- if (total_detect_count)
+ if (de_queried_count)
schedule_delayed_work(&ras->page_retirement_dwork, 0);
+ if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
+ amdgpu_ras_reset_gpu(adev);
+
return 0;
}
@@ -3394,6 +3670,12 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
reset_flags |= msg.reset;
}
+ /*
+ * Try to ensure the poison creation handler has completed first,
+ * so RMA is set if the bad pages exceed the threshold.
+ */
+ flush_delayed_work(&con->page_retirement_dwork);
+
/* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
if (reset_flags && !amdgpu_ras_is_rma(adev)) {
if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
@@ -3403,8 +3685,6 @@ static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
else
reset = reset_flags;
- flush_delayed_work(&con->page_retirement_dwork);
-
con->gpu_reset_flags |= reset;
amdgpu_ras_reset_gpu(adev);
@@ -3434,6 +3714,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
if (kthread_should_stop())
break;
+ mutex_lock(&con->poison_lock);
gpu_reset = 0;
do {
@@ -3446,7 +3727,8 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(poison_creation_count, &con->poison_creation_count);
atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
}
- } while (atomic_read(&con->poison_creation_count));
+ } while (atomic_read(&con->poison_creation_count) &&
+ !atomic_read(&con->poison_consumption_count));
if (ret != -EIO) {
msg_count = kfifo_len(&con->poison_fifo);
@@ -3463,6 +3745,7 @@ static int amdgpu_ras_page_retirement_thread(void *param)
/* gpu mode-1 reset is ongoing or just completed ras mode-1 reset */
/* Clear poison creation request */
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
/* Clear poison fifo */
amdgpu_ras_clear_poison_fifo(adev);
@@ -3487,9 +3770,12 @@ static int amdgpu_ras_page_retirement_thread(void *param)
atomic_sub(msg_count, &con->page_retirement_req_cnt);
}
+ atomic_set(&con->poison_consumption_count, 0);
+
/* Wake up work to save bad pages to eeprom */
schedule_delayed_work(&con->page_retirement_dwork, 0);
}
+ mutex_unlock(&con->poison_lock);
}
return 0;
@@ -3504,7 +3790,12 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
if (!con || amdgpu_sriov_vf(adev))
return 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
control = &con->eeprom_control;
+ con->ras_smu_drv = amdgpu_dpm_get_ras_smu_driver(adev);
+
ret = amdgpu_ras_eeprom_init(control);
control->is_eeprom_valid = !ret;
@@ -3570,8 +3861,10 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
}
mutex_init(&con->recovery_lock);
+ mutex_init(&con->poison_lock);
INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
atomic_set(&con->in_recovery, 0);
+ atomic_set(&con->rma_in_recovery, 0);
con->eeprom_control.bad_channel_bitmap = 0;
max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
@@ -3589,6 +3882,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
init_waitqueue_head(&con->page_retirement_wq);
atomic_set(&con->page_retirement_req_cnt, 0);
atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
con->page_retirement_thread =
kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
if (IS_ERR(con->page_retirement_thread)) {
@@ -3661,6 +3955,10 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
kfree(data);
mutex_unlock(&con->recovery_lock);
+ amdgpu_ras_critical_region_init(adev);
+#ifdef CONFIG_X86_MCE_AMD
+ amdgpu_unregister_bad_pages_mca_notifier(adev);
+#endif
return 0;
}
/* recovery end */
@@ -3884,7 +4182,6 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
atomic_set(&con->ras_ue_count, ue_count);
}
- pm_runtime_mark_last_busy(dev->dev);
Out:
pm_runtime_put_autosuspend(dev->dev);
}
@@ -4087,6 +4384,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ con->init_task_pid = task_pid_nr(current);
+ get_task_comm(con->init_task_comm, current);
+
+ mutex_init(&con->critical_region_lock);
+ INIT_LIST_HEAD(&con->critical_region_head);
+
dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
adev->ras_hw_enabled, adev->ras_enabled);
@@ -4366,6 +4669,9 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
if (!adev->ras_enabled || !con)
return 0;
+ amdgpu_ras_critical_region_fini(adev);
+ mutex_destroy(&con->critical_region_lock);
+
list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
if (ras_node->ras_obj) {
obj = ras_node->ras_obj;
@@ -4484,6 +4790,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
struct ras_event_state *event_state;
int ret = 0;
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
+
if (type >= RAS_EVENT_TYPE_COUNT) {
ret = -EINVAL;
goto out;
@@ -4534,20 +4843,18 @@ u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type
return id;
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
{
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
- u64 event_id;
+ u64 event_id = RAS_EVENT_INVALID_ID;
- if (amdgpu_ras_mark_ras_event(adev, type)) {
- dev_err(adev->dev,
- "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
- return;
- }
+ if (amdgpu_uniras_enabled(adev))
+ return 0;
- event_id = amdgpu_ras_acquire_event_id(adev, type);
+ if (!amdgpu_ras_mark_ras_event(adev, type))
+ event_id = amdgpu_ras_acquire_event_id(adev, type);
RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
@@ -4556,6 +4863,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
amdgpu_ras_reset_gpu(adev);
}
+
+ return -EBUSY;
}
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
@@ -4683,6 +4992,28 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
notifier_registered = true;
}
}
+static void amdgpu_unregister_bad_pages_mca_notifier(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ if (!notifier_registered && !mce_adev_list.num_gpu)
+ return;
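+ /* Remove this device from the tracked list; the MCE decode chain is
+ * unregistered only once no device remains.
+ */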
+ for (i = 0, j = 0; i < mce_adev_list.num_gpu; i++) {
+ if (mce_adev_list.devs[i] == adev)
+ mce_adev_list.devs[i] = NULL;
+ if (!mce_adev_list.devs[i])
+ ++j;
+ }
+
+ if (j == mce_adev_list.num_gpu) {
+ mce_adev_list.num_gpu = 0;
+ /* Unregister x86 notifier with MCE subsystem. */
+ if (notifier_registered) {
+ mce_unregister_decode_chain(&amdgpu_bad_page_nb);
+ notifier_registered = false;
+ }
+ }
+}
#endif
struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
@@ -5274,6 +5605,9 @@ int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
int ret = 0;
+ if (amdgpu_ras_check_critical_address(adev, start))
+ return 0;
+
mutex_lock(&con->page_rsv_lock);
ret = amdgpu_vram_mgr_query_page_status(mgr, start);
if (ret == -ENOENT)
@@ -5305,8 +5639,110 @@ bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_is_rma(adev);
+
if (!con)
return false;
return con->is_rma;
}
+
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr_resource *vres;
+ struct ras_critical_region *region;
+ struct drm_buddy_block *block;
+ int ret = 0;
+
+ if (!bo || !bo->tbo.resource)
+ return -EINVAL;
+
+ vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
+
+ mutex_lock(&con->critical_region_lock);
+
+ /* Check if the bo had been recorded */
+ list_for_each_entry(region, &con->critical_region_head, node)
+ if (region->bo == bo)
+ goto out;
+
+ /* Record new critical amdgpu bo */
+ list_for_each_entry(block, &vres->blocks, link) {
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ region->bo = bo;
+ region->start = amdgpu_vram_mgr_block_start(block);
+ region->size = amdgpu_vram_mgr_block_size(block);
+ list_add_tail(&region->node, &con->critical_region_head);
+ }
+
+out:
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
+}
+
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region, *tmp;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
+ list_del(&region->node);
+ kfree(region);
+ }
+ mutex_unlock(&con->critical_region_lock);
+}
+
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region;
+ bool ret = false;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry(region, &con->critical_region_head, node) {
+ if ((region->start <= addr) &&
+ (addr < (region->start + region->size))) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (amdgpu_uniras_enabled(tmp_adev))
+ amdgpu_ras_mgr_pre_reset(tmp_adev);
+ }
+}
+
+void amdgpu_ras_post_reset(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ if (amdgpu_uniras_enabled(tmp_adev))
+ amdgpu_ras_mgr_post_reset(tmp_adev);
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 927d6bff734a..ff44190d7d98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -492,11 +492,45 @@ struct ras_ecc_err {
struct ras_ecc_log_info {
struct mutex lock;
struct radix_tree_root de_page_tree;
- uint64_t de_queried_count;
- uint64_t prev_de_queried_count;
+ uint64_t de_queried_count;
+ uint64_t consumption_q_count;
+};
+
+struct ras_critical_region {
+ struct list_head node;
+ struct amdgpu_bo *bo;
+ uint64_t start;
+ uint64_t size;
+};
+
+struct ras_eeprom_table_version {
+ uint32_t minor : 16;
+ uint32_t major : 16;
+};
+
+struct ras_eeprom_smu_funcs {
+ int (*get_ras_table_version)(struct amdgpu_device *adev,
+ uint32_t *table_version);
+ int (*get_badpage_count)(struct amdgpu_device *adev, uint32_t *count, uint32_t timeout);
+ int (*get_badpage_mca_addr)(struct amdgpu_device *adev, uint16_t index, uint64_t *mca_addr);
+ int (*set_timestamp)(struct amdgpu_device *adev, uint64_t timestamp);
+ int (*get_timestamp)(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp);
+ int (*get_badpage_ipid)(struct amdgpu_device *adev, uint16_t index, uint64_t *ipid);
+ int (*erase_ras_table)(struct amdgpu_device *adev, uint32_t *result);
+};
+
+enum ras_smu_feature_flags {
+ RAS_SMU_FEATURE_BIT__RAS_EEPROM = BIT_ULL(0),
+};
+
+struct ras_smu_drv {
+ const struct ras_eeprom_smu_funcs *smu_eeprom_funcs;
+ void (*ras_smu_feature_flags)(struct amdgpu_device *adev, uint64_t *flags);
};
struct amdgpu_ras {
+ void *ras_mgr;
/* ras infrastructure */
/* for ras itself. */
uint32_t features;
@@ -515,6 +549,7 @@ struct amdgpu_ras {
/* gpu recovery */
struct work_struct recovery_work;
atomic_t in_recovery;
+ atomic_t rma_in_recovery;
struct amdgpu_device *adev;
/* error handler data */
struct ras_err_handler_data *eh_data;
@@ -557,6 +592,7 @@ struct amdgpu_ras {
struct mutex page_retirement_lock;
atomic_t page_retirement_req_cnt;
atomic_t poison_creation_count;
+ atomic_t poison_consumption_count;
struct mutex page_rsv_lock;
DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
struct ras_ecc_log_info umc_ecc_log;
@@ -570,6 +606,21 @@ struct amdgpu_ras {
struct ras_event_manager *event_mgr;
uint64_t reserved_pages_in_bytes;
+
+ pid_t init_task_pid;
+ char init_task_comm[TASK_COMM_LEN];
+
+ int bad_page_num;
+
+ struct list_head critical_region_head;
+ struct mutex critical_region_lock;
+
+ /* Protect poison injection */
+ struct mutex poison_lock;
+
+ /* Enable/disable the uniras switch */
+ bool uniras_enabled;
+ const struct ras_smu_drv *ras_smu_drv;
};
struct ras_fs_data {
@@ -608,6 +659,7 @@ struct ras_err_handler_data {
struct eeprom_table_record *bps;
/* the count of entries */
int count;
+ int count_saved;
/* the space can place new entries */
int space_left;
};
@@ -888,7 +940,7 @@ static inline void amdgpu_ras_intr_cleared(void)
atomic_set(&amdgpu_ras_in_intr, 0);
}
-void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+int amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
@@ -973,6 +1025,9 @@ int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_
int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr);
+
int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
enum amdgpu_ras_block block, uint16_t pasid,
pasid_notify pasid_fn, void *data, uint32_t reset);
@@ -984,4 +1039,9 @@ void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
const char *fmt, ...);
bool amdgpu_ras_is_rma(struct amdgpu_device *adev);
+
+void amdgpu_ras_pre_reset(struct amdgpu_device *adev,
+ struct list_head *device_list);
+void amdgpu_ras_post_reset(struct amdgpu_device *adev,
+ struct list_head *device_list);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 9bda9ad13f88..64dd7a81bff5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include "amdgpu_reset.h"
+#include "amdgpu_ras_mgr.h"
/* These are memory addresses as would be seen by one or more EEPROM
* chips strung on the I2C bus, usually by manipulating pins 1-3 of a
@@ -123,6 +124,8 @@
RAS_TABLE_V2_1_INFO_SIZE) \
/ RAS_TABLE_RECORD_SIZE)
+#define RAS_SMU_MESSAGE_TIMEOUT_MS 1000 /* 1s */
+
/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
* offset off of RAS_TABLE_START. That is, this is something you can
* add to control->i2c_address, and then tell I2C layer to read
@@ -443,40 +446,57 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ u32 erase_res = 0;
u8 csum;
int res;
mutex_lock(&control->ras_tbl_mutex);
- hdr->header = RAS_TABLE_HDR_VAL;
- amdgpu_ras_set_eeprom_table_version(control);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ hdr->header = RAS_TABLE_HDR_VAL;
+ amdgpu_ras_set_eeprom_table_version(control);
- if (hdr->version >= RAS_TABLE_VER_V2_1) {
- hdr->first_rec_offset = RAS_RECORD_START_V2_1;
- hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
- RAS_TABLE_V2_1_INFO_SIZE;
- rai->rma_status = GPU_HEALTH_USABLE;
- /**
- * GPU health represented as a percentage.
- * 0 means worst health, 100 means fully health.
- */
- rai->health_percent = 100;
- /* ecc_page_threshold = 0 means disable bad page retirement */
- rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = GPU_HEALTH_USABLE;
+
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ /**
+ * GPU health represented as a percentage.
+ * 0 means worst health, 100 means full health.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ }
+
+ csum = __calc_hdr_byte_sum(control);
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ csum = -csum;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
} else {
- hdr->first_rec_offset = RAS_RECORD_START;
- hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ res = amdgpu_ras_smu_erase_ras_table(adev, &erase_res);
+ if (res || erase_res) {
+ dev_warn(adev->dev, "RAS EEPROM reset failed, res:%d result:%d",
+ res, erase_res);
+ if (!res)
+ res = -EIO;
+ }
}
- csum = __calc_hdr_byte_sum(control);
- if (hdr->version >= RAS_TABLE_VER_V2_1)
- csum += __calc_ras_info_byte_sum(control);
- csum = -csum;
- hdr->checksum = csum;
- res = __write_table_header(control);
- if (!res && hdr->version > RAS_TABLE_VER_V1)
- res = __write_table_ras_info(control);
-
control->ras_num_recs = 0;
control->ras_num_bad_pages = 0;
control->ras_num_mca_recs = 0;
@@ -556,6 +576,9 @@ bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ if (amdgpu_uniras_enabled(adev))
+ return amdgpu_ras_mgr_check_eeprom_safety_watermark(adev);
+
if (!__is_ras_eeprom_supported(adev) ||
!amdgpu_bad_page_threshold)
return false;
@@ -743,8 +766,7 @@ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
else
control->ras_num_mca_recs += num;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = con->bad_page_num;
Out:
kfree(buf);
return res;
@@ -766,6 +788,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
dev_warn(adev->dev,
"Saved bad pages %d reaches threshold value %d\n",
control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && !amdgpu_uniras_enabled(adev) &&
+ amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "failed to generate bad page threshold CPER records\n");
+
if ((amdgpu_bad_page_threshold != -1) &&
(amdgpu_bad_page_threshold != -2)) {
control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
@@ -774,9 +801,10 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
control->tbl_rai.health_percent = 0;
}
ras->is_rma = true;
- /* ignore the -ENOTSUPP return value */
- amdgpu_dpm_send_rma_reason(adev);
}
+
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
}
if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
@@ -845,6 +873,71 @@ Out:
return res;
}
+int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int ret, retry = 20;
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
+ control->ras_num_recs_old = control->ras_num_recs;
+
+ do {
+ /* A 1000 ms timeout is long enough that smu_get_badpage_count
+ * won't return -EBUSY before it expires.
+ */
+ ret = amdgpu_ras_smu_get_badpage_count(adev,
+ &(control->ras_num_recs), RAS_SMU_MESSAGE_TIMEOUT_MS);
+ if (!ret &&
+ (control->ras_num_recs_old == control->ras_num_recs)) {
+ /* The record count update in PMFW takes some time;
+ * smu_get_badpage_count may return before the count is
+ * updated, so sleep for a while and retry.
+ */
+ msleep(50);
+ retry--;
+ } else {
+ break;
+ }
+ } while (retry);
+
+ /* An unchanged record count is not a real failure,
+ * so don't print a warning here.
+ */
+ if (!ret && (control->ras_num_recs_old == control->ras_num_recs))
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int amdgpu_ras_smu_eeprom_append(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev) || !con)
+ return 0;
+
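+ /* PMFW appends the records to EEPROM itself; the driver only
+ * refreshes its counters and enforces the bad-page threshold.
+ */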
+ control->ras_num_bad_pages = con->bad_page_num;
+
+ if (amdgpu_bad_page_threshold != 0 &&
+ control->ras_num_bad_pages > con->bad_page_cnt_threshold) {
+ dev_warn(adev->dev,
+ "Saved bad pages %d reaches threshold value %d\n",
+ control->ras_num_bad_pages, con->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "failed to generate bad page threshold CPER records\n");
+
+ if ((amdgpu_bad_page_threshold != -1) &&
+ (amdgpu_bad_page_threshold != -2))
+ con->is_rma = true;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_ras_eeprom_append -- append records to the EEPROM RAS table
* @control: pointer to control structure
@@ -869,6 +962,9 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
if (!__is_ras_eeprom_supported(adev))
return 0;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_append(control);
+
if (num == 0) {
dev_err(adev->dev, "will not append 0 records\n");
return -EINVAL;
@@ -944,6 +1040,50 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
return res;
}
+int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record, u32 rec_idx,
+ const u32 num)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ uint64_t ts, end_idx;
+ int i, ret;
+ u64 mca, ipid;
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
+ if (!adev->umc.ras || !adev->umc.ras->mca_ipid_parse)
+ return -EOPNOTSUPP;
+
+ end_idx = rec_idx + num;
+ for (i = rec_idx; i < end_idx; i++) {
+ ret = amdgpu_ras_smu_get_badpage_mca_addr(adev, i, &mca);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_smu_get_badpage_ipid(adev, i, &ipid);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_ras_smu_get_timestamp(adev, i, &ts);
+ if (ret)
+ return ret;
+
+ record[i - rec_idx].address = mca;
+ /* retired_page (pa) is unused now */
+ record[i - rec_idx].retired_page = 0x1ULL;
+ record[i - rec_idx].ts = ts;
+ record[i - rec_idx].err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+
+ adev->umc.ras->mca_ipid_parse(adev, ipid,
+ (uint32_t *)&(record[i - rec_idx].cu),
+ (uint32_t *)&(record[i - rec_idx].mem_channel),
+ (uint32_t *)&(record[i - rec_idx].mcumc_id), NULL);
+ }
+
+ return 0;
+}
+
/**
* amdgpu_ras_eeprom_read -- read EEPROM
* @control: pointer to control structure
@@ -965,6 +1105,9 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
u8 *buf, *pp;
u32 g0, g1;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_eeprom_read_idx(control, record, 0, num);
+
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1136,6 +1279,10 @@ static ssize_t amdgpu_ras_debugfs_table_read(struct file *f, char __user *buf,
int res = -EFAULT;
size_t data_len;
+ /* pmfw manages eeprom data by itself */
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return 0;
+
mutex_lock(&control->ras_tbl_mutex);
/* We want *pos - data_len > 0, which means there's
@@ -1366,6 +1513,42 @@ Out:
return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
}
+static int amdgpu_ras_smu_eeprom_init(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint64_t local_time;
+ int res;
+
+ ras->is_rma = false;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+ mutex_init(&control->ras_tbl_mutex);
+
+ res = amdgpu_ras_smu_get_table_version(adev, &(hdr->version));
+ if (res)
+ return res;
+
+ res = amdgpu_ras_smu_get_badpage_count(adev,
+ &(control->ras_num_recs), 100);
+ if (res)
+ return res;
+
+ local_time = (uint64_t)ktime_get_real_seconds();
+ res = amdgpu_ras_smu_set_timestamp(adev, local_time);
+ if (res)
+ return res;
+
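+ /* PMFW owns the table layout, so use a fixed record capacity
+ * instead of deriving it from the EEPROM geometry.
+ */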
+ control->ras_max_record_count = 4000;
+
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
+
+ return 0;
+}
+
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
@@ -1374,6 +1557,9 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int res;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_init(control);
+
ras->is_rma = false;
if (!__is_ras_eeprom_supported(adev))
@@ -1440,6 +1626,47 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return 0;
}
+static int amdgpu_ras_smu_eeprom_check(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ control->ras_num_bad_pages = ras->bad_page_num;
+
+ if ((ras->bad_page_cnt_threshold < control->ras_num_bad_pages) &&
+ amdgpu_bad_page_threshold != 0) {
+ dev_warn(adev->dev,
+ "RAS records:%d exceed threshold:%d\n",
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
+ dev_warn(adev->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
+ } else {
+ ras->is_rma = true;
+ dev_warn(adev->dev,
+ "User-defined threshold is set, runtime service will be halted when the threshold is reached\n");
+ }
+
+ return 0;
+ }
+
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
+ if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold)
+ dev_warn(adev->dev, "RAS records:%u exceed 90%% of threshold:%d",
+ control->ras_num_bad_pages,
+ ras->bad_page_cnt_threshold);
+ return 0;
+}
+
int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
@@ -1447,6 +1674,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
int res = 0;
+ if (amdgpu_ras_smu_eeprom_supported(adev))
+ return amdgpu_ras_smu_eeprom_check(control);
+
if (!__is_ras_eeprom_supported(adev))
return 0;
@@ -1457,8 +1687,7 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
if (!__get_eeprom_i2c_addr(adev, control))
return -EINVAL;
- control->ras_num_bad_pages = control->ras_num_pa_recs +
- control->ras_num_mca_recs * adev->umc.retire_unit;
+ control->ras_num_bad_pages = ras->bad_page_num;
if (hdr->header == RAS_TABLE_HDR_VAL) {
dev_dbg(adev->dev,
@@ -1538,7 +1767,8 @@ void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
struct amdgpu_ras_eeprom_control *control;
int res;
- if (!__is_ras_eeprom_supported(adev) || !ras)
+ if (!__is_ras_eeprom_supported(adev) || !ras ||
+ amdgpu_ras_smu_eeprom_supported(adev))
return;
control = &ras->eeprom_control;
if (!control->is_eeprom_valid)
@@ -1558,4 +1788,143 @@ void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
control->is_eeprom_valid = false;
}
return;
-} \ No newline at end of file
+}
+
+static const struct ras_smu_drv *amdgpu_ras_get_smu_ras_drv(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (!ras)
+ return NULL;
+
+ return ras->ras_smu_drv;
+}
+
+static uint64_t amdgpu_ras_smu_get_feature_flags(struct amdgpu_device *adev)
+{
+ const struct ras_smu_drv *ras_smu_drv = amdgpu_ras_get_smu_ras_drv(adev);
+ uint64_t flags = 0ULL;
+
+ if (!ras_smu_drv)
+ goto out;
+
+ if (ras_smu_drv->ras_smu_feature_flags)
+ ras_smu_drv->ras_smu_feature_flags(adev, &flags);
+
+out:
+ return flags;
+}
+
+bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+ uint64_t flags = 0ULL;
+
+ if (!__is_ras_eeprom_supported(adev) || !smu_ras_drv)
+ return false;
+
+ if (!smu_ras_drv->smu_eeprom_funcs)
+ return false;
+
+ flags = amdgpu_ras_smu_get_feature_flags(adev);
+
+ return !!(flags & RAS_SMU_FEATURE_BIT__RAS_EEPROM);
+}
+
+int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_ras_table_version)
+ return smu_ras_drv->smu_eeprom_funcs->get_ras_table_version(adev,
+ table_version);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev,
+ uint32_t *count, uint32_t timeout)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_count)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_count(adev,
+ count, timeout);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_mca_addr(adev,
+ index, mca_addr);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev,
+ uint64_t timestamp)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->set_timestamp)
+ return smu_ras_drv->smu_eeprom_funcs->set_timestamp(adev,
+ timestamp);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_timestamp)
+ return smu_ras_drv->smu_eeprom_funcs->get_timestamp(adev,
+ index, timestamp);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid)
+ return smu_ras_drv->smu_eeprom_funcs->get_badpage_ipid(adev,
+ index, ipid);
+ return -EOPNOTSUPP;
+}
+
+int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result)
+{
+ const struct ras_smu_drv *smu_ras_drv = amdgpu_ras_get_smu_ras_drv(adev);
+
+ if (!amdgpu_ras_smu_eeprom_supported(adev))
+ return -EOPNOTSUPP;
+
+ if (smu_ras_drv->smu_eeprom_funcs->erase_ras_table)
+ return smu_ras_drv->smu_eeprom_funcs->erase_ras_table(adev,
+ result);
+ return -EOPNOTSUPP;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ebfca4cb5688..2e5d63957e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -82,6 +82,7 @@ struct amdgpu_ras_eeprom_control {
/* Number of records in the table.
*/
u32 ras_num_recs;
+ u32 ras_num_recs_old;
/* the bad page number is ras_num_recs or
* ras_num_recs * umc.retire_unit
@@ -163,6 +164,35 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
+bool amdgpu_ras_smu_eeprom_supported(struct amdgpu_device *adev);
+
+int amdgpu_ras_smu_get_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version);
+
+int amdgpu_ras_smu_get_badpage_count(struct amdgpu_device *adev,
+ uint32_t *count, uint32_t timeout);
+
+int amdgpu_ras_smu_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr);
+
+int amdgpu_ras_smu_set_timestamp(struct amdgpu_device *adev,
+ uint64_t timestamp);
+
+int amdgpu_ras_smu_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp);
+
+int amdgpu_ras_smu_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid);
+
+int amdgpu_ras_smu_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result);
+
+int amdgpu_ras_eeprom_read_idx(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record, u32 rec_idx,
+ const u32 num);
+
+int amdgpu_ras_eeprom_update_record_num(struct amdgpu_ras_eeprom_control *control);
+
extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
index 50fcd86e1033..be2e56ce1355 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -91,6 +91,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = to_ttm_range_mgr_node(res)->mm_nodes;
while (start >= node->size << PAGE_SHIFT)
start -= node++->size << PAGE_SHIFT;
@@ -153,6 +154,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
break;
case TTM_PL_TT:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
node = cur->node;
cur->node = ++node;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index dabfbdf6f1ce..28c4ad62f50e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -340,6 +340,9 @@ void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
case AMDGPU_RESET_SRC_USER:
strscpy(buf, "user trigger", len);
break;
+ case AMDGPU_RESET_SRC_USERQ:
+ strscpy(buf, "user queue trigger", len);
+ break;
default:
strscpy(buf, "unknown", len);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
index 4d9b9701139b..07b4d37f1db6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -43,6 +43,7 @@ enum AMDGPU_RESET_SRCS {
AMDGPU_RESET_SRC_MES,
AMDGPU_RESET_SRC_HWS,
AMDGPU_RESET_SRC_USER,
+ AMDGPU_RESET_SRC_USERQ,
};
struct amdgpu_reset_context {
@@ -160,4 +161,16 @@ int amdgpu_reset_do_xgmi_reset_on_init(
bool amdgpu_reset_in_recovery(struct amdgpu_device *adev);
+static inline void amdgpu_reset_set_dpc_status(struct amdgpu_device *adev,
+ bool status)
+{
+ adev->pcie_reset_ctx.occurs_dpc = status;
+ adev->no_hw_access = status;
+}
+
+static inline bool amdgpu_reset_in_dpc(struct amdgpu_device *adev)
+{
+ return adev->pcie_reset_ctx.occurs_dpc;
+}
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 486c3646710c..c596b6df2e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#include "amdgpu_ras_mgr.h"
#include "atom.h"
/*
@@ -159,8 +160,16 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
*/
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- while (ib->length_dw & ring->funcs->align_mask)
- ib->ptr[ib->length_dw++] = ring->funcs->nop;
+ u32 align_mask = ring->funcs->align_mask;
+ u32 count = ib->length_dw & align_mask;
+
+ if (count) {
+ count = align_mask + 1 - count;
+
+ memset32(&ib->ptr[ib->length_dw], ring->funcs->nop, count);
+
+ ib->length_dw += count;
+ }
}
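The rewritten padding helper above computes the NOP count arithmetically instead of looping dword by dword. As a quick sanity check of the mask arithmetic, here is a hypothetical standalone mirror of the in-kernel logic (nop_pad_count is illustrative, not from this patch):

#include <assert.h>
#include <stdint.h>

/* Hypothetical mirror of amdgpu_ring_generic_pad_ib()'s arithmetic: how many
 * NOP dwords round length_dw up to a multiple of (align_mask + 1) dwords. */
static uint32_t nop_pad_count(uint32_t length_dw, uint32_t align_mask)
{
	uint32_t count = length_dw & align_mask;

	return count ? align_mask + 1 - count : 0;
}

int main(void)
{
	assert(nop_pad_count(13, 7) == 3);	/* 13 dwords -> padded to 16 */
	assert(nop_pad_count(16, 7) == 0);	/* already aligned, no NOPs */
	return 0;
}

memset32() then writes that many copies of the ring's NOP dword in one call, which is both faster and clearer than the removed one-dword-at-a-time loop.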
/**
@@ -364,7 +373,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
+ PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
@@ -459,9 +469,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
ktime_t deadline;
bool ret;
- if (unlikely(ring->adev->debug_disable_soft_recovery))
- return false;
-
deadline = ktime_add_us(ktime_get(), 10000);
if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
@@ -489,6 +496,66 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
*/
#if defined(CONFIG_DEBUG_FS)
+static ssize_t amdgpu_ras_cper_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *offset)
+{
+ const uint8_t ring_header_size = 12;
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ struct ras_cmd_cper_snapshot_req *snapshot_req __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_snapshot_req), GFP_KERNEL);
+ struct ras_cmd_cper_snapshot_rsp *snapshot_rsp __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_snapshot_rsp), GFP_KERNEL);
+ struct ras_cmd_cper_record_req *record_req __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_record_req), GFP_KERNEL);
+ struct ras_cmd_cper_record_rsp *record_rsp __free(kfree) =
+ kzalloc(sizeof(struct ras_cmd_cper_record_rsp), GFP_KERNEL);
+ uint8_t *ring_header __free(kfree) =
+ kzalloc(ring_header_size, GFP_KERNEL);
+ uint32_t total_cper_num;
+ uint64_t start_cper_id;
+ int r;
+
+ if (!snapshot_req || !snapshot_rsp || !record_req || !record_rsp ||
+ !ring_header)
+ return -ENOMEM;
+
+ if (!(*offset)) {
+ /* Need at least 12 bytes for the header on the first read */
+ if (size < ring_header_size)
+ return -EINVAL;
+
+ if (copy_to_user(buf, ring_header, ring_header_size))
+ return -EFAULT;
+ buf += ring_header_size;
+ size -= ring_header_size;
+ }
+
+ r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev,
+ RAS_CMD__GET_CPER_SNAPSHOT,
+ snapshot_req, sizeof(struct ras_cmd_cper_snapshot_req),
+ snapshot_rsp, sizeof(struct ras_cmd_cper_snapshot_rsp));
+ if (r || !snapshot_rsp->total_cper_num)
+ return r;
+
+ start_cper_id = snapshot_rsp->start_cper_id;
+ total_cper_num = snapshot_rsp->total_cper_num;
+
+ record_req->buf_ptr = (uint64_t)(uintptr_t)buf;
+ record_req->buf_size = size;
+ record_req->cper_start_id = start_cper_id + *offset;
+ record_req->cper_num = total_cper_num;
+ r = amdgpu_ras_mgr_handle_ras_cmd(ring->adev, RAS_CMD__GET_CPER_RECORD,
+ record_req, sizeof(struct ras_cmd_cper_record_req),
+ record_rsp, sizeof(struct ras_cmd_cper_record_rsp));
+ if (r)
+ return r;
+
+ r = *offset ? record_rsp->real_data_size : record_rsp->real_data_size + ring_header_size;
+ (*offset) += record_rsp->real_cper_num;
+
+ return r;
+}
+
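The __free(kfree) annotations in the new debugfs read handler use the kernel's scope-based cleanup helpers from <linux/cleanup.h>: each annotated pointer is handed to kfree() automatically when it goes out of scope, so the early -ENOMEM/-EINVAL/-EFAULT returns above need no explicit unwinding. A minimal sketch of the idiom (example_scoped_alloc is illustrative, not from this patch):

#include <linux/cleanup.h>
#include <linux/slab.h>

static int example_scoped_alloc(size_t n)
{
	u8 *buf __free(kfree) = kzalloc(n, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf; it is kfree()d automatically on every return path ... */
	return 0;
}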
/* Layout of file is 12 bytes consisting of
* - rptr
* - wptr
@@ -505,6 +572,9 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
loff_t i;
int r;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER && amdgpu_uniras_enabled(ring->adev))
+ return amdgpu_ras_cper_debugfs_read(f, buf, size, pos);
+
if (*pos & 3 || size & 3)
return -EINVAL;
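From userspace, the file therefore begins with a 12-byte header of three 32-bit words (rptr, wptr, and the driver's copy of the wptr, per the layout comment above) followed by the ring contents. A hedged reader sketch, with an illustrative debugfs path:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative path; the actual ring file name varies per device and IP. */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_ring_gfx", O_RDONLY);
	uint32_t hdr[3];	/* rptr, wptr, driver's copy of wptr */

	if (fd < 0)
		return 1;
	if (read(fd, hdr, sizeof(hdr)) != sizeof(hdr)) {
		close(fd);
		return 1;
	}
	printf("rptr=%u wptr=%u\n", hdr[0], hdr[1]);
	close(fd);
	return 0;
}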
@@ -810,7 +880,7 @@ int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
if (r)
return r;
- /* signal the fence of the bad job */
+ /* signal the guilty fence and set an error on all fences from the context */
if (guilty_fence)
amdgpu_fence_driver_guilty_force_completion(guilty_fence);
/* Re-emit the non-guilty commands */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 7670f5d82b9e..7a27c6c4bb44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -83,6 +83,7 @@ enum amdgpu_ring_type {
AMDGPU_RING_TYPE_MES,
AMDGPU_RING_TYPE_UMSCH_MM,
AMDGPU_RING_TYPE_CPER,
+ AMDGPU_RING_TYPE_MAX,
};
enum amdgpu_ib_pool_type {
@@ -114,7 +115,7 @@ struct amdgpu_sched {
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
+ uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
@@ -147,16 +148,14 @@ struct amdgpu_fence {
u64 wptr;
/* fence context for resets */
u64 context;
- uint32_t seq;
};
extern const struct drm_sched_backend_ops amdgpu_sched_ops;
-void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
-void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence);
-void amdgpu_fence_save_wptr(struct dma_fence *fence);
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *af);
+void amdgpu_fence_save_wptr(struct amdgpu_fence *af);
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
@@ -166,8 +165,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
- struct amdgpu_fence *af, unsigned int flags);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct amdgpu_fence *af,
+ unsigned int flags);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
@@ -211,7 +210,18 @@ struct amdgpu_ring_funcs {
bool support_64bit_ptrs;
bool no_user_fence;
bool secure_submission_supported;
- unsigned extra_dw;
+
+ /**
+ * @extra_bytes:
+ *
+ * Optional extra space in bytes that is added to the ring size
+ * when allocating the BO that holds the contents of the ring.
+ * This space isn't used for command submission to the ring,
+ * but is just there to satisfy some hardware requirements or
+ * implement workarounds. It's up to the implementation of each
+ * specific ring to initialize this space.
+ */
+ unsigned extra_bytes;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
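A ring backend opts into the extra space simply by setting the new field in its amdgpu_ring_funcs; amdgpu_ring_init() then sizes the BO as ring_size + extra_bytes. A hedged sketch (the initializer values are illustrative, not taken from any real backend):

/* Illustrative only: reserve one extra 4K page past the ring contents,
 * e.g. to satisfy a hardware prefetch requirement or workaround. */
static const struct amdgpu_ring_funcs example_ring_funcs = {
	.type		= AMDGPU_RING_TYPE_GFX,
	.align_mask	= 0xff,
	.extra_bytes	= 4096,	/* allocated, but never submitted to */
};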
@@ -298,7 +308,7 @@ struct amdgpu_ring {
unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
u64 rptr_gpu_addr;
- volatile u32 *rptr_cpu_addr;
+ u32 *rptr_cpu_addr;
/**
* @wptr:
@@ -378,19 +388,19 @@ struct amdgpu_ring {
* This is the CPU address pointer in the writeback slot. This is used
* to commit changes to the GPU.
*/
- volatile u32 *wptr_cpu_addr;
+ u32 *wptr_cpu_addr;
unsigned fence_offs;
u64 fence_gpu_addr;
- volatile u32 *fence_cpu_addr;
+ u32 *fence_cpu_addr;
uint64_t current_ctx;
char name[16];
u32 trail_seq;
unsigned trail_fence_offs;
u64 trail_fence_gpu_addr;
- volatile u32 *trail_fence_cpu_addr;
+ u32 *trail_fence_cpu_addr;
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u32 *cond_exe_cpu_addr;
unsigned int set_q_mode_offs;
u32 *set_q_mode_ptr;
u64 set_q_mode_token;
@@ -470,10 +480,7 @@ static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
- int i = 0;
- while (i <= ring->buf_mask)
- ring->ring[i++] = ring->funcs->nop;
-
+ memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1);
}
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index db5791e1a7ce..5aa830a02d80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -89,7 +89,7 @@ void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 i;
int r;
@@ -189,7 +189,7 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
{
const __le32 *fw_data;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
int me, i, max_me;
u32 bo_offset = 0;
u32 table_offset, table_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index c210625be220..2ce310b31942 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -251,7 +251,7 @@ struct amdgpu_rlc_funcs {
* and it also provides a pointer to it which is used by the firmware
* to load the clear state in some cases.
*/
- void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer);
+ void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer);
int (*get_cp_table_num)(struct amdgpu_device *adev);
int (*resume)(struct amdgpu_device *adev);
void (*stop)(struct amdgpu_device *adev);
@@ -275,19 +275,19 @@ struct amdgpu_rlc {
/* for power gating */
struct amdgpu_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
+ uint32_t *sr_ptr;
const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct amdgpu_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
+ uint32_t *cs_ptr;
const struct cs_section_def *cs_data;
u32 clear_state_size;
/* for cp tables */
struct amdgpu_bo *cp_table_obj;
uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
+ uint32_t *cp_table_ptr;
u32 cp_table_size;
/* safe mode for updating CG/PG state */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 41ebe690eeff..3739be1b71e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -159,7 +159,6 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
dev_err(adev->dev, "Invalid input: %s\n", str);
}
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
index d45ebfb642ca..a0b479d5fff1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -67,9 +67,9 @@ static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
- u64 seq64_addr, va_flags;
struct amdgpu_bo *bo;
struct drm_exec exec;
+ u64 seq64_addr;
int r;
bo = adev->seq64.sbo;
@@ -94,9 +94,9 @@ int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
- va_flags = amdgpu_gem_va_map_flags(adev, AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
- r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE,
- va_flags);
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0,
+ AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
if (r) {
DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
amdgpu_vm_bo_del(adev, *bo_va);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 27ab4e754b2a..2b931e855abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -123,6 +123,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
placement->num_placement = 0;
return;
@@ -187,7 +188,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct amdgpu_job *job;
void *cpu_addr;
uint64_t flags;
- unsigned int i;
int r;
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
@@ -226,7 +226,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
if (r)
return r;
@@ -253,16 +254,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
} else {
- dma_addr_t dma_address;
-
- dma_address = mm_cur->start;
- dma_address += adev->vm_manager.vram_base_offset;
+ u64 pa = mm_cur->start + adev->vm_manager.vram_base_offset;
- for (i = 0; i < num_pages; ++i) {
- amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
- flags, cpu_addr);
- dma_address += PAGE_SIZE;
- }
+ amdgpu_gart_map_vram_range(adev, pa, 0, num_pages, flags, cpu_addr);
}
dma_fence_put(amdgpu_job_submit(job));
@@ -284,12 +278,13 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
* move and different for a BO to BO copy.
*
*/
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f)
+__attribute__((nonnull))
+static int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
+ struct dma_resv *resv,
+ struct dma_fence **f)
{
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor src_mm, dst_mm;
@@ -363,9 +358,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
}
error:
mutex_unlock(&adev->mman.gtt_window_lock);
- if (f)
- *f = dma_fence_get(fence);
- dma_fence_put(fence);
+ *f = fence;
return r;
}
@@ -406,7 +399,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct dma_fence *wipe_fence = NULL;
r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
- false);
+ false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
if (r) {
goto error;
} else if (wipe_fence) {
@@ -447,7 +440,8 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
return false;
if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
- res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
+ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
+ res->mem_type == AMDGPU_PL_MMIO_REMAP)
return true;
if (res->mem_type != TTM_PL_VRAM)
@@ -538,10 +532,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
old_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
new_mem->mem_type == AMDGPU_PL_GDS ||
new_mem->mem_type == AMDGPU_PL_GWS ||
new_mem->mem_type == AMDGPU_PL_OA ||
- new_mem->mem_type == AMDGPU_PL_DOORBELL) {
+ new_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
/* Nothing to save here */
amdgpu_bo_move_notify(bo, evict, new_mem);
ttm_bo_move_null(bo, new_mem);
@@ -629,6 +625,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
mem->bus.is_iomem = true;
mem->bus.caching = ttm_uncached;
break;
+ case AMDGPU_PL_MMIO_REMAP:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset += adev->rmmio_remap.bus_addr;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_uncached;
+ break;
default:
return -EINVAL;
}
@@ -646,6 +648,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
+ else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
+ return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
@@ -693,10 +697,11 @@ struct amdgpu_ttm_tt {
* memory and start HMM tracking CPU page table update
*
* Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
- * once afterwards to stop HMM tracking
+ * once afterwards to stop HMM tracking. It is the caller's responsibility to
+ * ensure that the range points to valid memory and that it is freed afterwards.
*/
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
- struct hmm_range **range)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct amdgpu_hmm_range *range)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -706,9 +711,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
bool readonly;
int r = 0;
- /* Make sure get_user_pages_done() can cleanup gracefully */
- *range = NULL;
-
mm = bo->notifier.mm;
if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n");
@@ -732,7 +734,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
- readonly, NULL, pages, range);
+ readonly, NULL, range);
out_unlock:
mmap_read_unlock(mm);
if (r)
@@ -743,38 +745,6 @@ out_unlock:
return r;
}
-/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
- */
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
- if (gtt && gtt->userptr && range)
- amdgpu_hmm_range_get_pages_done(range);
-}
-
-/*
- * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
- * Check if the pages backing this ttm range have been invalidated
- *
- * Returns: true if pages are still valid
- */
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
-
- if (!gtt || !gtt->userptr || !range)
- return false;
-
- DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
- gtt->userptr, ttm->num_pages);
-
- WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
-
- return !amdgpu_hmm_range_get_pages_done(range);
-}
#endif
/*
@@ -784,12 +754,12 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range)
{
unsigned long i;
for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i] = pages ? pages[i] : NULL;
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_range.hmm_pfns[i]) : NULL;
}
/*
@@ -1355,10 +1325,11 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && (mem->mem_type == TTM_PL_TT ||
mem->mem_type == AMDGPU_PL_DOORBELL ||
- mem->mem_type == AMDGPU_PL_PREEMPT)) {
+ mem->mem_type == AMDGPU_PL_PREEMPT ||
+ mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
- if (ttm->caching == ttm_cached)
+ if (ttm && ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
@@ -1510,10 +1481,12 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4, AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
if (r)
goto out;
+ mutex_lock(&adev->mman.gtt_window_lock);
amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
src_mm.start;
@@ -1528,6 +1501,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
WARN_ON(job->ibs[0].length_dw > num_dw);
fence = amdgpu_job_submit(job);
+ mutex_unlock(&adev->mman.gtt_window_lock);
if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
r = -ETIMEDOUT;
@@ -1789,18 +1763,14 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
}
- if (!adev->gmc.is_app_apu) {
- ret = amdgpu_bo_create_kernel_at(
- adev, adev->gmc.real_vram_size - reserve_size,
- reserve_size, &adev->mman.fw_reserved_memory, NULL);
- if (ret) {
- dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
- amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
- NULL, NULL);
- return ret;
- }
- } else {
- DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
+ ret = amdgpu_bo_create_kernel_at(
+ adev, adev->gmc.real_vram_size - reserve_size, reserve_size,
+ &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
+ NULL);
+ return ret;
}
return 0;
@@ -1822,7 +1792,7 @@ static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
adev->gmc.mem_partitions[i].numa.node,
- false, false);
+ TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
}
return 0;
}
@@ -1841,6 +1811,59 @@ static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
adev->mman.ttm_pools = NULL;
}
+/**
+ * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
+ * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
+ * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
+ * GEM object (amdgpu_bo_create).
+ *
+ * Return:
+ * * 0 on success or intentional skip (feature not present/unsupported)
+ * * negative errno on allocation failure
+ */
+static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_bo_param bp;
+ int r;
+
+ /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
+ if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
+ return 0;
+
+ memset(&bp, 0, sizeof(bp));
+
+ /* Create exactly one GEM BO in the MMIO_REMAP domain. */
+ bp.type = ttm_bo_type_device; /* userspace-mappable GEM */
+ bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
+ bp.flags = 0;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
+ * amdgpu_ttm_mmio_remap_bo_init().
+ */
+static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_unref(&adev->rmmio_remap.bo);
+ adev->rmmio_remap.bo = NULL;
+}
+
/*
* amdgpu_ttm_init - Init the memory management (ttm) as well as various
* gtt/vram related fields.
@@ -1862,8 +1885,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
- adev->need_swiotlb,
- dma_addressing_limited(adev->dev));
+ (adev->need_swiotlb ?
+ TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+ (dma_addressing_limited(adev->dev) ?
+ TTM_ALLOCATION_POOL_USE_DMA32 : 0) |
+ TTM_ALLOCATION_POOL_BENEFICIAL_ORDER(get_order(SZ_2M)));
if (r) {
dev_err(adev->dev,
"failed initializing buffer object driver(%d).\n", r);
@@ -1877,11 +1903,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
adev->mman.initialized = true;
- /* Initialize VRAM pool with all of VRAM divided into pages */
- r = amdgpu_vram_mgr_init(adev);
- if (r) {
- dev_err(adev->dev, "Failed initializing VRAM heap.\n");
- return r;
+ if (!adev->gmc.is_app_apu) {
+ /* Initialize VRAM pool with all of VRAM divided into pages */
+ r = amdgpu_vram_mgr_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
+ return r;
+ }
}
/* Change the size here instead of the init above so only lpfn is affected */
@@ -1910,19 +1938,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
/*
- *The reserved vram for driver must be pinned to the specified
- *place on the VRAM, so reserve it early.
+ * The reserved VRAM for the driver must be pinned to a specific
+ * location in VRAM, so reserve it early.
*/
r = amdgpu_ttm_drv_reserve_vram_init(adev);
if (r)
return r;
/*
- * only NAVI10 and onwards ASIC support for IP discovery.
- * If IP discovery enabled, a block of memory should be
- * reserved for IP discovey.
+ * Only NAVI10 and later ASICs support IP discovery.
+ * If IP discovery is enabled, a block of memory should be
+ * reserved for it.
*/
- if (adev->mman.discovery_bin) {
+ if (adev->discovery.reserve_tmr) {
r = amdgpu_ttm_reserve_tmr(adev);
if (r)
return r;
@@ -2008,6 +2036,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
+ /* Initialize MMIO-remap pool (single page 4K) */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
+ return r;
+ }
+
+ /* Allocate the singleton MMIO_REMAP BO (4K) if supported */
+ r = amdgpu_ttm_mmio_remap_bo_init(adev);
+ if (r)
+ return r;
+
/* Initialize preemptible memory pool */
r = amdgpu_preempt_mgr_init(adev);
if (r) {
@@ -2070,6 +2110,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
}
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
+
+ amdgpu_ttm_mmio_remap_bo_fini(adev);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
@@ -2082,7 +2124,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
drm_dev_exit(idx);
}
- amdgpu_vram_mgr_fini(adev);
+ if (!adev->gmc.is_app_apu)
+ amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
amdgpu_doorbell_fini(adev);
@@ -2091,6 +2134,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
dev_info(adev->dev, "amdgpu: ttm finalized\n");
@@ -2143,8 +2187,10 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
} else {
drm_sched_entity_destroy(&adev->mman.high_pr);
drm_sched_entity_destroy(&adev->mman.low_pr);
- dma_fence_put(man->move);
- man->move = NULL;
+ /* Drop all the old fences since re-creating the scheduler entities
+ * will allocate new contexts.
+ */
+ ttm_resource_manager_cleanup(man);
}
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
@@ -2167,7 +2213,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
struct dma_resv *resv,
bool vm_needs_flush,
struct amdgpu_job **job,
- bool delayed)
+ bool delayed, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = direct_submit ?
AMDGPU_IB_POOL_DIRECT :
@@ -2177,7 +2223,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
&adev->mman.high_pr;
r = amdgpu_job_alloc_with_ib(adev, entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
- num_dw * 4, pool, job);
+ num_dw * 4, pool, job, k_job_id);
if (r)
return r;
@@ -2217,7 +2263,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
- resv, vm_needs_flush, &job, false);
+ resv, vm_needs_flush, &job, false,
+ AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
if (r)
return r;
@@ -2252,7 +2299,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
uint64_t dst_addr, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence,
- bool vm_needs_flush, bool delayed)
+ bool vm_needs_flush, bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
@@ -2265,7 +2313,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
- &job, delayed);
+ &job, delayed, k_job_id);
if (r)
return r;
@@ -2335,7 +2383,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
goto err;
r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
- &next, true, true);
+ &next, true, true,
+ AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
if (r)
goto err;
@@ -2354,7 +2403,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **f,
- bool delayed)
+ bool delayed,
+ u64 k_job_id)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -2384,7 +2434,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
goto error;
r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
- &next, true, delayed);
+ &next, true, delayed, k_job_id);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 2309df3f68a9..577ee04ce0bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -28,13 +28,15 @@
#include <drm/gpu_scheduler.h>
#include <drm/ttm/ttm_placement.h>
#include "amdgpu_vram_mgr.h"
+#include "amdgpu_hmm.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
-#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5)
+#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6)
#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
@@ -81,9 +83,6 @@ struct amdgpu_mman {
uint64_t stolen_reserved_offset;
uint64_t stolen_reserved_size;
- /* discovery */
- uint8_t *discovery_bin;
- uint32_t discovery_tmr_size;
/* fw reserved memory */
struct amdgpu_bo *fw_reserved_memory;
struct amdgpu_bo *fw_reserved_memory_extend;
@@ -169,12 +168,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush, uint32_t copy_flags);
-int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- const struct amdgpu_copy_mem *src,
- const struct amdgpu_copy_mem *dst,
- uint64_t size, bool tmz,
- struct dma_resv *resv,
- struct dma_fence **f);
int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct dma_resv *resv,
struct dma_fence **fence);
@@ -182,38 +175,25 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
struct dma_resv *resv,
struct dma_fence **fence,
- bool delayed);
+ bool delayed,
+ u64 k_job_id);
int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
- struct hmm_range **range);
-void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range);
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct amdgpu_hmm_range *range);
#else
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
- struct page **pages,
- struct hmm_range **range)
+ struct amdgpu_hmm_range *range)
{
return -EPERM;
}
-static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
-}
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
- struct hmm_range *range)
-{
- return false;
-}
#endif
-void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct amdgpu_hmm_range *range);
int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
uint64_t *user_addr);
int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index c92b8794aa73..3f0b0e9af4f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -24,6 +24,7 @@
#include <linux/sort.h>
#include "amdgpu.h"
#include "umc_v6_7.h"
+#include "amdgpu_ras_mgr.h"
#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms
#define MAX_UMC_HASH_STRING_SIZE 256
@@ -96,67 +97,96 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control = &con->eeprom_control;
unsigned int error_query_mode;
int ret = 0;
unsigned long err_count;
amdgpu_ras_get_error_query_mode(adev, &error_query_mode);
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
+
mutex_lock(&con->page_retirement_lock);
- ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
- if (ret == -EOPNOTSUPP &&
- error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
- if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
- adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
- adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
- adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
- else
- err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
+ if (!amdgpu_ras_smu_eeprom_supported(adev)) {
+ ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
+ if (ret == -EOPNOTSUPP &&
+ error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev,
+ ras_error_status);
+
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len =
+ adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev,
+ ras_error_status);
+ }
+ } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
+ (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_count)
+ adev->umc.ras->ecc_info_query_ras_error_count(adev,
+ ras_error_status);
+
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error address record!\n");
+ else
+ err_data->err_addr_len =
+ adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ecc_info_query_ras_error_address(adev,
+ ras_error_status);
+ }
}
- } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
- (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
- if (adev->umc.ras &&
- adev->umc.ras->ecc_info_query_ras_error_count)
- adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
-
- if (adev->umc.ras &&
- adev->umc.ras->ecc_info_query_ras_error_address &&
- adev->umc.max_ras_err_cnt_per_query) {
- err_data->err_addr =
- kcalloc(adev->umc.max_ras_err_cnt_per_query,
- sizeof(struct eeprom_table_record), GFP_KERNEL);
-
- /* still call query_ras_error_address to clear error status
- * even NOMEM error is encountered
- */
- if(!err_data->err_addr)
- dev_warn(adev->dev, "Failed to alloc memory for "
- "umc error address record!\n");
- else
- err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
-
- /* umc query_ras_error_address is also responsible for clearing
- * error status
- */
- adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
+ } else {
+ if (!amdgpu_ras_eeprom_update_record_num(control)) {
+ err_data->err_addr_cnt = err_data->de_count =
+ control->ras_num_recs - control->ras_num_recs_old;
+ amdgpu_ras_eeprom_read_idx(control, err_data->err_addr,
+ control->ras_num_recs_old, err_data->de_count);
}
}
@@ -166,7 +196,7 @@ void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
if ((amdgpu_bad_page_threshold != 0) &&
err_data->err_addr_cnt) {
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
- err_data->err_addr_cnt, false);
+ err_data->err_addr_cnt, amdgpu_ras_smu_eeprom_supported(adev));
amdgpu_ras_save_bad_pages(adev, &err_count);
amdgpu_dpm_send_hbm_bad_pages_num(adev,
@@ -244,6 +274,15 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
}
amdgpu_ras_error_data_fini(&err_data);
+ } else if (amdgpu_uniras_enabled(adev)) {
+ struct ras_ih_info ih_info = {0};
+
+ ih_info.block = block;
+ ih_info.pasid = pasid;
+ ih_info.reset = reset;
+ ih_info.pasid_fn = pasid_fn;
+ ih_info.data = data;
+ amdgpu_ras_mgr_handle_consumer_interrupt(adev, &ih_info);
} else {
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int ret;
@@ -252,6 +291,7 @@ int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
block, pasid, pasid_fn, data, reset);
if (!ret) {
atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_consumption_count);
wake_up(&con->page_retirement_wq);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index ec203f9e5ffa..28dff750c47e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -113,6 +113,8 @@ struct amdgpu_umc_ras {
uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
uint64_t mca_addr, uint64_t retired_page);
void (*get_retire_flip_bits)(struct amdgpu_device *adev);
+ void (*mca_ipid_parse)(struct amdgpu_device *adev, uint64_t ipid,
+ uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid);
};
struct amdgpu_umc_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 8190c24a649a..9a969175900e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -25,10 +25,13 @@
#include <drm/drm_auth.h>
#include <drm/drm_exec.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
+#include "amdgpu_reset.h"
#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
+#include "amdgpu_hmm.h"
#include "amdgpu_userq_fence.h"
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
@@ -44,22 +47,301 @@ u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
return userq_ip_mask;
}
+static bool amdgpu_userq_is_reset_type_supported(struct amdgpu_device *adev,
+ enum amdgpu_ring_type ring_type, int reset_type)
+{
+ if (ring_type < 0 || ring_type >= AMDGPU_RING_TYPE_MAX)
+ return false;
+
+ switch (ring_type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+static void amdgpu_userq_gpu_reset(struct amdgpu_device *adev)
+{
+ if (amdgpu_device_should_recover_gpu(adev)) {
+ amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->userq_reset_work);
+ /* Wait for the reset job to complete */
+ flush_work(&adev->userq_reset_work);
+ }
+}
+
static int
-amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+amdgpu_userq_detect_and_reset_queues(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const int queue_types[] = {
+ AMDGPU_RING_TYPE_COMPUTE,
+ AMDGPU_RING_TYPE_GFX,
+ AMDGPU_RING_TYPE_SDMA
+ };
+ const int num_queue_types = ARRAY_SIZE(queue_types);
+ bool gpu_reset = false;
+ int r = 0;
+ int i;
+
+ /* Warning if current process mutex is not held */
+ WARN_ON(!mutex_is_locked(&uq_mgr->userq_mutex));
+
+ if (unlikely(adev->debug_disable_gpu_ring_reset)) {
+ dev_err(adev->dev, "userq reset disabled by debug mask\n");
+ return 0;
+ }
+
+ /*
+ * If GPU recovery feature is disabled system-wide,
+ * skip all reset detection logic
+ */
+ if (!amdgpu_gpu_recovery)
+ return 0;
+
+ /*
+ * Iterate through all queue types to detect and reset problematic queues
+ * Process each queue type in the defined order
+ */
+ for (i = 0; i < num_queue_types; i++) {
+ int ring_type = queue_types[i];
+ const struct amdgpu_userq_funcs *funcs = adev->userq_funcs[ring_type];
+
+ if (!amdgpu_userq_is_reset_type_supported(adev, ring_type, AMDGPU_RESET_TYPE_PER_QUEUE))
+ continue;
+
+ if (atomic_read(&uq_mgr->userq_count[ring_type]) > 0 &&
+ funcs && funcs->detect_and_reset) {
+ r = funcs->detect_and_reset(adev, ring_type);
+ if (r) {
+ gpu_reset = true;
+ break;
+ }
+ }
+ }
+
+ if (gpu_reset)
+ amdgpu_userq_gpu_reset(adev);
+
+ return r;
+}
+
+static int amdgpu_userq_buffer_va_list_add(struct amdgpu_usermode_queue *queue,
+ struct amdgpu_bo_va_mapping *va_map, u64 addr)
+{
+ struct amdgpu_userq_va_cursor *va_cursor;
+ struct userq_va_list;
+
+ va_cursor = kzalloc(sizeof(*va_cursor), GFP_KERNEL);
+ if (!va_cursor)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&va_cursor->list);
+ va_cursor->gpu_addr = addr;
+ atomic_set(&va_map->bo_va->userq_va_mapped, 1);
+ list_add(&va_cursor->list, &queue->userq_va_list);
+
+ return 0;
+}
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size)
+{
+ struct amdgpu_bo_va_mapping *va_map;
+ struct amdgpu_vm *vm = queue->vm;
+ u64 user_addr;
+ u64 size;
+ int r = 0;
+
+ user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+ if (!va_map) {
+ r = -EINVAL;
+ goto out_err;
+ }
+ /* Only validate that the userq is resident within the VM mapping range */
+ if (user_addr >= va_map->start &&
+ va_map->last - user_addr + 1 >= size) {
+ amdgpu_userq_buffer_va_list_add(queue, va_map, user_addr);
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+ }
+
+ r = -EINVAL;
+out_err:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return r;
+}
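The containment test above accepts a request only when it lies entirely inside the looked-up mapping. A standalone mirror of the check, in GPU-page units (range_inside_mapping is hypothetical; the explicit start <= map_last guard is implicit in the kernel because amdgpu_vm_bo_lookup_mapping() only returns a mapping that contains the address):

#include <stdbool.h>
#include <stdint.h>

/* Request [start, start + npages) must fit within [map_start, map_last],
 * where map_last is inclusive, all in GPU-page units. */
static bool range_inside_mapping(uint64_t start, uint64_t npages,
				 uint64_t map_start, uint64_t map_last)
{
	return start >= map_start && start <= map_last &&
	       map_last - start + 1 >= npages;
}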
+
+static bool amdgpu_userq_buffer_va_mapped(struct amdgpu_vm *vm, u64 addr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ bool r;
+
+ if (amdgpu_bo_reserve(vm->root.bo, false))
+ return false;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!IS_ERR_OR_NULL(mapping) && atomic_read(&mapping->bo_va->userq_va_mapped))
+ r = true;
+ else
+ r = false;
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ return r;
+}
+
+static bool amdgpu_userq_buffer_vas_mapped(struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ int r = 0;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ r += amdgpu_userq_buffer_va_mapped(queue->vm, va_cursor->gpu_addr);
+ dev_dbg(queue->userq_mgr->adev->dev,
+ "validate the userq mapping:%p va:%llx r:%d\n",
+ queue, va_cursor->gpu_addr, r);
+ }
+
+ if (r != 0)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_buffer_va_list_del(struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_userq_va_cursor *va_cursor)
+{
+ atomic_set(&mapping->bo_va->userq_va_mapped, 0);
+ list_del(&va_cursor->list);
+ kfree(va_cursor);
+}
+
+static int amdgpu_userq_buffer_vas_list_cleanup(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_userq_va_cursor *va_cursor, *tmp;
+ struct amdgpu_bo_va_mapping *mapping;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ list_for_each_entry_safe(va_cursor, tmp, &queue->userq_va_list, list) {
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, va_cursor->gpu_addr);
+ if (!mapping) {
+ r = -EINVAL;
+ goto err;
+ }
+ dev_dbg(adev->dev, "delete the userq:%p va:%llx\n",
+ queue, va_cursor->gpu_addr);
+ amdgpu_userq_buffer_va_list_del(mapping, va_cursor);
+ }
+err:
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ return r;
+}
+
+static int
+amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *userq_funcs =
adev->userq_funcs[queue->queue_type];
+ bool found_hung_queue = false;
int r = 0;
if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->preempt(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ found_hung_queue = true;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
+ }
+ }
+
+ if (found_hung_queue)
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
+ r = userq_funcs->restore(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ bool found_hung_queue = false;
+ int r = 0;
+
+ if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
+ (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
r = userq_funcs->unmap(uq_mgr, queue);
- if (r)
+ if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
- else
+ found_hung_queue = true;
+ } else {
queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
}
+
+ if (found_hung_queue)
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
+
return r;
}
@@ -76,26 +358,33 @@ amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
r = userq_funcs->map(uq_mgr, queue);
if (r) {
queue->state = AMDGPU_USERQ_STATE_HUNG;
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
} else {
queue->state = AMDGPU_USERQ_STATE_MAPPED;
}
}
+
return r;
}
-static void
+static int
amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue)
{
struct dma_fence *f = queue->last_fence;
- int ret;
+ int ret = 0;
if (f && !dma_fence_is_signaled(f)) {
- ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
- if (ret <= 0)
+ ret = dma_fence_wait_timeout(f, true, MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0) {
drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
f->context, f->seqno);
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ return -ETIME;
+ }
}
+
+ return ret;
}
static void
@@ -106,32 +395,27 @@ amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+
+ /* Drop the userq reference. */
+ amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
amdgpu_userq_fence_driver_free(queue);
- idr_remove(&uq_mgr->userq_idr, queue_id);
+ /* Use interrupt-safe locking since IRQ handlers may access these XArrays */
+ xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
+ xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
+ queue->userq_mgr = NULL;
+ list_del(&queue->userq_va_list);
kfree(queue);
-}
-int
-amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr)
-{
- struct amdgpu_usermode_queue *queue;
- int queue_id;
- int ret = 0;
-
- mutex_lock(&uq_mgr->userq_mutex);
- /* Resume all the queues for this process */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id)
- ret += queue->state == AMDGPU_USERQ_STATE_MAPPED;
-
- mutex_unlock(&uq_mgr->userq_mutex);
- return ret;
+ up_read(&adev->reset_domain->sem);
}
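The down_read()/up_read() pair on reset_domain->sem in the cleanup path above is the usual exclusion pattern against an in-flight full GPU reset, which holds the same semaphore for write; readers simply block until the reset completes. A minimal sketch of the pattern (example_reset_safe_op is illustrative):

static void example_reset_safe_op(struct amdgpu_device *adev)
{
	/* Blocks while a mode-1 reset (the write holder) is in progress. */
	down_read(&adev->reset_domain->sem);

	/* ... touch queue/doorbell bookkeeping that a reset would tear down ... */

	up_read(&adev->reset_domain->sem);
}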
static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
- return idr_find(&uq_mgr->userq_idr, qid);
+ return xa_load(&uq_mgr->userq_mgr_xa, qid);
}
void
@@ -259,17 +543,6 @@ amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
case AMDGPU_HW_IP_DMA:
db_size = sizeof(u64);
break;
-
- case AMDGPU_HW_IP_VCN_ENC:
- db_size = sizeof(u32);
- db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
- break;
-
- case AMDGPU_HW_IP_VPE:
- db_size = sizeof(u32);
- db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
- break;
-
default:
drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not support\n",
db_info->queue_type);
@@ -318,15 +591,20 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
amdgpu_bo_unreserve(queue->db_obj.obj);
}
amdgpu_bo_unref(&queue->db_obj.obj);
-
+ atomic_dec(&uq_mgr->userq_count[queue->queue_type]);
#if defined(CONFIG_DEBUG_FS)
debugfs_remove_recursive(queue->debugfs_queue);
#endif
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ /* TODO: a userq HW unmap error requires a GPU reset */
+ if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) {
+ drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a userq still mapped in HW\n");
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ }
amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
mutex_unlock(&uq_mgr->userq_mutex);
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
@@ -364,7 +642,7 @@ static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
return -EINVAL;
}
- seq_printf(m, "queue_type %d\n", queue->queue_type);
+ seq_printf(m, "queue_type: %d\n", queue->queue_type);
seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
amdgpu_bo_unreserve(bo);
@@ -398,33 +676,17 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
struct amdgpu_db_info db_info;
char *queue_name;
bool skip_map_queue;
+ u32 qid;
uint64_t index;
- int qid, r = 0;
+ int r = 0;
int priority =
(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
- /* Usermode queues are only supported for GFX IP as of now */
- if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
- args->in.ip_type != AMDGPU_HW_IP_DMA &&
- args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
- drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n",
- args->in.ip_type);
- return -EINVAL;
- }
-
r = amdgpu_userq_priority_permit(filp, priority);
if (r)
return r;
- if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
- (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
- (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
- !amdgpu_is_tmz(adev)) {
- drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n");
- return -EINVAL;
- }
-
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
@@ -439,7 +701,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
*
* This will also make sure we have a valid eviction fence ready to be used.
*/
- mutex_lock(&adev->userq_mutex);
amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
uq_funcs = adev->userq_funcs[args->in.ip_type];
@@ -456,6 +717,8 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = -ENOMEM;
goto unlock;
}
+
+ INIT_LIST_HEAD(&queue->userq_va_list);
queue->doorbell_handle = args->in.doorbell_handle;
queue->queue_type = args->in.ip_type;
queue->vm = &fpriv->vm;
@@ -466,6 +729,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
db_info.db_obj = &queue->db_obj;
db_info.doorbell_offset = args->in.doorbell_offset;
+ /* Validate the userq virtual addresses. */
+ if (amdgpu_userq_input_va_validate(queue, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ r = -EINVAL;
+ kfree(queue);
+ goto unlock;
+ }
+
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
@@ -491,16 +763,27 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
goto unlock;
}
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+ r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
+ if (r) {
+ kfree(queue);
+ up_read(&adev->reset_domain->sem);
+ goto unlock;
+ }
- qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
- if (qid < 0) {
+ r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
+ if (r) {
drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
r = -ENOMEM;
+ up_read(&adev->reset_domain->sem);
goto unlock;
}
+ up_read(&adev->reset_domain->sem);
+ queue->userq_mgr = uq_mgr;
/* don't map the queue if scheduling is halted */
if (adev->userq_halt_for_enforce_isolation &&
@@ -513,7 +796,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_map_helper(uq_mgr, queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
- idr_remove(&uq_mgr->userq_idr, qid);
+ xa_erase(&uq_mgr->userq_mgr_xa, qid);
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
@@ -535,30 +818,53 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
kfree(queue_name);
args->out.queue_id = qid;
+ atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
return r;
}
-int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp)
+static int amdgpu_userq_input_args_validate(struct drm_device *dev,
+ union drm_amdgpu_userq *args,
+ struct drm_file *filp)
{
- union drm_amdgpu_userq *args = data;
- int r;
+ struct amdgpu_device *adev = drm_to_adev(dev);
switch (args->in.op) {
case AMDGPU_USERQ_OP_CREATE:
if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
return -EINVAL;
- r = amdgpu_userq_create(filp, args);
- if (r)
- drm_file_err(filp, "Failed to create usermode queue\n");
- break;
+ /* Usermode queues are only supported for GFX, Compute and DMA IPs as of now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+ if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
+ args->in.queue_va == 0 ||
+ args->in.queue_size == 0) {
+ drm_file_err(filp, "invalidate userq queue va or size\n");
+ return -EINVAL;
+ }
+ if (!args->in.wptr_va || !args->in.rptr_va) {
+ drm_file_err(filp, "invalidate userq queue rptr or wptr\n");
+ return -EINVAL;
+ }
+ break;
case AMDGPU_USERQ_OP_FREE:
if (args->in.ip_type ||
args->in.doorbell_handle ||
@@ -568,10 +874,34 @@ int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
args->in.queue_size ||
args->in.rptr_va ||
args->in.wptr_va ||
- args->in.wptr_va ||
args->in.mqd ||
args->in.mqd_size)
return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
r = amdgpu_userq_destroy(filp, args->in.queue_id);
if (r)
drm_file_err(filp, "Failed to destroy usermode queue\n");
@@ -589,12 +919,20 @@ static int
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
/* Resume all the queues for this process */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
- r = amdgpu_userq_map_helper(uq_mgr, queue);
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+
+ if (!amdgpu_userq_buffer_vas_mapped(queue)) {
+ drm_file_err(uq_mgr->file,
+ "trying restore queue without va mapping\n");
+ queue->state = AMDGPU_USERQ_STATE_INVALID_VA;
+ continue;
+ }
+
+ r = amdgpu_userq_restore_helper(uq_mgr, queue);
if (r)
ret = r;
}
@@ -604,108 +942,179 @@ amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
return ret;
}
+static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+/* Handle all BOs on the invalidated list, validate them and update the PTs */
static int
-amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
+ struct amdgpu_vm *vm)
{
struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
int ret;
- amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (ret)
- DRM_ERROR("Fail to validate\n");
+ bo = bo_va->base.bo;
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
+ if (unlikely(ret))
+ return ret;
- return ret;
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ return ret;
+
+ /* This moves the bo_va to the done list */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ return ret;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
}
+/* Make sure the whole VM is ready to be used */
static int
-amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
+amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
- struct amdgpu_vm *vm = &fpriv->vm;
+ bool invalidated = false, new_addition = false;
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_hmm_range *range;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ unsigned long key, tmp_key;
struct amdgpu_bo_va *bo_va;
- struct ww_acquire_ctx *ticket;
- struct drm_exec exec;
struct amdgpu_bo *bo;
- struct dma_resv *resv;
- bool clear, unlock;
- int ret = 0;
+ struct drm_exec exec;
+ struct xarray xa;
+ int ret;
+
+ xa_init(&xa);
+retry_lock:
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
- ret = amdgpu_vm_lock_pd(vm, &exec, 2);
+ ret = amdgpu_vm_lock_pd(vm, &exec, 1);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret)) {
- drm_file_err(uq_mgr->file, "Failed to lock PD\n");
+ if (unlikely(ret))
goto unlock_all;
- }
- /* Lock the done list */
- list_for_each_entry(bo_va, &vm->done, base.vm_status) {
- bo = bo_va->base.bo;
- if (!bo)
- continue;
+ ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
- ret = drm_exec_lock_obj(&exec, &bo->tbo.base);
- drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
- goto unlock_all;
- }
+ /* This validates PDs, PTs and per VM BOs */
+ ret = amdgpu_vm_validate(adev, vm, NULL,
+ amdgpu_userq_validate_vm,
+ NULL);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ /* This locks and validates the remaining evicted BOs */
+ ret = amdgpu_userq_bo_validate(adev, &exec, vm);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
}
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->moved)) {
- bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
- base.vm_status);
- spin_unlock(&vm->status_lock);
+ if (invalidated) {
+ xa_for_each(&xa, tmp_key, range) {
+ bo = range->bo;
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
- /* Per VM BOs never need to bo cleared in the page tables */
- ret = amdgpu_vm_bo_update(adev, bo_va, false);
- if (ret)
- goto unlock_all;
- spin_lock(&vm->status_lock);
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unlock_all;
+ }
+ invalidated = false;
}
- ticket = &exec.ticket;
- while (!list_empty(&vm->invalidated)) {
- bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
- base.vm_status);
- resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->status_lock);
+ ret = amdgpu_vm_handle_moved(adev, vm, NULL);
+ if (ret)
+ goto unlock_all;
+ key = 0;
+ /* Validate User Ptr BOs */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status) {
bo = bo_va->base.bo;
- ret = amdgpu_userq_validate_vm_bo(NULL, bo);
- if (ret) {
- drm_file_err(uq_mgr->file, "Failed to validate BO\n");
- goto unlock_all;
- }
+ if (!bo)
+ continue;
- /* Try to reserve the BO to avoid clearing its ptes */
- if (!adev->debug_vm && dma_resv_trylock(resv)) {
- clear = false;
- unlock = true;
- /* The caller is already holding the reservation lock */
- } else if (dma_resv_locking_ctx(resv) == ticket) {
- clear = false;
- unlock = false;
- /* Somebody else is using the BO right now */
- } else {
- clear = true;
- unlock = false;
+ if (!amdgpu_ttm_tt_is_userptr(bo->tbo.ttm))
+ continue;
+
+ range = xa_load(&xa, key);
+ if (range && range->bo != bo) {
+ xa_erase(&xa, key);
+ amdgpu_hmm_range_free(range);
+ range = NULL;
}
- ret = amdgpu_vm_bo_update(adev, bo_va, clear);
+ if (!range) {
+ range = amdgpu_hmm_range_alloc(bo);
+ if (!range) {
+ ret = -ENOMEM;
+ goto unlock_all;
+ }
- if (unlock)
- dma_resv_unlock(resv);
- if (ret)
- goto unlock_all;
+ xa_store(&xa, key, range, GFP_KERNEL);
+ new_addition = true;
+ }
+ key++;
+ }
- spin_lock(&vm->status_lock);
+ if (new_addition) {
+ drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ ret = amdgpu_ttm_tt_get_user_pages(bo, range);
+ if (ret)
+ goto unlock_all;
+ }
+
+ invalidated = true;
+ new_addition = false;
+ goto retry_lock;
}
- spin_unlock(&vm->status_lock);
+
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
+ if (ret)
+ goto unlock_all;
+
+ /*
+ * We need to wait for all VM updates to finish before restarting the
+ * queues. Using the done list this way is safe since everything is
+ * locked in place.
+ */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status)
+ dma_fence_wait(bo_va->last_pt_update, false);
+ dma_fence_wait(vm->last_update, false);
ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
if (ret)
@@ -713,6 +1122,13 @@ amdgpu_userq_validate_bos(struct amdgpu_userq_mgr *uq_mgr)
unlock_all:
drm_exec_fini(&exec);
+ xa_for_each(&xa, tmp_key, range) {
+ if (!range)
+ continue;
+ bo = range->bo;
+ amdgpu_hmm_range_free(range);
+ }
+ xa_destroy(&xa);
return ret;
}
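
The control flow above is dense; the key point is that user pages cannot be faulted in while the drm_exec reservation locks are held, so every newly discovered userptr BO forces a full unlock/pin/relock cycle through the retry_lock label. An outline as a comment block (not compilable logic, helper steps paraphrased from the code above):

        /*
         * Simplified shape of amdgpu_userq_vm_validate():
         *
         * retry:
         *      lock PD, done-list and invalidated BOs via drm_exec;
         *      validate PDs/PTs, per-VM BOs and evicted BOs;
         *      if a userptr BO has no pinned HMM range:
         *              drop all locks (drm_exec_fini), pin the pages with
         *              amdgpu_ttm_tt_get_user_pages(), goto retry;
         *      revalidate pinned ranges, handle moved BOs, update PDEs;
         *      wait for PT updates, then swap in a fresh eviction fence;
         */
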
@@ -726,7 +1142,7 @@ static void amdgpu_userq_restore_worker(struct work_struct *work)
mutex_lock(&uq_mgr->userq_mutex);
- ret = amdgpu_userq_validate_bos(uq_mgr);
+ ret = amdgpu_userq_vm_validate(uq_mgr);
if (ret) {
drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
goto unlock;
@@ -746,12 +1162,13 @@ static int
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
+ amdgpu_userq_detect_and_reset_queues(uq_mgr);
/* Try to unmap all the queues in this process ctx */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
- r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
+ r = amdgpu_userq_preempt_helper(uq_mgr, queue);
if (r)
ret = r;
}
@@ -761,13 +1178,31 @@ amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
return ret;
}
+void amdgpu_userq_reset_work(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ userq_reset_work);
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_USERQ;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ /*set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);*/
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+}
+
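
The handler assumes the work item has been wired up elsewhere in the series; a hedged sketch of that wiring (not visible in this hunk, and queue_hung is a placeholder condition):

        /* at device init, presumably: */
        INIT_WORK(&adev->userq_reset_work, amdgpu_userq_reset_work);

        /* from a queue-hang detection path, presumably: */
        if (queue_hung)
                schedule_work(&adev->userq_reset_work);
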
static int
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id, ret;
+ unsigned long queue_id;
+ int ret;
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
struct dma_fence *f = queue->last_fence;
if (!f || dma_fence_is_signaled(f))
@@ -787,22 +1222,19 @@ void
amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence *ev_fence)
{
- int ret;
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ int ret;
/* Wait for any pending userqueue fence work to finish */
ret = amdgpu_userq_wait_for_signal(uq_mgr);
- if (ret) {
- drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
- return;
- }
+ if (ret)
+ dev_err(adev->dev, "Not evicting userqueue, timeout waiting for work\n");
ret = amdgpu_userq_evict_all(uq_mgr);
- if (ret) {
- drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
- return;
- }
+ if (ret)
+ dev_err(adev->dev, "Failed to evict userqueue\n");
/* Signal current eviction fence */
amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
@@ -820,44 +1252,31 @@ int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *f
struct amdgpu_device *adev)
{
mutex_init(&userq_mgr->userq_mutex);
- idr_init_base(&userq_mgr->userq_idr, 1);
+ xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC);
userq_mgr->adev = adev;
userq_mgr->file = file_priv;
- mutex_lock(&adev->userq_mutex);
- list_add(&userq_mgr->list, &adev->userq_mgr_list);
- mutex_unlock(&adev->userq_mutex);
-
INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
return 0;
}
void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
- struct amdgpu_device *adev = userq_mgr->adev;
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- uint32_t queue_id;
+ unsigned long queue_id;
cancel_delayed_work_sync(&userq_mgr->resume_work);
- mutex_lock(&adev->userq_mutex);
mutex_lock(&userq_mgr->userq_mutex);
- idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ amdgpu_userq_detect_and_reset_queues(userq_mgr);
+ xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) {
amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
amdgpu_userq_unmap_helper(userq_mgr, queue);
amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
}
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- if (uqm == userq_mgr) {
- list_del(&uqm->list);
- break;
- }
- }
- idr_destroy(&userq_mgr->userq_idr);
+ xa_destroy(&userq_mgr->userq_mgr_xa);
mutex_unlock(&userq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
mutex_destroy(&userq_mgr->userq_mutex);
}
@@ -865,51 +1284,51 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0, r;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
if (!ip_mask)
return 0;
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
- mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ guard(mutex)(&uqm->userq_mutex);
+ amdgpu_userq_detect_and_reset_queues(uqm);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
r = amdgpu_userq_unmap_helper(uqm, queue);
- if (r)
- ret = r;
- }
- mutex_unlock(&uqm->userq_mutex);
+ if (r)
+ return r;
}
- mutex_unlock(&adev->userq_mutex);
- return ret;
+ return 0;
}
int amdgpu_userq_resume(struct amdgpu_device *adev)
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
- int ret = 0, r;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+ int r;
if (!ip_mask)
return 0;
- mutex_lock(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ guard(mutex)(&uqm->userq_mutex);
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
r = amdgpu_userq_map_helper(uqm, queue);
- if (r)
- ret = r;
- }
- mutex_unlock(&uqm->userq_mutex);
+ if (r)
+ return r;
}
- mutex_unlock(&adev->userq_mutex);
- return ret;
+
+ return 0;
}
int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
@@ -917,33 +1336,32 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already stopped!\n");
adev->userq_halt_for_enforce_isolation = true;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
- (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
- (queue->xcp_id == idx)) {
- r = amdgpu_userq_unmap_helper(uqm, queue);
- if (r)
- ret = r;
- }
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ amdgpu_userq_detect_and_reset_queues(uqm);
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ if (r)
+ ret = r;
}
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}
@@ -952,31 +1370,113 @@ int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (!adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already started!\n");
adev->userq_halt_for_enforce_isolation = false;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
- r = amdgpu_userq_map_helper(uqm, queue);
+ r = amdgpu_userq_restore_helper(uqm, queue);
if (r)
ret = r;
}
- }
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}
+
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_bo_va *bo_va = mapping->bo_va;
+ struct dma_resv *resv = bo_va->base.bo->tbo.base.resv;
+ int ret = 0;
+
+ if (!ip_mask)
+ return 0;
+
+ dev_warn_once(adev->dev, "now unmapping a vital queue va:%llx\n", saddr);
+ /**
+ * The userq VA mapping reservation should include the eviction fence,
+ * if the eviction fence can't signal successfully during unmapping,
+ * then driver will warn to flag this improper unmap of the userq VA.
+ * Note: The eviction fence may be attached to different BOs, and this
+ * unmap is only for one kind of userq VAs, so at this point suppose
+ * the eviction fence is always unsignaled.
+ */
+ if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_BOOKKEEP)) {
+ ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret <= 0)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void amdgpu_userq_pre_reset(struct amdgpu_device *adev)
+{
+ const struct amdgpu_userq_funcs *userq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ cancel_delayed_work_sync(&uqm->resume_work);
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ amdgpu_userq_wait_for_last_fence(uqm, queue);
+ userq_funcs = adev->userq_funcs[queue->queue_type];
+ userq_funcs->unmap(uqm, queue);
+ /* Just mark all queues as hung at this point.
+ * If the unmap succeeds, they can be mapped again
+ * in amdgpu_userq_post_reset() if VRAM is not lost.
+ */
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ amdgpu_userq_fence_driver_force_completion(queue);
+ }
+ }
+}
+
+int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost)
+{
+ /* If any queue is in AMDGPU_USERQ_STATE_HUNG
+ * at this point, we should be able to map it again
+ * and continue if VRAM is not lost.
+ */
+ struct amdgpu_userq_mgr *uqm;
+ struct amdgpu_usermode_queue *queue;
+ const struct amdgpu_userq_funcs *userq_funcs;
+ unsigned long queue_id;
+ int r = 0;
+
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
+ if (queue->state == AMDGPU_USERQ_STATE_HUNG && !vram_lost) {
+ userq_funcs = adev->userq_funcs[queue->queue_type];
+ /* Re-map queue */
+ r = userq_funcs->map(uqm, queue);
+ if (r) {
+ dev_err(adev->dev, "Failed to remap queue %ld\n", queue_id);
+ continue;
+ }
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
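
These two entry points bracket a GPU reset; a hedged sketch of the expected call order from the reset path (the exact call sites are outside this diff):

        amdgpu_userq_pre_reset(adev);   /* unmap queues, mark HUNG, force-complete fences */
        /* ... hardware reset runs; vram_lost determined by the reset code ... */
        r = amdgpu_userq_post_reset(adev, vram_lost);   /* remap HUNG queues if VRAM survived */
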
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
index b1ca91b7cda4..c37444427a14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -37,6 +37,7 @@ enum amdgpu_userq_state {
AMDGPU_USERQ_STATE_MAPPED,
AMDGPU_USERQ_STATE_PREEMPTED,
AMDGPU_USERQ_STATE_HUNG,
+ AMDGPU_USERQ_STATE_INVALID_VA,
};
struct amdgpu_mqd_prop;
@@ -47,6 +48,11 @@ struct amdgpu_userq_obj {
struct amdgpu_bo *obj;
};
+struct amdgpu_userq_va_cursor {
+ u64 gpu_addr;
+ struct list_head list;
+};
+
struct amdgpu_usermode_queue {
int queue_type;
enum amdgpu_userq_state state;
@@ -66,6 +72,8 @@ struct amdgpu_usermode_queue {
u32 xcp_id;
int priority;
struct dentry *debugfs_queue;
+
+ struct list_head userq_va_list;
};
struct amdgpu_userq_funcs {
@@ -78,16 +86,27 @@ struct amdgpu_userq_funcs {
struct amdgpu_usermode_queue *queue);
int (*map)(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_usermode_queue *queue);
+ int (*preempt)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*restore)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*detect_and_reset)(struct amdgpu_device *adev,
+ int queue_type);
};
/* Usermode queues for gfx */
struct amdgpu_userq_mgr {
- struct idr userq_idr;
+ /**
+ * @userq_mgr_xa: Per-process user queue map (queue ID → queue)
+ * Key: queue_id (unique ID within the process's userq manager)
+ * Value: struct amdgpu_usermode_queue
+ */
+ struct xarray userq_mgr_xa;
struct mutex userq_mutex;
struct amdgpu_device *adev;
struct delayed_work resume_work;
- struct list_head list;
struct drm_file *file;
+ atomic_t userq_count[AMDGPU_RING_TYPE_MAX];
};
struct amdgpu_db_info {
@@ -114,8 +133,6 @@ void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence *ev_fence);
-int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr);
-
void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
struct amdgpu_eviction_fence_mgr *evf_mgr);
@@ -132,5 +149,13 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx);
-
+void amdgpu_userq_reset_work(struct work_struct *work);
+void amdgpu_userq_pre_reset(struct amdgpu_device *adev);
+int amdgpu_userq_post_reset(struct amdgpu_device *adev, bool vram_lost);
+
+int amdgpu_userq_input_va_validate(struct amdgpu_usermode_queue *queue,
+ u64 addr, u64 expected_size);
+int amdgpu_userq_gem_va_unmap_validate(struct amdgpu_device *adev,
+ struct amdgpu_bo_va_mapping *mapping,
+ uint64_t saddr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index c2a983ff23c9..eba9fb359047 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -67,6 +67,14 @@ static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
return le64_to_cpu(*fence_drv->cpu_addr);
}
+static void
+amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
+ u64 seq)
+{
+ if (fence_drv->cpu_addr)
+ *fence_drv->cpu_addr = cpu_to_le64(seq);
+}
+
int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *userq)
{
@@ -143,15 +151,16 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
{
struct amdgpu_userq_fence *userq_fence, *tmp;
struct dma_fence *fence;
+ unsigned long flags;
u64 rptr;
int i;
if (!fence_drv)
return;
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
rptr = amdgpu_userq_fence_read(fence_drv);
- spin_lock(&fence_drv->fence_list_lock);
list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
fence = &userq_fence->base;
@@ -166,7 +175,7 @@ void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_d
list_del(&userq_fence->link);
dma_fence_put(fence);
}
- spin_unlock(&fence_drv->fence_list_lock);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
}
void amdgpu_userq_fence_driver_destroy(struct kref *ref)
@@ -276,7 +285,7 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
/* Check if hardware has already processed the job */
spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
- if (!dma_fence_is_signaled_locked(fence))
+ if (!dma_fence_is_signaled(fence))
list_add_tail(&userq_fence->link, &fence_drv->fences);
else
dma_fence_put(fence);
@@ -378,6 +387,7 @@ static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
amdgpu_bo_unreserve(queue->vm->root.bo);
r = amdgpu_bo_reserve(bo, true);
if (r) {
+ amdgpu_bo_unref(&bo);
DRM_ERROR("Failed to reserve userqueue wptr bo");
return r;
}
@@ -408,6 +418,40 @@ static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
dma_fence_put(fence);
}
+static void
+amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
+ int error)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ unsigned long flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+
+ f = rcu_dereference_protected(&fence->base,
+ lockdep_is_held(&fence_drv->fence_list_lock));
+ if (f && !dma_fence_is_signaled_locked(f))
+ dma_fence_set_error(f, error);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+}
+
+void
+amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
+{
+ struct dma_fence *f = userq->last_fence;
+
+ if (f) {
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 wptr = fence->base.seqno;
+
+ amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
+ amdgpu_userq_fence_write(fence_drv, wptr);
+ amdgpu_userq_fence_driver_process(fence_drv);
+
+ }
+}
+
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -495,7 +539,7 @@ int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
}
/* Retrieve the user queue */
- queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ queue = xa_load(&userq_mgr->userq_mgr_xa, args->queue_id);
if (!queue) {
r = -ENOENT;
goto put_gobj_write;
@@ -857,7 +901,7 @@ int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
*/
num_fences = dma_fence_dedup_array(fences, num_fences);
- waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ waitq = xa_load(&userq_mgr->userq_mgr_xa, wait_info->waitq_id);
if (!waitq) {
r = -EINVAL;
goto free_fences;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
index 97a125ab8a78..d76add2afc77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -67,6 +67,7 @@ int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq);
void amdgpu_userq_fence_driver_destroy(struct kref *ref);
int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
new file mode 100644
index 000000000000..1e40ca3b1584
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_UTILS_H_
+#define AMDGPU_UTILS_H_
+
+/* ---------- Generic 2-bit capability attribute encoding ----------
+ * 00 INVALID, 01 RO, 10 WO, 11 RW
+ */
+enum amdgpu_cap_attr {
+ AMDGPU_CAP_ATTR_INVALID = 0,
+ AMDGPU_CAP_ATTR_RO = 1 << 0,
+ AMDGPU_CAP_ATTR_WO = 1 << 1,
+ AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO),
+};
+
+#define AMDGPU_CAP_ATTR_BITS 2
+#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1)
+
+/* Internal macro that generates the helpers for a given enum NAME */
+#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \
+enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \
+struct NAME##_caps { \
+ DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \
+}; \
+static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \
+{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \
+static inline void NAME##_attr_init(struct NAME##_caps *c) \
+{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \
+static inline int NAME##_attr_set(struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \
+{ \
+ if (!c) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \
+ return -EINVAL; \
+ bitmap_write(c->bmap, (unsigned long)attr, \
+ NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ return 0; \
+} \
+static inline int NAME##_attr_get(const struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \
+{ \
+ unsigned long v; \
+ if (!c || !out) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ *out = (enum amdgpu_cap_attr)v; \
+ return 0; \
+} \
+static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \
+static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \
+static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; }
+
+/* Element expander for enum creation */
+#define _CAP_ENUM_ELEM(x) x,
+
+/* Public macro: declare enum + helpers from an X-macro list */
+#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \
+ enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \
+ DECLARE_ATTR_CAP_CLASS_HELPERS(NAME)
+
+#endif /* AMDGPU_UTILS_H_ */
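
A usage sketch of the X-macro interface above; the FOO_* names are illustrative only, not part of the patch:

        #define FOO_CAP_IDS(X)          \
                X(FOO_CAP_DOORBELL)     \
                X(FOO_CAP_MES)

        DECLARE_ATTR_CAP_CLASS(foo, FOO_CAP_IDS)
        /* expands to enum foo_cap_id { FOO_CAP_DOORBELL, FOO_CAP_MES, foo_COUNT };
         * plus struct foo_caps, foo_attr_init/foo_attr_set/foo_attr_get and the
         * foo_cap_is_ro/wo/rw predicates.
         */

        struct foo_caps caps;

        foo_attr_init(&caps);
        foo_attr_set(&caps, FOO_CAP_MES, AMDGPU_CAP_ATTR_RW);
        if (foo_cap_is_rw(&caps, FOO_CAP_MES))
                ; /* capability is readable and writable */
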
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 74758b5ffc6c..5c38f0d30c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1136,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
64, direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b9060bcd4806..a7d8f1ce6ac2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -41,6 +41,9 @@
#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000)
/* Firmware Names */
+#ifdef CONFIG_DRM_AMDGPU_SI
+#define FIRMWARE_VCE_V1_0 "amdgpu/vce_1_0_0.bin"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
@@ -61,6 +64,9 @@
#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin"
+#ifdef CONFIG_DRM_AMDGPU_SI
+MODULE_FIRMWARE(FIRMWARE_VCE_V1_0);
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
@@ -88,82 +94,93 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
/**
- * amdgpu_vce_sw_init - allocate memory, load vce firmware
+ * amdgpu_vce_firmware_name() - determine the firmware file name for VCE
*
* @adev: amdgpu_device pointer
- * @size: size for the new BO
*
- * First step to get VCE online, allocate memory and load the firmware
+ * Each chip that has VCE IP may need a different firmware.
+ * This function returns the name of the VCE firmware file
+ * appropriate for the current chip.
*/
-int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+static const char *amdgpu_vce_firmware_name(struct amdgpu_device *adev)
{
- const char *fw_name;
- const struct common_firmware_header *hdr;
- unsigned int ucode_version, version_major, version_minor, binary_id;
- int i, r;
-
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_PITCAIRN:
+ case CHIP_TAHITI:
+ case CHIP_VERDE:
+ return FIRMWARE_VCE_V1_0;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
- fw_name = FIRMWARE_BONAIRE;
- break;
+ return FIRMWARE_BONAIRE;
case CHIP_KAVERI:
- fw_name = FIRMWARE_KAVERI;
- break;
+ return FIRMWARE_KAVERI;
case CHIP_KABINI:
- fw_name = FIRMWARE_KABINI;
- break;
+ return FIRMWARE_KABINI;
case CHIP_HAWAII:
- fw_name = FIRMWARE_HAWAII;
- break;
+ return FIRMWARE_HAWAII;
case CHIP_MULLINS:
- fw_name = FIRMWARE_MULLINS;
- break;
+ return FIRMWARE_MULLINS;
#endif
case CHIP_TONGA:
- fw_name = FIRMWARE_TONGA;
- break;
+ return FIRMWARE_TONGA;
case CHIP_CARRIZO:
- fw_name = FIRMWARE_CARRIZO;
- break;
+ return FIRMWARE_CARRIZO;
case CHIP_FIJI:
- fw_name = FIRMWARE_FIJI;
- break;
+ return FIRMWARE_FIJI;
case CHIP_STONEY:
- fw_name = FIRMWARE_STONEY;
- break;
+ return FIRMWARE_STONEY;
case CHIP_POLARIS10:
- fw_name = FIRMWARE_POLARIS10;
- break;
+ return FIRMWARE_POLARIS10;
case CHIP_POLARIS11:
- fw_name = FIRMWARE_POLARIS11;
- break;
+ return FIRMWARE_POLARIS11;
case CHIP_POLARIS12:
- fw_name = FIRMWARE_POLARIS12;
- break;
+ return FIRMWARE_POLARIS12;
case CHIP_VEGAM:
- fw_name = FIRMWARE_VEGAM;
- break;
+ return FIRMWARE_VEGAM;
case CHIP_VEGA10:
- fw_name = FIRMWARE_VEGA10;
- break;
+ return FIRMWARE_VEGA10;
case CHIP_VEGA12:
- fw_name = FIRMWARE_VEGA12;
- break;
+ return FIRMWARE_VEGA12;
case CHIP_VEGA20:
- fw_name = FIRMWARE_VEGA20;
- break;
+ return FIRMWARE_VEGA20;
default:
- return -EINVAL;
+ return NULL;
}
+}
+
+/**
+ * amdgpu_vce_early_init() - try to load VCE firmware
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tries to load the VCE firmware.
+ *
+ * When the firmware is not found, this returns -ENOENT so that the
+ * driver can still load and initialize the rest of the IP blocks.
+ * The GPU can function just fine without VCE; it will simply not
+ * support video encoding.
+ */
+int amdgpu_vce_early_init(struct amdgpu_device *adev)
+{
+ const char *fw_name = amdgpu_vce_firmware_name(adev);
+ const struct common_firmware_header *hdr;
+ unsigned int ucode_version, version_major, version_minor, binary_id;
+ int r;
+
+ if (!fw_name)
+ return -ENOENT;
r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
- dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
- fw_name);
+ dev_err(adev->dev,
+ "amdgpu_vce: Firmware \"%s\" not found or failed to validate (%d)\n",
+ fw_name, r);
+
amdgpu_ucode_release(&adev->vce.fw);
- return r;
+ return -ENOENT;
}
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
@@ -172,11 +189,35 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
version_major = (ucode_version >> 20) & 0xfff;
version_minor = (ucode_version >> 8) & 0xfff;
binary_id = ucode_version & 0xff;
- DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
+ dev_info(adev->dev, "Found VCE firmware Version: %d.%d Binary ID: %d\n",
version_major, version_minor, binary_id);
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
(binary_id << 8));
+ return 0;
+}
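
A sketch of the intended caller pattern (vce_vX_0 is a placeholder name; this relies on amdgpu_device_ip_early_init() treating -ENOENT from an early_init callback as "mark the block invalid and keep probing", which is existing core behaviour, and a real callback would also set up ring counts etc.):

        static int vce_vX_0_early_init(struct amdgpu_ip_block *ip_block)
        {
                struct amdgpu_device *adev = ip_block->adev;

                /* -ENOENT propagates up: VCE is skipped, the other IPs load */
                return amdgpu_vce_early_init(adev);
        }
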
+
+/**
+ * amdgpu_vce_sw_init() - allocate memory for VCE BO
+ *
+ * @adev: amdgpu_device pointer
+ * @size: size for the new BO
+ *
+ * First step to get VCE online: allocate memory for VCE BO.
+ * The VCE firmware binary is copied into the VCE BO later,
+ * in amdgpu_vce_resume. The VCE executes its code from the
+ * VCE BO and also uses the space in this BO for its stack and data.
+ *
+ * Ideally this BO should be placed in VRAM for optimal performance,
+ * although technically it also runs from system RAM (albeit slowly).
+ */
+int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
+{
+ int i, r;
+
+ if (!adev->vce.fw)
+ return -ENOENT;
+
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
@@ -285,40 +326,23 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
*/
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
- void *cpu_addr;
const struct common_firmware_header *hdr;
unsigned int offset;
- int r, idx;
+ int idx;
if (adev->vce.vcpu_bo == NULL)
return -EINVAL;
- r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
- if (r) {
- dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
- return r;
- }
-
- r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->vce.vcpu_bo);
- dev_err(adev->dev, "(%d) VCE map failed\n", r);
- return r;
- }
-
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+ memset_io(adev->vce.cpu_addr, 0, amdgpu_bo_size(adev->vce.vcpu_bo));
+ memcpy_toio(adev->vce.cpu_addr, adev->vce.fw->data + offset,
adev->vce.fw->size - offset);
drm_dev_exit(idx);
}
- amdgpu_bo_kunmap(adev->vce.vcpu_bo);
-
- amdgpu_bo_unreserve(adev->vce.vcpu_bo);
-
return 0;
}
@@ -427,6 +451,24 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
/**
+ * amdgpu_vce_required_gart_pages() - gets number of GART pages required by VCE
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns how many GART pages must be reserved ahead of GTT for the VCE IP block.
+ * For VCE1, see vce_v1_0_ensure_vcpu_bo_32bit_addr for details.
+ * For VCE2+, this is not needed so return zero.
+ */
+u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev)
+{
+ /* VCE IP block not added yet, so can't use amdgpu_ip_version */
+ if (adev->family == AMDGPU_FAMILY_SI)
+ return 512;
+
+ return 0;
+}
+
+/**
* amdgpu_vce_get_create_msg - generate a VCE create msg
*
* @ring: ring we should submit the msg to
@@ -449,7 +491,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -540,7 +582,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
AMDGPU_FENCE_OWNER_UNDEFINED,
ib_size_dw * 4,
direct ? AMDGPU_IB_POOL_DIRECT :
- AMDGPU_IB_POOL_DELAYED, &job);
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 6e53f872d084..1c3464ce5037 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -51,14 +51,17 @@ struct amdgpu_vce {
struct drm_sched_entity entity;
uint32_t srbm_soft_reset;
unsigned num_rings;
+ uint32_t keyselect;
};
+int amdgpu_vce_early_init(struct amdgpu_device *adev);
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
+u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
struct amdgpu_ib *ib);
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f1f67521c29c..5e0786ea911b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -92,6 +92,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
{
@@ -184,16 +185,16 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
dev_info(adev->dev,
- "Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
- enc_major, enc_minor, dec_ver, vep, fw_rev);
+ "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ i, enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- dev_info(adev->dev, "Found VCN firmware Version: %u.%u Family ID: %u\n",
- version_major, version_minor, family_id);
+ dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
+ i, version_major, version_minor, family_id);
}
bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
@@ -256,12 +257,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
return 0;
}
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
{
int j;
if (adev->vcn.harvest_config & (1 << i))
- return 0;
+ return;
amdgpu_bo_free_kernel(
&adev->vcn.inst[i].dpg_sram_bo,
@@ -285,10 +286,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
amdgpu_ucode_release(&adev->vcn.inst[0].fw);
adev->vcn.inst[i].fw = NULL;
}
+
+ if (adev->vcn.reg_list)
+ amdgpu_vcn_reg_dump_fini(adev);
+
mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
-
- return 0;
}
bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
@@ -352,8 +355,6 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
if (adev->vcn.harvest_config & (1 << i))
return 0;
- cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work);
-
/* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
* restore fw data and clear buffer in amdgpu_vcn_resume() */
if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
@@ -405,6 +406,54 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
return 0;
}
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
+{
+ int r;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+
+ if (adev->vcn.workload_profile_active) {
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ return;
+ }
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev,
+ "(%d) failed to enable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = true;
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
+{
+ bool pg = true;
+ int r, i;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
+ pg = false;
+ break;
+ }
+ }
+
+ if (pg) {
+ r = amdgpu_dpm_switch_power_profile(
+ adev, PP_SMC_POWER_PROFILE_VIDEO, false);
+ if (r)
+ dev_warn(
+ adev->dev,
+ "(%d) failed to disable video power profile mode\n",
+ r);
+ else
+ adev->vcn.workload_profile_active = false;
+ }
+
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
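
The get/put pair is not reference-counted: get enables the VIDEO profile once, and put only drops it after every VCN instance has reached AMD_PG_STATE_GATE. The call sites wired up later in this patch pair them like this:

        amdgpu_vcn_get_profile(adev);   /* amdgpu_vcn_ring_begin_use(), after ungating */
        ...
        amdgpu_vcn_put_profile(adev);   /* idle worker, after gating the instance */
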
static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
{
struct amdgpu_vcn_inst *vcn_inst =
@@ -412,7 +461,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i = vcn_inst->inst, j;
- int r = 0;
if (adev->vcn.harvest_config & (1 << i))
return;
@@ -438,16 +486,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += fence[i];
if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_put_profile(adev);
+
} else {
schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
}
@@ -457,30 +500,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&vcn_inst->total_submission_cnt);
cancel_delayed_work_sync(&vcn_inst->idle_work);
- /* We can safely return early here because we've cancelled the
- * the delayed work so there is no one else to set it to false
- * and we don't care if someone else sets it to true.
- */
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&vcn_inst->vcn_pg_lock);
vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
@@ -508,6 +532,7 @@ pg_lock:
vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
}
mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
@@ -601,7 +626,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
64, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -781,7 +806,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
goto err;
@@ -911,7 +936,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -978,7 +1003,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
- &job);
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -1132,7 +1157,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
{
struct amdgpu_vcn_inst *vcn;
void *log_buf;
- volatile struct amdgpu_vcn_fwlog *plog;
+ struct amdgpu_vcn_fwlog *plog;
unsigned int read_pos, write_pos, available, i, read_bytes = 0;
unsigned int read_num[2] = {0};
@@ -1145,7 +1170,7 @@ static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
- plog = (volatile struct amdgpu_vcn_fwlog *)log_buf;
+ plog = (struct amdgpu_vcn_fwlog *)log_buf;
read_pos = plog->rptr;
write_pos = plog->wptr;
@@ -1212,11 +1237,11 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
{
#if defined(CONFIG_DEBUG_FS)
- volatile uint32_t *flag = vcn->fw_shared.cpu_addr;
+ uint32_t *flag = vcn->fw_shared.cpu_addr;
void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
- volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
- volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
+ struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ vcn->fw_shared.log_offset;
*flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
fw_log->is_enabled = 1;
@@ -1527,3 +1552,86 @@ int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
return amdgpu_vcn_reset_engine(adev, ring->me);
}
+
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->vcn.ip_dump)
+ return -ENOMEM;
+ adev->vcn.reg_list = reg;
+ adev->vcn.reg_count = count;
+
+ return 0;
+}
+
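
A hedged sketch of how an IP version would use this (the register list is illustrative; SOC15_REG_ENTRY_STR() is the existing helper for building such lists, and the dump code below requires the power-status register to be first in the list):

        static const struct amdgpu_hwip_reg_entry vcn_reg_list[] = {
                SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_POWER_STATUS),
                SOC15_REG_ENTRY_STR(VCN, 0, mmUVD_STATUS),
        };

        /* in the IP block's sw_init: */
        r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list, ARRAY_SIZE(vcn_reg_list));
        if (r)
                return r;

        /* and in its amd_ip_funcs: */
        .dump_ip_state = amdgpu_vcn_dump_ip_state,
        .print_ip_state = amdgpu_vcn_print_ip_state,
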
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->vcn.ip_dump);
+ adev->vcn.ip_dump = NULL;
+ adev->vcn.reg_list = NULL;
+ adev->vcn.reg_count = 0;
+}
+
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * adev->vcn.reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
+ adev->vcn.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered)
+ for (j = 1; j < adev->vcn.reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
+ }
+}
+
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->vcn.reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < adev->vcn.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 0bc0a94d7cf0..82624b44e661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -100,7 +100,8 @@
#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
@@ -161,7 +162,8 @@
#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
({ \
- uint32_t internal_reg_offset, addr; \
+ /* To avoid a -Wunused-but-set-variable warning. */ \
+ uint32_t internal_reg_offset __maybe_unused, addr; \
bool video_range, video1_range, aon_range, aon1_range; \
\
addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
@@ -237,6 +239,8 @@
#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+struct amdgpu_hwip_reg_entry;
+
enum amdgpu_vcn_caps {
AMDGPU_VCN_RRMT_ENABLED,
};
@@ -362,6 +366,8 @@ struct amdgpu_vcn {
bool workload_profile_active;
struct mutex workload_profile_mutex;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
@@ -497,7 +503,7 @@ struct amdgpu_vcn5_fw_shared {
struct amdgpu_fw_shared_rb_setup rb_setup;
struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
- uint8_t pad3[9];
+ uint8_t pad3[404];
};
#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
@@ -512,7 +518,7 @@ enum vcn_ring_type {
int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
-int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
@@ -557,4 +563,11 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
unsigned int vmid,
struct amdgpu_fence *guilty_fence);
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 13f0cdeb59c4..47a6ce4fdc74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -44,6 +44,18 @@
vf2pf_info->ucode_info[ucode].version = ver; \
} while (0)
+#define mmRCC_CONFIG_MEMSIZE 0xde3
+
+const char *amdgpu_virt_dynamic_crit_table_name[] = {
+ "IP DISCOVERY",
+ "VBIOS IMG",
+ "RAS TELEMETRY",
+ "DATA EXCHANGE",
+ "BAD PAGE INFO",
+ "INIT HEADER",
+ "LAST",
+};
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
/* By now all MMIO pages except mailbox are blocked */
@@ -150,9 +162,10 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
virt->ops->req_init_data(adev);
if (adev->virt.req_init_data_ver > 0)
- DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+ dev_info(adev->dev, "host supports REQ_INIT_DATA handshake of critical_region_version %d\n",
+ adev->virt.req_init_data_ver);
else
- DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+ dev_warn(adev->dev, "host doesn't support REQ_INIT_DATA handshake\n");
}
/**
@@ -205,12 +218,12 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
if (r) {
- DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
+ dev_err(adev->dev, "failed to alloc mm table and error = %d.\n", r);
return r;
}
memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
- DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
+ dev_info(adev->dev, "MM table gpu addr = 0x%llx, cpu addr = %p.\n",
adev->virt.mm_table.gpu_addr,
adev->virt.mm_table.cpu_addr);
return 0;
@@ -390,7 +403,9 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GPU_PAGE_SIZE,
&bo, NULL))
- DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ dev_dbg(adev->dev,
+ "RAS WARN: reserve vram for retired page %llx fail\n",
+ bp);
data->bps_bo[i] = bo;
}
data->last_reserved = i + 1;
@@ -598,8 +613,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->driver_cert = 0;
vf2pf_info->os_info.all = 0;
- vf2pf_info->fb_usage =
- ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
+ vf2pf_info->fb_usage = ttm_resource_manager_used(&adev->mman.vram_mgr.manager) ?
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20 : 0;
vf2pf_info->fb_vis_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
@@ -658,10 +673,34 @@ out:
schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
}
+static int amdgpu_virt_read_exchange_data_from_mem(struct amdgpu_device *adev, uint32_t *pfvf_data)
+{
+ uint32_t dataexchange_offset =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset;
+ uint32_t dataexchange_size =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10;
+ uint64_t pos = 0;
+
+ dev_info(adev->dev,
+ "Got data exchange info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
+ dataexchange_offset, dataexchange_size);
+
+ if (!IS_ALIGNED(dataexchange_offset, 4) || !IS_ALIGNED(dataexchange_size, 4)) {
+ dev_err(adev->dev, "Data exchange data not aligned to 4 bytes\n");
+ return -EINVAL;
+ }
+
+ pos = (uint64_t)dataexchange_offset;
+ amdgpu_device_vram_access(adev, pos, pfvf_data,
+ dataexchange_size, false);
+
+ return 0;
+}
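The 4-byte alignment check above exists because amdgpu_device_vram_access() transfers whole dwords; a sketch of the same test with the kernel's IS_ALIGNED macro (from linux/align.h) written out:

/* IS_ALIGNED(x, 4) expands to ((x & 3) == 0), so the check is simply: */
if ((dataexchange_offset & 3) || (dataexchange_size & 3))
	return -EINVAL;	/* offset and size must both be dword-aligned */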
+
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
if (adev->virt.vf2pf_update_interval_ms != 0) {
- DRM_INFO("clean up the vf2pf work item\n");
+ dev_info(adev->dev, "clean up the vf2pf work item\n");
cancel_delayed_work_sync(&adev->virt.vf2pf_work);
adev->virt.vf2pf_update_interval_ms = 0;
}
@@ -669,13 +708,15 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
+ uint32_t *pfvf_data = NULL;
+
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
adev->virt.vf2pf_update_retry_cnt = 0;
if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
- DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
+ dev_warn(adev->dev, "Currently fw_vram and drv_vram should not have values at the same time!");
} else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
/* go through this logic in ip_init and reset to init the workqueue */
amdgpu_virt_exchange_data(adev);
@@ -684,11 +725,34 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
} else if (adev->bios != NULL) {
/* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ pfvf_data =
+ kzalloc(adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb << 10,
+ GFP_KERNEL);
+ if (!pfvf_data) {
+ dev_err(adev->dev, "Failed to allocate memory for pfvf_data\n");
+ return;
+ }
- amdgpu_virt_read_pf2vf_data(adev);
+ if (amdgpu_virt_read_exchange_data_from_mem(adev, pfvf_data))
+ goto free_pfvf_data;
+
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)pfvf_data;
+
+ amdgpu_virt_read_pf2vf_data(adev);
+
+free_pfvf_data:
+ kfree(pfvf_data);
+ pfvf_data = NULL;
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ }
}
}
@@ -701,23 +765,38 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
if (adev->mman.fw_vram_usage_va) {
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
- adev->virt.fw_reserve.p_vf2pf =
- (struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
- adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ if (adev->virt.req_init_data_ver == GPU_CRIT_REGION_V2) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset);
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset +
+ (AMD_SRIOV_MSG_SIZE_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va +
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset);
+ } else {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
+ }
} else if (adev->mman.drv_vram_usage_va) {
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.p_vf2pf =
(struct amd_sriov_msg_vf2pf_info_header *)
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 << 10));
adev->virt.fw_reserve.ras_telemetry =
- (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 << 10));
}
amdgpu_virt_read_pf2vf_data(adev);
@@ -816,7 +895,7 @@ static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
break;
default: /* other chip doesn't support SRIOV */
is_sriov = false;
- DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
+ dev_err(adev->dev, "Unknown asic type: %d!\n", adev->asic_type);
break;
}
}
@@ -828,17 +907,230 @@ static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
{
ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
RATELIMIT_MSG_ON_RELEASE);
ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
+ RATELIMIT_MSG_ON_RELEASE);
mutex_init(&adev->virt.ras.ras_telemetry_mutex);
+ mutex_init(&adev->virt.access_req_mutex);
adev->virt.ras.cper_rptr = 0;
}
+static uint8_t amdgpu_virt_crit_region_calc_checksum(uint8_t *buf_start, uint8_t *buf_end)
+{
+ uint32_t sum = 0;
+
+ if (buf_start >= buf_end)
+ return 0;
+
+ for (; buf_start < buf_end; buf_start++)
+ sum += buf_start[0];
+
+ return 0xffffffff - sum;
+}
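The same calculation as a self-contained helper; note that the uint32_t sum is truncated to 8 bits by the return type, so the result equals the low byte of ~sum:

#include <stdint.h>
#include <stddef.h>

static uint8_t crit_region_checksum(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;

	while (len--)
		sum += *p++;

	/* truncated to 8 bits; equivalent to (uint8_t)~sum */
	return (uint8_t)(0xffffffff - sum);
}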
+
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_init_data_header *init_data_hdr = NULL;
+ u64 init_hdr_offset = adev->virt.init_data_header.offset;
+ u64 init_hdr_size = (u64)adev->virt.init_data_header.size_kb << 10; /* KB -> bytes */
+ u64 vram_size;
+ u64 end;
+ int r = 0;
+ uint8_t checksum = 0;
+
+ /* Skip below init if critical region version != v2 */
+ if (adev->virt.req_init_data_ver != GPU_CRIT_REGION_V2)
+ return 0;
+
+ if (!init_hdr_offset) {
+ dev_err(adev->dev, "Invalid init header offset\n");
+ return -EINVAL;
+ }
+
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ return -EINVAL;
+ vram_size <<= 20;
+
+ if (check_add_overflow(init_hdr_offset, init_hdr_size, &end) || end > vram_size) {
+ dev_err(adev->dev, "init_data_header exceeds VRAM size, exiting\n");
+ return -EINVAL;
+ }
+
+ /* Allocate for init_data_hdr */
+ init_data_hdr = kzalloc(sizeof(struct amd_sriov_msg_init_data_header), GFP_KERNEL);
+ if (!init_data_hdr)
+ return -ENOMEM;
+
+ amdgpu_device_vram_access(adev, (uint64_t)init_hdr_offset, (uint32_t *)init_data_hdr,
+ sizeof(struct amd_sriov_msg_init_data_header), false);
+
+ /* Table validation */
+ if (strncmp(init_data_hdr->signature,
+ AMDGPU_SRIOV_CRIT_DATA_SIGNATURE,
+ AMDGPU_SRIOV_CRIT_DATA_SIG_LEN) != 0) {
+ dev_err(adev->dev, "Invalid init data signature: %.4s\n",
+ init_data_hdr->signature);
+ r = -EINVAL;
+ goto out;
+ }
+
+ checksum = amdgpu_virt_crit_region_calc_checksum(
+ (uint8_t *)&init_data_hdr->initdata_offset,
+ (uint8_t *)init_data_hdr +
+ sizeof(struct amd_sriov_msg_init_data_header));
+ if (checksum != init_data_hdr->checksum) {
+ dev_err(adev->dev, "Found unmatching checksum from calculation 0x%x and init_data 0x%x\n",
+ checksum, init_data_hdr->checksum);
+ r = -EINVAL;
+ goto out;
+ }
+
+ memset(&adev->virt.crit_regn, 0, sizeof(adev->virt.crit_regn));
+ memset(adev->virt.crit_regn_tbl, 0, sizeof(adev->virt.crit_regn_tbl));
+
+ adev->virt.crit_regn.offset = init_data_hdr->initdata_offset;
+ adev->virt.crit_regn.size_kb = init_data_hdr->initdata_size_in_kb;
+
+ /* Validation and initialization for each table entry */
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_IPD_TABLE_ID)) {
+ if (!init_data_hdr->ip_discovery_size_in_kb ||
+ init_data_hdr->ip_discovery_size_in_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_IPD_TABLE_ID],
+ init_data_hdr->ip_discovery_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset =
+ init_data_hdr->ip_discovery_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb =
+ init_data_hdr->ip_discovery_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID)) {
+ if (!init_data_hdr->vbios_img_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID],
+ init_data_hdr->vbios_img_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].offset =
+ init_data_hdr->vbios_img_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb =
+ init_data_hdr->vbios_img_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID)) {
+ if (!init_data_hdr->ras_tele_info_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID],
+ init_data_hdr->ras_tele_info_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].offset =
+ init_data_hdr->ras_tele_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID].size_kb =
+ init_data_hdr->ras_tele_info_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID)) {
+ if (!init_data_hdr->dataexchange_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID],
+ init_data_hdr->dataexchange_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].offset =
+ init_data_hdr->dataexchange_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID].size_kb =
+ init_data_hdr->dataexchange_size_in_kb;
+ }
+
+ if (IS_SRIOV_CRIT_REGN_ENTRY_VALID(init_data_hdr, AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID)) {
+ if (!init_data_hdr->bad_page_size_in_kb) {
+ dev_err(adev->dev, "Invalid %s size: 0x%x\n",
+ amdgpu_virt_dynamic_crit_table_name[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID],
+ init_data_hdr->bad_page_size_in_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].offset =
+ init_data_hdr->bad_page_info_offset;
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID].size_kb =
+ init_data_hdr->bad_page_size_in_kb;
+ }
+
+ /* Validation for critical region info */
+ if (adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb > DISCOVERY_TMR_SIZE) {
+ dev_err(adev->dev, "Invalid IP discovery size: 0x%x\n",
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb);
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* reserved memory starts at the crit region base offset with a size of 5 MB */
+ adev->mman.fw_vram_usage_start_offset = adev->virt.crit_regn.offset;
+ adev->mman.fw_vram_usage_size = adev->virt.crit_regn.size_kb << 10;
+ dev_info(adev->dev,
+ "critical region v%d requested to reserve memory start at %08llx with %llu KB.\n",
+ init_data_hdr->version,
+ adev->mman.fw_vram_usage_start_offset,
+ adev->mman.fw_vram_usage_size >> 10);
+
+ adev->virt.is_dynamic_crit_regn_enabled = true;
+
+out:
+ kfree(init_data_hdr);
+ init_data_hdr = NULL;
+
+ return r;
+}
+
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size)
+{
+ uint32_t data_offset = 0;
+ uint32_t data_size = 0;
+ enum amd_sriov_msg_table_id_enum data_table_id = data_id;
+
+ if (data_table_id >= AMD_SRIOV_MSG_MAX_TABLE_ID)
+ return -EINVAL;
+
+ data_offset = adev->virt.crit_regn_tbl[data_table_id].offset;
+ data_size = adev->virt.crit_regn_tbl[data_table_id].size_kb << 10;
+
+ /* Validate on input params */
+ if (!binary || !size || *size < (uint64_t)data_size)
+ return -EINVAL;
+
+ /* Proceed to copy the dynamic content */
+ amdgpu_device_vram_access(adev,
+ (uint64_t)data_offset, (uint32_t *)binary, data_size, false);
+ *size = (uint64_t)data_size;
+
+ dev_dbg(adev->dev,
+ "Got %s info from dynamic crit_region_table at offset 0x%x with size of 0x%x bytes.\n",
+ amdgpu_virt_dynamic_crit_table_name[data_id], data_offset, data_size);
+
+ return 0;
+}
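A hypothetical caller, fetching the VBIOS image table; buffer sizing mirrors the size_kb bookkeeping above:

u32 size = adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID].size_kb << 10;
uint8_t *img = kzalloc(size, GFP_KERNEL);

if (img && !amdgpu_virt_get_dynamic_data_info(adev, AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID,
					      img, &size)) {
	/* img now holds size bytes copied out of the critical region */
}
kfree(img);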
+
void amdgpu_virt_init(struct amdgpu_device *adev)
{
bool is_sriov = false;
@@ -1286,7 +1578,7 @@ amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block bloc
case AMDGPU_RAS_BLOCK__MPIO:
return RAS_TELEMETRY_GPU_BLOCK_MPIO;
default:
- DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n",
+ dev_warn(adev->dev, "Unsupported SRIOV RAS telemetry block 0x%x\n",
block);
return RAS_TELEMETRY_GPU_BLOCK_COUNT;
}
@@ -1301,7 +1593,7 @@ static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return 0;
tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
@@ -1380,7 +1672,7 @@ amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
checksum = host_telemetry->header.checksum;
used_size = host_telemetry->header.used_size;
- if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
return -EINVAL;
cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
@@ -1501,3 +1793,55 @@ void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
if (virt->ops && virt->ops->req_bad_pages)
virt->ops->req_bad_pages(adev);
}
+
+static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ bool *hit)
+{
+ struct amd_sriov_ras_chk_criti *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ if (hit)
+ *hit = tmp->hit ? true : false;
+
+out:
+ kfree(tmp);
+
+ return 0;
+}
+
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = -EPERM;
+
+ if (!virt->ops || !virt->ops->req_ras_chk_criti)
+ return -EOPNOTSUPP;
+
+ /* The host allows 15 RAS telemetry requests per 60 seconds, after which it
+ * ignores incoming guest messages. Rate-limit the guest messages to
+ * prevent the guest from DoSing itself.
+ */
+ if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ if (!virt->ops->req_ras_chk_criti(adev, addr))
+ r = amdgpu_virt_cache_chk_criti_hit(
+ adev, virt->fw_reserve.ras_telemetry, hit);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ }
+
+ return r;
+}
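The guard in isolation; DEFINE_RATELIMIT_STATE comes from linux/ratelimit.h, and the 5 * HZ, 1 parameters mirror the initialization above (at most one pass per five-second window):

static DEFINE_RATELIMIT_STATE(chk_criti_rs, 5 * HZ, 1);

if (__ratelimit(&chk_criti_rs)) {
	/* at most one guest->host request per window gets through */
}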
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 3da3ebb1d9a1..01d5bca2dee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -54,6 +54,12 @@
#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2
+/* Signature used to validate the SR-IOV dynamic critical region init data header ("INDA") */
+#define AMDGPU_SRIOV_CRIT_DATA_SIGNATURE "INDA"
+#define AMDGPU_SRIOV_CRIT_DATA_SIG_LEN 4
+
+#define IS_SRIOV_CRIT_REGN_ENTRY_VALID(hdr, id) ((hdr)->valid_tables & (1 << (id)))
+
enum amdgpu_sriov_vf_mode {
SRIOV_VF_MODE_BARE_METAL = 0,
SRIOV_VF_MODE_ONE_VF,
@@ -98,6 +104,7 @@ struct amdgpu_virt_ops {
int (*req_ras_err_count)(struct amdgpu_device *adev);
int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
int (*req_bad_pages)(struct amdgpu_device *adev);
+ int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
};
/*
@@ -143,6 +150,7 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
AMDGIM_FEATURE_RAS_CPER = (1 << 11),
+ AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK = (1 << 12),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -252,10 +260,20 @@ struct amdgpu_virt_ras_err_handler_data {
struct amdgpu_virt_ras {
struct ratelimit_state ras_error_cnt_rs;
struct ratelimit_state ras_cper_dump_rs;
+ struct ratelimit_state ras_chk_criti_rs;
struct mutex ras_telemetry_mutex;
uint64_t cper_rptr;
};
+#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT)
+
+DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
+
+struct amdgpu_virt_region {
+ uint32_t offset;
+ uint32_t size_kb;
+};
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -267,12 +285,14 @@ struct amdgpu_virt {
struct amdgpu_irq_src rcv_irq;
struct work_struct flr_work;
- struct work_struct bad_pages_work;
+ struct work_struct req_bad_pages_work;
+ struct work_struct handle_bad_pages_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
+ struct amdgpu_virt_caps virt_caps;
uint32_t gim_feature;
uint32_t reg_access_mode;
int req_init_data_ver;
@@ -281,6 +301,12 @@ struct amdgpu_virt {
bool ras_init_done;
uint32_t reg_access;
+ /* dynamic(v2) critical regions */
+ struct amdgpu_virt_region init_data_header;
+ struct amdgpu_virt_region crit_regn;
+ struct amdgpu_virt_region crit_regn_tbl[AMD_SRIOV_MSG_MAX_TABLE_ID];
+ bool is_dynamic_crit_regn_enabled;
+
/* vf2pf message */
struct delayed_work vf2pf_work;
uint32_t vf2pf_update_interval_ms;
@@ -299,6 +325,8 @@ struct amdgpu_virt {
/* Spinlock to protect access to the RLCG register interface */
spinlock_t rlcg_reg_lock;
+ struct mutex access_req_mutex;
+
union amd_sriov_ras_caps ras_en_caps;
union amd_sriov_ras_caps ras_telemetry_en_caps;
struct amdgpu_virt_ras ras;
@@ -370,6 +398,9 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_ras_cper_en(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)
+#define amdgpu_sriov_xgmi_ta_ext_peer_link_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK)
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
@@ -416,6 +447,10 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
void amdgpu_virt_init(struct amdgpu_device *adev);
+int amdgpu_virt_init_critical_region(struct amdgpu_device *adev);
+int amdgpu_virt_get_dynamic_data_info(struct amdgpu_device *adev,
+ int data_id, uint8_t *binary, u32 *size);
+
bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
@@ -447,4 +482,5 @@ int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
enum amdgpu_ras_block block);
void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 155bb9891a17..79bad9cbe2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -14,7 +14,6 @@
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
@@ -581,13 +580,6 @@ static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
case CHIP_TONGA:
dce_v10_0_disable_dce(adev);
break;
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_VEGAM:
- dce_v11_0_disable_dce(adev);
- break;
case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_HAINAN:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c39bb06ebda1..a67285118c37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -128,43 +128,14 @@ struct amdgpu_vm_tlb_seq_struct {
};
/**
- * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping
- *
- * @adev: amdgpu_device pointer
- * @vm: amdgpu_vm pointer
- * @pasid: the pasid the VM is using on this GPU
- *
- * Set the pasid this VM is using on this GPU, can also be used to remove the
- * pasid by passing in zero.
+ * amdgpu_vm_assert_locked - check if VM is correctly locked
+ * @vm: the VM which should be tested
*
+ * Asserts that the VM root PD is locked.
*/
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid)
+static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
{
- int r;
-
- if (vm->pasid == pasid)
- return 0;
-
- if (vm->pasid) {
- r = xa_err(xa_erase_irq(&adev->vm_manager.pasids, vm->pasid));
- if (r < 0)
- return r;
-
- vm->pasid = 0;
- }
-
- if (pasid) {
- r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm,
- GFP_KERNEL));
- if (r < 0)
- return r;
-
- vm->pasid = pasid;
- }
-
-
- return 0;
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
}
/**
@@ -181,6 +152,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
+ amdgpu_vm_assert_locked(vm);
spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
@@ -198,6 +170,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
spin_unlock(&vm_bo->vm->status_lock);
@@ -213,6 +186,7 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
spin_unlock(&vm_bo->vm->status_lock);
@@ -260,6 +234,7 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
if (vm_bo->bo->parent) {
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
@@ -279,6 +254,7 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
spin_unlock(&vm_bo->vm->status_lock);
@@ -295,10 +271,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
+
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -327,6 +306,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
bool shared;
+ dma_resv_assert_held(bo->tbo.base.resv);
spin_lock(&vm->status_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
@@ -485,6 +465,46 @@ int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
}
/**
+ * amdgpu_vm_lock_done_list - lock all BOs on the done list
+ * @vm: vm providing the BOs
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
+ *
+ * Lock the BOs on the done list in the DRM execution context.
+ */
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct list_head *prev = &vm->done;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ /* We can only trust prev->next while holding the lock */
+ spin_lock(&vm->status_lock);
+ while (!list_is_head(prev->next, &vm->done)) {
+ bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
+
+ bo = bo_va->base.bo;
+ if (bo) {
+ amdgpu_bo_ref(bo);
+ spin_unlock(&vm->status_lock);
+
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
+ amdgpu_bo_unref(&bo);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&vm->status_lock);
+ }
+ prev = prev->next;
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
+}
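The loop above is an instance of a common pattern: pin the object the sleeping call needs, drop the spinlock around that call, then re-take the lock and resume from prev, whose ->next is only trusted while the lock is held. In generic form (helper names hypothetical):

spin_lock(&lock);
while (!list_is_head(prev->next, &head)) {
	entry = list_entry(prev->next, typeof(*entry), node);

	obj = get_ref(entry->obj);	/* pin what the sleeping call needs */
	spin_unlock(&lock);

	ret = sleeping_call(obj);	/* must not run under the spinlock */
	put_ref(obj);
	if (ret)
		return ret;

	spin_lock(&lock);
	prev = prev->next;		/* only valid to read under the lock */
}
spin_unlock(&lock);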
+
+/**
* amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
* @adev: amdgpu device pointer
@@ -616,18 +636,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
spin_unlock(&vm->status_lock);
bo = bo_base->bo;
-
- if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
- struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
-
- pr_warn_ratelimited("Evicted user BO is not reserved\n");
- if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->task.pid);
- amdgpu_vm_put_task_info(ti);
- }
-
- return -EINVAL;
- }
+ dma_resv_assert_held(bo->tbo.base.resv);
r = validate(param, bo);
if (r)
@@ -660,6 +669,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
bool ret;
+ amdgpu_vm_assert_locked(vm);
+
amdgpu_vm_eviction_lock(vm);
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
@@ -772,7 +783,6 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
bool cleaner_shader_needed = false;
bool pasid_mapping_needed = false;
struct dma_fence *fence = NULL;
- struct amdgpu_fence *af;
unsigned int patch;
int r;
@@ -835,12 +845,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
}
if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
- r = amdgpu_fence_emit(ring, &fence, NULL, 0);
+ r = amdgpu_fence_emit(ring, job->hw_vm_fence, 0);
if (r)
return r;
- /* this is part of the job's context */
- af = container_of(fence, struct amdgpu_fence, base);
- af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
+ fence = &job->hw_vm_fence->base;
+ /* get a ref for the job */
+ dma_fence_get(fence);
}
if (vm_flush_needed) {
@@ -962,6 +972,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
LIST_HEAD(relocated);
int r, idx;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
list_splice_init(&vm->relocated, &relocated);
spin_unlock(&vm->status_lock);
@@ -977,7 +989,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
if (r)
goto error;
@@ -1056,7 +1069,7 @@ amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
}
/* Prepare a TLB flush fence to be attached to PTs */
- if (!params->unlocked && vm->is_compute_context) {
+ if (!params->unlocked) {
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
/* Makes sure no PD/PT is freed before the flush */
@@ -1146,7 +1159,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
dma_fence_put(tmp);
}
- r = vm->update_funcs->prepare(&params, sync);
+ r = vm->update_funcs->prepare(&params, sync,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
if (r)
goto error_free;
@@ -1339,13 +1353,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
/* Normally bo_va->flags contains only the READABLE and WRITEABLE bits here,
* but we filter the flags first just in case
*/
- if (!(mapping->flags & AMDGPU_PTE_READABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
update_flags &= ~AMDGPU_PTE_READABLE;
- if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
+ if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
update_flags &= ~AMDGPU_PTE_WRITEABLE;
/* Apply ASIC specific mapping flags */
- amdgpu_gmc_get_vm_pte(adev, mapping, &update_flags);
+ amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
+ &update_flags);
trace_amdgpu_vm_bo_update(mapping);
@@ -1486,7 +1501,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
struct dma_fence *fence)
{
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_add_prt_cb(adev, fence);
kfree(mapping);
}
@@ -1765,7 +1780,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
list_add(&mapping->list, &bo_va->invalids);
amdgpu_vm_it_insert(mapping, &vm->va);
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
@@ -1825,7 +1840,7 @@ static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1884,7 +1899,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
@@ -1940,6 +1955,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
+ int r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -1960,6 +1976,17 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
return -ENOENT;
}
+ /* It is unlikely that a userq mapping has not been idled by the time
+ * user space issues the GEM unmap IOCTL, unless the unmap is being
+ * forced from user space.
+ */
+ if (unlikely(atomic_read(&bo_va->userq_va_mapped) > 0)) {
+ r = amdgpu_userq_gem_va_unmap_validate(adev, mapping, saddr);
+ if (unlikely(r == -EBUSY))
+ dev_warn_once(adev->dev,
+ "Attempt to unmap an active userq buffer\n");
+ }
+
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
mapping->bo_va = NULL;
@@ -2066,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = before->bo_va->base.bo;
amdgpu_vm_it_insert(before, &vm->va);
- if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (before->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2081,7 +2108,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo *bo = after->bo_va->base.bo;
amdgpu_vm_it_insert(after, &vm->va);
- if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
+ if (after->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_prt_get(adev);
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
@@ -2537,6 +2564,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: requested vm
* @xcp_id: GPU partition selection id
+ * @pasid: the pasid the VM is using on this GPU
*
* Init @vm fields.
*
@@ -2544,7 +2572,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
* 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int32_t xcp_id)
+ int32_t xcp_id, uint32_t pasid)
{
struct amdgpu_bo *root_bo;
struct amdgpu_bo_vm *root;
@@ -2620,12 +2648,26 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (r)
dev_dbg(adev->dev, "Failed to create task info for VM\n");
+ /* Store new PASID in XArray (if non-zero) */
+ if (pasid != 0) {
+ r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
+ }
+
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
return 0;
error_free_root:
+ /* If PASID was partially set, erase it from XArray before failing */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
amdgpu_vm_pt_free_root(adev, vm);
amdgpu_bo_unreserve(vm->root.bo);
amdgpu_bo_unref(&root_bo);
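For context, a sketch of the read side of the pasid -> vm mapping as the fault paths elsewhere in the driver use it; variable names are assumed:

unsigned long flags;
struct amdgpu_vm *vm;

xa_lock_irqsave(&adev->vm_manager.pasids, flags);
vm = xa_load(&adev->vm_manager.pasids, pasid);
xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);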
@@ -2731,7 +2773,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- amdgpu_vm_set_pasid(adev, vm, 0);
+ /* Remove PASID mapping before destroying VM */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
dma_fence_wait(vm->last_tlb_flush, false);
@@ -2741,7 +2787,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_tlb_flush);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT_FLAG(adev) && prt_fini_needed) {
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
prt_fini_needed = false;
}
@@ -2772,10 +2818,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
- if (vm->reserved_vmid[i]) {
- amdgpu_vmid_free_reserved(adev, i);
- vm->reserved_vmid[i] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, i);
}
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
@@ -2800,8 +2843,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
*/
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
- unsigned i;
-
/* Concurrent flushes are only possible starting with Vega10 and
* are broken on Navi10 and Navi14.
*/
@@ -2810,11 +2851,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->asic_type == CHIP_NAVI14);
amdgpu_vmid_mgr_init(adev);
- adev->vm_manager.fence_context =
- dma_fence_context_alloc(AMDGPU_MAX_RINGS);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- adev->vm_manager.seqno[i] = 0;
-
spin_lock_init(&adev->vm_manager.prt_lock);
atomic_set(&adev->vm_manager.num_prt_users, 0);
@@ -2871,6 +2907,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
/* No valid flags defined yet */
if (args->in.flags)
@@ -2879,17 +2916,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
switch (args->in.op) {
case AMDGPU_VM_OP_RESERVE_VMID:
/* We only have requirement to reserve vmid from gfxhub */
- if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
- }
-
- break;
+ return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
case AMDGPU_VM_OP_UNRESERVE_VMID:
- if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
- amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
- fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
- }
+ amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
break;
default:
return -EINVAL;
@@ -3027,6 +3056,8 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
unsigned int total_done_objs = 0;
unsigned int id = 0;
+ amdgpu_vm_assert_locked(vm);
+
spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index fd086efd8457..15d757c016cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -308,7 +308,7 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo_vm *bo);
int (*prepare)(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync);
+ struct amdgpu_sync *sync, u64 k_job_id);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -349,12 +349,16 @@ struct amdgpu_vm {
/* Memory statistics for this vm, protected by status_lock */
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for either
+ * PDs, PTs or per-VM BOs. The state transitions are:
+ *
+ * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
+ */
+
/* Per-VM and PT BOs who needs a validation */
struct list_head evicted;
- /* BOs for user mode queues that need a validation */
- struct list_head evicted_user;
-
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
@@ -364,15 +368,29 @@ struct amdgpu_vm {
/* All BOs of this VM not currently in the state machine */
struct list_head idle;
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for BOs which
+ * have their own dma_resv object and do not depend on the root PD. Their
+ * state transitions are:
+ *
+ * evicted_user or invalidated -> done
+ */
+
+ /* BOs for user mode queues that need a validation */
+ struct list_head evicted_user;
+
/* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- /* BO mappings freed, but not yet updated in the PT */
- struct list_head freed;
-
/* BOs which are invalidated, has been updated in the PTs */
struct list_head done;
+ /*
+ * This list contains amdgpu_bo_va_mapping objects which have been freed
+ * but not updated in the PTs
+ */
+ struct list_head freed;
+
/* contains the page directory */
struct amdgpu_vm_bo_base root;
struct dma_fence *last_update;
@@ -394,7 +412,7 @@ struct amdgpu_vm {
struct dma_fence *last_unlocked;
unsigned int pasid;
- bool reserved_vmid[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
bool use_cpu_for_update;
@@ -435,10 +453,6 @@ struct amdgpu_vm_manager {
unsigned int first_kfd_vmid;
bool concurrent_flush;
- /* Handling of VM fences */
- u64 fence_context;
- unsigned seqno[AMDGPU_MAX_RINGS];
-
uint64_t max_pfn;
uint32_t num_level;
uint32_t block_size;
@@ -482,15 +496,14 @@ extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- u32 pasid);
-
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
unsigned int num_fences);
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -538,11 +551,11 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
@@ -670,4 +683,9 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
struct amdgpu_task_info *task_info);
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->valids, list)
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->invalids, list)
+
#endif
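A hypothetical use of the new iterators, counting a bo_va's currently valid mappings:

struct amdgpu_bo_va_mapping *mapping;
unsigned int count = 0;

amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping)
	count++;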
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 0c1ef5850a5e..22e2e5b47341 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
*
* @p: see amdgpu_vm_update_params definition
* @sync: sync obj with fences to wait on
+ * @k_job_id: the id for tracing/debug purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync,
+ u64 k_job_id)
{
if (!sync)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 30022123b0bf..f794fb1cc06e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
/*
* amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
@@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
params.vm = vm;
params.immediate = immediate;
- r = vm->update_funcs->prepare(&params, NULL);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
if (r)
goto exit;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 46d9fb433ab2..36805dcfa159 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
/* Allocate a new job for @count PTE updates */
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
- unsigned int count)
+ unsigned int count, u64 k_job_id)
{
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
: AMDGPU_IB_POOL_DELAYED;
@@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
- ndw * 4, pool, &p->job);
+ ndw * 4, pool, &p->job, k_job_id);
if (r)
return r;
@@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
*
* @p: see amdgpu_vm_update_params definition
* @sync: amdgpu_sync object with fences to wait for
+ * @k_job_id: identifier of the job, for tracing purposes
*
* Returns:
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- struct amdgpu_sync *sync)
+ struct amdgpu_sync *sync, u64 k_job_id)
{
int r;
- r = amdgpu_vm_sdma_alloc_job(p, 0);
+ r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
if (r)
return r;
@@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
if (r)
return r;
- r = amdgpu_vm_sdma_alloc_job(p, count);
+ r = amdgpu_vm_sdma_alloc_job(p, count,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 121ee17b522b..aa78c2ee9e21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 1):
+ return adev->pm.fw_version < 0x0a640500;
+ default:
+ return false;
+ }
+}
+
+static int vpe_get_dpm_level(struct amdgpu_device *adev)
+{
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
+}
+
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+ if (fences)
+ goto reschedule;
+
+ if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
+ goto reschedule;
+
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ return;
- if (fences == 0)
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
- else
- schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+reschedule:
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)
@@ -379,9 +405,10 @@ static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
if (ret)
goto out;
- /* TODO: Add queue reset mask when FW fully supports it */
adev->vpe.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
if (ret)
goto out;
@@ -435,6 +462,8 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_vpe *vpe = &adev->vpe;
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
vpe_ring_stop(vpe);
/* Power off VPE */
@@ -445,10 +474,6 @@ static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
static int vpe_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = ip_block->adev;
-
- cancel_delayed_work_sync(&adev->vpe.idle_work);
-
return vpe_hw_fini(ip_block);
}
@@ -874,6 +899,27 @@ static void vpe_ring_end_use(struct amdgpu_ring *ring)
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
+static int vpe_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_GATE);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -942,6 +988,7 @@ static const struct amdgpu_ring_funcs vpe_ring_funcs = {
.preempt_ib = vpe_ring_preempt_ib,
.begin_use = vpe_ring_begin_use,
.end_use = vpe_ring_end_use,
+ .reset = vpe_ring_reset,
};
static void vpe_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 78f9e86ccc09..9d934c07fa6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -234,6 +234,9 @@ static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
!adev->gmc.vram_vendor)
return 0;
+ if (!ttm_resource_manager_used(&adev->mman.vram_mgr.manager))
+ return 0;
+
return attr->mode;
}
@@ -396,43 +399,33 @@ out:
return ret;
}
-static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info)
{
- DRM_DEBUG_DRIVER("Dummy vram mgr debug\n");
-}
-
-static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n");
- return false;
-}
+ struct amdgpu_vram_mgr_resource *vres;
+ struct drm_buddy_block *block;
+ u64 start, size;
+ int ret = -ENOENT;
-static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man,
- struct ttm_resource *res,
- const struct ttm_place *place,
- size_t size)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n");
- return true;
-}
+ mutex_lock(&mgr->lock);
+ list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
+ list_for_each_entry(block, &vres->blocks, link) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+ if ((start <= address) && (address < (start + size))) {
+ info->start = start;
+ info->size = size;
+ memcpy(&info->task, &vres->task, sizeof(vres->task));
+ ret = 0;
+ goto out;
+ }
+ }
+ }
-static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man,
- struct ttm_resource *res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n");
-}
+out:
+ mutex_unlock(&mgr->lock);
-static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_resource **res)
-{
- DRM_DEBUG_DRIVER("Dummy vram mgr new\n");
- return -ENOSPC;
+ return ret;
}
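A hypothetical RAS-style caller, mapping a faulting VRAM address back to the owning allocation and task:

struct amdgpu_vram_block_info info;

if (!amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr, bad_addr, &info))
	dev_info(adev->dev, "addr 0x%llx in block [0x%llx + 0x%llx], pid %d (%s)\n",
		 bad_addr, info.start, info.size, info.task.pid, info.task.comm);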
/**
@@ -568,6 +561,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
remaining_size -= size;
}
+ vres->task.pid = task_pid_nr(current);
+ get_task_comm(vres->task.comm, current);
+ list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);
+
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
struct drm_buddy_block *dcc_block;
unsigned long dcc_start;
@@ -645,6 +642,10 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
uint64_t vis_usage = 0;
mutex_lock(&mgr->lock);
+
+ list_del(&vres->vres_node);
+ memset(&vres->task, 0, sizeof(vres->task));
+
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
@@ -895,14 +896,6 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
mutex_unlock(&mgr->lock);
}
-static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = {
- .alloc = amdgpu_dummy_vram_mgr_new,
- .free = amdgpu_dummy_vram_mgr_del,
- .intersects = amdgpu_dummy_vram_mgr_intersects,
- .compatible = amdgpu_dummy_vram_mgr_compatible,
- .debug = amdgpu_dummy_vram_mgr_debug
-};
-
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
.alloc = amdgpu_vram_mgr_new,
.free = amdgpu_vram_mgr_del,
@@ -933,18 +926,13 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
mutex_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
+ INIT_LIST_HEAD(&mgr->allocated_vres_list);
mgr->default_page_size = PAGE_SIZE;
- if (!adev->gmc.is_app_apu) {
- man->func = &amdgpu_vram_mgr_func;
-
- err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
- if (err)
- return err;
- } else {
- man->func = &amdgpu_dummy_vram_mgr_func;
- DRM_INFO("Setup dummy vram mgr\n");
- }
+ man->func = &amdgpu_vram_mgr_func;
+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+ if (err)
+ return err;
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
ttm_resource_manager_set_used(man, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
index 2c88d5fd87da..5f5fd9a911c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -35,12 +35,26 @@ struct amdgpu_vram_mgr {
struct list_head reserved_pages;
atomic64_t vis_usage;
u64 default_page_size;
+ struct list_head allocated_vres_list;
+};
+
+struct amdgpu_vres_task {
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+};
+
+struct amdgpu_vram_block_info {
+ u64 start;
+ u64 size;
+ struct amdgpu_vres_task task;
};
struct amdgpu_vram_mgr_resource {
struct ttm_resource base;
struct list_head blocks;
unsigned long flags;
+ struct list_head vres_node;
+ struct amdgpu_vres_task task;
};
static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
@@ -72,4 +86,7 @@ static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
ares->flags |= DRM_BUDDY_CLEARED;
}
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index c417f8689220..1083db8cea2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -120,6 +120,25 @@ static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
xcp->valid = true;
}
+static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr,
+ int xcp_id)
+{
+ struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id];
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t inst_mask;
+ uint64_t uid;
+ int i;
+
+ if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) &&
+ inst_mask) {
+ i = GET_INST(GC, (ffs(inst_mask) - 1));
+ uid = amdgpu_device_get_uid(xcp_mgr->adev->uid_info,
+ AMDGPU_UID_TYPE_XCD, i);
+ if (uid)
+ xcp->unique_id = uid;
+ }
+}
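ffs() is 1-based, so ffs(inst_mask) - 1 above is the index of the lowest set bit, i.e. the first GFX instance in the partition. For example:

int first = ffs(0x0c) - 1;	/* 0x0c = 0b1100, lowest set bit is bit 2, so first == 2 */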
+
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
struct amdgpu_device *adev = xcp_mgr->adev;
@@ -158,6 +177,7 @@ int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
else
xcp_mgr->xcp[i].mem_id = mem_id;
}
+ __amdgpu_xcp_set_unique_id(xcp_mgr, i);
}
xcp_mgr->num_xcps = num_xcps;
@@ -406,6 +426,7 @@ void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
+ amdgpu_xcp_drm_dev_free(p_ddev);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
index 70a0f8400b57..1928d9e224fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -112,6 +112,7 @@ struct amdgpu_xcp {
struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
struct amdgpu_xcp_mgr *xcp_mgr;
struct kobject kobj;
+ uint64_t unique_id;
};
struct amdgpu_xcp_mgr {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 1ede308a7c67..aad530c46a9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -298,6 +298,9 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num)
{
int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 };
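+ /* a single-node configuration has no external XGMI links */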
+ if (adev->gmc.xgmi.num_physical_nodes <= 1)
+ return -EINVAL;
+
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
case IP_VERSION(6, 4, 1):
@@ -333,6 +336,10 @@ static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link
}
i = global_link_num / n;
+
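+ /* the link status register can only be read from a populated AID */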
+ if (!(adev->aid_mask & BIT(i)))
+ return U32_MAX;
+
addr += adev->asic_funcs->encode_ext_smn_addressing(i);
return RREG32_PCIE_EXT(addr);
@@ -342,6 +349,9 @@ int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
{
u32 xgmi_state_reg_val;
+ if (adev->gmc.xgmi.num_physical_nodes <= 1)
+ return -EINVAL;
+
switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
case IP_VERSION(6, 4, 0):
case IP_VERSION(6, 4, 1):
@@ -958,28 +968,6 @@ static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_inf
return 0;
}
-static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev,
- struct amdgpu_device *peer_adev)
-{
- struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info;
- struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info;
-
- for (int i = 0; i < peer_info->num_nodes; i++) {
- if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) {
- for (int j = 0; j < top_info->num_nodes; j++) {
- if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) {
- peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops;
- peer_info->nodes[i].is_sharing_enabled =
- top_info->nodes[j].is_sharing_enabled;
- peer_info->nodes[i].num_links =
- top_info->nodes[j].num_links;
- return;
- }
- }
- }
- }
-}
-
int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
struct psp_xgmi_topology_info *top_info;
@@ -1065,11 +1053,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
/* To do: continue with some node failed or disable the whole hive*/
goto exit_unlock;
}
-
- /* fill the topology info for peers instead of getting from PSP */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- amdgpu_xgmi_fill_topology_info(adev, tmp_adev);
- }
} else {
/* get latest topology info for each device from psp */
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index bba0b26fee8f..5f36aff17e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -126,4 +126,8 @@ uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
uint16_t max_speed, uint8_t max_width);
+
+/* Cleanup macro for use with __free(xgmi_put_hive) */
+DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T))
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index 33edad1f9dcd..3cdb1e0eca37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -23,26 +23,84 @@
#ifndef AMDGV_SRIOV_MSG__H_
#define AMDGV_SRIOV_MSG__H_
-/* unit in kilobytes */
-#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
-#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
-#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
-#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
-#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
-#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2
-#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64
+#define AMD_SRIOV_MSG_SIZE_KB 1
+
/*
- * layout
+ * layout v1
* 0 64KB 65KB 66KB 68KB 132KB
* | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
* | 64KB | 1KB | 1KB | 2KB | 64KB | ...
*/
-#define AMD_SRIOV_MSG_SIZE_KB 1
-#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
-#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
-#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB)
+/*
+ * layout v2 (offsets are assigned dynamically; the values below are examples)
+ * 0 1KB 65KB 66KB 67KB 69KB 133KB
+ * | INITD_H | VBIOS | PF2VF | VF2PF | Bad Page | RAS Telemetry Region | ...
+ * | 1KB | 64KB | 1KB | 1KB | 2KB | 64KB | ...
+ *
+ * Note: PF2VF + VF2PF + Bad Page = DataExchange region (allocated contiguously)
+ */
+
+/* v1 layout sizes */
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 1
+#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1 2
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1 64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_SIZE_KB_V1 + AMD_SRIOV_MSG_VF2PF_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+
+/* v1 offsets */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET_V1 0
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1 AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1
+#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB_V1
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_PF2VF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_VF2PF_OFFSET_KB_V1 + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB_V1 \
+ (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB_V1 + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB_V1)
+#define AMD_SRIOV_MSG_INIT_DATA_TOT_SIZE_KB_V1 \
+ (AMD_SRIOV_MSG_VBIOS_SIZE_KB_V1 + AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB_V1 + \
+ AMD_SRIOV_MSG_RAS_TELEMETRY_SIZE_KB_V1)
+
+enum amd_sriov_crit_region_version {
+ GPU_CRIT_REGION_V1 = 1,
+ GPU_CRIT_REGION_V2 = 2,
+};
+
+/* v2 layout offset enum (in order of allocation) */
+enum amd_sriov_msg_table_id_enum {
+ AMD_SRIOV_MSG_IPD_TABLE_ID = 0,
+ AMD_SRIOV_MSG_VBIOS_IMG_TABLE_ID,
+ AMD_SRIOV_MSG_RAS_TELEMETRY_TABLE_ID,
+ AMD_SRIOV_MSG_DATAEXCHANGE_TABLE_ID,
+ AMD_SRIOV_MSG_BAD_PAGE_INFO_TABLE_ID,
+ AMD_SRIOV_MSG_INITD_H_TABLE_ID,
+ AMD_SRIOV_MSG_MAX_TABLE_ID,
+};
+
+struct amd_sriov_msg_init_data_header {
+ char signature[4]; /* "INDA" */
+ uint32_t version;
+ uint32_t checksum;
+ uint32_t initdata_offset; /* 0 */
+ uint32_t initdata_size_in_kb; /* e.g. 5 MB, expressed in KB */
+ uint32_t valid_tables;
+ uint32_t vbios_img_offset;
+ uint32_t vbios_img_size_in_kb;
+ uint32_t dataexchange_offset;
+ uint32_t dataexchange_size_in_kb;
+ uint32_t ras_tele_info_offset;
+ uint32_t ras_tele_info_size_in_kb;
+ uint32_t ip_discovery_offset;
+ uint32_t ip_discovery_size_in_kb;
+ uint32_t bad_page_info_offset;
+ uint32_t bad_page_size_in_kb;
+ uint32_t reserved[8];
+};
/*
* PF2VF history log:
@@ -102,7 +160,8 @@ union amd_sriov_msg_feature_flags {
uint32_t ras_caps : 1;
uint32_t ras_telemetry : 1;
uint32_t ras_cper : 1;
- uint32_t reserved : 20;
+ uint32_t xgmi_ta_ext_peer_link : 1;
+ uint32_t reserved : 19;
} flags;
uint32_t all;
};
@@ -140,8 +199,9 @@ union amd_sriov_ras_caps {
uint64_t block_jpeg : 1;
uint64_t block_ih : 1;
uint64_t block_mpio : 1;
+ uint64_t block_mmsch : 1;
uint64_t poison_propogation_mode : 1;
- uint64_t reserved : 44;
+ uint64_t reserved : 43;
} bits;
uint64_t all;
};
@@ -405,12 +465,17 @@ struct amd_sriov_ras_cper_dump {
uint32_t buf[];
};
+struct amd_sriov_ras_chk_criti {
+ uint32_t hit;
+};
+
struct amdsriov_ras_telemetry {
struct amd_sriov_ras_telemetry_header header;
union {
struct amd_sriov_ras_telemetry_error_count error_count;
struct amd_sriov_ras_cper_dump cper_dump;
+ struct amd_sriov_ras_chk_criti chk_criti;
} body;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index 811124ff88a8..f9e2edf5260b 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -407,7 +407,8 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
return -EINVAL;
}
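+ /* don't run the KFD pre/post partition ops during reset or suspend */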
- if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
+ if (adev->kfd.init_complete && !amdgpu_in_reset(adev) &&
+ !adev->in_suspend)
flags |= AMDGPU_XCP_OPS_KFD;
if (flags & AMDGPU_XCP_OPS_KFD) {
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 427b073de2fc..7a063e44d429 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1246,6 +1246,10 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.last_jump_jiffies = 0;
if (ws) {
ectx.ws = kcalloc(4, ws, GFP_KERNEL);
+ if (!ectx.ws) {
+ ret = -ENOMEM;
+ goto free;
+ }
ectx.ws_size = ws;
} else {
ectx.ws = NULL;
@@ -1494,6 +1498,28 @@ static void atom_get_vbios_version(struct atom_context *ctx)
}
}
+static void atom_get_vbios_build(struct atom_context *ctx)
+{
+ unsigned char *atom_rom_hdr;
+ unsigned char *str;
+ uint16_t base, len;
+
+ base = CU16(ATOM_ROM_TABLE_PTR);
+ atom_rom_hdr = CSTR(base);
+
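+ /* the build number is the third NUL-terminated string before the ROM header */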
+ str = CSTR(CU16(base + ATOM_ROM_CFG_PTR));
+ /* Skip config string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+ /* Skip change list string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+
+ len = min(atom_rom_hdr - str, STRLEN_NORMAL);
+ if (len)
+ strscpy(ctx->build_num, str, len);
+}
+
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
int base;
@@ -1554,6 +1580,7 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
atom_get_vbios_pn(ctx);
atom_get_vbios_date(ctx);
atom_get_vbios_version(ctx);
+ atom_get_vbios_build(ctx);
return ctx;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index b807f6639a4c..825ff28731f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -37,6 +37,7 @@ struct drm_device;
#define ATOM_ROM_MAGIC "ATOM"
#define ATOM_ROM_MAGIC_PTR 4
+#define ATOM_ROM_CFG_PTR 0xC
#define ATOM_ROM_MSG_PTR 0x10
#define ATOM_ROM_CMD_PTR 0x1E
#define ATOM_ROM_DATA_PTR 0x20
@@ -151,6 +152,7 @@ struct atom_context {
uint32_t version;
uint8_t vbios_ver_str[STRLEN_NORMAL];
uint8_t date[STRLEN_NORMAL];
+ uint8_t build_num[STRLEN_NORMAL];
};
extern int amdgpu_atom_debug;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 41f4705bdbbd..876a3256dba4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -156,6 +156,9 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
/* enable irqs */
cik_ih_enable_interrupts(adev);
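+ /* the software IH ring is driver-managed; just mark it enabled */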
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -192,6 +195,9 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
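+ /* the software ring is driver-managed, so skip the overflow handling below */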
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
/* When a ring buffer overflow happen start parsing interrupt
@@ -211,6 +217,8 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
}
+
+out:
return (wptr & ih->ptr_mask);
}
@@ -306,6 +314,10 @@ static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
new file mode 100644
index 000000000000..ed1e25661706
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "cyan_skillfish_ip_offset.h"
+
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks; only initialize the blocks needed by the driver */
+ uint32_t i;
+
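+ /* this ASIC exposes a single graphics XCC */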
+ adev->gfx.xcc_mask = 1;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
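+ /* SDMA registers live in the GC aperture on this ASIC */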
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 2f891fb846d5..bc7a2e06ab5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -157,6 +157,9 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
cz_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -194,6 +197,9 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -297,6 +303,10 @@ static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index ba73518f5cdf..72ca6538b2e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1141,8 +1141,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
deleted file mode 100644
index b01d88d078fa..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ /dev/null
@@ -1,3818 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drm_edid.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_modeset_helper.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_vblank.h>
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_i2c.h"
-#include "vid.h"
-#include "atom.h"
-#include "amdgpu_atombios.h"
-#include "atombios_crtc.h"
-#include "atombios_encoders.h"
-#include "amdgpu_pll.h"
-#include "amdgpu_connectors.h"
-#include "amdgpu_display.h"
-#include "dce_v11_0.h"
-
-#include "dce/dce_11_0_d.h"
-#include "dce/dce_11_0_sh_mask.h"
-#include "dce/dce_11_0_enum.h"
-#include "oss/oss_3_0_d.h"
-#include "oss/oss_3_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-#include "ivsrcid/ivsrcid_vislands30.h"
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
-
-static const u32 crtc_offsets[] =
-{
- CRTC0_REGISTER_OFFSET,
- CRTC1_REGISTER_OFFSET,
- CRTC2_REGISTER_OFFSET,
- CRTC3_REGISTER_OFFSET,
- CRTC4_REGISTER_OFFSET,
- CRTC5_REGISTER_OFFSET,
- CRTC6_REGISTER_OFFSET
-};
-
-static const u32 hpd_offsets[] =
-{
- HPD0_REGISTER_OFFSET,
- HPD1_REGISTER_OFFSET,
- HPD2_REGISTER_OFFSET,
- HPD3_REGISTER_OFFSET,
- HPD4_REGISTER_OFFSET,
- HPD5_REGISTER_OFFSET
-};
-
-static const uint32_t dig_offsets[] = {
- DIG0_REGISTER_OFFSET,
- DIG1_REGISTER_OFFSET,
- DIG2_REGISTER_OFFSET,
- DIG3_REGISTER_OFFSET,
- DIG4_REGISTER_OFFSET,
- DIG5_REGISTER_OFFSET,
- DIG6_REGISTER_OFFSET,
- DIG7_REGISTER_OFFSET,
- DIG8_REGISTER_OFFSET
-};
-
-static const struct {
- uint32_t reg;
- uint32_t vblank;
- uint32_t vline;
- uint32_t hpd;
-
-} interrupt_status_offsets[] = { {
- .reg = mmDISP_INTERRUPT_STATUS,
- .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
-} };
-
-static const u32 cz_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14300000,
-};
-
-static const u32 cz_mgcg_cgcg_init[] =
-{
- mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
- mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
-};
-
-static const u32 stoney_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14302000,
-};
-
-static const u32 polaris11_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_DEBUG1, 0xffffffff, 0x00000008,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static const u32 polaris10_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
-{
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- amdgpu_device_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_device_program_register_sequence(adev,
- cz_golden_settings_a11,
- ARRAY_SIZE(cz_golden_settings_a11));
- break;
- case CHIP_STONEY:
- amdgpu_device_program_register_sequence(adev,
- stoney_golden_settings_a11,
- ARRAY_SIZE(stoney_golden_settings_a11));
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- amdgpu_device_program_register_sequence(adev,
- polaris11_golden_settings_a11,
- ARRAY_SIZE(polaris11_golden_settings_a11));
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- amdgpu_device_program_register_sequence(adev,
- polaris10_golden_settings_a11,
- ARRAY_SIZE(polaris10_golden_settings_a11));
- break;
- default:
- break;
- }
-}
-
-static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-
- return r;
-}
-
-static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-}
-
-static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
-{
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
- return 0;
- else
- return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Enable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_get(adev, &adev->pageflip_irq, i);
-}
-
-static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Disable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_put(adev, &adev->pageflip_irq, i);
-}
-
-/**
- * dce_v11_0_page_flip - pageflip callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc_id: crtc to cleanup pageflip on
- * @crtc_base: new address of the crtc (GPU MC address)
- * @async: asynchronous flip
- *
- * Triggers the actual pageflip by updating the primary
- * surface base address.
- */
-static void dce_v11_0_page_flip(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base, bool async)
-{
- struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
- struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
- u32 tmp;
-
- /* flip immediate for async, default is vsync */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* update pitch */
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
- fb->pitches[0] / fb->format->cpp[0]);
- /* update the scanout addresses */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(crtc_base));
- /* writing to the low address triggers the update */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(crtc_base));
- /* post the write */
- RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
-}
-
-static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
- u32 *vbl, u32 *position)
-{
- if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
- return -EINVAL;
-
- *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
- *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- return 0;
-}
-
-/**
- * dce_v11_0_hpd_sense - hpd sense callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Checks if a digital monitor is connected (evergreen+).
- * Returns true if connected, false if not connected.
- */
-static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- bool connected = false;
-
- if (hpd >= adev->mode_info.num_hpd)
- return connected;
-
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
- DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
- connected = true;
-
- return connected;
-}
-
-/**
- * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Set the polarity of the hpd pin (evergreen+).
- */
-static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- u32 tmp;
- bool connected = dce_v11_0_hpd_sense(adev, hpd);
-
- if (hpd >= adev->mode_info.num_hpd)
- return;
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- if (connected)
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
- else
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-/**
- * dce_v11_0_hpd_init - hpd setup callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Setup the hpd pins used by the card (evergreen+).
- * Enable the pin, set the polarity, and enable the hpd interrupts.
- */
-static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- /* don't try to enable hpd on eDP or LVDS avoid breaking the
- * aux dp channel on imac and help (but not completely fix)
- * https://bugzilla.redhat.com/show_bug.cgi?id=726143
- * also avoid interrupt storms during dpms.
- */
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- continue;
- }
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_CONNECT_INT_DELAY,
- AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_DISCONNECT_INT_DELAY,
- AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
- dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
- amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-/**
- * dce_v11_0_hpd_fini - hpd tear down callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the hpd pins used by the card (evergreen+).
- * Disable the hpd interrupts.
- */
-static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- u32 tmp;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
- drm_connector_list_iter_end(&iter);
-}
-
-static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
-{
- return mmDC_GPIO_HPD_A;
-}
-
-static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
-{
- u32 crtc_hung = 0;
- u32 crtc_status[6];
- u32 i, j, tmp;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
- crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- crtc_hung |= (1 << i);
- }
- }
-
- for (j = 0; j < 10; j++) {
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (crtc_hung & (1 << i)) {
- tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- if (tmp != crtc_status[i])
- crtc_hung &= ~(1 << i);
- }
- }
- if (crtc_hung == 0)
- return false;
- udelay(100);
- }
-
- return true;
-}
-
-static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
- bool render)
-{
- u32 tmp;
-
- /* Lockout access through VGA aperture*/
- tmp = RREG32(mmVGA_HDP_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
- else
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- WREG32(mmVGA_HDP_CONTROL, tmp);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
- else
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-}
-
-static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
-{
- int num_crtc = 0;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- num_crtc = 3;
- break;
- case CHIP_STONEY:
- num_crtc = 2;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- num_crtc = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- num_crtc = 5;
- break;
- default:
- num_crtc = 0;
- }
- return num_crtc;
-}
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev)
-{
- /*Disable VGA render and enabled crtc, if has DCE engine*/
- if (amdgpu_atombios_has_dce_engine_info(adev)) {
- u32 tmp;
- int crtc_enabled, i;
-
- dce_v11_0_set_vga_render_state(adev, false);
-
- /*Disable crtc*/
- for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- }
- }
-}
-
-static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- int bpc = 0;
- u32 tmp = 0;
- enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- bpc = amdgpu_connector_get_monitor_bpc(connector);
- dither = amdgpu_connector->dither;
- }
-
- /* LVDS/eDP FMT is set up by atom */
- if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
- return;
-
- /* not needed for analog */
- if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
- (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
- return;
-
- if (bpc == 0)
- return;
-
- switch (bpc) {
- case 6:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
- }
- break;
- case 8:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
- }
- break;
- case 10:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
- }
- break;
- default:
- /* not needed */
- break;
- }
-
- WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-
-/* display watermark setup */
-/**
- * dce_v11_0_line_buffer_adjust - Set up the line buffer
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @mode: the current display mode on the selected display
- * controller
- *
- * Setup up the line buffer allocation for
- * the selected display controller (CIK).
- * Returns the line buffer size in pixels.
- */
-static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- struct drm_display_mode *mode)
-{
- u32 tmp, buffer_alloc, i, mem_cfg;
- u32 pipe_offset = amdgpu_crtc->crtc_id;
- /*
- * Line Buffer Setup
- * There are 6 line buffers, one for each display controllers.
- * There are 3 partitions per LB. Select the number of partitions
- * to enable based on the display width. For display widths larger
- * than 4096, you need use to use 2 display controllers and combine
- * them using the stereo blender.
- */
- if (amdgpu_crtc->base.enabled && mode) {
- if (mode->crtc_hdisplay < 1920) {
- mem_cfg = 1;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 2560) {
- mem_cfg = 2;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 4096) {
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- } else {
- DRM_DEBUG_KMS("Mode too big for LB!\n");
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- }
- } else {
- mem_cfg = 1;
- buffer_alloc = 0;
- }
-
- tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
- WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
- WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
- break;
- udelay(1);
- }
-
- if (amdgpu_crtc->base.enabled && mode) {
- switch (mem_cfg) {
- case 0:
- default:
- return 4096 * 2;
- case 1:
- return 1920 * 2;
- case 2:
- return 2560 * 2;
- }
- }
-
- /* controller not enabled, so no lb used */
- return 0;
-}
-
-/**
- * cik_get_number_of_dram_channels - get the number of dram channels
- *
- * @adev: amdgpu_device pointer
- *
- * Look up the number of video ram channels (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the number of dram channels
- */
-static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32(mmMC_SHARED_CHMAP);
-
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- case 3:
- return 8;
- case 4:
- return 3;
- case 5:
- return 6;
- case 6:
- return 10;
- case 7:
- return 12;
- case 8:
- return 16;
- }
-}
-
-struct dce10_wm_params {
- u32 dram_channels; /* number of dram channels */
- u32 yclk; /* bandwidth per dram data pin in kHz */
- u32 sclk; /* engine clock in kHz */
- u32 disp_clk; /* display clock in kHz */
- u32 src_width; /* viewport width */
- u32 active_time; /* active display time in ns */
- u32 blank_time; /* blank time in ns */
- bool interlaced; /* mode is interlaced */
- fixed20_12 vsc; /* vertical scale ratio */
- u32 num_heads; /* number of active crtcs */
- u32 bytes_per_pixel; /* bytes per pixel display + overlay */
- u32 lb_size; /* line buffer allocated to pipe */
- u32 vtaps; /* vertical scaler taps */
-};
-
-/**
- * dce_v11_0_dram_bandwidth - get the dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the raw dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate raw DRAM Bandwidth */
- fixed20_12 dram_efficiency; /* 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- dram_efficiency.full = dfixed_const(7);
- dram_efficiency.full = dfixed_div(dram_efficiency, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
- *
- * @wm: watermark calculation data
- *
- * Calculate the dram bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth for display in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- /* Calculate DRAM Bandwidth and the part allocated to display. */
- fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
- disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_data_return_bandwidth - get the data return bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the data return bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the data return bandwidth in MBytes/s
- */
-static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display Data return Bandwidth */
- fixed20_12 return_efficiency; /* 0.8 */
- fixed20_12 sclk, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- sclk.full = dfixed_const(wm->sclk);
- sclk.full = dfixed_div(sclk, a);
- a.full = dfixed_const(10);
- return_efficiency.full = dfixed_const(8);
- return_efficiency.full = dfixed_div(return_efficiency, a);
- a.full = dfixed_const(32);
- bandwidth.full = dfixed_mul(a, sclk);
- bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the dmif bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dmif bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the DMIF Request Bandwidth */
- fixed20_12 disp_clk_request_efficiency; /* 0.8 */
- fixed20_12 disp_clk, bandwidth;
- fixed20_12 a, b;
-
- a.full = dfixed_const(1000);
- disp_clk.full = dfixed_const(wm->disp_clk);
- disp_clk.full = dfixed_div(disp_clk, a);
- a.full = dfixed_const(32);
- b.full = dfixed_mul(a, disp_clk);
-
- a.full = dfixed_const(10);
- disp_clk_request_efficiency.full = dfixed_const(8);
- disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
-
- bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_available_bandwidth - get the min available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the min available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the min available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
- u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
- u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
- u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
-
- return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
-}
-
-/**
- * dce_v11_0_average_bandwidth - get the average available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the average available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the average available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display mode Average Bandwidth
- * DisplayMode should contain the source and destination dimensions,
- * timing, etc.
- */
- fixed20_12 bpp;
- fixed20_12 line_time;
- fixed20_12 src_width;
- fixed20_12 bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- line_time.full = dfixed_const(wm->active_time + wm->blank_time);
- line_time.full = dfixed_div(line_time, a);
- bpp.full = dfixed_const(wm->bytes_per_pixel);
- src_width.full = dfixed_const(wm->src_width);
- bandwidth.full = dfixed_mul(src_width, bpp);
- bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
- bandwidth.full = dfixed_div(bandwidth, line_time);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_latency_watermark - get the latency watermark
- *
- * @wm: watermark calculation data
- *
- * Calculate the latency watermark (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the latency watermark in ns
- */
-static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
-{
- /* First calculate the latency in ns */
- u32 mc_latency = 2000; /* 2000 ns. */
- u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
- u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
- u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
- u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
- u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
- (wm->num_heads * cursor_line_pair_return_time);
- u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
- u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
- u32 tmp, dmif_size = 12288;
- fixed20_12 a, b, c;
-
- if (wm->num_heads == 0)
- return 0;
-
- a.full = dfixed_const(2);
- b.full = dfixed_const(1);
- if ((wm->vsc.full > a.full) ||
- ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
- (wm->vtaps >= 5) ||
- ((wm->vsc.full >= a.full) && wm->interlaced))
- max_src_lines_per_dst_line = 4;
- else
- max_src_lines_per_dst_line = 2;
-
- a.full = dfixed_const(available_bandwidth);
- b.full = dfixed_const(wm->num_heads);
- a.full = dfixed_div(a, b);
- tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
- tmp = min(dfixed_trunc(a), tmp);
-
- lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
-
- a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(lb_fill_bw);
- b.full = dfixed_div(c, b);
- a.full = dfixed_div(a, b);
- line_fill_time = dfixed_trunc(a);
-
- if (line_fill_time < wm->active_time)
- return latency;
- else
- return latency + (line_fill_time - wm->active_time);
-
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
- * average and available dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
- * average and available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * available bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_check_latency_hiding - check latency hiding
- *
- * @wm: watermark calculation data
- *
- * Check latency hiding (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
-{
- u32 lb_partitions = wm->lb_size / wm->src_width;
- u32 line_time = wm->active_time + wm->blank_time;
- u32 latency_tolerant_lines;
- u32 latency_hiding;
- fixed20_12 a;
-
- a.full = dfixed_const(1);
- if (wm->vsc.full > a.full)
- latency_tolerant_lines = 1;
- else {
- if (lb_partitions <= (wm->vtaps + 1))
- latency_tolerant_lines = 1;
- else
- latency_tolerant_lines = 2;
- }
-
- latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
-
- if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_program_watermarks - program display watermarks
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @lb_size: line buffer size
- * @num_heads: number of display controllers in use
- *
- * Calculate and program the display watermarks for the
- * selected display controller (CIK).
- */
-static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- u32 lb_size, u32 num_heads)
-{
- struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
- struct dce10_wm_params wm_low, wm_high;
- u32 active_time;
- u32 line_time = 0;
- u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
-
- if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
- (u32)mode->clock);
- line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
- (u32)mode->clock);
- line_time = min_t(u32, line_time, 65535);
-
- /* watermark for high clocks */
- if (adev->pm.dpm_enabled) {
- wm_high.yclk =
- amdgpu_dpm_get_mclk(adev, false) * 10;
- wm_high.sclk =
- amdgpu_dpm_get_sclk(adev, false) * 10;
- } else {
- wm_high.yclk = adev->pm.current_mclk * 10;
- wm_high.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_high.disp_clk = mode->clock;
- wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = active_time;
- wm_high.blank_time = line_time - wm_high.active_time;
- wm_high.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_high.interlaced = true;
- wm_high.vsc = amdgpu_crtc->vsc;
- wm_high.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_high.vtaps = 2;
- wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_high.lb_size = lb_size;
- wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_high.num_heads = num_heads;
-
- /* set for high clocks */
- latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
- !dce_v11_0_check_latency_hiding(&wm_high) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
-
- /* watermark for low clocks */
- if (adev->pm.dpm_enabled) {
- wm_low.yclk =
- amdgpu_dpm_get_mclk(adev, true) * 10;
- wm_low.sclk =
- amdgpu_dpm_get_sclk(adev, true) * 10;
- } else {
- wm_low.yclk = adev->pm.current_mclk * 10;
- wm_low.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_low.disp_clk = mode->clock;
- wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = active_time;
- wm_low.blank_time = line_time - wm_low.active_time;
- wm_low.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_low.interlaced = true;
- wm_low.vsc = amdgpu_crtc->vsc;
- wm_low.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_low.vtaps = 2;
- wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_low.lb_size = lb_size;
- wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_low.num_heads = num_heads;
-
- /* set for low clocks */
- latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
- !dce_v11_0_check_latency_hiding(&wm_low) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
- lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
- }
-
- /* select wm A */
- wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* select wm B */
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* restore original selection */
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
-
- /* save values for DPM */
- amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
- /* Save number of lines the linebuffer leads before the scanout */
- amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
-}
-
-/**
- * dce_v11_0_bandwidth_update - program display watermarks
- *
- * @adev: amdgpu_device pointer
- *
- * Calculate and program the display watermarks and line
- * buffer allocation (CIK).
- */
-static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
-{
- struct drm_display_mode *mode = NULL;
- u32 num_heads = 0, lb_size;
- int i;
-
- amdgpu_display_update_priority(adev);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i]->base.enabled)
- num_heads++;
- }
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- mode = &adev->mode_info.crtcs[i]->base.mode;
- lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
- dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
- lb_size, num_heads);
- }
-}
-
-static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
-{
- int i;
- u32 offset, tmp;
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- offset = adev->mode_info.audio.pin[i].offset;
- tmp = RREG32_AUDIO_ENDPT(offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
- if (((tmp &
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
- adev->mode_info.audio.pin[i].connected = false;
- else
- adev->mode_info.audio.pin[i].connected = true;
- }
-}
-
-static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
-{
- int i;
-
- dce_v11_0_audio_get_connected_pins(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- if (adev->mode_info.audio.pin[i].connected)
- return &adev->mode_info.audio.pin[i];
- }
- DRM_ERROR("No connected audio pins found!\n");
- return NULL;
-}
-
-static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
- WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
-}
-
-static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- int interlace = 0;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- interlace = 1;
- if (connector->latency_present[interlace]) {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, connector->video_latency[interlace]);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, connector->audio_latency[interlace]);
- } else {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, 0);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, 0);
- }
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
-}
-
-static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- u8 *sadb = NULL;
- int sad_count;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- sad_count = 0;
- }
-
- /* program the speaker allocation */
- tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- DP_CONNECTION, 0);
- /* set HDMI mode */
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- HDMI_CONNECTION, 1);
- if (sad_count)
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, sadb[0]);
- else
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, 5); /* stereo */
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
-}
-
-static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct drm_connector_list_iter iter;
- struct amdgpu_connector *amdgpu_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
-
- static const u16 eld_reg_to_type[][2] = {
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- };
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- drm_connector_list_iter_begin(dev, &iter);
- drm_for_each_connector_iter(connector, &iter) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
- drm_connector_list_iter_end(&iter);
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
- if (sad_count < 0)
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- if (sad_count <= 0)
- return;
- BUG_ON(!sads);
-
- for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
- u8 stereo_freqs = 0;
- int max_channels = -1;
- int j;
-
- for (j = 0; j < sad_count; j++) {
- struct cea_sad *sad = &sads[j];
-
- if (sad->format == eld_reg_to_type[i][1]) {
- if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
- max_channels = sad->channels;
- }
-
- if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- stereo_freqs |= sad->freq;
- else
- break;
- }
- }
-
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
- }
-
- kfree(sads);
-}
-
-static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
- struct amdgpu_audio_pin *pin,
- bool enable)
-{
- if (!pin)
- return;
-
- WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
-}
-
-static const u32 pin_offsets[] =
-{
- AUD0_REGISTER_OFFSET,
- AUD1_REGISTER_OFFSET,
- AUD2_REGISTER_OFFSET,
- AUD3_REGISTER_OFFSET,
- AUD4_REGISTER_OFFSET,
- AUD5_REGISTER_OFFSET,
- AUD6_REGISTER_OFFSET,
- AUD7_REGISTER_OFFSET,
-};
-
-static int dce_v11_0_audio_init(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return 0;
-
- adev->mode_info.audio.enabled = true;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->mode_info.audio.num_pins = 7;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.audio.num_pins = 8;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.audio.num_pins = 6;
- break;
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- adev->mode_info.audio.pin[i].channels = -1;
- adev->mode_info.audio.pin[i].rate = -1;
- adev->mode_info.audio.pin[i].bits_per_sample = -1;
- adev->mode_info.audio.pin[i].status_bits = 0;
- adev->mode_info.audio.pin[i].category_code = 0;
- adev->mode_info.audio.pin[i].connected = false;
- adev->mode_info.audio.pin[i].offset = pin_offsets[i];
- adev->mode_info.audio.pin[i].id = i;
- /* disable audio. it will be set up later */
- /* XXX remove once we switch to ip funcs */
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- return 0;
-}
-
-static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
-{
- if (!amdgpu_audio)
- return;
-
- if (!adev->mode_info.audio.enabled)
- return;
-
- adev->mode_info.audio.enabled = false;
-}
-
-/*
- * update the N and CTS parameters for a given pixel clock rate
- */
-static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
- WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
- WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
- WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
- WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
- WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
- WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
-}
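The N/CTS pairs written above come from amdgpu_afmt_acr(); per the HDMI spec they satisfy 128 * f_s = f_pixel * N / CTS. A minimal sketch of that relationship (the helper name is ours), using the spec-recommended N = 6144 for 48 kHz:

#include <linux/math64.h>

/* CTS chosen so that 128 * f_s = f_pixel * N / CTS */
static u32 hdmi_acr_cts(u64 pixel_clock_hz, u32 n, u32 sample_rate_hz)
{
	return (u32)div_u64(pixel_clock_hz * n, 128 * sample_rate_hz);
}

/* e.g. a 74.25 MHz mode at 48 kHz: hdmi_acr_cts(74250000, 6144, 48000) == 74250 */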
-
-/*
- * build a HDMI Video Info Frame
- */
-static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- uint8_t *frame = buffer + 3;
- uint8_t *header = buffer;
-
- WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
- frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
- frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
- frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
- frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
-}
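Each AFMT_AVI_INFOn write above packs four consecutive payload bytes in little-endian order, with INFO3 also carrying the infoframe version byte (header[1]) in its top byte. The packing, expressed as a hypothetical helper:

/* pack four consecutive payload bytes little-endian into one register word */
static u32 avi_pack_le32(const u8 *p)
{
	return p[0] | (p[1] << 8) | (p[2] << 16) | ((u32)p[3] << 24);
}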
-
-static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- u32 dto_phase = 24 * 1000;
- u32 dto_modulo = clock;
- u32 tmp;
-
- if (!dig || !dig->afmt)
- return;
-
- /* XXX two dtos; generally use dto0 for hdmi */
- /* Express [24MHz / target pixel clock] as an exact rational
- * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
- * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
- */
- tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
- tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
- amdgpu_crtc->crtc_id);
- WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
- WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
- WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
-}
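A worked example of the ratio, assuming mode->clock is in kHz as elsewhere in this file: a 148.5 MHz mode gives PHASE = 24000 and MODULE = 148500, so the DTO output is pixel_clock * PHASE / MODULE = 148500 kHz * 24000 / 148500 = 24000 kHz, i.e. the 24 MHz audio reference.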
-
-/*
- * update the info frames with the data from the current display mode
- */
-static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- struct hdmi_avi_infoframe frame;
- ssize_t err;
- u32 tmp;
- int bpc = 8;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (!dig->afmt->enabled)
- return;
-
- /* hdmi deep color mode general control packets setup, if bpc > 8 */
- if (encoder->crtc) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- bpc = amdgpu_crtc->bpc;
- }
-
- /* disable audio prior to setting up hw */
- dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
-
- dce_v11_0_audio_set_dto(encoder, mode->clock);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
-
- WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
-
- tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
- switch (bpc) {
- case 0:
- case 6:
- case 8:
- case 16:
- default:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
- DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
- connector->name, bpc);
- break;
- case 10:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
- DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
- connector->name);
- break;
- case 12:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
- DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
- connector->name);
- break;
- }
- WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable audio info frames (frames won't be set until audio is enabled) */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
- WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- /* anything other than 0 */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
-
- tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* set the default audio delay */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
- /* should be sufficient for all audio modes and small enough for all hblanks */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
- WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* allow 60958 channel status fields to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
- if (bpc > 8)
- /* clear SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
- else
- /* select SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
- /* allow hw to send ACR packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
- WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- dce_v11_0_afmt_update_ACR(encoder, mode->clock);
-
- tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
- WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
- WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
- WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
-
- dce_v11_0_audio_write_speaker_allocation(encoder);
-
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
- (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
-
- dce_v11_0_afmt_audio_select_pin(encoder);
- dce_v11_0_audio_write_sad_regs(encoder);
- dce_v11_0_audio_write_latency_fields(encoder, mode);
-
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
- if (err < 0) {
- DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
- return;
- }
-
- err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
- return;
- }
-
- dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable AVI info frames */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
- /* required for avi info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* send audio packets */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
- WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
- WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
- WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
-
- /* enable audio after setting up hw */
- dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
-}
-
-static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (enable && dig->afmt->enabled)
- return;
- if (!enable && !dig->afmt->enabled)
- return;
-
- if (!enable && dig->afmt->pin) {
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
- dig->afmt->pin = NULL;
- }
-
- dig->afmt->enabled = enable;
-
- DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
- enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
-}
-
-static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++)
- adev->mode_info.afmt[i] = NULL;
-
- /* DCE11 has audio blocks tied to DIG encoders */
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
- if (adev->mode_info.afmt[i]) {
- adev->mode_info.afmt[i]->offset = dig_offsets[i];
- adev->mode_info.afmt[i]->id = i;
- } else {
- int j;
- for (j = 0; j < i; j++) {
- kfree(adev->mode_info.afmt[j]);
- adev->mode_info.afmt[j] = NULL;
- }
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- kfree(adev->mode_info.afmt[i]);
- adev->mode_info.afmt[i] = NULL;
- }
-}
-
-static const u32 vga_control_regs[6] =
-{
- mmD1VGA_CONTROL,
- mmD2VGA_CONTROL,
- mmD3VGA_CONTROL,
- mmD4VGA_CONTROL,
- mmD5VGA_CONTROL,
- mmD6VGA_CONTROL,
-};
-
-static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 vga_control;
-
- vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
- if (enable)
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
- else
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
-}
-
-static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (enable)
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
- else
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
-}
-
-static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct drm_framebuffer *target_fb;
- struct drm_gem_object *obj;
- struct amdgpu_bo *abo;
- uint64_t fb_location, tiling_flags;
- uint32_t fb_format, fb_pitch_pixels;
- u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
- u32 pipe_config;
- u32 tmp, viewport_w, viewport_h;
- int r;
- bool bypass_lut = false;
-
- /* no fb bound */
- if (!atomic && !crtc->primary->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
- return 0;
- }
-
- if (atomic)
- target_fb = fb;
- else
- target_fb = crtc->primary->fb;
-
- /* If atomic, assume fb object is pinned & idle & fenced and
- * just update base pointers
- */
- obj = target_fb->obj[0];
- abo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(abo, false);
- if (unlikely(r != 0))
- return r;
-
- if (!atomic) {
- abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(abo);
- return -EINVAL;
- }
- }
- fb_location = amdgpu_bo_gpu_offset(abo);
-
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
- amdgpu_bo_unreserve(abo);
-
- pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
-
- switch (target_fb->format->format) {
- case DRM_FORMAT_C8:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- break;
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_ARGB4444:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_BGRA5551:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_RGB565:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_BGRA1010102:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- default:
- DRM_ERROR("Unsupported screen format %p4cc\n",
- &target_fb->format->format);
- return -EINVAL;
- }
-
- if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
- unsigned bankw, bankh, mtaspect, tile_split, num_banks;
-
- bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
- bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
- mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
- tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
- num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_2D_TILED_THIN1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
- tile_split);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
- mtaspect);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
- ADDR_SURF_MICRO_TILING_DISPLAY);
- } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_1D_TILED_THIN1);
- }
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
- pipe_config);
-
- dce_v11_0_vga_enable(crtc, false);
-
- /* Make sure surface address is updated at vertical blank rather than
- * horizontal blank
- */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
- WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
-
- /*
- * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
- * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
- * retain the full precision throughout the pipeline.
- */
- tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
- if (bypass_lut)
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
- WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
-
- if (bypass_lut)
- DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
-
- WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
- WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
-
- fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
-
- dce_v11_0_grph_enable(crtc, true);
-
- WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
- target_fb->height);
-
- x &= ~3;
- y &= ~1;
- WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
- (x << 16) | y);
- viewport_w = crtc->mode.hdisplay;
- viewport_h = (crtc->mode.vdisplay + 1) & ~1;
- WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
- (viewport_w << 16) | viewport_h);
-
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
-
- if (!atomic && fb && fb != crtc->primary->fb) {
- abo = gem_to_amdgpu_bo(fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- return r;
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
-
- /* Bytes per pixel may have changed */
- dce_v11_0_bandwidth_update(adev);
-
- return 0;
-}
-
-static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- u32 tmp;
-
- tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
- WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u16 *r, *g, *b;
- int i;
- u32 tmp;
-
- DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
-
- tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
- WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
- WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
- WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
-
- WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
-
- WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
- for (i = 0; i < 256; i++) {
- WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- ((*r++ & 0xffc0) << 14) |
- ((*g++ & 0xffc0) << 4) |
- (*b++ >> 6));
- }
-
- tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
- WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
- WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
- WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
- WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- /* XXX match this to the depth of the crtc fmt block, move to modeset? */
- WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
- /* XXX this only needs to be programmed once per crtc at startup,
- * not sure where the best place for it is
- */
- tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
- WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
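The gamma upload above packs each 16-bit R/G/B triple into a single 10:10:10 DC_LUT_30_COLOR word by keeping the top 10 bits of each component. A sketch (the helper name is ours):

/* 10:10:10 LUT entry: R in bits 29:20, G in bits 19:10, B in bits 9:0 */
static u32 lut_pack_30bpp(u16 r, u16 g, u16 b)
{
	return ((u32)(r & 0xffc0) << 14) |
	       ((u32)(g & 0xffc0) << 4) |
	       (b >> 6);
}

/* e.g. full white: lut_pack_30bpp(0xffff, 0xffff, 0xffff) == 0x3fffffff */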
-
-static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return 1;
- else
- return 0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return 6;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return 0;
- }
-}
-
-/**
- * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
- *
- * @crtc: drm crtc
- *
- * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
- * a single PPLL can be used for all DP crtcs/encoders. For non-DP
- * monitors a dedicated PPLL must be used. If a particular board has
- * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
- * as there is no need to program the PLL itself. If we are not able to
- * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
- * avoid messing up an existing monitor.
- *
- * Asic specific PLL information
- *
- * DCE 10.x
- * Tonga
- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
- * CI
- * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
- *
- */
-static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- u32 pll_in_use;
- int pll;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return ATOM_DP_DTO;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL1;
- else
- return ATOM_COMBOPHY_PLL0;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL3;
- else
- return ATOM_COMBOPHY_PLL2;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL5;
- else
- return ATOM_COMBOPHY_PLL4;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return ATOM_PPLL_INVALID;
- }
- }
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
- if (adev->clock.dp_extclk)
- /* skip PPLL programming if using ext clock */
- return ATOM_PPLL_INVALID;
- else {
- /* use the same PPLL for all DP monitors */
- pll = amdgpu_pll_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
-
- /* XXX need to determine what plls are available on each DCE11 part */
- pll_in_use = amdgpu_pll_get_use_mask(crtc);
- if (adev->flags & AMD_IS_APU) {
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- }
-}
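The fallback path above is a first-free scan over the busy-PLL bitmask returned by amdgpu_pll_get_use_mask(). The same scan, factored as a hypothetical helper to make the APU/dGPU preference orders explicit:

static u32 pick_first_free_ppll(u32 pll_in_use, const u32 *order, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (!(pll_in_use & (1 << order[i])))
			return order[i];

	DRM_ERROR("unable to allocate a PPLL\n");
	return ATOM_PPLL_INVALID;
}

/* dGPU preference: { ATOM_PPLL2, ATOM_PPLL1, ATOM_PPLL0 };
 * APU preference:  { ATOM_PPLL1, ATOM_PPLL0 } */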
-
-static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
-{
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- uint32_t cur_lock;
-
- cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
- if (lock)
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
- else
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
- WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
-}
-
-static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- u32 tmp;
-
- WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(amdgpu_crtc->cursor_addr));
- WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(amdgpu_crtc->cursor_addr));
-
- tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
- int x, int y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = drm_to_adev(crtc->dev);
- int xorigin = 0, yorigin = 0;
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
-
- /* avivo cursors are offset into the total surface */
- x += crtc->x;
- y += crtc->y;
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
- if (x < 0) {
- xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
- x = 0;
- }
- if (y < 0) {
- yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
- y = 0;
- }
-
- WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
- WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- return 0;
-}
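A worked example of the clamping above: a cursor requested at x = -10 is programmed with CUR_POSITION x = 0 and hot-spot xorigin = 10, so the hardware clips the leftmost 10 columns of the cursor image rather than wrapping the position.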
-
-static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- int ret;
-
- dce_v11_0_lock_cursor(crtc, true);
- ret = dce_v11_0_cursor_move_locked(crtc, x, y);
- dce_v11_0_lock_cursor(crtc, false);
-
- return ret;
-}
-
-static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x,
- int32_t hot_y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_gem_object *obj;
- struct amdgpu_bo *aobj;
- int ret;
-
- if (!handle) {
- /* turn off cursor */
- dce_v11_0_hide_cursor(crtc);
- obj = NULL;
- goto unpin;
- }
-
- if ((width > amdgpu_crtc->max_cursor_width) ||
- (height > amdgpu_crtc->max_cursor_height)) {
- DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
- return -ENOENT;
- }
-
- aobj = gem_to_amdgpu_bo(obj);
- ret = amdgpu_bo_reserve(aobj, false);
- if (ret != 0) {
- drm_gem_object_put(obj);
- return ret;
- }
-
- aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_bo_unreserve(aobj);
- if (ret) {
- DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put(obj);
- return ret;
- }
- amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-
- dce_v11_0_lock_cursor(crtc, true);
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height ||
- hot_x != amdgpu_crtc->cursor_hot_x ||
- hot_y != amdgpu_crtc->cursor_hot_y) {
- int x, y;
-
- x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
- y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
-
- dce_v11_0_cursor_move_locked(crtc, x, y);
-
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- dce_v11_0_show_cursor(crtc);
- dce_v11_0_lock_cursor(crtc, false);
-
-unpin:
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, true);
- if (likely(ret == 0)) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- drm_gem_object_put(amdgpu_crtc->cursor_bo);
- }
-
- amdgpu_crtc->cursor_bo = obj;
- return 0;
-}
-
-static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- dce_v11_0_lock_cursor(crtc, true);
-
- dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
- amdgpu_crtc->cursor_y);
-
- dce_v11_0_show_cursor(crtc);
-
- dce_v11_0_lock_cursor(crtc, false);
- }
-}
-
-static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- dce_v11_0_crtc_load_lut(crtc);
-
- return 0;
-}
-
-static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(amdgpu_crtc);
-}
-
-static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
- .cursor_set2 = dce_v11_0_crtc_cursor_set2,
- .cursor_move = dce_v11_0_crtc_cursor_move,
- .gamma_set = dce_v11_0_crtc_gamma_set,
- .set_config = amdgpu_display_crtc_set_config,
- .destroy = dce_v11_0_crtc_destroy,
- .page_flip_target = amdgpu_display_crtc_page_flip_target,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-};
-
-static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- unsigned type;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- amdgpu_crtc->enabled = true;
- amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
- dce_v11_0_vga_enable(crtc, false);
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_display_crtc_idx_to_irq_type(adev,
- amdgpu_crtc->crtc_id);
- amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_crtc_vblank_on(crtc);
- dce_v11_0_crtc_load_lut(crtc);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- drm_crtc_vblank_off(crtc);
- if (amdgpu_crtc->enabled) {
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, false);
- }
- amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
- amdgpu_crtc->enabled = false;
- break;
- }
- /* adjust pm to dpms */
- amdgpu_dpm_compute_clocks(adev);
-}
-
-static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
-{
- /* disable crtc pair power gating before programming */
- amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
- amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
-{
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
-}
-
-static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_atom_ss ss;
- int i;
-
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_bo *abo;
-
- abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
- /* disable the GRPH */
- dce_v11_0_grph_enable(crtc, false);
-
- amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i] &&
- adev->mode_info.crtcs[i]->enabled &&
- i != amdgpu_crtc->crtc_id &&
- amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
- /* another crtc is using this pll; don't turn
- * off the pll
- */
- goto done;
- }
- }
-
- switch (amdgpu_crtc->pll_id) {
- case ATOM_PPLL0:
- case ATOM_PPLL1:
- case ATOM_PPLL2:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- case ATOM_COMBOPHY_PLL0:
- case ATOM_COMBOPHY_PLL1:
- case ATOM_COMBOPHY_PLL2:
- case ATOM_COMBOPHY_PLL3:
- case ATOM_COMBOPHY_PLL4:
- case ATOM_COMBOPHY_PLL5:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- default:
- break;
- }
-done:
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
-}
-
-static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- if (!amdgpu_crtc->adjusted_clock)
- return -EINVAL;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- int encoder_mode =
- amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
-
- /* SetPixelClock calculates the plls and ss values now */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
- amdgpu_crtc->pll_id,
- encoder_mode, amdgpu_encoder->encoder_id,
- adjusted_mode->clock, 0, 0, 0, 0,
- amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
- } else {
- amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
- }
- amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
- dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
- amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
- amdgpu_atombios_crtc_scaler_setup(crtc);
- dce_v11_0_cursor_reset(crtc);
- /* update the hw version for dpm */
- amdgpu_crtc->hw_mode = *adjusted_mode;
-
- return 0;
-}
-
-static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
- if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
- return false;
- if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
- return false;
- /* pick pll */
- amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
- /* if we can't get a PPLL for a non-DP encoder, fail */
- if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
- !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return false;
-
- return true;
-}
-
-static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
-}
-
-static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
- .dpms = dce_v11_0_crtc_dpms,
- .mode_fixup = dce_v11_0_crtc_mode_fixup,
- .mode_set = dce_v11_0_crtc_mode_set,
- .mode_set_base = dce_v11_0_crtc_set_base,
- .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
- .prepare = dce_v11_0_crtc_prepare,
- .commit = dce_v11_0_crtc_commit,
- .disable = dce_v11_0_crtc_disable,
- .get_scanout_position = amdgpu_crtc_get_scanout_position,
-};
-
-static void dce_v11_0_panic_flush(struct drm_plane *plane)
-{
- struct drm_framebuffer *fb;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_device *adev;
- uint32_t fb_format;
-
- if (!plane->fb)
- return;
-
- fb = plane->fb;
- amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
- adev = drm_to_adev(fb->dev);
-
- /* Disable DC tiling */
- fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
- fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
-}
-
-static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = {
- .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
- .panic_flush = dce_v11_0_panic_flush,
-};
-
-static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
-{
- struct amdgpu_crtc *amdgpu_crtc;
-
- amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
- (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
- if (amdgpu_crtc == NULL)
- return -ENOMEM;
-
- drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
-
- drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
- amdgpu_crtc->crtc_id = index;
- adev->mode_info.crtcs[index] = amdgpu_crtc;
-
- amdgpu_crtc->max_cursor_width = 128;
- amdgpu_crtc->max_cursor_height = 128;
- adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
-
- switch (amdgpu_crtc->crtc_id) {
- case 0:
- default:
- amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
- break;
- case 1:
- amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
- break;
- case 2:
- amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
- break;
- case 3:
- amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
- break;
- case 4:
- amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
- break;
- case 5:
- amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
- break;
- }
-
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
- drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs);
-
- return 0;
-}
-
-static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
- adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
-
- dce_v11_0_set_display_funcs(adev);
-
- adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_STONEY:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_POLARIS10:
- case CHIP_VEGAM:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.num_hpd = 5;
- adev->mode_info.num_dig = 5;
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
-
- dce_v11_0_set_irq_funcs(adev);
-
- return 0;
-}
-
-static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
-{
- int r, i;
- struct amdgpu_device *adev = ip_block->adev;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
- if (r)
- return r;
- }
-
- for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
- if (r)
- return r;
- }
-
- /* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
- if (r)
- return r;
-
- adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
-
- adev_to_drm(adev)->mode_config.async_page_flip = true;
-
- adev_to_drm(adev)->mode_config.max_width = 16384;
- adev_to_drm(adev)->mode_config.max_height = 16384;
-
- adev_to_drm(adev)->mode_config.preferred_depth = 24;
- adev_to_drm(adev)->mode_config.prefer_shadow = 1;
-
- adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
-
- r = amdgpu_display_modeset_create_props(adev);
- if (r)
- return r;
-
- /* allocate crtcs */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = dce_v11_0_crtc_init(adev, i);
- if (r)
- return r;
- }
-
- if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_display_print_display_setup(adev_to_drm(adev));
- else
- return -EINVAL;
-
- /* setup afmt */
- r = dce_v11_0_afmt_init(adev);
- if (r)
- return r;
-
- r = dce_v11_0_audio_init(adev);
- if (r)
- return r;
-
- /* Disable vblank IRQs aggressively for power-saving */
- /* XXX: can this be enabled for DC? */
- adev_to_drm(adev)->vblank_disable_immediate = true;
-
- r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
- if (r)
- return r;
-
- INIT_DELAYED_WORK(&adev->hotplug_work,
- amdgpu_display_hotplug_work_func);
-
- drm_kms_helper_poll_init(adev_to_drm(adev));
-
- adev->mode_info.mode_config_initialized = true;
- return 0;
-}
-
-static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
-
- drm_edid_free(adev->mode_info.bios_hardcoded_edid);
-
- drm_kms_helper_poll_fini(adev_to_drm(adev));
-
- dce_v11_0_audio_fini(adev);
-
- dce_v11_0_afmt_fini(adev);
-
- drm_mode_config_cleanup(adev_to_drm(adev));
- adev->mode_info.mode_config_initialized = false;
-
- return 0;
-}
-
-static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_init_golden_registers(adev);
-
- /* disable vga render */
- dce_v11_0_set_vga_render_state(adev, false);
- /* init dig PHYs, disp eng pll */
- amdgpu_atombios_crtc_powergate_init(adev);
- amdgpu_atombios_encoder_init_dig(adev);
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12) ||
- (adev->asic_type == CHIP_VEGAM)) {
- amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
- DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
- amdgpu_atombios_crtc_set_dce_clock(adev, 0,
- DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
- } else {
- amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
- }
-
- /* initialize hpd */
- dce_v11_0_hpd_init(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_init(adev);
-
- return 0;
-}
-
-static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
-{
- int i;
- struct amdgpu_device *adev = ip_block->adev;
-
- dce_v11_0_hpd_fini(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_fini(adev);
-
- flush_delayed_work(&adev->hotplug_work);
-
- return 0;
-}
-
-static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int r;
-
- r = amdgpu_display_suspend_helper(adev);
- if (r)
- return r;
-
- adev->mode_info.bl_level =
- amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
-
- return dce_v11_0_hw_fini(ip_block);
-}
-
-static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int ret;
-
- amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
- adev->mode_info.bl_level);
-
- ret = dce_v11_0_hw_init(ip_block);
-
- /* turn on the BL */
- if (adev->mode_info.bl_encoder) {
- u8 bl_level = amdgpu_display_backlight_get_level(adev,
- adev->mode_info.bl_encoder);
- amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
- bl_level);
- }
- if (ret)
- return ret;
-
- return amdgpu_display_resume_helper(adev);
-}
-
-static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block)
-{
- return true;
-}
-
-static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
-{
- u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = ip_block->adev;
-
- if (dce_v11_0_is_display_hung(adev))
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
-
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
- return 0;
-}
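
The sequence above is the usual assert/settle/deassert soft-reset pattern: OR the reset bits in, read the register back to post the write, wait, clear the bits, read back, and wait again. A minimal sketch of the pattern in isolation (hypothetical helper name; RREG32/WREG32/udelay as used above):

	/* Hypothetical helper illustrating the pattern; not part of the driver. */
	static void srbm_toggle_soft_reset(struct amdgpu_device *adev, u32 mask)
	{
		u32 tmp = RREG32(mmSRBM_SOFT_RESET);

		WREG32(mmSRBM_SOFT_RESET, tmp | mask);	/* assert reset */
		RREG32(mmSRBM_SOFT_RESET);		/* posting read */
		udelay(50);
		WREG32(mmSRBM_SOFT_RESET, tmp & ~mask);	/* deassert */
		RREG32(mmSRBM_SOFT_RESET);
		udelay(50);				/* let things settle */
	}
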
-
-static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned hpd,
- enum amdgpu_interrupt_state state)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hpd %d\n", hpd);
- return 0;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK2:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK3:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK4:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK5:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK6:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE1:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE2:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE3:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE4:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE5:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE6:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
- break;
- default:
- break;
- }
- return 0;
-}
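
If the AMDGPU_CRTC_IRQ_VBLANK1..6 and VLINE1..6 enumerators are consecutive (an assumption the switch above deliberately does not rely on), the dispatch reduces to two range checks:

	/* Sketch only; valid only if the enum values are consecutive. */
	if (type >= AMDGPU_CRTC_IRQ_VLINE1 && type <= AMDGPU_CRTC_IRQ_VLINE6)
		dce_v11_0_set_crtc_vline_interrupt_state(adev,
				type - AMDGPU_CRTC_IRQ_VLINE1, state);
	else if (type >= AMDGPU_CRTC_IRQ_VBLANK1 && type <= AMDGPU_CRTC_IRQ_VBLANK6)
		dce_v11_0_set_crtc_vblank_interrupt_state(adev,
				type - AMDGPU_CRTC_IRQ_VBLANK1, state);
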
-
-static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 reg;
-
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
-
- reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
- if (state == AMDGPU_IRQ_STATE_DISABLE)
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
- else
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
-
- return 0;
-}
-
-static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned long flags;
- unsigned crtc_id;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_flip_work *works;
-
- crtc_id = (entry->src_id - 8) >> 1;
- amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
-
- if (crtc_id >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
- return -EINVAL;
- }
-
- if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
- WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
-
- /* the IRQ can fire while the CRTC is still being initialized */
- if (amdgpu_crtc == NULL)
- return 0;
-
- spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
- DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
- "AMDGPU_FLIP_SUBMITTED(%d)\n",
- amdgpu_crtc->pflip_status,
- AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
- return 0;
- }
-
- /* page flip completed. clean up */
- amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
- amdgpu_crtc->pflip_works = NULL;
-
- /* wake up userspace */
- if (works->event)
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
-
- spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
-
- drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
-
- return 0;
-}
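
The (entry->src_id - 8) >> 1 decode mirrors the registration loop in sw_init, which requests one even pageflip source id per CRTC, starting at VISLANDS30_IV_SRCID_D1_GRPH_PFLIP and stepping by two. Spelled out (the value 8 for that macro is implied by the hard-coded subtraction above):

	/* src_id  8 -> crtc 0
	 * src_id 10 -> crtc 1
	 *   ...
	 * src_id 18 -> crtc 5
	 * i.e. crtc_id = (src_id - VISLANDS30_IV_SRCID_D1_GRPH_PFLIP) / 2
	 */
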
-
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
- int hpd)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hpd %d\n", hpd);
- return;
- }
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
- WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
- WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = entry->src_id - 1;
- uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
- crtc);
-
- switch (entry->src_data[0]) {
- case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank)
- dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev_to_drm(adev), crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
- break;
- case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline)
- dce_v11_0_crtc_vline_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
- break;
- default:
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- uint32_t disp_int, mask;
- unsigned hpd;
-
- if (entry->src_data[0] >= adev->mode_info.num_hpd) {
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- return 0;
- }
-
- hpd = entry->src_data[0];
- disp_int = RREG32(interrupt_status_offsets[hpd].reg);
- mask = interrupt_status_offsets[hpd].hpd;
-
- if (disp_int & mask) {
- dce_v11_0_hpd_int_ack(adev, hpd);
- schedule_delayed_work(&adev->hotplug_work, 0);
- DRM_DEBUG("IH: HPD%d\n", hpd + 1);
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
- .name = "dce_v11_0",
- .early_init = dce_v11_0_early_init,
- .sw_init = dce_v11_0_sw_init,
- .sw_fini = dce_v11_0_sw_fini,
- .hw_init = dce_v11_0_hw_init,
- .hw_fini = dce_v11_0_hw_fini,
- .suspend = dce_v11_0_suspend,
- .resume = dce_v11_0_resume,
- .is_idle = dce_v11_0_is_idle,
- .soft_reset = dce_v11_0_soft_reset,
- .set_clockgating_state = dce_v11_0_set_clockgating_state,
- .set_powergating_state = dce_v11_0_set_powergating_state,
-};
-
-static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
- amdgpu_encoder->pixel_clock = adjusted_mode->clock;
-
- /* need to call this here rather than in prepare() since we need some crtc info */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- /* set scaler clears this on some chips */
- dce_v11_0_set_interleave(encoder->crtc, mode);
-
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- dce_v11_0_afmt_enable(encoder, true);
- dce_v11_0_afmt_setmode(encoder, adjusted_mode);
- }
-}
-
-static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = drm_to_adev(encoder->dev);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
-
- if ((amdgpu_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
- ENCODER_OBJECT_ID_NONE)) {
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- if (dig) {
- dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
- if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
- dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
- }
- }
-
- amdgpu_atombios_scratch_regs_lock(adev, true);
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- /* select the clock/data port if it uses a router */
- if (amdgpu_connector->router.cd_valid)
- amdgpu_i2c_router_select_cd_port(amdgpu_connector);
-
- /* turn eDP panel on for mode set */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_atombios_encoder_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- }
-
- /* this is needed for the pll/ss setup to work correctly in some cases */
- amdgpu_atombios_encoder_set_crtc_source(encoder);
- /* set up the FMT blocks */
- dce_v11_0_program_fmt(encoder);
-}
-
-static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- /* need to call this here as we need the crtc set up */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- amdgpu_atombios_scratch_regs_lock(adev, false);
-}
-
-static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig;
-
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- if (amdgpu_atombios_encoder_is_digital(encoder)) {
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- dce_v11_0_afmt_enable(encoder, false);
- dig = amdgpu_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- amdgpu_encoder->active_device = 0;
-}
-
-/* these are handled by the primary encoders */
-static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
- .dpms = dce_v11_0_ext_dpms,
- .prepare = dce_v11_0_ext_prepare,
- .mode_set = dce_v11_0_ext_mode_set,
- .commit = dce_v11_0_ext_commit,
- .disable = dce_v11_0_ext_disable,
- /* no detect for TMDS/LVDS yet */
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .disable = dce_v11_0_encoder_disable,
- .detect = amdgpu_atombios_encoder_dig_detect,
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .detect = amdgpu_atombios_encoder_dac_detect,
-};
-
-static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
- kfree(amdgpu_encoder->enc_priv);
- drm_encoder_cleanup(encoder);
- kfree(amdgpu_encoder);
-}
-
-static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
- .destroy = dce_v11_0_encoder_destroy,
-};
-
-static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
-
- }
-
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
-
- encoder = &amdgpu_encoder->base;
- switch (adev->mode_info.num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 3:
- encoder->possible_crtcs = 0x7;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 5:
- encoder->possible_crtcs = 0x1f;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- amdgpu_encoder->enc_priv = NULL;
-
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- amdgpu_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
- } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- } else {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- }
- drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_SI170B:
- case ENCODER_OBJECT_ID_CH7303:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
- case ENCODER_OBJECT_ID_TITFP513:
- case ENCODER_OBJECT_ID_VT1623:
- case ENCODER_OBJECT_ID_HDMI_SI1930:
- case ENCODER_OBJECT_ID_TRAVIS:
- case ENCODER_OBJECT_ID_NUTMEG:
- /* these are handled by the primary encoders */
- amdgpu_encoder->is_ext_encoder = true;
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- else
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
- break;
- }
-}
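
For one to six CRTCs the possible_crtcs switch builds the dense mask (1 << num_crtc) - 1, with unexpected counts defaulting to two CRTCs (0x3). A closed-form sketch, assuming num_crtc is always in the 1..6 range:

	/* Sketch only; the original switch pins out-of-range counts to 0x3. */
	encoder->possible_crtcs = (1u << adev->mode_info.num_crtc) - 1;
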
-
-static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
- .bandwidth_update = &dce_v11_0_bandwidth_update,
- .vblank_get_counter = &dce_v11_0_vblank_get_counter,
- .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
- .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
- .hpd_sense = &dce_v11_0_hpd_sense,
- .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
- .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
- .page_flip = &dce_v11_0_page_flip,
- .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
- .add_encoder = &dce_v11_0_encoder_add,
- .add_connector = &amdgpu_connector_add,
-};
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
-{
- adev->mode_info.funcs = &dce_v11_0_display_funcs;
-}
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
- .set = dce_v11_0_set_crtc_irq_state,
- .process = dce_v11_0_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
- .set = dce_v11_0_set_pageflip_irq_state,
- .process = dce_v11_0_pageflip_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
- .set = dce_v11_0_set_hpd_irq_state,
- .process = dce_v11_0_hpd_irq,
-};
-
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
-{
- if (adev->mode_info.num_crtc > 0)
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
- else
- adev->crtc_irq.num_types = 0;
- adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
- adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
-
- adev->hpd_irq.num_types = adev->mode_info.num_hpd;
- adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
-}
-
-const struct amdgpu_ip_block_version dce_v11_0_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
-
-const struct amdgpu_ip_block_version dce_v11_2_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 81760a26f2ff..acc887a58518 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1034,7 +1034,6 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 19a265bd4d19..2ccd6aad8dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1096,8 +1096,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 7bd506f06eb1..d75b9940f248 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4075,7 +4075,7 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned int index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
memset(&ib, 0, sizeof(ib));
@@ -4322,8 +4322,7 @@ static u32 gfx_v10_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;
@@ -4957,7 +4956,8 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
@@ -7668,19 +7668,17 @@ static int gfx_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v10_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -9954,6 +9952,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v10_0_ring_emit_hdp_flush,
};
static void gfx_v10_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index c85de8c8f6f5..8a2ee2de390f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -603,7 +603,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -850,8 +850,7 @@ static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
int ctx_reg_offset;
@@ -1654,6 +1653,21 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
}
}
break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
+ adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex);
+ if (adev->gfx.pfp_fw_version >= 102 &&
+ adev->gfx.mec_fw_version >= 66 &&
+ adev->mes.fw_version[0] >= 128) {
+ adev->gfx.enable_cleaner_shader = true;
+ r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size);
+ if (r) {
+ adev->gfx.enable_cleaner_shader = false;
+ dev_err(adev->dev, "Failed to initialize cleaner shader\n");
+ }
+ }
+ break;
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 5, 1):
adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex;
@@ -1807,13 +1821,15 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(11, 0, 3):
if ((adev->gfx.me_fw_version >= 2280) &&
(adev->gfx.mec_fw_version >= 2410) &&
- !amdgpu_sriov_vf(adev)) {
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
break;
default:
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
@@ -2424,7 +2440,7 @@ static int gfx_v11_0_rlc_load_microcode(struct amdgpu_device *adev)
if (version_minor == 3)
gfx_v11_0_load_rlcp_rlcv_microcode(adev);
}
-
+
return 0;
}
@@ -3872,7 +3888,7 @@ static int gfx_v11_0_cp_compute_load_microcode(struct amdgpu_device *adev)
}
memcpy(fw, fw_data, fw_size);
-
+
amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
@@ -4643,8 +4659,7 @@ static int gfx_v11_0_gfxhub_enable(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
@@ -5849,8 +5864,6 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
- BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
-
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
@@ -5861,9 +5874,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
- if (vmid)
+ if (vmid && !ring->adev->gfx.rs64_enable)
gfx_v11_0_ring_emit_de_meta(ring,
- (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
+ !amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
}
amdgpu_ring_write(ring, header);
@@ -7307,6 +7320,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
.emit_wreg = gfx_v11_0_ring_emit_wreg,
.emit_reg_wait = gfx_v11_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v11_0_ring_emit_hdp_flush,
};
static void gfx_v11_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index fd44d5503e28..d01d2712cf57 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -497,7 +497,7 @@ static int gfx_v12_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *f = NULL;
unsigned index;
uint64_t gpu_addr;
- volatile uint32_t *cpu_ptr;
+ uint32_t *cpu_ptr;
long r;
/* MES KIQ fw hasn't indirect buffer support for now */
@@ -685,8 +685,7 @@ static u32 gfx_v12_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0, clustercount = 0, i;
const struct cs_section_def *sect = NULL;
@@ -1549,7 +1548,8 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(12, 0, 1):
if ((adev->gfx.me_fw_version >= 2660) &&
(adev->gfx.mec_fw_version >= 2920) &&
- !amdgpu_sriov_vf(adev)) {
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
}
@@ -3524,8 +3524,7 @@ static int gfx_v12_0_gfxhub_enable(struct amdgpu_device *adev)
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
/* TODO investigate why this and the hdp flush above is needed,
@@ -4421,8 +4420,6 @@ static void gfx_v12_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
u32 header, control = 0;
- BUG_ON(ib->flags & AMDGPU_IB_FLAG_CE);
-
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (vmid << 24);
@@ -5599,6 +5596,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
.emit_wreg = gfx_v12_0_ring_emit_wreg,
.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v12_0_ring_emit_hdp_flush,
};
static void gfx_v12_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 70d7a1f434c4..80565392313f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -86,7 +86,7 @@ MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
@@ -2354,7 +2354,7 @@ static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
const u32 *src_ptr;
- volatile u32 *dst_ptr;
+ u32 *dst_ptr;
u32 dws;
u64 reg_list_mc_addr;
const struct cs_section_def *cs_data;
@@ -2855,8 +2855,7 @@ static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -3103,6 +3102,11 @@ static int gfx_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2aa323dab34e..2b7aba22ecc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -883,7 +883,7 @@ static const u32 kalindi_rlc_save_restore_register_list[] = {
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
@@ -3882,8 +3882,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -4400,6 +4399,11 @@ static int gfx_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
gfx_v7_0_gpu_early_init(adev);
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 367449d8061b..1c87375e1dd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1220,8 +1220,7 @@ out:
return err;
}
-static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -2024,6 +2023,11 @@ static int gfx_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ adev->gfx.gfx_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
+ adev->gfx.compute_supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
+
return 0;
}
@@ -6940,6 +6944,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_rreg = gfx_v8_0_ring_emit_rreg,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
};
static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 20b30f4b3c7d..0148d7ff34d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1648,8 +1648,7 @@ static u32 gfx_v9_0_get_csb_size(struct amdgpu_device *adev)
return count;
}
-static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev,
- volatile u32 *buffer)
+static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer)
{
u32 count = 0;
@@ -2410,7 +2409,7 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
adev->gfx.compute_supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev) && !adev->debug_disable_gpu_ring_reset)
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0);
@@ -2650,6 +2649,9 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
!READ_ONCE(adev->barrier_has_auto_waitcnt));
WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp);
break;
+ case IP_VERSION(9, 4, 2):
+ gfx_v9_4_2_init_sq(adev);
+ break;
default:
break;
}
@@ -4172,19 +4174,17 @@ static int gfx_v9_0_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_0_cp_compute_enable(adev, false);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -7586,6 +7586,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
};
static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index c48cd47b531f..8058ea91ecaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -748,6 +748,18 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
}
}
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ if (adev->gfx.mec_fw_version >= 98) {
+ adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN;
+ data = RREG32_SOC15(GC, 0, regSQ_CONFIG1);
+ data = REG_SET_FIELD(data, SQ_CONFIG1, DISABLE_XNACK_CHECK_IN_RETRY_DISABLE, 1);
+ WREG32_SOC15(GC, 0, regSQ_CONFIG1, data);
+ }
+}
+
void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid,
uint32_t last_vmid)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
index 7584624b641c..a603724c1dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -28,6 +28,7 @@ void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
uint32_t first_vmid, uint32_t last_vmid);
void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
uint32_t die_id);
+void gfx_v9_4_2_init_sq(struct amdgpu_device *adev);
void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 51babf5c78c8..cbb74ffc4792 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -1149,14 +1149,16 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
if ((adev->gfx.mec_fw_version >= 155) &&
- !amdgpu_sriov_vf(adev)) {
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
break;
case IP_VERSION(9, 5, 0):
if ((adev->gfx.mec_fw_version >= 21) &&
- !amdgpu_sriov_vf(adev)) {
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset) {
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE;
}
@@ -2152,7 +2154,8 @@ static int gfx_v9_4_3_xcc_kiq_init_queue(struct amdgpu_ring *ring, int xcc_id)
return 0;
}
-static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, bool restore)
+static void gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id,
+ bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
@@ -2186,8 +2189,6 @@ static int gfx_v9_4_3_xcc_kcq_init_queue(struct amdgpu_ring *ring, int xcc_id, b
atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
}
-
- return 0;
}
static int gfx_v9_4_3_xcc_kcq_fini_register(struct amdgpu_device *adev, int xcc_id)
@@ -2220,7 +2221,7 @@ static int gfx_v9_4_3_xcc_kiq_resume(struct amdgpu_device *adev, int xcc_id)
static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_ring *ring;
- int i, r;
+ int i;
gfx_v9_4_3_xcc_cp_compute_enable(adev, true, xcc_id);
@@ -2228,9 +2229,7 @@ static int gfx_v9_4_3_xcc_kcq_resume(struct amdgpu_device *adev, int xcc_id)
ring = &adev->gfx.compute_ring[i + xcc_id *
adev->gfx.num_compute_rings];
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
- if (r)
- return r;
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, xcc_id, false);
}
return amdgpu_gfx_enable_kcq(adev, xcc_id);
@@ -2292,7 +2291,9 @@ static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
} else {
- if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+ if (adev->in_suspend)
+ amdgpu_xcp_restore_partition_mode(adev->xcp_mgr);
+ else if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
AMDGPU_XCP_FL_NONE) ==
AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
r = amdgpu_xcp_switch_partition_mode(
@@ -2461,19 +2462,17 @@ static int gfx_v9_4_3_soft_reset(struct amdgpu_ip_block *ip_block)
/* Disable MEC parsing/prefetching */
gfx_v9_4_3_xcc_cp_compute_enable(adev, false, 0);
- if (grbm_soft_reset) {
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- tmp |= grbm_soft_reset;
- dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- udelay(50);
+ udelay(50);
- tmp &= ~grbm_soft_reset;
- WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
- tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
- }
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, GET_INST(GC, 0), regGRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
@@ -3562,6 +3561,7 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int reset_mode = AMDGPU_RESET_TYPE_PER_QUEUE;
unsigned long flags;
int r;
@@ -3599,17 +3599,15 @@ pipe_reset:
if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE))
return -EOPNOTSUPP;
r = gfx_v9_4_3_reset_hw_pipe(ring);
+ reset_mode = AMDGPU_RESET_TYPE_PER_PIPE;
dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name,
r ? "failed" : "successfully");
if (r)
return r;
}
- r = gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
- if (r) {
- dev_err(adev->dev, "fail to init kcq\n");
- return r;
- }
+ gfx_v9_4_3_xcc_kcq_init_queue(ring, ring->xcc_id, true);
+
spin_lock_irqsave(&kiq->ring_lock, flags);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size);
if (r) {
@@ -3621,10 +3619,20 @@ pipe_reset:
r = amdgpu_ring_test_ring(kiq_ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
if (r) {
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE)
+ goto pipe_reset;
+
dev_err(adev->dev, "fail to remap queue\n");
return r;
}
+ if (reset_mode == AMDGPU_RESET_TYPE_PER_QUEUE) {
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ goto pipe_reset;
+ }
+
+
return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
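
The hunk above turns the per-queue reset into a two-level retry: a per-queue attempt that fails to remap the queue or to pass the ring test escalates (via the pipe_reset label) to a per-pipe reset before giving up. A condensed control-flow sketch, with hypothetical stand-ins for the KIQ remap and amdgpu_ring_test_ring calls:

	/* Sketch of the escalation path only; helper names are stand-ins. */
	int mode = AMDGPU_RESET_TYPE_PER_QUEUE;

retry:
	if (reset_and_remap(ring, mode) || ring_test(ring)) {
		if (mode == AMDGPU_RESET_TYPE_PER_QUEUE) {
			mode = AMDGPU_RESET_TYPE_PER_PIPE;	/* escalate once */
			goto retry;
		}
		return -EIO;	/* both levels failed */
	}
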
@@ -4788,6 +4796,7 @@ static const struct amdgpu_ring_funcs gfx_v9_4_3_ring_funcs_kiq = {
.emit_wreg = gfx_v9_4_3_ring_emit_wreg,
.emit_reg_wait = gfx_v9_4_3_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_4_3_ring_emit_reg_write_reg_wait,
+ .emit_hdp_flush = gfx_v9_4_3_ring_emit_hdp_flush,
};
static void gfx_v9_4_3_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 7923f491cf73..ce6e04242c52 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -103,8 +103,10 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
uint32_t vmhub_index = entry->client_id == SOC15_IH_CLIENTID_VMC ?
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
- bool retry_fault = !!(entry->src_data[1] & 0x80);
- bool write_fault = !!(entry->src_data[1] & 0x20);
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
struct amdgpu_task_info *task_info;
uint32_t status = 0;
u64 addr;
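
The hunk replaces the bare 0x80/0x20 masks on src_data[1] with named constants; the literals being removed imply their values:

	/* Assumed definitions, implied by the replaced literals; the
	 * defining header is not part of this diff. */
	#define AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY	0x80
	#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE	0x20
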
@@ -466,24 +468,6 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -508,21 +492,39 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
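
After this refactor the PTE bits are derived from the caller-supplied AMDGPU_VM_* flags rather than from the pre-encoded mapping->flags. A worked example against the new signature (illustrative only; the function is static to the file):

	uint64_t flags = 0;

	gmc_v10_0_get_vm_pte(adev, vm, bo,
			     AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_UC,
			     &flags);
	/* result: AMDGPU_PTE_EXECUTABLE set, MTYPE field = MTYPE_UC,
	 * AMDGPU_PTE_NOALLOC and the PRT bits clear */
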
@@ -563,7 +565,6 @@ static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
- .map_mtype = gmc_v10_0_map_mtype,
.get_vm_pde = gmc_v10_0_get_vm_pde,
.get_vm_pte = gmc_v10_0_get_vm_pte,
.get_vbios_fb_size = gmc_v10_0_get_vbios_fb_size,
@@ -964,8 +965,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
if (!adev->in_s0ix)
adev->gfxhub.funcs->set_fault_enable_default(adev, value);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index f15d691e9a20..ba59ee8e398a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -103,12 +103,41 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
uint32_t vmhub_index = entry->client_id == SOC21_IH_CLIENTID_VMC ?
AMDGPU_MMHUB0(0) : AMDGPU_GFXHUB(0);
struct amdgpu_vmhub *hub = &adev->vmhub[vmhub_index];
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0;
u64 addr;
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+ if (retry_fault) {
+ /* Returning 1 here also prevents sending the IV to the KFD */
+
+ /* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ entry->timestamp))
+ return 1;
+
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
+ /* Try to handle the recoverable page faults by filling page
+ * tables
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
+ return 1;
+ }
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
@@ -430,24 +459,6 @@ static void gmc_v11_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int
* 0 valid
*/
-static uint64_t gmc_v11_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_NV10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -472,21 +483,39 @@ static void gmc_v11_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_NV10(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_NOALLOC;
- *flags |= (mapping->flags & AMDGPU_PTE_NOALLOC);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_LOG;
@@ -527,7 +556,6 @@ static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v11_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v11_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v11_0_emit_pasid_mapping,
- .map_mtype = gmc_v11_0_map_mtype,
.get_vm_pde = gmc_v11_0_get_vm_pde,
.get_vm_pte = gmc_v11_0_get_vm_pte,
.get_vbios_fb_size = gmc_v11_0_get_vbios_fb_size,
@@ -906,8 +934,7 @@ static int gmc_v11_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v11_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index de763105fdfd..7a9d6894e321 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -91,6 +91,10 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
struct amdgpu_vmhub *hub;
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0;
u64 addr;
@@ -102,6 +106,31 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
else
hub = &adev->vmhub[AMDGPU_GFXHUB(0)];
+ if (retry_fault) {
+ /* Returning 1 here also prevents sending the IV to the KFD */
+
+ /* Process it only if it's the first fault for this address */
+ if (entry->ih != &adev->irq.ih_soft &&
+ amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
+ entry->timestamp))
+ return 1;
+
+ /* Delegate it to a different ring if the hardware hasn't
+ * already done it.
+ */
+ if (entry->ih == &adev->irq.ih) {
+ amdgpu_irq_delegate(adev, entry, 8);
+ return 1;
+ }
+
+ /* Try to handle the recoverable page faults by filling page
+ * tables
+ */
+ if (amdgpu_vm_handle_fault(adev, entry->pasid, 0, 0, addr,
+ entry->timestamp, write_fault))
+ return 1;
+ }
+
if (!amdgpu_sriov_vf(adev)) {
/*
* Issue a dummy read to wait for the status register to
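The hunk above gives gmc_v12 the same retry-fault triage gmc_v9 uses: drop duplicate faults, bounce the IV from the hard ring to the soft ring, then try to service the fault by filling page tables. A toy stand-in for the dedupe step — the real amdgpu_gmc_filter_faults() keys on a timestamped ring of recent faults; this sketch just remembers the last 16 (pasid, page) pairs:

#include <stdbool.h>
#include <stdint.h>

struct fault_key { uint16_t pasid; uint64_t page; };

static struct fault_key recent[16];
static unsigned int recent_head;

/* Return true if this (pasid, page) fault was seen recently (drop it). */
static bool fault_seen(uint16_t pasid, uint64_t page)
{
	unsigned int i;

	for (i = 0; i < 16; i++)
		if (recent[i].pasid == pasid && recent[i].page == page)
			return true;

	recent[recent_head].pasid = pasid;
	recent[recent_head].page = page;
	recent_head = (recent_head + 1) % 16;
	return false;
}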
@@ -312,9 +341,7 @@ static void gmc_v12_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
return;
}
- mutex_lock(&adev->mman.gtt_window_lock);
gmc_v12_0_flush_vm_hub(adev, vmid, vmhub, 0);
- mutex_unlock(&adev->mman.gtt_window_lock);
return;
}
@@ -336,6 +363,22 @@ static void gmc_v12_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
uint16_t queried;
int vmid, i;
+ if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready &&
+ (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x84) {
+ struct mes_inv_tlbs_pasid_input input = {0};
+ input.pasid = pasid;
+ input.flush_type = flush_type;
+ input.hub_id = AMDGPU_GFXHUB(0);
+ /* MES will invalidate all gc_hub for the device from master */
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ if (all_hub) {
+ /* Only need to invalidate mm_hub now, gfx12 only supports one mmhub */
+ input.hub_id = AMDGPU_MMHUB0(0);
+ adev->mes.funcs->invalidate_tlbs_pasid(&adev->mes, &input);
+ }
+ return;
+ }
+
for (vmid = 1; vmid < 16; vmid++) {
bool valid;
@@ -453,20 +496,6 @@ static void gmc_v12_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid
* 0 valid
*/
-static uint64_t gmc_v12_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_GFX12(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -490,18 +519,35 @@ static void gmc_v12_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v12_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_GFX12(*flags, MTYPE_UC);
+ break;
+ }
- *flags &= ~AMDGPU_PTE_MTYPE_GFX12_MASK;
- *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_GFX12_MASK);
+ if (vm_flags & AMDGPU_VM_PAGE_NOALLOC)
+ *flags |= AMDGPU_PTE_NOALLOC;
+ else
+ *flags &= ~AMDGPU_PTE_NOALLOC;
- if (mapping->flags & AMDGPU_PTE_PRT_GFX12) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT_GFX12;
*flags |= AMDGPU_PTE_SNOOPED;
*flags |= AMDGPU_PTE_SYSTEM;
@@ -543,7 +589,6 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v12_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v12_0_emit_pasid_mapping,
- .map_mtype = gmc_v12_0_map_mtype,
.get_vm_pde = gmc_v12_0_get_vm_pde,
.get_vm_pte = gmc_v12_0_get_vm_pte,
.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
@@ -876,8 +921,7 @@ static int gmc_v12_0_gart_enable(struct amdgpu_device *adev)
/* Flush HDP after it is initialized */
amdgpu_device_flush_hdp(adev, NULL);
- value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
- false : true;
+ value = amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v12_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB0(0), 0);
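The new PASID flush path above is gated on the unified MES scheduler version (>= 0x84) and falls back to the per-VMID walk otherwise. A condensed sketch of that gating, with the mask value and helper bodies as placeholders:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder for AMDGPU_MES_VERSION_MASK; the real mask lives in amdgpu_mes.h. */
#define MES_VERSION_MASK 0xffffffffu

static void mes_invalidate(uint16_t pasid, int hub)
{
	printf("MES: invalidate pasid %u on hub %d\n", pasid, hub);
}

static void legacy_flush(uint16_t pasid, bool all_hub)
{
	printf("legacy: walk VMIDs for pasid %u (all_hub=%d)\n", pasid, all_hub);
}

/* Prefer the firmware path only when the scheduler ring is up and
 * the MES version is new enough (0x84 per the hunk above). */
static void flush_tlb_pasid(uint32_t sched_version, bool ring_ready,
			    uint16_t pasid, bool all_hub)
{
	if (ring_ready && (sched_version & MES_VERSION_MASK) >= 0x84) {
		mes_invalidate(pasid, 0);		/* gfxhub */
		if (all_hub)
			mes_invalidate(pasid, 1);	/* the single mmhub */
		return;
	}
	legacy_flush(pasid, all_hub);
}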
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 8030fcd64210..a8ec95f42926 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -213,7 +213,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, mc, base);
- amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
+ amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_LOW);
}
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
@@ -382,7 +382,9 @@ static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
@@ -608,23 +610,21 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
}
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
- u32 status, u32 addr, u32 mc_client)
+ u32 status, u32 addr)
{
u32 mc_id;
u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
PROTECTIONS);
- char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
- (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_ID);
- dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+ dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from %d\n",
protections, vmid, addr,
REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
MEMORY_CLIENT_RW) ?
- "write" : "read", block, mc_client, mc_id);
+ "write" : "read", mc_id);
}
static const u32 mc_cg_registers[] = {
@@ -1070,6 +1070,12 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status;
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
@@ -1077,6 +1083,10 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
if (!addr && !status)
return 0;
+ amdgpu_vm_update_fault_cache(adev, entry->pasid,
+ ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT,
+ status, AMDGPU_GFXHUB(0));
+
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
gmc_v6_0_set_fault_enable_default(adev, false);
@@ -1087,7 +1097,7 @@ static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
addr);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
- gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
+ gmc_v6_0_vm_decode_fault(adev, status, addr);
}
return 0;
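gmc_v6 (and v7/v8 below) now bounce VM-fault IVs to the software IH ring before touching any fault registers, keeping the hard IRQ path short; returning 1 claims the entry so it is not processed twice. A stand-alone sketch of that two-pass shape, with amdgpu_irq_delegate() reduced to a pointer swap:

#include <stdbool.h>
#include <stdio.h>

struct ih_ring { bool enabled; };
struct iv_entry { struct ih_ring *ih; };

static struct ih_ring ih_hw, ih_soft = { .enabled = true };

static int process_fault(struct iv_entry *entry)
{
	/* Pass 1 (hard IRQ): requeue onto the soft ring and claim the IV. */
	if (ih_soft.enabled && entry->ih != &ih_soft) {
		entry->ih = &ih_soft;	/* stands in for amdgpu_irq_delegate() */
		return 1;
	}
	/* Pass 2 (soft ring): the expensive decode and logging happen here. */
	printf("decoding fault\n");
	return 0;
}

int main(void)
{
	struct iv_entry e = { .ih = &ih_hw };

	while (process_fault(&e) == 1)
		;	/* second pass runs from the soft ring */
	return 0;
}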
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index a8d5795084fc..fbd0bf147f50 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -504,7 +504,9 @@ static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
*flags &= ~AMDGPU_PTE_EXECUTABLE;
@@ -1066,7 +1068,7 @@ static int gmc_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1259,6 +1261,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
{
u32 addr, status, mc_client, vmid;
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
@@ -1288,7 +1296,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1304,8 +1312,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
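In gmc_v7 (and gmc_v8 below) the mb(); atomic_set() publication of the KFD fault info becomes a single atomic_set_release(), and the reader is upgraded to atomic_read_acquire(), so the barrier is tied to exactly the accesses it orders. The same idiom in portable C11, as a sketch:

#include <stdatomic.h>
#include <stdbool.h>

struct fault_info { unsigned int page; bool write; };

static struct fault_info info;
static atomic_int updated;

static void publish(unsigned int page, bool write)
{
	info.page = page;
	info.write = write;
	/* Release: orders the stores above before the flag becomes visible. */
	atomic_store_explicit(&updated, 1, memory_order_release);
}

static bool consume(struct fault_info *out)
{
	if (!atomic_load_explicit(&updated, memory_order_acquire))
		return false;
	*out = info;	/* Safe: the acquire synchronizes with the release. */
	return true;
}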
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index b45fa0cea9d2..6551b60f2584 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -716,11 +716,15 @@ static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level,
}
static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
*flags &= ~AMDGPU_PTE_PRT;
}
@@ -1179,7 +1183,7 @@ static int gmc_v8_0_sw_init(struct amdgpu_ip_block *ip_block)
GFP_KERNEL);
if (!adev->gmc.vm_fault_info)
return -ENOMEM;
- atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 0);
return 0;
}
@@ -1435,6 +1439,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
return 0;
}
+ /* Delegate to the soft IRQ handler ring */
+ if (adev->irq.ih_soft.enabled && entry->ih != &adev->irq.ih_soft) {
+ amdgpu_irq_delegate(adev, entry, 4);
+ return 1;
+ }
+
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
@@ -1474,7 +1484,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
VMID);
if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
- && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ && !atomic_read_acquire(&adev->gmc.vm_fault_info_updated)) {
struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
u32 protections = REG_GET_FIELD(status,
VM_CONTEXT1_PROTECTION_FAULT_STATUS,
@@ -1490,8 +1500,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
info->prot_read = protections & 0x8 ? true : false;
info->prot_write = protections & 0x10 ? true : false;
info->prot_exec = protections & 0x20 ? true : false;
- mb();
- atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ atomic_set_release(&adev->gmc.vm_fault_info_updated, 1);
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index c4d69cf4e06c..8ad7519f7b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -544,8 +544,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- bool retry_fault = !!(entry->src_data[1] & 0x80);
- bool write_fault = !!(entry->src_data[1] & 0x20);
+ bool retry_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_RETRY);
+ bool write_fault = !!(entry->src_data[1] &
+ AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE);
uint32_t status = 0, cid = 0, rw = 0, fed = 0;
struct amdgpu_task_info *task_info;
struct amdgpu_vmhub *hub;
@@ -1073,27 +1075,6 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int v
* 0 valid
*/
-static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
-
-{
- switch (flags) {
- case AMDGPU_VM_MTYPE_DEFAULT:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_NC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- case AMDGPU_VM_MTYPE_WC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_WC);
- case AMDGPU_VM_MTYPE_RW:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_RW);
- case AMDGPU_VM_MTYPE_CC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_CC);
- case AMDGPU_VM_MTYPE_UC:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_UC);
- default:
- return AMDGPU_PTE_MTYPE_VG10(0ULL, MTYPE_NC);
- }
-}
-
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
uint64_t *addr, uint64_t *flags)
{
@@ -1123,6 +1104,7 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -1236,25 +1218,43 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
}
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
- struct amdgpu_bo_va_mapping *mapping,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
uint64_t *flags)
{
- struct amdgpu_bo *bo = mapping->bo_va->base.bo;
-
- *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
+ if (vm_flags & AMDGPU_VM_PAGE_EXECUTABLE)
+ *flags |= AMDGPU_PTE_EXECUTABLE;
+ else
+ *flags &= ~AMDGPU_PTE_EXECUTABLE;
- *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
- *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
+ switch (vm_flags & AMDGPU_VM_MTYPE_MASK) {
+ case AMDGPU_VM_MTYPE_DEFAULT:
+ case AMDGPU_VM_MTYPE_NC:
+ default:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_NC);
+ break;
+ case AMDGPU_VM_MTYPE_WC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_WC);
+ break;
+ case AMDGPU_VM_MTYPE_RW:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_RW);
+ break;
+ case AMDGPU_VM_MTYPE_CC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_CC);
+ break;
+ case AMDGPU_VM_MTYPE_UC:
+ *flags = AMDGPU_PTE_MTYPE_VG10(*flags, MTYPE_UC);
+ break;
+ }
- if (mapping->flags & AMDGPU_PTE_PRT) {
+ if (vm_flags & AMDGPU_VM_PAGE_PRT) {
*flags |= AMDGPU_PTE_PRT;
*flags &= ~AMDGPU_PTE_VALID;
}
if ((*flags & AMDGPU_PTE_VALID) && bo)
- gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo,
- flags);
+ gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags);
}
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
@@ -1391,7 +1391,6 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .map_mtype = gmc_v9_0_map_mtype,
.get_vm_pde = gmc_v9_0_get_vm_pde,
.get_vm_pte = gmc_v9_0_get_vm_pte,
.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
@@ -1837,11 +1836,23 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
{
+ static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
+ u32 vram_info;
+
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
adev->gmc.vram_width = 128 * 64;
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) &&
+ adev->rev_id == 0x3)
+ adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+
+ if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) {
+ vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
+ adev->gmc.vram_vendor = vram_info & 0xF;
+ }
}
static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
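Besides the HBM3E overrides, gmc_v9_4_3_init_vram_info() now reads the VRAM vendor from the low nibble of BIF_BIOS_SCRATCH_4 on bare-metal, non-APU parts. The decode itself is a one-liner; interpreting the nibble is left to amdgpu's own tables:

#include <stdint.h>
#include <stdio.h>

/* Low nibble of the BIOS scratch register carries the vendor ID. */
static uint32_t vram_vendor(uint32_t scratch4)
{
	return scratch4 & 0xF;
}

int main(void)
{
	printf("vendor id %u\n", vram_vendor(0x36));	/* -> 6 */
	return 0;
}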
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 1317ede131b6..01cadf898c00 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -157,6 +157,9 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
iceland_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -194,6 +197,9 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -296,6 +302,10 @@ static int iceland_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
r = amdgpu_irq_init(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 5900b560b7de..333e9c30c091 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -587,8 +587,7 @@ static int ih_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, IH_RING_SIZE, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
index 068ed849dbad..95b3f4e55ec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c
@@ -562,8 +562,7 @@ static int ih_v6_1_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 40a3530e0453..b32ea4129c61 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -552,8 +552,7 @@ static int ih_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
- use_bus_addr =
- (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) ? false : true;
+ use_bus_addr = adev->firmware.load_type != AMDGPU_FW_LOAD_PSP;
r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, use_bus_addr);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 9e428e669ada..b5bb7f4d607c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -557,7 +557,7 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
.nop = PACKET0(0x81ff, 0),
.support_64bit_ptrs = false,
.no_user_fence = true,
- .extra_dw = 64,
+ .extra_bytes = 256,
.get_rptr = jpeg_v1_0_decode_ring_get_rptr,
.get_wptr = jpeg_v1_0_decode_ring_get_wptr,
.set_wptr = jpeg_v1_0_decode_ring_set_wptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 58239c405fda..27c76bd424cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -23,7 +23,6 @@
#include "amdgpu.h"
#include "amdgpu_jpeg.h"
-#include "amdgpu_cs.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
@@ -806,7 +805,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_0_dec_ring_get_rptr,
.get_wptr = jpeg_v2_0_dec_ring_get_wptr,
.set_wptr = jpeg_v2_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -854,58 +853,3 @@ const struct amdgpu_ip_block_version jpeg_v2_0_ip_block = {
.rev = 0,
.funcs = &jpeg_v2_0_ip_funcs,
};
-
-/**
- * jpeg_v2_dec_ring_parse_cs - command submission parser
- *
- * @parser: Command submission parser context
- * @job: the job to parse
- * @ib: the IB to parse
- *
- * Parse the command stream, return -EINVAL for invalid packet,
- * 0 otherwise
- */
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib)
-{
- u32 i, reg, res, cond, type;
- struct amdgpu_device *adev = parser->adev;
-
- for (i = 0; i < ib->length_dw ; i += 2) {
- reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
- res = CP_PACKETJ_GET_RES(ib->ptr[i]);
- cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
- type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
-
- if (res) /* only support 0 at the moment */
- return -EINVAL;
-
- switch (type) {
- case PACKETJ_TYPE0:
- if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE3:
- if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START ||
- reg > JPEG_REG_RANGE_END) {
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- }
- break;
- case PACKETJ_TYPE6:
- if (ib->ptr[i] == CP_PACKETJ_NOP)
- continue;
- dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
- return -EINVAL;
- default:
- dev_err(adev->dev, "Unknown packet type %d !\n", type);
- return -EINVAL;
- }
- }
-
- return 0;
-}
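The per-IP parser deleted above is replaced by a shared amdgpu_jpeg_dec_parse_cs(), which the jpeg_v2–v5 ring funcs below all adopt. Its checks reduce to: TYPE0/TYPE3 register writes must land inside the JPEG register window, and TYPE6 packets may only be NOPs. A condensed sketch, with the field extractors and NOP encoding as placeholders for the real CP_PACKETJ_* macros:

#include <stdint.h>

#define JPEG_REG_START 0x4000
#define JPEG_REG_END   0x41c2

/* Placeholder extractors; the real layouts come from soc15d.h. */
static uint32_t pktj_reg(uint32_t dw)  { return dw & 0x3ffff; }
static uint32_t pktj_type(uint32_t dw) { return dw >> 28; }

static int validate_dw(uint32_t dw)
{
	uint32_t reg = pktj_reg(dw);

	switch (pktj_type(dw)) {
	case 0:	/* PACKETJ_TYPE0: register write, must stay in range */
	case 3:	/* PACKETJ_TYPE3: conditional write, same range check */
		return (reg >= JPEG_REG_START && reg <= JPEG_REG_END) ? 0 : -1;
	case 6:	/* PACKETJ_TYPE6: only a NOP is accepted */
		return dw == 0x60000000 ? 0 : -1;	/* placeholder NOP encoding */
	default:
		return -1;
	}
}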
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 63fadda7a673..654e43e83e2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -45,9 +45,6 @@
#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-#define JPEG_REG_RANGE_START 0x4000
-#define JPEG_REG_RANGE_END 0x41c2
-
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
@@ -60,9 +57,6 @@ void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr);
void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
-int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser,
- struct amdgpu_job *job,
- struct amdgpu_ib *ib);
extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 3e2c389242db..20983f126b49 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -696,7 +696,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -727,7 +727,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.get_rptr = jpeg_v2_5_dec_ring_get_rptr,
.get_wptr = jpeg_v2_5_dec_ring_get_wptr,
.set_wptr = jpeg_v2_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index a44eb2667664..d1a011c40ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -597,7 +597,7 @@ static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
index da3ee69f1a3b..33db2c1ae6cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
@@ -762,7 +762,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
index b86288a69e7b..aae7328973d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
@@ -444,7 +444,7 @@ static int jpeg_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v4_0_3_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
@@ -1177,7 +1177,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_3_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_3_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_3_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_3_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
index 481d1a2dbe5a..54fd9c800c40 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
@@ -686,7 +686,7 @@ static int jpeg_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
@@ -807,7 +807,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
index e0a71909252b..46bf15dce2bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
@@ -584,7 +584,7 @@ static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
if (enable) {
if (!jpeg_v5_0_0_is_idle(ip_block))
@@ -683,7 +683,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
- .parse_cs = jpeg_v2_dec_ring_parse_cs,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
index 54523dc1f702..ab0bf880d3d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c
@@ -196,6 +196,14 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
}
}
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG)) {
+ r = amdgpu_jpeg_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize jpeg ras block!\n");
+ return r;
+ }
+ }
+
r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0_1, ARRAY_SIZE(jpeg_reg_list_5_0_1));
if (r)
return r;
@@ -307,7 +315,7 @@ static int jpeg_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
ret = jpeg_v5_0_1_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
return ret;
@@ -689,7 +697,7 @@ static int jpeg_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
@@ -870,6 +878,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_1_dec_ring_vm_funcs = {
.get_rptr = jpeg_v5_0_1_dec_ring_get_rptr,
.get_wptr = jpeg_v5_0_1_dec_ring_get_wptr,
.set_wptr = jpeg_v5_0_1_dec_ring_set_wptr,
+ .parse_cs = amdgpu_jpeg_dec_parse_cs,
.emit_frame_size =
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
@@ -1016,8 +1025,9 @@ static int jpeg_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_ban
/* reference to smu driver if header file */
static int jpeg_v5_0_1_err_codes[] = {
- 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-7][S|D] */
- 24, 25, 26, 27, 28, 29, 30, 31
+ 16, 17, 18, 19, 20, 21, 22, 23, /* JPEG[0-9][S|D] */
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 48, 49, 50, 51,
};
static bool jpeg_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1058,6 +1068,11 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
if (r)
return r;
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
+ &jpeg_v5_0_1_aca_info, NULL);
+ if (r)
+ goto late_fini;
+
if (amdgpu_ras_is_supported(adev, ras_block->block) &&
adev->jpeg.inst->ras_poison_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->jpeg.inst->ras_poison_irq, 0);
@@ -1065,11 +1080,6 @@ static int jpeg_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_comm
goto late_fini;
}
- r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__JPEG,
- &jpeg_v5_0_1_aca_info, NULL);
- if (r)
- goto late_fini;
-
return 0;
late_fini:
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
index d6f50b13e2ba..64cae89357b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
@@ -21,6 +21,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
@@ -198,6 +199,58 @@ static int mes_userq_create_ctx_space(struct amdgpu_userq_mgr *uq_mgr,
return 0;
}
+static int mes_userq_detect_and_reset(struct amdgpu_device *adev,
+ int queue_type)
+{
+ int db_array_size = amdgpu_mes_get_hung_queue_db_array_size(adev);
+ struct mes_detect_and_reset_queue_input input;
+ struct amdgpu_usermode_queue *queue;
+ unsigned int hung_db_num = 0;
+ unsigned long queue_id;
+ u32 db_array[8];
+ bool found_hung_queue = false;
+ int r, i;
+
+ if (db_array_size > 8) {
+ dev_err(adev->dev, "DB array size (%d vs 8) too small\n",
+ db_array_size);
+ return -EINVAL;
+ }
+
+ memset(&input, 0x0, sizeof(struct mes_detect_and_reset_queue_input));
+
+ input.queue_type = queue_type;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = amdgpu_mes_detect_and_reset_hung_queues(adev, queue_type, false,
+ &hung_db_num, db_array);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ dev_err(adev->dev, "Failed to detect and reset queues, err (%d)\n", r);
+ } else if (hung_db_num) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ if (queue->queue_type == queue_type) {
+ for (i = 0; i < hung_db_num; i++) {
+ if (queue->doorbell_index == db_array[i]) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ found_hung_queue = true;
+ atomic_inc(&adev->gpu_reset_counter);
+ amdgpu_userq_fence_driver_force_completion(queue);
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, NULL);
+ }
+ }
+ }
+ }
+ }
+
+ if (found_hung_queue) {
+ /* Resume scheduling after hang recovery */
+ r = amdgpu_mes_resume(adev);
+ }
+
+ return r;
+}
+
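mes_userq_detect_and_reset() above asks MES for the doorbells of hung queues, then cross-references them against the driver's user-queue xarray, marking matches hung and forcing their fences complete before resuming scheduling. The matching core, reduced to plain C arrays:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

enum { UQ_MAPPED, UQ_HUNG };

struct uq { uint32_t doorbell; int state; };

/* Mark every queue whose doorbell appears in the hung list. */
static bool mark_hung(struct uq *queues, size_t nq,
		      const uint32_t *hung_db, size_t ndb)
{
	bool found = false;
	size_t i, j;

	for (i = 0; i < nq; i++)
		for (j = 0; j < ndb; j++)
			if (queues[i].doorbell == hung_db[j]) {
				queues[i].state = UQ_HUNG;
				found = true;
			}
	return found;
}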
static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
struct drm_amdgpu_userq_in *args_in,
struct amdgpu_usermode_queue *queue)
@@ -215,13 +268,6 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
return -ENOMEM;
}
- if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
- !mqd_user->queue_va || mqd_user->queue_size == 0) {
- DRM_ERROR("Invalid MQD parameters for userqueue\n");
- r = -EINVAL;
- goto free_props;
- }
-
r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
if (r) {
DRM_ERROR("Failed to create MQD object for userqueue\n");
@@ -254,6 +300,11 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
goto free_mqd;
}
+ r = amdgpu_userq_input_va_validate(queue, compute_mqd->eop_va,
+ 2048);
+ if (r)
+ goto free_mqd;
+
userq_props->eop_gpu_addr = compute_mqd->eop_va;
userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
userq_props->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM;
@@ -263,6 +314,14 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
kfree(compute_mqd);
} else if (queue->queue_type == AMDGPU_HW_IP_GFX) {
struct drm_amdgpu_userq_mqd_gfx11 *mqd_gfx_v11;
+ struct amdgpu_gfx_shadow_info shadow_info;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
+ } else {
+ r = -EINVAL;
+ goto free_mqd;
+ }
if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid GFX MQD\n");
@@ -281,6 +340,16 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
userq_props->csa_addr = mqd_gfx_v11->csa_va;
userq_props->tmz_queue =
mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
+
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->shadow_va,
+ shadow_info.shadow_size);
+ if (r)
+ goto free_mqd;
+ r = amdgpu_userq_input_va_validate(queue, mqd_gfx_v11->csa_va,
+ shadow_info.csa_size);
+ if (r)
+ goto free_mqd;
+
kfree(mqd_gfx_v11);
} else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
@@ -297,6 +366,10 @@ static int mes_userq_mqd_create(struct amdgpu_userq_mgr *uq_mgr,
r = -ENOMEM;
goto free_mqd;
}
+ r = amdgpu_userq_input_va_validate(queue, mqd_sdma_v11->csa_va,
+ 32);
+ if (r)
+ goto free_mqd;
userq_props->csa_addr = mqd_sdma_v11->csa_va;
kfree(mqd_sdma_v11);
@@ -347,9 +420,82 @@ mes_userq_mqd_destroy(struct amdgpu_userq_mgr *uq_mgr,
amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
}
+static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_suspend_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ signed long timeout = 2100000; /* 2100 ms */
+ u64 fence_gpu_addr;
+ u32 fence_offset;
+ u64 *fence_ptr;
+ int i, r;
+
+ if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
+ return 0;
+ r = amdgpu_device_wb_get(adev, &fence_offset);
+ if (r)
+ return r;
+
+ fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
+ fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
+ *fence_ptr = 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+ queue_input.suspend_fence_addr = fence_gpu_addr;
+ queue_input.suspend_fence_value = 1;
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r) {
+ DRM_ERROR("Failed to suspend gang: %d\n", r);
+ goto out;
+ }
+
+ for (i = 0; i < timeout; i++) {
+ if (*fence_ptr == 1)
+ goto out;
+ udelay(1);
+ }
+ r = -ETIMEDOUT;
+
+out:
+ amdgpu_device_wb_free(adev, fence_offset);
+ return r;
+}
+
+static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct mes_resume_gang_input queue_input;
+ struct amdgpu_userq_obj *ctx = &queue->fw_obj;
+ int r;
+
+ if (queue->state == AMDGPU_USERQ_STATE_HUNG)
+ return -EINVAL;
+ if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
+ return 0;
+
+ memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
+ queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
+ return r;
+}
+
const struct amdgpu_userq_funcs userq_mes_funcs = {
.mqd_create = mes_userq_mqd_create,
.mqd_destroy = mes_userq_mqd_destroy,
.unmap = mes_userq_unmap,
.map = mes_userq_map,
+ .detect_and_reset = mes_userq_detect_and_reset,
+ .preempt = mes_userq_preempt,
+ .restore = mes_userq_restore,
};
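mes_userq_preempt() above synchronizes with the firmware through a write-back fence: a WB slot is armed at 0, MES is told to write 1 there once the gang is suspended, and the driver busy-polls with udelay(1) for up to ~2.1 s. The polling core as a stand-alone sketch (usleep() stands in for the kernel's udelay()):

#include <stdint.h>
#include <unistd.h>	/* usleep(), standing in for udelay() */

/* Poll until *fence == val or ~timeout_us microseconds elapse.
 * Returns 0 on success, -1 on timeout. */
static int poll_fence(volatile uint64_t *fence, uint64_t val,
		      unsigned long timeout_us)
{
	unsigned long i;

	for (i = 0; i < timeout_us; i++) {
		if (*fence == val)
			return 0;
		usleep(1);
	}
	return -1;
}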
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 3f6a828cad8a..3a52754b5cad 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -66,6 +66,9 @@ static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev);
#define GFX_MES_DRAM_SIZE 0x80000
#define MES11_HW_RESOURCE_1_SIZE (128 * AMDGPU_GPU_PAGE_SIZE)
+#define MES11_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset, [4:7] = hqd info */
+#define MES11_HUNG_HQD_INFO_OFFSET 4
+
static void mes_v11_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -366,6 +369,7 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -376,6 +380,9 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
+ if (mes_rev >= 0x60)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
@@ -711,6 +718,12 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
mes_set_hw_res_pkt.oversubscription_timer = 50;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(mes->adev->dev,
+ "MES FW version must be >= 0x7f to enable LR compute workaround.\n");
+
if (amdgpu_mes_log_enable) {
mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
@@ -784,6 +797,32 @@ static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v11_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v11_0_submit_pkt_and_poll_completion(mes,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.add_hw_queue = mes_v11_0_add_hw_queue,
.remove_hw_queue = mes_v11_0_remove_hw_queue,
@@ -793,6 +832,7 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.resume_gang = mes_v11_0_resume_gang,
.misc_op = mes_v11_0_misc_op,
.reset_hw_queue = mes_v11_0_reset_hw_queue,
+ .detect_and_reset_hung_queues = mes_v11_0_detect_and_reset_hung_queues,
};
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -1685,6 +1725,9 @@ static int mes_v11_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size = MES11_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_hqd_info_offset = MES11_HUNG_HQD_INFO_OFFSET;
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 6b222630f3fa..744e95d3984a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -47,6 +47,9 @@ static int mes_v12_0_kiq_hw_fini(struct amdgpu_device *adev);
#define MES_EOP_SIZE 2048
+#define MES12_HUNG_DB_OFFSET_ARRAY_SIZE 8 /* [0:3] = db offset [4:7] hqd info */
+#define MES12_HUNG_HQD_INFO_OFFSET 4
+
static void mes_v12_0_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -108,6 +111,7 @@ static const char *mes_v12_0_opcodes[] = {
"SET_SE_MODE",
"SET_GANG_SUBMIT",
"SET_HW_RSRC_1",
+ "INVALIDATE_TLBS",
};
static const char *mes_v12_0_misc_opcodes[] = {
@@ -225,7 +229,12 @@ static int mes_v12_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
pipe, x_pkt->header.opcode);
r = amdgpu_fence_wait_polling(ring, seq, timeout);
- if (r < 1 || !*status_ptr) {
+
+ /*
+ * status_ptr[31:0] == 0 (fail) or status_ptr[63:0] == 1 (success).
+ * If status_ptr[31:0] == 0 then status_ptr[63:32] will have debug error information.
+ */
+ if (r < 1 || !(lower_32_bits(*status_ptr))) {
if (misc_op_str)
dev_err(adev->dev, "MES(%d) failed to respond to msg=%s (%s)\n",
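The comment above pins down the MES status-word contract: bits [31:0] carry pass/fail and, on failure, bits [63:32] carry debug data — hence the switch from !*status_ptr to !lower_32_bits(*status_ptr). A sketch of the decode, with lower/upper_32_bits mirroring the kernel helpers:

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(v) ((uint32_t)((v) & 0xffffffffULL))
#define upper_32_bits(v) ((uint32_t)((v) >> 32))

/* Success iff the low word is non-zero; on failure the high word
 * carries the firmware's debug information. */
static int check_mes_status(uint64_t status)
{
	if (!lower_32_bits(status)) {
		fprintf(stderr, "MES failed, debug info 0x%08x\n",
			upper_32_bits(status));
		return -1;
	}
	return 0;
}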
@@ -352,6 +361,7 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
struct mes_remove_queue_input *input)
{
union MESAPI__REMOVE_QUEUE mes_remove_queue_pkt;
+ uint32_t mes_rev = mes->sched_version & AMDGPU_MES_VERSION_MASK;
memset(&mes_remove_queue_pkt, 0, sizeof(mes_remove_queue_pkt));
@@ -362,6 +372,9 @@ static int mes_v12_0_remove_hw_queue(struct amdgpu_mes *mes,
mes_remove_queue_pkt.doorbell_offset = input->doorbell_offset;
mes_remove_queue_pkt.gang_context_addr = input->gang_context_addr;
+ if (mes_rev >= 0x5a)
+ mes_remove_queue_pkt.remove_queue_after_reset = input->remove_queue_after_reset;
+
return mes_v12_0_submit_pkt_and_poll_completion(mes,
AMDGPU_MES_SCHED_PIPE,
&mes_remove_queue_pkt, sizeof(mes_remove_queue_pkt),
@@ -567,13 +580,41 @@ static int mes_v12_0_unmap_legacy_queue(struct amdgpu_mes *mes,
static int mes_v12_0_suspend_gang(struct amdgpu_mes *mes,
struct mes_suspend_gang_input *input)
{
- return 0;
+ union MESAPI__SUSPEND mes_suspend_gang_pkt;
+
+ memset(&mes_suspend_gang_pkt, 0, sizeof(mes_suspend_gang_pkt));
+
+ mes_suspend_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_suspend_gang_pkt.header.opcode = MES_SCH_API_SUSPEND;
+ mes_suspend_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_suspend_gang_pkt.suspend_all_gangs = input->suspend_all_gangs;
+ mes_suspend_gang_pkt.gang_context_addr = input->gang_context_addr;
+ mes_suspend_gang_pkt.suspend_fence_addr = input->suspend_fence_addr;
+ mes_suspend_gang_pkt.suspend_fence_value = input->suspend_fence_value;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_suspend_gang_pkt, sizeof(mes_suspend_gang_pkt),
+ offsetof(union MESAPI__SUSPEND, api_status));
}
static int mes_v12_0_resume_gang(struct amdgpu_mes *mes,
struct mes_resume_gang_input *input)
{
- return 0;
+ union MESAPI__RESUME mes_resume_gang_pkt;
+
+ memset(&mes_resume_gang_pkt, 0, sizeof(mes_resume_gang_pkt));
+
+ mes_resume_gang_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_resume_gang_pkt.header.opcode = MES_SCH_API_RESUME;
+ mes_resume_gang_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_resume_gang_pkt.resume_all_gangs = input->resume_all_gangs;
+ mes_resume_gang_pkt.gang_context_addr = input->gang_context_addr;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_resume_gang_pkt, sizeof(mes_resume_gang_pkt),
+ offsetof(union MESAPI__RESUME, api_status));
}
static int mes_v12_0_query_sched_status(struct amdgpu_mes *mes, int pipe)
@@ -738,6 +779,11 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
mes_set_hw_res_pkt.use_different_vmid_compute = 1;
mes_set_hw_res_pkt.enable_reg_active_poll = 1;
mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
+ if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82)
+ mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+ else
+ dev_info_once(adev->dev,
+ "MES FW version must be >= 0x82 to enable LR compute workaround.\n");
/*
* Keep oversubscribe timer for sdma . When we have unmapped doorbell
@@ -879,6 +925,74 @@ static int mes_v12_0_reset_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__RESET, api_status));
}
+static int mes_v12_0_detect_and_reset_hung_queues(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input)
+{
+ union MESAPI__RESET mes_reset_queue_pkt;
+
+ memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
+
+ mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
+ mes_reset_queue_pkt.header.opcode = MES_SCH_API_RESET;
+ mes_reset_queue_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_reset_queue_pkt.queue_type =
+ convert_to_mes_queue_type(input->queue_type);
+ mes_reset_queue_pkt.doorbell_offset_addr =
+ mes->hung_queue_db_array_gpu_addr;
+
+ if (input->detect_only)
+ mes_reset_queue_pkt.hang_detect_only = 1;
+ else
+ mes_reset_queue_pkt.hang_detect_then_reset = 1;
+
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_SCHED_PIPE,
+ &mes_reset_queue_pkt, sizeof(mes_reset_queue_pkt),
+ offsetof(union MESAPI__RESET, api_status));
+}
+
+static int mes_v12_inv_tlb_convert_hub_id(uint8_t id)
+{
+ /*
+ * MES doesn't support invalidating gc_hub on a slave xcc individually;
+ * the master xcc will invalidate all gc_hub for the partition
+ */
+ if (AMDGPU_IS_GFXHUB(id))
+ return 0;
+ else if (AMDGPU_IS_MMHUB0(id))
+ return 1;
+ else
+ return -EINVAL;
+
+}
+
+static int mes_v12_0_inv_tlbs_pasid(struct amdgpu_mes *mes,
+ struct mes_inv_tlbs_pasid_input *input)
+{
+ union MESAPI__INV_TLBS mes_inv_tlbs;
+ int ret;
+
+ memset(&mes_inv_tlbs, 0, sizeof(mes_inv_tlbs));
+
+ mes_inv_tlbs.header.type = MES_API_TYPE_SCHEDULER;
+ mes_inv_tlbs.header.opcode = MES_SCH_API_INV_TLBS;
+ mes_inv_tlbs.header.dwsize = API_FRAME_SIZE_IN_DWORDS;
+
+ mes_inv_tlbs.invalidate_tlbs.inv_sel = 0;
+ mes_inv_tlbs.invalidate_tlbs.flush_type = input->flush_type;
+ mes_inv_tlbs.invalidate_tlbs.inv_sel_id = input->pasid;
+
+ /* convert amdgpu_mes_hub_id to the hub_id MES expects */
+ ret = mes_v12_inv_tlb_convert_hub_id(input->hub_id);
+ if (ret < 0)
+ return -EINVAL;
+ mes_inv_tlbs.invalidate_tlbs.hub_id = ret;
+ return mes_v12_0_submit_pkt_and_poll_completion(mes, AMDGPU_MES_KIQ_PIPE,
+ &mes_inv_tlbs, sizeof(mes_inv_tlbs),
+ offsetof(union MESAPI__INV_TLBS, api_status));
+
+}
+
static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.add_hw_queue = mes_v12_0_add_hw_queue,
.remove_hw_queue = mes_v12_0_remove_hw_queue,
@@ -888,6 +1002,8 @@ static const struct amdgpu_mes_funcs mes_v12_0_funcs = {
.resume_gang = mes_v12_0_resume_gang,
.misc_op = mes_v12_0_misc_op,
.reset_hw_queue = mes_v12_0_reset_hw_queue,
+ .invalidate_tlbs_pasid = mes_v12_0_inv_tlbs_pasid,
+ .detect_and_reset_hung_queues = mes_v12_0_detect_and_reset_hung_queues,
};
static int mes_v12_0_allocate_ucode_buffer(struct amdgpu_device *adev,
@@ -1793,6 +1909,9 @@ static int mes_v12_0_early_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
int pipe, r;
+ adev->mes.hung_queue_db_array_size = MES12_HUNG_DB_OFFSET_ARRAY_SIZE;
+ adev->mes.hung_queue_hqd_info_offset = MES12_HUNG_HQD_INFO_OFFSET;
+
for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
r = amdgpu_mes_init_microcode(adev, pipe);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 48101a34e049..9a40107a0869 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -292,14 +292,32 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_ai_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_ai_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_ai_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_ai_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -327,10 +345,15 @@ static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_ai_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_ai_mailbox_send_ack(adev);
@@ -415,7 +438,8 @@ int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_ai_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_ai_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_ai_mailbox_handle_bad_pages_work);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index f6d8597452ed..e7cd07383d56 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -173,13 +173,17 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
static int xgpu_nv_send_access_requests_with_param(struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3)
{
- int r, retry = 1;
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = 0, retry = 1;
enum idh_event event = -1;
+ mutex_lock(&virt->access_req_mutex);
send_request:
- if (amdgpu_ras_is_rma(adev))
- return -ENODEV;
+ if (amdgpu_ras_is_rma(adev)) {
+ r = -ENODEV;
+ goto out;
+ }
xgpu_nv_mailbox_trans_msg(adev, req, data1, data2, data3);
@@ -202,8 +206,8 @@ send_request:
case IDH_REQ_RAS_CPER_DUMP:
event = IDH_RAS_CPER_DUMP_READY;
break;
- case IDH_REQ_RAS_BAD_PAGES:
- event = IDH_RAS_BAD_PAGES_READY;
+ case IDH_REQ_RAS_CHK_CRITI:
+ event = IDH_REQ_RAS_CHK_CRITI_READY;
break;
default:
break;
@@ -217,17 +221,25 @@ send_request:
if (req != IDH_REQ_GPU_INIT_DATA) {
dev_err(adev->dev, "Doesn't get msg:%d from pf, error=%d\n", event, r);
- return r;
+ goto out;
} else /* host doesn't support REQ_GPU_INIT_DATA handshake */
adev->virt.req_init_data_ver = 0;
} else {
if (req == IDH_REQ_GPU_INIT_DATA) {
- adev->virt.req_init_data_ver =
- RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
-
- /* assume V1 in case host doesn't set version number */
- if (adev->virt.req_init_data_ver < 1)
- adev->virt.req_init_data_ver = 1;
+ switch (RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1)) {
+ case GPU_CRIT_REGION_V2:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V2;
+ adev->virt.init_data_header.offset =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
+ adev->virt.init_data_header.size_kb =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW3);
+ break;
+ default:
+ adev->virt.req_init_data_ver = GPU_CRIT_REGION_V1;
+ adev->virt.init_data_header.offset = -1;
+ adev->virt.init_data_header.size_kb = 0;
+ break;
+ }
}
}
@@ -238,7 +250,10 @@ send_request:
}
}
- return 0;
+out:
+ mutex_unlock(&virt->access_req_mutex);
+
+ return r;
}
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
@@ -285,7 +300,8 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
- return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+ return xgpu_nv_send_access_requests_with_param(adev, IDH_REQ_GPU_INIT_DATA,
+ 0, GPU_CRIT_REGION_V2, 0);
}
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
@@ -359,14 +375,32 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
}
}
-static void xgpu_nv_mailbox_bad_pages_work(struct work_struct *work)
+static void xgpu_nv_mailbox_req_bad_pages_work(struct work_struct *work)
{
- struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, bad_pages_work);
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, req_bad_pages_work);
struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
if (down_read_trylock(&adev->reset_domain->sem)) {
amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_bad_pages(adev);
+ up_read(&adev->reset_domain->sem);
+ }
+}
+
+/**
+ * xgpu_nv_mailbox_handle_bad_pages_work - Reinitialize the data exchange region to get fresh bad page information
+ * @work: pointer to the work_struct
+ *
+ * This work handler is triggered when bad pages are ready, and it reinitializes
+ * the data exchange region to retrieve updated bad page information from the host.
+ */
+static void xgpu_nv_mailbox_handle_bad_pages_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, handle_bad_pages_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_init_data_exchange(adev);
up_read(&adev->reset_domain->sem);
}
@@ -397,10 +431,15 @@ static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
switch (event) {
+ case IDH_RAS_BAD_PAGES_READY:
+ xgpu_nv_mailbox_send_ack(adev);
+ if (amdgpu_sriov_runtime(adev))
+ schedule_work(&adev->virt.handle_bad_pages_work);
+ break;
case IDH_RAS_BAD_PAGES_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
if (amdgpu_sriov_runtime(adev))
- schedule_work(&adev->virt.bad_pages_work);
+ schedule_work(&adev->virt.req_bad_pages_work);
break;
case IDH_UNRECOV_ERR_NOTIFICATION:
xgpu_nv_mailbox_send_ack(adev);
@@ -485,7 +524,8 @@ int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
}
INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);
- INIT_WORK(&adev->virt.bad_pages_work, xgpu_nv_mailbox_bad_pages_work);
+ INIT_WORK(&adev->virt.req_bad_pages_work, xgpu_nv_mailbox_req_bad_pages_work);
+ INIT_WORK(&adev->virt.handle_bad_pages_work, xgpu_nv_mailbox_handle_bad_pages_work);
return 0;
}
@@ -535,6 +575,16 @@ static int xgpu_nv_req_ras_bad_pages(struct amdgpu_device *adev)
return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES);
}
+static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr)
+{
+ uint32_t addr_hi, addr_lo;
+
+ addr_hi = (uint32_t)(addr >> 32);
+ addr_lo = (uint32_t)(addr & 0xFFFFFFFF);
+ return xgpu_nv_send_access_requests_with_param(
+ adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -548,4 +598,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_ras_err_count = xgpu_nv_req_ras_err_count,
.req_ras_cper_dump = xgpu_nv_req_ras_cper_dump,
.req_bad_pages = xgpu_nv_req_ras_bad_pages,
+ .req_ras_chk_criti = xgpu_nv_check_vf_critical_region
};
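
The critical-region request above splits the 64-bit guest address across the two 32-bit mailbox data words. A standalone sketch of the split and the matching host-side reassembly (the helper names are illustrative, not part of the driver):

#include <stdint.h>

/* Guest side: split the 64-bit address into the two mailbox DWORDs. */
static inline void mbox_pack_addr(uint64_t addr, uint32_t *hi, uint32_t *lo)
{
	*hi = (uint32_t)(addr >> 32);
	*lo = (uint32_t)(addr & 0xFFFFFFFF);
}

/* Host side (illustrative only): reassemble the address. */
static inline uint64_t mbox_unpack_addr(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) | lo;
}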
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 5808689562cc..c1083e5e41e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -43,6 +43,7 @@ enum idh_request {
IDH_REQ_RAS_ERROR_COUNT = 203,
IDH_REQ_RAS_CPER_DUMP = 204,
IDH_REQ_RAS_BAD_PAGES = 205,
+ IDH_REQ_RAS_CHK_CRITI = 206
};
enum idh_event {
@@ -62,6 +63,7 @@ enum idh_event {
IDH_RAS_BAD_PAGES_READY = 15,
IDH_RAS_BAD_PAGES_NOTIFICATION = 16,
IDH_UNRECOV_ERR_NOTIFICATION = 17,
+ IDH_REQ_RAS_CHK_CRITI_READY = 18,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index d5002ff931d8..860bc5cb03c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -151,9 +151,9 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
* BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
* BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
* BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
-+ * BIF_SDMA4_DOORBELL_RANGE:
-+ * ARCTURUS: 0x3be0
-+ * ALDEBARAN: 0x3be4
+ * BIF_SDMA4_DOORBELL_RANGE:
+ * ARCTURUS: 0x3be0
+ * ALDEBARAN: 0x3be4
*/
if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
reg = instance + 0x4 + 0x1 +
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
index 1c22bc11c1f8..bdfd2917e3ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c
@@ -41,19 +41,21 @@ static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev)
static u32 nbio_v7_9_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp;
-
- tmp = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
- /* If it is VF or subrevision holds a non-zero value, that should be used */
- if (tmp || amdgpu_sriov_vf(adev))
- return tmp;
+ u32 rev_id;
- /* If discovery subrev is not updated, use register version */
- tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
- tmp = REG_GET_FIELD(tmp, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
- STRAP_ATI_REV_ID_DEV0_F0);
+ /*
+ * fetch the sub-revision field from the IP-discovery table
+ * (returns zero if the table entry is not populated).
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ rev_id = IP_VERSION_SUBREV(amdgpu_ip_version_full(adev, NBIO_HWIP, 0));
+ } else {
+ rev_id = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0);
+ rev_id = REG_GET_FIELD(rev_id, RCC_STRAP0_RCC_DEV0_EPF0_STRAP0,
+ STRAP_ATI_REV_ID_DEV0_F0);
+ }
- return tmp;
+ return rev_id;
}
static void nbio_v7_9_mc_access_enable(struct amdgpu_device *adev, bool enable)
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
index 83e9782aef39..8f4817404f10 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/nv.h
@@ -31,5 +31,6 @@ extern const struct amdgpu_ip_block_version nv_common_ip_block;
void nv_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void nv_set_virt_ops(struct amdgpu_device *adev);
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 64b240b51f1a..a9be7a505026 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -142,13 +142,37 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
return err;
}
-static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+static int psp_v11_wait_for_tos_unload(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg1, sol_reg2;
+ int retry_loop;
+ /* Wait for the TOS to be unloaded */
+ for (retry_loop = 0; retry_loop < 20; retry_loop++) {
+ sol_reg1 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ usleep_range(1000, 2000);
+ sol_reg2 = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+ if (sol_reg1 == sol_reg2)
+ return 0;
+ }
+ dev_err(adev->dev, "TOS unload failed, C2PMSG_33: %x C2PMSG_81: %x",
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_33),
+ RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81));
+
+ return -ETIME;
+}
+
+static int psp_v11_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
int ret;
int retry_loop;
+ /* For a reset done at the end of S3, only wait for TOS to be unloaded */
+ if (adev->in_s3 && !(adev->flags & AMD_IS_APU) && amdgpu_in_reset(adev))
+ return psp_v11_wait_for_tos_unload(psp);
+
for (retry_loop = 0; retry_loop < 20; retry_loop++) {
/* Wait for bootloader to signify that is
ready having bit 31 of C2PMSG_35 set to 1 */
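
psp_v11_wait_for_tos_unload() above relies on a stability test rather than a ready bit: read C2PMSG_81 twice, 1-2 ms apart, and treat two identical reads as "TOS unloaded". The same polling idiom in standalone form (register accessor and delay are injected as callbacks; names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/*
 * Poll until two consecutive reads of a register return the same value,
 * i.e. the register has stopped changing. Returns true on success,
 * false once max_tries pairs of reads have disagreed.
 */
static bool wait_reg_stable(uint32_t (*read_reg)(void),
			    void (*delay_us)(unsigned int us),
			    int max_tries)
{
	for (int i = 0; i < max_tries; i++) {
		uint32_t a = read_reg();
		delay_us(1000);		/* mirrors usleep_range(1000, 2000) */
		uint32_t b = read_reg();

		if (a == b)
			return true;
	}
	return false;
}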
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 36b1ca73c2ed..a1443990d5c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -2361,11 +2361,15 @@ static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(9, 4, 3):
case IP_VERSION(9, 4, 4):
- if ((adev->gfx.mec_fw_version >= 0xb0) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ if ((adev->gfx.mec_fw_version >= 0xb0) &&
+ amdgpu_dpm_reset_sdma_is_supported(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(9, 5, 0):
- if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev))
+ if ((adev->gfx.mec_fw_version >= 0xf) &&
+ amdgpu_dpm_reset_sdma_is_supported(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
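
The same three-part gate recurs in every SDMA variant touched below: a minimum firmware version, SMU support for the reset message, and the new debug_disable_gpu_ring_reset override. Factored out, the predicate is just (a sketch, not the driver's code):

#include <stdbool.h>
#include <stdint.h>

/*
 * Per-queue reset is advertised only when the firmware is new enough,
 * the SMU supports the reset message, and the debug knob hasn't
 * disabled ring resets.
 */
static bool queue_reset_ok(uint32_t fw_ver, uint32_t min_fw_ver,
			   bool smu_supported, bool debug_disabled)
{
	return fw_ver >= min_fw_ver && smu_supported && !debug_disabled;
}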
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 7dc67a22a7a0..8ddc4df06a1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1429,7 +1429,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 0, 2):
case IP_VERSION(5, 0, 5):
if ((adev->sdma.instance[0].fw_version >= 35) &&
- !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 3bd44c24f692..51101b0aa2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -342,7 +342,7 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
if (ring->me > 1) {
- amdgpu_asic_flush_hdp(adev, ring);
+ amdgpu_hdp_flush(adev, ring);
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
@@ -1348,12 +1348,14 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(5, 2, 3):
case IP_VERSION(5, 2, 4):
if ((adev->sdma.instance[0].fw_version >= 76) &&
- !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
case IP_VERSION(5, 2, 5):
if ((adev->sdma.instance[0].fw_version >= 34) &&
- !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index db6e41967f12..217040044987 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -1356,7 +1356,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
case IP_VERSION(6, 0, 2):
case IP_VERSION(6, 0, 3):
if ((adev->sdma.instance[0].fw_version >= 21) &&
- !amdgpu_sriov_vf(adev))
+ !amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
break;
default:
@@ -1389,7 +1390,7 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 0, 3):
- if ((adev->sdma.instance[0].fw_version >= 27) && !adev->sdma.disable_uq)
+ if (adev->sdma.instance[0].fw_version >= 29 && !adev->sdma.disable_uq)
adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs;
break;
case IP_VERSION(6, 1, 0):
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 326ecc8d37d2..2b81344dcd66 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1337,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->sdma.supported_reset =
amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
- if (!amdgpu_sriov_vf(adev))
+ if (!amdgpu_sriov_vf(adev) &&
+ !adev->debug_disable_gpu_ring_reset)
adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_sdma_sysfs_reset_mask_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index e0f139de7991..f7288372ee61 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -45,6 +45,7 @@
#include "dce_v6_0.h"
#include "si.h"
#include "uvd_v3_1.h"
+#include "vce_v1_0.h"
#include "uvd/uvd_4_0_d.h"
@@ -921,8 +922,6 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x3630, 0xfffffff0, 0x00000100,
};
-/* XXX: update when we support VCE */
-#if 0
/* tahiti, pitcairn, verde */
static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
{
@@ -940,13 +939,7 @@ static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
.codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array),
.codec_array = tahiti_video_codecs_encode_array,
};
-#else
-static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
-{
- .codec_count = 0,
- .codec_array = NULL,
-};
-#endif
+
/* oland and hainan don't support encode */
static const struct amdgpu_video_codecs hainan_video_codecs_encode =
{
@@ -1925,6 +1918,14 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
~VCEPLL_BYPASS_EN_MASK);
if (!evclk || !ecclk) {
+ /*
+ * On some chips, the PLL takes way too long to get out of
+ * sleep mode, causing a timeout waiting on CTLACK/CTLACK2.
+ * Leave the PLL running in bypass mode.
+ */
+ if (adev->pdev->device == 0x6780)
+ return 0;
+
/* Keep the Bypass mode, put PLL to sleep */
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, VCEPLL_SLEEP_MASK,
~VCEPLL_SLEEP_MASK);
@@ -2717,7 +2718,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
else
amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
- /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
+ amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block);
break;
case CHIP_OLAND:
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
@@ -2735,7 +2736,6 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
else
amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
- /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */
break;
case CHIP_HAINAN:
amdgpu_device_ip_block_add(adev, &si_common_ip_block);
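
The 0x6780 check in si_set_vce_clocks() is a single-device quirk; if more PLL-bypass exceptions turn up, a small quirk table keeps them in one place. A hypothetical sketch of that pattern (the table and helper are illustrative, not something the driver defines):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct vcepll_quirk {
	uint16_t device;	/* PCI device ID */
	bool keep_bypass;	/* leave the PLL running in bypass mode */
};

static const struct vcepll_quirk vcepll_quirks[] = {
	{ 0x6780, true },	/* PLL too slow to leave sleep mode */
};

static bool vcepll_keep_bypass(uint16_t device)
{
	for (size_t i = 0; i < sizeof(vcepll_quirks) / sizeof(vcepll_quirks[0]); i++)
		if (vcepll_quirks[i].device == device)
			return vcepll_quirks[i].keep_bypass;
	return false;
}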
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 1df00f8a2406..66f650f87243 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -96,6 +96,9 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
pci_set_master(adev->pdev);
si_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -112,6 +115,9 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
@@ -127,6 +133,8 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
}
+
+out:
return (wptr & ih->ptr_mask);
}
@@ -175,6 +183,10 @@ static int si_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
return amdgpu_irq_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sid.h b/drivers/gpu/drm/amd/amdgpu/sid.h
index cbd4f8951cfa..561462a8332e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sid.h
+++ b/drivers/gpu/drm/amd/amdgpu/sid.h
@@ -582,45 +582,6 @@
#define DMA_PACKET_NOP 0xf
/* VCE */
-#define VCE_STATUS 0x20004
-#define VCE_VCPU_CNTL 0x20014
-#define VCE_CLK_EN (1 << 0)
-#define VCE_VCPU_CACHE_OFFSET0 0x20024
-#define VCE_VCPU_CACHE_SIZE0 0x20028
-#define VCE_VCPU_CACHE_OFFSET1 0x2002c
-#define VCE_VCPU_CACHE_SIZE1 0x20030
-#define VCE_VCPU_CACHE_OFFSET2 0x20034
-#define VCE_VCPU_CACHE_SIZE2 0x20038
-#define VCE_SOFT_RESET 0x20120
-#define VCE_ECPU_SOFT_RESET (1 << 0)
-#define VCE_FME_SOFT_RESET (1 << 2)
-#define VCE_RB_BASE_LO2 0x2016c
-#define VCE_RB_BASE_HI2 0x20170
-#define VCE_RB_SIZE2 0x20174
-#define VCE_RB_RPTR2 0x20178
-#define VCE_RB_WPTR2 0x2017c
-#define VCE_RB_BASE_LO 0x20180
-#define VCE_RB_BASE_HI 0x20184
-#define VCE_RB_SIZE 0x20188
-#define VCE_RB_RPTR 0x2018c
-#define VCE_RB_WPTR 0x20190
-#define VCE_CLOCK_GATING_A 0x202f8
-#define VCE_CLOCK_GATING_B 0x202fc
-#define VCE_UENC_CLOCK_GATING 0x205bc
-#define VCE_UENC_REG_CLOCK_GATING 0x205c0
-#define VCE_FW_REG_STATUS 0x20e10
-# define VCE_FW_REG_STATUS_BUSY (1 << 0)
-# define VCE_FW_REG_STATUS_PASS (1 << 3)
-# define VCE_FW_REG_STATUS_DONE (1 << 11)
-#define VCE_LMI_FW_START_KEYSEL 0x20e18
-#define VCE_LMI_FW_PERIODIC_CTRL 0x20e20
-#define VCE_LMI_CTRL2 0x20e74
-#define VCE_LMI_CTRL 0x20e98
-#define VCE_LMI_VM_CTRL 0x20ea0
-#define VCE_LMI_SWAP_CNTL 0x20eb4
-#define VCE_LMI_SWAP_CNTL1 0x20eb8
-#define VCE_LMI_CACHE_CTRL 0x20ef4
-
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
#define VCE_CMD_IB 0x00000002
@@ -629,7 +590,6 @@
#define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006
-
//#dce stupp
/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
#define CRTC0_REGISTER_OFFSET (0x1b7c - 0x1b7c) //(0x6df0 - 0x6df0)/4
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index dd2d66090d23..68aef47254a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -743,7 +743,7 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res)
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
@@ -752,9 +752,6 @@ int smu_v11_0_i2c_control_init(struct amdgpu_device *adev)
void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev)
{
- struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus;
-
- i2c_del_adapter(control);
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
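
The devm_i2c_add_adapter() conversion ties the adapter's lifetime to the amdgpu device, so the explicit i2c_del_adapter() in the fini path can simply go away; devres removes the adapter when the device is unbound, and only the dangling bus pointers still need clearing. The registration side collapses to (sketch):

#include <linux/i2c.h>

/* Registration sketch: devres removes the adapter on device unbind,
 * so no matching i2c_del_adapter() call is needed in the fini path. */
static int hw_i2c_register(struct device *dev, struct i2c_adapter *adap)
{
	return devm_i2c_add_adapter(dev, adap);
}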
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 9e74c9822e62..42f5d9c0e3af 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -741,7 +741,6 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
void soc15_set_virt_ops(struct amdgpu_device *adev)
{
adev->virt.ops = &xgpu_ai_virt_ops;
-
/* init soc15 reg base early enough so we can
* request request full access for sriov before
* set_ip_blocks. */
@@ -854,10 +853,6 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
u32 sol_reg;
- /* CP hangs in IGT reloading test on RN, reset to WA */
- if (adev->asic_type == CHIP_RENOIR)
- return true;
-
if (amdgpu_gmc_need_reset_on_init(adev))
return true;
if (amdgpu_psp_tos_reload_needed(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 7d17ae56f901..ee8038df17e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -159,6 +159,9 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
tonga_ih_enable_interrupts(adev);
+ if (adev->irq.ih_soft.ring_size)
+ adev->irq.ih_soft.enabled = true;
+
return 0;
}
@@ -196,6 +199,9 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
wptr = le32_to_cpu(*ih->wptr_cpu);
+ if (ih == &adev->irq.ih_soft)
+ goto out;
+
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -306,6 +312,10 @@ static int tonga_ih_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih_soft, IH_SW_RING_SIZE, true);
+ if (r)
+ return r;
+
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih;
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
index e590cbdd8de9..0f5b1719fda5 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
@@ -536,8 +536,11 @@ static int umc_v12_0_update_ecc_status(struct amdgpu_device *adev,
hwid = REG_GET_FIELD(ipid, MCMP1_IPIDT0, HardwareID);
mcatype = REG_GET_FIELD(ipid, MCMP1_IPIDT0, McaType);
- if ((hwid != MCA_UMC_HWID_V12_0) || (mcatype != MCA_UMC_MCATYPE_V12_0))
+ /* The IP block that decodes these consumption errors is the SMU */
+ if (hwid != MCA_UMC_HWID_V12_0 || mcatype != MCA_UMC_MCATYPE_V12_0) {
+ con->umc_ecc_log.consumption_q_count++;
return 0;
+ }
if (!status)
return 0;
@@ -708,6 +711,19 @@ static uint32_t umc_v12_0_get_die_id(struct amdgpu_device *adev,
return die;
}
+static void umc_v12_0_mca_ipid_parse(struct amdgpu_device *adev, uint64_t ipid,
+ uint32_t *did, uint32_t *ch, uint32_t *umc_inst, uint32_t *sid)
+{
+ if (did)
+ *did = MCA_IPID_2_DIE_ID(ipid);
+ if (ch)
+ *ch = MCA_IPID_2_UMC_CH(ipid);
+ if (umc_inst)
+ *umc_inst = MCA_IPID_2_UMC_INST(ipid);
+ if (sid)
+ *sid = MCA_IPID_2_SOCKET_ID(ipid);
+}
+
struct amdgpu_umc_ras umc_v12_0_ras = {
.ras_block = {
.hw_ops = &umc_v12_0_ras_hw_ops,
@@ -721,5 +737,6 @@ struct amdgpu_umc_ras umc_v12_0_ras = {
.convert_ras_err_addr = umc_v12_0_convert_error_address,
.get_die_id_from_pa = umc_v12_0_get_die_id,
.get_retire_flip_bits = umc_v12_0_get_retire_flip_bits,
+ .mca_ipid_parse = umc_v12_0_mca_ipid_parse,
};
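
The new .mca_ipid_parse hook lets common RAS code pull die, channel, UMC instance and socket out of an MCA IPID without knowing this UMC revision's bit layout, and callers may pass NULL for fields they don't need. A usage sketch (fragment; assumes adev and ipid come from the surrounding context):

uint32_t die_id, channel;

/* Decode only the die and channel; NULL skips instance and socket. */
umc_v12_0_mca_ipid_parse(adev, ipid, &die_id, &channel, NULL, NULL);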
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 5dbaebb592b3..2e79a3afc774 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -623,7 +623,22 @@ static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
*
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
- * Initialize the hardware, boot up the VCPU and do some testing
+ * Initialize the hardware, boot up the VCPU and do some testing.
+ *
+ * On SI, the UVD is meant to be used in a specific power state,
+ * or alternatively the driver can manually enable its clock.
+ * In amdgpu we use the dedicated UVD power state when DPM is enabled.
+ * Calling amdgpu_dpm_enable_uvd makes DPM select the UVD power state
+ * for the SMU and afterwards enables the UVD clock.
+ * This is automatically done by amdgpu_uvd_ring_begin_use when work
+ * is submitted to the UVD ring. Here, we have to call it manually
+ * in order to power up UVD before firmware validation.
+ *
+ * Note that we must not disable the UVD clock here, as that would
+ * cause the ring test to fail. However, UVD is powered off
+ * automatically after the ring test: amdgpu_uvd_ring_end_use calls
+ * the UVD idle work handler which will disable the UVD clock when
+ * all fences are signalled.
*/
static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
{
@@ -633,6 +648,15 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
int r;
uvd_v3_1_mc_resume(adev);
+ uvd_v3_1_enable_mgcg(adev, true);
+
+ /* Make sure UVD is powered during FW validation.
+ * It's going to be automatically powered off after the ring test.
+ */
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_uvd(adev, true);
+ else
+ amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
r = uvd_v3_1_fw_validate(adev);
if (r) {
@@ -640,9 +664,6 @@ static int uvd_v3_1_hw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- uvd_v3_1_enable_mgcg(adev, true);
- amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
-
uvd_v3_1_start(adev);
r = amdgpu_ring_test_helper(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 1c07b701d0e4..ceb94bbb03a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -217,7 +217,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -281,7 +282,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 9d237b5937fb..1f8866f3f63c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -225,7 +225,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -288,7 +289,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
int i, r;
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
- AMDGPU_IB_POOL_DIRECT, &job);
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
new file mode 100644
index 000000000000..9ae424618556
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.c
@@ -0,0 +1,839 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ * Copyright 2025 Valve Corporation
+ * Copyright 2025 Alexandre Demers
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ * Timur Kristóf <timur.kristof@gmail.com>
+ * Alexandre Demers <alexandre.f.demers@gmail.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vce.h"
+#include "amdgpu_gart.h"
+#include "sid.h"
+#include "vce_v1_0.h"
+#include "vce/vce_1_0_d.h"
+#include "vce/vce_1_0_sh_mask.h"
+#include "oss/oss_1_0_d.h"
+#include "oss/oss_1_0_sh_mask.h"
+
+#define VCE_V1_0_FW_SIZE (256 * 1024)
+#define VCE_V1_0_STACK_SIZE (64 * 1024)
+#define VCE_V1_0_DATA_SIZE (7808 * (AMDGPU_MAX_VCE_HANDLES + 1))
+#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
+
+#define VCE_V1_0_GART_PAGE_START \
+ (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS)
+#define VCE_V1_0_GART_ADDR_START \
+ (VCE_V1_0_GART_PAGE_START * AMDGPU_GPU_PAGE_SIZE)
+
+static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev);
+static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+
+struct vce_v1_0_fw_signature {
+ int32_t offset;
+ uint32_t length;
+ int32_t number;
+ struct {
+ uint32_t chip_id;
+ uint32_t keyselect;
+ uint32_t nonce[4];
+ uint32_t sigval[4];
+ } val[8];
+};
+
+/**
+ * vce_v1_0_ring_get_rptr - get read pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+static uint64_t vce_v1_0_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ return RREG32(mmVCE_RB_RPTR);
+ else
+ return RREG32(mmVCE_RB_RPTR2);
+}
+
+/**
+ * vce_v1_0_ring_get_wptr - get write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+static uint64_t vce_v1_0_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ return RREG32(mmVCE_RB_WPTR);
+ else
+ return RREG32(mmVCE_RB_WPTR2);
+}
+
+/**
+ * vce_v1_0_ring_set_wptr - set write pointer
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+static void vce_v1_0_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->me == 0)
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ else
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+}
+
+static int vce_v1_0_lmi_clean(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ if (RREG32(mmVCE_LMI_STATUS) & 0x337f)
+ return 0;
+
+ mdelay(10);
+ }
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int vce_v1_0_firmware_loaded(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < 10; ++i) {
+ for (j = 0; j < 100; ++j) {
+ if (RREG32(mmVCE_STATUS) & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
+ return 0;
+ mdelay(10);
+ }
+
+ dev_err(adev->dev, "VCE not responding, trying to reset the ECPU\n");
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
+ mdelay(10);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void vce_v1_0_init_cg(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_CLOCK_GATING_B);
+ tmp |= 0x1e;
+ tmp &= ~0xe100e1;
+ WREG32(mmVCE_CLOCK_GATING_B, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0xff9ff000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+}
+
+/**
+ * vce_v1_0_load_fw_signature - load firmware signature into VCPU BO
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The VCE1 firmware validation mechanism needs a firmware signature.
+ * This function finds the signature appropriate for the current
+ * ASIC and writes that into the VCPU BO.
+ */
+static int vce_v1_0_load_fw_signature(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *hdr;
+ struct vce_v1_0_fw_signature *sign;
+ unsigned int ucode_offset;
+ uint32_t chip_id;
+ u32 *cpu_addr;
+ int i;
+
+ hdr = (const struct common_firmware_header *)adev->vce.fw->data;
+ ucode_offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ cpu_addr = adev->vce.cpu_addr;
+
+ sign = (void *)adev->vce.fw->data + ucode_offset;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ default:
+ dev_err(adev->dev, "asic_type %#010x was not found!", adev->asic_type);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < le32_to_cpu(sign->number); ++i) {
+ if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
+ break;
+ }
+
+ if (i == le32_to_cpu(sign->number)) {
+ dev_err(adev->dev, "chip_id 0x%x for %s was not found in VCE firmware",
+ chip_id, amdgpu_asic_name[adev->asic_type]);
+ return -EINVAL;
+ }
+
+ cpu_addr += (256 - 64) / 4;
+ memcpy_toio(&cpu_addr[0], &sign->val[i].nonce[0], 16);
+ cpu_addr[4] = cpu_to_le32(le32_to_cpu(sign->length) + 64);
+
+ memset_io(&cpu_addr[5], 0, 44);
+ memcpy_toio(&cpu_addr[16], &sign[1], hdr->ucode_size_bytes - sizeof(*sign));
+
+ cpu_addr += (le32_to_cpu(sign->length) + 64) / 4;
+ memcpy_toio(&cpu_addr[0], &sign->val[i].sigval[0], 16);
+
+ adev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
+
+ return 0;
+}
+
+static int vce_v1_0_wait_for_fw_validation(struct amdgpu_device *adev)
+{
+ int i;
+
+ dev_dbg(adev->dev, "VCE keyselect: %d", adev->vce.keyselect);
+ WREG32(mmVCE_LMI_FW_START_KEYSEL, adev->vce.keyselect);
+
+ for (i = 0; i < 10; ++i) {
+ mdelay(10);
+ if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)
+ break;
+ }
+
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__DONE_MASK)) {
+ dev_err(adev->dev, "VCE FW validation timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__PASS_MASK)) {
+ dev_err(adev->dev, "VCE FW validation failed\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 10; ++i) {
+ mdelay(10);
+ if (!(RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK))
+ break;
+ }
+
+ if (RREG32(mmVCE_FW_REG_STATUS) & VCE_FW_REG_STATUS__BUSY_MASK) {
+ dev_err(adev->dev, "VCE FW busy timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_mc_resume(struct amdgpu_device *adev)
+{
+ uint32_t offset;
+ uint32_t size;
+
+ /*
+ * When the keyselect is already set, don't perturb VCE FW.
+ * Validation seems to always fail the second time.
+ */
+ if (RREG32(mmVCE_LMI_FW_START_KEYSEL)) {
+ dev_dbg(adev->dev, "keyselect already set: 0x%x (on CPU: 0x%x)\n",
+ RREG32(mmVCE_LMI_FW_START_KEYSEL), adev->vce.keyselect);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+ return 0;
+ }
+
+ WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
+ WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
+ WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
+ WREG32(mmVCE_CLOCK_GATING_B, 0);
+
+ WREG32_P(mmVCE_LMI_FW_PERIODIC_CTRL, 0x4, ~0x4);
+
+ WREG32(mmVCE_LMI_CTRL, 0x00398000);
+
+ WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
+ WREG32(mmVCE_LMI_SWAP_CNTL, 0);
+ WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
+ WREG32(mmVCE_LMI_VM_CTRL, 0);
+
+ WREG32(mmVCE_VCPU_SCRATCH7, AMDGPU_MAX_VCE_HANDLES);
+
+ offset = adev->vce.gpu_addr + AMDGPU_VCE_FIRMWARE_OFFSET;
+ size = VCE_V1_0_FW_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE0, size);
+
+ offset += size;
+ size = VCE_V1_0_STACK_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE1, size);
+
+ offset += size;
+ size = VCE_V1_0_DATA_SIZE;
+ WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
+ WREG32(mmVCE_VCPU_CACHE_SIZE2, size);
+
+ WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
+
+ return vce_v1_0_wait_for_fw_validation(adev);
+}
+
+/**
+ * vce_v1_0_is_idle() - Check idle status of VCE1 IP block
+ *
+ * @ip_block: amdgpu_ip_block pointer
+ *
+ * Check whether VCE is busy according to VCE_STATUS.
+ * Also check whether the SRBM thinks VCE is busy, although
+ * SRBM_STATUS2.VCE_BUSY seems to be bogus because it
+ * appears to mirror the VCE_STATUS.VCPU_REPORT_FW_LOADED bit.
+ */
+static bool vce_v1_0_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool busy =
+ (RREG32(mmVCE_STATUS) & (VCE_STATUS__JOB_BUSY_MASK | VCE_STATUS__UENC_BUSY_MASK)) ||
+ (RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
+
+ return !busy;
+}
+
+static int vce_v1_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ unsigned int i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ udelay(1);
+ if (vce_v1_0_is_idle(ip_block))
+ return 0;
+ }
+ return -ETIMEDOUT;
+}
+
+/**
+ * vce_v1_0_start - start VCE block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Setup and start the VCE block
+ */
+static int vce_v1_0_start(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int r;
+
+ WREG32_P(mmVCE_STATUS, 1, ~1);
+
+ r = vce_v1_0_mc_resume(adev);
+ if (r)
+ return r;
+
+ ring = &adev->vce.ring[0];
+ WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO, lower_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
+
+ ring = &adev->vce.ring[1];
+ WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32(mmVCE_RB_BASE_LO2, lower_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
+
+ WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
+ ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ mdelay(100);
+
+ WREG32_P(mmVCE_SOFT_RESET, 0,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ r = vce_v1_0_firmware_loaded(adev);
+
+ /* Clear VCE_STATUS, otherwise SRBM thinks VCE1 is busy. */
+ WREG32(mmVCE_STATUS, 0);
+
+ if (r) {
+ dev_err(adev->dev, "VCE not responding, giving up\n");
+ return r;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_stop(struct amdgpu_device *adev)
+{
+ struct amdgpu_ip_block *ip_block;
+ int status;
+ int i;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE);
+ if (!ip_block)
+ return -EINVAL;
+
+ if (vce_v1_0_lmi_clean(adev))
+ dev_warn(adev->dev, "VCE not idle\n");
+
+ if (vce_v1_0_wait_for_idle(ip_block))
+ dev_warn(adev->dev, "VCE busy: VCE_STATUS=0x%x, SRBM_STATUS2=0x%x\n",
+ RREG32(mmVCE_STATUS), RREG32(mmSRBM_STATUS2));
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));
+
+ for (i = 0; i < 100; ++i) {
+ status = RREG32(mmVCE_LMI_STATUS);
+ if (status & 0x240)
+ break;
+ mdelay(1);
+ }
+
+ WREG32_P(mmVCE_VCPU_CNTL, 0, ~VCE_VCPU_CNTL__CLK_EN_MASK);
+
+ WREG32_P(mmVCE_SOFT_RESET,
+ VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK,
+ ~(VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK |
+ VCE_SOFT_RESET__FME_SOFT_RESET_MASK));
+
+ WREG32(mmVCE_STATUS, 0);
+
+ return 0;
+}
+
+static void vce_v1_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
+{
+ u32 tmp;
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp |= VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp &= ~0x1ff000;
+ tmp |= 0xff800000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp &= ~0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ } else {
+ tmp = RREG32(mmVCE_CLOCK_GATING_A);
+ tmp &= ~VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK;
+ WREG32(mmVCE_CLOCK_GATING_A, tmp);
+
+ tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
+ tmp |= 0x1ff000;
+ tmp &= ~0xff800000;
+ WREG32(mmVCE_UENC_CLOCK_GATING, tmp);
+
+ tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
+ tmp |= 0x3ff;
+ WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
+ }
+}
+
+static int vce_v1_0_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
+
+ adev->vce.num_rings = 2;
+
+ vce_v1_0_set_ring_funcs(adev);
+ vce_v1_0_set_irq_funcs(adev);
+
+ return 0;
+}
+
+/**
+ * vce_v1_0_ensure_vcpu_bo_32bit_addr() - ensure the VCPU BO has a 32-bit address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Due to various hardware limitations, the VCE1 requires
+ * the VCPU BO to be in the low 32 bit address range.
+ * Ensure that the VCPU BO has a 32-bit GPU address,
+ * or return an error code when that isn't possible.
+ *
+ * To accommodate that, we place GART in the low address range
+ * and reserve some GART pages where we map the VCPU BO,
+ * so that it gets a 32-bit address.
+ */
+static int vce_v1_0_ensure_vcpu_bo_32bit_addr(struct amdgpu_device *adev)
+{
+ u64 gpu_addr = amdgpu_bo_gpu_offset(adev->vce.vcpu_bo);
+ u64 bo_size = amdgpu_bo_size(adev->vce.vcpu_bo);
+ u64 max_vcpu_bo_addr = 0xffffffff - bo_size;
+ u64 num_pages = ALIGN(bo_size, AMDGPU_GPU_PAGE_SIZE) / AMDGPU_GPU_PAGE_SIZE;
+ u64 pa = amdgpu_gmc_vram_pa(adev, adev->vce.vcpu_bo);
+ u64 flags = AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_VALID;
+
+ /*
+ * Check if the VCPU BO already has a 32-bit address.
+ * E.g. if the MC is configured to put VRAM in the low address range.
+ */
+ if (gpu_addr <= max_vcpu_bo_addr)
+ return 0;
+
+ /* Check if we can map the VCPU BO in GART to a 32-bit address. */
+ if (adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START > max_vcpu_bo_addr)
+ return -EINVAL;
+
+ amdgpu_gart_map_vram_range(adev, pa, VCE_V1_0_GART_PAGE_START,
+ num_pages, flags, adev->gart.ptr);
+ adev->vce.gpu_addr = adev->gmc.gart_start + VCE_V1_0_GART_ADDR_START;
+ if (adev->vce.gpu_addr > max_vcpu_bo_addr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vce_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ring *ring;
+ int r, i;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_sw_init(adev, VCE_V1_0_FW_SIZE +
+ VCE_V1_0_STACK_SIZE + VCE_V1_0_DATA_SIZE);
+ if (r)
+ return r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_load_fw_signature(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev);
+ if (r)
+ return r;
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
+ ring = &adev->vce.ring[i];
+ sprintf(ring->name, "vce%d", i);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ hw_prio, NULL);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static int vce_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_suspend(adev);
+ if (r)
+ return r;
+
+ return amdgpu_vce_sw_fini(adev);
+}
+
+/**
+ * vce_v1_0_hw_init - start and test VCE block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+static int vce_v1_0_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, r;
+
+ if (adev->pm.dpm_enabled)
+ amdgpu_dpm_enable_vce(adev, true);
+ else
+ amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
+ if (r)
+ return r;
+ }
+
+ dev_info(adev->dev, "VCE initialized successfully.\n");
+
+ return 0;
+}
+
+static int vce_v1_0_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ r = vce_v1_0_stop(ip_block->adev);
+ if (r)
+ return r;
+
+ cancel_delayed_work_sync(&ip_block->adev->vce.idle_work);
+ return 0;
+}
+
+static int vce_v1_0_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ }
+
+ r = vce_v1_0_hw_fini(ip_block);
+ if (r) {
+ dev_err(adev->dev, "vce_v1_0_hw_fini() failed with error %i", r);
+ return r;
+ }
+
+ return amdgpu_vce_suspend(adev);
+}
+
+static int vce_v1_0_resume(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_resume(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_load_fw_signature(adev);
+ if (r)
+ return r;
+ r = vce_v1_0_ensure_vcpu_bo_32bit_addr(adev);
+ if (r)
+ return r;
+
+ return vce_v1_0_hw_init(ip_block);
+}
+
+static int vce_v1_0_set_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t val = 0;
+
+ if (state == AMDGPU_IRQ_STATE_ENABLE)
+ val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;
+
+ WREG32_P(mmVCE_SYS_INT_EN, val,
+ ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
+ return 0;
+}
+
+static int vce_v1_0_process_interrupt(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_dbg(adev->dev, "IH: VCE\n");
+ switch (entry->src_data[0]) {
+ case 0:
+ case 1:
+ amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
+ break;
+ default:
+ dev_err(adev->dev, "Unhandled interrupt: %d %d\n",
+ entry->src_id, entry->src_data[0]);
+ break;
+ }
+
+ return 0;
+}
+
+static int vce_v1_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ vce_v1_0_init_cg(adev);
+ vce_v1_0_enable_mgcg(adev, state == AMD_CG_STATE_GATE);
+
+ return 0;
+}
+
+static int vce_v1_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ /*
+ * This doesn't actually powergate the VCE block.
+ * That's done in the dpm code via the SMC. This
+ * just re-inits the block as necessary. The actual
+ * gating still happens in the dpm code. We should
+ * revisit this when there is a cleaner line between
+ * the smc and the hw blocks
+ */
+ if (state == AMD_PG_STATE_GATE)
+ return vce_v1_0_stop(adev);
+ else
+ return vce_v1_0_start(adev);
+}
+
+static const struct amd_ip_funcs vce_v1_0_ip_funcs = {
+ .name = "vce_v1_0",
+ .early_init = vce_v1_0_early_init,
+ .sw_init = vce_v1_0_sw_init,
+ .sw_fini = vce_v1_0_sw_fini,
+ .hw_init = vce_v1_0_hw_init,
+ .hw_fini = vce_v1_0_hw_fini,
+ .suspend = vce_v1_0_suspend,
+ .resume = vce_v1_0_resume,
+ .is_idle = vce_v1_0_is_idle,
+ .wait_for_idle = vce_v1_0_wait_for_idle,
+ .set_clockgating_state = vce_v1_0_set_clockgating_state,
+ .set_powergating_state = vce_v1_0_set_powergating_state,
+};
+
+static const struct amdgpu_ring_funcs vce_v1_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VCE,
+ .align_mask = 0xf,
+ .nop = VCE_CMD_NO_OP,
+ .support_64bit_ptrs = false,
+ .no_user_fence = true,
+ .get_rptr = vce_v1_0_ring_get_rptr,
+ .get_wptr = vce_v1_0_ring_get_wptr,
+ .set_wptr = vce_v1_0_ring_set_wptr,
+ .parse_cs = amdgpu_vce_ring_parse_cs,
+ .emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+ .emit_ib = amdgpu_vce_ring_emit_ib,
+ .emit_fence = amdgpu_vce_ring_emit_fence,
+ .test_ring = amdgpu_vce_ring_test_ring,
+ .test_ib = amdgpu_vce_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vce_ring_begin_use,
+ .end_use = amdgpu_vce_ring_end_use,
+};
+
+static void vce_v1_0_set_ring_funcs(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->vce.num_rings; i++) {
+ adev->vce.ring[i].funcs = &vce_v1_0_ring_funcs;
+ adev->vce.ring[i].me = i;
+ }
+}
+
+static const struct amdgpu_irq_src_funcs vce_v1_0_irq_funcs = {
+ .set = vce_v1_0_set_interrupt_state,
+ .process = vce_v1_0_process_interrupt,
+};
+
+static void vce_v1_0_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->vce.irq.num_types = 1;
+ adev->vce.irq.funcs = &vce_v1_0_irq_funcs;
+}
+
+const struct amdgpu_ip_block_version vce_v1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &vce_v1_0_ip_funcs,
+};
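
The arithmetic behind vce_v1_0_ensure_vcpu_bo_32bit_addr() is easy to sanity-check in isolation: the BO base must stay at or below 0xffffffff minus the BO size, which a VRAM placement above 4 GiB can never satisfy, while a low GART window can. A standalone check (sizes are illustrative and assume AMDGPU_MAX_VCE_HANDLES == 16):

#include <assert.h>
#include <stdint.h>

#define FW_SIZE    (256 * 1024)
#define STACK_SIZE (64 * 1024)
#define DATA_SIZE  (7808 * 17)	/* assumes AMDGPU_MAX_VCE_HANDLES == 16 */

int main(void)
{
	uint64_t bo_size  = FW_SIZE + STACK_SIZE + DATA_SIZE;
	uint64_t max_base = 0xffffffffull - bo_size;

	/* VRAM placed above 4 GiB can never satisfy the direct check... */
	assert(0x100000000ull > max_base);

	/* ...but a GART window starting low easily does. */
	uint64_t gart_start = 0x0;	/* assumed low GART placement */
	assert(gart_start + 512 * 4096 <= max_base);
	return 0;
}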
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h
index 0d878ca3acba..206e7bec897f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v1_0.h
@@ -1,5 +1,8 @@
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ * Copyright 2025 Valve Corporation
+ * Copyright 2025 Alexandre Demers
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,12 +24,9 @@
*
*/
-#ifndef __DCE_V11_0_H__
-#define __DCE_V11_0_H__
+#ifndef __VCE_V1_0_H__
+#define __VCE_V1_0_H__
-extern const struct amdgpu_ip_block_version dce_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version dce_v11_2_ip_block;
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev);
+extern const struct amdgpu_ip_block_version vce_v1_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index bee3e904a6bc..8ea8a6193492 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -407,6 +407,11 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
static int vce_v2_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
adev->vce.num_rings = 2;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 708123899c41..719e9643c43d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -399,6 +399,7 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
@@ -407,6 +408,10 @@ static int vce_v3_0_early_init(struct amdgpu_ip_block *ip_block)
(AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
return -ENOENT;
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
+
adev->vce.num_rings = 3;
vce_v3_0_set_ring_funcs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 335bda64ff5b..2d64002bed61 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -410,6 +410,11 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
static int vce_v4_0_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_vce_early_init(adev);
+ if (r)
+ return r;
if (amdgpu_sriov_vf(adev)) /* currently only VCN0 support SRIOV */
adev->vce.num_rings = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index c74947705d77..a316797875a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -193,7 +193,7 @@ static int vcn_v1_0_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode;
if (amdgpu_vcnfw_log) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
amdgpu_vcn_fwlog_init(adev->vcn.inst);
@@ -230,11 +230,11 @@ static int vcn_v1_0_sw_fini(struct amdgpu_ip_block *ip_block)
jpeg_v1_0_sw_fini(ip_block);
- r = amdgpu_vcn_sw_fini(adev, 0);
+ amdgpu_vcn_sw_fini(adev, 0);
kfree(adev->vcn.ip_dump);
- return r;
+ return 0;
}
/**
@@ -1338,7 +1338,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
@@ -1399,7 +1398,6 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL,
UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK);
- ring = &adev->vcn.inst->ring_dec;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 68b4371df0f1..8897dcc9c1a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -136,10 +136,8 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -232,14 +230,9 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block)
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(adev->vcn.inst);
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_0, ARRAY_SIZE(vcn_reg_list_2_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -259,7 +252,7 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
int r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
fw_shared->present_flag_0 = 0;
@@ -274,11 +267,9 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- r = amdgpu_vcn_sw_fini(adev, 0);
-
- kfree(adev->vcn.ip_dump);
+ amdgpu_vcn_sw_fini(adev, 0);
- return r;
+ return 0;
}
/**
@@ -862,9 +853,10 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_vcn_inst *vinst)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
+ int ret;
vcn_v2_0_enable_static_power_gating(vinst);
@@ -948,8 +940,13 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, 0, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* force RBC into idle state */
rb_bufsz = order_base_2(ring->ring_size);
@@ -1004,7 +1001,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1311,7 +1308,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -2095,66 +2092,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
}
-static void vcn_v2_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
@@ -2168,8 +2105,8 @@ static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.wait_for_idle = vcn_v2_0_wait_for_idle,
.set_clockgating_state = vcn_v2_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_0_dump_ip_state,
- .print_ip_state = vcn_v2_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = {
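
vcn_v2_0 (and vcn_v2_5 below) drop their hand-rolled ip_dump allocation, dump and print loops in favour of the shared amdgpu_vcn helpers; each version now only registers its register list. The underlying table-driven idea, in standalone form (not the driver's actual helper):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct reg { const char *name; uint32_t (*read)(void); };

/* Snapshot every register in the list into one flat buffer. */
static uint32_t *dump_regs(const struct reg *list, size_t n)
{
	uint32_t *snap = calloc(n, sizeof(*snap));

	if (!snap)
		return NULL;
	for (size_t i = 0; i < n; i++)
		snap[i] = list[i].read();
	return snap;
}

/* Pretty-print the snapshot, matching the removed loop's format. */
static void print_regs(const struct reg *list, const uint32_t *snap, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("%-50s \t 0x%08x\n", list[i].name, snap[i]);
}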
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index bc30a5326866..cebee453871c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -116,7 +116,6 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
struct amdgpu_device *adev = vcn_inst->adev;
unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
unsigned int i, j;
- int r = 0;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
struct amdgpu_vcn_inst *v = &adev->vcn.inst[i];
@@ -149,15 +148,7 @@ static void vcn_v2_5_idle_work_handler(struct work_struct *work)
if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- false);
- if (r)
- dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
- adev->vcn.workload_profile_active = false;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
+ amdgpu_vcn_put_profile(adev);
} else {
schedule_delayed_work(&adev->vcn.inst[0].idle_work, VCN_IDLE_TIMEOUT);
}
@@ -167,7 +158,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vcn_inst *v = &adev->vcn.inst[ring->me];
- int r = 0;
atomic_inc(&adev->vcn.inst[0].total_submission_cnt);
@@ -177,20 +167,6 @@ static void vcn_v2_5_ring_begin_use(struct amdgpu_ring *ring)
* the delayed work so there is no one else to set it to false
* and we don't care if someone else sets it to true.
*/
- if (adev->vcn.workload_profile_active)
- goto pg_lock;
-
- mutex_lock(&adev->vcn.workload_profile_mutex);
- if (!adev->vcn.workload_profile_active) {
- r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
- true);
- if (r)
- dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
- adev->vcn.workload_profile_active = true;
- }
- mutex_unlock(&adev->vcn.workload_profile_mutex);
-
-pg_lock:
mutex_lock(&adev->vcn.inst[0].vcn_pg_lock);
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -218,6 +194,7 @@ pg_lock:
v->pause_dpg_mode(v, &new_state);
}
mutex_unlock(&adev->vcn.inst[0].vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
}
static void vcn_v2_5_ring_end_use(struct amdgpu_ring *ring)
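The open-coded video workload-profile switching in the idle handler and ring_begin_use moves into amdgpu_vcn_get_profile()/amdgpu_vcn_put_profile(). A sketch reconstructed from the deleted inline logic; the exact signatures in amdgpu_vcn.c are assumptions:

void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
{
	int r;

	/* unlocked fast path: only the idle work clears the flag, so a
	 * stale false just falls through to the mutex below
	 */
	if (adev->vcn.workload_profile_active)
		return;

	mutex_lock(&adev->vcn.workload_profile_mutex);
	if (!adev->vcn.workload_profile_active) {
		r = amdgpu_dpm_switch_power_profile(adev,
				PP_SMC_POWER_PROFILE_VIDEO, true);
		if (r)
			dev_warn(adev->dev, "(%d) failed to switch to video power profile mode\n", r);
		adev->vcn.workload_profile_active = true;
	}
	mutex_unlock(&adev->vcn.workload_profile_mutex);
}

void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
{
	int r;

	mutex_lock(&adev->vcn.workload_profile_mutex);
	if (adev->vcn.workload_profile_active) {
		r = amdgpu_dpm_switch_power_profile(adev,
				PP_SMC_POWER_PROFILE_VIDEO, false);
		if (r)
			dev_warn(adev->dev, "(%d) failed to disable video power profile mode\n", r);
		adev->vcn.workload_profile_active = false;
	}
	mutex_unlock(&adev->vcn.workload_profile_mutex);
}

Note the ordering change in ring_begin_use: the profile request now happens after power ungating and the vcn_pg_lock critical section rather than before it.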
@@ -297,12 +274,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
int i, j, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << j))
continue;
@@ -423,14 +398,9 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_2_5, ARRAY_SIZE(vcn_reg_list_2_5));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
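Here and in every other VCN version below, the duplicated kcalloc()-based dump-buffer allocation becomes a call to amdgpu_vcn_reg_dump_init(). Judging from the call sites, it allocates the per-instance dump buffer and records the register list for the common dumpers; a sketch under those assumptions (the reg_list/reg_count field names are invented for illustration):

int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
			     const struct amdgpu_hwip_reg_entry *reg_list,
			     uint32_t count)
{
	adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
				    sizeof(uint32_t), GFP_KERNEL);
	if (!adev->vcn.ip_dump)
		return -ENOMEM;

	adev->vcn.reg_list = reg_list;
	adev->vcn.reg_count = count;

	return 0;
}

Two behavioral consequences are visible in the diff: allocation failure now fails sw_init instead of silently continuing with ip_dump == NULL, and the kfree(adev->vcn.ip_dump) calls disappear from every sw_fini, so the buffer is presumably released inside the now-void amdgpu_vcn_sw_fini().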
@@ -450,7 +420,7 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
{
int i, r, idx;
struct amdgpu_device *adev = ip_block->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
@@ -472,13 +442,9 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
r = amdgpu_vcn_suspend(adev, i);
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -1032,9 +998,10 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1125,8 +1092,13 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1183,7 +1155,7 @@ static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared =
+ struct amdgpu_fw_shared *fw_shared =
adev->vcn.inst[i].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
@@ -1695,7 +1667,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
if (!ret_code) {
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
@@ -2127,66 +2099,6 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v2_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_2_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v2_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_2_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_2_5[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.name = "vcn_v2_5",
.early_init = vcn_v2_5_early_init,
@@ -2200,8 +2112,8 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
@@ -2217,8 +2129,8 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
.wait_for_idle = vcn_v2_5_wait_for_idle,
.set_clockgating_state = vcn_v2_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v2_5_dump_ip_state,
- .print_ip_state = vcn_v2_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 2811226b0ea5..d9cf8f0feeb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -175,8 +175,6 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
int i, j, r;
int vcn_doorbell_index = 0;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t *ptr;
struct amdgpu_device *adev = ip_block->adev;
/*
@@ -193,7 +191,7 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
}
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -304,14 +302,9 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (ptr == NULL) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_3_0, ARRAY_SIZE(vcn_reg_list_3_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -334,7 +327,7 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -356,12 +349,9 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
return 0;
}
@@ -1039,9 +1029,10 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
@@ -1134,8 +1125,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_dec;
/* force RBC into idle state */
@@ -1198,7 +1194,7 @@ static int vcn_v3_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
int j, k, r;
@@ -1719,7 +1715,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
int ret_code;
@@ -1838,7 +1834,7 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- volatile struct amdgpu_fw_shared *fw_shared;
+ struct amdgpu_fw_shared *fw_shared;
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
/*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */
@@ -2346,67 +2342,6 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v3_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
- uint32_t inst_off;
- bool is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_3_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v3_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_3_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, mmUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_3_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.name = "vcn_v3_0",
.early_init = vcn_v3_0_early_init,
@@ -2420,8 +2355,8 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
.wait_for_idle = vcn_v3_0_wait_for_idle,
.set_clockgating_state = vcn_v3_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v3_0_dump_ip_state,
- .print_ip_state = vcn_v3_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v3_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 706f3b2f484f..3ae666522d57 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -148,7 +148,7 @@ static int vcn_v4_0_early_init(struct amdgpu_ip_block *ip_block)
static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE);
@@ -183,8 +183,6 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t *ptr;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
@@ -255,14 +253,9 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0, ARRAY_SIZE(vcn_reg_list_4_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -285,7 +278,7 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -309,13 +302,8 @@ static int vcn_v4_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -1009,9 +997,10 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1094,8 +1083,13 @@ static int vcn_v4_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1143,7 +1137,7 @@ static int vcn_v4_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1360,8 +1354,8 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1612,7 +1606,7 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1624,7 +1618,6 @@ static int vcn_v4_0_stop(struct amdgpu_vcn_inst *vinst)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
vcn_v4_0_stop_dpg_mode(vinst);
- r = 0;
goto done;
}
@@ -1984,7 +1977,7 @@ static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
.nop = VCN_ENC_CMD_NO_OP,
- .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata),
+ .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata),
.get_rptr = vcn_v4_0_unified_ring_get_rptr,
.get_wptr = vcn_v4_0_unified_ring_get_wptr,
.set_wptr = vcn_v4_0_unified_ring_set_wptr,
@@ -2240,67 +2233,6 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.name = "vcn_v4_0",
.early_init = vcn_v4_0_early_init,
@@ -2314,8 +2246,8 @@ static const struct amd_ip_funcs vcn_v4_0_ip_funcs = {
.wait_for_idle = vcn_v4_0_wait_for_idle,
.set_clockgating_state = vcn_v4_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_dump_ip_state,
- .print_ip_state = vcn_v4_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 2a3663b551af..cb7123ec1a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -134,6 +134,19 @@ static int vcn_v4_0_3_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+
+ return 0;
+}
+
static int vcn_v4_0_3_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn4_fw_shared *fw_shared;
@@ -160,8 +173,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
int i, r, vcn_inst;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t *ptr;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -201,7 +212,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id);
sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+
+	/* There are no per-instance irq source IDs on 4.0.3; the IH
+ * packets use a separate field to differentiate instances.
+ */
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0,
AMDGPU_RING_PRIO_DEFAULT,
&adev->vcn.inst[i].sched_score);
if (r)
@@ -213,10 +228,6 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
adev->vcn.inst[i].pause_dpg_mode = vcn_v4_0_3_pause_dpg_mode;
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
-
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
@@ -231,20 +242,11 @@ static int vcn_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block)
}
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-
- r = amdgpu_vcn_sysfs_reset_mask_init(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_3, ARRAY_SIZE(vcn_reg_list_4_0_3));
if (r)
return r;
- return 0;
+ return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
/**
@@ -261,7 +263,7 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(&adev->ddev, &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -281,13 +283,8 @@ static int vcn_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -391,7 +388,7 @@ static int vcn_v4_0_3_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -848,10 +845,10 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared =
+ struct amdgpu_vcn4_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -944,8 +941,13 @@ static int vcn_v4_0_3_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1010,8 +1012,8 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
struct mmsch_v4_0_cmd_end end = { {0} };
struct mmsch_v4_0_3_init_header header;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -1185,7 +1187,7 @@ static int vcn_v4_0_3_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
int j, k, r, vcn_inst;
uint32_t tmp;
@@ -1395,7 +1397,7 @@ static int vcn_v4_0_3_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
int r = 0, vcn_inst;
uint32_t tmp;
@@ -1872,71 +1874,10 @@ static void vcn_v4_0_3_set_irq_funcs(struct amdgpu_device *adev)
adev->vcn.inst->ras_poison_irq.funcs = &vcn_v4_0_3_ras_irq_funcs;
}
-static void vcn_v4_0_3_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_3[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_3_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off, inst_id;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_3);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_id = GET_INST(VCN, i);
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, inst_id, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_3[j],
- inst_id));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.name = "vcn_v4_0_3",
.early_init = vcn_v4_0_3_early_init,
+ .late_init = vcn_v4_0_3_late_init,
.sw_init = vcn_v4_0_3_sw_init,
.sw_fini = vcn_v4_0_3_sw_fini,
.hw_init = vcn_v4_0_3_hw_init,
@@ -1947,8 +1888,8 @@ static const struct amd_ip_funcs vcn_v4_0_3_ip_funcs = {
.wait_for_idle = vcn_v4_0_3_wait_for_idle,
.set_clockgating_state = vcn_v4_0_3_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_3_dump_ip_state,
- .print_ip_state = vcn_v4_0_3_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_3_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index caf2d95a85d4..b107ee80e472 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -147,12 +147,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
struct amdgpu_ring *ring;
struct amdgpu_device *adev = ip_block->adev;
int i, r;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t *ptr;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -233,15 +230,9 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block)
return r;
}
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
- return 0;
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_4_0_5, ARRAY_SIZE(vcn_reg_list_4_0_5));
+
+ return r;
}
/**
@@ -258,7 +249,7 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -279,13 +270,9 @@ static int vcn_v4_0_5_sw_fini(struct amdgpu_ip_block *ip_block)
if (r)
return r;
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
+ amdgpu_vcn_sw_fini(adev, i);
}
- kfree(adev->vcn.ip_dump);
-
return 0;
}
@@ -923,9 +910,10 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -1006,8 +994,13 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -1054,7 +1047,7 @@ static int vcn_v4_0_5_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1273,7 +1266,7 @@ static int vcn_v4_0_5_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn4_fw_shared *fw_shared;
+ struct amdgpu_vcn4_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1596,7 +1589,7 @@ static int vcn_v4_0_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1704,67 +1697,6 @@ static void vcn_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
}
}
-static void vcn_v4_0_5_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_4_0_5[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-static void vcn_v4_0_5_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_4_0_5);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_4_0_5[j],
- i));
- }
-}
-
static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.name = "vcn_v4_0_5",
.early_init = vcn_v4_0_5_early_init,
@@ -1778,8 +1710,8 @@ static const struct amd_ip_funcs vcn_v4_0_5_ip_funcs = {
.wait_for_idle = vcn_v4_0_5_wait_for_idle,
.set_clockgating_state = vcn_v4_0_5_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v4_0_5_dump_ip_state,
- .print_ip_state = vcn_v4_0_5_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v4_0_5_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index 07a6e9582880..0202df5db1e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -115,21 +115,6 @@ static int vcn_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev)
-{
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t *ptr;
-
- /* Allocate memory for VCN IP Dump buffer */
- ptr = kcalloc(adev->vcn.num_vcn_inst * reg_count, sizeof(uint32_t), GFP_KERNEL);
- if (!ptr) {
- DRM_ERROR("Failed to allocate memory for VCN IP Dump\n");
- adev->vcn.ip_dump = NULL;
- } else {
- adev->vcn.ip_dump = ptr;
- }
-}
-
/**
* vcn_v5_0_0_sw_init - sw init for VCN block
*
@@ -144,7 +129,7 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
int i, r;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -201,7 +186,9 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_sriov_vf(adev))
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
- vcn_v5_0_0_alloc_ip_dump(adev);
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0, ARRAY_SIZE(vcn_reg_list_5_0));
+ if (r)
+ return r;
r = amdgpu_vcn_sysfs_reset_mask_init(adev);
if (r)
@@ -224,7 +211,7 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
if (adev->vcn.harvest_config & (1 << i))
continue;
@@ -245,13 +232,8 @@ static int vcn_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
return 0;
}
@@ -710,9 +692,10 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
+ struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
uint32_t tmp;
+ int ret;
/* disable register anti-hang mechanism */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 1,
@@ -766,8 +749,13 @@ static int vcn_v5_0_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
+ if (ret) {
+ dev_err(adev->dev, "%s: vcn sram load failed %d\n", __func__, ret);
+ return ret;
+ }
+ }
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
@@ -814,7 +802,7 @@ static int vcn_v5_0_0_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r;
@@ -1007,7 +995,7 @@ static int vcn_v5_0_0_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0;
@@ -1320,7 +1308,7 @@ static int vcn_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = ip_block->adev;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = state == AMD_CG_STATE_GATE;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1428,67 +1416,6 @@ static void vcn_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
}
}
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
- uint32_t inst_off, is_powered;
-
- if (!adev->vcn.ip_dump)
- return;
-
- drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i)) {
- drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
- continue;
- }
-
- inst_off = i * reg_count;
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered) {
- drm_printf(p, "\nActive Instance:VCN%d\n", i);
- for (j = 0; j < reg_count; j++)
- drm_printf(p, "%-50s \t 0x%08x\n", vcn_reg_list_5_0[j].reg_name,
- adev->vcn.ip_dump[inst_off + j]);
- } else {
- drm_printf(p, "\nInactive Instance:VCN%d\n", i);
- }
- }
-}
-
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block)
-{
- struct amdgpu_device *adev = ip_block->adev;
- int i, j;
- bool is_powered;
- uint32_t inst_off;
- uint32_t reg_count = ARRAY_SIZE(vcn_reg_list_5_0);
-
- if (!adev->vcn.ip_dump)
- return;
-
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
-
- inst_off = i * reg_count;
- /* mmUVD_POWER_STATUS is always readable and is first element of the array */
- adev->vcn.ip_dump[inst_off] = RREG32_SOC15(VCN, i, regUVD_POWER_STATUS);
- is_powered = (adev->vcn.ip_dump[inst_off] &
- UVD_POWER_STATUS__UVD_POWER_STATUS_MASK) != 1;
-
- if (is_powered)
- for (j = 1; j < reg_count; j++)
- adev->vcn.ip_dump[inst_off + j] =
- RREG32(SOC15_REG_ENTRY_OFFSET_INST(vcn_reg_list_5_0[j], i));
- }
-}
-
static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.name = "vcn_v5_0_0",
.early_init = vcn_v5_0_0_early_init,
@@ -1502,8 +1429,8 @@ static const struct amd_ip_funcs vcn_v5_0_0_ip_funcs = {
.wait_for_idle = vcn_v5_0_0_wait_for_idle,
.set_clockgating_state = vcn_v5_0_0_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
index b8927652bc50..51bbccd4360f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h
@@ -32,11 +32,6 @@
#define VCN_VID_IP_ADDRESS 0x0
#define VCN_AON_IP_ADDRESS 0x30000
-void vcn_v5_0_0_alloc_ip_dump(struct amdgpu_device *adev);
-void vcn_v5_0_0_print_ip_state(struct amdgpu_ip_block *ip_block,
- struct drm_printer *p);
-void vcn_v5_0_0_dump_ip_state(struct amdgpu_ip_block *ip_block);
-
extern const struct amdgpu_ip_block_version vcn_v5_0_0_ip_block;
#endif /* __VCN_V5_0_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
index cdefd7fcb0da..8bd457dea4cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
@@ -40,6 +40,40 @@
#include <drm/drm_drv.h>
+static const struct amdgpu_hwip_reg_entry vcn_reg_list_5_0_1[] = {
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_POWER_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_STATUS),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_CONTEXT_ID2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA0),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_DATA1),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_GPCOM_VCPU_CMD),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_HI4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_BASE_LO4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_RPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_WPTR4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE2),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE3),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_RB_SIZE4),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_CTL),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_DATA),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_LMA_MASK),
+ SOC15_REG_ENTRY_STR(VCN, 0, regUVD_DPG_PAUSE)
+};
+
static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev);
static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev);
@@ -79,6 +113,27 @@ static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
+static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->vcn.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ if ((adev->psp.sos.fw_version >= 0x00450025) &&
+ amdgpu_dpm_reset_vcn_is_supported(adev) &&
+ !amdgpu_sriov_vf(adev))
+ adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx)
{
struct amdgpu_vcn5_fw_shared *fw_shared;
@@ -153,17 +208,23 @@ static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block)
vcn_v5_0_1_fw_shared_init(adev, i);
}
- /* TODO: Add queue reset mask when FW fully supports it */
- adev->vcn.supported_reset =
- amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
-
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
}
- vcn_v5_0_0_alloc_ip_dump(adev);
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) {
+ r = amdgpu_vcn_ras_sw_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed to initialize vcn ras block!\n");
+ return r;
+ }
+ }
+
+ r = amdgpu_vcn_reg_dump_init(adev, vcn_reg_list_5_0_1, ARRAY_SIZE(vcn_reg_list_5_0_1));
+ if (r)
+ return r;
return amdgpu_vcn_sysfs_reset_mask_init(adev);
}
@@ -182,7 +243,7 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
fw_shared->present_flag_0 = 0;
@@ -201,15 +262,27 @@ static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block)
return r;
}
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- r = amdgpu_vcn_sw_fini(adev, i);
- if (r)
- return r;
- }
-
amdgpu_vcn_sysfs_reset_mask_fini(adev);
- kfree(adev->vcn.ip_dump);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ amdgpu_vcn_sw_fini(adev, i);
+
+ return 0;
+}
+
+static int vcn_v5_0_1_hw_init_inst(struct amdgpu_device *adev, int i)
+{
+ struct amdgpu_ring *ring;
+ int vcn_inst;
+
+ vcn_inst = GET_INST(VCN, i);
+ ring = &adev->vcn.inst[i].ring_enc[0];
+
+ if (ring->use_doorbell)
+ adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
+ ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
+ 11 * vcn_inst),
+ adev->vcn.inst[i].aid_id);
return 0;
}
@@ -225,7 +298,7 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
struct amdgpu_ring *ring;
- int i, r, vcn_inst;
+ int i, r;
if (amdgpu_sriov_vf(adev)) {
r = vcn_v5_0_1_start_sriov(adev);
@@ -243,14 +316,8 @@ static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block)
if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100)
adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- vcn_inst = GET_INST(VCN, i);
ring = &adev->vcn.inst[i].ring_enc[0];
-
- if (ring->use_doorbell)
- adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
- ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
- 11 * vcn_inst),
- adev->vcn.inst[i].aid_id);
+ vcn_v5_0_1_hw_init_inst(adev, i);
/* Re-init fw_shared, if required */
vcn_v5_0_1_fw_shared_init(adev, i);
@@ -284,7 +351,7 @@ static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block)
vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
}
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN) && !amdgpu_sriov_vf(adev))
amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0);
return 0;
@@ -601,11 +668,11 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
{
struct amdgpu_device *adev = vinst->adev;
int inst_idx = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared =
+ struct amdgpu_vcn5_fw_shared *fw_shared =
adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
struct amdgpu_ring *ring;
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE};
- int vcn_inst;
+ int vcn_inst, ret;
uint32_t tmp;
vcn_inst = GET_INST(VCN, inst_idx);
@@ -666,8 +733,13 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst,
VCN, 0, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
- if (indirect)
- amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (indirect) {
+ ret = amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM);
+ if (ret) {
+ dev_err(adev->dev, "vcn sram load failed %d\n", ret);
+ return ret;
+ }
+ }
/* resetting ring, fw should not check RB ring */
fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET;
@@ -732,8 +804,8 @@ static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev)
struct mmsch_v5_0_cmd_end end = { {0} };
struct mmsch_v5_0_init_header header;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
- volatile struct amdgpu_fw_shared_rb_setup *rb_setup;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_fw_shared_rb_setup *rb_setup;
direct_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_WRITE;
@@ -907,7 +979,7 @@ static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
struct amdgpu_ring *ring;
uint32_t tmp;
int j, k, r, vcn_inst;
@@ -1099,7 +1171,7 @@ static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst)
{
struct amdgpu_device *adev = vinst->adev;
int i = vinst->inst;
- volatile struct amdgpu_vcn5_fw_shared *fw_shared;
+ struct amdgpu_vcn5_fw_shared *fw_shared;
uint32_t tmp;
int r = 0, vcn_inst;
@@ -1229,6 +1301,31 @@ static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+static int vcn_v5_0_1_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ int r = 0;
+ int vcn_inst;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me];
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ vcn_inst = GET_INST(VCN, ring->me);
+ r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);
+
+ if (r) {
+		DRM_DEV_ERROR(adev->dev, "VCN reset failed: %d\n", r);
+ return r;
+ }
+
+ vcn_v5_0_1_hw_init_inst(adev, ring->me);
+ vcn_v5_0_1_start_dpg_mode(vinst, vinst->indirect_sram);
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_ENC,
.align_mask = 0x3f,
@@ -1257,6 +1354,7 @@ static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = {
.emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg,
.emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait,
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .reset = vcn_v5_0_1_ring_reset,
};
/**
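vcn_v5_0_1_ring_reset() above is the first per-queue reset hook for this IP; an annotated outline of the sequence (the helper semantics are inferred from their names, not confirmed here):

	amdgpu_ring_reset_helper_begin(ring, timedout_fence);	/* quiesce the ring, mark the timed-out fence */
	r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst);		/* ask the SMU to reset just this VCN instance */
	vcn_v5_0_1_hw_init_inst(adev, ring->me);		/* re-program the doorbell range lost in the reset */
	vcn_v5_0_1_start_dpg_mode(vinst, vinst->indirect_sram);	/* bring the firmware back up in DPG mode */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence); /* resume the ring and settle fences */

This is also why the doorbell programming was split out of hw_init() into vcn_v5_0_1_hw_init_inst() earlier in this file: the reset path needs to redo it for a single instance.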
@@ -1460,7 +1558,7 @@ static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev)
static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.name = "vcn_v5_0_1",
.early_init = vcn_v5_0_1_early_init,
- .late_init = NULL,
+ .late_init = vcn_v5_0_1_late_init,
.sw_init = vcn_v5_0_1_sw_init,
.sw_fini = vcn_v5_0_1_sw_fini,
.hw_init = vcn_v5_0_1_hw_init,
@@ -1475,8 +1573,8 @@ static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = {
.post_soft_reset = NULL,
.set_clockgating_state = vcn_v5_0_1_set_clockgating_state,
.set_powergating_state = vcn_set_powergating_state,
- .dump_ip_state = vcn_v5_0_0_dump_ip_state,
- .print_ip_state = vcn_v5_0_0_print_ip_state,
+ .dump_ip_state = amdgpu_vcn_dump_ip_state,
+ .print_ip_state = amdgpu_vcn_print_ip_state,
};
const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = {
@@ -1557,7 +1655,7 @@ static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank
/* reference to smu driver if header file */
static int vcn_v5_0_1_err_codes[] = {
- 14, 15, /* VCN */
+ 14, 15, 47, /* VCN [D|V|S] */
};
static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
@@ -1603,6 +1701,13 @@ static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_commo
if (r)
goto late_fini;
+ if (amdgpu_ras_is_supported(adev, ras_block->block) &&
+ adev->vcn.inst->ras_poison_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->vcn.inst->ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
return 0;
late_fini:
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 9b3510e53112..a611a7345125 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -67,7 +67,6 @@
#include "sdma_v2_4.h"
#include "sdma_v3_0.h"
#include "dce_v10_0.h"
-#include "dce_v11_0.h"
#include "iceland_ih.h"
#include "tonga_ih.h"
#include "cz_ih.h"
@@ -2124,8 +2123,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
break;
@@ -2142,8 +2139,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
@@ -2163,8 +2158,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
else if (amdgpu_device_has_dc_support(adev))
amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
- else
- amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block);
amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block);
#if defined(CONFIG_DRM_AMD_ACP)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 828a9ceef1e7..22925df6a791 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -521,15 +521,10 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
}
- minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
- if (!minfo.cu_mask.ptr)
- return -ENOMEM;
-
- retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
- if (retval) {
+ minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size);
+ if (IS_ERR(minfo.cu_mask.ptr)) {
pr_debug("Could not copy CU mask from userspace");
- retval = -EFAULT;
- goto out;
+ return PTR_ERR(minfo.cu_mask.ptr);
}
mutex_lock(&p->mutex);
@@ -538,7 +533,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
mutex_unlock(&p->mutex);
-out:
kfree(minfo.cu_mask.ptr);
return retval;
}
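The kzalloc()+copy_from_user() pair collapses into memdup_user(), which performs the allocation and copy in one step and returns an ERR_PTR on failure, removing the need for the out: label. The general idiom:

	/* minimal usage sketch of the memdup_user() idiom */
	void *buf = memdup_user(user_ptr, size);	/* kmalloc() + copy_from_user() */

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... use buf ... */
	kfree(buf);

One subtlety of the converted code: the pr_debug() message now also fires for allocation failures, not only for copy failures as before.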
@@ -1070,7 +1064,12 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
svm_range_list_lock_and_flush_work(&p->svms, current->mm);
mutex_lock(&p->svms.lock);
mmap_write_unlock(current->mm);
- if (interval_tree_iter_first(&p->svms.objects,
+
+	/* Skip the special case that allocates VRAM without a VA;
+	 * the VA will be an invalid value of 0.
+ */
+ if (!(!args->va_addr && (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)) &&
+ interval_tree_iter_first(&p->svms.objects,
args->va_addr >> PAGE_SHIFT,
(args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
pr_err("Address: 0x%llx already allocated by SVM\n",
@@ -2566,8 +2565,8 @@ static int criu_restore(struct file *filep,
pr_debug("CRIU restore (num_devices:%u num_bos:%u num_objects:%u priv_data_size:%llu)\n",
args->num_devices, args->num_bos, args->num_objects, args->priv_data_size);
- if (!args->bos || !args->devices || !args->priv_data || !args->priv_data_size ||
- !args->num_devices || !args->num_bos)
+ if ((args->num_bos > 0 && !args->bos) || !args->devices || !args->priv_data ||
+ !args->priv_data_size || !args->num_devices)
return -EINVAL;
mutex_lock(&p->mutex);
@@ -2827,7 +2826,7 @@ retry:
static int runtime_disable(struct kfd_process *p)
{
- int i = 0, ret;
+ int i = 0, ret = 0;
bool was_enabled = p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED;
p->runtime_info.runtime_state = DEBUG_RUNTIME_STATE_DISABLED;
@@ -2864,6 +2863,7 @@ static int runtime_disable(struct kfd_process *p)
/* disable ttmp setup */
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
+ int last_err = 0;
if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
pdd->spi_dbg_override =
@@ -2873,14 +2873,17 @@ static int runtime_disable(struct kfd_process *p)
pdd->dev->vm_info.last_vmid_kfd);
if (!pdd->dev->kfd->shared_resources.enable_mes)
- debug_refresh_runlist(pdd->dev->dqm);
+ last_err = debug_refresh_runlist(pdd->dev->dqm);
else
- kfd_dbg_set_mes_debug_mode(pdd,
+ last_err = kfd_dbg_set_mes_debug_mode(pdd,
!kfd_dbg_has_cwsr_workaround(pdd->dev));
+
+ if (last_err)
+ ret = last_err;
}
}
- return 0;
+ return ret;
}
static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, void *data)
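The runtime_disable() change above keeps iterating over every device even after a failure, recording the last error instead of unconditionally returning 0. A compact sketch of that aggregation pattern, with a simulated per-device call standing in for debug_refresh_runlist():

```c
/* Sketch of the error-aggregation change in runtime_disable(): process
 * every device, but report a failure if any step failed. */
#include <stdio.h>

static int refresh_runlist(int dev) { return dev == 1 ? -22 : 0; }

static int disable_all(int ndevs)
{
	int i, ret = 0;

	for (i = 0; i < ndevs; i++) {
		int last_err = refresh_runlist(i);

		if (last_err)
			ret = last_err;	/* remember, but keep going */
	}
	return ret;
}

int main(void)
{
	printf("disable_all -> %d (device 1 failed, 0 and 2 still ran)\n",
	       disable_all(3));
	return 0;
}
```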
@@ -3252,8 +3255,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int retcode = -EINVAL;
bool ptrace_attached = false;
- if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT) {
+ retcode = -ENOTTY;
goto err_i1;
+ }
if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
u32 amdkfd_size;
@@ -3266,8 +3271,10 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
asize = amdkfd_size;
cmd = ioctl->cmd;
- } else
+ } else {
+ retcode = -ENOTTY;
goto err_i1;
+ }
dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
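Returning -ENOTTY for out-of-range or unhandled commands (instead of the previous -EINVAL) follows the standard ioctl convention: userspace can then tell "this driver does not implement that ioctl" apart from "bad argument". A small table-driven sketch of the convention, with invented handler names:

```c
/* Minimal sketch of table-driven ioctl dispatch: unknown commands
 * return -ENOTTY, bad arguments return -EINVAL. */
#include <errno.h>
#include <stdio.h>

typedef int (*ioctl_fn)(void *arg);

static int get_version(void *arg) { (void)arg; return 0; }
static int create_queue(void *arg) { return arg ? 0 : -EINVAL; }

static const ioctl_fn ioctl_table[] = { get_version, create_queue };
#define IOCTL_COUNT (sizeof(ioctl_table) / sizeof(ioctl_table[0]))

static int dispatch(unsigned int nr, void *arg)
{
	if (nr >= IOCTL_COUNT || !ioctl_table[nr])
		return -ENOTTY;	/* unknown command, not invalid argument */
	return ioctl_table[nr](arg);
}

int main(void)
{
	printf("nr=1 arg=NULL -> %d (expect -EINVAL=%d)\n",
	       dispatch(1, NULL), -EINVAL);
	printf("nr=9 -> %d (expect -ENOTTY=%d)\n",
	       dispatch(9, NULL), -ENOTTY);
	return 0;
}
```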
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7e749f9b6d69..e9cfb80bd436 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -495,6 +495,7 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
mutex_init(&kfd->doorbell_mutex);
ida_init(&kfd->doorbell_ida);
+ atomic_set(&kfd->kfd_processes_count, 0);
return kfd;
}
@@ -1133,7 +1134,15 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
}
for (i = 0; i < kfd->num_nodes; i++) {
- node = kfd->nodes[i];
+ /* Another thread may race with us between
+ * kfd_cleanup_nodes() and kfree(kfd),
+ * in which case kfd->nodes[i] is already NULL.
+ */
+ if (kfd->nodes[i])
+ node = kfd->nodes[i];
+ else
+ return;
+
spin_lock_irqsave(&node->interrupt_lock, flags);
if (node->interrupts_active
@@ -1485,6 +1494,15 @@ int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
mutex_lock(&kfd_processes_mutex);
+ /* kfd_processes_count is per kfd_dev; if it is non-zero,
+ * return -EBUSY without further checks.
+ */
+ if (!!atomic_read(&kfd->kfd_processes_count)) {
+ pr_debug("process_wq_release not finished\n");
+ r = -EBUSY;
+ goto out;
+ }
+
if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd))
goto out;
@@ -1550,6 +1568,25 @@ int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
return ret;
}
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.unhalt(node->dqm);
+ if (r) {
+ dev_err(kfd_device, "Error in starting scheduler\n");
+ return r;
+ }
+ }
+ return 0;
+}
+
int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
@@ -1567,6 +1604,23 @@ int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
return node->dqm->ops.halt(node->dqm);
}
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ struct kfd_node *node;
+ int i, r;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ for (i = 0; i < kfd->num_nodes; i++) {
+ node = kfd->nodes[i];
+ r = node->dqm->ops.halt(node->dqm);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
{
struct kfd_node *node;
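kgd2kfd_stop_sched_all_nodes() above returns on the first halt failure, leaving earlier nodes halted for the caller to deal with. Below is a hedged sketch of a rollback variant a caller might layer on top; the rollback is not what the kernel helper does, and all types and callbacks here are invented for illustration.

```c
/* Sketch of stop-all with rollback: halt every node's scheduler, and if
 * one halt fails, unhalt the nodes already stopped. */
#include <stdio.h>

struct node { const char *name; int halted; int fail_halt; };

static int halt(struct node *n)
{
	if (n->fail_halt)
		return -1;
	n->halted = 1;
	return 0;
}

static void unhalt(struct node *n) { n->halted = 0; }

static int stop_all(struct node *nodes, int count)
{
	int i, r;

	for (i = 0; i < count; i++) {
		r = halt(&nodes[i]);
		if (r) {
			/* roll back the nodes halted so far */
			while (--i >= 0)
				unhalt(&nodes[i]);
			return r;
		}
	}
	return 0;
}

int main(void)
{
	struct node nodes[3] = { {"xcc0"}, {"xcc1", .fail_halt = 1}, {"xcc2"} };

	printf("stop_all -> %d, xcc0 halted=%d\n",
	       stop_all(nodes, 3), nodes[0].halted);
	return 0;
}
```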
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 6c5c7c1bf5ed..d7a2e7178ea9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1209,6 +1209,15 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
pr_debug_ratelimited("Evicting process pid %d queues\n",
pdd->process->lead_thread->pid);
+ if (dqm->dev->kfd->shared_resources.enable_mes) {
+ pdd->last_evict_timestamp = get_jiffies_64();
+ retval = suspend_all_queues_mes(dqm);
+ if (retval) {
+ dev_err(dev, "Suspending all queues failed");
+ goto out;
+ }
+ }
+
/* Mark all queues as evicted. Deactivate all active queues on
* the qpd.
*/
@@ -1221,23 +1230,27 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
decrement_queue_count(dqm, qpd, q);
if (dqm->dev->kfd->shared_resources.enable_mes) {
- int err;
-
- err = remove_queue_mes(dqm, q, qpd);
- if (err) {
+ retval = remove_queue_mes(dqm, q, qpd);
+ if (retval) {
dev_err(dev, "Failed to evict queue %d\n",
q->properties.queue_id);
- retval = err;
+ goto out;
}
}
}
- pdd->last_evict_timestamp = get_jiffies_64();
- if (!dqm->dev->kfd->shared_resources.enable_mes)
+
+ if (!dqm->dev->kfd->shared_resources.enable_mes) {
+ pdd->last_evict_timestamp = get_jiffies_64();
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
USE_DEFAULT_GRACE_PERIOD);
+ } else {
+ retval = resume_all_queues_mes(dqm);
+ if (retval)
+ dev_err(dev, "Resuming all queues failed");
+ }
out:
dqm_unlock(dqm);
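On MES, the eviction path above is now bracketed by suspend_all_queues_mes()/resume_all_queues_mes(). A simplified sketch of that suspend/modify/resume bracket follows; unlike the hunk, this variant resumes even when a middle step fails partway, and all helpers are simulated.

```c
/* Sketch of a suspend/modify/resume bracket around per-queue work. */
#include <stdio.h>

static int suspend_all(void)   { puts("suspend all queues"); return 0; }
static int resume_all(void)    { puts("resume all queues");  return 0; }
static int remove_queue(int q) { return q == 2 ? -5 : 0; } /* queue 2 fails */

static int evict_queues(int nqueues)
{
	int q, ret;

	ret = suspend_all();
	if (ret)
		return ret;		/* nothing to undo yet */

	for (q = 0; q < nqueues; q++) {
		ret = remove_queue(q);
		if (ret) {
			printf("failed to evict queue %d\n", q);
			break;		/* fall through to resume */
		}
	}

	/* resume unconditionally; keep the first error if both fail */
	if (resume_all() && !ret)
		ret = -1;
	return ret;
}

int main(void)
{
	printf("evict_queues -> %d\n", evict_queues(4));
	return 0;
}
```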
@@ -1884,6 +1897,8 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
+ int ret = 0;
+
dqm_lock(dqm);
if (!dqm->sched_running) {
dqm_unlock(dqm);
@@ -1891,9 +1906,10 @@ static int stop_cpsch(struct device_queue_manager *dqm)
}
if (!dqm->dev->kfd->shared_resources.enable_mes)
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
+ ret = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
+ 0, USE_DEFAULT_GRACE_PERIOD, false);
else
- remove_all_kfd_queues_mes(dqm);
+ ret = remove_all_kfd_queues_mes(dqm);
dqm->sched_running = false;
@@ -1907,7 +1923,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
dqm->detect_hang_info = NULL;
dqm_unlock(dqm);
- return 0;
+ return ret;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
@@ -2078,7 +2094,8 @@ int amdkfd_fence_wait_timeout(struct device_queue_manager *dqm,
while (*fence_addr != fence_value) {
/* Fatal err detected, this response won't come */
- if (amdgpu_amdkfd_is_fed(dqm->dev->adev))
+ if (amdgpu_amdkfd_is_fed(dqm->dev->adev) ||
+ amdgpu_in_reset(dqm->dev->adev))
return -EIO;
if (time_after(jiffies, end_jiffies)) {
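amdkfd_fence_wait_timeout() now also bails out with -EIO when the device is in reset, since the fence write will never arrive in that case. A userspace analogue of the polled wait, with a deadline and an abort flag standing in for the reset check:

```c
/* Userspace sketch of a polled-fence wait: spin on a fence word until
 * it reaches the expected value, the deadline expires, or an abort
 * condition (standing in for "GPU in reset") is raised. */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static atomic_uint fence;
static atomic_int  in_reset;

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int fence_wait_timeout(unsigned int value, long timeout_ms)
{
	long end = now_ms() + timeout_ms;

	while (atomic_load(&fence) != value) {
		if (atomic_load(&in_reset))
			return -EIO;	/* response will never come */
		if (now_ms() > end)
			return -ETIME;
	}
	return 0;
}

int main(void)
{
	atomic_store(&fence, 42);
	printf("wait(42) -> %d\n", fence_wait_timeout(42, 100));
	atomic_store(&in_reset, 1);
	printf("wait(43) -> %d (expect -EIO=%d)\n",
	       fence_wait_timeout(43, 100), -EIO);
	return 0;
}
```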
@@ -3098,61 +3115,17 @@ out:
return ret;
}
-static int kfd_dqm_evict_pasid_mes(struct device_queue_manager *dqm,
- struct qcm_process_device *qpd)
-{
- struct device *dev = dqm->dev->adev->dev;
- int ret = 0;
-
- /* Check if process is already evicted */
- dqm_lock(dqm);
- if (qpd->evicted) {
- /* Increment the evicted count to make sure the
- * process stays evicted before its terminated.
- */
- qpd->evicted++;
- dqm_unlock(dqm);
- goto out;
- }
- dqm_unlock(dqm);
-
- ret = suspend_all_queues_mes(dqm);
- if (ret) {
- dev_err(dev, "Suspending all queues failed");
- goto out;
- }
-
- ret = dqm->ops.evict_process_queues(dqm, qpd);
- if (ret) {
- dev_err(dev, "Evicting process queues failed");
- goto out;
- }
-
- ret = resume_all_queues_mes(dqm);
- if (ret)
- dev_err(dev, "Resuming all queues failed");
-
-out:
- return ret;
-}
-
int kfd_evict_process_device(struct kfd_process_device *pdd)
{
struct device_queue_manager *dqm;
struct kfd_process *p;
- int ret = 0;
p = pdd->process;
dqm = pdd->dev->dqm;
WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
- if (dqm->dev->kfd->shared_resources.enable_mes)
- ret = kfd_dqm_evict_pasid_mes(dqm, &pdd->qpd);
- else
- ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
-
- return ret;
+ return dqm->ops.evict_process_queues(dqm, &pdd->qpd);
}
int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 82905f3e54dd..5a190dd6be4e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -748,16 +748,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
uint64_t *slots = page_slots(p->signal_page);
uint32_t id;
- /*
- * If id is valid but slot is not signaled, GPU may signal the same event twice
- * before driver have chance to process the first interrupt, then signal slot is
- * auto-reset after set_event wakeup the user space, just drop the second event as
- * the application only need wakeup once.
- */
- if ((valid_id_bits > 31 || (1U << valid_id_bits) >= KFD_SIGNAL_EVENT_LIMIT) &&
- partial_id < KFD_SIGNAL_EVENT_LIMIT && slots[partial_id] == UNSIGNALED_EVENT_SLOT)
- goto out_unlock;
-
if (valid_id_bits)
pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
partial_id, valid_id_bits);
@@ -786,7 +776,6 @@ void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
}
}
-out_unlock:
rcu_read_unlock();
kfd_unref_process(p);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 4ceb251312a6..d76fb61869c7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -28,6 +28,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
#include "amdgpu_ras.h"
+#include "amdgpu_ras_mgr.h"
/*
* GFX9 SQ Interrupts
@@ -228,7 +229,11 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
kfd_signal_poison_consumed_event(dev, pasid);
- event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
+ if (amdgpu_uniras_enabled(dev->adev))
+ event_id = amdgpu_ras_mgr_gen_ras_event_seqno(dev->adev,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION);
+ else
+ event_id = amdgpu_ras_acquire_event_id(dev->adev, type);
RAS_EVENT_LOG(dev->adev, event_id,
"poison is consumed by client %d, kick off gpu reset flow\n", client_id);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 2b0a830f5b29..fb3129883a4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -46,11 +46,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
int retval;
union PM4_MES_TYPE_3_HEADER nop;
- if (WARN_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ))
- return false;
-
- pr_debug("Initializing queue type %d size %d\n", KFD_QUEUE_TYPE_HIQ,
- queue_size);
+ pr_debug("Initializing queue type %d size %d\n", type, queue_size);
memset(&prop, 0, sizeof(prop));
memset(&nop, 0, sizeof(nop));
@@ -69,6 +65,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_node *dev,
kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
break;
default:
+ WARN(1, "Invalid queue type %d\n", type);
dev_err(dev->adev->dev, "Invalid queue type %d\n", type);
return false;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 79251f22b702..af53e796ea1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -21,7 +21,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/types.h>
-#include <linux/hmm.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
@@ -39,22 +38,22 @@
#endif
#define dev_fmt(fmt) "kfd_migrate: " fmt
-static uint64_t
-svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
+static u64
+svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
{
return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
}
static int
-svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
- dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
+svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
+ dma_addr_t *addr, u64 *gart_addr, u64 flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
unsigned int num_dw, num_bytes;
struct dma_fence *fence;
- uint64_t src_addr, dst_addr;
- uint64_t pte_flags;
+ u64 src_addr, dst_addr;
+ u64 pte_flags;
void *cpu_addr;
int r;
@@ -68,7 +67,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
AMDGPU_FENCE_OWNER_UNDEFINED,
num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED,
- &job);
+ &job,
+ AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP);
if (r)
return r;
@@ -122,15 +122,15 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
static int
svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
- uint64_t *vram, uint64_t npages,
+ u64 *vram, u64 npages,
enum MIGRATION_COPY_DIR direction,
struct dma_fence **mfence)
{
- const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
+ const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- uint64_t gart_s, gart_d;
+ u64 gart_s, gart_d;
struct dma_fence *next;
- uint64_t size;
+ u64 size;
int r;
mutex_lock(&adev->mman.gtt_window_lock);
@@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn)
page = pfn_to_page(pfn);
svm_range_bo_ref(prange->svm_bo);
page->zone_device_data = prange->svm_bo;
- zone_device_page_init(page);
+ zone_device_page_init(page, 0);
}
static void
@@ -260,39 +260,39 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
-static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
+static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
{
- unsigned long upages = 0;
+ unsigned long mpages = 0;
unsigned long i;
for (i = 0; i < migrate->npages; i++) {
- if (migrate->src[i] & MIGRATE_PFN_VALID &&
- !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
- upages++;
+ if (migrate->dst[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ mpages++;
}
- return upages;
+ return mpages;
}
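svm_migrate_successful_pages() inverts the old accounting: it counts pages whose destination PFN is valid and whose source is marked MIGRATE_PFN_MIGRATE, rather than subtracting unsuccessful pages from the collected count. A standalone sketch of that counting (flag values here are illustrative, not the kernel's):

```c
/* Count pages that actually migrated: destination PFN valid and source
 * marked for migration. */
#include <stdio.h>

#define MIGRATE_PFN_VALID   (1u << 0)
#define MIGRATE_PFN_MIGRATE (1u << 1)

static unsigned long count_successful(const unsigned int *src,
				      const unsigned int *dst,
				      unsigned long npages)
{
	unsigned long i, mpages = 0;

	for (i = 0; i < npages; i++)
		if ((dst[i] & MIGRATE_PFN_VALID) &&
		    (src[i] & MIGRATE_PFN_MIGRATE))
			mpages++;
	return mpages;
}

int main(void)
{
	/* page 1 collected but not migrated, page 2 migrated */
	unsigned int src[3] = { 0, MIGRATE_PFN_VALID,
				MIGRATE_PFN_VALID | MIGRATE_PFN_MIGRATE };
	unsigned int dst[3] = { 0, 0, MIGRATE_PFN_VALID };

	printf("migrated %lu of 3 pages\n", count_successful(src, dst, 3));
	return 0;
}
```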
static int
svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t ttm_res_offset)
+ dma_addr_t *scratch, u64 ttm_res_offset)
{
- uint64_t npages = migrate->npages;
+ u64 npages = migrate->npages;
struct amdgpu_device *adev = node->adev;
struct device *dev = adev->dev;
struct amdgpu_res_cursor cursor;
- uint64_t mpages = 0;
+ u64 mpages = 0;
dma_addr_t *src;
- uint64_t *dst;
- uint64_t i, j;
+ u64 *dst;
+ u64 i, j;
int r;
pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
prange->last, ttm_res_offset);
src = scratch;
- dst = (uint64_t *)(scratch + npages);
+ dst = (u64 *)(scratch + npages);
amdgpu_res_first(prange->ttm_res, ttm_res_offset,
npages << PAGE_SHIFT, &cursor);
@@ -385,11 +385,11 @@ out_free_vram_pages:
static long
svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start,
- uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
+ struct vm_area_struct *vma, u64 start,
+ u64 end, uint32_t trigger, u64 ttm_res_offset)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
+ u64 npages = (end - start) >> PAGE_SHIFT;
struct amdgpu_device *adev = node->adev;
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
@@ -408,7 +408,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -447,9 +447,9 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
- mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- mpages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
@@ -490,7 +490,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
{
unsigned long addr, start, end;
struct vm_area_struct *vma;
- uint64_t ttm_res_offset;
+ u64 ttm_res_offset;
struct kfd_node *node;
unsigned long mpages = 0;
long r = 0;
@@ -567,8 +567,9 @@ out:
return r < 0 ? r : 0;
}
-static void svm_migrate_page_free(struct page *page)
+static void svm_migrate_folio_free(struct folio *folio)
{
+ struct page *page = &folio->page;
struct svm_range_bo *svm_bo = page->zone_device_data;
if (svm_bo) {
@@ -580,14 +581,14 @@ static void svm_migrate_page_free(struct page *page)
static int
svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
- dma_addr_t *scratch, uint64_t npages)
+ dma_addr_t *scratch, u64 npages)
{
struct device *dev = adev->dev;
- uint64_t *src;
+ u64 *src;
dma_addr_t *dst;
struct page *dpage;
- uint64_t i = 0, j;
- uint64_t addr;
+ u64 i = 0, j;
+ u64 addr;
int r = 0;
pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
@@ -595,7 +596,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
addr = migrate->start;
- src = (uint64_t *)(scratch + npages);
+ src = (u64 *)(scratch + npages);
dst = scratch;
for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
@@ -683,12 +684,11 @@ out_oom:
*/
static long
svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
- struct vm_area_struct *vma, uint64_t start, uint64_t end,
+ struct vm_area_struct *vma, u64 start, u64 end,
uint32_t trigger, struct page *fault_page)
{
struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
- uint64_t npages = (end - start) >> PAGE_SHIFT;
- unsigned long upages = npages;
+ u64 npages = (end - start) >> PAGE_SHIFT;
unsigned long cpages = 0;
unsigned long mpages = 0;
struct amdgpu_device *adev = node->adev;
@@ -710,7 +710,7 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
buf = kvcalloc(npages,
- 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
+ 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
GFP_KERNEL);
if (!buf)
goto out;
@@ -736,7 +736,6 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
- upages = svm_migrate_unsuccessful_pages(&migrate);
goto out_free;
}
if (cpages != npages)
@@ -749,9 +748,9 @@ svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
scratch, npages);
migrate_vma_pages(&migrate);
- upages = svm_migrate_unsuccessful_pages(&migrate);
- pr_debug("unsuccessful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
- upages, cpages, migrate.npages);
+ mpages = svm_migrate_successful_pages(&migrate);
+ pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
+ mpages, cpages, migrate.npages);
svm_migrate_copy_done(adev, mfence);
migrate_vma_finalize(&migrate);
@@ -764,8 +763,7 @@ out_free:
start >> PAGE_SHIFT, end >> PAGE_SHIFT,
node->id, 0, trigger, r);
out:
- if (!r && cpages) {
- mpages = cpages - upages;
+ if (!r && mpages) {
pdd = svm_range_get_pdd_by_node(prange, node);
if (pdd)
WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
@@ -848,6 +846,9 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
}
if (r >= 0) {
+ WARN_ONCE(prange->vram_pages < mpages,
+ "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
+ prange->vram_pages, mpages);
prange->vram_pages -= mpages;
/* prange does not have vram page set its actual_loc to system
@@ -1008,7 +1009,7 @@ out_mmput:
}
static const struct dev_pagemap_ops svm_migrate_pgmap_ops = {
- .page_free = svm_migrate_page_free,
+ .folio_free = svm_migrate_folio_free,
.migrate_to_ram = svm_migrate_to_ram,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 2eebf67f9c2c..2b7fd442d29c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "kfd_priv.h"
#include "kfd_svm.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 67694bcd9464..70ef051511bb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -111,7 +111,14 @@
#define KFD_KERNEL_QUEUE_SIZE 2048
-#define KFD_UNMAP_LATENCY_MS (4000)
+/* KFD_UNMAP_LATENCY_MS is the timeout the CP waits for SDMA preemption. One XCC
+ * can be associated with 2 SDMA engines. queue_preemption_timeout_ms is the time
+ * the driver waits for the CP to return the UNMAP_QUEUE fence. Thus the math is
+ * queue_preemption_timeout_ms = sdma_preemption_time * 2 + CP workload.
+ * The formula here makes the CP workload 10% of the total timeout.
+ */
+#define KFD_UNMAP_LATENCY_MS \
+ ((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1)
#define KFD_MAX_SDMA_QUEUES 128
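A worked example of the new KFD_UNMAP_LATENCY_MS formula, assuming the 9000 ms default for the queue_preemption_timeout_ms module parameter (an assumption for illustration): 10% of the total is reserved for the CP workload and the remainder is split across the two SDMA engines.

```c
/* Worked example of the KFD_UNMAP_LATENCY_MS math from the comment
 * above; the 9000 ms default is assumed here. */
#include <stdio.h>

static unsigned int queue_preemption_timeout_ms = 9000;

#define KFD_UNMAP_LATENCY_MS \
	((queue_preemption_timeout_ms - queue_preemption_timeout_ms / 10) >> 1)

int main(void)
{
	/* 9000 - 900 = 8100; 8100 / 2 = 4050 ms per SDMA engine */
	printf("unmap latency = %u ms\n", KFD_UNMAP_LATENCY_MS);
	return 0;
}
```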
@@ -375,6 +382,8 @@ struct kfd_dev {
/* for dynamic partitioning */
int kfd_dev_lock;
+
+ atomic_t kfd_processes_count;
};
enum kfd_mempool {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 5be28c6c4f6a..a085faac9fe1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1083,11 +1083,12 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
* for auto suspend
*/
if (pdd->runtime_inuse) {
- pm_runtime_mark_last_busy(adev_to_drm(pdd->dev->adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(pdd->dev->adev)->dev);
pdd->runtime_inuse = false;
}
+ atomic_dec(&pdd->dev->kfd->kfd_processes_count);
+
kfree(pdd);
p->pdds[i] = NULL;
}
@@ -1160,9 +1161,6 @@ static void kfd_process_wq_release(struct work_struct *work)
release_work);
struct dma_fence *ef;
- kfd_process_dequeue_from_all_devices(p);
- pqm_uninit(&p->pqm);
-
/*
* If GPU in reset, user queues may still running, wait for reset complete.
*/
@@ -1224,6 +1222,14 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p)
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
+ /*
+ * Dequeue and destroy user queues. It is not safe for the GPU to access
+ * system memory after the mmu release notifier callback returns, because
+ * exit_mmap frees process memory afterwards.
+ */
+ kfd_process_dequeue_from_all_devices(p);
+ pqm_uninit(&p->pqm);
+
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
@@ -1649,6 +1655,8 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
/* Init idr used for memory handle translation */
idr_init(&pdd->alloc_idr);
+ atomic_inc(&dev->kfd->kfd_processes_count);
+
return pdd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index a65c67cf56ff..f1e7583650c4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -297,16 +297,16 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
goto out_err_unreserve;
}
- if (properties->ctx_save_restore_area_size != topo_dev->node_props.cwsr_size) {
- pr_debug("queue cwsr size 0x%x not equal to node cwsr size 0x%x\n",
+ if (properties->ctx_save_restore_area_size < topo_dev->node_props.cwsr_size) {
+ pr_debug("queue cwsr size 0x%x not sufficient for node cwsr size 0x%x\n",
properties->ctx_save_restore_area_size,
topo_dev->node_props.cwsr_size);
err = -EINVAL;
goto out_err_unreserve;
}
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
@@ -352,8 +352,8 @@ int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_prope
topo_dev = kfd_topology_device_by_id(pdd->dev->id);
if (!topo_dev)
return -EINVAL;
- total_cwsr_size = (topo_dev->node_props.cwsr_size + topo_dev->node_props.debug_memory_size)
- * NUM_XCC(pdd->dev->xcc_mask);
+ total_cwsr_size = (properties->ctx_save_restore_area_size +
+ topo_dev->node_props.debug_memory_size) * NUM_XCC(pdd->dev->xcc_mask);
total_cwsr_size = ALIGN(total_cwsr_size, PAGE_SIZE);
kfd_queue_buffer_svm_put(pdd, properties->ctx_save_restore_area_address, total_cwsr_size);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index a0f22ea6d15a..97c2270f278f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1189,7 +1189,7 @@ svm_nodes_in_same_hive(struct kfd_node *node_a, struct kfd_node *node_b)
}
static uint64_t
-svm_range_get_pte_flags(struct kfd_node *node,
+svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
struct svm_range *prange, int domain)
{
struct kfd_node *bo_node;
@@ -1292,10 +1292,6 @@ svm_range_get_pte_flags(struct kfd_node *node,
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
- mapping_flags |= AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
-
- if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO)
- mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE;
if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
@@ -1305,7 +1301,10 @@ svm_range_get_pte_flags(struct kfd_node *node,
if (gc_ip_version >= IP_VERSION(12, 0, 0))
pte_flags |= AMDGPU_PTE_IS_PTE;
- pte_flags |= amdgpu_gem_va_map_flags(node->adev, mapping_flags);
+ amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
+ pte_flags |= AMDGPU_PTE_READABLE;
+ if (!(flags & KFD_IOCTL_SVM_FLAG_GPU_RO))
+ pte_flags |= AMDGPU_PTE_WRITEABLE;
return pte_flags;
}
@@ -1412,7 +1411,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
pr_debug("Mapping range [0x%lx 0x%llx] on domain: %s\n",
last_start, prange->start + i, last_domain ? "GPU" : "CPU");
- pte_flags = svm_range_get_pte_flags(pdd->dev, prange, last_domain);
+ pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
if (readonly)
pte_flags &= ~AMDGPU_PTE_WRITEABLE;
@@ -1699,7 +1698,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
start = map_start << PAGE_SHIFT;
end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
- struct hmm_range *hmm_range = NULL;
+ struct amdgpu_hmm_range *range = NULL;
unsigned long map_start_vma;
unsigned long map_last_vma;
struct vm_area_struct *vma;
@@ -1714,10 +1713,36 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
next = min(vma->vm_end, end);
npages = (next - addr) >> PAGE_SHIFT;
+ /* HMM requires at least READ permissions. If provided with PROT_NONE,
+ * unmap the memory. If it's not already mapped, this is a no-op.
+ * If PROT_WRITE is provided without READ, warn first, then unmap.
+ */
+ if (!(vma->vm_flags & VM_READ)) {
+ unsigned long e, s;
+
+ svm_range_lock(prange);
+ if (vma->vm_flags & VM_WRITE)
+ pr_debug("VM_WRITE without VM_READ is not supported");
+ s = max(start, prange->start);
+ e = min(end, prange->last);
+ if (e >= s)
+ r = svm_range_unmap_from_gpus(prange, s, e,
+ KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU);
+ svm_range_unlock(prange);
+ /* If unmap returns non-zero, we'll bail on the next for loop
+ * iteration, so just leave r and continue.
+ */
+ addr = next;
+ continue;
+ }
+
WRITE_ONCE(p->svms.faulting_task, current);
- r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
- readonly, owner, NULL,
- &hmm_range);
+ range = amdgpu_hmm_range_alloc(NULL);
+ if (likely(range))
+ r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages,
+ readonly, owner, range);
+ else
+ r = -ENOMEM;
WRITE_ONCE(p->svms.faulting_task, NULL);
if (r)
pr_debug("failed %d to get svm range pages\n", r);
@@ -1728,7 +1753,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (!r) {
offset = (addr >> PAGE_SHIFT) - prange->start;
r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
- hmm_range->hmm_pfns);
+ range->hmm_range.hmm_pfns);
if (r)
pr_debug("failed %d to dma map range\n", r);
}
@@ -1736,14 +1761,17 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
svm_range_lock(prange);
/* Free backing memory of hmm_range if it was initialized
- * Overrride return value to TRY AGAIN only if prior returns
+ * Override return value to TRY AGAIN only if prior returns
* were successful
*/
- if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
+ if (range && !amdgpu_hmm_range_valid(range) && !r) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
}
+ /* Free the hmm range */
+ amdgpu_hmm_range_free(range);
+
if (!r && !list_empty(&prange->child_list)) {
pr_debug("range split by unmap in parallel, validate again\n");
r = -EAGAIN;
@@ -3023,6 +3051,8 @@ retry_write_locked:
if (svms->checkpoint_ts[gpuidx] != 0) {
if (amdgpu_ih_ts_after_or_equal(ts, svms->checkpoint_ts[gpuidx])) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+ if (write_locked)
+ mmap_write_downgrade(mm);
r = -EAGAIN;
goto out_unlock_svms;
} else {
@@ -3663,6 +3693,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
/* TODO: unmap ranges from GPU that lost access */
}
+ update_mapping |= !p->xnack_enabled && !list_empty(&remap_list);
+
list_for_each_entry_safe(prange, next, &remove_list, update_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start,
@@ -4239,7 +4271,7 @@ svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start,
r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
break;
default:
- r = EINVAL;
+ r = -EINVAL;
break;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 01c7a4877904..a63dfc95b602 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -31,7 +31,6 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
-#include <linux/hmm.h>
#include "amdgpu.h"
#include "kfd_priv.h"
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 720b20e842ba..811636af14ea 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -530,6 +530,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version",
dev->gpu->kfd->sdma_fw_version);
sysfs_show_64bit_prop(buffer, offs, "unique_id",
+ dev->gpu->xcp &&
+ (dev->gpu->xcp->xcp_mgr->mode !=
+ AMDGPU_SPX_PARTITION_MODE) ?
+ dev->gpu->xcp->unique_id :
dev->gpu->adev->unique_id);
sysfs_show_32bit_prop(buffer, offs, "num_xcc",
NUM_XCC(dev->gpu->xcc_mask));
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
index 8bc36f04b1b7..44009aa8216e 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c
@@ -46,18 +46,29 @@ static const struct drm_driver amdgpu_xcp_driver = {
static int8_t pdev_num;
static struct xcp_device *xcp_dev[MAX_XCP_PLATFORM_DEVICE];
+static DEFINE_MUTEX(xcp_mutex);
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
{
struct platform_device *pdev;
struct xcp_device *pxcp_dev;
char dev_name[20];
- int ret;
+ int ret, i;
+
+ guard(mutex)(&xcp_mutex);
if (pdev_num >= MAX_XCP_PLATFORM_DEVICE)
return -ENODEV;
- snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", pdev_num);
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if (!xcp_dev[i])
+ break;
+ }
+
+ if (i >= MAX_XCP_PLATFORM_DEVICE)
+ return -ENODEV;
+
+ snprintf(dev_name, sizeof(dev_name), "amdgpu_xcp_%d", i);
pdev = platform_device_register_simple(dev_name, -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
@@ -73,8 +84,8 @@ int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev)
goto out_devres;
}
- xcp_dev[pdev_num] = pxcp_dev;
- xcp_dev[pdev_num]->pdev = pdev;
+ xcp_dev[i] = pxcp_dev;
+ xcp_dev[i]->pdev = pdev;
*ddev = &pxcp_dev->drm;
pdev_num++;
@@ -89,16 +100,43 @@ out_unregister:
}
EXPORT_SYMBOL(amdgpu_xcp_drm_dev_alloc);
-void amdgpu_xcp_drv_release(void)
+static void free_xcp_dev(int8_t index)
{
- for (--pdev_num; pdev_num >= 0; --pdev_num) {
- struct platform_device *pdev = xcp_dev[pdev_num]->pdev;
+ if ((index < MAX_XCP_PLATFORM_DEVICE) && (xcp_dev[index])) {
+ struct platform_device *pdev = xcp_dev[index]->pdev;
devres_release_group(&pdev->dev, NULL);
platform_device_unregister(pdev);
- xcp_dev[pdev_num] = NULL;
+
+ xcp_dev[index] = NULL;
+ pdev_num--;
+ }
+}
+
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ if ((xcp_dev[i]) && (&xcp_dev[i]->drm == ddev)) {
+ free_xcp_dev(i);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(amdgpu_xcp_drm_dev_free);
+
+void amdgpu_xcp_drv_release(void)
+{
+ int8_t i;
+
+ guard(mutex)(&xcp_mutex);
+
+ for (i = 0; pdev_num && i < MAX_XCP_PLATFORM_DEVICE; i++) {
+ free_xcp_dev(i);
}
- pdev_num = 0;
}
EXPORT_SYMBOL(amdgpu_xcp_drv_release);
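The xcp rework above replaces the monotonically increasing pdev_num index with a first-free-slot search under a mutex, so a slot freed by amdgpu_xcp_drm_dev_free() can be reused by a later amdgpu_xcp_drm_dev_alloc(). A userspace sketch of the scheme, with a pthread mutex standing in for the kernel's guard(mutex):

```c
/* Slot-reuse allocation: find the first free slot under a lock instead
 * of bumping a counter, so freed devices can be re-registered. */
#include <pthread.h>
#include <stdio.h>

#define MAX_DEVICES 8

static void *slots[MAX_DEVICES];
static int nr_devices;
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;

static int alloc_slot(void *dev)
{
	int i, ret = -1;

	pthread_mutex_lock(&slot_lock);
	for (i = 0; i < MAX_DEVICES; i++) {
		if (!slots[i]) {
			slots[i] = dev;
			nr_devices++;
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&slot_lock);
	return ret;			/* -1 mirrors -ENODEV when full */
}

static void free_slot(int i)
{
	pthread_mutex_lock(&slot_lock);
	if (i >= 0 && i < MAX_DEVICES && slots[i]) {
		slots[i] = NULL;
		nr_devices--;
	}
	pthread_mutex_unlock(&slot_lock);
}

int main(void)
{
	static char dev_a, dev_b, dev_c;
	int a = alloc_slot(&dev_a), b = alloc_slot(&dev_b);

	free_slot(a);			/* slot 0 becomes free again */
	printf("reused slot: %d (expect %d)\n", alloc_slot(&dev_c), a);
	printf("devices registered: %d, b=%d\n", nr_devices, b);
	return 0;
}
```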
diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
index c1c4b679bf95..580a1602c8e3 100644
--- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
+++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h
@@ -25,5 +25,6 @@
#define _AMDGPU_XCP_DRV_H_
int amdgpu_xcp_drm_dev_alloc(struct drm_device **ddev);
+void amdgpu_xcp_drm_dev_free(struct drm_device *ddev);
void amdgpu_xcp_drv_release(void);
#endif /* _AMDGPU_XCP_DRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile
index 89d605de0595..0084a8d55254 100644
--- a/drivers/gpu/drm/amd/display/Makefile
+++ b/drivers/gpu/drm/amd/display/Makefile
@@ -44,6 +44,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mmhubbub
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/mpc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/opp
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/pg
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/soc_and_ip_translator
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
index 7329b8cc2576..8e949fe77312 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
@@ -39,7 +39,8 @@ AMDGPUDM = \
amdgpu_dm_psr.o \
amdgpu_dm_replay.o \
amdgpu_dm_quirks.o \
- amdgpu_dm_wb.o
+ amdgpu_dm_wb.o \
+ amdgpu_dm_colorop.o
ifdef CONFIG_DRM_AMD_DC_FP
AMDGPUDM += dc_fpu.o
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 4e86370ae705..740711ac1037 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -39,13 +40,11 @@
#include "dc/dc_stat.h"
#include "dc/dc_state.h"
#include "amdgpu_dm_trace.h"
-#include "dpcd_defs.h"
#include "link/protocols/link_dpcd.h"
#include "link_service_types.h"
#include "link/protocols/link_dp_capability.h"
#include "link/protocols/link_ddc.h"
-#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
@@ -56,7 +55,6 @@
#include "amdgpu_dm_hdcp.h"
#include <drm/display/drm_hdcp_helper.h>
#include "amdgpu_dm_wb.h"
-#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amd_shared.h"
@@ -82,6 +80,7 @@
#include <linux/component.h>
#include <linux/sort.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic.h>
@@ -102,15 +101,6 @@
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-#include "dcn/dcn_1_0_offset.h"
-#include "dcn/dcn_1_0_sh_mask.h"
-#include "soc15_hw_ip.h"
-#include "soc15_common.h"
-#include "vega10_ip_offset.h"
-
-#include "gc/gc_11_0_0_offset.h"
-#include "gc/gc_11_0_0_sh_mask.h"
-
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
@@ -243,6 +233,7 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
@@ -427,8 +418,7 @@ static inline bool update_planes_and_stream_adapter(struct dc *dc,
/*
* Previous frame finished and HW is ready for optimization.
*/
- if (update_type == UPDATE_TYPE_FAST)
- dc_post_update_surfaces_to_stream(dc);
+ dc_post_update_surfaces_to_stream(dc);
return dc_update_planes_and_stream(dc,
array_of_surface_update,
@@ -541,6 +531,50 @@ static void dm_pflip_high_irq(void *interrupt_params)
amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
}
+static void dm_handle_vmin_vmax_update(struct work_struct *offload_work)
+{
+ struct vupdate_offload_work *work = container_of(offload_work, struct vupdate_offload_work, work);
+ struct amdgpu_device *adev = work->adev;
+ struct dc_stream_state *stream = work->stream;
+ struct dc_crtc_timing_adjust *adjust = work->adjust;
+
+ mutex_lock(&adev->dm.dc_lock);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, stream, adjust);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ dc_stream_release(stream);
+ kfree(work->adjust);
+ kfree(work);
+}
+
+static void schedule_dc_vmin_vmax(struct amdgpu_device *adev,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT);
+ if (!offload_work) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n");
+ return;
+ }
+
+ struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT);
+ if (!adjust_copy) {
+ drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n");
+ kfree(offload_work);
+ return;
+ }
+
+ dc_stream_retain(stream);
+ memcpy(adjust_copy, adjust, sizeof(*adjust_copy));
+
+ INIT_WORK(&offload_work->work, dm_handle_vmin_vmax_update);
+ offload_work->adev = adev;
+ offload_work->stream = stream;
+ offload_work->adjust = adjust_copy;
+
+ queue_work(system_wq, &offload_work->work);
+}
+
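schedule_dc_vmin_vmax() exists because dc_stream_adjust_vmin_vmax() is too heavy to call under the IRQ-side event_lock: the handler snapshots the adjustment into a private copy and defers the real call to a work item. A single-threaded sketch of that copy-and-defer pattern, with a toy list standing in for system_wq:

```c
/* The "IRQ" handler only snapshots parameters and queues a work item;
 * the heavyweight call runs later in process context. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct timing_adjust { int v_total_min, v_total_max; };

struct vupdate_work {
	struct timing_adjust adjust;	/* private copy, IRQ-safe */
	struct vupdate_work *next;
};

static struct vupdate_work *work_head;

static void queue_work_item(struct vupdate_work *w)
{
	w->next = work_head;
	work_head = w;
}

/* "IRQ" side: allocate, copy, queue; never touch the slow path here. */
static void irq_handler(const struct timing_adjust *adjust)
{
	struct vupdate_work *w = malloc(sizeof(*w));

	if (!w)
		return;			/* drop the update, as GFP_NOWAIT may */
	memcpy(&w->adjust, adjust, sizeof(w->adjust));
	queue_work_item(w);
}

/* Worker side: the call that must not run under the IRQ spinlock. */
static void run_pending_work(void)
{
	while (work_head) {
		struct vupdate_work *w = work_head;

		work_head = w->next;
		printf("adjust vmin/vmax to %d/%d\n",
		       w->adjust.v_total_min, w->adjust.v_total_max);
		free(w);
	}
}

int main(void)
{
	struct timing_adjust a = { 2200, 2500 };

	irq_handler(&a);
	run_pending_work();
	return 0;
}
```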
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
@@ -578,22 +612,27 @@ static void dm_vupdate_high_irq(void *interrupt_params)
* page-flip completion events that have been queued to us
* if a pageflip happened inside front-porch.
*/
- if (vrr_active) {
+ if (vrr_active && acrtc->dm_irq_params.stream) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state
+ == VRR_STATE_ACTIVE_VARIABLE;
+
amdgpu_dm_crtc_handle_vblank(acrtc);
/* BTR processing for pre-DCE12 ASICs */
- if (acrtc->dm_irq_params.stream &&
- adev->family < AMDGPU_FAMILY_AI) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
}
@@ -676,15 +715,20 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
- acrtc->dm_irq_params.vrr_params.supported &&
- acrtc->dm_irq_params.freesync_config.state ==
- VRR_STATE_ACTIVE_VARIABLE) {
+ acrtc->dm_irq_params.vrr_params.supported) {
+ bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
+ bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
+ bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
+
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
- dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
- &acrtc->dm_irq_params.vrr_params.adjust);
+ /* update vmin_vmax only if freesync is active, or if PSR and Replay are both disabled */
+ if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
+ schedule_dc_vmin_vmax(adev, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
}
/*
@@ -1956,6 +2000,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.flags.disable_ips_in_vpb = 0;
+ /* DCN35 and above support dynamic DTBCLK switching */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+ init_data.flags.allow_0_dtb_clk = true;
+
/* Enable DWB for tested platforms only */
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
@@ -2142,7 +2190,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
drm_err(adev_to_drm(adev),
- "failed to initialize sw for display support.\n");
+ "failed to initialize vblank for display support.\n");
goto error;
}
@@ -2901,7 +2949,7 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
return -ENOMEM;
}
- r = i2c_add_adapter(&oem_i2c->base);
+ r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base);
if (r) {
drm_info(adev_to_drm(adev), "Failed to register oem i2c\n");
kfree(oem_i2c);
@@ -2913,17 +2961,6 @@ static int dm_oem_i2c_hw_init(struct amdgpu_device *adev)
return 0;
}
-static void dm_oem_i2c_hw_fini(struct amdgpu_device *adev)
-{
- struct amdgpu_display_manager *dm = &adev->dm;
-
- if (dm->oem_i2c) {
- i2c_del_adapter(&dm->oem_i2c->base);
- kfree(dm->oem_i2c);
- dm->oem_i2c = NULL;
- }
-}
-
/**
* dm_hw_init() - Initialize DC device
* @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
@@ -2974,8 +3011,6 @@ static int dm_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
- dm_oem_i2c_hw_fini(adev);
-
amdgpu_dm_hpd_fini(adev);
amdgpu_dm_irq_fini(adev);
@@ -3003,14 +3038,20 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n",
enable ? "enable" : "disable");
- if (enable) {
- if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
- } else
- rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
-
- if (rc)
- drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+ if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) {
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(
+ to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(
+ &acrtc->base, false);
+
+ if (rc)
+ drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n",
+ enable ? "en" : "dis");
+ }
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
/* During gpu-reset we disable and then enable vblank irq, so
@@ -3351,6 +3392,67 @@ static void apply_delay_after_dpcd_poweroff(struct amdgpu_device *adev,
}
}
+/**
+ * amdgpu_dm_dump_links_and_sinks - Debug dump of all DC links and their sinks
+ * @adev: amdgpu device pointer
+ *
+ * Iterates through all DC links and dumps information about local and remote
+ * (MST) sinks. Should be called after connector detection is complete to see
+ * the final state of all links.
+ */
+static void amdgpu_dm_dump_links_and_sinks(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct drm_device *dev = adev_to_drm(adev);
+ int li;
+
+ if (!dc)
+ return;
+
+ for (li = 0; li < dc->link_count; li++) {
+ struct dc_link *l = dc->links[li];
+ const char *name = NULL;
+ int rs;
+
+ if (!l)
+ continue;
+ if (l->local_sink && l->local_sink->edid_caps.display_name[0])
+ name = l->local_sink->edid_caps.display_name;
+ else
+ name = "n/a";
+
+ drm_dbg_kms(dev,
+ "LINK_DUMP[%d]: local_sink=%p type=%d sink_signal=%d sink_count=%u edid_name=%s mst_capable=%d mst_alloc_streams=%d\n",
+ li,
+ l->local_sink,
+ l->type,
+ l->local_sink ? l->local_sink->sink_signal : SIGNAL_TYPE_NONE,
+ l->sink_count,
+ name,
+ l->dpcd_caps.is_mst_capable,
+ l->mst_stream_alloc_table.stream_count);
+
+ /* Dump remote (MST) sinks if any */
+ for (rs = 0; rs < l->sink_count; rs++) {
+ struct dc_sink *rsink = l->remote_sinks[rs];
+ const char *rname = NULL;
+
+ if (!rsink)
+ continue;
+ if (rsink->edid_caps.display_name[0])
+ rname = rsink->edid_caps.display_name;
+ else
+ rname = "n/a";
+ drm_dbg_kms(dev,
+ " REMOTE_SINK[%d:%d]: sink=%p signal=%d edid_name=%s\n",
+ li, rs,
+ rsink,
+ rsink->sink_signal,
+ rname);
+ }
+ }
+}
+
static int dm_resume(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -3522,6 +3624,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
+ bool init = false;
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -3531,10 +3634,23 @@ static int dm_resume(struct amdgpu_ip_block *ip_block)
aconnector->mst_root)
continue;
- drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
+ scoped_guard(mutex, &aconnector->mst_mgr.lock) {
+ init = !aconnector->mst_mgr.mst_primary;
+ }
+ if (init)
+ dm_helpers_dp_mst_start_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link, false);
+ else
+ drm_dp_mst_topology_queue_probe(&aconnector->mst_mgr);
}
drm_connector_list_iter_end(&iter);
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
@@ -3597,23 +3713,25 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
- .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+ .atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
};
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
+ const struct drm_panel_backlight_quirk *panel_backlight_quirk;
struct amdgpu_dm_backlight_caps *caps;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
struct drm_luminance_range_info *luminance_range;
- int min_input_signal_override;
+ struct drm_device *drm;
if (aconnector->bl_idx == -1 ||
aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
return;
conn_base = &aconnector->base;
- adev = drm_to_adev(conn_base->dev);
+ drm = conn_base->dev;
+ adev = drm_to_adev(drm);
caps = &adev->dm.backlight_caps[aconnector->bl_idx];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
@@ -3646,9 +3764,24 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
else
caps->aux_min_input_signal = 1;
- min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid);
- if (min_input_signal_override >= 0)
- caps->min_input_signal = min_input_signal_override;
+ panel_backlight_quirk =
+ drm_get_panel_backlight_quirk(aconnector->drm_edid);
+ if (!IS_ERR_OR_NULL(panel_backlight_quirk)) {
+ if (panel_backlight_quirk->min_brightness) {
+ caps->min_input_signal =
+ panel_backlight_quirk->min_brightness - 1;
+ drm_info(drm,
+ "Applying panel backlight quirk, min_brightness: %d\n",
+ caps->min_input_signal);
+ }
+ if (panel_backlight_quirk->brightness_mask) {
+ drm_info(drm,
+ "Applying panel backlight quirk, brightness_mask: 0x%X\n",
+ panel_backlight_quirk->brightness_mask);
+ caps->brightness_mask =
+ panel_backlight_quirk->brightness_mask;
+ }
+ }
}
DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T))
@@ -3728,7 +3861,9 @@ void amdgpu_dm_update_connector_after_detect(
drm_dbg_kms(dev, "DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
aconnector->connector_id, aconnector->dc_sink, sink);
- guard(mutex)(&dev->mode_config.mutex);
+ /* When polling, DRM has already locked the mutex for us. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_lock(&dev->mode_config.mutex);
/*
* 1. Update status of the drm connector
@@ -3791,6 +3926,101 @@ void amdgpu_dm_update_connector_after_detect(
}
update_subconnector_property(aconnector);
+
+ /* When polling, the mutex will be unlocked for us by DRM. */
+ if (!drm_kms_helper_is_poll_worker())
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
+{
+ if (!sink1 || !sink2)
+ return false;
+ if (sink1->sink_signal != sink2->sink_signal)
+ return false;
+
+ if (sink1->dc_edid.length != sink2->dc_edid.length)
+ return false;
+
+ if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
+ sink1->dc_edid.length) != 0)
+ return false;
+ return true;
+}
+
+
+/**
+ * DOC: hdmi_hpd_debounce_work
+ *
+ * HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
+ * (such as during power save transitions), this delay determines how long to
+ * wait before processing the HPD event. This allows distinguishing between a
+ * physical unplug (>hdmi_hpd_debounce_delay)
+ * and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay).
+ *
+ * If the toggle is less than this delay, the driver compares sink capabilities
+ * and permits a hotplug event if they changed.
+ *
+ * The default value of 1500ms was chosen based on experimental testing with
+ * various monitors that exhibit spontaneous HPD toggling behavior.
+ */
+static void hdmi_hpd_debounce_work(struct work_struct *work)
+{
+ struct amdgpu_dm_connector *aconnector =
+ container_of(to_delayed_work(work), struct amdgpu_dm_connector,
+ hdmi_hpd_debounce_work);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = aconnector->dc_link->ctx->dc;
+ bool fake_reconnect = false;
+ bool reallow_idle = false;
+ bool ret = false;
+ guard(mutex)(&aconnector->hpd_lock);
+
+ /* Re-detect the display */
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
+ dc_allow_idle_optimizations(dc, false);
+ reallow_idle = true;
+ }
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ }
+
+ if (ret) {
+ /* Apply workaround delay for certain panels */
+ apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
+ /* Compare sinks to determine if this was a spontaneous HPD toggle */
+ if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
+ /*
+ * Sinks match - this was a spontaneous HDMI HPD toggle.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
+ fake_reconnect = true;
+ }
+
+ /* Update connector state */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ /* Only notify OS if sink actually changed */
+ if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+ }
+
+ /* Release the cached sink reference */
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
+ scoped_guard(mutex, &adev->dm.dc_lock) {
+ if (reallow_idle && dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, true);
+ }
}
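The debounce flow above caches the old sink, arms a delayed work item, and on expiry re-detects and suppresses the hotplug event when the sink is unchanged. A userspace sketch of the comparison step follows; the structures and EDID layout are invented, and timer delivery is simulated by calling the expiry function directly.

```c
/* On debounce expiry, compare the cached sink against the freshly
 * detected one and only report a hotplug if it actually changed. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct sink { int signal; unsigned char edid[8]; size_t edid_len; };

static bool sinks_equal(const struct sink *a, const struct sink *b)
{
	if (!a || !b)
		return false;
	return a->signal == b->signal &&
	       a->edid_len == b->edid_len &&
	       !memcmp(a->edid, b->edid, a->edid_len);
}

/* Called when the debounce timer fires with the freshly detected sink. */
static void debounce_expired(const struct sink *prev, const struct sink *now)
{
	if (sinks_equal(prev, now))
		puts("sink unchanged: spurious toggle, suppress hotplug event");
	else
		puts("sink changed: deliver hotplug event to userspace");
}

int main(void)
{
	struct sink old = { 1, { 0xaa, 0xbb }, 2 };
	struct sink same = old;
	struct sink changed = { 1, { 0xcc, 0xdd }, 2 };

	debounce_expired(&old, &same);
	debounce_expired(&old, &changed);
	return 0;
}
```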
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
@@ -3802,6 +4032,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
struct dc *dc = aconnector->dc_link->ctx->dc;
bool ret = false;
+ bool debounce_required = false;
if (adev->dm.disable_hpd_irq)
return;
@@ -3824,6 +4055,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");
+ /*
+ * Check for HDMI disconnect with debounce enabled.
+ */
+ debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
+ dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
+ new_connection_type == dc_connection_none &&
+ aconnector->dc_link->local_sink != NULL);
+
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
@@ -3833,7 +4072,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
+ } else if (debounce_required) {
+ /*
+ * HDMI disconnect detected - schedule delayed work instead of
+ * processing immediately. This lets us distinguish spurious
+ * HDMI HPD toggles from physical unplugs.
+ */
+ drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
+ aconnector->hdmi_hpd_debounce_delay_ms);
+
+ /* Cache the current sink for later comparison */
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
+ if (aconnector->hdmi_prev_sink)
+ dc_sink_retain(aconnector->hdmi_prev_sink);
+
+ /* Schedule delayed detection. */
+ if (mod_delayed_work(system_wq,
+ &aconnector->hdmi_hpd_debounce_work,
+ msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
+ drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");
+
} else {
+
+ /* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */
+ if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
+ return;
+
scoped_guard(mutex, &adev->dm.dc_lock) {
dc_exit_ips_for_hw_access(dc);
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
@@ -4763,8 +5029,8 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
uint32_t *user_brightness)
{
u32 brightness = scale_input_to_fw(min, max, *user_brightness);
- u8 prev_signal = 0, prev_lum = 0;
- int i = 0;
+ u8 lower_signal, upper_signal, upper_lum, lower_lum, lum;
+ int left, right;
if (amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)
return;
@@ -4772,32 +5038,54 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap
if (!caps->data_points)
return;
- /* choose start to run less interpolation steps */
- if (caps->luminance_data[caps->data_points/2].input_signal > brightness)
- i = caps->data_points/2;
- do {
- u8 signal = caps->luminance_data[i].input_signal;
- u8 lum = caps->luminance_data[i].luminance;
+ /*
+ * Handle the case where brightness is below the first data point:
+ * interpolate between (0,0) and (first_signal, first_lum).
+ */
+ if (brightness < caps->luminance_data[0].input_signal) {
+ lum = DIV_ROUND_CLOSEST(caps->luminance_data[0].luminance * brightness,
+ caps->luminance_data[0].input_signal);
+ goto scale;
+ }
- /*
- * brightness == signal: luminance is percent numerator
- * brightness < signal: interpolate between previous and current luminance numerator
- * brightness > signal: find next data point
- */
- if (brightness > signal) {
- prev_signal = signal;
- prev_lum = lum;
- i++;
- continue;
+ left = 0;
+ right = caps->data_points - 1;
+ while (left <= right) {
+ int mid = left + (right - left) / 2;
+ u8 signal = caps->luminance_data[mid].input_signal;
+
+ /* Exact match found */
+ if (signal == brightness) {
+ lum = caps->luminance_data[mid].luminance;
+ goto scale;
}
- if (brightness < signal)
- lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) *
- (brightness - prev_signal),
- signal - prev_signal);
- *user_brightness = scale_fw_to_input(min, max,
- DIV_ROUND_CLOSEST(lum * brightness, 101));
- return;
- } while (i < caps->data_points);
+
+ if (signal < brightness)
+ left = mid + 1;
+ else
+ right = mid - 1;
+ }
+
+ /* Clamp to the last valid index */
+ if (left >= caps->data_points)
+ left = caps->data_points - 1;
+
+ /* Here left > right, unless left was clamped to the last index */
+ lower_signal = caps->luminance_data[right].input_signal;
+ upper_signal = caps->luminance_data[left].input_signal;
+ lower_lum = caps->luminance_data[right].luminance;
+ upper_lum = caps->luminance_data[left].luminance;
+
+ /* Interpolate, unless the bounds collapsed or lower luminance is 0 */
+ if (right == left || !lower_lum)
+ lum = upper_lum;
+ else
+ lum = lower_lum + DIV_ROUND_CLOSEST((upper_lum - lower_lum) *
+ (brightness - lower_signal),
+ upper_signal - lower_signal);
+scale:
+ *user_brightness = scale_fw_to_input(min, max,
+ DIV_ROUND_CLOSEST(lum * brightness, 101));
}
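Since the binary search plus interpolation above is easy to get subtly wrong, here is a self-contained userspace demo of the same scheme; the table values are invented for illustration, and DIV_ROUND_CLOSEST is simplified for non-negative operands:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

struct point { unsigned char signal, lum; };

static unsigned char interp(const struct point *p, int n, unsigned char b)
{
	int left = 0, right = n - 1;

	/* Below the first data point: interpolate from (0, 0) */
	if (b < p[0].signal)
		return DIV_ROUND_CLOSEST(p[0].lum * b, p[0].signal);

	/* Binary search for the bracketing pair */
	while (left <= right) {
		int mid = left + (right - left) / 2;

		if (p[mid].signal == b)
			return p[mid].lum;
		if (p[mid].signal < b)
			left = mid + 1;
		else
			right = mid - 1;
	}
	if (left >= n)
		left = n - 1;

	if (right == left || !p[right].lum)
		return p[left].lum;
	return p[right].lum + DIV_ROUND_CLOSEST(
		(p[left].lum - p[right].lum) * (b - p[right].signal),
		p[left].signal - p[right].signal);
}

int main(void)
{
	const struct point t[] = { {10, 5}, {50, 40}, {90, 95} };

	printf("%d\n", interp(t, 3, 70)); /* 40 + 55*20/40 rounded -> 68 */
	return 0;
}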
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
@@ -4837,6 +5125,21 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
struct dc_link *link;
u32 brightness;
bool rc, reallow_idle = false;
+ struct drm_connector *connector;
+
+ list_for_each_entry(connector, &dm->ddev->mode_config.connector_list, head) {
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (aconnector->bl_idx != bl_idx)
+ continue;
+
+ /* if connector is off, save the brightness for next time it's on */
+ if (!aconnector->base.encoder) {
+ dm->brightness[bl_idx] = user_brightness;
+ dm->actual_brightness[bl_idx] = 0;
+ return;
+ }
+ }
amdgpu_dm_update_backlight_caps(dm, bl_idx);
caps = &dm->backlight_caps[bl_idx];
@@ -4848,6 +5151,10 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];
+ /* Apply brightness quirk */
+ if (caps->brightness_mask)
+ brightness |= caps->brightness_mask;
+
/* Change brightness based on AUX property */
mutex_lock(&dm->dc_lock);
if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
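The brightness_mask quirk above is clearer with concrete numbers; the mask value below is hypothetical, not taken from any real panel:

/* Hypothetical example: a panel whose firmware misbehaves when the low
 * bit of the brightness value is clear could set brightness_mask = 0x1,
 * so a derived value of 0x200 is programmed as 0x201. */
u32 brightness = 0x200;
u32 brightness_mask = 0x1;

brightness |= brightness_mask;	/* 0x201 */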
@@ -4916,10 +5223,8 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (caps.aux_support) {
u32 avg, peak;
- bool rc;
- rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
- if (!rc)
+ if (!dc_link_get_backlight_level_nits(link, &avg, &peak))
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
}
@@ -4985,8 +5290,11 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
} else
props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL;
- if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE))
+ if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) {
drm_info(drm, "Using custom brightness curve\n");
+ props.scale = BACKLIGHT_SCALE_NON_LINEAR;
+ } else
+ props.scale = BACKLIGHT_SCALE_LINEAR;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -5048,6 +5356,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
static void setup_backlight_device(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
+ struct amdgpu_dm_backlight_caps *caps;
struct dc_link *link = aconnector->dc_link;
int bl_idx = dm->num_of_edps;
@@ -5067,6 +5376,13 @@ static void setup_backlight_device(struct amdgpu_display_manager *dm,
dm->num_of_edps++;
update_connector_ext_caps(aconnector);
+ caps = &dm->backlight_caps[aconnector->bl_idx];
+
+ /* Only offer the ABM property on non-OLED panels when the user didn't disable it via module parameter */
+ if (!caps->ext_caps->bits.oled && amdgpu_dm_abm_level < 0)
+ drm_object_attach_property(&aconnector->base.base,
+ dm->adev->mode_info.abm_level_property,
+ ABM_SYSFS_CONTROL);
}
static void amdgpu_set_panel_orientation(struct drm_connector *connector);
@@ -5322,6 +5638,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
amdgpu_set_panel_orientation(&aconnector->base);
}
+ /* Debug dump: list all DC links and their associated sinks after detection
+ * is complete for all connectors. This provides a comprehensive view of the
+ * final state without repeating the dump for each connector.
+ */
+ amdgpu_dm_dump_links_and_sinks(adev);
+
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -5708,6 +6030,10 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
*color_space = COLOR_SPACE_SRGB;
+ /* Ignore properties when DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE is set */
+ if (plane_state->state && plane_state->state->plane_color_pipeline)
+ return 0;
+
/* DRM color properties only affect non-RGB formats. */
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0;
@@ -6349,6 +6675,10 @@ static void fill_stream_properties_from_drm_display_mode(
&& aconnector
&& aconnector->force_yuv420_output)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ && aconnector
+ && aconnector->force_yuv422_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422;
else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
@@ -6380,13 +6710,15 @@ static void fill_stream_properties_from_drm_display_mode(
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup avi infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->vic = avi_frame.video_code;
err = drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame,
(struct drm_connector *)connector,
mode_in);
if (err < 0)
- drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd \n", connector->name, err);
+ drm_warn_once(connector->dev, "Failed to setup vendor infoframe on connector %s: %zd\n",
+ connector->name, err);
timing_out->hdmi_vic = hv_frame.vic;
}
@@ -7054,29 +7386,117 @@ finish:
return stream;
}
+/**
+ * amdgpu_dm_connector_poll - Poll a connector to see if it's connected to a display
+ * @aconnector: DM connector to poll (owns @base drm_connector and @dc_link)
+ * @force: if true, force polling even when DAC load detection was used
+ *
+ * Used for connectors that don't support HPD (hotplug detection) to
+ * periodically check whether the connector is connected to a display.
+ *
+ * When connection was determined via DAC load detection, we avoid
+ * re-running it on normal polls to prevent visible glitches, unless
+ * @force is set.
+ *
+ * Return: The probed connector status (connected/disconnected/unknown).
+ */
static enum drm_connector_status
-amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+amdgpu_dm_connector_poll(struct amdgpu_dm_connector *aconnector, bool force)
{
- bool connected;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc_link *link = aconnector->dc_link;
+ enum dc_connection_type conn_type = dc_connection_none;
+ enum drm_connector_status status = connector_status_disconnected;
- /*
- * Notes:
- * 1. This interface is NOT called in context of HPD irq.
- * 2. This interface *is called* in context of user-mode ioctl. Which
- * makes it a bad place for *any* MST-related activity.
+ /* When the connection was determined using DAC load detection,
+ * do NOT poll the connector to detect disconnect, because that
+ * would run DAC load detection again, which can cause visible
+ * glitches.
+ *
+ * Only poll such a connector again when forced.
*/
+ if (!force && link->local_sink && link->type == dc_connection_dac_load)
+ return connector->status;
- if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
- !aconnector->fake_enable)
- connected = (aconnector->dc_sink != NULL);
- else
- connected = (aconnector->base.force == DRM_FORCE_ON ||
- aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (dc_link_detect_connection_type(aconnector->dc_link, &conn_type) &&
+ conn_type != dc_connection_none) {
+ mutex_lock(&adev->dm.dc_lock);
+
+ /* Only run full link detection when a sink hasn't been created yet,
+ * i.e. right when the display is plugged in; otherwise we risk flickering.
+ */
+ if (link->local_sink ||
+ dc_link_detect(link, DETECT_REASON_HPD))
+ status = connector_status_connected;
+
+ mutex_unlock(&adev->dm.dc_lock);
+ }
+
+ if (connector->status != status) {
+ if (status == connector_status_disconnected) {
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ link->local_sink = NULL;
+ link->dpcd_sink_count = 0;
+ link->type = dc_connection_none;
+ }
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ }
+
+ mutex_unlock(&aconnector->hpd_lock);
+ return status;
+}
+
+/**
+ * amdgpu_dm_connector_detect() - Detect whether a DRM connector is connected to a display
+ *
+ * A connector is considered connected when it has a sink that is not NULL.
+ * For connectors that support HPD (hotplug detection), the connection is
+ * handled in the HPD interrupt.
+ * For connectors that may not support HPD, such as analog connectors,
+ * DRM will call this function repeatedly to poll them.
+ *
+ * Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl. Which
+ * makes it a bad place for *any* MST-related activity.
+ *
+ * @connector: The DRM connector we are checking. We convert it to
+ * amdgpu_dm_connector so we can read the DC link and state.
+ * @force: If true, run a full detect even where a lighter check would
+ * normally be used to avoid flicker.
+ *
+ * Return: The connector status (connected, disconnected, or unknown).
+ *
+ */
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
update_subconnector_property(aconnector);
- return (connected ? connector_status_connected :
+ if (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL)
+ return connector_status_connected;
+ else if (aconnector->base.force == DRM_FORCE_OFF)
+ return connector_status_disconnected;
+
+ /* Poll analog connectors and only when either
+ * disconnected or connected to an analog display.
+ */
+ if (drm_kms_helper_is_poll_worker() &&
+ dc_connector_supports_analog(aconnector->dc_link->link_id.id) &&
+ (!aconnector->dc_sink || aconnector->dc_sink->edid_caps.analog))
+ return amdgpu_dm_connector_poll(aconnector, force);
+
+ return (aconnector->dc_sink ? connector_status_connected :
connector_status_disconnected);
}
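For context, connectors only reach the poll path above when they opted into polling; a sketch of the standard DRM wiring assumed here (the flag assignment mirrors the DVI-I/VGA hunk later in this patch):

/* Sketch (standard DRM polling, not part of this diff): the poll worker
 * probes flagged connectors roughly every 10 seconds and invokes
 * .detect with force == false. */
aconnector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
			  DRM_CONNECTOR_POLL_DISCONNECT;
drm_kms_helper_poll_init(adev_to_drm(adev));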
@@ -7127,6 +7547,20 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
dm_new_state->underscan_enable = val;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ switch (val) {
+ case ABM_SYSFS_CONTROL:
+ dm_new_state->abm_sysfs_forbidden = false;
+ break;
+ case ABM_LEVEL_OFF:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ break;
+ default:
+ dm_new_state->abm_sysfs_forbidden = true;
+ dm_new_state->abm_level = val;
+ }
+ ret = 0;
}
return ret;
@@ -7169,6 +7603,13 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
} else if (property == adev->mode_info.underscan_property) {
*val = dm_state->underscan_enable;
ret = 0;
+ } else if (property == adev->mode_info.abm_level_property) {
+ if (!dm_state->abm_sysfs_forbidden)
+ *val = ABM_SYSFS_CONTROL;
+ else
+ *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
+ dm_state->abm_level : 0;
+ ret = 0;
}
return ret;
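A hedged sketch of how userspace could drive the new ABM connector property through libdrm; the helper name is invented and the property ID must first be resolved by walking the connector's properties:

/* Hypothetical userspace snippet: set an ABM level on a connector.
 * prop_id is assumed to have been looked up by name beforehand. */
#include <xf86drm.h>
#include <xf86drmMode.h>

static int set_abm_level(int fd, uint32_t connector_id, uint32_t prop_id,
			 uint64_t level)
{
	return drmModeObjectSetProperty(fd, connector_id,
					DRM_MODE_OBJECT_CONNECTOR,
					prop_id, level);
}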
@@ -7221,10 +7662,16 @@ static ssize_t panel_power_savings_store(struct device *device,
return -EINVAL;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- to_dm_connector_state(connector->state)->abm_level = val ?:
- ABM_LEVEL_IMMEDIATE_DISABLE;
+ if (to_dm_connector_state(connector->state)->abm_sysfs_forbidden)
+ ret = -EBUSY;
+ else
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
drm_kms_helper_hotplug_event(dev);
return count;
@@ -7289,6 +7736,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+ /* Cancel and flush any pending HDMI HPD debounce work */
+ cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
+ if (aconnector->hdmi_prev_sink) {
+ dc_sink_release(aconnector->hdmi_prev_sink);
+ aconnector->hdmi_prev_sink = NULL;
+ }
+
if (aconnector->bl_idx != -1) {
backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
dm->backlight_dev[aconnector->bl_idx] = NULL;
@@ -7304,10 +7758,6 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- if (aconnector->i2c) {
- i2c_del_adapter(&aconnector->i2c->base);
- kfree(aconnector->i2c);
- }
kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
@@ -7607,6 +8057,7 @@ create_validate_stream_for_sink(struct drm_connector *connector,
bpc_limit = 8;
do {
+ drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc);
stream = create_stream_for_sink(connector, drm_mode,
dm_state, old_stream,
requested_bpc);
@@ -7642,16 +8093,41 @@ create_validate_stream_for_sink(struct drm_connector *connector,
} while (stream == NULL && requested_bpc >= bpc_limit);
- if ((dc_result == DC_FAIL_ENC_VALIDATE ||
- dc_result == DC_EXCEED_DONGLE_CAP) &&
- !aconnector->force_yuv420_output) {
- DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n",
- __func__, __LINE__);
-
- aconnector->force_yuv420_output = true;
+ switch (dc_result) {
+ /*
+ * If stream validation failed with the requested RGB color depth
+ * (e.g. insufficient DP bandwidth), fall back and retry in order:
+ * YUV422 (8bpc, 6bpc)
+ * YUV420 (8bpc, 6bpc)
+ */
+ case DC_FAIL_ENC_VALIDATE:
+ case DC_EXCEED_DONGLE_CAP:
+ case DC_NO_DP_LINK_BANDWIDTH:
+ /* recursively entered twice and already tried both YUV422 and YUV420 */
+ if (aconnector->force_yuv422_output && aconnector->force_yuv420_output)
+ break;
+ /* first failure; try YUV422 */
+ if (!aconnector->force_yuv422_output) {
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv422_output = true;
+ } else if (!aconnector->force_yuv420_output) {
+ /* recursively entered and YUV422 failed; try YUV420 */
+ drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n",
+ __func__, __LINE__, dc_result);
+ aconnector->force_yuv420_output = true;
+ }
stream = create_validate_stream_for_sink(connector, drm_mode,
- dm_state, old_stream);
+ dm_state, old_stream);
+ aconnector->force_yuv422_output = false;
aconnector->force_yuv420_output = false;
+ break;
+ case DC_OK:
+ break;
+ default:
+ drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n",
+ __func__, __LINE__, dc_result);
+ break;
}
return stream;
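Schematically, the retry ladder above visits at most three encodings; a sketch of the order (illustrative only, not driver code):

/* Fallback order implemented by the switch above. The function recurses
 * at most twice: once with force_yuv422_output set, once more with
 * force_yuv420_output set, then gives up. */
enum encoding_attempt {
	ATTEMPT_RGB,	/* initial pass, both force flags false */
	ATTEMPT_YUV422,	/* first recursion */
	ATTEMPT_YUV420,	/* second recursion */
};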
@@ -7796,6 +8272,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
if (!crtc)
return 0;
+ if (new_con_state->privacy_screen_sw_state != old_con_state->privacy_screen_sw_state) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
if (new_con_state->colorspace != old_con_state->colorspace) {
new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(new_crtc_state))
@@ -7909,7 +8393,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
"mode %dx%d@%dHz is not native, enabling scaling\n",
adjusted_mode->hdisplay, adjusted_mode->vdisplay,
drm_mode_vrefresh(adjusted_mode));
- dm_new_connector_state->scaling = RMX_FULL;
+ dm_new_connector_state->scaling = RMX_ASPECT;
}
return 0;
}
@@ -7927,7 +8411,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (IS_ERR(mst_state))
return PTR_ERR(mst_state);
- mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+ mst_state->pbn_div.full = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -8034,7 +8518,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
return 0;
}
-static int to_drm_connector_type(enum signal_type st)
+static int to_drm_connector_type(enum signal_type st, uint32_t connector_id)
{
switch (st) {
case SIGNAL_TYPE_HDMI_TYPE_A:
@@ -8050,6 +8534,10 @@ static int to_drm_connector_type(enum signal_type st)
return DRM_MODE_CONNECTOR_DisplayPort;
case SIGNAL_TYPE_DVI_DUAL_LINK:
case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ if (connector_id == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ connector_id == CONNECTOR_ID_DUAL_LINK_DVII)
+ return DRM_MODE_CONNECTOR_DVII;
+
return DRM_MODE_CONNECTOR_DVID;
case SIGNAL_TYPE_VIRTUAL:
return DRM_MODE_CONNECTOR_VIRTUAL;
@@ -8101,7 +8589,7 @@ static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
static struct drm_display_mode *
amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
- char *name,
+ const char *name,
int hdisplay, int vdisplay)
{
struct drm_device *dev = encoder->dev;
@@ -8123,6 +8611,24 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
}
+static const struct amdgpu_dm_mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+} common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+};
+
static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
@@ -8133,23 +8639,10 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
to_amdgpu_dm_connector(connector);
int i;
int n;
- struct mode_size {
- char name[DRM_DISPLAY_MODE_LEN];
- int w;
- int h;
- } common_modes[] = {
- { "640x480", 640, 480},
- { "800x600", 800, 600},
- { "1024x768", 1024, 768},
- { "1280x720", 1280, 720},
- { "1280x800", 1280, 800},
- {"1280x1024", 1280, 1024},
- { "1440x900", 1440, 900},
- {"1680x1050", 1680, 1050},
- {"1600x1200", 1600, 1200},
- {"1920x1080", 1920, 1080},
- {"1920x1200", 1920, 1200}
- };
+
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_eDP) &&
+ (connector->connector_type != DRM_MODE_CONNECTOR_LVDS))
+ return;
n = ARRAY_SIZE(common_modes);
@@ -8346,6 +8839,16 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
if (!(amdgpu_freesync_vid_mode && drm_edid))
return;
+ if (!amdgpu_dm_connector->dc_sink || !amdgpu_dm_connector->dc_link)
+ return;
+
+ if (!dc_supports_vrr(amdgpu_dm_connector->dc_sink->ctx->dce_version))
+ return;
+
+ if (dc_connector_supports_analog(amdgpu_dm_connector->dc_link->link_id.id) &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog)
+ return;
+
if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
amdgpu_dm_connector->num_modes +=
add_fs_modes(amdgpu_dm_connector);
@@ -8355,11 +8858,11 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
struct drm_encoder *encoder;
const struct drm_edid *drm_edid = amdgpu_dm_connector->drm_edid;
- struct dc_link_settings *verified_link_cap =
- &amdgpu_dm_connector->dc_link->verified_link_cap;
- const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
+ struct dc_link_settings *verified_link_cap = &dc_link->verified_link_cap;
+ const struct dc *dc = dc_link->dc;
encoder = amdgpu_dm_connector_to_encoder(connector);
@@ -8369,6 +8872,17 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
amdgpu_dm_connector->num_modes +=
drm_add_modes_noedid(connector, 1920, 1080);
+
+ if (amdgpu_dm_connector->dc_sink &&
+ amdgpu_dm_connector->dc_sink->edid_caps.analog &&
+ dc_connector_supports_analog(dc_link->link_id.id)) {
+ /* Analog monitor connected by DAC load detection.
+ * Add common modes. It will be up to the user to select one that works.
+ */
+ for (int i = 0; i < ARRAY_SIZE(common_modes); i++)
+ amdgpu_dm_connector->num_modes += drm_add_modes_noedid(
+ connector, common_modes[i].w, common_modes[i].h);
+ }
} else {
amdgpu_dm_connector_ddc_get_modes(connector, drm_edid);
if (encoder)
@@ -8416,6 +8930,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);
+ aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
+ INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
+ aconnector->hdmi_prev_sink = NULL;
+
/*
* Configure HPD support. connector->polled defaults to 0,
* which means HPD hot plug is not supported.
@@ -8437,6 +8955,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
case DRM_MODE_CONNECTOR_DVID:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_VGA:
+ aconnector->base.polled =
+ DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ break;
default:
break;
}
@@ -8490,6 +9013,18 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
}
+
+ if (connector_type == DRM_MODE_CONNECTOR_eDP) {
+ struct drm_privacy_screen *privacy_screen;
+
+ privacy_screen = drm_privacy_screen_get(adev_to_drm(adev)->dev, NULL);
+ if (!IS_ERR(privacy_screen)) {
+ drm_connector_attach_privacy_screen_provider(&aconnector->base,
+ privacy_screen);
+ } else if (PTR_ERR(privacy_screen) != -ENODEV) {
+ drm_warn(adev_to_drm(adev), "Error getting privacy-screen\n");
+ }
+ }
}
static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
@@ -8619,14 +9154,14 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
}
aconnector->i2c = i2c;
- res = i2c_add_adapter(&i2c->base);
+ res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base);
if (res) {
drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index);
goto out_free;
}
- connector_type = to_drm_connector_type(link->connector_signal);
+ connector_type = to_drm_connector_type(link->connector_signal, link->link_id.id);
res = drm_connector_init_with_ddc(
dm->ddev,
@@ -8717,7 +9252,16 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static void manage_dm_interrupts(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dm_crtc_state *acrtc_state)
-{
+{
+ /*
+ * We cannot be sure that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ * So we have to go through the CRTC to find the right IRQ.
+ */
+ int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+ struct drm_device *dev = adev_to_drm(adev);
+
struct drm_vblank_crtc_config config = {0};
struct dc_crtc_timing *timing;
int offdelay;
@@ -8770,7 +9314,35 @@ static void manage_dm_interrupts(struct amdgpu_device *adev,
drm_crtc_vblank_on_config(&acrtc->base,
&config);
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+ if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n");
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n");
+#endif
+ }
+
} else {
+ /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put. */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 2, 0):
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n");
+#endif
+ if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type))
+ drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n");
+ }
+
drm_crtc_vblank_off(&acrtc->base);
}
}
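Note that the disable path releases the IRQs in the reverse of the order the enable path acquired them; the balanced pattern, reduced to a sketch (the enable flag is a stand-in for the vblank on/off branches above):

/* Pattern sketch: every amdgpu_irq_get() on enable must be matched by
 * an amdgpu_irq_put() on disable, or the IRQ source's reference count
 * leaks and the interrupt is never masked again. */
if (enable) {
	amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type);
	amdgpu_irq_get(adev, &adev->vline0_irq, irq_type);	/* secure display only */
} else {
	amdgpu_irq_put(adev, &adev->vline0_irq, irq_type);	/* secure display only */
	amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type);
}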
@@ -10078,69 +10650,40 @@ static void dm_set_writeback(struct amdgpu_display_manager *dm,
drm_writeback_queue_job(wb_conn, new_con_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_update_hdcp(struct drm_atomic_state *state)
{
+ struct drm_connector_state *old_con_state, *new_con_state;
struct drm_device *dev = state->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL;
- u32 i, j;
- struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
- struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- amdgpu_dm_commit_streams(state, dc_state);
- }
+ if (!adev->dm.hdcp_workqueue)
+ return;
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
struct amdgpu_dm_connector *aconnector;
- if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ if (!connector || connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
aconnector = to_amdgpu_dm_connector(connector);
- if (!adev->dm.hdcp_workqueue)
- continue;
-
- pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
-
- if (!connector)
- continue;
+ drm_dbg(dev, "[HDCP_DM] -------------- i : %x ----------\n", i);
- pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ drm_dbg(dev, "[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
connector->index, connector->status, connector->dpms);
- pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ drm_dbg(dev, "[HDCP_DM] state protection old: %x new: %x\n",
old_con_state->content_protection, new_con_state->content_protection);
if (aconnector->dc_sink) {
if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
- pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ drm_dbg(dev, "[HDCP_DM] pipe_ctx dispname=%s\n",
aconnector->dc_sink->edid_caps.display_name);
}
}
@@ -10154,7 +10697,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
if (old_crtc_state)
- pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
old_crtc_state->enable,
old_crtc_state->active,
old_crtc_state->mode_changed,
@@ -10162,29 +10705,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
old_crtc_state->connectors_changed);
if (new_crtc_state)
- pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ drm_dbg(dev, "NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
new_crtc_state->enable,
new_crtc_state->active,
new_crtc_state->mode_changed,
new_crtc_state->active_changed,
new_crtc_state->connectors_changed);
- }
-
- for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
- struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- if (!adev->dm.hdcp_workqueue)
- continue;
- new_crtc_state = NULL;
- old_crtc_state = NULL;
-
- if (acrtc) {
- new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
- old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
- }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -10228,7 +10755,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
enable_encryption = true;
- drm_info(adev_to_drm(adev), "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+ drm_info(dev, "[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
if (aconnector->dc_link)
hdcp_update_display(
@@ -10236,6 +10763,78 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
new_con_state->hdcp_content_type, enable_encryption);
}
}
+}
+
+static int amdgpu_dm_atomic_setup_commit(struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int i, ret;
+
+ ret = drm_dp_mst_atomic_setup_commit(state);
+ if (ret)
+ return ret;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.active && dm_new_crtc_state->stream &&
+ (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret) {
+ drm_dbg_atomic(state->dev, "Failed to update color state\n");
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
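For orientation, amdgpu_dm_atomic_setup_commit() presumably gets wired through the atomic helpers' commit-setup hook; the registration below is an assumed sketch, not a hunk from this patch:

/* Assumed wiring: the helpers call atomic_commit_setup after
 * atomic_check succeeds and before commit_tail runs. */
static const struct drm_mode_config_helper_funcs dm_mode_config_helpers = {
	.atomic_commit_setup = amdgpu_dm_atomic_setup_commit,
	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
};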
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state = NULL, *new_con_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ amdgpu_dm_update_hdcp(state);
/* Handle connector state changes */
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10318,7 +10917,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties.
*/
- dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_KERNEL);
if (!dummy_updates) {
drm_err(adev_to_drm(adev), "Failed to allocate memory for dummy_updates.\n");
continue;
@@ -10338,6 +10937,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
&stream_update);
mutex_unlock(&dm->dc_lock);
kfree(dummy_updates);
+
+ drm_connector_update_privacy_screen(new_con_state);
}
/**
@@ -10389,6 +10990,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
uint8_t cnt;
+
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
for (cnt = 0; cnt < MAX_CRC_WINDOW_NUM; cnt++) {
if (acrtc->dm_irq_params.window_param[cnt].enable) {
@@ -10691,6 +11293,8 @@ static void get_freesync_config_for_crtc(
} else {
config.state = VRR_STATE_INACTIVE;
}
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
}
out:
new_crtc_state->freesync_config = config;
@@ -11008,7 +11612,7 @@ skip_modeset:
if (dm_new_crtc_state->base.color_mgmt_changed ||
dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
drm_atomic_crtc_needs_modeset(new_crtc_state)) {
- ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ ret = amdgpu_dm_check_crtc_color_mgmt(dm_new_crtc_state, true);
if (ret)
goto fail;
}
@@ -12240,7 +12844,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
int j = state->num_private_objs-1;
dm_atomic_destroy_state(obj,
- state->private_objs[i].state);
+ state->private_objs[i].state_to_destroy);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
@@ -12251,7 +12855,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
state->private_objs[j];
state->private_objs[j].ptr = NULL;
- state->private_objs[j].state = NULL;
+ state->private_objs[j].state_to_destroy = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;
@@ -12592,7 +13196,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
dm_con_state = to_dm_connector_state(connector->state);
- if (!adev->dm.freesync_module)
+ if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version))
goto update;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index b937da0a4e4a..ef97cede9926 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
*
@@ -58,6 +59,7 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)
+#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -152,6 +154,20 @@ struct idle_workqueue {
bool running;
};
+/**
+ * struct vupdate_offload_work - Work data for offloading task from vupdate handler
+ * @work: Kernel work data for the work event
+ * @adev: amdgpu_device back pointer
+ * @stream: DC stream associated with the crtc
+ * @adjust: DC CRTC timing adjust to be applied to the crtc
+ */
+struct vupdate_offload_work {
+ struct work_struct work;
+ struct amdgpu_device *adev;
+ struct dc_stream_state *stream;
+ struct dc_crtc_timing_adjust *adjust;
+};
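A hedged sketch of how this struct would typically be queued from the vupdate interrupt; the handler name and workqueue choice are assumptions:

/* Hypothetical usage: allocate atomically in IRQ context and defer the
 * timing adjustment to process context. vupdate_offload_fn is an
 * assumed handler name. */
static void vupdate_offload_fn(struct work_struct *work);

static void queue_vupdate_offload(struct amdgpu_device *adev,
				  struct dc_stream_state *stream,
				  struct dc_crtc_timing_adjust *adjust)
{
	struct vupdate_offload_work *offload =
		kzalloc(sizeof(*offload), GFP_ATOMIC);

	if (!offload)
		return;

	offload->adev = adev;
	offload->stream = stream;
	offload->adjust = adjust;
	INIT_WORK(&offload->work, vupdate_offload_fn);
	queue_work(system_highpri_wq, &offload->work);
}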
+
#define MAX_LUMINANCE_DATA_POINTS 99
/**
@@ -201,6 +217,11 @@ struct amdgpu_dm_backlight_caps {
*/
bool aux_support;
/**
+ * @brightness_mask: After deriving brightness, OR it with this mask.
+ * Workaround for panels that misbehave at certain brightness values.
+ */
+ u32 brightness_mask;
+ /**
* @ac_level: the default brightness if booted on AC
*/
u8 ac_level;
@@ -753,6 +774,9 @@ struct amdgpu_dm_connector {
uint16_t vc_full_pbn;
struct mutex handle_mst_msg_ready;
+ /* branch device specific data */
+ uint32_t branch_ieee_oui;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -776,6 +800,7 @@ struct amdgpu_dm_connector {
bool fake_enable;
bool force_yuv420_output;
+ bool force_yuv422_output;
struct dsc_preferred_settings dsc_settings;
union dp_downstream_port_present mst_downstream_port_present;
/* Cached display modes */
@@ -795,6 +820,11 @@ struct amdgpu_dm_connector {
bool pack_sdp_v1_3;
enum adaptive_sync_type as_type;
struct amdgpu_hdmi_vsdb_info vsdb_info;
+
+ /* HDMI HPD debounce support */
+ unsigned int hdmi_hpd_debounce_delay_ms;
+ struct delayed_work hdmi_hpd_debounce_work;
+ struct dc_sink *hdmi_prev_sink;
};
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
@@ -969,6 +999,7 @@ struct dm_connector_state {
bool underscan_enable;
bool freesync_capable;
bool update_hdcp;
+ bool abm_sysfs_forbidden;
uint8_t abm_level;
int vcpi_slots;
uint64_t pbn;
@@ -1023,6 +1054,8 @@ void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_create_color_properties(struct amdgpu_device *adev);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index ebabfe3a512f..1dcc79b35225 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -25,13 +26,39 @@
#include "amdgpu.h"
#include "amdgpu_mode.h"
#include "amdgpu_dm.h"
+#include "amdgpu_dm_colorop.h"
#include "dc.h"
#include "modules/color/color_gamma.h"
-#include "basics/conversion.h"
/**
* DOC: overview
*
+ * We have three types of color management in the AMD display driver.
+ * 1. the legacy &drm_crtc DEGAMMA, CTM, and GAMMA properties
+ * 2. AMD driver private color management on &drm_plane and &drm_crtc
+ * 3. AMD plane color pipeline
+ *
+ * The CRTC properties are the original color management. When they were
+ * implemented per-plane color management was not a thing yet. Because
+ * of that we could get away with plumbing the DEGAMMA and CTM
+ * properties to pre-blending HW functions. This is incompatible with
+ * per-plane color management, such as via the AMD private properties or
+ * the new drm_plane color pipeline. The only compatible CRTC property
+ * with per-plane color management is the GAMMA property as it is
+ * applied post-blending.
+ *
+ * The AMD driver private color management properties are only exposed
+ * when the kernel is built explicitly with -DAMD_PRIVATE_COLOR. They
+ * are temporary building blocks on the path to full-fledged &drm_plane
+ * and &drm_crtc color pipelines and lay the driver's groundwork for the
+ * color pipelines.
+ *
+ * The AMD plane color pipeline describes AMD's &drm_colorops via the
+ * &drm_plane's COLOR_PIPELINE property.
+ *
+ * drm_crtc Properties
+ * -------------------
+ *
* The DC interface to HW gives us the following color management blocks
* per pipe (surface):
*
@@ -42,36 +69,93 @@
* - Surface regamma LUT (normalized)
* - Output CSC (normalized)
*
- * But these aren't a direct mapping to DRM color properties. The current DRM
- * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware
- * is essentially giving:
+ * But these aren't a direct mapping to DRM color properties. The
+ * current DRM interface exposes CRTC degamma, CRTC CTM and CRTC regamma
+ * while our hardware is essentially giving:
*
* Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM
*
- * The input gamma LUT block isn't really applicable here since it operates
- * on the actual input data itself rather than the HW fp representation. The
- * input and output CSC blocks are technically available to use as part of
- * the DC interface but are typically used internally by DC for conversions
- * between color spaces. These could be blended together with user
- * adjustments in the future but for now these should remain untouched.
+ * The input gamma LUT block isn't really applicable here since it
+ * operates on the actual input data itself rather than the HW fp
+ * representation. The input and output CSC blocks are technically
+ * available to use as part of the DC interface but are typically used
+ * internally by DC for conversions between color spaces. These could be
+ * blended together with user adjustments in the future but for now
+ * these should remain untouched.
+ *
+ * The pipe blending also happens after these blocks so we don't
+ * actually support any CRTC props with correct blending with multiple
+ * planes - but we can still support CRTC color management properties in
+ * DM in most single plane cases correctly with clever management of the
+ * DC interface in DM.
+ *
+ * As per DRM documentation, blocks should be in hardware bypass when
+ * their respective property is set to NULL. A linear DGM/RGM LUT should
+ * also be considered as putting the respective block into bypass mode.
+ *
+ * This means that the following configuration is assumed to be the
+ * default:
+ *
+ * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ... CRTC
+ * DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
+ *
+ * AMD Private Color Management on drm_plane
+ * -----------------------------------------
+ *
+ * The AMD private color management properties on a &drm_plane are:
+ *
+ * - AMD_PLANE_DEGAMMA_LUT
+ * - AMD_PLANE_DEGAMMA_LUT_SIZE
+ * - AMD_PLANE_DEGAMMA_TF
+ * - AMD_PLANE_HDR_MULT
+ * - AMD_PLANE_CTM
+ * - AMD_PLANE_SHAPER_LUT
+ * - AMD_PLANE_SHAPER_LUT_SIZE
+ * - AMD_PLANE_SHAPER_TF
+ * - AMD_PLANE_LUT3D
+ * - AMD_PLANE_LUT3D_SIZE
+ * - AMD_PLANE_BLEND_LUT
+ * - AMD_PLANE_BLEND_LUT_SIZE
+ * - AMD_PLANE_BLEND_TF
+ *
+ * The AMD private color management property on a &drm_crtc is:
*
- * The pipe blending also happens after these blocks so we don't actually
- * support any CRTC props with correct blending with multiple planes - but we
- * can still support CRTC color management properties in DM in most single
- * plane cases correctly with clever management of the DC interface in DM.
+ * - AMD_CRTC_REGAMMA_TF
*
- * As per DRM documentation, blocks should be in hardware bypass when their
- * respective property is set to NULL. A linear DGM/RGM LUT should also
- * considered as putting the respective block into bypass mode.
+ * Use of these properties is discouraged.
*
- * This means that the following
- * configuration is assumed to be the default:
+ * AMD plane color pipeline
+ * ------------------------
+ *
+ * The AMD &drm_plane color pipeline is advertised for DCN generations
+ * 3.0 and newer. It exposes these elements in this order:
+ *
+ * 1. 1D curve colorop
+ * 2. Multiplier
+ * 3. 3x4 CTM
+ * 4. 1D curve colorop
+ * 5. 1D LUT
+ * 6. 3D LUT
+ * 7. 1D curve colorop
+ * 8. 1D LUT
+ *
+ * The multiplier (#2) is a simple multiplier that is applied to all
+ * channels.
+ *
+ * The 3x4 CTM (#3) is a simple 3x4 matrix.
+ *
+ * #1 and #7 are non-linear to linear curves. #4 is a linear to
+ * non-linear curve. They support sRGB, PQ, and BT.709/BT.2020 EOTFs or
+ * their inverse.
+ *
+ * The 1D LUTs (#5 and #8) are plain 4096 entry LUTs.
+ *
+ * The 3D LUT (#6) is a tetrahedrally interpolated 17x17x17 LUT.
*
- * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ...
- * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
*/
#define MAX_DRM_LUT_VALUE 0xFFFF
+#define MAX_DRM_LUT32_VALUE 0xFFFFFFFF
#define SDR_WHITE_LEVEL_INIT_VALUE 80
/**
@@ -342,6 +426,21 @@ __extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
}
/**
+ * __extract_blob_lut32 - Extracts the DRM lut and lut size from a blob.
+ * @blob: DRM color mgmt property blob
+ * @size: lut size
+ *
+ * Returns:
+ * DRM LUT or NULL
+ */
+static const struct drm_color_lut32 *
+__extract_blob_lut32(const struct drm_property_blob *blob, uint32_t *size)
+{
+ *size = blob ? drm_color_lut32_size(blob) : 0;
+ return blob ? (struct drm_color_lut32 *)blob->data : NULL;
+}
+
+/**
* __is_lut_linear - check if the given lut is a linear mapping of values
* @lut: given lut to check values
* @size: lut size
@@ -415,6 +514,24 @@ static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
}
/**
+ * __drm_lut32_to_dc_gamma - convert the drm_color_lut to dc_gamma.
+ * @lut: DRM lookup table for color conversion
+ * @gamma: DC gamma to set entries
+ *
+ * Entries are normalized against MAX_DRM_LUT32_VALUE across
+ * MAX_COLOR_LUT_ENTRIES points.
+ */
+static void __drm_lut32_to_dc_gamma(const struct drm_color_lut32 *lut, struct dc_gamma *gamma)
+{
+ int i;
+
+ for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
+ gamma->entries.red[i] = dc_fixpt_from_fraction(lut[i].red, MAX_DRM_LUT32_VALUE);
+ gamma->entries.green[i] = dc_fixpt_from_fraction(lut[i].green, MAX_DRM_LUT32_VALUE);
+ gamma->entries.blue[i] = dc_fixpt_from_fraction(lut[i].blue, MAX_DRM_LUT32_VALUE);
+ }
+}
+
+/**
* __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
* @ctm: DRM color transformation matrix
* @matrix: DC CSC float matrix
@@ -566,12 +683,68 @@ static int __set_output_tf(struct dc_transfer_func *func,
return res ? 0 : -ENOMEM;
}
-static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream,
+/**
+ * __set_output_tf_32 - calculates the output transfer function based on expected input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
+static int __set_output_tf_32(struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size,
+ bool has_rom)
+{
+ struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
+ bool res;
+
+ cal_buffer.buffer_index = -1;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ if (func->tf == TRANSFER_FUNCTION_LINEAR) {
+ /*
+ * Color module doesn't like calculating regamma params
+ * on top of a linear input. But degamma params can be used
+ * instead to simulate this.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CUSTOM;
+ res = mod_color_calculate_degamma_params(NULL, func,
+ gamma, gamma != NULL);
+ } else {
+ /*
+ * Assume sRGB. The actual mapping will depend on whether the
+ * input was legacy or not.
+ */
+ if (gamma)
+ gamma->type = GAMMA_CS_TFM_1D;
+ res = mod_color_calculate_regamma_params(func, gamma, gamma != NULL,
+ has_rom, NULL, &cal_buffer);
+ }
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
+static int amdgpu_dm_set_atomic_regamma(struct dc_transfer_func *out_tf,
const struct drm_color_lut *regamma_lut,
uint32_t regamma_size, bool has_rom,
enum dc_transfer_func_predefined tf)
{
- struct dc_transfer_func *out_tf = &stream->out_transfer_func;
int ret = 0;
if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) {
@@ -639,6 +812,42 @@ static int __set_input_tf(struct dc_color_caps *caps, struct dc_transfer_func *f
return res ? 0 : -ENOMEM;
}
+/**
+ * __set_input_tf_32 - calculates the input transfer function based on expected
+ * input space.
+ * @caps: dc color capabilities
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut.
+ *
+ * Returns:
+ * 0 in case of success. -ENOMEM if fails.
+ */
+static int __set_input_tf_32(struct dc_color_caps *caps, struct dc_transfer_func *func,
+ const struct drm_color_lut32 *lut, uint32_t lut_size)
+{
+ struct dc_gamma *gamma = NULL;
+ bool res;
+
+ if (lut_size) {
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->type = GAMMA_CUSTOM;
+ gamma->num_entries = lut_size;
+
+ __drm_lut32_to_dc_gamma(lut, gamma);
+ }
+
+ res = mod_color_calculate_degamma_params(caps, func, gamma, gamma != NULL);
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
static enum dc_transfer_func_predefined
amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
{
@@ -668,6 +877,27 @@ amdgpu_tf_to_dc_tf(enum amdgpu_transfer_function tf)
}
}
+static enum dc_transfer_func_predefined
+amdgpu_colorop_tf_to_dc_tf(enum drm_colorop_curve_1d_type tf)
+{
+ switch (tf) {
+ case DRM_COLOROP_1D_CURVE_SRGB_EOTF:
+ case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF:
+ return TRANSFER_FUNCTION_SRGB;
+ case DRM_COLOROP_1D_CURVE_PQ_125_EOTF:
+ case DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF:
+ return TRANSFER_FUNCTION_PQ;
+ case DRM_COLOROP_1D_CURVE_BT2020_INV_OETF:
+ case DRM_COLOROP_1D_CURVE_BT2020_OETF:
+ return TRANSFER_FUNCTION_BT709;
+ case DRM_COLOROP_1D_CURVE_GAMMA22:
+ case DRM_COLOROP_1D_CURVE_GAMMA22_INV:
+ return TRANSFER_FUNCTION_GAMMA22;
+ default:
+ return TRANSFER_FUNCTION_LINEAR;
+ }
+}
+
static void __to_dc_lut3d_color(struct dc_rgb *rgb,
const struct drm_color_lut lut,
int bit_precision)
@@ -721,6 +951,59 @@ static void __drm_3dlut_to_dc_3dlut(const struct drm_color_lut *lut,
__to_dc_lut3d_color(&lut0[lut_i], lut[i], bit_depth);
}
+static void __to_dc_lut3d_32_color(struct dc_rgb *rgb,
+ const struct drm_color_lut32 lut,
+ int bit_precision)
+{
+ rgb->red = drm_color_lut32_extract(lut.red, bit_precision);
+ rgb->green = drm_color_lut32_extract(lut.green, bit_precision);
+ rgb->blue = drm_color_lut32_extract(lut.blue, bit_precision);
+}
+
+static void __drm_3dlut32_to_dc_3dlut(const struct drm_color_lut32 *lut,
+ uint32_t lut3d_size,
+ struct tetrahedral_params *params,
+ bool use_tetrahedral_9,
+ int bit_depth)
+{
+ struct dc_rgb *lut0;
+ struct dc_rgb *lut1;
+ struct dc_rgb *lut2;
+ struct dc_rgb *lut3;
+ int lut_i, i;
+
+ if (use_tetrahedral_9) {
+ lut0 = params->tetrahedral_9.lut0;
+ lut1 = params->tetrahedral_9.lut1;
+ lut2 = params->tetrahedral_9.lut2;
+ lut3 = params->tetrahedral_9.lut3;
+ } else {
+ lut0 = params->tetrahedral_17.lut0;
+ lut1 = params->tetrahedral_17.lut1;
+ lut2 = params->tetrahedral_17.lut2;
+ lut3 = params->tetrahedral_17.lut3;
+ }
+
+ for (lut_i = 0, i = 0; i < lut3d_size - 4; lut_i++, i += 4) {
+ /*
+ * The 3D LUT RGB values are distributed across four arrays,
+ * lut0-3, where lut0 holds 1229 entries and the others 1228
+ * each. The bit depth supported for the 3D LUT channel is
+ * 12-bit, but DC also supports 10-bit.
+ *
+ * TODO: improve the color pipeline API to let userspace set the
+ * bit depth and 3D LUT size/stride, as specified by VA-API.
+ */
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+ __to_dc_lut3d_32_color(&lut1[lut_i], lut[i + 1], bit_depth);
+ __to_dc_lut3d_32_color(&lut2[lut_i], lut[i + 2], bit_depth);
+ __to_dc_lut3d_32_color(&lut3[lut_i], lut[i + 3], bit_depth);
+ }
+ /* lut0 has 1229 points (lut_size/4 + 1) */
+ __to_dc_lut3d_32_color(&lut0[lut_i], lut[i], bit_depth);
+}
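The 1229/1228 split in the comment above follows directly from the 17-cube size; a worked check:

/* Worked example: a 17x17x17 LUT has 4913 entries. Striding by 4,
 * lut0 takes indices 0, 4, ..., 4912 (1229 entries, including the tail
 * element written after the loop) and lut1-3 take 1228 each. */
static_assert(17 * 17 * 17 == 1229 + 3 * 1228);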
+
/* amdgpu_dm_atomic_lut3d - set DRM 3D LUT to DC stream
* @drm_lut3d: user 3D LUT
* @drm_lut3d_size: size of 3D LUT
@@ -821,7 +1104,7 @@ int amdgpu_dm_verify_lut3d_size(struct amdgpu_device *adev,
struct dm_plane_state *dm_plane_state = to_dm_plane_state(plane_state);
const struct drm_color_lut *shaper = NULL, *lut3d = NULL;
uint32_t exp_size, size, dim_size = MAX_COLOR_3DLUT_SIZE;
- bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut;
+ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
/* shaper LUT is only available if 3D LUT color caps */
exp_size = has_3dlut ? MAX_COLOR_LUT_ENTRIES : 0;
@@ -885,33 +1168,33 @@ int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
}
/**
- * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * amdgpu_dm_check_crtc_color_mgmt: Check if DRM color props are programmable by DC.
* @crtc: amdgpu_dm crtc state
+ * @check_only: only check the color state without updating the DC stream
*
- * With no plane level color management properties we're free to use any
- * of the HW blocks as long as the CRTC CTM always comes before the
- * CRTC RGM and after the CRTC DGM.
- *
- * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
- * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
- * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ * This function verifies the CRTC LUT sizes, checks that there is enough
+ * space for the output transfer function and that its parameters can be
+ * calculated by the AMD color module. It also adjusts some settings for
+ * programming CRTC degamma at the plane stage, using the plane DGM block.
*
* The RGM block is typically more fully featured and accurate across
* all ASICs - DCE can't support a custom non-linear CRTC DGM.
*
* For supporting both plane level color management and CRTC level color
- * management at once we have to either restrict the usage of CRTC properties
- * or blend adjustments together.
+ * management at once we have to either restrict the usage of some CRTC
+ * properties or blend adjustments together.
*
* Returns:
- * 0 on success. Error code if setup fails.
+ * 0 on success. Error code if validation fails.
*/
-int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+int amdgpu_dm_check_crtc_color_mgmt(struct dm_crtc_state *crtc,
+ bool check_only)
{
struct dc_stream_state *stream = crtc->stream;
struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
bool has_rom = adev->asic_type <= CHIP_RAVEN;
- struct drm_color_ctm *ctm = NULL;
+ struct dc_transfer_func *out_tf;
const struct drm_color_lut *degamma_lut, *regamma_lut;
uint32_t degamma_size, regamma_size;
bool has_regamma, has_degamma;
@@ -940,6 +1223,14 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
crtc->cm_has_degamma = false;
crtc->cm_is_degamma_srgb = false;
+ if (check_only) {
+ out_tf = kvzalloc(sizeof(*out_tf), GFP_KERNEL);
+ if (!out_tf)
+ return -ENOMEM;
+ } else {
+ out_tf = &stream->out_transfer_func;
+ }
+
/* Setup regamma and degamma. */
if (is_legacy) {
/*
@@ -954,8 +1245,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* inverse color ramp in legacy userspace.
*/
crtc->cm_is_degamma_srgb = true;
- stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS;
- stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ out_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ out_tf->tf = TRANSFER_FUNCTION_SRGB;
/*
* Note: although we pass has_rom as parameter here, we never
* actually use ROM because the color module only takes the ROM
@@ -963,16 +1254,12 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
*
* See more in mod_color_calculate_regamma_params()
*/
- r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut,
+ r = __set_legacy_tf(out_tf, regamma_lut,
regamma_size, has_rom);
- if (r)
- return r;
} else {
regamma_size = has_regamma ? regamma_size : 0;
- r = amdgpu_dm_set_atomic_regamma(stream, regamma_lut,
+ r = amdgpu_dm_set_atomic_regamma(out_tf, regamma_lut,
regamma_size, has_rom, tf);
- if (r)
- return r;
}
/*
@@ -981,6 +1268,43 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
* have to place the CTM in the OCSC in that case.
*/
crtc->cm_has_degamma = has_degamma;
+ if (check_only)
+ kvfree(out_tf);
+
+ return r;
+}
+
+/**
+ * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream.
+ * @crtc: amdgpu_dm crtc state
+ *
+ * With no plane level color management properties we're free to use any
+ * of the HW blocks as long as the CRTC CTM always comes before the
+ * CRTC RGM and after the CRTC DGM.
+ *
+ * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear.
+ * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear.
+ * - The CRTC CTM will be placed in the gamut remap block if it is non-linear.
+ *
+ * The RGM block is typically more fully featured and accurate across
+ * all ASICs - DCE can't support a custom non-linear CRTC DGM.
+ *
+ * For supporting both plane level color management and CRTC level color
+ * management at once we have to either restrict the usage of CRTC properties
+ * or blend adjustments together.
+ *
+ * Returns:
+ * 0 on success. Error code if setup fails.
+ */
+int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+{
+ struct dc_stream_state *stream = crtc->stream;
+ struct drm_color_ctm *ctm = NULL;
+ int ret;
+
+ ret = amdgpu_dm_check_crtc_color_mgmt(crtc, false);
+ if (ret)
+ return ret;
/* Setup CRTC CTM. */
if (crtc->base.ctm) {
@@ -1138,6 +1462,360 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state,
}
static int
+__set_colorop_in_tf_1d_curve(struct dc_plane_state *dc_plane_state,
+ struct drm_colorop_state *colorop_state)
+{
+ struct dc_transfer_func *tf = &dc_plane_state->in_transfer_func;
+ struct drm_colorop *colorop = colorop_state->colorop;
+ struct drm_device *drm = colorop->dev;
+
+ if (colorop->type != DRM_COLOROP_1D_CURVE)
+ return -EINVAL;
+
+ if (!(BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs))
+ return -EINVAL;
+
+ if (colorop_state->bypass) {
+ tf->type = TF_TYPE_BYPASS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ return 0;
+ }
+
+ drm_dbg(drm, "Degamma colorop with ID: %d\n", colorop->base.id);
+
+ tf->type = TF_TYPE_PREDEFINED;
+ tf->tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_degamma(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ int i = 0;
+
+ old_colorop = colorop;
+
+ /* 1st op: 1d curve - degamma */
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_degam_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (!colorop_state)
+ return -EINVAL;
+
+ return __set_colorop_in_tf_1d_curve(dc_plane_state, colorop_state);
+}
+
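Editor's note: each __set_dm_plane_colorop_*() helper below repeats the same lookup — scan the atomic state for the new state attached to one specific colorop. The pattern in isolation, as a hedged sketch built on the for_each_new_colorop_in_state() iterator used above (the helper name is hypothetical):

/* Sketch: find the new state for a given colorop in this atomic commit. */
static struct drm_colorop_state *
find_new_colorop_state(struct drm_atomic_state *state,
		       struct drm_colorop *target)
{
	struct drm_colorop *colorop;
	struct drm_colorop_state *new_state;
	int i;

	for_each_new_colorop_in_state(state, colorop, new_state, i)
		if (new_state->colorop == target)
			return new_state;

	return NULL;	/* colorop not touched by this commit */
}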
+static int
+__set_dm_plane_colorop_3x4_matrix(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_property_blob *blob;
+ struct drm_color_ctm_3x4 *ctm = NULL;
+ int i = 0;
+
+ /* 3x4 matrix */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_CTM_3X4) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_CTM_3X4) {
+ drm_dbg(dev, "3x4 matrix colorop with ID: %d\n", colorop->base.id);
+ blob = colorop_state->data;
+ if (blob->length == sizeof(struct drm_color_ctm_3x4)) {
+ ctm = (struct drm_color_ctm_3x4 *) blob->data;
+ __drm_ctm_3x4_to_dc_matrix(ctm, dc_plane_state->gamut_remap_matrix.matrix);
+ dc_plane_state->gamut_remap_matrix.enable_remap = true;
+ dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
+ } else {
+ drm_warn(dev, "blob->length (%zu) isn't equal to drm_color_ctm_3x4 (%zu)\n",
+ blob->length, sizeof(struct drm_color_ctm_3x4));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
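Editor's note: the 3x4 matrix blob carries twelve coefficients — a 3x3 matrix plus one offset column — presumably in the same S31.32 sign-magnitude encoding as drm_color_ctm. A userspace-side decoding sketch under that assumption (the kernel side avoids floating point entirely):

#include <stdint.h>

/* Sketch: decode one S31.32 sign-magnitude coefficient (encoding assumed). */
static double ctm_coeff_to_double(uint64_t v)
{
	double d = (double)(v & ~(1ULL << 63)) / 4294967296.0; /* / 2^32 */

	return (v & (1ULL << 63)) ? -d : d;
}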
+static int
+__set_dm_plane_colorop_multiplier(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct drm_device *dev = colorop->dev;
+ int i = 0;
+
+ /* Multiplier */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_MULTIPLIER) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_MULTIPLIER) {
+ drm_dbg(dev, "Multiplier colorop with ID: %d\n", colorop->base.id);
+ dc_plane_state->hdr_mult = amdgpu_dm_fixpt_from_s3132(colorop_state->multiplier);
+ }
+
+ return 0;
+}
+
+static int
+__set_dm_plane_colorop_shaper(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ const struct drm_color_lut32 *shaper_lut;
+ struct drm_device *dev = colorop->dev;
+ bool enabled = false;
+ u32 shaper_size;
+ int i = 0, ret = 0;
+
+ /* 1D Curve - SHAPER TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_shaper_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE) {
+ drm_dbg(dev, "Shaper TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ ret = __set_output_tf(tf, 0, 0, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+
+ /* 1D LUT - SHAPER LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT) {
+ drm_dbg(dev, "Shaper LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ shaper_lut = __extract_blob_lut32(colorop_state->data, &shaper_size);
+ shaper_size = shaper_lut != NULL ? shaper_size : 0;
+
+ /* The custom LUT size must match the supported size */
+ if (shaper_size == colorop->size) {
+ ret = __set_output_tf_32(tf, shaper_lut, shaper_size, false);
+ if (ret)
+ return ret;
+ enabled = true;
+ }
+ }
+
+ if (!enabled)
+ tf->type = TF_TYPE_BYPASS;
+
+ return 0;
+}
+
+/* __set_colorop_3dlut - map a DRM 3D LUT to a DC 3D LUT
+ * @drm_lut3d: user 3D LUT
+ * @drm_lut3d_size: size of the 3D LUT
+ * @lut: DC 3D LUT
+ *
+ * Map the user 3D LUT data to the DC 3D LUT and set all bits necessary to
+ * program it on DCN accordingly.
+ *
+ * Returns:
+ * 0 on success. -EINVAL if drm_lut3d_size is zero.
+ */
+static int __set_colorop_3dlut(const struct drm_color_lut32 *drm_lut3d,
+ uint32_t drm_lut3d_size,
+ struct dc_3dlut *lut)
+{
+ if (!drm_lut3d_size) {
+ lut->state.bits.initialized = 0;
+ return -EINVAL;
+ }
+
+ /* Only supports 17x17x17 3D LUT (12-bit) now */
+ lut->lut_3d.use_12bits = true;
+ lut->lut_3d.use_tetrahedral_9 = false;
+
+ lut->state.bits.initialized = 1;
+ __drm_3dlut32_to_dc_3dlut(drm_lut3d, drm_lut3d_size, &lut->lut_3d,
+ lut->lut_3d.use_tetrahedral_9, 12);
+
+ return 0;
+}
+
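Editor's note: with the 17x17x17 restriction noted above, a well-formed blob holds 17^3 = 4913 drm_color_lut32 entries. A small size check, as a sketch (the constants mirror the code; this is not an exported API):

#define LUT3D_DIM	17
#define LUT3D_ENTRIES	(LUT3D_DIM * LUT3D_DIM * LUT3D_DIM)	/* 4913 */

/* Sketch: validate a 3D LUT blob length before programming it. */
static bool lut3d_blob_size_ok(size_t blob_len)
{
	return blob_len == LUT3D_ENTRIES * sizeof(struct drm_color_lut32);
}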
+static int
+__set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct amdgpu_device *adev = drm_to_adev(colorop->dev);
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_color_lut32 *lut3d;
+ uint32_t lut3d_size;
+ int i = 0, ret = 0;
+
+ /* 3D LUT */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_3D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) {
+ if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ drm_dbg(dev, "3D LUT is not supported by hardware\n");
+ return -EINVAL;
+ }
+
+ drm_dbg(dev, "3D LUT colorop with ID: %d\n", colorop->base.id);
+ lut3d = __extract_blob_lut32(colorop_state->data, &lut3d_size);
+ lut3d_size = lut3d != NULL ? lut3d_size : 0;
+ ret = __set_colorop_3dlut(lut3d, lut3d_size, &dc_plane_state->lut3d_func);
+ if (ret) {
+ drm_dbg(dev, "3D LUT colorop with ID: %d has LUT size = %d\n",
+ colorop->base.id, lut3d_size);
+ return ret;
+ }
+
+ /* A 3D LUT requires a shaper. If the shaper colorop is bypassed,
+ * enable the shaper curve with TRANSFER_FUNCTION_LINEAR.
+ */
+ if (tf->type == TF_TYPE_BYPASS) {
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = TRANSFER_FUNCTION_LINEAR;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ ret = __set_output_tf_32(tf, NULL, 0, false);
+ }
+ }
+
+ return ret;
+}
+
+static int
+__set_dm_plane_colorop_blend(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_colorop *old_colorop;
+ struct drm_colorop_state *colorop_state = NULL, *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->state;
+ enum dc_transfer_func_predefined default_tf = TRANSFER_FUNCTION_LINEAR;
+ struct dc_transfer_func *tf = &dc_plane_state->blend_tf;
+ const struct drm_color_lut32 *blend_lut = NULL;
+ struct drm_device *dev = colorop->dev;
+ uint32_t blend_size = 0;
+ int i = 0;
+
+ /* 1D Curve - BLND TF */
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ (BIT(new_colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_CURVE &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend TF colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf = amdgpu_colorop_tf_to_dc_tf(colorop_state->curve_1d_type);
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ /* 1D Curve - BLND LUT */
+ colorop = old_colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ old_colorop = colorop;
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == old_colorop &&
+ new_colorop_state->colorop->type == DRM_COLOROP_1D_LUT) {
+ colorop_state = new_colorop_state;
+ break;
+ }
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_1D_LUT &&
+ (BIT(colorop_state->curve_1d_type) & amdgpu_dm_supported_blnd_tfs)) {
+ drm_dbg(dev, "Blend LUT colorop with ID: %d\n", colorop->base.id);
+ tf->type = TF_TYPE_DISTRIBUTED_POINTS;
+ tf->tf = default_tf;
+ tf->sdr_ref_white_level = SDR_WHITE_LEVEL_INIT_VALUE;
+ blend_lut = __extract_blob_lut32(colorop_state->data, &blend_size);
+ blend_size = blend_lut != NULL ? blend_size : 0;
+
+ /* The custom LUT size must match the supported size */
+ if (blend_size == colorop->size)
+ __set_input_tf_32(NULL, tf, blend_lut, blend_size);
+ }
+
+ return 0;
+}
+
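Editor's note: the shaper and blend helpers share one composition rule — a predefined TF colorop establishes the base curve, an optional 1D LUT colorop layers custom points on top of it, and if neither is active the transfer function stays in bypass. Abstracted into a sketch (the two predicates are hypothetical placeholders for the state checks above):

/* Sketch: how a TF colorop and a LUT colorop share one dc_transfer_func. */
static void compose_tf_and_lut(struct dc_transfer_func *tf,
			       bool tf_active, bool lut_active_and_sized)
{
	bool enabled = false;

	if (tf_active) {		/* predefined curve programmed */
		tf->type = TF_TYPE_DISTRIBUTED_POINTS;
		enabled = true;
	}
	if (lut_active_and_sized) {	/* custom points layered on top */
		tf->type = TF_TYPE_DISTRIBUTED_POINTS;
		enabled = true;
	}
	if (!enabled)
		tf->type = TF_TYPE_BYPASS;
}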
+static int
amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
struct dc_plane_state *dc_plane_state)
{
@@ -1187,6 +1865,93 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state,
return 0;
}
+static int
+amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state,
+ struct dc_plane_state *dc_plane_state)
+{
+ struct drm_colorop *colorop = plane_state->color_pipeline;
+ struct drm_device *dev = plane_state->plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+
+ /* 1D Curve - DEGAM TF */
+ if (!colorop)
+ return -EINVAL;
+
+ ret = __set_dm_plane_colorop_degamma(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* Multiplier */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no multiplier colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_multiplier(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* 3x4 matrix */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3x4 matrix colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3x4_matrix(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D Curve & LUT - SHAPER TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Shaper TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_shaper(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* The Shaper LUT colorop was already handled above, just skip it here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ /* 3D LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no 3D LUT colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_3dlut(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+ }
+
+ /* 1D Curve & LUT - BLND TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+ drm_dbg(dev, "no Blend TF colorop found\n");
+ return -EINVAL;
+ }
+
+ ret = __set_dm_plane_colorop_blend(plane_state, dc_plane_state, colorop);
+ if (ret)
+ return ret;
+
+ /* The BLND LUT colorop was already handled above, just skip it here */
+ colorop = colorop->next;
+ if (!colorop)
+ return -EINVAL;
+
+ return 0;
+}
+
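Editor's note: the walker above consumes the pipeline in a fixed order, so it must stay in lockstep with the list built by amdgpu_dm_initialize_default_pipeline() later in this patch. The assumed order, summarized as an illustrative enum (names hypothetical):

/* Sketch: the op order amdgpu_dm_plane_set_colorop_properties() expects. */
enum dm_colorop_stage {
	DM_OP_DEGAMMA_TF,	/* 1D curve */
	DM_OP_MULTIPLIER,	/* HDR multiplier */
	DM_OP_CTM_3X4,		/* 3x4 matrix */
	DM_OP_SHAPER_TF,	/* only when dpp.hw_3d_lut */
	DM_OP_SHAPER_LUT,	/* only when dpp.hw_3d_lut */
	DM_OP_3D_LUT,		/* only when dpp.hw_3d_lut */
	DM_OP_BLEND_TF,		/* 1D curve */
	DM_OP_BLEND_LUT,	/* 1D LUT */
};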
/**
* amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
* @crtc: amdgpu_dm crtc state
@@ -1283,5 +2048,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
dc_plane_state->input_csc_color_matrix.enable_adjustment = false;
}
+ if (!amdgpu_dm_plane_set_colorop_properties(plane_state, dc_plane_state))
+ return 0;
+
return amdgpu_dm_plane_set_color_properties(plane_state, dc_plane_state);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
new file mode 100644
index 000000000000..d585618b8064
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drm_print.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_property.h>
+#include <drm/drm_colorop.h>
+
+#include "amdgpu.h"
+#include "amdgpu_dm_colorop.h"
+#include "dc.h"
+
+const u64 amdgpu_dm_supported_degam_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+
+const u64 amdgpu_dm_supported_shaper_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22);
+
+const u64 amdgpu_dm_supported_blnd_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_PQ_125_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_BT2020_INV_OETF) |
+ BIT(DRM_COLOROP_1D_CURVE_GAMMA22_INV);
+
+#define MAX_COLOR_PIPELINE_OPS 10
+
+#define LUT3D_SIZE 17
+
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list)
+{
+ struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret;
+ int i = 0;
+
+ memset(ops, 0, sizeof(ops));
+
+ /* 1D curve - DEGAM TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_degam_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ list->type = ops[i]->base.id;
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
+
+ i++;
+
+ /* Multiplier */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_mult_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 3x4 matrix */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
+ /* 1D curve - SHAPER TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_shaper_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - SHAPER LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+
+ /* 3D LUT */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_3dlut_init(dev, ops[i], plane, LUT3D_SIZE,
+ DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+
+ i++;
+ }
+
+ /* 1D curve - BLND TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane,
+ amdgpu_dm_supported_blnd_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 1D LUT - BLND LUT */
+ ops[i] = kzalloc(sizeof(struct drm_colorop), GFP_KERNEL);
+ if (!ops[i]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, ops[i], plane, MAX_COLOR_LUT_ENTRIES,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i-1], ops[i]);
+ return 0;
+
+cleanup:
+ if (ret == -ENOMEM)
+ drm_err(plane->dev, "KMS: Failed to allocate colorop\n");
+
+ drm_colorop_pipeline_destroy(dev);
+
+ return ret;
+}
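Editor's note: each op is linked to its predecessor with drm_colorop_set_next_property(), so the plane's COLOR_PIPELINE enum only needs to advertise the head of the chain. A trimmed sketch of the create-and-link step using the same init calls as above (error unwinding simplified; the real function frees through drm_colorop_pipeline_destroy()):

/* Sketch: create two ops and chain them into one pipeline. */
static int link_two_ops(struct drm_device *dev, struct drm_plane *plane,
			u64 supported_tfs)
{
	struct drm_colorop *a, *b;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!a || !b)
		return -ENOMEM;	/* real code also frees partial state */

	ret = drm_plane_colorop_curve_1d_init(dev, a, plane, supported_tfs,
					      DRM_COLOROP_FLAG_ALLOW_BYPASS);
	if (!ret)
		ret = drm_plane_colorop_mult_init(dev, b, plane,
						  DRM_COLOROP_FLAG_ALLOW_BYPASS);
	if (!ret)
		drm_colorop_set_next_property(a, b);	/* a -> b */

	return ret;
}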
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
new file mode 100644
index 000000000000..2e1617ffc8ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __AMDGPU_DM_COLOROP_H__
+#define __AMDGPU_DM_COLOROP_H__
+
+extern const u64 amdgpu_dm_supported_degam_tfs;
+extern const u64 amdgpu_dm_supported_shaper_tfs;
+extern const u64 amdgpu_dm_supported_blnd_tfs;
+
+int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list);
+
+#endif /* __AMDGPU_DM_COLOROP_H__*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 033bd817d871..e20aa7438066 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 3da056c8d20b..95bdb8699d7f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 45feb404b097..697e232acebf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -218,8 +218,10 @@ static void amdgpu_dm_idle_worker(struct work_struct *work)
break;
}
- if (idle_work->enable)
+ if (idle_work->enable) {
+ dc_post_update_surfaces_to_stream(idle_work->dm->dc);
dc_allow_idle_optimizations(idle_work->dm->dc, true);
+ }
mutex_unlock(&idle_work->dm->dc_lock);
}
idle_work->dm->idle_workqueue->running = false;
@@ -246,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+ int r;
mutex_lock(&dm->dc_lock);
@@ -273,9 +277,20 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
vblank_work->acrtc->dm_irq_params.allow_sr_entry);
}
- if (dm->active_vblank_irq_count == 0)
+ if (dm->active_vblank_irq_count == 0) {
+ dc_post_update_surfaces_to_stream(dm->dc);
+
+ r = amdgpu_dpm_pause_power_profile(adev, true);
+ if (r)
+ dev_warn(adev->dev, "failed to set default power profile mode\n");
+
dc_allow_idle_optimizations(dm->dc, true);
+ r = amdgpu_dpm_pause_power_profile(adev, false);
+ if (r)
+ dev_warn(adev->dev, "failed to restore the power profile mode\n");
+ }
+
mutex_unlock(&dm->dc_lock);
dc_stream_release(vblank_work->stream);
@@ -293,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;
- if (acrtc->otg_inst == -1)
- goto skip;
+ if (enable && !acrtc->base.enabled) {
+ drm_dbg_vbl(crtc->dev,
+ "Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
+ acrtc->crtc_id, acrtc->base.enabled);
+ return -EINVAL;
+ }
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
@@ -317,13 +336,17 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
dc->config.disable_ips != DMUB_IPS_DISABLE_ALL &&
sr_supported && vblank->config.disable_immediate)
drm_crtc_vblank_restore(crtc);
+ }
- /* vblank irq on -> Only need vupdate irq in vrr mode */
- if (amdgpu_dm_crtc_vrr_active(acrtc_state))
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
- } else {
- /* vblank irq off -> vupdate irq off */
- rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ if (dc_supports_vrr(dm->dc->ctx->dce_version)) {
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ }
}
if (rc)
@@ -375,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
-skip:
+
if (amdgpu_in_reset(adev))
return 0;
@@ -713,7 +736,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
{
struct amdgpu_crtc *acrtc = NULL;
struct drm_plane *cursor_plane;
- bool is_dcn;
+ bool has_degamma;
int res = -ENOMEM;
cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
@@ -752,20 +775,18 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
dm->adev->mode_info.crtcs[crtc_index] = acrtc;
- /* Don't enable DRM CRTC degamma property for DCE since it doesn't
- * support programmable degamma anywhere.
- */
- is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch;
- /* Dont't enable DRM CRTC degamma property for DCN401 since the
- * pre-blending degamma LUT doesn't apply to cursor, and therefore
- * can't work similar to a post-blending degamma LUT as in other hw
- * versions.
- * TODO: revisit it once KMS plane color API is merged.
+ /* Don't enable the DRM CRTC degamma property when:
+ * 1. the plane color pipeline replaces degamma,
+ * 2. on DCE, which doesn't support programmable degamma anywhere,
+ * 3. on DCN401, where the pre-blending degamma LUT doesn't apply to cursor.
*/
- drm_crtc_enable_color_mgmt(&acrtc->base,
- (is_dcn &&
- dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ?
- MAX_COLOR_LUT_ENTRIES : 0,
+ if (plane->color_pipeline_property)
+ has_degamma = false;
+ else
+ has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch &&
+ dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01;
+
+ drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0,
true, MAX_COLOR_LUT_ENTRIES);
drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index b726bcd18e29..a9839485f2a2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -758,6 +759,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
int max_param_num = 11;
enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
bool disable_hpd = false;
+ bool supports_hpd = link->irq_source_hpd != DC_IRQ_SOURCE_INVALID;
bool valid_test_pattern = false;
uint8_t param_nums = 0;
/* init with default 80bit custom pattern */
@@ -849,7 +851,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* because it might have been disabled after a test pattern was set.
* AUX depends on HPD * sequence dependent, do not move!
*/
- if (!disable_hpd)
+ if (supports_hpd && !disable_hpd)
dc_link_enable_hpd(link);
prefer_link_settings.lane_count = link->verified_link_cap.lane_count;
@@ -887,7 +889,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
* Need disable interrupt to avoid SW driver disable DP output. This is
* done after the test pattern is set.
*/
- if (valid_test_pattern && disable_hpd)
+ if (valid_test_pattern && supports_hpd && disable_hpd)
dc_link_disable_hpd(link);
kfree(wr_buf);
@@ -1301,7 +1303,8 @@ static int odm_combine_segments_show(struct seq_file *m, void *unused)
if (connector->status != connector_status_connected)
return -ENODEV;
- if (pipe_ctx != NULL && pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
+ if (pipe_ctx && pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments)
pipe_ctx->stream_res.tg->funcs->get_odm_combine_segments(pipe_ctx->stream_res.tg, &segments);
seq_printf(m, "%d\n", segments);
@@ -3106,6 +3109,35 @@ static int replay_get_state(void *data, u64 *val)
}
/*
+ * Start / stop Replay residency capture
+ */
+static int replay_set_residency(void *data, u64 val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ bool is_start = (val != 0);
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, is_start, PR_RESIDENCY_MODE_PHY);
+ return 0;
+}
+
+/*
+ * Read Replay residency
+ */
+static int replay_get_residency(void *data, u64 *val)
+{
+ struct amdgpu_dm_connector *connector = data;
+ struct dc_link *link = connector->dc_link;
+ u32 residency = 0;
+
+ link->dc->link_srv->edp_replay_residency(link, &residency, false, PR_RESIDENCY_MODE_PHY);
+ *val = (u64)residency;
+
+ return 0;
+}
+
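Editor's note: replay_set_residency() and replay_get_residency() form a standard u64 debugfs attribute pair, bound into one fops by DEFINE_DEBUGFS_ATTRIBUTE() a few hunks below. The general shape of that idiom, as a standalone sketch:

#include <linux/debugfs.h>

static u64 demo_val;	/* hypothetical backing value */

static int demo_get(void *data, u64 *val) { *val = demo_val; return 0; }
static int demo_set(void *data, u64 val) { demo_val = val; return 0; }

DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");
/* usage: debugfs_create_file_unsafe("demo", 0644, dir, NULL, &demo_fops); */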
+/*
* Read PSR state
*/
static int psr_get(void *data, u64 *val)
@@ -3324,7 +3356,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_g
dmcub_trace_event_state_set, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(replay_state_fops, replay_get_state, NULL, "%llu\n");
-
+DEFINE_DEBUGFS_ATTRIBUTE(replay_residency_fops, replay_get_residency, replay_set_residency,
+ "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL,
"%llu\n");
@@ -3502,6 +3535,8 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file("replay_capability", 0444, dir, connector,
&replay_capability_fops);
debugfs_create_file("replay_state", 0444, dir, connector, &replay_state_fops);
+ debugfs_create_file_unsafe("replay_residency", 0444, dir,
+ connector, &replay_residency_fops);
debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
debugfs_create_file_unsafe("psr_residency", 0444, dir,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 071200473c27..122cdc124b3b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index b1d1897f5eaf..85ce558cefc5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
@@ -200,6 +201,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_link_adjustment link_adjust;
struct mod_hdcp_display_adjustment display_adjust;
unsigned int conn_index = aconnector->base.index;
+ const struct dc *dc = aconnector->dc_link->dc;
guard(mutex)(&hdcp_w->mutex);
drm_connector_get(&aconnector->base);
@@ -222,6 +224,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
link_adjust.auth_delay = 2;
+ link_adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -229,6 +232,9 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
link_adjust.hdcp1.disable = 1;
link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
}
+ link_adjust.hdcp2.use_fw_locality_check =
+ (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link_adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
schedule_delayed_work(&hdcp_w->property_validate_dwork,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
@@ -532,6 +538,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
struct dc_sink *sink = NULL;
bool link_is_hdcp14 = false;
+ const struct dc *dc = aconnector->dc_link->dc;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -571,7 +578,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.usb4_enabled = config->usb4_enabled;
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
link->adjust.auth_delay = 2;
+ link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;
link->adjust.hdcp1.disable = 0;
+ link->adjust.hdcp2.use_fw_locality_check = (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable);
+ link->adjust.hdcp2.use_sw_locality_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
@@ -765,29 +775,26 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
struct mod_hdcp_ddc_funcs *ddc_funcs = &config->ddc.funcs;
config->psp.handle = &adev->psp;
- if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
+ if (dc->ctx->dce_version == DCN_VERSION_3_1 ||
dc->ctx->dce_version == DCN_VERSION_3_14 ||
dc->ctx->dce_version == DCN_VERSION_3_15 ||
- dc->ctx->dce_version == DCN_VERSION_3_5 ||
+ dc->ctx->dce_version == DCN_VERSION_3_16 ||
+ dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21 ||
+ dc->ctx->dce_version == DCN_VERSION_3_5 ||
dc->ctx->dce_version == DCN_VERSION_3_51 ||
- dc->ctx->dce_version == DCN_VERSION_3_6 ||
- dc->ctx->dce_version == DCN_VERSION_3_16)
+ dc->ctx->dce_version == DCN_VERSION_3_6 ||
+ dc->ctx->dce_version == DCN_VERSION_4_01)
config->psp.caps.dtm_v3_supported = 1;
+
config->ddc.handle = dc_get_link_at_index(dc, i);
ddc_funcs->write_i2c = lp_write_i2c;
ddc_funcs->read_i2c = lp_read_i2c;
ddc_funcs->write_dpcd = lp_write_dpcd;
ddc_funcs->read_dpcd = lp_read_dpcd;
-
- config->debug.lc_enable_sw_fallback = dc->debug.hdcp_lc_enable_sw_fallback;
- if (dc->caps.fused_io_supported || dc->debug.hdcp_lc_force_fw_enable) {
- ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
- ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
- } else {
- ddc_funcs->atomic_write_poll_read_i2c = NULL;
- ddc_funcs->atomic_write_poll_read_aux = NULL;
- }
+ ddc_funcs->atomic_write_poll_read_i2c = lp_atomic_write_poll_read_i2c;
+ ddc_funcs->atomic_write_poll_read_aux = lp_atomic_write_poll_read_aux;
memset(hdcp_work[i].aconnector, 0,
sizeof(struct amdgpu_dm_connector *) *
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 69b445b011c8..4faa344f196e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2019 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 9e3e51a2dc49..ac98c746c3de 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -82,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
+ case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;
@@ -129,6 +131,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
edid_caps->serial_number = edid_buf->serial;
edid_caps->manufacture_week = edid_buf->mfg_week;
edid_caps->manufacture_year = edid_buf->mfg_year;
+ edid_caps->analog = !(edid_buf->input & DRM_EDID_INPUT_DIGITAL);
drm_edid_get_monitor_name(edid_buf,
edid_caps->display_name,
@@ -995,8 +998,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct amdgpu_dm_connector *aconnector = link->priv;
struct drm_connector *connector = &aconnector->base;
struct i2c_adapter *ddc;
- int retry = 3;
- enum dc_edid_status edid_status;
+ int retry = 25;
+ enum dc_edid_status edid_status = EDID_NO_RESPONSE;
const struct drm_edid *drm_edid;
const struct edid *edid;
@@ -1026,7 +1029,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
}
if (!drm_edid)
- return EDID_NO_RESPONSE;
+ continue;
edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
if (!edid ||
@@ -1044,7 +1047,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
&sink->dc_edid,
&sink->edid_caps);
- } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
+ } while ((edid_status == EDID_BAD_CHECKSUM || edid_status == EDID_NO_RESPONSE) && --retry > 0);
if (edid_status != EDID_OK)
DRM_ERROR("EDID err: %d, on connector: %s",
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index b61e210f6246..0a2a3f233a0e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -475,6 +476,7 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h;
struct list_head *hnd_list_l;
@@ -511,6 +513,9 @@ void amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_disable(dev);
}
void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
@@ -536,6 +541,7 @@ void amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
{
+ struct drm_device *dev = adev_to_drm(adev);
int src;
struct list_head *hnd_list_h, *hnd_list_l;
unsigned long irq_table_flags;
@@ -556,6 +562,9 @@ void amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
}
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
}
/*
@@ -892,6 +901,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_connector_list_iter iter;
int irq_type;
int i;
+ bool use_polling = false;
/* First, clear all hpd and hpdrx interrupts */
for (i = DC_IRQ_SOURCE_HPD1; i <= DC_IRQ_SOURCE_HPD6RX; i++) {
@@ -905,6 +915,8 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct amdgpu_dm_connector *amdgpu_dm_connector;
const struct dc_link *dc_link;
+ use_polling |= connector->polled != DRM_CONNECTOR_POLL_HPD;
+
if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
continue;
@@ -946,6 +958,9 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (use_polling)
+ drm_kms_helper_poll_init(dev);
}
/**
@@ -996,4 +1011,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ if (dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_fini(dev);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
index ba17c23b2706..4f6b58f4f90d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 6c9de834455b..3c9995275cbd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2020 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 77a9d2c7d318..dbd1da4d85d3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -329,6 +330,34 @@ static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnect
return true;
}
+static bool retrieve_branch_specific_data(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
+ struct drm_dp_mst_port *port_parent;
+ struct drm_dp_aux *immediate_upstream_aux;
+ struct drm_dp_desc branch_desc;
+
+ if (!port->parent)
+ return false;
+
+ port_parent = port->parent->port_parent;
+
+ immediate_upstream_aux = port_parent ? &port_parent->aux : port->mgr->aux;
+
+ if (drm_dp_read_desc(immediate_upstream_aux, &branch_desc, true))
+ return false;
+
+ aconnector->branch_ieee_oui = (branch_desc.ident.oui[0] << 16) +
+ (branch_desc.ident.oui[1] << 8) +
+ (branch_desc.ident.oui[2]);
+
+ drm_dbg_dp(port->aux.drm_dev, "MST branch oui 0x%x detected at %s\n",
+ aconnector->branch_ieee_oui, connector->name);
+
+ return true;
+}
+
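Editor's note: the three descriptor OUI bytes are packed big-endian into a host u32, which is what makes the later comparison against DP_BRANCH_DEVICE_ID_90CC24 (0x90CC24) work. The packing in isolation:

/* Sketch: pack a 3-byte IEEE OUI into a host u32, MSB first. */
static u32 oui_to_u32(const u8 oui[3])
{
	return ((u32)oui[0] << 16) | ((u32)oui[1] << 8) | oui[2];
}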
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -668,6 +697,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_set_path_property(connector, pathprop);
+ if (!retrieve_branch_specific_data(aconnector))
+ aconnector->branch_ieee_oui = 0;
+
/*
* Initialize connector state before adding the connectror to drm and
* framebuffer lists
@@ -822,13 +854,20 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
drm_connector_attach_dp_subconnector_property(&aconnector->base);
}
-int dm_mst_get_pbn_divider(struct dc_link *link)
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link)
{
+ uint32_t pbn_div_x100;
+ uint64_t dividend, divisor;
+
if (!link)
return 0;
- return dc_link_bandwidth_kbps(link,
- dc_link_get_link_cap(link)) / (8 * 1000 * 54);
+ dividend = (uint64_t)dc_link_bandwidth_kbps(link, dc_link_get_link_cap(link)) * 100;
+ divisor = 8 * 1000 * 54;
+
+ pbn_div_x100 = div64_u64(dividend, divisor);
+
+ return dfixed_const(pbn_div_x100) / 100;
}
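Editor's note: the rework keeps two fractional digits before converting to the 20.12 fixed-point value dfixed_const() produces. A worked example under an assumed HBR2 x4 link (17,280,000 kbps of 8b/10b payload bandwidth):

/* Worked example (link rate assumed): HBR2 x4 -> PBN divider of 40.00. */
static uint32_t example_pbn_divider(void)
{
	uint64_t dividend = 17280000ULL * 100;	/* kbps scaled by 100 */
	uint64_t divisor = 8 * 1000 * 54;	/* 432,000 */
	uint32_t pbn_div_x100 = div64_u64(dividend, divisor);	/* 4000 */

	return dfixed_const(pbn_div_x100) / 100;	/* 40 in 20.12 format */
}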
struct dsc_mst_fairness_params {
@@ -845,26 +884,28 @@ struct dsc_mst_fairness_params {
};
#if defined(CONFIG_DRM_AMD_DC_FP)
-static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
+static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
{
- u8 link_coding_cap;
- uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
+ uint64_t effective_kbps = (uint64_t)kbps;
- link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
- if (link_coding_cap == DP_128b_132b_ENCODING)
- fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;
+ if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
+ effective_kbps *= 1006;
+ effective_kbps = div_u64(effective_kbps, 1000);
+ }
- return fec_overhead_multiplier_x1000;
+ return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
}
-static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
+static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
{
- u64 peak_kbps = kbps;
+ uint64_t pbn_effective = (uint64_t)pbn;
+
+ if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
+ pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
+ else
+ pbn_effective *= 1000;
- peak_kbps *= 1006;
- peak_kbps *= fec_overhead_multiplier_x1000;
- peak_kbps = div_u64(peak_kbps, 1000 * 1000);
- return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
+ return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
}
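Editor's note: kbps_to_pbn() and pbn_to_kbps() are near-inverses built around 1 PBN = 54 * 8 * 1000 / 64 = 6750 kbps, with an optional 0.6% margin folded in on one side. A worked round trip with an assumed 1,000,000 kbps stream and no margin:

/* Worked example: 1,000,000 kbps (stream rate assumed for illustration). */
static uint64_t example_pbn_round_trip(void)
{
	/* 1,000,000 * 64 / 432,000 = 148.1..., rounded up to 149 PBN */
	uint64_t pbn = DIV64_U64_ROUND_UP(1000000ULL * 64, 54 * 8 * 1000);

	/* back: 149 * 54 * 8 * 1000 / 64 = 1,005,750 kbps (>= the input) */
	return DIV_U64_ROUND_UP(pbn * 8 * 54 * 1000, 64);
}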
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
@@ -935,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;
- kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
+ kbps = pbn_to_kbps(pbn, false);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
@@ -964,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
- kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
+ kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {
@@ -1065,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
int var_pbn;
for (i = 0; i < count; i++) {
@@ -1098,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
var_pbn = vars[next_index].pbn;
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1158,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
- uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
struct drm_connector_state *new_conn_state;
memset(params, 0, sizeof(params));
@@ -1239,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,
@@ -1261,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1269,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
- vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,
@@ -1724,18 +1762,6 @@ clean_exit:
return ret;
}
-static uint32_t kbps_from_pbn(unsigned int pbn)
-{
- uint64_t kbps = (uint64_t)pbn;
-
- kbps *= (1000000 / PEAK_FACTOR_X1000);
- kbps *= 8;
- kbps *= 54;
- kbps /= 64;
-
- return (uint32_t)kbps;
-}
-
static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{
@@ -1763,14 +1789,20 @@ static bool dp_get_link_current_set_bw(struct drm_dp_aux *aux, uint32_t *cur_lin
union lane_count_set lane_count;
u8 dp_link_encoding;
u8 link_bw_set = 0;
+ u8 data[16] = {0};
*cur_link_bw = 0;
- if (drm_dp_dpcd_read(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, &dp_link_encoding, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LANE_COUNT_SET, &lane_count.raw, 1) != 1 ||
- drm_dp_dpcd_read(aux, DP_LINK_BW_SET, &link_bw_set, 1) != 1)
+ if (drm_dp_dpcd_read(aux, DP_LINK_BW_SET, data, 16) != 16)
return false;
+ dp_link_encoding = data[DP_MAIN_LINK_CHANNEL_CODING_SET - DP_LINK_BW_SET];
+ link_bw_set = data[DP_LINK_BW_SET - DP_LINK_BW_SET];
+ lane_count.raw = data[DP_LANE_COUNT_SET - DP_LINK_BW_SET];
+
+ drm_dbg_dp(aux->drm_dev, "MST_DSC downlink setting: %d, 0x%x x %d\n",
+ dp_link_encoding, link_bw_set, lane_count.bits.LANE_COUNT_SET);
+
switch (dp_link_encoding) {
case DP_8b_10b_ENCODING:
link_rate = link_bw_set;
@@ -1828,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link));
cur_link_settings = stream->link->verified_link_cap;
root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
- virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);
/* pick the end to end bw bottleneck */
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
@@ -1867,8 +1899,10 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_link_bw = aconnector->mst_local_bw;
}
- if (end_link_bw > 0 && stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ if (end_link_bw > 0 &&
+ stream_kbps > end_link_bw &&
+ aconnector->branch_ieee_oui != DP_BRANCH_DEVICE_ID_90CC24) {
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link. "
"Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1879,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;
if (immediate_upstream_port) {
- virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
+ virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
} else {
/* For topology LCT 1 case - only one mstb*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 600d6e221011..6f7ea684b555 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2012-15 Advanced Micro Devices, Inc.
*
@@ -59,7 +60,7 @@ enum mst_msg_ready_type {
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
-int dm_mst_get_pbn_divider(struct dc_link *link);
+uint32_t dm_mst_get_pbn_divider(struct dc_link *link);
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index eef51652ca35..2e3ee78999d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -37,6 +37,7 @@
#include "amdgpu_display.h"
#include "amdgpu_dm_trace.h"
#include "amdgpu_dm_plane.h"
+#include "amdgpu_dm_colorop.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
@@ -146,7 +147,7 @@ static void amdgpu_dm_plane_add_modifier(uint64_t **mods, uint64_t *size, uint64
if (*cap - *size < 1) {
uint64_t new_cap = *cap * 2;
- uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
+ uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL);
if (!new_mods) {
kfree(*mods);
@@ -732,7 +733,7 @@ static int amdgpu_dm_plane_get_plane_modifiers(struct amdgpu_device *adev, unsig
if (adev->family < AMDGPU_FAMILY_AI)
return 0;
- *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
+ *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);
if (plane_type == DRM_PLANE_TYPE_CURSOR) {
amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
@@ -1633,7 +1634,7 @@ dm_atomic_plane_attach_color_mgmt_properties(struct amdgpu_display_manager *dm,
drm_object_attach_property(&plane->base,
dm->adev->mode_info.plane_ctm_property, 0);
- if (dpp_color_caps.hw_3d_lut) {
+ if (dpp_color_caps.hw_3d_lut || dm->dc->caps.color.mpc.preblend) {
drm_object_attach_property(&plane->base,
mode_info.plane_shaper_lut_property, 0);
drm_object_attach_property(&plane->base,
@@ -1782,6 +1783,39 @@ dm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
+#else
+
+#define MAX_COLOR_PIPELINES 5
+
+static int
+dm_plane_init_colorops(struct drm_plane *plane)
+{
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int len = 0;
+ int ret;
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return 0;
+
+ /* initialize pipeline */
+ if (dc->ctx->dce_version >= DCN_VERSION_3_0) {
+ ret = amdgpu_dm_initialize_default_pipeline(plane, &pipelines[len]);
+ if (ret) {
+ drm_err(plane->dev, "Failed to create color pipeline for plane %d: %d\n",
+ plane->base.id, ret);
+ return ret;
+ }
+ len++;
+
+ /* Create COLOR_PIPELINE property and attach */
+ drm_plane_create_color_pipeline_property(plane, pipelines, len);
+ }
+
+ return 0;
+}
#endif
static const struct drm_plane_funcs dm_plane_funcs = {
@@ -1890,7 +1924,12 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
#ifdef AMD_PRIVATE_COLOR
dm_atomic_plane_attach_color_mgmt_properties(dm, plane);
+#else
+ res = dm_plane_init_colorops(plane);
+ if (res)
+ return res;
#endif
+
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 848c5b4bb301..11b2ea6edf95 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
@@ -97,6 +98,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_single_disp_config *dc_cfg =
&pp_display_cfg->disp_configs[i];
adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
+ adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock;
}
amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index ff7b867ae98b..fd491b7a3cd7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
@@ -26,7 +27,6 @@
#include "amdgpu_dm_psr.h"
#include "dc_dmub_srv.h"
#include "dc.h"
-#include "dm_helpers.h"
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index e2366321a3c1..4fb8626913cf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
index 41f07f13a7b5..da94e3544b65 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2023 Advanced Micro Devices, Inc.
*
@@ -30,7 +31,7 @@
#include "amdgpu_dm.h"
#include "modules/power/power_helpers.h"
#include "dmub/inc/dmub_cmd.h"
-#include "dc/inc/link.h"
+#include "dc/inc/link_service.h"
/*
* amdgpu_dm_link_supports_replay() - check if the link supports replay
@@ -161,7 +162,7 @@ bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait)
if (link) {
link->dc->link_srv->edp_setup_replay(link, stream);
- link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total);
+ link->dc->link_srv->edp_set_coasting_vtotal(link, stream->timing.v_total, 0);
DRM_DEBUG_DRIVER("Enabling replay...\n");
link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, wait, false, NULL);
return true;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
index 8126bdb1eb6b..73b6c67ae5e7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 0005f5f8f34f..8550d5e8b753 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
@@ -52,11 +53,11 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
func_name, line);
}
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx)
{
}
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx)
{
}
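The rename turns the single-shot SMU trace into an enter/exit pair, so a message send can be bracketed and its outcome recorded. A sketch of the intended call pattern around a send (the wait helper and the DALSMC_Result_OK comparison are assumptions, not shown in this hunk):

	dm_trace_smu_enter(msg_id, param_in, delay_us, ctx);
	reg = wait_for_smu_response(clk_mgr);	/* assumed helper */
	dm_trace_smu_exit(reg == DALSMC_Result_OK, reg, ctx);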
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
index 95f890fda8aa..aa56fd6d56c3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3c9ecea7eebc..7277ed21552f 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -36,7 +36,8 @@ DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn31
DC_LIBS += dml
-DC_LIBS += dml2
+DC_LIBS += dml2_0
+DC_LIBS += soc_and_ip_translator
endif
DC_LIBS += dce120
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index d897f8a30ede..4da5adab799c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -1136,7 +1136,7 @@ static void calculate_bandwidth(
}
}
}
- data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
+ data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed((uint64_t)vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1));
data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips);
data->total_display_reads_required_data = bw_int_to_fixed(0);
data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0);
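The (uint64_t) cast above widens one operand so the multiply itself happens in 64 bits; without it the two 32-bit operands are multiplied in 32 bits, wrap, and only then widen. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t slots = 70000, channels = 70000;
	uint64_t bad  = slots * channels;           /* 32-bit product wraps */
	uint64_t good = (uint64_t)slots * channels; /* widened first */

	printf("%llu vs %llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}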
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index 452206b5095e..6073cadde76c 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -284,7 +284,7 @@ struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg)
dc_fixpt_mul(
square,
res),
- n * (n - 1)));
+ (long long)n * (n - 1)));
n -= 2;
} while (n != 0);
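The loop above is a Horner-style evaluation of the cosine Maclaurin series, with n * (n - 1) supplying the successive factorial factors; the (long long) cast widens that product before it feeds the fixed-point divide. The identity being evaluated is:

\cos x = \sum_{k \ge 0} \frac{(-1)^k x^{2k}}{(2k)!}
       = 1 - \frac{x^2}{1 \cdot 2}\left(1 - \frac{x^2}{3 \cdot 4}\left(1 - \frac{x^2}{5 \cdot 6}\left(\cdots\right)\right)\right)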
diff --git a/drivers/gpu/drm/amd/display/dc/basics/vector.c b/drivers/gpu/drm/amd/display/dc/basics/vector.c
index 6d2924114a3e..b413a672c2c0 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/vector.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/vector.c
@@ -170,7 +170,7 @@ bool dal_vector_remove_at_index(
memmove(
vector->container + (index * vector->struct_size),
vector->container + ((index + 1) * vector->struct_size),
- (vector->count - index - 1) * vector->struct_size);
+ (size_t)(vector->count - index - 1) * vector->struct_size);
vector->count -= 1;
return true;
@@ -219,7 +219,7 @@ bool dal_vector_insert_at(
memmove(
insert_address + vector->struct_size,
insert_address,
- vector->struct_size * (vector->count - position));
+ (size_t)vector->struct_size * (vector->count - position));
memmove(
insert_address,
@@ -271,7 +271,7 @@ struct vector *dal_vector_clone(
/* copy vector's data */
memmove(vec_cloned->container, vector->container,
- vec_cloned->struct_size * vec_cloned->capacity);
+ (size_t)vec_cloned->struct_size * vec_cloned->capacity);
return vec_cloned;
}
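All three casts above address the same hazard: count and struct_size are 32-bit, so their product is computed in 32 bits before memmove()'s size_t parameter widens it. A self-contained remove-at-index sketch with the cast applied:

#include <stdint.h>
#include <string.h>

/* Shift the tail of a packed array down one slot, dropping element
 * `index`. The (size_t) cast widens before the multiply so byte counts
 * past 4 GiB do not wrap. */
static void array_remove(void *base, uint32_t count, uint32_t struct_size,
			 uint32_t index)
{
	unsigned char *at = (unsigned char *)base +
			    (size_t)index * struct_size;

	memmove(at, at + struct_size,
		(size_t)(count - index - 1) * struct_size);
}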
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 154fd2c18e88..d1471f34e419 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -67,7 +67,9 @@ static ATOM_HPD_INT_RECORD *get_hpd_record(struct bios_parser *bp,
ATOM_OBJECT *object);
static struct device_id device_type_from_device_id(uint16_t device_id);
static uint32_t signal_to_ss_id(enum as_signal_type signal);
-static uint32_t get_support_mask_for_device_id(struct device_id device_id);
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id);
static ATOM_ENCODER_CAP_RECORD_V2 *get_encoder_cap_record(
struct bios_parser *bp,
ATOM_OBJECT *object);
@@ -441,6 +443,7 @@ static enum bp_result get_firmware_info_v1_4(
le32_to_cpu(firmware_info->ulMinPixelClockPLL_Output) * 10;
info->pll_info.max_output_pxl_clk_pll_frequency =
le32_to_cpu(firmware_info->ulMaxPixelClockPLL_Output) * 10;
+ info->max_pixel_clock = le16_to_cpu(firmware_info->usMaxPixelClock) * 10;
if (firmware_info->usFirmwareCapability.sbfAccess.MemoryClockSS_Support)
/* Since there is no information on the SS, report conservative
@@ -497,6 +500,7 @@ static enum bp_result get_firmware_info_v2_1(
info->external_clock_source_frequency_for_dp =
le16_to_cpu(firmwareInfo->usUniphyDPModeExtClkFreq) * 10;
info->min_allowed_bl_level = firmwareInfo->ucMinAllowedBL_Level;
+ info->max_pixel_clock = le16_to_cpu(firmwareInfo->usMaxPixelClock) * 10;
/* There should be only one entry in the SS info table for Memory Clock
*/
@@ -736,18 +740,94 @@ static enum bp_result bios_parser_transmitter_control(
return bp->cmd_tbl.transmitter_control(bp, cntl);
}
+static enum bp_result bios_parser_select_crtc_source(
+ struct dc_bios *dcb,
+ struct bp_crtc_source_select *bp_params)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+
+ if (!bp->cmd_tbl.select_crtc_source)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.select_crtc_source(bp, bp_params);
+}
+
static enum bp_result bios_parser_encoder_control(
struct dc_bios *dcb,
struct bp_encoder_control *cntl)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
+ if (cntl->engine_id == ENGINE_ID_DACA) {
+ if (!bp->cmd_tbl.dac1_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac1_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ } else if (cntl->engine_id == ENGINE_ID_DACB) {
+ if (!bp->cmd_tbl.dac2_encoder_control)
+ return BP_RESULT_FAILURE;
+
+ return bp->cmd_tbl.dac2_encoder_control(
+ bp, cntl->action == ENCODER_CONTROL_ENABLE,
+ cntl->pixel_clock, ATOM_DAC1_PS2);
+ }
+
if (!bp->cmd_tbl.dig_encoder_control)
return BP_RESULT_FAILURE;
return bp->cmd_tbl.dig_encoder_control(bp, cntl);
}
+static enum bp_result bios_parser_dac_load_detection(
+ struct dc_bios *dcb,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id)
+{
+ struct bios_parser *bp = BP_FROM_DCB(dcb);
+ struct dc_context *ctx = dcb->ctx;
+ struct bp_load_detection_parameters bp_params = {0};
+ enum bp_result bp_result;
+ uint32_t bios_0_scratch;
+ uint32_t device_id_mask = 0;
+
+ bp_params.engine_id = engine_id;
+ bp_params.device_id = get_support_mask_for_device_id(device_type, enum_id);
+
+ if (engine_id != ENGINE_ID_DACA &&
+ engine_id != ENGINE_ID_DACB)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (!bp->cmd_tbl.dac_load_detection)
+ return BP_RESULT_UNSUPPORTED;
+
+ if (bp_params.device_id == ATOM_DEVICE_CRT1_SUPPORT)
+ device_id_mask = ATOM_S0_CRT1_MASK;
+ else if (bp_params.device_id == ATOM_DEVICE_CRT2_SUPPORT)
+ device_id_mask = ATOM_S0_CRT2_MASK;
+ else
+ return BP_RESULT_UNSUPPORTED;
+
+ /* BIOS will write the detected devices to BIOS_SCRATCH_0, clear corresponding bit */
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+ bios_0_scratch &= ~device_id_mask;
+ dm_write_reg(ctx, bp->base.regs->BIOS_SCRATCH_0, bios_0_scratch);
+
+ bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params);
+
+ if (bp_result != BP_RESULT_OK)
+ return bp_result;
+
+ bios_0_scratch = dm_read_reg(ctx, bp->base.regs->BIOS_SCRATCH_0);
+
+ if (bios_0_scratch & device_id_mask)
+ return BP_RESULT_OK;
+
+ return BP_RESULT_FAILURE;
+}
+
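The detection sequence above is a scratch-register handshake: the driver clears the device's bit in BIOS_SCRATCH_0, asks the VBIOS to run load detection, and treats the bit reappearing as a connected load. A hedged usage sketch, probing DAC A for a CRT (the caller context is assumed):

	enum bp_result res = dcb->funcs->dac_load_detection(dcb,
			ENGINE_ID_DACA, DEVICE_TYPE_CRT, 1);

	if (res == BP_RESULT_OK)
		signal = SIGNAL_TYPE_RGB;	/* a load is attached */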
static enum bp_result bios_parser_adjust_pixel_clock(
struct dc_bios *dcb,
struct bp_adjust_pixel_clock_parameters *bp_params)
@@ -858,7 +938,7 @@ static bool bios_parser_is_device_id_supported(
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
- uint32_t mask = get_support_mask_for_device_id(id);
+ uint32_t mask = get_support_mask_for_device_id(id.device_type, id.enum_id);
return (le16_to_cpu(bp->object_info_tbl.v1_1->usDeviceSupport) & mask) != 0;
}
@@ -2149,11 +2229,10 @@ static uint32_t signal_to_ss_id(enum as_signal_type signal)
return clk_id_ss;
}
-static uint32_t get_support_mask_for_device_id(struct device_id device_id)
+static uint32_t get_support_mask_for_device_id(
+ enum dal_device_type device_type,
+ uint32_t enum_id)
{
- enum dal_device_type device_type = device_id.device_type;
- uint32_t enum_id = device_id.enum_id;
-
switch (device_type) {
case DEVICE_TYPE_LCD:
switch (enum_id) {
@@ -2829,8 +2908,12 @@ static const struct dc_vbios_funcs vbios_funcs = {
.is_device_id_supported = bios_parser_is_device_id_supported,
/* COMMANDS */
+ .select_crtc_source = bios_parser_select_crtc_source,
+
.encoder_control = bios_parser_encoder_control,
+ .dac_load_detection = bios_parser_dac_load_detection,
+
.transmitter_control = bios_parser_transmitter_control,
.enable_crtc = bios_parser_enable_crtc,
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 04eb647acc4e..550a9f1d03f8 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -1480,10 +1480,10 @@ static enum bp_result get_embedded_panel_info_v2_1(
/* not provided by VBIOS */
info->lcd_timing.misc_info.HORIZONTAL_CUT_OFF = 0;
- info->lcd_timing.misc_info.H_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
- & ATOM_HSYNC_POLARITY);
- info->lcd_timing.misc_info.V_SYNC_POLARITY = ~(uint32_t) (lvds->lcd_timing.miscinfo
- & ATOM_VSYNC_POLARITY);
+ info->lcd_timing.misc_info.H_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
+ ATOM_HSYNC_POLARITY);
+ info->lcd_timing.misc_info.V_SYNC_POLARITY = !(lvds->lcd_timing.miscinfo &
+ ATOM_VSYNC_POLARITY);
/* not provided by VBIOS */
info->lcd_timing.misc_info.VERTICAL_CUT_OFF = 0;
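The fix above swaps bitwise NOT for logical NOT: ~(x & FLAG) never yields the 0/1 value a one-bit polarity field expects, while !(x & FLAG) does. A standalone check:

#include <assert.h>
#include <stdint.h>

#define ATOM_HSYNC_POLARITY 0x2	/* value assumed for illustration */

int main(void)
{
	uint32_t miscinfo = 0;	/* flag clear */

	/* bitwise NOT of 0 is all ones, not a boolean */
	assert(~(uint32_t)(miscinfo & ATOM_HSYNC_POLARITY) == 0xFFFFFFFFu);
	/* logical NOT gives the intended 0/1 polarity */
	assert(!(miscinfo & ATOM_HSYNC_POLARITY) == 1);
	return 0;
}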
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 58e88778da7f..22457f417e65 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -52,7 +52,9 @@ static void init_transmitter_control(struct bios_parser *bp);
static void init_set_pixel_clock(struct bios_parser *bp);
static void init_enable_spread_spectrum_on_ppll(struct bios_parser *bp);
static void init_adjust_display_pll(struct bios_parser *bp);
+static void init_select_crtc_source(struct bios_parser *bp);
static void init_dac_encoder_control(struct bios_parser *bp);
+static void init_dac_load_detection(struct bios_parser *bp);
static void init_dac_output_control(struct bios_parser *bp);
static void init_set_crtc_timing(struct bios_parser *bp);
static void init_enable_crtc(struct bios_parser *bp);
@@ -69,7 +71,9 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp)
init_set_pixel_clock(bp);
init_enable_spread_spectrum_on_ppll(bp);
init_adjust_display_pll(bp);
+ init_select_crtc_source(bp);
init_dac_encoder_control(bp);
+ init_dac_load_detection(bp);
init_dac_output_control(bp);
init_set_crtc_timing(bp);
init_enable_crtc(bp);
@@ -1612,6 +1616,198 @@ static enum bp_result adjust_display_pll_v3(
/*******************************************************************************
********************************************************************************
**
+ ** SELECT CRTC SOURCE
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
+
+static void init_select_crtc_source(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(SelectCRTC_Source)) {
+ case 1:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v1;
+ break;
+ case 2:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v2;
+ break;
+ case 3:
+ bp->cmd_tbl.select_crtc_source = select_crtc_source_v3;
+ break;
+ default:
+ bp->cmd_tbl.select_crtc_source = NULL;
+ break;
+ }
+}
+
+static enum bp_result select_crtc_source_v1(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PS_ALLOCATION params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ switch (bp_params->engine_id) {
+ case ENGINE_ID_DACA:
+ params.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+ break;
+ case ENGINE_ID_DACB:
+ params.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+ break;
+ default:
+ return BP_RESULT_BADINPUT;
+ }
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static bool select_crtc_source_v2_encoder_id(
+ enum engine_id engine_id, uint8_t *out_encoder_id)
+{
+ uint8_t encoder_id = 0;
+
+ switch (engine_id) {
+ case ENGINE_ID_DIGA:
+ encoder_id = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGB:
+ encoder_id = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGC:
+ encoder_id = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGD:
+ encoder_id = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGE:
+ encoder_id = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGF:
+ encoder_id = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ case ENGINE_ID_DIGG:
+ encoder_id = ASIC_INT_DIG7_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACA:
+ encoder_id = ASIC_INT_DAC1_ENCODER_ID;
+ break;
+ case ENGINE_ID_DACB:
+ encoder_id = ASIC_INT_DAC2_ENCODER_ID;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_id = encoder_id;
+ return true;
+}
+
+static bool select_crtc_source_v2_encoder_mode(
+ enum signal_type signal_type, uint8_t *out_encoder_mode)
+{
+ uint8_t encoder_mode = 0;
+
+ switch (signal_type) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ encoder_mode = ATOM_ENCODER_MODE_DVI;
+ break;
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ encoder_mode = ATOM_ENCODER_MODE_HDMI;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ encoder_mode = ATOM_ENCODER_MODE_LVDS;
+ break;
+ case SIGNAL_TYPE_RGB:
+ encoder_mode = ATOM_ENCODER_MODE_CRT;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ encoder_mode = ATOM_ENCODER_MODE_DP_MST;
+ break;
+ case SIGNAL_TYPE_EDP:
+ encoder_mode = ATOM_ENCODER_MODE_DP;
+ break;
+ default:
+ return false;
+ }
+
+ *out_encoder_mode = encoder_mode;
+ return true;
+}
+
+static enum bp_result select_crtc_source_v2(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params = {0};
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result select_crtc_source_v3(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ SELECT_CRTC_SOURCE_PARAMETERS_V3 params;
+
+ if (!bp->cmd_helper->controller_id_to_atom(bp_params->controller_id, &params.ucCRTC))
+ return BP_RESULT_BADINPUT;
+
+ if (!select_crtc_source_v2_encoder_id(
+ bp_params->engine_id,
+ &params.ucEncoderID))
+ return BP_RESULT_BADINPUT;
+ if (!select_crtc_source_v2_encoder_mode(
+ bp_params->sink_signal,
+ &params.ucEncodeMode))
+ return BP_RESULT_BADINPUT;
+
+ params.ucDstBpc = bp_params->bit_depth;
+
+ if (EXEC_BIOS_CMD_TABLE(SelectCRTC_Source, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC ENCODER CONTROL
**
********************************************************************************
@@ -1711,6 +1907,96 @@ static enum bp_result dac2_encoder_control_v1(
/*******************************************************************************
********************************************************************************
**
+ ** DAC LOAD DETECTION
+ **
+ ********************************************************************************
+ *******************************************************************************/
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
+
+static void init_dac_load_detection(struct bios_parser *bp)
+{
+ switch (BIOS_CMD_TABLE_PARA_REVISION(DAC_LoadDetection)) {
+ case 1:
+ case 2:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v1;
+ break;
+ case 3:
+ default:
+ bp->cmd_tbl.dac_load_detection = dac_load_detection_v3;
+ break;
+ }
+}
+
+static void dac_load_detect_prepare_params(
+ struct _DAC_LOAD_DETECTION_PS_ALLOCATION *params,
+ enum engine_id engine_id,
+ uint16_t device_id,
+ uint8_t misc)
+{
+ uint8_t dac_type = ATOM_DAC_A;
+
+ if (engine_id == ENGINE_ID_DACB)
+ dac_type = ATOM_DAC_B;
+
+ params->sDacload.usDeviceID = cpu_to_le16(device_id);
+ params->sDacload.ucDacType = dac_type;
+ params->sDacload.ucMisc = misc;
+}
+
+static enum bp_result dac_load_detection_v1(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params;
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ 0);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+static enum bp_result dac_load_detection_v3(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params)
+{
+ enum bp_result result = BP_RESULT_FAILURE;
+ DAC_LOAD_DETECTION_PS_ALLOCATION params;
+
+ uint8_t misc = 0;
+
+ if (bp_params->device_id == ATOM_DEVICE_CV_SUPPORT ||
+ bp_params->device_id == ATOM_DEVICE_TV1_SUPPORT)
+ misc = DAC_LOAD_MISC_YPrPb;
+
+ dac_load_detect_prepare_params(
+ &params,
+ bp_params->engine_id,
+ bp_params->device_id,
+ misc);
+
+ if (EXEC_BIOS_CMD_TABLE(DAC_LoadDetection, params))
+ result = BP_RESULT_OK;
+
+ return result;
+}
+
+/*******************************************************************************
+ ********************************************************************************
+ **
** DAC OUTPUT CONTROL
**
********************************************************************************
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.h b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
index ad533775e724..e89b1ba0048b 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.h
@@ -52,6 +52,9 @@ struct cmd_tbl {
enum bp_result (*adjust_display_pll)(
struct bios_parser *bp,
struct bp_adjust_pixel_clock_parameters *bp_params);
+ enum bp_result (*select_crtc_source)(
+ struct bios_parser *bp,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*dac1_encoder_control)(
struct bios_parser *bp,
bool enable,
@@ -68,6 +71,9 @@ struct cmd_tbl {
enum bp_result (*dac2_output_control)(
struct bios_parser *bp,
bool enable);
+ enum bp_result (*dac_load_detection)(
+ struct bios_parser *bp,
+ struct bp_load_detection_parameters *bp_params);
enum bp_result (*set_crtc_timing)(
struct bios_parser *bp,
struct bp_hw_crtc_timing_parameters *bp_params);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 4071851f9e86..15cf13ec5302 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -28,7 +28,7 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
#include "dc_state_priv.h"
-#include "link.h"
+#include "link_service.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index dbd6ef1b60a0..6131ede2db7a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -463,6 +463,9 @@ void dce_clk_mgr_construct(
clk_mgr->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
clk_mgr->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
+
dce_clock_read_integrated_info(clk_mgr);
dce_clock_read_ss_info(clk_mgr);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index 13cf415e38e5..d50b9440210e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -164,7 +164,7 @@ void dce110_fill_display_configs(
stream->link->cur_link_settings.link_rate;
cfg->link_settings.link_spread =
stream->link->cur_link_settings.link_spread;
- cfg->sym_clock = stream->phy_pix_clk;
+ cfg->pixel_clock = stream->phy_pix_clk;
/* Round v_refresh*/
cfg->v_refresh = stream->timing.pix_clk_100hz * 100;
cfg->v_refresh /= stream->timing.h_total;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
index a39641a0ff09..69dd80d9f738 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
@@ -147,6 +147,8 @@ void dce60_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
+ struct clk_mgr *base = &clk_mgr->base;
+
dce_clk_mgr_construct(ctx, clk_mgr);
memcpy(clk_mgr->max_clks_by_state,
@@ -157,5 +159,8 @@ void dce60_clk_mgr_construct(
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
clk_mgr->base.funcs = &dce60_funcs;
+
+ base->clks.max_supported_dispclk_khz =
+ clk_mgr->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
index 3253115a153d..827bc2431d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
@@ -69,7 +69,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
/* handle DALSMC_Result_CmdRejectedBusy? */
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 9e2ef0e724fc..7aee02d56292 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -563,6 +563,7 @@ static void vg_clk_mgr_helper_populate_bw_params(
{
int i, j;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
+ uint32_t max_dispclk = 0, max_dppclk = 0;
j = -1;
@@ -584,6 +585,15 @@ static void vg_clk_mgr_helper_populate_bw_params(
return;
}
+ /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ if (clock_table->NumDispClkLevelsEnabled <= VG_NUM_DISPCLK_DPM_LEVELS &&
+ clock_table->NumDispClkLevelsEnabled <= VG_NUM_DPPCLK_DPM_LEVELS) {
+ max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
+ } else {
+ ASSERT(0);
+ }
+
bw_params->clk_table.num_entries = j + 1;
for (i = 0; i < bw_params->clk_table.num_entries - 1; i++, j--) {
@@ -591,11 +601,17 @@ static void vg_clk_mgr_helper_populate_bw_params(
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->DfPstateTable[j].voltage);
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
}
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].memclk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, VG_NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, VG_NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, VG_NUM_DPPCLK_DPM_LEVELS);
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
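find_max_clk_value() is used above to collapse the per-level DISPCLK/DPPCLK tables into a single ceiling. A sketch of that helper under its assumed semantics (largest entry among the first num_levels):

#include <stdint.h>

static uint32_t find_max_clk_value(const uint32_t clocks[],
				   uint32_t num_levels)
{
	uint32_t i, max = 0;

	for (i = 0; i < num_levels; i++)
		if (clocks[i] > max)
			max = clocks[i];
	return max;
}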
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index bc123f1884da..051052bd10c9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -47,7 +47,7 @@
#include "dcn30/dcn30_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 91d872d6d392..db687a13174d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -48,7 +48,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn314_smu.h"
@@ -77,6 +77,7 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
+
#define regCLK1_CLK_PLL_REQ 0x0237
#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
@@ -87,8 +88,70 @@ static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0,
#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+#define regCLK1_CLK0_DFS_CNTL 0x0269
+#define regCLK1_CLK0_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DFS_CNTL 0x026c
+#define regCLK1_CLK1_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DFS_CNTL 0x026f
+#define regCLK1_CLK2_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DFS_CNTL 0x0272
+#define regCLK1_CLK3_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DFS_CNTL 0x0275
+#define regCLK1_CLK4_DFS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DFS_CNTL 0x0278
+#define regCLK1_CLK5_DFS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_CURRENT_CNT 0x02fb
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x02fc
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x02fd
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x02fe
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x02ff
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0300
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
+
+#define regCLK1_CLK0_BYPASS_CNTL 0x028a
+#define regCLK1_CLK0_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_BYPASS_CNTL 0x0293
+#define regCLK1_CLK1_BYPASS_CNTL_BASE_IDX 0
#define regCLK1_CLK2_BYPASS_CNTL 0x029c
#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_BYPASS_CNTL 0x02a5
+#define regCLK1_CLK3_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_BYPASS_CNTL 0x02ae
+#define regCLK1_CLK4_BYPASS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_BYPASS_CNTL 0x02b7
+#define regCLK1_CLK5_BYPASS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_DS_CNTL 0x0283
+#define regCLK1_CLK0_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK1_DS_CNTL 0x028c
+#define regCLK1_CLK1_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK2_DS_CNTL 0x0295
+#define regCLK1_CLK2_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK3_DS_CNTL 0x029e
+#define regCLK1_CLK3_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK4_DS_CNTL 0x02a7
+#define regCLK1_CLK4_DS_CNTL_BASE_IDX 0
+#define regCLK1_CLK5_DS_CNTL 0x02b0
+#define regCLK1_CLK5_DS_CNTL_BASE_IDX 0
+
+#define regCLK1_CLK0_ALLOW_DS 0x0284
+#define regCLK1_CLK0_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK1_ALLOW_DS 0x028d
+#define regCLK1_CLK1_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK2_ALLOW_DS 0x0296
+#define regCLK1_CLK2_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK3_ALLOW_DS 0x029f
+#define regCLK1_CLK3_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK4_ALLOW_DS 0x02a8
+#define regCLK1_CLK4_ALLOW_DS_BASE_IDX 0
+#define regCLK1_CLK5_ALLOW_DS 0x02b1
+#define regCLK1_CLK5_ALLOW_DS_BASE_IDX 0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
@@ -185,6 +248,8 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn314 *clk_mgr_dcn314 = TO_CLK_MGR_DCN314(clk_mgr_int);
+ struct clk_log_info log_info = {0};
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
// Assumption is that boot state always supports pstate
@@ -200,6 +265,9 @@ void dcn314_init_clocks(struct clk_mgr *clk_mgr)
dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
+ dcn314_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn314->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
}
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
@@ -218,6 +286,8 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->work_arounds.skip_clock_update)
return;
+ display_count = dcn314_get_active_display_cnt_wa(dc, context);
+
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -236,7 +306,6 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn314_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0) {
union display_idle_optimization_u idle_info = { 0 };
@@ -293,11 +362,19 @@ void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
update_dppclk = true;
}
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ int requested_dispclk_khz = new_clocks->dispclk_khz;
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ /* Raise the request to the minimum display clock PMFW supports. */
+ if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz)
+ requested_dispclk_khz = dc->debug.min_disp_clk_khz;
+
+ dcn314_smu_set_dispclk(clk_mgr, requested_dispclk_khz);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- dcn314_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+
dcn314_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
@@ -385,10 +462,65 @@ bool dcn314_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+
+static void dcn314_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
- return;
+
+ struct dcn35_clk_internal internal = {0};
+
+ dcn314_dump_clk_registers_internal(&internal, clk_mgr_base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
+
}
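The bypass decode above masks a 3-bit select field and treats any value past 4 as unnamed, reporting it as 0 (DFS). The same decode as a small helper:

#include <stdint.h>

/* Extract the 3-bit bypass select; values above 4 have no name in the
 * bypass table, so fall back to 0 (DFS). */
static uint32_t decode_bypass_sel(uint32_t bypass_cntl)
{
	uint32_t sel = bypass_cntl & 0x7;

	return (sel > 4) ? 0 : sel;
}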
static struct clk_bw_params dcn314_bw_params = {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
index 002c28e80720..0577eb527bc3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h
@@ -65,4 +65,9 @@ void dcn314_clk_mgr_construct(struct dc_context *ctx,
void dcn314_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
+
+void dcn314_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info);
+
+
#endif //__DCN314_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index e4d22f74f986..3a881451e9da 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -40,17 +40,51 @@
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-
+#include "reg_helper.h"
#include "logger_types.h"
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
-#include "link.h"
+#include "link_service.h"
+
+#define MAX_INSTANCE 7
+#define MAX_SEGMENT 8
+
+struct IP_BASE_INSTANCE {
+ unsigned int segment[MAX_SEGMENT];
+};
+
+struct IP_BASE {
+ struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
+};
+
+static const struct IP_BASE CLK_BASE = { { { { 0x00016C00, 0x02401800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00016E00, 0x02401C00, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017000, 0x02402000, 0, 0, 0, 0, 0, 0 } },
+ { { 0x00017200, 0x02402400, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B000, 0x0242D800, 0, 0, 0, 0, 0, 0 } },
+ { { 0x0001B200, 0x0242DC00, 0, 0, 0, 0, 0, 0 } } } };
+
+#define regCLK1_CLK0_CURRENT_CNT 0x0314
+#define regCLK1_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK1_CURRENT_CNT 0x0315
+#define regCLK1_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK2_CURRENT_CNT 0x0316
+#define regCLK1_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK3_CURRENT_CNT 0x0317
+#define regCLK1_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK4_CURRENT_CNT 0x0318
+#define regCLK1_CLK4_CURRENT_CNT_BASE_IDX 0
+#define regCLK1_CLK5_CURRENT_CNT 0x0319
+#define regCLK1_CLK5_CURRENT_CNT_BASE_IDX 0
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
+#define REG(reg_name) \
+ (CLK_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
#define UNSUPPORTED_DCFCLK 10000000
#define MIN_DPP_DISP_CLK 100000
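The REG() macro above composes an MMIO address from the instance's segment base plus the per-register offset; worked out for instance 0:

/* REG(CLK1_CLK0_CURRENT_CNT)
 *   == CLK_BASE.instance[0].segment[regCLK1_CLK0_CURRENT_CNT_BASE_IDX]
 *      + regCLK1_CLK0_CURRENT_CNT
 *   == 0x00016C00 + 0x0314
 *   == 0x00016F14
 */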
@@ -245,9 +279,38 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+static void dcn315_dump_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+}
+
static void dcn315_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
+ struct dcn35_clk_internal internal = {0};
+
+ dcn315_dump_clk_registers_internal(&internal, clk_mgr_base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
return;
}
@@ -594,13 +657,32 @@ static struct clk_mgr_funcs dcn315_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
.update_clocks = dcn315_update_clocks,
- .init_clocks = dcn31_init_clocks,
+ .init_clocks = dcn315_init_clocks,
.enable_pme_wa = dcn315_enable_pme_wa,
.are_clock_states_equal = dcn31_are_clock_states_equal,
.notify_wm_ranges = dcn315_notify_wm_ranges
};
extern struct clk_mgr_funcs dcn3_fpga_funcs;
+void dcn315_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ struct clk_mgr_dcn315 *clk_mgr_dcn315 = TO_CLK_MGR_DCN315(clk_mgr_int);
+ struct clk_log_info log_info = {0};
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+
+ dcn315_dump_clk_registers(&clk_mgr->boot_snapshot, &clk_mgr_dcn315->base.base, &log_info);
+ clk_mgr->clks.dispclk_khz = clk_mgr->boot_snapshot.dispclk * 1000;
+}
+
void dcn315_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_dcn315 *clk_mgr,
@@ -661,6 +743,7 @@ void dcn315_clk_mgr_construct(
/* Saved clocks configured at boot for debug purposes */
dcn315_dump_clk_registers(&clk_mgr->base.base.boot_snapshot,
&clk_mgr->base.base, &log_info);
+ clk_mgr->base.base.clks.dispclk_khz = clk_mgr->base.base.boot_snapshot.dispclk * 1000;
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
index ac36ddf5dd1a..642ae3d4a790 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h
@@ -44,6 +44,7 @@ void dcn315_clk_mgr_construct(struct dc_context *ctx,
struct pp_smu_funcs *pp_smu,
struct dccg *dccg);
+void dcn315_init_clocks(struct clk_mgr *clk_mgr);
void dcn315_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
#endif //__DCN315_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 49efea0c8fcf..1769b1f26e75 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -39,7 +39,7 @@
#include "dcn316_smu.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
// DCN316 this is CLK1 instance
#define MAX_INSTANCE 7
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 084994c650c4..7da7b41bd092 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -33,7 +33,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "dcn32_smu13_driver_if.h"
@@ -1047,11 +1047,8 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
&num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
+
clk_mgr_base->bw_params->max_memclk_mhz =
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
index cf2d35363e8b..5d80fdf63ffc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
@@ -63,7 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
udelay(delay_us);
} while (max_retries--);
- TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx);
+
return reg;
}
@@ -120,7 +121,7 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
+ TRACE_SMU_MSG_DELAY(0, 0, *total_delay_us, clk_mgr->base.ctx);
return reg;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index bb1ac12a2b09..dfd0c9505af0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -44,7 +44,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "link.h"
+#include "link_service.h"
#include "logger_types.h"
#undef DC_LOGGER
@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
+ else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
+ new_clocks->ref_dtbclk_khz = 0;
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);
- if (actual_dtbclk) {
+ if (actual_dtbclk > 590000) {
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}
@@ -587,9 +589,118 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
return true;
}
-static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+static void dcn35_save_clk_registers_internal(struct dcn35_clk_internal *internal, struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ // read dtbclk
+ internal->CLK1_CLK4_CURRENT_CNT = REG_READ(CLK1_CLK4_CURRENT_CNT);
+ internal->CLK1_CLK4_BYPASS_CNTL = REG_READ(CLK1_CLK4_BYPASS_CNTL);
+
+ // read dcfclk
+ internal->CLK1_CLK3_CURRENT_CNT = REG_READ(CLK1_CLK3_CURRENT_CNT);
+ internal->CLK1_CLK3_BYPASS_CNTL = REG_READ(CLK1_CLK3_BYPASS_CNTL);
+
+ // read dcf deep sleep divider
+ internal->CLK1_CLK3_DS_CNTL = REG_READ(CLK1_CLK3_DS_CNTL);
+ internal->CLK1_CLK3_ALLOW_DS = REG_READ(CLK1_CLK3_ALLOW_DS);
+
+ // read dppclk
+ internal->CLK1_CLK1_CURRENT_CNT = REG_READ(CLK1_CLK1_CURRENT_CNT);
+ internal->CLK1_CLK1_BYPASS_CNTL = REG_READ(CLK1_CLK1_BYPASS_CNTL);
+
+ // read dprefclk
+ internal->CLK1_CLK2_CURRENT_CNT = REG_READ(CLK1_CLK2_CURRENT_CNT);
+ internal->CLK1_CLK2_BYPASS_CNTL = REG_READ(CLK1_CLK2_BYPASS_CNTL);
+
+ // read dispclk
+ internal->CLK1_CLK0_CURRENT_CNT = REG_READ(CLK1_CLK0_CURRENT_CNT);
+ internal->CLK1_CLK0_BYPASS_CNTL = REG_READ(CLK1_CLK0_BYPASS_CNTL);
+}
+
+static void dcn35_save_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
struct clk_mgr_dcn35 *clk_mgr)
{
+ struct dcn35_clk_internal internal = {0};
+ static const char * const bypass_clks[] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
+
+ dcn35_save_clk_registers_internal(&internal, &clk_mgr->base.base);
+
+ regs_and_bypass->dcfclk = internal.CLK1_CLK3_CURRENT_CNT / 10;
+ regs_and_bypass->dcf_deep_sleep_divider = internal.CLK1_CLK3_DS_CNTL / 10;
+ regs_and_bypass->dcf_deep_sleep_allow = internal.CLK1_CLK3_ALLOW_DS;
+ regs_and_bypass->dprefclk = internal.CLK1_CLK2_CURRENT_CNT / 10;
+ regs_and_bypass->dispclk = internal.CLK1_CLK0_CURRENT_CNT / 10;
+ regs_and_bypass->dppclk = internal.CLK1_CLK1_CURRENT_CNT / 10;
+ regs_and_bypass->dtbclk = internal.CLK1_CLK4_CURRENT_CNT / 10;
+
+ regs_and_bypass->dppclk_bypass = internal.CLK1_CLK1_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dppclk_bypass > 4)
+ regs_and_bypass->dppclk_bypass = 0;
+ regs_and_bypass->dcfclk_bypass = internal.CLK1_CLK3_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dcfclk_bypass > 4)
+ regs_and_bypass->dcfclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = internal.CLK1_CLK0_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dispclk_bypass > 4)
+ regs_and_bypass->dispclk_bypass = 0;
+ regs_and_bypass->dprefclk_bypass = internal.CLK1_CLK2_BYPASS_CNTL & 0x0007;
+ if (regs_and_bypass->dprefclk_bypass > 4)
+ regs_and_bypass->dprefclk_bypass = 0;
+
+ if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ DC_LOG_SMU("clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
+
+ DC_LOG_SMU("dcfclk,%d,%d,%d,%s\n",
+ regs_and_bypass->dcfclk,
+ regs_and_bypass->dcf_deep_sleep_divider,
+ regs_and_bypass->dcf_deep_sleep_allow,
+ bypass_clks[(int) regs_and_bypass->dcfclk_bypass]);
+
+ DC_LOG_SMU("dprefclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dprefclk,
+ bypass_clks[(int) regs_and_bypass->dprefclk_bypass]);
+
+ DC_LOG_SMU("dispclk,%d,N/A,N/A,%s\n",
+ regs_and_bypass->dispclk,
+ bypass_clks[(int) regs_and_bypass->dispclk_bypass]);
+
+ // REGISTER VALUES
+ DC_LOG_SMU("reg_name,value,clk_type");
+
+ DC_LOG_SMU("CLK1_CLK3_CURRENT_CNT,%d,dcfclk",
+ internal.CLK1_CLK3_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK4_CURRENT_CNT,%d,dtbclk",
+ internal.CLK1_CLK4_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_DS_CNTL,%d,dcf_deep_sleep_divider",
+ internal.CLK1_CLK3_DS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK3_ALLOW_DS,%d,dcf_deep_sleep_allow",
+ internal.CLK1_CLK3_ALLOW_DS);
+
+ DC_LOG_SMU("CLK1_CLK2_CURRENT_CNT,%d,dprefclk",
+ internal.CLK1_CLK2_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK0_CURRENT_CNT,%d,dispclk",
+ internal.CLK1_CLK0_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK1_CURRENT_CNT,%d,dppclk",
+ internal.CLK1_CLK1_CURRENT_CNT);
+
+ DC_LOG_SMU("CLK1_CLK3_BYPASS_CNTL,%d,dcfclk_bypass",
+ internal.CLK1_CLK3_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK2_BYPASS_CNTL,%d,dprefclk_bypass",
+ internal.CLK1_CLK2_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK0_BYPASS_CNTL,%d,dispclk_bypass",
+ internal.CLK1_CLK0_BYPASS_CNTL);
+
+ DC_LOG_SMU("CLK1_CLK1_BYPASS_CNTL,%d,dppclk_bypass",
+ internal.CLK1_CLK1_BYPASS_CNTL);
+
+ }
}
static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
@@ -623,6 +734,7 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
void dcn35_init_clocks(struct clk_mgr *clk_mgr)
{
struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr_int);
init_clk_states(clk_mgr);
@@ -633,6 +745,13 @@ void dcn35_init_clocks(struct clk_mgr *clk_mgr)
else
clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+ dcn35_save_clk_registers(&clk_mgr->boot_snapshot, clk_mgr_dcn35);
+
+ clk_mgr->clks.ref_dtbclk_khz = clk_mgr->boot_snapshot.dtbclk * 10;
+ if (clk_mgr->boot_snapshot.dtbclk > 59000) {
+ /* consider dtbclk enabled when the boot snapshot reads above 590 MHz */
+ clk_mgr->clks.dtbclk_en = true;
+ }
}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
@@ -1176,6 +1295,35 @@ static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr,
dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
}
+static unsigned int dcn35_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ unsigned int num_clk_levels;
+
+ switch (clk_type) {
+ case CLK_TYPE_DISPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dispclk;
+ case CLK_TYPE_DPPCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 :
+ clk_mgr->base.boot_snapshot.dppclk;
+ case CLK_TYPE_DSCCLK:
+ num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels;
+ return num_clk_levels ?
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 :
+ clk_mgr->base.boot_snapshot.dispclk / 3;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
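dcn35_get_max_clock_khz() above reports the top clock-table entry converted to kHz, deriving DSCCLK as one third of DISPCLK, with the boot snapshot as fallback when the table is empty. A hedged usage sketch (caller variables are assumed):

	unsigned int max_dscclk_khz =
		clk_mgr_base->funcs->get_max_clock_khz(clk_mgr_base,
						       CLK_TYPE_DSCCLK);

	if (required_dscclk_khz > max_dscclk_khz)
		return false;	/* needs more DSCCLK than available */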
static struct clk_mgr_funcs dcn35_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
@@ -1187,6 +1335,7 @@ static struct clk_mgr_funcs dcn35_funcs = {
.set_low_power_state = dcn35_set_low_power_state,
.exit_low_power_state = dcn35_exit_low_power_state,
.is_ips_supported = dcn35_is_ips_supported,
+ .get_max_clock_khz = dcn35_get_max_clock_khz,
};
struct clk_mgr_funcs dcn35_fpga_funcs = {
@@ -1323,7 +1472,7 @@ void dcn35_clk_mgr_construct(
dcn35_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
+ dcn35_save_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index b59703467128..306016c1f109 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -13,7 +13,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#include "atomfirmware.h"
@@ -162,7 +162,7 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
unsigned int i;
char *entry_i = (char *)entry_0;
- uint32_t ret = dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
+ uint32_t ret = dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
if (ret & (1 << 31))
/* fine-grained, only min and max */
@@ -174,7 +174,7 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c
/* if the initial message failed, num_levels will be 0 */
for (i = 0; i < *num_levels && i < ARRAY_SIZE(clk_mgr->base.bw_params->clk_table.entries); i++) {
- *((unsigned int *)entry_i) = (dcn30_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
+ *((unsigned int *)entry_i) = (dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, i) & 0xFFFF);
entry_i += sizeof(clk_mgr->base.bw_params->clk_table.entries[0]);
}
}
@@ -231,20 +231,20 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
clk_mgr->smu_present = false;
clk_mgr->dpm_present = false;
- if (!clk_mgr_base->force_smu_not_present && dcn30_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
+ if (!clk_mgr_base->force_smu_not_present && dcn401_smu_get_smu_version(clk_mgr, &clk_mgr->smu_ver))
clk_mgr->smu_present = true;
if (!clk_mgr->smu_present)
return;
- dcn30_smu_check_driver_if_version(clk_mgr);
- dcn30_smu_check_msg_header_version(clk_mgr);
+ dcn401_smu_check_driver_if_version(clk_mgr);
+ dcn401_smu_check_msg_header_version(clk_mgr);
/* DCFCLK */
dcn401_init_single_clock(clk_mgr, PPCLK_DCFCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz,
&num_entries_per_clk->num_dcfclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DCFCLK);
if (num_entries_per_clk->num_dcfclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dcfclk_levels - 1].dcfclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz = 0;
@@ -253,7 +253,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_SOCCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].socclk_mhz,
&num_entries_per_clk->num_socclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_SOCCLK);
if (num_entries_per_clk->num_socclk_levels && clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_socclk_levels - 1].socclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz = 0;
@@ -263,7 +263,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DTBCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz,
&num_entries_per_clk->num_dtbclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DTBCLK);
if (num_entries_per_clk->num_dtbclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dtbclk_levels - 1].dtbclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz = 0;
@@ -273,7 +273,7 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_DISPCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].dispclk_mhz,
&num_entries_per_clk->num_dispclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DISPCLK);
if (num_entries_per_clk->num_dispclk_levels && clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_dispclk_levels - 1].dispclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 0;
@@ -1318,8 +1318,8 @@ static void dcn401_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
table->Watermarks.WatermarkRow[i].WmSetting = i;
table->Watermarks.WatermarkRow[i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
}
- dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
- dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
+ dcn401_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
+ dcn401_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
dcn401_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
@@ -1390,7 +1390,7 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz;
}
- clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK);
if (num_entries_per_clk->num_memclk_levels && clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_memclk_levels - 1].memclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz = 0;
@@ -1399,16 +1399,12 @@ static void dcn401_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)
dcn401_init_single_clock(clk_mgr, PPCLK_FCLK,
&clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz,
&num_entries_per_clk->num_fclk_levels);
- clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
+ clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = dcn401_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_FCLK);
if (num_entries_per_clk->num_fclk_levels && clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz ==
clk_mgr_base->bw_params->clk_table.entries[num_entries_per_clk->num_fclk_levels - 1].fclk_mhz)
clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz = 0;
- if (num_entries_per_clk->num_memclk_levels >= num_entries_per_clk->num_fclk_levels) {
- num_levels = num_entries_per_clk->num_memclk_levels;
- } else {
- num_levels = num_entries_per_clk->num_fclk_levels;
- }
+ num_levels = max(num_entries_per_clk->num_memclk_levels, num_entries_per_clk->num_fclk_levels);
clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? num_levels : 1;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
index 21c35528f61f..3a263840893e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
@@ -57,6 +57,8 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
/* Wait for response register to be ready */
dcn401_smu_wait_for_response(clk_mgr, 10, 200000);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -71,9 +73,11 @@ static bool dcn401_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uin
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
return false;
}
@@ -102,8 +106,6 @@ static uint32_t dcn401_smu_wait_for_response_delay(struct clk_mgr_internal *clk_
*total_delay_us += delay_us;
} while (max_retries--);
- TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx);
-
return reg;
}
@@ -115,6 +117,8 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Wait for response register to be ready */
dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay1_us);
+ TRACE_SMU_MSG_ENTER(msg_id, param_in, clk_mgr->base.ctx);
+
/* Clear response register */
REG_WRITE(DAL_RESP_REG, 0);
@@ -124,18 +128,71 @@ static bool dcn401_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mg
/* Trigger the message transaction by writing the message ID */
REG_WRITE(DAL_MSG_REG, msg_id);
- TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx);
-
/* Wait for response */
if (dcn401_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) {
if (param_out)
*param_out = REG_READ(DAL_ARG_REG);
*total_delay_us = delay1_us + delay2_us;
+ TRACE_SMU_MSG_EXIT(true, param_out ? *param_out : 0, clk_mgr->base.ctx);
return true;
}
*total_delay_us = delay1_us + 2000000;
+ TRACE_SMU_MSG_EXIT(false, 0, clk_mgr->base.ctx);
+ return false;
+}
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version)
+{
+ smu_print("SMU Get SMU version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetSmuVersion, 0, version)) {
+
+ smu_print("SMU version: %d\n", *version);
+
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match SMU14_DRIVER_IF_VERSION in smu14_driver_if.h */
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check driver if version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDriverIfVersion, 0, &response)) {
+
+ smu_print("SMU driver if version: %d\n", response);
+
+ if (response == SMU14_DRIVER_IF_VERSION)
+ return true;
+ }
+
+ return false;
+}
+
+/* Message output should match DALSMC_VERSION in dalsmc.h */
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr)
+{
+ uint32_t response = 0;
+
+ smu_print("SMU Check msg header version\n");
+
+ if (dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetMsgHeaderVersion, 0, &response)) {
+
+ smu_print("SMU msg header version: %d\n", response);
+
+ if (response == DALSMC_VERSION)
+ return true;
+ }
+
return false;
}
@@ -163,6 +220,22 @@ void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsi
smu_print("Numways for SubVP : %d\n", num_ways);
}
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
+{
+ smu_print("SMU Set DRAM addr high: %d\n", addr_high);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrHigh, addr_high, NULL);
+}
+
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low)
+{
+ smu_print("SMU Set DRAM addr low: %d\n", addr_low);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_SetDalDramAddrLow, addr_low, NULL);
+}
+
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
{
smu_print("SMU Transfer WM table DRAM 2 SMU\n");
@@ -348,3 +421,52 @@ unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr
return response;
}
+
+/*
+ * Frequency in MHz returned in lower 16 bits for valid DPM level
+ *
+ * Call with dpm_level = 0xFF to query features, return value will be:
+ * Bits 7:0 - number of DPM levels
+ * Bit 28 - 1 = auto DPM on
+ * Bit 29 - 1 = sweep DPM on
+ * Bit 30 - 1 = forced DPM on
+ * Bit 31 - 0 = discrete, 1 = fine-grained
+ *
+ * With fine-grained DPM, only min and max frequencies will be reported
+ *
+ * Returns 0 on failure
+ */
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type, lower 8 bits for DPM level */
+ uint32_t param = (clk << 16) | dpm_level;
+
+ smu_print("SMU Get dpm freq by index: clk = %d, dpm_level = %d\n", clk, dpm_level);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDpmFreqByIndex, param, &response);
+
+ smu_print("SMU dpm freq: %d MHz\n", response);
+
+ return response;
+}
+
+/* Returns the max DPM frequency in DC mode in MHz, 0 on failure */
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk)
+{
+ uint32_t response = 0;
+
+ /* bits 23:16 for clock type */
+ uint32_t param = clk << 16;
+
+ smu_print("SMU Get DC mode max DPM freq: clk = %d\n", clk);
+
+ dcn401_smu_send_msg_with_param(clk_mgr,
+ DALSMC_MSG_GetDcModeMaxDpmFreq, param, &response);
+
+ smu_print("SMU DC mode max DMP freq: %d MHz\n", response);
+
+ return response;
+}
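A minimal decode sketch for the dpm_level = 0xFF feature query described in the comment above; the local variable names are illustrative:

	uint32_t feat = dcn401_smu_get_dpm_freq_by_index(clk_mgr, clk, 0xFF);
	uint8_t num_levels = feat & 0xFF;	/* bits 7:0 */
	bool auto_dpm = feat & (1u << 28);
	bool sweep_dpm = feat & (1u << 29);
	bool forced_dpm = feat & (1u << 30);
	bool fine_grained = feat & (1u << 31);	/* only min/max reported */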
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
index e02eb1294b37..4f5ac603e822 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h
@@ -7,11 +7,17 @@
#include "os_types.h"
#include "core_types.h"
-#include "dcn32/dcn32_clk_mgr_smu_msg.h"
+struct clk_mgr_internal;
+
+bool dcn401_smu_get_smu_version(struct clk_mgr_internal *clk_mgr, unsigned int *version);
+bool dcn401_smu_check_driver_if_version(struct clk_mgr_internal *clk_mgr);
+bool dcn401_smu_check_msg_header_version(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_uclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool support);
void dcn401_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways);
+void dcn401_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high);
+void dcn401_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr_low);
void dcn401_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr);
void dcn401_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr);
unsigned int dcn401_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz);
@@ -29,5 +35,7 @@ bool dcn401_smu_set_subvp_uclk_fclk_hardmin(struct clk_mgr_internal *clk_mgr,
void dcn401_smu_set_min_deep_sleep_dcef_clk(struct clk_mgr_internal *clk_mgr, uint32_t freq_mhz);
void dcn401_smu_set_num_of_displays(struct clk_mgr_internal *clk_mgr, uint32_t num_displays);
unsigned int dcn401_smu_get_num_of_umc_channels(struct clk_mgr_internal *clk_mgr);
+unsigned int dcn401_smu_get_dc_mode_max_dpm_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk);
+unsigned int dcn401_smu_get_dpm_freq_by_index(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint8_t dpm_level);
#endif /* __DCN401_CLK_MGR_SMU_MSG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index dcc48b5238e5..8be9cbd43e18 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -60,7 +60,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "dm_helpers.h"
#include "mem_input.h"
@@ -83,7 +83,8 @@
#include "hw_sequencer_private.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_internal_types.h"
+#include "soc_and_ip_translator.h"
#endif
#include "dce/dmub_outbox.h"
@@ -147,10 +148,16 @@ static const char DC_BUILD_ID[] = "production-build";
/* Private functions */
-static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+static inline void elevate_update_type(
+ struct surface_update_descriptor *descriptor,
+ enum surface_update_type new_type,
+ enum dc_lock_descriptor new_locks
+)
{
- if (new > *original)
- *original = new;
+ if (new_type > descriptor->update_type)
+ descriptor->update_type = new_type;
+
+ descriptor->lock_descriptor |= new_locks;
}
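elevate_update_type now raises the update type monotonically while OR-ing in lock requirements, so repeated calls accumulate locks and never lower the type. A small sketch of the resulting semantics, using values from this patch:

	struct surface_update_descriptor d = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };

	elevate_update_type(&d, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
	elevate_update_type(&d, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_GLOBAL);
	/* d.update_type == UPDATE_TYPE_MED,
	 * d.lock_descriptor == LOCK_DESCRIPTOR_STREAM | LOCK_DESCRIPTOR_GLOBAL
	 */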
static void destroy_links(struct dc *dc)
@@ -296,6 +303,7 @@ static bool create_links(
link->link_id.id = CONNECTOR_ID_VIRTUAL;
link->link_id.enum_id = ENUM_ID_1;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
if (!link->link_enc) {
@@ -459,7 +467,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX) {
- if (dc->optimized_required || dc->wm_optimized_required) {
+ if (dc->optimized_required &&
+ (stream->adjust.v_total_max != adjust->v_total_max ||
+ stream->adjust.v_total_min != adjust->v_total_min)) {
stream->adjust.timing_adjust_pending = true;
return false;
}
@@ -490,9 +500,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
1,
*adjust);
stream->adjust.timing_adjust_pending = false;
+
+ if (dc->hwss.notify_cursor_offload_drr_update)
+ dc->hwss.notify_cursor_offload_drr_update(dc, dc->current_state, stream);
+
return true;
}
}
+
return false;
}
@@ -947,7 +962,9 @@ static void dc_destruct(struct dc *dc)
}
dc_destroy_resource_pool(dc);
-
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dc_destroy_soc_and_ip_translator(&dc->soc_and_ip_translator);
+#endif
if (dc->link_srv)
link_destroy_link_service(&dc->link_srv);
@@ -1138,8 +1155,8 @@ static bool dc_construct(struct dc *dc,
/* set i2c speed if not done by the respective dcnxxx__resource.c */
if (dc->caps.i2c_speed_in_khz_hdcp == 0)
dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
- if (dc->caps.max_optimizable_video_width == 0)
- dc->caps.max_optimizable_video_width = 5120;
+ if (dc->check_config.max_optimizable_video_width == 0)
+ dc->check_config.max_optimizable_video_width = 5120;
dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
if (!dc->clk_mgr)
goto fail;
@@ -1151,6 +1168,9 @@ static bool dc_construct(struct dc *dc,
dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
DC_FP_END();
}
+ dc->soc_and_ip_translator = dc_create_soc_and_ip_translator(dc_ctx->dce_version);
+ if (!dc->soc_and_ip_translator)
+ goto fail;
#endif
if (!create_links(dc, init_params->num_virtual_links))
@@ -2127,6 +2147,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb)) {
disable_vbios_mode_if_required(dc, context);
dc->hwss.enable_accelerated_mode(dc, context);
+ } else if (get_seamless_boot_stream_count(dc->current_state) > 0) {
+ /* If the previous stream still has the seamless boot flag applied,
+ * the OS has not actually performed a flip yet. If dc_commit_streams
+ * is received again at this point, re-check whether the actual HW
+ * timing matches what the OS provided.
+ */
+ disable_vbios_mode_if_required(dc, context);
}
if (dc->hwseq->funcs.wait_for_pipe_update_if_needed) {
@@ -2150,8 +2178,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
*/
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
if (dc->hwss.update_dsc_pg)
dc->hwss.update_dsc_pg(dc, context, false);
@@ -2180,8 +2208,14 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
}
+ for (i = 0; i < dc->current_state->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, dc->current_state, dc->current_state->streams[i], false);
+
result = dc->hwss.apply_ctx_to_hw(dc, context);
+ for (i = 0; i < context->stream_count; i++)
+ dc_dmub_srv_control_cursor_offload(dc, context, context->streams[i], true);
+
if (result != DC_OK) {
/* Application of dc_state to hardware stopped. */
dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
@@ -2221,8 +2255,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc->hwss.commit_subvp_config(dc, context);
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
@@ -2411,6 +2445,18 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params
goto fail;
}
+ /*
+ * If not already seamless, make transition seamless by inserting intermediate minimal transition
+ */
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(dc, dc->current_state, context)) {
+ res = commit_minimal_transition_state(dc, context);
+ if (res != DC_OK) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+
res = dc_commit_state_no_check(dc, context);
for (i = 0; i < params->stream_count; i++) {
@@ -2557,7 +2603,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
- dc->wm_optimized_required = false;
}
bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -2626,47 +2671,50 @@ static bool is_surface_in_context(
return false;
}
-static enum surface_update_type get_plane_info_update_type(const struct dc *dc, const struct dc_surface_update *u)
+static struct surface_update_descriptor get_plane_info_update_type(const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
- enum surface_update_type update_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->plane_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ // `plane_info` present means at least `STREAM` lock is required
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
if (u->plane_info->color_space != u->surface->color_space) {
update_flags->bits.color_space_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
update_flags->bits.horizontal_mirror_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->rotation != u->surface->rotation) {
update_flags->bits.rotation_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->format != u->surface->format) {
update_flags->bits.pixel_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->stereo_format != u->surface->stereo_format) {
update_flags->bits.stereo_format_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
update_flags->bits.per_pixel_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
update_flags->bits.global_alpha_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->plane_info->dcc.enable != u->surface->dcc.enable
@@ -2678,7 +2726,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* recalculate stutter period.
*/
update_flags->bits.dcc_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (resource_pixel_format_to_bpp(u->plane_info->format) !=
@@ -2687,30 +2735,41 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
* and DML calculation
*/
update_flags->bits.bpp_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
update_flags->bits.plane_size_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
+ const struct dc_tiling_info *tiling = &u->plane_info->tiling_info;
- if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
- sizeof(struct dc_tiling_info)) != 0) {
+ if (memcmp(tiling, &u->surface->tiling_info, sizeof(*tiling)) != 0) {
update_flags->bits.swizzle_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_MED);
-
- /* todo: below are HW dependent, we should add a hook to
- * DCE/N resource and validated there.
- */
- if (!dc->debug.skip_full_updated_if_possible) {
- /* swizzled mode requires RQ to be setup properly,
- * thus need to run DML to calculate RQ settings
- */
- update_flags->bits.bandwidth_change = 1;
- elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+
+ switch (tiling->gfxversion) {
+ case DcGfxVersion9:
+ case DcGfxVersion10:
+ case DcGfxVersion11:
+ if (tiling->gfx9.swizzle != DC_SW_LINEAR) {
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ break;
+ case DcGfxAddr3:
+ if (tiling->gfx_addr3.swizzle != DC_ADDR3_SW_LINEAR) {
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
+ break;
+ case DcGfxVersion7:
+ case DcGfxVersion8:
+ case DcGfxVersionUnknown:
+ default:
+ break;
}
}
@@ -2718,14 +2777,18 @@ static enum surface_update_type get_plane_info_update_type(const struct dc *dc,
return update_type;
}
-static enum surface_update_type get_scaling_info_update_type(
- const struct dc *dc,
+static struct surface_update_descriptor get_scaling_info_update_type(
+ const struct dc_check_config *check_config,
const struct dc_surface_update *u)
{
union surface_update_flags *update_flags = &u->surface->update_flags;
+ struct surface_update_descriptor update_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (!u->scaling_info)
- return UPDATE_TYPE_FAST;
+ return update_type;
+
+ // `scaling_info` present means at least `STREAM` lock is required
+ elevate_update_type(&update_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
if (u->scaling_info->src_rect.width != u->surface->src_rect.width
|| u->scaling_info->src_rect.height != u->surface->src_rect.height
@@ -2736,6 +2799,7 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->scaling_quality.integer_scaling !=
u->surface->scaling_quality.integer_scaling) {
update_flags->bits.scaling_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
if (u->scaling_info->src_rect.width > u->surface->src_rect.width
|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
@@ -2749,7 +2813,7 @@ static enum surface_update_type get_scaling_info_update_type(
/* Making dst rect smaller requires a bandwidth change */
update_flags->bits.bandwidth_change = 1;
- if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ if (u->scaling_info->src_rect.width > check_config->max_optimizable_video_width &&
(u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
/* Changing clip size of a large surface may result in MPC slice count change */
@@ -2761,123 +2825,109 @@ static enum surface_update_type get_scaling_info_update_type(
|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
- || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ || u->scaling_info->dst_rect.y != u->surface->dst_rect.y) {
+ elevate_update_type(&update_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
update_flags->bits.position_change = 1;
+ }
- /* process every update flag before returning */
- if (update_flags->bits.clock_change
- || update_flags->bits.bandwidth_change
- || update_flags->bits.scaling_change)
- return UPDATE_TYPE_FULL;
-
- if (update_flags->bits.position_change)
- return UPDATE_TYPE_MED;
-
- return UPDATE_TYPE_FAST;
+ return update_type;
}
-static enum surface_update_type det_surface_update(const struct dc *dc,
- const struct dc_surface_update *u)
+static struct surface_update_descriptor det_surface_update(
+ const struct dc_check_config *check_config,
+ struct dc_surface_update *u)
{
- const struct dc_state *context = dc->current_state;
- enum surface_update_type type;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
union surface_update_flags *update_flags = &u->surface->update_flags;
- if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ if (u->surface->force_full_update) {
update_flags->raw = 0xFFFFFFFF;
- return UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ return overall_type;
}
update_flags->raw = 0; // Reset all flags
- type = get_plane_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ struct surface_update_descriptor inner_type = get_plane_info_update_type(u);
+
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
- type = get_scaling_info_update_type(dc, u);
- elevate_update_type(&overall_type, type);
+ inner_type = get_scaling_info_update_type(check_config, u);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
if (u->flip_addr) {
update_flags->bits.addr_update = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+
if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
update_flags->bits.tmz_changed = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
}
- if (u->in_transfer_func)
+ if (u->in_transfer_func) {
update_flags->bits.in_transfer_func_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->input_csc_color_matrix)
+ if (u->input_csc_color_matrix) {
update_flags->bits.input_csc_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->coeff_reduction_factor)
+ if (u->coeff_reduction_factor) {
update_flags->bits.coeff_reduction_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->gamut_remap_matrix)
+ if (u->gamut_remap_matrix) {
update_flags->bits.gamut_remap_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- if (u->blend_tf)
+ if (u->blend_tf || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
-
- if (u->gamma) {
- enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
-
- if (u->plane_info)
- format = u->plane_info->format;
- else
- format = u->surface->format;
-
- if (dce_use_lut(format))
- update_flags->bits.gamma_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
- if (u->lut3d_func || u->func_shaper)
+ if (u->lut3d_func || u->func_shaper) {
update_flags->bits.lut_3d = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+ // TODO: Should be fast?
update_flags->bits.hdr_mult = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED, LOCK_DESCRIPTOR_STREAM);
}
if (u->sdr_white_level_nits)
if (u->sdr_white_level_nits != u->surface->sdr_white_level_nits) {
+ // TODO: Should be fast?
update_flags->bits.sdr_white_level_nits = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (u->cm2_params) {
- if ((u->cm2_params->component_settings.shaper_3dlut_setting
- != u->surface->mcm_shaper_3dlut_setting)
- || (u->cm2_params->component_settings.lut1d_enable
- != u->surface->mcm_lut1d_enable))
+ if (u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting
+ || u->cm2_params->component_settings.lut1d_enable != u->surface->mcm_lut1d_enable
+ || u->cm2_params->cm2_luts.lut3d_data.lut3d_src != u->surface->mcm_luts.lut3d_data.lut3d_src) {
update_flags->bits.mcm_transfer_function_enable_change = 1;
- if (u->cm2_params->cm2_luts.lut3d_data.lut3d_src
- != u->surface->mcm_luts.lut3d_data.lut3d_src)
- update_flags->bits.mcm_transfer_function_enable_change = 1;
- }
- if (update_flags->bits.in_transfer_func_change) {
- type = UPDATE_TYPE_MED;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ }
}
if (update_flags->bits.lut_3d &&
u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
- }
- if (update_flags->bits.mcm_transfer_function_enable_change) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
- if (dc->debug.enable_legacy_fast_update &&
+ if (check_config->enable_legacy_fast_update &&
(update_flags->bits.gamma_change ||
update_flags->bits.gamut_remap_change ||
update_flags->bits.input_csc_change ||
update_flags->bits.coeff_reduction_change)) {
- type = UPDATE_TYPE_FULL;
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
return overall_type;
}
@@ -2905,32 +2955,26 @@ static void force_immediate_gsl_plane_flip(struct dc *dc, struct dc_surface_upda
}
}
-static enum surface_update_type check_update_surfaces_for_stream(
- struct dc *dc,
+static struct surface_update_descriptor check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type overall_type = UPDATE_TYPE_FAST;
-
- if (dc->idle_optimizations_allowed || dc_can_clear_cursor_limit(dc))
- overall_type = UPDATE_TYPE_FULL;
-
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- overall_type = UPDATE_TYPE_FULL;
+ struct surface_update_descriptor overall_type = { UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_NONE };
if (stream_update && stream_update->pending_test_pattern) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
if (stream_update && stream_update->hw_cursor_req) {
- overall_type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
/* some stream updates require passive update */
if (stream_update) {
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+
union stream_update_flags *su_flags = &stream_update->stream->update_flags;
if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
@@ -2938,14 +2982,16 @@ static enum surface_update_type check_update_surfaces_for_stream(
stream_update->integer_scaling_update)
su_flags->bits.scaling = 1;
- if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (check_config->enable_legacy_fast_update && stream_update->out_transfer_func)
su_flags->bits.out_tf = 1;
if (stream_update->abm_level)
su_flags->bits.abm_level = 1;
- if (stream_update->dpms_off)
+ if (stream_update->dpms_off) {
su_flags->bits.dpms_off = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL | LOCK_DESCRIPTOR_LINK);
+ }
if (stream_update->gamut_remap)
su_flags->bits.gamut_remap = 1;
@@ -2973,24 +3019,27 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->output_color_space)
su_flags->bits.out_csc = 1;
- if (su_flags->raw != 0)
- overall_type = UPDATE_TYPE_FULL;
+ // TODO: Make each elevation explicit, so as not to override a fast stream update in crtc_timing_adjust
+ if (su_flags->raw)
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- if (stream_update->output_csc_transform)
+ // Non-global cases
+ if (stream_update->output_csc_transform) {
su_flags->bits.out_csc = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
- /* Output transfer function changes do not require bandwidth recalculation,
- * so don't trigger a full update
- */
- if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ if (!check_config->enable_legacy_fast_update && stream_update->out_transfer_func) {
su_flags->bits.out_tf = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
+ }
}
- for (i = 0 ; i < surface_count; i++) {
- enum surface_update_type type =
- det_surface_update(dc, &updates[i]);
+ for (int i = 0 ; i < surface_count; i++) {
+ struct surface_update_descriptor inner_type =
+ det_surface_update(check_config, &updates[i]);
- elevate_update_type(&overall_type, type);
+ elevate_update_type(&overall_type, inner_type.update_type, inner_type.lock_descriptor);
}
return overall_type;
@@ -3001,46 +3050,18 @@ static enum surface_update_type check_update_surfaces_for_stream(
*
* See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
*/
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status)
+ struct dc_stream_update *stream_update)
{
- int i;
- enum surface_update_type type;
-
if (stream_update)
stream_update->stream->update_flags.raw = 0;
- for (i = 0; i < surface_count; i++)
+ for (size_t i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0;
- type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
- if (type == UPDATE_TYPE_FULL) {
- if (stream_update) {
- uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
- stream_update->stream->update_flags.raw = 0xFFFFFFFF;
- stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
- }
- for (i = 0; i < surface_count; i++)
- updates[i].surface->update_flags.raw = 0xFFFFFFFF;
- }
-
- if (type == UPDATE_TYPE_FAST) {
- // If there's an available clock comparator, we use that.
- if (dc->clk_mgr->funcs->are_clock_states_equal) {
- if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
- dc->optimized_required = true;
- // Else we fallback to mem compare.
- } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
- dc->optimized_required = true;
- }
-
- dc->optimized_required |= dc->wm_optimized_required;
- }
-
- return type;
+ return check_update_surfaces_for_stream(check_config, updates, surface_count, stream_update);
}
static struct dc_stream_status *stream_get_status(
@@ -3293,6 +3314,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->adaptive_sync_infopacket)
stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
+ if (update->avi_infopacket)
+ stream->avi_infopacket = *update->avi_infopacket;
+
if (update->dither_option)
stream->dither_option = *update->dither_option;
@@ -3368,7 +3392,11 @@ static void restore_planes_and_stream_state(
for (i = 0; i < status->plane_count; i++) {
dc_plane_copy_config(status->plane_states[i], &scratch->plane_states[i]);
}
+
+ // refcount is persistent
+ struct kref temp_refcount = stream->refcount;
*stream = scratch->stream_state;
+ stream->refcount = temp_refcount;
}
/**
@@ -3390,7 +3418,7 @@ static void update_seamless_boot_flags(struct dc *dc,
int surface_count,
struct dc_stream_state *stream)
{
- if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ if (get_seamless_boot_stream_count(context) > 0 && (surface_count > 0 || stream->dpms_off)) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
* bandwidth. Important to note that it is expected UEFI will
@@ -3406,6 +3434,13 @@ static void update_seamless_boot_flags(struct dc *dc,
}
}
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream);
+
/**
* update_planes_and_stream_state() - The function takes planes and stream
* updates as inputs and determines the appropriate update type. If update type
@@ -3452,7 +3487,10 @@ static bool update_planes_and_stream_state(struct dc *dc,
context = dc->current_state;
update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
+ &dc->check_config, srf_updates, surface_count, stream_update).update_type;
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ update_type = UPDATE_TYPE_FULL;
+
/* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
* E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
* Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
@@ -3484,6 +3522,16 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
}
+ if (update_type == UPDATE_TYPE_FULL) {
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
+ for (i = 0; i < surface_count; i++)
+ srf_updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
+
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
@@ -3587,7 +3635,8 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->vsp_infopacket ||
stream_update->hfvsif_infopacket ||
stream_update->adaptive_sync_infopacket ||
- stream_update->vtem_infopacket) {
+ stream_update->vtem_infopacket ||
+ stream_update->avi_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -4128,7 +4177,7 @@ static void commit_planes_for_stream(struct dc *dc,
if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
if (top_pipe_to_program &&
top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -4149,22 +4198,22 @@ static void commit_planes_for_stream(struct dc *dc,
}
if (dc->hwseq->funcs.wait_for_pipe_update_if_needed)
- dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type == UPDATE_TYPE_FAST);
+ dc->hwseq->funcs.wait_for_pipe_update_if_needed(dc, top_pipe_to_program, update_type < UPDATE_TYPE_FULL);
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
dc->hwss.interdependent_update_lock(dc, context, true);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, true);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, true);
/* Lock the top pipe while updating plane addrs, since freesync requires
* plane addr update event triggers to be synchronized.
@@ -4207,9 +4256,8 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
-
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
return;
}
@@ -4398,7 +4446,7 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg,
CRTC_STATE_VACTIVE);
- if (should_use_dmub_lock(stream->link)) {
+ if (should_use_dmub_inbox1_lock(dc, stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -4446,13 +4494,13 @@ static void commit_planes_for_stream(struct dc *dc,
if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
} else {
if (dc->hwss.subvp_pipe_control_lock)
dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
- if (dc->hwss.fams2_global_control_lock)
- dc->hwss.fams2_global_control_lock(dc, context, false);
+ if (dc->hwss.dmub_hw_control_lock)
+ dc->hwss.dmub_hw_control_lock(dc, context, false);
}
// Fire manual trigger only when bottom plane is flipped
@@ -4468,6 +4516,8 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->plane_state->skip_manual_trigger)
continue;
+ if (dc->hwss.program_cursor_offload_now)
+ dc->hwss.program_cursor_offload_now(dc, pipe_ctx);
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
}
@@ -4973,7 +5023,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update,
}
}
-static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+static bool fast_updates_exist(const struct dc_fast_update *fast_update, int surface_count)
{
int i;
@@ -5014,18 +5064,44 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_
return false;
}
-static bool full_update_required(struct dc *dc,
- struct dc_surface_update *srf_updates,
+static bool full_update_required_weak(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
-
- int i;
- struct dc_stream_status *stream_status;
const struct dc_state *context = dc->current_state;
+ if (srf_updates)
+ for (int i = 0; i < surface_count; i++)
+ if (!is_surface_in_context(context, srf_updates[i].surface))
+ return true;
- for (i = 0; i < surface_count; i++) {
+ if (stream) {
+ const struct dc_stream_status *stream_status = dc_stream_get_status_const(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+ if (dc->idle_optimizations_allowed)
+ return true;
+
+ if (dc_can_clear_cursor_limit(dc))
+ return true;
+
+ return false;
+}
+
+static bool full_update_required(
+ const struct dc *dc,
+ const struct dc_surface_update *srf_updates,
+ int surface_count,
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
+{
+ if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
+ return true;
+
+ for (int i = 0; i < surface_count; i++) {
if (srf_updates &&
(srf_updates[i].plane_info ||
srf_updates[i].scaling_info ||
@@ -5041,8 +5117,7 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
(srf_updates[i].cm2_params &&
(srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
- srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable)) ||
- !is_surface_in_context(context, srf_updates[i].surface)))
+ srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable))))
return true;
}
@@ -5059,6 +5134,7 @@ static bool full_update_required(struct dc *dc,
stream_update->hfvsif_infopacket ||
stream_update->vtem_infopacket ||
stream_update->adaptive_sync_infopacket ||
+ stream_update->avi_infopacket ||
stream_update->dpms_off ||
stream_update->allow_freesync ||
stream_update->vrr_active_variable ||
@@ -5077,154 +5153,21 @@ static bool full_update_required(struct dc *dc,
stream_update->hw_cursor_req))
return true;
- if (stream) {
- stream_status = dc_stream_get_status(stream);
- if (stream_status == NULL || stream_status->plane_count != surface_count)
- return true;
- }
- if (dc->idle_optimizations_allowed)
- return true;
-
- if (dc_can_clear_cursor_limit(dc))
- return true;
-
return false;
}
-static bool fast_update_only(struct dc *dc,
- struct dc_fast_update *fast_update,
- struct dc_surface_update *srf_updates,
+static bool fast_update_only(
+ const struct dc *dc,
+ const struct dc_fast_update *fast_update,
+ const struct dc_surface_update *srf_updates,
int surface_count,
- struct dc_stream_update *stream_update,
- struct dc_stream_state *stream)
+ const struct dc_stream_update *stream_update,
+ const struct dc_stream_state *stream)
{
return fast_updates_exist(fast_update, surface_count)
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
-static bool update_planes_and_stream_v1(struct dc *dc,
- struct dc_surface_update *srf_updates, int surface_count,
- struct dc_stream_state *stream,
- struct dc_stream_update *stream_update,
- struct dc_state *state)
-{
- const struct dc_stream_status *stream_status;
- enum surface_update_type update_type;
- struct dc_state *context;
- struct dc_context *dc_ctx = dc->ctx;
- int i, j;
- struct dc_fast_update fast_update[MAX_SURFACES] = {0};
-
- dc_exit_ips_for_hw_access(dc);
-
- populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
- stream_status = dc_stream_get_status(stream);
- context = dc->current_state;
-
- update_type = dc_check_update_surfaces_for_stream(
- dc, srf_updates, surface_count, stream_update, stream_status);
- /* It is possible to receive a flip for one plane while there are multiple flip_immediate planes in the same stream.
- * E.g. Desktop and MPO plane are flip_immediate but only the MPO plane received a flip
- * Force the other flip_immediate planes to flip so GSL doesn't wait for a flip that won't come.
- */
- force_immediate_gsl_plane_flip(dc, srf_updates, surface_count);
-
- if (update_type >= UPDATE_TYPE_FULL) {
-
- /* initialize scratch memory for building context */
- context = dc_state_create_copy(state);
- if (context == NULL) {
- DC_ERROR("Failed to allocate new validate context!\n");
- return false;
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
- new_pipe->plane_state->force_full_update = true;
- }
- } else if (update_type == UPDATE_TYPE_FAST) {
- /*
- * Previous frame finished and HW is ready for optimization.
- */
- dc_post_update_surfaces_to_stream(dc);
- }
-
- for (i = 0; i < surface_count; i++) {
- struct dc_plane_state *surface = srf_updates[i].surface;
-
- copy_surface_update_to_plane(surface, &srf_updates[i]);
-
- if (update_type >= UPDATE_TYPE_MED) {
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe_ctx =
- &context->res_ctx.pipe_ctx[j];
-
- if (pipe_ctx->plane_state != surface)
- continue;
-
- resource_build_scaling_params(pipe_ctx);
- }
- }
- }
-
- copy_stream_update_to_stream(dc, context, stream, stream_update);
-
- if (update_type >= UPDATE_TYPE_FULL) {
- if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_state_release(context);
- return false;
- }
- }
-
- TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
-
- if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update) {
- commit_planes_for_stream_fast(dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- } else {
- commit_planes_for_stream(
- dc,
- srf_updates,
- surface_count,
- stream,
- stream_update,
- update_type,
- context);
- }
- /*update current_State*/
- if (dc->current_state != context) {
-
- struct dc_state *old = dc->current_state;
-
- dc->current_state = context;
- dc_state_release(old);
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
- pipe_ctx->plane_state->force_full_update = false;
- }
- }
-
- /* Legacy optimization path for DCE. */
- if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
- dc_post_update_surfaces_to_stream(dc);
- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
- }
- return true;
-}
-
static bool update_planes_and_stream_v2(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -5282,7 +5225,7 @@ static bool update_planes_and_stream_v2(struct dc *dc,
commit_minimal_transition_state_in_dc_update(dc, context, stream,
srf_updates, surface_count);
- if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
+ if (is_fast_update_only && !dc->check_config.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5325,7 +5268,7 @@ static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
stream_update);
if (fast_update_only(dc, fast_update, srf_updates, surface_count,
stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update)
+ !dc->check_config.enable_legacy_fast_update)
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -5451,7 +5394,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
* specially handle compatibility problems with transitions among those
* features as they are now transparent to the new sequence.
*/
- if (dc->ctx->dce_version >= DCN_VERSION_4_01)
+ if (dc->ctx->dce_version >= DCN_VERSION_4_01 || dc->ctx->dce_version == DCN_VERSION_3_2 ||
+ dc->ctx->dce_version == DCN_VERSION_3_21)
ret = update_planes_and_stream_v3(dc, srf_updates,
surface_count, stream, stream_update);
else
@@ -5482,12 +5426,10 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (dc->ctx->dce_version >= DCN_VERSION_4_01) {
ret = update_planes_and_stream_v3(dc, srf_updates, surface_count,
stream, stream_update);
- } else if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ } else {
ret = update_planes_and_stream_v2(dc, srf_updates, surface_count,
stream, stream_update);
- } else
- ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
- stream_update, state);
+ }
if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2)
clear_update_flags(srf_updates, surface_count, stream);
@@ -5727,8 +5669,8 @@ void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const
subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe);
}
}
-
- DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
+ if (!dc->caps.is_apu)
+ DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n",
__func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2],
subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name);
@@ -6038,6 +5980,101 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
return true;
}
+bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits,
+ uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline)
+{
+ bool status = false;
+ struct dc *dc = link->ctx->dc;
+ union dmub_rb_cmd cmd;
+ uint8_t otg_inst = 0;
+ unsigned int panel_inst = 0;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
+
+ // get panel_inst
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return status;
+
+ // get otg_inst
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx &&
+ res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ pipe_ctx = &res_ctx->pipe_ctx[i];
+ //TODO: refactor for multi-eDP support
+ break;
+ }
+ }
+
+ if (pipe_ctx)
+ otg_inst = pipe_ctx->stream_res.tg->inst;
+
+ // before enabling smart power OLED, call set_pipe so DMUB can apply the ABM config
+ if (enable) {
+ if (dc->hwss.set_pipe && pipe_ctx)
+ dc->hwss.set_pipe(pipe_ctx);
+ }
+
+ // fill in cmd
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.smart_power_oled_enable.header.type = DMUB_CMD__SMART_POWER_OLED;
+ cmd.smart_power_oled_enable.header.sub_type = DMUB_CMD__SMART_POWER_OLED_ENABLE;
+ cmd.smart_power_oled_enable.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_smart_power_oled_enable_data) - sizeof(struct dmub_cmd_header);
+ cmd.smart_power_oled_enable.header.ret_status = 1;
+ cmd.smart_power_oled_enable.data.enable = enable;
+ cmd.smart_power_oled_enable.data.panel_inst = panel_inst;
+ cmd.smart_power_oled_enable.data.peak_nits = peak_nits;
+ cmd.smart_power_oled_enable.data.otg_inst = otg_inst;
+ cmd.smart_power_oled_enable.data.digfe_inst = link->link_enc->preferred_engine;
+ cmd.smart_power_oled_enable.data.digbe_inst = link->link_enc->transmitter;
+
+ cmd.smart_power_oled_enable.data.debugcontrol = debug_control;
+ cmd.smart_power_oled_enable.data.triggerline = triggerline;
+ cmd.smart_power_oled_enable.data.fixed_max_cll = fixed_CLL;
+
+ // send cmd
+ status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return status;
+}
+
+bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL)
+{
+ struct dc *dc = link->ctx->dc;
+ union dmub_rb_cmd cmd;
+ bool status = false;
+ unsigned int panel_inst = 0;
+
+ // get panel_inst
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return status;
+
+ // fill in cmd
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.smart_power_oled_getmaxcll.header.type = DMUB_CMD__SMART_POWER_OLED;
+ cmd.smart_power_oled_getmaxcll.header.sub_type = DMUB_CMD__SMART_POWER_OLED_GETMAXCLL;
+ cmd.smart_power_oled_getmaxcll.header.payload_bytes = sizeof(cmd.smart_power_oled_getmaxcll.data);
+ cmd.smart_power_oled_getmaxcll.header.ret_status = 1;
+
+ cmd.smart_power_oled_getmaxcll.data.input.panel_inst = panel_inst;
+
+ // send cmd and wait for reply
+ status = dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+
+ if (status)
+ *pCurrent_MaxCLL = cmd.smart_power_oled_getmaxcll.data.output.current_max_cll;
+ else
+ *pCurrent_MaxCLL = 0;
+
+ return status;
+}
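/*
 * A minimal usage sketch, assuming a DM-level caller that already holds
 * the eDP dc_link; the caller, the 600-nit peak, and the zeroed
 * debug/trigger parameters are illustrative, not part of this patch.
 */
static bool example_smart_power_oled_bringup(struct dc_link *edp_link)
{
	unsigned int max_cll = 0;

	/* enable with no debug overrides, fixed CLL, or trigger line */
	if (!dc_smart_power_oled_enable(edp_link, true, 600, 0, 0, 0))
		return false;

	/* read back the MaxCLL value DMUB is currently tracking */
	return dc_smart_power_oled_get_max_cll(edp_link, &max_cll);
}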
+
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
uint8_t dpia_port_index)
{
@@ -6452,7 +6489,7 @@ bool dc_is_cursor_limit_pending(struct dc *dc)
return false;
}
-bool dc_can_clear_cursor_limit(struct dc *dc)
+bool dc_can_clear_cursor_limit(const struct dc *dc)
{
uint32_t i;
@@ -6463,3 +6500,594 @@ bool dc_can_clear_cursor_limit(struct dc *dc)
return false;
}
+
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct timing_generator *tg = NULL;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ if (dc->res_pool->timing_generators[i] &&
+ dc->res_pool->timing_generators[i]->inst == primary_otg_inst) {
+ tg = dc->res_pool->timing_generators[i];
+ break;
+ }
+ }
+
+ dc_exit_ips_for_hw_access(dc);
+ if (dc->hwss.get_underflow_debug_data)
+ dc->hwss.get_underflow_debug_data(dc, tg, out_data);
+}
+
+void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst,
+ struct power_features *out_data)
+{
+ out_data->uclk_p_state = dc->current_state->clk_mgr->clks.p_state_change_support;
+ out_data->fams = dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
+}
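/*
 * A minimal sketch pairing the two debug helpers above, assuming
 * otg_inst was resolved by the caller; the stack-allocated out
 * structures and this wrapper are illustrative, not part of the patch.
 */
static void example_collect_underflow_context(struct dc *dc, int otg_inst)
{
	struct dc_underflow_debug_data debug_data = {0};
	struct power_features power = {0};

	dc_get_underflow_debug_data_for_otg(dc, otg_inst, &debug_data);
	dc_get_power_feature_status(dc, otg_inst, &power);

	/* both snapshots can now be logged side by side for triage */
}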
+
+bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state)
+{
+ struct dc_state *context;
+ struct resource_context *res_ctx;
+ int i;
+
+ if (!dc || !dc->current_state || !state) {
+ if (state)
+ state->state_valid = false;
+ return false;
+ }
+
+ /* Initialize the state structure */
+ memset(state, 0, sizeof(struct dc_register_software_state));
+
+ context = dc->current_state;
+ res_ctx = &context->res_ctx;
+
+ /* Count active pipes and streams */
+ state->active_pipe_count = 0;
+ state->active_stream_count = context->stream_count;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (res_ctx->pipe_ctx[i].stream)
+ state->active_pipe_count++;
+ }
+
+ /* Capture HUBP programming state for each pipe */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ state->hubp[i].valid_stream = false;
+ if (!pipe_ctx->stream)
+ continue;
+
+ state->hubp[i].valid_stream = true;
+
+ /* HUBP register programming variables */
+ if (pipe_ctx->stream_res.tg)
+ state->hubp[i].vtg_sel = pipe_ctx->stream_res.tg->inst;
+
+ state->hubp[i].hubp_clock_enable = (pipe_ctx->plane_res.hubp != NULL) ? 1 : 0;
+
+ state->hubp[i].valid_plane_state = false;
+ if (pipe_ctx->plane_state) {
+ state->hubp[i].valid_plane_state = true;
+ state->hubp[i].surface_pixel_format = pipe_ctx->plane_state->format;
+ state->hubp[i].rotation_angle = pipe_ctx->plane_state->rotation;
+ state->hubp[i].h_mirror_en = pipe_ctx->plane_state->horizontal_mirror ? 1 : 0;
+
+ /* Surface size */
+ if (pipe_ctx->plane_state->plane_size.surface_size.width > 0) {
+ state->hubp[i].surface_size_width = pipe_ctx->plane_state->plane_size.surface_size.width;
+ state->hubp[i].surface_size_height = pipe_ctx->plane_state->plane_size.surface_size.height;
+ }
+
+ /* Viewport dimensions from scaler data */
+ if (pipe_ctx->plane_state->src_rect.width > 0) {
+ state->hubp[i].pri_viewport_width = pipe_ctx->plane_state->src_rect.width;
+ state->hubp[i].pri_viewport_height = pipe_ctx->plane_state->src_rect.height;
+ state->hubp[i].pri_viewport_x_start = pipe_ctx->plane_state->src_rect.x;
+ state->hubp[i].pri_viewport_y_start = pipe_ctx->plane_state->src_rect.y;
+ }
+
+ /* DCC settings */
+ state->hubp[i].surface_dcc_en = (pipe_ctx->plane_state->dcc.enable) ? 1 : 0;
+ state->hubp[i].surface_dcc_ind_64b_blk = pipe_ctx->plane_state->dcc.independent_64b_blks;
+ state->hubp[i].surface_dcc_ind_128b_blk = pipe_ctx->plane_state->dcc.dcc_ind_blk;
+
+ /* Surface pitch */
+ state->hubp[i].surface_pitch = pipe_ctx->plane_state->plane_size.surface_pitch;
+ state->hubp[i].meta_pitch = pipe_ctx->plane_state->dcc.meta_pitch;
+ state->hubp[i].chroma_pitch = pipe_ctx->plane_state->plane_size.chroma_pitch;
+ state->hubp[i].meta_pitch_c = pipe_ctx->plane_state->dcc.meta_pitch_c;
+
+ /* Surface addresses - primary */
+ state->hubp[i].primary_surface_address_low = pipe_ctx->plane_state->address.grph.addr.low_part;
+ state->hubp[i].primary_surface_address_high = pipe_ctx->plane_state->address.grph.addr.high_part;
+ state->hubp[i].primary_meta_surface_address_low = pipe_ctx->plane_state->address.grph.meta_addr.low_part;
+ state->hubp[i].primary_meta_surface_address_high = pipe_ctx->plane_state->address.grph.meta_addr.high_part;
+
+ /* TMZ settings */
+ state->hubp[i].primary_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
+ state->hubp[i].primary_meta_surface_tmz = pipe_ctx->plane_state->address.tmz_surface;
+
+ /* Tiling configuration */
+ state->hubp[i].min_dc_gfx_version9 = false;
+ if (pipe_ctx->plane_state->tiling_info.gfxversion >= DcGfxVersion9) {
+ state->hubp[i].min_dc_gfx_version9 = true;
+ state->hubp[i].sw_mode = pipe_ctx->plane_state->tiling_info.gfx9.swizzle;
+ state->hubp[i].num_pipes = pipe_ctx->plane_state->tiling_info.gfx9.num_pipes;
+ state->hubp[i].num_banks = pipe_ctx->plane_state->tiling_info.gfx9.num_banks;
+ state->hubp[i].pipe_interleave = pipe_ctx->plane_state->tiling_info.gfx9.pipe_interleave;
+ state->hubp[i].num_shader_engines = pipe_ctx->plane_state->tiling_info.gfx9.num_shader_engines;
+ state->hubp[i].num_rb_per_se = pipe_ctx->plane_state->tiling_info.gfx9.num_rb_per_se;
+ state->hubp[i].num_pkrs = pipe_ctx->plane_state->tiling_info.gfx9.num_pkrs;
+ }
+ }
+
+ /* DML Request Size Configuration */
+ if (pipe_ctx->rq_regs.rq_regs_l.chunk_size > 0) {
+ state->hubp[i].rq_chunk_size = pipe_ctx->rq_regs.rq_regs_l.chunk_size;
+ state->hubp[i].rq_min_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_chunk_size;
+ state->hubp[i].rq_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size;
+ state->hubp[i].rq_min_meta_chunk_size = pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size;
+ state->hubp[i].rq_dpte_group_size = pipe_ctx->rq_regs.rq_regs_l.dpte_group_size;
+ state->hubp[i].rq_mpte_group_size = pipe_ctx->rq_regs.rq_regs_l.mpte_group_size;
+ state->hubp[i].rq_swath_height_l = pipe_ctx->rq_regs.rq_regs_l.swath_height;
+ state->hubp[i].rq_pte_row_height_l = pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear;
+ }
+
+ /* Chroma request size configuration */
+ if (pipe_ctx->rq_regs.rq_regs_c.chunk_size > 0) {
+ state->hubp[i].rq_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.chunk_size;
+ state->hubp[i].rq_min_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_chunk_size;
+ state->hubp[i].rq_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.meta_chunk_size;
+ state->hubp[i].rq_min_meta_chunk_size_c = pipe_ctx->rq_regs.rq_regs_c.min_meta_chunk_size;
+ state->hubp[i].rq_dpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.dpte_group_size;
+ state->hubp[i].rq_mpte_group_size_c = pipe_ctx->rq_regs.rq_regs_c.mpte_group_size;
+ state->hubp[i].rq_swath_height_c = pipe_ctx->rq_regs.rq_regs_c.swath_height;
+ state->hubp[i].rq_pte_row_height_c = pipe_ctx->rq_regs.rq_regs_c.pte_row_height_linear;
+ }
+
+ /* DML expansion modes */
+ state->hubp[i].drq_expansion_mode = pipe_ctx->rq_regs.drq_expansion_mode;
+ state->hubp[i].prq_expansion_mode = pipe_ctx->rq_regs.prq_expansion_mode;
+ state->hubp[i].mrq_expansion_mode = pipe_ctx->rq_regs.mrq_expansion_mode;
+ state->hubp[i].crq_expansion_mode = pipe_ctx->rq_regs.crq_expansion_mode;
+
+ /* DML DLG parameters - nominal */
+ state->hubp[i].dst_y_per_vm_vblank = pipe_ctx->dlg_regs.dst_y_per_vm_vblank;
+ state->hubp[i].dst_y_per_row_vblank = pipe_ctx->dlg_regs.dst_y_per_row_vblank;
+ state->hubp[i].dst_y_per_vm_flip = pipe_ctx->dlg_regs.dst_y_per_vm_flip;
+ state->hubp[i].dst_y_per_row_flip = pipe_ctx->dlg_regs.dst_y_per_row_flip;
+
+ /* DML prefetch settings */
+ state->hubp[i].dst_y_prefetch = pipe_ctx->dlg_regs.dst_y_prefetch;
+ state->hubp[i].vratio_prefetch = pipe_ctx->dlg_regs.vratio_prefetch;
+ state->hubp[i].vratio_prefetch_c = pipe_ctx->dlg_regs.vratio_prefetch_c;
+
+ /* TTU parameters */
+ state->hubp[i].qos_level_low_wm = pipe_ctx->ttu_regs.qos_level_low_wm;
+ state->hubp[i].qos_level_high_wm = pipe_ctx->ttu_regs.qos_level_high_wm;
+ state->hubp[i].qos_level_flip = pipe_ctx->ttu_regs.qos_level_flip;
+ state->hubp[i].min_ttu_vblank = pipe_ctx->ttu_regs.min_ttu_vblank;
+ }
+
+ /* Capture HUBBUB programming state */
+ if (dc->res_pool->hubbub) {
+ /* Individual DET buffer sizes - software state variables that program DET registers */
+ for (i = 0; i < 4 && i < dc->res_pool->pipe_count; i++) {
+ uint32_t det_size = res_ctx->pipe_ctx[i].det_buffer_size_kb;
+ switch (i) {
+ case 0:
+ state->hubbub.det0_size = det_size;
+ break;
+ case 1:
+ state->hubbub.det1_size = det_size;
+ break;
+ case 2:
+ state->hubbub.det2_size = det_size;
+ break;
+ case 3:
+ state->hubbub.det3_size = det_size;
+ break;
+ }
+ }
+
+ /* Compression buffer configuration - software state that programs COMPBUF_SIZE register */
+ // TODO: Handle logic for legacy DCN pre-DCN401
+ state->hubbub.compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
+ }
+
+ /* Capture DPP programming state for each pipe */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ state->dpp[i].dpp_clock_enable = (pipe_ctx->plane_res.dpp != NULL) ? 1 : 0;
+
+ if (pipe_ctx->plane_state && pipe_ctx->plane_res.scl_data.recout.width > 0) {
+ /* Access dscl_prog_data directly - this contains the actual software state used for register programming */
+ struct dscl_prog_data *dscl_data = &pipe_ctx->plane_res.scl_data.dscl_prog_data;
+
+ /* Recout (Rectangle of Interest) configuration - software state that programs RECOUT registers */
+ state->dpp[i].recout_start_x = dscl_data->recout.x;
+ state->dpp[i].recout_start_y = dscl_data->recout.y;
+ state->dpp[i].recout_width = dscl_data->recout.width;
+ state->dpp[i].recout_height = dscl_data->recout.height;
+
+ /* MPC (Multiple Pipe/Plane Combiner) size - software state that programs MPC_SIZE registers */
+ state->dpp[i].mpc_width = dscl_data->mpc_size.width;
+ state->dpp[i].mpc_height = dscl_data->mpc_size.height;
+
+ /* DSCL mode - software state that programs SCL_MODE registers */
+ state->dpp[i].dscl_mode = dscl_data->dscl_mode;
+
+ /* Scaler ratios - software state that programs scale ratio registers (use actual programmed ratios) */
+ state->dpp[i].horz_ratio_int = dscl_data->ratios.h_scale_ratio >> 19; // Extract integer part from programmed ratio
+ state->dpp[i].vert_ratio_int = dscl_data->ratios.v_scale_ratio >> 19; // Extract integer part from programmed ratio
+
+ /* Basic scaler taps - software state that programs tap control registers (use actual programmed taps) */
+ state->dpp[i].h_taps = dscl_data->taps.h_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
+ state->dpp[i].v_taps = dscl_data->taps.v_taps + 1; // dscl_prog_data.taps stores (taps - 1), so add 1 back
+ }
+ }
+
+ /* Capture essential clock state for underflow analysis */
+ if (dc->clk_mgr && dc->clk_mgr->clks.dispclk_khz > 0) {
+ /* Core display clocks affecting bandwidth and timing */
+ state->dccg.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
+
+ /* Per-pipe clock configuration - only capture what's essential */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+ if (pipe_ctx->stream) {
+ /* Essential clocks that directly affect underflow risk */
+ state->dccg.dppclk_khz[i] = dc->clk_mgr->clks.dppclk_khz;
+ state->dccg.pixclk_khz[i] = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ state->dccg.dppclk_enable[i] = 1;
+
+ /* DP stream clock only for DP signals */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ state->dccg.dpstreamclk_enable[i] = 1;
+ } else {
+ state->dccg.dpstreamclk_enable[i] = 0;
+ }
+ } else {
+ /* Inactive pipe - no clocks */
+ state->dccg.dppclk_khz[i] = 0;
+ state->dccg.pixclk_khz[i] = 0;
+ state->dccg.dppclk_enable[i] = 0;
+ if (i < 4) {
+ state->dccg.dpstreamclk_enable[i] = 0;
+ }
+ }
+ }
+
+ /* DSC clock state - only when actually using DSC */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = (i < dc->res_pool->pipe_count) ? &res_ctx->pipe_ctx[i] : NULL;
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
+ state->dccg.dscclk_khz[i] = 400000; /* Typical DSC clock frequency */
+ } else {
+ state->dccg.dscclk_khz[i] = 0;
+ }
+ }
+
+ /* SYMCLK32 LE Control - only the essential HPO state for underflow analysis */
+ for (i = 0; i < 2; i++) {
+ state->dccg.symclk32_le_enable[i] = 0; /* Default: disabled */
+ }
+
+ }
+
+ /* Capture essential DSC configuration for underflow analysis */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream && pipe_ctx->stream->timing.dsc_cfg.num_slices_h > 0) {
+ /* DSC is enabled - capture essential configuration */
+ state->dsc[i].dsc_clock_enable = 1;
+
+ /* DSC configuration affecting bandwidth and timing */
+ struct dc_dsc_config *dsc_cfg = &pipe_ctx->stream->timing.dsc_cfg;
+ state->dsc[i].dsc_num_slices_h = dsc_cfg->num_slices_h;
+ state->dsc[i].dsc_num_slices_v = dsc_cfg->num_slices_v;
+ state->dsc[i].dsc_bits_per_pixel = dsc_cfg->bits_per_pixel;
+
+ /* OPP pipe source for DSC forwarding */
+ if (pipe_ctx->stream_res.opp) {
+ state->dsc[i].dscrm_dsc_forward_enable = 1;
+ state->dsc[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
+ } else {
+ state->dsc[i].dscrm_dsc_forward_enable = 0;
+ state->dsc[i].dscrm_dsc_opp_pipe_source = 0;
+ }
+ } else {
+ /* DSC not enabled - clear all fields */
+ memset(&state->dsc[i], 0, sizeof(state->dsc[i]));
+ }
+ }
+
+ /* Capture MPC programming state - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream) {
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+
+ /* MPCC blending tree and mode control - capture actual blend configuration */
+ state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0;
+ state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0;
+ state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0;
+ state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */
+ state->mpc.mpcc_global_alpha[i] = plane_state->global_alpha_value;
+ state->mpc.mpcc_global_gain[i] = plane_state->global_alpha ? 255 : 0;
+ state->mpc.mpcc_bg_bpc[i] = 8; /* Standard 8-bit background */
+ state->mpc.mpcc_bot_gain_mode[i] = 0; /* Standard gain mode */
+
+ /* MPCC blending tree connections - capture tree topology */
+ if (pipe_ctx->bottom_pipe) {
+ state->mpc.mpcc_bot_sel[i] = pipe_ctx->bottom_pipe->pipe_idx;
+ } else {
+ state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
+ }
+ state->mpc.mpcc_top_sel[i] = pipe_ctx->pipe_idx; /* This pipe's DPP ID */
+
+ /* MPCC output gamma control - capture gamma programming */
+ if (plane_state->gamma_correction.type != GAMMA_CS_TFM_1D && plane_state->gamma_correction.num_entries > 0) {
+ state->mpc.mpcc_ogam_mode[i] = 1; /* Gamma enabled */
+ state->mpc.mpcc_ogam_select[i] = 0; /* Bank A selection */
+ state->mpc.mpcc_ogam_pwl_disable[i] = 0; /* PWL enabled */
+ } else {
+ state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass mode */
+ state->mpc.mpcc_ogam_select[i] = 0;
+ state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
+ }
+
+ /* MPCC pipe assignment and operational status */
+ if (pipe_ctx->stream_res.opp) {
+ state->mpc.mpcc_opp_id[i] = pipe_ctx->stream_res.opp->inst;
+ } else {
+ state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
+ }
+
+ /* MPCC status indicators - active pipe state */
+ state->mpc.mpcc_idle[i] = 0; /* Active pipe - not idle */
+ state->mpc.mpcc_busy[i] = 1; /* Active pipe - busy processing */
+
+ } else {
+ /* Pipe not active - set disabled/idle state for all fields */
+ state->mpc.mpcc_mode[i] = 0;
+ state->mpc.mpcc_alpha_blend_mode[i] = 0;
+ state->mpc.mpcc_alpha_multiplied_mode[i] = 0;
+ state->mpc.mpcc_blnd_active_overlap_only[i] = 0;
+ state->mpc.mpcc_global_alpha[i] = 0;
+ state->mpc.mpcc_global_gain[i] = 0;
+ state->mpc.mpcc_bg_bpc[i] = 0;
+ state->mpc.mpcc_bot_gain_mode[i] = 0;
+ state->mpc.mpcc_bot_sel[i] = 0xF; /* No bottom connection */
+ state->mpc.mpcc_top_sel[i] = 0xF; /* No top connection */
+ state->mpc.mpcc_ogam_mode[i] = 0; /* Bypass */
+ state->mpc.mpcc_ogam_select[i] = 0;
+ state->mpc.mpcc_ogam_pwl_disable[i] = 1; /* PWL disabled */
+ state->mpc.mpcc_opp_id[i] = 0xF; /* No OPP assignment */
+ state->mpc.mpcc_idle[i] = 1; /* Idle */
+ state->mpc.mpcc_busy[i] = 0; /* Not busy */
+ }
+ }
+
+ /* Capture OPP programming state for each pipe - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.opp) {
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+
+ /* OPP Pipe Control */
+ state->opp[i].opp_pipe_clock_enable = 1; /* Active pipe has clock enabled */
+
+ /* Display Pattern Generator (DPG) Control - 19 fields */
+ if (pipe_ctx->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
+ state->opp[i].dpg_enable = 1;
+ } else {
+ /* Video mode - DPG disabled */
+ state->opp[i].dpg_enable = 0;
+ }
+
+ /* Format Control (FMT) - 18 fields */
+ state->opp[i].fmt_pixel_encoding = timing->pixel_encoding;
+
+ /* Chroma subsampling mode based on pixel encoding */
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ state->opp[i].fmt_subsampling_mode = 1; /* 4:2:0 subsampling */
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ state->opp[i].fmt_subsampling_mode = 2; /* 4:2:2 subsampling */
+ } else {
+ state->opp[i].fmt_subsampling_mode = 0; /* No subsampling (4:4:4) */
+ }
+
+ state->opp[i].fmt_cbcr_bit_reduction_bypass = (timing->pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
+ state->opp[i].fmt_stereosync_override = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
+
+ /* Dithering control based on bit depth */
+ if (timing->display_color_depth < COLOR_DEPTH_121212) {
+ state->opp[i].fmt_spatial_dither_frame_counter_max = 15; /* Typical frame counter max */
+ state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0; /* No bit swapping */
+ state->opp[i].fmt_spatial_dither_enable = 1;
+ state->opp[i].fmt_spatial_dither_mode = 0; /* Spatial dithering mode */
+ state->opp[i].fmt_spatial_dither_depth = timing->display_color_depth;
+ state->opp[i].fmt_temporal_dither_enable = 0; /* Spatial dithering preferred */
+ } else {
+ state->opp[i].fmt_spatial_dither_frame_counter_max = 0;
+ state->opp[i].fmt_spatial_dither_frame_counter_bit_swap = 0;
+ state->opp[i].fmt_spatial_dither_enable = 0;
+ state->opp[i].fmt_spatial_dither_mode = 0;
+ state->opp[i].fmt_spatial_dither_depth = 0;
+ state->opp[i].fmt_temporal_dither_enable = 0;
+ }
+
+ /* Truncation control for bit depth reduction */
+ if (timing->display_color_depth < COLOR_DEPTH_121212) {
+ state->opp[i].fmt_truncate_enable = 1;
+ state->opp[i].fmt_truncate_depth = timing->display_color_depth;
+ state->opp[i].fmt_truncate_mode = 0; /* Round mode */
+ } else {
+ state->opp[i].fmt_truncate_enable = 0;
+ state->opp[i].fmt_truncate_depth = 0;
+ state->opp[i].fmt_truncate_mode = 0;
+ }
+
+ /* Data clamping control */
+ state->opp[i].fmt_clamp_data_enable = 1; /* Clamping typically enabled */
+ state->opp[i].fmt_clamp_color_format = timing->pixel_encoding;
+
+ /* Dynamic expansion for limited range content */
+ if (timing->pixel_encoding != PIXEL_ENCODING_RGB) {
+ state->opp[i].fmt_dynamic_exp_enable = 1; /* YCbCr typically needs expansion */
+ state->opp[i].fmt_dynamic_exp_mode = 0; /* Standard expansion */
+ } else {
+ state->opp[i].fmt_dynamic_exp_enable = 0; /* RGB typically full range */
+ state->opp[i].fmt_dynamic_exp_mode = 0;
+ }
+
+ /* Legacy field for compatibility */
+ state->opp[i].fmt_bit_depth_control = timing->display_color_depth;
+
+ /* Output Buffer (OPPBUF) Control - 6 fields */
+ state->opp[i].oppbuf_active_width = timing->h_addressable;
+ state->opp[i].oppbuf_pixel_repetition = 0; /* No pixel repetition by default */
+
+ /* Multi-Stream Output (MSO) / ODM segmentation */
+ if (pipe_ctx->next_odm_pipe) {
+ state->opp[i].oppbuf_display_segmentation = 1; /* Segmented display */
+ state->opp[i].oppbuf_overlap_pixel_num = 0; /* ODM overlap pixels */
+ } else {
+ state->opp[i].oppbuf_display_segmentation = 0; /* Single segment */
+ state->opp[i].oppbuf_overlap_pixel_num = 0;
+ }
+
+ /* 3D/Stereo control */
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) {
+ state->opp[i].oppbuf_3d_vact_space1_size = 30; /* Typical stereo blanking */
+ state->opp[i].oppbuf_3d_vact_space2_size = 30;
+ } else {
+ state->opp[i].oppbuf_3d_vact_space1_size = 0;
+ state->opp[i].oppbuf_3d_vact_space2_size = 0;
+ }
+
+ /* DSC Forward Config - 3 fields */
+ if (timing->dsc_cfg.num_slices_h > 0) {
+ state->opp[i].dscrm_dsc_forward_enable = 1;
+ state->opp[i].dscrm_dsc_opp_pipe_source = pipe_ctx->stream_res.opp->inst;
+ state->opp[i].dscrm_dsc_forward_enable_status = 1; /* Status follows enable */
+ } else {
+ state->opp[i].dscrm_dsc_forward_enable = 0;
+ state->opp[i].dscrm_dsc_opp_pipe_source = 0;
+ state->opp[i].dscrm_dsc_forward_enable_status = 0;
+ }
+ } else {
+ /* No OPP resource - set all fields to disabled state */
+ memset(&state->opp[i], 0, sizeof(state->opp[i]));
+ }
+ }
+
+ /* Capture OPTC programming state for each pipe - comprehensive register field coverage */
+ for (i = 0; i < MAX_PIPES && i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg) {
+ struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
+
+ state->optc[i].otg_master_inst = pipe_ctx->stream_res.tg->inst;
+
+ /* OTG_CONTROL register - 5 fields */
+ state->optc[i].otg_master_enable = 1; /* Active stream */
+ state->optc[i].otg_disable_point_cntl = 0; /* Normal operation */
+ state->optc[i].otg_start_point_cntl = 0; /* Normal start */
+ state->optc[i].otg_field_number_cntl = (timing->flags.INTERLACE) ? 1 : 0;
+ state->optc[i].otg_out_mux = 0; /* Direct output */
+
+ /* OTG Horizontal Timing - 7 fields */
+ state->optc[i].otg_h_total = timing->h_total;
+ state->optc[i].otg_h_blank_start = timing->h_addressable;
+ state->optc[i].otg_h_blank_end = timing->h_total - timing->h_front_porch;
+ state->optc[i].otg_h_sync_start = timing->h_addressable + timing->h_front_porch;
+ state->optc[i].otg_h_sync_end = timing->h_addressable + timing->h_front_porch + timing->h_sync_width;
+ state->optc[i].otg_h_sync_polarity = timing->flags.HSYNC_POSITIVE_POLARITY ? 0 : 1;
+ state->optc[i].otg_h_timing_div_mode = (pipe_ctx->next_odm_pipe) ? 1 : 0; /* ODM divide mode */
+
+ /* OTG Vertical Timing - 7 fields */
+ state->optc[i].otg_v_total = timing->v_total;
+ state->optc[i].otg_v_blank_start = timing->v_addressable;
+ state->optc[i].otg_v_blank_end = timing->v_total - timing->v_front_porch;
+ state->optc[i].otg_v_sync_start = timing->v_addressable + timing->v_front_porch;
+ state->optc[i].otg_v_sync_end = timing->v_addressable + timing->v_front_porch + timing->v_sync_width;
+ state->optc[i].otg_v_sync_polarity = timing->flags.VSYNC_POSITIVE_POLARITY ? 0 : 1;
+ state->optc[i].otg_v_sync_mode = 0; /* Normal sync mode */
+
+ /* Initialize remaining core fields with appropriate defaults */
+ // TODO: Update logic for accurate vtotal min/max
+ state->optc[i].otg_v_total_max = timing->v_total + 100; /* Typical DRR range */
+ state->optc[i].otg_v_total_min = timing->v_total - 50;
+ state->optc[i].otg_v_total_mid = timing->v_total;
+
+ /* ODM configuration */
+ // TODO: Update logic to have complete ODM mappings (e.g. 3:1 and 4:1) stored in single pipe
+ if (pipe_ctx->next_odm_pipe) {
+ state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
+ state->optc[i].optc_seg1_src_sel = pipe_ctx->next_odm_pipe->stream_res.opp ? pipe_ctx->next_odm_pipe->stream_res.opp->inst : 0;
+ state->optc[i].optc_num_of_input_segment = 1; /* register holds (segment count - 1): two ODM segments */
+ } else {
+ state->optc[i].optc_seg0_src_sel = pipe_ctx->stream_res.opp ? pipe_ctx->stream_res.opp->inst : 0;
+ state->optc[i].optc_seg1_src_sel = 0;
+ state->optc[i].optc_num_of_input_segment = 0; /* Single segment */
+ }
+
+ /* DSC configuration */
+ if (timing->dsc_cfg.num_slices_h > 0) {
+ state->optc[i].optc_dsc_mode = 1; /* DSC enabled */
+ state->optc[i].optc_dsc_bytes_per_pixel = timing->dsc_cfg.bits_per_pixel / 16; /* Convert to bytes */
+ state->optc[i].optc_dsc_slice_width = timing->h_addressable / timing->dsc_cfg.num_slices_h;
+ } else {
+ state->optc[i].optc_dsc_mode = 0;
+ state->optc[i].optc_dsc_bytes_per_pixel = 0;
+ state->optc[i].optc_dsc_slice_width = 0;
+ }
+
+ /* Essential control fields */
+ state->optc[i].otg_stereo_enable = (timing->timing_3d_format != TIMING_3D_FORMAT_NONE) ? 1 : 0;
+ state->optc[i].otg_interlace_enable = timing->flags.INTERLACE ? 1 : 0;
+ state->optc[i].otg_clock_enable = 1; /* OTG clock enabled */
+ state->optc[i].vtg0_enable = 1; /* VTG enabled for timing generation */
+
+ /* Initialize other key fields to defaults */
+ state->optc[i].optc_input_pix_clk_en = 1;
+ state->optc[i].optc_segment_width = (pipe_ctx->next_odm_pipe) ? (timing->h_addressable / 2) : timing->h_addressable;
+ state->optc[i].otg_vready_offset = 1;
+ state->optc[i].otg_vstartup_start = timing->v_addressable + 10;
+ state->optc[i].otg_vupdate_offset = 0;
+ state->optc[i].otg_vupdate_width = 5;
+ } else {
+ /* No timing generator resource - initialize all fields to 0 */
+ memset(&state->optc[i], 0, sizeof(state->optc[i]));
+ }
+ }
+
+ state->state_valid = true;
+ return true;
+}
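/*
 * A minimal sketch, assuming the snapshot is too large for the stack;
 * the kzalloc-based caller shown here is illustrative, not part of
 * this patch.
 */
static void example_snapshot_register_software_state(struct dc *dc)
{
	struct dc_register_software_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return;

	if (dc_capture_register_software_state(dc, state) && state->state_valid) {
		/* e.g. diff state->hubp[]/state->optc[] against raw HW reads */
	}

	kfree(state);
}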
+
+void dc_log_preos_dmcub_info(const struct dc *dc)
+{
+ dc_dmub_srv_log_preos_dmcub_info(dc->ctx->dmub_srv);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index ec4e80e5b6eb..e2763b60482a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -32,6 +32,13 @@
#include "resource.h"
#include "dc_dmub_srv.h"
#include "dc_state_priv.h"
+#include "opp.h"
+#include "dsc.h"
+#include "dchubbub.h"
+#include "dccg.h"
+#include "abm.h"
+#include "dcn10/dcn10_hubbub.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
#define MAX_NUM_MCACHE 8
@@ -258,7 +265,7 @@ void color_space_to_black_color(
black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED];
break;
- /**
+ /*
* Remove default and add case for all color space
* so when we forget to add new color space
* compiler will give a warning
@@ -755,11 +762,13 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = true;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = true;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required =
+ dc_state_is_fams2_in_use(dc, context) ||
+ dmub_hw_lock_mgr_does_link_require_lock(dc, stream->link);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
if (dc->hwss.pipe_control_lock) {
@@ -784,7 +793,7 @@ void hwss_build_fast_sequence(struct dc *dc,
while (current_mpc_pipe) {
if (current_mpc_pipe->plane_state) {
if (dc->hwss.set_flip_control_gsl && current_mpc_pipe->plane_state->update_flags.raw) {
- block_sequence[*num_steps].params.set_flip_control_gsl_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].params.set_flip_control_gsl_params.hubp = current_mpc_pipe->plane_res.hubp;
block_sequence[*num_steps].params.set_flip_control_gsl_params.flip_immediate = current_mpc_pipe->plane_state->flip_immediate;
block_sequence[*num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
(*num_steps)++;
@@ -894,11 +903,11 @@ void hwss_build_fast_sequence(struct dc *dc,
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
- if (dc->hwss.fams2_global_control_lock_fast) {
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.dc = dc;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.lock = false;
- block_sequence[*num_steps].params.fams2_global_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
- block_sequence[*num_steps].func = DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST;
+ if (dc->hwss.dmub_hw_control_lock_fast) {
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.dc = dc;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.lock = false;
+ block_sequence[*num_steps].params.dmub_hw_control_lock_fast_params.is_required = dc_state_is_fams2_in_use(dc, context);
+ block_sequence[*num_steps].func = DMUB_HW_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -911,6 +920,13 @@ void hwss_build_fast_sequence(struct dc *dc,
current_mpc_pipe->stream && current_mpc_pipe->plane_state &&
current_mpc_pipe->plane_state->update_flags.bits.addr_update &&
!current_mpc_pipe->plane_state->skip_manual_trigger) {
+ if (dc->hwss.program_cursor_offload_now) {
+ block_sequence[*num_steps].params.program_cursor_update_now_params.dc = dc;
+ block_sequence[*num_steps].params.program_cursor_update_now_params.pipe_ctx = current_mpc_pipe;
+ block_sequence[*num_steps].func = PROGRAM_CURSOR_UPDATE_NOW;
+ (*num_steps)++;
+ }
+
block_sequence[*num_steps].params.program_manual_trigger_params.pipe_ctx = current_mpc_pipe;
block_sequence[*num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
(*num_steps)++;
@@ -942,8 +958,9 @@ void hwss_execute_sequence(struct dc *dc,
params->pipe_control_lock_params.lock);
break;
case HUBP_SET_FLIP_CONTROL_GSL:
- dc->hwss.set_flip_control_gsl(params->set_flip_control_gsl_params.pipe_ctx,
- params->set_flip_control_gsl_params.flip_immediate);
+ params->set_flip_control_gsl_params.hubp->funcs->hubp_set_flip_control_surface_gsl(
+ params->set_flip_control_gsl_params.hubp,
+ params->set_flip_control_gsl_params.flip_immediate);
break;
case HUBP_PROGRAM_TRIPLEBUFFER:
dc->hwss.program_triplebuffer(params->program_triplebuffer_params.dc,
@@ -1001,8 +1018,301 @@ void hwss_execute_sequence(struct dc *dc,
params->wait_for_dcc_meta_propagation_params.dc,
params->wait_for_dcc_meta_propagation_params.top_pipe_to_program);
break;
- case DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST:
- dc->hwss.fams2_global_control_lock_fast(params);
+ case DMUB_HW_CONTROL_LOCK_FAST:
+ dc->hwss.dmub_hw_control_lock_fast(params);
+ break;
+ case HUBP_PROGRAM_SURFACE_CONFIG:
+ hwss_program_surface_config(params);
+ break;
+ case HUBP_PROGRAM_MCACHE_ID:
+ hwss_program_mcache_id_and_split_coordinate(params);
+ break;
+ case PROGRAM_CURSOR_UPDATE_NOW:
+ dc->hwss.program_cursor_offload_now(
+ params->program_cursor_update_now_params.dc,
+ params->program_cursor_update_now_params.pipe_ctx);
+ break;
+ case HUBP_WAIT_PIPE_READ_START:
+ params->hubp_wait_pipe_read_start_params.hubp->funcs->hubp_wait_pipe_read_start(
+ params->hubp_wait_pipe_read_start_params.hubp);
+ break;
+ case HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM:
+ dc->hwss.apply_update_flags_for_phantom(params->apply_update_flags_for_phantom_params.pipe_ctx);
+ break;
+ case HWS_UPDATE_PHANTOM_VP_POSITION:
+ dc->hwss.update_phantom_vp_position(params->update_phantom_vp_position_params.dc,
+ params->update_phantom_vp_position_params.context,
+ params->update_phantom_vp_position_params.pipe_ctx);
+ break;
+ case OPTC_SET_ODM_COMBINE:
+ hwss_set_odm_combine(params);
+ break;
+ case OPTC_SET_ODM_BYPASS:
+ hwss_set_odm_bypass(params);
+ break;
+ case OPP_PIPE_CLOCK_CONTROL:
+ hwss_opp_pipe_clock_control(params);
+ break;
+ case OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL:
+ hwss_opp_program_left_edge_extra_pixel(params);
+ break;
+ case DCCG_SET_DTO_DSCCLK:
+ hwss_dccg_set_dto_dscclk(params);
+ break;
+ case DSC_SET_CONFIG:
+ hwss_dsc_set_config(params);
+ break;
+ case DSC_ENABLE:
+ hwss_dsc_enable(params);
+ break;
+ case TG_SET_DSC_CONFIG:
+ hwss_tg_set_dsc_config(params);
+ break;
+ case DSC_DISCONNECT:
+ hwss_dsc_disconnect(params);
+ break;
+ case DSC_READ_STATE:
+ hwss_dsc_read_state(params);
+ break;
+ case DSC_CALCULATE_AND_SET_CONFIG:
+ hwss_dsc_calculate_and_set_config(params);
+ break;
+ case DSC_ENABLE_WITH_OPP:
+ hwss_dsc_enable_with_opp(params);
+ break;
+ case TG_PROGRAM_GLOBAL_SYNC:
+ hwss_tg_program_global_sync(params);
+ break;
+ case TG_WAIT_FOR_STATE:
+ hwss_tg_wait_for_state(params);
+ break;
+ case TG_SET_VTG_PARAMS:
+ hwss_tg_set_vtg_params(params);
+ break;
+ case TG_SETUP_VERTICAL_INTERRUPT2:
+ hwss_tg_setup_vertical_interrupt2(params);
+ break;
+ case DPP_SET_HDR_MULTIPLIER:
+ hwss_dpp_set_hdr_multiplier(params);
+ break;
+ case HUBP_PROGRAM_DET_SIZE:
+ hwss_program_det_size(params);
+ break;
+ case HUBP_PROGRAM_DET_SEGMENTS:
+ hwss_program_det_segments(params);
+ break;
+ case OPP_SET_DYN_EXPANSION:
+ hwss_opp_set_dyn_expansion(params);
+ break;
+ case OPP_PROGRAM_FMT:
+ hwss_opp_program_fmt(params);
+ break;
+ case OPP_PROGRAM_BIT_DEPTH_REDUCTION:
+ hwss_opp_program_bit_depth_reduction(params);
+ break;
+ case OPP_SET_DISP_PATTERN_GENERATOR:
+ hwss_opp_set_disp_pattern_generator(params);
+ break;
+ case ABM_SET_PIPE:
+ hwss_set_abm_pipe(params);
+ break;
+ case ABM_SET_LEVEL:
+ hwss_set_abm_level(params);
+ break;
+ case ABM_SET_IMMEDIATE_DISABLE:
+ hwss_set_abm_immediate_disable(params);
+ break;
+ case MPC_REMOVE_MPCC:
+ hwss_mpc_remove_mpcc(params);
+ break;
+ case OPP_SET_MPCC_DISCONNECT_PENDING:
+ hwss_opp_set_mpcc_disconnect_pending(params);
+ break;
+ case DC_SET_OPTIMIZED_REQUIRED:
+ hwss_dc_set_optimized_required(params);
+ break;
+ case HUBP_DISCONNECT:
+ hwss_hubp_disconnect(params);
+ break;
+ case HUBBUB_FORCE_PSTATE_CHANGE_CONTROL:
+ hwss_hubbub_force_pstate_change_control(params);
+ break;
+ case TG_ENABLE_CRTC:
+ hwss_tg_enable_crtc(params);
+ break;
+ case TG_SET_GSL:
+ hwss_tg_set_gsl(params);
+ break;
+ case TG_SET_GSL_SOURCE_SELECT:
+ hwss_tg_set_gsl_source_select(params);
+ break;
+ case HUBP_WAIT_FLIP_PENDING:
+ hwss_hubp_wait_flip_pending(params);
+ break;
+ case TG_WAIT_DOUBLE_BUFFER_PENDING:
+ hwss_tg_wait_double_buffer_pending(params);
+ break;
+ case UPDATE_FORCE_PSTATE:
+ hwss_update_force_pstate(params);
+ break;
+ case HUBBUB_APPLY_DEDCN21_147_WA:
+ hwss_hubbub_apply_dedcn21_147_wa(params);
+ break;
+ case HUBBUB_ALLOW_SELF_REFRESH_CONTROL:
+ hwss_hubbub_allow_self_refresh_control(params);
+ break;
+ case TG_GET_FRAME_COUNT:
+ hwss_tg_get_frame_count(params);
+ break;
+ case MPC_SET_DWB_MUX:
+ hwss_mpc_set_dwb_mux(params);
+ break;
+ case MPC_DISABLE_DWB_MUX:
+ hwss_mpc_disable_dwb_mux(params);
+ break;
+ case MCIF_WB_CONFIG_BUF:
+ hwss_mcif_wb_config_buf(params);
+ break;
+ case MCIF_WB_CONFIG_ARB:
+ hwss_mcif_wb_config_arb(params);
+ break;
+ case MCIF_WB_ENABLE:
+ hwss_mcif_wb_enable(params);
+ break;
+ case MCIF_WB_DISABLE:
+ hwss_mcif_wb_disable(params);
+ break;
+ case DWBC_ENABLE:
+ hwss_dwbc_enable(params);
+ break;
+ case DWBC_DISABLE:
+ hwss_dwbc_disable(params);
+ break;
+ case DWBC_UPDATE:
+ hwss_dwbc_update(params);
+ break;
+ case HUBP_UPDATE_MALL_SEL:
+ hwss_hubp_update_mall_sel(params);
+ break;
+ case HUBP_PREPARE_SUBVP_BUFFERING:
+ hwss_hubp_prepare_subvp_buffering(params);
+ break;
+ case HUBP_SET_BLANK_EN:
+ hwss_hubp_set_blank_en(params);
+ break;
+ case HUBP_DISABLE_CONTROL:
+ hwss_hubp_disable_control(params);
+ break;
+ case HUBBUB_SOFT_RESET:
+ hwss_hubbub_soft_reset(params);
+ break;
+ case HUBP_CLK_CNTL:
+ hwss_hubp_clk_cntl(params);
+ break;
+ case HUBP_INIT:
+ hwss_hubp_init(params);
+ break;
+ case HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS:
+ hwss_hubp_set_vm_system_aperture_settings(params);
+ break;
+ case HUBP_SET_FLIP_INT:
+ hwss_hubp_set_flip_int(params);
+ break;
+ case DPP_DPPCLK_CONTROL:
+ hwss_dpp_dppclk_control(params);
+ break;
+ case DISABLE_PHANTOM_CRTC:
+ hwss_disable_phantom_crtc(params);
+ break;
+ case DSC_PG_STATUS:
+ hwss_dsc_pg_status(params);
+ break;
+ case DSC_WAIT_DISCONNECT_PENDING_CLEAR:
+ hwss_dsc_wait_disconnect_pending_clear(params);
+ break;
+ case DSC_DISABLE:
+ hwss_dsc_disable(params);
+ break;
+ case DCCG_SET_REF_DSCCLK:
+ hwss_dccg_set_ref_dscclk(params);
+ break;
+ case DPP_PG_CONTROL:
+ hwss_dpp_pg_control(params);
+ break;
+ case HUBP_PG_CONTROL:
+ hwss_hubp_pg_control(params);
+ break;
+ case HUBP_RESET:
+ hwss_hubp_reset(params);
+ break;
+ case DPP_RESET:
+ hwss_dpp_reset(params);
+ break;
+ case DPP_ROOT_CLOCK_CONTROL:
+ hwss_dpp_root_clock_control(params);
+ break;
+ case DC_IP_REQUEST_CNTL:
+ hwss_dc_ip_request_cntl(params);
+ break;
+ case DCCG_UPDATE_DPP_DTO:
+ hwss_dccg_update_dpp_dto(params);
+ break;
+ case HUBP_VTG_SEL:
+ hwss_hubp_vtg_sel(params);
+ break;
+ case HUBP_SETUP2:
+ hwss_hubp_setup2(params);
+ break;
+ case HUBP_SETUP:
+ hwss_hubp_setup(params);
+ break;
+ case HUBP_SET_UNBOUNDED_REQUESTING:
+ hwss_hubp_set_unbounded_requesting(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT2:
+ hwss_hubp_setup_interdependent2(params);
+ break;
+ case HUBP_SETUP_INTERDEPENDENT:
+ hwss_hubp_setup_interdependent(params);
+ break;
+ case DPP_SET_CURSOR_MATRIX:
+ hwss_dpp_set_cursor_matrix(params);
+ break;
+ case MPC_UPDATE_BLENDING:
+ hwss_mpc_update_blending(params);
+ break;
+ case MPC_ASSERT_IDLE_MPCC:
+ hwss_mpc_assert_idle_mpcc(params);
+ break;
+ case MPC_INSERT_PLANE:
+ hwss_mpc_insert_plane(params);
+ break;
+ case DPP_SET_SCALER:
+ hwss_dpp_set_scaler(params);
+ break;
+ case HUBP_MEM_PROGRAM_VIEWPORT:
+ hwss_hubp_mem_program_viewport(params);
+ break;
+ case ABORT_CURSOR_OFFLOAD_UPDATE:
+ hwss_abort_cursor_offload_update(params);
+ break;
+ case SET_CURSOR_ATTRIBUTE:
+ hwss_set_cursor_attribute(params);
+ break;
+ case SET_CURSOR_POSITION:
+ hwss_set_cursor_position(params);
+ break;
+ case SET_CURSOR_SDR_WHITE_LEVEL:
+ hwss_set_cursor_sdr_white_level(params);
+ break;
+ case PROGRAM_OUTPUT_CSC:
+ hwss_program_output_csc(params);
+ break;
+ case HUBP_SET_BLANK:
+ hwss_hubp_set_blank(params);
+ break;
+ case PHANTOM_HUBP_POST_ENABLE:
+ hwss_phantom_hubp_post_enable(params);
break;
default:
ASSERT(false);
@@ -1011,6 +1321,338 @@ void hwss_execute_sequence(struct dc *dc,
}
}
+/*
+ * Helper function to add OPTC pipe control lock to block sequence
+ */
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool lock)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.pipe_control_lock_params.lock = lock;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PIPE_CONTROL_LOCK;
+ (*seq_state->num_steps)++;
+ }
+}
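/*
 * A minimal caller sketch of the helper pattern, assuming a
 * block_sequence_state wired to the existing step array and counter;
 * the .steps/.num_steps initialization is inferred from the helper
 * bodies, and this routine itself is illustrative.
 */
static void example_append_lock_flip_unlock(struct dc *dc,
		struct pipe_ctx *top_pipe,
		struct block_sequence *block_sequence,
		unsigned int *num_steps)
{
	struct block_sequence_state seq_state = {
		.steps = block_sequence,
		.num_steps = num_steps,
	};

	hwss_add_optc_pipe_control_lock(&seq_state, dc, top_pipe, true);
	hwss_add_hubp_update_plane_addr(&seq_state, dc, top_pipe);
	hwss_add_optc_pipe_control_lock(&seq_state, dc, top_pipe, false);
}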
+
+/*
+ * Helper function to add HUBP set flip control GSL to block sequence
+ */
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool flip_immediate)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.set_flip_control_gsl_params.flip_immediate = flip_immediate;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_CONTROL_GSL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program triplebuffer to block sequence
+ */
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enableTripleBuffer)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_triplebuffer_params.enableTripleBuffer = enableTripleBuffer;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_TRIPLEBUFFER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP update plane address to block sequence
+ */
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_plane_addr_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_PLANE_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set input transfer function to block sequence
+ */
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_input_transfer_func_params.plane_state = plane_state;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_INPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP program gamut remap to block sequence
+ */
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_gamut_remap_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_GAMUT_REMAP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP program bias and scale to block sequence
+ */
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_bias_and_scale_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = DPP_PROGRAM_BIAS_AND_SCALE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC program manual trigger to block sequence
+ */
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_manual_trigger_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_PROGRAM_MANUAL_TRIGGER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set output transfer function to block sequence
+ */
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.set_output_transfer_func_params.stream = stream;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_OUTPUT_TRANSFER_FUNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC update visual confirm to block sequence
+ */
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.update_visual_confirm_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_VISUAL_CONFIRM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC power on MPC mem PWR to block sequence
+ */
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.mpcc_id = mpcc_id;
+ seq_state->steps[*seq_state->num_steps].params.power_on_mpc_mem_pwr_params.power_on = power_on;
+ seq_state->steps[*seq_state->num_steps].func = MPC_POWER_ON_MPC_MEM_PWR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC set output CSC to block sequence
+ */
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ const uint16_t *regval,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.regval = regval;
+ seq_state->steps[*seq_state->num_steps].params.set_output_csc_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OUTPUT_CSC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC set OCSC default to block sequence
+ */
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int opp_id,
+ enum dc_color_space colorspace,
+ enum mpc_output_csc_mode ocsc_mode)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.opp_id = opp_id;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.color_space = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.set_ocsc_default_params.ocsc_mode = ocsc_mode;
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_OCSC_DEFAULT;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DMUB send DMCUB command to block sequence
+ */
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx,
+ union dmub_rb_cmd *cmd,
+ enum dm_dmub_wait_type wait_type)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.ctx = ctx;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.cmd = cmd;
+ seq_state->steps[*seq_state->num_steps].params.send_dmcub_cmd_params.wait_type = wait_type;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SEND_DMCUB_CMD;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DMUB SubVP save surface address to block sequence
+ */
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct dc_plane_address *addr,
+ uint8_t subvp_index)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc_dmub_srv;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.addr = addr;
+ seq_state->steps[*seq_state->num_steps].params.subvp_save_surf_addr.subvp_index = subvp_index;
+ seq_state->steps[*seq_state->num_steps].func = DMUB_SUBVP_SAVE_SURF_ADDR;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait for DCC meta propagation to block sequence
+ */
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *top_pipe_to_program)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.wait_for_dcc_meta_propagation_params.top_pipe_to_program = top_pipe_to_program;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FOR_DCC_META_PROP;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait pipe read start to block sequence
+ */
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_pipe_read_start_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_PIPE_READ_START;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HWS apply update flags for phantom to block sequence
+ */
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.apply_update_flags_for_phantom_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HWS update phantom VP position to block sequence
+ */
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.context = context;
+ seq_state->steps[*seq_state->num_steps].params.update_phantom_vp_position_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = HWS_UPDATE_PHANTOM_VP_POSITION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC set ODM combine to block sequence
+ */
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.tg = tg;
+ memcpy(seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_inst, opp_inst, sizeof(int) * MAX_PIPES);
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.opp_head_count = opp_head_count;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.odm_slice_width = odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_combine_params.last_odm_slice_width = last_odm_slice_width;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_COMBINE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPTC set ODM bypass to block sequence
+ */
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.set_odm_bypass_params.timing = timing;
+ seq_state->steps[*seq_state->num_steps].func = OPTC_SET_ODM_BYPASS;
+ (*seq_state->num_steps)++;
+ }
+}
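/*
 * A minimal sketch feeding the two ODM helpers above, assuming the OPP
 * instances come from walking the ODM chain of an OTG master pipe; the
 * equal-width slice computation is illustrative.
 */
static void example_queue_odm_programming(struct block_sequence_state *seq_state,
		struct pipe_ctx *otg_master)
{
	int opp_inst[MAX_PIPES] = {0};
	int opp_cnt = 0;
	struct pipe_ctx *odm_pipe;

	for (odm_pipe = otg_master; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
		opp_inst[opp_cnt++] = odm_pipe->stream_res.opp->inst;

	if (opp_cnt > 1) {
		int slice_w = otg_master->stream->timing.h_addressable / opp_cnt;

		hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg,
				opp_inst, opp_cnt, slice_w, slice_w);
	} else {
		hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg,
				&otg_master->stream->timing);
	}
}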
+
void hwss_send_dmcub_cmd(union block_sequence_params *params)
{
struct dc_context *ctx = params->send_dmcub_cmd_params.ctx;
@@ -1020,6 +1662,276 @@ void hwss_send_dmcub_cmd(union block_sequence_params *params)
dc_wake_and_execute_dmub_cmd(ctx, cmd, wait_type);
}
+/*
+ * Helper function to add TG program global sync to block sequence
+ */
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vready_offset = vready_offset;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vstartup_lines = vstartup_lines;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_offset_pixels = vupdate_offset_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.vupdate_vupdate_width_pixels = vupdate_vupdate_width_pixels;
+ seq_state->steps[*seq_state->num_steps].params.tg_program_global_sync_params.pstate_keepout_start_lines = pstate_keepout_start_lines;
+ seq_state->steps[*seq_state->num_steps].func = TG_PROGRAM_GLOBAL_SYNC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG wait for state to block sequence
+ */
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ enum crtc_state state)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_for_state_params.state = state;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_FOR_STATE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG set VTG params to block sequence
+ */
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dc_crtc_timing *dc_crtc_timing,
+ bool program_fp2)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.timing = dc_crtc_timing;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_vtg_params_params.program_fp2 = program_fp2;
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_VTG_PARAMS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG setup vertical interrupt2 to block sequence
+ */
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_setup_vertical_interrupt2_params.start_line = start_line;
+ seq_state->steps[*seq_state->num_steps].func = TG_SETUP_VERTICAL_INTERRUPT2;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add DPP set HDR multiplier to block sequence
+ */
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_hdr_multiplier_params.hw_mult = hw_mult;
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_HDR_MULTIPLIER;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program DET size to block sequence
+ */
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_buffer_size_kb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_size_params.det_buffer_size_kb = det_buffer_size_kb;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SIZE;
+ (*seq_state->num_steps)++;
+ }
+}
+
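+/*
+ * Helper function to add HUBP program mcache ID and split coordinate to block sequence
+ */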
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_mcache_id_and_split_coordinate.mcache_regs = mcache_regs;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_MCACHE_ID;
+ (*seq_state->num_steps)++;
+ }
+}
+
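+/*
+ * Helper function to add HUBBUB force pstate change control to block sequence
+ */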
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool enable,
+ bool wait)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_force_pstate_change_control_params.wait = wait;
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_FORCE_PSTATE_CHANGE_CONTROL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP program DET segments to block sequence
+ */
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ unsigned int hubp_inst,
+ unsigned int det_size)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.program_det_segments_params.det_size = det_size;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_DET_SEGMENTS;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPP set dynamic expansion to block sequence
+ */
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_color_space color_space,
+ enum dc_color_depth color_depth,
+ enum signal_type signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_dyn_expansion_params.signal = signal;
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DYN_EXPANSION;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add OPP program FMT to block sequence
+ */
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.fmt_bit_depth = fmt_bit_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_fmt_params.clamping = clamping;
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_FMT;
+ (*seq_state->num_steps)++;
+ }
+}
+
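+/*
+ * Helper function to add OPP program left edge extra pixel to block sequence
+ */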
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.pixel_encoding = pixel_encoding;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_left_edge_extra_pixel_params.is_otg_master = is_otg_master;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add ABM set pipe to block sequence
+ */
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_pipe_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_PIPE;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add ABM set level to block sequence
+ */
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm,
+ uint32_t abm_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm = abm;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_level_params.abm_level = abm_level;
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_LEVEL;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG enable CRTC to block sequence
+ */
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_enable_crtc_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].func = TG_ENABLE_CRTC;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP wait flip pending to block sequence
+ */
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.hubp_wait_flip_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = HUBP_WAIT_FLIP_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add TG wait double buffer pending to block sequence
+ */
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int timeout_us,
+ unsigned int polling_interval_us)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.timeout_us = timeout_us;
+ seq_state->steps[*seq_state->num_steps].params.tg_wait_double_buffer_pending_params.polling_interval_us = polling_interval_us;
+ seq_state->steps[*seq_state->num_steps].func = TG_WAIT_DOUBLE_BUFFER_PENDING;
+ (*seq_state->num_steps)++;
+ }
+}
+
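+/*
+ * Illustrative usage sketch (comment only, not executed): the hwss_add_*
+ * helpers above append one step each to a caller-owned sequence and do
+ * nothing once MAX_HWSS_BLOCK_SEQUENCE_SIZE is reached. The field layout
+ * of block_sequence_state is inferred from its use in this file, and the
+ * surrounding variables are hypothetical:
+ *
+ * struct block_sequence_state seq_state = {
+ * .steps = block_sequence,
+ * .num_steps = &num_steps,
+ * };
+ *
+ * hwss_add_tg_wait_for_state(&seq_state, tg, CRTC_STATE_VBLANK);
+ * hwss_add_tg_enable_crtc(&seq_state, tg);
+ * hwss_execute_sequence(dc, block_sequence, num_steps);
+ */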
void hwss_program_manual_trigger(union block_sequence_params *params)
{
struct pipe_ctx *pipe_ctx = params->program_manual_trigger_params.pipe_ctx;
@@ -1046,12 +1958,6 @@ void hwss_setup_dpp(union block_sequence_params *params)
plane_state->color_space,
NULL);
}
-
- if (dpp && dpp->funcs->set_cursor_matrix) {
- dpp->funcs->set_cursor_matrix(dpp,
- plane_state->color_space,
- plane_state->cursor_csc_color_matrix);
- }
}
void hwss_program_bias_and_scale(union block_sequence_params *params)
@@ -1062,9 +1968,8 @@ void hwss_program_bias_and_scale(union block_sequence_params *params)
struct dc_bias_and_scale bns_params = plane_state->bias_and_scale;
//TODO :for CNVC set scale and bias registers if necessary
- if (dpp->funcs->dpp_program_bias_and_scale) {
+ if (dpp->funcs->dpp_program_bias_and_scale)
dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
- }
}
void hwss_power_on_mpc_mem_pwr(union block_sequence_params *params)
@@ -1114,6 +2019,39 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
}
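+/*
+ * Program the HUBP surface config from block sequence params
+ */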
+void hwss_program_surface_config(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_surface_config_params.hubp;
+ enum surface_pixel_format format = params->program_surface_config_params.format;
+ struct dc_tiling_info *tiling_info = params->program_surface_config_params.tiling_info;
+ struct plane_size size = params->program_surface_config_params.plane_size;
+ enum dc_rotation_angle rotation = params->program_surface_config_params.rotation;
+ struct dc_plane_dcc_param *dcc = params->program_surface_config_params.dcc;
+ bool horizontal_mirror = params->program_surface_config_params.horizontal_mirror;
+ int compat_level = params->program_surface_config_params.compat_level;
+
+ hubp->funcs->hubp_program_surface_config(
+ hubp,
+ format,
+ tiling_info,
+ &size,
+ rotation,
+ dcc,
+ horizontal_mirror,
+ compat_level);
+
+ hubp->power_gated = false;
+}
+
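+/*
+ * Program the HUBP mcache ID and split coordinate from block sequence params
+ */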
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->program_mcache_id_and_split_coordinate.hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs = params->program_mcache_id_and_split_coordinate.mcache_regs;
+
+ if (hubp && hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hubp->funcs->hubp_program_mcache_id_and_split_coordinate(hubp, mcache_regs);
+}
+
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
@@ -1177,6 +2115,8 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
tg = otg_master->stream_res.tg;
if (tg->funcs->wait_odm_doublebuffer_pending_clear)
tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ if (tg->funcs->wait_otg_disable)
+ tg->funcs->wait_otg_disable(tg);
}
/* ODM update may require to reprogram blank pattern for each OPP */
@@ -1186,6 +2126,7 @@ void hwss_wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *c
void hwss_wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
+
for (i = 0; i < MAX_PIPES; i++) {
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1262,3 +2203,1869 @@ void hwss_process_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_cont
if (dc->hwss.program_outstanding_updates)
dc->hwss.program_outstanding_updates(dc, dc_context);
}
+
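+/*
+ * Block sequence executors: each hwss_* function below unpacks its
+ * arguments from the block_sequence_params union and calls the matching
+ * hardware block hook, skipping the call when the hook is not implemented.
+ */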
+void hwss_set_odm_combine(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_combine_params.tg;
+ int *opp_inst = params->set_odm_combine_params.opp_inst;
+ int opp_head_count = params->set_odm_combine_params.opp_head_count;
+ int odm_slice_width = params->set_odm_combine_params.odm_slice_width;
+ int last_odm_slice_width = params->set_odm_combine_params.last_odm_slice_width;
+
+ if (tg && tg->funcs->set_odm_combine)
+ tg->funcs->set_odm_combine(tg, opp_inst, opp_head_count,
+ odm_slice_width, last_odm_slice_width);
+}
+
+void hwss_set_odm_bypass(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->set_odm_bypass_params.tg;
+ const struct dc_crtc_timing *timing = params->set_odm_bypass_params.timing;
+
+ if (tg && tg->funcs->set_odm_bypass)
+ tg->funcs->set_odm_bypass(tg, timing);
+}
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_pipe_clock_control_params.opp;
+ bool enable = params->opp_pipe_clock_control_params.enable;
+
+ if (opp && opp->funcs->opp_pipe_clock_control)
+ opp->funcs->opp_pipe_clock_control(opp, enable);
+}
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_left_edge_extra_pixel_params.opp;
+ enum dc_pixel_encoding pixel_encoding = params->opp_program_left_edge_extra_pixel_params.pixel_encoding;
+ bool is_otg_master = params->opp_program_left_edge_extra_pixel_params.is_otg_master;
+
+ if (opp && opp->funcs->opp_program_left_edge_extra_pixel)
+ opp->funcs->opp_program_left_edge_extra_pixel(opp, pixel_encoding, is_otg_master);
+}
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_dto_dscclk_params.dccg;
+ int inst = params->dccg_set_dto_dscclk_params.inst;
+ int num_slices_h = params->dccg_set_dto_dscclk_params.num_slices_h;
+
+ if (dccg && dccg->funcs->set_dto_dscclk)
+ dccg->funcs->set_dto_dscclk(dccg, inst, num_slices_h);
+}
+
+void hwss_dsc_set_config(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_set_config_params.dsc;
+ struct dsc_config *dsc_cfg = params->dsc_set_config_params.dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg = params->dsc_set_config_params.dsc_optc_cfg;
+
+ if (dsc && dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, dsc_cfg, dsc_optc_cfg);
+}
+
+void hwss_dsc_enable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_enable_params.dsc;
+ int opp_inst = params->dsc_enable_params.opp_inst;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, opp_inst);
+}
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_dsc_config_params.tg;
+ enum optc_dsc_mode optc_dsc_mode = OPTC_DSC_DISABLED;
+ uint32_t bytes_per_pixel = 0;
+ uint32_t slice_width = 0;
+
+ if (params->tg_set_dsc_config_params.enable) {
+ struct dsc_optc_config *dsc_optc_cfg = params->tg_set_dsc_config_params.dsc_optc_cfg;
+
+ if (dsc_optc_cfg) {
+ bytes_per_pixel = dsc_optc_cfg->bytes_per_pixel;
+ slice_width = dsc_optc_cfg->slice_width;
+ optc_dsc_mode = dsc_optc_cfg->is_pixel_format_444 ?
+ OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+ }
+ }
+
+ if (tg && tg->funcs->set_dsc_config)
+ tg->funcs->set_dsc_config(tg, optc_dsc_mode, bytes_per_pixel, slice_width);
+}
+
+void hwss_dsc_disconnect(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disconnect_params.dsc;
+
+ if (dsc && dsc->funcs->dsc_disconnect)
+ dsc->funcs->dsc_disconnect(dsc);
+}
+
+void hwss_dsc_read_state(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_read_state_params.dsc;
+ struct dcn_dsc_state *dsc_state = params->dsc_read_state_params.dsc_state;
+
+ if (dsc && dsc->funcs->dsc_read_state)
+ dsc->funcs->dsc_read_state(dsc, dsc_state);
+}
+
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->dsc_calculate_and_set_config_params.pipe_ctx;
+ struct pipe_ctx *top_pipe = pipe_ctx;
+ bool enable = params->dsc_calculate_and_set_config_params.enable;
+ int opp_cnt = params->dsc_calculate_and_set_config_params.opp_cnt;
+
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dsc_config dsc_cfg;
+
+ if (!dsc || !enable)
+ return;
+
+ /* Calculate DSC configuration - extracted from dcn32_update_dsc_on_stream */
+
+ while (top_pipe->prev_odm_pipe)
+ top_pipe = top_pipe->prev_odm_pipe;
+
+ dsc_cfg.pic_width = (stream->timing.h_addressable + top_pipe->dsc_padding_params.dsc_hactive_padding +
+ stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = top_pipe->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = top_pipe->dsc_padding_params.dsc_hactive_padding;
+
+ /* Set DSC configuration */
+ if (dsc->funcs->dsc_set_config)
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg,
+ &params->dsc_calculate_and_set_config_params.dsc_optc_cfg);
+}
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params)
+{
+ struct pipe_ctx *pipe_ctx = params->dsc_enable_with_opp_params.pipe_ctx;
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+ if (dsc && dsc->funcs->dsc_enable)
+ dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+}
+
+void hwss_tg_program_global_sync(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_program_global_sync_params.tg;
+ int vready_offset = params->tg_program_global_sync_params.vready_offset;
+ unsigned int vstartup_lines = params->tg_program_global_sync_params.vstartup_lines;
+ unsigned int vupdate_offset_pixels = params->tg_program_global_sync_params.vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels = params->tg_program_global_sync_params.vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines = params->tg_program_global_sync_params.pstate_keepout_start_lines;
+
+ if (tg->funcs->program_global_sync) {
+ tg->funcs->program_global_sync(tg, vready_offset, vstartup_lines,
+ vupdate_offset_pixels, vupdate_vupdate_width_pixels, pstate_keepout_start_lines);
+ }
+}
+
+void hwss_tg_wait_for_state(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_for_state_params.tg;
+ enum crtc_state state = params->tg_wait_for_state_params.state;
+
+ if (tg->funcs->wait_for_state)
+ tg->funcs->wait_for_state(tg, state);
+}
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_vtg_params_params.tg;
+ struct dc_crtc_timing *timing = params->tg_set_vtg_params_params.timing;
+ bool program_fp2 = params->tg_set_vtg_params_params.program_fp2;
+
+ if (tg->funcs->set_vtg_params)
+ tg->funcs->set_vtg_params(tg, timing, program_fp2);
+}
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_setup_vertical_interrupt2_params.tg;
+ int start_line = params->tg_setup_vertical_interrupt2_params.start_line;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ tg->funcs->setup_vertical_interrupt2(tg, start_line);
+}
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_hdr_multiplier_params.dpp;
+ uint32_t hw_mult = params->dpp_set_hdr_multiplier_params.hw_mult;
+
+ if (dpp->funcs->dpp_set_hdr_multiplier)
+ dpp->funcs->dpp_set_hdr_multiplier(dpp, hw_mult);
+}
+
+void hwss_program_det_size(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_size_params.hubbub;
+ unsigned int hubp_inst = params->program_det_size_params.hubp_inst;
+ unsigned int det_buffer_size_kb = params->program_det_size_params.det_buffer_size_kb;
+
+ if (hubbub->funcs->program_det_size)
+ hubbub->funcs->program_det_size(hubbub, hubp_inst, det_buffer_size_kb);
+}
+
+void hwss_program_det_segments(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->program_det_segments_params.hubbub;
+ unsigned int hubp_inst = params->program_det_segments_params.hubp_inst;
+ unsigned int det_size = params->program_det_segments_params.det_size;
+
+ if (hubbub->funcs->program_det_segments)
+ hubbub->funcs->program_det_segments(hubbub, hubp_inst, det_size);
+}
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_dyn_expansion_params.opp;
+ enum dc_color_space color_space = params->opp_set_dyn_expansion_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_dyn_expansion_params.color_depth;
+ enum signal_type signal = params->opp_set_dyn_expansion_params.signal;
+
+ if (opp->funcs->opp_set_dyn_expansion)
+ opp->funcs->opp_set_dyn_expansion(opp, color_space, color_depth, signal);
+}
+
+void hwss_opp_program_fmt(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_fmt_params.opp;
+ struct bit_depth_reduction_params *fmt_bit_depth = params->opp_program_fmt_params.fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping = params->opp_program_fmt_params.clamping;
+
+ if (opp->funcs->opp_program_fmt)
+ opp->funcs->opp_program_fmt(opp, fmt_bit_depth, clamping);
+}
+
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_program_bit_depth_reduction_params.opp;
+ bool use_default_params = params->opp_program_bit_depth_reduction_params.use_default_params;
+ struct pipe_ctx *pipe_ctx = params->opp_program_bit_depth_reduction_params.pipe_ctx;
+ struct bit_depth_reduction_params bit_depth_params;
+
+ if (use_default_params)
+ memset(&bit_depth_params, 0, sizeof(bit_depth_params));
+ else
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &bit_depth_params);
+
+ if (opp->funcs->opp_program_bit_depth_reduction)
+ opp->funcs->opp_program_bit_depth_reduction(opp, &bit_depth_params);
+}
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_disp_pattern_generator_params.opp;
+ enum controller_dp_test_pattern test_pattern = params->opp_set_disp_pattern_generator_params.test_pattern;
+ enum controller_dp_color_space color_space = params->opp_set_disp_pattern_generator_params.color_space;
+ enum dc_color_depth color_depth = params->opp_set_disp_pattern_generator_params.color_depth;
+ struct tg_color *solid_color = params->opp_set_disp_pattern_generator_params.use_solid_color ?
+ &params->opp_set_disp_pattern_generator_params.solid_color : NULL;
+ int width = params->opp_set_disp_pattern_generator_params.width;
+ int height = params->opp_set_disp_pattern_generator_params.height;
+ int offset = params->opp_set_disp_pattern_generator_params.offset;
+
+ if (opp && opp->funcs->opp_set_disp_pattern_generator) {
+ opp->funcs->opp_set_disp_pattern_generator(opp, test_pattern, color_space,
+ color_depth, solid_color, width, height, offset);
+ }
+}
+
+void hwss_set_abm_pipe(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_pipe_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_pipe_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_pipe)
+ dc->hwss.set_pipe(pipe_ctx);
+}
+
+void hwss_set_abm_level(union block_sequence_params *params)
+{
+ struct abm *abm = params->set_abm_level_params.abm;
+ unsigned int abm_level = params->set_abm_level_params.abm_level;
+
+ if (abm->funcs->set_abm_level)
+ abm->funcs->set_abm_level(abm, abm_level);
+}
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_abm_immediate_disable_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_abm_immediate_disable_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_abm_immediate_disable)
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+}
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_remove_mpcc_params.mpc;
+ struct mpc_tree *mpc_tree_params = params->mpc_remove_mpcc_params.mpc_tree_params;
+ struct mpcc *mpcc_to_remove = params->mpc_remove_mpcc_params.mpcc_to_remove;
+
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+}
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params)
+{
+ struct output_pixel_processor *opp = params->opp_set_mpcc_disconnect_pending_params.opp;
+ int mpcc_inst = params->opp_set_mpcc_disconnect_pending_params.mpcc_inst;
+ bool pending = params->opp_set_mpcc_disconnect_pending_params.pending;
+
+ opp->mpcc_disconnect_pending[mpcc_inst] = pending;
+}
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_set_optimized_required_params.dc;
+ bool optimized_required = params->dc_set_optimized_required_params.optimized_required;
+
+ dc->optimized_required = optimized_required;
+}
+
+void hwss_hubp_disconnect(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disconnect_params.hubp;
+
+ if (hubp->funcs->hubp_disconnect)
+ hubp->funcs->hubp_disconnect(hubp);
+}
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_force_pstate_change_control_params.hubbub;
+ bool enable = params->hubbub_force_pstate_change_control_params.enable;
+ bool wait = params->hubbub_force_pstate_change_control_params.wait;
+
+ if (hubbub->funcs->force_pstate_change_control) {
+ hubbub->funcs->force_pstate_change_control(hubbub, enable, wait);
+ /* Add delay when enabling pstate change control */
+ if (enable)
+ udelay(500);
+ }
+}
+
+void hwss_tg_enable_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_enable_crtc_params.tg;
+
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
+}
+
+void hwss_tg_set_gsl(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_params.tg;
+ struct gsl_params *gsl = &params->tg_set_gsl_params.gsl;
+
+ if (tg->funcs->set_gsl)
+ tg->funcs->set_gsl(tg, gsl);
+}
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_set_gsl_source_select_params.tg;
+ int group_idx = params->tg_set_gsl_source_select_params.group_idx;
+ uint32_t gsl_ready_signal = params->tg_set_gsl_source_select_params.gsl_ready_signal;
+
+ if (tg->funcs->set_gsl_source_select)
+ tg->funcs->set_gsl_source_select(tg, group_idx, gsl_ready_signal);
+}
+
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_wait_flip_pending_params.hubp;
+ unsigned int timeout_us = params->hubp_wait_flip_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->hubp_wait_flip_pending_params.polling_interval_us;
+ int j = 0;
+
+ if (hubp->funcs->hubp_is_flip_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && hubp->funcs->hubp_is_flip_pending(hubp); j++)
+ udelay(polling_interval_us);
+ }
+}
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_wait_double_buffer_pending_params.tg;
+ unsigned int timeout_us = params->tg_wait_double_buffer_pending_params.timeout_us;
+ unsigned int polling_interval_us = params->tg_wait_double_buffer_pending_params.polling_interval_us;
+ int j = 0;
+
+ if (tg->funcs->get_optc_double_buffer_pending) {
+ for (j = 0; j < timeout_us / polling_interval_us
+ && tg->funcs->get_optc_double_buffer_pending(tg); j++)
+ udelay(polling_interval_us);
+ }
+}
+
+void hwss_update_force_pstate(union block_sequence_params *params)
+{
+ struct dc *dc = params->update_force_pstate_params.dc;
+ struct dc_state *context = params->update_force_pstate_params.context;
+ struct dce_hwseq *hwseq = dc->hwseq;
+
+ if (hwseq->funcs.update_force_pstate)
+ hwseq->funcs.update_force_pstate(dc, context);
+}
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_apply_dedcn21_147_wa_params.hubbub;
+
+ hubbub->funcs->apply_DEDCN21_147_wa(hubbub);
+}
+
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_allow_self_refresh_control_params.hubbub;
+ bool allow = params->hubbub_allow_self_refresh_control_params.allow;
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, allow);
+
+ if (!allow && params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied)
+ *params->hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = true;
+}
+
+void hwss_tg_get_frame_count(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->tg_get_frame_count_params.tg;
+ unsigned int *frame_count = params->tg_get_frame_count_params.frame_count;
+
+ *frame_count = tg->funcs->get_frame_count(tg);
+}
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_set_dwb_mux_params.mpc;
+ int dwb_id = params->mpc_set_dwb_mux_params.dwb_id;
+ int mpcc_id = params->mpc_set_dwb_mux_params.mpcc_id;
+
+ if (mpc->funcs->set_dwb_mux)
+ mpc->funcs->set_dwb_mux(mpc, dwb_id, mpcc_id);
+}
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_disable_dwb_mux_params.mpc;
+ unsigned int dwb_id = params->mpc_disable_dwb_mux_params.dwb_id;
+
+ if (mpc->funcs->disable_dwb_mux)
+ mpc->funcs->disable_dwb_mux(mpc, dwb_id);
+}
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_buf_params.mcif_wb;
+ struct mcif_buf_params *mcif_buf_params = params->mcif_wb_config_buf_params.mcif_buf_params;
+ unsigned int dest_height = params->mcif_wb_config_buf_params.dest_height;
+
+ if (mcif_wb->funcs->config_mcif_buf)
+ mcif_wb->funcs->config_mcif_buf(mcif_wb, mcif_buf_params, dest_height);
+}
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_config_arb_params.mcif_wb;
+ struct mcif_arb_params *mcif_arb_params = params->mcif_wb_config_arb_params.mcif_arb_params;
+
+ if (mcif_wb->funcs->config_mcif_arb)
+ mcif_wb->funcs->config_mcif_arb(mcif_wb, mcif_arb_params);
+}
+
+void hwss_mcif_wb_enable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_enable_params.mcif_wb;
+
+ if (mcif_wb->funcs->enable_mcif)
+ mcif_wb->funcs->enable_mcif(mcif_wb);
+}
+
+void hwss_mcif_wb_disable(union block_sequence_params *params)
+{
+ struct mcif_wb *mcif_wb = params->mcif_wb_disable_params.mcif_wb;
+
+ if (mcif_wb->funcs->disable_mcif)
+ mcif_wb->funcs->disable_mcif(mcif_wb);
+}
+
+void hwss_dwbc_enable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_enable_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_enable_params.dwb_params;
+
+ if (dwb->funcs->enable)
+ dwb->funcs->enable(dwb, dwb_params);
+}
+
+void hwss_dwbc_disable(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_disable_params.dwb;
+
+ if (dwb->funcs->disable)
+ dwb->funcs->disable(dwb);
+}
+
+void hwss_dwbc_update(union block_sequence_params *params)
+{
+ struct dwbc *dwb = params->dwbc_update_params.dwb;
+ struct dc_dwb_params *dwb_params = params->dwbc_update_params.dwb_params;
+
+ if (dwb->funcs->update)
+ dwb->funcs->update(dwb, dwb_params);
+}
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_update_mall_sel_params.hubp;
+ uint32_t mall_sel = params->hubp_update_mall_sel_params.mall_sel;
+ bool cache_cursor = params->hubp_update_mall_sel_params.cache_cursor;
+
+ if (hubp && hubp->funcs->hubp_update_mall_sel)
+ hubp->funcs->hubp_update_mall_sel(hubp, mall_sel, cache_cursor);
+}
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_prepare_subvp_buffering_params.hubp;
+ bool enable = params->hubp_prepare_subvp_buffering_params.enable;
+
+ if (hubp && hubp->funcs->hubp_prepare_subvp_buffering)
+ hubp->funcs->hubp_prepare_subvp_buffering(hubp, enable);
+}
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_en_params.hubp;
+ bool enable = params->hubp_set_blank_en_params.enable;
+
+ if (hubp && hubp->funcs->set_hubp_blank_en)
+ hubp->funcs->set_hubp_blank_en(hubp, enable);
+}
+
+void hwss_hubp_disable_control(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_disable_control_params.hubp;
+ bool disable = params->hubp_disable_control_params.disable;
+
+ if (hubp && hubp->funcs->hubp_disable_control)
+ hubp->funcs->hubp_disable_control(hubp, disable);
+}
+
+void hwss_hubbub_soft_reset(union block_sequence_params *params)
+{
+ struct hubbub *hubbub = params->hubbub_soft_reset_params.hubbub;
+ bool reset = params->hubbub_soft_reset_params.reset;
+
+ if (hubbub && params->hubbub_soft_reset_params.hubbub_soft_reset)
+ params->hubbub_soft_reset_params.hubbub_soft_reset(hubbub, reset);
+}
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_clk_cntl_params.hubp;
+ bool enable = params->hubp_clk_cntl_params.enable;
+
+ if (hubp && hubp->funcs->hubp_clk_cntl) {
+ hubp->funcs->hubp_clk_cntl(hubp, enable);
+ hubp->power_gated = !enable;
+ }
+}
+
+void hwss_hubp_init(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_init_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_init)
+ hubp->funcs->hubp_init(hubp);
+}
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_vm_system_aperture_settings_params.hubp;
+ struct vm_system_aperture_param apt;
+
+ apt.sys_default = params->hubp_set_vm_system_aperture_settings_params.sys_default;
+ apt.sys_high = params->hubp_set_vm_system_aperture_settings_params.sys_high;
+ apt.sys_low = params->hubp_set_vm_system_aperture_settings_params.sys_low;
+
+ if (hubp && hubp->funcs->hubp_set_vm_system_aperture_settings)
+ hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
+}
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_flip_int_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_set_flip_int)
+ hubp->funcs->hubp_set_flip_int(hubp);
+}
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_dppclk_control_params.dpp;
+ bool dppclk_div = params->dpp_dppclk_control_params.dppclk_div;
+ bool enable = params->dpp_dppclk_control_params.enable;
+
+ if (dpp && dpp->funcs->dpp_dppclk_control)
+ dpp->funcs->dpp_dppclk_control(dpp, dppclk_div, enable);
+}
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params)
+{
+ struct timing_generator *tg = params->disable_phantom_crtc_params.tg;
+
+ if (tg && tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+}
+
+void hwss_dsc_pg_status(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dsc_pg_status_params.hws;
+ int dsc_inst = params->dsc_pg_status_params.dsc_inst;
+
+ if (hws && hws->funcs.dsc_pg_status)
+ params->dsc_pg_status_params.is_ungated = hws->funcs.dsc_pg_status(hws, dsc_inst);
+}
+
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_wait_disconnect_pending_clear_params.dsc;
+
+ if (!params->dsc_wait_disconnect_pending_clear_params.is_ungated ||
+ !*params->dsc_wait_disconnect_pending_clear_params.is_ungated)
+ return;
+
+ if (dsc && dsc->funcs->dsc_wait_disconnect_pending_clear)
+ dsc->funcs->dsc_wait_disconnect_pending_clear(dsc);
+}
+
+void hwss_dsc_disable(union block_sequence_params *params)
+{
+ struct display_stream_compressor *dsc = params->dsc_disable_params.dsc;
+
+ if (!params->dsc_disable_params.is_ungated ||
+ !*params->dsc_disable_params.is_ungated)
+ return;
+
+ if (dsc && dsc->funcs->dsc_disable)
+ dsc->funcs->dsc_disable(dsc);
+}
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_set_ref_dscclk_params.dccg;
+ int dsc_inst = params->dccg_set_ref_dscclk_params.dsc_inst;
+
+ if (!params->dccg_set_ref_dscclk_params.is_ungated ||
+ !*params->dccg_set_ref_dscclk_params.is_ungated)
+ return;
+
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ dccg->funcs->set_ref_dscclk(dccg, dsc_inst);
+}
+
+void hwss_dpp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_pg_control_params.hws;
+ unsigned int dpp_inst = params->dpp_pg_control_params.dpp_inst;
+ bool power_on = params->dpp_pg_control_params.power_on;
+
+ if (hws->funcs.dpp_pg_control)
+ hws->funcs.dpp_pg_control(hws, dpp_inst, power_on);
+}
+
+void hwss_hubp_pg_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->hubp_pg_control_params.hws;
+ unsigned int hubp_inst = params->hubp_pg_control_params.hubp_inst;
+ bool power_on = params->hubp_pg_control_params.power_on;
+
+ if (hws->funcs.hubp_pg_control)
+ hws->funcs.hubp_pg_control(hws, hubp_inst, power_on);
+}
+
+void hwss_hubp_reset(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_reset_params.hubp;
+
+ if (hubp && hubp->funcs->hubp_reset)
+ hubp->funcs->hubp_reset(hubp);
+}
+
+void hwss_dpp_reset(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_reset_params.dpp;
+
+ if (dpp && dpp->funcs->dpp_reset)
+ dpp->funcs->dpp_reset(dpp);
+}
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params)
+{
+ struct dce_hwseq *hws = params->dpp_root_clock_control_params.hws;
+ unsigned int dpp_inst = params->dpp_root_clock_control_params.dpp_inst;
+ bool clock_on = params->dpp_root_clock_control_params.clock_on;
+
+ if (hws->funcs.dpp_root_clock_control)
+ hws->funcs.dpp_root_clock_control(hws, dpp_inst, clock_on);
+}
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params)
+{
+ struct dc *dc = params->dc_ip_request_cntl_params.dc;
+ bool enable = params->dc_ip_request_cntl_params.enable;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.dc_ip_request_cntl)
+ hws->funcs.dc_ip_request_cntl(dc, enable);
+}
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params)
+{
+ struct dccg *dccg = params->dccg_update_dpp_dto_params.dccg;
+ int dpp_inst = params->dccg_update_dpp_dto_params.dpp_inst;
+ int dppclk_khz = params->dccg_update_dpp_dto_params.dppclk_khz;
+
+ if (dccg && dccg->funcs->update_dpp_dto)
+ dccg->funcs->update_dpp_dto(dccg, dpp_inst, dppclk_khz);
+}
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_vtg_sel_params.hubp;
+ uint32_t otg_inst = params->hubp_vtg_sel_params.otg_inst;
+
+ if (hubp && hubp->funcs->hubp_vtg_sel)
+ hubp->funcs->hubp_vtg_sel(hubp, otg_inst);
+}
+
+void hwss_hubp_setup2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup2_params.hubp_regs;
+ union dml2_global_sync_programming *global_sync = params->hubp_setup2_params.global_sync;
+ struct dc_crtc_timing *timing = params->hubp_setup2_params.timing;
+
+ if (hubp && hubp->funcs->hubp_setup2)
+ hubp->funcs->hubp_setup2(hubp, hubp_regs, global_sync, timing);
+}
+
+void hwss_hubp_setup(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_params.ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs = params->hubp_setup_params.rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest = params->hubp_setup_params.pipe_dest;
+
+ if (hubp && hubp->funcs->hubp_setup)
+ hubp->funcs->hubp_setup(hubp, dlg_regs, ttu_regs, rq_regs, pipe_dest);
+}
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_unbounded_requesting_params.hubp;
+ bool unbounded_req = params->hubp_set_unbounded_requesting_params.unbounded_req;
+
+ if (hubp && hubp->funcs->set_unbounded_requesting)
+ hubp->funcs->set_unbounded_requesting(hubp, unbounded_req);
+}
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent2_params.hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs = params->hubp_setup_interdependent2_params.hubp_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent2)
+ hubp->funcs->hubp_setup_interdependent2(hubp, hubp_regs);
+}
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_setup_interdependent_params.hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = params->hubp_setup_interdependent_params.dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs = params->hubp_setup_interdependent_params.ttu_regs;
+
+ if (hubp && hubp->funcs->hubp_setup_interdependent)
+ hubp->funcs->hubp_setup_interdependent(hubp, dlg_regs, ttu_regs);
+}
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_cursor_matrix_params.dpp;
+ enum dc_color_space color_space = params->dpp_set_cursor_matrix_params.color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix = params->dpp_set_cursor_matrix_params.cursor_csc_color_matrix;
+
+ if (dpp && dpp->funcs->set_cursor_matrix)
+ dpp->funcs->set_cursor_matrix(dpp, color_space, *cursor_csc_color_matrix);
+}
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params)
+{
+ struct dc *dc = params->mpc_update_mpcc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->mpc_update_mpcc_params.pipe_ctx;
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (hws->funcs.update_mpcc)
+ hws->funcs.update_mpcc(dc, pipe_ctx);
+}
+
+void hwss_mpc_update_blending(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_update_blending_params.mpc;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_update_blending_params.blnd_cfg;
+ int mpcc_id = params->mpc_update_blending_params.mpcc_id;
+
+ if (mpc && mpc->funcs->update_blending)
+ mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
+}
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_assert_idle_mpcc_params.mpc;
+ int mpcc_id = params->mpc_assert_idle_mpcc_params.mpcc_id;
+
+ if (mpc && mpc->funcs->wait_for_idle)
+ mpc->funcs->wait_for_idle(mpc, mpcc_id);
+}
+
+void hwss_mpc_insert_plane(union block_sequence_params *params)
+{
+ struct mpc *mpc = params->mpc_insert_plane_params.mpc;
+ struct mpc_tree *tree = params->mpc_insert_plane_params.mpc_tree_params;
+ struct mpcc_blnd_cfg *blnd_cfg = &params->mpc_insert_plane_params.blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg = params->mpc_insert_plane_params.sm_cfg;
+ struct mpcc *insert_above_mpcc = params->mpc_insert_plane_params.insert_above_mpcc;
+ int mpcc_id = params->mpc_insert_plane_params.mpcc_id;
+ int dpp_id = params->mpc_insert_plane_params.dpp_id;
+
+ if (mpc && mpc->funcs->insert_plane)
+ mpc->funcs->insert_plane(mpc, tree, blnd_cfg, sm_cfg, insert_above_mpcc,
+ dpp_id, mpcc_id);
+}
+
+void hwss_dpp_set_scaler(union block_sequence_params *params)
+{
+ struct dpp *dpp = params->dpp_set_scaler_params.dpp;
+ const struct scaler_data *scl_data = params->dpp_set_scaler_params.scl_data;
+
+ if (dpp && dpp->funcs->dpp_set_scaler)
+ dpp->funcs->dpp_set_scaler(dpp, scl_data);
+}
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_mem_program_viewport_params.hubp;
+ const struct rect *viewport = params->hubp_mem_program_viewport_params.viewport;
+ const struct rect *viewport_c = params->hubp_mem_program_viewport_params.viewport_c;
+
+ if (hubp && hubp->funcs->mem_program_viewport)
+ hubp->funcs->mem_program_viewport(hubp, viewport, viewport_c);
+}
+
+void hwss_abort_cursor_offload_update(union block_sequence_params *params)
+{
+ struct dc *dc = params->abort_cursor_offload_update_params.dc;
+ struct pipe_ctx *pipe_ctx = params->abort_cursor_offload_update_params.pipe_ctx;
+
+ if (dc && dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+}
+
+void hwss_set_cursor_attribute(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_attribute_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_attribute_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_attribute)
+ dc->hwss.set_cursor_attribute(pipe_ctx);
+}
+
+void hwss_set_cursor_position(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_position_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_position_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_position)
+ dc->hwss.set_cursor_position(pipe_ctx);
+}
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params)
+{
+ struct dc *dc = params->set_cursor_sdr_white_level_params.dc;
+ struct pipe_ctx *pipe_ctx = params->set_cursor_sdr_white_level_params.pipe_ctx;
+
+ if (dc && dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+}
+
+void hwss_program_output_csc(union block_sequence_params *params)
+{
+ struct dc *dc = params->program_output_csc_params.dc;
+ struct pipe_ctx *pipe_ctx = params->program_output_csc_params.pipe_ctx;
+ enum dc_color_space colorspace = params->program_output_csc_params.colorspace;
+ uint16_t *matrix = params->program_output_csc_params.matrix;
+ int opp_id = params->program_output_csc_params.opp_id;
+
+ if (dc && dc->hwss.program_output_csc)
+ dc->hwss.program_output_csc(dc, pipe_ctx, colorspace, matrix, opp_id);
+}
+
+void hwss_hubp_set_blank(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->hubp_set_blank_params.hubp;
+ bool blank = params->hubp_set_blank_params.blank;
+
+ if (hubp && hubp->funcs->set_blank)
+ hubp->funcs->set_blank(hubp, blank);
+}
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params)
+{
+ struct hubp *hubp = params->phantom_hubp_post_enable_params.hubp;
+
+ if (hubp && hubp->funcs->phantom_hubp_post_enable)
+ hubp->funcs->phantom_hubp_post_enable(hubp);
+}
+
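+/*
+ * Helper function to add DCCG set DTO DSCCLK to block sequence
+ */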
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_DTO_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.inst = inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_dto_dscclk_params.num_slices_h = num_slices_h;
+ (*seq_state->num_steps)++;
+ }
+}
+
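+/*
+ * Helper function to add DSC calculate and set config to block sequence
+ */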
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_CALCULATE_AND_SET_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.enable = enable;
+ seq_state->steps[*seq_state->num_steps].params.dsc_calculate_and_set_config_params.opp_cnt = opp_cnt;
+ (*seq_state->num_steps)++;
+ }
+}
+
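+/*
+ * Helper function to add MPC remove MPCC to block sequence
+ */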
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_REMOVE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_remove_mpcc_params.mpcc_to_remove = mpcc_to_remove;
+ (*seq_state->num_steps)++;
+ }
+}
+
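+/*
+ * Helper function to add OPP set MPCC disconnect pending to block sequence
+ */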
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_MPCC_DISCONNECT_PENDING;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.mpcc_inst = mpcc_inst;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_mpcc_disconnect_pending_params.pending = pending;
+ (*seq_state->num_steps)++;
+ }
+}
+
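+/*
+ * Helper function to add HUBP disconnect to block sequence
+ */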
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disconnect_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
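+/*
+ * Helper function to add DSC enable with OPP to block sequence
+ */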
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_ENABLE_WITH_OPP;
+ seq_state->steps[*seq_state->num_steps].params.dsc_enable_with_opp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
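+/*
+ * Helper function to add TG set DSC config to block sequence
+ */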
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dsc_optc_config *dsc_optc_cfg, bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_DSC_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.dsc_optc_cfg = dsc_optc_cfg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_dsc_config_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
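+/*
+ * Helper function to add DSC disconnect to block sequence
+ */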
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISCONNECT;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disconnect_params.dsc = dsc;
+ (*seq_state->num_steps)++;
+ }
+}
+
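+/*
+ * Helper function to add DC set optimized required to block sequence
+ */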
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_SET_OPTIMIZED_REQUIRED;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_set_optimized_required_params.optimized_required = optimized_required;
+ (*seq_state->num_steps)++;
+ }
+}
+
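+/*
+ * Helper function to add ABM set immediate disable to block sequence
+ */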
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = ABM_SET_IMMEDIATE_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_abm_immediate_disable_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
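+/*
+ * Helper function to add OPP set display pattern generator to block sequence
+ */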
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_SET_DISP_PATTERN_GENERATOR;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.test_pattern = test_pattern;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.color_depth = color_depth;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.solid_color = solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.use_solid_color = use_solid_color;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.width = width;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.height = height;
+ seq_state->steps[*seq_state->num_steps].params.opp_set_disp_pattern_generator_params.offset = offset;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC update blending to block sequence
+ */
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_UPDATE_BLENDING;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_update_blending_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC insert plane to block sequence
+ */
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_INSERT_PLANE;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpc_tree_params = mpc_tree_params;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.blnd_cfg = blnd_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.sm_cfg = sm_cfg;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.insert_above_mpcc = insert_above_mpcc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.dpp_id = dpp_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_insert_plane_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add MPC assert idle MPCC to block sequence
+ */
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_ASSERT_IDLE_MPCC;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_assert_idle_mpcc_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+/*
+ * Helper function to add HUBP set blank to block sequence
+ */
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_params.blank = blank;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PROGRAM_BIT_DEPTH_REDUCTION;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.use_default_params = use_default_params;
+ seq_state->steps[*seq_state->num_steps].params.opp_program_bit_depth_reduction_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DC_IP_REQUEST_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.dc_ip_request_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_UPDATE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_update_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_BUF;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.mcif_buf_params = mcif_buf_params;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_buf_params.dest_height = dest_height;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_CONFIG_ARB;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_wb = mcif_wb;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_config_arb_params.mcif_arb_params = mcif_arb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_enable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MCIF_WB_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.mcif_wb_disable_params.mcif_wb = mcif_wb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_SET_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.dwb_id = dwb_id;
+ seq_state->steps[*seq_state->num_steps].params.mpc_set_dwb_mux_params.mpcc_id = mpcc_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = MPC_DISABLE_DWB_MUX;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.mpc = mpc;
+ seq_state->steps[*seq_state->num_steps].params.mpc_disable_dwb_mux_params.dwb_id = dwb_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb = dwb;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_enable_params.dwb_params = dwb_params;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DWBC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dwbc_disable_params.dwb = dwb;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_params.gsl = gsl;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_SET_GSL_SOURCE_SELECT;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.group_idx = group_idx;
+ seq_state->steps[*seq_state->num_steps].params.tg_set_gsl_source_select_params.gsl_ready_signal = gsl_ready_signal;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_UPDATE_MALL_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.mall_sel = mall_sel;
+ seq_state->steps[*seq_state->num_steps].params.hubp_update_mall_sel_params.cache_cursor = cache_cursor;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PREPARE_SUBVP_BUFFERING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_prepare_subvp_buffering_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_BLANK_EN;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_blank_en_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_DISABLE_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_disable_control_params.disable = disable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_SOFT_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.hubbub_soft_reset = hubbub_soft_reset;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_soft_reset_params.reset = reset;
+ (*seq_state->num_steps)++;
+ }
+}
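Unlike its siblings, hwss_add_hubbub_soft_reset also captures the callback to run, since the soft-reset hook can differ per hubbub revision. A caller would forward the hardware hook along with the step; a rough sketch (the funcs field name is an assumption for illustration, not taken from this patch):

    /* Queue a hubbub soft reset, passing the per-ASIC hook with the step.
     * hubbub->funcs->hubbub_soft_reset is assumed here for illustration. */
    hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub,
                               dc->res_pool->hubbub->funcs->hubbub_soft_reset,
                               true /* assert reset */);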
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_CLK_CNTL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_clk_cntl_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_DPPCLK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.dppclk_div = dppclk_div;
+ seq_state->steps[*seq_state->num_steps].params.dpp_dppclk_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DISABLE_PHANTOM_CRTC;
+ seq_state->steps[*seq_state->num_steps].params.disable_phantom_crtc_params.tg = tg;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_PG_STATUS;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dsc_pg_status_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_WAIT_DISCONNECT_PENDING_CLEAR;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_wait_disconnect_pending_clear_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DSC_DISABLE;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.dsc = dsc;
+ seq_state->steps[*seq_state->num_steps].params.dsc_disable_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_SET_REF_DSCCLK;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.dsc_inst = dsc_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_set_ref_dscclk_params.is_ungated = is_ungated;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_ROOT_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_root_clock_control_params.clock_on = clock_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dpp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PG_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hws = hws;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.hubp_inst = hubp_inst;
+ seq_state->steps[*seq_state->num_steps].params.hubp_pg_control_params.power_on = power_on;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_INIT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_init_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.hubp_reset_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_RESET;
+ seq_state->steps[*seq_state->num_steps].params.dpp_reset_params.dpp = dpp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = OPP_PIPE_CLOCK_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.opp = opp;
+ seq_state->steps[*seq_state->num_steps].params.opp_pipe_clock_control_params.enable = enable;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_default.quad_part = sys_default;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_low.quad_part = sys_low;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_vm_system_aperture_settings_params.sys_high.quad_part = sys_high;
+ (*seq_state->num_steps)++;
+ }
+}
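Note that this helper takes plain uint64_t addresses and stores them through .quad_part, so the underlying params appear to be address-union fields; callers pass raw values. A hedged usage sketch (the addresses are made up):

    /* Hypothetical aperture window; the helper packs each value into
     * the union's quad_part member. */
    hwss_add_hubp_set_vm_system_aperture_settings(seq_state, hubp,
                                                  0x0ULL,        /* sys_default */
                                                  0x80000000ULL, /* sys_low */
                                                  0x9FFFFFFFULL  /* sys_high */);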
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_FLIP_INT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_flip_int_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DCCG_UPDATE_DPP_DTO;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dccg = dccg;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dpp_inst = dpp_inst;
+ seq_state->steps[*seq_state->num_steps].params.dccg_update_dpp_dto_params.dppclk_khz = dppclk_khz;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_VTG_SEL;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_vtg_sel_params.otg_inst = otg_inst;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.hubp_regs = hubp_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.global_sync = global_sync;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup2_params.timing = timing;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.ttu_regs = ttu_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.rq_regs = rq_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_params.pipe_dest = pipe_dest;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SET_UNBOUNDED_REQUESTING;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_set_unbounded_requesting_params.unbounded_req = unbounded_req;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT2;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent2_params.hubp_regs = hubp_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_SETUP_INTERDEPENDENT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.dlg_regs = dlg_regs;
+ seq_state->steps[*seq_state->num_steps].params.hubp_setup_interdependent_params.ttu_regs = ttu_regs;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_PROGRAM_SURFACE_CONFIG;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.format = format;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.tiling_info = tiling_info;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.plane_size = plane_size;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.rotation = rotation;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.dcc = dcc;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.horizontal_mirror = horizontal_mirror;
+ seq_state->steps[*seq_state->num_steps].params.program_surface_config_params.compat_level = compat_level;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SETUP_DPP;
+ seq_state->steps[*seq_state->num_steps].params.setup_dpp_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_CURSOR_MATRIX;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.color_space = color_space;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_cursor_matrix_params.cursor_csc_color_matrix = cursor_csc_color_matrix;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = DPP_SET_SCALER;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.dpp = dpp;
+ seq_state->steps[*seq_state->num_steps].params.dpp_set_scaler_params.scl_data = scl_data;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBP_MEM_PROGRAM_VIEWPORT;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.hubp = hubp;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport = viewport;
+ seq_state->steps[*seq_state->num_steps].params.hubp_mem_program_viewport_params.viewport_c = viewport_c;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = ABORT_CURSOR_OFFLOAD_UPDATE;
+ seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.abort_cursor_offload_update_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_ATTRIBUTE;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_attribute_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_POSITION;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_position_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = SET_CURSOR_SDR_WHITE_LEVEL;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.set_cursor_sdr_white_level_params.pipe_ctx = pipe_ctx;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PROGRAM_OUTPUT_CSC;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.pipe_ctx = pipe_ctx;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.colorspace = colorspace;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.matrix = matrix;
+ seq_state->steps[*seq_state->num_steps].params.program_output_csc_params.opp_id = opp_id;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = PHANTOM_HUBP_POST_ENABLE;
+ seq_state->steps[*seq_state->num_steps].params.phantom_hubp_post_enable_params.hubp = hubp;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = UPDATE_FORCE_PSTATE;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.dc = dc;
+ seq_state->steps[*seq_state->num_steps].params.update_force_pstate_params.context = context;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_APPLY_DEDCN21_147_WA;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_apply_dedcn21_147_wa_params.hubbub = hubbub;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = HUBBUB_ALLOW_SELF_REFRESH_CONTROL;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.hubbub = hubbub;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.allow = allow;
+ seq_state->steps[*seq_state->num_steps].params.hubbub_allow_self_refresh_control_params.disallow_self_refresh_applied = disallow_self_refresh_applied;
+ (*seq_state->num_steps)++;
+ }
+}
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count)
+{
+ if (*seq_state->num_steps < MAX_HWSS_BLOCK_SEQUENCE_SIZE) {
+ seq_state->steps[*seq_state->num_steps].func = TG_GET_FRAME_COUNT;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.tg = tg;
+ seq_state->steps[*seq_state->num_steps].params.tg_get_frame_count_params.frame_count = frame_count;
+ (*seq_state->num_steps)++;
+ }
+}
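All of these helpers share one append pattern: bounds-check the shared *num_steps counter against MAX_HWSS_BLOCK_SEQUENCE_SIZE, fill the step's func tag and its per-block params union member, then advance the counter; once the sequence is full, further adds are silently dropped. A minimal usage sketch, assuming a dispatcher that walks the steps array and calls the function each func tag selects (the dispatcher name is illustrative, not taken from this patch):

    /* Assumed dispatcher, not part of this patch: walks steps[0..num_steps)
     * and calls the hw function each step's func tag selects. */
    void example_execute_block_sequence(struct dc *dc,
                                        struct block_sequence_state *seq_state);

    static void example_blank_plane(struct dc *dc,
                                    struct block_sequence_state *seq_state,
                                    struct pipe_ctx *pipe_ctx)
    {
            /* Each helper silently no-ops once the sequence is full. */
            hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx);
            hwss_add_hubp_set_blank(seq_state, pipe_ctx->plane_res.hubp, true);

            example_execute_block_sequence(dc, seq_state);
    }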
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 814f68d76257..deb23d20bca6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -24,7 +24,7 @@
#include "link_enc_cfg.h"
#include "resource.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER dc->ctx->logger
@@ -522,10 +522,10 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
{
struct link_encoder *link_enc = NULL;
- enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS];
+ enum engine_id encs_assigned[MAX_LINK_ENCODERS];
int i;
- for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++)
+ for (i = 0; i < MAX_LINK_ENCODERS; i++)
encs_assigned[i] = ENGINE_ID_UNKNOWN;
/* Add assigned encoders to list. */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index 130455f2802a..9acd30019717 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -33,8 +33,9 @@
* dc.h with detail interface documentation, then add function implementation
* in this file which calls link functions.
*/
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_i2c.h"
+
struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
{
if (link_index >= MAX_LINKS)
@@ -520,3 +521,10 @@ enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, cons
return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx);
}
+void dc_link_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ link->dc->link_srv->edp_get_alpm_support(link, auxless_support, auxwake_support);
+}
+
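The new export simply forwards to the link service; a caller queries both ALPM flavors in one call:

    bool auxless_support = false, auxwake_support = false;

    dc_link_get_alpm_support(link, &auxless_support, &auxwake_support);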
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 4d6181e7c612..848c267ef11e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -40,7 +40,7 @@
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#include "clk_mgr.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"
@@ -95,11 +95,44 @@
#define DC_LOGGER \
dc->ctx->logger
#define DC_LOGGER_INIT(logger)
-
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define UNABLE_TO_SPLIT -1
+static void capture_pipe_topology_data(struct dc *dc, int plane_idx, int slice_idx, int stream_idx,
+ int dpp_inst, int opp_inst, int tg_inst, bool is_phantom_pipe)
+{
+ struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];
+
+ if (current_snapshot->line_count >= MAX_PIPES)
+ return;
+
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].is_phantom_pipe = is_phantom_pipe;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].plane_idx = plane_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].slice_idx = slice_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].stream_idx = stream_idx;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].dpp_inst = dpp_inst;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].opp_inst = opp_inst;
+ current_snapshot->pipe_log_lines[current_snapshot->line_count].tg_inst = tg_inst;
+
+ current_snapshot->line_count++;
+}
+
+static void start_new_topology_snapshot(struct dc *dc, struct dc_state *state)
+{
+ // Move to next snapshot slot (circular buffer)
+ dc->debug_data.topology_history.current_snapshot_index = (dc->debug_data.topology_history.current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS;
+
+ // Clear the new snapshot
+ struct pipe_topology_snapshot *current_snapshot = &dc->debug_data.topology_history.snapshots[dc->debug_data.topology_history.current_snapshot_index];
+ memset(current_snapshot, 0, sizeof(*current_snapshot));
+
+ // Set metadata
+ current_snapshot->timestamp_us = dm_get_timestamp(dc->ctx);
+ current_snapshot->stream_count = state->stream_count;
+ current_snapshot->phantom_stream_count = state->phantom_stream_count;
+}
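Since current_snapshot_index advances modulo MAX_TOPOLOGY_SNAPSHOTS, the history is a ring that always overwrites the oldest entry. A sketch of how a debug dump would replay it oldest-to-newest (the dump function itself is hypothetical; the field names follow this patch):

    static void example_dump_topology_history(struct dc *dc)
    {
            struct pipe_topology_history *hist = &dc->debug_data.topology_history;
            int i;

            /* The oldest slot is the one after the current index, so the
             * walk starts at current_snapshot_index + 1 and wraps around. */
            for (i = 1; i <= MAX_TOPOLOGY_SNAPSHOTS; i++) {
                    int idx = (hist->current_snapshot_index + i) % MAX_TOPOLOGY_SNAPSHOTS;
                    struct pipe_topology_snapshot *snap = &hist->snapshots[idx];

                    if (snap->line_count == 0)
                            continue; /* slot never filled */
                    /* ... print snap->timestamp_us, the stream counts and
                     * each pipe_log_lines[] entry here ... */
            }
    }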
+
enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
{
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
@@ -165,7 +198,13 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
- if (asic_id.chip_id == DEVICE_ID_NV_13FE || asic_id.chip_id == DEVICE_ID_NV_143F) {
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE ||
+ asic_id.chip_id == DEVICE_ID_NV_143F ||
+ asic_id.chip_id == DEVICE_ID_NV_13F9 ||
+ asic_id.chip_id == DEVICE_ID_NV_13FA ||
+ asic_id.chip_id == DEVICE_ID_NV_13FB ||
+ asic_id.chip_id == DEVICE_ID_NV_13FC ||
+ asic_id.chip_id == DEVICE_ID_NV_13DB) {
dc_version = DCN_VERSION_2_01;
break;
}
@@ -441,6 +480,14 @@ bool resource_construct(
DC_ERR("DC: failed to create stream_encoder!\n");
pool->stream_enc_count++;
}
+
+ for (i = 0; i < caps->num_analog_stream_encoder; i++) {
+ pool->stream_enc[caps->num_stream_encoder + i] =
+ create_funcs->create_stream_encoder(ENGINE_ID_DACA + i, ctx);
+ if (pool->stream_enc[caps->num_stream_encoder + i] == NULL)
+ DC_ERR("DC: failed to create analog stream_encoder %d!\n", i);
+ pool->stream_enc_count++;
+ }
}
pool->hpo_dp_stream_enc_count = 0;
@@ -2143,7 +2190,7 @@ int resource_get_odm_slice_dst_width(struct pipe_ctx *otg_master,
h_active = timing->h_addressable +
timing->h_border_left +
timing->h_border_right +
- otg_master->hblank_borrow;
+ otg_master->dsc_padding_params.dsc_hactive_padding;
width = h_active / count;
if (otg_master->stream_res.tg)
@@ -2298,10 +2345,11 @@ bool resource_is_odm_topology_changed(const struct pipe_ctx *otg_master_a,
static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
int stream_idx, int slice_idx, int plane_idx, int slice_count,
- bool is_primary)
+ bool is_primary, bool is_phantom_pipe)
{
DC_LOGGER_INIT(dc->ctx->logger);
+ // New logging format: also capture each case into the bit-packed topology snapshot history
if (slice_idx == 0 && plane_idx == 0 && is_primary) {
/* case 0 (OTG master pipe with plane) */
DC_LOG_DC(" | plane%d slice%d stream%d|",
@@ -2310,6 +2358,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst,
pipe->stream_res.tg->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx == 0 && plane_idx == -1) {
/* case 1 (OTG master pipe without plane) */
DC_LOG_DC(" | slice%d stream%d|",
@@ -2318,6 +2370,10 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
pipe->stream_res.opp->inst,
pipe->stream_res.opp->inst,
pipe->stream_res.tg->inst);
+ capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx != 0 && plane_idx == 0 && is_primary) {
/* case 2 (OPP head pipe with plane) */
DC_LOG_DC(" | plane%d slice%d | |",
@@ -2325,27 +2381,43 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe,
DC_LOG_DC(" |DPP%d----OPP%d----| |",
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx != 0 && plane_idx == -1) {
/* case 3 (OPP head pipe without plane) */
DC_LOG_DC(" | slice%d | |", slice_idx);
DC_LOG_DC(" |DPG%d----OPP%d----| |",
pipe->plane_res.dpp->inst,
pipe->stream_res.opp->inst);
+ capture_pipe_topology_data(dc, 0xF, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else if (slice_idx == slice_count - 1) {
/* case 4 (DPP pipe in last slice) */
DC_LOG_DC(" | plane%d | |", plane_idx);
DC_LOG_DC(" |DPP%d----| |",
pipe->plane_res.dpp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
} else {
/* case 5 (DPP pipe not in last slice) */
DC_LOG_DC(" | plane%d | | |", plane_idx);
DC_LOG_DC(" |DPP%d----| | |",
pipe->plane_res.dpp->inst);
+ capture_pipe_topology_data(dc, plane_idx, slice_idx, stream_idx,
+ pipe->plane_res.dpp->inst,
+ pipe->stream_res.opp->inst,
+ pipe->stream_res.tg->inst, is_phantom_pipe);
}
}
static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
- struct pipe_ctx *otg_master, int stream_idx)
+ struct pipe_ctx *otg_master, int stream_idx, bool is_phantom_pipe)
{
struct pipe_ctx *opp_heads[MAX_PIPES];
struct pipe_ctx *dpp_pipes[MAX_PIPES];
@@ -2371,12 +2443,12 @@ static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
resource_log_pipe(dc, dpp_pipes[dpp_idx],
stream_idx, slice_idx,
plane_idx, slice_count,
- is_primary);
+ is_primary, is_phantom_pipe);
}
} else {
resource_log_pipe(dc, opp_heads[slice_idx],
stream_idx, slice_idx, plane_idx,
- slice_count, true);
+ slice_count, true, is_phantom_pipe);
}
}
@@ -2407,6 +2479,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
struct pipe_ctx *otg_master;
int stream_idx, phantom_stream_idx;
DC_LOGGER_INIT(dc->ctx->logger);
+ bool is_phantom_pipe = false;
+
+ // Start a new snapshot for this topology update
+ start_new_topology_snapshot(dc, state);
DC_LOG_DC(" pipe topology update");
DC_LOG_DC(" ________________________");
@@ -2420,9 +2496,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
if (!otg_master)
continue;
- resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
}
if (state->phantom_stream_count > 0) {
+ is_phantom_pipe = true;
DC_LOG_DC(" | (phantom pipes) |");
for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) {
if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN)
@@ -2435,7 +2512,7 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
if (!otg_master)
continue;
- resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
+ resource_log_pipe_for_stream(dc, state, otg_master, stream_idx, is_phantom_pipe);
}
}
DC_LOG_DC(" |________________________|\n");
@@ -2685,17 +2762,40 @@ static inline int find_fixed_dio_link_enc(const struct dc_link *link)
}
static inline int find_free_dio_link_enc(const struct resource_context *res_ctx,
- const struct dc_link *link, const struct resource_pool *pool)
+ const struct dc_link *link, const struct resource_pool *pool, struct dc_stream_state *stream)
{
- int i;
+ int i, j = -1;
+ int stream_enc_inst = -1;
int enc_count = pool->dig_link_enc_count;
- /* for dpia, check preferred encoder first and then the next one */
- for (i = 0; i < enc_count; i++)
- if (res_ctx->dio_link_enc_ref_cnts[(link->dpia_preferred_eng_id + i) % enc_count] == 0)
- break;
+ /* Find stream encoder instance for the stream */
+ if (stream) {
+ for (i = 0; i < pool->pipe_count; i++) {
+ if ((res_ctx->pipe_ctx[i].stream == stream) &&
+ (res_ctx->pipe_ctx[i].stream_res.stream_enc != NULL)) {
+ stream_enc_inst = res_ctx->pipe_ctx[i].stream_res.stream_enc->id;
+ break;
+ }
+ }
+ }
+
+ /* Assign dpia preferred > stream enc instance > available */
+ for (i = 0; i < enc_count; i++) {
+ if (res_ctx->dio_link_enc_ref_cnts[i] == 0) {
+ if (j == -1)
+ j = i;
+
+ if (link->dpia_preferred_eng_id == i) {
+ j = i;
+ break;
+ }
- return (i >= 0 && i < enc_count) ? (link->dpia_preferred_eng_id + i) % enc_count : -1;
+ if (stream_enc_inst == i) {
+ j = stream_enc_inst;
+ }
+ }
+ }
+ return j;
}
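The rewritten loop encodes a three-level priority: a free encoder matching link->dpia_preferred_eng_id wins outright (it breaks), a free encoder matching the stream's current stream encoder instance is preferred next, and otherwise the first free encoder seen (j set once while still -1) is kept. A worked example with made-up numbers:

    /* enc_count = 4, encoders 1 and 2 free,
     * dpia_preferred_eng_id = 2, stream_enc_inst = 1:
     *
     *   i = 1: free, j == -1 -> j = 1; also matches stream_enc_inst -> j = 1
     *   i = 2: free, matches dpia_preferred_eng_id -> j = 2, break
     *
     * result: j = 2, i.e. the dpia-preferred encoder beats the
     * stream-encoder match because only its check breaks out of the loop.
     */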
static inline void acquire_dio_link_enc(
@@ -2776,7 +2876,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
retain_dio_link_enc(res_ctx, enc_index);
} else {
if (stream->link->is_dig_mapping_flexible)
- enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool);
+ enc_index = find_free_dio_link_enc(res_ctx, stream->link, pool, stream);
else {
int link_index = 0;
@@ -2786,7 +2886,7 @@ static bool add_dio_link_enc_to_ctx(const struct dc *dc,
* one into the acquiring link.
*/
if (enc_index >= 0 && is_dio_enc_acquired_by_other_link(stream->link, enc_index, &link_index)) {
- int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool);
+ int new_enc_index = find_free_dio_link_enc(res_ctx, dc->links[link_index], pool, stream);
if (new_enc_index >= 0)
swap_dio_link_enc_to_muxable_ctx(context, pool, new_enc_index, enc_index);
@@ -4261,39 +4361,33 @@ fail:
return res;
}
+
/**
- * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context.
+ * calculate_timing_params_for_dsc_with_padding - Calculates timing parameters for DSC with padding.
* @pipe_ctx: Pointer to the pipe context structure.
*
- * This function calculates the horizontal blanking borrow value for a given pipe context based on the
+ * This function calculates the timing parameters for a given pipe context based on the
* display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less
- * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the
- * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow
+ * than the total width of the DSC slices, it sets the dsc_hactive_padding value to the difference. If the
+ * total horizontal timing minus the dsc_hactive_padding value is less than 32, it resets the dsc_hactive_padding
* value to 0.
*/
-static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx)
+static void calculate_timing_params_for_dsc_with_padding(struct pipe_ctx *pipe_ctx)
{
- uint32_t hactive;
- uint32_t ceil_slice_width;
struct dc_stream_state *stream = NULL;
if (!pipe_ctx)
return;
stream = pipe_ctx->stream;
+ pipe_ctx->dsc_padding_params.dsc_hactive_padding = 0;
+ pipe_ctx->dsc_padding_params.dsc_htotal_padding = 0;
- if (stream->timing.flags.DSC) {
- hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
-
- /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/
- if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) {
- ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1;
- pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive;
+ if (stream)
+ pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz = stream->timing.pix_clk_100hz;
- if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32)
- pipe_ctx->hblank_borrow = 0;
- }
- }
}
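For reference, the padding value that dsc_hactive_padding carries (consumed earlier in resource_get_odm_slice_dst_width()) comes from the slice-ceiling arithmetic the removed hblank_borrow code used; a worked example, assuming the same rounding rule still produces the value elsewhere:

    /* hactive = 1920, num_slices_h = 9 (9 does not divide 1920):
     *   ceil_slice_width = 1920 / 9 + 1 = 214
     *   padding          = 214 * 9 - 1920 = 6 pixels
     * so the slice math sees an effective h_active of 1926.
     */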
/**
@@ -4336,7 +4430,7 @@ enum dc_status dc_validate_global_state(
/* Decide whether hblank borrow is needed and save it in pipe_ctx */
if (dc->debug.enable_hblank_borrow)
- decide_hblank_borrow(pipe_ctx);
+ calculate_timing_params_for_dsc_with_padding(pipe_ctx);
if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
@@ -4411,8 +4505,14 @@ static void set_avi_info_frame(
unsigned int fr_ind = pipe_ctx->stream->timing.fr_index;
enum dc_timing_3d_format format;
+ if (stream->avi_infopacket.valid) {
+ *info_packet = stream->avi_infopacket;
+ return;
+ }
+
memset(&hdmi_info, 0, sizeof(union hdmi_info_packet));
+
color_space = pipe_ctx->stream->output_color_space;
if (color_space == COLOR_SPACE_UNKNOWN)
color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ?
@@ -5196,7 +5296,7 @@ struct link_encoder *get_temp_dio_link_enc(
enc_index = link->eng_id;
if (enc_index < 0)
- enc_index = find_free_dio_link_enc(res_ctx, link, pool);
+ enc_index = find_free_dio_link_enc(res_ctx, link, pool, NULL);
if (enc_index >= 0)
link_enc = pool->link_encoders[enc_index];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index fe9f99f1bdf9..f976ffd6d466 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -65,7 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
- get_link_index_from_dpia_port_index(dc, notify->link_index);
+ get_link_index_from_dpia_port_index(dc, notify->instance);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 883054bb18e7..2de8ef4a58ec 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -35,8 +35,8 @@
#include "link_enc_cfg.h"
#if defined(CONFIG_DRM_AMD_DC_FP)
-#include "dml2/dml2_wrapper.h"
-#include "dml2/dml2_internal_types.h"
+#include "dml2_0/dml2_wrapper.h"
+#include "dml2_0/dml2_internal_types.h"
#endif
#define DC_LOGGER \
@@ -211,7 +211,7 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p
return NULL;
}
- if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
+ if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) {
dc_state_release(state);
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4d6bc9fd4faa..129cd5f84983 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -224,6 +224,14 @@ struct dc_stream_status *dc_stream_get_status(
return dc_state_get_stream_status(dc->current_state, stream);
}
+const struct dc_stream_status *dc_stream_get_status_const(
+ const struct dc_stream_state *stream)
+{
+ struct dc *dc = stream->ctx->dc;
+
+ return dc_state_get_stream_status(dc->current_state, stream);
+}
+
void program_cursor_attributes(
struct dc *dc,
struct dc_stream_state *stream)
@@ -231,6 +239,7 @@ void program_cursor_attributes(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -245,9 +254,14 @@ void program_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
+ }
}
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -255,12 +269,18 @@ void program_cursor_attributes(
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
}
if (pipe_to_program) {
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
- if (pipe_to_program->next_odm_pipe)
- dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) {
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ } else {
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program->next_odm_pipe)
+ dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+ }
}
}
@@ -316,6 +336,9 @@ bool dc_stream_set_cursor_attributes(
{
bool result = false;
+ if (!stream)
+ return false;
+
if (dc_stream_check_cursor_attributes(stream, stream->ctx->dc->current_state, attributes)) {
stream->cursor_attributes = *attributes;
result = true;
@@ -331,7 +354,10 @@ bool dc_stream_program_cursor_attributes(
struct dc *dc;
bool reset_idle_optimizations = false;
- dc = stream ? stream->ctx->dc : NULL;
+ if (!stream)
+ return false;
+
+ dc = stream->ctx->dc;
if (dc_stream_set_cursor_attributes(stream, attributes)) {
dc_z10_restore(dc);
@@ -360,6 +386,7 @@ void program_cursor_position(
int i;
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
+ bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
if (!stream)
return;
@@ -378,16 +405,27 @@ void program_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- dc->hwss.cursor_lock(dc, pipe_to_program, true);
+
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_position(pipe_ctx);
+ if (enable_cursor_offload && dc->hwss.update_cursor_offload_pipe)
+ dc->hwss.update_cursor_offload_pipe(dc, pipe_ctx);
+
if (dc->ctx->dmub_srv)
dc_send_update_cursor_info_to_dmu(pipe_ctx, i);
}
- if (pipe_to_program)
- dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ if (pipe_to_program) {
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
+ else
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
+ }
}
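Both cursor paths now share the same shape: when cursor offload is enabled and the hooks exist, the begin/commit pair replaces the cursor_lock(true/false) bracket, and each programmed pipe is additionally staged via update_cursor_offload_pipe. In outline (hook names from this patch, flow condensed for illustration):

    /* Offload path:                       Legacy path:
     *   begin_cursor_offload_update()       cursor_lock(dc, pipe, true)
     *   per pipe:                           per pipe:
     *     set_cursor_position()/...           set_cursor_position()/...
     *     update_cursor_offload_pipe()
     *   commit_cursor_offload_update()      cursor_lock(dc, pipe, false)
     */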
bool dc_stream_set_cursor_position(
@@ -699,9 +737,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
{
uint8_t i;
bool ret = false;
- struct dc *dc = stream->ctx->dc;
- struct resource_context *res_ctx =
- &dc->current_state->res_ctx;
+ struct dc *dc;
+ struct resource_context *res_ctx;
+
+ if (!stream->ctx)
+ return false;
+
+ dc = stream->ctx->dc;
+ res_ctx = &dc->current_state->res_ctx;
dc_exit_ips_for_hw_access(dc);
@@ -849,9 +892,11 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream)
stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
DC_LOG_DC(
- "\tdispname: %s signal: %x\n",
+ "\tsignal: %x dispname: %s manufacturer_id: 0x%x product_id: 0x%x\n",
+ stream->signal,
stream->sink->edid_caps.display_name,
- stream->signal);
+ stream->sink->edid_caps.manufacturer_id,
+ stream->sink->edid_caps.product_id);
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index f24e1da68269..29edfa51ea2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dmub/inc/dmub_cmd.h"
@@ -54,8 +54,16 @@ struct abm_save_restore;
struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
+struct dcn_hubbub_reg_state;
+struct dcn_hubp_reg_state;
+struct dcn_dpp_reg_state;
+struct dcn_mpc_reg_state;
+struct dcn_opp_reg_state;
+struct dcn_dsc_reg_state;
+struct dcn_optc_reg_state;
+struct dcn_dccg_reg_state;
-#define DC_VER "3.2.340"
+#define DC_VER "3.2.359"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -234,6 +242,7 @@ struct lut3d_caps {
* @ogam_ram: programmable out gamma LUT
* @ocsc: output color space conversion matrix
* @num_3dluts: MPC 3D LUT; always assumes a preceding shaper LUT
+ * @num_rmcm_3dluts: number of RMCM 3D LUTs; always assumes a preceding shaper LUT
* @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single
* instance
* @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT
@@ -277,6 +286,15 @@ struct dc_scl_caps {
bool sharpener_support;
};
+struct dc_check_config {
+ /**
+ * max video plane width that can be safely assumed to be always
+ * supported by single DPP pipe.
+ */
+ unsigned int max_optimizable_video_width;
+ bool enable_legacy_fast_update;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -292,11 +310,6 @@ struct dc_caps {
unsigned int max_cursor_size;
unsigned int max_buffered_cursor_size;
unsigned int max_video_width;
- /*
- * max video plane width that can be safely assumed to be always
- * supported by single DPP pipe.
- */
- unsigned int max_optimizable_video_width;
unsigned int min_horizontal_blanking_period;
int linear_pitch_alignment;
bool dcc_const_color;
@@ -454,6 +467,18 @@ enum surface_update_type {
UPDATE_TYPE_FULL, /* may need to shuffle resources */
};
+enum dc_lock_descriptor {
+ LOCK_DESCRIPTOR_NONE = 0x0,
+ LOCK_DESCRIPTOR_STREAM = 0x1,
+ LOCK_DESCRIPTOR_LINK = 0x2,
+ LOCK_DESCRIPTOR_GLOBAL = 0x4,
+};
+
+struct surface_update_descriptor {
+ enum surface_update_type update_type;
+ enum dc_lock_descriptor lock_descriptor;
+};
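Because the lock descriptors are distinct power-of-two values, they compose as a bitmask when an update needs more than one lock; a hedged sketch of a descriptor (whether callers actually OR them is an assumption inferred from the values chosen):

    /* Illustrative only: a mid-level update that would take both the
     * stream and link locks. */
    struct surface_update_descriptor desc = {
            .update_type = UPDATE_TYPE_MED,
            .lock_descriptor = LOCK_DESCRIPTOR_STREAM | LOCK_DESCRIPTOR_LINK,
    };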
+
/* Forward declaration*/
struct dc;
struct dc_plane_state;
@@ -529,6 +554,7 @@ struct dc_config {
bool set_pipe_unlock_order;
bool enable_dpia_pre_training;
bool unify_link_enc_assignment;
+ bool enable_cursor_offload;
struct spl_sharpness_range dcn_sharpness_range;
struct spl_sharpness_range dcn_override_sharpness_range;
};
@@ -694,6 +720,15 @@ struct dc_clocks {
int idle_fclk_khz;
int subvp_prefetch_dramclk_khz;
int subvp_prefetch_fclk_khz;
+
+ /* Stutter efficiencies are technically not clock values, but they are
+ * stored here so they are part of the update_clocks call, similar to num_ways.
+ * Efficiencies are stored as a percentage (0-100).
+ */
+ struct {
+ uint8_t base_efficiency; //LP1
+ uint8_t low_power_efficiency; //LP2
+ } stutter_efficiency;
};
struct dc_bw_validation_profile {
@@ -864,6 +899,7 @@ struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
uint32_t auxErrorCount;
+ struct pipe_topology_history topology_history;
};
struct dc_phy_addr_space_config {
@@ -1072,6 +1108,7 @@ struct dc_debug_options {
unsigned int force_mall_ss_num_ways;
bool alloc_extra_way_for_cursor;
uint32_t subvp_extra_lines;
+ bool disable_force_pstate_allow_on_hw_release;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
@@ -1108,7 +1145,6 @@ struct dc_debug_options {
uint32_t fpo_vactive_min_active_margin_us;
uint32_t fpo_vactive_max_blank_us;
bool enable_hpo_pg_support;
- bool enable_legacy_fast_update;
bool disable_dc_mode_overwrite;
bool replay_skip_crtc_disabled;
bool ignore_pg;/*do nothing, let pmfw control it*/
@@ -1140,12 +1176,18 @@ struct dc_debug_options {
bool enable_ips_visual_confirm;
unsigned int sharpen_policy;
unsigned int scale_to_sharpness_policy;
- bool skip_full_updated_if_possible;
unsigned int enable_oled_edp_power_up_opt;
bool enable_hblank_borrow;
bool force_subvp_df_throttle;
uint32_t acpi_transition_bitmasks[MAX_PIPES];
bool enable_pg_cntl_debug_logs;
+ unsigned int auxless_alpm_lfps_setup_ns;
+ unsigned int auxless_alpm_lfps_period_ns;
+ unsigned int auxless_alpm_lfps_silence_ns;
+ unsigned int auxless_alpm_lfps_t1t2_us;
+ short auxless_alpm_lfps_t1t2_offset_us;
+ bool disable_stutter_for_wm_program;
+ bool enable_block_sequence_programming;
};
@@ -1306,6 +1348,32 @@ union dc_3dlut_state {
};
+#define MATRIX_9C__DIM_128_ALIGNED_LEN 16 // 9+7 : 9 * 8 + 7 * 8 = 72 + 56 = 128 % 128 = 0
+#define MATRIX_17C__DIM_128_ALIGNED_LEN 32 //17+15: 17 * 8 + 15 * 8 = 136 + 120 = 256 % 128 = 0
+#define MATRIX_33C__DIM_128_ALIGNED_LEN 64 //33+31: 33 * 8 + 31 * 8 = 264 + 248 = 512 % 128 = 0
+
+struct lut_rgb {
+ uint16_t b;
+ uint16_t g;
+ uint16_t r;
+ uint16_t padding;
+};
+
+// This structure maps directly to how the LUT hardware reads it from memory
+struct lut_mem_mapping {
+ union {
+ //NATIVE MODE 1, 2
+ //RGB layout [b][g][r] //red is 128 byte aligned
+ //BGR layout [r][g][b] //blue is 128 byte aligned
+ struct lut_rgb rgb_17c[17][17][MATRIX_17C__DIM_128_ALIGNED_LEN];
+ struct lut_rgb rgb_33c[33][33][MATRIX_33C__DIM_128_ALIGNED_LEN];
+
+ //TRANSFORMED
+ uint16_t linear_rgb[(33*33*33*4/128+1)*128];
+ };
+ uint16_t size;
+};
+
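Each lut_rgb entry is four uint16_t values (8 bytes), so the padded inner dimensions above make every innermost row a whole number of 128-byte lines; a compile-time sanity sketch, assuming no unusual struct packing:

	#include <linux/build_bug.h>

	static_assert(sizeof(struct lut_rgb) == 8);
	static_assert(sizeof(struct lut_rgb) * MATRIX_17C__DIM_128_ALIGNED_LEN == 256); /* 256 % 128 == 0 */
	static_assert(sizeof(struct lut_rgb) * MATRIX_33C__DIM_128_ALIGNED_LEN == 512); /* 512 % 128 == 0 */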
struct dc_rmcm_3dlut {
bool isInUse;
const struct dc_stream_state *stream;
@@ -1348,7 +1416,6 @@ union surface_update_flags {
uint32_t in_transfer_func_change:1;
uint32_t input_csc_change:1;
uint32_t coeff_reduction_change:1;
- uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
uint32_t plane_size_change:1;
uint32_t gamut_remap_change:1;
@@ -1659,6 +1726,7 @@ struct dc {
struct dc_debug_options debug;
struct dc_versions versions;
struct dc_caps caps;
+ struct dc_check_config check_config;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_bounding_box_overrides bb_overrides;
@@ -1692,7 +1760,6 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
- bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
@@ -1734,7 +1801,7 @@ struct dc {
struct dml2_configuration_options dml2_options;
struct dml2_configuration_options dml2_dc_power_options;
enum dc_acpi_cm_power_state power_state;
-
+ struct soc_and_ip_translator *soc_and_ip_translator;
};
struct dc_scaling_info {
@@ -1787,6 +1854,29 @@ struct dc_surface_update {
struct dc_bias_and_scale bias_and_scale;
};
+struct dc_underflow_debug_data {
+ struct dcn_hubbub_reg_state *hubbub_reg_state;
+ struct dcn_hubp_reg_state *hubp_reg_state[MAX_PIPES];
+ struct dcn_dpp_reg_state *dpp_reg_state[MAX_PIPES];
+ struct dcn_mpc_reg_state *mpc_reg_state[MAX_PIPES];
+ struct dcn_opp_reg_state *opp_reg_state[MAX_PIPES];
+ struct dcn_dsc_reg_state *dsc_reg_state[MAX_PIPES];
+ struct dcn_optc_reg_state *optc_reg_state[MAX_PIPES];
+ struct dcn_dccg_reg_state *dccg_reg_state[MAX_PIPES];
+};
+
+struct power_features {
+ bool ips;
+ bool rcg;
+ bool replay;
+ bool dds;
+ bool sprs;
+ bool psr;
+ bool fams;
+ bool mpo;
+ bool uclk_p_state;
+};
+
/*
* Create a new surface with default parameters;
*/
@@ -1805,8 +1895,6 @@ void dc_3dlut_func_retain(struct dc_3dlut *lut);
void dc_post_update_surfaces_to_stream(
struct dc *dc);
-#include "dc_stream.h"
-
/**
* struct dc_validation_set - Struct to store surface/stream associations for validation
*/
@@ -2448,6 +2536,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(
*/
enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx);
+/*
+ * Query whether ALPM is supported by the link
+ */
+void dc_link_get_alpm_support(struct dc_link *link, bool *auxless_support,
+ bool *auxwake_support);
+
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
@@ -2625,6 +2719,13 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
uint32_t link_index,
struct aux_payload *payload);
+/*
+ * Smart power OLED interfaces
+ */
+bool dc_smart_power_oled_enable(const struct dc_link *link, bool enable, uint16_t peak_nits,
+ uint8_t debug_control, uint16_t fixed_CLL, uint32_t triggerline);
+bool dc_smart_power_oled_get_max_cll(const struct dc_link *link, unsigned int *pCurrent_MaxCLL);
+
/* Get dc link index from dpia port index */
uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
uint8_t dpia_port_index);
@@ -2658,6 +2759,8 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context);
bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index);
+void dc_log_preos_dmcub_info(const struct dc *dc);
+
/* DSC Interfaces */
#include "dc_dsc.h"
@@ -2673,6 +2776,508 @@ bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
bool dc_is_cursor_limit_pending(struct dc *dc);
-bool dc_can_clear_cursor_limit(struct dc *dc);
+bool dc_can_clear_cursor_limit(const struct dc *dc);
+
+/**
+ * dc_get_underflow_debug_data_for_otg() - Retrieve underflow debug data.
+ *
+ * @dc: Pointer to the display core context.
+ * @primary_otg_inst: Instance index of the primary OTG that underflowed.
+ * @out_data: Pointer to a dc_underflow_debug_data struct to be filled with debug information.
+ *
+ * This function collects and logs underflow-related HW states when underflow happens,
+ * including OTG underflow status, current read positions, frame count, and per-HUBP debug data.
+ * The results are stored in the provided out_data structure for further analysis or logging.
+ */
+void dc_get_underflow_debug_data_for_otg(struct dc *dc, int primary_otg_inst, struct dc_underflow_debug_data *out_data);
+
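A typical caller zeroes the output struct first, since the per-block pointers are only filled for blocks the implementation can capture; a minimal usage sketch (dc and otg_inst are assumed to come from the surrounding underflow handler):

	struct dc_underflow_debug_data debug_data;

	memset(&debug_data, 0, sizeof(debug_data));
	dc_get_underflow_debug_data_for_otg(dc, otg_inst, &debug_data);
	/* debug_data now holds the captured per-block state for logging */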
+void dc_get_power_feature_status(struct dc *dc, int primary_otg_inst, struct power_features *out_data);
+
+/**
+ * Software state variables used to program register fields across the display pipeline
+ */
+struct dc_register_software_state {
+ /* HUBP register programming variables for each pipe */
+ struct {
+ bool valid_plane_state;
+ bool valid_stream;
+ bool min_dc_gfx_version9;
+ uint32_t vtg_sel; /* DCHUBP_CNTL->HUBP_VTG_SEL from pipe_ctx->stream_res.tg->inst */
+ uint32_t hubp_clock_enable; /* HUBP_CLK_CNTL->HUBP_CLOCK_ENABLE from power management */
+ uint32_t surface_pixel_format; /* DCSURF_SURFACE_CONFIG->SURFACE_PIXEL_FORMAT from plane_state->format */
+ uint32_t rotation_angle; /* DCSURF_SURFACE_CONFIG->ROTATION_ANGLE from plane_state->rotation */
+ uint32_t h_mirror_en; /* DCSURF_SURFACE_CONFIG->H_MIRROR_EN from plane_state->horizontal_mirror */
+ uint32_t surface_dcc_en; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_EN from dcc->enable */
+ uint32_t surface_size_width; /* HUBP_SIZE->SURFACE_SIZE_WIDTH from plane_size.surface_size.width */
+ uint32_t surface_size_height; /* HUBP_SIZE->SURFACE_SIZE_HEIGHT from plane_size.surface_size.height */
+ uint32_t pri_viewport_width; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_WIDTH from scaler_data.viewport.width */
+ uint32_t pri_viewport_height; /* DCSURF_PRI_VIEWPORT_DIMENSION->PRI_VIEWPORT_HEIGHT from scaler_data.viewport.height */
+ uint32_t pri_viewport_x_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_X_START from scaler_data.viewport.x */
+ uint32_t pri_viewport_y_start; /* DCSURF_PRI_VIEWPORT_START->PRI_VIEWPORT_Y_START from scaler_data.viewport.y */
+ uint32_t cursor_enable; /* CURSOR_CONTROL->CURSOR_ENABLE from cursor_attributes.enable */
+ uint32_t cursor_width; /* CURSOR_SETTINGS->CURSOR_WIDTH from cursor_position.width */
+ uint32_t cursor_height; /* CURSOR_SETTINGS->CURSOR_HEIGHT from cursor_position.height */
+
+ /* Additional DCC configuration */
+ uint32_t surface_dcc_ind_64b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_64B_BLK from dcc.independent_64b_blks */
+ uint32_t surface_dcc_ind_128b_blk; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_DCC_IND_128B_BLK from dcc.independent_128b_blks */
+
+ /* Surface pitch configuration */
+ uint32_t surface_pitch; /* DCSURF_SURFACE_PITCH->PITCH from plane_size.surface_pitch */
+ uint32_t meta_pitch; /* DCSURF_SURFACE_PITCH->META_PITCH from dcc.meta_pitch */
+ uint32_t chroma_pitch; /* DCSURF_SURFACE_PITCH_C->PITCH_C from plane_size.chroma_pitch */
+ uint32_t meta_pitch_c; /* DCSURF_SURFACE_PITCH_C->META_PITCH_C from dcc.meta_pitch_c */
+
+ /* Surface addresses */
+ uint32_t primary_surface_address_low; /* DCSURF_PRIMARY_SURFACE_ADDRESS->PRIMARY_SURFACE_ADDRESS from address.grph.addr.low_part */
+ uint32_t primary_surface_address_high; /* DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH->PRIMARY_SURFACE_ADDRESS_HIGH from address.grph.addr.high_part */
+ uint32_t primary_meta_surface_address_low; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS->PRIMARY_META_SURFACE_ADDRESS from address.grph.meta_addr.low_part */
+ uint32_t primary_meta_surface_address_high; /* DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH->PRIMARY_META_SURFACE_ADDRESS_HIGH from address.grph.meta_addr.high_part */
+
+ /* TMZ configuration */
+ uint32_t primary_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_SURFACE_TMZ from address.tmz_surface */
+ uint32_t primary_meta_surface_tmz; /* DCSURF_SURFACE_CONTROL->PRIMARY_META_SURFACE_TMZ from address.tmz_surface */
+
+ /* Tiling configuration */
+ uint32_t sw_mode; /* DCSURF_TILING_CONFIG->SW_MODE from tiling_info.gfx9.swizzle */
+ uint32_t num_pipes; /* DCSURF_ADDR_CONFIG->NUM_PIPES from tiling_info.gfx9.num_pipes */
+ uint32_t num_banks; /* DCSURF_ADDR_CONFIG->NUM_BANKS from tiling_info.gfx9.num_banks */
+ uint32_t pipe_interleave; /* DCSURF_ADDR_CONFIG->PIPE_INTERLEAVE from tiling_info.gfx9.pipe_interleave */
+ uint32_t num_shader_engines; /* DCSURF_ADDR_CONFIG->NUM_SE from tiling_info.gfx9.num_shader_engines */
+ uint32_t num_rb_per_se; /* DCSURF_ADDR_CONFIG->NUM_RB_PER_SE from tiling_info.gfx9.num_rb_per_se */
+ uint32_t num_pkrs; /* DCSURF_ADDR_CONFIG->NUM_PKRS from tiling_info.gfx9.num_pkrs */
+
+ /* DML Request Size Configuration - Luma */
+ uint32_t rq_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->CHUNK_SIZE from rq_regs.rq_regs_l.chunk_size */
+ uint32_t rq_min_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_CHUNK_SIZE from rq_regs.rq_regs_l.min_chunk_size */
+ uint32_t rq_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->META_CHUNK_SIZE from rq_regs.rq_regs_l.meta_chunk_size */
+ uint32_t rq_min_meta_chunk_size; /* DCHUBP_REQ_SIZE_CONFIG->MIN_META_CHUNK_SIZE from rq_regs.rq_regs_l.min_meta_chunk_size */
+ uint32_t rq_dpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->DPTE_GROUP_SIZE from rq_regs.rq_regs_l.dpte_group_size */
+ uint32_t rq_mpte_group_size; /* DCHUBP_REQ_SIZE_CONFIG->MPTE_GROUP_SIZE from rq_regs.rq_regs_l.mpte_group_size */
+ uint32_t rq_swath_height_l; /* DCHUBP_REQ_SIZE_CONFIG->SWATH_HEIGHT_L from rq_regs.rq_regs_l.swath_height */
+ uint32_t rq_pte_row_height_l; /* DCHUBP_REQ_SIZE_CONFIG->PTE_ROW_HEIGHT_L from rq_regs.rq_regs_l.pte_row_height */
+
+ /* DML Request Size Configuration - Chroma */
+ uint32_t rq_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->CHUNK_SIZE_C from rq_regs.rq_regs_c.chunk_size */
+ uint32_t rq_min_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_chunk_size */
+ uint32_t rq_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->META_CHUNK_SIZE_C from rq_regs.rq_regs_c.meta_chunk_size */
+ uint32_t rq_min_meta_chunk_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MIN_META_CHUNK_SIZE_C from rq_regs.rq_regs_c.min_meta_chunk_size */
+ uint32_t rq_dpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->DPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.dpte_group_size */
+ uint32_t rq_mpte_group_size_c; /* DCHUBP_REQ_SIZE_CONFIG_C->MPTE_GROUP_SIZE_C from rq_regs.rq_regs_c.mpte_group_size */
+ uint32_t rq_swath_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->SWATH_HEIGHT_C from rq_regs.rq_regs_c.swath_height */
+ uint32_t rq_pte_row_height_c; /* DCHUBP_REQ_SIZE_CONFIG_C->PTE_ROW_HEIGHT_C from rq_regs.rq_regs_c.pte_row_height */
+
+ /* DML Expansion Modes */
+ uint32_t drq_expansion_mode; /* DCN_EXPANSION_MODE->DRQ_EXPANSION_MODE from rq_regs.drq_expansion_mode */
+ uint32_t prq_expansion_mode; /* DCN_EXPANSION_MODE->PRQ_EXPANSION_MODE from rq_regs.prq_expansion_mode */
+ uint32_t mrq_expansion_mode; /* DCN_EXPANSION_MODE->MRQ_EXPANSION_MODE from rq_regs.mrq_expansion_mode */
+ uint32_t crq_expansion_mode; /* DCN_EXPANSION_MODE->CRQ_EXPANSION_MODE from rq_regs.crq_expansion_mode */
+
+ /* DML DLG parameters - nominal */
+ uint32_t dst_y_per_vm_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_VM_VBLANK from dlg_regs.dst_y_per_vm_vblank */
+ uint32_t dst_y_per_row_vblank; /* NOM_PARAMETERS_0->DST_Y_PER_ROW_VBLANK from dlg_regs.dst_y_per_row_vblank */
+ uint32_t dst_y_per_vm_flip; /* NOM_PARAMETERS_1->DST_Y_PER_VM_FLIP from dlg_regs.dst_y_per_vm_flip */
+ uint32_t dst_y_per_row_flip; /* NOM_PARAMETERS_1->DST_Y_PER_ROW_FLIP from dlg_regs.dst_y_per_row_flip */
+
+ /* DML prefetch settings */
+ uint32_t dst_y_prefetch; /* PREFETCH_SETTINS->DST_Y_PREFETCH from dlg_regs.dst_y_prefetch */
+ uint32_t vratio_prefetch; /* PREFETCH_SETTINS->VRATIO_PREFETCH from dlg_regs.vratio_prefetch */
+ uint32_t vratio_prefetch_c; /* PREFETCH_SETTINS_C->VRATIO_PREFETCH_C from dlg_regs.vratio_prefetch_c */
+
+ /* TTU parameters */
+ uint32_t qos_level_low_wm; /* TTU_CNTL1->QoSLevelLowWaterMark from ttu_regs.qos_level_low_wm */
+ uint32_t qos_level_high_wm; /* TTU_CNTL1->QoSLevelHighWaterMark from ttu_regs.qos_level_high_wm */
+ uint32_t qos_level_flip; /* TTU_CNTL2->QoS_LEVEL_FLIP_L from ttu_regs.qos_level_flip */
+ uint32_t min_ttu_vblank; /* DCN_GLOBAL_TTU_CNTL->MIN_TTU_VBLANK from ttu_regs.min_ttu_vblank */
+ } hubp[MAX_PIPES];
+
+ /* HUBBUB register programming variables */
+ struct {
+ /* Individual DET buffer control per pipe - software state that programs DET registers */
+ uint32_t det0_size; /* DCHUBBUB_DET0_CTRL->DET0_SIZE from hubbub->funcs->program_det_size(hubbub, 0, det_buffer_size_kb) */
+ uint32_t det1_size; /* DCHUBBUB_DET1_CTRL->DET1_SIZE from hubbub->funcs->program_det_size(hubbub, 1, det_buffer_size_kb) */
+ uint32_t det2_size; /* DCHUBBUB_DET2_CTRL->DET2_SIZE from hubbub->funcs->program_det_size(hubbub, 2, det_buffer_size_kb) */
+ uint32_t det3_size; /* DCHUBBUB_DET3_CTRL->DET3_SIZE from hubbub->funcs->program_det_size(hubbub, 3, det_buffer_size_kb) */
+
+ /* Compression buffer control - software state that programs COMPBUF registers */
+ uint32_t compbuf_size; /* DCHUBBUB_COMPBUF_CTRL->COMPBUF_SIZE from hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, safe_to_increase) */
+ uint32_t compbuf_reserved_space_64b; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_64B from hubbub2->pixel_chunk_size / 32 */
+ uint32_t compbuf_reserved_space_zs; /* COMPBUF_RESERVED_SPACE->COMPBUF_RESERVED_SPACE_ZS from hubbub2->pixel_chunk_size / 128 */
+ } hubbub;
+
+ /* DPP register programming variables for each pipe (simplified for available fields) */
+ struct {
+ uint32_t dpp_clock_enable; /* DPP_CONTROL->DPP_CLOCK_ENABLE from dppclk_enable */
+
+ /* Recout (Rectangle of Interest) configuration */
+ uint32_t recout_start_x; /* RECOUT_START->RECOUT_START_X from pipe_ctx->plane_res.scl_data.recout.x */
+ uint32_t recout_start_y; /* RECOUT_START->RECOUT_START_Y from pipe_ctx->plane_res.scl_data.recout.y */
+ uint32_t recout_width; /* RECOUT_SIZE->RECOUT_WIDTH from pipe_ctx->plane_res.scl_data.recout.width */
+ uint32_t recout_height; /* RECOUT_SIZE->RECOUT_HEIGHT from pipe_ctx->plane_res.scl_data.recout.height */
+
+ /* MPC (Multiple Pipe/Plane Combiner) size configuration */
+ uint32_t mpc_width; /* MPC_SIZE->MPC_WIDTH from pipe_ctx->plane_res.scl_data.h_active */
+ uint32_t mpc_height; /* MPC_SIZE->MPC_HEIGHT from pipe_ctx->plane_res.scl_data.v_active */
+
+ /* DSCL mode configuration */
+ uint32_t dscl_mode; /* SCL_MODE->DSCL_MODE from pipe_ctx->plane_res.scl_data.dscl_prog_data.dscl_mode */
+
+ /* Scaler ratios (simplified to integer parts) */
+ uint32_t horz_ratio_int; /* SCL_HORZ_FILTER_SCALE_RATIO->SCL_H_SCALE_RATIO integer part from ratios.horz */
+ uint32_t vert_ratio_int; /* SCL_VERT_FILTER_SCALE_RATIO->SCL_V_SCALE_RATIO integer part from ratios.vert */
+
+ /* Basic scaler taps */
+ uint32_t h_taps; /* SCL_TAP_CONTROL->SCL_H_NUM_TAPS from taps.h_taps */
+ uint32_t v_taps; /* SCL_TAP_CONTROL->SCL_V_NUM_TAPS from taps.v_taps */
+ } dpp[MAX_PIPES];
+
+ /* DCCG register programming variables */
+ struct {
+ /* Core Display Clock Control */
+ uint32_t dispclk_khz; /* DENTIST_DISPCLK_CNTL->DENTIST_DISPCLK_WDIVIDER from clk_mgr.dispclk_khz */
+ uint32_t dc_mem_global_pwr_req_dis; /* DC_MEM_GLOBAL_PWR_REQ_CNTL->DC_MEM_GLOBAL_PWR_REQ_DIS from memory power management settings */
+
+ /* DPP Clock Control - 4 fields per pipe */
+ uint32_t dppclk_khz[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK_R_GATE_DISABLE from dpp_clocks[pipe] */
+ uint32_t dppclk_enable[MAX_PIPES]; /* DPPCLK_CTRL->DPPCLK0_EN,DPPCLK1_EN,DPPCLK2_EN,DPPCLK3_EN from dccg31_update_dpp_dto() */
+ uint32_t dppclk_dto_enable[MAX_PIPES]; /* DPPCLK_DTO_CTRL->DPPCLK_DTO_ENABLE from dccg->dpp_clock_gated[dpp_inst] state */
+ uint32_t dppclk_dto_phase[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_PHASE from phase calculation req_dppclk/ref_dppclk */
+ uint32_t dppclk_dto_modulo[MAX_PIPES]; /* DPPCLK0_DTO_PARAM->DPPCLK0_DTO_MODULO from modulo = 0xff */
+
+ /* DSC Clock Control - 4 fields per DSC resource */
+ uint32_t dscclk_khz[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK_DTO_ENABLE from dsc_clocks */
+ uint32_t dscclk_dto_enable[MAX_PIPES]; /* DSCCLK_DTO_CTRL->DSCCLK0_DTO_ENABLE,DSCCLK1_DTO_ENABLE,DSCCLK2_DTO_ENABLE,DSCCLK3_DTO_ENABLE */
+ uint32_t dscclk_dto_phase[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_PHASE from dccg31_enable_dscclk() */
+ uint32_t dscclk_dto_modulo[MAX_PIPES]; /* DSCCLK0_DTO_PARAM->DSCCLK0_DTO_MODULO from dccg31_enable_dscclk() */
+
+ /* Pixel Clock Control - per pipe */
+ uint32_t pixclk_khz[MAX_PIPES]; /* PIXCLK_RESYNC_CNTL->PIXCLK_RESYNC_ENABLE from stream.timing.pix_clk_100hz */
+ uint32_t otg_pixel_rate_div[MAX_PIPES]; /* OTG_PIXEL_RATE_DIV->OTG_PIXEL_RATE_DIV from OTG pixel rate divider control */
+ uint32_t dtbclk_dto_enable[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_ENABLE from dccg31_set_dtbclk_dto() */
+ uint32_t pipe_dto_src_sel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->PIPE_DTO_SRC_SEL from dccg31_set_dtbclk_dto() source selection */
+ uint32_t dtbclk_dto_div[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->DTBCLK_DTO_DIV from dtbdto_div calculation */
+ uint32_t otg_add_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_ADD_PIXEL from dccg31_otg_add_pixel() */
+ uint32_t otg_drop_pixel[MAX_PIPES]; /* OTG0_PIXEL_RATE_CNTL->OTG_DROP_PIXEL from dccg31_otg_drop_pixel() */
+
+ /* DTBCLK DTO Control - 4 DTOs */
+ uint32_t dtbclk_dto_modulo[4]; /* DTBCLK_DTO0_MODULO->DTBCLK_DTO0_MODULO from dccg31_set_dtbclk_dto() modulo calculation */
+ uint32_t dtbclk_dto_phase[4]; /* DTBCLK_DTO0_PHASE->DTBCLK_DTO0_PHASE from phase calculation pixclk_khz/ref_dtbclk_khz */
+ uint32_t dtbclk_dto_dbuf_en; /* DTBCLK_DTO_DBUF_EN->DTBCLK DTO data buffer enable */
+
+ /* DP Stream Clock Control - 4 pipes */
+ uint32_t dpstreamclk_enable[MAX_PIPES]; /* DPSTREAMCLK_CNTL->DPSTREAMCLK_PIPE0_EN,DPSTREAMCLK_PIPE1_EN,DPSTREAMCLK_PIPE2_EN,DPSTREAMCLK_PIPE3_EN */
+ uint32_t dp_dto_modulo[4]; /* DP_DTO0_MODULO->DP_DTO0_MODULO from DP stream DTO programming */
+ uint32_t dp_dto_phase[4]; /* DP_DTO0_PHASE->DP_DTO0_PHASE from DP stream DTO programming */
+ uint32_t dp_dto_dbuf_en; /* DP_DTO_DBUF_EN->DP DTO data buffer enable */
+
+ /* PHY Symbol Clock Control - 5 PHYs (A,B,C,D,E) */
+ uint32_t phy_symclk_force_en[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_EN from dccg31_set_physymclk() force_enable */
+ uint32_t phy_symclk_force_src_sel[5]; /* PHYASYMCLK_CLOCK_CNTL->PHYASYMCLK_FORCE_SRC_SEL from dccg31_set_physymclk() clk_src */
+ uint32_t phy_symclk_gate_disable[5]; /* DCCG_GATE_DISABLE_CNTL2->PHYASYMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.physymclk */
+
+ /* SYMCLK32 SE Control - 4 instances */
+ uint32_t symclk32_se_src_sel[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_SRC_SEL from dccg31_enable_symclk32_se() with get_phy_mux_symclk() mapping */
+ uint32_t symclk32_se_enable[4]; /* SYMCLK32_SE_CNTL->SYMCLK32_SE0_EN from dccg31_enable_symclk32_se() enable */
+ uint32_t symclk32_se_gate_disable[4]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_SE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_se */
+
+ /* SYMCLK32 LE Control - 2 instances */
+ uint32_t symclk32_le_src_sel[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_SRC_SEL from dccg31_enable_symclk32_le() phyd32clk source */
+ uint32_t symclk32_le_enable[2]; /* SYMCLK32_LE_CNTL->SYMCLK32_LE0_EN from dccg31_enable_symclk32_le() enable */
+ uint32_t symclk32_le_gate_disable[2]; /* DCCG_GATE_DISABLE_CNTL3->SYMCLK32_LE0_GATE_DISABLE from debug.root_clock_optimization.bits.symclk32_le */
+
+ /* DPIA Clock Control */
+ uint32_t dpiaclk_540m_dto_modulo; /* DPIACLK_540M_DTO_MODULO->DPIA 540MHz DTO modulo */
+ uint32_t dpiaclk_540m_dto_phase; /* DPIACLK_540M_DTO_PHASE->DPIA 540MHz DTO phase */
+ uint32_t dpiaclk_810m_dto_modulo; /* DPIACLK_810M_DTO_MODULO->DPIA 810MHz DTO modulo */
+ uint32_t dpiaclk_810m_dto_phase; /* DPIACLK_810M_DTO_PHASE->DPIA 810MHz DTO phase */
+ uint32_t dpiaclk_dto_cntl; /* DPIACLK_DTO_CNTL->DPIA clock DTO control */
+ uint32_t dpiasymclk_cntl; /* DPIASYMCLK_CNTL->DPIA symbol clock control */
+
+ /* Clock Gating Control */
+ uint32_t dccg_gate_disable_cntl; /* DCCG_GATE_DISABLE_CNTL->Clock gate disable control from dccg31_init() */
+ uint32_t dpstreamclk_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
+ uint32_t dpstreamclk_root_gate_disable; /* DCCG_GATE_DISABLE_CNTL3->DPSTREAMCLK_ROOT_GATE_DISABLE from debug.root_clock_optimization.bits.dpstream */
+
+ /* VSync Control */
+ uint32_t vsync_cnt_ctrl; /* DCCG_VSYNC_CNT_CTRL->VSync counter control */
+ uint32_t vsync_cnt_int_ctrl; /* DCCG_VSYNC_CNT_INT_CTRL->VSync counter interrupt control */
+ uint32_t vsync_otg_latch_value[6]; /* DCCG_VSYNC_OTG0_LATCH_VALUE->OTG0 VSync latch value (for OTG0-5) */
+
+ /* Time Base Control */
+ uint32_t microsecond_time_base_div; /* MICROSECOND_TIME_BASE_DIV->Microsecond time base divider */
+ uint32_t millisecond_time_base_div; /* MILLISECOND_TIME_BASE_DIV->Millisecond time base divider */
+ } dccg;
+
+ /* DSC essential configuration for underflow analysis */
+ struct {
+ /* DSC active state - critical for bandwidth analysis */
+ uint32_t dsc_clock_enable; /* DSC enabled - affects bandwidth requirements */
+
+ /* DSC configuration affecting bandwidth and timing */
+ uint32_t dsc_num_slices_h; /* Horizontal slice count - affects throughput */
+ uint32_t dsc_num_slices_v; /* Vertical slice count - affects throughput */
+ uint32_t dsc_bits_per_pixel; /* Compression ratio - affects bandwidth */
+
+ /* OPP integration - affects pipeline flow */
+ uint32_t dscrm_dsc_forward_enable; /* DSC forwarding to OPP enabled */
+ uint32_t dscrm_dsc_opp_pipe_source; /* Which OPP receives DSC output */
+ } dsc[MAX_PIPES];
+
+ /* MPC register programming variables */
+ struct {
+ /* MPCC blending tree and mode control */
+ uint32_t mpcc_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_MODE from blend_cfg.blend_mode */
+ uint32_t mpcc_alpha_blend_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_BLND_MODE from blend_cfg.alpha_mode */
+ uint32_t mpcc_alpha_multiplied_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_ALPHA_MULTIPLIED_MODE from blend_cfg.pre_multiplied_alpha */
+ uint32_t mpcc_blnd_active_overlap_only[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BLND_ACTIVE_OVERLAP_ONLY from blend_cfg.overlap_only */
+ uint32_t mpcc_global_alpha[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_ALPHA from blend_cfg.global_alpha */
+ uint32_t mpcc_global_gain[MAX_PIPES]; /* MPCC_CONTROL->MPCC_GLOBAL_GAIN from blend_cfg.global_gain */
+ uint32_t mpcc_bg_bpc[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BG_BPC from background color depth */
+ uint32_t mpcc_bot_gain_mode[MAX_PIPES]; /* MPCC_CONTROL->MPCC_BOT_GAIN_MODE from bottom layer gain control */
+
+ /* MPCC blending tree connections */
+ uint32_t mpcc_bot_sel[MAX_PIPES]; /* MPCC_BOT_SEL->MPCC_BOT_SEL from mpcc_state->bot_sel */
+ uint32_t mpcc_top_sel[MAX_PIPES]; /* MPCC_TOP_SEL->MPCC_TOP_SEL from mpcc_state->dpp_id */
+
+ /* MPCC output gamma control */
+ uint32_t mpcc_ogam_mode[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_MODE from output gamma mode */
+ uint32_t mpcc_ogam_select[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_SELECT from gamma LUT bank selection */
+ uint32_t mpcc_ogam_pwl_disable[MAX_PIPES]; /* MPCC_OGAM_CONTROL->MPCC_OGAM_PWL_DISABLE from PWL control */
+
+ /* MPCC pipe assignment and status */
+ uint32_t mpcc_opp_id[MAX_PIPES]; /* MPCC_OPP_ID->MPCC_OPP_ID from mpcc_state->opp_id */
+ uint32_t mpcc_idle[MAX_PIPES]; /* MPCC_STATUS->MPCC_IDLE from mpcc idle status */
+ uint32_t mpcc_busy[MAX_PIPES]; /* MPCC_STATUS->MPCC_BUSY from mpcc busy status */
+
+ /* MPC output processing */
+ uint32_t mpc_out_csc_mode; /* MPC_OUT_CSC_COEF->MPC_OUT_CSC_MODE from output_csc */
+ uint32_t mpc_out_gamma_mode; /* MPC_OUT_GAMMA_LUT->MPC_OUT_GAMMA_MODE from output_gamma */
+ } mpc;
+
+ /* OPP register programming variables for each pipe */
+ struct {
+ /* Display Pattern Generator (DPG) Control - 19 fields from DPG_CONTROL register */
+ uint32_t dpg_enable; /* DPG_CONTROL->DPG_EN from test_pattern parameter (enable/disable) */
+
+ /* Format Control (FMT) - 18 fields from FMT_CONTROL register */
+ uint32_t fmt_pixel_encoding; /* FMT_CONTROL->FMT_PIXEL_ENCODING from clamping->pixel_encoding */
+ uint32_t fmt_subsampling_mode; /* FMT_CONTROL->FMT_SUBSAMPLING_MODE from force_chroma_subsampling_1tap */
+ uint32_t fmt_cbcr_bit_reduction_bypass; /* FMT_CONTROL->FMT_CBCR_BIT_REDUCTION_BYPASS from pixel_encoding bypass control */
+ uint32_t fmt_stereosync_override; /* FMT_CONTROL->FMT_STEREOSYNC_OVERRIDE from stereo timing override */
+ uint32_t fmt_spatial_dither_frame_counter_max; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX from fmt_bit_depth->flags */
+ uint32_t fmt_spatial_dither_frame_counter_bit_swap; /* FMT_CONTROL->FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP from dither control */
+ uint32_t fmt_truncate_enable; /* FMT_CONTROL->FMT_TRUNCATE_EN from fmt_bit_depth->flags.TRUNCATE_ENABLED */
+ uint32_t fmt_truncate_depth; /* FMT_CONTROL->FMT_TRUNCATE_DEPTH from fmt_bit_depth->flags.TRUNCATE_DEPTH */
+ uint32_t fmt_truncate_mode; /* FMT_CONTROL->FMT_TRUNCATE_MODE from fmt_bit_depth->flags.TRUNCATE_MODE */
+ uint32_t fmt_spatial_dither_enable; /* FMT_CONTROL->FMT_SPATIAL_DITHER_EN from fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED */
+ uint32_t fmt_spatial_dither_mode; /* FMT_CONTROL->FMT_SPATIAL_DITHER_MODE from fmt_bit_depth->flags.SPATIAL_DITHER_MODE */
+ uint32_t fmt_spatial_dither_depth; /* FMT_CONTROL->FMT_SPATIAL_DITHER_DEPTH from fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH */
+ uint32_t fmt_temporal_dither_enable; /* FMT_CONTROL->FMT_TEMPORAL_DITHER_EN from fmt_bit_depth->flags.TEMPORAL_DITHER_ENABLED */
+ uint32_t fmt_clamp_data_enable; /* FMT_CONTROL->FMT_CLAMP_DATA_EN from clamping->clamping_range enable */
+ uint32_t fmt_clamp_color_format; /* FMT_CONTROL->FMT_CLAMP_COLOR_FORMAT from clamping->color_format */
+ uint32_t fmt_dynamic_exp_enable; /* FMT_CONTROL->FMT_DYNAMIC_EXP_EN from color_sp/color_dpth/signal */
+ uint32_t fmt_dynamic_exp_mode; /* FMT_CONTROL->FMT_DYNAMIC_EXP_MODE from color space mode mapping */
+ uint32_t fmt_bit_depth_control; /* Legacy field - kept for compatibility */
+
+ /* OPP Pipe Control - 1 field from OPP_PIPE_CONTROL register */
+ uint32_t opp_pipe_clock_enable; /* OPP_PIPE_CONTROL->OPP_PIPE_CLOCK_EN from enable parameter (bool) */
+
+ /* OPP CRC Control - 3 fields from OPP_PIPE_CRC_CONTROL register */
+ uint32_t opp_crc_enable; /* OPP_PIPE_CRC_CONTROL->CRC_EN from CRC enable control */
+ uint32_t opp_crc_select_source; /* OPP_PIPE_CRC_CONTROL->CRC_SELECT_SOURCE from CRC source selection */
+ uint32_t opp_crc_stereo_cont; /* OPP_PIPE_CRC_CONTROL->CRC_STEREO_CONT from stereo continuous CRC */
+
+ /* Output Buffer (OPPBUF) Control - 6 fields from OPPBUF_CONTROL register */
+ uint32_t oppbuf_active_width; /* OPPBUF_CONTROL->OPPBUF_ACTIVE_WIDTH from oppbuf_params->active_width */
+ uint32_t oppbuf_pixel_repetition; /* OPPBUF_CONTROL->OPPBUF_PIXEL_REPETITION from oppbuf_params->pixel_repetition */
+ uint32_t oppbuf_display_segmentation; /* OPPBUF_CONTROL->OPPBUF_DISPLAY_SEGMENTATION from oppbuf_params->mso_segmentation */
+ uint32_t oppbuf_overlap_pixel_num; /* OPPBUF_CONTROL->OPPBUF_OVERLAP_PIXEL_NUM from oppbuf_params->mso_overlap_pixel_num */
+ uint32_t oppbuf_3d_vact_space1_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE1_SIZE from 3D timing space1_size */
+ uint32_t oppbuf_3d_vact_space2_size; /* OPPBUF_CONTROL->OPPBUF_3D_VACT_SPACE2_SIZE from 3D timing space2_size */
+
+ /* DSC Forward Config - 3 fields from DSCRM_DSC_FORWARD_CONFIG register */
+ uint32_t dscrm_dsc_forward_enable; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN from DSC forward enable control */
+ uint32_t dscrm_dsc_opp_pipe_source; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_OPP_PIPE_SOURCE from opp_pipe parameter */
+ uint32_t dscrm_dsc_forward_enable_status; /* DSCRM_DSC_FORWARD_CONFIG->DSCRM_DSC_FORWARD_EN_STATUS from DSC forward status (read-only) */
+ } opp[MAX_PIPES];
+
+ /* OPTC register programming variables for each pipe */
+ struct {
+ uint32_t otg_master_inst;
+
+ /* OTG_CONTROL register - 5 fields for OTG control */
+ uint32_t otg_master_enable; /* OTG_CONTROL->OTG_MASTER_EN from timing enable/disable control */
+ uint32_t otg_disable_point_cntl; /* OTG_CONTROL->OTG_DISABLE_POINT_CNTL from disable timing control */
+ uint32_t otg_start_point_cntl; /* OTG_CONTROL->OTG_START_POINT_CNTL from start timing control */
+ uint32_t otg_field_number_cntl; /* OTG_CONTROL->OTG_FIELD_NUMBER_CNTL from interlace field control */
+ uint32_t otg_out_mux; /* OTG_CONTROL->OTG_OUT_MUX from output mux selection */
+
+ /* OTG Horizontal Timing - 7 fields */
+ uint32_t otg_h_total; /* OTG_H_TOTAL->OTG_H_TOTAL from dc_crtc_timing->h_total */
+ uint32_t otg_h_blank_start; /* OTG_H_BLANK_START_END->OTG_H_BLANK_START from dc_crtc_timing->h_front_porch */
+ uint32_t otg_h_blank_end; /* OTG_H_BLANK_START_END->OTG_H_BLANK_END from dc_crtc_timing->h_addressable_video_pixel_width */
+ uint32_t otg_h_sync_start; /* OTG_H_SYNC_A->OTG_H_SYNC_A_START from dc_crtc_timing->h_sync_width */
+ uint32_t otg_h_sync_end; /* OTG_H_SYNC_A->OTG_H_SYNC_A_END from calculated sync end position */
+ uint32_t otg_h_sync_polarity; /* OTG_H_SYNC_A_CNTL->OTG_H_SYNC_A_POL from dc_crtc_timing->flags.HSYNC_POSITIVE_POLARITY */
+ uint32_t otg_h_timing_div_mode; /* OTG_H_TIMING_CNTL->OTG_H_TIMING_DIV_MODE from horizontal timing division mode */
+
+ /* OTG Vertical Timing - 7 fields */
+ uint32_t otg_v_total; /* OTG_V_TOTAL->OTG_V_TOTAL from dc_crtc_timing->v_total */
+ uint32_t otg_v_blank_start; /* OTG_V_BLANK_START_END->OTG_V_BLANK_START from dc_crtc_timing->v_front_porch */
+ uint32_t otg_v_blank_end; /* OTG_V_BLANK_START_END->OTG_V_BLANK_END from dc_crtc_timing->v_addressable_video_line_width */
+ uint32_t otg_v_sync_start; /* OTG_V_SYNC_A->OTG_V_SYNC_A_START from dc_crtc_timing->v_sync_width */
+ uint32_t otg_v_sync_end; /* OTG_V_SYNC_A->OTG_V_SYNC_A_END from calculated sync end position */
+ uint32_t otg_v_sync_polarity; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_A_POL from dc_crtc_timing->flags.VSYNC_POSITIVE_POLARITY */
+ uint32_t otg_v_sync_mode; /* OTG_V_SYNC_A_CNTL->OTG_V_SYNC_MODE from sync mode selection */
+
+ /* OTG DRR (Dynamic Refresh Rate) Control - 8 fields */
+ uint32_t otg_v_total_max; /* OTG_V_TOTAL_MAX->OTG_V_TOTAL_MAX from drr_params->vertical_total_max */
+ uint32_t otg_v_total_min; /* OTG_V_TOTAL_MIN->OTG_V_TOTAL_MIN from drr_params->vertical_total_min */
+ uint32_t otg_v_total_mid; /* OTG_V_TOTAL_MID->OTG_V_TOTAL_MID from drr_params->vertical_total_mid */
+ uint32_t otg_v_total_max_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MAX_SEL from DRR max selection enable */
+ uint32_t otg_v_total_min_sel; /* OTG_V_TOTAL_CONTROL->OTG_V_TOTAL_MIN_SEL from DRR min selection enable */
+ uint32_t otg_vtotal_mid_replacing_max_en; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_REPLACING_MAX_EN from DRR mid-frame enable */
+ uint32_t otg_vtotal_mid_frame_num; /* OTG_V_TOTAL_CONTROL->OTG_VTOTAL_MID_FRAME_NUM from drr_params->vertical_total_mid_frame_num */
+ uint32_t otg_set_v_total_min_mask; /* OTG_V_TOTAL_CONTROL->OTG_SET_V_TOTAL_MIN_MASK from DRR trigger mask */
+ uint32_t otg_force_lock_on_event; /* OTG_V_TOTAL_CONTROL->OTG_FORCE_LOCK_ON_EVENT from DRR force lock control */
+
+ /* OPTC Data Source and ODM - 6 fields */
+ uint32_t optc_seg0_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG0_SRC_SEL from opp_id[0] ODM segment 0 source */
+ uint32_t optc_seg1_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG1_SRC_SEL from opp_id[1] ODM segment 1 source */
+ uint32_t optc_seg2_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG2_SRC_SEL from opp_id[2] ODM segment 2 source */
+ uint32_t optc_seg3_src_sel; /* OPTC_DATA_SOURCE_SELECT->OPTC_SEG3_SRC_SEL from opp_id[3] ODM segment 3 source */
+ uint32_t optc_num_of_input_segment; /* OPTC_DATA_SOURCE_SELECT->OPTC_NUM_OF_INPUT_SEGMENT from opp_cnt-1 number of input segments */
+ uint32_t optc_mem_sel; /* OPTC_MEMORY_CONFIG->OPTC_MEM_SEL from memory_mask ODM memory selection */
+
+ /* OPTC Data Format and DSC - 4 fields */
+ uint32_t optc_data_format; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DATA_FORMAT from data format selection */
+ uint32_t optc_dsc_mode; /* OPTC_DATA_FORMAT_CONTROL->OPTC_DSC_MODE from dsc_mode parameter */
+ uint32_t optc_dsc_bytes_per_pixel; /* OPTC_BYTES_PER_PIXEL->OPTC_DSC_BYTES_PER_PIXEL from dsc_bytes_per_pixel parameter */
+ uint32_t optc_segment_width; /* OPTC_WIDTH_CONTROL->OPTC_SEGMENT_WIDTH from segment_width parameter */
+ uint32_t optc_dsc_slice_width; /* OPTC_WIDTH_CONTROL->OPTC_DSC_SLICE_WIDTH from dsc_slice_width parameter */
+
+ /* OPTC Clock and Underflow Control - 4 fields */
+ uint32_t optc_input_pix_clk_en; /* OPTC_INPUT_CLOCK_CONTROL->OPTC_INPUT_PIX_CLK_EN from pixel clock enable */
+ uint32_t optc_underflow_occurred_status; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_OCCURRED_STATUS from underflow status (read-only) */
+ uint32_t optc_underflow_clear; /* OPTC_INPUT_GLOBAL_CONTROL->OPTC_UNDERFLOW_CLEAR from underflow clear control */
+ uint32_t otg_clock_enable; /* OTG_CLOCK_CONTROL->OTG_CLOCK_EN from OTG clock enable */
+ uint32_t otg_clock_gate_dis; /* OTG_CLOCK_CONTROL->OTG_CLOCK_GATE_DIS from clock gate disable */
+
+ /* OTG Stereo and 3D Control - 6 fields */
+ uint32_t otg_stereo_enable; /* OTG_STEREO_CONTROL->OTG_STEREO_EN from stereo enable control */
+ uint32_t otg_stereo_sync_output_line_num; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_LINE_NUM from timing->stereo_3d_format line num */
+ uint32_t otg_stereo_sync_output_polarity; /* OTG_STEREO_CONTROL->OTG_STEREO_SYNC_OUTPUT_POLARITY from stereo polarity control */
+ uint32_t otg_3d_structure_en; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_EN from 3D structure enable */
+ uint32_t otg_3d_structure_v_update_mode; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_V_UPDATE_MODE from 3D vertical update mode */
+ uint32_t otg_3d_structure_stereo_sel_ovr; /* OTG_3D_STRUCTURE_CONTROL->OTG_3D_STRUCTURE_STEREO_SEL_OVR from 3D stereo selection override */
+ uint32_t otg_interlace_enable; /* OTG_INTERLACE_CONTROL->OTG_INTERLACE_ENABLE from dc_crtc_timing->flags.INTERLACE */
+
+ /* OTG GSL (Global Sync Lock) Control - 5 fields */
+ uint32_t otg_gsl0_en; /* OTG_GSL_CONTROL->OTG_GSL0_EN from GSL group 0 enable */
+ uint32_t otg_gsl1_en; /* OTG_GSL_CONTROL->OTG_GSL1_EN from GSL group 1 enable */
+ uint32_t otg_gsl2_en; /* OTG_GSL_CONTROL->OTG_GSL2_EN from GSL group 2 enable */
+ uint32_t otg_gsl_master_en; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_EN from GSL master enable */
+ uint32_t otg_gsl_master_mode; /* OTG_GSL_CONTROL->OTG_GSL_MASTER_MODE from gsl_params->gsl_master mode */
+
+ /* OTG DRR Advanced Control - 4 fields */
+ uint32_t otg_v_total_last_used_by_drr; /* OTG_DRR_CONTROL->OTG_V_TOTAL_LAST_USED_BY_DRR from last used DRR V_TOTAL (read-only) */
+ uint32_t otg_drr_trigger_window_start_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_START_X from window_start parameter */
+ uint32_t otg_drr_trigger_window_end_x; /* OTG_DRR_TRIGGER_WINDOW->OTG_DRR_TRIGGER_WINDOW_END_X from window_end parameter */
+ uint32_t otg_drr_v_total_change_limit; /* OTG_DRR_V_TOTAL_CHANGE->OTG_DRR_V_TOTAL_CHANGE_LIMIT from limit parameter */
+
+ /* OTG DSC Position Control - 2 fields */
+ uint32_t otg_dsc_start_position_x; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_X from DSC start X position */
+ uint32_t otg_dsc_start_position_line_num; /* OTG_DSC_START_POSITION->OTG_DSC_START_POSITION_LINE_NUM from DSC start line number */
+
+ /* OTG Double Buffer Control - 2 fields */
+ uint32_t otg_drr_timing_dbuf_update_mode; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_DRR_TIMING_DBUF_UPDATE_MODE from DRR double buffer mode */
+ uint32_t otg_blank_data_double_buffer_en; /* OTG_DOUBLE_BUFFER_CONTROL->OTG_BLANK_DATA_DOUBLE_BUFFER_EN from blank data double buffer enable */
+
+ /* OTG Vertical Interrupts - 6 fields */
+ uint32_t otg_vertical_interrupt0_int_enable; /* OTG_VERTICAL_INTERRUPT0_CONTROL->OTG_VERTICAL_INTERRUPT0_INT_ENABLE from interrupt 0 enable */
+ uint32_t otg_vertical_interrupt0_line_start; /* OTG_VERTICAL_INTERRUPT0_POSITION->OTG_VERTICAL_INTERRUPT0_LINE_START from start_line parameter */
+ uint32_t otg_vertical_interrupt1_int_enable; /* OTG_VERTICAL_INTERRUPT1_CONTROL->OTG_VERTICAL_INTERRUPT1_INT_ENABLE from interrupt 1 enable */
+ uint32_t otg_vertical_interrupt1_line_start; /* OTG_VERTICAL_INTERRUPT1_POSITION->OTG_VERTICAL_INTERRUPT1_LINE_START from start_line parameter */
+ uint32_t otg_vertical_interrupt2_int_enable; /* OTG_VERTICAL_INTERRUPT2_CONTROL->OTG_VERTICAL_INTERRUPT2_INT_ENABLE from interrupt 2 enable */
+ uint32_t otg_vertical_interrupt2_line_start; /* OTG_VERTICAL_INTERRUPT2_POSITION->OTG_VERTICAL_INTERRUPT2_LINE_START from start_line parameter */
+
+ /* OTG Global Sync Parameters - 6 fields */
+ uint32_t otg_vready_offset; /* OTG_VREADY_PARAM->OTG_VREADY_OFFSET from vready_offset parameter */
+ uint32_t otg_vstartup_start; /* OTG_VSTARTUP_PARAM->OTG_VSTARTUP_START from vstartup_start parameter */
+ uint32_t otg_vupdate_offset; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_OFFSET from vupdate_offset parameter */
+ uint32_t otg_vupdate_width; /* OTG_VUPDATE_PARAM->OTG_VUPDATE_WIDTH from vupdate_width parameter */
+ uint32_t master_update_lock_vupdate_keepout_start_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET from pstate_keepout start */
+ uint32_t master_update_lock_vupdate_keepout_end_offset; /* OTG_VUPDATE_KEEPOUT->MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET from pstate_keepout end */
+
+ /* OTG Manual Trigger Control - 11 fields */
+ uint32_t otg_triga_source_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_SELECT from trigger A source selection */
+ uint32_t otg_triga_source_pipe_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_SOURCE_PIPE_SELECT from trigger A pipe selection */
+ uint32_t otg_triga_rising_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_RISING_EDGE_DETECT_CNTL from trigger A rising edge detect */
+ uint32_t otg_triga_falling_edge_detect_cntl; /* OTG_TRIGA_CNTL->OTG_TRIGA_FALLING_EDGE_DETECT_CNTL from trigger A falling edge detect */
+ uint32_t otg_triga_polarity_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_POLARITY_SELECT from trigger A polarity selection */
+ uint32_t otg_triga_frequency_select; /* OTG_TRIGA_CNTL->OTG_TRIGA_FREQUENCY_SELECT from trigger A frequency selection */
+ uint32_t otg_triga_delay; /* OTG_TRIGA_CNTL->OTG_TRIGA_DELAY from trigger A delay */
+ uint32_t otg_triga_clear; /* OTG_TRIGA_CNTL->OTG_TRIGA_CLEAR from trigger A clear */
+ uint32_t otg_triga_manual_trig; /* OTG_TRIGA_MANUAL_TRIG->OTG_TRIGA_MANUAL_TRIG from manual trigger A */
+ uint32_t otg_trigb_source_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_SOURCE_SELECT from trigger B source selection */
+ uint32_t otg_trigb_polarity_select; /* OTG_TRIGB_CNTL->OTG_TRIGB_POLARITY_SELECT from trigger B polarity selection */
+ uint32_t otg_trigb_manual_trig; /* OTG_TRIGB_MANUAL_TRIG->OTG_TRIGB_MANUAL_TRIG from manual trigger B */
+
+ /* OTG Static Screen and Update Control - 6 fields */
+ uint32_t otg_static_screen_event_mask; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_EVENT_MASK from event_triggers parameter */
+ uint32_t otg_static_screen_frame_count; /* OTG_STATIC_SCREEN_CONTROL->OTG_STATIC_SCREEN_FRAME_COUNT from num_frames parameter */
+ uint32_t master_update_lock; /* OTG_MASTER_UPDATE_LOCK->MASTER_UPDATE_LOCK from update lock control */
+ uint32_t master_update_mode; /* OTG_MASTER_UPDATE_MODE->MASTER_UPDATE_MODE from update mode selection */
+ uint32_t otg_force_count_now_mode; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_MODE from force count mode */
+ uint32_t otg_force_count_now_clear; /* OTG_FORCE_COUNT_NOW_CNTL->OTG_FORCE_COUNT_NOW_CLEAR from force count clear */
+
+ /* VTG Control - 3 fields */
+ uint32_t vtg0_enable; /* CONTROL->VTG0_ENABLE from VTG enable control */
+ uint32_t vtg0_fp2; /* CONTROL->VTG0_FP2 from VTG front porch 2 */
+ uint32_t vtg0_vcount_init; /* CONTROL->VTG0_VCOUNT_INIT from VTG vertical count init */
+
+ /* OTG Status (Read-Only) - 12 fields */
+ uint32_t otg_v_blank; /* OTG_STATUS->OTG_V_BLANK from vertical blank status (read-only) */
+ uint32_t otg_v_active_disp; /* OTG_STATUS->OTG_V_ACTIVE_DISP from vertical active display (read-only) */
+ uint32_t otg_frame_count; /* OTG_STATUS_FRAME_COUNT->OTG_FRAME_COUNT from frame count (read-only) */
+ uint32_t otg_horz_count; /* OTG_STATUS_POSITION->OTG_HORZ_COUNT from horizontal position (read-only) */
+ uint32_t otg_vert_count; /* OTG_STATUS_POSITION->OTG_VERT_COUNT from vertical position (read-only) */
+ uint32_t otg_horz_count_hv; /* OTG_STATUS_HV_COUNT->OTG_HORZ_COUNT from horizontal count (read-only) */
+ uint32_t otg_vert_count_nom; /* OTG_STATUS_HV_COUNT->OTG_VERT_COUNT_NOM from vertical count nominal (read-only) */
+ uint32_t otg_flip_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_FLIP_PENDING from flip pending status (read-only) */
+ uint32_t otg_dc_reg_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_DC_REG_UPDATE_PENDING from DC register update pending (read-only) */
+ uint32_t otg_cursor_update_pending; /* OTG_PIPE_UPDATE_STATUS->OTG_CURSOR_UPDATE_PENDING from cursor update pending (read-only) */
+ uint32_t otg_vupdate_keepout_status; /* OTG_PIPE_UPDATE_STATUS->OTG_VUPDATE_KEEPOUT_STATUS from VUPDATE keepout status (read-only) */
+ } optc[MAX_PIPES];
+
+ /* Metadata */
+ uint32_t active_pipe_count;
+ uint32_t active_stream_count;
+ bool state_valid;
+};
+
+/**
+ * dc_capture_register_software_state() - Capture software state for register programming
+ * @dc: DC context containing current display configuration
+ * @state: Pointer to dc_register_software_state structure to populate
+ *
+ * Extracts all software state variables that are used to program hardware register
+ * fields across the display driver pipeline. This provides a complete snapshot
+ * of the software configuration that drives hardware register programming.
+ *
+ * The function traverses the DC context and extracts values from:
+ * - Stream configurations (timing, format, DSC settings)
+ * - Plane states (surface format, rotation, scaling, cursor)
+ * - Pipe contexts (resource allocation, blending, viewport)
+ * - Clock manager (display clocks, DPP clocks, pixel clocks)
+ * - Resource context (DET buffer allocation, ODM configuration)
+ *
+ * This is essential for underflow debugging as it captures the exact software
+ * state that determines how registers are programmed, allowing analysis of
+ * whether underflow is caused by incorrect register programming or timing issues.
+ *
+ * Return: true if state was successfully captured, false on error
+ */
+bool dc_capture_register_software_state(struct dc *dc, struct dc_register_software_state *state);
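The structure is far too large for the stack, so a heap allocation is the natural calling pattern; a hedged sketch of the intended sequence (the log message is illustrative only):

	struct dc_register_software_state *sw_state;

	sw_state = kzalloc(sizeof(*sw_state), GFP_KERNEL);
	if (sw_state && dc_capture_register_software_state(dc, sw_state) && sw_state->state_valid)
		DC_LOG_DEBUG("captured %u active pipes, %u active streams",
			     sw_state->active_pipe_count, sw_state->active_stream_count);
	kfree(sw_state);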
#endif /* DC_INTERFACE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 5fa5e2b63fb7..40d7a7d83c40 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -91,9 +91,17 @@ struct dc_vbios_funcs {
struct device_id id);
/* COMMANDS */
+ enum bp_result (*select_crtc_source)(
+ struct dc_bios *bios,
+ struct bp_crtc_source_select *bp_params);
enum bp_result (*encoder_control)(
struct dc_bios *bios,
struct bp_encoder_control *cntl);
+ enum bp_result (*dac_load_detection)(
+ struct dc_bios *bios,
+ enum engine_id engine_id,
+ enum dal_device_type device_type,
+ uint32_t enum_id);
enum bp_result (*transmitter_control)(
struct dc_bios *bios,
struct bp_transmitter_control *cntl);
@@ -165,6 +173,7 @@ struct dc_vbios_funcs {
};
struct bios_registers {
+ uint32_t BIOS_SCRATCH_0;
uint32_t BIOS_SCRATCH_3;
uint32_t BIOS_SCRATCH_6;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index f5ef1a07078e..7b09af1cb306 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -442,7 +442,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
int i = 0, k = 0;
int ramp_up_num_steps = 1; // TODO: Ramp is currently disabled. Reenable it.
uint8_t visual_confirm_enabled;
- int pipe_idx = 0;
struct dc_stream_status *stream_status = NULL;
if (dc == NULL)
@@ -457,7 +456,7 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.visual_confirm_enabled = visual_confirm_enabled;
if (should_manage_pstate) {
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -472,7 +471,6 @@ bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool should_manage_pstate, stru
cmd.fw_assisted_mclk_switch.config_data.vactive_stretch_margin_us = dc->debug.fpo_vactive_margin_us;
break;
}
- pipe_idx++;
}
}
@@ -872,7 +870,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
bool enable)
{
uint8_t cmd_pipe_index = 0;
- uint32_t i, pipe_idx;
+ uint32_t i;
uint8_t subvp_count = 0;
union dmub_rb_cmd cmd;
struct pipe_ctx *subvp_pipes[2];
@@ -899,7 +897,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
if (enable) {
// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
@@ -922,7 +920,6 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
}
- pipe_idx++;
}
if (subvp_count == 2) {
update_subvp_prefetch_end_to_mall_start(dc, context, &cmd, subvp_pipes);
@@ -1174,6 +1171,100 @@ void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, con
dmub_srv_subvp_save_surf_addr(dc_dmub_srv->dmub, addr, subvp_index);
}
+void dc_dmub_srv_cursor_offload_init(struct dc *dc)
+{
+ struct dmub_rb_cmd_cursor_offload_init *init;
+ struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
+ union dmub_rb_cmd cmd;
+
+ if (!dc->config.enable_cursor_offload)
+ return;
+
+ if (!dc_dmub_srv->dmub->meta_info.feature_bits.bits.cursor_offload_v1_support)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr || !dc_dmub_srv->dmub->cursor_offload_fb.cpu_addr)
+ return;
+
+ if (!dc_dmub_srv->dmub->cursor_offload_v1)
+ return;
+
+ if (!dc_dmub_srv->dmub->shared_state)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ init = &cmd.cursor_offload_init;
+ init->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ init->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT;
+ init->header.payload_bytes = sizeof(init->init_data);
+ init->init_data.state_addr.quad_part = dc_dmub_srv->dmub->cursor_offload_fb.gpu_addr;
+ init->init_data.state_size = dc_dmub_srv->dmub->cursor_offload_fb.size;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ dc_dmub_srv->cursor_offload_enabled = true;
+}
+
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable)
+{
+ struct pipe_ctx const *pipe_ctx;
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!stream)
+ return;
+
+ pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+ if (!pipe_ctx || !pipe_ctx->stream_res.tg || pipe_ctx->stream != stream)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type =
+ enable ? DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE : DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+
+ cntl->data.otg_inst = pipe_ctx->stream_res.tg->inst;
+ cntl->data.line_time_in_ns = 1u + (uint32_t)(div64_u64(stream->timing.h_total * 1000000ull,
+ stream->timing.pix_clk_100hz / 10));
+
+ cntl->data.v_total_max = stream->adjust.v_total_max > stream->timing.v_total ?
+ stream->adjust.v_total_max :
+ stream->timing.v_total;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd,
+ enable ? DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT);
+}
+
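The line_time_in_ns expression above converts pix_clk_100hz to kHz (divide by 10) so that h_total * 1000000 / pix_clk_khz comes out in nanoseconds, and the leading 1u rounds the truncated division up; a worked example with an assumed 1080p timing:

	/* h_total = 2200, pix_clk_100hz = 1485000 (148.5 MHz) */
	uint32_t pix_clk_khz = 1485000 / 10;                            /* 148500 kHz */
	uint32_t line_time_ns = 1u + (2200 * 1000000ull) / pix_clk_khz; /* 14815 ns */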
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ struct dmub_rb_cmd_cursor_offload_stream_cntl *cntl;
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ if (!pipe || !pipe->stream || !pipe->stream_res.tg)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cntl = &cmd.cursor_offload_stream_ctnl;
+ cntl->header.type = DMUB_CMD__CURSOR_OFFLOAD;
+ cntl->header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM;
+ cntl->header.payload_bytes = sizeof(cntl->data);
+ cntl->data.otg_inst = pipe->stream_res.tg->inst;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
{
struct dc_context *dc_ctx;
@@ -1993,6 +2084,9 @@ bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
bool result;
+ if (!dc_dmub_srv->dmub->feature_caps.lsdma_support_in_dmu)
+ return false;
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
@@ -2010,11 +2104,12 @@ bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv)
return result;
}
-bool dmub_lsdma_send_linear_copy_packet(
+bool dmub_lsdma_send_linear_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
uint64_t dst_addr,
- uint32_t count)
+ uint32_t count
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2042,9 +2137,54 @@ bool dmub_lsdma_send_linear_copy_packet(
return result;
}
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+)
+{
+ struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+ union dmub_rb_cmd cmd;
+ enum dm_dmub_wait_type wait_type;
+ struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data;
+ bool result;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.cmd_common.header.type = DMUB_CMD__LSDMA;
+ cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY;
+ wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+
+ lsdma_data->u.linear_sub_window_copy_data.tmz = copy_data.tmz;
+ lsdma_data->u.linear_sub_window_copy_data.element_size = copy_data.element_size;
+ lsdma_data->u.linear_sub_window_copy_data.src_lo = copy_data.src_lo;
+ lsdma_data->u.linear_sub_window_copy_data.src_hi = copy_data.src_hi;
+ lsdma_data->u.linear_sub_window_copy_data.src_x = copy_data.src_x;
+ lsdma_data->u.linear_sub_window_copy_data.src_y = copy_data.src_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_pitch = copy_data.src_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.src_slice_pitch = copy_data.src_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_lo = copy_data.dst_lo;
+ lsdma_data->u.linear_sub_window_copy_data.dst_hi = copy_data.dst_hi;
+ lsdma_data->u.linear_sub_window_copy_data.dst_x = copy_data.dst_x;
+ lsdma_data->u.linear_sub_window_copy_data.dst_y = copy_data.dst_y;
+ lsdma_data->u.linear_sub_window_copy_data.dst_pitch = copy_data.dst_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.dst_slice_pitch = copy_data.dst_slice_pitch;
+ lsdma_data->u.linear_sub_window_copy_data.rect_x = copy_data.rect_x;
+ lsdma_data->u.linear_sub_window_copy_data.rect_y = copy_data.rect_y;
+ lsdma_data->u.linear_sub_window_copy_data.src_cache_policy = copy_data.src_cache_policy;
+ lsdma_data->u.linear_sub_window_copy_data.dst_cache_policy = copy_data.dst_cache_policy;
+
+ result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type);
+
+ if (!result)
+ DC_ERROR("LSDMA Linear Sub Window Copy failed in DMUB");
+
+ return result;
+}
+
bool dmub_lsdma_send_tiled_to_tiled_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
- struct lsdma_send_tiled_to_tiled_copy_command_params params)
+ struct lsdma_send_tiled_to_tiled_copy_command_params params
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2066,8 +2206,8 @@ bool dmub_lsdma_send_tiled_to_tiled_copy_command(
lsdma_data->u.tiled_copy_data.src_y = params.src_y;
lsdma_data->u.tiled_copy_data.dst_x = params.dst_x;
lsdma_data->u.tiled_copy_data.dst_y = params.dst_y;
- lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA controller expects width -1
- lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width -1
+ lsdma_data->u.tiled_copy_data.src_width = params.src_width;
+ lsdma_data->u.tiled_copy_data.dst_width = params.dst_width;
lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode;
lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode;
lsdma_data->u.tiled_copy_data.src_element_size = params.element_size;
@@ -2078,8 +2218,8 @@ bool dmub_lsdma_send_tiled_to_tiled_copy_command(
lsdma_data->u.tiled_copy_data.tmz = params.tmz;
lsdma_data->u.tiled_copy_data.read_compress = params.read_compress;
lsdma_data->u.tiled_copy_data.write_compress = params.write_compress;
- lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects height -1
- lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects height -1
+ lsdma_data->u.tiled_copy_data.src_height = params.src_height;
+ lsdma_data->u.tiled_copy_data.dst_height = params.dst_height;
lsdma_data->u.tiled_copy_data.data_format = params.data_format;
lsdma_data->u.tiled_copy_data.max_com = params.max_com;
lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom;
@@ -2097,7 +2237,8 @@ bool dmub_lsdma_send_pio_copy_command(
uint64_t src_addr,
uint64_t dst_addr,
uint32_t byte_count,
- uint32_t overlap_disable)
+ uint32_t overlap_disable
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2130,7 +2271,8 @@ bool dmub_lsdma_send_pio_constfill_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t dst_addr,
uint32_t byte_count,
- uint32_t data)
+ uint32_t data
+)
{
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
union dmub_rb_cmd cmd;
@@ -2183,6 +2325,11 @@ bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uin
return result;
}
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc)
+{
+ return dc->ctx->dmub_srv && dc->ctx->dmub_srv->cursor_offload_enabled;
+}
+
void dc_dmub_srv_release_hw(const struct dc *dc)
{
struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv;
@@ -2200,3 +2347,24 @@ void dc_dmub_srv_release_hw(const struct dc *dc)
dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
+
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv)
+{
+ struct dmub_srv *dmub;
+
+ if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+ return;
+
+ dmub = dc_dmub_srv->dmub;
+
+ if (dmub_srv_get_preos_info(dmub)) {
+ DC_LOG_DEBUG("%s: PreOS DMCUB Info", __func__);
+ DC_LOG_DEBUG("fw_version : 0x%08x", dmub->preos_info.fw_version);
+ DC_LOG_DEBUG("boot_options : 0x%08x", dmub->preos_info.boot_options);
+ DC_LOG_DEBUG("boot_status : 0x%08x", dmub->preos_info.boot_status);
+ DC_LOG_DEBUG("trace_buffer_phy_addr : 0x%016llx", dmub->preos_info.trace_buffer_phy_addr);
+ DC_LOG_DEBUG("trace_buffer_size_bytes : 0x%08x", dmub->preos_info.trace_buffer_size);
+ DC_LOG_DEBUG("fb_base : 0x%016llx", dmub->preos_info.fb_base);
+ DC_LOG_DEBUG("fb_offset : 0x%016llx", dmub->preos_info.fb_offset);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 8ea320f21269..72e0a41f39f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -56,6 +56,7 @@ struct dc_dmub_srv {
union dmub_shared_state_ips_driver_signals driver_signals;
bool idle_allowed;
bool needs_idle_wake;
+ bool cursor_offload_enabled;
};
bool dc_dmub_srv_wait_for_pending(struct dc_dmub_srv *dc_dmub_srv);
@@ -211,11 +212,45 @@ void dc_dmub_srv_fams2_passthrough_flip(
int surface_count);
bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv);
-bool dmub_lsdma_send_linear_copy_packet(
+bool dmub_lsdma_send_linear_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
uint64_t dst_addr,
uint32_t count);
+
+struct lsdma_linear_sub_window_copy_params {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t padding : 22;
+};
+
+bool dmub_lsdma_send_linear_sub_window_copy_command(
+ struct dc_dmub_srv *dc_dmub_srv,
+ struct lsdma_linear_sub_window_copy_params copy_data
+);
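
For reference, a minimal caller-side sketch (not part of this patch; the helper name and rectangle size are illustrative) showing how the params struct might be filled, splitting the 64-bit addresses with the kernel's lower_32_bits()/upper_32_bits() helpers:

	static bool copy_subrect_sketch(struct dc_dmub_srv *dc_dmub_srv,
			uint64_t src_addr, uint64_t dst_addr, uint16_t pitch)
	{
		struct lsdma_linear_sub_window_copy_params p = { 0 };

		p.src_lo = lower_32_bits(src_addr);
		p.src_hi = upper_32_bits(src_addr);
		p.dst_lo = lower_32_bits(dst_addr);
		p.dst_hi = upper_32_bits(dst_addr);
		p.src_pitch = pitch;
		p.dst_pitch = pitch;
		p.rect_x = 64;	/* copy a 64x64 sub-window */
		p.rect_y = 64;

		return dmub_lsdma_send_linear_sub_window_copy_command(dc_dmub_srv, p);
	}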
bool dmub_lsdma_send_pio_copy_command(
struct dc_dmub_srv *dc_dmub_srv,
uint64_t src_addr,
@@ -292,9 +327,51 @@ bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t
enum ips_residency_mode ips_mode);
/**
+ * dc_dmub_srv_cursor_offload_init() - Initializes cursor offload support on the DMUB service.
+ *
+ * @dc: pointer to DC object
+ */
+void dc_dmub_srv_cursor_offload_init(struct dc *dc);
+
+/**
+ * dc_dmub_srv_control_cursor_offload() - Enables or disables cursor offloading for a stream.
+ *
+ * @dc: pointer to DC object
+ * @context: the DC context to reference for pipe allocations
+ * @stream: the stream to control
+ * @enable: true to enable cursor offload, false to disable
+ */
+void dc_dmub_srv_control_cursor_offload(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream, bool enable);
+
+/**
+ * dc_dmub_srv_program_cursor_now() - Requests immediate cursor programming for a given pipe.
+ *
+ * @dc: pointer to DC object
+ * @pipe: top-most pipe for a stream.
+ */
+void dc_dmub_srv_program_cursor_now(struct dc *dc, const struct pipe_ctx *pipe);
+
+/**
+ * dc_dmub_srv_is_cursor_offload_enabled() - Checks if cursor offload is enabled.
+ *
+ * @dc: pointer to DC object
+ *
+ * Return: true if cursor offload is enabled, false otherwise
+ */
+bool dc_dmub_srv_is_cursor_offload_enabled(const struct dc *dc);
+
+/**
* dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required.
*
* @dc - pointer to DC object
*/
void dc_dmub_srv_release_hw(const struct dc *dc);
+
+/**
+ * dc_dmub_srv_log_preos_dmcub_info() - Logs pre-OS DMCUB firmware info.
+ *
+ * @dc_dmub_srv: pointer to the DC DMUB service
+ */
+void dc_dmub_srv_log_preos_dmcub_info(struct dc_dmub_srv *dc_dmub_srv);
#endif /* _DMUB_DC_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 5ce1be362534..79e1696def63 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1021,7 +1021,8 @@ union dp_128b_132b_supported_lttpr_link_rates {
union dp_alpm_lttpr_cap {
struct {
uint8_t AUX_LESS_ALPM_SUPPORTED :1;
- uint8_t RESERVED :7;
+ uint8_t ASSR_SUPPORTED :1;
+ uint8_t RESERVED :6;
} bits;
uint8_t raw;
};
@@ -1119,10 +1120,11 @@ union dp_128b_132b_training_aux_rd_interval {
union edp_alpm_caps {
struct {
- uint8_t AUX_WAKE_ALPM_CAP :1;
- uint8_t PM_STATE_2A_SUPPORT :1;
- uint8_t AUX_LESS_ALPM_CAP :1;
- uint8_t RESERVED :5;
+ uint8_t AUX_WAKE_ALPM_CAP :1;
+ uint8_t PM_STATE_2A_SUPPORT :1;
+ uint8_t AUX_LESS_ALPM_CAP :1;
+ uint8_t AUX_LESS_ALPM_ML_PHY_SLEEP_STATUS_SUPPORTED :1;
+ uint8_t RESERVED :4;
} bits;
uint8_t raw;
};
@@ -1155,6 +1157,16 @@ struct dprx_states {
bool cable_id_written;
};
+union dpcd_panel_replay_capability_supported {
+ struct {
+ unsigned char PANEL_REPLAY_SUPPORT :1;
+ unsigned char SELECTIVE_UPDATE_SUPPORT :1;
+ unsigned char EARLY_TRANSPORT_SUPPORT :1;
+ unsigned char RESERVED :5;
+ } bits;
+ unsigned char raw;
+};
+
enum dpcd_downstream_port_max_bpc {
DOWN_STREAM_MAX_8BPC = 0,
DOWN_STREAM_MAX_10BPC,
@@ -1278,10 +1290,12 @@ struct dpcd_caps {
struct edp_psr_info psr_info;
struct replay_info pr_info;
+ union dpcd_panel_replay_capability_supported pr_caps_supported;
uint16_t edp_oled_emission_rate;
union dp_receive_port0_cap receive_port0_cap;
/* Indicates the number of SST links supported by MSO (Multi-Stream Output) */
uint8_t mso_cap_sst_links_supported;
+ uint8_t dp_edp_general_cap_2;
};
union dpcd_sink_ext_caps {
@@ -1343,11 +1357,38 @@ union dpcd_replay_configuration {
unsigned char raw;
};
+union panel_replay_enable_and_configuration_1 {
+ struct {
+ unsigned char PANEL_REPLAY_ENABLE :1;
+ unsigned char PANEL_REPLAY_CRC_ENABLE :1;
+ unsigned char IRQ_HPD_ASSDP_MISSING :1;
+ unsigned char IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR :1;
+ unsigned char IRQ_HPD_RFB_ERROR :1;
+ unsigned char IRQ_HPD_ACTIVE_FRAME_CRC_ERROR :1;
+ unsigned char PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE :1;
+ unsigned char PANEL_REPLAY_EARLY_TRANSPORT_ENABLE :1;
+ } bits;
+ unsigned char raw;
+};
+
+union panel_replay_enable_and_configuration_2 {
+ struct {
+ unsigned char SINK_REFRESH_RATE_UNLOCK_GRANTED :1;
+ unsigned char RESERVED :1;
+ unsigned char SU_Y_GRANULARITY_EXT_VALUE_ENABLED :1;
+ unsigned char SU_Y_GRANULARITY_EXT_VALUE :4;
+ unsigned char SU_REGION_SCAN_LINE_CAPTURE_INDICATION :1;
+ } bits;
+ unsigned char raw;
+};
+
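
Sketch (illustrative; the actual DPCD write is omitted) of building the replay enable byte with the new union — the raw/bits layout lets a single AUX byte map onto named fields:

	union panel_replay_enable_and_configuration_1 pr_cfg = { .raw = 0 };

	pr_cfg.bits.PANEL_REPLAY_ENABLE = 1;
	pr_cfg.bits.PANEL_REPLAY_EARLY_TRANSPORT_ENABLE = 1;
	/* pr_cfg.raw is the single byte written to the sink over AUX */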
union dpcd_alpm_configuration {
struct {
unsigned char ENABLE : 1;
unsigned char IRQ_HPD_ENABLE : 1;
- unsigned char RESERVED : 6;
+ unsigned char ALPM_MODE_SEL : 1;
+ unsigned char ACDS_PERIOD_DURATION : 1;
+ unsigned char RESERVED : 4;
} bits;
unsigned char raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 7217de258851..5a365bd19933 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -732,7 +732,7 @@ char *dce_version_to_string(const int version)
case DCN_VERSION_3_03:
return "DCN 3.0.3";
case DCN_VERSION_3_1:
- return "DCN 3.1";
+ return "DCN 3.1.2";
case DCN_VERSION_3_14:
return "DCN 3.1.4";
case DCN_VERSION_3_15:
@@ -755,3 +755,8 @@ char *dce_version_to_string(const int version)
return "Unknown";
}
}
+
+bool dc_supports_vrr(const enum dce_version v)
+{
+ return v >= DCE_VERSION_8_0;
+}
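
A one-line guard sketch at a hypothetical call site — per the helper above, every DCE 8.0+ ASIC reports VRR support:

	if (!dc_supports_vrr(dc->ctx->dce_version))
		return;	/* pre-DCE 8.0: skip VRR programming */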
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 7f57661433eb..37d1a79e8241 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -128,7 +128,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx);
// Make spl input basic out info output_size width point to stream h active
spl_in->basic_out.output_size.width =
- stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow;
+ stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
// Make spl input basic out info output_size height point to v active
spl_in->basic_out.output_size.height =
stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
@@ -147,6 +147,8 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->prefer_easf = false;
else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2)
spl_in->disable_easf = true;
+ else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 3)
+ spl_in->override_easf = true;
/* Translate adaptive sharpening preference */
unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness;
unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 5fc6fea211de..321cfe92d799 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -203,6 +203,7 @@ struct dc_stream_state {
struct dc_info_packet hfvsif_infopacket;
struct dc_info_packet vtem_infopacket;
struct dc_info_packet adaptive_sync_infopacket;
+ struct dc_info_packet avi_infopacket;
uint8_t dsc_packed_pps[128];
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -335,6 +336,8 @@ struct dc_stream_update {
struct dc_info_packet *hfvsif_infopacket;
struct dc_info_packet *vtem_infopacket;
struct dc_info_packet *adaptive_sync_infopacket;
+ struct dc_info_packet *avi_infopacket;
+
bool *dpms_off;
bool integer_scaling_update;
bool *allow_freesync;
@@ -470,12 +473,11 @@ void dc_enable_stereo(
/* Triggers multi-stream synchronization. */
void dc_trigger_sync(struct dc *dc, struct dc_state *context);
-enum surface_update_type dc_check_update_surfaces_for_stream(
- struct dc *dc,
+struct surface_update_descriptor dc_check_update_surfaces_for_stream(
+ const struct dc_check_config *check_config,
struct dc_surface_update *updates,
int surface_count,
- struct dc_stream_update *stream_update,
- const struct dc_stream_status *stream_status);
+ struct dc_stream_update *stream_update);
/**
* Create a new default stream for the requested sink
@@ -489,8 +491,8 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);
-struct dc_stream_status *dc_stream_get_status(
- struct dc_stream_state *dc_stream);
+struct dc_stream_status *dc_stream_get_status(struct dc_stream_state *dc_stream);
+const struct dc_stream_status *dc_stream_get_status_const(const struct dc_stream_state *dc_stream);
/*******************************************************************************
* Cursor interfaces - To manages the cursor within a stream
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 375ca2f13b7a..f46039f64203 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -185,6 +185,10 @@ struct dc_panel_patch {
unsigned int wait_after_dpcd_poweroff_ms;
};
+/**
+ * struct dc_edid_caps - Capabilities read from EDID.
+ * @analog: Whether the monitor is analog. Used by DVI-I handling.
+ */
struct dc_edid_caps {
/* sink identification */
uint16_t manufacturer_id;
@@ -212,6 +216,8 @@ struct dc_edid_caps {
bool edid_hdmi;
bool hdr_supported;
bool rr_capable;
+ bool scdc_present;
+ bool analog;
struct dc_panel_patch panel_patch;
};
@@ -347,7 +353,8 @@ enum dc_connection_type {
dc_connection_none,
dc_connection_single,
dc_connection_mst_branch,
- dc_connection_sst_branch
+ dc_connection_sst_branch,
+ dc_connection_dac_load
};
struct dc_csc_adjustments {
@@ -563,6 +570,12 @@ struct dc_info_packet_128 {
uint8_t sb[128];
};
+struct dc_edid_read_policy {
+ uint32_t max_retry_count;
+ uint32_t delay_time_ms;
+ uint32_t ignore_checksum;
+};
+
#define DC_PLANE_UPDATE_TIMES_MAX 10
struct dc_plane_flip_time {
@@ -571,6 +584,12 @@ struct dc_plane_flip_time {
unsigned int prev_update_time_in_us;
};
+enum dc_alpm_mode {
+ DC_ALPM_AUXWAKE = 0,
+ DC_ALPM_AUXLESS = 1,
+ DC_ALPM_UNSUPPORTED = 0xF,
+};
+
enum dc_psr_state {
PSR_STATE0 = 0x0,
PSR_STATE1,
@@ -616,6 +635,7 @@ struct psr_config {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
union dmcu_psr_level {
@@ -728,6 +748,7 @@ struct psr_context {
unsigned int line_time_in_us;
uint8_t rate_control_caps;
uint16_t dsc_slice_height;
+ bool os_request_force_ffu;
};
struct colorspace_transform {
@@ -920,6 +941,12 @@ enum dc_psr_version {
DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
+enum dc_replay_version {
+ DC_FREESYNC_REPLAY = 0,
+ DC_VESA_PANEL_REPLAY = 1,
+ DC_REPLAY_VERSION_UNSUPPORTED = 0xFF,
+};
+
/* Possible values of display_endpoint_id.endpoint */
enum display_endpoint_type {
DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
@@ -1072,6 +1099,7 @@ enum replay_FW_Message_type {
Replay_Set_Residency_Frameupdate_Timer,
Replay_Set_Pseudo_VTotal,
Replay_Disabled_Adaptive_Sync_SDP,
+ Replay_Set_Version,
Replay_Set_General_Cmd,
};
@@ -1107,6 +1135,8 @@ union replay_low_refresh_rate_enable_options {
};
struct replay_config {
+ /* Replay version */
+ enum dc_replay_version replay_version;
/* Replay feature is supported */
bool replay_supported;
/* Replay caps support DPCD & EDID caps*/
@@ -1137,6 +1167,10 @@ struct replay_config {
bool low_rr_supported;
/* Replay Video Conferencing Optimization Enabled */
bool replay_video_conferencing_optimization_enabled;
+ /* Replay alpm mode */
+ enum dc_alpm_mode alpm_mode;
+ /* OS requested to force full frame updates (FFU) */
+ bool os_request_force_ffu;
};
/* Replay feature flags*/
@@ -1159,6 +1193,10 @@ struct replay_settings {
uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Defer Update Coasting vtotal table */
uint32_t defer_update_coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ /* Skip frame number table */
+ uint32_t frame_skip_number_table[PR_COASTING_TYPE_NUM];
+ /* Defer skip frame number table */
+ uint32_t defer_frame_skip_number_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
uint32_t link_off_frame_count;
/* Replay pseudo vtotal for low refresh rate*/
@@ -1167,6 +1205,8 @@ struct replay_settings {
uint16_t last_pseudo_vtotal;
/* Replay desync error */
uint32_t replay_desync_error_fail_count;
+ /* The frame skip number DAL sends to DMUB */
+ uint16_t frame_skip_number;
};
/* To split out "global" and "per-panel" config settings.
@@ -1199,6 +1239,7 @@ struct dc_panel_config {
bool rc_disable;
bool rc_allow_static_screen;
bool rc_allow_fullscreen_VPB;
+ bool read_psrcap_again;
unsigned int replay_enable_option;
} psr;
/* ABM */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
index 5999b2da3a01..33d8bd91cb01 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c
@@ -148,7 +148,7 @@ struct dccg *dccg2_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask)
{
- struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
struct dccg *base;
if (dccg_dcn == NULL) {
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index a9b88f5e0c04..8bdffd9ff31b 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -425,7 +425,69 @@ struct dccg_mask {
uint32_t SYMCLKD_CLOCK_ENABLE; \
uint32_t SYMCLKE_CLOCK_ENABLE; \
uint32_t DP_DTO_MODULO[MAX_PIPES]; \
- uint32_t DP_DTO_PHASE[MAX_PIPES]
+ uint32_t DP_DTO_PHASE[MAX_PIPES]; \
+ uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL; \
+ uint32_t DCCG_AUDIO_DTO0_MODULE; \
+ uint32_t DCCG_AUDIO_DTO0_PHASE; \
+ uint32_t DCCG_AUDIO_DTO1_MODULE; \
+ uint32_t DCCG_AUDIO_DTO1_PHASE; \
+ uint32_t DCCG_CAC_STATUS; \
+ uint32_t DCCG_CAC_STATUS2; \
+ uint32_t DCCG_DISP_CNTL_REG; \
+ uint32_t DCCG_DS_CNTL; \
+ uint32_t DCCG_DS_DTO_INCR; \
+ uint32_t DCCG_DS_DTO_MODULO; \
+ uint32_t DCCG_DS_HW_CAL_INTERVAL; \
+ uint32_t DCCG_GTC_CNTL; \
+ uint32_t DCCG_GTC_CURRENT; \
+ uint32_t DCCG_GTC_DTO_INCR; \
+ uint32_t DCCG_GTC_DTO_MODULO; \
+ uint32_t DCCG_PERFMON_CNTL; \
+ uint32_t DCCG_PERFMON_CNTL2; \
+ uint32_t DCCG_SOFT_RESET; \
+ uint32_t DCCG_TEST_CLK_SEL; \
+ uint32_t DCCG_VSYNC_CNT_CTRL; \
+ uint32_t DCCG_VSYNC_CNT_INT_CTRL; \
+ uint32_t DCCG_VSYNC_OTG0_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG1_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG2_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG3_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG4_LATCH_VALUE; \
+ uint32_t DCCG_VSYNC_OTG5_LATCH_VALUE; \
+ uint32_t DISPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DP_DTO_DBUF_EN; \
+ uint32_t DPIACLK_540M_DTO_MODULO; \
+ uint32_t DPIACLK_540M_DTO_PHASE; \
+ uint32_t DPIACLK_810M_DTO_MODULO; \
+ uint32_t DPIACLK_810M_DTO_PHASE; \
+ uint32_t DPIACLK_DTO_CNTL; \
+ uint32_t DPIASYMCLK_CNTL; \
+ uint32_t DPPCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t DPREFCLK_CNTL; \
+ uint32_t DTBCLK_DTO_DBUF_EN; \
+ uint32_t FORCE_SYMCLK_DISABLE; \
+ uint32_t HDMICHARCLK0_CLOCK_CNTL; \
+ uint32_t MICROSECOND_TIME_BASE_DIV; \
+ uint32_t MILLISECOND_TIME_BASE_DIV; \
+ uint32_t OTG0_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG0_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG1_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG2_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PHYPLL_PIXEL_RATE_CNTL; \
+ uint32_t OTG3_PIXEL_RATE_CNTL; \
+ uint32_t PHYPLLA_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLB_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLC_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLD_PIXCLK_RESYNC_CNTL; \
+ uint32_t PHYPLLE_PIXCLK_RESYNC_CNTL; \
+ uint32_t REFCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SOCCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_CGTT_BLK_CTRL_REG; \
+ uint32_t SYMCLK_PSP_CNTL
+
struct dccg_registers {
DCCG_REG_VARIABLE_LIST;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
index 8664f0c4c9b7..97df04b7e39d 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c
@@ -709,6 +709,128 @@ void dccg31_otg_drop_pixel(struct dccg *dccg,
OTG_DROP_PIXEL[otg_inst], 1);
}
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ dccg_reg_state->dc_mem_global_pwr_req_cntl = REG_READ(DC_MEM_GLOBAL_PWR_REQ_CNTL);
+ dccg_reg_state->dccg_audio_dtbclk_dto_modulo = REG_READ(DCCG_AUDIO_DTBCLK_DTO_MODULO);
+ dccg_reg_state->dccg_audio_dtbclk_dto_phase = REG_READ(DCCG_AUDIO_DTBCLK_DTO_PHASE);
+ dccg_reg_state->dccg_audio_dto_source = REG_READ(DCCG_AUDIO_DTO_SOURCE);
+ dccg_reg_state->dccg_audio_dto0_module = REG_READ(DCCG_AUDIO_DTO0_MODULE);
+ dccg_reg_state->dccg_audio_dto0_phase = REG_READ(DCCG_AUDIO_DTO0_PHASE);
+ dccg_reg_state->dccg_audio_dto1_module = REG_READ(DCCG_AUDIO_DTO1_MODULE);
+ dccg_reg_state->dccg_audio_dto1_phase = REG_READ(DCCG_AUDIO_DTO1_PHASE);
+ dccg_reg_state->dccg_cac_status = REG_READ(DCCG_CAC_STATUS);
+ dccg_reg_state->dccg_cac_status2 = REG_READ(DCCG_CAC_STATUS2);
+ dccg_reg_state->dccg_disp_cntl_reg = REG_READ(DCCG_DISP_CNTL_REG);
+ dccg_reg_state->dccg_ds_cntl = REG_READ(DCCG_DS_CNTL);
+ dccg_reg_state->dccg_ds_dto_incr = REG_READ(DCCG_DS_DTO_INCR);
+ dccg_reg_state->dccg_ds_dto_modulo = REG_READ(DCCG_DS_DTO_MODULO);
+ dccg_reg_state->dccg_ds_hw_cal_interval = REG_READ(DCCG_DS_HW_CAL_INTERVAL);
+ dccg_reg_state->dccg_gate_disable_cntl = REG_READ(DCCG_GATE_DISABLE_CNTL);
+ dccg_reg_state->dccg_gate_disable_cntl2 = REG_READ(DCCG_GATE_DISABLE_CNTL2);
+ dccg_reg_state->dccg_gate_disable_cntl3 = REG_READ(DCCG_GATE_DISABLE_CNTL3);
+ dccg_reg_state->dccg_gate_disable_cntl4 = REG_READ(DCCG_GATE_DISABLE_CNTL4);
+ dccg_reg_state->dccg_gate_disable_cntl5 = REG_READ(DCCG_GATE_DISABLE_CNTL5);
+ dccg_reg_state->dccg_gate_disable_cntl6 = REG_READ(DCCG_GATE_DISABLE_CNTL6);
+ dccg_reg_state->dccg_global_fgcg_rep_cntl = REG_READ(DCCG_GLOBAL_FGCG_REP_CNTL);
+ dccg_reg_state->dccg_gtc_cntl = REG_READ(DCCG_GTC_CNTL);
+ dccg_reg_state->dccg_gtc_current = REG_READ(DCCG_GTC_CURRENT);
+ dccg_reg_state->dccg_gtc_dto_incr = REG_READ(DCCG_GTC_DTO_INCR);
+ dccg_reg_state->dccg_gtc_dto_modulo = REG_READ(DCCG_GTC_DTO_MODULO);
+ dccg_reg_state->dccg_perfmon_cntl = REG_READ(DCCG_PERFMON_CNTL);
+ dccg_reg_state->dccg_perfmon_cntl2 = REG_READ(DCCG_PERFMON_CNTL2);
+ dccg_reg_state->dccg_soft_reset = REG_READ(DCCG_SOFT_RESET);
+ dccg_reg_state->dccg_test_clk_sel = REG_READ(DCCG_TEST_CLK_SEL);
+ dccg_reg_state->dccg_vsync_cnt_ctrl = REG_READ(DCCG_VSYNC_CNT_CTRL);
+ dccg_reg_state->dccg_vsync_cnt_int_ctrl = REG_READ(DCCG_VSYNC_CNT_INT_CTRL);
+ dccg_reg_state->dccg_vsync_otg0_latch_value = REG_READ(DCCG_VSYNC_OTG0_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg1_latch_value = REG_READ(DCCG_VSYNC_OTG1_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg2_latch_value = REG_READ(DCCG_VSYNC_OTG2_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg3_latch_value = REG_READ(DCCG_VSYNC_OTG3_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg4_latch_value = REG_READ(DCCG_VSYNC_OTG4_LATCH_VALUE);
+ dccg_reg_state->dccg_vsync_otg5_latch_value = REG_READ(DCCG_VSYNC_OTG5_LATCH_VALUE);
+ dccg_reg_state->dispclk_cgtt_blk_ctrl_reg = REG_READ(DISPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dispclk_freq_change_cntl = REG_READ(DISPCLK_FREQ_CHANGE_CNTL);
+ dccg_reg_state->dp_dto_dbuf_en = REG_READ(DP_DTO_DBUF_EN);
+ dccg_reg_state->dp_dto0_modulo = REG_READ(DP_DTO_MODULO[0]);
+ dccg_reg_state->dp_dto0_phase = REG_READ(DP_DTO_PHASE[0]);
+ dccg_reg_state->dp_dto1_modulo = REG_READ(DP_DTO_MODULO[1]);
+ dccg_reg_state->dp_dto1_phase = REG_READ(DP_DTO_PHASE[1]);
+ dccg_reg_state->dp_dto2_modulo = REG_READ(DP_DTO_MODULO[2]);
+ dccg_reg_state->dp_dto2_phase = REG_READ(DP_DTO_PHASE[2]);
+ dccg_reg_state->dp_dto3_modulo = REG_READ(DP_DTO_MODULO[3]);
+ dccg_reg_state->dp_dto3_phase = REG_READ(DP_DTO_PHASE[3]);
+ dccg_reg_state->dpiaclk_540m_dto_modulo = REG_READ(DPIACLK_540M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_540m_dto_phase = REG_READ(DPIACLK_540M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_810m_dto_modulo = REG_READ(DPIACLK_810M_DTO_MODULO);
+ dccg_reg_state->dpiaclk_810m_dto_phase = REG_READ(DPIACLK_810M_DTO_PHASE);
+ dccg_reg_state->dpiaclk_dto_cntl = REG_READ(DPIACLK_DTO_CNTL);
+ dccg_reg_state->dpiasymclk_cntl = REG_READ(DPIASYMCLK_CNTL);
+ dccg_reg_state->dppclk_cgtt_blk_ctrl_reg = REG_READ(DPPCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dppclk_ctrl = REG_READ(DPPCLK_CTRL);
+ dccg_reg_state->dppclk_dto_ctrl = REG_READ(DPPCLK_DTO_CTRL);
+ dccg_reg_state->dppclk0_dto_param = REG_READ(DPPCLK_DTO_PARAM[0]);
+ dccg_reg_state->dppclk1_dto_param = REG_READ(DPPCLK_DTO_PARAM[1]);
+ dccg_reg_state->dppclk2_dto_param = REG_READ(DPPCLK_DTO_PARAM[2]);
+ dccg_reg_state->dppclk3_dto_param = REG_READ(DPPCLK_DTO_PARAM[3]);
+ dccg_reg_state->dprefclk_cgtt_blk_ctrl_reg = REG_READ(DPREFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->dprefclk_cntl = REG_READ(DPREFCLK_CNTL);
+ dccg_reg_state->dpstreamclk_cntl = REG_READ(DPSTREAMCLK_CNTL);
+ dccg_reg_state->dscclk_dto_ctrl = REG_READ(DSCCLK_DTO_CTRL);
+ dccg_reg_state->dscclk0_dto_param = REG_READ(DSCCLK0_DTO_PARAM);
+ dccg_reg_state->dscclk1_dto_param = REG_READ(DSCCLK1_DTO_PARAM);
+ dccg_reg_state->dscclk2_dto_param = REG_READ(DSCCLK2_DTO_PARAM);
+ dccg_reg_state->dscclk3_dto_param = REG_READ(DSCCLK3_DTO_PARAM);
+ dccg_reg_state->dtbclk_dto_dbuf_en = REG_READ(DTBCLK_DTO_DBUF_EN);
+ dccg_reg_state->dtbclk_dto0_modulo = REG_READ(DTBCLK_DTO_MODULO[0]);
+ dccg_reg_state->dtbclk_dto0_phase = REG_READ(DTBCLK_DTO_PHASE[0]);
+ dccg_reg_state->dtbclk_dto1_modulo = REG_READ(DTBCLK_DTO_MODULO[1]);
+ dccg_reg_state->dtbclk_dto1_phase = REG_READ(DTBCLK_DTO_PHASE[1]);
+ dccg_reg_state->dtbclk_dto2_modulo = REG_READ(DTBCLK_DTO_MODULO[2]);
+ dccg_reg_state->dtbclk_dto2_phase = REG_READ(DTBCLK_DTO_PHASE[2]);
+ dccg_reg_state->dtbclk_dto3_modulo = REG_READ(DTBCLK_DTO_MODULO[3]);
+ dccg_reg_state->dtbclk_dto3_phase = REG_READ(DTBCLK_DTO_PHASE[3]);
+ dccg_reg_state->dtbclk_p_cntl = REG_READ(DTBCLK_P_CNTL);
+ dccg_reg_state->force_symclk_disable = REG_READ(FORCE_SYMCLK_DISABLE);
+ dccg_reg_state->hdmicharclk0_clock_cntl = REG_READ(HDMICHARCLK0_CLOCK_CNTL);
+ dccg_reg_state->hdmistreamclk_cntl = REG_READ(HDMISTREAMCLK_CNTL);
+ dccg_reg_state->hdmistreamclk0_dto_param = REG_READ(HDMISTREAMCLK0_DTO_PARAM);
+ dccg_reg_state->microsecond_time_base_div = REG_READ(MICROSECOND_TIME_BASE_DIV);
+ dccg_reg_state->millisecond_time_base_div = REG_READ(MILLISECOND_TIME_BASE_DIV);
+ dccg_reg_state->otg_pixel_rate_div = REG_READ(OTG_PIXEL_RATE_DIV);
+ dccg_reg_state->otg0_phypll_pixel_rate_cntl = REG_READ(OTG0_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg0_pixel_rate_cntl = REG_READ(OTG0_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_phypll_pixel_rate_cntl = REG_READ(OTG1_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg1_pixel_rate_cntl = REG_READ(OTG1_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_phypll_pixel_rate_cntl = REG_READ(OTG2_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg2_pixel_rate_cntl = REG_READ(OTG2_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_phypll_pixel_rate_cntl = REG_READ(OTG3_PHYPLL_PIXEL_RATE_CNTL);
+ dccg_reg_state->otg3_pixel_rate_cntl = REG_READ(OTG3_PIXEL_RATE_CNTL);
+ dccg_reg_state->phyasymclk_clock_cntl = REG_READ(PHYASYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phybsymclk_clock_cntl = REG_READ(PHYBSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phycsymclk_clock_cntl = REG_READ(PHYCSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phydsymclk_clock_cntl = REG_READ(PHYDSYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyesymclk_clock_cntl = REG_READ(PHYESYMCLK_CLOCK_CNTL);
+ dccg_reg_state->phyplla_pixclk_resync_cntl = REG_READ(PHYPLLA_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllb_pixclk_resync_cntl = REG_READ(PHYPLLB_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phypllc_pixclk_resync_cntl = REG_READ(PHYPLLC_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplld_pixclk_resync_cntl = REG_READ(PHYPLLD_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->phyplle_pixclk_resync_cntl = REG_READ(PHYPLLE_PIXCLK_RESYNC_CNTL);
+ dccg_reg_state->refclk_cgtt_blk_ctrl_reg = REG_READ(REFCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->socclk_cgtt_blk_ctrl_reg = REG_READ(SOCCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_cgtt_blk_ctrl_reg = REG_READ(SYMCLK_CGTT_BLK_CTRL_REG);
+ dccg_reg_state->symclk_psp_cntl = REG_READ(SYMCLK_PSP_CNTL);
+ dccg_reg_state->symclk32_le_cntl = REG_READ(SYMCLK32_LE_CNTL);
+ dccg_reg_state->symclk32_se_cntl = REG_READ(SYMCLK32_SE_CNTL);
+ dccg_reg_state->symclka_clock_enable = REG_READ(SYMCLKA_CLOCK_ENABLE);
+ dccg_reg_state->symclkb_clock_enable = REG_READ(SYMCLKB_CLOCK_ENABLE);
+ dccg_reg_state->symclkc_clock_enable = REG_READ(SYMCLKC_CLOCK_ENABLE);
+ dccg_reg_state->symclkd_clock_enable = REG_READ(SYMCLKD_CLOCK_ENABLE);
+ dccg_reg_state->symclke_clock_enable = REG_READ(SYMCLKE_CLOCK_ENABLE);
+}
+
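
A hedged sketch of how a debug path might consume the new hook through the funcs table (caller-owned snapshot buffer assumed):

	struct dcn_dccg_reg_state reg_state = { 0 };

	if (dccg->funcs->dccg_read_reg_state)
		dccg->funcs->dccg_read_reg_state(dccg, &reg_state);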
static const struct dccg_funcs dccg31_funcs = {
.update_dpp_dto = dccg31_update_dpp_dto,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
@@ -727,6 +849,7 @@ static const struct dccg_funcs dccg31_funcs = {
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
.disable_dsc = dccg31_disable_dscclk,
.enable_dsc = dccg31_enable_dscclk,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg31_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
index cd261051dc2c..bf659920d4cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
@@ -236,4 +236,6 @@ void dccg31_disable_dscclk(struct dccg *dccg, int inst);
void dccg31_enable_dscclk(struct dccg *dccg, int inst);
+void dccg31_read_reg_state(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
+
#endif //__DCN31_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
index 8f6edd8e9beb..ef3db6beba25 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c
@@ -377,7 +377,8 @@ static const struct dccg_funcs dccg314_funcs = {
.get_pixel_rate_div = dccg314_get_pixel_rate_div,
.trigger_dio_fifo_resync = dccg314_trigger_dio_fifo_resync,
.set_valid_pixel_rate = dccg314_set_valid_pixel_rate,
- .set_dtbclk_p_src = dccg314_set_dtbclk_p_src
+ .set_dtbclk_p_src = dccg314_set_dtbclk_p_src,
+ .dccg_read_reg_state = dccg31_read_reg_state
};
struct dccg *dccg314_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
index 60ea1d248deb..a609635f35db 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
@@ -74,8 +74,7 @@
SR(DCCG_GATE_DISABLE_CNTL3),\
SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL),\
- SR(DCCG_AUDIO_DTO_SOURCE)
+ SR(DTBCLK_P_CNTL)
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index 0ce9489ac6b7..bd2f528137b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -39,6 +39,7 @@
#define CTX \
dccg_dcn->base.ctx
+#include "logger_types.h"
#define DC_LOGGER \
dccg->ctx->logger
@@ -1113,6 +1114,16 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg)
if (dispclk_rdivider_value != 0)
REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
+
+static void dccg35_wait_for_dentist_change_done(
+ struct dccg *dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
+
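+ /* Rewrite the current value, then poll DENTIST_DISPCLK_CHG_DONE at
+  * 50 us intervals, up to 2000 polls, before returning.
+  */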
+ REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
+ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
+}
+
static void dcn35_set_dppclk_enable(struct dccg *dccg,
uint32_t dpp_inst, uint32_t enable)
@@ -1136,7 +1147,7 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg,
default:
break;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
+ DC_LOG_DEBUG("%s: dpp_inst(%d) DPPCLK_EN = %d\n", __func__, dpp_inst, enable);
}
@@ -1173,9 +1184,9 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst,
dcn35_set_dppclk_enable(dccg, dpp_inst, true);
} else {
dcn35_set_dppclk_enable(dccg, dpp_inst, false);
- /*we have this in hwss: disable_plane*/
- //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
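+ /* wait for clock to fully ramp */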
+ udelay(10);
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
@@ -1299,6 +1310,8 @@ static void dccg35_set_pixel_rate_div(
BREAK_TO_DEBUGGER();
return;
}
+ if (otg_inst < 4)
+ dccg35_wait_for_dentist_change_done(dccg);
}
static void dccg35_set_dtbclk_p_src(
@@ -1406,7 +1419,11 @@ static void dccg35_set_dtbclk_dto(
* PIPEx_DTO_SRC_SEL should not be programmed during DTBCLK update since OTG may still be on, and the
* programming is handled in program_pix_clk() regardless, so it can be removed from here.
*/
- } else {
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO enabled: pixclk_khz=%d, ref_dtbclk_khz=%d, req_dtbclk_khz=%d, phase=%d, modulo=%d\n",
+ __func__, params->otg_inst, params->pixclk_khz,
+ params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);
+
+ } else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
switch (params->otg_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
@@ -1431,6 +1448,8 @@ static void dccg35_set_dtbclk_dto(
REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0);
REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0);
+
+ DC_LOG_DEBUG("%s: OTG%d DTBCLK DTO disabled\n", __func__, params->otg_inst);
}
}
@@ -1475,6 +1494,8 @@ static void dccg35_set_dpstreamclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_EN = %d, DPSTREAMCLK_SRC_SEL = %d\n",
+ __func__, dp_hpo_inst, (src == REFCLK) ? 0 : 1, otg_inst);
}
@@ -1514,6 +1535,8 @@ static void dccg35_set_dpstreamclk_root_clock_gating(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: dp_hpo_inst(%d) DPSTREAMCLK_ROOT_GATE_DISABLE = %d\n",
+ __func__, dp_hpo_inst, enable ? 1 : 0);
}
@@ -1553,7 +1576,7 @@ static void dccg35_set_physymclk_root_clock_gating(
BREAK_TO_DEBUGGER();
return;
}
- //DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE:\n", __func__, phy_inst, enable ? 0 : 1);
+ DC_LOG_DEBUG("%s: dpp_inst(%d) PHYESYMCLK_ROOT_GATE_DISABLE: %d\n", __func__, phy_inst, enable ? 0 : 1);
}
@@ -1626,6 +1649,8 @@ static void dccg35_set_physymclk(
BREAK_TO_DEBUGGER();
return;
}
+ DC_LOG_DEBUG("%s: phy_inst(%d) PHYxSYMCLK_EN = %d, PHYxSYMCLK_SRC_SEL = %d\n",
+ __func__, phy_inst, force_enable ? 1 : 0, clk_src);
}
static void dccg35_set_valid_pixel_rate(
@@ -1651,7 +1676,7 @@ static void dccg35_dpp_root_clock_control(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (dccg->dpp_clock_gated[dpp_inst] == clock_on)
+ if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
return;
if (clock_on) {
@@ -1669,10 +1694,14 @@ static void dccg35_dpp_root_clock_control(
DPPCLK0_DTO_PHASE, 0,
DPPCLK0_DTO_MODULO, 1);
/*we have this in hwss: disable_plane*/
- //dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, true);
}
+ // wait for clock to fully ramp
+ udelay(10);
+
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
+ DC_LOG_DEBUG("%s: dpp_inst(%d) clock_on = %d\n", __func__, dpp_inst, clock_on);
}
static void dccg35_disable_symclk32_se(
@@ -1731,6 +1760,7 @@ static void dccg35_disable_symclk32_se(
BREAK_TO_DEBUGGER();
return;
}
}
static void dccg35_init_cb(struct dccg *dccg)
@@ -1738,7 +1768,6 @@ static void dccg35_init_cb(struct dccg *dccg)
(void)dccg;
/* Any RCG should be done when driver enter low power mode*/
}
-
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -1753,6 +1782,8 @@ void dccg35_init(struct dccg *dccg)
for (otg_inst = 0; otg_inst < 2; otg_inst++) {
dccg31_disable_symclk32_le(dccg, otg_inst);
dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d SYMCLK32_LE disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
@@ -1765,6 +1796,8 @@ void dccg35_init(struct dccg *dccg)
dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst,
otg_inst);
dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false);
+ DC_LOG_DEBUG("%s: OTG%d DPSTREAMCLK disabled and root clock gating disabled\n",
+ __func__, otg_inst);
}
/*
@@ -2420,6 +2453,7 @@ static const struct dccg_funcs dccg35_funcs = {
.disable_symclk_se = dccg35_disable_symclk_se,
.set_dtbclk_p_src = dccg35_set_dtbclk_p_src,
.dccg_root_gate_disable_control = dccg35_root_gate_disable_control,
+ .dccg_read_reg_state = dccg31_read_reg_state,
};
struct dccg *dccg35_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
index 51f98c5c51c4..7b9c36456cd9 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h
@@ -41,8 +41,9 @@
SR(SYMCLKA_CLOCK_ENABLE),\
SR(SYMCLKB_CLOCK_ENABLE),\
SR(SYMCLKC_CLOCK_ENABLE),\
- SR(SYMCLKD_CLOCK_ENABLE),\
- SR(SYMCLKE_CLOCK_ENABLE)
+ SR(SYMCLKD_CLOCK_ENABLE),\
+ SR(SYMCLKE_CLOCK_ENABLE),\
+ SR(SYMCLK_PSP_CNTL)
#define DCCG_MASK_SH_LIST_DCN35(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
@@ -231,6 +232,14 @@
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\
struct dccg *dccg35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index 668ee2d405fd..663a18ee5162 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -619,7 +619,7 @@ void dccg401_set_dp_dto(
dto_integer = div_u64(params->pixclk_hz, dto_modulo_hz);
dto_phase_hz = params->pixclk_hz - dto_integer * dto_modulo_hz;
- if (dto_phase_hz <= 0) {
+ if (dto_phase_hz <= 0 && dto_integer <= 0) {
/* negative pixel rate should never happen */
BREAK_TO_DEBUGGER();
return;
@@ -886,6 +886,7 @@ static const struct dccg_funcs dccg401_funcs = {
.enable_symclk_se = dccg401_enable_symclk_se,
.disable_symclk_se = dccg401_disable_symclk_se,
.set_dtbclk_p_src = dccg401_set_dtbclk_p_src,
+ .dccg_read_reg_state = dccg31_read_reg_state
};
struct dccg *dccg401_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index a6006776333d..2dcf394edf22 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
const struct dce_abm_shift *abm_shift,
const struct dce_abm_mask *abm_mask)
{
- struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
if (abm_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index eeed840073fe..fcad61c618a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -1143,7 +1143,8 @@ void dce_aud_wall_dto_setup(
REG_UPDATE(DCCG_AUDIO_DTO1_PHASE,
DCCG_AUDIO_DTO1_PHASE, clock_info.audio_dto_phase);
- REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
+ if (aud->masks->DCCG_AUDIO_DTO2_USE_512FBR_DTO)
+ REG_UPDATE(DCCG_AUDIO_DTO_SOURCE,
DCCG_AUDIO_DTO2_USE_512FBR_DTO, 1);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index bb4ac5042c80..673bb87d2c17 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -725,14 +725,18 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
for (i = 0; i < AUX_MAX_RETRIES; i++) {
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: address=0x%04x length=%u write=%d mot=%d",
+ "dce_aux_transfer_with_retries: link_index=%u: START: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
payload->address,
payload->length,
(unsigned int) payload->write,
- (unsigned int) payload->mot);
+ (unsigned int) payload->mot,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (payload->write)
dce_aux_log_payload(" write", payload->data, payload->length, 16);
@@ -746,7 +750,9 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
LOG_FLAG_I2cAux_DceAux,
- "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d payload->reply=%u",
+ "dce_aux_transfer_with_retries: link_index=%u: END: retry %d of %d: "
+ "address=0x%04x length=%u write=%d mot=%d: ret=%d operation_result=%d "
+ "payload->reply=%u is_i2c=%d is_dpia=%d ddc_hw_inst=%d",
ddc && ddc->link ? ddc->link->link_index : UINT_MAX,
i + 1,
(int)AUX_MAX_RETRIES,
@@ -756,7 +762,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
(unsigned int) payload->mot,
ret,
(int)operation_result,
- (unsigned int) *payload->reply);
+ (unsigned int) *payload->reply,
+ payload->i2c_over_aux,
+ (ddc->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? true : false,
+ ddc->link->ddc_hw_inst);
if (!payload->write)
dce_aux_log_payload(" read", payload->data, ret > 0 ? ret : 0, 16);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index a8e79104b684..5f8fba45d98d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -1126,7 +1126,7 @@ struct dmcu *dcn10_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1147,7 +1147,7 @@ struct dmcu *dcn20_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
@@ -1168,7 +1168,7 @@ struct dmcu *dcn21_dmcu_create(
const struct dce_dmcu_shift *dmcu_shift,
const struct dce_dmcu_mask *dmcu_mask)
{
- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
+ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
if (dmcu_dce == NULL) {
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 0421b267a0b5..365dd2e37aea 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -591,7 +591,7 @@ static bool dce_i2c_hw_engine_submit_payload(struct dce_i2c_hw *dce_i2c_hw,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
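+ /* bits [7:1] carry the 7-bit address; bit 0 is the R/W flag (1 = read) */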
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
index e188447c8156..2d73b94c515c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c
@@ -451,7 +451,7 @@ static bool dce_i2c_sw_engine_submit_payload(struct dce_i2c_sw *engine,
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE_MOT :
DCE_I2C_TRANSACTION_ACTION_I2C_WRITE;
- request.address = (uint8_t) ((payload->address << 1) | !payload->write);
+ request.address = (uint8_t) ((payload->address << 1) | (payload->write ? 0 : 1));
request.length = payload->length;
request.data = payload->data;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 0c50fe266c8a..87dbb8d7ed27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -302,6 +302,10 @@ static void setup_panel_mode(
if (ctx->dc->caps.psp_setup_panel_mode)
return;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
@@ -804,6 +808,33 @@ bool dce110_link_encoder_validate_dp_output(
return true;
}
+static bool dce110_link_encoder_validate_rgb_output(
+ const struct dce110_link_encoder *enc110,
+ const struct dc_crtc_timing *crtc_timing)
+{
+ /* When the VBIOS doesn't specify any limits, use 400 MHz.
+ * The value comes from amdgpu_atombios_get_clock_info.
+ */
+ uint32_t max_pixel_clock_khz = 400000;
+
+ if (enc110->base.ctx->dc_bios->fw_info_valid &&
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock) {
+ max_pixel_clock_khz =
+ enc110->base.ctx->dc_bios->fw_info.max_pixel_clock;
+ }
+
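+ /* pix_clk_100hz is in 100 Hz units, so scale the kHz limit by 10 */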
+ if (crtc_timing->pix_clk_100hz > max_pixel_clock_khz * 10)
+ return false;
+
+ if (crtc_timing->display_color_depth != COLOR_DEPTH_888)
+ return false;
+
+ if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB)
+ return false;
+
+ return true;
+}
+
void dce110_link_encoder_construct(
struct dce110_link_encoder *enc110,
const struct encoder_init_data *init_data,
@@ -824,6 +855,7 @@ void dce110_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -847,6 +879,11 @@ void dce110_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -885,6 +922,13 @@ void dce110_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+ /* The connector is analog-only, ie. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
@@ -939,6 +983,10 @@ bool dce110_link_encoder_validate_output_with_stream(
is_valid = dce110_link_encoder_validate_dp_output(
enc110, &stream->timing);
break;
+ case SIGNAL_TYPE_RGB:
+ is_valid = dce110_link_encoder_validate_rgb_output(
+ enc110, &stream->timing);
+ break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_LVDS:
is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
@@ -969,6 +1017,10 @@ void dce110_link_encoder_hw_init(
cntl.coherent = false;
cntl.hpd_sel = enc110->base.hpd_source;
+ /* The code below is only applicable to encoders with a digital transmitter. */
+ if (enc110->base.transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (enc110->base.connector.id == CONNECTOR_ID_EDP)
cntl.signal = SIGNAL_TYPE_EDP;
@@ -1034,6 +1086,8 @@ void dce110_link_encoder_setup(
/* DP MST */
REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5);
break;
+ case SIGNAL_TYPE_RGB:
+ break;
default:
ASSERT_CRITICAL(false);
/* invalid mode ! */
@@ -1282,6 +1336,24 @@ void dce110_link_encoder_disable_output(
struct bp_transmitter_control cntl = { 0 };
enum bp_result result;
+ switch (enc->analog_engine) {
+ case ENGINE_ID_DACA:
+ REG_UPDATE(DAC_ENABLE, DAC_ENABLE, 0);
+ break;
+ case ENGINE_ID_DACB:
+ /* DACB doesn't seem to be present on DCE6+,
+ * although there are references to it in the register file.
+ */
+ DC_LOG_ERROR("%s DACB is unsupported\n", __func__);
+ break;
+ default:
+ break;
+ }
+
+ /* The code below only applies to connectors that support digital signals. */
+ if (enc->transmitter == TRANSMITTER_UNKNOWN)
+ return;
+
if (!dce110_is_dig_enabled(enc)) {
/* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */
return;
@@ -1726,6 +1798,7 @@ void dce60_link_encoder_construct(
enc110->base.connector = init_data->connector;
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ enc110->base.analog_engine = init_data->analog_engine;
enc110->base.features = *enc_features;
@@ -1749,6 +1822,11 @@ void dce60_link_encoder_construct(
SIGNAL_TYPE_EDP |
SIGNAL_TYPE_HDMI_TYPE_A;
+ if ((enc110->base.connector.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ enc110->base.connector.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ enc110->base.analog_engine != ENGINE_ID_UNKNOWN)
+ enc110->base.output_signals |= SIGNAL_TYPE_RGB;
+
/* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
* SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY.
* SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
@@ -1787,6 +1865,13 @@ void dce60_link_encoder_construct(
enc110->base.preferred_engine = ENGINE_ID_DIGG;
break;
default:
+ if (init_data->analog_engine != ENGINE_ID_UNKNOWN) {
+ /* The connector is analog-only, i.e. VGA */
+ enc110->base.preferred_engine = init_data->analog_engine;
+ enc110->base.output_signals = SIGNAL_TYPE_RGB;
+ enc110->base.transmitter = TRANSMITTER_UNKNOWN;
+ break;
+ }
ASSERT_CRITICAL(false);
enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 261c70e01e33..c58b69bc319b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -101,18 +101,21 @@
SRI(DP_SEC_CNTL, DP, id), \
SRI(DP_VID_STREAM_CNTL, DP, id), \
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
- SRI(DP_SEC_CNTL1, DP, id)
+ SRI(DP_SEC_CNTL1, DP, id), \
+ SR(DAC_ENABLE)
#endif
#define LE_DCE80_REG_LIST(id)\
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- LE_COMMON_REG_LIST_BASE(id)
+ LE_COMMON_REG_LIST_BASE(id), \
+ SR(DAC_ENABLE)
#define LE_DCE100_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
- SR(DCI_MEM_PWR_STATUS)
+ SR(DCI_MEM_PWR_STATUS), \
+ SR(DAC_ENABLE)
#define LE_DCE110_REG_LIST(id)\
LE_COMMON_REG_LIST_BASE(id), \
@@ -181,6 +184,9 @@ struct dce110_link_enc_registers {
uint32_t DP_DPHY_BS_SR_SWAP_CNTL;
uint32_t DP_DPHY_HBR2_PATTERN_CONTROL;
uint32_t DP_SEC_CNTL1;
+
+ /* DAC registers */
+ uint32_t DAC_ENABLE;
};
struct dce110_link_encoder {
@@ -215,10 +221,6 @@ bool dce110_link_encoder_validate_dvi_output(
enum signal_type signal,
const struct dc_crtc_timing *crtc_timing);
-bool dce110_link_encoder_validate_rgb_output(
- const struct dce110_link_encoder *enc110,
- const struct dc_crtc_timing *crtc_timing);
-
bool dce110_link_encoder_validate_dp_output(
const struct dce110_link_encoder *enc110,
const struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 1130d7619b26..574618d5d4a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -1567,3 +1567,17 @@ void dce110_stream_encoder_construct(
enc110->se_shift = se_shift;
enc110->se_mask = se_mask;
}
+
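+/* Deliberately empty: the analog (DAC) path does no stream encoder programming, so every op is left NULL. */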
+static const struct stream_encoder_funcs dce110_an_str_enc_funcs = {};
+
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id)
+{
+ enc110->base.funcs = &dce110_an_str_enc_funcs;
+ enc110->base.ctx = ctx;
+ enc110->base.id = eng_id;
+ enc110->base.bp = bp;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
index cc5020a8e1e1..068de1392121 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h
@@ -708,6 +708,11 @@ void dce110_stream_encoder_construct(
const struct dce_stream_encoder_shift *se_shift,
const struct dce_stream_encoder_mask *se_mask);
+void dce110_analog_stream_encoder_construct(
+ struct dce110_stream_encoder *enc110,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ enum engine_id eng_id);
void dce110_se_audio_mute_control(
struct stream_encoder *enc, bool mute);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
index 2b1673d69ea8..1ab5ae9b5ea5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
@@ -154,10 +154,13 @@ static bool dce60_setup_scaling_configuration(
REG_SET(SCL_BYPASS_CONTROL, 0, SCL_BYPASS_MODE, 0);
if (data->taps.h_taps + data->taps.v_taps <= 2) {
- /* Set bypass */
-
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ /* Disable scaler functionality */
+ REG_WRITE(SCL_SCALER_ENABLE, 0);
+ /* Clear registers that can cause glitches even when the scaler is off */
+ REG_WRITE(SCL_TAP_CONTROL, 0);
+ REG_WRITE(SCL_AUTOMATIC_MODE_CONTROL, 0);
+ REG_WRITE(SCL_F_SHARP_CONTROL, 0);
return false;
}
@@ -165,7 +168,7 @@ static bool dce60_setup_scaling_configuration(
SCL_H_NUM_OF_TAPS, data->taps.h_taps - 1,
SCL_V_NUM_OF_TAPS, data->taps.v_taps - 1);
- /* DCE6 has no SCL_MODE register, skip scale mode programming */
+ REG_WRITE(SCL_SCALER_ENABLE, 1);
/* DCE6 has no SCL_BOUNDARY_MODE bit, skip replace out of bound pixels */
@@ -502,6 +505,8 @@ static void dce60_transform_set_scaler(
REG_SET(DC_LB_MEM_SIZE, 0,
DC_LB_MEM_SIZE, xfm_dce->lb_memory_size);
+ REG_WRITE(SCL_UPDATE, 0x00010000);
+
/* Clear SCL_F_SHARP_CONTROL value to 0 */
REG_WRITE(SCL_F_SHARP_CONTROL, 0);
@@ -527,8 +532,7 @@ static void dce60_transform_set_scaler(
if (coeffs_v != xfm_dce->filter_v || coeffs_h != xfm_dce->filter_h) {
/* 4. Program vertical filters */
if (xfm_dce->filter_v == NULL)
- REG_SET(SCL_VERT_FILTER_CONTROL, 0,
- SCL_V_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_VERT_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.v_taps,
@@ -542,8 +546,7 @@ static void dce60_transform_set_scaler(
/* 5. Program horizontal filters */
if (xfm_dce->filter_h == NULL)
- REG_SET(SCL_HORZ_FILTER_CONTROL, 0,
- SCL_H_2TAP_HARDCODE_COEF_EN, 0);
+ REG_WRITE(SCL_HORZ_FILTER_CONTROL, 0);
program_multi_taps_filter(
xfm_dce,
data->taps.h_taps,
@@ -566,6 +569,8 @@ static void dce60_transform_set_scaler(
/* DCE6 has no SCL_COEF_UPDATE_COMPLETE bit to flip to new coefficient memory */
/* DCE6 DATA_FORMAT register does not support ALPHA_EN */
+
+ REG_WRITE(SCL_UPDATE, 0);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
index cbce194ec7b8..eb716e8337e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.h
@@ -155,6 +155,9 @@
SRI(SCL_COEF_RAM_TAP_DATA, SCL, id), \
SRI(VIEWPORT_START, SCL, id), \
SRI(VIEWPORT_SIZE, SCL, id), \
+ SRI(SCL_SCALER_ENABLE, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_RGB_LUMA, SCL, id), \
+ SRI(SCL_HORZ_FILTER_INIT_CHROMA, SCL, id), \
SRI(SCL_HORZ_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_SCALE_RATIO, SCL, id), \
SRI(SCL_VERT_FILTER_INIT, SCL, id), \
@@ -590,6 +593,7 @@ struct dce_transform_registers {
uint32_t SCL_VERT_FILTER_SCALE_RATIO;
uint32_t SCL_HORZ_FILTER_INIT;
#if defined(CONFIG_DRM_AMD_DC_SI)
+ uint32_t SCL_SCALER_ENABLE;
uint32_t SCL_HORZ_FILTER_INIT_RGB_LUMA;
uint32_t SCL_HORZ_FILTER_INIT_CHROMA;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index d37ecfdde4f1..5bfa2b0d2afd 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -61,10 +61,9 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);
}
-bool should_use_dmub_lock(struct dc_link *link)
+bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link)
{
- /* ASIC doesn't support DMUB */
- if (!link->ctx->dmub_srv)
+ if (!link)
return false;
if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
@@ -73,16 +72,38 @@ bool should_use_dmub_lock(struct dc_link *link)
if (link->replay_settings.replay_feature_enabled)
return true;
- /* only use HW lock for PSR1 on single eDP */
if (link->psr_settings.psr_version == DC_PSR_VERSION_1) {
struct dc_link *edp_links[MAX_NUM_EDP];
int edp_num;
- dc_get_edp_links(link->dc, edp_links, &edp_num);
-
+ dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num == 1)
return true;
}
+ return false;
+}
+bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context)
+{
+ if (!context)
+ return false;
+ for (int i = 0; i < context->stream_count; i++) {
+ const struct dc_link *link = context->streams[i]->link;
+
+ if (dmub_hw_lock_mgr_does_link_require_lock(dc, link))
+ return true;
+ }
return false;
}
+
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link)
+{
+ /* ASIC doesn't support DMUB */
+ if (!dc->ctx->dmub_srv)
+ return false;
+
+ if (dc->ctx->dce_version >= DCN_VERSION_4_01)
+ return false;
+
+ return dmub_hw_lock_mgr_does_link_require_lock(dc, link);
+}
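
The refactor above splits the old should_use_dmub_lock() in two: a link-level feature policy (PSR-SU, replay, single-eDP PSR1) and a context-level check that walks every stream, while DMUB presence and the DCN 4.01 cutoff stay in the inbox1-specific wrapper. A reduced sketch of the same split, with dc_link/dc_state collapsed to stub structs:

#include <stdbool.h>

/* Reduced stand-ins for dc_link / dc_state; only the fields the
 * lock-policy sketch needs. */
struct link  { bool psr_su, replay, psr1_single_edp; };
struct state { struct link *links[6]; int stream_count; };

/* Link-level policy, mirroring dmub_hw_lock_mgr_does_link_require_lock(). */
static bool link_requires_lock(const struct link *l)
{
	if (!l)
		return false;
	return l->psr_su || l->replay || l->psr1_single_edp;
}

/* Context-level policy: lock if any stream's link requires it. */
static bool context_requires_lock(const struct state *s)
{
	if (!s)
		return false;
	for (int i = 0; i < s->stream_count; i++)
		if (link_requires_lock(s->links[i]))
			return true;
	return false;
}

int main(void)
{
	struct link edp = { .psr1_single_edp = true };
	struct state st = { .links = { &edp }, .stream_count = 1 };

	return context_requires_lock(&st) ? 0 : 1;
}
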
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
index 5a72b168fb4a..4c80ca8484ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -37,6 +37,16 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
union dmub_inbox0_cmd_lock_hw hw_lock_cmd);
-bool should_use_dmub_lock(struct dc_link *link);
+/**
+ * should_use_dmub_inbox1_lock() - Checks if the DMCUB hardware lock via inbox1 should be used.
+ *
+ * @dc: pointer to DC object
+ * @link: optional pointer to the link object to check for enabled link features
+ *
+ * Return: true if the inbox1 lock should be used, false otherwise
+ */
+bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link);
+bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link);
+bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context);
#endif /*_DMUB_HW_LOCK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index ff3b8244ba3d..87af4fdc04a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -391,7 +391,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
sizeof(DP_SINK_DEVICE_STR_ID_1)))
link->psr_settings.force_ffu_mode = 1;
- copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode;
+ copy_settings_data->force_ffu_mode = link->psr_settings.force_ffu_mode || psr_context->os_request_force_ffu;
if (((link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
!link->dc->debug.disable_fec) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index fcd3d86ad517..cf1372aaff6c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -3,6 +3,7 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dc.h"
+#include "link_service.h"
#include "dc_dmub_srv.h"
#include "dmub/dmub_srv.h"
#include "core_types.h"
@@ -168,6 +169,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
copy_settings_data->max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
copy_settings_data->smu_optimizations_en = link->replay_settings.replay_smu_opt_enable;
copy_settings_data->replay_timing_sync_supported = link->replay_settings.config.replay_timing_sync_supported;
+ copy_settings_data->replay_support_fast_resync_in_ultra_sleep_mode = link->replay_settings.config.replay_support_fast_resync_in_ultra_sleep_mode;
copy_settings_data->debug.bitfields.enable_ips_visual_confirm = dc->dc->debug.enable_ips_visual_confirm;
@@ -189,6 +191,18 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
else
copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
+ copy_settings_data->flags.bitfields.alpm_mode = (enum dmub_alpm_mode)link->replay_settings.config.alpm_mode;
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ copy_settings_data->auxless_alpm_data.lfps_setup_ns = dc->dc->debug.auxless_alpm_lfps_setup_ns;
+ copy_settings_data->auxless_alpm_data.lfps_period_ns = dc->dc->debug.auxless_alpm_lfps_period_ns;
+ copy_settings_data->auxless_alpm_data.lfps_silence_ns = dc->dc->debug.auxless_alpm_lfps_silence_ns;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_override_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_us;
+ copy_settings_data->auxless_alpm_data.lfps_t1_t2_offset_us =
+ dc->dc->debug.auxless_alpm_lfps_t1t2_offset_us;
+ copy_settings_data->auxless_alpm_data.lttpr_count = link->dc->link_srv->dp_get_lttpr_count(link);
+ }
+
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
@@ -199,7 +213,8 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
*/
static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
uint32_t coasting_vtotal,
- uint8_t panel_inst)
+ uint8_t panel_inst,
+ uint16_t frame_skip_number)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -213,6 +228,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
pCmd->header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
+ pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -269,7 +285,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
* Set REPLAY power optimization flags and coasting vtotal.
*/
static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal)
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -287,6 +303,7 @@ static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dm
pCmd->replay_set_power_opt_data.panel_inst = panel_inst;
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal = (coasting_vtotal & 0xFFFF);
pCmd->replay_set_coasting_vtotal_data.coasting_vtotal_high = (coasting_vtotal & 0xFFFF0000) >> 16;
+ pCmd->replay_set_coasting_vtotal_data.frame_skip_number = frame_skip_number;
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
@@ -370,6 +387,19 @@ static void dmub_replay_send_cmd(struct dmub_replay *dmub,
cmd.replay_disabled_adaptive_sync_sdp.data.force_disabled =
cmd_element->disabled_adaptive_sync_sdp_data.force_disabled;
break;
+ case Replay_Set_Version:
+ //Header
+ cmd.replay_set_version.header.sub_type =
+ DMUB_CMD__REPLAY_SET_VERSION;
+ cmd.replay_set_version.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_version) -
+ sizeof(struct dmub_cmd_header);
+ //Cmd Body
+ cmd.replay_set_version.replay_set_version_data.panel_inst =
+ cmd_element->version_data.panel_inst;
+ cmd.replay_set_version.replay_set_version_data.version =
+ cmd_element->version_data.version;
+ break;
case Replay_Set_General_Cmd:
//Header
cmd.replay_set_general_cmd.header.sub_type =
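
Both coasting-vtotal paths above carry the new frame_skip_number field alongside the existing split of the 32-bit coasting_vtotal into two 16-bit command fields. A standalone check of that packing (struct and field names shortened from the dmub command payload):

#include <assert.h>
#include <stdint.h>

/* The DMUB command carries only 16-bit fields, so the 32-bit value
 * is split exactly as in the hunks above. */
struct coasting_fields {
	uint16_t coasting_vtotal;	/* low 16 bits		*/
	uint16_t coasting_vtotal_high;	/* high 16 bits		*/
	uint16_t frame_skip_number;	/* new in this patch	*/
};

static struct coasting_fields pack(uint32_t vtotal, uint16_t skip)
{
	struct coasting_fields f = {
		.coasting_vtotal      = vtotal & 0xFFFF,
		.coasting_vtotal_high = (vtotal & 0xFFFF0000) >> 16,
		.frame_skip_number    = skip,
	};
	return f;
}

int main(void)
{
	struct coasting_fields f = pack(0x0001F2D0, 2);

	assert(f.coasting_vtotal == 0xF2D0 && f.coasting_vtotal_high == 1);
	return 0;
}
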
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index e6346c0ffc0e..07c79739a980 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -27,11 +27,12 @@ struct dmub_replay_funcs {
void (*replay_send_cmd)(struct dmub_replay *dmub,
enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element);
void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint32_t coasting_vtotal,
- uint8_t panel_inst);
+ uint8_t panel_inst, uint16_t frame_skip_number);
void (*replay_residency)(struct dmub_replay *dmub,
uint8_t panel_inst, uint32_t *residency, const bool is_start, const enum pr_residency_mode mode);
void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
- unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal);
+ unsigned int power_opt, uint8_t panel_inst, uint32_t coasting_vtotal,
+ uint16_t frame_skip_number);
};
struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
index e0558a78b11c..1c1228116487 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c
@@ -812,7 +812,7 @@ bool dcn10_link_encoder_validate_output_with_stream(
enc10, &stream->timing);
break;
case SIGNAL_TYPE_EDP:
- is_valid = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? true : false;
+ is_valid = stream->timing.pixel_encoding == PIXEL_ENCODING_RGB;
break;
case SIGNAL_TYPE_VIRTUAL:
is_valid = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
index 22e66b375a7f..d928b4dcf6b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
@@ -28,7 +28,7 @@
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#include "dcn30/dcn30_afmt.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
index 0b47aeb60e79..bec0b4aaeb2b 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
index 9a92f73d5b7f..84cc2ddc52fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c
@@ -37,7 +37,7 @@
#include "link_enc_cfg.h"
#include "dc_dmub_srv.h"
#include "dal_asic_id.h"
-#include "link.h"
+#include "link_service.h"
#define CTX \
enc10->base.ctx
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
index ae81451a3a72..3e85e9c3d2cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
@@ -30,7 +30,7 @@
#include "dcn314_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
index 1a9bb614c41e..3523d1cdc1a3 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn32_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
index 6ab2a218b769..fd5d1dbf9dc6 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn35_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -397,7 +397,7 @@ static bool enc35_is_fifo_enabled(struct stream_encoder *enc)
uint32_t reset_val;
REG_GET(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, &reset_val);
- return (reset_val == 0) ? false : true;
+ return reset_val != 0;
}
void enc35_disable_fifo(struct stream_encoder *enc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
index d5fa551dd3c9..99aab70ef3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
@@ -32,7 +32,7 @@
#include "dcn401_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 7b9c22c45453..fbbf9c757b3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -277,12 +277,13 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc
/*
* SMU message tracing
*/
-void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx);
-void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx);
-
-#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx)
-#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx)
+void dm_trace_smu_enter(uint32_t msg_id, uint32_t param_in, unsigned int delay, struct dc_context *ctx);
+void dm_trace_smu_exit(bool success, uint32_t response, struct dc_context *ctx);
+#define TRACE_SMU_MSG_DELAY(msg_id, param_in, delay, ctx) dm_trace_smu_enter(msg_id, param_in, delay, ctx)
+#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_ENTER(msg_id, param_in, ctx) dm_trace_smu_enter(msg_id, param_in, 0, ctx)
+#define TRACE_SMU_MSG_EXIT(success, response, ctx) dm_trace_smu_exit(success, response, ctx)
/*
* DMUB Interfaces
@@ -311,4 +312,6 @@ void dm_dtn_log_end(struct dc_context *ctx,
char *dce_version_to_string(const int version);
+bool dc_supports_vrr(const enum dce_version v);
+
#endif /* __DM_SERVICES_H__ */
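
The tracing rework replaces the one-shot dm_trace_smu_msg()/dm_trace_smu_delay() pair with enter/exit hooks, so the SMU response and a success flag can be recorded after the firmware replies. A sketch of the bracketing a clock-manager send path might now use; send_smu_msg() and the stub hooks are hypothetical stand-ins for the real clk_mgr code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the reworked trace hooks in dm_services.h. */
static void trace_smu_enter(uint32_t msg, uint32_t param, unsigned int delay)
{
	printf("SMU enter: msg=%u param=%u delay=%u\n", msg, param, delay);
}

static void trace_smu_exit(bool ok, uint32_t response)
{
	printf("SMU exit: ok=%d response=%u\n", ok, response);
}

/* Hypothetical send helper: the enter/exit pair brackets the firmware
 * round-trip, which the old one-shot TRACE_SMU_MSG could not capture. */
static uint32_t send_smu_msg(uint32_t msg, uint32_t param)
{
	uint32_t response;

	trace_smu_enter(msg, param, 0);		/* TRACE_SMU_MSG_ENTER */
	response = 1;				/* poll firmware here  */
	trace_smu_exit(response != 0, response);/* TRACE_SMU_MSG_EXIT  */
	return response;
}

int main(void)
{
	return send_smu_msg(42, 0) ? 0 : 1;
}
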
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index bf63da266a18..3b093b8699ab 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -127,7 +127,7 @@ struct dm_pp_single_disp_config {
uint32_t src_height;
uint32_t src_width;
uint32_t v_refresh;
- uint32_t sym_clock; /* HDMI only */
+ uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */
struct dc_link_settings link_settings; /* DP only */
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index 2a2eaf6adf26..7aaf13bbd4e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -30,8 +30,7 @@
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "clk_mgr/dcn21/rn_clk_mgr.h"
-
-#include "link.h"
+#include "link_service.h"
#include "dcn20_fpu.h"
#include "dc_state_priv.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 390c1a77fda6..9c58ff1069d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
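
This `&` to `&&` change, repeated in the dcn20v2/dcn21/dcn30/dcn31/dcn314 calculators below, swaps a bitwise AND of boolean-valued operands for a short-circuit logical AND. For pure 0/1 operands the two agree, but `&&` states the intent and stays correct if an operand ever becomes a plain non-zero flag, as this standalone check shows:

#include <assert.h>

int main(void)
{
	int surf_linear = 0, log2_h = 0, surf_vert = 1;

	/* With 0/1 operands the two forms agree... */
	assert((!surf_linear & (log2_h == 0) & surf_vert) ==
	       (!surf_linear && (log2_h == 0) && surf_vert));

	/* ...but with a plain non-zero flag they diverge: 2 & 1 == 0. */
	int flag = 2;
	assert((flag & 1) == 0);	/* bitwise AND tests bit 0 only */
	assert((flag && 1) == 1);	/* logical AND tests truthiness */
	return 0;
}
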
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 843d6004258c..570e6e39eb45 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -646,7 +646,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
// the dpte_group_bytes is reduced for the specific case of vertical
// access of a tile surface that has dpte request of 8x1 ptes.
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 5718000627b0..f549da082c01 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -652,7 +652,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
//full size
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 8d4873f80df0..4fb37df54d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -620,7 +620,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 0c0b2d67c9cd..1aaa77265eed 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -326,7 +326,7 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
unsigned int i, closest_clk_lvl;
- int j;
+ int j = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0;
dc_assert_fp_enabled();
@@ -338,6 +338,15 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
dcn3_01_soc.num_chans = bw_params->num_channels;
ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
for (i = 0; i < clk_table->num_entries; i++) {
/* loop backwards*/
for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
@@ -353,8 +362,13 @@ void dcn301_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
s[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
s[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
- s[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- s[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ /* Clocks independent of voltage level. */
+ s[i].dispclk_mhz = max_dispclk_mhz ? max_dispclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ s[i].dppclk_mhz = max_dppclk_mhz ? max_dppclk_mhz :
+ dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
s[i].dram_bw_per_chan_gbps =
dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
s[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
@@ -435,12 +449,12 @@ void dcn301_fpu_calculate_wm_and_dlg(struct dc *dc,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set C */
table_entry = &bw_params->wm_table.entries[WM_C];
- vlevel = min(max(vlevel_req, 2), vlevel_max);
+ vlevel = clamp(vlevel_req, 2, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
&context->bw_ctx.dml, pipes, pipe_cnt);
/* WM Set B */
table_entry = &bw_params->wm_table.entries[WM_B];
- vlevel = min(max(vlevel_req, 1), vlevel_max);
+ vlevel = clamp(vlevel_req, 1, vlevel_max);
calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
&context->bw_ctx.dml, pipes, pipe_cnt);
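
The dcn301 change adds a prepass that finds the maximum dispclk/dppclk across the whole clock table and then advertises those maxima for every state, falling back to the per-voltage-level limits only when the table reports zero; the clamp() substitutions are behavior-preserving rewrites of min(max(x, lo), hi). A minimal sketch of the prepass over a stub clock table:

#include <stdio.h>

struct clk_entry { int dispclk_mhz, dppclk_mhz; };

int main(void)
{
	struct clk_entry tbl[] = { {400, 400}, {600, 550}, {971, 905} };
	int n = 3, max_disp = 0, max_dpp = 0;

	/* Prepass: max clocks independent of voltage level. */
	for (int i = 0; i < n; i++) {
		if (tbl[i].dispclk_mhz > max_disp)
			max_disp = tbl[i].dispclk_mhz;
		if (tbl[i].dppclk_mhz > max_dpp)
			max_dpp = tbl[i].dppclk_mhz;
	}

	/* Every state then advertises the maxima (zero fallback omitted). */
	printf("dispclk=%d dppclk=%d\n", max_disp, max_dpp);
	return 0;
}
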
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
index 8da97a96b1ce..8d7c59ec701d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c
@@ -280,7 +280,7 @@ void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
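
The dropped `i < num_dcfclk_sta_targets` clause is already guaranteed by the enclosing while condition, so re-testing it inside the loop was dead code; the same cleanup recurs in dcn303, dcn32 and dcn321 below. A simplified model of the merge loop these table builders use, showing the outer condition doing the bounding:

#include <stdio.h>

int main(void)
{
	/* Merge pattern used by the dcfclk/uclk table builders: the outer
	 * while already bounds i and j, so re-testing i inside is redundant. */
	int a[] = {200, 400, 600}, b[] = {300, 500}, out[5];
	int i = 0, j = 0, n = 0;

	while (i < 3 && j < 2) {
		if (a[i] < b[j])
			out[n++] = a[i++];
		else
			out[n++] = b[j++];
	}
	while (i < 3)
		out[n++] = a[i++];
	while (j < 2)
		out[n++] = b[j++];

	for (int k = 0; k < n; k++)
		printf("%d ", out[k]);
	printf("\n");
	return 0;
}
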
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
index e968870a4b81..b5d3fd4c3694 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c
@@ -285,7 +285,7 @@ void dcn303_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
/* create the final dcfclk and uclk table */
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
index 17a21bcbde17..1a28061bb9ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
@@ -808,6 +808,8 @@ void dcn316_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
int dcn_get_max_non_odm_pix_rate_100hz(struct _vcs_dpi_soc_bounding_box_st *soc)
{
+ dc_assert_fp_enabled();
+
return soc->clock_limits[0].dispclk_mhz * 10000.0 / (1.0 + soc->dcn_downspread_percent / 100.0);
}
@@ -815,6 +817,8 @@ int dcn_get_approx_det_segs_required_for_pstate(
struct _vcs_dpi_soc_bounding_box_st *soc,
int pix_clk_100hz, int bpp, int seg_size_kb)
{
+ dc_assert_fp_enabled();
+
/* Roughly calculate required crb to hide latency. In practice there is slightly
* more buffer available for latency hiding
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index c46bda2141ac..bfeb01477f0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -615,7 +615,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index b7d2a0caec11..04df263ff65e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -703,7 +703,7 @@ static void get_meta_and_pte_attr(
if (hostvm_enable)
rq_sizing_param->dpte_group_bytes = 512;
else {
- if (!surf_linear & (log2_dpte_req_height_ptes == 0) & surf_vert) //reduced, in this case, will have page fault within a group
+ if (!surf_linear && (log2_dpte_req_height_ptes == 0) && surf_vert) //reduced, in this case, will have page fault within a group
rq_sizing_param->dpte_group_bytes = 512;
else
rq_sizing_param->dpte_group_bytes = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 6160952245b4..8a0f128722b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -31,7 +31,7 @@
// We need this includes for WATERMARKS_* defines
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -3229,7 +3229,7 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
@@ -3401,7 +3401,7 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
uint32_t height = subvp_active_margin_list.res[i].height;
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ (uint64_t)pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
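
The refresh-rate fix widens v_total to 64 bits before the multiply: v_total * h_total is otherwise computed in 32 bits and can wrap for large timings before it is added to the 64-bit pixel-clock term. A standalone reproduction of the wrap with contrived values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t v_total = 70000, h_total = 70000; /* contrived large timing */

	/* 32-bit product wraps mod 2^32 before the widening assignment... */
	uint64_t wrong = v_total * h_total;
	/* ...while casting first does the multiply in 64 bits. */
	uint64_t right = (uint64_t)v_total * h_total;

	assert(wrong != right);
	assert(right == 4900000000ULL);
	return 0;
}
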
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
index 9ba6cb67655f..6c75aa82327a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c
@@ -139,7 +139,6 @@ void dml32_rq_dlg_get_rq_reg(display_rq_regs_st *rq_regs,
if (dual_plane) {
unsigned int p1_pte_row_height_linear = get_dpte_row_height_linear_c(mode_lib, e2e_pipe_param,
num_pipes, pipe_idx);
- ;
if (src->sw_mode == dm_sw_linear)
ASSERT(p1_pte_row_height_linear >= 8);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 8839faf42207..e0a1dc89ce43 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -779,7 +779,7 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 5d73efa2f0c9..817a370e80a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -31,7 +31,7 @@
#include "dml/dcn31/dcn31_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -445,6 +445,8 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
validate_mode);
@@ -498,9 +500,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
@@ -581,6 +581,8 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
unsigned int i, plane_count = 0;
DC_LOGGER_INIT(dc->ctx->logger);
+ dc_assert_fp_enabled();
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (context->res_ctx.pipe_ctx[i].plane_state)
plane_count++;
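
The dcn35/dcn351 hunks move floating-point context ownership to the callers: the FPU helpers now assert the context is already open via dc_assert_fp_enabled() instead of wrapping individual calls in DC_FP_START()/DC_FP_END(), avoiding nested begin/end pairs inside one FP region. A sketch of the convention with stub guards in place of the kernel FP primitives:

#include <assert.h>
#include <stdbool.h>

/* Stub FP-context guards standing in for DC_FP_START()/DC_FP_END(). */
static bool fp_enabled;
static void fp_start(void) { fp_enabled = true; }
static void fp_end(void)   { fp_enabled = false; }

/* Post-patch convention: FP helpers assert the context instead of
 * opening their own, so callers own exactly one START/END pair. */
static void populate_pipes_fpu(void)
{
	assert(fp_enabled);	/* dc_assert_fp_enabled() */
	/* ... float math ... */
}

int main(void)
{
	fp_start();		/* caller opens the FP region once  */
	populate_pipes_fpu();	/* no nested DC_FP_START inside     */
	fp_end();
	return 0;
}
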
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index 6f516af82956..77023b619f1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -10,7 +10,7 @@
#include "dml/dcn35/dcn35_fpu.h"
#include "dml/dml_inline_defs.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -478,6 +478,8 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
bool upscaled = false;
const unsigned int max_allowed_vblank_nom = 1023;
+ dc_assert_fp_enabled();
+
dcn31_populate_dml_pipes_from_context(dc, context, pipes,
validate_mode);
@@ -531,9 +533,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
- DC_FP_START();
dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
- DC_FP_END();
pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
pipes[pipe_cnt].pipe.src.dcc_rate = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
deleted file mode 100644
index 4c21ce42054c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ /dev/null
@@ -1,141 +0,0 @@
-# SPDX-License-Identifier: MIT */
-#
-# Copyright 2023 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dml2.
-
-dml2_ccflags := $(CC_FLAGS_FPU)
-dml2_rcflags := $(CC_FLAGS_NO_FPU)
-
-ifneq ($(CONFIG_FRAME_WARN),0)
- ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
- ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
- frame_warn_limit := 4096
- else
- frame_warn_limit := 3072
- endif
- else
- frame_warn_limit := 2048
- endif
-
- ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
- frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
- endif
-endif
-
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_core
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_mcg/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_dpmm/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_pmo/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/dml2_standalone_libraries/
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/src/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/inc
-subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2/dml21/
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_policy.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_mall_phantom.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
-
-DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
- dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
- dml_display_rq_dlg_calc.o
-
-AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
-
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags)
-
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_rcflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags)
-
-DML21 := src/dml2_top/dml2_top_interfaces.o
-DML21 += src/dml2_top/dml2_top_soc15.o
-DML21 += src/dml2_core/dml2_core_dcn4.o
-DML21 += src/dml2_core/dml2_core_utils.o
-DML21 += src/dml2_core/dml2_core_factory.o
-DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
-DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
-DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
-DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
-DML21 += src/dml2_mcg/dml2_mcg_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
-DML21 += src/dml2_pmo/dml2_pmo_factory.o
-DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
-DML21 += src/dml2_standalone_libraries/lib_float_math.o
-DML21 += dml21_translation_helper.o
-DML21 += dml21_wrapper.o
-DML21 += dml21_utils.o
-
-AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2/dml21/,$(DML21))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
-
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
new file mode 100644
index 000000000000..97e068b6bf6b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: MIT
+#
+#
+# Copyright 2023 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Authors: AMD
+#
+# Makefile for dml2.
+
+dml2_ccflags := $(CC_FLAGS_FPU)
+dml2_rcflags := $(CC_FLAGS_NO_FPU)
+
+ifneq ($(CONFIG_FRAME_WARN),0)
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
+ else
+ frame_warn_limit := 2056
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
+endif
+
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_core
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_mcg/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_dpmm/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_pmo/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/src/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/inc
+subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2_0/dml21/
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_core.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/display_mode_util.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_policy.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_mall_phantom.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml_display_rq_dlg_calc.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml2_dc_resource_mgmt.o := $(dml2_rcflags)
+
+DML2 = display_mode_core.o display_mode_util.o dml2_wrapper.o \
+ dml2_utils.o dml2_policy.o dml2_translation_helper.o dml2_dc_resource_mgmt.o dml2_mall_phantom.o \
+ dml_display_rq_dlg_calc.o
+
+AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2_0/,$(DML2))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML2)
+
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml21_wrapper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_translation_helper.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/dml21_utils.o := $(dml2_rcflags)
+
+DML21 := src/dml2_top/dml2_top_interfaces.o
+DML21 += src/dml2_top/dml2_top_soc15.o
+DML21 += src/dml2_core/dml2_core_dcn4.o
+DML21 += src/dml2_core/dml2_core_utils.o
+DML21 += src/dml2_core/dml2_core_factory.o
+DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
+DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
+DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
+DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
+DML21 += src/dml2_mcg/dml2_mcg_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
+DML21 += src/dml2_pmo/dml2_pmo_factory.o
+DML21 += src/dml2_pmo/dml2_pmo_dcn4_fams2.o
+DML21 += src/dml2_standalone_libraries/lib_float_math.o
+DML21 += dml21_translation_helper.o
+DML21 += dml21_wrapper.o
+DML21 += dml21_utils.o
+
+AMD_DAL_DML21 = $(addprefix $(AMDDALPATH)/dc/dml2_0/dml21/,$(DML21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DML21)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
index e450445bc05d..b954c9648fbe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/cmntypes.h
@@ -53,17 +53,17 @@ typedef const void *const_pvoid;
typedef const char *const_pchar;
typedef struct rgba_struct {
- uint8 a;
- uint8 r;
- uint8 g;
- uint8 b;
+ uint8 a;
+ uint8 r;
+ uint8 g;
+ uint8 b;
} rgba_t;
typedef struct {
- uint8 blue;
- uint8 green;
- uint8 red;
- uint8 alpha;
+ uint8 blue;
+ uint8 green;
+ uint8 red;
+ uint8 alpha;
} gen_color_t;
typedef union {
@@ -87,7 +87,7 @@ typedef union {
} uintfloat64;
#ifndef UNREFERENCED_PARAMETER
-#define UNREFERENCED_PARAMETER(x) x = x
+#define UNREFERENCED_PARAMETER(x) (x = x)
#endif
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
index 715f9019a33e..c468f492b876 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
@@ -6529,7 +6529,7 @@ static noinline_for_stack void dml_prefetch_check(struct display_mode_lib_st *mo
mode_lib->ms.TotImmediateFlipBytes = 0;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (!(mode_lib->ms.policy.ImmediateFlipRequirement[k] == dml_immediate_flip_not_required)) {
- mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k];
+ mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]);
if (mode_lib->ms.use_one_row_for_frame_flip[j][k]) {
mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (2 * mode_lib->ms.DPTEBytesPerRow[j][k]);
} else {
@@ -10205,6 +10205,7 @@ dml_bool_t dml_get_is_phantom_pipe(struct display_mode_lib_st *mode_lib, dml_uin
return (mode_lib->ms.cache_display_cfg.plane.UseMALLForPStateChange[plane_idx] == dml_use_mall_pstate_change_phantom_pipe);
}
+
#define dml_get_per_surface_var_func(variable, type, interval_var) type dml_get_##variable(struct display_mode_lib_st *mode_lib, dml_uint_t surface_idx) \
{ \
dml_uint_t plane_idx; \
@@ -10333,3 +10334,4 @@ dml_get_per_surface_var_func(bigk_fragment_size, dml_uint_t, mode_lib->mp.BIGK_F
dml_get_per_surface_var_func(dpte_bytes_per_row, dml_uint_t, mode_lib->mp.PixelPTEBytesPerRow);
dml_get_per_surface_var_func(meta_bytes_per_row, dml_uint_t, mode_lib->mp.MetaRowByte);
dml_get_per_surface_var_func(det_buffer_size_kbytes, dml_uint_t, mode_lib->ms.DETBufferSizeInKByte);
+
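
The TotImmediateFlipBytes fix a few hunks above regroups the accumulation so NoOfDPP scales the sum of the PDE/meta-PTE frame bytes and the meta row bytes; previously only the first term was multiplied per DPP, undercounting immediate-flip bandwidth. A pocket check of the two groupings with made-up values:

#include <assert.h>

int main(void)
{
	int dpp = 2, pde_bytes = 100, meta_row = 40;

	int old_total = dpp * pde_bytes + meta_row;	/* 240: meta not scaled */
	int new_total = dpp * (pde_bytes + meta_row);	/* 280: both per-DPP    */

	assert(old_total == 240 && new_total == 280);
	return 0;
}
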
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
index a38ed89c47a9..a38ed89c47a9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
index dbeb08466092..5b40dcdc4406 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core_structs.h
@@ -274,7 +274,6 @@ enum dml_clk_cfg_policy {
dml_use_state_freq = 2
};
-
struct soc_state_bounding_box_st {
dml_float_t socclk_mhz;
dml_float_t dscclk_mhz;
@@ -1894,7 +1893,7 @@ struct display_mode_lib_scratch_st {
struct CalculatePrefetchSchedule_params_st CalculatePrefetchSchedule_params;
};
-/// @brief Represent the overall soc/ip enviroment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
+/// @brief Represent the overall soc/ip environment. It contains data structure represent the soc/ip characteristic and also structures that hold calculation output
struct display_mode_lib_st {
dml_uint_t project;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
index 14d389525296..e574c81edf5e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_lib_defines.h
@@ -52,7 +52,7 @@
#define __DML_VBA_DEBUG__
#define __DML_VBA_ENABLE_INLINE_CHECK_ 0
#define __DML_VBA_MIN_VSTARTUP__ 9 //<brief At which vstartup the DML start to try if the mode can be supported
-#define __DML_ARB_TO_RET_DELAY__ 7 + 95 //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
+#define __DML_ARB_TO_RET_DELAY__ (7 + 95) //<brief Delay in DCFCLK from ARB to DET (1st num is ARB to SDPIF, 2nd number is SDPIF to DET)
#define __DML_MIN_DCFCLK_FACTOR__ 1.15 //<brief fudge factor for min dcfclk calclation
#define __DML_MAX_VRATIO_PRE__ 4.0 //<brief Prefetch schedule max vratio
#define __DML_MAX_VRATIO_PRE_OTO__ 4.0 //<brief Prefetch schedule max vratio for one to one scheduling calculation for prefetch
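
Parenthesizing __DML_ARB_TO_RET_DELAY__ closes a classic macro-precedence hazard: used in an expression such as `n * __DML_ARB_TO_RET_DELAY__`, the old body `7 + 95` bound the multiplication to the 7 alone. A standalone demonstration:

#include <assert.h>

#define DELAY_UNPAREN 7 + 95	/* old definition   */
#define DELAY_PAREN   (7 + 95)	/* fixed definition */

int main(void)
{
	/* Multiplication binds to the first literal only when the
	 * macro body is unparenthesized. */
	assert(2 * DELAY_UNPAREN == 2 * 7 + 95);	/* 109, not 204 */
	assert(2 * DELAY_PAREN   == 204);
	return 0;
}
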
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
index 89890c88fd66..89890c88fd66 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
index 113b0265e1d1..a82b49cf7fb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_util.h
@@ -30,7 +30,6 @@
#include "display_mode_core_structs.h"
#include "cmntypes.h"
-
#include "dml_assert.h"
#include "dml_logging.h"
@@ -72,5 +71,4 @@ __DML_DLL_EXPORT__ dml_uint_t dml_get_plane_idx(const struct display_mode_lib_st
__DML_DLL_EXPORT__ dml_uint_t dml_get_pipe_idx(const struct display_mode_lib_st *mode_lib, dml_uint_t plane_idx);
__DML_DLL_EXPORT__ void dml_calc_pipe_plane_mapping(const struct dml_hw_resource_st *hw, dml_uint_t *pipe_plane);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
index a06217a9eef6..bf5e7f4e0416 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
@@ -8,7 +8,7 @@
#include "dml2_internal_types.h"
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
-#include "bounding_boxes/dcn4_soc_bb.h"
+#include "soc_and_ip_translator.h"
static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
const struct dc *in_dc,
@@ -38,375 +38,37 @@ static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options,
pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE;
}
-/*
- * Populate dml_init based on default static values in soc bb. The default
- * values are for reference and support at least minimal operation of current
- * SoC and DCN hardware. The values could be modifed by subsequent override
- * functions to reflect our true hardware capability.
- */
-static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
+static enum dml2_project_id dml21_dcn_revision_to_dml2_project_id(enum dce_version dcn_version)
{
- switch (in_dc->ctx->dce_version) {
+ enum dml2_project_id project_id;
+ switch (dcn_version) {
case DCN_VERSION_4_01:
- dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
- dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
- dml_init->soc_bb = dml2_socbb_dcn401;
- dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params;
- dml_init->ip_caps = dml2_dcn401_max_ip_caps;
+ project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
break;
default:
- memset(dml_init, 0, sizeof(*dml_init));
+ project_id = dml2_project_invalid;
DC_ERR("unsupported dcn version for DML21!");
- return;
- }
-}
-
-static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
- dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000;
- dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-}
-
-/*
- * SMU stands for System Management Unit. It is a power management processor.
- * It owns the initialization of dc's clock table and programming of clock values
- * based on dc's requests.
- * Our clock values in the base soc bb are dummy placeholders. The real clock
- * values are retrieved from SMU firmware into dc's clock table at runtime.
- * This function overrides our dummy placeholder values with the real values
- * from dc's clock table.
- */
-static void override_dml_init_with_values_from_smu(
- struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- int i;
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table;
- struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
-
- if (!in_dc->clk_mgr->funcs->is_smu_present ||
- !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr))
- /* skip if smu is not present */
- return;
-
- /* dcfclk */
- if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
- dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dcfclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
- dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
- dml_clk_table->dcfclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- dml_clk_table->dcfclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dcfclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* fclk */
- if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
- dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->fclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
- dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
- dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
- dml_clk_table->fclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- dml_clk_table->fclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
- }
- } else {
- dml_clk_table->fclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* uclk */
- if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
- dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->uclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
- dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
- dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
- dml_clk_table->uclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- dml_clk_table->uclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
- }
- } else {
- dml_clk_table->uclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dispclk */
- if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
- dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dispclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
- dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
- dml_clk_table->dispclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- dml_clk_table->dispclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dispclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dppclk */
- if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
- dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dppclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
- dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
- dml_clk_table->dppclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- dml_clk_table->dppclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dppclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* dtbclk */
- if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
- dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->dtbclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
- dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
- dml_clk_table->dtbclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- dml_clk_table->dtbclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
- }
- } else {
- dml_clk_table->dtbclk.clk_values_khz[i] = 0;
- }
- }
- }
-
- /* socclk */
- if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
- dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
- for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
- if (i < dml_clk_table->socclk.num_clk_values) {
- if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
- dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
- if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
- dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
- dml_clk_table->socclk.num_clk_values = i + 1;
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- dml_clk_table->socclk.num_clk_values = i;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
- }
- } else {
- dml_clk_table->socclk.clk_values_khz[i] = 0;
- }
- }
- }
-}
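The seven per-clock override blocks deleted above (dcfclk, fclk, uclk, dispclk, dppclk, dtbclk, socclk) all instantiate one clamping pattern; a minimal sketch of that pattern factored into a single helper, where the helper name and flat-array parameters are illustrative rather than driver API:

/*
 * Generic form of the removed per-clock override: copy DPM levels from the
 * DC clock table into the DML table, clamping the list at the DC-mode limit.
 * Inputs are in MHz, outputs in kHz, as in the removed code.
 */
static void copy_clk_levels(unsigned int *dml_khz, unsigned int *dml_num,
			    const unsigned int *dc_mhz, unsigned int dc_num,
			    unsigned int dc_mode_limit_mhz, bool use_dc_limits,
			    unsigned int max_entries)
{
	unsigned int i;

	if (!dc_num)
		return;

	*dml_num = dc_num;
	for (i = 0; i < max_entries; i++) {
		if (i >= *dml_num) {
			dml_khz[i] = 0;
		} else if (use_dc_limits && dc_mode_limit_mhz &&
			   dc_mhz[i] > dc_mode_limit_mhz) {
			if (i == 0 || dc_mhz[i - 1] < dc_mode_limit_mhz) {
				/* first level above the limit: clamp to it */
				dml_khz[i] = dc_mode_limit_mhz * 1000;
				*dml_num = i + 1;
			} else {
				/* limit already in the list: truncate here */
				dml_khz[i] = 0;
				*dml_num = i;
			}
		} else {
			dml_khz[i] = dc_mhz[i] * 1000;
		}
	}
}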
-
-static void override_dml_init_with_values_from_vbios(
- struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params;
- struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb;
- struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us =
- (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns + 9) / 10;
-
- if (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns > 0)
- dml_soc_bb->power_management_parameters.stutter_exit_latency_us =
- (in_dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns + 9) / 10;
-
- if (dc_bw_params->num_channels) {
- dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- } else if (in_dc->ctx->dc_bios->vram_info.num_chans) {
- dml_clk_table->dram_config.channel_count = in_dc->ctx->dc_bios->vram_info.num_chans;
- dml_soc_bb->mall_allocated_for_dcn_mbytes = in_dc->caps.mall_size_total / 1048576;
- }
-
- if (dc_bw_params->dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
- } else if (in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) {
- dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
+ break;
}
- dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+ return project_id;
}
-
-static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init,
+void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
const struct dml2_configuration_options *config,
const struct dc *in_dc)
{
- /*
- * TODO - There seem to be overlaps between the values overridden from
- * dmub and vbios. Investigate and identify the values that DMUB needs
- * to own.
- */
-// const struct dmub_soc_bb_params *dmub_bb_params =
-// (const struct dmub_soc_bb_params *)config->bb_from_dmub;
-
-// if (dmub_bb_params == NULL)
-// return;
-
-// if (dmub_bb_params->dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
-// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->dram_clk_change_read_only_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us =
-// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0;
-// if (dmub_bb_params->dram_clk_change_write_only_ns > 0)
-// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us =
-// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0;
-// if (dmub_bb_params->fclk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
-// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->g7_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us =
-// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0;
-// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
-// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->stutter_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
-// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
-// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us =
-// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0;
-// if (dmub_bb_params->z8_min_idle_time_ns > 0)
-// dml_init->soc_bb.power_management_parameters.z8_min_idle_time =
-// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0;
-// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE
-// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us =
-// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us =
-// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
-// #else
-// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us =
-// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0;
-// if (dmub_bb_params->type_b_ppt_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us =
-// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0;
-// #endif
-// if (dmub_bb_params->vmin_limit_dispclk_khz > 0)
-// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz;
-// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0)
-// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz;
-// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0)
-// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us =
-// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0;
-}
+ dml_init->options.project_id = dml21_dcn_revision_to_dml2_project_id(in_dc->ctx->dce_version);
-static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- if (!config->use_native_soc_bb_construction) {
+ if (config->use_native_soc_bb_construction) {
+ in_dc->soc_and_ip_translator->translator_funcs->get_soc_bb(&dml_init->soc_bb, in_dc, config);
+ in_dc->soc_and_ip_translator->translator_funcs->get_ip_caps(&dml_init->ip_caps);
+ } else {
dml_init->soc_bb = config->external_socbb_ip_params->soc_bb;
dml_init->ip_caps = config->external_socbb_ip_params->ip_params;
}
- if (in_dc->bb_overrides.sr_exit_time_ns)
- dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us =
- in_dc->bb_overrides.sr_exit_time_ns / 1000.0;
-
- if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns)
- dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us =
- in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
-
- if (in_dc->bb_overrides.dram_clock_change_latency_ns)
- dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us =
- in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
-
- if (in_dc->bb_overrides.fclk_clock_change_latency_ns)
- dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us =
- in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
-}
-
-void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init,
- const struct dml2_configuration_options *config,
- const struct dc *in_dc)
-{
- populate_default_dml_init_params(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_smu(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_vbios(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_dmub(dml_init, config, in_dc);
-
- override_dml_init_with_values_from_software_policy(dml_init, config, in_dc);
+ dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config);
}
static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream)
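For orientation, the translator consumed by the new dml21_populate_dml_init_params() can be pictured as a small ops table; a sketch assuming its shape mirrors the two calls made above (only get_soc_bb and get_ip_caps are confirmed by this patch, the surrounding struct layout is a guess):

/* Hypothetical shape of dc->soc_and_ip_translator; only the two hooks
 * exercised above are shown.
 */
struct soc_and_ip_translator_funcs {
	void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc,
			   const struct dml2_configuration_options *config);
	void (*get_ip_caps)(struct dml2_ip_capabilities *ip_caps);
};

struct soc_and_ip_translator {
	const struct soc_and_ip_translator_funcs *translator_funcs;
};

This replaces the previous multi-stage pipeline (static defaults, then hardware, SMU, VBIOS, DMUB, and software-policy overrides) with a single per-ASIC provider.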
@@ -422,25 +84,29 @@ static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stre
static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing,
struct dc_stream_state *stream,
+ struct pipe_ctx *pipe_ctx,
struct dml2_context *dml_ctx)
{
unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz;
+ uint32_t pix_clk_100hz;
- timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
timing->h_front_porch = stream->timing.h_front_porch;
timing->v_front_porch = stream->timing.v_front_porch;
timing->pixel_clock_khz = stream->timing.pix_clk_100hz / 10;
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
+ timing->pixel_clock_khz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz / 10;
if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
timing->pixel_clock_khz *= 2;
- timing->h_total = stream->timing.h_total;
+ timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
timing->v_total = stream->timing.v_total;
timing->h_sync_width = stream->timing.h_sync_width;
timing->interlaced = stream->timing.flags.INTERLACE;
hblank_start = stream->timing.h_total - stream->timing.h_front_porch;
- timing->h_blank_end = hblank_start - stream->timing.h_addressable
+ timing->h_blank_end = hblank_start - stream->timing.h_addressable - pipe_ctx->dsc_padding_params.dsc_hactive_padding
- stream->timing.h_border_left - stream->timing.h_border_right;
if (hblank_start < stream->timing.h_addressable)
@@ -459,15 +125,16 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
/* limit min refresh rate to DC cap */
min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz;
if (stream->ctx->dc->caps.max_v_total != 0) {
- min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL),
- (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream)));
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) {
+ pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ } else {
+ pix_clk_100hz = stream->timing.pix_clk_100hz;
+ }
+ min_hardware_refresh_in_uhz = div64_u64((pix_clk_100hz * 100000000ULL),
+ (timing->h_total * (long long)calc_max_hardware_v_total(stream)));
}
- if (stream->timing.min_refresh_in_uhz > min_hardware_refresh_in_uhz) {
- timing->drr_config.min_refresh_uhz = stream->timing.min_refresh_in_uhz;
- } else {
- timing->drr_config.min_refresh_uhz = min_hardware_refresh_in_uhz;
- }
+ timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz);
if (dml_ctx->config.callbacks.get_max_flickerless_instant_vtotal_increase &&
stream->ctx->dc->config.enable_fpo_flicker_detection == 1)
@@ -515,21 +182,6 @@ static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cf
timing->vblank_nom = timing->v_total - timing->v_active;
}
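The min-refresh clamp in populate_dml21_timing_config_from_stream_state() works in 100 Hz pixel-clock units and uHz refresh units; a standalone sketch of the same arithmetic (helper name illustrative):

#include <linux/math64.h>

/*
 * Lowest refresh rate (in uHz) reachable by stretching vtotal:
 * refresh_uhz = pix_clk_100hz * 100 * 1e6 / (h_total * max_v_total).
 * The 100000000ULL factor converts 100 Hz units to Hz and Hz to uHz.
 */
static unsigned int min_hw_refresh_uhz(unsigned int pix_clk_100hz,
				       unsigned int h_total,
				       unsigned int max_v_total)
{
	return div64_u64((u64)pix_clk_100hz * 100000000ULL,
			 (u64)h_total * max_v_total);
}

For a 594 MHz pixel clock (pix_clk_100hz = 5940000), h_total = 4400, and an assumed max hardware v_total of 65535, this evaluates to about 2,060,000 uHz, i.e. a ~2.06 Hz hardware floor on the minimum refresh rate.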
-/**
- * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration
- * based on the pipe context.
- * @timing: Pointer to the dml2_timing_cfg structure to be adjusted.
- * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value.
- *
- * This function modifies the horizontal active and blank end timings by adding and subtracting
- * the horizontal blanking borrow value from the pipe context, respectively.
- */
-static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe)
-{
- timing->h_active += pipe->hblank_borrow;
- timing->h_blank_end -= pipe->hblank_borrow;
-}
-
static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output,
struct dc_stream_state *stream, const struct pipe_ctx *pipe)
{
@@ -829,7 +481,9 @@ static const struct scaler_data *get_scaler_data_for_plane(
temp_pipe->plane_state = pipe->plane_state;
temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps;
temp_pipe->stream_res = pipe->stream_res;
- temp_pipe->hblank_borrow = pipe->hblank_borrow;
+ temp_pipe->dsc_padding_params.dsc_hactive_padding = pipe->dsc_padding_params.dsc_hactive_padding;
+ temp_pipe->dsc_padding_params.dsc_htotal_padding = pipe->dsc_padding_params.dsc_htotal_padding;
+ temp_pipe->dsc_padding_params.dsc_pix_clk_100hz = pipe->dsc_padding_params.dsc_pix_clk_100hz;
dml_ctx->config.callbacks.build_scaling_params(temp_pipe);
break;
}
@@ -1097,8 +751,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
disp_cfg_stream_location = dml_dispcfg->num_streams++;
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
- populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
- adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
+ populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index], dml_ctx);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]);
@@ -1165,6 +818,8 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.socclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.socclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_dramclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.uclk_khz;
context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.base_efficiency = in_ctx->v21.mode_programming.programming->stutter.base_percent_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.low_power_efficiency = in_ctx->v21.mode_programming.programming->stutter.low_power_percent_efficiency;
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
index 9880d3e0398e..9880d3e0398e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
index ee721606b883..ee721606b883 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
index 4bff52eaaef8..4bff52eaaef8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
index 03de3cf06ae5..798abb2b2e67 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
@@ -60,7 +60,7 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, con
DC_FP_START();
- dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc);
+ dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, &dml_ctx->config, in_dc);
dml2_initialize_instance(&dml_ctx->v21.dml_init);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
index 15f92029d2e5..15f92029d2e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
index 793e1c038efd..16a4f97bca4e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn4_soc_bb.h
@@ -2,7 +2,6 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-
#ifndef __DML_DML_DCN4_SOC_BB__
#define __DML_DML_DCN4_SOC_BB__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
index 281d7ad230d8..281d7ad230d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml2_external_lib_deps.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
index a64ec4dcf11a..a64ec4dcf11a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
index b05030926ce8..bf57df42d1d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_dchub_registers.h
@@ -46,7 +46,6 @@ struct dml2_display_dlg_regs {
uint32_t dst_y_delta_drq_limit;
uint32_t refcyc_per_vm_dmdata;
uint32_t dmdata_dl_delta;
- uint32_t dst_y_svp_drq_limit;
// MRQ
uint32_t refcyc_per_meta_chunk_vblank_l;
@@ -122,6 +121,8 @@ struct dml2_display_rq_regs {
uint32_t crq_expansion_mode;
uint32_t plane1_base_address;
uint32_t unbounded_request_enabled;
+ bool pte_buffer_mode;
+ bool force_one_row_for_frame;
// MRQ
uint32_t mrq_expansion_mode;
@@ -159,6 +160,8 @@ struct dml2_dchub_watermark_regs {
uint32_t sr_exit;
uint32_t sr_enter_z8;
uint32_t sr_exit_z8;
+ uint32_t sr_enter_low_power;
+ uint32_t sr_exit_low_power;
uint32_t uclk_pstate;
uint32_t fclk_pstate;
uint32_t temp_read_or_ppt;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
index e8dc6471c0be..35aa954248cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
@@ -49,6 +49,11 @@ enum dml2_source_format_class {
dml2_422_packed_12 = 18
};
+enum dml2_sample_positioning {
+ dml2_interstitial = 0,
+ dml2_cosited = 1
+};
+
enum dml2_rotation_angle {
dml2_rotation_0 = 0,
dml2_rotation_90 = 1,
@@ -82,6 +87,15 @@ enum dml2_output_link_dp_rate {
dml2_dp_rate_uhbr20 = 6
};
+enum dml2_pstate_type {
+ dml2_pstate_type_uclk = 0,
+ dml2_pstate_type_fclk = 1,
+ dml2_pstate_type_ppt = 2,
+ dml2_pstate_type_temp_read = 3,
+ dml2_pstate_type_dummy_pstate = 4,
+ dml2_pstate_type_count = 5
+};
+
enum dml2_uclk_pstate_change_strategy {
dml2_uclk_pstate_change_strategy_auto = 0,
dml2_uclk_pstate_change_strategy_force_vactive = 1,
@@ -222,7 +236,11 @@ struct dml2_composition_cfg {
struct {
bool enabled;
+ bool easf_enabled;
+ bool isharp_enabled;
bool upsp_enabled;
+ enum dml2_sample_positioning upsp_sample_positioning;
+ unsigned int upsp_vtaps;
struct {
double h_ratio;
double v_ratio;
@@ -384,7 +402,7 @@ struct dml2_plane_parameters {
// reserved_vblank_time_ns is the minimum time to reserve in vblank for Twait
// The actual reserved vblank time used for the corresponding stream in mode_programming would be at least as much as this per-plane override.
long reserved_vblank_time_ns;
- unsigned int max_vactive_det_fill_delay_us; // 0 = no reserved time, +ve = explicit max delay
+ unsigned int max_vactive_det_fill_delay_us[dml2_pstate_type_count]; // 0 = no reserved time, +ve = explicit max delay
unsigned int gpuvm_min_page_size_kbytes;
unsigned int hostvm_min_page_size_kbytes;
@@ -413,7 +431,6 @@ struct dml2_stream_parameters {
bool disable_dynamic_odm;
bool disable_subvp;
int minimum_vblank_idle_requirement_us;
- bool minimize_active_latency_hiding;
struct {
struct {
@@ -456,6 +473,7 @@ struct dml2_display_cfg {
bool enable;
bool value;
} force_nom_det_size_kbytes;
+
bool mode_support_check_disable;
bool mcache_admissibility_check_disable;
bool surface_viewport_size_check_disable;
@@ -478,7 +496,6 @@ struct dml2_display_cfg {
bool synchronize_ddr_displays_for_uclk_pstate_change;
bool max_outstanding_when_urgent_expected_disable;
bool enable_subvp_implicit_pmo; //enables PMO to switch pipe uclk strategy to subvp, and generate phantom programming
- unsigned int best_effort_min_active_latency_hiding_us;
bool all_streams_blanked;
} overrides;
};
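With max_vactive_det_fill_delay_us now an array indexed by dml2_pstate_type, each p-state mechanism can carry its own DET-fill deadline; this patch only populates the UCLK slot. A sketch of scanning the array (the helper is illustrative, not driver API):

/* Illustrative helper: pick the tightest DET-fill deadline across all
 * p-state types tracked in the new array (0 means "no explicit limit").
 */
static unsigned int min_det_fill_deadline_us(const struct dml2_plane_parameters *plane)
{
	unsigned int t, min_us = 0;

	for (t = 0; t < dml2_pstate_type_count; t++) {
		unsigned int us = plane->overrides.max_vactive_det_fill_delay_us[t];

		if (us && (!min_us || us < min_us))
			min_us = us;
	}
	return min_us;
}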
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
index 8f624a912e78..8f624a912e78 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_policy_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
index 8c9f414aa6bf..1fbc520c2540 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_soc_parameter_types.h
@@ -89,13 +89,15 @@ struct dml2_soc_qos_parameters {
struct dml2_soc_power_management_parameters {
double dram_clk_change_blackout_us;
- double dram_clk_change_read_only_us;
- double dram_clk_change_write_only_us;
+ double dram_clk_change_read_only_us; // deprecated
+ double dram_clk_change_write_only_us; // deprecated
double fclk_change_blackout_us;
double g7_ppt_blackout_us;
double g7_temperature_read_blackout_us;
double stutter_enter_plus_exit_latency_us;
double stutter_exit_latency_us;
+ double low_power_stutter_enter_plus_exit_latency_us;
+ double low_power_stutter_exit_latency_us;
double z8_stutter_enter_plus_exit_latency_us;
double z8_stutter_exit_latency_us;
double z8_min_idle_time;
@@ -143,6 +145,8 @@ struct dml2_soc_bb {
struct dml2_soc_vmin_clock_limits vmin_limit;
double lower_bound_bandwidth_dchub;
+ double fraction_of_urgent_bandwidth_nominal_target;
+ double fraction_of_urgent_bandwidth_flip_target;
unsigned int dprefclk_mhz;
unsigned int xtalclk_mhz;
unsigned int pcie_refclk_mhz;
@@ -168,6 +172,7 @@ struct dml2_soc_bb {
struct dml2_ip_capabilities {
unsigned int pipe_count;
unsigned int otg_count;
+ unsigned int TDLUT_33cube_count;
unsigned int num_dsc;
unsigned int max_num_dp2p0_streams;
unsigned int max_num_hdmi_frl_outputs;
@@ -186,7 +191,9 @@ struct dml2_ip_capabilities {
unsigned int subvp_prefetch_end_to_mall_start_us;
unsigned int subvp_fw_processing_delay;
unsigned int max_vactive_det_fill_delay_us;
-
+ unsigned int ppt_max_allow_delay_us;
+ unsigned int temp_read_max_allow_delay_us;
+ unsigned int dummy_pstate_max_allow_delay_us;
/* FAMS2 delays */
struct {
unsigned int max_allow_delay_us;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
index 98c0234e2f47..452e4a2e72c0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
@@ -16,9 +16,9 @@ struct dml2_instance;
enum dml2_project_id {
dml2_project_invalid = 0,
- dml2_project_dcn4x_stage1 = 1,
- dml2_project_dcn4x_stage2 = 2,
- dml2_project_dcn4x_stage2_auto_drr_svp = 3,
+ dml2_project_dcn4x_stage1,
+ dml2_project_dcn4x_stage2,
+ dml2_project_dcn4x_stage2_auto_drr_svp,
};
enum dml2_pstate_change_support {
@@ -70,6 +70,8 @@ struct dml2_pmo_options {
bool disable_dyn_odm;
bool disable_dyn_odm_for_multi_stream;
bool disable_dyn_odm_for_stream_with_svp;
+ struct dml2_pmo_pstate_strategy *override_strategy_lists[DML2_MAX_PLANES];
+ unsigned int num_override_strategies_per_list[DML2_MAX_PLANES];
};
struct dml2_options {
@@ -310,6 +312,7 @@ struct dml2_mode_support_info {
bool NumberOfOTGSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
+ bool NumberOfTDLUT33cubeSupport;
bool WritebackScaleRatioAndTapsSupport;
bool CursorSupport;
bool PitchSupport;
@@ -357,6 +360,8 @@ struct dml2_mode_support_info {
unsigned int AlignedCPitch[DML2_MAX_PLANES];
bool g6_temp_read_support;
bool temp_read_or_ppt_support;
+ bool qos_bandwidth_support;
+ bool dcfclk_support;
}; // dml2_mode_support_info
struct dml2_display_cfg_programming {
@@ -417,6 +422,8 @@ struct dml2_display_cfg_programming {
struct {
bool supported_in_blank; // Changing to configurations where this is false requires stutter to be disabled during the transition
+ uint8_t base_percent_efficiency; //LP1
+ uint8_t low_power_percent_efficiency; //LP2
} stutter;
struct {
@@ -669,6 +676,8 @@ struct dml2_display_cfg_programming {
unsigned int PrefetchMode[DML2_MAX_PLANES]; // LEGACY_ONLY
bool ROBUrgencyAvoidance;
double LowestPrefetchMargin;
+
+ unsigned int pstate_recout_reduction_lines[DML2_MAX_PLANES];
} misc;
struct dml2_mode_support_info mode_support_info;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
index 6ee37386f672..eba948e187c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -28,6 +28,7 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.writeback_interface_buffer_size_kbytes = 90,
//Number of pipes after DCN Pipe harvesting
.max_num_dpp = 4,
+ .max_num_opp = 4,
.max_num_otg = 4,
.max_num_wb = 1,
.max_dchub_pscl_bw_pix_per_clk = 4,
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
index a68bb001a346..a68bb001a346 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index b9cff2198511..a02e9fd6b5ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -1238,18 +1238,27 @@ static void CalculateDETBufferSize(
static double CalculateRequiredDispclk(
enum dml2_odm_mode ODMMode,
- double PixelClock)
+ double PixelClock,
+ bool isTMDS420)
{
+ double DispClk;
if (ODMMode == dml2_odm_mode_combine_4to1) {
- return PixelClock / 4.0;
+ DispClk = PixelClock / 4.0;
} else if (ODMMode == dml2_odm_mode_combine_3to1) {
- return PixelClock / 3.0;
+ DispClk = PixelClock / 3.0;
} else if (ODMMode == dml2_odm_mode_combine_2to1) {
- return PixelClock / 2.0;
+ DispClk = PixelClock / 2.0;
} else {
- return PixelClock;
+ DispClk = PixelClock;
+ }
+
+ if (isTMDS420) {
+ double TMDS420MinPixClock = PixelClock / 2.0;
+ DispClk = math_max2(DispClk, TMDS420MinPixClock);
}
+
+ return DispClk;
}
static double TruncToValidBPP(
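The new isTMDS420 floor reflects that HDMI TMDS 4:2:0 clocks at half the pixel rate, so DISPCLK may not drop below PixelClock/2 even under ODM combine; a worked instance of the new behaviour:

/* ODM combine divides the DISPCLK requirement, but HDMI TMDS 4:2:0
 * imposes a floor of PixelClock/2. Values in MHz.
 */
double pixel_clock = 600.0;
double dispclk = pixel_clock / 4.0;          /* 4:1 ODM combine: 150 MHz */
double tmds420_floor = pixel_clock / 2.0;    /* 300 MHz */

if (tmds420_floor > dispclk)
	dispclk = tmds420_floor;             /* CalculateRequiredDispclk() returns 300 */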
@@ -1294,6 +1303,7 @@ static double TruncToValidBPP(
MinDSCBPP = 8;
MaxDSCBPP = 16;
} else {
+
if (Output == dml2_hdmi || Output == dml2_hdmifrl) {
NonDSCBPP0 = 24;
NonDSCBPP1 = 24;
@@ -1311,6 +1321,7 @@ static double TruncToValidBPP(
MaxDSCBPP = 16;
}
}
+
if (Output == dml2_dp2p0) {
MaxLinkBPP = LinkBitRate * Lanes / PixelClock * 128.0 / 132.0 * 383.0 / 384.0 * 65536.0 / 65540.0;
} else if (DSCEnable && Output == dml2_dp) {
@@ -4038,7 +4049,9 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
bool UseDSC,
unsigned int NumberOfDSCSlices,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double DISPCLKRequired,
unsigned int NumberOfDPPRequired,
unsigned int MaxHActiveForDSC,
@@ -4054,7 +4067,7 @@ static bool ValidateODMMode(enum dml2_odm_mode ODMMode,
if (DISPCLKRequired > MaxDispclk)
return false;
- if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP)
+ if ((TotalNumberOfActiveDPP + NumberOfDPPRequired) > MaxNumDPP || (TotalNumberOfActiveOPP + NumberOfDPPRequired) > MaxNumOPP)
return false;
if (are_odm_segments_symmetrical) {
if (HActive % (NumberOfDPPRequired * pixels_per_clock_cycle))
@@ -4100,7 +4113,9 @@ static noinline_for_stack void CalculateODMMode(
double MaxDispclk,
bool DSCEnable,
unsigned int TotalNumberOfActiveDPP,
+ unsigned int TotalNumberOfActiveOPP,
unsigned int MaxNumDPP,
+ unsigned int MaxNumOPP,
double PixelClock,
unsigned int NumberOfDSCSlices,
@@ -4122,11 +4137,12 @@ static noinline_for_stack void CalculateODMMode(
bool success;
bool UseDSC = DSCEnable && (NumberOfDSCSlices > 0);
enum dml2_odm_mode DecidedODMMode;
+ bool isTMDS420 = (OutFormat == dml2_420 && Output == dml2_hdmi);
- SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock);
- SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock);
+ SurfaceRequiredDISPCLKWithoutODMCombine = CalculateRequiredDispclk(dml2_odm_mode_bypass, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineTwoToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_2to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineThreeToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_3to1, PixelClock, isTMDS420);
+ SurfaceRequiredDISPCLKWithODMCombineFourToOne = CalculateRequiredDispclk(dml2_odm_mode_combine_4to1, PixelClock, isTMDS420);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: ODMUse = %d\n", __func__, ODMUse);
DML_LOG_VERBOSE("DML::%s: Output = %d\n", __func__, Output);
@@ -4169,7 +4185,9 @@ static noinline_for_stack void CalculateODMMode(
UseDSC,
NumberOfDSCSlices,
TotalNumberOfActiveDPP,
+ TotalNumberOfActiveOPP,
MaxNumDPP,
+ MaxNumOPP,
DISPCLKRequired,
NumberOfDPPRequired,
MaxHActiveForDSC,
@@ -6954,7 +6972,7 @@ static void calculate_bytes_to_fetch_required_to_hide_latency(
stream_index = p->display_cfg->plane_descriptors[plane_index].stream_index;
- dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us /
+ dst_lines_to_hide = (unsigned int)math_ceil(p->latency_to_hide_us[0] /
((double)p->display_cfg->stream_descriptors[stream_index].timing.h_total /
(double)p->display_cfg->stream_descriptors[stream_index].timing.pixel_clock_khz * 1000.0));
@@ -7051,9 +7069,9 @@ static void calculate_excess_vactive_bandwidth_required(
excess_vactive_fill_bw_l[plane_index] = 0.0;
excess_vactive_fill_bw_c[plane_index] = 0.0;
- if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us > 0) {
- excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us;
- excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us;
+ if (display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] > 0) {
+ excess_vactive_fill_bw_l[plane_index] = (double)bytes_required_l[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk];
+ excess_vactive_fill_bw_c[plane_index] = (double)bytes_required_c[plane_index] / (double)display_cfg->plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk];
}
}
}
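Dividing bytes by a deadline in microseconds yields a fill bandwidth in bytes/us; a worked instance of the override above, using the UCLK slot exactly as the code does:

double bytes_required_l = 1258291.0;  /* ~1.2 MiB of luma DET fill */
unsigned int max_delay_us = 100;      /* max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] */
double excess_fill_bw = bytes_required_l / max_delay_us;
/* 12582.91 bytes/us, i.e. ~12.6 GB/s of extra vactive fill bandwidth */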
@@ -8348,6 +8366,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
CalculateSwathAndDETConfiguration(&mode_lib->scratch, CalculateSwathAndDETConfiguration_params);
mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
mode_lib->ms.support.TotalAvailablePipesSupport = true;
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
@@ -8383,7 +8402,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
false, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8402,7 +8423,9 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.max_dispclk_freq_mhz,
true, // DSCEnable
mode_lib->ms.TotalNumberOfActiveDPP,
+ mode_lib->ms.TotalNumberOfActiveOPP,
mode_lib->ip.max_num_dpp,
+ mode_lib->ip.max_num_opp,
((double)display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.pixel_clock_khz / 1000),
mode_lib->ms.support.NumberOfDSCSlices[k],
@@ -8506,20 +8529,23 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
+ mode_lib->ms.NoOfOPP[k] = 1;
if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_4to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 4;
+ mode_lib->ms.NoOfOPP[k] = 4;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_3to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 3;
+ mode_lib->ms.NoOfOPP[k] = 3;
} else if (mode_lib->ms.ODMMode[k] == dml2_odm_mode_combine_2to1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 2;
+ mode_lib->ms.NoOfOPP[k] = 2;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 2) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
} else if (display_cfg->plane_descriptors[k].overrides.mpcc_combine_factor == 1) {
mode_lib->ms.MPCCombine[k] = false;
mode_lib->ms.NoOfDPP[k] = 1;
@@ -8530,7 +8556,6 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
if ((mode_lib->ms.MinDPPCLKUsingSingleDPP[k] > mode_lib->ms.max_dppclk_freq_mhz) || !mode_lib->ms.SingleDPPViewportSizeSupportPerSurface[k]) {
mode_lib->ms.MPCCombine[k] = true;
mode_lib->ms.NoOfDPP[k] = 2;
- mode_lib->ms.TotalNumberOfActiveDPP++;
}
}
#if defined(__DML_VBA_DEBUG__)
@@ -8538,8 +8563,16 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
#endif
}
+ mode_lib->ms.TotalNumberOfActiveDPP = 0;
+ mode_lib->ms.TotalNumberOfActiveOPP = 0;
+ for (k = 0; k < mode_lib->ms.num_active_planes; ++k) {
+ mode_lib->ms.TotalNumberOfActiveDPP += mode_lib->ms.NoOfDPP[k];
+ mode_lib->ms.TotalNumberOfActiveOPP += mode_lib->ms.NoOfOPP[k];
+ }
if (mode_lib->ms.TotalNumberOfActiveDPP > (unsigned int)mode_lib->ip.max_num_dpp)
mode_lib->ms.support.TotalAvailablePipesSupport = false;
+ if (mode_lib->ms.TotalNumberOfActiveOPP > (unsigned int)mode_lib->ip.max_num_opp)
+ mode_lib->ms.support.TotalAvailablePipesSupport = false;
mode_lib->ms.TotalNumberOfSingleDPPSurfaces = 0;
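Total DPP and OPP usage are now recomputed in one pass after per-plane assignment, and a mode is admissible only if both pools fit; the predicate the two checks above implement, condensed into a sketch (helper name illustrative):

/* A mode fits only if both the DPP and the OPP pools can host it. */
static bool pipes_available(unsigned int active_dpp, unsigned int max_dpp,
			    unsigned int active_opp, unsigned int max_opp)
{
	return active_dpp <= max_dpp && active_opp <= max_opp;
}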
@@ -9018,11 +9051,11 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->ms.SwathWidthC;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->ms.SwathHeightY;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->ms.SwathHeightC;
- calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
/* outputs */
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l;
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk];
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk];
calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params);
@@ -9030,8 +9063,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_excess_vactive_bandwidth_required(
display_cfg,
mode_lib->ms.num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
/* outputs */
mode_lib->ms.excess_vactive_fill_bw_l,
mode_lib->ms.excess_vactive_fill_bw_c);
@@ -9473,8 +9506,8 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
calculate_vactive_det_fill_latency(
display_cfg,
mode_lib->ms.num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p0,
mode_lib->ms.dcc_dram_bw_nom_overhead_factor_p1,
mode_lib->ms.vactive_sw_bw_l,
@@ -9482,7 +9515,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.surface_avg_vactive_required_bw,
mode_lib->ms.surface_peak_required_bw,
/* outputs */
- mode_lib->ms.dram_change_vactive_det_fill_delay_us);
+ mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk]);
#ifdef __DML_VBA_DEBUG__
DML_LOG_VERBOSE("DML::%s: max_urgent_latency_us = %f\n", __func__, s->mSOCParameters.max_urgent_latency_us);
@@ -10976,11 +11009,11 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_width_c = mode_lib->mp.SwathWidthC;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_l = mode_lib->mp.SwathHeightY;
calculate_bytes_to_fetch_required_to_hide_latency_params->swath_height_c = mode_lib->mp.SwathHeightC;
- calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->latency_to_hide_us[0] = mode_lib->soc.power_management_parameters.dram_clk_change_blackout_us;
/* outputs */
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l;
- calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c;
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_l = s->pstate_bytes_required_l[dml2_pstate_type_uclk];
+ calculate_bytes_to_fetch_required_to_hide_latency_params->bytes_required_c = s->pstate_bytes_required_c[dml2_pstate_type_uclk];
calculate_bytes_to_fetch_required_to_hide_latency(calculate_bytes_to_fetch_required_to_hide_latency_params);
@@ -10988,8 +11021,8 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
calculate_excess_vactive_bandwidth_required(
display_cfg,
s->num_active_planes,
- s->pstate_bytes_required_l,
- s->pstate_bytes_required_c,
+ s->pstate_bytes_required_l[dml2_pstate_type_uclk],
+ s->pstate_bytes_required_c[dml2_pstate_type_uclk],
/* outputs */
mode_lib->mp.excess_vactive_fill_bw_l,
mode_lib->mp.excess_vactive_fill_bw_c);
@@ -12746,7 +12779,7 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
{
const struct dml2_plane_parameters *plane_descriptor = &display_cfg->display_config.plane_descriptors[plane_index];
const struct dml2_stream_parameters *stream_descriptor = &display_cfg->display_config.stream_descriptors[plane_descriptor->stream_index];
- const struct dml2_fams2_meta *stream_fams2_meta = &display_cfg->stage3.stream_fams2_meta[plane_descriptor->stream_index];
+ const struct dml2_pstate_meta *stream_pstate_meta = &display_cfg->stage3.stream_pstate_meta[plane_descriptor->stream_index];
struct dmub_fams2_cmd_stream_static_base_state *base_programming = &fams2_base_programming->stream_v1.base;
union dmub_fams2_cmd_stream_static_sub_state *sub_programming = &fams2_sub_programming->stream_v1.sub_state;
@@ -12761,24 +12794,24 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* from display configuration */
base_programming->htotal = (uint16_t)stream_descriptor->timing.h_total;
base_programming->vtotal = (uint16_t)stream_descriptor->timing.v_total;
- base_programming->vblank_start = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_start = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch);
- base_programming->vblank_end = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ base_programming->vblank_end = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
stream_descriptor->timing.v_active);
base_programming->config.bits.is_drr = stream_descriptor->timing.drr_config.enabled;
/* from meta */
base_programming->otg_vline_time_ns =
- (unsigned int)(stream_fams2_meta->otg_vline_time_us * 1000.0);
- base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_fams2_meta->scheduling_delay_otg_vlines;
- base_programming->contention_delay_otg_vlines = (uint8_t)stream_fams2_meta->contention_delay_otg_vlines;
- base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines;
- base_programming->drr_keepout_otg_vline = (uint16_t)(stream_fams2_meta->nom_vtotal -
+ (unsigned int)(stream_pstate_meta->otg_vline_time_us * 1000.0);
+ base_programming->scheduling_delay_otg_vlines = (uint8_t)stream_pstate_meta->scheduling_delay_otg_vlines;
+ base_programming->contention_delay_otg_vlines = (uint8_t)stream_pstate_meta->contention_delay_otg_vlines;
+ base_programming->vline_int_ack_delay_otg_vlines = (uint8_t)stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines;
+ base_programming->drr_keepout_otg_vline = (uint16_t)(stream_pstate_meta->nom_vtotal -
stream_descriptor->timing.v_front_porch -
- stream_fams2_meta->method_drr.programming_delay_otg_vlines);
- base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_fams2_meta->allow_to_target_delay_otg_vlines;
- base_programming->max_vtotal = (uint16_t)stream_fams2_meta->max_vtotal;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines);
+ base_programming->allow_to_target_delay_otg_vlines = (uint8_t)stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ base_programming->max_vtotal = (uint16_t)stream_pstate_meta->max_vtotal;
/* from core */
base_programming->config.bits.min_ttu_vblank_usable = true;
@@ -12797,11 +12830,11 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vactive */
base_programming->type = FAMS2_STREAM_TYPE_VACTIVE;
sub_programming->legacy.vactive_det_fill_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vactive.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vactive.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_vblank:
@@ -12809,22 +12842,22 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
/* legacy vblank */
base_programming->type = FAMS2_STREAM_TYPE_VBLANK;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_vblank.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_vblank.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_fw_drr:
/* drr */
base_programming->type = FAMS2_STREAM_TYPE_DRR;
sub_programming->drr.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_drr.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_drr.programming_delay_otg_vlines;
sub_programming->drr.nom_stretched_vtotal =
- (uint16_t)stream_fams2_meta->method_drr.stretched_vtotal;
+ (uint16_t)stream_pstate_meta->method_drr.stretched_vtotal;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_drr.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_drr.common.allow_end_otg_vline;
/* drr only clamps to vtotal min for single display */
base_programming->config.bits.clamp_vtotal_min = display_cfg->display_config.num_streams == 1;
sub_programming->drr.only_stretch_if_required = true;
@@ -12837,13 +12870,13 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
(uint16_t)(plane_descriptor->composition.scaler_info.plane0.v_ratio * 1000.0);
sub_programming->subvp.vratio_denominator = 1000;
sub_programming->subvp.programming_delay_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.programming_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.programming_delay_otg_vlines;
sub_programming->subvp.prefetch_to_mall_otg_vlines =
- (uint8_t)stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
+ (uint8_t)stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines;
sub_programming->subvp.phantom_vtotal =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vtotal;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vtotal;
sub_programming->subvp.phantom_vactive =
- (uint16_t)stream_fams2_meta->method_subvp.phantom_vactive;
+ (uint16_t)stream_pstate_meta->method_subvp.phantom_vactive;
sub_programming->subvp.config.bits.is_multi_planar =
plane_descriptor->surface.plane1.height > 0;
sub_programming->subvp.config.bits.is_yuv420 =
@@ -12852,9 +12885,9 @@ void dml2_core_calcs_get_stream_fams2_programming(const struct dml2_core_interna
plane_descriptor->pixel_format == dml2_420_12;
base_programming->allow_start_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_start_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_start_otg_vline;
base_programming->allow_end_otg_vline =
- (uint16_t)stream_fams2_meta->method_subvp.common.allow_end_otg_vline;
+ (uint16_t)stream_pstate_meta->method_subvp.common.allow_end_otg_vline;
base_programming->config.bits.clamp_vtotal_min = true;
break;
case dml2_pstate_method_reserved_hw:
@@ -12910,7 +12943,8 @@ void dml2_core_calcs_get_plane_support_info(const struct dml2_display_cfg *displ
out->active_latency_hiding_us = (int)mode_lib->ms.VActiveLatencyHidingUs[plane_idx];
- out->dram_change_vactive_det_fill_delay_us = (unsigned int)math_ceil(mode_lib->ms.dram_change_vactive_det_fill_delay_us[plane_idx]);
+ out->vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_ceil(mode_lib->ms.pstate_vactive_det_fill_delay_us[dml2_pstate_type_uclk][plane_idx]);
}
void dml2_core_calcs_get_stream_support_info(const struct dml2_display_cfg *display_cfg, const struct dml2_core_internal_display_mode_lib *mode_lib, struct core_stream_support_info *out, int plane_index)
@@ -12991,7 +13025,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.InvalidCombinationOfMALLUseForPState = mode_lib->ms.support.InvalidCombinationOfMALLUseForPState;
out->informative.mode_support_info.ExceededMALLSize = mode_lib->ms.support.ExceededMALLSize;
out->informative.mode_support_info.EnoughWritebackUnits = mode_lib->ms.support.EnoughWritebackUnits;
- out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.temp_read_or_ppt_support;
+ out->informative.mode_support_info.temp_read_or_ppt_support = mode_lib->ms.support.global_temp_read_or_ppt_supported;
out->informative.mode_support_info.g6_temp_read_support = mode_lib->ms.support.g6_temp_read_support;
out->informative.mode_support_info.ExceededMultistreamSlots = mode_lib->ms.support.ExceededMultistreamSlots;
@@ -13017,7 +13051,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod
out->informative.mode_support_info.VRatioInPrefetchSupported = mode_lib->ms.support.VRatioInPrefetchSupported;
out->informative.mode_support_info.DISPCLK_DPPCLK_Support = mode_lib->ms.support.DISPCLK_DPPCLK_Support;
out->informative.mode_support_info.TotalAvailablePipesSupport = mode_lib->ms.support.TotalAvailablePipesSupport;
+ out->informative.mode_support_info.NumberOfTDLUT33cubeSupport = mode_lib->ms.support.NumberOfTDLUT33cubeSupport;
out->informative.mode_support_info.ViewportSizeSupport = mode_lib->ms.support.ViewportSizeSupport;
+ out->informative.mode_support_info.qos_bandwidth_support = mode_lib->ms.support.qos_bandwidth_support;
+ out->informative.mode_support_info.dcfclk_support = mode_lib->ms.support.dcfclk_support;
for (k = 0; k < out->display_config.num_planes; k++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
index 27ef0e096b25..27ef0e096b25 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4_calcs.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
index 28394de02885..cc4f0663c6d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
@@ -10,11 +10,13 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_core_instance));
+ out->project_id = project_id;
+
switch (project_id) {
case dml2_project_dcn4x_stage1:
result = false;
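
The factory create() paths in this patch replace the `out == 0` comparison with the idiomatic `!out` NULL check, and the core factory additionally stamps the project id on the zeroed instance. A minimal standalone sketch of that pattern, with a deliberately simplified instance struct (the real layout is much larger):

#include <stdbool.h>
#include <string.h>

struct core_instance {
	int project_id;		/* recorded at create time, as in the hunk above */
};

static bool core_create(int project_id, struct core_instance *out)
{
	if (!out)		/* reject a NULL destination before touching memory */
		return false;

	memset(out, 0, sizeof(*out));
	out->project_id = project_id;
	return true;
}

int main(void)
{
	struct core_instance inst;

	return core_create(1, &inst) ? 0 : 1;
}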
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
index 411c514fe65c..411c514fe65c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
index 28687565ac22..1087a8c926ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -36,7 +36,9 @@ struct dml2_core_ip_params {
unsigned int max_line_buffer_lines;
unsigned int writeback_interface_buffer_size_kbytes;
unsigned int max_num_dpp;
+ unsigned int max_num_opp;
unsigned int max_num_otg;
+ unsigned int TDLUT_33cube_count;
unsigned int max_num_wb;
unsigned int max_dchub_pscl_bw_pix_per_clk;
unsigned int max_pscl_lb_bw_pix_per_clk;
@@ -46,6 +48,7 @@ struct dml2_core_ip_params {
double max_vscl_ratio;
unsigned int max_hscl_taps;
unsigned int max_vscl_taps;
+ unsigned int odm_combine_support_mask;
unsigned int num_dsc;
unsigned int maximum_dsc_bits_per_component;
unsigned int maximum_pixels_per_line_per_dsc_unit;
@@ -82,7 +85,6 @@ struct dml2_core_ip_params {
unsigned int subvp_swath_height_margin_lines;
unsigned int subvp_fw_processing_delay_us;
unsigned int subvp_pstate_allow_width_us;
-
// MRQ
bool dcn_mrq_present;
unsigned int zero_size_buffer_entries;
@@ -103,6 +105,8 @@ struct dml2_core_internal_DmlPipe {
unsigned int DPPPerSurface;
bool ScalerEnabled;
bool UPSPEnabled;
+ unsigned int UPSPVTaps;
+ enum dml2_sample_positioning UPSPSamplePositioning;
enum dml2_rotation_angle RotationAngle;
bool mirrored;
unsigned int ViewportHeight;
@@ -201,6 +205,8 @@ struct dml2_core_internal_watermarks {
double WritebackFCLKChangeWatermark;
double StutterExitWatermark;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterExitWatermark;
double Z8StutterEnterPlusExitWatermark;
double USRRetrainingWatermark;
@@ -228,6 +234,7 @@ struct dml2_core_internal_mode_support_info {
bool MSOOrODMSplitWithNonDPLink;
bool NotEnoughLanesForMSO;
bool NumberOfOTGSupport;
+ bool NumberOfTDLUT33cubeSupport;
bool NumberOfHDMIFRLSupport;
bool NumberOfDP2p0Support;
bool WritebackScaleRatioAndTapsSupport;
@@ -257,8 +264,11 @@ struct dml2_core_internal_mode_support_info {
bool DCCMetaBufferSizeNotExceeded;
enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES];
+ bool global_dram_clock_change_support_required;
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
+ bool global_temp_read_or_ppt_supported;
bool USRRetrainingSupport;
bool AvgBandwidthSupport;
bool UrgVactiveBandwidthSupport;
@@ -329,7 +339,6 @@ struct dml2_core_internal_mode_support_info {
bool incorrect_imall_usage;
bool g6_temp_read_support;
- bool temp_read_or_ppt_support;
struct dml2_core_internal_watermarks watermarks;
bool dcfclk_support;
@@ -564,6 +573,7 @@ struct dml2_core_internal_mode_support {
enum dml2_odm_mode ODMMode[DML2_MAX_PLANES];
unsigned int SurfaceSizeInMALL[DML2_MAX_PLANES];
unsigned int NoOfDPP[DML2_MAX_PLANES];
+ unsigned int NoOfOPP[DML2_MAX_PLANES];
bool MPCCombine[DML2_MAX_PLANES];
double dcfclk_deepsleep;
double MinDPPCLKUsingSingleDPP[DML2_MAX_PLANES];
@@ -574,6 +584,7 @@ struct dml2_core_internal_mode_support {
bool PTEBufferSizeNotExceeded[DML2_MAX_PLANES];
bool DCCMetaBufferSizeNotExceeded[DML2_MAX_PLANES];
unsigned int TotalNumberOfActiveDPP;
+ unsigned int TotalNumberOfActiveOPP;
unsigned int TotalNumberOfSingleDPPSurfaces;
unsigned int TotalNumberOfDCCActiveDPP;
unsigned int Total3dlutActive;
@@ -582,7 +593,7 @@ struct dml2_core_internal_mode_support {
double VActiveLatencyHidingMargin[DML2_MAX_PLANES];
double VActiveLatencyHidingUs[DML2_MAX_PLANES];
unsigned int MaxVStartupLines[DML2_MAX_PLANES];
- double dram_change_vactive_det_fill_delay_us[DML2_MAX_PLANES];
+ double pstate_vactive_det_fill_delay_us[dml2_pstate_type_count][DML2_MAX_PLANES];
unsigned int num_mcaches_l[DML2_MAX_PLANES];
unsigned int mcache_row_bytes_l[DML2_MAX_PLANES];
@@ -612,8 +623,8 @@ struct dml2_core_internal_mode_support {
unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES];
unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES];
unsigned int cursor_bytes_per_line[DML2_MAX_PLANES];
@@ -637,7 +648,7 @@ struct dml2_core_internal_mode_support {
unsigned int DSTYAfterScaler[DML2_MAX_PLANES];
unsigned int DSTXAfterScaler[DML2_MAX_PLANES];
- enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
+ enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES];
};
/// @brief A mega structure that houses various info for model programming step.
@@ -828,6 +839,7 @@ struct dml2_core_internal_mode_program {
double max_urgent_latency_us;
double df_response_time_us;
+ enum dml2_pstate_method uclk_pstate_switch_modes[DML2_MAX_PLANES];
// -------------------
// Output
// -------------------
@@ -877,6 +889,9 @@ struct dml2_core_internal_mode_program {
double Z8StutterEfficiency;
unsigned int Z8NumberOfStutterBurstsPerFrame;
double Z8StutterEfficiencyNotIncludingVBlank;
+ double LowPowerStutterEfficiency;
+ double LowPowerStutterEfficiencyNotIncludingVBlank;
+ unsigned int LowPowerNumberOfStutterBurstsPerFrame;
double StutterPeriod;
double Z8StutterEfficiencyBestCase;
unsigned int Z8NumberOfStutterBurstsPerFrameBestCase;
@@ -951,11 +966,12 @@ struct dml2_core_internal_mode_program {
double MaxActiveFCLKChangeLatencySupported;
bool USRRetrainingSupport;
bool g6_temp_read_support;
- bool temp_read_or_ppt_support;
enum dml2_pstate_change_support FCLKChangeSupport[DML2_MAX_PLANES];
enum dml2_pstate_change_support DRAMClockChangeSupport[DML2_MAX_PLANES];
+ enum dml2_pstate_change_support temp_read_or_ppt_support[DML2_MAX_PLANES];
bool global_dram_clock_change_supported;
bool global_fclk_change_supported;
+ bool global_temp_read_or_ppt_supported;
double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES];
double WritebackAllowFCLKChangeEndPosition[DML2_MAX_PLANES];
double WritebackAllowDRAMClockChangeEndPosition[DML2_MAX_PLANES];
@@ -1016,6 +1032,8 @@ struct dml2_core_internal_SOCParametersList {
double FCLKChangeLatency;
double SRExitTime;
double SREnterPlusExitTime;
+ double SRExitTimeLowPower;
+ double SREnterPlusExitTimeLowPower;
double SRExitZ8Time;
double SREnterPlusExitZ8Time;
double USRRetrainingLatency;
@@ -1120,8 +1138,8 @@ struct dml2_core_calcs_mode_support_locals {
unsigned int cursor_bytes[DML2_MAX_PLANES];
bool stream_visited[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
double prefetch_sw_bytes[DML2_MAX_PLANES];
double Tpre_rounded[DML2_MAX_PLANES];
@@ -1212,8 +1230,8 @@ struct dml2_core_calcs_mode_programming_locals {
double Tr0_trips_flip_rounded[DML2_MAX_PLANES];
unsigned int per_pipe_flip_bytes[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_l[DML2_MAX_PLANES];
- unsigned int pstate_bytes_required_c[DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_l[dml2_pstate_type_count][DML2_MAX_PLANES];
+ unsigned int pstate_bytes_required_c[dml2_pstate_type_count][DML2_MAX_PLANES];
double prefetch_sw_bytes[DML2_MAX_PLANES];
double Tpre_rounded[DML2_MAX_PLANES];
@@ -1299,7 +1317,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params {
unsigned int HostVMMinPageSize;
unsigned int DCCMetaBufferSizeBytes;
bool mrq_present;
- enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES];
+ enum dml2_pstate_method *uclk_pstate_switch_modes;
// Output
bool *PTEBufferSizeNotExceeded;
@@ -1726,10 +1744,12 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
unsigned int max_request_size_bytes;
unsigned int *meta_row_height_l;
unsigned int *meta_row_height_c;
+ enum dml2_pstate_method *uclk_pstate_switch_modes;
// Output
struct dml2_core_internal_watermarks *Watermark;
enum dml2_pstate_change_support *DRAMClockChangeSupport;
+ bool *global_dram_clock_change_support_required;
bool *global_dram_clock_change_supported;
double *MaxActiveDRAMClockChangeLatencySupported;
unsigned int *SubViewportLinesNeededInMALL;
@@ -1740,10 +1760,10 @@ struct dml2_core_calcs_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport_param
double *VActiveLatencyHidingMargin;
double *VActiveLatencyHidingUs;
bool *g6_temp_read_support;
- bool *temp_read_or_ppt_support;
+ enum dml2_pstate_change_support *temp_read_or_ppt_support;
+ bool *global_temp_read_or_ppt_supported;
};
-
struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params {
const struct dml2_display_cfg *display_cfg;
unsigned int ConfigReturnBufferSizeInKByte;
@@ -1851,9 +1871,11 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
unsigned int CompbufReservedSpaceZs;
bool hw_debug5;
double SRExitTime;
+ double SRExitTimeLowPower;
double SRExitZ8Time;
bool SynchronizeTimings;
double StutterEnterPlusExitWatermark;
+ double LowPowerStutterEnterPlusExitWatermark;
double Z8StutterEnterPlusExitWatermark;
bool ProgressiveToInterlaceUnitInOPP;
double *MinTTUVBlank;
@@ -1879,7 +1901,10 @@ struct dml2_core_calcs_CalculateStutterEfficiency_params {
// output
double *StutterEfficiencyNotIncludingVBlank;
double *StutterEfficiency;
+ double *LowPowerStutterEfficiencyNotIncludingVBlank;
+ double *LowPowerStutterEfficiency;
unsigned int *NumberOfStutterBurstsPerFrame;
+ unsigned int *LowPowerNumberOfStutterBurstsPerFrame;
double *Z8StutterEfficiencyNotIncludingVBlank;
double *Z8StutterEfficiency;
unsigned int *Z8NumberOfStutterBurstsPerFrame;
@@ -2228,7 +2253,7 @@ struct dml2_core_calcs_calculate_bytes_to_fetch_required_to_hide_latency_params
unsigned int *swath_width_c;
unsigned int *swath_height_l;
unsigned int *swath_height_c;
- double latency_to_hide_us;
+ double latency_to_hide_us[DML2_MAX_PLANES];
/* outputs */
unsigned int *bytes_required_l;
@@ -2296,6 +2321,7 @@ struct dml2_core_calcs_mode_support_ex {
const struct dml2_display_cfg *in_display_cfg;
const struct dml2_mcg_min_clock_table *min_clk_table;
int min_clk_index;
+ enum dml2_project_id project_id;
//unsigned int in_state_index;
struct dml2_core_internal_mode_support_info *out_evaluation_info;
};
@@ -2308,6 +2334,7 @@ struct dml2_core_calcs_mode_programming_ex {
const struct dml2_mcg_min_clock_table *min_clk_table;
const struct core_display_cfg_support_info *cfg_support_info;
int min_clk_index;
+ enum dml2_project_id project_id;
struct dml2_display_cfg_programming *programming;
};
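
A recurring theme in the shared-types changes is promoting per-plane arrays (pstate_bytes_required_l/_c, pstate_vactive_det_fill_delay_us, latency_to_hide_us) to carry a leading dml2_pstate_type_count dimension, so each p-state type tracks its own value per plane. A sketch of the resulting indexing; the enumerators beyond dml2_pstate_type_uclk and the plane count are assumptions, not taken from this patch:

/* Illustrative only: enum contents and MAX_PLANES are invented. */
enum pstate_type { pstate_type_uclk, pstate_type_fclk, pstate_type_count };
#define MAX_PLANES 8

static unsigned int bytes_required_l[pstate_type_count][MAX_PLANES];

static unsigned int worst_case_bytes(int num_planes)
{
	unsigned int worst = 0;

	/* scan every p-state type for every active plane */
	for (int t = 0; t < pstate_type_count; t++)
		for (int p = 0; p < num_planes; p++)
			if (bytes_required_l[t][p] > worst)
				worst = bytes_required_l[t][p];
	return worst;
}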
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
index 5f301befed16..b57d0f6ea6a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
@@ -306,6 +306,8 @@ void dml2_core_utils_print_mode_support_info(const struct dml2_core_internal_mod
DML_LOG_VERBOSE("DML: support: ExceededMALLSize = %d\n", support->ExceededMALLSize);
if (!fail_only || support->g6_temp_read_support == 0)
DML_LOG_VERBOSE("DML: support: g6_temp_read_support = %d\n", support->g6_temp_read_support);
+ if (!fail_only || (support->global_dram_clock_change_supported == 0 && support->global_dram_clock_change_support_required))
+ DML_LOG_VERBOSE("DML: support: dram_clock_change_support = %d\n", support->global_dram_clock_change_supported);
if (!fail_only || support->ImmediateFlipSupport == 0)
DML_LOG_VERBOSE("DML: support: ImmediateFlipSupport = %d\n", support->ImmediateFlipSupport);
if (!fail_only || support->LinkCapacitySupport == 0)
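
The new log line above is gated so that in fail-only mode it prints only when a DRAM clock change was actually required and not achieved, rather than on every unsupported-but-irrelevant case. The shape of that predicate, sketched with simplified names:

#include <stdbool.h>
#include <stdio.h>

/* Fail-only gating sketch: always log in full-verbose mode; in
 * fail-only mode, log only required-yet-unsupported conditions. */
static void log_dram_clock_change(bool fail_only, bool supported, bool required)
{
	if (!fail_only || (!supported && required))
		printf("dram_clock_change_support = %d\n", supported);
}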
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
index 95f0d017add4..95f0d017add4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 22969a533a7b..22969a533a7b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index e7b58f2efda4..e7b58f2efda4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index 3861bc6c9621..dfd01440737d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -20,7 +20,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_dpmm_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
index 20ba2e446f1d..20ba2e446f1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
index a265f254152c..a265f254152c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
index 02da6f45cbf7..f54fde8fba90 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
@@ -10,4 +10,4 @@
bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool mcg_dcn4_unit_test(void);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
index cd3fbc0591d8..c60b8fe90819 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -15,7 +15,7 @@ bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_mcg_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
index ad307deca3b0..ad307deca3b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index e763c8e45da8..1b9579a32ff2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -48,18 +48,19 @@ static void set_reserved_time_on_all_planes_with_stream_index(struct display_con
static void remove_duplicates(double *list_a, int *list_a_size)
{
- int cur_element = 0;
- // For all elements b[i] in list_b[]
- while (cur_element < *list_a_size - 1) {
- if (list_a[cur_element] == list_a[cur_element + 1]) {
- for (int j = cur_element + 1; j < *list_a_size - 1; j++) {
- list_a[j] = list_a[j + 1];
- }
- *list_a_size = *list_a_size - 1;
- } else {
- cur_element++;
+ int j = 0;
+
+ if (*list_a_size == 0)
+ return;
+
+ for (int i = 1; i < *list_a_size; i++) {
+ if (list_a[j] != list_a[i]) {
+ j++;
+ list_a[j] = list_a[i];
}
}
+
+ *list_a_size = j + 1;
}
static bool increase_mpc_combine_factor(unsigned int *mpc_combine_factor, unsigned int limit)
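
The rewritten remove_duplicates() is the classic in-place compaction of a sorted array: a write cursor j trails the read cursor i, and only values distinct from the last kept element advance j. A standalone sketch with a usage example:

#include <stdio.h>

/* In-place dedup of a sorted array, mirroring the rewrite above. */
static void remove_duplicates(double *list, int *size)
{
	int j = 0;

	if (*size == 0)
		return;

	for (int i = 1; i < *size; i++) {
		if (list[j] != list[i]) {
			j++;
			list[j] = list[i];	/* keep first of each run */
		}
	}
	*size = j + 1;
}

int main(void)
{
	double v[] = { 1.0, 1.0, 2.0, 3.0, 3.0 };
	int n = 5;

	remove_duplicates(v, &n);
	printf("%d\n", n);	/* prints 3 */
	return 0;
}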
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
index f00bd9e72a86..f00bd9e72a86 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index d88b3e0082dd..c26e100fcaf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -642,6 +642,11 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
int i = 0;
struct dml2_pmo_instance *pmo = in_out->instance;
+ unsigned int base_list_size = 0;
+ const struct dml2_pmo_pstate_strategy *base_list = NULL;
+ unsigned int *expanded_list_size = NULL;
+ struct dml2_pmo_pstate_strategy *expanded_list = NULL;
+
pmo->soc_bb = in_out->soc_bb;
pmo->ip_caps = in_out->ip_caps;
pmo->mpc_combine_limit = 2;
@@ -656,53 +661,71 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
pmo->options = in_out->options;
/* generate permutations of p-state configs from base strategy list */
- for (i = 1; i <= PMO_DCN4_MAX_DISPLAYS; i++) {
- switch (i) {
+ for (i = 0; i < PMO_DCN4_MAX_DISPLAYS; i++) {
+ switch (i+1) {
case 1:
- DML_ASSERT(base_strategy_list_1_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_1_display,
- base_strategy_list_1_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_1_display;
+ base_list_size = base_strategy_list_1_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_1_display;
+
break;
case 2:
- DML_ASSERT(base_strategy_list_2_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_2_display,
- base_strategy_list_2_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_2_display;
+ base_list_size = base_strategy_list_2_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_2_display;
+
break;
case 3:
- DML_ASSERT(base_strategy_list_3_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_3_display,
- base_strategy_list_3_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_3_display;
+ base_list_size = base_strategy_list_3_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_3_display;
+
break;
case 4:
- DML_ASSERT(base_strategy_list_4_display_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
-
- /* populate list */
- pmo_dcn4_fams2_expand_base_pstate_strategies(
- base_strategy_list_4_display,
- base_strategy_list_4_display_size,
- i,
- pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display,
- &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i - 1]);
+ if (pmo->options->override_strategy_lists[i] && pmo->options->num_override_strategies_per_list[i]) {
+ base_list = pmo->options->override_strategy_lists[i];
+ base_list_size = pmo->options->num_override_strategies_per_list[i];
+ } else {
+ base_list = base_strategy_list_4_display;
+ base_list_size = base_strategy_list_4_display_size;
+ }
+
+ expanded_list_size = &pmo->init_data.pmo_dcn4.num_expanded_strategies_per_list[i];
+ expanded_list = pmo->init_data.pmo_dcn4.expanded_strategy_list_4_display;
+
break;
}
+
+ DML_ASSERT(base_list_size <= PMO_DCN4_MAX_BASE_STRATEGIES);
+
+ /* populate list */
+ pmo_dcn4_fams2_expand_base_pstate_strategies(
+ base_list,
+ base_list_size,
+ i + 1,
+ expanded_list,
+ expanded_list_size);
}
return true;
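
After the rewrite, each switch case only selects a (base list, size, expanded-list destination) triple, taking a caller-supplied override when present and falling back to the built-in table otherwise; the assertion and expansion call are hoisted out of the switch so they appear once. A hedged sketch of the same selection logic with invented names standing in for the base_strategy_list_N_display tables (the stub expand() omits the real permutation work):

#define MAX_DISPLAYS 4

struct strategy { int method; };

/* Illustrative stand-ins for the built-in tables and the overrides. */
static const struct strategy *default_list[MAX_DISPLAYS];
static unsigned int default_size[MAX_DISPLAYS];
static const struct strategy *override_list[MAX_DISPLAYS];
static unsigned int override_size[MAX_DISPLAYS];

static void expand(const struct strategy *base, unsigned int n,
		   unsigned int num_displays)
{
	/* the real code permutes base strategies here; omitted */
	(void)base; (void)n; (void)num_displays;
}

static void init_strategies(void)
{
	for (int i = 0; i < MAX_DISPLAYS; i++) {
		const struct strategy *base = default_list[i];
		unsigned int n = default_size[i];

		/* a caller-supplied override replaces the default list */
		if (override_list[i] && override_size[i]) {
			base = override_list[i];
			n = override_size[i];
		}
		expand(base, n, i + 1);
	}
}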
@@ -1026,13 +1049,13 @@ static bool all_timings_support_vblank(const struct dml2_pmo_instance *pmo,
return synchronizable;
}
-static unsigned int calc_svp_microschedule(const struct dml2_fams2_meta *fams2_meta)
+static unsigned int calc_svp_microschedule(const struct dml2_pstate_meta *pstate_meta)
{
- return fams2_meta->contention_delay_otg_vlines +
- fams2_meta->method_subvp.programming_delay_otg_vlines +
- fams2_meta->method_subvp.phantom_vtotal +
- fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- fams2_meta->dram_clk_change_blackout_otg_vlines;
+ return pstate_meta->contention_delay_otg_vlines +
+ pstate_meta->method_subvp.programming_delay_otg_vlines +
+ pstate_meta->method_subvp.phantom_vtotal +
+ pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ pstate_meta->blackout_otg_vlines;
}
static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
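
calc_svp_microschedule() above is a plain sum of five vline budgets. For a feel of the magnitudes, a worked example; every number below is invented for illustration, none comes from the driver:

	contention_delay_otg_vlines                    =   4
	method_subvp.programming_delay_otg_vlines      =   6
	method_subvp.phantom_vtotal                    =  90
	method_subvp.prefetch_to_mall_delay_otg_vlines =   5
	blackout_otg_vlines                            =  30
	-------------------------------------------------------
	microschedule_vlines                           = 135

Per the check in all_timings_support_svp() below, this sum must stay strictly below the stream's v_active for SubVP to remain viable.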
@@ -1042,29 +1065,29 @@ static bool all_timings_support_drr(const struct dml2_pmo_instance *pmo,
unsigned int i;
for (i = 0; i < DML2_MAX_PLANES; i++) {
const struct dml2_stream_parameters *stream_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (!stream_descriptor->timing.drr_config.enabled)
return false;
/* cannot support required vtotal */
- if (stream_fams2_meta->method_drr.stretched_vtotal > stream_fams2_meta->max_vtotal) {
+ if (stream_pstate_meta->method_drr.stretched_vtotal > stream_pstate_meta->max_vtotal) {
return false;
}
/* check rr is within bounds */
- if (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
+ if (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.drr.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.drr.refresh_rate_limit_max) {
return false;
}
/* check required stretch is allowed */
if (stream_descriptor->timing.drr_config.max_instant_vtotal_delta > 0 &&
- stream_fams2_meta->method_drr.stretched_vtotal - stream_fams2_meta->nom_vtotal > stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
+ stream_pstate_meta->method_drr.stretched_vtotal - stream_pstate_meta->nom_vtotal > (int)stream_descriptor->timing.drr_config.max_instant_vtotal_delta) {
return false;
}
}
@@ -1079,7 +1102,7 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
{
const struct dml2_stream_parameters *stream_descriptor;
const struct dml2_plane_parameters *plane_descriptor;
- const struct dml2_fams2_meta *stream_fams2_meta;
+ const struct dml2_pstate_meta *stream_pstate_meta;
unsigned int microschedule_vlines;
unsigned int i;
unsigned int mcaches_per_plane;
@@ -1124,13 +1147,13 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(mask, i)) {
stream_descriptor = &display_config->display_config.stream_descriptors[i];
- stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[i];
+ stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[i];
if (stream_descriptor->overrides.disable_subvp) {
return false;
}
- microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_fams2_meta[i]);
+ microschedule_vlines = calc_svp_microschedule(&pmo->scratch.pmo_dcn4.stream_pstate_meta[i]);
/* block if using an interlaced timing */
if (stream_descriptor->timing.interlaced) {
@@ -1141,8 +1164,8 @@ static bool all_timings_support_svp(const struct dml2_pmo_instance *pmo,
* 2) refresh rate must be within the allowed bounds
*/
if (microschedule_vlines >= stream_descriptor->timing.v_active ||
- (stream_fams2_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
- stream_fams2_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
+ (stream_pstate_meta->nom_refresh_rate_hz < pmo->fams_params.v2.subvp.refresh_rate_limit_min ||
+ stream_pstate_meta->nom_refresh_rate_hz > pmo->fams_params.v2.subvp.refresh_rate_limit_max)) {
return false;
}
}
@@ -1232,43 +1255,43 @@ static bool all_planes_match_method(const struct display_configuation_with_meta
}
static void build_method_scheduling_params(
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta,
- struct dml2_fams2_meta *stream_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta,
+ struct dml2_pstate_meta *stream_pstate_meta)
{
- stream_method_fams2_meta->allow_time_us =
- (double)((int)stream_method_fams2_meta->allow_end_otg_vline - (int)stream_method_fams2_meta->allow_start_otg_vline) *
- stream_fams2_meta->otg_vline_time_us;
- if (stream_method_fams2_meta->allow_time_us >= stream_method_fams2_meta->period_us) {
+ stream_method_pstate_meta->allow_time_us =
+ (double)((int)stream_method_pstate_meta->allow_end_otg_vline - (int)stream_method_pstate_meta->allow_start_otg_vline) *
+ stream_pstate_meta->otg_vline_time_us;
+ if (stream_method_pstate_meta->allow_time_us >= stream_method_pstate_meta->period_us) {
/* when allow wave overlaps an entire frame, it is always schedulable (DRR can do this)*/
- stream_method_fams2_meta->disallow_time_us = 0.0;
+ stream_method_pstate_meta->disallow_time_us = 0.0;
} else {
- stream_method_fams2_meta->disallow_time_us =
- stream_method_fams2_meta->period_us - stream_method_fams2_meta->allow_time_us;
+ stream_method_pstate_meta->disallow_time_us =
+ stream_method_pstate_meta->period_us - stream_method_pstate_meta->allow_time_us;
}
}
-static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
+static struct dml2_pstate_per_method_common_meta *get_per_method_common_meta(
struct dml2_pmo_instance *pmo,
enum dml2_pstate_method stream_pstate_method,
int stream_idx)
{
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta = NULL;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta = NULL;
switch (stream_pstate_method) {
case dml2_pstate_method_vactive:
case dml2_pstate_method_fw_vactive_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vactive.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vactive.common;
break;
case dml2_pstate_method_vblank:
case dml2_pstate_method_fw_vblank_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_vblank.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_vblank.common;
break;
case dml2_pstate_method_fw_svp:
case dml2_pstate_method_fw_svp_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_subvp.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_subvp.common;
break;
case dml2_pstate_method_fw_drr:
- stream_method_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_idx].method_drr.common;
+ stream_method_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_idx].method_drr.common;
break;
case dml2_pstate_method_reserved_hw:
case dml2_pstate_method_reserved_fw:
@@ -1277,10 +1300,10 @@ static struct dml2_fams2_per_method_common_meta *get_per_method_common_meta(
case dml2_pstate_method_count:
case dml2_pstate_method_na:
default:
- stream_method_fams2_meta = NULL;
+ stream_method_pstate_meta = NULL;
}
- return stream_method_fams2_meta;
+ return stream_method_pstate_meta;
}
static bool is_timing_group_schedulable(
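
build_method_scheduling_params() (renamed above) converts an allow window expressed in OTG vlines into microseconds and derives the complementary disallow time. Restated compactly, and equivalently to the branch in the code (the clamp covers the DRR case where the allow wave spans a whole frame):

	allow_time_us    = (allow_end_otg_vline - allow_start_otg_vline) * otg_vline_time_us
	disallow_time_us = max(0, period_us - allow_time_us)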
@@ -1288,10 +1311,10 @@ static bool is_timing_group_schedulable(
const struct display_configuation_with_meta *display_cfg,
const struct dml2_pmo_pstate_strategy *pstate_strategy,
const unsigned int timing_group_idx,
- struct dml2_fams2_per_method_common_meta *group_fams2_meta)
+ struct dml2_pstate_per_method_common_meta *group_pstate_meta)
{
unsigned int i;
- struct dml2_fams2_per_method_common_meta *stream_method_fams2_meta;
+ struct dml2_pstate_per_method_common_meta *stream_method_pstate_meta;
unsigned int base_stream_idx = 0;
struct dml2_pmo_scratch *s = &pmo->scratch;
@@ -1305,31 +1328,31 @@ static bool is_timing_group_schedulable(
}
/* init allow start and end lines for timing group */
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[base_stream_idx], base_stream_idx);
+ if (!stream_method_pstate_meta)
return false;
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
- group_fams2_meta->period_us = stream_method_fams2_meta->period_us;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
+ group_pstate_meta->period_us = stream_method_pstate_meta->period_us;
for (i = base_stream_idx + 1; i < display_cfg->display_config.num_streams; i++) {
if (is_bit_set_in_bitfield(pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], i)) {
- stream_method_fams2_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
- if (!stream_method_fams2_meta)
+ stream_method_pstate_meta = get_per_method_common_meta(pmo, pstate_strategy->per_stream_pstate_method[i], i);
+ if (!stream_method_pstate_meta)
continue;
- if (group_fams2_meta->allow_start_otg_vline < stream_method_fams2_meta->allow_start_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline < stream_method_pstate_meta->allow_start_otg_vline) {
/* set group allow start to larger otg vline */
- group_fams2_meta->allow_start_otg_vline = stream_method_fams2_meta->allow_start_otg_vline;
+ group_pstate_meta->allow_start_otg_vline = stream_method_pstate_meta->allow_start_otg_vline;
}
- if (group_fams2_meta->allow_end_otg_vline > stream_method_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_end_otg_vline > stream_method_pstate_meta->allow_end_otg_vline) {
/* set group allow end to smaller otg vline */
- group_fams2_meta->allow_end_otg_vline = stream_method_fams2_meta->allow_end_otg_vline;
+ group_pstate_meta->allow_end_otg_vline = stream_method_pstate_meta->allow_end_otg_vline;
}
/* check waveform still has positive width */
- if (group_fams2_meta->allow_start_otg_vline >= group_fams2_meta->allow_end_otg_vline) {
+ if (group_pstate_meta->allow_start_otg_vline >= group_pstate_meta->allow_end_otg_vline) {
/* timing group is not schedulable */
return false;
}
@@ -1337,10 +1360,10 @@ static bool is_timing_group_schedulable(
}
/* calculate the rest of the meta */
- build_method_scheduling_params(group_fams2_meta, &pmo->scratch.pmo_dcn4.stream_fams2_meta[base_stream_idx]);
+ build_method_scheduling_params(group_pstate_meta, &pmo->scratch.pmo_dcn4.stream_pstate_meta[base_stream_idx]);
- return group_fams2_meta->allow_time_us > 0.0 &&
- group_fams2_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
+ return group_pstate_meta->allow_time_us > 0.0 &&
+ group_pstate_meta->disallow_time_us < pmo->ip_caps->fams2.max_allow_delay_us;
}
static bool is_config_schedulable(
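
is_timing_group_schedulable() above is an interval intersection over the per-stream allow windows of a synchronized timing group: the group start is raised to the latest stream start, the group end lowered to the earliest stream end, and a non-positive width fails the group. A minimal sketch of that intersection step:

/* Narrow the group's [start, end) allow window by one stream's window;
 * returns nonzero while a positive width survives. */
struct window { unsigned int start, end; };

static int intersect(struct window *group, const struct window *stream)
{
	if (group->start < stream->start)
		group->start = stream->start;	/* later start wins */
	if (group->end > stream->end)
		group->end = stream->end;	/* earlier end wins */
	return group->start < group->end;
}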
@@ -1354,7 +1377,7 @@ static bool is_config_schedulable(
double max_allow_delay_us = 0.0;
- memset(s->pmo_dcn4.group_common_fams2_meta, 0, sizeof(s->pmo_dcn4.group_common_fams2_meta));
+ memset(s->pmo_dcn4.group_common_pstate_meta, 0, sizeof(s->pmo_dcn4.group_common_pstate_meta));
memset(s->pmo_dcn4.sorted_group_gtl_disallow_index, 0, sizeof(unsigned int) * DML2_MAX_PLANES);
/* search for a general solution to the schedule */
@@ -1369,12 +1392,12 @@ static bool is_config_schedulable(
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
s->pmo_dcn4.sorted_group_gtl_disallow_index[i] = i;
s->pmo_dcn4.sorted_group_gtl_period_index[i] = i;
- if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_fams2_meta[i])) {
+ if (!is_timing_group_schedulable(pmo, display_cfg, pstate_strategy, i, &s->pmo_dcn4.group_common_pstate_meta[i])) {
/* synchronized timing group was not schedulable */
schedulable = false;
break;
}
- max_allow_delay_us += s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us;
+ max_allow_delay_us += s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us;
}
if ((schedulable && s->pmo_dcn4.num_timing_groups <= 1) || !schedulable) {
@@ -1391,8 +1414,8 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
- double jp1_disallow_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
+ double j_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j]].disallow_time_us;
+ double jp1_disallow_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_disallow_index[j + 1]].disallow_time_us;
if (j_disallow_us < jp1_disallow_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_disallow_index[j],
@@ -1410,19 +1433,19 @@ static bool is_config_schedulable(
* other display, or when >2 streams continue to halve the remaining allow time.
*/
for (i = 0; i < s->pmo_dcn4.num_timing_groups; i++) {
- if (s->pmo_dcn4.group_common_fams2_meta[i].disallow_time_us <= 0.0) {
+ if (s->pmo_dcn4.group_common_pstate_meta[i].disallow_time_us <= 0.0) {
/* this timing group always allows */
continue;
}
- double max_allow_time_us = s->pmo_dcn4.group_common_fams2_meta[i].allow_time_us;
+ double max_allow_time_us = s->pmo_dcn4.group_common_pstate_meta[i].allow_time_us;
for (j = 0; j < s->pmo_dcn4.num_timing_groups; j++) {
unsigned int sorted_j = s->pmo_dcn4.sorted_group_gtl_disallow_index[j];
/* stream can't overlap itself */
- if (i != sorted_j && s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us > 0.0) {
+ if (i != sorted_j && s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us > 0.0) {
max_allow_time_us = math_min2(
- s->pmo_dcn4.group_common_fams2_meta[sorted_j].allow_time_us,
- (max_allow_time_us - s->pmo_dcn4.group_common_fams2_meta[sorted_j].disallow_time_us) / 2);
+ s->pmo_dcn4.group_common_pstate_meta[sorted_j].allow_time_us,
+ (max_allow_time_us - s->pmo_dcn4.group_common_pstate_meta[sorted_j].disallow_time_us) / 2);
if (max_allow_time_us < 0.0) {
/* failed exit early */
@@ -1450,8 +1473,8 @@ static bool is_config_schedulable(
bool swapped = false;
for (j = 0; j < s->pmo_dcn4.num_timing_groups - 1; j++) {
- double j_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
- double jp1_period_us = s->pmo_dcn4.group_common_fams2_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
+ double j_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j]].period_us;
+ double jp1_period_us = s->pmo_dcn4.group_common_pstate_meta[s->pmo_dcn4.sorted_group_gtl_period_index[j + 1]].period_us;
if (j_period_us < jp1_period_us) {
/* swap as A < B */
swap(s->pmo_dcn4.sorted_group_gtl_period_index[j],
@@ -1470,7 +1493,7 @@ static bool is_config_schedulable(
unsigned int sorted_i = s->pmo_dcn4.sorted_group_gtl_period_index[i];
unsigned int sorted_ip1 = s->pmo_dcn4.sorted_group_gtl_period_index[i + 1];
- if (s->pmo_dcn4.group_common_fams2_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_fams2_meta[sorted_ip1].period_us ||
+ if (s->pmo_dcn4.group_common_pstate_meta[sorted_i].allow_time_us < s->pmo_dcn4.group_common_pstate_meta[sorted_ip1].period_us ||
(s->pmo_dcn4.group_is_drr_enabled[sorted_ip1] && s->pmo_dcn4.group_is_drr_active[sorted_ip1])) {
schedulable = false;
break;
@@ -1492,18 +1515,18 @@ static bool is_config_schedulable(
/* default period_0 > period_1 */
unsigned int lrg_idx = 0;
unsigned int sml_idx = 1;
- if (s->pmo_dcn4.group_common_fams2_meta[0].period_us < s->pmo_dcn4.group_common_fams2_meta[1].period_us) {
+ if (s->pmo_dcn4.group_common_pstate_meta[0].period_us < s->pmo_dcn4.group_common_pstate_meta[1].period_us) {
/* period_0 < period_1 */
lrg_idx = 1;
sml_idx = 0;
}
- period_ratio = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us;
- shift_per_period = s->pmo_dcn4.group_common_fams2_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
- max_shift_us = s->pmo_dcn4.group_common_fams2_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us;
- max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_fams2_meta[lrg_idx].period_us;
+ period_ratio = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us / s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us;
+ shift_per_period = s->pmo_dcn4.group_common_pstate_meta[sml_idx].period_us * (period_ratio - math_floor(period_ratio));
+ max_shift_us = s->pmo_dcn4.group_common_pstate_meta[lrg_idx].disallow_time_us - s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us;
+ max_allow_delay_us = max_shift_us / shift_per_period * s->pmo_dcn4.group_common_pstate_meta[lrg_idx].period_us;
if (shift_per_period > 0.0 &&
- shift_per_period < s->pmo_dcn4.group_common_fams2_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_fams2_meta[sml_idx].allow_time_us &&
+ shift_per_period < s->pmo_dcn4.group_common_pstate_meta[lrg_idx].allow_time_us + s->pmo_dcn4.group_common_pstate_meta[sml_idx].allow_time_us &&
max_allow_delay_us < pmo->ip_caps->fams2.max_allow_delay_us) {
schedulable = true;
}
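
In the two-display special case above, the allow windows of the two timing groups drift past each other by a fixed amount each period of the shorter stream. Writing T_l and T_s for the larger and smaller periods, the hunk computes:

	shift_per_period = T_s * (T_l/T_s - floor(T_l/T_s))
	max_shift        = disallow_l - allow_s
	max_allow_delay  = (max_shift / shift_per_period) * T_l

and declares the pair schedulable when shift_per_period > 0, shift_per_period < allow_l + allow_s, and max_allow_delay stays under the FAMS2 max_allow_delay_us cap.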
@@ -1646,22 +1669,22 @@ static int get_vactive_pstate_margin(const struct display_configuation_with_meta
return min_vactive_margin_us;
}
-static unsigned int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
+static int get_vactive_det_fill_latency_delay_us(const struct display_configuation_with_meta *display_cfg, int plane_mask)
{
unsigned char i;
- unsigned int max_vactive_fill_us = 0;
+ int max_vactive_fill_us = 0;
for (i = 0; i < DML2_MAX_PLANES; i++) {
if (is_bit_set_in_bitfield(plane_mask, i)) {
- if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us > max_vactive_fill_us)
- max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].dram_change_vactive_det_fill_delay_us;
+ if (display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk] > max_vactive_fill_us)
+ max_vactive_fill_us = display_cfg->mode_support_result.cfg_support_info.plane_support_info[i].vactive_det_fill_delay_us[dml2_pstate_type_uclk];
}
}
return max_vactive_fill_us;
}
-static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
+static void build_pstate_meta_per_stream(struct dml2_pmo_instance *pmo,
struct display_configuation_with_meta *display_config,
int stream_index)
{
@@ -1669,7 +1692,7 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
const struct dml2_stream_parameters *stream_descriptor = &display_config->display_config.stream_descriptors[stream_index];
const struct core_stream_support_info *stream_info = &display_config->mode_support_result.cfg_support_info.stream_support_info[stream_index];
const struct dml2_timing_cfg *timing = &stream_descriptor->timing;
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
/* worst case all other streams require some programming at the same time, 0 if only 1 stream */
unsigned int contention_delay_us = (ip_caps->fams2.vertical_interrupt_ack_delay_us +
@@ -1677,142 +1700,142 @@ static void build_fams2_meta_per_stream(struct dml2_pmo_instance *pmo,
(display_config->display_config.num_streams - 1);
/* common */
- stream_fams2_meta->valid = true;
- stream_fams2_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
- stream_fams2_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
- stream_fams2_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->nom_vtotal * timing->h_total);
- stream_fams2_meta->nom_frame_time_us =
- (double)stream_fams2_meta->nom_vtotal * stream_fams2_meta->otg_vline_time_us;
- stream_fams2_meta->vblank_start = timing->v_blank_end + timing->v_active;
+ stream_pstate_meta->valid = true;
+ stream_pstate_meta->otg_vline_time_us = (double)timing->h_total / timing->pixel_clock_khz * 1000.0;
+ stream_pstate_meta->nom_vtotal = stream_descriptor->timing.vblank_nom + stream_descriptor->timing.v_active;
+ stream_pstate_meta->nom_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->nom_vtotal * timing->h_total);
+ stream_pstate_meta->nom_frame_time_us =
+ (double)stream_pstate_meta->nom_vtotal * stream_pstate_meta->otg_vline_time_us;
+ stream_pstate_meta->vblank_start = timing->v_blank_end + timing->v_active;
if (stream_descriptor->timing.drr_config.enabled == true) {
if (stream_descriptor->timing.drr_config.min_refresh_uhz != 0.0) {
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
((double)stream_descriptor->timing.drr_config.min_refresh_uhz * stream_descriptor->timing.h_total) * 1e9);
} else {
/* assume min of 48Hz */
- stream_fams2_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
+ stream_pstate_meta->max_vtotal = (unsigned int)math_floor((double)stream_descriptor->timing.pixel_clock_khz /
(48000000.0 * stream_descriptor->timing.h_total) * 1e9);
}
} else {
- stream_fams2_meta->max_vtotal = stream_fams2_meta->nom_vtotal;
- }
- stream_fams2_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
- (stream_fams2_meta->max_vtotal * timing->h_total);
- stream_fams2_meta->max_frame_time_us =
- (double)stream_fams2_meta->max_vtotal * stream_fams2_meta->otg_vline_time_us;
-
- stream_fams2_meta->scheduling_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->vertical_interrupt_ack_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->contention_delay_otg_vlines =
- (unsigned int)math_ceil(contention_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->max_vtotal = stream_pstate_meta->nom_vtotal;
+ }
+ stream_pstate_meta->min_refresh_rate_hz = timing->pixel_clock_khz * 1000.0 /
+ (stream_pstate_meta->max_vtotal * timing->h_total);
+ stream_pstate_meta->max_frame_time_us =
+ (double)stream_pstate_meta->max_vtotal * stream_pstate_meta->otg_vline_time_us;
+
+ stream_pstate_meta->scheduling_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.scheduling_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->vertical_interrupt_ack_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.vertical_interrupt_ack_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->contention_delay_otg_vlines =
+ (unsigned int)math_ceil(contention_delay_us / stream_pstate_meta->otg_vline_time_us);
/* worst case allow to target needs to account for all streams' allow events overlapping, and 1 line for error */
- stream_fams2_meta->allow_to_target_delay_otg_vlines =
- (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_fams2_meta->otg_vline_time_us)) + 1;
- stream_fams2_meta->min_allow_width_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->allow_to_target_delay_otg_vlines =
+ (unsigned int)(math_ceil((ip_caps->fams2.vertical_interrupt_ack_delay_us + contention_delay_us + ip_caps->fams2.allow_programming_delay_us) / stream_pstate_meta->otg_vline_time_us)) + 1;
+ stream_pstate_meta->min_allow_width_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.min_allow_width_us / stream_pstate_meta->otg_vline_time_us);
/* this value should account for urgent latency */
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines =
+ stream_pstate_meta->blackout_otg_vlines =
(unsigned int)math_ceil(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us /
- stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->otg_vline_time_us);
/* scheduling params should be built based on the worst case for allow_time:disallow_time */
/* vactive */
if (display_config->display_config.num_streams == 1) {
/* for single stream, guarantee at least an instant of allow */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines = (unsigned int)math_floor(
math_max2(0.0,
- timing->v_active - math_max2(1.0, stream_fams2_meta->min_allow_width_otg_vlines) - stream_fams2_meta->dram_clk_change_blackout_otg_vlines));
+ timing->v_active - math_max2(1.0, stream_pstate_meta->min_allow_width_otg_vlines) - stream_pstate_meta->blackout_otg_vlines));
} else {
/* for multi stream, bound to a max fill time defined by IP caps */
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
- (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_fams2_meta->otg_vline_time_us);
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines =
+ (unsigned int)math_floor((double)ip_caps->max_vactive_det_fill_delay_us / stream_pstate_meta->otg_vline_time_us);
}
- stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us = stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_fams2_meta->otg_vline_time_us;
+ stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us = stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines * stream_pstate_meta->otg_vline_time_us;
- if (stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline =
- timing->v_blank_end + stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ if (stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us > 0.0) {
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline =
+ timing->v_blank_end + stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_otg_vlines;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
} else {
- stream_fams2_meta->method_vactive.common.allow_start_otg_vline = 0;
- stream_fams2_meta->method_vactive.common.allow_end_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_start_otg_vline = 0;
+ stream_pstate_meta->method_vactive.common.allow_end_otg_vline = 0;
}
- stream_fams2_meta->method_vactive.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vactive.common, stream_fams2_meta);
+ stream_pstate_meta->method_vactive.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vactive.common, stream_pstate_meta);
/* vblank */
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline = stream_fams2_meta->vblank_start;
- stream_fams2_meta->method_vblank.common.allow_end_otg_vline =
- stream_fams2_meta->method_vblank.common.allow_start_otg_vline + 1;
- stream_fams2_meta->method_vblank.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_vblank.common, stream_fams2_meta);
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline = stream_pstate_meta->vblank_start;
+ stream_pstate_meta->method_vblank.common.allow_end_otg_vline =
+ stream_pstate_meta->method_vblank.common.allow_start_otg_vline + 1;
+ stream_pstate_meta->method_vblank.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_vblank.common, stream_pstate_meta);
/* subvp */
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_subvp.phantom_vactive =
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_df_throttle_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.subvp_prefetch_to_mall_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_subvp.phantom_vactive =
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
stream_info->phantom_min_v_active;
- stream_fams2_meta->method_subvp.phantom_vfp =
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.phantom_vfp =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines;
/* phantom vtotal = v_bp(vstartup) + v_sync(1) + v_fp(throttle_delay) + v_active(allow_to_target + min_allow + min_vactive)*/
- stream_fams2_meta->method_subvp.phantom_vtotal =
+ stream_pstate_meta->method_subvp.phantom_vtotal =
stream_info->phantom_v_startup +
- stream_fams2_meta->method_subvp.phantom_vfp +
+ stream_pstate_meta->method_subvp.phantom_vfp +
1 +
- stream_fams2_meta->method_subvp.df_throttle_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vactive;
- stream_fams2_meta->method_subvp.common.allow_start_otg_vline =
+ stream_pstate_meta->method_subvp.df_throttle_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_pstate_meta->method_subvp.common.allow_start_otg_vline =
stream_descriptor->timing.v_blank_end +
- stream_fams2_meta->contention_delay_otg_vlines +
- stream_fams2_meta->method_subvp.programming_delay_otg_vlines +
- stream_fams2_meta->method_subvp.phantom_vtotal +
- stream_fams2_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_subvp.common.allow_end_otg_vline =
- stream_fams2_meta->vblank_start -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- stream_fams2_meta->method_subvp.common.period_us = stream_fams2_meta->nom_frame_time_us;
- build_method_scheduling_params(&stream_fams2_meta->method_subvp.common, stream_fams2_meta);
+ stream_pstate_meta->contention_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.programming_delay_otg_vlines +
+ stream_pstate_meta->method_subvp.phantom_vtotal +
+ stream_pstate_meta->method_subvp.prefetch_to_mall_delay_otg_vlines +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_subvp.common.allow_end_otg_vline =
+ stream_pstate_meta->vblank_start -
+ stream_pstate_meta->blackout_otg_vlines;
+ stream_pstate_meta->method_subvp.common.period_us = stream_pstate_meta->nom_frame_time_us;
+ build_method_scheduling_params(&stream_pstate_meta->method_subvp.common, stream_pstate_meta);
/* drr */
- stream_fams2_meta->method_drr.programming_delay_otg_vlines =
- (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_fams2_meta->otg_vline_time_us);
- stream_fams2_meta->method_drr.common.allow_start_otg_vline =
- stream_fams2_meta->vblank_start +
- stream_fams2_meta->allow_to_target_delay_otg_vlines;
- stream_fams2_meta->method_drr.common.period_us = stream_fams2_meta->nom_frame_time_us;
+ stream_pstate_meta->method_drr.programming_delay_otg_vlines =
+ (unsigned int)math_ceil(ip_caps->fams2.drr_programming_delay_us / stream_pstate_meta->otg_vline_time_us);
+ stream_pstate_meta->method_drr.common.allow_start_otg_vline =
+ stream_pstate_meta->vblank_start +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines;
+ stream_pstate_meta->method_drr.common.period_us = stream_pstate_meta->nom_frame_time_us;
if (display_config->display_config.num_streams <= 1) {
/* only need to stretch vblank for blackout time */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
} else {
/* multi display needs to always be schedulable */
- stream_fams2_meta->method_drr.stretched_vtotal =
- stream_fams2_meta->nom_vtotal * 2 +
- stream_fams2_meta->allow_to_target_delay_otg_vlines +
- stream_fams2_meta->min_allow_width_otg_vlines +
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- }
- stream_fams2_meta->method_drr.common.allow_end_otg_vline =
- stream_fams2_meta->method_drr.stretched_vtotal -
- stream_fams2_meta->dram_clk_change_blackout_otg_vlines;
- build_method_scheduling_params(&stream_fams2_meta->method_drr.common, stream_fams2_meta);
+ stream_pstate_meta->method_drr.stretched_vtotal =
+ stream_pstate_meta->nom_vtotal * 2 +
+ stream_pstate_meta->allow_to_target_delay_otg_vlines +
+ stream_pstate_meta->min_allow_width_otg_vlines +
+ stream_pstate_meta->blackout_otg_vlines;
+ }
+ stream_pstate_meta->method_drr.common.allow_end_otg_vline =
+ stream_pstate_meta->method_drr.stretched_vtotal -
+ stream_pstate_meta->blackout_otg_vlines;
+ build_method_scheduling_params(&stream_pstate_meta->method_drr.common, stream_pstate_meta);
}
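
For context, the DRR stretch computed above reduces to simple vline arithmetic. A minimal standalone sketch, using hypothetical values rather than real stream timings, would be:

#include <stdio.h>

/* Hypothetical inputs, not taken from a real display configuration. */
#define NOM_VTOTAL              1125 /* nominal vtotal in OTG vlines */
#define ALLOW_TO_TARGET_VLINES  4
#define MIN_ALLOW_WIDTH_VLINES  10
#define BLACKOUT_VLINES         30
#define NUM_STREAMS             2

int main(void)
{
	unsigned int stretched_vtotal;

	if (NUM_STREAMS <= 1)
		/* single display: only need to stretch vblank for blackout time */
		stretched_vtotal = NOM_VTOTAL +
			ALLOW_TO_TARGET_VLINES +
			MIN_ALLOW_WIDTH_VLINES +
			BLACKOUT_VLINES;
	else
		/* multi display: double the frame so it is always schedulable */
		stretched_vtotal = NOM_VTOTAL * 2 +
			ALLOW_TO_TARGET_VLINES +
			MIN_ALLOW_WIDTH_VLINES +
			BLACKOUT_VLINES;

	printf("stretched vtotal = %u vlines\n", stretched_vtotal);
	return 0;
}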
static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
@@ -1820,14 +1843,14 @@ static void build_subvp_meta_per_stream(struct dml2_pmo_instance *pmo,
int stream_index)
{
struct dml2_implicit_svp_meta *stream_svp_meta = &pmo->scratch.pmo_dcn4.stream_svp_meta[stream_index];
- struct dml2_fams2_meta *stream_fams2_meta = &pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index];
stream_svp_meta->valid = true;
/* PMO FAMS2 precalculates these values */
- stream_svp_meta->v_active = stream_fams2_meta->method_subvp.phantom_vactive;
- stream_svp_meta->v_front_porch = stream_fams2_meta->method_subvp.phantom_vfp;
- stream_svp_meta->v_total = stream_fams2_meta->method_subvp.phantom_vtotal;
+ stream_svp_meta->v_active = stream_pstate_meta->method_subvp.phantom_vactive;
+ stream_svp_meta->v_front_porch = stream_pstate_meta->method_subvp.phantom_vfp;
+ stream_svp_meta->v_total = stream_pstate_meta->method_subvp.phantom_vtotal;
}
bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_support_in_out *in_out)
@@ -1879,7 +1902,7 @@ bool pmo_dcn4_fams2_init_for_pstate_support(struct dml2_pmo_init_for_pstate_supp
set_bit_in_bitfield(&s->pmo_dcn4.stream_vactive_capability_mask, stream_index);
/* FAMS2 meta */
- build_fams2_meta_per_stream(pmo, display_config, stream_index);
+ build_pstate_meta_per_stream(pmo, display_config, stream_index);
/* SVP meta */
build_subvp_meta_per_stream(pmo, display_config, stream_index);
@@ -1939,9 +1962,6 @@ static void reset_display_configuration(struct display_configuation_with_meta *d
for (stream_index = 0; stream_index < display_config->display_config.num_streams; stream_index++) {
display_config->stage3.stream_svp_meta[stream_index].valid = false;
-
- display_config->display_config.stream_descriptors[stream_index].overrides.minimize_active_latency_hiding = false;
- display_config->display_config.overrides.best_effort_min_active_latency_hiding_us = 0;
}
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
@@ -1974,7 +1994,6 @@ static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *
plane->overrides.uclk_pstate_change_strategy = dml2_uclk_pstate_change_strategy_force_drr;
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_drr;
-
}
}
}
@@ -2040,7 +2059,6 @@ static void setup_planes_for_vblank_by_mask(struct display_configuation_with_met
plane->overrides.reserved_vblank_time_ns);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vblank;
-
}
}
}
@@ -2055,6 +2073,7 @@ static void setup_planes_for_vblank_drr_by_mask(struct display_configuation_with
for (plane_index = 0; plane_index < display_config->display_config.num_planes; plane_index++) {
if (is_bit_set_in_bitfield(plane_mask, plane_index)) {
plane = &display_config->display_config.plane_descriptors[plane_index];
+
plane->overrides.reserved_vblank_time_ns = (long)(pmo->soc_bb->power_management_parameters.dram_clk_change_blackout_us * 1000);
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vblank_drr;
@@ -2076,8 +2095,8 @@ static void setup_planes_for_vactive_by_mask(struct display_configuation_with_me
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_vactive;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
- display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2097,8 +2116,8 @@ static void setup_planes_for_vactive_drr_by_mask(struct display_configuation_wit
display_config->stage3.pstate_switch_modes[plane_index] = dml2_pstate_method_fw_vactive_drr;
if (!pmo->options->disable_vactive_det_fill_bw_pad) {
- display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us =
- (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_fams2_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
+ display_config->display_config.plane_descriptors[plane_index].overrides.max_vactive_det_fill_delay_us[dml2_pstate_type_uclk] =
+ (unsigned int)math_floor(pmo->scratch.pmo_dcn4.stream_pstate_meta[stream_index].method_vactive.max_vactive_det_fill_delay_us);
}
}
}
@@ -2144,9 +2163,9 @@ static bool setup_display_config(struct display_configuation_with_meta *display_
/* copy FAMS2 meta */
if (success) {
display_config->stage3.fams2_required = fams2_required;
- memcpy(&display_config->stage3.stream_fams2_meta,
- &scratch->pmo_dcn4.stream_fams2_meta,
- sizeof(struct dml2_fams2_meta) * DML2_MAX_PLANES);
+ memcpy(&display_config->stage3.stream_pstate_meta,
+ &scratch->pmo_dcn4.stream_pstate_meta,
+ sizeof(struct dml2_pstate_meta) * DML2_MAX_PLANES);
}
return success;
@@ -2188,12 +2207,12 @@ bool pmo_dcn4_fams2_test_for_pstate_support(struct dml2_pmo_test_for_pstate_supp
return false;
for (stream_index = 0; stream_index < in_out->base_display_config->display_config.num_streams; stream_index++) {
- struct dml2_fams2_meta *stream_fams2_meta = &s->pmo_dcn4.stream_fams2_meta[stream_index];
+ struct dml2_pstate_meta *stream_pstate_meta = &s->pmo_dcn4.stream_pstate_meta[stream_index];
if (s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_vactive ||
s->pmo_dcn4.pstate_strategy_candidates[s->pmo_dcn4.cur_pstate_candidate].per_stream_pstate_method[stream_index] == dml2_pstate_method_fw_vactive_drr) {
if (get_vactive_pstate_margin(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) < (MIN_VACTIVE_MARGIN_PCT * in_out->instance->soc_bb->power_management_parameters.dram_clk_change_blackout_us) ||
- get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_fams2_meta->method_vactive.max_vactive_det_fill_delay_us) {
+ get_vactive_det_fill_latency_delay_us(in_out->base_display_config, s->pmo_dcn4.stream_plane_mask[stream_index]) > stream_pstate_meta->method_vactive.max_vactive_det_fill_delay_us) {
p_state_supported = false;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
index 6baab7ad6ecc..6baab7ad6ecc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 7ed0242a4b33..55d2464365d0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -26,7 +26,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
{
bool result = false;
- if (out == 0)
+ if (!out)
return false;
memset(out, 0, sizeof(struct dml2_pmo_instance));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
index 7218de1824cc..b90f6263cd85 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -10,4 +10,4 @@
bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out);
-#endif
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
index e17b5ceba447..e17b5ceba447 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
index e13b0c5939b0..e13b0c5939b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_standalone_libraries/lib_float_math.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
index 5a33e2f357f4..5a33e2f357f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
index 5e14d85821e2..5e14d85821e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
index 14d0ae03dce6..14d0ae03dce6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
index 4a7c4c62111e..4a7c4c62111e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
index 53bd8602f9ef..53bd8602f9ef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
index 611c80f4f1bf..611c80f4f1bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_debug.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
index d52aa82283b3..1a6c0727cd2a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
@@ -152,7 +152,7 @@ struct core_plane_support_info {
int active_latency_hiding_us;
int mall_svp_size_requirement_ways;
int nominal_vblank_pstate_latency_hiding_us;
- unsigned int dram_change_vactive_det_fill_delay_us;
+ int vactive_det_fill_delay_us[dml2_pstate_type_count];
};
struct core_stream_support_info {
@@ -209,6 +209,7 @@ struct dml2_core_mode_support_result {
unsigned int uclk_pstate_supported;
unsigned int fclk_pstate_supported;
+ struct dml2_core_internal_watermarks watermarks;
} global;
struct {
@@ -255,56 +256,70 @@ struct dml2_implicit_svp_meta {
unsigned long v_front_porch;
};
-struct dml2_fams2_per_method_common_meta {
+struct dml2_pstate_per_method_common_meta {
/* generic params */
- unsigned int allow_start_otg_vline;
- unsigned int allow_end_otg_vline;
+ int allow_start_otg_vline;
+ int allow_end_otg_vline;
/* scheduling params */
double allow_time_us;
double disallow_time_us;
double period_us;
};
-struct dml2_fams2_meta {
+struct dml2_pstate_meta {
bool valid;
double otg_vline_time_us;
- unsigned int scheduling_delay_otg_vlines;
- unsigned int vertical_interrupt_ack_delay_otg_vlines;
- unsigned int allow_to_target_delay_otg_vlines;
- unsigned int contention_delay_otg_vlines;
- unsigned int min_allow_width_otg_vlines;
- unsigned int nom_vtotal;
- unsigned int vblank_start;
+ int scheduling_delay_otg_vlines;
+ int vertical_interrupt_ack_delay_otg_vlines;
+ int allow_to_target_delay_otg_vlines;
+ int contention_delay_otg_vlines;
+ int min_allow_width_otg_vlines;
+ int nom_vtotal;
+ int vblank_start;
double nom_refresh_rate_hz;
double nom_frame_time_us;
- unsigned int max_vtotal;
+ int max_vtotal;
double min_refresh_rate_hz;
double max_frame_time_us;
- unsigned int dram_clk_change_blackout_otg_vlines;
+ int blackout_otg_vlines;
+ int max_allow_delay_otg_vlines;
+ double nom_vblank_time_us;
struct {
double max_vactive_det_fill_delay_us;
- unsigned int max_vactive_det_fill_delay_otg_vlines;
- struct dml2_fams2_per_method_common_meta common;
+ double vactive_latency_hiding_us;
+ double reserved_vblank_required_us;
+ int max_vactive_det_fill_delay_otg_vlines;
+ int reserved_blank_required_vlines;
+ struct dml2_pstate_per_method_common_meta common;
} method_vactive;
struct {
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_vblank;
struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int df_throttle_delay_otg_vlines;
- unsigned int prefetch_to_mall_delay_otg_vlines;
+ int programming_delay_otg_vlines;
+ int df_throttle_delay_otg_vlines;
+ int prefetch_to_mall_delay_otg_vlines;
unsigned long phantom_vactive;
unsigned long phantom_vfp;
unsigned long phantom_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ struct dml2_pstate_per_method_common_meta common;
} method_subvp;
struct {
- unsigned int programming_delay_otg_vlines;
- unsigned int stretched_vtotal;
- struct dml2_fams2_per_method_common_meta common;
+ int programming_delay_otg_vlines;
+ int stretched_vtotal;
+ struct dml2_pstate_per_method_common_meta common;
} method_drr;
};
+/* mask of synchronized timings by stream index */
+struct dml2_pmo_synchronized_timing_groups {
+ unsigned int num_timing_groups;
+ unsigned int synchronized_timing_group_masks[DML2_MAX_PLANES];
+ bool group_is_drr_enabled[DML2_MAX_PLANES];
+ bool group_is_drr_active[DML2_MAX_PLANES];
+ double group_line_time_us[DML2_MAX_PLANES];
+};
+
struct dml2_optimization_stage3_state {
bool performed;
bool success;
@@ -319,7 +334,7 @@ struct dml2_optimization_stage3_state {
// Meta-data for FAMS2
bool fams2_required;
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
int min_clk_index_for_latency;
};
@@ -472,6 +487,7 @@ struct dml2_core_scratch {
};
struct dml2_core_instance {
+ enum dml2_project_id project_id;
struct dml2_mcg_min_clock_table *minimum_clock_table;
struct dml2_core_internal_state_inputs inputs;
struct dml2_core_internal_state_intermediates intermediates;
@@ -619,6 +635,12 @@ struct dml2_pmo_optimize_for_stutter_in_out {
#define PMO_DCN4_MAX_NUM_VARIANTS 2
#define PMO_DCN4_MAX_BASE_STRATEGIES 10
+struct dml2_scheduling_check_locals {
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
+ unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
+};
+
struct dml2_pmo_scratch {
union {
struct {
@@ -648,7 +670,7 @@ struct dml2_pmo_scratch {
// Stores all the implicit SVP meta information indexed by stream index of the display
// configuration under inspection, built at optimization stage init
struct dml2_implicit_svp_meta stream_svp_meta[DML2_MAX_PLANES];
- struct dml2_fams2_meta stream_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_meta stream_pstate_meta[DML2_MAX_PLANES];
unsigned int optimal_vblank_reserved_time_for_stutter_us[DML2_PMO_STUTTER_CANDIDATE_LIST_SIZE];
unsigned int num_stutter_candidates;
@@ -663,7 +685,7 @@ struct dml2_pmo_scratch {
double group_line_time_us[DML2_MAX_PLANES];
/* scheduling check locals */
- struct dml2_fams2_per_method_common_meta group_common_fams2_meta[DML2_MAX_PLANES];
+ struct dml2_pstate_per_method_common_meta group_common_pstate_meta[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_disallow_index[DML2_MAX_PLANES];
unsigned int sorted_group_gtl_period_index[DML2_MAX_PLANES];
double group_phase_offset[DML2_MAX_PLANES];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
index 5f1b49a50049..4cfe64aa8492 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.c
@@ -473,7 +473,6 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
{
bool sorted, swapped;
unsigned int cur_index;
- unsigned int temp;
int odm_slice_index;
for (odm_slice_index = 0; odm_slice_index < pipes->num_pipes_assigned_to_plane_for_odm_combine; odm_slice_index++) {
@@ -489,9 +488,8 @@ static void sort_pipes_for_splitting(struct dc_plane_pipe_pool *pipes)
swapped = false;
while (!sorted) {
if (pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] > pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1]) {
- temp = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index] = pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1];
- pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1] = temp;
+ swap(pipes->pipes_assigned_to_plane[odm_slice_index][cur_index + 1],
+ pipes->pipes_assigned_to_plane[odm_slice_index][cur_index]);
swapped = true;
}
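
The replacement above uses the kernel's swap() helper instead of an explicit temporary. A userspace approximation of the same bubble-sort pass, with a local swap macro standing in for the kernel one from <linux/minmax.h>, might look like:

#include <stdio.h>

/* local stand-in for the kernel's swap() macro */
#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

int main(void)
{
	unsigned int pipes[] = { 3, 1, 2, 0 };
	int n = 4, i, sorted = 0;

	while (!sorted) {
		sorted = 1;
		for (i = 0; i < n - 1; i++) {
			if (pipes[i] > pipes[i + 1]) {
				swap(pipes[i], pipes[i + 1]);
				sorted = 0;
			}
		}
	}

	for (i = 0; i < n; i++)
		printf("%u ", pipes[i]);
	printf("\n");
	return 0;
}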
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
index 1538b708d8be..1538b708d8be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_resource_mgmt.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
index 7ca7f2a743c2..7ca7f2a743c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_dc_types.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
index 140ec01545db..55b3e3ca54f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_internal_types.h
@@ -23,7 +23,7 @@
* Authors: AMD
*
*/
-
+
#ifndef __DML2_INTERNAL_TYPES_H__
#define __DML2_INTERNAL_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
index a56e75cdf712..66040c877d68 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
@@ -24,6 +24,7 @@
*
*/
+
#include "dml2_dc_types.h"
#include "dml2_internal_types.h"
#include "dml2_utils.h"
@@ -654,14 +655,14 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
unsigned int svp_height,
unsigned int svp_vstartup)
{
- unsigned int i, pipe_idx;
+ unsigned int i;
double line_time, fp_and_sync_width_time;
struct pipe_ctx *pipe;
uint32_t phantom_vactive, phantom_bp, pstate_width_fw_delay_lines;
static const double cvt_rb_vblank_max = ((double) 460 / (1000 * 1000));
// Find the pipe matching dc_pipe_idx
- for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
+ for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
pipe = &state->res_ctx.pipe_ctx[i];
if (!pipe->stream)
@@ -669,8 +670,6 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
if (i == dc_pipe_idx)
break;
-
- pipe_idx++;
}
// Calculate lines required for pstate allow width and FW processing delays
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
index 9d64851f54e7..9d64851f54e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
index ef693f608d59..ef693f608d59 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
index e83e05248592..e83e05248592 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_policy.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
index 3b866e876bf4..d834cb595afa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.c
@@ -301,6 +301,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
out->pct_ideal_dram_bw_after_urgent_pixel_only = 65.0;
break;
+
case dml_project_dcn401:
out->pct_ideal_fabric_bw_after_urgent = 76; //67;
out->max_avg_sdp_bw_use_normal_percent = 75; //80;
@@ -424,6 +425,8 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
p->in_states->state_array[1].dcfclk_mhz = 1434.0;
p->in_states->state_array[1].dram_speed_mts = 1000 * transactions_per_mem_clock;
break;
+
+
case dml_project_dcn401:
p->in_states->num_states = 2;
transactions_per_mem_clock = 16;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
index d764773938f4..d764773938f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_translation_helper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
index 9a33158b63bf..9a33158b63bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
index 04fcfe637119..04fcfe637119 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_utils.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
index 0318260370ed..9deb03a18ccc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
@@ -535,7 +535,7 @@ static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode v
if (result)
result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info);
- return (result == 1) ? true : false;
+ return result == 1;
}
static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *dml2)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
index c384e141cebc..c384e141cebc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
index 17f0972b1af7..17f0972b1af7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_assert.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
index f7d30b47beff..d459f93cf40b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_depedencies.h
@@ -31,3 +31,4 @@
*/
#include "os_types.h"
#include "cmntypes.h"
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
index 00d22e542469..00d22e542469 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
index bf491cf0582d..bf491cf0582d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_display_rq_dlg_calc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
index 2a2f84e07ca8..7fadbe6d7af4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml_logging.h
@@ -23,6 +23,7 @@
* Authors: AMD
*
*/
+
#ifndef __DML_LOGGING_H__
#define __DML_LOGGING_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index 01480a04f85e..ce91e5d28956 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -199,6 +199,8 @@ void dpp_reset(struct dpp *dpp_base)
memset(&dpp->scl_data, 0, sizeof(dpp->scl_data));
memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data));
+
+ dpp_base->cursor_offload = false;
}
@@ -484,10 +486,12 @@ void dpp1_set_cursor_position(
cur_en = 0; /* not visible beyond top edge*/
if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
-
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
}
+
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
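
The pattern introduced here is: skip the MMIO write when the cursor is offloaded (firmware owns the register), but always update the driver's shadow copy so later restores stay coherent. A reduced, self-contained sketch of that gating, with made-up register plumbing in place of the real REG_UPDATE path:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for the real MMIO write and shadow state */
struct fake_dpp {
	bool cursor_offload;            /* firmware owns cursor regs when true */
	unsigned int reg_cur_enable;    /* pretend MMIO register */
	unsigned int shadow_cur_enable; /* cached SW copy (pos/att in DC) */
};

static void set_cursor_enable(struct fake_dpp *dpp, bool enable)
{
	unsigned int cur_en = enable ? 1 : 0;

	if (dpp->shadow_cur_enable != cur_en) {
		if (!dpp->cursor_offload)
			dpp->reg_cur_enable = cur_en; /* would be REG_UPDATE(...) */
	}

	/* shadow state is updated unconditionally */
	dpp->shadow_cur_enable = cur_en;
}

int main(void)
{
	struct fake_dpp dpp = { .cursor_offload = true };

	set_cursor_enable(&dpp, true);
	printf("reg=%u shadow=%u\n", dpp.reg_cur_enable, dpp.shadow_cur_enable);
	return 0;
}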
void dpp1_cnv_set_optional_cursor_attributes(
@@ -497,8 +501,13 @@ void dpp1_cnv_set_optional_cursor_attributes(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias.bits.fp_bias = attr->bias;
+ dpp_base->att.fp_scale_bias.bits.fp_scale = attr->scale;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
index f466182963f7..b12f34345a58 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h
@@ -1348,7 +1348,8 @@ struct dcn_dpp_mask {
uint32_t CURSOR0_COLOR1; \
uint32_t DPP_CONTROL; \
uint32_t CM_HDR_MULT_COEF; \
- uint32_t CURSOR0_FP_SCALE_BIAS;
+ uint32_t CURSOR0_FP_SCALE_BIAS; \
+ uint32_t OBUF_CONTROL;
struct dcn_dpp_registers {
DPP_COMMON_REG_VARIABLE_LIST
@@ -1450,7 +1451,6 @@ void dpp1_set_degamma(
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
const struct pwl_params *params);
-
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index 09be2a90cc79..ef4a16117181 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -84,6 +84,22 @@ void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)
}
}
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state)
+{
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ dpp_reg_state->recout_start = REG_READ(RECOUT_START);
+ dpp_reg_state->recout_size = REG_READ(RECOUT_SIZE);
+ dpp_reg_state->scl_horz_filter_scale_ratio = REG_READ(SCL_HORZ_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_vert_filter_scale_ratio = REG_READ(SCL_VERT_FILTER_SCALE_RATIO);
+ dpp_reg_state->scl_mode = REG_READ(SCL_MODE);
+ dpp_reg_state->cm_control = REG_READ(CM_CONTROL);
+ dpp_reg_state->dpp_control = REG_READ(DPP_CONTROL);
+ dpp_reg_state->dscl_control = REG_READ(DSCL_CONTROL);
+ dpp_reg_state->obuf_control = REG_READ(OBUF_CONTROL);
+ dpp_reg_state->mpc_size = REG_READ(MPC_SIZE);
+}
+
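A hook like this is typically consumed by a debug dump path. A hedged userspace analog of the pattern, with a fake block and fabricated register values in place of the real REG_READ calls:

#include <stdio.h>

/* minimal analog of the read_reg_state hook; names are illustrative only */
struct reg_state { unsigned int scl_mode, dpp_control; };

struct block {
	void (*read_reg_state)(struct block *blk, struct reg_state *s);
};

static void fake_read_reg_state(struct block *blk, struct reg_state *s)
{
	(void)blk;
	s->scl_mode = 0x2;     /* would be REG_READ(SCL_MODE) */
	s->dpp_control = 0x10; /* would be REG_READ(DPP_CONTROL) */
}

int main(void)
{
	struct block blk = { .read_reg_state = fake_read_reg_state };
	struct reg_state s = {0};

	if (blk.read_reg_state) /* callers must tolerate a NULL hook */
		blk.read_reg_state(&blk, &s);

	printf("SCL_MODE=0x%x DPP_CONTROL=0x%x\n", s.scl_mode, s.dpp_control);
	return 0;
}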
/*program post scaler scs block in dpp CM*/
void dpp3_program_post_csc(
struct dpp *dpp_base,
@@ -396,17 +412,21 @@ void dpp3_set_cursor_attributes(
}
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -578,9 +598,6 @@ static void dpp3_power_on_blnd_lut(
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
}
- } else {
- REG_SET(CM_MEM_PWR_CTRL, 0,
- BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
index f236824126e9..d4a70b4379ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h
@@ -594,6 +594,8 @@ void dpp3_program_CM_dealpha(
void dpp30_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s);
+void dpp30_read_reg_state(struct dpp *dpp_base, struct dcn_dpp_reg_state *dpp_reg_state);
+
bool dpp3_get_optimal_number_of_taps(
struct dpp *dpp,
struct scaler_data *scl_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
index fa67e54bf94e..8a5aa5e86850 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c
@@ -134,6 +134,7 @@ static struct dpp_funcs dcn32_dpp_funcs = {
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
.dpp_get_gamut_remap = dpp3_cm_get_gamut_remap,
+ .dpp_read_reg_state = dpp30_read_reg_state,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
index f7a373a3d70a..977d83bf7741 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c
@@ -95,6 +95,7 @@ void dpp35_program_bias_and_scale_fcnv(
static struct dpp_funcs dcn35_dpp_funcs = {
.dpp_program_gamcor_lut = dpp3_program_gamcor_lut,
.dpp_read_state = dpp30_read_state,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.dpp_reset = dpp_reset,
.dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
.dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
index 36187f890d5d..96c2c853de42 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c
@@ -248,6 +248,7 @@ static struct dpp_funcs dcn401_dpp_funcs = {
.set_optional_cursor_attributes = dpp401_set_optional_cursor_attributes,
.dpp_dppclk_control = dpp1_dppclk_control,
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier,
+ .dpp_read_reg_state = dpp30_read_reg_state,
.set_cursor_matrix = dpp401_set_cursor_matrix,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
index 5a6a861402b3..5f6b431ec398 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h
@@ -673,6 +673,16 @@ struct dcn401_dpp {
struct pwl_params pwl_data;
};
+enum dcn401_dscl_mode_sel {
+ DCN401_DSCL_MODE_SCALING_444_BYPASS = 0,
+ DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
+ DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
+ DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
+ DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
+ DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
+ DCN401_DSCL_MODE_DSCL_BYPASS = 6
+};
+
bool dpp401_construct(struct dcn401_dpp *dpp401,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
index 7aab77b58869..62bf7cea21d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c
@@ -103,17 +103,21 @@ void dpp401_set_cursor_attributes(
}
}
- REG_UPDATE_3(CURSOR0_CONTROL,
- CUR0_MODE, color_format,
- CUR0_EXPANSION_MODE, 0,
- CUR0_ROM_EN, cur_rom_en);
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE_3(CURSOR0_CONTROL,
+ CUR0_MODE, color_format,
+ CUR0_EXPANSION_MODE, 0,
+ CUR0_ROM_EN, cur_rom_en);
if (color_format == CURSOR_MODE_MONO) {
/* todo: clarify what to program these to */
- REG_UPDATE(CURSOR0_COLOR0,
- CUR0_COLOR0, 0x00000000);
- REG_UPDATE(CURSOR0_COLOR1,
- CUR0_COLOR1, 0xFFFFFFFF);
+
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_COLOR0,
+ CUR0_COLOR0, 0x00000000);
+ REG_UPDATE(CURSOR0_COLOR1,
+ CUR0_COLOR1, 0xFFFFFFFF);
+ }
}
dpp_base->att.cur0_ctl.bits.expansion_mode = 0;
@@ -132,10 +136,12 @@ void dpp401_set_cursor_position(
uint32_t cur_en = pos->enable ? 1 : 0;
if (dpp_base->pos.cur0_ctl.bits.cur0_enable != cur_en) {
- REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
-
- dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ if (!dpp_base->cursor_offload)
+ REG_UPDATE(CURSOR0_CONTROL, CUR0_ENABLE, cur_en);
}
+
+ dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en;
+ dpp_base->att.cur0_ctl.bits.cur0_enable = cur_en;
}
void dpp401_set_optional_cursor_attributes(
@@ -145,10 +151,17 @@ void dpp401_set_optional_cursor_attributes(
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
if (attr) {
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
- REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ if (!dpp_base->cursor_offload) {
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_BIAS_G_Y, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_G_Y, CUR0_FP_SCALE_G_Y, attr->scale);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_BIAS_RB_CRCB, attr->bias);
+ REG_UPDATE(CURSOR0_FP_SCALE_BIAS_RB_CRCB, CUR0_FP_SCALE_RB_CRCB, attr->scale);
+ }
+
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_bias_g_y = attr->bias;
+ dpp_base->att.fp_scale_bias_g_y.bits.fp_scale_g_y = attr->scale;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb = attr->bias;
+ dpp_base->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb = attr->scale;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 2f92e7d4981b..6df3419f825f 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -78,16 +78,6 @@ enum dscl_autocal_mode {
AUTOCAL_MODE_AUTOREPLICATE = 3
};
-enum dscl_mode_sel {
- DSCL_MODE_SCALING_444_BYPASS = 0,
- DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
- DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
- DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
- DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
- DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
- DSCL_MODE_DSCL_BYPASS = 6
-};
-
static int dpp401_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)
{
if (depth == LB_PIXEL_DEPTH_30BPP)
@@ -122,7 +112,7 @@ static bool dpp401_dscl_is_420_format(enum pixel_format format)
return false;
}
-static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
+static enum dcn401_dscl_mode_sel dpp401_dscl_get_dscl_mode(
struct dpp *dpp_base,
const struct scaler_data *data,
bool dbg_always_scale)
@@ -132,7 +122,7 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) {
/* DSCL is processing data in fixed format */
if (data->format == PIXEL_FORMAT_FP16)
- return DSCL_MODE_DSCL_BYPASS;
+ return DCN401_DSCL_MODE_DSCL_BYPASS;
}
if (data->ratios.horz.value == one
@@ -140,20 +130,20 @@ static enum dscl_mode_sel dpp401_dscl_get_dscl_mode(
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
&& !dbg_always_scale)
- return DSCL_MODE_SCALING_444_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_444_BYPASS;
if (!dpp401_dscl_is_420_format(data->format)) {
if (dpp401_dscl_is_video_format(data->format))
- return DSCL_MODE_SCALING_444_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_YCBCR_ENABLE;
else
- return DSCL_MODE_SCALING_444_RGB_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_444_RGB_ENABLE;
}
if (data->ratios.horz.value == one && data->ratios.vert.value == one)
- return DSCL_MODE_SCALING_420_LUMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_LUMA_BYPASS;
if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return DSCL_MODE_SCALING_420_CHROMA_BYPASS;
+ return DCN401_DSCL_MODE_SCALING_420_CHROMA_BYPASS;
- return DSCL_MODE_SCALING_420_YCBCR_ENABLE;
+ return DCN401_DSCL_MODE_SCALING_420_YCBCR_ENABLE;
}
static void dpp401_power_on_dscl(
@@ -1071,7 +1061,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
uint32_t v_num_taps_c = scl_data->taps.v_taps_c - 1;
uint32_t h_num_taps = scl_data->taps.h_taps - 1;
uint32_t h_num_taps_c = scl_data->taps.h_taps_c - 1;
- enum dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
+ enum dcn401_dscl_mode_sel dscl_mode = dpp401_dscl_get_dscl_mode(
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
@@ -1102,7 +1092,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp->scl_data = *scl_data;
if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) {
- dscl_mode = (enum dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
+ dscl_mode = (enum dcn401_dscl_mode_sel) scl_data->dscl_prog_data.dscl_mode;
rect = (struct rect *)&scl_data->dscl_prog_data.recout;
mpc_width = scl_data->dscl_prog_data.mpc_size.width;
mpc_height = scl_data->dscl_prog_data.mpc_size.height;
@@ -1112,7 +1102,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
h_num_taps_c = scl_data->dscl_prog_data.taps.h_taps_c;
}
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) {
- if (dscl_mode != DSCL_MODE_DSCL_BYPASS)
+ if (dscl_mode != DCN401_DSCL_MODE_DSCL_BYPASS)
dpp401_power_on_dscl(dpp_base, true);
}
@@ -1139,7 +1129,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
/* SCL mode */
REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode);
- if (dscl_mode == DSCL_MODE_DSCL_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_DSCL_BYPASS) {
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl)
dpp401_power_on_dscl(dpp_base, false);
return;
@@ -1149,7 +1139,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
lb_config = dpp401_dscl_find_lb_memory_config(dpp, scl_data);
dpp401_dscl_set_lb(dpp, &scl_data->lb_params, lb_config);
- if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
+ if (dscl_mode == DCN401_DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 1f53a9f0c0ac..e4144b244332 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -1157,6 +1157,11 @@ static bool setup_dsc_config(
if (!is_dsc_possible)
goto done;
+ /* increase minimum slice count to meet sink slice width limitations */
+ min_slices_h = dc_fixpt_ceil(dc_fixpt_max(
+ dc_fixpt_div_int(dc_fixpt_from_int(pic_width), dsc_common_caps.max_slice_width), // sink min
+ dc_fixpt_from_int(min_slices_h))); // source min
+
min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h);
/* increase minimum slice count to meet sink throughput limitations */
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index bd1b9aef6d5c..242f1e6f0d8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
@@ -35,6 +35,7 @@ static void dsc_write_to_registers(struct display_stream_compressor *dsc, const
static const struct dsc_funcs dcn20_dsc_funcs = {
.dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
@@ -155,6 +156,13 @@ void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state
DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);
}
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state)
+{
+ struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
+
+ dccg_reg_state->dsc_top_control = REG_READ(DSC_TOP_CONTROL);
+ dccg_reg_state->dscc_interrupt_control_status = REG_READ(DSCC_INTERRUPT_CONTROL_STATUS);
+}
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg)
{
@@ -406,9 +414,10 @@ bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
+ // Need to find the ceiling value for the slice width
+ dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dsc_padding + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
- dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h;
dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v;
ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
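
Both the new minimum-slice computation and the padded slice width rely on the same integer ceiling idiom: for positive integers, ceil(a / b) == (a + b - 1) / b. A standalone sketch with hypothetical mode numbers:

#include <stdio.h>

#define CEIL_DIV(a, b) (((a) + (b) - 1) / (b))

int main(void)
{
	/* hypothetical values, not from a real sink or timing */
	unsigned int pic_width = 3840;
	unsigned int dsc_padding = 8;
	unsigned int max_slice_width = 1920;
	unsigned int num_slices_h;
	unsigned int slice_width;

	/* sink minimum: enough slices that none exceeds max_slice_width */
	num_slices_h = CEIL_DIV(pic_width, max_slice_width);

	/* slice width rounded up so padding pixels are covered */
	slice_width = CEIL_DIV(pic_width + dsc_padding, num_slices_h);

	printf("num_slices_h=%u slice_width=%u\n", num_slices_h, slice_width);
	return 0;
}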
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index a9c04fc95bd1..2337c3a97235 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
@@ -606,6 +606,7 @@ bool dsc2_get_packed_pps(struct display_stream_compressor *dsc,
uint8_t *dsc_packed_pps);
void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+void dsc2_read_reg_state(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state);
bool dsc2_validate_stream(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void dsc2_set_config(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index 6f4f5a3c4861..e712985f7abd 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
@@ -28,10 +28,11 @@
#include "reg_helper.h"
static void dsc35_enable(struct display_stream_compressor *dsc, int opp_pipe);
+static void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz);
static const struct dsc_funcs dcn35_dsc_funcs = {
- .dsc_get_enc_caps = dsc2_get_enc_caps,
.dsc_read_state = dsc2_read_state,
+ .dsc_read_reg_state = dsc2_read_reg_state,
.dsc_validate_stream = dsc2_validate_stream,
.dsc_set_config = dsc2_set_config,
.dsc_get_packed_pps = dsc2_get_packed_pps,
@@ -39,6 +40,7 @@ static const struct dsc_funcs dcn35_dsc_funcs = {
.dsc_disable = dsc2_disable,
.dsc_disconnect = dsc2_disconnect,
.dsc_wait_disconnect_pending_clear = dsc2_wait_disconnect_pending_clear,
+ .dsc_get_single_enc_caps = dsc35_get_single_enc_caps,
};
/* Macro definitions for REG_SET macros */
@@ -110,3 +112,31 @@ void dsc35_set_fgcg(struct dcn20_dsc *dsc20, bool enable)
{
REG_UPDATE(DSC_TOP_CONTROL, DSC_FGCG_REP_DIS, !enable);
}
+
+void dsc35_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz)
+{
+ dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */
+
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1;
+ dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1;
+
+ dsc_enc_caps->lb_bit_depth = 13;
+ dsc_enc_caps->is_block_pred_supported = true;
+
+ dsc_enc_caps->color_formats.bits.RGB = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_444 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0;
+ dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1;
+
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1;
+ dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1;
+
+ dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000;
+
+ dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
+ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
+}
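
The throughput figure above is derived directly from the DSC clock: three pixels per DSCCLK cycle, converted from kHz to megapixels per second. A quick check with a hypothetical clock:

#include <stdio.h>

int main(void)
{
	unsigned int max_dscclk_khz = 600000; /* hypothetical 600 MHz DSCCLK */
	/* 3 pixels per DSCCLK cycle; kHz * 3 / 1000 yields megapixels/s */
	unsigned int max_total_throughput_mps = max_dscclk_khz * 3 / 1000;

	printf("max throughput = %u MP/s\n", max_total_throughput_mps); /* 1800 */
	return 0;
}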
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
index 7bd92ae8b13e..c1bdbb38c690 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c
@@ -26,6 +26,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = {
.dsc_disconnect = dsc401_disconnect,
.dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear,
.dsc_get_single_enc_caps = dsc401_get_single_enc_caps,
+ .dsc_read_reg_state = dsc2_read_reg_state
};
/* Macro definitions for REG_SET macros */
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index b0bd1f9425b5..81c83d5fe042 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
@@ -41,6 +41,7 @@ struct dsc_config {
enum dc_color_depth color_depth; /* Bits per component */
bool is_odm;
struct dc_dsc_config dc_dsc_cfg;
+ uint32_t dsc_padding;
};
@@ -65,6 +66,10 @@ struct dcn_dsc_state {
uint32_t dsc_opp_source;
};
+struct dcn_dsc_reg_state {
+ uint32_t dsc_top_control;
+ uint32_t dscc_interrupt_control_status;
+};
/* DSC encoder capabilities
* They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps.
@@ -99,6 +104,7 @@ struct dsc_enc_caps {
struct dsc_funcs {
void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz);
void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s);
+ void (*dsc_read_reg_state)(struct display_stream_compressor *dsc, struct dcn_dsc_reg_state *dccg_reg_state);
bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg);
void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg,
struct dsc_optc_config *dsc_optc_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 1313a7c5d87b..73a1e6a03719 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -28,7 +28,7 @@
#include "include/hdcp_msg_types.h"
#include "include/signal_types.h"
#include "core_types.h"
-#include "link.h"
+#include "link_service.h"
#include "link_hwss.h"
#include "link/protocols/link_dpcd.h"
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index d347bb06577a..181a93dc46e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -440,6 +440,17 @@ void hubbub3_init_watermarks(struct hubbub *hubbub)
REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, reg);
}
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+
+ hubbub_reg_state->det0_ctrl = REG_READ(DCHUBBUB_DET0_CTRL);
+ hubbub_reg_state->det1_ctrl = REG_READ(DCHUBBUB_DET1_CTRL);
+ hubbub_reg_state->det2_ctrl = REG_READ(DCHUBBUB_DET2_CTRL);
+ hubbub_reg_state->det3_ctrl = REG_READ(DCHUBBUB_DET3_CTRL);
+ hubbub_reg_state->compbuf_ctrl = REG_READ(DCHUBBUB_COMPBUF_CTRL);
+}
+
static const struct hubbub_funcs hubbub30_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
@@ -457,6 +468,7 @@ static const struct hubbub_funcs hubbub30_funcs = {
.force_pstate_change_control = hubbub3_force_pstate_change_control,
.init_watermarks = hubbub3_init_watermarks,
.hubbub_read_state = hubbub2_read_state,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub3_construct(struct dcn20_hubbub *hubbub3,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
index ca6233e8f1f4..9e14de3ccaee 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h
@@ -133,4 +133,6 @@ void hubbub3_force_pstate_change_control(struct hubbub *hubbub,
void hubbub3_init_watermarks(struct hubbub *hubbub);
+void hubbub3_read_reg_state(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index b98505b240a7..5a03758e3de6 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -933,8 +933,8 @@ int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
dcn20_vmid_setup(&hubbub2->vmid[15], &phys_config);
}
-
- dcn21_dchvm_init(hubbub);
+ if (hubbub->funcs->dchvm_init)
+ hubbub->funcs->dchvm_init(hubbub);
return NUM_VMID;
}
@@ -1071,6 +1071,8 @@ static const struct hubbub_funcs hubbub31_funcs = {
.program_compbuf_size = dcn31_program_compbuf_size,
.init_crb = dcn31_init_crb,
.hubbub_read_state = hubbub2_read_state,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
+ .dchvm_init = dcn21_dchvm_init
};
void hubbub31_construct(struct dcn20_hubbub *hubbub31,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 32a6be543105..237331b35378 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -28,6 +28,7 @@
#include "dcn32_hubbub.h"
#include "dm_services.h"
#include "reg_helper.h"
+#include "dal_asic_id.h"
#define CTX \
@@ -72,6 +73,14 @@ static void dcn32_init_crb(struct hubbub *hubbub)
REG_UPDATE(DCHUBBUB_DEBUG_CTRL_0, DET_DEPTH, 0x47F);
}
+static void hubbub32_set_sdp_control(struct hubbub *hubbub, bool dc_control)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
+ SDPIF_PORT_CONTROL, dc_control);
+}
+
void hubbub32_set_request_limit(struct hubbub *hubbub, int memory_channel_count, int words_per_channel)
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
@@ -754,8 +763,18 @@ static bool hubbub32_program_watermarks(
unsigned int refclk_mhz,
bool safe_to_lower)
{
+ struct dc *dc = hubbub->ctx->dc;
bool wm_pending = false;
+ if (!safe_to_lower && dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ /* before raising watermarks, give SDP control to DF and disable stutter */
+ wm_pending = true;
+ hubbub32_set_sdp_control(hubbub, false);
+ hubbub1_allow_self_refresh_control(hubbub, false);
+ }
+
if (hubbub32_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
wm_pending = true;
@@ -786,10 +805,20 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower) {
+ /* after lowering watermarks, restore the stutter setting and give SDP control back to DC */
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+
+ if (dc->debug.disable_stutter_for_wm_program &&
+ (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev) ||
+ ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev))) {
+ hubbub32_set_sdp_control(hubbub, true);
+ }
+ } else if (dc->debug.disable_stutter) {
+ hubbub1_allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+ }
- hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
+ hubbub32_force_usr_retraining_allow(hubbub, dc->debug.force_usr_allow);
return wm_pending;
}
@@ -974,8 +1003,7 @@ void hubbub32_init(struct hubbub *hubbub)
ignore the "df_pre_cstate_req" from the SDP port control.
only the DCN will determine when to connect the SDP port
*/
- REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
- SDPIF_PORT_CONTROL, 1);
+ hubbub32_set_sdp_control(hubbub, true);
/*Set SDP's max outstanding request to 512
must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
@@ -1009,6 +1037,7 @@ static const struct hubbub_funcs hubbub32_funcs = {
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.set_request_limit = hubbub32_set_request_limit,
.get_mall_en = hubbub32_get_mall_en,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 6d41953011f5..43ba399f4822 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -549,6 +549,55 @@ void hubbub35_init(struct hubbub *hubbub)
memset(&hubbub2->watermarks.a.cstate_pstate, 0, sizeof(hubbub2->watermarks.a.cstate_pstate));
}
+void dcn35_dchvm_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+ uint32_t riommu_active;
+ int i;
+
+ //Init DCHVM block
+ REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1);
+
+ //Poll until RIOMMU_ACTIVE = 1
+ for (i = 0; i < 100; i++) {
+ REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active);
+
+ if (riommu_active)
+ break;
+ else
+ udelay(5);
+ }
+
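+ /* If the RIOMMU never reported active, skip prefetch setup and leave riommu_active false */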
+ if (riommu_active) {
+ // Disable gating and memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 1);
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 1,
+ HVM_DISPCLK_G_GATE_DIS, 1,
+ HVM_DCFCLK_R_GATE_DIS, 1,
+ HVM_DCFCLK_G_GATE_DIS, 1);
+
+ //Reflect the power status of DCHUBBUB
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1);
+
+ //Start rIOMMU prefetching
+ REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1);
+
+ //Poll until HOSTVM_PREFETCH_DONE = 1
+ REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100);
+
+ //Enable memory power requests
+ REG_UPDATE(DCHVM_MEM_CTRL, HVM_GPUVMRET_PWR_REQ_DIS, 0);
+ // Enable dynamic clock gating
+ REG_UPDATE_4(DCHVM_CLK_CTRL,
+ HVM_DISPCLK_R_GATE_DIS, 0,
+ HVM_DISPCLK_G_GATE_DIS, 0,
+ HVM_DCFCLK_R_GATE_DIS, 0,
+ HVM_DCFCLK_G_GATE_DIS, 0);
+ hubbub->riommu_active = true;
+ }
+}
+
/*static void hubbub35_set_request_limit(struct hubbub *hubbub,
int memory_channel_count,
int words_per_channel)
@@ -589,6 +638,8 @@ static const struct hubbub_funcs hubbub35_funcs = {
.hubbub_read_state = hubbub2_read_state,
.force_usr_retraining_allow = hubbub32_force_usr_retraining_allow,
.dchubbub_init = hubbub35_init,
+ .hubbub_read_reg_state = hubbub3_read_reg_state,
+ .dchvm_init = dcn35_dchvm_init
};
void hubbub35_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
index 23fecf88556c..9f65fff1bd4d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h
@@ -168,4 +168,5 @@ void dcn35_program_compbuf_size(struct hubbub *hubbub,
unsigned int compbuf_size_kb, bool safe_to_increase);
void dcn35_init_crb(struct hubbub *hubbub);
void hubbub35_init(struct hubbub *hubbub);
+void dcn35_dchvm_init(struct hubbub *hubbub);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 92fab471b183..d11afd1ce72a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1247,6 +1247,7 @@ static const struct hubbub_funcs hubbub4_01_funcs = {
.program_compbuf_segments = dcn401_program_compbuf_segments,
.wait_for_det_update = dcn401_wait_for_det_update,
.program_arbiter = dcn401_program_arbiter,
+ .hubbub_read_reg_state = hubbub3_read_reg_state
};
void hubbub401_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
index 9b026600b90e..6378e3fd7249 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c
@@ -550,6 +550,7 @@ void hubp_reset(struct hubp *hubp)
{
memset(&hubp->pos, 0, sizeof(hubp->pos));
memset(&hubp->att, 0, sizeof(hubp->att));
+ hubp->cursor_offload = false;
}
void hubp1_program_surface_config(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
index f8f991785d4f..f2571076fc50 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h
@@ -104,7 +104,10 @@
SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\
SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\
- SRI(HUBP_CLK_CNTL, HUBP, id)
+ SRI(HUBP_CLK_CNTL, HUBP, id),\
+ SRI(HUBPRET_READ_LINE_VALUE, HUBPRET, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DCFCLK, HUBP, id),\
+ SRI(HUBP_MEASURE_WIN_CTRL_DPPCLK, HUBP, id)
/* Register address initialization macro for ASICs with VM */
#define HUBP_REG_LIST_DCN_VM(id)\
@@ -249,7 +252,20 @@
uint32_t CURSOR_POSITION; \
uint32_t CURSOR_HOT_SPOT; \
uint32_t CURSOR_DST_OFFSET; \
- uint32_t HUBP_CLK_CNTL
+ uint32_t HUBP_CLK_CNTL; \
+ uint32_t HUBPRET_READ_LINE_VALUE; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DCFCLK; \
+ uint32_t HUBP_MEASURE_WIN_CTRL_DPPCLK; \
+ uint32_t HUBPRET_INTERRUPT; \
+ uint32_t HUBPRET_MEM_PWR_CTRL; \
+ uint32_t HUBPRET_MEM_PWR_STATUS; \
+ uint32_t HUBPRET_READ_LINE_CTRL0; \
+ uint32_t HUBPRET_READ_LINE_CTRL1; \
+ uint32_t HUBPRET_READ_LINE0; \
+ uint32_t HUBPRET_READ_LINE1; \
+ uint32_t HUBPREQ_MEM_PWR_CTRL; \
+ uint32_t HUBPREQ_MEM_PWR_STATUS
+
#define HUBP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -622,6 +638,8 @@
type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
+ type PIPE_READ_LINE;\
+ type HUBP_SEG_ALLOC_ERR_STATUS;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
@@ -671,6 +689,7 @@ struct dcn_fl_regs_st {
uint32_t lut_done;
uint32_t lut_addr_mode;
uint32_t lut_width;
+ uint32_t lut_mpc_width;
uint32_t lut_tmz;
uint32_t lut_crossbar_sel_r;
uint32_t lut_crossbar_sel_g;
@@ -683,6 +702,123 @@ struct dcn_fl_regs_st {
uint32_t lut_fl_mode;
uint32_t lut_fl_format;
};
+struct dcn_hubp_reg_state {
+ uint32_t hubp_cntl;
+ uint32_t mall_config;
+ uint32_t mall_sub_vp;
+ uint32_t hubp_req_size_config;
+ uint32_t hubp_req_size_config_c;
+ uint32_t vmpg_config;
+ uint32_t addr_config;
+ uint32_t pri_viewport_dimension;
+ uint32_t pri_viewport_dimension_c;
+ uint32_t pri_viewport_start;
+ uint32_t pri_viewport_start_c;
+ uint32_t sec_viewport_dimension;
+ uint32_t sec_viewport_dimension_c;
+ uint32_t sec_viewport_start;
+ uint32_t sec_viewport_start_c;
+ uint32_t surface_config;
+ uint32_t tiling_config;
+ uint32_t clk_cntl;
+ uint32_t mall_status;
+ uint32_t measure_win_ctrl_dcfclk;
+ uint32_t measure_win_ctrl_dppclk;
+
+ uint32_t blank_offset_0;
+ uint32_t blank_offset_1;
+ uint32_t cursor_settings;
+ uint32_t dcn_cur0_ttu_cntl0;
+ uint32_t dcn_cur0_ttu_cntl1;
+ uint32_t dcn_cur1_ttu_cntl0;
+ uint32_t dcn_cur1_ttu_cntl1;
+ uint32_t dcn_dmdat_vm_cntl;
+ uint32_t dcn_expansion_mode;
+ uint32_t dcn_global_ttu_cntl;
+ uint32_t dcn_surf0_ttu_cntl0;
+ uint32_t dcn_surf0_ttu_cntl1;
+ uint32_t dcn_surf1_ttu_cntl0;
+ uint32_t dcn_surf1_ttu_cntl1;
+ uint32_t dcn_ttu_qos_wm;
+ uint32_t dcn_vm_mx_l1_tlb_cntl;
+ uint32_t dcn_vm_system_aperture_high_addr;
+ uint32_t dcn_vm_system_aperture_low_addr;
+ uint32_t dcsurf_flip_control;
+ uint32_t dcsurf_flip_control2;
+ uint32_t dcsurf_primary_meta_surface_address;
+ uint32_t dcsurf_primary_meta_surface_address_c;
+ uint32_t dcsurf_primary_meta_surface_address_high;
+ uint32_t dcsurf_primary_meta_surface_address_high_c;
+ uint32_t dcsurf_primary_surface_address;
+ uint32_t dcsurf_primary_surface_address_c;
+ uint32_t dcsurf_primary_surface_address_high;
+ uint32_t dcsurf_primary_surface_address_high_c;
+ uint32_t dcsurf_secondary_meta_surface_address;
+ uint32_t dcsurf_secondary_meta_surface_address_c;
+ uint32_t dcsurf_secondary_meta_surface_address_high;
+ uint32_t dcsurf_secondary_meta_surface_address_high_c;
+ uint32_t dcsurf_secondary_surface_address;
+ uint32_t dcsurf_secondary_surface_address_c;
+ uint32_t dcsurf_secondary_surface_address_high;
+ uint32_t dcsurf_secondary_surface_address_high_c;
+ uint32_t dcsurf_surface_control;
+ uint32_t dcsurf_surface_earliest_inuse;
+ uint32_t dcsurf_surface_earliest_inuse_c;
+ uint32_t dcsurf_surface_earliest_inuse_high;
+ uint32_t dcsurf_surface_earliest_inuse_high_c;
+ uint32_t dcsurf_surface_flip_interrupt;
+ uint32_t dcsurf_surface_inuse;
+ uint32_t dcsurf_surface_inuse_c;
+ uint32_t dcsurf_surface_inuse_high;
+ uint32_t dcsurf_surface_inuse_high_c;
+ uint32_t dcsurf_surface_pitch;
+ uint32_t dcsurf_surface_pitch_c;
+ uint32_t dst_after_scaler;
+ uint32_t dst_dimensions;
+ uint32_t dst_y_delta_drq_limit;
+ uint32_t flip_parameters_0;
+ uint32_t flip_parameters_1;
+ uint32_t flip_parameters_2;
+ uint32_t flip_parameters_3;
+ uint32_t flip_parameters_4;
+ uint32_t flip_parameters_5;
+ uint32_t flip_parameters_6;
+ uint32_t hubpreq_mem_pwr_ctrl;
+ uint32_t hubpreq_mem_pwr_status;
+ uint32_t nom_parameters_0;
+ uint32_t nom_parameters_1;
+ uint32_t nom_parameters_2;
+ uint32_t nom_parameters_3;
+ uint32_t nom_parameters_4;
+ uint32_t nom_parameters_5;
+ uint32_t nom_parameters_6;
+ uint32_t nom_parameters_7;
+ uint32_t per_line_delivery;
+ uint32_t per_line_delivery_pre;
+ uint32_t prefetch_settings;
+ uint32_t prefetch_settings_c;
+ uint32_t ref_freq_to_pix_freq;
+ uint32_t uclk_pstate_force;
+ uint32_t vblank_parameters_0;
+ uint32_t vblank_parameters_1;
+ uint32_t vblank_parameters_2;
+ uint32_t vblank_parameters_3;
+ uint32_t vblank_parameters_4;
+ uint32_t vblank_parameters_5;
+ uint32_t vblank_parameters_6;
+ uint32_t vmid_settings_0;
+
+ uint32_t hubpret_control;
+ uint32_t hubpret_interrupt;
+ uint32_t hubpret_mem_pwr_ctrl;
+ uint32_t hubpret_mem_pwr_status;
+ uint32_t hubpret_read_line_ctrl0;
+ uint32_t hubpret_read_line_ctrl1;
+ uint32_t hubpret_read_line_status;
+ uint32_t hubpret_read_line_value;
+ uint32_t hubpret_read_line0;
+ uint32_t hubpret_read_line1;
+};
struct dcn_hubp_state {
struct _vcs_dpi_display_dlg_regs_st dlg_attr;
@@ -713,7 +849,6 @@ struct dcn_hubp_state {
uint32_t hubp_cntl;
uint32_t flip_control;
};
-
struct dcn10_hubp {
struct hubp base;
struct dcn_hubp_state state;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
index 91259b896e03..92288de4cc10 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c
@@ -613,26 +613,28 @@ void hubp2_cursor_set_attributes(
hubp->curs_attr = *attr;
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
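+ /* When cursor updates are offloaded, skip direct register writes and only cache the attributes */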
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+ }
hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
hubp->att.SURFACE_ADDR = attr->address.low_part;
@@ -1059,23 +1061,28 @@ void hubp2_cursor_set_position(
cur_en = 0; /* not visible beyond top edge*/
if (hubp->pos.cur_ctl.bits.cur_enable != cur_en) {
- if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
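+ /* Use the cached cursor address; the register is not written while offload is active */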
+ bool cursor_not_programmed = hubp->att.SURFACE_ADDR == 0 && hubp->att.SURFACE_ADDR_HIGH == 0;
+
+ if (cur_en && cursor_not_programmed)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL, CURSOR_ENABLE, cur_en);
}
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, pos->x,
- CURSOR_Y_POSITION, pos->y);
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, pos->x,
+ CURSOR_Y_POSITION, pos->y);
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
/* TODO Handle surface pixel formats other than 4:4:4 */
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
index 62369be070ea..7062e6653062 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h
@@ -145,7 +145,8 @@
uint32_t FLIP_PARAMETERS_2;\
uint32_t DCN_CUR1_TTU_CNTL0;\
uint32_t DCN_CUR1_TTU_CNTL1;\
- uint32_t VMID_SETTINGS_0
+ uint32_t VMID_SETTINGS_0;\
+ uint32_t DST_Y_DELTA_DRQ_LIMIT
/*shared with dcn3.x*/
#define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \
@@ -176,7 +177,10 @@
uint32_t HUBP_3DLUT_CONTROL;\
uint32_t HUBP_3DLUT_DLG_PARAM;\
uint32_t DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE;\
- uint32_t DCHUBP_MCACHEID_CONFIG
+ uint32_t DCHUBP_MCACHEID_CONFIG;\
+ uint32_t DCHUBP_MALL_SUB_VP;\
+ uint32_t DCHUBP_ADDR_CONFIG;\
+ uint32_t HUBP_MALL_STATUS
#define DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type) \
DCN_HUBP_REG_FIELD_BASE_LIST(type); \
@@ -264,6 +268,7 @@
type HUBP_3DLUT_DONE;\
type HUBP_3DLUT_ADDRESSING_MODE;\
type HUBP_3DLUT_WIDTH;\
+ type HUBP_3DLUT_MPC_WIDTH;\
type HUBP_3DLUT_TMZ;\
type HUBP_3DLUT_CROSSBAR_SELECT_Y_G;\
type HUBP_3DLUT_CROSSBAR_SELECT_CB_B;\
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
index e2740482e1cf..08ea0a1b9e7f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c
@@ -73,8 +73,6 @@
* On any mode switch, if the new reg values are smaller than the current values,
* then update the regs with the new values.
*
- * Link to the ticket: http://ontrack-internal.amd.com/browse/DEDCN21-142
- *
*/
void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index 0da70b50e86d..0cc6f4558989 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -476,6 +476,126 @@ void hubp3_read_state(struct hubp *hubp)
}
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ reg_state->hubp_cntl = REG_READ(DCHUBP_CNTL);
+ reg_state->mall_config = REG_READ(DCHUBP_MALL_CONFIG);
+ reg_state->mall_sub_vp = REG_READ(DCHUBP_MALL_SUB_VP);
+ reg_state->hubp_req_size_config = REG_READ(DCHUBP_REQ_SIZE_CONFIG);
+ reg_state->hubp_req_size_config_c = REG_READ(DCHUBP_REQ_SIZE_CONFIG_C);
+ reg_state->vmpg_config = REG_READ(DCHUBP_VMPG_CONFIG);
+ reg_state->addr_config = REG_READ(DCSURF_ADDR_CONFIG);
+ reg_state->pri_viewport_dimension = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION);
+ reg_state->pri_viewport_dimension_c = REG_READ(DCSURF_PRI_VIEWPORT_DIMENSION_C);
+ reg_state->pri_viewport_start = REG_READ(DCSURF_PRI_VIEWPORT_START);
+ reg_state->pri_viewport_start_c = REG_READ(DCSURF_PRI_VIEWPORT_START_C);
+ reg_state->sec_viewport_dimension = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION);
+ reg_state->sec_viewport_dimension_c = REG_READ(DCSURF_SEC_VIEWPORT_DIMENSION_C);
+ reg_state->sec_viewport_start = REG_READ(DCSURF_SEC_VIEWPORT_START);
+ reg_state->sec_viewport_start_c = REG_READ(DCSURF_SEC_VIEWPORT_START_C);
+ reg_state->surface_config = REG_READ(DCSURF_SURFACE_CONFIG);
+ reg_state->tiling_config = REG_READ(DCSURF_TILING_CONFIG);
+ reg_state->clk_cntl = REG_READ(HUBP_CLK_CNTL);
+ reg_state->mall_status = REG_READ(HUBP_MALL_STATUS);
+ reg_state->measure_win_ctrl_dcfclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DCFCLK);
+ reg_state->measure_win_ctrl_dppclk = REG_READ(HUBP_MEASURE_WIN_CTRL_DPPCLK);
+
+ reg_state->blank_offset_0 = REG_READ(BLANK_OFFSET_0);
+ reg_state->blank_offset_1 = REG_READ(BLANK_OFFSET_1);
+ reg_state->cursor_settings = REG_READ(CURSOR_SETTINGS);
+ reg_state->dcn_cur0_ttu_cntl0 = REG_READ(DCN_CUR0_TTU_CNTL0);
+ reg_state->dcn_cur0_ttu_cntl1 = REG_READ(DCN_CUR0_TTU_CNTL1);
+ reg_state->dcn_cur1_ttu_cntl0 = REG_READ(DCN_CUR1_TTU_CNTL0);
+ reg_state->dcn_cur1_ttu_cntl1 = REG_READ(DCN_CUR1_TTU_CNTL1);
+ reg_state->dcn_dmdat_vm_cntl = REG_READ(DCN_DMDATA_VM_CNTL);
+ reg_state->dcn_expansion_mode = REG_READ(DCN_EXPANSION_MODE);
+ reg_state->dcn_global_ttu_cntl = REG_READ(DCN_GLOBAL_TTU_CNTL);
+ reg_state->dcn_surf0_ttu_cntl0 = REG_READ(DCN_SURF0_TTU_CNTL0);
+ reg_state->dcn_surf0_ttu_cntl1 = REG_READ(DCN_SURF0_TTU_CNTL1);
+ reg_state->dcn_surf1_ttu_cntl0 = REG_READ(DCN_SURF1_TTU_CNTL0);
+ reg_state->dcn_surf1_ttu_cntl1 = REG_READ(DCN_SURF1_TTU_CNTL1);
+ reg_state->dcn_ttu_qos_wm = REG_READ(DCN_TTU_QOS_WM);
+ reg_state->dcn_vm_mx_l1_tlb_cntl = REG_READ(DCN_VM_MX_L1_TLB_CNTL);
+ reg_state->dcn_vm_system_aperture_high_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_HIGH_ADDR);
+ reg_state->dcn_vm_system_aperture_low_addr = REG_READ(DCN_VM_SYSTEM_APERTURE_LOW_ADDR);
+ reg_state->dcsurf_flip_control = REG_READ(DCSURF_FLIP_CONTROL);
+ reg_state->dcsurf_flip_control2 = REG_READ(DCSURF_FLIP_CONTROL2);
+ reg_state->dcsurf_primary_meta_surface_address = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_meta_surface_address_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_meta_surface_address_high = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_meta_surface_address_high_c = REG_READ(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_primary_surface_address = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_primary_surface_address_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_primary_surface_address_high = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_primary_surface_address_high_c = REG_READ(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_meta_surface_address = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_meta_surface_address_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_meta_surface_address_high = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_meta_surface_address_high_c = REG_READ(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_secondary_surface_address = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS);
+ reg_state->dcsurf_secondary_surface_address_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_C);
+ reg_state->dcsurf_secondary_surface_address_high = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH);
+ reg_state->dcsurf_secondary_surface_address_high_c = REG_READ(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C);
+ reg_state->dcsurf_surface_control = REG_READ(DCSURF_SURFACE_CONTROL);
+ reg_state->dcsurf_surface_earliest_inuse = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE);
+ reg_state->dcsurf_surface_earliest_inuse_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_C);
+ reg_state->dcsurf_surface_earliest_inuse_high = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH);
+ reg_state->dcsurf_surface_earliest_inuse_high_c = REG_READ(DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_flip_interrupt = REG_READ(DCSURF_SURFACE_FLIP_INTERRUPT);
+ reg_state->dcsurf_surface_inuse = REG_READ(DCSURF_SURFACE_INUSE);
+ reg_state->dcsurf_surface_inuse_c = REG_READ(DCSURF_SURFACE_INUSE_C);
+ reg_state->dcsurf_surface_inuse_high = REG_READ(DCSURF_SURFACE_INUSE_HIGH);
+ reg_state->dcsurf_surface_inuse_high_c = REG_READ(DCSURF_SURFACE_INUSE_HIGH_C);
+ reg_state->dcsurf_surface_pitch = REG_READ(DCSURF_SURFACE_PITCH);
+ reg_state->dcsurf_surface_pitch_c = REG_READ(DCSURF_SURFACE_PITCH_C);
+ reg_state->dst_after_scaler = REG_READ(DST_AFTER_SCALER);
+ reg_state->dst_dimensions = REG_READ(DST_DIMENSIONS);
+ reg_state->dst_y_delta_drq_limit = REG_READ(DST_Y_DELTA_DRQ_LIMIT);
+ reg_state->flip_parameters_0 = REG_READ(FLIP_PARAMETERS_0);
+ reg_state->flip_parameters_1 = REG_READ(FLIP_PARAMETERS_1);
+ reg_state->flip_parameters_2 = REG_READ(FLIP_PARAMETERS_2);
+ reg_state->flip_parameters_3 = REG_READ(FLIP_PARAMETERS_3);
+ reg_state->flip_parameters_4 = REG_READ(FLIP_PARAMETERS_4);
+ reg_state->flip_parameters_5 = REG_READ(FLIP_PARAMETERS_5);
+ reg_state->flip_parameters_6 = REG_READ(FLIP_PARAMETERS_6);
+ reg_state->hubpreq_mem_pwr_ctrl = REG_READ(HUBPREQ_MEM_PWR_CTRL);
+ reg_state->hubpreq_mem_pwr_status = REG_READ(HUBPREQ_MEM_PWR_STATUS);
+ reg_state->nom_parameters_0 = REG_READ(NOM_PARAMETERS_0);
+ reg_state->nom_parameters_1 = REG_READ(NOM_PARAMETERS_1);
+ reg_state->nom_parameters_2 = REG_READ(NOM_PARAMETERS_2);
+ reg_state->nom_parameters_3 = REG_READ(NOM_PARAMETERS_3);
+ reg_state->nom_parameters_4 = REG_READ(NOM_PARAMETERS_4);
+ reg_state->nom_parameters_5 = REG_READ(NOM_PARAMETERS_5);
+ reg_state->nom_parameters_6 = REG_READ(NOM_PARAMETERS_6);
+ reg_state->nom_parameters_7 = REG_READ(NOM_PARAMETERS_7);
+ reg_state->per_line_delivery = REG_READ(PER_LINE_DELIVERY);
+ reg_state->per_line_delivery_pre = REG_READ(PER_LINE_DELIVERY_PRE);
+ reg_state->prefetch_settings = REG_READ(PREFETCH_SETTINGS);
+ reg_state->prefetch_settings_c = REG_READ(PREFETCH_SETTINGS_C);
+ reg_state->ref_freq_to_pix_freq = REG_READ(REF_FREQ_TO_PIX_FREQ);
+ reg_state->uclk_pstate_force = REG_READ(UCLK_PSTATE_FORCE);
+ reg_state->vblank_parameters_0 = REG_READ(VBLANK_PARAMETERS_0);
+ reg_state->vblank_parameters_1 = REG_READ(VBLANK_PARAMETERS_1);
+ reg_state->vblank_parameters_2 = REG_READ(VBLANK_PARAMETERS_2);
+ reg_state->vblank_parameters_3 = REG_READ(VBLANK_PARAMETERS_3);
+ reg_state->vblank_parameters_4 = REG_READ(VBLANK_PARAMETERS_4);
+ reg_state->vblank_parameters_5 = REG_READ(VBLANK_PARAMETERS_5);
+ reg_state->vblank_parameters_6 = REG_READ(VBLANK_PARAMETERS_6);
+ reg_state->vmid_settings_0 = REG_READ(VMID_SETTINGS_0);
+ reg_state->hubpret_control = REG_READ(HUBPRET_CONTROL);
+ reg_state->hubpret_interrupt = REG_READ(HUBPRET_INTERRUPT);
+ reg_state->hubpret_mem_pwr_ctrl = REG_READ(HUBPRET_MEM_PWR_CTRL);
+ reg_state->hubpret_mem_pwr_status = REG_READ(HUBPRET_MEM_PWR_STATUS);
+ reg_state->hubpret_read_line_ctrl0 = REG_READ(HUBPRET_READ_LINE_CTRL0);
+ reg_state->hubpret_read_line_ctrl1 = REG_READ(HUBPRET_READ_LINE_CTRL1);
+ reg_state->hubpret_read_line_status = REG_READ(HUBPRET_READ_LINE_STATUS);
+ reg_state->hubpret_read_line_value = REG_READ(HUBPRET_READ_LINE_VALUE);
+ reg_state->hubpret_read_line0 = REG_READ(HUBPRET_READ_LINE0);
+ reg_state->hubpret_read_line1 = REG_READ(HUBPRET_READ_LINE1);
+}
+
void hubp3_setup(
struct hubp *hubp,
struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
@@ -534,6 +654,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_soft_reset = hubp1_soft_reset,
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
index b7d7adf0b58c..c767e9f4f9b3 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h
@@ -243,7 +243,8 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh)
bool hubp3_construct(
struct dcn20_hubp *hubp2,
@@ -295,10 +296,17 @@ void hubp3_dmdata_set_attributes(
void hubp3_read_state(struct hubp *hubp);
+void hubp3_read_reg_state(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
+
void hubp3_init(struct hubp *hubp);
void hubp3_clear_tiling(struct hubp *hubp);
+uint32_t hubp3_get_current_read_line(struct hubp *hubp);
+
+uint32_t hubp3_get_underflow_status(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
index 7fd582a8a4ba..189045f85039 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c
@@ -68,6 +68,18 @@ void hubp31_program_extended_blank_value(
hubp31_program_extended_blank(hubp, min_dst_y_next_start_optimized);
}
+uint32_t hubp31_get_det_config_error(struct hubp *hubp)
+{
+ uint32_t config_error = 0;
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL,
+ HUBP_SEG_ALLOC_ERR_STATUS,
+ &config_error);
+
+ return config_error;
+}
+
static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
@@ -98,6 +110,7 @@ static struct hubp_funcs dcn31_hubp_funcs = {
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_read_reg_state = hubp3_read_reg_state,
};
bool hubp31_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
index d688db79b750..5952c4671507 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h
@@ -228,7 +228,9 @@
HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_6, REFCYC_PER_META_CHUNK_FLIP_C, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, mask_sh),\
HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_6, REFCYC_PER_VM_REQ_VBLANK, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, VM_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
bool hubp31_construct(
@@ -246,4 +248,6 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable);
void hubp31_program_extended_blank_value(
struct hubp *hubp, unsigned int min_dst_y_next_start_optimized);
+uint32_t hubp31_get_det_config_error(struct hubp *hubp);
+
#endif /* __DC_HUBP_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index f3a21c623f44..a781085b046b 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -118,29 +118,7 @@ void hubp32_cursor_set_attributes(
uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
uint32_t cursor_height = attr->height;
uint32_t cursor_size = cursor_width * cursor_height;
-
- hubp->curs_attr = *attr;
-
- REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
- CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
- REG_UPDATE(CURSOR_SURFACE_ADDRESS,
- CURSOR_SURFACE_ADDRESS, attr->address.low_part);
-
- REG_UPDATE_2(CURSOR_SIZE,
- CURSOR_WIDTH, attr->width,
- CURSOR_HEIGHT, attr->height);
-
- REG_UPDATE_4(CURSOR_CONTROL,
- CURSOR_MODE, attr->color_format,
- CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
- CURSOR_PITCH, hw_pitch,
- CURSOR_LINES_PER_CHUNK, lpc);
-
- REG_SET_2(CURSOR_SETTINGS, 0,
- /* no shift of the cursor HDL schedule */
- CURSOR0_DST_Y_OFFSET, 0,
- /* used to shift the cursor chunk request deadline */
- CURSOR0_CHUNK_HDL_ADJUST, 3);
+ bool use_mall_for_cursor;
switch (attr->color_format) {
case CURSOR_MODE_MONO:
@@ -158,11 +136,49 @@ void hubp32_cursor_set_attributes(
cursor_size *= 8;
break;
}
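+ /* cursors larger than 16KB use MALL */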
+ use_mall_for_cursor = cursor_size > 16384 ? 1 : 0;
+
+ hubp->curs_attr = *attr;
- if (cursor_size > 16384)
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
- else
- REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
+ if (!hubp->cursor_offload) {
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
+ CURSOR_SURFACE_ADDRESS_HIGH, attr->address.high_part);
+ REG_UPDATE(CURSOR_SURFACE_ADDRESS,
+ CURSOR_SURFACE_ADDRESS, attr->address.low_part);
+
+ REG_UPDATE_2(CURSOR_SIZE,
+ CURSOR_WIDTH, attr->width,
+ CURSOR_HEIGHT, attr->height);
+
+ REG_UPDATE_4(CURSOR_CONTROL,
+ CURSOR_MODE, attr->color_format,
+ CURSOR_2X_MAGNIFY, attr->attribute_flags.bits.ENABLE_MAGNIFICATION,
+ CURSOR_PITCH, hw_pitch,
+ CURSOR_LINES_PER_CHUNK, lpc);
+
+ REG_SET_2(CURSOR_SETTINGS, 0,
+ /* no shift of the cursor HDL schedule */
+ CURSOR0_DST_Y_OFFSET, 0,
+ /* used to shift the cursor chunk request deadline */
+ CURSOR0_CHUNK_HDL_ADJUST, 3);
+
+ REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, use_mall_for_cursor);
+ }
+ hubp->att.SURFACE_ADDR_HIGH = attr->address.high_part;
+ hubp->att.SURFACE_ADDR = attr->address.low_part;
+ hubp->att.size.bits.width = attr->width;
+ hubp->att.size.bits.height = attr->height;
+ hubp->att.cur_ctl.bits.mode = attr->color_format;
+
+ hubp->cur_rect.w = attr->width;
+ hubp->cur_rect.h = attr->height;
+
+ hubp->att.cur_ctl.bits.pitch = hw_pitch;
+ hubp->att.cur_ctl.bits.line_per_chunk = lpc;
+ hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION;
+ hubp->att.settings.bits.dst_y_offset = 0;
+ hubp->att.settings.bits.chunk_hdl_adjust = 3;
+ hubp->use_mall_for_cursor = use_mall_for_cursor;
}
void hubp32_init(struct hubp *hubp)
{
@@ -206,6 +222,7 @@ static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_update_mall_sel = hubp32_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_clear_tiling = hubp3_clear_tiling,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp32_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
index 6d060ba12da8..79c583e258c7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c
@@ -209,6 +209,7 @@ static struct hubp_funcs dcn35_hubp_funcs = {
.dmdata_load = hubp2_dmdata_load,
.dmdata_status_done = hubp2_dmdata_status_done,
.hubp_read_state = hubp3_read_state,
+ .hubp_read_reg_state = hubp3_read_reg_state,
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp35_init,
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index 705b98b1b6cc..f01eae50d02f 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -127,6 +127,43 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, format);
}
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ uint32_t mpc_width = {(cfg->width == 17) ? 0 : 1};
+ uint32_t width = {cfg->width};
+
+ if (cfg->layout == DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR)
+ width = (cfg->width == 17) ? 4916 : 35940;
+
+ REG_UPDATE_2(_3DLUT_FL_CONFIG,
+ HUBP0_3DLUT_FL_MODE, cfg->mode,
+ HUBP0_3DLUT_FL_FORMAT, cfg->format);
+
+ REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE,
+ HUBP0_3DLUT_FL_BIAS, cfg->bias,
+ HUBP0_3DLUT_FL_SCALE, cfg->scale);
+
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH,
+ HUBP_3DLUT_ADDRESS_HIGH, cfg->address.lut3d.addr.high_part);
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_LOW,
+ HUBP_3DLUT_ADDRESS_LOW, cfg->address.lut3d.addr.low_part);
+
+ //cross bar
+ REG_UPDATE_8(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_MPC_WIDTH, mpc_width,
+ HUBP_3DLUT_WIDTH, width,
+ HUBP_3DLUT_CROSSBAR_SELECT_CR_R, cfg->crossbar_bit_slice_cr_r,
+ HUBP_3DLUT_CROSSBAR_SELECT_Y_G, cfg->crossbar_bit_slice_y_g,
+ HUBP_3DLUT_CROSSBAR_SELECT_CB_B, cfg->crossbar_bit_slice_cb_b,
+ HUBP_3DLUT_ADDRESSING_MODE, cfg->addr_mode,
+ HUBP_3DLUT_TMZ, cfg->protection_bits,
+ HUBP_3DLUT_ENABLE, cfg->enabled ? 1 : 0);
+}
+
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
@@ -746,21 +783,23 @@ void hubp401_cursor_set_position(
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
hubp->funcs->set_cursor_attributes(hubp, &hubp->curs_attr);
- REG_UPDATE(CURSOR_CONTROL,
- CURSOR_ENABLE, cur_en);
+ if (!hubp->cursor_offload)
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, cur_en);
}
- REG_SET_2(CURSOR_POSITION, 0,
- CURSOR_X_POSITION, x_pos,
- CURSOR_Y_POSITION, y_pos);
+ if (!hubp->cursor_offload) {
+ REG_SET_2(CURSOR_POSITION, 0,
+ CURSOR_X_POSITION, x_pos,
+ CURSOR_Y_POSITION, y_pos);
- REG_SET_2(CURSOR_HOT_SPOT, 0,
- CURSOR_HOT_SPOT_X, pos->x_hotspot,
- CURSOR_HOT_SPOT_Y, pos->y_hotspot);
-
- REG_SET(CURSOR_DST_OFFSET, 0,
- CURSOR_DST_X_OFFSET, dst_x_offset);
+ REG_SET_2(CURSOR_HOT_SPOT, 0,
+ CURSOR_HOT_SPOT_X, pos->x_hotspot,
+ CURSOR_HOT_SPOT_Y, pos->y_hotspot);
+ REG_SET(CURSOR_DST_OFFSET, 0,
+ CURSOR_DST_X_OFFSET, dst_x_offset);
+ }
/* Cursor Position Register Config */
hubp->pos.cur_ctl.bits.cur_enable = cur_en;
hubp->pos.position.bits.x_pos = pos->x;
@@ -1033,6 +1072,8 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp401_clear_tiling,
+ .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
+ .hubp_read_reg_state = hubp3_read_reg_state
};
bool hubp401_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 608e6153fa68..4570b8016de5 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -31,7 +31,7 @@
#include "dcn30/dcn30_hubp.h"
#include "dcn31/dcn31_hubp.h"
#include "dcn32/dcn32_hubp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
#define HUBP_3DLUT_FL_REG_LIST_DCN401(inst)\
SRI_ARR_US(_3DLUT_FL_CONFIG, HUBP, inst),\
@@ -252,7 +252,9 @@
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P0, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_1H_P1, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh)
+ HUBP_SF(HUBP0_DCHUBP_MCACHEID_CONFIG, MCACHEID_MALL_PREF_2H_P1, mask_sh),\
+ HUBP_SF(HUBPRET0_HUBPRET_READ_LINE_VALUE, PIPE_READ_LINE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_SEG_ALLOC_ERR_STATUS, mask_sh)
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
@@ -349,6 +351,10 @@ void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_forma
void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
+void hubp401_program_3dlut_fl_config(
+ struct hubp *hubp,
+ struct hubp_fl_3dlut_config *cfg);
+
void hubp401_clear_tiling(struct hubp *hubp);
void hubp401_vready_at_or_After_vsync(struct hubp *hubp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 4ea13d0bf815..8fe399939220 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -48,7 +48,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dccg.h"
#include "clock_source.h"
#include "clk_mgr.h"
@@ -659,6 +659,20 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
}
}
+static void
+dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_encoder_control encoder_control = {0};
+
+ encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE;
+ encoder_control.engine_id = link->link_enc->analog_engine;
+ encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+
+ bios->funcs->encoder_control(bios, &encoder_control);
+}
+
void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
@@ -688,6 +702,9 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
early_control = lane_count;
tg->funcs->set_early_control(tg, early_control);
+
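+ /* analog RGB (DAC) outputs need an explicit encoder enable through the VBIOS */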
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, true);
}
static enum bp_result link_transmitter_control(
@@ -1085,6 +1102,9 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
if (!pipe_ctx->stream)
return;
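+ /* analog RGB outputs carry no audio */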
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ return;
+
dc = pipe_ctx->stream->ctx->dc;
clk_mgr = dc->clk_mgr;
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
@@ -1121,6 +1141,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (!pipe_ctx || !pipe_ctx->stream)
return;
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ return;
+
dc = pipe_ctx->stream->ctx->dc;
clk_mgr = dc->clk_mgr;
link_hwss = get_link_hwss(pipe_ctx->stream->link, &pipe_ctx->link_res);
@@ -1195,6 +1218,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, false);
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1269,7 +1295,7 @@ void dce110_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(pipe_ctx->stream_res.stream_enc, enable);
}
-static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
{
switch (crtc_id) {
case CONTROLLER_ID_D0:
@@ -1289,7 +1315,7 @@ static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
}
}
-static void populate_audio_dp_link_info(
+void populate_audio_dp_link_info(
const struct pipe_ctx *pipe_ctx,
struct audio_dp_link_info *dp_link_info)
{
@@ -1580,6 +1606,51 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
+static void
+dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_crtc_source_select crtc_source_select = {0};
+ enum engine_id engine_id = link->link_enc->preferred_engine;
+ uint8_t bit_depth;
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ engine_id = link->link_enc->analog_engine;
+
+ switch (pipe_ctx->stream->timing.display_color_depth) {
+ case COLOR_DEPTH_UNDEFINED:
+ bit_depth = 0;
+ break;
+ case COLOR_DEPTH_666:
+ bit_depth = 6;
+ break;
+ default:
+ case COLOR_DEPTH_888:
+ bit_depth = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bit_depth = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bit_depth = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bit_depth = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bit_depth = 16;
+ break;
+ }
+
+ crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
+ crtc_source_select.bit_depth = bit_depth;
+ crtc_source_select.engine_id = engine_id;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+
+ bios->funcs->select_crtc_source(bios, &crtc_source_select);
+}
+
enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -1599,6 +1670,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
hws->funcs.disable_stream_gating(dc, pipe_ctx);
}
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) {
+ dce110_select_crtc_source(pipe_ctx);
+ }
+
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output = {0};
@@ -1678,7 +1753,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers, 2);
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal) &&
+ !dc_is_rgb_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1912,6 +1988,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_apply_edp_fast_boot = false;
bool can_apply_seamless_boot = false;
bool keep_edp_vdd_on = false;
+ bool should_clean_dsc_block = true;
struct dc_bios *dcb = dc->ctx->dc_bios;
DC_LOGGER_INIT();
@@ -1924,10 +2001,8 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
get_edp_streams(context, edp_streams, &edp_stream_num);
- // Check fastboot support, disable on DCE8 because of blank screens
- if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&
- dc->ctx->dce_version != DCE_VERSION_8_1 &&
- dc->ctx->dce_version != DCE_VERSION_8_3) {
+ /* Check fastboot support, disable on DCE 6-8 because of blank screens */
+ if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) {
for (i = 0; i < edp_num; i++) {
edp_link = edp_links[i];
if (edp_link != edp_streams[0]->link)
@@ -2006,9 +2081,15 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
power_down_all_hw_blocks(dc);
/* DSC could be enabled on eDP during VBIOS post.
- * To clean up dsc blocks if eDP is in link but not active.
+ * Clean up the DSC blocks if dpms_off is set for every eDP stream.
*/
- if (edp_link_with_sink && (edp_stream_num == 0))
+ for (i = 0; i < edp_stream_num; i++) {
+ if (!edp_streams[i]->dpms_off) {
+ should_clean_dsc_block = false;
+ }
+ }
+
+ if (should_clean_dsc_block)
clean_up_dsc_blocks(dc);
disable_vga_and_power_gate_all_controllers(dc);
@@ -2254,7 +2335,7 @@ static bool should_enable_fbc(struct dc *dc,
/*
* Enable FBC
*/
-static void enable_fbc(
+void enable_fbc(
struct dc *dc,
struct dc_state *context)
{
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 7cd8c1576988..9c032e449481 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -114,5 +114,12 @@ void build_audio_output(
struct dc_state *state,
const struct pipe_ctx *pipe_ctx,
struct audio_output *audio_output);
+enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id);
+void populate_audio_dp_link_info(
+ const struct pipe_ctx *pipe_ctx,
+ struct audio_dp_link_info *dp_link_info);
+void enable_fbc(
+ struct dc *dc,
+ struct dc_state *context);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 39910f73ecd0..fa62e40a9858 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -55,7 +55,7 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -328,19 +328,25 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
}
DTN_INFO("\n=======HUBP FL======\n");
- DTN_INFO(
- "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n");
+ static const char * const pLabels[] = {
+ "inst", "Enabled ", "Done ", "adr_mode ", "width ", "mpc_width ",
+ "tmz", "xbar_sel_R", "xbar_sel_G", "xbar_sel_B", "adr_hi ",
+ "adr_low", "REFCYC", "Bias", "Scale", "Mode",
+ "Format", "prefetch"};
+
for (i = 0; i < pool->pipe_count; i++) {
struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
struct dcn_fl_regs_st *fl_regs = &s->fl_regs;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
if (!s->blank_en) {
- DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x",
+ uint32_t values[] = {
pool->hubps[i]->inst,
fl_regs->lut_enable,
fl_regs->lut_done,
fl_regs->lut_addr_mode,
fl_regs->lut_width,
+ fl_regs->lut_mpc_width,
fl_regs->lut_tmz,
fl_regs->lut_crossbar_sel_r,
fl_regs->lut_crossbar_sel_g,
@@ -351,8 +357,13 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
fl_regs->lut_fl_bias,
fl_regs->lut_fl_scale,
fl_regs->lut_fl_mode,
- fl_regs->lut_fl_format);
- DTN_INFO("\n");
+ fl_regs->lut_fl_format,
+ dlg_regs->dst_y_prefetch};
+
+ int num_elements = 18;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
}
}
@@ -541,19 +552,43 @@ static void dcn10_log_color_state(struct dc *dc,
dc->caps.color.mpc.ogam_ram,
dc->caps.color.mpc.ocsc);
DTN_INFO("===== MPC RMCM 3DLUT =====\n");
- DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n");
+ static const char * const pLabels[] = {
+ "MPCC", "SIZE", "MODE", "MODE_CUR", "RD_SEL",
+ "30BIT_EN", "WR_EN_MASK", "RAM_SEL", "OUT_NORM_FACTOR", "FL_SEL",
+ "OUT_OFFSET", "OUT_SCALE", "FL_DONE", "SOFT_UNDERFLOW", "HARD_UNDERFLOW",
+ "MEM_PWR_ST", "FORCE", "DIS", "MODE"};
+
for (i = 0; i < pool->mpcc_count; i++) {
struct mpcc_state s = {0};
pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
- if (s.opp_id != 0xf)
- DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n",
- i, s.rmcm_regs.rmcm_3dlut_size, s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur,
- s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask,
- s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel,
- s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done,
- s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
- s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode);
+ if (s.opp_id != 0xf) {
+ uint32_t values[] = {
+ i,
+ s.rmcm_regs.rmcm_3dlut_size,
+ s.rmcm_regs.rmcm_3dlut_mode,
+ s.rmcm_regs.rmcm_3dlut_mode_cur,
+ s.rmcm_regs.rmcm_3dlut_read_sel,
+ s.rmcm_regs.rmcm_3dlut_30bit_en,
+ s.rmcm_regs.rmcm_3dlut_wr_en_mask,
+ s.rmcm_regs.rmcm_3dlut_ram_sel,
+ s.rmcm_regs.rmcm_3dlut_out_norm_factor,
+ s.rmcm_regs.rmcm_3dlut_fl_sel,
+ s.rmcm_regs.rmcm_3dlut_out_offset_r,
+ s.rmcm_regs.rmcm_3dlut_out_scale_r,
+ s.rmcm_regs.rmcm_3dlut_fl_done,
+ s.rmcm_regs.rmcm_3dlut_fl_soft_underflow,
+ s.rmcm_regs.rmcm_3dlut_fl_hard_underflow,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_state,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_force,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_dis,
+ s.rmcm_regs.rmcm_3dlut_mem_pwr_mode};
+
+ int num_elements = 19;
+
+ for (int j = 0; j < num_elements; j++)
+ DTN_INFO("%s \t %8xh\n", pLabels[j], values[j]);
+ }
}
DTN_INFO("\n");
DTN_INFO("===== MPC RMCM Shaper =====\n");
@@ -2210,7 +2245,7 @@ void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
if (lock)
delay_cursor_until_vupdate(dc, pipe);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -3055,6 +3090,9 @@ static void dcn10_update_dchubp_dpp(
}
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
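+ /* Abort any in-flight offloaded cursor update before programming the cursor directly */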
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
@@ -3312,7 +3350,7 @@ void dcn10_prepare_bandwidth(
context,
false);
- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -3628,6 +3666,8 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int y_plane = pipe_ctx->plane_state->dst_rect.y;
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
+ int clip_x = pipe_ctx->plane_state->clip_rect.x;
+ int clip_width = pipe_ctx->plane_state->clip_rect.width;
if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
@@ -3646,7 +3686,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
*/
/**
- * Translate cursor from stream space to plane space.
+ * Translate cursor and clip offset from stream space to plane space.
*
* If the cursor is scaled then we need to scale the position
* to be in the approximately correct place. We can't do anything
@@ -3663,6 +3703,10 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ clip_x = (clip_x - x_plane) * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
+ clip_width = clip_width * pipe_ctx->plane_state->src_rect.width /
+ pipe_ctx->plane_state->dst_rect.width;
}
/**
@@ -3709,30 +3753,18 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
if (param.rotation == ROTATION_ANGLE_0) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
if (param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = 2 * viewport_width - temp_x;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * Without pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
}
// Swap axis and mirror horizontally
@@ -3802,30 +3834,17 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
}
// Mirror horizontally and vertically
else if (param.rotation == ROTATION_ANGLE_180) {
- int viewport_width =
- pipe_ctx->plane_res.scl_data.viewport.width;
- int viewport_x =
- pipe_ctx->plane_res.scl_data.viewport.x;
-
if (!param.mirror) {
- if (pipe_split_on || odm_combine_on) {
- if (pos_cpy.x >= viewport_width + viewport_x) {
- pos_cpy.x = 2 * viewport_width
- - pos_cpy.x + 2 * viewport_x;
- } else {
- uint32_t temp_x = pos_cpy.x;
-
- pos_cpy.x = 2 * viewport_x - pos_cpy.x;
- if (temp_x >= viewport_x +
- (int)hubp->curs_attr.width || pos_cpy.x
- <= (int)hubp->curs_attr.width +
- pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
- }
- }
- } else {
- pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
- }
+ /*
+ * The plane is split into multiple viewports.
+ * The combination of all viewports spans the
+ * entirety of the clip rect.
+ *
+ * Without pipe_split, viewport_width represents
+ * the full width of the clip_rect, so we can just
+ * mirror it.
+ */
+ pos_cpy.x = clip_width - pos_cpy.x + 2 * clip_x;
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index b7c2d3095b25..c8ff8ae85a03 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -54,7 +54,7 @@
#include "dpcd_defs.h"
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
#define DC_LOGGER \
@@ -614,6 +614,14 @@ void dcn20_dpp_pg_control(
* DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
* 1, 1000);
*/
+
+ /* Force-disable the cursor via dpp_force_disable_cursor when the plane is powered down */
+ if (!power_on) {
+ struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
+ if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
+ dpp5->funcs->dpp_force_disable_cursor(dpp5);
+ }
+
break;
default:
BREAK_TO_DEBUGGER();
@@ -1449,7 +1457,7 @@ void dcn20_pipe_control_lock(
!flip_immediate)
dcn20_setup_gsl_group_as_lock(dc, pipe, false);
- if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
+ if (pipe->stream && should_use_dmub_inbox1_lock(dc, pipe->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -1793,6 +1801,9 @@ void dcn20_update_dchubp_dpp(
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
pipe_ctx->update_flags.bits.scaler || viewport_changed == true) &&
pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+ if (dc->hwss.abort_cursor_offload_update)
+ dc->hwss.abort_cursor_offload_update(dc, pipe_ctx);
+
dc->hwss.set_cursor_attribute(pipe_ctx);
dc->hwss.set_cursor_position(pipe_ctx);
@@ -1982,10 +1993,8 @@ static void dcn20_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -2390,10 +2399,10 @@ void dcn20_prepare_bandwidth(
}
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+	 * For assigning optimized_required, use the |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2406,10 +2415,10 @@ void dcn20_prepare_bandwidth(
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
@@ -3129,7 +3138,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
//Enable ability to power gate / don't force power on permanently
- hws->funcs.enable_power_gating_plane(hws, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
// Specific to FPGA dccg and registers
REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
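The |= assignments above matter because prepare_bandwidth may run more than
once before an optimize pass consumes the flag; a plain assignment could clear
a request recorded by an earlier call. A minimal sketch of the accumulation
pattern (hypothetical names, not part of this patch):

	static bool optimized_required;

	static void accumulate_programming_result(bool changed)
	{
		/* once set, stays set until the optimize pass consumes it */
		optimized_required |= changed;
	}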
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 61efb15572ff..e2269211553c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -35,7 +35,7 @@
#include "hw/clk_mgr.h"
#include "dc_dmub_srv.h"
#include "abm.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 37a239219dfe..81bcadf5e57e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -50,10 +50,11 @@
#include "dpcd_defs.h"
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dc_state_priv.h"
-
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
#define DC_LOGGER_INIT(logger)
@@ -1228,3 +1229,54 @@ void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx)
}
}
}
+
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+	if (hubbub && hubbub->funcs->hubbub_read_reg_state)
+		hubbub->funcs->hubbub_read_reg_state(hubbub, out_data->hubbub_reg_state);
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+ struct output_pixel_processor *opp = dc->res_pool->opps[i];
+ struct display_stream_compressor *dsc = dc->res_pool->dscs[i];
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct timing_generator *optc = dc->res_pool->timing_generators[i];
+ struct dccg *dccg = dc->res_pool->dccg;
+
+		if (hubp && hubp->funcs->hubp_read_reg_state)
+			hubp->funcs->hubp_read_reg_state(hubp, out_data->hubp_reg_state[i]);
+
+		if (dpp && dpp->funcs->dpp_read_reg_state)
+			dpp->funcs->dpp_read_reg_state(dpp, out_data->dpp_reg_state[i]);
+
+		if (opp && opp->funcs->opp_read_reg_state)
+			opp->funcs->opp_read_reg_state(opp, out_data->opp_reg_state[i]);
+
+		if (dsc && dsc->funcs->dsc_read_reg_state)
+			dsc->funcs->dsc_read_reg_state(dsc, out_data->dsc_reg_state[i]);
+
+		if (mpc && mpc->funcs->mpc_read_reg_state)
+			mpc->funcs->mpc_read_reg_state(mpc, i, out_data->mpc_reg_state[i]);
+
+		if (optc && optc->funcs->optc_read_reg_state)
+			optc->funcs->optc_read_reg_state(optc, out_data->optc_reg_state[i]);
+
+		if (dccg && dccg->funcs->dccg_read_reg_state)
+			dccg->funcs->dccg_read_reg_state(dccg, out_data->dccg_reg_state[i]);
+ }
+}
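A hypothetical caller for the hook registered below, e.g. a debug handler that
dumps register state when an underflow interrupt fires (names assumed, not
part of this patch):

	static void dump_underflow_state(struct dc *dc, struct timing_generator *tg)
	{
		struct dc_underflow_debug_data data = {0};

		if (dc->hwss.get_underflow_debug_data)
			dc->hwss.get_underflow_debug_data(dc, tg, &data);
	}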
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
index 4b90b781c4f2..40afbbfb5b9c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h
@@ -29,6 +29,7 @@
#include "hw_sequencer_private.h"
struct dc;
+struct dc_underflow_debug_data;
void dcn30_init_hw(struct dc *dc);
void dcn30_program_all_writeback_pipes_in_tree(
@@ -98,4 +99,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
void dcn30_wait_for_all_pending_updates(const struct pipe_ctx *pipe_ctx);
+void dcn30_get_underflow_debug_data(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
+
#endif /* __DC_HWSS_DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 2ac5d54d1626..d7ff55669bac 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
@@ -110,6 +110,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.is_abm_supported = dcn21_is_abm_supported,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 8ba934b83957..d1ecdb92b072 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -45,7 +45,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "dcn21/dcn21_hwseq.h"
#include "inc/link_enc_cfg.h"
@@ -710,7 +710,8 @@ bool dcn31_set_backlight_level(struct pipe_ctx *pipe_ctx,
panel_cntl->inst,
panel_cntl->pwrseq_inst);
- dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
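+	/* backlight over AMD AUX is programmed outside the DMUB ABM path */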
+ if (backlight_level_params->control_type != BACKLIGHT_CONTROL_AMD_AUX)
+ dmub_abm_set_backlight(dc, backlight_level_params, panel_cntl->inst);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 556f4fe57eda..5a6a459da224 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
@@ -112,6 +112,7 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn31_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 560984533950..4ee6ed610de0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -108,6 +108,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index 9f454fa90e65..79faab1125d4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
@@ -115,6 +115,7 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.calculate_pix_rate_divider = dcn314_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn31_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn314_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 416b1dca3dac..bf19ba65d09a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -49,7 +49,7 @@
#include "dcn20/dcn20_optc.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32/dcn32_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "../dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"
@@ -1052,7 +1052,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
}
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
@@ -1061,6 +1061,7 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index b971356d30b1..c19ef075c882 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -121,6 +121,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.program_outstanding_updates = dcn32_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn32_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 764eff6a4ec6..7aa0f452e8f7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -46,7 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn10/dcn10_hwseq.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -364,6 +364,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
@@ -816,8 +817,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context)
{
struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dccg *dccg = dc->res_pool->dccg;
-
/* enable DCFCLK current DCHUB */
pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
@@ -825,7 +824,6 @@ void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
/* initialize HUBP on power up */
pipe_ctx->plane_res.hubp->funcs->hubp_init(pipe_ctx->plane_res.hubp);
/*make sure DPPCLK is on*/
- dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, true);
dpp->funcs->dpp_dppclk_control(dpp, false, true);
/* make sure OPP_PIPE_CLOCK_EN = 1 */
pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
@@ -859,7 +857,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
- struct dccg *dccg = dc->res_pool->dccg;
dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
@@ -878,7 +875,6 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->funcs->hubp_clk_cntl(hubp, false);
dpp->funcs->dpp_dppclk_control(dpp, false, false);
- dccg->funcs->dccg_root_gate_disable_control(dccg, dpp->inst, false);
hubp->power_gated = true;
@@ -1592,3 +1588,141 @@ void dcn35_hardware_release(struct dc *dc)
if (dc->hwss.hw_block_power_up)
dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
+
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ if (!dc_dmub_srv_is_cursor_offload_enabled(dc))
+ return;
+
+ /*
+ * Insert a blank update to modify the write index and set pipe_mask to 0.
+ *
+ * While the DMU is interlocked with driver full pipe programming via
+ * the DMU HW lock, if the cursor update begins to execute after a full
+	 * pipe programming occurs, there are two possible issues:
+ *
+ * 1. Outdated cursor information is programmed, replacing the current update
+ * 2. The cursor update in firmware holds the cursor lock, preventing
+ * the current update from being latched atomically in the same frame
+ * as the rest of the update.
+ *
+ * This blank update, treated as a no-op, will allow the firmware to skip
+ * the programming.
+ */
+
+ if (dc->hwss.begin_cursor_offload_update)
+ dc->hwss.begin_cursor_offload_update(dc, pipe);
+
+ if (dc->hwss.commit_cursor_offload_update)
+ dc->hwss.commit_cursor_offload_update(dc, pipe);
+}
+
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
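+	/* stamp the start marker first; the payload body follows and commit stamps the finish marker */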
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_start = write_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = true;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = true;
+}
+
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ volatile struct dmub_shared_state_cursor_offload_stream_v1 *shared_stream;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (pipe->plane_res.hubp)
+ pipe->plane_res.hubp->cursor_offload = false;
+
+ if (pipe->plane_res.dpp)
+ pipe->plane_res.dpp->cursor_offload = false;
+
+ if (!top_pipe)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ shared_stream = &dc->ctx->dmub_srv->dmub->shared_state[DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1]
+ .data.cursor_offload_v1.offload_streams[stream_idx];
+
+ shared_stream->last_write_idx = write_idx;
+
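+	/* publish the finish marker and the new write_idx only after the payload body is complete */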
+ cs->offload_streams[stream_idx].write_idx = write_idx;
+ cs->offload_streams[stream_idx].payloads[payload_idx].write_idx_finish = write_idx;
+}
+
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn30_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn30;
+
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS = dpp->att.fp_scale_bias.bits.fp_bias;
+ p->CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE = dpp->att.fp_scale_bias.bits.fp_scale;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream)
+{
+ dc_dmub_srv_control_cursor_offload(dc, context, stream, true);
+}
+
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ dc_dmub_srv_program_cursor_now(dc, pipe);
+}
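Taken together, begin/update/commit implement a seqlock-style ring: begin
stamps write_idx_start for the next slot, update fills the per-pipe payload,
and commit stamps write_idx_finish and publishes the new write_idx. A sketch
of the reader-side validity check this implies (struct and field names assumed
from the writers above, not a real firmware API):

	static bool payload_is_coherent(const volatile struct dmub_cursor_offload_stream_v1 *s)
	{
		uint32_t idx = s->write_idx;
		uint32_t slot = idx % ARRAY_SIZE(s->payloads);

		/* trust a slot only when both markers match the published index */
		return s->payloads[slot].write_idx_start == idx &&
		       s->payloads[slot].write_idx_finish == idx;
	}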
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index 0b1d6f608edd..1ff41dba556c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -101,4 +101,12 @@ bool dcn35_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx);
void dcn35_hardware_release(struct dc *dc);
+void dcn35_abort_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_begin_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_commit_cursor_offload_update(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+void dcn35_notify_cursor_offload_drr_update(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+void dcn35_program_cursor_offload_now(struct dc *dc, const struct pipe_ctx *pipe);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index aefb7c473741..5a66c9db2670 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -86,6 +86,12 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.set_cursor_position = dcn10_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn35_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
@@ -127,6 +133,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.enable_plane = dcn20_enable_plane,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn35_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index a580a55695c3..09e60158f0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -122,6 +122,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
.set_long_vtotal = dcn35_set_long_vblank,
.calculate_pix_rate_divider = dcn32_calculate_pix_rate_divider,
.setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn351_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index cc9f40d97af2..2fbc22afb89c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -25,10 +25,12 @@
#include "dpcd_defs.h"
#include "clk_mgr.h"
#include "dsc.h"
-#include "link.h"
+#include "link_service.h"
+#include "custom_float.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn10/dcn10_cm_common.h"
+#include "dcn10/dcn10_hubbub.h"
#include "dcn20/dcn20_optc.h"
#include "dcn30/dcn30_cm_common.h"
#include "dcn32/dcn32_hwseq.h"
@@ -36,6 +38,7 @@
#include "dcn401/dcn401_resource.h"
#include "dc_state_priv.h"
#include "link_enc_cfg.h"
+#include "../hw_sequencer.h"
#define DC_LOGGER_INIT(logger)
@@ -200,6 +203,9 @@ void dcn401_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
@@ -810,9 +816,12 @@ enum dc_status dcn401_enable_stream_timing(
if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
- /* if we are borrowing from hblank, h_addressable needs to be adjusted */
- if (dc->debug.enable_hblank_borrow)
- patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow;
+ /* if we are padding, h_addressable needs to be adjusted */
+ if (dc->debug.enable_hblank_borrow) {
+ patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding;
+ patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding;
+ patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+ }
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
@@ -1378,30 +1387,30 @@ void dcn401_prepare_bandwidth(struct dc *dc,
false);
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+	 * For assigning optimized_required, use the |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
/* update timeout thresholds */
if (hubbub->funcs->program_arbiter) {
- dc->wm_optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
+ dc->optimized_required |= hubbub->funcs->program_arbiter(hubbub, &context->bw_ctx.bw.dcn.arb_regs, false);
}
/* decrease compbuf size */
if (hubbub->funcs->program_compbuf_segments) {
compbuf_size = context->bw_ctx.bw.dcn.arb_regs.compbuf_size;
- dc->wm_optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
+ dc->optimized_required |= (compbuf_size != dc->current_state->bw_ctx.bw.dcn.arb_regs.compbuf_size);
hubbub->funcs->program_compbuf_segments(hubbub, compbuf_size, false);
}
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, false);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
if (p_state_change_support != context->bw_ctx.bw.dcn.clk.p_state_change_support) {
@@ -1420,9 +1429,9 @@ void dcn401_optimize_bandwidth(
/* enable fams2 if needed */
if (dc->debug.fams2_config.bits.enable) {
- dcn401_fams2_global_control_lock(dc, context, true);
+ dcn401_dmub_hw_control_lock(dc, context, true);
dcn401_fams2_update_config(dc, context, true);
- dcn401_fams2_global_control_lock(dc, context, false);
+ dcn401_dmub_hw_control_lock(dc, context, false);
}
/* program dchubbub watermarks */
@@ -1461,14 +1470,17 @@ void dcn401_optimize_bandwidth(
}
}
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock)
{
	/* always use the HW lock for now */
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
- if (!dc->ctx || !dc->ctx->dmub_srv || !dc->debug.fams2_config.bits.enable)
+ if (!dc->ctx || !dc->ctx->dmub_srv)
+ return;
+
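+	/* the DMUB HW lock is only needed while FAMS2 or cursor offload is active in firmware */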
+ if (!dc->debug.fams2_config.bits.enable && !dc_dmub_srv_is_cursor_offload_enabled(dc))
return;
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
@@ -1478,12 +1490,12 @@ void dcn401_fams2_global_control_lock(struct dc *dc,
dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
}
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params)
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params)
{
- struct dc *dc = params->fams2_global_control_lock_fast_params.dc;
- bool lock = params->fams2_global_control_lock_fast_params.lock;
+ struct dc *dc = params->dmub_hw_control_lock_fast_params.dc;
+ bool lock = params->dmub_hw_control_lock_fast_params.lock;
- if (params->fams2_global_control_lock_fast_params.is_required) {
+ if (params->dmub_hw_control_lock_fast_params.is_required) {
union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
@@ -1590,6 +1602,143 @@ void dcn401_update_odm(struct dc *dc, struct dc_state *context,
dc->hwseq->funcs.blank_pixel_data(dc, otg_master, true);
}
+static void dcn401_add_dsc_sequence_for_odm_change(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *old_pipe;
+ struct pipe_ctx *new_pipe;
+ struct pipe_ctx *old_opp_heads[MAX_PIPES];
+ struct pipe_ctx *old_otg_master;
+ int old_opp_head_count = 0;
+ int i;
+
+ old_otg_master = &dc->current_state->res_ctx.pipe_ctx[otg_master->pipe_idx];
+
+ if (resource_is_pipe_type(old_otg_master, OTG_MASTER)) {
+ old_opp_head_count = resource_get_opp_heads_for_otg_master(old_otg_master,
+ &dc->current_state->res_ctx,
+ old_opp_heads);
+ } else {
+ old_otg_master = NULL;
+ }
+
+ /* Process new DSC configuration if DSC is enabled */
+ if (otg_master->stream_res.dsc && otg_master->stream->timing.flags.DSC) {
+ struct dc_stream_state *stream = otg_master->stream;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int last_dsc_calc = 0;
+ bool should_use_dto_dscclk = (dc->res_pool->dccg->funcs->set_dto_dscclk != NULL) &&
+ stream->timing.pix_clk_100hz > 480000;
+
+ /* Count ODM pipes */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ int num_slices_h = stream->timing.dsc_cfg.num_slices_h / opp_cnt;
+
+ /* Step 1: Set DTO DSCCLK for main DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ otg_master->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Step 2: Calculate and set DSC config for main DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, otg_master, true, opp_cnt);
+
+ /* Step 3: Enable main DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, otg_master);
+
+ /* Step 4: Configure and enable ODM DSC blocks */
+ for (odm_pipe = otg_master->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ if (!odm_pipe->stream_res.dsc)
+ continue;
+
+ /* Set DTO DSCCLK for ODM DSC if needed */
+ if (should_use_dto_dscclk) {
+ hwss_add_dccg_set_dto_dscclk(seq_state, dc->res_pool->dccg,
+ odm_pipe->stream_res.dsc->inst, num_slices_h);
+ }
+
+ /* Calculate and set DSC config for ODM DSC */
+ last_dsc_calc = *seq_state->num_steps;
+ hwss_add_dsc_calculate_and_set_config(seq_state, odm_pipe, true, opp_cnt);
+
+ /* Enable ODM DSC block */
+ hwss_add_dsc_enable_with_opp(seq_state, odm_pipe);
+ }
+
+ /* Step 5: Configure DSC in timing generator */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg,
+ &seq_state->steps[last_dsc_calc].params.dsc_calculate_and_set_config_params.dsc_optc_cfg, true);
+ } else if (otg_master->stream_res.dsc && !otg_master->stream->timing.flags.DSC) {
+ /* Disable DSC in OPTC */
+ hwss_add_tg_set_dsc_config(seq_state, otg_master->stream_res.tg, NULL, false);
+
+ hwss_add_dsc_disconnect(seq_state, otg_master->stream_res.dsc);
+ }
+
+ /* Disable DSC for old pipes that no longer need it */
+ if (old_otg_master && old_otg_master->stream_res.dsc) {
+ for (i = 0; i < old_opp_head_count; i++) {
+ old_pipe = old_opp_heads[i];
+ new_pipe = &context->res_ctx.pipe_ctx[old_pipe->pipe_idx];
+
+ /* If old pipe had DSC but new pipe doesn't, disable the old DSC */
+ if (old_pipe->stream_res.dsc && !new_pipe->stream_res.dsc) {
+ /* Then disconnect DSC block */
+ hwss_add_dsc_disconnect(seq_state, old_pipe->stream_res.dsc);
+ }
+ }
+ }
+}
+
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state)
+{
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ int opp_inst[MAX_PIPES] = {0};
+ int opp_head_count;
+ int odm_slice_width = resource_get_odm_slice_dst_width(otg_master, false);
+ int last_odm_slice_width = resource_get_odm_slice_dst_width(otg_master, true);
+ int i;
+
+ opp_head_count = resource_get_opp_heads_for_otg_master(
+ otg_master, &context->res_ctx, opp_heads);
+
+ for (i = 0; i < opp_head_count; i++)
+ opp_inst[i] = opp_heads[i]->stream_res.opp->inst;
+
+ /* Add ODM combine/bypass operation to sequence */
+ if (opp_head_count > 1) {
+ hwss_add_optc_set_odm_combine(seq_state, otg_master->stream_res.tg, opp_inst,
+ opp_head_count, odm_slice_width, last_odm_slice_width);
+ } else {
+ hwss_add_optc_set_odm_bypass(seq_state, otg_master->stream_res.tg, &otg_master->stream->timing);
+ }
+
+ /* Add OPP operations to sequence */
+ for (i = 0; i < opp_head_count; i++) {
+ /* Add OPP pipe clock control operation */
+ hwss_add_opp_pipe_clock_control(seq_state, opp_heads[i]->stream_res.opp, true);
+
+ /* Add OPP program left edge extra pixel operation */
+ hwss_add_opp_program_left_edge_extra_pixel(seq_state, opp_heads[i]->stream_res.opp,
+ opp_heads[i]->stream->timing.pixel_encoding, resource_is_pipe_type(opp_heads[i], OTG_MASTER));
+ }
+
+ /* Add DSC update operations to sequence */
+ dcn401_add_dsc_sequence_for_odm_change(dc, context, otg_master, seq_state);
+
+ /* Add blank pixel data operation if needed */
+ if (!resource_is_pipe_type(otg_master, DPP_PIPE)) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(
+ dc, otg_master, true, seq_state);
+ }
+}
+
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
@@ -1619,20 +1768,28 @@ void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx,
void dcn401_hardware_release(struct dc *dc)
{
- dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
-
- /* If pstate unsupported, or still supported
- * by firmware, force it supported by dcn
- */
- if (dc->current_state) {
- if ((!dc->clk_mgr->clks.p_state_change_support ||
- dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
- dc->res_pool->hubbub->funcs->force_pstate_change_control)
- dc->res_pool->hubbub->funcs->force_pstate_change_control(
- dc->res_pool->hubbub, true, true);
-
- dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
- dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ if (!dc->debug.disable_force_pstate_allow_on_hw_release) {
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
+
+ /* If pstate unsupported, or still supported
+ * by firmware, force it supported by dcn
+ */
+ if (dc->current_state) {
+ if ((!dc->clk_mgr->clks.p_state_change_support ||
+ dc->current_state->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) &&
+ dc->res_pool->hubbub->funcs->force_pstate_change_control)
+ dc->res_pool->hubbub->funcs->force_pstate_change_control(
+ dc->res_pool->hubbub, true, true);
+
+ dc->current_state->bw_ctx.bw.dcn.clk.p_state_change_support = true;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ } else {
+ if (dc->current_state) {
+ dc->clk_mgr->clks.p_state_change_support = false;
+ dc->clk_mgr->funcs->update_clocks(dc->clk_mgr, dc->current_state, true);
+ }
+ dc_dmub_srv_fams2_update_config(dc, dc->current_state, false);
}
}
@@ -2019,10 +2176,8 @@ void dcn401_program_pipe(
* updating on slave planes
*/
if (pipe_ctx->update_flags.bits.enable ||
- pipe_ctx->update_flags.bits.plane_changed ||
- pipe_ctx->stream->update_flags.bits.out_tf ||
- (pipe_ctx->plane_state &&
- pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf)
hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
/* If the pipe has been enabled or has a different opp, we
@@ -2072,6 +2227,157 @@ void dcn401_program_pipe(
}
}
+/*
+ * dcn401_program_pipe_sequence - Sequence-based version of dcn401_program_pipe
+ *
+ * Instead of calling hardware programming functions directly, this appends
+ * sequence steps to the provided block_sequence array, which can later be
+ * executed via hwss_execute_sequence.
+ */
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Only need to unblank on top pipe */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER)) {
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.odm ||
+ pipe_ctx->stream->update_flags.bits.abm_level) {
+ if (dc->hwseq->funcs.blank_pixel_data_sequence)
+ dc->hwseq->funcs.blank_pixel_data_sequence(dc, pipe_ctx,
+ !pipe_ctx->plane_state || !pipe_ctx->plane_state->visible,
+ seq_state);
+ }
+ }
+
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe) {
+
+ /* Step 1: Program global sync */
+ hwss_add_tg_program_global_sync(seq_state, pipe_ctx->stream_res.tg,
+ dcn401_calculate_vready_offset_for_group(pipe_ctx),
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vstartup_lines,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_offset_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.vupdate_vupdate_width_pixels,
+ (unsigned int)pipe_ctx->global_sync.dcn4x.pstate_keepout_start_lines);
+
+ /* Step 2: Wait for VACTIVE state (if not phantom pipe) */
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ hwss_add_tg_wait_for_state(seq_state, pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ /* Step 3: Set VTG params */
+ hwss_add_tg_set_vtg_params(seq_state, pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
+
+ /* Step 4: Setup vupdate interrupt (if available) */
+ if (hws->funcs.setup_vupdate_interrupt)
+ dcn401_setup_vupdate_interrupt_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.odm) {
+ if (hws->funcs.update_odm_sequence)
+ hws->funcs.update_odm_sequence(dc, context, pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.enable) {
+ if (dc->hwss.enable_plane_sequence)
+ dc->hwss.enable_plane_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->update_flags.bits.det_size) {
+ if (dc->res_pool->hubbub->funcs->program_det_size) {
+ hwss_add_hubp_program_det_size(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+ }
+
+ if (dc->res_pool->hubbub->funcs->program_det_segments) {
+ hwss_add_hubp_program_det_segments(seq_state, dc->res_pool->hubbub,
+ pipe_ctx->plane_res.hubp->inst, pipe_ctx->hubp_regs.det_size);
+ }
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.raw ||
+ pipe_ctx->plane_state->update_flags.raw ||
+ pipe_ctx->stream->update_flags.raw)) {
+
+ if (dc->hwss.update_dchubp_dpp_sequence)
+ dc->hwss.update_dchubp_dpp_sequence(dc, pipe_ctx, context, seq_state);
+ }
+
+ if (pipe_ctx->plane_state && (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->plane_state->update_flags.bits.hdr_mult)) {
+
+ hws->funcs.set_hdr_multiplier_sequence(pipe_ctx, seq_state);
+ }
+
+ if (pipe_ctx->plane_state &&
+ (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+ pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+ pipe_ctx->plane_state->update_flags.bits.lut_3d ||
+ pipe_ctx->update_flags.bits.enable)) {
+
+ hwss_add_dpp_set_input_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->plane_state);
+ }
+
+	/* dcn10_translate_regamma_to_hw_format takes 750us to finish, so
+	 * only do gamma programming when powering on; an internal memcmp
+	 * avoids updating on slave planes
+	 */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf) {
+ hwss_add_dpp_set_output_transfer_func(seq_state, dc, pipe_ctx, pipe_ctx->stream);
+ }
+
+ /* If the pipe has been enabled or has a different opp, we
+ * should reprogram the fmt. This deals with cases where
+	 * interaction between mpc and odm combine on different streams
+ * causes a different pipe to be chosen to odm combine with.
+ */
+ if (pipe_ctx->update_flags.bits.enable
+ || pipe_ctx->update_flags.bits.opp_changed) {
+
+ hwss_add_opp_set_dyn_expansion(seq_state, pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601,
+ pipe_ctx->stream->timing.display_color_depth, pipe_ctx->stream->signal);
+
+ hwss_add_opp_program_fmt(seq_state, pipe_ctx->stream_res.opp,
+ &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping);
+ }
+
+ /* Set ABM pipe after other pipe configurations done */
+ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
+ if (pipe_ctx->stream_res.abm) {
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ hwss_add_abm_set_level(seq_state, pipe_ctx->stream_res.abm, pipe_ctx->stream->abm_level);
+ }
+ }
+
+ if (pipe_ctx->update_flags.bits.test_pattern_changed) {
+ struct output_pixel_processor *odm_opp = pipe_ctx->stream_res.opp;
+
+ hwss_add_opp_program_bit_depth_reduction(seq_state, odm_opp, true, pipe_ctx);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_opp,
+ pipe_ctx->stream_res.test_pattern_params.test_pattern,
+ pipe_ctx->stream_res.test_pattern_params.color_space,
+ pipe_ctx->stream_res.test_pattern_params.color_depth,
+ (struct tg_color){0},
+ false,
+ pipe_ctx->stream_res.test_pattern_params.width,
+ pipe_ctx->stream_res.test_pattern_params.height,
+ pipe_ctx->stream_res.test_pattern_params.offset);
+ }
+
+}
+
void dcn401_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
@@ -2149,7 +2455,6 @@ void dcn401_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
-
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
@@ -2228,11 +2533,11 @@ void dcn401_program_front_end_for_ctx(
/* Avoid underflow by checking pipe line read when adding 2nd plane. */
if (hws->wa.wait_hubpret_read_start_during_mpo_transition &&
- !pipe->top_pipe &&
- pipe->stream &&
- pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
- dc->current_state->stream_status[0].plane_count == 1 &&
- context->stream_status[0].plane_count > 1) {
+ !pipe->top_pipe &&
+ pipe->stream &&
+ pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start &&
+ dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
}
@@ -2344,7 +2649,6 @@ void dcn401_post_unlock_program_front_end(
*/
if (hwseq->funcs.update_force_pstate)
dc->hwseq->funcs.update_force_pstate(dc, context);
-
/* Only program the MALL registers after all the main and phantom pipes
* are done programming.
*/
@@ -2658,3 +2962,1084 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
+
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe)
+{
+ volatile struct dmub_cursor_offload_v1 *cs = dc->ctx->dmub_srv->dmub->cursor_offload_v1;
+ const struct pipe_ctx *top_pipe = resource_get_otg_master(pipe);
+ const struct hubp *hubp = pipe->plane_res.hubp;
+ const struct dpp *dpp = pipe->plane_res.dpp;
+ volatile struct dmub_cursor_offload_pipe_data_dcn401_v1 *p;
+ uint32_t stream_idx, write_idx, payload_idx;
+
+ if (!top_pipe || !hubp || !dpp)
+ return;
+
+ stream_idx = top_pipe->pipe_idx;
+ write_idx = cs->offload_streams[stream_idx].write_idx + 1; /* new payload (+1) */
+ payload_idx = write_idx % ARRAY_SIZE(cs->offload_streams[stream_idx].payloads);
+
+ p = &cs->offload_streams[stream_idx].payloads[payload_idx].pipe_data[pipe->pipe_idx].dcn401;
+
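+	/* snapshot the cursor state most recently cached in hubp->att/pos and dpp->att */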
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS = hubp->att.SURFACE_ADDR;
+ p->CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH = hubp->att.SURFACE_ADDR_HIGH;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH = hubp->att.size.bits.width;
+ p->CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT = hubp->att.size.bits.height;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION = hubp->pos.position.bits.x_pos;
+ p->CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION = hubp->pos.position.bits.y_pos;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X = hubp->pos.hot_spot.bits.x_hot;
+ p->CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y = hubp->pos.hot_spot.bits.y_hot;
+ p->CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET = hubp->pos.dst_offset.bits.dst_x_offset;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE = hubp->pos.cur_ctl.bits.cur_enable;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE = hubp->att.cur_ctl.bits.mode;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY = hubp->pos.cur_ctl.bits.cur_2x_magnify;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH = hubp->att.cur_ctl.bits.pitch;
+ p->CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK = hubp->att.cur_ctl.bits.line_per_chunk;
+
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE = dpp->att.cur0_ctl.bits.cur0_enable;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_MODE = dpp->att.cur0_ctl.bits.mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE = dpp->att.cur0_ctl.bits.expansion_mode;
+ p->CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN = dpp->att.cur0_ctl.bits.cur0_rom_en;
+ p->CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 = 0x000000;
+ p->CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 = 0xFFFFFF;
+
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_bias_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y =
+ dpp->att.fp_scale_bias_g_y.bits.fp_scale_g_y;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_bias_rb_crcb;
+ p->CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB =
+ dpp->att.fp_scale_bias_rb_crcb.bits.fp_scale_rb_crcb;
+
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET = hubp->att.settings.bits.dst_y_offset;
+ p->HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST = hubp->att.settings.bits.chunk_hdl_adjust;
+ p->HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR = hubp->use_mall_for_cursor;
+
+ cs->offload_streams[stream_idx].payloads[payload_idx].pipe_mask |= (1u << pipe->pipe_idx);
+}
+
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ /* Check and set DC_IP_REQUEST_CNTL if needed */
+ if (REG(DC_IP_REQUEST_CNTL)) {
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ if (org_ip_request_cntl == 0)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+ }
+
+ /* DPP power gating control */
+ hwss_add_dpp_pg_control(seq_state, hws, dpp->inst, false);
+
+ /* HUBP power gating control */
+ hwss_add_hubp_pg_control(seq_state, hws, hubp->inst, false);
+
+ /* HUBP reset */
+ hwss_add_hubp_reset(seq_state, hubp);
+
+ /* DPP reset */
+ hwss_add_dpp_reset(seq_state, dpp);
+
+ /* Restore DC_IP_REQUEST_CNTL if it was originally 0 */
+ if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL))
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ DC_LOG_DEBUG("Power gated front end %d\n", hubp->inst);
+
+ /* DPP root clock control */
+ hwss_add_dpp_root_clock_control(seq_state, hws, dpp->inst, false);
+}
+
+/* trigger HW to start disconnect plane from stream on the next vsync using block sequence */
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ int dpp_id = pipe_ctx->plane_res.dpp->inst;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+
+ mpc_tree_params = &(opp->mpc_tree_params);
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
+
+	/* Already reset */
+ if (mpcc_to_remove == NULL)
+ return;
+
+ /* Step 1: Remove MPCC from MPC tree */
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, mpcc_to_remove);
+
+ // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
+ // so don't wait for MPCC_IDLE in the programming sequence
+ if (dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM) {
+ /* Step 2: Set MPCC disconnect pending flag */
+ hwss_add_opp_set_mpcc_disconnect_pending(seq_state, opp, pipe_ctx->plane_res.mpcc_inst, true);
+ }
+
+ /* Step 3: Set optimized required flag */
+ hwss_add_dc_set_optimized_required(seq_state, dc, true);
+
+ /* Step 4: Disconnect HUBP if function exists */
+ if (hubp->funcs->hubp_disconnect)
+ hwss_add_hubp_disconnect(seq_state, hubp);
+
+ /* Step 5: Verify pstate change high if debug sanity checks are enabled */
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state)
+{
+ struct tg_color black_color = {0};
+ struct stream_resource *stream_res = &pipe_ctx->stream_res;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_color_space color_space = stream->output_color_space;
+ enum controller_dp_test_pattern test_pattern = CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR;
+ enum controller_dp_color_space test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ struct pipe_ctx *odm_pipe;
+ struct rect odm_slice_src;
+
+ if (stream->link->test_pattern_enabled)
+ return;
+
+ /* get opp dpg blank color */
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ if (blank) {
+ /* Set ABM immediate disable */
+ hwss_add_abm_set_immediate_disable(seq_state, dc, pipe_ctx);
+
+ if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ test_pattern_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ }
+ } else {
+ test_pattern = CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ }
+
+ odm_pipe = pipe_ctx;
+
+ /* Set display pattern generator for all ODM pipes */
+ while (odm_pipe->next_odm_pipe) {
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ /* Set display pattern generator for final ODM pipe */
+ odm_slice_src = resource_get_odm_slice_src_rect(odm_pipe);
+
+ hwss_add_opp_set_disp_pattern_generator(seq_state,
+ odm_pipe->stream_res.opp,
+ test_pattern,
+ test_pattern_color_space,
+ stream->timing.display_color_depth,
+ black_color,
+ true,
+ odm_slice_src.width,
+ odm_slice_src.height,
+ odm_slice_src.x);
+
+ /* Handle ABM level setting when not blanking */
+ if (!blank) {
+ if (stream_res->abm) {
+ /* Set pipe for ABM */
+ hwss_add_abm_set_pipe(seq_state, dc, pipe_ctx);
+
+ /* Set ABM level */
+ hwss_add_abm_set_level(seq_state, stream_res->abm, stream->abm_level);
+ }
+ }
+}
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ int i_wb, i_pipe;
+
+ if (!stream || stream->num_wb_info > dc->res_pool->res_cap->num_dwb)
+ return;
+
+ /* For each writeback pipe */
+ for (i_wb = 0; i_wb < stream->num_wb_info; i_wb++) {
+ /* Get direct pointer to writeback info */
+ struct dc_writeback_info *wb_info = (struct dc_writeback_info *)&stream->writeback_info[i_wb];
+ int mpcc_inst = -1;
+
+ if (wb_info->wb_enabled) {
+ /* Get the MPCC instance for writeback_source_plane */
+ for (i_pipe = 0; i_pipe < dc->res_pool->pipe_count; i_pipe++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i_pipe];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ if (pipe_ctx->plane_state == wb_info->writeback_source_plane) {
+ mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
+ break;
+ }
+ }
+
+ if (mpcc_inst == -1) {
+ /* Disable writeback pipe and disconnect from MPCC
+ * if source plane has been removed
+ */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ continue;
+ }
+
+ ASSERT(wb_info->dwb_pipe_inst < dc->res_pool->res_cap->num_dwb);
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* Writeback pipe already enabled, only need to update */
+ dcn401_update_writeback_sequence(dc, wb_info, context, seq_state);
+ } else {
+ /* Enable writeback pipe and connect to MPCC */
+ dcn401_enable_writeback_sequence(dc, wb_info, context, mpcc_inst, seq_state);
+ }
+ } else {
+ /* Disable writeback pipe and disconnect from MPCC */
+ dcn401_disable_writeback_sequence(dc, wb_info, seq_state);
+ }
+ }
+}
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update DWBC with new parameters */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Configure MCIF_WB buffer settings */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+
+ /* Configure MCIF_WB arbitration */
+ hwss_add_mcif_wb_config_arb(seq_state, mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]);
+
+ /* Enable MCIF_WB */
+ hwss_add_mcif_wb_enable(seq_state, mcif_wb);
+
+ /* Set DWB MUX to connect writeback to MPCC */
+ hwss_add_mpc_set_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst, mpcc_inst);
+
+ /* Enable DWBC */
+ hwss_add_dwbc_enable(seq_state, dwb, &wb_info->dwb_params);
+}
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Disable DWBC */
+ hwss_add_dwbc_disable(seq_state, dwb);
+
+ /* Disable DWB MUX */
+ hwss_add_mpc_disable_dwb_mux(seq_state, dc->res_pool->mpc, wb_info->dwb_pipe_inst);
+
+ /* Disable MCIF_WB */
+ hwss_add_mcif_wb_disable(seq_state, mcif_wb);
+}
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dwbc *dwb;
+ struct mcif_wb *mcif_wb;
+
+ if (!wb_info->wb_enabled || wb_info->dwb_pipe_inst >= dc->res_pool->res_cap->num_dwb)
+ return;
+
+ dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+ mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst];
+
+ /* Update writeback pipe */
+ hwss_add_dwbc_update(seq_state, dwb, &wb_info->dwb_params);
+
+ /* Update MCIF_WB buffer settings if needed */
+ hwss_add_mcif_wb_config_buf(seq_state, mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height);
+}
+
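+/* Return the first free GSL group (1-3), or 0 if all three are in use */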
+static int find_free_gsl_group(const struct dc *dc)
+{
+ if (dc->res_pool->gsl_groups.gsl_0 == 0)
+ return 1;
+ if (dc->res_pool->gsl_groups.gsl_1 == 0)
+ return 2;
+ if (dc->res_pool->gsl_groups.gsl_2 == 0)
+ return 3;
+
+ return 0;
+}
+
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state)
+{
+ struct gsl_params gsl;
+ int group_idx;
+
+ memset(&gsl, 0, sizeof(struct gsl_params));
+
+ if (enable) {
+ /* return if group already assigned since GSL was set up
+ * for vsync flip, we would unassign so it can't be "left over"
+ */
+ if (pipe_ctx->stream_res.gsl_group > 0)
+ return;
+
+ group_idx = find_free_gsl_group(dc);
+ ASSERT(group_idx != 0);
+ pipe_ctx->stream_res.gsl_group = group_idx;
+
+ /* set gsl group reg field and mark resource used */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 1;
+ dc->res_pool->gsl_groups.gsl_0 = 1;
+ break;
+ case 2:
+ gsl.gsl1_en = 1;
+ dc->res_pool->gsl_groups.gsl_1 = 1;
+ break;
+ case 3:
+ gsl.gsl2_en = 1;
+ dc->res_pool->gsl_groups.gsl_2 = 1;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return; // invalid case
+ }
+ gsl.gsl_master_en = 1;
+ } else {
+ group_idx = pipe_ctx->stream_res.gsl_group;
+ if (group_idx == 0)
+ return; // if not in use, just return
+
+ pipe_ctx->stream_res.gsl_group = 0;
+
+ /* unset gsl group reg field and mark resource free */
+ switch (group_idx) {
+ case 1:
+ gsl.gsl0_en = 0;
+ dc->res_pool->gsl_groups.gsl_0 = 0;
+ break;
+ case 2:
+ gsl.gsl1_en = 0;
+ dc->res_pool->gsl_groups.gsl_1 = 0;
+ break;
+ case 3:
+ gsl.gsl2_en = 0;
+ dc->res_pool->gsl_groups.gsl_2 = 0;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ gsl.gsl_master_en = 0;
+ }
+
+ hwss_add_tg_set_gsl(seq_state, pipe_ctx->stream_res.tg, gsl);
+ hwss_add_tg_set_gsl_source_select(seq_state, pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0);
+}
+
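+/*
+ * Illustrative pairing (assumed caller behavior): a flip-immediate path
+ * locks and later unlocks the same pipe, so the group bookkeeping above
+ * stays balanced:
+ *
+ *   dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, true, seq_state);
+ *   ...program the immediate flip...
+ *   dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state);
+ *
+ * The final argument to hwss_add_tg_set_gsl_source_select() (4 on enable,
+ * 0 on disable) is the gsl_ready_signal; its encoding is hardware-defined.
+ */
+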
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
+ struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
+
+ if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
+ return;
+
+ /* Wait for MPCC disconnect */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, pipe_ctx, seq_state);
+
+ /* In the flip-immediate with pipe-splitting case, GSL is used for
+ * synchronization, so it must be disabled when the plane is disabled.
+ */
+ if (pipe_ctx->stream_res.gsl_group != 0)
+ dcn401_setup_gsl_group_as_lock_sequence(dc, pipe_ctx, false, seq_state);
+
+ /* Update HUBP mall sel */
+ if (pipe_ctx->plane_res.hubp && pipe_ctx->plane_res.hubp->funcs->hubp_update_mall_sel)
+ hwss_add_hubp_update_mall_sel(seq_state, pipe_ctx->plane_res.hubp, 0, false);
+
+ /* Set flip control GSL */
+ hwss_add_hubp_set_flip_control_gsl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* HUBP clock control */
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, false);
+
+ /* DPP clock control */
+ hwss_add_dpp_dppclk_control(seq_state, pipe_ctx->plane_res.dpp, false, false);
+
+ /* Plane atomic power down */
+ if (dc->hwseq->funcs.plane_atomic_power_down_sequence)
+ dc->hwseq->funcs.plane_atomic_power_down_sequence(dc, pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp, seq_state);
+
+ pipe_ctx->stream = NULL;
+ memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
+ memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
+ pipe_ctx->top_pipe = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ pipe_ctx->prev_odm_pipe = NULL;
+ pipe_ctx->next_odm_pipe = NULL;
+ pipe_ctx->plane_state = NULL;
+
+ /* Turn the phantom OTG back off once the phantom plane is fully disabled */
+ if (is_phantom && tg && tg->funcs->disable_phantom_crtc)
+ hwss_add_disable_phantom_crtc(seq_state, tg);
+}
+
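+/*
+ * Informal ordering note: the teardown above is roughly the reverse of
+ * dcn401_enable_plane_sequence() -- MPCC disconnect and GSL release first,
+ * then HUBP/DPP clock gating, then power gating -- so a plane is never
+ * power gated while its clocks or blending tree still reference it.
+ */
+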
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state)
+{
+ struct display_stream_compressor *dsc = opp_head->stream_res.dsc;
+ struct dccg *dccg = dc->res_pool->dccg;
+
+ /* Wait for all DPP pipes in the current MPC blending tree to complete
+ * their double-buffered disconnection before resetting the OPP
+ */
+ if (dc->hwss.wait_for_mpcc_disconnect_sequence)
+ dc->hwss.wait_for_mpcc_disconnect_sequence(dc, dc->res_pool, opp_head, seq_state);
+
+ if (dsc) {
+ bool *is_ungated = NULL;
+ /* Check DSC power gate status */
+ if (dc->hwseq && dc->hwseq->funcs.dsc_pg_status)
+ hwss_add_dsc_pg_status(seq_state, dc->hwseq, dsc->inst, false);
+
+ /* Seamless-update specific: postpone the non-double-buffered
+ * DSCCLK disable logic to the post-unlock sequence, after DSC is
+ * disconnected from the OPP but not yet power gated.
+ */
+
+ /* DSC wait disconnect pending clear */
+ hwss_add_dsc_wait_disconnect_pending_clear(seq_state, dsc, is_ungated);
+
+ /* DSC disable */
+ hwss_add_dsc_disable(seq_state, dsc, is_ungated);
+
+ /* Set reference DSCCLK */
+ if (dccg && dccg->funcs->set_ref_dscclk)
+ hwss_add_dccg_set_ref_dscclk(seq_state, dccg, dsc->inst, 0);
+ }
+}
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, enable ? 1 : 0);
+}
+
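+/*
+ * Assumed usage sketch, mirroring the enable-plane steps below: the DPP and
+ * HUBP power-gating registers are only writable while IP_REQUEST_EN is set,
+ * so programming is bracketed and the original value restored, leaving an
+ * already-set request untouched:
+ *
+ *   REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+ *   dcn401_dc_ip_request_cntl(dc, true);
+ *   ...program DPP/HUBP power gating...
+ *   if (org_ip_request_cntl == 0)
+ *           dcn401_dc_ip_request_cntl(dc, false);
+ */
+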
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ uint32_t org_ip_request_cntl = 0;
+
+ if (!pipe_ctx->plane_res.dpp || !pipe_ctx->plane_res.hubp || !pipe_ctx->stream_res.opp)
+ return;
+
+ if (REG(DC_IP_REQUEST_CNTL))
+ REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
+
+ /* Step 1: DPP root clock control - enable clock */
+ if (hws->funcs.dpp_root_clock_control)
+ hwss_add_dpp_root_clock_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 2: Enable DC IP request (if needed) */
+ if (hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, true);
+
+ /* Step 3: DPP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.dpp_pg_control)
+ hwss_add_dpp_pg_control(seq_state, hws, pipe_ctx->plane_res.dpp->inst, true);
+
+ /* Step 4: HUBP power gating control - power on */
+ if (REG(DC_IP_REQUEST_CNTL) && hws->funcs.hubp_pg_control)
+ hwss_add_hubp_pg_control(seq_state, hws, pipe_ctx->plane_res.hubp->inst, true);
+
+ /* Step 5: Disable DC IP request (restore state) */
+ if (org_ip_request_cntl == 0 && hws->funcs.dc_ip_request_cntl)
+ hwss_add_dc_ip_request_cntl(seq_state, dc, false);
+
+ /* Step 6: HUBP clock control - enable DCFCLK */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl)
+ hwss_add_hubp_clk_cntl(seq_state, pipe_ctx->plane_res.hubp, true);
+
+ /* Step 7: HUBP initialization */
+ if (pipe_ctx->plane_res.hubp->funcs->hubp_init)
+ hwss_add_hubp_init(seq_state, pipe_ctx->plane_res.hubp);
+
+ /* Step 8: OPP pipe clock control - enable */
+ if (pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control)
+ hwss_add_opp_pipe_clock_control(seq_state, pipe_ctx->stream_res.opp, true);
+
+ /* Step 9: VM system aperture settings */
+ if (dc->vm_pa_config.valid && pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings) {
+ hwss_add_hubp_set_vm_system_aperture_settings(seq_state, pipe_ctx->plane_res.hubp, 0,
+ dc->vm_pa_config.system_aperture.start_addr, dc->vm_pa_config.system_aperture.end_addr);
+ }
+
+ /* Step 10: Flip interrupt setup */
+ if (!pipe_ctx->top_pipe
+ && pipe_ctx->plane_state
+ && pipe_ctx->plane_state->flip_int_enabled
+ && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int) {
+ hwss_add_hubp_set_flip_int(seq_state, pipe_ctx->plane_res.hubp);
+ }
+}
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dccg *dccg = dc->res_pool->dccg;
+ bool viewport_changed = false;
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
+
+ if (!hubp || !dpp || !plane_state)
+ return;
+
+ /* Step 1: DPP DPPCLK control */
+ if (pipe_ctx->update_flags.bits.dppclk)
+ hwss_add_dpp_dppclk_control(seq_state, dpp, false, true);
+
+ /* Step 2: DCCG update DPP DTO */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_dccg_update_dpp_dto(seq_state, dccg, dpp->inst, pipe_ctx->plane_res.bw.dppclk_khz);
+
+ /* Step 3: HUBP VTG selection */
+ if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) {
+ hwss_add_hubp_vtg_sel(seq_state, hubp, pipe_ctx->stream_res.tg->inst);
+
+ /* Step 4: HUBP setup (choose setup2 or setup) */
+ if (hubp->funcs->hubp_setup2) {
+ hwss_add_hubp_setup2(seq_state, hubp, &pipe_ctx->hubp_regs,
+ &pipe_ctx->global_sync, &pipe_ctx->stream->timing);
+ } else if (hubp->funcs->hubp_setup) {
+ hwss_add_hubp_setup(seq_state, hubp, &pipe_ctx->dlg_regs,
+ &pipe_ctx->ttu_regs, &pipe_ctx->rq_regs, &pipe_ctx->pipe_dlg_param);
+ }
+ }
+
+ /* Step 5: Set unbounded requesting */
+ if (pipe_ctx->update_flags.bits.unbounded_req && hubp->funcs->set_unbounded_requesting)
+ hwss_add_hubp_set_unbounded_requesting(seq_state, hubp, pipe_ctx->unbounded_req);
+
+ /* Step 6: HUBP interdependent setup */
+ if (pipe_ctx->update_flags.bits.hubp_interdependent) {
+ if (hubp->funcs->hubp_setup_interdependent2)
+ hwss_add_hubp_setup_interdependent2(seq_state, hubp, &pipe_ctx->hubp_regs);
+ else if (hubp->funcs->hubp_setup_interdependent)
+ hwss_add_hubp_setup_interdependent(seq_state, hubp, &pipe_ctx->dlg_regs, &pipe_ctx->ttu_regs);
+ }
+
+ /* Step 7: DPP setup - input CSC and format setup */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.input_csc_change ||
+ plane_state->update_flags.bits.color_space_change ||
+ plane_state->update_flags.bits.coeff_reduction_change) {
+ hwss_add_dpp_setup_dpp(seq_state, pipe_ctx);
+
+ /* Step 8: DPP cursor matrix setup */
+ if (dpp->funcs->set_cursor_matrix) {
+ hwss_add_dpp_set_cursor_matrix(seq_state, dpp, plane_state->color_space,
+ &plane_state->cursor_csc_color_matrix);
+ }
+
+ /* Step 9: DPP program bias and scale */
+ if (dpp->funcs->dpp_program_bias_and_scale)
+ hwss_add_dpp_program_bias_and_scale(seq_state, pipe_ctx);
+ }
+
+ /* Step 10: MPCC updates */
+ if (pipe_ctx->update_flags.bits.mpcc ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.global_alpha_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change) {
+
+ /* Prefer update_mpcc_sequence, when implemented, over a single MPC_UPDATE_MPCC step */
+ if (hws->funcs.update_mpcc_sequence)
+ hws->funcs.update_mpcc_sequence(dc, pipe_ctx, seq_state);
+ }
+
+ /* Step 11: DPP scaler setup */
+ if (pipe_ctx->update_flags.bits.scaler ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.position_change ||
+ plane_state->update_flags.bits.per_pixel_alpha_change ||
+ pipe_ctx->stream->update_flags.bits.scaling) {
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
+ ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
+ hwss_add_dpp_set_scaler(seq_state, pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
+ }
+
+ /* Step 12: HUBP viewport programming */
+ if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
+ (context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
+ (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
+ hwss_add_hubp_mem_program_viewport(seq_state, hubp,
+ &pipe_ctx->plane_res.scl_data.viewport, &pipe_ctx->plane_res.scl_data.viewport_c);
+ viewport_changed = true;
+ }
+
+ /* Step 13: HUBP program mcache if available */
+ if (hubp->funcs->hubp_program_mcache_id_and_split_coordinate)
+ hwss_add_hubp_program_mcache_id(seq_state, hubp, &pipe_ctx->mcache_regs);
+
+ /* Step 14: Cursor attribute setup */
+ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.scaler || viewport_changed) &&
+ pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
+
+ hwss_add_abort_cursor_offload_update(seq_state, dc, pipe_ctx);
+
+ hwss_add_set_cursor_attribute(seq_state, dc, pipe_ctx);
+
+ /* Step 15: Cursor position setup */
+ hwss_add_set_cursor_position(seq_state, dc, pipe_ctx);
+
+ /* Step 16: Cursor SDR white level */
+ if (dc->hwss.set_cursor_sdr_white_level)
+ hwss_add_set_cursor_sdr_white_level(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 17: Gamut remap and output CSC */
+ if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.gamut_remap ||
+ plane_state->update_flags.bits.gamut_remap_change ||
+ pipe_ctx->stream->update_flags.bits.out_csc) {
+
+ /* Gamut remap */
+ hwss_add_dpp_program_gamut_remap(seq_state, pipe_ctx);
+
+ /* Output CSC */
+ hwss_add_program_output_csc(seq_state, dc, pipe_ctx, pipe_ctx->stream->output_color_space,
+ pipe_ctx->stream->csc_color_matrix.matrix, hubp->opp_id);
+ }
+
+ /* Step 18: HUBP surface configuration */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->update_flags.bits.opp_changed ||
+ plane_state->update_flags.bits.pixel_format_change ||
+ plane_state->update_flags.bits.horizontal_mirror_change ||
+ plane_state->update_flags.bits.rotation_change ||
+ plane_state->update_flags.bits.swizzle_change ||
+ plane_state->update_flags.bits.dcc_change ||
+ plane_state->update_flags.bits.bpp_change ||
+ plane_state->update_flags.bits.scaling_change ||
+ plane_state->update_flags.bits.plane_size_change) {
+ struct plane_size size = plane_state->plane_size;
+
+ size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
+ hwss_add_hubp_program_surface_config(seq_state, hubp,
+ plane_state->format, &plane_state->tiling_info, size,
+ plane_state->rotation, &plane_state->dcc,
+ plane_state->horizontal_mirror, 0);
+ hubp->power_gated = false;
+ }
+
+ /* Step 19: Update plane address (with SubVP support) */
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ plane_state->update_flags.bits.addr_update) {
+
+ /* SubVP save surface address if needed */
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_mall_type == SUBVP_MAIN) {
+ hwss_add_dmub_subvp_save_surf_addr(seq_state, dc->ctx->dmub_srv,
+ &pipe_ctx->plane_state->address, pipe_ctx->subvp_index);
+ }
+
+ /* Update plane address */
+ hwss_add_hubp_update_plane_addr(seq_state, dc, pipe_ctx);
+ }
+
+ /* Step 20: HUBP set blank - enable plane */
+ if (pipe_ctx->update_flags.bits.enable)
+ hwss_add_hubp_set_blank(seq_state, hubp, false);
+
+ /* Step 21: Phantom HUBP post enable */
+ if (pipe_mall_type == SUBVP_PHANTOM && hubp->funcs->phantom_hubp_post_enable)
+ hwss_add_phantom_hubp_post_enable(seq_state, hubp);
+}
+
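+/*
+ * Informal note: nearly every step above is gated on update_flags, so the
+ * sequence scales with the scope of the change. A minimal address-only flip
+ * (plane_state->update_flags.bits.addr_update) appends little beyond the
+ * Step 19 plane-address programming, while a full enable
+ * (pipe_ctx->update_flags.bits.enable) walks all 21 steps.
+ */
+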
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg = {0};
+ bool per_pixel_alpha;
+ int mpcc_id;
+ struct mpcc *new_mpcc;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+
+ if (!hubp || !pipe_ctx->plane_state)
+ return;
+
+ per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
+
+ /* Initialize blend configuration */
+ blnd_cfg.overlap_only = false;
+ blnd_cfg.global_gain = 0xff;
+
+ if (per_pixel_alpha) {
+ blnd_cfg.pre_multiplied_alpha = pipe_ctx->plane_state->pre_multiplied_alpha;
+ if (pipe_ctx->plane_state->global_alpha) {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
+ blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
+ } else {
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ }
+ } else {
+ blnd_cfg.pre_multiplied_alpha = false;
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+ }
+
+ if (pipe_ctx->plane_state->global_alpha)
+ blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ else
+ blnd_cfg.global_alpha = 0xff;
+
+ blnd_cfg.background_color_bpc = 4;
+ blnd_cfg.bottom_gain_mode = 0;
+ blnd_cfg.top_gain = 0x1f000;
+ blnd_cfg.bottom_inside_gain = 0x1f000;
+ blnd_cfg.bottom_outside_gain = 0x1f000;
+
+ if (pipe_ctx->plane_state->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA)
+ blnd_cfg.pre_multiplied_alpha = false;
+
+ /* MPCC instance is equal to HUBP instance */
+ mpcc_id = hubp->inst;
+
+ /* Step 1: Update blending if no full update needed */
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
+
+ /* Update blending configuration */
+ hwss_add_mpc_update_blending(seq_state, mpc, blnd_cfg, mpcc_id);
+
+ /* Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+ return;
+ }
+
+ /* Step 2: Get existing MPCC for DPP */
+ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
+
+ /* Step 3: Remove MPCC if being used */
+ if (new_mpcc != NULL) {
+ hwss_add_mpc_remove_mpcc(seq_state, mpc, mpc_tree_params, new_mpcc);
+ } else {
+ /* Step 4: Assert MPCC idle (debug only) */
+ if (dc->debug.sanity_checks)
+ hwss_add_mpc_assert_idle_mpcc(seq_state, mpc, mpcc_id);
+ }
+
+ /* Step 5: Insert new plane into MPC tree */
+ hwss_add_mpc_insert_plane(seq_state, mpc, mpc_tree_params, blnd_cfg, NULL, NULL, hubp->inst, mpcc_id);
+
+ /* Step 6: Update visual confirm color */
+ hwss_add_mpc_update_visual_confirm(seq_state, dc, pipe_ctx, mpcc_id);
+
+ /* Step 7: Set HUBP OPP and MPCC IDs */
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
+}
+
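+/*
+ * Worked blend example (illustrative): a plane with per_pixel_alpha set and
+ * global_alpha_value = 0x80 selects COMBINED_GLOBAL_GAIN, so each pixel's
+ * alpha is further scaled by global_gain = 0x80 (about 50%); without
+ * per-pixel alpha the same value is applied uniformly through
+ * MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA.
+ */
+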
+static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
+{
+ int i;
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ if (res_pool->hubps[i]->inst == mpcc_inst)
+ return res_pool->hubps[i];
+ }
+ ASSERT(false);
+ return NULL;
+}
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ int mpcc_inst;
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+
+ if (!pipe_ctx->stream_res.opp)
+ return;
+
+ for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
+ if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
+ struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
+
+ if (pipe_ctx->stream_res.tg &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) {
+ hwss_add_mpc_assert_idle_mpcc(seq_state, res_pool->mpc, mpcc_inst);
+ }
+ pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
+ if (hubp)
+ hwss_add_hubp_set_blank(seq_state, hubp, true);
+ }
+ }
+
+ if (dc->debug.sanity_checks)
+ dc->hwseq->funcs.verify_allow_pstate_change_high_sequence(dc, seq_state);
+}
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+
+ if (start_line < 0)
+ start_line = 0;
+
+ if (tg->funcs->setup_vertical_interrupt2)
+ hwss_add_tg_setup_vertical_interrupt2(seq_state, tg, start_line);
+}
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state)
+{
+ struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
+ uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
+ struct custom_float_format fmt;
+
+ fmt.exponenta_bits = 6;
+ fmt.mantissa_bits = 12;
+ fmt.sign = true;
+
+ if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
+ convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
+
+ hwss_add_dpp_set_hdr_multiplier(seq_state, pipe_ctx->plane_res.dpp, hw_mult);
+}
+
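+/*
+ * Worked encoding example, assuming the usual biased-exponent layout: the
+ * format above is 1 sign bit, 6 exponent bits (bias 31) and 12 mantissa
+ * bits, so 1.0 encodes as exponent 31 (0x1f) with a zero mantissa:
+ * 0x1f << 12 = 0x1f000, matching the default hw_mult used when the
+ * multiplier is zero.
+ */
+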
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state)
+{
+ int i;
+ unsigned int num_ways = dcn401_calculate_cab_allocation(dc, context);
+ bool cache_cursor = false;
+
+ // Don't force p-state disallow -- can't block dummy p-state
+
+ // Update the MALL_SEL register for each pipe (broken out from the update_mall_sel call)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
+ int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
+
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
+ cache_cursor = true;
+
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, 1, false);
+ } else {
+ // MALL not supported with Stereo3D
+ uint32_t mall_sel = (num_ways <= dc->caps.cache_num_ways &&
+ pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED &&
+ pipe->plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO &&
+ !pipe->plane_state->address.tmz_surface) ? 2 : 0;
+ hwss_add_hubp_update_mall_sel(seq_state, hubp, mall_sel, cache_cursor);
+ }
+ }
+ }
+
+ // Program FORCE_ONE_ROW_FOR_FRAME and CURSOR_REQ_MODE for main subvp pipes
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = pipe->plane_res.hubp;
+
+ if (pipe->stream && hubp && hubp->funcs->hubp_prepare_subvp_buffering) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
+ hwss_add_hubp_prepare_subvp_buffering(seq_state, hubp, true);
+ }
+ }
+}
+
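+/*
+ * Cursor sizing example (illustrative): cursor_size starts as pitch *
+ * height in pixels and is scaled by the bytes per pixel of the cursor
+ * format. A 64x64 32-bit ARGB cursor is 64 * 64 * 4 = 16384 bytes and is
+ * not cached, while a 128x128 one (65536 bytes) exceeds the 16 KiB
+ * threshold and sets cache_cursor.
+ */
+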
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high)
+ return;
+
+ if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
+ /* Attempt hardware workaround force recovery */
+ dcn401_hw_wa_force_recovery_sequence(dc, seq_state);
+ }
+}
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state)
+{
+ struct hubp *hubp;
+ unsigned int i;
+
+ if (!dc->debug.recovery_enabled)
+ return false;
+
+ /* Step 1: Set HUBP_BLANK_EN=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 2: DCHUBBUB_GLOBAL_SOFT_RESET=1 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, true);
+
+ /* Step 3: Set HUBP_DISABLE=1 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, true);
+ }
+ }
+
+ /* Step 4: Set HUBP_DISABLE=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->hubp_disable_control)
+ hwss_add_hubp_disable_control(seq_state, hubp, false);
+ }
+ }
+
+ /* Step 5: DCHUBBUB_GLOBAL_SOFT_RESET=0 */
+ hwss_add_hubbub_soft_reset(seq_state, dc->res_pool->hubbub, hubbub1_soft_reset, false);
+
+ /* Step 6: Set HUBP_BLANK_EN=0 for all active pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx != NULL) {
+ hubp = pipe_ctx->plane_res.hubp;
+ if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
+ hwss_add_hubp_set_blank_en(seq_state, hubp, false);
+ }
+ }
+
+ return true;
+}
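+
+/*
+ * Recovery summary (informal): the workaround blanks every HUBP, pulses
+ * DCHUBBUB_GLOBAL_SOFT_RESET around a HUBP_DISABLE toggle, then unblanks.
+ * All six steps are queued into the sequence so the bracket executes back
+ * to back when the sequence runs.
+ */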
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
index 2621b7725267..f78162ab859b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h
@@ -9,6 +9,7 @@
#include "dc.h"
#include "dc_stream.h"
#include "hw_sequencer_private.h"
+#include "hwss/hw_sequencer.h"
#include "dcn401/dcn401_dccg.h"
struct dc;
@@ -73,15 +74,17 @@ void dcn401_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-void dcn401_fams2_global_control_lock(struct dc *dc,
+void dcn401_dmub_hw_control_lock(struct dc *dc,
struct dc_state *context,
bool lock);
void dcn401_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable);
-void dcn401_fams2_global_control_lock_fast(union block_sequence_params *params);
+void dcn401_dmub_hw_control_lock_fast(union block_sequence_params *params);
void dcn401_unblank_stream(struct pipe_ctx *pipe_ctx, struct dc_link_settings *link_settings);
void dcn401_hardware_release(struct dc *dc);
void dcn401_update_odm(struct dc *dc, struct dc_state *context,
struct pipe_ctx *otg_master);
+void dcn401_update_odm_sequence(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *otg_master, struct block_sequence_state *seq_state);
void adjust_hotspot_between_slices_for_2x_magnify(uint32_t cursor_width, struct dc_cursor_position *pos_cpy);
void dcn401_wait_for_det_buffer_update_under_otg_master(struct dc *dc, struct dc_state *context, struct pipe_ctx *otg_master);
void dcn401_interdependent_update_lock(struct dc *dc, struct dc_state *context, bool lock);
@@ -97,6 +100,11 @@ void dcn401_program_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+void dcn401_program_pipe_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx);
void dcn401_program_front_end_for_ctx(struct dc *dc, struct dc_state *context);
void dcn401_post_unlock_program_front_end(struct dc *dc, struct dc_state *context);
@@ -109,5 +117,97 @@ void dcn401_detect_pipe_changes(
void dcn401_plane_atomic_power_down(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
+void dcn401_plane_atomic_disconnect_sequence(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+void dcn401_blank_pixel_data_sequence(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state);
void dcn401_initialize_min_clocks(struct dc *dc);
+void dcn401_update_cursor_offload_pipe(struct dc *dc, const struct pipe_ctx *pipe);
+
+void dcn401_program_all_writeback_pipes_in_tree_sequence(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_enable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ int mpcc_inst,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_writeback_sequence(
+ struct dc *dc,
+ struct dc_writeback_info *wb_info,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_gsl_group_as_lock_sequence(
+ const struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool enable,
+ struct block_sequence_state *seq_state);
+
+void dcn401_disable_plane_sequence(
+ struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_post_unlock_reset_opp_sequence(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
+
+void dcn401_dc_ip_request_cntl(struct dc *dc, bool enable);
+
+void dcn401_enable_plane_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_dchubp_dpp_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_update_mpcc_sequence(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_wait_for_mpcc_disconnect_sequence(
+ struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_setup_vupdate_interrupt_sequence(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_set_hdr_multiplier_sequence(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
+
+void dcn401_program_mall_pipe_config_sequence(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
+
+void dcn401_verify_allow_pstate_change_high_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
+
+bool dcn401_hw_wa_force_recovery_sequence(struct dc *dc,
+ struct block_sequence_state *seq_state);
+
#endif /* __DC_HWSS_DCN401_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
index fe7aceb2f510..162096ce0bdf 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c
@@ -9,6 +9,7 @@
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32/dcn32_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
#include "dcn401/dcn401_hwseq.h"
#include "dcn401_init.h"
@@ -38,6 +39,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
+ .disable_plane_sequence = dcn401_disable_plane_sequence,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn401_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
@@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.get_hw_state = dcn10_get_hw_state,
.clear_status_bits = dcn10_clear_status_bits,
.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .wait_for_mpcc_disconnect_sequence = dcn401_wait_for_mpcc_disconnect_sequence,
.edp_backlight_control = dce110_edp_backlight_control,
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
@@ -60,6 +63,12 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.set_cursor_position = dcn401_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
@@ -95,54 +104,70 @@ static const struct hw_sequencer_funcs dcn401_funcs = {
.apply_update_flags_for_phantom = dcn32_apply_update_flags_for_phantom,
.wait_for_dcc_meta_propagation = dcn401_wait_for_dcc_meta_propagation,
.is_pipe_topology_transition_seamless = dcn32_is_pipe_topology_transition_seamless,
- .fams2_global_control_lock = dcn401_fams2_global_control_lock,
+ .dmub_hw_control_lock = dcn401_dmub_hw_control_lock,
.fams2_update_config = dcn401_fams2_update_config,
- .fams2_global_control_lock_fast = dcn401_fams2_global_control_lock_fast,
+ .dmub_hw_control_lock_fast = dcn401_dmub_hw_control_lock_fast,
.program_outstanding_updates = dcn401_program_outstanding_updates,
.wait_for_all_pending_updates = dcn30_wait_for_all_pending_updates,
.detect_pipe_changes = dcn401_detect_pipe_changes,
.enable_plane = dcn20_enable_plane,
+ .enable_plane_sequence = dcn401_enable_plane_sequence,
.update_dchubp_dpp = dcn20_update_dchubp_dpp,
+ .update_dchubp_dpp_sequence = dcn401_update_dchubp_dpp_sequence,
.post_unlock_reset_opp = dcn20_post_unlock_reset_opp,
+ .post_unlock_reset_opp_sequence = dcn401_post_unlock_reset_opp_sequence,
+ .get_underflow_debug_data = dcn30_get_underflow_debug_data,
};
static const struct hwseq_private_funcs dcn401_private_funcs = {
.init_pipes = dcn10_init_pipes,
.plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .plane_atomic_disconnect_sequence = dcn401_plane_atomic_disconnect_sequence,
.update_mpcc = dcn20_update_mpcc,
+ .update_mpcc_sequence = dcn401_update_mpcc_sequence,
.set_input_transfer_func = dcn32_set_input_transfer_func,
.set_output_transfer_func = dcn401_set_output_transfer_func,
.power_down = dce110_power_down,
.enable_display_power_gating = dcn10_dummy_display_power_gating,
.blank_pixel_data = dcn20_blank_pixel_data,
+ .blank_pixel_data_sequence = dcn401_blank_pixel_data_sequence,
.reset_hw_ctx_wrap = dcn401_reset_hw_ctx_wrap,
.enable_stream_timing = dcn401_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .setup_vupdate_interrupt_sequence = dcn401_setup_vupdate_interrupt_sequence,
.did_underflow_occur = dcn10_did_underflow_occur,
.init_blank = dcn32_init_blank,
.disable_vga = dcn20_disable_vga,
.bios_golden_init = dcn10_bios_golden_init,
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn401_plane_atomic_power_down,
+ .plane_atomic_power_down_sequence = dcn401_plane_atomic_power_down_sequence,
.enable_power_gating_plane = dcn32_enable_power_gating_plane,
.hubp_pg_control = dcn32_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .program_all_writeback_pipes_in_tree_sequence = dcn401_program_all_writeback_pipes_in_tree_sequence,
.update_odm = dcn401_update_odm,
+ .update_odm_sequence = dcn401_update_odm_sequence,
.dsc_pg_control = dcn32_dsc_pg_control,
.dsc_pg_status = dcn32_dsc_pg_status,
.set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .set_hdr_multiplier_sequence = dcn401_set_hdr_multiplier_sequence,
.verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .verify_allow_pstate_change_high_sequence = dcn401_verify_allow_pstate_change_high_sequence,
.wait_for_blank_complete = dcn20_wait_for_blank_complete,
.dccg_init = dcn20_dccg_init,
.set_mcm_luts = dcn401_set_mcm_luts,
.program_mall_pipe_config = dcn32_program_mall_pipe_config,
+ .program_mall_pipe_config_sequence = dcn401_program_mall_pipe_config_sequence,
.update_mall_sel = dcn32_update_mall_sel,
.calculate_dccg_k1_k2_values = NULL,
.apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
.reset_back_end_for_pipe = dcn401_reset_back_end_for_pipe,
.populate_mcm_luts = NULL,
.perform_3dlut_wa_unlock = dcn401_perform_3dlut_wa_unlock,
+ .program_pipe_sequence = dcn401_program_pipe_sequence,
+ .dc_ip_request_cntl = dcn401_dc_ip_request_cntl,
};
void dcn401_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 9df8030e37f7..8ed9eea40c56 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -31,6 +31,8 @@
#include "inc/hw/opp.h"
#include "inc/hw/link_encoder.h"
#include "inc/core_status.h"
+#include "inc/hw/hw_shared.h"
+#include "dsc/dsc.h"
struct pipe_ctx;
struct dc_state;
@@ -47,6 +49,9 @@ struct link_resource;
struct dc_dmub_cmd;
struct pg_block_update;
struct drr_params;
+struct dc_underflow_debug_data;
+struct dsc_optc_config;
+struct vm_system_aperture_param;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
@@ -61,7 +66,7 @@ struct pipe_control_lock_params {
};
struct set_flip_control_gsl_params {
- struct pipe_ctx *pipe_ctx;
+ struct hubp *hubp;
bool flip_immediate;
};
@@ -147,12 +152,587 @@ struct wait_for_dcc_meta_propagation_params {
const struct pipe_ctx *top_pipe_to_program;
};
-struct fams2_global_control_lock_fast_params {
+struct dmub_hw_control_lock_fast_params {
struct dc *dc;
bool is_required;
bool lock;
};
+struct program_surface_config_params {
+ struct hubp *hubp;
+ enum surface_pixel_format format;
+ struct dc_tiling_info *tiling_info;
+ struct plane_size plane_size;
+ enum dc_rotation_angle rotation;
+ struct dc_plane_dcc_param *dcc;
+ bool horizontal_mirror;
+ int compat_level;
+};
+
+struct program_mcache_id_and_split_coordinate {
+ struct hubp *hubp;
+ struct dml2_hubp_pipe_mcache_regs *mcache_regs;
+};
+
+struct program_cursor_update_now_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct hubp_wait_pipe_read_start_params {
+ struct hubp *hubp;
+};
+
+struct apply_update_flags_for_phantom_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct update_phantom_vp_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct set_odm_combine_params {
+ struct timing_generator *tg;
+ int opp_inst[MAX_PIPES];
+ int opp_head_count;
+ int odm_slice_width;
+ int last_odm_slice_width;
+};
+
+struct set_odm_bypass_params {
+ struct timing_generator *tg;
+ const struct dc_crtc_timing *timing;
+};
+
+struct opp_pipe_clock_control_params {
+ struct output_pixel_processor *opp;
+ bool enable;
+};
+
+struct opp_program_left_edge_extra_pixel_params {
+ struct output_pixel_processor *opp;
+ enum dc_pixel_encoding pixel_encoding;
+ bool is_otg_master;
+};
+
+struct dccg_set_dto_dscclk_params {
+ struct dccg *dccg;
+ int inst;
+ int num_slices_h;
+};
+
+struct dsc_set_config_params {
+ struct display_stream_compressor *dsc;
+ struct dsc_config *dsc_cfg;
+ struct dsc_optc_config *dsc_optc_cfg;
+};
+
+struct dsc_enable_params {
+ struct display_stream_compressor *dsc;
+ int opp_inst;
+};
+
+struct tg_set_dsc_config_params {
+ struct timing_generator *tg;
+ struct dsc_optc_config *dsc_optc_cfg;
+ bool enable;
+};
+
+struct dsc_disconnect_params {
+ struct display_stream_compressor *dsc;
+};
+
+struct dsc_read_state_params {
+ struct display_stream_compressor *dsc;
+ struct dcn_dsc_state *dsc_state;
+};
+
+struct dsc_calculate_and_set_config_params {
+ struct pipe_ctx *pipe_ctx;
+ struct dsc_optc_config dsc_optc_cfg;
+ bool enable;
+ int opp_cnt;
+};
+
+struct dsc_enable_with_opp_params {
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_tg_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct tg_program_global_sync_params {
+ struct timing_generator *tg;
+ int vready_offset;
+ unsigned int vstartup_lines;
+ unsigned int vupdate_offset_pixels;
+ unsigned int vupdate_vupdate_width_pixels;
+ unsigned int pstate_keepout_start_lines;
+};
+
+struct tg_wait_for_state_params {
+ struct timing_generator *tg;
+ enum crtc_state state;
+};
+
+struct tg_set_vtg_params_params {
+ struct timing_generator *tg;
+ struct dc_crtc_timing *timing;
+ bool program_fp2;
+};
+
+struct tg_set_gsl_params {
+ struct timing_generator *tg;
+ struct gsl_params gsl;
+};
+
+struct tg_set_gsl_source_select_params {
+ struct timing_generator *tg;
+ int group_idx;
+ uint32_t gsl_ready_signal;
+};
+
+struct setup_vupdate_interrupt_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct tg_setup_vertical_interrupt2_params {
+ struct timing_generator *tg;
+ int start_line;
+};
+
+struct dpp_set_hdr_multiplier_params {
+ struct dpp *dpp;
+ uint32_t hw_mult;
+};
+
+struct program_det_size_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_buffer_size_kb;
+};
+
+struct program_det_segments_params {
+ struct hubbub *hubbub;
+ unsigned int hubp_inst;
+ unsigned int det_size;
+};
+
+struct update_dchubp_dpp_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ struct dc_state *context;
+};
+
+struct opp_set_dyn_expansion_params {
+ struct output_pixel_processor *opp;
+ enum dc_color_space color_space;
+ enum dc_color_depth color_depth;
+ enum signal_type signal;
+};
+
+struct opp_program_fmt_params {
+ struct output_pixel_processor *opp;
+ struct bit_depth_reduction_params *fmt_bit_depth;
+ struct clamping_and_pixel_encoding_params *clamping;
+};
+
+struct opp_program_bit_depth_reduction_params {
+ struct output_pixel_processor *opp;
+ bool use_default_params;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct opp_set_disp_pattern_generator_params {
+ struct output_pixel_processor *opp;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ struct tg_color solid_color;
+ bool use_solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct set_abm_pipe_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_abm_level_params {
+ struct abm *abm;
+ unsigned int abm_level;
+};
+
+struct set_abm_immediate_disable_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_disp_pattern_generator_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum controller_dp_test_pattern test_pattern;
+ enum controller_dp_color_space color_space;
+ enum dc_color_depth color_depth;
+ const struct tg_color *solid_color;
+ int width;
+ int height;
+ int offset;
+};
+
+struct mpc_update_blending_params {
+ struct mpc *mpc;
+ struct mpcc_blnd_cfg blnd_cfg;
+ int mpcc_id;
+};
+
+struct mpc_assert_idle_mpcc_params {
+ struct mpc *mpc;
+ int mpcc_id;
+};
+
+struct mpc_insert_plane_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc_blnd_cfg blnd_cfg;
+ struct mpcc_sm_cfg *sm_cfg;
+ struct mpcc *insert_above_mpcc;
+ int dpp_id;
+ int mpcc_id;
+};
+
+struct mpc_remove_mpcc_params {
+ struct mpc *mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove;
+};
+
+struct opp_set_mpcc_disconnect_pending_params {
+ struct output_pixel_processor *opp;
+ int mpcc_inst;
+ bool pending;
+};
+
+struct dc_set_optimized_required_params {
+ struct dc *dc;
+ bool optimized_required;
+};
+
+struct hubp_disconnect_params {
+ struct hubp *hubp;
+};
+
+struct hubbub_force_pstate_change_control_params {
+ struct hubbub *hubbub;
+ bool enable;
+ bool wait;
+};
+
+struct tg_enable_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct hubp_wait_flip_pending_params {
+ struct hubp *hubp;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct tg_wait_double_buffer_pending_params {
+ struct timing_generator *tg;
+ unsigned int timeout_us;
+ unsigned int polling_interval_us;
+};
+
+struct update_force_pstate_params {
+ struct dc *dc;
+ struct dc_state *context;
+};
+
+struct hubbub_apply_dedcn21_147_wa_params {
+ struct hubbub *hubbub;
+};
+
+struct hubbub_allow_self_refresh_control_params {
+ struct hubbub *hubbub;
+ bool allow;
+ bool *disallow_self_refresh_applied;
+};
+
+struct tg_get_frame_count_params {
+ struct timing_generator *tg;
+ unsigned int *frame_count;
+};
+
+struct mpc_set_dwb_mux_params {
+ struct mpc *mpc;
+ int dwb_id;
+ int mpcc_id;
+};
+
+struct mpc_disable_dwb_mux_params {
+ struct mpc *mpc;
+ unsigned int dwb_id;
+};
+
+struct mcif_wb_config_buf_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_buf_params *mcif_buf_params;
+ unsigned int dest_height;
+};
+
+struct mcif_wb_config_arb_params {
+ struct mcif_wb *mcif_wb;
+ struct mcif_arb_params *mcif_arb_params;
+};
+
+struct mcif_wb_enable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct mcif_wb_disable_params {
+ struct mcif_wb *mcif_wb;
+};
+
+struct dwbc_enable_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct dwbc_disable_params {
+ struct dwbc *dwb;
+};
+
+struct dwbc_update_params {
+ struct dwbc *dwb;
+ struct dc_dwb_params *dwb_params;
+};
+
+struct hubp_update_mall_sel_params {
+ struct hubp *hubp;
+ uint32_t mall_sel;
+ bool cache_cursor;
+};
+
+struct hubp_prepare_subvp_buffering_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_set_blank_en_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_disable_control_params {
+ struct hubp *hubp;
+ bool disable;
+};
+
+struct hubbub_soft_reset_params {
+ struct hubbub *hubbub;
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset);
+ bool reset;
+};
+
+struct hubp_clk_cntl_params {
+ struct hubp *hubp;
+ bool enable;
+};
+
+struct hubp_init_params {
+ struct hubp *hubp;
+};
+
+struct hubp_set_vm_system_aperture_settings_params {
+ struct hubp *hubp;
+ //struct vm_system_aperture_param apt;
+ PHYSICAL_ADDRESS_LOC sys_default;
+ PHYSICAL_ADDRESS_LOC sys_low;
+ PHYSICAL_ADDRESS_LOC sys_high;
+};
+
+struct hubp_set_flip_int_params {
+ struct hubp *hubp;
+};
+
+struct dpp_dppclk_control_params {
+ struct dpp *dpp;
+ bool dppclk_div;
+ bool enable;
+};
+
+struct disable_phantom_crtc_params {
+ struct timing_generator *tg;
+};
+
+struct dpp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool power_on;
+};
+
+struct hubp_pg_control_params {
+ struct dce_hwseq *hws;
+ unsigned int hubp_inst;
+ bool power_on;
+};
+
+struct hubp_reset_params {
+ struct hubp *hubp;
+};
+
+struct dpp_reset_params {
+ struct dpp *dpp;
+};
+
+struct dpp_root_clock_control_params {
+ struct dce_hwseq *hws;
+ unsigned int dpp_inst;
+ bool clock_on;
+};
+
+struct dc_ip_request_cntl_params {
+ struct dc *dc;
+ bool enable;
+};
+
+struct dsc_pg_status_params {
+ struct dce_hwseq *hws;
+ int dsc_inst;
+ bool is_ungated;
+};
+
+struct dsc_wait_disconnect_pending_clear_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dsc_disable_params {
+ struct display_stream_compressor *dsc;
+ bool *is_ungated;
+};
+
+struct dccg_set_ref_dscclk_params {
+ struct dccg *dccg;
+ int dsc_inst;
+ bool *is_ungated;
+};
+
+struct dccg_update_dpp_dto_params {
+ struct dccg *dccg;
+ int dpp_inst;
+ int dppclk_khz;
+};
+
+struct hubp_vtg_sel_params {
+ struct hubp *hubp;
+ uint32_t otg_inst;
+};
+
+struct hubp_setup2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+ union dml2_global_sync_programming *global_sync;
+ struct dc_crtc_timing *timing;
+};
+
+struct hubp_setup_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+ struct _vcs_dpi_display_rq_regs_st *rq_regs;
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest;
+};
+
+struct hubp_set_unbounded_requesting_params {
+ struct hubp *hubp;
+ bool unbounded_req;
+};
+
+struct hubp_setup_interdependent2_params {
+ struct hubp *hubp;
+ struct dml2_dchub_per_pipe_register_set *hubp_regs;
+};
+
+struct hubp_setup_interdependent_params {
+ struct hubp *hubp;
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs;
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs;
+};
+
+struct dpp_set_cursor_matrix_params {
+ struct dpp *dpp;
+ enum dc_color_space color_space;
+ struct dc_csc_transform *cursor_csc_color_matrix;
+};
+
+struct mpc_update_mpcc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct dpp_set_scaler_params {
+ struct dpp *dpp;
+ const struct scaler_data *scl_data;
+};
+
+struct hubp_mem_program_viewport_params {
+ struct hubp *hubp;
+ const struct rect *viewport;
+ const struct rect *viewport_c;
+};
+
+struct hubp_program_mcache_id_and_split_coordinate_params {
+ struct hubp *hubp;
+ struct mcache_regs_struct *mcache_regs;
+};
+
+struct abort_cursor_offload_update_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_attribute_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_position_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct set_cursor_sdr_white_level_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+};
+
+struct program_output_csc_params {
+ struct dc *dc;
+ struct pipe_ctx *pipe_ctx;
+ enum dc_color_space colorspace;
+ uint16_t *matrix;
+ int opp_id;
+};
+
+struct hubp_set_blank_params {
+ struct hubp *hubp;
+ bool blank;
+};
+
+struct phantom_hubp_post_enable_params {
+ struct hubp *hubp;
+};
+
union block_sequence_params {
struct update_plane_addr_params update_plane_addr_params;
struct subvp_pipe_control_lock_fast_params subvp_pipe_control_lock_fast_params;
@@ -172,7 +752,108 @@ union block_sequence_params {
struct set_ocsc_default_params set_ocsc_default_params;
struct subvp_save_surf_addr subvp_save_surf_addr;
struct wait_for_dcc_meta_propagation_params wait_for_dcc_meta_propagation_params;
- struct fams2_global_control_lock_fast_params fams2_global_control_lock_fast_params;
+ struct dmub_hw_control_lock_fast_params dmub_hw_control_lock_fast_params;
+ struct program_surface_config_params program_surface_config_params;
+ struct program_mcache_id_and_split_coordinate program_mcache_id_and_split_coordinate;
+ struct program_cursor_update_now_params program_cursor_update_now_params;
+ struct hubp_wait_pipe_read_start_params hubp_wait_pipe_read_start_params;
+ struct apply_update_flags_for_phantom_params apply_update_flags_for_phantom_params;
+ struct update_phantom_vp_position_params update_phantom_vp_position_params;
+ struct set_odm_combine_params set_odm_combine_params;
+ struct set_odm_bypass_params set_odm_bypass_params;
+ struct opp_pipe_clock_control_params opp_pipe_clock_control_params;
+ struct opp_program_left_edge_extra_pixel_params opp_program_left_edge_extra_pixel_params;
+ struct dccg_set_dto_dscclk_params dccg_set_dto_dscclk_params;
+ struct dsc_set_config_params dsc_set_config_params;
+ struct dsc_enable_params dsc_enable_params;
+ struct tg_set_dsc_config_params tg_set_dsc_config_params;
+ struct dsc_disconnect_params dsc_disconnect_params;
+ struct dsc_read_state_params dsc_read_state_params;
+ struct dsc_calculate_and_set_config_params dsc_calculate_and_set_config_params;
+ struct dsc_enable_with_opp_params dsc_enable_with_opp_params;
+ struct program_tg_params program_tg_params;
+ struct tg_program_global_sync_params tg_program_global_sync_params;
+ struct tg_wait_for_state_params tg_wait_for_state_params;
+ struct tg_set_vtg_params_params tg_set_vtg_params_params;
+ struct tg_setup_vertical_interrupt2_params tg_setup_vertical_interrupt2_params;
+ struct dpp_set_hdr_multiplier_params dpp_set_hdr_multiplier_params;
+ struct tg_set_gsl_params tg_set_gsl_params;
+ struct tg_set_gsl_source_select_params tg_set_gsl_source_select_params;
+ struct setup_vupdate_interrupt_params setup_vupdate_interrupt_params;
+ struct program_det_size_params program_det_size_params;
+ struct program_det_segments_params program_det_segments_params;
+ struct update_dchubp_dpp_params update_dchubp_dpp_params;
+ struct opp_set_dyn_expansion_params opp_set_dyn_expansion_params;
+ struct opp_program_fmt_params opp_program_fmt_params;
+ struct opp_program_bit_depth_reduction_params opp_program_bit_depth_reduction_params;
+ struct opp_set_disp_pattern_generator_params opp_set_disp_pattern_generator_params;
+ struct set_abm_pipe_params set_abm_pipe_params;
+ struct set_abm_level_params set_abm_level_params;
+ struct set_abm_immediate_disable_params set_abm_immediate_disable_params;
+ struct set_disp_pattern_generator_params set_disp_pattern_generator_params;
+ struct mpc_remove_mpcc_params mpc_remove_mpcc_params;
+ struct opp_set_mpcc_disconnect_pending_params opp_set_mpcc_disconnect_pending_params;
+ struct dc_set_optimized_required_params dc_set_optimized_required_params;
+ struct hubp_disconnect_params hubp_disconnect_params;
+ struct hubbub_force_pstate_change_control_params hubbub_force_pstate_change_control_params;
+ struct tg_enable_crtc_params tg_enable_crtc_params;
+ struct hubp_wait_flip_pending_params hubp_wait_flip_pending_params;
+ struct tg_wait_double_buffer_pending_params tg_wait_double_buffer_pending_params;
+ struct update_force_pstate_params update_force_pstate_params;
+ struct hubbub_apply_dedcn21_147_wa_params hubbub_apply_dedcn21_147_wa_params;
+ struct hubbub_allow_self_refresh_control_params hubbub_allow_self_refresh_control_params;
+ struct tg_get_frame_count_params tg_get_frame_count_params;
+ struct mpc_set_dwb_mux_params mpc_set_dwb_mux_params;
+ struct mpc_disable_dwb_mux_params mpc_disable_dwb_mux_params;
+ struct mcif_wb_config_buf_params mcif_wb_config_buf_params;
+ struct mcif_wb_config_arb_params mcif_wb_config_arb_params;
+ struct mcif_wb_enable_params mcif_wb_enable_params;
+ struct mcif_wb_disable_params mcif_wb_disable_params;
+ struct dwbc_enable_params dwbc_enable_params;
+ struct dwbc_disable_params dwbc_disable_params;
+ struct dwbc_update_params dwbc_update_params;
+ struct hubp_update_mall_sel_params hubp_update_mall_sel_params;
+ struct hubp_prepare_subvp_buffering_params hubp_prepare_subvp_buffering_params;
+ struct hubp_set_blank_en_params hubp_set_blank_en_params;
+ struct hubp_disable_control_params hubp_disable_control_params;
+ struct hubbub_soft_reset_params hubbub_soft_reset_params;
+ struct hubp_clk_cntl_params hubp_clk_cntl_params;
+ struct hubp_init_params hubp_init_params;
+ struct hubp_set_vm_system_aperture_settings_params hubp_set_vm_system_aperture_settings_params;
+ struct hubp_set_flip_int_params hubp_set_flip_int_params;
+ struct dpp_dppclk_control_params dpp_dppclk_control_params;
+ struct disable_phantom_crtc_params disable_phantom_crtc_params;
+ struct dpp_pg_control_params dpp_pg_control_params;
+ struct hubp_pg_control_params hubp_pg_control_params;
+ struct hubp_reset_params hubp_reset_params;
+ struct dpp_reset_params dpp_reset_params;
+ struct dpp_root_clock_control_params dpp_root_clock_control_params;
+ struct dc_ip_request_cntl_params dc_ip_request_cntl_params;
+ struct dsc_pg_status_params dsc_pg_status_params;
+ struct dsc_wait_disconnect_pending_clear_params dsc_wait_disconnect_pending_clear_params;
+ struct dsc_disable_params dsc_disable_params;
+ struct dccg_set_ref_dscclk_params dccg_set_ref_dscclk_params;
+ struct dccg_update_dpp_dto_params dccg_update_dpp_dto_params;
+ struct hubp_vtg_sel_params hubp_vtg_sel_params;
+ struct hubp_setup2_params hubp_setup2_params;
+ struct hubp_setup_params hubp_setup_params;
+ struct hubp_set_unbounded_requesting_params hubp_set_unbounded_requesting_params;
+ struct hubp_setup_interdependent2_params hubp_setup_interdependent2_params;
+ struct hubp_setup_interdependent_params hubp_setup_interdependent_params;
+ struct dpp_set_cursor_matrix_params dpp_set_cursor_matrix_params;
+ struct mpc_update_mpcc_params mpc_update_mpcc_params;
+ struct mpc_update_blending_params mpc_update_blending_params;
+ struct mpc_assert_idle_mpcc_params mpc_assert_idle_mpcc_params;
+ struct mpc_insert_plane_params mpc_insert_plane_params;
+ struct dpp_set_scaler_params dpp_set_scaler_params;
+ struct hubp_mem_program_viewport_params hubp_mem_program_viewport_params;
+ struct abort_cursor_offload_update_params abort_cursor_offload_update_params;
+ struct set_cursor_attribute_params set_cursor_attribute_params;
+ struct set_cursor_position_params set_cursor_position_params;
+ struct set_cursor_sdr_white_level_params set_cursor_sdr_white_level_params;
+ struct program_output_csc_params program_output_csc_params;
+ struct hubp_set_blank_params hubp_set_blank_params;
+ struct phantom_hubp_post_enable_params phantom_hubp_post_enable_params;
};
enum block_sequence_func {
@@ -188,13 +869,111 @@ enum block_sequence_func {
DPP_SETUP_DPP,
DPP_PROGRAM_BIAS_AND_SCALE,
DPP_SET_OUTPUT_TRANSFER_FUNC,
+ DPP_SET_HDR_MULTIPLIER,
MPC_UPDATE_VISUAL_CONFIRM,
MPC_POWER_ON_MPC_MEM_PWR,
MPC_SET_OUTPUT_CSC,
MPC_SET_OCSC_DEFAULT,
DMUB_SUBVP_SAVE_SURF_ADDR,
HUBP_WAIT_FOR_DCC_META_PROP,
- DMUB_FAMS2_GLOBAL_CONTROL_LOCK_FAST,
+ DMUB_HW_CONTROL_LOCK_FAST,
+ HUBP_PROGRAM_SURFACE_CONFIG,
+ HUBP_PROGRAM_MCACHE_ID,
+ PROGRAM_CURSOR_UPDATE_NOW,
+ HUBP_WAIT_PIPE_READ_START,
+ HWS_APPLY_UPDATE_FLAGS_FOR_PHANTOM,
+ HWS_UPDATE_PHANTOM_VP_POSITION,
+ OPTC_SET_ODM_COMBINE,
+ OPTC_SET_ODM_BYPASS,
+ OPP_PIPE_CLOCK_CONTROL,
+ OPP_PROGRAM_LEFT_EDGE_EXTRA_PIXEL,
+ DCCG_SET_DTO_DSCCLK,
+ DSC_SET_CONFIG,
+ DSC_ENABLE,
+ TG_SET_DSC_CONFIG,
+ DSC_DISCONNECT,
+ DSC_READ_STATE,
+ DSC_CALCULATE_AND_SET_CONFIG,
+ DSC_ENABLE_WITH_OPP,
+ TG_PROGRAM_GLOBAL_SYNC,
+ TG_WAIT_FOR_STATE,
+ TG_SET_VTG_PARAMS,
+ TG_SETUP_VERTICAL_INTERRUPT2,
+ HUBP_PROGRAM_DET_SIZE,
+ HUBP_PROGRAM_DET_SEGMENTS,
+ OPP_SET_DYN_EXPANSION,
+ OPP_PROGRAM_FMT,
+ OPP_PROGRAM_BIT_DEPTH_REDUCTION,
+ OPP_SET_DISP_PATTERN_GENERATOR,
+ ABM_SET_PIPE,
+ ABM_SET_LEVEL,
+ ABM_SET_IMMEDIATE_DISABLE,
+ MPC_REMOVE_MPCC,
+ OPP_SET_MPCC_DISCONNECT_PENDING,
+ DC_SET_OPTIMIZED_REQUIRED,
+ HUBP_DISCONNECT,
+ HUBBUB_FORCE_PSTATE_CHANGE_CONTROL,
+ TG_ENABLE_CRTC,
+ TG_SET_GSL,
+ TG_SET_GSL_SOURCE_SELECT,
+ HUBP_WAIT_FLIP_PENDING,
+ TG_WAIT_DOUBLE_BUFFER_PENDING,
+ UPDATE_FORCE_PSTATE,
+ PROGRAM_MALL_PIPE_CONFIG,
+ HUBBUB_APPLY_DEDCN21_147_WA,
+ HUBBUB_ALLOW_SELF_REFRESH_CONTROL,
+ TG_GET_FRAME_COUNT,
+ MPC_SET_DWB_MUX,
+ MPC_DISABLE_DWB_MUX,
+ MCIF_WB_CONFIG_BUF,
+ MCIF_WB_CONFIG_ARB,
+ MCIF_WB_ENABLE,
+ MCIF_WB_DISABLE,
+ DWBC_ENABLE,
+ DWBC_DISABLE,
+ DWBC_UPDATE,
+ HUBP_UPDATE_MALL_SEL,
+ HUBP_PREPARE_SUBVP_BUFFERING,
+ HUBP_SET_BLANK_EN,
+ HUBP_DISABLE_CONTROL,
+ HUBBUB_SOFT_RESET,
+ HUBP_CLK_CNTL,
+ HUBP_INIT,
+ HUBP_SET_VM_SYSTEM_APERTURE_SETTINGS,
+ HUBP_SET_FLIP_INT,
+ DPP_DPPCLK_CONTROL,
+ DISABLE_PHANTOM_CRTC,
+ DSC_PG_STATUS,
+ DSC_WAIT_DISCONNECT_PENDING_CLEAR,
+ DSC_DISABLE,
+ DCCG_SET_REF_DSCCLK,
+ DPP_PG_CONTROL,
+ HUBP_PG_CONTROL,
+ HUBP_RESET,
+ DPP_RESET,
+ DPP_ROOT_CLOCK_CONTROL,
+ DC_IP_REQUEST_CNTL,
+ DCCG_UPDATE_DPP_DTO,
+ HUBP_VTG_SEL,
+ HUBP_SETUP2,
+ HUBP_SETUP,
+ HUBP_SET_UNBOUNDED_REQUESTING,
+ HUBP_SETUP_INTERDEPENDENT2,
+ HUBP_SETUP_INTERDEPENDENT,
+ DPP_SET_CURSOR_MATRIX,
+ MPC_UPDATE_BLENDING,
+ MPC_ASSERT_IDLE_MPCC,
+ MPC_INSERT_PLANE,
+ DPP_SET_SCALER,
+ HUBP_MEM_PROGRAM_VIEWPORT,
+ ABORT_CURSOR_OFFLOAD_UPDATE,
+ SET_CURSOR_ATTRIBUTE,
+ SET_CURSOR_POSITION,
+ SET_CURSOR_SDR_WHITE_LEVEL,
+ PROGRAM_OUTPUT_CSC,
+ HUBP_SET_LEGACY_TILING_COMPAT_LEVEL,
+ HUBP_SET_BLANK,
+ PHANTOM_HUBP_POST_ENABLE,
/* This must be the last value in this enum, add new ones above */
HWSS_BLOCK_SEQUENCE_FUNC_COUNT
};
@@ -204,6 +983,11 @@ struct block_sequence {
enum block_sequence_func func;
};
+struct block_sequence_state {
+ struct block_sequence *steps;
+ unsigned int *num_steps;
+};
+
#define MAX_HWSS_BLOCK_SEQUENCE_SIZE (HWSS_BLOCK_SEQUENCE_FUNC_COUNT * MAX_PIPES)
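+
+/*
+ * Usage sketch (illustrative, not part of this interface): bind a steps
+ * array and counter to one block_sequence_state, pass it to the hwss_add_*
+ * helpers declared below, then execute the recorded steps in order:
+ *
+ *   struct block_sequence steps[MAX_HWSS_BLOCK_SEQUENCE_SIZE];
+ *   unsigned int num_steps = 0;
+ *   struct block_sequence_state seq_state = {
+ *           .steps = steps,
+ *           .num_steps = &num_steps,
+ *   };
+ *
+ *   hwss_add_hubp_set_flip_int(&seq_state, hubp);
+ */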
struct hw_sequencer_funcs {
@@ -221,6 +1005,8 @@ struct hw_sequencer_funcs {
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
+ void (*disable_plane_sequence)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
@@ -238,6 +1024,10 @@ struct hw_sequencer_funcs {
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
+ void (*wait_for_mpcc_disconnect_sequence)(struct dc *dc,
+ struct resource_pool *res_pool,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*edp_backlight_control)(
struct dc_link *link,
bool enable);
@@ -309,6 +1099,13 @@ struct hw_sequencer_funcs {
void (*set_cursor_position)(struct pipe_ctx *pipe);
void (*set_cursor_attribute)(struct pipe_ctx *pipe);
void (*set_cursor_sdr_white_level)(struct pipe_ctx *pipe);
+ void (*abort_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*begin_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*commit_cursor_offload_update)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*update_cursor_offload_pipe)(struct dc *dc, const struct pipe_ctx *pipe);
+ void (*notify_cursor_offload_drr_update)(struct dc *dc, struct dc_state *context,
+ const struct dc_stream_state *stream);
+ void (*program_cursor_offload_now)(struct dc *dc, const struct pipe_ctx *pipe);
/* Colour Related */
void (*program_gamut_remap)(struct pipe_ctx *pipe_ctx);
@@ -451,13 +1248,13 @@ struct hw_sequencer_funcs {
const struct dc_state *new_ctx);
void (*wait_for_dcc_meta_propagation)(const struct dc *dc,
const struct pipe_ctx *top_pipe_to_program);
- void (*fams2_global_control_lock)(struct dc *dc,
+ void (*dmub_hw_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock);
void (*fams2_update_config)(struct dc *dc,
struct dc_state *context,
bool enable);
- void (*fams2_global_control_lock_fast)(union block_sequence_params *params);
+ void (*dmub_hw_control_lock_fast)(union block_sequence_params *params);
void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max);
void (*program_outstanding_updates)(struct dc *dc,
struct dc_state *context);
@@ -470,11 +1267,26 @@ struct hw_sequencer_funcs {
void (*enable_plane)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*enable_plane_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*update_dchubp_dpp)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*update_dchubp_dpp_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*post_unlock_reset_opp)(struct dc *dc,
struct pipe_ctx *opp_head);
+ void (*post_unlock_reset_opp_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *opp_head,
+ struct block_sequence_state *seq_state);
+ void (*get_underflow_debug_data)(const struct dc *dc,
+ struct timing_generator *tg,
+ struct dc_underflow_debug_data *out_data);
};
void color_space_to_black_color(
@@ -584,4 +1396,630 @@ void hwss_set_ocsc_default(union block_sequence_params *params);
void hwss_subvp_save_surf_addr(union block_sequence_params *params);
+void hwss_program_surface_config(union block_sequence_params *params);
+
+void hwss_program_mcache_id_and_split_coordinate(union block_sequence_params *params);
+
+void hwss_set_odm_combine(union block_sequence_params *params);
+
+void hwss_set_odm_bypass(union block_sequence_params *params);
+
+void hwss_opp_pipe_clock_control(union block_sequence_params *params);
+
+void hwss_opp_program_left_edge_extra_pixel(union block_sequence_params *params);
+
+void hwss_blank_pixel_data(union block_sequence_params *params);
+
+void hwss_dccg_set_dto_dscclk(union block_sequence_params *params);
+
+void hwss_dsc_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable(union block_sequence_params *params);
+
+void hwss_tg_set_dsc_config(union block_sequence_params *params);
+
+void hwss_dsc_disconnect(union block_sequence_params *params);
+
+void hwss_dsc_read_state(union block_sequence_params *params);
+
+void hwss_dsc_calculate_and_set_config(union block_sequence_params *params);
+
+void hwss_dsc_enable_with_opp(union block_sequence_params *params);
+
+void hwss_program_tg(union block_sequence_params *params);
+
+void hwss_tg_program_global_sync(union block_sequence_params *params);
+
+void hwss_tg_wait_for_state(union block_sequence_params *params);
+
+void hwss_tg_set_vtg_params(union block_sequence_params *params);
+
+void hwss_tg_setup_vertical_interrupt2(union block_sequence_params *params);
+
+void hwss_dpp_set_hdr_multiplier(union block_sequence_params *params);
+
+void hwss_program_det_size(union block_sequence_params *params);
+
+void hwss_program_det_segments(union block_sequence_params *params);
+
+void hwss_opp_set_dyn_expansion(union block_sequence_params *params);
+
+void hwss_opp_program_fmt(union block_sequence_params *params);
+
+void hwss_opp_program_bit_depth_reduction(union block_sequence_params *params);
+
+void hwss_opp_set_disp_pattern_generator(union block_sequence_params *params);
+
+void hwss_set_abm_pipe(union block_sequence_params *params);
+
+void hwss_set_abm_level(union block_sequence_params *params);
+
+void hwss_set_abm_immediate_disable(union block_sequence_params *params);
+
+void hwss_mpc_remove_mpcc(union block_sequence_params *params);
+
+void hwss_opp_set_mpcc_disconnect_pending(union block_sequence_params *params);
+
+void hwss_dc_set_optimized_required(union block_sequence_params *params);
+
+void hwss_hubp_disconnect(union block_sequence_params *params);
+
+void hwss_hubbub_force_pstate_change_control(union block_sequence_params *params);
+
+void hwss_tg_enable_crtc(union block_sequence_params *params);
+
+void hwss_tg_set_gsl(union block_sequence_params *params);
+
+void hwss_tg_set_gsl_source_select(union block_sequence_params *params);
+
+void hwss_hubp_wait_flip_pending(union block_sequence_params *params);
+
+void hwss_tg_wait_double_buffer_pending(union block_sequence_params *params);
+
+void hwss_update_force_pstate(union block_sequence_params *params);
+
+void hwss_hubbub_apply_dedcn21_147_wa(union block_sequence_params *params);
+
+void hwss_hubbub_allow_self_refresh_control(union block_sequence_params *params);
+
+void hwss_tg_get_frame_count(union block_sequence_params *params);
+
+void hwss_mpc_set_dwb_mux(union block_sequence_params *params);
+
+void hwss_mpc_disable_dwb_mux(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_buf(union block_sequence_params *params);
+
+void hwss_mcif_wb_config_arb(union block_sequence_params *params);
+
+void hwss_mcif_wb_enable(union block_sequence_params *params);
+
+void hwss_mcif_wb_disable(union block_sequence_params *params);
+
+void hwss_dwbc_enable(union block_sequence_params *params);
+
+void hwss_dwbc_disable(union block_sequence_params *params);
+
+void hwss_dwbc_update(union block_sequence_params *params);
+
+void hwss_hubp_update_mall_sel(union block_sequence_params *params);
+
+void hwss_hubp_prepare_subvp_buffering(union block_sequence_params *params);
+
+void hwss_hubp_set_blank_en(union block_sequence_params *params);
+
+void hwss_hubp_disable_control(union block_sequence_params *params);
+
+void hwss_hubbub_soft_reset(union block_sequence_params *params);
+
+void hwss_hubp_clk_cntl(union block_sequence_params *params);
+
+void hwss_hubp_init(union block_sequence_params *params);
+
+void hwss_hubp_set_vm_system_aperture_settings(union block_sequence_params *params);
+
+void hwss_hubp_set_flip_int(union block_sequence_params *params);
+
+void hwss_dpp_dppclk_control(union block_sequence_params *params);
+
+void hwss_disable_phantom_crtc(union block_sequence_params *params);
+
+void hwss_dsc_pg_status(union block_sequence_params *params);
+
+void hwss_dsc_wait_disconnect_pending_clear(union block_sequence_params *params);
+
+void hwss_dsc_disable(union block_sequence_params *params);
+
+void hwss_dccg_set_ref_dscclk(union block_sequence_params *params);
+
+void hwss_dpp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_pg_control(union block_sequence_params *params);
+
+void hwss_hubp_reset(union block_sequence_params *params);
+
+void hwss_dpp_reset(union block_sequence_params *params);
+
+void hwss_dpp_root_clock_control(union block_sequence_params *params);
+
+void hwss_dc_ip_request_cntl(union block_sequence_params *params);
+
+void hwss_dccg_update_dpp_dto(union block_sequence_params *params);
+
+void hwss_hubp_vtg_sel(union block_sequence_params *params);
+
+void hwss_hubp_setup2(union block_sequence_params *params);
+
+void hwss_hubp_setup(union block_sequence_params *params);
+
+void hwss_hubp_set_unbounded_requesting(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent2(union block_sequence_params *params);
+
+void hwss_hubp_setup_interdependent(union block_sequence_params *params);
+
+void hwss_dpp_set_cursor_matrix(union block_sequence_params *params);
+
+void hwss_mpc_update_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_update_blending(union block_sequence_params *params);
+
+void hwss_mpc_assert_idle_mpcc(union block_sequence_params *params);
+
+void hwss_mpc_insert_plane(union block_sequence_params *params);
+
+void hwss_dpp_set_scaler(union block_sequence_params *params);
+
+void hwss_hubp_mem_program_viewport(union block_sequence_params *params);
+
+void hwss_abort_cursor_offload_update(union block_sequence_params *params);
+
+void hwss_set_cursor_attribute(union block_sequence_params *params);
+
+void hwss_set_cursor_position(union block_sequence_params *params);
+
+void hwss_set_cursor_sdr_white_level(union block_sequence_params *params);
+
+void hwss_program_output_csc(union block_sequence_params *params);
+
+void hwss_hubp_set_legacy_tiling_compat_level(union block_sequence_params *params);
+
+void hwss_hubp_set_blank(union block_sequence_params *params);
+
+void hwss_phantom_hubp_post_enable(union block_sequence_params *params);
+
+void hwss_add_optc_pipe_control_lock(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool lock);
+
+void hwss_add_hubp_set_flip_control_gsl(struct block_sequence_state *seq_state,
+ struct hubp *hubp, bool flip_immediate);
+
+void hwss_add_hubp_program_triplebuffer(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
+
+void hwss_add_hubp_update_plane_addr(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_input_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_plane_state *plane_state);
+
+void hwss_add_dpp_program_gamut_remap(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_program_bias_and_scale(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_program_manual_trigger(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_output_transfer_func(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_stream_state *stream);
+
+void hwss_add_mpc_update_visual_confirm(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx, int mpcc_id);
+
+void hwss_add_mpc_power_on_mpc_mem_pwr(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int mpcc_id, bool power_on);
+
+void hwss_add_mpc_set_output_csc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, const uint16_t *regval, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_mpc_set_ocsc_default(struct block_sequence_state *seq_state,
+ struct mpc *mpc, int opp_id, enum dc_color_space colorspace, enum mpc_output_csc_mode ocsc_mode);
+
+void hwss_add_dmub_send_dmcub_cmd(struct block_sequence_state *seq_state,
+ struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
+
+void hwss_add_dmub_subvp_save_surf_addr(struct block_sequence_state *seq_state,
+ struct dc_dmub_srv *dc_dmub_srv, struct dc_plane_address *addr, uint8_t subvp_index);
+
+void hwss_add_hubp_wait_for_dcc_meta_prop(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *top_pipe_to_program);
+
+void hwss_add_hubp_wait_pipe_read_start(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hws_apply_update_flags_for_phantom(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_hws_update_phantom_vp_position(struct block_sequence_state *seq_state,
+ struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_optc_set_odm_combine(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int opp_inst[MAX_PIPES], int opp_head_count,
+ int odm_slice_width, int last_odm_slice_width);
+
+void hwss_add_optc_set_odm_bypass(struct block_sequence_state *seq_state,
+ struct timing_generator *optc, struct dc_crtc_timing *timing);
+
+void hwss_add_tg_program_global_sync(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int vready_offset,
+ unsigned int vstartup_lines,
+ unsigned int vupdate_offset_pixels,
+ unsigned int vupdate_width_pixels,
+ unsigned int pstate_keepout_start_lines);
+
+void hwss_add_tg_wait_for_state(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, enum crtc_state state);
+
+void hwss_add_tg_set_vtg_params(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
+
+void hwss_add_tg_setup_vertical_interrupt2(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, int start_line);
+
+void hwss_add_dpp_set_hdr_multiplier(struct block_sequence_state *seq_state,
+ struct dpp *dpp, uint32_t hw_mult);
+
+void hwss_add_hubp_program_det_size(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_buffer_size_kb);
+
+void hwss_add_hubp_program_mcache_id(struct block_sequence_state *seq_state,
+ struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs);
+
+void hwss_add_hubbub_force_pstate_change_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, bool enable, bool wait);
+
+void hwss_add_hubp_program_det_segments(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub, unsigned int hubp_inst, unsigned int det_size);
+
+void hwss_add_opp_set_dyn_expansion(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, enum dc_color_space color_sp,
+ enum dc_color_depth color_dpth, enum signal_type signal);
+
+void hwss_add_opp_program_fmt(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, struct bit_depth_reduction_params *fmt_bit_depth,
+ struct clamping_and_pixel_encoding_params *clamping);
+
+void hwss_add_abm_set_pipe(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_abm_set_level(struct block_sequence_state *seq_state,
+ struct abm *abm, uint32_t abm_level);
+
+void hwss_add_tg_enable_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_hubp_wait_flip_pending(struct block_sequence_state *seq_state,
+ struct hubp *hubp, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_tg_wait_double_buffer_pending(struct block_sequence_state *seq_state,
+ struct timing_generator *tg, unsigned int timeout_us, unsigned int polling_interval_us);
+
+void hwss_add_dccg_set_dto_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg, int inst, int num_slices_h);
+
+void hwss_add_dsc_calculate_and_set_config(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx, bool enable, int opp_cnt);
+
+void hwss_add_mpc_remove_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc, struct mpc_tree *mpc_tree_params, struct mpcc *mpcc_to_remove);
+
+void hwss_add_opp_set_mpcc_disconnect_pending(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp, int mpcc_inst, bool pending);
+
+void hwss_add_hubp_disconnect(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dsc_enable_with_opp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dsc_disconnect(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc);
+
+void hwss_add_dc_set_optimized_required(struct block_sequence_state *seq_state,
+ struct dc *dc, bool optimized_required);
+
+void hwss_add_abm_set_immediate_disable(struct block_sequence_state *seq_state,
+ struct dc *dc, struct pipe_ctx *pipe_ctx);
+
+void hwss_add_opp_set_disp_pattern_generator(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum controller_dp_test_pattern test_pattern,
+ enum controller_dp_color_space color_space,
+ enum dc_color_depth color_depth,
+ struct tg_color solid_color,
+ bool use_solid_color,
+ int width,
+ int height,
+ int offset);
+
+void hwss_add_opp_program_bit_depth_reduction(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool use_default_params,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dc_ip_request_cntl(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ bool enable);
+
+void hwss_add_dwbc_update(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_mcif_wb_config_buf(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_buf_params *mcif_buf_params,
+ unsigned int dest_height);
+
+void hwss_add_mcif_wb_config_arb(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb,
+ struct mcif_arb_params *mcif_arb_params);
+
+void hwss_add_mcif_wb_enable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mcif_wb_disable(struct block_sequence_state *seq_state,
+ struct mcif_wb *mcif_wb);
+
+void hwss_add_mpc_set_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int dwb_id,
+ int mpcc_id);
+
+void hwss_add_mpc_disable_dwb_mux(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ unsigned int dwb_id);
+
+void hwss_add_dwbc_enable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb,
+ struct dc_dwb_params *dwb_params);
+
+void hwss_add_dwbc_disable(struct block_sequence_state *seq_state,
+ struct dwbc *dwb);
+
+void hwss_add_tg_set_gsl(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct gsl_params gsl);
+
+void hwss_add_tg_set_gsl_source_select(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ int group_idx,
+ uint32_t gsl_ready_signal);
+
+void hwss_add_hubp_update_mall_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t mall_sel,
+ bool cache_cursor);
+
+void hwss_add_hubp_prepare_subvp_buffering(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_set_blank_en(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_hubp_disable_control(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool disable);
+
+void hwss_add_hubbub_soft_reset(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ void (*hubbub_soft_reset)(struct hubbub *hubbub, bool reset),
+ bool reset);
+
+void hwss_add_hubp_clk_cntl(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool enable);
+
+void hwss_add_dpp_dppclk_control(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ bool dppclk_div,
+ bool enable);
+
+void hwss_add_disable_phantom_crtc(struct block_sequence_state *seq_state,
+ struct timing_generator *tg);
+
+void hwss_add_dsc_pg_status(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ int dsc_inst,
+ bool is_ungated);
+
+void hwss_add_dsc_wait_disconnect_pending_clear(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dsc_disable(struct block_sequence_state *seq_state,
+ struct display_stream_compressor *dsc,
+ bool *is_ungated);
+
+void hwss_add_dccg_set_ref_dscclk(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dsc_inst,
+ bool *is_ungated);
+
+void hwss_add_dpp_root_clock_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool clock_on);
+
+void hwss_add_dpp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int dpp_inst,
+ bool power_on);
+
+void hwss_add_hubp_pg_control(struct block_sequence_state *seq_state,
+ struct dce_hwseq *hws,
+ unsigned int hubp_inst,
+ bool power_on);
+
+void hwss_add_hubp_set_blank(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool blank);
+
+void hwss_add_hubp_init(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_hubp_reset(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dpp_reset(struct block_sequence_state *seq_state,
+ struct dpp *dpp);
+
+void hwss_add_opp_pipe_clock_control(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ bool enable);
+
+void hwss_add_hubp_set_vm_system_aperture_settings(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint64_t sys_default,
+ uint64_t sys_low,
+ uint64_t sys_high);
+
+void hwss_add_hubp_set_flip_int(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_dccg_update_dpp_dto(struct block_sequence_state *seq_state,
+ struct dccg *dccg,
+ int dpp_inst,
+ int dppclk_khz);
+
+void hwss_add_hubp_vtg_sel(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ uint32_t otg_inst);
+
+void hwss_add_hubp_setup2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs,
+ union dml2_global_sync_programming *global_sync,
+ struct dc_crtc_timing *timing);
+
+void hwss_add_hubp_setup(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+
+void hwss_add_hubp_set_unbounded_requesting(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ bool unbounded_req);
+
+void hwss_add_hubp_setup_interdependent2(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct dml2_dchub_per_pipe_register_set *hubp_regs);
+
+void hwss_add_hubp_setup_interdependent(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
+
+void hwss_add_hubp_program_surface_config(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ struct dc_tiling_info *tiling_info,
+ struct plane_size plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ int compat_level);
+
+void hwss_add_dpp_setup_dpp(struct block_sequence_state *seq_state,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_dpp_set_cursor_matrix(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ enum dc_color_space color_space,
+ struct dc_csc_transform *cursor_csc_color_matrix);
+
+void hwss_add_mpc_update_blending(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpcc_blnd_cfg blnd_cfg,
+ int mpcc_id);
+
+void hwss_add_mpc_assert_idle_mpcc(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ int mpcc_id);
+
+void hwss_add_mpc_insert_plane(struct block_sequence_state *seq_state,
+ struct mpc *mpc,
+ struct mpc_tree *mpc_tree_params,
+ struct mpcc_blnd_cfg blnd_cfg,
+ struct mpcc_sm_cfg *sm_cfg,
+ struct mpcc *insert_above_mpcc,
+ int dpp_id,
+ int mpcc_id);
+
+void hwss_add_dpp_set_scaler(struct block_sequence_state *seq_state,
+ struct dpp *dpp,
+ const struct scaler_data *scl_data);
+
+void hwss_add_hubp_mem_program_viewport(struct block_sequence_state *seq_state,
+ struct hubp *hubp,
+ const struct rect *viewport,
+ const struct rect *viewport_c);
+
+void hwss_add_abort_cursor_offload_update(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_attribute(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_position(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_set_cursor_sdr_white_level(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx);
+
+void hwss_add_program_output_csc(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ enum dc_color_space colorspace,
+ uint16_t *matrix,
+ int opp_id);
+
+void hwss_add_phantom_hubp_post_enable(struct block_sequence_state *seq_state,
+ struct hubp *hubp);
+
+void hwss_add_update_force_pstate(struct block_sequence_state *seq_state,
+ struct dc *dc,
+ struct dc_state *context);
+
+void hwss_add_hubbub_apply_dedcn21_147_wa(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub);
+
+void hwss_add_hubbub_allow_self_refresh_control(struct block_sequence_state *seq_state,
+ struct hubbub *hubbub,
+ bool allow,
+ bool *disallow_self_refresh_applied);
+
+void hwss_add_tg_get_frame_count(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ unsigned int *frame_count);
+
+void hwss_add_tg_set_dsc_config(struct block_sequence_state *seq_state,
+ struct timing_generator *tg,
+ struct dsc_optc_config *dsc_optc_cfg,
+ bool enable);
+
+void hwss_add_opp_program_left_edge_extra_pixel(struct block_sequence_state *seq_state,
+ struct output_pixel_processor *opp,
+ enum dc_pixel_encoding pixel_encoding,
+ bool is_otg_master);
+
#endif /* __DC_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 1e2d247fbbac..406db231bc72 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -27,6 +27,7 @@
#define __DC_HW_SEQUENCER_PRIVATE_H__
#include "dc_types.h"
+#include "hw_sequencer.h"
enum pipe_gating_control {
PIPE_GATING_CONTROL_DISABLE = 0,
@@ -80,7 +81,13 @@ struct hwseq_private_funcs {
void (*plane_atomic_disconnect)(struct dc *dc,
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
+ void (*plane_atomic_disconnect_sequence)(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*update_mpcc_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*set_input_transfer_func)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
@@ -97,6 +104,10 @@ struct hwseq_private_funcs {
void (*blank_pixel_data)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
bool blank);
+ void (*blank_pixel_data_sequence)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool blank,
+ struct block_sequence_state *seq_state);
enum dc_status (*enable_stream_timing)(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
@@ -105,6 +116,8 @@ struct hwseq_private_funcs {
bool enable);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
+ void (*setup_vupdate_interrupt_sequence)(struct dc *dc, struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*init_blank)(struct dc *dc, struct timing_generator *tg);
void (*disable_vga)(struct dce_hwseq *hws);
@@ -112,6 +125,10 @@ struct hwseq_private_funcs {
void (*plane_atomic_power_down)(struct dc *dc,
struct dpp *dpp,
struct hubp *hubp);
+ void (*plane_atomic_power_down_sequence)(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp,
+ struct block_sequence_state *seq_state);
void (*plane_atomic_disable)(struct dc *dc, struct pipe_ctx *pipe_ctx);
void (*enable_power_gating_plane)(struct dce_hwseq *hws,
bool enable);
@@ -140,15 +157,31 @@ struct hwseq_private_funcs {
unsigned int dsc_inst);
void (*update_odm)(struct dc *dc, struct dc_state *context,
struct pipe_ctx *pipe_ctx);
+ void (*update_odm_sequence)(struct dc *dc, struct dc_state *context,
+ struct pipe_ctx *pipe_ctx, struct block_sequence_state *seq_state);
void (*program_all_writeback_pipes_in_tree)(struct dc *dc,
const struct dc_stream_state *stream,
struct dc_state *context);
+ void (*program_all_writeback_pipes_in_tree_sequence)(
+ struct dc *dc,
+ const struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*s0i3_golden_init_wa)(struct dc *dc);
void (*set_hdr_multiplier)(struct pipe_ctx *pipe_ctx);
+ void (*set_hdr_multiplier_sequence)(struct pipe_ctx *pipe_ctx,
+ struct block_sequence_state *seq_state);
void (*verify_allow_pstate_change_high)(struct dc *dc);
+ void (*verify_allow_pstate_change_high_sequence)(struct dc *dc,
+ struct block_sequence_state *seq_state);
void (*program_pipe)(struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context);
+ void (*program_pipe_sequence)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct block_sequence_state *seq_state);
bool (*wait_for_blank_complete)(struct output_pixel_processor *opp);
void (*dccg_init)(struct dce_hwseq *hws);
bool (*set_blend_lut)(struct pipe_ctx *pipe_ctx,
@@ -163,6 +196,8 @@ struct hwseq_private_funcs {
void (*enable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
void (*program_mall_pipe_config)(struct dc *dc, struct dc_state *context);
+ void (*program_mall_pipe_config_sequence)(struct dc *dc, struct dc_state *context,
+ struct block_sequence_state *seq_state);
void (*update_force_pstate)(struct dc *dc, struct dc_state *context);
void (*update_mall_sel)(struct dc *dc, struct dc_state *context);
unsigned int (*calculate_dccg_k1_k2_values)(struct pipe_ctx *pipe_ctx,
@@ -186,6 +221,7 @@ struct hwseq_private_funcs {
void (*perform_3dlut_wa_unlock)(struct pipe_ctx *pipe_ctx);
void (*wait_for_pipe_update_if_needed)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool is_surface_update_only);
void (*set_wait_for_update_needed_for_pipe)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*dc_ip_request_cntl)(struct dc *dc, bool enable);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f0d7185153b2..5ed2cd344804 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -58,8 +58,8 @@
#include "transform.h"
#include "dpp.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
-#include "dml2/dml21/inc/dml_top_types.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
struct resource_pool;
struct dc_state;
@@ -274,7 +274,7 @@ struct resource_pool {
/* An array for accessing the link encoder objects that have been created.
* Index in array corresponds to engine ID - viz. 0: ENGINE_ID_DIGA
*/
- struct link_encoder *link_encoders[MAX_DIG_LINK_ENCODERS];
+ struct link_encoder *link_encoders[MAX_LINK_ENCODERS];
/* Number of DIG link encoder objects created - i.e. number of valid
* entries in link_encoders array.
*/
@@ -433,7 +433,14 @@ enum p_state_switch_method {
P_STATE_V_ACTIVE,
P_STATE_SUB_VP,
P_STATE_DRR_SUB_VP,
- P_STATE_V_BLANK_SUB_VP
+ P_STATE_V_BLANK_SUB_VP,
+};
+
+struct dsc_padding_params {
+ /* pixels borrowed from hblank to hactive */
+ uint8_t dsc_hactive_padding;
+ /* padding added to htotal, in pixels */
+ uint32_t dsc_htotal_padding;
+ /* pixel clock, in units of 100 Hz */
+ uint32_t dsc_pix_clk_100hz;
};
struct pipe_ctx {
@@ -493,8 +500,7 @@ struct pipe_ctx {
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
uint8_t subvp_index;
struct pixel_rate_divider pixel_rate_divider;
- /* pixels borrowed from hblank to hactive */
- uint8_t hblank_borrow;
+ struct dsc_padding_params dsc_padding_params;
/* next vupdate */
uint32_t next_vupdate;
uint32_t wait_frame_count;
@@ -508,7 +514,7 @@ struct pipe_ctx {
struct link_enc_cfg_context {
enum link_enc_cfg_mode mode;
struct link_enc_assignment link_enc_assignments[MAX_PIPES];
- enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
+ enum engine_id link_enc_avail[MAX_LINK_ENCODERS];
struct link_enc_assignment transient_assignments[MAX_PIPES];
};
@@ -520,8 +526,8 @@ struct resource_context {
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
struct link_enc_cfg_context link_enc_cfg_ctx;
- unsigned int dio_link_enc_to_link_idx[MAX_DIG_LINK_ENCODERS];
- int dio_link_enc_ref_cnts[MAX_DIG_LINK_ENCODERS];
+ unsigned int dio_link_enc_to_link_idx[MAX_LINK_ENCODERS];
+ int dio_link_enc_ref_cnts[MAX_LINK_ENCODERS];
bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS];
int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];
@@ -683,6 +689,7 @@ struct replay_context {
/* Controller Id used for Dig Fe source select */
enum controller_id controllerId;
unsigned int line_time_in_ns;
+ bool os_request_force_ffu;
};
enum dc_replay_enable {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
index 45645f9fd86c..7ce2f417f86a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h
@@ -57,9 +57,9 @@ struct cursor_attribute_cache_hubp {
} size;
union reg_cursor_settings_cfg {
struct {
- uint32_t dst_y_offset: 8;
- uint32_t chunk_hdl_adjust: 2;
- uint32_t reserved: 22;
+ uint32_t dst_y_offset: 8;
+ uint32_t chunk_hdl_adjust: 2;
+ uint32_t reserved: 22;
} bits;
uint32_t raw;
} settings;
@@ -83,12 +83,34 @@ union reg_cur0_control_cfg {
} bits;
uint32_t raw;
};
+
struct cursor_position_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
};
struct cursor_attribute_cache_dpp {
union reg_cur0_control_cfg cur0_ctl;
+ union reg_cur0_fp_scale_bias {
+ struct {
+ uint32_t fp_bias: 16;
+ uint32_t fp_scale: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias;
+ union reg_cur0_fp_scale_bias_g_y {
+ struct {
+ uint32_t fp_bias_g_y: 16;
+ uint32_t fp_scale_g_y: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_g_y;
+ union reg_cur0_fp_scale_bias_rb_crcb {
+ struct {
+ uint32_t fp_bias_rb_crcb: 16;
+ uint32_t fp_scale_rb_crcb: 16;
+ } bits;
+ uint32_t raw;
+ } fp_scale_bias_rb_crcb;
};
struct cursor_attributes_cfg {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 61c4d2a7db1c..500a601e99b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -71,6 +71,125 @@ enum pixel_rate_div {
PIXEL_RATE_DIV_NA = 0xF
};
+struct dcn_dccg_reg_state {
+ uint32_t dc_mem_global_pwr_req_cntl;
+ uint32_t dccg_audio_dtbclk_dto_modulo;
+ uint32_t dccg_audio_dtbclk_dto_phase;
+ uint32_t dccg_audio_dto_source;
+ uint32_t dccg_audio_dto0_module;
+ uint32_t dccg_audio_dto0_phase;
+ uint32_t dccg_audio_dto1_module;
+ uint32_t dccg_audio_dto1_phase;
+ uint32_t dccg_cac_status;
+ uint32_t dccg_cac_status2;
+ uint32_t dccg_disp_cntl_reg;
+ uint32_t dccg_ds_cntl;
+ uint32_t dccg_ds_dto_incr;
+ uint32_t dccg_ds_dto_modulo;
+ uint32_t dccg_ds_hw_cal_interval;
+ uint32_t dccg_gate_disable_cntl;
+ uint32_t dccg_gate_disable_cntl2;
+ uint32_t dccg_gate_disable_cntl3;
+ uint32_t dccg_gate_disable_cntl4;
+ uint32_t dccg_gate_disable_cntl5;
+ uint32_t dccg_gate_disable_cntl6;
+ uint32_t dccg_global_fgcg_rep_cntl;
+ uint32_t dccg_gtc_cntl;
+ uint32_t dccg_gtc_current;
+ uint32_t dccg_gtc_dto_incr;
+ uint32_t dccg_gtc_dto_modulo;
+ uint32_t dccg_perfmon_cntl;
+ uint32_t dccg_perfmon_cntl2;
+ uint32_t dccg_soft_reset;
+ uint32_t dccg_test_clk_sel;
+ uint32_t dccg_vsync_cnt_ctrl;
+ uint32_t dccg_vsync_cnt_int_ctrl;
+ uint32_t dccg_vsync_otg0_latch_value;
+ uint32_t dccg_vsync_otg1_latch_value;
+ uint32_t dccg_vsync_otg2_latch_value;
+ uint32_t dccg_vsync_otg3_latch_value;
+ uint32_t dccg_vsync_otg4_latch_value;
+ uint32_t dccg_vsync_otg5_latch_value;
+ uint32_t dispclk_cgtt_blk_ctrl_reg;
+ uint32_t dispclk_freq_change_cntl;
+ uint32_t dp_dto_dbuf_en;
+ uint32_t dp_dto0_modulo;
+ uint32_t dp_dto0_phase;
+ uint32_t dp_dto1_modulo;
+ uint32_t dp_dto1_phase;
+ uint32_t dp_dto2_modulo;
+ uint32_t dp_dto2_phase;
+ uint32_t dp_dto3_modulo;
+ uint32_t dp_dto3_phase;
+ uint32_t dpiaclk_540m_dto_modulo;
+ uint32_t dpiaclk_540m_dto_phase;
+ uint32_t dpiaclk_810m_dto_modulo;
+ uint32_t dpiaclk_810m_dto_phase;
+ uint32_t dpiaclk_dto_cntl;
+ uint32_t dpiasymclk_cntl;
+ uint32_t dppclk_cgtt_blk_ctrl_reg;
+ uint32_t dppclk_ctrl;
+ uint32_t dppclk_dto_ctrl;
+ uint32_t dppclk0_dto_param;
+ uint32_t dppclk1_dto_param;
+ uint32_t dppclk2_dto_param;
+ uint32_t dppclk3_dto_param;
+ uint32_t dprefclk_cgtt_blk_ctrl_reg;
+ uint32_t dprefclk_cntl;
+ uint32_t dpstreamclk_cntl;
+ uint32_t dscclk_dto_ctrl;
+ uint32_t dscclk0_dto_param;
+ uint32_t dscclk1_dto_param;
+ uint32_t dscclk2_dto_param;
+ uint32_t dscclk3_dto_param;
+ uint32_t dtbclk_dto_dbuf_en;
+ uint32_t dtbclk_dto0_modulo;
+ uint32_t dtbclk_dto0_phase;
+ uint32_t dtbclk_dto1_modulo;
+ uint32_t dtbclk_dto1_phase;
+ uint32_t dtbclk_dto2_modulo;
+ uint32_t dtbclk_dto2_phase;
+ uint32_t dtbclk_dto3_modulo;
+ uint32_t dtbclk_dto3_phase;
+ uint32_t dtbclk_p_cntl;
+ uint32_t force_symclk_disable;
+ uint32_t hdmicharclk0_clock_cntl;
+ uint32_t hdmistreamclk_cntl;
+ uint32_t hdmistreamclk0_dto_param;
+ uint32_t microsecond_time_base_div;
+ uint32_t millisecond_time_base_div;
+ uint32_t otg_pixel_rate_div;
+ uint32_t otg0_phypll_pixel_rate_cntl;
+ uint32_t otg0_pixel_rate_cntl;
+ uint32_t otg1_phypll_pixel_rate_cntl;
+ uint32_t otg1_pixel_rate_cntl;
+ uint32_t otg2_phypll_pixel_rate_cntl;
+ uint32_t otg2_pixel_rate_cntl;
+ uint32_t otg3_phypll_pixel_rate_cntl;
+ uint32_t otg3_pixel_rate_cntl;
+ uint32_t phyasymclk_clock_cntl;
+ uint32_t phybsymclk_clock_cntl;
+ uint32_t phycsymclk_clock_cntl;
+ uint32_t phydsymclk_clock_cntl;
+ uint32_t phyesymclk_clock_cntl;
+ uint32_t phyplla_pixclk_resync_cntl;
+ uint32_t phypllb_pixclk_resync_cntl;
+ uint32_t phypllc_pixclk_resync_cntl;
+ uint32_t phyplld_pixclk_resync_cntl;
+ uint32_t phyplle_pixclk_resync_cntl;
+ uint32_t refclk_cgtt_blk_ctrl_reg;
+ uint32_t socclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_cgtt_blk_ctrl_reg;
+ uint32_t symclk_psp_cntl;
+ uint32_t symclk32_le_cntl;
+ uint32_t symclk32_se_cntl;
+ uint32_t symclka_clock_enable;
+ uint32_t symclkb_clock_enable;
+ uint32_t symclkc_clock_enable;
+ uint32_t symclkd_clock_enable;
+ uint32_t symclke_clock_enable;
+};
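+/*
+ * A dcn_dccg_reg_state snapshot is filled in by the dccg_read_reg_state
+ * hook added to dccg_funcs below.
+ */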
+
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
@@ -81,7 +200,6 @@ struct dccg {
//int audio_dtbclk_khz;/* TODO needs to be removed */
//int ref_dtbclk_khz;/* TODO needs to be removed */
};
-
struct dtbclk_dto_params {
const struct dc_crtc_timing *timing;
int otg_inst;
@@ -214,6 +332,7 @@ struct dccg_funcs {
void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h);
void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst);
void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating);
+ void (*dccg_read_reg_state)(struct dccg *dccg, struct dcn_dccg_reg_state *dccg_reg_state);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 52b745667ef7..1ddfa30411c8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -137,6 +137,27 @@ struct dcn_hubbub_state {
uint32_t dram_state_cntl;
};
+struct dcn_hubbub_reg_state {
+ uint32_t det0_ctrl;
+ uint32_t det1_ctrl;
+ uint32_t det2_ctrl;
+ uint32_t det3_ctrl;
+ uint32_t compbuf_ctrl;
+};
+
+struct hubbub_system_latencies {
+ uint32_t max_latency_ns;
+ uint32_t avg_latency_ns;
+ uint32_t min_latency_ns;
+};
+
+struct hubbub_urgent_latency_params {
+ uint32_t refclk_mhz;
+ uint32_t t_win_ns;
+ uint32_t bandwidth_mbps;
+ uint32_t bw_factor_x1000;
+};
+
struct hubbub_funcs {
void (*update_dchub)(
struct hubbub *hubbub,
@@ -203,6 +224,8 @@ struct hubbub_funcs {
void (*init_watermarks)(struct hubbub *hubbub);
+ void (*hubbub_read_reg_state)(struct hubbub *hubbub, struct dcn_hubbub_reg_state *hubbub_reg_state);
+
/**
* @program_det_size:
*
@@ -229,6 +252,39 @@ struct hubbub_funcs {
void (*program_compbuf_segments)(struct hubbub *hubbub, unsigned compbuf_size_seg, bool safe_to_increase);
void (*wait_for_det_update)(struct hubbub *hubbub, int hubp_inst);
bool (*program_arbiter)(struct hubbub *hubbub, struct dml2_display_arb_regs *arb_regs, bool safe_to_lower);
+ void (*dchvm_init)(struct hubbub *hubbub);
+
+ struct hubbub_perfmon_funcs {
+ void (*reset)(struct hubbub *hubbub);
+ void (*start_measuring_max_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_max_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_average_memory_latency_ns)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_memory_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *sample_count);
+ void (*start_measuring_urgent_ramp_latency_ns)(
+ struct hubbub *hubbub,
+ const struct hubbub_urgent_latency_params *params);
+ uint32_t (*get_urgent_ramp_latency_ns)(struct hubbub *hubbub,
+ uint32_t refclk_mhz);
+ void (*start_measuring_unbounded_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_unbounded_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t *duration_ns);
+ void (*start_measuring_average_bandwidth_mbps)(
+ struct hubbub *hubbub);
+ uint32_t (*get_average_bandwidth_mbps)(struct hubbub *hubbub,
+ uint32_t refclk_mhz, uint32_t min_duration_ns,
+ uint32_t *duration_ns);
+ } perfmon;
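+
+ /*
+ * Illustrative perfmon flow (assumed usage; exact sequencing is left to
+ * the implementation):
+ *
+ *   funcs->perfmon.reset(hubbub);
+ *   funcs->perfmon.start_measuring_max_memory_latency_ns(hubbub);
+ *   ... let the workload run ...
+ *   max_ns = funcs->perfmon.get_max_memory_latency_ns(hubbub,
+ *           refclk_mhz, &sample_count);
+ */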
+
+ struct hubbub_qos_funcs {
+ void (*force_display_nominal_profile)(struct hubbub *hubbub);
+ void (*force_display_urgent_profile)(struct hubbub *hubbub);
+ void (*reset_display_qos_profile)(struct hubbub *hubbub);
+ } qos;
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 1b7c085dc2cc..d88b57d4f512 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -65,7 +65,6 @@ union defer_reg_writes {
} bits;
uint32_t raw;
};
-
struct dpp {
const struct dpp_funcs *funcs;
struct dc_context *ctx;
@@ -84,6 +83,7 @@ struct dpp {
struct pwl_params shaper_params;
bool cm_bypass_mode;
+ bool cursor_offload;
struct cursor_position_cache_dpp pos;
struct cursor_attribute_cache_dpp att;
@@ -202,6 +202,19 @@ struct dcn_dpp_state {
uint32_t gamcor_mode;
};
+struct dcn_dpp_reg_state {
+ uint32_t recout_start;
+ uint32_t recout_size;
+ uint32_t scl_horz_filter_scale_ratio;
+ uint32_t scl_vert_filter_scale_ratio;
+ uint32_t scl_mode;
+ uint32_t cm_control;
+ uint32_t dpp_control;
+ uint32_t dscl_control;
+ uint32_t obuf_control;
+ uint32_t mpc_size;
+};
+
struct CM_bias_params {
uint32_t cm_bias_cr_r;
uint32_t cm_bias_y_g;
@@ -225,6 +238,8 @@ struct dpp_funcs {
void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s);
+ void (*dpp_read_reg_state)(struct dpp *dpp, struct dcn_dpp_reg_state *dpp_reg_state);
+
void (*dpp_reset)(struct dpp *dpp);
void (*dpp_set_scaler)(struct dpp *dpp,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index cee29e89ec5c..a79019365af8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -41,8 +41,8 @@
#include "mem_input.h"
#include "cursor_reg_cache.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
-#include "dml2/dml21/inc/dml_top_types.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_types.h"
#define OPP_ID_INVALID 0xf
#define MAX_TTU 0xffffff
@@ -89,7 +89,7 @@ enum hubp_3dlut_fl_addressing_mode {
enum hubp_3dlut_fl_width {
hubp_3dlut_fl_width_17 = 17,
hubp_3dlut_fl_width_33 = 33,
- hubp_3dlut_fl_width_transformed = 4916
+ hubp_3dlut_fl_width_transformed = 4916, /* mpc default */
};
enum hubp_3dlut_fl_crossbar_bit_slice {
@@ -99,6 +99,22 @@ enum hubp_3dlut_fl_crossbar_bit_slice {
hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3
};
+struct hubp_fl_3dlut_config {
+ bool enabled;
+ enum hubp_3dlut_fl_width width;
+ enum hubp_3dlut_fl_mode mode;
+ enum hubp_3dlut_fl_format format;
+ uint16_t bias;
+ uint16_t scale;
+ struct dc_plane_address address;
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+ enum dc_cm2_gpu_mem_layout layout;
+ uint8_t protection_bits;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
+ enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
+};
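+
+/*
+ * Illustrative initialization (values are hypothetical): fill the config
+ * and hand it to the hubp_program_3dlut_fl_config hook declared below:
+ *
+ *   struct hubp_fl_3dlut_config cfg = {
+ *           .enabled = true,
+ *           .width = hubp_3dlut_fl_width_17,
+ *   };
+ *   hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cfg);
+ */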
+
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
@@ -110,11 +126,13 @@ struct hubp {
int mpcc_id;
struct dc_cursor_attributes curs_attr;
struct dc_cursor_position curs_pos;
+ bool cursor_offload;
bool power_gated;
struct cursor_position_cache_hubp pos;
struct cursor_attribute_cache_hubp att;
struct cursor_rect cur_rect;
+ bool use_mall_for_cursor;
};
struct surface_flip_registers {
@@ -220,6 +238,7 @@ struct hubp_funcs {
void (*hubp_clk_cntl)(struct hubp *hubp, bool enable);
void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst);
void (*hubp_read_state)(struct hubp *hubp);
+ void (*hubp_read_reg_state)(struct hubp *hubp, struct dcn_hubp_reg_state *reg_state);
void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
@@ -288,7 +307,10 @@ struct hubp_funcs {
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
+ void (*hubp_program_3dlut_fl_config)(struct hubp *hubp, struct hubp_fl_3dlut_config *cfg);
void (*hubp_clear_tiling)(struct hubp *hubp);
+ uint32_t (*hubp_get_current_read_line)(struct hubp *hubp);
+ uint32_t (*hubp_get_det_config_error)(struct hubp *hubp);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 41c76ba9ba56..a61d12ec61bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -44,12 +44,67 @@
*/
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
-#define MAX_LINKS (MAX_PIPES * 2 +2)
+
+#define MAX_DPIA 6
+#define MAX_CONNECTOR 6
+#define MAX_VIRTUAL_LINKS 4
+
+#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS)
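+/* With the defaults above, MAX_LINKS evaluates to 6 + 6 + 4 = 16. */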
+
+/**
+ * define MAX_DIG_LINK_ENCODERS - maximum number of digital encoders
+ *
+ * Digital encoders are ENGINE_ID_DIGA...G; there are at most 7,
+ * although not every GPU has that many.
+ */
#define MAX_DIG_LINK_ENCODERS 7
+
+/**
+ * define MAX_DAC_LINK_ENCODERS - maximum number of analog link encoders
+ *
+ * Analog encoders are ENGINE_ID_DACA/B; there are at most 2,
+ * although not every GPU has that many. Modern GPUs typically
+ * don't have analog encoders at all.
+ */
+#define MAX_DAC_LINK_ENCODERS 2
+
+/**
+ * define MAX_LINK_ENCODERS - maximum total number of link encoders
+ *
+ * This includes both analog and digital encoders.
+ */
+#define MAX_LINK_ENCODERS (MAX_DIG_LINK_ENCODERS + MAX_DAC_LINK_ENCODERS)
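+/* With the defaults above, MAX_LINK_ENCODERS evaluates to 7 + 2 = 9. */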
+
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
#define MAX_HPO_DP2_LINK_ENCODERS 4
+/* Pipe topology snapshot structures */
+#define MAX_TOPOLOGY_SNAPSHOTS 4
+
+struct pipe_topology_line {
+ bool is_phantom_pipe;
+ int plane_idx;
+ int slice_idx;
+ int stream_idx;
+ int dpp_inst;
+ int opp_inst;
+ int tg_inst;
+};
+
+struct pipe_topology_snapshot {
+ struct pipe_topology_line pipe_log_lines[MAX_PIPES];
+ int line_count;
+ uint64_t timestamp_us;
+ int stream_count;
+ int phantom_stream_count;
+};
+
+struct pipe_topology_history {
+ struct pipe_topology_snapshot snapshots[MAX_TOPOLOGY_SNAPSHOTS];
+ int current_snapshot_index;
+};
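+
+/*
+ * Illustrative sketch (assumed usage, not defined here): the history acts
+ * as a small ring buffer of snapshots, e.g.
+ *
+ *   hist->current_snapshot_index =
+ *           (hist->current_snapshot_index + 1) % MAX_TOPOLOGY_SNAPSHOTS;
+ *   hist->snapshots[hist->current_snapshot_index] = *snap;
+ */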
+
struct gamma_curve {
uint32_t offset;
uint32_t segments_num;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 08c16ba52a51..df512920a9fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -47,6 +47,7 @@ struct encoder_init_data {
enum hpd_source_id hpd_source;
/* TODO: in DAL2, here was pointer to EventManagerInterface */
struct graphics_object_id encoder;
+ enum engine_id analog_engine;
struct dc_context *ctx;
enum transmitter transmitter;
};
@@ -83,6 +84,7 @@ struct link_encoder {
struct graphics_object_id connector;
uint32_t output_signals;
enum engine_id preferred_engine;
+ enum engine_id analog_engine;
struct encoder_feature_support features;
enum transmitter transmitter;
enum hpd_source_id hpd_source;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
index 42fbc70f7056..d468bc85566a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h
@@ -29,7 +29,7 @@
#include "include/grph_object_id.h"
#include "dml/display_mode_structs.h"
-#include "dml2/dml21/inc/dml_top_dchub_registers.h"
+#include "dml2_0/dml21/inc/dml_top_dchub_registers.h"
struct dchub_init_data;
struct cstate_pstate_watermarks_st {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 7641439f6ca0..a8d1abe20f62 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -115,6 +115,16 @@ enum MCM_LUT_ID {
MCM_LUT_SHAPER
};
+struct mpc_fl_3dlut_config {
+ bool enabled;
+ uint16_t width;
+ bool select_lut_bank_a;
+ uint16_t bit_depth;
+ int hubp_index;
+ uint16_t bias;
+ uint16_t scale;
+};
+
union mcm_lut_params {
const struct pwl_params *pwl;
const struct tetrahedral_params *lut3d;
@@ -340,6 +350,15 @@ struct mpcc_state {
struct mpc_rmcm_regs rmcm_regs;
};
+struct dcn_mpc_reg_state {
+ uint32_t mpcc_bot_sel;
+ uint32_t mpcc_control;
+ uint32_t mpcc_status;
+ uint32_t mpcc_top_sel;
+ uint32_t mpcc_opp_id;
+ uint32_t mpcc_ogam_control;
+};
+
/**
* struct mpc_funcs - funcs
*/
@@ -363,6 +382,24 @@ struct mpc_funcs {
struct mpc *mpc,
int mpcc_inst,
struct mpcc_state *s);
+ /**
+ * @mpc_read_reg_state:
+ *
+ * Read the MPC register state, e.g. for underflow debugging.
+ *
+ * Parameters:
+ *
+ * - [in] mpc - MPC context
+ * - [in] mpcc_inst - MPCC instance to read
+ * - [out] mpc_reg_state - MPC register state structure
+ *
+ * Return:
+ *
+ * void
+ */
+ void (*mpc_read_reg_state)(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state);
/**
* @insert_plane:
@@ -1059,21 +1096,6 @@ struct mpc_funcs {
*/
void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
bool lut_bank_a, int mpcc_id);
- /**
- * @program_3dlut_size:
- *
- * Program 3D LUT size.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] is_17x17x17 - is 3dlut 17x17x17
- * - [in] mpcc_id
- *
- * Return:
- *
- * void
- */
- void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id);
/**
* @mcm:
@@ -1098,6 +1120,7 @@ struct mpc_funcs {
* MPC RMCM new HW sequential programming functions
*/
struct {
+ void (*fl_3dlut_configure)(struct mpc *mpc, struct mpc_fl_3dlut_config *cfg, int mpcc_id);
void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id);
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 747679cb4944..e1428a83ecbc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -297,6 +297,16 @@ struct oppbuf_params {
uint32_t num_segment_padded_pixels;
};
+struct dcn_opp_reg_state {
+ uint32_t dpg_control;
+ uint32_t fmt_control;
+ uint32_t oppbuf_control;
+ uint32_t opp_pipe_control;
+ uint32_t opp_pipe_crc_control;
+ uint32_t opp_abm_control;
+ uint32_t dscrm_dsc_forward_config;
+};
+
struct opp_funcs {
@@ -368,6 +378,9 @@ struct opp_funcs {
struct output_pixel_processor *opp,
enum dc_pixel_encoding pixel_encoding,
bool is_primary);
+
+ void (*opp_read_reg_state)(
+ struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 267ace4eef8a..da7bf59c4b9d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -175,6 +175,135 @@ struct dcn_otg_state {
uint32_t otg_double_buffer_control;
};
+struct dcn_optc_reg_state {
+ uint32_t optc_bytes_per_pixel;
+ uint32_t optc_data_format_control;
+ uint32_t optc_data_source_select;
+ uint32_t optc_input_clock_control;
+ uint32_t optc_input_global_control;
+ uint32_t optc_input_spare_register;
+ uint32_t optc_memory_config;
+ uint32_t optc_rsmu_underflow;
+ uint32_t optc_underflow_threshold;
+ uint32_t optc_width_control;
+
+ uint32_t otg_3d_structure_control;
+ uint32_t otg_clock_control;
+ uint32_t otg_control;
+ uint32_t otg_count_control;
+ uint32_t otg_count_reset;
+ uint32_t otg_crc_cntl;
+ uint32_t otg_crc_sig_blue_control_mask;
+ uint32_t otg_crc_sig_red_green_mask;
+ uint32_t otg_crc0_data_b;
+ uint32_t otg_crc0_data_rg;
+ uint32_t otg_crc0_windowa_x_control;
+ uint32_t otg_crc0_windowa_x_control_readback;
+ uint32_t otg_crc0_windowa_y_control;
+ uint32_t otg_crc0_windowa_y_control_readback;
+ uint32_t otg_crc0_windowb_x_control;
+ uint32_t otg_crc0_windowb_x_control_readback;
+ uint32_t otg_crc0_windowb_y_control;
+ uint32_t otg_crc0_windowb_y_control_readback;
+ uint32_t otg_crc1_data_b;
+ uint32_t otg_crc1_data_rg;
+ uint32_t otg_crc1_windowa_x_control;
+ uint32_t otg_crc1_windowa_x_control_readback;
+ uint32_t otg_crc1_windowa_y_control;
+ uint32_t otg_crc1_windowa_y_control_readback;
+ uint32_t otg_crc1_windowb_x_control;
+ uint32_t otg_crc1_windowb_x_control_readback;
+ uint32_t otg_crc1_windowb_y_control;
+ uint32_t otg_crc1_windowb_y_control_readback;
+ uint32_t otg_crc2_data_b;
+ uint32_t otg_crc2_data_rg;
+ uint32_t otg_crc3_data_b;
+ uint32_t otg_crc3_data_rg;
+ uint32_t otg_dlpc_control;
+ uint32_t otg_double_buffer_control;
+ uint32_t otg_drr_control2;
+ uint32_t otg_drr_control;
+ uint32_t otg_drr_timing_int_status;
+ uint32_t otg_drr_trigger_window;
+ uint32_t otg_drr_v_total_change;
+ uint32_t otg_drr_v_total_reach_range;
+ uint32_t otg_dsc_start_position;
+ uint32_t otg_force_count_now_cntl;
+ uint32_t otg_global_control0;
+ uint32_t otg_global_control1;
+ uint32_t otg_global_control2;
+ uint32_t otg_global_control3;
+ uint32_t otg_global_control4;
+ uint32_t otg_global_sync_status;
+ uint32_t otg_gsl_control;
+ uint32_t otg_gsl_vsync_gap;
+ uint32_t otg_gsl_window_x;
+ uint32_t otg_gsl_window_y;
+ uint32_t otg_h_blank_start_end;
+ uint32_t otg_h_sync_a;
+ uint32_t otg_h_sync_a_cntl;
+ uint32_t otg_h_timing_cntl;
+ uint32_t otg_h_total;
+ uint32_t otg_interlace_control;
+ uint32_t otg_interlace_status;
+ uint32_t otg_interrupt_control;
+ uint32_t otg_long_vblank_status;
+ uint32_t otg_m_const_dto0;
+ uint32_t otg_m_const_dto1;
+ uint32_t otg_manual_force_vsync_next_line;
+ uint32_t otg_master_en;
+ uint32_t otg_master_update_lock;
+ uint32_t otg_master_update_mode;
+ uint32_t otg_nom_vert_position;
+ uint32_t otg_pipe_update_status;
+ uint32_t otg_pixel_data_readback0;
+ uint32_t otg_pixel_data_readback1;
+ uint32_t otg_request_control;
+ uint32_t otg_snapshot_control;
+ uint32_t otg_snapshot_frame;
+ uint32_t otg_snapshot_position;
+ uint32_t otg_snapshot_status;
+ uint32_t otg_spare_register;
+ uint32_t otg_static_screen_control;
+ uint32_t otg_status;
+ uint32_t otg_status_frame_count;
+ uint32_t otg_status_hv_count;
+ uint32_t otg_status_position;
+ uint32_t otg_status_vf_count;
+ uint32_t otg_stereo_control;
+ uint32_t otg_stereo_force_next_eye;
+ uint32_t otg_stereo_status;
+ uint32_t otg_trig_manual_control;
+ uint32_t otg_triga_cntl;
+ uint32_t otg_triga_manual_trig;
+ uint32_t otg_trigb_cntl;
+ uint32_t otg_trigb_manual_trig;
+ uint32_t otg_update_lock;
+ uint32_t otg_v_blank_start_end;
+ uint32_t otg_v_count_stop_control;
+ uint32_t otg_v_count_stop_control2;
+ uint32_t otg_v_sync_a;
+ uint32_t otg_v_sync_a_cntl;
+ uint32_t otg_v_total;
+ uint32_t otg_v_total_control;
+ uint32_t otg_v_total_int_status;
+ uint32_t otg_v_total_max;
+ uint32_t otg_v_total_mid;
+ uint32_t otg_v_total_min;
+ uint32_t otg_vert_sync_control;
+ uint32_t otg_vertical_interrupt0_control;
+ uint32_t otg_vertical_interrupt0_position;
+ uint32_t otg_vertical_interrupt1_control;
+ uint32_t otg_vertical_interrupt1_position;
+ uint32_t otg_vertical_interrupt2_control;
+ uint32_t otg_vertical_interrupt2_position;
+ uint32_t otg_vready_param;
+ uint32_t otg_vstartup_param;
+ uint32_t otg_vsync_nom_int_status;
+ uint32_t otg_vupdate_keepout;
+ uint32_t otg_vupdate_param;
+};
+
/**
* struct timing_generator - Entry point to Output Timing Generator feature.
*/
@@ -374,12 +503,14 @@ struct timing_generator_funcs {
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
void (*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params);
void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_otg_disable)(struct timing_generator *optc);
bool (*get_optc_double_buffer_pending)(struct timing_generator *tg);
bool (*get_otg_double_buffer_pending)(struct timing_generator *tg);
bool (*get_pipe_update_pending)(struct timing_generator *tg);
void (*set_vupdate_keepout)(struct timing_generator *tg, bool enable);
bool (*wait_update_lock_status)(struct timing_generator *tg, bool locked);
void (*read_otg_state)(struct timing_generator *tg, struct dcn_otg_state *s);
+ void (*optc_read_reg_state)(struct timing_generator *tg, struct dcn_optc_reg_state *optc_reg_state);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
index f2503402c10e..6f94e48a24d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_service.h
@@ -42,8 +42,8 @@
* dc_link_exports.c or other dc files implement dc.h
*
* DC to Link:
- * dc_link_exports.c or other dc files include link.h
- * link_factory.c implements link.h
+ * dc_link_exports.c or other dc files include link_service.h
+ * link_factory.c implements link_service.h
*
* Link sub-component to Link sub-component:
* link_factory.c includes --> link_xxx.h
@@ -73,7 +73,7 @@
* 2. Implement your function in the suitable link_xxx.c file.
* 3. Assign the function to link_service in link_factory.c
* 4. NEVER include link_xxx.h headers outside link component.
- * 5. NEVER include link.h on DM side.
+ * 5. NEVER include link_service.h on DM side.
*/
#include "core_types.h"
@@ -218,7 +218,10 @@ struct link_service {
bool (*dp_overwrite_extended_receiver_cap)(struct dc_link *link);
enum lttpr_mode (*dp_decide_lttpr_mode)(struct dc_link *link,
struct dc_link_settings *link_setting);
-
+ uint8_t (*dp_get_lttpr_count)(struct dc_link *link);
+ void (*edp_get_alpm_support)(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
/*************************** DP DPIA/PHY ******************************/
void (*dpia_handle_usb4_bandwidth_allocation_for_link)(
@@ -289,12 +292,12 @@ struct link_service {
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint32_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const enum pr_residency_mode mode);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index a890f581f4e8..79746d931471 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -45,9 +45,11 @@ enum dce_version resource_parse_asic_id(
struct resource_caps {
int num_timing_generator;
int num_opp;
+ int num_dpp;
int num_video_plane;
int num_audio;
int num_stream_encoder;
+ int num_analog_stream_encoder;
int num_pll;
int num_dwb;
int num_ddc;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
new file mode 100644
index 000000000000..23daf98b8aa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef __SOC_AND_IP_TRANSLATOR_H__
+#define __SOC_AND_IP_TRANSLATOR_H__
+
+#include "dc.h"
+#include "dml_top_soc_parameter_types.h"
+
+struct soc_and_ip_translator_funcs {
+ void (*get_soc_bb)(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+ void (*get_ip_caps)(struct dml2_ip_capabilities *dml_ip_caps);
+};
+
+struct soc_and_ip_translator {
+ const struct soc_and_ip_translator_funcs *translator_funcs;
+};
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version);
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator);
+
+
+#endif // __SOC_AND_IP_TRANSLATOR_H__
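For orientation, a minimal caller-side sketch of the interface declared above; it is not part of the patch. The dc, soc_bb, dml2_config and dml_ip_caps variables are assumed to come from the usual DC/DML2 initialization path, and error handling is elided.

/* Hypothetical usage sketch of the translator interface (not in the patch). */
struct soc_and_ip_translator *xlat =
	dc_create_soc_and_ip_translator(dc->ctx->dce_version);

if (xlat) {
	/* Fill the SoC bounding box and IP capabilities consumed by DML2. */
	xlat->translator_funcs->get_soc_bb(&soc_bb, dc, &dml2_config);
	xlat->translator_funcs->get_ip_caps(&dml_ip_caps);

	dc_destroy_soc_and_ip_translator(&xlat);
}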
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
index 2956c2b3ad1a..1045c268672e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -76,6 +76,9 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
uint8_t count;
int i;
struct audio_output audio_output[MAX_PIPES];
+ struct dc_stream_state *streams_on_link[MAX_PIPES];
+ int num_streams_on_link = 0;
+ struct dc *dc = (struct dc *)link->dc;
needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) !=
link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings));
@@ -138,12 +141,19 @@ static void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg);
// Set DPMS on with stream update
- for (i = 0; i < state->stream_count; i++)
- if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) {
- stream_update.stream = state->streams[i];
+ // Cache all streams on current link since dc_update_planes_and_stream might kill current_state
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link)
+ streams_on_link[num_streams_on_link++] = state->streams[i];
+ }
+
+ for (i = 0; i < num_streams_on_link; i++) {
+ if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) {
+ stream_update.stream = streams_on_link[i];
stream_update.dpms_off = &dpms_off;
- dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update);
+ dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update);
}
+ }
}
static void dp_test_send_link_training(struct dc_link *link)
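An aside on the caching pattern above, stated as an illustration rather than patch content: the first dc_update_planes_and_stream() call can retire the state being iterated, which is exactly what the snapshot avoids.

/* Sketch of the failure mode the caching avoids, assuming 'state' is
 * dc->current_state:
 *
 *   for (i = 0; i < state->stream_count; i++)
 *           dc_update_planes_and_stream(dc, NULL, 0, state->streams[i], ...);
 *           // may retire 'state'; the next iteration would then read a
 *           // stale state->streams[i]
 *
 * Copying the matching stream pointers into streams_on_link[] up front
 * keeps the loop independent of current_state's lifetime.
 */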
@@ -867,7 +877,7 @@ bool dp_set_test_pattern(
return false;
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
@@ -915,7 +925,7 @@ bool dp_set_test_pattern(
CRTC_STATE_VACTIVE);
if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
- if (should_use_dmub_lock(pipe_ctx->stream->link)) {
+ if (should_use_dmub_inbox1_lock(pipe_ctx->stream->link->dc, pipe_ctx->stream->link)) {
union dmub_hw_lock_flags hw_locks = { 0 };
struct dmub_hw_lock_inst_flags inst_flags = { 0 };
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
index eae23ea7f6ec..033650cdb811 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_CTS_H__
#define __LINK_DP_CTS_H__
-#include "link.h"
+#include "link_service.h"
void dp_handle_automated_test(struct dc_link *link);
bool dp_set_test_pattern(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
index ab437a0c9101..9ff4a6c46a2b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_DP_TRACE_H__
#define __LINK_DP_TRACE_H__
-#include "link.h"
+#include "link_service.h"
void dp_trace_init(struct dc_link *link);
void dp_trace_reset(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index b68bcc9fca0a..befa67b2b2ae 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -58,8 +58,9 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
return;
}
- link_enc->funcs->connect_dig_be_to_fe(link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(link_enc,
+ pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
@@ -98,10 +99,13 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
if (stream_enc->funcs->enable_stream)
stream_enc->funcs->enable_stream(stream_enc,
pipe_ctx->stream->signal, false);
- link_enc->funcs->connect_dig_be_to_fe(
- link_enc,
- pipe_ctx->stream_res.stream_enc->id,
- false);
+
+ if (!dc_is_rgb_signal(pipe_ctx->stream->signal))
+ link_enc->funcs->connect_dig_be_to_fe(
+ link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(
pipe_ctx->stream->link,
@@ -115,7 +119,8 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- if (!dc_is_virtual_signal(stream->signal))
+ if (!dc_is_virtual_signal(stream->signal) &&
+ !dc_is_rgb_signal(stream->signal))
stream_encoder->funcs->setup_stereo_sync(
stream_encoder,
pipe_ctx->stream_res.tg->inst,
@@ -138,8 +143,7 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
stream_encoder->funcs->dvi_set_stream_attribute(
stream_encoder,
&stream->timing,
- (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ?
- true : false);
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK);
else if (dc_is_lvds_signal(stream->signal))
stream_encoder->funcs->lvds_set_stream_attribute(
stream_encoder,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index 45f0e091fcb0..4a25210a344f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -27,7 +27,7 @@
#define __LINK_HWSS_DIO_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
const struct link_hwss *get_dio_link_hwss(void);
bool can_use_dio_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
index 9ac08a332540..cf578a8662a4 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__
-#include "link.h"
+#include "link_service.h"
uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link);
uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
index 1d3ed8ca83b5..7c9005bc2587 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
@@ -26,7 +26,7 @@
#define __LINK_HWSS_HPO_DP_H__
#include "link_hwss.h"
-#include "link.h"
+#include "link_service.h"
void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
struct fixed31_32 throttled_vcp_size);
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
index 82301187bc7c..8bf36827ecfb 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
@@ -25,7 +25,7 @@
#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__
-#include "link.h"
+#include "link_service.h"
bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link);
const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index 827b630daf49..6d31f4967f1a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -270,6 +270,10 @@ static void read_scdc_caps(struct ddc_service *ddc_service,
uint8_t slave_address = HDMI_SCDC_ADDRESS;
uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI;
+ if (ddc_service->link->local_sink &&
+ !ddc_service->link->local_sink->edid_caps.scdc_present)
+ return;
+
link_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte,
sizeof(sink->scdc_caps.manufacturer_OUI.byte));
@@ -656,7 +660,7 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
return true;
is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
- DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
+ DC_LOG_DC("DP Alt mode state on HPD: %d Link=%d\n", is_in_alt_mode, link->link_index);
if (is_in_alt_mode)
return true;
@@ -858,6 +862,94 @@ static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
verify_link_capability_non_destructive(link);
}
+/**
+ * link_detect_evaluate_edid_header() - Evaluate if an EDID header is acceptable.
+ * @edid_header: The first 8 bytes of the EDID read from DDC.
+ *
+ * Evaluates an 8-byte EDID header to check if it's good enough
+ * for the purpose of determining whether a display is connected
+ * without reading the full EDID.
+ *
+ * Return: true if the header looks valid, i.e. at least 6 of the 8
+ * bytes match the expected 00/FF pattern (00 FF FF FF FF FF FF 00);
+ * false otherwise.
+ */
+static bool link_detect_evaluate_edid_header(uint8_t edid_header[8])
+{
+ int edid_header_score = 0;
+ int i;
+
+ for (i = 0; i < 8; ++i)
+ edid_header_score += edid_header[i] == ((i == 0 || i == 7) ? 0x00 : 0xff);
+
+ return edid_header_score >= 6;
+}
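To make the scoring concrete, here is an illustration-only sketch (not part of the patch) of how the 6-of-8 threshold tolerates a couple of corrupted bytes while still rejecting noise:

/* Illustration only: how link_detect_evaluate_edid_header() scores input. */
uint8_t good[8]  = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; /* score 8 -> true  */
uint8_t noisy[8] = { 0x00, 0xff, 0x7f, 0xff, 0xff, 0xff, 0xbf, 0x00 }; /* score 6 -> true  */
uint8_t junk[8]  = { 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5, 0xa5 }; /* score 0 -> false */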
+
+/**
+ * link_detect_ddc_probe() - Probe the DDC to see if a display is connected.
+ * @link: DC link whose DDC/I2C is probed for the EDID header.
+ *
+ * Detect whether a display is connected without reading the full
+ * EDID: read only the EDID header (the first 8 bytes of the EDID)
+ * over DDC and evaluate whether it matches the expected header
+ * pattern.
+ *
+ * Return: true if the EDID header was read and passes validation,
+ * false otherwise.
+ */
+static bool link_detect_ddc_probe(struct dc_link *link)
+{
+ uint8_t edid_header[8] = {0};
+ bool ddc_probed = false;
+
+ if (!link->ddc)
+ return false;
+
+ /* 0x50 is the standard DDC slave address for EDID. */
+ ddc_probed = i2c_read(link->ddc, 0x50, edid_header, sizeof(edid_header));
+
+ if (!ddc_probed)
+ return false;
+
+ return link_detect_evaluate_edid_header(edid_header);
+}
+
+/**
+ * link_detect_dac_load_detect() - Perform DAC load detection.
+ * @link: DC link to test using the VBIOS DAC load-detect path.
+ *
+ * Load detection can be used to detect the presence of an analog
+ * display when DDC can't be read. It causes a visible glitch on
+ * the screen while it runs, so it should only be used sparingly,
+ * as a fallback.
+ *
+ * Return: true if the VBIOS load-detect call reports OK, false
+ * otherwise.
+ */
+static bool link_detect_dac_load_detect(struct dc_link *link)
+{
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct link_encoder *link_enc = link->link_enc;
+ enum engine_id engine_id = link_enc->preferred_engine;
+ enum dal_device_type device_type = DEVICE_TYPE_CRT;
+ enum bp_result bp_result;
+ uint32_t enum_id;
+
+ switch (engine_id) {
+ case ENGINE_ID_DACB:
+ enum_id = 2;
+ break;
+ case ENGINE_ID_DACA:
+ default:
+ engine_id = ENGINE_ID_DACA;
+ enum_id = 1;
+ break;
+ }
+
+ bp_result = bios->funcs->dac_load_detection(bios, engine_id, device_type, enum_id);
+ return bp_result == BP_RESULT_OK;
+}
+
/*
* detect_link_and_local_sink() - Detect if a sink is attached to a given link
*
@@ -942,6 +1034,12 @@ static bool detect_link_and_local_sink(struct dc_link *link,
break;
}
+ case SIGNAL_TYPE_RGB: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_RGB;
+ break;
+ }
+
case SIGNAL_TYPE_LVDS: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_LVDS;
@@ -1066,7 +1164,30 @@ static bool detect_link_and_local_sink(struct dc_link *link,
DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
break;
case EDID_NO_RESPONSE:
+ /* Analog connectors without EDID:
+ * - old monitor that actually doesn't have EDID
+ * - cheap DVI-A cable or adapter that doesn't connect DDC
+ */
+ if (dc_connector_supports_analog(link->link_id.id)) {
+ /* If we didn't do DAC load detection yet, do it now
+ * to verify there really is a display connected.
+ */
+ if (link->type != dc_connection_dac_load &&
+ !link_detect_dac_load_detect(link)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+ return false;
+ }
+
+ DC_LOG_INFO("%s detected analog display without EDID\n", __func__);
+ link->type = dc_connection_dac_load;
+ sink->edid_caps.analog = true;
+ break;
+ }
+
DC_LOG_ERROR("No EDID read.\n");
+
/*
* Abort detection for non-DP connectors if we have
* no EDID
@@ -1133,13 +1254,26 @@ static bool detect_link_and_local_sink(struct dc_link *link,
sink = prev_sink;
prev_sink = NULL;
}
- query_hdcp_capability(sink->sink_signal, link);
+
+ if (!sink->edid_caps.analog)
+ query_hdcp_capability(sink->sink_signal, link);
}
+ /* DVI-I connector connected to analog display. */
+ if ((link->link_id.id == CONNECTOR_ID_DUAL_LINK_DVII ||
+ link->link_id.id == CONNECTOR_ID_SINGLE_LINK_DVII) &&
+ sink->edid_caps.analog)
+ sink->sink_signal = SIGNAL_TYPE_RGB;
+
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
!sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ else if (dc_is_dvi_signal(sink->sink_signal) &&
+ dc_is_dvi_signal(link->connector_signal) &&
+ aud_support->hdmi_audio_native &&
+ sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
dp_trace_init(link);
@@ -1228,6 +1362,36 @@ static bool detect_link_and_local_sink(struct dc_link *link,
return true;
}
+/**
+ * link_detect_analog() - Determine whether an analog sink is connected.
+ * @link: DC link to evaluate (must support analog signalling).
+ * @type: Updated with the detected connection type:
+ * dc_connection_single (analog sink reachable via DDC),
+ * dc_connection_dac_load (detected via DAC load detection),
+ * or dc_connection_none (no sink found).
+ *
+ * The glitch-free DDC probe is tried before DAC load detection.
+ *
+ * Return: true if detection completed.
+ */
+static bool link_detect_analog(struct dc_link *link, enum dc_connection_type *type)
+{
+ /* Callers must only pass connectors that support an analog signal. */
+ ASSERT(dc_connector_supports_analog(link->link_id.id));
+
+ if (link_detect_ddc_probe(link)) {
+ *type = dc_connection_single;
+ return true;
+ }
+
+ if (link_detect_dac_load_detect(link)) {
+ *type = dc_connection_dac_load;
+ return true;
+ }
+
+ *type = dc_connection_none;
+ return true;
+}
+
/*
* link_detect_connection_type() - Determine if there is a sink connected
*
@@ -1244,6 +1408,17 @@ bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *
return true;
}
+ /* Ignore the HPD pin (if any) for analog connectors.
+ * Instead rely on DDC and DAC.
+ *
+ * - VGA connectors don't have any HPD at all.
+ * - Some DVI-A cables don't connect the HPD pin.
+ * - Some DVI-A cables pull up the HPD pin.
+ * (So it's high even when no display is connected.)
+ */
+ if (dc_connector_supports_analog(link->link_id.id))
+ return link_detect_analog(link, type);
+
if (link->connector_signal == SIGNAL_TYPE_EDP) {
/*in case it is not on*/
if (!link->dc->config.edp_no_power_sequencing)
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
index 7da05078721e..1ab29476060b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
@@ -25,7 +25,7 @@
#ifndef __DC_LINK_DETECTION_H__
#define __DC_LINK_DETECTION_H__
-#include "link.h"
+#include "link_service.h"
bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
bool link_detect_connection_type(struct dc_link *link,
enum dc_connection_type *type);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 8c8682f743d6..6ae134147617 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -832,7 +832,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
enum optc_dsc_mode optc_dsc_mode;
/* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow +
+ dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding +
stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
@@ -841,6 +841,7 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (should_use_dto_dscclk)
dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h);
@@ -970,6 +971,7 @@ bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immedi
dsc_cfg.color_depth = stream->timing.display_color_depth;
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
@@ -2224,7 +2226,11 @@ static enum dc_status enable_link(
{
enum dc_status status = DC_ERROR_UNEXPECTED;
struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
+ struct dc_link *link = NULL;
+
+ if (stream == NULL)
+ return DC_ERROR_UNEXPECTED;
+ link = stream->link;
/* There are some scenarios where the driver is unloaded with the display
* still enabled. When the driver is reloaded, it may cause a display
@@ -2256,6 +2262,9 @@ static enum dc_status enable_link(
enable_link_lvds(pipe_ctx);
status = DC_OK;
break;
+ case SIGNAL_TYPE_RGB:
+ status = DC_OK;
+ break;
case SIGNAL_TYPE_VIRTUAL:
status = enable_link_virtual(pipe_ctx);
break;
@@ -2358,9 +2367,9 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal, link->link_index);
}
}
@@ -2474,9 +2483,10 @@ void link_set_dpms_on(
if (pipe_ctx->stream->sink) {
if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x link=%d\n", __func__,
pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
+ pipe_ctx->stream->signal,
+ link->link_index);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
index 9398f9c1666a..bd6fc63064a3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DPMS_H__
#define __DC_LINK_DPMS_H__
-#include "link.h"
+#include "link_service.h"
void link_set_dpms_on(
struct dc_state *state,
struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index de1143dbbd25..a6e2b0821969 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -165,6 +165,8 @@ static void construct_link_service_dp_capability(struct link_service *link_srv)
link_srv->dp_overwrite_extended_receiver_cap =
dp_overwrite_extended_receiver_cap;
link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode;
+ link_srv->dp_get_lttpr_count = dp_get_lttpr_count;
+ link_srv->edp_get_alpm_support = edp_get_alpm_support;
}
/* link dp phy/dpia implements basic dp phy/dpia functionality such as
@@ -449,6 +451,46 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
+static enum engine_id find_analog_engine(struct dc_link *link)
+{
+ struct dc_bios *bp = link->ctx->dc_bios;
+ struct graphics_object_id encoder = {0};
+ enum bp_result bp_result = BP_RESULT_OK;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ bp_result = bp->funcs->get_src_obj(bp, link->link_id, i, &encoder);
+
+ if (bp_result != BP_RESULT_OK)
+ return ENGINE_ID_UNKNOWN;
+
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return ENGINE_ID_DACA;
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return ENGINE_ID_DACB;
+ }
+ }
+
+ return ENGINE_ID_UNKNOWN;
+}
+
+static bool transmitter_supported(const enum transmitter transmitter)
+{
+ return transmitter != TRANSMITTER_UNKNOWN &&
+ transmitter != TRANSMITTER_NUTMEG_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_CRT &&
+ transmitter != TRANSMITTER_TRAVIS_LCD;
+}
+
+static bool analog_engine_supported(const enum engine_id engine_id)
+{
+ return engine_id == ENGINE_ID_DACA ||
+ engine_id == ENGINE_ID_DACB;
+}
+
static bool construct_phy(struct dc_link *link,
const struct link_init_data *init_params)
{
@@ -480,10 +522,23 @@ static bool construct_phy(struct dc_link *link,
link->link_id =
bios->funcs->get_connector_id(bios, init_params->connector_index);
+ /* Determine early if the link has any supported encoders,
+ * so that we avoid initializing DDC and HPD, etc.
+ */
+ bp_funcs->get_src_obj(bios, link->link_id, 0, &enc_init_data.encoder);
+ enc_init_data.transmitter = translate_encoder_to_transmitter(enc_init_data.encoder);
+ enc_init_data.analog_engine = find_analog_engine(link);
+
link->ep_type = DISPLAY_ENDPOINT_PHY;
DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
+ if (!transmitter_supported(enc_init_data.transmitter) &&
+ !analog_engine_supported(enc_init_data.analog_engine)) {
+ DC_LOG_WARNING("link_id %d has unsupported encoder\n", link->link_id.id);
+ goto unsupported_fail;
+ }
+
if (bios->funcs->get_disp_connector_caps_info) {
bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
@@ -528,6 +583,9 @@ static bool construct_phy(struct dc_link *link,
case CONNECTOR_ID_DUAL_LINK_DVII:
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
+ case CONNECTOR_ID_VGA:
+ link->connector_signal = SIGNAL_TYPE_RGB;
+ break;
case CONNECTOR_ID_DISPLAY_PORT:
case CONNECTOR_ID_MXM:
case CONNECTOR_ID_USBC:
@@ -609,16 +667,12 @@ static bool construct_phy(struct dc_link *link,
dal_ddc_get_line(get_ddc_pin(link->ddc));
enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
- &enc_init_data.encoder);
enc_init_data.connector = link->link_id;
enc_init_data.channel = get_ddc_line(link);
enc_init_data.hpd_source = get_hpd_line(link);
link->hpd_src = enc_init_data.hpd_source;
- enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
link->link_enc =
link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
@@ -733,6 +787,7 @@ static bool construct_phy(struct dc_link *link,
link->psr_settings.psr_vtotal_control_support = false;
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
return true;
@@ -751,6 +806,7 @@ create_fail:
link->hpd_gpio = NULL;
}
+unsupported_fail:
DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
return false;
}
@@ -814,9 +870,7 @@ static bool construct_dpia(struct dc_link *link,
/* TODO: Create link encoder */
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-
- /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
- link->wa_flags.dp_mot_reset_segment = true;
+ link->replay_settings.config.replay_version = DC_REPLAY_VERSION_UNSUPPORTED;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
index e96220d48d03..aad36ca1a31c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_FACTORY_H__
#define __LINK_FACTORY_H__
-#include "link.h"
+#include "link_service.h"
struct dc_link *link_create(const struct link_init_data *init_params);
void link_destroy(struct dc_link **link);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
index 1907bda3cb6e..f7aa3bc3a93a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_RESOURCE_H__
#define __LINK_RESOURCE_H__
-#include "link.h"
+#include "link_service.h"
void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
void link_restore_res_map(const struct dc *dc, uint32_t *map);
void link_get_cur_link_res(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
index aecaf37eee35..acdc162de535 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -408,8 +408,10 @@ enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const stru
link = stream->link;
if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT
- || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- && link->hpd_status))
+ || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)))
+ continue;
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && !link->hpd_status)
continue;
dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 9553c81053fe..595774e76453 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -24,7 +24,7 @@
*/
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
-#include "link.h"
+#include "link_service.h"
enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index 267180e7bc48..5d2bcce2f669 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -549,7 +549,8 @@ void write_scdc_data(struct ddc_service *ddc_service,
/*Lower than 340 Scramble bit from SCDC caps*/
if (ddc_service->link->local_sink &&
- ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ (ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite ||
+ !ddc_service->link->local_sink->edid_caps.scdc_present))
return;
link_query_ddc_data(ddc_service, slave_address, &offset,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index a3e25e55bed6..d3e6f01a6a90 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -26,7 +26,7 @@
#ifndef __DAL_DDC_SERVICE_H__
#define __DAL_DDC_SERVICE_H__
-#include "link.h"
+#include "link_service.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 651926e547b9..ad90a0106938 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -357,7 +357,9 @@ bool dp_should_enable_fec(const struct dc_link *link)
{
bool force_disable = false;
- if (link->fec_state == dc_link_fec_enabled)
+ if (link->dc->debug.disable_fec)
+ force_disable = true;
+ else if (link->fec_state == dc_link_fec_enabled)
force_disable = false;
else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
@@ -424,6 +426,21 @@ static enum dc_link_rate get_link_rate_from_max_link_bw(
return link_rate;
}
+static enum dc_lane_count get_lttpr_max_lane_count(struct dc_link *link)
+{
+ enum dc_lane_count lttpr_max_lane_count = LANE_COUNT_UNKNOWN;
+
+ if (link->dpcd_caps.lttpr_caps.max_lane_count <= LANE_COUNT_DP_MAX)
+ lttpr_max_lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+
+ /* if bw_allocation is enabled and nrd_max_lane_count is set, use it */
+ if (link->dpia_bw_alloc_config.bw_alloc_enabled &&
+ link->dpia_bw_alloc_config.nrd_max_lane_count > 0)
+ lttpr_max_lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+
+ return lttpr_max_lane_count;
+}
+
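A worked example for the new helper; illustration only, not patch content:

/* Example: the LTTPR chain reports max_lane_count = 4, but DP BW
 * allocation is active with nrd_max_lane_count = 2.
 * get_lttpr_max_lane_count() returns 2, i.e. the tunnel's non-reduced
 * value takes precedence over the raw LTTPR capability.
 */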
static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
{
@@ -438,6 +455,11 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
break;
}
+ /* if bw_allocation is enabled and nrd_max_link_rate is set, use it */
+ if (link->dpia_bw_alloc_config.bw_alloc_enabled &&
+ link->dpia_bw_alloc_config.nrd_max_link_rate > 0)
+ lttpr_max_link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+
if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
lttpr_max_link_rate = LINK_RATE_UHBR20;
else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
@@ -1525,8 +1547,8 @@ bool read_is_mst_supported(struct dc_link *link)
return false;
}
- rev.raw = 0;
- cap.raw = 0;
+ rev.raw = 0;
+ cap.raw = 0;
st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
sizeof(rev));
@@ -1691,7 +1713,7 @@ static bool retrieve_link_cap(struct dc_link *link)
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
- uint32_t read_dpcd_retry_cnt = 3;
+ uint32_t read_dpcd_retry_cnt = 20;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms
@@ -1734,12 +1756,13 @@ static bool retrieve_link_cap(struct dc_link *link)
}
dpcd_set_source_specific_data(link);
- /* Sink may need to configure internals based on vendor, so allow some
- * time before proceeding with possibly vendor specific transactions
- */
- msleep(post_oui_delay);
for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ /*
+ * Sink may need to configure internals based on vendor, so allow some
+ * time before proceeding with possibly vendor specific transactions
+ */
+ msleep(post_oui_delay);
status = core_link_read_dpcd(
link,
DP_DPCD_REV,
@@ -1845,6 +1868,12 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.is_mst_capable = read_is_mst_supported(link);
DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable));
+ /* Some MST docks seem to NAK I2C writes to segment pointer with mot=0. */
+ link->wa_flags.dp_mot_reset_segment =
+ link->dpcd_caps.is_mst_capable;
+
get_active_converter_info(ds_port.byte, link);
dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
@@ -2063,6 +2092,11 @@ static bool retrieve_link_cap(struct dc_link *link)
link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw,
sizeof(link->dpcd_caps.max_uncompressed_pixel_rate_cap.raw));
+ core_link_read_dpcd(link,
+ DP_PANEL_REPLAY_CAPABILITY_SUPPORT,
+ &link->dpcd_caps.pr_caps_supported.raw,
+ sizeof(link->dpcd_caps.pr_caps_supported.raw));
+
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
if (status != DC_OK)
@@ -2125,13 +2159,13 @@ void detect_edp_sink_caps(struct dc_link *link)
&backlight_adj_cap, sizeof(backlight_adj_cap));
link->dpcd_caps.dynamic_backlight_capable_edp =
- (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
+ (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true : false;
core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
&general_edp_cap, sizeof(general_edp_cap));
link->dpcd_caps.set_power_state_capable_edp =
- (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
+ (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true : false;
set_default_brightness_aux(link);
@@ -2195,6 +2229,12 @@ void detect_edp_sink_caps(struct dc_link *link)
DP_EDP_MSO_LINK_CAPABILITIES,
(uint8_t *)&link->dpcd_caps.mso_cap_sst_links_supported,
sizeof(link->dpcd_caps.mso_cap_sst_links_supported));
+ /*
+ * Read eDP general capability 2
+ */
+ core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2,
+ (uint8_t *)&link->dpcd_caps.dp_edp_general_cap_2,
+ sizeof(link->dpcd_caps.dp_edp_general_cap_2));
}
bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
@@ -2235,6 +2275,7 @@ const struct dc_link_settings *dp_get_verified_link_cap(
struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};
+ enum dc_lane_count lttpr_max_lane_count;
enum dc_link_rate lttpr_max_link_rate;
enum dc_link_rate cable_max_link_rate;
struct resource_context *res_ctx = &link->dc->current_state->res_ctx;
@@ -2299,8 +2340,11 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
/* Some LTTPR devices do not report valid DPCD revisions; if so, do not take their link cap into consideration. */
if (link->dpcd_caps.lttpr_caps.revision.raw >= DPCD_REV_14) {
- if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
- max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+ lttpr_max_lane_count = get_lttpr_max_lane_count(link);
+
+ if (lttpr_max_lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count = lttpr_max_lane_count;
+
lttpr_max_link_rate = get_lttpr_max_link_rate(link);
if (lttpr_max_link_rate < max_link_cap.link_rate)
@@ -2406,6 +2450,11 @@ bool dp_verify_link_cap_with_retries(
dp_trace_detect_lt_init(link);
+ DC_LOG_HW_LINK_TRAINING("%s: Link[%d] LinkRate=0x%x LaneCount=%d",
+ __func__, link->link_index,
+ known_limit_link_setting->link_rate,
+ known_limit_link_setting->lane_count);
+
if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa)
apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
@@ -2442,6 +2491,11 @@ bool dp_verify_link_cap_with_retries(
dp_trace_lt_fail_count_update(link, fail_count, true);
dp_trace_set_lt_end_timestamp(link, true);
+ DC_LOG_HW_LINK_TRAINING("%s: Link[%d] Exit. is_success=%d fail_count=%d",
+ __func__, link->link_index,
+ success,
+ fail_count);
+
return success;
}
@@ -2506,3 +2560,40 @@ bool dp_is_sink_present(struct dc_link *link)
return present;
}
+
+uint8_t dp_get_lttpr_count(struct dc_link *link)
+{
+ if (dp_is_lttpr_present(link))
+ return dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ return 0;
+}
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support)
+{
+ bool lttpr_present = dp_is_lttpr_present(link);
+
+ if (auxless_support == NULL || auxwake_support == NULL)
+ return;
+
+ *auxless_support = false;
+ *auxwake_support = false;
+
+ if (!dc_is_embedded_signal(link->connector_signal))
+ return;
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_LESS_ALPM_CAP) {
+ if (lttpr_present) {
+ if (link->dpcd_caps.lttpr_caps.alpm.bits.AUX_LESS_ALPM_SUPPORTED)
+ *auxless_support = true;
+ } else
+ *auxless_support = true;
+ }
+
+ if (link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP) {
+ if (!lttpr_present)
+ *auxwake_support = true;
+ }
+}
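As a compact restatement (not patch content), the policy above for an eDP link reduces to:

/* Equivalent summary of edp_get_alpm_support() (illustration only):
 *
 *   auxless = sink AUX_LESS_ALPM_CAP &&
 *             (!lttpr_present || LTTPR AUX_LESS_ALPM_SUPPORTED)
 *   auxwake = sink AUX_WAKE_ALPM_CAP && !lttpr_present
 *
 * i.e. an LTTPR in the path must itself support AUX-less ALPM, and
 * AUX-wake ALPM is only reported when no LTTPR is present at all.
 */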
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
index 940b147cc5d4..6e17f72a752f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_CAPABILITY_H__
#define __DC_LINK_DP_CAPABILITY_H__
-#include "link.h"
+#include "link_service.h"
bool detect_dp_sink_caps(struct dc_link *link);
@@ -108,4 +108,10 @@ uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+uint8_t dp_get_lttpr_count(struct dc_link *link);
+
+void edp_get_alpm_support(struct dc_link *link,
+ bool *auxless_support,
+ bool *auxwake_support);
+
#endif /* __DC_LINK_DP_CAPABILITY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index a61edfc9ca7a..7cd03fa4892b 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -27,7 +27,7 @@
#ifndef __DC_LINK_DPIA_H__
#define __DC_LINK_DPIA_H__
-#include "link.h"
+#include "link_service.h"
/* Read tunneling device capability from DPCD and update link capability
* accordingly.
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
index 819bf2d8ba53..c958d3f600c8 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -48,8 +48,7 @@
*/
static bool link_dp_is_bw_alloc_available(struct dc_link *link)
{
- return (link && link->hpd_status
- && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
+ return (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc
&& link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support);
}
@@ -226,35 +225,35 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
bool ret = false;
uint8_t val;
- if (link->hpd_status) {
- val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
+ val = DPTX_BW_ALLOC_MODE_ENABLE | DPTX_BW_ALLOC_UNMASK_IRQ;
- if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
- DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
+ if (core_link_write_dpcd(link, DPTX_BW_ALLOCATION_MODE_CONTROL, &val, sizeof(uint8_t)) == DC_OK) {
+ DC_LOG_DEBUG("%s: link[%d] DPTX BW allocation mode enabled", __func__, link->link_index);
- retrieve_usb4_dp_bw_allocation_info(link);
+ retrieve_usb4_dp_bw_allocation_info(link);
- if (link->dpia_bw_alloc_config.nrd_max_link_rate && link->dpia_bw_alloc_config.nrd_max_lane_count) {
- link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
- link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
- }
+ if (link->dpia_bw_alloc_config.nrd_max_link_rate &&
+ link->dpia_bw_alloc_config.nrd_max_lane_count) {
+ link->reported_link_cap.link_rate = link->dpia_bw_alloc_config.nrd_max_link_rate;
+ link->reported_link_cap.lane_count = link->dpia_bw_alloc_config.nrd_max_lane_count;
+ }
- link->dpia_bw_alloc_config.bw_alloc_enabled = true;
- ret = true;
-
- if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
- /*
- * During DP tunnel creation, the CM preallocates BW
- * and reduces the estimated BW of other DPIAs.
- * The CM releases the preallocation only when the allocation is complete.
- * Perform a zero allocation to make the CM release the preallocation
- * and correctly update the estimated BW for all DPIAs per host router.
- */
- link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
- }
- } else
- DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
- }
+ link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ ret = true;
+
+ if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) {
+ /*
+ * During DP tunnel creation, the CM preallocates BW
+ * and reduces the estimated BW of other DPIAs.
+ * The CM releases the preallocation only when the allocation is complete.
+ * Perform a zero allocation to make the CM release the preallocation
+ * and correctly update the estimated BW for all DPIAs per host router.
+ */
+ link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
+ }
+ } else
+ DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index);
return ret;
}
@@ -269,17 +268,28 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link)
*/
void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status)
{
- link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
-
if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) {
DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)",
__func__, link->link_index);
- } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
+ }
+
+ if (status & DP_TUNNELING_BW_REQUEST_FAILED) {
DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw);
- } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
+ }
+
+ if (status & DP_TUNNELING_BW_ALLOC_CAP_CHANGED) {
+ link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
+
+ DC_LOG_DEBUG("%s: Granularity changed on link(%d) new granularity=%d",
+ __func__, link->link_index, link->dpia_bw_alloc_config.bw_granularity);
+ }
+
+ if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) {
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+
DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d",
__func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw);
}
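A behavioral note on dropping the else-if chain, stated as an illustration rather than patch content:

/* Illustration: a single IRQ may now carry several status bits, e.g.
 *
 *   status = DP_TUNNELING_BW_REQUEST_FAILED |
 *            DP_TUNNELING_ESTIMATED_BW_CHANGED;
 *
 * With independent if-blocks each bit is serviced in order (request
 * re-sent, then estimated_bw refreshed), where the old else-if chain
 * handled only the first matching condition per IRQ.
 */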
@@ -297,15 +307,12 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe
{
if (link && link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling
&& link->dpia_bw_alloc_config.bw_alloc_enabled) {
- //1. Hot Plug
- if (link->hpd_status && peak_bw > 0) {
+ if (peak_bw > 0) {
// If DP over USB4 then we need to check BW allocation
link->dpia_bw_alloc_config.link_max_bw = peak_bw;
link_dpia_send_bw_alloc_request(link, peak_bw);
- }
- //2. Cold Unplug
- else if (!link->hpd_status)
+ } else
dpia_bw_alloc_unplug(link);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 41efcb3e44e2..30cd8e2b9d35 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -26,7 +26,7 @@
#ifndef DC_INC_LINK_DP_DPIA_BW_H_
#define DC_INC_LINK_DP_DPIA_BW_H_
-#include "link.h"
+#include "link_service.h"
/*
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 693477413347..4b01ab0a5a7f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -398,10 +398,12 @@ bool dp_should_allow_hpd_rx_irq(const struct dc_link *link)
* Don't handle RX IRQ unless one of following is met:
* 1) The link is established (cur_link_settings != unknown)
* 2) We know we're dealing with a branch device, SST or MST
+ * 3) The link has DP tunnel BW allocation mode enabled.
*/
if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
- is_dp_branch_device(link))
+ is_dp_branch_device(link) ||
+ link->dpia_bw_alloc_config.bw_alloc_enabled)
return true;
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
index ac33730fedd4..87516fb3b45a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_IRQ_HANDLER_H__
#define __DC_LINK_DP_IRQ_HANDLER_H__
-#include "link.h"
+#include "link_service.h"
bool dp_parse_link_loss_status(
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
index ab1c1f8f1f8b..58e154494582 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_PHY_H__
#define __DC_LINK_DP_PHY_H__
-#include "link.h"
+#include "link_service.h"
void dp_enable_link_phy(
struct dc_link *link,
const struct link_resource *link_res,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 2dc1a660e504..08e2b572e0ff 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -1018,7 +1018,12 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
{
enum dc_status status;
uint8_t sink_status = 0;
- uint8_t i;
+ uint32_t i;
+ uint8_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ /* Each hop could theoretically take over 256 ms (max 128b/132b AUX RD
+ * INTERVAL), so allow 300 ms per LTTPR, and 10 ms when no LTTPR is present.
+ */
+ uint32_t intra_hop_disable_time_ms = (lttpr_count > 0 ? lttpr_count * 300 : 10);
/* clear training pattern set */
status = dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
@@ -1028,7 +1033,7 @@ static enum link_training_result dpcd_exit_training_mode(struct dc_link *link, e
if (encoding == DP_128b_132b_ENCODING) {
/* poll for intra-hop disable */
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < intra_hop_disable_time_ms; i++) {
if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
break;
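Worked numbers for the new poll bound; illustration only, assuming roughly one DPCD read per millisecond as implied by the "10ms for no LTTPR" comment in the patch:

/* Worked example for intra_hop_disable_time_ms:
 *
 *   lttpr_count = 0  ->   10 iterations (~10 ms, the old fixed bound)
 *   lttpr_count = 2  ->  600 iterations (~600 ms)
 *   lttpr_count = 8  -> 2400 iterations (~2.4 s; DP allows up to 8 LTTPRs)
 */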
@@ -1724,6 +1729,15 @@ bool perform_link_training_with_retries(
break;
}
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ !link->dc->config.enable_dpia_pre_training) {
+ if (j == (attempts - 1))
+ do_fallback = true;
+ else
+ do_fallback = false;
+ }
+
if (j == (attempts - 1)) {
DC_LOG_WARNING(
"%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n",
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
index 574b083e0936..ce52de22ab7a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_DP_TRAINING_H__
#define __DC_LINK_DP_TRAINING_H__
-#include "link.h"
+#include "link_service.h"
bool perform_link_training_with_retries(
const struct dc_link_settings *link_setting,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
index 08d787a1e451..c2717c678c72 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
@@ -25,7 +25,7 @@
#ifndef __LINK_DPCD_H__
#define __LINK_DPCD_H__
-#include "link.h"
+#include "link_service.h"
#include "dpcd_defs.h"
enum dc_status core_link_read_dpcd(
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 98ec9b5a559c..c56e69eb27ef 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -703,6 +703,20 @@ bool edp_setup_psr(struct dc_link *link,
if (!link)
return false;
+ /* This is a workaround: some vendors require the source to
+ * read the PSR cap; otherwise, the vendor's PSR feature will
+ * fall back to its default behavior, causing a misconfiguration
+ * of this feature.
+ */
+ if (link->panel_config.psr.read_psrcap_again) {
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_PSR_SUPPORT,
+ &link->dpcd_caps.psr_info.psr_version,
+ sizeof(link->dpcd_caps.psr_info.psr_version));
+ }
+
//Clear PSR cfg
memset(&psr_configuration, 0, sizeof(psr_configuration));
dm_helpers_dp_write_dpcd(
@@ -870,6 +884,8 @@ bool edp_setup_psr(struct dc_link *link,
psr_context->dsc_slice_height = psr_config->dsc_slice_height;
+ psr_context->os_request_force_ffu = psr_config->os_request_force_ffu;
+
if (psr) {
link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
link, psr_context, panel_inst);
@@ -933,7 +949,7 @@ bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
/* Set power optimization flag */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) {
if (replay != NULL && link->replay_settings.replay_feature_enabled &&
- replay->funcs->replay_set_power_opt) {
+ replay->funcs->replay_set_power_opt) {
replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst);
link->replay_settings.replay_power_opt_active = *power_opts;
}
@@ -968,7 +984,117 @@ bool edp_get_replay_state(const struct dc_link *link, uint64_t *state)
return true;
}
-bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+static bool edp_setup_panel_replay(struct dc_link *link, const struct dc_stream_state *stream)
+{
+ /* Set up VESA Panel Replay */
+ struct dc *dc;
+ struct dmub_replay *replay;
+ int i;
+ unsigned int panel_inst;
+ struct replay_context replay_context = { 0 };
+ unsigned int lineTimeInNs = 0;
+
+ union panel_replay_enable_and_configuration_1 pr_config_1 = { 0 };
+ union panel_replay_enable_and_configuration_2 pr_config_2 = { 0 };
+
+ union dpcd_alpm_configuration alpm_config;
+
+ replay_context.controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ //Clear Panel Replay enable & config
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
+ (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t));
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2,
+ (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t));
+
+ if (!(link->replay_settings.config.replay_supported))
+ return false;
+
+ dc = link->ctx->dc;
+
+ /* TODO: confirm whether this is still needed */
+ replay = dc->res_pool->replay;
+
+ if (!replay)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel;
+ replay_context.digbe_inst = link->link_enc->transmitter;
+ replay_context.digfe_inst = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* DMCU subtracts 1 from all controller ID
+  * values, therefore +1 here
+  */
+ replay_context.controllerId =
+ dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ lineTimeInNs =
+ ((stream->timing.h_total * 1000000) /
+ (stream->timing.pix_clk_100hz / 10)) + 1;
+
+ replay_context.line_time_in_ns = lineTimeInNs;
+
+ link->replay_settings.replay_feature_enabled =
+ replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
+
+ if (link->replay_settings.replay_feature_enabled) {
+ pr_config_1.bits.PANEL_REPLAY_ENABLE = 1;
+ pr_config_1.bits.PANEL_REPLAY_CRC_ENABLE = 1;
+ pr_config_1.bits.IRQ_HPD_ASSDP_MISSING = 1;
+ pr_config_1.bits.IRQ_HPD_VSCSDP_UNCORRECTABLE_ERROR = 1;
+ pr_config_1.bits.IRQ_HPD_RFB_ERROR = 1;
+ pr_config_1.bits.IRQ_HPD_ACTIVE_FRAME_CRC_ERROR = 1;
+ pr_config_1.bits.PANEL_REPLAY_SELECTIVE_UPDATE_ENABLE = 1;
+ pr_config_1.bits.PANEL_REPLAY_EARLY_TRANSPORT_ENABLE = 1;
+
+ pr_config_2.bits.SINK_REFRESH_RATE_UNLOCK_GRANTED = 0;
+ pr_config_2.bits.SU_Y_GRANULARITY_EXT_VALUE_ENABLED = 0;
+ pr_config_2.bits.SU_REGION_SCAN_LINE_CAPTURE_INDICATION = 0;
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
+ (uint8_t *)&(pr_config_1.raw), sizeof(uint8_t));
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2,
+ (uint8_t *)&(pr_config_2.raw), sizeof(uint8_t));
+
+ //ALPM Setup
+ memset(&alpm_config, 0, sizeof(alpm_config));
+ alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0;
+
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ alpm_config.bits.ALPM_MODE_SEL = 1;
+ alpm_config.bits.ACDS_PERIOD_DURATION = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ DP_RECEIVER_ALPM_CONFIG,
+ &alpm_config.raw,
+ sizeof(alpm_config.raw));
+ }
+
+ return true;
+}
+
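
The lineTimeInNs computation above converts h_total and the pixel clock (kept in 100 Hz units) into a per-line duration in nanoseconds, plus one for conservatism. A self-contained version with an illustrative CTA 1080p60 timing; the function name is made up for the sketch:

#include <stdint.h>
#include <stdio.h>

/* h_total pixels per line at pix_clk (100 Hz units); pix_clk_100hz / 10
 * is the clock in kHz, so the quotient comes out in nanoseconds.
 */
static unsigned int line_time_in_ns(uint32_t h_total, uint32_t pix_clk_100hz)
{
	return ((h_total * 1000000u) / (pix_clk_100hz / 10u)) + 1u;
}

int main(void)
{
	/* 1080p60: h_total 2200, pixel clock 148.5 MHz -> prints 14815 */
	printf("%u\n", line_time_in_ns(2200, 1485000));
	return 0;
}
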
+static bool edp_setup_freesync_replay(struct dc_link *link, const struct dc_stream_state *stream)
{
/* To-do: Setup Replay */
struct dc *dc;
@@ -1029,6 +1155,8 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
replay_context.line_time_in_ns = lineTimeInNs;
+ replay_context.os_request_force_ffu = link->replay_settings.config.os_request_force_ffu;
+
link->replay_settings.replay_feature_enabled =
replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst);
if (link->replay_settings.replay_feature_enabled) {
@@ -1042,7 +1170,13 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
(uint8_t *)&(replay_config.raw), sizeof(uint8_t));
memset(&alpm_config, 0, sizeof(alpm_config));
- alpm_config.bits.ENABLE = 1;
+ alpm_config.bits.ENABLE = link->replay_settings.config.alpm_mode != DC_ALPM_UNSUPPORTED ? 1 : 0;
+
+ if (link->replay_settings.config.alpm_mode == DC_ALPM_AUXLESS) {
+ alpm_config.bits.ALPM_MODE_SEL = 1;
+ alpm_config.bits.ACDS_PERIOD_DURATION = 0;
+ }
+
dm_helpers_dp_write_dpcd(
link->ctx,
link,
@@ -1056,6 +1190,18 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
return true;
}
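
Both replay paths now derive the ALPM DPCD bits from the same alpm_mode setting, differing only in the ACDS period they program. A hedged standalone sketch of that mapping; the enum and struct are simplified stand-ins, not the driver's types:

#include <stdint.h>

enum alpm_mode { ALPM_UNSUPPORTED, ALPM_AUX_WAKE, ALPM_AUXLESS };

struct alpm_cfg {
	uint8_t enable;        /* ALPM enabled at the sink */
	uint8_t mode_sel;      /* 1 selects AUX-less ALPM */
	uint8_t acds_duration; /* ACDS period, path-specific */
};

static struct alpm_cfg derive_alpm_cfg(enum alpm_mode mode, int panel_replay)
{
	struct alpm_cfg c = {0};

	c.enable = (mode != ALPM_UNSUPPORTED);
	if (mode == ALPM_AUXLESS) {
		c.mode_sel = 1;
		/* the two setup paths above program different durations */
		c.acds_duration = panel_replay ? 1 : 0;
	}
	return c;
}
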
+bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream)
+{
+ if (!link)
+ return false;
+ if (link->replay_settings.config.replay_version == DC_VESA_PANEL_REPLAY)
+ return edp_setup_panel_replay(link, stream);
+ else if (link->replay_settings.config.replay_version == DC_FREESYNC_REPLAY)
+ return edp_setup_freesync_replay(link, stream);
+ else
+ return false;
+}
+
/*
* General interface for Replay to set a 32-bit variable in DMUB
* replay_FW_Message_type: indicates which instruction or variable is passed to DMUB
@@ -1086,7 +1232,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1098,9 +1244,11 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
- replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst);
+ if (coasting_vtotal && (link->replay_settings.coasting_vtotal != coasting_vtotal ||
+ link->replay_settings.frame_skip_number != frame_skip_number)) {
+ replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst, frame_skip_number);
link->replay_settings.coasting_vtotal = coasting_vtotal;
+ link->replay_settings.frame_skip_number = frame_skip_number;
}
return true;
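
The guard above reprograms the firmware only when the coasting vtotal or the new frame-skip number actually changes, then refreshes the cached copies. A minimal sketch of that change-detection pattern, with hypothetical names and simplified return semantics:

#include <stdbool.h>
#include <stdint.h>

struct replay_cache {
	uint32_t coasting_vtotal;
	uint16_t frame_skip_number;
};

/* Returns true only when the cache was stale and an update was issued. */
static bool update_coasting_vtotal(struct replay_cache *c,
				   uint32_t vtotal, uint16_t frame_skip)
{
	if (!vtotal)
		return false;
	if (c->coasting_vtotal == vtotal && c->frame_skip_number == frame_skip)
		return false;

	/* ...the real driver programs DMUB here... */
	c->coasting_vtotal = vtotal;
	c->frame_skip_number = frame_skip;
	return true;
}
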
@@ -1128,7 +1276,7 @@ bool edp_replay_residency(const struct dc_link *link,
}
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal)
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1139,13 +1287,16 @@ bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
/* Only when both the power opts and the coasting vtotal change can this function return true */
if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts &&
- coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
+ (coasting_vtotal &&
+ (link->replay_settings.coasting_vtotal != coasting_vtotal ||
+ link->replay_settings.frame_skip_number != frame_skip_number))) {
if (link->replay_settings.replay_feature_enabled &&
replay->funcs->replay_set_power_opt_and_coasting_vtotal) {
replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
- *power_opts, panel_inst, coasting_vtotal);
+ *power_opts, panel_inst, coasting_vtotal, frame_skip_number);
link->replay_settings.replay_power_opt_active = *power_opts;
link->replay_settings.coasting_vtotal = coasting_vtotal;
+ link->replay_settings.frame_skip_number = frame_skip_number;
} else
return false;
} else
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 4a475d5b9dde..dd79c7cd2828 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -25,7 +25,7 @@
#ifndef __DC_LINK_EDP_PANEL_CONTROL_H__
#define __DC_LINK_EDP_PANEL_CONTROL_H__
-#include "link.h"
+#include "link_service.h"
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
@@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
bool edp_send_replay_cmd(struct dc_link *link,
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const enum pr_residency_mode mode);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint32_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal, uint16_t frame_skip_number);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
index 4fb526b264f9..af529328ba17 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
@@ -26,7 +26,7 @@
#ifndef __DC_LINK_HPD_H__
#define __DC_LINK_HPD_H__
-#include "link.h"
+#include "link_service.h"
enum hpd_source_id get_hpd_line(struct dc_link *link);
/*
diff --git a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
index 259a98e4ee2c..2a422e223bf2 100644
--- a/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c
@@ -284,7 +284,7 @@ void mcifwb2_dump_frame(struct mcif_wb *mcif_wb,
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0xf);
- memcpy(dest_luma_buffer, luma_buffer, mcif_params->luma_pitch * dest_height);
+ memcpy(dest_luma_buffer, luma_buffer, (size_t)mcif_params->luma_pitch * dest_height);
memcpy(dest_chroma_buffer, chroma_buffer, mcif_params->chroma_pitch * dest_height / 2);
REG_UPDATE(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB_BUFMGR_SW_LOCK, 0x0);
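
The added (size_t) cast widens the multiply before it can wrap: luma_pitch and dest_height are 32-bit, so their product is computed in 32 bits unless one operand is widened first (note the chroma copy on the next line still uses 32-bit math). The (int64_t) casts added to the yclk computations later in this diff address the same class of problem. A small demonstration, with values chosen only to show the wrap:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch = 1048576, height = 8192; /* product is 2^33 */

	size_t wrong = pitch * height;          /* 32-bit multiply wraps to 0 */
	size_t right = (size_t)pitch * height;  /* widened before multiplying */

	printf("%zu vs %zu\n", wrong, right);
	return 0;
}
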
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
index 85298b8a1b5e..6bfd2c1294e5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c
@@ -1514,6 +1514,21 @@ static void mpc3_read_mpcc_state(
MPCC_OGAM_SELECT_CURRENT, &s->rgam_lut);
}
+void mpc3_read_reg_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state)
+{
+ struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+ mpc_reg_state->mpcc_bot_sel = REG_READ(MPCC_BOT_SEL[mpcc_inst]);
+ mpc_reg_state->mpcc_control = REG_READ(MPCC_CONTROL[mpcc_inst]);
+ mpc_reg_state->mpcc_ogam_control = REG_READ(MPCC_OGAM_CONTROL[mpcc_inst]);
+ mpc_reg_state->mpcc_opp_id = REG_READ(MPCC_OPP_ID[mpcc_inst]);
+ mpc_reg_state->mpcc_status = REG_READ(MPCC_STATUS[mpcc_inst]);
+ mpc_reg_state->mpcc_top_sel = REG_READ(MPCC_TOP_SEL[mpcc_inst]);
+}
+
static const struct mpc_funcs dcn30_mpc_funcs = {
.read_mpcc_state = mpc3_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -1544,6 +1559,7 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
.set_mpc_mem_lp_mode = mpc3_set_mpc_mem_lp_mode,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
index 103f29900a2c..e2f147d17178 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
@@ -1096,6 +1096,11 @@ void mpc3_power_on_ogam_lut(
struct mpc *mpc, int mpcc_id,
bool power_on);
+void mpc3_read_reg_state(
+ struct mpc *mpc,
+ int mpcc_inst,
+ struct dcn_mpc_reg_state *mpc_reg_state);
+
void mpc3_init_mpcc(struct mpcc *mpcc, int mpcc_inst);
enum dc_lut_mode mpc3_get_ogam_current(
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
index 6f0e017a8ae2..83bbbf34bcac 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c
@@ -1020,6 +1020,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
.release_rmu = NULL,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index f3fb3fe13757..eeac13fdd6f5 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -287,13 +287,6 @@ void mpc401_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_I
}
}
-void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id)
-{
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1);
-}
-
void mpc_program_gamut_remap(
struct mpc *mpc,
unsigned int mpcc_id,
@@ -605,13 +598,13 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.release_rmu = NULL,
.power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
.get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .mpc_read_reg_state = mpc3_read_reg_state,
.set_bg_color = mpc1_set_bg_color,
.set_movable_cm_location = mpc401_set_movable_cm_location,
.update_3dlut_fast_load_select = mpc401_update_3dlut_fast_load_select,
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
- .program_3dlut_size = mpc401_program_3dlut_size,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index eb0c68d0b0c7..fdc42f8ab3ff 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -221,11 +221,6 @@ void mpc401_program_lut_read_write_control(
bool lut_bank_a,
int mpcc_id);
-void mpc401_program_3dlut_size(
- struct mpc *mpc,
- bool is_17x17x17,
- int mpcc_id);
-
void mpc401_set_gamut_remap(
struct mpc *mpc,
int mpcc_id,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
index 71e9288d60ed..45d418636d0c 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c
@@ -372,6 +372,17 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)
REG_UPDATE(OPP_PIPE_CONTROL, OPP_PIPE_CLOCK_EN, regval);
}
+
+void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp);
+
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+}
+
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -392,7 +403,8 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
.dpg_is_pending = NULL,
- .opp_destroy = opp1_destroy
+ .opp_destroy = opp1_destroy,
+ .opp_read_reg_state = opp1_read_reg_state
};
void dcn10_opp_construct(struct dcn10_opp *oppn10,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
index c87de68a509e..38d0d530a9b7 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h
@@ -63,7 +63,8 @@
uint32_t OPPBUF_CONTROL1; \
uint32_t OPPBUF_3D_PARAMETERS_0; \
uint32_t OPPBUF_3D_PARAMETERS_1; \
- uint32_t OPP_PIPE_CONTROL
+ uint32_t OPP_PIPE_CONTROL; \
+ uint32_t OPP_PIPE_CRC_CONTROL
#define OPP_MASK_SH_LIST_DCN(mask_sh) \
OPP_SF(FMT0_FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, mask_sh), \
@@ -153,7 +154,6 @@ struct dcn10_opp {
const struct dcn10_opp_registers *regs;
const struct dcn10_opp_shift *opp_shift;
const struct dcn10_opp_mask *opp_mask;
-
bool is_write_to_ram_a_safe;
};
@@ -188,4 +188,6 @@ void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable);
void opp1_destroy(struct output_pixel_processor **opp);
+void opp1_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
index f5fe0cac7cb0..ce826a5be4c7 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c
@@ -377,6 +377,18 @@ uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp
return 0;
}
+void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ opp_reg_state->dpg_control = REG_READ(DPG_CONTROL);
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+ opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG);
+}
+
/*****************************************/
/* Constructor, Destructor */
/*****************************************/
@@ -395,6 +407,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
.opp_get_left_edge_extra_pixel_count = opp2_get_left_edge_extra_pixel_count,
+ .opp_read_reg_state = opp2_read_reg_state
};
void dcn20_opp_construct(struct dcn20_opp *oppn20,
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
index 34936e6c49f3..fb0c047c1788 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h
@@ -59,7 +59,8 @@
uint32_t DPG_COLOUR_G_Y; \
uint32_t DPG_COLOUR_R_CR; \
uint32_t DPG_RAMP_CONTROL; \
- uint32_t DPG_STATUS
+ uint32_t DPG_STATUS; \
+ uint32_t DSCRM_DSC_FORWARD_CONFIG
#define OPP_DPG_MASK_SH_LIST(mask_sh) \
OPP_SF(DPG0_DPG_CONTROL, DPG_EN, mask_sh), \
@@ -171,4 +172,7 @@ void opp2_program_left_edge_extra_pixel (
uint32_t opp2_get_left_edge_extra_pixel_count(struct output_pixel_processor *opp,
enum dc_pixel_encoding pixel_encoding, bool is_primary);
+
+void opp2_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
index 3542b51c9aac..e11c4e16402f 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c
@@ -51,3 +51,16 @@ void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable)
{
REG_UPDATE(OPP_TOP_CLK_CONTROL, OPP_FGCG_REP_DIS, !enable);
}
+
+void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+
+ opp_reg_state->dpg_control = REG_READ(DPG_CONTROL);
+ opp_reg_state->fmt_control = REG_READ(FMT_CONTROL);
+ opp_reg_state->opp_abm_control = REG_READ(OPP_ABM_CONTROL);
+ opp_reg_state->opp_pipe_control = REG_READ(OPP_PIPE_CONTROL);
+ opp_reg_state->opp_pipe_crc_control = REG_READ(OPP_PIPE_CRC_CONTROL);
+ opp_reg_state->oppbuf_control = REG_READ(OPPBUF_CONTROL);
+ opp_reg_state->dscrm_dsc_forward_config = REG_READ(DSCRM_DSC_FORWARD_CONFIG);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
index a9a413527801..c6cace90e8f2 100644
--- a/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h
@@ -31,7 +31,8 @@
#define OPP_REG_VARIABLE_LIST_DCN3_5 \
OPP_REG_VARIABLE_LIST_DCN2_0; \
- uint32_t OPP_TOP_CLK_CONTROL
+ uint32_t OPP_TOP_CLK_CONTROL; \
+ uint32_t OPP_ABM_CONTROL
#define OPP_MASK_SH_LIST_DCN35(mask_sh) \
OPP_MASK_SH_LIST_DCN20(mask_sh), \
@@ -64,4 +65,5 @@ void dcn35_opp_construct(struct dcn20_opp *oppn20,
void dcn35_opp_set_fgcg(struct dcn20_opp *oppn20, bool enable);
+void dcn35_opp_read_reg_state(struct output_pixel_processor *opp, struct dcn_opp_reg_state *opp_reg_state);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index 8b2a8455eb56..803bcc25601c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -209,7 +209,43 @@
uint32_t OPTC_WIDTH_CONTROL2; \
uint32_t OTG_PSTATE_REGISTER; \
uint32_t OTG_PIPE_UPDATE_STATUS; \
- uint32_t INTERRUPT_DEST
+ uint32_t INTERRUPT_DEST; \
+ uint32_t OPTC_INPUT_SPARE_REGISTER; \
+ uint32_t OPTC_RSMU_UNDERFLOW; \
+ uint32_t OPTC_UNDERFLOW_THRESHOLD; \
+ uint32_t OTG_COUNT_CONTROL; \
+ uint32_t OTG_COUNT_RESET; \
+ uint32_t OTG_CRC_SIG_BLUE_CONTROL_MASK; \
+ uint32_t OTG_CRC_SIG_RED_GREEN_MASK; \
+ uint32_t OTG_DLPC_CONTROL; \
+ uint32_t OTG_DRR_CONTROL2; \
+ uint32_t OTG_DRR_TIMING_INT_STATUS; \
+ uint32_t OTG_GLOBAL_CONTROL3; \
+ uint32_t OTG_GLOBAL_SYNC_STATUS; \
+ uint32_t OTG_GSL_VSYNC_GAP; \
+ uint32_t OTG_INTERLACE_STATUS; \
+ uint32_t OTG_INTERRUPT_CONTROL; \
+ uint32_t OTG_LONG_VBLANK_STATUS; \
+ uint32_t OTG_MANUAL_FORCE_VSYNC_NEXT_LINE; \
+ uint32_t OTG_MASTER_EN; \
+ uint32_t OTG_PIXEL_DATA_READBACK0; \
+ uint32_t OTG_PIXEL_DATA_READBACK1; \
+ uint32_t OTG_REQUEST_CONTROL; \
+ uint32_t OTG_SNAPSHOT_CONTROL; \
+ uint32_t OTG_SNAPSHOT_FRAME; \
+ uint32_t OTG_SNAPSHOT_POSITION; \
+ uint32_t OTG_SNAPSHOT_STATUS; \
+ uint32_t OTG_SPARE_REGISTER; \
+ uint32_t OTG_STATUS_HV_COUNT; \
+ uint32_t OTG_STATUS_VF_COUNT; \
+ uint32_t OTG_STEREO_FORCE_NEXT_EYE; \
+ uint32_t OTG_TRIG_MANUAL_CONTROL; \
+ uint32_t OTG_TRIGB_CNTL; \
+ uint32_t OTG_TRIGB_MANUAL_TRIG; \
+ uint32_t OTG_UPDATE_LOCK; \
+ uint32_t OTG_V_TOTAL_INT_STATUS; \
+ uint32_t OTG_VSYNC_NOM_INT_STATUS
+
struct dcn_optc_registers {
OPTC_REG_VARIABLE_LIST_DCN;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 4f1830ba619f..c6417538090f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -315,6 +315,136 @@ void optc31_read_otg_state(struct timing_generator *optc,
s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
}
+void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ optc_reg_state->optc_bytes_per_pixel = REG_READ(OPTC_BYTES_PER_PIXEL);
+ optc_reg_state->optc_data_format_control = REG_READ(OPTC_DATA_FORMAT_CONTROL);
+ optc_reg_state->optc_data_source_select = REG_READ(OPTC_DATA_SOURCE_SELECT);
+ optc_reg_state->optc_input_clock_control = REG_READ(OPTC_INPUT_CLOCK_CONTROL);
+ optc_reg_state->optc_input_global_control = REG_READ(OPTC_INPUT_GLOBAL_CONTROL);
+ optc_reg_state->optc_input_spare_register = REG_READ(OPTC_INPUT_SPARE_REGISTER);
+ optc_reg_state->optc_memory_config = REG_READ(OPTC_MEMORY_CONFIG);
+ optc_reg_state->optc_rsmu_underflow = REG_READ(OPTC_RSMU_UNDERFLOW);
+ optc_reg_state->optc_underflow_threshold = REG_READ(OPTC_UNDERFLOW_THRESHOLD);
+ optc_reg_state->optc_width_control = REG_READ(OPTC_WIDTH_CONTROL);
+ optc_reg_state->otg_3d_structure_control = REG_READ(OTG_3D_STRUCTURE_CONTROL);
+ optc_reg_state->otg_clock_control = REG_READ(OTG_CLOCK_CONTROL);
+ optc_reg_state->otg_control = REG_READ(OTG_CONTROL);
+ optc_reg_state->otg_count_control = REG_READ(OTG_COUNT_CONTROL);
+ optc_reg_state->otg_count_reset = REG_READ(OTG_COUNT_RESET);
+ optc_reg_state->otg_crc_cntl = REG_READ(OTG_CRC_CNTL);
+ optc_reg_state->otg_crc_sig_blue_control_mask = REG_READ(OTG_CRC_SIG_BLUE_CONTROL_MASK);
+ optc_reg_state->otg_crc_sig_red_green_mask = REG_READ(OTG_CRC_SIG_RED_GREEN_MASK);
+ optc_reg_state->otg_crc0_data_b = REG_READ(OTG_CRC0_DATA_B);
+ optc_reg_state->otg_crc0_data_rg = REG_READ(OTG_CRC0_DATA_RG);
+ optc_reg_state->otg_crc0_windowa_x_control = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL);
+ optc_reg_state->otg_crc0_windowa_x_control_readback = REG_READ(OTG_CRC0_WINDOWA_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowa_y_control = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL);
+ optc_reg_state->otg_crc0_windowa_y_control_readback = REG_READ(OTG_CRC0_WINDOWA_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowb_x_control = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL);
+ optc_reg_state->otg_crc0_windowb_x_control_readback = REG_READ(OTG_CRC0_WINDOWB_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc0_windowb_y_control = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL);
+ optc_reg_state->otg_crc0_windowb_y_control_readback = REG_READ(OTG_CRC0_WINDOWB_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_data_b = REG_READ(OTG_CRC1_DATA_B);
+ optc_reg_state->otg_crc1_data_rg = REG_READ(OTG_CRC1_DATA_RG);
+ optc_reg_state->otg_crc1_windowa_x_control = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL);
+ optc_reg_state->otg_crc1_windowa_x_control_readback = REG_READ(OTG_CRC1_WINDOWA_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowa_y_control = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL);
+ optc_reg_state->otg_crc1_windowa_y_control_readback = REG_READ(OTG_CRC1_WINDOWA_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowb_x_control = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL);
+ optc_reg_state->otg_crc1_windowb_x_control_readback = REG_READ(OTG_CRC1_WINDOWB_X_CONTROL_READBACK);
+ optc_reg_state->otg_crc1_windowb_y_control = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL);
+ optc_reg_state->otg_crc1_windowb_y_control_readback = REG_READ(OTG_CRC1_WINDOWB_Y_CONTROL_READBACK);
+ optc_reg_state->otg_crc2_data_b = REG_READ(OTG_CRC2_DATA_B);
+ optc_reg_state->otg_crc2_data_rg = REG_READ(OTG_CRC2_DATA_RG);
+ optc_reg_state->otg_crc3_data_b = REG_READ(OTG_CRC3_DATA_B);
+ optc_reg_state->otg_crc3_data_rg = REG_READ(OTG_CRC3_DATA_RG);
+ optc_reg_state->otg_dlpc_control = REG_READ(OTG_DLPC_CONTROL);
+ optc_reg_state->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
+ optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTROL2);
+ optc_reg_state->otg_drr_control = REG_READ(OTG_DRR_CONTROL);
+ optc_reg_state->otg_drr_timing_int_status = REG_READ(OTG_DRR_TIMING_INT_STATUS);
+ optc_reg_state->otg_drr_trigger_window = REG_READ(OTG_DRR_TRIGGER_WINDOW);
+ optc_reg_state->otg_drr_v_total_change = REG_READ(OTG_DRR_V_TOTAL_CHANGE);
+ optc_reg_state->otg_dsc_start_position = REG_READ(OTG_DSC_START_POSITION);
+ optc_reg_state->otg_force_count_now_cntl = REG_READ(OTG_FORCE_COUNT_NOW_CNTL);
+ optc_reg_state->otg_global_control0 = REG_READ(OTG_GLOBAL_CONTROL0);
+ optc_reg_state->otg_global_control1 = REG_READ(OTG_GLOBAL_CONTROL1);
+ optc_reg_state->otg_global_control2 = REG_READ(OTG_GLOBAL_CONTROL2);
+ optc_reg_state->otg_global_control3 = REG_READ(OTG_GLOBAL_CONTROL3);
+ optc_reg_state->otg_global_control4 = REG_READ(OTG_GLOBAL_CONTROL4);
+ optc_reg_state->otg_global_sync_status = REG_READ(OTG_GLOBAL_SYNC_STATUS);
+ optc_reg_state->otg_gsl_control = REG_READ(OTG_GSL_CONTROL);
+ optc_reg_state->otg_gsl_vsync_gap = REG_READ(OTG_GSL_VSYNC_GAP);
+ optc_reg_state->otg_gsl_window_x = REG_READ(OTG_GSL_WINDOW_X);
+ optc_reg_state->otg_gsl_window_y = REG_READ(OTG_GSL_WINDOW_Y);
+ optc_reg_state->otg_h_blank_start_end = REG_READ(OTG_H_BLANK_START_END);
+ optc_reg_state->otg_h_sync_a = REG_READ(OTG_H_SYNC_A);
+ optc_reg_state->otg_h_sync_a_cntl = REG_READ(OTG_H_SYNC_A_CNTL);
+ optc_reg_state->otg_h_timing_cntl = REG_READ(OTG_H_TIMING_CNTL);
+ optc_reg_state->otg_h_total = REG_READ(OTG_H_TOTAL);
+ optc_reg_state->otg_interlace_control = REG_READ(OTG_INTERLACE_CONTROL);
+ optc_reg_state->otg_interlace_status = REG_READ(OTG_INTERLACE_STATUS);
+ optc_reg_state->otg_interrupt_control = REG_READ(OTG_INTERRUPT_CONTROL);
+ optc_reg_state->otg_long_vblank_status = REG_READ(OTG_LONG_VBLANK_STATUS);
+ optc_reg_state->otg_m_const_dto0 = REG_READ(OTG_M_CONST_DTO0);
+ optc_reg_state->otg_m_const_dto1 = REG_READ(OTG_M_CONST_DTO1);
+ optc_reg_state->otg_manual_force_vsync_next_line = REG_READ(OTG_MANUAL_FORCE_VSYNC_NEXT_LINE);
+ optc_reg_state->otg_master_en = REG_READ(OTG_MASTER_EN);
+ optc_reg_state->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK);
+ optc_reg_state->otg_master_update_mode = REG_READ(OTG_MASTER_UPDATE_MODE);
+ optc_reg_state->otg_nom_vert_position = REG_READ(OTG_NOM_VERT_POSITION);
+ optc_reg_state->otg_pipe_update_status = REG_READ(OTG_PIPE_UPDATE_STATUS);
+ optc_reg_state->otg_pixel_data_readback0 = REG_READ(OTG_PIXEL_DATA_READBACK0);
+ optc_reg_state->otg_pixel_data_readback1 = REG_READ(OTG_PIXEL_DATA_READBACK1);
+ optc_reg_state->otg_request_control = REG_READ(OTG_REQUEST_CONTROL);
+ optc_reg_state->otg_snapshot_control = REG_READ(OTG_SNAPSHOT_CONTROL);
+ optc_reg_state->otg_snapshot_frame = REG_READ(OTG_SNAPSHOT_FRAME);
+ optc_reg_state->otg_snapshot_position = REG_READ(OTG_SNAPSHOT_POSITION);
+ optc_reg_state->otg_snapshot_status = REG_READ(OTG_SNAPSHOT_STATUS);
+ optc_reg_state->otg_spare_register = REG_READ(OTG_SPARE_REGISTER);
+ optc_reg_state->otg_static_screen_control = REG_READ(OTG_STATIC_SCREEN_CONTROL);
+ optc_reg_state->otg_status = REG_READ(OTG_STATUS);
+ optc_reg_state->otg_status_frame_count = REG_READ(OTG_STATUS_FRAME_COUNT);
+ optc_reg_state->otg_status_hv_count = REG_READ(OTG_STATUS_HV_COUNT);
+ optc_reg_state->otg_status_position = REG_READ(OTG_STATUS_POSITION);
+ optc_reg_state->otg_status_vf_count = REG_READ(OTG_STATUS_VF_COUNT);
+ optc_reg_state->otg_stereo_control = REG_READ(OTG_STEREO_CONTROL);
+ optc_reg_state->otg_stereo_force_next_eye = REG_READ(OTG_STEREO_FORCE_NEXT_EYE);
+ optc_reg_state->otg_stereo_status = REG_READ(OTG_STEREO_STATUS);
+ optc_reg_state->otg_trig_manual_control = REG_READ(OTG_TRIG_MANUAL_CONTROL);
+ optc_reg_state->otg_triga_cntl = REG_READ(OTG_TRIGA_CNTL);
+ optc_reg_state->otg_triga_manual_trig = REG_READ(OTG_TRIGA_MANUAL_TRIG);
+ optc_reg_state->otg_trigb_cntl = REG_READ(OTG_TRIGB_CNTL);
+ optc_reg_state->otg_trigb_manual_trig = REG_READ(OTG_TRIGB_MANUAL_TRIG);
+ optc_reg_state->otg_update_lock = REG_READ(OTG_UPDATE_LOCK);
+ optc_reg_state->otg_v_blank_start_end = REG_READ(OTG_V_BLANK_START_END);
+ optc_reg_state->otg_v_count_stop_control = REG_READ(OTG_V_COUNT_STOP_CONTROL);
+ optc_reg_state->otg_v_count_stop_control2 = REG_READ(OTG_V_COUNT_STOP_CONTROL2);
+ optc_reg_state->otg_v_sync_a = REG_READ(OTG_V_SYNC_A);
+ optc_reg_state->otg_v_sync_a_cntl = REG_READ(OTG_V_SYNC_A_CNTL);
+ optc_reg_state->otg_v_total = REG_READ(OTG_V_TOTAL);
+ optc_reg_state->otg_v_total_control = REG_READ(OTG_V_TOTAL_CONTROL);
+ optc_reg_state->otg_v_total_int_status = REG_READ(OTG_V_TOTAL_INT_STATUS);
+ optc_reg_state->otg_v_total_max = REG_READ(OTG_V_TOTAL_MAX);
+ optc_reg_state->otg_v_total_mid = REG_READ(OTG_V_TOTAL_MID);
+ optc_reg_state->otg_v_total_min = REG_READ(OTG_V_TOTAL_MIN);
+ optc_reg_state->otg_vert_sync_control = REG_READ(OTG_VERT_SYNC_CONTROL);
+ optc_reg_state->otg_vertical_interrupt0_control = REG_READ(OTG_VERTICAL_INTERRUPT0_CONTROL);
+ optc_reg_state->otg_vertical_interrupt0_position = REG_READ(OTG_VERTICAL_INTERRUPT0_POSITION);
+ optc_reg_state->otg_vertical_interrupt1_control = REG_READ(OTG_VERTICAL_INTERRUPT1_CONTROL);
+ optc_reg_state->otg_vertical_interrupt1_position = REG_READ(OTG_VERTICAL_INTERRUPT1_POSITION);
+ optc_reg_state->otg_vertical_interrupt2_control = REG_READ(OTG_VERTICAL_INTERRUPT2_CONTROL);
+ optc_reg_state->otg_vertical_interrupt2_position = REG_READ(OTG_VERTICAL_INTERRUPT2_POSITION);
+ optc_reg_state->otg_vready_param = REG_READ(OTG_VREADY_PARAM);
+ optc_reg_state->otg_vstartup_param = REG_READ(OTG_VSTARTUP_PARAM);
+ optc_reg_state->otg_vsync_nom_int_status = REG_READ(OTG_VSYNC_NOM_INT_STATUS);
+ optc_reg_state->otg_vupdate_keepout = REG_READ(OTG_VUPDATE_KEEPOUT);
+ optc_reg_state->otg_vupdate_param = REG_READ(OTG_VUPDATE_PARAM);
+}
+
static const struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -377,6 +507,7 @@ static const struct timing_generator_funcs dcn31_tg_funcs = {
.init_odm = optc3_init_odm,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn31_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
index 0f72c274f40b..98f7d2e299c5 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
@@ -274,4 +274,6 @@ void optc3_init_odm(struct timing_generator *optc);
void optc31_read_otg_state(struct timing_generator *optc,
struct dcn_otg_state *s);
+void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_state *optc_reg_state);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 4a2caca37255..43ff957288b2 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
@@ -256,6 +256,7 @@ static const struct timing_generator_funcs dcn314_tg_funcs = {
.set_h_timing_div_manual_mode = optc314_set_h_timing_div_manual_mode,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn314_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index b2b226bcd871..3dcb0d0c931c 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -365,6 +365,7 @@ static const struct timing_generator_funcs dcn32_tg_funcs = {
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
.get_pipe_update_pending = optc3_get_pipe_update_pending,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn32_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index d159e3ed3bb3..ead92ad78a23 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -62,6 +62,7 @@
SF(OTG0_OTG_CONTROL, OTG_DISABLE_POINT_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_FIELD_NUMBER_CNTL, mask_sh),\
SF(OTG0_OTG_CONTROL, OTG_OUT_MUX, mask_sh),\
+ SF(OTG0_OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_EN, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_LINE_NUM, mask_sh),\
SF(OTG0_OTG_STEREO_CONTROL, OTG_STEREO_SYNC_OUTPUT_POLARITY, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 72bff94cb57d..f699e95059f3 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
@@ -162,6 +162,8 @@ static bool optc35_disable_crtc(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
+ REG_WAIT(OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+
optc1_clear_optc_underflow(optc);
return true;
@@ -428,6 +430,21 @@ static void optc35_set_long_vtotal(
}
}
+static void optc35_wait_otg_disable(struct timing_generator *optc)
+{
+ struct optc *optc1;
+ uint32_t is_master_en;
+
+ if (!optc || !optc->ctx)
+ return;
+
+ optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OTG_CONTROL, OTG_MASTER_EN, &is_master_en);
+ if (!is_master_en)
+ REG_WAIT(OTG_CONTROL, OTG_CURRENT_MASTER_EN_STATE, 0, 1, 100000);
+}
+
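
REG_WAIT in the hunks above and below polls a register field until it reaches an expected value or the retry budget runs out. A rough standalone equivalent of that pattern; the callback and busy-wait are placeholders, not DC or kernel APIs:

#include <stdbool.h>
#include <stdint.h>

/* Poll until read_field() returns 'expected', checking roughly every
 * interval_us microseconds, for at most 'attempts' tries.
 */
static bool poll_field(uint32_t (*read_field)(void), uint32_t expected,
		       unsigned int interval_us, unsigned int attempts)
{
	while (attempts--) {
		if (read_field() == expected)
			return true;
		/* kernel code would udelay(interval_us); crude stand-in */
		for (volatile unsigned int i = 0; i < interval_us * 100; i++)
			;
	}
	return false;
}
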
static const struct timing_generator_funcs dcn35_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -479,6 +496,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc35_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
+ .wait_otg_disable = optc35_wait_otg_disable,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
@@ -493,6 +511,7 @@ static const struct timing_generator_funcs dcn35_tg_funcs = {
.set_long_vtotal = optc35_set_long_vtotal,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn35_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
index ff79c38287df..a8e978d1fae8 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c
@@ -226,6 +226,11 @@ bool optc401_disable_crtc(struct timing_generator *optc)
REG_UPDATE(CONTROL,
VTG0_ENABLE, 0);
+ /* wait until OTG_CURRENT_MASTER_EN_STATE == 0 */
+ REG_WAIT(OTG_CONTROL,
+ OTG_CURRENT_MASTER_EN_STATE,
+ 0, 10, 15000);
+
/* CRTC disabled, so disable clock. */
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
@@ -528,6 +533,7 @@ static const struct timing_generator_funcs dcn401_tg_funcs = {
.set_vupdate_keepout = optc401_set_vupdate_keepout,
.wait_update_lock_status = optc401_wait_update_lock_status,
.read_otg_state = optc31_read_otg_state,
+ .optc_read_reg_state = optc31_read_reg_state,
};
void dcn401_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 3a51be63f020..d40d91ec2035 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
@@ -29,6 +29,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
@@ -77,6 +78,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -224,6 +226,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(4),
link_regs(5),
link_regs(6),
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -367,6 +370,7 @@ static const struct dce_abm_mask abm_mask = {
#define DCFE_MEM_PWR_CTRL_REG_BASE 0x1b03
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -374,6 +378,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -401,8 +406,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
#define CTX ctx
@@ -483,6 +490,11 @@ static struct stream_encoder *dce100_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id], &se_shift, &se_mask);
return &enc110->base;
@@ -623,7 +635,20 @@ static struct link_encoder *dce100_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -836,17 +861,24 @@ static enum dc_status build_mapped_resource(
return DC_OK;
}
-static enum dc_status dce100_validate_bandwidth(
+enum dc_status dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context,
enum dc_validate_mode validate_mode)
{
int i;
bool at_least_one_pipe = false;
+ struct dc_stream_state *stream = NULL;
+ const uint32_t max_pix_clk_khz = max(dc->clk_mgr->clks.max_supported_dispclk_khz, 400000);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
+ stream = context->res_ctx.pipe_ctx[i].stream;
+ if (stream) {
at_least_one_pipe = true;
+
+ if (stream->timing.pix_clk_100hz >= max_pix_clk_khz * 10)
+ return DC_FAIL_BANDWIDTH_VALIDATE;
+ }
}
if (at_least_one_pipe) {
@@ -854,7 +886,16 @@ static enum dc_status dce100_validate_bandwidth(
context->bw_ctx.bw.dce.dispclk_khz = 681000;
context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
+ /* On DCE 6.0 and 6.4 the PLL0 is both the display engine clock and
+ * the DP clock, and shouldn't be turned off. Just select the display
+ * clock value from its low power mode.
+ */
+ if (dc->ctx->dce_version == DCE_VERSION_6_0 ||
+ dc->ctx->dce_version == DCE_VERSION_6_4)
+ context->bw_ctx.bw.dce.dispclk_khz = 352000;
+ else
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+
context->bw_ctx.bw.dce.yclk_khz = 0;
}
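
The new check compares quantities kept in different units: stream pixel clocks are stored in 100 Hz steps while the clock manager reports kHz, so the kHz cap is multiplied by 10 to land both sides in 100 Hz units. A tiny sketch of the predicate, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

/* Reject a timing whose pixel clock meets or exceeds the display clock
 * cap; 1 kHz == 10 * 100 Hz, hence the factor of 10.
 */
static bool pix_clk_ok(uint32_t pix_clk_100hz, uint32_t max_clk_khz)
{
	return pix_clk_100hz < max_clk_khz * 10u;
}
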
@@ -881,7 +922,7 @@ static bool dce100_validate_surface_sets(
return true;
}
-static enum dc_status dce100_validate_global(
+enum dc_status dce100_validate_global(
struct dc *dc,
struct dc_state *context)
{
@@ -935,6 +976,10 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
int i;
int j = -1;
struct dc_link *link = stream->link;
+ enum engine_id preferred_engine = link->link_enc->preferred_engine;
+
+ if (dc_is_rgb_signal(stream->signal))
+ preferred_engine = link->link_enc->analog_engine;
for (i = 0; i < pool->stream_enc_count; i++) {
if (!res_ctx->is_stream_enc_acquired[i] &&
@@ -943,8 +988,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
* in daisy chain use case
*/
j = i;
- if (pool->stream_enc[i]->id ==
- link->link_enc->preferred_engine)
+ if (pool->stream_enc[i]->id == preferred_engine)
return pool->stream_enc[i];
}
}
@@ -1076,6 +1120,7 @@ static bool dce100_resource_construct(
dc->caps.disable_dp_clk_share = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
index fecab7c560f5..dd150a4b4610 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
@@ -41,6 +41,15 @@ struct resource_pool *dce100_create_resource_pool(
enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, struct dc_caps *caps);
+enum dc_status dce100_validate_global(
+ struct dc *dc,
+ struct dc_state *context);
+
+enum dc_status dce100_validate_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ enum dc_validate_mode validate_mode);
+
enum dc_status dce100_add_stream_to_ctx(
struct dc *dc,
struct dc_state *new_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index cccde5a6f3cd..cd54382c0af3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
@@ -82,6 +82,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -377,6 +378,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -424,7 +426,9 @@ static const struct dc_plane_cap plane_cap = {
64
};
-static const struct dc_debug_options debug_defaults = {
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
.enable_legacy_fast_update = true,
};
@@ -1376,6 +1380,7 @@ static bool dce110_resource_construct(
dc->caps.is_apu = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index 164ba796f64c..3f0a6bc4dcc2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
@@ -76,6 +76,7 @@
#endif
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -385,6 +386,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -429,8 +431,10 @@ static const struct dc_plane_cap plane_cap = {
64
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
#define CTX ctx
@@ -1111,12 +1115,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
&clks);
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
+ (int64_t)clks.clocks_in_khz[0] * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels>>1] * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
+ (int64_t)clks.clocks_in_khz[clks.num_levels-1] * memory_type_multiplier,
1000);
return;
@@ -1152,12 +1156,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
* YCLK = UMACLK*m_memoryTypeMultiplier
*/
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1247,6 +1251,7 @@ static bool dce112_resource_construct(
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index eb1e158d3436..b1570b6b1af3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -67,7 +67,7 @@
#include "reg_helper.h"
#include "dce100/dce100_resource.h"
-#include "link.h"
+#include "link_service.h"
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
@@ -491,6 +491,7 @@ static struct dce_i2c_hw *dce120_i2c_hw_create(
return dce_i2c_hw;
}
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0 + NBIO_BASE(mmBIOS_SCRATCH_0_BASE_IDX),
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3 + NBIO_BASE(mmBIOS_SCRATCH_3_BASE_IDX),
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
};
@@ -526,8 +527,11 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults = {
- .disable_clock_gate = true,
- .enable_legacy_fast_update = true,
+ .disable_clock_gate = true,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static struct clock_source *dce120_clock_source_create(
@@ -990,12 +994,12 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc)
memory_type_multiplier = MEMORY_TYPE_HBM;
dc->bw_vbios->low_yclk = bw_frc_to_fixed(
- mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
+ (int64_t)mem_clks.data[0].clocks_in_khz * memory_type_multiplier, 1000);
dc->bw_vbios->mid_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels>>1].clocks_in_khz * memory_type_multiplier,
1000);
dc->bw_vbios->high_yclk = bw_frc_to_fixed(
- mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
+ (int64_t)mem_clks.data[mem_clks.num_levels-1].clocks_in_khz * memory_type_multiplier,
1000);
/* Now notify PPLib/SMU about which Watermarks sets they should select
@@ -1089,6 +1093,7 @@ static bool dce120_resource_construct(
dc->caps.psp_setup_panel_mode = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
index 53b60044653f..f0152933bee2 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
@@ -34,6 +34,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "irq/dce60/irq_service_dce60.h"
#include "dce110/dce110_timing_generator.h"
@@ -79,6 +80,7 @@
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -239,7 +241,9 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(2),
link_regs(3),
link_regs(4),
- link_regs(5)
+ link_regs(5),
+ {0},
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -365,6 +369,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -372,6 +377,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -381,6 +387,7 @@ static const struct resource_caps res_cap_61 = {
.num_timing_generator = 4,
.num_audio = 6,
.num_stream_encoder = 6,
+ .num_analog_stream_encoder = 1,
.num_pll = 3,
.num_ddc = 6,
};
@@ -388,6 +395,7 @@ static const struct resource_caps res_cap_61 = {
static const struct resource_caps res_cap_64 = {
.num_timing_generator = 2,
.num_audio = 2,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 2,
.num_pll = 3,
.num_ddc = 2,
@@ -403,13 +411,13 @@ static const struct dc_plane_cap plane_cap = {
},
.max_upscale_factor = {
- .argb8888 = 16000,
+ .argb8888 = 1,
.nv12 = 1,
.fp16 = 1
},
.max_downscale_factor = {
- .argb8888 = 250,
+ .argb8888 = 1,
.nv12 = 1,
.fp16 = 1
}
@@ -598,6 +606,11 @@ static struct stream_encoder *dce60_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -717,7 +730,20 @@ static struct link_encoder *dce60_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -863,61 +889,6 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static enum dc_status dce60_validate_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- enum dc_validate_mode validate_mode)
-{
- int i;
- bool at_least_one_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
- at_least_one_pipe = true;
- }
-
- if (at_least_one_pipe) {
- /* TODO implement when needed but for now hardcode max value*/
- context->bw_ctx.bw.dce.dispclk_khz = 681000;
- context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
- } else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
- context->bw_ctx.bw.dce.yclk_khz = 0;
- }
-
- return DC_OK;
-}
-
-static bool dce60_validate_surface_sets(
- struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count == 0)
- continue;
-
- if (context->stream_status[i].plane_count > 1)
- return false;
-
- if (context->stream_status[i].plane_states[0]->format
- >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
- return false;
- }
-
- return true;
-}
-
-static enum dc_status dce60_validate_global(
- struct dc *dc,
- struct dc_state *context)
-{
- if (!dce60_validate_surface_sets(context))
- return DC_FAIL_SURFACE_VALIDATE;
-
- return DC_OK;
-}
-
static void dce60_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -931,10 +902,10 @@ static const struct resource_funcs dce60_res_pool_funcs = {
.destroy = dce60_destroy_resource_pool,
.link_enc_create = dce60_link_encoder_create,
.panel_cntl_create = dce60_panel_cntl_create,
- .validate_bandwidth = dce60_validate_bandwidth,
+ .validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce60_validate_global,
+ .validate_global = dce100_validate_global,
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 3e8b0ac11d90..8687104cabb7 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
@@ -32,6 +32,7 @@
#include "stream_encoder.h"
#include "resource.h"
+#include "clk_mgr.h"
#include "include/irq_service_interface.h"
#include "irq/dce80/irq_service_dce80.h"
#include "dce110/dce110_timing_generator.h"
@@ -77,6 +78,7 @@
#ifndef mmBIOS_SCRATCH_2
+ #define mmBIOS_SCRATCH_0 0x05C9
#define mmBIOS_SCRATCH_2 0x05CB
#define mmBIOS_SCRATCH_3 0x05CC
#define mmBIOS_SCRATCH_6 0x05CF
@@ -240,6 +242,7 @@ static const struct dce110_link_enc_registers link_enc_regs[] = {
link_regs(4),
link_regs(5),
link_regs(6),
+ { .DAC_ENABLE = mmDAC_ENABLE },
};
#define stream_enc_regs(id)\
@@ -367,6 +370,7 @@ static const struct dce110_clk_src_mask cs_mask = {
};
static const struct bios_registers bios_regs = {
+ .BIOS_SCRATCH_0 = mmBIOS_SCRATCH_0,
.BIOS_SCRATCH_3 = mmBIOS_SCRATCH_3,
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6
};
@@ -374,6 +378,7 @@ static const struct bios_registers bios_regs = {
static const struct resource_caps res_cap = {
.num_timing_generator = 6,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 3,
.num_ddc = 6,
@@ -382,6 +387,7 @@ static const struct resource_caps res_cap = {
static const struct resource_caps res_cap_81 = {
.num_timing_generator = 4,
.num_audio = 7,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 7,
.num_pll = 3,
.num_ddc = 6,
@@ -390,6 +396,7 @@ static const struct resource_caps res_cap_81 = {
static const struct resource_caps res_cap_83 = {
.num_timing_generator = 2,
.num_audio = 6,
+ .num_analog_stream_encoder = 1,
.num_stream_encoder = 6,
.num_pll = 2,
.num_ddc = 2,
@@ -417,8 +424,10 @@ static const struct dc_plane_cap plane_cap = {
}
};
-static const struct dc_debug_options debug_defaults = {
- .enable_legacy_fast_update = true,
+static const struct dc_debug_options debug_defaults = { 0 };
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dce_dmcu_registers dmcu_regs = {
@@ -604,6 +613,11 @@ static struct stream_encoder *dce80_stream_encoder_create(
if (!enc110)
return NULL;
+ if (eng_id == ENGINE_ID_DACA || eng_id == ENGINE_ID_DACB) {
+ dce110_analog_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id);
+ return &enc110->base;
+ }
+
dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id,
&stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -723,7 +737,20 @@ static struct link_encoder *dce80_link_encoder_create(
kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL);
int link_regs_id;
- if (!enc110 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
+ if (!enc110)
+ return NULL;
+
+ if (enc_init_data->connector.id == CONNECTOR_ID_VGA) {
+ dce110_link_encoder_construct(enc110,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[ENGINE_ID_DACA],
+ NULL,
+ NULL);
+ return &enc110->base;
+ }
+
+ if (enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
return NULL;
link_regs_id =
@@ -869,61 +896,6 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool)
}
}
-static enum dc_status dce80_validate_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- enum dc_validate_mode validate_mode)
-{
- int i;
- bool at_least_one_pipe = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].stream)
- at_least_one_pipe = true;
- }
-
- if (at_least_one_pipe) {
- /* TODO implement when needed but for now hardcode max value*/
- context->bw_ctx.bw.dce.dispclk_khz = 681000;
- context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
- } else {
- context->bw_ctx.bw.dce.dispclk_khz = 0;
- context->bw_ctx.bw.dce.yclk_khz = 0;
- }
-
- return DC_OK;
-}
-
-static bool dce80_validate_surface_sets(
- struct dc_state *context)
-{
- int i;
-
- for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count == 0)
- continue;
-
- if (context->stream_status[i].plane_count > 1)
- return false;
-
- if (context->stream_status[i].plane_states[0]->format
- >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
- return false;
- }
-
- return true;
-}
-
-static enum dc_status dce80_validate_global(
- struct dc *dc,
- struct dc_state *context)
-{
- if (!dce80_validate_surface_sets(context))
- return DC_FAIL_SURFACE_VALIDATE;
-
- return DC_OK;
-}
-
static void dce80_destroy_resource_pool(struct resource_pool **pool)
{
struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool);
@@ -937,10 +909,10 @@ static const struct resource_funcs dce80_res_pool_funcs = {
.destroy = dce80_destroy_resource_pool,
.link_enc_create = dce80_link_encoder_create,
.panel_cntl_create = dce80_panel_cntl_create,
- .validate_bandwidth = dce80_validate_bandwidth,
+ .validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
- .validate_global = dce80_validate_global,
+ .validate_global = dce100_validate_global,
.find_first_free_match_stream_enc_for_link = dce100_find_first_free_match_stream_enc_for_link
};
@@ -973,6 +945,7 @@ static bool dce80_construct(
dc->caps.dual_link_dvi = true;
dc->caps.extended_aux_timeout_support = false;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
@@ -1374,6 +1347,7 @@ static bool dce83_construct(
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.is_apu = true;
dc->debug = debug_defaults;
+ dc->check_config = config_defaults;
/*************************************************
* Create resources *
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index 652c05c35494..f12367adf145 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -556,10 +556,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.recovery_enabled = false, /*enable this by default after testing.*/
.max_downscale_src_width = 3840,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn10_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN10_DPP(*dpp));
@@ -1395,6 +1398,8 @@ static bool dcn10_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.pq = 0;
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 0;
+ dc->debug = debug_defaults_drv;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index f9cbdad3ef37..6679c1a14f2f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -85,7 +85,7 @@
#include "vm_helper.h"
#include "link_enc_cfg.h"
-#include "link.h"
+#include "link_service.h"
#define DC_LOGGER_INIT(logger)
@@ -718,10 +718,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.scl_reset_length10 = true,
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
void dcn20_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -733,7 +736,7 @@ struct dpp *dcn20_dpp_create(
uint32_t inst)
{
struct dcn20_dpp *dpp =
- kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
@@ -751,7 +754,7 @@ struct input_pixel_processor *dcn20_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
BREAK_TO_DEBUGGER();
@@ -768,7 +771,7 @@ struct output_pixel_processor *dcn20_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_opp *opp =
- kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
if (!opp) {
BREAK_TO_DEBUGGER();
@@ -785,7 +788,7 @@ struct dce_aux *dcn20_aux_engine_create(
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
@@ -823,7 +826,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
@@ -835,8 +838,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
}
struct mpc *dcn20_mpc_create(struct dc_context *ctx)
{
- struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
- GFP_ATOMIC);
+ struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc), GFP_KERNEL);
if (!mpc20)
return NULL;
@@ -853,8 +855,7 @@ struct mpc *dcn20_mpc_create(struct dc_context *ctx)
struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
{
int i;
- struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_ATOMIC);
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL);
if (!hubbub)
return NULL;
@@ -882,7 +883,7 @@ struct timing_generator *dcn20_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -962,7 +963,7 @@ static struct clock_source *dcn20_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
@@ -1061,7 +1062,7 @@ struct display_stream_compressor *dcn20_dsc_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn20_dsc *dsc =
- kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
if (!dsc) {
BREAK_TO_DEBUGGER();
@@ -1198,7 +1199,7 @@ struct hubp *dcn20_hubp_create(
uint32_t inst)
{
struct dcn20_hubp *hubp2 =
- kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
if (!hubp2)
return NULL;
@@ -1668,6 +1669,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+ dsc_cfg.dsc_padding = pipe_ctx->dsc_padding_params.dsc_hactive_padding;
if (!pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
return false;
@@ -2286,7 +2288,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
@@ -2472,6 +2474,7 @@ static bool dcn20_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2765,7 +2768,7 @@ struct resource_pool *dcn20_create_resource_pool(
struct dc *dc)
{
struct dcn20_resource_pool *pool =
- kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index e4a1338d21e0..055107843a70 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -614,10 +614,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = false,
.underflow_assert_delay_us = 0xFFFFFFFF,
.enable_tri_buf = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn201_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN201_DPP(*dpp));
@@ -629,7 +632,7 @@ static struct dpp *dcn201_dpp_create(
uint32_t inst)
{
struct dcn201_dpp *dpp =
- kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_dpp), GFP_KERNEL);
if (!dpp)
return NULL;
@@ -646,7 +649,7 @@ static struct input_pixel_processor *dcn201_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn10_ipp *ipp =
- kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
if (!ipp) {
return NULL;
@@ -662,7 +665,7 @@ static struct output_pixel_processor *dcn201_opp_create(
struct dc_context *ctx, uint32_t inst)
{
struct dcn201_opp *opp =
- kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_opp), GFP_KERNEL);
if (!opp) {
return NULL;
@@ -677,7 +680,7 @@ static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx,
uint32_t inst)
{
struct aux_engine_dce110 *aux_engine =
- kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
if (!aux_engine)
return NULL;
@@ -710,7 +713,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
uint32_t inst)
{
struct dce_i2c_hw *dce_i2c_hw =
- kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
if (!dce_i2c_hw)
return NULL;
@@ -723,8 +726,7 @@ static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx,
static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
{
- struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc),
- GFP_ATOMIC);
+ struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc), GFP_KERNEL);
if (!mpc201)
return NULL;
@@ -740,8 +742,7 @@ static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx)
{
- struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
- GFP_ATOMIC);
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub), GFP_KERNEL);
if (!hubbub)
return NULL;
@@ -759,7 +760,7 @@ static struct timing_generator *dcn201_timing_generator_create(
uint32_t instance)
{
struct optc *tgn10 =
- kzalloc(sizeof(struct optc), GFP_ATOMIC);
+ kzalloc(sizeof(struct optc), GFP_KERNEL);
if (!tgn10)
return NULL;
@@ -793,7 +794,7 @@ static struct link_encoder *dcn201_link_encoder_create(
const struct encoder_init_data *enc_init_data)
{
struct dcn20_link_encoder *enc20 =
- kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL);
struct dcn10_link_encoder *enc10;
if (!enc20 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs))
@@ -821,7 +822,7 @@ static struct clock_source *dcn201_clock_source_create(
bool dp_clk_src)
{
struct dce110_clk_src *clk_src =
- kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+ kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
if (!clk_src)
return NULL;
@@ -856,7 +857,7 @@ static struct stream_encoder *dcn201_stream_encoder_create(
struct dc_context *ctx)
{
struct dcn10_stream_encoder *enc1 =
- kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL);
if (!enc1)
return NULL;
@@ -883,7 +884,7 @@ static const struct dce_hwseq_mask hwseq_mask = {
static struct dce_hwseq *dcn201_hwseq_create(
struct dc_context *ctx)
{
- struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC);
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL);
if (hws) {
hws->ctx = ctx;
@@ -983,7 +984,7 @@ static struct hubp *dcn201_hubp_create(
uint32_t inst)
{
struct dcn201_hubp *hubp201 =
- kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_hubp), GFP_KERNEL);
if (!hubp201)
return NULL;
@@ -1153,6 +1154,7 @@ static bool dcn201_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->debug = debug_defaults_drv;
+ dc->check_config = config_defaults;
/*a0 only, remove later*/
dc->work_arounds.no_connect_phy_config = true;
@@ -1303,7 +1305,7 @@ struct resource_pool *dcn201_create_resource_pool(
struct dc *dc)
{
struct dcn201_resource_pool *pool =
- kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC);
+ kzalloc(sizeof(struct dcn201_resource_pool), GFP_KERNEL);
if (!pool)
return NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 918742a42ded..2060acd5ae09 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -626,10 +626,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.usbc_combo_phy_reset_wa = true,
.dmub_command_table = true,
.use_max_lb = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1458,6 +1461,7 @@ static bool dcn21_resource_construct(
dc->caps.color.mpc.ocsc = 1;
dc->caps.dp_hdmi21_pcon_support = true;
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 895349d9ca07..d0ebb733e802 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -60,7 +60,7 @@
#include "dml/display_mode_vba.h"
#include "dcn30/dcn30_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -727,10 +727,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -2192,7 +2195,7 @@ void dcn30_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params
j = 0;
// create the final dcfclk and uclk table
while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) {
- if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) {
+ if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) {
dcfclk_mhz[num_states] = dcfclk_sta_targets[i];
dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++];
} else {
@@ -2374,6 +2377,7 @@ static bool dcn30_resource_construct(
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index 82a205a7c25c..3ad6a3d4858e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -701,10 +701,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = false,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static void dcn301_dpp_destroy(struct dpp **dpp)
{
kfree(TO_DCN20_DPP(*dpp));
@@ -1498,6 +1501,7 @@ static bool dcn301_resource_construct(
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 3345068a878c..c0d4a1dc94f8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -47,7 +47,8 @@
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
+
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
#include "dce/dce_aux.h"
@@ -97,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1289,6 +1293,7 @@ static bool dcn302_resource_construct(
&is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 3479e1eab4cd..75e09c2c283e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -47,7 +47,7 @@
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
@@ -98,10 +98,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.dmub_command_table = true,
.use_max_lb = true,
.exit_idle_opt_for_cursor_updates = true,
- .enable_legacy_fast_update = false,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1234,6 +1237,7 @@ static bool dcn303_resource_construct(
bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled);
dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 3ed7f50554e2..0d667b54ccf8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -888,12 +888,15 @@ static const struct dc_debug_options debug_defaults_drv = {
}
},
.disable_z10 = true,
- .enable_legacy_fast_update = true,
.enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/
.dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1978,6 +1981,7 @@ static bool dcn31_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index 663c49cce4aa..3ccde75a4ecb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
@@ -924,9 +924,13 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.seamless_boot_odm_combine = true,
- .enable_legacy_fast_update = true,
.using_dml2 = false,
.disable_dsc_power_gate = true,
+ .min_disp_clk_khz = 100000,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1909,6 +1913,7 @@ static bool dcn314_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index 82cc78c291d8..4e962f522f1b 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -887,9 +887,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .enable_legacy_fast_update = true,
.psr_power_use_phy_fsm = 0,
.using_dml2 = false,
+ .min_disp_clk_khz = 100000,
+};
+
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
};
static const struct dc_panel_config panel_config_defaults = {
@@ -1939,6 +1943,7 @@ static bool dcn315_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index 636110e48d01..5a95dd54cb42 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
@@ -882,10 +882,13 @@ static const struct dc_debug_options debug_defaults_drv = {
.afmt = true,
}
},
- .enable_legacy_fast_update = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1815,6 +1818,7 @@ static bool dcn316_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 9917b366f00c..b276fec3e479 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -69,7 +69,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -92,7 +92,7 @@
#include "dc_state_priv.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -738,6 +738,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dp_plus_plus_wa = true,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
+ .disable_stutter_for_wm_program = true
+};
+
+static const struct dc_check_config config_defaults = {
.enable_legacy_fast_update = false,
};
@@ -1843,7 +1847,7 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc,
dc_state_set_stream_cursor_subvp_limit(stream, context, true);
status = DC_FAIL_HW_CURSOR_SUPPORT;
}
- };
+ }
}
if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
@@ -2196,7 +2200,8 @@ static bool dcn32_resource_construct(
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
/* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed*/
dc->caps.max_cursor_size = 64;
- dc->caps.max_buffered_cursor_size = 64; // sqrt(16 * 1024 / 4)
+	/* floor(sqrt(buf_size_bytes / bpp) * bpp, fixed_req_size) / bpp = max_width */
+ dc->caps.max_buffered_cursor_size = 64; // floor(sqrt(16 * 1024 / 4) * 4, 256) / 4 = 64
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 4;
@@ -2293,6 +2298,7 @@ static bool dcn32_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -2852,7 +2858,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_opp_head(
free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx];
free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx];
free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst;
- free_pipe->hblank_borrow = otg_master->hblank_borrow;
+ free_pipe->dsc_padding_params = otg_master->dsc_padding_params;
if (free_pipe->stream->timing.flags.DSC == 1) {
dcn20_acquire_dsc(free_pipe->stream->ctx->dc,
&new_ctx->res_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 82f966cf4ed2..99f0432288b4 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -1141,7 +1141,8 @@ unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
SRI_ARR(DCN_SURF1_TTU_CNTL1, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL0, HUBPREQ, id), \
SRI_ARR(DCN_CUR0_TTU_CNTL1, HUBPREQ, id), \
- SRI_ARR(HUBP_CLK_CNTL, HUBP, id)
+ SRI_ARR(HUBP_CLK_CNTL, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
#define HUBP_REG_LIST_DCN2_COMMON_RI(id) \
HUBP_REG_LIST_DCN_RI(id), HUBP_REG_LIST_DCN_VM_RI(id), \
SRI_ARR(PREFETCH_SETTINGS, HUBPREQ, id), \
@@ -1229,7 +1230,8 @@ unsigned int dcn32_get_max_hw_cursor_size(const struct dc *dc,
SR(DCHUBBUB_ARB_MALL_CNTL), \
SR(DCN_VM_FAULT_ADDR_MSB), SR(DCN_VM_FAULT_ADDR_LSB), \
SR(DCN_VM_FAULT_CNTL), SR(DCN_VM_FAULT_STATUS), \
- SR(SDPIF_REQUEST_RATE_LIMIT)
+ SR(SDPIF_REQUEST_RATE_LIMIT), \
+ SR(DCHUBBUB_SDPIF_CFG0)
/* DCCG */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 061c0907d802..3466ca34c93f 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -72,7 +72,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -731,11 +731,14 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_subvp_high_refresh = false,
.fpo_vactive_min_active_margin_us = 200,
.fpo_vactive_max_blank_us = 1000,
- .enable_legacy_fast_update = false,
.disable_dc_mode_overwrite = true,
.using_dml2 = false,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static struct dce_aux *dcn321_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1797,6 +1800,7 @@ static bool dcn321_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 8475c6eec547..ef69898d2cc5 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -33,7 +33,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn35_resource.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
@@ -61,7 +61,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -767,7 +767,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -788,6 +787,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1760,6 +1763,20 @@ enum dc_status dcn35_patch_unknown_plane_state(struct dc_plane_state *plane_stat
}
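+/*
+ * Thin wrapper so the FPU-dependent DML code always runs inside a
+ * DC_FP_START()/DC_FP_END() protected region; kernel code must not touch
+ * FPU state outside such a section. The same pattern is used for the
+ * dcn351 and dcn36 pools below.
+ */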
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+}
+
static struct resource_funcs dcn35_res_pool_funcs = {
.destroy = dcn35_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1770,7 +1787,7 @@ static struct resource_funcs dcn35_res_pool_funcs = {
.validate_bandwidth = dcn35_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1900,9 +1917,6 @@ static bool dcn35_resource_construct(
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
- dc->caps.num_of_host_routers = 2;
- dc->caps.num_of_dpias_per_host_router = 2;
-
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
 * It's expected for future ASICs to have an equal or higher value, in order to
@@ -1935,6 +1949,7 @@ static bool dcn35_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
index 0971c0f74186..f3c614c4490c 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
@@ -40,7 +40,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -83,7 +83,7 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
@@ -747,7 +747,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -768,6 +767,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1732,6 +1735,21 @@ static enum dc_status dcn351_validate_bandwidth(struct dc *dc,
return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE;
}
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn351_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+	return ret;
+}
+
static struct resource_funcs dcn351_res_pool_funcs = {
.destroy = dcn351_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1742,7 +1760,7 @@ static struct resource_funcs dcn351_res_pool_funcs = {
.validate_bandwidth = dcn351_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1872,9 +1890,6 @@ static bool dcn351_resource_construct(
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
- dc->caps.num_of_host_routers = 2;
- dc->caps.num_of_dpias_per_host_router = 2;
-
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
 * It's expected for future ASICs to have an equal or higher value, in order to
@@ -1905,6 +1920,7 @@ static bool dcn351_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
index 8bae7fcedc22..6469d5fe2e6d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
@@ -11,7 +11,7 @@
#include "resource.h"
#include "include/irq_service_interface.h"
#include "dcn36_resource.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#include "dcn20/dcn20_resource.h"
#include "dcn30/dcn30_resource.h"
@@ -40,7 +40,7 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "link.h"
+#include "link_service.h"
#include "dcn31/dcn31_apg.h"
#include "dcn32/dcn32_dio_link_encoder.h"
#include "dcn31/dcn31_vpg.h"
@@ -748,7 +748,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.using_dml2 = true,
.support_eDP1_5 = true,
.enable_hpo_pg_support = false,
- .enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = true,
.disable_idle_power_optimizations = false,
.dmcub_emulation = false,
@@ -769,6 +768,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.min_disp_clk_khz = 50000,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = true,
+};
+
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
@@ -1734,6 +1737,20 @@ static enum dc_status dcn35_validate_bandwidth(struct dc *dc,
}
+static int populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ enum dc_validate_mode validate_mode)
+{
+ int ret;
+
+ DC_FP_START();
+ ret = dcn35_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode);
+ DC_FP_END();
+
+ return ret;
+}
+
static struct resource_funcs dcn36_res_pool_funcs = {
.destroy = dcn36_destroy_resource_pool,
.link_enc_create = dcn35_link_encoder_create,
@@ -1744,7 +1761,7 @@ static struct resource_funcs dcn36_res_pool_funcs = {
.validate_bandwidth = dcn35_validate_bandwidth,
.calculate_wm_and_dlg = NULL,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
- .populate_dml_pipes = dcn35_populate_dml_pipes_from_context_fpu,
+ .populate_dml_pipes = populate_dml_pipes_from_context_fpu,
.acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer,
.release_pipe = dcn20_release_pipe,
.add_stream_to_ctx = dcn30_add_stream_to_ctx,
@@ -1873,9 +1890,6 @@ static bool dcn36_resource_construct(
dc->caps.num_of_host_routers = 2;
dc->caps.num_of_dpias_per_host_router = 2;
- dc->caps.num_of_host_routers = 2;
- dc->caps.num_of_dpias_per_host_router = 2;
-
/* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order
* to provide some margin.
 * It's expected for future ASICs to have an equal or higher value, in order to
@@ -1907,6 +1921,7 @@ static bool dcn36_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index b3988e38d0a6..875ae97489d3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -50,7 +50,7 @@
#include "dml/display_mode_vba.h"
#include "dcn401/dcn401_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "link.h"
+#include "link_service.h"
#include "link_enc_cfg.h"
#include "dcn31/dcn31_panel_cntl.h"
@@ -73,7 +73,7 @@
#include "dc_state_priv.h"
-#include "dml2/dml2_wrapper.h"
+#include "dml2_0/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -708,6 +708,7 @@ static const struct dc_debug_options debug_defaults_drv = {
},
.use_max_lb = true,
.force_disable_subvp = false,
+ .disable_force_pstate_allow_on_hw_release = false,
.exit_idle_opt_for_cursor_updates = true,
.using_dml2 = true,
.using_dml21 = true,
@@ -720,7 +721,6 @@ static const struct dc_debug_options debug_defaults_drv = {
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
.disable_unbounded_requesting = false,
- .enable_legacy_fast_update = false,
.dcc_meta_propagation_delay_us = 10,
.fams_version = {
.minor = 1,
@@ -736,6 +736,10 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_cositing = CHROMA_COSITING_NONE + 1,
};
+static const struct dc_check_config config_defaults = {
+ .enable_legacy_fast_update = false,
+};
+
static struct dce_aux *dcn401_aux_engine_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1667,7 +1671,7 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc,
dc_state_set_stream_cursor_subvp_limit(stream, context, true);
status = DC_FAIL_HW_CURSOR_SUPPORT;
}
- };
+ }
}
if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) {
@@ -1698,6 +1702,9 @@ static void dcn401_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
+ if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
+ pixel_clk_params->requested_pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;
+
if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment)
link_enc = link_enc_cfg_get_link_enc(link);
if (link_enc)
@@ -1991,6 +1998,7 @@ static bool dcn401_resource_construct(
dc->caps.vbios_lttpr_aware = true;
}
}
+ dc->check_config = config_defaults;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
index 2ae6831c31ef..e1fa2e80a15a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h
@@ -140,7 +140,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \
HUBP_3DLUT_FL_REG_LIST_DCN401(id), \
SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \
- SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id)
+ SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id), \
+ SRI_ARR(HUBPRET_READ_LINE_VALUE, HUBPRET, id)
/* ABM */
#define ABM_DCN401_REG_LIST_RI(id) \
@@ -226,7 +227,8 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
#define LE_DCN401_REG_LIST_RI(id) \
LE_DCN3_REG_LIST_RI(id), \
SRI_ARR(DP_DPHY_INTERNAL_CTRL, DP, id), \
- SRI_ARR(DIG_BE_CLK_CNTL, DIG, id)
+ SRI_ARR(DIG_BE_CLK_CNTL, DIG, id),\
+ SR_ARR(DIO_CLK_CNTL, id)
/* DPP */
#define DPP_REG_LIST_DCN401_COMMON_RI(id) \
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
new file mode 100644
index 000000000000..bc93356a0b5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MIT
+#
+# Copyright 2025 Advanced Micro Devices, Inc.
+# Makefile for the bounding box (soc_and_ip_translator) component.
+# Floating point is required due to the nature of bounding box values.
+
+soc_and_ip_translator_ccflags := $(CC_FLAGS_FPU)
+soc_and_ip_translator_rcflags := $(CC_FLAGS_NO_FPU)
+
+CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags)
+
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags)
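+
+# CFLAGS_<object> adds per-object compiler flags and CFLAGS_REMOVE_<object>
+# strips them: the FPU flags are added and the kernel's default no-FPU flags
+# removed, so only this translation unit is built with floating point enabled.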
+
+soc_and_ip_translator := soc_and_ip_translator.o
+soc_and_ip_translator += dcn401/dcn401_soc_and_ip_translator.o
+
+AMD_DAL_soc_and_ip_translator := $(addprefix $(AMDDALPATH)/dc/soc_and_ip_translator/, $(soc_and_ip_translator))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_soc_and_ip_translator)
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
new file mode 100644
index 000000000000..3190c76eb482
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn4_soc_bb.h"
+
+/* The soc_and_ip_translator component is used to get up-to-date bounding box
+ * values. Bounding box values are stored in several locations, and those
+ * locations can vary with DCN revision. This component provides an interface
+ * to get DCN-specific bounding box values.
+ */
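+
+/* Hypothetical usage sketch (caller-side names assumed, not part of this
+ * patch): construct the translator, then fetch the bounding box through its
+ * function table:
+ *
+ *	struct soc_and_ip_translator translator;
+ *	struct dml2_soc_bb soc_bb;
+ *
+ *	dcn401_construct_soc_and_ip_translator(&translator);
+ *	translator.translator_funcs->get_soc_bb(&soc_bb, dc, config);
+ */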
+
+static void get_default_soc_bb(struct dml2_soc_bb *soc_bb)
+{
+ memcpy(soc_bb, &dml2_socbb_dcn401, sizeof(struct dml2_soc_bb));
+ memcpy(&soc_bb->qos_parameters, &dml_dcn4_variant_a_soc_qos_params, sizeof(struct dml2_soc_qos_parameters));
+}
+
+/*
+ * The DC clock table is obtained from the SMU during runtime.
+ * SMU stands for System Management Unit; it is a power management processor.
+ * It owns the initialization of dc's clock table and the programming of
+ * clock values based on dc's requests.
+ * The clock values in the base soc bb are dummy placeholders. The real clock
+ * values are retrieved from SMU firmware into dc's clock table at runtime.
+ * This function overrides the dummy placeholder values with the real values
+ * from dc's clock table.
+ */
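+/*
+ * Illustrative trace of the clamping below (hypothetical values, not from a
+ * real SMU table): with use_clock_dc_limits set, dc_mode_limit.dcfclk_mhz =
+ * 1200 and table entries {600, 1400, 1600} MHz, the loop emits
+ * clk_values_khz = {600000, 1200000, 0, ...} with num_clk_values = 2: the
+ * first level above the limit is clamped to the limit and the remaining
+ * levels are dropped.
+ */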
+static void dcn401_convert_dc_clock_table_to_soc_bb_clock_table(
+ struct dml2_soc_state_table *dml_clk_table,
+ const struct clk_bw_params *dc_bw_params,
+ bool use_clock_dc_limits)
+{
+ int i;
+ const struct clk_limit_table *dc_clk_table;
+
+ if (dc_bw_params == NULL)
+ /* skip if bw params could not be obtained from smu */
+ return;
+
+ dc_clk_table = &dc_bw_params->clk_table;
+
+ /* dcfclk */
+ if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dcfclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz &&
+ dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000;
+ dml_clk_table->dcfclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->dcfclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* fclk */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->fclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz &&
+ dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000;
+ dml_clk_table->fclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz &&
+ dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000;
+ dml_clk_table->uclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->uclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz &&
+ dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000;
+ dml_clk_table->dispclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ dml_clk_table->dispclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz &&
+ dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000;
+ dml_clk_table->dppclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ dml_clk_table->dppclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz &&
+ dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000;
+ dml_clk_table->dtbclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ dml_clk_table->dtbclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ if (use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz &&
+ dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) {
+ if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000;
+ dml_clk_table->socclk.num_clk_values = i + 1;
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ dml_clk_table->socclk.num_clk_values = i;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ }
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dram config */
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+}
+
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ soc_bb->dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000;
+ soc_bb->dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ soc_bb->mall_allocated_for_dcn_mbytes = dc->caps.mall_size_total / (1024 * 1024);
+
+ if (dc->clk_mgr->funcs->is_smu_present &&
+ dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) {
+ dcn401_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table,
+ dc->clk_mgr->bw_params,
+ config->use_clock_dc_limits);
+ }
+}
+
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ soc_bb->dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
+ soc_bb->xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000;
+
+ /* latencies in vbios are platform specific and should be used if provided */
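+	/* e.g. a hypothetical bb_info value of 350 (in units of 100 ns) is 35.0 us */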
+ if (dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_enter_exit_latency_100ns / 10.0;
+
+ if (dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->ctx->dc_bios->bb_info.dram_sr_exit_latency_100ns / 10.0;
+}
+
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ /* set if the value is provided */
+ if (dc->bb_overrides.sr_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns)
+ soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns)
+ soc_bb->power_management_parameters.dram_clk_change_blackout_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.fclk_clock_change_latency_ns)
+ soc_bb->power_management_parameters.fclk_change_blackout_us =
+ dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0;
+
+ //Z8 values not expected nor used on DCN401 but still added for completeness
+ if (dc->bb_overrides.sr_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_exit_latency_us =
+ dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns)
+ soc_bb->power_management_parameters.z8_stutter_enter_plus_exit_latency_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+}
+
+static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+	/* An individual value may be overwritten even if it was obtained by an earlier function.
+	 * Updates are applied in order of priority (lowest to highest).
+	 */
+ dc_assert_fp_enabled();
+
+ dcn401_update_soc_bb_with_values_from_clk_mgr(soc_bb, dc, config);
+ dcn401_update_soc_bb_with_values_from_vbios(soc_bb, dc);
+ dcn401_update_soc_bb_with_values_from_software_policy(soc_bb, dc);
+}
+
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ //get default soc_bb with static values
+ get_default_soc_bb(soc_bb);
+ //update soc_bb values with more accurate values
+ apply_soc_bb_updates(soc_bb, dc, config);
+}
+
+static void dcn401_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn401_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn401_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn401_get_ip_caps,
+};
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn401_translator_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
new file mode 100644
index 000000000000..88c11b6be004
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN401_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN401_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "soc_and_ip_translator.h"
+#include "dml2_0/dml21/inc/dml_top_soc_parameter_types.h"
+
+void dcn401_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+/* Functions that can be re-used by higher DCN revisions of this component */
+void dcn401_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
+void dcn401_update_soc_bb_with_values_from_vbios(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+void dcn401_update_soc_bb_with_values_from_software_policy(struct dml2_soc_bb *soc_bb, const struct dc *dc);
+
+#endif /* _DCN401_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
new file mode 100644
index 000000000000..c9e224d262c9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "dcn42_soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+#include "bounding_boxes/dcn42_soc_bb.h"
+
+/* The soc_and_ip_translator component provides up-to-date bounding box values.
+ * Bounding box values are stored in several locations, and those locations can
+ * vary with DCN revision; this component provides a single interface to the
+ * DCN-specific values.
+ */
+
+static void dcn42_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
+{
+ *ip_caps = dml2_dcn42_max_ip_caps;
+}
+
+static struct soc_and_ip_translator_funcs dcn42_translator_funcs = {
+ .get_soc_bb = dcn401_get_soc_bb,
+ .get_ip_caps = dcn42_get_ip_caps,
+};
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator)
+{
+ soc_and_ip_translator->translator_funcs = &dcn42_translator_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
new file mode 100644
index 000000000000..914dcbb369a7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#ifndef _DCN42_SOC_AND_IP_TRANSLATOR_H_
+#define _DCN42_SOC_AND_IP_TRANSLATOR_H_
+
+#include "core_types.h"
+#include "dc.h"
+#include "clk_mgr.h"
+#include "dml_top_soc_parameter_types.h"
+#include "soc_and_ip_translator.h"
+
+void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+
+#endif /* _DCN42_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
new file mode 100644
index 000000000000..0fc0e5a6c171
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2025 Advanced Micro Devices, Inc.
+
+#include "soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+
+static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator,
+ enum dce_version dc_version)
+{
+ switch (dc_version) {
+ case DCN_VERSION_4_01:
+ dcn401_construct_soc_and_ip_translator(soc_and_ip_translator);
+ break;
+ default:
+ break;
+ }
+}
+
+struct soc_and_ip_translator *dc_create_soc_and_ip_translator(enum dce_version dc_version)
+{
+ struct soc_and_ip_translator *soc_and_ip_translator;
+
+ soc_and_ip_translator = kzalloc(sizeof(*soc_and_ip_translator), GFP_KERNEL);
+ if (!soc_and_ip_translator)
+ return NULL;
+
+ dc_construct_soc_and_ip_translator(soc_and_ip_translator, dc_version);
+
+ return soc_and_ip_translator;
+}
+
+void dc_destroy_soc_and_ip_translator(struct soc_and_ip_translator **soc_and_ip_translator)
+{
+ kfree(*soc_and_ip_translator);
+ *soc_and_ip_translator = NULL;
+}
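
A sketch of the intended call sequence, assuming the caller owns the dml2_soc_bb; the wrapper function name is hypothetical. Note that for an unsupported dce_version the constructor leaves translator_funcs unset (the switch default does nothing), so it must be checked before use:

	static void example_populate_soc_bb(const struct dc *dc,
					    const struct dml2_configuration_options *config,
					    struct dml2_soc_bb *soc_bb)
	{
		struct soc_and_ip_translator *xlat =
			dc_create_soc_and_ip_translator(DCN_VERSION_4_01);

		if (!xlat || !xlat->translator_funcs)
			return;

		/* dcn401 path: defaults, then clk_mgr, vbios and software policy */
		xlat->translator_funcs->get_soc_bb(soc_bb, dc, config);

		dc_destroy_soc_and_ip_translator(&xlat);
	}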
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
index 55b929ca7982..7a839984dbc0 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c
@@ -641,16 +641,16 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
/* this gives the direction of the cositing (negative will move
* left, right otherwise)
*/
- int sign = 1;
+ int h_sign = flip_horz_scan_dir ? -1 : 1;
+ int v_sign = flip_vert_scan_dir ? -1 : 1;
switch (spl_in->basic_in.cositing) {
-
case CHROMA_COSITING_TOPLEFT:
- init_adj_h = spl_fixpt_from_fraction(sign, 4);
- init_adj_v = spl_fixpt_from_fraction(sign, 4);
+ init_adj_h = spl_fixpt_from_fraction(h_sign, 4);
+ init_adj_v = spl_fixpt_from_fraction(v_sign, 4);
break;
case CHROMA_COSITING_LEFT:
- init_adj_h = spl_fixpt_from_fraction(sign, 4);
+ init_adj_h = spl_fixpt_from_fraction(h_sign, 4);
init_adj_v = spl_fixpt_zero;
break;
case CHROMA_COSITING_NONE:
@@ -1018,6 +1018,21 @@ static bool spl_get_optimal_number_of_taps(
spl_scratch->scl_data.taps.h_taps_c = 6;
spl_scratch->scl_data.taps.v_taps_c = 6;
}
+
+ /* Override mode: keep EASF enabled but use input taps if valid */
+ if (spl_in->override_easf) {
+ spl_scratch->scl_data.taps.h_taps = (in_taps->h_taps != 0) ? in_taps->h_taps : spl_scratch->scl_data.taps.h_taps;
+ spl_scratch->scl_data.taps.v_taps = (in_taps->v_taps != 0) ? in_taps->v_taps : spl_scratch->scl_data.taps.v_taps;
+ spl_scratch->scl_data.taps.h_taps_c = (in_taps->h_taps_c != 0) ? in_taps->h_taps_c : spl_scratch->scl_data.taps.h_taps_c;
+ spl_scratch->scl_data.taps.v_taps_c = (in_taps->v_taps_c != 0) ? in_taps->v_taps_c : spl_scratch->scl_data.taps.v_taps_c;
+
+ if ((spl_scratch->scl_data.taps.h_taps > 6) || (spl_scratch->scl_data.taps.v_taps > 6))
+ skip_easf = true;
+ if ((spl_scratch->scl_data.taps.h_taps > 1) && (spl_scratch->scl_data.taps.h_taps % 2))
+ spl_scratch->scl_data.taps.h_taps--;
+ if ((spl_scratch->scl_data.taps.h_taps_c > 1) && (spl_scratch->scl_data.taps.h_taps_c % 2))
+ spl_scratch->scl_data.taps.h_taps_c--;
+ }
}
/*Ensure we can support the requested number of vtaps*/
diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
index 23d254dea18f..20e4e52a77ac 100644
--- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h
@@ -545,6 +545,7 @@ struct spl_in {
enum linear_light_scaling lls_pref; // Linear Light Scaling
bool prefer_easf;
bool disable_easf;
+ bool override_easf; /* If true, keep EASF enabled but use provided in_taps */
struct spl_debug debug;
bool is_fullscreen;
bool is_hdr_on;
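
The override_easf path added in dc_spl.c above reduces to three rules: non-zero caller-provided taps win, more than 6 taps on either axis disables EASF, and horizontal taps are forced even (the patch applies the even-taps fixup only horizontally). A standalone sketch of those rules, with hypothetical types, not part of the patch:

	struct taps { unsigned int h, v, h_c, v_c; };

	/* Returns true if EASF must be skipped after applying the override. */
	static bool apply_tap_override(struct taps *taps, const struct taps *in)
	{
		bool skip_easf = false;

		/* caller-provided taps win when non-zero */
		if (in->h)   taps->h = in->h;
		if (in->v)   taps->v = in->v;
		if (in->h_c) taps->h_c = in->h_c;
		if (in->v_c) taps->v_c = in->v_c;

		/* EASF supports at most 6 taps per axis */
		if (taps->h > 6 || taps->v > 6)
			skip_easf = true;

		/* horizontal taps must be even */
		if (taps->h > 1 && (taps->h % 2))
			taps->h--;
		if (taps->h_c > 1 && (taps->h_c % 2))
			taps->h_c--;

		return skip_easf;
	}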
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index 0bafb6710761..9d0168986fe7 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -132,6 +132,7 @@ enum dmub_window_id {
DMUB_WINDOW_IB_MEM,
DMUB_WINDOW_SHARED_STATE,
DMUB_WINDOW_LSDMA_BUFFER,
+ DMUB_WINDOW_CURSOR_OFFLOAD,
DMUB_WINDOW_TOTAL,
};
@@ -316,6 +317,8 @@ struct dmub_srv_hw_params {
bool disable_sldo_opt;
bool enable_non_transparent_setconfig;
bool lower_hbr3_phy_ssc;
+ bool override_hbr3_pll_vco;
+ bool disable_dpia_bw_allocation;
};
/**
@@ -360,6 +363,19 @@ struct dmub_diagnostic_data {
uint8_t is_pwait : 1;
};
+/**
+ * struct dmub_preos_info - pre-OS firmware info captured before the post-OS firmware is loaded.
+ */
+struct dmub_preos_info {
+ uint64_t fb_base;
+ uint64_t fb_offset;
+ uint64_t trace_buffer_phy_addr;
+ uint32_t trace_buffer_size;
+ uint32_t fw_version;
+ uint32_t boot_status;
+ uint32_t boot_options;
+};
+
struct dmub_srv_inbox {
/* generic status */
uint64_t num_submitted;
@@ -485,6 +501,7 @@ struct dmub_srv_hw_funcs {
uint32_t (*get_current_time)(struct dmub_srv *dmub);
void (*get_diagnostic_data)(struct dmub_srv *dmub);
+ bool (*get_preos_fw_info)(struct dmub_srv *dmub);
bool (*should_detect)(struct dmub_srv *dmub);
void (*init_reg_offsets)(struct dmub_srv *dmub, struct dc_context *ctx);
@@ -534,7 +551,8 @@ struct dmub_srv_create_params {
* @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
* @shared_state: dmub shared state between firmware and driver
- * @fw_state: dmub firmware state pointer
+ * @cursor_offload_v1: Cursor offload state
+ * @fw_state: dmub firmware state pointer (debug purposes only)
*/
struct dmub_srv {
enum dmub_asic asic;
@@ -543,7 +561,9 @@ struct dmub_srv {
bool is_virtual;
struct dmub_fb scratch_mem_fb;
struct dmub_fb ib_mem_gart;
+ struct dmub_fb cursor_offload_fb;
volatile struct dmub_shared_state_feature_block *shared_state;
+ volatile struct dmub_cursor_offload_v1 *cursor_offload_v1;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
@@ -567,6 +587,7 @@ struct dmub_srv {
bool sw_init;
bool hw_init;
+ bool dpia_supported;
uint64_t fb_base;
uint64_t fb_offset;
@@ -581,6 +602,7 @@ struct dmub_srv {
enum dmub_srv_power_state_type power_state;
struct dmub_diagnostic_data debug;
struct dmub_fb lsdma_rb_fb;
+ struct dmub_preos_info preos_info;
};
/**
@@ -597,6 +619,8 @@ struct dmub_notification {
enum dmub_notification_type type;
uint8_t link_index;
uint8_t result;
+ /* notify instance from DMUB */
+ uint8_t instance;
bool pending_notification;
union {
struct aux_reply_data aux_reply;
@@ -1064,4 +1088,14 @@ enum dmub_status dmub_srv_wait_for_inbox_free(struct dmub_srv *dmub,
*/
enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub);
+/**
+ * dmub_srv_get_preos_info() - retrieves preos fw info
+ * @dmub: the dmub service
+ *
+ * Return:
+ * true - preos fw info retrieved successfully
+ * false - failed to retrieve preos fw info (e.g. pre-OS firmware is not running)
+ */
+bool dmub_srv_get_preos_info(struct dmub_srv *dmub);
+
#endif /* _DMUB_SRV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 6a69a788abe8..3f2a0ed02c59 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -485,7 +485,19 @@ union replay_debug_flags {
*/
uint32_t enable_visual_confirm_debug : 1;
- uint32_t reserved : 18;
+ /**
+ * 0x4000 (bit 14)
+ * @debug_log_enabled: Debug Log Enabled
+ */
+ uint32_t debug_log_enabled : 1;
+
+ /**
+ * 0x8000 (bit 15)
+ * @enable_sub_feature_visual_confirm: Enable Sub Feature Visual Confirm
+ */
+ uint32_t enable_sub_feature_visual_confirm : 1;
+
+ uint32_t reserved : 16;
} bitfields;
uint32_t u32All;
@@ -593,6 +605,104 @@ union replay_hw_flags {
uint32_t u32All;
};
+/**
+ * Flags that can be set by driver to change some Panel Replay behaviour.
+ */
+union pr_debug_flags {
+ struct {
+ /**
+ * 0x1 (bit 0)
+ * Enable visual confirm in FW.
+ */
+ uint32_t visual_confirm : 1;
+
+ /**
+ * 0x2 (bit 1)
+	 * @skip_crc: Set to skip CRC checking.
+ */
+ uint32_t skip_crc : 1;
+
+ /**
+ * 0x4 (bit 2)
+ * @force_link_power_on: Force disable ALPM control
+ */
+ uint32_t force_link_power_on : 1;
+
+ /**
+ * 0x8 (bit 3)
+ * @force_phy_power_on: Force phy power on
+ */
+ uint32_t force_phy_power_on : 1;
+
+ /**
+ * 0x10 (bit 4)
+ * @skip_crtc_disabled: CRTC disable skipped
+ */
+ uint32_t skip_crtc_disabled : 1;
+
+ /*
+ * 0x20 (bit 5)
+ * @visual_confirm_rate_control: Enable Visual Confirm rate control detection
+ */
+ uint32_t visual_confirm_rate_control : 1;
+
+ uint32_t reserved : 26;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
+union pr_hw_flags {
+ struct {
+ /**
+ * @allow_alpm_fw_standby_mode: To indicate whether the
+ * ALPM FW standby mode is allowed
+ */
+ uint32_t allow_alpm_fw_standby_mode : 1;
+
+ /*
+ * @dsc_enable_status: DSC enable status in driver
+ */
+ uint32_t dsc_enable_status : 1;
+
+ /**
+ * @fec_enable_status: receive fec enable/disable status from driver
+ */
+ uint32_t fec_enable_status : 1;
+
+ /*
+ * @smu_optimizations_en: SMU power optimization.
+	 * Used only when the active display is Replay capable and enters Replay;
+	 * triggers an interrupt to the SMU to power up/down.
+ */
+ uint32_t smu_optimizations_en : 1;
+
+ /**
+ * @phy_power_state: Indicates current phy power state
+ */
+ uint32_t phy_power_state : 1;
+
+ /**
+ * @link_power_state: Indicates current link power state
+ */
+ uint32_t link_power_state : 1;
+ /**
+	 * Use the TPS3 signal when restoring the main link.
+ */
+ uint32_t force_wakeup_by_tps3 : 1;
+ /**
+ * @is_alpm_initialized: Indicates whether ALPM is initialized
+ */
+ uint32_t is_alpm_initialized : 1;
+ /**
+ * @alpm_mode: Indicates ALPM mode selected
+ */
+ uint32_t alpm_mode : 2;
+ } bitfields;
+
+ uint32_t u32All;
+};
+
union fw_assisted_mclk_switch_version {
struct {
uint8_t minor : 5;
@@ -617,6 +727,7 @@ struct dmub_feature_caps {
uint8_t replay_supported;
uint8_t replay_reserved[3];
uint8_t abm_aux_backlight_support;
+ uint8_t lsdma_support_in_dmu;
};
struct dmub_visual_confirm_color {
@@ -629,6 +740,112 @@ struct dmub_visual_confirm_color {
uint16_t panel_inst;
};
+/**
+ * struct dmub_cursor_offload_pipe_data_dcn30_v1 - DCN30+ per pipe data.
+ */
+struct dmub_cursor_offload_pipe_data_dcn30_v1 {
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16;
+ uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5;
+ uint32_t reserved0[4];
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1;
+ uint32_t CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1;
+ uint32_t CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24;
+ uint32_t CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24;
+ uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS : 16;
+	uint32_t CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE : 16;
+ uint32_t reserved1[5];
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8;
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8;
+ uint32_t reserved2[3];
+};
+
+/**
+ * struct dmub_cursor_offload_pipe_data_dcn401_v1 - DCN401 per pipe data.
+ */
+struct dmub_cursor_offload_pipe_data_dcn401_v1 {
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS;
+ uint32_t CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH : 16;
+ uint32_t CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X : 16;
+ uint32_t CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y : 16;
+ uint32_t CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET : 13;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE : 3;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY : 1;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH : 2;
+ uint32_t CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK : 5;
+ uint32_t reserved0[4];
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ENABLE : 1;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_MODE : 3;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE : 1;
+ uint32_t CM_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN : 1;
+ uint32_t CM_CUR0_CURSOR0_COLOR0__CUR0_COLOR0 : 24;
+ uint32_t CM_CUR0_CURSOR0_COLOR1__CUR0_COLOR1 : 24;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_BIAS_G_Y : 16;
+	uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_G_Y__CUR0_FP_SCALE_G_Y : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_BIAS_RB_CRCB : 16;
+ uint32_t CM_CUR0_CURSOR0_FP_SCALE_BIAS_RB_CRCB__CUR0_FP_SCALE_RB_CRCB : 16;
+ uint32_t reserved1[4];
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET : 8;
+ uint32_t HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST : 8;
+ uint32_t HUBP0_DCHUBP_MALL_CONFIG__USE_MALL_FOR_CURSOR : 1;
+ uint32_t reserved2[3];
+};
+
+/**
+ * struct dmub_cursor_offload_pipe_data_v1 - Per pipe data for cursor offload.
+ */
+struct dmub_cursor_offload_pipe_data_v1 {
+ union {
+ struct dmub_cursor_offload_pipe_data_dcn30_v1 dcn30; /**< DCN30 cursor data. */
+ struct dmub_cursor_offload_pipe_data_dcn401_v1 dcn401; /**< DCN401 cursor data. */
+		uint8_t payload[96]; /**< Guarantees a fixed 96-byte per-pipe payload size. */
+ };
+};
+
+/**
+ * struct dmub_cursor_offload_payload_data_v1 - A payload of stream data.
+ */
+struct dmub_cursor_offload_payload_data_v1 {
+ uint32_t write_idx_start; /**< Write index, updated before pipe_data is written. */
+ uint32_t write_idx_finish; /**< Write index, updated after pipe_data is written. */
+ uint32_t pipe_mask; /**< Mask of pipes to update. */
+ uint32_t reserved; /**< Reserved for future use. */
+ struct dmub_cursor_offload_pipe_data_v1 pipe_data[6]; /**< Per-pipe cursor data. */
+};
+
+/**
+ * struct dmub_cursor_offload_stream_v1 - Per-stream data for cursor offload.
+ */
+struct dmub_cursor_offload_stream_v1 {
+ struct dmub_cursor_offload_payload_data_v1 payloads[4]; /**< A small buffer of cursor payloads. */
+ uint32_t write_idx; /**< The index of the last written payload. */
+};
+
+/**
+ * struct dmub_cursor_offload_v1 - Cursor offload feature state.
+ */
+struct dmub_cursor_offload_v1 {
+ struct dmub_cursor_offload_stream_v1 offload_streams[6]; /**< Per-stream cursor offload data */
+};
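
The write_idx_start/write_idx_finish pair brackets each payload update, which permits a seqlock-style tear check on the consumer side. The reader below is an illustrative assumption (memory barriers omitted for brevity), not a protocol documented by this patch:

	#include <string.h>

	/* Returns true if a consistent snapshot of the payload was taken. */
	static bool read_payload_snapshot(
		const volatile struct dmub_cursor_offload_payload_data_v1 *p,
		struct dmub_cursor_offload_payload_data_v1 *out)
	{
		uint32_t start = p->write_idx_start;

		/* Cast drops volatile for the bulk copy; tearing is caught below. */
		memcpy(out, (const void *)p, sizeof(*out));

		/* Per the field comments, the writer bumps write_idx_start before
		 * and write_idx_finish after filling pipe_data, so both checks
		 * passing implies the copy saw a single, completed update.
		 */
		return p->write_idx_finish == start && p->write_idx_start == start;
	}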
+
//==============================================================================
//</DMUB_TYPES>=================================================================
//==============================================================================
@@ -648,7 +865,8 @@ struct dmub_visual_confirm_color {
union dmub_fw_meta_feature_bits {
struct {
uint32_t shared_state_link_detection : 1; /**< 1 supports link detection via shared state */
- uint32_t reserved : 31;
+ uint32_t cursor_offload_v1_support: 1; /**< 1 supports cursor offload */
+ uint32_t reserved : 30;
} bits; /**< status bits */
uint32_t all; /**< 32-bit access to status bits */
};
@@ -814,6 +1032,28 @@ enum dmub_ips_comand_type {
};
/**
+ * enum dmub_cursor_offload_comand_type - Cursor offload subcommands.
+ */
+enum dmub_cursor_offload_comand_type {
+ /**
+ * Initializes the cursor offload feature.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_INIT = 0,
+ /**
+ * Enables cursor offloading for a stream and updates the timing parameters.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE = 1,
+ /**
+ * Disables cursor offloading for a given stream.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE = 2,
+ /**
+ * Programs the latest data for a given stream.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM = 3,
+};
+
+/**
* union dmub_fw_boot_options - Boot option definitions for SCRATCH14
*/
union dmub_fw_boot_options {
@@ -843,7 +1083,9 @@ union dmub_fw_boot_options {
uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */
uint32_t disable_sldo_opt: 1; /**< 1 to disable SLDO optimizations */
uint32_t lower_hbr3_phy_ssc: 1; /**< 1 to lower hbr3 phy ssc to 0.125 percent */
- uint32_t reserved : 6; /**< reserved */
+ uint32_t override_hbr3_pll_vco: 1; /**< 1 to override the hbr3 pll vco to 0 */
+ uint32_t disable_dpia_bw_allocation: 1; /**< 1 to disable the USB4 DPIA BW allocation */
+ uint32_t reserved : 4; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -876,13 +1118,14 @@ enum dmub_shared_state_feature_id {
DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
DMUB_SHARED_SHARE_FEATURE__DEBUG_SETUP = 3,
+ DMUB_SHARED_STATE_FEATURE__CURSOR_OFFLOAD_V1 = 4,
DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
};
/**
* struct dmub_shared_state_ips_fw - Firmware signals for IPS.
*/
- union dmub_shared_state_ips_fw_signals {
+union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
@@ -897,7 +1140,7 @@ enum dmub_shared_state_feature_id {
/**
* struct dmub_shared_state_ips_signals - Firmware signals for IPS.
*/
- union dmub_shared_state_ips_driver_signals {
+union dmub_shared_state_ips_driver_signals {
struct {
uint32_t allow_pg : 1; /**< 1 if PG is allowed */
uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */
@@ -957,6 +1200,22 @@ struct dmub_shared_state_ips_driver {
}; /* 248-bytes, fixed */
/**
+ * struct dmub_shared_state_cursor_offload_stream_v1 - Per-stream metadata for cursor offload.
+ */
+struct dmub_shared_state_cursor_offload_stream_v1 {
+ uint32_t last_write_idx; /**< Last write index */
+ uint8_t reserved[28]; /**< Reserved bytes. */
+}; /* 32-bytes, fixed */
+
+/**
+ * struct dmub_shared_state_cursor_offload_v1 - Header metadata for cursor offload.
+ */
+struct dmub_shared_state_cursor_offload_v1 {
+ struct dmub_shared_state_cursor_offload_stream_v1 offload_streams[6]; /**< stream state, 32-bytes each */
+ uint8_t reserved[56]; /**< reserved for future use */
+}; /* 248-bytes, fixed */
+
+/**
* enum dmub_shared_state_feature_common - Generic payload.
*/
struct dmub_shared_state_feature_common {
@@ -982,6 +1241,7 @@ struct dmub_shared_state_feature_block {
struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
struct dmub_shared_state_debug_setup debug_setup; /**< Debug setup */
+ struct dmub_shared_state_cursor_offload_v1 cursor_offload_v1; /**< Cursor offload */
} data; /**< Shared state data. */
}; /* 256-bytes, fixed */
@@ -1571,6 +1831,25 @@ enum dmub_cmd_type {
*/
DMUB_CMD__IPS = 91,
+ /**
+	 * Command type used for cursor offload.
+ */
+ DMUB_CMD__CURSOR_OFFLOAD = 92,
+
+ /**
+ * Command type used for all SMART_POWER_OLED commands.
+ */
+ DMUB_CMD__SMART_POWER_OLED = 93,
+
+ /**
+	 * Command type used for all Panel Replay commands.
+ */
+ DMUB_CMD__PR = 94,
+
+ /**
+	 * Command type used for VBIOS shared commands.
+ */
DMUB_CMD__VBIOS = 128,
};
@@ -1990,18 +2269,19 @@ struct dmub_cmd_lsdma_data {
struct lsdma_tiled_copy_data {
uint32_t src_addr_lo;
uint32_t src_addr_hi;
+
uint32_t dst_addr_lo;
uint32_t dst_addr_hi;
uint32_t src_x : 16;
uint32_t src_y : 16;
- uint32_t src_width : 16;
- uint32_t src_height : 16;
-
uint32_t dst_x : 16;
uint32_t dst_y : 16;
+ uint32_t src_width : 16;
+ uint32_t src_height : 16;
+
uint32_t dst_width : 16;
uint32_t dst_height : 16;
@@ -2034,23 +2314,58 @@ struct dmub_cmd_lsdma_data {
uint32_t padding : 30;
} tiled_copy_data;
struct lsdma_linear_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
uint32_t count : 30;
uint32_t cache_policy_dst : 2;
uint32_t tmz : 1;
uint32_t cache_policy_src : 2;
uint32_t padding : 29;
-
+ } linear_copy_data;
+ struct lsdma_linear_sub_window_copy_data {
uint32_t src_lo;
uint32_t src_hi;
+
uint32_t dst_lo;
uint32_t dst_hi;
- } linear_copy_data;
+
+ uint32_t src_x : 16;
+ uint32_t src_y : 16;
+
+ uint32_t dst_x : 16;
+ uint32_t dst_y : 16;
+
+ uint32_t rect_x : 16;
+ uint32_t rect_y : 16;
+
+ uint32_t src_pitch : 16;
+ uint32_t dst_pitch : 16;
+
+ uint32_t src_slice_pitch;
+ uint32_t dst_slice_pitch;
+
+ uint32_t tmz : 1;
+ uint32_t element_size : 3;
+ uint32_t src_cache_policy : 3;
+ uint32_t dst_cache_policy : 3;
+ uint32_t reserved0 : 22;
+ } linear_sub_window_copy_data;
struct lsdma_reg_write_data {
uint32_t reg_addr;
uint32_t reg_data;
} reg_write_data;
struct lsdma_pio_copy_data {
+ uint32_t src_lo;
+ uint32_t src_hi;
+
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
union {
struct {
uint32_t byte_count : 26;
@@ -2063,12 +2378,11 @@ struct dmub_cmd_lsdma_data {
} fields;
uint32_t raw;
} packet;
- uint32_t src_lo;
- uint32_t src_hi;
- uint32_t dst_lo;
- uint32_t dst_hi;
} pio_copy_data;
struct lsdma_pio_constfill_data {
+ uint32_t dst_lo;
+ uint32_t dst_hi;
+
union {
struct {
uint32_t byte_count : 26;
@@ -2081,14 +2395,12 @@ struct dmub_cmd_lsdma_data {
} fields;
uint32_t raw;
} packet;
- uint32_t dst_lo;
- uint32_t dst_hi;
+
uint32_t data;
} pio_constfill_data;
uint32_t all[14];
} u;
-
};
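
A sketch of filling the new sub-window descriptor for a 64x64 copy; the element_size and cache-policy encodings are firmware-defined, and the values here are purely illustrative:

	static void fill_sub_window_copy_example(struct dmub_cmd_lsdma_data *data,
						 uint64_t src_va, uint64_t dst_va)
	{
		struct lsdma_linear_sub_window_copy_data *c =
			&data->u.linear_sub_window_copy_data;

		c->src_lo = (uint32_t)src_va;
		c->src_hi = (uint32_t)(src_va >> 32);
		c->dst_lo = (uint32_t)dst_va;
		c->dst_hi = (uint32_t)(dst_va >> 32);

		c->src_x = 0;			/* source window origin */
		c->src_y = 0;
		c->dst_x = 16;			/* destination window origin */
		c->dst_y = 16;
		c->rect_x = 64;			/* assumed: sub-window width */
		c->rect_y = 64;			/* assumed: sub-window height */

		c->src_pitch = 1024;
		c->dst_pitch = 1024;

		c->tmz = 0;
		c->element_size = 2;		/* illustrative; encoding is firmware-defined */
	}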
struct dmub_rb_cmd_lsdma {
@@ -2330,10 +2642,12 @@ struct dmub_cmd_fams2_global_config {
union dmub_fams2_global_feature_config features;
uint32_t recovery_timeout_us;
uint32_t hwfq_flip_programming_delay_us;
+ uint32_t max_allow_to_target_delta_us; // how early DCN could assert P-State allow compared to the P-State target
};
union dmub_cmd_fams2_config {
struct dmub_cmd_fams2_global_config global;
+// coverity[cert_dcl37_c_violation:FALSE] errno.h, stddef.h, stdint.h not included in atombios.h
struct dmub_fams2_stream_static_state stream; //v0
union {
struct dmub_fams2_cmd_stream_static_base_state base;
@@ -3946,6 +4260,33 @@ enum replay_state {
};
/**
+ * Definition of a panel replay state
+ */
+enum pr_state {
+ PR_STATE_0 = 0x00, // State 0 steady state
+ // Pending SDP and Unlock before back to State 0
+ PR_STATE_0_PENDING_SDP_AND_UNLOCK = 0x01,
+ PR_STATE_1 = 0x10, // State 1
+ PR_STATE_2 = 0x20, // State 2 steady state
+ // Pending frame transmission before transition to State 2
+ PR_STATE_2_PENDING_FRAME_TRANSMISSION = 0x30,
+ // Active and Powered Up
+ PR_STATE_2_POWERED = 0x31,
+ // Active and Powered Down, but need to blank HUBP after DPG_EN latch
+ PR_STATE_2_PENDING_HUBP_BLANK = 0x32,
+ // Active and Pending Power Up
+ PR_STATE_2_PENDING_POWER_UP = 0x33,
+ // Active and Powered Up, Pending DPG latch
+ PR_STATE_2_PENDING_LOCK_FOR_DPG_POWER_ON = 0x34,
+ // Active and Powered Up, Pending SDP and Unlock
+ PR_STATE_2_PENDING_SDP_AND_UNLOCK = 0x35,
+ // Pending transmission of AS SDP for timing sync, but no rfb update
+ PR_STATE_2_PENDING_AS_SDP = 0x36,
+ // Invalid
+ PR_STATE_INVALID = 0xFF,
+};
+
+/**
* Replay command sub-types.
*/
enum dmub_cmd_replay_type {
@@ -3986,11 +4327,34 @@ enum dmub_cmd_replay_type {
*/
DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8,
/**
+ * Set version
+ */
+ DMUB_CMD__REPLAY_SET_VERSION = 9,
+ /**
* Set Replay General command.
*/
DMUB_CMD__REPLAY_SET_GENERAL_CMD = 16,
};
+/*
+ * Panel Replay sub-types
+ */
+enum dmub_cmd_panel_replay_type {
+ DMUB_CMD__PR_ENABLE = 0,
+ DMUB_CMD__PR_COPY_SETTINGS = 1,
+ DMUB_CMD__PR_UPDATE_STATE = 2,
+ DMUB_CMD__PR_GENERAL_CMD = 3,
+};
+
+enum dmub_cmd_panel_replay_state_update_subtype {
+ PR_STATE_UPDATE_COASTING_VTOTAL = 0x1,
+ PR_STATE_UPDATE_SYNC_MODE = 0x2,
+};
+
+enum dmub_cmd_panel_replay_general_subtype {
+ PR_GENERAL_CMD_DEBUG_OPTION = 0x1,
+};
+
/**
* Replay general command sub-types.
*/
@@ -4006,6 +4370,7 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_DISABLED_DESYNC_ERROR_DETECTION,
REPLAY_GENERAL_CMD_UPDATE_ERROR_STATUS,
REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
+ REPLAY_GENERAL_CMD_VIDEO_CONFERENCING,
};
struct dmub_alpm_auxless_data {
@@ -4015,6 +4380,10 @@ struct dmub_alpm_auxless_data {
uint16_t lfps_t1_t2_override_us;
short lfps_t1_t2_offset_us;
uint8_t lttpr_count;
+ /*
+ * Padding to align structure to 4 byte boundary.
+ */
+ uint8_t pad[1];
};
/**
@@ -4091,11 +4460,75 @@ struct dmub_cmd_replay_copy_settings_data {
* Use for AUX-less ALPM LFPS wake operation
*/
struct dmub_alpm_auxless_data auxless_alpm_data;
-
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * Determines if fast resync in ultra sleep mode is enabled/disabled.
+ */
+ uint8_t replay_support_fast_resync_in_ultra_sleep_mode;
/**
* @pad: Align structure to 4 byte boundary.
*/
- uint8_t pad[2];
+ uint8_t pad[1];
+};
+
+
+/**
+ * Replay versions.
+ */
+enum replay_version {
+ /**
+ * FreeSync Replay
+ */
+ REPLAY_VERSION_FREESYNC_REPLAY = 0,
+ /**
+ * Panel Replay
+ */
+ REPLAY_VERSION_PANEL_REPLAY = 1,
+ /**
+ * Replay not supported.
+ */
+ REPLAY_VERSION_UNSUPPORTED = 0xFF,
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_cmd_replay_set_version_data {
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Replay version that FW should implement.
+ */
+ enum replay_version version;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[3];
+};
+
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+struct dmub_rb_cmd_replay_set_version {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_cmd_replay_set_version_data replay_set_version_data;
};
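
A sketch of building this command; DMUB_CMD__REPLAY and the header field names (type, sub_type, payload_bytes) follow the existing dmub_cmd.h conventions rather than anything shown in this hunk:

	#include <string.h>

	static void build_replay_set_version_cmd(union dmub_rb_cmd *cmd,
						 uint8_t panel_inst)
	{
		memset(cmd, 0, sizeof(*cmd));

		cmd->replay_set_version.header.type = DMUB_CMD__REPLAY;
		cmd->replay_set_version.header.sub_type = DMUB_CMD__REPLAY_SET_VERSION;
		cmd->replay_set_version.header.payload_bytes =
			sizeof(struct dmub_cmd_replay_set_version_data);

		cmd->replay_set_version.replay_set_version_data.panel_inst = panel_inst;
		cmd->replay_set_version.replay_set_version_data.version =
			REPLAY_VERSION_PANEL_REPLAY;
	}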
/**
@@ -4127,6 +4560,45 @@ enum replay_enable {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_ENABLE command.
+ */
+struct dmub_rb_cmd_smart_power_oled_enable_data {
+ /**
+ * SMART_POWER_OLED enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * Panel Instance.
+ * Panel isntance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+
+ uint16_t peak_nits;
+ /**
+ * OTG HW instance.
+ */
+ uint8_t otg_inst;
+ /**
+ * DIG FE HW instance.
+ */
+ uint8_t digfe_inst;
+ /**
+ * DIG BE HW instance.
+ */
+ uint8_t digbe_inst;
+ uint8_t debugcontrol;
+ /*
+ * vertical interrupt trigger line
+ */
+ uint32_t triggerline;
+
+ uint16_t fixed_max_cll;
+
+ uint8_t pad[2];
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__REPLAY_ENABLE command.
*/
struct dmub_rb_cmd_replay_enable_data {
@@ -4297,9 +4769,9 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
*/
uint16_t coasting_vtotal_high;
/**
- * Explicit padding to 4 byte boundary.
+ * frame skip number.
*/
- uint8_t pad[2];
+ uint16_t frame_skip_number;
};
/**
@@ -4450,12 +4922,68 @@ union dmub_replay_cmd_set {
*/
struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data;
/**
+ * Definition of DMUB_CMD__REPLAY_SET_VERSION command data.
+ */
+ struct dmub_cmd_replay_set_version_data version_data;
+ /**
* Definition of DMUB_CMD__REPLAY_SET_GENERAL_CMD command data.
*/
struct dmub_cmd_replay_set_general_cmd_data set_general_cmd_data;
};
/**
+ * SMART POWER OLED command sub-types.
+ */
+enum dmub_cmd_smart_power_oled_type {
+
+ /**
+ * Enable/Disable SMART_POWER_OLED.
+ */
+ DMUB_CMD__SMART_POWER_OLED_ENABLE = 1,
+ /**
+ * Get current MaxCLL value if SMART POWER OLED is enabled.
+ */
+ DMUB_CMD__SMART_POWER_OLED_GETMAXCLL = 2,
+};
+
+/**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED command.
+ */
+struct dmub_rb_cmd_smart_power_oled_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ struct dmub_rb_cmd_smart_power_oled_enable_data data;
+};
+
+struct dmub_cmd_smart_power_oled_getmaxcll_input {
+ uint8_t panel_inst;
+ uint8_t pad[3];
+};
+
+struct dmub_cmd_smart_power_oled_getmaxcll_output {
+ uint16_t current_max_cll;
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED command.
+ */
+struct dmub_rb_cmd_smart_power_oled_getmaxcll {
+ struct dmub_cmd_header header; /**< Command header */
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command.
+ */
+ union dmub_cmd_smart_power_oled_getmaxcll_data {
+ struct dmub_cmd_smart_power_oled_getmaxcll_input input; /**< Input */
+ struct dmub_cmd_smart_power_oled_getmaxcll_output output; /**< Output */
+ uint32_t output_raw; /**< Raw data output */
+ } data;
+};
+
+/**
* Set of HW components that can be locked.
*
* Note: If updating with more HW components, fields
@@ -4537,6 +5065,7 @@ enum hw_lock_client {
*/
HW_LOCK_CLIENT_REPLAY = 4,
HW_LOCK_CLIENT_FAMS2 = 5,
+ HW_LOCK_CLIENT_CURSOR_OFFLOAD = 6,
/**
* Invalid client.
*/
@@ -4665,21 +5194,25 @@ enum dmub_cmd_lsdma_type {
*/
DMUB_CMD__LSDMA_LINEAR_COPY = 1,
/**
+ * LSDMA copies data from source to destination linearly in sub window
+ */
+ DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY = 2,
+ /**
* Send the tiled-to-tiled copy command
*/
- DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 2,
+ DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 3,
/**
* Send the poll reg write command
*/
- DMUB_CMD__LSDMA_POLL_REG_WRITE = 3,
+ DMUB_CMD__LSDMA_POLL_REG_WRITE = 4,
/**
* Send the pio copy command
*/
- DMUB_CMD__LSDMA_PIO_COPY = 4,
+ DMUB_CMD__LSDMA_PIO_COPY = 5,
/**
* Send the pio constfill command
*/
- DMUB_CMD__LSDMA_PIO_CONSTFILL = 5,
+ DMUB_CMD__LSDMA_PIO_CONSTFILL = 6,
};
struct abm_ace_curve {
@@ -5894,6 +6427,9 @@ enum ips_residency_mode {
IPS_RESIDENCY__IPS2,
IPS_RESIDENCY__IPS1_RCG,
IPS_RESIDENCY__IPS1_ONO2_ON,
+ IPS_RESIDENCY__IPS1_Z8_RETENTION,
+ IPS_RESIDENCY__PG_ONO_LAST_SEEN_IN_IPS,
+ IPS_RESIDENCY__PG_ONO_CURRENT_STATE
};
#define NUM_IPS_HISTOGRAM_BUCKETS 16
@@ -5907,6 +6443,8 @@ struct dmub_ips_residency_info {
uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS];
uint64_t total_time_us;
uint64_t total_inactive_time_us;
+ uint32_t ono_pg_state_at_collection;
+ uint32_t ono_pg_state_last_seen_in_ips;
};
/**
@@ -5940,6 +6478,257 @@ struct dmub_rb_cmd_ips_query_residency_info {
};
/**
+ * struct dmub_cmd_cursor_offload_init_data - Payload for cursor offload init command.
+ */
+struct dmub_cmd_cursor_offload_init_data {
+ union dmub_addr state_addr; /**< State address for dmub_cursor_offload */
+ uint32_t state_size; /**< State size for dmub_cursor_offload */
+};
+
+/**
+ * struct dmub_rb_cmd_cursor_offload_init - Data for initializing cursor offload.
+ */
+struct dmub_rb_cmd_cursor_offload_init {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_cursor_offload_init_data init_data;
+};
+
+/**
+ * struct dmub_cmd_cursor_offload_stream_data - Payload for cursor offload stream command.
+ */
+struct dmub_cmd_cursor_offload_stream_data {
+ uint32_t otg_inst: 4; /**< OTG instance to control */
+ uint32_t reserved: 28; /**< Reserved for future use */
+ uint32_t line_time_in_ns; /**< Line time in ns for the OTG */
+ uint32_t v_total_max; /**< OTG v_total_max */
+};
+
+/**
+ * struct dmub_rb_cmd_cursor_offload_stream_cntl - Controls a stream for cursor offload.
+ */
+struct dmub_rb_cmd_cursor_offload_stream_cntl {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_cursor_offload_stream_data data;
+};
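
A sketch of the init command, assuming union dmub_addr exposes quad_part as elsewhere in dmub and that the driver allocated cursor_offload_fb for this purpose:

	#include <string.h>

	static void build_cursor_offload_init(union dmub_rb_cmd *cmd,
					      const struct dmub_srv *dmub)
	{
		memset(cmd, 0, sizeof(*cmd));

		cmd->cursor_offload_init.header.type = DMUB_CMD__CURSOR_OFFLOAD;
		cmd->cursor_offload_init.header.sub_type = DMUB_CMD__CURSOR_OFFLOAD_INIT;
		cmd->cursor_offload_init.header.payload_bytes =
			sizeof(struct dmub_cmd_cursor_offload_init_data);

		/* assumed: state buffer lives in the new cursor_offload_fb window */
		cmd->cursor_offload_init.init_data.state_addr.quad_part =
			dmub->cursor_offload_fb.gpu_addr;
		cmd->cursor_offload_init.init_data.state_size =
			sizeof(struct dmub_cursor_offload_v1);
	}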
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__PR_ENABLE command.
+ */
+struct dmub_cmd_pr_enable_data {
+ /**
+ * Panel Replay enable or disable.
+ */
+ uint8_t enable;
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which replay_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Phy state to enter.
+ * Values to use are defined in dmub_phy_fsm_state
+ */
+ uint8_t phy_fsm_state;
+ /**
+ * Phy rate for DP - RBR/HBR/HBR2/HBR3.
+ * Set this using enum phy_link_rate.
+ * This does not support HDMI/DP2 for now.
+ */
+ uint8_t phy_rate;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_ENABLE command.
+ * Panel Replay enable/disable is controlled using action in data.
+ */
+struct dmub_rb_cmd_pr_enable {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ struct dmub_cmd_pr_enable_data data;
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+struct dmub_cmd_pr_copy_settings_data {
+ /**
+ * Flags that can be set by driver to change some replay behaviour.
+ */
+ union pr_debug_flags debug;
+
+ /**
+ * @flags: Flags used to determine feature functionality.
+ */
+ union pr_hw_flags flags;
+
+ /**
+ * DPP HW instance.
+ */
+ uint8_t dpp_inst;
+ /**
+ * OTG HW instance.
+ */
+ uint8_t otg_inst;
+ /**
+ * DIG FE HW instance.
+ */
+ uint8_t digfe_inst;
+ /**
+ * DIG BE HW instance.
+ */
+ uint8_t digbe_inst;
+ /**
+ * AUX HW instance.
+ */
+ uint8_t aux_inst;
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which psr_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * Length of each horizontal line in ns.
+ */
+ uint32_t line_time_in_ns;
+ /**
+ * PHY instance.
+ */
+ uint8_t dpphy_inst;
+ /**
+	 * Determines if SMU optimizations are enabled/disabled.
+ */
+ uint8_t smu_optimizations_en;
+ /*
+ * Use FSM state for Replay power up/down
+ */
+ uint8_t use_phy_fsm;
+ /*
+	 * FSFT-affected pixel clock
+ */
+ uint32_t pix_clk_100hz;
+ /*
+	 * Original (sink) pixel clock
+ */
+ uint32_t sink_pix_clk_100hz;
+ /**
+ * Use for AUX-less ALPM LFPS wake operation
+ */
+ struct dmub_alpm_auxless_data auxless_alpm_data;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+struct dmub_rb_cmd_pr_copy_settings {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+ struct dmub_cmd_pr_copy_settings_data data;
+};
+
+struct dmub_cmd_pr_update_state_data {
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which psr_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+
+ uint8_t pad[3]; // align to 4-byte boundary
+ /*
+ * Update flags to control the update behavior.
+ */
+ uint32_t update_flag;
+ /**
+ * state/data to set.
+ */
+ uint32_t coasting_vtotal;
+ uint32_t sync_mode;
+};
+
+struct dmub_cmd_pr_general_cmd_data {
+ /**
+ * Panel Instance.
+	 * Panel instance to identify which psr_state to use
+ * Currently the support is only for 0 or 1
+ */
+ uint8_t panel_inst;
+ /**
+ * subtype: PR general cmd sub type
+ */
+ uint8_t subtype;
+
+ uint8_t pad[2];
+ /**
+ * config data by different subtypes
+ */
+ union {
+ uint32_t u32All;
+ } data;
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_UPDATE_STATE command.
+ */
+struct dmub_rb_cmd_pr_update_state {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__PR_UPDATE_STATE command.
+ */
+ struct dmub_cmd_pr_update_state_data data;
+};
+
+/**
+ * Definition of a DMUB_CMD__PR_GENERAL_CMD command.
+ */
+struct dmub_rb_cmd_pr_general_cmd {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__PR_GENERAL_CMD command.
+ */
+ struct dmub_cmd_pr_general_cmd_data data;
+};
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -6203,6 +6992,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
*/
struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state;
+ /**
+ * Definition of a DMUB_CMD__REPLAY_SET_VERSION command.
+ */
+ struct dmub_rb_cmd_replay_set_version replay_set_version;
/*
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
@@ -6264,6 +7057,38 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl;
struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info;
+ /**
+ * Definition of a DMUB_CMD__CURSOR_OFFLOAD_INIT command.
+ */
+ struct dmub_rb_cmd_cursor_offload_init cursor_offload_init;
+ /**
+	 * Definitions of the DMUB_CMD__CURSOR_OFFLOAD control commands:
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_ENABLE
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_DISABLE
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_PROGRAM
+ * - DMUB_CMD__CURSOR_OFFLOAD_STREAM_UPDATE_DRR
+ */
+ struct dmub_rb_cmd_cursor_offload_stream_cntl cursor_offload_stream_ctnl;
+ /**
+ * Definition of a DMUB_CMD__SMART_POWER_OLED_ENABLE command.
+ */
+ struct dmub_rb_cmd_smart_power_oled_enable smart_power_oled_enable;
+ /**
+	 * Definition of a DMUB_CMD__SMART_POWER_OLED_GETMAXCLL command.
+ */
+ struct dmub_rb_cmd_smart_power_oled_getmaxcll smart_power_oled_getmaxcll;
+ /*
+	 * Definition of a DMUB_CMD__PR_COPY_SETTINGS command.
+ */
+ struct dmub_rb_cmd_pr_copy_settings pr_copy_settings;
+ /**
+	 * Definition of a DMUB_CMD__PR_ENABLE command.
+ */
+ struct dmub_rb_cmd_pr_enable pr_enable;
+
+ struct dmub_rb_cmd_pr_update_state pr_update_state;
+
+ struct dmub_rb_cmd_pr_general_cmd pr_general_cmd;
};
/**
@@ -6411,15 +7236,18 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
- uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt);
- const uint64_t *src = (const uint64_t *)cmd;
+ uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
+ const uint8_t *src = (const uint8_t *)cmd;
uint8_t i;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
// copying data
- for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ for (i = 0; i < DMUB_RB_CMD_SIZE; i++)
*dst++ = *src++;
rb->wrpt += DMUB_RB_CMD_SIZE;
@@ -6444,6 +7272,9 @@ static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,
uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
const uint8_t *src = (const uint8_t *)cmd;
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_full(rb))
return false;
@@ -6489,6 +7320,9 @@ static inline void dmub_rb_get_rptr_with_offset(struct dmub_rb *rb,
uint32_t num_cmds,
uint32_t *next_rptr)
{
+ if (rb->capacity == 0)
+ return;
+
*next_rptr = rb->rptr + DMUB_RB_CMD_SIZE * num_cmds;
if (*next_rptr >= rb->capacity)
@@ -6552,6 +7386,9 @@ static inline bool dmub_rb_out_front(struct dmub_rb *rb,
*/
static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
{
+ if (rb->capacity == 0)
+ return false;
+
if (dmub_rb_empty(rb))
return false;
@@ -6576,6 +7413,9 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
uint32_t rptr = rb->rptr;
uint32_t wptr = rb->wrpt;
+ if (rb->capacity == 0)
+ return;
+
while (rptr != wptr) {
uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr);
uint8_t i;
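
With the new capacity guards, the push/pop helpers now fail cleanly on an uninitialized ring instead of writing out of bounds. A minimal usage sketch (initialization of the ring via dmub_rb_init with a valid base_address and capacity is assumed to have happened elsewhere):

	static bool queue_and_drain_example(struct dmub_rb *rb)
	{
		union dmub_rb_cmd cmd = {0};

		/* fails for a full ring, or for one with capacity == 0 */
		if (!dmub_rb_push_front(rb, &cmd))
			return false;

		/* consume everything that is queued */
		while (dmub_rb_pop_front(rb))
			;

		return true;
	}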
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index 3f38db752b84..cd04d7c756c3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -377,8 +377,10 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
+ boot_options.bits.override_hbr3_pll_vco = params->override_hbr3_pll_vco;
boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
+ boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
index e7056205b050..7e9856289910 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
@@ -89,44 +89,58 @@ static inline void dmub_dcn32_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn32_reset(struct dmub_srv *dmub)
{
union dmub_gpint_data_register cmd;
- const uint32_t timeout = 30;
- uint32_t in_reset, scratch, i;
+ const uint32_t timeout_us = 1 * 1000 * 1000; //1s
+ const uint32_t poll_delay_us = 1; //1us
+ uint32_t i = 0;
+ uint32_t enabled, in_reset, scratch, pwait_mode;
- REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset);
+ REG_GET(DMCUB_CNTL,
+ DMCUB_ENABLE, &enabled);
+ REG_GET(DMCUB_CNTL2,
+ DMCUB_SOFT_RESET, &in_reset);
- if (in_reset == 0) {
+ if (enabled && in_reset == 0) {
cmd.bits.status = 1;
cmd.bits.command_code = DMUB_GPINT__STOP_FW;
cmd.bits.param = 0;
dmub->hw_funcs.set_gpint(dmub, cmd);
- /**
- * Timeout covers both the ACK and the wait
- * for remaining work to finish.
- *
- * This is mostly bound by the PHY disable sequence.
- * Each register check will be greater than 1us, so
- * don't bother using udelay.
- */
-
- for (i = 0; i < timeout; ++i) {
- if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+ for (; i < timeout_us; i++) {
+ scratch = REG_READ(DMCUB_SCRATCH7);
+ if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
+
+ udelay(poll_delay_us);
}
- for (i = 0; i < timeout; ++i) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
- if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ for (; i < timeout_us; i++) {
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode);
+ if (pwait_mode & (1 << 0))
break;
+
+ udelay(poll_delay_us);
}
+ }
- /* Force reset in case we timed out, DMCUB is likely hung. */
+ if (enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ udelay(1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
}
- REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
- REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
+ if (i >= timeout_us) {
+ /* timeout should never occur */
+ BREAK_TO_DEBUGGER();
+ }
+
+ REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0);
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -157,7 +171,9 @@ void dmub_dcn32_backdoor_load(struct dmub_srv *dmub,
dmub_dcn32_get_fb_base_offset(dmub, &fb_base, &fb_offset);
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn32_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -187,7 +203,9 @@ void dmub_dcn32_backdoor_load_zfb_mode(struct dmub_srv *dmub,
{
union dmub_addr offset;
+ /* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
@@ -419,8 +437,8 @@ uint32_t dmub_dcn32_get_current_time(struct dmub_srv *dmub)
void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
{
- uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset;
- uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled;
+ uint32_t is_dmub_enabled, is_soft_reset, is_pwait;
+ uint32_t is_traceport_enabled, is_cw6_enabled;
struct dmub_timeout_info timeout = {0};
if (!dmub)
@@ -470,18 +488,15 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub)
REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled);
dmub->debug.is_dmcub_enabled = is_dmub_enabled;
+ REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait);
+ dmub->debug.is_pwait = is_pwait;
+
REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset);
dmub->debug.is_dmcub_soft_reset = is_soft_reset;
- REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset);
- dmub->debug.is_dmcub_secure_reset = is_sec_reset;
-
REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled);
dmub->debug.is_traceport_en = is_traceport_enabled;
- REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled);
- dmub->debug.is_cw0_enabled = is_cw0_enabled;
-
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
dmub->debug.is_cw6_enabled = is_cw6_enabled;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
index 1a229450c53d..daf81027d663 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
@@ -89,6 +89,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_REGION5_OFFSET) \
DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
+ DMUB_SR(DMCUB_REGION6_OFFSET) \
+ DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
+ DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SR(DMCUB_SCRATCH0) \
DMUB_SR(DMCUB_SCRATCH1) \
DMUB_SR(DMCUB_SCRATCH2) \
@@ -155,6 +158,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
+ DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
@@ -162,7 +167,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \
DMUB_SF(DMCUB_REGION3_TMR_AXI_SPACE, DMCUB_REGION3_TMR_AXI_SPACE) \
DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \
- DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK)
+ DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \
+ DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS)
struct dmub_srv_dcn32_reg_offset {
#define DMUB_SR(reg) uint32_t reg;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
index 2228d62adc7e..e13557ed97be 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c
@@ -400,13 +400,14 @@ union dmub_fw_boot_options dmub_dcn35_get_fw_boot_option(struct dmub_srv *dmub)
void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmub_srv_hw_params *params)
{
union dmub_fw_boot_options boot_options = {0};
- union dmub_fw_boot_options cur_boot_options = {0};
- cur_boot_options = dmub_dcn35_get_fw_boot_option(dmub);
+ if (!dmub->dpia_supported) {
+ dmub->dpia_supported = dmub_dcn35_get_fw_boot_option(dmub).bits.enable_dpia;
+ }
boot_options.bits.z10_disable = params->disable_z10;
boot_options.bits.dpia_supported = params->dpia_supported;
- boot_options.bits.enable_dpia = cur_boot_options.bits.enable_dpia && !params->disable_dpia;
+ boot_options.bits.enable_dpia = dmub->dpia_supported && !params->disable_dpia;
boot_options.bits.usb4_cm_version = params->usb4_cm_version;
boot_options.bits.dpia_hpd_int_enable_supported = params->dpia_hpd_int_enable_supported;
boot_options.bits.power_optimization = params->power_optimization;
@@ -417,6 +418,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
boot_options.bits.disable_sldo_opt = params->disable_sldo_opt;
boot_options.bits.enable_non_transparent_setconfig = params->enable_non_transparent_setconfig;
boot_options.bits.lower_hbr3_phy_ssc = params->lower_hbr3_phy_ssc;
+ boot_options.bits.disable_dpia_bw_allocation = params->disable_dpia_bw_allocation;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -519,6 +521,45 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub)
dmub->debug.gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0);
}
+
+bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub)
+{
+ uint64_t region3_cw5_offset;
+ uint32_t top_addr, top_addr_enable, offset_low;
+ uint32_t offset_high, base_addr, fw_version;
+ bool is_vbios_fw = false;
+
+ memset(&dmub->preos_info, 0, sizeof(dmub->preos_info));
+
+ fw_version = REG_READ(DMCUB_SCRATCH1);
+ is_vbios_fw = ((fw_version >> 6) & 0x01) ? true : false;
+ if (!is_vbios_fw)
+ return false;
+
+ dmub->preos_info.boot_status = REG_READ(DMCUB_SCRATCH0);
+ dmub->preos_info.fw_version = REG_READ(DMCUB_SCRATCH1);
+ dmub->preos_info.boot_options = REG_READ(DMCUB_SCRATCH14);
+ REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS,
+ DMCUB_REGION3_CW5_ENABLE, &top_addr_enable);
+ if (top_addr_enable) {
+ dmub_dcn35_get_fb_base_offset(dmub,
+ &dmub->preos_info.fb_base, &dmub->preos_info.fb_offset);
+ offset_low = REG_READ(DMCUB_REGION3_CW5_OFFSET);
+ offset_high = REG_READ(DMCUB_REGION3_CW5_OFFSET_HIGH);
+ region3_cw5_offset = ((uint64_t)offset_high << 32) | offset_low;
+ dmub->preos_info.trace_buffer_phy_addr = region3_cw5_offset
+ - dmub->preos_info.fb_base + dmub->preos_info.fb_offset;
+
+ REG_GET(DMCUB_REGION3_CW5_TOP_ADDRESS,
+ DMCUB_REGION3_CW5_TOP_ADDRESS, &top_addr);
+ base_addr = REG_READ(DMCUB_REGION3_CW5_BASE_ADDRESS) & 0x1FFFFFFF;
+ dmub->preos_info.trace_buffer_size =
+ (top_addr > base_addr) ? (top_addr - base_addr + 1) : 0;
+ }
+
+ return true;
+}
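
A worked example of the CW5 address math above, with illustrative register values:

	/*
	 *   offset_high = 0x1, offset_low = 0x20000000
	 *     -> region3_cw5_offset    = 0x1_2000_0000
	 *   fb_base = 0x1_0000_0000, fb_offset = 0x0
	 *     -> trace_buffer_phy_addr = 0x1_2000_0000 - 0x1_0000_0000 + 0x0
	 *                              = 0x2000_0000
	 *   top_addr = 0x0051_FFFF, base_addr = 0x0050_0000 (masked window addresses)
	 *     -> trace_buffer_size     = 0x0051_FFFF - 0x0050_0000 + 1
	 *                              = 0x2_0000 (128 KiB)
	 */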
+
void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub)
{
/* DMCUB_REGION3_TMR_AXI_SPACE values:
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
index 39fcb7275da5..92e6695a2c9b 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h
@@ -285,4 +285,6 @@ bool dmub_dcn35_is_hw_powered_up(struct dmub_srv *dmub);
void dmub_srv_dcn35_regs_init(struct dmub_srv *dmub, struct dc_context *ctx);
+bool dmub_dcn35_get_preos_fw_info(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN35_H_ */
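dmub_dcn35_get_preos_fw_info() above recovers the pre-OS (VBIOS-loaded) firmware's trace buffer from the CW5 window registers. The address arithmetic deserves a closer look; a sketch, assuming fb_base/fb_offset translate between the firmware's and the driver's view of frame-buffer memory:

    uint64_t cw5  = ((uint64_t)offset_high << 32) | offset_low; /* FW-view address */
    uint64_t phys = cw5 - fb_base + fb_offset;                  /* driver-view address */
    /* size from the 29-bit base/top fields; the +1 makes the top unit inclusive */
    uint32_t size = (top > base) ? (top - base + 1) : 0;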
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
index b31adbd0d685..95542299e3b3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c
@@ -81,7 +81,7 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
dmub->hw_funcs.set_gpint(dmub, cmd);
for (; i < timeout_us; i++) {
- scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ scratch = REG_READ(DMCUB_SCRATCH7);
if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
break;
@@ -97,11 +97,24 @@ void dmub_dcn401_reset(struct dmub_srv *dmub)
}
}
+ if (enabled) {
+ REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1);
+ udelay(1);
+ REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
+ }
+
if (i >= timeout_us) {
/* timeout should never occur */
BREAK_TO_DEBUGGER();
}
+ REG_UPDATE(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, 0);
+ REG_UPDATE(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE, 0);
+
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
@@ -134,7 +147,6 @@ void dmub_dcn401_backdoor_load(struct dmub_srv *dmub,
/* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
dmub_dcn401_translate_addr(&cw0->offset, fb_base, fb_offset, &offset);
@@ -168,7 +180,6 @@ void dmub_dcn401_backdoor_load_zfb_mode(struct dmub_srv *dmub,
/* reset and disable DMCUB and MMHUBBUB DMUIF */
REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1);
- REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
offset = cw0->offset;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index b17a19400c06..a6ae1d2e9685 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -66,7 +66,7 @@
#define DMUB_SCRATCH_MEM_SIZE (1024)
/* Default indirect buffer size. */
-#define DMUB_IB_MEM_SIZE (1280)
+#define DMUB_IB_MEM_SIZE (2560)
/* Default LSDMA ring buffer size. */
#define DMUB_LSDMA_RB_SIZE (64 * 1024)
@@ -359,6 +359,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->get_current_time = dmub_dcn35_get_current_time;
funcs->get_diagnostic_data = dmub_dcn35_get_diagnostic_data;
+ funcs->get_preos_fw_info = dmub_dcn35_get_preos_fw_info;
funcs->init_reg_offsets = dmub_srv_dcn35_regs_init;
if (asic == DMUB_ASIC_DCN351)
@@ -564,10 +565,11 @@ enum dmub_status
window_sizes[DMUB_WINDOW_4_MAILBOX] = DMUB_MAILBOX_SIZE;
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
- window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
+ window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = dmub_align(DMUB_SCRATCH_MEM_SIZE, 64);
window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE;
window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size);
window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
+ window_sizes[DMUB_WINDOW_CURSOR_OFFLOAD] = dmub_align(sizeof(struct dmub_cursor_offload_v1), 64);
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
@@ -652,21 +654,22 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
- struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
- struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM];
struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
+ uint32_t i;
+
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
- if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
- !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) {
- ASSERT(0);
- return DMUB_STATUS_INVALID;
+ for (i = 0; i < DMUB_WINDOW_TOTAL; ++i) {
+ if (!params->fb[i]) {
+ ASSERT(0);
+ return DMUB_STATUS_INVALID;
+ }
}
dmub->fb_base = params->fb_base;
@@ -748,9 +751,11 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->shared_state = shared_state_fb->cpu_addr;
- dmub->scratch_mem_fb = *scratch_mem_fb;
+ dmub->scratch_mem_fb = *params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
+ dmub->ib_mem_gart = *params->fb[DMUB_WINDOW_IB_MEM];
- dmub->ib_mem_gart = *ib_mem_gart;
+ dmub->cursor_offload_fb = *params->fb[DMUB_WINDOW_CURSOR_OFFLOAD];
+ dmub->cursor_offload_v1 = (struct dmub_cursor_offload_v1 *)dmub->cursor_offload_fb.cpu_addr;
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
@@ -1368,3 +1373,11 @@ enum dmub_status dmub_srv_update_inbox_status(struct dmub_srv *dmub)
return DMUB_STATUS_OK;
}
+
+bool dmub_srv_get_preos_info(struct dmub_srv *dmub)
+{
+ if (!dmub || !dmub->hw_funcs.get_preos_fw_info)
+ return false;
+
+ return dmub->hw_funcs.get_preos_fw_info(dmub);
+}
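The window sizing changes earlier in this file run DMUB_SCRATCH_MEM_SIZE and the cursor-offload struct through dmub_align(..., 64). dmub_align() is presumably the usual round-up-to-a-multiple helper; a minimal sketch of that assumption:

    /* round x up to the next multiple of factor */
    static inline uint32_t dmub_align_sketch(uint32_t x, uint32_t factor)
    {
        return (x + factor - 1) / factor * factor;
    }
    /* dmub_align_sketch(1024, 64) == 1024; dmub_align_sketch(1000, 64) == 1024 */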
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 567c5b1aeb7a..e7a58b140388 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -71,7 +71,7 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
switch (cmd.cmd_common.header.type) {
case DMUB_OUT_CMD__DP_AUX_REPLY:
notify->type = DMUB_NOTIFICATION_AUX_REPLY;
- notify->link_index = cmd.dp_aux_reply.control.instance;
+ notify->instance = cmd.dp_aux_reply.control.instance;
notify->result = cmd.dp_aux_reply.control.result;
dmub_memcpy((void *)&notify->aux_reply,
(void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data));
@@ -84,17 +84,17 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
notify->type = DMUB_NOTIFICATION_HPD_IRQ;
}
- notify->link_index = cmd.dp_hpd_notify.hpd_data.instance;
+ notify->instance = cmd.dp_hpd_notify.hpd_data.instance;
notify->result = AUX_RET_SUCCESS;
break;
case DMUB_OUT_CMD__SET_CONFIG_REPLY:
notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY;
- notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
+ notify->instance = cmd.set_config_reply.set_config_reply_control.instance;
notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
break;
case DMUB_OUT_CMD__DPIA_NOTIFICATION:
notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
- notify->link_index = cmd.dpia_notification.payload.header.instance;
+ notify->instance = cmd.dpia_notification.payload.header.instance;
break;
case DMUB_OUT_CMD__HPD_SENSE_NOTIFY:
notify->type = DMUB_NOTIFICATION_HPD_SENSE_NOTIFY;
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 812377d9e48f..973b6bdbac63 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -135,12 +135,8 @@ struct bp_external_encoder_control {
struct bp_crtc_source_select {
enum engine_id engine_id;
enum controller_id controller_id;
- /* from GPU Tx aka asic_signal */
- enum signal_type signal;
- /* sink_signal may differ from asicSignal if Translator encoder */
enum signal_type sink_signal;
- enum display_output_bit_depth display_output_bit_depth;
- bool enable_dp_audio;
+ uint8_t bit_depth;
};
struct bp_transmitter_control {
@@ -166,6 +162,11 @@ struct bp_transmitter_control {
bool single_pll_mode;
};
+struct bp_load_detection_parameters {
+ enum engine_id engine_id;
+ uint16_t device_id;
+};
+
struct bp_hw_crtc_timing_parameters {
enum controller_id controller_id;
/* horizontal part */
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 5fc29164e4b4..8aea50aa9533 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -213,6 +213,11 @@ enum {
#endif
#define DEVICE_ID_NV_13FE 0x13FE // CYAN_SKILLFISH
#define DEVICE_ID_NV_143F 0x143F
+#define DEVICE_ID_NV_13F9 0x13F9
+#define DEVICE_ID_NV_13FA 0x13FA
+#define DEVICE_ID_NV_13FB 0x13FB
+#define DEVICE_ID_NV_13FC 0x13FC
+#define DEVICE_ID_NV_13DB 0x13DB
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
#define DEVICE_ID_VGH_1435 0x1435
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index de8f3cfed6c8..07b937b92efc 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -30,6 +30,22 @@
#ifndef DP_SINK_HW_REVISION_START // can remove this once the define gets into linux drm_dp_helper.h
#define DP_SINK_HW_REVISION_START 0x409
#endif
+/* Panel Replay */
+#ifndef DP_PANEL_REPLAY_CAPABILITY_SUPPORT // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_CAPABILITY_SUPPORT 0x0b0
+#endif /* DP_PANEL_REPLAY_CAPABILITY_SUPPORT */
+#ifndef DP_PANEL_REPLAY_CAPABILITY // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_CAPABILITY 0x0b1
+#endif /* DP_PANEL_REPLAY_CAPABILITY */
+#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 0x1b0
+#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1 */
+#ifndef DP_PANEL_REPLAY_ENABLE // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_ENABLE (1 << 0)
+#endif /* DP_PANEL_REPLAY_ENABLE */
+#ifndef DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 0x1b1
+#endif /* DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_2 */
enum dpcd_revision {
DPCD_REV_10 = 0x10,
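These fallback defines mirror the drm_dp_helper.h names so the DC code can probe and enable Panel Replay before the kernel-wide defines land. A hypothetical probe sequence, assuming a valid struct drm_dp_aux *aux and taking bit 0 of the capability register to mean "supported":

    u8 cap = 0;

    if (drm_dp_dpcd_readb(aux, DP_PANEL_REPLAY_CAPABILITY_SUPPORT, &cap) == 1 &&
        (cap & 0x1))
        drm_dp_dpcd_writeb(aux, DP_PANEL_REPLAY_ENABLE_AND_CONFIGURATION_1,
                           DP_PANEL_REPLAY_ENABLE);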
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index cc467031651d..38a77fa9b4af 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -169,6 +169,7 @@ struct dc_firmware_info {
uint32_t engine_clk_ss_percentage;
} feature;
+ uint32_t max_pixel_clock; /* in KHz */
uint32_t default_display_engine_pll_frequency; /* in KHz */
uint32_t external_clock_source_frequency_for_dp; /* in KHz */
uint32_t smu_gpu_pll_output_freq; /* in KHz */
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index 54e33062b3c0..1386fa124e85 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -310,4 +310,11 @@ static inline bool dal_graphics_object_id_equal(
}
return false;
}
+
+static inline bool dc_connector_supports_analog(const enum connector_id conn)
+{
+ return conn == CONNECTOR_ID_VGA ||
+ conn == CONNECTOR_ID_SINGLE_LINK_DVII ||
+ conn == CONNECTOR_ID_DUAL_LINK_DVII;
+}
#endif
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index a10d6b988aab..3a2c2d2fb629 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -118,6 +118,18 @@ static inline bool dc_is_dvi_signal(enum signal_type signal)
}
}
+/**
+ * dc_is_rgb_signal() - Whether the signal is analog RGB.
+ *
+ * Returns whether the given signal type is an analog RGB signal
+ * that is used with a DAC on VGA or DVI-I connectors.
+ * Not to be confused with other uses of "RGB", such as RGB color space.
+ */
+static inline bool dc_is_rgb_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_RGB);
+}
+
static inline bool dc_is_tmds_signal(enum signal_type signal)
{
switch (signal) {
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 71efd2770c99..1aae46d703ba 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -226,8 +226,8 @@ static void update_v_total_for_static_ramp(
unsigned int target_duration_in_us =
calc_duration_in_us_from_refresh_in_uhz(
in_out_vrr->fixed.target_refresh_in_uhz);
- bool ramp_direction_is_up = (current_duration_in_us >
- target_duration_in_us) ? true : false;
+ bool ramp_direction_is_up = current_duration_in_us >
+ target_duration_in_us;
/* Calculate ratio between new and current frame duration with 3 digit */
unsigned int frame_duration_ratio = div64_u64(1000000,
@@ -1260,6 +1260,17 @@ void mod_freesync_handle_v_update(struct mod_freesync *mod_freesync,
update_v_total_for_static_ramp(
core_freesync, stream, in_out_vrr);
}
+
+ /*
+ * If VRR is inactive, set vtotal min and max to nominal vtotal
+ */
+ if (in_out_vrr->state == VRR_STATE_INACTIVE) {
+ in_out_vrr->adjust.v_total_min =
+ mod_freesync_calc_v_total_from_refresh(stream,
+ in_out_vrr->max_refresh_in_uhz);
+ in_out_vrr->adjust.v_total_max = in_out_vrr->adjust.v_total_min;
+ return;
+ }
}
unsigned long long mod_freesync_calc_nominal_field_rate(
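For context on the arithmetic above: refresh rates in this module are carried in uHz, so calc_duration_in_us_from_refresh_in_uhz() presumably computes 10^12 / refresh_uhz. A worked example:

    /* 60 Hz = 60,000,000 uHz -> 10^12 / 60,000,000 = 16,666 us per frame */
    unsigned int duration_us =
        (unsigned int)div64_u64(1000000000000ULL, 60000000ULL);

The ramp direction is then just the comparison current_duration_in_us > target_duration_in_us; the removed "? true : false" was redundant.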
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 5e01c6e24cbc..ca402ddcdacc 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -29,6 +29,7 @@ static void push_error_status(struct mod_hdcp *hdcp,
enum mod_hdcp_status status)
{
struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+ const uint8_t retry_limit = hdcp->connection.link.adjust.retry_limit;
if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) {
trace->errors[trace->error_count].status = status;
@@ -39,11 +40,11 @@ static void push_error_status(struct mod_hdcp *hdcp,
if (is_hdcp1(hdcp)) {
hdcp->connection.hdcp1_retry_count++;
- if (hdcp->connection.hdcp1_retry_count == MAX_NUM_OF_ATTEMPTS)
+ if (hdcp->connection.hdcp1_retry_count == retry_limit)
hdcp->connection.link.adjust.hdcp1.disable = 1;
} else if (is_hdcp2(hdcp)) {
hdcp->connection.hdcp2_retry_count++;
- if (hdcp->connection.hdcp2_retry_count == MAX_NUM_OF_ATTEMPTS)
+ if (hdcp->connection.hdcp2_retry_count == retry_limit)
hdcp->connection.link.adjust.hdcp2.disable = 1;
}
}
@@ -353,7 +354,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* reset retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* add display to connection */
@@ -399,7 +400,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
/* clear retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* remove display */
@@ -463,7 +464,7 @@ enum mod_hdcp_status mod_hdcp_update_display(struct mod_hdcp *hdcp,
/* clear retry counters */
reset_retry_counts(hdcp);
- /* reset error trace */
+ /* reset trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
/* set new adjustment */
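push_error_status() above now disables HDCP 1.x/2.x after connection.link.adjust.retry_limit failed attempts instead of the fixed MAX_NUM_OF_ATTEMPTS, so callers must seed the new field; a hypothetical caller-side default that preserves the previous behavior:

    link->adjust.retry_limit = MAX_NUM_OF_ATTEMPTS;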
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index a37634942b07..26a351a184f3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -88,6 +88,7 @@ struct mod_hdcp_transition_input_hdcp2 {
uint8_t lc_init_write;
uint8_t l_prime_available_poll;
uint8_t l_prime_read;
+ uint8_t l_prime_combo_read;
uint8_t l_prime_validation;
uint8_t eks_prepare;
uint8_t eks_write;
@@ -508,7 +509,7 @@ static inline void set_auth_complete(struct mod_hdcp *hdcp,
struct mod_hdcp_output *output)
{
output->auth_complete = 1;
- mod_hdcp_log_ddc_trace(hdcp);
+ HDCP_AUTH_COMPLETE_TRACE(hdcp);
}
/* connection topology helpers */
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 8bc377560787..1bbd728d4345 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -29,6 +29,7 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
{
uint64_t n = 0;
uint8_t count = 0;
+ enum mod_hdcp_status status;
u8 bksv[sizeof(n)] = { };
memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
@@ -38,8 +39,14 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
count++;
n &= (n - 1);
}
- return (count == 20) ? MOD_HDCP_STATUS_SUCCESS :
- MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+
+ if (count == 20) {
+ hdcp->connection.trace.hdcp1.attempt_count++;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP1_INVALID_BKSV;
+ }
+ return status;
}
static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp)
@@ -135,6 +142,8 @@ static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
+ hdcp->connection.trace.hdcp1.downstream_device_count = get_device_count(hdcp);
+
/* Some MST display may choose to report the internal panel as an HDCP RX.
* To update this condition with 1(because the immediate repeater's internal
* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp).
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index bb8ae80b37f8..27500abf9fee 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -48,6 +48,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
{
enum mod_hdcp_status status;
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
if (is_dp_hdcp(hdcp))
status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) &&
@@ -55,9 +56,14 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
else
- status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi & HDCP_2_2_HDMI_SUPPORT_MASK) ?
- MOD_HDCP_STATUS_SUCCESS :
- MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+ status = (hdcp->auth.msg.hdcp2.hdcp2version_hdmi
+ & HDCP_2_2_HDMI_SUPPORT_MASK)
+ ? MOD_HDCP_STATUS_SUCCESS
+ : MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
+
+ if (status == MOD_HDCP_STATUS_SUCCESS)
+ trace->hdcp2.attempt_count++;
+
return status;
}
@@ -201,10 +207,17 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
+ struct mod_hdcp_trace *trace = &hdcp->connection.trace;
+
/* Avoid device count == 0 to do authentication */
if (get_device_count(hdcp) == 0)
return MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE;
+ trace->hdcp2.downstream_device_count = get_device_count(hdcp);
+ trace->hdcp2.hdcp1_device_downstream =
+ HDCP_2_2_HDCP1_DEVICE_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]);
+ trace->hdcp2.hdcp2_legacy_device_downstream =
+ HDCP_2_2_HDCP_2_0_REP_CONNECTED(hdcp->auth.msg.hdcp2.rx_id_list[2]);
/* Some MST display may choose to report the internal panel as an HDCP RX. */
/* To update this condition with 1(because the immediate repeater's internal */
/* panel is possibly not included in DEVICE_COUNT) + get_device_count(hdcp). */
@@ -452,54 +465,11 @@ out:
return status;
}
-static enum mod_hdcp_status locality_check_sw(struct mod_hdcp *hdcp,
- struct mod_hdcp_event_context *event_ctx,
- struct mod_hdcp_transition_input_hdcp2 *input)
-{
- enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
-
- if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
- &input->lc_init_write, &status,
- hdcp, "lc_init_write"))
- goto out;
- if (is_dp_hdcp(hdcp))
- msleep(16);
- else
- if (!mod_hdcp_execute_and_set(poll_l_prime_available,
- &input->l_prime_available_poll, &status,
- hdcp, "l_prime_available_poll"))
- goto out;
- if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
- &input->l_prime_read, &status,
- hdcp, "l_prime_read"))
- goto out;
-out:
- return status;
-}
-
-static enum mod_hdcp_status locality_check_fw(struct mod_hdcp *hdcp,
- struct mod_hdcp_event_context *event_ctx,
- struct mod_hdcp_transition_input_hdcp2 *input)
-{
- enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
-
- if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw,
- &input->l_prime_read, &status,
- hdcp, "l_prime_read"))
- goto out;
-
-out:
- return status;
-}
-
static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
struct mod_hdcp_event_context *event_ctx,
struct mod_hdcp_transition_input_hdcp2 *input)
{
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
- const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
- && hdcp->config.ddc.funcs.atomic_write_poll_read_aux
- && !hdcp->connection.link.adjust.hdcp2.force_sw_locality_check;
if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) {
event_ctx->unexpected_event = 1;
@@ -511,9 +481,28 @@ static enum mod_hdcp_status locality_check(struct mod_hdcp *hdcp,
hdcp, "lc_init_prepare"))
goto out;
- status = (use_fw ? locality_check_fw : locality_check_sw)(hdcp, event_ctx, input);
- if (status != MOD_HDCP_STATUS_SUCCESS)
- goto out;
+ if (hdcp->connection.link.adjust.hdcp2.use_fw_locality_check) {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_poll_read_lc_fw,
+ &input->l_prime_combo_read, &status,
+ hdcp, "l_prime_combo_read"))
+ goto out;
+ } else {
+ if (!mod_hdcp_execute_and_set(mod_hdcp_write_lc_init,
+ &input->lc_init_write, &status,
+ hdcp, "lc_init_write"))
+ goto out;
+ if (is_dp_hdcp(hdcp))
+ msleep(16);
+ else
+ if (!mod_hdcp_execute_and_set(poll_l_prime_available,
+ &input->l_prime_available_poll, &status,
+ hdcp, "l_prime_available_poll"))
+ goto out;
+ if (!mod_hdcp_execute_and_set(mod_hdcp_read_l_prime,
+ &input->l_prime_read, &status,
+ hdcp, "l_prime_read"))
+ goto out;
+ }
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_validate_l_prime,
&input->l_prime_validation, &status,
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index 89ffb89e1932..9316312a4df5 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -184,31 +184,33 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A2_LOCALITY_CHECK);
break;
- case H2_A2_LOCALITY_CHECK: {
- const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_i2c
- && !adjust->hdcp2.force_sw_locality_check;
-
- /*
- * 1A-05: consider disconnection after LC init a failure
- * 1A-13-1: consider invalid l' a failure
- * 1A-13-2: consider l' timeout a failure
- */
+ case H2_A2_LOCALITY_CHECK:
+ /* 1A-05: consider disconnection after LC init a failure */
if (hdcp->state.stay_count > 10 ||
- input->lc_init_prepare != PASS ||
- (!use_fw && input->lc_init_write != PASS) ||
- (!use_fw && input->l_prime_available_poll != PASS)) {
+ input->lc_init_prepare != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
- } else if (input->l_prime_read != PASS) {
- if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
- adjust->hdcp2.force_sw_locality_check = true;
+ } else if (adjust->hdcp2.use_fw_locality_check &&
+ input->l_prime_combo_read != PASS) {
+ /* 1A-13-2: consider l' timeout a failure */
+ if (adjust->hdcp2.use_sw_locality_fallback) {
+ /* switch to software locality check */
+ adjust->hdcp2.use_fw_locality_check = 0;
callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
break;
}
-
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!adjust->hdcp2.use_fw_locality_check &&
+ (input->lc_init_write != PASS ||
+ input->l_prime_available_poll != PASS ||
+ input->l_prime_read != PASS)) {
+ /* 1A-13-2: consider l' timeout a failure */
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
+ /* 1A-13-1: consider invalid l' a failure */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
break;
@@ -216,7 +218,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
- }
case H2_A3_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
@@ -510,26 +511,29 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A2_LOCALITY_CHECK);
break;
- case D2_A2_LOCALITY_CHECK: {
- const bool use_fw = hdcp->config.ddc.funcs.atomic_write_poll_read_aux
- && !adjust->hdcp2.force_sw_locality_check;
-
+ case D2_A2_LOCALITY_CHECK:
if (hdcp->state.stay_count > 10 ||
- input->lc_init_prepare != PASS ||
- (!use_fw && input->lc_init_write != PASS)) {
- /* 1A-12: consider invalid l' a failure */
+ input->lc_init_prepare != PASS) {
fail_and_restart_in_ms(0, &status, output);
break;
- } else if (input->l_prime_read != PASS) {
- if (use_fw && hdcp->config.debug.lc_enable_sw_fallback) {
- adjust->hdcp2.force_sw_locality_check = true;
+ } else if (adjust->hdcp2.use_fw_locality_check &&
+ input->l_prime_combo_read != PASS) {
+ if (adjust->hdcp2.use_sw_locality_fallback) {
+ /* switch to software locality check */
+ adjust->hdcp2.use_fw_locality_check = 0;
callback_in_ms(0, output);
+ increment_stay_counter(hdcp);
break;
}
-
+ fail_and_restart_in_ms(0, &status, output);
+ break;
+ } else if (!adjust->hdcp2.use_fw_locality_check &&
+ (input->lc_init_write != PASS ||
+ input->l_prime_read != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->l_prime_validation != PASS) {
+ /* 1A-12: consider invalid l' a failure */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
break;
@@ -537,7 +541,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
callback_in_ms(0, output);
set_state_id(hdcp, output, D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER);
break;
- }
case D2_A34_EXCHANGE_KS_AND_TEST_FOR_REPEATER:
if (input->eks_prepare != PASS ||
input->eks_write != PASS) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index 2e6408579194..0ca39873f807 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -758,6 +758,6 @@ enum mod_hdcp_status mod_hdcp_write_poll_read_lc_fw(struct mod_hdcp *hdcp)
{
const bool success = (is_dp_hdcp(hdcp) ? write_stall_read_lc_fw_aux : write_poll_read_lc_fw_i2c)(hdcp);
- return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE;
+ return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 6b3b5f610907..5cb979c2cf8c 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -125,129 +125,11 @@ void mod_hdcp_log_ddc_trace(struct mod_hdcp *hdcp)
}
}
+#define CASE_FORMAT(entry) case entry: return #entry;
char *mod_hdcp_status_to_str(int32_t status)
{
switch (status) {
- case MOD_HDCP_STATUS_SUCCESS:
- return "MOD_HDCP_STATUS_SUCCESS";
- case MOD_HDCP_STATUS_FAILURE:
- return "MOD_HDCP_STATUS_FAILURE";
- case MOD_HDCP_STATUS_RESET_NEEDED:
- return "MOD_HDCP_STATUS_RESET_NEEDED";
- case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND:
- return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND";
- case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND:
- return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND";
- case MOD_HDCP_STATUS_INVALID_STATE:
- return "MOD_HDCP_STATUS_INVALID_STATE";
- case MOD_HDCP_STATUS_NOT_IMPLEMENTED:
- return "MOD_HDCP_STATUS_NOT_IMPLEMENTED";
- case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE:
- return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE";
- case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE:
- return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE";
- case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE:
- return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE";
- case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE:
- return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER:
- return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER";
- case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE:
- return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE";
- case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING:
- return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
- case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED:
- return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED";
- case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
- return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
- case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
- return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
- case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED:
- return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED";
- case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE:
- return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE";
- case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV:
- return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV";
- case MOD_HDCP_STATUS_DDC_FAILURE:
- return "MOD_HDCP_STATUS_DDC_FAILURE";
- case MOD_HDCP_STATUS_INVALID_OPERATION:
- return "MOD_HDCP_STATUS_INVALID_OPERATION";
- case MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE:
- return "MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE";
- case MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING";
- case MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING";
- case MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED:
- return "MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED:
- return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
- case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
- return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
- case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
- return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
- case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST:
- return "MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST";
- case MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE";
- case MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE:
- return "MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE";
- case MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE:
- return "MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE";
+ MOD_HDCP_STATUS_LIST(CASE_FORMAT)
default:
return "MOD_HDCP_STATUS_UNKNOWN";
}
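The hand-written switch is replaced with an X-macro expansion of MOD_HDCP_STATUS_LIST (defined in mod_hdcp.h later in this patch), so the enum and its string table can never drift apart. A self-contained sketch of the technique:

    #define COLOR_LIST(X) \
        X(COLOR_RED)      \
        X(COLOR_GREEN)    \
        X(COLOR_BLUE)

    #define AS_ENUM(e)   e,
    #define AS_STRING(e) case e: return #e;

    enum color { COLOR_LIST(AS_ENUM) };

    static const char *color_to_str(enum color c)
    {
        switch (c) {
        COLOR_LIST(AS_STRING)
        default:
            return "COLOR_UNKNOWN";
        }
    }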
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index 1d83c1b9da10..26553aa4c5ca 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -31,6 +31,7 @@
#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__)
#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__)
#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__)
+#define HDCP_LOG_TRA(hdcp) do {} while (0)
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
@@ -131,4 +132,9 @@
HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \
} while (0)
+#define HDCP_AUTH_COMPLETE_TRACE(hdcp) do { \
+ mod_hdcp_log_ddc_trace(hdcp); \
+ HDCP_LOG_TRA(hdcp); \
+} while (0)
+
#endif // MOD_HDCP_LOG_H_
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index c42468bb70ac..835467225458 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -35,69 +35,74 @@ struct mod_hdcp;
#define MAX_NUM_OF_DISPLAYS 6
#define MAX_NUM_OF_ATTEMPTS 4
#define MAX_NUM_OF_ERROR_TRACE 10
+#define MOD_HDCP_STATUS_LIST(FORMAT) \
+ FORMAT(MOD_HDCP_STATUS_SUCCESS) \
+ FORMAT(MOD_HDCP_STATUS_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_RESET_NEEDED) \
+ FORMAT(MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND) \
+ FORMAT(MOD_HDCP_STATUS_DISPLAY_NOT_FOUND) \
+ FORMAT(MOD_HDCP_STATUS_INVALID_STATE) \
+ FORMAT(MOD_HDCP_STATUS_NOT_IMPLEMENTED) \
+ FORMAT(MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP1_INVALID_BKSV) \
+ FORMAT(MOD_HDCP_STATUS_DDC_FAILURE) /* TODO: specific errors */ \
+ FORMAT(MOD_HDCP_STATUS_INVALID_OPERATION) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE) \
+ FORMAT(MOD_HDCP_STATUS_HDCP2_LOCALITY_COMBO_READ_FAILURE)
+
+#define ENUM_FORMAT(entry) entry,
/* detailed return status */
enum mod_hdcp_status {
- MOD_HDCP_STATUS_SUCCESS = 0,
- MOD_HDCP_STATUS_FAILURE,
- MOD_HDCP_STATUS_RESET_NEEDED,
- MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND,
- MOD_HDCP_STATUS_DISPLAY_NOT_FOUND,
- MOD_HDCP_STATUS_INVALID_STATE,
- MOD_HDCP_STATUS_NOT_IMPLEMENTED,
- MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE,
- MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE,
- MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE,
- MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE,
- MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER,
- MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
- MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
- MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
- MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED,
- MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
- MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
- MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
- MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
- MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE,
- MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE,
- MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED,
- MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE,
- MOD_HDCP_STATUS_HDCP1_INVALID_BKSV,
- MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */
- MOD_HDCP_STATUS_INVALID_OPERATION,
- MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE,
- MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING,
- MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING,
- MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE,
- MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
- MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE,
- MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
- MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
- MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
- MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST,
- MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE,
- MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE,
- MOD_HDCP_STATUS_UNSUPPORTED_PSP_VER_FAILURE,
+ MOD_HDCP_STATUS_LIST(ENUM_FORMAT)
};
struct mod_hdcp_displayport {
@@ -214,12 +219,14 @@ struct mod_hdcp_link_adjustment_hdcp2 {
uint8_t force_type : 2;
uint8_t force_no_stored_km : 1;
uint8_t increase_h_prime_timeout: 1;
- uint8_t force_sw_locality_check : 1;
- uint8_t reserved : 2;
+ uint8_t use_fw_locality_check : 1;
+ uint8_t use_sw_locality_fallback: 1;
+ uint8_t reserved : 1;
};
struct mod_hdcp_link_adjustment {
uint8_t auth_delay;
+ uint8_t retry_limit;
struct mod_hdcp_link_adjustment_hdcp1 hdcp1;
struct mod_hdcp_link_adjustment_hdcp2 hdcp2;
};
@@ -229,9 +236,23 @@ struct mod_hdcp_error {
uint8_t state_id;
};
+struct mod_hdcp1_trace {
+ uint8_t attempt_count;
+ uint8_t downstream_device_count;
+};
+
+struct mod_hdcp2_trace {
+ uint8_t attempt_count;
+ uint8_t downstream_device_count;
+ uint8_t hdcp1_device_downstream;
+ uint8_t hdcp2_legacy_device_downstream;
+};
+
struct mod_hdcp_trace {
struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE];
uint8_t error_count;
+ struct mod_hdcp1_trace hdcp1;
+ struct mod_hdcp2_trace hdcp2;
};
enum mod_hdcp_encryption_status {
@@ -302,10 +323,6 @@ struct mod_hdcp_display_query {
struct mod_hdcp_config {
struct mod_hdcp_psp psp;
struct mod_hdcp_ddc ddc;
- struct {
- uint8_t lc_enable_sw_fallback : 1;
- uint8_t reserved : 7;
- } debug;
uint8_t index;
};
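The adjustment bitfield above replaces the negative force_sw_locality_check knob with a positive use_fw_locality_check plus a use_sw_locality_fallback bit (absorbing the removed config.debug.lc_enable_sw_fallback). This hunk does not show how a driver populates them; a hypothetical wiring sketch using the DC_HDCP_LC_* debug bits documented elsewhere in this patch:

    /* hypothetical: prefer the FW path when the combo DDC ops exist or when forced */
    adjust.hdcp2.use_fw_locality_check =
        has_fw_combo_ops || (dc_debug_mask & DC_HDCP_LC_FORCE_FW_ENABLE);
    /* retry with the legacy SW sequence on FW-path failure, if requested */
    adjust.hdcp2.use_sw_locality_fallback =
        !!(dc_debug_mask & DC_HDCP_LC_ENABLE_SW_FALLBACK);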
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 29ccd3532d13..fd139b219bf9 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -975,6 +975,34 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
return true;
}
+void set_replay_frame_skip_number(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+ uint32_t coasting_vtotal_refresh_rate_mhz,
+ uint32_t flicker_free_refresh_rate_mhz,
+ bool is_defer)
+{
+ uint32_t *frame_skip_number_array = NULL;
+ uint32_t frame_skip_number = 0;
+
+ if (link == NULL || flicker_free_refresh_rate_mhz == 0 || coasting_vtotal_refresh_rate_mhz == 0)
+ return;
+
+ if (is_defer)
+ frame_skip_number_array = link->replay_settings.defer_frame_skip_number_table;
+ else
+ frame_skip_number_array = link->replay_settings.frame_skip_number_table;
+
+ if (frame_skip_number_array == NULL)
+ return;
+
+ frame_skip_number = coasting_vtotal_refresh_rate_mhz / flicker_free_refresh_rate_mhz;
+
+ if (frame_skip_number >= 1)
+ frame_skip_number_array[type] = frame_skip_number - 1;
+ else
+ frame_skip_number_array[type] = 0;
+}
+
void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint32_t vtotal)
@@ -987,6 +1015,8 @@ void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
{
link->replay_settings.coasting_vtotal_table[type] =
link->replay_settings.defer_update_coasting_vtotal_table[type];
+ link->replay_settings.frame_skip_number_table[type] =
+ link->replay_settings.defer_frame_skip_number_table[type];
}
void set_replay_coasting_vtotal(struct dc_link *link,
@@ -1007,6 +1037,9 @@ void calculate_replay_link_off_frame_count(struct dc_link *link,
uint8_t max_link_off_frame_count = 0;
uint16_t max_deviation_line = 0, pixel_deviation_per_line = 0;
+ if (!link || link->replay_settings.config.replay_version != DC_FREESYNC_REPLAY)
+ return;
+
max_deviation_line = link->dpcd_caps.pr_info.max_deviation_line;
pixel_deviation_per_line = link->dpcd_caps.pr_info.pixel_deviation_per_line;
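set_replay_frame_skip_number() above derives a skip count from two refresh rates (presumably in millihertz, per the _mhz suffix): the panel updates once per coasting/flicker_free periods and skips the rest. A worked example:

    /* 120 Hz coasting vtotal rate, 30 Hz flicker-free floor, both in mHz */
    uint32_t coasting = 120000, flicker_free = 30000;
    uint32_t n    = coasting / flicker_free;   /* 4 refresh periods per update */
    uint32_t skip = (n >= 1) ? n - 1 : 0;      /* i.e. skip 3 frames */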
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 391209a3bf29..87d31d9dce5a 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -60,6 +60,11 @@ void set_replay_coasting_vtotal(struct dc_link *link,
void set_replay_defer_update_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
uint32_t vtotal);
+void set_replay_frame_skip_number(struct dc_link *link,
+ enum replay_coasting_vtotal_type type,
+		uint32_t coasting_vtotal_refresh_rate_mhz,
+		uint32_t flicker_free_refresh_rate_mhz,
+ bool is_defer);
void update_replay_coasting_vtotal_from_defer(struct dc_link *link,
enum replay_coasting_vtotal_type type);
void set_replay_low_rr_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
diff --git a/drivers/gpu/drm/amd/include/amd_cper.h b/drivers/gpu/drm/amd/include/amd_cper.h
index 086869264425..a252ee4c7874 100644
--- a/drivers/gpu/drm/amd/include/amd_cper.h
+++ b/drivers/gpu/drm/amd/include/amd_cper.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index bfb446736ca8..17945094a138 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -109,6 +109,7 @@ enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_VPE,
AMD_IP_BLOCK_TYPE_UMSCH_MM,
AMD_IP_BLOCK_TYPE_ISP,
+ AMD_IP_BLOCK_TYPE_RAS,
AMD_IP_BLOCK_TYPE_NUM,
};
@@ -239,18 +240,51 @@ enum amd_harvest_ip_mask {
AMD_HARVEST_IP_DMU_MASK = 0x4,
};
+/**
+ * enum DC_FEATURE_MASK - Bits that control DC feature defaults
+ */
enum DC_FEATURE_MASK {
//Default value can be found at "uint amdgpu_dc_feature_mask"
- DC_FBC_MASK = (1 << 0), //0x1, disabled by default
- DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
- DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
- DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
- DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
- DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default
- DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
- DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
- DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
- DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
+ /**
+ * @DC_FBC_MASK: (0x1) disabled by default
+ */
+ DC_FBC_MASK = (1 << 0),
+ /**
+ * @DC_MULTI_MON_PP_MCLK_SWITCH_MASK: (0x2) enabled by default
+ */
+ DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1),
+ /**
+ * @DC_DISABLE_FRACTIONAL_PWM_MASK: (0x4) disabled by default
+ */
+ DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2),
+ /**
+ * @DC_PSR_MASK: (0x8) disabled by default for DCN < 3.1
+ */
+ DC_PSR_MASK = (1 << 3),
+ /**
+ * @DC_EDP_NO_POWER_SEQUENCING: (0x10) disabled by default
+ */
+ DC_EDP_NO_POWER_SEQUENCING = (1 << 4),
+ /**
+ * @DC_DISABLE_LTTPR_DP1_4A: (0x20) disabled by default
+ */
+ DC_DISABLE_LTTPR_DP1_4A = (1 << 5),
+ /**
+ * @DC_DISABLE_LTTPR_DP2_0: (0x40) disabled by default
+ */
+ DC_DISABLE_LTTPR_DP2_0 = (1 << 6),
+ /**
+ * @DC_PSR_ALLOW_SMU_OPT: (0x80) disabled by default
+ */
+ DC_PSR_ALLOW_SMU_OPT = (1 << 7),
+ /**
+ * @DC_PSR_ALLOW_MULTI_DISP_OPT: (0x100) disabled by default
+ */
+ DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8),
+ /**
+ * @DC_REPLAY_MASK: (0x200) disabled by default for DCN < 3.1.4
+ */
+ DC_REPLAY_MASK = (1 << 9),
};
/**
@@ -258,64 +292,64 @@ enum DC_FEATURE_MASK {
*/
enum DC_DEBUG_MASK {
/**
- * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting
+ * @DC_DISABLE_PIPE_SPLIT: (0x1) If set, disable pipe-splitting
*/
DC_DISABLE_PIPE_SPLIT = 0x1,
/**
- * @DC_DISABLE_STUTTER: If set, disable memory stutter mode
+ * @DC_DISABLE_STUTTER: (0x2) If set, disable memory stutter mode
*/
DC_DISABLE_STUTTER = 0x2,
/**
- * @DC_DISABLE_DSC: If set, disable display stream compression
+ * @DC_DISABLE_DSC: (0x4) If set, disable display stream compression
*/
DC_DISABLE_DSC = 0x4,
/**
- * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations
+ * @DC_DISABLE_CLOCK_GATING: (0x8) If set, disable clock gating optimizations
*/
DC_DISABLE_CLOCK_GATING = 0x8,
/**
- * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU
+ * @DC_DISABLE_PSR: (0x10) If set, disable Panel self refresh v1 and PSR-SU
*/
DC_DISABLE_PSR = 0x10,
/**
- * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even
+ * @DC_FORCE_SUBVP_MCLK_SWITCH: (0x20) If set, force mclk switch in subvp, even
* if mclk switch in vblank is possible
*/
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
/**
- * @DC_DISABLE_MPO: If set, disable multi-plane offloading
+ * @DC_DISABLE_MPO: (0x40) If set, disable multi-plane offloading
*/
DC_DISABLE_MPO = 0x40,
/**
- * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA
+ * @DC_ENABLE_DPIA_TRACE: (0x80) If set, enable trace logging for DPIA
*/
DC_ENABLE_DPIA_TRACE = 0x80,
/**
- * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version
+ * @DC_ENABLE_DML2: (0x100) If set, force usage of DML2, even if the DCN version
* does not default to it.
*/
DC_ENABLE_DML2 = 0x100,
/**
- * @DC_DISABLE_PSR_SU: If set, disable PSR SU
+ * @DC_DISABLE_PSR_SU: (0x200) If set, disable PSR SU
*/
DC_DISABLE_PSR_SU = 0x200,
/**
- * @DC_DISABLE_REPLAY: If set, disable Panel Replay
+ * @DC_DISABLE_REPLAY: (0x400) If set, disable Panel Replay
*/
DC_DISABLE_REPLAY = 0x400,
/**
- * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time.
+ * @DC_DISABLE_IPS: (0x800) If set, disable all Idle Power States, all the time.
* If more than one IPS debug bit is set, the lowest bit takes
* precedence. For example, if DC_FORCE_IPS_ENABLE and
* DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes
@@ -324,56 +358,57 @@ enum DC_DEBUG_MASK {
DC_DISABLE_IPS = 0x800,
/**
- * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time,
+ * @DC_DISABLE_IPS_DYNAMIC: (0x1000) If set, disable all IPS, all the time,
* *except* when driver goes into suspend.
*/
DC_DISABLE_IPS_DYNAMIC = 0x1000,
/**
- * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if
+ * @DC_DISABLE_IPS2_DYNAMIC: (0x2000) If set, disable IPS2 (IPS1 allowed) if
* there is an enabled display. Otherwise, enable all IPS.
*/
DC_DISABLE_IPS2_DYNAMIC = 0x2000,
/**
- * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
+ * @DC_FORCE_IPS_ENABLE: (0x4000) If set, force enable all IPS, all the time.
*/
DC_FORCE_IPS_ENABLE = 0x4000,
/**
- * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for
+ * @DC_DISABLE_ACPI_EDID: (0x8000) If set, don't attempt to fetch EDID for
* eDP display from ACPI _DDC method.
*/
DC_DISABLE_ACPI_EDID = 0x8000,
/**
- * @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver.
+ * @DC_DISABLE_HDMI_CEC: (0x10000) If set, disable HDMI-CEC feature in amdgpu driver.
*/
DC_DISABLE_HDMI_CEC = 0x10000,
/**
- * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted
+ * @DC_DISABLE_SUBVP_FAMS: (0x20000) If set, disable DCN Sub-Viewport & Firmware Assisted
* Memory Clock Switching (FAMS) feature in amdgpu driver.
*/
DC_DISABLE_SUBVP_FAMS = 0x20000,
/**
- * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves
+ * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: (0x40000) If set, disable support for custom
+ * brightness curves
*/
DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE = 0x40000,
/**
- * @DC_HDCP_LC_FORCE_FW_ENABLE: If set, use HDCP Locality Check FW
+ * @DC_HDCP_LC_FORCE_FW_ENABLE: (0x80000) If set, use HDCP Locality Check FW
* path regardless of reported HW capabilities.
*/
DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000,
/**
- * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW
+ * @DC_HDCP_LC_ENABLE_SW_FALLBACK: (0x100000) If set, upon HDCP Locality Check FW
* path failure, retry using legacy SW path.
*/
DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000,
/**
- * @DC_SKIP_DETECTION_LT: If set, skip detection link training
+ * @DC_SKIP_DETECTION_LT: (0x200000) If set, skip detection link training
*/
DC_SKIP_DETECTION_LT = 0x200000,
};
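The hex values added to these comments are what actually lands in the amdgpu dcfeaturemask/dcdebugmask module parameters; the bits OR together. For example, assuming the usual amdgpu_dc_debug_mask backing variable:

    /* e.g. booting with amdgpu.dcdebugmask=0x600 sets both bits below */
    bool psr_su_disabled = amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU; /* 0x200 */
    bool replay_disabled = amdgpu_dc_debug_mask & DC_DISABLE_REPLAY; /* 0x400 */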
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
index 9de01ae574c0..067eddd9c62d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h
@@ -4115,6 +4115,7 @@
#define mmSCL0_SCL_COEF_RAM_CONFLICT_STATUS 0x1B55
#define mmSCL0_SCL_COEF_RAM_SELECT 0x1B40
#define mmSCL0_SCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL0_SCL_SCALER_ENABLE 0x1B42
#define mmSCL0_SCL_CONTROL 0x1B44
#define mmSCL0_SCL_DEBUG 0x1B6A
#define mmSCL0_SCL_DEBUG2 0x1B69
@@ -4144,6 +4145,7 @@
#define mmSCL1_SCL_COEF_RAM_CONFLICT_STATUS 0x1E55
#define mmSCL1_SCL_COEF_RAM_SELECT 0x1E40
#define mmSCL1_SCL_COEF_RAM_TAP_DATA 0x1E41
+#define mmSCL1_SCL_SCALER_ENABLE 0x1E42
#define mmSCL1_SCL_CONTROL 0x1E44
#define mmSCL1_SCL_DEBUG 0x1E6A
#define mmSCL1_SCL_DEBUG2 0x1E69
@@ -4173,6 +4175,7 @@
#define mmSCL2_SCL_COEF_RAM_CONFLICT_STATUS 0x4155
#define mmSCL2_SCL_COEF_RAM_SELECT 0x4140
#define mmSCL2_SCL_COEF_RAM_TAP_DATA 0x4141
+#define mmSCL2_SCL_SCALER_ENABLE 0x4142
#define mmSCL2_SCL_CONTROL 0x4144
#define mmSCL2_SCL_DEBUG 0x416A
#define mmSCL2_SCL_DEBUG2 0x4169
@@ -4202,6 +4205,7 @@
#define mmSCL3_SCL_COEF_RAM_CONFLICT_STATUS 0x4455
#define mmSCL3_SCL_COEF_RAM_SELECT 0x4440
#define mmSCL3_SCL_COEF_RAM_TAP_DATA 0x4441
+#define mmSCL3_SCL_SCALER_ENABLE 0x4442
#define mmSCL3_SCL_CONTROL 0x4444
#define mmSCL3_SCL_DEBUG 0x446A
#define mmSCL3_SCL_DEBUG2 0x4469
@@ -4231,6 +4235,7 @@
#define mmSCL4_SCL_COEF_RAM_CONFLICT_STATUS 0x4755
#define mmSCL4_SCL_COEF_RAM_SELECT 0x4740
#define mmSCL4_SCL_COEF_RAM_TAP_DATA 0x4741
+#define mmSCL4_SCL_SCALER_ENABLE 0x4742
#define mmSCL4_SCL_CONTROL 0x4744
#define mmSCL4_SCL_DEBUG 0x476A
#define mmSCL4_SCL_DEBUG2 0x4769
@@ -4260,6 +4265,7 @@
#define mmSCL5_SCL_COEF_RAM_CONFLICT_STATUS 0x4A55
#define mmSCL5_SCL_COEF_RAM_SELECT 0x4A40
#define mmSCL5_SCL_COEF_RAM_TAP_DATA 0x4A41
+#define mmSCL5_SCL_SCALER_ENABLE 0x4A42
#define mmSCL5_SCL_CONTROL 0x4A44
#define mmSCL5_SCL_DEBUG 0x4A6A
#define mmSCL5_SCL_DEBUG2 0x4A69
@@ -4287,6 +4293,7 @@
#define mmSCL_COEF_RAM_CONFLICT_STATUS 0x1B55
#define mmSCL_COEF_RAM_SELECT 0x1B40
#define mmSCL_COEF_RAM_TAP_DATA 0x1B41
+#define mmSCL_SCALER_ENABLE 0x1B42
#define mmSCL_CONTROL 0x1B44
#define mmSCL_DEBUG 0x1B6A
#define mmSCL_DEBUG2 0x1B69
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
index 2d6a598a6c25..9317a7afa621 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h
@@ -8650,6 +8650,8 @@
#define REGAMMA_LUT_INDEX__REGAMMA_LUT_INDEX__SHIFT 0x00000000
#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK_MASK 0x00000007L
#define REGAMMA_LUT_WRITE_EN_MASK__REGAMMA_LUT_WRITE_EN_MASK__SHIFT 0x00000000
+#define SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK 0x00000001L
+#define SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT 0x00000000
#define SCL_ALU_CONTROL__SCL_ALU_DISABLE_MASK 0x00000001L
#define SCL_ALU_CONTROL__SCL_ALU_DISABLE__SHIFT 0x00000000
#define SCL_BYPASS_CONTROL__SCL_BYPASS_MODE_MASK 0x00000003L
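
The mmSCL*_SCL_SCALER_ENABLE offsets added above pair with this new mask/shift in the usual DCE pattern. A minimal read-modify-write sketch, assuming the standard amdgpu RREG32/WREG32 MMIO accessors (surrounding driver context not shown):

    u32 tmp;

    /* Set SCL0's scaler-enable bit using the new definitions. */
    tmp = RREG32(mmSCL0_SCL_SCALER_ENABLE);
    tmp &= ~SCL_SCALER_ENABLE__SCL_SCALE_EN_MASK;
    tmp |= 1 << SCL_SCALER_ENABLE__SCL_SCALE_EN__SHIFT;
    WREG32(mmSCL0_SCL_SCALER_ENABLE, tmp);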
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
index 2176548e9203..9778822dd2a0 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_d.h
@@ -60,5 +60,10 @@
#define mmVCE_VCPU_CACHE_SIZE1 0x800C
#define mmVCE_VCPU_CACHE_SIZE2 0x800E
#define mmVCE_VCPU_CNTL 0x8005
+#define mmVCE_VCPU_SCRATCH7 0x8037
+#define mmVCE_FW_REG_STATUS 0x8384
+#define mmVCE_LMI_FW_PERIODIC_CTRL 0x8388
+#define mmVCE_LMI_FW_START_KEYSEL 0x8386
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
index ea5b26b11cb1..1f82d6f5abde 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vce/vce_1_0_sh_mask.h
@@ -61,6 +61,8 @@
#define VCE_RB_WPTR__RB_WPTR__SHIFT 0x00000004
#define VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK 0x00000001L
#define VCE_SOFT_RESET__ECPU_SOFT_RESET__SHIFT 0x00000000
+#define VCE_SOFT_RESET__FME_SOFT_RESET_MASK 0x00000004L
+#define VCE_SOFT_RESET__FME_SOFT_RESET__SHIFT 0x00000002
#define VCE_STATUS__JOB_BUSY_MASK 0x00000001L
#define VCE_STATUS__JOB_BUSY__SHIFT 0x00000000
#define VCE_STATUS__UENC_BUSY_MASK 0x00000100L
@@ -95,5 +97,13 @@
#define VCE_VCPU_CNTL__CLK_EN__SHIFT 0x00000000
#define VCE_VCPU_CNTL__RBBM_SOFT_RESET_MASK 0x00040000L
#define VCE_VCPU_CNTL__RBBM_SOFT_RESET__SHIFT 0x00000012
+#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_MASK 0x00010000
+#define VCE_CLOCK_GATING_A__CGC_DYN_CLOCK_MODE_SHIFT 0x00000010
+#define VCE_FW_REG_STATUS__BUSY_MASK 0x0000001
+#define VCE_FW_REG_STATUS__BUSY__SHIFT 0x0000001
+#define VCE_FW_REG_STATUS__PASS_MASK 0x0000008
+#define VCE_FW_REG_STATUS__PASS__SHIFT 0x0000003
+#define VCE_FW_REG_STATUS__DONE_MASK 0x0000800
+#define VCE_FW_REG_STATUS__DONE__SHIFT 0x000000b
#endif
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 5c86423c2e92..3d083010e734 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -211,7 +211,7 @@ atom_bios_string = "ATOM"
};
*/
-#pragma pack(1) /* BIOS data must use byte aligment*/
+#pragma pack(1) /* BIOS data must use byte alignment*/
enum atombios_image_offset{
OFFSET_TO_ATOM_ROM_HEADER_POINTER = 0x00000048,
@@ -255,8 +255,8 @@ struct atom_rom_header_v2_2
uint16_t subsystem_vendor_id;
uint16_t subsystem_id;
uint16_t pci_info_offset;
- uint16_t masterhwfunction_offset; //Offest for SW to get all command function offsets, Don't change the position
- uint16_t masterdatatable_offset; //Offest for SW to get all data table offsets, Don't change the position
+ uint16_t masterhwfunction_offset; //Offset for SW to get all command function offsets, Don't change the position
+ uint16_t masterdatatable_offset; //Offset for SW to get all data table offsets, Don't change the position
uint16_t reserved;
uint32_t pspdirtableoffset;
};
@@ -453,7 +453,7 @@ struct atom_dtd_format
uint8_t refreshrate;
};
-/* atom_dtd_format.modemiscinfo defintion */
+/* atom_dtd_format.modemiscinfo definition */
enum atom_dtd_format_modemiscinfo{
ATOM_HSYNC_POLARITY = 0x0002,
ATOM_VSYNC_POLARITY = 0x0004,
@@ -678,7 +678,7 @@ struct lcd_info_v2_1
uint32_t reserved1[8];
};
-/* lcd_info_v2_1.panel_misc defintion */
+/* lcd_info_v2_1.panel_misc definition */
enum atom_lcd_info_panel_misc{
ATOM_PANEL_MISC_FPDI =0x0002,
};
@@ -716,7 +716,7 @@ enum atom_gpio_pin_assignment_gpio_id {
/* gpio_id pre-define id for multiple usage */
/* GPIO use to control PCIE_VDDC in certain SLT board */
PCIE_VDDC_CONTROL_GPIO_PINID = 56,
- /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC swithing feature is enable */
+ /* if PP_AC_DC_SWITCH_GPIO_PINID in Gpio_Pin_LutTable, AC/DC switching feature is enable */
PP_AC_DC_SWITCH_GPIO_PINID = 60,
/* VDDC_REGULATOR_VRHOT_GPIO_PINID in Gpio_Pin_LutTable, VRHot feature is enable */
VDDC_VRHOT_GPIO_PINID = 61,
@@ -734,7 +734,7 @@ enum atom_gpio_pin_assignment_gpio_id {
struct atom_gpio_pin_lut_v2_1
{
struct atom_common_table_header table_header;
- /*the real number of this included in the structure is calcualted by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
+ /*the real number of this included in the structure is calculated by using the (whole structure size - the header size)/size of atom_gpio_pin_lut */
struct atom_gpio_pin_assignment gpio_pin[];
};
@@ -997,7 +997,7 @@ enum atom_connector_layout_info_mini_type_def {
enum atom_display_device_tag_def{
ATOM_DISPLAY_LCD1_SUPPORT = 0x0002, //an embedded display is either an LVDS or eDP signal type of display
- ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compability
+ ATOM_DISPLAY_LCD2_SUPPORT = 0x0020, //second edp device tag 0x0020 for backward compatibility
ATOM_DISPLAY_DFP1_SUPPORT = 0x0008,
ATOM_DISPLAY_DFP2_SUPPORT = 0x0080,
ATOM_DISPLAY_DFP3_SUPPORT = 0x0200,
@@ -1011,7 +1011,7 @@ struct atom_display_object_path_v2
{
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t extencoderobjid; //2nd encoder after the first encoder, from the connector point of view;
uint16_t encoder_recordoffset;
uint16_t extencoder_recordoffset;
@@ -1023,7 +1023,7 @@ struct atom_display_object_path_v2
struct atom_display_object_path_v3 {
uint16_t display_objid; //Connector Object ID or Misc Object ID
uint16_t disp_recordoffset;
- uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or intenal encoder
+ uint16_t encoderobjid; //first encoder closer to the connector, could be either an external or internal encoder
uint16_t reserved1; //only on USBC case, otherwise always = 0
uint16_t reserved2; //reserved and always = 0
uint16_t reserved3; //reserved and always = 0
@@ -3547,7 +3547,7 @@ struct atom_voltage_object_header_v4{
enum atom_voltage_object_mode
{
VOLTAGE_OBJ_GPIO_LUT = 0, //VOLTAGE and GPIO Lookup table ->atom_gpio_voltage_object_v4
- VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequece through I2C -> atom_i2c_voltage_object_v4
+ VOLTAGE_OBJ_VR_I2C_INIT_SEQ = 3, //VOLTAGE REGULATOR INIT sequence through I2C -> atom_i2c_voltage_object_v4
VOLTAGE_OBJ_PHASE_LUT = 4, //Set Vregulator Phase lookup table ->atom_gpio_voltage_object_v4
VOLTAGE_OBJ_SVID2 = 7, //Indicate voltage control by SVID2 ->atom_svid2_voltage_object_v4
VOLTAGE_OBJ_EVV = 8,
@@ -3585,7 +3585,7 @@ struct atom_gpio_voltage_object_v4
{
struct atom_voltage_object_header_v4 header; // voltage mode = VOLTAGE_OBJ_GPIO_LUT or VOLTAGE_OBJ_PHASE_LUT
uint8_t gpio_control_id; // default is 0 which indicate control through CG VID mode
- uint8_t gpio_entry_num; // indiate the entry numbers of Votlage/Gpio value Look up table
+ uint8_t gpio_entry_num; // indicate the entry numbers of Voltage/Gpio value Look up table
uint8_t phase_delay_us; // phase delay in unit of micro second
uint8_t reserved;
uint32_t gpio_mask_val; // GPIO Mask value
@@ -4507,8 +4507,8 @@ struct amd_acpi_description_header{
struct uefi_acpi_vfct{
struct amd_acpi_description_header sheader;
uint8_t tableUUID[16]; //0x24
- uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the stucture.
- uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the stucture.
+ uint32_t vbiosimageoffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
+ uint32_t lib1Imageoffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
uint32_t reserved[4]; //0x3C
};
@@ -4540,7 +4540,7 @@ struct gop_lib1_content {
/*
***************************************************************************
Scratch Register definitions
- Each number below indicates which scratch regiser request, Active and
+ Each number below indicates which scratch register request, Active and
Connect all share the same definitions as display_device_tag defines
***************************************************************************
*/
diff --git a/drivers/gpu/drm/amd/include/dm_pp_interface.h b/drivers/gpu/drm/amd/include/dm_pp_interface.h
index acd1cef61b7c..349544504c93 100644
--- a/drivers/gpu/drm/amd/include/dm_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/dm_pp_interface.h
@@ -65,6 +65,7 @@ struct single_display_configuration {
uint32_t view_resolution_cy;
enum amd_pp_display_config_type displayconfigtype;
uint32_t vertical_refresh; /* for active display */
+ uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */
};
#define MAX_NUM_DISPLAY 32
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
index 64b553e7de1a..e7fdcee22a71 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: MIT */
/*
* Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index e2b1ea7467b0..2366e68262e6 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -30,6 +30,12 @@ extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
extern const struct amdgpu_ip_block_version smu_v14_0_ip_block;
+enum smu_temp_metric_type {
+ SMU_TEMP_METRIC_BASEBOARD,
+ SMU_TEMP_METRIC_GPUBOARD,
+ SMU_TEMP_METRIC_MAX,
+};
+
enum smu_event_type {
SMU_EVENT_RESET_COMPLETE = 0,
};
@@ -156,6 +162,10 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
AMDGPU_PP_SENSOR_VCN_LOAD,
+ AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
+ AMDGPU_PP_SENSOR_NODEPOWER,
+ AMDGPU_PP_SENSOR_GPPTRESIDENCY,
+ AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
};
enum amd_pp_task {
@@ -444,7 +454,7 @@ struct amd_pm_funcs {
bool gate,
int inst);
int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id);
- int (*set_power_limit)(void *handle, uint32_t n);
+ int (*set_power_limit)(void *handle, uint32_t limit_type, uint32_t n);
int (*get_power_limit)(void *handle, uint32_t *limit,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type);
@@ -496,6 +506,8 @@ struct amd_pm_funcs {
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
+ ssize_t (*get_temp_metrics)(void *handle, enum smu_temp_metric_type type, void *table);
+ bool (*temp_metrics_is_supported)(void *handle, enum smu_temp_metric_type type);
ssize_t (*get_xcp_metrics)(void *handle, int xcp_id, void *table);
ssize_t (*get_pm_metrics)(void *handle, void *pmmetrics, size_t size);
int (*set_watermarks_for_clock_ranges)(void *handle,
@@ -520,6 +532,110 @@ struct metrics_table_header {
uint8_t content_revision;
};
+enum amdgpu_metrics_attr_id {
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HOTSPOT,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_MEM,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_VRSOC,
+ AMDGPU_METRICS_ATTR_ID_CURR_SOCKET_POWER,
+ AMDGPU_METRICS_ATTR_ID_AVERAGE_GFX_ACTIVITY,
+ AMDGPU_METRICS_ATTR_ID_AVERAGE_UMC_ACTIVITY,
+ AMDGPU_METRICS_ATTR_ID_MEM_MAX_BANDWIDTH,
+ AMDGPU_METRICS_ATTR_ID_ENERGY_ACCUMULATOR,
+ AMDGPU_METRICS_ATTR_ID_SYSTEM_CLOCK_COUNTER,
+ AMDGPU_METRICS_ATTR_ID_ACCUMULATION_COUNTER,
+ AMDGPU_METRICS_ATTR_ID_PROCHOT_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_PPT_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_SOCKET_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_VR_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_HBM_THM_RESIDENCY_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFXCLK_LOCK_STATUS,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LINK_WIDTH,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LINK_SPEED,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_WIDTH,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_SPEED,
+ AMDGPU_METRICS_ATTR_ID_GFX_ACTIVITY_ACC,
+ AMDGPU_METRICS_ATTR_ID_MEM_ACTIVITY_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_BANDWIDTH_INST,
+ AMDGPU_METRICS_ATTR_ID_PCIE_L0_TO_RECOV_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_REPLAY_ROVER_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_NAK_SENT_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_PCIE_NAK_RCVD_COUNT_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_READ_DATA_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_WRITE_DATA_ACC,
+ AMDGPU_METRICS_ATTR_ID_XGMI_LINK_STATUS,
+ AMDGPU_METRICS_ATTR_ID_FIRMWARE_TIMESTAMP,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_GFXCLK,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_SOCCLK,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_VCLK0,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_DCLK0,
+ AMDGPU_METRICS_ATTR_ID_CURRENT_UCLK,
+ AMDGPU_METRICS_ATTR_ID_NUM_PARTITION,
+ AMDGPU_METRICS_ATTR_ID_PCIE_LC_PERF_OTHER_END_RECOVERY,
+ AMDGPU_METRICS_ATTR_ID_GFX_BUSY_INST,
+ AMDGPU_METRICS_ATTR_ID_JPEG_BUSY,
+ AMDGPU_METRICS_ATTR_ID_VCN_BUSY,
+ AMDGPU_METRICS_ATTR_ID_GFX_BUSY_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_PPT_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
+ AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
+ AMDGPU_METRICS_ATTR_ID_MAX,
+};
+
+enum amdgpu_metrics_attr_type {
+ AMDGPU_METRICS_TYPE_U8,
+ AMDGPU_METRICS_TYPE_S8,
+ AMDGPU_METRICS_TYPE_U16,
+ AMDGPU_METRICS_TYPE_S16,
+ AMDGPU_METRICS_TYPE_U32,
+ AMDGPU_METRICS_TYPE_S32,
+ AMDGPU_METRICS_TYPE_U64,
+ AMDGPU_METRICS_TYPE_S64,
+ AMDGPU_METRICS_TYPE_MAX,
+};
+
+enum amdgpu_metrics_attr_unit {
+ /* None */
+ AMDGPU_METRICS_UNIT_NONE,
+ /* MHz*/
+ AMDGPU_METRICS_UNIT_CLOCK_1,
+ /* Degree Celsius*/
+ AMDGPU_METRICS_UNIT_TEMP_1,
+ /* Watts*/
+ AMDGPU_METRICS_UNIT_POWER_1,
+ /* In nanoseconds*/
+ AMDGPU_METRICS_UNIT_TIME_1,
+ /* In 10 nanoseconds*/
+ AMDGPU_METRICS_UNIT_TIME_2,
+ /* Speed in GT/s */
+ AMDGPU_METRICS_UNIT_SPEED_1,
+ /* Speed in 0.1 GT/s */
+ AMDGPU_METRICS_UNIT_SPEED_2,
+ /* Bandwidth GB/s */
+ AMDGPU_METRICS_UNIT_BW_1,
+ /* Data in KB */
+ AMDGPU_METRICS_UNIT_DATA_1,
+ /* Percentage */
+ AMDGPU_METRICS_UNIT_PERCENT,
+ AMDGPU_METRICS_UNIT_MAX,
+};
+
+#define AMDGPU_METRICS_ATTR_UNIT_MASK 0xFF000000
+#define AMDGPU_METRICS_ATTR_UNIT_SHIFT 24
+#define AMDGPU_METRICS_ATTR_TYPE_MASK 0x00F00000
+#define AMDGPU_METRICS_ATTR_TYPE_SHIFT 20
+#define AMDGPU_METRICS_ATTR_ID_MASK 0x000FFC00
+#define AMDGPU_METRICS_ATTR_ID_SHIFT 10
+#define AMDGPU_METRICS_ATTR_INST_MASK 0x000003FF
+#define AMDGPU_METRICS_ATTR_INST_SHIFT 0
+
+#define AMDGPU_METRICS_ENC_ATTR(unit, type, id, inst) \
+ (((u64)(unit) << AMDGPU_METRICS_ATTR_UNIT_SHIFT) | \
+ ((u64)(type) << AMDGPU_METRICS_ATTR_TYPE_SHIFT) | \
+ ((u64)(id) << AMDGPU_METRICS_ATTR_ID_SHIFT) | (inst))
+
/*
* gpu_metrics_v1_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v1_1 or later instead.
@@ -1209,6 +1325,19 @@ struct gpu_metrics_v1_8 {
uint32_t pcie_lc_perf_other_end_recovery;
};
+struct gpu_metrics_attr {
+ /* Field type encoded with AMDGPU_METRICS_ENC_ATTR */
+ uint64_t attr_encoding;
+ /* Attribute value, depends on attr_encoding */
+ void *attr_value;
+};
+
+struct gpu_metrics_v1_9 {
+ struct metrics_table_header common_header;
+ int attr_count;
+ struct gpu_metrics_attr metrics_attrs[];
+};
+
/*
* gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
* Use gpu_metrics_v2_1 or later instead.
@@ -1595,6 +1724,79 @@ struct amdgpu_pm_metrics {
uint8_t data[];
};
+enum amdgpu_vr_temp {
+ AMDGPU_VDDCR_VDD0_TEMP,
+ AMDGPU_VDDCR_VDD1_TEMP,
+ AMDGPU_VDDCR_VDD2_TEMP,
+ AMDGPU_VDDCR_VDD3_TEMP,
+ AMDGPU_VDDCR_SOC_A_TEMP,
+ AMDGPU_VDDCR_SOC_C_TEMP,
+ AMDGPU_VDDCR_SOCIO_A_TEMP,
+ AMDGPU_VDDCR_SOCIO_C_TEMP,
+ AMDGPU_VDD_085_HBM_TEMP,
+ AMDGPU_VDDCR_11_HBM_B_TEMP,
+ AMDGPU_VDDCR_11_HBM_D_TEMP,
+ AMDGPU_VDD_USR_TEMP,
+ AMDGPU_VDDIO_11_E32_TEMP,
+ AMDGPU_VR_MAX_TEMP_ENTRIES,
+};
+
+enum amdgpu_system_temp {
+ AMDGPU_UBB_FPGA_TEMP,
+ AMDGPU_UBB_FRONT_TEMP,
+ AMDGPU_UBB_BACK_TEMP,
+ AMDGPU_UBB_OAM7_TEMP,
+ AMDGPU_UBB_IBC_TEMP,
+ AMDGPU_UBB_UFPGA_TEMP,
+ AMDGPU_UBB_OAM1_TEMP,
+ AMDGPU_OAM_0_1_HSC_TEMP,
+ AMDGPU_OAM_2_3_HSC_TEMP,
+ AMDGPU_OAM_4_5_HSC_TEMP,
+ AMDGPU_OAM_6_7_HSC_TEMP,
+ AMDGPU_UBB_FPGA_0V72_VR_TEMP,
+ AMDGPU_UBB_FPGA_3V3_VR_TEMP,
+ AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP,
+ AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP,
+ AMDGPU_RETIMER_0_1_0V9_VR_TEMP,
+ AMDGPU_RETIMER_4_5_0V9_VR_TEMP,
+ AMDGPU_RETIMER_2_3_0V9_VR_TEMP,
+ AMDGPU_RETIMER_6_7_0V9_VR_TEMP,
+ AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP,
+ AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP,
+ AMDGPU_IBC_HSC_TEMP,
+ AMDGPU_IBC_TEMP,
+ AMDGPU_SYSTEM_MAX_TEMP_ENTRIES = 32,
+};
+
+enum amdgpu_node_temp {
+ AMDGPU_RETIMER_X_TEMP,
+ AMDGPU_OAM_X_IBC_TEMP,
+ AMDGPU_OAM_X_IBC_2_TEMP,
+ AMDGPU_OAM_X_VDD18_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_B_VR_TEMP,
+ AMDGPU_OAM_X_04_HBM_D_VR_TEMP,
+ AMDGPU_NODE_MAX_TEMP_ENTRIES = 12,
+};
+
+struct amdgpu_gpuboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius: bits 24:31 are the sensor id, bits 0:23 the temp value */
+ uint32_t node_temp[AMDGPU_NODE_MAX_TEMP_ENTRIES];
+ uint32_t vr_temp[AMDGPU_VR_MAX_TEMP_ENTRIES];
+};
+
+struct amdgpu_baseboard_temp_metrics_v1_0 {
+ struct metrics_table_header common_header;
+ uint16_t label_version;
+ uint16_t node_id;
+ uint64_t accumulation_counter;
+ /* Encoded temperature in Celsius: bits 24:31 are the sensor id, bits 0:23 the temp value */
+ uint32_t system_temp[AMDGPU_SYSTEM_MAX_TEMP_ENTRIES];
+};
+
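
Per the comments above, each temperature word packs a sensor id into bits 24:31 and the value into bits 0:23. A decode sketch, with helper macros invented purely for illustration:

    /* Illustrative only: split one encoded temperature word. */
    #define TEMP_SENSOR_ID(v)  (((v) >> 24) & 0xff)
    #define TEMP_VALUE_C(v)    ((v) & 0xffffff)

    u32 word = metrics->node_temp[0];  /* struct amdgpu_gpuboard_temp_metrics_v1_0 *metrics */
    pr_info("sensor %u: %u C\n", TEMP_SENSOR_ID(word), TEMP_VALUE_C(word));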
struct amdgpu_partition_metrics_v1_0 {
struct metrics_table_header common_header;
/* Current clocks (Mhz) */
@@ -1618,4 +1820,10 @@ struct amdgpu_partition_metrics_v1_0 {
uint64_t gfx_below_host_limit_total_acc[MAX_XCC];
};
+struct amdgpu_partition_metrics_v1_1 {
+ struct metrics_table_header common_header;
+ int attr_count;
+ struct gpu_metrics_attr metrics_attrs[];
+};
+
#endif
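
To illustrate the attribute encoding defined earlier in this header (field values chosen arbitrarily for the example), a per-instance GFX busy reading reported as a u32 percentage for instance 2 would be encoded, and later decoded, like so:

    u64 enc = AMDGPU_METRICS_ENC_ATTR(AMDGPU_METRICS_UNIT_PERCENT,
                                      AMDGPU_METRICS_TYPE_U32,
                                      AMDGPU_METRICS_ATTR_ID_GFX_BUSY_INST, 2);

    /* Decoding reverses the masks/shifts: */
    u32 id   = (enc & AMDGPU_METRICS_ATTR_ID_MASK) >> AMDGPU_METRICS_ATTR_ID_SHIFT;
    u32 inst = enc & AMDGPU_METRICS_ATTR_INST_MASK;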
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 15680c3f4970..f9629d42ada2 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -238,7 +238,8 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t enable_mes_sch_stb_log : 1;
uint32_t limit_single_process : 1;
uint32_t is_strix_tmz_wa_enabled :1;
- uint32_t reserved : 13;
+ uint32_t enable_lr_compute_wa : 1;
+ uint32_t reserved : 12;
};
uint32_t uint32_t_all;
};
@@ -344,7 +345,8 @@ union MESAPI__REMOVE_QUEUE {
uint32_t unmap_kiq_utility_queue : 1;
uint32_t preempt_legacy_gfx_queue : 1;
uint32_t unmap_legacy_queue : 1;
- uint32_t reserved : 28;
+ uint32_t remove_queue_after_reset : 1;
+ uint32_t reserved : 27;
};
struct MES_API_STATUS api_status;
diff --git a/drivers/gpu/drm/amd/include/mes_v12_api_def.h b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
index d85ffab2aff9..2f12cba4eb66 100644
--- a/drivers/gpu/drm/amd/include/mes_v12_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v12_api_def.h
@@ -66,6 +66,7 @@ enum MES_SCH_API_OPCODE {
MES_SCH_API_SET_SE_MODE = 17,
MES_SCH_API_SET_GANG_SUBMIT = 18,
MES_SCH_API_SET_HW_RSRC_1 = 19,
+ MES_SCH_API_INV_TLBS = 20,
MES_SCH_API_MAX = 0xFF
};
@@ -286,7 +287,8 @@ union MESAPI_SET_HW_RESOURCES {
uint32_t limit_single_process : 1;
uint32_t unmapped_doorbell_handling: 2;
uint32_t enable_mes_fence_int: 1;
- uint32_t reserved : 10;
+ uint32_t enable_lr_compute_wa : 1;
+ uint32_t reserved : 9;
};
uint32_t uint32_all;
};
@@ -397,7 +399,8 @@ union MESAPI__REMOVE_QUEUE {
uint32_t unmap_kiq_utility_queue : 1;
uint32_t preempt_legacy_gfx_queue : 1;
uint32_t unmap_legacy_queue : 1;
- uint32_t reserved : 28;
+ uint32_t remove_queue_after_reset : 1;
+ uint32_t reserved : 27;
};
struct MES_API_STATUS api_status;
@@ -870,6 +873,35 @@ union MESAPI__SET_GANG_SUBMIT {
uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
};
+/*
+ * @inv_sel 0 - select pasid as input to do the invalidation, 1 - select vmid
+ * @flush_type 0 - old style, 1 - lightweight, 2 - heavyweight, 3 - heavyweight2
+ * @inv_sel_id specific pasid when inv_sel is 0 and specific vmid if inv_sel is 1
+ * @hub_id 0-gc_hub, 1-mm_hub
+ */
+struct INV_TLBS {
+ uint8_t inv_sel;
+ uint8_t flush_type;
+ uint16_t inv_sel_id;
+ uint32_t hub_id;
+ /* If the following two inv_range settings are both 0, the whole VM will be
+ * invalidated, otherwise only the required range is invalidated.
+ */
+ uint64_t inv_range_va_start;
+ uint64_t inv_range_size;
+ uint64_t reserved;
+};
+
+union MESAPI__INV_TLBS {
+ struct {
+ union MES_API_HEADER header;
+ struct MES_API_STATUS api_status;
+ struct INV_TLBS invalidate_tlbs;
+ };
+
+ uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS];
+};
+
#pragma pack(pop)
#endif
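
A sketch of how the new TLB-invalidation frame might be filled, based only on the field comments above; the API header setup and MES ring submission are driver-internal and omitted, and pasid is a placeholder variable:

    union MESAPI__INV_TLBS api = {0};

    api.invalidate_tlbs.inv_sel = 0;        /* select by pasid */
    api.invalidate_tlbs.flush_type = 2;     /* heavyweight */
    api.invalidate_tlbs.inv_sel_id = pasid; /* pasid to invalidate */
    api.invalidate_tlbs.hub_id = 0;         /* gc_hub */
    /* inv_range_va_start/inv_range_size left at 0: invalidate the whole VM */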
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 71d986dd7a6e..79b174e5326d 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -195,24 +195,6 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
return ret;
}
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
-{
- int ret = 0;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (pp_funcs && pp_funcs->notify_rlc_state) {
- mutex_lock(&adev->pm.mutex);
-
- ret = pp_funcs->notify_rlc_state(
- adev->powerplay.pp_handle,
- en);
-
- mutex_unlock(&adev->pm.mutex);
- }
-
- return ret;
-}
-
int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -764,10 +746,6 @@ int amdgpu_dpm_send_rma_reason(struct amdgpu_device *adev)
ret = smu_send_rma_reason(smu);
mutex_unlock(&adev->pm.mutex);
- if (adev->cper.enabled)
- if (amdgpu_cper_generate_bp_threshold_record(adev))
- dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
-
return ret;
}
@@ -824,6 +802,21 @@ int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask)
return ret;
}
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool ret;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_reset_vcn_is_supported(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
enum pp_clock_type type,
uint32_t *min,
@@ -1194,8 +1187,11 @@ int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (!pp_funcs->get_pp_table)
- return 0;
+ if (!table)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev) || !pp_funcs->get_pp_table || adev->scpm_enabled)
+ return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
@@ -1605,6 +1601,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
}
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+ uint32_t limit_type,
uint32_t limit)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
@@ -1615,7 +1612,7 @@ int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
- limit);
+ limit_type, limit);
mutex_unlock(&adev->pm.mutex);
return ret;
@@ -1721,7 +1718,10 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (!pp_funcs->set_pp_table)
+ if (!buf || !size)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev) || !pp_funcs->set_pp_table || adev->scpm_enabled)
return -EOPNOTSUPP;
mutex_lock(&adev->pm.mutex);
@@ -2038,6 +2038,66 @@ int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
}
/**
+ * amdgpu_dpm_get_temp_metrics - Retrieve metrics for a specific temperature
+ * type
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ * @table: Pointer to a buffer where the metrics will be stored. If NULL, the
+ * function returns the size of the metrics structure.
+ *
+ * This function retrieves metrics for a specific temperature type. If the
+ * table parameter is NULL, the function returns the size of the metrics
+ * structure without populating it.
+ *
+ * Return: Size of the metrics structure on success, or a negative error code on failure.
+ */
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret;
+
+ if (!pp_funcs->get_temp_metrics ||
+ !amdgpu_dpm_is_temp_metrics_supported(adev, type))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_temp_metrics(adev->powerplay.pp_handle, type, table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+/**
+ * amdgpu_dpm_is_temp_metrics_supported - Check whether a specific temperature
+ * metrics type is supported
+ * @adev: Pointer to the device.
+ * @type: Identifier for the temperature type metrics to be fetched.
+ *
+ * This function reports whether the given temperature metrics type is supported.
+ *
+ * Return: True if the metrics type is supported, false otherwise.
+ */
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ bool support_temp_metrics = false;
+
+ if (!pp_funcs->temp_metrics_is_supported)
+ return support_temp_metrics;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_temp_metrics =
+ pp_funcs->temp_metrics_is_supported(adev->powerplay.pp_handle, type);
+ mutex_unlock(&adev->pm.mutex);
+ }
+
+ return support_temp_metrics;
+}
+
+/**
* amdgpu_dpm_get_xcp_metrics - Retrieve metrics for a specific compute
* partition
* @adev: Pointer to the device.
@@ -2068,3 +2128,10 @@ ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
return ret;
}
+
+const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev)
+{
+ void *pp_handle = adev->powerplay.pp_handle;
+
+ return smu_get_ras_smu_driver(pp_handle);
+}
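
The kerneldoc above implies a two-step calling convention for amdgpu_dpm_get_temp_metrics(): query the size with a NULL table, then fetch into a caller-owned buffer. A minimal sketch, error handling trimmed:

    ssize_t size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);

    if (size > 0) {
        void *table = kzalloc(size, GFP_KERNEL);

        if (table)
            amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, table);
    }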
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
index 42efe838fa85..b5e9c3ecf703 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
@@ -27,69 +27,69 @@
#include "amdgpu_smu.h"
#include "amdgpu_dpm_internal.h"
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev)
{
struct drm_device *ddev = adev_to_drm(adev);
+ struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ struct single_display_configuration *display_cfg;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_connector *conn;
+ int num_crtcs = 0;
+ int vrefresh;
+ u32 vblank_in_pixels, vblank_time_us;
+
+ cfg->min_vblank_time = 0xffffffff; /* if the displays are off, vblank time is max */
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
+ list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
-}
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vblank_in_pixels;
- u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+ /* The array should only contain active displays. */
+ if (!amdgpu_crtc->enabled)
+ continue;
+
+ conn = to_amdgpu_connector(amdgpu_crtc->connector);
+ display_cfg = &adev->pm.pm_display_cfg.displays[num_crtcs++];
+
+ if (amdgpu_crtc->hw_mode.clock) {
+ vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
vblank_in_pixels =
amdgpu_crtc->hw_mode.crtc_htotal *
(amdgpu_crtc->hw_mode.crtc_vblank_end -
amdgpu_crtc->hw_mode.crtc_vdisplay +
(amdgpu_crtc->v_border * 2));
- vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- break;
- }
- }
- }
+ vblank_time_us =
+ vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- return vblank_time_us;
-}
+ /* The legacy (non-DC) code has issues with mclk switching
+ * with refresh rates over 120 Hz. Disable mclk switching.
+ */
+ if (vrefresh > 120)
+ vblank_time_us = 0;
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vrefresh = 0;
+ /* Find minimum vblank time. */
+ if (vblank_time_us < cfg->min_vblank_time)
+ cfg->min_vblank_time = vblank_time_us;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- break;
+ /* Find vertical refresh rate of first active display. */
+ if (!cfg->vrefresh)
+ cfg->vrefresh = vrefresh;
}
+
+ if (amdgpu_crtc->crtc_id < cfg->crtc_index) {
+ /* Find first active CRTC and its line time. */
+ cfg->crtc_index = amdgpu_crtc->crtc_id;
+ cfg->line_time_in_us = amdgpu_crtc->line_time;
+ }
+
+ display_cfg->controller_id = amdgpu_crtc->crtc_id;
+ display_cfg->pixel_clock = conn->pixelclock_for_modeset;
}
}
- return vrefresh;
+ cfg->display_clk = adev->clock.default_dispclk;
+ cfg->num_display = num_crtcs;
}
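
As a worked example of the vblank-time arithmetic above (timings assumed, typical 1080p60 CEA values): with crtc_htotal = 2200, 45 vblank lines and a 148500 kHz pixel clock, vblank_in_pixels = 2200 * 45 = 99000, so vblank_time_us = 99000 * 1000 / 148500 = 666 us, comfortably above the 450 us GDDR5 mclk-switch limit checked by si_dpm_vblank_too_short() later in this series.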
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5fbfe7333b54..65296a819e6a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -108,11 +108,13 @@ const char * const amdgpu_pp_profile_name[] = {
static int amdgpu_pm_dev_state_check(struct amdgpu_device *adev, bool runpm)
{
bool runpm_check = runpm ? adev->in_runpm : false;
+ bool full_init = (adev->init_lvl->level == AMDGPU_INIT_LEVEL_DEFAULT);
+
+ if (amdgpu_in_reset(adev) || !full_init)
+ return -EBUSY;
- if (amdgpu_in_reset(adev))
- return -EPERM;
if (adev->in_suspend && !runpm_check)
- return -EPERM;
+ return -EBUSY;
return 0;
}
@@ -172,7 +174,6 @@ static int amdgpu_pm_get_access_if_active(struct amdgpu_device *adev)
*/
static inline void amdgpu_pm_put_access(struct amdgpu_device *adev)
{
- pm_runtime_mark_last_busy(adev->dev);
pm_runtime_put_autosuspend(adev->dev);
}
@@ -1420,9 +1421,9 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return -EINVAL;
}
-static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev,
- enum amd_pp_sensors sensor,
- void *query)
+static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev,
+ enum amd_pp_sensors sensor,
+ void *query)
{
int r, size = sizeof(uint32_t);
@@ -1455,7 +1456,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value);
if (r)
return r;
@@ -1479,7 +1480,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value);
if (r)
return r;
@@ -1503,7 +1504,7 @@ static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
unsigned int value;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
if (r)
return r;
@@ -1782,7 +1783,7 @@ static int amdgpu_show_powershift_percent(struct device *dev,
uint32_t ss_power;
int r = 0, i;
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
if (r == -EOPNOTSUPP) {
/* sensor not available on dGPU, try to read from APU */
adev = NULL;
@@ -1795,7 +1796,7 @@ static int amdgpu_show_powershift_percent(struct device *dev,
}
mutex_unlock(&mgpu_info.mutex);
if (adev)
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power);
}
if (r)
@@ -1905,11 +1906,11 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (!amdgpu_device_supports_smart_shift(adev))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
- (void *)&ss_power))
+ else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
- else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
- (void *)&ss_power))
+ else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+ (void *)&ss_power))
*states = ATTR_STATE_UNSUPPORTED;
return 0;
@@ -2073,6 +2074,265 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
return 0;
}
+/**
+ * DOC: board
+ *
+ * Certain SOCs can support reporting of various board attributes. This is useful
+ * for user applications to monitor various board related attributes.
+ *
+ * The amdgpu driver provides a sysfs API for reporting board attributes. Presently,
+ * seven types of attributes are reported. Baseboard temperature and
+ * gpu board temperature are reported as binary files. Npm status, current node power limit,
+ * max node power limit, node power and global ppt residency are reported as ASCII text files.
+ *
+ * .. code-block:: console
+ *
+ * hexdump /sys/bus/pci/devices/.../board/baseboard_temp
+ *
+ * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp
+ *
+ * hexdump /sys/bus/pci/devices/.../board/npm_status
+ *
+ * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit
+ *
+ * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit
+ *
+ * hexdump /sys/bus/pci/devices/.../board/node_power
+ *
+ * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid
+ */
+
+/**
+ * DOC: baseboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current baseboard
+ * temperature metrics data. The file baseboard_temp is used for this.
+ * Reading the file will dump all the current baseboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_baseboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_BASEBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+/**
+ * DOC: gpuboard_temp
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current gpuboard
+ * temperature metrics data. The file gpuboard_temp is used for this.
+ * Reading the file will dump all the current gpuboard temperature metrics data.
+ */
+static ssize_t amdgpu_get_gpuboard_temp_metrics(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t size;
+ int ret;
+
+ ret = amdgpu_pm_get_access_if_active(adev);
+ if (ret)
+ return ret;
+
+ size = amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, NULL);
+ if (size <= 0)
+ goto out;
+ if (size >= PAGE_SIZE) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ amdgpu_dpm_get_temp_metrics(adev, SMU_TEMP_METRIC_GPUBOARD, buf);
+
+out:
+ amdgpu_pm_put_access(adev);
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+/**
+ * DOC: cur_node_power_limit
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power limit.
+ * The file cur_node_power_limit is used for this.
+ */
+static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 nplimit;
+ int r;
+
+ /* get the current node power limit */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT,
+ (void *)&nplimit);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", nplimit);
+}
+
+/**
+ * DOC: node_power
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power.
+ * The file node_power is used for this.
+ */
+static ssize_t amdgpu_show_node_power(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 npower;
+ int r;
+
+ /* get the node power */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
+ (void *)&npower);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", npower);
+}
+
+/**
+ * DOC: npm_status
+ *
+ * The amdgpu driver provides a sysfs API for retrieving current node power management status.
+ * The file npm_status is used for this. It shows the status as enabled or disabled based on
+ * the current node power value. If node power is zero, the status is disabled, otherwise enabled.
+ */
+static ssize_t amdgpu_show_npm_status(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 npower;
+ int r;
+
+ /* get the node power */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER,
+ (void *)&npower);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled");
+}
+
+/**
+ * DOC: global_ppt_resid
+ *
+ * The amdgpu driver provides a sysfs API for retrieving global ppt residency.
+ * The file global_ppt_resid is used for this.
+ */
+static ssize_t amdgpu_show_global_ppt_resid(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 gpptresid;
+ int r;
+
+ /* get the global ppt residency */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY,
+ (void *)&gpptresid);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", gpptresid);
+}
+
+/**
+ * DOC: max_node_power_limit
+ *
+ * The amdgpu driver provides a sysfs API for retrieving maximum node power limit.
+ * The file max_node_power_limit is used for this.
+ */
+static ssize_t amdgpu_show_max_node_power_limit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ u32 max_nplimit;
+ int r;
+
+ /* get the max node power limit */
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
+ (void *)&max_nplimit);
+ if (r)
+ return r;
+
+ return sysfs_emit(buf, "%u\n", max_nplimit);
+}
+
+static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL);
+static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL);
+static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL);
+static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL);
+static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL);
+static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL);
+static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL);
+
+static struct attribute *board_attrs[] = {
+ &dev_attr_baseboard_temp.attr,
+ &dev_attr_gpuboard_temp.attr,
+ NULL
+};
+
+static umode_t amdgpu_board_attr_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (attr == &dev_attr_baseboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_BASEBOARD))
+ return 0;
+ }
+
+ if (attr == &dev_attr_gpuboard_temp.attr) {
+ if (!amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD))
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+const struct attribute_group amdgpu_board_attr_group = {
+ .name = "board",
+ .attrs = board_attrs,
+ .is_visible = amdgpu_board_attr_visible,
+};
+
/* pm policy attributes */
struct amdgpu_pm_policy_attr {
struct device_attribute dev_attr;
@@ -2246,7 +2506,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
.attr_update = pp_dpm_clk_default_attr_update),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
@@ -2378,6 +2638,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
-EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_table)) {
+ int ret;
+ char *tmp = NULL;
+
+ ret = amdgpu_dpm_get_pp_table(adev, &tmp);
+ if (ret == -EOPNOTSUPP || !tmp)
+ *states = ATTR_STATE_UNSUPPORTED;
+ else
+ *states = ATTR_STATE_SUPPORTED;
}
switch (gc_ver) {
@@ -2507,18 +2776,18 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
switch (channel) {
case PP_TEMP_JUNCTION:
/* get current junction temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_EDGE:
/* get current edge temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
+ (void *)&temp);
break;
case PP_TEMP_MEM:
/* get current memory temperature */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
- (void *)&temp);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
+ (void *)&temp);
break;
default:
r = -EINVAL;
@@ -2780,8 +3049,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 min_rpm = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
- (void *)&min_rpm);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+ (void *)&min_rpm);
if (r)
return r;
@@ -2797,8 +3066,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 max_rpm = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
- (void *)&max_rpm);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ (void *)&max_rpm);
if (r)
return r;
@@ -2931,8 +3200,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
int r;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
- (void *)&vddgfx);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX,
+ (void *)&vddgfx);
if (r)
return r;
@@ -2948,8 +3217,8 @@ static ssize_t amdgpu_hwmon_show_vddboard(struct device *dev,
int r;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
- (void *)&vddboard);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&vddboard);
if (r)
return r;
@@ -2982,8 +3251,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
return -EINVAL;
/* get the voltage */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
- (void *)&vddnb);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB,
+ (void *)&vddnb);
if (r)
return r;
@@ -3005,7 +3274,7 @@ static int amdgpu_hwmon_get_power(struct device *dev,
u32 query = 0;
int r;
- r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query);
+ r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query);
if (r)
return r;
@@ -3112,7 +3381,9 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
"fastPPT" : "slowPPT");
else
- return sysfs_emit(buf, "PPT\n");
+ return sysfs_emit(buf, "%s\n",
+ to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
+ "PPT1" : "PPT");
}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
@@ -3125,21 +3396,17 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
-
err = kstrtou32(buf, 10, &value);
if (err)
return err;
value = value / 1000000; /* convert to Watt */
- value |= limit_type << 24;
err = amdgpu_pm_get_access(adev);
if (err < 0)
return err;
- err = amdgpu_dpm_set_power_limit(adev, value);
+ err = amdgpu_dpm_set_power_limit(adev, limit_type, value);
amdgpu_pm_put_access(adev);
@@ -3158,8 +3425,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
int r;
/* get the sclk */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
- (void *)&sclk);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
+ (void *)&sclk);
if (r)
return r;
@@ -3182,8 +3449,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
int r;
/* get the sclk */
- r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
- (void *)&mclk);
+ r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
+ (void *)&mclk);
if (r)
return r;
@@ -3321,7 +3588,6 @@ static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_m
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
-static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
@@ -3370,7 +3636,6 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_cap.dev_attr.attr,
&sensor_dev_attr_power1_cap_default.dev_attr.attr,
&sensor_dev_attr_power1_label.dev_attr.attr,
- &sensor_dev_attr_power2_average.dev_attr.attr,
&sensor_dev_attr_power2_cap_max.dev_attr.attr,
&sensor_dev_attr_power2_cap_min.dev_attr.attr,
&sensor_dev_attr_power2_cap.dev_attr.attr,
@@ -3469,6 +3734,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
}
+ if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr &&
+ amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
+ effective_mode |= S_IWUSR;
+
/* not implemented yet for APUs having < GC 9.3.0 (Renoir) */
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) &&
@@ -3477,10 +3746,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* not all products support both average and instantaneous */
if (attr == &sensor_dev_attr_power1_average.dev_attr.attr &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
if (attr == &sensor_dev_attr_power1_input.dev_attr.attr &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
/* hide max/min values if we can't both query and manage the fan */
@@ -3519,8 +3790,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
/* only few boards support vddboard */
if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr ||
attr == &sensor_dev_attr_in2_label.dev_attr.attr) &&
- amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
- (void *)&tmp) == -EOPNOTSUPP)
+ amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD,
+ (void *)&tmp) == -EOPNOTSUPP)
return 0;
/* no mclk on APUs other than gc 9,4,3*/
@@ -3563,13 +3834,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* only Vangogh has fast PPT limit and power labels */
- if (!(gc_ver == IP_VERSION(10, 3, 1)) &&
- (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
+ if ((attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_label.dev_attr.attr))
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr) &&
+ (amdgpu_dpm_get_power_limit(adev, &tmp,
+ PP_PWR_LIMIT_MAX,
+ PP_PWR_TYPE_FAST) == -EOPNOTSUPP))
return 0;
return effective_mode;
@@ -4402,6 +4674,7 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
enum amdgpu_sriov_vf_mode mode;
uint32_t mask = 0;
+ uint32_t tmp;
int ret;
if (adev->pm.sysfs_initialized)
@@ -4460,7 +4733,29 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
ret = devm_device_add_group(adev->dev,
&amdgpu_pm_policy_attr_group);
if (ret)
- goto err_out0;
+ goto err_out1;
+ }
+
+ if (amdgpu_dpm_is_temp_metrics_supported(adev, SMU_TEMP_METRIC_GPUBOARD)) {
+ ret = devm_device_add_group(adev->dev,
+ &amdgpu_board_attr_group);
+ if (ret)
+ goto err_out1;
+ if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT,
+ (void *)&tmp) != -EOPNOTSUPP) {
+ sysfs_add_file_to_group(&adev->dev->kobj,
+ &dev_attr_cur_node_power_limit.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj,
+ &dev_attr_max_node_power_limit.attr,
+ amdgpu_board_attr_group.name);
+ sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr,
+ amdgpu_board_attr_group.name);
+ }
}
adev->pm.sysfs_initialized = true;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 768317ee1486..aa3f427819a0 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -263,10 +263,6 @@ struct amdgpu_dpm {
u32 voltage_response_time;
u32 backbias_response_time;
void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
struct amdgpu_dpm_dynamic_state dyn_state;
struct amdgpu_dpm_fan fan;
u32 tdp_limit;
@@ -428,8 +424,6 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
enum pp_mp1_state mp1_state);
-int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
-
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
@@ -526,6 +520,8 @@ int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
ssize_t amdgpu_dpm_get_xcp_metrics(struct amdgpu_device *adev, int xcp_id,
void *table);
+ssize_t amdgpu_dpm_get_temp_metrics(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type, void *table);
/**
* @get_pm_metrics: Get one snapshot of power management metrics from PMFW. The
@@ -555,7 +551,7 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
enum pp_power_limit_level pp_limit_level,
enum pp_power_type power_type);
int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
- uint32_t limit);
+ uint32_t limit_type, uint32_t limit);
int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev);
int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
struct seq_file *m);
@@ -613,5 +609,9 @@ ssize_t amdgpu_dpm_get_pm_policy_info(struct amdgpu_device *adev,
int amdgpu_dpm_reset_sdma(struct amdgpu_device *adev, uint32_t inst_mask);
bool amdgpu_dpm_reset_sdma_is_supported(struct amdgpu_device *adev);
int amdgpu_dpm_reset_vcn(struct amdgpu_device *adev, uint32_t inst_mask);
+bool amdgpu_dpm_reset_vcn_is_supported(struct amdgpu_device *adev);
+bool amdgpu_dpm_is_temp_metrics_supported(struct amdgpu_device *adev,
+ enum smu_temp_metric_type type);
+const struct ras_smu_drv *amdgpu_dpm_get_ras_smu_driver(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
index 5c2a89f0d5d5..cc6d7ba040e9 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
@@ -23,10 +23,6 @@
#ifndef __AMDGPU_DPM_INTERNAL_H__
#define __AMDGPU_DPM_INTERNAL_H__
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
-
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 307ebf7e3226..33eb85dd68e9 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -2299,7 +2299,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->sys_info.nb_dpm_enable) {
force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
+ pi->video_start || (adev->pm.pm_display_cfg.num_display >= 3) ||
pi->disable_nb_ps3_in_battery;
ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
ps->dpm0_pg_nb_ps_hi = 0x2;
@@ -2358,7 +2358,7 @@ static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
return 0;
force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
- (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+ (adev->pm.pm_display_cfg.num_display >= 3) || pi->video_start);
if (force_high) {
for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index ea3ace882a10..c7ed0b457129 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -771,8 +771,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
int i;
struct amdgpu_ps *ps;
u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ bool single_display = adev->pm.pm_display_cfg.num_display < 2;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
@@ -945,9 +944,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
amdgpu_dpm_post_set_power_state(adev);
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
if (pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -968,7 +964,8 @@ void amdgpu_legacy_dpm_compute_clocks(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- amdgpu_dpm_get_active_displays(adev);
+ if (!adev->dc_enabled)
+ amdgpu_dpm_get_display_cfg(adev);
amdgpu_dpm_change_power_state_locked(adev);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 52e732be59e3..1f539cc65f41 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -2558,18 +2558,13 @@ static int si_enable_power_containment(struct amdgpu_device *adev,
if (enable) {
if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
- if (smc_result != PPSMC_Result_OK) {
+ if (smc_result != PPSMC_Result_OK)
ret = -EINVAL;
- ni_pi->pc_enabled = false;
- } else {
- ni_pi->pc_enabled = true;
- }
}
} else {
smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
if (smc_result != PPSMC_Result_OK)
ret = -EINVAL;
- ni_pi->pc_enabled = false;
}
}
@@ -3081,11 +3076,17 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
static bool si_dpm_vblank_too_short(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time;
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
- if (vblank_time < switch_limit)
+ /* Consider zero vblank time too short and disable MCLK switching.
+ * Note that the vblank time is set to maximum when no displays are attached,
+ * so we'll still enable MCLK switching in that case.
+ */
+ if (vblank_time == 0)
+ return true;
+ else if (vblank_time < switch_limit)
return true;
else
return false;
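
Behaviorally, the new checks collapse to a single expression; an equivalent sketch (not the patch's literal form):

	/* Zero vblank time counts as too short; otherwise compare against
	 * the GDDR5-only switch limit computed above.
	 */
	return vblank_time == 0 || vblank_time < switch_limit;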
@@ -3441,6 +3442,8 @@ static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
{
+ const struct amd_pp_display_configuration *display_cfg =
+ &adev->pm.pm_display_cfg;
struct si_ps *ps = si_get_ps(rps);
struct amdgpu_clock_and_voltage_limits *max_limits;
bool disable_mclk_switching = false;
@@ -3449,6 +3452,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
u16 vddc, vddci, min_vce_voltage = 0;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
u32 max_sclk = 0, max_mclk = 0;
+ u32 high_pixelclock_count = 0;
int i;
if (adev->asic_type == CHIP_HAINAN) {
@@ -3476,6 +3480,35 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
}
}
+ /* We define "high pixelclock" for SI as higher than necessary for 4K 30Hz.
+ * For example, 4K 60Hz and 1080p 144Hz fall into this category.
+ * Find number of such displays connected.
+ */
+ for (i = 0; i < display_cfg->num_display; i++) {
+ /* The array only contains active displays. */
+ if (display_cfg->displays[i].pixel_clock > 297000)
+ high_pixelclock_count++;
+ }
+
+ /* These are ad-hoc fixes for issues observed with SI GPUs.
+ * They are necessary because we don't have something like dce_calcs
+ * for these GPUs to calculate bandwidth requirements.
+ */
+ if (high_pixelclock_count) {
+ /* Work around flickering lines at the bottom edge
+ * of the screen when using a single 4K 60Hz monitor.
+ */
+ disable_mclk_switching = true;
+
+ /* On Oland, we observe some flickering when two 4K 60Hz
+ * displays are connected, possibly because voltage is too low.
+ * Raise the voltage by requiring a higher SCLK.
+ * (Voltage cannot be adjusted independently, only together with SCLK.)
+ */
+ if (high_pixelclock_count > 1 && adev->asic_type == CHIP_OLAND)
+ disable_sclk_switching = true;
+ }
+
if (rps->vce_active) {
rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
@@ -3486,7 +3519,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
rps->ecclk = 0;
}
- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
+ if ((adev->pm.pm_display_cfg.num_display > 1) ||
si_dpm_vblank_too_short(adev))
disable_mclk_switching = true;
@@ -3634,7 +3667,7 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
ps->performance_levels[i].mclk,
max_limits->vddc, &ps->performance_levels[i].vddc);
btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
- adev->clock.current_dispclk,
+ display_cfg->display_clk,
max_limits->vddc, &ps->performance_levels[i].vddc);
}
@@ -4159,16 +4192,16 @@ static void si_program_ds_registers(struct amdgpu_device *adev)
static void si_program_display_gap(struct amdgpu_device *adev)
{
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
u32 tmp, pipe;
- int i;
tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK);
- if (adev->pm.dpm.new_active_crtc_count > 0)
+ if (cfg->num_display > 0)
tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
else
tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT;
- if (adev->pm.dpm.new_active_crtc_count > 1)
+ if (cfg->num_display > 1)
tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
else
tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT;
@@ -4178,17 +4211,8 @@ static void si_program_display_gap(struct amdgpu_device *adev)
tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
- if ((adev->pm.dpm.new_active_crtc_count > 0) &&
- (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
- /* find the first active crtc */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i))
- break;
- }
- if (i == adev->mode_info.num_crtc)
- pipe = 0;
- else
- pipe = i;
+ if (cfg->num_display > 0 && pipe != cfg->crtc_index) {
+ pipe = cfg->crtc_index;
tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
@@ -4199,7 +4223,7 @@ static void si_program_display_gap(struct amdgpu_device *adev)
* This can be a problem on PowerXpress systems or if you want to use the card
* for offscreen rendering or compute if there are no crtcs enabled.
*/
- si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
+ si_notify_smc_display_change(adev, cfg->num_display > 0);
}
static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
@@ -5508,7 +5532,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
(pl->mclk <= pi->mclk_stutter_mode_threshold) &&
!eg_pi->uvd_enabled &&
(RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
- (adev->pm.dpm.new_active_crtc_count <= 2)) {
+ (adev->pm.pm_display_cfg.num_display <= 2)) {
level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
}
@@ -5637,14 +5661,10 @@ static int si_populate_smc_t(struct amdgpu_device *adev,
static int si_disable_ulv(struct amdgpu_device *adev)
{
- struct si_power_info *si_pi = si_get_pi(adev);
- struct si_ulv_param *ulv = &si_pi->ulv;
+ PPSMC_Result r;
- if (ulv->supported)
- return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-
- return 0;
+ r = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
+ return (r == PPSMC_Result_OK) ? 0 : -EINVAL;
}
static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
@@ -5661,7 +5681,7 @@ static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
/* XXX validate against display requirements! */
for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
- if (adev->clock.current_dispclk <=
+ if (adev->pm.pm_display_cfg.display_clk <=
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
if (ulv->pl.vddc <
adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
@@ -5815,39 +5835,36 @@ static int si_upload_ulv_state(struct amdgpu_device *adev)
static int si_upload_smc_data(struct amdgpu_device *adev)
{
- struct amdgpu_crtc *amdgpu_crtc = NULL;
- int i;
+ const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg;
+ u32 crtc_index = 0;
+ u32 mclk_change_block_cp_min = 0;
+ u32 mclk_change_block_cp_max = 0;
- if (adev->pm.dpm.new_active_crtc_count == 0)
- return 0;
+ /* When a display is plugged in, program these so that the SMC
+ * performs MCLK switching when it doesn't cause flickering.
+ * When no display is plugged in, there is no need to restrict
+ * MCLK switching, so program them to zero.
+ */
+ if (cfg->num_display) {
+ crtc_index = cfg->crtc_index;
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
- amdgpu_crtc = adev->mode_info.crtcs[i];
- break;
+ if (cfg->line_time_in_us) {
+ mclk_change_block_cp_min = 200 / cfg->line_time_in_us;
+ mclk_change_block_cp_max = 100 / cfg->line_time_in_us;
}
}
- if (amdgpu_crtc == NULL)
- return 0;
-
- if (amdgpu_crtc->line_time <= 0)
- return 0;
-
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_crtc_index,
- amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_crtc_index,
+ crtc_index);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
- amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
+ mclk_change_block_cp_min);
- if (si_write_smc_soft_register(adev,
- SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
- amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
- return 0;
+ si_write_smc_soft_register(adev,
+ SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
+ mclk_change_block_cp_max);
return 0;
}
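
For intuition about the two thresholds, a worked example with an assumed, purely illustrative line time:

	/* Hypothetical 15 us scanline; integer division yields the
	 * MCLK-change blocking window in display lines.
	 */
	u32 line_time_in_us = 15;		/* illustrative only */
	u32 cp_min = 200 / line_time_in_us;	/* = 13 lines */
	u32 cp_max = 100 / line_time_in_us;	/* = 6 lines */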
@@ -7029,13 +7046,20 @@ static void si_set_vce_clock(struct amdgpu_device *adev,
if ((old_rps->evclk != new_rps->evclk) ||
(old_rps->ecclk != new_rps->ecclk)) {
/* Turn the clocks on when encoding, off otherwise */
+ dev_dbg(adev->dev, "set VCE clocks: %u, %u\n", new_rps->evclk, new_rps->ecclk);
+
if (new_rps->evclk || new_rps->ecclk) {
- /* Place holder for future VCE1.0 porting to amdgpu
- vce_v1_0_enable_mgcg(adev, false, false);*/
+ amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);
+ amdgpu_device_ip_set_clockgating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE);
} else {
- /* Place holder for future VCE1.0 porting to amdgpu
- vce_v1_0_enable_mgcg(adev, true, false);
- amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);*/
+ amdgpu_device_ip_set_powergating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(
+ adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE);
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
}
}
}
@@ -7487,8 +7511,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
pi->pasi = CYPRESS_HASI_DFLT;
pi->vrc = SISLANDS_VRC_DFLT;
- pi->gfx_clock_gating = true;
-
eg_pi->sclk_deep_sleep = true;
si_pi->sclk_deep_sleep_above_low = false;
@@ -7499,7 +7521,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
eg_pi->dynamic_ac_timing = true;
- eg_pi->light_sleep = true;
#if defined(CONFIG_ACPI)
eg_pi->pcie_performance_request =
amdgpu_acpi_is_pcie_performance_request_supported(adev);
@@ -7560,6 +7581,7 @@ static void si_dpm_debugfs_print_current_performance_level(void *handle,
} else {
pl = &ps->performance_levels[current_index];
seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ seq_printf(m, "vce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk);
seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
}
@@ -7954,6 +7976,7 @@ static void si_dpm_print_power_state(void *handle,
amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2);
amdgpu_dpm_dbg_print_cap_info(adev, rps->caps);
drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ drm_dbg(adev_to_drm(adev), "\tvce evclk: %d ecclk: %d\n", rps->evclk, rps->ecclk);
for (i = 0; i < ps->performance_level_count; i++) {
pl = &ps->performance_levels[i];
drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
index 11cb7874a6bb..3aed75fbf913 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
@@ -38,11 +38,7 @@
#define MC_ARB_DRAM_TIMING2_2 0xa00
#define MC_ARB_DRAM_TIMING2_3 0xa01
-#define MAX_NO_OF_MVDD_VALUES 2
-#define MAX_NO_VREG_STEPS 32
#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32
-#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20
#define RV770_ASI_DFLT 1000
#define CYPRESS_HASI_DFLT 400000
#define PCIE_PERF_REQ_PECI_GEN1 2
@@ -51,11 +47,6 @@
#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */
#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */
-#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16
-
-#define RV770_SMC_TABLE_ADDRESS 0xB000
-#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3
-
#define SMC_STROBE_RATIO 0x0F
#define SMC_STROBE_ENABLE 0x10
@@ -64,27 +55,6 @@
#define SMC_MC_RTT_ENABLE 0x04
#define SMC_MC_STUTTER_EN 0x08
-#define RV770_SMC_VOLTAGEMASK_VDDC 0
-#define RV770_SMC_VOLTAGEMASK_MVDD 1
-#define RV770_SMC_VOLTAGEMASK_VDDCI 2
-#define RV770_SMC_VOLTAGEMASK_MAX 4
-
-#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16
-#define NISLANDS_SMC_STROBE_RATIO 0x0F
-#define NISLANDS_SMC_STROBE_ENABLE 0x10
-
-#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01
-#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02
-#define NISLANDS_SMC_MC_RTT_ENABLE 0x04
-#define NISLANDS_SMC_MC_STUTTER_EN 0x08
-
-#define MAX_NO_VREG_STEPS 32
-
-#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
-#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
-#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
-#define NISLANDS_SMC_VOLTAGEMASK_MAX 4
-
#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0
#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1
#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2
@@ -219,32 +189,6 @@ enum si_cac_config_reg_type
SISLANDS_CACCONFIG_MAX
};
-enum si_power_level {
- SI_POWER_LEVEL_LOW = 0,
- SI_POWER_LEVEL_MEDIUM = 1,
- SI_POWER_LEVEL_HIGH = 2,
- SI_POWER_LEVEL_CTXSW = 3,
-};
-
-enum si_td {
- SI_TD_AUTO,
- SI_TD_UP,
- SI_TD_DOWN,
-};
-
-enum si_display_watermark {
- SI_DISPLAY_WATERMARK_LOW = 0,
- SI_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum si_display_gap
-{
- SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- SI_PM_DISPLAY_GAP_VBLANK = 1,
- SI_PM_DISPLAY_GAP_WATERMARK = 2,
- SI_PM_DISPLAY_GAP_IGNORE = 3,
-};
-
extern const struct amdgpu_ip_block_version si_smu_ip_block;
struct ni_leakage_coeffients
@@ -258,56 +202,6 @@ struct ni_leakage_coeffients
u32 t_ref;
};
-struct SMC_Evergreen_MCRegisterAddress
-{
- uint16_t s0;
- uint16_t s1;
-};
-
-typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress;
-
-struct evergreen_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct evergreen_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct SMC_Evergreen_MCRegisterSet
-{
- uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet;
-
-struct SMC_Evergreen_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE];
- SMC_Evergreen_MCRegisterSet data[5];
-};
-
-typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
-
-struct SMC_NIslands_MCRegisterSet
-{
- uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet;
-
-struct ni_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
struct SMC_NIslands_MCRegisterAddress
{
uint16_t s0;
@@ -316,257 +210,20 @@ struct SMC_NIslands_MCRegisterAddress
typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress;
-struct SMC_NIslands_MCRegisters
-{
- uint8_t last;
- uint8_t reserved[3];
- SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
- SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT];
-};
-
-typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters;
-
-struct evergreen_ulv_param {
- bool supported;
- struct rv7xx_pl *pl;
-};
-
-struct evergreen_arb_registers {
- u32 mc_arb_dram_timing;
- u32 mc_arb_dram_timing2;
- u32 mc_arb_rfsh_rate;
- u32 mc_arb_burst_time;
-};
-
-struct at {
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
-};
-
-struct ni_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct RV770_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE;
-
-struct RV770_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE;
-
-
-struct RV730_SMC_MCLK_VALUE
-{
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL2;
- uint32_t vMPLL_FUNC_CNTL3;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE;
-
-struct RV770_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE;
-
-union RV7XX_SMC_MCLK_VALUE
-{
- RV770_SMC_MCLK_VALUE mclk770;
- RV730_SMC_MCLK_VALUE mclk730;
-};
-
-typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE;
-
-struct RV770_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- union{
- uint8_t seqValue;
- uint8_t ACIndex;
- };
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t gen2XSP;
- uint8_t backbias;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- RV770_SMC_SCLK_VALUE sclk;
- RV7XX_SMC_MCLK_VALUE mclk;
- RV770_SMC_VOLTAGE_VALUE vddc;
- RV770_SMC_VOLTAGE_VALUE mvdd;
- RV770_SMC_VOLTAGE_VALUE vddci;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t stateFlags;
- uint8_t padding;
-};
-
-typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL;
-
-struct RV770_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t padding1;
- uint8_t padding2;
- uint8_t padding3;
- RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
-};
-
-typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE;
-
-struct RV770_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE;
-
-struct RV770_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[MAX_NO_VREG_STEPS];
- RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- RV770_SMC_SWSTATE initialState;
- RV770_SMC_SWSTATE ACPIState;
- RV770_SMC_SWSTATE driverState;
- RV770_SMC_SWSTATE ULVState;
-};
-
-typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE;
-
-struct vddc_table_entry {
- u16 vddc;
- u8 vddc_index;
- u8 high_smio;
- u32 low_smio;
-};
-
-struct rv770_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mpll_ad_func_cntl;
- u32 mpll_ad_func_cntl_2;
- u32 mpll_dq_func_cntl;
- u32 mpll_dq_func_cntl_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct rv730_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 mclk_pwrmgt_cntl;
- u32 dll_cntl;
- u32 mpll_func_cntl;
- u32 mpll_func_cntl2;
- u32 mpll_func_cntl3;
- u32 mpll_ss;
- u32 mpll_ss2;
-};
-
-union r7xx_clock_registers {
- struct rv770_clock_registers rv770;
- struct rv730_clock_registers rv730;
-};
-
struct rv7xx_power_info {
/* flags */
- bool mem_gddr5;
- bool pcie_gen2;
- bool dynamic_pcie_gen2;
- bool acpi_pcie_gen2;
- bool boot_in_gen2;
bool voltage_control; /* vddc */
bool mvdd_control;
bool sclk_ss;
bool mclk_ss;
bool dynamic_ss;
- bool gfx_clock_gating;
- bool mg_clock_gating;
- bool mgcgtssm;
- bool power_gating;
bool thermal_protection;
- bool display_gap;
- bool dcodt;
- bool ulps;
- /* registers */
- union r7xx_clock_registers clk_regs;
- u32 s0_vid_lower_smio_cntl;
/* voltage */
- u32 vddc_mask_low;
- u32 mvdd_mask_low;
u32 mvdd_split_frequency;
- u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES];
u16 max_vddc;
u16 max_vddc_in_table;
u16 min_vddc_in_table;
- struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS];
- u8 valid_vddc_entries;
- /* dc odt */
- u32 mclk_odt_threshold;
- u8 odt_value_0[2];
- u8 odt_value_1[2];
/* stored values */
- u32 boot_sclk;
u16 acpi_vddc;
u32 ref_div;
u32 active_auto_throttle_sources;
@@ -582,17 +239,6 @@ struct rv7xx_power_info {
u32 asi;
u32 pasi;
u32 vrc;
- u32 restricted_levels;
- u32 rlp;
- u32 rmp;
- u32 lhp;
- u32 lmp;
- /* smc offsets */
- u16 state_table_start;
- u16 soft_regs_start;
- u16 sram_end;
- /* scratch structs */
- RV770_SMC_STATETABLE smc_statetable;
};
enum si_pcie_gen {
@@ -611,44 +257,12 @@ struct rv7xx_pl {
enum si_pcie_gen pcie_gen; /* si+ only */
};
-struct rv7xx_ps {
- struct rv7xx_pl high;
- struct rv7xx_pl medium;
- struct rv7xx_pl low;
- bool dc_compatible;
-};
-
struct si_ps {
u16 performance_level_count;
bool dc_compatible;
struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};
-struct ni_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct ni_cac_data
-{
- struct ni_leakage_coeffients leakage_coefficients;
- u32 i_leakage;
- s32 leakage_minimum_temperature;
- u32 pwr_const;
- u32 dc_cac_value;
- u32 bif_cac_value;
- u32 lkge_pwr;
- u8 mc_wr_weight;
- u8 mc_rd_weight;
- u8 allow_ovrflw;
- u8 num_win_tdp;
- u8 l2num_win_tdp;
- u8 lts_truncate_n;
-};
-
struct evergreen_power_info {
/* must be first! */
struct rv7xx_power_info rv7xx;
@@ -657,203 +271,33 @@ struct evergreen_power_info {
bool dynamic_ac_timing;
bool abm;
bool mcls;
- bool light_sleep;
- bool memory_transition;
bool pcie_performance_request;
- bool pcie_performance_request_registered;
bool sclk_deep_sleep;
- bool dll_default_on;
- bool ls_clock_gating;
bool smu_uvd_hs;
bool uvd_enabled;
/* stored values */
u16 acpi_vddci;
- u8 mvdd_high_index;
- u8 mvdd_low_index;
u32 mclk_edc_wr_enable_threshold;
- struct evergreen_mc_reg_table mc_reg_table;
struct atom_voltage_table vddc_voltage_table;
struct atom_voltage_table vddci_voltage_table;
- struct evergreen_arb_registers bootup_arb_registers;
- struct evergreen_ulv_param ulv;
- struct at ats[2];
- /* smc offsets */
- u16 mc_reg_table_start;
struct amdgpu_ps current_rps;
- struct rv7xx_ps current_ps;
struct amdgpu_ps requested_rps;
- struct rv7xx_ps requested_ps;
-};
-
-struct PP_NIslands_Dpm2PerfLevel
-{
- uint8_t MaxPS;
- uint8_t TgtAct;
- uint8_t MaxPS_StepInc;
- uint8_t MaxPS_StepDec;
- uint8_t PSST;
- uint8_t NearTDPDec;
- uint8_t AboveSafeInc;
- uint8_t BelowSafeInc;
- uint8_t PSDeltaLimit;
- uint8_t PSDeltaWin;
- uint8_t Reserved[6];
-};
-
-typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel;
-
-struct PP_NIslands_DPM2Parameters
-{
- uint32_t TDPLimit;
- uint32_t NearTDPLimit;
- uint32_t SafePowerLimit;
- uint32_t PowerBoostLimit;
-};
-typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters;
-
-struct NISLANDS_SMC_SCLK_VALUE
-{
- uint32_t vCG_SPLL_FUNC_CNTL;
- uint32_t vCG_SPLL_FUNC_CNTL_2;
- uint32_t vCG_SPLL_FUNC_CNTL_3;
- uint32_t vCG_SPLL_FUNC_CNTL_4;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM;
- uint32_t vCG_SPLL_SPREAD_SPECTRUM_2;
- uint32_t sclk_value;
-};
-
-typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE;
-
-struct NISLANDS_SMC_MCLK_VALUE
-{
- uint32_t vMPLL_FUNC_CNTL;
- uint32_t vMPLL_FUNC_CNTL_1;
- uint32_t vMPLL_FUNC_CNTL_2;
- uint32_t vMPLL_AD_FUNC_CNTL;
- uint32_t vMPLL_AD_FUNC_CNTL_2;
- uint32_t vMPLL_DQ_FUNC_CNTL;
- uint32_t vMPLL_DQ_FUNC_CNTL_2;
- uint32_t vMCLK_PWRMGT_CNTL;
- uint32_t vDLL_CNTL;
- uint32_t vMPLL_SS;
- uint32_t vMPLL_SS2;
- uint32_t mclk_value;
-};
-
-typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE;
-
-struct NISLANDS_SMC_VOLTAGE_VALUE
-{
- uint16_t value;
- uint8_t index;
- uint8_t padding;
-};
-
-typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE;
-
-struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL
-{
- uint8_t arbValue;
- uint8_t ACIndex;
- uint8_t displayWatermark;
- uint8_t gen2PCIE;
- uint8_t reserved1;
- uint8_t reserved2;
- uint8_t strobeMode;
- uint8_t mcFlags;
- uint32_t aT;
- uint32_t bSP;
- NISLANDS_SMC_SCLK_VALUE sclk;
- NISLANDS_SMC_MCLK_VALUE mclk;
- NISLANDS_SMC_VOLTAGE_VALUE vddc;
- NISLANDS_SMC_VOLTAGE_VALUE mvdd;
- NISLANDS_SMC_VOLTAGE_VALUE vddci;
- NISLANDS_SMC_VOLTAGE_VALUE std_vddc;
- uint32_t powergate_en;
- uint8_t hUp;
- uint8_t hDown;
- uint8_t stateFlags;
- uint8_t arbRefreshState;
- uint32_t SQPowerThrottle;
- uint32_t SQPowerThrottle_2;
- uint32_t reserved[2];
- PP_NIslands_Dpm2PerfLevel dpm2;
-};
-
-typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL;
-
-struct NISLANDS_SMC_SWSTATE
-{
- uint8_t flags;
- uint8_t levelCount;
- uint8_t padding2;
- uint8_t padding3;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[];
-};
-
-typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
-
-struct NISLANDS_SMC_VOLTAGEMASKTABLE
-{
- uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
- uint32_t lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX];
-};
-
-typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
-
-#define NISLANDS_MAX_NO_VREG_STEPS 32
-
-struct NISLANDS_SMC_STATETABLE
-{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- PP_NIslands_DPM2Parameters dpm2Params;
- NISLANDS_SMC_SWSTATE initialState;
- NISLANDS_SMC_SWSTATE ACPIState;
- NISLANDS_SMC_SWSTATE ULVState;
- NISLANDS_SMC_SWSTATE driverState;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
};
-typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
-
struct ni_power_info {
/* must be first! */
struct evergreen_power_info eg;
- struct ni_clock_registers clock_registers;
- struct ni_mc_reg_table mc_reg_table;
u32 mclk_rtt_mode_threshold;
/* flags */
- bool use_power_boost_limit;
bool support_cac_long_term_average;
bool cac_enabled;
bool cac_configuration_required;
bool driver_calculate_cac_leakage;
- bool pc_enabled;
bool enable_power_containment;
bool enable_cac;
bool enable_sq_ramping;
- /* smc offsets */
- u16 arb_table_start;
- u16 fan_table_start;
- u16 cac_table_start;
- u16 spll_table_start;
- /* CAC stuff */
- struct ni_cac_data cac_data;
- u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS];
- const struct ni_cac_weights *cac_weights;
- u8 lta_window_size;
- u8 lts_truncate;
struct si_ps current_ps;
struct si_ps requested_ps;
- /* scratch structs */
- SMC_NIslands_MCRegisters smc_mc_reg_table;
- NISLANDS_SMC_STATETABLE smc_statetable;
};
struct si_cac_config_reg
@@ -952,7 +396,6 @@ struct si_leakage_voltage
struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT];
};
-
struct si_ulv_param {
bool supported;
u32 cg_ulv_control;
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 4e65ab9e931c..281a5e377aee 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
@@ -172,20 +172,42 @@ PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev,
{
u32 tmp;
int i;
+ int usec_timeout;
+
+ /* SMC seems to process some messages exceptionally slowly. */
+ switch (msg) {
+ case PPSMC_MSG_NoForcedLevel:
+ case PPSMC_MSG_SetEnabledLevels:
+ case PPSMC_MSG_SetForcedLevels:
+ case PPSMC_MSG_DisableULV:
+ case PPSMC_MSG_SwitchToSwState:
+ usec_timeout = 1000000; /* 1 sec */
+ break;
+ default:
+ usec_timeout = 200000; /* 200 ms */
+ break;
+ }
if (!amdgpu_si_is_smc_running(adev))
return PPSMC_Result_Failed;
WREG32(mmSMC_MESSAGE_0, msg);
- for (i = 0; i < adev->usec_timeout; i++) {
+ for (i = 0; i < usec_timeout; i++) {
tmp = RREG32(mmSMC_RESP_0);
if (tmp != 0)
break;
udelay(1);
}
- return (PPSMC_Result)RREG32(mmSMC_RESP_0);
+ tmp = RREG32(mmSMC_RESP_0);
+ if (tmp == 0) {
+ drm_warn(adev_to_drm(adev),
+ "%s timeout on message: %x (SMC_SCRATCH0: %x)\n",
+ __func__, msg, RREG32(mmSMC_SCRATCH0));
+ }
+
+ return (PPSMC_Result)tmp;
}
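
Callers treat any value other than PPSMC_Result_OK as failure, which now includes the 0 returned on timeout; the pattern as used by si_disable_ulv() above:

	PPSMC_Result smc_result;

	smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;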
PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index b48a031cbba0..3aaf3dd71868 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
@@ -28,12 +27,10 @@
#include <linux/firmware.h>
#include <linux/reboot.h>
#include "amd_shared.h"
-#include "amd_powerplay.h"
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
#include "amdgpu_dpm_internal.h"
-#include "amdgpu_display.h"
static const struct amd_pm_funcs pp_dpm_funcs;
@@ -634,9 +631,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
- if (!hwmgr || !hwmgr->pm_en || !hwmgr->soft_pp_table)
+ if (!hwmgr || !hwmgr->pm_en || !table)
return -EINVAL;
+ if (!hwmgr->soft_pp_table)
+ return -EOPNOTSUPP;
+
*table = (char *)hwmgr->soft_pp_table;
return hwmgr->soft_pp_table_size;
}
@@ -955,7 +955,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return 0;
}
-static int pp_set_power_limit(void *handle, uint32_t limit)
+static int pp_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
struct pp_hwmgr *hwmgr = handle;
uint32_t max_power_limit;
@@ -1554,16 +1554,7 @@ static void pp_pm_compute_clocks(void *handle)
struct amdgpu_device *adev = hwmgr->adev;
if (!adev->dc_enabled) {
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with
- * refresh rates over 120 hz on the non-DC code.
- */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
-
+ amdgpu_dpm_get_display_cfg(adev);
pp_display_configuration_change(handle,
&adev->pm.pm_display_cfg);
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
index 8d40ed0f0e83..ce166a7f8e42 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c
@@ -563,8 +563,8 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3(
PP_ASSERT_WITH_CODE((NULL != voltage_info),
"Could not find Voltage Table in BIOS.", return false;);
- ret = (NULL != atomctrl_lookup_voltage_type_v3
- (voltage_info, voltage_type, voltage_mode)) ? true : false;
+ ret = atomctrl_lookup_voltage_type_v3
+ (voltage_info, voltage_type, voltage_mode) != NULL;
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index 9a821563bc8e..14ccd743ca1d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -1032,7 +1032,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
uint32_t min_freq, max_freq = 0;
- uint32_t ret = 0;
+ int ret = 0;
switch (type) {
case PP_SCLK:
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 8da882c51856..9b28c0728269 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -5444,8 +5444,7 @@ static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
else if (hwmgr->pp_table_version == PP_TABLE_V0)
- thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
- PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ thermal_data->max = data->thermal_temp_setting.temperature_shutdown;
thermal_data->sw_ctf_threshold = thermal_data->max;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
index 5e43ad2b2956..0a876c840c79 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c
@@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
table->VRConfig = 0;
@@ -2540,9 +2540,8 @@ static int fiji_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool fiji_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 17d2f5bff4a7..aa3ae9b115c4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = 0;
+ table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
result = iceland_populate_smc_svi2_config(hwmgr, table);
@@ -2655,9 +2655,8 @@ static int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool iceland_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
const struct pp_smumgr_func iceland_smu_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
index ff6b563ecbf5..bf6d09572cfc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c
@@ -2578,9 +2578,8 @@ static int polaris10_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool polaris10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
index ac9ec8257f82..38e19e5cad4d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
@@ -139,7 +139,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id,
NULL);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -164,7 +164,7 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index baf51cd82a35..0d4cbe4113a0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -401,7 +401,7 @@ failed:
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
- uint32_t ret;
+ int ret;
ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
index 6fe6e6abb5d8..2e21f9d066cb 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
@@ -3139,9 +3139,8 @@ static int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
static bool tonga_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
- CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON))
- ? true : false;
+ return PHM_READ_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON) == 1;
}
static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
index f9c0f117725d..0bf1bf5528c2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
@@ -60,7 +60,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
priv->smu_tables.entry[table_id].table_id,
NULL);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -90,7 +90,7 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
index d3ff6a831ed5..e2ba593faa5d 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c
@@ -68,7 +68,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -EINVAL);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -98,7 +98,7 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
index a5c95b180672..e3515156d26f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
@@ -192,7 +192,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return ret);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[table_id].table,
priv->smu_tables.entry[table_id].size);
@@ -223,7 +223,7 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
@@ -256,7 +256,7 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
@@ -306,7 +306,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
return ret);
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table,
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index b47cb4a5f488..f51fa265230b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -68,7 +68,7 @@ static int smu_handle_task(struct smu_context *smu,
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
static int smu_set_fan_control_mode(void *handle, u32 value);
-static int smu_set_power_limit(void *handle, uint32_t limit);
+static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
@@ -508,11 +508,14 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
/* Enable restore flag */
smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
- /* set the user dpm power limit */
- if (smu->user_dpm_profile.power_limit) {
- ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
+ /* set the user dpm power limits */
+ for (int i = SMU_DEFAULT_PPT_LIMIT; i < SMU_LIMIT_TYPE_COUNT; i++) {
+ if (!smu->user_dpm_profile.power_limits[i])
+ continue;
+ ret = smu_set_power_limit(smu, i,
+ smu->user_dpm_profile.power_limits[i]);
if (ret)
- dev_err(smu->adev->dev, "Failed to set power limit value\n");
+ dev_err(smu->adev->dev, "Failed to set power limit type %d\n", i);
}
/* set the user dpm clock configurations */
@@ -609,6 +612,17 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev)
return true;
}
+int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
+ uint32_t param, uint32_t *read_arg)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->ras_send_msg)
+ ret = smu->ppt_funcs->ras_send_msg(smu, msg, param, read_arg);
+
+ return ret;
+}
static int smu_sys_get_pp_table(void *handle,
char **table)
@@ -620,7 +634,7 @@ static int smu_sys_get_pp_table(void *handle,
return -EOPNOTSUPP;
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
@@ -1315,6 +1329,33 @@ static void smu_init_power_profile(struct smu_context *smu)
smu_power_profile_mode_get(smu, smu->power_profile_mode);
}
+void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
+ return;
+
+ set_bit(fea_id, fea_cap->cap_map);
+}
+
+bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ if (fea_id >= SMU_FEATURE_CAP_ID__COUNT)
+ return false;
+
+ return test_bit(fea_id, fea_cap->cap_map);
+}
+
+static void smu_feature_cap_init(struct smu_context *smu)
+{
+ struct smu_feature_cap *fea_cap = &smu->fea_cap;
+
+ bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT);
+}
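
The capability bitmap replaces the per-ASIC *_is_support callbacks removed later in this patch; a usage sketch (where exactly a backend calls the setter is an assumption here):

	/* In a ppt backend's setup path: advertise the capability once. */
	smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);

	/* In the generic query path, as smu_reset_sdma_is_supported() does: */
	if (smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET))
		dev_dbg(smu->adev->dev, "SDMA soft reset available\n");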
+
static int smu_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -1347,6 +1388,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
INIT_DELAYED_WORK(&smu->swctf_delayed_work,
smu_swctf_delayed_work_handler);
+ smu_feature_cap_init(smu);
+
ret = smu_smc_table_sw_init(smu);
if (ret) {
dev_err(adev->dev, "Failed to sw init smc table!\n");
@@ -1626,9 +1669,12 @@ static int smu_smc_hw_setup(struct smu_context *smu)
if (adev->in_suspend && smu_is_dpm_running(smu)) {
dev_info(adev->dev, "dpm has been enabled\n");
ret = smu_system_features_control(smu, true);
- if (ret)
+ if (ret) {
dev_err(adev->dev, "Failed system features control!\n");
- return ret;
+ return ret;
+ }
+
+ return smu_enable_thermal_alert(smu);
}
break;
default:
@@ -1896,7 +1942,6 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
smu_dpm_set_vcn_enable(smu, true, i);
smu_dpm_set_jpeg_enable(smu, true);
- smu_dpm_set_vpe_enable(smu, true);
smu_dpm_set_umsch_mm_enable(smu, true);
smu_set_mall_enable(smu);
smu_set_gfx_cgpg(smu, true);
@@ -2012,6 +2057,12 @@ static int smu_disable_dpms(struct smu_context *smu)
smu->is_apu && (amdgpu_in_reset(adev) || adev->in_s0ix))
return 0;
+ /* vangogh s0ix */
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
+ amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 2)) &&
+ adev->in_s0ix)
+ return 0;
+
/*
* For gpu reset, runpm and hibernation through BACO,
* BACO feature has to be kept enabled.
@@ -2104,7 +2155,6 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
}
smu_dpm_set_jpeg_enable(smu, false);
adev->jpeg.cur_state = AMD_PG_STATE_GATE;
- smu_dpm_set_vpe_enable(smu, false);
smu_dpm_set_umsch_mm_enable(smu, false);
if (!smu->pm_enabled)
@@ -2198,7 +2248,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
int ret;
struct amdgpu_device *adev = ip_block->adev;
struct smu_context *smu = adev->powerplay.pp_handle;
- struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (amdgpu_sriov_multi_vf_mode(adev))
return 0;
@@ -2230,18 +2279,6 @@ static int smu_resume(struct amdgpu_ip_block *ip_block)
adev->pm.dpm_enabled = true;
- if (smu->current_power_limit) {
- ret = smu_set_power_limit(smu, smu->current_power_limit);
- if (ret && ret != -EOPNOTSUPP)
- return ret;
- }
-
- if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
- ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0);
- if (ret)
- return ret;
- }
-
dev_info(adev->dev, "SMU is resumed successfully!\n");
return 0;
@@ -2769,6 +2806,17 @@ const struct amdgpu_ip_block_version smu_v14_0_ip_block = {
.funcs = &smu_ip_funcs,
};
+const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle)
+{
+ struct smu_context *smu = (struct smu_context *)handle;
+ const struct ras_smu_drv *tmp = NULL;
+ int ret;
+
+ ret = smu_get_ras_smu_drv(smu, &tmp);
+
+ return ret ? NULL : tmp;
+}
+
static int smu_load_microcode(void *handle)
{
struct smu_context *smu = handle;
@@ -2862,6 +2910,9 @@ int smu_get_power_limit(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
+ if (!limit)
+ return -EINVAL;
+
switch (pp_power_type) {
case PP_PWR_TYPE_SUSTAINED:
limit_type = SMU_DEFAULT_PPT_LIMIT;
@@ -2893,6 +2944,8 @@ int smu_get_power_limit(void *handle,
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
if (smu->ppt_funcs->get_ppt_limit)
ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
+ else
+ return -EOPNOTSUPP;
} else {
switch (limit_level) {
case SMU_PPT_LIMIT_CURRENT:
@@ -2931,37 +2984,34 @@ int smu_get_power_limit(void *handle,
return ret;
}
-static int smu_set_power_limit(void *handle, uint32_t limit)
+static int smu_set_power_limit(void *handle, uint32_t limit_type, uint32_t limit)
{
struct smu_context *smu = handle;
- uint32_t limit_type = limit >> 24;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- limit &= (1<<24)-1;
- if (limit_type != SMU_DEFAULT_PPT_LIMIT)
- if (smu->ppt_funcs->set_power_limit)
- return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
-
- if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
- dev_err(smu->adev->dev,
- "New power limit (%d) is out of range [%d,%d]\n",
- limit, smu->min_power_limit, smu->max_power_limit);
- return -EINVAL;
+ if (limit_type == SMU_DEFAULT_PPT_LIMIT) {
+ if (!limit)
+ limit = smu->current_power_limit;
+ if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
+ dev_err(smu->adev->dev,
+ "New power limit (%d) is out of range [%d,%d]\n",
+ limit, smu->min_power_limit, smu->max_power_limit);
+ return -EINVAL;
+ }
}
- if (!limit)
- limit = smu->current_power_limit;
-
if (smu->ppt_funcs->set_power_limit) {
ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
- smu->user_dpm_profile.power_limit = limit;
+ if (ret)
+ return ret;
+ if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
+ smu->user_dpm_profile.power_limits[limit_type] = limit;
}
- return ret;
+ return 0;
}
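
Previously the limit type rode in the top byte of the value (limit >> 24); the explicit parameter makes call sites self-describing. A before/after sketch with an assumed wattage variable:

	/* old, packed encoding: */
	smu_set_power_limit(smu, (SMU_FAST_PPT_LIMIT << 24) | watts);

	/* new, explicit type: */
	smu_set_power_limit(smu, SMU_FAST_PPT_LIMIT, watts);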
static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
@@ -3507,15 +3557,10 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
bool smu_link_reset_is_support(struct smu_context *smu)
{
- bool ret = false;
-
if (!smu->pm_enabled)
return false;
- if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support)
- ret = smu->ppt_funcs->link_reset_is_support(smu);
-
- return ret;
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
}
int smu_mode1_reset(struct smu_context *smu)
@@ -3831,6 +3876,51 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
return ret;
}
+static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)
+{
+ struct smu_context *smu = handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ enum smu_table_id table_id;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics)
+ return -EOPNOTSUPP;
+
+ table_id = smu_metrics_get_temp_table_id(type);
+
+ if (table_id == SMU_TABLE_COUNT)
+ return -EINVAL;
+
+ /* If the request is to get size alone, return the cached table size */
+ if (!table && tables[table_id].cache.size)
+ return tables[table_id].cache.size;
+
+ if (smu_table_cache_is_valid(&tables[table_id])) {
+ memcpy(table, tables[table_id].cache.buffer,
+ tables[table_id].cache.size);
+ return tables[table_id].cache.size;
+ }
+
+ return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);
+}
+
+static bool smu_temp_metrics_is_supported(void *handle, enum smu_temp_metric_type type)
+{
+ struct smu_context *smu = handle;
+ bool ret = false;
+
+ if (!smu->pm_enabled)
+ return false;
+
+ if (smu->smu_temp.temp_funcs && smu->smu_temp.temp_funcs->temp_metrics_is_supported)
+ ret = smu->smu_temp.temp_funcs->temp_metrics_is_supported(smu, type);
+
+ return ret;
+}
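
Passing a NULL table requests the size alone (served from the cached table metadata when available); a hypothetical caller built on that contract:

	ssize_t size = smu_sys_get_temp_metrics(handle, SMU_TEMP_METRIC_GPUBOARD, NULL);
	void *buf;

	if (size > 0) {
		buf = kzalloc(size, GFP_KERNEL);
		if (buf && smu_sys_get_temp_metrics(handle, SMU_TEMP_METRIC_GPUBOARD,
						    buf) == size)
			; /* parse the metrics in buf */
		kfree(buf);
	}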
+
static ssize_t smu_sys_get_xcp_metrics(void *handle, int xcp_id, void *table)
{
struct smu_context *smu = handle;
@@ -3903,6 +3993,8 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_dpm_clock_table = smu_get_dpm_clock_table,
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
.get_xcp_metrics = smu_sys_get_xcp_metrics,
+ .get_temp_metrics = smu_sys_get_temp_metrics,
+ .temp_metrics_is_supported = smu_temp_metrics_is_supported,
};
int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
@@ -4058,12 +4150,7 @@ int smu_send_rma_reason(struct smu_context *smu)
*/
bool smu_reset_sdma_is_supported(struct smu_context *smu)
{
- bool ret = false;
-
- if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported)
- ret = smu->ppt_funcs->reset_sdma_is_supported(smu);
-
- return ret;
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
}
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
@@ -4076,6 +4163,11 @@ int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+bool smu_reset_vcn_is_supported(struct smu_context *smu)
+{
+ return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
+}
+
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
if (smu->ppt_funcs && smu->ppt_funcs->dpm_reset_vcn)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index b52e194397e2..8815fc70b63b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -212,6 +212,7 @@ enum smu_power_src_type {
enum smu_ppt_limit_type {
SMU_DEFAULT_PPT_LIMIT = 0,
SMU_FAST_PPT_LIMIT,
+ SMU_LIMIT_TYPE_COUNT,
};
enum smu_ppt_limit_level {
@@ -231,7 +232,7 @@ enum smu_memory_pool_size {
struct smu_user_dpm_profile {
uint32_t fan_mode;
- uint32_t power_limit;
+ uint32_t power_limits[SMU_LIMIT_TYPE_COUNT];
uint32_t fan_speed_pwm;
uint32_t fan_speed_rpm;
uint32_t flags;
@@ -249,6 +250,14 @@ struct smu_user_dpm_profile {
tables[table_id].domain = d; \
} while (0)
+struct smu_table_cache {
+ void *buffer;
+ size_t size;
+ /* interval in ms */
+ uint32_t interval;
+ unsigned long last_cache_time;
+};
+
struct smu_table {
uint64_t size;
uint32_t align;
@@ -257,6 +266,7 @@ struct smu_table {
void *cpu_addr;
struct amdgpu_bo *bo;
uint32_t version;
+ struct smu_table_cache cache;
};
enum smu_perf_level_designation {
@@ -322,6 +332,9 @@ enum smu_table_id {
SMU_TABLE_ECCINFO,
SMU_TABLE_COMBO_PPTABLE,
SMU_TABLE_WIFIBAND,
+ SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ SMU_TABLE_BASEBOARD_TEMP_METRICS,
+ SMU_TABLE_PMFW_SYSTEM_METRICS,
SMU_TABLE_COUNT,
};
@@ -396,6 +409,10 @@ struct smu_dpm_context {
struct smu_dpm_policy_ctxt *dpm_policies;
};
+struct smu_temp_context {
+ const struct smu_temp_funcs *temp_funcs;
+};
+
struct smu_power_gate {
bool uvd_gated;
bool vce_gated;
@@ -512,6 +529,17 @@ enum smu_fw_status {
*/
#define SMU_WBRF_EVENT_HANDLING_PACE 10
+enum smu_feature_cap_id {
+ SMU_FEATURE_CAP_ID__LINK_RESET = 0,
+ SMU_FEATURE_CAP_ID__SDMA_RESET,
+ SMU_FEATURE_CAP_ID__VCN_RESET,
+ SMU_FEATURE_CAP_ID__COUNT,
+};
+
+struct smu_feature_cap {
+ DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT);
+};
+
struct smu_context {
struct amdgpu_device *adev;
struct amdgpu_irq_src irq_source;
@@ -529,10 +557,12 @@ struct smu_context {
struct smu_table_context smu_table;
struct smu_dpm_context smu_dpm;
struct smu_power_context smu_power;
+ struct smu_temp_context smu_temp;
struct smu_feature smu_feature;
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
struct smu_temperature_range thermal_range;
+ struct smu_feature_cap fea_cap;
void *od_settings;
struct smu_umd_pstate_table pstate_table;
@@ -624,6 +654,28 @@ struct smu_context {
struct i2c_adapter;
/**
+ * struct smu_temp_funcs - Callbacks used to get temperature data.
+ */
+struct smu_temp_funcs {
+ /**
+ * @get_temp_metrics: Get temperature metrics data for the given
+ * metrics type and copy it into &table.
+ * @type: Temperature metrics type (baseboard/gpuboard)
+ * Return: Size of &table
+ */
+ ssize_t (*get_temp_metrics)(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table);
+
+ /**
+ * @temp_metrics_is_supported: Check whether the given temperature
+ * metrics type is supported
+ * @type: Temperature metrics type (baseboard/gpuboard)
+ * Return: true if supported, false otherwise
+ */
+ bool (*temp_metrics_is_supported)(struct smu_context *smu, enum smu_temp_metric_type type);
+};
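A sketch of how a core-layer wrapper might dispatch through the new smu_temp context (the wrapper name is an assumption; only the struct members come from this patch):

static ssize_t smu_get_temp_metrics(struct smu_context *smu,
				    enum smu_temp_metric_type type, void *table)
{
	const struct smu_temp_funcs *funcs = smu->smu_temp.temp_funcs;

	/* Refuse cleanly when the ASIC provides no temp callbacks. */
	if (!funcs || !funcs->temp_metrics_is_supported ||
	    !funcs->temp_metrics_is_supported(smu, type))
		return -EOPNOTSUPP;

	return funcs->get_temp_metrics(smu, type, table);
}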
+
+/**
* struct pptable_funcs - Callbacks used to interact with the SMU.
*/
struct pptable_funcs {
@@ -1234,11 +1286,6 @@ struct pptable_funcs {
bool (*mode1_reset_is_support)(struct smu_context *smu);
/**
- * @link_reset_is_support: Check if GPU supports link reset.
- */
- bool (*link_reset_is_support)(struct smu_context *smu);
-
- /**
* @mode1_reset: Perform mode1 reset.
*
* Complete GPU reset.
@@ -1388,10 +1435,6 @@ struct pptable_funcs {
* @reset_sdma: message SMU to soft reset sdma instance.
*/
int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask);
- /**
- * @reset_sdma_is_supported: Check if support resets the SDMA engine.
- */
- bool (*reset_sdma_is_supported)(struct smu_context *smu);
/**
* @reset_vcn: message SMU to soft reset vcn instance.
@@ -1479,6 +1522,21 @@ struct pptable_funcs {
*/
ssize_t (*get_xcp_metrics)(struct smu_context *smu, int xcp_id,
void *table);
+ /**
+ * @ras_send_msg: Send a message with a parameter on behalf of RAS
+ * @msg: Type of message.
+ * @param: Message parameter.
+ * @read_arg: SMU response (optional).
+ */
+ int (*ras_send_msg)(struct smu_context *smu,
+ enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
+
+ /**
+ * @get_ras_smu_drv: Get the RAS SMU driver interface through
+ * @ras_smu_drv
+ * Return: 0 on success, negative errno on failure
+ */
+ int (*get_ras_smu_drv)(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv);
};
typedef enum {
@@ -1622,6 +1680,71 @@ typedef struct {
struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
enum pp_pm_policy p_type);
+static inline enum smu_table_id
+smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+ return SMU_TABLE_BASEBOARD_TEMP_METRICS;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return SMU_TABLE_GPUBOARD_TEMP_METRICS;
+ default:
+ return SMU_TABLE_COUNT;
+ }
+}
+
+static inline void smu_table_cache_update_time(struct smu_table *table,
+ unsigned long time)
+{
+ table->cache.last_cache_time = time;
+}
+
+static inline bool smu_table_cache_is_valid(struct smu_table *table)
+{
+ if (!table->cache.buffer || !table->cache.last_cache_time ||
+ !table->cache.interval || !table->cache.size ||
+ time_after(jiffies,
+ table->cache.last_cache_time +
+ msecs_to_jiffies(table->cache.interval)))
+ return false;
+
+ return true;
+}
+
+static inline int smu_table_cache_init(struct smu_context *smu,
+ enum smu_table_id table_id, size_t size,
+ uint32_t cache_interval)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
+ if (!tables[table_id].cache.buffer)
+ return -ENOMEM;
+
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = cache_interval;
+ tables[table_id].cache.size = size;
+
+ return 0;
+}
+
+static inline void smu_table_cache_fini(struct smu_context *smu,
+ enum smu_table_id table_id)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ if (tables[table_id].cache.buffer) {
+ kfree(tables[table_id].cache.buffer);
+ tables[table_id].cache.buffer = NULL;
+ tables[table_id].cache.last_cache_time = 0;
+ tables[table_id].cache.interval = 0;
+ }
+}
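Taken together, the helpers above suggest a fetch-through-cache pattern; a sketch under the assumption of a hypothetical fetch_table_from_fw() that fills the cache buffer:

static int example_read_cached_table(struct smu_context *smu,
				     enum smu_table_id id, void *out)
{
	struct smu_table *t = &smu->smu_table.tables[id];
	int ret;

	if (!smu_table_cache_is_valid(t)) {
		ret = fetch_table_from_fw(smu, t->cache.buffer); /* assumed */
		if (ret)
			return ret;
		smu_table_cache_update_time(t, jiffies);
	}

	memcpy(out, t->cache.buffer, t->cache.size);
	return 0;
}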
+
#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
int smu_get_power_limit(void *handle,
uint32_t *limit,
@@ -1673,10 +1796,17 @@ int smu_send_rma_reason(struct smu_context *smu);
int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask);
bool smu_reset_sdma_is_supported(struct smu_context *smu);
int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask);
+bool smu_reset_vcn_is_supported(struct smu_context *smu);
int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
int level);
ssize_t smu_get_pm_policy_info(struct smu_context *smu,
enum pp_pm_policy p_type, char *sysbuf);
+const struct ras_smu_drv *smu_get_ras_smu_driver(void *handle);
+int amdgpu_smu_ras_send_msg(struct amdgpu_device *adev, enum smu_message_type msg,
+ uint32_t param, uint32_t *readarg);
#endif
+
+void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id);
+bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
index 0a2ca544f4e3..dd30d96e1ca2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
@@ -87,7 +87,7 @@ typedef enum {
/*37*/ FEATURE_DVO = 37,
/*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38,
/*39*/ FEATURE_GLOBAL_DPM = 39,
-/*40*/ FEATURE_NODE_POWER_MANAGER = 40,
+/*40*/ FEATURE_HROM_EN = 40,
/*41*/ NUM_FEATURES = 41
} FEATURE_LIST_e;
@@ -135,7 +135,63 @@ typedef enum {
GFX_DVM_MARGIN_COUNT
} GFX_DVM_MARGIN_e;
-#define SMU_METRICS_TABLE_VERSION 0x13
+typedef enum {
+ SYSTEM_TEMP_UBB_FPGA,
+ SYSTEM_TEMP_UBB_FRONT,
+ SYSTEM_TEMP_UBB_BACK,
+ SYSTEM_TEMP_UBB_OAM7,
+ SYSTEM_TEMP_UBB_IBC,
+ SYSTEM_TEMP_UBB_UFPGA,
+ SYSTEM_TEMP_UBB_OAM1,
+ SYSTEM_TEMP_OAM_0_1_HSC,
+ SYSTEM_TEMP_OAM_2_3_HSC,
+ SYSTEM_TEMP_OAM_4_5_HSC,
+ SYSTEM_TEMP_OAM_6_7_HSC,
+ SYSTEM_TEMP_UBB_FPGA_0V72_VR,
+ SYSTEM_TEMP_UBB_FPGA_3V3_VR,
+ SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR,
+ SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR,
+ SYSTEM_TEMP_RETIMER_0_1_0V9_VR,
+ SYSTEM_TEMP_RETIMER_4_5_0V9_VR,
+ SYSTEM_TEMP_RETIMER_2_3_0V9_VR,
+ SYSTEM_TEMP_RETIMER_6_7_0V9_VR,
+ SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR,
+ SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR,
+ SYSTEM_TEMP_IBC_HSC,
+ SYSTEM_TEMP_IBC,
+ SYSTEM_TEMP_MAX_ENTRIES = 32
+} SYSTEM_TEMP_e;
+
+typedef enum {
+ NODE_TEMP_RETIMER,
+ NODE_TEMP_IBC_TEMP,
+ NODE_TEMP_IBC_2_TEMP,
+ NODE_TEMP_VDD18_VR_TEMP,
+ NODE_TEMP_04_HBM_B_VR_TEMP,
+ NODE_TEMP_04_HBM_D_VR_TEMP,
+ NODE_TEMP_MAX_TEMP_ENTRIES = 12
+} NODE_TEMP_e;
+
+typedef enum {
+ SVI_VDDCR_VDD0_TEMP,
+ SVI_VDDCR_VDD1_TEMP,
+ SVI_VDDCR_VDD2_TEMP,
+ SVI_VDDCR_VDD3_TEMP,
+ SVI_VDDCR_SOC_A_TEMP,
+ SVI_VDDCR_SOC_C_TEMP,
+ SVI_VDDCR_SOCIO_A_TEMP,
+ SVI_VDDCR_SOCIO_C_TEMP,
+ SVI_VDD_085_HBM_TEMP,
+ SVI_VDDCR_11_HBM_B_TEMP,
+ SVI_VDDCR_11_HBM_D_TEMP,
+ SVI_VDD_USR_TEMP,
+ SVI_VDDIO_11_E32_TEMP,
+ SVI_MAX_TEMP_ENTRIES, // 13
+} SVI_TEMP_e;
+
+#define SMU_METRICS_TABLE_VERSION 0x15
+
+#define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1
typedef struct __attribute__((packed, aligned(4))) {
uint64_t AccumulationCounter;
@@ -231,11 +287,32 @@ typedef struct __attribute__((packed, aligned(4))) {
uint64_t GfxclkBelowHostLimitThmAcc[8];
uint64_t GfxclkBelowHostLimitTotalAcc[8];
uint64_t GfxclkLowUtilizationAcc[8];
+
+ uint32_t AidTemperature[4];
+ uint32_t XcdTemperature[8];
+ uint32_t HbmTemperature[8];
} MetricsTable_t;
#define SMU_VF_METRICS_TABLE_MASK (1 << 31)
#define SMU_VF_METRICS_TABLE_VERSION (0x6 | SMU_VF_METRICS_TABLE_MASK)
+#pragma pack(push, 4)
+typedef struct {
+ uint64_t AccumulationCounter; // Last update timestamp
+ uint16_t LabelVersion; // Defaults to 0.
+ uint16_t NodeIdentifier; // Unique identifier to each node on system.
+ int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF
+ int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius
+ int16_t spare[7];
+
+ //NPM: NODE POWER MANAGEMENT
+ uint32_t NodePowerLimit;
+ uint32_t NodePower;
+ uint32_t GlobalPPTResidencyAcc;
+} SystemMetricsTable_t;
+#pragma pack(pop)
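Unused entries in the temperature arrays are flagged with 0xFFFF (-1 as int16_t); a self-contained sketch of the validity check a consumer would apply:

#include <stdint.h>
#include <stdbool.h>

/* A slot carries data only when it is not the 0xFFFF sentinel. */
static bool temp_slot_valid(int16_t t)
{
	return t != (int16_t)0xFFFF;	/* 0xFFFF == -1 as int16_t */
}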
+
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
uint32_t InstGfxclk_TargFreq;
@@ -287,6 +364,14 @@ typedef struct {
// General info
uint32_t pldmVersion[2];
+
+ //Node Power Limit
+ uint32_t MaxNodePowerLimit;
+
+ // PPT1 Configuration
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
} StaticMetricsTable_t;
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
index e1f490b6ce64..d09b6ae9827e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h
@@ -105,18 +105,21 @@
#define PPSMC_MSG_UpdatePccWaitDecMaxStr 0x4C
#define PPSMC_MSG_ResetSDMA 0x4D
#define PPSMC_MSG_GetRasTableVersion 0x4E
-#define PPSMC_MSG_GetRmaStatus 0x4F
-#define PPSMC_MSG_GetErrorCount 0x50
-#define PPSMC_MSG_GetBadPageCount 0x51
-#define PPSMC_MSG_GetBadPageInfo 0x52
-#define PPSMC_MSG_GetBadPagePaAddrLoHi 0x53
-#define PPSMC_MSG_SetTimestampLoHi 0x54
-#define PPSMC_MSG_GetTimestampLoHi 0x55
-#define PPSMC_MSG_GetRasPolicy 0x56
-#define PPSMC_MSG_DumpErrorRecord 0x57
+#define PPSMC_MSG_GetBadPageCount 0x50
+#define PPSMC_MSG_GetBadPageMcaAddress 0x51
+#define PPSMC_MSG_SetTimestamp 0x53
+#define PPSMC_MSG_SetTimestampHi 0x54
+#define PPSMC_MSG_GetTimestamp 0x55
+#define PPSMC_MSG_GetBadPageIpIdLoHi 0x57
#define PPSMC_MSG_EraseRasTable 0x58
#define PPSMC_MSG_GetStaticMetricsTable 0x59
-#define PPSMC_Message_Count 0x5A
+#define PPSMC_MSG_ResetVfArbitersByIndex 0x5A
+#define PPSMC_MSG_GetSystemMetricsTable 0x5C
+#define PPSMC_MSG_GetSystemMetricsVersion 0x5D
+#define PPSMC_MSG_ResetVCN 0x5E
+#define PPSMC_MSG_SetFastPptLimit 0x5F
+#define PPSMC_MSG_GetFastPptLimit 0x60
+#define PPSMC_Message_Count 0x61
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
index 41f268313613..63a088ef7169 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h
@@ -94,9 +94,9 @@
#define PPSMC_MSG_RmaDueToBadPageThreshold 0x43
#define PPSMC_MSG_SetThrottlingPolicy 0x44
#define PPSMC_MSG_ResetSDMA 0x4D
-#define PPSMC_MSG_ResetVCN 0x4E
#define PPSMC_MSG_GetStaticMetricsTable 0x59
-#define PPSMC_Message_Count 0x5A
+#define PPSMC_MSG_ResetVCN 0x5B
+#define PPSMC_Message_Count 0x5C
//PPSMC Reset Types for driver msg argument
#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x1
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index d7a9e41820fa..9b71a8afdd35 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -278,7 +278,17 @@
__SMU_DUMMY_MAP(MALLPowerState), \
__SMU_DUMMY_MAP(ResetSDMA), \
__SMU_DUMMY_MAP(ResetVCN), \
- __SMU_DUMMY_MAP(GetStaticMetricsTable),
+ __SMU_DUMMY_MAP(GetStaticMetricsTable), \
+ __SMU_DUMMY_MAP(GetSystemMetricsTable), \
+ __SMU_DUMMY_MAP(GetRASTableVersion), \
+ __SMU_DUMMY_MAP(GetBadPageCount), \
+ __SMU_DUMMY_MAP(GetBadPageMcaAddr), \
+ __SMU_DUMMY_MAP(SetTimestamp), \
+ __SMU_DUMMY_MAP(GetTimestamp), \
+ __SMU_DUMMY_MAP(GetBadPageIpid), \
+ __SMU_DUMMY_MAP(EraseRasTable), \
+ __SMU_DUMMY_MAP(SetFastPptLimit), \
+ __SMU_DUMMY_MAP(GetFastPptLimit),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -457,7 +467,8 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(GFX_EDC_XVMIN), \
__SMU_DUMMY_MAP(GFX_DIDT_XVMIN), \
__SMU_DUMMY_MAP(FAN_ABNORMAL), \
- __SMU_DUMMY_MAP(PIT),
+ __SMU_DUMMY_MAP(PIT), \
+ __SMU_DUMMY_MAP(HROM_EN),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
@@ -469,6 +480,7 @@ enum smu_feature_mask {
/* Message category flags */
#define SMU_MSG_VF_FLAG (1U << 0)
#define SMU_MSG_RAS_PRI (1U << 1)
+#define SMU_MSG_NO_PRECHECK (1U << 2)
/* Firmware capability flags */
#define SMU_FW_CAP_RAS_PRI (1U << 0)
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h
index 251ed011b3b0..251ed011b3b0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 9ad46f545d15..4fff78da81ff 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1745,10 +1745,10 @@ static int arcturus_i2c_control_init(struct smu_context *smu)
snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -1756,27 +1756,12 @@ static int arcturus_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void arcturus_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -1897,7 +1882,7 @@ static ssize_t arcturus_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 9548bd3c624b..55401e6b2b0b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -291,11 +291,12 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type,
char *buf)
{
- int ret = 0, size = 0;
+ int ret = 0, size = 0, start_offset = 0;
uint32_t cur_value = 0;
int i;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -353,7 +354,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu,
return ret;
}
- return size;
+ return size - start_offset;
}
static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index aac202d0c30e..7c9f77124ab2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1469,7 +1469,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
uint16_t *curve_settings;
- int i, levels, size = 0, ret = 0;
+ int i, levels, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1484,6 +1484,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_GFXCLK:
@@ -1497,11 +1498,11 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_DCEFCLK:
ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
if (ret)
- return size;
+ return size - start_offset;
ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
if (ret)
- return size;
+ return size - start_offset;
ret = navi10_is_support_fine_grained_dpm(smu, clk_type);
if (ret < 0)
@@ -1511,7 +1512,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
for (i = 0; i < count; i++) {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
if (ret)
- return size;
+ return size - start_offset;
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
@@ -1519,10 +1520,10 @@ static int navi10_print_clk_levels(struct smu_context *smu,
} else {
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
if (ret)
- return size;
+ return size - start_offset;
ret = smu_v11_0_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
if (ret)
- return size;
+ return size - start_offset;
freq_values[1] = cur_value;
mark_index = cur_value == freq_values[0] ? 0 :
@@ -1653,7 +1654,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int navi10_force_clk_levels(struct smu_context *smu,
@@ -2888,7 +2889,7 @@ static int navi10_set_dummy_pstates_table_location(struct smu_context *smu)
dummy_table += 0x1000;
}
- amdgpu_asic_flush_hdp(smu->adev, NULL);
+ amdgpu_hdp_flush(smu->adev, NULL);
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH,
@@ -3145,10 +3146,10 @@ static int navi10_i2c_control_init(struct smu_context *smu)
control->quirks = &navi10_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -3156,27 +3157,12 @@ static int navi10_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void navi10_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index d57591509aed..774283ac7827 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1281,7 +1281,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
OverDriveTable_t *od_table =
(OverDriveTable_t *)table_context->overdrive_table;
- int i, size = 0, ret = 0;
+ int i, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
@@ -1289,6 +1289,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
uint32_t min_value, max_value;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_GFXCLK:
@@ -1434,7 +1435,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
@@ -2648,10 +2649,10 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
control->quirks = &sienna_cichlid_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
/* assign the buses used for the FRU EEPROM and RAS EEPROM */
@@ -2660,27 +2661,12 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void sienna_cichlid_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 78e4186d06cc..b0d6487171d7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1022,7 +1022,12 @@ int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
- return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
+ int ret = 0;
+
+ if (smu->smu_table.thermal_controller_type)
+ ret = amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
+
+ return ret;
}
static uint16_t convert_to_vddc(uint8_t vid)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 2c9869feba61..9626da2dba58 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -565,7 +565,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_legacy_t metrics;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
@@ -576,6 +576,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -658,7 +659,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int vangogh_print_clk_levels(struct smu_context *smu,
@@ -666,7 +667,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
{
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
bool cur_value_match_level = false;
uint32_t min, max;
@@ -678,6 +679,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -779,7 +781,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int vangogh_common_print_clk_levels(struct smu_context *smu,
@@ -2217,6 +2219,9 @@ static int vangogh_post_smu_init(struct smu_context *smu)
uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
+ if (adev->in_s0ix)
+ return 0;
+
/* allow message will be sent after enable message on Vangogh*/
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
@@ -2308,8 +2313,7 @@ static int vangogh_get_power_limit(struct smu_context *smu,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
+ struct smu_11_5_power_context *power_context = smu->smu_power.power_context;
uint32_t ppt_limit;
int ret = 0;
@@ -2345,12 +2349,11 @@ static int vangogh_get_power_limit(struct smu_context *smu,
}
static int vangogh_get_ppt_limit(struct smu_context *smu,
- uint32_t *ppt_limit,
- enum smu_ppt_limit_type type,
- enum smu_ppt_limit_level level)
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
{
- struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
+ struct smu_11_5_power_context *power_context = smu->smu_power.power_context;
if (!power_context)
return -EOPNOTSUPP;
@@ -2399,7 +2402,6 @@ static int vangogh_set_power_limit(struct smu_context *smu,
smu->current_power_limit = ppt_limit;
break;
case SMU_FAST_PPT_LIMIT:
- ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
if (ppt_limit > power_context->max_fast_ppt_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index e97b0cf19197..eaa9ea162f16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -470,7 +470,7 @@ static int renoir_od_edit_dpm_table(struct smu_context *smu,
static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
uint32_t min = 0, max = 0;
- uint32_t ret = 0;
+ int ret = 0;
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetMinGfxclkFrequency,
@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
static int renoir_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
SmuMetrics_t metrics;
bool cur_value_match_level = false;
@@ -506,6 +506,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
return ret;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_RANGE:
@@ -550,7 +551,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
i == 2 ? "*" : "");
}
- return size;
+ return size - start_offset;
case SMU_SOCCLK:
count = NUM_SOCCLK_DPM_LEVELS;
cur_value = metrics.ClockFrequency[CLOCK_SOCCLK];
@@ -607,7 +608,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index c63d2e28954d..18d5d0704509 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1641,33 +1641,22 @@ static int aldebaran_i2c_control_init(struct smu_context *smu)
control->quirks = &aldebaran_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- i2c_del_adapter(control);
-
- return res;
}
static void aldebaran_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
@@ -1781,7 +1770,7 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
ret = smu_cmn_get_metrics_table(smu,
&metrics,
- true);
+ false);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 1a1f2a6b2e52..a89075e25717 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -288,7 +288,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
* Considering above, we just leave user a verbal message instead
* of halt driver loading.
*/
- if (if_version != smu->smc_driver_if_version) {
+ if (smu->smc_driver_if_version != SMU_IGNORE_IF_VERSION &&
+ if_version != smu->smc_driver_if_version) {
dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index e084ed99ec0e..677781060246 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -1195,15 +1195,16 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1534,7 +1535,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
@@ -2825,10 +2826,10 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v13_0_0_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2838,27 +2839,12 @@ static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 02a455a31c25..9e635f733fbf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -34,6 +34,7 @@
#include "amdgpu_fru_eeprom.h"
#include <linux/pci.h>
#include "smu_cmn.h"
+#include "amdgpu_ras.h"
#undef MP1_Public
#undef smnMP1_FIRMWARE_FLAGS
@@ -58,7 +59,7 @@
#define NUM_JPEG_RINGS_FW 10
#define NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics) \
- (ARRAY_SIZE(gpu_metrics->xcp_stats[0].jpeg_busy) / 4)
+ (ARRAY_SIZE(gpu_metrics->jpeg_busy) / 4)
const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] = {
SMU_13_0_12_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, FEATURE_DATA_CALCULATION),
@@ -81,9 +82,9 @@ const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[SMU_FEATURE_COUNT] =
SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MPIOCLK_BIT, FEATURE_DS_MPIOCLK),
SMU_13_0_12_FEA_MAP(SMU_FEATURE_DS_MP0CLK_BIT, FEATURE_DS_MP0CLK),
SMU_13_0_12_FEA_MAP(SMU_FEATURE_PIT_BIT, FEATURE_PIT),
+ SMU_13_0_12_FEA_MAP(SMU_FEATURE_HROM_EN_BIT, FEATURE_HROM_EN),
};
-// clang-format off
const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -106,7 +107,7 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -137,9 +138,66 @@ const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0),
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
+ MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
+ MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1),
+ MSG_MAP(GetRASTableVersion, PPSMC_MSG_GetRasTableVersion, 0),
+ MSG_MAP(GetBadPageCount, PPSMC_MSG_GetBadPageCount, 0),
+ MSG_MAP(GetBadPageMcaAddr, PPSMC_MSG_GetBadPageMcaAddress, 0),
+ MSG_MAP(SetTimestamp, PPSMC_MSG_SetTimestamp, 0),
+ MSG_MAP(GetTimestamp, PPSMC_MSG_GetTimestamp, 0),
+ MSG_MAP(GetBadPageIpid, PPSMC_MSG_GetBadPageIpIdLoHi, 0),
+ MSG_MAP(EraseRasTable, PPSMC_MSG_EraseRasTable, 0),
+ MSG_MAP(SetFastPptLimit, PPSMC_MSG_SetFastPptLimit, 1),
+ MSG_MAP(GetFastPptLimit, PPSMC_MSG_GetFastPptLimit, 1),
};
+int smu_v13_0_12_tables_init(struct smu_context *smu)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table_cache *cache;
+ int ret;
+
+ ret = smu_table_cache_init(smu, SMU_TABLE_PMFW_SYSTEM_METRICS,
+ smu_v13_0_12_get_system_metrics_size(), 5);
+ if (ret)
+ return ret;
+
+ ret = smu_table_cache_init(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS,
+ sizeof(*baseboard_temp_metrics), 50);
+ if (ret)
+ return ret;
+ /* Initialize the baseboard temperature metrics table */
+ cache = &(tables[SMU_TABLE_BASEBOARD_TEMP_METRICS].cache);
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *) cache->buffer;
+ smu_cmn_init_baseboard_temp_metrics(baseboard_temp_metrics, 1, 0);
+ /* Initialize GPU board temperature metrics */
+ ret = smu_table_cache_init(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS,
+ sizeof(*gpuboard_temp_metrics), 50);
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ return ret;
+ }
+ cache = &(tables[SMU_TABLE_GPUBOARD_TEMP_METRICS].cache);
+ gpuboard_temp_metrics = (struct amdgpu_gpuboard_temp_metrics_v1_0 *)cache->buffer;
+ smu_cmn_init_gpuboard_temp_metrics(gpuboard_temp_metrics, 1, 0);
+
+ return 0;
+}
+
+void smu_v13_0_12_tables_fini(struct smu_context *smu)
+{
+ smu_table_cache_fini(smu, SMU_TABLE_BASEBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_GPUBOARD_TEMP_METRICS);
+ smu_table_cache_fini(smu, SMU_TABLE_PMFW_SYSTEM_METRICS);
+}
+
static int smu_v13_0_12_get_enabled_mask(struct smu_context *smu,
uint64_t *feature_mask)
{
@@ -187,6 +245,11 @@ int smu_v13_0_12_get_max_metrics_size(void)
return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t));
}
+size_t smu_v13_0_12_get_system_metrics_size(void)
+{
+ return sizeof(SystemMetricsTable_t);
+}
+
static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
StaticMetricsTable_t *static_metrics)
{
@@ -220,7 +283,7 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
uint32_t table_version;
- int ret, i;
+ int ret, i, n;
if (!pptable->Init) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
@@ -259,6 +322,22 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
/* use AID0 serial number by default */
pptable->PublicSerialNumber_AID =
static_metrics->PublicSerialNumber_AID[0];
+
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ static_metrics->PublicSerialNumber_AID[i]);
+ }
+ n = ARRAY_SIZE(static_metrics->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ static_metrics->PublicSerialNumber_XCD[i]);
+ }
+
ret = smu_v13_0_12_fru_get_product_info(smu, static_metrics);
if (ret)
return ret;
@@ -274,6 +353,15 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu)
static_metrics->pldmVersion[0] != 0xFFFFFFFF)
smu->adev->firmware.pldm_version =
static_metrics->pldmVersion[0];
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS)))
+ pptable->MaxNodePowerLimit =
+ SMUQ10_ROUND(static_metrics->MaxNodePowerLimit);
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) &&
+ static_metrics->PPT1Max) {
+ pptable->PPT1Max = static_metrics->PPT1Max;
+ pptable->PPT1Min = static_metrics->PPT1Min;
+ pptable->PPT1Default = static_metrics->PPT1Default;
+ }
smu_v13_0_12_init_xgmi_data(smu, static_metrics);
pptable->Init = true;
}
@@ -359,18 +447,303 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_12_get_system_metrics_table(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_table *sys_table;
+ int ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ if (smu_table_cache_is_valid(sys_table))
+ return 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSystemMetricsTable, NULL);
+ if (ret) {
+ dev_info(smu->adev->dev,
+ "Failed to export system metrics table!\n");
+ return ret;
+ }
+
+ amdgpu_hdp_invalidate(smu->adev, NULL);
+ smu_table_cache_update_time(sys_table, jiffies);
+ memcpy(sys_table->cache.buffer, table->cpu_addr,
+ smu_v13_0_12_get_system_metrics_size());
+
+ return 0;
+}
+
+static enum amdgpu_node_temp smu_v13_0_12_get_node_sensor_type(NODE_TEMP_e type)
+{
+ switch (type) {
+ case NODE_TEMP_RETIMER:
+ return AMDGPU_RETIMER_X_TEMP;
+ case NODE_TEMP_IBC_TEMP:
+ return AMDGPU_OAM_X_IBC_TEMP;
+ case NODE_TEMP_IBC_2_TEMP:
+ return AMDGPU_OAM_X_IBC_2_TEMP;
+ case NODE_TEMP_VDD18_VR_TEMP:
+ return AMDGPU_OAM_X_VDD18_VR_TEMP;
+ case NODE_TEMP_04_HBM_B_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_B_VR_TEMP;
+ case NODE_TEMP_04_HBM_D_VR_TEMP:
+ return AMDGPU_OAM_X_04_HBM_D_VR_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_vr_temp smu_v13_0_12_get_vr_sensor_type(SVI_TEMP_e type)
+{
+ switch (type) {
+ case SVI_VDDCR_VDD0_TEMP:
+ return AMDGPU_VDDCR_VDD0_TEMP;
+ case SVI_VDDCR_VDD1_TEMP:
+ return AMDGPU_VDDCR_VDD1_TEMP;
+ case SVI_VDDCR_VDD2_TEMP:
+ return AMDGPU_VDDCR_VDD2_TEMP;
+ case SVI_VDDCR_VDD3_TEMP:
+ return AMDGPU_VDDCR_VDD3_TEMP;
+ case SVI_VDDCR_SOC_A_TEMP:
+ return AMDGPU_VDDCR_SOC_A_TEMP;
+ case SVI_VDDCR_SOC_C_TEMP:
+ return AMDGPU_VDDCR_SOC_C_TEMP;
+ case SVI_VDDCR_SOCIO_A_TEMP:
+ return AMDGPU_VDDCR_SOCIO_A_TEMP;
+ case SVI_VDDCR_SOCIO_C_TEMP:
+ return AMDGPU_VDDCR_SOCIO_C_TEMP;
+ case SVI_VDD_085_HBM_TEMP:
+ return AMDGPU_VDD_085_HBM_TEMP;
+ case SVI_VDDCR_11_HBM_B_TEMP:
+ return AMDGPU_VDDCR_11_HBM_B_TEMP;
+ case SVI_VDDCR_11_HBM_D_TEMP:
+ return AMDGPU_VDDCR_11_HBM_D_TEMP;
+ case SVI_VDD_USR_TEMP:
+ return AMDGPU_VDD_USR_TEMP;
+ case SVI_VDDIO_11_E32_TEMP:
+ return AMDGPU_VDDIO_11_E32_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static enum amdgpu_system_temp smu_v13_0_12_get_system_sensor_type(SYSTEM_TEMP_e type)
+{
+ switch (type) {
+ case SYSTEM_TEMP_UBB_FPGA:
+ return AMDGPU_UBB_FPGA_TEMP;
+ case SYSTEM_TEMP_UBB_FRONT:
+ return AMDGPU_UBB_FRONT_TEMP;
+ case SYSTEM_TEMP_UBB_BACK:
+ return AMDGPU_UBB_BACK_TEMP;
+ case SYSTEM_TEMP_UBB_OAM7:
+ return AMDGPU_UBB_OAM7_TEMP;
+ case SYSTEM_TEMP_UBB_IBC:
+ return AMDGPU_UBB_IBC_TEMP;
+ case SYSTEM_TEMP_UBB_UFPGA:
+ return AMDGPU_UBB_UFPGA_TEMP;
+ case SYSTEM_TEMP_UBB_OAM1:
+ return AMDGPU_UBB_OAM1_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_HSC:
+ return AMDGPU_OAM_0_1_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_2_3_HSC:
+ return AMDGPU_OAM_2_3_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_HSC:
+ return AMDGPU_OAM_4_5_HSC_TEMP;
+ case SYSTEM_TEMP_OAM_6_7_HSC:
+ return AMDGPU_OAM_6_7_HSC_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_0V72_VR:
+ return AMDGPU_UBB_FPGA_0V72_VR_TEMP;
+ case SYSTEM_TEMP_UBB_FPGA_3V3_VR:
+ return AMDGPU_UBB_FPGA_3V3_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_2_3_1V2_VR:
+ return AMDGPU_RETIMER_0_1_2_3_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_6_7_1V2_VR:
+ return AMDGPU_RETIMER_4_5_6_7_1V2_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_0_1_0V9_VR:
+ return AMDGPU_RETIMER_0_1_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_4_5_0V9_VR:
+ return AMDGPU_RETIMER_4_5_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_2_3_0V9_VR:
+ return AMDGPU_RETIMER_2_3_0V9_VR_TEMP;
+ case SYSTEM_TEMP_RETIMER_6_7_0V9_VR:
+ return AMDGPU_RETIMER_6_7_0V9_VR_TEMP;
+ case SYSTEM_TEMP_OAM_0_1_2_3_3V3_VR:
+ return AMDGPU_OAM_0_1_2_3_3V3_VR_TEMP;
+ case SYSTEM_TEMP_OAM_4_5_6_7_3V3_VR:
+ return AMDGPU_OAM_4_5_6_7_3V3_VR_TEMP;
+ case SYSTEM_TEMP_IBC_HSC:
+ return AMDGPU_IBC_HSC_TEMP;
+ case SYSTEM_TEMP_IBC:
+ return AMDGPU_IBC_TEMP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static bool smu_v13_0_12_is_temp_metrics_supported(struct smu_context *smu,
+ enum smu_temp_metric_type type)
+{
+ switch (type) {
+ case SMU_TEMP_METRIC_BASEBOARD:
+ if (smu->adev->gmc.xgmi.physical_node_id == 0 &&
+ smu->adev->gmc.xgmi.num_physical_nodes > 1 &&
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS)))
+ return true;
+ break;
+ case SMU_TEMP_METRIC_GPUBOARD:
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(TEMP_METRICS));
+ default:
+ break;
+ }
+
+ return false;
+}
+
+int smu_v13_0_12_get_npm_data(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *sys_table;
+ int ret;
+
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS)))
+ return -EOPNOTSUPP;
+
+ if (sensor == AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT) {
+ *value = pptable->MaxNodePowerLimit;
+ return 0;
+ }
+
+ ret = smu_v13_0_12_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ *value = SMUQ10_ROUND(metrics->NodePowerLimit);
+ break;
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ *value = SMUQ10_ROUND(metrics->NodePower);
+ break;
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ *value = SMUQ10_ROUND(metrics->GlobalPPTResidencyAcc);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu,
+ enum smu_temp_metric_type type, void *table)
+{
+ struct amdgpu_baseboard_temp_metrics_v1_0 *baseboard_temp_metrics;
+ struct amdgpu_gpuboard_temp_metrics_v1_0 *gpuboard_temp_metrics;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ SystemMetricsTable_t *metrics;
+ struct smu_table *data_table;
+ struct smu_table *sys_table;
+ int ret, sensor_type;
+ u32 idx, sensors;
+ ssize_t size;
+
+ if (type == SMU_TEMP_METRIC_BASEBOARD) {
+ /* Use the baseboard temperature metrics table */
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_BASEBOARD_TEMP_METRICS];
+ baseboard_temp_metrics =
+ (struct amdgpu_baseboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+ size = sizeof(*baseboard_temp_metrics);
+ } else {
+ data_table =
+ &smu->smu_table.tables[SMU_TABLE_GPUBOARD_TEMP_METRICS];
+ gpuboard_temp_metrics =
+ (struct amdgpu_gpuboard_temp_metrics_v1_0 *)
+ data_table->cache.buffer;
+ size = sizeof(*gpuboard_temp_metrics);
+ }
+
+ ret = smu_v13_0_12_get_system_metrics_table(smu);
+ if (ret)
+ return ret;
+
+ sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS];
+ metrics = (SystemMetricsTable_t *)sys_table->cache.buffer;
+ smu_table_cache_update_time(data_table, jiffies);
+
+ if (type == SMU_TEMP_METRIC_GPUBOARD) {
+ gpuboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ gpuboard_temp_metrics->label_version = metrics->LabelVersion;
+ gpuboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ idx = 0;
+ for (sensors = 0; sensors < NODE_TEMP_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->NodeTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_node_sensor_type(sensors);
+ gpuboard_temp_metrics->node_temp[idx] =
+ ((int)metrics->NodeTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->node_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+
+ idx = 0;
+
+ for (sensors = 0; sensors < SVI_MAX_TEMP_ENTRIES; sensors++) {
+ if (metrics->VrTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_vr_sensor_type(sensors);
+ gpuboard_temp_metrics->vr_temp[idx] =
+ ((int)metrics->VrTemperatures[sensors]) & 0xFFFFFF;
+ gpuboard_temp_metrics->vr_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ } else if (type == SMU_TEMP_METRIC_BASEBOARD) {
+ baseboard_temp_metrics->accumulation_counter = metrics->AccumulationCounter;
+ baseboard_temp_metrics->label_version = metrics->LabelVersion;
+ baseboard_temp_metrics->node_id = metrics->NodeIdentifier;
+
+ idx = 0;
+ for (sensors = 0; sensors < SYSTEM_TEMP_MAX_ENTRIES; sensors++) {
+ if (metrics->SystemTemperatures[sensors] != -1) {
+ sensor_type = smu_v13_0_12_get_system_sensor_type(sensors);
+ baseboard_temp_metrics->system_temp[idx] =
+ ((int)metrics->SystemTemperatures[sensors]) & 0xFFFFFF;
+ baseboard_temp_metrics->system_temp[idx] |= (sensor_type << 24);
+ idx++;
+ }
+ }
+ }
+
+ memcpy(table, data_table->cache.buffer, size);
+
+ return size;
+}
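Each packed entry above keeps the Celsius value in bits 23:0 and the sensor type in bits 31:24; a self-contained decode sketch (names are illustrative):

#include <stdint.h>

/* Split one packed temperature word back into type and signed value. */
static void decode_temp_word(uint32_t word, uint8_t *sensor_type,
			     int32_t *temp_c)
{
	*sensor_type = word >> 24;
	/* Shift out the type byte, then sign-extend the 24-bit field. */
	*temp_c = (int32_t)(word << 8) >> 8;
}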
+
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics)
{
const u8 num_jpeg_rings = NUM_JPEG_RINGS_FW;
- struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct smu_v13_0_6_partition_metrics *xcp_metrics;
struct amdgpu_device *adev = smu->adev;
MetricsTable_t *metrics;
int inst, j, k, idx;
u32 inst_mask;
metrics = (MetricsTable_t *)smu_metrics;
- xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *) table;
- smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+ xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table;
amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
idx = 0;
for_each_inst(k, inst_mask) {
@@ -415,22 +788,17 @@ ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp
return sizeof(*xcp_metrics);
}
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics)
+void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
+ void *smu_metrics,
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics)
{
- struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_8 *gpu_metrics =
- (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
- int ret = 0, xcc_id, inst, i, j, k, idx;
struct amdgpu_device *adev = smu->adev;
+ int ret = 0, xcc_id, inst, i, j;
u8 num_jpeg_rings_gpu_metrics;
MetricsTable_t *metrics;
- struct amdgpu_xcp *xcp;
- u32 inst_mask;
metrics = (MetricsTable_t *)smu_metrics;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
-
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(metrics->MaxSocketTemperature);
/* Individual HBM stack temperature is not reported */
@@ -520,55 +888,186 @@ ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void
gpu_metrics->xgmi_link_status[j] = ret;
}
- gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
-
num_jpeg_rings_gpu_metrics = NUM_JPEG_RINGS_GPU_METRICS(gpu_metrics);
- for_each_xcp(adev->xcp_mgr, xcp, i) {
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- /* Both JPEG and VCN has same instances */
- inst = GET_INST(VCN, k);
-
- for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) {
- gpu_metrics->xcp_stats[i].jpeg_busy
- [(idx * num_jpeg_rings_gpu_metrics) + j] =
- SMUQ10_ROUND(metrics->JpegBusy
- [(inst * NUM_JPEG_RINGS_FW) + j]);
- }
- gpu_metrics->xcp_stats[i].vcn_busy[idx] =
- SMUQ10_ROUND(metrics->VcnBusy[inst]);
- idx++;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+
+ for (j = 0; j < num_jpeg_rings_gpu_metrics; ++j) {
+ gpu_metrics->jpeg_busy[(i * num_jpeg_rings_gpu_metrics) +
+ j] =
+ SMUQ10_ROUND(
+ metrics->JpegBusy[(inst *
+ NUM_JPEG_RINGS_FW) +
+ j]);
}
+ gpu_metrics->vcn_busy[i] = SMUQ10_ROUND(metrics->VcnBusy[inst]);
+ }
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- inst = GET_INST(GC, k);
- gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
- SMUQ10_ROUND(metrics->GfxBusy[inst]);
- gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
- SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
- SMUQ10_ROUND(metrics->GfxclkBelowHostLimitPptAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
- SMUQ10_ROUND(metrics->GfxclkBelowHostLimitThmAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
- SMUQ10_ROUND(metrics->GfxclkLowUtilizationAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
- SMUQ10_ROUND(metrics->GfxclkBelowHostLimitTotalAcc[inst]);
- }
- idx++;
- }
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ inst = GET_INST(GC, i);
+ gpu_metrics->gfx_busy_inst[i] =
+ SMUQ10_ROUND(metrics->GfxBusy[inst]);
+ gpu_metrics->gfx_busy_acc[i] =
+ SMUQ10_ROUND(metrics->GfxBusyAcc[inst]);
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics
+ ->gfx_below_host_limit_ppt_acc[i] = SMUQ10_ROUND(
+ metrics->GfxclkBelowHostLimitPptAcc[inst]);
+ gpu_metrics
+ ->gfx_below_host_limit_thm_acc[i] = SMUQ10_ROUND(
+ metrics->GfxclkBelowHostLimitThmAcc[inst]);
+ gpu_metrics->gfx_low_utilization_acc[i] = SMUQ10_ROUND(
+ metrics->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_total_acc
+ [i] = SMUQ10_ROUND(
+ metrics->GfxclkBelowHostLimitTotalAcc[inst]);
+ }
}
gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
gpu_metrics->xgmi_link_speed = metrics->XgmiBitrate;
gpu_metrics->firmware_timestamp = metrics->Timestamp;
+}
- *table = (void *)gpu_metrics;
+const struct smu_temp_funcs smu_v13_0_12_temp_funcs = {
+ .temp_metrics_is_supported = smu_v13_0_12_is_temp_metrics_supported,
+ .get_temp_metrics = smu_v13_0_12_get_temp_metrics,
+};
- return sizeof(*gpu_metrics);
+static int smu_v13_0_12_get_ras_table_version(struct amdgpu_device *adev,
+ uint32_t *table_version)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetRASTableVersion, 0, table_version);
+}
+
+static int smu_v13_0_12_get_badpage_count(struct amdgpu_device *adev, uint32_t *count,
+ uint32_t timeout)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint64_t end, now;
+ int ret = 0;
+
+ now = (uint64_t)ktime_to_ms(ktime_get());
+ end = now + timeout;
+ do {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageCount, 0, count);
+ /* eeprom is not ready */
+ if (ret != -EBUSY)
+ return ret;
+ mdelay(10);
+ now = (uint64_t)ktime_to_ms(ktime_get());
+ } while (now < end);
+
+ dev_err(adev->dev,
+ "smu get bad page count timeout!\n");
+ return ret;
}
+
+static int smu_v13_0_12_set_timestamp(struct amdgpu_device *adev, uint64_t timestamp)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetTimestamp, (uint32_t)timestamp, NULL);
+}
+
+static int smu_v13_0_12_get_timestamp(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *timestamp)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t temp;
+ int ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetTimestamp, index, &temp);
+ if (!ret)
+ *timestamp = temp;
+
+ return ret;
+}
+
+static int smu_v13_0_12_get_badpage_ipid(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *ipid)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t temp_arg, temp_ipid_lo, temp_ipid_high;
+ int ret;
+
+ temp_arg = index | (1 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_lo);
+ if (ret)
+ return ret;
+
+ temp_arg = index | (2 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageIpid, temp_arg, &temp_ipid_high);
+ if (!ret)
+ *ipid = (uint64_t)temp_ipid_high << 32 | temp_ipid_lo;
+ return ret;
+}
+
+static int smu_v13_0_12_erase_ras_table(struct amdgpu_device *adev,
+ uint32_t *result)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_EraseRasTable, 0, result);
+}
+
+static int smu_v13_0_12_get_badpage_mca_addr(struct amdgpu_device *adev,
+ uint16_t index, uint64_t *mca_addr)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ uint32_t temp_arg, temp_addr_lo, temp_addr_high;
+ int ret;
+
+ temp_arg = index | (1 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_lo);
+ if (ret)
+ return ret;
+
+ temp_arg = index | (2 << 16);
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetBadPageMcaAddr, temp_arg, &temp_addr_high);
+ if (!ret)
+ *mca_addr = (uint64_t)temp_addr_high << 32 | temp_addr_lo;
+ return ret;
+}
+
+static const struct ras_eeprom_smu_funcs smu_v13_0_12_eeprom_smu_funcs = {
+ .get_ras_table_version = smu_v13_0_12_get_ras_table_version,
+ .get_badpage_count = smu_v13_0_12_get_badpage_count,
+ .get_badpage_mca_addr = smu_v13_0_12_get_badpage_mca_addr,
+ .set_timestamp = smu_v13_0_12_set_timestamp,
+ .get_timestamp = smu_v13_0_12_get_timestamp,
+ .get_badpage_ipid = smu_v13_0_12_get_badpage_ipid,
+ .erase_ras_table = smu_v13_0_12_erase_ras_table,
+};
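A hedged sketch of how the RAS EEPROM core might call through this ops table (the consumer-side function is an assumption):

static int ras_eeprom_get_table_version(struct amdgpu_device *adev,
					const struct ras_eeprom_smu_funcs *funcs,
					uint32_t *version)
{
	if (!funcs || !funcs->get_ras_table_version)
		return -EOPNOTSUPP;

	return funcs->get_ras_table_version(adev, version);
}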
+
+static void smu_v13_0_12_ras_smu_feature_flags(struct amdgpu_device *adev, uint64_t *flags)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if (!flags)
+ return;
+
+ *flags = 0ULL;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(RAS_EEPROM)))
+ *flags |= RAS_SMU_FEATURE_BIT__RAS_EEPROM;
+
+}
+
+const struct ras_smu_drv smu_v13_0_12_ras_smu_drv = {
+ .smu_eeprom_funcs = &smu_v13_0_12_eeprom_smu_funcs,
+ .ras_smu_feature_flags = smu_v13_0_12_ras_smu_feature_flags,
+};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index b081ae3e8f43..6908f9930f16 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -497,11 +497,12 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -565,7 +566,7 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_4_read_sensor(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index f5db181ef489..4576bf008b22 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -861,11 +861,12 @@ out:
static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min = 0, max = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -928,7 +929,7 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 9cc294f4708b..44e1cd821eec 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -143,9 +143,9 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1),
- MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
+ MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1),
- MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
@@ -177,7 +177,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0),
MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0),
MSG_MAP(ResetVCN, PPSMC_MSG_ResetVCN, 0),
- MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 0),
+ MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1),
};
// clang-format on
@@ -312,6 +312,8 @@ static void smu_v13_0_14_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
if (fw_ver >= 0x5551200)
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+ if (fw_ver >= 0x5551800)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
if (fw_ver >= 0x5551600) {
smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
@@ -350,6 +352,23 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
+
+ if (fw_ver > 0x04560900)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
+
+ if (fw_ver >= 0x04560D00)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(FAST_PPT));
+
+ if (fw_ver >= 0x04560700) {
+ if (fw_ver >= 0x04560900) {
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
+ if (smu->adev->gmc.xgmi.physical_node_id == 0)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(NPM_METRICS));
+ } else if (!amdgpu_sriov_vf(smu->adev))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS));
+ } else {
+ smu_v13_0_12_tables_fini(smu);
+ }
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
@@ -402,19 +421,41 @@ static void smu_v13_0_6_init_caps(struct smu_context *smu)
if ((pgm == 7 && fw_ver >= 0x7550E00) ||
(pgm == 0 && fw_ver >= 0x00557E00))
smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
- if ((pgm == 0 && fw_ver >= 0x00557F01) ||
- (pgm == 7 && fw_ver >= 0x7551000)) {
- smu_v13_0_6_cap_set(smu, SMU_CAP(STATIC_METRICS));
- smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE));
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (fw_ver >= 0x00558200)
+ amdgpu_virt_attr_set(&adev->virt.virt_caps,
+ AMDGPU_VIRT_CAP_POWER_LIMIT,
+ AMDGPU_CAP_ATTR_RW);
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
+ }
+ } else {
+ if ((pgm == 0 && fw_ver >= 0x00557F01) ||
+ (pgm == 7 && fw_ver >= 0x7551000)) {
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(STATIC_METRICS));
+ smu_v13_0_6_cap_set(smu,
+ SMU_CAP(BOARD_VOLTAGE));
+ }
+ if ((pgm == 0 && fw_ver >= 0x00558000) ||
+ (pgm == 7 && fw_ver >= 0x7551000))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
- if ((pgm == 0 && fw_ver >= 0x00558000) ||
- (pgm == 7 && fw_ver >= 0x7551000))
- smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION));
}
if (((pgm == 7) && (fw_ver >= 0x7550700)) ||
((pgm == 0) && (fw_ver >= 0x00557900)) ||
((pgm == 4) && (fw_ver >= 0x4557000)))
smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
+
+ if ((pgm == 0 && fw_ver >= 0x00558200) ||
+ (pgm == 7 && fw_ver >= 0x07551400))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(VCN_RESET));
}
static void smu_v13_0_x_init_caps(struct smu_context *smu)
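These init_caps routines all follow the same scheme: each optional feature is expressed as a capability bit that is set exactly once at init, gated on the packed firmware version, and the rest of the driver tests the cap rather than re-checking fw_ver. A reduced sketch of the idiom (cap IDs and version thresholds here are illustrative, not the driver's):

#include <stdint.h>
#include <stdbool.h>

enum cap { CAP_VCN_RESET, CAP_FAST_PPT, CAP_TEMP_METRICS };

struct caps { uint64_t bits; };

static void cap_set(struct caps *c, enum cap id)
{
	c->bits |= 1ULL << id;
}

static bool cap_has(const struct caps *c, enum cap id)
{
	return c->bits & (1ULL << id);
}

/* Version-gated, set-once capability init. */
static void init_caps(struct caps *c, uint32_t fw_ver)
{
	if (fw_ver > 0x04560900)
		cap_set(c, CAP_VCN_RESET);
	if (fw_ver >= 0x04560D00)
		cap_set(c, CAP_FAST_PPT);
}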
@@ -511,8 +552,12 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics;
+ void *driver_pptable __free(kfree) = NULL;
+ void *metrics_table __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int gpu_metrcs_size = METRICS_TABLE_SIZE;
+ int ret;
if (!(adev->flags & AMD_IS_APU))
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
@@ -528,27 +573,40 @@ static int smu_v13_0_6_tables_init(struct smu_context *smu)
PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
- smu_table->metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
- if (!smu_table->metrics_table)
+ SMU_TABLE_INIT(tables, SMU_TABLE_PMFW_SYSTEM_METRICS,
+ smu_v13_0_12_get_system_metrics_size(), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
+
+ metrics_table = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
+ if (!metrics_table)
return -ENOMEM;
smu_table->metrics_time = 0;
- smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_8);
- smu_table->gpu_metrics_table =
- kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
- if (!smu_table->gpu_metrics_table) {
- kfree(smu_table->metrics_table);
+ driver_pptable = kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
+ if (!driver_pptable)
return -ENOMEM;
- }
- smu_table->driver_pptable =
- kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
- if (!smu_table->driver_pptable) {
- kfree(smu_table->metrics_table);
- kfree(smu_table->gpu_metrics_table);
- return -ENOMEM;
+ ret = smu_table_cache_init(smu, SMU_TABLE_SMU_METRICS,
+ sizeof(struct smu_v13_0_6_gpu_metrics), 1);
+ if (ret)
+ return ret;
+
+ gpu_metrics = (struct smu_v13_0_6_gpu_metrics
+ *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer);
+
+ smu_v13_0_6_gpu_metrics_init(gpu_metrics, 1, 9);
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 12)) {
+ ret = smu_v13_0_12_tables_init(smu);
+ if (ret) {
+ smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS);
+ return ret;
+ }
}
+ smu_table->metrics_table = no_free_ptr(metrics_table);
+ smu_table->driver_pptable = no_free_ptr(driver_pptable);
+
return 0;
}
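The rewritten tables_init() drops the open-coded kfree() unwind chains in favor of scope-based cleanup from <linux/cleanup.h>: a pointer declared with __free(kfree) is freed automatically when it goes out of scope, and no_free_ptr() disarms the cleanup once ownership is transferred on the success path. A minimal sketch of the idiom:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int alloc_pair(void **a_out, void **b_out)
{
	void *a __free(kfree) = kzalloc(64, GFP_KERNEL);
	void *b __free(kfree) = kzalloc(64, GFP_KERNEL);

	if (!a || !b)
		return -ENOMEM;		/* both freed automatically */

	/* Success: disarm the cleanup, hand ownership to the caller. */
	*a_out = no_free_ptr(a);
	*b_out = no_free_ptr(b);
	return 0;
}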
@@ -677,6 +735,14 @@ static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_6_fini_smc_tables(struct smu_context *smu)
+{
+ if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12))
+ smu_v13_0_12_tables_fini(smu);
+ smu_table_cache_fini(smu, SMU_TABLE_SMU_METRICS);
+ return smu_v13_0_fini_smc_tables(smu);
+}
+
static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
uint32_t *feature_mask,
uint32_t num)
@@ -708,7 +774,7 @@ int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
return ret;
}
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ amdgpu_hdp_invalidate(smu->adev, NULL);
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
smu_table->metrics_time = jiffies;
@@ -787,12 +853,23 @@ int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu)
return ret;
}
- amdgpu_asic_invalidate_hdp(smu->adev, NULL);
+ amdgpu_hdp_invalidate(smu->adev, NULL);
memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
return 0;
}
+static void smu_v13_0_6_update_caps(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+
+ if (smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)) &&
+ !pptable->PPT1Max)
+ smu_v13_0_6_cap_clear(smu, SMU_CAP(FAST_PPT));
+}
+
static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -803,14 +880,18 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
struct PPTable_t *pptable =
(struct PPTable_t *)smu_table->driver_pptable;
int version = smu_v13_0_6_get_metrics_version(smu);
- int ret, i, retry = 100;
+ int ret, i, retry = 100, n;
uint32_t table_version;
uint16_t max_speed;
uint8_t max_width;
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
- return smu_v13_0_12_setup_driver_pptable(smu);
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
+ ret = smu_v13_0_12_setup_driver_pptable(smu);
+ if (ret)
+ return ret;
+ goto out;
+ }
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
@@ -865,6 +946,23 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
pptable->PublicSerialNumber_AID =
GET_METRIC_FIELD(PublicSerialNumber_AID, version)[0];
+ amdgpu_device_set_uid(smu->adev->uid_info, AMDGPU_UID_TYPE_SOC,
+ 0, pptable->PublicSerialNumber_AID);
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_AID);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_AID, i,
+ GET_METRIC_FIELD(PublicSerialNumber_AID,
+ version)[i]);
+ }
+ n = ARRAY_SIZE(metrics_v0->PublicSerialNumber_XCD);
+ for (i = 0; i < n; i++) {
+ amdgpu_device_set_uid(
+ smu->adev->uid_info, AMDGPU_UID_TYPE_XCD, i,
+ GET_METRIC_FIELD(PublicSerialNumber_XCD,
+ version)[i]);
+ }
+
pptable->Init = true;
if (smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
ret = smu_v13_0_6_get_static_metrics_table(smu);
@@ -873,7 +971,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
smu_v13_0_6_fill_static_metrics_table(smu, static_metrics);
}
}
-
+out:
+ smu_v13_0_6_update_caps(smu);
return 0;
}
@@ -1319,7 +1418,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
return -EINVAL;
if (curr_clk < SMU_13_0_6_DSCLK_THRESHOLD) {
- size = sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
+ size += sysfs_emit_at(buf, size, "S: %uMhz *\n", curr_clk);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz\n", i,
clocks.data[i].clocks_in_khz /
@@ -1354,7 +1453,7 @@ static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
enum smu_clk_type type, char *buf)
{
- int now, size = 0;
+ int now, size = 0, start_offset = 0;
int ret = 0;
struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
struct smu_13_0_dpm_table *single_dpm_table;
@@ -1363,10 +1462,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
uint32_t min_clk, max_clk;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
dpm_context = smu_dpm->dpm_context;
@@ -1438,9 +1538,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "mclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "mclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_SOCCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
&now);
@@ -1452,9 +1556,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.soc_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "socclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "socclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_FCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
&now);
@@ -1466,9 +1574,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "fclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "fclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_VCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
&now);
@@ -1480,9 +1592,13 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "vclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "vclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
case SMU_DCLK:
ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
&now);
@@ -1494,14 +1610,18 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
- return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
- now, "dclk");
+ ret = smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
+ now, "dclk");
+ if (ret < 0)
+ return ret;
+ size += ret;
+ break;
default:
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
@@ -1731,6 +1851,15 @@ static int smu_v13_0_6_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
+ case AMDGPU_PP_SENSOR_NODEPOWERLIMIT:
+ case AMDGPU_PP_SENSOR_NODEPOWER:
+ case AMDGPU_PP_SENSOR_GPPTRESIDENCY:
+ case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT:
+ ret = smu_v13_0_12_get_npm_data(smu, sensor, (uint32_t *)data);
+ if (ret)
+ return ret;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
default:
ret = -EOPNOTSUPP;
@@ -1762,7 +1891,7 @@ static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
if (current_power_limit)
*current_power_limit = power_limit;
if (default_power_limit)
- *default_power_limit = power_limit;
+ *default_power_limit = pptable->MaxSocketPowerLimit;
if (max_power_limit) {
*max_power_limit = pptable->MaxSocketPowerLimit;
@@ -1777,9 +1906,66 @@ static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
enum smu_ppt_limit_type limit_type,
uint32_t limit)
{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ int ret;
+
+ if (limit_type == SMU_FAST_PPT_LIMIT) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)))
+ return -EOPNOTSUPP;
+ if (limit > pptable->PPT1Max || limit < pptable->PPT1Min) {
+ dev_err(smu->adev->dev,
+ "New power limit (%d) should be between min %d max %d\n",
+ limit, pptable->PPT1Min, pptable->PPT1Max);
+ return -EINVAL;
+ }
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetFastPptLimit,
+ limit, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Set fast PPT limit failed!\n");
+ return ret;
+ }
+
return smu_v13_0_set_power_limit(smu, limit_type, limit);
}
+static int smu_v13_0_6_get_ppt_limit(struct smu_context *smu,
+ uint32_t *ppt_limit,
+ enum smu_ppt_limit_type type,
+ enum smu_ppt_limit_level level)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct PPTable_t *pptable =
+ (struct PPTable_t *)smu_table->driver_pptable;
+ int ret = 0;
+
+ if (type == SMU_FAST_PPT_LIMIT) {
+ if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(FAST_PPT)))
+ return -EOPNOTSUPP;
+ switch (level) {
+ case SMU_PPT_LIMIT_MAX:
+ *ppt_limit = pptable->PPT1Max;
+ break;
+ case SMU_PPT_LIMIT_CURRENT:
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPptLimit, ppt_limit);
+ if (ret)
+ dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
+ break;
+ case SMU_PPT_LIMIT_DEFAULT:
+ *ppt_limit = pptable->PPT1Default;
+ break;
+ case SMU_PPT_LIMIT_MIN:
+ *ppt_limit = pptable->PPT1Min;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+ }
+ return -EOPNOTSUPP;
+}
+
static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -2300,7 +2486,7 @@ static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
memcpy(table->cpu_addr, table_data, table_size);
/* Flush hdp cache */
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
NULL);
@@ -2426,10 +2612,10 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v13_0_6_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2437,27 +2623,12 @@ static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
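Switching to devm_i2c_add_adapter() ties each adapter's lifetime to the underlying struct device, which is what lets both the Out_err unwind loop and the i2c_del_adapter() calls in the fini path be deleted: devres removes the adapters automatically on unbind or on a failed probe. A short sketch of the pattern:

#include <linux/device.h>
#include <linux/i2c.h>

static int register_buses(struct device *dev, struct i2c_adapter *adap, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = devm_i2c_add_adapter(dev, &adap[i]);
		if (ret)
			return ret;	/* earlier adapters unwound by devres */
	}
	return 0;
}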
@@ -2559,10 +2730,10 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
{
const u8 num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
int version = smu_v13_0_6_get_metrics_version(smu);
- struct amdgpu_partition_metrics_v1_0 *xcp_metrics;
+ struct smu_v13_0_6_partition_metrics *xcp_metrics;
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
int ret, inst, i, j, k, idx;
- MetricsTableV0_t *metrics_v0;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
struct amdgpu_xcp *xcp;
@@ -2579,25 +2750,22 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
if (i == adev->xcp_mgr->num_xcps)
return -EINVAL;
- xcp_metrics = (struct amdgpu_partition_metrics_v1_0 *)table;
- smu_cmn_init_partition_metrics(xcp_metrics, 1, 0);
+ xcp_metrics = (struct smu_v13_0_6_partition_metrics *)table;
+ smu_v13_0_6_partition_metrics_init(xcp_metrics, 1, 1);
metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
if (!metrics_v0)
return -ENOMEM;
ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
- if (ret) {
- kfree(metrics_v0);
+ if (ret)
return ret;
- }
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) ==
IP_VERSION(13, 0, 12) &&
- smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
- ret = smu_v13_0_12_get_xcp_metrics(smu, xcp, table, metrics_v0);
- goto out;
- }
+ smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS)))
+ return smu_v13_0_12_get_xcp_metrics(smu, xcp, table,
+ metrics_v0);
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
@@ -2668,8 +2836,6 @@ static ssize_t smu_v13_0_6_get_xcp_metrics(struct smu_context *smu, int xcp_id,
idx++;
}
}
-out:
- kfree(metrics_v0);
return sizeof(*xcp_metrics);
}
@@ -2677,40 +2843,37 @@ out:
static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
- struct gpu_metrics_v1_8 *gpu_metrics =
- (struct gpu_metrics_v1_8 *)smu_table->gpu_metrics_table;
+ struct smu_table *tables = smu_table->tables;
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics;
int version = smu_v13_0_6_get_metrics_version(smu);
- int ret = 0, xcc_id, inst, i, j, k, idx;
+ MetricsTableV0_t *metrics_v0 __free(kfree) = NULL;
struct amdgpu_device *adev = smu->adev;
- MetricsTableV0_t *metrics_v0;
+ int ret = 0, xcc_id, inst, i, j;
MetricsTableV1_t *metrics_v1;
MetricsTableV2_t *metrics_v2;
- struct amdgpu_xcp *xcp;
u16 link_width_level;
- ssize_t num_bytes;
u8 num_jpeg_rings;
- u32 inst_mask;
bool per_inst;
metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL);
ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false);
- if (ret) {
- kfree(metrics_v0);
+ if (ret)
return ret;
- }
+
+ metrics_v2 = (MetricsTableV2_t *)metrics_v0;
+ gpu_metrics = (struct smu_v13_0_6_gpu_metrics
+ *)(tables[SMU_TABLE_SMU_METRICS].cache.buffer);
if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) &&
smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) {
- num_bytes = smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0);
- kfree(metrics_v0);
- return num_bytes;
+ smu_v13_0_12_get_gpu_metrics(smu, table, metrics_v0,
+ gpu_metrics);
+ goto fill;
}
metrics_v1 = (MetricsTableV1_t *)metrics_v0;
metrics_v2 = (MetricsTableV2_t *)metrics_v0;
- smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 8);
-
gpu_metrics->temperature_hotspot =
SMUQ10_ROUND(GET_METRIC_FIELD(MaxSocketTemperature, version));
/* Individual HBM stack temperature is not reported */
@@ -2831,55 +2994,49 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->xgmi_link_status[j] = ret;
}
- gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;
-
per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));
num_jpeg_rings = AMDGPU_MAX_JPEG_RINGS_4_0_3;
- for_each_xcp(adev->xcp_mgr, xcp, i) {
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- /* Both JPEG and VCN has same instances */
- inst = GET_INST(VCN, k);
-
- for (j = 0; j < num_jpeg_rings; ++j) {
- gpu_metrics->xcp_stats[i].jpeg_busy
- [(idx * num_jpeg_rings) + j] =
- SMUQ10_ROUND(GET_METRIC_FIELD(JpegBusy, version)
- [(inst * num_jpeg_rings) + j]);
- }
- gpu_metrics->xcp_stats[i].vcn_busy[idx] =
- SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
- idx++;
-
- }
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ inst = GET_INST(JPEG, i);
+ for (j = 0; j < num_jpeg_rings; ++j)
+ gpu_metrics->jpeg_busy[(i * num_jpeg_rings) + j] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(
+ JpegBusy,
+ version)[(inst * num_jpeg_rings) + j]);
+ }
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ inst = GET_INST(VCN, i);
+ gpu_metrics->vcn_busy[i] =
+ SMUQ10_ROUND(GET_METRIC_FIELD(VcnBusy, version)[inst]);
+ }
- if (per_inst) {
- amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
- idx = 0;
- for_each_inst(k, inst_mask) {
- inst = GET_INST(GC, k);
- gpu_metrics->xcp_stats[i].gfx_busy_inst[idx] =
- SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
- gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
- SMUQ10_ROUND(GET_GPU_METRIC_FIELD(GfxBusyAcc,
- version)[inst]);
- if (smu_v13_0_6_cap_supported(smu, SMU_CAP(HST_LIMIT_METRICS))) {
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_ppt_acc[idx] =
- SMUQ10_ROUND
- (metrics_v0->GfxclkBelowHostLimitPptAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_thm_acc[idx] =
- SMUQ10_ROUND
- (metrics_v0->GfxclkBelowHostLimitThmAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_low_utilization_acc[idx] =
- SMUQ10_ROUND
- (metrics_v0->GfxclkLowUtilizationAcc[inst]);
- gpu_metrics->xcp_stats[i].gfx_below_host_limit_total_acc[idx] =
- SMUQ10_ROUND
- (metrics_v0->GfxclkBelowHostLimitTotalAcc[inst]);
- }
- idx++;
+ if (per_inst) {
+ for (i = 0; i < NUM_XCC(adev->gfx.xcc_mask); ++i) {
+ inst = GET_INST(GC, i);
+ gpu_metrics->gfx_busy_inst[i] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusy, version)[inst]);
+ gpu_metrics->gfx_busy_acc[i] = SMUQ10_ROUND(
+ GET_GPU_METRIC_FIELD(GfxBusyAcc,
+ version)[inst]);
+ if (smu_v13_0_6_cap_supported(
+ smu, SMU_CAP(HST_LIMIT_METRICS))) {
+ gpu_metrics->gfx_below_host_limit_ppt_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitPptAcc
+ [inst]);
+ gpu_metrics->gfx_below_host_limit_thm_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitThmAcc
+ [inst]);
+ gpu_metrics->gfx_low_utilization_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0
+ ->GfxclkLowUtilizationAcc[inst]);
+ gpu_metrics->gfx_below_host_limit_total_acc
+ [i] = SMUQ10_ROUND(
+ metrics_v0->GfxclkBelowHostLimitTotalAcc
+ [inst]);
}
}
}
@@ -2889,8 +3046,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
gpu_metrics->firmware_timestamp = GET_METRIC_FIELD(Timestamp, version);
- *table = (void *)gpu_metrics;
- kfree(metrics_v0);
+fill:
+ *table = tables[SMU_TABLE_SMU_METRICS].cache.buffer;
return sizeof(*gpu_metrics);
}
@@ -3076,7 +3233,7 @@ static inline bool smu_v13_0_6_is_link_reset_supported(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int var = (adev->pdev->device & 0xF);
- if (var == 0x1)
+ if (var == 0x0 || var == 0x1 || var == 0x3)
return true;
return false;
@@ -3152,6 +3309,11 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static bool smu_v13_0_6_reset_vcn_is_supported(struct smu_context *smu)
+{
+ return smu_v13_0_6_cap_supported(smu, SMU_CAP(VCN_RESET));
+}
+
static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
{
int ret = 0;
@@ -3164,6 +3326,38 @@ static int smu_v13_0_6_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
return ret;
}
+static int smu_v13_0_6_ras_send_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t param, uint32_t *read_arg)
+{
+ int ret;
+
+ switch (msg) {
+ case SMU_MSG_QueryValidMcaCount:
+ case SMU_MSG_QueryValidMcaCeCount:
+ case SMU_MSG_McaBankDumpDW:
+ case SMU_MSG_McaBankCeDumpDW:
+ case SMU_MSG_ClearMcaOnRead:
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg, param, read_arg);
+ break;
+ default:
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_6_post_init(struct smu_context *smu)
+{
+ if (smu_v13_0_6_is_link_reset_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__LINK_RESET);
+
+ if (smu_v13_0_6_reset_sdma_is_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
+
+ if (smu_v13_0_6_reset_vcn_is_supported(smu))
+ smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__VCN_RESET);
+
+ return 0;
+}
static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
{
@@ -3781,6 +3975,35 @@ static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = {
.parse_error_code = aca_smu_parse_error_code,
};
+static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu)
+{
+ smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)
+ == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL;
+}
+
+static int smu_v13_0_6_get_ras_smu_drv(struct smu_context *smu, const struct ras_smu_drv **ras_smu_drv)
+{
+ if (!ras_smu_drv)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(smu->adev))
+ return -EOPNOTSUPP;
+
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_HROM_EN_BIT))
+ smu_v13_0_6_cap_set(smu, SMU_CAP(RAS_EEPROM));
+
+ switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 12):
+ *ras_smu_drv = &smu_v13_0_12_ras_smu_drv;
+ break;
+ default:
+ *ras_smu_drv = NULL;
+ break;
+ }
+
+ return 0;
+}
+
static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
@@ -3797,7 +4020,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.init_microcode = smu_v13_0_6_init_microcode,
.fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_6_init_smc_tables,
- .fini_smc_tables = smu_v13_0_fini_smc_tables,
+ .fini_smc_tables = smu_v13_0_6_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_6_check_fw_status,
@@ -3812,6 +4035,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_enabled_mask = smu_v13_0_6_get_enabled_mask,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.set_power_limit = smu_v13_0_6_set_power_limit,
+ .get_ppt_limit = smu_v13_0_6_get_ppt_limit,
.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
.register_irq_handler = smu_v13_0_6_register_irq_handler,
.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
@@ -3828,7 +4052,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.get_xcp_metrics = smu_v13_0_6_get_xcp_metrics,
.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
- .link_reset_is_support = smu_v13_0_6_is_link_reset_supported,
.mode1_reset = smu_v13_0_6_mode1_reset,
.mode2_reset = smu_v13_0_6_mode2_reset,
.link_reset = smu_v13_0_6_link_reset,
@@ -3838,8 +4061,10 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
.send_rma_reason = smu_v13_0_6_send_rma_reason,
.reset_sdma = smu_v13_0_6_reset_sdma,
- .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported,
.dpm_reset_vcn = smu_v13_0_6_reset_vcn,
+ .post_init = smu_v13_0_6_post_init,
+ .ras_send_msg = smu_v13_0_6_ras_send_msg,
+ .get_ras_smu_drv = smu_v13_0_6_get_ras_smu_drv,
};
void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
@@ -3851,9 +4076,11 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12)) ?
smu_v13_0_12_feature_mask_map : smu_v13_0_6_feature_mask_map;
smu->table_map = smu_v13_0_6_table_map;
- smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
+ smu->smc_driver_if_version = SMU_IGNORE_IF_VERSION;
smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
smu_v13_0_set_smu_mailbox_registers(smu);
+ smu_v13_0_6_set_temp_funcs(smu);
amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index 67b30674fd31..6cbdd7c5ded9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -49,6 +49,10 @@ struct PPTable_t {
uint32_t MaxLclkDpmRange;
uint32_t MinLclkDpmRange;
uint64_t PublicSerialNumber_AID;
+ uint32_t MaxNodePowerLimit;
+ uint32_t PPT1Max;
+ uint32_t PPT1Min;
+ uint32_t PPT1Default;
bool Init;
};
@@ -64,13 +68,25 @@ enum smu_v13_0_6_caps {
SMU_CAP(RMA_MSG),
SMU_CAP(ACA_SYND),
SMU_CAP(SDMA_RESET),
+ SMU_CAP(VCN_RESET),
SMU_CAP(STATIC_METRICS),
SMU_CAP(HST_LIMIT_METRICS),
SMU_CAP(BOARD_VOLTAGE),
SMU_CAP(PLDM_VERSION),
+ SMU_CAP(TEMP_METRICS),
+ SMU_CAP(NPM_METRICS),
+ SMU_CAP(RAS_EEPROM),
+ SMU_CAP(FAST_PPT),
SMU_CAP(ALL),
};
+#define SMU_13_0_6_NUM_XGMI_LINKS 8
+#define SMU_13_0_6_MAX_GFX_CLKS 8
+#define SMU_13_0_6_MAX_CLKS 4
+#define SMU_13_0_6_MAX_XCC 8
+#define SMU_13_0_6_MAX_VCN 4
+#define SMU_13_0_6_MAX_JPEG 40
+
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu);
@@ -79,13 +95,171 @@ int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table,
bool smu_v13_0_12_is_dpm_running(struct smu_context *smu);
int smu_v13_0_12_get_max_metrics_size(void);
+size_t smu_v13_0_12_get_system_metrics_size(void);
int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu);
int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member, uint32_t *value);
-ssize_t smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table, void *smu_metrics);
ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu,
struct amdgpu_xcp *xcp, void *table,
void *smu_metrics);
+int smu_v13_0_12_tables_init(struct smu_context *smu);
+void smu_v13_0_12_tables_fini(struct smu_context *smu);
+int smu_v13_0_12_get_npm_data(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ uint32_t *value);
extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[];
extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[];
+extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs;
+extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv;
+
+#if defined(SWSMU_CODE_LAYER_L2)
+#include "smu_cmn.h"
+
+/* SMU v13.0.6 GPU metrics */
+#define SMU_13_0_6_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_HOTSPOT), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hotspot); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_MEM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_mem); \
+ SMU_SCALAR(SMU_MATTR(TEMPERATURE_VRSOC), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_vrsoc); \
+ SMU_SCALAR(SMU_MATTR(CURR_SOCKET_POWER), SMU_MUNIT(POWER_1), \
+ SMU_MTYPE(U16), curr_socket_power); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_GFX_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_gfx_activity); \
+ SMU_SCALAR(SMU_MATTR(AVERAGE_UMC_ACTIVITY), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U16), average_umc_activity); \
+ SMU_SCALAR(SMU_MATTR(MEM_MAX_BANDWIDTH), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U64), mem_max_bandwidth); \
+ SMU_SCALAR(SMU_MATTR(ENERGY_ACCUMULATOR), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), energy_accumulator); \
+ SMU_SCALAR(SMU_MATTR(SYSTEM_CLOCK_COUNTER), SMU_MUNIT(TIME_1), \
+ SMU_MTYPE(U64), system_clock_counter); \
+ SMU_SCALAR(SMU_MATTR(ACCUMULATION_COUNTER), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), accumulation_counter); \
+ SMU_SCALAR(SMU_MATTR(PROCHOT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), prochot_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(PPT_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), ppt_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(SOCKET_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), socket_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(VR_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), vr_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(HBM_THM_RESIDENCY_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), hbm_thm_residency_acc); \
+ SMU_SCALAR(SMU_MATTR(GFXCLK_LOCK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), gfxclk_lock_status); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), pcie_link_width); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LINK_SPEED), SMU_MUNIT(SPEED_2), \
+ SMU_MTYPE(U16), pcie_link_speed); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_WIDTH), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_width); \
+ SMU_SCALAR(SMU_MATTR(XGMI_LINK_SPEED), SMU_MUNIT(SPEED_1), \
+ SMU_MTYPE(U16), xgmi_link_speed); \
+ SMU_SCALAR(SMU_MATTR(GFX_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_activity_acc); \
+ SMU_SCALAR(SMU_MATTR(MEM_ACTIVITY_ACC), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), mem_activity_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_ACC), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U64), pcie_bandwidth_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_BANDWIDTH_INST), SMU_MUNIT(BW_1), \
+ SMU_MTYPE(U64), pcie_bandwidth_inst); \
+ SMU_SCALAR(SMU_MATTR(PCIE_L0_TO_RECOV_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_l0_to_recov_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_REPLAY_ROVER_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), pcie_replay_rover_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_SENT_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), pcie_nak_sent_count_acc); \
+ SMU_SCALAR(SMU_MATTR(PCIE_NAK_RCVD_COUNT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U32), pcie_nak_rcvd_count_acc); \
+ SMU_ARRAY(SMU_MATTR(XGMI_READ_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_read_data_acc, \
+ SMU_13_0_6_NUM_XGMI_LINKS); \
+ SMU_ARRAY(SMU_MATTR(XGMI_WRITE_DATA_ACC), SMU_MUNIT(DATA_1), \
+ SMU_MTYPE(U64), xgmi_write_data_acc, \
+ SMU_13_0_6_NUM_XGMI_LINKS); \
+ SMU_ARRAY(SMU_MATTR(XGMI_LINK_STATUS), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U16), xgmi_link_status, \
+ SMU_13_0_6_NUM_XGMI_LINKS); \
+ SMU_SCALAR(SMU_MATTR(FIRMWARE_TIMESTAMP), SMU_MUNIT(TIME_2), \
+ SMU_MTYPE(U64), firmware_timestamp); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_GFX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_uclk); \
+ SMU_SCALAR(SMU_MATTR(PCIE_LC_PERF_OTHER_END_RECOVERY), \
+ SMU_MUNIT(NONE), SMU_MTYPE(U32), \
+ pcie_lc_perf_other_end_recovery); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ jpeg_busy, SMU_13_0_6_MAX_JPEG); \
+ SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ vcn_busy, SMU_13_0_6_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \
+ gfx_busy_acc, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_low_utilization_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
+ SMU_13_0_6_MAX_XCC);
+
+DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS);
+void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
+ void *smu_metrics,
+ struct smu_v13_0_6_gpu_metrics *gpu_metrics);
+
+#define SMU_13_0_6_PARTITION_METRICS_FIELDS(SMU_SCALAR, SMU_ARRAY) \
+ SMU_ARRAY(SMU_MATTR(CURRENT_GFXCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_gfxclk, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_SOCCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_socclk, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_VCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_vclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_ARRAY(SMU_MATTR(CURRENT_DCLK0), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_dclk0, SMU_13_0_6_MAX_CLKS); \
+ SMU_SCALAR(SMU_MATTR(CURRENT_UCLK), SMU_MUNIT(CLOCK_1), \
+ SMU_MTYPE(U16), current_uclk); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_INST), SMU_MUNIT(PERCENT), \
+ SMU_MTYPE(U32), gfx_busy_inst, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(JPEG_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ jpeg_busy, SMU_13_0_6_MAX_JPEG); \
+ SMU_ARRAY(SMU_MATTR(VCN_BUSY), SMU_MUNIT(PERCENT), SMU_MTYPE(U16), \
+ vcn_busy, SMU_13_0_6_MAX_VCN); \
+ SMU_ARRAY(SMU_MATTR(GFX_BUSY_ACC), SMU_MUNIT(PERCENT), SMU_MTYPE(U64), \
+ gfx_busy_acc, SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_PPT_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_ppt_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_THM_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_thm_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_LOW_UTILIZATION_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_low_utilization_acc, \
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
+ SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
+ SMU_13_0_6_MAX_XCC);
+
+DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_partition_metrics,
+ SMU_13_0_6_PARTITION_METRICS_FIELDS);
+
+#endif /* SWSMU_CODE_LAYER_L2 */
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index c96fa5e49ed6..a3fc35b9011e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -1184,15 +1184,16 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_13_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1523,7 +1524,7 @@ static int smu_v13_0_7_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v13_0_7_od_restore_table_single(struct smu_context *smu, long input)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index 73b4506ef5a8..5d7e671fa3c3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -1041,12 +1041,13 @@ static uint32_t yellow_carp_get_umd_pstate_clk_default(struct smu_context *smu,
static int yellow_carp_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, size = 0, ret = 0;
+ int i, idx, size = 0, ret = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
uint32_t clk_limit = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -1111,7 +1112,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- return size;
+ return size - start_offset;
}
static int yellow_carp_force_clk_levels(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
index fe00c84b1cc6..b1bd946d8e30 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
@@ -1132,11 +1132,12 @@ static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
- int i, idx, ret = 0, size = 0;
+ int i, idx, ret = 0, size = 0, start_offset = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t min, max;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
switch (clk_type) {
case SMU_OD_SCLK:
@@ -1202,7 +1203,7 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index f32474af90b3..2cea688c604f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -1056,15 +1056,16 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
struct smu_14_0_dpm_table *single_dpm_table;
struct smu_14_0_pcie_table *pcie_table;
uint32_t gen_speed, lane_width;
- int i, curr_freq, size = 0;
+ int i, curr_freq, size = 0, start_offset = 0;
int32_t min_value, max_value;
int ret = 0;
smu_cmn_get_sysfs_buf(&buf, &size);
+ start_offset = size;
if (amdgpu_ras_intr_triggered()) {
size += sysfs_emit_at(buf, size, "unavailable\n");
- return size;
+ return size - start_offset;
}
switch (clk_type) {
@@ -1374,7 +1375,7 @@ static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
break;
}
- return size;
+ return size - start_offset;
}
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
@@ -2087,10 +2088,10 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
control->quirks = &smu_v14_0_2_i2c_control_quirks;
i2c_set_adapdata(control, smu_i2c);
- res = i2c_add_adapter(control);
+ res = devm_i2c_add_adapter(adev->dev, control);
if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
- goto Out_err;
+ return res;
}
}
@@ -2100,27 +2101,12 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
return 0;
-Out_err:
- for ( ; i >= 0; i--) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
-
- i2c_del_adapter(control);
- }
- return res;
}
static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- int i;
-
- for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
- struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
- struct i2c_adapter *control = &smu_i2c->adapter;
- i2c_del_adapter(control);
- }
adev->pm.ras_eeprom_i2c_bus = NULL;
adev->pm.fru_eeprom_i2c_bus = NULL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 59f9abd0f7b8..4040ff926544 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -164,9 +164,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
msg_index, param, message);
break;
case SMU_RESP_BUSY_OTHER:
- dev_err_ratelimited(adev->dev,
- "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
- msg_index, param, message);
+ /* It is normal for SMU_MSG_GetBadPageCount to return busy,
+ * so don't print an error in this case.
+ */
+ if (msg != SMU_MSG_GetBadPageCount)
+ dev_err_ratelimited(adev->dev,
+ "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
+ msg_index, param, message);
break;
case SMU_RESP_DEBUG_END:
dev_err_ratelimited(adev->dev,
@@ -256,11 +260,12 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
uint32_t flags, resp;
- bool fed_status;
+ bool fed_status, pri;
flags = __smu_cmn_get_msg_flags(smu, msg);
*poll = true;
+ pri = !!(flags & SMU_MSG_NO_PRECHECK);
/* When there is RAS fatal error, FW won't process non-RAS priority
* messages. Don't allow any messages other than RAS priority messages.
*/
@@ -272,15 +277,18 @@ static int __smu_cmn_ras_filter_msg(struct smu_context *smu,
smu_get_message_name(smu, msg));
return -EACCES;
}
+ }
+ if (pri || fed_status) {
/* FW will ignore non-priority messages when a RAS fatal error
- * is detected. Hence it is possible that a previous message
- * wouldn't have got response. Allow to continue without polling
- * for response status for priority messages.
+ * or reset condition is detected. Hence it is possible that a
+ * previous message wouldn't have gotten a response. Allow
+ * continuing without polling for the response status of
+ * priority messages.
*/
resp = RREG32(smu->resp_reg);
dev_dbg(adev->dev,
- "Sending RAS priority message %s response status: %x",
+ "Sending priority message %s response status: %x",
smu_get_message_name(smu, msg), resp);
if (resp == 0)
*poll = false;
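The reworked filter treats RAS-priority messages and those flagged SMU_MSG_NO_PRECHECK the same way: they may be submitted even though the previous message never received a response, in which case polling for that stale response is skipped when the response register reads 0. A sketch of the decision, with a hypothetical PRI_FLAG bit standing in for the flag check:

#include <stdint.h>
#include <stdbool.h>

#define PRI_FLAG (1u << 0)	/* hypothetical: NO_PRECHECK/RAS priority */

static bool poll_previous_response(uint32_t flags, bool fatal_error,
				   uint32_t resp_reg)
{
	if (((flags & PRI_FLAG) || fatal_error) && resp_reg == 0)
		return false;	/* prior message never got a response */
	return true;
}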
@@ -965,7 +973,7 @@ int smu_cmn_update_table(struct smu_context *smu,
table_index);
uint32_t table_size;
int ret = 0;
- if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
+ if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table_size = smu_table->tables[table_index].size;
@@ -976,7 +984,7 @@ int smu_cmn_update_table(struct smu_context *smu,
* Flush hdp cache: to guarantee the content seen by
* the GPU is consistent with the CPU.
*/
- amdgpu_asic_flush_hdp(adev, NULL);
+ amdgpu_hdp_flush(adev, NULL);
}
ret = smu_cmn_send_smc_msg_with_param(smu, drv2smu ?
@@ -988,7 +996,7 @@ int smu_cmn_update_table(struct smu_context *smu,
return ret;
if (!drv2smu) {
- amdgpu_asic_invalidate_hdp(adev, NULL);
+ amdgpu_hdp_invalidate(adev, NULL);
memcpy(table_data, table->cpu_addr, table_size);
}
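The hdp flush/invalidate calls around the table copy implement a simple ordering contract: flush after the CPU writes and before firmware reads (drv2smu), invalidate after firmware writes and before the CPU reads (smu2drv). A sketch with hypothetical flush/invalidate/notify hooks standing in for the amdgpu helpers:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static int xfer_table(void *cpu_buf, void *shared, size_t size, bool drv2smu,
		      void (*flush)(void), void (*invalidate)(void),
		      int (*notify_fw)(bool drv2smu))
{
	int ret;

	if (drv2smu) {
		memcpy(shared, cpu_buf, size);
		flush();		/* make CPU writes visible to FW */
	}

	ret = notify_fw(drv2smu);
	if (ret)
		return ret;

	if (!drv2smu) {
		invalidate();		/* drop stale CPU-side cache lines */
		memcpy(cpu_buf, shared, size);
	}
	return 0;
}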
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index a608cdbdada4..8d7c4814c68f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -40,6 +40,8 @@
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
#define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9
+#define SMU_IGNORE_IF_VERSION 0xFFFFFFFF
+
#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \
do { \
typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \
@@ -65,6 +67,32 @@
header->structure_size = sizeof(*tmp); \
} while (0)
+#define smu_cmn_init_baseboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_baseboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
+#define smu_cmn_init_gpuboard_temp_metrics(ptr, fr, cr) \
+ do { \
+ typecheck(struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *, \
+ (ptr)); \
+ struct amdgpu_gpuboard_temp_metrics_v##fr##_##cr *tmp = (ptr); \
+ struct metrics_table_header *header = \
+ (struct metrics_table_header *)tmp; \
+ memset(header, 0xFF, sizeof(*tmp)); \
+ header->format_revision = fr; \
+ header->content_revision = cr; \
+ header->structure_size = sizeof(*tmp); \
+ } while (0)
+
extern const int link_speed[];
/* Helper to Convert from PCIE Gen 1/2/3/4/5/6 to 0.1 GT/s speed units */
@@ -174,5 +202,72 @@ void smu_cmn_get_backend_workload_mask(struct smu_context *smu,
u32 workload_mask,
u32 *backend_workload_mask);
+/* SMU GPU metrics */
+
+/* Attribute ID mapping */
+#define SMU_MATTR(X) AMDGPU_METRICS_ATTR_ID_##X
+/* Type ID mapping */
+#define SMU_MTYPE(X) AMDGPU_METRICS_TYPE_##X
+/* Unit ID mapping */
+#define SMU_MUNIT(X) AMDGPU_METRICS_UNIT_##X
+
+/* Map TYPEID to C type */
+#define SMU_CTYPE(TYPEID) SMU_CTYPE_##TYPEID
+
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U8 u8
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S8 s8
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U16 u16
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S16 s16
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U32 u32
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S32 s32
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_U64 u64
+#define SMU_CTYPE_AMDGPU_METRICS_TYPE_S64 s64
+
+/* struct members */
+#define SMU_METRICS_SCALAR(ID, UNIT, TYPEID, NAME) \
+ u64 NAME##_ftype; \
+ SMU_CTYPE(TYPEID) NAME
+
+#define SMU_METRICS_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \
+ u64 NAME##_ftype; \
+ SMU_CTYPE(TYPEID) NAME[SIZE]
+
+/* Init functions for scalar/array fields - init to 0xFFs */
+#define SMU_METRICS_INIT_SCALAR(ID, UNIT, TYPEID, NAME) \
+ do { \
+ obj->NAME##_ftype = \
+ AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, 1); \
+ obj->NAME = (SMU_CTYPE(TYPEID)) ~0; \
+ count++; \
+ } while (0)
+
+#define SMU_METRICS_INIT_ARRAY(ID, UNIT, TYPEID, NAME, SIZE) \
+ do { \
+ obj->NAME##_ftype = \
+ AMDGPU_METRICS_ENC_ATTR(UNIT, TYPEID, ID, SIZE); \
+ memset(obj->NAME, 0xFF, sizeof(obj->NAME)); \
+ count++; \
+ } while (0)
+
+/* Declare Metrics Class and Template object */
+#define DECLARE_SMU_METRICS_CLASS(CLASSNAME, SMU_METRICS_FIELD_LIST) \
+ struct __packed CLASSNAME { \
+ struct metrics_table_header header; \
+ int attr_count; \
+ SMU_METRICS_FIELD_LIST(SMU_METRICS_SCALAR, SMU_METRICS_ARRAY); \
+ }; \
+ static inline void CLASSNAME##_init(struct CLASSNAME *obj, \
+ uint8_t frev, uint8_t crev) \
+ { \
+ int count = 0; \
+ memset(obj, 0xFF, sizeof(*obj)); \
+ obj->header.format_revision = frev; \
+ obj->header.content_revision = crev; \
+ obj->header.structure_size = sizeof(*obj); \
+ SMU_METRICS_FIELD_LIST(SMU_METRICS_INIT_SCALAR, \
+ SMU_METRICS_INIT_ARRAY) \
+ obj->attr_count = count; \
+ }
+
#endif
#endif
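DECLARE_SMU_METRICS_CLASS() above is an X-macro: a single field list is expanded twice, once into struct members (each preceded by an encoded _ftype descriptor) and once into initialization statements, so the layout and the init code can never drift apart. A standalone illustration of the technique with a hypothetical two-field list:

#include <stdint.h>
#include <string.h>

#define FIELDS(SCALAR, ARRAY)		\
	SCALAR(uint16_t, temp);		\
	ARRAY(uint32_t, clk, 4)

#define DECL_SCALAR(T, NAME)	T NAME
#define DECL_ARRAY(T, NAME, N)	T NAME[N]
#define INIT_SCALAR(T, NAME)	obj->NAME = (T)~0
#define INIT_ARRAY(T, NAME, N)	memset(obj->NAME, 0xFF, sizeof(obj->NAME))

struct metrics {
	FIELDS(DECL_SCALAR, DECL_ARRAY);	/* expands to the members */
};

static void metrics_init(struct metrics *obj)
{
	FIELDS(INIT_SCALAR, INIT_ARRAY);	/* expands to the init code */
}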
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index c09ecf1a68a0..34f6b4b1c3ba 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -100,6 +100,7 @@
#define smu_is_asic_wbrf_supported(smu) smu_ppt_funcs(is_asic_wbrf_supported, false, smu)
#define smu_enable_uclk_shadow(smu, enable) smu_ppt_funcs(enable_uclk_shadow, 0, smu, enable)
#define smu_set_wbrf_exclusion_ranges(smu, freq_band_range) smu_ppt_funcs(set_wbrf_exclusion_ranges, -EOPNOTSUPP, smu, freq_band_range)
+#define smu_get_ras_smu_drv(smu, ras_smu_drv) smu_ppt_funcs(get_ras_smu_drv, -EOPNOTSUPP, smu, ras_smu_drv)
#endif
#endif
diff --git a/drivers/gpu/drm/amd/ras/Makefile b/drivers/gpu/drm/amd/ras/Makefile
new file mode 100644
index 000000000000..bbdaba811d34
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/Makefile
@@ -0,0 +1,34 @@
+#
+# Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+ifeq ($(AMD_GPU_RAS_MGR),)
+ AMD_GPU_RAS_MGR := ras_mgr
+endif
+
+subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/rascore
+subdir-ccflags-y += -I$(AMD_GPU_RAS_FULL_PATH)/$(AMD_GPU_RAS_MGR)
+
+RAS_LIBS = $(AMD_GPU_RAS_MGR) rascore
+
+AMD_RAS = $(addsuffix /Makefile, $(addprefix $(AMD_GPU_RAS_FULL_PATH)/,$(RAS_LIBS)))
+
+include $(AMD_RAS)
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/Makefile b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile
new file mode 100644
index 000000000000..5e5a2cfa4068
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/Makefile
@@ -0,0 +1,33 @@
+# Copyright 2025 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+
+RAS_MGR_FILES = amdgpu_ras_sys.o \
+ amdgpu_ras_mgr.o \
+ amdgpu_ras_eeprom_i2c.o \
+ amdgpu_ras_mp1_v13_0.o \
+ amdgpu_ras_cmd.o \
+ amdgpu_ras_process.o \
+ amdgpu_ras_nbio_v7_9.o
+
+RAS_MGR = $(addprefix $(AMD_GPU_RAS_PATH)/ras_mgr/, $(RAS_MGR_FILES))
+
+AMD_GPU_RAS_FILES += $(RAS_MGR)
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
new file mode 100644
index 000000000000..78419b7f7729
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_cmd.h"
+#include "amdgpu_ras_mgr.h"
+
+/* inject address is 52 bits */
+#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
+
+#define AMDGPU_RAS_TYPE_RASCORE 0x1
+#define AMDGPU_RAS_TYPE_AMDGPU 0x2
+#define AMDGPU_RAS_TYPE_VF 0x3
+
+static int amdgpu_ras_trigger_error_prepare(struct ras_core_context *ras_core,
+ struct ras_cmd_inject_error_req *block_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+
+ if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) {
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ RAS_DEV_WARN(adev, "Failed to disallow df cstate");
+
+ ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW);
+ if (ret && (ret != -EOPNOTSUPP))
+ RAS_DEV_WARN(adev, "Failed to disallow XGMI power down");
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_trigger_error_end(struct ras_core_context *ras_core,
+ struct ras_cmd_inject_error_req *block_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+
+ if (block_info->block_id == TA_RAS_BLOCK__XGMI_WAFL) {
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ ret = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT);
+ if (ret && (ret != -EOPNOTSUPP))
+ RAS_DEV_WARN(adev, "Failed to allow XGMI power down");
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ RAS_DEV_WARN(adev, "Failed to allow df cstate");
+ }
+
+ return 0;
+}
+
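+/* Convert a node-local FB address into an XGMI hive-global address.
+ * Example with hypothetical values: node_segment_size = 64GB and
+ * physical_node_id = 2 map local 0x1000 to 2 * 64GB + 0x1000 =
+ * 0x2000001000.
+ */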
+static uint64_t local_addr_to_xgmi_global_addr(struct ras_core_context *ras_core,
+ uint64_t addr)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+
+ return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
+
+static int amdgpu_ras_inject_error(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_cmd_inject_error_req *req =
+ (struct ras_cmd_inject_error_req *)cmd->input_buff_raw;
+ int ret = RAS_CMD__ERROR_GENERIC;
+
+ if (req->block_id == RAS_BLOCK_ID__UMC) {
+ if (amdgpu_ras_mgr_check_retired_addr(adev, req->address)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ req->address);
+ return RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ if ((req->address >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (req->address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
+ RAS_DEV_WARN(adev, "RAS WARN: input address 0x%llx is invalid.",
+ req->address);
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+ }
+
+		/* Translate the node-local address to an XGMI hive-global
+		 * address; this branch only handles RAS_BLOCK_ID__UMC, so
+		 * no GFX exclusion is needed.
+		 */
+		if (adev->gmc.xgmi.num_physical_nodes > 1)
+			req->address = local_addr_to_xgmi_global_addr(ras_core, req->address);
+ }
+
+ amdgpu_ras_trigger_error_prepare(ras_core, req);
+ ret = rascore_handle_cmd(ras_core, cmd, data);
+ amdgpu_ras_trigger_error_end(ras_core, req);
+ if (ret) {
+ RAS_DEV_ERR(adev, "ras inject block %u failed %d\n", req->block_id, ret);
+ ret = RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_get_ras_safe_fb_addr_ranges(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_cmd_dev_handle *input_data =
+ (struct ras_cmd_dev_handle *)cmd->input_buff_raw;
+ struct ras_cmd_ras_safe_fb_address_ranges_rsp *ranges =
+ (struct ras_cmd_ras_safe_fb_address_ranges_rsp *)cmd->output_buff_raw;
+ struct amdgpu_mem_partition_info *mem_ranges;
+ uint32_t i = 0;
+
+ if (cmd->input_size != sizeof(*input_data))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ mem_ranges = adev->gmc.mem_partitions;
+ for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
+ ranges->range[i].start = mem_ranges[i].range.fpfn << AMDGPU_GPU_PAGE_SHIFT;
+ ranges->range[i].size = mem_ranges[i].size;
+ ranges->range[i].idx = i;
+ }
+
+ ranges->num_ranges = adev->gmc.num_mem_partitions;
+
+ ranges->version = 0;
+ cmd->output_size = sizeof(struct ras_cmd_ras_safe_fb_address_ranges_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_translate_fb_address(struct ras_core_context *ras_core,
+ enum ras_fb_addr_type src_type,
+ enum ras_fb_addr_type dest_type,
+ union ras_translate_fb_address *src_addr,
+ union ras_translate_fb_address *dest_addr)
+{
+ uint64_t soc_phy_addr;
+ int ret = RAS_CMD__SUCCESS;
+
+ /* Does not need to be queued as event as this is a SW translation */
+ switch (src_type) {
+ case RAS_FB_ADDR_SOC_PHY:
+ soc_phy_addr = src_addr->soc_phy_addr;
+ break;
+ case RAS_FB_ADDR_BANK:
+ ret = ras_cmd_translate_bank_to_soc_pa(ras_core,
+ src_addr->bank_addr, &soc_phy_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+ break;
+ default:
+ return RAS_CMD__ERROR_INVALID_CMD;
+ }
+
+ switch (dest_type) {
+ case RAS_FB_ADDR_SOC_PHY:
+ dest_addr->soc_phy_addr = soc_phy_addr;
+ break;
+ case RAS_FB_ADDR_BANK:
+ ret = ras_cmd_translate_soc_pa_to_bank(ras_core,
+ soc_phy_addr, &dest_addr->bank_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+ break;
+ default:
+ return RAS_CMD__ERROR_INVALID_CMD;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_translate_fb_address(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_translate_fb_address_req *req_buff =
+ (struct ras_cmd_translate_fb_address_req *)cmd->input_buff_raw;
+ struct ras_cmd_translate_fb_address_rsp *rsp_buff =
+ (struct ras_cmd_translate_fb_address_rsp *)cmd->output_buff_raw;
+ int ret = RAS_CMD__ERROR_GENERIC;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_translate_fb_address_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if ((req_buff->src_addr_type >= RAS_FB_ADDR_UNKNOWN) ||
+ (req_buff->dest_addr_type >= RAS_FB_ADDR_UNKNOWN))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ ret = ras_translate_fb_address(ras_core, req_buff->src_addr_type,
+ req_buff->dest_addr_type, &req_buff->trans_addr, &rsp_buff->trans_addr);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ rsp_buff->version = 0;
+ cmd->output_size = sizeof(struct ras_cmd_translate_fb_address_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static struct ras_cmd_func_map amdgpu_ras_cmd_maps[] = {
+ {RAS_CMD__INJECT_ERROR, amdgpu_ras_inject_error},
+ {RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES, amdgpu_ras_get_ras_safe_fb_addr_ranges},
+ {RAS_CMD__TRANSLATE_FB_ADDRESS, amdgpu_ras_translate_fb_address},
+};
+
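+/* Commands without an amdgpu-specific handler in the table above fall
+ * through to the generic rascore_handle_cmd() dispatch; see
+ * amdgpu_ras_submit_cmd() below.
+ */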
+int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_func_map *ras_cmd = NULL;
+ int i, res;
+
+ for (i = 0; i < ARRAY_SIZE(amdgpu_ras_cmd_maps); i++) {
+ if (cmd->cmd_id == amdgpu_ras_cmd_maps[i].cmd_id) {
+ ras_cmd = &amdgpu_ras_cmd_maps[i];
+ break;
+ }
+ }
+
+ if (ras_cmd)
+ res = ras_cmd->func(ras_core, cmd, NULL);
+ else
+ res = RAS_CMD__ERROR_UKNOWN_CMD;
+
+ return res;
+}
+
+int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd)
+{
+ struct ras_core_context *cmd_core = ras_core;
+ int timeout = 60;
+ int res;
+
+ cmd->cmd_res = RAS_CMD__ERROR_INVALID_CMD;
+ cmd->output_size = 0;
+
+ if (!ras_core_is_enabled(cmd_core))
+ return RAS_CMD__ERROR_ACCESS_DENIED;
+
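+	/* A GPU reset stalls command handling; poll once per second and
+	 * give up after ~60 seconds instead of blocking the caller forever.
+	 */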
+ while (ras_core_gpu_in_reset(cmd_core)) {
+ msleep(1000);
+ if (!timeout--)
+ return RAS_CMD__ERROR_TIMEOUT;
+ }
+
+ res = amdgpu_ras_handle_cmd(cmd_core, cmd, NULL);
+ if (res == RAS_CMD__ERROR_UKNOWN_CMD)
+ res = rascore_handle_cmd(cmd_core, cmd, NULL);
+
+ cmd->cmd_res = res;
+
+ if (cmd->output_size > cmd->output_buf_size) {
+ RAS_DEV_ERR(cmd_core->dev,
+ "Output size 0x%x exceeds output buffer size 0x%x!\n",
+ cmd->output_size, cmd->output_buf_size);
+ return RAS_CMD__SUCCESS_EXEED_BUFFER;
+ }
+
+ return RAS_CMD__SUCCESS;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h
new file mode 100644
index 000000000000..5973b156cc85
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_cmd.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMDGPU_RAS_CMD_H__
+#define __AMDGPU_RAS_CMD_H__
+#include "ras.h"
+
+enum amdgpu_ras_cmd_id {
+ RAS_CMD__AMDGPU_BEGIN = RAS_CMD_ID_AMDGPU_START,
+ RAS_CMD__TRANSLATE_MEMORY_FD,
+ RAS_CMD__AMDGPU_SUPPORTED_MAX = RAS_CMD_ID_AMDGPU_END,
+};
+
+struct ras_cmd_translate_memory_fd_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t type;
+ uint32_t fd;
+ uint64_t address;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_translate_memory_fd_rsp {
+ uint32_t version;
+ uint32_t padding;
+ uint64_t start;
+ uint64_t size;
+ uint32_t reserved[2];
+};
+
+int amdgpu_ras_handle_cmd(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data);
+int amdgpu_ras_submit_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd);
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c
new file mode 100644
index 000000000000..3ed3ff42b7e1
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_eeprom_i2c.h"
+#include "ras_eeprom.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
+ * address memory within a device or select a device on the I2C bus,
+ * according to the status of pins 1-3. See top of amdgpu_eeprom.c.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
+
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
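+/* e.g. MAKE_I2C_ADDR(0x40000) = (0xA << 3) | 0x4 = 0x54, the 54h wire
+ * address described above; MAKE_I2C_ADDR(0) yields 50h.
+ */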
+#define to_amdgpu_ras(x) (container_of(x, struct amdgpu_ras, eeprom_control))
+
+#define EEPROM_PAGE_BITS 8
+#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+
+static int ras_eeprom_i2c_config(struct ras_core_context *ras_core)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ u8 i2c_addr;
+
+ if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ /* The address given by VBIOS is an 8-bit, wire-format
+ * address, i.e. the most significant byte.
+ *
+ * Normalize it to a 19-bit EEPROM address. Remove the
+ * device type identifier and make it a 7-bit address;
+ * then make it a 19-bit EEPROM address. See top of
+ * amdgpu_eeprom.c.
+ */
+ i2c_addr = (i2c_addr & 0x0F) >> 1;
+ control->i2c_address = ((u32) i2c_addr) << 16;
+ return 0;
+ }
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ return 0;
+	default:
+		return -ENODATA;
+	}
+}
+
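+/* Transfer @buf_size bytes to/from the 19-bit EEPROM address @eeprom_addr.
+ * Each I2C message is prefixed with a two-byte, big-endian in-window
+ * offset (EEPROM_OFFSET_SIZE); the 64KB window itself is selected by the
+ * wire address built with MAKE_I2C_ADDR(). Returns the number of bytes
+ * transferred, or a negative errno from i2c_transfer().
+ */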
+static int ras_eeprom_i2c_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ struct i2c_adapter *i2c_adap = ras_core->ras_eeprom.i2c_adapter;
+ u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE];
+ struct i2c_msg msgs[] = {
+ {
+ .flags = 0,
+ .len = EEPROM_OFFSET_SIZE,
+ .buf = eeprom_offset_buf,
+ },
+ {
+ .flags = read ? I2C_M_RD : 0,
+ },
+ };
+ const u8 *p = eeprom_buf;
+ int r;
+ u16 len;
+
+ for (r = 0; buf_size > 0;
+ buf_size -= len, eeprom_addr += len, eeprom_buf += len) {
+ /* Set the EEPROM address we want to write to/read from.
+ */
+ msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr);
+ msgs[1].addr = msgs[0].addr;
+ msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff;
+ msgs[0].buf[1] = eeprom_addr & 0xff;
+
+ if (!read) {
+ /* Write the maximum amount of data, without
+ * crossing the device's page boundary, as per
+ * its spec. Partial page writes are allowed,
+ * starting at any location within the page,
+ * so long as the page boundary isn't crossed
+ * over (actually the page pointer rolls
+ * over).
+ *
+ * As per the AT24CM02 EEPROM spec, after
+ * writing into a page, the I2C driver should
+ * terminate the transfer, i.e. in
+ * "i2c_transfer()" below, with a STOP
+ * condition, so that the self-timed write
+ * cycle begins. This is implied for the
+ * "i2c_transfer()" abstraction.
+ */
+ len = min(EEPROM_PAGE_SIZE - (eeprom_addr & EEPROM_PAGE_MASK),
+ buf_size);
+ } else {
+ /* Reading from the EEPROM has no limitation
+ * on the number of bytes read from the EEPROM
+ * device--they are simply sequenced out.
+ * Keep in mind that i2c_msg.len is u16 type.
+ */
+ len = min(U16_MAX, buf_size);
+ }
+ msgs[1].len = len;
+ msgs[1].buf = eeprom_buf;
+
+ /* This constitutes a START-STOP transaction.
+ */
+ r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs));
+ if (r != ARRAY_SIZE(msgs))
+ break;
+
+ if (!read) {
+ /* According to EEPROM specs the length of the
+ * self-writing cycle, tWR (tW), is 10 ms.
+ *
+ * TODO: Use polling on ACK, aka Acknowledge
+ * Polling, to minimize waiting for the
+ * internal write cycle to complete, as it is
+ * usually smaller than tWR (tW).
+ */
+ msleep(10);
+ }
+ }
+
+ return r < 0 ? r : eeprom_buf - p;
+}
+
+const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func = {
+ .eeprom_i2c_xfer = ras_eeprom_i2c_xfer,
+ .update_eeprom_i2c_config = ras_eeprom_i2c_config,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h
new file mode 100644
index 000000000000..3b5878605411
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_eeprom_i2c.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_EEPROM_I2C_H__
+#define __AMDGPU_RAS_EEPROM_I2C_H__
+#include "ras.h"
+
+extern const struct ras_eeprom_sys_func amdgpu_ras_eeprom_i2c_sys_func;
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c
new file mode 100644
index 000000000000..afe8135b6258
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xgmi.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_cmd.h"
+#include "amdgpu_ras_process.h"
+#include "amdgpu_ras_eeprom_i2c.h"
+#include "amdgpu_ras_mp1_v13_0.h"
+#include "amdgpu_ras_nbio_v7_9.h"
+
+#define MAX_SOCKET_NUM_PER_HIVE 8
+#define MAX_AID_NUM_PER_SOCKET 4
+#define MAX_XCD_NUM_PER_AID 2
+
+/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
+#define TYPICAL_ECC_BAD_PAGE_RATE (100ULL * SZ_1M)
+
+#define COUNT_BAD_PAGE_THRESHOLD(size) (((size) >> 21) << 4)
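+/* i.e. 16 retirement records per 2MB of reserved VRAM; the default
+ * 16MB reservation below yields (16MB >> 21) << 4 = 128 records.
+ */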
+
+/* Reserve 8 physical DRAM rows for possible retirement.
+ * In the worst case, this loses 8 * 2MB of memory in the VRAM domain.
+ */
+#define RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20)
+
+static void ras_mgr_init_event_mgr(struct ras_event_manager *mgr)
+{
+ struct ras_event_state *event_state;
+ int i;
+
+ memset(mgr, 0, sizeof(*mgr));
+ atomic64_set(&mgr->seqno, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
+ event_state = &mgr->event_state[i];
+ event_state->last_seqno = RAS_EVENT_INVALID_ID;
+ atomic64_set(&event_state->count, 0);
+ }
+}
+
+static void amdgpu_ras_mgr_init_event_mgr(struct ras_core_context *ras_core)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_event_manager *event_mgr;
+ struct amdgpu_hive_info *hive;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr;
+
+ /* init event manager with node 0 on xgmi system */
+ if (!amdgpu_reset_in_recovery(adev)) {
+ if (!hive || adev->gmc.xgmi.node_id == 0)
+ ras_mgr_init_event_mgr(event_mgr);
+ }
+
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+}
+
+static int amdgpu_ras_mgr_init_aca_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_aca_config *aca_cfg = &config->aca_cfg;
+
+ aca_cfg->socket_num_per_hive = MAX_SOCKET_NUM_PER_HIVE;
+ aca_cfg->aid_num_per_socket = MAX_AID_NUM_PER_SOCKET;
+ aca_cfg->xcd_num_per_aid = MAX_XCD_NUM_PER_AID;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_eeprom_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_eeprom_config *eeprom_cfg = &config->eeprom_cfg;
+
+ eeprom_cfg->eeprom_sys_fn = &amdgpu_ras_eeprom_i2c_sys_func;
+ eeprom_cfg->eeprom_i2c_adapter = adev->pm.ras_eeprom_i2c_bus;
+ if (eeprom_cfg->eeprom_i2c_adapter) {
+ const struct i2c_adapter_quirks *quirks =
+ ((struct i2c_adapter *)eeprom_cfg->eeprom_i2c_adapter)->quirks;
+
+ if (quirks) {
+ eeprom_cfg->max_i2c_read_len = quirks->max_read_len;
+ eeprom_cfg->max_i2c_write_len = quirks->max_write_len;
+ }
+ }
+
+	/*
+	 * amdgpu_bad_page_threshold configures the threshold for the
+	 * number of bad pages:
+	 * -1: Threshold is set to the default value.
+	 *     The driver issues a warning when the threshold is reached
+	 *     and continues runtime services.
+	 * 0: Bad page retirement is disabled.
+	 *    The driver does not retire bad pages; intended for
+	 *    debugging purposes.
+	 * -2: Threshold is determined by a formula that assumes
+	 *     1 bad page per 100MB of local memory.
+	 *     The driver continues runtime services when the threshold
+	 *     is reached.
+	 * 0 < threshold < max number of bad page records in EEPROM:
+	 *     A user-defined threshold is set.
+	 *     The driver halts runtime services when this custom
+	 *     threshold is reached.
+	 */
+ if (amdgpu_bad_page_threshold == NONSTOP_OVER_THRESHOLD)
+ eeprom_cfg->eeprom_record_threshold_count =
+ div64_u64(adev->gmc.mc_vram_size, TYPICAL_ECC_BAD_PAGE_RATE);
+ else if (amdgpu_bad_page_threshold == WARN_NONSTOP_OVER_THRESHOLD)
+ eeprom_cfg->eeprom_record_threshold_count =
+ COUNT_BAD_PAGE_THRESHOLD(RAS_RESERVED_VRAM_SIZE_DEFAULT);
+ else
+ eeprom_cfg->eeprom_record_threshold_count = amdgpu_bad_page_threshold;
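+	/* Example with a hypothetical 64GB board: the formula path above
+	 * yields 64GB / 100MB ~= 655 records, while the default path yields
+	 * COUNT_BAD_PAGE_THRESHOLD(16MB) = 128 records.
+	 */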
+
+ eeprom_cfg->eeprom_record_threshold_config = amdgpu_bad_page_threshold;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_mp1_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_mp1_config *mp1_cfg = &config->mp1_cfg;
+ int ret = 0;
+
+ switch (config->mp1_ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ mp1_cfg->mp1_sys_fn = &amdgpu_ras_mp1_sys_func_v13_0;
+ break;
+ default:
+ RAS_DEV_ERR(adev,
+			    "Unsupported mp1 (0x%x) RAS config!\n",
+ config->mp1_ip_version);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_mgr_init_nbio_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_nbio_config *nbio_cfg = &config->nbio_cfg;
+ int ret = 0;
+
+ switch (config->nbio_ip_version) {
+ case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
+ nbio_cfg->nbio_sys_fn = &amdgpu_ras_nbio_sys_func_v7_9;
+ break;
+ default:
+ RAS_DEV_ERR(adev,
+			    "Unsupported nbio (0x%x) RAS config!\n",
+ config->nbio_ip_version);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int amdgpu_ras_mgr_get_ras_psp_system_status(struct ras_core_context *ras_core,
+ struct ras_psp_sys_status *status)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ta_context *context = &adev->psp.ras_context.context;
+
+ status->initialized = context->initialized;
+ status->session_id = context->session_id;
+ status->psp_cmd_mutex = &adev->psp.mutex;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_get_ras_ta_init_param(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t nps_mode;
+
+ if (amdgpu_ras_is_poison_mode_supported(adev))
+ ras_ta_param->poison_mode_en = 1;
+
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
+ ras_ta_param->dgpu_mode = 1;
+
+ ras_ta_param->xcc_mask = adev->gfx.xcc_mask;
+ ras_ta_param->channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
+
+ ras_ta_param->active_umc_mask = adev->umc.active_mask;
+
+ if (!amdgpu_ras_mgr_get_curr_nps_mode(adev, &nps_mode))
+ ras_ta_param->nps_mode = nps_mode;
+
+ return 0;
+}
+
+const struct ras_psp_sys_func amdgpu_ras_psp_sys_func = {
+ .get_ras_psp_system_status = amdgpu_ras_mgr_get_ras_psp_system_status,
+ .get_ras_ta_init_param = amdgpu_ras_mgr_get_ras_ta_init_param,
+};
+
+static int amdgpu_ras_mgr_init_psp_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_psp_config *psp_cfg = &config->psp_cfg;
+
+ psp_cfg->psp_sys_fn = &amdgpu_ras_psp_sys_func;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_init_umc_config(struct amdgpu_device *adev,
+ struct ras_core_config *config)
+{
+ struct ras_umc_config *umc_cfg = &config->umc_cfg;
+
+ umc_cfg->umc_vram_type = adev->gmc.vram_type;
+
+ return 0;
+}
+
+static struct ras_core_context *amdgpu_ras_mgr_create_ras_core(struct amdgpu_device *adev)
+{
+ struct ras_core_config init_config;
+
+ memset(&init_config, 0, sizeof(init_config));
+
+ init_config.umc_ip_version = amdgpu_ip_version(adev, UMC_HWIP, 0);
+ init_config.mp1_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+ init_config.gfx_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0);
+ init_config.nbio_ip_version = amdgpu_ip_version(adev, NBIO_HWIP, 0);
+ init_config.psp_ip_version = amdgpu_ip_version(adev, MP1_HWIP, 0);
+
+ if (init_config.umc_ip_version == IP_VERSION(12, 0, 0) ||
+ init_config.umc_ip_version == IP_VERSION(12, 5, 0))
+ init_config.aca_ip_version = IP_VERSION(1, 0, 0);
+
+ init_config.sys_fn = &amdgpu_ras_sys_fn;
+ init_config.ras_eeprom_supported = true;
+ init_config.poison_supported =
+ amdgpu_ras_is_poison_mode_supported(adev);
+
+ amdgpu_ras_mgr_init_aca_config(adev, &init_config);
+ amdgpu_ras_mgr_init_eeprom_config(adev, &init_config);
+ amdgpu_ras_mgr_init_mp1_config(adev, &init_config);
+ amdgpu_ras_mgr_init_nbio_config(adev, &init_config);
+ amdgpu_ras_mgr_init_psp_config(adev, &init_config);
+ amdgpu_ras_mgr_init_umc_config(adev, &init_config);
+
+ return ras_core_create(&init_config);
+}
+
+static int amdgpu_ras_mgr_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr;
+ int ret = 0;
+
+ /* Disabled by default */
+ con->uniras_enabled = false;
+
+ /* Enabled only in debug mode */
+ if (adev->debug_enable_ras_aca) {
+ con->uniras_enabled = true;
+		RAS_DEV_INFO(adev, "Debug mode: amdgpu uniras enabled!");
+ }
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ ras_mgr = kzalloc(sizeof(*ras_mgr), GFP_KERNEL);
+ if (!ras_mgr)
+		return -ENOMEM;
+
+ con->ras_mgr = ras_mgr;
+ ras_mgr->adev = adev;
+
+ ras_mgr->ras_core = amdgpu_ras_mgr_create_ras_core(adev);
+ if (!ras_mgr->ras_core) {
+ RAS_DEV_ERR(adev, "Failed to create ras core!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ras_mgr->ras_core->dev = adev;
+
+ amdgpu_ras_process_init(adev);
+ ras_core_sw_init(ras_mgr->ras_core);
+ amdgpu_ras_mgr_init_event_mgr(ras_mgr->ras_core);
+ return 0;
+
+err:
+ kfree(ras_mgr);
+ return ret;
+}
+
+static int amdgpu_ras_mgr_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = (struct amdgpu_ras_mgr *)con->ras_mgr;
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ if (!ras_mgr)
+ return 0;
+
+ amdgpu_ras_process_fini(adev);
+ ras_core_sw_fini(ras_mgr->ras_core);
+ ras_core_destroy(ras_mgr->ras_core);
+ ras_mgr->ras_core = NULL;
+
+ kfree(con->ras_mgr);
+ con->ras_mgr = NULL;
+
+ return 0;
+}
+
+static int amdgpu_ras_mgr_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ int ret;
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ ret = ras_core_hw_init(ras_mgr->ras_core);
+ if (ret) {
+ RAS_DEV_ERR(adev, "Failed to initialize ras core!\n");
+ return ret;
+ }
+
+ ras_mgr->ras_is_ready = true;
+
+ amdgpu_enable_uniras(adev, true);
+
+	RAS_DEV_INFO(adev, "AMDGPU RAS is ready.\n");
+ return 0;
+}
+
+static int amdgpu_ras_mgr_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!con->uniras_enabled)
+ return 0;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ ras_core_hw_fini(ras_mgr->ras_core);
+
+ ras_mgr->ras_is_ready = false;
+
+ return 0;
+}
+
+struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(struct amdgpu_device *adev)
+{
+ if (!adev || !adev->psp.ras_context.ras)
+ return NULL;
+
+ return (struct amdgpu_ras_mgr *)adev->psp.ras_context.ras->ras_mgr;
+}
+
+static const struct amd_ip_funcs __maybe_unused ras_v1_0_ip_funcs = {
+ .name = "ras_v1_0",
+ .sw_init = amdgpu_ras_mgr_sw_init,
+ .sw_fini = amdgpu_ras_mgr_sw_fini,
+ .hw_init = amdgpu_ras_mgr_hw_init,
+ .hw_fini = amdgpu_ras_mgr_hw_fini,
+};
+
+const struct amdgpu_ip_block_version ras_v1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_RAS,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &ras_v1_0_ip_funcs,
+};
+
+int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EPERM;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EPERM;
+
+	RAS_DEV_INFO(adev, "%s amdgpu unified ras!", enable ? "Enable" : "Disable");
+ return ras_core_set_status(ras_mgr->ras_core, enable);
+}
+
+bool amdgpu_uniras_enabled(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return ras_core_is_enabled(ras_mgr->ras_core);
+}
+
+static bool amdgpu_ras_mgr_is_ready(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (ras_mgr && ras_mgr->ras_core && ras_mgr->ras_is_ready &&
+ ras_core_is_ready(ras_mgr->ras_core))
+ return true;
+
+ return false;
+}
+
+int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return ras_core_handle_nbio_irq(ras_mgr->ras_core, data);
+}
+
+uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev,
+ enum ras_seqno_type seqno_type)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ int ret;
+ uint64_t seq_no;
+
+ if (!amdgpu_ras_mgr_is_ready(adev) ||
+ (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX))
+ return 0;
+
+ seq_no = ras_core_gen_seqno(ras_mgr->ras_core, seqno_type);
+
+ if ((seqno_type == RAS_SEQNO_TYPE_DE) ||
+ (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)) {
+ ret = ras_core_put_seqno(ras_mgr->ras_core, seqno_type, seq_no);
+ if (ret)
+ RAS_DEV_WARN(adev, "There are too many ras interrupts!");
+ }
+
+ return seq_no;
+}
+
+int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_ih_info *ih_info = (struct ras_ih_info *)data;
+ uint64_t seq_no = 0;
+ int ret = 0;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ if (ih_info && (ih_info->block == AMDGPU_RAS_BLOCK__UMC)) {
+ if (ras_mgr->ras_core->poison_supported) {
+ seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_DE);
+ RAS_DEV_INFO(adev,
+ "{%llu} RAS poison is created, no user action is needed.\n",
+ seq_no);
+ }
+
+ ret = amdgpu_ras_process_handle_umc_interrupt(adev, ih_info);
+ } else if (ras_mgr->ras_core->poison_supported) {
+ ret = amdgpu_ras_process_handle_unexpected_interrupt(adev, ih_info);
+ } else {
+ RAS_DEV_WARN(adev,
+ "No RAS interrupt handler for non-UMC block with poison disabled.\n");
+ }
+
+ return ret;
+}
+
+int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return amdgpu_ras_process_handle_consumption_interrupt(adev, data);
+}
+
+int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ return ras_core_update_ecc_info(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ con->gpu_reset_flags |= flags;
+ return amdgpu_ras_reset_gpu(adev);
+}
+
+bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return false;
+
+ return ras_eeprom_check_safety_watermark(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev,
+ uint32_t *nps_mode)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ uint32_t mode;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EINVAL;
+
+ mode = ras_core_get_curr_nps_mode(ras_mgr->ras_core);
+ if (!mode || mode > AMDGPU_NPS8_PARTITION_MODE)
+ return -EINVAL;
+
+ *nps_mode = mode;
+
+ return 0;
+}
+
+bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return false;
+
+ return ras_umc_check_retired_addr(ras_mgr->ras_core, addr);
+}
+
+bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core || !ras_mgr->ras_is_ready)
+ return false;
+
+ return ras_core_gpu_is_rma(ras_mgr->ras_core);
+}
+
+int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev,
+ uint32_t cmd_id, void *input, uint32_t input_size,
+ void *output, uint32_t out_size)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_cmd_ctx *cmd_ctx;
+ uint32_t ctx_buf_size = PAGE_SIZE;
+ int ret;
+
+ if (!amdgpu_ras_mgr_is_ready(adev))
+ return -EPERM;
+
+ cmd_ctx = kzalloc(ctx_buf_size, GFP_KERNEL);
+ if (!cmd_ctx)
+ return -ENOMEM;
+
+ cmd_ctx->cmd_id = cmd_id;
+
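+	/* Callers pass fixed-size, kernel-internal request structs;
+	 * input_size is assumed to fit within the PAGE_SIZE command context.
+	 */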
+ memcpy(cmd_ctx->input_buff_raw, input, input_size);
+ cmd_ctx->input_size = input_size;
+ cmd_ctx->output_buf_size = ctx_buf_size - sizeof(*cmd_ctx);
+
+ ret = amdgpu_ras_submit_cmd(ras_mgr->ras_core, cmd_ctx);
+ if (!ret && !cmd_ctx->cmd_res && output && (out_size == cmd_ctx->output_size))
+ memcpy(output, cmd_ctx->output_buff_raw, cmd_ctx->output_size);
+
+ kfree(cmd_ctx);
+
+ return ret;
+}
+
+int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev)) {
+		RAS_DEV_ERR(adev, "Invalid ras pre-reset: ras is not ready!\n");
+ return -EPERM;
+ }
+
+ amdgpu_ras_process_pre_reset(adev);
+ return 0;
+}
+
+int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_mgr_is_ready(adev)) {
+		RAS_DEV_ERR(adev, "Invalid ras post-reset: ras is not ready!\n");
+ return -EPERM;
+ }
+
+ amdgpu_ras_process_post_reset(adev);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h
new file mode 100644
index 000000000000..8fb7eb4b8f13
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mgr.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_MGR_H__
+#define __AMDGPU_RAS_MGR_H__
+#include "ras.h"
+#include "amdgpu_ras_process.h"
+
+enum ras_ih_type {
+ RAS_IH_NONE,
+ RAS_IH_FROM_BLOCK_CONTROLLER,
+ RAS_IH_FROM_CONSUMER_CLIENT,
+ RAS_IH_FROM_FATAL_ERROR,
+};
+
+struct ras_ih_info {
+ uint32_t block;
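+	/* Controller interrupts carry the raw IV entry; consumer (poison
+	 * consumption) interrupts carry the pasid/reset fields instead.
+	 */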
+ union {
+ struct amdgpu_iv_entry iv_entry;
+ struct {
+ uint16_t pasid;
+ uint32_t reset;
+ pasid_notify pasid_fn;
+ void *data;
+ };
+ };
+};
+
+struct amdgpu_ras_mgr {
+ struct amdgpu_device *adev;
+ struct ras_core_context *ras_core;
+ struct delayed_work retire_page_dwork;
+ struct ras_event_manager ras_event_mgr;
+ uint64_t last_poison_consumption_seqno;
+ bool ras_is_ready;
+
+ bool is_paused;
+ struct completion ras_event_done;
+};
+
+extern const struct amdgpu_ip_block_version ras_v1_0_ip_block;
+
+struct amdgpu_ras_mgr *amdgpu_ras_mgr_get_context(
+ struct amdgpu_device *adev);
+int amdgpu_enable_uniras(struct amdgpu_device *adev, bool enable);
+bool amdgpu_uniras_enabled(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_handle_fatal_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_handle_controller_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_handle_consumer_interrupt(struct amdgpu_device *adev, void *data);
+int amdgpu_ras_mgr_update_ras_ecc(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_reset_gpu(struct amdgpu_device *adev, uint32_t flags);
+uint64_t amdgpu_ras_mgr_gen_ras_event_seqno(struct amdgpu_device *adev,
+ enum ras_seqno_type seqno_type);
+bool amdgpu_ras_mgr_check_eeprom_safety_watermark(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_get_curr_nps_mode(struct amdgpu_device *adev, uint32_t *nps_mode);
+bool amdgpu_ras_mgr_check_retired_addr(struct amdgpu_device *adev,
+ uint64_t addr);
+bool amdgpu_ras_mgr_is_rma(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_handle_ras_cmd(struct amdgpu_device *adev,
+ uint32_t cmd_id, void *input, uint32_t input_size,
+ void *output, uint32_t out_size);
+int amdgpu_ras_mgr_pre_reset(struct amdgpu_device *adev);
+int amdgpu_ras_mgr_post_reset(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c
new file mode 100644
index 000000000000..79a51b1603ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_smu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_ras_mp1_v13_0.h"
+
+#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A
+#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B
+
+static int mp1_v13_0_get_valid_bank_count(struct ras_core_context *ras_core,
+ u32 msg, u32 *count)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ u32 smu_msg;
+ int ret = 0;
+
+ if (!count)
+ return -EINVAL;
+
+ smu_msg = (msg == RAS_MP1_MSG_QueryValidMcaCeCount) ?
+ SMU_MSG_QueryValidMcaCeCount : SMU_MSG_QueryValidMcaCount;
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ ret = amdgpu_smu_ras_send_msg(adev, smu_msg, 0, count);
+ up_read(&adev->reset_domain->sem);
+ } else {
+ ret = -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ if (ret)
+ *count = 0;
+
+ return ret;
+}
+
+static int mp1_v13_0_dump_valid_bank(struct ras_core_context *ras_core,
+ u32 msg, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t data[2] = {0, 0};
+ uint32_t param;
+ int ret = 0;
+ int i, offset;
+ u32 smu_msg = (msg == RAS_MP1_MSG_McaBankCeDumpDW) ?
+ SMU_MSG_McaBankCeDumpDW : SMU_MSG_McaBankDumpDW;
+
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ offset = reg_idx * 8;
+ for (i = 0; i < ARRAY_SIZE(data); i++) {
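+			/* param: bank index in the upper 16 bits; dword-aligned
+			 * byte offset of the 32-bit half being read in the lower
+			 * 16 bits. Each 64-bit bank register is fetched as two
+			 * DWORD reads.
+			 */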
+ param = ((idx & 0xffff) << 16) | ((offset + (i << 2)) & 0xfffc);
+ ret = amdgpu_smu_ras_send_msg(adev, smu_msg, param, &data[i]);
+ if (ret) {
+ RAS_DEV_ERR(adev, "ACA failed to read register[%d], offset:0x%x\n",
+ reg_idx, offset);
+ break;
+ }
+ }
+ up_read(&adev->reset_domain->sem);
+
+ if (!ret)
+ *val = (uint64_t)data[1] << 32 | data[0];
+ } else {
+ ret = -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ return ret;
+}
+
+const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0 = {
+ .mp1_get_valid_bank_count = mp1_v13_0_get_valid_bank_count,
+ .mp1_dump_valid_bank = mp1_v13_0_dump_valid_bank,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h
new file mode 100644
index 000000000000..71c614ae1ae4
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_mp1_v13_0.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMDGPU_RAS_MP1_V13_0_H__
+#define __AMDGPU_RAS_MP1_V13_0_H__
+#include "ras.h"
+
+extern const struct ras_mp1_sys_func amdgpu_ras_mp1_sys_func_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c
new file mode 100644
index 000000000000..2783f5875c7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_nbio_v7_9.h"
+#include "nbio/nbio_7_9_0_offset.h"
+#include "nbio/nbio_7_9_0_sh_mask.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
+
+static int nbio_v7_9_set_ras_controller_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+	/* Dummy function; no initialization is needed in the driver */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_ras_controller_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+	/* By design, the ih cookie for ras_controller_irq should be written
+	 * to the BIF ring instead of the general IV ring. However, due to a
+	 * known BIF ring hw bug, it has to be disabled. The process function
+	 * will never be invoked, so leave it as a dummy.
+	 */
+ return 0;
+}
+
+static int nbio_v7_9_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned int type,
+ enum amdgpu_interrupt_state state)
+{
+	/* Dummy function; no initialization is needed in the driver */
+
+ return 0;
+}
+
+static int nbio_v7_9_process_err_event_athub_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+	/* By design, the ih cookie for err_event_athub_irq should be written
+	 * to the BIF ring instead of the general IV ring. However, due to a
+	 * known BIF ring hw bug, it has to be disabled. The process function
+	 * will never be invoked, so leave it as a dummy.
+	 */
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_controller_irq_funcs = {
+ .set = nbio_v7_9_set_ras_controller_irq_state,
+ .process = nbio_v7_9_process_ras_controller_irq,
+};
+
+static const struct amdgpu_irq_src_funcs nbio_v7_9_ras_err_event_athub_irq_funcs = {
+ .set = nbio_v7_9_set_ras_err_event_athub_irq_state,
+ .process = nbio_v7_9_process_err_event_athub_irq,
+};
+
+static int nbio_v7_9_init_ras_controller_interrupt(struct ras_core_context *ras_core, bool state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_controller_irq.funcs =
+ &nbio_v7_9_ras_controller_irq_funcs;
+ adev->nbio.ras_controller_irq.num_types = 1;
+
+ /* register ras controller interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
+ &adev->nbio.ras_controller_irq);
+
+ return r;
+}
+
+static int nbio_v7_9_init_ras_err_event_athub_interrupt(struct ras_core_context *ras_core,
+ bool state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int r;
+
+ /* init the irq funcs */
+ adev->nbio.ras_err_event_athub_irq.funcs =
+ &nbio_v7_9_ras_err_event_athub_irq_funcs;
+ adev->nbio.ras_err_event_athub_irq.num_types = 1;
+
+ /* register ras err event athub interrupt */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
+ NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
+ &adev->nbio.ras_err_event_athub_irq);
+
+ return r;
+}
+
+const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9 = {
+ .set_ras_controller_irq_state = nbio_v7_9_init_ras_controller_interrupt,
+ .set_ras_err_event_athub_irq_state = nbio_v7_9_init_ras_err_event_athub_interrupt,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h
new file mode 100644
index 000000000000..272259e9a0e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_nbio_v7_9.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RAS_NBIO_V7_9_H__
+#define __AMDGPU_RAS_NBIO_V7_9_H__
+
+extern const struct ras_nbio_sys_func amdgpu_ras_nbio_sys_func_v7_9;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
new file mode 100644
index 000000000000..5782c007de71
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xgmi.h"
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras_process.h"
+
+#define RAS_MGR_RETIRE_PAGE_INTERVAL 100
+#define RAS_EVENT_PROCESS_TIMEOUT 1200
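+/* Both values are in milliseconds (consumed via msecs_to_jiffies()). */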
+
+static void ras_process_retire_page_dwork(struct work_struct *work)
+{
+ struct amdgpu_ras_mgr *ras_mgr =
+ container_of(work, struct amdgpu_ras_mgr, retire_page_dwork.work);
+ struct amdgpu_device *adev = ras_mgr->adev;
+ int ret;
+
+ if (amdgpu_ras_is_rma(adev))
+ return;
+
+ /* If gpu reset is ongoing, delay retiring the bad pages */
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
+ schedule_delayed_work(&ras_mgr->retire_page_dwork,
+ msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL * 3));
+ return;
+ }
+
+ ret = ras_umc_handle_bad_pages(ras_mgr->ras_core, NULL);
+ if (!ret)
+ schedule_delayed_work(&ras_mgr->retire_page_dwork,
+ msecs_to_jiffies(RAS_MGR_RETIRE_PAGE_INTERVAL));
+}
+
+int amdgpu_ras_process_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ ras_mgr->is_paused = false;
+ init_completion(&ras_mgr->ras_event_done);
+
+ INIT_DELAYED_WORK(&ras_mgr->retire_page_dwork, ras_process_retire_page_dwork);
+
+ return 0;
+}
+
+int amdgpu_ras_process_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ ras_mgr->is_paused = false;
+ /* Save all cached bad pages to eeprom */
+ flush_delayed_work(&ras_mgr->retire_page_dwork);
+ cancel_delayed_work_sync(&ras_mgr->retire_page_dwork);
+ return 0;
+}
+
+int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr->ras_core)
+ return -EINVAL;
+
+ return ras_process_add_interrupt_req(ras_mgr->ras_core, NULL, true);
+}
+
+int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev, void *data)
+{
+ amdgpu_ras_set_fed(adev, true);
+ return amdgpu_ras_mgr_reset_gpu(adev, AMDGPU_RAS_GPU_RESET_MODE1_RESET);
+}
+
+int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_ih_info *ih_info = (struct ras_ih_info *)data;
+ struct ras_event_req req;
+ uint64_t seqno;
+
+ if (!ih_info)
+ return -EINVAL;
+
+ memset(&req, 0, sizeof(req));
+ req.block = ih_info->block;
+ req.data = ih_info->data;
+ req.pasid = ih_info->pasid;
+ req.pasid_fn = ih_info->pasid_fn;
+ req.reset = ih_info->reset;
+
+ seqno = ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, false);
+
+ /* When the ACA register cannot be read from FW, the poison
+ * consumption seqno in the fifo will not pop up, so it is
+ * necessary to check whether the seqno is the previous seqno.
+ */
+ if (seqno == ras_mgr->last_poison_consumption_seqno) {
+ /* Pop and discard the previous seqno */
+ ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, true);
+ seqno = ras_core_get_seqno(ras_mgr->ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, false);
+ }
+ ras_mgr->last_poison_consumption_seqno = seqno;
+ req.seqno = seqno;
+
+ return ras_process_add_interrupt_req(ras_mgr->ras_core, &req, false);
+}
+
+int amdgpu_ras_process_begin(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (ras_mgr->is_paused)
+ return -EAGAIN;
+
+ reinit_completion(&ras_mgr->ras_event_done);
+ return 0;
+}
+
+int amdgpu_ras_process_end(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ complete(&ras_mgr->ras_event_done);
+ return 0;
+}
+
+int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ long rc;
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ if (!ras_mgr->ras_core->is_initialized)
+ return -EPERM;
+
+ ras_mgr->is_paused = true;
+
+ /* Wait for RAS event processing to complete */
+ rc = wait_for_completion_interruptible_timeout(&ras_mgr->ras_event_done,
+ msecs_to_jiffies(RAS_EVENT_PROCESS_TIMEOUT));
+ if (rc <= 0)
+		RAS_DEV_WARN(adev, "Wait for RAS event processing to complete %s\n",
+			     rc ? "was interrupted" : "timed out");
+
+ flush_delayed_work(&ras_mgr->retire_page_dwork);
+ return 0;
+}
+
+int amdgpu_ras_process_post_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+
+ if (!ras_mgr || !ras_mgr->ras_core)
+ return -EINVAL;
+
+ if (!ras_mgr->ras_core->is_initialized)
+ return -EPERM;
+
+ ras_mgr->is_paused = false;
+
+ schedule_delayed_work(&ras_mgr->retire_page_dwork, 0);
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h
new file mode 100644
index 000000000000..d55cdaeac441
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_process.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (c) 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#ifndef __AMDGPU_RAS_PROCESS_H__
+#define __AMDGPU_RAS_PROCESS_H__
+#include "ras_process.h"
+#include "amdgpu_ras_mgr.h"
+
+enum ras_ih_type;
+int amdgpu_ras_process_init(struct amdgpu_device *adev);
+int amdgpu_ras_process_fini(struct amdgpu_device *adev);
+int amdgpu_ras_process_handle_umc_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_handle_unexpected_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_handle_consumption_interrupt(struct amdgpu_device *adev,
+ void *data);
+int amdgpu_ras_process_begin(struct amdgpu_device *adev);
+int amdgpu_ras_process_end(struct amdgpu_device *adev);
+int amdgpu_ras_process_pre_reset(struct amdgpu_device *adev);
+int amdgpu_ras_process_post_reset(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c
new file mode 100644
index 000000000000..45ed8c3b5563
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/amdgpu_ras_sys.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras_sys.h"
+#include "amdgpu_ras_mgr.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+
+static int amdgpu_ras_sys_detect_fatal_event(struct ras_core_context *ras_core, void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret;
+ uint64_t seq_no;
+
+ ret = amdgpu_ras_global_ras_isr(adev);
+ if (ret)
+ return ret;
+
+ seq_no = amdgpu_ras_mgr_gen_ras_event_seqno(adev, RAS_SEQNO_TYPE_UE);
+ RAS_DEV_INFO(adev,
+ "{%llu} Uncorrectable hardware error(ERREVENT_ATHUB_INTERRUPT) detected!\n",
+ seq_no);
+
+ return amdgpu_ras_process_handle_unexpected_interrupt(adev, data);
+}
+
+static int amdgpu_ras_sys_poison_consumption_event(struct ras_core_context *ras_core,
+ void *data)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct ras_event_req *req = (struct ras_event_req *)data;
+ pasid_notify pasid_fn;
+
+ if (!req)
+ return -EINVAL;
+
+ if (req->pasid_fn) {
+ pasid_fn = (pasid_notify)req->pasid_fn;
+ pasid_fn(adev, req->pasid, req->data);
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t *seqno)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(adev);
+ struct ras_event_manager *event_mgr;
+ struct ras_event_state *event_state;
+ struct amdgpu_hive_info *hive;
+ enum ras_event_type event_type;
+ uint64_t seq_no;
+
+ if (!ras_mgr || !seqno ||
+ (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX))
+ return -EINVAL;
+
+ switch (seqno_type) {
+ case RAS_SEQNO_TYPE_UE:
+ event_type = RAS_EVENT_TYPE_FATAL;
+ break;
+ case RAS_SEQNO_TYPE_CE:
+ case RAS_SEQNO_TYPE_DE:
+ event_type = RAS_EVENT_TYPE_POISON_CREATION;
+ break;
+ case RAS_SEQNO_TYPE_POISON_CONSUMPTION:
+ event_type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
+ break;
+ default:
+ event_type = RAS_EVENT_TYPE_INVALID;
+ break;
+ }
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ event_mgr = hive ? &hive->event_mgr : &ras_mgr->ras_event_mgr;
+ event_state = &event_mgr->event_state[event_type];
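+	/* A fatal event seen while recovery is already running reuses the
+	 * last seqno instead of allocating a new one.
+	 */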
+ if ((event_type == RAS_EVENT_TYPE_FATAL) && amdgpu_ras_in_recovery(adev)) {
+ seq_no = event_state->last_seqno;
+ } else {
+ seq_no = atomic64_inc_return(&event_mgr->seqno);
+ event_state->last_seqno = seq_no;
+ atomic64_inc(&event_state->count);
+ }
+ amdgpu_put_xgmi_hive(hive);
+
+	*seqno = seq_no;
+	return 0;
+}
+
+static int amdgpu_ras_sys_event_notifier(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data)
+{
+ struct amdgpu_ras_mgr *ras_mgr = amdgpu_ras_mgr_get_context(ras_core->dev);
+ int ret = 0;
+
+ switch (event_id) {
+ case RAS_EVENT_ID__BAD_PAGE_DETECTED:
+ schedule_delayed_work(&ras_mgr->retire_page_dwork, 0);
+ break;
+ case RAS_EVENT_ID__POISON_CONSUMPTION:
+ amdgpu_ras_sys_poison_consumption_event(ras_core, data);
+ break;
+ case RAS_EVENT_ID__RESERVE_BAD_PAGE:
+ ret = amdgpu_ras_reserve_page(ras_core->dev, *(uint64_t *)data);
+ break;
+ case RAS_EVENT_ID__FATAL_ERROR_DETECTED:
+ ret = amdgpu_ras_sys_detect_fatal_event(ras_core, data);
+ break;
+ case RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM:
+ ret = amdgpu_dpm_send_hbm_bad_pages_num(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP:
+ ret = amdgpu_dpm_send_hbm_bad_channel_flag(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__DEVICE_RMA:
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_RMA, NULL, NULL);
+ ret = amdgpu_dpm_send_rma_reason(ras_core->dev);
+ break;
+ case RAS_EVENT_ID__RESET_GPU:
+ ret = amdgpu_ras_mgr_reset_gpu(ras_core->dev, *(uint32_t *)data);
+ break;
+ case RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN:
+ ret = amdgpu_ras_process_begin(ras_core->dev);
+ break;
+ case RAS_EVENT_ID__RAS_EVENT_PROC_END:
+ ret = amdgpu_ras_process_end(ras_core->dev);
+ break;
+ default:
+		RAS_DEV_WARN(ras_core->dev, "Invalid RAS notify event: %d\n", event_id);
+ break;
+ }
+
+ return ret;
+}
+
+static u64 amdgpu_ras_sys_get_utc_second_timestamp(struct ras_core_context *ras_core)
+{
+ return ktime_get_real_seconds();
+}
+
+static int amdgpu_ras_sys_check_gpu_status(struct ras_core_context *ras_core,
+ uint32_t *status)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ uint32_t gpu_status = 0;
+
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev))
+ gpu_status |= RAS_GPU_STATUS__IN_RESET;
+
+ if (amdgpu_sriov_vf(adev))
+ gpu_status |= RAS_GPU_STATUS__IS_VF;
+
+ *status = gpu_status;
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+
+ dev_info->device_id = adev->pdev->device;
+ dev_info->vendor_id = adev->pdev->vendor;
+ dev_info->socket_id = adev->smuio.funcs->get_socket_id(adev);
+
+ return 0;
+}
+
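+/*
+ * Bridge the rascore lock callback onto the reset domain rwsem; for a
+ * trylock, the down_read_trylock() result (1 on success, 0 on failure)
+ * is passed back to the caller.
+ */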
+static int amdgpu_ras_sys_gpu_reset_lock(struct ras_core_context *ras_core,
+ bool down, bool try)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ int ret = 0;
+
+ if (down && try)
+ ret = down_read_trylock(&adev->reset_domain->sem);
+ else if (down)
+ down_read(&adev->reset_domain->sem);
+ else
+ up_read(&adev->reset_domain->sem);
+
+ return ret;
+}
+
+static bool amdgpu_ras_sys_detect_ras_interrupt(struct ras_core_context *ras_core)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
+static int amdgpu_ras_sys_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)ras_core->dev;
+ struct psp_context *psp = &adev->psp;
+ struct psp_ring *psp_ring;
+ struct ta_mem_context *mem_ctx;
+
+ if (mem_type == GPU_MEM_TYPE_RAS_PSP_RING) {
+ psp_ring = &psp->km_ring;
+ gpu_mem->mem_bo = adev->firmware.rbuf;
+ gpu_mem->mem_size = psp_ring->ring_size;
+ gpu_mem->mem_mc_addr = psp_ring->ring_mem_mc_addr;
+ gpu_mem->mem_cpu_addr = psp_ring->ring_mem;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_CMD) {
+ gpu_mem->mem_bo = psp->cmd_buf_bo;
+ gpu_mem->mem_size = PSP_CMD_BUFFER_SIZE;
+ gpu_mem->mem_mc_addr = psp->cmd_buf_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->cmd_buf_mem;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_PSP_FENCE) {
+ gpu_mem->mem_bo = psp->fence_buf_bo;
+ gpu_mem->mem_size = PSP_FENCE_BUFFER_SIZE;
+ gpu_mem->mem_mc_addr = psp->fence_buf_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->fence_buf;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_TA_FW) {
+ gpu_mem->mem_bo = psp->fw_pri_bo;
+ gpu_mem->mem_size = PSP_1_MEG;
+ gpu_mem->mem_mc_addr = psp->fw_pri_mc_addr;
+ gpu_mem->mem_cpu_addr = psp->fw_pri_buf;
+ } else if (mem_type == GPU_MEM_TYPE_RAS_TA_CMD) {
+ mem_ctx = &psp->ras_context.context.mem_context;
+ gpu_mem->mem_bo = mem_ctx->shared_bo;
+ gpu_mem->mem_size = mem_ctx->shared_mem_size;
+ gpu_mem->mem_mc_addr = mem_ctx->shared_mc_addr;
+ gpu_mem->mem_cpu_addr = mem_ctx->shared_buf;
+ } else {
+ return -EINVAL;
+ }
+
+ if (!gpu_mem->mem_bo || !gpu_mem->mem_size ||
+ !gpu_mem->mem_mc_addr || !gpu_mem->mem_cpu_addr) {
+		RAS_DEV_ERR(ras_core->dev, "The RAS PSP GPU memory is invalid!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_sys_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+	return 0;
+}
+
+const struct ras_sys_func amdgpu_ras_sys_fn = {
+ .ras_notifier = amdgpu_ras_sys_event_notifier,
+ .get_utc_second_timestamp = amdgpu_ras_sys_get_utc_second_timestamp,
+ .gen_seqno = amdgpu_ras_sys_gen_seqno,
+ .check_gpu_status = amdgpu_ras_sys_check_gpu_status,
+ .get_device_system_info = amdgpu_ras_sys_get_device_system_info,
+ .gpu_reset_lock = amdgpu_ras_sys_gpu_reset_lock,
+ .detect_ras_interrupt = amdgpu_ras_sys_detect_ras_interrupt,
+ .get_gpu_mem = amdgpu_ras_sys_get_gpu_mem,
+ .put_gpu_mem = amdgpu_ras_sys_put_gpu_mem,
+};
diff --git a/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
new file mode 100644
index 000000000000..8156531a7b63
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/ras_mgr/ras_sys.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_SYS_H__
+#define __RAS_SYS_H__
+#include <linux/stdarg.h>
+#include <linux/printk.h>
+#include <linux/dev_printk.h>
+#include <linux/mempool.h>
+#include "amdgpu.h"
+
+#define RAS_DEV_ERR(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_err(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_ERR fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_WARN(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_warn(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_WARNING fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_INFO(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_info(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_INFO fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define RAS_DEV_DBG(device, fmt, ...) \
+ do { \
+ if (device) \
+ dev_dbg(((struct amdgpu_device *)device)->dev, fmt, ##__VA_ARGS__); \
+ else \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+ } while (0)
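+
+/*
+ * Illustrative use (variable names assumed, not from this file):
+ *   RAS_DEV_INFO(adev, "bad page count: %u\n", count);
+ * Each wrapper falls back to plain printk() when no device is given.
+ */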
+
+#define RAS_INFO(fmt, ...) printk(KERN_INFO fmt, ##__VA_ARGS__)
+
+#define RAS_DEV_RREG32_SOC15(dev, ip, inst, reg) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+ 0, ip##_HWIP, inst); \
+})
+
+#define RAS_DEV_WREG32_SOC15(dev, ip, inst, reg, value) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
+ value, 0, ip##_HWIP, inst); \
+})
+
+/* GET_INST returns the physical instance corresponding to a logical instance */
+#define RAS_GET_INST(dev, ip, inst) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ adev->ip_map.logical_to_dev_inst ? \
+ adev->ip_map.logical_to_dev_inst(adev, ip##_HWIP, inst) : inst; \
+})
+
+#define RAS_GET_MASK(dev, ip, mask) \
+({ \
+ struct amdgpu_device *adev = (struct amdgpu_device *)dev; \
+ (adev->ip_map.logical_to_dev_mask ? \
+ adev->ip_map.logical_to_dev_mask(adev, ip##_HWIP, mask) : mask); \
+})
+
+static inline void *ras_radix_tree_delete_iter(struct radix_tree_root *root, void *iter)
+{
+ return radix_tree_delete(root, ((struct radix_tree_iter *)iter)->index);
+}
+
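+/* Type-erased wrapper so callers can pass an opaque wait queue and a
+ * condition callback straight through to wait_event_interruptible_timeout().
+ */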
+static inline long ras_wait_event_interruptible_timeout(void *wq_head,
+ int (*condition)(void *param), void *param, unsigned int timeout)
+{
+ return wait_event_interruptible_timeout(*(wait_queue_head_t *)wq_head,
+ condition(param), timeout);
+}
+
+extern const struct ras_sys_func amdgpu_ras_sys_fn;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/Makefile b/drivers/gpu/drm/amd/ras/rascore/Makefile
new file mode 100644
index 000000000000..e826a1f86424
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/Makefile
@@ -0,0 +1,44 @@
+#
+# Copyright 2025 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+RAS_CORE_FILES = ras_core.o \
+ ras_mp1.o \
+ ras_mp1_v13_0.o \
+ ras_aca.o \
+ ras_aca_v1_0.o \
+ ras_eeprom.o \
+ ras_umc.o \
+ ras_umc_v12_0.o \
+ ras_cmd.o \
+ ras_gfx.o \
+ ras_gfx_v9_0.o \
+ ras_process.o \
+ ras_nbio.o \
+ ras_nbio_v7_9.o \
+ ras_log_ring.o \
+ ras_cper.o \
+ ras_psp.o \
+ ras_psp_v13_0.o
+
+RAS_CORE = $(addprefix $(AMD_GPU_RAS_PATH)/rascore/,$(RAS_CORE_FILES))
+
+AMD_GPU_RAS_FILES += $(RAS_CORE)
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras.h b/drivers/gpu/drm/amd/ras/rascore/ras.h
new file mode 100644
index 000000000000..3396b2e0949d
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras.h
@@ -0,0 +1,370 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_H__
+#define __RAS_H__
+#include "ras_sys.h"
+#include "ras_umc.h"
+#include "ras_aca.h"
+#include "ras_eeprom.h"
+#include "ras_core_status.h"
+#include "ras_process.h"
+#include "ras_gfx.h"
+#include "ras_cmd.h"
+#include "ras_nbio.h"
+#include "ras_mp1.h"
+#include "ras_psp.h"
+#include "ras_log_ring.h"
+
+#define RAS_HW_ERR "[Hardware Error]: "
+
+#define RAS_GPU_PAGE_SHIFT 12
+#define RAS_ADDR_TO_PFN(addr) ((addr) >> RAS_GPU_PAGE_SHIFT)
+#define RAS_PFN_TO_ADDR(pfn) ((pfn) << RAS_GPU_PAGE_SHIFT)
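+/* e.g. RAS_ADDR_TO_PFN(0x3000) == 3 and RAS_PFN_TO_ADDR(3) == 0x3000
+ * with the 4K (1 << RAS_GPU_PAGE_SHIFT) GPU page size.
+ */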
+
+#define RAS_CORE_RESET_GPU 0x10000
+
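+/* A reset cause is the RAS_CORE_RESET_GPU flag ORed with one reason bit. */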
+#define GPU_RESET_CAUSE_POISON (RAS_CORE_RESET_GPU | 0x0001)
+#define GPU_RESET_CAUSE_FATAL (RAS_CORE_RESET_GPU | 0x0002)
+#define GPU_RESET_CAUSE_RMA (RAS_CORE_RESET_GPU | 0x0004)
+
+enum ras_block_id {
+ RAS_BLOCK_ID__UMC = 0,
+ RAS_BLOCK_ID__SDMA,
+ RAS_BLOCK_ID__GFX,
+ RAS_BLOCK_ID__MMHUB,
+ RAS_BLOCK_ID__ATHUB,
+ RAS_BLOCK_ID__PCIE_BIF,
+ RAS_BLOCK_ID__HDP,
+ RAS_BLOCK_ID__XGMI_WAFL,
+ RAS_BLOCK_ID__DF,
+ RAS_BLOCK_ID__SMN,
+ RAS_BLOCK_ID__SEM,
+ RAS_BLOCK_ID__MP0,
+ RAS_BLOCK_ID__MP1,
+ RAS_BLOCK_ID__FUSE,
+ RAS_BLOCK_ID__MCA,
+ RAS_BLOCK_ID__VCN,
+ RAS_BLOCK_ID__JPEG,
+ RAS_BLOCK_ID__IH,
+ RAS_BLOCK_ID__MPIO,
+
+ RAS_BLOCK_ID__LAST
+};
+
+enum ras_ecc_err_type {
+ RAS_ECC_ERR__NONE = 0,
+ RAS_ECC_ERR__PARITY = 1,
+ RAS_ECC_ERR__SINGLE_CORRECTABLE = 2,
+ RAS_ECC_ERR__MULTI_UNCORRECTABLE = 4,
+ RAS_ECC_ERR__POISON = 8,
+};
+
+enum ras_err_type {
+ RAS_ERR_TYPE__UE = 0,
+ RAS_ERR_TYPE__CE,
+ RAS_ERR_TYPE__DE,
+ RAS_ERR_TYPE__LAST
+};
+
+enum ras_seqno_type {
+ RAS_SEQNO_TYPE_INVALID = 0,
+ RAS_SEQNO_TYPE_UE,
+ RAS_SEQNO_TYPE_CE,
+ RAS_SEQNO_TYPE_DE,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION,
+ RAS_SEQNO_TYPE_COUNT_MAX,
+};
+
+enum ras_seqno_fifo {
+ SEQNO_FIFO_INVALID = 0,
+ SEQNO_FIFO_POISON_CREATION,
+ SEQNO_FIFO_POISON_CONSUMPTION,
+ SEQNO_FIFO_COUNT_MAX
+};
+
+enum ras_notify_event {
+ RAS_EVENT_ID__NONE,
+ RAS_EVENT_ID__BAD_PAGE_DETECTED,
+ RAS_EVENT_ID__POISON_CONSUMPTION,
+ RAS_EVENT_ID__RESERVE_BAD_PAGE,
+ RAS_EVENT_ID__DEVICE_RMA,
+ RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ RAS_EVENT_ID__FATAL_ERROR_DETECTED,
+ RAS_EVENT_ID__RESET_GPU,
+ RAS_EVENT_ID__RESET_VF,
+ RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN,
+ RAS_EVENT_ID__RAS_EVENT_PROC_END,
+};
+
+enum ras_gpu_status {
+ RAS_GPU_STATUS__NOT_READY = 0,
+ RAS_GPU_STATUS__READY = 0x1,
+ RAS_GPU_STATUS__IN_RESET = 0x2,
+ RAS_GPU_STATUS__IS_RMA = 0x4,
+ RAS_GPU_STATUS__IS_VF = 0x8,
+};
+
+struct ras_core_context;
+struct ras_bank_ecc;
+struct ras_umc;
+struct ras_aca;
+struct ras_process;
+struct ras_nbio;
+struct ras_log_ring;
+struct ras_psp;
+
+struct ras_mp1_sys_func {
+ int (*mp1_get_valid_bank_count)(struct ras_core_context *ras_core,
+ u32 msg, u32 *count);
+ int (*mp1_dump_valid_bank)(struct ras_core_context *ras_core,
+ u32 msg, u32 idx, u32 reg_idx, u64 *val);
+};
+
+struct ras_eeprom_sys_func {
+ int (*eeprom_i2c_xfer)(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 buf_size, bool read);
+ int (*update_eeprom_i2c_config)(struct ras_core_context *ras_core);
+};
+
+struct ras_nbio_sys_func {
+ int (*set_ras_controller_irq_state)(struct ras_core_context *ras_core,
+ bool state);
+ int (*set_ras_err_event_athub_irq_state)(struct ras_core_context *ras_core,
+ bool state);
+};
+
+struct ras_time {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ long tm_year;
+};
+
+struct device_system_info {
+ uint32_t device_id;
+ uint32_t vendor_id;
+ uint32_t socket_id;
+};
+
+enum gpu_mem_type {
+ GPU_MEM_TYPE_DEFAULT,
+ GPU_MEM_TYPE_RAS_PSP_RING,
+ GPU_MEM_TYPE_RAS_PSP_CMD,
+ GPU_MEM_TYPE_RAS_PSP_FENCE,
+ GPU_MEM_TYPE_RAS_TA_FW,
+ GPU_MEM_TYPE_RAS_TA_CMD,
+};
+
+struct ras_psp_sys_func {
+ int (*get_ras_psp_system_status)(struct ras_core_context *ras_core,
+ struct ras_psp_sys_status *status);
+ int (*get_ras_ta_init_param)(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param);
+};
+
+struct ras_sys_func {
+ int (*gpu_reset_lock)(struct ras_core_context *ras_core,
+ bool down, bool try);
+ int (*check_gpu_status)(struct ras_core_context *ras_core,
+ uint32_t *status);
+ int (*gen_seqno)(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t *seqno);
+ int (*async_handle_ras_event)(struct ras_core_context *ras_core, void *data);
+ int (*ras_notifier)(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data);
+ u64 (*get_utc_second_timestamp)(struct ras_core_context *ras_core);
+ int (*get_device_system_info)(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info);
+ bool (*detect_ras_interrupt)(struct ras_core_context *ras_core);
+ int (*get_gpu_mem)(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+ int (*put_gpu_mem)(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+};
+
+struct ras_ecc_count {
+ uint64_t new_ce_count;
+ uint64_t total_ce_count;
+ uint64_t new_ue_count;
+ uint64_t total_ue_count;
+ uint64_t new_de_count;
+ uint64_t total_de_count;
+};
+
+struct ras_bank_ecc {
+ uint32_t nps;
+ uint64_t seq_no;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+};
+
+struct ras_bank_ecc_node {
+ struct list_head node;
+ struct ras_bank_ecc ecc;
+};
+
+struct ras_aca_config {
+ u32 socket_num_per_hive;
+ u32 aid_num_per_socket;
+ u32 xcd_num_per_aid;
+};
+
+struct ras_mp1_config {
+ const struct ras_mp1_sys_func *mp1_sys_fn;
+};
+
+struct ras_nbio_config {
+ const struct ras_nbio_sys_func *nbio_sys_fn;
+};
+
+struct ras_psp_config {
+ const struct ras_psp_sys_func *psp_sys_fn;
+};
+
+struct ras_umc_config {
+ uint32_t umc_vram_type;
+};
+
+struct ras_eeprom_config {
+ const struct ras_eeprom_sys_func *eeprom_sys_fn;
+ int eeprom_record_threshold_config;
+ uint32_t eeprom_record_threshold_count;
+ void *eeprom_i2c_adapter;
+ u32 eeprom_i2c_addr;
+ u32 eeprom_i2c_port;
+ u16 max_i2c_read_len;
+ u16 max_i2c_write_len;
+};
+
+struct ras_core_config {
+ u32 aca_ip_version;
+ u32 umc_ip_version;
+ u32 mp1_ip_version;
+ u32 gfx_ip_version;
+ u32 nbio_ip_version;
+ u32 psp_ip_version;
+
+ bool poison_supported;
+ bool ras_eeprom_supported;
+ const struct ras_sys_func *sys_fn;
+
+ struct ras_aca_config aca_cfg;
+ struct ras_mp1_config mp1_cfg;
+ struct ras_nbio_config nbio_cfg;
+ struct ras_psp_config psp_cfg;
+ struct ras_eeprom_config eeprom_cfg;
+ struct ras_umc_config umc_cfg;
+};
+
+struct ras_core_context {
+ void *dev;
+ struct ras_core_config *config;
+ u32 socket_num_per_hive;
+ u32 aid_num_per_socket;
+ u32 xcd_num_per_aid;
+ int max_ue_banks_per_query;
+ int max_ce_banks_per_query;
+ struct ras_aca ras_aca;
+
+ bool ras_eeprom_supported;
+ struct ras_eeprom_control ras_eeprom;
+
+ struct ras_psp ras_psp;
+ struct ras_umc ras_umc;
+ struct ras_nbio ras_nbio;
+ struct ras_gfx ras_gfx;
+ struct ras_mp1 ras_mp1;
+ struct ras_process ras_proc;
+ struct ras_cmd_mgr ras_cmd;
+ struct ras_log_ring ras_log_ring;
+
+ const struct ras_sys_func *sys_fn;
+
+ /* is poison mode supported */
+ bool poison_supported;
+
+ bool is_rma;
+ bool is_initialized;
+
+ struct kfifo de_seqno_fifo;
+ struct kfifo consumption_seqno_fifo;
+ spinlock_t seqno_lock;
+
+ bool ras_core_enabled;
+};
+
+struct ras_core_context *ras_core_create(struct ras_core_config *init_config);
+void ras_core_destroy(struct ras_core_context *ras_core);
+int ras_core_sw_init(struct ras_core_context *ras_core);
+int ras_core_sw_fini(struct ras_core_context *ras_core);
+int ras_core_hw_init(struct ras_core_context *ras_core);
+int ras_core_hw_fini(struct ras_core_context *ras_core);
+bool ras_core_is_ready(struct ras_core_context *ras_core);
+uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type);
+uint64_t ras_core_get_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, bool pop);
+
+int ras_core_put_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t seqno);
+
+int ras_core_update_ecc_info(struct ras_core_context *ras_core);
+int ras_core_query_block_ecc_data(struct ras_core_context *ras_core,
+ enum ras_block_id block, struct ras_ecc_count *ecc_count);
+
+bool ras_core_gpu_in_reset(struct ras_core_context *ras_core);
+bool ras_core_gpu_is_rma(struct ras_core_context *ras_core);
+bool ras_core_gpu_is_vf(struct ras_core_context *ras_core);
+bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data);
+int ras_core_handle_fatal_error(struct ras_core_context *ras_core);
+
+uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core);
+const char *ras_core_get_ras_block_name(enum ras_block_id block_id);
+int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
+ uint64_t timestamp, struct ras_time *tm);
+
+int ras_core_set_status(struct ras_core_context *ras_core, bool enable);
+bool ras_core_is_enabled(struct ras_core_context *ras_core);
+uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core);
+int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa);
+bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core);
+int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem);
+bool ras_core_check_safety_watermark(struct ras_core_context *ras_core);
+int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core);
+void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core);
+void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core);
+int ras_core_event_notify(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data);
+int ras_core_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c
new file mode 100644
index 000000000000..e433c70d2989
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_aca.h"
+#include "ras_aca_v1_0.h"
+#include "ras_mp1_v13_0.h"
+
+#define ACA_MARK_FATAL_FLAG 0x100
+#define ACA_MARK_UE_READ_FLAG 0x1
+
+#define blk_name(block_id) ras_core_get_ras_block_name(block_id)
+
+static struct aca_regs_dump {
+ const char *name;
+ int reg_idx;
+} aca_regs[] = {
+ {"CONTROL", ACA_REG_IDX__CTL},
+ {"STATUS", ACA_REG_IDX__STATUS},
+ {"ADDR", ACA_REG_IDX__ADDR},
+ {"MISC", ACA_REG_IDX__MISC0},
+ {"CONFIG", ACA_REG_IDX__CONFG},
+ {"IPID", ACA_REG_IDX__IPID},
+ {"SYND", ACA_REG_IDX__SYND},
+ {"DESTAT", ACA_REG_IDX__DESTAT},
+ {"DEADDR", ACA_REG_IDX__DEADDR},
+ {"CONTROL_MASK", ACA_REG_IDX__CTL_MASK},
+};
+
+static void aca_report_ecc_info(struct ras_core_context *ras_core,
+ u64 seq_no, u32 blk, u32 skt, u32 aid,
+ struct aca_aid_ecc *aid_ecc,
+ struct aca_bank_ecc *new_ecc)
+{
+ struct aca_ecc_count ecc_count = {0};
+
+ ecc_count.new_ue_count = new_ecc->ue_count;
+ ecc_count.new_de_count = new_ecc->de_count;
+ ecc_count.new_ce_count = new_ecc->ce_count;
+ if (blk == RAS_BLOCK_ID__GFX) {
+ struct aca_ecc_count *xcd_ecc;
+ int xcd_id;
+
+ for (xcd_id = 0; xcd_id < aid_ecc->xcd.xcd_num; xcd_id++) {
+ xcd_ecc = &aid_ecc->xcd.xcd[xcd_id].ecc_err;
+ ecc_count.total_ue_count += xcd_ecc->total_ue_count;
+ ecc_count.total_de_count += xcd_ecc->total_de_count;
+ ecc_count.total_ce_count += xcd_ecc->total_ce_count;
+ }
+ } else {
+ ecc_count.total_ue_count = aid_ecc->ecc_err.total_ue_count;
+ ecc_count.total_de_count = aid_ecc->ecc_err.total_de_count;
+ ecc_count.total_ce_count = aid_ecc->ecc_err.total_ce_count;
+ }
+
+ if (ecc_count.new_ue_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new uncorrectable hardware errors detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_ue_count, blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u uncorrectable hardware errors detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_ue_count, blk_name(blk));
+ }
+
+ if (ecc_count.new_de_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new %s detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_de_count,
+ (blk == RAS_BLOCK_ID__UMC) ?
+ "deferred hardware errors" : "poison consumption",
+ blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u %s detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_de_count,
+ (blk == RAS_BLOCK_ID__UMC) ?
+ "deferred hardware errors" : "poison consumption",
+ blk_name(blk));
+ }
+
+ if (ecc_count.new_ce_count) {
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u new correctable hardware errors detected in %s block\n",
+ seq_no, skt, aid, ecc_count.new_ce_count, blk_name(blk));
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} socket: %d, die: %d, %u correctable hardware errors detected in total in %s block\n",
+ seq_no, skt, aid, ecc_count.total_ce_count, blk_name(blk));
+ }
+}
+
+static void aca_bank_log(struct ras_core_context *ras_core,
+ int idx, int total, struct aca_bank_reg *bank,
+ struct aca_bank_ecc *bank_ecc)
+{
+ int i;
+
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu}" RAS_HW_ERR "Accelerator Check Architecture events logged\n",
+ bank->seq_no);
+	/* plus 1 for the output format, e.g. ACA[08/08]: xxxx */
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu}" RAS_HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+ bank->seq_no, idx + 1, total,
+ aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+}
+
+static void aca_log_bank_data(struct ras_core_context *ras_core,
+ struct aca_bank_reg *bank, struct aca_bank_ecc *bank_ecc,
+ struct ras_log_batch_tag *batch)
+{
+ if (bank_ecc->ue_count)
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_UE, bank->regs, batch);
+ else if (bank_ecc->de_count)
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_DE, bank->regs, batch);
+ else
+ ras_log_ring_add_log_event(ras_core, RAS_LOG_EVENT_CE, bank->regs, batch);
+}
+
+static int aca_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ return ras_mp1_get_bank_count(ras_core, type, count);
+}
+
+static bool aca_match_bank(struct aca_block *aca_blk, struct aca_bank_reg *bank)
+{
+ const struct aca_bank_hw_ops *bank_ops;
+
+ if (!aca_blk->blk_info)
+ return false;
+
+ bank_ops = &aca_blk->blk_info->bank_ops;
+ if (!bank_ops->bank_match)
+ return false;
+
+ return bank_ops->bank_match(aca_blk, bank);
+}
+
+static int aca_parse_bank(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk,
+ struct aca_bank_reg *bank,
+ struct aca_bank_ecc *ecc)
+{
+	/* Only blocks that matched reach here, so blk_info is non-NULL */
+	const struct aca_bank_hw_ops *bank_ops = &aca_blk->blk_info->bank_ops;
+
+	if (!bank_ops->bank_parse)
+		return -RAS_CORE_NOT_SUPPORTED;
+
+ return bank_ops->bank_parse(ras_core, aca_blk, bank, ecc);
+}
+
+static int aca_check_block_ecc_info(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, struct aca_ecc_info *info)
+{
+ if (info->socket_id >= aca_blk->ecc.socket_num_per_hive) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Socket id (%d) is out of config! max:%u\n",
+ info->socket_id, aca_blk->ecc.socket_num_per_hive);
+ return -ENODATA;
+ }
+
+ if (info->die_id >= aca_blk->ecc.socket[info->socket_id].aid_num) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Die id (%d) is out of config! max:%u\n",
+ info->die_id, aca_blk->ecc.socket[info->socket_id].aid_num);
+ return -ENODATA;
+ }
+
+ if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX) &&
+ (info->xcd_id >=
+ aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Xcd id (%d) is out of config! max:%u\n",
+ info->xcd_id,
+ aca_blk->ecc.socket[info->socket_id].aid[info->die_id].xcd.xcd_num);
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
+static int aca_log_bad_bank(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, struct aca_bank_reg *bank,
+ struct aca_bank_ecc *bank_ecc)
+{
+ struct aca_ecc_info *info;
+ struct aca_ecc_count *ecc_err;
+ struct aca_aid_ecc *aid_ecc;
+ int ret;
+
+ info = &bank_ecc->bank_info;
+
+ ret = aca_check_block_ecc_info(ras_core, aca_blk, info);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aid_ecc = &aca_blk->ecc.socket[info->socket_id].aid[info->die_id];
+ if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX)
+ ecc_err = &aid_ecc->xcd.xcd[info->xcd_id].ecc_err;
+ else
+ ecc_err = &aid_ecc->ecc_err;
+
+ ecc_err->new_ce_count += bank_ecc->ce_count;
+ ecc_err->total_ce_count += bank_ecc->ce_count;
+ ecc_err->new_ue_count += bank_ecc->ue_count;
+ ecc_err->total_ue_count += bank_ecc->ue_count;
+ ecc_err->new_de_count += bank_ecc->de_count;
+ ecc_err->total_de_count += bank_ecc->de_count;
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ if ((aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC) &&
+ bank_ecc->de_count) {
+ struct ras_bank_ecc ras_ecc = {0};
+
+ ras_ecc.nps = ras_core_get_curr_nps_mode(ras_core);
+ ras_ecc.addr = bank_ecc->bank_info.addr;
+ ras_ecc.ipid = bank_ecc->bank_info.ipid;
+ ras_ecc.status = bank_ecc->bank_info.status;
+ ras_ecc.seq_no = bank->seq_no;
+
+ if (ras_core_gpu_in_reset(ras_core))
+ ras_umc_log_bad_bank_pending(ras_core, &ras_ecc);
+ else
+ ras_umc_log_bad_bank(ras_core, &ras_ecc);
+ }
+
+ aca_report_ecc_info(ras_core,
+ bank->seq_no, aca_blk->blk_info->ras_block_id, info->socket_id, info->die_id,
+ &aca_blk->ecc.socket[info->socket_id].aid[info->die_id], bank_ecc);
+
+ return 0;
+}
+
+static struct aca_block *aca_get_bank_aca_block(struct ras_core_context *ras_core,
+ struct aca_bank_reg *bank)
+{
+ int i = 0;
+
+ for (i = 0; i < RAS_BLOCK_ID__LAST; i++)
+ if (aca_match_bank(&ras_core->ras_aca.aca_blk[i], bank))
+ return &ras_core->ras_aca.aca_blk[i];
+
+ return NULL;
+}
+
+static int aca_dump_bank(struct ras_core_context *ras_core, u32 ecc_type,
+ int idx, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ int i, ret, reg_cnt;
+
+ reg_cnt = min_t(int, 16, ARRAY_SIZE(bank->regs));
+ for (i = 0; i < reg_cnt; i++) {
+ ret = ras_mp1_dump_bank(ras_core, ecc_type, idx, i, &bank->regs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static uint64_t aca_get_bank_seqno(struct ras_core_context *ras_core,
+ enum ras_err_type err_type, struct aca_block *aca_blk,
+ struct aca_bank_ecc *bank_ecc)
+{
+ uint64_t seq_no = 0;
+
+ if (bank_ecc->de_count) {
+ if (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__UMC)
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_DE, true);
+ else
+ seq_no = ras_core_get_seqno(ras_core,
+ RAS_SEQNO_TYPE_POISON_CONSUMPTION, true);
+ } else if (bank_ecc->ue_count) {
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_UE, true);
+ } else {
+ seq_no = ras_core_get_seqno(ras_core, RAS_SEQNO_TYPE_CE, true);
+ }
+
+ return seq_no;
+}
+
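+/*
+ * After a fatal error the UE banks are only dumped once: the first UE
+ * query sets ACA_MARK_UE_READ_FLAG, and any later UE query while the
+ * fatal flag is still set is treated as a duplicate and skipped.
+ */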
+static bool aca_dup_update_ue_in_fatal(struct ras_core_context *ras_core,
+ u32 ecc_type)
+{
+ struct ras_aca *aca = &ras_core->ras_aca;
+
+ if (ecc_type != RAS_ERR_TYPE__UE)
+ return false;
+
+ if (aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) {
+ if (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG)
+ return true;
+
+ aca->ue_updated_mark |= ACA_MARK_UE_READ_FLAG;
+ }
+
+ return false;
+}
+
+void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core)
+{
+	struct ras_aca *aca;
+
+	if (!ras_core)
+		return;
+
+	aca = &ras_core->ras_aca;
+	aca->ue_updated_mark |= ACA_MARK_FATAL_FLAG;
+}
+
+void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core)
+{
+	struct ras_aca *aca;
+
+	if (!ras_core)
+		return;
+
+	aca = &ras_core->ras_aca;
+	if ((aca->ue_updated_mark & ACA_MARK_FATAL_FLAG) &&
+	    (aca->ue_updated_mark & ACA_MARK_UE_READ_FLAG))
+		aca->ue_updated_mark = 0;
+}
+
+static int aca_banks_update(struct ras_core_context *ras_core,
+ u32 ecc_type, void *data)
+{
+ struct aca_bank_reg bank;
+ struct aca_block *aca_blk;
+ struct aca_bank_ecc bank_ecc;
+ struct ras_log_batch_tag *batch_tag = NULL;
+ u32 count = 0;
+ int ret = 0;
+ int i;
+
+ mutex_lock(&ras_core->ras_aca.bank_op_lock);
+
+ if (aca_dup_update_ue_in_fatal(ras_core, ecc_type))
+ goto out;
+
+ ret = aca_get_bank_count(ras_core, ecc_type, &count);
+ if (ret)
+ goto out;
+
+ if (!count)
+ goto out;
+
+ batch_tag = ras_log_ring_create_batch_tag(ras_core);
+ for (i = 0; i < count; i++) {
+ memset(&bank, 0, sizeof(bank));
+ ret = aca_dump_bank(ras_core, ecc_type, i, &bank);
+ if (ret)
+ break;
+
+ bank.ecc_type = ecc_type;
+
+ memset(&bank_ecc, 0, sizeof(bank_ecc));
+ aca_blk = aca_get_bank_aca_block(ras_core, &bank);
+ if (aca_blk)
+ ret = aca_parse_bank(ras_core, aca_blk, &bank, &bank_ecc);
+
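+		/*
+		 * If no block matched, bank_ecc stays zeroed, so
+		 * aca_get_bank_seqno() takes the CE branch and never
+		 * dereferences the NULL aca_blk.
+		 */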
+ bank.seq_no = aca_get_bank_seqno(ras_core, ecc_type, aca_blk, &bank_ecc);
+
+ aca_log_bank_data(ras_core, &bank, &bank_ecc, batch_tag);
+ aca_bank_log(ras_core, i, count, &bank, &bank_ecc);
+
+ if (!ret && aca_blk)
+ ret = aca_log_bad_bank(ras_core, aca_blk, &bank, &bank_ecc);
+
+ if (ret)
+ break;
+ }
+ ras_log_ring_destroy_batch_tag(ras_core, batch_tag);
+
+out:
+ mutex_unlock(&ras_core->ras_aca.bank_op_lock);
+ return ret;
+}
+
+int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 type, void *data)
+{
+	/* Update the new ACA banks into the per-block error cache first */
+ return aca_banks_update(ras_core, type, data);
+}
+
+static struct aca_block *ras_aca_get_block_handle(struct ras_core_context *ras_core, uint32_t blk)
+{
+ return &ras_core->ras_aca.aca_blk[blk];
+}
+
+static int ras_aca_clear_block_ecc_count(struct ras_core_context *ras_core, u32 blk)
+{
+ struct aca_block *aca_blk;
+ struct aca_aid_ecc *aid_ecc;
+ int skt, aid, xcd;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ aid_ecc = &aca_blk->ecc.socket[skt].aid[aid];
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++)
+ memset(&aid_ecc->xcd.xcd[xcd],
+ 0, sizeof(struct aca_xcd_ecc));
+ } else {
+ memset(&aid_ecc->ecc_err, 0, sizeof(aid_ecc->ecc_err));
+ }
+ }
+ }
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core)
+{
+ enum ras_block_id blk;
+	int ret = 0;
+
+ for (blk = RAS_BLOCK_ID__UMC; blk < RAS_BLOCK_ID__LAST; blk++) {
+ ret = ras_aca_clear_block_ecc_count(ras_core, blk);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk)
+{
+ struct aca_block *aca_blk;
+ int skt, aid, xcd;
+ struct aca_ecc_count *ecc_err;
+ struct aca_aid_ecc *aid_ecc;
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ aid_ecc = &aca_blk->ecc.socket[skt].aid[aid];
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (xcd = 0; xcd < aid_ecc->xcd.xcd_num; xcd++) {
+ ecc_err = &aid_ecc->xcd.xcd[xcd].ecc_err;
+ ecc_err->new_ce_count = 0;
+ ecc_err->new_ue_count = 0;
+ ecc_err->new_de_count = 0;
+ }
+ } else {
+ ecc_err = &aid_ecc->ecc_err;
+ ecc_err->new_ce_count = 0;
+ ecc_err->new_ue_count = 0;
+ ecc_err->new_de_count = 0;
+ }
+ }
+ }
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+static int ras_aca_get_block_each_aid_ecc_count(struct ras_core_context *ras_core,
+ u32 blk, u32 skt, u32 aid, u32 xcd,
+ struct aca_ecc_count *ecc_count)
+{
+ struct aca_block *aca_blk;
+ struct aca_ecc_count *ecc_err;
+
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ if (blk == RAS_BLOCK_ID__GFX)
+ ecc_err = &aca_blk->ecc.socket[skt].aid[aid].xcd.xcd[xcd].ecc_err;
+ else
+ ecc_err = &aca_blk->ecc.socket[skt].aid[aid].ecc_err;
+
+ ecc_count->new_ce_count = ecc_err->new_ce_count;
+ ecc_count->total_ce_count = ecc_err->total_ce_count;
+ ecc_count->new_ue_count = ecc_err->new_ue_count;
+ ecc_count->total_ue_count = ecc_err->total_ue_count;
+ ecc_count->new_de_count = ecc_err->new_de_count;
+ ecc_count->total_de_count = ecc_err->total_de_count;
+
+ return 0;
+}
+
+static inline void _add_ecc_count(struct aca_ecc_count *des, struct aca_ecc_count *src)
+{
+ des->new_ce_count += src->new_ce_count;
+ des->total_ce_count += src->total_ce_count;
+ des->new_ue_count += src->new_ue_count;
+ des->total_ue_count += src->total_ue_count;
+ des->new_de_count += src->new_de_count;
+ des->total_de_count += src->total_de_count;
+}
+
+static const struct ras_aca_ip_func *aca_get_ip_func(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(1, 0, 0):
+ return &ras_aca_func_v1_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "ACA ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core,
+ u32 blk, void *data)
+{
+ struct ras_ecc_count *err_data = (struct ras_ecc_count *)data;
+ struct aca_block *aca_blk;
+ int skt, aid, xcd;
+ struct aca_ecc_count ecc_xcd;
+ struct aca_ecc_count ecc_aid;
+ struct aca_ecc_count ecc;
+
+ if (blk >= RAS_BLOCK_ID__LAST)
+ return -EINVAL;
+
+ if (!err_data)
+ return -EINVAL;
+
+ aca_blk = ras_aca_get_block_handle(ras_core, blk);
+ memset(&ecc, 0, sizeof(ecc));
+
+ mutex_lock(&ras_core->ras_aca.aca_lock);
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ memset(&ecc_aid, 0, sizeof(ecc_aid));
+ for (xcd = 0;
+ xcd < aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num;
+ xcd++) {
+ memset(&ecc_xcd, 0, sizeof(ecc_xcd));
+ if (ras_aca_get_block_each_aid_ecc_count(ras_core,
+ blk, skt, aid, xcd, &ecc_xcd))
+ continue;
+ _add_ecc_count(&ecc_aid, &ecc_xcd);
+ }
+ _add_ecc_count(&ecc, &ecc_aid);
+ }
+ }
+ } else {
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++) {
+ memset(&ecc_aid, 0, sizeof(ecc_aid));
+ if (ras_aca_get_block_each_aid_ecc_count(ras_core,
+ blk, skt, aid, 0, &ecc_aid))
+ continue;
+ _add_ecc_count(&ecc, &ecc_aid);
+ }
+ }
+ }
+
+ err_data->new_ce_count = ecc.new_ce_count;
+ err_data->total_ce_count = ecc.total_ce_count;
+ err_data->new_ue_count = ecc.new_ue_count;
+ err_data->total_ue_count = ecc.total_ue_count;
+ err_data->new_de_count = ecc.new_de_count;
+ err_data->total_de_count = ecc.total_de_count;
+ mutex_unlock(&ras_core->ras_aca.aca_lock);
+
+ return 0;
+}
+
+int ras_aca_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+ struct ras_aca_config *aca_cfg = &ras_core->config->aca_cfg;
+ struct aca_block *aca_blk;
+ uint32_t socket_num_per_hive;
+ uint32_t aid_num_per_socket;
+ uint32_t xcd_num_per_aid;
+ int blk, skt, aid;
+
+ socket_num_per_hive = aca_cfg->socket_num_per_hive;
+ aid_num_per_socket = aca_cfg->aid_num_per_socket;
+ xcd_num_per_aid = aca_cfg->xcd_num_per_aid;
+
+ if (!xcd_num_per_aid || !aid_num_per_socket ||
+ (socket_num_per_hive > MAX_SOCKET_NUM_PER_HIVE) ||
+ (aid_num_per_socket > MAX_AID_NUM_PER_SOCKET) ||
+ (xcd_num_per_aid > MAX_XCD_NUM_PER_AID)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid ACA system configuration: %d, %d, %d\n",
+ socket_num_per_hive, aid_num_per_socket, xcd_num_per_aid);
+ return -EINVAL;
+ }
+
+ memset(ras_aca, 0, sizeof(*ras_aca));
+
+ for (blk = 0; blk < RAS_BLOCK_ID__LAST; blk++) {
+ aca_blk = &ras_aca->aca_blk[blk];
+ aca_blk->ecc.socket_num_per_hive = socket_num_per_hive;
+ for (skt = 0; skt < aca_blk->ecc.socket_num_per_hive; skt++) {
+ aca_blk->ecc.socket[skt].aid_num = aid_num_per_socket;
+ if (blk == RAS_BLOCK_ID__GFX) {
+ for (aid = 0; aid < aca_blk->ecc.socket[skt].aid_num; aid++)
+ aca_blk->ecc.socket[skt].aid[aid].xcd.xcd_num =
+ xcd_num_per_aid;
+ }
+ }
+ }
+
+ mutex_init(&ras_aca->aca_lock);
+ mutex_init(&ras_aca->bank_op_lock);
+
+ return 0;
+}
+
+int ras_aca_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+
+ mutex_destroy(&ras_aca->aca_lock);
+ mutex_destroy(&ras_aca->bank_op_lock);
+
+ return 0;
+}
+
+int ras_aca_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+ struct aca_block *aca_blk;
+ const struct ras_aca_ip_func *ip_func;
+ int i;
+
+ ras_aca->aca_ip_version = ras_core->config->aca_ip_version;
+ ip_func = aca_get_ip_func(ras_core, ras_aca->aca_ip_version);
+ if (!ip_func)
+ return -EINVAL;
+
+ for (i = 0; i < ip_func->block_num; i++) {
+ aca_blk = &ras_aca->aca_blk[ip_func->block_info[i]->ras_block_id];
+ aca_blk->blk_info = ip_func->block_info[i];
+ }
+
+ ras_aca->ue_updated_mark = 0;
+
+ return 0;
+}
+
+int ras_aca_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_aca *ras_aca = &ras_core->ras_aca;
+
+ ras_aca->ue_updated_mark = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h
new file mode 100644
index 000000000000..f61b02a5f0fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_ACA_H__
+#define __RAS_ACA_H__
+#include "ras.h"
+
+#define MAX_SOCKET_NUM_PER_HIVE 8
+#define MAX_AID_NUM_PER_SOCKET 4
+#define MAX_XCD_NUM_PER_AID 2
+#define MAX_ACA_RAS_BLOCK 20
+
+#define ACA_ERROR__UE_MASK (0x1 << RAS_ERR_TYPE__UE)
+#define ACA_ERROR__CE_MASK (0x1 << RAS_ERR_TYPE__CE)
+#define ACA_ERROR__DE_MASK (0x1 << RAS_ERR_TYPE__DE)
+
+enum ras_aca_reg_idx {
+ ACA_REG_IDX__CTL = 0,
+ ACA_REG_IDX__STATUS = 1,
+ ACA_REG_IDX__ADDR = 2,
+ ACA_REG_IDX__MISC0 = 3,
+ ACA_REG_IDX__CONFG = 4,
+ ACA_REG_IDX__IPID = 5,
+ ACA_REG_IDX__SYND = 6,
+ ACA_REG_IDX__DESTAT = 8,
+ ACA_REG_IDX__DEADDR = 9,
+ ACA_REG_IDX__CTL_MASK = 10,
+ ACA_REG_MAX_COUNT = 16,
+};
+
+struct ras_core_context;
+struct aca_block;
+
+struct aca_bank_reg {
+ u32 ecc_type;
+ u64 seq_no;
+ u64 regs[ACA_REG_MAX_COUNT];
+};
+
+enum aca_ecc_hwip {
+ ACA_ECC_HWIP__UNKNOWN = -1,
+ ACA_ECC_HWIP__PSP = 0,
+ ACA_ECC_HWIP__UMC,
+ ACA_ECC_HWIP__SMU,
+ ACA_ECC_HWIP__PCS_XGMI,
+ ACA_ECC_HWIP_COUNT,
+};
+
+struct aca_ecc_info {
+ int die_id;
+ int socket_id;
+ int xcd_id;
+ int hwid;
+ int mcatype;
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+};
+
+struct aca_bank_ecc {
+ struct aca_ecc_info bank_info;
+ u32 ce_count;
+ u32 ue_count;
+ u32 de_count;
+};
+
+struct aca_ecc_count {
+ u32 new_ce_count;
+ u32 total_ce_count;
+ u32 new_ue_count;
+ u32 total_ue_count;
+ u32 new_de_count;
+ u32 total_de_count;
+};
+
+struct aca_xcd_ecc {
+ struct aca_ecc_count ecc_err;
+};
+
+struct aca_aid_ecc {
+ union {
+ struct aca_xcd {
+ struct aca_xcd_ecc xcd[MAX_XCD_NUM_PER_AID];
+ u32 xcd_num;
+ } xcd;
+ struct aca_ecc_count ecc_err;
+ };
+};
+
+struct aca_socket_ecc {
+ struct aca_aid_ecc aid[MAX_AID_NUM_PER_SOCKET];
+ u32 aid_num;
+};
+
+struct aca_block_ecc {
+ struct aca_socket_ecc socket[MAX_SOCKET_NUM_PER_HIVE];
+ u32 socket_num_per_hive;
+};
+
+struct aca_bank_hw_ops {
+ bool (*bank_match)(struct aca_block *ras_blk, void *data);
+ int (*bank_parse)(struct ras_core_context *ras_core,
+ struct aca_block *aca_blk, void *data, void *buf);
+};
+
+struct aca_block_info {
+ char name[32];
+ u32 ras_block_id;
+ enum aca_ecc_hwip hwip;
+ struct aca_bank_hw_ops bank_ops;
+ u32 mask;
+};
+
+struct aca_block {
+ const struct aca_block_info *blk_info;
+ struct aca_block_ecc ecc;
+};
+
+struct ras_aca_ip_func {
+ uint32_t block_num;
+ const struct aca_block_info **block_info;
+};
+
+struct ras_aca {
+ uint32_t aca_ip_version;
+ const struct ras_aca_ip_func *ip_func;
+ struct mutex aca_lock;
+ struct mutex bank_op_lock;
+ struct aca_block aca_blk[MAX_ACA_RAS_BLOCK];
+ uint32_t ue_updated_mark;
+};
+
+int ras_aca_sw_init(struct ras_core_context *ras_core);
+int ras_aca_sw_fini(struct ras_core_context *ras_core);
+int ras_aca_hw_init(struct ras_core_context *ras_core);
+int ras_aca_hw_fini(struct ras_core_context *ras_core);
+int ras_aca_get_block_ecc_count(struct ras_core_context *ras_core, u32 blk, void *data);
+int ras_aca_clear_block_new_ecc_count(struct ras_core_context *ras_core, u32 blk);
+int ras_aca_clear_all_blocks_ecc_count(struct ras_core_context *ras_core);
+int ras_aca_update_ecc(struct ras_core_context *ras_core, u32 ecc_type, void *data);
+void ras_aca_mark_fatal_flag(struct ras_core_context *ras_core);
+void ras_aca_clear_fatal_flag(struct ras_core_context *ras_core);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c
new file mode 100644
index 000000000000..29df98948703
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_aca.h"
+#include "ras_core_status.h"
+#include "ras_aca_v1_0.h"
+
+struct ras_aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
+static struct ras_aca_hwip aca_hwid_mcatypes[ACA_ECC_HWIP_COUNT] = {
+ [ACA_ECC_HWIP__SMU] = {0x01, 0x01},
+ [ACA_ECC_HWIP__PCS_XGMI] = {0x50, 0x00},
+ [ACA_ECC_HWIP__UMC] = {0x96, 0x00},
+};
+
+static int aca_decode_bank_info(struct aca_block *aca_blk,
+ struct aca_bank_reg *bank, struct aca_ecc_info *info)
+{
+ u64 ipid;
+ u32 instidhi, instidlo;
+
+ ipid = bank->regs[ACA_REG_IDX__IPID];
+ info->hwid = ACA_REG_IPID_HARDWAREID(ipid);
+ info->mcatype = ACA_REG_IPID_MCATYPE(ipid);
+ /*
+ * Unified DieID Format: SAASS. A:AID, S:Socket.
+ * Unified DieID[4:4] = InstanceId[0:0]
+ * Unified DieID[0:3] = InstanceIdHi[0:3]
+ */
+ instidhi = ACA_REG_IPID_INSTANCEIDHI(ipid);
+ instidlo = ACA_REG_IPID_INSTANCEIDLO(ipid);
+ info->die_id = ((instidhi >> 2) & 0x03);
+ info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);
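+	/* Worked example (illustrative values): instidhi = 0xE with
+	 * instidlo bit0 = 1 decodes to die_id = 3, socket_id = 6.
+	 */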
+
+ if ((aca_blk->blk_info->hwip == ACA_ECC_HWIP__SMU) &&
+ (aca_blk->blk_info->ras_block_id == RAS_BLOCK_ID__GFX))
+ info->xcd_id =
+ ((instidlo & GENMASK_ULL(31, 1)) == mmSMNAID_XCD0_MCA_SMU) ? 0 : 1;
+
+ return 0;
+}
+
+static bool aca_check_bank_hwip(struct aca_bank_reg *bank, enum aca_ecc_hwip type)
+{
+ struct ras_aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || (type == ACA_ECC_HWIP__UNKNOWN))
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX__IPID];
+ hwid = ACA_REG_IPID_HARDWAREID(ipid);
+ mcatype = ACA_REG_IPID_MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
+static bool aca_match_bank_default(struct aca_block *aca_blk, void *data)
+{
+ return aca_check_bank_hwip((struct aca_bank_reg *)data, aca_blk->blk_info->hwip);
+}
+
+static bool aca_match_gfx_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ u32 instlo;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ switch (instlo) {
+ case mmSMNAID_XCD0_MCA_SMU:
+ case mmSMNAID_XCD1_MCA_SMU:
+ case mmSMNXCD_XCD0_MCA_SMU:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool aca_match_sdma_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ /* CODE_SDMA0 - CODE_SDMA3; refer to the SMU driver interface header file */
+ static int sdma_err_codes[] = { 33, 34, 35, 36 };
+ u32 instlo;
+ int errcode, i;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]);
+ errcode &= 0xff;
+
+ /* Check SDMA error codes */
+ for (i = 0; i < ARRAY_SIZE(sdma_err_codes); i++) {
+ if (errcode == sdma_err_codes[i])
+ return true;
+ }
+
+ return false;
+}
+
+static bool aca_match_mmhub_bank(struct aca_block *aca_blk, void *data)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ /* Refer to the SMU driver interface header file */
+ const int mmhub_err_codes[] = {
+ 0, 1, 2, 3, 4, /* CODE_DAGB0 - 4 */
+ 5, 6, 7, 8, 9, /* CODE_EA0 - 4 */
+ 10, /* CODE_UTCL2_ROUTER */
+ 11, /* CODE_VML2 */
+ 12, /* CODE_VML2_WALKER */
+ 13, /* CODE_MMCANE */
+ };
+ u32 instlo;
+ int errcode, i;
+
+ if (!aca_check_bank_hwip(bank, aca_blk->blk_info->hwip))
+ return false;
+
+ instlo = ACA_REG_IPID_INSTANCEIDLO(bank->regs[ACA_REG_IDX__IPID]);
+ instlo &= GENMASK_ULL(31, 1);
+ if (instlo != mmSMNAID_AID0_MCA_SMU)
+ return false;
+
+ errcode = ACA_REG_SYND_ERRORINFORMATION(bank->regs[ACA_REG_IDX__SYND]);
+ errcode &= 0xff;
+
+ /* Check MMHUB error codes */
+ for (i = 0; i < ARRAY_SIZE(mmhub_err_codes); i++) {
+ if (errcode == mmhub_err_codes[i])
+ return true;
+ }
+
+ return false;
+}
+
+static bool aca_check_umc_de(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ return (ras_core->poison_supported &&
+ ACA_REG_STATUS_VAL(mc_umc_status) &&
+ ACA_REG_STATUS_DEFERRED(mc_umc_status));
+}
+
+static bool aca_check_umc_ue(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ if (aca_check_umc_de(ras_core, mc_umc_status))
+ return false;
+
+ return (ACA_REG_STATUS_VAL(mc_umc_status) &&
+ (ACA_REG_STATUS_PCC(mc_umc_status) ||
+ ACA_REG_STATUS_UC(mc_umc_status) ||
+ ACA_REG_STATUS_TCC(mc_umc_status)));
+}
+
+static bool aca_check_umc_ce(struct ras_core_context *ras_core, uint64_t mc_umc_status)
+{
+ if (aca_check_umc_de(ras_core, mc_umc_status))
+ return false;
+
+ return (ACA_REG_STATUS_VAL(mc_umc_status) &&
+ (ACA_REG_STATUS_CECC(mc_umc_status) ||
+ (ACA_REG_STATUS_UECC(mc_umc_status) &&
+ ACA_REG_STATUS_UC(mc_umc_status) == 0) ||
+ /* Identify data parity error in replay mode */
+ ((ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0x5 ||
+ ACA_REG_STATUS_ERRORCODEEXT(mc_umc_status) == 0xb) &&
+ !(aca_check_umc_ue(ras_core, mc_umc_status)))));
+}
+
+static int aca_parse_umc_bank(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk, void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ uint32_t ext_error_code;
+ uint64_t status0;
+
+ status0 = bank->regs[ACA_REG_IDX__STATUS];
+ if (!ACA_REG_STATUS_VAL(status0))
+ return 0;
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS];
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status0);
+
+ if (aca_check_umc_de(ras_core, status0))
+ ecc->de_count = 1;
+ else if (aca_check_umc_ue(ras_core, status0))
+ ecc->ue_count = ext_error_code ?
+ 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+ else if (aca_check_umc_ce(ras_core, status0))
+ ecc->ce_count = ext_error_code ?
+ 1 : ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+
+ return 0;
+}
+
+static bool aca_check_bank_is_de(struct ras_core_context *ras_core,
+ uint64_t status)
+{
+ return (ACA_REG_STATUS_POISON(status) ||
+ ACA_REG_STATUS_DEFERRED(status));
+}
+
+static int aca_parse_bank_default(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk,
+ void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ u64 misc0 = bank->regs[ACA_REG_IDX__MISC0];
+ u64 status = bank->regs[ACA_REG_IDX__STATUS];
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = status;
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ if (aca_check_bank_is_de(ras_core, status)) {
+ ecc->de_count = 1;
+ } else {
+ if (bank->ecc_type == RAS_ERR_TYPE__UE)
+ ecc->ue_count = 1;
+ else if (bank->ecc_type == RAS_ERR_TYPE__CE)
+ ecc->ce_count = ACA_REG_MISC0_ERRCNT(misc0);
+ }
+
+ return 0;
+}
+
+static int aca_parse_xgmi_bank(struct ras_core_context *ras_core,
+ struct aca_block *ras_blk,
+ void *data, void *buf)
+{
+ struct aca_bank_reg *bank = (struct aca_bank_reg *)data;
+ struct aca_bank_ecc *ecc = (struct aca_bank_ecc *)buf;
+ struct aca_ecc_info bank_info;
+ u64 status, count;
+ int ext_error_code;
+
+ memset(&bank_info, 0, sizeof(bank_info));
+ aca_decode_bank_info(ras_blk, bank, &bank_info);
+ memcpy(&ecc->bank_info, &bank_info, sizeof(bank_info));
+ ecc->bank_info.status = bank->regs[ACA_REG_IDX__STATUS];
+ ecc->bank_info.ipid = bank->regs[ACA_REG_IDX__IPID];
+ ecc->bank_info.addr = bank->regs[ACA_REG_IDX__ADDR];
+
+ status = bank->regs[ACA_REG_IDX__STATUS];
+ ext_error_code = ACA_REG_STATUS_ERRORCODEEXT(status);
+
+ count = ACA_REG_MISC0_ERRCNT(bank->regs[ACA_REG_IDX__MISC0]);
+ if (bank->ecc_type == RAS_ERR_TYPE__UE) {
+ if (ext_error_code != 0 && ext_error_code != 9)
+ count = 0ULL;
+ ecc->ue_count = count;
+ } else if (bank->ecc_type == RAS_ERR_TYPE__CE) {
+ count = ext_error_code == 6 ? count : 0ULL;
+ ecc->ce_count = count;
+ }
+
+ return 0;
+}
+
+static const struct aca_block_info aca_v1_0_umc = {
+ .name = "umc",
+ .ras_block_id = RAS_BLOCK_ID__UMC,
+ .hwip = ACA_ECC_HWIP__UMC,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK | ACA_ERROR__DE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_bank_default,
+ .bank_parse = aca_parse_umc_bank,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_gfx = {
+ .name = "gfx",
+ .ras_block_id = RAS_BLOCK_ID__GFX,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_gfx_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_sdma = {
+ .name = "sdma",
+ .ras_block_id = RAS_BLOCK_ID__SDMA,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_sdma_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_mmhub = {
+ .name = "mmhub",
+ .ras_block_id = RAS_BLOCK_ID__MMHUB,
+ .hwip = ACA_ECC_HWIP__SMU,
+ .mask = ACA_ERROR__UE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_mmhub_bank,
+ .bank_parse = aca_parse_bank_default,
+ },
+};
+
+static const struct aca_block_info aca_v1_0_xgmi = {
+ .name = "xgmi",
+ .ras_block_id = RAS_BLOCK_ID__XGMI_WAFL,
+ .hwip = ACA_ECC_HWIP__PCS_XGMI,
+ .mask = ACA_ERROR__UE_MASK | ACA_ERROR__CE_MASK,
+ .bank_ops = {
+ .bank_match = aca_match_bank_default,
+ .bank_parse = aca_parse_xgmi_bank,
+ },
+};
+
+static const struct aca_block_info *aca_block_info_v1_0[] = {
+ &aca_v1_0_umc,
+ &aca_v1_0_gfx,
+ &aca_v1_0_sdma,
+ &aca_v1_0_mmhub,
+ &aca_v1_0_xgmi,
+};
+
+const struct ras_aca_ip_func ras_aca_func_v1_0 = {
+ .block_num = ARRAY_SIZE(aca_block_info_v1_0),
+ .block_info = aca_block_info_v1_0,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h
new file mode 100644
index 000000000000..40e5d94b037f
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_aca_v1_0.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_ACA_V1_0_H__
+#define __RAS_ACA_V1_0_H__
+#include "ras.h"
+
+#define ACA__REG__FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
+#define ACA_REG_STATUS_VAL(x) ACA__REG__FIELD(x, 63, 63)
+#define ACA_REG_STATUS_OVERFLOW(x) ACA__REG__FIELD(x, 62, 62)
+#define ACA_REG_STATUS_UC(x) ACA__REG__FIELD(x, 61, 61)
+#define ACA_REG_STATUS_EN(x) ACA__REG__FIELD(x, 60, 60)
+#define ACA_REG_STATUS_MISCV(x) ACA__REG__FIELD(x, 59, 59)
+#define ACA_REG_STATUS_ADDRV(x) ACA__REG__FIELD(x, 58, 58)
+#define ACA_REG_STATUS_PCC(x) ACA__REG__FIELD(x, 57, 57)
+#define ACA_REG_STATUS_ERRCOREIDVAL(x) ACA__REG__FIELD(x, 56, 56)
+#define ACA_REG_STATUS_TCC(x) ACA__REG__FIELD(x, 55, 55)
+#define ACA_REG_STATUS_SYNDV(x) ACA__REG__FIELD(x, 53, 53)
+#define ACA_REG_STATUS_CECC(x) ACA__REG__FIELD(x, 46, 46)
+#define ACA_REG_STATUS_UECC(x) ACA__REG__FIELD(x, 45, 45)
+#define ACA_REG_STATUS_DEFERRED(x) ACA__REG__FIELD(x, 44, 44)
+#define ACA_REG_STATUS_POISON(x) ACA__REG__FIELD(x, 43, 43)
+#define ACA_REG_STATUS_SCRUB(x) ACA__REG__FIELD(x, 40, 40)
+#define ACA_REG_STATUS_ERRCOREID(x) ACA__REG__FIELD(x, 37, 32)
+#define ACA_REG_STATUS_ADDRLSB(x) ACA__REG__FIELD(x, 29, 24)
+#define ACA_REG_STATUS_ERRORCODEEXT(x) ACA__REG__FIELD(x, 21, 16)
+#define ACA_REG_STATUS_ERRORCODE(x) ACA__REG__FIELD(x, 15, 0)
+
+#define ACA_REG_IPID_MCATYPE(x) ACA__REG__FIELD(x, 63, 48)
+#define ACA_REG_IPID_INSTANCEIDHI(x) ACA__REG__FIELD(x, 47, 44)
+#define ACA_REG_IPID_HARDWAREID(x) ACA__REG__FIELD(x, 43, 32)
+#define ACA_REG_IPID_INSTANCEIDLO(x) ACA__REG__FIELD(x, 31, 0)
+
+#define ACA_REG_MISC0_VALID(x) ACA__REG__FIELD(x, 63, 63)
+#define ACA_REG_MISC0_OVRFLW(x) ACA__REG__FIELD(x, 48, 48)
+#define ACA_REG_MISC0_ERRCNT(x) ACA__REG__FIELD(x, 43, 32)
+
+#define ACA_REG_SYND_ERRORINFORMATION(x) ACA__REG__FIELD(x, 17, 0)
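+
+/*
+ * Example, for illustration: with a STATUS value of 0x00250000ULL,
+ * ACA_REG_STATUS_ERRORCODEEXT() yields 0x25 (bits [21:16]) and
+ * ACA_REG_STATUS_ERRORCODE() yields 0 (bits [15:0]).
+ */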
+
+/* NOTE: The following codes refer to the SMU driver interface header file */
+#define ACA_EXTERROR_CODE_CE 0x3a
+#define ACA_EXTERROR_CODE_FAULT 0x3b
+
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */
+
+extern const struct ras_aca_ip_func ras_aca_func_v1_0;
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c
new file mode 100644
index 000000000000..94e6d7420d94
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_cmd.h"
+
+#define RAS_CMD_MAJOR_VERSION 6
+#define RAS_CMD_MINOR_VERSION 0
+#define RAS_CMD_VERSION (((RAS_CMD_MAJOR_VERSION) << 10) | (RAS_CMD_MINOR_VERSION))
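+/* Packed version layout: major in bits [15:10], minor in bits [9:0]; 6.0 -> 0x1800 */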
+
+static int ras_cmd_add_device(struct ras_core_context *ras_core)
+{
+ INIT_LIST_HEAD(&ras_core->ras_cmd.head);
+ ras_core->ras_cmd.ras_core = ras_core;
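+ /*
+ * The XOR lightly obfuscates the kernel pointer; XORing the handle
+ * with the same magic recovers the original ras_core address.
+ */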
+ ras_core->ras_cmd.dev_handle = (uintptr_t)ras_core ^ RAS_CMD_DEV_HANDLE_MAGIC;
+ return 0;
+}
+
+static int ras_cmd_remove_device(struct ras_core_context *ras_core)
+{
+ memset(&ras_core->ras_cmd, 0, sizeof(ras_core->ras_cmd));
+ return 0;
+}
+
+static int ras_get_block_ecc_info(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_block_ecc_info_req *input_data =
+ (struct ras_cmd_block_ecc_info_req *)cmd->input_buff_raw;
+ struct ras_cmd_block_ecc_info_rsp *output_data =
+ (struct ras_cmd_block_ecc_info_rsp *)cmd->output_buff_raw;
+ struct ras_ecc_count err_data;
+ int ret;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_block_ecc_info_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ memset(&err_data, 0, sizeof(err_data));
+ ret = ras_aca_get_block_ecc_count(ras_core, input_data->block_id, &err_data);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ output_data->ce_count = err_data.total_ce_count;
+ output_data->ue_count = err_data.total_ue_count;
+ output_data->de_count = err_data.total_de_count;
+
+ cmd->output_size = sizeof(struct ras_cmd_block_ecc_info_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static void ras_cmd_update_bad_page_info(struct ras_cmd_bad_page_record *ras_cmd_record,
+ struct eeprom_umc_record *record)
+{
+ ras_cmd_record->retired_page = record->cur_nps_retired_row_pfn;
+ ras_cmd_record->ts = record->ts;
+ ras_cmd_record->err_type = record->err_type;
+ ras_cmd_record->mem_channel = record->mem_channel;
+ ras_cmd_record->mcumc_id = record->mcumc_id;
+ ras_cmd_record->address = record->address;
+ ras_cmd_record->bank = record->bank;
+ ras_cmd_record->valid = 1;
+}
+
+static int ras_cmd_get_group_bad_pages(struct ras_core_context *ras_core,
+ uint32_t group_index, struct ras_cmd_bad_pages_info_rsp *output_data)
+{
+ struct eeprom_umc_record record;
+ struct ras_cmd_bad_page_record *ras_cmd_record;
+ uint32_t i = 0, bp_cnt = 0, group_cnt = 0;
+
+ output_data->bp_in_group = 0;
+ output_data->group_index = 0;
+
+ bp_cnt = ras_umc_get_badpage_count(ras_core);
+ if (bp_cnt) {
+ output_data->group_index = group_index;
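+ /* e.g. 70 bad pages in groups of 32 -> 3 groups of 32, 32 and 6 */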
+ group_cnt = DIV_ROUND_UP(bp_cnt, RAS_CMD_MAX_BAD_PAGES_PER_GROUP);
+
+ if (group_index >= group_cnt)
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ i = group_index * RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ for (;
+ i < bp_cnt && output_data->bp_in_group < RAS_CMD_MAX_BAD_PAGES_PER_GROUP;
+ i++) {
+ if (ras_umc_get_badpage_record(ras_core, i, &record))
+ return RAS_CMD__ERROR_GENERIC;
+
+ ras_cmd_record = &output_data->records[i % RAS_CMD_MAX_BAD_PAGES_PER_GROUP];
+
+ memset(ras_cmd_record, 0, sizeof(*ras_cmd_record));
+ ras_cmd_update_bad_page_info(ras_cmd_record, &record);
+ output_data->bp_in_group++;
+ }
+ }
+ output_data->bp_total_cnt = bp_cnt;
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_bad_pages(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_bad_pages_info_req *input_data =
+ (struct ras_cmd_bad_pages_info_req *)cmd->input_buff_raw;
+ struct ras_cmd_bad_pages_info_rsp *output_data =
+ (struct ras_cmd_bad_pages_info_rsp *)cmd->output_buff_raw;
+ int ret;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_bad_pages_info_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ret = ras_cmd_get_group_bad_pages(ras_core, input_data->group_index, output_data);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_bad_pages_info_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_clear_bad_page_info(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ if (cmd->input_size != sizeof(struct ras_cmd_dev_handle))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (ras_eeprom_reset_table(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ if (ras_umc_clean_badpage_data(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_reset_all_error_counts(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ if (cmd->input_size != sizeof(struct ras_cmd_dev_handle))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (ras_aca_clear_all_blocks_ecc_count(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ if (ras_umc_clear_logged_ecc(ras_core))
+ return RAS_CMD__ERROR_GENERIC;
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_cper_snapshot(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_cper_snapshot_rsp *output_data =
+ (struct ras_cmd_cper_snapshot_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_cper_snapshot_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+
+ output_data->total_cper_num = overview.logged_batch_count;
+ output_data->start_cper_id = overview.first_batch_id;
+ output_data->latest_cper_id = overview.last_batch_id;
+
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_cper_snapshot_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_cper_records(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_cper_record_req *req =
+ (struct ras_cmd_cper_record_req *)cmd->input_buff_raw;
+ struct ras_cmd_cper_record_rsp *rsp =
+ (struct ras_cmd_cper_record_rsp *)cmd->output_buff_raw;
+ struct ras_log_info *trace[MAX_RECORD_PER_BATCH] = {0};
+ struct ras_log_batch_overview overview;
+ uint32_t offset = 0, real_data_len = 0;
+ uint64_t batch_id;
+ uint8_t *buffer;
+ int ret = 0, i, count;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_cper_record_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if (!req->buf_size || !req->buf_ptr || !req->cper_num)
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ buffer = kzalloc(req->buf_size, GFP_KERNEL);
+ if (!buffer)
+ return RAS_CMD__ERROR_GENERIC;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+ for (i = 0; i < req->cper_num; i++) {
+ batch_id = req->cper_start_id + i;
+ if (batch_id >= overview.last_batch_id)
+ break;
+
+ count = ras_log_ring_get_batch_records(ras_core, batch_id, trace,
+ ARRAY_SIZE(trace));
+ if (count > 0) {
+ ret = ras_cper_generate_cper(ras_core, trace, count,
+ &buffer[offset], req->buf_size - offset, &real_data_len);
+ if (ret)
+ break;
+
+ offset += real_data_len;
+ }
+ }
+
+ if ((ret && (ret != -ENOMEM)) ||
+ copy_to_user(u64_to_user_ptr(req->buf_ptr), buffer, offset)) {
+ kfree(buffer);
+ return RAS_CMD__ERROR_GENERIC;
+ }
+
+ rsp->real_data_size = offset;
+ rsp->real_cper_num = i;
+ rsp->remain_num = (ret == -ENOMEM) ? (req->cper_num - i) : 0;
+ rsp->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_cper_record_rsp);
+
+ kfree(buffer);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_batch_trace_snapshot(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_batch_trace_snapshot_rsp *rsp =
+ (struct ras_cmd_batch_trace_snapshot_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_snapshot_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+
+ rsp->total_batch_num = overview.logged_batch_count;
+ rsp->start_batch_id = overview.first_batch_id;
+ rsp->latest_batch_id = overview.last_batch_id;
+ rsp->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_batch_trace_snapshot_rsp);
+ return RAS_CMD__SUCCESS;
+}
+
+static int ras_cmd_get_batch_trace_records(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_batch_trace_record_req *input_data =
+ (struct ras_cmd_batch_trace_record_req *)cmd->input_buff_raw;
+ struct ras_cmd_batch_trace_record_rsp *output_data =
+ (struct ras_cmd_batch_trace_record_rsp *)cmd->output_buff_raw;
+ struct ras_log_batch_overview overview;
+ struct ras_log_info *trace_array[MAX_RECORD_PER_BATCH] = {0};
+ struct ras_log_info *record;
+ int i, j, count = 0, offset = 0;
+ uint64_t id;
+ bool completed = false;
+
+ if (cmd->input_size != sizeof(struct ras_cmd_batch_trace_record_req))
+ return RAS_CMD__ERROR_INVALID_INPUT_SIZE;
+
+ if ((!input_data->batch_num) || (input_data->batch_num > RAS_CMD_MAX_BATCH_NUM))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ ras_log_ring_get_batch_overview(ras_core, &overview);
+ if ((input_data->start_batch_id < overview.first_batch_id) ||
+ (input_data->start_batch_id >= overview.last_batch_id))
+ return RAS_CMD__ERROR_INVALID_INPUT_DATA;
+
+ for (i = 0; i < input_data->batch_num; i++) {
+ id = input_data->start_batch_id + i;
+ if (id >= overview.last_batch_id) {
+ completed = true;
+ break;
+ }
+
+ count = ras_log_ring_get_batch_records(ras_core,
+ id, trace_array, ARRAY_SIZE(trace_array));
+ if (count > 0) {
+ if ((offset + count) > RAS_CMD_MAX_TRACE_NUM)
+ break;
+ for (j = 0; j < count; j++) {
+ record = &output_data->records[offset + j];
+ record->seqno = trace_array[j]->seqno;
+ record->timestamp = trace_array[j]->timestamp;
+ record->event = trace_array[j]->event;
+ memcpy(&record->aca_reg,
+ &trace_array[j]->aca_reg, sizeof(trace_array[j]->aca_reg));
+ }
+ } else {
+ count = 0;
+ }
+
+ output_data->batches[i].batch_id = id;
+ output_data->batches[i].offset = offset;
+ output_data->batches[i].trace_num = count;
+ offset += count;
+ }
+
+ output_data->start_batch_id = input_data->start_batch_id;
+ output_data->real_batch_num = i;
+ output_data->remain_num = completed ? 0 : (input_data->batch_num - i);
+ output_data->version = 0;
+
+ cmd->output_size = sizeof(struct ras_cmd_batch_trace_record_rsp);
+
+ return RAS_CMD__SUCCESS;
+}
+
+static enum ras_ta_block __get_ras_ta_block(enum ras_block_id block)
+{
+ switch (block) {
+ case RAS_BLOCK_ID__UMC:
+ return RAS_TA_BLOCK__UMC;
+ case RAS_BLOCK_ID__SDMA:
+ return RAS_TA_BLOCK__SDMA;
+ case RAS_BLOCK_ID__GFX:
+ return RAS_TA_BLOCK__GFX;
+ case RAS_BLOCK_ID__MMHUB:
+ return RAS_TA_BLOCK__MMHUB;
+ case RAS_BLOCK_ID__ATHUB:
+ return RAS_TA_BLOCK__ATHUB;
+ case RAS_BLOCK_ID__PCIE_BIF:
+ return RAS_TA_BLOCK__PCIE_BIF;
+ case RAS_BLOCK_ID__HDP:
+ return RAS_TA_BLOCK__HDP;
+ case RAS_BLOCK_ID__XGMI_WAFL:
+ return RAS_TA_BLOCK__XGMI_WAFL;
+ case RAS_BLOCK_ID__DF:
+ return RAS_TA_BLOCK__DF;
+ case RAS_BLOCK_ID__SMN:
+ return RAS_TA_BLOCK__SMN;
+ case RAS_BLOCK_ID__SEM:
+ return RAS_TA_BLOCK__SEM;
+ case RAS_BLOCK_ID__MP0:
+ return RAS_TA_BLOCK__MP0;
+ case RAS_BLOCK_ID__MP1:
+ return RAS_TA_BLOCK__MP1;
+ case RAS_BLOCK_ID__FUSE:
+ return RAS_TA_BLOCK__FUSE;
+ case RAS_BLOCK_ID__MCA:
+ return RAS_TA_BLOCK__MCA;
+ case RAS_BLOCK_ID__VCN:
+ return RAS_TA_BLOCK__VCN;
+ case RAS_BLOCK_ID__JPEG:
+ return RAS_TA_BLOCK__JPEG;
+ default:
+ return RAS_TA_BLOCK__UMC;
+ }
+}
+
+static enum ras_ta_error_type __get_ras_ta_err_type(enum ras_ecc_err_type error)
+{
+ switch (error) {
+ case RAS_ECC_ERR__NONE:
+ return RAS_TA_ERROR__NONE;
+ case RAS_ECC_ERR__PARITY:
+ return RAS_TA_ERROR__PARITY;
+ case RAS_ECC_ERR__SINGLE_CORRECTABLE:
+ return RAS_TA_ERROR__SINGLE_CORRECTABLE;
+ case RAS_ECC_ERR__MULTI_UNCORRECTABLE:
+ return RAS_TA_ERROR__MULTI_UNCORRECTABLE;
+ case RAS_ECC_ERR__POISON:
+ return RAS_TA_ERROR__POISON;
+ default:
+ return RAS_TA_ERROR__NONE;
+ }
+}
+
+static int ras_cmd_inject_error(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_inject_error_req *req =
+ (struct ras_cmd_inject_error_req *)cmd->input_buff_raw;
+ struct ras_cmd_inject_error_rsp *output_data =
+ (struct ras_cmd_inject_error_rsp *)cmd->output_buff_raw;
+ int ret = 0;
+ struct ras_ta_trigger_error_input block_info = {
+ .block_id = __get_ras_ta_block(req->block_id),
+ .sub_block_index = req->subblock_id,
+ .inject_error_type = __get_ras_ta_err_type(req->error_type),
+ .address = req->address,
+ .value = req->method,
+ };
+
+ ret = ras_psp_trigger_error(ras_core, &block_info, req->instance_mask);
+ if (!ret) {
+ output_data->version = 0;
+ output_data->address = block_info.address;
+ cmd->output_size = sizeof(struct ras_cmd_inject_error_rsp);
+ } else {
+ RAS_DEV_ERR(ras_core->dev, "ras inject block %u failed %d\n", req->block_id, ret);
+ ret = RAS_CMD__ERROR_ACCESS_DENIED;
+ }
+
+ return ret;
+}
+
+static struct ras_cmd_func_map ras_cmd_maps[] = {
+ {RAS_CMD__INJECT_ERROR, ras_cmd_inject_error},
+ {RAS_CMD__GET_BLOCK_ECC_STATUS, ras_get_block_ecc_info},
+ {RAS_CMD__GET_BAD_PAGES, ras_cmd_get_bad_pages},
+ {RAS_CMD__CLEAR_BAD_PAGE_INFO, ras_cmd_clear_bad_page_info},
+ {RAS_CMD__RESET_ALL_ERROR_COUNTS, ras_cmd_reset_all_error_counts},
+ {RAS_CMD__GET_CPER_SNAPSHOT, ras_cmd_get_cper_snapshot},
+ {RAS_CMD__GET_CPER_RECORD, ras_cmd_get_cper_records},
+ {RAS_CMD__GET_BATCH_TRACE_SNAPSHOT, ras_cmd_get_batch_trace_snapshot},
+ {RAS_CMD__GET_BATCH_TRACE_RECORD, ras_cmd_get_batch_trace_records},
+};
+
+int rascore_handle_cmd(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data)
+{
+ struct ras_cmd_func_map *ras_cmd = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ras_cmd_maps); i++) {
+ if (cmd->cmd_id == ras_cmd_maps[i].cmd_id) {
+ ras_cmd = &ras_cmd_maps[i];
+ break;
+ }
+ }
+
+ if (!ras_cmd)
+ return RAS_CMD__ERROR_UNKNOWN_CMD;
+
+ return ras_cmd->func(ras_core, cmd, data);
+}
+
+int ras_cmd_init(struct ras_core_context *ras_core)
+{
+ return ras_cmd_add_device(ras_core);
+}
+
+int ras_cmd_fini(struct ras_core_context *ras_core)
+{
+ ras_cmd_remove_device(ras_core);
+ return 0;
+}
+
+int ras_cmd_query_interface_info(struct ras_core_context *ras_core,
+ struct ras_query_interface_info_rsp *rsp)
+{
+ rsp->ras_cmd_major_ver = RAS_CMD_MAJOR_VERSION;
+ rsp->ras_cmd_minor_ver = RAS_CMD_MINOR_VERSION;
+
+ return 0;
+}
+
+int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr)
+{
+ struct umc_bank_addr umc_bank = {0};
+ int ret;
+
+ ret = ras_umc_translate_soc_pa_and_bank(ras_core, &soc_pa, &umc_bank, false);
+ if (ret)
+ return RAS_CMD__ERROR_GENERIC;
+
+ bank_addr->stack_id = umc_bank.stack_id;
+ bank_addr->bank_group = umc_bank.bank_group;
+ bank_addr->bank = umc_bank.bank;
+ bank_addr->row = umc_bank.row;
+ bank_addr->column = umc_bank.column;
+ bank_addr->channel = umc_bank.channel;
+ bank_addr->subchannel = umc_bank.subchannel;
+
+ return 0;
+}
+
+int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa)
+{
+ struct umc_bank_addr umc_bank = {0};
+
+ umc_bank.stack_id = bank_addr.stack_id;
+ umc_bank.bank_group = bank_addr.bank_group;
+ umc_bank.bank = bank_addr.bank;
+ umc_bank.row = bank_addr.row;
+ umc_bank.column = bank_addr.column;
+ umc_bank.channel = bank_addr.channel;
+ umc_bank.subchannel = bank_addr.subchannel;
+
+ return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, &umc_bank, true);
+}
+
+uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_cmd.dev_handle;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h
new file mode 100644
index 000000000000..48a0715eb821
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cmd.h
@@ -0,0 +1,426 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_CMD_H__
+#define __RAS_CMD_H__
+#include "ras.h"
+#include "ras_eeprom.h"
+#include "ras_log_ring.h"
+#include "ras_cper.h"
+
+#define RAS_CMD_DEV_HANDLE_MAGIC 0xFEEDAD00UL
+
+#define RAS_CMD_MAX_IN_SIZE 256
+#define RAS_CMD_MAX_GPU_NUM 32
+#define RAS_CMD_MAX_BAD_PAGES_PER_GROUP 32
+
+/* Position of the instance value in the sub_block_index field of
+ * ta_ras_trigger_error_input; the sub-block uses the lower 12 bits.
+ */
+#define RAS_TA_INST_MASK 0xfffff000
+#define RAS_TA_INST_SHIFT 0xc
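+/* e.g. instance 3 with sub-block 5 encodes as (3 << RAS_TA_INST_SHIFT) | 5 == 0x3005 */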
+
+enum ras_cmd_interface_type {
+ RAS_CMD_INTERFACE_TYPE_NONE,
+ RAS_CMD_INTERFACE_TYPE_AMDGPU,
+ RAS_CMD_INTERFACE_TYPE_VF,
+ RAS_CMD_INTERFACE_TYPE_PF,
+};
+
+enum ras_cmd_id_range {
+ RAS_CMD_ID_COMMON_START = 0,
+ RAS_CMD_ID_COMMON_END = 0x10000,
+ RAS_CMD_ID_AMDGPU_START = RAS_CMD_ID_COMMON_END,
+ RAS_CMD_ID_AMDGPU_END = 0x20000,
+ RAS_CMD_ID_MXGPU_START = RAS_CMD_ID_AMDGPU_END,
+ RAS_CMD_ID_MXGPU_END = 0x30000,
+ RAS_CMD_ID_MXGPU_VF_START = RAS_CMD_ID_MXGPU_END,
+ RAS_CMD_ID_MXGPU_VF_END = 0x40000,
+};
+
+enum ras_cmd_id {
+ RAS_CMD__BEGIN = RAS_CMD_ID_COMMON_START,
+ RAS_CMD__QUERY_INTERFACE_INFO,
+ RAS_CMD__GET_DEVICES_INFO,
+ RAS_CMD__GET_BLOCK_ECC_STATUS,
+ RAS_CMD__INJECT_ERROR,
+ RAS_CMD__GET_BAD_PAGES,
+ RAS_CMD__CLEAR_BAD_PAGE_INFO,
+ RAS_CMD__RESET_ALL_ERROR_COUNTS,
+ RAS_CMD__GET_SAFE_FB_ADDRESS_RANGES,
+ RAS_CMD__TRANSLATE_FB_ADDRESS,
+ RAS_CMD__GET_LINK_TOPOLOGY,
+ RAS_CMD__GET_CPER_SNAPSHOT,
+ RAS_CMD__GET_CPER_RECORD,
+ RAS_CMD__GET_BATCH_TRACE_SNAPSHOT,
+ RAS_CMD__GET_BATCH_TRACE_RECORD,
+ RAS_CMD__SUPPORTED_MAX = RAS_CMD_ID_COMMON_END,
+};
+
+enum ras_cmd_response {
+ RAS_CMD__SUCCESS = 0,
+ RAS_CMD__SUCCESS_EXCEED_BUFFER,
+ RAS_CMD__ERROR_UNKNOWN_CMD,
+ RAS_CMD__ERROR_INVALID_CMD,
+ RAS_CMD__ERROR_VERSION,
+ RAS_CMD__ERROR_INVALID_INPUT_SIZE,
+ RAS_CMD__ERROR_INVALID_INPUT_DATA,
+ RAS_CMD__ERROR_DRV_INIT_FAIL,
+ RAS_CMD__ERROR_ACCESS_DENIED,
+ RAS_CMD__ERROR_GENERIC,
+ RAS_CMD__ERROR_TIMEOUT,
+};
+
+enum ras_error_type {
+ RAS_TYPE_ERROR__NONE = 0,
+ RAS_TYPE_ERROR__PARITY = 1,
+ RAS_TYPE_ERROR__SINGLE_CORRECTABLE = 2,
+ RAS_TYPE_ERROR__MULTI_UNCORRECTABLE = 4,
+ RAS_TYPE_ERROR__POISON = 8,
+};
+
+struct ras_core_context;
+struct ras_cmd_ctx;
+
+struct ras_cmd_mgr {
+ struct list_head head;
+ struct ras_core_context *ras_core;
+ uint64_t dev_handle;
+};
+
+struct ras_cmd_func_map {
+ uint32_t cmd_id;
+ int (*func)(struct ras_core_context *ras_core,
+ struct ras_cmd_ctx *cmd, void *data);
+};
+
+struct ras_device_bdf {
+ union {
+ struct {
+ uint32_t function : 3;
+ uint32_t device : 5;
+ uint32_t bus : 8;
+ uint32_t domain : 16;
+ };
+ uint32_t u32_all;
+ };
+};
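+
+/* Example, for illustration: BDF 0000:03:00.1 packs to u32_all == 0x301 */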
+
+struct ras_cmd_param {
+ uint32_t idx_vf;
+ void *data;
+};
+
+#pragma pack(push, 8)
+struct ras_cmd_ctx {
+ uint32_t magic;
+ union {
+ struct {
+ uint16_t ras_cmd_minor_ver : 10;
+ uint16_t ras_cmd_major_ver : 6;
+ };
+ uint16_t ras_cmd_ver;
+ };
+ union {
+ struct {
+ uint16_t plat_major_ver : 10;
+ uint16_t plat_minor_ver : 6;
+ };
+ uint16_t plat_ver;
+ };
+ uint32_t cmd_id;
+ uint32_t cmd_res;
+ uint32_t input_size;
+ uint32_t output_size;
+ uint32_t output_buf_size;
+ uint32_t reserved[5];
+ uint8_t input_buff_raw[RAS_CMD_MAX_IN_SIZE];
+ uint8_t output_buff_raw[];
+};
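+
+/*
+ * output_buff_raw is a flexible array: a full command buffer is
+ * presumably sizeof(struct ras_cmd_ctx) + output_buf_size bytes,
+ * with output_buf_size describing the capacity of the output area.
+ */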
+
+struct ras_cmd_dev_handle {
+ uint64_t dev_handle;
+};
+
+struct ras_cmd_block_ecc_info_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t block_id;
+ uint32_t subblock_id;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_block_ecc_info_rsp {
+ uint32_t version;
+ uint32_t ce_count;
+ uint32_t ue_count;
+ uint32_t de_count;
+ uint32_t reserved[6];
+};
+
+struct ras_cmd_inject_error_req {
+ struct ras_cmd_dev_handle dev;
+ uint32_t block_id;
+ uint32_t subblock_id;
+ uint64_t address;
+ uint32_t error_type;
+ uint32_t instance_mask;
+ union {
+ struct {
+ /* vf index */
+ uint64_t vf_idx : 6;
+ /* method of error injection, e.g. persistent, coherent, etc. */
+ uint64_t method : 10;
+ uint64_t rsv : 48;
+ };
+ uint64_t value;
+ };
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_inject_error_rsp {
+ uint32_t version;
+ uint32_t reserved[5];
+ uint64_t address;
+};
+
+struct ras_cmd_dev_info {
+ uint64_t dev_handle;
+ uint32_t location_id;
+ uint32_t ecc_enabled;
+ uint32_t ecc_supported;
+ uint32_t vf_num;
+ uint32_t asic_type;
+ uint32_t oam_id;
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_devices_info_rsp {
+ uint32_t version;
+ uint32_t dev_num;
+ uint32_t reserved[6];
+ struct ras_cmd_dev_info devs[RAS_CMD_MAX_GPU_NUM];
+};
+
+struct ras_cmd_bad_page_record {
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+ uint64_t retired_page;
+ uint64_t ts;
+
+ uint32_t err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+
+ unsigned char valid;
+ unsigned char reserved[8];
+};
+
+struct ras_cmd_bad_pages_info_req {
+ struct ras_cmd_dev_handle device;
+ uint32_t group_index;
+ uint32_t reserved[5];
+};
+
+struct ras_cmd_bad_pages_info_rsp {
+ uint32_t version;
+ uint32_t group_index;
+ uint32_t bp_in_group;
+ uint32_t bp_total_cnt;
+ uint32_t reserved[4];
+ struct ras_cmd_bad_page_record records[RAS_CMD_MAX_BAD_PAGES_PER_GROUP];
+};
+
+struct ras_query_interface_info_req {
+ uint32_t reserved[8];
+};
+
+struct ras_query_interface_info_rsp {
+ uint32_t version;
+ uint32_t ras_cmd_major_ver;
+ uint32_t ras_cmd_minor_ver;
+ uint32_t plat_major_ver;
+ uint32_t plat_minor_ver;
+ uint8_t interface_type;
+ uint8_t rsv[3];
+ uint32_t reserved[8];
+};
+
+#define RAS_MAX_NUM_SAFE_RANGES 64
+struct ras_cmd_ras_safe_fb_address_ranges_rsp {
+ uint32_t version;
+ uint32_t num_ranges;
+ uint32_t reserved[4];
+ struct {
+ uint64_t start;
+ uint64_t size;
+ uint32_t idx;
+ uint32_t reserved[3];
+ } range[RAS_MAX_NUM_SAFE_RANGES];
+};
+
+enum ras_fb_addr_type {
+ RAS_FB_ADDR_SOC_PHY, /* SPA */
+ RAS_FB_ADDR_BANK,
+ RAS_FB_ADDR_VF_PHY, /* GPA */
+ RAS_FB_ADDR_UNKNOWN
+};
+
+struct ras_fb_bank_addr {
+ uint32_t stack_id; /* SID */
+ uint32_t bank_group;
+ uint32_t bank;
+ uint32_t row;
+ uint32_t column;
+ uint32_t channel;
+ uint32_t subchannel; /* Also called Pseudochannel (PC) */
+ uint32_t reserved[3];
+};
+
+struct ras_fb_vf_phy_addr {
+ uint32_t vf_idx;
+ uint32_t reserved;
+ uint64_t addr;
+};
+
+union ras_translate_fb_address {
+ struct ras_fb_bank_addr bank_addr;
+ uint64_t soc_phy_addr;
+ struct ras_fb_vf_phy_addr vf_phy_addr;
+};
+
+struct ras_cmd_translate_fb_address_req {
+ struct ras_cmd_dev_handle dev;
+ enum ras_fb_addr_type src_addr_type;
+ enum ras_fb_addr_type dest_addr_type;
+ union ras_translate_fb_address trans_addr;
+};
+
+struct ras_cmd_translate_fb_address_rsp {
+ uint32_t version;
+ uint32_t reserved[5];
+ union ras_translate_fb_address trans_addr;
+};
+
+struct ras_dev_link_topology_req {
+ struct ras_cmd_dev_handle src;
+ struct ras_cmd_dev_handle dst;
+};
+
+struct ras_dev_link_topology_rsp {
+ uint32_t version;
+ uint32_t link_status; /* HW status of the link */
+ uint32_t link_type; /* type of the link */
+ uint32_t num_hops; /* number of hops */
+ uint32_t reserved[8];
+};
+
+struct ras_cmd_cper_snapshot_req {
+ struct ras_cmd_dev_handle dev;
+};
+
+struct ras_cmd_cper_snapshot_rsp {
+ uint32_t version;
+ uint32_t reserved[4];
+ uint32_t total_cper_num;
+ uint64_t start_cper_id;
+ uint64_t latest_cper_id;
+};
+
+struct ras_cmd_cper_record_req {
+ struct ras_cmd_dev_handle dev;
+ uint64_t cper_start_id;
+ uint32_t cper_num;
+ uint32_t buf_size;
+ uint64_t buf_ptr;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_cper_record_rsp {
+ uint32_t version;
+ uint32_t real_data_size;
+ uint32_t real_cper_num;
+ uint32_t remain_num;
+ uint32_t reserved[4];
+};
+
+struct ras_cmd_batch_trace_snapshot_req {
+ struct ras_cmd_dev_handle dev;
+};
+
+struct ras_cmd_batch_trace_snapshot_rsp {
+ uint32_t version;
+ uint32_t reserved[4];
+ uint32_t total_batch_num;
+ uint64_t start_batch_id;
+ uint64_t latest_batch_id;
+};
+
+struct ras_cmd_batch_trace_record_req {
+ struct ras_cmd_dev_handle dev;
+ uint64_t start_batch_id;
+ uint32_t batch_num;
+ uint32_t reserved[5];
+};
+
+struct batch_ras_trace_info {
+ uint64_t batch_id;
+ uint16_t offset;
+ uint8_t trace_num;
+ uint8_t rsv;
+ uint32_t reserved;
+};
+
+#define RAS_CMD_MAX_BATCH_NUM 300
+#define RAS_CMD_MAX_TRACE_NUM 300
+struct ras_cmd_batch_trace_record_rsp {
+ uint32_t version;
+ uint16_t real_batch_num;
+ uint16_t remain_num;
+ uint64_t start_batch_id;
+ uint32_t reserved[2];
+ struct batch_ras_trace_info batches[RAS_CMD_MAX_BATCH_NUM];
+ struct ras_log_info records[RAS_CMD_MAX_TRACE_NUM];
+};
+
+#pragma pack(pop)
+
+int ras_cmd_init(struct ras_core_context *ras_core);
+int ras_cmd_fini(struct ras_core_context *ras_core);
+int rascore_handle_cmd(struct ras_core_context *ras_core, struct ras_cmd_ctx *cmd, void *data);
+uint64_t ras_cmd_get_dev_handle(struct ras_core_context *ras_core);
+int ras_cmd_query_interface_info(struct ras_core_context *ras_core,
+ struct ras_query_interface_info_rsp *rsp);
+int ras_cmd_translate_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct ras_fb_bank_addr *bank_addr);
+int ras_cmd_translate_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct ras_fb_bank_addr bank_addr, uint64_t *soc_pa);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core.c b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
new file mode 100644
index 000000000000..01122b55c98a
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+
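+/* Each FIFO holds up to 128 64-bit sequence numbers (1 KiB) */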
+#define RAS_SEQNO_FIFO_SIZE (128 * sizeof(uint64_t))
+
+#define IS_LEAP_YEAR(x) ((((x) % 4 == 0) && ((x) % 100 != 0)) || ((x) % 400 == 0))
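+/* e.g. 2000 and 2024 are leap years, 1900 and 2025 are not */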
+
+static const char * const ras_block_name[] = {
+ "umc",
+ "sdma",
+ "gfx",
+ "mmhub",
+ "athub",
+ "pcie_bif",
+ "hdp",
+ "xgmi_wafl",
+ "df",
+ "smn",
+ "sem",
+ "mp0",
+ "mp1",
+ "fuse",
+ "mca",
+ "vcn",
+ "jpeg",
+ "ih",
+ "mpio",
+};
+
+const char *ras_core_get_ras_block_name(enum ras_block_id block_id)
+{
+ if (block_id >= ARRAY_SIZE(ras_block_name))
+ return "";
+
+ return ras_block_name[block_id];
+}
+
+int ras_core_convert_timestamp_to_time(struct ras_core_context *ras_core,
+ uint64_t timestamp, struct ras_time *tm)
+{
+ int days_in_month[] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
+ uint64_t month = 0, day = 0, hour = 0, minute = 0, second = 0;
+ uint32_t year = 0;
+ int seconds_per_day = 24 * 60 * 60;
+ int seconds_per_hour = 60 * 60;
+ int seconds_per_minute = 60;
+ uint64_t days;
+ uint32_t remaining_seconds;
+
+ days = div_u64_rem(timestamp, seconds_per_day, &remaining_seconds);
+
+ /* utc_timestamp follows the Unix epoch */
+ year = 1970;
+ while (days >= 365) {
+ if (IS_LEAP_YEAR(year)) {
+ if (days < 366)
+ break;
+ days -= 366;
+ } else {
+ days -= 365;
+ }
+ year++;
+ }
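+ /* 'days' is now the zero-based day offset within 'year' */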
+
+ days_in_month[1] += IS_LEAP_YEAR(year);
+
+ month = 0;
+ while (days >= days_in_month[month]) {
+ days -= days_in_month[month];
+ month++;
+ }
+ month++;
+ day = days + 1;
+
+ if (remaining_seconds) {
+ hour = remaining_seconds / seconds_per_hour;
+ minute = (remaining_seconds % seconds_per_hour) / seconds_per_minute;
+ second = remaining_seconds % seconds_per_minute;
+ }
+
+ tm->tm_year = year;
+ tm->tm_mon = month;
+ tm->tm_mday = day;
+ tm->tm_hour = hour;
+ tm->tm_min = minute;
+ tm->tm_sec = second;
+
+ return 0;
+}
+
+bool ras_core_gpu_in_reset(struct ras_core_context *ras_core)
+{
+ uint32_t status = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->check_gpu_status)
+ ras_core->sys_fn->check_gpu_status(ras_core, &status);
+
+ return (status & RAS_GPU_STATUS__IN_RESET) ? true : false;
+}
+
+bool ras_core_gpu_is_vf(struct ras_core_context *ras_core)
+{
+ uint32_t status = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->check_gpu_status)
+ ras_core->sys_fn->check_gpu_status(ras_core, &status);
+
+ return (status & RAS_GPU_STATUS__IS_VF) ? true : false;
+}
+
+bool ras_core_gpu_is_rma(struct ras_core_context *ras_core)
+{
+ if (!ras_core)
+ return false;
+
+ return ras_core->is_rma;
+}
+
+static int ras_core_seqno_fifo_write(struct ras_core_context *ras_core,
+ enum ras_seqno_fifo fifo_type, uint64_t seqno)
+{
+ int ret = 0;
+ struct kfifo *seqno_fifo = NULL;
+
+ if (fifo_type == SEQNO_FIFO_POISON_CREATION)
+ seqno_fifo = &ras_core->de_seqno_fifo;
+ else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
+ seqno_fifo = &ras_core->consumption_seqno_fifo;
+
+ if (seqno_fifo)
+ ret = kfifo_in_spinlocked(seqno_fifo,
+ &seqno, sizeof(seqno), &ras_core->seqno_lock);
+
+ return ret ? 0 : -EINVAL;
+}
+
+static int ras_core_seqno_fifo_read(struct ras_core_context *ras_core,
+ enum ras_seqno_fifo fifo_type, uint64_t *seqno, bool pop)
+{
+ int ret = 0;
+ struct kfifo *seqno_fifo = NULL;
+
+ if (fifo_type == SEQNO_FIFO_POISON_CREATION)
+ seqno_fifo = &ras_core->de_seqno_fifo;
+ else if (fifo_type == SEQNO_FIFO_POISON_CONSUMPTION)
+ seqno_fifo = &ras_core->consumption_seqno_fifo;
+
+ if (seqno_fifo) {
+ if (pop)
+ ret = kfifo_out_spinlocked(seqno_fifo,
+ seqno, sizeof(*seqno), &ras_core->seqno_lock);
+ else
+ ret = kfifo_out_peek(seqno_fifo, seqno, sizeof(*seqno));
+ }
+
+ return ret ? 0 : -EINVAL;
+}
+
+uint64_t ras_core_gen_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type type)
+{
+ uint64_t seqno = 0;
+
+ if (ras_core->sys_fn &&
+ ras_core->sys_fn->gen_seqno)
+ ras_core->sys_fn->gen_seqno(ras_core, type, &seqno);
+
+ return seqno;
+}
+
+int ras_core_put_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, uint64_t seqno)
+{
+ int ret = 0;
+
+ if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
+ return -EINVAL;
+
+ if (seqno_type == RAS_SEQNO_TYPE_DE)
+ ret = ras_core_seqno_fifo_write(ras_core,
+ SEQNO_FIFO_POISON_CREATION, seqno);
+ else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
+ ret = ras_core_seqno_fifo_write(ras_core,
+ SEQNO_FIFO_POISON_CONSUMPTION, seqno);
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+uint64_t ras_core_get_seqno(struct ras_core_context *ras_core,
+ enum ras_seqno_type seqno_type, bool pop)
+{
+ uint64_t seq_no;
+ int ret = -ENODATA;
+
+ if (seqno_type >= RAS_SEQNO_TYPE_COUNT_MAX)
+ return 0;
+
+ if (seqno_type == RAS_SEQNO_TYPE_DE)
+ ret = ras_core_seqno_fifo_read(ras_core,
+ SEQNO_FIFO_POISON_CREATION, &seq_no, pop);
+ else if (seqno_type == RAS_SEQNO_TYPE_POISON_CONSUMPTION)
+ ret = ras_core_seqno_fifo_read(ras_core,
+ SEQNO_FIFO_POISON_CONSUMPTION, &seq_no, pop);
+
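+ /* Nothing queued for this type: fall back to generating a fresh seqno */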
+ if (ret)
+ seq_no = ras_core_gen_seqno(ras_core, seqno_type);
+
+ return seq_no;
+}
+
+static int ras_core_eeprom_recovery(struct ras_core_context *ras_core)
+{
+ int count;
+ int ret;
+
+ count = ras_eeprom_get_record_count(ras_core);
+ if (!count)
+ return 0;
+
+ /* Avoid bad pages being loaded again after GPU reset */
+ if (ras_umc_get_saved_eeprom_count(ras_core) >= count)
+ return 0;
+
+ ret = ras_umc_load_bad_pages(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "ras_umc_load_bad_pages failed: %d\n", ret);
+ return ret;
+ }
+
+ ras_eeprom_sync_info(ras_core);
+
+ return ret;
+}
+
+struct ras_core_context *ras_core_create(struct ras_core_config *init_config)
+{
+ struct ras_core_context *ras_core;
+ struct ras_core_config *config;
+
+ ras_core = kzalloc(sizeof(*ras_core), GFP_KERNEL);
+ if (!ras_core)
+ return NULL;
+
+ config = kzalloc(sizeof(*config), GFP_KERNEL);
+ if (!config) {
+ kfree(ras_core);
+ return NULL;
+ }
+
+ memcpy(config, init_config, sizeof(*config));
+ ras_core->config = config;
+
+ return ras_core;
+}
+
+void ras_core_destroy(struct ras_core_context *ras_core)
+{
+ if (ras_core)
+ kfree(ras_core->config);
+
+ kfree(ras_core);
+}
+
+int ras_core_sw_init(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ if (!ras_core->config) {
+ RAS_DEV_ERR(ras_core->dev, "No ras core config!\n");
+ return -EINVAL;
+ }
+
+ ras_core->sys_fn = ras_core->config->sys_fn;
+ if (!ras_core->sys_fn)
+ return -EINVAL;
+
+ ret = kfifo_alloc(&ras_core->de_seqno_fifo,
+ RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ ret = kfifo_alloc(&ras_core->consumption_seqno_fifo,
+ RAS_SEQNO_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ goto free_de_fifo;
+
+ spin_lock_init(&ras_core->seqno_lock);
+
+ ret = ras_aca_sw_init(ras_core);
+ if (ret)
+ goto free_consumption_fifo;
+
+ ret = ras_umc_sw_init(ras_core);
+ if (ret)
+ goto fini_aca;
+
+ ret = ras_cmd_init(ras_core);
+ if (ret)
+ goto fini_umc;
+
+ ret = ras_log_ring_sw_init(ras_core);
+ if (ret)
+ goto fini_cmd;
+
+ ret = ras_psp_sw_init(ras_core);
+ if (ret)
+ goto fini_log_ring;
+
+ return 0;
+
+fini_log_ring:
+ ras_log_ring_sw_fini(ras_core);
+fini_cmd:
+ ras_cmd_fini(ras_core);
+fini_umc:
+ ras_umc_sw_fini(ras_core);
+fini_aca:
+ ras_aca_sw_fini(ras_core);
+free_consumption_fifo:
+ kfifo_free(&ras_core->consumption_seqno_fifo);
+free_de_fifo:
+ kfifo_free(&ras_core->de_seqno_fifo);
+ return ret;
+}
+
+int ras_core_sw_fini(struct ras_core_context *ras_core)
+{
+ kfifo_free(&ras_core->de_seqno_fifo);
+ kfifo_free(&ras_core->consumption_seqno_fifo);
+
+ ras_psp_sw_fini(ras_core);
+ ras_log_ring_sw_fini(ras_core);
+ ras_cmd_fini(ras_core);
+ ras_umc_sw_fini(ras_core);
+ ras_aca_sw_fini(ras_core);
+
+ return 0;
+}
+
+int ras_core_hw_init(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ ras_core->ras_eeprom_supported =
+ ras_core->config->ras_eeprom_supported;
+
+ ras_core->poison_supported = ras_core->config->poison_supported;
+
+ ret = ras_psp_hw_init(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_aca_hw_init(ras_core);
+ if (ret)
+ goto init_err1;
+
+ ret = ras_mp1_hw_init(ras_core);
+ if (ret)
+ goto init_err2;
+
+ ret = ras_nbio_hw_init(ras_core);
+ if (ret)
+ goto init_err3;
+
+ ret = ras_umc_hw_init(ras_core);
+ if (ret)
+ goto init_err4;
+
+ ret = ras_gfx_hw_init(ras_core);
+ if (ret)
+ goto init_err5;
+
+ ret = ras_eeprom_hw_init(ras_core);
+ if (ret)
+ goto init_err6;
+
+ ret = ras_core_eeprom_recovery(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to recovery ras core, ret:%d\n", ret);
+ goto init_err6;
+ }
+
+ ret = ras_eeprom_check_storage_status(ras_core);
+ if (ret)
+ goto init_err7;
+
+ ret = ras_process_init(ras_core);
+ if (ret)
+ goto init_err7;
+
+ ras_core->is_initialized = true;
+
+ return 0;
+
+init_err7:
+ ras_eeprom_hw_fini(ras_core);
+init_err6:
+ ras_gfx_hw_fini(ras_core);
+init_err5:
+ ras_umc_hw_fini(ras_core);
+init_err4:
+ ras_nbio_hw_fini(ras_core);
+init_err3:
+ ras_mp1_hw_fini(ras_core);
+init_err2:
+ ras_aca_hw_fini(ras_core);
+init_err1:
+ ras_psp_hw_fini(ras_core);
+ return ret;
+}
+
+int ras_core_hw_fini(struct ras_core_context *ras_core)
+{
+ ras_core->is_initialized = false;
+
+ ras_process_fini(ras_core);
+ ras_eeprom_hw_fini(ras_core);
+ ras_gfx_hw_fini(ras_core);
+ ras_nbio_hw_fini(ras_core);
+ ras_umc_hw_fini(ras_core);
+ ras_mp1_hw_fini(ras_core);
+ ras_aca_hw_fini(ras_core);
+ ras_psp_hw_fini(ras_core);
+
+ return 0;
+}
+
+bool ras_core_handle_nbio_irq(struct ras_core_context *ras_core, void *data)
+{
+ return ras_nbio_handle_irq_error(ras_core, data);
+}
+
+int ras_core_handle_fatal_error(struct ras_core_context *ras_core)
+{
+ int ret = 0;
+
+ ras_aca_mark_fatal_flag(ras_core);
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__FATAL_ERROR_DETECTED, NULL);
+
+ return ret;
+}
+
+uint32_t ras_core_get_curr_nps_mode(struct ras_core_context *ras_core)
+{
+ if (ras_core->ras_nbio.ip_func &&
+ ras_core->ras_nbio.ip_func->get_memory_partition_mode)
+ return ras_core->ras_nbio.ip_func->get_memory_partition_mode(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to get gpu memory nps mode!\n");
+ return 0;
+}
+
+int ras_core_update_ecc_info(struct ras_core_context *ras_core)
+{
+ int ret;
+
+ ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__CE, NULL);
+ if (!ret)
+ ret = ras_aca_update_ecc(ras_core, RAS_ERR_TYPE__UE, NULL);
+
+ return ret;
+}
+
+int ras_core_query_block_ecc_data(struct ras_core_context *ras_core,
+ enum ras_block_id block, struct ras_ecc_count *ecc_count)
+{
+ int ret;
+
+ if (!ecc_count || (block >= RAS_BLOCK_ID__LAST) || !ras_core)
+ return -EINVAL;
+
+ ret = ras_aca_get_block_ecc_count(ras_core, block, ecc_count);
+ if (!ret)
+ ras_aca_clear_block_new_ecc_count(ras_core, block);
+
+ return ret;
+}
+
+int ras_core_set_status(struct ras_core_context *ras_core, bool enable)
+{
+ ras_core->ras_core_enabled = enable;
+
+ return 0;
+}
+
+bool ras_core_is_enabled(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_core_enabled;
+}
+
+uint64_t ras_core_get_utc_second_timestamp(struct ras_core_context *ras_core)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->get_utc_second_timestamp)
+ return ras_core->sys_fn->get_utc_second_timestamp(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to get system timestamp!\n");
+ return 0;
+}
+
+int ras_core_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
+{
+ if (!ras_core || !soc_pa || !bank_addr)
+ return -EINVAL;
+
+ return ras_umc_translate_soc_pa_and_bank(ras_core, soc_pa, bank_addr, bank_to_pa);
+}
+
+bool ras_core_ras_interrupt_detected(struct ras_core_context *ras_core)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->detect_ras_interrupt)
+ return ras_core->sys_fn->detect_ras_interrupt(ras_core);
+
+ RAS_DEV_ERR(ras_core->dev, "Failed to detect ras interrupt!\n");
+ return false;
+}
+
+int ras_core_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->get_gpu_mem)
+ return ras_core->sys_fn->get_gpu_mem(ras_core, mem_type, gpu_mem);
+
+ RAS_DEV_ERR(ras_core->dev, "Not config get gpu memory API!\n");
+ return -EACCES;
+}
+
+int ras_core_put_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type, struct gpu_mem_block *gpu_mem)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->put_gpu_mem)
+ return ras_core->sys_fn->put_gpu_mem(ras_core, mem_type, gpu_mem);
+
+ RAS_DEV_ERR(ras_core->dev, "Not config put gpu memory API!!\n");
+ return -EACCES;
+}
+
+bool ras_core_is_ready(struct ras_core_context *ras_core)
+{
+ return ras_core ? ras_core->is_initialized : false;
+}
+
+bool ras_core_check_safety_watermark(struct ras_core_context *ras_core)
+{
+ return ras_eeprom_check_safety_watermark(ras_core);
+}
+
+int ras_core_down_trylock_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ return ras_core->sys_fn->gpu_reset_lock(ras_core, true, true);
+
+ return 1;
+}
+
+void ras_core_down_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ ras_core->sys_fn->gpu_reset_lock(ras_core, true, false);
+}
+
+void ras_core_up_gpu_reset_lock(struct ras_core_context *ras_core)
+{
+ if (ras_core->sys_fn && ras_core->sys_fn->gpu_reset_lock)
+ ras_core->sys_fn->gpu_reset_lock(ras_core, false, false);
+}
+
+int ras_core_event_notify(struct ras_core_context *ras_core,
+ enum ras_notify_event event_id, void *data)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->ras_notifier)
+ return ras_core->sys_fn->ras_notifier(ras_core, event_id, data);
+
+ return -RAS_CORE_NOT_SUPPORTED;
+}
+
+int ras_core_get_device_system_info(struct ras_core_context *ras_core,
+ struct device_system_info *dev_info)
+{
+ if (ras_core && ras_core->sys_fn &&
+ ras_core->sys_fn->get_device_system_info)
+ return ras_core->sys_fn->get_device_system_info(ras_core, dev_info);
+
+ return -RAS_CORE_NOT_SUPPORTED;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h
new file mode 100644
index 000000000000..144fbe4ceb9a
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_core_status.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_CORE_STATUS_H__
+#define __RAS_CORE_STATUS_H__
+
+#define RAS_CORE_OK 0
+#define RAS_CORE_NOT_SUPPORTED 248
+#define RAS_CORE_FAIL_ERROR_QUERY 249
+#define RAS_CORE_FAIL_ERROR_INJECTION 250
+#define RAS_CORE_FAIL_FATAL_RECOVERY 251
+#define RAS_CORE_FAIL_POISON_CONSUMPTION 252
+#define RAS_CORE_FAIL_POISON_CREATION 253
+#define RAS_CORE_FAIL_NO_VALID_BANKS 254
+#define RAS_CORE_GPU_IN_MODE1_RESET 255
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.c b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c
new file mode 100644
index 000000000000..0fc7522b7ab6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+#include "ras_log_ring.h"
+#include "ras_cper.h"
+
+static const struct ras_cper_guid MCE = CPER_NOTIFY__MCE;
+static const struct ras_cper_guid CMC = CPER_NOTIFY__CMC;
+static const struct ras_cper_guid BOOT = BOOT__TYPE;
+
+static const struct ras_cper_guid CRASHDUMP = GPU__CRASHDUMP;
+static const struct ras_cper_guid RUNTIME = GPU__NONSTANDARD_ERROR;
+
+static void cper_get_timestamp(struct ras_core_context *ras_core,
+ struct ras_cper_timestamp *timestamp, uint64_t utc_second_timestamp)
+{
+ struct ras_time tm = {0};
+
+ ras_core_convert_timestamp_to_time(ras_core, utc_second_timestamp, &tm);
+ timestamp->seconds = tm.tm_sec;
+ timestamp->minutes = tm.tm_min;
+ timestamp->hours = tm.tm_hour;
+ timestamp->flag = 0;
+ timestamp->day = tm.tm_mday;
+ timestamp->month = tm.tm_mon;
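+ /* assuming tm_year holds the full year, e.g. 2025 encodes as
+ * century 20, year 25
+ */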
+ timestamp->year = tm.tm_year % 100;
+ timestamp->century = tm.tm_year / 100;
+}
+
+static void fill_section_hdr(struct ras_core_context *ras_core,
+ struct cper_section_hdr *hdr, enum ras_cper_type type,
+ enum ras_cper_severity sev, struct ras_log_info *trace)
+{
+ struct device_system_info dev_info = {0};
+ char record_id[32];
+
+ hdr->signature[0] = 'C';
+ hdr->signature[1] = 'P';
+ hdr->signature[2] = 'E';
+ hdr->signature[3] = 'R';
+ hdr->revision = CPER_HDR__REV_1;
+ hdr->signature_end = 0xFFFFFFFF;
+ hdr->error_severity = (sev == RAS_CPER_SEV_RMA ? RAS_CPER_SEV_FATAL_UE : sev);
+
+ hdr->valid_bits.platform_id = 1;
+ hdr->valid_bits.timestamp = 1;
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ cper_get_timestamp(ras_core, &hdr->timestamp, trace->timestamp);
+
+ snprintf(record_id, sizeof(record_id), "%d:%llX", dev_info.socket_id,
+ RAS_LOG_SEQNO_TO_BATCH_IDX(trace->seqno));
+ memcpy(hdr->record_id, record_id, 8);
+
+ snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
+ dev_info.vendor_id, dev_info.device_id);
+ /* pmfw version should be part of creator_id according to CPER spec */
+ snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID__AMDGPU);
+
+ switch (type) {
+ case RAS_CPER_TYPE_BOOT:
+ hdr->notify_type = BOOT;
+ break;
+ case RAS_CPER_TYPE_FATAL:
+ case RAS_CPER_TYPE_RMA:
+ hdr->notify_type = MCE;
+ break;
+ case RAS_CPER_TYPE_RUNTIME:
+ if (sev == RAS_CPER_SEV_NON_FATAL_CE)
+ hdr->notify_type = CMC;
+ else
+ hdr->notify_type = MCE;
+ break;
+ default:
+ RAS_DEV_ERR(ras_core->dev, "Unknown CPER Type\n");
+ break;
+ }
+}
+
+static int fill_section_descriptor(struct ras_core_context *ras_core,
+ struct cper_section_descriptor *descriptor,
+ enum ras_cper_severity sev,
+ struct ras_cper_guid sec_type,
+ uint32_t section_offset,
+ uint32_t section_length)
+{
+ struct device_system_info dev_info = {0};
+
+ descriptor->revision_minor = CPER_SEC__MINOR_REV_1;
+ descriptor->revision_major = CPER_SEC__MAJOR_REV_22;
+ descriptor->sec_offset = section_offset;
+ descriptor->sec_length = section_length;
+ descriptor->valid_bits.fru_text = 1;
+ descriptor->flag_bits.primary = 1;
+ descriptor->severity = (sev == RAS_CPER_SEV_RMA ? RAS_CPER_SEV_FATAL_UE : sev);
+ descriptor->sec_type = sec_type;
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ snprintf(descriptor->fru_text, 20, "OAM%d", dev_info.socket_id);
+
+ if (sev == RAS_CPER_SEV_RMA)
+ descriptor->flag_bits.exceed_err_threshold = 1;
+
+ if (sev == RAS_CPER_SEV_NON_FATAL_UE)
+ descriptor->flag_bits.latent_err = 1;
+
+ return 0;
+}
+
+static int fill_section_fatal(struct ras_core_context *ras_core,
+ struct cper_section_fatal *fatal, struct ras_log_info *trace)
+{
+ fatal->data.reg_ctx_type = CPER_CTX_TYPE__CRASH;
+ fatal->data.reg_arr_size = sizeof(fatal->data.reg);
+
+ fatal->data.reg.status = trace->aca_reg.regs[RAS_CPER_ACA_REG_STATUS];
+ fatal->data.reg.addr = trace->aca_reg.regs[RAS_CPER_ACA_REG_ADDR];
+ fatal->data.reg.ipid = trace->aca_reg.regs[RAS_CPER_ACA_REG_IPID];
+ fatal->data.reg.synd = trace->aca_reg.regs[RAS_CPER_ACA_REG_SYND];
+
+ return 0;
+}
+
+static int fill_section_runtime(struct ras_core_context *ras_core,
+ struct cper_section_runtime *runtime, struct ras_log_info *trace,
+ enum ras_cper_severity sev)
+{
+ runtime->hdr.valid_bits.err_info_cnt = 1;
+ runtime->hdr.valid_bits.err_context_cnt = 1;
+
+ runtime->descriptor.error_type = RUNTIME;
+ runtime->descriptor.ms_chk_bits.err_type_valid = 1;
+ if (sev == RAS_CPER_SEV_RMA) {
+ runtime->descriptor.valid_bits.ms_chk = 1;
+ runtime->descriptor.ms_chk_bits.err_type = 1;
+ runtime->descriptor.ms_chk_bits.pcc = 1;
+ }
+
+ runtime->reg.reg_ctx_type = CPER_CTX_TYPE__CRASH;
+ runtime->reg.reg_arr_size = sizeof(runtime->reg.reg_dump);
+
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_CTL] = trace->aca_reg.regs[ACA_REG_IDX__CTL];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_STATUS] = trace->aca_reg.regs[ACA_REG_IDX__STATUS];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_ADDR] = trace->aca_reg.regs[ACA_REG_IDX__ADDR];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_MISC0] = trace->aca_reg.regs[ACA_REG_IDX__MISC0];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_CONFIG] = trace->aca_reg.regs[ACA_REG_IDX__CONFG];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_IPID] = trace->aca_reg.regs[ACA_REG_IDX__IPID];
+ runtime->reg.reg_dump[RAS_CPER_ACA_REG_SYND] = trace->aca_reg.regs[ACA_REG_IDX__SYND];
+
+ return 0;
+}
+
+static int cper_generate_runtime_record(struct ras_core_context *ras_core,
+ struct cper_section_hdr *hdr, struct ras_log_info **trace_arr, uint32_t arr_num,
+ enum ras_cper_severity sev)
+{
+ struct cper_section_descriptor *descriptor;
+ struct cper_section_runtime *runtime;
+ int i;
+
+ fill_section_hdr(ras_core, hdr, RAS_CPER_TYPE_RUNTIME, sev, trace_arr[0]);
+ hdr->record_length = RAS_HDR_LEN + ((RAS_SEC_DESC_LEN + RAS_NONSTD_SEC_LEN) * arr_num);
+ hdr->sec_cnt = arr_num;
+ for (i = 0; i < arr_num; i++) {
+ descriptor = (struct cper_section_descriptor *)((uint8_t *)hdr +
+ RAS_SEC_DESC_OFFSET(i));
+ runtime = (struct cper_section_runtime *)((uint8_t *)hdr +
+ RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i));
+
+ fill_section_descriptor(ras_core, descriptor, sev, RUNTIME,
+ RAS_NONSTD_SEC_OFFSET(hdr->sec_cnt, i),
+ sizeof(struct cper_section_runtime));
+ fill_section_runtime(ras_core, runtime, trace_arr[i], sev);
+ }
+
+ return 0;
+}
+
+static int cper_generate_fatal_record(struct ras_core_context *ras_core,
+ uint8_t *buffer, struct ras_log_info **trace_arr, uint32_t arr_num)
+{
+ struct ras_cper_fatal_record record = {0};
+ int i = 0;
+
+ for (i = 0; i < arr_num; i++) {
+ fill_section_hdr(ras_core, &record.hdr, RAS_CPER_TYPE_FATAL,
+ RAS_CPER_SEV_FATAL_UE, trace_arr[i]);
+ record.hdr.record_length = RAS_HDR_LEN + RAS_SEC_DESC_LEN + RAS_FATAL_SEC_LEN;
+ record.hdr.sec_cnt = 1;
+
+ fill_section_descriptor(ras_core, &record.descriptor, RAS_CPER_SEV_FATAL_UE,
+ CRASHDUMP, offsetof(struct ras_cper_fatal_record, fatal),
+ sizeof(struct cper_section_fatal));
+
+ fill_section_fatal(ras_core, &record.fatal, trace_arr[i]);
+
+ memcpy(buffer + (i * record.hdr.record_length),
+ &record, record.hdr.record_length);
+ }
+
+ return 0;
+}
+
+static int cper_get_record_size(enum ras_cper_type type, uint16_t section_count)
+{
+ int size = 0;
+
+ size += RAS_HDR_LEN;
+ size += (RAS_SEC_DESC_LEN * section_count);
+
+ switch (type) {
+ case RAS_CPER_TYPE_RUNTIME:
+ case RAS_CPER_TYPE_RMA:
+ size += (RAS_NONSTD_SEC_LEN * section_count);
+ break;
+ case RAS_CPER_TYPE_FATAL:
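+ /* Each fatal section is emitted as a standalone record with its
+ * own header, hence one extra header per section beyond the first.
+ */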
+ size += (RAS_FATAL_SEC_LEN * section_count);
+ size += (RAS_HDR_LEN * (section_count - 1));
+ break;
+ case RAS_CPER_TYPE_BOOT:
+ size += (RAS_BOOT_SEC_LEN * section_count);
+ break;
+ default:
+ /* should never reach here */
+ break;
+ }
+
+ return size;
+}
+
+static enum ras_cper_type cper_ras_log_event_to_cper_type(enum ras_log_event event)
+{
+ switch (event) {
+ case RAS_LOG_EVENT_UE:
+ return RAS_CPER_TYPE_FATAL;
+ case RAS_LOG_EVENT_DE:
+ case RAS_LOG_EVENT_CE:
+ case RAS_LOG_EVENT_POISON_CREATION:
+ case RAS_LOG_EVENT_POISON_CONSUMPTION:
+ return RAS_CPER_TYPE_RUNTIME;
+ case RAS_LOG_EVENT_RMA:
+ return RAS_CPER_TYPE_RMA;
+ default:
+ /* should never reach here */
+ return RAS_CPER_TYPE_RUNTIME;
+ }
+}
+
+int ras_cper_generate_cper(struct ras_core_context *ras_core,
+ struct ras_log_info **trace_list, uint32_t count,
+ uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len)
+{
+ uint8_t *buffer = buf;
+ uint64_t buf_size = buf_len;
+ int record_size, saved_size = 0;
+ struct cper_section_hdr *hdr;
+
+ /* All the batch traces share the same event */
+ record_size = cper_get_record_size(
+ cper_ras_log_event_to_cper_type(trace_list[0]->event), count);
+
+ if ((record_size + saved_size) > buf_size)
+ return -ENOMEM;
+
+ hdr = (struct cper_section_hdr *)(buffer + saved_size);
+
+ switch (trace_list[0]->event) {
+ case RAS_LOG_EVENT_RMA:
+ cper_generate_runtime_record(ras_core, hdr, trace_list, count, RAS_CPER_SEV_RMA);
+ break;
+ case RAS_LOG_EVENT_DE:
+ cper_generate_runtime_record(ras_core,
+ hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_UE);
+ break;
+ case RAS_LOG_EVENT_CE:
+ cper_generate_runtime_record(ras_core,
+ hdr, trace_list, count, RAS_CPER_SEV_NON_FATAL_CE);
+ break;
+ case RAS_LOG_EVENT_UE:
+ cper_generate_fatal_record(ras_core, buffer + saved_size, trace_list, count);
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev, "Unprocessed trace event: %d\n", trace_list[0]->event);
+ break;
+ }
+
+ saved_size += record_size;
+
+ *real_data_len = saved_size;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_cper.h b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h
new file mode 100644
index 000000000000..076c1883c1ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_cper.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_CPER_H__
+#define __RAS_CPER_H__
+
+#define CPER_UUID_MAX_SIZE 16
+struct ras_cper_guid {
+ uint8_t b[CPER_UUID_MAX_SIZE];
+};
+
+#define CPER_GUID__INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
+ ((struct ras_cper_guid) \
+ {{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
+ (b) & 0xff, ((b) >> 8) & 0xff, \
+ (c) & 0xff, ((c) >> 8) & 0xff, \
+ (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
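+
+/* For example, CPER_GUID__INIT(0xE8F56FFE, 0x919C, 0x4cc5, ...) lays the
+ * first three fields out little-endian, i.e. bytes
+ * FE 6F F5 E8 9C 91 C5 4C, followed by the last eight bytes verbatim.
+ */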
+
+#define CPER_HDR__REV_1 (0x100)
+#define CPER_SEC__MINOR_REV_1 (0x01)
+#define CPER_SEC__MAJOR_REV_22 (0x22)
+#define CPER_OAM_MAX_COUNT (8)
+
+#define CPER_CTX_TYPE__CRASH (1)
+#define CPER_CTX_TYPE__BOOT (9)
+
+#define CPER_CREATOR_ID__AMDGPU "amdgpu"
+
+#define CPER_NOTIFY__MCE \
+ CPER_GUID__INIT(0xE8F56FFE, 0x919C, 0x4cc5, 0xBA, 0x88, 0x65, 0xAB, \
+ 0xE1, 0x49, 0x13, 0xBB)
+#define CPER_NOTIFY__CMC \
+ CPER_GUID__INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
+ 0xEB, 0xD4, 0xF8, 0x90)
+#define BOOT__TYPE \
+ CPER_GUID__INIT(0x3D61A466, 0xAB40, 0x409a, 0xA6, 0x98, 0xF3, 0x62, \
+ 0xD4, 0x64, 0xB3, 0x8F)
+
+#define GPU__CRASHDUMP \
+ CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0xB0, 0xD0, 0x73, 0x65, \
+ 0x72, 0x5F, 0xD6, 0xAE)
+#define GPU__NONSTANDARD_ERROR \
+ CPER_GUID__INIT(0x32AC0C78, 0x2623, 0x48F6, 0x81, 0xA2, 0xAC, 0x69, \
+ 0x17, 0x80, 0x55, 0x1D)
+#define PROC_ERR__SECTION_TYPE \
+ CPER_GUID__INIT(0xDC3EA0B0, 0xA144, 0x4797, 0xB9, 0x5B, 0x53, 0xFA, \
+ 0x24, 0x2B, 0x6E, 0x1D)
+
+enum ras_cper_type {
+ RAS_CPER_TYPE_RUNTIME,
+ RAS_CPER_TYPE_FATAL,
+ RAS_CPER_TYPE_BOOT,
+ RAS_CPER_TYPE_RMA,
+};
+
+enum ras_cper_severity {
+ RAS_CPER_SEV_NON_FATAL_UE = 0,
+ RAS_CPER_SEV_FATAL_UE = 1,
+ RAS_CPER_SEV_NON_FATAL_CE = 2,
+ RAS_CPER_SEV_RMA = 3,
+
+ RAS_CPER_SEV_UNUSED = 10,
+};
+
+enum ras_cper_aca_reg {
+ RAS_CPER_ACA_REG_CTL = 0,
+ RAS_CPER_ACA_REG_STATUS = 1,
+ RAS_CPER_ACA_REG_ADDR = 2,
+ RAS_CPER_ACA_REG_MISC0 = 3,
+ RAS_CPER_ACA_REG_CONFIG = 4,
+ RAS_CPER_ACA_REG_IPID = 5,
+ RAS_CPER_ACA_REG_SYND = 6,
+ RAS_CPER_ACA_REG_DESTAT = 8,
+ RAS_CPER_ACA_REG_DEADDR = 9,
+ RAS_CPER_ACA_REG_MASK = 10,
+
+ RAS_CPER_ACA_REG_COUNT = 16,
+};
+
+#pragma pack(push, 1)
+
+struct ras_cper_timestamp {
+ uint8_t seconds;
+ uint8_t minutes;
+ uint8_t hours;
+ uint8_t flag;
+ uint8_t day;
+ uint8_t month;
+ uint8_t year;
+ uint8_t century;
+};
+
+struct cper_section_hdr {
+ char signature[4]; /* "CPER" */
+ uint16_t revision;
+ uint32_t signature_end; /* 0xFFFFFFFF */
+ uint16_t sec_cnt;
+ enum ras_cper_severity error_severity;
+ union {
+ struct {
+ uint32_t platform_id : 1;
+ uint32_t timestamp : 1;
+ uint32_t partition_id : 1;
+ uint32_t reserved : 29;
+ } valid_bits;
+ uint32_t valid_mask;
+ };
+ uint32_t record_length; /* Total size of CPER Entry */
+ struct ras_cper_timestamp timestamp;
+ char platform_id[16];
+ struct ras_cper_guid partition_id; /* Reserved */
+ char creator_id[16];
+ struct ras_cper_guid notify_type; /* CMC, MCE */
+ char record_id[8]; /* Unique CPER Entry ID */
+ uint32_t flags; /* Reserved */
+ uint64_t persistence_info; /* Reserved */
+ uint8_t reserved[12]; /* Reserved */
+};
+
+struct cper_section_descriptor {
+ uint32_t sec_offset; /* Offset from the start of CPER entry */
+ uint32_t sec_length;
+ uint8_t revision_minor; /* CPER_SEC_MINOR_REV_1 */
+ uint8_t revision_major; /* CPER_SEC_MAJOR_REV_22 */
+ union {
+ struct {
+ uint8_t fru_id : 1;
+ uint8_t fru_text : 1;
+ uint8_t reserved : 6;
+ } valid_bits;
+ uint8_t valid_mask;
+ };
+ uint8_t reserved;
+ union {
+ struct {
+ uint32_t primary : 1;
+ uint32_t reserved1 : 2;
+ uint32_t exceed_err_threshold : 1;
+ uint32_t latent_err : 1;
+ uint32_t reserved2 : 27;
+ } flag_bits;
+ uint32_t flag_mask;
+ };
+ struct ras_cper_guid sec_type;
+ char fru_id[16];
+ enum ras_cper_severity severity;
+ char fru_text[20];
+};
+
+struct runtime_hdr {
+ union {
+ struct {
+ uint64_t apic_id : 1;
+ uint64_t fw_id : 1;
+ uint64_t err_info_cnt : 6;
+ uint64_t err_context_cnt : 6;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ uint64_t apic_id;
+ char fw_id[48];
+};
+
+struct runtime_descriptor {
+ struct ras_cper_guid error_type;
+ union {
+ struct {
+ uint64_t ms_chk : 1;
+ uint64_t target_addr_id : 1;
+ uint64_t req_id : 1;
+ uint64_t resp_id : 1;
+ uint64_t instr_ptr : 1;
+ uint64_t reserved : 59;
+ } valid_bits;
+ uint64_t valid_mask;
+ };
+ union {
+ struct {
+ uint64_t err_type_valid : 1;
+ uint64_t pcc_valid : 1;
+ uint64_t uncorr_valid : 1;
+ uint64_t precise_ip_valid : 1;
+ uint64_t restartable_ip_valid : 1;
+ uint64_t overflow_valid : 1;
+ uint64_t reserved1 : 10;
+ uint64_t err_type : 2;
+ uint64_t pcc : 1;
+ uint64_t uncorr : 1;
+ uint64_t precised_ip : 1;
+ uint64_t restartable_ip : 1;
+ uint64_t overflow : 1;
+ uint64_t reserved2 : 41;
+ } ms_chk_bits;
+ uint64_t ms_chk_mask;
+ };
+ uint64_t target_addr_id;
+ uint64_t req_id;
+ uint64_t resp_id;
+ uint64_t instr_ptr;
+};
+
+struct runtime_error_reg {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t msr_addr;
+ uint64_t mm_reg_addr;
+ uint64_t reg_dump[RAS_CPER_ACA_REG_COUNT];
+};
+
+struct cper_section_runtime {
+ struct runtime_hdr hdr;
+ struct runtime_descriptor descriptor;
+ struct runtime_error_reg reg;
+};
+
+struct crashdump_hdr {
+ uint64_t reserved1;
+ uint64_t reserved2;
+ char fw_id[48];
+ uint64_t reserved3[8];
+};
+
+struct fatal_reg_info {
+ uint64_t status;
+ uint64_t addr;
+ uint64_t ipid;
+ uint64_t synd;
+};
+
+struct crashdump_fatal {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ struct fatal_reg_info reg;
+};
+
+struct crashdump_boot {
+ uint16_t reg_ctx_type;
+ uint16_t reg_arr_size;
+ uint32_t reserved1;
+ uint64_t reserved2;
+ uint64_t msg[CPER_OAM_MAX_COUNT];
+};
+
+struct cper_section_fatal {
+ struct crashdump_hdr hdr;
+ struct crashdump_fatal data;
+};
+
+struct cper_section_boot {
+ struct crashdump_hdr hdr;
+ struct crashdump_boot data;
+};
+
+struct ras_cper_fatal_record {
+ struct cper_section_hdr hdr;
+ struct cper_section_descriptor descriptor;
+ struct cper_section_fatal fatal;
+};
+#pragma pack(pop)
+
+#define RAS_HDR_LEN (sizeof(struct cper_section_hdr))
+#define RAS_SEC_DESC_LEN (sizeof(struct cper_section_descriptor))
+
+#define RAS_BOOT_SEC_LEN (sizeof(struct cper_section_boot))
+#define RAS_FATAL_SEC_LEN (sizeof(struct cper_section_fatal))
+#define RAS_NONSTD_SEC_LEN (sizeof(struct cper_section_runtime))
+
+#define RAS_SEC_DESC_OFFSET(idx) (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (idx)))
+
+#define RAS_BOOT_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (count)) + (RAS_BOOT_SEC_LEN * (idx)))
+#define RAS_FATAL_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (count)) + (RAS_FATAL_SEC_LEN * (idx)))
+#define RAS_NONSTD_SEC_OFFSET(count, idx) \
+ (RAS_HDR_LEN + (RAS_SEC_DESC_LEN * (count)) + (RAS_NONSTD_SEC_LEN * (idx)))
+
+struct ras_core_context;
+struct ras_log_info;
+int ras_cper_generate_cper(struct ras_core_context *ras_core,
+ struct ras_log_info **trace_list, uint32_t count,
+ uint8_t *buf, uint32_t buf_len, uint32_t *real_data_len);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
new file mode 100644
index 000000000000..cd6b057bdaf3
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.c
@@ -0,0 +1,1339 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras_eeprom.h"
+#include "ras.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s) and the status of
+ * pins 1-3, bits 18:16 select either memory within a single device or
+ * one of several devices on the I2C bus.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
+
+#define EEPROM_PAGE_BITS 8
+#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
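+
+/* For example, the RAS table at EEPROM memory address 40000h yields
+ * MAKE_I2C_ADDR(0x40000) = (0xA << 3) | ((0x40000 >> 16) & 0xF)
+ * = 0x50 | 0x4 = 0x54,
+ * matching the 54h device address described above.
+ */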
+
+/*
+ * The two macros below represent the actual size in bytes that
+ * those entities occupy in the EEPROM memory.
+ * RAS_TABLE_RECORD_SIZE differs from sizeof(struct eeprom_umc_record),
+ * which uses uint64_t to store 6-byte fields such as retired_row_pfn.
+ */
+#define RAS_TABLE_HEADER_SIZE 20
+#define RAS_TABLE_RECORD_SIZE 24
+
+/* Table hdr is 'AMDR' */
+#define RAS_TABLE_HDR_VAL 0x414d4452
+
+/* Bad GPU tag 'BADG' */
+#define RAS_TABLE_HDR_BAD 0x42414447
+
+/*
+ * EEPROM Table structure v1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* Assume 2-Mbit size EEPROM and take up the whole space. */
+#define RAS_TBL_SIZE_BYTES (256 * 1024)
+#define RAS_TABLE_START 0
+#define RAS_HDR_START RAS_TABLE_START
+#define RAS_RECORD_START (RAS_HDR_START + RAS_TABLE_HEADER_SIZE)
+#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
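+
+/* With the 2-Mbit part this yields (262144 - 20) / 24 = 10921 records. */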
+
+/*
+ * EEPROM Table structure v2.1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE RAS INFO |
+ * | (available info size 4 Bytes) |
+ * | ( reserved size 252 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* EEPROM Table V2_1 */
+#define RAS_TABLE_V2_1_INFO_SIZE 256
+#define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE
+#define RAS_RECORD_START_V2_1 (RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \
+ RAS_TABLE_V2_1_INFO_SIZE)
+#define RAS_MAX_RECORD_COUNT_V2_1 ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
+
+/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
+ * offset off of RAS_TABLE_START. That is, this is something you can
+ * add to control->i2c_address, and then tell I2C layer to read
+ * from/write to there. _N is the so-called absolute index,
+ * because it starts right after the table header.
+ */
+#define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \
+ (_N) * RAS_TABLE_RECORD_SIZE)
+
+#define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \
+ (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE)
+
+/* Given a 0-based relative record index, 0, 1, 2, ..., etc., off
+ * of "fri", return the absolute record index off of the end of
+ * the table header.
+ */
+#define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \
+ (_C)->ras_max_record_count)
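+
+/* For example, with ras_fri = 5 and ras_max_record_count = 8, relative
+ * record index 6 maps to absolute index (6 + 5) % 8 = 3.
+ */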
+
+#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE)
+
+#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE)
+
+#define to_ras_core_context(x) (container_of(x, struct ras_core_context, ras_eeprom))
+
+static bool __is_ras_eeprom_supported(struct ras_core_context *ras_core)
+{
+ return ras_core->ras_eeprom_supported;
+}
+
+static bool __get_eeprom_i2c_addr(struct ras_core_context *ras_core,
+ struct ras_eeprom_control *control)
+{
+ int ret = -EINVAL;
+
+ if (control->sys_func &&
+ control->sys_func->update_eeprom_i2c_config)
+ ret = control->sys_func->update_eeprom_i2c_config(ras_core);
+ else
+ RAS_DEV_WARN(ras_core->dev,
+ "No eeprom i2c system config!\n");
+
+ return ret == 0;
+}
+
+static int __ras_eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int ret;
+
+ if (control->sys_func && control->sys_func->eeprom_i2c_xfer) {
+ ret = control->sys_func->eeprom_i2c_xfer(ras_core,
+ eeprom_addr, eeprom_buf, buf_size, read);
+
+ if ((ret > 0) && !read) {
+ /* According to EEPROM specs the length of the
+ * self-writing cycle, tWR (tW), is 10 ms.
+ *
+ * TODO: Use polling on ACK, aka Acknowledge
+ * Polling, to minimize waiting for the
+ * internal write cycle to complete, as it is
+ * usually smaller than tWR (tW).
+ */
+ msleep(10);
+ }
+
+ return ret;
+ }
+
+ RAS_DEV_ERR(ras_core->dev, "No eeprom i2c xfer function configured!\n");
+ return -EINVAL;
+}
+
+static int __eeprom_xfer(struct ras_core_context *ras_core, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ u16 limit;
+ u16 ps; /* Partial size */
+ int res = 0, r;
+
+ if (read)
+ limit = ras_core->ras_eeprom.max_read_len;
+ else
+ limit = ras_core->ras_eeprom.max_write_len;
+
+ if (limit && (limit <= EEPROM_OFFSET_SIZE)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
+ eeprom_addr, buf_size,
+ read ? "read" : "write", EEPROM_OFFSET_SIZE);
+ return -EINVAL;
+ }
+
+ ras_core_down_gpu_reset_lock(ras_core);
+
+ if (limit == 0) {
+ res = __ras_eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, buf_size, read);
+ } else {
+ /* The "limit" includes all data bytes sent/received,
+ * which would include the EEPROM_OFFSET_SIZE bytes.
+ * Account for them here.
+ */
+ limit -= EEPROM_OFFSET_SIZE;
+ for ( ; buf_size > 0;
+ buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
+ ps = (buf_size < limit) ? buf_size : limit;
+
+ r = __ras_eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, ps, read);
+ if (r < 0)
+ break;
+
+ res += r;
+ }
+ }
+
+ ras_core_up_gpu_reset_lock(ras_core);
+
+ return res;
+}
+
+static int __eeprom_read(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 bytes)
+{
+ return __eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, bytes, true);
+}
+
+static int __eeprom_write(struct ras_core_context *ras_core,
+ u32 eeprom_addr, u8 *eeprom_buf, u32 bytes)
+{
+ return __eeprom_xfer(ras_core, eeprom_addr,
+ eeprom_buf, bytes, false);
+}
+
+static void
+__encode_table_header_to_buf(struct ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ pp[0] = cpu_to_le32(hdr->header);
+ pp[1] = cpu_to_le32(hdr->version);
+ pp[2] = cpu_to_le32(hdr->first_rec_offset);
+ pp[3] = cpu_to_le32(hdr->tbl_size);
+ pp[4] = cpu_to_le32(hdr->checksum);
+}
+
+static void
+__decode_table_header_from_buf(struct ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ hdr->header = le32_to_cpu(pp[0]);
+ hdr->version = le32_to_cpu(pp[1]);
+ hdr->first_rec_offset = le32_to_cpu(pp[2]);
+ hdr->tbl_size = le32_to_cpu(pp[3]);
+ hdr->checksum = le32_to_cpu(pp[4]);
+}
+
+static int __write_table_header(struct ras_eeprom_control *control)
+{
+ u8 buf[RAS_TABLE_HEADER_SIZE];
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int res;
+
+ memset(buf, 0, sizeof(buf));
+ __encode_table_header_to_buf(&control->tbl_hdr, buf);
+
+ /* i2c may be unstable in gpu reset */
+ res = __eeprom_write(ras_core,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to write EEPROM table header:%d\n", res);
+ } else if (res < RAS_TABLE_HEADER_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Short write:%d out of %d\n", res, RAS_TABLE_HEADER_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static void
+__encode_table_ras_info_to_buf(struct ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = ((uint32_t)(rai->rma_status) & 0xFF) |
+ (((uint32_t)(rai->health_percent) << 8) & 0xFF00) |
+ (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000);
+ pp[0] = cpu_to_le32(tmp);
+}
+
+static void
+__decode_table_ras_info_from_buf(struct ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = le32_to_cpu(pp[0]);
+ rai->rma_status = tmp & 0xFF;
+ rai->health_percent = (tmp >> 8) & 0xFF;
+ rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF;
+}
+
+static int __write_table_ras_info(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u8 *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to alloc buf to write table ras info\n");
+ return -ENOMEM;
+ }
+
+ __encode_table_ras_info_to_buf(&control->tbl_rai, buf);
+
+ /* i2c may be unstable in gpu reset */
+ res = __eeprom_write(ras_core,
+ control->i2c_address +
+ control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to write EEPROM table ras info:%d\n", res);
+ } else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Short write:%d out of %d\n", res, RAS_TABLE_V2_1_INFO_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ kfree(buf);
+
+ return res;
+}
+
+static u8 __calc_hdr_byte_sum(const struct ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ u32 sz;
+
+ /* Header checksum, skip checksum field in the calculation */
+ sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum);
+ pp = (u8 *) &control->tbl_hdr;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
+static u8 __calc_ras_info_byte_sum(const struct ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ u32 sz;
+
+ sz = sizeof(control->tbl_rai);
+ pp = (u8 *) &control->tbl_rai;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
+static int ras_eeprom_correct_header_tag(
+ struct ras_eeprom_control *control,
+ uint32_t header)
+{
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ u8 *hh;
+ int res;
+ u8 csum;
+
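+ /* The table checksum is chosen so that all table bytes sum to zero
+ * (mod 256), so swapping only the 4-byte header tag just requires
+ * adjusting the running byte sum by the difference between the old
+ * and new tag bytes; no full table re-read is needed.
+ */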
+ csum = -hdr->checksum;
+
+ hh = (void *) &hdr->header;
+ csum -= (hh[0] + hh[1] + hh[2] + hh[3]);
+ hh = (void *) &header;
+ csum += hh[0] + hh[1] + hh[2] + hh[3];
+ csum = -csum;
+ mutex_lock(&control->ras_tbl_mutex);
+ hdr->header = header;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+static void ras_set_eeprom_table_version(struct ras_eeprom_control *control)
+{
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+
+ hdr->version = RAS_TABLE_VER_V3;
+}
+
+int ras_eeprom_reset_table(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ u8 csum;
+ int res;
+
+ mutex_lock(&control->ras_tbl_mutex);
+
+ hdr->header = RAS_TABLE_HDR_VAL;
+ ras_set_eeprom_table_version(control);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = RAS_GPU_HEALTH_USABLE;
+ /*
+ * GPU health represented as a percentage:
+ * 0 means worst health, 100 means full health.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = control->record_threshold_count;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ }
+
+ csum = __calc_hdr_byte_sum(control);
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ csum = -csum;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+
+ control->ras_num_recs = 0;
+ control->ras_fri = 0;
+
+ control->bad_channel_bitmap = 0;
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ &control->ras_num_recs);
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ &control->bad_channel_bitmap);
+ control->update_channel_flag = false;
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+static void
+__encode_table_record_to_buf(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields according to EEPROM page spec in LE format */
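+ /* Layout: err_type(1) + bank(1) + ts(8) + offset(6) +
+ * mem_channel(1) + mcumc_id(1) + retired_row_pfn(6) = 24 bytes,
+ * matching RAS_TABLE_RECORD_SIZE.
+ */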
+ buf[i++] = record->err_type;
+
+ buf[i++] = record->bank;
+
+ tmp = cpu_to_le64(record->ts);
+ memcpy(buf + i, &tmp, 8);
+ i += 8;
+
+ tmp = cpu_to_le64((record->offset & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+ i += 6;
+
+ buf[i++] = record->mem_channel;
+ buf[i++] = record->mcumc_id;
+
+ tmp = cpu_to_le64((record->retired_row_pfn & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+}
+
+static void
+__decode_table_record_from_buf(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields according to EEPROM page spec in LE format */
+ record->err_type = buf[i++];
+
+ record->bank = buf[i++];
+
+ memcpy(&tmp, buf + i, 8);
+ record->ts = le64_to_cpu(tmp);
+ i += 8;
+
+ memcpy(&tmp, buf + i, 6);
+ record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
+ i += 6;
+
+ record->mem_channel = buf[i++];
+ record->mcumc_id = buf[i++];
+
+ memcpy(&tmp, buf + i, 6);
+ record->retired_row_pfn = (le64_to_cpu(tmp) & 0xffffffffffff);
+}
+
+bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ bool ret = false;
+ int bad_page_count;
+
+ if (!__is_ras_eeprom_supported(ras_core) ||
+ !control->record_threshold_config)
+ return false;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD) {
+ if (bad_page_count > control->record_threshold_count)
+ RAS_DEV_WARN(ras_core->dev, "RAS records:%d exceed threshold:%d\n",
+ bad_page_count, control->record_threshold_count);
+
+ if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) ||
+ (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n");
+ ret = false;
+ } else {
+ ras_core->is_rma = true;
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consider adjusting the customized threshold.\n");
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * __ras_eeprom_write -- write records from buffer to EEPROM
+ * @control: pointer to control structure
+ * @buf: pointer to buffer containing data to write
+ * @fri: start writing at this index
+ * @num: number of records to write
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __ras_eeprom_write(struct ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u32 buf_size;
+ int res;
+
+ /* i2c may be unstable in gpu reset */
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = __eeprom_write(ras_core,
+ control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Writing %d EEPROM table records error:%d\n", num, res);
+ } else if (res < buf_size) {
+ /* Short write, return error.*/
+ RAS_DEV_ERR(ras_core->dev,
+ "Wrote %d records out of %d\n",
+ (res/RAS_TABLE_RECORD_SIZE), num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static int ras_eeprom_append_table(struct ras_eeprom_control *control,
+ struct eeprom_umc_record *record,
+ const u32 num)
+{
+ u32 a, b, i;
+ u8 *buf, *pp;
+ int res;
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Encode all of them in one go.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __encode_table_record_to_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ control->update_channel_flag = true;
+ }
+ }
+
+ /* a, first record index to write into.
+ * b, last record index to write into.
+ * a = first index to read (fri) + number of records in the table,
+ * b = a + @num - 1.
+ * Let N = control->ras_max_num_record_count, then we have,
+ * case 0: 0 <= a <= b < N,
+ * just append @num records starting at a;
+ * case 1: 0 <= a < N <= b,
+ * append (N - a) records starting at a, and
+ * append the remainder, b % N + 1, starting at 0.
+ * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases,
+ * case 2a: 0 <= a <= b < N
+ * append num records starting at a; and fix fri if b overwrote it,
+ * and since a <= b, if b overwrote it then a must've also,
+ * and if b didn't overwrite it, then a didn't also.
+ * case 2b: 0 <= b < a < N
+ * write num records starting at a, which wraps around 0=N
+ * and overwrite fri unconditionally. Now from case 2a,
+ * this means that b eclipsed fri to overwrite it and wrap
+ * around 0 again, i.e. b = 2N+r pre modulo N, so we unconditionally
+ * set fri = b + 1 (mod N).
+ * Now, since fri is updated in every case, except the trivial case 0,
+ * the number of records present in the table after writing, is,
+ * num_recs - 1 = b - fri (mod N), and we take the positive value,
+ * by adding an arbitrary multiple of N before taking the modulo N
+ * as shown below.
+ */
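+ /* Worked example: N = 10, fri = 7, ras_num_recs = 2, num = 4.
+ * Then a = 9 and b = 12: case 1 (a < N <= b), so write one record
+ * at index 9 (g0 = 1) and three at indices 0..2 (g1 = 12 % 10 + 1 = 3);
+ * g1 <= fri, so fri stays 7, and ras_num_recs becomes
+ * 1 + (10 + 12 - 7) % 10 = 6, i.e. indices 7, 8, 9, 0, 1, 2.
+ */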
+ a = control->ras_fri + control->ras_num_recs;
+ b = a + num - 1;
+ if (b < control->ras_max_record_count) {
+ res = __ras_eeprom_write(control, buf, a, num);
+ } else if (a < control->ras_max_record_count) {
+ u32 g0, g1;
+
+ g0 = control->ras_max_record_count - a;
+ g1 = b % control->ras_max_record_count + 1;
+ res = __ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE,
+ 0, g1);
+ if (res)
+ goto Out;
+ if (g1 > control->ras_fri)
+ control->ras_fri = g1 % control->ras_max_record_count;
+ } else {
+ a %= control->ras_max_record_count;
+ b %= control->ras_max_record_count;
+
+ if (a <= b) {
+ /* Note that, b - a + 1 = num. */
+ res = __ras_eeprom_write(control, buf, a, num);
+ if (res)
+ goto Out;
+ if (b >= control->ras_fri)
+ control->ras_fri = (b + 1) % control->ras_max_record_count;
+ } else {
+ u32 g0, g1;
+
+ /* b < a, which means, we write from
+ * a to the end of the table, and from
+ * the start of the table to b.
+ */
+ g0 = control->ras_max_record_count - a;
+ g1 = b + 1;
+ res = __ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1);
+ if (res)
+ goto Out;
+ control->ras_fri = g1 % control->ras_max_record_count;
+ }
+ }
+ control->ras_num_recs = 1 +
+ (control->ras_max_record_count + b - control->ras_fri)
+ % control->ras_max_record_count;
+Out:
+ kfree(buf);
+ return res;
+}
+
+static int ras_eeprom_update_header(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int threshold_config = control->record_threshold_config;
+ u8 *buf, *pp, csum;
+ u32 buf_size;
+ int bad_page_count;
+ int res;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ /* Modify the header if it exceeds.
+ */
+ if (threshold_config != 0 &&
+ bad_page_count > control->record_threshold_count) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Saved bad pages %d reaches threshold value %d\n",
+ bad_page_count, control->record_threshold_count);
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = RAS_GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
+
+ if ((threshold_config != WARN_NONSTOP_OVER_THRESHOLD) &&
+ (threshold_config != NONSTOP_OVER_THRESHOLD))
+ ras_core->is_rma = true;
+
+ /* ignore the -RAS_CORE_NOT_SUPPORTED return value */
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__DEVICE_RMA, NULL);
+ }
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ control->tbl_hdr.checksum = 0;
+
+ buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
+ res = -ENOMEM;
+ goto Out;
+ }
+
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ control->ras_record_offset,
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "EEPROM failed reading records:%d\n", res);
+ goto Out;
+ } else if (res < buf_size) {
+ RAS_DEV_ERR(ras_core->dev,
+ "EEPROM read %d out of %d bytes\n", res, buf_size);
+ res = -EIO;
+ goto Out;
+ }
+
+ /*
+ * Bad page records have been stored in EEPROM;
+ * now calculate the GPU health percent.
+ */
+ if (threshold_config != 0 &&
+ control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 &&
+ bad_page_count <= control->record_threshold_count)
+ control->tbl_rai.health_percent = ((control->record_threshold_count -
+ bad_page_count) * 100) / control->record_threshold_count;
+
+ /* Recalc the checksum.
+ */
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
+
+ csum += __calc_hdr_byte_sum(control);
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ /* avoid sign extension when assigning to "checksum" */
+ csum = -csum;
+ control->tbl_hdr.checksum = csum;
+ res = __write_table_header(control);
+ if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+Out:
+ kfree(buf);
+ return res;
+}
+
+/**
+ * ras_eeprom_append -- append records to the EEPROM RAS table
+ * @control: pointer to control structure
+ * @record: array of records to append
+ * @num: number of records in @record array
+ *
+ * Append @num records to the table, recalculate the checksum and write
+ * the table back to EEPROM. The append is rejected if @num plus the
+ * records already stored would exceed control->ras_max_record_count.
+ *
+ * Return 0 on success or if EEPROM is not supported, -errno on error.
+ */
+int ras_eeprom_append(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int res;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (num == 0) {
+ RAS_DEV_ERR(ras_core->dev, "will not append 0 records\n");
+ return -EINVAL;
+ } else if ((num + control->ras_num_recs) > control->ras_max_record_count) {
+ RAS_DEV_ERR(ras_core->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ mutex_lock(&control->ras_tbl_mutex);
+ res = ras_eeprom_append_table(control, record, num);
+ if (!res)
+ res = ras_eeprom_update_header(control);
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+/**
+ * __ras_eeprom_read -- read records from EEPROM into buffer
+ * @control: pointer to control structure
+ * @buf: pointer to buffer to read into
+ * @fri: first record index, start reading at this index, absolute index
+ * @num: number of records to read
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __ras_eeprom_read(struct ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ u32 buf_size;
+ int res;
+
+ /* i2c may be unstable in gpu reset */
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ if (res < 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Reading %d EEPROM table records error:%d\n", num, res);
+ } else if (res < buf_size) {
+ /* Short read, return error.
+ */
+ RAS_DEV_ERR(ras_core->dev,
+ "Read %d records out of %d\n",
+ (res/RAS_TABLE_RECORD_SIZE), num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+int ras_eeprom_read(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, const u32 num)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ int i, res;
+ u8 *buf, *pp;
+ u32 g0, g1;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (num == 0) {
+ RAS_DEV_ERR(ras_core->dev, "will not read 0 records\n");
+ return -EINVAL;
+ } else if (num > control->ras_num_recs) {
+ RAS_DEV_ERR(ras_core->dev,
+ "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
+ return -EINVAL;
+ }
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Determine how many records to read, from the first record
+ * index, fri, to the end of the table, and from the beginning
+ * of the table, such that the total number of records is
+ * @num, and we handle wrap around when fri > 0 and
+ * fri + num > RAS_MAX_RECORD_COUNT.
+ *
+ * First we compute the index of the last element
+ * which would be fetched from each region,
+ * g0 is in [fri, fri + num - 1], and
+ * g1 is in [0, RAS_MAX_RECORD_COUNT - 1].
+ * Then, if g0 < RAS_MAX_RECORD_COUNT, the index of
+ * the last element to fetch, we set g0 to _the number_
+ * of elements to fetch, @num, since we know that the last
+ * index to be fetched does not exceed the table.
+ *
+ * If, however, g0 >= RAS_MAX_RECORD_COUNT, then
+ * we set g0 to the number of elements to read
+ * until the end of the table, and g1 to the number of
+ * elements to read from the beginning of the table.
+ */
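+ /* Worked example: N = 10, fri = 8, num = 5. Then
+ * g0 = 8 + 5 - 1 = 12 >= N, so read g0 = 10 - 8 = 2 records at
+ * indices 8..9 and g1 = 12 % 10 + 1 = 3 records at indices 0..2.
+ */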
+ g0 = control->ras_fri + num - 1;
+ g1 = g0 % control->ras_max_record_count;
+ if (g0 < control->ras_max_record_count) {
+ g0 = num;
+ g1 = 0;
+ } else {
+ g0 = control->ras_max_record_count - control->ras_fri;
+ g1 += 1;
+ }
+
+ mutex_lock(&control->ras_tbl_mutex);
+ res = __ras_eeprom_read(control, buf, control->ras_fri, g0);
+ if (res)
+ goto Out;
+ if (g1) {
+ res = __ras_eeprom_read(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE, 0, g1);
+ if (res)
+ goto Out;
+ }
+
+ res = 0;
+
+ /* All requested records read; now decode them into @record.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __decode_table_record_from_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ control->update_channel_flag = true;
+ }
+ }
+Out:
+ kfree(buf);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+
+ /* determine the eeprom table version before the table is initialized */
+ ras_set_eeprom_table_version(control);
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ return RAS_MAX_RECORD_COUNT_V2_1;
+ else
+ return RAS_MAX_RECORD_COUNT;
+}
+
+/**
+ * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum
+ * @control: pointer to control structure
+ *
+ * Check the checksum of the stored in EEPROM RAS table.
+ *
+ * Return 0 if the checksum is correct,
+ * positive if it is not correct, and
+ * -errno on I/O error.
+ */
+static int __verify_ras_table_checksum(struct ras_eeprom_control *control)
+{
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ int buf_size, res;
+ u8 csum, *buf, *pp;
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Out of memory checking RAS table checksum.\n");
+ return -ENOMEM;
+ }
+
+ res = __eeprom_read(ras_core,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, buf_size);
+ if (res < buf_size) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Partial read for checksum, res:%d\n", res);
+ /* On partial reads, return -EIO.
+ */
+ if (res >= 0)
+ res = -EIO;
+ goto Out;
+ }
+
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
+Out:
+ kfree(buf);
+ return res < 0 ? res : csum;
+}
+
+static int __read_table_ras_info(struct ras_eeprom_control *control)
+{
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ struct ras_core_context *ras_core = to_ras_core_context(control);
+ unsigned char *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * EEPROM table v2.1 and later carry a RAS info block;
+ * read it from the EEPROM.
+ */
+ res = __eeprom_read(ras_core,
+ control->i2c_address + control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+ if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to read EEPROM table ras info, res:%d\n", res);
+ res = res >= 0 ? -EIO : res;
+ goto Out;
+ }
+
+ __decode_table_ras_info_from_buf(rai, buf);
+
+Out:
+ kfree(buf);
+ return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
+}
+
+static int __check_ras_table_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 };
+ struct ras_eeprom_table_header *hdr;
+ int res;
+
+ hdr = &control->tbl_hdr;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (!__get_eeprom_i2c_addr(ras_core, control))
+ return -EINVAL;
+
+ control->ras_header_offset = RAS_HDR_START;
+ control->ras_info_offset = RAS_TABLE_V2_1_INFO_START;
+ mutex_init(&control->ras_tbl_mutex);
+
+ /* Read the table header from EEPROM address */
+ res = __eeprom_read(ras_core,
+ control->i2c_address + control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+ if (res < RAS_TABLE_HEADER_SIZE) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to read EEPROM table header, res:%d\n", res);
+ return res >= 0 ? -EIO : res;
+ }
+
+ __decode_table_header_from_buf(hdr, buf);
+
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ RAS_DEV_INFO(ras_core->dev, "Creating a new EEPROM table\n");
+ return ras_eeprom_reset_table(ras_core);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
+ control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ break;
+ case RAS_TABLE_VER_V1:
+ control->ras_num_recs = RAS_NUM_RECS(hdr);
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
+ }
+
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS header invalid, records in header: %u max allowed :%u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+
+ return 0;
+}
+
+int ras_eeprom_check_storage_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_header *hdr;
+ int bad_page_count;
+ int res = 0;
+
+ if (!__is_ras_eeprom_supported(ras_core))
+ return 0;
+
+ if (!__get_eeprom_i2c_addr(ras_core, control))
+ return -EINVAL;
+
+ hdr = &control->tbl_hdr;
+
+ bad_page_count = ras_umc_get_badpage_count(ras_core);
+ if (hdr->header == RAS_TABLE_HDR_VAL) {
+ RAS_DEV_INFO(ras_core->dev,
+ "Found existing EEPROM table with %d records\n",
+ bad_page_count);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res)
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS table incorrect checksum or error:%d\n", res);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
+ if (10 * bad_page_count >= 9 * control->record_threshold_count)
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS records:%u exceeds 90%% of threshold:%d\n",
+ bad_page_count,
+ control->record_threshold_count);
+
+ } else if (hdr->header == RAS_TABLE_HDR_BAD &&
+ control->record_threshold_config != 0) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res)
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS Table incorrect checksum or error:%d\n", res);
+
+ if (control->record_threshold_count >= bad_page_count) {
+ /* The threshold was increased since the last time the
+ * system booted, so the stored bad pages no longer
+ * exceed record_threshold_count and the table can
+ * accept records again before the page count threshold
+ * is reached.
+ */
+ RAS_DEV_INFO(ras_core->dev,
+ "records:%d threshold:%d, resetting RAS table header signature",
+ bad_page_count,
+ control->record_threshold_count);
+ res = ras_eeprom_correct_header_tag(control, RAS_TABLE_HDR_VAL);
+ } else {
+ RAS_DEV_ERR(ras_core->dev, "RAS records:%d exceed threshold:%d\n",
+ bad_page_count, control->record_threshold_count);
+ if ((control->record_threshold_config == WARN_NONSTOP_OVER_THRESHOLD) ||
+ (control->record_threshold_config == NONSTOP_OVER_THRESHOLD)) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
+ res = 0;
+ } else {
+ ras_core->is_rma = true;
+ RAS_DEV_ERR(ras_core->dev,
+ "User defined threshold is set, runtime service will be halt when threshold is reached\n");
+ }
+ }
+ }
+
+ return res < 0 ? res : 0;
+}
+
+int ras_eeprom_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+ struct ras_eeprom_config *eeprom_cfg;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ ras_core->is_rma = false;
+
+ control = &ras_core->ras_eeprom;
+
+ memset(control, 0, sizeof(*control));
+
+ eeprom_cfg = &ras_core->config->eeprom_cfg;
+ control->record_threshold_config =
+ eeprom_cfg->eeprom_record_threshold_config;
+
+ control->record_threshold_count = ras_eeprom_max_record_count(ras_core);
+ if (eeprom_cfg->eeprom_record_threshold_count <
+ control->record_threshold_count)
+ control->record_threshold_count =
+ eeprom_cfg->eeprom_record_threshold_count;
+
+ control->sys_func = eeprom_cfg->eeprom_sys_fn;
+ control->max_read_len = eeprom_cfg->max_i2c_read_len;
+ control->max_write_len = eeprom_cfg->max_i2c_write_len;
+ control->i2c_adapter = eeprom_cfg->eeprom_i2c_adapter;
+ control->i2c_port = eeprom_cfg->eeprom_i2c_port;
+ control->i2c_address = eeprom_cfg->eeprom_i2c_addr;
+
+ control->update_channel_flag = false;
+
+ return __check_ras_table_status(ras_core);
+}
+
+int ras_eeprom_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ control = &ras_core->ras_eeprom;
+ mutex_destroy(&control->ras_tbl_mutex);
+
+ return 0;
+}
+
+uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core)
+{
+ if (!ras_core)
+ return 0;
+
+ return ras_core->ras_eeprom.ras_num_recs;
+}
+
+void ras_eeprom_sync_info(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control;
+
+ if (!ras_core)
+ return;
+
+ control = &ras_core->ras_eeprom;
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_PAGE_NUM,
+ &control->ras_num_recs);
+ ras_core_event_notify(ras_core, RAS_EVENT_ID__UPDATE_BAD_CHANNEL_BITMAP,
+ &control->bad_channel_bitmap);
+}
+
+enum ras_gpu_health_status
+ ras_eeprom_check_gpu_status(struct ras_core_context *ras_core)
+{
+ struct ras_eeprom_control *control = &ras_core->ras_eeprom;
+ struct ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+
+ if (!__is_ras_eeprom_supported(ras_core) ||
+ !control->record_threshold_config)
+ return RAS_GPU_HEALTH_NONE;
+
+ if (control->tbl_hdr.header == RAS_TABLE_HDR_BAD)
+ return RAS_GPU_IN_BAD_STATUS;
+
+ return rai->rma_status;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h
new file mode 100644
index 000000000000..2abe566c18b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_eeprom.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_EEPROM_H__
+#define __RAS_EEPROM_H__
+#include "ras_sys.h"
+
+#define RAS_TABLE_VER_V1 0x00010000
+#define RAS_TABLE_VER_V2_1 0x00021000
+#define RAS_TABLE_VER_V3 0x00030000
+
+#define NONSTOP_OVER_THRESHOLD -2
+#define WARN_NONSTOP_OVER_THRESHOLD -1
+#define DISABLE_RETIRE_PAGE 0
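+/*
+ * record_threshold_config selects the bad-page threshold policy applied
+ * by ras_eeprom_check_storage_status(): a positive value is a
+ * user-defined threshold that flags the device for RMA once exceeded,
+ * -1/-2 keep the device running past the threshold (with a warning),
+ * and 0 disables bad-page retirement.
+ */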
+
+/*
+ * Bad address pfn : eeprom_umc_record.retired_row_pfn[39:0],
+ * nps mode: eeprom_umc_record.retired_row_pfn[47:40]
+ */
+#define EEPROM_RECORD_UMC_ADDR_MASK 0xFFFFFFFFFFULL
+#define EEPROM_RECORD_UMC_NPS_MASK 0xFF0000000000ULL
+#define EEPROM_RECORD_UMC_NPS_SHIFT 40
+
+#define EEPROM_RECORD_UMC_NPS_MODE(RECORD) \
+ (((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_NPS_MASK) >> \
+ EEPROM_RECORD_UMC_NPS_SHIFT)
+
+#define EEPROM_RECORD_UMC_ADDR_PFN(RECORD) \
+ ((RECORD)->retired_row_pfn & EEPROM_RECORD_UMC_ADDR_MASK)
+
+#define EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(RECORD, ADDR, NPS) \
+do { \
+ uint64_t tmp = (NPS); \
+ tmp = ((tmp << EEPROM_RECORD_UMC_NPS_SHIFT) & EEPROM_RECORD_UMC_NPS_MASK); \
+ tmp |= (ADDR) & EEPROM_RECORD_UMC_ADDR_MASK; \
+ (RECORD)->retired_row_pfn = tmp; \
+} while (0)
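+
+/*
+ * Illustrative example (values are made up): pack a retired-row PFN and
+ * its NPS mode into one field, then read both back:
+ *
+ *   struct eeprom_umc_record rec;
+ *
+ *   EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(&rec, 0x12345ULL, 2);
+ *   EEPROM_RECORD_UMC_ADDR_PFN(&rec);  yields 0x12345
+ *   EEPROM_RECORD_UMC_NPS_MODE(&rec);  yields 2
+ */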
+
+enum ras_gpu_health_status {
+ RAS_GPU_HEALTH_NONE = 0,
+ RAS_GPU_HEALTH_USABLE = 1,
+ RAS_GPU_RETIRED__ECC_REACH_THRESHOLD = 2,
+ RAS_GPU_IN_BAD_STATUS = 3,
+};
+
+enum ras_eeprom_err_type {
+ RAS_EEPROM_ERR_NA,
+ RAS_EEPROM_ERR_RECOVERABLE,
+ RAS_EEPROM_ERR_NON_RECOVERABLE,
+ RAS_EEPROM_ERR_COUNT,
+};
+
+struct ras_eeprom_table_header {
+ uint32_t header;
+ uint32_t version;
+ uint32_t first_rec_offset;
+ uint32_t tbl_size;
+ uint32_t checksum;
+} __packed;
+
+struct ras_eeprom_table_ras_info {
+ u8 rma_status;
+ u8 health_percent;
+ u16 ecc_page_threshold;
+ u32 padding[64 - 1];
+} __packed;
+
+struct ras_eeprom_control {
+ struct ras_eeprom_table_header tbl_hdr;
+ struct ras_eeprom_table_ras_info tbl_rai;
+
+ /* record threshold */
+ int record_threshold_config;
+ uint32_t record_threshold_count;
+ bool update_channel_flag;
+
+ const struct ras_eeprom_sys_func *sys_func;
+ void *i2c_adapter;
+ u32 i2c_port;
+ u16 max_read_len;
+ u16 max_write_len;
+
+ /* Base I2C EEPROM 19-bit memory address,
+ * where the table is located. For more information,
+ * see the top of amdgpu_eeprom.c.
+ */
+ u32 i2c_address;
+
+ /* Byte offset from @i2c_address at which the table
+ * header is found; the records start right after
+ * the header.
+ */
+ u32 ras_header_offset;
+ u32 ras_info_offset;
+ u32 ras_record_offset;
+
+ /* Number of records in the table.
+ */
+ u32 ras_num_recs;
+
+ /* First record index to read, 0-based.
+ * Range is [0, num_recs-1]. This is
+ * an absolute index, starting right after
+ * the table header.
+ */
+ u32 ras_fri;
+
+ /* Maximum possible number of records
+ * we could store, i.e. the maximum capacity
+ * of the table.
+ */
+ u32 ras_max_record_count;
+
+ /* Protect table access via this mutex.
+ */
+ struct mutex ras_tbl_mutex;
+
+ /* Bitmap of memory channels on which bad pages
+ * have occurred.
+ */
+ u32 bad_channel_bitmap;
+};
+
+/*
+ * Represents single table record. Packed to be easily serialized into byte
+ * stream.
+ */
+struct eeprom_umc_record {
+
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+
+ uint64_t retired_row_pfn;
+ uint64_t ts;
+
+ enum ras_eeprom_err_type err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+
+ /* The following variables will not be saved to eeprom.
+ */
+ uint64_t cur_nps_retired_row_pfn;
+ uint32_t cur_nps_bank;
+ uint32_t cur_nps;
+};
+
+struct ras_core_context;
+int ras_eeprom_hw_init(struct ras_core_context *ras_core);
+int ras_eeprom_hw_fini(struct ras_core_context *ras_core);
+
+int ras_eeprom_reset_table(struct ras_core_context *ras_core);
+
+bool ras_eeprom_check_safety_watermark(struct ras_core_context *ras_core);
+
+int ras_eeprom_read(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, const u32 num);
+
+int ras_eeprom_append(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, const u32 num);
+
+uint32_t ras_eeprom_max_record_count(struct ras_core_context *ras_core);
+uint32_t ras_eeprom_get_record_count(struct ras_core_context *ras_core);
+void ras_eeprom_sync_info(struct ras_core_context *ras_core);
+
+int ras_eeprom_check_storage_status(struct ras_core_context *ras_core);
+enum ras_gpu_health_status
+ ras_eeprom_check_gpu_status(struct ras_core_context *ras_core);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c
new file mode 100644
index 000000000000..f5ce28777705
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_gfx_v9_0.h"
+#include "ras_gfx.h"
+#include "ras_core_status.h"
+
+static const struct ras_gfx_ip_func *ras_gfx_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ return &gfx_ras_func_v9_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "GFX ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock)
+{
+ struct ras_gfx *gfx = &ras_core->ras_gfx;
+
+ return gfx->ip_func->get_ta_subblock(ras_core,
+ error_type, subblock, ta_subblock);
+}
+
+int ras_gfx_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_gfx *gfx = &ras_core->ras_gfx;
+
+ gfx->gfx_ip_version = ras_core->config->gfx_ip_version;
+
+ gfx->ip_func = ras_gfx_get_ip_funcs(ras_core, gfx->gfx_ip_version);
+
+ return gfx->ip_func ? RAS_CORE_OK : -EINVAL;
+}
+
+int ras_gfx_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h
new file mode 100644
index 000000000000..8a42d69fb0ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_GFX_H__
+#define __RAS_GFX_H__
+
+struct ras_gfx_ip_func {
+ int (*get_ta_subblock)(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock);
+};
+
+struct ras_gfx {
+ uint32_t gfx_ip_version;
+ const struct ras_gfx_ip_func *ip_func;
+};
+
+int ras_gfx_hw_init(struct ras_core_context *ras_core);
+int ras_gfx_hw_fini(struct ras_core_context *ras_core);
+
+int ras_gfx_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock);
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c
new file mode 100644
index 000000000000..6213d3f125be
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_gfx_v9_0.h"
+#include "ras_core_status.h"
+
+enum ta_gfx_v9_subblock {
+ /*CPC*/
+ TA_GFX_V9__GFX_CPC_INDEX_START = 0,
+ TA_GFX_V9__GFX_CPC_SCRATCH = TA_GFX_V9__GFX_CPC_INDEX_START,
+ TA_GFX_V9__GFX_CPC_UCODE,
+ TA_GFX_V9__GFX_DC_STATE_ME1,
+ TA_GFX_V9__GFX_DC_CSINVOC_ME1,
+ TA_GFX_V9__GFX_DC_RESTORE_ME1,
+ TA_GFX_V9__GFX_DC_STATE_ME2,
+ TA_GFX_V9__GFX_DC_CSINVOC_ME2,
+ TA_GFX_V9__GFX_DC_RESTORE_ME2,
+ TA_GFX_V9__GFX_CPC_INDEX_END = TA_GFX_V9__GFX_DC_RESTORE_ME2,
+ /* CPF*/
+ TA_GFX_V9__GFX_CPF_INDEX_START,
+ TA_GFX_V9__GFX_CPF_ROQ_ME2 = TA_GFX_V9__GFX_CPF_INDEX_START,
+ TA_GFX_V9__GFX_CPF_ROQ_ME1,
+ TA_GFX_V9__GFX_CPF_TAG,
+ TA_GFX_V9__GFX_CPF_INDEX_END = TA_GFX_V9__GFX_CPF_TAG,
+ /* CPG*/
+ TA_GFX_V9__GFX_CPG_INDEX_START,
+ TA_GFX_V9__GFX_CPG_DMA_ROQ = TA_GFX_V9__GFX_CPG_INDEX_START,
+ TA_GFX_V9__GFX_CPG_DMA_TAG,
+ TA_GFX_V9__GFX_CPG_TAG,
+ TA_GFX_V9__GFX_CPG_INDEX_END = TA_GFX_V9__GFX_CPG_TAG,
+ /* GDS*/
+ TA_GFX_V9__GFX_GDS_INDEX_START,
+ TA_GFX_V9__GFX_GDS_MEM = TA_GFX_V9__GFX_GDS_INDEX_START,
+ TA_GFX_V9__GFX_GDS_INPUT_QUEUE,
+ TA_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ TA_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ TA_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ TA_GFX_V9__GFX_GDS_INDEX_END = TA_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ /* SPI*/
+ TA_GFX_V9__GFX_SPI_SR_MEM,
+ /* SQ*/
+ TA_GFX_V9__GFX_SQ_INDEX_START,
+ TA_GFX_V9__GFX_SQ_SGPR = TA_GFX_V9__GFX_SQ_INDEX_START,
+ TA_GFX_V9__GFX_SQ_LDS_D,
+ TA_GFX_V9__GFX_SQ_LDS_I,
+ TA_GFX_V9__GFX_SQ_VGPR, /* VGPR = SP*/
+ TA_GFX_V9__GFX_SQ_INDEX_END = TA_GFX_V9__GFX_SQ_VGPR,
+ /* SQC (3 ranges)*/
+ TA_GFX_V9__GFX_SQC_INDEX_START,
+ /* SQC range 0*/
+ TA_GFX_V9__GFX_SQC_INDEX0_START = TA_GFX_V9__GFX_SQC_INDEX_START,
+ TA_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO =
+ TA_GFX_V9__GFX_SQC_INDEX0_START,
+ TA_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ TA_GFX_V9__GFX_SQC_INDEX0_END =
+ TA_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1*/
+ TA_GFX_V9__GFX_SQC_INDEX1_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM =
+ TA_GFX_V9__GFX_SQC_INDEX1_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX1_END =
+ TA_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2*/
+ TA_GFX_V9__GFX_SQC_INDEX2_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM =
+ TA_GFX_V9__GFX_SQC_INDEX2_START,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX2_END =
+ TA_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ TA_GFX_V9__GFX_SQC_INDEX_END = TA_GFX_V9__GFX_SQC_INDEX2_END,
+ /* TA*/
+ TA_GFX_V9__GFX_TA_INDEX_START,
+ TA_GFX_V9__GFX_TA_FS_DFIFO = TA_GFX_V9__GFX_TA_INDEX_START,
+ TA_GFX_V9__GFX_TA_FS_AFIFO,
+ TA_GFX_V9__GFX_TA_FL_LFIFO,
+ TA_GFX_V9__GFX_TA_FX_LFIFO,
+ TA_GFX_V9__GFX_TA_FS_CFIFO,
+ TA_GFX_V9__GFX_TA_INDEX_END = TA_GFX_V9__GFX_TA_FS_CFIFO,
+ /* TCA*/
+ TA_GFX_V9__GFX_TCA_INDEX_START,
+ TA_GFX_V9__GFX_TCA_HOLE_FIFO = TA_GFX_V9__GFX_TCA_INDEX_START,
+ TA_GFX_V9__GFX_TCA_REQ_FIFO,
+ TA_GFX_V9__GFX_TCA_INDEX_END = TA_GFX_V9__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges)*/
+ TA_GFX_V9__GFX_TCC_INDEX_START,
+ /* TCC range 0*/
+ TA_GFX_V9__GFX_TCC_INDEX0_START = TA_GFX_V9__GFX_TCC_INDEX_START,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA = TA_GFX_V9__GFX_TCC_INDEX0_START,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0,
+ TA_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1,
+ TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0,
+ TA_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1,
+ TA_GFX_V9__GFX_TCC_HIGH_RATE_TAG,
+ TA_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ TA_GFX_V9__GFX_TCC_INDEX0_END = TA_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1*/
+ TA_GFX_V9__GFX_TCC_INDEX1_START,
+ TA_GFX_V9__GFX_TCC_IN_USE_DEC = TA_GFX_V9__GFX_TCC_INDEX1_START,
+ TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ TA_GFX_V9__GFX_TCC_INDEX1_END =
+ TA_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2*/
+ TA_GFX_V9__GFX_TCC_INDEX2_START,
+ TA_GFX_V9__GFX_TCC_RETURN_DATA = TA_GFX_V9__GFX_TCC_INDEX2_START,
+ TA_GFX_V9__GFX_TCC_RETURN_CONTROL,
+ TA_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO,
+ TA_GFX_V9__GFX_TCC_WRITE_RETURN,
+ TA_GFX_V9__GFX_TCC_WRITE_CACHE_READ,
+ TA_GFX_V9__GFX_TCC_SRC_FIFO,
+ TA_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ TA_GFX_V9__GFX_TCC_INDEX2_END =
+ TA_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3*/
+ TA_GFX_V9__GFX_TCC_INDEX3_START,
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO = TA_GFX_V9__GFX_TCC_INDEX3_START,
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ TA_GFX_V9__GFX_TCC_INDEX3_END =
+ TA_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4*/
+ TA_GFX_V9__GFX_TCC_INDEX4_START,
+ TA_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ TA_GFX_V9__GFX_TCC_INDEX4_START,
+ TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_GFX_V9__GFX_TCC_INDEX4_END =
+ TA_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ TA_GFX_V9__GFX_TCC_INDEX_END = TA_GFX_V9__GFX_TCC_INDEX4_END,
+ /* TCI*/
+ TA_GFX_V9__GFX_TCI_WRITE_RAM,
+ /* TCP*/
+ TA_GFX_V9__GFX_TCP_INDEX_START,
+ TA_GFX_V9__GFX_TCP_CACHE_RAM = TA_GFX_V9__GFX_TCP_INDEX_START,
+ TA_GFX_V9__GFX_TCP_LFIFO_RAM,
+ TA_GFX_V9__GFX_TCP_CMD_FIFO,
+ TA_GFX_V9__GFX_TCP_VM_FIFO,
+ TA_GFX_V9__GFX_TCP_DB_RAM,
+ TA_GFX_V9__GFX_TCP_UTCL1_LFIFO0,
+ TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ TA_GFX_V9__GFX_TCP_INDEX_END = TA_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ /* TD*/
+ TA_GFX_V9__GFX_TD_INDEX_START,
+ TA_GFX_V9__GFX_TD_SS_FIFO_LO = TA_GFX_V9__GFX_TD_INDEX_START,
+ TA_GFX_V9__GFX_TD_SS_FIFO_HI,
+ TA_GFX_V9__GFX_TD_CS_FIFO,
+ TA_GFX_V9__GFX_TD_INDEX_END = TA_GFX_V9__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges)*/
+ TA_GFX_V9__GFX_EA_INDEX_START,
+ /* EA range 0*/
+ TA_GFX_V9__GFX_EA_INDEX0_START = TA_GFX_V9__GFX_EA_INDEX_START,
+ TA_GFX_V9__GFX_EA_DRAMRD_CMDMEM = TA_GFX_V9__GFX_EA_INDEX0_START,
+ TA_GFX_V9__GFX_EA_DRAMWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_DRAMWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_RRET_TAGMEM,
+ TA_GFX_V9__GFX_EA_WRET_TAGMEM,
+ TA_GFX_V9__GFX_EA_GMIRD_CMDMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_INDEX0_END = TA_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1*/
+ TA_GFX_V9__GFX_EA_INDEX1_START,
+ TA_GFX_V9__GFX_EA_DRAMRD_PAGEMEM = TA_GFX_V9__GFX_EA_INDEX1_START,
+ TA_GFX_V9__GFX_EA_DRAMWR_PAGEMEM,
+ TA_GFX_V9__GFX_EA_IORD_CMDMEM,
+ TA_GFX_V9__GFX_EA_IOWR_CMDMEM,
+ TA_GFX_V9__GFX_EA_IOWR_DATAMEM,
+ TA_GFX_V9__GFX_EA_GMIRD_PAGEMEM,
+ TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ TA_GFX_V9__GFX_EA_INDEX1_END = TA_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2*/
+ TA_GFX_V9__GFX_EA_INDEX2_START,
+ TA_GFX_V9__GFX_EA_MAM_D0MEM = TA_GFX_V9__GFX_EA_INDEX2_START,
+ TA_GFX_V9__GFX_EA_MAM_D1MEM,
+ TA_GFX_V9__GFX_EA_MAM_D2MEM,
+ TA_GFX_V9__GFX_EA_MAM_D3MEM,
+ TA_GFX_V9__GFX_EA_INDEX2_END = TA_GFX_V9__GFX_EA_MAM_D3MEM,
+ TA_GFX_V9__GFX_EA_INDEX_END = TA_GFX_V9__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank*/
+ TA_GFX_V9__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker*/
+ TA_GFX_V9__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache*/
+ TA_GFX_V9__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache*/
+ TA_GFX_V9__UTC_ATCL2_CACHE_4K_BANK,
+ TA_GFX_V9__GFX_MAX
+};
+
+struct ras_gfx_subblock_t {
+ const char *name;
+ int ta_subblock;
+ int hw_supported_error_type;
+ int sw_supported_error_type;
+};
+
+#define RAS_GFX_SUB_BLOCK(subblock, a, b, c, d, e, f, g, h) \
+ [RAS_GFX_V9__##subblock] = { \
+ #subblock, \
+ TA_GFX_V9__##subblock, \
+ ((a) | ((b) << 1) | ((c) << 2) | ((d) << 3)), \
+ (((e) << 1) | ((f) << 3) | (g) | ((h) << 2)), \
+ }
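+
+/*
+ * a..d populate the hardware-supported error-type mask (bits 0..3) and
+ * e..h the software-supported one, packed as g -> bit 0, e -> bit 1,
+ * h -> bit 2, f -> bit 3.  For example, GFX_CPC_SCRATCH below
+ * (0, 1, 1, 1, 1, 0, 0, 1) yields hw mask 0xE and sw mask 0x6.
+ */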
+
+const struct ras_gfx_subblock_t ras_gfx_v9_0_subblocks[] = {
+ RAS_GFX_SUB_BLOCK(GFX_CPC_SCRATCH, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPC_UCODE, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME1, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_STATE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_CSINVOC_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_DC_RESTORE_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME2, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_ROQ_ME1, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPF_TAG, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_ROQ, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_DMA_TAG, 0, 1, 1, 1, 0, 1, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_CPG_TAG, 0, 1, 1, 1, 1, 1, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_INPUT_QUEUE, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_CMD_RAM_MEM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PHY_DATA_RAM_MEM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_GDS_OA_PIPE_MEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SPI_SR_MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_SGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_D, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_LDS_I, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQ_VGPR, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU0_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU1_UTCL1_LFIFO, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_WRITE_DATA_BUF, 0, 1, 1, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_CU2_UTCL1_LFIFO, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKA_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_TAG_RAM, 0, 1, 1, 1, 1, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO, 1, 0, 0, 1, 0,
+ 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_INST_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_TAG_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_HIT_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_MISS_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_SQC_DATA_BANKB_BANK_RAM, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_DFIFO, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_AFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FL_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FX_LFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TA_FS_CFIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCA_HOLE_FIFO, 1, 0, 0, 1, 0, 1, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCA_REQ_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_0_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_0, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DATA_BANK_1_1, 0, 1, 1, 1, 1, 0, 0,
+ 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_0, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_DIRTY_BANK_1, 0, 1, 1, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_HIGH_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LOW_RATE_TAG, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_DEC, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_IN_USE_TRANSFER, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_DATA, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_RETURN_CONTROL, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_UC_ATOMIC_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_RETURN, 1, 0, 0, 1, 0, 1, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRITE_CACHE_READ, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_SRC_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 1, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_CACHE_TAG_PROBE_FIFO, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_LATENCY_FIFO_NEXT_RAM, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_WRRET_TAG_WRITE_RETURN, 1, 0, 0, 1, 0, 0,
+ 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCC_ATOMIC_RETURN_BUFFER, 1, 0, 0, 1, 0, 0, 0,
+ 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCI_WRITE_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_CACHE_RAM, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_LFIFO_RAM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_CMD_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_VM_FIFO, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_DB_RAM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO0, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TCP_UTCL1_LFIFO1, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_LO, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_TD_SS_FIFO_HI, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_TD_CS_FIFO, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_CMDMEM, 0, 1, 1, 1, 1, 0, 0, 1),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_RRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_WRET_TAGMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_CMDMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_DATAMEM, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_DRAMWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IORD_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_CMDMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_IOWR_DATAMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIRD_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_GMIWR_PAGEMEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D0MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D1MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D2MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(GFX_EA_MAM_D3MEM, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_VML2_BANK_CACHE, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_VML2_WALKER, 0, 1, 1, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_2M_BANK, 1, 0, 0, 1, 0, 0, 0, 0),
+ RAS_GFX_SUB_BLOCK(UTC_ATCL2_CACHE_4K_BANK, 0, 1, 1, 1, 0, 0, 0, 0),
+};
+
+static int gfx_v9_0_get_ta_subblock(struct ras_core_context *ras_core,
+ uint32_t error_type, uint32_t subblock, uint32_t *ta_subblock)
+{
+ const struct ras_gfx_subblock_t *gfx_subblock;
+
+ if (subblock >= ARRAY_SIZE(ras_gfx_v9_0_subblocks))
+ return -EINVAL;
+
+ gfx_subblock = &ras_gfx_v9_0_subblocks[subblock];
+ if (!gfx_subblock->name)
+ return -EPERM;
+
+ if (!(gfx_subblock->hw_supported_error_type & error_type)) {
+ RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, hardware do not support type 0x%x\n",
+ gfx_subblock->name, error_type);
+ return -EPERM;
+ }
+
+ if (!(gfx_subblock->sw_supported_error_type & error_type)) {
+ RAS_DEV_ERR(ras_core->dev, "GFX Subblock %s, driver do not support type 0x%x\n",
+ gfx_subblock->name, error_type);
+ return -EPERM;
+ }
+
+ *ta_subblock = gfx_subblock->ta_subblock;
+
+ return 0;
+}
+
+const struct ras_gfx_ip_func gfx_ras_func_v9_0 = {
+ .get_ta_subblock = gfx_v9_0_get_ta_subblock,
+};
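+
+/*
+ * Usage sketch (illustrative only): ras_gfx_get_ta_subblock() maps a
+ * driver-side index such as RAS_GFX_V9__GFX_SQ_SGPR to the TA firmware
+ * index TA_GFX_V9__GFX_SQ_SGPR via the table above, returning -EPERM
+ * when the requested error type is unsupported for that subblock.
+ */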
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h
new file mode 100644
index 000000000000..659b56619747
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_gfx_v9_0.h
@@ -0,0 +1,259 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_GFX_V9_0_H__
+#define __RAS_GFX_V9_0_H__
+
+enum ras_gfx_v9_subblock {
+ /* CPC */
+ RAS_GFX_V9__GFX_CPC_INDEX_START = 0,
+ RAS_GFX_V9__GFX_CPC_SCRATCH =
+ RAS_GFX_V9__GFX_CPC_INDEX_START,
+ RAS_GFX_V9__GFX_CPC_UCODE,
+ RAS_GFX_V9__GFX_DC_STATE_ME1,
+ RAS_GFX_V9__GFX_DC_CSINVOC_ME1,
+ RAS_GFX_V9__GFX_DC_RESTORE_ME1,
+ RAS_GFX_V9__GFX_DC_STATE_ME2,
+ RAS_GFX_V9__GFX_DC_CSINVOC_ME2,
+ RAS_GFX_V9__GFX_DC_RESTORE_ME2,
+ RAS_GFX_V9__GFX_CPC_INDEX_END =
+ RAS_GFX_V9__GFX_DC_RESTORE_ME2,
+ /* CPF */
+ RAS_GFX_V9__GFX_CPF_INDEX_START,
+ RAS_GFX_V9__GFX_CPF_ROQ_ME2 =
+ RAS_GFX_V9__GFX_CPF_INDEX_START,
+ RAS_GFX_V9__GFX_CPF_ROQ_ME1,
+ RAS_GFX_V9__GFX_CPF_TAG,
+ RAS_GFX_V9__GFX_CPF_INDEX_END = RAS_GFX_V9__GFX_CPF_TAG,
+ /* CPG */
+ RAS_GFX_V9__GFX_CPG_INDEX_START,
+ RAS_GFX_V9__GFX_CPG_DMA_ROQ =
+ RAS_GFX_V9__GFX_CPG_INDEX_START,
+ RAS_GFX_V9__GFX_CPG_DMA_TAG,
+ RAS_GFX_V9__GFX_CPG_TAG,
+ RAS_GFX_V9__GFX_CPG_INDEX_END = RAS_GFX_V9__GFX_CPG_TAG,
+ /* GDS */
+ RAS_GFX_V9__GFX_GDS_INDEX_START,
+ RAS_GFX_V9__GFX_GDS_MEM = RAS_GFX_V9__GFX_GDS_INDEX_START,
+ RAS_GFX_V9__GFX_GDS_INPUT_QUEUE,
+ RAS_GFX_V9__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ RAS_GFX_V9__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ RAS_GFX_V9__GFX_GDS_INDEX_END =
+ RAS_GFX_V9__GFX_GDS_OA_PIPE_MEM,
+ /* SPI */
+ RAS_GFX_V9__GFX_SPI_SR_MEM,
+ /* SQ */
+ RAS_GFX_V9__GFX_SQ_INDEX_START,
+ RAS_GFX_V9__GFX_SQ_SGPR = RAS_GFX_V9__GFX_SQ_INDEX_START,
+ RAS_GFX_V9__GFX_SQ_LDS_D,
+ RAS_GFX_V9__GFX_SQ_LDS_I,
+ RAS_GFX_V9__GFX_SQ_VGPR,
+ RAS_GFX_V9__GFX_SQ_INDEX_END = RAS_GFX_V9__GFX_SQ_VGPR,
+ /* SQC (3 ranges) */
+ RAS_GFX_V9__GFX_SQC_INDEX_START,
+ /* SQC range 0 */
+ RAS_GFX_V9__GFX_SQC_INDEX0_START =
+ RAS_GFX_V9__GFX_SQC_INDEX_START,
+ RAS_GFX_V9__GFX_SQC_INST_UTCL1_LFIFO =
+ RAS_GFX_V9__GFX_SQC_INDEX0_START,
+ RAS_GFX_V9__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ RAS_GFX_V9__GFX_SQC_INDEX0_END =
+ RAS_GFX_V9__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1 */
+ RAS_GFX_V9__GFX_SQC_INDEX1_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_TAG_RAM =
+ RAS_GFX_V9__GFX_SQC_INDEX1_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKA_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_TAG_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX1_END =
+ RAS_GFX_V9__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2 */
+ RAS_GFX_V9__GFX_SQC_INDEX2_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_TAG_RAM =
+ RAS_GFX_V9__GFX_SQC_INDEX2_START,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_INST_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_TAG_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX2_END =
+ RAS_GFX_V9__GFX_SQC_DATA_BANKB_BANK_RAM,
+ RAS_GFX_V9__GFX_SQC_INDEX_END =
+ RAS_GFX_V9__GFX_SQC_INDEX2_END,
+ /* TA */
+ RAS_GFX_V9__GFX_TA_INDEX_START,
+ RAS_GFX_V9__GFX_TA_FS_DFIFO =
+ RAS_GFX_V9__GFX_TA_INDEX_START,
+ RAS_GFX_V9__GFX_TA_FS_AFIFO,
+ RAS_GFX_V9__GFX_TA_FL_LFIFO,
+ RAS_GFX_V9__GFX_TA_FX_LFIFO,
+ RAS_GFX_V9__GFX_TA_FS_CFIFO,
+ RAS_GFX_V9__GFX_TA_INDEX_END = RAS_GFX_V9__GFX_TA_FS_CFIFO,
+ /* TCA */
+ RAS_GFX_V9__GFX_TCA_INDEX_START,
+ RAS_GFX_V9__GFX_TCA_HOLE_FIFO =
+ RAS_GFX_V9__GFX_TCA_INDEX_START,
+ RAS_GFX_V9__GFX_TCA_REQ_FIFO,
+ RAS_GFX_V9__GFX_TCA_INDEX_END =
+ RAS_GFX_V9__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges) */
+ RAS_GFX_V9__GFX_TCC_INDEX_START,
+ /* TCC range 0 */
+ RAS_GFX_V9__GFX_TCC_INDEX0_START =
+ RAS_GFX_V9__GFX_TCC_INDEX_START,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA =
+ RAS_GFX_V9__GFX_TCC_INDEX0_START,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_0_1,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_0,
+ RAS_GFX_V9__GFX_TCC_CACHE_DATA_BANK_1_1,
+ RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_0,
+ RAS_GFX_V9__GFX_TCC_CACHE_DIRTY_BANK_1,
+ RAS_GFX_V9__GFX_TCC_HIGH_RATE_TAG,
+ RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ RAS_GFX_V9__GFX_TCC_INDEX0_END =
+ RAS_GFX_V9__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1 */
+ RAS_GFX_V9__GFX_TCC_INDEX1_START,
+ RAS_GFX_V9__GFX_TCC_IN_USE_DEC =
+ RAS_GFX_V9__GFX_TCC_INDEX1_START,
+ RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ RAS_GFX_V9__GFX_TCC_INDEX1_END =
+ RAS_GFX_V9__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2 */
+ RAS_GFX_V9__GFX_TCC_INDEX2_START,
+ RAS_GFX_V9__GFX_TCC_RETURN_DATA =
+ RAS_GFX_V9__GFX_TCC_INDEX2_START,
+ RAS_GFX_V9__GFX_TCC_RETURN_CONTROL,
+ RAS_GFX_V9__GFX_TCC_UC_ATOMIC_FIFO,
+ RAS_GFX_V9__GFX_TCC_WRITE_RETURN,
+ RAS_GFX_V9__GFX_TCC_WRITE_CACHE_READ,
+ RAS_GFX_V9__GFX_TCC_SRC_FIFO,
+ RAS_GFX_V9__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ RAS_GFX_V9__GFX_TCC_INDEX2_END =
+ RAS_GFX_V9__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3 */
+ RAS_GFX_V9__GFX_TCC_INDEX3_START,
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO =
+ RAS_GFX_V9__GFX_TCC_INDEX3_START,
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ RAS_GFX_V9__GFX_TCC_INDEX3_END =
+ RAS_GFX_V9__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4 */
+ RAS_GFX_V9__GFX_TCC_INDEX4_START,
+ RAS_GFX_V9__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ RAS_GFX_V9__GFX_TCC_INDEX4_START,
+ RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ RAS_GFX_V9__GFX_TCC_INDEX4_END =
+ RAS_GFX_V9__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ RAS_GFX_V9__GFX_TCC_INDEX_END =
+ RAS_GFX_V9__GFX_TCC_INDEX4_END,
+ /* TCI */
+ RAS_GFX_V9__GFX_TCI_WRITE_RAM,
+ /* TCP */
+ RAS_GFX_V9__GFX_TCP_INDEX_START,
+ RAS_GFX_V9__GFX_TCP_CACHE_RAM =
+ RAS_GFX_V9__GFX_TCP_INDEX_START,
+ RAS_GFX_V9__GFX_TCP_LFIFO_RAM,
+ RAS_GFX_V9__GFX_TCP_CMD_FIFO,
+ RAS_GFX_V9__GFX_TCP_VM_FIFO,
+ RAS_GFX_V9__GFX_TCP_DB_RAM,
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO0,
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ RAS_GFX_V9__GFX_TCP_INDEX_END =
+ RAS_GFX_V9__GFX_TCP_UTCL1_LFIFO1,
+ /* TD */
+ RAS_GFX_V9__GFX_TD_INDEX_START,
+ RAS_GFX_V9__GFX_TD_SS_FIFO_LO =
+ RAS_GFX_V9__GFX_TD_INDEX_START,
+ RAS_GFX_V9__GFX_TD_SS_FIFO_HI,
+ RAS_GFX_V9__GFX_TD_CS_FIFO,
+ RAS_GFX_V9__GFX_TD_INDEX_END = RAS_GFX_V9__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges) */
+ RAS_GFX_V9__GFX_EA_INDEX_START,
+ /* EA range 0 */
+ RAS_GFX_V9__GFX_EA_INDEX0_START =
+ RAS_GFX_V9__GFX_EA_INDEX_START,
+ RAS_GFX_V9__GFX_EA_DRAMRD_CMDMEM =
+ RAS_GFX_V9__GFX_EA_INDEX0_START,
+ RAS_GFX_V9__GFX_EA_DRAMWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_DRAMWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_RRET_TAGMEM,
+ RAS_GFX_V9__GFX_EA_WRET_TAGMEM,
+ RAS_GFX_V9__GFX_EA_GMIRD_CMDMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_INDEX0_END =
+ RAS_GFX_V9__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1 */
+ RAS_GFX_V9__GFX_EA_INDEX1_START,
+ RAS_GFX_V9__GFX_EA_DRAMRD_PAGEMEM =
+ RAS_GFX_V9__GFX_EA_INDEX1_START,
+ RAS_GFX_V9__GFX_EA_DRAMWR_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_IORD_CMDMEM,
+ RAS_GFX_V9__GFX_EA_IOWR_CMDMEM,
+ RAS_GFX_V9__GFX_EA_IOWR_DATAMEM,
+ RAS_GFX_V9__GFX_EA_GMIRD_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ RAS_GFX_V9__GFX_EA_INDEX1_END =
+ RAS_GFX_V9__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2 */
+ RAS_GFX_V9__GFX_EA_INDEX2_START,
+ RAS_GFX_V9__GFX_EA_MAM_D0MEM =
+ RAS_GFX_V9__GFX_EA_INDEX2_START,
+ RAS_GFX_V9__GFX_EA_MAM_D1MEM,
+ RAS_GFX_V9__GFX_EA_MAM_D2MEM,
+ RAS_GFX_V9__GFX_EA_MAM_D3MEM,
+ RAS_GFX_V9__GFX_EA_INDEX2_END =
+ RAS_GFX_V9__GFX_EA_MAM_D3MEM,
+ RAS_GFX_V9__GFX_EA_INDEX_END =
+ RAS_GFX_V9__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank */
+ RAS_GFX_V9__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker */
+ RAS_GFX_V9__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache */
+ RAS_GFX_V9__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache */
+ RAS_GFX_V9__UTC_ATCL2_CACHE_4K_BANK,
+ RAS_GFX_V9__GFX_MAX
+};
+
+extern const struct ras_gfx_ip_func gfx_ras_func_v9_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c
new file mode 100644
index 000000000000..0a838fdcb2f6
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_core_status.h"
+#include "ras_log_ring.h"
+
+#define RAS_LOG_MAX_QUERY_SIZE 0xC000
+#define RAS_LOG_MEM_TEMP_SIZE 0x200
+#define RAS_LOG_MEMPOOL_SIZE \
+ (RAS_LOG_MAX_QUERY_SIZE + RAS_LOG_MEM_TEMP_SIZE)
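+
+/* The pool holds up to RAS_LOG_MAX_QUERY_SIZE queryable entries plus
+ * RAS_LOG_MEM_TEMP_SIZE of headroom; when it runs dry, the oldest
+ * RAS_LOG_MEM_TEMP_SIZE entries are evicted in
+ * ras_log_ring_add_log_event().
+ */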
+
+#define BATCH_IDX_TO_TREE_IDX(batch_idx, sn) (((batch_idx) << 8) | (sn))
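+/* e.g. batch_id 5, sub-seqno 2 maps to radix-tree index (5 << 8) | 2 = 0x502 */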
+
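+/*
+ * Template ACA register set for a synthetic RMA log entry;
+ * ras_log_ring_add_log_event() copies these values and then encodes
+ * the socket id into the IPID register.
+ */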
+static const uint64_t ras_rma_aca_reg[ACA_REG_MAX_COUNT] = {
+ [ACA_REG_IDX__CTL] = 0x1,
+ [ACA_REG_IDX__STATUS] = 0xB000000000000137,
+ [ACA_REG_IDX__ADDR] = 0x0,
+ [ACA_REG_IDX__MISC0] = 0x0,
+ [ACA_REG_IDX__CONFG] = 0x1ff00000002,
+ [ACA_REG_IDX__IPID] = 0x9600000000,
+ [ACA_REG_IDX__SYND] = 0x0,
+};
+
+static uint64_t ras_log_ring_get_logged_ecc_count(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t count = 0;
+
+ if (log_ring->logged_ecc_count < 0) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Error: the logged ras count should not less than 0!\n");
+ count = 0;
+ } else {
+ count = log_ring->logged_ecc_count;
+ }
+
+ if (count > RAS_LOG_MEMPOOL_SIZE)
+ RAS_DEV_WARN(ras_core->dev,
+ "Error: the logged ras count is out of range!\n");
+
+ return count;
+}
+
+static int ras_log_ring_add_data(struct ras_core_context *ras_core,
+ struct ras_log_info *log, struct ras_log_batch_tag *batch_tag)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ if (batch_tag && (batch_tag->sub_seqno >= MAX_RECORD_PER_BATCH)) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Invalid batch sub seqno:%d, batch:0x%llx\n",
+ batch_tag->sub_seqno, batch_tag->batch_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ if (batch_tag) {
+ log->seqno =
+ BATCH_IDX_TO_TREE_IDX(batch_tag->batch_id, batch_tag->sub_seqno);
+ batch_tag->sub_seqno++;
+ } else {
+ log->seqno = BATCH_IDX_TO_TREE_IDX(log_ring->mono_upward_batch_id, 0);
+ log_ring->mono_upward_batch_id++;
+ }
+ ret = radix_tree_insert(&log_ring->ras_log_root, log->seqno, log);
+ if (!ret)
+ log_ring->logged_ecc_count++;
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to add ras log! seqno:0x%llx, ret:%d\n",
+ log->seqno, ret);
+ mempool_free(log, log_ring->ras_log_mempool);
+ }
+
+ return ret;
+}
+
+static int ras_log_ring_delete_data(struct ras_core_context *ras_core, uint32_t count)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ uint32_t i = 0, j = 0;
+ uint64_t batch_id, idx;
+ void *data;
+ int ret = -ENODATA;
+
+ if (count > ras_log_ring_get_logged_ecc_count(ras_core))
+ return -EINVAL;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_id = log_ring->last_del_batch_id;
+ while (batch_id < log_ring->mono_upward_batch_id) {
+ for (j = 0; j < MAX_RECORD_PER_BATCH; j++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, j);
+ data = radix_tree_delete(&log_ring->ras_log_root, idx);
+ if (data) {
+ mempool_free(data, log_ring->ras_log_mempool);
+ log_ring->logged_ecc_count--;
+ i++;
+ }
+ }
+ batch_id = ++log_ring->last_del_batch_id;
+ if (i >= count) {
+ ret = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ return ret;
+}
+
+static void ras_log_ring_clear_log_tree(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t batch_id, idx;
+ unsigned long flags = 0;
+ void *data;
+ int j;
+
+ if ((log_ring->mono_upward_batch_id <= log_ring->last_del_batch_id) &&
+ !log_ring->logged_ecc_count)
+ return;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_id = log_ring->last_del_batch_id;
+ while (batch_id < log_ring->mono_upward_batch_id) {
+ for (j = 0; j < MAX_RECORD_PER_BATCH; j++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, j);
+ data = radix_tree_delete(&log_ring->ras_log_root, idx);
+ if (data) {
+ mempool_free(data, log_ring->ras_log_mempool);
+ log_ring->logged_ecc_count--;
+ }
+ }
+ batch_id++;
+ }
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+}
+
+int ras_log_ring_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ memset(log_ring, 0, sizeof(*log_ring));
+
+ log_ring->ras_log_mempool = mempool_create_kmalloc_pool(
+ RAS_LOG_MEMPOOL_SIZE, sizeof(struct ras_log_info));
+ if (!log_ring->ras_log_mempool)
+ return -ENOMEM;
+
+ INIT_RADIX_TREE(&log_ring->ras_log_root, GFP_KERNEL);
+
+ spin_lock_init(&log_ring->spin_lock);
+
+ return 0;
+}
+
+int ras_log_ring_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ ras_log_ring_clear_log_tree(ras_core);
+ log_ring->logged_ecc_count = 0;
+ log_ring->last_del_batch_id = 0;
+ log_ring->mono_upward_batch_id = 0;
+
+ mempool_destroy(log_ring->ras_log_mempool);
+
+ return 0;
+}
+
+struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ struct ras_log_batch_tag *batch_tag;
+ unsigned long flags = 0;
+
+ batch_tag = kzalloc(sizeof(*batch_tag), GFP_KERNEL);
+ if (!batch_tag)
+ return NULL;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ batch_tag->batch_id = log_ring->mono_upward_batch_id;
+ log_ring->mono_upward_batch_id++;
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ batch_tag->sub_seqno = 0;
+ batch_tag->timestamp = ras_core_get_utc_second_timestamp(ras_core);
+ return batch_tag;
+}
+
+void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core,
+ struct ras_log_batch_tag *batch_tag)
+{
+ kfree(batch_tag);
+}
+
+void ras_log_ring_add_log_event(struct ras_core_context *ras_core,
+ enum ras_log_event event, void *data, struct ras_log_batch_tag *batch_tag)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ struct device_system_info dev_info = {0};
+ struct ras_log_info *log;
+ uint64_t socket_id;
+ void *obj;
+
+ obj = mempool_alloc_preallocated(log_ring->ras_log_mempool);
+ if (!obj ||
+ (ras_log_ring_get_logged_ecc_count(ras_core) >= RAS_LOG_MEMPOOL_SIZE)) {
+ ras_log_ring_delete_data(ras_core, RAS_LOG_MEM_TEMP_SIZE);
+ if (!obj)
+ obj = mempool_alloc_preallocated(log_ring->ras_log_mempool);
+ }
+
+ if (!obj) {
+ RAS_DEV_ERR(ras_core->dev, "ERROR: Failed to alloc ras log buffer!\n");
+ return;
+ }
+
+ log = (struct ras_log_info *)obj;
+
+ memset(log, 0, sizeof(*log));
+ log->timestamp =
+ batch_tag ? batch_tag->timestamp : ras_core_get_utc_second_timestamp(ras_core);
+ log->event = event;
+
+ if (data)
+ memcpy(&log->aca_reg, data, sizeof(log->aca_reg));
+
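+ /* RMA entries are synthesized from the ras_rma_aca_reg template;
+ * the socket id is folded into IPID, with socket_id / 4 in bit 0
+ * and socket_id % 4 in bits 45:44.
+ */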
+ if (event == RAS_LOG_EVENT_RMA) {
+ memcpy(&log->aca_reg, ras_rma_aca_reg, sizeof(log->aca_reg));
+ ras_core_get_device_system_info(ras_core, &dev_info);
+ socket_id = dev_info.socket_id;
+ log->aca_reg.regs[ACA_REG_IDX__IPID] |= ((socket_id / 4) & 0x01);
+ log->aca_reg.regs[ACA_REG_IDX__IPID] |= (((socket_id % 4) & 0x3) << 44);
+ }
+
+ ras_log_ring_add_data(ras_core, log, batch_tag);
+}
+
+static struct ras_log_info *ras_log_ring_lookup_data(struct ras_core_context *ras_core,
+ uint64_t idx)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ unsigned long flags = 0;
+ void *data;
+
+ spin_lock_irqsave(&log_ring->spin_lock, flags);
+ data = radix_tree_lookup(&log_ring->ras_log_root, idx);
+ spin_unlock_irqrestore(&log_ring->spin_lock, flags);
+
+ return (struct ras_log_info *)data;
+}
+
+int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_id,
+ struct ras_log_info **log_arr, uint32_t arr_num)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+ uint64_t idx;
+ uint32_t i, count = 0;
+ void *data;
+
+ if ((batch_id >= log_ring->mono_upward_batch_id) ||
+ (batch_id < log_ring->last_del_batch_id))
+ return -EINVAL;
+
+ for (i = 0; i < MAX_RECORD_PER_BATCH; i++) {
+ idx = BATCH_IDX_TO_TREE_IDX(batch_id, i);
+ data = ras_log_ring_lookup_data(ras_core, idx);
+ if (data) {
+ log_arr[count++] = data;
+ if (count >= arr_num)
+ break;
+ }
+ }
+
+ return count;
+}
+
+int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core,
+ struct ras_log_batch_overview *overview)
+{
+ struct ras_log_ring *log_ring = &ras_core->ras_log_ring;
+
+ overview->logged_batch_count =
+ log_ring->mono_upward_batch_id - log_ring->last_del_batch_id;
+ overview->last_batch_id = log_ring->mono_upward_batch_id;
+ overview->first_batch_id = log_ring->last_del_batch_id;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h
new file mode 100644
index 000000000000..0ff6cc35678d
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_log_ring.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_LOG_RING_H__
+#define __RAS_LOG_RING_H__
+#include "ras_aca.h"
+
+#define MAX_RECORD_PER_BATCH 32
+
+#define RAS_LOG_SEQNO_TO_BATCH_IDX(seqno) ((seqno) >> 8)
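+/* Inverse of BATCH_IDX_TO_TREE_IDX(): e.g. seqno 0x502 -> batch 5, sub-seqno 2 */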
+
+enum ras_log_event {
+ RAS_LOG_EVENT_NONE,
+ RAS_LOG_EVENT_UE,
+ RAS_LOG_EVENT_DE,
+ RAS_LOG_EVENT_CE,
+ RAS_LOG_EVENT_POISON_CREATION,
+ RAS_LOG_EVENT_POISON_CONSUMPTION,
+ RAS_LOG_EVENT_RMA,
+ RAS_LOG_EVENT_COUNT_MAX,
+};
+
+struct ras_aca_reg {
+ uint64_t regs[ACA_REG_MAX_COUNT];
+};
+
+struct ras_log_info {
+ uint64_t seqno;
+ uint64_t timestamp;
+ enum ras_log_event event;
+ union {
+ struct ras_aca_reg aca_reg;
+ };
+};
+
+struct ras_log_batch_tag {
+ uint64_t batch_id;
+ uint64_t timestamp;
+ uint32_t sub_seqno;
+};
+
+struct ras_log_ring {
+ void *ras_log_mempool;
+ struct radix_tree_root ras_log_root;
+ spinlock_t spin_lock;
+ uint64_t mono_upward_batch_id;
+ uint64_t last_del_batch_id;
+ int logged_ecc_count;
+};
+
+struct ras_log_batch_overview {
+ uint64_t first_batch_id;
+ uint64_t last_batch_id;
+ uint32_t logged_batch_count;
+};
+
+struct ras_core_context;
+
+int ras_log_ring_sw_init(struct ras_core_context *ras_core);
+int ras_log_ring_sw_fini(struct ras_core_context *ras_core);
+
+struct ras_log_batch_tag *ras_log_ring_create_batch_tag(struct ras_core_context *ras_core);
+void ras_log_ring_destroy_batch_tag(struct ras_core_context *ras_core,
+ struct ras_log_batch_tag *tag);
+void ras_log_ring_add_log_event(struct ras_core_context *ras_core,
+ enum ras_log_event event, void *data, struct ras_log_batch_tag *tag);
+
+int ras_log_ring_get_batch_records(struct ras_core_context *ras_core, uint64_t batch_idx,
+ struct ras_log_info **log_arr, uint32_t arr_num);
+
+int ras_log_ring_get_batch_overview(struct ras_core_context *ras_core,
+ struct ras_log_batch_overview *overview);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c
new file mode 100644
index 000000000000..f3321df85021
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_mp1.h"
+#include "ras_mp1_v13_0.h"
+
+static const struct ras_mp1_ip_func *ras_mp1_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ return &mp1_ras_func_v13_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "MP1 ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_mp1_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ return mp1->ip_func->get_valid_bank_count(ras_core, type, count);
+}
+
+int ras_mp1_dump_bank(struct ras_core_context *ras_core,
+ u32 type, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ return mp1->ip_func->dump_valid_bank(ras_core, type, idx, reg_idx, val);
+}
+
+int ras_mp1_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+
+ mp1->mp1_ip_version = ras_core->config->mp1_ip_version;
+ mp1->sys_func = ras_core->config->mp1_cfg.mp1_sys_fn;
+ if (!mp1->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS mp1 sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ mp1->ip_func = ras_mp1_get_ip_funcs(ras_core, mp1->mp1_ip_version);
+
+ return mp1->ip_func ? RAS_CORE_OK : -EINVAL;
+}
+
+int ras_mp1_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h
new file mode 100644
index 000000000000..de1d08286f41
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_MP1_H__
+#define __RAS_MP1_H__
+#include "ras.h"
+
+enum ras_err_type;
+struct ras_mp1_ip_func {
+ int (*get_valid_bank_count)(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count);
+ int (*dump_valid_bank)(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val);
+};
+
+struct ras_mp1 {
+ uint32_t mp1_ip_version;
+ const struct ras_mp1_ip_func *ip_func;
+ const struct ras_mp1_sys_func *sys_func;
+};
+
+int ras_mp1_hw_init(struct ras_core_context *ras_core);
+int ras_mp1_hw_fini(struct ras_core_context *ras_core);
+
+int ras_mp1_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count);
+
+int ras_mp1_dump_bank(struct ras_core_context *ras_core,
+		u32 type, u32 idx, u32 reg_idx, u64 *val);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c
new file mode 100644
index 000000000000..310d39fc816b
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_mp1.h"
+#include "ras_core_status.h"
+#include "ras_mp1_v13_0.h"
+
+#define RAS_MP1_MSG_QueryValidMcaCount 0x36
+#define RAS_MP1_MSG_McaBankDumpDW 0x37
+#define RAS_MP1_MSG_ClearMcaOnRead 0x39
+#define RAS_MP1_MSG_QueryValidMcaCeCount 0x3A
+#define RAS_MP1_MSG_McaBankCeDumpDW 0x3B
+
+#define MAX_UE_BANKS_PER_QUERY 12
+#define MAX_CE_BANKS_PER_QUERY 12
+
+static int mp1_v13_0_get_bank_count(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 *count)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+ const struct ras_mp1_sys_func *sys_func = mp1->sys_func;
+ uint32_t bank_count = 0;
+ u32 msg;
+ int ret;
+
+ if (!count)
+ return -EINVAL;
+
+ if (!sys_func || !sys_func->mp1_get_valid_bank_count)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ switch (type) {
+ case RAS_ERR_TYPE__UE:
+ msg = RAS_MP1_MSG_QueryValidMcaCount;
+ break;
+ case RAS_ERR_TYPE__CE:
+ case RAS_ERR_TYPE__DE:
+ msg = RAS_MP1_MSG_QueryValidMcaCeCount;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = sys_func->mp1_get_valid_bank_count(ras_core, msg, &bank_count);
+ if (!ret) {
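+		/* Treat a count at or above the per-query limit as a bogus response. */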
+ if (((type == RAS_ERR_TYPE__UE) && (bank_count >= MAX_UE_BANKS_PER_QUERY)) ||
+ ((type == RAS_ERR_TYPE__CE) && (bank_count >= MAX_CE_BANKS_PER_QUERY)))
+ return -EINVAL;
+
+ *count = bank_count;
+ }
+
+ return ret;
+}
+
+static int mp1_v13_0_dump_bank(struct ras_core_context *ras_core,
+ enum ras_err_type type, u32 idx, u32 reg_idx, u64 *val)
+{
+ struct ras_mp1 *mp1 = &ras_core->ras_mp1;
+ const struct ras_mp1_sys_func *sys_func = mp1->sys_func;
+ u32 msg;
+
+ if (!sys_func || !sys_func->mp1_dump_valid_bank)
+ return -RAS_CORE_NOT_SUPPORTED;
+
+ switch (type) {
+ case RAS_ERR_TYPE__UE:
+ msg = RAS_MP1_MSG_McaBankDumpDW;
+ break;
+ case RAS_ERR_TYPE__CE:
+ case RAS_ERR_TYPE__DE:
+ msg = RAS_MP1_MSG_McaBankCeDumpDW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return sys_func->mp1_dump_valid_bank(ras_core, msg, idx, reg_idx, val);
+}
+
+const struct ras_mp1_ip_func mp1_ras_func_v13_0 = {
+ .get_valid_bank_count = mp1_v13_0_get_bank_count,
+ .dump_valid_bank = mp1_v13_0_dump_bank,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h
new file mode 100644
index 000000000000..2edfdb5f6a75
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_mp1_v13_0.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_MP1_V13_0_H__
+#define __RAS_MP1_V13_0_H__
+#include "ras_mp1.h"
+
+extern const struct ras_mp1_ip_func mp1_ras_func_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c
new file mode 100644
index 000000000000..bfddd104d548
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_nbio.h"
+#include "ras_nbio_v7_9.h"
+
+static const struct ras_nbio_ip_func *ras_nbio_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
+ return &ras_nbio_v7_9;
+ default:
+		RAS_DEV_ERR(ras_core->dev,
+			"NBIO IP version (0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+int ras_nbio_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ nbio->nbio_ip_version = ras_core->config->nbio_ip_version;
+ nbio->sys_func = ras_core->config->nbio_cfg.nbio_sys_fn;
+ if (!nbio->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS nbio sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ nbio->ip_func = ras_nbio_get_ip_funcs(ras_core, nbio->nbio_ip_version);
+ if (!nbio->ip_func)
+ return -EINVAL;
+
+ if (nbio->sys_func) {
+ if (nbio->sys_func->set_ras_controller_irq_state)
+ nbio->sys_func->set_ras_controller_irq_state(ras_core, true);
+ if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+ nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, true);
+ }
+
+ return 0;
+}
+
+int ras_nbio_hw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ if (nbio->sys_func) {
+ if (nbio->sys_func->set_ras_controller_irq_state)
+ nbio->sys_func->set_ras_controller_irq_state(ras_core, false);
+ if (nbio->sys_func->set_ras_err_event_athub_irq_state)
+ nbio->sys_func->set_ras_err_event_athub_irq_state(ras_core, false);
+ }
+
+ return 0;
+}
+
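+/* Dispatch RAS controller and ATHUB error-event interrupts to the per-IP handlers. */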
+bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data)
+{
+ struct ras_nbio *nbio = &ras_core->ras_nbio;
+
+ if (nbio->ip_func) {
+ if (nbio->ip_func->handle_ras_controller_intr_no_bifring)
+ nbio->ip_func->handle_ras_controller_intr_no_bifring(ras_core);
+ if (nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring)
+ nbio->ip_func->handle_ras_err_event_athub_intr_no_bifring(ras_core);
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h
new file mode 100644
index 000000000000..0a1313e59a02
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_NBIO_H__
+#define __RAS_NBIO_H__
+#include "ras.h"
+
+struct ras_core_context;
+
+struct ras_nbio_ip_func {
+ int (*handle_ras_controller_intr_no_bifring)(struct ras_core_context *ras_core);
+ int (*handle_ras_err_event_athub_intr_no_bifring)(struct ras_core_context *ras_core);
+ uint32_t (*get_memory_partition_mode)(struct ras_core_context *ras_core);
+};
+
+struct ras_nbio {
+ uint32_t nbio_ip_version;
+ const struct ras_nbio_ip_func *ip_func;
+ const struct ras_nbio_sys_func *sys_func;
+};
+
+int ras_nbio_hw_init(struct ras_core_context *ras_core);
+int ras_nbio_hw_fini(struct ras_core_context *ras_core);
+bool ras_nbio_handle_irq_error(struct ras_core_context *ras_core, void *data);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c
new file mode 100644
index 000000000000..f17d708ec668
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_nbio_v7_9.h"
+
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR__SHIFT 0x12
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR_MASK 0x00040000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS__SHIFT 0x2
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS_MASK 0x00000004L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR__SHIFT 0x11
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_CLEAR_MASK 0x00020000L
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS__SHIFT 0x1
+#define BIF_BX0_BIF_DOORBELL_INT_CNTL__RAS_CNTLR_INTERRUPT_STATUS_MASK 0x00000002L
+
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL_BASE_IDX 2
+#define regBIF_BX0_BIF_DOORBELL_INT_CNTL 0x00fe
+
+#define regBIF_BX0_BIF_INTR_CNTL 0x0101
+#define regBIF_BX0_BIF_INTR_CNTL_BASE_IDX 2
+
+/* BIF_BX0_BIF_INTR_CNTL */
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0
+#define BIF_BX0_BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L
+
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS 0x0164
+#define regBIF_BX_PF0_PARTITION_MEM_STATUS_BASE_IDX 2
+/* BIF_BX_PF0_PARTITION_MEM_STATUS */
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE__SHIFT 0x0
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE__SHIFT 0x4
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__CHANGE_STATUE_MASK 0x0000000FL
+#define BIF_BX_PF0_PARTITION_MEM_STATUS__NPS_MODE_MASK 0x00000FF0L
+
+static int nbio_v7_9_handle_ras_controller_intr_no_bifring(struct ras_core_context *ras_core)
+{
+ uint32_t bif_doorbell_intr_cntl = 0;
+
+ bif_doorbell_intr_cntl =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_CNTLR_INTERRUPT_CLEAR, 1);
+
+ RAS_DEV_WREG32_SOC15(ras_core->dev,
+ NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ /* TODO: handle ras controller interrupt */
+ }
+
+ return 0;
+}
+
+static int nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring(struct ras_core_context *ras_core)
+{
+ uint32_t bif_doorbell_intr_cntl = 0;
+ int ret = 0;
+
+ bif_doorbell_intr_cntl =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL);
+
+ if (REG_GET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
+ /* driver has to clear the interrupt status when bif ring is disabled */
+ bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
+ BIF_BX0_BIF_DOORBELL_INT_CNTL,
+ RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
+
+ RAS_DEV_WREG32_SOC15(ras_core->dev,
+ NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+ ret = ras_core_handle_fatal_error(ras_core);
+ }
+
+ return ret;
+}
+
+static uint32_t nbio_v7_9_get_memory_partition_mode(struct ras_core_context *ras_core)
+{
+ uint32_t mem_status;
+ uint32_t mem_mode;
+
+ mem_status =
+ RAS_DEV_RREG32_SOC15(ras_core->dev, NBIO, 0, regBIF_BX_PF0_PARTITION_MEM_STATUS);
+
+	/* Each bit of NPS_MODE represents a mode 1-8 */
+	mem_mode = REG_GET_FIELD(mem_status, BIF_BX_PF0_PARTITION_MEM_STATUS, NPS_MODE);
+
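+	/* ffs() returns the 1-based index of the first set bit, i.e. the NPS mode. */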
+ return ffs(mem_mode);
+}
+
+const struct ras_nbio_ip_func ras_nbio_v7_9 = {
+ .handle_ras_controller_intr_no_bifring =
+ nbio_v7_9_handle_ras_controller_intr_no_bifring,
+ .handle_ras_err_event_athub_intr_no_bifring =
+ nbio_v7_9_handle_ras_err_event_athub_intr_no_bifring,
+ .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h
new file mode 100644
index 000000000000..8711c82a927f
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_nbio_v7_9.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_NBIO_V7_9_H__
+#define __RAS_NBIO_V7_9_H__
+#include "ras_nbio.h"
+
+extern const struct ras_nbio_ip_func ras_nbio_v7_9;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.c b/drivers/gpu/drm/amd/ras/rascore/ras_process.c
new file mode 100644
index 000000000000..3267dcdb169c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_process.h"
+
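+/* The event FIFO queues up to 128 poison-consumption requests. */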
+#define RAS_EVENT_FIFO_SIZE (128 * sizeof(struct ras_event_req))
+
+#define RAS_POLLING_ECC_TIMEOUT 300
+
+static int ras_process_put_event(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = kfifo_in_spinlocked(&ras_proc->event_fifo,
+ req, sizeof(*req), &ras_proc->fifo_spinlock);
+ if (!ret) {
+ RAS_DEV_ERR(ras_core->dev, "Poison message fifo is full!\n");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int ras_process_add_reset_gpu_event(struct ras_core_context *ras_core,
+ uint32_t reset_cause)
+{
+ struct ras_event_req req = {0};
+
+ req.reset = reset_cause;
+
+ return ras_process_put_event(ras_core, &req);
+}
+
+static int ras_process_get_event(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ return kfifo_out_spinlocked(&ras_proc->event_fifo,
+ req, sizeof(*req), &ras_proc->fifo_spinlock);
+}
+
+static void ras_process_clear_event_fifo(struct ras_core_context *ras_core)
+{
+ struct ras_event_req req;
+ int ret;
+
+ do {
+ ret = ras_process_get_event(ras_core, &req);
+ } while (ret);
+}
+
+#define AMDGPU_RAS_WAITING_DATA_READY 200
+static int ras_process_umc_event(struct ras_core_context *ras_core,
+ uint32_t event_count)
+{
+ struct ras_ecc_count ecc_data;
+ int ret = 0;
+ uint32_t timeout = 0;
+ uint32_t detected_de_count = 0;
+
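+
+	/*
+	 * Poll the ECC data until the expected number of deferred errors is
+	 * observed, or until no new data arrives within the wait window.
+	 */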
+ do {
+ memset(&ecc_data, 0, sizeof(ecc_data));
+ ret = ras_core_update_ecc_info(ras_core);
+ if (ret)
+ return ret;
+
+ ret = ras_core_query_block_ecc_data(ras_core, RAS_BLOCK_ID__UMC, &ecc_data);
+ if (ret)
+ return ret;
+
+ if (ecc_data.new_de_count) {
+ detected_de_count += ecc_data.new_de_count;
+ timeout = 0;
+ } else {
+ if (!timeout && event_count)
+ timeout = AMDGPU_RAS_WAITING_DATA_READY;
+
+ if (timeout) {
+ if (!--timeout)
+ break;
+
+ msleep(1);
+ }
+ }
+ } while (detected_de_count < event_count);
+
+ if (detected_de_count && ras_core_gpu_is_rma(ras_core))
+ ras_process_add_reset_gpu_event(ras_core, GPU_RESET_CAUSE_RMA);
+
+ return 0;
+}
+
+static int ras_process_non_umc_event(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ struct ras_event_req req;
+	/* kfifo_len() returns bytes, not records */
+	uint32_t event_count = kfifo_len(&ras_proc->event_fifo) / sizeof(req);
+ uint32_t reset_flags = 0;
+ int ret = 0, i;
+
+ for (i = 0; i < event_count; i++) {
+ memset(&req, 0, sizeof(req));
+ ret = ras_process_get_event(ras_core, &req);
+ if (!ret)
+ continue;
+
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__POISON_CONSUMPTION, &req);
+
+ reset_flags |= req.reset;
+
+ if (req.reset == GPU_RESET_CAUSE_RMA)
+ continue;
+
+ if (req.reset)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} GPU reset for %s RAS poison consumption is issued!\n",
+ req.seqno, ras_core_get_ras_block_name(req.block));
+ else
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} %s RAS poison consumption is issued!\n",
+ req.seqno, ras_core_get_ras_block_name(req.block));
+ }
+
+ if (reset_flags) {
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RESET_GPU, &reset_flags);
+ if (!ret && (reset_flags & GPU_RESET_CAUSE_RMA))
+ return -RAS_CORE_GPU_IN_MODE1_RESET;
+ }
+
+ return ret;
+}
+
+int ras_process_handle_ras_event(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ uint32_t umc_event_count;
+ int ret;
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RAS_EVENT_PROC_BEGIN, NULL);
+ if (ret)
+ return ret;
+
+ ras_aca_clear_fatal_flag(ras_core);
+ ras_umc_log_pending_bad_bank(ras_core);
+
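+	/*
+	 * Drain UMC interrupts in batches; new interrupts may arrive while
+	 * the current batch is being processed.
+	 */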
+ do {
+ umc_event_count = atomic_read(&ras_proc->umc_interrupt_count);
+ ret = ras_process_umc_event(ras_core, umc_event_count);
+ if (ret == -RAS_CORE_GPU_IN_MODE1_RESET)
+ break;
+
+ if (umc_event_count)
+ atomic_sub(umc_event_count, &ras_proc->umc_interrupt_count);
+ } while (atomic_read(&ras_proc->umc_interrupt_count));
+
+ if ((ret != -RAS_CORE_GPU_IN_MODE1_RESET) &&
+ (kfifo_len(&ras_proc->event_fifo)))
+ ret = ras_process_non_umc_event(ras_core);
+
+ if (ret == -RAS_CORE_GPU_IN_MODE1_RESET) {
+ /* Clear poison fifo */
+ ras_process_clear_event_fifo(ras_core);
+ atomic_set(&ras_proc->umc_interrupt_count, 0);
+ }
+
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RAS_EVENT_PROC_END, NULL);
+ return ret;
+}
+
+static int thread_wait_condition(void *param)
+{
+ struct ras_process *ras_proc = (struct ras_process *)param;
+
+ return (kthread_should_stop() ||
+ atomic_read(&ras_proc->ras_interrupt_req));
+}
+
+static int ras_process_thread(void *context)
+{
+ struct ras_core_context *ras_core = (struct ras_core_context *)context;
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ while (!kthread_should_stop()) {
+ ras_wait_event_interruptible_timeout(&ras_proc->ras_process_wq,
+ thread_wait_condition, ras_proc,
+ msecs_to_jiffies(RAS_POLLING_ECC_TIMEOUT));
+
+ if (kthread_should_stop())
+ break;
+
+ if (!ras_core->is_initialized)
+ continue;
+
+ atomic_set(&ras_proc->ras_interrupt_req, 0);
+
+ if (ras_core_gpu_in_reset(ras_core))
+ continue;
+
+ if (ras_core->sys_fn && ras_core->sys_fn->async_handle_ras_event)
+ ras_core->sys_fn->async_handle_ras_event(ras_core, NULL);
+ else
+ ras_process_handle_ras_event(ras_core);
+ }
+
+ return 0;
+}
+
+int ras_process_init(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = kfifo_alloc(&ras_proc->event_fifo, RAS_EVENT_FIFO_SIZE, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&ras_proc->fifo_spinlock);
+
+ init_waitqueue_head(&ras_proc->ras_process_wq);
+
+	ras_proc->ras_process_thread = kthread_run(ras_process_thread,
+			(void *)ras_core, "ras_process_thread");
+	if (IS_ERR(ras_proc->ras_process_thread)) {
+		RAS_DEV_ERR(ras_core->dev, "Failed to create ras_process_thread.\n");
+		ret = PTR_ERR(ras_proc->ras_process_thread);
+		ras_proc->ras_process_thread = NULL;
+		goto err;
+	}
+
+ return 0;
+
+err:
+ ras_process_fini(ras_core);
+ return ret;
+}
+
+int ras_process_fini(struct ras_core_context *ras_core)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ if (ras_proc->ras_process_thread) {
+ kthread_stop(ras_proc->ras_process_thread);
+ ras_proc->ras_process_thread = NULL;
+ }
+
+ kfifo_free(&ras_proc->event_fifo);
+
+ return 0;
+}
+
+static int ras_process_add_umc_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+
+ atomic_inc(&ras_proc->umc_interrupt_count);
+ atomic_inc(&ras_proc->ras_interrupt_req);
+
+ wake_up(&ras_proc->ras_process_wq);
+ return 0;
+}
+
+static int ras_process_add_non_umc_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req)
+{
+ struct ras_process *ras_proc = &ras_core->ras_proc;
+ int ret;
+
+ ret = ras_process_put_event(ras_core, req);
+ if (!ret) {
+ atomic_inc(&ras_proc->ras_interrupt_req);
+ wake_up(&ras_proc->ras_process_wq);
+ }
+
+ return ret;
+}
+
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req, bool is_umc)
+{
+ int ret;
+
+ if (!ras_core)
+ return -EINVAL;
+
+ if (!ras_core->is_initialized)
+ return -EPERM;
+
+ if (is_umc)
+ ret = ras_process_add_umc_interrupt_req(ras_core, req);
+ else
+ ret = ras_process_add_non_umc_interrupt_req(ras_core, req);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_process.h b/drivers/gpu/drm/amd/ras/rascore/ras_process.h
new file mode 100644
index 000000000000..28458b50510e
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_process.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_PROCESS_H__
+#define __RAS_PROCESS_H__
+
+struct ras_event_req {
+ uint64_t seqno;
+ uint32_t idx_vf;
+ uint32_t block;
+ uint16_t pasid;
+ uint32_t reset;
+ void *pasid_fn;
+ void *data;
+};
+
+struct ras_process {
+ void *dev;
+ void *ras_process_thread;
+ wait_queue_head_t ras_process_wq;
+ atomic_t ras_interrupt_req;
+ atomic_t umc_interrupt_count;
+ struct kfifo event_fifo;
+ spinlock_t fifo_spinlock;
+};
+
+struct ras_core_context;
+int ras_process_init(struct ras_core_context *ras_core);
+int ras_process_fini(struct ras_core_context *ras_core);
+int ras_process_handle_ras_event(struct ras_core_context *ras_core);
+int ras_process_add_interrupt_req(struct ras_core_context *ras_core,
+ struct ras_event_req *req, bool is_umc);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c
new file mode 100644
index 000000000000..ccdb42d2dd60
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_ta_if.h"
+#include "ras_psp.h"
+#include "ras_psp_v13_0.h"
+
+/* Position of the instance value in sub_block_index of
+ * ta_ras_trigger_error_input; the sub-block uses the lower 12 bits.
+ */
+#define RAS_TA_INST_MASK 0xfffff000
+#define RAS_TA_INST_SHIFT 0xc
+
+static const struct ras_psp_ip_func *ras_psp_get_ip_funcs(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ return &ras_psp_v13_0;
+ default:
+		RAS_DEV_ERR(ras_core->dev,
+			"PSP IP version (0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
+static int ras_psp_sync_system_ras_psp_status(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+ struct ras_psp_sys_status status = {0};
+ int ret;
+
+ if (psp->sys_func && psp->sys_func->get_ras_psp_system_status) {
+ ret = psp->sys_func->get_ras_psp_system_status(ras_core, &status);
+ if (ret)
+ return ret;
+
+ if (status.initialized) {
+ ta_ctx->preload_ras_ta_enabled = true;
+ ta_ctx->ras_ta_initialized = status.initialized;
+ ta_ctx->session_id = status.session_id;
+ }
+
+ psp_ctx->external_mutex = status.psp_cmd_mutex;
+ }
+
+ return 0;
+}
+
+static int ras_psp_get_ras_ta_init_param(struct ras_core_context *ras_core,
+ struct ras_ta_init_param *ras_ta_param)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ if (psp->sys_func && psp->sys_func->get_ras_ta_init_param)
+ return psp->sys_func->get_ras_ta_init_param(ras_core, ras_ta_param);
+
+	RAS_DEV_ERR(ras_core->dev, "get_ras_ta_init_param API is not configured!\n");
+ return -EACCES;
+}
+
+static struct gpu_mem_block *ras_psp_get_gpu_mem(struct ras_core_context *ras_core,
+ enum gpu_mem_type mem_type)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ struct gpu_mem_block *gpu_mem = NULL;
+ int ret;
+
+ switch (mem_type) {
+ case GPU_MEM_TYPE_RAS_PSP_RING:
+ gpu_mem = &psp->psp_ring.ras_ring_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_PSP_CMD:
+ gpu_mem = &psp->psp_ctx.psp_cmd_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_PSP_FENCE:
+ gpu_mem = &psp->psp_ctx.out_fence_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_TA_FW:
+ gpu_mem = &psp->ta_ctx.fw_gpu_mem;
+ break;
+ case GPU_MEM_TYPE_RAS_TA_CMD:
+ gpu_mem = &psp->ta_ctx.cmd_gpu_mem;
+ break;
+ default:
+ return NULL;
+ }
+
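+	/* Allocate the block on first use; subsequent callers share it via ref_count. */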
+ if (!gpu_mem->ref_count) {
+ ret = ras_core_get_gpu_mem(ras_core, mem_type, gpu_mem);
+ if (ret)
+ return NULL;
+ gpu_mem->mem_type = mem_type;
+ }
+
+ gpu_mem->ref_count++;
+
+ return gpu_mem;
+}
+
+static int ras_psp_put_gpu_mem(struct ras_core_context *ras_core,
+ struct gpu_mem_block *gpu_mem)
+{
+ if (!gpu_mem)
+ return 0;
+
+ gpu_mem->ref_count--;
+
+ if (gpu_mem->ref_count > 0) {
+ return 0;
+ } else if (gpu_mem->ref_count < 0) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Duplicate free gpu memory %u\n", gpu_mem->mem_type);
+ } else {
+ ras_core_put_gpu_mem(ras_core, gpu_mem->mem_type, gpu_mem);
+ memset(gpu_mem, 0, sizeof(*gpu_mem));
+ }
+
+ return 0;
+}
+
+static void __acquire_psp_cmd_lock(struct ras_core_context *ras_core)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+
+ if (psp_ctx->external_mutex)
+ mutex_lock(psp_ctx->external_mutex);
+ else
+ mutex_lock(&psp_ctx->internal_mutex);
+}
+
+static void __release_psp_cmd_lock(struct ras_core_context *ras_core)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+
+ if (psp_ctx->external_mutex)
+ mutex_unlock(psp_ctx->external_mutex);
+ else
+ mutex_unlock(&psp_ctx->internal_mutex);
+}
+
+static uint32_t __get_ring_frame_slot(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+ uint32_t ras_ring_wptr_dw;
+
+ ras_ring_wptr_dw = psp->ip_func->psp_ras_ring_wptr_get(ras_core);
+
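+	/* The ring write pointer is in dwords; convert to bytes, then to a frame slot. */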
+ return div64_u64((ras_ring_wptr_dw << 2), sizeof(struct psp_gfx_rb_frame));
+}
+
+static int __set_ring_frame_slot(struct ras_core_context *ras_core,
+ uint32_t slot)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ return psp->ip_func->psp_ras_ring_wptr_set(ras_core,
+ (slot * sizeof(struct psp_gfx_rb_frame)) >> 2);
+}
+
+static int write_frame_to_ras_psp_ring(struct ras_core_context *ras_core,
+ struct psp_gfx_rb_frame *frame)
+{
+ struct gpu_mem_block *ring_mem;
+ struct psp_gfx_rb_frame *rb_frame;
+ uint32_t max_frame_slot;
+ uint32_t slot_idx;
+ uint32_t write_flush_read_back = 0;
+ int ret = 0;
+
+ ring_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_RING);
+ if (!ring_mem)
+ return -ENOMEM;
+
+ max_frame_slot =
+ div64_u64(ring_mem->mem_size, sizeof(struct psp_gfx_rb_frame));
+
+ rb_frame =
+ (struct psp_gfx_rb_frame *)ring_mem->mem_cpu_addr;
+
+ slot_idx = __get_ring_frame_slot(ras_core);
+ if (slot_idx >= max_frame_slot)
+ slot_idx = 0;
+
+ memcpy(&rb_frame[slot_idx], frame, sizeof(*frame));
+
+ /* Do a read to force the write of the frame before writing
+ * write pointer.
+ */
+ write_flush_read_back = rb_frame[slot_idx].fence_value;
+ if (write_flush_read_back != frame->fence_value) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to submit ring cmd! cmd:0x%x:0x%x, fence:0x%x:0x%x value:%u, expected:%u\n",
+ rb_frame[slot_idx].cmd_buf_addr_hi,
+ rb_frame[slot_idx].cmd_buf_addr_lo,
+ rb_frame[slot_idx].fence_addr_hi,
+ rb_frame[slot_idx].fence_addr_lo,
+ write_flush_read_back, frame->fence_value);
+ ret = -EACCES;
+ goto err;
+ }
+
+ slot_idx++;
+
+ if (slot_idx >= max_frame_slot)
+ slot_idx = 0;
+
+ __set_ring_frame_slot(ras_core, slot_idx);
+
+err:
+ ras_psp_put_gpu_mem(ras_core, ring_mem);
+ return ret;
+}
+
+static int send_psp_cmd(struct ras_core_context *ras_core,
+ enum psp_gfx_cmd_id gfx_cmd_id, void *cmd_data,
+ uint32_t cmd_size, struct psp_cmd_resp *resp)
+{
+ struct ras_psp_ctx *psp_ctx = &ras_core->ras_psp.psp_ctx;
+ struct gpu_mem_block *psp_cmd_buf = NULL;
+ struct gpu_mem_block *psp_fence_buf = NULL;
+ struct psp_gfx_cmd_resp *gfx_cmd;
+ struct psp_gfx_rb_frame rb_frame;
+ int ret = 0;
+ int timeout = 1000;
+
+ if (!cmd_data || (cmd_size > sizeof(union psp_gfx_commands)) || !resp) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid RAS PSP command, id: %u\n", gfx_cmd_id);
+ return -EINVAL;
+ }
+
+ __acquire_psp_cmd_lock(ras_core);
+
+ psp_cmd_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_CMD);
+ if (!psp_cmd_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ psp_fence_buf = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_PSP_FENCE);
+ if (!psp_fence_buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ gfx_cmd = (struct psp_gfx_cmd_resp *)psp_cmd_buf->mem_cpu_addr;
+ memset(gfx_cmd, 0, sizeof(*gfx_cmd));
+ gfx_cmd->cmd_id = gfx_cmd_id;
+ memcpy(&gfx_cmd->cmd, cmd_data, cmd_size);
+
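+	/*
+	 * Each submission gets a fresh fence value; completion is detected when
+	 * the PSP writes it back to the fence buffer polled below.
+	 */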
+ psp_ctx->in_fence_value++;
+
+ memset(&rb_frame, 0, sizeof(rb_frame));
+ rb_frame.cmd_buf_addr_hi = upper_32_bits(psp_cmd_buf->mem_mc_addr);
+ rb_frame.cmd_buf_addr_lo = lower_32_bits(psp_cmd_buf->mem_mc_addr);
+ rb_frame.fence_addr_hi = upper_32_bits(psp_fence_buf->mem_mc_addr);
+ rb_frame.fence_addr_lo = lower_32_bits(psp_fence_buf->mem_mc_addr);
+ rb_frame.fence_value = psp_ctx->in_fence_value;
+
+ ret = write_frame_to_ras_psp_ring(ras_core, &rb_frame);
+ if (ret) {
+ psp_ctx->in_fence_value--;
+ goto exit;
+ }
+
+ while (*((uint64_t *)psp_fence_buf->mem_cpu_addr) !=
+ psp_ctx->in_fence_value) {
+ if (--timeout == 0)
+ break;
+		/*
+		 * Don't wait out the full timeout once err_event_athub occurs:
+		 * the GPU reset thread has been triggered, and the lock must be
+		 * released for the PSP resume sequence.
+		 */
+ if (ras_core_ras_interrupt_detected(ras_core))
+ break;
+
+ msleep(2);
+ }
+
+ resp->status = gfx_cmd->resp.status;
+ resp->session_id = gfx_cmd->resp.session_id;
+
+exit:
+ ras_psp_put_gpu_mem(ras_core, psp_cmd_buf);
+ ras_psp_put_gpu_mem(ras_core, psp_fence_buf);
+
+ __release_psp_cmd_lock(ras_core);
+
+ return ret;
+}
+
+static void __check_ras_ta_cmd_resp(struct ras_core_context *ras_core,
+ struct ras_ta_cmd *ras_cmd)
+{
+	if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+		RAS_DEV_WARN(ras_core->dev, "ECC switch disabled\n");
+		ras_cmd->ras_status = RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE;
+	} else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) {
+		RAS_DEV_WARN(ras_core->dev, "RAS internal register access blocked\n");
+	}
+
+ switch (ras_cmd->ras_status) {
+ case RAS_TA_STATUS__ERROR_UNSUPPORTED_IP:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: cmd failed due to unsupported ip\n");
+ break;
+ case RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: cmd failed due to unsupported error injection\n");
+ break;
+ case RAS_TA_STATUS__SUCCESS:
+ break;
+ case RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED:
+ if (ras_cmd->cmd_id == RAS_TA_CMD_ID__TRIGGER_ERROR)
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: Inject error to critical region is not allowed\n");
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev,
+ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+ break;
+ }
+}
+
+static int send_ras_ta_runtime_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id, void *in, uint32_t in_size,
+ void *out, uint32_t out_size)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct gpu_mem_block *cmd_mem;
+ struct ras_ta_cmd *ras_cmd;
+ struct psp_gfx_cmd_invoke_cmd invoke_cmd = {0};
+ struct psp_cmd_resp resp = {0};
+ int ret = 0;
+
+ if (!in || (in_size > sizeof(union ras_ta_cmd_input)) ||
+ (cmd_id >= MAX_RAS_TA_CMD_ID)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid RAS TA command, id: %u\n", cmd_id);
+ return -EINVAL;
+ }
+
+ ras_psp_sync_system_ras_psp_status(ras_core);
+
+ cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD);
+ if (!cmd_mem)
+ return -ENOMEM;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) {
+ ret = -EACCES;
+ goto out;
+ }
+
+ ras_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr;
+
+ mutex_lock(&ta_ctx->ta_mutex);
+
+ memset(ras_cmd, 0, sizeof(*ras_cmd));
+ ras_cmd->cmd_id = cmd_id;
+ memcpy(&ras_cmd->ras_in_message, in, in_size);
+
+ invoke_cmd.ta_cmd_id = cmd_id;
+ invoke_cmd.session_id = ta_ctx->session_id;
+
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_INVOKE_CMD,
+ &invoke_cmd, sizeof(invoke_cmd), &resp);
+
+	/* If err_event_athub occurs, the error injection was successful;
+	 * however, the return status from the TA is no longer reliable.
+	 */
+ if (ras_core_ras_interrupt_detected(ras_core)) {
+ ret = 0;
+ goto unlock;
+ }
+
+ if (ret || resp.status) {
+ RAS_DEV_ERR(ras_core->dev,
+ "RAS: Failed to send psp cmd! ret:%d, status:%u\n",
+ ret, resp.status);
+ ret = -ESTRPIPE;
+ goto unlock;
+ }
+
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
+ RAS_DEV_WARN(ras_core->dev, "RAS: Unsupported Interface\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!ras_cmd->ras_status && out && out_size)
+ memcpy(out, &ras_cmd->ras_out_message, out_size);
+
+ __check_ras_ta_cmd_resp(ras_core, ras_cmd);
+
+unlock:
+ mutex_unlock(&ta_ctx->ta_mutex);
+ ras_core_up_gpu_reset_lock(ras_core);
+out:
+ ras_psp_put_gpu_mem(ras_core, cmd_mem);
+ return ret;
+}
+
+static int trigger_ras_ta_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask)
+{
+ uint32_t dev_mask = 0;
+
+ switch (info->block_id) {
+ case RAS_TA_BLOCK__GFX:
+ if (ras_gfx_get_ta_subblock(ras_core, info->inject_error_type,
+ info->sub_block_index, &info->sub_block_index))
+ return -EINVAL;
+
+ dev_mask = RAS_GET_MASK(ras_core->dev, GC, instance_mask);
+ break;
+ case RAS_TA_BLOCK__SDMA:
+ dev_mask = RAS_GET_MASK(ras_core->dev, SDMA0, instance_mask);
+ break;
+ case RAS_TA_BLOCK__VCN:
+ case RAS_TA_BLOCK__JPEG:
+ dev_mask = RAS_GET_MASK(ras_core->dev, VCN, instance_mask);
+ break;
+ default:
+ dev_mask = instance_mask;
+ break;
+ }
+
+ /* reuse sub_block_index for backward compatibility */
+ dev_mask <<= RAS_TA_INST_SHIFT;
+ dev_mask &= RAS_TA_INST_MASK;
+ info->sub_block_index |= dev_mask;
+
+ return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__TRIGGER_ERROR,
+ info, sizeof(*info), NULL, 0);
+}
+
+static int send_load_ta_fw_cmd(struct ras_core_context *ras_core,
+ struct ras_ta_ctx *ta_ctx)
+{
+ struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin;
+ struct gpu_mem_block *fw_mem;
+ struct gpu_mem_block *cmd_mem;
+ struct ras_ta_cmd *ta_cmd;
+ struct ras_ta_init_flags *ta_init_flags;
+ struct psp_gfx_cmd_load_ta psp_load_ta_cmd;
+ struct psp_cmd_resp resp = {0};
+ struct ras_ta_image_header *fw_hdr = NULL;
+ int ret;
+
+ fw_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_FW);
+ if (!fw_mem)
+ return -ENOMEM;
+
+ cmd_mem = ras_psp_get_gpu_mem(ras_core, GPU_MEM_TYPE_RAS_TA_CMD);
+ if (!cmd_mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ras_psp_get_ras_ta_init_param(ras_core, &ta_ctx->init_param);
+ if (ret)
+ goto err;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core)) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ /* copy ras ta binary to shared gpu memory */
+ memcpy(fw_mem->mem_cpu_addr, fw_bin->bin_addr, fw_bin->bin_size);
+ fw_mem->mem_size = fw_bin->bin_size;
+
+ /* Initialize ras ta startup parameter */
+ ta_cmd = (struct ras_ta_cmd *)cmd_mem->mem_cpu_addr;
+ ta_init_flags = &ta_cmd->ras_in_message.init_flags;
+
+ ta_init_flags->poison_mode_en = ta_ctx->init_param.poison_mode_en;
+ ta_init_flags->dgpu_mode = ta_ctx->init_param.dgpu_mode;
+ ta_init_flags->xcc_mask = ta_ctx->init_param.xcc_mask;
+ ta_init_flags->channel_dis_num = ta_ctx->init_param.channel_dis_num;
+ ta_init_flags->nps_mode = ta_ctx->init_param.nps_mode;
+ ta_init_flags->active_umc_mask = ta_ctx->init_param.active_umc_mask;
+
+ /* Setup load ras ta command */
+ memset(&psp_load_ta_cmd, 0, sizeof(psp_load_ta_cmd));
+ psp_load_ta_cmd.app_phy_addr_lo = lower_32_bits(fw_mem->mem_mc_addr);
+ psp_load_ta_cmd.app_phy_addr_hi = upper_32_bits(fw_mem->mem_mc_addr);
+ psp_load_ta_cmd.app_len = fw_mem->mem_size;
+ psp_load_ta_cmd.cmd_buf_phy_addr_lo = lower_32_bits(cmd_mem->mem_mc_addr);
+ psp_load_ta_cmd.cmd_buf_phy_addr_hi = upper_32_bits(cmd_mem->mem_mc_addr);
+ psp_load_ta_cmd.cmd_buf_len = cmd_mem->mem_size;
+
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_LOAD_TA,
+ &psp_load_ta_cmd, sizeof(psp_load_ta_cmd), &resp);
+ if (!ret && !resp.status) {
+		/* The TA version is read from offset 0x60 of the firmware image header */
+ fw_hdr = (struct ras_ta_image_header *)fw_bin->bin_addr;
+ RAS_DEV_INFO(ras_core->dev, "PSP: RAS TA(version:%X.%X.%X.%X) is loaded.\n",
+ (fw_hdr->image_version >> 24) & 0xFF, (fw_hdr->image_version >> 16) & 0xFF,
+ (fw_hdr->image_version >> 8) & 0xFF, fw_hdr->image_version & 0xFF);
+ ta_ctx->ta_version = fw_hdr->image_version;
+ ta_ctx->session_id = resp.session_id;
+ ta_ctx->ras_ta_initialized = true;
+ } else {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to load RAS TA! ret:%d, status:%d\n", ret, resp.status);
+ }
+
+ ras_core_up_gpu_reset_lock(ras_core);
+
+err:
+ ras_psp_put_gpu_mem(ras_core, fw_mem);
+ ras_psp_put_gpu_mem(ras_core, cmd_mem);
+ return ret;
+}
+
+static int load_ras_ta_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_ta_fw_bin *fw_bin = &ta_ctx->fw_bin;
+ int ret;
+
+ fw_bin->bin_addr = ras_ta_load->bin_addr;
+ fw_bin->bin_size = ras_ta_load->bin_size;
+ fw_bin->fw_version = ras_ta_load->fw_version;
+ fw_bin->feature_version = ras_ta_load->feature_version;
+
+ ret = send_load_ta_fw_cmd(ras_core, ta_ctx);
+ if (!ret) {
+ ras_ta_load->out_session_id = ta_ctx->session_id;
+ ras_ta_load->out_loaded_ta_version = ta_ctx->ta_version;
+ }
+
+ return ret;
+}
+
+static int unload_ras_ta_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct psp_gfx_cmd_unload_ta cmd_unload_ta = {0};
+ struct psp_cmd_resp resp = {0};
+ int ret;
+
+ if (!ras_core_down_trylock_gpu_reset_lock(ras_core))
+ return -EACCES;
+
+ cmd_unload_ta.session_id = ta_ctx->session_id;
+ ret = send_psp_cmd(ras_core, GFX_CMD_ID_UNLOAD_TA,
+ &cmd_unload_ta, sizeof(cmd_unload_ta), &resp);
+ if (ret || resp.status) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Failed to unload RAS TA! ret:%d, status:%u\n",
+ ret, resp.status);
+ goto unlock;
+ }
+
+ kfree(ta_ctx->fw_bin.bin_addr);
+ memset(&ta_ctx->fw_bin, 0, sizeof(ta_ctx->fw_bin));
+ ta_ctx->ta_version = 0;
+ ta_ctx->ras_ta_initialized = false;
+ ta_ctx->session_id = 0;
+
+unlock:
+ ras_core_up_gpu_reset_lock(ras_core);
+
+ return ret;
+}
+
+int ras_psp_load_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ struct ras_psp_ta_unload ras_ta_unload = {0};
+ int ret;
+
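+	/* Nothing to do when the RAS TA was preloaded by the system. */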
+ if (ta_ctx->preload_ras_ta_enabled)
+ return 0;
+
+ if (!ras_ta_load)
+ return -EINVAL;
+
+ if (ta_ctx->ras_ta_initialized) {
+ ras_ta_unload.ras_session_id = ta_ctx->session_id;
+ ret = unload_ras_ta_firmware(ras_core, &ras_ta_unload);
+ if (ret)
+ return ret;
+ }
+
+ return load_ras_ta_firmware(ras_core, ras_ta_load);
+}
+
+int ras_psp_unload_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (ta_ctx->preload_ras_ta_enabled)
+ return 0;
+
+ if ((!ras_ta_unload) ||
+ (ras_ta_unload->ras_session_id != ta_ctx->session_id))
+ return -EINVAL;
+
+ return unload_ras_ta_firmware(ras_core, ras_ta_unload);
+}
+
+int ras_psp_trigger_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized) {
+		RAS_DEV_ERR(ras_core->dev, "RAS: RAS TA firmware not initialized!\n");
+ return -ENOEXEC;
+ }
+
+ if (!info)
+ return -EINVAL;
+
+ return trigger_ras_ta_error(ras_core, info, instance_mask);
+}
+
+int ras_psp_query_address(struct ras_core_context *ras_core,
+ struct ras_ta_query_address_input *addr_in,
+ struct ras_ta_query_address_output *addr_out)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+
+ if (!ta_ctx->preload_ras_ta_enabled &&
+ !ta_ctx->ras_ta_initialized) {
+		RAS_DEV_ERR(ras_core->dev, "RAS: RAS TA firmware not initialized!\n");
+ return -ENOEXEC;
+ }
+
+ if (!addr_in || !addr_out)
+ return -EINVAL;
+
+ return send_ras_ta_runtime_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS,
+ addr_in, sizeof(*addr_in), addr_out, sizeof(*addr_out));
+}
+
+int ras_psp_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ memset(psp, 0, sizeof(*psp));
+
+ psp->sys_func = ras_core->config->psp_cfg.psp_sys_fn;
+ if (!psp->sys_func) {
+ RAS_DEV_ERR(ras_core->dev, "RAS psp sys function not configured!\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&psp->psp_ctx.internal_mutex);
+ mutex_init(&psp->ta_ctx.ta_mutex);
+
+ return 0;
+}
+
+int ras_psp_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ mutex_destroy(&psp->psp_ctx.internal_mutex);
+ mutex_destroy(&psp->ta_ctx.ta_mutex);
+
+ memset(psp, 0, sizeof(*psp));
+
+ return 0;
+}
+
+int ras_psp_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_psp *psp = &ras_core->ras_psp;
+
+ psp->psp_ip_version = ras_core->config->psp_ip_version;
+
+ psp->ip_func = ras_psp_get_ip_funcs(ras_core, psp->psp_ip_version);
+ if (!psp->ip_func)
+ return -EINVAL;
+
+	/* After GPU reset, the system RAS PSP status may change;
+	 * therefore, synchronize the system status again.
+	 */
+ ras_psp_sync_system_ras_psp_status(ras_core);
+
+ return 0;
+}
+
+int ras_psp_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
+
+bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id)
+{
+ struct ras_ta_ctx *ta_ctx = &ras_core->ras_psp.ta_ctx;
+ bool ret = false;
+
+ if (!ta_ctx->preload_ras_ta_enabled && !ta_ctx->ras_ta_initialized)
+ return false;
+
+ switch (cmd_id) {
+ case RAS_TA_CMD_ID__QUERY_ADDRESS:
+ /* Currently, querying the address from RAS TA is only supported
+ * when the RAS TA firmware is loaded during driver installation.
+ */
+ if (ta_ctx->preload_ras_ta_enabled)
+ ret = true;
+ break;
+ case RAS_TA_CMD_ID__TRIGGER_ERROR:
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h
new file mode 100644
index 000000000000..71776fecfd66
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_PSP_H__
+#define __RAS_PSP_H__
+#include "ras.h"
+#include "ras_ta_if.h"
+
+struct ras_core_context;
+struct ras_ta_trigger_error_input;
+struct ras_ta_query_address_input;
+struct ras_ta_query_address_output;
+enum ras_ta_cmd_id;
+
+struct ras_ta_image_header {
+ uint32_t reserved1[24];
+ uint32_t image_version; /* [0x60] Off Chip Firmware Version */
+ uint32_t reserved2[39];
+};
+
+struct ras_psp_sys_status {
+ bool initialized;
+ uint32_t session_id;
+ void *psp_cmd_mutex;
+};
+
+struct ras_ta_init_param {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
+ uint16_t xcc_mask;
+ uint8_t channel_dis_num;
+ uint8_t nps_mode;
+ uint32_t active_umc_mask;
+};
+
+struct gpu_mem_block {
+ uint32_t mem_type;
+ void *mem_bo;
+ uint64_t mem_mc_addr;
+ void *mem_cpu_addr;
+ uint32_t mem_size;
+ int ref_count;
+ void *private;
+};
+
+struct ras_psp_ip_func {
+ uint32_t (*psp_ras_ring_wptr_get)(struct ras_core_context *ras_core);
+ int (*psp_ras_ring_wptr_set)(struct ras_core_context *ras_core, uint32_t wptr);
+};
+
+struct ras_psp_ring {
+ struct gpu_mem_block ras_ring_gpu_mem;
+};
+
+struct psp_cmd_resp {
+ uint32_t status;
+ uint32_t session_id;
+};
+
+struct ras_psp_ctx {
+ void *external_mutex;
+ struct mutex internal_mutex;
+ uint64_t in_fence_value;
+ struct gpu_mem_block psp_cmd_gpu_mem;
+ struct gpu_mem_block out_fence_gpu_mem;
+};
+
+struct ras_ta_fw_bin {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t bin_size;
+ uint8_t *bin_addr;
+};
+
+struct ras_ta_ctx {
+ bool preload_ras_ta_enabled;
+ bool ras_ta_initialized;
+ uint32_t session_id;
+ uint32_t resp_status;
+ uint32_t ta_version;
+ struct mutex ta_mutex;
+ struct ras_ta_fw_bin fw_bin;
+ struct ras_ta_init_param init_param;
+ struct gpu_mem_block fw_gpu_mem;
+ struct gpu_mem_block cmd_gpu_mem;
+};
+
+struct ras_psp {
+ uint32_t psp_ip_version;
+ struct ras_psp_ring psp_ring;
+ struct ras_psp_ctx psp_ctx;
+ struct ras_ta_ctx ta_ctx;
+ const struct ras_psp_ip_func *ip_func;
+ const struct ras_psp_sys_func *sys_func;
+};
+
+struct ras_psp_ta_load {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t bin_size;
+ uint8_t *bin_addr;
+ uint64_t out_session_id;
+ uint32_t out_loaded_ta_version;
+};
+
+struct ras_psp_ta_unload {
+ uint64_t ras_session_id;
+};
+
+int ras_psp_sw_init(struct ras_core_context *ras_core);
+int ras_psp_sw_fini(struct ras_core_context *ras_core);
+int ras_psp_hw_init(struct ras_core_context *ras_core);
+int ras_psp_hw_fini(struct ras_core_context *ras_core);
+int ras_psp_load_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_load *ras_ta_load);
+int ras_psp_unload_firmware(struct ras_core_context *ras_core,
+ struct ras_psp_ta_unload *ras_ta_unload);
+int ras_psp_trigger_error(struct ras_core_context *ras_core,
+ struct ras_ta_trigger_error_input *info, uint32_t instance_mask);
+int ras_psp_query_address(struct ras_core_context *ras_core,
+ struct ras_ta_query_address_input *addr_in,
+ struct ras_ta_query_address_output *addr_out);
+bool ras_psp_check_supported_cmd(struct ras_core_context *ras_core,
+ enum ras_ta_cmd_id cmd_id);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c
new file mode 100644
index 000000000000..626cf39b75ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "ras.h"
+#include "ras_psp_v13_0.h"
+
+#define regMP0_SMN_C2PMSG_67 0x0083
+#define regMP0_SMN_C2PMSG_67_BASE_IDX 0
+
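+/* The PSP RAS ring write pointer is mirrored through MP0 C2PMSG_67 */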
+static uint32_t ras_psp_v13_0_ring_wptr_get(struct ras_core_context *ras_core)
+{
+ return RAS_DEV_RREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67);
+}
+
+static int ras_psp_v13_0_ring_wptr_set(struct ras_core_context *ras_core, uint32_t value)
+{
+ RAS_DEV_WREG32_SOC15(ras_core->dev, MP0, 0, regMP0_SMN_C2PMSG_67, value);
+
+ return 0;
+}
+
+const struct ras_psp_ip_func ras_psp_v13_0 = {
+ .psp_ras_ring_wptr_get = ras_psp_v13_0_ring_wptr_get,
+ .psp_ras_ring_wptr_set = ras_psp_v13_0_ring_wptr_set,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h
new file mode 100644
index 000000000000..b705ffe38a12
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_psp_v13_0.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_PSP_V13_0_H__
+#define __RAS_PSP_V13_0_H__
+#include "ras_psp.h"
+
+extern const struct ras_psp_ip_func ras_psp_v13_0;
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h
new file mode 100644
index 000000000000..0921e36d3274
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_ta_if.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _RAS_TA_IF_H
+#define _RAS_TA_IF_H
+#include "ras.h"
+
+#define RAS_TA_HOST_IF_VER 0
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
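+/* e.g. RSP_ID(RAS_TA_CMD_ID__TRIGGER_ERROR) == 0x80000002 */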
+
+/* invalid node instance value */
+#define RAS_TA_INV_NODE 0xffff
+
+/* RAS related enumerations */
+/**********************************************************/
+enum ras_ta_cmd_id {
+ RAS_TA_CMD_ID__ENABLE_FEATURES = 0,
+ RAS_TA_CMD_ID__DISABLE_FEATURES,
+ RAS_TA_CMD_ID__TRIGGER_ERROR,
+ RAS_TA_CMD_ID__QUERY_BLOCK_INFO,
+ RAS_TA_CMD_ID__QUERY_SUB_BLOCK_INFO,
+ RAS_TA_CMD_ID__QUERY_ADDRESS,
+ MAX_RAS_TA_CMD_ID
+};
+
+enum ras_ta_status {
+ RAS_TA_STATUS__SUCCESS = 0x0000,
+ RAS_TA_STATUS__RESET_NEEDED = 0xA001,
+ RAS_TA_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
+ RAS_TA_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
+ RAS_TA_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004,
+ RAS_TA_STATUS__ERROR_INJECTION_FAILED = 0xA005,
+ RAS_TA_STATUS__ERROR_ASD_READ_WRITE = 0xA006,
+ RAS_TA_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007,
+ RAS_TA_STATUS__ERROR_TIMEOUT = 0xA008,
+ RAS_TA_STATUS__ERROR_BLOCK_DISABLED = 0xA009,
+ RAS_TA_STATUS__ERROR_GENERIC = 0xA00A,
+ RAS_TA_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B,
+ RAS_TA_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
+ RAS_TA_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
+ RAS_TA_STATUS__ERROR_TEE_INTERNAL = 0xA00F,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010,
+ RAS_TA_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011,
+ RAS_TA_STATUS__ERROR_RAS_READ_WRITE = 0xA012,
+ RAS_TA_STATUS__ERROR_NULL_PTR = 0xA013,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_IP = 0xA014,
+ RAS_TA_STATUS__ERROR_PCS_STATE_QUIET = 0xA015,
+ RAS_TA_STATUS__ERROR_PCS_STATE_ERROR = 0xA016,
+ RAS_TA_STATUS__ERROR_PCS_STATE_HANG = 0xA017,
+ RAS_TA_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018,
+ RAS_TA_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019,
+ RAS_TA_STATUS__TEE_ERROR_ACCESS_DENIED = 0xA01A
+};
+
+enum ras_ta_block {
+ RAS_TA_BLOCK__UMC = 0,
+ RAS_TA_BLOCK__SDMA,
+ RAS_TA_BLOCK__GFX,
+ RAS_TA_BLOCK__MMHUB,
+ RAS_TA_BLOCK__ATHUB,
+ RAS_TA_BLOCK__PCIE_BIF,
+ RAS_TA_BLOCK__HDP,
+ RAS_TA_BLOCK__XGMI_WAFL,
+ RAS_TA_BLOCK__DF,
+ RAS_TA_BLOCK__SMN,
+ RAS_TA_BLOCK__SEM,
+ RAS_TA_BLOCK__MP0,
+ RAS_TA_BLOCK__MP1,
+ RAS_TA_BLOCK__FUSE,
+ RAS_TA_BLOCK__MCA,
+ RAS_TA_BLOCK__VCN,
+ RAS_TA_BLOCK__JPEG,
+ RAS_TA_BLOCK__IH,
+ RAS_TA_BLOCK__MPIO,
+ RAS_TA_BLOCK__MMSCH,
+ RAS_TA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_mca_block {
+ RAS_TA_MCA_BLOCK__MP0 = 0,
+ RAS_TA_MCA_BLOCK__MP1 = 1,
+ RAS_TA_MCA_BLOCK__MPIO = 2,
+ RAS_TA_MCA_BLOCK__IOHC = 3,
+ RAS_TA_MCA_NUM_BLOCK_MAX
+};
+
+enum ras_ta_error_type {
+ RAS_TA_ERROR__NONE = 0,
+ RAS_TA_ERROR__PARITY = 1,
+ RAS_TA_ERROR__SINGLE_CORRECTABLE = 2,
+ RAS_TA_ERROR__MULTI_UNCORRECTABLE = 4,
+ RAS_TA_ERROR__POISON = 8,
+};
+
+enum ras_ta_address_type {
+ RAS_TA_MCA_TO_PA,
+ RAS_TA_PA_TO_MCA,
+};
+
+enum ras_ta_nps_mode {
+ RAS_TA_UNKNOWN_MODE = 0,
+ RAS_TA_NPS1_MODE = 1,
+ RAS_TA_NPS2_MODE = 2,
+ RAS_TA_NPS4_MODE = 4,
+ RAS_TA_NPS8_MODE = 8,
+};
+
+/* Input/output structures for RAS commands */
+/**********************************************************/
+
+struct ras_ta_enable_features_input {
+ enum ras_ta_block block_id;
+ enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_disable_features_input {
+ enum ras_ta_block block_id;
+ enum ras_ta_error_type error_type;
+};
+
+struct ras_ta_trigger_error_input {
+ /* RAS block, e.g. umc, gfx */
+ enum ras_ta_block block_id;
+
+ /* type of error, e.g. single_correctable */
+ enum ras_ta_error_type inject_error_type;
+
+ /* memory block, e.g. hbm, sram, etc. */
+ uint32_t sub_block_index;
+
+ /* explicit address of error */
+ uint64_t address;
+
+ /* method of error injection, e.g. persistent, coherent, etc. */
+ uint64_t value;
+};
+
+struct ras_ta_init_flags {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
+ uint16_t xcc_mask;
+ uint8_t channel_dis_num;
+ uint8_t nps_mode;
+ uint32_t active_umc_mask;
+};
+
+struct ras_ta_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+ uint32_t socket_id;
+};
+
+struct ras_ta_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct ras_ta_query_address_input {
+ enum ras_ta_address_type addr_type;
+ struct ras_ta_mca_addr ma;
+ struct ras_ta_phy_addr pa;
+};
+
+struct ras_ta_output_flags {
+ uint8_t ras_init_success_flag;
+ uint8_t err_inject_switch_disable_flag;
+ uint8_t reg_access_failure_flag;
+};
+
+struct ras_ta_query_address_output {
+ /* the flags field is unused for address queries */
+ struct ras_ta_output_flags flags;
+ struct ras_ta_mca_addr ma;
+ struct ras_ta_phy_addr pa;
+};
+
+/* Common input structure for RAS callbacks */
+/**********************************************************/
+union ras_ta_cmd_input {
+ struct ras_ta_init_flags init_flags;
+ struct ras_ta_enable_features_input enable_features;
+ struct ras_ta_disable_features_input disable_features;
+ struct ras_ta_trigger_error_input trigger_error;
+ struct ras_ta_query_address_input address;
+ uint32_t reserve_pad[256];
+};
+
+union ras_ta_cmd_output {
+ struct ras_ta_output_flags flags;
+ struct ras_ta_query_address_output address;
+ uint32_t reserve_pad[256];
+};
+
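+/* command buffer shared with the TA: a fixed header followed by the
+ * input and output unions, each padded to 256 dwords (1 KiB)
+ */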
+struct ras_ta_cmd {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ uint32_t ras_status;
+ uint32_t if_version;
+ union ras_ta_cmd_input ras_in_message;
+ union ras_ta_cmd_output ras_out_message;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
new file mode 100644
index 000000000000..4dae64c424a2
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.c
@@ -0,0 +1,707 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_umc.h"
+#include "ras_umc_v12_0.h"
+
+#define MAX_ECC_NUM_PER_RETIREMENT 16
+
+/* bad page timestamp format:
+ * year[31:27] month[26:23] day[22:17] hour[16:12] minute[11:6] second[5:0]
+ */
+#define EEPROM_TIMESTAMP_MINUTE 6
+#define EEPROM_TIMESTAMP_HOUR 12
+#define EEPROM_TIMESTAMP_DAY 17
+#define EEPROM_TIMESTAMP_MONTH 23
+#define EEPROM_TIMESTAMP_YEAR 27
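+
+/* e.g. 2025-03-07 14:05:33 encodes as
+ * (25 << 27) | (3 << 23) | (7 << 17) | (14 << 12) | (5 << 6) | 33 = 0xC98EE161
+ */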
+
+static uint64_t ras_umc_get_eeprom_timestamp(struct ras_core_context *ras_core)
+{
+ struct ras_time tm = {0};
+ uint64_t utc_timestamp = 0;
+ uint64_t eeprom_timestamp = 0;
+
+ utc_timestamp = ras_core_get_utc_second_timestamp(ras_core);
+ if (!utc_timestamp)
+ return utc_timestamp;
+
+ ras_core_convert_timestamp_to_time(ras_core, utc_timestamp, &tm);
+
+ /* the year field only covers 2000 ~ 2031, clamp out-of-range years */
+ if (tm.tm_year < 2000)
+ tm.tm_year = 2000;
+ if (tm.tm_year > 2031)
+ tm.tm_year = 2031;
+
+ tm.tm_year -= 2000;
+
+ /* promote to 64 bits before shifting: year << 27 would overflow
+ * 32-bit signed arithmetic
+ */
+ eeprom_timestamp = tm.tm_sec + ((uint64_t)tm.tm_min << EEPROM_TIMESTAMP_MINUTE)
+ + ((uint64_t)tm.tm_hour << EEPROM_TIMESTAMP_HOUR)
+ + ((uint64_t)tm.tm_mday << EEPROM_TIMESTAMP_DAY)
+ + ((uint64_t)tm.tm_mon << EEPROM_TIMESTAMP_MONTH)
+ + ((uint64_t)tm.tm_year << EEPROM_TIMESTAMP_YEAR);
+ eeprom_timestamp &= 0xffffffff;
+
+ return eeprom_timestamp;
+}
+
+static const struct ras_umc_ip_func *ras_umc_get_ip_func(
+ struct ras_core_context *ras_core, uint32_t ip_version)
+{
+ switch (ip_version) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
+ return &ras_umc_func_v12_0;
+ default:
+ RAS_DEV_ERR(ras_core->dev,
+ "UMC ip version(0x%x) is not supported!\n", ip_version);
+ break;
+ }
+
+ return NULL;
+}
+
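+/*
+ * Translate an MCA (bank) error address to a SoC physical address by
+ * querying the RAS TA through the PSP; @out may be NULL when the caller
+ * only needs to know whether the query succeeds.
+ */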
+int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *in, struct umc_phy_addr *out,
+ uint32_t nps)
+{
+ struct ras_ta_query_address_input addr_in;
+ struct ras_ta_query_address_output addr_out;
+ int ret;
+
+ if (!in)
+ return -EINVAL;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ addr_in.ma.err_addr = in->err_addr;
+ addr_in.ma.ch_inst = in->ch_inst;
+ addr_in.ma.umc_inst = in->umc_inst;
+ addr_in.ma.node_inst = in->node_inst;
+ addr_in.ma.socket_id = in->socket_id;
+
+ addr_in.addr_type = RAS_TA_MCA_TO_PA;
+
+ ret = ras_psp_query_address(ras_core, &addr_in, &addr_out);
+ if (ret) {
+ RAS_DEV_WARN(ras_core->dev,
+ "Failed to query RAS physical address for 0x%llx, ret:%d",
+ in->err_addr, ret);
+ return -EREMOTEIO;
+ }
+
+ if (out) {
+ out->pa = addr_out.pa.pa;
+ out->bank = addr_out.pa.bank;
+ out->channel_idx = addr_out.pa.channel_idx;
+ }
+
+ return 0;
+}
+
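+/*
+ * Record a detected ECC error in the radix tree, keyed by its retired row
+ * PFN, and tag it NEW_DETECTED so ras_umc_get_new_records() can harvest it.
+ */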
+static int ras_umc_log_ecc(struct ras_core_context *ras_core,
+ unsigned long idx, void *data)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ int ret;
+
+ mutex_lock(&ras_umc->tree_lock);
+ ret = radix_tree_insert(&ras_umc->root, idx, data);
+ if (!ret)
+ radix_tree_tag_set(&ras_umc->root, idx, UMC_ECC_NEW_DETECTED_TAG);
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return ret;
+}
+
+int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint64_t buf[8] = {0};
+ void **slot;
+ void *data;
+ void *iter = buf;
+
+ mutex_lock(&ras_umc->tree_lock);
+ radix_tree_for_each_slot(slot, &ras_umc->root, iter, 0) {
+ data = ras_radix_tree_delete_iter(&ras_umc->root, iter);
+ kfree(data);
+ }
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return 0;
+}
+
+static void ras_umc_reserve_eeprom_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint64_t page_pfn[16];
+ int count = 0, i;
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages) {
+ count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core,
+ record, record->cur_nps, page_pfn, ARRAY_SIZE(page_pfn));
+ if (count <= 0) {
+ RAS_DEV_ERR(ras_core->dev,
+ "Fail to convert error address! count:%d\n", count);
+ return;
+ }
+ }
+
+ /* Reserve memory */
+ for (i = 0; i < count; i++)
+ ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__RESERVE_BAD_PAGE, &page_pfn[i]);
+}
+
+/* While a GPU reset is in progress, ECC logging is deferred to a pending list.
+ */
+int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_bank_ecc_node *ecc_node;
+
+ ecc_node = kzalloc(sizeof(*ecc_node), GFP_KERNEL);
+ if (!ecc_node)
+ return -ENOMEM;
+
+ memcpy(&ecc_node->ecc, bank, sizeof(ecc_node->ecc));
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_add_tail(&ecc_node->node, &ras_umc->pending_ecc_list);
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+/* After gpu reset is complete, re-log the pending error banks.
+ */
+int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_bank_ecc_node *ecc_node, *tmp;
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_for_each_entry_safe(ecc_node,
+ tmp, &ras_umc->pending_ecc_list, node){
+ if (ecc_node && !ras_umc_log_bad_bank(ras_core, &ecc_node->ecc)) {
+ list_del(&ecc_node->node);
+ kfree(ecc_node);
+ }
+ }
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+int ras_umc_log_bad_bank(struct ras_core_context *ras_core, struct ras_bank_ecc *bank)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_umc_record umc_rec;
+ struct eeprom_umc_record *err_rec;
+ int ret;
+
+ memset(&umc_rec, 0, sizeof(umc_rec));
+
+ mutex_lock(&ras_umc->bank_log_lock);
+ ret = ras_umc->ip_func->bank_to_eeprom_record(ras_core, bank, &umc_rec);
+ if (ret)
+ goto out;
+
+ err_rec = kzalloc(sizeof(*err_rec), GFP_KERNEL);
+ if (!err_rec) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(err_rec, &umc_rec, sizeof(umc_rec));
+ ret = ras_umc_log_ecc(ras_core, err_rec->cur_nps_retired_row_pfn, err_rec);
+ if (ret) {
+ if (ret == -EEXIST) {
+ RAS_DEV_INFO(ras_core->dev, "The bad pages have been logged before.\n");
+ ret = 0;
+ }
+
+ kfree(err_rec);
+ goto out;
+ }
+
+ ras_umc_reserve_eeprom_record(ras_core, err_rec);
+
+ ret = ras_core_event_notify(ras_core,
+ RAS_EVENT_ID__BAD_PAGE_DETECTED, NULL);
+
+out:
+ mutex_unlock(&ras_umc->bank_log_lock);
+ return ret;
+}
+
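+/*
+ * Harvest up to @num newly tagged records from the radix tree, clearing the
+ * NEW_DETECTED tag so each record is retired only once.
+ */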
+static int ras_umc_get_new_records(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *records, u32 num)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_umc_record *entries[MAX_ECC_NUM_PER_RETIREMENT];
+ u32 entry_num = min_t(u32, num, MAX_ECC_NUM_PER_RETIREMENT);
+ int count = 0;
+ int new_detected, i;
+
+ mutex_lock(&ras_umc->tree_lock);
+ new_detected = radix_tree_gang_lookup_tag(&ras_umc->root, (void **)entries,
+ 0, entry_num, UMC_ECC_NEW_DETECTED_TAG);
+ for (i = 0; i < new_detected; i++) {
+ if (!entries[i])
+ continue;
+
+ memcpy(&records[i], entries[i], sizeof(struct eeprom_umc_record));
+ count++;
+ radix_tree_tag_clear(&ras_umc->root,
+ entries[i]->cur_nps_retired_row_pfn, UMC_ECC_NEW_DETECTED_TAG);
+ }
+ mutex_unlock(&ras_umc->tree_lock);
+
+ return count;
+}
+
+static bool ras_umc_check_retired_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, bool from_eeprom)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data;
+ uint32_t nps = 0;
+ int i, ret;
+
+ if (from_eeprom) {
+ nps = ras_umc->umc_err_data.umc_nps_mode;
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_record) {
+ ret = ras_umc->ip_func->eeprom_record_to_nps_record(ras_core, record, nps);
+ if (ret)
+ RAS_DEV_WARN(ras_core->dev,
+ "Failed to adjust eeprom record, ret:%d", ret);
+ }
+ return false;
+ }
+
+ for (i = 0; i < data->count; i++) {
+ if ((data->bps[i].retired_row_pfn == record->retired_row_pfn) &&
+ (data->bps[i].cur_nps_retired_row_pfn == record->cur_nps_retired_row_pfn))
+ return true;
+ }
+
+ return false;
+}
+
+/* alloc/realloc bps array */
+static int ras_umc_realloc_err_data_space(struct ras_core_context *ras_core,
+ struct eeprom_store_record *data, int pages)
+{
+ unsigned int old_space = data->count + data->space_left;
+ unsigned int new_space = old_space + pages;
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kcalloc(align_space, sizeof(*data->bps), GFP_KERNEL);
+
+ if (!bps)
+ return -ENOMEM;
+
+ if (data->bps) {
+ memcpy(bps, data->bps,
+ data->count * sizeof(*data->bps));
+ kfree(data->bps);
+ }
+
+ data->bps = bps;
+ data->space_left += align_space - old_space;
+ return 0;
+}
+
+static int ras_umc_update_eeprom_rom_data(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.rom_data;
+
+ if (!data->space_left &&
+ ras_umc_realloc_err_data_space(ras_core, data, 256)) {
+ return -ENOMEM;
+ }
+
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ return 0;
+}
+
+static int ras_umc_update_eeprom_ram_data(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data;
+ uint64_t page_pfn[16];
+ int count = 0, j;
+
+ if (!data->space_left &&
+ ras_umc_realloc_err_data_space(ras_core, data, 256)) {
+ return -ENOMEM;
+ }
+
+ memset(page_pfn, 0, sizeof(page_pfn));
+ if (ras_umc->ip_func && ras_umc->ip_func->eeprom_record_to_nps_pages)
+ count = ras_umc->ip_func->eeprom_record_to_nps_pages(ras_core,
+ bps, bps->cur_nps, page_pfn, ARRAY_SIZE(page_pfn));
+
+ if (count > 0) {
+ for (j = 0; j < count; j++) {
+ bps->cur_nps_retired_row_pfn = page_pfn[j];
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ }
+ } else {
+ memcpy(&data->bps[data->count], bps, sizeof(*data->bps));
+ data->count++;
+ data->space_left--;
+ }
+
+ return 0;
+}
+
+/* this deals with VRAM only */
+static int ras_umc_add_bad_pages(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *bps,
+ int pages, bool from_eeprom)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_umc_err_data *data = &ras_umc->umc_err_data;
+ int i, ret = 0;
+
+ if (!bps || pages <= 0)
+ return 0;
+
+ mutex_lock(&ras_umc->umc_lock);
+ for (i = 0; i < pages; i++) {
+ if (ras_umc_check_retired_record(ras_core, &bps[i], from_eeprom))
+ continue;
+
+ ret = ras_umc_update_eeprom_rom_data(ras_core, &bps[i]);
+ if (ret)
+ goto out;
+
+ if (data->last_retired_pfn == bps[i].cur_nps_retired_row_pfn)
+ continue;
+
+ data->last_retired_pfn = bps[i].cur_nps_retired_row_pfn;
+
+ if (from_eeprom)
+ ras_umc_reserve_eeprom_record(ras_core, &bps[i]);
+
+ ret = ras_umc_update_eeprom_ram_data(ras_core, &bps[i]);
+ if (ret)
+ goto out;
+ }
+out:
+ mutex_unlock(&ras_umc->umc_lock);
+
+ return ret;
+}
+
+/*
+ * read the error record array from eeprom and add the recorded bad
+ * pages, reserving the affected memory
+ */
+int ras_umc_load_bad_pages(struct ras_core_context *ras_core)
+{
+ struct eeprom_umc_record *bps;
+ uint32_t ras_num_recs;
+ int ret;
+
+ ras_num_recs = ras_eeprom_get_record_count(ras_core);
+ /* no bad page record, skip eeprom access */
+ if (!ras_num_recs ||
+ ras_core->ras_eeprom.record_threshold_config == DISABLE_RETIRE_PAGE)
+ return 0;
+
+ bps = kcalloc(ras_num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ ret = ras_eeprom_read(ras_core, bps, ras_num_recs);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to load EEPROM table records!");
+ } else {
+ ras_core->ras_umc.umc_err_data.last_retired_pfn = UMC_INV_MEM_PFN;
+ ret = ras_umc_add_bad_pages(ras_core, bps, ras_num_recs, true);
+ }
+
+ kfree(bps);
+ return ret;
+}
+
+/*
+ * Write the error record array to EEPROM; only records beyond the
+ * current EEPROM count are appended. The function should be protected
+ * by recovery_lock.
+ */
+static int ras_umc_save_bad_pages(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.rom_data;
+ uint32_t eeprom_record_num;
+ int save_count;
+ int ret = 0;
+
+ if (!data->bps)
+ return 0;
+
+ eeprom_record_num = ras_eeprom_get_record_count(ras_core);
+ mutex_lock(&ras_umc->umc_lock);
+ save_count = data->count - eeprom_record_num;
+ /* only new entries are saved */
+ if (save_count > 0) {
+ if (ras_eeprom_append(ras_core,
+ &data->bps[eeprom_record_num],
+ save_count)) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to save EEPROM table data!");
+ ret = -EIO;
+ goto exit;
+ }
+
+ RAS_DEV_INFO(ras_core->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ }
+
+exit:
+ mutex_unlock(&ras_umc->umc_lock);
+ return ret;
+}
+
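+/*
+ * Retire pages for all newly logged ECC records: fold them into the
+ * in-memory bad-page tables, then persist any new entries to EEPROM.
+ */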
+int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data)
+{
+ struct eeprom_umc_record records[MAX_ECC_NUM_PER_RETIREMENT];
+ int count, ret;
+
+ memset(records, 0, sizeof(records));
+ count = ras_umc_get_new_records(ras_core, records, ARRAY_SIZE(records));
+ if (count <= 0)
+ return -ENODATA;
+
+ ret = ras_umc_add_bad_pages(ras_core, records, count, false);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to add ras bad page!\n");
+ return -EINVAL;
+ }
+
+ ret = ras_umc_save_bad_pages(ras_core);
+ if (ret) {
+ RAS_DEV_ERR(ras_core->dev, "Failed to save ras bad page\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int ras_umc_sw_init(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+
+ memset(ras_umc, 0, sizeof(*ras_umc));
+
+ INIT_LIST_HEAD(&ras_umc->pending_ecc_list);
+
+ INIT_RADIX_TREE(&ras_umc->root, GFP_KERNEL);
+
+ mutex_init(&ras_umc->tree_lock);
+ mutex_init(&ras_umc->pending_ecc_lock);
+ mutex_init(&ras_umc->umc_lock);
+ mutex_init(&ras_umc->bank_log_lock);
+
+ return 0;
+}
+
+int ras_umc_sw_fini(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct ras_umc_err_data *umc_err_data = &ras_umc->umc_err_data;
+ struct ras_bank_ecc_node *ecc_node, *tmp;
+
+ mutex_destroy(&ras_umc->umc_lock);
+ mutex_destroy(&ras_umc->bank_log_lock);
+
+ if (umc_err_data->rom_data.bps) {
+ umc_err_data->rom_data.count = 0;
+ kfree(umc_err_data->rom_data.bps);
+ umc_err_data->rom_data.bps = NULL;
+ umc_err_data->rom_data.space_left = 0;
+ }
+
+ if (umc_err_data->ram_data.bps) {
+ umc_err_data->ram_data.count = 0;
+ kfree(umc_err_data->ram_data.bps);
+ umc_err_data->ram_data.bps = NULL;
+ umc_err_data->ram_data.space_left = 0;
+ }
+
+ ras_umc_clear_logged_ecc(ras_core);
+
+ mutex_lock(&ras_umc->pending_ecc_lock);
+ list_for_each_entry_safe(ecc_node,
+ tmp, &ras_umc->pending_ecc_list, node) {
+ list_del(&ecc_node->node);
+ kfree(ecc_node);
+ }
+ mutex_unlock(&ras_umc->pending_ecc_lock);
+
+ mutex_destroy(&ras_umc->tree_lock);
+ mutex_destroy(&ras_umc->pending_ecc_lock);
+
+ return 0;
+}
+
+int ras_umc_hw_init(struct ras_core_context *ras_core)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ uint32_t nps;
+
+ nps = ras_core_get_curr_nps_mode(ras_core);
+
+ if (!nps || (nps >= UMC_MEMORY_PARTITION_MODE_UNKNOWN)) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid memory NPS mode: %u!\n", nps);
+ return -ENODATA;
+ }
+
+ ras_umc->umc_err_data.umc_nps_mode = nps;
+
+ ras_umc->umc_vram_type = ras_core->config->umc_cfg.umc_vram_type;
+ if (!ras_umc->umc_vram_type) {
+ RAS_DEV_ERR(ras_core->dev, "Invalid UMC VRAM Type: %u!\n",
+ ras_umc->umc_vram_type);
+ return -ENODATA;
+ }
+
+ ras_umc->umc_ip_version = ras_core->config->umc_ip_version;
+ ras_umc->ip_func = ras_umc_get_ip_func(ras_core, ras_umc->umc_ip_version);
+ if (!ras_umc->ip_func)
+ return -EINVAL;
+
+ return 0;
+}
+
+int ras_umc_hw_fini(struct ras_core_context *ras_core)
+{
+ return 0;
+}
+
+int ras_umc_clean_badpage_data(struct ras_core_context *ras_core)
+{
+ struct ras_umc_err_data *data = &ras_core->ras_umc.umc_err_data;
+
+ mutex_lock(&ras_core->ras_umc.umc_lock);
+
+ kfree(data->rom_data.bps);
+ kfree(data->ram_data.bps);
+
+ memset(data, 0, sizeof(*data));
+ mutex_unlock(&ras_core->ras_umc.umc_lock);
+
+ return 0;
+}
+
+int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core,
+ uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr,
+ enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record)
+{
+ struct eeprom_umc_record *err_rec = record;
+
+ /* Set bad page pfn and nps mode */
+ EEPROM_RECORD_SETUP_UMC_ADDR_AND_NPS(err_rec,
+ RAS_ADDR_TO_PFN(cur_nps_addr->pa), cur_nps);
+
+ err_rec->address = err_addr;
+ err_rec->ts = ras_umc_get_eeprom_timestamp(ras_core);
+ err_rec->err_type = RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = cur_nps_addr->channel_idx;
+ err_rec->mcumc_id = umc_inst;
+ err_rec->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(cur_nps_addr->pa);
+ err_rec->cur_nps_bank = cur_nps_addr->bank;
+ err_rec->cur_nps = cur_nps;
+ return 0;
+}
+
+int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core)
+{
+ struct ras_umc_err_data *err_data = &ras_core->ras_umc.umc_err_data;
+
+ return err_data->rom_data.count;
+}
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data;
+
+ return data->count;
+}
+
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record)
+{
+ struct eeprom_store_record *data = &ras_core->ras_umc.umc_err_data.ram_data;
+
+ if (index >= data->count)
+ return -EINVAL;
+
+ memcpy(record, &data->bps[index], sizeof(struct eeprom_umc_record));
+ return 0;
+}
+
+bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ struct eeprom_store_record *data = &ras_umc->umc_err_data.ram_data;
+ uint64_t page_pfn = RAS_ADDR_TO_PFN(addr);
+ bool ret = false;
+ int i;
+
+ mutex_lock(&ras_umc->umc_lock);
+ for (i = 0; i < data->count; i++) {
+ if (data->bps[i].cur_nps_retired_row_pfn == page_pfn) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&ras_umc->umc_lock);
+
+ return ret;
+}
+
+int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa)
+{
+ struct ras_umc *ras_umc = &ras_core->ras_umc;
+ int ret = 0;
+
+ if (bank_to_pa)
+ ret = ras_umc->ip_func->bank_to_soc_pa(ras_core, *bank_addr, soc_pa);
+ else
+ ret = ras_umc->ip_func->soc_pa_to_bank(ras_core, *soc_pa, bank_addr);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h
new file mode 100644
index 000000000000..7d9e779d8c4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __RAS_UMC_H__
+#define __RAS_UMC_H__
+#include "ras.h"
+#include "ras_eeprom.h"
+#include "ras_cmd.h"
+
+#define UMC_VRAM_TYPE_UNKNOWN 0
+#define UMC_VRAM_TYPE_GDDR1 1
+#define UMC_VRAM_TYPE_DDR2 2
+#define UMC_VRAM_TYPE_GDDR3 3
+#define UMC_VRAM_TYPE_GDDR4 4
+#define UMC_VRAM_TYPE_GDDR5 5
+#define UMC_VRAM_TYPE_HBM 6
+#define UMC_VRAM_TYPE_DDR3 7
+#define UMC_VRAM_TYPE_DDR4 8
+#define UMC_VRAM_TYPE_GDDR6 9
+#define UMC_VRAM_TYPE_DDR5 10
+#define UMC_VRAM_TYPE_LPDDR4 11
+#define UMC_VRAM_TYPE_LPDDR5 12
+#define UMC_VRAM_TYPE_HBM3E 13
+
+#define UMC_ECC_NEW_DETECTED_TAG 0x1
+#define UMC_INV_MEM_PFN (0xFFFFFFFFFFFFFFFFULL)
+
+/* three column bits and one row bit of the MCA address are flipped
+ * during bad page retirement
+ */
+#define UMC_PA_FLIP_BITS_NUM 4
+
+enum umc_memory_partition_mode {
+ UMC_MEMORY_PARTITION_MODE_NONE = 0,
+ UMC_MEMORY_PARTITION_MODE_NPS1 = 1,
+ UMC_MEMORY_PARTITION_MODE_NPS2 = 2,
+ UMC_MEMORY_PARTITION_MODE_NPS3 = 3,
+ UMC_MEMORY_PARTITION_MODE_NPS4 = 4,
+ UMC_MEMORY_PARTITION_MODE_NPS6 = 6,
+ UMC_MEMORY_PARTITION_MODE_NPS8 = 8,
+ UMC_MEMORY_PARTITION_MODE_UNKNOWN
+};
+
+struct ras_core_context;
+struct ras_bank_ecc;
+
+struct umc_flip_bits {
+ uint32_t flip_bits_in_pa[UMC_PA_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
+
+struct umc_mca_addr {
+ uint64_t err_addr;
+ uint32_t ch_inst;
+ uint32_t umc_inst;
+ uint32_t node_inst;
+ uint32_t socket_id;
+};
+
+struct umc_phy_addr {
+ uint64_t pa;
+ uint32_t bank;
+ uint32_t channel_idx;
+};
+
+struct umc_bank_addr {
+ uint32_t stack_id; /* SID */
+ uint32_t bank_group;
+ uint32_t bank;
+ uint32_t row;
+ uint32_t column;
+ uint32_t channel;
+ uint32_t subchannel; /* Also called Pseudochannel (PC) */
+};
+
+struct ras_umc_ip_func {
+ int (*bank_to_eeprom_record)(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct eeprom_umc_record *record);
+ int (*eeprom_record_to_nps_record)(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps);
+ int (*eeprom_record_to_nps_pages)(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num);
+ int (*bank_to_soc_pa)(struct ras_core_context *ras_core,
+ struct umc_bank_addr bank_addr, uint64_t *soc_pa);
+ int (*soc_pa_to_bank)(struct ras_core_context *ras_core,
+ uint64_t soc_pa, struct umc_bank_addr *bank_addr);
+};
+
+struct eeprom_store_record {
+ /* points to the data records array */
+ struct eeprom_umc_record *bps;
+ /* number of entries in use */
+ int count;
+ /* remaining capacity for new entries */
+ int space_left;
+};
+
+struct ras_umc_err_data {
+ struct eeprom_store_record rom_data;
+ struct eeprom_store_record ram_data;
+ enum umc_memory_partition_mode umc_nps_mode;
+ uint64_t last_retired_pfn;
+};
+
+struct ras_umc {
+ u32 umc_ip_version;
+ u32 umc_vram_type;
+ const struct ras_umc_ip_func *ip_func;
+ struct radix_tree_root root;
+ struct mutex tree_lock;
+ struct mutex umc_lock;
+ struct mutex bank_log_lock;
+ struct mutex pending_ecc_lock;
+ struct ras_umc_err_data umc_err_data;
+ struct list_head pending_ecc_list;
+};
+
+int ras_umc_sw_init(struct ras_core_context *ras);
+int ras_umc_sw_fini(struct ras_core_context *ras);
+int ras_umc_hw_init(struct ras_core_context *ras);
+int ras_umc_hw_fini(struct ras_core_context *ras);
+int ras_umc_psp_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *in, struct umc_phy_addr *out,
+ uint32_t nps);
+int ras_umc_handle_bad_pages(struct ras_core_context *ras_core, void *data);
+int ras_umc_log_bad_bank(struct ras_core_context *ras, struct ras_bank_ecc *bank);
+int ras_umc_log_bad_bank_pending(struct ras_core_context *ras_core, struct ras_bank_ecc *bank);
+int ras_umc_log_pending_bad_bank(struct ras_core_context *ras_core);
+int ras_umc_clear_logged_ecc(struct ras_core_context *ras_core);
+int ras_umc_load_bad_pages(struct ras_core_context *ras_core);
+int ras_umc_get_saved_eeprom_count(struct ras_core_context *ras_core);
+int ras_umc_clean_badpage_data(struct ras_core_context *ras_core);
+int ras_umc_fill_eeprom_record(struct ras_core_context *ras_core,
+ uint64_t err_addr, uint32_t umc_inst, struct umc_phy_addr *cur_nps_addr,
+ enum umc_memory_partition_mode cur_nps, struct eeprom_umc_record *record);
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core);
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record);
+bool ras_umc_check_retired_addr(struct ras_core_context *ras_core, uint64_t addr);
+int ras_umc_translate_soc_pa_and_bank(struct ras_core_context *ras_core,
+ uint64_t *soc_pa, struct umc_bank_addr *bank_addr, bool bank_to_pa);
+#endif
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c
new file mode 100644
index 000000000000..5d9a11c17a86
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "ras.h"
+#include "ras_umc.h"
+#include "ras_core_status.h"
+#include "ras_umc_v12_0.h"
+
+#define NumDieInterleaved 4
+
+static const uint32_t umc_v12_0_channel_idx_tbl[]
+ [UMC_V12_0_UMC_INSTANCE_NUM][UMC_V12_0_CHANNEL_INSTANCE_NUM] = {
+ {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12},
+ {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}},
+ {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32},
+ {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}},
+ {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64},
+ {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}},
+ {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108},
+ {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}}
+};
+
+/* mapping of MCA error address to normalized address */
+static const uint32_t umc_v12_0_ma2na_mapping[] = {
+ 0, 5, 6, 8, 9, 14, 12, 13,
+ 10, 11, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28,
+ 24, 7, 29, 30,
+};
+
+static bool umc_v12_0_bit_wise_xor(uint32_t val)
+{
+ bool result = false;
+ int i;
+
+ for (i = 0; i < 32; i++)
+ result = result ^ ((val >> i) & 0x1);
+
+ return result;
+}
+
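+/*
+ * Select the physical-address bits that alias within a retired row ("flip
+ * bits"). The defaults apply to NPS1; NPS2/NPS4 and the HBM generation
+ * change which channel, bank and row bits participate.
+ */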
+static void __get_nps_pa_flip_bits(struct ras_core_context *ras_core,
+ enum umc_memory_partition_mode nps,
+ struct umc_flip_bits *flip_bits)
+{
+ uint32_t vram_type = ras_core->ras_umc.umc_vram_type;
+
+ /* default setting */
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C3_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_C4_BIT;
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R13_BIT;
+ flip_bits->flip_row_bit = 13;
+ flip_bits->bit_num = 4;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R13_BIT;
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_C2_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B1_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R12_BIT;
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) {
+ flip_bits->flip_bits_in_pa[0] = UMC_V12_0_PA_CH4_BIT;
+ flip_bits->flip_bits_in_pa[1] = UMC_V12_0_PA_CH5_BIT;
+ flip_bits->flip_bits_in_pa[2] = UMC_V12_0_PA_B0_BIT;
+ flip_bits->r13_in_pa = UMC_V12_0_PA_R11_BIT;
+ }
+
+ switch (vram_type) {
+ case UMC_VRAM_TYPE_HBM:
+ /* other nps modes are taken as nps1 */
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+
+ break;
+ case UMC_VRAM_TYPE_HBM3E:
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R12_BIT;
+ flip_bits->flip_row_bit = 12;
+
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R11_BIT;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ flip_bits->flip_bits_in_pa[3] = UMC_V12_0_PA_R10_BIT;
+
+ break;
+ default:
+ RAS_DEV_WARN(ras_core->dev,
+ "Unknown HBM type, set RAS retire flip bits to the value in NPS1 mode.\n");
+ break;
+ }
+}
+
+static uint64_t convert_nps_pa_to_row_pa(struct ras_core_context *ras_core,
+ uint64_t pa, enum umc_memory_partition_mode nps, bool zero_pfn_ok)
+{
+ struct umc_flip_bits flip_bits = {0};
+ uint64_t row_pa;
+ int i;
+
+ __get_nps_pa_flip_bits(ras_core, nps, &flip_bits);
+
+ row_pa = pa;
+ /* clear loop bits in soc physical address */
+ for (i = 0; i < flip_bits.bit_num; i++)
+ row_pa &= ~BIT_ULL(flip_bits.flip_bits_in_pa[i]);
+
+ if (!zero_pfn_ok && !RAS_ADDR_TO_PFN(row_pa))
+ row_pa |= BIT_ULL(flip_bits.flip_bits_in_pa[2]);
+
+ return row_pa;
+}
+
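+/*
+ * Enumerate every page in a retired row by iterating all 2^bit_num
+ * combinations of the flip bits; @pfns may be NULL when only dumping.
+ */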
+static int lookup_bad_pages_in_a_row(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num,
+ uint64_t seq_no, bool dump)
+{
+ uint32_t col, col_lower, row, row_lower, idx, row_high;
+ uint64_t soc_pa, row_pa, column, err_addr;
+ uint64_t retired_addr = RAS_PFN_TO_ADDR(record->cur_nps_retired_row_pfn);
+ struct umc_flip_bits flip_bits = {0};
+ uint32_t retire_unit;
+ uint32_t i;
+
+ __get_nps_pa_flip_bits(ras_core, nps, &flip_bits);
+
+ row_pa = convert_nps_pa_to_row_pa(ras_core, retired_addr, nps, true);
+
+ err_addr = record->address;
+ /* get column bit 0 and 1 in mca address */
+ col_lower = (err_addr >> 1) & 0x3ULL;
+ /* MA_R13_BIT will be handled later */
+ row_lower = (err_addr >> UMC_V12_0_MCA_R0_BIT) & 0x1fffULL;
+ row_lower &= ~BIT_ULL(flip_bits.flip_row_bit);
+
+ if (ras_core->ras_gfx.gfx_ip_version >= IP_VERSION(9, 5, 0)) {
+ row_high = (row_pa >> flip_bits.r13_in_pa) & 0x3ULL;
+ /* each channel holds 2.25 GB; the MCA-to-PA conversion changes
+ * row bits [R14:R13] when both are 0x3, so take them from the
+ * PA instead of the MCA address
+ */
+ row_lower |= (row_high << 13);
+ }
+
+ idx = 0;
+ row = 0;
+ retire_unit = 0x1 << flip_bits.bit_num;
+ /* loop for all possibilities of retire bits */
+ for (column = 0; column < retire_unit; column++) {
+ soc_pa = row_pa;
+ for (i = 0; i < flip_bits.bit_num; i++)
+ soc_pa |= (((column >> i) & 0x1ULL) << flip_bits.flip_bits_in_pa[i]);
+
+ col = ((column & 0x7) << 2) | col_lower;
+
+ /* add row bit 13 */
+ if (flip_bits.bit_num == UMC_PA_FLIP_BITS_NUM)
+ row = ((column >> 3) << flip_bits.flip_row_bit) | row_lower;
+
+ if (dump)
+ RAS_DEV_INFO(ras_core->dev,
+ "{%llu} Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n",
+ seq_no, soc_pa, row, col,
+ record->cur_nps_bank, record->mem_channel);
+
+ if (pfns && (idx < num))
+ pfns[idx++] = RAS_ADDR_TO_PFN(soc_pa);
+ }
+
+ return idx;
+}
+
+static int umc_v12_convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out,
+ uint32_t nps)
+{
+ uint32_t i, na_shift;
+ uint64_t soc_pa, na, na_nps;
+ uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row;
+ uint32_t bank0, bank1, bank2, bank3, bank;
+ uint32_t ch_inst = addr_in->ch_inst;
+ uint32_t umc_inst = addr_in->umc_inst;
+ uint32_t node_inst = addr_in->node_inst;
+ uint32_t socket_id = addr_in->socket_id;
+ uint32_t channel_index;
+ uint64_t err_addr = addr_in->err_addr;
+
+ if (node_inst != UMC_INV_AID_NODE) {
+ if (ch_inst >= UMC_V12_0_CHANNEL_INSTANCE_NUM ||
+ umc_inst >= UMC_V12_0_UMC_INSTANCE_NUM ||
+ node_inst >= UMC_V12_0_AID_NUM_MAX ||
+ socket_id >= UMC_V12_0_SOCKET_NUM_MAX)
+ return -EINVAL;
+ } else {
+ if (socket_id >= UMC_V12_0_SOCKET_NUM_MAX ||
+ ch_inst >= UMC_V12_0_TOTAL_CHANNEL_NUM)
+ return -EINVAL;
+ }
+
+ bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL;
+ bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL;
+ bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL;
+ bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL;
+ col = (err_addr >> 1) & 0x1fULL;
+ row = (err_addr >> 10) & 0x3fffULL;
+
+ /* apply bank hash algorithm */
+ bank0 =
+ bank_hash0 ^ (UMC_V12_0_XOR_EN0 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0))));
+ bank1 =
+ bank_hash1 ^ (UMC_V12_0_XOR_EN1 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1))));
+ bank2 =
+ bank_hash2 ^ (UMC_V12_0_XOR_EN2 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2))));
+ bank3 =
+ bank_hash3 ^ (UMC_V12_0_XOR_EN3 &
+ (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^
+ (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3))));
+
+ bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3);
+ err_addr &= ~0x3c0ULL;
+ err_addr |= (bank << UMC_V12_0_MCA_B0_BIT);
+
+ na_nps = 0x0;
+ /* convert mca error address to normalized address */
+ for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++)
+ na_nps |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i];
+
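+ /* the NA shift strips the partition-interleave channel bits above
+ * the 256-byte offset; one more bit is consumed each time the NPS
+ * mode doubles
+ */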
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS1)
+ na_shift = 8;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS2)
+ na_shift = 9;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4)
+ na_shift = 10;
+ else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8)
+ na_shift = 11;
+ else
+ return -EINVAL;
+
+ na = ((na_nps >> na_shift) << 8) | (na_nps & 0xff);
+
+ if (node_inst != UMC_INV_AID_NODE)
+ channel_index =
+ umc_v12_0_channel_idx_tbl[node_inst][umc_inst][ch_inst];
+ else {
+ channel_index = ch_inst;
+ node_inst = channel_index /
+ (UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM);
+ }
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ soc_pa = ADDR_OF_32KB_BLOCK(na) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(na);
+
+ /* calc channel hash based on absolute address */
+ soc_pa += socket_id * SOCKET_LFB_SIZE;
+ /* the umc channel bits are not original values, they are hashed */
+ UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa);
+ /* restore pa */
+ soc_pa -= socket_id * SOCKET_LFB_SIZE;
+
+ /* get some channel bits from na_nps directly and
+ * add nps section offset
+ */
+ if (nps == UMC_MEMORY_PARTITION_MODE_NPS2) {
+ soc_pa &= ~(0x1ULL << UMC_V12_0_PA_CH5_BIT);
+ soc_pa |= ((na_nps & 0x100) << 5);
+ soc_pa += (node_inst >> 1) * (SOCKET_LFB_SIZE >> 1);
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS4) {
+ soc_pa &= ~(0x3ULL << UMC_V12_0_PA_CH4_BIT);
+ soc_pa |= ((na_nps & 0x300) << 4);
+ soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2);
+ } else if (nps == UMC_MEMORY_PARTITION_MODE_NPS8) {
+ soc_pa &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT);
+ soc_pa |= ((na_nps & 0x700) << 4);
+ soc_pa += node_inst * (SOCKET_LFB_SIZE >> 2) +
+ (channel_index >> 4) * (SOCKET_LFB_SIZE >> 3);
+ }
+
+ addr_out->pa = soc_pa;
+ addr_out->bank = bank;
+ addr_out->channel_idx = channel_index;
+
+ return 0;
+}
+
+static int convert_ma_to_pa(struct ras_core_context *ras_core,
+ struct umc_mca_addr *addr_in, struct umc_phy_addr *addr_out,
+ uint32_t nps)
+{
+ int ret;
+
+ if (ras_psp_check_supported_cmd(ras_core, RAS_TA_CMD_ID__QUERY_ADDRESS))
+ ret = ras_umc_psp_convert_ma_to_pa(ras_core,
+ addr_in, addr_out, nps);
+ else
+ ret = umc_v12_convert_ma_to_pa(ras_core,
+ addr_in, addr_out, nps);
+
+ return ret;
+}
+
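+/*
+ * Unpack the ACA bank registers into an MCA address tuple, translate it to
+ * a SoC physical address, then canonicalize the result to its row address.
+ */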
+static int convert_bank_to_nps_addr(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct umc_phy_addr *pa_addr, uint32_t nps)
+{
+ struct umc_mca_addr addr_in;
+ struct umc_phy_addr addr_out;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ addr_in.err_addr = ACA_ADDR_2_ERR_ADDR(bank->addr);
+ addr_in.ch_inst = ACA_IPID_2_UMC_CH(bank->ipid);
+ addr_in.umc_inst = ACA_IPID_2_UMC_INST(bank->ipid);
+ addr_in.node_inst = ACA_IPID_2_DIE_ID(bank->ipid);
+ addr_in.socket_id = ACA_IPID_2_SOCKET_ID(bank->ipid);
+
+ ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps);
+ if (!ret) {
+ pa_addr->pa =
+ convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false);
+ pa_addr->channel_idx = addr_out.channel_idx;
+ pa_addr->bank = addr_out.bank;
+ }
+
+ return ret;
+}
+
+static int umc_v12_0_bank_to_eeprom_record(struct ras_core_context *ras_core,
+ struct ras_bank_ecc *bank, struct eeprom_umc_record *record)
+{
+ struct umc_phy_addr nps_addr;
+ int ret;
+
+ memset(&nps_addr, 0, sizeof(nps_addr));
+
+ ret = convert_bank_to_nps_addr(ras_core, bank,
+ &nps_addr, bank->nps);
+ if (ret)
+ return ret;
+
+ ras_umc_fill_eeprom_record(ras_core,
+ ACA_ADDR_2_ERR_ADDR(bank->addr), ACA_IPID_2_UMC_INST(bank->ipid),
+ &nps_addr, bank->nps, record);
+
+ lookup_bad_pages_in_a_row(ras_core, record,
+ bank->nps, NULL, 0, bank->seq_no, true);
+
+ return 0;
+}
+
+static int convert_eeprom_record_to_nps_addr(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint64_t *pa, uint32_t nps)
+{
+ struct device_system_info dev_info = {0};
+ struct umc_mca_addr addr_in;
+ struct umc_phy_addr addr_out;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ memset(&addr_out, 0, sizeof(addr_out));
+
+ ras_core_get_device_system_info(ras_core, &dev_info);
+
+ addr_in.err_addr = record->address;
+ addr_in.ch_inst = record->mem_channel;
+ addr_in.umc_inst = record->mcumc_id;
+ addr_in.node_inst = UMC_INV_AID_NODE;
+ addr_in.socket_id = dev_info.socket_id;
+
+ ret = convert_ma_to_pa(ras_core, &addr_in, &addr_out, nps);
+ if (ret)
+ return ret;
+
+ *pa = convert_nps_pa_to_row_pa(ras_core, addr_out.pa, nps, false);
+
+ return 0;
+}
+
+static int umc_v12_0_eeprom_record_to_nps_record(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps)
+{
+ uint64_t pa = 0;
+ int ret = 0;
+
+ if (nps == EEPROM_RECORD_UMC_NPS_MODE(record)) {
+ record->cur_nps_retired_row_pfn = EEPROM_RECORD_UMC_ADDR_PFN(record);
+ } else {
+ ret = convert_eeprom_record_to_nps_addr(ras_core,
+ record, &pa, nps);
+ if (!ret)
+ record->cur_nps_retired_row_pfn = RAS_ADDR_TO_PFN(pa);
+ }
+
+ record->cur_nps = nps;
+
+ return ret;
+}
+
+static int umc_v12_0_eeprom_record_to_nps_pages(struct ras_core_context *ras_core,
+ struct eeprom_umc_record *record, uint32_t nps,
+ uint64_t *pfns, uint32_t num)
+{
+ return lookup_bad_pages_in_a_row(ras_core,
+ record, nps, pfns, num, 0, false);
+}
+
+static int umc_12_0_soc_pa_to_bank(struct ras_core_context *ras_core,
+ uint64_t soc_pa,
+ struct umc_bank_addr *bank_addr)
+{
+ int channel_hashed = 0;
+ int channel_real = 0;
+ int channel_reversed = 0;
+ int i = 0;
+
+ bank_addr->stack_id = UMC_V12_0_SOC_PA_TO_SID(soc_pa);
+ bank_addr->bank_group = 0; /* combination of SID and bank bits; TODO: is it needed? */
+ bank_addr->bank = UMC_V12_0_SOC_PA_TO_BANK(soc_pa);
+ bank_addr->row = UMC_V12_0_SOC_PA_TO_ROW(soc_pa);
+ bank_addr->column = UMC_V12_0_SOC_PA_TO_COL(soc_pa);
+
+ /* Channel bits 4-6 are hashed. Bruteforce reverse the hash */
+ channel_hashed = (soc_pa >> UMC_V12_0_PA_CH4_BIT) & 0x7;
+
+ for (i = 0; i < 8; i++) {
+ channel_reversed = 0;
+ channel_reversed |= UMC_V12_0_CHANNEL_HASH_CH4((i << 4), soc_pa);
+ channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH5((i << 4), soc_pa) << 1);
+ channel_reversed |= (UMC_V12_0_CHANNEL_HASH_CH6((i << 4), soc_pa) << 2);
+ if (channel_reversed == channel_hashed) {
+ channel_real = (i << 4) | ((soc_pa >> UMC_V12_0_PA_CH0_BIT) & 0xf);
+ break;
+ }
+ }
+
+ bank_addr->channel = channel_real;
+ bank_addr->subchannel = UMC_V12_0_SOC_PA_TO_PC(soc_pa);
+
+ return 0;
+}
+
+static int umc_12_0_bank_to_soc_pa(struct ras_core_context *ras_core,
+ struct umc_bank_addr bank_addr,
+ uint64_t *soc_pa)
+{
+ uint64_t na = 0;
+ uint64_t tmp_pa = 0;
+
+ *soc_pa = 0;
+
+ tmp_pa |= UMC_V12_0_SOC_SID_TO_PA(bank_addr.stack_id);
+ tmp_pa |= UMC_V12_0_SOC_BANK_TO_PA(bank_addr.bank);
+ tmp_pa |= UMC_V12_0_SOC_ROW_TO_PA(bank_addr.row);
+ tmp_pa |= UMC_V12_0_SOC_COL_TO_PA(bank_addr.column);
+ tmp_pa |= UMC_V12_0_SOC_CH_TO_PA(bank_addr.channel);
+ tmp_pa |= UMC_V12_0_SOC_PC_TO_PA(bank_addr.subchannel);
+
+ /* Get the NA */
+ na = ((tmp_pa >> UMC_V12_0_PA_C2_BIT) << UMC_V12_0_NA_C2_BIT);
+ na |= tmp_pa & 0xff;
+
+ /* translate umc channel address to soc pa, 3 parts are included */
+ tmp_pa = ADDR_OF_32KB_BLOCK(na) |
+ ADDR_OF_256B_BLOCK(bank_addr.channel) |
+ OFFSET_IN_256B_BLOCK(na);
+
+ /* the umc channel bits are not original values, they are hashed */
+ UMC_V12_0_SET_CHANNEL_HASH(bank_addr.channel, tmp_pa);
+
+ *soc_pa = tmp_pa;
+
+ return 0;
+}
+
+const struct ras_umc_ip_func ras_umc_func_v12_0 = {
+ .bank_to_eeprom_record = umc_v12_0_bank_to_eeprom_record,
+ .eeprom_record_to_nps_record = umc_v12_0_eeprom_record_to_nps_record,
+ .eeprom_record_to_nps_pages = umc_v12_0_eeprom_record_to_nps_pages,
+ .bank_to_soc_pa = umc_12_0_bank_to_soc_pa,
+ .soc_pa_to_bank = umc_12_0_soc_pa_to_bank,
+};
diff --git a/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h
new file mode 100644
index 000000000000..8a35ad856165
--- /dev/null
+++ b/drivers/gpu/drm/amd/ras/rascore/ras_umc_v12_0.h
@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __RAS_UMC_V12_0_H__
+#define __RAS_UMC_V12_0_H__
+#include "ras.h"
+
+/* MCA_UMC_UMC0_MCUMC_ADDRT0 */
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr__SHIFT 0x0
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved__SHIFT 0x38
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__ErrorAddr_MASK 0x00FFFFFFFFFFFFFFL
+#define MCA_UMC_UMC0_MCUMC_ADDRT0__Reserved_MASK 0xFF00000000000000L
+
+/* MCMP1_IPIDT0 */
+#define MCMP1_IPIDT0__InstanceIdLo__SHIFT 0x0
+#define MCMP1_IPIDT0__HardwareID__SHIFT 0x20
+#define MCMP1_IPIDT0__InstanceIdHi__SHIFT 0x2c
+#define MCMP1_IPIDT0__McaType__SHIFT 0x30
+
+#define MCMP1_IPIDT0__InstanceIdLo_MASK 0x00000000FFFFFFFFL
+#define MCMP1_IPIDT0__HardwareID_MASK 0x00000FFF00000000L
+#define MCMP1_IPIDT0__InstanceIdHi_MASK 0x0000F00000000000L
+#define MCMP1_IPIDT0__McaType_MASK 0xFFFF000000000000L
+
+/* number of umc channel instance with memory map register access */
+#define UMC_V12_0_CHANNEL_INSTANCE_NUM 8
+/* number of umc instance with memory map register access */
+#define UMC_V12_0_UMC_INSTANCE_NUM 4
+
+/* one piece of normalized address is mapped to 8 pieces of physical address */
+#define UMC_V12_0_NA_MAP_PA_NUM 8
+
+/* bank bits in MCA error address */
+#define UMC_V12_0_MCA_B0_BIT 6
+#define UMC_V12_0_MCA_B1_BIT 7
+#define UMC_V12_0_MCA_B2_BIT 8
+#define UMC_V12_0_MCA_B3_BIT 9
+
+/* row bits in MCA address */
+#define UMC_V12_0_MCA_R0_BIT 10
+
+/* Stack ID bits in SOC physical address */
+#define UMC_V12_0_PA_SID1_BIT 37
+#define UMC_V12_0_PA_SID0_BIT 36
+
+/* bank bits in SOC physical address */
+#define UMC_V12_0_PA_B3_BIT 18
+#define UMC_V12_0_PA_B2_BIT 17
+#define UMC_V12_0_PA_B1_BIT 20
+#define UMC_V12_0_PA_B0_BIT 19
+
+/* row bits in SOC physical address */
+#define UMC_V12_0_PA_R13_BIT 35
+#define UMC_V12_0_PA_R12_BIT 34
+#define UMC_V12_0_PA_R11_BIT 33
+#define UMC_V12_0_PA_R10_BIT 32
+#define UMC_V12_0_PA_R9_BIT 31
+#define UMC_V12_0_PA_R8_BIT 30
+#define UMC_V12_0_PA_R7_BIT 29
+#define UMC_V12_0_PA_R6_BIT 28
+#define UMC_V12_0_PA_R5_BIT 27
+#define UMC_V12_0_PA_R4_BIT 26
+#define UMC_V12_0_PA_R3_BIT 25
+#define UMC_V12_0_PA_R2_BIT 24
+#define UMC_V12_0_PA_R1_BIT 23
+#define UMC_V12_0_PA_R0_BIT 22
+
+/* column bits in SOC physical address */
+#define UMC_V12_0_PA_C4_BIT 21
+#define UMC_V12_0_PA_C3_BIT 16
+#define UMC_V12_0_PA_C2_BIT 15
+#define UMC_V12_0_PA_C1_BIT 6
+#define UMC_V12_0_PA_C0_BIT 5
+
+/* channel index bits in SOC physical address */
+#define UMC_V12_0_PA_CH6_BIT 14
+#define UMC_V12_0_PA_CH5_BIT 13
+#define UMC_V12_0_PA_CH4_BIT 12
+#define UMC_V12_0_PA_CH3_BIT 11
+#define UMC_V12_0_PA_CH2_BIT 10
+#define UMC_V12_0_PA_CH1_BIT 9
+#define UMC_V12_0_PA_CH0_BIT 8
+
+/* Pseudochannel index bits in SOC physical address */
+#define UMC_V12_0_PA_PC0_BIT 7
+
+#define UMC_V12_0_NA_C2_BIT 8
+
+#define UMC_V12_0_SOC_PA_TO_SID(pa) \
+ ((((pa >> UMC_V12_0_PA_SID0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_SID1_BIT) & 0x1ULL) << 1ULL))
+
+#define UMC_V12_0_SOC_PA_TO_BANK(pa) \
+ ((((pa >> UMC_V12_0_PA_B0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_B1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_B2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_B3_BIT) & 0x1ULL) << 3ULL))
+
+#define UMC_V12_0_SOC_PA_TO_ROW(pa) \
+ ((((pa >> UMC_V12_0_PA_R0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_R1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_R2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_R3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_R4_BIT) & 0x1ULL) << 4ULL) | \
+ (((pa >> UMC_V12_0_PA_R5_BIT) & 0x1ULL) << 5ULL) | \
+ (((pa >> UMC_V12_0_PA_R6_BIT) & 0x1ULL) << 6ULL) | \
+ (((pa >> UMC_V12_0_PA_R7_BIT) & 0x1ULL) << 7ULL) | \
+ (((pa >> UMC_V12_0_PA_R8_BIT) & 0x1ULL) << 8ULL) | \
+ (((pa >> UMC_V12_0_PA_R9_BIT) & 0x1ULL) << 9ULL) | \
+ (((pa >> UMC_V12_0_PA_R10_BIT) & 0x1ULL) << 10ULL) | \
+ (((pa >> UMC_V12_0_PA_R11_BIT) & 0x1ULL) << 11ULL) | \
+ (((pa >> UMC_V12_0_PA_R12_BIT) & 0x1ULL) << 12ULL) | \
+ (((pa >> UMC_V12_0_PA_R13_BIT) & 0x1ULL) << 13ULL))
+
+#define UMC_V12_0_SOC_PA_TO_COL(pa) \
+ ((((pa >> UMC_V12_0_PA_C0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_C1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_C2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_C3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_C4_BIT) & 0x1ULL) << 4ULL))
+
+#define UMC_V12_0_SOC_PA_TO_CH(pa) \
+ ((((pa >> UMC_V12_0_PA_CH0_BIT) & 0x1ULL) << 0ULL) | \
+ (((pa >> UMC_V12_0_PA_CH1_BIT) & 0x1ULL) << 1ULL) | \
+ (((pa >> UMC_V12_0_PA_CH2_BIT) & 0x1ULL) << 2ULL) | \
+ (((pa >> UMC_V12_0_PA_CH3_BIT) & 0x1ULL) << 3ULL) | \
+ (((pa >> UMC_V12_0_PA_CH4_BIT) & 0x1ULL) << 4ULL) | \
+ (((pa >> UMC_V12_0_PA_CH5_BIT) & 0x1ULL) << 5ULL) | \
+ (((pa >> UMC_V12_0_PA_CH6_BIT) & 0x1ULL) << 6ULL))
+
+#define UMC_V12_0_SOC_PA_TO_PC(pa) (((pa >> UMC_V12_0_PA_PC0_BIT) & 0x1ULL) << 0ULL)
+
+#define UMC_V12_0_SOC_SID_TO_PA(sid) \
+ ((((sid >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_SID0_BIT) | \
+ (((sid >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_SID1_BIT))
+
+#define UMC_V12_0_SOC_BANK_TO_PA(bank) \
+ ((((bank >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_B0_BIT) | \
+ (((bank >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_B1_BIT) | \
+ (((bank >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_B2_BIT) | \
+ (((bank >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_B3_BIT))
+
+#define UMC_V12_0_SOC_ROW_TO_PA(row) \
+ ((((row >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_R0_BIT) | \
+ (((row >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_R1_BIT) | \
+ (((row >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_R2_BIT) | \
+ (((row >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_R3_BIT) | \
+ (((row >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_R4_BIT) | \
+ (((row >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_R5_BIT) | \
+ (((row >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_R6_BIT) | \
+ (((row >> 7ULL) & 0x1ULL) << UMC_V12_0_PA_R7_BIT) | \
+ (((row >> 8ULL) & 0x1ULL) << UMC_V12_0_PA_R8_BIT) | \
+ (((row >> 9ULL) & 0x1ULL) << UMC_V12_0_PA_R9_BIT) | \
+ (((row >> 10ULL) & 0x1ULL) << UMC_V12_0_PA_R10_BIT) | \
+ (((row >> 11ULL) & 0x1ULL) << UMC_V12_0_PA_R11_BIT) | \
+ (((row >> 12ULL) & 0x1ULL) << UMC_V12_0_PA_R12_BIT) | \
+ (((row >> 13ULL) & 0x1ULL) << UMC_V12_0_PA_R13_BIT))
+
+#define UMC_V12_0_SOC_COL_TO_PA(col) \
+ ((((col >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_C0_BIT) | \
+ (((col >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_C1_BIT) | \
+ (((col >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_C2_BIT) | \
+ (((col >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_C3_BIT) | \
+ (((col >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_C4_BIT))
+
+#define UMC_V12_0_SOC_CH_TO_PA(ch) \
+ ((((ch >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_CH0_BIT) | \
+ (((ch >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_CH1_BIT) | \
+ (((ch >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_CH2_BIT) | \
+ (((ch >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_CH3_BIT) | \
+ (((ch >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_CH4_BIT) | \
+ (((ch >> 5ULL) & 0x1ULL) << UMC_V12_0_PA_CH5_BIT) | \
+ (((ch >> 6ULL) & 0x1ULL) << UMC_V12_0_PA_CH6_BIT))
+
+#define UMC_V12_0_SOC_PC_TO_PA(pc) (((pc >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_PC0_BIT)
+
+/* bank hash settings */
+#define UMC_V12_0_XOR_EN0 1
+#define UMC_V12_0_XOR_EN1 1
+#define UMC_V12_0_XOR_EN2 1
+#define UMC_V12_0_XOR_EN3 1
+#define UMC_V12_0_COL_XOR0 0x0
+#define UMC_V12_0_COL_XOR1 0x0
+#define UMC_V12_0_COL_XOR2 0x800
+#define UMC_V12_0_COL_XOR3 0x1000
+#define UMC_V12_0_ROW_XOR0 0x11111
+#define UMC_V12_0_ROW_XOR1 0x22222
+#define UMC_V12_0_ROW_XOR2 0x4444
+#define UMC_V12_0_ROW_XOR3 0x8888
+
+/* channel hash settings */
+#define UMC_V12_0_HASH_4K 0
+#define UMC_V12_0_HASH_64K 1
+#define UMC_V12_0_HASH_2M 1
+#define UMC_V12_0_HASH_1G 1
+#define UMC_V12_0_HASH_1T 1
+
+/* XOR some PA bits into the CH4~CH6 bits (bits 12~14 of the PA);
+ * a hash bit takes effect only when the related setting is enabled
+ */
+#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \
+ (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \
+ (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \
+ (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \
+ (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \
+ (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \
+ (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \
+ (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_1T))
+#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \
+ (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \
+ (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
+ } while (0)
+
+
+/*
+ * (addr / 256) * 4096; the upper 26 bits of ErrorAddr
+ * form the index of the 4KB block
+ */
+#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)
+/*
+ * (addr / 256) * 8192; the upper 26 bits of ErrorAddr
+ * form the index of the 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+/*
+ * (addr / 256) * 32768; the upper 26 bits of ErrorAddr
+ * form the index of the 32KB block
+ */
+#define ADDR_OF_32KB_BLOCK(addr) (((addr) & ~0xffULL) << 7)
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
+
+
+#define UMC_V12_ADDR_MASK_BAD_COLS(addr) \
+ ((addr) & ~((0x3ULL << UMC_V12_0_PA_C2_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_C4_BIT) | \
+ (0x1ULL << UMC_V12_0_PA_R13_BIT)))
+
+#define ACA_IPID_HI_2_UMC_AID(_ipid_hi) (((_ipid_hi) >> 2) & 0x3)
+#define ACA_IPID_LO_2_UMC_CH(_ipid_lo) \
+ (((((_ipid_lo) >> 20) & 0x1) * 4) + (((_ipid_lo) >> 12) & 0xF))
+#define ACA_IPID_LO_2_UMC_INST(_ipid_lo) (((_ipid_lo) >> 21) & 0x7)
+
+#define ACA_IPID_2_DIE_ID(ipid) ((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) >> 2) & 0x03)
+#define ACA_IPID_2_UMC_CH(ipid) \
+ (ACA_IPID_LO_2_UMC_CH(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_UMC_INST(ipid) \
+ (ACA_IPID_LO_2_UMC_INST(REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo)))
+
+#define ACA_IPID_2_SOCKET_ID(ipid) \
+ (((REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdLo) & 0x1) << 2) | \
+ (REG_GET_FIELD(ipid, MCMP1_IPIDT0, InstanceIdHi) & 0x03))
+
+#define ACA_ADDR_2_ERR_ADDR(addr) \
+ REG_GET_FIELD(addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr)
+
+/* the R13 bit flip must also be covered, so double the number */
+#define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2)
+
+
+/* four MCA address bits (C2, C3, C4, R13) are looped over in page retirement */
+#define UMC_V12_0_RETIRE_LOOP_BITS 4
+
+/* invalid node instance value */
+#define UMC_INV_AID_NODE 0xffff
+
+#define UMC_V12_0_AID_NUM_MAX 4
+#define UMC_V12_0_SOCKET_NUM_MAX 8
+
+#define UMC_V12_0_TOTAL_CHANNEL_NUM \
+ (UMC_V12_0_AID_NUM_MAX * UMC_V12_0_UMC_INSTANCE_NUM * UMC_V12_0_CHANNEL_INSTANCE_NUM)
+
+/* one device has 192GB HBM */
+#define SOCKET_LFB_SIZE 0x3000000000ULL
+
+extern const struct ras_umc_ip_func ras_umc_func_v12_0;
+
+int ras_umc_get_badpage_count(struct ras_core_context *ras_core);
+int ras_umc_get_badpage_record(struct ras_core_context *ras_core, uint32_t index, void *record);
+#endif
+
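The *_TO_PA and PA_TO_* pairs above are plain bit scatter/gather operations, so every field round-trips through a physical address, and the ADDR_OF_*_BLOCK macros are shift-based multiplies. Below is a minimal user-space sketch that checks both properties; the bit positions and macros are copied from the header above (only the column pair and the 4KB-block macro are reproduced), and nothing in it is part of the kernel patch itself.

/* Hypothetical standalone check of the column scatter/gather pair. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define UMC_V12_0_PA_C4_BIT 21
#define UMC_V12_0_PA_C3_BIT 16
#define UMC_V12_0_PA_C2_BIT 15
#define UMC_V12_0_PA_C1_BIT 6
#define UMC_V12_0_PA_C0_BIT 5

#define UMC_V12_0_SOC_PA_TO_COL(pa) \
	((((pa >> UMC_V12_0_PA_C0_BIT) & 0x1ULL) << 0ULL) | \
	 (((pa >> UMC_V12_0_PA_C1_BIT) & 0x1ULL) << 1ULL) | \
	 (((pa >> UMC_V12_0_PA_C2_BIT) & 0x1ULL) << 2ULL) | \
	 (((pa >> UMC_V12_0_PA_C3_BIT) & 0x1ULL) << 3ULL) | \
	 (((pa >> UMC_V12_0_PA_C4_BIT) & 0x1ULL) << 4ULL))

#define UMC_V12_0_SOC_COL_TO_PA(col) \
	((((col >> 0ULL) & 0x1ULL) << UMC_V12_0_PA_C0_BIT) | \
	 (((col >> 1ULL) & 0x1ULL) << UMC_V12_0_PA_C1_BIT) | \
	 (((col >> 2ULL) & 0x1ULL) << UMC_V12_0_PA_C2_BIT) | \
	 (((col >> 3ULL) & 0x1ULL) << UMC_V12_0_PA_C3_BIT) | \
	 (((col >> 4ULL) & 0x1ULL) << UMC_V12_0_PA_C4_BIT))

#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)

int main(void)
{
	uint64_t col;

	/* scattering then gathering recovers every 5-bit column value */
	for (col = 0; col < 32; col++)
		assert(UMC_V12_0_SOC_PA_TO_COL(UMC_V12_0_SOC_COL_TO_PA(col)) == col);

	/* the shift form equals (addr / 256) * 4096 */
	assert(ADDR_OF_4KB_BLOCK(0x12345ULL) == (0x12345ULL >> 8) * 4096);

	printf("round-trip and block-address checks passed\n");
	return 0;
}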
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 2ad33559a33a..5a66948ffd24 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -111,6 +111,7 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc,
static int
komeda_crtc_prepare(struct komeda_crtc *kcrtc)
{
+ struct drm_device *drm = kcrtc->base.dev;
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state);
@@ -128,8 +129,8 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
err = mdev->funcs->change_opmode(mdev, new_mode);
if (err) {
- DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
- mdev->dpmode, new_mode);
+ drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
goto unlock;
}
@@ -142,18 +143,18 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
if (new_mode != KOMEDA_MODE_DUAL_DISP) {
err = clk_set_rate(mdev->aclk, komeda_crtc_get_aclk(kcrtc_st));
if (err)
- DRM_ERROR("failed to set aclk.\n");
+ drm_err(drm, "failed to set aclk.\n");
err = clk_prepare_enable(mdev->aclk);
if (err)
- DRM_ERROR("failed to enable aclk.\n");
+ drm_err(drm, "failed to enable aclk.\n");
}
err = clk_set_rate(master->pxlclk, mode->crtc_clock * 1000);
if (err)
- DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
+ drm_err(drm, "failed to set pxlclk for pipe%d\n", master->id);
err = clk_prepare_enable(master->pxlclk);
if (err)
- DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id);
+ drm_err(drm, "failed to enable pxl clk for pipe%d.\n", master->id);
unlock:
mutex_unlock(&mdev->lock);
@@ -164,6 +165,7 @@ unlock:
static int
komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
{
+ struct drm_device *drm = kcrtc->base.dev;
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
u32 new_mode;
@@ -180,8 +182,8 @@ komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
err = mdev->funcs->change_opmode(mdev, new_mode);
if (err) {
- DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
- mdev->dpmode, new_mode);
+ drm_err(drm, "failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
goto unlock;
}
@@ -200,6 +202,7 @@ unlock:
void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
struct komeda_events *evts)
{
+ struct drm_device *drm = kcrtc->base.dev;
struct drm_crtc *crtc = &kcrtc->base;
u32 events = evts->pipes[kcrtc->master->id];
@@ -212,7 +215,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
if (wb_conn)
drm_writeback_signal_completion(&wb_conn->base, 0);
else
- DRM_WARN("CRTC[%d]: EOW happen but no wb_connector.\n",
+ drm_warn(drm, "CRTC[%d]: EOW happen but no wb_connector.\n",
drm_crtc_index(&kcrtc->base));
}
/* will handle it together with the write back support */
@@ -236,7 +239,7 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
crtc->state->event = NULL;
drm_crtc_send_vblank_event(crtc, event);
} else {
- DRM_WARN("CRTC[%d]: FLIP happened but no pending commit.\n",
+ drm_warn(drm, "CRTC[%d]: FLIP happened but no pending commit.\n",
drm_crtc_index(&kcrtc->base));
}
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
@@ -309,7 +312,7 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
/* wait for the flip to take effect. */
if (wait_for_completion_timeout(flip_done, HZ) == 0) {
- DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id);
+ drm_err(drm, "wait pipe%d flip done timeout\n", kcrtc->master->id);
if (!input_flip_done) {
unsigned long flags;
@@ -562,6 +565,7 @@ static const struct drm_crtc_funcs komeda_crtc_funcs = {
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
+ struct drm_device *drm = &kms->base;
struct komeda_crtc *crtc;
struct komeda_pipeline *master;
char str[16];
@@ -581,7 +585,7 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
else
sprintf(str, "None");
- DRM_INFO("CRTC-%d: master(pipe-%d) slave(%s).\n",
+ drm_info(drm, "CRTC-%d: master(pipe-%d) slave(%s).\n",
kms->n_crtcs, master->id, str);
kms->n_crtcs++;
@@ -613,6 +617,7 @@ static int komeda_attach_bridge(struct device *dev,
struct komeda_pipeline *pipe,
struct drm_encoder *encoder)
{
+ struct drm_device *drm = encoder->dev;
struct drm_bridge *bridge;
int err;
@@ -624,7 +629,7 @@ static int komeda_attach_bridge(struct device *dev,
err = drm_bridge_attach(encoder, bridge, NULL, 0);
if (err)
- dev_err(dev, "bridge_attach() failed for pipe: %s\n",
+ drm_err(drm, "bridge_attach() failed for pipe: %s\n",
of_node_full_name(pipe->of_node));
return err;
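The komeda hunks above all follow one conversion pattern: resolve the struct drm_device once at the top of the function, then replace the global DRM_ERROR()/DRM_WARN()/DRM_INFO() macros with the device-aware drm_err()/drm_warn()/drm_info() helpers, which prefix each message with the device name. A hedged sketch of the pattern follows; my_crtc and its members are invented for illustration and are not part of this patch.

/* Illustrative only; my_crtc is a made-up driver type. */
#include <linux/clk.h>

#include <drm/drm_crtc.h>
#include <drm/drm_print.h>

struct my_crtc {
	struct drm_crtc base;
	struct clk *pxlclk;
	int id;
};

static int my_crtc_enable_pxlclk(struct my_crtc *mc)
{
	struct drm_device *drm = mc->base.dev;	/* resolve once */
	int err;

	err = clk_prepare_enable(mc->pxlclk);
	if (err)
		drm_err(drm, "failed to enable pxlclk for pipe%d\n", mc->id);

	return err;
}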
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 901f938aefe0..3ca461eb0a24 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -9,6 +9,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include "komeda_framebuffer.h"
#include "komeda_dev.h"
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 806da0aaedf7..4b4a08cb396d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -22,6 +22,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index c3179d74f3f5..81d45f2dd6a7 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -33,6 +33,7 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index bc5f5e9798c3..b765f6c9eea4 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -29,6 +29,7 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 600af5ad81b1..47733c85d271 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -14,6 +14,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_writeback.h>
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 87f2e5ee8790..f1a5014bcfa1 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -263,7 +263,7 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ drm_atomic_get_new_crtc_state(state->state, state->crtc);
struct malidp_crtc_state *mc;
u32 src_w, src_h;
int ret;
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 0900e4466ffb..033b19b31f63 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/armada/armada_debugfs.c b/drivers/gpu/drm/armada/armada_debugfs.c
index a763349dd89f..2445365c823f 100644
--- a/drivers/gpu/drm/armada/armada_debugfs.c
+++ b/drivers/gpu/drm/armada/armada_debugfs.c
@@ -12,6 +12,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "armada_crtc.h"
#include "armada_drm.h"
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index aa4289127086..77098928f821 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -6,6 +6,7 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include "armada_drm.h"
#include "armada_fb.h"
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index cb53cc91bafb..8bbae94804f8 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -13,6 +13,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "armada_crtc.h"
#include "armada_drm.h"
@@ -28,8 +29,6 @@ static void armada_fbdev_fb_destroy(struct fb_info *info)
fbh->fb->funcs->destroy(fbh->fb);
drm_client_release(&fbh->client);
- drm_fb_helper_unprepare(fbh);
- kfree(fbh);
}
static const struct fb_ops armada_fb_ops = {
@@ -45,10 +44,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
+ struct fb_info *info = fbh->info;
struct drm_mode_fb_cmd2 mode;
struct armada_framebuffer *dfb;
struct armada_gem_object *obj;
- struct fb_info *info;
int size, ret;
void *ptr;
@@ -92,12 +91,6 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
if (IS_ERR(dfb))
return PTR_ERR(dfb);
- info = drm_fb_helper_alloc_info(fbh);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_fballoc;
- }
-
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
@@ -113,8 +106,4 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh,
(unsigned long long)obj->phys_addr);
return 0;
-
- err_fballoc:
- dfb->fb.funcs->destroy(&dfb->fb);
- return ret;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 1a1680d71486..35fcfa0d85ff 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -10,6 +10,7 @@
#include <drm/armada_drm.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include "armada_drm.h"
#include "armada_gem.h"
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 3b9bd8ecda13..21fd3b4ba10f 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include "armada_crtc.h"
#include "armada_drm.h"
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
index cc47c032dbc1..a0326b4f568e 100644
--- a/drivers/gpu/drm/armada/armada_plane.c
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include "armada_crtc.h"
#include "armada_drm.h"
@@ -94,12 +95,7 @@ int armada_drm_plane_atomic_check(struct drm_plane *plane,
return 0;
}
- if (state)
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
- else
- crtc_state = crtc->state;
-
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
0,
INT_MAX, true, false);
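Both the malidp and armada changes swap drm_atomic_get_existing_crtc_state() for drm_atomic_get_new_crtc_state(), which returns the new CRTC state tracked in the atomic commit being checked (or NULL when that CRTC is not part of the update), so the old state/crtc->state fallback is no longer needed. A hedged sketch of the resulting atomic-check shape, with the plane-specific details elided and the scaling limits mirroring the armada call:

/* Illustrative atomic-check skeleton; not taken verbatim from either driver. */
#include <linux/limits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	/* NULL when the CRTC is not part of this atomic update */
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);

	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   0, INT_MAX, true, false);
}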
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 2547613155da..cdbcba3b43ad 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -6,7 +6,9 @@
ast-y := \
ast_2000.o \
ast_2100.o \
+ ast_2200.o \
ast_2300.o \
+ ast_2400.o \
ast_2500.o \
ast_2600.o \
ast_cursor.o \
@@ -14,7 +16,6 @@ ast-y := \
ast_dp501.o \
ast_dp.o \
ast_drv.o \
- ast_main.o \
ast_mm.o \
ast_mode.o \
ast_post.o \
diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c
index 41c2aa1e425a..fa3bc23ce098 100644
--- a/drivers/gpu/drm/ast/ast_2000.c
+++ b/drivers/gpu/drm/ast/ast_2000.c
@@ -27,6 +27,9 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
#include "ast_drv.h"
#include "ast_post.h"
@@ -147,3 +150,108 @@ int ast_2000_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Mode setting
+ */
+
+const struct ast_vbios_dclk_info ast_2000_dclk_table[] = {
+ {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xee, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xc6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7b, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */
+ {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */
+ {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0d: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */
+ {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+ {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
+ {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
+ {0x77, 0x58, 0x80}, /* 17: VCLK119 */
+ {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
+ {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
+ {0x3b, 0x2c, 0x81}, /* 1a: VCLK118_25 */
+};
+
+/*
+ * Device initialization
+ */
+
+void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post)
+{
+ enum ast_tx_chip tx_chip = AST_TX_NONE;
+ u8 vgacra3;
+
+ /*
+ * VGACRA3 Enhanced Color Mode Register, check if DVO is already
+ * enabled, in that case, assume we have a SIL164 TMDS transmitter
+ *
+ * Don't make that assumption if the chip wasn't enabled and
+ * is at power-on reset, otherwise we'll incorrectly "detect" a
+ * SIL164 when there is none.
+ */
+ if (!need_post) {
+ vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
+ if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED)
+ tx_chip = AST_TX_SIL164;
+ }
+
+ __ast_device_set_tx_chip(ast, tx_chip);
+}
+
+static const struct ast_device_quirks ast_2000_device_quirks = {
+ .crtc_mem_req_threshold_low = 31,
+ .crtc_mem_req_threshold_high = 47,
+};
+
+struct drm_device *ast_2000_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2000_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2000_detect_tx_chip(ast, need_post);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c
index 477ee15eff5d..05aeb0624d41 100644
--- a/drivers/gpu/drm/ast/ast_2100.c
+++ b/drivers/gpu/drm/ast/ast_2100.c
@@ -27,11 +27,47 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
#include "ast_drv.h"
#include "ast_post.h"
/*
+ * DRAM type
+ */
+
+static enum ast_dram_layout ast_2100_get_dram_layout_p2a(struct ast_device *ast)
+{
+ u32 mcr_cfg;
+ enum ast_dram_layout dram_layout;
+
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ mcr_cfg = ast_read32(ast, 0x10004);
+
+ switch (mcr_cfg & 0x0c) {
+ case 0:
+ case 4:
+ default:
+ dram_layout = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (mcr_cfg & 0x40)
+ dram_layout = AST_DRAM_1Gx16;
+ else
+ dram_layout = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ dram_layout = AST_DRAM_1Gx32;
+ break;
+ }
+
+ return dram_layout;
+}
+
+/*
* POST
*/
@@ -266,6 +302,7 @@ static void ast_post_chip_2100(struct ast_device *ast)
u8 j;
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
+ enum ast_dram_layout dram_layout = ast_2100_get_dram_layout_p2a(ast);
j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff);
@@ -292,11 +329,17 @@ static void ast_post_chip_2100(struct ast_device *ast)
for (i = 0; i < 15; i++)
udelay(dram_reg_info->data);
} else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) {
- data = dram_reg_info->data;
- if (ast->dram_type == AST_DRAM_1Gx16)
+ switch (dram_layout) {
+ case AST_DRAM_1Gx16:
data = 0x00000d89;
- else if (ast->dram_type == AST_DRAM_1Gx32)
+ break;
+ case AST_DRAM_1Gx32:
data = 0x00000c8d;
+ break;
+ default:
+ data = dram_reg_info->data;
+ break;
+ }
temp = ast_read32(ast, 0x12070);
temp &= 0xc;
@@ -346,3 +389,92 @@ int ast_2100_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Widescreen detection
+ */
+
+/* Try to detect WSXGA+ on Gen2+ */
+bool __ast_2100_detect_wsxga_p(struct ast_device *ast)
+{
+ u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0);
+
+ if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC))
+ return true;
+ if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN)
+ return true;
+
+ return false;
+}
+
+/* Try to detect WUXGA on Gen2+ */
+bool __ast_2100_detect_wuxga(struct ast_device *ast)
+{
+ u8 vgacrd1;
+
+ if (ast->support_fullhd) {
+ vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
+ if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA))
+ return true;
+ }
+
+ return false;
+}
+
+static void ast_2100_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast)) {
+ ast->support_wsxga_p = true;
+ if (ast->chip == AST2100)
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2100_device_quirks = {
+ .crtc_mem_req_threshold_low = 47,
+ .crtc_mem_req_threshold_high = 63,
+};
+
+struct drm_device *ast_2100_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2100_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2000_detect_tx_chip(ast, need_post);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_2100_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2200.c b/drivers/gpu/drm/ast/ast_2200.c
new file mode 100644
index 000000000000..b64345d11ffa
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2200.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
+#include "ast_drv.h"
+
+static void ast_2200_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast)) {
+ ast->support_wsxga_p = true;
+ if (ast->chip == AST2200)
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2200_device_quirks = {
+ .crtc_mem_req_threshold_low = 47,
+ .crtc_mem_req_threshold_high = 63,
+};
+
+struct drm_device *ast_2200_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2200_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2000_detect_tx_chip(ast, need_post);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_2200_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
+
diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c
index dc2a32244689..5f50d9f91ffd 100644
--- a/drivers/gpu/drm/ast/ast_2300.c
+++ b/drivers/gpu/drm/ast/ast_2300.c
@@ -27,6 +27,12 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "ast_drv.h"
#include "ast_post.h"
@@ -1326,3 +1332,132 @@ int ast_2300_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Device initialization
+ */
+
+void ast_2300_detect_tx_chip(struct ast_device *ast)
+{
+ enum ast_tx_chip tx_chip = AST_TX_NONE;
+ struct drm_device *dev = &ast->base;
+ u8 vgacrd1;
+
+ /*
+ * On AST GEN4+, look at the configuration set by the SoC in
+ * the SOC scratch register #1 bits 11:8 (interestingly marked
+ * as "reserved" in the spec)
+ */
+ vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
+ AST_IO_VGACRD1_TX_TYPE_MASK);
+ switch (vgacrd1) {
+ /*
+ * GEN4 to GEN6
+ */
+ case AST_IO_VGACRD1_TX_SIL164_VBIOS:
+ tx_chip = AST_TX_SIL164;
+ break;
+ case AST_IO_VGACRD1_TX_DP501_VBIOS:
+ ast->dp501_fw_addr = drmm_kzalloc(dev, SZ_32K, GFP_KERNEL);
+ if (ast->dp501_fw_addr) {
+ /* backup firmware */
+ if (ast_backup_fw(ast, ast->dp501_fw_addr, SZ_32K)) {
+ drmm_kfree(dev, ast->dp501_fw_addr);
+ ast->dp501_fw_addr = NULL;
+ }
+ }
+ fallthrough;
+ case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
+ tx_chip = AST_TX_DP501;
+ break;
+ /*
+ * GEN7+
+ */
+ case AST_IO_VGACRD1_TX_ASTDP:
+ tx_chip = AST_TX_ASTDP;
+ break;
+ /*
+ * Several of the listed TX chips are not explicitly supported
+ * by the ast driver. If these exist in real-world devices, they
+ * are most likely reported as VGA or SIL164 outputs. We warn here
+ * to get bug reports for these devices. If none come in for some
+ * time, we can begin to fail device probing on these values.
+ */
+ case AST_IO_VGACRD1_TX_ITE66121_VBIOS:
+ drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_CH7003_VBIOS:
+ drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ break;
+ case AST_IO_VGACRD1_TX_ANX9807_VBIOS:
+ drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n", vgacrd1, AST_GEN(ast));
+ break;
+ }
+
+ __ast_device_set_tx_chip(ast, tx_chip);
+}
+
+static void ast_2300_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1300) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2300_device_quirks = {
+ .crtc_mem_req_threshold_low = 96,
+ .crtc_mem_req_threshold_high = 120,
+};
+
+struct drm_device *ast_2300_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2300_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
+ ast_2300_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2400.c b/drivers/gpu/drm/ast/ast_2400.c
new file mode 100644
index 000000000000..2e6befd24f91
--- /dev/null
+++ b/drivers/gpu/drm/ast/ast_2400.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+
+#include "ast_drv.h"
+
+static void ast_2400_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST1400) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2400_device_quirks = {
+ .crtc_mem_req_threshold_low = 96,
+ .crtc_mem_req_threshold_high = 120,
+};
+
+struct drm_device *ast_2400_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2400_device_quirks);
+
+ ast->dclk_table = ast_2000_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
+ ast_2400_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c
index 1e541498ea67..2a52af0ded56 100644
--- a/drivers/gpu/drm/ast/ast_2500.c
+++ b/drivers/gpu/drm/ast/ast_2500.c
@@ -27,7 +27,9 @@
*/
#include <linux/delay.h>
+#include <linux/pci.h>
+#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include "ast_drv.h"
@@ -567,3 +569,107 @@ int ast_2500_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Mode setting
+ */
+
+const struct ast_vbios_dclk_info ast_2500_dclk_table[] = {
+ {0x2c, 0xe7, 0x03}, /* 00: VCLK25_175 */
+ {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
+ {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
+ {0x76, 0x63, 0x01}, /* 03: VCLK36 */
+ {0xee, 0x67, 0x01}, /* 04: VCLK40 */
+ {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
+ {0xc6, 0x64, 0x01}, /* 06: VCLK50 */
+ {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
+ {0x80, 0x64, 0x00}, /* 08: VCLK65 */
+ {0x7b, 0x63, 0x00}, /* 09: VCLK75 */
+ {0x67, 0x62, 0x00}, /* 0a: VCLK78_75 */
+ {0x7c, 0x62, 0x00}, /* 0b: VCLK94_5 */
+ {0x8e, 0x62, 0x00}, /* 0c: VCLK108 */
+ {0x85, 0x24, 0x00}, /* 0d: VCLK135 */
+ {0x67, 0x22, 0x00}, /* 0e: VCLK157_5 */
+ {0x6a, 0x22, 0x00}, /* 0f: VCLK162 */
+ {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
+ {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
+ {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
+ {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
+ {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
+ {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
+ {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
+ {0x58, 0x01, 0x42}, /* 17: VCLK119 */
+ {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
+ {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
+ {0x44, 0x20, 0x43}, /* 1a: VCLK118_25 */
+};
+
+/*
+ * Device initialization
+ */
+
+static void ast_2500_detect_widescreen(struct ast_device *ast)
+{
+ if (__ast_2100_detect_wsxga_p(ast) || ast->chip == AST2510) {
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ }
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2500_device_quirks = {
+ .crtc_mem_req_threshold_low = 96,
+ .crtc_mem_req_threshold_high = 120,
+ .crtc_hsync_precatch_needed = true,
+};
+
+struct drm_device *ast_2500_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2500_device_quirks);
+
+ ast->dclk_table = ast_2500_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ if (need_post) {
+ ret = ast_post_gpu(ast);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (ast->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
+ ast_2500_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
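The two dclk tables differ only in a few entries (e.g. VCLK119 and VCLK118_25), and each generation now publishes its table through ast->dclk_table instead of branching on the chip at mode-set time. A hedged sketch of the lookup; vclk_index is an invented name standing in for whatever index the VBIOS mode information supplies.

/* Hypothetical lookup through the per-generation table pointer. */
static const struct ast_vbios_dclk_info *
example_get_dclk(struct ast_device *ast, unsigned int vclk_index)
{
	return &ast->dclk_table[vclk_index];
}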
diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c
index 8d75a47444f5..dee78fd5b022 100644
--- a/drivers/gpu/drm/ast/ast_2600.c
+++ b/drivers/gpu/drm/ast/ast_2600.c
@@ -26,6 +26,10 @@
* Authors: Dave Airlie <airlied@redhat.com>
*/
+#include <linux/pci.h>
+
+#include <drm/drm_drv.h>
+
#include "ast_drv.h"
#include "ast_post.h"
@@ -42,3 +46,71 @@ int ast_2600_post(struct ast_device *ast)
return 0;
}
+
+/*
+ * Device initialization
+ */
+
+static void ast_2600_detect_widescreen(struct ast_device *ast)
+{
+ ast->support_wsxga_p = true;
+ ast->support_fullhd = true;
+ if (__ast_2100_detect_wuxga(ast))
+ ast->support_wuxga = true;
+}
+
+static const struct ast_device_quirks ast_2600_device_quirks = {
+ .crtc_mem_req_threshold_low = 160,
+ .crtc_mem_req_threshold_high = 224,
+ .crtc_hsync_precatch_needed = true,
+ .crtc_hsync_add4_needed = true,
+};
+
+struct drm_device *ast_2600_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post)
+{
+ struct drm_device *dev;
+ struct ast_device *ast;
+ int ret;
+
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
+ if (IS_ERR(ast))
+ return ERR_CAST(ast);
+ dev = &ast->base;
+
+ ast_device_init(ast, chip, config_mode, regs, ioregs, &ast_2600_device_quirks);
+
+ ast->dclk_table = ast_2500_dclk_table;
+
+ ast_2300_detect_tx_chip(ast);
+
+ switch (ast->tx_chip) {
+ case AST_TX_ASTDP:
+ ret = ast_post_gpu(ast);
+ break;
+ default:
+ ret = 0;
+ if (need_post)
+ ret = ast_post_gpu(ast);
+ break;
+ }
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = ast_mm_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ast_2600_detect_widescreen(ast);
+
+ ret = ast_mode_config_init(ast);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dev;
+}
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 19c04687b0fe..8e650a02c528 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -134,7 +134,7 @@ static int ast_astdp_read_edid_block(void *data, u8 *buf, unsigned int block, si
* 3. The Delays are often longer a lot when system resume from S3/S4.
*/
if (j)
- mdelay(j + 1);
+ msleep(j + 1);
/* Wait for EDID offset to show up in mirror register */
vgacrd7 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd7);
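The one-line ast_dp.c change above replaces a busy-wait with a sleep: mdelay() spins the CPU and is meant for atomic context, while msleep() yields to the scheduler, which is appropriate here because the EDID read path runs in process context and the delays grow long on resume from S3/S4. A minimal contrast, assuming a caller that knows its context:

#include <linux/delay.h>
#include <linux/types.h>

static void example_wait(bool atomic_ctx)
{
	if (atomic_ctx)
		mdelay(5);	/* busy-waits; CPU spins for 5 ms */
	else
		msleep(5);	/* sleeps for at least 5 ms; other work runs */
}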
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 473faa92d08c..b9a9b050b546 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -37,6 +37,7 @@
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
@@ -46,6 +47,34 @@ static int ast_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, ast_modeset, int, 0400);
+void ast_device_init(struct ast_device *ast,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ const struct ast_device_quirks *quirks)
+{
+ ast->quirks = quirks;
+ ast->chip = chip;
+ ast->config_mode = config_mode;
+ ast->regs = regs;
+ ast->ioregs = ioregs;
+}
+
+void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip)
+{
+ static const char * const info_str[] = {
+ "analog VGA",
+ "Sil164 TMDS transmitter",
+ "DP501 DisplayPort transmitter",
+ "ASPEED DisplayPort transmitter",
+ };
+
+ drm_info(&ast->base, "Using %s\n", info_str[tx_chip]);
+
+ ast->tx_chip = tx_chip;
+}
+
/*
* DRM driver
*/
@@ -266,7 +295,7 @@ static int ast_detect_chip(struct pci_dev *pdev,
*chip_out = chip;
*config_mode_out = config_mode;
- return 0;
+ return __AST_CHIP_GEN(chip);
}
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -277,6 +306,7 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *ioregs;
enum ast_config_mode config_mode;
enum ast_chip chip;
+ unsigned int chip_gen;
struct drm_device *drm;
bool need_post = false;
@@ -349,10 +379,43 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return ret;
ret = ast_detect_chip(pdev, regs, ioregs, &chip, &config_mode);
- if (ret)
+ if (ret < 0)
return ret;
+ chip_gen = ret;
- drm = ast_device_create(pdev, &ast_driver, chip, config_mode, regs, ioregs, need_post);
+ switch (chip_gen) {
+ case 1:
+ drm = ast_2000_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 2:
+ drm = ast_2100_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 3:
+ drm = ast_2200_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 4:
+ drm = ast_2300_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 5:
+ drm = ast_2400_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 6:
+ drm = ast_2500_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ case 7:
+ drm = ast_2600_device_create(pdev, &ast_driver, chip, config_mode,
+ regs, ioregs, need_post);
+ break;
+ default:
+ dev_err(&pdev->dev, "Gen%d not supported\n", chip_gen);
+ return -ENODEV;
+ }
if (IS_ERR(drm))
return PTR_ERR(drm);
pci_set_drvdata(pdev, drm);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index e37a55295ed7..787e38c6c17d 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,13 +98,15 @@ enum ast_config_mode {
ast_use_defaults
};
-#define AST_DRAM_512Mx16 0
-#define AST_DRAM_1Gx16 1
-#define AST_DRAM_512Mx32 2
-#define AST_DRAM_1Gx32 3
-#define AST_DRAM_2Gx16 6
-#define AST_DRAM_4Gx16 7
-#define AST_DRAM_8Gx16 8
+enum ast_dram_layout {
+ AST_DRAM_512Mx16 = 0,
+ AST_DRAM_1Gx16 = 1,
+ AST_DRAM_512Mx32 = 2,
+ AST_DRAM_1Gx32 = 3,
+ AST_DRAM_2Gx16 = 6,
+ AST_DRAM_4Gx16 = 7,
+ AST_DRAM_8Gx16 = 8,
+};
/*
* Hardware cursor
@@ -162,9 +164,31 @@ to_ast_connector(struct drm_connector *connector)
* Device
*/
+struct ast_device_quirks {
+ /*
+ * CRTC memory request threshold
+ */
+ unsigned char crtc_mem_req_threshold_low;
+ unsigned char crtc_mem_req_threshold_high;
+
+ /*
+ * Adjust hsync values to load next scanline early. Signalled
+ * by AST2500PreCatchCRT in VBIOS mode flags.
+ */
+ bool crtc_hsync_precatch_needed;
+
+ /*
+ * Workaround for modes with HSync Time that is not a multiple
+ * of 8 (e.g., 1920x1080@60Hz, HSync +44 pixels).
+ */
+ bool crtc_hsync_add4_needed;
+};
+
struct ast_device {
struct drm_device base;
+ const struct ast_device_quirks *quirks;
+
void __iomem *regs;
void __iomem *ioregs;
void __iomem *dp501_fw_buf;
@@ -172,9 +196,7 @@ struct ast_device {
enum ast_config_mode config_mode;
enum ast_chip chip;
- uint32_t dram_bus_width;
- uint32_t dram_type;
- uint32_t mclk;
+ const struct ast_vbios_dclk_info *dclk_table;
void __iomem *vram;
unsigned long vram_base;
@@ -219,14 +241,6 @@ static inline struct ast_device *to_ast_device(struct drm_device *dev)
return container_of(dev, struct ast_device, base);
}
-struct drm_device *ast_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum ast_chip chip,
- enum ast_config_mode config_mode,
- void __iomem *regs,
- void __iomem *ioregs,
- bool need_post);
-
static inline unsigned long __ast_gen(struct ast_device *ast)
{
return __AST_CHIP_GEN(ast->chip);
@@ -284,13 +298,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
__ast_write8(addr, reg + 1, val);
}
-static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
+static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,
u8 val)
{
- u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
+ u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask);
- tmp |= val;
- __ast_write8_i(addr, reg, index, tmp);
+ val &= ~preserve_mask;
+ __ast_write8_i(addr, reg, index, tmp | val);
}
static inline u32 ast_read32(struct ast_device *ast, u32 reg)
@@ -417,21 +431,89 @@ struct ast_crtc_state {
int ast_mm_init(struct ast_device *ast);
+/* ast_drv.c */
+void ast_device_init(struct ast_device *ast,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ const struct ast_device_quirks *quirks);
+void __ast_device_set_tx_chip(struct ast_device *ast, enum ast_tx_chip tx_chip);
+
/* ast_2000.c */
int ast_2000_post(struct ast_device *ast);
+extern const struct ast_vbios_dclk_info ast_2000_dclk_table[];
+void ast_2000_detect_tx_chip(struct ast_device *ast, bool need_post);
+struct drm_device *ast_2000_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast_2100.c */
int ast_2100_post(struct ast_device *ast);
+bool __ast_2100_detect_wsxga_p(struct ast_device *ast);
+bool __ast_2100_detect_wuxga(struct ast_device *ast);
+struct drm_device *ast_2100_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2200.c */
+struct drm_device *ast_2200_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast_2300.c */
int ast_2300_post(struct ast_device *ast);
+void ast_2300_detect_tx_chip(struct ast_device *ast);
+struct drm_device *ast_2300_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
+
+/* ast_2400.c */
+struct drm_device *ast_2400_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast_2500.c */
void ast_2500_patch_ahb(void __iomem *regs);
int ast_2500_post(struct ast_device *ast);
+extern const struct ast_vbios_dclk_info ast_2500_dclk_table[];
+struct drm_device *ast_2500_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast_2600.c */
int ast_2600_post(struct ast_device *ast);
+struct drm_device *ast_2600_device_create(struct pci_dev *pdev,
+ const struct drm_driver *drv,
+ enum ast_chip chip,
+ enum ast_config_mode config_mode,
+ void __iomem *regs,
+ void __iomem *ioregs,
+ bool need_post);
/* ast post */
int ast_post_gpu(struct ast_device *ast);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
deleted file mode 100644
index 44b9b5f659fc..000000000000
--- a/drivers/gpu/drm/ast/ast_main.c
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors: Dave Airlie <airlied@redhat.com>
- */
-
-#include <linux/of.h>
-#include <linux/pci.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_gem.h>
-#include <drm/drm_managed.h>
-
-#include "ast_drv.h"
-
-/* Try to detect WSXGA+ on Gen2+ */
-static bool __ast_2100_detect_wsxga_p(struct ast_device *ast)
-{
- u8 vgacrd0 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd0);
-
- if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_BY_BMC))
- return true;
- if (vgacrd0 & AST_IO_VGACRD0_IKVM_WIDESCREEN)
- return true;
-
- return false;
-}
-
-/* Try to detect WUXGA on Gen2+ */
-static bool __ast_2100_detect_wuxga(struct ast_device *ast)
-{
- u8 vgacrd1;
-
- if (ast->support_fullhd) {
- vgacrd1 = ast_get_index_reg(ast, AST_IO_VGACRI, 0xd1);
- if (!(vgacrd1 & AST_IO_VGACRD1_SUPPORTS_WUXGA))
- return true;
- }
-
- return false;
-}
-
-static void ast_detect_widescreen(struct ast_device *ast)
-{
- ast->support_wsxga_p = false;
- ast->support_fullhd = false;
- ast->support_wuxga = false;
-
- if (AST_GEN(ast) >= 7) {
- ast->support_wsxga_p = true;
- ast->support_fullhd = true;
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 6) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- else if (ast->chip == AST2510)
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p)
- ast->support_fullhd = true;
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 5) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- else if (ast->chip == AST1400)
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p)
- ast->support_fullhd = true;
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 4) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- else if (ast->chip == AST1300)
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p)
- ast->support_fullhd = true;
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 3) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p) {
- if (ast->chip == AST2200)
- ast->support_fullhd = true;
- }
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- } else if (AST_GEN(ast) >= 2) {
- if (__ast_2100_detect_wsxga_p(ast))
- ast->support_wsxga_p = true;
- if (ast->support_wsxga_p) {
- if (ast->chip == AST2100)
- ast->support_fullhd = true;
- }
- if (__ast_2100_detect_wuxga(ast))
- ast->support_wuxga = true;
- }
-}
-
-static void ast_detect_tx_chip(struct ast_device *ast, bool need_post)
-{
- static const char * const info_str[] = {
- "analog VGA",
- "Sil164 TMDS transmitter",
- "DP501 DisplayPort transmitter",
- "ASPEED DisplayPort transmitter",
- };
-
- struct drm_device *dev = &ast->base;
- u8 vgacra3, vgacrd1;
-
- /* Check 3rd Tx option (digital output afaik) */
- ast->tx_chip = AST_TX_NONE;
-
- if (AST_GEN(ast) <= 3) {
- /*
- * VGACRA3 Enhanced Color Mode Register, check if DVO is already
- * enabled, in that case, assume we have a SIL164 TMDS transmitter
- *
- * Don't make that assumption if we the chip wasn't enabled and
- * is at power-on reset, otherwise we'll incorrectly "detect" a
- * SIL164 when there is none.
- */
- if (!need_post) {
- vgacra3 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xff);
- if (vgacra3 & AST_IO_VGACRA3_DVO_ENABLED)
- ast->tx_chip = AST_TX_SIL164;
- }
- } else {
- /*
- * On AST GEN4+, look at the configuration set by the SoC in
- * the SOC scratch register #1 bits 11:8 (interestingly marked
- * as "reserved" in the spec)
- */
- vgacrd1 = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd1,
- AST_IO_VGACRD1_TX_TYPE_MASK);
- switch (vgacrd1) {
- /*
- * GEN4 to GEN6
- */
- case AST_IO_VGACRD1_TX_SIL164_VBIOS:
- ast->tx_chip = AST_TX_SIL164;
- break;
- case AST_IO_VGACRD1_TX_DP501_VBIOS:
- ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL);
- if (ast->dp501_fw_addr) {
- /* backup firmware */
- if (ast_backup_fw(ast, ast->dp501_fw_addr, 32*1024)) {
- drmm_kfree(dev, ast->dp501_fw_addr);
- ast->dp501_fw_addr = NULL;
- }
- }
- fallthrough;
- case AST_IO_VGACRD1_TX_FW_EMBEDDED_FW:
- ast->tx_chip = AST_TX_DP501;
- break;
- /*
- * GEN7+
- */
- case AST_IO_VGACRD1_TX_ASTDP:
- ast->tx_chip = AST_TX_ASTDP;
- break;
- /*
- * Several of the listed TX chips are not explicitly supported
- * by the ast driver. If these exist in real-world devices, they
- * are most likely reported as VGA or SIL164 outputs. We warn here
- * to get bug reports for these devices. If none come in for some
- * time, we can begin to fail device probing on these values.
- */
- case AST_IO_VGACRD1_TX_ITE66121_VBIOS:
- drm_warn(dev, "ITE IT66121 detected, 0x%x, Gen%lu\n",
- vgacrd1, AST_GEN(ast));
- break;
- case AST_IO_VGACRD1_TX_CH7003_VBIOS:
- drm_warn(dev, "Chrontel CH7003 detected, 0x%x, Gen%lu\n",
- vgacrd1, AST_GEN(ast));
- break;
- case AST_IO_VGACRD1_TX_ANX9807_VBIOS:
- drm_warn(dev, "Analogix ANX9807 detected, 0x%x, Gen%lu\n",
- vgacrd1, AST_GEN(ast));
- break;
- }
- }
-
- drm_info(dev, "Using %s\n", info_str[ast->tx_chip]);
-}
-
-static int ast_get_dram_info(struct ast_device *ast)
-{
- struct drm_device *dev = &ast->base;
- struct device_node *np = dev->dev->of_node;
- uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
- uint32_t denum, num, div, ref_pll, dsel;
-
- switch (ast->config_mode) {
- case ast_use_dt:
- /*
- * If some properties are missing, use reasonable
- * defaults for GEN5
- */
- if (of_property_read_u32(np, "aspeed,mcr-configuration",
- &mcr_cfg))
- mcr_cfg = 0x00000577;
- if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
- &mcr_scu_mpll))
- mcr_scu_mpll = 0x000050C0;
- if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
- &mcr_scu_strap))
- mcr_scu_strap = 0;
- break;
- case ast_use_p2a:
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- mcr_cfg = ast_read32(ast, 0x10004);
- mcr_scu_mpll = ast_read32(ast, 0x10120);
- mcr_scu_strap = ast_read32(ast, 0x10170);
- break;
- case ast_use_defaults:
- default:
- ast->dram_bus_width = 16;
- ast->dram_type = AST_DRAM_1Gx16;
- if (IS_AST_GEN6(ast))
- ast->mclk = 800;
- else
- ast->mclk = 396;
- return 0;
- }
-
- if (mcr_cfg & 0x40)
- ast->dram_bus_width = 16;
- else
- ast->dram_bus_width = 32;
-
- if (IS_AST_GEN6(ast)) {
- switch (mcr_cfg & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_8Gx16;
- break;
- }
- } else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) {
- switch (mcr_cfg & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- }
- } else {
- switch (mcr_cfg & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- case 8:
- if (mcr_cfg & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
- break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
- break;
- }
- }
-
- if (mcr_scu_strap & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = mcr_scu_mpll & 0x1f;
- num = (mcr_scu_mpll & 0x3fe0) >> 5;
- dsel = (mcr_scu_mpll & 0xc000) >> 14;
- switch (dsel) {
- case 3:
- div = 0x4;
- break;
- case 2:
- case 1:
- div = 0x2;
- break;
- default:
- div = 0x1;
- break;
- }
- ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
- return 0;
-}
-
-struct drm_device *ast_device_create(struct pci_dev *pdev,
- const struct drm_driver *drv,
- enum ast_chip chip,
- enum ast_config_mode config_mode,
- void __iomem *regs,
- void __iomem *ioregs,
- bool need_post)
-{
- struct drm_device *dev;
- struct ast_device *ast;
- int ret;
-
- ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
- if (IS_ERR(ast))
- return ERR_CAST(ast);
- dev = &ast->base;
-
- ast->chip = chip;
- ast->config_mode = config_mode;
- ast->regs = regs;
- ast->ioregs = ioregs;
-
- ret = ast_get_dram_info(ast);
- if (ret)
- return ERR_PTR(ret);
- drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n",
- ast->mclk, ast->dram_type, ast->dram_bus_width);
-
- ast_detect_tx_chip(ast, need_post);
- switch (ast->tx_chip) {
- case AST_TX_ASTDP:
- ret = ast_post_gpu(ast);
- break;
- default:
- ret = 0;
- if (need_post)
- ret = ast_post_gpu(ast);
- break;
- }
- if (ret)
- return ERR_PTR(ret);
-
- ret = ast_mm_init(ast);
- if (ret)
- return ERR_PTR(ret);
-
- /* map reserved buffer */
- ast->dp501_fw_buf = NULL;
- if (ast->vram_size < pci_resource_len(pdev, 0)) {
- ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
- if (!ast->dp501_fw_buf)
- drm_info(dev, "failed to map reserved buffer!\n");
- }
-
- ast_detect_widescreen(ast);
-
- ret = ast_mode_config_init(ast);
- if (ret)
- return ERR_PTR(ret);
-
- return dev;
-}
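
As a cross-check on the MCLK arithmetic in the removed ast_get_dram_info() above, here is a minimal standalone sketch of the same computation; the register values are the GEN5 defaults from the ast_use_dt path and are assumptions for illustration only.

/*
 * Standalone sketch of the AST MCLK formula. Inputs mirror the GEN5
 * defaults (mcr_scu_mpll = 0x000050C0, mcr_scu_strap = 0); assumed
 * values, not authoritative.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mcr_scu_mpll = 0x000050C0, mcr_scu_strap = 0;
	uint32_t denum, num, div, dsel, ref_pll, mclk;

	ref_pll = (mcr_scu_strap & 0x2000) ? 14318 : 12000;	/* kHz */
	denum = mcr_scu_mpll & 0x1f;			/* 0 here */
	num = (mcr_scu_mpll & 0x3fe0) >> 5;		/* 134 here */
	dsel = (mcr_scu_mpll & 0xc000) >> 14;		/* 1 here */
	div = (dsel == 3) ? 4 : (dsel >= 1) ? 2 : 1;	/* same case map */

	mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000));
	printf("MCLK = %u MHz\n", mclk);	/* 408 for these inputs */
	return 0;
}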
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index b4e8edc7c767..cd08990a10f9 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -43,6 +43,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "ast_drv.h"
@@ -241,16 +242,15 @@ static void ast_set_std_reg(struct ast_device *ast,
ast_set_index_reg(ast, AST_IO_VGAGRI, i, stdtable->gr[i]);
}
-static void ast_set_crtc_reg(struct ast_device *ast,
- struct drm_display_mode *mode,
+static void ast_set_crtc_reg(struct ast_device *ast, struct drm_display_mode *mode,
const struct ast_vbios_enhtable *vmode)
{
u8 jreg05 = 0, jreg07 = 0, jreg09 = 0, jregAC = 0, jregAD = 0, jregAE = 0;
- u16 temp, precache = 0;
+ u16 temp;
+ unsigned char crtc_hsync_precatch = 0;
- if ((IS_AST_GEN6(ast) || IS_AST_GEN7(ast)) &&
- (vmode->flags & AST2500PreCatchCRT))
- precache = 40;
+ if (ast->quirks->crtc_hsync_precatch_needed && (vmode->flags & AST2500PreCatchCRT))
+ crtc_hsync_precatch = 40;
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x11, 0x7f, 0x00);
@@ -276,12 +276,12 @@ static void ast_set_crtc_reg(struct ast_device *ast,
jregAD |= 0x01; /* HBE D[5] */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x03, 0xE0, (temp & 0x1f));
- temp = ((mode->crtc_hsync_start-precache) >> 3) - 1;
+ temp = ((mode->crtc_hsync_start - crtc_hsync_precatch) >> 3) - 1;
if (temp & 0x100)
jregAC |= 0x40; /* HRS D[5] */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x04, 0x00, temp);
- temp = (((mode->crtc_hsync_end-precache) >> 3) - 1) & 0x3f;
+ temp = (((mode->crtc_hsync_end - crtc_hsync_precatch) >> 3) - 1) & 0x3f;
if (temp & 0x20)
jregAD |= 0x04; /* HRE D[5] */
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x05, 0x60, (u8)((temp & 0x1f) | jreg05));
@@ -289,8 +289,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAC, 0x00, jregAC);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAD, 0x00, jregAD);
- // Workaround for HSync Time non octave pixels (1920x1080@60Hz HSync 44 pixels);
- if (IS_AST_GEN7(ast) && (mode->crtc_vdisplay == 1080))
+ if (ast->quirks->crtc_hsync_add4_needed && mode->crtc_vdisplay == 1080)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x02);
else
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xFC, 0xFD, 0x00);
@@ -348,7 +347,7 @@ static void ast_set_crtc_reg(struct ast_device *ast,
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x09, 0xdf, jreg09);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xAE, 0x00, (jregAE | 0x80));
- if (precache)
+ if (crtc_hsync_precatch)
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x80);
else
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0x3f, 0x00);
@@ -370,12 +369,7 @@ static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
const struct ast_vbios_enhtable *vmode)
{
- const struct ast_vbios_dclk_info *clk_info;
-
- if (IS_AST_GEN6(ast) || IS_AST_GEN7(ast))
- clk_info = &dclk_table_ast2500[vmode->dclk_index];
- else
- clk_info = &dclk_table[vmode->dclk_index];
+ const struct ast_vbios_dclk_info *clk_info = &ast->dclk_table[vmode->dclk_index];
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc0, 0x00, clk_info->param1);
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xc1, 0x00, clk_info->param2);
@@ -415,20 +409,11 @@ static void ast_set_color_reg(struct ast_device *ast,
static void ast_set_crtthd_reg(struct ast_device *ast)
{
- /* Set Threshold */
- if (IS_AST_GEN7(ast)) {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0xe0);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0xa0);
- } else if (IS_AST_GEN6(ast) || IS_AST_GEN5(ast) || IS_AST_GEN4(ast)) {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x78);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x60);
- } else if (IS_AST_GEN3(ast) || IS_AST_GEN2(ast)) {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x3f);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x2f);
- } else {
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, 0x2f);
- ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, 0x1f);
- }
+ u8 vgacra6 = ast->quirks->crtc_mem_req_threshold_low;
+ u8 vgacra7 = ast->quirks->crtc_mem_req_threshold_high;
+
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xa7, vgacra7);
+ ast_set_index_reg(ast, AST_IO_VGACRI, 0xa6, vgacra6);
}
static void ast_set_sync_reg(struct ast_device *ast,
@@ -572,9 +557,14 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
ast_set_vbios_color_reg(ast, fb->format, ast_crtc_state->vmode);
}
- drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
- drm_atomic_for_each_plane_damage(&iter, &damage) {
- ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage);
+ /* if the buffer comes from another device */
+ if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE) == 0) {
+ drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage);
+ }
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
/*
@@ -836,22 +826,24 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct ast_device *ast = to_ast_device(crtc->dev);
+ u8 vgacr17 = 0x00;
+ u8 vgacrb6 = 0xff;
+
+ vgacr17 |= AST_IO_VGACR17_SYNC_ENABLE;
+ vgacrb6 &= ~(AST_IO_VGACRB6_VSYNC_OFF | AST_IO_VGACRB6_HSYNC_OFF);
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, 0x00);
- ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, 0x00);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17);
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6);
}
static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct ast_device *ast = to_ast_device(crtc->dev);
- u8 vgacrb6;
-
- ast_set_index_reg_mask(ast, AST_IO_VGASRI, 0x01, 0xdf, AST_IO_VGASR1_SD);
+ u8 vgacr17 = 0xff;
- vgacrb6 = AST_IO_VGACRB6_VSYNC_OFF |
- AST_IO_VGACRB6_HSYNC_OFF;
- ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xfc, vgacrb6);
+ vgacr17 &= ~AST_IO_VGACR17_SYNC_ENABLE;
+ ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x17, 0x7f, vgacr17);
/*
* HW cursors require the underlying primary plane and CRTC to
diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h
index e15adaf3a80e..30578e3b07e4 100644
--- a/drivers/gpu/drm/ast/ast_reg.h
+++ b/drivers/gpu/drm/ast/ast_reg.h
@@ -29,6 +29,7 @@
#define AST_IO_VGAGRI (0x4E)
#define AST_IO_VGACRI (0x54)
+#define AST_IO_VGACR17_SYNC_ENABLE BIT(7) /* called "Hardware reset" in docs */
#define AST_IO_VGACR80_PASSWORD (0xa8)
#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0)
#define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1)
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index f1c9f7e1f1fc..7da5b5c60f41 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -33,66 +33,6 @@
#define HiCModeIndex 3
#define TrueCModeIndex 4
-static const struct ast_vbios_dclk_info dclk_table[] = {
- {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
- {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
- {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
- {0x76, 0x63, 0x01}, /* 03: VCLK36 */
- {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
- {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
- {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
- {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
- {0x80, 0x64, 0x00}, /* 08: VCLK65 */
- {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
- {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
- {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
- {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
- {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
- {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
- {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
- {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
- {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
- {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
- {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
- {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
- {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
- {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
- {0x77, 0x58, 0x80}, /* 17: VCLK119 */
- {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
- {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
- {0x3b, 0x2c, 0x81}, /* 1A: VCLK118_25 */
-};
-
-static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
- {0x2C, 0xE7, 0x03}, /* 00: VCLK25_175 */
- {0x95, 0x62, 0x03}, /* 01: VCLK28_322 */
- {0x67, 0x63, 0x01}, /* 02: VCLK31_5 */
- {0x76, 0x63, 0x01}, /* 03: VCLK36 */
- {0xEE, 0x67, 0x01}, /* 04: VCLK40 */
- {0x82, 0x62, 0x01}, /* 05: VCLK49_5 */
- {0xC6, 0x64, 0x01}, /* 06: VCLK50 */
- {0x94, 0x62, 0x01}, /* 07: VCLK56_25 */
- {0x80, 0x64, 0x00}, /* 08: VCLK65 */
- {0x7B, 0x63, 0x00}, /* 09: VCLK75 */
- {0x67, 0x62, 0x00}, /* 0A: VCLK78_75 */
- {0x7C, 0x62, 0x00}, /* 0B: VCLK94_5 */
- {0x8E, 0x62, 0x00}, /* 0C: VCLK108 */
- {0x85, 0x24, 0x00}, /* 0D: VCLK135 */
- {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
- {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
- {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
- {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
- {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
- {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
- {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
- {0x47, 0x6c, 0x80}, /* 15: VCLK71 */
- {0x25, 0x65, 0x80}, /* 16: VCLK88.75 */
- {0x58, 0x01, 0x42}, /* 17: VCLK119 */
- {0x32, 0x67, 0x80}, /* 18: VCLK85_5 */
- {0x6a, 0x6d, 0x80}, /* 19: VCLK97_75 */
- {0x44, 0x20, 0x43}, /* 1A: VCLK118_25 */
-};
-
static const struct ast_vbios_stdtable vbios_stdtable[] = {
/* MD_2_3_400 */
{
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 0f7ffb3ced20..e0efc7309b1b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -20,6 +20,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -215,32 +216,32 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_XLCDC_CM),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_XLCDC_SD);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_XLCDC_SD,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n");
}
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_DISP),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_SYNC),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_HLCDC_PIXEL_CLK),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n");
clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
pinctrl_pm_select_sleep_state(dev->dev);
@@ -269,32 +270,32 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_PIXEL_CLK,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CLKSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CLKSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_SYNC,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register LCDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register LCDSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_HLCDC_DISP,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register DISPSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register DISPSTS timeout\n");
if (crtc->dc->desc->is_xlcdc) {
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_CM);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
status & ATMEL_XLCDC_CM,
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register CMSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register CMSTS timeout\n");
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_XLCDC_SD);
if (regmap_read_poll_timeout(regmap, ATMEL_HLCDC_SR, status,
!(status & ATMEL_XLCDC_SD),
10, 1000))
- dev_warn(dev->dev, "Atmel LCDC status register SDSTS timeout\n");
+ drm_warn(dev, "Atmel LCDC status register SDSTS timeout\n");
}
pm_runtime_put_sync(dev->dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index fa8ad94e431a..dd70894c8f38 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -724,19 +725,19 @@ static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
ret = atmel_hlcdc_create_outputs(dev);
if (ret) {
- dev_err(dev->dev, "failed to create HLCDC outputs: %d\n", ret);
+ drm_err(dev, "failed to create HLCDC outputs: %d\n", ret);
return ret;
}
ret = atmel_hlcdc_create_planes(dev);
if (ret) {
- dev_err(dev->dev, "failed to create planes: %d\n", ret);
+ drm_err(dev, "failed to create planes: %d\n", ret);
return ret;
}
ret = atmel_hlcdc_crtc_create(dev);
if (ret) {
- dev_err(dev->dev, "failed to create crtc\n");
+ drm_err(dev, "failed to create crtc\n");
return ret;
}
@@ -778,7 +779,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = clk_prepare_enable(dc->hlcdc->periph_clk);
if (ret) {
- dev_err(dev->dev, "failed to enable periph_clk\n");
+ drm_err(dev, "failed to enable periph_clk\n");
return ret;
}
@@ -786,13 +787,13 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = drm_vblank_init(dev, 1);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
+ drm_err(dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}
ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize mode setting\n");
+ drm_err(dev, "failed to initialize mode setting\n");
goto err_periph_clk_disable;
}
@@ -802,7 +803,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = atmel_hlcdc_dc_irq_install(dev, dc->hlcdc->irq);
pm_runtime_put_sync(dev->dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to install IRQ handler\n");
+ drm_err(dev, "failed to install IRQ handler\n");
goto err_periph_clk_disable;
}
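
The dev_*() to drm_*() conversions in these atmel-hlcdc hunks swap the raw struct device argument for the struct drm_device, so messages carry the DRM device prefix and stay attributable. A minimal sketch of the two forms; report_failure is a hypothetical helper, not driver code.

#include <drm/drm_print.h>

static void report_failure(struct drm_device *drm)
{
	dev_err(drm->dev, "failed to create crtc\n");	/* old: bare device name */
	drm_err(drm, "failed to create crtc\n");	/* new: DRM-aware prefix */
}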
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index e1a0bb24b511..53d47f01db0b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -378,7 +378,8 @@ struct atmel_lcdc_dc_ops {
void (*lcdc_update_buffers)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state,
u32 sr, int i);
- void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane);
+ void (*lcdc_atomic_disable)(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_dc *dc);
void (*lcdc_update_general_settings)(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_state *state);
void (*lcdc_atomic_update)(struct atmel_hlcdc_plane *plane,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 50fee6a93964..0b8a86afb096 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -15,6 +15,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "atmel_hlcdc_dc.h"
@@ -92,7 +93,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep);
of_node_put(ep);
if (output->bus_fmt < 0) {
- dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint);
+ drm_err(dev, "endpoint %d: invalid bus width\n", endpoint);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 4a7ba0918eca..92132be9823f 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -16,6 +16,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include "atmel_hlcdc_dc.h"
@@ -365,13 +366,34 @@ void atmel_xlcdc_plane_setup_scaler(struct atmel_hlcdc_plane *plane,
xfactor);
/*
- * With YCbCr 4:2:2 and YCbYcr 4:2:0 window resampling, configuration
- * register LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT is half
+ * With YCbCr 4:2:0 window resampling, configuration register
+ * LCDC_HEOCFG25.VXSCFACT and LCDC_HEOCFG27.HXSCFACT values are half
* the value of yfactor and xfactor.
+ *
+ * On the other hand, with YCbCr 4:2:2 window resampling, only the
+ * configuration register LCDC_HEOCFG27.HXSCFACT value is half the value
+ * of the xfactor; the value of LCDC_HEOCFG25.VXSCFACT is yfactor (no
+ * division by 2).
*/
- if (state->base.fb->format->format == DRM_FORMAT_YUV420) {
+ switch (state->base.fb->format->format) {
+ /* YCbCr 4:2:2 */
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_NV61:
+ xfactor /= 2;
+ break;
+
+ /* YCbCr 4:2:0 */
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_NV21:
yfactor /= 2;
xfactor /= 2;
+ break;
+ default:
+ break;
}
atmel_hlcdc_layer_write_cfg(&plane->layer, desc->layout.scaler_config + 2,
@@ -714,7 +736,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (!hstate->base.crtc || WARN_ON(!fb))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, s->crtc);
mode = &crtc_state->adjusted_mode;
ret = drm_atomic_helper_check_plane_state(s, crtc_state,
@@ -816,7 +838,8 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
return 0;
}
-static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
+static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_dc *dc)
{
/* Disable interrupts */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IDR,
@@ -832,7 +855,8 @@ static void atmel_hlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_HLCDC_LAYER_ISR);
}
-static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
+static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane,
+ struct atmel_hlcdc_dc *dc)
{
/* Disable interrupts */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_XLCDC_LAYER_IDR,
@@ -842,6 +866,15 @@ static void atmel_xlcdc_atomic_disable(struct atmel_hlcdc_plane *plane)
atmel_hlcdc_layer_write_reg(&plane->layer,
ATMEL_XLCDC_LAYER_ENR, 0);
+ /*
+ * Updating XLCDC_xxxCFGx, XLCDC_xxxFBA and XLCDC_xxxEN
+ * (where xxx indicates each layer) requires writing one to the
+ * Update Attribute field for each layer in the LCDC_ATTRE register on SAM9X7.
+ */
+ regmap_write(dc->hlcdc->regmap, ATMEL_XLCDC_ATTRE, ATMEL_XLCDC_BASE_UPDATE |
+ ATMEL_XLCDC_OVR1_UPDATE | ATMEL_XLCDC_OVR3_UPDATE |
+ ATMEL_XLCDC_HEO_UPDATE);
+
/* Clear all pending interrupts */
atmel_hlcdc_layer_read_reg(&plane->layer, ATMEL_XLCDC_LAYER_ISR);
}
@@ -852,7 +885,7 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_dc *dc = plane->base.dev->dev_private;
- dc->desc->ops->lcdc_atomic_disable(plane);
+ dc->desc->ops->lcdc_atomic_disable(plane, dc);
}
static void atmel_hlcdc_atomic_update(struct atmel_hlcdc_plane *plane,
@@ -1034,7 +1067,7 @@ static void atmel_hlcdc_irq_dbg(struct atmel_hlcdc_plane *plane,
if (isr &
(ATMEL_HLCDC_LAYER_OVR_IRQ(0) | ATMEL_HLCDC_LAYER_OVR_IRQ(1) |
ATMEL_HLCDC_LAYER_OVR_IRQ(2)))
- dev_dbg(plane->base.dev->dev, "overrun on plane %s\n",
+ drm_dbg(plane->base.dev, "overrun on plane %s\n",
desc->name);
}
@@ -1051,7 +1084,7 @@ static void atmel_xlcdc_irq_dbg(struct atmel_hlcdc_plane *plane,
if (isr &
(ATMEL_XLCDC_LAYER_OVR_IRQ(0) | ATMEL_XLCDC_LAYER_OVR_IRQ(1) |
ATMEL_XLCDC_LAYER_OVR_IRQ(2)))
- dev_dbg(plane->base.dev->dev, "overrun on plane %s\n",
+ drm_dbg(plane->base.dev, "overrun on plane %s\n",
desc->name);
}
@@ -1140,7 +1173,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
if (state) {
if (atmel_hlcdc_plane_alloc_dscrs(p, state)) {
kfree(state);
- dev_err(p->dev->dev,
+ drm_err(p->dev,
"Failed to allocate initial plane state\n");
return;
}
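
The YCbCr switch added to atmel_xlcdc_plane_setup_scaler() above encodes a standard subsampling rule: 4:2:0 halves the chroma resolution in both directions, while 4:2:2 halves it only horizontally. A hypothetical helper sketching the same rule, for illustration only:

/* Sketch of the scale-factor rule; not part of the driver. */
enum chroma { CHROMA_444, CHROMA_422, CHROMA_420 };

static void adjust_factors(enum chroma c, unsigned int *xfactor,
			   unsigned int *yfactor)
{
	if (c == CHROMA_422 || c == CHROMA_420)
		*xfactor /= 2;	/* chroma has half the horizontal samples */
	if (c == CHROMA_420)
		*yfactor /= 2;	/* ...and half the vertical samples too */
}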
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index b9e0ca85226a..a250afd8d662 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -120,8 +120,8 @@ config DRM_ITE_IT6505
select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
select EXTCON
- select CRYPTO
- select CRYPTO_HASH
+ select CRYPTO_LIB_SHA1
+ select REGMAP_I2C
help
ITE IT6505 DisplayPort bridge chip driver.
@@ -316,6 +316,19 @@ config DRM_SIMPLE_BRIDGE
Support for non-programmable DRM bridges, such as ADI ADV7123, TI
THS8134 and THS8135 or passive resistor ladder DACs.
+config DRM_SOLOMON_SSD2825
+ tristate "SSD2825 RGB/DSI bridge"
+ depends on SPI_MASTER && OF
+ select DRM_MIPI_DSI
+ select DRM_KMS_HELPER
+ select DRM_PANEL
+ help
+ Say Y here if you want support for the Solomon SSD2825 RGB/DSI
+ SPI bridge driver.
+
+ Say M here if you want to support this hardware as a module.
+ The module will be named "ssd2825".
+
config DRM_THINE_THC63LVD1024
tristate "Thine THC63LVD1024 LVDS decoder bridge"
depends on OF
@@ -438,6 +451,18 @@ config DRM_TI_TPD12S015
Texas Instruments TPD12S015 HDMI level shifter and ESD protection
driver.
+config DRM_WAVESHARE_BRIDGE
+ tristate "Waveshare DSI bridge"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_PANEL_BRIDGE
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select REGMAP_I2C
+ help
+ Driver for the Waveshare DSI to DPI bridge board.
+ Please say Y if you have such hardware.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 245e8a27e3fc..c7dc03182e59 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
+obj-$(CONFIG_DRM_SOLOMON_SSD2825) += ssd2825.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358762) += tc358762.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
@@ -40,6 +41,7 @@ obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TDP158) += ti-tdp158.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
+obj-$(CONFIG_DRM_WAVESHARE_BRIDGE) += waveshare-dsi.o
obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
obj-$(CONFIG_DRM_ITE_IT66121) += ite-it66121.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 85ebead9809c..8be7266fd4f4 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -195,13 +195,14 @@
#define ADV7511_I2S_IEC958_DIRECT 3
#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
-#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
+#define ADV7511_PACKET_SPD(x) ADV7511_PACKET(0, x)
#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
-#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
+#define ADV7511_PACKET_SPARE1(x) ADV7511_PACKET(6, x)
+#define ADV7511_PACKET_SPARE2(x) ADV7511_PACKET(7, x)
#define ADV7511_REG_CEC_TX_FRAME_HDR 0x00
#define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01
@@ -348,6 +349,7 @@ struct adv7511 {
struct i2c_client *i2c_cec;
struct regmap *regmap;
+ struct regmap *regmap_packet;
struct regmap *regmap_cec;
enum drm_connector_status status;
bool powered;
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 766b1c96bc88..87e7e820810a 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -12,6 +12,8 @@
#include <sound/soc.h>
#include <linux/of_graph.h>
+#include <drm/display/drm_hdmi_state_helper.h>
+
#include "adv7511.h"
static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
@@ -155,17 +157,8 @@ int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge,
regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
- /* send current Audio infoframe values while updating */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
- BIT(5), BIT(5));
-
- regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
-
- /* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
- BIT(5), 0);
-
- return 0;
+ return drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector,
+ &hparms->cea);
}
int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
@@ -188,15 +181,9 @@ int adv7511_hdmi_audio_startup(struct drm_bridge *bridge,
/* not copyrighted */
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1,
BIT(5), BIT(5));
- /* enable audio infoframes */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
- BIT(3), BIT(3));
/* AV mute disable */
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
BIT(7) | BIT(6), BIT(7));
- /* use Audio infoframe updated info */
- regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
- BIT(5), 0);
/* enable SPDIF receiver */
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
@@ -214,4 +201,6 @@ void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge,
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
+
+ drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector);
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 00d6417c177b..b9be86541307 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -132,6 +132,13 @@ static const struct regmap_config adv7511_regmap_config = {
.volatile_reg = adv7511_register_volatile,
};
+static const struct regmap_config adv7511_packet_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+};
+
/* -----------------------------------------------------------------------------
* Hardware configuration
*/
@@ -886,9 +893,18 @@ static int adv7511_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge,
struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ break;
case HDMI_INFOFRAME_TYPE_AVI:
adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ break;
default:
drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
break;
@@ -903,16 +919,52 @@ static int adv7511_bridge_hdmi_write_infoframe(struct drm_bridge *bridge,
{
struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
- adv7511_bridge_hdmi_clear_infoframe(bridge, type);
-
switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ /* send current Audio infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), BIT(5));
+
+ /* The Audio infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_VERSION,
+ buffer + 1, len - 1);
+
+ /* use Audio infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), 0);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ break;
case HDMI_INFOFRAME_TYPE_AVI:
+ /* send current AVI infoframe values while updating */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(6), BIT(6));
+
/* The AVI infoframe id is not configurable */
regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
buffer + 1, len - 1);
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_LENGTH, 0x2);
+ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(1), 0x1);
+
+ /* use AVI infoframe updated info */
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(6), 0);
+
adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPD(0),
+ buffer, len);
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPD);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPARE1(0),
+ buffer, len);
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPARE1);
+ break;
default:
drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
break;
@@ -1242,6 +1294,13 @@ static int adv7511_probe(struct i2c_client *i2c)
goto err_i2c_unregister_edid;
}
+ adv7511->regmap_packet = devm_regmap_init_i2c(adv7511->i2c_packet,
+ &adv7511_packet_config);
+ if (IS_ERR(adv7511->regmap_packet)) {
+ ret = PTR_ERR(adv7511->regmap_packet);
+ goto err_i2c_unregister_packet;
+ }
+
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
adv7511->i2c_packet->addr << 1);
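
The AVI and Audio infoframe paths above share one shadow-update pattern: latch the currently transmitted frame, rewrite the registers, then release the latch so the new contents go out atomically. A generic sketch; the helper name and the single hold-bit parameterization are assumptions for illustration:

/* Hypothetical shadow-update helper; mirrors the pattern, not the driver API. */
static void infoframe_update(struct regmap *map, unsigned int update_reg,
			     unsigned int hold_bit, unsigned int base,
			     const u8 *buf, size_t len)
{
	regmap_update_bits(map, update_reg, hold_bit, hold_bit);	/* hold current frame */
	regmap_bulk_write(map, base, buf, len);				/* rewrite contents */
	regmap_update_bits(map, update_reg, hold_bit, 0);		/* release the latch */
}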
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index c0ad8f59e483..6f3fdcb6afdb 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2604,6 +2604,7 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
platform->bridge.type = platform->pdata.panel_bridge ?
DRM_MODE_CONNECTOR_eDP :
DRM_MODE_CONNECTOR_DisplayPort;
+ platform->bridge.support_hdcp = true;
drm_bridge_add(&platform->bridge);
@@ -2677,7 +2678,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
NULL, anx7625_intr_hpd_isr,
IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN,
"anx7625-intp", platform);
if (ret) {
DRM_DEV_ERROR(dev, "fail to request irq\n");
@@ -2746,8 +2747,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
/* Add work function */
- if (platform->pdata.intp_irq)
+ if (platform->pdata.intp_irq) {
+ enable_irq(platform->pdata.intp_irq);
queue_work(platform->workqueue, &platform->work);
+ }
if (platform->pdata.audio_en)
anx7625_register_audio(dev, platform);
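
The anx7625 change pairs IRQF_NO_AUTOEN with a later enable_irq(): the handler is registered early but the line stays masked until driver state is ready, closing the window where an interrupt could run against half-initialized data. A minimal sketch; my_isr, "my-dev" and priv are hypothetical names:

static int my_probe_irq(struct device *dev, int irq, void *priv)
{
	int ret;

	/* register now, but leave the line masked (IRQF_NO_AUTOEN) */
	ret = devm_request_threaded_irq(dev, irq, NULL, my_isr,
					IRQF_TRIGGER_FALLING |
					IRQF_ONESHOT | IRQF_NO_AUTOEN,
					"my-dev", priv);
	if (ret)
		return ret;

	/* ... finish initializing state the handler depends on ... */

	enable_irq(irq);	/* unmask once it is safe to service */
	return 0;
}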
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index cced81633ddc..f1d8a8a151d8 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -6,6 +6,7 @@ config DRM_CDNS_DSI
select DRM_PANEL_BRIDGE
select GENERIC_PHY
select GENERIC_PHY_MIPI_DPHY
+ select VIDEOMODE_HELPERS
depends on OF
help
Support Cadence DPI to DSI bridge. This is an internal
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index a57ca8c3bdae..09b289f0fcbf 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -9,6 +9,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
#include <video/mipi_display.h>
+#include <video/videomode.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
@@ -417,7 +418,8 @@
#define DSI_OUTPUT_PORT 0
#define DSI_INPUT_PORT(inputid) (1 + (inputid))
-#define DSI_HBP_FRAME_OVERHEAD 12
+#define DSI_HBP_FRAME_PULSE_OVERHEAD 12
+#define DSI_HBP_FRAME_EVENT_OVERHEAD 16
#define DSI_HSA_FRAME_OVERHEAD 14
#define DSI_HFP_FRAME_OVERHEAD 6
#define DSI_HSS_VSS_VSE_FRAME_OVERHEAD 4
@@ -452,15 +454,6 @@ bridge_to_cdns_dsi_input(struct drm_bridge *bridge)
return container_of(bridge, struct cdns_dsi_input, bridge);
}
-static unsigned int mode_to_dpi_hfp(const struct drm_display_mode *mode,
- bool mode_valid_check)
-{
- if (mode_valid_check)
- return mode->hsync_start - mode->hdisplay;
-
- return mode->crtc_hsync_start - mode->crtc_hdisplay;
-}
-
static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
unsigned int dpi_bpp,
unsigned int dsi_pkt_overhead)
@@ -476,145 +469,77 @@ static unsigned int dpi_to_dsi_timing(unsigned int dpi_timing,
}
static int cdns_dsi_mode2cfg(struct cdns_dsi *dsi,
- const struct drm_display_mode *mode,
- struct cdns_dsi_cfg *dsi_cfg,
- bool mode_valid_check)
+ const struct videomode *vm,
+ struct cdns_dsi_cfg *dsi_cfg)
{
struct cdns_dsi_output *output = &dsi->output;
- unsigned int tmp;
- bool sync_pulse = false;
+ u32 dpi_hsa, dpi_hbp, dpi_hfp, dpi_hact;
+ bool sync_pulse;
int bpp;
+ dpi_hsa = vm->hsync_len;
+ dpi_hbp = vm->hback_porch;
+ dpi_hfp = vm->hfront_porch;
+ dpi_hact = vm->hactive;
+
memset(dsi_cfg, 0, sizeof(*dsi_cfg));
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- sync_pulse = true;
+ sync_pulse = output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
bpp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
- if (mode_valid_check)
- tmp = mode->htotal -
- (sync_pulse ? mode->hsync_end : mode->hsync_start);
- else
- tmp = mode->crtc_htotal -
- (sync_pulse ?
- mode->crtc_hsync_end : mode->crtc_hsync_start);
-
- dsi_cfg->hbp = dpi_to_dsi_timing(tmp, bpp, DSI_HBP_FRAME_OVERHEAD);
-
if (sync_pulse) {
- if (mode_valid_check)
- tmp = mode->hsync_end - mode->hsync_start;
- else
- tmp = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp, bpp,
+ DSI_HBP_FRAME_PULSE_OVERHEAD);
- dsi_cfg->hsa = dpi_to_dsi_timing(tmp, bpp,
+ dsi_cfg->hsa = dpi_to_dsi_timing(dpi_hsa, bpp,
DSI_HSA_FRAME_OVERHEAD);
- }
-
- dsi_cfg->hact = dpi_to_dsi_timing(mode_valid_check ?
- mode->hdisplay : mode->crtc_hdisplay,
- bpp, 0);
- dsi_cfg->hfp = dpi_to_dsi_timing(mode_to_dpi_hfp(mode, mode_valid_check),
- bpp, DSI_HFP_FRAME_OVERHEAD);
-
- return 0;
-}
-
-static int cdns_dsi_adjust_phy_config(struct cdns_dsi *dsi,
- struct cdns_dsi_cfg *dsi_cfg,
- struct phy_configure_opts_mipi_dphy *phy_cfg,
- const struct drm_display_mode *mode,
- bool mode_valid_check)
-{
- struct cdns_dsi_output *output = &dsi->output;
- unsigned long long dlane_bps;
- unsigned long adj_dsi_htotal;
- unsigned long dsi_htotal;
- unsigned long dpi_htotal;
- unsigned long dpi_hz;
- unsigned int dsi_hfp_ext;
- unsigned int lanes = output->dev->lanes;
-
- dsi_htotal = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- dsi_htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+ } else {
+ dsi_cfg->hbp = dpi_to_dsi_timing(dpi_hbp + dpi_hsa, bpp,
+ DSI_HBP_FRAME_EVENT_OVERHEAD);
- dsi_htotal += dsi_cfg->hact;
- dsi_htotal += dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
-
- /*
- * Make sure DSI htotal is aligned on a lane boundary when calculating
- * the expected data rate. This is done by extending HFP in case of
- * misalignment.
- */
- adj_dsi_htotal = dsi_htotal;
- if (dsi_htotal % lanes)
- adj_dsi_htotal += lanes - (dsi_htotal % lanes);
+ dsi_cfg->hsa = 0;
+ }
- dpi_hz = (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000;
- dlane_bps = (unsigned long long)dpi_hz * adj_dsi_htotal;
+ dsi_cfg->hact = dpi_to_dsi_timing(dpi_hact, bpp, 0);
- /* data rate in bytes/sec is not an integer, refuse the mode. */
- dpi_htotal = mode_valid_check ? mode->htotal : mode->crtc_htotal;
- if (do_div(dlane_bps, lanes * dpi_htotal))
- return -EINVAL;
+ dsi_cfg->hfp = dpi_to_dsi_timing(dpi_hfp, bpp, DSI_HFP_FRAME_OVERHEAD);
- /* data rate was in bytes/sec, convert to bits/sec. */
- phy_cfg->hs_clk_rate = dlane_bps * 8;
+ dsi_cfg->htotal = dsi_cfg->hact + dsi_cfg->hfp + DSI_HFP_FRAME_OVERHEAD;
- dsi_hfp_ext = adj_dsi_htotal - dsi_htotal;
- dsi_cfg->hfp += dsi_hfp_ext;
- dsi_cfg->htotal = dsi_htotal + dsi_hfp_ext;
+ if (sync_pulse) {
+ dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_PULSE_OVERHEAD;
+ dsi_cfg->htotal += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
+ } else {
+ dsi_cfg->htotal += dsi_cfg->hbp + DSI_HBP_FRAME_EVENT_OVERHEAD;
+ }
return 0;
}
static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
- const struct drm_display_mode *mode,
- struct cdns_dsi_cfg *dsi_cfg,
- bool mode_valid_check)
+ const struct videomode *vm,
+ struct cdns_dsi_cfg *dsi_cfg)
{
struct cdns_dsi_output *output = &dsi->output;
struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy;
- unsigned long dsi_hss_hsa_hse_hbp;
unsigned int nlanes = output->dev->lanes;
- int mode_clock = (mode_valid_check ? mode->clock : mode->crtc_clock);
int ret;
- ret = cdns_dsi_mode2cfg(dsi, mode, dsi_cfg, mode_valid_check);
+ ret = cdns_dsi_mode2cfg(dsi, vm, dsi_cfg);
if (ret)
return ret;
- ret = phy_mipi_dphy_get_default_config(mode_clock * 1000,
+ ret = phy_mipi_dphy_get_default_config(vm->pixelclock,
mipi_dsi_pixel_format_to_bpp(output->dev->format),
nlanes, phy_cfg);
if (ret)
return ret;
- ret = cdns_dsi_adjust_phy_config(dsi, dsi_cfg, phy_cfg, mode, mode_valid_check);
- if (ret)
- return ret;
-
ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &output->phy_opts);
if (ret)
return ret;
- dsi_hss_hsa_hse_hbp = dsi_cfg->hbp + DSI_HBP_FRAME_OVERHEAD;
- if (output->dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
- dsi_hss_hsa_hse_hbp += dsi_cfg->hsa + DSI_HSA_FRAME_OVERHEAD;
-
- /*
- * Make sure DPI(HFP) > DSI(HSS+HSA+HSE+HBP) to guarantee that the FIFO
- * is empty before we start a receiving a new line on the DPI
- * interface.
- */
- if ((u64)phy_cfg->hs_clk_rate *
- mode_to_dpi_hfp(mode, mode_valid_check) * nlanes <
- (u64)dsi_hss_hsa_hse_hbp *
- (mode_valid_check ? mode->clock : mode->crtc_clock) * 1000)
- return -EINVAL;
-
return 0;
}
@@ -644,8 +569,7 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_output *output = &dsi->output;
- struct cdns_dsi_cfg dsi_cfg;
- int bpp, ret;
+ int bpp;
/*
* VFP_DSI should be less than VFP_DPI and VFP_DSI should be at
@@ -663,10 +587,6 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
if ((mode->hdisplay * bpp) % 32)
return MODE_H_ILLEGAL;
- ret = cdns_dsi_check_conf(dsi, mode, &dsi_cfg, true);
- if (ret)
- return MODE_BAD;
-
return MODE_OK;
}
@@ -882,7 +802,13 @@ static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
tx_byte_period = DIV_ROUND_DOWN_ULL((u64)NSEC_PER_SEC * 8,
phy_cfg->hs_clk_rate);
- reg_wakeup = (phy_cfg->hs_prepare + phy_cfg->hs_zero) / tx_byte_period;
+
+ /*
+ * Estimated time [in clock cycles] to perform LP->HS on D-PHY.
+ * It is not clear how to calculate this, so for now,
+ * set it to 1/10 of the total number of clocks in a line.
+ */
+ reg_wakeup = dsi_cfg.htotal / nlanes / 10;
writel(REG_WAKEUP_TIME(reg_wakeup) | REG_LINE_DURATION(tmp),
dsi->regs + VID_DPHY_TIME);
@@ -989,6 +915,28 @@ static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+static long cdns_dsi_round_pclk(struct cdns_dsi *dsi, unsigned long pclk)
+{
+ struct cdns_dsi_output *output = &dsi->output;
+ unsigned int nlanes = output->dev->lanes;
+ union phy_configure_opts phy_opts = { 0 };
+ u32 bitspp;
+ int ret;
+
+ bitspp = mipi_dsi_pixel_format_to_bpp(output->dev->format);
+
+ ret = phy_mipi_dphy_get_default_config(pclk, bitspp, nlanes,
+ &phy_opts.mipi_dphy);
+ if (ret)
+ return ret;
+
+ ret = phy_validate(dsi->dphy, PHY_MODE_MIPI_DPHY, 0, &phy_opts);
+ if (ret)
+ return ret;
+
+ return div_u64((u64)phy_opts.mipi_dphy.hs_clk_rate * nlanes, bitspp);
+}
+
static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -997,10 +945,32 @@ static int cdns_dsi_bridge_atomic_check(struct drm_bridge *bridge,
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
struct cdns_dsi_bridge_state *dsi_state = to_cdns_dsi_bridge_state(bridge_state);
- const struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct cdns_dsi_cfg *dsi_cfg = &dsi_state->dsi_cfg;
+ struct videomode vm;
+ long pclk;
+
+ /* cdns-dsi requires negative syncs */
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC;
- return cdns_dsi_check_conf(dsi, mode, dsi_cfg, false);
+ /*
+ * The DPHY PLL offers only coarsely grained clock rate options. See
+ * what hsclk rate we can achieve based on the pixel clock, convert it
+ * back to a pixel clock, and set that as adjusted_mode->clock. This is
+ * all in hopes that the CRTC will be able to provide us the requested
+ * clock, as otherwise the DPI and DSI clocks will be out of sync.
+ */
+
+ pclk = cdns_dsi_round_pclk(dsi, adjusted_mode->clock * 1000);
+ if (pclk < 0)
+ return (int)pclk;
+
+ adjusted_mode->clock = pclk / 1000;
+
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+
+ return cdns_dsi_check_conf(dsi, &vm, dsi_cfg);
}
static struct drm_bridge_state *
@@ -1082,10 +1052,6 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host,
if (output->dev)
return -EBUSY;
- /* We do not support burst mode yet. */
- if (dev->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
- return -ENOTSUPP;
-
/*
* The host <-> device link might be described using an OF-graph
* representation, in this case we extract the device of_node from
@@ -1442,4 +1408,3 @@ MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
MODULE_DESCRIPTION("Cadence DSI driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-dsi");
-
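
cdns_dsi_round_pclk() above leans on the link-rate identity pclk * bpp = hs_clk_rate * nlanes to translate the PHY's coarse HS clock grid back into a pixel clock. A standalone sketch of that round trip; the sample numbers are assumptions:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t pclk = 148500000;	/* requested pixel clock, Hz */
	unsigned int bpp = 24, nlanes = 4;

	/* per-lane HS bit rate needed to carry the pixel stream */
	uint64_t hs_clk_rate = pclk * bpp / nlanes;	/* 891 Mbps */

	/* a real PHY would round hs_clk_rate to a supported value here */

	/* convert the (possibly rounded) rate back to a pixel clock */
	uint64_t rounded_pclk = hs_clk_rate * nlanes / bpp;

	printf("%llu -> %llu\n",
	       (unsigned long long)pclk, (unsigned long long)rounded_pclk);
	return 0;
}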
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index a614d1384f71..38726ae1bf15 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -1984,8 +1984,10 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
mhdp_state = to_cdns_mhdp_bridge_state(new_state);
mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
- if (!mhdp_state->current_mode)
- return;
+ if (!mhdp_state->current_mode) {
+ ret = -EINVAL;
+ goto out;
+ }
drm_mode_set_name(mhdp_state->current_mode);
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
index 52b7b5889e6f..e9f16dbc9535 100644
--- a/drivers/gpu/drm/bridge/display-connector.c
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -108,7 +108,7 @@ static u32 *display_connector_get_output_bus_fmts(struct drm_bridge *bridge,
struct drm_connector_state *conn_state,
unsigned int *num_output_fmts)
{
- struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge);
struct drm_bridge_state *prev_bridge_state;
if (!prev_bridge || !prev_bridge->funcs->atomic_get_output_bus_fmts) {
@@ -151,7 +151,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts)
{
- struct drm_bridge *prev_bridge = drm_bridge_get_prev_bridge(bridge);
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) = drm_bridge_get_prev_bridge(bridge);
struct drm_bridge_state *prev_bridge_state;
if (!prev_bridge || !prev_bridge->funcs->atomic_get_input_bus_fmts) {
@@ -373,7 +373,8 @@ static int display_connector_probe(struct platform_device *pdev)
if (conn->bridge.ddc)
conn->bridge.ops |= DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_DETECT;
- if (conn->hpd_gpio)
+ /* Detecting the monitor requires reading DPCD */
+ if (conn->hpd_gpio && type != DRM_MODE_CONNECTOR_DisplayPort)
conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
if (conn->hpd_irq >= 0)
conn->bridge.ops |= DRM_BRIDGE_OP_HPD;
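
The __free(drm_bridge_put) annotations above use the kernel's scope-based cleanup machinery from <linux/cleanup.h>: the named release routine runs automatically when the variable leaves scope. A generic sketch with a hypothetical object type (struct obj, obj_get, obj_put, use are assumptions):

#include <linux/cleanup.h>

DEFINE_FREE(obj_put, struct obj *, if (_T) obj_put(_T))

void demo(void)
{
	struct obj *o __free(obj_put) = obj_get();

	if (!o)
		return;		/* nothing to release */

	use(o);
}	/* obj_put(o) runs here automatically */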
diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig
index 9a480c6abb85..b9028a5e5a06 100644
--- a/drivers/gpu/drm/bridge/imx/Kconfig
+++ b/drivers/gpu/drm/bridge/imx/Kconfig
@@ -18,12 +18,23 @@ config DRM_IMX8MP_DW_HDMI_BRIDGE
depends on OF
depends on COMMON_CLK
select DRM_DW_HDMI
+ imply DRM_IMX8MP_HDMI_PAI
imply DRM_IMX8MP_HDMI_PVI
imply PHY_FSL_SAMSUNG_HDMI_PHY
help
Choose this to enable support for the internal HDMI encoder found
on the i.MX8MP SoC.
+config DRM_IMX8MP_HDMI_PAI
+ tristate "Freescale i.MX8MP HDMI PAI bridge support"
+ depends on OF
+ select DRM_DW_HDMI
+ select REGMAP
+ select REGMAP_MMIO
+ help
+ Choose this to enable support for the internal HDMI TX Parallel
+ Audio Interface found on the Freescale i.MX8MP SoC.
+
config DRM_IMX8MP_HDMI_PVI
tristate "Freescale i.MX8MP HDMI PVI bridge support"
depends on OF
diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile
index dd5d48584806..8d01fda25451 100644
--- a/drivers/gpu/drm/bridge/imx/Makefile
+++ b/drivers/gpu/drm/bridge/imx/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o
obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o
obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o
+obj-$(CONFIG_DRM_IMX8MP_HDMI_PAI) += imx8mp-hdmi-pai.o
obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o
obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o
obj-$(CONFIG_DRM_IMX8QXP_LDB) += imx8qxp-ldb.o
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c
new file mode 100644
index 000000000000..8d13a35b206a
--- /dev/null
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pai.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2025 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/component.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <drm/bridge/dw_hdmi.h>
+#include <sound/asoundef.h>
+
+#define HTX_PAI_CTRL 0x00
+#define ENABLE BIT(0)
+
+#define HTX_PAI_CTRL_EXT 0x04
+#define WTMK_HIGH_MASK GENMASK(31, 24)
+#define WTMK_LOW_MASK GENMASK(23, 16)
+#define NUM_CH_MASK GENMASK(10, 8)
+#define WTMK_HIGH(n) FIELD_PREP(WTMK_HIGH_MASK, (n))
+#define WTMK_LOW(n) FIELD_PREP(WTMK_LOW_MASK, (n))
+#define NUM_CH(n) FIELD_PREP(NUM_CH_MASK, (n) - 1)
+
+#define HTX_PAI_FIELD_CTRL 0x08
+#define PRE_SEL GENMASK(28, 24)
+#define D_SEL GENMASK(23, 20)
+#define V_SEL GENMASK(19, 15)
+#define U_SEL GENMASK(14, 10)
+#define C_SEL GENMASK(9, 5)
+#define P_SEL GENMASK(4, 0)
+
+struct imx8mp_hdmi_pai {
+ struct regmap *regmap;
+};
+
+static void imx8mp_hdmi_pai_enable(struct dw_hdmi *dw_hdmi, int channel,
+ int width, int rate, int non_pcm,
+ int iec958)
+{
+ const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
+ struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;
+ int val;
+
+ /* PAI set control extended */
+ val = WTMK_HIGH(3) | WTMK_LOW(3);
+ val |= NUM_CH(channel);
+ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL_EXT, val);
+
+ /* IEC60958 format */
+ if (iec958) {
+ val = FIELD_PREP_CONST(P_SEL,
+ __bf_shf(IEC958_SUBFRAME_PARITY));
+ val |= FIELD_PREP_CONST(C_SEL,
+ __bf_shf(IEC958_SUBFRAME_CHANNEL_STATUS));
+ val |= FIELD_PREP_CONST(U_SEL,
+ __bf_shf(IEC958_SUBFRAME_USER_DATA));
+ val |= FIELD_PREP_CONST(V_SEL,
+ __bf_shf(IEC958_SUBFRAME_VALIDITY));
+ val |= FIELD_PREP_CONST(D_SEL,
+ __bf_shf(IEC958_SUBFRAME_SAMPLE_24_MASK));
+ val |= FIELD_PREP_CONST(PRE_SEL,
+ __bf_shf(IEC958_SUBFRAME_PREAMBLE_MASK));
+ } else {
+ /*
+ * The allowed PCM widths are 24 bit and 32 bit, as supported by the
+ * aud2htx module.
+ * For 24 bit, D_SEL = 0 selects all the bits.
+ * For 32 bit, D_SEL = 8 selects the 24 MSBs.
+ */
+ val = FIELD_PREP(D_SEL, width - 24);
+ }
+
+ regmap_write(hdmi_pai->regmap, HTX_PAI_FIELD_CTRL, val);
+
+ /* PAI start running */
+ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, ENABLE);
+}
+
+static void imx8mp_hdmi_pai_disable(struct dw_hdmi *dw_hdmi)
+{
+ const struct dw_hdmi_plat_data *pdata = dw_hdmi_to_plat_data(dw_hdmi);
+ struct imx8mp_hdmi_pai *hdmi_pai = pdata->priv_audio;
+
+ /* Stop PAI */
+ regmap_write(hdmi_pai->regmap, HTX_PAI_CTRL, 0);
+}
+
+static const struct regmap_config imx8mp_hdmi_pai_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = HTX_PAI_FIELD_CTRL,
+};
+
+static int imx8mp_hdmi_pai_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_hdmi_plat_data *plat_data = data;
+ struct imx8mp_hdmi_pai *hdmi_pai;
+ struct resource *res;
+ void __iomem *base;
+
+ hdmi_pai = devm_kzalloc(dev, sizeof(*hdmi_pai), GFP_KERNEL);
+ if (!hdmi_pai)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ hdmi_pai->regmap = devm_regmap_init_mmio_clk(dev, "apb", base,
+ &imx8mp_hdmi_pai_regmap_config);
+ if (IS_ERR(hdmi_pai->regmap)) {
+ dev_err(dev, "regmap init failed\n");
+ return PTR_ERR(hdmi_pai->regmap);
+ }
+
+ plat_data->enable_audio = imx8mp_hdmi_pai_enable;
+ plat_data->disable_audio = imx8mp_hdmi_pai_disable;
+ plat_data->priv_audio = hdmi_pai;
+
+ return 0;
+}
+
+static const struct component_ops imx8mp_hdmi_pai_ops = {
+ .bind = imx8mp_hdmi_pai_bind,
+};
+
+static int imx8mp_hdmi_pai_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &imx8mp_hdmi_pai_ops);
+}
+
+static void imx8mp_hdmi_pai_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &imx8mp_hdmi_pai_ops);
+}
+
+static const struct of_device_id imx8mp_hdmi_pai_of_table[] = {
+ { .compatible = "fsl,imx8mp-hdmi-pai" },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx8mp_hdmi_pai_of_table);
+
+static struct platform_driver imx8mp_hdmi_pai_platform_driver = {
+ .probe = imx8mp_hdmi_pai_probe,
+ .remove = imx8mp_hdmi_pai_remove,
+ .driver = {
+ .name = "imx8mp-hdmi-pai",
+ .of_match_table = imx8mp_hdmi_pai_of_table,
+ },
+};
+module_platform_driver(imx8mp_hdmi_pai_platform_driver);
+
+MODULE_DESCRIPTION("i.MX8MP HDMI PAI driver");
+MODULE_LICENSE("GPL");
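
For the PCM path in the new PAI driver, D_SEL = width - 24 is simply the bit offset of the 24 significant bits within each input sample. A trivial sketch of that mapping; the helper is hypothetical:

/* Hypothetical helper; width is 24 or 32 per the comment above. */
unsigned int pcm_d_sel(unsigned int width)
{
	return width - 24;	/* 0 for 24-bit, 8 for 24-in-32 MSB data */
}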
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
index 1e7a789ec289..32fd3554e267 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-tx.c
@@ -5,11 +5,13 @@
*/
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_modes.h>
+#include <drm/drm_of.h>
struct imx8mp_hdmi {
struct dw_hdmi_plat_data plat_data;
@@ -79,10 +81,45 @@ static const struct dw_hdmi_phy_ops imx8mp_hdmi_phy_ops = {
.update_hpd = dw_hdmi_phy_update_hpd,
};
+static int imx8mp_dw_hdmi_bind(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = component_bind_all(dev, &hdmi->plat_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "component_bind_all failed!\n");
+
+ hdmi->dw_hdmi = dw_hdmi_probe(pdev, &hdmi->plat_data);
+ if (IS_ERR(hdmi->dw_hdmi)) {
+ component_unbind_all(dev, &hdmi->plat_data);
+ return PTR_ERR(hdmi->dw_hdmi);
+ }
+
+ return 0;
+}
+
+static void imx8mp_dw_hdmi_unbind(struct device *dev)
+{
+ struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_remove(hdmi->dw_hdmi);
+
+ component_unbind_all(dev, &hdmi->plat_data);
+}
+
+static const struct component_master_ops imx8mp_dw_hdmi_ops = {
+ .bind = imx8mp_dw_hdmi_bind,
+ .unbind = imx8mp_dw_hdmi_unbind,
+};
+
static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dw_hdmi_plat_data *plat_data;
+ struct component_match *match = NULL;
+ struct device_node *remote;
struct imx8mp_hdmi *hdmi;
hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
@@ -102,20 +139,38 @@ static int imx8mp_dw_hdmi_probe(struct platform_device *pdev)
plat_data->priv_data = hdmi;
plat_data->phy_force_vendor = true;
- hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
- if (IS_ERR(hdmi->dw_hdmi))
- return PTR_ERR(hdmi->dw_hdmi);
-
platform_set_drvdata(pdev, hdmi);
+ /* port@2 is for the hdmi_pai device */
+ remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0);
+ if (!remote) {
+ hdmi->dw_hdmi = dw_hdmi_probe(pdev, plat_data);
+ if (IS_ERR(hdmi->dw_hdmi))
+ return PTR_ERR(hdmi->dw_hdmi);
+ } else {
+ drm_of_component_match_add(dev, &match, component_compare_of, remote);
+
+ of_node_put(remote);
+
+ return component_master_add_with_match(dev, &imx8mp_dw_hdmi_ops, match);
+ }
+
return 0;
}
static void imx8mp_dw_hdmi_remove(struct platform_device *pdev)
{
struct imx8mp_hdmi *hdmi = platform_get_drvdata(pdev);
+ struct device_node *remote;
- dw_hdmi_remove(hdmi->dw_hdmi);
+ remote = of_graph_get_remote_node(pdev->dev.of_node, 2, 0);
+ if (remote) {
+ of_node_put(remote);
+
+ component_master_del(&pdev->dev, &imx8mp_dw_hdmi_ops);
+ } else {
+ dw_hdmi_remove(hdmi->dw_hdmi);
+ }
}
static int imx8mp_dw_hdmi_pm_suspend(struct device *dev)
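
The probe path above becomes a component master only when port@2 has a remote endpoint; otherwise it keeps the original direct dw_hdmi_probe() call. The ordering guarantee being relied on is that component_bind_all() runs every matched child's bind callback, handing each of them the master's data pointer, before returning, so the PAI child has already installed its audio callbacks into the shared dw_hdmi_plat_data by the time dw_hdmi_probe() consumes it. A minimal sketch of that handshake (hypothetical names, not part of this patch):

	#include <linux/component.h>

	static int child_bind(struct device *dev, struct device *master, void *data)
	{
		struct dw_hdmi_plat_data *pd = data;	/* the master's plat_data */

		pd->enable_audio = my_enable_audio;	/* hypothetical callback */
		return 0;
	}

	static int master_bind(struct device *dev)
	{
		struct imx8mp_hdmi *hdmi = dev_get_drvdata(dev);

		/* all child ->bind() callbacks complete before this returns */
		return component_bind_all(dev, &hdmi->plat_data);
	}
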
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
index 5d272916e200..122502968927 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c
@@ -683,11 +683,6 @@ static void imx8qxp_ldb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
}
-static int imx8qxp_ldb_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
static int imx8qxp_ldb_runtime_resume(struct device *dev)
{
struct imx8qxp_ldb *imx8qxp_ldb = dev_get_drvdata(dev);
@@ -700,7 +695,7 @@ static int imx8qxp_ldb_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops imx8qxp_ldb_pm_ops = {
- RUNTIME_PM_OPS(imx8qxp_ldb_runtime_suspend, imx8qxp_ldb_runtime_resume, NULL)
+ RUNTIME_PM_OPS(NULL, imx8qxp_ldb_runtime_resume, NULL)
};
static const struct of_device_id imx8qxp_ldb_dt_ids[] = {
diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
index bea8346515b8..8f7a0d46601a 100644
--- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c
@@ -492,14 +492,12 @@ static int imx93_dsi_get_phy_configure_opts(struct imx93_dsi *dsi,
static enum drm_mode_status
imx93_dsi_validate_mode(struct imx93_dsi *dsi, const struct drm_display_mode *mode)
{
- struct drm_bridge *bridge = dw_mipi_dsi_get_bridge(dsi->dmd);
+ struct drm_bridge *dmd_bridge = dw_mipi_dsi_get_bridge(dsi->dmd);
+ struct drm_bridge *last_bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_last_bridge(dmd_bridge->encoder);
- /* Get the last bridge */
- while (drm_bridge_get_next_bridge(bridge))
- bridge = drm_bridge_get_next_bridge(bridge);
-
- if ((bridge->ops & DRM_BRIDGE_OP_DETECT) &&
- (bridge->ops & DRM_BRIDGE_OP_EDID)) {
+ if ((last_bridge->ops & DRM_BRIDGE_OP_DETECT) &&
+ (last_bridge->ops & DRM_BRIDGE_OP_EDID)) {
unsigned long pixel_clock_rate = mode->clock * 1000;
unsigned long rounded_rate;
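
The imx93-mipi-dsi change above leans on the scoped-cleanup helpers from <linux/cleanup.h>: __free(drm_bridge_put) drops the bridge reference automatically when last_bridge goes out of scope, which is why no explicit put appears in the function. A generic sketch of the mechanism, with a hypothetical thing type standing in for the drm_bridge_put cleanup class that DRM defines in its headers:

	#include <linux/cleanup.h>

	DEFINE_FREE(thing_put, struct thing *, if (_T) put_thing(_T))

	static void demo(void)
	{
		struct thing *t __free(thing_put) = get_thing();

		/* use t; put_thing() runs automatically on every return path */
	}
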
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index cf813672b4ff..2eb8fba7016c 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -146,6 +146,7 @@
#define HDMI_COLOR_DEPTH_24 FIELD_PREP(HDMI_COLOR_DEPTH, 4)
#define HDMI_REG_PKT_GENERAL_CTRL 0xc6
+#define HDMI_REG_PKT_NULL_CTRL 0xc9
#define HDMI_REG_AVI_INFOFRM_CTRL 0xcd
#define ENABLE_PKT BIT(0)
#define REPEAT_PKT BIT(1)
@@ -154,6 +155,12 @@
* 3) HDMI register bank1: 0x130 ~ 0x1ff (HDMI packet registers)
*/
+/* NULL packet registers */
+/* Header Byte (HB): n = 0 ~ 2 */
+#define HDMI_REG_PKT_HB(n) (0x138 + (n))
+/* Packet Byte (PB): n = 0 ~ 27 (HDMI_MAX_INFOFRAME_SIZE), n = 0 for checksum */
+#define HDMI_REG_PKT_PB(n) (0x13b + (n))
+
/* AVI packet registers */
#define HDMI_REG_AVI_DB1 0x158
#define HDMI_REG_AVI_DB2 0x159
@@ -224,7 +231,9 @@ static bool it6263_hdmi_writeable_reg(struct device *dev, unsigned int reg)
case HDMI_REG_HDMI_MODE:
case HDMI_REG_GCP:
case HDMI_REG_PKT_GENERAL_CTRL:
+ case HDMI_REG_PKT_NULL_CTRL:
case HDMI_REG_AVI_INFOFRM_CTRL:
+ case HDMI_REG_PKT_HB(0) ... HDMI_REG_PKT_PB(HDMI_MAX_INFOFRAME_SIZE):
case HDMI_REG_AVI_DB1:
case HDMI_REG_AVI_DB2:
case HDMI_REG_AVI_DB3:
@@ -755,10 +764,16 @@ static int it6263_hdmi_clear_infoframe(struct drm_bridge *bridge,
{
struct it6263 *it = bridge_to_it6263(bridge);
- if (type == HDMI_INFOFRAME_TYPE_AVI)
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
regmap_write(it->hdmi_regmap, HDMI_REG_AVI_INFOFRM_CTRL, 0);
- else
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ regmap_write(it->hdmi_regmap, HDMI_REG_PKT_NULL_CTRL, 0);
+ break;
+ default:
dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
+ }
return 0;
}
@@ -770,27 +785,36 @@ static int it6263_hdmi_write_infoframe(struct drm_bridge *bridge,
struct it6263 *it = bridge_to_it6263(bridge);
struct regmap *regmap = it->hdmi_regmap;
- if (type != HDMI_INFOFRAME_TYPE_AVI) {
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ /* write the first AVI infoframe data byte chunk (DB1-DB5) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB1,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_AVI_DB_CHUNK1_SIZE);
+
+ /* write the second AVI infoframe data byte chunk (DB6-DB13) */
+ regmap_bulk_write(regmap, HDMI_REG_AVI_DB6,
+ &buffer[HDMI_INFOFRAME_HEADER_SIZE +
+ HDMI_AVI_DB_CHUNK1_SIZE],
+ HDMI_AVI_DB_CHUNK2_SIZE);
+
+ /* write checksum */
+ regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]);
+
+ regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL,
+ ENABLE_PKT | REPEAT_PKT);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ /* write header and payload */
+ regmap_bulk_write(regmap, HDMI_REG_PKT_HB(0), buffer, len);
+
+ regmap_write(regmap, HDMI_REG_PKT_NULL_CTRL,
+ ENABLE_PKT | REPEAT_PKT);
+ break;
+ default:
dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type);
- return 0;
}
- /* write the first AVI infoframe data byte chunk(DB1-DB5) */
- regmap_bulk_write(regmap, HDMI_REG_AVI_DB1,
- &buffer[HDMI_INFOFRAME_HEADER_SIZE],
- HDMI_AVI_DB_CHUNK1_SIZE);
-
- /* write the second AVI infoframe data byte chunk(DB6-DB13) */
- regmap_bulk_write(regmap, HDMI_REG_AVI_DB6,
- &buffer[HDMI_INFOFRAME_HEADER_SIZE +
- HDMI_AVI_DB_CHUNK1_SIZE],
- HDMI_AVI_DB_CHUNK2_SIZE);
-
- /* write checksum */
- regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]);
-
- regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, ENABLE_PKT | REPEAT_PKT);
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 89649c17ffad..a094803ba7aa 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -21,7 +21,7 @@
#include <linux/wait.h>
#include <linux/bitfield.h>
-#include <crypto/hash.h>
+#include <crypto/sha1.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_hdcp_helper.h>
@@ -2107,35 +2107,6 @@ static void it6505_hdcp_part1_auth(struct it6505 *it6505)
it6505->hdcp_status = HDCP_AUTH_GOING;
}
-static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input,
- unsigned int size, u8 *output_av)
-{
- struct shash_desc *desc;
- struct crypto_shash *tfm;
- int err;
- struct device *dev = it6505->dev;
-
- tfm = crypto_alloc_shash("sha1", 0, 0);
- if (IS_ERR(tfm)) {
- dev_err(dev, "crypto_alloc_shash sha1 failed");
- return PTR_ERR(tfm);
- }
- desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
- if (!desc) {
- crypto_free_shash(tfm);
- return -ENOMEM;
- }
-
- desc->tfm = tfm;
- err = crypto_shash_digest(desc, sha1_input, size, output_av);
- if (err)
- dev_err(dev, "crypto_shash_digest sha1 failed");
-
- crypto_free_shash(tfm);
- kfree(desc);
- return err;
-}
-
static int it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input)
{
struct device *dev = it6505->dev;
@@ -2205,7 +2176,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505)
return false;
}
- it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av);
+ sha1(it6505->sha1_input, i, (u8 *)av);
/*1B-05 V' must retry 3 times */
for (retry = 0; retry < 3; retry++) {
err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv,
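
The removed helper's crypto_shash boilerplate collapses into the one-shot SHA-1 library call used at the call site above. A minimal sketch of that library API as this patch uses it (buf and buf_len are placeholders):

	#include <crypto/sha1.h>

	u8 digest[SHA1_DIGEST_SIZE];

	/* synchronous one-shot hash: no allocation and no error path */
	sha1(buf, buf_len, digest);
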
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index aa7b1dcc5d70..0185f61e6e59 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -287,6 +287,7 @@
enum chip_id {
ID_IT6610,
ID_IT66121,
+ ID_IT66122,
};
struct it66121_chip_info {
@@ -312,7 +313,7 @@ struct it66121_ctx {
u8 swl;
bool auto_cts;
} audio;
- const struct it66121_chip_info *info;
+ enum chip_id id;
};
static const struct regmap_range_cfg it66121_regmap_banks[] = {
@@ -402,7 +403,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
- if (ctx->info->id == ID_IT66121) {
+ if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_EC1, 0);
if (ret)
@@ -428,7 +429,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
- if (ctx->info->id == ID_IT66121) {
+ if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_EC1,
IT66121_AFE_IP_EC1);
@@ -449,7 +450,7 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
- if (ctx->info->id == ID_IT6610) {
+ if (ctx->id == ID_IT6610) {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
IT6610_AFE_XP_BYPASS,
IT6610_AFE_XP_BYPASS);
@@ -599,7 +600,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- if (ctx->info->id == ID_IT66121) {
+ if (ctx->id == ID_IT66121 || ctx->id == ID_IT66122) {
ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
IT66121_CLK_BANK_PWROFF_RCLK, 0);
if (ret)
@@ -748,7 +749,7 @@ static int it66121_bridge_check(struct drm_bridge *bridge,
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- if (ctx->info->id == ID_IT6610) {
+ if (ctx->id == ID_IT6610) {
/* The IT6610 only supports these settings */
bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH |
DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
@@ -802,7 +803,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI))
goto unlock;
- if (ctx->info->id == ID_IT66121 &&
+ if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) &&
regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
IT66121_CLK_BANK_PWROFF_TXCLK,
IT66121_CLK_BANK_PWROFF_TXCLK)) {
@@ -815,7 +816,7 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (it66121_configure_afe(ctx, adjusted_mode))
goto unlock;
- if (ctx->info->id == ID_IT66121 &&
+ if ((ctx->id == ID_IT66121 || ctx->id == ID_IT66122) &&
regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
IT66121_CLK_BANK_PWROFF_TXCLK, 0)) {
goto unlock;
@@ -1384,8 +1385,6 @@ static int it66121_audio_startup(struct device *dev, void *data)
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mutex_lock(&ctx->lock);
ret = it661221_audio_output_enable(ctx, true);
if (ret)
@@ -1401,8 +1400,6 @@ static void it66121_audio_shutdown(struct device *dev, void *data)
int ret;
struct it66121_ctx *ctx = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mutex_lock(&ctx->lock);
ret = it661221_audio_output_enable(ctx, false);
if (ret)
@@ -1479,8 +1476,6 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
.no_capture_mute = 1,
};
- dev_dbg(dev, "%s\n", __func__);
-
if (!of_property_present(dev->of_node, "#sound-dai-cells")) {
dev_info(dev, "No \"#sound-dai-cells\", no audio\n");
return 0;
@@ -1504,13 +1499,20 @@ static const char * const it66121_supplies[] = {
"vcn33", "vcn18", "vrf12"
};
+static const struct it66121_chip_info it66xx_chip_info[] = {
+ { .id = ID_IT6610, .vid = 0xca00, .pid = 0x0611 },
+ { .id = ID_IT66121, .vid = 0x4954, .pid = 0x0612 },
+ { .id = ID_IT66122, .vid = 0x4954, .pid = 0x0622 },
+};
+
static int it66121_probe(struct i2c_client *client)
{
u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
- int ret;
+ int ret, i;
struct it66121_ctx *ctx;
struct device *dev = &client->dev;
+ const struct it66121_chip_info *chip_info;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(dev, "I2C check functionality failed.\n");
@@ -1528,7 +1530,6 @@ static int it66121_probe(struct i2c_client *client)
ctx->dev = dev;
ctx->client = client;
- ctx->info = i2c_get_match_data(client);
of_property_read_u32(ep, "bus-width", &ctx->bus_width);
of_node_put(ep);
@@ -1574,11 +1575,18 @@ static int it66121_probe(struct i2c_client *client)
revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]);
device_ids[1] &= IT66121_DEVICE_ID1_MASK;
- if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid ||
- (device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) {
- return -ENODEV;
+ for (i = 0; i < ARRAY_SIZE(it66xx_chip_info); i++) {
+ chip_info = &it66xx_chip_info[i];
+ if ((vendor_ids[1] << 8 | vendor_ids[0]) == chip_info->vid &&
+ (device_ids[1] << 8 | device_ids[0]) == chip_info->pid) {
+ ctx->id = chip_info->id;
+ break;
+ }
}
+ if (i == ARRAY_SIZE(it66xx_chip_info))
+ return -ENODEV;
+
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
@@ -1612,28 +1620,18 @@ static void it66121_remove(struct i2c_client *client)
mutex_destroy(&ctx->lock);
}
-static const struct it66121_chip_info it66121_chip_info = {
- .id = ID_IT66121,
- .vid = 0x4954,
- .pid = 0x0612,
-};
-
-static const struct it66121_chip_info it6610_chip_info = {
- .id = ID_IT6610,
- .vid = 0xca00,
- .pid = 0x0611,
-};
-
static const struct of_device_id it66121_dt_match[] = {
- { .compatible = "ite,it66121", &it66121_chip_info },
- { .compatible = "ite,it6610", &it6610_chip_info },
+ { .compatible = "ite,it6610" },
+ { .compatible = "ite,it66121" },
+ { .compatible = "ite,it66122" },
{ }
};
MODULE_DEVICE_TABLE(of, it66121_dt_match);
static const struct i2c_device_id it66121_id[] = {
- { "it66121", (kernel_ulong_t) &it66121_chip_info },
- { "it6610", (kernel_ulong_t) &it6610_chip_info },
+ { .name = "it6610" },
+ { .name = "it66121" },
+ { .name = "it66122" },
{ }
};
MODULE_DEVICE_TABLE(i2c, it66121_id);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 399fa7eebd49..03fc8fd10f20 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -121,8 +121,7 @@ static int lt9211_read_chipid(struct lt9211 *ctx)
}
/* Test for known Chip ID. */
- if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE ||
- chipid[2] != REG_CHIPID2_VALUE) {
+ if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE) {
dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n",
chipid[0], chipid[1], chipid[2]);
return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index b5dd71f6a990..eabc4c32f6ab 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -31,11 +31,10 @@
/* returns true iff both arguments logically differ */
#define NEQV(a, b) (!(a) ^ !(b))
-/* DSIM_STATUS */
+/* DSIM_STATUS or DSIM_DPHY_STATUS */
#define DSIM_STOP_STATE_DAT(x) (((x) & 0xf) << 0)
#define DSIM_STOP_STATE_CLK BIT(8)
#define DSIM_TX_READY_HS_CLK BIT(10)
-#define DSIM_PLL_STABLE BIT(31)
/* DSIM_SWRST */
#define DSIM_FUNCRST BIT(16)
@@ -46,17 +45,13 @@
#define DSIM_BTA_TIMEOUT(x) ((x) << 16)
/* DSIM_CLKCTRL */
-#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
-#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
-#define DSIM_LANE_ESC_CLK_EN_CLK BIT(19)
-#define DSIM_LANE_ESC_CLK_EN_DATA(x) (((x) & 0xf) << 20)
-#define DSIM_LANE_ESC_CLK_EN_DATA_MASK (0xf << 20)
-#define DSIM_BYTE_CLKEN BIT(24)
-#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
-#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
-#define DSIM_PLL_BYPASS BIT(27)
-#define DSIM_ESC_CLKEN BIT(28)
-#define DSIM_TX_REQUEST_HSCLK BIT(31)
+#define DSIM_ESC_PRESCALER(x) (((x) & 0xffff) << 0)
+#define DSIM_ESC_PRESCALER_MASK (0xffff << 0)
+#define DSIM_LANE_ESC_CLK_EN_DATA(x, offset) (((x) & 0xf) << offset)
+#define DSIM_LANE_ESC_CLK_EN_DATA_MASK(offset) (0xf << offset)
+#define DSIM_BYTE_CLK_SRC(x) (((x) & 0x3) << 25)
+#define DSIM_BYTE_CLK_SRC_MASK (0x3 << 25)
+#define DSIM_PLL_BYPASS BIT(27)
/* DSIM_CONFIG */
#define DSIM_LANE_EN_CLK BIT(0)
@@ -91,7 +86,6 @@
*/
#define DSIM_HSE_DISABLE_MODE BIT(23)
#define DSIM_AUTO_MODE BIT(24)
-#define DSIM_VIDEO_MODE BIT(25)
#define DSIM_BURST_MODE BIT(26)
#define DSIM_SYNC_INFORM BIT(27)
#define DSIM_EOT_DISABLE BIT(28)
@@ -129,9 +123,9 @@
#define DSIM_MAIN_HBP_MASK ((0xffff) << 0)
/* DSIM_MSYNC */
-#define DSIM_MAIN_VSA(x) ((x) << 22)
+#define DSIM_MAIN_VSA(x, offset) ((x) << offset)
#define DSIM_MAIN_HSA(x) ((x) << 0)
-#define DSIM_MAIN_VSA_MASK ((0x3ff) << 22)
+#define DSIM_MAIN_VSA_MASK(offset) ((0x3ff) << offset)
#define DSIM_MAIN_HSA_MASK ((0xffff) << 0)
/* DSIM_SDRESOL */
@@ -157,6 +151,11 @@
#define DSIM_INT_RX_ECC_ERR BIT(15)
#define DSIM_INT_RX_CRC_ERR BIT(14)
+/* DSIM_SFRCTRL */
+#define DSIM_SFR_CTRL_STAND_BY BIT(4)
+#define DSIM_SFR_CTRL_SHADOW_UPDATE BIT(1)
+#define DSIM_SFR_CTRL_SHADOW_EN BIT(0)
+
/* DSIM_FIFOCTRL */
#define DSIM_RX_DATA_FULL BIT(25)
#define DSIM_RX_DATA_EMPTY BIT(24)
@@ -191,9 +190,7 @@
#define DSIM_PLL_DPDNSWAP_DAT (1 << 24)
#define DSIM_FREQ_BAND(x) ((x) << 24)
#define DSIM_PLL_EN BIT(23)
-#define DSIM_PLL_P(x, offset) ((x) << (offset))
-#define DSIM_PLL_M(x) ((x) << 4)
-#define DSIM_PLL_S(x) ((x) << 1)
+#define DSIM_PLL(x, offset) ((x) << (offset))
/* DSIM_PHYCTRL */
#define DSIM_PHYCTRL_ULPS_EXIT(x) (((x) & 0x1ff) << 0)
@@ -222,25 +219,42 @@
#define DSI_XFER_TIMEOUT_MS 100
#define DSI_RX_FIFO_EMPTY 0x30800002
-#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-
#define PS_TO_CYCLE(ps, hz) DIV64_U64_ROUND_CLOSEST(((ps) * (hz)), 1000000000000ULL)
-static const char *const clk_names[5] = {
- "bus_clk",
- "sclk_mipi",
- "phyclk_mipidphy0_bitclkdiv8",
- "phyclk_mipidphy0_rxclkesc0",
- "sclk_rgb_vclk_to_dsim0"
-};
-
enum samsung_dsim_transfer_type {
EXYNOS_DSI_TX,
EXYNOS_DSI_RX,
};
+static struct clk_bulk_data exynos3_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "pll_clk" },
+};
+
+static struct clk_bulk_data exynos4_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "sclk_mipi" },
+};
+
+static struct clk_bulk_data exynos5433_clk_bulk_data[] = {
+ { .id = "bus_clk" },
+ { .id = "sclk_mipi" },
+ { .id = "phyclk_mipidphy0_bitclkdiv8" },
+ { .id = "phyclk_mipidphy0_rxclkesc0" },
+ { .id = "sclk_rgb_vclk_to_dsim0" },
+};
+
+static struct clk_bulk_data exynos7870_clk_bulk_data[] = {
+ { .id = "bus" },
+ { .id = "pll" },
+ { .id = "byte" },
+ { .id = "esc" },
+};
+
enum reg_idx {
- DSIM_STATUS_REG, /* Status register */
+ DSIM_STATUS_REG, /* Status register (legacy) */
+ DSIM_LINK_STATUS_REG, /* Link status register */
+ DSIM_DPHY_STATUS_REG, /* D-PHY status register */
DSIM_SWRST_REG, /* Software reset register */
DSIM_CLKCTRL_REG, /* Clock control register */
DSIM_TIMEOUT_REG, /* Time out register */
@@ -255,6 +269,7 @@ enum reg_idx {
DSIM_PKTHDR_REG, /* Packet Header FIFO register */
DSIM_PAYLOAD_REG, /* Payload FIFO register */
DSIM_RXFIFO_REG, /* Read FIFO register */
+ DSIM_SFRCTRL_REG, /* SFR standby and shadow control register */
DSIM_FIFOCTRL_REG, /* FIFO status and control register */
DSIM_PLLCTRL_REG, /* PLL control register */
DSIM_PHYCTRL_REG,
@@ -312,6 +327,32 @@ static const unsigned int exynos5433_reg_ofs[] = {
[DSIM_PHYTIMING2_REG] = 0xBC,
};
+static const unsigned int exynos7870_reg_ofs[] = {
+ [DSIM_LINK_STATUS_REG] = 0x04,
+ [DSIM_DPHY_STATUS_REG] = 0x08,
+ [DSIM_SWRST_REG] = 0x0C,
+ [DSIM_CLKCTRL_REG] = 0x10,
+ [DSIM_TIMEOUT_REG] = 0x14,
+ [DSIM_ESCMODE_REG] = 0x1C,
+ [DSIM_MDRESOL_REG] = 0x20,
+ [DSIM_MVPORCH_REG] = 0x24,
+ [DSIM_MHPORCH_REG] = 0x28,
+ [DSIM_MSYNC_REG] = 0x2C,
+ [DSIM_CONFIG_REG] = 0x30,
+ [DSIM_INTSRC_REG] = 0x34,
+ [DSIM_INTMSK_REG] = 0x38,
+ [DSIM_PKTHDR_REG] = 0x3C,
+ [DSIM_PAYLOAD_REG] = 0x40,
+ [DSIM_RXFIFO_REG] = 0x44,
+ [DSIM_SFRCTRL_REG] = 0x48,
+ [DSIM_FIFOCTRL_REG] = 0x4C,
+ [DSIM_PLLCTRL_REG] = 0x94,
+ [DSIM_PHYCTRL_REG] = 0xA4,
+ [DSIM_PHYTIMING_REG] = 0xB4,
+ [DSIM_PHYTIMING1_REG] = 0xB8,
+ [DSIM_PHYTIMING2_REG] = 0xBC,
+};
+
enum reg_value_idx {
RESET_TYPE,
PLL_TIMER,
@@ -384,6 +425,24 @@ static const unsigned int exynos5433_reg_values[] = {
[PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
};
+static const unsigned int exynos7870_reg_values[] = {
+ [RESET_TYPE] = DSIM_SWRST,
+ [PLL_TIMER] = 80000,
+ [STOP_STATE_CNT] = 0xa,
+ [PHYCTRL_ULPS_EXIT] = DSIM_PHYCTRL_ULPS_EXIT(0x177),
+ [PHYCTRL_VREG_LP] = 0,
+ [PHYCTRL_SLEW_UP] = 0,
+ [PHYTIMING_LPX] = DSIM_PHYTIMING_LPX(0x07),
+ [PHYTIMING_HS_EXIT] = DSIM_PHYTIMING_HS_EXIT(0x0c),
+ [PHYTIMING_CLK_PREPARE] = DSIM_PHYTIMING1_CLK_PREPARE(0x08),
+ [PHYTIMING_CLK_ZERO] = DSIM_PHYTIMING1_CLK_ZERO(0x2b),
+ [PHYTIMING_CLK_POST] = DSIM_PHYTIMING1_CLK_POST(0x0d),
+ [PHYTIMING_CLK_TRAIL] = DSIM_PHYTIMING1_CLK_TRAIL(0x09),
+ [PHYTIMING_HS_PREPARE] = DSIM_PHYTIMING2_HS_PREPARE(0x09),
+ [PHYTIMING_HS_ZERO] = DSIM_PHYTIMING2_HS_ZERO(0x0f),
+ [PHYTIMING_HS_TRAIL] = DSIM_PHYTIMING2_HS_TRAIL(0x0c),
+};
+
static const unsigned int imx8mm_dsim_reg_values[] = {
[RESET_TYPE] = DSIM_SWRST,
[PLL_TIMER] = 500,
@@ -405,13 +464,26 @@ static const unsigned int imx8mm_dsim_reg_values[] = {
static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
+ .has_legacy_status_reg = 1,
.has_freqband = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -424,13 +496,26 @@ static const struct samsung_dsim_driver_data exynos3_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x50,
+ .has_legacy_status_reg = 1,
.has_freqband = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos4_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos4_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -443,11 +528,24 @@ static const struct samsung_dsim_driver_data exynos4_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
.reg_ofs = exynos_reg_ofs,
.plltmr_reg = 0x58,
- .num_clks = 2,
+ .has_legacy_status_reg = 1,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1000,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 11,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -459,12 +557,25 @@ static const struct samsung_dsim_driver_data exynos5_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 5,
+ .clk_data = exynos5433_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos5433_clk_bulk_data),
.max_freq = 1500,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 0,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = exynos5433_reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -476,12 +587,25 @@ static const struct samsung_dsim_driver_data exynos5433_dsi_driver_data = {
static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos3_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos3_clk_bulk_data),
.max_freq = 1500,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 1,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
.pll_p_offset = 13,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = exynos5422_reg_values,
.pll_fin_min = 6,
.pll_fin_max = 12,
@@ -490,19 +614,62 @@ static const struct samsung_dsim_driver_data exynos5422_dsi_driver_data = {
.min_freq = 500,
};
+static const struct samsung_dsim_driver_data exynos7870_dsi_driver_data = {
+ .reg_ofs = exynos7870_reg_ofs,
+ .plltmr_reg = 0xa0,
+ .has_clklane_stop = 1,
+ .has_sfrctrl = 1,
+ .clk_data = exynos7870_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos7870_clk_bulk_data),
+ .max_freq = 1500,
+ .wait_for_hdr_fifo = 0,
+ .wait_for_reset = 1,
+ .num_bits_resol = 12,
+ .video_mode_bit = 18,
+ .pll_stable_bit = 24,
+ .esc_clken_bit = 16,
+ .byte_clken_bit = 17,
+ .tx_req_hsclk_bit = 20,
+ .lane_esc_clk_bit = 8,
+ .lane_esc_data_offset = 9,
+ .pll_p_offset = 13,
+ .pll_m_offset = 3,
+ .pll_s_offset = 0,
+ .main_vsa_offset = 16,
+ .reg_values = exynos7870_reg_values,
+ .pll_fin_min = 6,
+ .pll_fin_max = 12,
+ .m_min = 41,
+ .m_max = 125,
+ .min_freq = 500,
+};
+
static const struct samsung_dsim_driver_data imx8mm_dsi_driver_data = {
.reg_ofs = exynos5433_reg_ofs,
.plltmr_reg = 0xa0,
+ .has_legacy_status_reg = 1,
.has_clklane_stop = 1,
- .num_clks = 2,
+ .clk_data = exynos4_clk_bulk_data,
+ .num_clks = ARRAY_SIZE(exynos4_clk_bulk_data),
.max_freq = 2100,
+ .wait_for_hdr_fifo = 1,
.wait_for_reset = 0,
.num_bits_resol = 12,
+ .video_mode_bit = 25,
+ .pll_stable_bit = 31,
+ .esc_clken_bit = 28,
+ .byte_clken_bit = 24,
+ .tx_req_hsclk_bit = 31,
+ .lane_esc_clk_bit = 19,
+ .lane_esc_data_offset = 20,
/*
* Unlike Exynos, PLL_P(PMS_P) offset 14 is used in i.MX8M Mini/Nano/Plus
* downstream driver - drivers/gpu/drm/bridge/sec-dsim.c
*/
.pll_p_offset = 14,
+ .pll_m_offset = 4,
+ .pll_s_offset = 1,
+ .main_vsa_offset = 22,
.reg_values = imx8mm_dsim_reg_values,
.pll_fin_min = 2,
.pll_fin_max = 30,
@@ -518,6 +685,7 @@ samsung_dsim_types[DSIM_TYPE_COUNT] = {
[DSIM_TYPE_EXYNOS5410] = &exynos5_dsi_driver_data,
[DSIM_TYPE_EXYNOS5422] = &exynos5422_dsi_driver_data,
[DSIM_TYPE_EXYNOS5433] = &exynos5433_dsi_driver_data,
+ [DSIM_TYPE_EXYNOS7870] = &exynos7870_dsi_driver_data,
[DSIM_TYPE_IMX8MM] = &imx8mm_dsi_driver_data,
[DSIM_TYPE_IMX8MP] = &imx8mm_dsi_driver_data,
};
@@ -653,8 +821,9 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
writel(driver_data->reg_values[PLL_TIMER],
dsi->reg_base + driver_data->plltmr_reg);
- reg = DSIM_PLL_EN | DSIM_PLL_P(p, driver_data->pll_p_offset) |
- DSIM_PLL_M(m) | DSIM_PLL_S(s);
+ reg = DSIM_PLL_EN | DSIM_PLL(p, driver_data->pll_p_offset)
+ | DSIM_PLL(m, driver_data->pll_m_offset)
+ | DSIM_PLL(s, driver_data->pll_s_offset);
if (driver_data->has_freqband) {
static const unsigned long freq_bands[] = {
@@ -682,14 +851,17 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
samsung_dsim_write(dsi, DSIM_PLLCTRL_REG, reg);
- timeout = 1000;
+ timeout = 3000;
do {
if (timeout-- == 0) {
dev_err(dsi->dev, "PLL failed to stabilize\n");
return 0;
}
- reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
- } while ((reg & DSIM_PLL_STABLE) == 0);
+ if (driver_data->has_legacy_status_reg)
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ else
+ reg = samsung_dsim_read(dsi, DSIM_LINK_STATUS_REG);
+ } while ((reg & BIT(driver_data->pll_stable_bit)) == 0);
dsi->hs_clock = fout;
@@ -698,6 +870,7 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi,
static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
unsigned long hs_clk, byte_clk, esc_clk, pix_clk;
unsigned long esc_div;
u32 reg;
@@ -731,15 +904,17 @@ static int samsung_dsim_enable_clock(struct samsung_dsim *dsi)
hs_clk, byte_clk, esc_clk);
reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_ESC_PRESCALER_MASK | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA_MASK | DSIM_PLL_BYPASS
- | DSIM_BYTE_CLK_SRC_MASK);
- reg |= DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN
- | DSIM_ESC_PRESCALER(esc_div)
- | DSIM_LANE_ESC_CLK_EN_CLK
- | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1)
- | DSIM_BYTE_CLK_SRC(0)
- | DSIM_TX_REQUEST_HSCLK;
+ reg &= ~(DSIM_ESC_PRESCALER_MASK | BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK(driver_data->lane_esc_data_offset)
+ | DSIM_PLL_BYPASS
+ | DSIM_BYTE_CLK_SRC_MASK);
+ reg |= BIT(driver_data->esc_clken_bit) | BIT(driver_data->byte_clken_bit)
+ | DSIM_ESC_PRESCALER(esc_div)
+ | BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA(BIT(dsi->lanes) - 1,
+ driver_data->lane_esc_data_offset)
+ | DSIM_BYTE_CLK_SRC(0)
+ | BIT(driver_data->tx_req_hsclk_bit);
samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
return 0;
@@ -843,11 +1018,14 @@ static void samsung_dsim_set_phy_ctrl(struct samsung_dsim *dsi)
static void samsung_dsim_disable_clock(struct samsung_dsim *dsi)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
u32 reg;
reg = samsung_dsim_read(dsi, DSIM_CLKCTRL_REG);
- reg &= ~(DSIM_LANE_ESC_CLK_EN_CLK | DSIM_LANE_ESC_CLK_EN_DATA_MASK
- | DSIM_ESC_CLKEN | DSIM_BYTE_CLKEN);
+ reg &= ~(BIT(driver_data->lane_esc_clk_bit)
+ | DSIM_LANE_ESC_CLK_EN_DATA_MASK(driver_data->lane_esc_data_offset)
+ | BIT(driver_data->esc_clken_bit)
+ | BIT(driver_data->byte_clken_bit));
samsung_dsim_write(dsi, DSIM_CLKCTRL_REG, reg);
reg = samsung_dsim_read(dsi, DSIM_PLLCTRL_REG);
@@ -891,7 +1069,7 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
* mode, otherwise it will support command mode.
*/
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- reg |= DSIM_VIDEO_MODE;
+ reg |= BIT(driver_data->video_mode_bit);
/*
* The user manual describes that following bits are ignored in
@@ -962,7 +1140,10 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
return -EFAULT;
}
- reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ if (driver_data->has_legacy_status_reg)
+ reg = samsung_dsim_read(dsi, DSIM_STATUS_REG);
+ else
+ reg = samsung_dsim_read(dsi, DSIM_DPHY_STATUS_REG);
if ((reg & DSIM_STOP_STATE_DAT(lanes_mask))
!= DSIM_STOP_STATE_DAT(lanes_mask))
continue;
@@ -983,6 +1164,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
{
struct drm_display_mode *m = &dsi->mode;
unsigned int num_bits_resol = dsi->driver_data->num_bits_resol;
+ unsigned int main_vsa_offset = dsi->driver_data->main_vsa_offset;
u32 reg;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
@@ -1009,7 +1191,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
reg = DSIM_MAIN_HFP(hfp) | DSIM_MAIN_HBP(hbp);
samsung_dsim_write(dsi, DSIM_MHPORCH_REG, reg);
- reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start)
+ reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start, main_vsa_offset)
| DSIM_MAIN_HSA(hsa);
samsung_dsim_write(dsi, DSIM_MSYNC_REG, reg);
}
@@ -1023,6 +1205,7 @@ static void samsung_dsim_set_display_mode(struct samsung_dsim *dsi)
static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enable)
{
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
u32 reg;
reg = samsung_dsim_read(dsi, DSIM_MDRESOL_REG);
@@ -1031,6 +1214,15 @@ static void samsung_dsim_set_display_enable(struct samsung_dsim *dsi, bool enabl
else
reg &= ~DSIM_MAIN_STAND_BY;
samsung_dsim_write(dsi, DSIM_MDRESOL_REG, reg);
+
+ if (driver_data->has_sfrctrl) {
+ reg = samsung_dsim_read(dsi, DSIM_SFRCTRL_REG);
+ if (enable)
+ reg |= DSIM_SFR_CTRL_STAND_BY;
+ else
+ reg &= ~DSIM_SFR_CTRL_STAND_BY;
+ samsung_dsim_write(dsi, DSIM_SFRCTRL_REG, reg);
+ }
}
static int samsung_dsim_wait_for_hdr_fifo(struct samsung_dsim *dsi)
@@ -1087,6 +1279,7 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
{
struct device *dev = dsi->dev;
struct mipi_dsi_packet *pkt = &xfer->packet;
+ const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
const u8 *payload = pkt->payload + xfer->tx_done;
u16 length = pkt->payload_length - xfer->tx_done;
bool first = !xfer->tx_done;
@@ -1127,9 +1320,11 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi,
return;
reg = get_unaligned_le32(pkt->header);
- if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
- dev_err(dev, "waiting for header FIFO timed out\n");
- return;
+ if (driver_data->wait_for_hdr_fifo) {
+ if (samsung_dsim_wait_for_hdr_fifo(dsi)) {
+ dev_err(dev, "waiting for header FIFO timed out\n");
+ return;
+ }
}
if (NEQV(xfer->flags & MIPI_DSI_MSG_USE_LPM,
@@ -1922,7 +2117,7 @@ int samsung_dsim_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct samsung_dsim *dsi;
- int ret, i;
+ int ret;
dsi = devm_drm_bridge_alloc(dev, struct samsung_dsim, bridge, &samsung_dsim_bridge_funcs);
if (IS_ERR(dsi))
@@ -1946,23 +2141,11 @@ int samsung_dsim_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- dsi->clks = devm_kcalloc(dev, dsi->driver_data->num_clks,
- sizeof(*dsi->clks), GFP_KERNEL);
- if (!dsi->clks)
- return -ENOMEM;
-
- for (i = 0; i < dsi->driver_data->num_clks; i++) {
- dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
- if (IS_ERR(dsi->clks[i])) {
- if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- dsi->clks[i] = devm_clk_get(dev, OLD_SCLK_MIPI_CLK_NAME);
- if (!IS_ERR(dsi->clks[i]))
- continue;
- }
-
- dev_info(dev, "failed to get the clock: %s\n", clk_names[i]);
- return PTR_ERR(dsi->clks[i]);
- }
+ ret = devm_clk_bulk_get(dev, dsi->driver_data->num_clks,
+ dsi->driver_data->clk_data);
+ if (ret) {
+ dev_err(dev, "failed to get clocks in bulk (%d)\n", ret);
+ return ret;
}
dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
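
The open-coded per-clock loop (including the pll_clk fallback name) gives way to per-variant clk_bulk_data tables plus the clk_bulk API. A minimal standalone sketch of the pattern, with hypothetical clock names:

	#include <linux/clk.h>

	static struct clk_bulk_data clks[] = {
		{ .id = "bus" },
		{ .id = "pll" },
	};

	static int enable_clocks(struct device *dev)
	{
		int ret;

		ret = devm_clk_bulk_get(dev, ARRAY_SIZE(clks), clks);
		if (ret)
			return ret;

		/* prepares and enables every entry, unwinding on failure */
		return clk_bulk_prepare_enable(ARRAY_SIZE(clks), clks);
	}
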
@@ -2035,7 +2218,7 @@ static int samsung_dsim_suspend(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
- int ret, i;
+ int ret;
usleep_range(10000, 20000);
@@ -2051,8 +2234,7 @@ static int samsung_dsim_suspend(struct device *dev)
phy_power_off(dsi->phy);
- for (i = driver_data->num_clks - 1; i > -1; i--)
- clk_disable_unprepare(dsi->clks[i]);
+ clk_bulk_disable_unprepare(driver_data->num_clks, driver_data->clk_data);
ret = regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
if (ret < 0)
@@ -2065,7 +2247,7 @@ static int samsung_dsim_resume(struct device *dev)
{
struct samsung_dsim *dsi = dev_get_drvdata(dev);
const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
- int ret, i;
+ int ret;
ret = regulator_bulk_enable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
if (ret < 0) {
@@ -2073,11 +2255,9 @@ static int samsung_dsim_resume(struct device *dev)
return ret;
}
- for (i = 0; i < driver_data->num_clks; i++) {
- ret = clk_prepare_enable(dsi->clks[i]);
- if (ret < 0)
- goto err_clk;
- }
+ ret = clk_bulk_prepare_enable(driver_data->num_clks, driver_data->clk_data);
+ if (ret < 0)
+ goto err_clk;
ret = phy_power_on(dsi->phy);
if (ret < 0) {
@@ -2088,8 +2268,7 @@ static int samsung_dsim_resume(struct device *dev)
return 0;
err_clk:
- while (--i > -1)
- clk_disable_unprepare(dsi->clks[i]);
+ clk_bulk_disable_unprepare(driver_data->num_clks, driver_data->clk_data);
regulator_bulk_disable(ARRAY_SIZE(dsi->supplies), dsi->supplies);
return ret;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index d537b1d036fb..1f0aba28ad1e 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -179,7 +179,6 @@ struct sii902x {
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
- bool sink_is_hdmi;
u32 bus_width;
/*
@@ -315,8 +314,6 @@ static int sii902x_get_modes(struct drm_connector *connector)
drm_edid_free(drm_edid);
}
- sii902x->sink_is_hdmi = connector->display_info.is_hdmi;
-
return num;
}
@@ -342,9 +339,17 @@ static void sii902x_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
+ struct drm_connector *connector;
+ u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (connector && connector->display_info.is_hdmi)
+ output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI;
mutex_lock(&sii902x->mutex);
+ regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
+ SII902X_SYS_CTRL_OUTPUT_MODE, output_mode);
regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL,
SII902X_AVI_POWER_STATE_MSK,
SII902X_AVI_POWER_STATE_D(0));
@@ -359,16 +364,12 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *adj)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
- u8 output_mode = SII902X_SYS_CTRL_OUTPUT_DVI;
struct regmap *regmap = sii902x->regmap;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
u16 pixel_clock_10kHz = adj->clock / 10;
int ret;
- if (sii902x->sink_is_hdmi)
- output_mode = SII902X_SYS_CTRL_OUTPUT_HDMI;
-
buf[0] = pixel_clock_10kHz & 0xff;
buf[1] = pixel_clock_10kHz >> 8;
buf[2] = drm_mode_vrefresh(adj);
@@ -384,11 +385,6 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
mutex_lock(&sii902x->mutex);
- ret = regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
- SII902X_SYS_CTRL_OUTPUT_MODE, output_mode);
- if (ret)
- goto out;
-
ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index 3d15ddd39470..2cd1847ba776 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -262,6 +262,26 @@ static const struct of_device_id simple_bridge_match[] = {
.connector_type = DRM_MODE_CONNECTOR_VGA,
},
}, {
+ .compatible = "asl-tek,cs5263",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
+ .compatible = "parade,ps185hdm",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
+ .compatible = "radxa,ra620",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
+ .compatible = "realtek,rtd2171",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIA,
+ },
+ }, {
.compatible = "ti,opa362",
.data = &(const struct simple_bridge_info) {
.connector_type = DRM_MODE_CONNECTOR_Composite,
diff --git a/drivers/gpu/drm/bridge/ssd2825.c b/drivers/gpu/drm/bridge/ssd2825.c
new file mode 100644
index 000000000000..f2fdbf7c117d
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ssd2825.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+
+#define SSD2825_DEVICE_ID_REG 0xb0
+#define SSD2825_RGB_INTERFACE_CTRL_REG_1 0xb1
+#define SSD2825_RGB_INTERFACE_CTRL_REG_2 0xb2
+#define SSD2825_RGB_INTERFACE_CTRL_REG_3 0xb3
+#define SSD2825_RGB_INTERFACE_CTRL_REG_4 0xb4
+#define SSD2825_RGB_INTERFACE_CTRL_REG_5 0xb5
+#define SSD2825_RGB_INTERFACE_CTRL_REG_6 0xb6
+#define SSD2825_NON_BURST_EV BIT(2)
+#define SSD2825_BURST BIT(3)
+#define SSD2825_PCKL_HIGH BIT(13)
+#define SSD2825_HSYNC_HIGH BIT(14)
+#define SSD2825_VSYNC_HIGH BIT(15)
+#define SSD2825_CONFIGURATION_REG 0xb7
+#define SSD2825_CONF_REG_HS BIT(0)
+#define SSD2825_CONF_REG_CKE BIT(1)
+#define SSD2825_CONF_REG_SLP BIT(2)
+#define SSD2825_CONF_REG_VEN BIT(3)
+#define SSD2825_CONF_REG_HCLK BIT(4)
+#define SSD2825_CONF_REG_CSS BIT(5)
+#define SSD2825_CONF_REG_DCS BIT(6)
+#define SSD2825_CONF_REG_REN BIT(7)
+#define SSD2825_CONF_REG_ECD BIT(8)
+#define SSD2825_CONF_REG_EOT BIT(9)
+#define SSD2825_CONF_REG_LPE BIT(10)
+#define SSD2825_VC_CTRL_REG 0xb8
+#define SSD2825_PLL_CTRL_REG 0xb9
+#define SSD2825_PLL_CONFIGURATION_REG 0xba
+#define SSD2825_CLOCK_CTRL_REG 0xbb
+#define SSD2825_PACKET_SIZE_CTRL_REG_1 0xbc
+#define SSD2825_PACKET_SIZE_CTRL_REG_2 0xbd
+#define SSD2825_PACKET_SIZE_CTRL_REG_3 0xbe
+#define SSD2825_PACKET_DROP_REG 0xbf
+#define SSD2825_OPERATION_CTRL_REG 0xc0
+#define SSD2825_MAX_RETURN_SIZE_REG 0xc1
+#define SSD2825_RETURN_DATA_COUNT_REG 0xc2
+#define SSD2825_ACK_RESPONSE_REG 0xc3
+#define SSD2825_LINE_CTRL_REG 0xc4
+#define SSD2825_INTERRUPT_CTRL_REG 0xc5
+#define SSD2825_INTERRUPT_STATUS_REG 0xc6
+#define SSD2825_ERROR_STATUS_REG 0xc7
+#define SSD2825_DATA_FORMAT_REG 0xc8
+#define SSD2825_DELAY_ADJ_REG_1 0xc9
+#define SSD2825_DELAY_ADJ_REG_2 0xca
+#define SSD2825_DELAY_ADJ_REG_3 0xcb
+#define SSD2825_DELAY_ADJ_REG_4 0xcc
+#define SSD2825_DELAY_ADJ_REG_5 0xcd
+#define SSD2825_DELAY_ADJ_REG_6 0xce
+#define SSD2825_HS_TX_TIMER_REG_1 0xcf
+#define SSD2825_HS_TX_TIMER_REG_2 0xd0
+#define SSD2825_LP_RX_TIMER_REG_1 0xd1
+#define SSD2825_LP_RX_TIMER_REG_2 0xd2
+#define SSD2825_TE_STATUS_REG 0xd3
+#define SSD2825_SPI_READ_REG 0xd4
+#define SSD2825_SPI_READ_REG_RESET 0xfa
+#define SSD2825_PLL_LOCK_REG 0xd5
+#define SSD2825_TEST_REG 0xd6
+#define SSD2825_TE_COUNT_REG 0xd7
+#define SSD2825_ANALOG_CTRL_REG_1 0xd8
+#define SSD2825_ANALOG_CTRL_REG_2 0xd9
+#define SSD2825_ANALOG_CTRL_REG_3 0xda
+#define SSD2825_ANALOG_CTRL_REG_4 0xdb
+#define SSD2825_INTERRUPT_OUT_CTRL_REG 0xdc
+#define SSD2825_RGB_INTERFACE_CTRL_REG_7 0xdd
+#define SSD2825_LANE_CONFIGURATION_REG 0xde
+#define SSD2825_DELAY_ADJ_REG_7 0xdf
+#define SSD2825_INPUT_PIN_CTRL_REG_1 0xe0
+#define SSD2825_INPUT_PIN_CTRL_REG_2 0xe1
+#define SSD2825_BIDIR_PIN_CTRL_REG_1 0xe2
+#define SSD2825_BIDIR_PIN_CTRL_REG_2 0xe3
+#define SSD2825_BIDIR_PIN_CTRL_REG_3 0xe4
+#define SSD2825_BIDIR_PIN_CTRL_REG_4 0xe5
+#define SSD2825_BIDIR_PIN_CTRL_REG_5 0xe6
+#define SSD2825_BIDIR_PIN_CTRL_REG_6 0xe7
+#define SSD2825_BIDIR_PIN_CTRL_REG_7 0xe8
+#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_1 0xe9
+#define SSD2825_CABC_BRIGHTNESS_CTRL_REG_2 0xea
+#define SSD2825_CABC_BRIGHTNESS_STATUS_REG 0xeb
+#define SSD2825_READ_REG 0xff
+
+#define SSD2825_COM_BYTE 0x00
+#define SSD2825_DAT_BYTE 0x01
+
+#define SSD2828_LP_CLOCK_DIVIDER(n) (((n) - 1) & 0x3f)
+#define SSD2825_LP_MIN_CLK 5000 /* kHz */
+#define SSD2825_REF_MIN_CLK 2000 /* kHz */
+
+static const struct regulator_bulk_data ssd2825_supplies[] = {
+ { .supply = "dvdd" },
+ { .supply = "avdd" },
+ { .supply = "vddio" },
+};
+
+struct ssd2825_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+};
+
+struct ssd2825_priv {
+ struct spi_device *spi;
+ struct device *dev;
+
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+
+ struct clk *tx_clk;
+
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct ssd2825_dsi_output output;
+
+ struct mutex mlock; /* for host transfer operations */
+
+ u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ u32 dsi_lanes; /* number of DSI Lanes */
+
+ /* Parameters for PLL programming */
+ u32 pll_freq_kbps; /* PLL in kbps */
+ u32 nibble_freq_khz; /* PLL div by 4 */
+
+ u32 hzd; /* HS Zero Delay in ns */
+ u32 hpd; /* HS Prepare Delay in ns */
+};
+
+static inline struct ssd2825_priv *dsi_host_to_ssd2825(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct ssd2825_priv, dsi_host);
+}
+
+static inline struct ssd2825_priv *bridge_to_ssd2825(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ssd2825_priv, bridge);
+}
+
+static int ssd2825_write_raw(struct ssd2825_priv *priv, u8 high_byte, u8 low_byte)
+{
+ struct spi_device *spi = priv->spi;
+ u8 tx_buf[2];
+
+ /*
+ * The low byte is the value; the high byte defines the type of
+ * write cycle: 0 for a command and 1 for data.
+ */
+ tx_buf[0] = low_byte;
+ tx_buf[1] = high_byte;
+
+ return spi_write(spi, tx_buf, 2);
+}
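
A framing note, inferred from the 9-bit transfer in ssd2825_read_raw() further down (so an assumption rather than something this hunk states): each SPI word is 9 bits wide and is carried little-endian across two buffer bytes, giving the layout

	/*
	 * 9-bit word layout (assumed):
	 *   bit 8      = command/data flag (SSD2825_COM_BYTE or SSD2825_DAT_BYTE)
	 *   bits 7..0  = register index or data byte
	 */

which is why tx_buf[0] carries the payload and tx_buf[1] the cycle type.
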
+
+static int ssd2825_write_reg(struct ssd2825_priv *priv, u8 reg, u16 command)
+{
+ u8 datal = (command & 0x00FF);
+ u8 datah = (command & 0xFF00) >> 8;
+ int ret;
+
+ /* Command write cycle */
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg);
+ if (ret)
+ return ret;
+
+ /* Data write cycle bits 7-0 */
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datal);
+ if (ret)
+ return ret;
+
+ /* Data write cycle bits 15-8 */
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, datah);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ssd2825_write_dsi(struct ssd2825_priv *priv, const u8 *command, int len)
+{
+ int ret, i;
+
+ ret = ssd2825_write_reg(priv, SSD2825_PACKET_SIZE_CTRL_REG_1, len);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, SSD2825_PACKET_DROP_REG);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; i++) {
+ ret = ssd2825_write_raw(priv, SSD2825_DAT_BYTE, command[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ssd2825_read_raw(struct ssd2825_priv *priv, u8 cmd, u16 *data)
+{
+ struct spi_device *spi = priv->spi;
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ u8 tx_buf[2];
+ u8 rx_buf[2];
+ int ret;
+
+ memset(&xfer, 0, sizeof(xfer));
+
+ tx_buf[1] = (cmd & 0xFF00) >> 8;
+ tx_buf[0] = (cmd & 0x00FF);
+
+ xfer[0].tx_buf = tx_buf;
+ xfer[0].bits_per_word = 9;
+ xfer[0].len = 2;
+
+ xfer[1].rx_buf = rx_buf;
+ xfer[1].bits_per_word = 16;
+ xfer[1].len = 2;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+ spi_message_add_tail(&xfer[1], &msg);
+
+ ret = spi_sync(spi, &msg);
+ if (ret) {
+ dev_err(&spi->dev, "ssd2825 read raw failed %d\n", ret);
+ return ret;
+ }
+
+ *data = rx_buf[1] | (rx_buf[0] << 8);
+
+ return 0;
+}
+
+static int ssd2825_read_reg(struct ssd2825_priv *priv, u8 reg, u16 *data)
+{
+ int ret;
+
+ /* Reset the read register */
+ ret = ssd2825_write_reg(priv, SSD2825_SPI_READ_REG, SSD2825_SPI_READ_REG_RESET);
+ if (ret)
+ return ret;
+
+ /* Push the address to read */
+ ret = ssd2825_write_raw(priv, SSD2825_COM_BYTE, reg);
+ if (ret)
+ return ret;
+
+ /* Perform a reading cycle */
+ ret = ssd2825_read_raw(priv, SSD2825_SPI_READ_REG_RESET, data);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ssd2825_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct device_node *ep;
+ int ret;
+
+ if (dev->lanes > 4) {
+ dev_err(priv->dev, "unsupported number of data lanes(%u)\n", dev->lanes);
+ return -EINVAL;
+ }
+
+ /*
+ * The SSD2825 supports both Video and Pulse mode, but the driver
+ * currently only implements Video (event) mode.
+ */
+ if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel, &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ priv->output.dev = dev;
+ priv->output.bridge = bridge;
+ priv->output.panel = panel;
+
+ priv->dsi_lanes = dev->lanes;
+
+ /* get input ep (port0/endpoint0) */
+ ret = -EINVAL;
+ ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
+ if (ep) {
+ ret = of_property_read_u32(ep, "bus-width", &priv->pd_lines);
+ of_node_put(ep);
+ }
+
+ if (ret)
+ priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+ drm_bridge_add(&priv->bridge);
+
+ return 0;
+}
+
+static int ssd2825_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *dev)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+
+ drm_bridge_remove(&priv->bridge);
+ if (priv->output.panel)
+ drm_panel_bridge_remove(priv->output.bridge);
+
+ return 0;
+}
+
+static ssize_t ssd2825_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct ssd2825_priv *priv = dsi_host_to_ssd2825(host);
+ u16 config;
+ int ret;
+
+ if (msg->rx_len) {
+ dev_warn(priv->dev, "MIPI rx is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ guard(mutex)(&priv->mlock);
+
+ ret = ssd2825_read_reg(priv, SSD2825_CONFIGURATION_REG, &config);
+ if (ret)
+ return ret;
+
+ switch (msg->type) {
+ case MIPI_DSI_DCS_SHORT_WRITE:
+ case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+ case MIPI_DSI_DCS_LONG_WRITE:
+ config |= SSD2825_CONF_REG_DCS;
+ break;
+ case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+ case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+ case MIPI_DSI_GENERIC_LONG_WRITE:
+ config &= ~SSD2825_CONF_REG_DCS;
+ break;
+ case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+ case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+ default:
+ return 0;
+ }
+
+ ret = ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000);
+ if (ret)
+ return ret;
+
+ ret = ssd2825_write_dsi(priv, msg->tx_buf, msg->tx_len);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct mipi_dsi_host_ops ssd2825_dsi_host_ops = {
+ .attach = ssd2825_dsi_host_attach,
+ .detach = ssd2825_dsi_host_detach,
+ .transfer = ssd2825_dsi_host_transfer,
+};
+
+static void ssd2825_hw_reset(struct ssd2825_priv *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(5000, 6000);
+}
+
+/*
+ * PLL configuration register settings.
+ *
+ * See the "PLL Configuration Register Description" in the SSD2825 datasheet.
+ */
+static u16 construct_pll_config(struct ssd2825_priv *priv,
+ u32 desired_pll_freq_kbps, u32 reference_freq_khz)
+{
+ u32 div_factor = 1, mul_factor, fr = 0;
+
+ while (reference_freq_khz / (div_factor + 1) >= SSD2825_REF_MIN_CLK)
+ div_factor++;
+ if (div_factor > 31)
+ div_factor = 31;
+
+ mul_factor = DIV_ROUND_UP(desired_pll_freq_kbps * div_factor,
+ reference_freq_khz);
+
+ priv->pll_freq_kbps = reference_freq_khz * mul_factor / div_factor;
+ priv->nibble_freq_khz = priv->pll_freq_kbps / 4;
+
+ if (priv->pll_freq_kbps >= 501000)
+ fr = 3;
+ else if (priv->pll_freq_kbps >= 251000)
+ fr = 2;
+ else if (priv->pll_freq_kbps >= 126000)
+ fr = 1;
+
+ return (fr << 14) | (div_factor << 8) | mul_factor;
+}
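
A worked pass through construct_pll_config() with hypothetical inputs makes the register encoding concrete:

	/*
	 * reference_freq_khz = 24000, desired_pll_freq_kbps = 500000 (hypothetical):
	 *
	 *   div_factor: largest div with 24000 / (div + 1) >= 2000  -> 12
	 *   mul_factor: DIV_ROUND_UP(500000 * 12, 24000)            -> 250
	 *   pll_freq_kbps = 24000 * 250 / 12                        -> 500000
	 *   fr: 251000 <= 500000 < 501000                           -> 2
	 *
	 *   returned value = (2 << 14) | (12 << 8) | 250
	 */
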
+
+static int ssd2825_setup_pll(struct ssd2825_priv *priv,
+ const struct drm_display_mode *mode)
+{
+ u16 pll_config, lp_div;
+ u32 nibble_delay, pclk_mult, tx_freq_khz;
+ u8 hzd, hpd;
+
+ tx_freq_khz = clk_get_rate(priv->tx_clk) / KILO;
+ if (!tx_freq_khz)
+ tx_freq_khz = SSD2825_REF_MIN_CLK;
+
+ pclk_mult = priv->pd_lines / priv->dsi_lanes + 1;
+ pll_config = construct_pll_config(priv, pclk_mult * mode->clock,
+ tx_freq_khz);
+
+ lp_div = priv->pll_freq_kbps / (SSD2825_LP_MIN_CLK * 8);
+
+ /* nibble_delay in nanoseconds */
+ nibble_delay = MICRO / priv->nibble_freq_khz;
+
+ hzd = priv->hzd / nibble_delay;
+ hpd = (priv->hpd - 4 * nibble_delay) / nibble_delay;
+
+ /* Disable PLL */
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0000);
+ ssd2825_write_reg(priv, SSD2825_LINE_CTRL_REG, 0x0001);
+
+ /* Set delays */
+ ssd2825_write_reg(priv, SSD2825_DELAY_ADJ_REG_1, (hzd << 8) | hpd);
+
+ /* Set PLL coefficients */
+ ssd2825_write_reg(priv, SSD2825_PLL_CONFIGURATION_REG, pll_config);
+
+ /* Clock Control Register */
+ ssd2825_write_reg(priv, SSD2825_CLOCK_CTRL_REG,
+ SSD2828_LP_CLOCK_DIVIDER(lp_div));
+
+ /* Enable PLL */
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ return 0;
+}
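
Continuing the same hypothetical numbers through the divider and delay math in ssd2825_setup_pll():

	/*
	 * pll_freq_kbps = 500000 -> nibble_freq_khz = 125000
	 * lp_div        = 500000 / (5000 * 8) = 12
	 * nibble_delay  = MICRO / 125000 = 8 ns
	 * hzd = 133 ns  -> 133 / 8 = 16 nibble cycles
	 * hpd = 70 ns   -> (70 - 4 * 8) / 8 = 4 nibble cycles
	 */
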
+
+static void ssd2825_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ const struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ u32 input_bus_flags = bridge->timings->input_bus_flags;
+ u16 flags = 0, config;
+ u8 pixel_format;
+ int ret;
+
+ /* Power Sequence */
+ ret = clk_prepare_enable(priv->tx_clk);
+ if (ret)
+ dev_err(priv->dev, "error enabling tx_clk (%d)\n", ret);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ssd2825_supplies), priv->supplies);
+ if (ret)
+ dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
+
+ usleep_range(1000, 2000);
+
+ ssd2825_hw_reset(priv);
+
+ /* Perform SW reset */
+ ssd2825_write_reg(priv, SSD2825_OPERATION_CTRL_REG, 0x0100);
+
+ /* Set pixel format */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB565:
+ pixel_format = 0x00;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ pixel_format = 0x01;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ pixel_format = 0x02;
+ break;
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ pixel_format = 0x03;
+ break;
+ }
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ mode = &crtc_state->adjusted_mode;
+
+ /* Set panel timings */
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_1,
+ ((mode->vtotal - mode->vsync_end) << 8) |
+ (mode->htotal - mode->hsync_end));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_2,
+ ((mode->vtotal - mode->vsync_start) << 8) |
+ (mode->htotal - mode->hsync_start));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_3,
+ ((mode->vsync_start - mode->vdisplay) << 8) |
+ (mode->hsync_start - mode->hdisplay));
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_4, mode->hdisplay);
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_5, mode->vdisplay);
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ flags |= SSD2825_HSYNC_HIGH;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ flags |= SSD2825_VSYNC_HIGH;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+ flags |= SSD2825_NON_BURST_EV;
+
+ if (input_bus_flags & DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE)
+ flags |= SSD2825_PCKL_HIGH;
+
+ ssd2825_write_reg(priv, SSD2825_RGB_INTERFACE_CTRL_REG_6, flags | pixel_format);
+ ssd2825_write_reg(priv, SSD2825_LANE_CONFIGURATION_REG, dsi_dev->lanes - 1);
+ ssd2825_write_reg(priv, SSD2825_TEST_REG, 0x0004);
+
+ /* Configure the PLL */
+ ssd2825_setup_pll(priv, mode);
+
+ usleep_range(10000, 11000);
+
+ config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_CKE | SSD2825_CONF_REG_DCS |
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_LPM)
+ config &= ~SSD2825_CONF_REG_HS;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ config &= ~SSD2825_CONF_REG_EOT;
+
+ /* Initial DSI configuration register set */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ if (priv->output.panel)
+ drm_panel_enable(priv->output.panel);
+}
+
+static void ssd2825_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ u16 config;
+
+ config = SSD2825_CONF_REG_HS | SSD2825_CONF_REG_DCS |
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO)
+ config |= SSD2825_CONF_REG_VEN;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
+ config &= ~SSD2825_CONF_REG_EOT;
+
+ /* Complete configuration after DSI commands were sent */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG, config);
+ ssd2825_write_reg(priv, SSD2825_PLL_CTRL_REG, 0x0001);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0x0000);
+}
+
+static void ssd2825_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+ int ret;
+
+ msleep(100);
+
+ /* Exit DSI configuration register set */
+ ssd2825_write_reg(priv, SSD2825_CONFIGURATION_REG,
+ SSD2825_CONF_REG_ECD | SSD2825_CONF_REG_EOT);
+ ssd2825_write_reg(priv, SSD2825_VC_CTRL_REG, 0);
+
+ /* HW disable */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(ssd2825_supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+
+ clk_disable_unprepare(priv->tx_clk);
+}
+
+static int ssd2825_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ssd2825_priv *priv = bridge_to_ssd2825(bridge);
+
+ return drm_bridge_attach(encoder, priv->output.bridge, bridge,
+ flags);
+}
+
+static enum drm_mode_status
+ssd2825_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ if (mode->hdisplay > 1366)
+ return MODE_H_ILLEGAL;
+
+ if (mode->vdisplay > 1366)
+ return MODE_V_ILLEGAL;
+
+ return MODE_OK;
+}
+
+static bool ssd2825_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Default to positive sync */
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+
+ return true;
+}
+
+static const struct drm_bridge_funcs ssd2825_bridge_funcs = {
+ .attach = ssd2825_bridge_attach,
+ .mode_valid = ssd2825_bridge_mode_valid,
+ .mode_fixup = ssd2825_mode_fixup,
+
+ .atomic_pre_enable = ssd2825_bridge_atomic_pre_enable,
+ .atomic_enable = ssd2825_bridge_atomic_enable,
+ .atomic_disable = ssd2825_bridge_atomic_disable,
+
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+};
+
+static const struct drm_bridge_timings default_ssd2825_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+static int ssd2825_probe(struct spi_device *spi)
+{
+ struct ssd2825_priv *priv;
+ struct device *dev = &spi->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /* Driver supports only 9 bit 3-Wire mode */
+ spi->bits_per_word = 9;
+
+ ret = spi_setup(spi);
+ if (ret)
+ return ret;
+
+ priv = devm_drm_bridge_alloc(dev, struct ssd2825_priv, bridge, &ssd2825_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ spi_set_drvdata(spi, priv);
+
+ priv->spi = spi;
+ priv->dev = dev;
+
+ mutex_init(&priv->mlock);
+
+ priv->tx_clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->tx_clk))
+ return dev_err_probe(dev, PTR_ERR(priv->tx_clk),
+ "can't retrieve bridge tx_clk\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(ssd2825_supplies),
+ ssd2825_supplies, &priv->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ priv->hzd = 133; /* ns */
+ device_property_read_u32(dev, "solomon,hs-zero-delay-ns", &priv->hzd);
+
+ priv->hpd = 40; /* ns */
+ device_property_read_u32(dev, "solomon,hs-prep-delay-ns", &priv->hpd);
+
+ priv->dsi_host.dev = dev;
+ priv->dsi_host.ops = &ssd2825_dsi_host_ops;
+
+ priv->bridge.timings = &default_ssd2825_timings;
+ priv->bridge.of_node = np;
+
+ return mipi_dsi_host_register(&priv->dsi_host);
+}
+
+static void ssd2825_remove(struct spi_device *spi)
+{
+ struct ssd2825_priv *priv = spi_get_drvdata(spi);
+
+ mipi_dsi_host_unregister(&priv->dsi_host);
+}
+
+static const struct of_device_id ssd2825_of_match[] = {
+ { .compatible = "solomon,ssd2825" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ssd2825_of_match);
+
+static struct spi_driver ssd2825_driver = {
+ .driver = {
+ .name = "ssd2825",
+ .of_match_table = ssd2825_of_match,
+ },
+ .probe = ssd2825_probe,
+ .remove = ssd2825_remove,
+};
+module_spi_driver(ssd2825_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Solomon SSD2825 RGB to MIPI-DSI bridge driver SPI");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig
index f3ab2f985f8c..a46df7583bcf 100644
--- a/drivers/gpu/drm/bridge/synopsys/Kconfig
+++ b/drivers/gpu/drm/bridge/synopsys/Kconfig
@@ -1,4 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_DW_DP
+ tristate
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_KMS_HELPER
+ select REGMAP_MMIO
+
config DRM_DW_HDMI
tristate
select DRM_DISPLAY_HDMI_HELPER
@@ -54,6 +61,14 @@ config DRM_DW_HDMI_QP
select DRM_KMS_HELPER
select REGMAP_MMIO
+config DRM_DW_HDMI_QP_CEC
+ bool "Synopsis Designware QP CEC interface"
+ depends on DRM_DW_HDMI_QP
+ select DRM_DISPLAY_HDMI_CEC_HELPER
+ help
+ Support the CEC interface which is part of the Synopsys
+ DesignWare HDMI QP block.
+
config DRM_DW_MIPI_DSI
tristate
select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 9dc376d220ad..4dada44029ac 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_DW_DP) += dw-dp.o
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
new file mode 100644
index 000000000000..82aaf74e1bc0
--- /dev/null
+++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c
@@ -0,0 +1,2097 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare Cores DisplayPort Transmitter Controller
+ *
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ *
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/media-bus-format.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/phy/phy.h>
+#include <linux/unaligned.h>
+
+#include <drm/bridge/dw_dp.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#define DW_DP_VERSION_NUMBER 0x0000
+#define DW_DP_VERSION_TYPE 0x0004
+#define DW_DP_ID 0x0008
+
+#define DW_DP_CONFIG_REG1 0x0100
+#define DW_DP_CONFIG_REG2 0x0104
+#define DW_DP_CONFIG_REG3 0x0108
+
+#define DW_DP_CCTL 0x0200
+#define FORCE_HPD BIT(4)
+#define DEFAULT_FAST_LINK_TRAIN_EN BIT(2)
+#define ENHANCE_FRAMING_EN BIT(1)
+#define SCRAMBLE_DIS BIT(0)
+#define DW_DP_SOFT_RESET_CTRL 0x0204
+#define VIDEO_RESET BIT(5)
+#define AUX_RESET BIT(4)
+#define AUDIO_SAMPLER_RESET BIT(3)
+#define HDCP_MODULE_RESET BIT(2)
+#define PHY_SOFT_RESET BIT(1)
+#define CONTROLLER_RESET BIT(0)
+
+#define DW_DP_VSAMPLE_CTRL 0x0300
+#define PIXEL_MODE_SELECT GENMASK(22, 21)
+#define VIDEO_MAPPING GENMASK(20, 16)
+#define VIDEO_STREAM_ENABLE BIT(5)
+
+#define DW_DP_VSAMPLE_STUFF_CTRL1 0x0304
+
+#define DW_DP_VSAMPLE_STUFF_CTRL2 0x0308
+
+#define DW_DP_VINPUT_POLARITY_CTRL 0x030c
+#define DE_IN_POLARITY BIT(2)
+#define HSYNC_IN_POLARITY BIT(1)
+#define VSYNC_IN_POLARITY BIT(0)
+
+#define DW_DP_VIDEO_CONFIG1 0x0310
+#define HACTIVE GENMASK(31, 16)
+#define HBLANK GENMASK(15, 2)
+#define I_P BIT(1)
+#define R_V_BLANK_IN_OSC BIT(0)
+
+#define DW_DP_VIDEO_CONFIG2 0x0314
+#define VBLANK GENMASK(31, 16)
+#define VACTIVE GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG3 0x0318
+#define H_SYNC_WIDTH GENMASK(31, 16)
+#define H_FRONT_PORCH GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG4 0x031c
+#define V_SYNC_WIDTH GENMASK(31, 16)
+#define V_FRONT_PORCH GENMASK(15, 0)
+
+#define DW_DP_VIDEO_CONFIG5 0x0320
+#define INIT_THRESHOLD_HI GENMASK(22, 21)
+#define AVERAGE_BYTES_PER_TU_FRAC GENMASK(19, 16)
+#define INIT_THRESHOLD GENMASK(13, 7)
+#define AVERAGE_BYTES_PER_TU GENMASK(6, 0)
+
+#define DW_DP_VIDEO_MSA1 0x0324
+#define VSTART GENMASK(31, 16)
+#define HSTART GENMASK(15, 0)
+
+#define DW_DP_VIDEO_MSA2 0x0328
+#define MISC0 GENMASK(31, 24)
+
+#define DW_DP_VIDEO_MSA3 0x032c
+#define MISC1 GENMASK(31, 24)
+
+#define DW_DP_VIDEO_HBLANK_INTERVAL 0x0330
+#define HBLANK_INTERVAL_EN BIT(16)
+#define HBLANK_INTERVAL GENMASK(15, 0)
+
+#define DW_DP_AUD_CONFIG1 0x0400
+#define AUDIO_TIMESTAMP_VERSION_NUM GENMASK(29, 24)
+#define AUDIO_PACKET_ID GENMASK(23, 16)
+#define AUDIO_MUTE BIT(15)
+#define NUM_CHANNELS GENMASK(14, 12)
+#define HBR_MODE_ENABLE BIT(10)
+#define AUDIO_DATA_WIDTH GENMASK(9, 5)
+#define AUDIO_DATA_IN_EN GENMASK(4, 1)
+#define AUDIO_INF_SELECT BIT(0)
+
+#define DW_DP_SDP_VERTICAL_CTRL 0x0500
+#define EN_VERTICAL_SDP BIT(2)
+#define EN_AUDIO_STREAM_SDP BIT(1)
+#define EN_AUDIO_TIMESTAMP_SDP BIT(0)
+#define DW_DP_SDP_HORIZONTAL_CTRL 0x0504
+#define EN_HORIZONTAL_SDP BIT(2)
+#define DW_DP_SDP_STATUS_REGISTER 0x0508
+#define DW_DP_SDP_MANUAL_CTRL 0x050c
+#define DW_DP_SDP_STATUS_EN 0x0510
+
+#define DW_DP_SDP_REGISTER_BANK 0x0600
+#define SDP_REGS GENMASK(31, 0)
+
+#define DW_DP_PHYIF_CTRL 0x0a00
+#define PHY_WIDTH BIT(25)
+#define PHY_POWERDOWN GENMASK(20, 17)
+#define PHY_BUSY GENMASK(15, 12)
+#define SSC_DIS BIT(16)
+#define XMIT_ENABLE GENMASK(11, 8)
+#define PHY_LANES GENMASK(7, 6)
+#define PHY_RATE GENMASK(5, 4)
+#define TPS_SEL GENMASK(3, 0)
+
+#define DW_DP_PHY_TX_EQ 0x0a04
+#define DW_DP_CUSTOMPAT0 0x0a08
+#define DW_DP_CUSTOMPAT1 0x0a0c
+#define DW_DP_CUSTOMPAT2 0x0a10
+#define DW_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET 0x0a14
+#define DW_DP_PHYIF_PWRDOWN_CTRL 0x0a18
+
+#define DW_DP_AUX_CMD 0x0b00
+#define AUX_CMD_TYPE GENMASK(31, 28)
+#define AUX_ADDR GENMASK(27, 8)
+#define I2C_ADDR_ONLY BIT(4)
+#define AUX_LEN_REQ GENMASK(3, 0)
+
+#define DW_DP_AUX_STATUS 0x0b04
+#define AUX_TIMEOUT BIT(17)
+#define AUX_BYTES_READ GENMASK(23, 19)
+#define AUX_STATUS GENMASK(7, 4)
+
+#define DW_DP_AUX_DATA0 0x0b08
+#define DW_DP_AUX_DATA1 0x0b0c
+#define DW_DP_AUX_DATA2 0x0b10
+#define DW_DP_AUX_DATA3 0x0b14
+
+#define DW_DP_GENERAL_INTERRUPT 0x0d00
+#define VIDEO_FIFO_OVERFLOW_STREAM0 BIT(6)
+#define AUDIO_FIFO_OVERFLOW_STREAM0 BIT(5)
+#define SDP_EVENT_STREAM0 BIT(4)
+#define AUX_CMD_INVALID BIT(3)
+#define HDCP_EVENT BIT(2)
+#define AUX_REPLY_EVENT BIT(1)
+#define HPD_EVENT BIT(0)
+
+#define DW_DP_GENERAL_INTERRUPT_ENABLE 0x0d04
+#define HDCP_EVENT_EN BIT(2)
+#define AUX_REPLY_EVENT_EN BIT(1)
+#define HPD_EVENT_EN BIT(0)
+
+#define DW_DP_HPD_STATUS 0x0d08
+#define HPD_STATE GENMASK(11, 9)
+#define HPD_STATUS BIT(8)
+#define HPD_HOT_UNPLUG BIT(2)
+#define HPD_HOT_PLUG BIT(1)
+#define HPD_IRQ BIT(0)
+
+#define DW_DP_HPD_INTERRUPT_ENABLE 0x0d0c
+#define HPD_UNPLUG_ERR_EN BIT(3)
+#define HPD_UNPLUG_EN BIT(2)
+#define HPD_PLUG_EN BIT(1)
+#define HPD_IRQ_EN BIT(0)
+
+#define DW_DP_HDCP_CFG 0x0e00
+#define DPCD12PLUS BIT(7)
+#define CP_IRQ BIT(6)
+#define BYPENCRYPTION BIT(5)
+#define HDCP_LOCK BIT(4)
+#define ENCRYPTIONDISABLE BIT(3)
+#define ENABLE_HDCP_13 BIT(2)
+#define ENABLE_HDCP BIT(1)
+
+#define DW_DP_HDCP_OBS 0x0e04
+#define HDCP22_RE_AUTHENTICATION_REQ BIT(31)
+#define HDCP22_AUTHENTICATION_FAILED BIT(30)
+#define HDCP22_AUTHENTICATION_SUCCESS BIT(29)
+#define HDCP22_CAPABLE_SINK BIT(28)
+#define HDCP22_SINK_CAP_CHECK_COMPLETE BIT(27)
+#define HDCP22_STATE GENMASK(26, 24)
+#define HDCP22_BOOTED BIT(23)
+#define HDCP13_BSTATUS GENMASK(22, 19)
+#define REPEATER BIT(18)
+#define HDCP_CAPABLE BIT(17)
+#define STATEE GENMASK(16, 14)
+#define STATEOEG GENMASK(13, 11)
+#define STATER GENMASK(10, 8)
+#define STATEA GENMASK(7, 4)
+#define SUBSTATEA GENMASK(3, 1)
+#define HDCPENGAGED BIT(0)
+
+#define DW_DP_HDCP_APIINTCLR 0x0e08
+#define DW_DP_HDCP_APIINTSTAT 0x0e0c
+#define DW_DP_HDCP_APIINTMSK 0x0e10
+#define HDCP22_GPIOINT BIT(8)
+#define HDCP_ENGAGED BIT(7)
+#define HDCP_FAILED BIT(6)
+#define KSVSHA1CALCDONEINT BIT(5)
+#define AUXRESPNACK7TIMES BIT(4)
+#define AUXRESPTIMEOUT BIT(3)
+#define AUXRESPDEFER7TIMES BIT(2)
+#define KSVACCESSINT BIT(0)
+
+#define DW_DP_HDCP_KSVMEMCTRL 0x0e18
+#define KSVSHA1STATUS BIT(4)
+#define KSVMEMACCESS BIT(1)
+#define KSVMEMREQUEST BIT(0)
+
+#define DW_DP_HDCP_REG_BKSV0 0x3600
+#define DW_DP_HDCP_REG_BKSV1 0x3604
+#define DW_DP_HDCP_REG_ANCONF 0x3608
+#define AN_BYPASS BIT(0)
+
+#define DW_DP_HDCP_REG_AN0 0x360c
+#define DW_DP_HDCP_REG_AN1 0x3610
+#define DW_DP_HDCP_REG_RMLCTL 0x3614
+#define ODPK_DECRYPT_ENABLE BIT(0)
+
+#define DW_DP_HDCP_REG_RMLSTS 0x3618
+#define IDPK_WR_OK_STS BIT(6)
+#define IDPK_DATA_INDEX GENMASK(5, 0)
+#define DW_DP_HDCP_REG_SEED 0x361c
+#define DW_DP_HDCP_REG_DPK0 0x3620
+#define DW_DP_HDCP_REG_DPK1 0x3624
+#define DW_DP_HDCP22_GPIOSTS 0x3628
+#define DW_DP_HDCP22_GPIOCHNGSTS 0x362c
+#define DW_DP_HDCP_REG_DPK_CRC 0x3630
+
+#define DW_DP_MAX_REGISTER DW_DP_HDCP_REG_DPK_CRC
+
+#define SDP_REG_BANK_SIZE 16
+
+struct dw_dp_link_caps {
+ bool enhanced_framing;
+ bool tps3_supported;
+ bool tps4_supported;
+ bool fast_training;
+ bool channel_coding;
+ bool ssc;
+};
+
+struct dw_dp_link_train_set {
+ unsigned int voltage_swing[4];
+ unsigned int pre_emphasis[4];
+ bool voltage_max_reached[4];
+ bool pre_max_reached[4];
+};
+
+struct dw_dp_link_train {
+ struct dw_dp_link_train_set adjust;
+ bool clock_recovered;
+ bool channel_equalized;
+};
+
+struct dw_dp_link {
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ unsigned char revision;
+ unsigned int rate;
+ unsigned int lanes;
+ u8 sink_count;
+ u8 vsc_sdp_supported;
+ struct dw_dp_link_caps caps;
+ struct dw_dp_link_train train;
+ struct drm_dp_desc desc;
+};
+
+struct dw_dp_bridge_state {
+ struct drm_bridge_state base;
+ struct drm_display_mode mode;
+ u8 video_mapping;
+ u8 color_format;
+ u8 bpc;
+ u8 bpp;
+};
+
+struct dw_dp_sdp {
+ struct dp_sdp base;
+ unsigned long flags;
+};
+
+struct dw_dp_hotplug {
+ bool long_hpd;
+};
+
+struct dw_dp {
+ struct drm_bridge bridge;
+ struct device *dev;
+ struct regmap *regmap;
+ struct phy *phy;
+ struct clk *apb_clk;
+ struct clk *aux_clk;
+ struct clk *i2s_clk;
+ struct clk *spdif_clk;
+ struct clk *hdcp_clk;
+ struct reset_control *rstc;
+ struct completion complete;
+ int irq;
+ struct work_struct hpd_work;
+ struct dw_dp_hotplug hotplug;
+ /* Serialize hpd status access */
+ struct mutex irq_lock;
+
+ struct drm_dp_aux aux;
+
+ struct dw_dp_link link;
+ struct dw_dp_plat_data plat_data;
+ u8 pixel_mode;
+
+ DECLARE_BITMAP(sdp_reg_bank, SDP_REG_BANK_SIZE);
+};
+
+enum {
+ DW_DP_RGB_6BIT,
+ DW_DP_RGB_8BIT,
+ DW_DP_RGB_10BIT,
+ DW_DP_RGB_12BIT,
+ DW_DP_RGB_16BIT,
+ DW_DP_YCBCR444_8BIT,
+ DW_DP_YCBCR444_10BIT,
+ DW_DP_YCBCR444_12BIT,
+ DW_DP_YCBCR444_16BIT,
+ DW_DP_YCBCR422_8BIT,
+ DW_DP_YCBCR422_10BIT,
+ DW_DP_YCBCR422_12BIT,
+ DW_DP_YCBCR422_16BIT,
+ DW_DP_YCBCR420_8BIT,
+ DW_DP_YCBCR420_10BIT,
+ DW_DP_YCBCR420_12BIT,
+ DW_DP_YCBCR420_16BIT,
+};
+
+enum {
+ DW_DP_MP_SINGLE_PIXEL,
+ DW_DP_MP_DUAL_PIXEL,
+ DW_DP_MP_QUAD_PIXEL,
+};
+
+enum {
+ DW_DP_SDP_VERTICAL_INTERVAL = BIT(0),
+ DW_DP_SDP_HORIZONTAL_INTERVAL = BIT(1),
+};
+
+enum {
+ DW_DP_HPD_STATE_IDLE,
+ DW_DP_HPD_STATE_UNPLUG,
+ DW_DP_HPD_STATE_TIMEOUT = 4,
+ DW_DP_HPD_STATE_PLUG = 7
+};
+
+enum {
+ DW_DP_PHY_PATTERN_NONE,
+ DW_DP_PHY_PATTERN_TPS_1,
+ DW_DP_PHY_PATTERN_TPS_2,
+ DW_DP_PHY_PATTERN_TPS_3,
+ DW_DP_PHY_PATTERN_TPS_4,
+ DW_DP_PHY_PATTERN_SERM,
+ DW_DP_PHY_PATTERN_PBRS7,
+ DW_DP_PHY_PATTERN_CUSTOM_80BIT,
+ DW_DP_PHY_PATTERN_CP2520_1,
+ DW_DP_PHY_PATTERN_CP2520_2,
+};
+
+struct dw_dp_output_format {
+ u32 bus_format;
+ u32 color_format;
+ u8 video_mapping;
+ u8 bpc;
+ u8 bpp;
+};
+
+#define to_dw_dp_bridge_state(s) container_of(s, struct dw_dp_bridge_state, base)
+
+static const struct dw_dp_output_format dw_dp_output_formats[] = {
+ { MEDIA_BUS_FMT_RGB101010_1X30, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_10BIT, 10, 30 },
+ { MEDIA_BUS_FMT_RGB888_1X24, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_8BIT, 8, 24 },
+ { MEDIA_BUS_FMT_YUV10_1X30, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_10BIT, 10, 30 },
+ { MEDIA_BUS_FMT_YUV8_1X24, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_8BIT, 8, 24},
+ { MEDIA_BUS_FMT_YUYV10_1X20, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_10BIT, 10, 20 },
+ { MEDIA_BUS_FMT_YUYV8_1X16, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_8BIT, 8, 16 },
+ { MEDIA_BUS_FMT_UYYVYY10_0_5X30, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_10BIT, 10, 15 },
+ { MEDIA_BUS_FMT_UYYVYY8_0_5X24, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_8BIT, 8, 12 },
+ { MEDIA_BUS_FMT_RGB666_1X24_CPADHI, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_6BIT, 6, 18 },
+};
+
+static const struct dw_dp_output_format *dw_dp_get_output_format(u32 bus_format)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++)
+ if (dw_dp_output_formats[i].bus_format == bus_format)
+ return &dw_dp_output_formats[i];
+
+ return NULL;
+}
+
+static inline struct dw_dp *bridge_to_dp(struct drm_bridge *b)
+{
+ return container_of(b, struct dw_dp, bridge);
+}
+
+static struct dw_dp_bridge_state *dw_dp_get_bridge_state(struct dw_dp *dp)
+{
+ struct drm_bridge_state *state;
+
+ state = drm_priv_to_bridge_state(dp->bridge.base.state);
+ if (!state)
+ return NULL;
+
+ return to_dw_dp_bridge_state(state);
+}
+
+static inline void dw_dp_phy_set_pattern(struct dw_dp *dp, u32 pattern)
+{
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, TPS_SEL,
+ FIELD_PREP(TPS_SEL, pattern));
+}
+
+static void dw_dp_phy_xmit_enable(struct dw_dp *dp, u32 lanes)
+{
+ u32 xmit_enable;
+
+ switch (lanes) {
+ case 4:
+ case 2:
+ case 1:
+ xmit_enable = GENMASK(lanes - 1, 0);
+ break;
+ case 0:
+ default:
+ xmit_enable = 0;
+ break;
+ }
+
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, XMIT_ENABLE,
+ FIELD_PREP(XMIT_ENABLE, xmit_enable));
+}
+
+static bool dw_dp_bandwidth_ok(struct dw_dp *dp,
+ const struct drm_display_mode *mode, u32 bpp,
+ unsigned int lanes, unsigned int rate)
+{
+ u32 max_bw, req_bw;
+
+ req_bw = mode->clock * bpp / 8;
+ max_bw = lanes * rate;
+ if (req_bw > max_bw)
+ return false;
+
+ return true;
+}
+
+static bool dw_dp_hpd_detect(struct dw_dp *dp)
+{
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value);
+
+ return FIELD_GET(HPD_STATE, value) == DW_DP_HPD_STATE_PLUG;
+}
+
+static void dw_dp_link_caps_reset(struct dw_dp_link_caps *caps)
+{
+ caps->enhanced_framing = false;
+ caps->tps3_supported = false;
+ caps->tps4_supported = false;
+ caps->fast_training = false;
+ caps->channel_coding = false;
+}
+
+static void dw_dp_link_reset(struct dw_dp_link *link)
+{
+ link->vsc_sdp_supported = 0;
+ link->sink_count = 0;
+ link->revision = 0;
+ link->rate = 0;
+ link->lanes = 0;
+
+ dw_dp_link_caps_reset(&link->caps);
+ memset(link->dpcd, 0, sizeof(link->dpcd));
+}
+
+static int dw_dp_link_parse(struct dw_dp *dp, struct drm_connector *connector)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+ dw_dp_link_reset(link);
+
+ ret = drm_dp_read_dpcd_caps(&dp->aux, link->dpcd);
+ if (ret < 0)
+ return ret;
+
+ drm_dp_read_desc(&dp->aux, &link->desc, drm_dp_is_branch(link->dpcd));
+
+ if (drm_dp_read_sink_count_cap(connector, link->dpcd, &link->desc)) {
+ ret = drm_dp_read_sink_count(&dp->aux);
+ if (ret < 0)
+ return ret;
+
+ link->sink_count = ret;
+
+ /* Dongle connected, but no display */
+ if (!link->sink_count)
+ return -ENODEV;
+ }
+
+ link->vsc_sdp_supported = drm_dp_vsc_sdp_supported(&dp->aux, link->dpcd);
+
+ link->revision = link->dpcd[DP_DPCD_REV];
+ link->rate = min_t(u32, min(dp->plat_data.max_link_rate,
+ dp->phy->attrs.max_link_rate * 100),
+ drm_dp_max_link_rate(link->dpcd));
+ link->lanes = min_t(u8, phy_get_bus_width(dp->phy),
+ drm_dp_max_lane_count(link->dpcd));
+
+ link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(link->dpcd);
+ link->caps.tps3_supported = drm_dp_tps3_supported(link->dpcd);
+ link->caps.tps4_supported = drm_dp_tps4_supported(link->dpcd);
+ link->caps.fast_training = drm_dp_fast_training_cap(link->dpcd);
+ link->caps.channel_coding = drm_dp_channel_coding_supported(link->dpcd);
+ link->caps.ssc = !!(link->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
+
+ return 0;
+}
+
+static int dw_dp_link_train_update_vs_emph(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_link_train_set *train_set = &link->train.adjust;
+ unsigned int lanes = dp->link.lanes;
+ union phy_configure_opts phy_cfg;
+ unsigned int *vs, *pe;
+ int i, ret;
+ u8 buf[4];
+
+ vs = train_set->voltage_swing;
+ pe = train_set->pre_emphasis;
+
+ for (i = 0; i < lanes; i++) {
+ phy_cfg.dp.voltage[i] = vs[i];
+ phy_cfg.dp.pre[i] = pe[i];
+ }
+
+ phy_cfg.dp.set_lanes = false;
+ phy_cfg.dp.set_rate = false;
+ phy_cfg.dp.set_voltages = true;
+
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < lanes; i++) {
+ buf[i] = (vs[i] << DP_TRAIN_VOLTAGE_SWING_SHIFT) |
+ (pe[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT);
+ if (train_set->voltage_max_reached[i])
+ buf[i] |= DP_TRAIN_MAX_SWING_REACHED;
+ if (train_set->pre_max_reached[i])
+ buf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ }
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf, lanes);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int dw_dp_phy_configure(struct dw_dp *dp, unsigned int rate,
+ unsigned int lanes, bool ssc)
+{
+ union phy_configure_opts phy_cfg;
+ int ret;
+
+ /* Move PHY to P3 */
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
+ FIELD_PREP(PHY_POWERDOWN, 0x3));
+
+ phy_cfg.dp.lanes = lanes;
+ phy_cfg.dp.link_rate = rate / 100;
+ phy_cfg.dp.ssc = ssc;
+ phy_cfg.dp.set_lanes = true;
+ phy_cfg.dp.set_rate = true;
+ phy_cfg.dp.set_voltages = false;
+ ret = phy_configure(dp->phy, &phy_cfg);
+ if (ret)
+ return ret;
+
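+ /* PHY_LANES is programmed as lanes / 2: 1, 2 and 4 lanes map to 0, 1 and 2 */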
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_LANES,
+ FIELD_PREP(PHY_LANES, lanes / 2));
+
+ /* Move PHY to P0 */
+ regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN,
+ FIELD_PREP(PHY_POWERDOWN, 0x0));
+
+ dw_dp_phy_xmit_enable(dp, lanes);
+
+ return 0;
+}
+
+static int dw_dp_link_configure(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 buf[2];
+ int ret;
+
+ ret = dw_dp_phy_configure(dp, link->rate, link->lanes, link->caps.ssc);
+ if (ret)
+ return ret;
+
+ buf[0] = drm_dp_link_rate_to_bw_code(link->rate);
+ buf[1] = link->lanes;
+
+ if (link->caps.enhanced_framing) {
+ buf[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
+ FIELD_PREP(ENHANCE_FRAMING_EN, 1));
+ } else {
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN,
+ FIELD_PREP(ENHANCE_FRAMING_EN, 0));
+ }
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ buf[0] = link->caps.ssc ? DP_SPREAD_AMP_0_5 : 0;
+ buf[1] = link->caps.channel_coding ? DP_SET_ANSI_8B10B : 0;
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void dw_dp_link_train_init(struct dw_dp_link_train *train)
+{
+ struct dw_dp_link_train_set *adj = &train->adjust;
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ adj->voltage_swing[i] = 0;
+ adj->pre_emphasis[i] = 0;
+ adj->voltage_max_reached[i] = false;
+ adj->pre_max_reached[i] = false;
+ }
+
+ train->clock_recovered = false;
+ train->channel_equalized = false;
+}
+
+static bool dw_dp_link_train_valid(const struct dw_dp_link_train *train)
+{
+ return train->clock_recovered && train->channel_equalized;
+}
+
+static int dw_dp_link_train_set_pattern(struct dw_dp *dp, u32 pattern)
+{
+ u8 buf = 0;
+ int ret;
+
+ if (pattern && pattern != DP_TRAINING_PATTERN_4) {
+ buf |= DP_LINK_SCRAMBLING_DISABLE;
+
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
+ FIELD_PREP(SCRAMBLE_DIS, 1));
+ } else {
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS,
+ FIELD_PREP(SCRAMBLE_DIS, 0));
+ }
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_DISABLE:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_NONE);
+ break;
+ case DP_TRAINING_PATTERN_1:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_1);
+ break;
+ case DP_TRAINING_PATTERN_2:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_2);
+ break;
+ case DP_TRAINING_PATTERN_3:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_3);
+ break;
+ case DP_TRAINING_PATTERN_4:
+ dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_4);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET,
+ buf | pattern);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static u8 dw_dp_voltage_max(u8 preemph)
+{
+ switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) {
+ case DP_TRAIN_PRE_EMPH_LEVEL_0:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
+ case DP_TRAIN_PRE_EMPH_LEVEL_1:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ case DP_TRAIN_PRE_EMPH_LEVEL_2:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_1;
+ case DP_TRAIN_PRE_EMPH_LEVEL_3:
+ default:
+ return DP_TRAIN_VOLTAGE_SWING_LEVEL_0;
+ }
+}
+
+static bool dw_dp_link_get_adjustments(struct dw_dp_link *link,
+ u8 status[DP_LINK_STATUS_SIZE])
+{
+ struct dw_dp_link_train_set *adj = &link->train.adjust;
+ unsigned int i;
+ bool changed = false;
+ u8 v = 0;
+ u8 p = 0;
+
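+ /*
+ * Latch the requested swing and pre-emphasis, clamping them to the
+ * maximum levels and recording when a maximum has been reached so
+ * it can be echoed back to the sink.
+ */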
+ for (i = 0; i < link->lanes; i++) {
+ v = drm_dp_get_adjust_request_voltage(status, i);
+ v >>= DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ p = drm_dp_get_adjust_request_pre_emphasis(status, i);
+ p >>= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if (v != adj->voltage_swing[i] || p != adj->pre_emphasis[i])
+ changed = true;
+
+ if (p >= (DP_TRAIN_PRE_EMPH_LEVEL_3 >> DP_TRAIN_PRE_EMPHASIS_SHIFT)) {
+ adj->pre_emphasis[i] = DP_TRAIN_PRE_EMPH_LEVEL_3 >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ adj->pre_max_reached[i] = true;
+ } else {
+ adj->pre_emphasis[i] = p;
+ adj->pre_max_reached[i] = false;
+ }
+
+ v = min(v, dw_dp_voltage_max(p));
+ if (v >= (DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >> DP_TRAIN_VOLTAGE_SWING_SHIFT)) {
+ adj->voltage_swing[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ adj->voltage_max_reached[i] = true;
+ } else {
+ adj->voltage_swing[i] = v;
+ adj->voltage_max_reached[i] = false;
+ }
+ }
+
+ return changed;
+}
+
+static int dw_dp_link_clock_recovery(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 status[DP_LINK_STATUS_SIZE];
+ unsigned int tries = 0;
+ int ret;
+ bool adj_changed;
+
+ ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ return ret;
+
+ for (;;) {
+ ret = dw_dp_link_train_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_clock_recovery_delay(&dp->aux, link->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to read link status: %d\n", ret);
+ return ret;
+ }
+
+ if (drm_dp_clock_recovery_ok(status, link->lanes)) {
+ link->train.clock_recovered = true;
+ break;
+ }
+
+ /*
+ * According to the DP 1.4 spec, if the current adjustment request
+ * is the same as the previous one, retry up to 5 times before
+ * giving up.
+ */
+ adj_changed = dw_dp_link_get_adjustments(link, status);
+ if (!adj_changed)
+ tries++;
+ else
+ tries = 0;
+
+ if (tries == 5)
+ break;
+ }
+
+ return 0;
+}
+
+static int dw_dp_link_channel_equalization(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 status[DP_LINK_STATUS_SIZE], pattern;
+ unsigned int tries;
+ int ret;
+
+ if (link->caps.tps4_supported)
+ pattern = DP_TRAINING_PATTERN_4;
+ else if (link->caps.tps3_supported)
+ pattern = DP_TRAINING_PATTERN_3;
+ else
+ pattern = DP_TRAINING_PATTERN_2;
+ ret = dw_dp_link_train_set_pattern(dp, pattern);
+ if (ret)
+ return ret;
+
+ for (tries = 1; tries < 5; tries++) {
+ ret = dw_dp_link_train_update_vs_emph(dp);
+ if (ret)
+ return ret;
+
+ drm_dp_link_train_channel_eq_delay(&dp->aux, link->dpcd);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0)
+ return ret;
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ dev_err(dp->dev, "clock recovery lost while equalizing channel\n");
+ link->train.clock_recovered = false;
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(status, link->lanes)) {
+ link->train.channel_equalized = true;
+ break;
+ }
+
+ dw_dp_link_get_adjustments(link, status);
+ }
+
+ return 0;
+}
+
+static int dw_dp_link_downgrade(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+
+ state = dw_dp_get_bridge_state(dp);
+
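+ /* Step down one link rate per attempt; RBR (1.62 Gbps) is the floor */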
+ switch (link->rate) {
+ case 162000:
+ return -EINVAL;
+ case 270000:
+ link->rate = 162000;
+ break;
+ case 540000:
+ link->rate = 270000;
+ break;
+ case 810000:
+ link->rate = 540000;
+ break;
+ }
+
+ if (!dw_dp_bandwidth_ok(dp, &state->mode, state->bpp, link->lanes,
+ link->rate))
+ return -E2BIG;
+
+ return 0;
+}
+
+static int dw_dp_link_train_full(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
+retry:
+ dw_dp_link_train_init(&link->train);
+
+ dev_dbg(dp->dev, "full-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "", link->rate / 100);
+
+ ret = dw_dp_link_configure(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to configure DP link: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_dp_link_clock_recovery(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "clock recovery failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!link->train.clock_recovered) {
+ dev_err(dp->dev, "clock recovery failed, downgrading link\n");
+
+ ret = dw_dp_link_downgrade(dp);
+ if (ret < 0)
+ goto out;
+ else
+ goto retry;
+ }
+
+ dev_dbg(dp->dev, "clock recovery succeeded\n");
+
+ ret = dw_dp_link_channel_equalization(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "channel equalization failed: %d\n", ret);
+ goto out;
+ }
+
+ if (!link->train.channel_equalized) {
+ dev_err(dp->dev, "channel equalization failed, downgrading link\n");
+
+ ret = dw_dp_link_downgrade(dp);
+ if (ret < 0)
+ goto out;
+ else
+ goto retry;
+ }
+
+ dev_dbg(dp->dev, "channel equalization succeeded\n");
+
+out:
+ dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+ return ret;
+}
+
+static int dw_dp_link_train_fast(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+ u8 status[DP_LINK_STATUS_SIZE];
+ u8 pattern;
+
+ dw_dp_link_train_init(&link->train);
+
+ dev_dbg(dp->dev, "fast-training link: %u lane%s at %u MHz\n",
+ link->lanes, (link->lanes > 1) ? "s" : "", link->rate / 100);
+
+ ret = dw_dp_link_configure(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to configure DP link: %d\n", ret);
+ return ret;
+ }
+
+ ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1);
+ if (ret)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ if (link->caps.tps4_supported)
+ pattern = DP_TRAINING_PATTERN_4;
+ else if (link->caps.tps3_supported)
+ pattern = DP_TRAINING_PATTERN_3;
+ else
+ pattern = DP_TRAINING_PATTERN_2;
+ ret = dw_dp_link_train_set_pattern(dp, pattern);
+ if (ret)
+ goto out;
+
+ usleep_range(500, 1000);
+
+ ret = drm_dp_dpcd_read_link_status(&dp->aux, status);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to read link status: %d\n", ret);
+ goto out;
+ }
+
+ if (!drm_dp_clock_recovery_ok(status, link->lanes)) {
+ dev_err(dp->dev, "clock recovery failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!drm_dp_channel_eq_ok(status, link->lanes)) {
+ dev_err(dp->dev, "channel equalization failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+ return ret;
+}
+
+static int dw_dp_link_train(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ int ret;
+
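+ /*
+ * Fast training is only attempted while a previous training result
+ * is still valid; otherwise, or if it fails, fall back to full
+ * link training.
+ */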
+ if (link->caps.fast_training) {
+ if (dw_dp_link_train_valid(&link->train)) {
+ ret = dw_dp_link_train_fast(dp);
+ if (ret < 0)
+ dev_err(dp->dev, "fast link training failed: %d\n", ret);
+ else
+ return 0;
+ }
+ }
+
+ ret = dw_dp_link_train_full(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "full link training failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int dw_dp_send_sdp(struct dw_dp *dp, struct dw_dp_sdp *sdp)
+{
+ const u8 *payload = sdp->base.db;
+ u32 reg;
+ int i, nr;
+
+ nr = find_first_zero_bit(dp->sdp_reg_bank, SDP_REG_BANK_SIZE);
+ if (nr < SDP_REG_BANK_SIZE)
+ set_bit(nr, dp->sdp_reg_bank);
+ else
+ return -EBUSY;
+
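+ /* Each SDP slot occupies nine 32-bit words: one header word plus eight payload words */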
+ reg = DW_DP_SDP_REGISTER_BANK + nr * 9 * 4;
+
+ /* SDP header */
+ regmap_write(dp->regmap, reg, get_unaligned_le32(&sdp->base.sdp_header));
+
+ /* SDP data payload */
+ for (i = 1; i < 9; i++, payload += 4)
+ regmap_write(dp->regmap, reg + i * 4,
+ FIELD_PREP(SDP_REGS, get_unaligned_le32(payload)));
+
+ if (sdp->flags & DW_DP_SDP_VERTICAL_INTERVAL)
+ regmap_update_bits(dp->regmap, DW_DP_SDP_VERTICAL_CTRL,
+ EN_VERTICAL_SDP << nr,
+ EN_VERTICAL_SDP << nr);
+
+ if (sdp->flags & DW_DP_SDP_HORIZONTAL_INTERVAL)
+ regmap_update_bits(dp->regmap, DW_DP_SDP_HORIZONTAL_CTRL,
+ EN_HORIZONTAL_SDP << nr,
+ EN_HORIZONTAL_SDP << nr);
+
+ return 0;
+}
+
+static int dw_dp_send_vsc_sdp(struct dw_dp *dp)
+{
+ struct dw_dp_bridge_state *state;
+ struct dw_dp_sdp sdp = {};
+ struct drm_dp_vsc_sdp vsc = {};
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return -EINVAL;
+
+ vsc.bpc = state->bpc;
+
+ vsc.sdp_type = DP_SDP_VSC;
+ vsc.revision = 0x5;
+ vsc.length = 0x13;
+ vsc.content_type = DP_CONTENT_TYPE_NOT_DEFINED;
+
+ sdp.flags = DW_DP_SDP_VERTICAL_INTERVAL;
+
+ switch (state->color_format) {
+ case DRM_COLOR_FORMAT_YCBCR444:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV444;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR420:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV420;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR422:
+ vsc.pixelformat = DP_PIXELFORMAT_YUV422;
+ break;
+ case DRM_COLOR_FORMAT_RGB444:
+ default:
+ vsc.pixelformat = DP_PIXELFORMAT_RGB;
+ break;
+ }
+
+ if (state->color_format == DRM_COLOR_FORMAT_RGB444) {
+ vsc.colorimetry = DP_COLORIMETRY_DEFAULT;
+ vsc.dynamic_range = DP_DYNAMIC_RANGE_VESA;
+ } else {
+ vsc.colorimetry = DP_COLORIMETRY_BT709_YCC;
+ vsc.dynamic_range = DP_DYNAMIC_RANGE_CTA;
+ }
+
+ drm_dp_vsc_sdp_pack(&vsc, &sdp.base);
+
+ return dw_dp_send_sdp(dp, &sdp);
+}
+
+static int dw_dp_video_set_pixel_mode(struct dw_dp *dp)
+{
+ switch (dp->pixel_mode) {
+ case DW_DP_MP_SINGLE_PIXEL:
+ case DW_DP_MP_DUAL_PIXEL:
+ case DW_DP_MP_QUAD_PIXEL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, PIXEL_MODE_SELECT,
+ FIELD_PREP(PIXEL_MODE_SELECT, dp->pixel_mode));
+
+ return 0;
+}
+
+static bool dw_dp_video_need_vsc_sdp(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return false;
+
+ if (!link->vsc_sdp_supported)
+ return false;
+
+ if (state->color_format == DRM_COLOR_FORMAT_YCBCR420)
+ return true;
+
+ return false;
+}
+
+static int dw_dp_video_set_msa(struct dw_dp *dp, u8 color_format, u8 bpc,
+ u16 vstart, u16 hstart)
+{
+ u16 misc = 0;
+
+ if (dw_dp_video_need_vsc_sdp(dp))
+ misc |= DP_MSA_MISC_COLOR_VSC_SDP;
+
+ switch (color_format) {
+ case DRM_COLOR_FORMAT_RGB444:
+ misc |= DP_MSA_MISC_COLOR_RGB;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR444:
+ misc |= DP_MSA_MISC_COLOR_YCBCR_444_BT709;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR422:
+ misc |= DP_MSA_MISC_COLOR_YCBCR_422_BT709;
+ break;
+ case DRM_COLOR_FORMAT_YCBCR420:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (bpc) {
+ case 6:
+ misc |= DP_MSA_MISC_6_BPC;
+ break;
+ case 8:
+ misc |= DP_MSA_MISC_8_BPC;
+ break;
+ case 10:
+ misc |= DP_MSA_MISC_10_BPC;
+ break;
+ case 12:
+ misc |= DP_MSA_MISC_12_BPC;
+ break;
+ case 16:
+ misc |= DP_MSA_MISC_16_BPC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA1,
+ FIELD_PREP(VSTART, vstart) | FIELD_PREP(HSTART, hstart));
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA2, FIELD_PREP(MISC0, misc));
+ regmap_write(dp->regmap, DW_DP_VIDEO_MSA3, FIELD_PREP(MISC1, misc >> 8));
+
+ return 0;
+}
+
+static void dw_dp_video_disable(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE,
+ FIELD_PREP(VIDEO_STREAM_ENABLE, 0));
+}
+
+static int dw_dp_video_enable(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ struct dw_dp_bridge_state *state;
+ struct drm_display_mode *mode;
+ u8 color_format, bpc, bpp;
+ u8 init_threshold, vic;
+ u32 hstart, hactive, hblank, h_sync_width, h_front_porch;
+ u32 vstart, vactive, vblank, v_sync_width, v_front_porch;
+ u32 peak_stream_bandwidth, link_bandwidth;
+ u32 average_bytes_per_tu, average_bytes_per_tu_frac;
+ u32 ts, hblank_interval;
+ u32 value;
+ int ret;
+
+ state = dw_dp_get_bridge_state(dp);
+ if (!state)
+ return -EINVAL;
+
+ bpc = state->bpc;
+ bpp = state->bpp;
+ color_format = state->color_format;
+ mode = &state->mode;
+
+ vstart = mode->vtotal - mode->vsync_start;
+ hstart = mode->htotal - mode->hsync_start;
+
+ ret = dw_dp_video_set_pixel_mode(dp);
+ if (ret)
+ return ret;
+
+ ret = dw_dp_video_set_msa(dp, color_format, bpc, vstart, hstart);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_MAPPING,
+ FIELD_PREP(VIDEO_MAPPING, state->video_mapping));
+
+ /* Configure DW_DP_VINPUT_POLARITY_CTRL register */
+ value = 0;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ value |= FIELD_PREP(HSYNC_IN_POLARITY, 1);
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ value |= FIELD_PREP(VSYNC_IN_POLARITY, 1);
+ regmap_write(dp->regmap, DW_DP_VINPUT_POLARITY_CTRL, value);
+
+ /* Configure DW_DP_VIDEO_CONFIG1 register */
+ hactive = mode->hdisplay;
+ hblank = mode->htotal - mode->hdisplay;
+ value = FIELD_PREP(HACTIVE, hactive) | FIELD_PREP(HBLANK, hblank);
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ value |= FIELD_PREP(I_P, 1);
+ vic = drm_match_cea_mode(mode);
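+ /* The VICs below are the interlaced CEA-861 modes, which also need R_V_BLANK_IN_OSC */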
+ if (vic == 5 || vic == 6 || vic == 7 ||
+ vic == 10 || vic == 11 || vic == 20 ||
+ vic == 21 || vic == 22 || vic == 39 ||
+ vic == 25 || vic == 26 || vic == 40 ||
+ vic == 44 || vic == 45 || vic == 46 ||
+ vic == 50 || vic == 51 || vic == 54 ||
+ vic == 55 || vic == 58 || vic == 59)
+ value |= R_V_BLANK_IN_OSC;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG1, value);
+
+ /* Configure DW_DP_VIDEO_CONFIG2 register */
+ vblank = mode->vtotal - mode->vdisplay;
+ vactive = mode->vdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG2,
+ FIELD_PREP(VBLANK, vblank) | FIELD_PREP(VACTIVE, vactive));
+
+ /* Configure DW_DP_VIDEO_CONFIG3 register */
+ h_sync_width = mode->hsync_end - mode->hsync_start;
+ h_front_porch = mode->hsync_start - mode->hdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG3,
+ FIELD_PREP(H_SYNC_WIDTH, h_sync_width) |
+ FIELD_PREP(H_FRONT_PORCH, h_front_porch));
+
+ /* Configure DW_DP_VIDEO_CONFIG4 register */
+ v_sync_width = mode->vsync_end - mode->vsync_start;
+ v_front_porch = mode->vsync_start - mode->vdisplay;
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG4,
+ FIELD_PREP(V_SYNC_WIDTH, v_sync_width) |
+ FIELD_PREP(V_FRONT_PORCH, v_front_porch));
+
+ /* Configure DW_DP_VIDEO_CONFIG5 register */
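+ /*
+ * ts is the average number of payload bytes per 64-symbol transfer
+ * unit (TU) scaled by 1000, so that one decimal digit of the
+ * fraction can be programmed alongside the integer part.
+ */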
+ peak_stream_bandwidth = mode->clock * bpp / 8;
+ link_bandwidth = (link->rate / 1000) * link->lanes;
+ ts = peak_stream_bandwidth * 64 / link_bandwidth;
+ average_bytes_per_tu = ts / 1000;
+ average_bytes_per_tu_frac = ts / 100 - average_bytes_per_tu * 10;
+ if (dp->pixel_mode == DW_DP_MP_SINGLE_PIXEL) {
+ if (average_bytes_per_tu < 6)
+ init_threshold = 32;
+ else if (hblank <= 80 && color_format != DRM_COLOR_FORMAT_YCBCR420)
+ init_threshold = 12;
+ else if (hblank <= 40 && color_format == DRM_COLOR_FORMAT_YCBCR420)
+ init_threshold = 3;
+ else
+ init_threshold = 16;
+ } else {
+ u32 t1 = 0, t2 = 0, t3 = 0;
+
+ switch (bpc) {
+ case 6:
+ t1 = (4 * 1000 / 9) * link->lanes;
+ break;
+ case 8:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422) {
+ t1 = (1000 / 2) * link->lanes;
+ } else {
+ if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 3) * link->lanes;
+ else
+ t1 = (3000 / 16) * link->lanes;
+ }
+ break;
+ case 10:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422)
+ t1 = (2000 / 5) * link->lanes;
+ else
+ t1 = (4000 / 15) * link->lanes;
+ break;
+ case 12:
+ if (color_format == DRM_COLOR_FORMAT_YCBCR422) {
+ if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 6) * link->lanes;
+ else
+ t1 = (1000 / 3) * link->lanes;
+ } else {
+ t1 = (2000 / 9) * link->lanes;
+ }
+ break;
+ case 16:
+ if (color_format != DRM_COLOR_FORMAT_YCBCR422 &&
+ dp->pixel_mode == DW_DP_MP_DUAL_PIXEL)
+ t1 = (1000 / 6) * link->lanes;
+ else
+ t1 = (1000 / 4) * link->lanes;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (color_format == DRM_COLOR_FORMAT_YCBCR420)
+ t2 = (link->rate / 4) * 1000 / (mode->clock / 2);
+ else
+ t2 = (link->rate / 4) * 1000 / mode->clock;
+
+ if (average_bytes_per_tu_frac)
+ t3 = average_bytes_per_tu + 1;
+ else
+ t3 = average_bytes_per_tu;
+ init_threshold = t1 * t2 * t3 / (1000 * 1000);
+ if (init_threshold <= 16 || average_bytes_per_tu < 10)
+ init_threshold = 40;
+ }
+
+ regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG5,
+ FIELD_PREP(INIT_THRESHOLD_HI, init_threshold >> 6) |
+ FIELD_PREP(AVERAGE_BYTES_PER_TU_FRAC, average_bytes_per_tu_frac) |
+ FIELD_PREP(INIT_THRESHOLD, init_threshold) |
+ FIELD_PREP(AVERAGE_BYTES_PER_TU, average_bytes_per_tu));
+
+ /* Configure DW_DP_VIDEO_HBLANK_INTERVAL register */
+ hblank_interval = hblank * (link->rate / 4) / mode->clock;
+ regmap_write(dp->regmap, DW_DP_VIDEO_HBLANK_INTERVAL,
+ FIELD_PREP(HBLANK_INTERVAL_EN, 1) |
+ FIELD_PREP(HBLANK_INTERVAL, hblank_interval));
+
+ /* Video stream enable */
+ regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE,
+ FIELD_PREP(VIDEO_STREAM_ENABLE, 1));
+
+ if (dw_dp_video_need_vsc_sdp(dp))
+ dw_dp_send_vsc_sdp(dp);
+
+ return 0;
+}
+
+static void dw_dp_hpd_init(struct dw_dp *dp)
+{
+ /* Enable all HPD interrupts */
+ regmap_update_bits(dp->regmap, DW_DP_HPD_INTERRUPT_ENABLE,
+ HPD_UNPLUG_EN | HPD_PLUG_EN | HPD_IRQ_EN,
+ FIELD_PREP(HPD_UNPLUG_EN, 1) |
+ FIELD_PREP(HPD_PLUG_EN, 1) |
+ FIELD_PREP(HPD_IRQ_EN, 1));
+
+ /* Enable all top-level interrupts */
+ regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE,
+ HPD_EVENT_EN, FIELD_PREP(HPD_EVENT_EN, 1));
+}
+
+static void dw_dp_aux_init(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE,
+ AUX_REPLY_EVENT_EN, FIELD_PREP(AUX_REPLY_EVENT_EN, 1));
+}
+
+static void dw_dp_init_hw(struct dw_dp *dp)
+{
+ regmap_update_bits(dp->regmap, DW_DP_CCTL, DEFAULT_FAST_LINK_TRAIN_EN,
+ FIELD_PREP(DEFAULT_FAST_LINK_TRAIN_EN, 0));
+
+ dw_dp_hpd_init(dp);
+ dw_dp_aux_init(dp);
+}
+
+static int dw_dp_aux_write_data(struct dw_dp *dp, const u8 *buffer, size_t size)
+{
+ size_t i, j;
+
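+ /* Pack the payload four bytes per 32-bit AUX data register, LSB first */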
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value = 0;
+
+ for (j = 0; j < num; j++)
+ value |= buffer[i * 4 + j] << (j * 8);
+
+ regmap_write(dp->regmap, DW_DP_AUX_DATA0 + i * 4, value);
+ }
+
+ return size;
+}
+
+static int dw_dp_aux_read_data(struct dw_dp *dp, u8 *buffer, size_t size)
+{
+ size_t i, j;
+
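+ /* Unpack each 32-bit AUX data register back into the buffer, LSB first */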
+ for (i = 0; i < DIV_ROUND_UP(size, 4); i++) {
+ size_t num = min_t(size_t, size - i * 4, 4);
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_AUX_DATA0 + i * 4, &value);
+
+ for (j = 0; j < num; j++)
+ buffer[i * 4 + j] = value >> (j * 8);
+ }
+
+ return size;
+}
+
+static ssize_t dw_dp_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct dw_dp *dp = container_of(aux, struct dw_dp, aux);
+ unsigned long timeout = msecs_to_jiffies(10);
+ u32 status, value;
+ ssize_t ret = 0;
+
+ if (WARN_ON(msg->size > 16))
+ return -E2BIG;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ ret = dw_dp_aux_write_data(dp, msg->buffer, msg->size);
+ if (ret < 0)
+ return ret;
+ break;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
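+ /* A zero-length transfer becomes an address-only transaction */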
+ if (msg->size > 0)
+ value = FIELD_PREP(AUX_LEN_REQ, msg->size - 1);
+ else
+ value = FIELD_PREP(I2C_ADDR_ONLY, 1);
+ value |= FIELD_PREP(AUX_CMD_TYPE, msg->request);
+ value |= FIELD_PREP(AUX_ADDR, msg->address);
+ regmap_write(dp->regmap, DW_DP_AUX_CMD, value);
+
+ status = wait_for_completion_timeout(&dp->complete, timeout);
+ if (!status) {
+ dev_err(dp->dev, "timeout waiting for AUX reply\n");
+ return -ETIMEDOUT;
+ }
+
+ regmap_read(dp->regmap, DW_DP_AUX_STATUS, &value);
+ if (value & AUX_TIMEOUT)
+ return -ETIMEDOUT;
+
+ msg->reply = FIELD_GET(AUX_STATUS, value);
+
+ if (msg->size > 0 && msg->reply == DP_AUX_NATIVE_REPLY_ACK) {
+ if (msg->request & DP_AUX_I2C_READ) {
+ size_t count = FIELD_GET(AUX_BYTES_READ, value) - 1;
+
+ if (count != msg->size)
+ return -EBUSY;
+
+ ret = dw_dp_aux_read_data(dp, msg->buffer, count);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Video timing limits for DP:
+ * 1. the horizontal front porch must be aligned to 2 pixels;
+ * 2. the hsync width must be at least 9 pixels;
+ * 3. the horizontal back porch must be at least 16 pixels.
+ */
+static int dw_dp_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_bridge_state *state;
+ const struct dw_dp_output_format *fmt;
+ struct drm_display_mode *mode;
+ int min_hbp = 16;
+ int min_hsync = 9;
+
+ state = to_dw_dp_bridge_state(bridge_state);
+ mode = &state->mode;
+
+ fmt = dw_dp_get_output_format(bridge_state->output_bus_cfg.format);
+ if (!fmt)
+ return -EINVAL;
+
+ state->video_mapping = fmt->video_mapping;
+ state->color_format = fmt->color_format;
+ state->bpc = fmt->bpc;
+ state->bpp = fmt->bpp;
+
+ if ((adjusted_mode->hsync_start - adjusted_mode->hdisplay) & 0x1) {
+ adjusted_mode->hsync_start += 1;
+ dev_warn(dp->dev, "hfp is not 2 pixeel aligned, fixup to aligned hfp\n");
+ }
+
+ if (adjusted_mode->hsync_end - adjusted_mode->hsync_start < min_hsync) {
+ adjusted_mode->hsync_end = adjusted_mode->hsync_start + min_hsync;
+ dev_warn(dp->dev, "hsync is too narrow, fixup to min hsync:%d\n", min_hsync);
+ }
+
+ if (adjusted_mode->htotal - adjusted_mode->hsync_end < min_hbp) {
+ adjusted_mode->htotal = adjusted_mode->hsync_end + min_hbp;
+ dev_warn(dp->dev, "hbp is too narrow, fixup to min hbp:%d\n", min_hbp);
+ }
+
+ drm_mode_copy(mode, adjusted_mode);
+
+ return 0;
+}
+
+static enum drm_mode_status dw_dp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_link *link = &dp->link;
+ u32 min_bpp;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCBCR420 &&
+ link->vsc_sdp_supported &&
+ (drm_mode_is_420_only(info, mode) || drm_mode_is_420_also(info, mode)))
+ min_bpp = 12;
+ else if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422)
+ min_bpp = 16;
+ else if (info->color_formats & DRM_COLOR_FORMAT_RGB444)
+ min_bpp = 18;
+ else
+ min_bpp = 24;
+
+ if (!link->vsc_sdp_supported &&
+ drm_mode_is_420_only(info, mode))
+ return MODE_NO_420;
+
+ if (!dw_dp_bandwidth_ok(dp, mode, min_bpp, link->lanes, link->rate))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static bool dw_dp_needs_link_retrain(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+
+ if (!dw_dp_link_train_valid(&link->train))
+ return false;
+
+ if (drm_dp_dpcd_read_link_status(&dp->aux, link_status) < 0)
+ return false;
+
+ /* Retrain if Channel EQ or CR not ok */
+ return !drm_dp_channel_eq_ok(link_status, dp->link.lanes);
+}
+
+static void dw_dp_link_disable(struct dw_dp *dp)
+{
+ struct dw_dp_link *link = &dp->link;
+
+ if (dw_dp_hpd_detect(dp))
+ drm_dp_link_power_down(&dp->aux, dp->link.revision);
+
+ dw_dp_phy_xmit_enable(dp, 0);
+
+ phy_power_off(dp->phy);
+
+ link->train.clock_recovered = false;
+ link->train.channel_equalized = false;
+}
+
+static int dw_dp_link_enable(struct dw_dp *dp)
+{
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret)
+ return ret;
+
+ ret = drm_dp_link_power_up(&dp->aux, dp->link.revision);
+ if (ret < 0)
+ return ret;
+
+ ret = dw_dp_link_train(dp);
+
+ return ret;
+}
+
+static void dw_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ int ret;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (!connector) {
+ dev_err(dp->dev, "failed to get connector\n");
+ return;
+ }
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (!conn_state) {
+ dev_err(dp->dev, "failed to get connector state\n");
+ return;
+ }
+
+ set_bit(0, dp->sdp_reg_bank);
+
+ ret = dw_dp_link_enable(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable link: %d\n", ret);
+ return;
+ }
+
+ ret = dw_dp_video_enable(dp);
+ if (ret < 0) {
+ dev_err(dp->dev, "failed to enable video: %d\n", ret);
+ return;
+ }
+}
+
+static void dw_dp_reset(struct dw_dp *dp)
+{
+ int val;
+
+ disable_irq(dp->irq);
+ regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET,
+ FIELD_PREP(CONTROLLER_RESET, 1));
+ usleep_range(10, 20);
+ regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET,
+ FIELD_PREP(CONTROLLER_RESET, 0));
+
+ dw_dp_init_hw(dp);
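+ /*
+ * The HPD logic restarts after the reset; wait for the plug event
+ * and clear it here so the IRQ handler does not see it again.
+ */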
+ regmap_read_poll_timeout(dp->regmap, DW_DP_HPD_STATUS, val,
+ FIELD_GET(HPD_HOT_PLUG, val), 200, 200000);
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG);
+ enable_irq(dp->irq);
+}
+
+static void dw_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+
+ dw_dp_video_disable(dp);
+ dw_dp_link_disable(dp);
+ bitmap_zero(dp->sdp_reg_bank, SDP_REG_BANK_SIZE);
+ dw_dp_reset(dp);
+}
+
+static bool dw_dp_hpd_detect_link(struct dw_dp *dp, struct drm_connector *connector)
+{
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret < 0)
+ return false;
+ ret = dw_dp_link_parse(dp, connector);
+ phy_power_off(dp->phy);
+
+ return !ret;
+}
+
+static enum drm_connector_status dw_dp_bridge_detect(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+
+ if (!dw_dp_hpd_detect(dp))
+ return connector_status_disconnected;
+
+ if (!dw_dp_hpd_detect_link(dp, connector))
+ return connector_status_disconnected;
+
+ return connector_status_connected;
+}
+
+static const struct drm_edid *dw_dp_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ const struct drm_edid *edid;
+ int ret;
+
+ ret = phy_power_on(dp->phy);
+ if (ret)
+ return NULL;
+
+ edid = drm_edid_read_ddc(connector, &dp->aux.ddc);
+
+ phy_power_off(dp->phy);
+
+ return edid;
+}
+
+static u32 *dw_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct dw_dp *dp = bridge_to_dp(bridge);
+ struct dw_dp_link *link = &dp->link;
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_display_mode mode = crtc_state->mode;
+ const struct dw_dp_output_format *fmt;
+ u32 i, j = 0;
+ u32 *output_fmts;
+
+ *num_output_fmts = 0;
+
+ output_fmts = kcalloc(ARRAY_SIZE(dw_dp_output_formats), sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
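+ /* Keep only formats the sink supports that also fit the current link bandwidth */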
+ for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++) {
+ fmt = &dw_dp_output_formats[i];
+
+ if (fmt->bpc > conn_state->max_bpc)
+ continue;
+
+ if (!(fmt->color_format & di->color_formats))
+ continue;
+
+ if (fmt->color_format == DRM_COLOR_FORMAT_YCBCR420 &&
+ !link->vsc_sdp_supported)
+ continue;
+
+ if (fmt->color_format != DRM_COLOR_FORMAT_YCBCR420 &&
+ drm_mode_is_420_only(di, &mode))
+ continue;
+
+ if (!dw_dp_bandwidth_ok(dp, &mode, fmt->bpp, link->lanes, link->rate))
+ continue;
+
+ output_fmts[j++] = fmt->bus_format;
+ }
+
+ *num_output_fmts = j;
+
+ return output_fmts;
+}
+
+static struct drm_bridge_state *dw_dp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
+{
+ struct dw_dp_bridge_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
+
+ return &state->base;
+}
+
+static const struct drm_bridge_funcs dw_dp_bridge_funcs = {
+ .atomic_duplicate_state = dw_dp_bridge_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
+ .atomic_get_output_bus_fmts = dw_dp_bridge_atomic_get_output_bus_fmts,
+ .atomic_check = dw_dp_bridge_atomic_check,
+ .mode_valid = dw_dp_bridge_mode_valid,
+ .atomic_enable = dw_dp_bridge_atomic_enable,
+ .atomic_disable = dw_dp_bridge_atomic_disable,
+ .detect = dw_dp_bridge_detect,
+ .edid_read = dw_dp_bridge_edid_read,
+};
+
+static int dw_dp_link_retrain(struct dw_dp *dp)
+{
+ struct drm_device *dev = dp->bridge.dev;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
+
+ if (!dw_dp_needs_link_retrain(dp))
+ return 0;
+
+ dev_dbg(dp->dev, "Retraining link\n");
+
+ drm_modeset_acquire_init(&ctx, 0);
+ for (;;) {
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(&ctx);
+ }
+
+ if (!ret)
+ ret = dw_dp_link_train(dp);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static void dw_dp_hpd_work(struct work_struct *work)
+{
+ struct dw_dp *dp = container_of(work, struct dw_dp, hpd_work);
+ bool long_hpd;
+ int ret;
+
+ mutex_lock(&dp->irq_lock);
+ long_hpd = dp->hotplug.long_hpd;
+ mutex_unlock(&dp->irq_lock);
+
+ dev_dbg(dp->dev, "[drm] Get hpd irq - %s\n", long_hpd ? "long" : "short");
+
+ if (!long_hpd) {
+ if (dw_dp_needs_link_retrain(dp)) {
+ ret = dw_dp_link_retrain(dp);
+ if (ret)
+ dev_warn(dp->dev, "Retrain link failed\n");
+ }
+ } else {
+ drm_helper_hpd_irq_event(dp->bridge.dev);
+ }
+}
+
+static void dw_dp_handle_hpd_event(struct dw_dp *dp)
+{
+ u32 value;
+
+ mutex_lock(&dp->irq_lock);
+ regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value);
+
+ if (value & HPD_IRQ) {
+ dev_dbg(dp->dev, "IRQ from the HPD\n");
+ dp->hotplug.long_hpd = false;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_IRQ);
+ }
+
+ if (value & HPD_HOT_PLUG) {
+ dev_dbg(dp->dev, "Hot plug detected\n");
+ dp->hotplug.long_hpd = true;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG);
+ }
+
+ if (value & HPD_HOT_UNPLUG) {
+ dev_dbg(dp->dev, "Unplug detected\n");
+ dp->hotplug.long_hpd = true;
+ regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_UNPLUG);
+ }
+ mutex_unlock(&dp->irq_lock);
+
+ schedule_work(&dp->hpd_work);
+}
+
+static irqreturn_t dw_dp_irq(int irq, void *data)
+{
+ struct dw_dp *dp = data;
+ u32 value;
+
+ regmap_read(dp->regmap, DW_DP_GENERAL_INTERRUPT, &value);
+ if (!value)
+ return IRQ_NONE;
+
+ if (value & HPD_EVENT)
+ dw_dp_handle_hpd_event(dp);
+
+ if (value & AUX_REPLY_EVENT) {
+ regmap_write(dp->regmap, DW_DP_GENERAL_INTERRUPT, AUX_REPLY_EVENT);
+ complete(&dp->complete);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_range dw_dp_readable_ranges[] = {
+ regmap_reg_range(DW_DP_VERSION_NUMBER, DW_DP_ID),
+ regmap_reg_range(DW_DP_CONFIG_REG1, DW_DP_CONFIG_REG3),
+ regmap_reg_range(DW_DP_CCTL, DW_DP_SOFT_RESET_CTRL),
+ regmap_reg_range(DW_DP_VSAMPLE_CTRL, DW_DP_VIDEO_HBLANK_INTERVAL),
+ regmap_reg_range(DW_DP_AUD_CONFIG1, DW_DP_AUD_CONFIG1),
+ regmap_reg_range(DW_DP_SDP_VERTICAL_CTRL, DW_DP_SDP_STATUS_EN),
+ regmap_reg_range(DW_DP_PHYIF_CTRL, DW_DP_PHYIF_PWRDOWN_CTRL),
+ regmap_reg_range(DW_DP_AUX_CMD, DW_DP_AUX_DATA3),
+ regmap_reg_range(DW_DP_GENERAL_INTERRUPT, DW_DP_HPD_INTERRUPT_ENABLE),
+};
+
+static const struct regmap_access_table dw_dp_readable_table = {
+ .yes_ranges = dw_dp_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dw_dp_readable_ranges),
+};
+
+static const struct regmap_config dw_dp_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .max_register = DW_DP_MAX_REGISTER,
+ .rd_table = &dw_dp_readable_table,
+};
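
Two details of this regmap config are worth noting: fast_io makes regmap serialize accesses with a spinlock instead of a mutex, keeping the helpers usable from atomic context, and since only a read table is supplied, reads outside the listed ranges are rejected while writes remain allowed anywhere up to DW_DP_MAX_REGISTER.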
+
+static void dw_dp_phy_exit(void *data)
+{
+ struct dw_dp *dp = data;
+
+ phy_exit(dp->phy);
+}
+
+struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder,
+ const struct dw_dp_plat_data *plat_data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dp *dp;
+ struct drm_bridge *bridge;
+ void __iomem *res;
+ int ret;
+
+ dp = devm_drm_bridge_alloc(dev, struct dw_dp, bridge, &dw_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return ERR_CAST(dp);
+
+ dp->dev = dev;
+ dp->pixel_mode = DW_DP_MP_QUAD_PIXEL;
+
+ dp->plat_data.max_link_rate = plat_data->max_link_rate;
+ bridge = &dp->bridge;
+ mutex_init(&dp->irq_lock);
+ INIT_WORK(&dp->hpd_work, dw_dp_hpd_work);
+ init_completion(&dp->complete);
+
+ res = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(res))
+ return ERR_CAST(res);
+
+ dp->regmap = devm_regmap_init_mmio(dev, res, &dw_dp_regmap_config);
+ if (IS_ERR(dp->regmap)) {
+ dev_err_probe(dev, PTR_ERR(dp->regmap), "failed to create regmap\n");
+ return ERR_CAST(dp->regmap);
+ }
+
+ dp->phy = devm_of_phy_get(dev, dev->of_node, NULL);
+ if (IS_ERR(dp->phy)) {
+ dev_err_probe(dev, PTR_ERR(dp->phy), "failed to get phy\n");
+ return ERR_CAST(dp->phy);
+ }
+
+ dp->apb_clk = devm_clk_get_enabled(dev, "apb");
+ if (IS_ERR(dp->apb_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->apb_clk), "failed to get apb clock\n");
+ return ERR_CAST(dp->apb_clk);
+ }
+
+ dp->aux_clk = devm_clk_get_enabled(dev, "aux");
+ if (IS_ERR(dp->aux_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->aux_clk), "failed to get aux clock\n");
+ return ERR_CAST(dp->aux_clk);
+ }
+
+ dp->i2s_clk = devm_clk_get(dev, "i2s");
+ if (IS_ERR(dp->i2s_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->i2s_clk), "failed to get i2s clock\n");
+ return ERR_CAST(dp->i2s_clk);
+ }
+
+ dp->spdif_clk = devm_clk_get(dev, "spdif");
+ if (IS_ERR(dp->spdif_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->spdif_clk), "failed to get spdif clock\n");
+ return ERR_CAST(dp->spdif_clk);
+ }
+
+ dp->hdcp_clk = devm_clk_get(dev, "hdcp");
+ if (IS_ERR(dp->hdcp_clk)) {
+ dev_err_probe(dev, PTR_ERR(dp->hdcp_clk), "failed to get hdcp clock\n");
+ return ERR_CAST(dp->hdcp_clk);
+ }
+
+ dp->rstc = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(dp->rstc)) {
+ dev_err_probe(dev, PTR_ERR(dp->rstc), "failed to get reset control\n");
+ return ERR_CAST(dp->rstc);
+ }
+
+ bridge->of_node = dev->of_node;
+ bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
+ bridge->ycbcr_420_allowed = true;
+
+ ret = devm_drm_bridge_add(dev, bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ dp->aux.dev = dev;
+ dp->aux.drm_dev = encoder->dev;
+ dp->aux.name = dev_name(dev);
+ dp->aux.transfer = dw_dp_aux_transfer;
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to register DP AUX channel\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err_probe(dev, ret, "Failed to attach bridge\n");
+ return ERR_PTR(ret);
+ }
+
+ dw_dp_init_hw(dp);
+
+ ret = phy_init(dp->phy);
+ if (ret) {
+ dev_err_probe(dev, ret, "phy init failed\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp);
+ if (ret)
+ return ERR_PTR(ret);
+
+ dp->irq = platform_get_irq(pdev, 0);
+ if (dp->irq < 0)
+ return ERR_PTR(dp->irq);
+
+ ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq,
+ IRQF_ONESHOT, dev_name(dev), dp);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to request irq\n");
+ return ERR_PTR(ret);
+ }
+
+ return dp;
+}
+EXPORT_SYMBOL_GPL(dw_dp_bind);
+
+MODULE_AUTHOR("Andy Yan <andyshrk@163.com>");
+MODULE_DESCRIPTION("DW DP Core Library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
index ab18f9a3bf23..df7a37eb47f4 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c
@@ -90,6 +90,11 @@ static int audio_hw_params(struct device *dev, void *data,
params->iec.status[0] & IEC958_AES0_NONAUDIO);
dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width);
+ if (daifmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
+ dw_hdmi_set_sample_iec958(dw->data.hdmi, 1);
+ else
+ dw_hdmi_set_sample_iec958(dw->data.hdmi, 0);
+
return 0;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 39332c57f2c5..fe4c026280f0 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -18,6 +18,7 @@
#include <drm/bridge/dw_hdmi_qp.h>
#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -26,6 +27,8 @@
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
+#include <media/cec.h>
+
#include <sound/hdmi-codec.h>
#include "dw-hdmi-qp.h"
@@ -131,17 +134,34 @@ struct dw_hdmi_qp_i2c {
bool is_segment;
};
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+struct dw_hdmi_qp_cec {
+ struct drm_connector *connector;
+ int irq;
+ u32 addresses;
+ struct cec_msg rx_msg;
+ u8 tx_status;
+ bool tx_done;
+ bool rx_done;
+};
+#endif
+
struct dw_hdmi_qp {
struct drm_bridge bridge;
struct device *dev;
struct dw_hdmi_qp_i2c *i2c;
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+ struct dw_hdmi_qp_cec *cec;
+#endif
+
struct {
const struct dw_hdmi_qp_phy_ops *ops;
void *data;
} phy;
+ unsigned long ref_clk_rate;
struct regmap *regm;
unsigned long tmds_char_rate;
@@ -848,8 +868,9 @@ static void dw_hdmi_qp_bridge_atomic_enable(struct drm_bridge *bridge,
return;
if (connector->display_info.is_hdmi) {
- dev_dbg(hdmi->dev, "%s mode=HDMI rate=%llu\n",
- __func__, conn_state->hdmi.tmds_char_rate);
+ dev_dbg(hdmi->dev, "%s mode=HDMI %s rate=%llu bpc=%u\n", __func__,
+ drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
+ conn_state->hdmi.tmds_char_rate, conn_state->hdmi.output_bpc);
op_mode = 0;
hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate;
} else {
@@ -965,6 +986,179 @@ static int dw_hdmi_qp_bridge_write_infoframe(struct drm_bridge *bridge,
}
}
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+static irqreturn_t dw_hdmi_qp_cec_hardirq(int irq, void *dev_id)
+{
+ struct dw_hdmi_qp *hdmi = dev_id;
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+ irqreturn_t ret = IRQ_HANDLED;
+ u32 stat;
+
+ stat = dw_hdmi_qp_read(hdmi, CEC_INT_STATUS);
+ if (stat == 0)
+ return IRQ_NONE;
+
+ dw_hdmi_qp_write(hdmi, stat, CEC_INT_CLEAR);
+
+ if (stat & CEC_STAT_LINE_ERR) {
+ cec->tx_status = CEC_TX_STATUS_ERROR;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_DONE) {
+ cec->tx_status = CEC_TX_STATUS_OK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_NACK) {
+ cec->tx_status = CEC_TX_STATUS_NACK;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ if (stat & CEC_STAT_EOM) {
+ unsigned int len, i, val;
+
+ val = dw_hdmi_qp_read(hdmi, CEC_RX_COUNT_STATUS);
+ len = (val & 0xf) + 1;
+
+ if (len > sizeof(cec->rx_msg.msg))
+ len = sizeof(cec->rx_msg.msg);
+
+ for (i = 0; i < 4; i++) {
+ val = dw_hdmi_qp_read(hdmi, CEC_RX_DATA3_0 + i * 4);
+ cec->rx_msg.msg[i * 4] = val & 0xff;
+ cec->rx_msg.msg[i * 4 + 1] = (val >> 8) & 0xff;
+ cec->rx_msg.msg[i * 4 + 2] = (val >> 16) & 0xff;
+ cec->rx_msg.msg[i * 4 + 3] = (val >> 24) & 0xff;
+ }
+
+ dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL);
+
+ cec->rx_msg.len = len;
+ cec->rx_done = true;
+
+ ret = IRQ_WAKE_THREAD;
+ }
+
+ return ret;
+}
+
+static irqreturn_t dw_hdmi_qp_cec_thread(int irq, void *dev_id)
+{
+ struct dw_hdmi_qp *hdmi = dev_id;
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+
+ if (cec->tx_done) {
+ cec->tx_done = false;
+ drm_connector_hdmi_cec_transmit_attempt_done(cec->connector,
+ cec->tx_status);
+ }
+
+ if (cec->rx_done) {
+ cec->rx_done = false;
+ drm_connector_hdmi_cec_received_msg(cec->connector, &cec->rx_msg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int dw_hdmi_qp_cec_init(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+
+ cec->connector = connector;
+
+ dw_hdmi_qp_write(hdmi, 0, CEC_TX_COUNT);
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+ dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N);
+
+ return devm_request_threaded_irq(hdmi->dev, cec->irq,
+ dw_hdmi_qp_cec_hardirq,
+ dw_hdmi_qp_cec_thread, IRQF_SHARED,
+ dev_name(hdmi->dev), hdmi);
+}
+
+static int dw_hdmi_qp_cec_log_addr(struct drm_bridge *bridge, u8 logical_addr)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ struct dw_hdmi_qp_cec *cec = hdmi->cec;
+
+ if (logical_addr == CEC_LOG_ADDR_INVALID)
+ cec->addresses = 0;
+ else
+ cec->addresses |= BIT(logical_addr) | CEC_ADDR_BROADCAST;
+
+ dw_hdmi_qp_write(hdmi, cec->addresses, CEC_ADDR);
+
+ return 0;
+}
+
+static int dw_hdmi_qp_cec_enable(struct drm_bridge *bridge, bool enable)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ unsigned int irqs;
+ u32 swdisable;
+
+ if (!enable) {
+ dw_hdmi_qp_write(hdmi, 0, CEC_INT_MASK_N);
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+
+ swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE);
+ swdisable |= CEC_SWDISABLE;
+ dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE);
+ } else {
+ swdisable = dw_hdmi_qp_read(hdmi, GLOBAL_SWDISABLE);
+ swdisable &= ~CEC_SWDISABLE;
+ dw_hdmi_qp_write(hdmi, swdisable, GLOBAL_SWDISABLE);
+
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+ dw_hdmi_qp_write(hdmi, 1, CEC_LOCK_CONTROL);
+
+ dw_hdmi_qp_cec_log_addr(bridge, CEC_LOG_ADDR_INVALID);
+
+ irqs = CEC_STAT_LINE_ERR | CEC_STAT_NACK | CEC_STAT_EOM |
+ CEC_STAT_DONE;
+ dw_hdmi_qp_write(hdmi, ~0, CEC_INT_CLEAR);
+ dw_hdmi_qp_write(hdmi, irqs, CEC_INT_MASK_N);
+ }
+
+ return 0;
+}
+
+static int dw_hdmi_qp_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge);
+ unsigned int i;
+ u32 val;
+
+ for (i = 0; i < msg->len; i++) {
+ if (!(i % 4))
+ val = msg->msg[i];
+ if ((i % 4) == 1)
+ val |= msg->msg[i] << 8;
+ if ((i % 4) == 2)
+ val |= msg->msg[i] << 16;
+ if ((i % 4) == 3)
+ val |= msg->msg[i] << 24;
+
+ if (i == (msg->len - 1) || (i % 4) == 3)
+ dw_hdmi_qp_write(hdmi, val, CEC_TX_DATA3_0 + (i / 4) * 4);
+ }
+
+ dw_hdmi_qp_write(hdmi, msg->len - 1, CEC_TX_COUNT);
+ dw_hdmi_qp_write(hdmi, CEC_CTRL_START, CEC_TX_CONTROL);
+
+ return 0;
+}
+#else
+#define dw_hdmi_qp_cec_init NULL
+#define dw_hdmi_qp_cec_enable NULL
+#define dw_hdmi_qp_cec_log_addr NULL
+#define dw_hdmi_qp_cec_transmit NULL
+#endif /* CONFIG_DRM_DW_HDMI_QP_CEC */
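
The transmit loop above packs message bytes little-endian into 32-bit data registers, four bytes per register, and the receive path in dw_hdmi_qp_cec_hardirq() unpacks the same layout. A standalone round-trip sketch of that packing, with a plain array standing in for the CEC_TX_DATA3_0/CEC_RX_DATA3_0 register banks:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const uint8_t msg[5] = { 0x40, 0x04, 0x00, 0x13, 0x37 };
            uint32_t regs[4] = { 0 };       /* stands in for CEC_TX_DATA3_0.. */
            uint8_t out[16] = { 0 };

            /* Pack: byte i lands in bits (i % 4) * 8 of register i / 4. */
            for (unsigned int i = 0; i < sizeof(msg); i++)
                    regs[i / 4] |= (uint32_t)msg[i] << ((i % 4) * 8);

            /* Unpack, as the RX IRQ handler does for CEC_RX_DATA3_0.. */
            for (unsigned int i = 0; i < 4; i++) {
                    out[i * 4 + 0] = regs[i] & 0xff;
                    out[i * 4 + 1] = (regs[i] >> 8) & 0xff;
                    out[i * 4 + 2] = (regs[i] >> 16) & 0xff;
                    out[i * 4 + 3] = (regs[i] >> 24) & 0xff;
            }

            printf("round-trip %s\n",
                   memcmp(msg, out, sizeof(msg)) ? "mismatch" : "ok");
            return 0;
    }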
+
static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -979,6 +1173,10 @@ static const struct drm_bridge_funcs dw_hdmi_qp_bridge_funcs = {
.hdmi_audio_startup = dw_hdmi_qp_audio_enable,
.hdmi_audio_shutdown = dw_hdmi_qp_audio_disable,
.hdmi_audio_prepare = dw_hdmi_qp_audio_prepare,
+ .hdmi_cec_init = dw_hdmi_qp_cec_init,
+ .hdmi_cec_enable = dw_hdmi_qp_cec_enable,
+ .hdmi_cec_log_addr = dw_hdmi_qp_cec_log_addr,
+ .hdmi_cec_transmit = dw_hdmi_qp_cec_transmit,
};
static irqreturn_t dw_hdmi_qp_main_hardirq(int irq, void *dev_id)
@@ -1014,13 +1212,11 @@ static void dw_hdmi_qp_init_hw(struct dw_hdmi_qp *hdmi)
{
dw_hdmi_qp_write(hdmi, 0, MAINUNIT_0_INT_MASK_N);
dw_hdmi_qp_write(hdmi, 0, MAINUNIT_1_INT_MASK_N);
- dw_hdmi_qp_write(hdmi, 428571429, TIMER_BASE_CONFIG0);
+ dw_hdmi_qp_write(hdmi, hdmi->ref_clk_rate, TIMER_BASE_CONFIG0);
/* Software reset */
dw_hdmi_qp_write(hdmi, 0x01, I2CM_CONTROL0);
-
dw_hdmi_qp_write(hdmi, 0x085c085c, I2CM_FM_SCL_CONFIG0);
-
dw_hdmi_qp_mod(hdmi, 0, I2CM_FM_EN, I2CM_INTERFACE_CONTROL0);
/* Clear DONE and ERROR interrupts */
@@ -1066,6 +1262,13 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->phy.ops = plat_data->phy_ops;
hdmi->phy.data = plat_data->phy_data;
+ if (plat_data->ref_clk_rate) {
+ hdmi->ref_clk_rate = plat_data->ref_clk_rate;
+ } else {
+ hdmi->ref_clk_rate = 428571429;
+ dev_warn(dev, "ref_clk_rate not provided, using vendor default\n");
+ }
+
dw_hdmi_qp_init_hw(hdmi);
ret = devm_request_threaded_irq(dev, plat_data->main_irq,
@@ -1085,6 +1288,12 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->bridge.vendor = "Synopsys";
hdmi->bridge.product = "DW HDMI QP TX";
+ if (plat_data->supported_formats)
+ hdmi->bridge.supported_formats = plat_data->supported_formats;
+
+ if (plat_data->max_bpc)
+ hdmi->bridge.max_bpc = plat_data->max_bpc;
+
hdmi->bridge.ddc = dw_hdmi_qp_i2c_adapter(hdmi);
if (IS_ERR(hdmi->bridge.ddc))
return ERR_CAST(hdmi->bridge.ddc);
@@ -1093,6 +1302,22 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
hdmi->bridge.hdmi_audio_dev = dev;
hdmi->bridge.hdmi_audio_dai_port = 1;
+#ifdef CONFIG_DRM_DW_HDMI_QP_CEC
+ if (plat_data->cec_irq) {
+ hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
+ hdmi->bridge.hdmi_cec_dev = dev;
+ hdmi->bridge.hdmi_cec_adapter_name = dev_name(dev);
+
+ hdmi->cec = devm_kzalloc(hdmi->dev, sizeof(*hdmi->cec), GFP_KERNEL);
+ if (!hdmi->cec)
+ return ERR_PTR(-ENOMEM);
+
+ hdmi->cec->irq = plat_data->cec_irq;
+ } else {
+ dev_warn(dev, "Disabled CEC support due to missing IRQ\n");
+ }
+#endif
+
ret = devm_drm_bridge_add(dev, &hdmi->bridge);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
index 72987e6c4689..91a15f82e32a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.h
@@ -488,9 +488,23 @@
#define AUDPKT_VBIT_OVR0 0xf24
/* CEC Registers */
#define CEC_TX_CONTROL 0x1000
+#define CEC_CTRL_CLEAR BIT(0)
+#define CEC_CTRL_START BIT(0)
#define CEC_STATUS 0x1004
+#define CEC_STAT_DONE BIT(0)
+#define CEC_STAT_NACK BIT(1)
+#define CEC_STAT_ARBLOST BIT(2)
+#define CEC_STAT_LINE_ERR BIT(3)
+#define CEC_STAT_RETRANS_FAIL BIT(4)
+#define CEC_STAT_DISCARD BIT(5)
+#define CEC_STAT_TX_BUSY BIT(8)
+#define CEC_STAT_RX_BUSY BIT(9)
+#define CEC_STAT_DRIVE_ERR BIT(10)
+#define CEC_STAT_EOM BIT(11)
+#define CEC_STAT_NOTIFY_ERR BIT(12)
#define CEC_CONFIG 0x1008
#define CEC_ADDR 0x100c
+#define CEC_ADDR_BROADCAST BIT(15)
#define CEC_TX_COUNT 0x1020
#define CEC_TX_DATA3_0 0x1024
#define CEC_TX_DATA7_4 0x1028
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 206b099a35e9..3b77e73ac0ea 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -177,6 +177,7 @@ struct dw_hdmi {
spinlock_t audio_lock;
struct mutex audio_mutex;
+ unsigned int sample_iec958;
unsigned int sample_non_pcm;
unsigned int sample_width;
unsigned int sample_rate;
@@ -198,6 +199,12 @@ struct dw_hdmi {
enum drm_connector_status last_connector_result;
};
+const struct dw_hdmi_plat_data *dw_hdmi_to_plat_data(struct dw_hdmi *hdmi)
+{
+ return hdmi->plat_data;
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_to_plat_data);
+
#define HDMI_IH_PHY_STAT0_RX_SENSE \
(HDMI_IH_PHY_STAT0_RX_SENSE0 | HDMI_IH_PHY_STAT0_RX_SENSE1 | \
HDMI_IH_PHY_STAT0_RX_SENSE2 | HDMI_IH_PHY_STAT0_RX_SENSE3)
@@ -712,6 +719,14 @@ void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm)
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm);
+void dw_hdmi_set_sample_iec958(struct dw_hdmi *hdmi, unsigned int iec958)
+{
+ mutex_lock(&hdmi->audio_mutex);
+ hdmi->sample_iec958 = iec958;
+ mutex_unlock(&hdmi->audio_mutex);
+}
+EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_iec958);
+
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
{
mutex_lock(&hdmi->audio_mutex);
@@ -843,7 +858,8 @@ static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi)
hdmi->channels,
hdmi->sample_width,
hdmi->sample_rate,
- hdmi->sample_non_pcm);
+ hdmi->sample_non_pcm,
+ hdmi->sample_iec958);
}
static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi)
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index ae0d08e5e960..276d05d25ad8 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -106,10 +106,21 @@
#define SN_PWM_EN_INV_REG 0xA5
#define SN_PWM_INV_MASK BIT(0)
#define SN_PWM_EN_MASK BIT(1)
+
+#define SN_IRQ_EN_REG 0xE0
+#define IRQ_EN BIT(0)
+
+#define SN_IRQ_EVENTS_EN_REG 0xE6
+#define HPD_INSERTION_EN BIT(1)
+#define HPD_REMOVAL_EN BIT(2)
+
#define SN_AUX_CMD_STATUS_REG 0xF4
#define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3)
#define AUX_IRQ_STATUS_AUX_SHORT BIT(5)
#define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6)
+#define SN_IRQ_STATUS_REG 0xF5
+#define HPD_REMOVAL_STATUS BIT(2)
+#define HPD_INSERTION_STATUS BIT(1)
#define MIN_DSI_CLK_FREQ_MHZ 40
@@ -152,7 +163,9 @@
* @ln_assign: Value to program to the LN_ASSIGN register.
* @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG.
* @comms_enabled: If true then communication over the aux channel is enabled.
+ * @hpd_enabled: If true then HPD events are enabled.
* @comms_mutex: Protects modification of comms_enabled.
+ * @hpd_mutex: Protects modification of hpd_enabled.
*
* @gchip: If we expose our GPIOs, this is used.
* @gchip_output: A cache of whether we've set GPIOs to output. This
@@ -190,7 +203,9 @@ struct ti_sn65dsi86 {
u8 ln_assign;
u8 ln_polrs;
bool comms_enabled;
+ bool hpd_enabled;
struct mutex comms_mutex;
+ struct mutex hpd_mutex;
#if defined(CONFIG_OF_GPIO)
struct gpio_chip gchip;
@@ -221,6 +236,23 @@ static const struct regmap_config ti_sn65dsi86_regmap_config = {
.max_register = 0xFF,
};
+static int ti_sn65dsi86_read_u8(struct ti_sn65dsi86 *pdata, unsigned int reg,
+ u8 *val)
+{
+ int ret;
+ unsigned int reg_val;
+
+ ret = regmap_read(pdata->regmap, reg, &reg_val);
+ if (ret) {
+ dev_err(pdata->dev, "failed to read raw reg %#x: %d\n",
+ reg, ret);
+ return ret;
+ }
+ *val = (u8)reg_val;
+
+ return 0;
+}
+
static int __maybe_unused ti_sn65dsi86_read_u16(struct ti_sn65dsi86 *pdata,
unsigned int reg, u16 *val)
{
@@ -379,6 +411,7 @@ static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata)
static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
{
struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
int ret;
ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
@@ -413,6 +446,13 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
if (pdata->refclk)
ti_sn65dsi86_enable_comms(pdata, NULL);
+ if (client->irq) {
+ ret = regmap_update_bits(pdata->regmap, SN_IRQ_EN_REG, IRQ_EN,
+ IRQ_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to enable IRQ events: %d\n", ret);
+ }
+
return ret;
}
@@ -1211,6 +1251,8 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *
static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
+ int ret;
/*
* Device needs to be powered on before reading the HPD state
@@ -1219,11 +1261,35 @@ static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
*/
pm_runtime_get_sync(pdata->dev);
+
+ mutex_lock(&pdata->hpd_mutex);
+ pdata->hpd_enabled = true;
+ mutex_unlock(&pdata->hpd_mutex);
+
+ if (client->irq) {
+ ret = regmap_set_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
+ HPD_REMOVAL_EN | HPD_INSERTION_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to enable HPD events: %d\n", ret);
+ }
}
static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+ const struct i2c_client *client = to_i2c_client(pdata->dev);
+ int ret;
+
+ if (client->irq) {
+ ret = regmap_clear_bits(pdata->regmap, SN_IRQ_EVENTS_EN_REG,
+ HPD_REMOVAL_EN | HPD_INSERTION_EN);
+ if (ret)
+ dev_err(pdata->dev, "Failed to disable HPD events: %d\n", ret);
+ }
+
+ mutex_lock(&pdata->hpd_mutex);
+ pdata->hpd_enabled = false;
+ mutex_unlock(&pdata->hpd_mutex);
pm_runtime_put_autosuspend(pdata->dev);
}
@@ -1309,6 +1375,41 @@ static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata)
return 0;
}
+static irqreturn_t ti_sn_bridge_interrupt(int irq, void *private)
+{
+ struct ti_sn65dsi86 *pdata = private;
+ struct drm_device *dev = pdata->bridge.dev;
+ u8 status;
+ int ret;
+ bool hpd_event;
+
+ ret = ti_sn65dsi86_read_u8(pdata, SN_IRQ_STATUS_REG, &status);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to read IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ hpd_event = status & (HPD_REMOVAL_STATUS | HPD_INSERTION_STATUS);
+
+ dev_dbg(pdata->dev, "(SN_IRQ_STATUS_REG = %#x)\n", status);
+ if (!status)
+ return IRQ_NONE;
+
+ ret = regmap_write(pdata->regmap, SN_IRQ_STATUS_REG, status);
+ if (ret) {
+ dev_err(pdata->dev, "Failed to clear IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ /* Only send the HPD event if we are bound with a device. */
+ mutex_lock(&pdata->hpd_mutex);
+ if (pdata->hpd_enabled && hpd_event)
+ drm_kms_helper_hotplug_event(dev);
+ mutex_unlock(&pdata->hpd_mutex);
+
+ return IRQ_HANDLED;
+}
+
static int ti_sn_bridge_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
@@ -1931,6 +2032,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
dev_set_drvdata(dev, pdata);
pdata->dev = dev;
+ mutex_init(&pdata->hpd_mutex);
mutex_init(&pdata->comms_mutex);
pdata->regmap = devm_regmap_init_i2c(client,
@@ -1971,6 +2073,16 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf)))
return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n");
+ if (client->irq) {
+ ret = devm_request_threaded_irq(pdata->dev, client->irq, NULL,
+ ti_sn_bridge_interrupt,
+ IRQF_ONESHOT,
+ dev_name(pdata->dev), pdata);
+
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to request interrupt\n");
+ }
+
/*
* Break ourselves up into a collection of aux devices. The only real
* motivation here is to solve the chicken-and-egg problem of probe
diff --git a/drivers/gpu/drm/bridge/waveshare-dsi.c b/drivers/gpu/drm/bridge/waveshare-dsi.c
new file mode 100644
index 000000000000..43f4e7412d72
--- /dev/null
+++ b/drivers/gpu/drm/bridge/waveshare-dsi.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 NXP
+ * Based on panel-raspberrypi-touchscreen by Broadcom
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+struct ws_bridge {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct backlight_device *backlight;
+ struct device *dev;
+ struct regmap *reg_map;
+};
+
+static const struct regmap_config ws_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xff,
+};
+
+static struct ws_bridge *bridge_to_ws_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ws_bridge, bridge);
+}
+
+static int ws_bridge_attach_dsi(struct ws_bridge *ws)
+{
+ const struct mipi_dsi_device_info info = {
+ .type = "ws-bridge",
+ .channel = 0,
+ .node = NULL,
+ };
+ struct device_node *dsi_host_node;
+ struct device *dev = ws->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+
+ dsi_host_node = of_graph_get_remote_node(dev->of_node, 0, 0);
+ if (!dsi_host_node) {
+ dev_err(dev, "Failed to get remote port\n");
+ return -ENODEV;
+ }
+ host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (!host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Failed to find dsi_host\n");
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi))
+ return dev_err_probe(dev, PTR_ERR(dsi), "Failed to create dsi device\n");
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 2;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to attach dsi to host\n");
+
+ return 0;
+}
+
+static int ws_bridge_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+ int ret;
+
+ ret = ws_bridge_attach_dsi(ws);
+ if (ret)
+ return ret;
+
+ return drm_bridge_attach(encoder, ws->next_bridge,
+ &ws->bridge, flags);
+}
+
+static void ws_bridge_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+
+ regmap_write(ws->reg_map, 0xad, 0x01);
+ backlight_enable(ws->backlight);
+}
+
+static void ws_bridge_bridge_disable(struct drm_bridge *bridge)
+{
+ struct ws_bridge *ws = bridge_to_ws_bridge(bridge);
+
+ backlight_disable(ws->backlight);
+ regmap_write(ws->reg_map, 0xad, 0x00);
+}
+
+static const struct drm_bridge_funcs ws_bridge_bridge_funcs = {
+ .enable = ws_bridge_bridge_enable,
+ .disable = ws_bridge_bridge_disable,
+ .attach = ws_bridge_bridge_attach,
+};
+
+static int ws_bridge_bl_update_status(struct backlight_device *bl)
+{
+ struct ws_bridge *ws = bl_get_data(bl);
+
+ regmap_write(ws->reg_map, 0xab, 0xff - backlight_get_brightness(bl));
+ regmap_write(ws->reg_map, 0xaa, 0x01);
+
+ return 0;
+}
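
Note the inversion here: register 0xab apparently takes 0xff minus the desired brightness (so 0x00 means full duty), and writing 1 to 0xaa looks like an apply/latch strobe. Both register roles are inferred from how the driver uses them; no public datasheet is cited in the patch.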
+
+static const struct backlight_ops ws_bridge_bl_ops = {
+ .update_status = ws_bridge_bl_update_status,
+};
+
+static struct backlight_device *ws_bridge_create_backlight(struct ws_bridge *ws)
+{
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+ struct device *dev = ws->dev;
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, ws,
+ &ws_bridge_bl_ops, &props);
+}
+
+static int ws_bridge_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct drm_panel *panel;
+ struct ws_bridge *ws;
+ int ret;
+
+ ws = devm_drm_bridge_alloc(dev, struct ws_bridge, bridge, &ws_bridge_bridge_funcs);
+ if (IS_ERR(ws))
+ return PTR_ERR(ws);
+
+ ws->dev = dev;
+
+ ws->reg_map = devm_regmap_init_i2c(i2c, &ws_regmap_config);
+ if (IS_ERR(ws->reg_map))
+ return dev_err_probe(dev, PTR_ERR(ws->reg_map), "Failed to allocate regmap\n");
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, &panel, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to find remote panel\n");
+
+ ws->next_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(ws->next_bridge))
+ return PTR_ERR(ws->next_bridge);
+
+ ws->backlight = ws_bridge_create_backlight(ws);
+ if (IS_ERR(ws->backlight)) {
+ ret = PTR_ERR(ws->backlight);
+ dev_err(dev, "Failed to create backlight: %d\n", ret);
+ return ret;
+ }
+
+ regmap_write(ws->reg_map, 0xc0, 0x01);
+ regmap_write(ws->reg_map, 0xc2, 0x01);
+ regmap_write(ws->reg_map, 0xac, 0x01);
+
+ ws->bridge.type = DRM_MODE_CONNECTOR_DPI;
+ ws->bridge.of_node = dev->of_node;
+ devm_drm_bridge_add(dev, &ws->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ws_bridge_of_ids[] = {
+ {.compatible = "waveshare,dsi2dpi",},
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ws_bridge_of_ids);
+
+static struct i2c_driver ws_bridge_driver = {
+ .driver = {
+ .name = "ws_dsi2dpi",
+ .of_match_table = ws_bridge_of_ids,
+ },
+ .probe = ws_bridge_probe,
+};
+module_i2c_driver(ws_bridge_driver);
+
+MODULE_AUTHOR("Joseph Guo <qijian.guo@nxp.com>");
+MODULE_DESCRIPTION("Waveshare DSI2DPI bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml
index d502d146b177..56638814bb28 100644
--- a/drivers/gpu/drm/ci/gitlab-ci.yml
+++ b/drivers/gpu/drm/ci/gitlab-ci.yml
@@ -280,7 +280,7 @@ sanity:
GIT_STRATEGY: none
script:
# ci-fairy check-commits --junit-xml=check-commits.xml
- - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
+ # - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
- |
set -eu
image_tags=(
diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index 72480db1f00d..515aceac22b1 100644
--- a/drivers/gpu/drm/clients/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -13,8 +13,8 @@
static char drm_client_default[16] = CONFIG_DRM_CLIENT_DEFAULT;
module_param_string(active, drm_client_default, sizeof(drm_client_default), 0444);
MODULE_PARM_DESC(active,
- "Choose which drm client to start, default is"
- CONFIG_DRM_CLIENT_DEFAULT "]");
+ "Choose which drm client to start, default is "
+ CONFIG_DRM_CLIENT_DEFAULT);
/**
* drm_client_setup() - Setup in-kernel DRM clients
diff --git a/drivers/gpu/drm/clients/drm_fbdev_client.c b/drivers/gpu/drm/clients/drm_fbdev_client.c
index f894ba52bdb5..28951e392482 100644
--- a/drivers/gpu/drm/clients/drm_fbdev_client.c
+++ b/drivers/gpu/drm/clients/drm_fbdev_client.c
@@ -13,22 +13,36 @@
* struct drm_client_funcs
*/
+static void drm_fbdev_client_free(struct drm_client_dev *client)
+{
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+}
+
static void drm_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
if (fb_helper->info) {
+ /*
+ * Fully probed framebuffer device
+ */
drm_fb_helper_unregister_info(fb_helper);
} else {
+ /*
+ * Partially initialized client, no framebuffer device yet
+ */
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
}
-static int drm_fbdev_client_restore(struct drm_client_dev *client)
+static int drm_fbdev_client_restore(struct drm_client_dev *client, bool force)
{
- drm_fb_helper_lastclose(client->dev);
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
return 0;
}
@@ -62,32 +76,27 @@ err_drm_err:
return ret;
}
-static int drm_fbdev_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_fbdev_client_suspend(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- if (holds_console_lock)
- drm_fb_helper_set_suspend(fb_helper, true);
- else
- drm_fb_helper_set_suspend_unlocked(fb_helper, true);
+ drm_fb_helper_set_suspend_unlocked(fb_helper, true);
return 0;
}
-static int drm_fbdev_client_resume(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_fbdev_client_resume(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- if (holds_console_lock)
- drm_fb_helper_set_suspend(fb_helper, false);
- else
- drm_fb_helper_set_suspend_unlocked(fb_helper, false);
+ drm_fb_helper_set_suspend_unlocked(fb_helper, false);
return 0;
}
static const struct drm_client_funcs drm_fbdev_client_funcs = {
.owner = THIS_MODULE,
+ .free = drm_fbdev_client_free,
.unregister = drm_fbdev_client_unregister,
.restore = drm_fbdev_client_restore,
.hotplug = drm_fbdev_client_hotplug,
diff --git a/drivers/gpu/drm/clients/drm_log.c b/drivers/gpu/drm/clients/drm_log.c
index d239f1e3c456..4d3005273b27 100644
--- a/drivers/gpu/drm/clients/drm_log.c
+++ b/drivers/gpu/drm/clients/drm_log.c
@@ -100,7 +100,7 @@ static void drm_log_clear_line(struct drm_log_scanout *scanout, u32 line)
return;
iosys_map_memset(&map, r.y1 * fb->pitches[0], 0, height * fb->pitches[0]);
drm_client_buffer_vunmap_local(scanout->buffer);
- drm_client_framebuffer_flush(scanout->buffer, &r);
+ drm_client_buffer_flush(scanout->buffer, &r);
}
static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s,
@@ -133,7 +133,7 @@ static void drm_log_draw_line(struct drm_log_scanout *scanout, const char *s,
if (scanout->line >= scanout->rows)
scanout->line = 0;
drm_client_buffer_vunmap_local(scanout->buffer);
- drm_client_framebuffer_flush(scanout->buffer, &r);
+ drm_client_buffer_flush(scanout->buffer, &r);
}
static void drm_log_draw_new_line(struct drm_log_scanout *scanout,
@@ -204,7 +204,7 @@ static int drm_log_setup_modeset(struct drm_client_dev *client,
if (format == DRM_FORMAT_INVALID)
return -EINVAL;
- scanout->buffer = drm_client_framebuffer_create(client, width, height, format);
+ scanout->buffer = drm_client_buffer_create_dumb(client, width, height, format);
if (IS_ERR(scanout->buffer)) {
drm_warn(client->dev, "drm_log can't create framebuffer %d %d %p4cc\n",
width, height, &format);
@@ -272,7 +272,7 @@ static void drm_log_init_client(struct drm_log *dlog)
err_failed_commit:
for (i = 0; i < n_modeset; i++)
- drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+ drm_client_buffer_delete(dlog->scanout[i].buffer);
err_nomodeset:
kfree(dlog->scanout);
@@ -286,26 +286,45 @@ static void drm_log_free_scanout(struct drm_client_dev *client)
if (dlog->n_scanout) {
for (i = 0; i < dlog->n_scanout; i++)
- drm_client_framebuffer_delete(dlog->scanout[i].buffer);
+ drm_client_buffer_delete(dlog->scanout[i].buffer);
dlog->n_scanout = 0;
kfree(dlog->scanout);
dlog->scanout = NULL;
}
}
-static void drm_log_client_unregister(struct drm_client_dev *client)
+static void drm_log_client_free(struct drm_client_dev *client)
{
struct drm_log *dlog = client_to_drm_log(client);
struct drm_device *dev = client->dev;
+ kfree(dlog);
+
+ drm_dbg(dev, "Unregistered with drm log\n");
+}
+
+static void drm_log_client_unregister(struct drm_client_dev *client)
+{
+ struct drm_log *dlog = client_to_drm_log(client);
+
unregister_console(&dlog->con);
mutex_lock(&dlog->lock);
drm_log_free_scanout(client);
- drm_client_release(client);
mutex_unlock(&dlog->lock);
- kfree(dlog);
- drm_dbg(dev, "Unregistered with drm log\n");
+ drm_client_release(client);
+}
+
+static int drm_log_client_restore(struct drm_client_dev *client, bool force)
+{
+ int ret;
+
+ if (force)
+ ret = drm_client_modeset_commit_locked(client);
+ else
+ ret = drm_client_modeset_commit(client);
+
+ return ret;
}
static int drm_log_client_hotplug(struct drm_client_dev *client)
@@ -319,7 +338,7 @@ static int drm_log_client_hotplug(struct drm_client_dev *client)
return 0;
}
-static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_lock)
+static int drm_log_client_suspend(struct drm_client_dev *client)
{
struct drm_log *dlog = client_to_drm_log(client);
@@ -328,7 +347,7 @@ static int drm_log_client_suspend(struct drm_client_dev *client, bool _console_l
return 0;
}
-static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lock)
+static int drm_log_client_resume(struct drm_client_dev *client)
{
struct drm_log *dlog = client_to_drm_log(client);
@@ -339,7 +358,9 @@ static int drm_log_client_resume(struct drm_client_dev *client, bool _console_lo
static const struct drm_client_funcs drm_log_client_funcs = {
.owner = THIS_MODULE,
+ .free = drm_log_client_free,
.unregister = drm_log_client_unregister,
+ .restore = drm_log_client_restore,
.hotplug = drm_log_client_hotplug,
.suspend = drm_log_client_suspend,
.resume = drm_log_client_resume,
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 5eb7e9bfe361..a2d30cf9e06d 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -20,6 +20,7 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdcp_helper.h>
#include <drm/display/drm_hdmi_audio_helper.h>
#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
@@ -136,10 +137,9 @@ static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
- struct drm_bridge *bridge;
/* Notify all bridges in the pipeline of hotplug events. */
- drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(bridge_connector->encoder, bridge) {
if (bridge->funcs->hpd_notify)
bridge->funcs->hpd_notify(bridge, status);
}
@@ -618,6 +618,20 @@ static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_f
* Bridge Connector Initialisation
*/
+static void drm_bridge_connector_put_bridges(struct drm_device *dev, void *data)
+{
+ struct drm_bridge_connector *bridge_connector = (struct drm_bridge_connector *)data;
+
+ drm_bridge_put(bridge_connector->bridge_edid);
+ drm_bridge_put(bridge_connector->bridge_hpd);
+ drm_bridge_put(bridge_connector->bridge_detect);
+ drm_bridge_put(bridge_connector->bridge_modes);
+ drm_bridge_put(bridge_connector->bridge_hdmi);
+ drm_bridge_put(bridge_connector->bridge_hdmi_audio);
+ drm_bridge_put(bridge_connector->bridge_dp_audio);
+ drm_bridge_put(bridge_connector->bridge_hdmi_cec);
+}
+
/**
* drm_bridge_connector_init - Initialise a connector for a chain of bridges
* @drm: the DRM device
@@ -638,9 +652,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
struct drm_bridge_connector *bridge_connector;
struct drm_connector *connector;
struct i2c_adapter *ddc = NULL;
- struct drm_bridge *bridge, *panel_bridge = NULL;
+ struct drm_bridge *panel_bridge __free(drm_bridge_put) = NULL;
unsigned int supported_formats = BIT(HDMI_COLORSPACE_RGB);
unsigned int max_bpc = 8;
+ bool support_hdcp = false;
int connector_type;
int ret;
@@ -648,6 +663,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (!bridge_connector)
return ERR_PTR(-ENOMEM);
+ ret = drmm_add_action(drm, drm_bridge_connector_put_bridges, bridge_connector);
+ if (ret)
+ return ERR_PTR(ret);
+
bridge_connector->encoder = encoder;
/*
@@ -665,20 +684,28 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
* detection are available, we don't support hotplug detection at all.
*/
connector_type = DRM_MODE_CONNECTOR_Unknown;
- drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
if (!bridge->interlace_allowed)
connector->interlace_allowed = false;
if (!bridge->ycbcr_420_allowed)
connector->ycbcr_420_allowed = false;
- if (bridge->ops & DRM_BRIDGE_OP_EDID)
- bridge_connector->bridge_edid = bridge;
- if (bridge->ops & DRM_BRIDGE_OP_HPD)
- bridge_connector->bridge_hpd = bridge;
- if (bridge->ops & DRM_BRIDGE_OP_DETECT)
- bridge_connector->bridge_detect = bridge;
- if (bridge->ops & DRM_BRIDGE_OP_MODES)
- bridge_connector->bridge_modes = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_EDID) {
+ drm_bridge_put(bridge_connector->bridge_edid);
+ bridge_connector->bridge_edid = drm_bridge_get(bridge);
+ }
+ if (bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_put(bridge_connector->bridge_hpd);
+ bridge_connector->bridge_hpd = drm_bridge_get(bridge);
+ }
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT) {
+ drm_bridge_put(bridge_connector->bridge_detect);
+ bridge_connector->bridge_detect = drm_bridge_get(bridge);
+ }
+ if (bridge->ops & DRM_BRIDGE_OP_MODES) {
+ drm_bridge_put(bridge_connector->bridge_modes);
+ bridge_connector->bridge_modes = drm_bridge_get(bridge);
+ }
if (bridge->ops & DRM_BRIDGE_OP_HDMI) {
if (bridge_connector->bridge_hdmi)
return ERR_PTR(-EBUSY);
@@ -686,7 +713,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
!bridge->funcs->hdmi_clear_infoframe)
return ERR_PTR(-EINVAL);
- bridge_connector->bridge_hdmi = bridge;
+ bridge_connector->bridge_hdmi = drm_bridge_get(bridge);
if (bridge->supported_formats)
supported_formats = bridge->supported_formats;
@@ -709,7 +736,7 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
!bridge->funcs->hdmi_audio_shutdown)
return ERR_PTR(-EINVAL);
- bridge_connector->bridge_hdmi_audio = bridge;
+ bridge_connector->bridge_hdmi_audio = drm_bridge_get(bridge);
}
if (bridge->ops & DRM_BRIDGE_OP_DP_AUDIO) {
@@ -727,21 +754,21 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
!bridge->funcs->dp_audio_shutdown)
return ERR_PTR(-EINVAL);
- bridge_connector->bridge_dp_audio = bridge;
+ bridge_connector->bridge_dp_audio = drm_bridge_get(bridge);
}
if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
if (bridge_connector->bridge_hdmi_cec)
return ERR_PTR(-EBUSY);
- bridge_connector->bridge_hdmi_cec = bridge;
+ bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge);
}
if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
if (bridge_connector->bridge_hdmi_cec)
return ERR_PTR(-EBUSY);
- bridge_connector->bridge_hdmi_cec = bridge;
+ bridge_connector->bridge_hdmi_cec = drm_bridge_get(bridge);
if (!bridge->funcs->hdmi_cec_enable ||
!bridge->funcs->hdmi_cec_log_addr ||
@@ -749,20 +776,24 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
return ERR_PTR(-EINVAL);
}
- if (!drm_bridge_get_next_bridge(bridge))
+ if (drm_bridge_is_last(bridge))
connector_type = bridge->type;
#ifdef CONFIG_OF
- if (!drm_bridge_get_next_bridge(bridge) &&
- bridge->of_node)
+ if (drm_bridge_is_last(bridge) && bridge->of_node)
connector->fwnode = fwnode_handle_get(of_fwnode_handle(bridge->of_node));
#endif
if (bridge->ddc)
ddc = bridge->ddc;
- if (drm_bridge_is_panel(bridge))
- panel_bridge = bridge;
+ if (drm_bridge_is_panel(bridge)) {
+ drm_bridge_put(panel_bridge);
+ panel_bridge = drm_bridge_get(bridge);
+ }
+
+ if (bridge->support_hdcp)
+ support_hdcp = true;
}
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -772,8 +803,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (!connector->ycbcr_420_allowed)
supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420);
- bridge = bridge_connector->bridge_hdmi;
-
ret = drmm_connector_hdmi_init(drm, connector,
bridge_connector->bridge_hdmi->vendor,
bridge_connector->bridge_hdmi->product,
@@ -816,6 +845,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;
+
ret = drmm_connector_hdmi_cec_notifier_register(connector,
NULL,
bridge->hdmi_cec_dev);
@@ -825,6 +856,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (bridge_connector->bridge_hdmi_cec &&
bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ struct drm_bridge *bridge = bridge_connector->bridge_hdmi_cec;
+
ret = drmm_connector_hdmi_cec_register(connector,
&drm_bridge_connector_hdmi_cec_funcs,
bridge->hdmi_cec_adapter_name,
@@ -845,6 +878,10 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
if (panel_bridge)
drm_panel_bridge_set_orientation(connector, panel_bridge);
+ if (support_hdcp && IS_REACHABLE(CONFIG_DRM_DISPLAY_HELPER) &&
+ IS_ENABLED(CONFIG_DRM_DISPLAY_HDCP_HELPER))
+ drm_connector_attach_content_protection_property(connector, true);
+
return connector;
}
EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 3b50d817c839..436bfe9f9081 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -42,7 +42,7 @@
*
* https://hverkuil.home.xs4all.nl/cec-status.txt
*
- * Please mail me (hverkuil@xs4all.nl) if you find an adapter that works
+ * Please mail me (hverkuil@kernel.org) if you find an adapter that works
* and is not yet listed there.
*
* Note that the current implementation does not support CEC over an MST hub.
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 1ecc3df7e316..f9fdf19de74a 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -29,6 +29,7 @@
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -123,6 +124,14 @@ bool drm_dp_clock_recovery_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
}
EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
+bool drm_dp_post_lt_adj_req_in_progress(const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ u8 lane_align = dp_link_status(link_status, DP_LANE_ALIGN_STATUS_UPDATED);
+
+ return lane_align & DP_POST_LT_ADJ_REQ_IN_PROGRESS;
+}
+EXPORT_SYMBOL(drm_dp_post_lt_adj_req_in_progress);
+
u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane)
{
@@ -2543,6 +2552,10 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x0C, 0xE7), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC) },
/* Apple MacBookPro 2017 15 inch eDP Retina panel reports too low DP_MAX_LINK_RATE */
{ OUI(0x00, 0x10, 0xfa), DEVICE_ID(101, 68, 21, 101, 98, 97), false, BIT(DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS) },
+ /* Synaptics Panamera supports only a compressed bpp of 12 above 50% of its max DSC pixel throughput */
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x22), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x31), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
+ { OUI(0x90, 0xCC, 0x24), DEVICE_ID('S', 'Y', 'N', 'A', 0x53, 0x33), true, BIT(DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) },
};
#undef OUI
@@ -2832,6 +2845,158 @@ int drm_dp_dsc_sink_supported_input_bpcs(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_S
}
EXPORT_SYMBOL(drm_dp_dsc_sink_supported_input_bpcs);
+/*
+ * See DP Standard v2.1a 2.8.4 Minimum Slices/Display, Table 2-159 and
+ * Appendix L.1 Derivation of Slice Count Requirements.
+ */
+static int dsc_sink_min_slice_throughput(int peak_pixel_rate)
+{
+ if (peak_pixel_rate >= 4800000)
+ return 600000;
+ else if (peak_pixel_rate >= 2700000)
+ return 400000;
+ else
+ return 340000;
+}
+
+/**
+ * drm_dp_dsc_sink_max_slice_throughput() - Get a DSC sink's maximum pixel throughput per slice
+ * @dsc_dpcd: DSC sink's capabilities from DPCD
+ * @peak_pixel_rate: Cumulative peak pixel rate in kHz
+ * @is_rgb_yuv444: The mode is either RGB or YUV444
+ *
+ * Return the DSC sink device's maximum pixel throughput per slice, based on
+ * the device's @dsc_dpcd capabilities, the @peak_pixel_rate of the transferred
+ * stream(s) and whether the output format @is_rgb_yuv444 or yuv422/yuv420.
+ *
+ * Note that @peak_pixel_rate is the total pixel rate transferred to the same
+ * DSC/display sink. For instance, to calculate the per-tile slice count of an
+ * MST multi-tile display sink (ignoring the required rounding/alignment of
+ * the slice count)::
+ *
+ * @peak_pixel_rate = tile_pixel_rate * tile_count
+ * total_slice_count = @peak_pixel_rate / drm_dp_dsc_sink_max_slice_throughput(@peak_pixel_rate)
+ * tile_slice_count = total_slice_count / tile_count
+ *
+ * Returns:
+ * The maximum pixel throughput per slice supported by the DSC sink device
+ * in kPixels/sec.
+ */
+int drm_dp_dsc_sink_max_slice_throughput(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ int peak_pixel_rate, bool is_rgb_yuv444)
+{
+ int throughput;
+ int delta = 0;
+ int base;
+
+ throughput = dsc_dpcd[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT];
+
+ if (is_rgb_yuv444) {
+ throughput = (throughput & DP_DSC_THROUGHPUT_MODE_0_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_0_SHIFT;
+
+ delta = ((dsc_dpcd[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT]) &
+ DP_DSC_THROUGHPUT_MODE_0_DELTA_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_0_DELTA_SHIFT; /* in units of 2 MPixels/sec */
+ delta *= 2000;
+ } else {
+ throughput = (throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >>
+ DP_DSC_THROUGHPUT_MODE_1_SHIFT;
+ }
+
+ switch (throughput) {
+ case 0:
+ return dsc_sink_min_slice_throughput(peak_pixel_rate);
+ case 1:
+ base = 340000;
+ break;
+ case 2 ... 14:
+ base = 400000 + 50000 * (throughput - 2);
+ break;
+ case 15:
+ base = 170000;
+ break;
+ }
+
+ return base + delta;
+}
+EXPORT_SYMBOL(drm_dp_dsc_sink_max_slice_throughput);
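
A worked instance of the kernel-doc formula above, with made-up numbers: a two-tile MST sink whose tiles each run at 2,720,000 kPixels/s gives peak_pixel_rate = 5,440,000 kPixels/s, and a sink reporting throughput code 2 for RGB (400,000 kPixels/s per slice, no delta) then needs 14 slices in total, 7 per tile:

    #include <stdio.h>

    int main(void)
    {
            int tile_pixel_rate = 2720000;  /* kPixels/s, hypothetical */
            int tile_count = 2;
            int peak_pixel_rate = tile_pixel_rate * tile_count;
            /* Throughput code 2, RGB, no delta: 400000 kPixels/s per slice. */
            int slice_throughput = 400000 + 50000 * (2 - 2);
            /* Round up, then divide across tiles (alignment ignored here). */
            int total_slices = (peak_pixel_rate + slice_throughput - 1) /
                               slice_throughput;
            int tile_slices = total_slices / tile_count;

            printf("total %d slices, %d per tile\n", total_slices, tile_slices);
            return 0;
    }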
+
+static u8 dsc_branch_dpcd_cap(const u8 dpcd[DP_DSC_BRANCH_CAP_SIZE], int reg)
+{
+ return dpcd[reg - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0];
+}
+
+/**
+ * drm_dp_dsc_branch_max_overall_throughput() - Branch device's max overall DSC pixel throughput
+ * @dsc_branch_dpcd: DSC branch capabilities from DPCD
+ * @is_rgb_yuv444: The mode is either RGB or YUV444
+ *
+ * Return the branch device's maximum overall DSC pixel throughput, based on
+ * the device's DPCD DSC branch capabilities, and whether the output
+ * format @is_rgb_yuv444 or yuv422/yuv420.
+ *
+ * Returns:
+ * - 0: The maximum overall throughput capability is not indicated by
+ * the device separately and it must be determined from the per-slice
+ * max throughput (see @drm_dp_dsc_branch_slice_max_throughput())
+ * and the maximum slice count supported by the device.
+ * - > 0: The maximum overall DSC pixel throughput supported by the branch
+ * device in kPixels/sec.
+ */
+int drm_dp_dsc_branch_max_overall_throughput(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE],
+ bool is_rgb_yuv444)
+{
+ int throughput;
+
+ if (is_rgb_yuv444)
+ throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0);
+ else
+ throughput = dsc_branch_dpcd_cap(dsc_branch_dpcd,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_1);
+
+ switch (throughput) {
+ case 0:
+ return 0;
+ case 1:
+ return 680000;
+ default:
+ return 600000 + 50000 * throughput;
+ }
+}
+EXPORT_SYMBOL(drm_dp_dsc_branch_max_overall_throughput);
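
For concreteness: a raw register value of 1 decodes to 680,000 kPixels/s (the lone special case), 2 to 600,000 + 50,000 * 2 = 700,000 kPixels/s, and 15 to 1,350,000 kPixels/s; only 0 defers to the per-slice throughput and slice count.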
+
+/**
+ * drm_dp_dsc_branch_max_line_width() - Branch device's max DSC line width
+ * @dsc_branch_dpcd: DSC branch capabilities from DPCD
+ *
+ * Return the branch device's maximum overall DSC line width, based on
+ * the device's @dsc_branch_dpcd capabilities.
+ *
+ * Returns:
+ * - 0: The maximum line width is not indicated by the device
+ * separately and it must be determined from the maximum
+ * slice count and slice-width supported by the device.
+ * - %-EINVAL: The device indicates an invalid maximum line width
+ * (< 5120 pixels).
+ * - >= 5120: The maximum line width in pixels.
+ */
+int drm_dp_dsc_branch_max_line_width(const u8 dsc_branch_dpcd[DP_DSC_BRANCH_CAP_SIZE])
+{
+ int line_width = dsc_branch_dpcd_cap(dsc_branch_dpcd, DP_DSC_BRANCH_MAX_LINE_WIDTH);
+
+ switch (line_width) {
+ case 0:
+ return 0;
+ case 1 ... 15:
+ return -EINVAL;
+ default:
+ return line_width * 320;
+ }
+}
+EXPORT_SYMBOL(drm_dp_dsc_branch_max_line_width);
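
For example, a raw DP_DSC_BRANCH_MAX_LINE_WIDTH value of 16 decodes to 16 * 320 = 5120 pixels, the smallest width the encoding can represent, which is why raw values 1 through 15 (implying fewer than 5120 pixels) are reported as -EINVAL.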
+
static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux,
const u8 dpcd[DP_RECEIVER_CAP_SIZE], int address,
u8 *buf, int buf_size)
@@ -3962,6 +4127,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
int ret;
unsigned int offset = DP_EDP_BACKLIGHT_BRIGHTNESS_MSB;
u8 buf[3] = { 0 };
+ size_t len = 2;
/* The panel uses the PWM for controlling brightness levels */
if (!(bl->aux_set || bl->luminance_set))
@@ -3974,6 +4140,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[1] = (level & 0x00ff00) >> 8;
buf[2] = (level & 0xff0000) >> 16;
offset = DP_EDP_PANEL_TARGET_LUMINANCE_VALUE;
+ len = 3;
} else if (bl->lsb_reg_used) {
buf[0] = (level & 0xff00) >> 8;
buf[1] = (level & 0x00ff);
@@ -3981,7 +4148,7 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac
buf[0] = level;
}
- ret = drm_dp_dpcd_write_data(aux, offset, buf, sizeof(buf));
+ ret = drm_dp_dpcd_write_data(aux, offset, buf, len);
if (ret < 0) {
drm_err(aux->drm_dev,
"%s: Failed to write aux backlight level: %d\n",
@@ -4126,22 +4293,61 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
{
int fxp, fxp_min, fxp_max, fxp_actual, f = 1;
int ret;
- u8 pn, pn_min, pn_max;
+ u8 pn, pn_min, pn_max, bit_count;
if (!bl->aux_set)
return 0;
- ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn);
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &bit_count);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n",
aux->name, ret);
return -ENODEV;
}
- pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ bit_count &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
+ aux->name, ret);
+ return -ENODEV;
+ }
+ pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
+ if (ret < 0) {
+ drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
+ aux->name, ret);
+ return -ENODEV;
+ }
+ pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ if (unlikely(pn_min > pn_max)) {
+ drm_dbg_kms(aux->drm_dev, "%s: Invalid pwmgen bit count cap min/max returned: %d %d\n",
+ aux->name, pn_min, pn_max);
+ return -EINVAL;
+ }
+
+ /*
+ * Per VESA eDP Spec v1.4b, section 3.3.10.2:
+ * If DP_EDP_PWMGEN_BIT_COUNT is less than DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN,
+ * the sink must use the MIN value as the effective PWM bit count.
+ * Clamp the reported value to the [MIN, MAX] capability range to ensure
+ * correct brightness scaling on compliant eDP panels.
+ * Only apply this logic when the [MIN, MAX] range itself is valid per the spec.
+ */
+ pn = bit_count;
+ if (bit_count < pn_min)
+ pn = clamp(bit_count, pn_min, pn_max);
+
bl->max = (1 << pn) - 1;
- if (!driver_pwm_freq_hz)
+ if (!driver_pwm_freq_hz) {
+ if (pn != bit_count)
+ goto bit_count_write_back;
+
return 0;
+ }
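
As a worked example of the clamp: a panel reporting bit_count = 8 with a capability range of [10, 14] ends up with pn = 10, so bl->max = (1 << 10) - 1 = 1023 and brightness scaling uses the panel's effective 10-bit range rather than the misreported 8 bits.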
/*
* Set PWM Frequency divider to match desired frequency provided by the driver.
@@ -4165,21 +4371,6 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
* - FxP is within 25% of desired value.
* Note: 25% is arbitrary value and may need some tweak.
*/
- ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min);
- if (ret < 0) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n",
- aux->name, ret);
- return 0;
- }
- ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max);
- if (ret < 0) {
- drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n",
- aux->name, ret);
- return 0;
- }
- pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
- pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
-
/* Ensure frequency is within 25% of desired value */
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
@@ -4197,12 +4388,17 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf
break;
}
+bit_count_write_back:
ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn);
if (ret < 0) {
drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n",
aux->name, ret);
return 0;
}
+
+ if (!driver_pwm_freq_hz)
+ return 0;
+
bl->pwmgen_bit_count = pn;
bl->max = (1 << pn) - 1;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index cd15cf52f0c9..67e095e398a3 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -42,6 +42,7 @@
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
#include <drm/drm_writeback.h>
+#include <drm/drm_colorop.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -107,6 +108,7 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state)
kfree(state->connectors);
kfree(state->crtcs);
kfree(state->planes);
+ kfree(state->colorops);
kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);
@@ -138,6 +140,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
sizeof(*state->planes), GFP_KERNEL);
if (!state->planes)
goto fail;
+ state->colorops = kcalloc(dev->mode_config.num_colorop,
+ sizeof(*state->colorops), GFP_KERNEL);
+ if (!state->colorops)
+ goto fail;
/*
* Because drm_atomic_state can be committed asynchronously we need our
@@ -200,6 +206,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
drm_dbg_atomic(dev, "Clearing atomic state %p\n", state);
+ state->checked = false;
+
for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i].ptr;
@@ -207,9 +215,9 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;
connector->funcs->atomic_destroy_state(connector,
- state->connectors[i].state);
+ state->connectors[i].state_to_destroy);
state->connectors[i].ptr = NULL;
- state->connectors[i].state = NULL;
+ state->connectors[i].state_to_destroy = NULL;
state->connectors[i].old_state = NULL;
state->connectors[i].new_state = NULL;
drm_connector_put(connector);
@@ -222,10 +230,10 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;
crtc->funcs->atomic_destroy_state(crtc,
- state->crtcs[i].state);
+ state->crtcs[i].state_to_destroy);
state->crtcs[i].ptr = NULL;
- state->crtcs[i].state = NULL;
+ state->crtcs[i].state_to_destroy = NULL;
state->crtcs[i].old_state = NULL;
state->crtcs[i].new_state = NULL;
@@ -242,20 +250,34 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
continue;
plane->funcs->atomic_destroy_state(plane,
- state->planes[i].state);
+ state->planes[i].state_to_destroy);
state->planes[i].ptr = NULL;
- state->planes[i].state = NULL;
+ state->planes[i].state_to_destroy = NULL;
state->planes[i].old_state = NULL;
state->planes[i].new_state = NULL;
}
+ for (i = 0; i < config->num_colorop; i++) {
+ struct drm_colorop *colorop = state->colorops[i].ptr;
+
+ if (!colorop)
+ continue;
+
+ drm_colorop_atomic_destroy_state(colorop,
+ state->colorops[i].state);
+ state->colorops[i].ptr = NULL;
+ state->colorops[i].state = NULL;
+ state->colorops[i].old_state = NULL;
+ state->colorops[i].new_state = NULL;
+ }
+
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
obj->funcs->atomic_destroy_state(obj,
- state->private_objs[i].state);
+ state->private_objs[i].state_to_destroy);
state->private_objs[i].ptr = NULL;
- state->private_objs[i].state = NULL;
+ state->private_objs[i].state_to_destroy = NULL;
state->private_objs[i].old_state = NULL;
state->private_objs[i].new_state = NULL;
}
@@ -348,8 +370,9 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
WARN_ON(!state->acquire_ctx);
+ drm_WARN_ON(state->dev, state->checked);
- crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (crtc_state)
return crtc_state;
@@ -361,7 +384,7 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
if (!crtc_state)
return ERR_PTR(-ENOMEM);
- state->crtcs[index].state = crtc_state;
+ state->crtcs[index].state_to_destroy = crtc_state;
state->crtcs[index].old_state = crtc->state;
state->crtcs[index].new_state = crtc_state;
state->crtcs[index].ptr = crtc;
@@ -480,8 +503,8 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
}
if (state->crtc)
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state,
+ state->crtc);
if (writeback_job->fb && !crtc_state->active) {
drm_dbg_atomic(connector->dev,
@@ -528,13 +551,14 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_plane_state *plane_state;
WARN_ON(!state->acquire_ctx);
+ drm_WARN_ON(state->dev, state->checked);
/* the legacy pointers should never be set */
WARN_ON(plane->fb);
WARN_ON(plane->old_fb);
WARN_ON(plane->crtc);
- plane_state = drm_atomic_get_existing_plane_state(state, plane);
+ plane_state = drm_atomic_get_new_plane_state(state, plane);
if (plane_state)
return plane_state;
@@ -546,7 +570,7 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
if (!plane_state)
return ERR_PTR(-ENOMEM);
- state->planes[index].state = plane_state;
+ state->planes[index].state_to_destroy = plane_state;
state->planes[index].ptr = plane;
state->planes[index].old_state = plane->state;
state->planes[index].new_state = plane_state;
@@ -568,6 +592,55 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
+/**
+ * drm_atomic_get_colorop_state - get colorop state
+ * @state: global atomic state object
+ * @colorop: colorop to get state object for
+ *
+ * This function returns the colorop state for the given colorop, allocating it
+ * if needed. It will also grab the relevant plane lock to make sure that the
+ * state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted. All other errors are fatal.
+ */
+struct drm_colorop_state *
+drm_atomic_get_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop)
+{
+ int ret, index = drm_colorop_index(colorop);
+ struct drm_colorop_state *colorop_state;
+
+ WARN_ON(!state->acquire_ctx);
+
+ colorop_state = drm_atomic_get_new_colorop_state(state, colorop);
+ if (colorop_state)
+ return colorop_state;
+
+ ret = drm_modeset_lock(&colorop->plane->mutex, state->acquire_ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ colorop_state = drm_atomic_helper_colorop_duplicate_state(colorop);
+ if (!colorop_state)
+ return ERR_PTR(-ENOMEM);
+
+ state->colorops[index].state = colorop_state;
+ state->colorops[index].ptr = colorop;
+ state->colorops[index].old_state = colorop->state;
+ state->colorops[index].new_state = colorop_state;
+ colorop_state->state = state;
+
+ drm_dbg_atomic(colorop->dev, "Added [COLOROP:%d:%d] %p state to %p\n",
+ colorop->base.id, colorop->type, colorop_state, state);
+
+ return colorop_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_colorop_state);
+
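A hedged usage sketch (not part of this patch) of how a driver-side check might pull a colorop into the commit; the field names follow the state printed further below:

/* Sketch only: error handling mirrors the kerneldoc above. */
static int example_check_colorop(struct drm_atomic_state *state,
				 struct drm_colorop *colorop)
{
	struct drm_colorop_state *cs;

	cs = drm_atomic_get_colorop_state(state, colorop);
	if (IS_ERR(cs))
		return PTR_ERR(cs);	/* -EDEADLK: restart the atomic sequence */

	if (cs->bypass)			/* element disabled, nothing to program */
		return 0;

	/* ... validate cs->curve_1d_type / cs->data for the hardware ... */
	return 0;
}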
static bool
plane_switching_crtc(const struct drm_plane_state *old_plane_state,
const struct drm_plane_state *new_plane_state)
@@ -707,6 +780,46 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
return 0;
}
+static void drm_atomic_colorop_print_state(struct drm_printer *p,
+ const struct drm_colorop_state *state)
+{
+ struct drm_colorop *colorop = state->colorop;
+
+ drm_printf(p, "colorop[%u]:\n", colorop->base.id);
+ drm_printf(p, "\ttype=%s\n", drm_get_colorop_type_name(colorop->type));
+ if (colorop->bypass_property)
+ drm_printf(p, "\tbypass=%u\n", state->bypass);
+
+ switch (colorop->type) {
+ case DRM_COLOROP_1D_CURVE:
+ drm_printf(p, "\tcurve_1d_type=%s\n",
+ drm_get_colorop_curve_1d_type_name(state->curve_1d_type));
+ break;
+ case DRM_COLOROP_1D_LUT:
+ drm_printf(p, "\tsize=%d\n", colorop->size);
+ drm_printf(p, "\tinterpolation=%s\n",
+ drm_get_colorop_lut1d_interpolation_name(colorop->lut1d_interpolation));
+ drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0);
+ break;
+ case DRM_COLOROP_CTM_3X4:
+ drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0);
+ break;
+ case DRM_COLOROP_MULTIPLIER:
+ drm_printf(p, "\tmultiplier=%llu\n", state->multiplier);
+ break;
+ case DRM_COLOROP_3D_LUT:
+ drm_printf(p, "\tsize=%d\n", colorop->size);
+ drm_printf(p, "\tinterpolation=%s\n",
+ drm_get_colorop_lut3d_interpolation_name(colorop->lut3d_interpolation));
+ drm_printf(p, "\tdata blob id=%d\n", state->data ? state->data->base.id : 0);
+ break;
+ default:
+ break;
+ }
+
+ drm_printf(p, "\tnext=%d\n", colorop->next ? colorop->next->base.id : 0);
+}
+
static void drm_atomic_plane_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
@@ -728,7 +841,8 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
drm_printf(p, "\tcolor-range=%s\n",
drm_get_color_range_name(state->color_range));
drm_printf(p, "\tcolor_mgmt_changed=%d\n", state->color_mgmt_changed);
-
+ drm_printf(p, "\tcolor-pipeline=%d\n",
+ state->color_pipeline ? state->color_pipeline->base.id : 0);
if (plane->funcs->atomic_print_state)
plane->funcs->atomic_print_state(p, state);
}
@@ -831,14 +945,17 @@ struct drm_private_state *
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
- int index, num_objs, i, ret;
+ int index, num_objs, ret;
size_t size;
struct __drm_private_objs_state *arr;
struct drm_private_state *obj_state;
- for (i = 0; i < state->num_private_objs; i++)
- if (obj == state->private_objs[i].ptr)
- return state->private_objs[i].state;
+ WARN_ON(!state->acquire_ctx);
+ drm_WARN_ON(state->dev, state->checked);
+
+ obj_state = drm_atomic_get_new_private_obj_state(state, obj);
+ if (obj_state)
+ return obj_state;
ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
if (ret)
@@ -858,7 +975,7 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
if (!obj_state)
return ERR_PTR(-ENOMEM);
- state->private_objs[index].state = obj_state;
+ state->private_objs[index].state_to_destroy = obj_state;
state->private_objs[index].old_state = obj->state;
state->private_objs[index].new_state = obj_state;
state->private_objs[index].ptr = obj;
@@ -1129,6 +1246,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector_state *connector_state;
WARN_ON(!state->acquire_ctx);
+ drm_WARN_ON(state->dev, state->checked);
ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
if (ret)
@@ -1152,15 +1270,16 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
state->num_connector = alloc;
}
- if (state->connectors[index].state)
- return state->connectors[index].state;
+ connector_state = drm_atomic_get_new_connector_state(state, connector);
+ if (connector_state)
+ return connector_state;
connector_state = connector->funcs->atomic_duplicate_state(connector);
if (!connector_state)
return ERR_PTR(-ENOMEM);
drm_connector_get(connector);
- state->connectors[index].state = connector_state;
+ state->connectors[index].state_to_destroy = connector_state;
state->connectors[index].old_state = connector->state;
state->connectors[index].new_state = connector_state;
state->connectors[index].ptr = connector;
@@ -1308,7 +1427,6 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_bridge_state *bridge_state;
- struct drm_bridge *bridge;
if (!encoder)
return 0;
@@ -1317,7 +1435,7 @@ drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
"Adding all bridges for [encoder:%d:%s] to %p\n",
encoder->base.id, encoder->name, state);
- drm_for_each_bridge_in_chain(encoder, bridge) {
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge) {
/* Skip bridges that don't implement the atomic state hooks. */
if (!bridge->funcs->atomic_duplicate_state)
continue;
@@ -1438,6 +1556,52 @@ drm_atomic_add_affected_planes(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_add_affected_planes);
/**
+ * drm_atomic_add_affected_colorops - add colorops for plane
+ * @state: atomic state
+ * @plane: DRM plane
+ *
+ * This function walks the current configuration and adds all colorops
+ * currently used by @plane to the atomic configuration @state. This is useful
+ * when an atomic commit also needs to check all currently enabled colorop on
+ * @plane, e.g. when changing the mode. It's also useful when re-enabling a plane
+ * to avoid special code to force-enable all colorops.
+ *
+ * Since acquiring a colorop state will always also acquire the w/w mutex of the
+ * current plane for that colorop (if there is any), adding all the colorop states for
+ * a plane will not reduce parallelism of atomic updates.
+ *
+ * Returns:
+ * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_affected_colorops(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ struct drm_colorop *colorop;
+ struct drm_colorop_state *colorop_state;
+
+ WARN_ON(!drm_atomic_get_new_plane_state(state, plane));
+
+ drm_dbg_atomic(plane->dev,
+ "Adding all current colorops for [PLANE:%d:%s] to %p\n",
+ plane->base.id, plane->name, state);
+
+ drm_for_each_colorop(colorop, plane->dev) {
+ if (colorop->plane != plane)
+ continue;
+
+ colorop_state = drm_atomic_get_colorop_state(state, colorop);
+ if (IS_ERR(colorop_state))
+ return PTR_ERR(colorop_state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_affected_colorops);
+
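As a sketch, a plane-check path could mirror drm_atomic_add_affected_planes() and pull in the whole pipeline whenever the plane is part of the commit (illustrative, not from this patch):

static int example_plane_check(struct drm_atomic_state *state,
			       struct drm_plane *plane)
{
	/* The plane state must already be in @state; see the WARN_ON above. */
	return drm_atomic_add_affected_colorops(state, plane);
}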
+/**
* drm_atomic_check_only - check whether a given config would work
* @state: atomic configuration to check
*
@@ -1541,6 +1705,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
requested_crtc, affected_crtc);
}
+ state->checked = true;
+
return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -1833,6 +1999,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
bool take_locks)
{
struct drm_mode_config *config = &dev->mode_config;
+ struct drm_colorop *colorop;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_connector *connector;
@@ -1842,6 +2009,14 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
if (!drm_drv_uses_atomic_modeset(dev))
return;
+ list_for_each_entry(colorop, &config->colorop_list, head) {
+ if (take_locks)
+ drm_modeset_lock(&colorop->plane->mutex, NULL);
+ drm_atomic_colorop_print_state(p, colorop->state);
+ if (take_locks)
+ drm_modeset_unlock(&colorop->plane->mutex);
+ }
+
list_for_each_entry(plane, &config->plane_list, head) {
if (take_locks)
drm_modeset_lock(&plane->mutex, NULL);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ef56b474acf5..10adac9397cf 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -456,6 +456,7 @@ mode_fixup(struct drm_atomic_state *state)
ret = drm_atomic_bridge_chain_check(bridge,
new_crtc_state,
new_conn_state);
+ drm_bridge_put(bridge);
if (ret) {
drm_dbg_atomic(encoder->dev, "Bridge atomic check failed\n");
return ret;
@@ -527,6 +528,7 @@ static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
bridge = drm_bridge_chain_get_first_bridge(encoder);
ret = drm_bridge_chain_mode_valid(bridge, &connector->display_info,
mode);
+ drm_bridge_put(bridge);
if (ret != MODE_OK) {
drm_dbg_atomic(encoder->dev, "[BRIDGE] mode_valid() failed\n");
return ret;
@@ -1212,6 +1214,7 @@ encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_disable(bridge, state);
+ drm_bridge_put(bridge);
/* Right function depends upon target state. */
if (funcs) {
@@ -1329,6 +1332,7 @@ encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *sta
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_post_disable(bridge, state);
+ drm_bridge_put(bridge);
}
}
@@ -1501,6 +1505,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *state)
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_bridge_chain_mode_set(bridge, mode, adjusted_mode);
+ drm_bridge_put(bridge);
}
}
@@ -1580,6 +1585,7 @@ encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
drm_atomic_bridge_chain_pre_enable(bridge, state);
+ drm_bridge_put(bridge);
}
}
@@ -1655,6 +1661,7 @@ encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
}
drm_atomic_bridge_chain_enable(bridge, state);
+ drm_bridge_put(bridge);
}
}
@@ -1824,10 +1831,12 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
}
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
+ wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc);
+
if (!(crtc_mask & drm_crtc_mask(crtc)))
continue;
- ret = wait_event_timeout(dev->vblank[i].queue,
+ ret = wait_event_timeout(*queue,
state->crtcs[i].last_vblank_count !=
drm_crtc_vblank_count(crtc),
msecs_to_jiffies(100));
@@ -3175,6 +3184,8 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_colorop *colorop;
+ struct drm_colorop_state *old_colorop_state, *new_colorop_state;
struct drm_crtc_commit *commit;
struct drm_private_obj *obj;
struct drm_private_state *old_obj_state, *new_obj_state;
@@ -3229,7 +3240,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_conn_state->state = state;
new_conn_state->state = NULL;
- state->connectors[i].state = old_conn_state;
+ state->connectors[i].state_to_destroy = old_conn_state;
connector->state = new_conn_state;
}
@@ -3239,7 +3250,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_crtc_state->state = state;
new_crtc_state->state = NULL;
- state->crtcs[i].state = old_crtc_state;
+ state->crtcs[i].state_to_destroy = old_crtc_state;
crtc->state = new_crtc_state;
if (new_crtc_state->commit) {
@@ -3252,6 +3263,16 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
}
}
+ for_each_oldnew_colorop_in_state(state, colorop, old_colorop_state, new_colorop_state, i) {
+ WARN_ON(colorop->state != old_colorop_state);
+
+ old_colorop_state->state = state;
+ new_colorop_state->state = NULL;
+
+ state->colorops[i].state = old_colorop_state;
+ colorop->state = new_colorop_state;
+ }
+
drm_panic_lock(state->dev, flags);
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
WARN_ON(plane->state != old_plane_state);
@@ -3259,7 +3280,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_plane_state->state = state;
new_plane_state->state = NULL;
- state->planes[i].state = old_plane_state;
+ state->planes[i].state_to_destroy = old_plane_state;
plane->state = new_plane_state;
}
drm_panic_unlock(state->dev, flags);
@@ -3270,7 +3291,7 @@ int drm_atomic_helper_swap_state(struct drm_atomic_state *state,
old_obj_state->state = state;
new_obj_state->state = NULL;
- state->private_objs[i].state = old_obj_state;
+ state->private_objs[i].state_to_destroy = old_obj_state;
obj->state = new_obj_state;
}
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 7142e163e618..cee6d8fc44ad 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -268,6 +268,11 @@ void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *plane_state,
plane_state->color_range = val;
}
+ if (plane->color_pipeline_property) {
+ /* default is always NULL, i.e., bypass */
+ plane_state->color_pipeline = NULL;
+ }
+
if (plane->zpos_property) {
if (!drm_object_property_get_default_value(&plane->base,
plane->zpos_property,
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index ecc73d52bfae..7320db4b8489 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -35,6 +35,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_writeback.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_colorop.h>
#include <linux/export.h>
#include <linux/dma-fence.h>
@@ -258,6 +259,34 @@ drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
/**
+ * drm_atomic_set_colorop_for_plane - set colorop for plane
+ * @plane_state: atomic state object for the plane
+ * @colorop: colorop to use for the plane
+ *
+ * Helper function to select the color pipeline on a plane by setting
+ * it to the first drm_colorop element of the pipeline.
+ */
+void
+drm_atomic_set_colorop_for_plane(struct drm_plane_state *plane_state,
+ struct drm_colorop *colorop)
+{
+ struct drm_plane *plane = plane_state->plane;
+
+ if (colorop)
+ drm_dbg_atomic(plane->dev,
+ "Set [COLOROP:%d] for [PLANE:%d:%s] state %p\n",
+ colorop->base.id, plane->base.id, plane->name,
+ plane_state);
+ else
+ drm_dbg_atomic(plane->dev,
+ "Set [NOCOLOROP] for [PLANE:%d:%s] state %p\n",
+ plane->base.id, plane->name, plane_state);
+
+ plane_state->color_pipeline = colorop;
+}
+EXPORT_SYMBOL(drm_atomic_set_colorop_for_plane);
+
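A hedged sketch of the binding this helper performs: the plane state simply points at the first pipeline element, and NULL selects bypass, matching the "0" value exposed through the COLOR_PIPELINE property read-out below:

/* Sketch only: the function name is illustrative. */
static void example_select_pipeline(struct drm_plane_state *plane_state,
				    struct drm_colorop *first_element)
{
	/* first_element == NULL is valid and disables the pipeline */
	drm_atomic_set_colorop_for_plane(plane_state, first_element);
}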
+/**
* drm_atomic_set_crtc_for_connector - set CRTC for connector
* @conn_state: atomic state object for the connector
* @crtc: CRTC to use for the connector
@@ -419,6 +448,8 @@ static int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
set_out_fence_for_crtc(state->state, crtc, fence_ptr);
} else if (property == crtc->scaling_filter_property) {
state->scaling_filter = val;
+ } else if (property == crtc->sharpness_strength_property) {
+ state->sharpness_strength = val;
} else if (crtc->funcs->atomic_set_property) {
return crtc->funcs->atomic_set_property(crtc, state, property, val);
} else {
@@ -456,6 +487,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
*val = 0;
else if (property == crtc->scaling_filter_property)
*val = state->scaling_filter;
+ else if (property == crtc->sharpness_strength_property)
+ *val = state->sharpness_strength;
else if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
else {
@@ -540,6 +573,16 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
state->color_encoding = val;
} else if (property == plane->color_range_property) {
state->color_range = val;
+ } else if (property == plane->color_pipeline_property) {
+ /* find DRM colorop object */
+ struct drm_colorop *colorop = NULL;
+
+ colorop = drm_colorop_find(dev, file_priv, val);
+
+ if (val && !colorop)
+ return -EACCES;
+
+ drm_atomic_set_colorop_for_plane(state, colorop);
} else if (property == config->prop_fb_damage_clips) {
ret = drm_property_replace_blob_from_id(dev,
&state->fb_damage_clips,
@@ -622,6 +665,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->color_encoding;
} else if (property == plane->color_range_property) {
*val = state->color_range;
+ } else if (property == plane->color_pipeline_property) {
+ *val = (state->color_pipeline) ? state->color_pipeline->base.id : 0;
} else if (property == config->prop_fb_damage_clips) {
*val = (state->fb_damage_clips) ?
state->fb_damage_clips->base.id : 0;
@@ -644,6 +689,96 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
+static int drm_atomic_color_set_data_property(struct drm_colorop *colorop,
+ struct drm_colorop_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ ssize_t elem_size = -1;
+ ssize_t size = -1;
+ bool replaced = false;
+
+ switch (colorop->type) {
+ case DRM_COLOROP_1D_LUT:
+ size = colorop->size * sizeof(struct drm_color_lut32);
+ break;
+ case DRM_COLOROP_CTM_3X4:
+ size = sizeof(struct drm_color_ctm_3x4);
+ break;
+ case DRM_COLOROP_3D_LUT:
+ size = colorop->size * colorop->size * colorop->size *
+ sizeof(struct drm_color_lut32);
+ break;
+ default:
+ /* should never get here */
+ return -EINVAL;
+ }
+
+ return drm_property_replace_blob_from_id(colorop->dev,
+ &state->data,
+ val,
+ size,
+ elem_size,
+ &replaced);
+}
+
+static int drm_atomic_colorop_set_property(struct drm_colorop *colorop,
+ struct drm_colorop_state *state,
+ struct drm_file *file_priv,
+ struct drm_property *property,
+ uint64_t val)
+{
+ if (property == colorop->bypass_property) {
+ state->bypass = val;
+ } else if (property == colorop->lut1d_interpolation_property) {
+ colorop->lut1d_interpolation = val;
+ } else if (property == colorop->curve_1d_type_property) {
+ state->curve_1d_type = val;
+ } else if (property == colorop->multiplier_property) {
+ state->multiplier = val;
+ } else if (property == colorop->lut3d_interpolation_property) {
+ colorop->lut3d_interpolation = val;
+ } else if (property == colorop->data_property) {
+ return drm_atomic_color_set_data_property(colorop, state,
+ property, val);
+ } else {
+ drm_dbg_atomic(colorop->dev,
+ "[COLOROP:%d:%d] unknown property [PROP:%d:%s]\n",
+ colorop->base.id, colorop->type,
+ property->base.id, property->name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+drm_atomic_colorop_get_property(struct drm_colorop *colorop,
+ const struct drm_colorop_state *state,
+ struct drm_property *property, uint64_t *val)
+{
+ if (property == colorop->type_property)
+ *val = colorop->type;
+ else if (property == colorop->bypass_property)
+ *val = state->bypass;
+ else if (property == colorop->lut1d_interpolation_property)
+ *val = colorop->lut1d_interpolation;
+ else if (property == colorop->curve_1d_type_property)
+ *val = state->curve_1d_type;
+ else if (property == colorop->multiplier_property)
+ *val = state->multiplier;
+ else if (property == colorop->size_property)
+ *val = colorop->size;
+ else if (property == colorop->lut3d_interpolation_property)
+ *val = colorop->lut3d_interpolation;
+ else if (property == colorop->data_property)
+ *val = (state->data) ? state->data->base.id : 0;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
static int drm_atomic_set_writeback_fb_for_connector(
struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
@@ -910,6 +1045,15 @@ int drm_atomic_get_property(struct drm_mode_object *obj,
plane->state, property, val);
break;
}
+ case DRM_MODE_OBJECT_COLOROP: {
+ struct drm_colorop *colorop = obj_to_colorop(obj);
+
+ if (colorop->plane)
+ WARN_ON(!drm_modeset_is_locked(&colorop->plane->mutex));
+
+ ret = drm_atomic_colorop_get_property(colorop, colorop->state, property, val);
+ break;
+ }
default:
drm_dbg_atomic(dev, "[OBJECT:%d] has no properties\n", obj->id);
ret = -EINVAL;
@@ -1078,19 +1222,20 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
if (async_flip) {
- /* check if the prop does a nop change */
- if ((prop != config->prop_fb_id &&
- prop != config->prop_in_fence_fd &&
- prop != config->prop_fb_damage_clips)) {
- ret = drm_atomic_plane_get_property(plane, plane_state,
- prop, &old_val);
- ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- }
+ /* no-op changes are always allowed */
+ ret = drm_atomic_plane_get_property(plane, plane_state,
+ prop, &old_val);
+ ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
- /* ask the driver if this non-primary plane is supported */
- if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
- ret = -EINVAL;
+ /* fail everything that isn't no-op or a pure flip */
+ if (ret && prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips) {
+ break;
+ }
+ if (ret && plane->type != DRM_PLANE_TYPE_PRIMARY) {
+ /* ask the driver if this non-primary plane is supported */
if (plane_funcs && plane_funcs->atomic_async_check)
ret = plane_funcs->atomic_async_check(plane, state, true);
@@ -1106,6 +1251,21 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
ret = drm_atomic_plane_set_property(plane,
plane_state, file_priv,
prop, prop_value);
+
+ break;
+ }
+ case DRM_MODE_OBJECT_COLOROP: {
+ struct drm_colorop *colorop = obj_to_colorop(obj);
+ struct drm_colorop_state *colorop_state;
+
+ colorop_state = drm_atomic_get_colorop_state(state, colorop);
+ if (IS_ERR(colorop_state)) {
+ ret = PTR_ERR(colorop_state);
+ break;
+ }
+
+ ret = drm_atomic_colorop_set_property(colorop, colorop_state,
+ file_priv, prop, prop_value);
break;
}
default:
@@ -1445,6 +1605,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
state->acquire_ctx = &ctx;
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
+ state->plane_color_pipeline = file_priv->plane_color_pipeline;
retry:
copied_objs = 0;
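The reworked async-flip gate above can be restated as a small predicate; this is an illustrative simplification, not the kernel's exact control flow:

/* Sketch: accept no-ops, then pure flip props, then driver-approved ones. */
static bool example_async_prop_ok(bool is_noop, bool is_pure_flip_prop,
				  bool is_primary, bool driver_async_ok)
{
	if (is_noop)
		return true;		/* no-op changes are always allowed */
	if (!is_pure_flip_prop)
		return false;		/* fail everything else */
	if (!is_primary)
		return driver_async_ok;	/* atomic_async_check() verdict */
	return true;
}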
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 4bde00083047..8f355df883d8 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -197,15 +197,22 @@
* driver.
*/
+/* Protect bridge_list and bridge_lingering_list */
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
+static LIST_HEAD(bridge_lingering_list);
static void __drm_bridge_free(struct kref *kref)
{
struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+ mutex_lock(&bridge_lock);
+ list_del(&bridge->list);
+ mutex_unlock(&bridge_lock);
+
if (bridge->funcs->destroy)
bridge->funcs->destroy(bridge);
+
kfree(bridge->container);
}
@@ -273,6 +280,7 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
return ERR_PTR(-ENOMEM);
bridge = container + offset;
+ INIT_LIST_HEAD(&bridge->list);
bridge->container = container;
bridge->funcs = funcs;
kref_init(&bridge->refcount);
@@ -286,10 +294,13 @@ void *__devm_drm_bridge_alloc(struct device *dev, size_t size, size_t offset,
EXPORT_SYMBOL(__devm_drm_bridge_alloc);
/**
- * drm_bridge_add - add the given bridge to the global bridge list
+ * drm_bridge_add - register a bridge
*
* @bridge: bridge control structure
*
+ * Add the given bridge to the global list of bridges, where it can be
+ * found by users via of_drm_find_bridge().
+ *
* The bridge to be added must have been allocated by
* devm_drm_bridge_alloc().
*/
@@ -300,6 +311,14 @@ void drm_bridge_add(struct drm_bridge *bridge)
drm_bridge_get(bridge);
+ /*
+ * If the bridge was previously added and then removed, it is now
+ * in bridge_lingering_list. Remove it, or bridge_lingering_list will be
+ * corrupted when adding this bridge to bridge_list below.
+ */
+ if (!list_empty(&bridge->list))
+ list_del_init(&bridge->list);
+
mutex_init(&bridge->hpd_mutex);
if (bridge->ops & DRM_BRIDGE_OP_HDMI)
@@ -336,14 +355,19 @@ int devm_drm_bridge_add(struct device *dev, struct drm_bridge *bridge)
EXPORT_SYMBOL(devm_drm_bridge_add);
/**
- * drm_bridge_remove - remove the given bridge from the global bridge list
+ * drm_bridge_remove - unregister a bridge
*
* @bridge: bridge control structure
+ *
+ * Remove the given bridge from the global list of registered bridges, so
+ * it won't be found by users via of_drm_find_bridge(), and add it to the
+ * lingering bridge list, to keep track of it until its allocated memory is
+ * eventually freed.
*/
void drm_bridge_remove(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
- list_del_init(&bridge->list);
+ list_move_tail(&bridge->list, &bridge_lingering_list);
mutex_unlock(&bridge_lock);
mutex_destroy(&bridge->hpd_mutex);
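For clarity, the intended lifecycle of the lingering list (an illustrative sequence, not code from this patch):

/*
 * drm_bridge_add(b);     b joins bridge_list
 * drm_bridge_remove(b);  b moves to bridge_lingering_list
 * drm_bridge_add(b);     list_del_init() first, then safely re-added
 * drm_bridge_put(b);     last ref: __drm_bridge_free() unlinks b
 */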
@@ -398,6 +422,9 @@ static bool drm_bridge_is_atomic(struct drm_bridge *bridge)
* If non-NULL the previous bridge must be already attached by a call to this
* function.
*
+ * The bridge to be attached must have been previously added by
+ * drm_bridge_add().
+ *
* Note that bridges attached to encoders are auto-detached during encoder
* cleanup in drm_encoder_cleanup(), so drm_bridge_attach() should generally
* *not* be balanced with a drm_bridge_detach() in driver code.
@@ -414,6 +441,12 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
if (!encoder || !bridge)
return -EINVAL;
+ if (!bridge->container)
+ DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n");
+
+ if (list_empty(&bridge->list))
+ DRM_WARN("Missing drm_bridge_add() before attach\n");
+
drm_bridge_get(bridge);
if (previous && (!previous->dev || previous->encoder != encoder)) {
@@ -941,11 +974,11 @@ static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
{
unsigned int i, num_in_bus_fmts = 0;
struct drm_bridge_state *cur_state;
- struct drm_bridge *prev_bridge;
+ struct drm_bridge *prev_bridge __free(drm_bridge_put) =
+ drm_bridge_get_prev_bridge(cur_bridge);
u32 *in_bus_fmts;
int ret;
- prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
cur_bridge);
@@ -1062,12 +1095,12 @@ drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
struct drm_encoder *encoder = bridge->encoder;
struct drm_bridge_state *last_bridge_state;
unsigned int i, num_out_bus_fmts = 0;
- struct drm_bridge *last_bridge;
u32 *out_bus_fmts;
int ret = 0;
- last_bridge = list_last_entry(&encoder->bridge_chain,
- struct drm_bridge, chain_node);
+ struct drm_bridge *last_bridge __free(drm_bridge_put) =
+ drm_bridge_get(list_last_entry(&encoder->bridge_chain,
+ struct drm_bridge, chain_node));
last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
last_bridge);
@@ -1121,7 +1154,6 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct drm_bridge_state *bridge_state, *next_bridge_state;
- struct drm_bridge *next_bridge;
u32 output_flags = 0;
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
@@ -1130,7 +1162,7 @@ drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
if (!bridge_state)
return;
- next_bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
/*
* Let's try to apply the most common case here, that is, propagate
@@ -1432,14 +1464,20 @@ EXPORT_SYMBOL(devm_drm_put_bridge);
static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
struct drm_bridge *bridge,
- unsigned int idx)
+ unsigned int idx,
+ bool lingering)
{
drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs);
+
+ drm_printf(p, "\trefcount: %u%s\n", kref_read(&bridge->refcount),
+ lingering ? " [lingering]" : "");
+
drm_printf(p, "\ttype: [%d] %s\n",
bridge->type,
drm_get_connector_type_name(bridge->type));
- if (bridge->of_node)
+ /* The OF node could be freed after drm_bridge_remove() */
+ if (bridge->of_node && !lingering)
drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node);
drm_printf(p, "\tops: [0x%x]", bridge->ops);
@@ -1465,7 +1503,10 @@ static int allbridges_show(struct seq_file *m, void *data)
mutex_lock(&bridge_lock);
list_for_each_entry(bridge, &bridge_list, list)
- drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
+
+ list_for_each_entry(bridge, &bridge_lingering_list, list)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++, true);
mutex_unlock(&bridge_lock);
@@ -1477,11 +1518,10 @@ static int encoder_bridges_show(struct seq_file *m, void *data)
{
struct drm_encoder *encoder = m->private;
struct drm_printer p = drm_seq_file_printer(m);
- struct drm_bridge *bridge;
unsigned int idx = 0;
- drm_for_each_bridge_in_chain(encoder, bridge)
- drm_bridge_debugfs_show_bridge(&p, bridge, idx++);
+ drm_for_each_bridge_in_chain_scoped(encoder, bridge)
+ drm_bridge_debugfs_show_bridge(&p, bridge, idx++, false);
return 0;
}
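The __free(drm_bridge_put) annotations used in these hunks come from the kernel's scope-based cleanup helpers (linux/cleanup.h); a minimal sketch of the pattern, assuming drm_bridge_put() tolerates NULL:

static void example_peek_next(struct drm_bridge *bridge)
{
	struct drm_bridge *next __free(drm_bridge_put) =
		drm_bridge_get_next_bridge(bridge);

	if (!next)
		return;	/* the guard still runs; putting NULL is a no-op */

	/* use next; the reference is dropped when this scope exits */
}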
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index a94061f373de..2f279b46bd2c 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -11,9 +11,19 @@
#include <linux/sizes.h>
#include <drm/drm_buddy.h>
+#include <drm/drm_print.h>
+
+enum drm_buddy_free_tree {
+ DRM_BUDDY_CLEAR_TREE = 0,
+ DRM_BUDDY_DIRTY_TREE,
+ DRM_BUDDY_MAX_FREE_TREES,
+};
static struct kmem_cache *slab_blocks;
+#define for_each_free_tree(tree) \
+ for ((tree) = 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++)
+
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
struct drm_buddy_block *parent,
unsigned int order,
@@ -31,6 +41,8 @@ static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
block->header |= order;
block->parent = parent;
+ RB_CLEAR_NODE(&block->rb);
+
BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
return block;
}
@@ -41,23 +53,64 @@ static void drm_block_free(struct drm_buddy *mm,
kmem_cache_free(slab_blocks, block);
}
-static void list_insert_sorted(struct drm_buddy *mm,
- struct drm_buddy_block *block)
+static enum drm_buddy_free_tree
+get_block_tree(struct drm_buddy_block *block)
{
- struct drm_buddy_block *node;
- struct list_head *head;
+ return drm_buddy_block_is_clear(block) ?
+ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+}
- head = &mm->free_list[drm_buddy_block_order(block)];
- if (list_empty(head)) {
- list_add(&block->link, head);
- return;
- }
+static struct drm_buddy_block *
+rbtree_get_free_block(const struct rb_node *node)
+{
+ return node ? rb_entry(node, struct drm_buddy_block, rb) : NULL;
+}
- list_for_each_entry(node, head, link)
- if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
- break;
+static struct drm_buddy_block *
+rbtree_last_free_block(struct rb_root *root)
+{
+ return rbtree_get_free_block(rb_last(root));
+}
- __list_add(&block->link, node->link.prev, &node->link);
+static bool rbtree_is_empty(struct rb_root *root)
+{
+ return RB_EMPTY_ROOT(root);
+}
+
+static bool drm_buddy_block_offset_less(const struct drm_buddy_block *block,
+ const struct drm_buddy_block *node)
+{
+ return drm_buddy_block_offset(block) < drm_buddy_block_offset(node);
+}
+
+static bool rbtree_block_offset_less(struct rb_node *block,
+ const struct rb_node *node)
+{
+ return drm_buddy_block_offset_less(rbtree_get_free_block(block),
+ rbtree_get_free_block(node));
+}
+
+static void rbtree_insert(struct drm_buddy *mm,
+ struct drm_buddy_block *block,
+ enum drm_buddy_free_tree tree)
+{
+ rb_add(&block->rb,
+ &mm->free_trees[tree][drm_buddy_block_order(block)],
+ rbtree_block_offset_less);
+}
+
+static void rbtree_remove(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ unsigned int order = drm_buddy_block_order(block);
+ enum drm_buddy_free_tree tree;
+ struct rb_root *root;
+
+ tree = get_block_tree(block);
+ root = &mm->free_trees[tree][order];
+
+ rb_erase(&block->rb, root);
+ RB_CLEAR_NODE(&block->rb);
}
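A self-contained sketch of the rb_add() pattern adopted above: keeping free blocks ordered by offset means rb_last() returns the highest-offset block in O(log n), which the allocation paths below rely on.

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_block {
	u64 offset;
	struct rb_node rb;
};

static bool demo_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct demo_block, rb)->offset <
	       rb_entry(b, struct demo_block, rb)->offset;
}

static void demo_insert(struct rb_root *root, struct demo_block *blk)
{
	rb_add(&blk->rb, root, demo_less);	/* sorted insert, O(log n) */
}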
static void clear_reset(struct drm_buddy_block *block)
@@ -70,29 +123,34 @@ static void mark_cleared(struct drm_buddy_block *block)
block->header |= DRM_BUDDY_HEADER_CLEAR;
}
-static void mark_allocated(struct drm_buddy_block *block)
+static void mark_allocated(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_ALLOCATED;
- list_del(&block->link);
+ rbtree_remove(mm, block);
}
static void mark_free(struct drm_buddy *mm,
struct drm_buddy_block *block)
{
+ enum drm_buddy_free_tree tree;
+
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_FREE;
- list_insert_sorted(mm, block);
+ tree = get_block_tree(block);
+ rbtree_insert(mm, block, tree);
}
-static void mark_split(struct drm_buddy_block *block)
+static void mark_split(struct drm_buddy *mm,
+ struct drm_buddy_block *block)
{
block->header &= ~DRM_BUDDY_HEADER_STATE;
block->header |= DRM_BUDDY_SPLIT;
- list_del(&block->link);
+ rbtree_remove(mm, block);
}
static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
@@ -148,7 +206,7 @@ static unsigned int __drm_buddy_free(struct drm_buddy *mm,
mark_cleared(parent);
}
- list_del(&buddy->link);
+ rbtree_remove(mm, buddy);
if (force_merge && drm_buddy_block_is_clear(buddy))
mm->clear_avail -= drm_buddy_block_size(mm, buddy);
@@ -169,7 +227,7 @@ static int __force_merge(struct drm_buddy *mm,
u64 end,
unsigned int min_order)
{
- unsigned int order;
+ unsigned int tree, order;
int i;
if (!min_order)
@@ -178,44 +236,48 @@ static int __force_merge(struct drm_buddy *mm,
if (min_order > mm->max_order)
return -EINVAL;
- for (i = min_order - 1; i >= 0; i--) {
- struct drm_buddy_block *block, *prev;
+ for_each_free_tree(tree) {
+ for (i = min_order - 1; i >= 0; i--) {
+ struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
- list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
- struct drm_buddy_block *buddy;
- u64 block_start, block_end;
+ while (iter) {
+ struct drm_buddy_block *block, *buddy;
+ u64 block_start, block_end;
- if (!block->parent)
- continue;
+ block = rbtree_get_free_block(iter);
+ iter = rb_prev(iter);
- block_start = drm_buddy_block_offset(block);
- block_end = block_start + drm_buddy_block_size(mm, block) - 1;
+ if (!block || !block->parent)
+ continue;
- if (!contains(start, end, block_start, block_end))
- continue;
+ block_start = drm_buddy_block_offset(block);
+ block_end = block_start + drm_buddy_block_size(mm, block) - 1;
- buddy = __get_buddy(block);
- if (!drm_buddy_block_is_free(buddy))
- continue;
+ if (!contains(start, end, block_start, block_end))
+ continue;
- WARN_ON(drm_buddy_block_is_clear(block) ==
- drm_buddy_block_is_clear(buddy));
+ buddy = __get_buddy(block);
+ if (!drm_buddy_block_is_free(buddy))
+ continue;
- /*
- * If the prev block is same as buddy, don't access the
- * block in the next iteration as we would free the
- * buddy block as part of the free function.
- */
- if (prev == buddy)
- prev = list_prev_entry(prev, link);
+ WARN_ON(drm_buddy_block_is_clear(block) ==
+ drm_buddy_block_is_clear(buddy));
- list_del(&block->link);
- if (drm_buddy_block_is_clear(block))
- mm->clear_avail -= drm_buddy_block_size(mm, block);
+ /*
+ * Advance to the next node when the current node is the buddy,
+ * as freeing the block will also remove its buddy from the tree.
+ */
+ if (iter == &buddy->rb)
+ iter = rb_prev(iter);
- order = __drm_buddy_free(mm, block, true);
- if (order >= min_order)
- return 0;
+ rbtree_remove(mm, block);
+ if (drm_buddy_block_is_clear(block))
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
+
+ order = __drm_buddy_free(mm, block, true);
+ if (order >= min_order)
+ return 0;
+ }
}
}
@@ -236,8 +298,8 @@ static int __force_merge(struct drm_buddy *mm,
*/
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
- unsigned int i;
- u64 offset;
+ unsigned int i, j, root_count = 0;
+ u64 offset = 0;
if (size < chunk_size)
return -EINVAL;
@@ -258,14 +320,22 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
- mm->free_list = kmalloc_array(mm->max_order + 1,
- sizeof(struct list_head),
- GFP_KERNEL);
- if (!mm->free_list)
+ mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES,
+ sizeof(*mm->free_trees),
+ GFP_KERNEL);
+ if (!mm->free_trees)
return -ENOMEM;
- for (i = 0; i <= mm->max_order; ++i)
- INIT_LIST_HEAD(&mm->free_list[i]);
+ for_each_free_tree(i) {
+ mm->free_trees[i] = kmalloc_array(mm->max_order + 1,
+ sizeof(struct rb_root),
+ GFP_KERNEL);
+ if (!mm->free_trees[i])
+ goto out_free_tree;
+
+ for (j = 0; j <= mm->max_order; ++j)
+ mm->free_trees[i][j] = RB_ROOT;
+ }
mm->n_roots = hweight64(size);
@@ -273,10 +343,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
sizeof(struct drm_buddy_block *),
GFP_KERNEL);
if (!mm->roots)
- goto out_free_list;
-
- offset = 0;
- i = 0;
+ goto out_free_tree;
/*
* Split into power-of-two blocks, in case we are given a size that is
@@ -296,24 +363,26 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
mark_free(mm, root);
- BUG_ON(i > mm->max_order);
+ BUG_ON(root_count > mm->max_order);
BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);
- mm->roots[i] = root;
+ mm->roots[root_count] = root;
offset += root_size;
size -= root_size;
- i++;
+ root_count++;
} while (size);
return 0;
out_free_roots:
- while (i--)
- drm_block_free(mm, mm->roots[i]);
+ while (root_count--)
+ drm_block_free(mm, mm->roots[root_count]);
kfree(mm->roots);
-out_free_list:
- kfree(mm->free_list);
+out_free_tree:
+ while (i--)
+ kfree(mm->free_trees[i]);
+ kfree(mm->free_trees);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
@@ -323,7 +392,7 @@ EXPORT_SYMBOL(drm_buddy_init);
*
* @mm: DRM buddy manager to free
*
- * Cleanup memory manager resources and the freelist
+ * Cleanup memory manager resources and the freetree
*/
void drm_buddy_fini(struct drm_buddy *mm)
{
@@ -349,8 +418,9 @@ void drm_buddy_fini(struct drm_buddy *mm)
WARN_ON(mm->avail != mm->size);
+ for_each_free_tree(i)
+ kfree(mm->free_trees[i]);
kfree(mm->roots);
- kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);
@@ -374,8 +444,7 @@ static int split_block(struct drm_buddy *mm,
return -ENOMEM;
}
- mark_free(mm, block->left);
- mark_free(mm, block->right);
+ mark_split(mm, block);
if (drm_buddy_block_is_clear(block)) {
mark_cleared(block->left);
@@ -383,7 +452,8 @@ static int split_block(struct drm_buddy *mm,
clear_reset(block);
}
- mark_split(block);
+ mark_free(mm, block->left);
+ mark_free(mm, block->right);
return 0;
}
@@ -412,10 +482,11 @@ EXPORT_SYMBOL(drm_get_buddy);
* @is_clear: blocks clear state
*
* Reset the clear state based on @is_clear value for each block
- * in the freelist.
+ * in the freetree.
*/
void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
{
+ enum drm_buddy_free_tree src_tree, dst_tree;
u64 root_size, size, start;
unsigned int order;
int i;
@@ -430,19 +501,24 @@ void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
size -= root_size;
}
+ src_tree = is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
+ dst_tree = is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+
for (i = 0; i <= mm->max_order; ++i) {
- struct drm_buddy_block *block;
-
- list_for_each_entry_reverse(block, &mm->free_list[i], link) {
- if (is_clear != drm_buddy_block_is_clear(block)) {
- if (is_clear) {
- mark_cleared(block);
- mm->clear_avail += drm_buddy_block_size(mm, block);
- } else {
- clear_reset(block);
- mm->clear_avail -= drm_buddy_block_size(mm, block);
- }
+ struct rb_root *root = &mm->free_trees[src_tree][i];
+ struct drm_buddy_block *block, *tmp;
+
+ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
+ rbtree_remove(mm, block);
+ if (is_clear) {
+ mark_cleared(block);
+ mm->clear_avail += drm_buddy_block_size(mm, block);
+ } else {
+ clear_reset(block);
+ mm->clear_avail -= drm_buddy_block_size(mm, block);
}
+
+ rbtree_insert(mm, block, dst_tree);
}
}
}
@@ -632,23 +708,17 @@ __drm_buddy_alloc_range_bias(struct drm_buddy *mm,
}
static struct drm_buddy_block *
-get_maxblock(struct drm_buddy *mm, unsigned int order,
- unsigned long flags)
+get_maxblock(struct drm_buddy *mm,
+ unsigned int order,
+ enum drm_buddy_free_tree tree)
{
struct drm_buddy_block *max_block = NULL, *block = NULL;
+ struct rb_root *root;
unsigned int i;
for (i = order; i <= mm->max_order; ++i) {
- struct drm_buddy_block *tmp_block;
-
- list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
- if (block_incompatible(tmp_block, flags))
- continue;
-
- block = tmp_block;
- break;
- }
-
+ root = &mm->free_trees[tree][i];
+ block = rbtree_last_free_block(root);
if (!block)
continue;
@@ -667,46 +737,44 @@ get_maxblock(struct drm_buddy *mm, unsigned int order,
}
static struct drm_buddy_block *
-alloc_from_freelist(struct drm_buddy *mm,
+alloc_from_freetree(struct drm_buddy *mm,
unsigned int order,
unsigned long flags)
{
struct drm_buddy_block *block = NULL;
+ struct rb_root *root;
+ enum drm_buddy_free_tree tree;
unsigned int tmp;
int err;
+ tree = (flags & DRM_BUDDY_CLEAR_ALLOCATION) ?
+ DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE;
+
if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
- block = get_maxblock(mm, order, flags);
+ block = get_maxblock(mm, order, tree);
if (block)
/* Store the obtained block order */
tmp = drm_buddy_block_order(block);
} else {
for (tmp = order; tmp <= mm->max_order; ++tmp) {
- struct drm_buddy_block *tmp_block;
-
- list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
- if (block_incompatible(tmp_block, flags))
- continue;
-
- block = tmp_block;
- break;
- }
-
+ /* Get RB tree root for this order and tree */
+ root = &mm->free_trees[tree][tmp];
+ block = rbtree_last_free_block(root);
if (block)
break;
}
}
if (!block) {
- /* Fallback method */
+ /* Try allocating from the other tree */
+ tree = (tree == DRM_BUDDY_CLEAR_TREE) ?
+ DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE;
+
for (tmp = order; tmp <= mm->max_order; ++tmp) {
- if (!list_empty(&mm->free_list[tmp])) {
- block = list_last_entry(&mm->free_list[tmp],
- struct drm_buddy_block,
- link);
- if (block)
- break;
- }
+ root = &mm->free_trees[tree][tmp];
+ block = rbtree_last_free_block(root);
+ if (block)
+ break;
}
if (!block)
@@ -771,7 +839,7 @@ static int __alloc_range(struct drm_buddy *mm,
if (contains(start, end, block_start, block_end)) {
if (drm_buddy_block_is_free(block)) {
- mark_allocated(block);
+ mark_allocated(mm, block);
total_allocated += drm_buddy_block_size(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
@@ -849,10 +917,9 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
{
u64 rhs_offset, lhs_offset, lhs_size, filled;
struct drm_buddy_block *block;
- struct list_head *list;
+ unsigned int tree, order;
LIST_HEAD(blocks_lhs);
unsigned long pages;
- unsigned int order;
u64 modify_size;
int err;
@@ -862,35 +929,45 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
if (order == 0)
return -ENOSPC;
- list = &mm->free_list[order];
- if (list_empty(list))
- return -ENOSPC;
+ for_each_free_tree(tree) {
+ struct rb_root *root;
+ struct rb_node *iter;
+
+ root = &mm->free_trees[tree][order];
+ if (rbtree_is_empty(root))
+ continue;
- list_for_each_entry_reverse(block, list, link) {
- /* Allocate blocks traversing RHS */
- rhs_offset = drm_buddy_block_offset(block);
- err = __drm_buddy_alloc_range(mm, rhs_offset, size,
- &filled, blocks);
- if (!err || err != -ENOSPC)
- return err;
-
- lhs_size = max((size - filled), min_block_size);
- if (!IS_ALIGNED(lhs_size, min_block_size))
- lhs_size = round_up(lhs_size, min_block_size);
-
- /* Allocate blocks traversing LHS */
- lhs_offset = drm_buddy_block_offset(block) - lhs_size;
- err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
- NULL, &blocks_lhs);
- if (!err) {
- list_splice(&blocks_lhs, blocks);
- return 0;
- } else if (err != -ENOSPC) {
+ iter = rb_last(root);
+ while (iter) {
+ block = rbtree_get_free_block(iter);
+
+ /* Allocate blocks traversing RHS */
+ rhs_offset = drm_buddy_block_offset(block);
+ err = __drm_buddy_alloc_range(mm, rhs_offset, size,
+ &filled, blocks);
+ if (!err || err != -ENOSPC)
+ return err;
+
+ lhs_size = max((size - filled), min_block_size);
+ if (!IS_ALIGNED(lhs_size, min_block_size))
+ lhs_size = round_up(lhs_size, min_block_size);
+
+ /* Allocate blocks traversing LHS */
+ lhs_offset = drm_buddy_block_offset(block) - lhs_size;
+ err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
+ NULL, &blocks_lhs);
+ if (!err) {
+ list_splice(&blocks_lhs, blocks);
+ return 0;
+ } else if (err != -ENOSPC) {
+ drm_buddy_free_list_internal(mm, blocks);
+ return err;
+ }
+ /* Free blocks for the next iteration */
drm_buddy_free_list_internal(mm, blocks);
- return err;
+
+ iter = rb_prev(iter);
}
- /* Free blocks for the next iteration */
- drm_buddy_free_list_internal(mm, blocks);
}
return -ENOSPC;
@@ -976,7 +1053,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
list_add(&block->tmp_link, &dfs);
err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
if (err) {
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
@@ -999,8 +1076,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm,
return __drm_buddy_alloc_range_bias(mm, start, end,
order, flags);
else
- /* Allocate from freelist */
- return alloc_from_freelist(mm, order, flags);
+ /* Allocate from freetree */
+ return alloc_from_freetree(mm, order, flags);
}
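Restated as a sketch, the selection policy in alloc_from_freetree(): search the tree matching the request first, and fall back to the other tree only when the preferred one has nothing of a usable order. This ignores the TOPDOWN/get_maxblock() path; the helper name is illustrative.

static struct drm_buddy_block *
demo_find_block(struct drm_buddy *mm, unsigned int order, bool want_clear)
{
	enum drm_buddy_free_tree trees[2] = {
		want_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE,
		want_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE,
	};
	unsigned int t, o;

	for (t = 0; t < 2; t++) {
		for (o = order; o <= mm->max_order; o++) {
			struct drm_buddy_block *block =
				rbtree_last_free_block(&mm->free_trees[trees[t]][o]);

			if (block)
				return block;
		}
	}

	return NULL;	/* caller maps this to -ENOSPC */
}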
/**
@@ -1017,8 +1094,8 @@ __drm_buddy_alloc_blocks(struct drm_buddy *mm,
* alloc_range_bias() called on range limitations, which traverses
* the tree and returns the desired block.
*
- * alloc_from_freelist() called when *no* range restrictions
- * are enforced, which picks the block from the freelist.
+ * alloc_from_freetree() called when *no* range restrictions
+ * are enforced, which picks the block from the freetree.
*
* Returns:
* 0 on success, error code on failure.
@@ -1120,7 +1197,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
}
} while (1);
- mark_allocated(block);
+ mark_allocated(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
if (drm_buddy_block_is_clear(block))
mm->clear_avail -= drm_buddy_block_size(mm, block);
@@ -1201,12 +1278,18 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
for (order = mm->max_order; order >= 0; order--) {
- struct drm_buddy_block *block;
+ struct drm_buddy_block *block, *tmp;
+ struct rb_root *root;
u64 count = 0, free;
+ unsigned int tree;
- list_for_each_entry(block, &mm->free_list[order], link) {
- BUG_ON(!drm_buddy_block_is_free(block));
- count++;
+ for_each_free_tree(tree) {
+ root = &mm->free_trees[tree][order];
+
+ rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
+ BUG_ON(!drm_buddy_block_is_free(block));
+ count++;
+ }
}
drm_printf(p, "order-%2d ", order);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 3fa38d4ac70b..a82d741e6630 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -11,12 +11,14 @@
#include <linux/slab.h>
#include <drm/drm_client.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode.h>
#include <drm/drm_print.h>
@@ -168,29 +170,59 @@ void drm_client_release(struct drm_client_dev *client)
drm_client_modeset_free(client);
drm_client_close(client);
+
+ if (client->funcs && client->funcs->free)
+ client->funcs->free(client);
+
drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_client_release);
-static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
+/**
+ * drm_client_buffer_delete - Delete a client buffer
+ * @buffer: DRM client buffer
+ */
+void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
- if (buffer->gem) {
- drm_gem_vunmap(buffer->gem, &buffer->map);
- drm_gem_object_put(buffer->gem);
- }
+ struct drm_gem_object *gem;
+ int ret;
+
+ if (!buffer)
+ return;
+
+ gem = buffer->fb->obj[0];
+ drm_gem_vunmap(gem, &buffer->map);
+
+ ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
+ if (ret)
+ drm_err(buffer->client->dev,
+ "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
+
+ drm_gem_object_put(buffer->gem);
kfree(buffer);
}
+EXPORT_SYMBOL(drm_client_buffer_delete);
static struct drm_client_buffer *
drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
- u32 format, u32 *handle)
+ u32 format, u32 handle, u32 pitch)
{
- const struct drm_format_info *info = drm_format_info(format);
- struct drm_mode_create_dumb dumb_args = { };
+ struct drm_mode_fb_cmd2 fb_req = {
+ .width = width,
+ .height = height,
+ .pixel_format = format,
+ .handles = {
+ handle,
+ },
+ .pitches = {
+ pitch,
+ },
+ };
struct drm_device *dev = client->dev;
struct drm_client_buffer *buffer;
struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -199,28 +231,38 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
buffer->client = client;
- dumb_args.width = width;
- dumb_args.height = height;
- dumb_args.bpp = drm_format_info_bpp(info, 0);
- ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
- if (ret)
- goto err_delete;
-
- obj = drm_gem_object_lookup(client->file, dumb_args.handle);
+ obj = drm_gem_object_lookup(client->file, handle);
if (!obj) {
ret = -ENOENT;
goto err_delete;
}
- buffer->pitch = dumb_args.pitch;
+ ret = drm_mode_addfb2(dev, &fb_req, client->file);
+ if (ret)
+ goto err_drm_gem_object_put;
+
+ fb = drm_framebuffer_lookup(dev, client->file, fb_req.fb_id);
+ if (drm_WARN_ON(dev, !fb)) {
+ ret = -ENOENT;
+ goto err_drm_mode_rmfb;
+ }
+
+ /* drop the reference we picked up in framebuffer lookup */
+ drm_framebuffer_put(fb);
+
+ strscpy(fb->comm, client->name, TASK_COMM_LEN);
+
buffer->gem = obj;
- *handle = dumb_args.handle;
+ buffer->fb = fb;
return buffer;
+err_drm_mode_rmfb:
+ drm_mode_rmfb(dev, fb_req.fb_id, client->file);
+err_drm_gem_object_put:
+ drm_gem_object_put(obj);
err_delete:
- drm_client_buffer_delete(buffer);
-
+ kfree(buffer);
return ERR_PTR(ret);
}
@@ -247,7 +289,7 @@ err_delete:
int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer,
struct iosys_map *map_copy)
{
- struct drm_gem_object *gem = buffer->gem;
+ struct drm_gem_object *gem = buffer->fb->obj[0];
struct iosys_map *map = &buffer->map;
int ret;
@@ -276,7 +318,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap_local);
*/
void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer)
{
- struct drm_gem_object *gem = buffer->gem;
+ struct drm_gem_object *gem = buffer->fb->obj[0];
struct iosys_map *map = &buffer->map;
drm_gem_vunmap_locked(gem, map);
@@ -307,9 +349,10 @@ EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
struct iosys_map *map_copy)
{
+ struct drm_gem_object *gem = buffer->fb->obj[0];
int ret;
- ret = drm_gem_vmap(buffer->gem, &buffer->map);
+ ret = drm_gem_vmap(gem, &buffer->map);
if (ret)
return ret;
*map_copy = buffer->map;
@@ -328,57 +371,14 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
- drm_gem_vunmap(buffer->gem, &buffer->map);
-}
-EXPORT_SYMBOL(drm_client_buffer_vunmap);
+ struct drm_gem_object *gem = buffer->fb->obj[0];
-static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
-{
- int ret;
-
- if (!buffer->fb)
- return;
-
- ret = drm_mode_rmfb(buffer->client->dev, buffer->fb->base.id, buffer->client->file);
- if (ret)
- drm_err(buffer->client->dev,
- "Error removing FB:%u (%d)\n", buffer->fb->base.id, ret);
-
- buffer->fb = NULL;
-}
-
-static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
- u32 width, u32 height, u32 format,
- u32 handle)
-{
- struct drm_client_dev *client = buffer->client;
- struct drm_mode_fb_cmd2 fb_req = { };
- int ret;
-
- fb_req.width = width;
- fb_req.height = height;
- fb_req.pixel_format = format;
- fb_req.handles[0] = handle;
- fb_req.pitches[0] = buffer->pitch;
-
- ret = drm_mode_addfb2(client->dev, &fb_req, client->file);
- if (ret)
- return ret;
-
- buffer->fb = drm_framebuffer_lookup(client->dev, buffer->client->file, fb_req.fb_id);
- if (WARN_ON(!buffer->fb))
- return -ENOENT;
-
- /* drop the reference we picked up in framebuffer lookup */
- drm_framebuffer_put(buffer->fb);
-
- strscpy(buffer->fb->comm, client->name, TASK_COMM_LEN);
-
- return 0;
+ drm_gem_vunmap(gem, &buffer->map);
}
+EXPORT_SYMBOL(drm_client_buffer_vunmap);
/**
- * drm_client_framebuffer_create - Create a client framebuffer
+ * drm_client_buffer_create_dumb - Create a client buffer backed by a dumb buffer
* @client: DRM client
* @width: Framebuffer width
* @height: Framebuffer height
@@ -386,24 +386,33 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
*
* This function creates a &drm_client_buffer which consists of a
* &drm_framebuffer backed by a dumb buffer.
- * Call drm_client_framebuffer_delete() to free the buffer.
+ * Call drm_client_buffer_delete() to free the buffer.
*
* Returns:
* Pointer to a client buffer or an error pointer on failure.
*/
struct drm_client_buffer *
-drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+drm_client_buffer_create_dumb(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
+ const struct drm_format_info *info = drm_format_info(format);
+ struct drm_device *dev = client->dev;
+ struct drm_mode_create_dumb dumb_args = { };
struct drm_client_buffer *buffer;
- u32 handle;
int ret;
- buffer = drm_client_buffer_create(client, width, height, format,
- &handle);
- if (IS_ERR(buffer))
- return buffer;
+ dumb_args.width = width;
+ dumb_args.height = height;
+ dumb_args.bpp = drm_format_info_bpp(info, 0);
+ ret = drm_mode_create_dumb(dev, &dumb_args, client->file);
+ if (ret)
+ return ERR_PTR(ret);
- ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
+ buffer = drm_client_buffer_create(client, width, height, format,
+ dumb_args.handle, dumb_args.pitch);
+ if (IS_ERR(buffer)) {
+ ret = PTR_ERR(buffer);
+ goto err_drm_mode_destroy_dumb;
+ }
/*
* The handle is only needed for creating the framebuffer, destroy it
@@ -411,34 +420,19 @@ drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 heig
* object as DMA-buf. The framebuffer and our buffer structure are still
* holding references to the GEM object to prevent its destruction.
*/
- drm_mode_destroy_dumb(client->dev, handle, client->file);
-
- if (ret) {
- drm_client_buffer_delete(buffer);
- return ERR_PTR(ret);
- }
+ drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file);
return buffer;
-}
-EXPORT_SYMBOL(drm_client_framebuffer_create);
-
-/**
- * drm_client_framebuffer_delete - Delete a client framebuffer
- * @buffer: DRM client buffer (can be NULL)
- */
-void drm_client_framebuffer_delete(struct drm_client_buffer *buffer)
-{
- if (!buffer)
- return;
- drm_client_buffer_rmfb(buffer);
- drm_client_buffer_delete(buffer);
+err_drm_mode_destroy_dumb:
+ drm_mode_destroy_dumb(client->dev, dumb_args.handle, client->file);
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL(drm_client_framebuffer_delete);
+EXPORT_SYMBOL(drm_client_buffer_create_dumb);
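(Editorial sketch, not part of this patch: a minimal in-kernel caller of the
renamed API. The format and sizes are arbitrary and error handling is
abbreviated.)

	struct drm_client_buffer *buffer;
	struct iosys_map map;
	int ret;

	buffer = drm_client_buffer_create_dumb(client, 1024, 768, DRM_FORMAT_XRGB8888);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	ret = drm_client_buffer_vmap(buffer, &map);
	if (ret) {
		drm_client_buffer_delete(buffer);
		return ret;
	}

	/* clear the whole buffer through the mapping */
	iosys_map_memset(&map, 0, 0, buffer->fb->height * buffer->fb->pitches[0]);

	drm_client_buffer_vunmap(buffer);
	drm_client_buffer_delete(buffer);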
/**
- * drm_client_framebuffer_flush - Manually flush client framebuffer
- * @buffer: DRM client buffer (can be NULL)
+ * drm_client_buffer_flush - Manually flush client buffer
+ * @buffer: DRM client buffer
* @rect: Damage rectangle (if NULL flushes all)
*
* This calls &drm_framebuffer_funcs->dirty (if present) to flush buffer changes
@@ -447,7 +441,7 @@ EXPORT_SYMBOL(drm_client_framebuffer_delete);
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect)
+int drm_client_buffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect)
{
if (!buffer || !buffer->fb || !buffer->fb->funcs->dirty)
return 0;
@@ -467,4 +461,4 @@ int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_re
return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file,
0, 0, NULL, 0);
}
-EXPORT_SYMBOL(drm_client_framebuffer_flush);
+EXPORT_SYMBOL(drm_client_buffer_flush);
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
index c83196ad8b59..7b3e362f7926 100644
--- a/drivers/gpu/drm/drm_client_event.c
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -39,12 +39,13 @@ void drm_client_dev_unregister(struct drm_device *dev)
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry_safe(client, tmp, &dev->clientlist, list) {
list_del(&client->list);
- if (client->funcs && client->funcs->unregister) {
+ /*
+ * Unregistering consumes and frees the client.
+ */
+ if (client->funcs && client->funcs->unregister)
client->funcs->unregister(client);
- } else {
+ else
drm_client_release(client);
- kfree(client);
- }
}
mutex_unlock(&dev->clientlist_mutex);
}
@@ -101,7 +102,7 @@ void drm_client_dev_hotplug(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_client_dev_hotplug);
-void drm_client_dev_restore(struct drm_device *dev)
+void drm_client_dev_restore(struct drm_device *dev, bool force)
{
struct drm_client_dev *client;
int ret;
@@ -114,7 +115,7 @@ void drm_client_dev_restore(struct drm_device *dev)
if (!client->funcs || !client->funcs->restore)
continue;
- ret = client->funcs->restore(client);
+ ret = client->funcs->restore(client, force);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
if (!ret) /* The first one to return zero gets the privilege to restore */
break;
@@ -122,7 +123,7 @@ void drm_client_dev_restore(struct drm_device *dev)
mutex_unlock(&dev->clientlist_mutex);
}
-static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_client_suspend(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret = 0;
@@ -131,7 +132,7 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_
return 0;
if (client->funcs && client->funcs->suspend)
- ret = client->funcs->suspend(client, holds_console_lock);
+ ret = client->funcs->suspend(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
client->suspended = true;
@@ -139,20 +140,20 @@ static int drm_client_suspend(struct drm_client_dev *client, bool holds_console_
return ret;
}
-void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock)
+void drm_client_dev_suspend(struct drm_device *dev)
{
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->suspended)
- drm_client_suspend(client, holds_console_lock);
+ drm_client_suspend(client);
}
mutex_unlock(&dev->clientlist_mutex);
}
EXPORT_SYMBOL(drm_client_dev_suspend);
-static int drm_client_resume(struct drm_client_dev *client, bool holds_console_lock)
+static int drm_client_resume(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret = 0;
@@ -161,7 +162,7 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l
return 0;
if (client->funcs && client->funcs->resume)
- ret = client->funcs->resume(client, holds_console_lock);
+ ret = client->funcs->resume(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
client->suspended = false;
@@ -172,14 +173,14 @@ static int drm_client_resume(struct drm_client_dev *client, bool holds_console_l
return ret;
}
-void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock)
+void drm_client_dev_resume(struct drm_device *dev)
{
struct drm_client_dev *client;
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (client->suspended)
- drm_client_resume(client, holds_console_lock);
+ drm_client_resume(client);
}
mutex_unlock(&dev->clientlist_mutex);
}
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 9c2c3b0c8c47..fc4caf7da5fc 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -1293,6 +1293,50 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode)
}
EXPORT_SYMBOL(drm_client_modeset_dpms);
+/**
+ * drm_client_modeset_wait_for_vblank() - Wait for the next VBLANK to occur
+ * @client: DRM client
+ * @crtc_index: The index of the CRTC to wait on
+ *
+ * Block the caller until the given CRTC has seen a VBLANK. Do nothing
+ * if the CRTC is disabled. If there's another DRM master present, fail
+ * with -EBUSY.
+ *
+ * Returns:
+ * 0 on success, or negative error code otherwise.
+ */
+int drm_client_modeset_wait_for_vblank(struct drm_client_dev *client, unsigned int crtc_index)
+{
+ struct drm_device *dev = client->dev;
+ struct drm_crtc *crtc;
+ int ret;
+
+ /*
+ * Rate-limit update frequency to vblank. If there's a DRM master
+ * present, it could interfere while we're waiting for the vblank
+ * event. Don't wait in this case.
+ */
+ if (!drm_master_internal_acquire(dev))
+ return -EBUSY;
+
+ crtc = client->modesets[crtc_index].crtc;
+
+ /*
+ * Only wait for a vblank event if the CRTC is enabled, otherwise
+ * just don't do anything, not even report an error.
+ */
+ ret = drm_crtc_vblank_get(crtc);
+ if (!ret) {
+ drm_crtc_wait_one_vblank(crtc);
+ drm_crtc_vblank_put(crtc);
+ }
+
+ drm_master_internal_release(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_client_modeset_wait_for_vblank);
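(Editorial note: the typical call pattern, mirroring the fbdev helper's use of
this function later in this patch, is simply:)

	mutex_lock(&helper->lock);
	drm_client_modeset_wait_for_vblank(&helper->client, 0);
	mutex_unlock(&helper->lock);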
+
#ifdef CONFIG_DRM_KUNIT_TEST
#include "tests/drm_client_modeset_test.c"
#endif
diff --git a/drivers/gpu/drm/drm_client_sysrq.c b/drivers/gpu/drm/drm_client_sysrq.c
new file mode 100644
index 000000000000..eea660096f1b
--- /dev/null
+++ b/drivers/gpu/drm/drm_client_sysrq.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+#include <linux/sysrq.h>
+
+#include <drm/drm_client_event.h>
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include "drm_internal.h"
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static LIST_HEAD(drm_client_sysrq_dev_list);
+static DEFINE_MUTEX(drm_client_sysrq_dev_lock);
+
+/* emergency restore, don't bother with error reporting */
+static void drm_client_sysrq_restore_work_fn(struct work_struct *ignored)
+{
+ struct drm_device *dev;
+
+ guard(mutex)(&drm_client_sysrq_dev_lock);
+
+ list_for_each_entry(dev, &drm_client_sysrq_dev_list, client_sysrq_list) {
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ continue;
+
+ drm_client_dev_restore(dev, true);
+ }
+}
+
+static DECLARE_WORK(drm_client_sysrq_restore_work, drm_client_sysrq_restore_work_fn);
+
+static void drm_client_sysrq_restore_handler(u8 ignored)
+{
+ schedule_work(&drm_client_sysrq_restore_work);
+}
+
+static const struct sysrq_key_op drm_client_sysrq_restore_op = {
+ .handler = drm_client_sysrq_restore_handler,
+ .help_msg = "force-fb(v)",
+ .action_msg = "Restore framebuffer console",
+};
+
+void drm_client_sysrq_register(struct drm_device *dev)
+{
+ guard(mutex)(&drm_client_sysrq_dev_lock);
+
+ if (list_empty(&drm_client_sysrq_dev_list))
+ register_sysrq_key('v', &drm_client_sysrq_restore_op);
+
+ list_add(&dev->client_sysrq_list, &drm_client_sysrq_dev_list);
+}
+
+void drm_client_sysrq_unregister(struct drm_device *dev)
+{
+ guard(mutex)(&drm_client_sysrq_dev_lock);
+
+ /* remove device from global restore list */
+ if (!drm_WARN_ON(dev, list_empty(&dev->client_sysrq_list)))
+ list_del(&dev->client_sysrq_list);
+
+ /* no devices left; unregister key */
+ if (list_empty(&drm_client_sysrq_dev_list))
+ unregister_sysrq_key('v', &drm_client_sysrq_restore_op);
+}
+#endif
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 37a3270bc3c2..c598b99673fc 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -817,6 +817,40 @@ void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *
}
EXPORT_SYMBOL(drm_crtc_load_palette_8);
+static void fill_palette_332(struct drm_crtc *crtc, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i = (r << 5) | (g << 2) | b; /* 8-bit palette index */
+
+ /* Expand R (3-bit) G (3-bit) and B (2-bit) values to 16-bit values */
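+	/* e.g. r = 0b111 expands to 0xffff and b = 0b10 to 0xaaaa */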
+ r = (r << 13) | (r << 10) | (r << 7) | (r << 4) | (r << 1) | (r >> 2);
+ g = (g << 13) | (g << 10) | (g << 7) | (g << 4) | (g << 1) | (g >> 2);
+ b = (b << 14) | (b << 12) | (b << 10) | (b << 8) | (b << 6) | (b << 4) | (b << 2) | b;
+
+ set_palette(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_palette_332 - Programs a default palette for RGB332-like formats
+ * @crtc: The displaying CRTC
+ * @set_palette: Callback for programming the hardware gamma LUT
+ *
+ * Programs an RGB332 palette to hardware.
+ */
+void drm_crtc_fill_palette_332(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette)
+{
+ unsigned int r, g, b;
+
+	/* The loop limits of 8, 8 and 4 are the number of values per channel. */
+ for (r = 0; r < 8; ++r) {
+ for (g = 0; g < 8; ++g) {
+ for (b = 0; b < 4; ++b)
+ fill_palette_332(crtc, r, g, b, set_palette);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_crtc_fill_palette_332);
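(Editorial sketch: a driver supplies the hardware LUT callback; to_foo_device()
and foo_write_palette() are hypothetical.)

	static void foo_set_lut(struct drm_crtc *crtc, unsigned int index,
				u16 red, u16 green, u16 blue)
	{
		struct foo_device *fdev = to_foo_device(crtc->dev);

		/* hardware takes 8-bit palette entries; keep the upper bits */
		foo_write_palette(fdev, index, red >> 8, green >> 8, blue >> 8);
	}

	/* during modeset, for an RGB332 framebuffer */
	drm_crtc_fill_palette_332(crtc, foo_set_lut);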
+
static void fill_palette_8(struct drm_crtc *crtc, unsigned int i,
drm_crtc_set_lut_func set_palette)
{
@@ -840,3 +874,46 @@ void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_pa
fill_palette_8(crtc, i, set_palette);
}
EXPORT_SYMBOL(drm_crtc_fill_palette_8);
+
+/**
+ * drm_color_lut32_check - check validity of extended lookup table
+ * @lut: property blob containing extended LUT to check
+ * @tests: bitmask of tests to run
+ *
+ * Helper to check whether a userspace-provided extended lookup table is valid and
+ * satisfies hardware requirements. Drivers pass a bitmask indicating which of
+ * the tests in &drm_color_lut_tests should be performed.
+ *
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int drm_color_lut32_check(const struct drm_property_blob *lut, u32 tests)
+{
+ const struct drm_color_lut32 *entry;
+ int i;
+
+ if (!lut || !tests)
+ return 0;
+
+ entry = lut->data;
+ for (i = 0; i < drm_color_lut32_size(lut); i++) {
+ if (tests & DRM_COLOR_LUT_EQUAL_CHANNELS) {
+ if (entry[i].red != entry[i].blue ||
+ entry[i].red != entry[i].green) {
+ DRM_DEBUG_KMS("All LUT entries must have equal r/g/b\n");
+ return -EINVAL;
+ }
+ }
+
+ if (i > 0 && tests & DRM_COLOR_LUT_NON_DECREASING) {
+ if (entry[i].red < entry[i - 1].red ||
+ entry[i].green < entry[i - 1].green ||
+ entry[i].blue < entry[i - 1].blue) {
+ DRM_DEBUG_KMS("LUT entries must never decrease.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_color_lut32_check);
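(Editorial sketch: a driver would run this from its atomic checks, assuming the
extended LUT arrives in a colorop's DATA blob.)

	static int foo_check_lut32(const struct drm_colorop_state *state)
	{
		return drm_color_lut32_check(state->data,
					     DRM_COLOR_LUT_EQUAL_CHANNELS |
					     DRM_COLOR_LUT_NON_DECREASING);
	}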
diff --git a/drivers/gpu/drm/drm_colorop.c b/drivers/gpu/drm/drm_colorop.c
new file mode 100644
index 000000000000..44eb823585d2
--- /dev/null
+++ b/drivers/gpu/drm/drm_colorop.c
@@ -0,0 +1,599 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/drm_colorop.h>
+#include <drm/drm_print.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_plane.h>
+
+#include "drm_crtc_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * When userspace signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE it
+ * should use the COLOR_PIPELINE plane property and associated colorops
+ * for any color operation on the &drm_plane. Setting of all old color
+ * properties, such as COLOR_ENCODING and COLOR_RANGE, will be rejected
+ * and the values of the properties will be ignored.
+ *
+ * Colorops are only advertised and valid for atomic drivers and atomic
+ * userspace that signals the &DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE
+ * client cap.
+ *
+ * A colorop represents a single color operation. Colorops are chained
+ * via the NEXT property and make up color pipelines. Color pipelines
+ * are advertised and selected via the COLOR_PIPELINE &drm_plane
+ * property.
+ *
+ * A colorop will be of a certain type, advertised by the read-only TYPE
+ * property. Each type of colorop will advertise a different set of
+ * properties and is programmed in a different manner. Types can be
+ * enumerated 1D curves, 1D LUTs, 3D LUTs, matrices, etc. See the
+ * &drm_colorop_type documentation for information on each type.
+ *
+ * If a colorop advertises the BYPASS property it can be bypassed.
+ *
+ * Information about colorop and color pipeline design decisions can be
+ * found at rfc/color_pipeline.rst, but note that this document will
+ * grow stale over time.
+ */
+
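(Editorial sketch: a pipeline is a singly linked list of colorops, so a driver
programs it by following the next pointers; first_colorop and the surrounding
driver state are illustrative.)

	struct drm_colorop *colorop = first_colorop;

	while (colorop) {
		if (!colorop->state || colorop->state->bypass) {
			colorop = colorop->next;
			continue;
		}
		/* ... program colorop->state into the hardware ... */
		colorop = colorop->next;
	}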
+static const struct drm_prop_enum_list drm_colorop_type_enum_list[] = {
+ { DRM_COLOROP_1D_CURVE, "1D Curve" },
+ { DRM_COLOROP_1D_LUT, "1D LUT" },
+ { DRM_COLOROP_CTM_3X4, "3x4 Matrix"},
+ { DRM_COLOROP_MULTIPLIER, "Multiplier"},
+ { DRM_COLOROP_3D_LUT, "3D LUT"},
+};
+
+static const char * const colorop_curve_1d_type_names[] = {
+ [DRM_COLOROP_1D_CURVE_SRGB_EOTF] = "sRGB EOTF",
+ [DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF] = "sRGB Inverse EOTF",
+ [DRM_COLOROP_1D_CURVE_PQ_125_EOTF] = "PQ 125 EOTF",
+ [DRM_COLOROP_1D_CURVE_PQ_125_INV_EOTF] = "PQ 125 Inverse EOTF",
+ [DRM_COLOROP_1D_CURVE_BT2020_INV_OETF] = "BT.2020 Inverse OETF",
+ [DRM_COLOROP_1D_CURVE_BT2020_OETF] = "BT.2020 OETF",
+ [DRM_COLOROP_1D_CURVE_GAMMA22] = "Gamma 2.2",
+ [DRM_COLOROP_1D_CURVE_GAMMA22_INV] = "Gamma 2.2 Inverse",
+};
+
+static const struct drm_prop_enum_list drm_colorop_lut1d_interpolation_list[] = {
+ { DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR, "Linear" },
+};
+
+
+static const struct drm_prop_enum_list drm_colorop_lut3d_interpolation_list[] = {
+ { DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL, "Tetrahedral" },
+};
+
+/* Init Helpers */
+
+static int drm_plane_colorop_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, enum drm_colorop_type type,
+ uint32_t flags)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_property *prop;
+ int ret = 0;
+
+ ret = drm_mode_object_add(dev, &colorop->base, DRM_MODE_OBJECT_COLOROP);
+ if (ret)
+ return ret;
+
+ colorop->base.properties = &colorop->properties;
+ colorop->dev = dev;
+ colorop->type = type;
+ colorop->plane = plane;
+ colorop->next = NULL;
+
+ list_add_tail(&colorop->head, &config->colorop_list);
+ colorop->index = config->num_colorop++;
+
+ /* add properties */
+
+ /* type */
+ prop = drm_property_create_enum(dev,
+ DRM_MODE_PROP_IMMUTABLE,
+ "TYPE", drm_colorop_type_enum_list,
+ ARRAY_SIZE(drm_colorop_type_enum_list));
+
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->type_property = prop;
+
+ drm_object_attach_property(&colorop->base,
+ colorop->type_property,
+ colorop->type);
+
+ if (flags & DRM_COLOROP_FLAG_ALLOW_BYPASS) {
+ /* bypass */
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC,
+ "BYPASS");
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->bypass_property = prop;
+ drm_object_attach_property(&colorop->base,
+ colorop->bypass_property,
+ 1);
+ }
+
+ /* next */
+ prop = drm_property_create_object(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC,
+ "NEXT", DRM_MODE_OBJECT_COLOROP);
+ if (!prop)
+ return -ENOMEM;
+ colorop->next_property = prop;
+ drm_object_attach_property(&colorop->base,
+ colorop->next_property,
+ 0);
+
+ return ret;
+}
+
+/**
+ * drm_colorop_cleanup - Cleanup a drm_colorop object in a color pipeline
+ *
+ * @colorop: The drm_colorop object to be cleaned
+ */
+void drm_colorop_cleanup(struct drm_colorop *colorop)
+{
+ struct drm_device *dev = colorop->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+
+ list_del(&colorop->head);
+ config->num_colorop--;
+
+ if (colorop->state && colorop->state->data) {
+ drm_property_blob_put(colorop->state->data);
+ colorop->state->data = NULL;
+ }
+
+ kfree(colorop->state);
+}
+EXPORT_SYMBOL(drm_colorop_cleanup);
+
+/**
+ * drm_colorop_pipeline_destroy - Helper for color pipeline destruction
+ *
+ * @dev: The drm_device containing the drm_planes with the color pipelines
+ *
+ * Provides a default color pipeline destroy handler for drm_device.
+ */
+void drm_colorop_pipeline_destroy(struct drm_device *dev)
+{
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_colorop *colorop, *next;
+
+ list_for_each_entry_safe(colorop, next, &config->colorop_list, head) {
+ drm_colorop_cleanup(colorop);
+ kfree(colorop);
+ }
+}
+EXPORT_SYMBOL(drm_colorop_pipeline_destroy);
+
+/**
+ * drm_plane_colorop_curve_1d_init - Initialize a DRM_COLOROP_1D_CURVE
+ *
+ * @dev: DRM device
+ * @colorop: The drm_colorop object to initialize
+ * @plane: The associated drm_plane
+ * @supported_tfs: A bitfield of supported enum drm_colorop_curve_1d_type values,
+ * created using BIT(curve_type) and combined with the OR '|'
+ * operator.
+ * @flags: bitmask of miscellaneous flags, see DRM_COLOROP_FLAG_* defines.
+ *
+ * Returns:
+ * Zero on success, or a negative error code on failure.
+ */
+int drm_plane_colorop_curve_1d_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, u64 supported_tfs, uint32_t flags)
+{
+ struct drm_prop_enum_list enum_list[DRM_COLOROP_1D_CURVE_COUNT];
+ int i, len;
+
+ struct drm_property *prop;
+ int ret;
+
+ if (!supported_tfs) {
+ drm_err(dev,
+ "No supported TFs for new 1D curve colorop on [PLANE:%d:%s]\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ if ((supported_tfs & -BIT(DRM_COLOROP_1D_CURVE_COUNT)) != 0) {
+ drm_err(dev, "Unknown TF provided on [PLANE:%d:%s]\n",
+ plane->base.id, plane->name);
+ return -EINVAL;
+ }
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_CURVE, flags);
+ if (ret)
+ return ret;
+
+ len = 0;
+ for (i = 0; i < DRM_COLOROP_1D_CURVE_COUNT; i++) {
+ if ((supported_tfs & BIT(i)) == 0)
+ continue;
+
+ enum_list[len].type = i;
+ enum_list[len].name = colorop_curve_1d_type_names[i];
+ len++;
+ }
+
+ if (WARN_ON(len <= 0))
+ return -EINVAL;
+
+ /* initialize 1D curve only attribute */
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ATOMIC, "CURVE_1D_TYPE",
+ enum_list, len);
+
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->curve_1d_type_property = prop;
+ drm_object_attach_property(&colorop->base, colorop->curve_1d_type_property,
+ enum_list[0].type);
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_curve_1d_init);
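(Editorial sketch: a plane that supports the sRGB transfer functions would
initialize a curve colorop roughly as follows.)

	u64 tfs = BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
		  BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF);
	struct drm_colorop *op;
	int ret;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	ret = drm_plane_colorop_curve_1d_init(dev, op, plane, tfs,
					      DRM_COLOROP_FLAG_ALLOW_BYPASS);
	if (ret) {
		kfree(op);
		return ret;
	}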
+
+static int drm_colorop_create_data_prop(struct drm_device *dev, struct drm_colorop *colorop)
+{
+ struct drm_property *prop;
+
+ /* data */
+ prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+ "DATA", 0);
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->data_property = prop;
+ drm_object_attach_property(&colorop->base,
+ colorop->data_property,
+ 0);
+
+ return 0;
+}
+
+/**
+ * drm_plane_colorop_curve_1d_lut_init - Initialize a DRM_COLOROP_1D_LUT
+ *
+ * @dev: DRM device
+ * @colorop: The drm_colorop object to initialize
+ * @plane: The associated drm_plane
+ * @lut_size: LUT size supported by driver
+ * @interpolation: 1D LUT interpolation type
+ * @flags: bitmask of miscellaneous flags, see DRM_COLOROP_FLAG_* defines.
+ *
+ * Returns:
+ * Zero on success, or a negative error code on failure.
+ */
+int drm_plane_colorop_curve_1d_lut_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t lut_size,
+ enum drm_colorop_lut1d_interpolation_type interpolation,
+ uint32_t flags)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_1D_LUT, flags);
+ if (ret)
+ return ret;
+
+ /* initialize 1D LUT only attribute */
+ /* LUT size */
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC,
+ "SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->size_property = prop;
+ drm_object_attach_property(&colorop->base, colorop->size_property, lut_size);
+ colorop->size = lut_size;
+
+ /* interpolation */
+ prop = drm_property_create_enum(dev, 0, "LUT1D_INTERPOLATION",
+ drm_colorop_lut1d_interpolation_list,
+ ARRAY_SIZE(drm_colorop_lut1d_interpolation_list));
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->lut1d_interpolation_property = prop;
+ drm_object_attach_property(&colorop->base, prop, interpolation);
+ colorop->lut1d_interpolation = interpolation;
+
+ /* data */
+ ret = drm_colorop_create_data_prop(dev, colorop);
+ if (ret)
+ return ret;
+
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_curve_1d_lut_init);
+
+int drm_plane_colorop_ctm_3x4_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t flags)
+{
+ int ret;
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_CTM_3X4, flags);
+ if (ret)
+ return ret;
+
+ ret = drm_colorop_create_data_prop(dev, colorop);
+ if (ret)
+ return ret;
+
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_ctm_3x4_init);
+
+/**
+ * drm_plane_colorop_mult_init - Initialize a DRM_COLOROP_MULTIPLIER
+ *
+ * @dev: DRM device
+ * @colorop: The drm_colorop object to initialize
+ * @plane: The associated drm_plane
+ * @flags: bitmask of miscellaneous flags, see DRM_COLOROP_FLAG_* defines.
+ *
+ * Returns:
+ * Zero on success, or a negative error code on failure.
+ */
+int drm_plane_colorop_mult_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane, uint32_t flags)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_MULTIPLIER, flags);
+ if (ret)
+ return ret;
+
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "MULTIPLIER", 0, U64_MAX);
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->multiplier_property = prop;
+ drm_object_attach_property(&colorop->base, colorop->multiplier_property, 0);
+
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_mult_init);
+
+int drm_plane_colorop_3dlut_init(struct drm_device *dev, struct drm_colorop *colorop,
+ struct drm_plane *plane,
+ uint32_t lut_size,
+ enum drm_colorop_lut3d_interpolation_type interpolation,
+ uint32_t flags)
+{
+ struct drm_property *prop;
+ int ret;
+
+ ret = drm_plane_colorop_init(dev, colorop, plane, DRM_COLOROP_3D_LUT, flags);
+ if (ret)
+ return ret;
+
+ /* LUT size */
+ prop = drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE | DRM_MODE_PROP_ATOMIC,
+ "SIZE", 0, UINT_MAX);
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->size_property = prop;
+ drm_object_attach_property(&colorop->base, colorop->size_property, lut_size);
+ colorop->size = lut_size;
+
+ /* interpolation */
+ prop = drm_property_create_enum(dev, 0, "LUT3D_INTERPOLATION",
+ drm_colorop_lut3d_interpolation_list,
+ ARRAY_SIZE(drm_colorop_lut3d_interpolation_list));
+ if (!prop)
+ return -ENOMEM;
+
+ colorop->lut3d_interpolation_property = prop;
+ drm_object_attach_property(&colorop->base, prop, interpolation);
+ colorop->lut3d_interpolation = interpolation;
+
+ /* data */
+ ret = drm_colorop_create_data_prop(dev, colorop);
+ if (ret)
+ return ret;
+
+ drm_colorop_reset(colorop);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_colorop_3dlut_init);
+
+static void __drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop,
+ struct drm_colorop_state *state)
+{
+ memcpy(state, colorop->state, sizeof(*state));
+
+ if (state->data)
+ drm_property_blob_get(state->data);
+
+ state->bypass = true;
+}
+
+struct drm_colorop_state *
+drm_atomic_helper_colorop_duplicate_state(struct drm_colorop *colorop)
+{
+ struct drm_colorop_state *state;
+
+ if (WARN_ON(!colorop->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_colorop_duplicate_state(colorop, state);
+
+ return state;
+}
+
+void drm_colorop_atomic_destroy_state(struct drm_colorop *colorop,
+ struct drm_colorop_state *state)
+{
+ kfree(state);
+}
+
+/**
+ * __drm_colorop_state_reset - resets colorop state to default values
+ * @colorop_state: atomic colorop state, must not be NULL
+ * @colorop: colorop object, must not be NULL
+ *
+ * Initializes the newly allocated @colorop_state with default
+ * values. This is useful for drivers that subclass the colorop state.
+ */
+static void __drm_colorop_state_reset(struct drm_colorop_state *colorop_state,
+ struct drm_colorop *colorop)
+{
+ u64 val;
+
+ colorop_state->colorop = colorop;
+ colorop_state->bypass = true;
+
+ if (colorop->curve_1d_type_property) {
+ drm_object_property_get_default_value(&colorop->base,
+ colorop->curve_1d_type_property,
+ &val);
+ colorop_state->curve_1d_type = val;
+ }
+}
+
+/**
+ * __drm_colorop_reset - reset state on colorop
+ * @colorop: drm colorop
+ * @colorop_state: colorop state to assign
+ *
+ * Initializes the newly allocated @colorop_state and assigns it to
+ * the &drm_crtc->state pointer of @colorop, usually required when
+ * initializing the drivers or when called from the &drm_colorop_funcs.reset
+ * hook.
+ *
+ * This is useful for drivers that subclass the colorop state.
+ */
+static void __drm_colorop_reset(struct drm_colorop *colorop,
+ struct drm_colorop_state *colorop_state)
+{
+ if (colorop_state)
+ __drm_colorop_state_reset(colorop_state, colorop);
+
+ colorop->state = colorop_state;
+}
+
+void drm_colorop_reset(struct drm_colorop *colorop)
+{
+ kfree(colorop->state);
+ colorop->state = kzalloc(sizeof(*colorop->state), GFP_KERNEL);
+
+ if (colorop->state)
+ __drm_colorop_reset(colorop, colorop->state);
+}
+
+static const char * const colorop_type_name[] = {
+ [DRM_COLOROP_1D_CURVE] = "1D Curve",
+ [DRM_COLOROP_1D_LUT] = "1D LUT",
+ [DRM_COLOROP_CTM_3X4] = "3x4 Matrix",
+ [DRM_COLOROP_MULTIPLIER] = "Multiplier",
+ [DRM_COLOROP_3D_LUT] = "3D LUT",
+};
+
+static const char * const colorop_lut3d_interpolation_name[] = {
+ [DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL] = "Tetrahedral",
+};
+
+static const char * const colorop_lut1d_interpolation_name[] = {
+ [DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR] = "Linear",
+};
+
+const char *drm_get_colorop_type_name(enum drm_colorop_type type)
+{
+ if (WARN_ON(type >= ARRAY_SIZE(colorop_type_name)))
+ return "unknown";
+
+ return colorop_type_name[type];
+}
+
+const char *drm_get_colorop_curve_1d_type_name(enum drm_colorop_curve_1d_type type)
+{
+ if (WARN_ON(type >= ARRAY_SIZE(colorop_curve_1d_type_names)))
+ return "unknown";
+
+ return colorop_curve_1d_type_names[type];
+}
+
+/**
+ * drm_get_colorop_lut1d_interpolation_name - return a string for interpolation type
+ * @type: interpolation type to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one here returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_colorop_lut1d_interpolation_name(enum drm_colorop_lut1d_interpolation_type type)
+{
+ if (WARN_ON(type >= ARRAY_SIZE(colorop_lut1d_interpolation_name)))
+ return "unknown";
+
+ return colorop_lut1d_interpolation_name[type];
+}
+
+/**
+ * drm_get_colorop_lut3d_interpolation_name - return a string for interpolation type
+ * @type: interpolation type to compute name of
+ *
+ * In contrast to the other drm_get_*_name functions this one here returns a
+ * const pointer and hence is threadsafe.
+ */
+const char *drm_get_colorop_lut3d_interpolation_name(enum drm_colorop_lut3d_interpolation_type type)
+{
+	if (WARN_ON(type >= ARRAY_SIZE(colorop_lut3d_interpolation_name)))
+ return "unknown";
+
+	return colorop_lut3d_interpolation_name[type];
+}
+
+/**
+ * drm_colorop_set_next_property - sets the next pointer
+ * @colorop: drm colorop
+ * @next: next colorop
+ *
+ * Should be used when constructing the color pipeline.
+ */
+void drm_colorop_set_next_property(struct drm_colorop *colorop, struct drm_colorop *next)
+{
+ drm_object_property_set_value(&colorop->base,
+ colorop->next_property,
+ next ? next->base.id : 0);
+ colorop->next = next;
+}
+EXPORT_SYMBOL(drm_colorop_set_next_property);
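(Editorial sketch: after initializing the individual colorops, a driver links
them into a pipeline; stage1 and stage2 are placeholders.)

	drm_colorop_set_next_property(stage1, stage2);
	drm_colorop_set_next_property(stage2, NULL);	/* end of the pipeline */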
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 272d6254ea47..4d6dc9ebfdb5 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -3439,6 +3439,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
* properties reflect the latest status.
*/
ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
+ file_priv->plane_color_pipeline,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
&out_resp->count_props);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 46655339003d..a7797d260f1e 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -229,6 +229,25 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
* Driver's default scaling filter
* Nearest Neighbor:
* Nearest Neighbor scaling filter
+ * SHARPNESS_STRENGTH:
+ * Atomic property for setting the sharpness strength/intensity by userspace.
+ *
+ * The value of this property is an integer ranging from 0 to 255, where:
+ *
+ * 0: Sharpness feature is disabled (default value).
+ *
+ * 1: Minimum sharpness.
+ *
+ * 255: Maximum sharpness.
+ *
+ * Userspace can gradually increase or decrease the sharpness level to
+ * find the optimum value for the given content, and passes it to the
+ * kernel through this property. Setting the property does not require
+ * a modeset. The sharpness effect takes place post blending, on the
+ * final composed output. If the feature is disabled, the content is
+ * shown unmodified; when enabled, the sharpening enhances the clarity
+ * of the content.
*/
__printf(6, 0)
@@ -940,6 +959,22 @@ int drm_crtc_create_scaling_filter_property(struct drm_crtc *crtc,
}
EXPORT_SYMBOL(drm_crtc_create_scaling_filter_property);
+int drm_crtc_create_sharpness_strength_property(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_property *prop =
+ drm_property_create_range(dev, 0, "SHARPNESS_STRENGTH", 0, 255);
+
+ if (!prop)
+ return -ENOMEM;
+
+ crtc->sharpness_strength_property = prop;
+ drm_object_attach_property(&crtc->base, prop, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_create_sharpness_strength_property);
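(Editorial sketch: drivers opt in during CRTC initialization; how the value is
consumed from the atomic state is driver-specific and not part of this patch.)

	ret = drm_crtc_create_sharpness_strength_property(crtc);
	if (ret)
		return ret;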
+
/**
* drm_crtc_in_clone_mode - check if the given CRTC state is in clone mode
*
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 89706aa8232f..c09409229644 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -163,6 +163,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
void drm_mode_object_unregister(struct drm_device *dev,
struct drm_mode_object *object);
int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+ bool plane_color_pipeline,
uint32_t __user *prop_ptr,
uint64_t __user *prop_values,
uint32_t *arg_count_props);
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index b4fd43783c50..58d0bb6d2676 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -9,6 +9,34 @@
#include "drm_crtc_internal.h"
#include "drm_displayid_internal.h"
+enum {
+ QUIRK_IGNORE_CHECKSUM,
+};
+
+struct displayid_quirk {
+ const struct drm_edid_ident ident;
+ u8 quirks;
+};
+
+static const struct displayid_quirk quirks[] = {
+ {
+ .ident = DRM_EDID_IDENT_INIT('C', 'S', 'O', 5142, "MNE007ZA1-5"),
+ .quirks = BIT(QUIRK_IGNORE_CHECKSUM),
+ },
+};
+
+static u8 get_quirks(const struct drm_edid *drm_edid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(quirks); i++) {
+ if (drm_edid_match(drm_edid, &quirks[i].ident))
+ return quirks[i].quirks;
+ }
+
+ return 0;
+}
+
static const struct displayid_header *
displayid_get_header(const u8 *displayid, int length, int index)
{
@@ -23,7 +51,7 @@ displayid_get_header(const u8 *displayid, int length, int index)
}
static const struct displayid_header *
-validate_displayid(const u8 *displayid, int length, int idx)
+validate_displayid(const u8 *displayid, int length, int idx, bool ignore_checksum)
{
int i, dispid_length;
u8 csum = 0;
@@ -41,33 +69,35 @@ validate_displayid(const u8 *displayid, int length, int idx)
for (i = 0; i < dispid_length; i++)
csum += displayid[idx + i];
if (csum) {
- DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
- return ERR_PTR(-EINVAL);
+ DRM_NOTE("DisplayID checksum invalid, remainder is %d%s\n", csum,
+ ignore_checksum ? " (ignoring)" : "");
+
+ if (!ignore_checksum)
+ return ERR_PTR(-EINVAL);
}
return base;
}
-static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
- int *length, int *idx,
- int *ext_index)
+static const u8 *find_next_displayid_extension(struct displayid_iter *iter)
{
const struct displayid_header *base;
const u8 *displayid;
+ bool ignore_checksum = iter->quirks & BIT(QUIRK_IGNORE_CHECKSUM);
- displayid = drm_edid_find_extension(drm_edid, DISPLAYID_EXT, ext_index);
+ displayid = drm_edid_find_extension(iter->drm_edid, DISPLAYID_EXT, &iter->ext_index);
if (!displayid)
return NULL;
/* EDID extensions block checksum isn't for us */
- *length = EDID_LENGTH - 1;
- *idx = 1;
+ iter->length = EDID_LENGTH - 1;
+ iter->idx = 1;
- base = validate_displayid(displayid, *length, *idx);
+ base = validate_displayid(displayid, iter->length, iter->idx, ignore_checksum);
if (IS_ERR(base))
return NULL;
- *length = *idx + sizeof(*base) + base->bytes;
+ iter->length = iter->idx + sizeof(*base) + base->bytes;
return displayid;
}
@@ -78,6 +108,7 @@ void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
memset(iter, 0, sizeof(*iter));
iter->drm_edid = drm_edid;
+ iter->quirks = get_quirks(drm_edid);
}
static const struct displayid_block *
@@ -126,10 +157,7 @@ __displayid_iter_next(struct displayid_iter *iter)
/* The first section we encounter is the base section */
bool base_section = !iter->section;
- iter->section = drm_find_displayid_extension(iter->drm_edid,
- &iter->length,
- &iter->idx,
- &iter->ext_index);
+ iter->section = find_next_displayid_extension(iter);
if (!iter->section) {
iter->drm_edid = NULL;
return NULL;
diff --git a/drivers/gpu/drm/drm_displayid_internal.h b/drivers/gpu/drm/drm_displayid_internal.h
index 957dd0619f5c..5b1b32f73516 100644
--- a/drivers/gpu/drm/drm_displayid_internal.h
+++ b/drivers/gpu/drm/drm_displayid_internal.h
@@ -167,6 +167,8 @@ struct displayid_iter {
u8 version;
u8 primary_use;
+
+ u8 quirks;
};
void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
index 9dc0408fbbea..5b956229c82f 100644
--- a/drivers/gpu/drm/drm_draw.c
+++ b/drivers/gpu/drm/drm_draw.c
@@ -127,7 +127,7 @@ EXPORT_SYMBOL(drm_draw_fill16);
void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
unsigned int height, unsigned int width,
- u16 color)
+ u32 color)
{
unsigned int y, x;
diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h
index f121ee7339dc..20cb404e23ea 100644
--- a/drivers/gpu/drm/drm_draw_internal.h
+++ b/drivers/gpu/drm/drm_draw_internal.h
@@ -47,7 +47,7 @@ void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
unsigned int height, unsigned int width,
- u16 color);
+ u32 color);
void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
unsigned int height, unsigned int width,
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index cdd591b11488..2915118436ce 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -532,6 +532,8 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
return "rebind";
case DRM_WEDGE_RECOVERY_BUS_RESET:
return "bus-reset";
+ case DRM_WEDGE_RECOVERY_VENDOR:
+ return "vendor-specific";
default:
return NULL;
}
@@ -694,7 +696,6 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->master_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
}
static int drm_dev_init(struct drm_device *dev,
@@ -732,10 +733,10 @@ static int drm_dev_init(struct drm_device *dev,
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
+ INIT_LIST_HEAD(&dev->client_sysrq_list);
INIT_LIST_HEAD(&dev->vblank_event_list);
spin_lock_init(&dev->event_lock);
- mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
@@ -1100,6 +1101,7 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
goto err_unload;
}
drm_panic_register(dev);
+ drm_client_sysrq_register(dev);
DRM_INFO("Initialized %s %d.%d.%d for %s on minor %d\n",
driver->name, driver->major, driver->minor,
@@ -1144,6 +1146,7 @@ void drm_dev_unregister(struct drm_device *dev)
{
dev->registered = false;
+ drm_client_sysrq_unregister(dev);
drm_panic_unregister(dev);
drm_client_dev_unregister(dev);
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index 70032bba1c97..e2b62e5fb891 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -25,8 +25,11 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -57,6 +60,134 @@
* a hardware-specific ioctl to allocate suitable buffer objects.
*/
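(Editorial sketch of the userspace side of this interface, using libdrm's
drmIoctl(); the sizes are arbitrary.)

	struct drm_mode_create_dumb arg = {
		.width = 1024,
		.height = 768,
		.bpp = 32,
	};

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg))
		return -errno;
	/* arg.handle, arg.pitch and arg.size are now filled in */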
+static int drm_mode_align_dumb(struct drm_mode_create_dumb *args,
+ unsigned long hw_pitch_align,
+ unsigned long hw_size_align)
+{
+ u32 pitch = args->pitch;
+ u32 size;
+
+ if (!pitch)
+ return -EINVAL;
+
+ if (hw_pitch_align)
+ pitch = roundup(pitch, hw_pitch_align);
+
+ if (!hw_size_align)
+ hw_size_align = PAGE_SIZE;
+ else if (!IS_ALIGNED(hw_size_align, PAGE_SIZE))
+ return -EINVAL; /* TODO: handle this if necessary */
+
+ if (check_mul_overflow(args->height, pitch, &size))
+ return -EINVAL;
+ size = ALIGN(size, hw_size_align);
+ if (!size)
+ return -EINVAL;
+
+ args->pitch = pitch;
+ args->size = size;
+
+ return 0;
+}
+
+/**
+ * drm_mode_size_dumb - Calculates the scanline and buffer sizes for dumb buffers
+ * @dev: DRM device
+ * @args: Parameters for the dumb buffer
+ * @hw_pitch_align: Hardware scanline alignment in bytes
+ * @hw_size_align: Hardware buffer-size alignment in bytes
+ *
+ * The helper drm_mode_size_dumb() calculates the size of the buffer
+ * allocation and the scanline size for a dumb buffer. Callers have to
+ * set the buffer's width, height and color mode in the argument @args.
+ * The helper validates the correctness of the input and tests for
+ * possible overflows. If successful, it returns the dumb buffer's
+ * required scanline pitch and size in @args.
+ *
+ * The parameter @hw_pitch_align allows the driver to specify an
+ * alignment for the scanline pitch, if the hardware requires any. The
+ * calculated pitch will be a multiple of the alignment. The parameter
+ * @hw_size_align allows specifying an alignment for buffer sizes. The
+ * provided alignment should represent requirements of the graphics
+ * hardware. drm_mode_size_dumb() handles GEM-related constraints
+ * automatically across all drivers and hardware. For example, the
+ * returned buffer size is always a multiple of PAGE_SIZE, which is
+ * required by mmap().
+ *
+ * Returns:
+ * Zero on success, or a negative error code otherwise.
+ */
+int drm_mode_size_dumb(struct drm_device *dev,
+ struct drm_mode_create_dumb *args,
+ unsigned long hw_pitch_align,
+ unsigned long hw_size_align)
+{
+ u64 pitch = 0;
+ u32 fourcc;
+
+ /*
+ * The scanline pitch depends on the buffer width and the color
+ * format. The latter is specified as a color-mode constant for
+ * which we first have to find the corresponding color format.
+ *
+ * Different color formats can have the same color-mode constant.
+ * For example XRGB8888 and BGRX8888 both have a color mode of 32.
+ * It is possible to use different formats for dumb-buffer allocation
+ * and rendering as long as all involved formats share the same
+ * color-mode constant.
+ */
+ fourcc = drm_driver_color_mode_format(dev, args->bpp);
+ if (fourcc != DRM_FORMAT_INVALID) {
+ const struct drm_format_info *info = drm_format_info(fourcc);
+
+ if (!info)
+ return -EINVAL;
+ pitch = drm_format_info_min_pitch(info, 0, args->width);
+ } else if (args->bpp) {
+ /*
+ * Some userspace throws in arbitrary values for bpp and
+ * relies on the kernel to figure it out. In this case we
+ * fall back to the old method of using bpp directly. The
+ * over-commitment of memory from the rounding is acceptable
+ * for compatibility with legacy userspace. We have a number
+ * of deprecated legacy values that are explicitly supported.
+ */
+ switch (args->bpp) {
+ default:
+ drm_warn_once(dev,
+ "Unknown color mode %u; guessing buffer size.\n",
+ args->bpp);
+ fallthrough;
+ /*
+ * These constants represent various YUV formats supported by
+ * drm_gem_afbc_get_bpp().
+ */
+ case 12: // DRM_FORMAT_YUV420_8BIT
+ case 15: // DRM_FORMAT_YUV420_10BIT
+ case 30: // DRM_FORMAT_VUY101010
+ fallthrough;
+ /*
+ * Used by Mesa and Gstreamer to allocate NV formats and others
+ * as RGB buffers. Technically, XRGB16161616F formats are RGB,
+ * but the dumb buffers are not supposed to be used for anything
+	 * beyond 32 bits per pixel.
+ */
+ case 10: // DRM_FORMAT_NV{15,20,30}, DRM_FORMAT_P010
+ case 64: // DRM_FORMAT_{XRGB,XBGR,ARGB,ABGR}16161616F
+ pitch = args->width * DIV_ROUND_UP(args->bpp, SZ_8);
+ break;
+ }
+ }
+
+ if (!pitch || pitch > U32_MAX)
+ return -EINVAL;
+
+ args->pitch = pitch;
+
+ return drm_mode_align_dumb(args, hw_pitch_align, hw_size_align);
+}
+EXPORT_SYMBOL(drm_mode_size_dumb);
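(Editorial sketch: a driver's &drm_driver.dumb_create implementation calls the
helper before allocating its GEM object; the 64-byte pitch alignment and
foo_gem_create_with_handle() are hypothetical.)

	static int foo_dumb_create(struct drm_file *file, struct drm_device *dev,
				   struct drm_mode_create_dumb *args)
	{
		int ret;

		ret = drm_mode_size_dumb(dev, args, 64, 0);
		if (ret)
			return ret;

		return foo_gem_create_with_handle(file, dev, args->size,
						  &args->handle);
	}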
+
int drm_mode_create_dumb(struct drm_device *dev,
struct drm_mode_create_dumb *args,
struct drm_file *file_priv)
@@ -99,7 +230,30 @@ int drm_mode_create_dumb(struct drm_device *dev,
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- return drm_mode_create_dumb(dev, data, file_priv);
+ struct drm_mode_create_dumb *args = data;
+ int err;
+
+ err = drm_mode_create_dumb(dev, args, file_priv);
+ if (err) {
+ args->handle = 0;
+ args->pitch = 0;
+ args->size = 0;
+ }
+ return err;
+}
+
+static int drm_mode_mmap_dumb(struct drm_device *dev, struct drm_mode_map_dumb *args,
+ struct drm_file *file_priv)
+{
+ if (!dev->driver->dumb_create)
+ return -ENOSYS;
+
+ if (dev->driver->dumb_map_offset)
+ return dev->driver->dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
+ else
+ return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+ &args->offset);
}
/**
@@ -120,17 +274,12 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_map_dumb *args = data;
+ int err;
- if (!dev->driver->dumb_create)
- return -ENOSYS;
-
- if (dev->driver->dumb_map_offset)
- return dev->driver->dumb_map_offset(file_priv, dev,
- args->handle,
- &args->offset);
- else
- return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
- &args->offset);
+ err = drm_mode_mmap_dumb(dev, args, file_priv);
+ if (err)
+ args->offset = 0;
+ return err;
}
int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index e2e85345aa9a..26bb7710a462 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -250,6 +250,9 @@ static const struct edid_quirk {
EDID_QUIRK('S', 'V', 'R', 0x1019, BIT(EDID_QUIRK_NON_DESKTOP)),
EDID_QUIRK('A', 'U', 'O', 0x1111, BIT(EDID_QUIRK_NON_DESKTOP)),
+ /* LQ116M1JW10 displays noise when 8 bpc, but display fine as 6 bpc */
+ EDID_QUIRK('S', 'H', 'P', 0x154c, BIT(EDID_QUIRK_FORCE_6BPC)),
+
/*
* @drm_edid_internal_quirk entries end here, following with the
* @drm_edid_quirk entries.
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 11a5b60cb9ce..4a7f72044ab8 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -31,9 +31,6 @@
#include <linux/console.h>
#include <linux/export.h>
-#include <linux/pci.h>
-#include <linux/sysrq.h>
-#include <linux/vga_switcheroo.h>
#include <drm/drm_atomic.h>
#include <drm/drm_drv.h>
@@ -255,6 +252,7 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
/**
* drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
* @fb_helper: driver-allocated fbdev helper, can be NULL
+ * @force: ignore present DRM master
*
* This helper should be called from fbdev emulation's &drm_client_funcs.restore
* callback. It ensures that the user isn't greeted with a black screen when the
@@ -263,48 +261,12 @@ __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, bool force)
{
- return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
+ return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
-#ifdef CONFIG_MAGIC_SYSRQ
-/* emergency restore, don't bother with error reporting */
-static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
-{
- struct drm_fb_helper *helper;
-
- mutex_lock(&kernel_fb_helper_lock);
- list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
- struct drm_device *dev = helper->dev;
-
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
- continue;
-
- mutex_lock(&helper->lock);
- drm_client_modeset_commit_locked(&helper->client);
- mutex_unlock(&helper->lock);
- }
- mutex_unlock(&kernel_fb_helper_lock);
-}
-
-static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
-
-static void drm_fb_helper_sysrq(u8 dummy1)
-{
- schedule_work(&drm_fb_helper_restore_work);
-}
-
-static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
- .handler = drm_fb_helper_sysrq,
- .help_msg = "force-fb(v)",
- .action_msg = "Restore framebuffer console",
-};
-#else
-static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
-#endif
-
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -368,6 +330,10 @@ static void drm_fb_helper_fb_dirty(struct drm_fb_helper *helper)
unsigned long flags;
int ret;
+ mutex_lock(&helper->lock);
+ drm_client_modeset_wait_for_vblank(&helper->client, 0);
+ mutex_unlock(&helper->lock);
+
if (drm_WARN_ON_ONCE(dev, !helper->funcs->fb_dirty))
return;
@@ -491,20 +457,7 @@ int drm_fb_helper_init(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_fb_helper_init);
-/**
- * drm_fb_helper_alloc_info - allocate fb_info and some of its members
- * @fb_helper: driver-allocated fbdev helper
- *
- * A helper to alloc fb_info and the member cmap. Called by the driver
- * within the struct &drm_driver.fbdev_probe callback function. Drivers do
- * not need to release the allocated fb_info structure themselves, this is
- * automatically done when calling drm_fb_helper_fini().
- *
- * RETURNS:
- * fb_info pointer if things went okay, pointer containing error code
- * otherwise
- */
-struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
+static struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
{
struct device *dev = fb_helper->dev->dev;
struct fb_info *info;
@@ -531,17 +484,8 @@ err_release:
framebuffer_release(info);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL(drm_fb_helper_alloc_info);
-/**
- * drm_fb_helper_release_info - release fb_info and its members
- * @fb_helper: driver-allocated fbdev helper
- *
- * A helper to release fb_info and the member cmap. Drivers do not
- * need to release the allocated fb_info structure themselves, this is
- * automatically done when calling drm_fb_helper_fini().
- */
-void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
+static void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
{
struct fb_info *info = fb_helper->info;
@@ -554,7 +498,6 @@ void drm_fb_helper_release_info(struct drm_fb_helper *fb_helper)
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
-EXPORT_SYMBOL(drm_fb_helper_release_info);
/**
* drm_fb_helper_unregister_info - unregister fb_info framebuffer device
@@ -566,11 +509,6 @@ EXPORT_SYMBOL(drm_fb_helper_release_info);
*/
void drm_fb_helper_unregister_info(struct drm_fb_helper *fb_helper)
{
- struct fb_info *info = fb_helper->info;
- struct device *dev = info->device;
-
- if (dev_is_pci(dev))
- vga_switcheroo_client_fb_set(to_pci_dev(dev), NULL);
unregister_framebuffer(fb_helper->info);
}
EXPORT_SYMBOL(drm_fb_helper_unregister_info);
@@ -597,11 +535,8 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
drm_fb_helper_release_info(fb_helper);
mutex_lock(&kernel_fb_helper_lock);
- if (!list_empty(&fb_helper->kernel_fb_list)) {
+ if (!list_empty(&fb_helper->kernel_fb_list))
list_del(&fb_helper->kernel_fb_list);
- if (list_empty(&kernel_fb_helper_list))
- unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
- }
mutex_unlock(&kernel_fb_helper_lock);
if (!fb_helper->client.funcs)
@@ -1068,15 +1003,9 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
int ret = 0;
- mutex_lock(&fb_helper->lock);
- if (!drm_master_internal_acquire(dev)) {
- ret = -EBUSY;
- goto unlock;
- }
+ guard(mutex)(&fb_helper->lock);
switch (cmd) {
case FBIO_WAITFORVSYNC:
@@ -1096,28 +1025,12 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
* make. If we're not smart enough here, one should
* just consider switching the userspace to KMS.
*/
- crtc = fb_helper->client.modesets[0].crtc;
-
- /*
- * Only wait for a vblank event if the CRTC is
- * enabled, otherwise just don't do anythintg,
- * not even report an error.
- */
- ret = drm_crtc_vblank_get(crtc);
- if (!ret) {
- drm_crtc_wait_one_vblank(crtc);
- drm_crtc_vblank_put(crtc);
- }
-
- ret = 0;
+ ret = drm_client_modeset_wait_for_vblank(&fb_helper->client, 0);
break;
default:
ret = -ENOTTY;
}
- drm_master_internal_release(dev);
-unlock:
- mutex_unlock(&fb_helper->lock);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
@@ -1346,9 +1259,9 @@ int drm_fb_helper_set_par(struct fb_info *info)
* the KDSET IOCTL with KD_TEXT, and only after that drops the master
* status when exiting.
*
- * In the past this was caught by drm_fb_helper_lastclose(), but on
- * modern systems where logind always keeps a drm fd open to orchestrate
- * the vt switching, this doesn't work.
+ * In the past this was caught by drm_fb_helper_restore_fbdev_mode_unlocked(),
+ * but on modern systems where logind always keeps a drm fd open to
+ * orchestrate the vt switching, this doesn't work.
*
* To not break the userspace ABI we have this special case here, which
* is only used for the above case. Everything else uses the normal
@@ -1632,7 +1545,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_surface_size sizes;
- struct fb_info *info;
int ret;
if (drm_WARN_ON(dev, !dev->driver->fbdev_probe))
@@ -1653,12 +1565,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
strcpy(fb_helper->fb->comm, "[fbcon]");
- info = fb_helper->info;
-
- /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */
- if (dev_is_pci(info->device))
- vga_switcheroo_client_fb_set(to_pci_dev(info->device), info);
-
return 0;
}
@@ -1827,6 +1733,11 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
height = dev->mode_config.max_height;
drm_client_modeset_probe(&fb_helper->client, width, height);
+
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
ret = drm_fb_helper_single_fb_probe(fb_helper);
if (ret < 0) {
if (ret == -EAGAIN) {
@@ -1835,13 +1746,12 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
}
mutex_unlock(&fb_helper->lock);
- return ret;
+ goto err_drm_fb_helper_release_info;
}
drm_setup_crtcs_fb(fb_helper);
fb_helper->deferred_setup = false;
- info = fb_helper->info;
info->var.pixclock = 0;
/* Need to drop locks to avoid recursive deadlock in
@@ -1857,13 +1767,14 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
info->node, info->fix.id);
mutex_lock(&kernel_fb_helper_lock);
- if (list_empty(&kernel_fb_helper_list))
- register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
-
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
mutex_unlock(&kernel_fb_helper_lock);
return 0;
+
+err_drm_fb_helper_release_info:
+ drm_fb_helper_release_info(fb_helper);
+ return ret;
}
/**
@@ -1973,16 +1884,3 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
-
-/**
- * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation
- * @dev: DRM device
- *
- * This function is obsolete. Call drm_fb_helper_restore_fbdev_mode_unlocked()
- * instead.
- */
-void drm_fb_helper_lastclose(struct drm_device *dev)
-{
- drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
-}
-EXPORT_SYMBOL(drm_fb_helper_lastclose);
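With drm_fb_helper_lastclose() removed, drivers that installed it as their &drm_driver.lastclose hook can usually drop the callback entirely, since the core lastclose path (see the drm_file.c hunk below) already restores in-kernel clients. A driver that still wants the explicit restore can call the documented replacement directly; a hypothetical hook:

	static void mydrv_lastclose(struct drm_device *dev)
	{
		drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
	}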
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 8bd626ef16c7..9412d9fdd74b 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -10,6 +10,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
/*
* struct fb_ops
@@ -55,10 +56,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
drm_fb_helper_fini(fb_helper);
drm_client_buffer_vunmap(fb_helper->buffer);
- drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_buffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_dma_fb_ops = {
@@ -90,10 +89,8 @@ static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
vfree(shadow);
drm_client_buffer_vunmap(fb_helper->buffer);
- drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_buffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
@@ -272,9 +269,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
+ struct fb_info *info = fb_helper->info;
struct drm_client_buffer *buffer;
struct drm_framebuffer *fb;
- struct fb_info *info;
u32 format;
struct iosys_map map;
int ret;
@@ -285,7 +282,7 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
sizes->surface_depth);
- buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ buffer = drm_client_buffer_create_dumb(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
@@ -304,12 +301,6 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->buffer = buffer;
fb_helper->fb = fb;
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_drm_client_buffer_vunmap;
- }
-
drm_fb_helper_fill_info(info, fb_helper, sizes);
if (fb->funcs->dirty)
@@ -317,18 +308,16 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
else
ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
if (ret)
- goto err_drm_fb_helper_release_info;
+ goto err_drm_client_buffer_vunmap;
return 0;
-err_drm_fb_helper_release_info:
- drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
- drm_client_framebuffer_delete(buffer);
+ drm_client_buffer_delete(buffer);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_dma_driver_fbdev_probe);
diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c
index 1e827bf8b815..458c899b5d4f 100644
--- a/drivers/gpu/drm/drm_fbdev_shmem.c
+++ b/drivers/gpu/drm/drm_fbdev_shmem.c
@@ -9,6 +9,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
/*
* struct fb_ops
@@ -63,10 +64,8 @@ static void drm_fbdev_shmem_fb_destroy(struct fb_info *info)
drm_fb_helper_fini(fb_helper);
drm_client_buffer_vunmap(fb_helper->buffer);
- drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_buffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_shmem_fb_ops = {
@@ -136,10 +135,10 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
+ struct fb_info *info = fb_helper->info;
struct drm_client_buffer *buffer;
struct drm_gem_shmem_object *shmem;
struct drm_framebuffer *fb;
- struct fb_info *info;
u32 format;
struct iosys_map map;
int ret;
@@ -149,7 +148,7 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
sizes->surface_bpp);
format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp, sizes->surface_depth);
- buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ buffer = drm_client_buffer_create_dumb(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
@@ -169,12 +168,6 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->buffer = buffer;
fb_helper->fb = fb;
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_drm_client_buffer_vunmap;
- }
-
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fbops = &drm_fbdev_shmem_fb_ops;
@@ -195,18 +188,16 @@ int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
- goto err_drm_fb_helper_release_info;
+ goto err_drm_client_buffer_vunmap;
return 0;
-err_drm_fb_helper_release_info:
- drm_fb_helper_release_info(fb_helper);
err_drm_client_buffer_vunmap:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
drm_client_buffer_vunmap(buffer);
err_drm_client_buffer_delete:
- drm_client_framebuffer_delete(buffer);
+ drm_client_buffer_delete(buffer);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_shmem_driver_fbdev_probe);
diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c
index 85feb55bba11..160bc35d8738 100644
--- a/drivers/gpu/drm/drm_fbdev_ttm.c
+++ b/drivers/gpu/drm/drm_fbdev_ttm.c
@@ -50,11 +50,9 @@ static void drm_fbdev_ttm_fb_destroy(struct fb_info *info)
fb_deferred_io_cleanup(info);
drm_fb_helper_fini(fb_helper);
vfree(shadow);
- drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_buffer_delete(fb_helper->buffer);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops drm_fbdev_ttm_fb_ops = {
@@ -176,8 +174,8 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
+ struct fb_info *info = fb_helper->info;
struct drm_client_buffer *buffer;
- struct fb_info *info;
size_t screen_size;
void *screen_buffer;
u32 format;
@@ -189,7 +187,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
format = drm_driver_legacy_fb_format(dev, sizes->surface_bpp,
sizes->surface_depth);
- buffer = drm_client_framebuffer_create(client, sizes->surface_width,
+ buffer = drm_client_buffer_create_dumb(client, sizes->surface_width,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
@@ -202,13 +200,7 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
screen_buffer = vzalloc(screen_size);
if (!screen_buffer) {
ret = -ENOMEM;
- goto err_drm_client_framebuffer_delete;
- }
-
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_vfree;
+ goto err_drm_client_buffer_delete;
}
drm_fb_helper_fill_info(info, fb_helper, sizes);
@@ -227,18 +219,16 @@ int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
info->fbdefio = &fb_helper->fbdefio;
ret = fb_deferred_io_init(info);
if (ret)
- goto err_drm_fb_helper_release_info;
+ goto err_vfree;
return 0;
-err_drm_fb_helper_release_info:
- drm_fb_helper_release_info(fb_helper);
err_vfree:
vfree(screen_buffer);
-err_drm_client_framebuffer_delete:
+err_drm_client_buffer_delete:
fb_helper->fb = NULL;
fb_helper->buffer = NULL;
- drm_client_framebuffer_delete(buffer);
+ drm_client_buffer_delete(buffer);
return ret;
}
EXPORT_SYMBOL(drm_fbdev_ttm_driver_fbdev_probe);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index eebd1a05ee97..be5e617ceb9f 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -405,7 +405,7 @@ EXPORT_SYMBOL(drm_open);
static void drm_lastclose(struct drm_device *dev)
{
- drm_client_dev_restore(dev);
+ drm_client_dev_restore(dev, false);
if (dev_is_pci(dev->dev))
vga_switcheroo_process_delayed_switch();
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 8f3daf38ca63..6cddf05c493b 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -1165,93 +1165,24 @@ void drm_fb_argb8888_to_argb4444(struct iosys_map *dst, const unsigned int *dst_
}
EXPORT_SYMBOL(drm_fb_argb8888_to_argb4444);
-/**
- * drm_fb_blit - Copy parts of a framebuffer to display memory
- * @dst: Array of display-memory addresses to copy to
- * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
- * within @dst; can be NULL if scanlines are stored next to each other.
- * @dst_format: FOURCC code of the display's color format
- * @src: The framebuffer memory to copy from
- * @fb: The framebuffer to copy from
- * @clip: Clip rectangle area to copy
- * @state: Transform and conversion state
- *
- * This function copies parts of a framebuffer to display memory. If the
- * formats of the display and the framebuffer mismatch, the blit function
- * will attempt to convert between them during the process. The parameters @dst,
- * @dst_pitch and @src refer to arrays. Each array must have at least as many
- * entries as there are planes in @dst_format's format. Each entry stores the
- * value for the format's respective color plane at the same index.
- *
- * This function does not apply clipping on @dst (i.e. the destination is at the
- * top-left corner).
- *
- * Returns:
- * 0 on success, or
- * -EINVAL if the color-format conversion failed, or
- * a negative error code otherwise.
- */
-int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t dst_format,
- const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state)
+static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- uint32_t fb_format = fb->format->format;
-
- if (fb_format == dst_format) {
- drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
- return 0;
- } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
- return 0;
- } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
- return 0;
- } else if (fb_format == DRM_FORMAT_XRGB8888) {
- if (dst_format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_XRGB1555) {
- drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ARGB1555) {
- drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_RGBA5551) {
- drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_BGR888) {
- drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ARGB8888) {
- drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_XBGR8888) {
- drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ABGR8888) {
- drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_XRGB2101010) {
- drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_ARGB2101010) {
- drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip, state);
- return 0;
- } else if (dst_format == DRM_FORMAT_BGRX8888) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false, state);
- return 0;
- }
- }
+ u8 *dbuf8 = dbuf;
+ const u8 *sbuf8 = sbuf;
+ u8 px;
- drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n",
- &fb_format, &dst_format);
+ while (pixels) {
+ unsigned int i, bits = min(pixels, 4U);
+ u8 byte = 0;
- return -EINVAL;
+ for (i = 0; i < bits; i++, pixels--) {
+ byte >>= 2;
+ px = (*sbuf8++ * 3 + 127) / 255;
+ byte |= (px &= 0x03) << 6;
+ }
+ *dbuf8++ = byte;
+ }
}
-EXPORT_SYMBOL(drm_fb_blit);
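A worked example of the packing in drm_fb_gray8_to_gray2_line() above, as a minimal user-space sketch (the main() harness is illustrative, not part of the patch): each 8-bit gray value is rounded to 2 bits with (g * 3 + 127) / 255, and four pixels are packed per byte with the first pixel ending up in the two least-significant bits.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t gray8[4] = { 0, 85, 170, 255 };	/* -> 0, 1, 2, 3 */
		uint8_t byte = 0;

		for (int i = 0; i < 4; i++) {
			byte >>= 2;
			byte |= (uint8_t)(((gray8[i] * 3 + 127) / 255) & 0x3) << 6;
		}
		/* Prints 0xe4: pixels 0..3 land in bits 1:0, 3:2, 5:4, 7:6. */
		printf("0x%02x\n", byte);
		return 0;
	}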
static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -1359,3 +1290,92 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
+
+/**
+ * drm_fb_xrgb8888_to_gray2 - Convert XRGB8888 to gray2
+ * @dst: Array of gray2 destination buffer
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The framebuffer format must be XRGB8888. The
+ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
+ * least as many entries as there are planes in @fb's format. Each entry stores the
+ * value for the format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner). The first pixel (upper left corner of the clip rectangle) will
+ * be converted and copied to the first two bits (LSB) in the first byte of the gray2
+ * destination buffer. If the caller requires that the first pixel in a byte must
+ * be located at an x-coordinate that is a multiple of 4, then the caller must take
+ * care of supplying a suitable clip rectangle itself.
+ *
+ * DRM doesn't have native gray2 support. Drivers can use this function for
+ * gray2 devices that don't support XRGB8888 natively. Such drivers can
+ * announce the commonly supported XR24 format to userspace and use this function
+ * to convert to the native format.
+ */
+void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = {
+ 0, 0, 0, 0
+ };
+ unsigned int linepixels = drm_rect_width(clip);
+ unsigned int lines = drm_rect_height(clip);
+ unsigned int cpp = fb->format->cpp[0];
+ unsigned int len_src32 = linepixels * cpp;
+ struct drm_device *dev = fb->dev;
+ void *vaddr = src[0].vaddr;
+ unsigned int dst_pitch_0;
+ unsigned int y;
+ u8 *gray2 = dst[0].vaddr, *gray8;
+ u32 *src32;
+
+ if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888))
+ return;
+
+ if (!dst_pitch)
+ dst_pitch = default_dst_pitch;
+ dst_pitch_0 = dst_pitch[0];
+
+ /*
+ * The gray2 destination buffer contains 2 bits per pixel
+ */
+ if (!dst_pitch_0)
+ dst_pitch_0 = DIV_ROUND_UP(linepixels, 4);
+
+ /*
+ * The dma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ *
+ * Also, format conversion from XR24 to gray2 is done
+ * line-by-line, converting to 8-bit grayscale as an
+ * intermediate step.
+ *
+ * Allocate a buffer used both for copying from the CMA
+ * memory and for storing the intermediate grayscale line pixels.
+ */
+ src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL);
+ if (!src32)
+ return;
+
+ gray8 = (u8 *)src32 + len_src32;
+
+ vaddr += clip_offset(clip, fb->pitches[0], cpp);
+ for (y = 0; y < lines; y++) {
+ src32 = memcpy(src32, vaddr, len_src32);
+ drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels);
+ drm_fb_gray8_to_gray2_line(gray2, gray8, linepixels);
+ vaddr += fb->pitches[0];
+ gray2 += dst_pitch_0;
+ }
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray2);
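A hedged sketch of how a driver might feed the new helper from an atomic plane update, assuming a shadow-plane setup; the mydrv names and the gray2 destination mapping are illustrative only, and a real driver would also offset the destination by the damage position:

	static void mydrv_plane_atomic_update(struct drm_plane *plane,
					      struct drm_atomic_state *state)
	{
		struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
		struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
		struct drm_shadow_plane_state *shadow = to_drm_shadow_plane_state(new_state);
		struct mydrv *priv = to_mydrv(plane->dev);
		struct drm_atomic_helper_damage_iter iter;
		struct drm_rect damage;

		drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
		drm_atomic_for_each_plane_damage(&iter, &damage) {
			/* NULL dst_pitch: packed scanlines, 4 pixels per byte */
			drm_fb_xrgb8888_to_gray2(&priv->gray2_map, NULL, shadow->data,
						 new_state->fb, &damage,
						 &shadow->fmtcnv_state);
		}
	}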
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index adbb73f00d68..18e753ade001 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -1048,7 +1048,7 @@ retry:
plane_state->crtc->base.id,
plane_state->crtc->name, fb->base.id);
- crtc_state = drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
ret = drm_atomic_add_affected_connectors(state, plane_state->crtc);
if (ret)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 6a44351e58b7..efc79bbf3c73 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -101,10 +101,8 @@ drm_gem_init(struct drm_device *dev)
vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
GFP_KERNEL);
- if (!vma_offset_manager) {
- DRM_ERROR("out of memory\n");
+ if (!vma_offset_manager)
return -ENOMEM;
- }
dev->vma_offset_manager = vma_offset_manager;
drm_vma_offset_manager_init(vma_offset_manager,
@@ -187,6 +185,7 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
+ mutex_init(&obj->gpuva.lock);
dma_resv_init(&obj->_resv);
if (!obj->resv)
obj->resv = &obj->_resv;
@@ -210,6 +209,7 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
WARN_ON(obj->dma_buf);
dma_resv_fini(&obj->_resv);
+ mutex_destroy(&obj->gpuva.lock);
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
@@ -332,7 +332,12 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
+ mutex_lock(&file_priv->prime.lock);
+
drm_prime_remove_buf_handle(&file_priv->prime, id);
+
+ mutex_unlock(&file_priv->prime.lock);
+
drm_vma_node_revoke(&obj->vma_node, file_priv);
drm_gem_object_handle_put_unlocked(obj);
@@ -621,7 +626,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
struct page **pages;
struct folio *folio;
struct folio_batch fbatch;
- long i, j, npages;
+ unsigned long i, j, npages;
if (WARN_ON(!obj->filp))
return ERR_PTR(-EINVAL);
@@ -645,7 +650,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
i = 0;
while (i < npages) {
- long nr;
+ unsigned long nr;
folio = shmem_read_folio_gfp(mapping, i,
mapping_gfp_mask(mapping));
if (IS_ERR(folio))
@@ -778,9 +783,9 @@ static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
int count, struct drm_gem_object ***objs_out)
{
- int ret;
- u32 *handles;
struct drm_gem_object **objs;
+ u32 *handles;
+ int ret;
if (!count)
return 0;
@@ -792,20 +797,11 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
*objs_out = objs;
- handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
- if (!handles) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy in GEM handles\n");
- goto out;
- }
+ handles = vmemdup_array_user(bo_handles, count, sizeof(u32));
+ if (IS_ERR(handles))
+ return PTR_ERR(handles);
ret = objects_lookup(filp, handles, count, objs);
-out:
kvfree(handles);
return ret;
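The vmemdup_array_user() call above replaces the open-coded allocate-plus-copy_from_user pair and additionally rejects multiplication overflow. Roughly, its behavior is (a sketch, not the exact mm implementation):

	size_t bytes;
	void *p;

	if (check_mul_overflow(n, size, &bytes))
		return ERR_PTR(-EOVERFLOW);
	p = kvmalloc(bytes, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(p, src, bytes)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}
	return p;	/* caller frees with kvfree(), as above */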
@@ -848,12 +844,13 @@ EXPORT_SYMBOL(drm_gem_object_lookup);
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout)
{
- long ret;
+ struct drm_device *dev = filep->minor->dev;
struct drm_gem_object *obj;
+ long ret;
obj = drm_gem_object_lookup(filep, handle);
if (!obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+ drm_dbg_core(dev, "Failed to look up GEM BO %d\n", handle);
return -EINVAL;
}
@@ -870,14 +867,6 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
-/**
- * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Releases the handle to an mm object.
- */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -893,17 +882,6 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
return ret;
}
-/**
- * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Create a global name for an object, returning the name.
- *
- * Note that the name does not hold a reference; when the object
- * is freed, the name goes away.
- */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -943,17 +921,6 @@ err:
return ret;
}
-/**
- * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
- * @dev: drm_device
- * @data: ioctl data
- * @file_priv: drm file-private structure
- *
- * Open an object using the global name, returning a handle and the size.
- *
- * This handle (of course) holds a reference to the object, so the object
- * will not go away until the handle is deleted.
- */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -988,6 +955,57 @@ err:
return ret;
}
+int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_gem_change_handle *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_GEM))
+ return -EOPNOTSUPP;
+
+ obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->handle == args->new_handle) {
+ drm_gem_object_put(obj);
+ return 0;
+ }
+
+ mutex_lock(&file_priv->prime.lock);
+
+ spin_lock(&file_priv->table_lock);
+ ret = idr_alloc(&file_priv->object_idr, obj,
+ args->new_handle, args->new_handle + 1, GFP_NOWAIT);
+ spin_unlock(&file_priv->table_lock);
+
+ if (ret < 0)
+ goto out_unlock;
+
+ if (obj->dma_buf) {
+ ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, args->new_handle);
+ if (ret < 0) {
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, args->new_handle);
+ spin_unlock(&file_priv->table_lock);
+ goto out_unlock;
+ }
+
+ drm_prime_remove_buf_handle(&file_priv->prime, args->handle);
+ }
+
+ ret = 0;
+
+ spin_lock(&file_priv->table_lock);
+ idr_remove(&file_priv->object_idr, args->handle);
+ spin_unlock(&file_priv->table_lock);
+
+out_unlock:
+ mutex_unlock(&file_priv->prime.lock);
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
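From userspace, the new ioctl moves an existing GEM handle to a caller-chosen slot. A hedged sketch using libdrm; the wrapper name DRM_IOCTL_GEM_CHANGE_HANDLE is an assumption here, while the two struct fields follow the args usage above:

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>

	int change_handle(int fd, uint32_t old_handle, uint32_t wanted_handle)
	{
		struct drm_gem_change_handle args = {
			.handle = old_handle,
			.new_handle = wanted_handle,	/* must be a free slot */
		};

		if (drmIoctl(fd, DRM_IOCTL_GEM_CHANGE_HANDLE, &args))
			return -errno;
		/* old_handle is now invalid; wanted_handle names the object. */
		return 0;
	}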
/**
* drm_gem_open - initializes GEM file-private structures at devnode open time
* @dev: drm_device which is being opened by userspace
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index ebf305fb24f0..569d41a65a0b 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
- __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
- drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+ if (shadow_plane_state) {
+ __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+ drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
+ } else {
+ __drm_atomic_helper_plane_reset(plane, NULL);
+ }
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);
@@ -334,8 +338,6 @@ void drm_gem_reset_shadow_plane(struct drm_plane *plane)
}
shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
- if (!shadow_plane_state)
- return;
__drm_gem_reset_shadow_plane(plane, shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_reset_shadow_plane);
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 4f0320df858f..12d8307997a0 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -20,7 +20,9 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
/**
@@ -304,9 +306,11 @@ int drm_gem_dma_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct drm_gem_dma_object *dma_obj;
+ int ret;
- args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- args->size = args->pitch * args->height;
+ ret = drm_mode_size_dumb(drm, args, SZ_8, 0);
+ if (ret)
+ return ret;
dma_obj = drm_gem_dma_create_with_handle(file_priv, drm, args->size,
&args->handle);
@@ -582,7 +586,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
if (ret) {
- DRM_ERROR("Failed to vmap PRIME buffer\n");
+ drm_err(dev, "Failed to vmap PRIME buffer\n");
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 4bc89d33df59..9fd4eb02a20f 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -16,6 +16,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#include "drm_internal.h"
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 5d1349c34afd..dc94a27710e5 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -18,6 +18,7 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
@@ -48,28 +49,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.vm_ops = &drm_gem_shmem_vm_ops,
};
-static struct drm_gem_shmem_object *
-__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
- struct vfsmount *gemfs)
+static int __drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem,
+ size_t size, bool private, struct vfsmount *gemfs)
{
- struct drm_gem_shmem_object *shmem;
- struct drm_gem_object *obj;
+ struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- size = PAGE_ALIGN(size);
-
- if (dev->driver->gem_create_object) {
- obj = dev->driver->gem_create_object(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
- shmem = to_drm_gem_shmem_obj(obj);
- } else {
- shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!shmem)
- return ERR_PTR(-ENOMEM);
- obj = &shmem->base;
- }
-
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
@@ -81,7 +66,7 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
}
if (ret) {
drm_gem_private_object_fini(obj);
- goto err_free;
+ return ret;
}
ret = drm_gem_create_mmap_offset(obj);
@@ -102,14 +87,55 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}
- return shmem;
-
+ return 0;
err_release:
drm_gem_object_release(obj);
-err_free:
- kfree(obj);
+ return ret;
+}
- return ERR_PTR(ret);
+/**
+ * drm_gem_shmem_init - Initialize an allocated object.
+ * @dev: DRM device
+ * @shmem: The allocated shmem GEM object.
+ * @size: Size of the object, in bytes.
+ *
+ * Returns:
+ * 0 on success, or a negative error code on failure.
+ */
+int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size)
+{
+ return __drm_gem_shmem_init(dev, shmem, size, false, NULL);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_init);
+
+static struct drm_gem_shmem_object *
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private,
+ struct vfsmount *gemfs)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ size = PAGE_ALIGN(size);
+
+ if (dev->driver->gem_create_object) {
+ obj = dev->driver->gem_create_object(dev, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+ shmem = to_drm_gem_shmem_obj(obj);
+ } else {
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return ERR_PTR(-ENOMEM);
+ obj = &shmem->base;
+ }
+
+ ret = __drm_gem_shmem_init(dev, shmem, size, private, gemfs);
+ if (ret) {
+ kfree(obj);
+ return ERR_PTR(ret);
+ }
+
+ return shmem;
}
/**
* drm_gem_shmem_create - Allocate an object with the given size
@@ -150,13 +176,13 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
/**
- * drm_gem_shmem_free - Free resources associated with a shmem GEM object
- * @shmem: shmem GEM object to free
+ * drm_gem_shmem_release - Release resources associated with a shmem GEM object.
+ * @shmem: shmem GEM object
*
- * This function cleans up the GEM object state and frees the memory used to
- * store the object itself.
+ * This function cleans up the GEM object state, but does not free the memory used to store the
+ * object itself. This function is meant to be a dedicated helper for the Rust GEM bindings.
*/
-void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
@@ -183,6 +209,19 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
}
drm_gem_object_release(obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_release);
+
+/**
+ * drm_gem_shmem_free - Free resources associated with a shmem GEM object
+ * @shmem: shmem GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
+{
+ drm_gem_shmem_release(shmem);
kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free);
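The init/release split lets code that embeds struct drm_gem_shmem_object in a larger allocation set the object up and tear it down without the helper allocating or freeing the containing memory. A minimal sketch under that assumption (mydrv_bo and its callbacks are hypothetical):

	struct mydrv_bo {
		struct drm_gem_shmem_object shmem;
		/* driver-private fields ... */
	};

	static struct mydrv_bo *mydrv_bo_create(struct drm_device *dev, size_t size)
	{
		struct mydrv_bo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
		int ret;

		if (!bo)
			return ERR_PTR(-ENOMEM);

		ret = drm_gem_shmem_init(dev, &bo->shmem, size);
		if (ret) {
			kfree(bo);
			return ERR_PTR(ret);
		}
		return bo;
	}

	static void mydrv_bo_free(struct drm_gem_object *obj)
	{
		struct mydrv_bo *bo = container_of(obj, struct mydrv_bo, shmem.base);

		drm_gem_shmem_release(&bo->shmem);	/* cleans up, does not kfree */
		kfree(bo);
	}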
@@ -518,18 +557,11 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ int ret;
- if (!args->pitch || !args->size) {
- args->pitch = min_pitch;
- args->size = PAGE_ALIGN(args->pitch * args->height);
- } else {
- /* ensure sane minimum values */
- if (args->pitch < min_pitch)
- args->pitch = min_pitch;
- if (args->size < args->pitch * args->height)
- args->size = PAGE_ALIGN(args->pitch * args->height);
- }
+ ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
+ if (ret)
+ return ret;
return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
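Both dumb-buffer paths now defer to drm_mode_size_dumb(). Judging by the open-coded block it replaces here, the helper computes the minimum pitch, clamps caller-supplied values against it, and page-aligns the resulting size; the helper itself is not part of this hunk, so treat the SZ_8 argument as presumably the pitch alignment. The replaced logic, for reference:

	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = PAGE_ALIGN(args->pitch * args->height);
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = PAGE_ALIGN(args->pitch * args->height);
	}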
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 257cca4cb97a..08ff0fadd0b2 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -4,6 +4,7 @@
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index b04cde4a60e7..5e5b70518dbe 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -17,6 +17,7 @@
#include <drm/drm_mode.h>
#include <drm/drm_plane.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_tt.h>
@@ -107,7 +108,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo)
{
- /* We got here via ttm_bo_put(), which means that the
+ /* We got here via ttm_bo_fini(), which means that the
* TTM buffer object in 'bo' has already been cleaned
* up; only release the GEM object.
*/
@@ -234,11 +235,11 @@ EXPORT_SYMBOL(drm_gem_vram_create);
* drm_gem_vram_put() - Releases a reference to a VRAM-backed GEM object
* @gbo: the GEM VRAM object
*
- * See ttm_bo_put() for more information.
+ * See ttm_bo_fini() for more information.
*/
void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
{
- ttm_bo_put(&gbo->bo);
+ ttm_bo_fini(&gbo->bo);
}
EXPORT_SYMBOL(drm_gem_vram_put);
@@ -859,7 +860,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
- false, true);
+ TTM_ALLOCATION_POOL_USE_DMA32);
if (ret)
return ret;
@@ -967,7 +968,7 @@ drm_vram_helper_mode_valid_internal(struct drm_device *dev,
max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbsize = (u32)mode->hdisplay * mode->vdisplay * max_bpp;
fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
if (fbpages > max_fbpages)
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 5bb4c77db2c3..73e550c8ff8c 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -271,107 +271,50 @@ npages_in_range(unsigned long start, unsigned long end)
}
/**
- * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
- * @notifier: Pointer to the GPU SVM notifier structure.
- * @start: Start address of the range
- * @end: End address of the range
+ * drm_gpusvm_notifier_find() - Find GPU SVM notifier from GPU SVM
+ * @gpusvm: Pointer to the GPU SVM structure.
+ * @start: Start address of the notifier
+ * @end: End address of the notifier
*
- * Return: A pointer to the drm_gpusvm_range if found or NULL
+ * Return: A pointer to the drm_gpusvm_notifier if found or NULL
*/
-struct drm_gpusvm_range *
-drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
- unsigned long end)
+struct drm_gpusvm_notifier *
+drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm, unsigned long start,
+ unsigned long end)
{
struct interval_tree_node *itree;
- itree = interval_tree_iter_first(&notifier->root, start, end - 1);
+ itree = interval_tree_iter_first(&gpusvm->root, start, end - 1);
if (itree)
- return container_of(itree, struct drm_gpusvm_range, itree);
+ return container_of(itree, struct drm_gpusvm_notifier, itree);
else
return NULL;
}
-EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
+EXPORT_SYMBOL_GPL(drm_gpusvm_notifier_find);
/**
- * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
- * @range__: Iterator variable for the ranges
- * @next__: Iterator variable for the ranges temporay storage
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the range
- * @end__: End address of the range
- *
- * This macro is used to iterate over GPU SVM ranges in a notifier while
- * removing ranges from it.
- */
-#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__) \
- for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)), \
- (next__) = __drm_gpusvm_range_next(range__); \
- (range__) && (drm_gpusvm_range_start(range__) < (end__)); \
- (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))
-
-/**
- * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
- * @notifier: a pointer to the current drm_gpusvm_notifier
+ * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
+ * @notifier: Pointer to the GPU SVM notifier structure.
+ * @start: Start address of the range
+ * @end: End address of the range
*
- * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
- * the current notifier is the last one or if the input notifier is
- * NULL.
+ * Return: A pointer to the drm_gpusvm_range if found or NULL
*/
-static struct drm_gpusvm_notifier *
-__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
-{
- if (notifier && !list_is_last(&notifier->entry,
- &notifier->gpusvm->notifier_list))
- return list_next_entry(notifier, entry);
-
- return NULL;
-}
-
-static struct drm_gpusvm_notifier *
-notifier_iter_first(struct rb_root_cached *root, unsigned long start,
- unsigned long last)
+struct drm_gpusvm_range *
+drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
+ unsigned long end)
{
struct interval_tree_node *itree;
- itree = interval_tree_iter_first(root, start, last);
+ itree = interval_tree_iter_first(&notifier->root, start, end - 1);
if (itree)
- return container_of(itree, struct drm_gpusvm_notifier, itree);
+ return container_of(itree, struct drm_gpusvm_range, itree);
else
return NULL;
}
-
-/**
- * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
- * @notifier__: Iterator variable for the notifiers
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the notifier
- * @end__: End address of the notifier
- *
- * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
- */
-#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__) \
- for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1); \
- (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
- (notifier__) = __drm_gpusvm_notifier_next(notifier__))
-
-/**
- * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
- * @notifier__: Iterator variable for the notifiers
- * @next__: Iterator variable for the notifiers temporay storage
- * @notifier__: Pointer to the GPU SVM notifier
- * @start__: Start address of the notifier
- * @end__: End address of the notifier
- *
- * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
- * removing notifiers from it.
- */
-#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__) \
- for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1), \
- (next__) = __drm_gpusvm_notifier_next(notifier__); \
- (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__)); \
- (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))
+EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);
/**
* drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
@@ -418,7 +361,6 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
* @name: Name of the GPU SVM.
* @drm: Pointer to the DRM device structure.
* @mm: Pointer to the mm_struct for the address space.
- * @device_private_page_owner: Device private pages owner.
* @mm_start: Start address of GPU SVM.
* @mm_range: Range of the GPU SVM.
* @notifier_size: Size of individual notifiers.
@@ -430,23 +372,35 @@ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
*
* This function initializes the GPU SVM.
*
+ * Note: If only the simple drm_gpusvm_pages API (get/unmap/free) is used,
+ * then only @gpusvm, @name, and @drm are required. The same base @gpusvm
+ * can also be used with both modes together, in which case the full setup
+ * is needed and the core drm_gpusvm_pages API simply never uses the other
+ * fields.
+ *
* Return: 0 on success, a negative error code on failure.
*/
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
const char *name, struct drm_device *drm,
- struct mm_struct *mm, void *device_private_page_owner,
+ struct mm_struct *mm,
unsigned long mm_start, unsigned long mm_range,
unsigned long notifier_size,
const struct drm_gpusvm_ops *ops,
const unsigned long *chunk_sizes, int num_chunks)
{
- if (!ops->invalidate || !num_chunks)
- return -EINVAL;
+ if (mm) {
+ if (!ops->invalidate || !num_chunks)
+ return -EINVAL;
+ mmgrab(mm);
+ } else {
+ /* No full SVM mode, only core drm_gpusvm_pages API. */
+ if (ops || num_chunks || mm_range || notifier_size)
+ return -EINVAL;
+ }
gpusvm->name = name;
gpusvm->drm = drm;
gpusvm->mm = mm;
- gpusvm->device_private_page_owner = device_private_page_owner;
gpusvm->mm_start = mm_start;
gpusvm->mm_range = mm_range;
gpusvm->notifier_size = notifier_size;
@@ -454,7 +408,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
gpusvm->chunk_sizes = chunk_sizes;
gpusvm->num_chunks = num_chunks;
- mmgrab(mm);
gpusvm->root = RB_ROOT_CACHED;
INIT_LIST_HEAD(&gpusvm->notifier_list);
@@ -473,22 +426,6 @@ int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
EXPORT_SYMBOL_GPL(drm_gpusvm_init);
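After this change the same structure supports two setups, selected by whether @mm is supplied. A hedged sketch of both call styles, with illustrative values and hypothetical mydrv names:

	/* Full SVM mode: mirrors a CPU address space. */
	err = drm_gpusvm_init(&svm, "mydrv-svm", drm, current->mm,
			      mm_start, mm_range, SZ_512K,
			      &mydrv_gpusvm_ops, chunk_sizes,
			      ARRAY_SIZE(chunk_sizes));

	/* Pages-only mode: just the drm_gpusvm_pages get/unmap/free API. */
	err = drm_gpusvm_init(&svm, "mydrv-pages", drm, NULL,
			      0, 0, 0, NULL, NULL, 0);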
/**
- * drm_gpusvm_notifier_find() - Find GPU SVM notifier
- * @gpusvm: Pointer to the GPU SVM structure
- * @fault_addr: Fault address
- *
- * This function finds the GPU SVM notifier associated with the fault address.
- *
- * Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
- */
-static struct drm_gpusvm_notifier *
-drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
- unsigned long fault_addr)
-{
- return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
-}
-
-/**
* to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
* @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
*
@@ -562,7 +499,8 @@ void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
drm_gpusvm_range_remove(gpusvm, range);
}
- mmdrop(gpusvm->mm);
+ if (gpusvm->mm)
+ mmdrop(gpusvm->mm);
WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
}
EXPORT_SYMBOL_GPL(drm_gpusvm_fini);
@@ -702,18 +640,48 @@ drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
INIT_LIST_HEAD(&range->entry);
- range->notifier_seq = LONG_MAX;
- range->flags.migrate_devmem = migrate_devmem ? 1 : 0;
+ range->pages.notifier_seq = LONG_MAX;
+ range->pages.flags.migrate_devmem = migrate_devmem ? 1 : 0;
return range;
}
/**
+ * drm_gpusvm_hmm_pfn_to_order() - Get the largest CPU mapping order.
+ * @hmm_pfn: The current hmm_pfn.
+ * @hmm_pfn_index: Index of the @hmm_pfn within the pfn array.
+ * @npages: Number of pages within the pfn array i.e the hmm range size.
+ *
+ * To allow skipping PFNs with the same flags (like when they belong to
+ * the same huge PTE) when looping over the pfn array, take a given hmm_pfn
+ * and return the largest order that will fit inside the CPU PTE, while also
+ * accounting for the original hmm range boundaries.
+ *
+ * Return: The largest order that will safely fit within the size of the hmm_pfn
+ * CPU PTE.
+ */
+static unsigned int drm_gpusvm_hmm_pfn_to_order(unsigned long hmm_pfn,
+ unsigned long hmm_pfn_index,
+ unsigned long npages)
+{
+ unsigned long size;
+
+ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+ size -= (hmm_pfn & ~HMM_PFN_FLAGS) & (size - 1);
+ hmm_pfn_index += size;
+ if (hmm_pfn_index > npages)
+ size -= (hmm_pfn_index - npages);
+
+ return ilog2(size);
+}
+
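A worked example of the clamping above, with illustrative numbers: suppose hmm_pfn_to_map_order() reports order 9 (a 2 MiB THP, 512 PFNs), the PFN sits 3 pages into the huge PTE, and only npages == 300 entries remain at hmm_pfn_index == 0. Then:

	size = 512 - 3 = 509        /* pages left inside the huge PTE */
	0 + 509 > 300               /* would overrun the hmm range */
	size = 509 - 209 = 300
	ilog2(300) = 8              /* 256 pages, within both bounds */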
+/**
* drm_gpusvm_check_pages() - Check pages
* @gpusvm: Pointer to the GPU SVM structure
* @notifier: Pointer to the GPU SVM notifier structure
* @start: Start address
* @end: End address
+ * @dev_private_owner: The device private page owner
*
* Check if pages between start and end have been faulted in on the CPU. Use to
* prevent migration of pages without CPU backing store.
@@ -722,14 +690,15 @@ drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
*/
static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_notifier *notifier,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ void *dev_private_owner)
{
struct hmm_range hmm_range = {
.default_flags = 0,
.notifier = &notifier->notifier,
.start = start,
.end = end,
- .dev_private_owner = gpusvm->device_private_page_owner,
+ .dev_private_owner = dev_private_owner,
};
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -766,7 +735,7 @@ static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
err = -EFAULT;
goto err_free;
}
- i += 0x1 << hmm_pfn_to_map_order(pfns[i]);
+ i += 0x1 << drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
}
err_free:
@@ -783,6 +752,7 @@ err_free:
* @gpuva_start: Start address of GPUVA which mirrors CPU
* @gpuva_end: End address of GPUVA which mirrors CPU
* @check_pages_threshold: Check CPU pages for present threshold
+ * @dev_private_owner: The device private page owner
*
* This function determines the chunk size for the GPU SVM range based on the
* fault address, GPU SVM chunk sizes, existing GPU SVM ranges, and the virtual
@@ -797,7 +767,8 @@ drm_gpusvm_range_chunk_size(struct drm_gpusvm *gpusvm,
unsigned long fault_addr,
unsigned long gpuva_start,
unsigned long gpuva_end,
- unsigned long check_pages_threshold)
+ unsigned long check_pages_threshold,
+ void *dev_private_owner)
{
unsigned long start, end;
int i = 0;
@@ -844,7 +815,7 @@ retry:
* process-many-malloc' mallocs at least 64k at a time.
*/
if (end - start <= check_pages_threshold &&
- !drm_gpusvm_check_pages(gpusvm, notifier, start, end)) {
+ !drm_gpusvm_check_pages(gpusvm, notifier, start, end, dev_private_owner)) {
++i;
goto retry;
}
@@ -943,7 +914,7 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
if (!mmget_not_zero(mm))
return ERR_PTR(-EFAULT);
- notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
+ notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr, fault_addr + 1);
if (!notifier) {
notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
if (IS_ERR(notifier)) {
@@ -987,7 +958,8 @@ drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
chunk_size = drm_gpusvm_range_chunk_size(gpusvm, notifier, vas,
fault_addr, gpuva_start,
gpuva_end,
- ctx->check_pages_threshold);
+ ctx->check_pages_threshold,
+ ctx->device_private_page_owner);
if (chunk_size == LONG_MAX) {
err = -EINVAL;
goto err_notifier_remove;
@@ -1024,31 +996,31 @@ err_mmunlock:
EXPORT_SYMBOL_GPL(drm_gpusvm_range_find_or_insert);
/**
- * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal)
+ * __drm_gpusvm_unmap_pages() - Unmap pages associated with GPU SVM pages (internal)
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
* @npages: Number of pages to unmap
*
- * This function unmap pages associated with a GPU SVM range. Assumes and
+ * This function unmaps pages associated with a GPU SVM pages struct. Assumes and
* asserts correct locking is in place when called.
*/
-static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- unsigned long npages)
+static void __drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
{
- unsigned long i, j;
- struct drm_pagemap *dpagemap = range->dpagemap;
+ struct drm_pagemap *dpagemap = svm_pages->dpagemap;
struct device *dev = gpusvm->drm->dev;
+ unsigned long i, j;
lockdep_assert_held(&gpusvm->notifier_lock);
- if (range->flags.has_dma_mapping) {
- struct drm_gpusvm_range_flags flags = {
- .__flags = range->flags.__flags,
+ if (svm_pages->flags.has_dma_mapping) {
+ struct drm_gpusvm_pages_flags flags = {
+ .__flags = svm_pages->flags.__flags,
};
for (i = 0, j = 0; i < npages; j++) {
- struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
+ struct drm_pagemap_addr *addr = &svm_pages->dma_addr[j];
if (addr->proto == DRM_INTERCONNECT_SYSTEM)
dma_unmap_page(dev,
@@ -1064,31 +1036,52 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
flags.has_devmem_pages = false;
flags.has_dma_mapping = false;
- WRITE_ONCE(range->flags.__flags, flags.__flags);
+ WRITE_ONCE(svm_pages->flags.__flags, flags.__flags);
- range->dpagemap = NULL;
+ svm_pages->dpagemap = NULL;
}
}
/**
- * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range
+ * __drm_gpusvm_free_pages() - Free dma array associated with GPU SVM pages
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
*
* This function frees the dma address array associated with a GPU SVM range.
*/
-static void drm_gpusvm_range_free_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range)
+static void __drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
{
lockdep_assert_held(&gpusvm->notifier_lock);
- if (range->dma_addr) {
- kvfree(range->dma_addr);
- range->dma_addr = NULL;
+ if (svm_pages->dma_addr) {
+ kvfree(svm_pages->dma_addr);
+ svm_pages->dma_addr = NULL;
}
}
/**
+ * drm_gpusvm_free_pages() - Free dma-mapping associated with GPU SVM pages
+ * struct
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of mapped pages
+ *
+ * This function unmaps and frees the dma address array associated with a GPU
+ * SVM pages struct.
+ */
+void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages)
+{
+ drm_gpusvm_notifier_lock(gpusvm);
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_free_pages);
+
+/**
* drm_gpusvm_range_remove() - Remove GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range to be removed
@@ -1107,13 +1100,14 @@ void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
drm_gpusvm_driver_lock_held(gpusvm);
notifier = drm_gpusvm_notifier_find(gpusvm,
- drm_gpusvm_range_start(range));
+ drm_gpusvm_range_start(range),
+ drm_gpusvm_range_start(range) + 1);
if (WARN_ON_ONCE(!notifier))
return;
drm_gpusvm_notifier_lock(gpusvm);
- __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
- drm_gpusvm_range_free_pages(gpusvm, range);
+ __drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages);
+ __drm_gpusvm_free_pages(gpusvm, &range->pages);
__drm_gpusvm_range_remove(notifier, range);
drm_gpusvm_notifier_unlock(gpusvm);
@@ -1179,6 +1173,28 @@ void drm_gpusvm_range_put(struct drm_gpusvm_range *range)
EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
/**
+ * drm_gpusvm_pages_valid() - GPU SVM range pages valid
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ *
+ * This function determines if GPU SVM pages are valid. It is expected to be
+ * called while holding gpusvm->notifier_lock, as the last step before committing
+ * a GPU binding. This is akin to a notifier seqno check in the HMM documentation,
+ * but due to wider notifiers (i.e., notifiers which span multiple ranges) this
+ * function is required for finer-grained checking (i.e., per range) of whether
+ * pages are valid.
+ *
+ * Return: True if the GPU SVM pages are valid, False otherwise
+ */
+static bool drm_gpusvm_pages_valid(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
+{
+ lockdep_assert_held(&gpusvm->notifier_lock);
+
+ return svm_pages->flags.has_devmem_pages || svm_pages->flags.has_dma_mapping;
+}
+
+/**
* drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range structure
@@ -1195,9 +1211,7 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range)
{
- lockdep_assert_held(&gpusvm->notifier_lock);
-
- return range->flags.has_devmem_pages || range->flags.has_dma_mapping;
+ return drm_gpusvm_pages_valid(gpusvm, &range->pages);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
@@ -1211,66 +1225,71 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
*
* Return: True if GPU SVM range has valid pages, False otherwise
*/
-static bool
-drm_gpusvm_range_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range)
+static bool drm_gpusvm_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages)
{
bool pages_valid;
- if (!range->dma_addr)
+ if (!svm_pages->dma_addr)
return false;
drm_gpusvm_notifier_lock(gpusvm);
- pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range);
+ pages_valid = drm_gpusvm_pages_valid(gpusvm, svm_pages);
if (!pages_valid)
- drm_gpusvm_range_free_pages(gpusvm, range);
+ __drm_gpusvm_free_pages(gpusvm, svm_pages);
drm_gpusvm_notifier_unlock(gpusvm);
return pages_valid;
}
/**
- * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
+ * drm_gpusvm_get_pages() - Get pages and populate GPU SVM pages struct
* @gpusvm: Pointer to the GPU SVM structure
- * @range: Pointer to the GPU SVM range structure
+ * @svm_pages: The SVM pages to populate. This will contain the dma-addresses
+ * @mm: The mm corresponding to the CPU range
+ * @notifier: The corresponding notifier for the given CPU range
+ * @pages_start: Start CPU address for the pages
+ * @pages_end: End CPU address for the pages (exclusive)
* @ctx: GPU SVM context
*
- * This function gets pages for a GPU SVM range and ensures they are mapped for
- * DMA access.
+ * This function gets pages for the given CPU range and ensures they are
+ * mapped for DMA access.
*
* Return: 0 on success, negative error code on failure.
*/
-int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+int drm_gpusvm_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ struct mm_struct *mm,
+ struct mmu_interval_notifier *notifier,
+ unsigned long pages_start, unsigned long pages_end,
+ const struct drm_gpusvm_ctx *ctx)
{
- struct mmu_interval_notifier *notifier = &range->notifier->notifier;
struct hmm_range hmm_range = {
.default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
HMM_PFN_REQ_WRITE),
.notifier = notifier,
- .start = drm_gpusvm_range_start(range),
- .end = drm_gpusvm_range_end(range),
- .dev_private_owner = gpusvm->device_private_page_owner,
+ .start = pages_start,
+ .end = pages_end,
+ .dev_private_owner = ctx->device_private_page_owner,
};
- struct mm_struct *mm = gpusvm->mm;
void *zdd;
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
unsigned long i, j;
- unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
- drm_gpusvm_range_end(range));
+ unsigned long npages = npages_in_range(pages_start, pages_end);
unsigned long num_dma_mapped;
unsigned int order = 0;
unsigned long *pfns;
int err = 0;
struct dev_pagemap *pagemap;
struct drm_pagemap *dpagemap;
- struct drm_gpusvm_range_flags flags;
+ struct drm_gpusvm_pages_flags flags;
+ enum dma_data_direction dma_dir = ctx->read_only ? DMA_TO_DEVICE :
+ DMA_BIDIRECTIONAL;
retry:
hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
- if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range))
+ if (drm_gpusvm_pages_valid_unlocked(gpusvm, svm_pages))
goto set_seqno;
pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
@@ -1310,7 +1329,7 @@ map_pages:
*/
drm_gpusvm_notifier_lock(gpusvm);
- flags.__flags = range->flags.__flags;
+ flags.__flags = svm_pages->flags.__flags;
if (flags.unmapped) {
drm_gpusvm_notifier_unlock(gpusvm);
err = -EFAULT;
@@ -1323,13 +1342,12 @@ map_pages:
goto retry;
}
- if (!range->dma_addr) {
+ if (!svm_pages->dma_addr) {
/* Unlock and restart mapping to allocate memory. */
drm_gpusvm_notifier_unlock(gpusvm);
- range->dma_addr = kvmalloc_array(npages,
- sizeof(*range->dma_addr),
- GFP_KERNEL);
- if (!range->dma_addr) {
+ svm_pages->dma_addr =
+ kvmalloc_array(npages, sizeof(*svm_pages->dma_addr), GFP_KERNEL);
+ if (!svm_pages->dma_addr) {
err = -ENOMEM;
goto err_free;
}
@@ -1342,10 +1360,11 @@ map_pages:
for (i = 0, j = 0; i < npages; ++j) {
struct page *page = hmm_pfn_to_page(pfns[i]);
- order = hmm_pfn_to_map_order(pfns[i]);
+ order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages);
if (is_device_private_page(page) ||
is_device_coherent_page(page)) {
- if (zdd != page->zone_device_data && i > 0) {
+ if (!ctx->allow_mixed &&
+ zdd != page->zone_device_data && i > 0) {
err = -EOPNOTSUPP;
goto err_unmap;
}
@@ -1368,20 +1387,21 @@ map_pages:
goto err_unmap;
}
}
- range->dma_addr[j] =
+ svm_pages->dma_addr[j] =
dpagemap->ops->device_map(dpagemap,
gpusvm->drm->dev,
page, order,
- DMA_BIDIRECTIONAL);
+ dma_dir);
if (dma_mapping_error(gpusvm->drm->dev,
- range->dma_addr[j].addr)) {
+ svm_pages->dma_addr[j].addr)) {
err = -EFAULT;
goto err_unmap;
}
} else {
dma_addr_t addr;
- if (is_zone_device_page(page) || pagemap) {
+ if (is_zone_device_page(page) ||
+ (pagemap && !ctx->allow_mixed)) {
err = -EOPNOTSUPP;
goto err_unmap;
}
@@ -1394,15 +1414,15 @@ map_pages:
addr = dma_map_page(gpusvm->drm->dev,
page, 0,
PAGE_SIZE << order,
- DMA_BIDIRECTIONAL);
+ dma_dir);
if (dma_mapping_error(gpusvm->drm->dev, addr)) {
err = -EFAULT;
goto err_unmap;
}
- range->dma_addr[j] = drm_pagemap_device_addr_encode
+ svm_pages->dma_addr[j] = drm_pagemap_addr_encode
(addr, DRM_INTERCONNECT_SYSTEM, order,
- DMA_BIDIRECTIONAL);
+ dma_dir);
}
i += 1 << order;
num_dma_mapped = i;
@@ -1411,21 +1431,21 @@ map_pages:
if (pagemap) {
flags.has_devmem_pages = true;
- range->dpagemap = dpagemap;
+ svm_pages->dpagemap = dpagemap;
}
/* WRITE_ONCE pairs with READ_ONCE for opportunistic checks */
- WRITE_ONCE(range->flags.__flags, flags.__flags);
+ WRITE_ONCE(svm_pages->flags.__flags, flags.__flags);
drm_gpusvm_notifier_unlock(gpusvm);
kvfree(pfns);
set_seqno:
- range->notifier_seq = hmm_range.notifier_seq;
+ svm_pages->notifier_seq = hmm_range.notifier_seq;
return 0;
err_unmap:
- __drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped);
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, num_dma_mapped);
drm_gpusvm_notifier_unlock(gpusvm);
err_free:
kvfree(pfns);
@@ -1433,11 +1453,62 @@ err_free:
goto retry;
return err;
}
+EXPORT_SYMBOL_GPL(drm_gpusvm_get_pages);
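
For illustration only: a driver-side sketch of mapping an arbitrary CPU range
with the new interface. The my_bind() helper, the use of current->mm, and the
choice of device_private_page_owner are assumptions, not part of this patch.

	/* Hypothetical driver helper built on drm_gpusvm_get_pages(). */
	static int my_bind(struct drm_gpusvm *gpusvm,
			   struct drm_gpusvm_pages *svm_pages,
			   struct mmu_interval_notifier *notifier,
			   unsigned long start, unsigned long end)
	{
		struct drm_gpusvm_ctx ctx = {
			.read_only = false,
			/* Assumed owner cookie for device-private pages. */
			.device_private_page_owner = gpusvm->drm,
		};

		/* Faults in CPU pages for [start, end) and DMA-maps them. */
		return drm_gpusvm_get_pages(gpusvm, svm_pages, current->mm,
					    notifier, start, end, &ctx);
	}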
+
+/**
+ * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @range: Pointer to the GPU SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for a GPU SVM range and ensures they are mapped for
+ * DMA access.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return drm_gpusvm_get_pages(gpusvm, &range->pages, gpusvm->mm,
+ &range->notifier->notifier,
+ drm_gpusvm_range_start(range),
+ drm_gpusvm_range_end(range), ctx);
+}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);
/**
+ * drm_gpusvm_unmap_pages() - Unmap GPU svm pages
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @svm_pages: Pointer to the GPU SVM pages structure
+ * @npages: Number of pages in @svm_pages.
+ * @ctx: GPU SVM context
+ *
+ * This function unmaps pages associated with a GPU SVM pages struct. If
+ * @ctx->in_notifier is set, it is assumed that gpusvm->notifier_lock is held
+ * in write mode; if it is clear, it acquires gpusvm->notifier_lock in read
+ * mode. Must be called from the invalidate() callback of the corresponding
+ * notifier to satisfy the IOMMU security model.
+ */
+void drm_gpusvm_unmap_pages(struct drm_gpusvm *gpusvm,
+ struct drm_gpusvm_pages *svm_pages,
+ unsigned long npages,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ if (ctx->in_notifier)
+ lockdep_assert_held_write(&gpusvm->notifier_lock);
+ else
+ drm_gpusvm_notifier_lock(gpusvm);
+
+ __drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages);
+
+ if (!ctx->in_notifier)
+ drm_gpusvm_notifier_unlock(gpusvm);
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_unmap_pages);
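
A hedged sketch of the two calling conventions described above; the my_*
names are illustrative:

	/* From within the notifier's invalidate() callback: lock already held. */
	static void my_invalidate(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_pages *svm_pages,
				  unsigned long npages)
	{
		struct drm_gpusvm_ctx ctx = { .in_notifier = true };

		drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages, &ctx);
	}

	/* From any other context: the helper takes notifier_lock in read mode. */
	static void my_teardown(struct drm_gpusvm *gpusvm,
				struct drm_gpusvm_pages *svm_pages,
				unsigned long npages)
	{
		struct drm_gpusvm_ctx ctx = { .in_notifier = false };

		drm_gpusvm_unmap_pages(gpusvm, svm_pages, npages, &ctx);
	}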
+
+/**
* drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
- * drm_gpusvm_range_evict() - Evict GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @range: Pointer to the GPU SVM range structure
* @ctx: GPU SVM context
@@ -1455,15 +1526,7 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
drm_gpusvm_range_end(range));
- if (ctx->in_notifier)
- lockdep_assert_held_write(&gpusvm->notifier_lock);
- else
- drm_gpusvm_notifier_lock(gpusvm);
-
- __drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
-
- if (!ctx->in_notifier)
- drm_gpusvm_notifier_unlock(gpusvm);
+ return drm_gpusvm_unmap_pages(gpusvm, &range->pages, npages, ctx);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
@@ -1561,10 +1624,10 @@ void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
{
lockdep_assert_held_write(&range->gpusvm->notifier_lock);
- range->flags.unmapped = true;
+ range->pages.flags.unmapped = true;
if (drm_gpusvm_range_start(range) < mmu_range->start ||
drm_gpusvm_range_end(range) > mmu_range->end)
- range->flags.partial_unmap = true;
+ range->pages.flags.partial_unmap = true;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index db9b089ef62c..8a06d296561d 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -26,6 +26,7 @@
*/
#include <drm/drm_gpuvm.h>
+#include <drm/drm_print.h>
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
@@ -421,6 +422,71 @@
*/
/**
+ * DOC: Madvise Logic - Splitting and Traversal
+ *
+ * This logic handles GPU VA range updates by generating remap and map operations
+ * without performing unmaps or merging existing mappings.
+ *
+ * 1) The requested range lies entirely within a single drm_gpuva. The logic splits
+ * the existing mapping at the start and end boundaries and inserts a new map.
+ *
+ * ::
+ * a start end b
+ * pre: |-----------------------|
+ * drm_gpuva1
+ *
+ * a start end b
+ * new: |-----|=========|-------|
+ * remap map remap
+ *
+ * one REMAP and one MAP: same behaviour as SPLIT and MERGE
+ *
+ * 2) The requested range spans multiple drm_gpuva regions. The logic traverses
+ * across boundaries, remapping the start and end segments, and inserting two
+ * map operations to cover the full range.
+ *
+ * ::
+ *
+ * a start b c end d
+ * pre: |------------------|--------------|------------------|
+ * drm_gpuva1 drm_gpuva2 drm_gpuva3
+ *
+ * a start b c end d
+ * new: |-------|==========|--------------|========|---------|
+ * remap1 map1 drm_gpuva2 map2 remap2
+ *
+ * two REMAPS and two MAPS
+ *
+ * 3) Either the start or the end lies within a drm_gpuva. A single remap and
+ * a single map operation are generated to update the affected portion.
+ *
+ *
+ * ::
+ *
+ * a/start b c end d
+ * pre: |------------------|--------------|------------------|
+ * drm_gpuva1 drm_gpuva2 drm_gpuva3
+ *
+ * a/start b c end d
+ * new: |------------------|--------------|========|---------|
+ * drm_gpuva1 drm_gpuva2 map1 remap1
+ *
+ * ::
+ *
+ * a start b c/end d
+ * pre: |------------------|--------------|------------------|
+ * drm_gpuva1 drm_gpuva2 drm_gpuva3
+ *
+ * a start b c/end d
+ * new: |-------|==========|--------------|------------------|
+ * remap1 map1 drm_gpuva2 drm_gpuva3
+ *
+ * one REMAP and one MAP
+ *
+ * 4) Both start and end align with existing drm_gpuva boundaries. No operations
+ * are needed as the range is already covered.
+ *
+ * 5) No existing drm_gpuvas. No operations.
+ *
+ * Unlike drm_gpuvm_sm_map_ops_create, this logic avoids unmaps and merging,
+ * focusing solely on remap and map operations for efficient traversal and update.
+ */
+
+/**
* DOC: Locking
*
* In terms of managing &drm_gpuva entries DRM GPUVM does not take care of
@@ -432,8 +498,7 @@
* DRM GPUVM also does not take care of the locking of the backing
* &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by
* itself; drivers are responsible to enforce mutual exclusion using either the
- * GEMs dma_resv lock or alternatively a driver specific external lock. For the
- * latter see also drm_gem_gpuva_set_lock().
+ * GEMs dma_resv lock or the GEMs gpuva.lock mutex.
*
* However, DRM GPUVM contains lockdep checks to ensure callers of its API hold
* the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed
@@ -486,13 +551,18 @@
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
+ * struct drm_gpuvm_map_req map_req = {
+ * .map.va.addr = addr,
+ * .map.va.range = range,
+ * .map.gem.obj = obj,
+ * .map.gem.offset = offset,
+ * };
* struct drm_gpuva_ops *ops;
* struct drm_gpuva_op *op
* struct drm_gpuvm_bo *vm_bo;
*
* driver_lock_va_space();
- * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
- * obj, offset);
+ * ops = drm_gpuvm_sm_map_ops_create(gpuvm, &map_req);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
*
@@ -808,6 +878,31 @@ __drm_gpuvm_bo_list_add(struct drm_gpuvm *gpuvm, spinlock_t *lock,
}
/**
+ * drm_gpuvm_bo_is_zombie() - check whether this vm_bo is scheduled for cleanup
+ * @vm_bo: the &drm_gpuvm_bo
+ *
+ * When a vm_bo is scheduled for cleanup using the bo_defer list, it is not
+ * immediately removed from the evict and extobj lists. Therefore, anyone
+ * iterating these lists should skip entries that are being destroyed.
+ *
+ * Checking the refcount without incrementing it is okay as long as the lock
+ * protecting the evict/extobj list is held for as long as you are using the
+ * vm_bo, because even if the refcount hits zero while you are using it, freeing
+ * the vm_bo requires taking the list's lock.
+ *
+ * Zombie entries can be observed on the evict and extobj lists regardless of
+ * whether DRM_GPUVM_RESV_PROTECTED is used, but they stay on the lists longer
+ * when the resv lock is used: the resv lock cannot be taken during run_job()
+ * in immediate mode, so the entries remain on the lists until
+ * drm_gpuvm_bo_deferred_cleanup() is called.
+ */
+static bool
+drm_gpuvm_bo_is_zombie(struct drm_gpuvm_bo *vm_bo)
+{
+ return !kref_read(&vm_bo->kref);
+}
+
+/**
* drm_gpuvm_bo_list_add() - insert a vm_bo into the given list
* @__vm_bo: the &drm_gpuvm_bo
* @__list_name: the name of the list to insert into
@@ -1012,6 +1107,8 @@ drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
INIT_LIST_HEAD(&gpuvm->evict.list);
spin_lock_init(&gpuvm->evict.lock);
+ init_llist_head(&gpuvm->bo_defer);
+
kref_init(&gpuvm->kref);
gpuvm->name = name ? name : "unknown";
@@ -1053,6 +1150,8 @@ drm_gpuvm_fini(struct drm_gpuvm *gpuvm)
"Extobj list should be empty.\n");
drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
"Evict list should be empty.\n");
+ drm_WARN(gpuvm->drm, !llist_empty(&gpuvm->bo_defer),
+ "VM BO cleanup list should be empty.\n");
drm_gem_object_put(gpuvm->r_obj);
}
@@ -1148,6 +1247,9 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm,
drm_gpuvm_resv_assert_held(gpuvm);
list_for_each_entry(vm_bo, &gpuvm->extobj.list, list.entry.extobj) {
+ if (drm_gpuvm_bo_is_zombie(vm_bo))
+ continue;
+
ret = exec_prepare_obj(exec, vm_bo->obj, num_fences);
if (ret)
break;
@@ -1391,6 +1493,9 @@ drm_gpuvm_validate_locked(struct drm_gpuvm *gpuvm, struct drm_exec *exec)
list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
list.entry.evict) {
+ if (drm_gpuvm_bo_is_zombie(vm_bo))
+ continue;
+
ret = ops->vm_bo_validate(vm_bo, exec);
if (ret)
break;
@@ -1491,6 +1596,7 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
INIT_LIST_HEAD(&vm_bo->list.entry.extobj);
INIT_LIST_HEAD(&vm_bo->list.entry.evict);
+ init_llist_node(&vm_bo->list.entry.bo_defer);
return vm_bo;
}
@@ -1512,7 +1618,7 @@ drm_gpuvm_bo_destroy(struct kref *kref)
drm_gpuvm_bo_list_del(vm_bo, extobj, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_del(&vm_bo->list.entry.gem);
if (ops && ops->vm_bo_free)
@@ -1533,7 +1639,8 @@ drm_gpuvm_bo_destroy(struct kref *kref)
* If the reference count drops to zero, the &gpuvm_bo is destroyed, which
* includes removing it from the GEMs gpuva list. Hence, if a call to this
* function can potentially let the reference count drop to zero the caller must
- * hold the dma-resv or driver specific GEM gpuva lock.
+ * hold the lock that the GEM uses for its gpuva list (either the GEM's
+ * dma-resv or gpuva.lock mutex).
*
* This function may only be called from non-atomic context.
*
@@ -1551,13 +1658,133 @@ drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo)
}
EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put);
+/*
+ * drm_gpuvm_bo_into_zombie() - called when the vm_bo becomes a zombie due to
+ * deferred cleanup
+ *
+ * If deferred cleanup is used, this must be called right after the vm_bo
+ * refcount drops to zero, with the GEM's gpuva.lock mutex held. After
+ * releasing the mutex, drm_gpuvm_bo_defer_zombie_cleanup() must be called.
+ */
+static void
+drm_gpuvm_bo_into_zombie(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ if (!drm_gpuvm_resv_protected(vm_bo->vm)) {
+ drm_gpuvm_bo_list_del(vm_bo, extobj, true);
+ drm_gpuvm_bo_list_del(vm_bo, evict, true);
+ }
+
+ list_del(&vm_bo->list.entry.gem);
+}
+
+/*
+ * drm_gpuvm_bo_defer_zombie_cleanup() - adds a new zombie vm_bo to the
+ * bo_defer list
+ *
+ * Called after drm_gpuvm_bo_into_zombie(). GEM mutex must not be held.
+ *
+ * The GEM must stay alive for as long as we hold its mutex, but the instant
+ * the vm_bo is added to bo_defer, another thread may call
+ * drm_gpuvm_bo_deferred_cleanup() and put the GEM. Therefore, to avoid
+ * freeing a mutex we are still holding, the GEM mutex must be released
+ * *before* calling this function.
+ */
+static void
+drm_gpuvm_bo_defer_zombie_cleanup(struct drm_gpuvm_bo *vm_bo)
+{
+ llist_add(&vm_bo->list.entry.bo_defer, &vm_bo->vm->bo_defer);
+}
+
+static void
+drm_gpuvm_bo_defer_free(struct kref *kref)
+{
+ struct drm_gpuvm_bo *vm_bo = container_of(kref, struct drm_gpuvm_bo,
+ kref);
+
+ drm_gpuvm_bo_into_zombie(kref);
+ mutex_unlock(&vm_bo->obj->gpuva.lock);
+ drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
+}
+
+/**
+ * drm_gpuvm_bo_put_deferred() - drop a struct drm_gpuvm_bo reference with
+ * deferred cleanup
+ * @vm_bo: the &drm_gpuvm_bo to release the reference of
+ *
+ * This releases a reference to @vm_bo.
+ *
+ * This might take and release the GEM's gpuva.lock mutex. You should call
+ * drm_gpuvm_bo_deferred_cleanup() later to complete the cleanup process.
+ *
+ * Returns: true if vm_bo is being destroyed, false otherwise.
+ */
+bool
+drm_gpuvm_bo_put_deferred(struct drm_gpuvm_bo *vm_bo)
+{
+ if (!vm_bo)
+ return false;
+
+ drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
+
+ return !!kref_put_mutex(&vm_bo->kref,
+ drm_gpuvm_bo_defer_free,
+ &vm_bo->obj->gpuva.lock);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_put_deferred);
+
+/**
+ * drm_gpuvm_bo_deferred_cleanup() - clean up BOs in the deferred list
+ * @gpuvm: the VM to clean up
+ *
+ * Cleans up &drm_gpuvm_bo instances in the deferred cleanup list.
+ */
+void
+drm_gpuvm_bo_deferred_cleanup(struct drm_gpuvm *gpuvm)
+{
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
+ struct drm_gpuvm_bo *vm_bo;
+ struct drm_gem_object *obj;
+ struct llist_node *bo_defer;
+
+ bo_defer = llist_del_all(&gpuvm->bo_defer);
+ if (!bo_defer)
+ return;
+
+ if (drm_gpuvm_resv_protected(gpuvm)) {
+ dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL);
+ llist_for_each_entry(vm_bo, bo_defer, list.entry.bo_defer) {
+ drm_gpuvm_bo_list_del(vm_bo, extobj, false);
+ drm_gpuvm_bo_list_del(vm_bo, evict, false);
+ }
+ dma_resv_unlock(drm_gpuvm_resv(gpuvm));
+ }
+
+ while (bo_defer) {
+ vm_bo = llist_entry(bo_defer, struct drm_gpuvm_bo, list.entry.bo_defer);
+ bo_defer = bo_defer->next;
+ obj = vm_bo->obj;
+ if (ops && ops->vm_bo_free)
+ ops->vm_bo_free(vm_bo);
+ else
+ kfree(vm_bo);
+
+ drm_gpuvm_put(gpuvm);
+ drm_gem_object_put(obj);
+ }
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_bo_deferred_cleanup);
+
static struct drm_gpuvm_bo *
__drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj)
{
struct drm_gpuvm_bo *vm_bo;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
drm_gem_for_each_gpuvm_bo(vm_bo, obj)
if (vm_bo->vm == gpuvm)
return vm_bo;
@@ -1616,7 +1843,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
if (!vm_bo)
return ERR_PTR(-ENOMEM);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list);
return vm_bo;
@@ -1652,7 +1879,7 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo)
return vm_bo;
}
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list);
return __vm_bo;
@@ -1824,8 +2051,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove);
* reference of the latter is taken.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using either the GEMs dma_resv lock or a driver specific
- * lock set through drm_gem_gpuva_set_lock().
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
@@ -1840,7 +2066,7 @@ drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo)
va->vm_bo = drm_gpuvm_bo_get(vm_bo);
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(gpuvm, obj);
list_add_tail(&va->gem.entry, &vm_bo->list.gpuva);
}
EXPORT_SYMBOL_GPL(drm_gpuva_link);
@@ -1860,8 +2086,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link);
* the latter is dropped.
*
* This function expects the caller to protect the GEM's GPUVA list against
- * concurrent access using either the GEMs dma_resv lock or a driver specific
- * lock set through drm_gem_gpuva_set_lock().
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*/
void
drm_gpuva_unlink(struct drm_gpuva *va)
@@ -1872,7 +2097,7 @@ drm_gpuva_unlink(struct drm_gpuva *va)
if (unlikely(!obj))
return;
- drm_gem_gpuva_assert_lock_held(obj);
+ drm_gem_gpuva_assert_lock_held(va->vm, obj);
list_del_init(&va->gem.entry);
va->vm_bo = NULL;
@@ -1881,6 +2106,40 @@ drm_gpuva_unlink(struct drm_gpuva *va)
EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
/**
+ * drm_gpuva_unlink_defer() - unlink a &drm_gpuva with deferred vm_bo cleanup
+ * @va: the &drm_gpuva to unlink
+ *
+ * Similar to drm_gpuva_unlink(), but uses drm_gpuvm_bo_put_deferred() and takes
+ * the lock for the caller.
+ */
+void
+drm_gpuva_unlink_defer(struct drm_gpuva *va)
+{
+ struct drm_gem_object *obj = va->gem.obj;
+ struct drm_gpuvm_bo *vm_bo = va->vm_bo;
+ bool should_defer_bo;
+
+ if (unlikely(!obj))
+ return;
+
+ drm_WARN_ON(vm_bo->vm->drm, !drm_gpuvm_immediate_mode(vm_bo->vm));
+
+ mutex_lock(&obj->gpuva.lock);
+ list_del_init(&va->gem.entry);
+
+ /*
+ * This is drm_gpuvm_bo_put_deferred() except we already hold the mutex.
+ */
+ should_defer_bo = kref_put(&vm_bo->kref, drm_gpuvm_bo_into_zombie);
+ mutex_unlock(&obj->gpuva.lock);
+ if (should_defer_bo)
+ drm_gpuvm_bo_defer_zombie_cleanup(vm_bo);
+
+ va->vm_bo = NULL;
+}
+EXPORT_SYMBOL_GPL(drm_gpuva_unlink_defer);
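
A plausible end-to-end flow for the deferred-cleanup API in immediate mode,
with illustrative my_* names: VAs are unlinked from a context that must not
take the dma-resv lock, and zombies are reaped later from process context.

	struct my_vm {
		struct drm_gpuvm gpuvm;
		struct work_struct cleanup_work;
	};

	static void my_teardown_va(struct drm_gpuva *va)
	{
		/* Takes obj->gpuva.lock itself; the vm_bo may become a zombie. */
		drm_gpuva_unlink_defer(va);
		drm_gpuva_remove(va);
	}

	/* Process context, e.g. a worker queued after the above. */
	static void my_cleanup_worker(struct work_struct *work)
	{
		struct my_vm *vm = container_of(work, struct my_vm, cleanup_work);

		/* Frees every vm_bo queued on gpuvm->bo_defer. */
		drm_gpuvm_bo_deferred_cleanup(&vm->gpuvm);
	}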
+
+/**
* drm_gpuva_find_first() - find the first &drm_gpuva in the given range
* @gpuvm: the &drm_gpuvm to search in
* @addr: the &drm_gpuvas address
@@ -2054,16 +2313,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
- u64 addr, u64 range,
- struct drm_gem_object *obj, u64 offset)
+ const struct drm_gpuvm_map_req *req)
{
struct drm_gpuva_op op = {};
+ if (!req)
+ return 0;
+
op.op = DRM_GPUVA_OP_MAP;
- op.map.va.addr = addr;
- op.map.va.range = range;
- op.map.gem.obj = obj;
- op.map.gem.offset = offset;
+ op.map.va.addr = req->map.va.addr;
+ op.map.va.range = req->map.va.range;
+ op.map.gem.obj = req->map.gem.obj;
+ op.map.gem.offset = req->map.gem.offset;
return fn->sm_step_map(&op, priv);
}
@@ -2088,10 +2349,13 @@ op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
- struct drm_gpuva *va, bool merge)
+ struct drm_gpuva *va, bool merge, bool madvise)
{
struct drm_gpuva_op op = {};
+ if (madvise)
+ return 0;
+
op.op = DRM_GPUVA_OP_UNMAP;
op.unmap.va = va;
op.unmap.keep = merge;
@@ -2102,10 +2366,15 @@ op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
static int
__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
const struct drm_gpuvm_ops *ops, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+ const struct drm_gpuvm_map_req *op_map = madvise ? NULL : req;
struct drm_gpuva *va, *next;
+ u64 req_offset = req->map.gem.offset;
+ u64 req_range = req->map.va.range;
+ u64 req_addr = req->map.va.addr;
u64 req_end = req_addr + req_range;
int ret;
@@ -2120,19 +2389,22 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
u64 end = addr + range;
bool merge = !!va->gem.obj;
+ if (madvise && obj)
+ continue;
+
if (addr == req_addr) {
merge &= obj == req_obj &&
offset == req_offset;
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
continue;
@@ -2153,6 +2425,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr < req_addr) {
@@ -2173,6 +2448,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
@@ -2180,6 +2458,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, NULL, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = req_addr,
+ .map.va.range = end - req_addr,
+ };
+
+ ret = op_map_cb(ops, priv, &map_req);
+ if (ret)
+ return ret;
+ }
+
continue;
}
@@ -2195,6 +2485,9 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, &p, &n, &u);
if (ret)
return ret;
+
+ if (madvise)
+ op_map = req;
break;
}
} else if (addr > req_addr) {
@@ -2203,16 +2496,18 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
(addr - req_addr);
if (end == req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
break;
}
if (end < req_end) {
- ret = op_unmap_cb(ops, priv, va, merge);
+ ret = op_unmap_cb(ops, priv, va, merge, madvise);
if (ret)
return ret;
+
continue;
}
@@ -2231,14 +2526,20 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
ret = op_remap_cb(ops, priv, NULL, &n, &u);
if (ret)
return ret;
+
+ if (madvise) {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = addr,
+ .map.va.range = req_end - addr,
+ };
+
+ return op_map_cb(ops, priv, &map_req);
+ }
break;
}
}
}
-
- return op_map_cb(ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return op_map_cb(ops, priv, op_map);
}
static int
@@ -2290,7 +2591,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
if (ret)
return ret;
} else {
- ret = op_unmap_cb(ops, priv, va, false);
+ ret = op_unmap_cb(ops, priv, va, false, false);
if (ret)
return ret;
}
@@ -2303,10 +2604,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
* drm_gpuvm_sm_map() - calls the &drm_gpuva_op split/merge steps
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: pointer to &struct drm_gpuvm_map_req
*
* This function iterates the given range of the GPU VA space. It utilizes the
* &drm_gpuvm_ops to call back into the driver providing the split and merge
@@ -2333,8 +2631,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
*/
int
drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
const struct drm_gpuvm_ops *ops = gpuvm->ops;
@@ -2343,9 +2640,7 @@ drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuvm_sm_map(gpuvm, ops, priv,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
@@ -2421,10 +2716,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* @gpuvm: the &drm_gpuvm representing the GPU VA space
* @exec: the &drm_exec locking context
* @num_fences: for newly mapped objects, the # of fences to reserve
- * @req_addr: the start address of the range to unmap
- * @req_range: the range of the mappings to unmap
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: pointer to &struct drm_gpuvm_map_req
*
* This function locks (drm_exec_lock_obj()) objects that will be unmapped/
* remapped, and locks+prepares (drm_exec_prepare_object()) objects that
@@ -2432,8 +2724,6 @@ static const struct drm_gpuvm_ops lock_ops = {
*
* The expected usage is::
*
- * .. code-block:: c
- *
* vm_bind {
* struct drm_exec exec;
*
@@ -2447,9 +2737,7 @@ static const struct drm_gpuvm_ops lock_ops = {
* ret = drm_gpuvm_sm_unmap_exec_lock(gpuvm, &exec, op->addr, op->range);
* break;
* case DRIVER_OP_MAP:
- * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences,
- * op->addr, op->range,
- * obj, op->obj_offset);
+ * ret = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, num_fences, &req);
* break;
* }
*
@@ -2480,18 +2768,17 @@ static const struct drm_gpuvm_ops lock_ops = {
int
drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
struct drm_exec *exec, unsigned int num_fences,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ struct drm_gpuvm_map_req *req)
{
+ struct drm_gem_object *req_obj = req->map.gem.obj;
+
if (req_obj) {
int ret = drm_exec_prepare_obj(exec, req_obj, num_fences);
if (ret)
return ret;
}
- return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec,
- req_addr, req_range,
- req_obj, req_offset);
+ return __drm_gpuvm_sm_map(gpuvm, &lock_ops, exec, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_exec_lock);
@@ -2610,13 +2897,42 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
.sm_step_unmap = drm_gpuva_sm_step,
};
+static struct drm_gpuva_ops *
+__drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req,
+ bool madvise)
+{
+ struct drm_gpuva_ops *ops;
+ struct {
+ struct drm_gpuvm *vm;
+ struct drm_gpuva_ops *ops;
+ } args;
+ int ret;
+
+ ops = kzalloc(sizeof(*ops), GFP_KERNEL);
+ if (unlikely(!ops))
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&ops->list);
+
+ args.vm = gpuvm;
+ args.ops = ops;
+
+ ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args, req, madvise);
+ if (ret)
+ goto err_free_ops;
+
+ return ops;
+
+err_free_ops:
+ drm_gpuva_ops_free(gpuvm, ops);
+ return ERR_PTR(ret);
+}
+
/**
* drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
* @gpuvm: the &drm_gpuvm representing the GPU VA space
- * @req_addr: the start address of the new mapping
- * @req_range: the range of the new mapping
- * @req_obj: the &drm_gem_object to map
- * @req_offset: the offset within the &drm_gem_object
+ * @req: map request arguments
*
* This function creates a list of operations to perform splitting and merging
* of existing mapping(s) with the newly requested one.
@@ -2644,40 +2960,50 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = {
*/
struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
- u64 req_addr, u64 req_range,
- struct drm_gem_object *req_obj, u64 req_offset)
+ const struct drm_gpuvm_map_req *req)
{
- struct drm_gpuva_ops *ops;
- struct {
- struct drm_gpuvm *vm;
- struct drm_gpuva_ops *ops;
- } args;
- int ret;
-
- ops = kzalloc(sizeof(*ops), GFP_KERNEL);
- if (unlikely(!ops))
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&ops->list);
-
- args.vm = gpuvm;
- args.ops = ops;
-
- ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
- req_addr, req_range,
- req_obj, req_offset);
- if (ret)
- goto err_free_ops;
-
- return ops;
-
-err_free_ops:
- drm_gpuva_ops_free(gpuvm, ops);
- return ERR_PTR(ret);
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, false);
}
EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
/**
+ * drm_gpuvm_madvise_ops_create() - creates the &drm_gpuva_ops to split
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
+ * @req: map request arguments
+ *
+ * This function creates a list of operations that split existing mapping(s)
+ * at the start or the end of the requested range, based on the request map.
+ *
+ * The list can be iterated with &drm_gpuva_for_each_op and must be processed
+ * in the given order. It can contain map and remap operations, but it can
+ * also be empty if no operation is required, e.g. if the requested mapping
+ * already exists in the exact same way.
+ *
+ * There will be no unmap operations, at most two remap operations and at most
+ * two map operations. The two map operations correspond to one from the
+ * requested start to the end of the first overlapping drm_gpuva, and one from
+ * the start of the last overlapping drm_gpuva to the requested end.
+ *
+ * Note that before calling this function again with another mapping request it
+ * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
+ * previously obtained operations must be either processed or abandoned. To
+ * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
+ * drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
+ * used.
+ *
+ * After the caller finished processing the returned &drm_gpuva_ops, they must
+ * be freed with &drm_gpuva_ops_free.
+ *
+ * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
+ */
+struct drm_gpuva_ops *
+drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_map_req *req)
+{
+ return __drm_gpuvm_sm_map_ops_create(gpuvm, req, true);
+}
+EXPORT_SYMBOL_GPL(drm_gpuvm_madvise_ops_create);
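
A sketch of consuming the returned ops, relying on the documented guarantee
that only map and remap steps can appear; my_madvise() is illustrative:

	static int my_madvise(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
	{
		struct drm_gpuvm_map_req req = {
			.map.va.addr = addr,
			.map.va.range = range,
		};
		struct drm_gpuva_ops *ops;
		struct drm_gpuva_op *op;

		ops = drm_gpuvm_madvise_ops_create(gpuvm, &req);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		drm_gpuva_for_each_op(op, ops) {
			switch (op->op) {
			case DRM_GPUVA_OP_MAP:
			case DRM_GPUVA_OP_REMAP:
				/* Apply the split; no unmaps are generated. */
				break;
			default:
				break;
			}
		}

		drm_gpuva_ops_free(gpuvm, ops);
		return 0;
	}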
+
+/**
* drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
* unmap
* @gpuvm: the &drm_gpuvm representing the GPU VA space
@@ -2806,8 +3132,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
* After the caller finished processing the returned &drm_gpuva_ops, they must
* be freed with &drm_gpuva_ops_free.
*
- * It is the callers responsibility to protect the GEMs GPUVA list against
- * concurrent access using the GEMs dma_resv lock.
+ * This function expects the caller to protect the GEM's GPUVA list against
+ * concurrent access using either the GEM's dma-resv or gpuva.lock mutex.
*
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
@@ -2819,7 +3145,7 @@ drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo)
struct drm_gpuva *va;
int ret;
- drm_gem_gpuva_assert_lock_held(vm_bo->obj);
+ drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj);
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e79c3c623c9a..f893b1e3a596 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -56,6 +56,17 @@ static inline void drm_client_debugfs_init(struct drm_device *dev)
{ }
#endif
+/* drm_client_sysrq.c */
+#if defined(CONFIG_DRM_CLIENT) && defined(CONFIG_MAGIC_SYSRQ)
+void drm_client_sysrq_register(struct drm_device *dev);
+void drm_client_sysrq_unregister(struct drm_device *dev);
+#else
+static inline void drm_client_sysrq_register(struct drm_device *dev)
+{ }
+static inline void drm_client_sysrq_unregister(struct drm_device *dev)
+{ }
+#endif
+
/* drm_file.c */
extern struct mutex drm_global_mutex;
bool drm_dev_needs_global_mutex(struct drm_device *dev);
@@ -85,6 +96,8 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
+int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf, uint32_t handle);
void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
uint32_t handle);
@@ -170,6 +183,8 @@ int drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f593dc569d31..ff193155129e 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -373,6 +373,13 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->supports_virtualized_cursor_plane = req->value;
break;
+ case DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE:
+ if (!file_priv->atomic)
+ return -EINVAL;
+ if (req->value > 1)
+ return -EINVAL;
+ file_priv->plane_color_pipeline = req->value;
+ break;
default:
return -EINVAL;
}
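
From userspace the new cap follows the usual client-cap pattern. A sketch,
assuming the cap is exposed through the uapi headers and using libdrm's
drmSetClientCap(); note the atomic requirement enforced above:

	#include <xf86drm.h>

	/* Opt in to per-plane color pipelines; requires the atomic cap first. */
	static int enable_plane_color_pipeline(int fd)
	{
		int ret = drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1);

		if (ret)
			return ret;

		return drmSetClientCap(fd, DRM_CLIENT_CAP_PLANE_COLOR_PIPELINE, 1);
	}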
@@ -653,6 +660,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CHANGE_HANDLE, drm_gem_change_handle_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, 0),
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index e33c78fc8fbd..00482227a9cd 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -26,6 +26,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <video/mipi_display.h>
@@ -691,7 +692,7 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation)
{
- size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
+ size_t bufsize = (u32)mode->vdisplay * mode->hdisplay * sizeof(u16);
dbidev->drm.mode_config.preferred_depth = 16;
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 3a9b3278a6e3..a712e177b350 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -773,41 +773,13 @@ ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
EXPORT_SYMBOL(mipi_dsi_generic_write);
/**
- * mipi_dsi_generic_write_chatty() - mipi_dsi_generic_write() w/ an error log
- * @dsi: DSI peripheral device
- * @payload: buffer containing the payload
- * @size: size of payload buffer
- *
- * Like mipi_dsi_generic_write() but includes a dev_err()
- * call for you and returns 0 upon success, not the number of bytes sent.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int mipi_dsi_generic_write_chatty(struct mipi_dsi_device *dsi,
- const void *payload, size_t size)
-{
- struct device *dev = &dsi->dev;
- ssize_t ret;
-
- ret = mipi_dsi_generic_write(dsi, payload, size);
- if (ret < 0) {
- dev_err(dev, "sending generic data %*ph failed: %zd\n",
- (int)size, payload, ret);
- return ret;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(mipi_dsi_generic_write_chatty);
-
-/**
- * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write_chatty() w/ accum_err
+ * mipi_dsi_generic_write_multi() - mipi_dsi_generic_write() w/ accum_err
* @ctx: Context for multiple DSI transactions
* @payload: buffer containing the payload
* @size: size of payload buffer
*
- * Like mipi_dsi_generic_write_chatty() but deals with errors in a way that
- * makes it convenient to make several calls in a row.
+ * A wrapper around mipi_dsi_generic_write() that deals with errors in a way
+ * that makes it convenient to make several calls in a row.
*/
void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
const void *payload, size_t size)
@@ -829,6 +801,30 @@ void mipi_dsi_generic_write_multi(struct mipi_dsi_multi_context *ctx,
EXPORT_SYMBOL(mipi_dsi_generic_write_multi);
/**
+ * mipi_dsi_dual_generic_write_multi() - mipi_dsi_generic_write_multi() for
+ * two DSI channels, one after the other
+ * @ctx: Context for multiple DSI transactions
+ * @dsi1: First DSI channel to write the buffer to
+ * @dsi2: Second DSI channel to write the buffer to
+ * @payload: Buffer containing the payload
+ * @size: Size of payload buffer
+ *
+ * A wrapper around mipi_dsi_generic_write_multi() that allows the user to
+ * conveniently write to two DSI channels, one after the other.
+ */
+void mipi_dsi_dual_generic_write_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *payload, size_t size)
+{
+ ctx->dsi = dsi1;
+ mipi_dsi_generic_write_multi(ctx, payload, size);
+ ctx->dsi = dsi2;
+ mipi_dsi_generic_write_multi(ctx, payload, size);
+}
+EXPORT_SYMBOL(mipi_dsi_dual_generic_write_multi);
+
+/**
* mipi_dsi_generic_read() - receive data using a generic read packet
* @dsi: DSI peripheral device
* @params: buffer containing the request parameters
@@ -1008,6 +1004,30 @@ void mipi_dsi_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer_multi);
/**
+ * mipi_dsi_dual_dcs_write_buffer_multi() - mipi_dsi_dcs_write_buffer_multi()
+ * for two DSI channels, one after the other
+ * @ctx: Context for multiple DSI transactions
+ * @dsi1: First DSI channel to write the buffer to
+ * @dsi2: Second DSI channel to write the buffer to
+ * @data: Buffer containing data to be transmitted
+ * @len: Size of transmission buffer
+ *
+ * A wrapper around mipi_dsi_dcs_write_buffer_multi() that allows the user to
+ * conveniently write to two DSI channels, one after the other.
+ */
+void mipi_dsi_dual_dcs_write_buffer_multi(struct mipi_dsi_multi_context *ctx,
+ struct mipi_dsi_device *dsi1,
+ struct mipi_dsi_device *dsi2,
+ const void *data, size_t len)
+{
+ ctx->dsi = dsi1;
+ mipi_dsi_dcs_write_buffer_multi(ctx, data, len);
+ ctx->dsi = dsi2;
+ mipi_dsi_dcs_write_buffer_multi(ctx, data, len);
+}
+EXPORT_SYMBOL(mipi_dsi_dual_dcs_write_buffer_multi);
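
A usage sketch for a dual-link panel driver; the DSI device pointers and the
command bytes are illustrative:

	static void my_dual_panel_init(struct mipi_dsi_multi_context *ctx,
				       struct mipi_dsi_device *dsi_a,
				       struct mipi_dsi_device *dsi_b)
	{
		static const u8 seq[] = { 0xb0, 0x11, 0x22 };

		/* Same buffer goes to both links; errors accumulate in ctx. */
		mipi_dsi_dual_dcs_write_buffer_multi(ctx, dsi_a, dsi_b,
						     seq, sizeof(seq));
	}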
+
+/**
* mipi_dsi_dcs_write() - send DCS write command
* @dsi: DSI peripheral device
* @cmd: DCS command
@@ -1077,6 +1097,43 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
EXPORT_SYMBOL(mipi_dsi_dcs_read);
/**
+ * mipi_dsi_dcs_read_multi() - mipi_dsi_dcs_read() w/ accum_err
+ * @ctx: Context for multiple DSI transactions
+ * @cmd: DCS command
+ * @data: buffer in which to receive data
+ * @len: size of receive buffer
+ *
+ * Like mipi_dsi_dcs_read() but deals with errors in a way that makes it
+ * convenient to make several calls in a row.
+ */
+void mipi_dsi_dcs_read_multi(struct mipi_dsi_multi_context *ctx, u8 cmd,
+ void *data, size_t len)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ struct mipi_dsi_msg msg = {
+ .channel = dsi->channel,
+ .type = MIPI_DSI_DCS_READ,
+ .tx_buf = &cmd,
+ .tx_len = 1,
+ .rx_buf = data,
+ .rx_len = len
+ };
+ ssize_t ret;
+
+ if (ctx->accum_err)
+ return;
+
+ ret = mipi_dsi_device_transfer(dsi, &msg);
+ if (ret < 0) {
+ ctx->accum_err = ret;
+ dev_err(dev, "dcs read with command %#x failed: %d\n", cmd,
+ ctx->accum_err);
+ }
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_read_multi);
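
A sketch of the accum_err pattern with the new read helper: once
ctx->accum_err is set, subsequent multi calls become no-ops, so sequences need
no per-call checks. MIPI_DCS_GET_POWER_MODE is a standard DCS opcode from
<video/mipi_display.h>; my_check_power_mode() is illustrative.

	static int my_check_power_mode(struct mipi_dsi_device *dsi)
	{
		struct mipi_dsi_multi_context ctx = { .dsi = dsi };
		u8 mode = 0;

		mipi_dsi_dcs_read_multi(&ctx, MIPI_DCS_GET_POWER_MODE,
					&mode, sizeof(mode));

		/* ctx.accum_err holds the first error seen, if any. */
		return ctx.accum_err;
	}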
+
+/**
* mipi_dsi_dcs_nop() - send DCS nop packet
* @dsi: DSI peripheral device
*
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index ca254611b382..6692abe564d3 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,6 +49,7 @@
#include <linux/stacktrace.h>
#include <drm/drm_mm.h>
+#include <drm/drm_print.h>
/**
* DOC: Overview
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 25f376869b3a..d12db9b0bab8 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -30,6 +30,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
+#include <drm/drm_colorop.h>
#include <linux/dma-resv.h>
#include "drm_crtc_internal.h"
@@ -192,11 +193,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
void drm_mode_config_reset(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ struct drm_colorop *colorop;
struct drm_plane *plane;
struct drm_encoder *encoder;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ drm_for_each_colorop(colorop, dev)
+ drm_colorop_reset(colorop);
+
drm_for_each_plane(plane, dev)
if (plane->funcs->reset)
plane->funcs->reset(plane);
@@ -437,6 +442,7 @@ int drmm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
+ INIT_LIST_HEAD(&dev->mode_config.colorop_list);
INIT_LIST_HEAD(&dev->mode_config.privobj_list);
idr_init_base(&dev->mode_config.object_idr, 1);
idr_init_base(&dev->mode_config.tile_idr, 1);
@@ -458,6 +464,7 @@ int drmm_mode_config_init(struct drm_device *dev)
dev->mode_config.num_crtc = 0;
dev->mode_config.num_encoder = 0;
dev->mode_config.num_total_plane = 0;
+ dev->mode_config.num_colorop = 0;
if (IS_ENABLED(CONFIG_LOCKDEP)) {
struct drm_modeset_acquire_ctx modeset_ctx;
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index e943205a2394..b45d501b10c8 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -28,6 +28,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_mode_object.h>
+#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
@@ -386,6 +387,7 @@ EXPORT_SYMBOL(drm_object_property_get_default_value);
/* helper for getconnector and getproperties ioctls */
int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
+ bool plane_color_pipeline,
uint32_t __user *prop_ptr,
uint64_t __user *prop_values,
uint32_t *arg_count_props)
@@ -399,6 +401,21 @@ int drm_mode_object_get_properties(struct drm_mode_object *obj, bool atomic,
if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic)
continue;
+ if (plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) {
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (prop == plane->color_encoding_property ||
+ prop == plane->color_range_property)
+ continue;
+ }
+
+ if (!plane_color_pipeline && obj->type == DRM_MODE_OBJECT_PLANE) {
+ struct drm_plane *plane = obj_to_plane(obj);
+
+ if (prop == plane->color_pipeline_property)
+ continue;
+ }
+
if (*arg_count_props > count) {
ret = __drm_object_property_get_value(obj, prop, &val);
if (ret)
@@ -457,6 +474,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
}
ret = drm_mode_object_get_properties(obj, file_priv->atomic,
+ file_priv->plane_color_pipeline,
(uint32_t __user *)(unsigned long)(arg->props_ptr),
(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
&arg->count_props);
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 988735560570..a57f6a10ada4 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -203,10 +203,10 @@ int drm_mode_config_helper_suspend(struct drm_device *dev)
if (dev->mode_config.poll_enabled)
drm_kms_helper_poll_disable(dev);
- drm_client_dev_suspend(dev, false);
+ drm_client_dev_suspend(dev);
state = drm_atomic_helper_suspend(dev);
if (IS_ERR(state)) {
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
/*
* Don't enable polling if it was never initialized
@@ -252,7 +252,7 @@ int drm_mode_config_helper_resume(struct drm_device *dev)
DRM_ERROR("Failed to resume (%d)\n", ret);
dev->mode_config.suspend_state = NULL;
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
/*
* Don't enable polling if it is not initialized
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index d0183dea7703..4f65ce729a47 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -55,7 +55,8 @@ EXPORT_SYMBOL(drm_of_crtc_port_mask);
* and generate the DRM mask of CRTCs which may be attached to this
* encoder.
*
- * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ * See https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/graph.yaml
+ * for the bindings.
*/
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
@@ -106,7 +107,9 @@ EXPORT_SYMBOL_GPL(drm_of_component_match_add);
* Parse the platform device OF node and bind all the components associated
* with the master. Interface ports are added before the encoders in order to
* satisfy their .bind requirements
- * See Documentation/devicetree/bindings/graph.txt for the bindings.
+ *
+ * See https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/graph.yaml
+ * for the bindings.
*
* Returns zero if successful, or one of the standard error codes if it fails.
*/
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 1da55322af12..37d7cfbbb3e8 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -196,13 +196,13 @@ static void drm_pagemap_get_devmem_page(struct page *page,
struct drm_pagemap_zdd *zdd)
{
page->zone_device_data = drm_pagemap_zdd_get(zdd);
- zone_device_page_init(page);
+ zone_device_page_init(page, 0);
}
/**
* drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
* @dev: The device for which the pages are being mapped
- * @dma_addr: Array to store DMA addresses corresponding to mapped pages
+ * @pagemap_addr: Array to store DMA information corresponding to mapped pages
* @migrate_pfn: Array of migrate page frame numbers to map
* @npages: Number of pages to map
* @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
@@ -215,25 +215,39 @@ static void drm_pagemap_get_devmem_page(struct page *page,
* Returns: 0 on success, -EFAULT if an error occurs during mapping.
*/
static int drm_pagemap_migrate_map_pages(struct device *dev,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long *migrate_pfn,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
+ dma_addr_t dma_addr;
+ struct folio *folio;
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next;
if (WARN_ON_ONCE(is_zone_device_page(page)))
return -EFAULT;
- dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
- if (dma_mapping_error(dev, dma_addr[i]))
+ folio = page_folio(page);
+ order = folio_order(folio);
+
+ dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
+ if (dma_mapping_error(dev, dma_addr))
return -EFAULT;
+
+ pagemap_addr[i] =
+ drm_pagemap_addr_encode(dma_addr,
+ DRM_INTERCONNECT_SYSTEM,
+ order, dir);
+
+next:
+ i += NR_PAGES(order);
}
return 0;
@@ -242,7 +256,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
/**
* drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
* @dev: The device for which the pages were mapped
- * @dma_addr: Array of DMA addresses corresponding to mapped pages
+ * @pagemap_addr: Array of DMA information corresponding to mapped pages
* @npages: Number of pages to unmap
* @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
*
@@ -251,17 +265,20 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
* if it's valid and not already unmapped, and unmaps the corresponding page.
*/
static void drm_pagemap_migrate_unmap_pages(struct device *dev,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages,
enum dma_data_direction dir)
{
unsigned long i;
- for (i = 0; i < npages; ++i) {
- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
- continue;
+ for (i = 0; i < npages;) {
+ if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
+ goto next;
- dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
+ dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
+
+next:
+ i += NR_PAGES(pagemap_addr[i].order);
}
}
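
Consumers of the array are expected to use the same order-aware stride. A
hedged sketch of walking the mapped entries, using NR_PAGES() and struct
drm_pagemap_addr as introduced by this patch; my_walk() is illustrative:

	/* Walk a drm_pagemap_addr array, honoring the folio order per entry. */
	static void my_walk(struct drm_pagemap_addr *pagemap_addr,
			    unsigned long npages)
	{
		unsigned long i;

		for (i = 0; i < npages; i += NR_PAGES(pagemap_addr[i].order)) {
			if (!pagemap_addr[i].addr)
				continue; /* unpopulated hole */

			/* Entry covers PAGE_SIZE << order bytes of contiguous DMA. */
		}
	}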
@@ -314,7 +331,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct vm_area_struct *vas;
struct drm_pagemap_zdd *zdd = NULL;
struct page **pages;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
void *buf;
int err;
@@ -340,14 +357,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
goto err_out;
}
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
zdd = drm_pagemap_zdd_alloc(pgmap_owner);
if (!zdd) {
@@ -377,8 +394,9 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
if (err)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
migrate.src, npages, DMA_TO_DEVICE);
+
if (err)
goto err_finalize;
@@ -390,7 +408,7 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
drm_pagemap_get_devmem_page(page, zdd);
}
- err = ops->copy_to_devmem(pages, dma_addr, npages);
+ err = ops->copy_to_devmem(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -404,7 +422,7 @@ err_finalize:
drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
- drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_TO_DEVICE);
err_free:
if (zdd)
@@ -442,54 +460,80 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
{
unsigned long i;
- for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
- struct page *page, *src_page;
+ for (i = 0; i < npages;) {
+ struct page *page = NULL, *src_page;
+ struct folio *folio;
+ unsigned int order = 0;
if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
- continue;
+ goto next;
src_page = migrate_pfn_to_page(src_mpfn[i]);
if (!src_page)
- continue;
+ goto next;
if (fault_page) {
if (src_page->zone_device_data !=
fault_page->zone_device_data)
- continue;
+ goto next;
}
+ order = folio_order(page_folio(src_page));
+
+ /* TODO: Support fallback to single pages if THP allocation fails */
if (vas)
- page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
+ folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
else
- page = alloc_page(GFP_HIGHUSER);
+ folio = folio_alloc(GFP_HIGHUSER, order);
- if (!page)
+ if (!folio)
goto free_pages;
+ page = folio_page(folio, 0);
mpfn[i] = migrate_pfn(page_to_pfn(page));
+
+next:
+ if (page)
+ addr += page_size(page);
+ else
+ addr += PAGE_SIZE;
+
+ i += NR_PAGES(order);
}
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next_lock;
+
+ WARN_ON_ONCE(!folio_trylock(page_folio(page)));
- WARN_ON_ONCE(!trylock_page(page));
- ++*mpages;
+ order = folio_order(page_folio(page));
+ *mpages += NR_PAGES(order);
+
+next_lock:
+ i += NR_PAGES(order);
}
return 0;
free_pages:
- for (i = 0; i < npages; ++i) {
+ for (i = 0; i < npages;) {
struct page *page = migrate_pfn_to_page(mpfn[i]);
+ unsigned int order = 0;
if (!page)
- continue;
+ goto next_put;
put_page(page);
mpfn[i] = 0;
+
+ order = folio_order(page_folio(page));
+
+next_put:
+ i += NR_PAGES(order);
}
return -ENOMEM;
}
@@ -509,7 +553,7 @@ int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
unsigned long npages, mpages = 0;
struct page **pages;
unsigned long *src, *dst;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
void *buf;
int i, err = 0;
unsigned int retry_count = 2;
@@ -520,7 +564,7 @@ retry:
if (!mmget_not_zero(devmem_allocation->mm))
return -EFAULT;
- buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
@@ -528,8 +572,8 @@ retry:
}
src = buf;
dst = buf + (sizeof(*src) * npages);
- dma_addr = buf + (2 * sizeof(*src) * npages);
- pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*src) * npages);
+ pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
if (err)
@@ -544,7 +588,7 @@ retry:
if (err || !mpages)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
+ err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, pagemap_addr,
dst, npages, DMA_FROM_DEVICE);
if (err)
goto err_finalize;
@@ -552,7 +596,7 @@ retry:
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
- err = ops->copy_to_ram(pages, dma_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -561,7 +605,7 @@ err_finalize:
drm_pagemap_migration_unlock_put_pages(npages, dst);
migrate_device_pages(src, dst, npages);
migrate_device_finalize(src, dst, npages);
- drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
@@ -612,7 +656,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
struct device *dev = NULL;
unsigned long npages, mpages = 0;
struct page **pages;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long start, end;
void *buf;
int i, err = 0;
@@ -637,14 +681,14 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
migrate.end = end;
npages = npages_in_range(start, end);
- buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
sizeof(*pages), GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
- pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;
+ pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
+ pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
migrate.vma = vas;
migrate.src = buf;
@@ -680,7 +724,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
if (err)
goto err_finalize;
- err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
+ err = drm_pagemap_migrate_map_pages(dev, pagemap_addr, migrate.dst, npages,
DMA_FROM_DEVICE);
if (err)
goto err_finalize;
@@ -688,7 +732,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
- err = ops->copy_to_ram(pages, dma_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages);
if (err)
goto err_finalize;
@@ -698,7 +742,7 @@ err_finalize:
migrate_vma_pages(&migrate);
migrate_vma_finalize(&migrate);
if (dev)
- drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
+ drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, npages,
DMA_FROM_DEVICE);
err_free:
kvfree(buf);
@@ -708,15 +752,15 @@ err_out:
}
/**
- * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page
- * @page: Pointer to the page
+ * drm_pagemap_folio_free() - Put GPU SVM zone device data associated with a folio
+ * @folio: Pointer to the folio
*
* This function is a callback used to put the GPU SVM zone device data
* associated with a page when it is being released.
*/
-static void drm_pagemap_page_free(struct page *page)
+static void drm_pagemap_folio_free(struct folio *folio)
{
- drm_pagemap_zdd_put(page->zone_device_data);
+ drm_pagemap_zdd_put(folio->page.zone_device_data);
}
/**
@@ -744,7 +788,7 @@ static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
}
static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
- .page_free = drm_pagemap_page_free,
+ .folio_free = drm_pagemap_folio_free,
.migrate_to_ram = drm_pagemap_migrate_to_ram,
};
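A sketch of the single kvcalloc() buffer the hunks above index into, with npages entries per array (names follow the diff):

	/*
	 * buf: [ migrate.src | migrate.dst | pagemap_addr | pages ]
	 *
	 * pagemap_addr therefore begins 2 * sizeof(*src) * npages bytes
	 * into buf, and pages immediately after the pagemap_addr array,
	 * matching the pointer arithmetic in both migration paths.
	 */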
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index c8bb28dccdc1..d1e6598ea3bc 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -134,6 +134,9 @@ void drm_panel_prepare(struct drm_panel *panel)
panel->prepared = true;
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_prepared)
+ continue;
+
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
@@ -179,6 +182,9 @@ void drm_panel_unprepare(struct drm_panel *panel)
mutex_lock(&panel->follower_lock);
list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_unpreparing)
+ continue;
+
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
@@ -209,6 +215,7 @@ EXPORT_SYMBOL(drm_panel_unprepare);
*/
void drm_panel_enable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
@@ -219,10 +226,12 @@ void drm_panel_enable(struct drm_panel *panel)
return;
}
+ mutex_lock(&panel->follower_lock);
+
if (panel->funcs && panel->funcs->enable) {
ret = panel->funcs->enable(panel);
if (ret < 0)
- return;
+ goto exit;
}
panel->enabled = true;
@@ -230,6 +239,19 @@ void drm_panel_enable(struct drm_panel *panel)
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to enable backlight: %d\n",
ret);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_enabled)
+ continue;
+
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
+
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_enable);
@@ -243,6 +265,7 @@ EXPORT_SYMBOL(drm_panel_enable);
*/
void drm_panel_disable(struct drm_panel *panel)
{
+ struct drm_panel_follower *follower;
int ret;
if (!panel)
@@ -262,6 +285,18 @@ void drm_panel_disable(struct drm_panel *panel)
return;
}
+ mutex_lock(&panel->follower_lock);
+
+ list_for_each_entry(follower, &panel->followers, list) {
+ if (!follower->funcs->panel_disabling)
+ continue;
+
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
+ }
+
ret = backlight_disable(panel->backlight);
if (ret < 0)
DRM_DEV_INFO(panel->dev, "failed to disable backlight: %d\n",
@@ -270,9 +305,12 @@ void drm_panel_disable(struct drm_panel *panel)
if (panel->funcs && panel->funcs->disable) {
ret = panel->funcs->disable(panel);
if (ret < 0)
- return;
+ goto exit;
}
panel->enabled = false;
+
+exit:
+ mutex_unlock(&panel->follower_lock);
}
EXPORT_SYMBOL(drm_panel_disable);
@@ -539,13 +577,13 @@ EXPORT_SYMBOL(drm_is_panel_follower);
* @follower_dev: The 'struct device' for the follower.
* @follower: The panel follower descriptor for the follower.
*
- * A panel follower is called right after preparing the panel and right before
- * unpreparing the panel. It's primary intention is to power on an associated
- * touchscreen, though it could be used for any similar devices. Multiple
- * devices are allowed the follow the same panel.
+ * A panel follower is called right after preparing/enabling the panel and right
+ * before unpreparing/disabling the panel. Its primary intention is to power on
+ * an associated touchscreen, though it could be used for any similar device.
+ * Multiple devices are allowed to follow the same panel.
*
- * If a follower is added to a panel that's already been turned on, the
- * follower's prepare callback is called right away.
+ * If a follower is added to a panel that's already been prepared/enabled, the
+ * follower's prepared/enabled callback is called right away.
*
* The "panel" property of the follower points to the panel to be followed.
*
@@ -569,12 +607,18 @@ int drm_panel_add_follower(struct device *follower_dev,
mutex_lock(&panel->follower_lock);
list_add_tail(&follower->list, &panel->followers);
- if (panel->prepared) {
+ if (panel->prepared && follower->funcs->panel_prepared) {
ret = follower->funcs->panel_prepared(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
follower->funcs->panel_prepared, ret);
}
+ if (panel->enabled && follower->funcs->panel_enabled) {
+ ret = follower->funcs->panel_enabled(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_enabled, ret);
+ }
mutex_unlock(&panel->follower_lock);
@@ -587,7 +631,8 @@ EXPORT_SYMBOL(drm_panel_add_follower);
* @follower: The panel follower descriptor for the follower.
*
* Undo drm_panel_add_follower(). This includes calling the follower's
- * unprepare function if we're removed from a panel that's currently prepared.
+ * unpreparing/disabling function if we're removed from a panel that's currently
+ * prepared/enabled.
*
* Return: 0 or an error code.
*/
@@ -598,7 +643,13 @@ void drm_panel_remove_follower(struct drm_panel_follower *follower)
mutex_lock(&panel->follower_lock);
- if (panel->prepared) {
+ if (panel->enabled && follower->funcs->panel_disabling) {
+ ret = follower->funcs->panel_disabling(follower);
+ if (ret < 0)
+ dev_info(panel->dev, "%ps failed: %d\n",
+ follower->funcs->panel_disabling, ret);
+ }
+ if (panel->prepared && follower->funcs->panel_unpreparing) {
ret = follower->funcs->panel_unpreparing(follower);
if (ret < 0)
dev_info(panel->dev, "%ps failed: %d\n",
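A hedged follower-side sketch of the interface extended above (callback names follow the hunks; the my_ts_* helpers are hypothetical). Since drm_panel.c now skips NULL callbacks, a follower may implement only the enable/disable pair:

	static int my_ts_panel_enabled(struct drm_panel_follower *follower)
	{
		/* Panel is enabled and backlight is on; start scanning. */
		return my_ts_start_scanning(follower);	/* hypothetical */
	}

	static int my_ts_panel_disabling(struct drm_panel_follower *follower)
	{
		/* Called before the panel is disabled; stop scanning. */
		return my_ts_stop_scanning(follower);	/* hypothetical */
	}

	static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
		/* .panel_prepared and .panel_unpreparing can stay NULL now. */
		.panel_enabled = my_ts_panel_enabled,
		.panel_disabling = my_ts_panel_disabling,
	};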
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
index 598f812b7cb3..537dc6dd0534 100644
--- a/drivers/gpu/drm/drm_panel_backlight_quirks.c
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -8,23 +8,26 @@
#include <drm/drm_edid.h>
#include <drm/drm_utils.h>
-struct drm_panel_min_backlight_quirk {
- struct {
- enum dmi_field field;
- const char * const value;
- } dmi_match;
+struct drm_panel_match {
+ enum dmi_field field;
+ const char * const value;
+};
+
+struct drm_get_panel_backlight_quirk {
+ struct drm_panel_match dmi_match;
+ struct drm_panel_match dmi_match_other;
struct drm_edid_ident ident;
- u8 min_brightness;
+ struct drm_panel_backlight_quirk quirk;
};
-static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = {
+static const struct drm_get_panel_backlight_quirk drm_panel_min_backlight_quirks[] = {
/* 13 inch matte panel */
{
.dmi_match.field = DMI_BOARD_VENDOR,
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca),
.ident.name = "NE135FBM-N41",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
},
/* 13 inch glossy panel */
{
@@ -32,7 +35,7 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f),
.ident.name = "NE135FBM-N41",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
},
/* 13 inch 2.8k panel */
{
@@ -40,56 +43,114 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks
.dmi_match.value = "Framework",
.ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4),
.ident.name = "NE135A1M-NY1",
- .min_brightness = 0,
+ .quirk = { .min_brightness = 1, },
+ },
+ /* Steam Deck models */
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "Valve",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "Jupiter",
+ .quirk = { .min_brightness = 1, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "Valve",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "Galileo",
+ .quirk = { .min_brightness = 1, },
+ },
+ /* OLED panels with brightness issues when the last brightness byte is 0/1 */
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "AYANEO",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "AYANEO 3",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ZOTAC",
+ .dmi_match_other.field = DMI_BOARD_NAME,
+ .dmi_match_other.value = "G0A1W",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ZOTAC",
+ .dmi_match_other.field = DMI_BOARD_NAME,
+ .dmi_match_other.value = "G1A1W",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ONE-NETBOOK",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "ONEXPLAYER F1Pro",
+ .quirk = { .brightness_mask = 3, },
+ },
+ {
+ .dmi_match.field = DMI_SYS_VENDOR,
+ .dmi_match.value = "ONE-NETBOOK",
+ .dmi_match_other.field = DMI_PRODUCT_NAME,
+ .dmi_match_other.value = "ONEXPLAYER F1 EVA-02",
+ .quirk = { .brightness_mask = 3, },
},
};
-static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk,
- const struct drm_edid *edid)
+static bool drm_panel_min_backlight_quirk_matches(
+ const struct drm_get_panel_backlight_quirk *quirk,
+ const struct drm_edid *edid)
{
- if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ if (quirk->dmi_match.field &&
+ !dmi_match(quirk->dmi_match.field, quirk->dmi_match.value))
+ return false;
+
+ if (quirk->dmi_match_other.field &&
+ !dmi_match(quirk->dmi_match_other.field,
+ quirk->dmi_match_other.value))
return false;
- if (!drm_edid_match(edid, &quirk->ident))
+ if (quirk->ident.panel_id && !drm_edid_match(edid, &quirk->ident))
return false;
return true;
}
/**
- * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel.
+ * drm_get_panel_backlight_quirk - Get backlight quirks for a panel
* @edid: EDID of the panel to check
*
* This function checks for platform specific (e.g. DMI based) quirks
* providing info on the minimum backlight brightness for systems where this
+ * cannot be probed correctly from the hard-/firm-ware or other sources.
+ * cannot be probed correctly from the hard-/firm-ware and other sources.
*
* Returns:
- * A negative error value or
- * an override value in the range [0, 255] representing 0-100% to be scaled to
- * the drivers target range.
+ * a drm_panel_backlight_quirk struct if a quirk was found, otherwise an
+ * error pointer.
*/
-int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid)
+const struct drm_panel_backlight_quirk *
+drm_get_panel_backlight_quirk(const struct drm_edid *edid)
{
- const struct drm_panel_min_backlight_quirk *quirk;
+ const struct drm_get_panel_backlight_quirk *quirk;
size_t i;
if (!IS_ENABLED(CONFIG_DMI))
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
if (!edid)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) {
quirk = &drm_panel_min_backlight_quirks[i];
if (drm_panel_min_backlight_quirk_matches(quirk, edid))
- return quirk->min_brightness;
+ return &quirk->quirk;
}
- return -ENODATA;
+ return ERR_PTR(-ENODATA);
}
-EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk);
+EXPORT_SYMBOL(drm_get_panel_backlight_quirk);
MODULE_DESCRIPTION("Quirks for panel backlight overrides");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index 1d6312fa1429..d4b6ea42db0f 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -174,6 +174,33 @@ static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color)
*p = color & 0xff;
}
+/*
+ * Special case if the pixel crosses page boundaries
+ */
+static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page,
+ unsigned int offset, u32 color)
+{
+ u8 *vaddr2;
+ u8 *p = vaddr + offset;
+
+ vaddr2 = kmap_local_page_try_from_panic(next_page);
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 1)
+ p = vaddr2;
+
+ *p++ = color & 0xff;
+ color >>= 8;
+
+ if (offset == PAGE_SIZE - 2)
+ p = vaddr2;
+
+ *p = color & 0xff;
+ kunmap_local(vaddr2);
+}
+
static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color)
{
u32 *p = vaddr + offset;
@@ -231,7 +258,14 @@ static void drm_panic_blit_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- if (vaddr)
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, fg32);
+ else
drm_panic_write_pixel(vaddr, offset, fg32, cpp);
}
}
@@ -321,7 +355,15 @@ static void drm_panic_fill_page(struct page **pages, unsigned int dpitch,
page = new_page;
vaddr = kmap_local_page_try_from_panic(pages[page]);
}
- drm_panic_write_pixel(vaddr, offset, color, cpp);
+ if (!vaddr)
+ continue;
+
+ // Special case for 24bit, as a pixel might cross page boundaries
+ if (cpp == 3 && offset + 3 > PAGE_SIZE)
+ drm_panic_write_pixel24_xpage(vaddr, pages[page + 1],
+ offset, color);
+ else
+ drm_panic_write_pixel(vaddr, offset, color, cpp);
}
}
if (vaddr)
@@ -429,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f
static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect,
const struct font_desc *font, u32 fg_color)
{
+ if (rect->x2 > sb->width || rect->y2 > sb->height)
+ return;
+
if (logo_mono)
drm_panic_blit(sb, rect, logo_mono->data,
DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color);
@@ -477,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
struct drm_panic_line *line, int yoffset, u32 fg_color)
{
int chars_per_row = sb->width / font->width;
- struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height);
+ struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height);
struct drm_panic_line line_wrap;
if (line->len > chars_per_row) {
@@ -520,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
struct drm_panic_line line;
int yoffset;
- if (!font)
+ if (!font || font->width > sb->width)
return;
yoffset = sb->height - font->height - (sb->height % font->height) / 2;
@@ -733,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
pr_debug("QR width %d and scale %d\n", qr_width, scale);
r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale);
- v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5;
+ v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg);
+ if (v_margin < 0)
+ return -ENOSPC;
+ v_margin /= 5;
drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin);
r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale,
@@ -746,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
/* Fill with the background color, and draw text on top */
drm_panic_fill(sb, &r_screen, bg_color);
- if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr))
+ if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas))
drm_panic_logo_draw(sb, &r_logo, font, fg_color);
draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color);
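A worked example for the cross-page path added above, assuming PAGE_SIZE == 4096 and cpp == 3:

	/*
	 * A pixel at offset 4094 (PAGE_SIZE - 2): byte 0 lands at
	 * vaddr[4094], byte 1 at vaddr[4095]; the "offset == PAGE_SIZE - 2"
	 * test then redirects p to vaddr2, so byte 2 lands at byte 0 of
	 * next_page. At offset 4095, the redirect happens one byte earlier.
	 */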
diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
index 50c286c5cee8..ac27e86c601c 100644
--- a/drivers/gpu/drm/drm_panic_qr.rs
+++ b/drivers/gpu/drm/drm_panic_qr.rs
@@ -968,7 +968,7 @@ pub unsafe extern "C" fn drm_panic_qr_generate(
// nul-terminated string.
let url_cstr: &CStr = unsafe { CStr::from_char_ptr(url) };
let segments = &[
- &Segment::Binary(url_cstr.as_bytes()),
+ &Segment::Binary(url_cstr.to_bytes()),
&Segment::Numeric(&data_slice[0..data_len]),
];
match EncodedMsg::new(segments, tmp_slice) {
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 38f82391bfda..ce76c55913f7 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -210,7 +210,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
formats_size = sizeof(__u32) * plane->format_count;
if (WARN_ON(!formats_size)) {
/* 0 formats are never expected */
- return 0;
+ return ERR_PTR(-EINVAL);
}
modifiers_size =
@@ -226,7 +226,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
blob = drm_property_create_blob(dev, blob_size, NULL);
if (IS_ERR(blob))
- return NULL;
+ return blob;
blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
@@ -425,7 +425,7 @@ static int __drm_universal_plane_init(struct drm_device *dev,
plane->modifier_count = format_modifier_count;
plane->modifiers = kmalloc_array(format_modifier_count,
- sizeof(format_modifiers[0]),
+ sizeof(*plane->modifiers),
GFP_KERNEL);
if (format_modifier_count && !plane->modifiers) {
@@ -1820,3 +1820,62 @@ int drm_plane_add_size_hints_property(struct drm_plane *plane,
return 0;
}
EXPORT_SYMBOL(drm_plane_add_size_hints_property);
+
+/**
+ * drm_plane_create_color_pipeline_property - create a new color pipeline
+ * property
+ *
+ * @plane: drm plane
+ * @pipelines: list of pipelines
+ * @num_pipelines: number of pipelines
+ *
+ * Create the COLOR_PIPELINE plane property to specify the color pipelines on
+ * the plane.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int drm_plane_create_color_pipeline_property(struct drm_plane *plane,
+ const struct drm_prop_enum_list *pipelines,
+ int num_pipelines)
+{
+ struct drm_prop_enum_list *all_pipelines;
+ struct drm_property *prop;
+ int len = 0;
+ int i;
+
+ all_pipelines = kcalloc(num_pipelines + 1,
+ sizeof(*all_pipelines),
+ GFP_KERNEL);
+
+ if (!all_pipelines) {
+ drm_err(plane->dev, "failed to allocate color pipeline\n");
+ return -ENOMEM;
+ }
+
+ /* Create default Bypass color pipeline */
+ all_pipelines[len].type = 0;
+ all_pipelines[len].name = "Bypass";
+ len++;
+
+ /* Add all other color pipelines */
+ for (i = 0; i < num_pipelines; i++, len++) {
+ all_pipelines[len].type = pipelines[i].type;
+ all_pipelines[len].name = pipelines[i].name;
+ }
+
+ prop = drm_property_create_enum(plane->dev, DRM_MODE_PROP_ATOMIC,
+ "COLOR_PIPELINE",
+ all_pipelines, len);
+ if (IS_ERR(prop)) {
+ kfree(all_pipelines);
+ return PTR_ERR(prop);
+ }
+
+ drm_object_attach_property(&plane->base, prop, 0);
+ plane->color_pipeline_property = prop;
+
+ kfree(all_pipelines);
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_color_pipeline_property);
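A hedged driver-side sketch, placed inside a plane's init path (plane and ret come from the surrounding, hypothetical code; pipeline names and enum values are invented):

	static const struct drm_prop_enum_list my_pipelines[] = {
		{ .type = 1, .name = "my-ctm-pipeline" },
		{ .type = 2, .name = "my-3dlut-pipeline" },
	};

	ret = drm_plane_create_color_pipeline_property(plane, my_pipelines,
						       ARRAY_SIZE(my_pipelines));
	if (ret)
		return ret;
	/* Entry 0 is the implicit "Bypass" pipeline added by the helper. */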
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index a23fc712a8b7..21809a82187b 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -37,6 +37,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include "drm_internal.h"
@@ -93,7 +94,7 @@ struct drm_prime_member {
struct rb_node handle_rb;
};
-static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
+int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
struct dma_buf *dma_buf, uint32_t handle)
{
struct drm_prime_member *member;
@@ -190,8 +191,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
{
struct rb_node *rb;
- mutex_lock(&prime_fpriv->lock);
-
rb = prime_fpriv->handles.rb_node;
while (rb) {
struct drm_prime_member *member;
@@ -210,8 +209,6 @@ void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv,
rb = rb->rb_left;
}
}
-
- mutex_unlock(&prime_fpriv->lock);
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
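With the mutex calls dropped inside the helper, locking presumably moves to the callers (a hedged reading of the hunk; the actual call sites are outside this diff). Callers would then look roughly like:

	mutex_lock(&prime_fpriv->lock);
	drm_prime_remove_buf_handle(prime_fpriv, handle);
	mutex_unlock(&prime_fpriv->lock);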
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6b3541159c0f..09b12c30df69 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -119,6 +119,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode,
*status = drm_bridge_chain_mode_valid(bridge,
&connector->display_info,
mode);
+ drm_bridge_put(bridge);
if (*status != MODE_OK) {
/* There is also no point in continuing for crtc check
* here. */
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index a455c56dbbeb..b01ffa4d6509 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -18,6 +18,7 @@
#include <linux/gfp.h>
#include <linux/i2c.h>
#include <linux/kdev_t.h>
+#include <linux/pci.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -30,6 +31,8 @@
#include <drm/drm_property.h>
#include <drm/drm_sysfs.h>
+#include <asm/video.h>
+
#include "drm_internal.h"
#include "drm_crtc_internal.h"
@@ -508,6 +511,43 @@ void drm_sysfs_connector_property_event(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_sysfs_connector_property_event);
+static ssize_t boot_display_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "1\n");
+}
+static DEVICE_ATTR_RO(boot_display);
+
+static struct attribute *display_attrs[] = {
+ &dev_attr_boot_display.attr,
+ NULL
+};
+
+static umode_t boot_display_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj)->parent;
+
+ if (dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (video_is_primary_device(&pdev->dev))
+ return a->mode;
+ }
+
+ return 0;
+}
+
+static const struct attribute_group display_attr_group = {
+ .attrs = display_attrs,
+ .is_visible = boot_display_visible,
+};
+
+static const struct attribute_group *card_dev_groups[] = {
+ &display_attr_group,
+ NULL
+};
+
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;
@@ -531,6 +571,7 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
kdev->devt = MKDEV(DRM_MAJOR, minor->index);
kdev->class = drm_class;
+ kdev->groups = card_dev_groups;
kdev->type = &drm_sysfs_device_minor;
}
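A hedged userspace sketch for the new attribute (the card0 path is an assumption; per boot_display_visible() above, the file only appears when the parent PCI device is the primary video device):

	#include <stdio.h>

	int main(void)
	{
		char buf[8] = "";
		FILE *f = fopen("/sys/class/drm/card0/boot_display", "r");

		if (f && fgets(buf, sizeof(buf), f))
			printf("card0 drove the boot display: %s", buf);
		if (f)
			fclose(f);
		return 0;
	}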
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 46f59883183d..5c14140cd0c2 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -136,8 +136,17 @@
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
*
- * Drivers for hardware without support for vertical-blanking interrupts
- * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * Drivers for hardware without support for vertical-blanking interrupts can
+ * use DRM vblank timers to send vblank events at the rate of the current
+ * display mode's refresh. While not synchronized to the hardware's
+ * vertical-blanking regions, the timer helps DRM clients and compositors to
+ * adapt their update cycle to the display output. Drivers should set up
+ * vblanking as usual, but call drm_crtc_vblank_start_timer() and
+ * drm_crtc_vblank_cancel_timer() as part of their atomic mode setting.
+ * See also DRM vblank helpers for more information.
+ *
+ * Drivers that support neither vertical-blanking interrupts nor timers must
+ * not call drm_vblank_init(). For these drivers, atomic helpers will
* automatically generate fake vblank events as part of the display update.
* This functionality also can be controlled by the driver by enabling and
* disabling struct drm_crtc_state.no_vblank.
@@ -508,6 +517,9 @@ static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
drm_WARN_ON(dev, READ_ONCE(vblank->enabled) &&
drm_core_check_feature(dev, DRIVER_MODESET));
+ if (vblank->vblank_timer.crtc)
+ hrtimer_cancel(&vblank->vblank_timer.timer);
+
drm_vblank_destroy_worker(vblank);
timer_delete_sync(&vblank->disable_timer);
}
@@ -794,10 +806,8 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal(
ts_vblank_time = ktime_to_timespec64(*vblank_time);
drm_dbg_vbl(dev,
- "crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
- pipe, hpos, vpos,
- (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
- (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
+ "crtc %u : v p(%d,%d)@ %ptSp -> %ptSp [e %d us, %d rep]\n",
+ pipe, hpos, vpos, &ts_etime, &ts_vblank_time,
duration_ns / 1000, i);
return true;
@@ -1303,7 +1313,7 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
ret = wait_event_timeout(vblank->queue,
last != drm_vblank_count(dev, pipe),
- msecs_to_jiffies(100));
+ msecs_to_jiffies(1000));
drm_WARN(dev, ret == 0, "vblank wait timed out on crtc %i\n", pipe);
@@ -2162,3 +2172,159 @@ err_free:
return ret;
}
+/*
+ * VBLANK timer
+ */
+
+static enum hrtimer_restart drm_vblank_timer_function(struct hrtimer *timer)
+{
+ struct drm_vblank_crtc_timer *vtimer =
+ container_of(timer, struct drm_vblank_crtc_timer, timer);
+ struct drm_crtc *crtc = vtimer->crtc;
+ const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+ ktime_t interval;
+ u64 ret_overrun;
+ bool succ;
+
+ spin_lock_irqsave(&vtimer->interval_lock, flags);
+ interval = vtimer->interval;
+ spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+ if (!interval)
+ return HRTIMER_NORESTART;
+
+ ret_overrun = hrtimer_forward_now(&vtimer->timer, interval);
+ if (ret_overrun != 1)
+ drm_dbg_vbl(dev, "vblank timer overrun\n");
+
+ if (crtc_funcs->handle_vblank_timeout)
+ succ = crtc_funcs->handle_vblank_timeout(crtc);
+ else
+ succ = drm_crtc_handle_vblank(crtc);
+ if (!succ)
+ return HRTIMER_NORESTART;
+
+ return HRTIMER_RESTART;
+}
+
+/**
+ * drm_crtc_vblank_start_timer - Starts the vblank timer on the given CRTC
+ * @crtc: the CRTC
+ *
+ * Drivers should call this function from their CRTC's enable_vblank
+ * function to start a vblank timer. The timer will fire after the duration
+ * of a full frame. drm_crtc_vblank_cancel_timer() disables a running timer.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_crtc_vblank_start_timer(struct drm_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+ unsigned long flags;
+
+ if (!vtimer->crtc) {
+ /*
+ * Set up the data structures on the first invocation.
+ */
+ vtimer->crtc = crtc;
+ spin_lock_init(&vtimer->interval_lock);
+ hrtimer_setup(&vtimer->timer, drm_vblank_timer_function,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ } else {
+ /*
+ * Timer should not be active. If it is, wait for the
+ * previous cancel operations to finish.
+ */
+ while (hrtimer_active(&vtimer->timer))
+ hrtimer_try_to_cancel(&vtimer->timer);
+ }
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ spin_lock_irqsave(&vtimer->interval_lock, flags);
+ vtimer->interval = ns_to_ktime(vblank->framedur_ns);
+ spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+ hrtimer_start(&vtimer->timer, vtimer->interval, HRTIMER_MODE_REL);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_start_timer);
+
+/**
+ * drm_crtc_vblank_cancel_timer - Cancels the given CRTC's vblank timer
+ * @crtc: the CRTC
+ *
+ * Drivers should call this function from their CRTC's disable_vblank
+ * function to stop a vblank timer.
+ */
+void drm_crtc_vblank_cancel_timer(struct drm_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+ unsigned long flags;
+
+ /*
+ * Calling hrtimer_cancel() can result in a deadlock with DRM's
+ * vblank_time_lock and hrtimers' softirq_expiry_lock. So
+ * clear interval and indicate cancellation. The timer function
+ * will cancel itself on the next invocation.
+ */
+
+ spin_lock_irqsave(&vtimer->interval_lock, flags);
+ vtimer->interval = 0;
+ spin_unlock_irqrestore(&vtimer->interval_lock, flags);
+
+ hrtimer_try_to_cancel(&vtimer->timer);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_cancel_timer);
+
+/**
+ * drm_crtc_vblank_get_vblank_timeout - Returns the vblank timeout
+ * @crtc: The CRTC
+ * @vblank_time: Returns the next vblank timestamp
+ *
+ * The helper drm_crtc_vblank_get_vblank_timeout() returns the next vblank
+ * timestamp of the CRTC's vblank timer according to the timer's expiry
+ * time.
+ */
+void drm_crtc_vblank_get_vblank_timeout(struct drm_crtc *crtc, ktime_t *vblank_time)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct drm_vblank_crtc_timer *vtimer = &vblank->vblank_timer;
+ u64 cur_count;
+ ktime_t cur_time;
+
+ if (!READ_ONCE(vblank->enabled)) {
+ *vblank_time = ktime_get();
+ return;
+ }
+
+ /*
+ * A concurrent vblank timeout could update the expires field before
+ * we compare it with the vblank time. Hence we'd compare the old
+ * expiry time to the new vblank time, wrongly deducing that the timer
+ * had already expired. Reread until we get consistent values from both fields.
+ */
+ do {
+ cur_count = drm_crtc_vblank_count_and_time(crtc, &cur_time);
+ *vblank_time = READ_ONCE(vtimer->timer.node.expires);
+ } while (cur_count != drm_crtc_vblank_count_and_time(crtc, &cur_time));
+
+ if (drm_WARN_ON(crtc->dev, !ktime_compare(*vblank_time, cur_time)))
+ return; /* Already expired */
+
+ /*
+ * To prevent races we roll the hrtimer forward before we do any
+ * interrupt processing - this is how real hw works (the interrupt
+ * is only generated after all the vblank registers are updated)
+ * and what the vblank core expects. Therefore we need to always
+ * correct the timestamp by one frame.
+ */
+ *vblank_time = ktime_sub(*vblank_time, vtimer->interval);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_get_vblank_timeout);
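A hedged sketch of the intended driver-side wiring (the my_crtc_* names are hypothetical); the drm_vblank_helper.c file added below provides exactly these trivial wrappers:

	static int my_crtc_enable_vblank(struct drm_crtc *crtc)
	{
		return drm_crtc_vblank_start_timer(crtc);
	}

	static void my_crtc_disable_vblank(struct drm_crtc *crtc)
	{
		drm_crtc_vblank_cancel_timer(crtc);
	}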
diff --git a/drivers/gpu/drm/drm_vblank_helper.c b/drivers/gpu/drm/drm_vblank_helper.c
new file mode 100644
index 000000000000..a04a6ba1b0ca
--- /dev/null
+++ b/drivers/gpu/drm/drm_vblank_helper.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The vblank helper library provides functions for supporting vertical
+ * blanking in DRM drivers.
+ *
+ * For vblank timers, several callback implementations are available.
+ * Drivers enable support for vblank timers by setting the vblank callbacks
+ * in struct &drm_crtc_funcs to the helpers provided by this library. The
+ * initializer macro DRM_CRTC_VBLANK_TIMER_FUNCS does this conveniently.
+ * The driver further has to send the VBLANK event from its atomic_flush
+ * callback and control vblank from the CRTC's atomic_enable and atomic_disable
+ * callbacks. The callbacks are located in struct &drm_crtc_helper_funcs.
+ * The vblank helper library provides implementations of these callbacks
+ * for drivers without further requirements. The initializer macro
+ * DRM_CRTC_HELPER_VBLANK_FUNCS sets them conveniently.
+ *
+ * Once the driver enables vblank support with drm_vblank_init(), each
+ * CRTC's vblank timer fires according to the programmed display mode. By
+ * default, the vblank timer invokes drm_crtc_handle_vblank(). Drivers with
+ * more specific requirements can set their own handler function in
+ * struct &drm_crtc_helper_funcs.handle_vblank_timeout.
+ */
+
+/*
+ * VBLANK helpers
+ */
+
+/**
+ * drm_crtc_vblank_atomic_flush -
+ * Implements struct &drm_crtc_helper_funcs.atomic_flush
+ * @crtc: The CRTC
+ * @state: The atomic state to apply
+ *
+ * The helper drm_crtc_vblank_atomic_flush() implements atomic_flush of
+ * struct drm_crtc_helper_funcs for CRTCs that only need to send out a
+ * VBLANK event.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_flush.
+ */
+void drm_crtc_vblank_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_pending_vblank_event *event;
+
+ spin_lock_irq(&dev->event_lock);
+
+ event = crtc_state->event;
+ crtc_state->event = NULL;
+
+ if (event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+
+ spin_unlock_irq(&dev->event_lock);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_flush);
+
+/**
+ * drm_crtc_vblank_atomic_enable - Implements struct &drm_crtc_helper_funcs.atomic_enable
+ * @crtc: The CRTC
+ * @state: The atomic state
+ *
+ * The helper drm_crtc_vblank_atomic_enable() implements atomic_enable
+ * of struct drm_crtc_helper_funcs for CRTCs that only need to enable VBLANKs.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_enable.
+ */
+void drm_crtc_vblank_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_enable);
+
+/**
+ * drm_crtc_vblank_atomic_disable - Implements struct &drm_crtc_helper_funcs.atomic_disable
+ * @crtc: The CRTC
+ * @state: The atomic state
+ *
+ * The helper drm_crtc_vblank_atomic_disable() implements atomic_disable
+ * of struct drm_crtc_helper_funcs for CRTCs that only need to disable VBLANKs.
+ *
+ * See also struct &drm_crtc_helper_funcs.atomic_disable.
+ */
+void drm_crtc_vblank_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_off(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_atomic_disable);
+
+/*
+ * VBLANK timer
+ */
+
+/**
+ * drm_crtc_vblank_helper_enable_vblank_timer - Implements struct &drm_crtc_funcs.enable_vblank
+ * @crtc: The CRTC
+ *
+ * The helper drm_crtc_vblank_helper_enable_vblank_timer() implements
+ * enable_vblank of struct drm_crtc_funcs for CRTCs that require
+ * a VBLANK timer. It sets up the timer on the first invocation. The
+ * started timer expires after the current frame duration. See struct
+ * &drm_vblank_crtc.framedur_ns.
+ *
+ * See also struct &drm_crtc_funcs.enable_vblank.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_crtc_vblank_helper_enable_vblank_timer(struct drm_crtc *crtc)
+{
+ return drm_crtc_vblank_start_timer(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_enable_vblank_timer);
+
+/**
+ * drm_crtc_vblank_helper_disable_vblank_timer - Implements struct &drm_crtc_funcs.disable_vblank
+ * @crtc: The CRTC
+ *
+ * The helper drm_crtc_vblank_helper_disable_vblank_timer() implements
+ * disable_vblank of struct drm_crtc_funcs for CRTCs that require a
+ * VBLANK timer.
+ *
+ * See also struct &drm_crtc_funcs.disable_vblank.
+ */
+void drm_crtc_vblank_helper_disable_vblank_timer(struct drm_crtc *crtc)
+{
+ drm_crtc_vblank_cancel_timer(crtc);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_disable_vblank_timer);
+
+/**
+ * drm_crtc_vblank_helper_get_vblank_timestamp_from_timer -
+ * Implements struct &drm_crtc_funcs.get_vblank_timestamp
+ * @crtc: The CRTC
+ * @max_error: Maximum acceptable error
+ * @vblank_time: Returns the next vblank timestamp
+ * @in_vblank_irq: True if called from drm_crtc_handle_vblank()
+ *
+ * The helper drm_crtc_vblank_helper_get_vblank_timestamp_from_timer() implements
+ * get_vblank_timestamp of struct drm_crtc_funcs for CRTCs that require a
+ * VBLANK timer. It returns the timestamp according to the timer's expiry
+ * time.
+ *
+ * See also struct &drm_crtc_funcs.get_vblank_timestamp.
+ *
+ * Returns:
+ * True on success, or false otherwise.
+ */
+bool drm_crtc_vblank_helper_get_vblank_timestamp_from_timer(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ drm_crtc_vblank_get_vblank_timeout(crtc, vblank_time);
+
+ return true;
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_from_timer);
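Based on the overview above, a minimal sketch of plugging the helpers into a driver's function tables. The initializer macros are the ones named in the DOC comment; the assumption that they expand to the callbacks exported in this file is hedged:

	static const struct drm_crtc_funcs my_crtc_funcs = {
		.reset = drm_atomic_helper_crtc_reset,
		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
		DRM_CRTC_VBLANK_TIMER_FUNCS,
	};

	static const struct drm_crtc_helper_funcs my_crtc_helper_funcs = {
		DRM_CRTC_HELPER_VBLANK_FUNCS,
	};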
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index e4e1873f0e1e..70f0199251ea 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(drm_vblank_work_flush);
void drm_vblank_work_flush_all(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(crtc)];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
spin_lock_irq(&dev->event_lock);
wait_event_lock_irq(vblank->work_wait_queue,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index b13a17276d07..ad5e6f7b23f9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
@@ -347,7 +348,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
bool switch_mmu_context = gpu->mmu_context != mmu_context;
- unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
+ unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
bool has_blt = !!(gpu->identity.minor_features5 &
chipMinorFeatures5_BLT_ENGINE);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 3e91747ed339..54ceae87b401 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 2f844e82bc46..5d8f3b03d4ae 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 76c742328edb..a9611c1a773f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <linux/dma-fence-array.h>
#include <linux/file.h>
#include <linux/dma-resv.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index cf0d9049bcf1..ca0be293f5fe 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -16,6 +16,8 @@
#include <linux/reset.h>
#include <linux/thermal.h>
+#include <drm/drm_print.h>
+
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
#include "etnaviv_gpu.h"
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 8665f2658d51..32d710baf17f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -198,6 +198,38 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
},
{
.model = 0x8000,
+ .revision = 0x6205,
+ .product_id = 0x80003,
+ .customer_id = 0x15,
+ .eco_id = 0,
+ .stream_count = 16,
+ .register_max = 64,
+ .thread_count = 512,
+ .shader_core_count = 2,
+ .nn_core_count = 2,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287c8d,
+ .minor_features0 = 0xc1799eff,
+ .minor_features1 = 0xfefbfad9,
+ .minor_features2 = 0xeb9d4fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xdb0dafc7,
+ .minor_features5 = 0x7b5ac333,
+ .minor_features6 = 0xfcce6000,
+ .minor_features7 = 0x03fbfa6f,
+ .minor_features8 = 0x00ef0ef0,
+ .minor_features9 = 0x0eca703c,
+ .minor_features10 = 0x898048f0,
+ .minor_features11 = 0x00000034,
+ },
+ {
+ .model = 0x8000,
.revision = 0x7120,
.product_id = 0x45080009,
.customer_id = 0x88,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index df5192083b20..a992be2ede88 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -6,6 +6,8 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <drm/drm_print.h>
+
#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index b9e206303b48..9ae0fa4667a9 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -20,6 +20,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "exynos_drm_crtc.h"
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 805aa28c1723..bb74b17f9753 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -20,6 +20,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
@@ -69,7 +70,6 @@ struct decon_context {
void __iomem *regs;
unsigned long irq_flags;
bool i80_if;
- bool suspended;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
@@ -132,9 +132,6 @@ static void decon_shadow_protect_win(struct decon_context *ctx,
static void decon_wait_for_vblank(struct decon_context *ctx)
{
- if (ctx->suspended)
- return;
-
atomic_set(&ctx->wait_vsync_event, 1);
/*
@@ -210,9 +207,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
u32 val, clkdiv;
- if (ctx->suspended)
- return;
-
/* nothing to do if we haven't set the mode yet */
if (mode->htotal == 0 || mode->vtotal == 0)
return;
@@ -274,9 +268,6 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return -EPERM;
-
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -299,9 +290,6 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
u32 val;
- if (ctx->suspended)
- return;
-
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -404,9 +392,6 @@ static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, true);
}
@@ -427,9 +412,6 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
unsigned int pitch = fb->pitches[0];
unsigned int vidw_addr0_base = ctx->data->vidw_buf_start_base;
- if (ctx->suspended)
- return;
-
/*
* SHADOWCON/PRTCON register is used for enabling timing.
*
@@ -517,9 +499,6 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
unsigned int win = plane->index;
u32 val;
- if (ctx->suspended)
- return;
-
/* protect windows */
decon_shadow_protect_win(ctx, win, true);
@@ -538,9 +517,6 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
for (i = 0; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
exynos_crtc_handle_event(crtc);
@@ -568,9 +544,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int ret;
- if (!ctx->suspended)
- return;
-
ret = pm_runtime_resume_and_get(ctx->dev);
if (ret < 0) {
DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
@@ -584,8 +557,6 @@ static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
decon_enable_vblank(ctx->crtc);
decon_commit(ctx->crtc);
-
- ctx->suspended = false;
}
static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
@@ -593,9 +564,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int i;
- if (ctx->suspended)
- return;
-
/*
* We need to make sure that all windows are disabled before we
* suspend that connector. Otherwise we might try to scan from
@@ -605,8 +573,6 @@ static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
decon_disable_plane(crtc, &ctx->planes[i]);
pm_runtime_put_sync(ctx->dev);
-
- ctx->suspended = true;
}
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
@@ -727,7 +693,6 @@ static int decon_probe(struct platform_device *pdev)
return -ENOMEM;
ctx->dev = dev;
- ctx->suspended = true;
ctx->data = of_device_get_match_data(dev);
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 896a03639e2d..c4d098ab7863 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -154,6 +154,11 @@ static const struct samsung_dsim_plat_data exynos5433_dsi_pdata = {
.host_ops = &exynos_dsi_exynos_host_ops,
};
+static const struct samsung_dsim_plat_data exynos7870_dsi_pdata = {
+ .hw_type = DSIM_TYPE_EXYNOS7870,
+ .host_ops = &exynos_dsi_exynos_host_ops,
+};
+
static const struct of_device_id exynos_dsi_of_match[] = {
{
.compatible = "samsung,exynos3250-mipi-dsi",
@@ -175,6 +180,10 @@ static const struct of_device_id exynos_dsi_of_match[] = {
.compatible = "samsung,exynos5433-mipi-dsi",
.data = &exynos5433_dsi_pdata,
},
+ {
+ .compatible = "samsung,exynos7870-mipi-dsi",
+ .data = &exynos7870_dsi_pdata,
+ },
{ /* sentinel. */ }
};
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index ddd73e7f26a3..6ecd95bcb0c4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -14,6 +14,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 93de25b77e68..637927818dfe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -16,6 +16,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
@@ -42,8 +43,6 @@ static void exynos_drm_fb_destroy(struct fb_info *info)
drm_framebuffer_remove(fb);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops exynos_drm_fb_ops = {
@@ -59,18 +58,11 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes,
struct exynos_drm_gem *exynos_gem)
{
- struct fb_info *fbi;
+ struct fb_info *fbi = helper->info;
struct drm_framebuffer *fb = helper->fb;
unsigned int size = fb->width * fb->height * fb->format->cpp[0];
unsigned long offset;
- fbi = drm_fb_helper_alloc_info(helper);
- if (IS_ERR(fbi)) {
- DRM_DEV_ERROR(to_dma_dev(helper->dev),
- "failed to allocate fb info.\n");
- return PTR_ERR(fbi);
- }
-
fbi->fbops = &exynos_drm_fb_ops;
drm_fb_helper_fill_info(fbi, helper, sizes);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 205c238cc73a..b6abdc4f2b0a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -23,6 +23,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index d32f2474cbaa..2bea107dd960 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -21,6 +21,7 @@
#include <linux/workqueue.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index e3fbb45f37a2..b9b2f000072d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -10,7 +10,9 @@
#include <linux/shmem_fs.h>
#include <linux/module.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>
@@ -329,15 +331,16 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
unsigned int flags;
int ret;
+ ret = drm_mode_size_dumb(dev, args, 0, 0);
+ if (ret)
+ return ret;
+
/*
* allocate memory to be used for framebuffer.
* - this callback would be called by user application
* with DRM_IOCTL_MODE_CREATE_DUMB command.
*/
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
if (is_drm_iommu_supported(dev))
flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
else
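For reference, a hedged sketch of what drm_mode_size_dumb() centralizes here. The first two lines mirror the removed open-coded math; reading the two trailing 0 arguments as "no extra alignment" is an assumption:

	/*
	 *	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	 *	args->size  = args->pitch * args->height;
	 *
	 * with overflow checking implied by the helper returning an error,
	 * and the 0, 0 arguments presumably requesting no extra pitch or
	 * size alignment.
	 */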
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 03c8490af4f4..008def51225a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -22,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 7c3aa77186d3..67afddd566e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
@@ -58,7 +59,7 @@ static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
struct drm_plane_state *state = &exynos_state->base;
struct drm_crtc *crtc = state->crtc;
struct drm_crtc_state *crtc_state =
- drm_atomic_get_existing_crtc_state(state->state, crtc);
+ drm_atomic_get_new_crtc_state(state->state, crtc);
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
int crtc_x, crtc_y;
unsigned int crtc_w, crtc_h;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e094b8bbc0f1..64c69dd2966e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index a3670d2eaab2..69dea5049309 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -28,6 +28,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 794a87d16f88..a9a341ea6507 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -15,6 +15,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "fsl_dcu_drm_drv.h"
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 8711a7a5b8da..c8f1716a12d5 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -11,6 +11,8 @@
#include <acpi/video.h>
+#include <drm/drm_print.h>
+
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 718d45891fc7..fd6ea8998dbe 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -9,6 +9,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_print.h>
#include "cdv_device.h"
#include "gma_device.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index bbd0abdd8382..5942a9d46b02 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -11,6 +11,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include "cdv_device.h"
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index c85143792019..54bf626f0524 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index f2a3e37ef632..8e93ee0d0ccd 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -31,6 +31,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 9276e3676ba0..fbe7fe317393 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -14,6 +14,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c
index 4a37136f90f4..c26926babc2a 100644
--- a/drivers/gpu/drm/gma500/fbdev.c
+++ b/drivers/gpu/drm/gma500/fbdev.c
@@ -50,48 +50,6 @@ static const struct vm_operations_struct psb_fbdev_vm_ops = {
* struct fb_ops
*/
-#define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
-
-static int psb_fbdev_fb_setcolreg(unsigned int regno,
- unsigned int red, unsigned int green,
- unsigned int blue, unsigned int transp,
- struct fb_info *info)
-{
- struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- uint32_t v;
-
- if (!fb)
- return -ENOMEM;
-
- if (regno > 255)
- return 1;
-
- red = CMAP_TOHW(red, info->var.red.length);
- blue = CMAP_TOHW(blue, info->var.blue.length);
- green = CMAP_TOHW(green, info->var.green.length);
- transp = CMAP_TOHW(transp, info->var.transp.length);
-
- v = (red << info->var.red.offset) |
- (green << info->var.green.offset) |
- (blue << info->var.blue.offset) |
- (transp << info->var.transp.offset);
-
- if (regno < 16) {
- switch (fb->format->cpp[0] * 8) {
- case 16:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- case 24:
- case 32:
- ((uint32_t *) info->pseudo_palette)[regno] = v;
- break;
- }
- }
-
- return 0;
-}
-
static int psb_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
if (vma->vm_pgoff != 0)
@@ -120,23 +78,18 @@ static void psb_fbdev_fb_destroy(struct fb_info *info)
drm_fb_helper_fini(fb_helper);
drm_framebuffer_unregister_private(fb);
- fb->obj[0] = NULL;
drm_framebuffer_cleanup(fb);
kfree(fb);
drm_gem_object_put(obj);
drm_client_release(&fb_helper->client);
-
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops psb_fbdev_fb_ops = {
.owner = THIS_MODULE,
__FB_DEFAULT_IOMEM_OPS_RDWR,
DRM_FB_HELPER_DEFAULT_OPS,
- .fb_setcolreg = psb_fbdev_fb_setcolreg,
__FB_DEFAULT_IOMEM_OPS_DRAW,
.fb_mmap = psb_fbdev_fb_mmap,
.fb_destroy = psb_fbdev_fb_destroy,
@@ -155,7 +108,7 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_device *dev = fb_helper->dev;
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct fb_info *info;
+ struct fb_info *info = fb_helper->info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd2 mode_cmd = { };
int size;
@@ -214,12 +167,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->funcs = &psb_fbdev_fb_helper_funcs;
fb_helper->fb = fb;
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_drm_framebuffer_unregister_private;
- }
-
info->fbops = &psb_fbdev_fb_ops;
/* Accessed stolen memory directly */
@@ -243,11 +190,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
return 0;
-err_drm_framebuffer_unregister_private:
- drm_framebuffer_unregister_private(fb);
- fb->obj[0] = NULL;
- drm_framebuffer_cleanup(fb);
- kfree(fb);
err_drm_gem_object_put:
drm_gem_object_put(obj);
return ret;
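
[Note on the fbdev.c hunks: they track the DRM fbdev-client refactor, in which the client core allocates the fb_info and frees the helper itself; the hand-rolled .fb_setcolreg also goes away in favor of the DRM_FB_HELPER_DEFAULT_OPS defaults. A minimal sketch of the resulting probe shape, assuming the common client code has already allocated fb_helper->info before calling the driver; names here are illustrative, not from the patch:

	static int my_fbdev_probe(struct drm_fb_helper *fb_helper,
				  struct drm_fb_helper_surface_size *sizes)
	{
		struct fb_info *info = fb_helper->info; /* preallocated by the fbdev client core */

		/* ... create the framebuffer and set fb_helper->fb ... */

		info->fbops = &my_fb_ops;

		/* error paths only undo what this function itself created;
		 * neither info nor fb_helper is freed here anymore */
		return 0;
	}
]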
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 4b7627a72637..2e44a2ac2742 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -16,6 +16,7 @@
#include <asm/set_memory.h>
#include <drm/drm.h>
+#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
#include "gem.h"
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d5924ca3ed05..b60720560830 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -8,6 +8,7 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm.h>
+#include <drm/drm_print.h>
#include "intel_bios.h"
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index ee8b047587f2..2b06ba22f9c6 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -32,6 +32,8 @@
#include <linux/i2c.h>
#include <linux/module.h>
+#include <drm/drm_print.h>
+
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index cba97d7db131..0326f3ddc621 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -12,6 +12,7 @@
*/
#include <drm/drm.h>
+#include <drm/drm_print.h>
#include "mid_bios.h"
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index ea9b41af0867..086d14678a8e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -10,6 +10,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include "framebuffer.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 1cf394369127..20d027d552c7 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "psb_drv.h"
@@ -726,8 +727,8 @@ void oaktrail_hdmi_teardown(struct drm_device *dev)
if (hdmi_dev) {
pdev = hdmi_dev->dev;
- pci_set_drvdata(pdev, NULL);
oaktrail_hdmi_i2c_exit(pdev);
+ pci_set_drvdata(pdev, NULL);
iounmap(hdmi_dev->regs);
kfree(hdmi_dev);
pci_dev_put(pdev);
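
[Note: the teardown reorder above is an ordering fix, presumably because oaktrail_hdmi_i2c_exit() still reaches the device state through pci_get_drvdata(); the drvdata pointer may therefore only be cleared after the I2C adapter is torn down.]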
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 6daa6669ed23..48e8ac560a2a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -30,6 +30,9 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+
+#include <drm/drm_print.h>
+
#include "psb_drv.h"
#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 72191d6f0d06..0705ba3813e6 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -13,6 +13,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index 0c271072af63..5f0daa25b86d 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -22,6 +22,9 @@
*
*/
#include <linux/acpi.h>
+
+#include <drm/drm_print.h>
+
#include "psb_drv.h"
#include "psb_irq.h"
#include "psb_intel_reg.h"
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 85d3557c2eb9..005ab7f5355f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pciids.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index ff46e88c4768..1ff2bd23db74 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -11,6 +11,7 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include "framebuffer.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 9ad611b5956e..f8f3c42e67a7 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index afda40fc4494..553e7c7d9bb8 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -36,6 +36,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 7bbb79b0497d..3a946b472064 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -9,6 +9,7 @@
**************************************************************************/
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "power.h"
@@ -249,6 +250,7 @@ static irqreturn_t gma_irq_handler(int irq, void *arg)
void gma_irq_preinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct drm_crtc *crtc;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -259,10 +261,15 @@ void gma_irq_preinstall(struct drm_device *dev)
PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);
- if (dev->vblank[0].enabled)
- dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
- if (dev->vblank[1].enabled)
- dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+
+ if (vblank->enabled) {
+ u32 mask = drm_crtc_index(crtc) ? _PSB_VSYNC_PIPEB_FLAG :
+ _PSB_VSYNC_PIPEA_FLAG;
+ dev_priv->vdc_irq_mask |= mask;
+ }
+ }
/* Revisit this area - want per device masks ? */
if (dev_priv->ops->hotplug)
@@ -277,8 +284,8 @@ void gma_irq_preinstall(struct drm_device *dev)
void gma_irq_postinstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct drm_crtc *crtc;
unsigned long irqflags;
- unsigned int i;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -291,11 +298,13 @@ void gma_irq_postinstall(struct drm_device *dev)
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- for (i = 0; i < dev->num_crtcs; ++i) {
- if (dev->vblank[i].enabled)
- gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+
+ if (vblank->enabled)
+ gma_enable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
else
- gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
}
if (dev_priv->ops->hotplug_enable)
@@ -336,8 +345,8 @@ void gma_irq_uninstall(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct drm_crtc *crtc;
unsigned long irqflags;
- unsigned int i;
if (!dev_priv->irq_enabled)
return;
@@ -349,9 +358,11 @@ void gma_irq_uninstall(struct drm_device *dev)
PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- for (i = 0; i < dev->num_crtcs; ++i) {
- if (dev->vblank[i].enabled)
- gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+
+ if (vblank->enabled)
+ gma_disable_pipestat(dev_priv, drm_crtc_index(crtc), PIPE_VBLANK_INTERRUPT_ENABLE);
}
dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
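
[Note: the psb_irq.c conversion replaces direct indexing into dev->vblank[] with the CRTC-based accessors. The loop shape, reduced to a standalone sketch; drm_for_each_crtc(), drm_crtc_vblank_crtc() and drm_crtc_index() are core DRM helpers, while enable_pipe_vblank() is a hypothetical stand-in for the per-pipe action:

	#include <drm/drm_crtc.h>
	#include <drm/drm_vblank.h>

	static void enable_vblank_irqs(struct drm_device *dev)
	{
		struct drm_crtc *crtc;

		drm_for_each_crtc(crtc, dev) {
			/* per-CRTC vblank state instead of dev->vblank[i] */
			struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

			if (vblank->enabled)
				enable_pipe_vblank(dev, drm_crtc_index(crtc));
		}
	}
]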
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index 0f07d77c5d52..1726a3fadff8 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -16,7 +16,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -562,11 +561,11 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn
continue; /* not a DRM property */
property = gud_connector_property_lookup(connector, prop);
- if (WARN_ON(IS_ERR(property)))
+ if (drm_WARN_ON(drm, IS_ERR(property)))
continue;
state_val = gud_connector_tv_state_val(prop, &gconn->initial_tv_state);
- if (WARN_ON(IS_ERR(state_val)))
+ if (drm_WARN_ON(drm, IS_ERR(state_val)))
continue;
*state_val = val;
@@ -594,7 +593,7 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state,
unsigned int *state_val;
state_val = gud_connector_tv_state_val(prop, &connector_state->tv);
- if (WARN_ON_ONCE(IS_ERR(state_val)))
+ if (drm_WARN_ON_ONCE(connector_state->connector->dev, IS_ERR(state_val)))
return PTR_ERR(state_val);
val = *state_val;
@@ -607,13 +606,16 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state,
return gconn->num_properties;
}
+static const struct drm_encoder_funcs gud_drm_simple_encoder_funcs_cleanup = {
+ .destroy = drm_encoder_cleanup,
+};
+
static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
struct gud_connector_descriptor_req *desc)
{
struct drm_device *drm = &gdrm->drm;
struct gud_connector *gconn;
struct drm_connector *connector;
- struct drm_encoder *encoder;
int ret, connector_type;
u32 flags;
@@ -665,7 +667,7 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
return ret;
}
- if (WARN_ON(connector->index != index))
+ if (drm_WARN_ON(drm, connector->index != index))
return -EINVAL;
if (flags & GUD_CONNECTOR_FLAGS_POLL_STATUS)
@@ -681,20 +683,13 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index,
return ret;
}
- /* The first connector is attached to the existing simple pipe encoder */
- if (!connector->index) {
- encoder = &gdrm->pipe.encoder;
- } else {
- encoder = &gconn->encoder;
-
- ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
- if (ret)
- return ret;
-
- encoder->possible_crtcs = 1;
- }
+ gconn->encoder.possible_crtcs = drm_crtc_mask(&gdrm->crtc);
+ ret = drm_encoder_init(drm, &gconn->encoder, &gud_drm_simple_encoder_funcs_cleanup,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
- return drm_connector_attach_encoder(connector, encoder);
+ return drm_connector_attach_encoder(connector, &gconn->encoder);
}
int gud_get_connectors(struct gud_device *gdrm)
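
[Note: with the simple-pipe encoder gone, every gud connector now owns a plain encoder whose only funcs hook is cleanup. The recipe used above, as a self-contained sketch built from core DRM calls:

	static const struct drm_encoder_funcs cleanup_only_funcs = {
		.destroy = drm_encoder_cleanup,
	};

	static int attach_own_encoder(struct drm_device *drm, struct drm_crtc *crtc,
				      struct drm_connector *connector,
				      struct drm_encoder *encoder)
	{
		int ret;

		/* restrict the encoder to the device's single CRTC */
		encoder->possible_crtcs = drm_crtc_mask(crtc);
		ret = drm_encoder_init(drm, encoder, &cleanup_only_funcs,
				       DRM_MODE_ENCODER_NONE, NULL);
		if (ret)
			return ret;

		return drm_connector_attach_encoder(connector, encoder);
	}
]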
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index 5385a2126e45..42135a48d92e 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -16,6 +16,7 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
@@ -27,7 +28,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -249,7 +249,7 @@ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val)
return gud_usb_set(gdrm, request, 0, &val, sizeof(val));
}
-static int gud_get_properties(struct gud_device *gdrm)
+static int gud_plane_add_properties(struct gud_device *gdrm)
{
struct gud_property_req *properties;
unsigned int i, num_properties;
@@ -289,7 +289,7 @@ static int gud_get_properties(struct gud_device *gdrm)
* but mask out any additions on future devices.
*/
val &= GUD_ROTATION_MASK;
- ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
+ ret = drm_plane_create_rotation_property(&gdrm->plane,
DRM_MODE_ROTATE_0, val);
break;
default:
@@ -338,10 +338,30 @@ static int gud_stats_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
- .check = gud_pipe_check,
- .update = gud_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
+static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check
+};
+
+static const struct drm_crtc_funcs gud_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_plane_helper_funcs gud_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = gud_plane_atomic_check,
+ .atomic_update = gud_plane_atomic_update,
+};
+
+static const struct drm_plane_funcs gud_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
@@ -350,7 +370,7 @@ static const struct drm_mode_config_funcs gud_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static const u64 gud_pipe_modifiers[] = {
+static const u64 gud_plane_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
@@ -443,10 +463,6 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
return PTR_ERR(gdrm);
drm = &gdrm->drm;
- drm->mode_config.funcs = &gud_mode_config_funcs;
- ret = drmm_mode_config_init(drm);
- if (ret)
- return ret;
gdrm->flags = le32_to_cpu(desc.flags);
gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4;
@@ -463,11 +479,28 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (ret)
return ret;
+ usb_set_intfdata(intf, gdrm);
+
+ dma_dev = usb_intf_get_dma_device(intf);
+ if (dma_dev) {
+ drm_dev_set_dma_dev(drm, dma_dev);
+ put_device(dma_dev);
+ } else {
+ dev_warn(dev, "buffer sharing not supported"); /* not an error */
+ }
+
+ /* Mode config init */
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
drm->mode_config.min_width = le32_to_cpu(desc.min_width);
drm->mode_config.max_width = le32_to_cpu(desc.max_width);
drm->mode_config.min_height = le32_to_cpu(desc.min_height);
drm->mode_config.max_height = le32_to_cpu(desc.max_height);
+ drm->mode_config.funcs = &gud_mode_config_funcs;
+ /* Format init */
formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
/* Add room for emulated XRGB8888 */
formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL);
@@ -567,22 +600,30 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
return -ENOMEM;
}
- ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
- formats, num_formats,
- gud_pipe_modifiers, NULL);
+ /* Pipeline init */
+ ret = drm_universal_plane_init(drm, &gdrm->plane, 0,
+ &gud_plane_funcs,
+ formats, num_formats,
+ gud_plane_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
- devm_kfree(dev, formats);
- devm_kfree(dev, formats_dev);
+ drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(&gdrm->plane);
- ret = gud_get_properties(gdrm);
+ ret = gud_plane_add_properties(gdrm);
if (ret) {
- dev_err(dev, "Failed to get properties (error=%d)\n", ret);
+ dev_err(dev, "Failed to add properties (error=%d)\n", ret);
return ret;
}
- drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);
+ ret = drm_crtc_init_with_planes(drm, &gdrm->crtc, &gdrm->plane, NULL,
+ &gud_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+
+ drm_crtc_helper_add(&gdrm->crtc, &gud_crtc_helper_funcs);
ret = gud_get_connectors(gdrm);
if (ret) {
@@ -591,16 +632,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
}
drm_mode_config_reset(drm);
-
- usb_set_intfdata(intf, gdrm);
-
- dma_dev = usb_intf_get_dma_device(intf);
- if (dma_dev) {
- drm_dev_set_dma_dev(drm, dma_dev);
- put_device(dma_dev);
- } else {
- dev_warn(dev, "buffer sharing not supported"); /* not an error */
- }
+ drm_kms_helper_poll_init(drm);
drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
@@ -608,7 +640,8 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (ret)
return ret;
- drm_kms_helper_poll_init(drm);
+ devm_kfree(dev, formats);
+ devm_kfree(dev, formats_dev);
drm_client_setup(drm, NULL);
@@ -620,8 +653,6 @@ static void gud_disconnect(struct usb_interface *interface)
struct gud_device *gdrm = usb_get_intfdata(interface);
struct drm_device *drm = &gdrm->drm;
- drm_dbg(drm, "%s:\n", __func__);
-
drm_kms_helper_poll_fini(drm);
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
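
[Note: the gud_drv.c probe rework is the standard drm_simple_display_pipe to explicit plane/CRTC conversion, with usb_set_intfdata() and the DMA-device setup hoisted ahead of mode-config init. The ordering that matters, boiled down with error handling elided; all calls are core DRM:

	ret = drm_universal_plane_init(drm, plane, 0, &plane_funcs,
				       formats, num_formats, modifiers,
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	drm_plane_helper_add(plane, &plane_helper_funcs);
	drm_plane_enable_fb_damage_clips(plane);

	/* the CRTC is built on top of its primary plane; no cursor plane here */
	ret = drm_crtc_init_with_planes(drm, crtc, plane, NULL,
					&crtc_funcs, NULL);
	drm_crtc_helper_add(crtc, &crtc_helper_funcs);

	/* encoders and connectors attach last, then the state is reset */
	drm_mode_config_reset(drm);
]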
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index d6fb25388722..d27c31648341 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -11,11 +11,11 @@
#include <uapi/drm/drm_fourcc.h>
#include <drm/drm_modes.h>
-#include <drm/drm_simple_kms_helper.h>
struct gud_device {
struct drm_device drm;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
struct work_struct work;
u32 flags;
const struct drm_format_info *xrgb8888_emulation_format;
@@ -62,11 +62,10 @@ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val);
void gud_clear_damage(struct gud_device *gdrm);
void gud_flush_work(struct work_struct *work);
-int gud_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_plane_state,
- struct drm_crtc_state *new_crtc_state);
-void gud_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state);
+int gud_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+void gud_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *atomic_state);
int gud_connector_fill_properties(struct drm_connector_state *connector_state,
struct gud_property_req *properties);
int gud_get_connectors(struct gud_device *gdrm);
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 8d548d08f127..76d77a736d84 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -20,7 +20,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>
#include "gud_internal.h"
@@ -62,7 +61,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
size_t len;
void *buf;
- WARN_ON_ONCE(format->char_per_block[0] != 1);
+ drm_WARN_ON_ONCE(fb->dev, format->char_per_block[0] != 1);
/* Start on a byte boundary */
rect->x1 = ALIGN_DOWN(rect->x1, block_width);
@@ -70,7 +69,7 @@ static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format
height = drm_rect_height(rect);
len = drm_format_info_min_pitch(format, 0, width) * height;
- buf = kmalloc(width * height, GFP_KERNEL);
+ buf = kmalloc_array(height, width, GFP_KERNEL);
if (!buf)
return 0;
@@ -139,7 +138,7 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
break;
default:
- WARN_ON_ONCE(1);
+ drm_WARN_ON_ONCE(fb->dev, 1);
return len;
}
@@ -451,14 +450,15 @@ static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer
gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
}
-int gud_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *new_plane_state,
- struct drm_crtc_state *new_crtc_state)
+int gud_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
- struct drm_plane_state *old_plane_state = pipe->plane.state;
- const struct drm_display_mode *mode = &new_crtc_state->mode;
- struct drm_atomic_state *state = new_plane_state->state;
+ struct gud_device *gdrm = to_gud_device(plane->dev);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
+ const struct drm_display_mode *mode;
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct drm_connector_state *connector_state = NULL;
struct drm_framebuffer *fb = new_plane_state->fb;
@@ -469,20 +469,37 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
int idx, ret;
size_t len;
- if (WARN_ON_ONCE(!fb))
+ if (drm_WARN_ON_ONCE(plane->dev, !fb))
return -EINVAL;
+ if (drm_WARN_ON_ONCE(plane->dev, !crtc))
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ mode = &crtc_state->mode;
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ if (!new_plane_state->visible)
+ return 0;
+
if (old_plane_state->rotation != new_plane_state->rotation)
- new_crtc_state->mode_changed = true;
+ crtc_state->mode_changed = true;
if (old_fb && old_fb->format != format)
- new_crtc_state->mode_changed = true;
+ crtc_state->mode_changed = true;
- if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
+ if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
return 0;
/* Only one connector is supported */
- if (hweight32(new_crtc_state->connector_mask) != 1)
+ if (hweight32(crtc_state->connector_mask) != 1)
return -EINVAL;
if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
@@ -500,7 +517,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
if (!connector_state) {
struct drm_connector_list_iter conn_iter;
- drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
+ drm_connector_list_iter_begin(plane->dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->crtc) {
connector_state = connector->state;
@@ -510,7 +527,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
drm_connector_list_iter_end(&conn_iter);
}
- if (WARN_ON_ONCE(!connector_state))
+ if (drm_WARN_ON_ONCE(plane->dev, !connector_state))
return -ENOENT;
len = struct_size(req, properties,
@@ -522,7 +539,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
gud_from_display_mode(&req->mode, mode);
req->format = gud_from_fourcc(format->format);
- if (WARN_ON_ONCE(!req->format)) {
+ if (drm_WARN_ON_ONCE(plane->dev, !req->format)) {
ret = -EINVAL;
goto out;
}
@@ -544,7 +561,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe,
val = new_plane_state->rotation;
break;
default:
- WARN_ON_ONCE(1);
+ drm_WARN_ON_ONCE(plane->dev, 1);
ret = -EINVAL;
goto out;
}
@@ -567,16 +584,18 @@ out:
return ret;
}
-void gud_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+void gud_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *atomic_state)
{
- struct drm_device *drm = pipe->crtc.dev;
+ struct drm_device *drm = plane->dev;
struct gud_device *gdrm = to_gud_device(drm);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_framebuffer *fb = state->fb;
- struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(atomic_state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(atomic_state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_crtc *crtc = new_state->crtc;
struct drm_rect damage;
+ struct drm_atomic_helper_damage_iter iter;
int ret, idx;
if (crtc->state->mode_changed || !crtc->state->enable) {
@@ -611,7 +630,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
if (ret)
goto ctrl_disable;
- if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage)
gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
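
[Note: two behavioural details in the gud_pipe.c conversion. The atomic check now goes through drm_atomic_helper_check_plane_state() with DRM_PLANE_NO_SCALING and bails early for invisible planes. And where drm_atomic_helper_damage_merged() returned one bounding rectangle, the damage iterator walks each clip, so the device is flushed per-rect rather than once for the union. The iterator idiom, from <drm/drm_damage_helper.h>; flush_rect() is a hypothetical per-rect transfer:

	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;

	drm_atomic_helper_damage_iter_init(&iter, old_plane_state, new_plane_state);
	drm_atomic_for_each_plane_damage(&iter, &clip)
		flush_rect(fb, &clip);	/* one dirty rectangle at a time */
]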
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 45c4eb008ad5..76384b4581bf 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -29,6 +29,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 1e1c87be1204..8a11c2df5b88 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -24,6 +24,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 0d49f168a919..06b5d96e6eaf 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -14,6 +14,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "hyperv_drm.h"
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 945b9482bcb3..7978f8c8108c 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -19,6 +19,9 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "hyperv_drm.h"
@@ -111,11 +114,15 @@ static void hyperv_crtc_helper_atomic_enable(struct drm_crtc *crtc,
crtc_state->mode.hdisplay,
crtc_state->mode.vdisplay,
plane_state->fb->pitches[0]);
+
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = {
.atomic_check = drm_crtc_helper_atomic_check,
+ .atomic_flush = drm_crtc_vblank_atomic_flush,
.atomic_enable = hyperv_crtc_helper_atomic_enable,
+ .atomic_disable = drm_crtc_vblank_atomic_disable,
};
static const struct drm_crtc_funcs hyperv_crtc_funcs = {
@@ -125,6 +132,7 @@ static const struct drm_crtc_funcs hyperv_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static int hyperv_plane_atomic_check(struct drm_plane *plane,
@@ -321,6 +329,10 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv)
return ret;
}
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(dev);
return 0;
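
[Note: hyperv gains software-emulated vblanks here: drm_vblank_init(dev, 1) registers one vblank counter, and the vblank-helper hooks wired in above drive it from a timer, since the paravirt device has no real vblank interrupt. The driver-side obligation is only to turn vblanks on once the CRTC is up, assuming DRM_CRTC_VBLANK_TIMER_FUNCS and the atomic_flush/atomic_disable helpers cover the rest, as the hunks suggest:

	static const struct drm_crtc_helper_funcs funcs = {
		.atomic_check = drm_crtc_helper_atomic_check,
		.atomic_flush = drm_crtc_vblank_atomic_flush,	/* delivers the pending event */
		.atomic_enable = my_crtc_atomic_enable,		/* must call drm_crtc_vblank_on() */
		.atomic_disable = drm_crtc_vblank_atomic_disable,
	};
]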
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 1852e0804942..3562a02ef7ad 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -50,7 +50,7 @@ config DRM_I915_DEBUG
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
select DRM_EXPORT_FOR_TESTS if m
- select DRM_DEBUG_SELFTEST
+ select DRM_KUNIT_TEST if KUNIT
select DMABUF_SELFTESTS
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_WERROR
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 853543443072..4db24050edb0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,6 +13,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# drivers. Define I915 when building i915.
subdir-ccflags-y += -DI915
+# FIXME: Disable tracepoints on i915 for PREEMPT_RT, unfortunately
+# it's an all or nothing flag. You cannot selectively disable
+# only some tracepoints.
+subdir-ccflags-$(CONFIG_PREEMPT_RT) += -DNOTRACE
+
subdir-ccflags-y += -I$(src)
# Please keep these build lists sorted!
@@ -26,12 +31,14 @@ i915-y += \
i915_ioctl.o \
i915_irq.o \
i915_mitigations.o \
+ i915_mmio_range.o \
i915_module.o \
i915_params.o \
i915_pci.o \
i915_scatterlist.o \
i915_switcheroo.o \
i915_sysfs.o \
+ i915_timer_util.o \
i915_utils.o \
intel_clock_gating.o \
intel_cpu_info.o \
@@ -149,6 +156,7 @@ gem-y += \
gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
gem/i915_gem_object.o \
+ gem/i915_gem_object_frontbuffer.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
@@ -227,14 +235,18 @@ i915-y += \
display/intel_bios.o \
display/intel_bo.o \
display/intel_bw.o \
+ display/intel_casf.o \
display/intel_cdclk.o \
display/intel_cmtg.o \
display/intel_color.o \
+ display/intel_colorop.o \
+ display/intel_color_pipeline.o \
display/intel_combo_phy.o \
display/intel_connector.o \
display/intel_crtc.o \
display/intel_crtc_state_dump.o \
display/intel_cursor.o \
+ display/intel_dbuf_bw.o \
display/intel_display.o \
display/intel_display_conversion.o \
display/intel_display_driver.o \
@@ -247,6 +259,7 @@ i915-y += \
display/intel_display_rpm.o \
display/intel_display_rps.o \
display/intel_display_snapshot.o \
+ display/intel_display_utils.o \
display/intel_display_wa.o \
display/intel_dmc.o \
display/intel_dmc_wl.o \
@@ -280,6 +293,7 @@ i915-y += \
display/intel_modeset_setup.o \
display/intel_modeset_verify.o \
display/intel_overlay.o \
+ display/intel_panic.o \
display/intel_pch.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
@@ -295,9 +309,11 @@ i915-y += \
display/intel_vblank.o \
display/intel_vga.o \
display/intel_wm.o \
+ display/skl_prefill.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o \
+ display/vlv_clock.o \
display/vlv_sideband.o
i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
@@ -344,6 +360,7 @@ i915-y += \
display/intel_gmbus.o \
display/intel_hdmi.o \
display/intel_lspcon.o \
+ display/intel_lt_phy.o \
display/intel_lvds.o \
display/intel_panel.o \
display/intel_pfit.o \
@@ -411,7 +428,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o
#
# Enable locally for CONFIG_DRM_I915_WERROR=y. See also scripts/Makefile.build
ifdef CONFIG_DRM_I915_WERROR
- cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(KERNELDOC) -none -Werror $<
+ cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none -Werror $<
endif
# header test
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 87f6b9602b16..a3ff21b2f69f 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -11,7 +11,6 @@
#include "g4x_dp.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -20,6 +19,7 @@
#include "intel_display_power.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_link_training.h"
@@ -424,17 +424,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
drm_dbg_kms(display->drm, "\n");
- if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(display) && port != PORT_A)) {
- intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
- intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
- } else {
- intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
- intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
- }
- intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(display, intel_dp->output_reg);
-
intel_dp->DP &= ~DP_PORT_EN;
intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(display, intel_dp->output_reg);
@@ -612,6 +601,19 @@ cpt_set_link_train(struct intel_dp *intel_dp,
}
static void
+cpt_set_idle_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
+
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
+}
+
+static void
g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
@@ -639,6 +641,19 @@ g4x_set_link_train(struct intel_dp *intel_dp,
intel_de_posting_read(display, intel_dp->output_reg);
}
+static void
+g4x_set_idle_link_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
+
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
+}
+
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1285,12 +1300,10 @@ bool g4x_dp_init(struct intel_display *display,
drm_dbg_kms(display->drm, "No VBT child device for DP-%c\n",
port_name(port));
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return false;
- dig_port->aux_ch = AUX_CH_NONE;
-
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
@@ -1300,8 +1313,6 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp.mutex);
-
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
"DP %c", port_name(port)))
@@ -1342,10 +1353,13 @@ bool g4x_dp_init(struct intel_display *display,
intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((display->platform.ivybridge && port == PORT_A) ||
- (HAS_PCH_CPT(display) && port != PORT_A))
+ (HAS_PCH_CPT(display) && port != PORT_A)) {
dig_port->dp.set_link_train = cpt_set_link_train;
- else
+ dig_port->dp.set_idle_link_train = cpt_set_idle_link_train;
+ } else {
dig_port->dp.set_link_train = g4x_set_link_train;
+ dig_port->dp.set_idle_link_train = g4x_set_idle_link_train;
+ }
if (display->platform.cherryview)
intel_encoder->set_signal_levels = chv_set_signal_levels;
@@ -1368,7 +1382,6 @@ bool g4x_dp_init(struct intel_display *display,
}
dig_port->dp.output_reg = output_reg;
- dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port);
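
[Note: the open-coded "switch to idle pattern" block removed from intel_dp_link_down() becomes a proper per-platform vfunc selected at init time, so callers no longer care about CPT vs. g4x register layouts. The hook lives on struct intel_dp as in the patch; the call site below is illustrative:

	/* after link training completes, before the port is disabled */
	if (intel_dp->set_idle_link_train)
		intel_dp->set_idle_link_train(intel_dp, crtc_state);
]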
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 2610f5702fb9..f6e2d1ed5639 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -19,7 +19,7 @@
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_dpio_phy.h"
-#include "intel_fdi.h"
+#include "intel_encoder.h"
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
@@ -135,11 +135,8 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (HAS_PCH_SPLIT(display)) {
+ if (HAS_PCH_SPLIT(display))
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
- return -EINVAL;
- }
if (display->platform.g4x)
crtc_state->has_hdmi_sink = g4x_compute_has_hdmi_sink(state, crtc);
@@ -690,12 +687,10 @@ bool g4x_hdmi_init(struct intel_display *display,
drm_dbg_kms(display->drm, "No VBT child device for HDMI-%c\n",
port_name(port));
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return false;
- dig_port->aux_ch = AUX_CH_NONE;
-
intel_connector = intel_connector_alloc();
if (!intel_connector)
goto err_connector_alloc;
@@ -704,8 +699,6 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->devdata = devdata;
- mutex_init(&dig_port->hdcp.mutex);
-
if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
"HDMI %c", port_name(port)))
@@ -767,8 +760,6 @@ bool g4x_hdmi_init(struct intel_display *display,
intel_encoder->cloneable |= BIT(INTEL_OUTPUT_HDMI);
dig_port->hdmi.hdmi_reg = hdmi_reg;
- dig_port->dp.output_reg = INVALID_MMIO_REG;
- dig_port->max_lanes = 4;
intel_infoframe_init(dig_port);
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 927fe56aec77..008d339d5c21 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -56,7 +56,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read.
*/
- if (intel_de_wait_for_set(display, IPS_CTL, IPS_ENABLE, 50))
+ if (intel_de_wait_for_set_ms(display, IPS_CTL, IPS_ENABLE, 50))
drm_err(display->drm,
"Timed out waiting for IPS enable\n");
}
@@ -78,7 +78,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_de_wait_for_clear(display, IPS_CTL, IPS_ENABLE, 100))
+ if (intel_de_wait_for_clear_ms(display, IPS_CTL, IPS_ENABLE, 100))
drm_err(display->drm,
"Timed out waiting for IPS disable\n");
} else {
@@ -191,45 +191,46 @@ bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
static bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- /* IPS only exists on ULT machines and is tied to pipe A. */
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!display->params.enable_ips)
- return false;
-
if (crtc_state->pipe_bpp > 24)
return false;
- /*
- * We compare against max which means we must take
- * the increased cdclk requirement into account when
- * calculating the new cdclk.
- *
- * Should measure whether using a lower cdclk w/o IPS
- */
- if (display->platform.broadwell &&
- crtc_state->pixel_rate > display->cdclk.max_cdclk_freq * 95 / 100)
- return false;
-
return true;
}
+static int _hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (display->platform.broadwell)
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+
+ /* no IPS specific limits to worry about */
+ return 0;
+}
+
int hsw_ips_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
+ int min_cdclk;
- if (!display->platform.broadwell)
+ if (!hsw_crtc_state_ips_capable(crtc_state))
return 0;
- if (!hsw_crtc_state_ips_capable(crtc_state))
+ min_cdclk = _hsw_ips_min_cdclk(crtc_state);
+
+ /*
+ * Do not ask for more than the max CDCLK frequency,
+ * if that is not enough IPS will simply not be used.
+ */
+ if (min_cdclk > display->cdclk.max_cdclk_freq)
return 0;
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+ return min_cdclk;
}
int hsw_ips_compute_config(struct intel_atomic_state *state,
@@ -244,6 +245,12 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
if (!hsw_crtc_state_ips_capable(crtc_state))
return 0;
+ if (_hsw_ips_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq)
+ return 0;
+
+ if (!display->params.enable_ips)
+ return 0;
+
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
* enabled and disabled dynamically based on package C states,
@@ -257,18 +264,6 @@ int hsw_ips_compute_config(struct intel_atomic_state *state,
if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
return 0;
- if (display->platform.broadwell) {
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (crtc_state->pixel_rate > intel_cdclk_logical(cdclk_state) * 95 / 100)
- return 0;
- }
-
crtc_state->ips_enabled = true;
return 0;
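
[Note: the hsw_ips.c refactor separates "is IPS possible at all" from "how much CDCLK does it need". On Broadwell the pixel rate must stay within 95% of CDCLK, so the minimum CDCLK is the inverse of that ratio; if it exceeds max_cdclk_freq the code now returns 0 so IPS is simply skipped instead of inflating the CDCLK request. Worked out, assuming rates in kHz:

	/* cdclk * 95/100 >= pixel_rate  =>  cdclk >= pixel_rate * 100/95 */
	int min_cdclk = DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
	/* e.g. pixel_rate = 450000 kHz  ->  min_cdclk = 473685 kHz */
]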
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index f291ced989dc..51ccc6bd5f21 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -11,18 +11,18 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
-#include "intel_bo.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_sprite.h"
@@ -155,8 +155,7 @@ static bool i9xx_plane_has_windowing(struct intel_plane *plane)
i9xx_plane == PLANE_C;
}
-static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_plane_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -355,11 +354,24 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
+ plane_state->ctl = i9xx_plane_ctl(plane_state);
return 0;
}
+static u32 i8xx_plane_surf_offset(const struct intel_plane_state *plane_state)
+{
+ int x = plane_state->view.color_plane[0].x;
+ int y = plane_state->view.color_plane[0].y;
+
+ return intel_fb_xy_to_linear(x, y, plane_state, 0);
+}
+
+u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state)
+{
+ return plane_state->view.color_plane[0].offset;
+}
+
static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -463,7 +475,7 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
int x = plane_state->view.color_plane[0].x;
int y = plane_state->view.color_plane[0].y;
- u32 dspcntr, dspaddr_offset, linear_offset;
+ u32 dspcntr;
dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
@@ -472,13 +484,6 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
crtc_state->async_flip_planes & BIT(plane->id))
dspcntr |= DISP_ASYNC_FLIP;
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- if (DISPLAY_VER(display) >= 4)
- dspaddr_offset = plane_state->view.color_plane[0].offset;
- else
- dspaddr_offset = linear_offset;
-
if (display->platform.cherryview && i9xx_plane == PLANE_B) {
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
@@ -498,7 +503,7 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
} else if (DISPLAY_VER(display) >= 4) {
intel_de_write_fw(display, DSPLINOFF(display, i9xx_plane),
- linear_offset);
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, DSPTILEOFF(display, i9xx_plane),
DISP_OFFSET_Y(y) | DISP_OFFSET_X(x));
}
@@ -511,11 +516,9 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
if (DISPLAY_VER(display) >= 4)
- intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf);
else
- intel_de_write_fw(display, DSPADDR(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPADDR(display, i9xx_plane), plane_state->surf);
}
static void i830_plane_update_arm(struct intel_dsb *dsb,
@@ -604,16 +607,13 @@ g4x_primary_async_flip(struct intel_dsb *dsb,
{
struct intel_display *display = to_intel_display(plane);
u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
- u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
if (async_flip)
dspcntr |= DISP_ASYNC_FLIP;
intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr);
-
- intel_de_write_fw(display, DSPSURF(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf);
}
static void
@@ -624,11 +624,9 @@ vlv_primary_async_flip(struct intel_dsb *dsb,
bool async_flip)
{
struct intel_display *display = to_intel_display(plane);
- u32 dspaddr_offset = plane_state->view.color_plane[0].offset;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane),
- intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
+ intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane), plane_state->surf);
}
static void
@@ -756,10 +754,9 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
static unsigned int
hsw_primary_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
@@ -768,10 +765,9 @@ hsw_primary_max_stride(struct intel_plane *plane,
static unsigned int
ilk_primary_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
@@ -783,10 +779,9 @@ ilk_primary_max_stride(struct intel_plane *plane,
unsigned int
i965_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
@@ -798,8 +793,8 @@ i965_plane_max_stride(struct intel_plane *plane,
static unsigned int
i915_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
if (modifier == I915_FORMAT_MOD_X_TILED)
return 8 * 1024;
@@ -809,8 +804,8 @@ i915_plane_max_stride(struct intel_plane *plane,
static unsigned int
i8xx_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
if (plane->i9xx_plane == PLANE_C)
return 4 * 1024;
@@ -1037,6 +1032,11 @@ intel_primary_plane_create(struct intel_display *display, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
+ if (DISPLAY_VER(display) >= 4)
+ plane->surf_offset = i965_plane_surf_offset;
+ else
+ plane->surf_offset = i8xx_plane_surf_offset;
+
if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
plane->capture_error = g4x_primary_capture_error;
else if (DISPLAY_VER(display) >= 4)
@@ -1175,7 +1175,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
drm_WARN_ON(display->drm, pipe != crtc->pipe);
- intel_fb = intel_bo_alloc_framebuffer();
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
@@ -1188,10 +1188,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
val = intel_de_read(display, DSPCNTR(display, i9xx_plane));
if (DISPLAY_VER(display) >= 4) {
- if (val & DISP_TILED) {
- plane_config->tiling = I915_TILING_X;
+ if (val & DISP_TILED)
fb->modifier = I915_FORMAT_MOD_X_TILED;
- }
if (val & DISP_ROTATE_180)
plane_config->rotation = DRM_MODE_ROTATE_180;
@@ -1203,14 +1201,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
pixel_format = val & DISP_FORMAT_MASK;
fourcc = i9xx_format_to_fourcc(pixel_format);
- fb->format = drm_format_info(fourcc);
+
+ fb->format = drm_get_format_info(display->drm, fourcc, fb->modifier);
if (display->platform.haswell || display->platform.broadwell) {
offset = intel_de_read(display,
DSPOFFSET(display, i9xx_plane));
base = intel_de_read(display, DSPSURF(display, i9xx_plane)) & DISP_ADDR_MASK;
} else if (DISPLAY_VER(display) >= 4) {
- if (plane_config->tiling)
+ if (fb->modifier == I915_FORMAT_MOD_X_TILED)
offset = intel_de_read(display,
DSPTILEOFF(display, i9xx_plane));
else
@@ -1254,24 +1253,21 @@ bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc,
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
- u32 base;
if (!plane_state->uapi.visible)
return false;
- base = intel_plane_ggtt_offset(plane_state);
-
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
- if (plane_config->base == base)
+ if (plane_config->base == plane_state->surf)
return false;
if (DISPLAY_VER(display) >= 4)
- intel_de_write(display, DSPSURF(display, i9xx_plane), base);
+ intel_de_write(display, DSPSURF(display, i9xx_plane), plane_state->surf);
else
- intel_de_write(display, DSPADDR(display, i9xx_plane), base);
+ intel_de_write(display, DSPADDR(display, i9xx_plane), plane_state->surf);
return true;
}
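
[Note: the i9xx_plane.c updates stop recomputing the surface address inline. A per-generation ->surf_offset() hook (i965 uses the tiled view offset, i8xx folds x/y into a linear offset) lets shared plane code cache the final register value in plane_state->surf, which the update, async-flip and fixup paths then write verbatim. Presumably the common code computes it roughly as below; the exact helper is not in this file:

	plane_state->surf = intel_plane_ggtt_offset(plane_state) +
			    plane->surf_offset(plane_state);
]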
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h
index d90546d60855..ec78bf4dd35e 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.h
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
enum pipe;
+struct drm_format_info;
struct drm_framebuffer;
struct intel_crtc;
struct intel_display;
@@ -18,12 +19,13 @@ struct intel_plane_state;
#ifdef I915
unsigned int i965_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation);
unsigned int vlv_plane_min_alignment(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
+u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state);
struct intel_plane *
intel_primary_plane_create(struct intel_display *display, enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 1f9db5118777..01f3803fa09f 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -3,6 +3,12 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/iopoll.h>
+
+#include <drm/drm_print.h>
+
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i9xx_wm.h"
@@ -85,7 +91,8 @@ static const struct cxsr_latency cxsr_latency_table[] = {
static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ const struct dram_info *dram_info = intel_dram_info(display->drm);
+ bool is_ddr3 = dram_info->type == INTEL_DRAM_DDR3;
int i;
for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
@@ -93,15 +100,16 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis
bool is_desktop = !display->platform.mobile;
if (is_desktop == latency->is_desktop &&
- i915->is_ddr3 == latency->is_ddr3 &&
- DIV_ROUND_CLOSEST(i915->fsb_freq, 1000) == latency->fsb_freq &&
- DIV_ROUND_CLOSEST(i915->mem_freq, 1000) == latency->mem_freq)
+ is_ddr3 == latency->is_ddr3 &&
+ DIV_ROUND_CLOSEST(dram_info->fsb_freq, 1000) == latency->fsb_freq &&
+ DIV_ROUND_CLOSEST(dram_info->mem_freq, 1000) == latency->mem_freq)
return latency;
}
drm_dbg_kms(display->drm,
- "Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n",
- i915->is_ddr3 ? "3" : "2", i915->fsb_freq, i915->mem_freq);
+ "Could not find CxSR latency for %s, FSB %u kHz, MEM %u kHz\n",
+ intel_dram_type_str(dram_info->type),
+ dram_info->fsb_freq, dram_info->mem_freq);
return NULL;
}
@@ -109,6 +117,7 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis
static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
{
u32 val;
+ int ret;
vlv_punit_get(display->drm);
@@ -121,8 +130,10 @@ static void chv_set_memory_dvfs(struct intel_display *display, bool enable)
val |= FORCE_DDR_FREQ_REQ_ACK;
vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2),
+ (val & FORCE_DDR_FREQ_REQ_ACK) == 0,
+ 500, 3000, false);
+ if (ret)
drm_err(display->drm,
"timed out waiting for Punit DDR DVFS request\n");
@@ -2286,12 +2297,11 @@ static void i9xx_update_wm(struct intel_display *display)
crtc = single_enabled_crtc(display);
if (display->platform.i915gm && crtc) {
- struct drm_gem_object *obj;
-
- obj = intel_fb_bo(crtc->base.primary->state->fb);
+ const struct drm_framebuffer *fb =
+ crtc->base.primary->state->fb;
/* self-refresh seems busted with untiled */
- if (!intel_bo_is_tiled(obj))
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
crtc = NULL;
}
@@ -3902,6 +3912,7 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
struct vlv_wm_values *wm = &display->wm.vlv;
struct intel_crtc *crtc;
u32 val;
+ int ret;
vlv_read_wm_values(display, wm);
@@ -3928,8 +3939,10 @@ static void vlv_wm_get_hw_state(struct intel_display *display)
val |= FORCE_DDR_FREQ_REQ_ACK;
vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) &
- FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2),
+ (val & FORCE_DDR_FREQ_REQ_ACK) == 0,
+ 500, 3000, false);
+ if (ret) {
drm_dbg_kms(display->drm,
"Punit not acking DDR DVFS request, "
"assuming DDR DVFS is disabled\n");
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 8d9cb73a93a7..9230792960f2 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -25,6 +25,8 @@
* Jani Nikula <jani.nikula@intel.com>
*/
+#include <linux/iopoll.h>
+
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fixed.h>
@@ -33,7 +35,6 @@
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "icl_dsi.h"
#include "icl_dsi_regs.h"
#include "intel_atomic.h"
@@ -46,6 +47,7 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_regs.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_panel.h"
@@ -72,8 +74,12 @@ static int payload_credits_available(struct intel_display *display,
static bool wait_for_header_credits(struct intel_display *display,
enum transcoder dsi_trans, int hdr_credit)
{
- if (wait_for_us(header_credits_available(display, dsi_trans) >=
- hdr_credit, 100)) {
+ int ret, available;
+
+ ret = poll_timeout_us(available = header_credits_available(display, dsi_trans),
+ available >= hdr_credit,
+ 10, 100, false);
+ if (ret) {
drm_err(display->drm, "DSI header credits not released\n");
return false;
}
@@ -84,8 +90,12 @@ static bool wait_for_header_credits(struct intel_display *display,
static bool wait_for_payload_credits(struct intel_display *display,
enum transcoder dsi_trans, int payld_credit)
{
- if (wait_for_us(payload_credits_available(display, dsi_trans) >=
- payld_credit, 100)) {
+ int ret, available;
+
+ ret = poll_timeout_us(available = payload_credits_available(display, dsi_trans),
+ available >= payld_credit,
+ 10, 100, false);
+ if (ret) {
drm_err(display->drm, "DSI payload credits not released\n");
return false;
}
@@ -137,8 +147,11 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for LP TX in progress bit to be cleared */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
- LPTX_IN_PROGRESS), 20))
+
+ ret = intel_de_wait_for_clear_us(display,
+ DSI_LP_MSG(dsi_trans),
+ LPTX_IN_PROGRESS, 20);
+ if (ret)
drm_err(display->drm, "LPTX bit not cleared\n");
}
}
@@ -516,13 +529,14 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
+ int ret;
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE);
- if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE),
- 500))
+ ret = intel_de_wait_for_clear_us(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, 500);
+ if (ret)
drm_err(display->drm, "DDI port:%c buffer idle\n",
port_name(port));
}
@@ -838,9 +852,14 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* wait for link ready */
for_each_dsi_port(port, intel_dsi->ports) {
+ int ret;
+
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) &
- LINK_READY), 2500))
+
+ ret = intel_de_wait_for_set_us(display,
+ DSI_TRANS_FUNC_CONF(dsi_trans),
+ LINK_READY, 2500);
+ if (ret)
drm_err(display->drm, "DSI link not ready\n");
}
}
@@ -1028,8 +1047,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
TRANSCONF_ENABLE);
/* wait for transcoder to be enabled */
- if (intel_de_wait_for_set(display, TRANSCONF(display, dsi_trans),
- TRANSCONF_STATE_ENABLE, 10))
+ if (intel_de_wait_for_set_ms(display, TRANSCONF(display, dsi_trans),
+ TRANSCONF_STATE_ENABLE, 10))
drm_err(display->drm,
"DSI transcoder not enabled\n");
}
@@ -1297,8 +1316,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
TRANSCONF_ENABLE, 0);
/* wait for transcoder to be disabled */
- if (intel_de_wait_for_clear(display, TRANSCONF(display, dsi_trans),
- TRANSCONF_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, dsi_trans),
+ TRANSCONF_STATE_ENABLE, 50))
drm_err(display->drm,
"DSI trancoder not disabled\n");
}
@@ -1321,6 +1340,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
enum port port;
enum transcoder dsi_trans;
u32 tmp;
+ int ret;
/* disable periodic update mode */
if (is_cmd_mode(intel_dsi)) {
@@ -1337,9 +1357,9 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
tmp &= ~LINK_ULPS_TYPE_LP11;
intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp);
- if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) &
- LINK_IN_ULPS),
- 10))
+ ret = intel_de_wait_for_set_us(display, DSI_LP_MSG(dsi_trans),
+ LINK_IN_ULPS, 10);
+ if (ret)
drm_err(display->drm, "DSI link not in ULPS\n");
}
@@ -1367,14 +1387,16 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
+ int ret;
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0);
- if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE),
- 8))
+ ret = intel_de_wait_for_set_us(display, DDI_BUF_CTL(port),
+ DDI_BUF_IS_IDLE, 8);
+
+ if (ret)
drm_err(display->drm,
"DDI port:%c buffer not idle\n",
port_name(port));
@@ -1630,7 +1652,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
if (ret)
return ret;
- crtc_state->dsc.compression_enable = true;
+ intel_dsc_enable_on_crtc(crtc_state);
return 0;
}
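
The DSI waits above rely on microsecond-granularity variants of the display register wait helpers. Their definitions are not part of this diff; a plausible shape, assuming they simply wrap poll_timeout_us() around intel_de_read() (names and signature are assumptions for illustration):

/* Hypothetical sketch of the _us variants used above; the real helpers
 * live in intel_de.h and may differ. */
static inline int
example_de_wait_for_set_us(struct intel_display *display,
			   i915_reg_t reg, u32 mask, unsigned int timeout_us)
{
	u32 val;

	/* Poll every 10 us until all bits in @mask are set, or time out. */
	return poll_timeout_us(val = intel_de_read(display, reg),
			       (val & mask) == mask,
			       10, timeout_us, false);
}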
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 1addd6288241..68c01932f7b4 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -11,10 +11,10 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index dfdde8e4eabe..6372f533f65b 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -16,6 +16,14 @@
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#define SILENCE_PERIOD_MIN_TIME 80
+#define SILENCE_PERIOD_MAX_TIME 180
+#define SILENCE_PERIOD_TIME (SILENCE_PERIOD_MIN_TIME + \
+ (SILENCE_PERIOD_MAX_TIME - \
+ SILENCE_PERIOD_MIN_TIME) / 2)
+
+#define LFPS_CYCLE_COUNT 10
+
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp)
{
return intel_dp->alpm_dpcd & DP_ALPM_CAP;
@@ -41,75 +49,41 @@ void intel_alpm_init(struct intel_dp *intel_dp)
return;
intel_dp->alpm_dpcd = dpcd;
- mutex_init(&intel_dp->alpm_parameters.lock);
+ mutex_init(&intel_dp->alpm.lock);
}
-/*
- * See Bspec: 71632 for the table
- *
- * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2)
- *
- * Half cycle duration:
- *
- * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns
- * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) )
- *
- * Link rates 5.4 - 8.1
- * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10
- * LFPS Period chosen is the mid-point of the min:max values from the table
- * FLOOR( LFPS Period in Symbol clocks /
- * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) )
- */
-static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
- int *silence_period,
- int *lfps_half_cycle)
+static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state)
{
- switch (link_rate) {
- case 162000:
- *silence_period = 20;
- *lfps_half_cycle = 5;
- break;
- case 216000:
- *silence_period = 27;
- *lfps_half_cycle = 7;
- break;
- case 243000:
- *silence_period = 31;
- *lfps_half_cycle = 8;
- break;
- case 270000:
- *silence_period = 34;
- *lfps_half_cycle = 9;
- break;
- case 324000:
- *silence_period = 41;
- *lfps_half_cycle = 11;
- break;
- case 432000:
- *silence_period = 56;
- *lfps_half_cycle = 15;
- break;
- case 540000:
- *silence_period = 69;
- *lfps_half_cycle = 12;
- break;
- case 648000:
- *silence_period = 84;
- *lfps_half_cycle = 15;
- break;
- case 675000:
- *silence_period = 87;
- *lfps_half_cycle = 15;
- break;
- case 810000:
- *silence_period = 104;
- *lfps_half_cycle = 19;
- break;
- default:
- *silence_period = *lfps_half_cycle = -1;
- return false;
+ return SILENCE_PERIOD_TIME * intel_dp_link_symbol_clock(crtc_state->port_clock) /
+ 1000 / 1000;
+}
+
+static void get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state,
+ int *min, int *max)
+{
+ if (crtc_state->port_clock < 540000) {
+ *min = 65 * LFPS_CYCLE_COUNT;
+ *max = 75 * LFPS_CYCLE_COUNT;
+ } else {
+ *min = 140;
+ *max = 800;
}
- return true;
+}
+
+static int get_lfps_cycle_time(const struct intel_crtc_state *crtc_state)
+{
+ int tlfps_cycle_min, tlfps_cycle_max;
+
+ get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min,
+ &tlfps_cycle_max);
+
+ return tlfps_cycle_min + (tlfps_cycle_max - tlfps_cycle_min) / 2;
+}
+
+static int get_lfps_half_cycle_clocks(const struct intel_crtc_state *crtc_state)
+{
+ return get_lfps_cycle_time(crtc_state) * crtc_state->port_clock / 1000 /
+ 1000 / (2 * LFPS_CYCLE_COUNT);
}
/*
@@ -131,40 +105,36 @@ static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate,
* tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) )
* TPS4 Length = 252 Symbols
*/
-static int _lnl_compute_aux_less_wake_time(int port_clock)
+static int _lnl_compute_aux_less_wake_time(const struct intel_crtc_state *crtc_state)
{
int tphy2_p2_to_p0 = 12 * 1000;
- int tlfps_period_max = 800;
- int tsilence_max = 180;
int t1 = 50 * 1000;
int tps4 = 252;
/* port_clock is link rate in 10kbit/s units */
- int tml_phy_lock = 1000 * 1000 * tps4 / port_clock;
+ int tml_phy_lock = 1000 * 1000 * tps4 / crtc_state->port_clock;
int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1;
int t2 = num_ml_phy_lock * tml_phy_lock;
int tcds = 1 * t2;
- return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max +
- t1 + tcds, 1000);
+ return DIV_ROUND_UP(tphy2_p2_to_p0 + get_lfps_cycle_time(crtc_state) +
+ SILENCE_PERIOD_TIME + t1 + tcds, 1000);
}
static int
_lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int aux_less_wake_time, aux_less_wake_lines, silence_period,
lfps_half_cycle;
aux_less_wake_time =
- _lnl_compute_aux_less_wake_time(crtc_state->port_clock);
+ _lnl_compute_aux_less_wake_time(crtc_state);
aux_less_wake_lines = intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode,
aux_less_wake_time);
+ silence_period = get_silence_period_symbols(crtc_state);
- if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock,
- &silence_period,
- &lfps_half_cycle))
- return false;
+ lfps_half_cycle = get_lfps_half_cycle_clocks(crtc_state);
if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK ||
silence_period > PORT_ALPM_CTL_SILENCE_PERIOD_MASK ||
@@ -174,15 +144,15 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp,
if (display->params.psr_safest_params)
aux_less_wake_lines = ALPM_CTL_AUX_LESS_WAKE_TIME_MASK;
- intel_dp->alpm_parameters.aux_less_wake_lines = aux_less_wake_lines;
- intel_dp->alpm_parameters.silence_period_sym_clocks = silence_period;
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms = lfps_half_cycle;
+ crtc_state->alpm_state.aux_less_wake_lines = aux_less_wake_lines;
+ crtc_state->alpm_state.silence_period_sym_clocks = silence_period;
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms = lfps_half_cycle;
return true;
}
static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int check_entry_lines;
@@ -203,7 +173,7 @@ static bool _lnl_compute_alpm_params(struct intel_dp *intel_dp,
if (display->params.psr_safest_params)
check_entry_lines = 15;
- intel_dp->alpm_parameters.check_entry_lines = check_entry_lines;
+ crtc_state->alpm_state.check_entry_lines = check_entry_lines;
return true;
}
@@ -234,7 +204,7 @@ static int io_buffer_wake_time(const struct intel_crtc_state *crtc_state)
}
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(intel_dp);
int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time;
@@ -272,8 +242,8 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
- intel_dp->alpm_parameters.io_wake_lines = max(io_wake_lines, 7);
- intel_dp->alpm_parameters.fast_wake_lines = max(fast_wake_lines, 7);
+ crtc_state->alpm_state.io_wake_lines = max(io_wake_lines, 7);
+ crtc_state->alpm_state.fast_wake_lines = max(fast_wake_lines, 7);
return true;
}
@@ -287,12 +257,12 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
int waketime_in_lines, first_sdp_position;
int context_latency, guardband;
- if (intel_dp->alpm_parameters.lobf_disable_debug) {
+ if (intel_dp->alpm.lobf_disable_debug) {
drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
return;
}
- if (intel_dp->alpm_parameters.sink_alpm_error)
+ if (intel_dp->alpm.sink_alpm_error)
return;
if (!intel_dp_is_edp(intel_dp))
@@ -323,9 +293,9 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_vdisplay - context_latency;
first_sdp_position = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start;
if (intel_alpm_aux_less_wake_supported(intel_dp))
- waketime_in_lines = intel_dp->alpm_parameters.io_wake_lines;
+ waketime_in_lines = crtc_state->alpm_state.io_wake_lines;
else
- waketime_in_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
+ waketime_in_lines = crtc_state->alpm_state.aux_less_wake_lines;
crtc_state->has_lobf = (context_latency + guardband) >
(first_sdp_position + waketime_in_lines);
@@ -342,7 +312,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
!crtc_state->has_lobf))
return;
- mutex_lock(&intel_dp->alpm_parameters.lock);
+ mutex_lock(&intel_dp->alpm.lock);
/*
* Panel Replay on eDP is always using ALPM aux less. I.e. no need to
* check panel support at this point.
@@ -351,7 +321,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
alpm_ctl = ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE |
ALPM_CTL_AUX_LESS_SLEEP_HOLD_TIME_50_SYMBOLS |
- ALPM_CTL_AUX_LESS_WAKE_TIME(intel_dp->alpm_parameters.aux_less_wake_lines);
+ ALPM_CTL_AUX_LESS_WAKE_TIME(crtc_state->alpm_state.aux_less_wake_lines);
if (intel_dp->as_sdp_supported) {
u32 pr_alpm_ctl = PR_ALPM_CTL_ADAPTIVE_SYNC_SDP_POSITION_T1;
@@ -369,7 +339,7 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
} else {
alpm_ctl = ALPM_CTL_EXTENDED_FAST_WAKE_ENABLE |
- ALPM_CTL_EXTENDED_FAST_WAKE_TIME(intel_dp->alpm_parameters.fast_wake_lines);
+ ALPM_CTL_EXTENDED_FAST_WAKE_TIME(crtc_state->alpm_state.fast_wake_lines);
}
if (crtc_state->has_lobf) {
@@ -377,17 +347,17 @@ static void lnl_alpm_configure(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "Link off between frames (LOBF) enabled\n");
}
- alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(intel_dp->alpm_parameters.check_entry_lines);
+ alpm_ctl |= ALPM_CTL_ALPM_ENTRY_CHECK(crtc_state->alpm_state.check_entry_lines);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), alpm_ctl);
- mutex_unlock(&intel_dp->alpm_parameters.lock);
+ mutex_unlock(&intel_dp->alpm.lock);
}
void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
lnl_alpm_configure(intel_dp, crtc_state);
- intel_dp->alpm_parameters.transcoder = crtc_state->cpu_transcoder;
+ intel_dp->alpm.transcoder = crtc_state->cpu_transcoder;
}
void intel_alpm_port_configure(struct intel_dp *intel_dp,
@@ -405,14 +375,14 @@ void intel_alpm_port_configure(struct intel_dp *intel_dp,
PORT_ALPM_CTL_MAX_PHY_SWING_SETUP(15) |
PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) |
PORT_ALPM_CTL_SILENCE_PERIOD(
- intel_dp->alpm_parameters.silence_period_sym_clocks);
- lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) |
+ crtc_state->alpm_state.silence_period_sym_clocks);
+ lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(LFPS_CYCLE_COUNT) |
PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms) |
PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) |
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms) |
PORT_ALPM_LFPS_CTL_LAST_LFPS_HALF_CYCLE_DURATION(
- intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms);
+ crtc_state->alpm_state.lfps_half_cycle_num_of_syms);
}
intel_de_write(display, PORT_ALPM_CTL(port), alpm_ctl_val);
@@ -450,10 +420,10 @@ void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
continue;
if (old_crtc_state->has_lobf) {
- mutex_lock(&intel_dp->alpm_parameters.lock);
+ mutex_lock(&intel_dp->alpm.lock);
intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
- mutex_unlock(&intel_dp->alpm_parameters.lock);
+ mutex_unlock(&intel_dp->alpm.lock);
}
}
}
@@ -547,7 +517,7 @@ i915_edp_lobf_debug_get(void *data, u64 *val)
struct intel_connector *connector = data;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- *val = intel_dp->alpm_parameters.lobf_disable_debug;
+ *val = intel_dp->alpm.lobf_disable_debug;
return 0;
}
@@ -558,7 +528,7 @@ i915_edp_lobf_debug_set(void *data, u64 val)
struct intel_connector *connector = data;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- intel_dp->alpm_parameters.lobf_disable_debug = val;
+ intel_dp->alpm.lobf_disable_debug = val;
return 0;
}
@@ -586,12 +556,12 @@ void intel_alpm_lobf_debugfs_add(struct intel_connector *connector)
void intel_alpm_disable(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->alpm_parameters.transcoder;
+ enum transcoder cpu_transcoder = intel_dp->alpm.transcoder;
if (DISPLAY_VER(display) < 20 || !intel_dp->alpm_dpcd)
return;
- mutex_lock(&intel_dp->alpm_parameters.lock);
+ mutex_lock(&intel_dp->alpm.lock);
intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE |
@@ -602,7 +572,7 @@ void intel_alpm_disable(struct intel_dp *intel_dp)
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
drm_dbg_kms(display->drm, "Disabling ALPM\n");
- mutex_unlock(&intel_dp->alpm_parameters.lock);
+ mutex_unlock(&intel_dp->alpm.lock);
}
bool intel_alpm_get_error(struct intel_dp *intel_dp)
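
The table removal above works because the per-rate values can be recomputed from the mid-point formulas. A worked check against the removed table, assuming port_clock is the link rate in 10 kb/s units and intel_dp_link_symbol_clock() returns kHz:

/*
 * For a 2.7 Gb/s link (port_clock = 270000):
 *
 *   SILENCE_PERIOD_TIME = 80 + (180 - 80) / 2 = 130 ns
 *   silence_period      = 130 * 270000 / 1000 / 1000 = 35 symbols (table: 34)
 *
 *   tLFPS cycle (< 5.4 GHz) = 65 * 10 + (75 - 65) * 10 / 2 = 700 ns
 *   lfps_half_cycle         = 700 * 270000 / 1000 / 1000 / (2 * 10) = 9 (table: 9)
 *
 * The LFPS half-cycle values reproduce the old table exactly; the silence
 * periods land one symbol above it, presumably due to rounding in the
 * original table.
 */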
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index a861c20b5d79..53599b464dea 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -17,7 +17,7 @@ struct intel_crtc;
void intel_alpm_init(struct intel_dp *intel_dp);
bool intel_alpm_compute_params(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
+ struct intel_crtc_state *crtc_state);
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index e007380e9a63..a68fdbd2acb9 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -13,7 +13,6 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
@@ -21,6 +20,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp_aux_backlight.h"
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
@@ -236,7 +236,8 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0);
+ if (drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0))
+ return;
if (panel->backlight.combination_mode) {
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 9c268bed091d..4b41068e9e35 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -32,14 +32,15 @@
#include <drm/display/drm_dsc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "soc/intel_rom.h"
-#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_gmbus.h"
#define _INTEL_BIOS_PRIVATE
@@ -1566,10 +1567,7 @@ parse_psr(struct intel_display *display,
panel->vbt.psr.full_link = psr_table->full_link;
panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup;
-
- /* Allowed VBT values goes from 0 to 15 */
- panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
- psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
+ panel->vbt.psr.idle_frames = psr_table->idle_frames;
/*
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
@@ -2480,6 +2478,25 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
+static u32 edp_rate_override_mask(int rate)
+{
+ switch (rate) {
+ case 2000000: return BDB_263_VBT_EDP_LINK_RATE_20;
+ case 1350000: return BDB_263_VBT_EDP_LINK_RATE_13_5;
+ case 1000000: return BDB_263_VBT_EDP_LINK_RATE_10;
+ case 810000: return BDB_263_VBT_EDP_LINK_RATE_8_1;
+ case 675000: return BDB_263_VBT_EDP_LINK_RATE_6_75;
+ case 540000: return BDB_263_VBT_EDP_LINK_RATE_5_4;
+ case 432000: return BDB_263_VBT_EDP_LINK_RATE_4_32;
+ case 324000: return BDB_263_VBT_EDP_LINK_RATE_3_24;
+ case 270000: return BDB_263_VBT_EDP_LINK_RATE_2_7;
+ case 243000: return BDB_263_VBT_EDP_LINK_RATE_2_43;
+ case 216000: return BDB_263_VBT_EDP_LINK_RATE_2_16;
+ case 162000: return BDB_263_VBT_EDP_LINK_RATE_1_62;
+ default: return 0;
+ }
+}
+
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->display->vbt.version < 216)
@@ -2499,6 +2516,19 @@ int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata)
return devdata->child.dp_max_lane_count + 1;
}
+bool
+intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata,
+ int rate)
+{
+ if (!devdata || devdata->display->vbt.version < 263)
+ return false;
+
+ if (devdata->child.edp_data_rate_override == BDB_263_VBT_EDP_RATES_MASK)
+ return false;
+
+ return devdata->child.edp_data_rate_override & edp_rate_override_mask(rate);
+}
+
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -2747,8 +2777,10 @@ static int child_device_expected_size(u16 version)
{
BUILD_BUG_ON(sizeof(struct child_device_config) < 40);
- if (version > 256)
+ if (version > 263)
return -ENOENT;
+ else if (version >= 263)
+ return 44;
else if (version >= 256)
return 40;
else if (version >= 216)
@@ -3112,7 +3144,6 @@ err_free_rom:
static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display,
size_t *sizep)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
const struct vbt_header *vbt = NULL;
vbt = firmware_get_vbt(display, sizep);
@@ -3126,11 +3157,11 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display
*/
if (!vbt && display->platform.dgfx)
with_intel_display_rpm(display)
- vbt = oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash");
+ vbt = oprom_get_vbt(display, intel_rom_spi(display->drm), sizep, "SPI flash");
if (!vbt)
with_intel_display_rpm(display)
- vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM");
+ vbt = oprom_get_vbt(display, intel_rom_pci(display->drm), sizep, "PCI ROM");
return vbt;
}
@@ -3743,8 +3774,6 @@ DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt);
void intel_bios_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_vbt", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_vbt", 0444, display->drm->debugfs_root,
display, &intel_bios_vbt_fops);
}
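
A hypothetical caller-side sketch of the new VBT eDP rate-override query; the real consumer would live in the eDP rate-table code (e.g. intel_dp.c), and all names besides intel_bios_encoder_reject_edp_rate() are illustrative:

static int example_filter_edp_rates(const struct intel_bios_encoder_data *devdata,
				    const int *rates, int num_rates,
				    int *out_rates)
{
	int i, n = 0;

	for (i = 0; i < num_rates; i++) {
		/* Skip rates the VBT (version >= 263) explicitly rejects */
		if (intel_bios_encoder_reject_edp_rate(devdata, rates[i]))
			continue;
		out_rates[n++] = rates[i];
	}

	return n;	/* number of surviving rates */
}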
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index 6cd7a011b8c4..f9e438b2787b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -50,180 +50,6 @@ enum intel_backlight_type {
INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE,
};
-/*
- * MIPI Sequence Block definitions
- *
- * Note the VBT spec has AssertReset / DeassertReset swapped from their
- * usual naming, we use the proper names here to avoid confusion when
- * reading the code.
- */
-enum mipi_seq {
- MIPI_SEQ_END = 0,
- MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
- MIPI_SEQ_INIT_OTP,
- MIPI_SEQ_DISPLAY_ON,
- MIPI_SEQ_DISPLAY_OFF,
- MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
- MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
- MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
- MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
- MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
- MIPI_SEQ_POWER_ON, /* sequence block v3+ */
- MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
- MIPI_SEQ_MAX
-};
-
-enum mipi_seq_element {
- MIPI_SEQ_ELEM_END = 0,
- MIPI_SEQ_ELEM_SEND_PKT,
- MIPI_SEQ_ELEM_DELAY,
- MIPI_SEQ_ELEM_GPIO,
- MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
- MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
- MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
- MIPI_SEQ_ELEM_MAX
-};
-
-#define MIPI_DSI_UNDEFINED_PANEL_ID 0
-#define MIPI_DSI_GENERIC_PANEL_ID 1
-
-struct mipi_config {
- u16 panel_id;
-
- /* General Params */
- u32 enable_dithering:1;
- u32 rsvd1:1;
- u32 is_bridge:1;
-
- u32 panel_arch_type:2;
- u32 is_cmd_mode:1;
-
-#define NON_BURST_SYNC_PULSE 0x1
-#define NON_BURST_SYNC_EVENTS 0x2
-#define BURST_MODE 0x3
- u32 video_transfer_mode:2;
-
- u32 cabc_supported:1;
-#define PPS_BLC_PMIC 0
-#define PPS_BLC_SOC 1
- u32 pwm_blc:1;
-
- /* Bit 13:10 */
-#define PIXEL_FORMAT_RGB565 0x1
-#define PIXEL_FORMAT_RGB666 0x2
-#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
-#define PIXEL_FORMAT_RGB888 0x4
- u32 videomode_color_format:4;
-
- /* Bit 15:14 */
-#define ENABLE_ROTATION_0 0x0
-#define ENABLE_ROTATION_90 0x1
-#define ENABLE_ROTATION_180 0x2
-#define ENABLE_ROTATION_270 0x3
- u32 rotation:2;
- u32 bta_enabled:1;
- u32 rsvd2:15;
-
- /* 2 byte Port Description */
-#define DUAL_LINK_NOT_SUPPORTED 0
-#define DUAL_LINK_FRONT_BACK 1
-#define DUAL_LINK_PIXEL_ALT 2
- u16 dual_link:2;
- u16 lane_cnt:2;
- u16 pixel_overlap:3;
- u16 rgb_flip:1;
-#define DL_DCS_PORT_A 0x00
-#define DL_DCS_PORT_C 0x01
-#define DL_DCS_PORT_A_AND_C 0x02
- u16 dl_dcs_cabc_ports:2;
- u16 dl_dcs_backlight_ports:2;
- u16 rsvd3:4;
-
- u16 rsvd4;
-
- u8 rsvd5;
- u32 target_burst_mode_freq;
- u32 dsi_ddr_clk;
- u32 bridge_ref_clk;
-
-#define BYTE_CLK_SEL_20MHZ 0
-#define BYTE_CLK_SEL_10MHZ 1
-#define BYTE_CLK_SEL_5MHZ 2
- u8 byte_clk_sel:2;
-
- u8 rsvd6:6;
-
- /* DPHY Flags */
- u16 dphy_param_valid:1;
- u16 eot_pkt_disabled:1;
- u16 enable_clk_stop:1;
- u16 rsvd7:13;
-
- u32 hs_tx_timeout;
- u32 lp_rx_timeout;
- u32 turn_around_timeout;
- u32 device_reset_timer;
- u32 master_init_timer;
- u32 dbi_bw_timer;
- u32 lp_byte_clk_val;
-
- /* 4 byte Dphy Params */
- u32 prepare_cnt:6;
- u32 rsvd8:2;
- u32 clk_zero_cnt:8;
- u32 trail_cnt:5;
- u32 rsvd9:3;
- u32 exit_zero_cnt:6;
- u32 rsvd10:2;
-
- u32 clk_lane_switch_cnt;
- u32 hl_switch_cnt;
-
- u32 rsvd11[6];
-
- /* timings based on dphy spec */
- u8 tclk_miss;
- u8 tclk_post;
- u8 rsvd12;
- u8 tclk_pre;
- u8 tclk_prepare;
- u8 tclk_settle;
- u8 tclk_term_enable;
- u8 tclk_trail;
- u16 tclk_prepare_clkzero;
- u8 rsvd13;
- u8 td_term_enable;
- u8 teot;
- u8 ths_exit;
- u8 ths_prepare;
- u16 ths_prepare_hszero;
- u8 rsvd14;
- u8 ths_settle;
- u8 ths_skip;
- u8 ths_trail;
- u8 tinit;
- u8 tlpx;
- u8 rsvd15[3];
-
- /* GPIOs */
- u8 panel_enable;
- u8 bl_enable;
- u8 pwm_enable;
- u8 reset_r_n;
- u8 pwr_down_r;
- u8 stdby_r_n;
-
-} __packed;
-
-/* all delays have a unit of 100us */
-struct mipi_pps_data {
- u16 panel_on_delay;
- u16 bl_enable_delay;
- u16 bl_disable_delay;
- u16 panel_off_delay;
- u16 panel_power_cycle_delay;
-} __packed;
-
void intel_bios_init(struct intel_display *display);
void intel_bios_init_panel_early(struct intel_display *display,
struct intel_panel *panel,
@@ -259,6 +85,8 @@ bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata)
bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata);
bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata);
enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata);
+bool intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata,
+ int rate);
enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index 65d64f79a4bd..f3687eb63467 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -2,7 +2,7 @@
/* Copyright © 2024 Intel Corporation */
#include <drm/drm_panic.h>
-#include "display/intel_display_types.h"
+
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -29,11 +29,6 @@ bool intel_bo_is_protected(struct drm_gem_object *obj)
return i915_gem_object_is_protected(to_intel_bo(obj));
}
-void intel_bo_flush_if_display(struct drm_gem_object *obj)
-{
- i915_gem_object_flush_if_display(to_intel_bo(obj));
-}
-
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
return i915_gem_fb_mmap(to_intel_bo(obj), vma);
@@ -44,33 +39,43 @@ int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, i
return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size);
}
-struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
+struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *_obj)
{
- return i915_gem_object_get_frontbuffer(to_intel_bo(obj));
-}
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct i915_frontbuffer *front;
-struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
- struct intel_frontbuffer *front)
-{
- return i915_gem_object_set_frontbuffer(to_intel_bo(obj), front);
+ front = i915_gem_object_frontbuffer_get(obj);
+ if (!front)
+ return NULL;
+
+ return &front->base;
}
-void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
{
- i915_debugfs_describe_obj(m, to_intel_bo(obj));
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_frontbuffer_ref(front);
}
-struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
+void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
{
- return i915_gem_object_alloc_framebuffer();
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ return i915_gem_object_frontbuffer_put(front);
}
-int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
+void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *_front)
{
- return i915_gem_object_panic_setup(sb);
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_flush_if_display(front->obj);
}
-void intel_bo_panic_finish(struct intel_framebuffer *fb)
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
- return i915_gem_object_panic_finish(fb);
+ i915_debugfs_describe_obj(m, to_intel_bo(obj));
}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index 97087a64d23b..fc05f680dc76 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -16,17 +16,14 @@ bool intel_bo_is_tiled(struct drm_gem_object *obj);
bool intel_bo_is_userptr(struct drm_gem_object *obj);
bool intel_bo_is_shmem(struct drm_gem_object *obj);
bool intel_bo_is_protected(struct drm_gem_object *obj);
-void intel_bo_flush_if_display(struct drm_gem_object *obj);
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size);
-struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj);
-struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
- struct intel_frontbuffer *front);
+struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj);
+void intel_bo_frontbuffer_ref(struct intel_frontbuffer *front);
+void intel_bo_frontbuffer_put(struct intel_frontbuffer *front);
+void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front);
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
-struct intel_framebuffer *intel_bo_alloc_framebuffer(void);
-int intel_bo_panic_setup(struct drm_scanout_buffer *sb);
-void intel_bo_panic_finish(struct intel_framebuffer *fb);
#endif /* __INTEL_BO__ */
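
The intel_bo frontbuffer interface changes here from get/set on the GEM object to a get/ref/put reference scheme. A hypothetical caller-side sketch, assuming intel_bo_frontbuffer_get() returns a reference (or NULL) that must be balanced with intel_bo_frontbuffer_put():

static void example_flush_frontbuffer(struct drm_gem_object *obj)
{
	struct intel_frontbuffer *front;

	front = intel_bo_frontbuffer_get(obj);	/* may return NULL */
	if (!front)
		return;

	intel_bo_frontbuffer_flush_for_display(front);
	intel_bo_frontbuffer_put(front);	/* drop the reference taken above */
}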
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index d29a755612de..1f6461be50ef 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -4,31 +4,25 @@
*/
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_print.h>
#include "soc/intel_dram.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
-#include "intel_atomic.h"
#include "intel_bw.h"
-#include "intel_cdclk.h"
+#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_uncore.h"
#include "skl_watermark.h"
-struct intel_dbuf_bw {
- unsigned int max_bw[I915_MAX_DBUF_SLICES];
- u8 active_planes[I915_MAX_DBUF_SLICES];
-};
-
struct intel_bw_state {
struct intel_global_state base;
- struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
/*
* Contains a bit mask, used to determine, whether correspondent
@@ -359,7 +353,7 @@ static int icl_get_qgv_points(struct intel_display *display,
for (i = 0; i < qi->num_psf_points; i++)
drm_dbg_kms(display->drm,
- "PSF GV %d: CLK=%d \n",
+ "PSF GV %d: CLK=%d\n",
i, qi->psf_points[i].clk);
}
@@ -811,72 +805,40 @@ void intel_bw_init_hw(struct intel_display *display)
if (!HAS_DISPLAY(display))
return;
- if (DISPLAY_VERx100(display) >= 3002)
- tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
- else if (DISPLAY_VER(display) >= 30)
- tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
- else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
- dram_info->type == INTEL_DRAM_GDDR_ECC)
- xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
- else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
- xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
- else if (DISPLAY_VER(display) >= 14)
+ /*
+ * Starting with Xe3p_LPD, the hardware tells us whether memory has ECC
+ * enabled that would impact display bandwidth. However, so far there
+ * are no instructions in Bspec on how to handle that case. Let's
+ * complain if we ever find such a scenario.
+ */
+ if (DISPLAY_VER(display) >= 35)
+ drm_WARN_ON(display->drm, dram_info->ecc_impacting_de_bw);
+
+ if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VERx100(display) == 3002)
+ tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
+ else
+ tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
+ } else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) {
+ if (dram_info->type == INTEL_DRAM_GDDR_ECC)
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
+ else
+ xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
+ } else if (DISPLAY_VER(display) >= 14) {
tgl_get_bw_info(display, dram_info, &mtl_sa_info);
- else if (display->platform.dg2)
+ } else if (display->platform.dg2) {
dg2_get_bw_info(display);
- else if (display->platform.alderlake_p)
+ } else if (display->platform.alderlake_p) {
tgl_get_bw_info(display, dram_info, &adlp_sa_info);
- else if (display->platform.alderlake_s)
+ } else if (display->platform.alderlake_s) {
tgl_get_bw_info(display, dram_info, &adls_sa_info);
- else if (display->platform.rocketlake)
+ } else if (display->platform.rocketlake) {
tgl_get_bw_info(display, dram_info, &rkl_sa_info);
- else if (DISPLAY_VER(display) == 12)
+ } else if (DISPLAY_VER(display) == 12) {
tgl_get_bw_info(display, dram_info, &tgl_sa_info);
- else if (DISPLAY_VER(display) == 11)
+ } else if (DISPLAY_VER(display) == 11) {
icl_get_bw_info(display, dram_info, &icl_sa_info);
-}
-
-static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
-{
- /*
- * We assume cursors are small enough
- * to not not cause bandwidth problems.
- */
- return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
-}
-
-static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- unsigned int data_rate = 0;
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- /*
- * We assume cursors are small enough
- * to not not cause bandwidth problems.
- */
- if (plane_id == PLANE_CURSOR)
- continue;
-
- data_rate += crtc_state->data_rate[plane_id];
-
- if (DISPLAY_VER(display) < 11)
- data_rate += crtc_state->data_rate_y[plane_id];
}
-
- return data_rate;
-}
-
-/* "Maximum Pipe Read Bandwidth" */
-static int intel_bw_crtc_min_cdclk(struct intel_display *display,
- unsigned int data_rate)
-{
- if (DISPLAY_VER(display) < 12)
- return 0;
-
- return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
}
static unsigned int intel_bw_num_active_planes(struct intel_display *display,
@@ -894,14 +856,13 @@ static unsigned int intel_bw_num_active_planes(struct intel_display *display,
static unsigned int intel_bw_data_rate(struct intel_display *display,
const struct intel_bw_state *bw_state)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int data_rate = 0;
enum pipe pipe;
for_each_pipe(display, pipe)
data_rate += bw_state->data_rate[pipe];
- if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
+ if (DISPLAY_VER(display) >= 13 && intel_display_vtd_active(display))
data_rate = DIV_ROUND_UP(data_rate * 105, 100);
return data_rate;
@@ -1262,223 +1223,6 @@ static int intel_bw_check_qgv_points(struct intel_display *display,
old_bw_state, new_bw_state);
}
-static bool intel_dbuf_bw_changed(struct intel_display *display,
- const struct intel_dbuf_bw *old_dbuf_bw,
- const struct intel_dbuf_bw *new_dbuf_bw)
-{
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(display, slice) {
- if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
- old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
- return true;
- }
-
- return false;
-}
-
-static bool intel_bw_state_changed(struct intel_display *display,
- const struct intel_bw_state *old_bw_state,
- const struct intel_bw_state *new_bw_state)
-{
- enum pipe pipe;
-
- for_each_pipe(display, pipe) {
- const struct intel_dbuf_bw *old_dbuf_bw =
- &old_bw_state->dbuf_bw[pipe];
- const struct intel_dbuf_bw *new_dbuf_bw =
- &new_bw_state->dbuf_bw[pipe];
-
- if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
- return true;
-
- if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
- intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
- return true;
- }
-
- return false;
-}
-
-static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
- struct intel_crtc *crtc,
- enum plane_id plane_id,
- const struct skl_ddb_entry *ddb,
- unsigned int data_rate)
-{
- struct intel_display *display = to_intel_display(crtc);
- unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
- enum dbuf_slice slice;
-
- /*
- * The arbiter can only really guarantee an
- * equal share of the total bw to each plane.
- */
- for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
- dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
- dbuf_bw->active_planes[slice] |= BIT(plane_id);
- }
-}
-
-static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum plane_id plane_id;
-
- memset(dbuf_bw, 0, sizeof(*dbuf_bw));
-
- if (!crtc_state->hw.active)
- return;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- /*
- * We assume cursors are small enough
- * to not cause bandwidth problems.
- */
- if (plane_id == PLANE_CURSOR)
- continue;
-
- skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
- &crtc_state->wm.skl.plane_ddb[plane_id],
- crtc_state->data_rate[plane_id]);
-
- if (DISPLAY_VER(display) < 11)
- skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
- &crtc_state->wm.skl.plane_ddb_y[plane_id],
- crtc_state->data_rate[plane_id]);
- }
-}
-
-/* "Maximum Data Buffer Bandwidth" */
-static int
-intel_bw_dbuf_min_cdclk(struct intel_display *display,
- const struct intel_bw_state *bw_state)
-{
- unsigned int total_max_bw = 0;
- enum dbuf_slice slice;
-
- for_each_dbuf_slice(display, slice) {
- int num_active_planes = 0;
- unsigned int max_bw = 0;
- enum pipe pipe;
-
- /*
- * The arbiter can only really guarantee an
- * equal share of the total bw to each plane.
- */
- for_each_pipe(display, pipe) {
- const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
-
- max_bw = max(dbuf_bw->max_bw[slice], max_bw);
- num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
- }
- max_bw *= num_active_planes;
-
- total_max_bw = max(total_max_bw, max_bw);
- }
-
- return DIV_ROUND_UP(total_max_bw, 64);
-}
-
-int intel_bw_min_cdclk(struct intel_display *display,
- const struct intel_bw_state *bw_state)
-{
- enum pipe pipe;
- int min_cdclk;
-
- min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
-
- for_each_pipe(display, pipe)
- min_cdclk = max(min_cdclk,
- intel_bw_crtc_min_cdclk(display,
- bw_state->data_rate[pipe]));
-
- return min_cdclk;
-}
-
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
-{
- struct intel_display *display = to_intel_display(state);
- struct intel_bw_state *new_bw_state = NULL;
- const struct intel_bw_state *old_bw_state = NULL;
- const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *old_crtc_state;
- const struct intel_crtc_state *new_crtc_state;
- int old_min_cdclk, new_min_cdclk;
- struct intel_crtc *crtc;
- int i;
-
- if (DISPLAY_VER(display) < 9)
- return 0;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
-
- skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
- skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
-
- if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
- continue;
-
- new_bw_state = intel_atomic_get_bw_state(state);
- if (IS_ERR(new_bw_state))
- return PTR_ERR(new_bw_state);
-
- old_bw_state = intel_atomic_get_old_bw_state(state);
-
- new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
- }
-
- if (!old_bw_state)
- return 0;
-
- if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
- int ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
-
- old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
- new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
-
- /*
- * No need to check against the cdclk state if
- * the min cdclk doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to bandwidth
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_min_cdclk <= old_min_cdclk)
- return 0;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /*
- * No need to recalculate the cdclk state if
- * the min cdclk doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to bandwidth
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state))
- return 0;
-
- drm_dbg_kms(display->drm,
- "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
- new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state));
- *need_cdclk_calc = true;
-
- return 0;
-}
-
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
struct intel_display *display = to_intel_display(state);
@@ -1489,13 +1233,13 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
unsigned int old_data_rate =
- intel_bw_crtc_data_rate(old_crtc_state);
+ intel_crtc_bw_data_rate(old_crtc_state);
unsigned int new_data_rate =
- intel_bw_crtc_data_rate(new_crtc_state);
+ intel_crtc_bw_data_rate(new_crtc_state);
unsigned int old_active_planes =
- intel_bw_crtc_num_active_planes(old_crtc_state);
+ intel_crtc_bw_num_active_planes(old_crtc_state);
unsigned int new_active_planes =
- intel_bw_crtc_num_active_planes(new_crtc_state);
+ intel_crtc_bw_num_active_planes(new_crtc_state);
struct intel_bw_state *new_bw_state;
/*
@@ -1527,11 +1271,11 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
static int intel_bw_modeset_checks(struct intel_atomic_state *state)
{
- struct intel_display *display = to_intel_display(state);
const struct intel_bw_state *old_bw_state;
struct intel_bw_state *new_bw_state;
+ int ret;
- if (DISPLAY_VER(display) < 9)
+ if (!intel_any_crtc_active_changed(state))
return 0;
new_bw_state = intel_atomic_get_bw_state(state);
@@ -1543,13 +1287,9 @@ static int intel_bw_modeset_checks(struct intel_atomic_state *state)
new_bw_state->active_pipes =
intel_calc_active_pipes(state, old_bw_state->active_pipes);
- if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
- int ret;
-
- ret = intel_atomic_lock_global_state(&new_bw_state->base);
- if (ret)
- return ret;
- }
+ ret = intel_atomic_lock_global_state(&new_bw_state->base);
+ if (ret)
+ return ret;
return 0;
}
@@ -1599,7 +1339,7 @@ static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
return 0;
}
-int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
+int intel_bw_atomic_check(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
bool changed = false;
@@ -1610,11 +1350,9 @@ int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
if (DISPLAY_VER(display) < 9)
return 0;
- if (any_ms) {
- ret = intel_bw_modeset_checks(state);
- if (ret)
- return ret;
- }
+ ret = intel_bw_modeset_checks(state);
+ if (ret)
+ return ret;
ret = intel_bw_check_sagv_mask(state);
if (ret)
@@ -1657,9 +1395,9 @@ static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
bw_state->data_rate[crtc->pipe] =
- intel_bw_crtc_data_rate(crtc_state);
+ intel_crtc_bw_data_rate(crtc_state);
bw_state->num_active_planes[crtc->pipe] =
- intel_bw_crtc_num_active_planes(crtc_state);
+ intel_crtc_bw_num_active_planes(crtc_state);
drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
@@ -1690,8 +1428,6 @@ void intel_bw_update_hw_state(struct intel_display *display)
if (DISPLAY_VER(display) >= 11)
intel_bw_crtc_update(bw_state, crtc_state);
- skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
-
/* initially SAGV has been forced off */
bw_state->pipe_sagv_reject |= BIT(pipe);
}
@@ -1709,7 +1445,6 @@ void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
- memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
}
static struct intel_global_state *
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index d51f50c9d302..99b447388245 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -28,11 +28,7 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state);
void intel_bw_init_hw(struct intel_display *display);
int intel_bw_init(struct intel_display *display);
-int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms);
-int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc);
-int intel_bw_min_cdclk(struct intel_display *display,
- const struct intel_bw_state *bw_state);
+int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_update_hw_state(struct intel_display *display);
void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
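
intel_bw_modeset_checks() now gates on a CRTC-activity helper instead of the any_ms flag passed in by the caller. The helper's definition is not part of this diff; a sketch of what intel_any_crtc_active_changed() is assumed to check (the real definition may differ):

static bool example_any_crtc_active_changed(struct intel_atomic_state *state)
{
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	/* Did any CRTC in this state toggle between active and inactive? */
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (old_crtc_state->hw.active != new_crtc_state->hw.active)
			return true;
	}

	return false;
}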
diff --git a/drivers/gpu/drm/i915/display/intel_casf.c b/drivers/gpu/drm/i915/display/intel_casf.c
new file mode 100644
index 000000000000..95339b496f24
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf.c
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "i915_reg.h"
+#include "intel_casf.h"
+#include "intel_casf_regs.h"
+#include "intel_de.h"
+#include "intel_display_regs.h"
+#include "intel_display_types.h"
+#include "skl_scaler.h"
+
+#define MAX_PIXELS_FOR_3_TAP_FILTER (1920 * 1080)
+#define MAX_PIXELS_FOR_5_TAP_FILTER (3840 * 2160)
+
+#define FILTER_COEFF_0_125 125
+#define FILTER_COEFF_0_25 250
+#define FILTER_COEFF_0_5 500
+#define FILTER_COEFF_1_0 1000
+#define FILTER_COEFF_0_0 0
+#define SET_POSITIVE_SIGN(x) ((x) & (~SIGN))
+
+/**
+ * DOC: Content Adaptive Sharpness Filter (CASF)
+ *
+ * Starting from LNL, the display engine supports a content adaptive
+ * sharpness filter that enhances image quality. The hardware implements
+ * CASF using the second pipe scaler, so pipe scaling cannot be used
+ * while sharpness is enabled.
+ * The filter operates on a region of pixels determined by the tap size;
+ * coefficients are used to generate an alpha value that blends the
+ * sharpened image with the original image.
+ */
+
+/* Default LUT values to be loaded one time. */
+static const u16 sharpness_lut[] = {
+ 4095, 2047, 1364, 1022, 816, 678, 579,
+ 504, 444, 397, 357, 323, 293, 268, 244, 224,
+ 204, 187, 170, 154, 139, 125, 111, 98, 85,
+ 73, 60, 48, 36, 24, 12, 0
+};
+
+const u16 filtercoeff_1[] = {
+ FILTER_COEFF_0_0, FILTER_COEFF_0_0, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_0,
+ FILTER_COEFF_0_0,
+};
+
+const u16 filtercoeff_2[] = {
+ FILTER_COEFF_0_0, FILTER_COEFF_0_25, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25,
+ FILTER_COEFF_0_0,
+};
+
+const u16 filtercoeff_3[] = {
+ FILTER_COEFF_0_125, FILTER_COEFF_0_25, FILTER_COEFF_0_5,
+ FILTER_COEFF_1_0, FILTER_COEFF_0_5, FILTER_COEFF_0_25,
+ FILTER_COEFF_0_125,
+};
+
+static void intel_casf_filter_lut_load(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int i;
+
+ intel_de_write(display, SHRPLUT_INDEX(crtc->pipe),
+ INDEX_AUTO_INCR | INDEX_VALUE(0));
+
+ for (i = 0; i < ARRAY_SIZE(sharpness_lut); i++)
+ intel_de_write(display, SHRPLUT_DATA(crtc->pipe),
+ sharpness_lut[i]);
+}
+
+void intel_casf_update_strength(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int win_size;
+
+ intel_de_rmw(display, SHARPNESS_CTL(crtc->pipe), FILTER_STRENGTH_MASK,
+ FILTER_STRENGTH(crtc_state->hw.casf_params.strength));
+
+ win_size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, 1));
+
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(crtc->pipe, 1), win_size);
+}
+
+static void intel_casf_compute_win_size(struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *mode = &crtc_state->hw.adjusted_mode;
+ u32 total_pixels = mode->hdisplay * mode->vdisplay;
+
+ if (total_pixels <= MAX_PIXELS_FOR_3_TAP_FILTER)
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_3X3;
+ else if (total_pixels <= MAX_PIXELS_FOR_5_TAP_FILTER)
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_5X5;
+ else
+ crtc_state->hw.casf_params.win_size = SHARPNESS_FILTER_SIZE_7X7;
+}
+
+int intel_casf_compute_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!HAS_CASF(display))
+ return 0;
+
+ if (crtc_state->uapi.sharpness_strength == 0) {
+ crtc_state->hw.casf_params.casf_enable = false;
+ crtc_state->hw.casf_params.strength = 0;
+ return 0;
+ }
+
+ crtc_state->hw.casf_params.casf_enable = true;
+
+ /*
+ * HW takes the value (1.0 + strength) in 4.4 fixed-point format.
+ * Strength ranges from 0.0 to 14.9375, i.e. from 0 to 239.
+ * The user can pass a value from 0 to 255, but it is clamped to 239.
+ * E.g. a user value of 85 is 5.3125; adding 1.0 gives 6.3125, and
+ * 6.3125 in 4.4 format is 0b01100101, which equals 101.
+ * Equivalently, 85 + 16 = 101.
+ */
+ crtc_state->hw.casf_params.strength =
+ min(crtc_state->uapi.sharpness_strength, 0xEF) + 0x10;
+
+ intel_casf_compute_win_size(crtc_state);
+
+ intel_casf_scaler_compute_config(crtc_state);
+
+ return 0;
+}
+
+void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 sharp;
+
+ sharp = intel_de_read(display, SHARPNESS_CTL(crtc->pipe));
+ if (sharp & FILTER_EN) {
+ if (drm_WARN_ON(display->drm,
+ REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp) < 16))
+ crtc_state->hw.casf_params.strength = 0;
+ else
+ crtc_state->hw.casf_params.strength =
+ REG_FIELD_GET(FILTER_STRENGTH_MASK, sharp);
+ crtc_state->hw.casf_params.casf_enable = true;
+ crtc_state->hw.casf_params.win_size =
+ REG_FIELD_GET(FILTER_SIZE_MASK, sharp);
+ }
+}
+
+bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->hw.casf_params.casf_enable)
+ return true;
+
+ return false;
+}
+
+static int casf_coeff_tap(int i)
+{
+ return i % SCALER_FILTER_NUM_TAPS;
+}
+
+static u32 casf_coeff(struct intel_crtc_state *crtc_state, int t)
+{
+ struct scaler_filter_coeff value;
+ u32 coeff;
+
+ value = crtc_state->hw.casf_params.coeff[t];
+ value.sign = 0;
+
+ coeff = value.sign << 15 | value.exp << 12 | value.mantissa << 3;
+ return coeff;
+}
+
+/*
+ * 17 phases of 7 taps require 119 coefficients in 60 dwords per set.
+ * To enable CASF, program the scaler coefficients with the coefficients
+ * calculated and stored in hw.casf_params.coeff as per
+ * SCALER_COEFFICIENT_FORMAT.
+ */
+static void intel_casf_write_coeff(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int id = crtc_state->scaler_state.scaler_id;
+ int i;
+
+ if (id != 1) {
+ drm_WARN(display->drm, 1, "Second scaler not enabled\n");
+ return;
+ }
+
+ intel_de_write_fw(display, GLK_PS_COEF_INDEX_SET(crtc->pipe, id, 0),
+ PS_COEF_INDEX_AUTO_INC);
+
+ for (i = 0; i < 17 * SCALER_FILTER_NUM_TAPS; i += 2) {
+ u32 tmp;
+ int t;
+
+ t = casf_coeff_tap(i);
+ tmp = casf_coeff(crtc_state, t);
+
+ t = casf_coeff_tap(i + 1);
+ tmp |= casf_coeff(crtc_state, t) << 16;
+
+ intel_de_write_fw(display, GLK_PS_COEF_DATA_SET(crtc->pipe, id, 0),
+ tmp);
+ }
+}
+
+static void convert_sharpness_coef_binary(struct scaler_filter_coeff *coeff,
+ u16 coefficient)
+{
+ if (coefficient < 25) {
+ coeff->mantissa = (coefficient * 2048) / 100;
+ coeff->exp = 3;
+ } else if (coefficient < 50) {
+ coeff->mantissa = (coefficient * 1024) / 100;
+ coeff->exp = 2;
+ } else if (coefficient < 100) {
+ coeff->mantissa = (coefficient * 512) / 100;
+ coeff->exp = 1;
+ } else {
+ coeff->mantissa = (coefficient * 256) / 100;
+ coeff->exp = 0;
+ }
+}
+
+void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state)
+{
+ const u16 *filtercoeff;
+ u16 filter_coeff[SCALER_FILTER_NUM_TAPS];
+ u16 sumcoeff = 0;
+ int i;
+
+ if (crtc_state->hw.casf_params.win_size == 0)
+ filtercoeff = filtercoeff_1;
+ else if (crtc_state->hw.casf_params.win_size == 1)
+ filtercoeff = filtercoeff_2;
+ else
+ filtercoeff = filtercoeff_3;
+
+ for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++)
+ sumcoeff += *(filtercoeff + i);
+
+ for (i = 0; i < SCALER_FILTER_NUM_TAPS; i++) {
+ filter_coeff[i] = (*(filtercoeff + i) * 100 / sumcoeff);
+ convert_sharpness_coef_binary(&crtc_state->hw.casf_params.coeff[i],
+ filter_coeff[i]);
+ }
+}
+
+void intel_casf_enable(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ u32 sharpness_ctl;
+
+ intel_casf_filter_lut_load(crtc, crtc_state);
+
+ intel_casf_write_coeff(crtc_state);
+
+ sharpness_ctl = FILTER_EN | FILTER_STRENGTH(crtc_state->hw.casf_params.strength);
+
+ sharpness_ctl |= crtc_state->hw.casf_params.win_size;
+
+ intel_de_write(display, SHARPNESS_CTL(crtc->pipe), sharpness_ctl);
+
+ skl_scaler_setup_casf(crtc_state);
+}
+
+void intel_casf_disable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ intel_de_write(display, SKL_PS_CTRL(crtc->pipe, 1), 0);
+ intel_de_write(display, SKL_PS_WIN_POS(crtc->pipe, 1), 0);
+ intel_de_write(display, SHARPNESS_CTL(crtc->pipe), 0);
+ intel_de_write(display, SKL_PS_WIN_SZ(crtc->pipe, 1), 0);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_casf.h b/drivers/gpu/drm/i915/display/intel_casf.h
new file mode 100644
index 000000000000..b3fb0bcb3f5b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CASF_H__
+#define __INTEL_CASF_H__
+
+#include <linux/types.h>
+
+struct intel_crtc_state;
+
+int intel_casf_compute_config(struct intel_crtc_state *crtc_state);
+void intel_casf_update_strength(struct intel_crtc_state *new_crtc_state);
+void intel_casf_sharpness_get_config(struct intel_crtc_state *crtc_state);
+void intel_casf_enable(struct intel_crtc_state *crtc_state);
+void intel_casf_disable(const struct intel_crtc_state *crtc_state);
+void intel_casf_scaler_compute_config(struct intel_crtc_state *crtc_state);
+bool intel_casf_needs_scaler(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_CASF_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_casf_regs.h b/drivers/gpu/drm/i915/display/intel_casf_regs.h
new file mode 100644
index 000000000000..87803cca510f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_casf_regs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_CASF_REGS_H__
+#define __INTEL_CASF_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _SHARPNESS_CTL_A 0x682B0
+#define _SHARPNESS_CTL_B 0x68AB0
+#define SHARPNESS_CTL(pipe) _MMIO_PIPE(pipe, _SHARPNESS_CTL_A, _SHARPNESS_CTL_B)
+#define FILTER_EN REG_BIT(31)
+#define FILTER_STRENGTH_MASK REG_GENMASK(15, 8)
+#define FILTER_STRENGTH(x) REG_FIELD_PREP(FILTER_STRENGTH_MASK, (x))
+#define FILTER_SIZE_MASK REG_GENMASK(1, 0)
+#define SHARPNESS_FILTER_SIZE_3X3 REG_FIELD_PREP(FILTER_SIZE_MASK, 0)
+#define SHARPNESS_FILTER_SIZE_5X5 REG_FIELD_PREP(FILTER_SIZE_MASK, 1)
+#define SHARPNESS_FILTER_SIZE_7X7 REG_FIELD_PREP(FILTER_SIZE_MASK, 2)
+
+#define _SHRPLUT_DATA_A 0x682B8
+#define _SHRPLUT_DATA_B 0x68AB8
+#define SHRPLUT_DATA(pipe) _MMIO_PIPE(pipe, _SHRPLUT_DATA_A, _SHRPLUT_DATA_B)
+
+#define _SHRPLUT_INDEX_A 0x682B4
+#define _SHRPLUT_INDEX_B 0x68AB4
+#define SHRPLUT_INDEX(pipe) _MMIO_PIPE(pipe, _SHRPLUT_INDEX_A, _SHRPLUT_INDEX_B)
+#define INDEX_AUTO_INCR REG_BIT(10)
+#define INDEX_VALUE_MASK REG_GENMASK(4, 0)
+#define INDEX_VALUE(x) REG_FIELD_PREP(INDEX_VALUE_MASK, (x))
+
+#endif /* __INTEL_CASF_REGS_H__ */
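
A usage sketch for these definitions (illustrative values, not taken from the patch): enabling a 5x5 filter at strength 200 composes and reads back as

    u32 val = FILTER_EN | FILTER_STRENGTH(200) | SHARPNESS_FILTER_SIZE_5X5;

    /* REG_FIELD_GET(FILTER_STRENGTH_MASK, val) == 200 */
    /* REG_FIELD_GET(FILTER_SIZE_MASK, val) == 1, i.e. 5x5 */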
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 228aa64c1349..37801c744b05 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -22,9 +22,11 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <linux/time.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_print.h>
#include "soc/intel_dram.h"
@@ -33,12 +35,13 @@
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
-#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_crtc.h"
+#include "intel_dbuf_bw.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_pcode.h"
@@ -47,6 +50,7 @@
#include "intel_vdsc.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
+#include "vlv_clock.h"
#include "vlv_dsi.h"
#include "vlv_sideband.h"
@@ -130,8 +134,8 @@ struct intel_cdclk_state {
*/
struct intel_cdclk_config actual;
- /* minimum acceptable cdclk to satisfy bandwidth requirements */
- int bw_min_cdclk;
+ /* minimum acceptable cdclk to satisfy DBUF bandwidth requirements */
+ int dbuf_bw_min_cdclk;
/* minimum acceptable cdclk for each pipe */
int min_cdclk[I915_MAX_PIPES];
/* minimum acceptable voltage level for each pipe */
@@ -143,6 +147,9 @@ struct intel_cdclk_state {
/* forced minimum cdclk for glk+ audio w/a */
int force_min_cdclk;
+ /* bitmask of enabled pipes */
+ u8 enabled_pipes;
+
/* bitmask of active pipes */
u8 active_pipes;
@@ -561,8 +568,7 @@ static void hsw_get_cdclk(struct intel_display *display,
static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
+ int freq_320 = (vlv_clock_get_hpll_vco(display->drm) << 1) % 320000 != 0 ?
333333 : 320000;
/*
@@ -582,8 +588,6 @@ static int vlv_calc_cdclk(struct intel_display *display, int min_cdclk)
static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.valleyview) {
if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
return 2;
@@ -597,7 +601,7 @@ static u8 vlv_calc_voltage_level(struct intel_display *display, int cdclk)
* hardware has shown that we just need to write the desired
* CCK divider into the Punit register.
*/
- return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+ return DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1, cdclk) - 1;
}
}
@@ -606,17 +610,12 @@ static void vlv_get_cdclk(struct intel_display *display,
{
u32 val;
- vlv_iosf_sb_get(display->drm, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
-
- cdclk_config->vco = vlv_get_hpll_vco(display->drm);
- cdclk_config->cdclk = vlv_get_cck_clock(display->drm, "cdclk",
- CCK_DISPLAY_CLOCK_CONTROL,
- cdclk_config->vco);
+ cdclk_config->vco = vlv_clock_get_hpll_vco(display->drm);
+ cdclk_config->cdclk = vlv_clock_get_cdclk(display->drm);
+ vlv_punit_get(display->drm);
val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
-
- vlv_iosf_sb_put(display->drm,
- BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
+ vlv_punit_put(display->drm);
if (display->platform.valleyview)
cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
@@ -628,7 +627,6 @@ static void vlv_get_cdclk(struct intel_display *display,
static void vlv_program_pfi_credits(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
unsigned int credits, default_credits;
if (display->platform.cherryview)
@@ -636,7 +634,7 @@ static void vlv_program_pfi_credits(struct intel_display *display)
else
default_credits = PFI_CREDIT(8);
- if (display->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ if (display->cdclk.hw.cdclk >= vlv_clock_get_czclk(display->drm)) {
/* CHV suggested value is 31 or 63 */
if (display->platform.cherryview)
credits = PFI_CREDIT_63;
@@ -668,10 +666,10 @@ static void vlv_set_cdclk(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
+ int ret;
switch (cdclk) {
case 400000:
@@ -702,17 +700,17 @@ static void vlv_set_cdclk(struct intel_display *display,
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
- DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
- 50)) {
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
- }
+
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (val & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
if (cdclk == 400000) {
u32 divider;
- divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
+ divider = DIV_ROUND_CLOSEST(vlv_clock_get_hpll_vco(display->drm) << 1,
cdclk) - 1;
/* adjust cdclk divider */
@@ -721,11 +719,11 @@ static void vlv_set_cdclk(struct intel_display *display,
val |= divider;
vlv_cck_write(display->drm, CCK_DISPLAY_CLOCK_CONTROL, val);
- if (wait_for((vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL) &
- CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
- 50))
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
+ ret = poll_timeout_us(val = vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL),
+ (val & CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
}
/* adjust self-refresh exit latency value */
@@ -761,6 +759,7 @@ static void chv_set_cdclk(struct intel_display *display,
int cdclk = cdclk_config->cdclk;
u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
+ int ret;
switch (cdclk) {
case 333333:
@@ -786,12 +785,12 @@ static void chv_set_cdclk(struct intel_display *display,
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val);
- if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) &
- DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
- 50)) {
- drm_err(display->drm,
- "timed out waiting for CDclk change\n");
- }
+
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (val & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+ 500, 50 * 1000, false);
+ if (ret)
+ drm_err(display->drm, "timed out waiting for CDCLK change\n");
vlv_punit_put(display->drm);
@@ -903,8 +902,9 @@ static void bdw_set_cdclk(struct intel_display *display,
* According to the spec, it should be enough to poll for this 1 us.
* However, extensive testing shows that this can take longer.
*/
- if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 100))
+ ret = intel_de_wait_for_set_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 100);
+ if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
intel_de_rmw(display, LCPLL_CTL,
@@ -913,8 +913,9 @@ static void bdw_set_cdclk(struct intel_display *display,
intel_de_rmw(display, LCPLL_CTL,
LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ ret = intel_de_wait_for_clear_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 1);
+ if (ret)
drm_err(display->drm, "Switching back to LCPLL failed\n");
intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ,
@@ -1111,7 +1112,7 @@ static void skl_dpll0_enable(struct intel_display *display, int vco)
intel_de_rmw(display, LCPLL1_CTL,
0, LCPLL_PLL_ENABLE);
- if (intel_de_wait_for_set(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
+ if (intel_de_wait_for_set_ms(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
drm_err(display->drm, "DPLL0 not locked\n");
display->cdclk.hw.vco = vco;
@@ -1125,7 +1126,7 @@ static void skl_dpll0_disable(struct intel_display *display)
intel_de_rmw(display, LCPLL1_CTL,
LCPLL_PLL_ENABLE, 0);
- if (intel_de_wait_for_clear(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
drm_err(display->drm, "Couldn't disable DPLL0\n");
display->cdclk.hw.vco = 0;
@@ -1532,6 +1533,41 @@ static const struct intel_cdclk_vals xe3lpd_cdclk_table[] = {
{}
};
+static const struct intel_cdclk_vals xe3p_lpd_cdclk_table[] = {
+ { .refclk = 38400, .cdclk = 151200, .ratio = 21, .waveform = 0xa4a4 },
+ { .refclk = 38400, .cdclk = 176400, .ratio = 21, .waveform = 0xaa54 },
+ { .refclk = 38400, .cdclk = 201600, .ratio = 21, .waveform = 0xaaaa },
+ { .refclk = 38400, .cdclk = 226800, .ratio = 21, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 252000, .ratio = 21, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 277200, .ratio = 21, .waveform = 0xdbb6 },
+ { .refclk = 38400, .cdclk = 302400, .ratio = 21, .waveform = 0xeeee },
+ { .refclk = 38400, .cdclk = 327600, .ratio = 21, .waveform = 0xf7de },
+ { .refclk = 38400, .cdclk = 352800, .ratio = 21, .waveform = 0xfefe },
+ { .refclk = 38400, .cdclk = 378000, .ratio = 21, .waveform = 0xfffe },
+ { .refclk = 38400, .cdclk = 403200, .ratio = 21, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 422400, .ratio = 22, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 441600, .ratio = 23, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 460800, .ratio = 24, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 480000, .ratio = 25, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 499200, .ratio = 26, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 518400, .ratio = 27, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 537600, .ratio = 28, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 556800, .ratio = 29, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 576000, .ratio = 30, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 595200, .ratio = 31, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 614400, .ratio = 32, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 633600, .ratio = 33, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 652800, .ratio = 34, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 672000, .ratio = 35, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 691200, .ratio = 36, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 710400, .ratio = 37, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 729600, .ratio = 38, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 748800, .ratio = 39, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 768000, .ratio = 40, .waveform = 0xffff },
+ { .refclk = 38400, .cdclk = 787200, .ratio = 41, .waveform = 0xffff },
+ {}
+};
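
Each entry is self-consistent with the squash arithmetic used by the other bxt-style tables (assuming cdclk_squash_divider() just below, i.e. the popcount of the waveform over the 16-bit squash window): vco = refclk * ratio, cdclk = vco / 2 * hweight16(waveform) / cdclk_squash_len. For example:

    /* vco = 38400 * 21 = 806400 kHz; 0xaaaa has 8 bits set */
    /* cdclk = 806400 / 2 * 8 / 16 = 201600 kHz, matching the table */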
+
static const int cdclk_squash_len = 16;
static int cdclk_squash_divider(u16 waveform)
@@ -1559,7 +1595,7 @@ static int bxt_calc_cdclk(struct intel_display *display, int min_cdclk)
drm_WARN(display->drm, 1,
"Cannot satisfy minimum cdclk %d with refclk %u\n",
min_cdclk, display->cdclk.hw.ref);
- return 0;
+ return display->cdclk.max_cdclk_freq;
}
static int bxt_calc_cdclk_pll_vco(struct intel_display *display, int cdclk)
@@ -1797,8 +1833,8 @@ static void bxt_de_pll_disable(struct intel_display *display)
intel_de_write(display, BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_de_wait_for_clear(display,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for DE PLL unlock\n");
display->cdclk.hw.vco = 0;
@@ -1814,8 +1850,8 @@ static void bxt_de_pll_enable(struct intel_display *display, int vco)
intel_de_write(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_de_wait_for_set(display,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_set_ms(display,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for DE PLL lock\n");
display->cdclk.hw.vco = vco;
@@ -1827,7 +1863,7 @@ static void icl_cdclk_pll_disable(struct intel_display *display)
BXT_DE_PLL_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_de_wait_for_clear(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for CDCLK PLL unlock\n");
display->cdclk.hw.vco = 0;
@@ -1845,7 +1881,7 @@ static void icl_cdclk_pll_enable(struct intel_display *display, int vco)
intel_de_write(display, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
+ if (intel_de_wait_for_set_ms(display, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
drm_err(display->drm, "timeout waiting for CDCLK PLL lock\n");
display->cdclk.hw.vco = vco;
@@ -1865,8 +1901,8 @@ static void adlp_cdclk_pll_crawl(struct intel_display *display, int vco)
intel_de_write(display, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (intel_de_wait_for_set(display, BXT_DE_PLL_ENABLE,
- BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
+ if (intel_de_wait_for_set_ms(display, BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1))
drm_err(display->drm, "timeout waiting for FREQ change request ack\n");
val &= ~BXT_DE_PLL_FREQ_REQ;
@@ -2592,6 +2628,12 @@ static void intel_set_cdclk(struct intel_display *display,
}
}
+static int dg2_power_well_count(struct intel_display *display,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ return display->platform.dg2 ? hweight8(cdclk_state->active_pipes) : 0;
+}
+
static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
@@ -2604,16 +2646,16 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual) &&
- new_cdclk_state->active_pipes ==
- old_cdclk_state->active_pipes)
+ dg2_power_well_count(display, old_cdclk_state) ==
+ dg2_power_well_count(display, new_cdclk_state))
return;
/* According to "Sequence Before Frequency Change", voltage level set to 0x3 */
voltage_level = DISPLAY_TO_PCODE_VOLTAGE_MAX;
change_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
- update_pipe_count = hweight8(new_cdclk_state->active_pipes) >
- hweight8(old_cdclk_state->active_pipes);
+ update_pipe_count = dg2_power_well_count(display, new_cdclk_state) >
+ dg2_power_well_count(display, old_cdclk_state);
/*
* According to "Sequence Before Frequency Change",
@@ -2631,7 +2673,7 @@ static void intel_cdclk_pcode_pre_notify(struct intel_atomic_state *state)
* no action if it is decreasing, before the change
*/
if (update_pipe_count)
- num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+ num_active_pipes = dg2_power_well_count(display, new_cdclk_state);
intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
change_cdclk, update_pipe_count);
@@ -2651,8 +2693,8 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
voltage_level = new_cdclk_state->actual.voltage_level;
update_cdclk = new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk;
- update_pipe_count = hweight8(new_cdclk_state->active_pipes) <
- hweight8(old_cdclk_state->active_pipes);
+ update_pipe_count = dg2_power_well_count(display, new_cdclk_state) <
+ dg2_power_well_count(display, old_cdclk_state);
/*
* According to "Sequence After Frequency Change",
@@ -2668,7 +2710,7 @@ static void intel_cdclk_pcode_post_notify(struct intel_atomic_state *state)
* no action if it is increasing, after the change
*/
if (update_pipe_count)
- num_active_pipes = hweight8(new_cdclk_state->active_pipes);
+ num_active_pipes = dg2_power_well_count(display, new_cdclk_state);
intel_pcode_notify(display, voltage_level, num_active_pipes, cdclk,
update_cdclk, update_pipe_count);
@@ -2703,6 +2745,9 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
struct intel_cdclk_config cdclk_config;
enum pipe pipe;
+ if (!new_cdclk_state)
+ return;
+
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
return;
@@ -2755,6 +2800,9 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_new_cdclk_state(state);
enum pipe pipe;
+ if (!new_cdclk_state)
+ return;
+
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
return;
@@ -2792,16 +2840,20 @@ static int intel_cdclk_guardband(struct intel_display *display)
return 90;
}
-static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+static int _intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state, int pixel_rate)
{
struct intel_display *display = to_intel_display(crtc_state);
int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
int guardband = intel_cdclk_guardband(display);
- int pixel_rate = crtc_state->pixel_rate;
return DIV_ROUND_UP(pixel_rate * 100, guardband * ppc);
}
+static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ return _intel_pixel_rate_to_cdclk(crtc_state, crtc_state->pixel_rate);
+}
+
static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -2810,12 +2862,12 @@ static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state)
int min_cdclk = 0;
for_each_intel_plane_on_crtc(display->drm, crtc, plane)
- min_cdclk = max(min_cdclk, crtc_state->min_cdclk[plane->id]);
+ min_cdclk = max(min_cdclk, crtc_state->plane_min_cdclk[plane->id]);
return min_cdclk;
}
-static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
+int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state)
{
int min_cdclk;
@@ -2823,6 +2875,8 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
return 0;
min_cdclk = intel_pixel_rate_to_cdclk(crtc_state);
+ min_cdclk = max(min_cdclk, intel_crtc_bw_min_cdclk(crtc_state));
+ min_cdclk = max(min_cdclk, intel_fbc_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, hsw_ips_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, intel_audio_min_cdclk(crtc_state));
min_cdclk = max(min_cdclk, vlv_dsi_min_cdclk(crtc_state));
@@ -2832,51 +2886,110 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat
return min_cdclk;
}
-static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+static int intel_cdclk_update_crtc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc)
{
struct intel_display *display = to_intel_display(state);
- struct intel_cdclk_state *cdclk_state =
- intel_atomic_get_new_cdclk_state(state);
- const struct intel_bw_state *bw_state;
- struct intel_crtc *crtc;
- struct intel_crtc_state *crtc_state;
- int min_cdclk, i;
- enum pipe pipe;
+ struct intel_cdclk_state *cdclk_state;
+ bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state);
+ int ret;
- for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
- int ret;
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
- min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (min_cdclk < 0)
- return min_cdclk;
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
- if (cdclk_state->min_cdclk[crtc->pipe] == min_cdclk)
- continue;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ old_min_cdclk = cdclk_state->min_cdclk[crtc->pipe];
- ret = intel_atomic_lock_global_state(&cdclk_state->base);
- if (ret)
- return ret;
- }
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
+
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
- bw_state = intel_atomic_get_new_bw_state(state);
- if (bw_state) {
- min_cdclk = intel_bw_min_cdclk(display, bw_state);
+ cdclk_state->min_cdclk[crtc->pipe] = new_min_cdclk;
- if (cdclk_state->bw_min_cdclk != min_cdclk) {
- int ret;
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
- cdclk_state->bw_min_cdclk = min_cdclk;
+ *need_cdclk_calc = true;
- ret = intel_atomic_lock_global_state(&cdclk_state->base);
- if (ret)
- return ret;
- }
- }
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] min cdclk: %d kHz -> %d kHz\n",
+ crtc->base.base.id, crtc->base.name,
+ old_min_cdclk, new_min_cdclk);
+
+ return 0;
+}
+
+int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_cdclk_state *cdclk_state;
+ bool allow_cdclk_decrease = intel_any_crtc_needs_modeset(state);
+ int ret;
+
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
+
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ old_min_cdclk = cdclk_state->dbuf_bw_min_cdclk;
+
+ if (new_min_cdclk == old_min_cdclk)
+ return 0;
+
+ if (!allow_cdclk_decrease && new_min_cdclk < old_min_cdclk)
+ return 0;
+
+ cdclk_state->dbuf_bw_min_cdclk = new_min_cdclk;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+
+ *need_cdclk_calc = true;
+
+ drm_dbg_kms(display->drm,
+ "dbuf bandwidth min cdclk: %d kHz -> %d kHz\n",
+ old_min_cdclk, new_min_cdclk);
+
+ return 0;
+}
+
+static bool glk_cdclk_audio_wa_needed(struct intel_display *display,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ return display->platform.geminilake &&
+ cdclk_state->enabled_pipes &&
+ !is_power_of_2(cdclk_state->enabled_pipes);
+}
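
The power-of-two test reads as "more than one pipe enabled": any single-pipe bitmask is a power of two, any multi-pipe mask is not. For instance:

    /* BIT(PIPE_A) == 0x1 -> power of two, w/a not needed */
    /* BIT(PIPE_A) | BIT(PIPE_B) == 0x3 -> not a power of two, w/a needed */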
+
+static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_cdclk_state *cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe;
+ int min_cdclk;
- min_cdclk = max(cdclk_state->force_min_cdclk,
- cdclk_state->bw_min_cdclk);
+ min_cdclk = cdclk_state->force_min_cdclk;
+ min_cdclk = max(min_cdclk, cdclk_state->dbuf_bw_min_cdclk);
for_each_pipe(display, pipe)
min_cdclk = max(min_cdclk, cdclk_state->min_cdclk[pipe]);
@@ -2888,8 +3001,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* by changing the cd2x divider (see glk_cdclk_table[]) and
* thus a full modeset won't be needed then.
*/
- if (display->platform.geminilake && cdclk_state->active_pipes &&
- !is_power_of_2(cdclk_state->active_pipes))
+ if (glk_cdclk_audio_wa_needed(display, cdclk_state))
min_cdclk = max(min_cdclk, 2 * 96000);
if (min_cdclk > display->cdclk.max_cdclk_freq) {
@@ -3175,38 +3287,66 @@ intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
return to_intel_cdclk_state(cdclk_state);
}
-int intel_cdclk_atomic_check(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
+static int intel_cdclk_modeset_checks(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
{
+ struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state;
- const struct intel_cdclk_state *new_cdclk_state;
- struct intel_plane_state __maybe_unused *plane_state;
- struct intel_plane *plane;
+ struct intel_cdclk_state *new_cdclk_state;
int ret;
- int i;
- /*
- * active_planes bitmask has been updated, and potentially affected
- * planes are part of the state. We can now compute the minimum cdclk
- * for each plane.
- */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
- if (ret)
- return ret;
- }
+ if (!intel_any_crtc_enable_changed(state) &&
+ !intel_any_crtc_active_changed(state))
+ return 0;
+
+ new_cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(new_cdclk_state))
+ return PTR_ERR(new_cdclk_state);
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ new_cdclk_state->enabled_pipes =
+ intel_calc_enabled_pipes(state, old_cdclk_state->enabled_pipes);
+
+ new_cdclk_state->active_pipes =
+ intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
- ret = intel_bw_calc_min_cdclk(state, need_cdclk_calc);
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ if (!old_cdclk_state->active_pipes != !new_cdclk_state->active_pipes)
+ *need_cdclk_calc = true;
- if (new_cdclk_state &&
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
+ if (glk_cdclk_audio_wa_needed(display, old_cdclk_state) !=
+ glk_cdclk_audio_wa_needed(display, new_cdclk_state))
*need_cdclk_calc = true;
+ if (dg2_power_well_count(display, old_cdclk_state) !=
+ dg2_power_well_count(display, new_cdclk_state))
+ *need_cdclk_calc = true;
+
+ return 0;
+}
+
+static int intel_crtcs_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i, ret;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = intel_cdclk_update_crtc_min_cdclk(state, crtc,
+ old_crtc_state->min_cdclk,
+ new_crtc_state->min_cdclk,
+ need_cdclk_calc);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -3242,18 +3382,17 @@ static bool intel_cdclk_need_serialize(struct intel_display *display,
const struct intel_cdclk_state *old_cdclk_state,
const struct intel_cdclk_state *new_cdclk_state)
{
- bool power_well_cnt_changed = hweight8(old_cdclk_state->active_pipes) !=
- hweight8(new_cdclk_state->active_pipes);
- bool cdclk_changed = intel_cdclk_changed(&old_cdclk_state->actual,
- &new_cdclk_state->actual);
/*
- * We need to poke hw for gen >= 12, because we notify PCode if
+ * We need to poke hw for DG2, because we notify PCode if
* pipe power well count changes.
*/
- return cdclk_changed || (display->platform.dg2 && power_well_cnt_changed);
+ return intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual) ||
+ dg2_power_well_count(display, old_cdclk_state) !=
+ dg2_power_well_count(display, new_cdclk_state);
}
-int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct intel_display *display = to_intel_display(state);
const struct intel_cdclk_state *old_cdclk_state;
@@ -3267,9 +3406,6 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state->active_pipes =
- intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
-
ret = intel_cdclk_modeset_calc_cdclk(state);
if (ret)
return ret;
@@ -3282,9 +3418,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk ||
- intel_cdclk_changed(&old_cdclk_state->logical,
+ } else if (intel_cdclk_changed(&old_cdclk_state->logical,
&new_cdclk_state->logical)) {
ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
@@ -3366,14 +3500,55 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
return 0;
}
+int intel_cdclk_atomic_check(struct intel_atomic_state *state)
+{
+ const struct intel_cdclk_state *old_cdclk_state;
+ struct intel_cdclk_state *new_cdclk_state;
+ bool need_cdclk_calc = false;
+ int ret;
+
+ ret = intel_cdclk_modeset_checks(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ ret = intel_crtcs_calc_min_cdclk(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ ret = intel_dbuf_bw_calc_min_cdclk(state, &need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+
+ if (new_cdclk_state &&
+ old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk) {
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
+ if (ret)
+ return ret;
+
+ need_cdclk_calc = true;
+ }
+
+ if (need_cdclk_calc) {
+ ret = intel_modeset_calc_cdclk(state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
void intel_cdclk_update_hw_state(struct intel_display *display)
{
- const struct intel_bw_state *bw_state =
- to_intel_bw_state(display->bw.obj.state);
+ const struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
struct intel_cdclk_state *cdclk_state =
to_intel_cdclk_state(display->cdclk.obj.state);
struct intel_crtc *crtc;
+ cdclk_state->enabled_pipes = 0;
cdclk_state->active_pipes = 0;
for_each_intel_crtc(display->drm, crtc) {
@@ -3381,14 +3556,16 @@ void intel_cdclk_update_hw_state(struct intel_display *display)
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
+ if (crtc_state->hw.enable)
+ cdclk_state->enabled_pipes |= BIT(pipe);
if (crtc_state->hw.active)
cdclk_state->active_pipes |= BIT(pipe);
- cdclk_state->min_cdclk[pipe] = intel_crtc_compute_min_cdclk(crtc_state);
+ cdclk_state->min_cdclk[pipe] = crtc_state->min_cdclk;
cdclk_state->min_voltage_level[pipe] = crtc_state->min_voltage_level;
}
- cdclk_state->bw_min_cdclk = intel_bw_min_cdclk(display, bw_state);
+ cdclk_state->dbuf_bw_min_cdclk = intel_dbuf_bw_min_cdclk(display, dbuf_bw_state);
}
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc)
@@ -3417,7 +3594,9 @@ static int intel_compute_max_dotclk(struct intel_display *display)
*/
void intel_update_max_cdclk(struct intel_display *display)
{
- if (DISPLAY_VERx100(display) >= 3002) {
+ if (DISPLAY_VER(display) >= 35) {
+ display->cdclk.max_cdclk_freq = 787200;
+ } else if (DISPLAY_VERx100(display) >= 3002) {
display->cdclk.max_cdclk_freq = 480000;
} else if (DISPLAY_VER(display) >= 30) {
display->cdclk.max_cdclk_freq = 691200;
@@ -3557,19 +3736,12 @@ static int pch_rawclk(struct intel_display *display)
return (intel_de_read(display, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
-static int vlv_hrawclk(struct intel_display *display)
-{
- /* RAWCLK_FREQ_VLV register updated from power well code */
- return vlv_get_cck_clock_hpll(display->drm, "hrawclk",
- CCK_DISPLAY_REF_CLOCK_CONTROL);
-}
-
static int i9xx_hrawclk(struct intel_display *display)
{
struct drm_i915_private *i915 = to_i915(display->drm);
/* hrawclock is 1/4 the FSB frequency */
- return DIV_ROUND_CLOSEST(i9xx_fsb_freq(i915), 4);
+ return DIV_ROUND_CLOSEST(intel_fsb_freq(i915), 4);
}
/**
@@ -3597,7 +3769,7 @@ u32 intel_read_rawclk(struct intel_display *display)
else if (HAS_PCH_SPLIT(display))
freq = pch_rawclk(display);
else if (display->platform.valleyview || display->platform.cherryview)
- freq = vlv_hrawclk(display);
+ freq = vlv_clock_get_hrawclk(display->drm);
else if (DISPLAY_VER(display) >= 3)
freq = i9xx_hrawclk(display);
else
@@ -3622,9 +3794,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info);
void intel_cdclk_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_cdclk_info", 0444, display->drm->debugfs_root,
display, &i915_cdclk_info_fops);
}
@@ -3777,7 +3947,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
*/
void intel_init_cdclk_hooks(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 30) {
+ if (DISPLAY_VER(display) >= 35) {
+ display->funcs.cdclk = &xe3lpd_cdclk_funcs;
+ display->cdclk.table = xe3p_lpd_cdclk_table;
+ } else if (DISPLAY_VER(display) >= 30) {
display->funcs.cdclk = &xe3lpd_cdclk_funcs;
display->cdclk.table = xe3lpd_cdclk_table;
} else if (DISPLAY_VER(display) >= 20) {
@@ -3891,11 +4064,6 @@ int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe
return cdclk_state->min_cdclk[pipe];
}
-int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state)
-{
- return cdclk_state->bw_min_cdclk;
-}
-
bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state)
{
const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state;
@@ -3927,3 +4095,75 @@ void intel_cdclk_read_hw(struct intel_display *display)
cdclk_state->actual = display->cdclk.hw;
cdclk_state->logical = display->cdclk.hw;
}
+
+static int calc_cdclk(const struct intel_crtc_state *crtc_state, int min_cdclk)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) >= 10 || display->platform.broxton) {
+ return bxt_calc_cdclk(display, min_cdclk);
+ } else if (DISPLAY_VER(display) == 9) {
+ int vco;
+
+ vco = display->cdclk.skl_preferred_vco_freq;
+ if (vco == 0)
+ vco = 8100000;
+
+ return skl_calc_cdclk(min_cdclk, vco);
+ } else if (display->platform.broadwell) {
+ return bdw_calc_cdclk(min_cdclk);
+ } else if (display->platform.cherryview || display->platform.valleyview) {
+ return vlv_calc_cdclk(display, min_cdclk);
+ } else {
+ return display->cdclk.max_cdclk_freq;
+ }
+}
+
+static unsigned int _intel_cdclk_prefill_adj(const struct intel_crtc_state *crtc_state,
+ int clock, int min_cdclk)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+ int cdclk = calc_cdclk(crtc_state, min_cdclk);
+
+ return min(0x10000, DIV_ROUND_UP_ULL((u64)clock << 16, ppc * cdclk));
+}
+
+unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /* FIXME use the actual min_cdclk for the pipe here */
+ return intel_cdclk_prefill_adjustment_worst(crtc_state);
+}
+
+unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ int clock = crtc_state->hw.pipe_mode.crtc_clock;
+ int min_cdclk;
+
+ /*
+ * FIXME could perhaps consider a few more of the factors
+ * that go the per-crtc min_cdclk. Namely anything that
+ * only changes during full modesets.
+ *
+ * FIXME this assumes 1:1 scaling, but the other _worst() stuff
+ * assumes max downscaling, so the final result will be
+ * unrealistically bad. Figure out where the actual maximum value
+ * lies and use that to compute a more realistic worst case
+ * estimate...
+ */
+ min_cdclk = _intel_pixel_rate_to_cdclk(crtc_state, clock);
+
+ return _intel_cdclk_prefill_adj(crtc_state, clock, min_cdclk);
+}
+
+int intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state,
+ unsigned int prefill_lines_unadjusted,
+ unsigned int prefill_lines_available)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int ppc = intel_cdclk_ppc(display, crtc_state->double_wide);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pipe_mode->crtc_clock, prefill_lines_unadjusted),
+ ppc * prefill_lines_available);
+}
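
A numeric sketch of _intel_cdclk_prefill_adj() above (invented values): with clock = 300000 kHz, ppc = 2 and cdclk = 480000 kHz,

    /* (300000 << 16) / (2 * 480000) = 20480 = 0x5000 */

i.e. 0.3125 in 16.16 fixed point; a pipe whose pixel rate exceeds what the cdclk can deliver saturates at the 0x10000 (1.0) cap.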
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index cacee598af0e..1ff7d078b42c 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -38,16 +38,17 @@ void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
void intel_cdclk_dump_config(struct intel_display *display,
const struct intel_cdclk_config *cdclk_config,
const char *context);
-int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
void intel_cdclk_get_cdclk(struct intel_display *display,
struct intel_cdclk_config *cdclk_config);
-int intel_cdclk_atomic_check(struct intel_atomic_state *state,
- bool *need_cdclk_calc);
+int intel_cdclk_atomic_check(struct intel_atomic_state *state);
int intel_cdclk_state_set_joined_mbus(struct intel_atomic_state *state, bool joined_mbus);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
void intel_cdclk_update_hw_state(struct intel_display *display);
void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc);
+int intel_cdclk_update_dbuf_bw_min_cdclk(struct intel_atomic_state *state,
+ int old_min_cdclk, int new_min_cdclk,
+ bool *need_cdclk_calc);
#define to_intel_cdclk_state(global_state) \
container_of_const((global_state), struct intel_cdclk_state, base)
@@ -64,9 +65,16 @@ int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state);
int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state);
int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state);
int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe);
-int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state);
bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state);
void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk);
void intel_cdclk_read_hw(struct intel_display *display);
+unsigned int intel_cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int intel_cdclk_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+int intel_cdclk_min_cdclk_for_prefill(const struct intel_crtc_state *crtc_state,
+ unsigned int prefill_lines_unadjusted,
+ unsigned int prefill_lines_available);
+
+int intel_crtc_min_cdclk(const struct intel_crtc_state *crtc_state);
+
#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 671db6926e4c..e7950655434b 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -24,14 +24,16 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "i9xx_plane_regs.h"
#include "intel_color.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsb.h"
#include "intel_vrr.h"
+#include "skl_universal_plane.h"
+#include "skl_universal_plane_regs.h"
struct intel_color_funcs {
int (*color_check)(struct intel_atomic_state *state,
@@ -87,6 +89,14 @@ struct intel_color_funcs {
* Read config other than LUTs and CSCs, before them. Optional.
*/
void (*get_config)(struct intel_crtc_state *crtc_state);
+
+ /* Plane CSC*/
+ void (*load_plane_csc_matrix)(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
+
+ /* Plane Pre/Post CSC */
+ void (*load_plane_luts)(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
};
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -609,6 +619,8 @@ static u16 ctm_to_twos_complement(u64 coeff, int int_bits, int frac_bits)
if (CTM_COEFF_NEGATIVE(coeff))
c = -c;
+ int_bits = max(int_bits, 1);
+
c = clamp(c, -(s64)BIT(int_bits + frac_bits - 1),
(s64)(BIT(int_bits + frac_bits - 1) - 1));
@@ -1090,18 +1102,19 @@ static void skl_get_config(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- u32 tmp;
crtc_state->gamma_mode = hsw_read_gamma_mode(crtc);
crtc_state->csc_mode = ilk_read_csc_mode(crtc);
- tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe));
+ if (DISPLAY_VER(display) < 35) {
+ u32 tmp = intel_de_read(display, SKL_BOTTOM_COLOR(crtc->pipe));
- if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
- crtc_state->gamma_enable = true;
+ if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
+ crtc_state->gamma_enable = true;
- if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
- crtc_state->csc_enable = true;
+ if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
+ crtc_state->csc_enable = true;
+ }
}
static void skl_color_commit_arm(struct intel_dsb *dsb,
@@ -2013,7 +2026,7 @@ void intel_color_prepare_commit(struct intel_atomic_state *state,
if (crtc_state->use_dsb && intel_color_uses_chained_dsb(crtc_state)) {
intel_vrr_send_push(crtc_state->dsb_color, crtc_state);
- intel_dsb_wait_vblank_delay(state, crtc_state->dsb_color);
+ intel_dsb_wait_for_delayed_vblank(state, crtc_state->dsb_color);
intel_vrr_check_push_sent(crtc_state->dsb_color, crtc_state);
intel_dsb_interrupt(crtc_state->dsb_color);
}
@@ -3835,6 +3848,266 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
}
}
+static void
+xelpd_load_plane_csc_matrix(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ const struct drm_plane_state *state = &plane_state->uapi;
+ enum pipe pipe = to_intel_plane(state->plane)->pipe;
+ enum plane_id plane = to_intel_plane(state->plane)->id;
+ const struct drm_property_blob *blob = plane_state->hw.ctm;
+ struct drm_color_ctm_3x4 *ctm;
+ const u64 *input;
+ u16 coeffs[9] = {};
+ int i, j;
+
+ if (!icl_is_hdr_plane(display, plane) || !blob)
+ return;
+
+ ctm = blob->data;
+ input = ctm->matrix;
+
+ /*
+ * Convert fixed point S31.32 input to format supported by the
+ * hardware.
+ */
+ for (i = 0, j = 0; i < ARRAY_SIZE(coeffs); i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & input[j];
+
+ /*
+ * Clamp input value to min/max supported by
+ * hardware.
+ */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+ /* sign bit */
+ if (CTM_COEFF_NEGATIVE(input[j]))
+ coeffs[i] |= 1 << 15;
+
+ if (abs_coeff < CTM_COEFF_0_125)
+ coeffs[i] |= (3 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 12);
+ else if (abs_coeff < CTM_COEFF_0_25)
+ coeffs[i] |= (2 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 11);
+ else if (abs_coeff < CTM_COEFF_0_5)
+ coeffs[i] |= (1 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 10);
+ else if (abs_coeff < CTM_COEFF_1_0)
+ coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+ else if (abs_coeff < CTM_COEFF_2_0)
+ coeffs[i] |= (7 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 8);
+ else
+ coeffs[i] |= (6 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 7);
+
+ /* Skip the row offset (4th column of the 3x4 matrix); programmed as postoff below */
+ if (!((j + 2) % 4))
+ j += 2;
+ else
+ j++;
+ }
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 0),
+ coeffs[0] << 16 | coeffs[1]);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 1),
+ coeffs[2] << 16);
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 2),
+ coeffs[3] << 16 | coeffs[4]);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 3),
+ coeffs[5] << 16);
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 4),
+ coeffs[6] << 16 | coeffs[7]);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_COEFF(pipe, plane, 5),
+ coeffs[8] << 16);
+
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
+ intel_de_write_dsb(display, dsb, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+
+ /*
+ * Conversion from S31.32 to S0.12; bit 12 is the sign bit.
+ */
+ intel_de_write_dsb(display, dsb,
+ PLANE_CSC_POSTOFF(pipe, plane, 0),
+ ctm_to_twos_complement(input[3], 0, 12));
+ intel_de_write_dsb(display, dsb,
+ PLANE_CSC_POSTOFF(pipe, plane, 1),
+ ctm_to_twos_complement(input[7], 0, 12));
+ intel_de_write_dsb(display, dsb,
+ PLANE_CSC_POSTOFF(pipe, plane, 2),
+ ctm_to_twos_complement(input[11], 0, 12));
+}
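
As a worked example of the coefficient packing (assuming ILK_CSC_COEFF_FP() behaves as in the existing pipe CSC code): an S31.32 input of 1.0, i.e. 1ULL << 32, lands in the [1.0, 2.0) bucket, giving

    coeffs[i] = (7 << 12) | ILK_CSC_COEFF_FP(1ULL << 32, 8);  /* == 0x7800 */

which is the familiar ILK_CSC_COEFF_1_0 encoding.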
+
+static void
+xelpd_program_plane_pre_csc_lut(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ const struct drm_plane_state *state = &plane_state->uapi;
+ enum pipe pipe = to_intel_plane(state->plane)->pipe;
+ enum plane_id plane = to_intel_plane(state->plane)->id;
+ const struct drm_color_lut32 *pre_csc_lut = plane_state->hw.degamma_lut->data;
+ u32 i, lut_size;
+
+ if (icl_is_hdr_plane(display, plane)) {
+ lut_size = 128;
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+ PLANE_PAL_PREC_AUTO_INCREMENT);
+
+ if (pre_csc_lut) {
+ for (i = 0; i < lut_size; i++) {
+ u32 lut_val = drm_color_lut32_extract(pre_csc_lut[i].green, 24);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ lut_val);
+ }
+
+ /* Program the max register to clamp values > 1.0. */
+ /* TODO: Restrict to 0x7ffffff */
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ (1 << 24));
+ } while (i++ < 130);
+ } else {
+ for (i = 0; i < lut_size; i++) {
+ u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+ }
+
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ 1 << 24);
+ } while (i++ < 130);
+ }
+
+ intel_de_write_dsb(display, dsb, PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+ }
+}
+
+static void
+xelpd_program_plane_post_csc_lut(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ const struct drm_plane_state *state = &plane_state->uapi;
+ enum pipe pipe = to_intel_plane(state->plane)->pipe;
+ enum plane_id plane = to_intel_plane(state->plane)->id;
+ const struct drm_color_lut32 *post_csc_lut = plane_state->hw.gamma_lut->data;
+ u32 i, lut_size, lut_val;
+
+ if (icl_is_hdr_plane(display, plane)) {
+ intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0),
+ PLANE_PAL_PREC_AUTO_INCREMENT);
+ /* TODO: Add macro */
+ intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0),
+ PLANE_PAL_PREC_AUTO_INCREMENT);
+ if (post_csc_lut) {
+ lut_size = 32;
+ for (i = 0; i < lut_size; i++) {
+ lut_val = drm_color_lut32_extract(post_csc_lut[i].green, 24);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ lut_val);
+ }
+
+ /* Segment 2 */
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ (1 << 24));
+ } while (i++ < 34);
+ } else {
+ /* TODO: Add for segment 0 */
+ lut_size = 32;
+ for (i = 0; i < lut_size; i++) {
+ u32 v = (i * ((1 << 24) - 1)) / (lut_size - 1);
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0), v);
+ }
+
+ do {
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, 0),
+ 1 << 24);
+ } while (i++ < 34);
+ }
+
+ intel_de_write_dsb(display, dsb, PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, 0), 0);
+ intel_de_write_dsb(display, dsb,
+ PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, 0), 0);
+ }
+}
+
+static void
+xelpd_plane_load_luts(struct intel_dsb *dsb, const struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.degamma_lut)
+ xelpd_program_plane_pre_csc_lut(dsb, plane_state);
+
+ if (plane_state->hw.gamma_lut)
+ xelpd_program_plane_post_csc_lut(dsb, plane_state);
+}
+
+static u32 glk_3dlut_10(const struct drm_color_lut32 *color)
+{
+ return REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, drm_color_lut32_extract(color->red, 10)) |
+ REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, drm_color_lut32_extract(color->green, 10)) |
+ REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, drm_color_lut32_extract(color->blue, 10));
+}
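
For instance (assuming drm_color_lut32_extract() scales a full-range channel to all-ones at the requested width), a full-scale white entry packs as:

    /* each channel extracts to 0x3ff */
    /* (0x3ff << 20) | (0x3ff << 10) | 0x3ff == 0x3fffffff */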
+
+static void glk_load_lut_3d(struct intel_dsb *dsb,
+ struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct intel_display *display = to_intel_display(crtc->base.dev);
+ const struct drm_color_lut32 *lut = blob->data;
+ int i, lut_size = drm_color_lut32_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+ drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not loading LUTs\n",
+ crtc->base.base.id, crtc->base.name);
+ return;
+ }
+
+ intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), LUT_3D_AUTO_INCREMENT);
+ for (i = 0; i < lut_size; i++)
+ intel_de_write_dsb(display, dsb, LUT_3D_DATA(pipe), glk_3dlut_10(&lut[i]));
+ intel_de_write_dsb(display, dsb, LUT_3D_INDEX(pipe), 0);
+}
+
+static void glk_lut_3d_commit(struct intel_dsb *dsb, struct intel_crtc *crtc, bool enable)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 val = 0;
+
+ if (!dsb && intel_de_read(display, LUT_3D_CTL(pipe)) & LUT_3D_READY) {
+ drm_err(display->drm, "[CRTC:%d:%s] 3D LUT not ready, not committing change\n",
+ crtc->base.base.id, crtc->base.name);
+ return;
+ }
+
+ if (enable)
+ val = LUT_3D_ENABLE | LUT_3D_READY | LUT_3D_BIND_PLANE_1;
+
+ intel_de_write_dsb(display, dsb, LUT_3D_CTL(pipe), val);
+}
+
static const struct intel_color_funcs chv_color_funcs = {
.color_check = chv_color_check,
.color_commit_arm = i9xx_color_commit_arm,
@@ -3882,6 +4155,8 @@ static const struct intel_color_funcs tgl_color_funcs = {
.lut_equal = icl_lut_equal,
.read_csc = icl_read_csc,
.get_config = skl_get_config,
+ .load_plane_csc_matrix = xelpd_load_plane_csc_matrix,
+ .load_plane_luts = xelpd_plane_load_luts,
};
static const struct intel_color_funcs icl_color_funcs = {
@@ -3962,6 +4237,67 @@ static const struct intel_color_funcs ilk_color_funcs = {
.get_config = ilk_get_config,
};
+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+ if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+ glk_lut_3d_commit(dsb, crtc, !!plane_state->hw.lut_3d);
+}
+
+static void
+intel_color_load_plane_csc_matrix(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+
+ if (display->funcs.color->load_plane_csc_matrix)
+ display->funcs.color->load_plane_csc_matrix(dsb, plane_state);
+}
+
+static void
+intel_color_load_plane_luts(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+
+ if (display->funcs.color->load_plane_luts)
+ display->funcs.color->load_plane_luts(dsb, plane_state);
+}
+
+bool
+intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe)
+{
+ if (DISPLAY_VER(display) >= 12)
+ return pipe == PIPE_A || pipe == PIPE_B;
+ else
+ return false;
+}
+
+static void
+intel_color_load_3dlut(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+ struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+
+ if (crtc && intel_color_crtc_has_3dlut(display, crtc->pipe))
+ glk_load_lut_3d(dsb, crtc, plane_state->hw.lut_3d);
+}
+
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state)
+{
+ if (plane_state->hw.ctm)
+ intel_color_load_plane_csc_matrix(dsb, plane_state);
+ if (plane_state->hw.degamma_lut || plane_state->hw.gamma_lut)
+ intel_color_load_plane_luts(dsb, plane_state);
+ if (plane_state->hw.lut_3d)
+ intel_color_load_3dlut(dsb, plane_state);
+}
+
void intel_color_crtc_init(struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index bf7a12ce9df0..c21b9bdf7bb8 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -13,7 +13,9 @@ struct intel_crtc_state;
struct intel_crtc;
struct intel_display;
struct intel_dsb;
+struct intel_plane_state;
struct drm_property_blob;
+enum pipe;
void intel_color_init_hooks(struct intel_display *display);
int intel_color_init(struct intel_display *display);
@@ -40,5 +42,9 @@ bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob2,
bool is_pre_csc_lut);
void intel_color_assert_luts(const struct intel_crtc_state *crtc_state);
-
+void intel_color_plane_program_pipeline(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
+void intel_color_plane_commit_arm(struct intel_dsb *dsb,
+ const struct intel_plane_state *plane_state);
+bool intel_color_crtc_has_3dlut(struct intel_display *display, enum pipe pipe);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.c b/drivers/gpu/drm/i915/display/intel_color_pipeline.c
new file mode 100644
index 000000000000..942d9b9c93ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#include "intel_color.h"
+#include "intel_colorop.h"
+#include "intel_color_pipeline.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "skl_universal_plane.h"
+
+#define MAX_COLOR_PIPELINES 1
+#define PLANE_DEGAMMA_SIZE 128
+#define PLANE_GAMMA_SIZE 32
+
+static
+int _intel_color_pipeline_plane_init(struct drm_plane *plane, struct drm_prop_enum_list *list,
+ enum pipe pipe)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_display *display = to_intel_display(dev);
+ struct drm_colorop *prev_op;
+ struct intel_colorop *colorop;
+ int ret;
+
+ colorop = intel_colorop_create(INTEL_PLANE_CB_PRE_CSC_LUT);
+
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+ PLANE_DEGAMMA_SIZE,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+
+ if (ret)
+ return ret;
+
+ list->type = colorop->base.base.id;
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", colorop->base.base.id);
+
+ /* TODO: handle failures and clean up */
+ prev_op = &colorop->base;
+
+ if (DISPLAY_VER(display) >= 35 &&
+ intel_color_crtc_has_3dlut(display, pipe) &&
+ plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ colorop = intel_colorop_create(INTEL_PLANE_CB_3DLUT);
+
+ ret = drm_plane_colorop_3dlut_init(dev, &colorop->base, plane, 17,
+ DRM_COLOROP_LUT3D_INTERPOLATION_TETRAHEDRAL,
+ true);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+
+ prev_op = &colorop->base;
+ }
+
+ colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
+ ret = drm_plane_colorop_ctm_3x4_init(dev, &colorop->base, plane,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+ prev_op = &colorop->base;
+
+ colorop = intel_colorop_create(INTEL_PLANE_CB_POST_CSC_LUT);
+ ret = drm_plane_colorop_curve_1d_lut_init(dev, &colorop->base, plane,
+ PLANE_GAMMA_SIZE,
+ DRM_COLOROP_LUT1D_INTERPOLATION_LINEAR,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ return ret;
+
+ drm_colorop_set_next_property(prev_op, &colorop->base);
+
+ return 0;
+}
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_display *display = to_intel_display(dev);
+ struct drm_prop_enum_list pipelines[MAX_COLOR_PIPELINES];
+ int len = 0;
+ int ret;
+
+ /* Currently expose pipeline only for HDR planes */
+ if (!icl_is_hdr_plane(display, to_intel_plane(plane)->id))
+ return 0;
+
+ /* Add pipeline consisting of transfer functions */
+ ret = _intel_color_pipeline_plane_init(plane, &pipelines[len], pipe);
+ if (ret)
+ return ret;
+ len++;
+
+ return drm_plane_create_color_pipeline_property(plane, pipelines, len);
+}
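Read top to bottom, _intel_color_pipeline_plane_init() links the colorops into one fixed chain through drm_colorop_set_next_property(); the 3D LUT stage exists only on display version 35+, and only for the primary plane of a pipe that has a 3D LUT:

	PRE_CSC_LUT (1D, 128 entries, linear)
	   -> [3D LUT, 17x17x17, tetrahedral]
	      -> CSC (3x4 CTM)
	         -> POST_CSC_LUT (1D, 32 entries, linear)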
diff --git a/drivers/gpu/drm/i915/display/intel_color_pipeline.h b/drivers/gpu/drm/i915/display/intel_color_pipeline.h
new file mode 100644
index 000000000000..a457d306da7f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_color_pipeline.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_PIPELINE_H__
+#define __INTEL_COLOR_PIPELINE_H__
+
+struct drm_plane;
+enum pipe;
+
+int intel_color_pipeline_plane_init(struct drm_plane *plane, enum pipe pipe);
+
+#endif /* __INTEL_COLOR_PIPELINE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_color_regs.h b/drivers/gpu/drm/i915/display/intel_color_regs.h
index 8eb643cfead7..c370b6029369 100644
--- a/drivers/gpu/drm/i915/display/intel_color_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_color_regs.h
@@ -316,4 +316,33 @@
#define SKL_BOTTOM_COLOR_CSC_ENABLE REG_BIT(30)
#define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE(pipe, _SKL_BOTTOM_COLOR_A, _SKL_BOTTOM_COLOR_B)
+/* 3D LUT */
+#define _LUT_3D_CTL_A 0x490A4
+#define _LUT_3D_CTL_B 0x491A4
+#define LUT_3D_CTL(pipe) _MMIO_PIPE(pipe, _LUT_3D_CTL_A, _LUT_3D_CTL_B)
+#define LUT_3D_ENABLE REG_BIT(31)
+#define LUT_3D_READY REG_BIT(30)
+#define LUT_3D_BINDING_MASK REG_GENMASK(23, 22)
+#define LUT_3D_BIND_PIPE REG_FIELD_PREP(LUT_3D_BINDING_MASK, 0)
+#define LUT_3D_BIND_PLANE_1 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 1)
+#define LUT_3D_BIND_PLANE_2 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 2)
+#define LUT_3D_BIND_PLANE_3 REG_FIELD_PREP(LUT_3D_BINDING_MASK, 3)
+
+#define _LUT_3D_INDEX_A 0x490A8
+#define _LUT_3D_INDEX_B 0x491A8
+#define LUT_3D_INDEX(pipe) _MMIO_PIPE(pipe, _LUT_3D_INDEX_A, _LUT_3D_INDEX_B)
+#define LUT_3D_AUTO_INCREMENT REG_BIT(13)
+#define LUT_3D_INDEX_VALUE_MASK REG_GENMASK(12, 0)
+#define LUT_3D_INDEX_VALUE(x) REG_FIELD_PREP(LUT_3D_INDEX_VALUE_MASK, (x))
+
+#define _LUT_3D_DATA_A 0x490AC
+#define _LUT_3D_DATA_B 0x491AC
+#define LUT_3D_DATA(pipe) _MMIO_PIPE(pipe, _LUT_3D_DATA_A, _LUT_3D_DATA_B)
+#define LUT_3D_DATA_RED_MASK REG_GENMASK(29, 20)
+#define LUT_3D_DATA_GREEN_MASK REG_GENMASK(19, 10)
+#define LUT_3D_DATA_BLUE_MASK REG_GENMASK(9, 0)
+#define LUT_3D_DATA_RED(x) REG_FIELD_PREP(LUT_3D_DATA_RED_MASK, (x))
+#define LUT_3D_DATA_GREEN(x) REG_FIELD_PREP(LUT_3D_DATA_GREEN_MASK, (x))
+#define LUT_3D_DATA_BLUE(x) REG_FIELD_PREP(LUT_3D_DATA_BLUE_MASK, (x))
+
#endif /* __INTEL_COLOR_REGS_H__ */
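These registers follow the usual index/data pattern: LUT_3D_INDEX selects an entry (optionally auto-incrementing), LUT_3D_DATA carries one 10-bit-per-channel RGB entry, and LUT_3D_CTL enables the LUT and binds it to the pipe or one of the planes. A minimal programming sketch under stated assumptions: the helper name is illustrative, the 17^3 size matches the plane colorop added earlier, 16-bit drm_color_lut values are truncated to 10 bits, and the real code's binding choice and LUT_3D_READY handshake may differ:

	static void example_write_lut_3d(struct intel_display *display,
					 enum pipe pipe,
					 const struct drm_color_lut *lut)
	{
		int i;

		/* Start at entry 0 and let the hardware advance the index. */
		intel_de_write(display, LUT_3D_INDEX(pipe),
			       LUT_3D_AUTO_INCREMENT | LUT_3D_INDEX_VALUE(0));

		/* 17x17x17 entries, 10 bits per channel in one dword. */
		for (i = 0; i < 17 * 17 * 17; i++)
			intel_de_write(display, LUT_3D_DATA(pipe),
				       LUT_3D_DATA_RED(lut[i].red >> 6) |
				       LUT_3D_DATA_GREEN(lut[i].green >> 6) |
				       LUT_3D_DATA_BLUE(lut[i].blue >> 6));

		intel_de_write(display, LUT_3D_CTL(pipe),
			       LUT_3D_ENABLE | LUT_3D_BIND_PLANE_1);
	}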
diff --git a/drivers/gpu/drm/i915/display/intel_colorop.c b/drivers/gpu/drm/i915/display/intel_colorop.c
new file mode 100644
index 000000000000..f2fc0d8780ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_colorop.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#include "intel_colorop.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop)
+{
+ return container_of(colorop, struct intel_colorop, base);
+}
+
+struct intel_colorop *intel_colorop_alloc(void)
+{
+ struct intel_colorop *colorop;
+
+ colorop = kzalloc(sizeof(*colorop), GFP_KERNEL);
+ if (!colorop)
+ return ERR_PTR(-ENOMEM);
+
+ return colorop;
+}
+
+struct intel_colorop *intel_colorop_create(enum intel_color_block id)
+{
+ struct intel_colorop *colorop;
+
+ colorop = intel_colorop_alloc();
+
+ if (IS_ERR(colorop))
+ return colorop;
+
+ colorop->id = id;
+
+ return colorop;
+}
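Note that intel_colorop_create() never returns NULL; allocation failure comes back as ERR_PTR(-ENOMEM), so callers check with IS_ERR(), roughly like this (the surrounding function is hypothetical):

	struct intel_colorop *colorop;

	colorop = intel_colorop_create(INTEL_PLANE_CB_CSC);
	if (IS_ERR(colorop))
		return PTR_ERR(colorop);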
diff --git a/drivers/gpu/drm/i915/display/intel_colorop.h b/drivers/gpu/drm/i915/display/intel_colorop.h
new file mode 100644
index 000000000000..21d58eb9f3d0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_colorop.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOROP_H__
+#define __INTEL_COLOROP_H__
+
+#include "intel_display_types.h"
+
+struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop);
+struct intel_colorop *intel_colorop_alloc(void);
+struct intel_colorop *intel_colorop_create(enum intel_color_block id);
+
+#endif /* __INTEL_COLOROP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 112749f97c26..f401558ac14e 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -5,12 +5,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#define for_each_combo_phy(__display, __phy) \
for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 42c923f416b3..913d90a7a508 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -28,10 +28,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
-#include "i915_utils.h"
+#include "i915_utils.h" /* for i915_inject_probe_failure() */
#include "intel_connector.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
@@ -77,7 +78,7 @@ void intel_connector_cancel_modeset_retry_work(struct intel_connector *connector
drm_connector_put(&connector->base);
}
-int intel_connector_init(struct intel_connector *connector)
+static int intel_connector_init(struct intel_connector *connector)
{
struct intel_digital_connector_state *conn_state;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h
index aafb25a814fa..0aa86626e646 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.h
+++ b/drivers/gpu/drm/i915/display/intel_connector.h
@@ -14,7 +14,6 @@ struct i2c_adapter;
struct intel_connector;
struct intel_encoder;
-int intel_connector_init(struct intel_connector *connector);
struct intel_connector *intel_connector_alloc(void);
void intel_connector_free(struct intel_connector *connector);
void intel_connector_destroy(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 898c5d9e8f7a..82e89cdbe5a5 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -50,6 +50,7 @@
#include "intel_gmbus.h"
#include "intel_hotplug.h"
#include "intel_hotplug_irq.h"
+#include "intel_link_bw.h"
#include "intel_load_detect.h"
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
@@ -421,7 +422,7 @@ static int pch_crt_compute_config(struct intel_encoder *encoder,
return -EINVAL;
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -446,7 +447,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
return -EINVAL;
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -497,10 +498,10 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, crt->adpa_reg, adpa);
- if (intel_de_wait_for_clear(display,
- crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
- 1000))
+ if (intel_de_wait_for_clear_ms(display,
+ crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
+ 1000))
drm_dbg_kms(display->drm,
"timed out waiting for FORCE_TRIGGER");
@@ -552,8 +553,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
intel_de_write(display, crt->adpa_reg, adpa);
- if (intel_de_wait_for_clear(display, crt->adpa_reg,
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
+ if (intel_de_wait_for_clear_ms(display, crt->adpa_reg,
+ ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
drm_dbg_kms(display->drm,
"timed out waiting for FORCE_TRIGGER");
intel_de_write(display, crt->adpa_reg, save_adpa);
@@ -603,8 +604,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_de_wait_for_clear(display, PORT_HOTPLUG_EN(display),
- CRT_HOTPLUG_FORCE_DETECT, 1000))
+ if (intel_de_wait_for_clear_ms(display, PORT_HOTPLUG_EN(display),
+ CRT_HOTPLUG_FORCE_DETECT, 1000))
drm_dbg_kms(display->drm,
"timed out waiting for FORCE_DETECT to go off");
}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index a187db6df2d3..9d2a23c96c61 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
@@ -84,8 +85,13 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
if (!crtc->active)
return 0;
- if (!vblank->max_vblank_count)
- return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+ if (!vblank->max_vblank_count) {
+ /*
+ * On PREEMPT_RT we cannot take the vblank spinlock
+ * since this function is called from tracepoints.
+ */

+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ return (u32)drm_crtc_vblank_count(&crtc->base);
+ else
+ return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
+ }
return crtc->base.funcs->get_vblank_counter(&crtc->base);
}
@@ -390,6 +396,9 @@ int intel_crtc_init(struct intel_display *display, enum pipe pipe)
drm_WARN_ON(display->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
+ if (HAS_CASF(display))
+ drm_crtc_create_sharpness_strength_property(&crtc->base);
+
return 0;
fail:
@@ -748,3 +757,89 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
out:
intel_psr_unlock(new_crtc_state);
}
+
+bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->hw.enable != new_crtc_state->hw.enable;
+}
+
+bool intel_any_crtc_enable_changed(struct intel_atomic_state *state)
+{
+ const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_enable_changed(old_crtc_state, new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ return old_crtc_state->hw.active != new_crtc_state->hw.active;
+}
+
+bool intel_any_crtc_active_changed(struct intel_atomic_state *state)
+{
+ const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (intel_crtc_active_changed(old_crtc_state, new_crtc_state))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
+}
+
+unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int data_rate = 0;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ data_rate += crtc_state->data_rate[plane_id];
+
+ if (DISPLAY_VER(display) < 11)
+ data_rate += crtc_state->data_rate_y[plane_id];
+ }
+
+ return data_rate;
+}
+
+/* "Maximum Pipe Read Bandwidth" */
+int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) < 12)
+ return 0;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(intel_crtc_bw_data_rate(crtc_state), 10), 512);
+}
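The bandwidth helpers reduce to simple arithmetic: intel_crtc_bw_num_active_planes() is a popcount of active_planes with the cursor bit masked off, and the cdclk floor works out to ceil(data_rate * 10 / 512). A quick worked example with an assumed rate (units are whatever data_rate is expressed in): for data_rate = 1,048,576 the result is DIV_ROUND_UP(10,485,760, 512) = 20,480.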
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h
index 8c14ff8b391e..07917e8a9ae3 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.h
+++ b/drivers/gpu/drm/i915/display/intel_crtc.h
@@ -58,4 +58,15 @@ void intel_wait_for_vblank_if_active(struct intel_display *display,
enum pipe pipe);
void intel_crtc_wait_for_next_vblank(struct intel_crtc *crtc);
+bool intel_any_crtc_enable_changed(struct intel_atomic_state *state);
+bool intel_crtc_enable_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state);
+bool intel_any_crtc_active_changed(struct intel_atomic_state *state);
+bool intel_crtc_active_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state);
+
+unsigned int intel_crtc_bw_num_active_planes(const struct intel_crtc_state *crtc_state);
+unsigned int intel_crtc_bw_data_rate(const struct intel_crtc_state *crtc_state);
+int intel_crtc_bw_min_cdclk(const struct intel_crtc_state *crtc_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 0c7f91046996..c2a6217c2262 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -289,10 +289,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "scanline offset: %d\n",
intel_crtc_scanline_offset(pipe_config));
- drm_printf(&p, "vblank delay: %d, framestart delay: %d, MSA timing delay: %d\n",
- pipe_config->hw.adjusted_mode.crtc_vblank_start -
- pipe_config->hw.adjusted_mode.crtc_vdisplay,
- pipe_config->framestart_delay, pipe_config->msa_timing_delay);
+ drm_printf(&p, "framestart delay: %d, MSA timing delay: %d, set context latency: %d\n",
+ pipe_config->framestart_delay, pipe_config->msa_timing_delay,
+ pipe_config->set_context_latency);
drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n",
str_yes_no(pipe_config->vrr.enable),
@@ -313,9 +312,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_printf(&p, "pipe mode: " DRM_MODE_FMT "\n",
DRM_MODE_ARG(&pipe_config->hw.pipe_mode));
intel_dump_crtc_timings(&p, &pipe_config->hw.pipe_mode);
- drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d\n",
+ drm_printf(&p, "port clock: %d, pipe src: " DRM_RECT_FMT ", pixel rate %d, min cdclk %d\n",
pipe_config->port_clock, DRM_RECT_ARG(&pipe_config->pipe_src),
- pipe_config->pixel_rate);
+ pipe_config->pixel_rate, pipe_config->min_cdclk);
drm_printf(&p, "linetime: %d, ips linetime: %d\n",
pipe_config->linetime, pipe_config->ips_linetime);
@@ -373,6 +372,11 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_vdsc_state_dump(&p, 0, pipe_config);
+ drm_printf(&p, "sharpness strength: %d, sharpness tap size: %d, sharpness enable: %d\n",
+ pipe_config->hw.casf_params.strength,
+ pipe_config->hw.casf_params.win_size,
+ pipe_config->hw.casf_params.casf_enable);
+
dump_planes:
if (!state)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 198e69efe9ac..a10b2425b94d 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -12,13 +12,13 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cursor.h"
#include "intel_cursor_regs.h"
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
@@ -33,17 +33,9 @@ static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+static u32 intel_cursor_surf_offset(const struct intel_plane_state *plane_state)
{
- struct intel_display *display = to_intel_display(plane_state);
- u32 base;
-
- if (DISPLAY_INFO(display)->cursor_needs_physical)
- base = plane_state->phys_dma_addr;
- else
- base = intel_plane_ggtt_offset(plane_state);
-
- return base + plane_state->view.color_plane[0].offset;
+ return plane_state->view.color_plane[0].offset;
}
static u32 intel_cursor_position(const struct intel_crtc_state *crtc_state,
@@ -190,8 +182,8 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
static unsigned int
i845_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
return 2048;
}
@@ -213,8 +205,7 @@ static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
return cntl;
}
-static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i845_cursor_ctl(const struct intel_plane_state *plane_state)
{
return CURSOR_ENABLE |
CURSOR_FORMAT_ARGB |
@@ -274,7 +265,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+ plane_state->ctl = i845_cursor_ctl(plane_state);
return 0;
}
@@ -297,7 +288,7 @@ static void i845_cursor_update_arm(struct intel_dsb *dsb,
size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width);
- base = intel_cursor_base(plane_state);
+ base = plane_state->surf;
pos = intel_cursor_position(crtc_state, plane_state, false);
}
@@ -352,8 +343,8 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
static unsigned int
i9xx_cursor_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
return plane->base.dev->mode_config.cursor_width * 4;
}
@@ -406,8 +397,7 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
return cntl;
}
-static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_cursor_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
u32 cntl = 0;
@@ -534,7 +524,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
- plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+ plane_state->ctl = i9xx_cursor_ctl(plane_state);
return 0;
}
@@ -672,10 +662,10 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb,
cntl = plane_state->ctl |
i9xx_cursor_ctl_crtc(crtc_state);
- if (width != height)
+ if (DISPLAY_VER(display) < 14 && width != height)
fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1);
- base = intel_cursor_base(plane_state);
+ base = plane_state->surf;
pos = intel_cursor_position(crtc_state, plane_state, false);
}
@@ -1051,6 +1041,8 @@ intel_cursor_plane_create(struct intel_display *display,
cursor->check_plane = i9xx_check_cursor;
}
+ cursor->surf_offset = intel_cursor_surf_offset;
+
if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
cursor->capture_error = g4x_cursor_capture_error;
else
@@ -1100,3 +1092,23 @@ fail:
return ERR_PTR(ret);
}
+
+void intel_cursor_mode_config_init(struct intel_display *display)
+{
+ struct drm_mode_config *mode_config = &display->drm->mode_config;
+
+ if (display->platform.i845g) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 1023;
+ } else if (display->platform.i865g) {
+ mode_config->cursor_width = 512;
+ mode_config->cursor_height = 1023;
+ } else if (display->platform.i830 || display->platform.i85x ||
+ display->platform.i915g || display->platform.i915gm) {
+ mode_config->cursor_width = 64;
+ mode_config->cursor_height = 64;
+ } else {
+ mode_config->cursor_width = 256;
+ mode_config->cursor_height = 256;
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h
index 65a9e7eb88c2..7c269d7381ad 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.h
+++ b/drivers/gpu/drm/i915/display/intel_cursor.h
@@ -17,4 +17,6 @@ intel_cursor_plane_create(struct intel_display *display,
void intel_cursor_unpin_work(struct kthread_work *base);
+void intel_cursor_mode_config_init(struct intel_display *display);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 801235a5bc0a..d98b4cf6b60e 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -8,7 +8,6 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
@@ -16,16 +15,15 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
+#include "intel_lt_phy.h"
#include "intel_panel.h"
#include "intel_psr.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_tc.h"
-#define MB_WRITE_COMMITTED true
-#define MB_WRITE_UNCOMMITTED false
-
#define for_each_cx0_lane_in_mask(__lane_mask, __lane) \
for ((__lane) = 0; (__lane) < 2; (__lane)++) \
for_each_if((__lane_mask) & BIT(__lane))
@@ -39,14 +37,12 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
- /* PTL doesn't have a PHY connected to PORT B; as such,
- * there will never be a case where PTL uses PHY B.
- * WCL uses PORT A and B with the C10 PHY.
- * Reusing the condition for WCL and extending it for PORT B
- * should not cause any issues for PTL.
- */
- if (display->platform.pantherlake && phy < PHY_C)
- return true;
+ if (display->platform.pantherlake) {
+ if (display->platform.pantherlake_wildcatlake)
+ return phy <= PHY_B;
+ else
+ return phy == PHY_A;
+ }
if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
return true;
@@ -130,8 +126,8 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, intel_w
intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}
-static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
- int lane)
+void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane)
{
struct intel_display *display = to_intel_display(encoder);
@@ -140,7 +136,7 @@ static void intel_clear_response_ready_flag(struct intel_encoder *encoder,
0, XELPDP_PORT_P2M_RESPONSE_READY | XELPDP_PORT_P2M_ERROR_SET);
}
-static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
+void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -149,9 +145,9 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_RESET);
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_RESET,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_RESET,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_err_once(display->drm,
"Failed to bring PHY %c to idle.\n",
phy_name(phy));
@@ -161,19 +157,17 @@ static void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
intel_clear_response_ready_flag(encoder, lane);
}
-static int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
- int command, int lane, u32 *val)
+int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
+ int command, int lane, u32 *val)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
enum phy phy = intel_encoder_to_phy(encoder);
- if (intel_de_wait_custom(display,
- XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane),
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_PORT_P2M_RESPONSE_READY,
- XELPDP_MSGBUS_TIMEOUT_FAST_US,
- XELPDP_MSGBUS_TIMEOUT_SLOW, val)) {
+ if (intel_de_wait_ms(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_PORT_P2M_RESPONSE_READY,
+ XELPDP_MSGBUS_TIMEOUT_MS, val)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for message ACK. Status: 0x%x\n",
phy_name(phy), *val);
@@ -218,9 +212,9 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder,
int ack;
u32 val;
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus and retry.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
@@ -273,8 +267,7 @@ static u8 __intel_cx0_read(struct intel_encoder *encoder,
return 0;
}
-static u8 intel_cx0_read(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr)
+u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
{
int lane = lane_mask_to_lane(lane_mask);
@@ -290,9 +283,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
int ack;
u32 val;
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
@@ -306,9 +299,9 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
XELPDP_PORT_M2P_DATA(data) |
XELPDP_PORT_M2P_ADDRESS(addr));
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for write to complete. Resetting the bus.\n", phy_name(phy));
intel_cx0_bus_reset(encoder, lane);
@@ -361,8 +354,8 @@ static void __intel_cx0_write(struct intel_encoder *encoder,
"PHY %c Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
}
-static void intel_cx0_write(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr, u8 data, bool committed)
+void intel_cx0_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed)
{
int lane;
@@ -414,8 +407,8 @@ static void __intel_cx0_rmw(struct intel_encoder *encoder,
__intel_cx0_write(encoder, lane, addr, val, committed);
}
-static void intel_cx0_rmw(struct intel_encoder *encoder,
- u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
+void intel_cx0_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
{
u8 lane;
@@ -2105,6 +2098,9 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
return 0;
}
+static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state);
+
static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_c10pll_state *pll_state)
{
@@ -2129,6 +2125,8 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
pll_state->tx = intel_cx0_read(encoder, lane, PHY_C10_VDR_TX(0));
intel_cx0_phy_transaction_end(encoder, wakeref);
+
+ pll_state->clock = intel_c10pll_calc_port_clock(encoder, pll_state);
}
static void intel_c10_pll_program(struct intel_display *display,
@@ -2587,20 +2585,6 @@ static bool is_dp2(u32 clock)
return false;
}
-static bool is_hdmi_frl(u32 clock)
-{
- switch (clock) {
- case 300000: /* 3 Gbps */
- case 600000: /* 6 Gbps */
- case 800000: /* 8 Gbps */
- case 1000000: /* 10 Gbps */
- case 1200000: /* 12 Gbps */
- return true;
- default:
- return false;
- }
-}
-
static bool intel_c20_protocol_switch_valid(struct intel_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
@@ -2614,7 +2598,7 @@ static int intel_get_c20_custom_width(u32 clock, bool dp)
{
if (dp && is_dp2(clock))
return 2;
- else if (is_hdmi_frl(clock))
+ else if (intel_hdmi_is_frl(clock))
return 1;
else
return 0;
@@ -2626,11 +2610,13 @@ static void intel_c20_pll_program(struct intel_display *display,
bool is_dp, int port_clock)
{
u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder);
+ u8 serdes;
bool cntx;
int i;
/* 1. Read current context selection */
- cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) & BIT(0);
+ cntx = intel_cx0_read(encoder, INTEL_CX0_LANE0, PHY_C20_VDR_CUSTOM_SERDES_RATE) &
+ PHY_C20_CONTEXT_TOGGLE;
/*
* 2. If there is a protocol switch from HDMI to DP or vice versa, clear
@@ -2700,28 +2686,31 @@ static void intel_c20_pll_program(struct intel_display *display,
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
- if (is_dp) {
- intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
- BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(port_clock)),
- MB_WRITE_COMMITTED);
- } else {
- intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
- is_hdmi_frl(port_clock) ? BIT(7) : 0,
- MB_WRITE_COMMITTED);
+ serdes = 0;
+ if (is_dp)
+ serdes = PHY_C20_IS_DP |
+ PHY_C20_DP_RATE(intel_c20_get_dp_rate(port_clock));
+ else if (intel_hdmi_is_frl(port_clock))
+ serdes = PHY_C20_IS_HDMI_FRL;
- intel_cx0_write(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
- intel_c20_get_hdmi_rate(port_clock),
- MB_WRITE_COMMITTED);
- }
+ intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
+ PHY_C20_IS_DP | PHY_C20_DP_RATE_MASK | PHY_C20_IS_HDMI_FRL,
+ serdes,
+ MB_WRITE_COMMITTED);
+
+ if (!is_dp)
+ intel_cx0_rmw(encoder, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
+ PHY_C20_HDMI_RATE_MASK,
+ intel_c20_get_hdmi_rate(port_clock),
+ MB_WRITE_COMMITTED);
/*
* 7. Write Vendor specific registers to toggle context setting to load
* the updated programming toggle context bit
*/
intel_cx0_rmw(encoder, owned_lane_mask, PHY_C20_VDR_CUSTOM_SERDES_RATE,
- BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
+ PHY_C20_CONTEXT_TOGGLE, cntx ? 0 : PHY_C20_CONTEXT_TOGGLE,
+ MB_WRITE_COMMITTED);
}
static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
@@ -2768,7 +2757,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
- if (!is_dp && is_hdmi_frl(port_clock))
+ if (!is_dp && intel_hdmi_is_frl(port_clock))
val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
@@ -2808,8 +2797,8 @@ static u32 intel_cx0_get_powerdown_state(u8 lane_mask, u8 state)
return val;
}
-static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
- u8 lane_mask, u8 state)
+void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
@@ -2823,9 +2812,9 @@ static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
/* Wait for pending transactions.*/
for_each_cx0_lane_in_mask(lane_mask, lane)
- if (intel_de_wait_for_clear(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
- XELPDP_PORT_M2P_TRANSACTION_PENDING,
- XELPDP_MSGBUS_TIMEOUT_SLOW)) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_M2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
drm_dbg_kms(display->drm,
"PHY %c Timeout waiting for previous transaction to complete. Reset the bus.\n",
phy_name(phy));
@@ -2837,26 +2826,26 @@ static void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
intel_cx0_get_powerdown_update(lane_mask));
/* Update Timeout Value */
- if (intel_de_wait_custom(display, buf_ctl2_reg,
- intel_cx0_get_powerdown_update(lane_mask), 0,
- XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_for_clear_ms(display, buf_ctl2_reg,
+ intel_cx0_get_powerdown_update(lane_mask),
+ XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS))
drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dus.\n",
- phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
+ "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
}
-static void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
+void intel_cx0_setup_powerdown(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
XELPDP_POWER_STATE_READY_MASK,
- XELPDP_POWER_STATE_READY(CX0_P2_STATE_READY));
+ XELPDP_POWER_STATE_READY(XELPDP_P2_STATE_READY));
intel_de_rmw(display, XELPDP_PORT_BUF_CTL3(display, port),
XELPDP_POWER_STATE_ACTIVE_MASK |
XELPDP_PLL_LANE_STAGGERING_DELAY_MASK,
- XELPDP_POWER_STATE_ACTIVE(CX0_P0_STATE_ACTIVE) |
+ XELPDP_POWER_STATE_ACTIVE(XELPDP_P0_STATE_ACTIVE) |
XELPDP_PLL_LANE_STAGGERING_DELAY(0));
}
@@ -2898,48 +2887,47 @@ static void intel_cx0_phy_lane_reset(struct intel_encoder *encoder,
XELPDP_LANE_PHY_CURRENT_STATUS(1))
: XELPDP_LANE_PHY_CURRENT_STATUS(0);
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL1(display, port),
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_PHY_READY,
- XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XELPDP_PORT_BUF_SOC_PHY_READY,
+ XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US))
drm_warn(display->drm,
- "PHY %c failed to bring out of SOC reset after %dus.\n",
- phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
+ "PHY %c failed to bring out of SOC reset\n",
+ phy_name(phy));
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset,
lane_pipe_reset);
- if (intel_de_wait_custom(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_current_status, lane_phy_current_status,
- XELPDP_PORT_RESET_START_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XELPDP_PORT_RESET_START_TIMEOUT_US))
drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dus.\n",
- phy_name(phy), XELPDP_PORT_RESET_START_TIMEOUT_US);
+ "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
intel_cx0_get_pclk_refclk_request(owned_lane_mask),
intel_cx0_get_pclk_refclk_request(lane_mask));
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, port),
- intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
- intel_cx0_get_pclk_refclk_ack(lane_mask),
- XELPDP_REFCLK_ENABLE_TIMEOUT_US, 0, NULL))
+ if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ intel_cx0_get_pclk_refclk_ack(owned_lane_mask),
+ intel_cx0_get_pclk_refclk_ack(lane_mask),
+ XELPDP_REFCLK_ENABLE_TIMEOUT_US, NULL))
drm_warn(display->drm,
- "PHY %c failed to request refclk after %dus.\n",
- phy_name(phy), XELPDP_REFCLK_ENABLE_TIMEOUT_US);
+ "PHY %c failed to request refclk\n",
+ phy_name(phy));
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
- CX0_P2_STATE_RESET);
+ XELPDP_P2_STATE_RESET);
intel_cx0_setup_powerdown(encoder);
intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset, 0);
- if (intel_de_wait_for_clear(display, XELPDP_PORT_BUF_CTL2(display, port),
- lane_phy_current_status,
- XELPDP_PORT_RESET_END_TIMEOUT))
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XELPDP_PORT_RESET_END_TIMEOUT_MS))
drm_warn(display->drm,
- "PHY %c failed to bring out of Lane reset after %dms.\n",
- phy_name(phy), XELPDP_PORT_RESET_END_TIMEOUT);
+ "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
}
static void intel_cx0_program_phy_lane(struct intel_encoder *encoder, int lane_count,
@@ -3034,7 +3022,7 @@ static void __intel_cx0pll_enable(struct intel_encoder *encoder,
* TODO: For DP alt mode use only one lane.
*/
intel_cx0_powerdown_change_sequence(encoder, INTEL_CX0_BOTH_LANES,
- CX0_P2_STATE_READY);
+ XELPDP_P2_STATE_READY);
/*
* 4. Program PORT_MSGBUS_TIMER register's Message Bus Timer field to 0xA000.
@@ -3074,12 +3062,12 @@ static void __intel_cx0pll_enable(struct intel_encoder *encoder,
intel_cx0_get_pclk_pll_request(maxpclk_lane));
/* 10. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK> == "1". */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
- intel_cx0_get_pclk_pll_ack(maxpclk_lane),
- XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, 0, NULL))
- drm_warn(display->drm, "Port %c PLL not locked after %dus.\n",
- phy_name(phy), XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US);
+ if (intel_de_wait_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES),
+ intel_cx0_get_pclk_pll_ack(maxpclk_lane),
+ XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US, NULL))
+ drm_warn(display->drm, "Port %c PLL not locked\n",
+ phy_name(phy));
/*
* 11. Follow the Display Voltage Frequency Switching Sequence After
@@ -3160,8 +3148,8 @@ static int intel_mtl_tbt_clock_select(struct intel_display *display,
}
}
-static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -3198,12 +3186,9 @@ static void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
intel_de_write(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port), val);
/* 5. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "1". */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_TBT_CLOCK_ACK,
- XELPDP_TBT_CLOCK_ACK,
- 100, 0, NULL))
- drm_warn(display->drm,
- "[ENCODER:%d:%s][%c] PHY PLL not locked after 100us.\n",
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_TBT_CLOCK_ACK, 100))
+ drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not locked\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
/*
@@ -3275,13 +3260,13 @@ static u8 cx0_power_control_disable_val(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
if (intel_encoder_is_c10phy(encoder))
- return CX0_P2PG_STATE_DISABLE;
+ return XELPDP_P2PG_STATE_DISABLE;
if ((display->platform.battlemage && encoder->port == PORT_A) ||
(DISPLAY_VER(display) >= 30 && encoder->type == INTEL_OUTPUT_EDP))
- return CX0_P2PG_STATE_DISABLE;
+ return XELPDP_P2PG_STATE_DISABLE;
- return CX0_P4PG_STATE_DISABLE;
+ return XELPDP_P4PG_STATE_DISABLE;
}
static void intel_cx0pll_disable(struct intel_encoder *encoder)
@@ -3313,13 +3298,12 @@ static void intel_cx0pll_disable(struct intel_encoder *encoder)
/*
* 5. Poll on PORT_CLOCK_CTL PCLK PLL Ack LN<Lane for maxPCLK**> == "0".
*/
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
- intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES), 0,
- XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US, 0, NULL))
- drm_warn(display->drm,
- "Port %c PLL not unlocked after %dus.\n",
- phy_name(phy), XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US);
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ intel_cx0_get_pclk_pll_ack(INTEL_CX0_BOTH_LANES) |
+ intel_cx0_get_pclk_refclk_ack(INTEL_CX0_BOTH_LANES),
+ XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US))
+ drm_warn(display->drm, "Port %c PLL not unlocked\n",
+ phy_name(phy));
/*
* 6. Follow the Display Voltage Frequency Switching Sequence After
@@ -3345,7 +3329,7 @@ static bool intel_cx0_pll_is_enabled(struct intel_encoder *encoder)
intel_cx0_get_pclk_pll_request(lane);
}
-static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
+void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
@@ -3362,10 +3346,9 @@ static void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder)
XELPDP_TBT_CLOCK_REQUEST, 0);
/* 3. Poll on PORT_CLOCK_CTL TBT CLOCK Ack == "0". */
- if (intel_de_wait_custom(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
- XELPDP_TBT_CLOCK_ACK, 0, 10, 0, NULL))
- drm_warn(display->drm,
- "[ENCODER:%d:%s][%c] PHY PLL not unlocked after 10us.\n",
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_TBT_CLOCK_ACK, 10))
+ drm_warn(display->drm, "[ENCODER:%d:%s][%c] PHY PLL not unlocked\n",
encoder->base.base.id, encoder->base.name, phy_name(phy));
/*
@@ -3584,7 +3567,7 @@ void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_encoder *encoder;
struct intel_cx0pll_state mpll_hw_state = {};
- if (DISPLAY_VER(display) < 14)
+ if (!IS_DISPLAY_VER(display, 14, 30))
return;
if (!new_crtc_state->hw.active)
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index c5a7b529955b..84d334b865f7 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -8,6 +8,9 @@
#include <linux/types.h>
+#define MB_WRITE_COMMITTED true
+#define MB_WRITE_UNCOMMITTED false
+
enum icl_port_dpll_id;
struct intel_atomic_state;
struct intel_c10pll_state;
@@ -19,6 +22,8 @@ struct intel_display;
struct intel_encoder;
struct intel_hdmi;
+void intel_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane);
bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
@@ -41,9 +46,25 @@ bool intel_cx0pll_compare_hw_state(const struct intel_cx0pll_state *a,
const struct intel_cx0pll_state *b);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_cx0_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state);
+int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
+void intel_cx0_setup_powerdown(struct intel_encoder *encoder);
+bool intel_cx0_is_hdmi_frl(u32 clock);
+u8 intel_cx0_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr);
+void intel_cx0_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed);
+void intel_cx0_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed);
+int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
+ int command, int lane, u32 *val);
+void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
void intel_cx0_pll_power_save_wa(struct intel_display *display);
void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_mtl_tbt_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_mtl_tbt_pll_disable(struct intel_encoder *encoder);
#endif /* __INTEL_CX0_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 77eae1d845f7..8df5cd5ce418 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -50,6 +50,7 @@
#define XELPDP_PORT_M2P_COMMAND_WRITE_UNCOMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x1)
#define XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x2)
#define XELPDP_PORT_M2P_COMMAND_READ REG_FIELD_PREP(XELPDP_PORT_M2P_COMMAND_TYPE_MASK, 0x3)
+#define XELPDP_PORT_P2P_TRANSACTION_PENDING REG_BIT(24)
#define XELPDP_PORT_M2P_DATA_MASK REG_GENMASK(23, 16)
#define XELPDP_PORT_M2P_DATA(val) REG_FIELD_PREP(XELPDP_PORT_M2P_DATA_MASK, val)
#define XELPDP_PORT_M2P_TRANSACTION_RESET REG_BIT(15)
@@ -73,14 +74,13 @@
#define XELPDP_PORT_P2M_DATA(val) REG_FIELD_PREP(XELPDP_PORT_P2M_DATA_MASK, val)
#define XELPDP_PORT_P2M_ERROR_SET REG_BIT(15)
-#define XELPDP_MSGBUS_TIMEOUT_SLOW 1
-#define XELPDP_MSGBUS_TIMEOUT_FAST_US 2
+#define XELPDP_MSGBUS_TIMEOUT_MS 1
#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US 3200
#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US 20
#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US 100
#define XELPDP_PORT_RESET_START_TIMEOUT_US 5
-#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_US 100
-#define XELPDP_PORT_RESET_END_TIMEOUT 15
+#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS 2
+#define XELPDP_PORT_RESET_END_TIMEOUT_MS 15
#define XELPDP_REFCLK_ENABLE_TIMEOUT_US 1
#define _XELPDP_PORT_BUF_CTL1_LN0_A 0x64004
@@ -104,6 +104,8 @@
#define XELPDP_PORT_BUF_PORT_DATA_20BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 1)
#define XELPDP_PORT_BUF_PORT_DATA_40BIT REG_FIELD_PREP(XELPDP_PORT_BUF_PORT_DATA_WIDTH_MASK, 2)
#define XELPDP_PORT_REVERSAL REG_BIT(16)
+#define XE3PLPDP_PHY_MODE_MASK REG_GENMASK(15, 12)
+#define XE3PLPDP_PHY_MODE_DP REG_FIELD_PREP(XE3PLPDP_PHY_MODE_MASK, 0x3)
#define XELPDP_PORT_BUF_IO_SELECT_TBT REG_BIT(11)
#define XELPDP_PORT_BUF_PHY_IDLE REG_BIT(7)
#define XELPDP_TC_PHY_OWNERSHIP REG_BIT(6)
@@ -124,6 +126,7 @@
_XELPDP_PORT_BUF_CTL2(port))
#define XELPDP_LANE_PIPE_RESET(lane) _PICK(lane, REG_BIT(31), REG_BIT(30))
#define XELPDP_LANE_PHY_CURRENT_STATUS(lane) _PICK(lane, REG_BIT(29), REG_BIT(28))
+#define XE3PLPDP_LANE_PHY_PULSE_STATUS(lane) _PICK(lane, REG_BIT(27), REG_BIT(26))
#define XELPDP_LANE_POWERDOWN_UPDATE(lane) _PICK(lane, REG_BIT(25), REG_BIT(24))
#define _XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK REG_GENMASK(23, 20)
#define _XELPDP_LANE0_POWERDOWN_NEW_STATE(val) REG_FIELD_PREP(_XELPDP_LANE0_POWERDOWN_NEW_STATE_MASK, val)
@@ -149,11 +152,12 @@
#define XELPDP_PLL_LANE_STAGGERING_DELAY(val) REG_FIELD_PREP(XELPDP_PLL_LANE_STAGGERING_DELAY_MASK, val)
#define XELPDP_POWER_STATE_ACTIVE_MASK REG_GENMASK(3, 0)
#define XELPDP_POWER_STATE_ACTIVE(val) REG_FIELD_PREP(XELPDP_POWER_STATE_ACTIVE_MASK, val)
-#define CX0_P0_STATE_ACTIVE 0x0
-#define CX0_P2_STATE_READY 0x2
-#define CX0_P2PG_STATE_DISABLE 0x9
-#define CX0_P4PG_STATE_DISABLE 0xC
-#define CX0_P2_STATE_RESET 0x2
+#define XELPDP_P0_STATE_ACTIVE 0x0
+#define XELPDP_P2_STATE_READY 0x2
+#define XE3PLPDP_P4_STATE_DISABLE 0x4
+#define XELPDP_P2PG_STATE_DISABLE 0x9
+#define XELPDP_P4PG_STATE_DISABLE 0xC
+#define XELPDP_P2_STATE_RESET 0x2
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_A 0x640d8
#define _XELPDP_PORT_MSGBUS_TIMER_LN0_B 0x641d8
@@ -298,10 +302,14 @@
#define PHY_C20_RD_DATA_L 0xC08
#define PHY_C20_RD_DATA_H 0xC09
#define PHY_C20_VDR_CUSTOM_SERDES_RATE 0xD00
-#define PHY_C20_VDR_HDMI_RATE 0xD01
+#define PHY_C20_IS_HDMI_FRL REG_BIT8(7)
+#define PHY_C20_IS_DP REG_BIT8(6)
+#define PHY_C20_DP_RATE_MASK REG_GENMASK8(4, 1)
+#define PHY_C20_DP_RATE(val) REG_FIELD_PREP8(PHY_C20_DP_RATE_MASK, val)
#define PHY_C20_CONTEXT_TOGGLE REG_BIT8(0)
-#define PHY_C20_CUSTOM_SERDES_MASK REG_GENMASK8(4, 1)
-#define PHY_C20_CUSTOM_SERDES(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_SERDES_MASK, val)
+#define PHY_C20_VDR_HDMI_RATE 0xD01
+#define PHY_C20_HDMI_RATE_MASK REG_GENMASK8(1, 0)
+#define PHY_C20_HDMI_RATE(val) REG_FIELD_PREP8(PHY_C20_HDMI_RATE_MASK, val)
#define PHY_C20_VDR_CUSTOM_WIDTH 0xD02
#define PHY_C20_CUSTOM_WIDTH_MASK REG_GENMASK(1, 0)
#define PHY_C20_CUSTOM_WIDTH(val) REG_FIELD_PREP8(PHY_C20_CUSTOM_WIDTH_MASK, val)
diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.c b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c
new file mode 100644
index 000000000000..8b8894c37f63
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "intel_dbuf_bw.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "skl_watermark.h"
+
+struct intel_dbuf_bw {
+ unsigned int max_bw[I915_MAX_DBUF_SLICES];
+ u8 active_planes[I915_MAX_DBUF_SLICES];
+};
+
+struct intel_dbuf_bw_state {
+ struct intel_global_state base;
+ struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
+};
+
+struct intel_dbuf_bw_state *to_intel_dbuf_bw_state(struct intel_global_state *obj_state)
+{
+ return container_of(obj_state, struct intel_dbuf_bw_state, base);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_old_global_obj_state(state, &display->dbuf_bw.obj);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_new_global_obj_state(state, &display->dbuf_bw.obj);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_global_state *dbuf_bw_state;
+
+ dbuf_bw_state = intel_atomic_get_global_obj_state(state, &display->dbuf_bw.obj);
+ if (IS_ERR(dbuf_bw_state))
+ return ERR_CAST(dbuf_bw_state);
+
+ return to_intel_dbuf_bw_state(dbuf_bw_state);
+}
+
+static bool intel_dbuf_bw_changed(struct intel_display *display,
+ const struct intel_dbuf_bw *old_dbuf_bw,
+ const struct intel_dbuf_bw *new_dbuf_bw)
+{
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
+ old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel_dbuf_bw_state_changed(struct intel_display *display,
+ const struct intel_dbuf_bw_state *old_dbuf_bw_state,
+ const struct intel_dbuf_bw_state *new_dbuf_bw_state)
+{
+ enum pipe pipe;
+
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *old_dbuf_bw =
+ &old_dbuf_bw_state->dbuf_bw[pipe];
+ const struct intel_dbuf_bw *new_dbuf_bw =
+ &new_dbuf_bw_state->dbuf_bw[pipe];
+
+ if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
+ return true;
+ }
+
+ return false;
+}
+
+static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
+ struct intel_crtc *crtc,
+ enum plane_id plane_id,
+ const struct skl_ddb_entry *ddb,
+ unsigned int data_rate)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
+ enum dbuf_slice slice;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
+ dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
+ dbuf_bw->active_planes[slice] |= BIT(plane_id);
+ }
+}
+
+static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum plane_id plane_id;
+
+ memset(dbuf_bw, 0, sizeof(*dbuf_bw));
+
+ if (!crtc_state->hw.active)
+ return;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ /*
+ * We assume cursors are small enough
+ * to not cause bandwidth problems.
+ */
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb[plane_id],
+ crtc_state->data_rate[plane_id]);
+
+ if (DISPLAY_VER(display) < 11)
+ skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
+ &crtc_state->wm.skl.plane_ddb_y[plane_id],
+ crtc_state->data_rate[plane_id]);
+ }
+}
+
+/* "Maximum Data Buffer Bandwidth" */
+int intel_dbuf_bw_min_cdclk(struct intel_display *display,
+ const struct intel_dbuf_bw_state *dbuf_bw_state)
+{
+ unsigned int total_max_bw = 0;
+ enum dbuf_slice slice;
+
+ for_each_dbuf_slice(display, slice) {
+ int num_active_planes = 0;
+ unsigned int max_bw = 0;
+ enum pipe pipe;
+
+ /*
+ * The arbiter can only really guarantee an
+ * equal share of the total bw to each plane.
+ */
+ for_each_pipe(display, pipe) {
+ const struct intel_dbuf_bw *dbuf_bw = &dbuf_bw_state->dbuf_bw[pipe];
+
+ max_bw = max(dbuf_bw->max_bw[slice], max_bw);
+ num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
+ }
+ max_bw *= num_active_planes;
+
+ total_max_bw = max(total_max_bw, max_bw);
+ }
+
+ return DIV_ROUND_UP(total_max_bw, 64);
+}
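Since the arbiter only guarantees each plane an equal share of a slice, the worst case per slice is the largest single-plane rate on that slice times the number of planes using it, taken across all pipes. A worked example with assumed numbers: if slice 0 is used by pipe A (max_bw 500,000 with two planes) and pipe B (max_bw 300,000 with one plane), then max_bw = 500,000 and num_active_planes = 3, giving 1,500,000; the function returns DIV_ROUND_UP(1,500,000, 64) = 23,438.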
+
+int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_dbuf_bw_state *new_dbuf_bw_state = NULL;
+ const struct intel_dbuf_bw_state *old_dbuf_bw_state = NULL;
+ const struct intel_crtc_state *old_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
+ struct intel_crtc *crtc;
+ int ret, i;
+
+ if (DISPLAY_VER(display) < 9)
+ return 0;
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
+
+ skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
+ skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
+
+ if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
+ continue;
+
+ new_dbuf_bw_state = intel_atomic_get_dbuf_bw_state(state);
+ if (IS_ERR(new_dbuf_bw_state))
+ return PTR_ERR(new_dbuf_bw_state);
+
+ old_dbuf_bw_state = intel_atomic_get_old_dbuf_bw_state(state);
+
+ new_dbuf_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
+ }
+
+ if (!old_dbuf_bw_state)
+ return 0;
+
+ if (intel_dbuf_bw_state_changed(display, old_dbuf_bw_state, new_dbuf_bw_state)) {
+ ret = intel_atomic_lock_global_state(&new_dbuf_bw_state->base);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_cdclk_update_dbuf_bw_min_cdclk(state,
+ intel_dbuf_bw_min_cdclk(display, old_dbuf_bw_state),
+ intel_dbuf_bw_min_cdclk(display, new_dbuf_bw_state),
+ need_cdclk_calc);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void intel_dbuf_bw_update_hw_state(struct intel_display *display)
+{
+ struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
+ struct intel_crtc *crtc;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ for_each_intel_crtc(display->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ skl_crtc_calc_dbuf_bw(&dbuf_bw_state->dbuf_bw[crtc->pipe], crtc_state);
+ }
+}
+
+void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ struct intel_dbuf_bw_state *dbuf_bw_state =
+ to_intel_dbuf_bw_state(display->dbuf_bw.obj.state);
+ enum pipe pipe = crtc->pipe;
+
+ if (DISPLAY_VER(display) < 9)
+ return;
+
+ memset(&dbuf_bw_state->dbuf_bw[pipe], 0, sizeof(dbuf_bw_state->dbuf_bw[pipe]));
+}
+
+static struct intel_global_state *
+intel_dbuf_bw_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_dbuf_bw_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ return &state->base;
+}
+
+static void intel_dbuf_bw_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_dbuf_bw_funcs = {
+ .atomic_duplicate_state = intel_dbuf_bw_duplicate_state,
+ .atomic_destroy_state = intel_dbuf_bw_destroy_state,
+};
+
+int intel_dbuf_bw_init(struct intel_display *display)
+{
+ struct intel_dbuf_bw_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(display, &display->dbuf_bw.obj,
+ &state->base, &intel_dbuf_bw_funcs);
+
+ return 0;
+}
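A minimal sketch of how check-phase code is expected to obtain the writable global state; this mirrors the call already made in intel_dbuf_bw_calc_min_cdclk() above, and the helper internally duplicates the committed state via intel_dbuf_bw_duplicate_state():

static int example_dbuf_bw_check(struct intel_atomic_state *state)
{
	struct intel_dbuf_bw_state *dbuf_bw_state;

	/* duplicates the committed state on first use in this transaction */
	dbuf_bw_state = intel_atomic_get_dbuf_bw_state(state);
	if (IS_ERR(dbuf_bw_state))
		return PTR_ERR(dbuf_bw_state);

	/* ... fill in dbuf_bw_state->dbuf_bw[pipe] for changed pipes ... */

	return 0;
}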
diff --git a/drivers/gpu/drm/i915/display/intel_dbuf_bw.h b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h
new file mode 100644
index 000000000000..61875b9d5969
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dbuf_bw.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_DBUF_BW_H__
+#define __INTEL_DBUF_BW_H__
+
+#include <drm/drm_atomic.h>
+
+struct intel_atomic_state;
+struct intel_dbuf_bw_state;
+struct intel_crtc;
+struct intel_display;
+struct intel_global_state;
+
+struct intel_dbuf_bw_state *
+to_intel_dbuf_bw_state(struct intel_global_state *obj_state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_old_dbuf_bw_state(struct intel_atomic_state *state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_new_dbuf_bw_state(struct intel_atomic_state *state);
+
+struct intel_dbuf_bw_state *
+intel_atomic_get_dbuf_bw_state(struct intel_atomic_state *state);
+
+int intel_dbuf_bw_init(struct intel_display *display);
+int intel_dbuf_bw_calc_min_cdclk(struct intel_atomic_state *state,
+ bool *need_cdclk_calc);
+int intel_dbuf_bw_min_cdclk(struct intel_display *display,
+ const struct intel_dbuf_bw_state *dbuf_bw_state);
+void intel_dbuf_bw_update_hw_state(struct intel_display *display);
+void intel_dbuf_bw_crtc_disable_noatomic(struct intel_crtc *crtc);
+
+#endif /* __INTEL_DBUF_BW_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 0405396c7750..002ccd47856d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -26,6 +26,7 @@
*/
#include <linux/iopoll.h>
+#include <linux/seq_buf.h>
#include <linux/string_helpers.h>
#include <drm/display/drm_dp_helper.h>
@@ -34,7 +35,6 @@
#include <drm/drm_privacy_screen_consumer.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "icl_dsi.h"
#include "intel_alpm.h"
#include "intel_audio.h"
@@ -52,6 +52,7 @@
#include "intel_display_power.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
@@ -71,6 +72,7 @@
#include "intel_hotplug.h"
#include "intel_hti.h"
#include "intel_lspcon.h"
+#include "intel_lt_phy.h"
#include "intel_mg_phy_regs.h"
#include "intel_modeset_lock.h"
#include "intel_panel.h"
@@ -208,8 +210,8 @@ void intel_wait_ddi_buf_idle(struct intel_display *display, enum port port)
}
static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE);
- if (intel_de_wait_for_set(display, intel_ddi_buf_status_reg(display, port),
- DDI_BUF_IS_IDLE, 10))
+ if (intel_de_wait_for_set_ms(display, intel_ddi_buf_status_reg(display, port),
+ DDI_BUF_IS_IDLE, 10))
drm_err(display->drm, "Timeout waiting for DDI BUF %c to get idle\n",
port_name(port));
}
@@ -233,8 +235,8 @@ static void intel_wait_ddi_buf_active(struct intel_encoder *encoder)
}
static_assert(DDI_BUF_IS_IDLE == XELPDP_PORT_BUF_PHY_IDLE);
- if (intel_de_wait_for_clear(display, intel_ddi_buf_status_reg(display, port),
- DDI_BUF_IS_IDLE, 10))
+ if (intel_de_wait_for_clear_ms(display, intel_ddi_buf_status_reg(display, port),
+ DDI_BUF_IS_IDLE, 10))
drm_err(display->drm, "Timeout waiting for DDI BUF %c to get active\n",
port_name(port));
}
@@ -596,8 +598,9 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- drm_WARN_ON(display->drm,
- master == INVALID_TRANSCODER);
+ if (drm_WARN_ON(display->drm,
+ master == INVALID_TRANSCODER))
+ master = TRANSCODER_A;
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
} else {
@@ -1464,10 +1467,15 @@ static int translate_signal_level(struct intel_dp *intel_dp,
u8 signal_levels)
{
struct intel_display *display = to_intel_display(intel_dp);
+ const u8 *signal_array;
+ size_t array_size;
int i;
- for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
- if (index_to_dp_signal_levels[i] == signal_levels)
+ signal_array = index_to_dp_signal_levels;
+ array_size = ARRAY_SIZE(index_to_dp_signal_levels);
+
+ for (i = 0; i < array_size; i++) {
+ if (signal_array[i] == signal_levels)
return i;
}
@@ -2166,7 +2174,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
{
struct intel_display *display = to_intel_display(crtc_state);
enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
- u32 ln0, ln1, pin_assignment;
+ enum intel_tc_pin_assignment pin_assignment;
+ u32 ln0, ln1;
u8 width;
if (DISPLAY_VER(display) >= 14)
@@ -2188,11 +2197,11 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
/* DPPATC */
- pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port);
+ pin_assignment = intel_tc_port_get_pin_assignment(dig_port);
width = crtc_state->lane_count;
switch (pin_assignment) {
- case 0x0:
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
drm_WARN_ON(display->drm,
!intel_tc_port_in_legacy_mode(dig_port));
if (width == 1) {
@@ -2202,20 +2211,20 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x1:
+ case INTEL_TC_PIN_ASSIGNMENT_A:
if (width == 4) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x2:
+ case INTEL_TC_PIN_ASSIGNMENT_B:
if (width == 2) {
ln0 |= MG_DP_MODE_CFG_DP_X2_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x3:
- case 0x5:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2224,8 +2233,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
ln1 |= MG_DP_MODE_CFG_DP_X2_MODE;
}
break;
- case 0x4:
- case 0x6:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_F:
if (width == 1) {
ln0 |= MG_DP_MODE_CFG_DP_X1_MODE;
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
@@ -2298,8 +2307,8 @@ void intel_ddi_wait_for_act_sent(struct intel_encoder *encoder,
{
struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_ACT_SENT, 1))
+ if (intel_de_wait_for_set_ms(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_ACT_SENT, 1))
drm_err(display->drm, "Timed out waiting for ACT sent\n");
}
@@ -2339,34 +2348,24 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
drm_dbg_kms(display->drm, "Failed to clear FEC detected flags\n");
}
-static int read_fec_detected_status(struct drm_dp_aux *aux)
-{
- int ret;
- u8 status;
-
- ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status);
- if (ret < 0)
- return ret;
-
- return status;
-}
-
static int wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
{
struct intel_display *display = to_intel_display(aux->drm_dev);
int mask = enabled ? DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED;
- int status;
- int err;
+ u8 status = 0;
+ int ret, err;
- err = readx_poll_timeout(read_fec_detected_status, aux, status,
- status & mask || status < 0,
- 10000, 200000);
+ ret = poll_timeout_us(err = drm_dp_dpcd_read_byte(aux, DP_FEC_STATUS, &status),
+ err || (status & mask),
+ 10 * 1000, 200 * 1000, false);
- if (err || status < 0) {
+ /* Either can be non-zero, but not both */
+ ret = ret ?: err;
+ if (ret) {
drm_dbg_kms(display->drm,
- "Failed waiting for FEC %s to get detected: %d (status %d)\n",
- str_enabled_disabled(enabled), err, status);
- return err ? err : status;
+ "Failed waiting for FEC %s to get detected: %d (status 0x%02x)\n",
+ str_enabled_disabled(enabled), ret, status);
+ return ret;
}
return 0;
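poll_timeout_us() takes the read expression first and the exit condition second, which is what lets the DPCD read error and the FEC status be checked in one place above. A minimal sketch of the same idiom against a hypothetical readback function (read_status() and REG_READY are invented names; the final argument is assumed to select sleeping before the first read):

u32 status = 0;
int ret;

ret = poll_timeout_us(status = read_status(dev),	/* re-read each iteration */
		      status & REG_READY,		/* exit condition */
		      10 * 1000,			/* sleep between reads, in us */
		      200 * 1000,			/* total timeout, in us */
		      false);
if (ret)
	return ret;	/* -ETIMEDOUT; status holds the last readback */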
@@ -2384,11 +2383,11 @@ int intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
return 0;
if (enabled)
- ret = intel_de_wait_for_set(display, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+ ret = intel_de_wait_for_set_ms(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
else
- ret = intel_de_wait_for_clear(display, dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+ ret = intel_de_wait_for_clear_ms(display, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
if (ret) {
drm_err(display->drm,
@@ -2561,6 +2560,7 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
enum port port = encoder->port;
i915_reg_t reg;
u32 set_bits, wait_bits;
+ int ret;
if (DISPLAY_VER(display) < 14)
return;
@@ -2576,7 +2576,9 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder)
}
intel_de_rmw(display, reg, 0, set_bits);
- if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) {
+
+ ret = intel_de_wait_for_set_us(display, reg, wait_bits, 100);
+ if (ret) {
drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3058,6 +3060,7 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder)
enum port port = encoder->port;
i915_reg_t reg;
u32 clr_bits, wait_bits;
+ int ret;
if (DISPLAY_VER(display) < 14)
return;
@@ -3073,7 +3076,9 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder)
}
intel_de_rmw(display, reg, clr_bits, 0);
- if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100))
+
+ ret = intel_de_wait_for_clear_us(display, reg, wait_bits, 100);
+ if (ret)
drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n",
port_name(port));
}
@@ -3859,9 +3864,9 @@ static void intel_ddi_set_idle_link_train(struct intel_dp *intel_dp,
if (port == PORT_A && DISPLAY_VER(display) < 12)
return;
- if (intel_de_wait_for_set(display,
- dp_tp_status_reg(encoder, crtc_state),
- DP_TP_STATUS_IDLE_DONE, 2))
+ if (intel_de_wait_for_set_ms(display,
+ dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_IDLE_DONE, 2))
drm_err(display->drm,
"Timed out waiting for DP idle patterns\n");
}
@@ -4237,6 +4242,19 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
&crtc_state->dpll_hw_state);
}
+static void xe3plpd_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ intel_lt_phy_pll_readout_hw_state(encoder, crtc_state, &crtc_state->dpll_hw_state.ltpll);
+
+ if (crtc_state->dpll_hw_state.ltpll.tbt_mode)
+ crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
+ else
+ crtc_state->port_clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_ddi_get_config(encoder, crtc_state);
+}
+
static void mtl_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
@@ -4556,6 +4574,13 @@ static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
struct drm_connector *connector = conn_state->connector;
u8 port_sync_transcoders = 0;
+ int ret = 0;
+
+ if (intel_crtc_has_dp_encoder(crtc_state))
+ ret = intel_dp_compute_config_late(encoder, crtc_state, conn_state);
+
+ if (ret)
+ return ret;
drm_dbg_kms(display->drm, "[ENCODER:%d:%s] [CRTC:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
@@ -5066,11 +5091,45 @@ static bool port_in_use(struct intel_display *display, enum port port)
return false;
}
+static const char *intel_ddi_encoder_name(struct intel_display *display,
+ enum port port, enum phy phy,
+ struct seq_buf *s)
+{
+ if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
+ seq_buf_printf(s, "DDI %c/PHY %c",
+ port_name(port - PORT_D_XELPD + PORT_D),
+ phy_name(phy));
+ } else if (DISPLAY_VER(display) >= 12) {
+ enum tc_port tc_port = intel_port_to_tc(display, port);
+
+ seq_buf_printf(s, "DDI %s%c/PHY %s%c",
+ port >= PORT_TC1 ? "TC" : "",
+ port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else if (DISPLAY_VER(display) >= 11) {
+ enum tc_port tc_port = intel_port_to_tc(display, port);
+
+ seq_buf_printf(s, "DDI %c%s/PHY %s%c",
+ port_name(port),
+ port >= PORT_C ? " (TC)" : "",
+ tc_port != TC_PORT_NONE ? "TC" : "",
+ tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
+ } else {
+ seq_buf_printf(s, "DDI %c/PHY %c", port_name(port), phy_name(phy));
+ }
+
+ drm_WARN_ON(display->drm, seq_buf_has_overflowed(s));
+
+ return seq_buf_str(s);
+}
+
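DECLARE_SEQ_BUF() puts a fixed-size buffer on the stack, and seq_buf_has_overflowed() flags a too-small size at runtime instead of silently truncating. A minimal sketch of the pattern in isolation:

#include <linux/seq_buf.h>

DECLARE_SEQ_BUF(s, 20);		/* 20-byte backing store */

seq_buf_printf(&s, "DDI %c/PHY %c", 'A', 'A');
WARN_ON(seq_buf_has_overflowed(&s));
pr_info("%s\n", seq_buf_str(&s));	/* NUL-terminated contents */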
void intel_ddi_init(struct intel_display *display,
const struct intel_bios_encoder_data *devdata)
{
struct intel_digital_port *dig_port;
struct intel_encoder *encoder;
+ DECLARE_SEQ_BUF(encoder_name, 20);
bool init_hdmi, init_dp;
enum port port;
enum phy phy;
@@ -5148,52 +5207,19 @@ void intel_ddi_init(struct intel_display *display,
phy_name(phy));
}
- dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ dig_port = intel_dig_port_alloc();
if (!dig_port)
return;
- dig_port->aux_ch = AUX_CH_NONE;
-
encoder = &dig_port->base;
encoder->devdata = devdata;
- if (DISPLAY_VER(display) >= 13 && port >= PORT_D_XELPD) {
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c",
- port_name(port - PORT_D_XELPD + PORT_D),
- phy_name(phy));
- } else if (DISPLAY_VER(display) >= 12) {
- enum tc_port tc_port = intel_port_to_tc(display, port);
-
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %s%c/PHY %s%c",
- port >= PORT_TC1 ? "TC" : "",
- port >= PORT_TC1 ? port_tc_name(port) : port_name(port),
- tc_port != TC_PORT_NONE ? "TC" : "",
- tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else if (DISPLAY_VER(display) >= 11) {
- enum tc_port tc_port = intel_port_to_tc(display, port);
-
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c%s/PHY %s%c",
- port_name(port),
- port >= PORT_C ? " (TC)" : "",
- tc_port != TC_PORT_NONE ? "TC" : "",
- tc_port != TC_PORT_NONE ? tc_port_name(tc_port) : phy_name(phy));
- } else {
- drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
- DRM_MODE_ENCODER_TMDS,
- "DDI %c/PHY %c", port_name(port), phy_name(phy));
- }
+ drm_encoder_init(display->drm, &encoder->base, &intel_ddi_funcs,
+ DRM_MODE_ENCODER_TMDS, "%s",
+ intel_ddi_encoder_name(display, port, phy, &encoder_name));
intel_encoder_link_check_init(encoder, intel_ddi_link_check);
- mutex_init(&dig_port->hdcp.mutex);
- dig_port->hdcp.num_streams = 0;
-
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
@@ -5220,7 +5246,12 @@ void intel_ddi_init(struct intel_display *display,
encoder->cloneable = 0;
encoder->pipe_mask = ~0;
- if (DISPLAY_VER(display) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->enable_clock = intel_xe3plpd_pll_enable;
+ encoder->disable_clock = intel_xe3plpd_pll_disable;
+ encoder->port_pll_type = intel_mtl_port_pll_type;
+ encoder->get_config = xe3plpd_ddi_get_config;
+ } else if (DISPLAY_VER(display) >= 14) {
encoder->enable_clock = intel_mtl_pll_enable;
encoder->disable_clock = intel_mtl_pll_disable;
encoder->port_pll_type = intel_mtl_port_pll_type;
@@ -5285,7 +5316,9 @@ void intel_ddi_init(struct intel_display *display,
encoder->get_config = hsw_ddi_get_config;
}
- if (DISPLAY_VER(display) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->set_signal_levels = intel_lt_phy_set_signal_levels;
+ } else if (DISPLAY_VER(display) >= 14) {
encoder->set_signal_levels = intel_cx0_phy_set_signal_levels;
} else if (display->platform.dg2) {
encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
@@ -5331,7 +5364,6 @@ void intel_ddi_init(struct intel_display *display,
dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES;
- dig_port->dp.output_reg = INVALID_MMIO_REG;
dig_port->max_lanes = intel_ddi_max_lanes(dig_port);
if (need_aux_ch(encoder, init_dp)) {
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index a238be5bc455..395dba8c9e4d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -3,13 +3,14 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
+#include "intel_lt_phy.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
@@ -1115,6 +1116,69 @@ static const struct intel_ddi_buf_trans mtl_c20_trans_uhbr = {
.num_entries = ARRAY_SIZE(_mtl_c20_trans_uhbr),
};
+/* DP1.4 */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_dp14[] = {
+ { .lt = { 1, 0, 0, 21, 0 } },
+ { .lt = { 1, 1, 0, 24, 3 } },
+ { .lt = { 1, 2, 0, 28, 7 } },
+ { .lt = { 0, 3, 0, 35, 13 } },
+ { .lt = { 1, 1, 0, 27, 0 } },
+ { .lt = { 1, 2, 0, 31, 4 } },
+ { .lt = { 0, 3, 0, 39, 9 } },
+ { .lt = { 1, 2, 0, 35, 0 } },
+ { .lt = { 0, 3, 0, 41, 7 } },
+ { .lt = { 0, 3, 0, 48, 0 } },
+};
+
+/* DP2.1 */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_uhbr[] = {
+ { .lt = { 0, 0, 0, 48, 0 } },
+ { .lt = { 0, 0, 0, 43, 5 } },
+ { .lt = { 0, 0, 0, 40, 8 } },
+ { .lt = { 0, 0, 0, 37, 11 } },
+ { .lt = { 0, 0, 0, 33, 15 } },
+ { .lt = { 0, 0, 2, 46, 0 } },
+ { .lt = { 0, 0, 2, 42, 4 } },
+ { .lt = { 0, 0, 2, 38, 8 } },
+ { .lt = { 0, 0, 2, 35, 11 } },
+ { .lt = { 0, 0, 2, 33, 13 } },
+ { .lt = { 0, 0, 4, 44, 0 } },
+ { .lt = { 0, 0, 4, 40, 4 } },
+ { .lt = { 0, 0, 4, 37, 7 } },
+ { .lt = { 0, 0, 4, 33, 11 } },
+ { .lt = { 0, 0, 8, 40, 0 } },
+ { .lt = { 1, 0, 2, 26, 2 } },
+};
+
+/* eDP */
+static const union intel_ddi_buf_trans_entry _xe3plpd_lt_trans_edp[] = {
+ { .lt = { 1, 0, 0, 12, 0 } },
+ { .lt = { 1, 1, 0, 13, 1 } },
+ { .lt = { 1, 2, 0, 15, 3 } },
+ { .lt = { 1, 3, 0, 19, 7 } },
+ { .lt = { 1, 1, 0, 14, 0 } },
+ { .lt = { 1, 2, 0, 16, 2 } },
+ { .lt = { 1, 3, 0, 21, 5 } },
+ { .lt = { 1, 2, 0, 18, 0 } },
+ { .lt = { 1, 3, 0, 22, 4 } },
+ { .lt = { 1, 3, 0, 26, 0 } },
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_dp14 = {
+ .entries = _xe3plpd_lt_trans_dp14,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_dp14),
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_uhbr = {
+ .entries = _xe3plpd_lt_trans_uhbr,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_uhbr),
+};
+
+static const struct intel_ddi_buf_trans xe3plpd_lt_trans_edp = {
+ .entries = _xe3plpd_lt_trans_edp,
+ .num_entries = ARRAY_SIZE(_xe3plpd_lt_trans_edp),
+};
+
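The 10-entry DP 1.4 and eDP tables above are assumed to follow the usual i915 ordering of the valid (voltage swing, pre-emphasis) combinations, i.e.:

/*
 * Assumed index -> (vswing, pre-emphasis) mapping for the
 * 10-entry tables (same ordering as other pre-UHBR tables):
 *
 *   0: (0,0)  1: (0,1)  2: (0,2)  3: (0,3)
 *   4: (1,0)  5: (1,1)  6: (1,2)
 *   7: (2,0)  8: (2,1)
 *   9: (3,0)
 */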
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
{
return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
@@ -1707,11 +1771,26 @@ mtl_get_c20_buf_trans(struct intel_encoder *encoder,
return intel_get_buf_trans(&mtl_c20_trans_dp14, n_entries);
}
+static const struct intel_ddi_buf_trans *
+xe3plpd_get_lt_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state) && intel_dp_is_uhbr(crtc_state))
+ return intel_get_buf_trans(&xe3plpd_lt_trans_uhbr, n_entries);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return intel_get_buf_trans(&xe3plpd_lt_trans_edp, n_entries);
+ else
+ return intel_get_buf_trans(&xe3plpd_lt_trans_dp14, n_entries);
+}
+
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- if (DISPLAY_VER(display) >= 14) {
+ if (HAS_LT_PHY(display)) {
+ encoder->get_buf_trans = xe3plpd_get_lt_buf_trans;
+ } else if (DISPLAY_VER(display) >= 14) {
if (intel_encoder_is_c10phy(encoder))
encoder->get_buf_trans = mtl_get_c10_buf_trans;
else
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 29a190390192..cec332090a20 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -50,6 +50,14 @@ struct dg2_snps_phy_buf_trans {
u8 post_cursor;
};
+struct xe3plpd_lt_phy_buf_trans {
+ u8 txswing;
+ u8 txswing_level;
+ u8 pre_cursor;
+ u8 main_cursor;
+ u8 post_cursor;
+};
+
union intel_ddi_buf_trans_entry {
struct hsw_ddi_buf_trans hsw;
struct bxt_ddi_buf_trans bxt;
@@ -57,6 +65,7 @@ union intel_ddi_buf_trans_entry {
struct icl_mg_phy_ddi_buf_trans mg;
struct tgl_dkl_phy_ddi_buf_trans dkl;
struct dg2_snps_phy_buf_trans snps;
+ struct xe3plpd_lt_phy_buf_trans lt;
};
struct intel_ddi_buf_trans {
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 9ecdcf6b73e4..a7ce3b875e06 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -84,20 +84,13 @@ intel_de_write(struct intel_display *display, i915_reg_t reg, u32 val)
}
static inline u32
-__intel_de_rmw_nowl(struct intel_display *display, i915_reg_t reg,
- u32 clear, u32 set)
-{
- return intel_uncore_rmw(__to_uncore(display), reg, clear, set);
-}
-
-static inline u32
intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
{
u32 val;
intel_dmc_wl_get(display, reg);
- val = __intel_de_rmw_nowl(display, reg, clear, set);
+ val = intel_uncore_rmw(__to_uncore(display), reg, clear, set);
intel_dmc_wl_put(display, reg);
@@ -105,34 +98,16 @@ intel_de_rmw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
}
static inline int
-__intel_de_wait_for_register_nowl(struct intel_display *display,
- i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout_ms)
-{
- return intel_wait_for_register(__to_uncore(display), reg, mask,
- value, timeout_ms);
-}
-
-static inline int
-__intel_de_wait_for_register_atomic_nowl(struct intel_display *display,
- i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us)
-{
- return __intel_wait_for_register(__to_uncore(display), reg, mask,
- value, fast_timeout_us, 0, NULL);
-}
-
-static inline int
-intel_de_wait(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout_ms)
+intel_de_wait_us(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_us,
+ u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
- ret = __intel_de_wait_for_register_nowl(display, reg, mask, value,
- timeout_ms);
+ ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, timeout_us, 0, out_value);
intel_dmc_wl_put(display, reg);
@@ -140,15 +115,16 @@ intel_de_wait(struct intel_display *display, i915_reg_t reg,
}
static inline int
-intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value, unsigned int timeout_ms, u32 *out_value)
+intel_de_wait_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_ms,
+ u32 *out_value)
{
int ret;
intel_dmc_wl_get(display, reg);
- ret = intel_wait_for_register_fw(__to_uncore(display), reg, mask,
- value, timeout_ms, out_value);
+ ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
+ value, 2, timeout_ms, out_value);
intel_dmc_wl_put(display, reg);
@@ -156,36 +132,49 @@ intel_de_wait_fw(struct intel_display *display, i915_reg_t reg,
}
static inline int
-intel_de_wait_custom(struct intel_display *display, i915_reg_t reg,
- u32 mask, u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms, u32 *out_value)
+intel_de_wait_fw_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_ms,
+ u32 *out_value)
{
- int ret;
-
- intel_dmc_wl_get(display, reg);
+ return __intel_wait_for_register_fw(__to_uncore(display), reg, mask,
+ value, 2, timeout_ms, out_value);
+}
- ret = __intel_wait_for_register(__to_uncore(display), reg, mask,
- value,
- fast_timeout_us, slow_timeout_ms, out_value);
+static inline int
+intel_de_wait_fw_us_atomic(struct intel_display *display, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout_us,
+ u32 *out_value)
+{
+ return __intel_wait_for_register_fw(__to_uncore(display), reg, mask,
+ value, timeout_us, 0, out_value);
+}
- intel_dmc_wl_put(display, reg);
+static inline int
+intel_de_wait_for_set_us(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_us)
+{
+ return intel_de_wait_us(display, reg, mask, mask, timeout_us, NULL);
+}
- return ret;
+static inline int
+intel_de_wait_for_clear_us(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_us)
+{
+ return intel_de_wait_us(display, reg, mask, 0, timeout_us, NULL);
}
static inline int
-intel_de_wait_for_set(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout_ms)
+intel_de_wait_for_set_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, mask, timeout_ms);
+ return intel_de_wait_ms(display, reg, mask, mask, timeout_ms, NULL);
}
static inline int
-intel_de_wait_for_clear(struct intel_display *display, i915_reg_t reg,
- u32 mask, unsigned int timeout_ms)
+intel_de_wait_for_clear_ms(struct intel_display *display, i915_reg_t reg,
+ u32 mask, unsigned int timeout_ms)
{
- return intel_de_wait(display, reg, mask, 0, timeout_ms);
+ return intel_de_wait_ms(display, reg, mask, 0, timeout_ms, NULL);
}
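With the rename the timeout unit is spelled out in the suffix instead of being implicit, so converting a caller is mechanical:

/* before: timeout unit only documented, not named */
ret = intel_de_wait_for_set(display, reg, mask, 10);

/* after: same 10 ms wait, unit in the name */
ret = intel_de_wait_for_set_ms(display, reg, mask, 10);

/* short busy-waits use the new microsecond variant instead */
ret = intel_de_wait_for_set_us(display, reg, mask, 100);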
/*
@@ -215,6 +204,18 @@ intel_de_write_fw(struct intel_display *display, i915_reg_t reg, u32 val)
}
static inline u32
+intel_de_rmw_fw(struct intel_display *display, i915_reg_t reg, u32 clear, u32 set)
+{
+ u32 old, val;
+
+ old = intel_de_read_fw(display, reg);
+ val = (old & ~clear) | set;
+ intel_de_write_fw(display, reg, val);
+
+ return old;
+}
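Like intel_de_rmw(), the new _fw variant returns the value read before modification, so a caller can test the previous state of a bit without an extra read; unlike intel_de_rmw() it uses the _fw accessors and therefore skips the wakelock handling. A minimal sketch (BIT(0) is illustrative):

u32 old;

old = intel_de_rmw_fw(display, reg, 0, BIT(0));	/* set BIT(0) */
if (old & BIT(0)) {
	/* the bit was already set before this rmw */
}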
+
+static inline u32
intel_de_read_notrace(struct intel_display *display, i915_reg_t reg)
{
return intel_uncore_read_notrace(__to_uncore(display), reg);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 7035c1fc9033..095a319f8bc9 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,6 +41,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
@@ -51,7 +52,6 @@
#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
@@ -60,6 +60,7 @@
#include "intel_audio.h"
#include "intel_bo.h"
#include "intel_bw.h"
+#include "intel_casf.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
#include "intel_color.h"
@@ -76,6 +77,8 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -98,6 +101,7 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_link_bw.h"
+#include "intel_lt_phy.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_modeset_setup.h"
@@ -128,11 +132,9 @@
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
-#include "vlv_dpio_phy_regs.h"
#include "vlv_dsi.h"
#include "vlv_dsi_pll.h"
#include "vlv_dsi_regs.h"
-#include "vlv_sideband.h"
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
@@ -140,65 +142,6 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipe_misc(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state);
-/* returns HPLL frequency in kHz */
-int vlv_get_hpll_vco(struct drm_device *drm)
-{
- int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
-
- /* Obtain SKU information */
- hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
- CCK_FUSE_HPLL_FREQ_MASK;
-
- return vco_freq[hpll_freq] * 1000;
-}
-
-int vlv_get_cck_clock(struct drm_device *drm,
- const char *name, u32 reg, int ref_freq)
-{
- u32 val;
- int divider;
-
- val = vlv_cck_read(drm, reg);
- divider = val & CCK_FREQUENCY_VALUES;
-
- drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
- (divider << CCK_FREQUENCY_STATUS_SHIFT),
- "%s change in progress\n", name);
-
- return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
-}
-
-int vlv_get_cck_clock_hpll(struct drm_device *drm,
- const char *name, u32 reg)
-{
- struct drm_i915_private *dev_priv = to_i915(drm);
- int hpll;
-
- vlv_cck_get(drm);
-
- if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = vlv_get_hpll_vco(drm);
-
- hpll = vlv_get_cck_clock(drm, name, reg, dev_priv->hpll_freq);
-
- vlv_cck_put(drm);
-
- return hpll;
-}
-
-void intel_update_czclk(struct intel_display *display)
-{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
- if (!display->platform.valleyview && !display->platform.cherryview)
- return;
-
- dev_priv->czclk_freq = vlv_get_cck_clock_hpll(display->drm, "czclk",
- CCK_CZ_CLOCK_CONTROL);
-
- drm_dbg_kms(display->drm, "CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
-}
-
static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
{
return (crtc_state->active_planes &
@@ -416,8 +359,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(display, TRANSCONF(display, cpu_transcoder),
- TRANSCONF_STATE_ENABLE, 100))
+ if (intel_de_wait_for_clear_ms(display, TRANSCONF(display, cpu_transcoder),
+ TRANSCONF_STATE_ENABLE, 100))
drm_WARN(display->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
@@ -604,16 +547,13 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
intel_wait_for_pipe_off(old_crtc_state);
}
-u32 intel_plane_fb_max_stride(struct drm_device *drm,
- u32 pixel_format, u64 modifier)
+u32 intel_plane_fb_max_stride(struct intel_display *display,
+ const struct drm_format_info *info,
+ u64 modifier)
{
- struct intel_display *display = to_intel_display(drm);
struct intel_crtc *crtc;
struct intel_plane *plane;
- if (!HAS_DISPLAY(display))
- return 0;
-
/*
* We assume the primary plane for pipe A has
* the highest stride limits of them all,
@@ -625,10 +565,23 @@ u32 intel_plane_fb_max_stride(struct drm_device *drm,
plane = to_intel_plane(crtc->base.primary);
- return plane->max_stride(plane, pixel_format, modifier,
+ return plane->max_stride(plane, info, modifier,
DRM_MODE_ROTATE_0);
}
+u32 intel_dumb_fb_max_stride(struct drm_device *drm,
+ u32 pixel_format, u64 modifier)
+{
+ struct intel_display *display = to_intel_display(drm);
+
+ if (!HAS_DISPLAY(display))
+ return 0;
+
+ return intel_plane_fb_max_stride(display,
+ drm_get_format_info(drm, pixel_format, modifier),
+ modifier);
+}
+
void intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
bool visible)
@@ -890,9 +843,8 @@ static void intel_async_flip_vtd_wa(struct intel_display *display,
static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- return crtc_state->uapi.async_flip && i915_vtd_active(i915) &&
+ return crtc_state->uapi.async_flip && intel_display_vtd_active(display) &&
(DISPLAY_VER(display) == 9 || display->platform.broadwell ||
display->platform.haswell);
}
@@ -1039,6 +991,24 @@ static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
}
+static bool intel_casf_enabling(const struct intel_crtc_state *new_crtc_state,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_enabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
+}
+
+static bool intel_casf_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_disabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
+}
+
#undef is_disabling
#undef is_enabling
@@ -1081,6 +1051,11 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
+ if (intel_display_wa(display, 14011503117)) {
+ if (old_crtc_state->pch_pfit.enabled != new_crtc_state->pch_pfit.enabled)
+ adl_scaler_ecc_unmask(new_crtc_state);
+ }
+
intel_alpm_post_plane_update(state, crtc);
intel_psr_post_plane_update(state, crtc);
@@ -1189,6 +1164,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (audio_disabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_disable(state, crtc);
+ if (intel_casf_disabling(old_crtc_state, new_crtc_state))
+ intel_casf_disable(new_crtc_state);
+
intel_drrs_deactivate(old_crtc_state);
if (hsw_ips_pre_update(state, crtc))
@@ -1636,8 +1614,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
}
intel_set_transcoder_timings(crtc_state);
- if (HAS_VRR(display))
- intel_vrr_set_transcoder_timings(crtc_state);
+ intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
@@ -2416,39 +2393,44 @@ static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
return 0;
}
-static int intel_crtc_vblank_delay(const struct intel_crtc_state *crtc_state)
+static int intel_crtc_set_context_latency(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- int vblank_delay = 0;
+ int set_context_latency = 0;
if (!HAS_DSB(display))
return 0;
- vblank_delay = max(vblank_delay, intel_psr_min_vblank_delay(crtc_state));
+ set_context_latency = max(set_context_latency,
+ intel_psr_min_set_context_latency(crtc_state));
- return vblank_delay;
+ return set_context_latency;
}
-static int intel_crtc_compute_vblank_delay(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static int intel_crtc_compute_set_context_latency(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- int vblank_delay, max_vblank_delay;
+ int set_context_latency, max_vblank_delay;
+
+ set_context_latency = intel_crtc_set_context_latency(crtc_state);
- vblank_delay = intel_crtc_vblank_delay(crtc_state);
max_vblank_delay = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start - 1;
- if (vblank_delay > max_vblank_delay) {
- drm_dbg_kms(display->drm, "[CRTC:%d:%s] vblank delay (%d) exceeds max (%d)\n",
- crtc->base.base.id, crtc->base.name, vblank_delay, max_vblank_delay);
+ if (set_context_latency > max_vblank_delay) {
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] set context latency (%d) exceeds max (%d)\n",
+ crtc->base.base.id, crtc->base.name,
+ set_context_latency,
+ max_vblank_delay);
return -EINVAL;
}
- adjusted_mode->crtc_vblank_start += vblank_delay;
+ crtc_state->set_context_latency = set_context_latency;
+ adjusted_mode->crtc_vblank_start += set_context_latency;
return 0;
}
@@ -2460,11 +2442,11 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
int ret;
- ret = intel_crtc_compute_vblank_delay(state, crtc);
+ ret = intel_dpll_crtc_compute_clock(state, crtc);
if (ret)
return ret;
- ret = intel_dpll_crtc_compute_clock(state, crtc);
+ ret = intel_crtc_compute_set_context_latency(state, crtc);
if (ret)
return ret;
@@ -2481,6 +2463,8 @@ static int intel_crtc_compute_config(struct intel_atomic_state *state,
if (crtc_state->has_pch_encoder)
return ilk_fdi_compute_config(crtc, crtc_state);
+ intel_vrr_compute_guardband(crtc_state);
+
return 0;
}
@@ -2672,16 +2656,19 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
if (DISPLAY_VER(display) >= 13) {
intel_de_write(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
- crtc_vblank_start - crtc_vdisplay);
+ crtc_state->set_context_latency);
/*
* VBLANK_START not used by hw, just clear it
* to make it stand out in register dumps.
*/
crtc_vblank_start = 1;
+ } else if (DISPLAY_VER(display) == 12) {
+ /* VBLANK_START - VACTIVE defines SCL on TGL */
+ crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
}
- if (DISPLAY_VER(display) >= 4)
+ if (DISPLAY_VER(display) >= 4 && DISPLAY_VER(display) < 35)
intel_de_write(display,
TRANS_VSYNCSHIFT(display, cpu_transcoder),
vsyncshift);
@@ -2762,13 +2749,16 @@ static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc
if (DISPLAY_VER(display) >= 13) {
intel_de_write(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder),
- crtc_vblank_start - crtc_vdisplay);
+ crtc_state->set_context_latency);
/*
* VBLANK_START not used by hw, just clear it
* to make it stand out in register dumps.
*/
crtc_vblank_start = 1;
+ } else if (DISPLAY_VER(display) == 12) {
+ /* VBLANK_START - VACTIVE defines SCL on TGL */
+ crtc_vblank_start = crtc_vdisplay + crtc_state->set_context_latency;
}
/*
@@ -2819,7 +2809,7 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(display) == 2)
+ if (DISPLAY_VER(display) == 2 || DISPLAY_VER(display) >= 35)
return false;
if (DISPLAY_VER(display) >= 9 ||
@@ -2875,11 +2865,24 @@ static void intel_get_transcoder_timings(struct intel_crtc *crtc,
adjusted_mode->crtc_vblank_end += 1;
}
- if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder))
- adjusted_mode->crtc_vblank_start =
- adjusted_mode->crtc_vdisplay +
+ if (DISPLAY_VER(display) >= 13 && !transcoder_is_dsi(cpu_transcoder)) {
+ pipe_config->set_context_latency =
intel_de_read(display,
TRANS_SET_CONTEXT_LATENCY(display, cpu_transcoder));
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vdisplay +
+ pipe_config->set_context_latency;
+ } else if (DISPLAY_VER(display) == 12) {
+ /*
+ * TGL doesn't have a dedicated register for SCL.
+ * Instead, the hardware derives SCL from the difference between
+ * TRANS_VBLANK.vblank_start and TRANS_VTOTAL.vactive.
+ * To reflect the HW behaviour, read out the value for SCL as
+ * Vblank start - Vactive.
+ */
+ pipe_config->set_context_latency =
+ adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
+ }
if (DISPLAY_VER(display) >= 30)
pipe_config->min_hblank = intel_de_read(display,
@@ -3197,10 +3200,12 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
if (display->platform.haswell && crtc_state->dither)
val |= TRANSCONF_DITHER_EN | TRANSCONF_DITHER_TYPE_SP;
- if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= TRANSCONF_INTERLACE_IF_ID_ILK;
- else
- val |= TRANSCONF_INTERLACE_PF_PD_ILK;
+ if (DISPLAY_VER(display) < 35) {
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
+ val |= TRANSCONF_INTERLACE_IF_ID_ILK;
+ else
+ val |= TRANSCONF_INTERLACE_PF_PD_ILK;
+ }
if (display->platform.haswell &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
@@ -3946,6 +3951,20 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_joiner_get_config(pipe_config);
intel_dsc_get_config(pipe_config);
+ /* intel_vrr_get_config() depends on .framestart_delay */
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
+
+ pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
+ } else {
+ /* no idea if this is correct */
+ pipe_config->framestart_delay = 1;
+ }
+
+ /*
+ * intel_vrr_get_config() depends on TRANS_SET_CONTEXT_LATENCY
+ * readout done by intel_get_transcoder_timings().
+ */
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(display) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
@@ -3997,15 +4016,6 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier = 1;
}
- if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(display, CHICKEN_TRANS(display, pipe_config->cpu_transcoder));
-
- pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
- } else {
- /* no idea if this is correct */
- pipe_config->framestart_delay = 1;
- }
-
out:
intel_display_power_put_all_in_set(display, &crtc->hw_readout_power_domains);
@@ -4252,9 +4262,14 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
+ ret = intel_casf_compute_config(crtc_state);
+ if (ret)
+ return ret;
+
if (DISPLAY_VER(display) >= 9) {
if (intel_crtc_needs_modeset(crtc_state) ||
- intel_crtc_needs_fastset(crtc_state)) {
+ intel_crtc_needs_fastset(crtc_state) ||
+ intel_casf_needs_scaler(crtc_state)) {
ret = skl_update_scaler_crtc(crtc_state);
if (ret)
return ret;
@@ -4633,7 +4648,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret)
return ret;
- crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
+ crtc_state->dsc.compression_enabled_on_link = limits->link_dsc_pipes & BIT(crtc->pipe);
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
if (crtc_state->pipe_bpp > fxp_q4_to_int(crtc_state->max_link_bpp_x16)) {
@@ -4754,8 +4769,6 @@ intel_modeset_pipe_config_late(struct intel_atomic_state *state,
struct drm_connector *connector;
int i;
- intel_vrr_compute_config_late(crtc_state);
-
for_each_new_connector_in_state(&state->base, connector,
conn_state, i) {
struct intel_encoder *encoder =
@@ -4990,9 +5003,33 @@ static bool allow_vblank_delay_fastset(const struct intel_crtc_state *old_crtc_s
* Allow fastboot to fix up vblank delay (handled via LRR
* codepaths), a bit dodgy as the registers aren't
* double buffered but seems to be working more or less...
+ *
+ * Also allow this when the VRR timing generator is always on,
+ * and optimized guardband is used. In such cases,
+ * vblank delay may vary even without inherited state, but it's
+ * still safe as VRR guardband is still same.
*/
- return HAS_LRR(display) && old_crtc_state->inherited &&
- !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
+ return HAS_LRR(display) &&
+ (old_crtc_state->inherited || intel_vrr_always_use_vrr_tg(display)) &&
+ !intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI);
+}
+
+static void
+pipe_config_lt_phy_pll_mismatch(struct drm_printer *p, bool fastset,
+ const struct intel_crtc *crtc,
+ const char *name,
+ const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b)
+{
+ struct intel_display *display = to_intel_display(crtc);
+ char *chipname = "LTPHY";
+
+ pipe_config_mismatch(p, fastset, crtc, name, chipname);
+
+ drm_printf(p, "expected:\n");
+ intel_lt_phy_dump_hw_state(display, a);
+ drm_printf(p, "found:\n");
+ intel_lt_phy_dump_hw_state(display, b);
}
bool
@@ -5119,6 +5156,16 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
+#define PIPE_CONF_CHECK_PLL_LT(name) do { \
+ if (!intel_lt_phy_pll_compare_hw_state(&current_config->name, \
+ &pipe_config->name)) { \
+ pipe_config_lt_phy_pll_mismatch(&p, fastset, crtc, __stringify(name), \
+ &current_config->name, \
+ &pipe_config->name); \
+ ret = false; \
+ } \
+} while (0)
+
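For reference, the PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll) use added below expands roughly to:

if (!intel_lt_phy_pll_compare_hw_state(&current_config->dpll_hw_state.ltpll,
				       &pipe_config->dpll_hw_state.ltpll)) {
	pipe_config_lt_phy_pll_mismatch(&p, fastset, crtc, "dpll_hw_state.ltpll",
					&current_config->dpll_hw_state.ltpll,
					&pipe_config->dpll_hw_state.ltpll);
	ret = false;
}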
#define PIPE_CONF_CHECK_TIMINGS(name) do { \
PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
PIPE_CONF_CHECK_I(name.crtc_htotal); \
@@ -5313,6 +5360,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_I(pixel_rate);
+ PIPE_CONF_CHECK_BOOL(hw.casf_params.casf_enable);
+ PIPE_CONF_CHECK_I(hw.casf_params.win_size);
+ PIPE_CONF_CHECK_I(hw.casf_params.strength);
PIPE_CONF_CHECK_X(gamma_mode);
if (display->platform.cherryview)
@@ -5343,7 +5393,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_PLL(dpll_hw_state);
/* FIXME convert MTL+ platforms over to dpll_mgr */
- if (DISPLAY_VER(display) >= 14)
+ if (HAS_LT_PHY(display))
+ PIPE_CONF_CHECK_PLL_LT(dpll_hw_state.ltpll);
+ else if (DISPLAY_VER(display) >= 14)
PIPE_CONF_CHECK_PLL_CX0(dpll_hw_state.cx0pll);
PIPE_CONF_CHECK_X(dsi_pll.ctrl);
@@ -5437,6 +5489,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(vrr.guardband);
}
+ PIPE_CONF_CHECK_I(set_context_latency);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_LLI
@@ -5683,6 +5737,23 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
+u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
+ u8 enabled_pipes)
+{
+ const struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->hw.enable)
+ enabled_pipes |= BIT(crtc->pipe);
+ else
+ enabled_pipes &= ~BIT(crtc->pipe);
+ }
+
+ return enabled_pipes;
+}
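The helper overrides only the pipes present in the atomic state, so the seed mask carries the pipes this commit does not touch. A minimal sketch of the intended call pattern (the seed variable is hypothetical):

u8 enabled_pipes = prev_enabled_pipes;	/* hypothetical pre-commit mask */

/* bits for pipes in 'state' are overwritten, others pass through */
enabled_pipes = intel_calc_enabled_pipes(state, enabled_pipes);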
+
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes)
{
@@ -5712,12 +5783,16 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
return 0;
}
-static bool lrr_params_changed(const struct drm_display_mode *old_adjusted_mode,
- const struct drm_display_mode *new_adjusted_mode)
+static bool lrr_params_changed(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
{
+ const struct drm_display_mode *old_adjusted_mode = &old_crtc_state->hw.adjusted_mode;
+ const struct drm_display_mode *new_adjusted_mode = &new_crtc_state->hw.adjusted_mode;
+
return old_adjusted_mode->crtc_vblank_start != new_adjusted_mode->crtc_vblank_start ||
old_adjusted_mode->crtc_vblank_end != new_adjusted_mode->crtc_vblank_end ||
- old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal;
+ old_adjusted_mode->crtc_vtotal != new_adjusted_mode->crtc_vtotal ||
+ old_crtc_state->set_context_latency != new_crtc_state->set_context_latency;
}
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
@@ -5743,8 +5818,7 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
&new_crtc_state->dp_m_n))
new_crtc_state->update_m_n = false;
- if (!lrr_params_changed(&old_crtc_state->hw.adjusted_mode,
- &new_crtc_state->hw.adjusted_mode))
+ if (!lrr_params_changed(old_crtc_state, new_crtc_state))
new_crtc_state->update_lrr = false;
if (intel_crtc_needs_modeset(new_crtc_state))
@@ -5958,6 +6032,14 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return -EINVAL;
}
+ /* FIXME: selective fetch should be disabled for async flips */
+ if (new_crtc_state->enable_psr2_sel_fetch) {
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s] async flip disallowed with PSR2 selective fetch\n",
+ crtc->base.base.id, crtc->base.name);
+ return -EINVAL;
+ }
+
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (plane->pipe != crtc->pipe)
@@ -6336,7 +6418,6 @@ int intel_atomic_check(struct drm_device *dev,
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- bool any_ms = false;
if (!intel_display_driver_check_access(display))
return -ENODEV;
@@ -6444,14 +6525,11 @@ int intel_atomic_check(struct drm_device *dev,
if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
- any_ms = true;
-
intel_dpll_release(state, crtc);
}
- if (any_ms && !check_digital_port_conflicts(state)) {
- drm_dbg_kms(display->drm,
- "rejecting conflicting digital port configuration\n");
+ if (intel_any_crtc_needs_modeset(state) && !check_digital_port_conflicts(state)) {
+ drm_dbg_kms(display->drm, "rejecting conflicting digital port configuration\n");
ret = -EINVAL;
goto fail;
}
@@ -6460,29 +6538,25 @@ int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
+ new_crtc_state->min_cdclk = intel_crtc_min_cdclk(new_crtc_state);
+
ret = intel_compute_global_watermarks(state);
if (ret)
goto fail;
- ret = intel_bw_atomic_check(state, any_ms);
+ ret = intel_bw_atomic_check(state);
if (ret)
goto fail;
- ret = intel_cdclk_atomic_check(state, &any_ms);
+ ret = intel_cdclk_atomic_check(state);
if (ret)
goto fail;
- if (intel_any_crtc_needs_modeset(state))
- any_ms = true;
-
- if (any_ms) {
+ if (intel_any_crtc_needs_modeset(state)) {
ret = intel_modeset_checks(state);
if (ret)
goto fail;
-
- ret = intel_modeset_calc_cdclk(state);
- if (ret)
- return ret;
}
ret = intel_pmdemand_atomic_check(state);
@@ -6733,6 +6807,11 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state,
intel_vrr_set_transcoder_timings(new_crtc_state);
}
+ if (intel_casf_enabling(new_crtc_state, old_crtc_state))
+ intel_casf_enable(new_crtc_state);
+ else if (new_crtc_state->hw.casf_params.strength != old_crtc_state->hw.casf_params.strength)
+ intel_casf_update_strength(new_crtc_state);
+
intel_fbc_update(state, crtc);
drm_WARN_ON(display->drm, !intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF));
@@ -7225,6 +7304,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ unsigned int size = new_crtc_state->plane_color_changed ? 8192 : 1024;
if (!new_crtc_state->use_flipq &&
!new_crtc_state->use_dsb &&
@@ -7235,10 +7315,12 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
* Rough estimate:
* ~64 registers per each plane * 8 planes = 512
* Double that for pipe stuff and other overhead.
+ * ~4913 registers for 3DLUT
+ * ~200 color registers * 3 HDR planes
*/
new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0,
new_crtc_state->use_dsb ||
- new_crtc_state->use_flipq ? 1024 : 16);
+ new_crtc_state->use_flipq ? size : 16);
if (!new_crtc_state->dsb_commit) {
new_crtc_state->use_flipq = false;
new_crtc_state->use_dsb = false;
@@ -7265,6 +7347,9 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
state, crtc);
+ intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
+ new_crtc_state);
+
if (new_crtc_state->use_dsb)
intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
@@ -7298,7 +7383,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
- intel_dsb_wait_vblank_delay(state, new_crtc_state->dsb_commit);
+ intel_dsb_wait_for_delayed_vblank(state, new_crtc_state->dsb_commit);
intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
new_crtc_state);
intel_dsb_interrupt(new_crtc_state->dsb_commit);
@@ -7388,13 +7473,13 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
*/
intel_pmdemand_pre_plane_update(state);
- if (state->modeset) {
+ if (state->modeset)
drm_atomic_helper_update_legacy_modeset_state(display->drm, &state->base);
- intel_set_cdclk_pre_plane_update(state);
+ intel_set_cdclk_pre_plane_update(state);
+ if (state->modeset)
intel_modeset_verify_disabled(state);
- }
intel_sagv_pre_plane_update(state);
@@ -7507,8 +7592,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_verify_planes(state);
intel_sagv_post_plane_update(state);
- if (state->modeset)
- intel_set_cdclk_post_plane_update(state);
+ intel_set_cdclk_post_plane_update(state);
intel_pmdemand_post_plane_update(state);
drm_atomic_helper_commit_hw_done(&state->base);
@@ -7994,6 +8078,14 @@ enum drm_mode_status intel_mode_valid(struct drm_device *dev,
mode->vtotal > vtotal_max)
return MODE_V_ILLEGAL;
+ /*
+ * WM_LINETIME only goes up to (almost) 64 usec, and also
+ * knowing that the linetime is always bounded will ease the
+ * mind during various calculations.
+ */
+ if (DIV_ROUND_UP(mode->htotal * 1000, mode->clock) > 64)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
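A worked instance of the new bound: with htotal in pixels and clock in kHz, the line time in usec is htotal * 1000 / clock.

/*
 *   4k60:      htotal 4400, clock 594000 -> DIV_ROUND_UP(4400000, 594000) = 8 usec, OK
 *   slow mode: htotal 2200, clock  25000 -> DIV_ROUND_UP(2200000,  25000) = 88 usec, MODE_H_ILLEGAL
 */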
@@ -8318,7 +8410,5 @@ void i830_disable_pipe(struct intel_display *display, enum pipe pipe)
bool intel_scanout_needs_vtd_wa(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- return IS_DISPLAY_VER(display, 6, 11) && i915_vtd_active(i915);
+ return IS_DISPLAY_VER(display, 6, 11) && intel_display_vtd_active(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 37e2ab301a80..bcc6ccb69d2b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -34,6 +34,7 @@ struct drm_atomic_state;
struct drm_device;
struct drm_display_mode;
struct drm_encoder;
+struct drm_format_info;
struct drm_modeset_acquire_ctx;
struct intel_atomic_state;
struct intel_crtc;
@@ -394,14 +395,19 @@ enum phy_fia {
i)
int intel_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
+u8 intel_calc_enabled_pipes(struct intel_atomic_state *state,
+ u8 enabled_pipes);
u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
int bw_overhead,
struct intel_link_m_n *m_n);
-u32 intel_plane_fb_max_stride(struct drm_device *drm,
- u32 pixel_format, u64 modifier);
+u32 intel_plane_fb_max_stride(struct intel_display *display,
+ const struct drm_format_info *info,
+ u64 modifier);
+u32 intel_dumb_fb_max_stride(struct drm_device *drm,
+ u32 pixel_format, u64 modifier);
enum drm_mode_status
intel_mode_valid_max_plane_size(struct intel_display *display,
const struct drm_display_mode *mode,
@@ -435,11 +441,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct intel_display *display, enum pipe pipe);
void i830_disable_pipe(struct intel_display *display, enum pipe pipe);
-int vlv_get_hpll_vco(struct drm_device *drm);
-int vlv_get_cck_clock(struct drm_device *drm,
- const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_device *drm,
- const char *name, u32 reg);
bool intel_has_pending_fb_unpin(struct intel_display *display);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
@@ -528,7 +529,6 @@ void intel_init_display_hooks(struct intel_display *display);
void intel_setup_outputs(struct intel_display *display);
int intel_initial_commit(struct intel_display *display);
void intel_panel_sanitize_ssc(struct intel_display *display);
-void intel_update_czclk(struct intel_display *display);
enum drm_mode_status intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode);
int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c
index 4d565935e2cc..9a47aa38cf82 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.c
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c
@@ -1,15 +1,21 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
-#include "i915_drv.h"
-#include "intel_display_conversion.h"
+#include <drm/intel/display_member.h>
-struct intel_display *__i915_to_display(struct drm_i915_private *i915)
-{
- return i915->display;
-}
+#include "intel_display_conversion.h"
struct intel_display *__drm_to_display(struct drm_device *drm)
{
- return __i915_to_display(to_i915(drm));
+ /*
+ * Note: This relies on both struct drm_i915_private and struct
+ * xe_device having the struct drm_device and struct intel_display *
+ * members at the same relative offsets, as defined by struct
+ * __intel_generic_device.
+ *
+ * See also INTEL_DISPLAY_MEMBER_STATIC_ASSERT().
+ */
+ struct __intel_generic_device *d = container_of(drm, struct __intel_generic_device, drm);
+
+ return d->display;
}
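A sketch of the layout the comment relies on, with member names taken from the container_of() above; the field order and the static assert usage are assumptions based on the names quoted in the comment:

/* assumed common prefix shared by the i915 and xe device structs */
struct __intel_generic_device {
	struct drm_device drm;
	struct intel_display *display;
};

/*
 * Each driver is expected to assert that its own struct matches,
 * e.g. (exact macro arguments assumed):
 *
 *   INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct drm_i915_private);
 */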
diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h
index 46c7208d42ba..d497bc58a73f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_conversion.h
+++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h
@@ -9,20 +9,8 @@
#define __INTEL_DISPLAY_CONVERSION__
struct drm_device;
-struct drm_i915_private;
struct intel_display;
-struct intel_display *__i915_to_display(struct drm_i915_private *i915);
struct intel_display *__drm_to_display(struct drm_device *drm);
-/*
- * Transitional macro to optionally convert struct drm_i915_private * to struct
- * intel_display *, also accepting the latter.
- */
-#define __to_intel_display(p) \
- _Generic(p, \
- const struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
- struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \
- const struct intel_display *: (p), \
- struct intel_display *: (p))
#endif /* __INTEL_DISPLAY_CONVERSION__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 8c226406c5cd..9b8414b77c15 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -41,6 +41,7 @@ struct intel_cdclk_vals;
struct intel_color_funcs;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display_parent_interface;
struct intel_dmc;
struct intel_dpll_global_funcs;
struct intel_dpll_mgr;
@@ -141,14 +142,13 @@ struct intel_dpll_global {
};
struct intel_frontbuffer_tracking {
+ /* protects busy_bits */
spinlock_t lock;
/*
- * Tracking bits for delayed frontbuffer flushing du to gpu activity or
- * scheduled flips.
+ * Tracking bits for delayed frontbuffer flushing due to gpu activity.
*/
unsigned busy_bits;
- unsigned flip_bits;
};
struct intel_hotplug {
@@ -291,6 +291,9 @@ struct intel_display {
/* Intel PCH: where the south display engine lives */
enum intel_pch pch_type;
+ /* Parent, or core, driver functions exposed to display */
+ const struct intel_display_parent_interface *parent;
+
/* Display functions */
struct {
/* Top level crtc-ish functions */
@@ -370,6 +373,10 @@ struct intel_display {
} dbuf;
struct {
+ struct intel_global_obj obj;
+ } dbuf_bw;
+
+ struct {
/*
* dkl.phy_lock protects against concurrent access of the
* Dekel TypeC PHYs.
@@ -475,7 +482,21 @@ struct intel_display {
struct work_struct vblank_notify_work;
- u32 de_irq_mask[I915_MAX_PIPES];
+ /*
+ * Cached value of VLV/CHV IMR to avoid reads in updating the
+ * bitfield.
+ */
+ u32 vlv_imr_mask;
+ /*
+ * Cached value of gen 5-7 DE IMR to avoid reads in updating the
+ * bitfield.
+ */
+ u32 ilk_de_imr_mask;
+ /*
+ * Cached value of BDW+ DE pipe IMR to avoid reads in updating
+ * the bitfield.
+ */
+ u32 de_pipe_imr_mask[I915_MAX_PIPES];
u32 pipestat_irq_mask[I915_MAX_PIPES];
} irq;
@@ -568,6 +589,11 @@ struct intel_display {
} state;
struct {
+ unsigned int hpll_freq;
+ unsigned int czclk_freq;
+ } vlv_clock;
+
+ struct {
/* ordered wq for modesets */
struct workqueue_struct *modeset;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index ce3f9810c42d..9bbfdae8d024 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -12,6 +12,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "hsw_ips.h"
#include "i915_reg.h"
@@ -47,6 +48,7 @@
#include "intel_psr_regs.h"
#include "intel_vdsc.h"
#include "intel_wm.h"
+#include "intel_tc.h"
static struct intel_display *node_to_intel_display(struct drm_info_node *node)
{
@@ -76,9 +78,6 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
seq_printf(m, "FB tracking busy bits: 0x%08x\n",
display->fb_tracking.busy_bits);
- seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- display->fb_tracking.flip_bits);
-
spin_unlock(&display->fb_tracking.lock);
return 0;
@@ -246,6 +245,8 @@ static void intel_connector_info(struct seq_file *m,
{
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *mode;
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct intel_digital_port *dig_port = NULL;
seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
connector->base.id, connector->name,
@@ -268,14 +269,19 @@ static void intel_connector_info(struct seq_file *m,
intel_dp_mst_info(m, intel_connector);
else
intel_dp_info(m, intel_connector);
+ dig_port = dp_to_dig_port(intel_attached_dp(intel_connector));
break;
case DRM_MODE_CONNECTOR_HDMIA:
intel_hdmi_info(m, intel_connector);
+ dig_port = hdmi_to_dig_port(intel_attached_hdmi(intel_connector));
break;
default:
break;
}
+	if (dig_port && intel_encoder_is_tc(&dig_port->base))
+ intel_tc_info(&p, dig_port);
+
intel_hdcp_info(m, intel_connector);
seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
@@ -820,14 +826,14 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
void intel_display_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_fifo_underrun_reset", 0644, debugfs_root,
display, &i915_fifo_underrun_reset_ops);
drm_debugfs_create_files(intel_display_debugfs_list,
ARRAY_SIZE(intel_display_debugfs_list),
- minor->debugfs_root, minor);
+ debugfs_root, display->drm->primary);
intel_bios_debugfs_register(display);
intel_cdclk_debugfs_register(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
index 88914a1f3f62..de62b774272d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -7,7 +7,6 @@
#include <linux/kernel.h>
#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
#include "intel_display_core.h"
#include "intel_display_debugfs_params.h"
@@ -154,14 +153,14 @@ intel_display_debugfs_create_uint(const char *name, umode_t mode,
/* add a subdirectory with files for each intel display param */
void intel_display_debugfs_params(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
struct dentry *dir;
char dirname[16];
snprintf(dirname, sizeof(dirname), "%s_params", display->drm->driver->name);
- dir = debugfs_lookup(dirname, minor->debugfs_root);
+ dir = debugfs_lookup(dirname, debugfs_root);
if (!dir)
- dir = debugfs_create_dir(dirname, minor->debugfs_root);
+ dir = debugfs_create_dir(dirname, debugfs_root);
if (IS_ERR(dir))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 089cffabbad5..1170afaa8680 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -1354,6 +1354,19 @@ static const struct intel_display_device_info xe2_lpd_display = {
.__runtime_defaults.has_dbuf_overlap_detection = true,
};
+static const struct intel_display_device_info wcl_display = {
+ XE_LPDP_FEATURES,
+
+ .__runtime_defaults.cpu_transcoder_mask =
+ BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .__runtime_defaults.pipe_mask =
+ BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .__runtime_defaults.fbc_mask =
+ BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) | BIT(INTEL_FBC_C),
+ .__runtime_defaults.port_mask =
+ BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2),
+};
+
static const struct intel_display_device_info xe2_hpd_display = {
XE_LPDP_FEATURES,
.__runtime_defaults.port_mask = BIT(PORT_A) |
@@ -1391,8 +1404,20 @@ static const struct platform_desc bmg_desc = {
PLATFORM_GROUP(dgfx),
};
+static const u16 wcl_ids[] = {
+ INTEL_WCL_IDS(ID),
+ 0
+};
+
static const struct platform_desc ptl_desc = {
PLATFORM(pantherlake),
+ .subplatforms = (const struct subplatform_desc[]) {
+ {
+ SUBPLATFORM(pantherlake, wildcatlake),
+ .pciidlist = wcl_ids,
+ },
+ {},
+ }
};
__diag_pop();
@@ -1469,6 +1494,7 @@ static const struct {
INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
+ INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
};
static const struct {
@@ -1480,7 +1506,8 @@ static const struct {
{ 14, 1, &xe2_hpd_display },
{ 20, 0, &xe2_lpd_display },
{ 30, 0, &xe2_lpd_display },
- { 30, 2, &xe2_lpd_display },
+ { 30, 2, &wcl_display },
+ { 35, 0, &xe2_lpd_display },
};
static const struct intel_display_device_info *
@@ -1621,7 +1648,8 @@ static void display_platforms_or(struct intel_display_platforms *dst,
bitmap_or(dst->bitmap, dst->bitmap, src->bitmap, display_platforms_num_bits());
}
-struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
+ const struct intel_display_parent_interface *parent)
{
struct intel_display *display;
const struct intel_display_device_info *info;
@@ -1637,6 +1665,8 @@ struct intel_display *intel_display_device_probe(struct pci_dev *pdev)
/* Add drm device backpointer as early as possible. */
display->drm = pci_get_drvdata(pdev);
+ display->parent = parent;
+
intel_display_params_copy(&display->params);
if (has_no_display(pdev)) {
@@ -1931,6 +1961,11 @@ void intel_display_device_info_print(const struct intel_display_device_info *inf
drm_printf(p, "rawclk rate: %u kHz\n", runtime->rawclk_freq);
}
+bool intel_display_device_present(struct intel_display *display)
+{
+ return display && HAS_DISPLAY(display);
+}
+
/*
* Assuming the device has display hardware, should it be enabled?
*
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 4308822f0415..b559ef43d547 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -9,11 +9,11 @@
#include <linux/bitops.h>
#include <linux/types.h>
-#include "intel_display_conversion.h"
#include "intel_display_limits.h"
struct drm_printer;
struct intel_display;
+struct intel_display_parent_interface;
struct pci_dev;
/*
@@ -102,7 +102,9 @@ struct pci_dev;
/* Display ver 14.1 (based on GMD ID) */ \
func(battlemage) \
/* Display ver 30 (based on GMD ID) */ \
- func(pantherlake)
+ func(pantherlake) \
+ func(pantherlake_wildcatlake)
+
#define __MEMBER(name) unsigned long name:1;
#define __COUNT(x) 1 +
@@ -141,10 +143,13 @@ struct intel_display_platforms {
func(overlay_needs_physical); \
func(supports_tv);
+#define HAS_128B_Y_TILING(__display) (!(__display)->platform.i915g && !(__display)->platform.i915gm)
#define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
#define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5)
#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13)
+#define HAS_AUX_CCS(__display) (IS_DISPLAY_VER(__display, 9, 12) || (__display)->platform.alderlake_p || (__display)->platform.meteorlake)
#define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display))
+#define HAS_CASF(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash)
#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20)
@@ -156,7 +161,7 @@ struct intel_display_platforms {
#define HAS_DISPLAY(__display) (DISPLAY_RUNTIME_INFO(__display)->pipe_mask != 0)
#define HAS_DMC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dmc)
#define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20)
-#define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell)
+#define HAS_DOUBLE_BUFFERED_M_N(__display) (IS_DISPLAY_VER((__display), 9, 14) || (__display)->platform.broadwell)
#define HAS_DOUBLE_BUFFERED_LUT(__display) (DISPLAY_VER(__display) >= 30)
#define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4)
#define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14)
@@ -224,8 +229,8 @@ struct intel_display_platforms {
(IS_DISPLAY_VERx100((__display), (ipver), (ipver)) && \
IS_DISPLAY_STEP((__display), (from), (until)))
-#define DISPLAY_INFO(__display) (__to_intel_display(__display)->info.__device_info)
-#define DISPLAY_RUNTIME_INFO(__display) (&__to_intel_display(__display)->info.__runtime_info)
+#define DISPLAY_INFO(__display) ((__display)->info.__device_info)
+#define DISPLAY_RUNTIME_INFO(__display) (&(__display)->info.__runtime_info)
#define DISPLAY_VER(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver)
#define DISPLAY_VERx100(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver * 100 + \
@@ -236,7 +241,7 @@ struct intel_display_platforms {
#define INTEL_DISPLAY_STEP(__display) (DISPLAY_RUNTIME_INFO(__display)->step)
#define IS_DISPLAY_STEP(__display, since, until) \
- (drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
+ (drm_WARN_ON((__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \
INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until))
#define ARLS_HOST_BRIDGE_PCI_ID1 0x7D1C
@@ -307,8 +312,10 @@ struct intel_display_device_info {
} color;
};
+bool intel_display_device_present(struct intel_display *display);
bool intel_display_device_enabled(struct intel_display *display);
-struct intel_display *intel_display_device_probe(struct pci_dev *pdev);
+struct intel_display *intel_display_device_probe(struct pci_dev *pdev,
+ const struct intel_display_parent_interface *parent);
void intel_display_device_remove(struct intel_display *display);
void intel_display_device_info_runtime_init(struct intel_display *display);
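
With the probe signature gaining a parent parameter, a parent driver now hands its interface table to the display code up front. A sketch of the expected call site follows; the contents of the table beyond ->rpm (the only member dereferenced elsewhere in this series, see intel_display_rpm.c below) are not visible in this diff.

	static struct intel_display *probe_display_sketch(struct pci_dev *pdev)
	{
		/* Lifetime must cover the display device; static works. */
		static const struct intel_display_parent_interface parent = {
			/* .rpm = &<driver>_rpm_ops, other members unknown here */
		};

		return intel_display_device_probe(pdev, &parent);
	}
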
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 8586ba102605..7e000ba3e08b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -14,10 +14,12 @@
#include <drm/drm_client_event.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_privacy_screen_consumer.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "i915_drv.h"
+#include "i915_utils.h" /* for i915_inject_probe_failure() */
#include "i9xx_wm.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
@@ -27,12 +29,15 @@
#include "intel_cdclk.h"
#include "intel_color.h"
#include "intel_crtc.h"
+#include "intel_cursor.h"
+#include "intel_dbuf_bw.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dmc.h"
@@ -144,17 +149,7 @@ static void intel_mode_config_init(struct intel_display *display)
mode_config->max_height = 2048;
}
- if (display->platform.i845g || display->platform.i865g) {
- mode_config->cursor_width = display->platform.i845g ? 64 : 512;
- mode_config->cursor_height = 1023;
- } else if (display->platform.i830 || display->platform.i85x ||
- display->platform.i915g || display->platform.i915gm) {
- mode_config->cursor_width = 64;
- mode_config->cursor_height = 64;
- } else {
- mode_config->cursor_width = 256;
- mode_config->cursor_height = 256;
- }
+ intel_cursor_mode_config_init(display);
}
static void intel_mode_config_cleanup(struct intel_display *display)
@@ -284,6 +279,10 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
if (ret)
goto cleanup_wq_unordered;
+ ret = intel_dbuf_bw_init(display);
+ if (ret)
+ goto cleanup_wq_unordered;
+
ret = intel_bw_init(display);
if (ret)
goto cleanup_wq_unordered;
@@ -481,7 +480,6 @@ int intel_display_driver_probe_nogem(struct intel_display *display)
intel_dpll_init(display);
intel_fdi_pll_freq_update(display);
- intel_update_czclk(display);
intel_display_driver_init_hw(display);
intel_dpll_update_ref_clks(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 68157f177b6a..43b27deb4a26 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "i915_drv.h"
@@ -140,14 +141,14 @@ void ilk_update_display_irq(struct intel_display *display,
lockdep_assert_held(&display->irq.lock);
drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask);
- new_val = dev_priv->irq_mask;
+ new_val = display->irq.ilk_de_imr_mask;
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != dev_priv->irq_mask &&
+ if (new_val != display->irq.ilk_de_imr_mask &&
!drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) {
- dev_priv->irq_mask = new_val;
- intel_de_write(display, DEIMR, dev_priv->irq_mask);
+ display->irq.ilk_de_imr_mask = new_val;
+ intel_de_write(display, DEIMR, display->irq.ilk_de_imr_mask);
intel_de_posting_read(display, DEIMR);
}
}
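
All three renamed caches (vlv_imr_mask, ilk_de_imr_mask, de_pipe_imr_mask[]) serve the same pattern, condensed below as a sketch: keep the last IMR value written in software so the update path never reads the register back, and skip the write entirely when the mask is unchanged.

	static void imr_cache_update_sketch(struct intel_display *display,
					    u32 interrupt_mask, u32 enabled_irq_mask)
	{
		u32 new_val = display->irq.ilk_de_imr_mask;	/* cached copy */

		new_val &= ~interrupt_mask;			/* drop the managed bits */
		new_val |= ~enabled_irq_mask & interrupt_mask;	/* mask the disabled ones */

		if (new_val != display->irq.ilk_de_imr_mask) {	/* avoid dead writes */
			display->irq.ilk_de_imr_mask = new_val;
			intel_de_write(display, DEIMR, new_val);
			intel_de_posting_read(display, DEIMR);	/* flush the write */
		}
	}
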
@@ -215,13 +216,13 @@ static void bdw_update_pipe_irq(struct intel_display *display,
if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)))
return;
- new_val = display->irq.de_irq_mask[pipe];
+ new_val = display->irq.de_pipe_imr_mask[pipe];
new_val &= ~interrupt_mask;
new_val |= (~enabled_irq_mask & interrupt_mask);
- if (new_val != display->irq.de_irq_mask[pipe]) {
- display->irq.de_irq_mask[pipe] = new_val;
- intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]);
+ if (new_val != display->irq.de_pipe_imr_mask[pipe]) {
+ display->irq.de_pipe_imr_mask[pipe] = new_val;
+ intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_pipe_imr_mask[pipe]);
intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe));
}
}
@@ -872,7 +873,7 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display)
}
}
-void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
+static void _ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
{
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
@@ -923,7 +924,7 @@ void ilk_display_irq_handler(struct intel_display *display, u32 de_iir)
ilk_display_rps_irq_handler(display);
}
-void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
+static void _ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
{
enum pipe pipe;
u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
@@ -972,6 +973,53 @@ void ivb_display_irq_handler(struct intel_display *display, u32 de_iir)
}
}
+void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier)
+{
+ /* disable master interrupt before clearing iir */
+ *de_ier = intel_de_read_fw(display, DEIER);
+ intel_de_write_fw(display, DEIER, *de_ier & ~DE_MASTER_IRQ_CONTROL);
+
+ /*
+ * Disable south interrupts. We'll only write to SDEIIR once, so further
+ * interrupts will be stored on its back queue, and then we'll be able
+ * to process them after we restore SDEIER (as soon as we restore it,
+ * we'll get an interrupt if SDEIIR still has something to process due
+ * to its back queue).
+ */
+ if (!HAS_PCH_NOP(display)) {
+ *sde_ier = intel_de_read_fw(display, SDEIER);
+ intel_de_write_fw(display, SDEIER, 0);
+ } else {
+ *sde_ier = 0;
+ }
+}
+
+void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier)
+{
+ intel_de_write_fw(display, DEIER, de_ier);
+
+ if (sde_ier)
+ intel_de_write_fw(display, SDEIER, sde_ier);
+}
+
+bool ilk_display_irq_handler(struct intel_display *display)
+{
+ u32 de_iir;
+ bool handled = false;
+
+ de_iir = intel_de_read_fw(display, DEIIR);
+ if (de_iir) {
+ intel_de_write_fw(display, DEIIR, de_iir);
+ if (DISPLAY_VER(display) >= 7)
+ _ivb_display_irq_handler(display, de_iir);
+ else
+ _ilk_display_irq_handler(display, de_iir);
+ handled = true;
+ }
+
+ return handled;
+}
+
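
The three helpers above are meant to be composed by the parent driver's top-level interrupt handler, roughly as below. This is a sketch of the expected caller; the actual caller lives in i915_irq.c and is not part of this hunk.

	static irqreturn_t ilk_irq_handler_sketch(struct intel_display *display)
	{
		u32 de_ier, sde_ier;
		bool handled;

		/* Quiesce DE and south interrupts so IIR can be drained once. */
		ilk_display_irq_master_disable(display, &de_ier, &sde_ier);

		handled = ilk_display_irq_handler(display);

		/* Restoring SDEIER re-raises anything queued in the meantime. */
		ilk_display_irq_master_enable(display, de_ier, sde_ier);

		return handled ? IRQ_HANDLED : IRQ_NONE;
	}
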
static u32 gen8_de_port_aux_mask(struct intel_display *display)
{
u32 mask;
@@ -1865,8 +1913,6 @@ void vlv_display_error_irq_handler(struct intel_display *display,
static void _vlv_display_irq_reset(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
-
if (display->platform.cherryview)
intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
@@ -1881,7 +1927,7 @@ static void _vlv_display_irq_reset(struct intel_display *display)
i9xx_pipestat_irq_reset(display);
intel_display_irq_regs_reset(display, VLV_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ display->irq.vlv_imr_mask = ~0u;
}
void vlv_display_irq_reset(struct intel_display *display)
@@ -1902,6 +1948,22 @@ void i9xx_display_irq_reset(struct intel_display *display)
i9xx_pipestat_irq_reset(display);
}
+u32 i9xx_display_irq_enable_mask(struct intel_display *display)
+{
+ u32 enable_mask;
+
+ enable_mask = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
+ I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+
+ if (DISPLAY_VER(display) >= 3)
+ enable_mask |= I915_ASLE_INTERRUPT;
+
+ if (HAS_HOTPLUG(display))
+ enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
+
+ return enable_mask;
+}
+
void i915_display_irq_postinstall(struct intel_display *display)
{
/*
@@ -1939,7 +2001,6 @@ static u32 vlv_error_mask(void)
static void _vlv_display_irq_postinstall(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
@@ -1973,11 +2034,11 @@ static void _vlv_display_irq_postinstall(struct intel_display *display)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(display->drm, display->irq.vlv_imr_mask != ~0u);
- dev_priv->irq_mask = ~enable_mask;
+ display->irq.vlv_imr_mask = ~enable_mask;
- intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ intel_display_irq_regs_init(display, VLV_IRQ_REGS, display->irq.vlv_imr_mask, enable_mask);
}
void vlv_display_irq_postinstall(struct intel_display *display)
@@ -1988,22 +2049,37 @@ void vlv_display_irq_postinstall(struct intel_display *display)
spin_unlock_irq(&display->irq.lock);
}
-void ibx_display_irq_reset(struct intel_display *display)
+static void ibx_display_irq_reset(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- if (HAS_PCH_NOP(i915))
+ if (HAS_PCH_NOP(display))
return;
gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS);
- if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915))
+ if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display))
intel_de_write(display, SERR_INT, 0xffffffff);
}
+void ilk_display_irq_reset(struct intel_display *display)
+{
+ struct intel_uncore *uncore = to_intel_uncore(display->drm);
+
+ gen2_irq_reset(uncore, DE_IRQ_REGS);
+ display->irq.ilk_de_imr_mask = ~0u;
+
+ if (DISPLAY_VER(display) == 7)
+ intel_de_write(display, GEN7_ERR_INT, 0xffffffff);
+
+ if (display->platform.haswell) {
+ intel_de_write(display, EDP_PSR_IMR, 0xffffffff);
+ intel_de_write(display, EDP_PSR_IIR, 0xffffffff);
+ }
+
+ ibx_display_irq_reset(display);
+}
+
void gen8_display_irq_reset(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
enum pipe pipe;
if (!HAS_DISPLAY(display))
@@ -2020,7 +2096,7 @@ void gen8_display_irq_reset(struct intel_display *display)
intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS);
intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS);
- if (HAS_PCH_SPLIT(i915))
+ if (HAS_PCH_SPLIT(display))
ibx_display_irq_reset(display);
}
@@ -2091,8 +2167,8 @@ void gen8_irq_power_well_post_enable(struct intel_display *display,
for_each_pipe_masked(display, pipe, pipe_mask)
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- display->irq.de_irq_mask[pipe],
- ~display->irq.de_irq_mask[pipe] | extra_ier);
+ display->irq.de_pipe_imr_mask[pipe],
+ ~display->irq.de_pipe_imr_mask[pipe] | extra_ier);
spin_unlock_irq(&display->irq.lock);
}
@@ -2186,8 +2262,6 @@ out:
void ilk_de_irq_postinstall(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
u32 display_mask, extra_mask;
if (DISPLAY_VER(display) >= 7) {
@@ -2219,11 +2293,11 @@ void ilk_de_irq_postinstall(struct intel_display *display)
if (display->platform.ironlake && display->platform.mobile)
extra_mask |= DE_PCU_EVENT;
- i915->irq_mask = ~display_mask;
+ display->irq.ilk_de_imr_mask = ~display_mask;
ibx_irq_postinstall(display);
- intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask,
+ intel_display_irq_regs_init(display, DE_IRQ_REGS, display->irq.ilk_de_imr_mask,
display_mask | extra_mask);
}
@@ -2308,12 +2382,12 @@ void gen8_de_irq_postinstall(struct intel_display *display)
}
for_each_pipe(display, pipe) {
- display->irq.de_irq_mask[pipe] = ~de_pipe_masked;
+ display->irq.de_pipe_imr_mask[pipe] = ~de_pipe_masked;
if (intel_display_power_is_enabled(display,
POWER_DOMAIN_PIPE(pipe)))
intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe),
- display->irq.de_irq_mask[pipe],
+ display->irq.de_pipe_imr_mask[pipe],
de_pipe_enables);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h
index c66db3851da4..84acd31948cf 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.h
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.h
@@ -47,8 +47,9 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
-void ivb_display_irq_handler(struct intel_display *display, u32 de_iir);
-void ilk_display_irq_handler(struct intel_display *display, u32 de_iir);
+void ilk_display_irq_master_disable(struct intel_display *display, u32 *de_ier, u32 *sde_ier);
+void ilk_display_irq_master_enable(struct intel_display *display, u32 de_ier, u32 sde_ier);
+bool ilk_display_irq_handler(struct intel_display *display);
void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl);
void gen11_display_irq_handler(struct intel_display *display);
@@ -56,11 +57,12 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl);
void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir);
void i9xx_display_irq_reset(struct intel_display *display);
-void ibx_display_irq_reset(struct intel_display *display);
+void ilk_display_irq_reset(struct intel_display *display);
void vlv_display_irq_reset(struct intel_display *display);
void gen8_display_irq_reset(struct intel_display *display);
void gen11_display_irq_reset(struct intel_display *display);
+u32 i9xx_display_irq_enable_mask(struct intel_display *display);
void i915_display_irq_postinstall(struct intel_display *display);
void i965_display_irq_postinstall(struct intel_display *display);
void vlv_display_irq_postinstall(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_jiffies.h b/drivers/gpu/drm/i915/display/intel_display_jiffies.h
new file mode 100644
index 000000000000..c060c567e262
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_jiffies.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_JIFFIES_H__
+#define __INTEL_DISPLAY_JIFFIES_H__
+
+#include <linux/jiffies.h>
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+ unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+ /*
+ * Don't re-read the value of "jiffies" every time since it may change
+ * behind our back and break the math.
+ */
+ tmp_jiffies = jiffies;
+ target_jiffies = timestamp_jiffies +
+ msecs_to_jiffies_timeout(to_wait_ms);
+
+ if (time_after(target_jiffies, tmp_jiffies)) {
+ remaining_jiffies = target_jiffies - tmp_jiffies;
+ while (remaining_jiffies)
+ remaining_jiffies =
+ schedule_timeout_uninterruptible(remaining_jiffies);
+ }
+}
+
+#endif /* __INTEL_DISPLAY_JIFFIES_H__ */
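
Typical use of wait_remaining_ms_from_jiffies(), sketched below: enforce a minimum delay between two events without sleeping for the full period when part of it has already elapsed. The structure and field names are illustrative, not taken from an actual caller.

	struct panel_example {
		unsigned long last_power_cycle;	/* jiffies recorded at power-off */
		int power_cycle_delay_ms;	/* minimum required off time */
	};

	static void panel_power_on_example(struct panel_example *panel)
	{
		/* Sleeps only for the part of the delay that hasn't elapsed. */
		wait_remaining_ms_from_jiffies(panel->last_power_cycle,
					       panel->power_cycle_delay_ms);

		/* ... proceed with the power-on sequence ... */
	}
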
diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h
index f0fa27e365ab..cb3c9c665c44 100644
--- a/drivers/gpu/drm/i915/display/intel_display_limits.h
+++ b/drivers/gpu/drm/i915/display/intel_display_limits.h
@@ -138,4 +138,13 @@ enum hpd_pin {
HPD_NUM_PINS
};
+enum intel_color_block {
+ INTEL_PLANE_CB_PRE_CSC_LUT,
+ INTEL_PLANE_CB_CSC,
+ INTEL_PLANE_CB_POST_CSC_LUT,
+ INTEL_PLANE_CB_3DLUT,
+
+ INTEL_CB_MAX
+};
+
#endif /* __INTEL_DISPLAY_LIMITS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
index 75316247ee8a..2aed110c5b09 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.c
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -120,6 +120,9 @@ intel_display_param_named_unsafe(enable_psr, int, 0400,
"(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
"Default: -1 (use per-chip default)");
+intel_display_param_named_unsafe(enable_panel_replay, int, 0400,
+ "Enable Panel Replay (0=disabled, 1=enabled). Default: -1 (use per-chip default)");
+
intel_display_param_named(psr_safest_params, bool, 0400,
"Replace PSR VBT parameters by the safest and not optimal ones. This "
"is helpful to detect if PSR issues are related to bad values set in "
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
index 784e6bae8615..b01bc5700c52 100644
--- a/drivers/gpu/drm/i915/display/intel_display_params.h
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -46,6 +46,7 @@ struct drm_printer;
param(bool, enable_dp_mst, true, 0600) \
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
+ param(int, enable_panel_replay, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
param(bool, enable_psr2_sel_fetch, true, 0400) \
param(int, enable_dmc_wl, -1, 0400) \
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c92f3e736228..2a4cc1dcc293 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,8 +3,11 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/iopoll.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "soc/intel_dram.h"
#include "i915_drv.h"
@@ -21,6 +24,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_mchbar_regs.h"
#include "intel_pch_refclk.h"
@@ -1278,6 +1282,7 @@ static void hsw_disable_lcpll(struct intel_display *display,
bool switch_to_fclk, bool allow_power_down)
{
u32 val;
+ int ret;
assert_can_disable_lcpll(display);
@@ -1287,8 +1292,9 @@ static void hsw_disable_lcpll(struct intel_display *display,
val |= LCPLL_CD_SOURCE_FCLK;
intel_de_write(display, LCPLL_CTL, val);
- if (wait_for_us(intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ ret = intel_de_wait_for_set_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 1);
+ if (ret)
drm_err(display->drm, "Switching to FCLK failed\n");
val = intel_de_read(display, LCPLL_CTL);
@@ -1298,7 +1304,7 @@ static void hsw_disable_lcpll(struct intel_display *display,
intel_de_write(display, LCPLL_CTL, val);
intel_de_posting_read(display, LCPLL_CTL);
- if (intel_de_wait_for_clear(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
drm_err(display->drm, "LCPLL still locked\n");
val = hsw_read_dcomp(display);
@@ -1306,8 +1312,10 @@ static void hsw_disable_lcpll(struct intel_display *display,
hsw_write_dcomp(display, val);
ndelay(100);
- if (wait_for((hsw_read_dcomp(display) &
- D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ ret = poll_timeout_us(val = hsw_read_dcomp(display),
+ (val & D_COMP_RCOMP_IN_PROGRESS) == 0,
+ 100, 1000, false);
+ if (ret)
drm_err(display->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
@@ -1324,6 +1332,7 @@ static void hsw_restore_lcpll(struct intel_display *display)
{
struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm);
u32 val;
+ int ret;
val = intel_de_read(display, LCPLL_CTL);
@@ -1352,14 +1361,15 @@ static void hsw_restore_lcpll(struct intel_display *display)
val &= ~LCPLL_PLL_DISABLE;
intel_de_write(display, LCPLL_CTL, val);
- if (intel_de_wait_for_set(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
+ if (intel_de_wait_for_set_ms(display, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
drm_err(display->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0);
- if (wait_for_us((intel_de_read(display, LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ ret = intel_de_wait_for_clear_us(display, LCPLL_CTL,
+ LCPLL_CD_SOURCE_FCLK_DONE, 1);
+ if (ret)
drm_err(display->drm,
"Switching back to LCPLL failed\n");
}
@@ -1426,6 +1436,9 @@ static void intel_pch_reset_handshake(struct intel_display *display,
i915_reg_t reg;
u32 reset_bits;
+ if (DISPLAY_VER(display) >= 35)
+ return;
+
if (display->platform.ivybridge) {
reg = GEN7_MSG_CTL;
reset_bits = WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK;
@@ -2155,8 +2168,6 @@ void intel_power_domains_resume(struct intel_display *display)
power_domains->init_wakeref =
intel_display_power_get(display, POWER_DOMAIN_INIT);
}
-
- intel_power_domains_verify_state(display);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
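
The conversions in this file follow one rule: polls on display engine registers move to the intel_de_wait_for_set/clear_{us,ms}() helpers, while polls on anything else (here the D_COMP read via hsw_read_dcomp()) use poll_timeout_us() from <linux/iopoll.h>. A condensed sketch of the latter, assuming poll_timeout_us(op, cond, sleep_us, timeout_us, sleep_before_read) returns 0 on success and a negative error on timeout:

	static int wait_for_dcomp_idle_sketch(struct intel_display *display)
	{
		u32 val;

		/* Re-evaluates hsw_read_dcomp() every ~100us, for up to 1ms. */
		return poll_timeout_us(val = hsw_read_dcomp(display),
				       (val & D_COMP_RCOMP_IN_PROGRESS) == 0,
				       100, 1000, false);
	}
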
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index 77268802b55e..9b49952994ce 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -1516,7 +1516,11 @@ static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_D),
.has_fuses = true,
- }, {
+ },
+};
+
+static const struct i915_power_well_desc xelpdp_power_wells_aux[] = {
+ {
.instances = &I915_PW_INSTANCES(
I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
@@ -1534,6 +1538,7 @@ static const struct i915_power_well_desc_list xelpdp_power_wells[] = {
I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
I915_PW_DESCRIPTORS(xelpd_power_wells_dc_off),
I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
I915_DECL_PW_DOMAINS(xe2lpd_pwdoms_pica_tc,
@@ -1584,6 +1589,7 @@ static const struct i915_power_well_desc_list xe2lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe2lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(xelpdp_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
};
/*
@@ -1677,16 +1683,6 @@ static const struct i915_power_well_desc xe3lpd_power_wells_main[] = {
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_D),
.has_fuses = true,
- }, {
- .instances = &I915_PW_INSTANCES(
- I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
- I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
- I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
- I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
- I915_PW("AUX_TC3", &xelpdp_pwdoms_aux_tc3, .xelpdp.aux_ch = AUX_CH_USBC3),
- I915_PW("AUX_TC4", &xelpdp_pwdoms_aux_tc4, .xelpdp.aux_ch = AUX_CH_USBC4),
- ),
- .ops = &xelpdp_aux_power_well_ops,
},
};
@@ -1715,6 +1711,65 @@ static const struct i915_power_well_desc_list xe3lpd_power_wells[] = {
I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
I915_PW_DESCRIPTORS(xe3lpd_power_wells_main),
I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(xelpdp_power_wells_aux),
+};
+
+static const struct i915_power_well_desc wcl_power_wells_main[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_2", &xe3lpd_pwdoms_pw_2,
+ .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+ .id = SKL_DISP_PW_2),
+ ),
+ .ops = &hsw_power_well_ops,
+ .has_vga = true,
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_A", &xelpd_pwdoms_pw_a,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_A),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_A),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_B", &xe3lpd_pwdoms_pw_b,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_B),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_B),
+ .has_fuses = true,
+ }, {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("PW_C", &xe3lpd_pwdoms_pw_c,
+ .hsw.idx = XELPD_PW_CTL_IDX_PW_C),
+ ),
+ .ops = &hsw_power_well_ops,
+ .irq_pipe_mask = BIT(PIPE_C),
+ .has_fuses = true,
+ },
+};
+
+static const struct i915_power_well_desc wcl_power_wells_aux[] = {
+ {
+ .instances = &I915_PW_INSTANCES(
+ I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A),
+ I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B),
+ I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1),
+ I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2),
+ ),
+ .ops = &xelpdp_aux_power_well_ops,
+ },
+};
+
+static const struct i915_power_well_desc_list wcl_power_wells[] = {
+ I915_PW_DESCRIPTORS(i9xx_power_wells_always_on),
+ I915_PW_DESCRIPTORS(icl_power_wells_pw_1),
+ I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff),
+ I915_PW_DESCRIPTORS(wcl_power_wells_main),
+ I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica),
+ I915_PW_DESCRIPTORS(wcl_power_wells_aux),
};
static void init_power_well_domains(const struct i915_power_well_instance *inst,
@@ -1824,7 +1879,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains)
return 0;
}
- if (DISPLAY_VER(display) >= 30)
+ if (DISPLAY_VERx100(display) == 3002)
+ return set_power_wells(power_domains, wcl_power_wells);
+ else if (DISPLAY_VER(display) >= 30)
return set_power_wells(power_domains, xe3lpd_power_wells);
else if (DISPLAY_VER(display) >= 20)
return set_power_wells(power_domains, xe2lpd_power_wells);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 48cac225a809..f4f7e73acc87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -3,6 +3,10 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/iopoll.h>
+
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -289,8 +293,8 @@ static void hsw_wait_for_power_well_enable(struct intel_display *display,
}
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- if (intel_de_wait_for_set(display, regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
+ if (intel_de_wait_for_set_ms(display, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx), timeout)) {
drm_dbg_kms(display->drm, "%s power well enable timeout\n",
intel_power_well_name(power_well));
@@ -334,9 +338,9 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display,
*/
reqs = hsw_power_well_requesters(display, regs, pw_idx);
- ret = intel_de_wait_for_clear(display, regs->driver,
- HSW_PWR_WELL_CTL_STATE(pw_idx),
- reqs ? 0 : 1);
+ ret = intel_de_wait_for_clear_ms(display, regs->driver,
+ HSW_PWR_WELL_CTL_STATE(pw_idx),
+ reqs ? 0 : 1);
if (!ret)
return;
@@ -355,8 +359,8 @@ static void gen9_wait_for_power_well_fuses(struct intel_display *display,
{
/* Timeout 5us for PG#0, for other PGs 1us */
drm_WARN_ON(display->drm,
- intel_de_wait_for_set(display, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ intel_de_wait_for_set_ms(display, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
static void hsw_power_well_enable(struct intel_display *display,
@@ -499,7 +503,6 @@ static void icl_tc_port_assert_ref_held(struct intel_display *display,
static void icl_tc_cold_exit(struct intel_display *display)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret, tries = 0;
while (1) {
@@ -514,7 +517,7 @@ static void icl_tc_cold_exit(struct intel_display *display)
msleep(1);
/* TODO: turn failure into an error as soon as i915 CI updates ICL IFWI */
- drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" :
+ drm_dbg_kms(display->drm, "TC cold block %s\n", ret ? "failed" :
"succeeded");
}
@@ -527,6 +530,8 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display,
const struct i915_power_well_regs *regs = power_well->desc->ops->regs;
bool is_tbt = power_well->desc->is_tc_tbt;
bool timeout_expected;
+ u32 val;
+ int ret;
icl_tc_port_assert_ref_held(display, power_well, dig_port);
@@ -553,10 +558,11 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display,
tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx);
- if (wait_for(intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)) &
- DKL_CMN_UC_DW27_UC_HEALTH, 1))
- drm_warn(display->drm,
- "Timeout waiting TC uC health\n");
+ ret = poll_timeout_us(val = intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)),
+ val & DKL_CMN_UC_DW27_UC_HEALTH,
+ 100, 1000, false);
+ if (ret)
+ drm_warn(display->drm, "Timeout waiting TC uC health\n");
}
}
@@ -1122,6 +1128,8 @@ static void vlv_set_power_well(struct intel_display *display,
u32 mask;
u32 state;
u32 ctrl;
+ u32 val;
+ int ret;
mask = PUNIT_PWRGT_MASK(pw_idx);
state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) :
@@ -1129,10 +1137,8 @@ static void vlv_set_power_well(struct intel_display *display,
vlv_punit_get(display->drm);
-#define COND \
- ((vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask) == state)
-
- if (COND)
+ val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS);
+ if ((val & mask) == state)
goto out;
ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL);
@@ -1140,14 +1146,15 @@ static void vlv_set_power_well(struct intel_display *display,
ctrl |= state;
vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl);
- if (wait_for(COND, 100))
+ ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS),
+ (val & mask) == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL));
-#undef COND
-
out:
vlv_punit_put(display->drm);
}
@@ -1208,7 +1215,7 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE);
/*
@@ -1351,6 +1358,7 @@ static void assert_chv_phy_status(struct intel_display *display)
u32 phy_control = display->power.chv_phy_control;
u32 phy_status = 0;
u32 phy_status_mask = 0xffffffff;
+ u32 val;
/*
* The BIOS can leave the PHY in some weird state
@@ -1438,12 +1446,11 @@ static void assert_chv_phy_status(struct intel_display *display)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_de_wait(display, DISPLAY_PHY_STATUS,
- phy_status_mask, phy_status, 10))
+ if (intel_de_wait_ms(display, DISPLAY_PHY_STATUS,
+ phy_status_mask, phy_status, 10, &val))
drm_err(display->drm,
"Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- intel_de_read(display, DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, display->power.chv_phy_control);
+ val & phy_status_mask, phy_status, display->power.chv_phy_control);
}
#undef BITS_SET
@@ -1469,8 +1476,8 @@ static void chv_dpio_cmn_power_well_enable(struct intel_display *display,
vlv_set_power_well(display, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_de_wait_for_set(display, DISPLAY_PHY_STATUS,
- PHY_POWERGOOD(phy), 1))
+ if (intel_de_wait_for_set_ms(display, DISPLAY_PHY_STATUS,
+ PHY_POWERGOOD(phy), 1))
drm_err(display->drm, "Display PHY %d is not power up\n",
phy);
@@ -1711,23 +1718,24 @@ static void chv_set_pipe_power_well(struct intel_display *display,
enum pipe pipe = PIPE_A;
u32 state;
u32 ctrl;
+ int ret;
state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
vlv_punit_get(display->drm);
-#define COND \
- ((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
-
- if (COND)
+ ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
+ if ((ctrl & DP_SSS_MASK(pipe)) == state)
goto out;
- ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl);
- if (wait_for(COND, 100))
+ ret = poll_timeout_us(ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM),
+ (ctrl & DP_SSS_MASK(pipe)) == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"timeout setting power well state %08x (%08x)\n",
state,
@@ -1765,7 +1773,6 @@ static void chv_pipe_power_well_disable(struct intel_display *display,
static void
tgl_tc_cold_request(struct intel_display *display, bool block)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
u8 tries = 0;
int ret;
@@ -1798,10 +1805,9 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
}
if (ret)
- drm_err(&i915->drm, "TC cold %sblock failed\n",
- block ? "" : "un");
+ drm_err(display->drm, "TC cold %sblock failed\n", block ? "" : "un");
else
- drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n",
+ drm_dbg_kms(display->drm, "TC cold %sblock succeeded\n",
block ? "" : "un");
}
@@ -1860,18 +1866,36 @@ static void xelpdp_aux_power_well_enable(struct intel_display *display,
* expected to just wait a fixed 600us after raising the request
* bit.
*/
- usleep_range(600, 1200);
+ if (DISPLAY_VER(display) >= 35) {
+ if (intel_de_wait_for_set_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 2))
+ drm_warn(display->drm,
+ "Timeout waiting for PHY %c AUX channel power to be up\n",
+ phy_name(phy));
+ } else {
+ usleep_range(600, 1200);
+ }
}
static void xelpdp_aux_power_well_disable(struct intel_display *display,
struct i915_power_well *power_well)
{
enum aux_ch aux_ch = i915_power_well_instance(power_well)->xelpdp.aux_ch;
+ enum phy phy = icl_aux_pw_to_phy(display, power_well);
intel_de_rmw(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
XELPDP_DP_AUX_CH_CTL_POWER_REQUEST,
0);
- usleep_range(10, 30);
+
+ if (DISPLAY_VER(display) >= 35) {
+ if (intel_de_wait_for_clear_ms(display, XELPDP_DP_AUX_CH_CTL(display, aux_ch),
+ XELPDP_DP_AUX_CH_CTL_POWER_STATUS, 1))
+ drm_warn(display->drm,
+ "Timeout waiting for PHY %c AUX channel to powerdown\n",
+ phy_name(phy));
+ } else {
+ usleep_range(10, 30);
+ }
}
static bool xelpdp_aux_power_well_enabled(struct intel_display *display,
@@ -1889,8 +1913,8 @@ static void xe2lpd_pica_power_well_enable(struct intel_display *display,
intel_de_write(display, XE2LPD_PICA_PW_CTL,
XE2LPD_PICA_CTL_POWER_REQUEST);
- if (intel_de_wait_for_set(display, XE2LPD_PICA_PW_CTL,
- XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
+ if (intel_de_wait_for_set_ms(display, XE2LPD_PICA_PW_CTL,
+ XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
drm_dbg_kms(display->drm, "pica power well enable timeout\n");
drm_WARN(display->drm, 1, "Power well PICA timeout when enabled");
@@ -1902,8 +1926,8 @@ static void xe2lpd_pica_power_well_disable(struct intel_display *display,
{
intel_de_write(display, XE2LPD_PICA_PW_CTL, 0);
- if (intel_de_wait_for_clear(display, XE2LPD_PICA_PW_CTL,
- XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
+ if (intel_de_wait_for_clear_ms(display, XE2LPD_PICA_PW_CTL,
+ XE2LPD_PICA_CTL_POWER_STATUS, 1)) {
drm_dbg_kms(display->drm, "pica power well disable timeout\n");
drm_WARN(display->drm, 1, "Power well PICA timeout when disabled");
diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h
index 7bd09d981cd2..9d71e26a4fa2 100644
--- a/drivers/gpu/drm/i915/display/intel_display_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_regs.h
@@ -2890,6 +2890,7 @@ enum skl_power_gate {
#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4)
#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4))
#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4))
+/* See enum intel_tc_pin_assignment for the pin assignment field values. */
#define _TCSS_DDI_STATUS_1 0x161500
#define _TCSS_DDI_STATUS_2 0x161504
@@ -2897,6 +2898,7 @@ enum skl_power_gate {
_TCSS_DDI_STATUS_1, \
_TCSS_DDI_STATUS_2))
#define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25)
+/* See enum intel_tc_pin_assignment for the pin assignment field values. */
#define TCSS_DDI_STATUS_READY REG_BIT(2)
#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1)
#define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0)
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index f5f38dca14d7..03e8c68d2913 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include "i915_drv.h"
#include "intel_clock_gating.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c
index 56c4024201c1..0a331f89b4db 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rpm.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c
@@ -1,69 +1,62 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
-#include "i915_drv.h"
+#include <drm/intel/display_parent_interface.h>
+
#include "intel_display_core.h"
#include "intel_display_rpm.h"
-#include "intel_runtime_pm.h"
-
-static struct intel_runtime_pm *display_to_rpm(struct intel_display *display)
-{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
- return &i915->runtime_pm;
-}
struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
{
- return intel_runtime_pm_get_raw(display_to_rpm(display));
+ return display->parent->rpm->get_raw(display->drm);
}
void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
{
- intel_runtime_pm_put_raw(display_to_rpm(display), wakeref);
+ display->parent->rpm->put_raw(display->drm, wakeref);
}
struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
{
- return intel_runtime_pm_get(display_to_rpm(display));
+ return display->parent->rpm->get(display->drm);
}
struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
{
- return intel_runtime_pm_get_if_in_use(display_to_rpm(display));
+ return display->parent->rpm->get_if_in_use(display->drm);
}
struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
{
- return intel_runtime_pm_get_noresume(display_to_rpm(display));
+ return display->parent->rpm->get_noresume(display->drm);
}
void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
{
- intel_runtime_pm_put(display_to_rpm(display), wakeref);
+ display->parent->rpm->put(display->drm, wakeref);
}
void intel_display_rpm_put_unchecked(struct intel_display *display)
{
- intel_runtime_pm_put_unchecked(display_to_rpm(display));
+ display->parent->rpm->put_unchecked(display->drm);
}
bool intel_display_rpm_suspended(struct intel_display *display)
{
- return intel_runtime_pm_suspended(display_to_rpm(display));
+ return display->parent->rpm->suspended(display->drm);
}
void assert_display_rpm_held(struct intel_display *display)
{
- assert_rpm_wakelock_held(display_to_rpm(display));
+ display->parent->rpm->assert_held(display->drm);
}
void intel_display_rpm_assert_block(struct intel_display *display)
{
- disable_rpm_wakeref_asserts(display_to_rpm(display));
+ display->parent->rpm->assert_block(display->drm);
}
void intel_display_rpm_assert_unblock(struct intel_display *display)
{
- enable_rpm_wakeref_asserts(display_to_rpm(display));
+ display->parent->rpm->assert_unblock(display->drm);
}
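
Every wrapper in this file now dispatches through display->parent->rpm. The diff never shows the ops table itself, so the sketch below reconstructs its shape purely from the members dereferenced above; treat it as an assumption about what <drm/intel/display_parent_interface.h> provides.

	struct intel_display_rpm_ops_sketch {
		struct ref_tracker *(*get)(struct drm_device *drm);
		struct ref_tracker *(*get_raw)(struct drm_device *drm);
		struct ref_tracker *(*get_if_in_use)(struct drm_device *drm);
		struct ref_tracker *(*get_noresume)(struct drm_device *drm);
		void (*put)(struct drm_device *drm, struct ref_tracker *wakeref);
		void (*put_raw)(struct drm_device *drm, struct ref_tracker *wakeref);
		void (*put_unchecked)(struct drm_device *drm);
		bool (*suspended)(struct drm_device *drm);
		void (*assert_held)(struct drm_device *drm);
		void (*assert_block)(struct drm_device *drm);
		void (*assert_unblock)(struct drm_device *drm);
	};
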
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ce45261c4a8f..06bf8f7c0989 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -50,15 +50,17 @@
#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
+#include "intel_dsi_vbt_defs.h"
#include "intel_wm_types.h"
struct cec_notifier;
struct drm_printer;
-struct __intel_global_objs_state;
struct intel_connector;
struct intel_ddi_buf_trans;
struct intel_fbc;
+struct intel_global_objs_state;
struct intel_hdcp_shim;
+struct intel_panic;
struct intel_tc_port;
/*
@@ -148,6 +150,7 @@ struct intel_framebuffer {
unsigned int vtd_guard;
unsigned int (*panic_tiling)(unsigned int x, unsigned int y, unsigned int width);
+ struct intel_panic *panic;
};
enum intel_hotplug_state {
@@ -548,7 +551,16 @@ struct intel_connector {
u8 fec_capability;
u8 dsc_hblank_expansion_quirk:1;
+ u8 dsc_throughput_quirk:1;
u8 dsc_decompression_enabled:1;
+
+ struct {
+ struct {
+ int rgb_yuv444;
+ int yuv422_420;
+ } overall_throughput;
+ int max_line_width;
+ } dsc_branch_caps;
} dp;
struct {
@@ -593,7 +605,7 @@ struct intel_atomic_state {
struct ref_tracker *wakeref;
- struct __intel_global_objs_state *global_objs;
+ struct intel_global_objs_state *global_objs;
int num_global_objs;
/* Internal commit, as opposed to userspace/client initiated one */
@@ -634,6 +646,7 @@ struct intel_plane_state {
enum drm_color_encoding color_encoding;
enum drm_color_range color_range;
enum drm_scaling_filter scaling_filter;
+ struct drm_property_blob *ctm, *degamma_lut, *gamma_lut, *lut_3d;
} hw;
struct i915_vma *ggtt_vma;
@@ -642,7 +655,6 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
- u32 phys_dma_addr; /* for cursor_needs_physical */
/* for legacy cursor fb unpin */
struct drm_vblank_work unpin_work;
@@ -665,6 +677,9 @@ struct intel_plane_state {
/* chroma upsampler control register */
u32 cus_ctl;
+ /* surface address register */
+ u32 surf;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -712,7 +727,6 @@ struct intel_initial_plane_config {
struct intel_memory_region *mem;
resource_size_t phys_base;
struct i915_vma *vma;
- unsigned int tiling;
int size;
u32 base;
u8 rotation;
@@ -941,9 +955,25 @@ struct intel_csc_matrix {
u16 postoff[3];
};
-void intel_io_mmio_fw_write(void *ctx, i915_reg_t reg, u32 val);
+enum intel_panel_replay_dsc_support {
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED,
+ INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY,
+ INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE,
+};
+
+struct scaler_filter_coeff {
+ u16 sign;
+ u16 exp;
+ u16 mantissa;
+};
-typedef void (*intel_io_reg_write)(void *ctx, i915_reg_t reg, u32 val);
+struct intel_casf {
+ #define SCALER_FILTER_NUM_TAPS 7
+ struct scaler_filter_coeff coeff[SCALER_FILTER_NUM_TAPS];
+ u8 strength;
+ u8 win_size;
+ bool casf_enable;
+};
struct intel_crtc_state {
/*
@@ -981,6 +1011,7 @@ struct intel_crtc_state {
struct drm_property_blob *degamma_lut, *gamma_lut, *ctm;
struct drm_display_mode mode, pipe_mode, adjusted_mode;
enum drm_scaling_filter scaling_filter;
+ struct intel_casf casf_params;
} hw;
/* actual state of LUTs */
@@ -1122,9 +1153,13 @@ struct intel_crtc_state {
bool req_psr2_sdp_prior_scanline;
bool has_panel_replay;
bool wm_level_disabled;
+ bool pkg_c_latency_used;
+ /* Only used for state verification. */
+ enum intel_panel_replay_dsc_support panel_replay_dsc_support;
u32 dc3co_exitline;
u16 su_y_granularity;
u8 active_non_psr_pipes;
+ const char *no_psr_reason;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1181,7 +1216,9 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
- int min_cdclk[I915_MAX_PLANES];
+ int min_cdclk;
+
+ int plane_min_cdclk[I915_MAX_PLANES];
/* for packed/planar CbCr */
u32 data_rate[I915_MAX_PLANES];
@@ -1266,6 +1303,8 @@ struct intel_crtc_state {
/* Display Stream compression state */
struct {
+ /* Only used for state computation, not read out from the HW. */
+ bool compression_enabled_on_link;
bool compression_enable;
int num_streams;
/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
@@ -1339,6 +1378,23 @@ struct intel_crtc_state {
/* LOBF flag */
bool has_lobf;
+
+ /* W2 window or 'set context latency' lines */
+ u16 set_context_latency;
+
+ struct {
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
+
+ /* LNL and beyond */
+ u8 check_entry_lines;
+ u8 aux_less_wake_lines;
+ u8 silence_period_sym_clocks;
+ u8 lfps_half_cycle_num_of_syms;
+ } alpm_state;
+
+ /* to track changes in plane color blocks */
+ bool plane_color_changed;
};
enum intel_pipe_crc_source {
@@ -1511,8 +1567,8 @@ struct intel_plane {
const struct drm_framebuffer *fb,
int color_plane);
unsigned int (*max_stride)(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation);
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation);
bool (*can_async_flip)(u64 modifier);
/* Write all non-self arming plane registers */
void (*update_noarm)(struct intel_dsb *dsb,
@@ -1534,6 +1590,7 @@ struct intel_plane {
bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe);
int (*check_plane)(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state);
+ u32 (*surf_offset)(const struct intel_plane_state *plane_state);
int (*min_cdclk)(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void (*async_flip)(struct intel_dsb *dsb,
@@ -1676,15 +1733,22 @@ struct intel_psr {
bool source_panel_replay_support;
bool sink_panel_replay_support;
bool sink_panel_replay_su_support;
+ enum intel_panel_replay_dsc_support sink_panel_replay_dsc_support;
bool panel_replay_enabled;
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
u8 entry_setup_frames;
+ u8 io_wake_lines;
+ u8 fast_wake_lines;
+
bool link_ok;
+ bool pkg_c_latency_used;
u8 active_non_psr_pipes;
+
+ const char *no_psr_reason;
};
struct intel_dp {
@@ -1840,19 +1904,12 @@ struct intel_dp {
bool colorimetry_support;
struct {
- u8 io_wake_lines;
- u8 fast_wake_lines;
enum transcoder transcoder;
struct mutex lock;
- /* LNL and beyond */
- u8 check_entry_lines;
- u8 aux_less_wake_lines;
- u8 silence_period_sym_clocks;
- u8 lfps_half_cycle_num_of_syms;
bool lobf_disable_debug;
bool sink_alpm_error;
- } alpm_parameters;
+ } alpm;
u8 alpm_dpcd;
@@ -1932,6 +1989,11 @@ struct intel_dp_mst_encoder {
struct intel_connector *connector;
};
+struct intel_colorop {
+ struct drm_colorop base;
+ enum intel_color_block id;
+};
+
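A wrapper struct like intel_colorop is normally paired with a container_of() upcast; a minimal sketch of such a helper (hypothetical, not part of this patch):

/* Hypothetical helper, for illustration only. */
static inline struct intel_colorop *to_intel_colorop(struct drm_colorop *colorop)
{
	return container_of(colorop, struct intel_colorop, base);
}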
static inline struct intel_encoder *
intel_attached_encoder(struct intel_connector *connector)
{
diff --git a/drivers/gpu/drm/i915/display/intel_display_utils.c b/drivers/gpu/drm/i915/display/intel_display_utils.c
new file mode 100644
index 000000000000..04d010f7c23e
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_utils.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <linux/device.h>
+
+#include <drm/drm_device.h>
+
+#ifdef CONFIG_X86
+#include <asm/hypervisor.h>
+#endif
+
+#include "intel_display_core.h"
+#include "intel_display_utils.h"
+
+bool intel_display_run_as_guest(struct intel_display *display)
+{
+#if IS_ENABLED(CONFIG_X86)
+ return !hypervisor_is_type(X86_HYPER_NATIVE);
+#else
+ /* Not supported yet */
+ return false;
+#endif
+}
+
+bool intel_display_vtd_active(struct intel_display *display)
+{
+ if (device_iommu_mapped(display->drm->dev))
+ return true;
+
+	/* Running as a guest, we assume the host is enforcing VT-d */
+ return intel_display_run_as_guest(display);
+}
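As a usage sketch (the caller and padding value below are assumptions, not taken from this patch), the helper lets mapping code add guard padding whenever an IOMMU may remap display accesses:

/* Illustrative only: pad GGTT mappings when VT-d may be active. */
unsigned int vtd_guard = 0;

if (intel_display_vtd_active(display))
	vtd_guard = 168;	/* assumed number of guard PTEs */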
diff --git a/drivers/gpu/drm/i915/display/intel_display_utils.h b/drivers/gpu/drm/i915/display/intel_display_utils.h
new file mode 100644
index 000000000000..2a18f160320c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_utils.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DISPLAY_UTILS__
+#define __INTEL_DISPLAY_UTILS__
+
+#include <linux/bug.h>
+#include <linux/types.h>
+
+struct intel_display;
+
+#ifndef MISSING_CASE
+#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
+ __stringify(x), (long)(x))
+#endif
+
+#ifndef fetch_and_zero
+#define fetch_and_zero(ptr) ({ \
+ typeof(*ptr) __T = *(ptr); \
+ *(ptr) = (typeof(*ptr))0; \
+ __T; \
+})
+#endif
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+bool intel_display_run_as_guest(struct intel_display *display);
+bool intel_display_vtd_active(struct intel_display *display);
+
+#endif /* __INTEL_DISPLAY_UTILS__ */
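A minimal usage sketch of the helpers above (variable names assumed):

/* fetch_and_zero() reads a value and clears the source in one expression. */
struct i915_vma *vma = fetch_and_zero(&plane_state->ggtt_vma);

/* KHz()/MHz() scale to Hz: KHz(540000) == 540000000, i.e. 540 MHz. */
u64 rate_hz = KHz(540000);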
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index f57280e9d041..e38e5e87877c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -3,6 +3,8 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_core.h"
@@ -39,3 +41,39 @@ void intel_display_wa_apply(struct intel_display *display)
else if (DISPLAY_VER(display) == 11)
gen11_display_wa_apply(display);
}
+
+/*
+ * Wa_16025573575:
+ * Fixes: Issue with bitbashing on Xe3-based platforms.
+ * Workaround: Set mask bits in GPIO CTL and preserve them during the bitbashing sequence.
+ */
+static bool intel_display_needs_wa_16025573575(struct intel_display *display)
+{
+ return DISPLAY_VERx100(display) == 3000 || DISPLAY_VERx100(display) == 3002 ||
+ DISPLAY_VERx100(display) == 3500;
+}
+
+/*
+ * Wa_14011503117:
+ * Fixes: The DE fatal error is masked before the scaler is enabled.
+ * Workaround: Unmask the DE fatal error register after enabling the scaler
+ * and after waiting for at least one frame.
+ */
+bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name)
+{
+ switch (wa) {
+ case INTEL_DISPLAY_WA_16023588340:
+ return intel_display_needs_wa_16023588340(display);
+ case INTEL_DISPLAY_WA_16025573575:
+ return intel_display_needs_wa_16025573575(display);
+ case INTEL_DISPLAY_WA_14011503117:
+ return DISPLAY_VER(display) == 13;
+ case INTEL_DISPLAY_WA_22014263786:
+ return IS_DISPLAY_VERx100(display, 1100, 1400);
+ default:
+ drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name);
+ break;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index babd9d16603d..3644e8e2b724 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -21,4 +21,16 @@ static inline bool intel_display_needs_wa_16023588340(struct intel_display *disp
bool intel_display_needs_wa_16023588340(struct intel_display *display);
#endif
+enum intel_display_wa {
+ INTEL_DISPLAY_WA_16023588340,
+ INTEL_DISPLAY_WA_16025573575,
+ INTEL_DISPLAY_WA_14011503117,
+ INTEL_DISPLAY_WA_22014263786,
+};
+
+bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name);
+
+#define intel_display_wa(__display, __wa) \
+ __intel_display_wa((__display), INTEL_DISPLAY_WA_##__wa, __stringify(__wa))
+
#endif
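Since __intel_display_wa() receives the stringified ID, unknown workarounds are reported by name. A call-site sketch (the helper invoked in the body is hypothetical):

if (intel_display_wa(display, 14011503117))
	/* unmask DE fatal errors one frame after enabling the scaler */
	unmask_de_fatal_error(display);	/* hypothetical helper */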
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 744f51c0eab8..6ebbd97e6351 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -30,13 +30,13 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_flipq.h"
@@ -127,6 +127,12 @@ static bool dmc_firmware_param_disabled(struct intel_display *display)
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
+#define XE3P_LPD_DMC_PATH DMC_PATH(xe3p_lpd)
+MODULE_FIRMWARE(XE3P_LPD_DMC_PATH);
+
+#define XE3LPD_3002_DMC_PATH DMC_PATH(xe3lpd_3002)
+MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH);
+
#define XE3LPD_DMC_PATH DMC_PATH(xe3lpd)
MODULE_FIRMWARE(XE3LPD_DMC_PATH);
@@ -184,8 +190,13 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
const char *fw_path = NULL;
u32 max_fw_size = 0;
- if (DISPLAY_VERx100(display) == 3002 ||
- DISPLAY_VERx100(display) == 3000) {
+ if (DISPLAY_VERx100(display) == 3500) {
+ fw_path = XE3P_LPD_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VERx100(display) == 3002) {
+ fw_path = XE3LPD_3002_DMC_PATH;
+ max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
+ } else if (DISPLAY_VERx100(display) == 3000) {
fw_path = XE3LPD_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 2000) {
@@ -509,10 +520,16 @@ static u32 pipedmc_interrupt_mask(struct intel_display *display)
PIPEDMC_ATS_FAULT;
}
-static u32 dmc_evt_ctl_disable(void)
+static u32 dmc_evt_ctl_disable(u32 dmc_evt_ctl)
{
- return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
+ /*
+ * DMC_EVT_CTL_ENABLE cannot be cleared once set. Always
+ * configure it based on the original event definition to
+ * avoid mismatches in assert_dmc_loaded().
+ */
+ return (dmc_evt_ctl & DMC_EVT_CTL_ENABLE) |
+ REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
+ DMC_EVT_CTL_TYPE_EDGE_0_1) |
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
DMC_EVENT_FALSE);
}
@@ -546,6 +563,51 @@ static bool is_event_handler(struct intel_display *display,
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
}
+static bool fixup_dmc_evt(struct intel_display *display,
+ enum intel_dmc_id dmc_id,
+ i915_reg_t reg_ctl, u32 *data_ctl,
+ i915_reg_t reg_htp, u32 *data_htp)
+{
+ if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
+ return false;
+
+ if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
+ return false;
+
+ /* make sure reg_ctl and reg_htp are for the same event */
+ if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
+ i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
+ return false;
+
+ /*
+ * On ADL-S the HRR event handler is not restored after DC6.
+ * Clear it to zero from the beginning to avoid mismatches later.
+ */
+ if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+ *data_ctl = 0;
+ *data_htp = 0;
+ return true;
+ }
+
+ /*
+ * TGL/ADL-S DMC firmware incorrectly uses the undelayed vblank
+ * event for the HRR handler, when it should be using the delayed
+ * vblank event instead. Fixed firmware was never released
+ * so the Windows driver just hacks around it by overriding
+ * the event ID. Do the same.
+ */
+ if ((display->platform.tigerlake || display->platform.alderlake_s) &&
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
+ *data_ctl &= ~DMC_EVT_CTL_EVENT_ID_MASK;
+ *data_ctl |= REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
+ MAINDMC_EVENT_VBLANK_DELAYED_A);
+ return true;
+ }
+
+ return false;
+}
+
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -564,7 +626,7 @@ static bool disable_dmc_evt(struct intel_display *display,
/* also disable the HRR event on the main DMC on TGL/ADLS */
if ((display->platform.tigerlake || display->platform.alderlake_s) &&
- is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data))
+ is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_DELAYED_A, reg, data))
return true;
return false;
@@ -577,7 +639,7 @@ static u32 dmc_mmiodata(struct intel_display *display,
if (disable_dmc_evt(display, dmc_id,
dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]))
- return dmc_evt_ctl_disable();
+ return dmc_evt_ctl_disable(dmc->dmc_info[dmc_id].mmiodata[i]);
else
return dmc->dmc_info[dmc_id].mmiodata[i];
}
@@ -636,12 +698,6 @@ static void assert_dmc_loaded(struct intel_display *display,
found = intel_de_read(display, reg);
expected = dmc_mmiodata(display, dmc, dmc_id, i);
- /* once set DMC_EVT_CTL_ENABLE can't be cleared :/ */
- if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) {
- found &= ~DMC_EVT_CTL_ENABLE;
- expected &= ~DMC_EVT_CTL_ENABLE;
- }
-
drm_WARN(display->drm, found != expected,
"DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n",
dmc_id, i, i915_mmio_reg_offset(reg), expected, found);
@@ -662,11 +718,11 @@ static bool need_pipedmc_load_program(struct intel_display *display)
static bool need_pipedmc_load_mmio(struct intel_display *display, enum pipe pipe)
{
/*
- * PTL:
+ * Xe3_LPD/Xe3p_LPD:
* - pipe A/B DMC doesn't need save/restore
* - pipe C/D DMC is in PG0, needs manual save/restore
*/
- if (DISPLAY_VER(display) == 30)
+ if (IS_DISPLAY_VER(display, 30, 35))
return pipe >= PIPE_C;
/*
@@ -794,7 +850,7 @@ static void dmc_configure_event(struct intel_display *display,
if (!is_event_handler(display, dmc_id, event_id, reg, data))
continue;
- intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable());
+ intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable(data));
num_handlers++;
}
@@ -1064,9 +1120,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
+ }
+ for (i = 0; i < mmio_count - 1; i++) {
+ u32 orig_mmiodata[2] = {
+ dmc_info->mmiodata[i],
+ dmc_info->mmiodata[i+1],
+ };
+
+ if (!fixup_dmc_evt(display, dmc_id,
+ dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
+ dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
+ continue;
+
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
+ orig_mmiodata[0], dmc_info->mmiodata[i]);
+ drm_dbg_kms(display->drm,
+ " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
+ i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
+ orig_mmiodata[1], dmc_info->mmiodata[i+1]);
+ }
+
+ for (i = 0; i < mmio_count; i++) {
drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
- i, mmioaddr[i], mmiodata[i],
+ i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
@@ -1141,7 +1220,7 @@ parse_dmc_fw_package(struct intel_dmc *dmc,
}
num_entries = package_header->num_entries;
- if (WARN_ON(package_header->num_entries > max_entries))
+ if (WARN_ON(num_entries > max_entries))
num_entries = max_entries;
fw_info = (const struct intel_fw_info *)
@@ -1603,9 +1682,7 @@ DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);
void intel_dmc_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_dmc_info", 0444, display->drm->debugfs_root,
display, &intel_dmc_debugfs_status_fops);
}
@@ -1642,14 +1719,14 @@ void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe)
drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC GTT fault\n",
crtc->base.base.id, crtc->base.name);
if (tmp & PIPEDMC_ERROR)
- drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC error\n",
+ drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC error\n",
crtc->base.base.id, crtc->base.name);
}
int_vector = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK;
if (tmp == 0 && int_vector != 0)
- drm_err(display->drm, "[CRTC:%d:%s]] PIPEDMC interrupt vector 0x%x\n",
- crtc->base.base.id, crtc->base.name, tmp);
+ drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC interrupt vector 0x%x\n",
+ crtc->base.base.id, crtc->base.name, int_vector);
}
void intel_pipedmc_enable_event(struct intel_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
index b3bb89ba34f9..73a3101514f3 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c
@@ -179,11 +179,11 @@ static void intel_dmc_wl_work(struct work_struct *work)
if (refcount_read(&wl->refcount))
goto out_unlock;
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+ intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
- if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK, 0,
- DMC_WAKELOCK_CTL_TIMEOUT_US)) {
+ if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK, 0,
+ DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) {
WARN_RATELIMIT(1, "DMC wakelock release timed out");
goto out_unlock;
}
@@ -207,17 +207,16 @@ static void __intel_dmc_wl_take(struct intel_display *display)
if (wl->taken)
return;
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, 0,
- DMC_WAKELOCK_CTL_REQ);
+ intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, 0, DMC_WAKELOCK_CTL_REQ);
/*
* We need to use the atomic variant of the waiting routine
* because the DMC wakelock is also taken in atomic context.
*/
- if (__intel_de_wait_for_register_atomic_nowl(display, DMC_WAKELOCK1_CTL,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_ACK,
- DMC_WAKELOCK_CTL_TIMEOUT_US)) {
+ if (intel_de_wait_fw_us_atomic(display, DMC_WAKELOCK1_CTL,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_ACK,
+ DMC_WAKELOCK_CTL_TIMEOUT_US, NULL)) {
WARN_RATELIMIT(1, "DMC wakelock ack timed out");
return;
}
@@ -360,7 +359,7 @@ void intel_dmc_wl_enable(struct intel_display *display, u32 dc_state)
* wakelock, because we're just enabling it, so call the
* non-locking version directly here.
*/
- __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
+ intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, 0, DMC_WAKELOCK_CFG_ENABLE);
wl->enabled = true;
@@ -402,7 +401,7 @@ void intel_dmc_wl_disable(struct intel_display *display)
goto out_unlock;
/* Disable wakelock in DMC */
- __intel_de_rmw_nowl(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
+ intel_de_rmw_fw(display, DMC_WAKELOCK_CFG, DMC_WAKELOCK_CFG_ENABLE, 0);
wl->enabled = false;
@@ -414,7 +413,7 @@ void intel_dmc_wl_disable(struct intel_display *display)
*
* TODO: Get the correct expectation from the hardware team.
*/
- __intel_de_rmw_nowl(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
+ intel_de_rmw_fw(display, DMC_WAKELOCK1_CTL, DMC_WAKELOCK_CTL_REQ, 0);
wl->taken = false;
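The intel_de_rmw_fw() calls above replace the old __intel_de_rmw_nowl() variant; its read-modify-write effect on the register value reduces to this conceptual sketch (not the driver's implementation):

/* Conceptual model of intel_de_rmw_fw(display, reg, clear, set): */
static u32 rmw_value(u32 old, u32 clear, u32 set)
{
	return (old & ~clear) | set;	/* clear bits first, then set */
}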
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 7976fec88606..0ec82fcbcf48 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -27,6 +27,7 @@
#include <linux/export.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/notifier.h>
@@ -50,7 +51,6 @@
#include <drm/drm_probe_helper.h>
#include "g4x_dp.h"
-#include "i915_utils.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_audio.h"
@@ -63,6 +63,8 @@
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_driver.h"
+#include "intel_display_jiffies.h"
+#include "intel_display_utils.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
@@ -92,14 +94,10 @@
#include "intel_psr.h"
#include "intel_quirks.h"
#include "intel_tc.h"
+#include "intel_vblank.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
-/* DP DSC throughput values used for slice count calculations KPixels/s */
-#define DP_DSC_PEAK_PIXEL_RATE 2720000
-#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
-#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
-
/* Max DSC line buffer depth supported by HW. */
#define INTEL_DP_DSC_MAX_LINE_BUF_DEPTH 13
@@ -174,7 +172,6 @@ int intel_dp_link_symbol_clock(int rate)
static int max_dprx_rate(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
int max_rate;
if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
@@ -183,16 +180,13 @@ static int max_dprx_rate(struct intel_dp *intel_dp)
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
/*
- * Some broken eDP sinks illegally declare support for
- * HBR3 without TPS4, and are unable to produce a stable
- * output. Reject HBR3 when TPS4 is not available.
+ * Some platform and eDP panel combinations may not reliably support
+ * HBR3 due to signal integrity limitations, despite advertising it.
+ * Cap the link rate to HBR2 on the known affected machines to avoid
+ * unstable configurations.
*/
- if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
- drm_dbg_kms(display->drm,
- "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
- encoder->base.base.id, encoder->base.name);
- max_rate = 540000;
- }
+ if (intel_dp_is_edp(intel_dp) && intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2))
+ max_rate = min(max_rate, 540000);
return max_rate;
}
@@ -1021,13 +1015,43 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
struct intel_display *display = to_intel_display(connector);
u8 min_slice_count, i;
int max_slice_width;
+ int tp_rgb_yuv444;
+ int tp_yuv422_420;
- if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_0);
- else
- min_slice_count = DIV_ROUND_UP(mode_clock,
- DP_DSC_MAX_ENC_THROUGHPUT_1);
+ /*
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output.
+ * The RGB/YUV444 throughput value should always be either equal
+ * to or smaller than the YUV422/420 value, but let's not depend on
+ * this assumption.
+ */
+ if (mode_clock > max(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444,
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420))
+ return 0;
+
+ if (mode_hdisplay > connector->dp.dsc_branch_caps.max_line_width)
+ return 0;
+
+ /*
+ * TODO: Pass the total pixel rate of all the streams transferred to
+ * an MST tiled display, calculate from this the total slice count
+ * for all tiles, and derive the per-tile slice count from that total.
+ */
+ tp_rgb_yuv444 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd,
+ mode_clock, true);
+ tp_yuv422_420 = drm_dp_dsc_sink_max_slice_throughput(connector->dp.dsc_dpcd,
+ mode_clock, false);
+
+ /*
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output.
+ * For now use the smaller of the two, which is OK, though it may
+ * result in a higher than required minimum slice count.
+ * The RGB/YUV444 throughput value should always be either equal
+ * to or smaller than the YUV422/420 value, but let's not depend on
+ * this assumption.
+ */
+ min_slice_count = DIV_ROUND_UP(mode_clock, min(tp_rgb_yuv444, tp_yuv422_420));
/*
* Due to some DSC engine BW limitations, we need to enable second
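A worked example with assumed numbers: for a 1,080,000 kHz mode clock and per-slice throughputs of 400,000 (RGB/YUV444) and 340,000 (YUV422/420) KPixels/s, the code above takes min(400000, 340000) = 340000 and yields min_slice_count = DIV_ROUND_UP(1080000, 340000) = 4.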
@@ -1418,6 +1442,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
struct intel_display *display = to_intel_display(_connector->dev);
struct intel_connector *connector = to_intel_connector(_connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
+ enum intel_output_format sink_format, output_format;
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
@@ -1451,6 +1476,13 @@ intel_dp_mode_valid(struct drm_connector *_connector,
mode->hdisplay, target_clock);
max_dotclk *= num_joined_pipes;
+ sink_format = intel_dp_sink_format(connector, mode);
+ output_format = intel_dp_output_format(connector, sink_format);
+
+ status = intel_pfit_mode_valid(display, mode, output_format, num_joined_pipes);
+ if (status != MODE_OK)
+ return status;
+
if (target_clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -1466,11 +1498,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
intel_dp_mode_min_output_bpp(connector, mode));
if (intel_dp_has_dsc(connector)) {
- enum intel_output_format sink_format, output_format;
int pipe_bpp;
- sink_format = intel_dp_sink_format(connector, mode);
- output_format = intel_dp_output_format(connector, sink_format);
/*
* TBD pass the connector BPC,
* for now U8_MAX so that max BPC on that platform would be picked
@@ -2338,24 +2367,26 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
return 0;
}
-static void intel_dp_fec_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
+/*
+ * Return whether FEC must be enabled for 8b10b SST or MST links. On 128b132b
+ * links FEC is always enabled implicitly by the HW, so this function returns
+ * false for that case.
+ */
+bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
+ bool dsc_enabled_on_crtc)
{
- if (crtc_state->fec_enable)
- return;
+ if (intel_dp_is_uhbr(crtc_state))
+ return false;
/*
* Though eDP v1.5 supports FEC with DSC, unlike DP, it is optional.
* Since FEC is a bandwidth overhead, continue to not enable it for
* eDP until there is a good reason to do so.
*/
- if (intel_dp_is_edp(intel_dp))
- return;
-
- if (intel_dp_is_uhbr(crtc_state))
- return;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return false;
- crtc_state->fec_enable = true;
+ return dsc_enabled_on_crtc || intel_dsc_enabled_on_link(crtc_state);
}
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
@@ -2373,7 +2404,11 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
bool is_mst = intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST);
int ret;
- intel_dp_fec_compute_config(intel_dp, pipe_config);
+ /*
+ * FIXME: set the FEC enabled state once pipe_config->port_clock is
+ * already known, so the UHBR/non-UHBR mode can be determined.
+ */
+ pipe_config->fec_enable = intel_dp_needs_8b10b_fec(pipe_config, true);
if (!intel_dp_dsc_supports_format(connector, pipe_config->output_format))
return -EINVAL;
@@ -2448,7 +2483,8 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return ret;
}
- pipe_config->dsc.compression_enable = true;
+ intel_dsc_enable_on_crtc(pipe_config);
+
drm_dbg_kms(display->drm, "DP DSC computed with Input Bpp = %d "
"Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
@@ -2458,6 +2494,40 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}
+static int
+dsc_throughput_quirk_max_bpp_x16(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!connector->dp.dsc_throughput_quirk)
+ return INT_MAX;
+
+ /*
+ * Synaptics Panamera branch devices have a problem decompressing a
+ * stream with a compressed link-bpp higher than 12, if the pixel
+ * clock is higher than ~50 % of the maximum overall throughput
+ * reported by the branch device. Work around this by limiting the
+ * maximum link bpp for such pixel clocks.
+ *
+ * TODO: Use the throughput value specific to the actual RGB/YUV
+ * format of the output, after determining the pixel clock limit for
+ * YUV modes. For now use the smaller of the throughput values, which
+ * may result in limiting the link-bpp value already at a lower than
+ * required mode clock in case of native YUV422/420 output formats.
+ * The RGB/YUV444 throughput value should always be either equal to or
+ * smaller than the YUV422/420 value, but let's not depend on this
+ * assumption.
+ */
+ if (adjusted_mode->crtc_clock <
+ min(connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444,
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420) / 2)
+ return INT_MAX;
+
+ return fxp_q4_from_int(12);
+}
+
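For illustration with assumed numbers: a branch device reporting 1,200,000 KPixels/s overall throughput for both formats gives a threshold of 600,000 kHz, so a 500,000 kHz mode keeps the INT_MAX limit while a 700,000 kHz mode is capped to fxp_q4_from_int(12) = 192, i.e. 12 bpp in U6.4 link-bpp units.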
/*
* Calculate the output link min, max bpp values in limits based on the pipe bpp
* range, crtc_state and dsc mode. Return true on success.
@@ -2489,6 +2559,7 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
} else {
int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
int dsc_src_max_bpp, dsc_sink_max_bpp, dsc_max_bpp;
+ int throughput_max_bpp_x16;
dsc_src_min_bpp = intel_dp_dsc_min_src_compressed_bpp();
dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
@@ -2503,6 +2574,19 @@ intel_dp_compute_config_link_bpp_limits(struct intel_dp *intel_dp,
min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
max_link_bpp_x16 = min(max_link_bpp_x16, fxp_q4_from_int(dsc_max_bpp));
+
+ throughput_max_bpp_x16 = dsc_throughput_quirk_max_bpp_x16(connector, crtc_state);
+ throughput_max_bpp_x16 = clamp(throughput_max_bpp_x16,
+ limits->link.min_bpp_x16, max_link_bpp_x16);
+ if (throughput_max_bpp_x16 < max_link_bpp_x16) {
+ max_link_bpp_x16 = throughput_max_bpp_x16;
+
+ drm_dbg_kms(display->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Decreasing link max bpp to " FXP_Q4_FMT " due to DSC throughput quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name,
+ FXP_Q4_ARGS(max_link_bpp_x16));
+ }
}
limits->link.max_bpp_x16 = max_link_bpp_x16;
@@ -2535,13 +2619,15 @@ intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp,
bool
intel_dp_compute_config_limits(struct intel_dp *intel_dp,
- struct intel_connector *connector,
+ struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
struct link_config_limits *limits)
{
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
limits->min_rate = intel_dp_min_link_rate(intel_dp);
limits->max_rate = intel_dp_max_link_rate(intel_dp);
@@ -2551,7 +2637,8 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
limits->min_lane_count = intel_dp_min_lane_count(intel_dp);
limits->max_lane_count = intel_dp_max_lane_count(intel_dp);
- limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format);
+ limits->pipe.min_bpp = intel_dp_in_hdr_mode(conn_state) ? 30 :
+ intel_dp_min_bpp(crtc_state->output_format);
if (is_mst) {
/*
* FIXME: If all the streams can't fit into the link with their
@@ -2650,7 +2737,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
+ !intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config,
respect_downstream_limits,
false,
&limits);
@@ -2684,7 +2771,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
- if (!intel_dp_compute_config_limits(intel_dp, connector, pipe_config,
+ if (!intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config,
respect_downstream_limits,
true,
&limits))
@@ -2916,6 +3003,19 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
}
}
+bool
+intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state)
+{
+ struct hdr_output_metadata *hdr_metadata;
+
+ if (!conn_state->hdr_output_metadata)
+ return false;
+
+ hdr_metadata = conn_state->hdr_output_metadata->data;
+
+ return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
+}
+
static void
intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
@@ -3181,7 +3281,26 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
*/
min_hblank = min_hblank - 2;
- min_hblank = min(10, min_hblank);
+ /*
+ * The min_hblank formula is undergoing a change. To avoid underruns,
+ * compare the value recommended in the spec with the calculated one
+ * and use the minimum of the two.
+ */
+ if (intel_dp_is_uhbr(crtc_state)) {
+ /*
+ * Note: Bspec requires a min_hblank of 2 for YCBCR420
+ * with compressed bpp 6, but the minimum compressed bpp
+ * supported by the driver is 8.
+ */
+ drm_WARN_ON(display->drm,
+ (crtc_state->dsc.compression_enable &&
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
+ crtc_state->dsc.compressed_bpp_x16 < fxp_q4_from_int(8)));
+ min_hblank = min(3, min_hblank);
+ } else {
+ min_hblank = min(10, min_hblank);
+ }
+
crtc_state->min_hblank = min_hblank;
return 0;
@@ -3842,10 +3961,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
if (ret < 0)
return ret;
/* Wait for PCON to be FRL Ready */
- wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS);
-
- if (!is_active)
- return -ETIMEDOUT;
+ ret = poll_timeout_us(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux),
+ is_active,
+ 1000, TIMEOUT_FRL_READY_MS * 1000, false);
+ if (ret)
+ return ret;
ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw,
DP_PCON_ENABLE_SEQUENTIAL_LINK);
@@ -3862,12 +3982,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp)
* Wait for FRL to be completed
* Check if the HDMI Link is up and active.
*/
- wait_for(is_active =
- intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
- TIMEOUT_HDMI_LINK_ACTIVE_MS);
-
- if (!is_active)
- return -ETIMEDOUT;
+ ret = poll_timeout_us(is_active = intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask),
+ is_active,
+ 1000, TIMEOUT_HDMI_LINK_ACTIVE_MS * 1000, false);
+ if (ret)
+ return ret;
frl_trained:
drm_dbg(display->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask);
@@ -4132,7 +4251,36 @@ static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
dsc_dpcd);
}
-void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
+static void init_dsc_overall_throughput_limits(struct intel_connector *connector, bool is_branch)
+{
+ u8 branch_caps[DP_DSC_BRANCH_CAP_SIZE];
+ int line_width;
+
+ connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 = INT_MAX;
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 = INT_MAX;
+ connector->dp.dsc_branch_caps.max_line_width = INT_MAX;
+
+ if (!is_branch)
+ return;
+
+ if (drm_dp_dpcd_read_data(connector->dp.dsc_decompression_aux,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, branch_caps,
+ sizeof(branch_caps)) != 0)
+ return;
+
+ connector->dp.dsc_branch_caps.overall_throughput.rgb_yuv444 =
+ drm_dp_dsc_branch_max_overall_throughput(branch_caps, true) ? : INT_MAX;
+
+ connector->dp.dsc_branch_caps.overall_throughput.yuv422_420 =
+ drm_dp_dsc_branch_max_overall_throughput(branch_caps, false) ? : INT_MAX;
+
+ line_width = drm_dp_dsc_branch_max_line_width(branch_caps);
+ connector->dp.dsc_branch_caps.max_line_width = line_width > 0 ? line_width : INT_MAX;
+}
+
+void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
+ const struct drm_dp_desc *desc, bool is_branch,
+ struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(connector);
@@ -4145,6 +4293,9 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
/* Clear fec_capable to avoid using stale values */
connector->dp.fec_capability = 0;
+ memset(&connector->dp.dsc_branch_caps, 0, sizeof(connector->dp.dsc_branch_caps));
+ connector->dp.dsc_throughput_quirk = false;
+
if (dpcd_rev < DP_DPCD_REV_14)
return;
@@ -4159,6 +4310,19 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector)
drm_dbg_kms(display->drm, "FEC CAPABILITY: %x\n",
connector->dp.fec_capability);
+
+ if (!(connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED))
+ return;
+
+ init_dsc_overall_throughput_limits(connector, is_branch);
+
+ /*
+ * TODO: Move the HW rev check as well to the DRM core quirk table if
+ * that's required after clarifying the list of affected devices.
+ */
+ if (drm_dp_has_quirk(desc, DP_DPCD_QUIRK_DSC_THROUGHPUT_BPP_LIMIT) &&
+ desc->ident.hw_rev == 0x10)
+ connector->dp.dsc_throughput_quirk = true;
}
static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *connector)
@@ -4167,6 +4331,9 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *
return;
intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
+
+ if (connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED)
+ init_dsc_overall_throughput_limits(connector, false);
}
static void
@@ -4183,6 +4350,7 @@ intel_dp_detect_dsc_caps(struct intel_dp *intel_dp, struct intel_connector *conn
connector);
else
intel_dp_get_dsc_sink_cap(intel_dp->dpcd[DP_DPCD_REV],
+ &intel_dp->desc, drm_dp_is_branch(intel_dp->dpcd),
connector);
}
@@ -4277,10 +4445,26 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
}
static void
+intel_edp_set_data_override_rates(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int *sink_rates = intel_dp->sink_rates;
+ int i, count = 0;
+
+ for (i = 0; i < intel_dp->num_sink_rates; i++) {
+ if (intel_bios_encoder_reject_edp_rate(encoder->devdata,
+ intel_dp->sink_rates[i]))
+ continue;
+
+ sink_rates[count++] = intel_dp->sink_rates[i];
+ }
+ intel_dp->num_sink_rates = count;
+}
+
+static void
intel_edp_set_sink_rates(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
intel_dp->num_sink_rates = 0;
@@ -4306,16 +4490,13 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
break;
/*
- * Some broken eDP sinks illegally declare support for
- * HBR3 without TPS4, and are unable to produce a stable
- * output. Reject HBR3 when TPS4 is not available.
+ * Some platforms cannot reliably drive HBR3 rates due to PHY limitations,
+ * even if the sink advertises support. Reject any sink rates above HBR2
+ * on the known affected machines to keep the output stable.
*/
- if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) {
- drm_dbg_kms(display->drm,
- "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n",
- encoder->base.base.id, encoder->base.name);
+ if (rate > 540000 &&
+ intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2))
break;
- }
intel_dp->sink_rates[i] = rate;
}
@@ -4330,6 +4511,8 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp)
intel_dp->use_rate_select = true;
else
intel_dp_set_sink_rates(intel_dp);
+
+ intel_edp_set_data_override_rates(intel_dp);
}
static bool
@@ -5501,7 +5684,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
if (intel_alpm_get_error(intel_dp)) {
intel_alpm_disable(intel_dp);
- intel_dp->alpm_parameters.sink_alpm_error = true;
+ intel_dp->alpm.sink_alpm_error = true;
}
if (intel_dp_test_short_pulse(intel_dp))
@@ -5611,14 +5794,9 @@ bool intel_digital_port_connected_locked(struct intel_encoder *encoder)
intel_wakeref_t wakeref;
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) {
- unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4);
-
- do {
- is_connected = dig_port->connected(encoder);
- if (is_connected || is_glitch_free)
- break;
- usleep_range(10, 30);
- } while (time_before(jiffies, wait_expires));
+ poll_timeout_us(is_connected = dig_port->connected(encoder),
+ is_connected || is_glitch_free,
+ 30, 4000, false);
}
return is_connected;
@@ -5874,6 +6052,8 @@ intel_dp_detect(struct drm_connector *_connector,
memset(connector->dp.dsc_dpcd, 0, sizeof(connector->dp.dsc_dpcd));
intel_dp->psr.sink_panel_replay_support = false;
intel_dp->psr.sink_panel_replay_su_support = false;
+ intel_dp->psr.sink_panel_replay_dsc_support =
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
intel_dp_mst_disconnect(intel_dp);
@@ -6810,3 +6990,81 @@ void intel_dp_mst_resume(struct intel_display *display)
}
}
}
+
+static
+int intel_dp_sdp_compute_config_late(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int guardband = intel_crtc_vblank_length(crtc_state);
+ int min_sdp_guardband = intel_dp_sdp_min_guardband(crtc_state, false);
+
+ if (guardband < min_sdp_guardband) {
+ drm_dbg_kms(display->drm, "guardband %d < min sdp guardband %d\n",
+ guardband, min_sdp_guardband);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int intel_dp_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int ret;
+
+ intel_psr_compute_config_late(intel_dp, crtc_state);
+
+ ret = intel_dp_sdp_compute_config_late(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static
+int intel_dp_get_lines_for_sdp(const struct intel_crtc_state *crtc_state, u32 type)
+{
+ switch (type) {
+ case DP_SDP_VSC_EXT_VESA:
+ case DP_SDP_VSC_EXT_CEA:
+ return 10;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return 8;
+ case DP_SDP_PPS:
+ return 7;
+ case DP_SDP_ADAPTIVE_SYNC:
+ return crtc_state->vrr.vsync_start + 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
+ bool assume_all_enabled)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int sdp_guardband = 0;
+
+ if (assume_all_enabled ||
+ crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA))
+ sdp_guardband = max(sdp_guardband,
+ intel_dp_get_lines_for_sdp(crtc_state,
+ HDMI_PACKET_TYPE_GAMUT_METADATA));
+
+ if (assume_all_enabled ||
+ crtc_state->dsc.compression_enable)
+ sdp_guardband = max(sdp_guardband,
+ intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_PPS));
+
+ if ((assume_all_enabled && HAS_AS_SDP(display)) ||
+ crtc_state->infoframes.enable & intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC))
+ sdp_guardband = max(sdp_guardband,
+ intel_dp_get_lines_for_sdp(crtc_state, DP_SDP_ADAPTIVE_SYNC));
+
+ return sdp_guardband;
+}
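A worked example with assumed state: PPS enabled (7 lines), gamut metadata enabled (8 lines) and adaptive sync with crtc_state->vrr.vsync_start = 12 (13 lines) give a minimum guardband of max(8, 7, 13) = 13 lines; with assume_all_enabled on hardware without AS SDP support the result would be max(8, 7) = 8.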
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 0657f5681196..200a8b267f64 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -12,6 +12,7 @@ enum intel_output_format;
enum pipe;
enum port;
struct drm_connector_state;
+struct drm_dp_desc;
struct drm_dp_vsc_sdp;
struct drm_encoder;
struct drm_modeset_acquire_ctx;
@@ -72,6 +73,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
int intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
+bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
+ bool dsc_enabled_on_crtc);
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -193,13 +196,15 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
int intel_dp_output_bpp(enum intel_output_format output_format, int bpp);
bool intel_dp_compute_config_limits(struct intel_dp *intel_dp,
- struct intel_connector *connector,
+ struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits,
bool dsc,
struct link_config_limits *limits);
-void intel_dp_get_dsc_sink_cap(u8 dpcd_rev, struct intel_connector *connector);
+void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
+ const struct drm_dp_desc *desc, bool is_branch,
+ struct intel_connector *connector);
bool intel_dp_has_gamut_metadata_dip(struct intel_encoder *encoder);
bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
@@ -214,5 +219,11 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state,
int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector);
void intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external);
+bool intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state);
+int intel_dp_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
+ bool assume_all_enabled);
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 829a7c0fbe4f..809799f63e32 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -5,9 +5,9 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
@@ -62,9 +62,9 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
u32 status;
int ret;
- ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
- 0,
- 2, timeout_ms, &status);
+ ret = intel_de_wait_ms(display, ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY, 0,
+ timeout_ms, &status);
if (ret == -ETIMEDOUT)
drm_err(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 41228478b21c..eb05ef4bd9f6 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -225,19 +225,6 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state,
connector->base.base.id, connector->base.name);
}
-static bool
-intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state)
-{
- struct hdr_output_metadata *hdr_metadata;
-
- if (!conn_state->hdr_output_metadata)
- return false;
-
- hdr_metadata = conn_state->hdr_output_metadata->data;
-
- return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084;
-}
-
static void
intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
@@ -521,9 +508,6 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- if (panel->backlight.edp.vesa.luminance_control_support)
- return;
-
drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
if (!panel->backlight.edp.vesa.info.aux_enable)
@@ -546,7 +530,7 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
luminance_range->max_luminance,
panel->vbt.backlight.pwm_freq_hz,
intel_dp->edp_dpcd, &current_level, &current_mode,
- false);
+ panel->backlight.edp.vesa.luminance_control_support);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index bd757db85927..14ed0ea22dd3 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -782,9 +782,9 @@ intel_dp_mst_hdcp_stream_encryption(struct intel_connector *connector,
return -EINVAL;
/* Wait for encryption confirmation */
- if (intel_de_wait(display, HDCP_STATUS(display, cpu_transcoder, port),
- stream_enc_status, enable ? stream_enc_status : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_ms(display, HDCP_STATUS(display, cpu_transcoder, port),
+ stream_enc_status, enable ? stream_enc_status : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS, NULL)) {
drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), str_enabled_disabled(enable));
return -ETIMEDOUT;
@@ -821,10 +821,10 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector,
return ret;
/* Wait for encryption confirmation */
- if (intel_de_wait(display, HDCP2_STREAM_STATUS(display, cpu_transcoder, pipe),
- STREAM_ENCRYPTION_STATUS,
- enable ? STREAM_ENCRYPTION_STATUS : 0,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_ms(display, HDCP2_STREAM_STATUS(display, cpu_transcoder, pipe),
+ STREAM_ENCRYPTION_STATUS,
+ enable ? STREAM_ENCRYPTION_STATUS : 0,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS, NULL)) {
drm_err(display->drm, "Timed out waiting for transcoder: %s stream encryption %s\n",
transcoder_name(cpu_transcoder), str_enabled_disabled(enable));
return -ETIMEDOUT;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index a479b63112ea..aad5fe14962f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -22,13 +22,15 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_display_core.h"
+#include "intel_display_jiffies.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_encoder.h"
@@ -478,12 +480,13 @@ static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
_TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
_TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
-void
+bool
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
+ bool changed = false;
int lane;
if (intel_dp_is_uhbr(crtc_state)) {
@@ -502,10 +505,17 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
TRAIN_REQ_PREEMPH_ARGS(link_status));
}
- for (lane = 0; lane < 4; lane++)
- intel_dp->train_set[lane] =
- intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
- dp_phy, link_status, lane);
+ for (lane = 0; lane < 4; lane++) {
+ u8 new = intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+ if (intel_dp->train_set[lane] == new)
+ continue;
+
+ intel_dp->train_set[lane] = new;
+ changed = true;
+ }
+
+ return changed;
}
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
@@ -758,6 +768,63 @@ void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
}
}
+/*
+ * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
+ * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
+ * 1.2 devices that support it, TPS2 otherwise.
+ */
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ bool source_tps3, sink_tps3, source_tps4, sink_tps4;
+
+ /* UHBR+ use separate 128b/132b TPS2 */
+ if (intel_dp_is_uhbr(crtc_state))
+ return DP_TRAINING_PATTERN_2;
+
+ /*
+ * TPS4 support is mandatory for all downstream devices that
+ * support HBR3. There are no known eDP panels that support
+ * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
+ * LTTPRs must support TPS4.
+ */
+ source_tps4 = intel_dp_source_supports_tps4(display);
+ sink_tps4 = dp_phy != DP_PHY_DPRX ||
+ drm_dp_tps4_supported(intel_dp->dpcd);
+ if (source_tps4 && sink_tps4) {
+ return DP_TRAINING_PATTERN_4;
+ } else if (crtc_state->port_clock == 810000) {
+ if (!source_tps4)
+ lt_dbg(intel_dp, dp_phy,
+ "8.1 Gbps link rate without source TPS4 support\n");
+ if (!sink_tps4)
+ lt_dbg(intel_dp, dp_phy,
+ "8.1 Gbps link rate without sink TPS4 support\n");
+ }
+
+ /*
+ * TPS3 support is mandatory for downstream devices that
+ * support HBR2. However, not all sinks follow the spec.
+ */
+ source_tps3 = intel_dp_source_supports_tps3(display);
+ sink_tps3 = dp_phy != DP_PHY_DPRX ||
+ drm_dp_tps3_supported(intel_dp->dpcd);
+ if (source_tps3 && sink_tps3) {
+ return DP_TRAINING_PATTERN_3;
+ } else if (crtc_state->port_clock >= 540000) {
+ if (!source_tps3)
+ lt_dbg(intel_dp, dp_phy,
+ ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
+ if (!sink_tps3)
+ lt_dbg(intel_dp, dp_phy,
+ ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
+ }
+
+ return DP_TRAINING_PATTERN_2;
+}
+
static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 link_bw, u8 rate_select)
@@ -950,63 +1017,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
}
/*
- * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
- * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
- * 1.2 devices that support it, TPS2 otherwise.
- */
-static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- enum drm_dp_phy dp_phy)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- bool source_tps3, sink_tps3, source_tps4, sink_tps4;
-
- /* UHBR+ use separate 128b/132b TPS2 */
- if (intel_dp_is_uhbr(crtc_state))
- return DP_TRAINING_PATTERN_2;
-
- /*
- * TPS4 support is mandatory for all downstream devices that
- * support HBR3. There are no known eDP panels that support
- * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification.
- * LTTPRs must support TPS4.
- */
- source_tps4 = intel_dp_source_supports_tps4(display);
- sink_tps4 = dp_phy != DP_PHY_DPRX ||
- drm_dp_tps4_supported(intel_dp->dpcd);
- if (source_tps4 && sink_tps4) {
- return DP_TRAINING_PATTERN_4;
- } else if (crtc_state->port_clock == 810000) {
- if (!source_tps4)
- lt_dbg(intel_dp, dp_phy,
- "8.1 Gbps link rate without source TPS4 support\n");
- if (!sink_tps4)
- lt_dbg(intel_dp, dp_phy,
- "8.1 Gbps link rate without sink TPS4 support\n");
- }
-
- /*
- * TPS3 support is mandatory for downstream devices that
- * support HBR2. However, not all sinks follow the spec.
- */
- source_tps3 = intel_dp_source_supports_tps3(display);
- sink_tps3 = dp_phy != DP_PHY_DPRX ||
- drm_dp_tps3_supported(intel_dp->dpcd);
- if (source_tps3 && sink_tps3) {
- return DP_TRAINING_PATTERN_3;
- } else if (crtc_state->port_clock >= 540000) {
- if (!source_tps3)
- lt_dbg(intel_dp, dp_phy,
- ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
- if (!sink_tps3)
- lt_dbg(intel_dp, dp_phy,
- ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
- }
-
- return DP_TRAINING_PATTERN_2;
-}
-
-/*
* Perform the link training channel equalization phase on the given DP PHY
* using one of training pattern 2, 3 or 4 depending on the source and
* sink capabilities.
@@ -1127,16 +1137,19 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int ret;
intel_dp->link.active = true;
- intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
DP_TRAINING_PATTERN_DISABLE);
- if (intel_dp_is_uhbr(crtc_state) &&
- wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
- lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
+ if (intel_dp_is_uhbr(crtc_state)) {
+ ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state),
+ ret == 0,
+ 500, 500 * 1000, false);
+ if (ret)
+ lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n");
}
intel_hpd_unblock(encoder);
@@ -1371,8 +1384,8 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp,
if (ret)
ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX);
- if (intel_dp->set_idle_link_train)
- intel_dp->set_idle_link_train(intel_dp, crtc_state);
+ intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+ intel_dp->set_idle_link_train(intel_dp, crtc_state);
return ret;
}
@@ -1574,8 +1587,12 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
int lttpr_count)
{
bool passed = false;
+ int ret;
- if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
+ ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state),
+ ret == 0,
+ 500, 500 * 1000, false);
+ if (ret) {
lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
goto out;
}
@@ -1602,6 +1619,8 @@ out:
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
DP_PHY_DPRX, DP_TRAINING_PATTERN_2);
+ intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
+
return passed;
}
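All the wait_for() conversions in this patch follow the same pattern. The semantics of the iopoll helper, as used here, reduce to this conceptual sketch (see linux/iopoll.h for the real macro):

/*
 * Conceptual model of poll_timeout_us(op, cond, sleep_us, timeout_us,
 * sleep_before_read): repeatedly run 'op' and test 'cond', sleeping
 * roughly 'sleep_us' between attempts; return 0 once 'cond' holds,
 * or -ETIMEDOUT after 'timeout_us' has elapsed.
 */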
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 46614124569f..1ba22ed6db08 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -23,7 +23,7 @@ void intel_dp_link_training_set_bw(struct intel_dp *intel_dp,
int link_bw, int rate_select, int lane_count,
bool enhanced_framing);
-void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+bool intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE]);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 74497c9a0554..4c0b943fe86f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -33,7 +33,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -43,6 +42,7 @@
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
@@ -293,12 +293,22 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
mst_stream_update_slots(crtc_state, mst_state);
}
- if (dsc) {
- if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
- return -EINVAL;
-
- crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
- }
+ /*
+ * NOTE: The following must reset crtc_state->fec_enable for UHBR/DSC
+ * after it was set by intel_dp_dsc_compute_config() ->
+ * intel_dp_needs_8b10b_fec().
+ */
+ crtc_state->fec_enable = intel_dp_needs_8b10b_fec(crtc_state, dsc);
+ /*
+ * If FEC gets enabled only because of another compressed stream, FEC
+ * may not be supported for this uncompressed stream on the whole link
+ * path until the sink DPRX. In this case a downstream branch device
+ * will disable FEC for the uncompressed stream as expected, and so
+ * FEC support doesn't need to be checked for this uncompressed stream.
+ */
+ if (crtc_state->fec_enable && dsc &&
+ !intel_dp_supports_fec(intel_dp, connector, crtc_state))
+ return -EINVAL;
max_dpt_bpp_x16 = fxp_q4_from_int(intel_dp_mst_max_dpt_bpp(crtc_state, dsc));
if (max_dpt_bpp_x16 && max_bpp_x16 > max_dpt_bpp_x16) {
@@ -611,12 +621,15 @@ adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp,
static bool
mst_stream_compute_config_limits(struct intel_dp *intel_dp,
- struct intel_connector *connector,
+ struct drm_connector_state *conn_state,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
{
- if (!intel_dp_compute_config_limits(intel_dp, connector,
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+
+ if (!intel_dp_compute_config_limits(intel_dp, conn_state,
crtc_state, false, dsc,
limits))
return false;
@@ -665,7 +678,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en ||
- !mst_stream_compute_config_limits(intel_dp, connector,
+ !mst_stream_compute_config_limits(intel_dp, conn_state,
pipe_config, false, &limits);
if (!dsc_needed) {
@@ -691,7 +704,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
str_yes_no(intel_dp->force_dsc_en));
- if (!mst_stream_compute_config_limits(intel_dp, connector,
+ if (!mst_stream_compute_config_limits(intel_dp, conn_state,
pipe_config, true,
&limits))
return -EINVAL;
@@ -808,14 +821,14 @@ static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
return mask;
}
-static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
+static int intel_dp_mst_check_dsc_change(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mst_mgr,
struct intel_link_bw_limits *limits)
{
struct intel_display *display = to_intel_display(state);
struct intel_crtc *crtc;
u8 mst_pipe_mask;
- u8 fec_pipe_mask = 0;
+ u8 dsc_pipe_mask = 0;
int ret;
mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
@@ -828,16 +841,16 @@ static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
if (drm_WARN_ON(display->drm, !crtc_state))
return -EINVAL;
- if (crtc_state->fec_enable)
- fec_pipe_mask |= BIT(crtc->pipe);
+ if (intel_dsc_enabled_on_link(crtc_state))
+ dsc_pipe_mask |= BIT(crtc->pipe);
}
- if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
+ if (!dsc_pipe_mask || mst_pipe_mask == dsc_pipe_mask)
return 0;
- limits->force_fec_pipes |= mst_pipe_mask;
+ limits->link_dsc_pipes |= mst_pipe_mask;
- ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
+ ret = intel_modeset_pipes_in_mask_early(state, "MST DSC",
mst_pipe_mask);
return ret ? : -EAGAIN;
@@ -891,7 +904,7 @@ int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
int i;
for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
- ret = intel_dp_mst_check_fec_change(state, mgr, limits);
+ ret = intel_dp_mst_check_dsc_change(state, mgr, limits);
if (ret)
return ret;
@@ -1655,6 +1668,7 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
struct intel_connector *connector)
{
u8 dpcd_caps[DP_RECEIVER_CAP_SIZE];
+ struct drm_dp_desc desc;
if (!connector->dp.dsc_decompression_aux)
return;
@@ -1662,7 +1676,13 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd_caps) < 0)
return;
- intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
+ if (drm_dp_read_desc(connector->dp.dsc_decompression_aux, &desc,
+ drm_dp_is_branch(dpcd_caps)) < 0)
+ return;
+
+ intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV],
+ &desc, drm_dp_is_branch(dpcd_caps),
+ connector);
}
static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c
index 6ed5012c5fac..5cfa1dd411da 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_test.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_test.c
@@ -6,7 +6,6 @@
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -753,13 +752,12 @@ static const struct {
void intel_dp_test_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
int i;
for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
debugfs_create_file(intel_display_debugfs_files[i].name,
0644,
- minor->debugfs_root,
+ display->drm->debugfs_root,
display,
intel_display_debugfs_files[i].fops);
}
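
The hunk above drops the per-minor debugfs root in favour of the device-wide display->drm->debugfs_root. A hedged sketch of the same table-driven registration, with struct debugfs_entry as a hypothetical local type; debugfs files are removed recursively along with their parent, so no per-file teardown pairing is needed:

#include <linux/debugfs.h>

struct debugfs_entry {
	const char *name;
	const struct file_operations *fops;
};

/* create one read/write debugfs file per table entry under @root */
static void register_debugfs_entries(struct dentry *root, void *data,
				     const struct debugfs_entry *entries,
				     size_t count)
{
	size_t i;

	for (i = 0; i < count; i++)
		debugfs_create_file(entries[i].name, 0644, root, data,
				    entries[i].fops);
}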
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 3f77ad92c156..8027bab2951b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -24,13 +24,13 @@
#include <drm/drm_print.h>
#include "bxt_dpio_phy_regs.h"
-#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "vlv_dpio_phy_regs.h"
@@ -390,7 +390,7 @@ static u32 bxt_get_grc(struct intel_display *display, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct intel_display *display,
enum dpio_phy phy)
{
- if (intel_de_wait_for_set(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10))
+ if (intel_de_wait_for_set_ms(display, BXT_PORT_REF_DW3(phy), GRC_DONE, 10))
drm_err(display->drm, "timeout waiting for PHY%d GRC\n", phy);
}
@@ -427,7 +427,7 @@ static void _bxt_dpio_phy_init(struct intel_display *display, enum dpio_phy phy)
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
- if (intel_de_wait_fw(display, BXT_PORT_CL1CM_DW0(phy),
+ if (intel_de_wait_ms(display, BXT_PORT_CL1CM_DW0(phy),
PHY_RESERVED | PHY_POWER_GOOD, PHY_POWER_GOOD, 1, NULL))
drm_err(display->drm, "timeout during PHY%d power on\n",
phy);
@@ -1173,6 +1173,7 @@ void vlv_wait_port_ready(struct intel_encoder *encoder,
struct intel_display *display = to_intel_display(encoder);
u32 port_mask;
i915_reg_t dpll_reg;
+ u32 val;
switch (encoder->port) {
default:
@@ -1193,10 +1194,9 @@ void vlv_wait_port_ready(struct intel_encoder *encoder,
break;
}
- if (intel_de_wait(display, dpll_reg, port_mask, expected_mask, 1000))
+ if (intel_de_wait_ms(display, dpll_reg, port_mask, expected_mask, 1000, &val))
drm_WARN(display->drm, 1,
"timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
encoder->base.base.id, encoder->base.name,
- intel_de_read(display, dpll_reg) & port_mask,
- expected_mask);
+ val & port_mask, expected_mask);
}
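
The intel_de_wait_ms()/intel_de_wait_for_set_ms() conversions above make the timeout unit explicit in the name and let the caller capture the final register value for the error message instead of re-reading the register after the timeout. A sketch of that shape built on read_poll_timeout(); struct reg_ctx and reg_read() are hypothetical:

#include <linux/iopoll.h>
#include <linux/types.h>

struct reg_ctx;
u32 reg_read(struct reg_ctx *ctx);	/* hypothetical accessor */

static int wait_reg_ms(struct reg_ctx *ctx, u32 mask, u32 value,
		       unsigned int timeout_ms, u32 *out)
{
	u32 val;
	int ret;

	/* sample every 1ms until (val & mask) == value or the timeout hits */
	ret = read_poll_timeout(reg_read, val, (val & mask) == value,
				1000, timeout_ms * 1000, false, ctx);
	if (out)
		*out = val;	/* last observed value, handy for error logs */
	return ret;
}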
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index f969c5399a51..4f1db8493a2e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -17,6 +17,7 @@
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
+#include "intel_lt_phy.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_panel.h"
@@ -1232,6 +1233,28 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
+static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder =
+ intel_get_crtc_new_encoder(state, crtc_state);
+ int ret;
+
+ ret = intel_lt_phy_pll_calc_state(crtc_state, encoder);
+ if (ret)
+ return ret;
+
+ /* TODO: Do the readback via intel_compute_shared_dplls() */
+ crtc_state->port_clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+
+ crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
+
+ return 0;
+}
+
static int ilk_fb_cb_factor(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -1691,6 +1714,10 @@ static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
return 0;
}
+static const struct intel_dpll_global_funcs xe3plpd_dpll_funcs = {
+ .crtc_compute_clock = xe3plpd_crtc_compute_clock,
+};
+
static const struct intel_dpll_global_funcs mtl_dpll_funcs = {
.crtc_compute_clock = mtl_crtc_compute_clock,
};
@@ -1789,7 +1816,9 @@ int intel_dpll_crtc_get_dpll(struct intel_atomic_state *state,
void
intel_dpll_init_clock_hook(struct intel_display *display)
{
- if (DISPLAY_VER(display) >= 14)
+ if (HAS_LT_PHY(display))
+ display->funcs.dpll = &xe3plpd_dpll_funcs;
+ else if (DISPLAY_VER(display) >= 14)
display->funcs.dpll = &mtl_dpll_funcs;
else if (display->platform.dg2)
display->funcs.dpll = &dg2_dpll_funcs;
@@ -1990,7 +2019,7 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
intel_de_posting_read(display, DPLL(display, pipe));
udelay(150);
- if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
drm_err(display->drm, "DPLL %d failed to lock\n", pipe);
}
@@ -2136,7 +2165,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
intel_de_write(display, DPLL(display, pipe), hw_state->dpll);
/* Check PLL is locked */
- if (intel_de_wait_for_set(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
+ if (intel_de_wait_for_set_ms(display, DPLL(display, pipe), DPLL_LOCK_VLV, 1))
drm_err(display->drm, "PLL %d failed to lock\n", pipe);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 33e0398120c8..9c7cf03cf022 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -27,11 +27,11 @@
#include <drm/drm_print.h>
#include "bxt_dpio_phy_regs.h"
-#include "i915_utils.h"
#include "intel_cx0_phy.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dpio_phy.h"
@@ -1395,7 +1395,7 @@ static void skl_ddi_pll_enable(struct intel_display *display,
/* the enable bit is always bit 31 */
intel_de_rmw(display, regs[id].ctl, 0, LCPLL_PLL_ENABLE);
- if (intel_de_wait_for_set(display, DPLL_STATUS, DPLL_LOCK(id), 5))
+ if (intel_de_wait_for_set_ms(display, DPLL_STATUS, DPLL_LOCK(id), 5))
drm_err(display->drm, "DPLL %d not locked\n", id);
}
@@ -2046,6 +2046,7 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
u32 temp;
+ int ret;
bxt_port_to_phy_channel(display, port, &phy, &ch);
@@ -2056,8 +2057,10 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
0, PORT_PLL_POWER_ENABLE);
- if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_POWER_STATE), 200))
+ ret = intel_de_wait_for_set_us(display,
+ BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_STATE, 200);
+ if (ret)
drm_err(display->drm,
"Power state not set for PLL:%d\n", port);
}
@@ -2119,8 +2122,9 @@ static void bxt_ddi_pll_enable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE);
intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
- if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
- 200))
+ ret = intel_de_wait_for_set_us(display, BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_LOCK, 200);
+ if (ret)
drm_err(display->drm, "PLL %d not locked\n", port);
if (display->platform.geminilake) {
@@ -2144,6 +2148,7 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
struct intel_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
+ int ret;
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0);
intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port));
@@ -2152,8 +2157,10 @@ static void bxt_ddi_pll_disable(struct intel_display *display,
intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port),
PORT_PLL_POWER_ENABLE, 0);
- if (wait_for_us(!(intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_POWER_STATE), 200))
+ ret = intel_de_wait_for_clear_us(display,
+ BXT_PORT_PLL_ENABLE(port),
+ PORT_PLL_POWER_STATE, 200);
+ if (ret)
drm_err(display->drm,
"Power state not reset for PLL:%d\n", port);
}
@@ -3913,7 +3920,7 @@ static void icl_pll_power_enable(struct intel_display *display,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_de_wait_for_set(display, enable_reg, PLL_POWER_STATE, 1))
+ if (intel_de_wait_for_set_ms(display, enable_reg, PLL_POWER_STATE, 1))
drm_err(display->drm, "PLL %d Power not enabled\n",
pll->info->id);
}
@@ -3925,7 +3932,7 @@ static void icl_pll_enable(struct intel_display *display,
intel_de_rmw(display, enable_reg, 0, PLL_ENABLE);
/* Timeout is actually 600us. */
- if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 1))
+ if (intel_de_wait_for_set_ms(display, enable_reg, PLL_LOCK, 1))
drm_err(display->drm, "PLL %d not locked\n", pll->info->id);
}
@@ -4038,7 +4045,7 @@ static void icl_pll_disable(struct intel_display *display,
intel_de_rmw(display, enable_reg, PLL_ENABLE, 0);
/* Timeout is actually 1us. */
- if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 1))
+ if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_LOCK, 1))
drm_err(display->drm, "PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
@@ -4049,7 +4056,7 @@ static void icl_pll_disable(struct intel_display *display,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_de_wait_for_clear(display, enable_reg, PLL_POWER_STATE, 1))
+ if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_POWER_STATE, 1))
drm_err(display->drm, "PLL %d Power not disabled\n",
pll->info->id);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index f131bdd1c975..6183da90b28d 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -267,6 +267,16 @@ struct intel_cx0pll_state {
bool tbt_mode;
};
+struct intel_lt_phy_pll_state {
+ u32 clock; /* in kHz */
+ u8 addr_msb[13];
+ u8 addr_lsb[13];
+ u8 data[13][4];
+ u8 config[3];
+ bool ssc_enabled;
+ bool tbt_mode;
+};
+
struct intel_dpll_hw_state {
union {
struct i9xx_dpll_hw_state i9xx;
@@ -276,6 +286,7 @@ struct intel_dpll_hw_state {
struct icl_dpll_hw_state icl;
struct intel_mpllb_state mpllb;
struct intel_cx0pll_state cx0pll;
+ struct intel_lt_phy_pll_state ltpll;
};
};
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index aea249e2699f..58d953472218 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_domain.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
@@ -33,8 +35,6 @@ i915_vm_to_dpt(struct i915_address_space *vm)
return container_of(vm, struct i915_dpt, vm);
}
-#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
-
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
@@ -322,5 +322,5 @@ void intel_dpt_destroy(struct i915_address_space *vm)
u64 intel_dpt_offset(struct i915_vma *dpt_vma)
{
- return dpt_vma->node.start;
+ return i915_vma_offset(dpt_vma);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 53d8ae3a70e9..4ad4efbf9253 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,10 +4,11 @@
*
*/
+#include <linux/iopoll.h>
+
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_regs.h"
@@ -114,24 +115,6 @@ static bool pre_commit_is_vrr_active(struct intel_atomic_state *state,
return old_crtc_state->vrr.enable && !intel_crtc_vrr_disabling(state, crtc);
}
-static int dsb_vblank_delay(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *crtc_state =
- intel_pre_commit_crtc_state(state, crtc);
-
- if (pre_commit_is_vrr_active(state, crtc))
- /*
- * When the push is sent during vblank it will trigger
- * on the next scanline, hence we have up to one extra
- * scanline until the delayed vblank occurs after
- * TRANS_PUSH has been written.
- */
- return intel_vrr_vblank_delay(crtc_state) + 1;
- else
- return intel_mode_vblank_delay(&crtc_state->hw.adjusted_mode);
-}
-
static int dsb_vtotal(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -722,7 +705,7 @@ void intel_dsb_vblank_evade(struct intel_atomic_state *state,
intel_dsb_emit_wait_dsl(dsb, DSB_OPCODE_WAIT_DSL_OUT, 0, 0);
if (pre_commit_is_vrr_active(state, crtc)) {
- int vblank_delay = intel_vrr_vblank_delay(crtc_state);
+ int vblank_delay = crtc_state->set_context_latency;
end = intel_vrr_vmin_vblank_start(crtc_state);
start = end - vblank_delay - latency;
@@ -814,16 +797,43 @@ void intel_dsb_chain(struct intel_atomic_state *state,
wait_for_vblank ? DSB_WAIT_FOR_VBLANK : 0);
}
-void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
- struct intel_dsb *dsb)
+void intel_dsb_wait_for_delayed_vblank(struct intel_atomic_state *state,
+ struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
const struct intel_crtc_state *crtc_state =
intel_pre_commit_crtc_state(state, crtc);
- int usecs = intel_scanlines_to_usecs(&crtc_state->hw.adjusted_mode,
- dsb_vblank_delay(state, crtc));
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int wait_scanlines;
+
+ if (pre_commit_is_vrr_active(state, crtc)) {
+ /*
+ * If the push happened before the vmin decision boundary
+ * we don't know how far we are from the undelayed vblank.
+ * Wait until we're past the vmin safe window, at which
+ * point we're SCL lines away from the delayed vblank.
+ *
+ * If the push happened after the vmin decision boundary
+ * the hardware itself guarantees that we're SCL lines
+ * away from the delayed vblank, and we won't be inside
+ * the vmin safe window so this extra wait does nothing.
+ */
+ intel_dsb_wait_scanline_out(state, dsb,
+ intel_vrr_safe_window_start(crtc_state),
+ intel_vrr_vmin_safe_window_end(crtc_state));
+ /*
+ * When the push is sent during vblank it will trigger
+ * on the next scanline, hence we have up to one extra
+ * scanline until the delayed vblank occurs after
+ * TRANS_PUSH has been written.
+ */
+ wait_scanlines = crtc_state->set_context_latency + 1;
+ } else {
+ wait_scanlines = intel_mode_vblank_delay(adjusted_mode);
+ }
- intel_dsb_wait_usec(dsb, usecs);
+ intel_dsb_wait_usec(dsb, intel_scanlines_to_usecs(adjusted_mode, wait_scanlines));
}
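
The scanline-based wait above ultimately becomes a microsecond wait via intel_scanlines_to_usecs(). A sketch of the conversion it implies, assuming the usual mode timings where one scanline lasts htotal pixel clocks and the CRTC clock is in kHz:

#include <linux/math.h>

static unsigned int scanlines_to_usecs(int htotal, int crtc_clock_khz,
				       int scanlines)
{
	/* 1 scanline = htotal / (clock_khz * 1000) s = htotal * 1000 / clock_khz us */
	return DIV_ROUND_UP(scanlines * htotal * 1000, crtc_clock_khz);
}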
/**
@@ -871,8 +881,13 @@ void intel_dsb_wait(struct intel_dsb *dsb)
struct intel_crtc *crtc = dsb->crtc;
struct intel_display *display = to_intel_display(crtc->base.dev);
enum pipe pipe = crtc->pipe;
+ bool is_busy;
+ int ret;
- if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) {
+ ret = poll_timeout_us(is_busy = is_dsb_busy(display, pipe, dsb->id),
+ !is_busy,
+ 100, 1000, false);
+ if (ret) {
u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index c8f4499916eb..2f31f2c1d0c5 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -48,8 +48,8 @@ void intel_dsb_nonpost_end(struct intel_dsb *dsb);
void intel_dsb_interrupt(struct intel_dsb *dsb);
void intel_dsb_wait_usec(struct intel_dsb *dsb, int count);
void intel_dsb_wait_vblanks(struct intel_dsb *dsb, int count);
-void intel_dsb_wait_vblank_delay(struct intel_atomic_state *state,
- struct intel_dsb *dsb);
+void intel_dsb_wait_for_delayed_vblank(struct intel_atomic_state *state,
+ struct intel_dsb *dsb);
void intel_dsb_wait_scanline_in(struct intel_atomic_state *state,
struct intel_dsb *dsb,
int lower, int upper);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index e6a851d276f8..4b815ce6b1fe 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -38,10 +38,10 @@
#include <drm/drm_print.h>
#include <video/mipi_display.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_gmbus_regs.h"
@@ -106,8 +106,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u8 type, flags, seq_port;
u16 len;
enum port port;
-
- drm_dbg_kms(display->drm, "\n");
+ ssize_t ret;
+ bool hs_mode;
flags = *data++;
type = *data++;
@@ -129,45 +129,56 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
goto out;
}
- if ((flags >> MIPI_TRANSFER_MODE_SHIFT) & 1)
+ hs_mode = (flags >> MIPI_TRANSFER_MODE_SHIFT) & 1;
+ if (hs_mode)
dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM;
else
dsi_device->mode_flags |= MIPI_DSI_MODE_LPM;
dsi_device->channel = (flags >> MIPI_VIRTUAL_CHANNEL_SHIFT) & 3;
+ drm_dbg_kms(display->drm, "DSI packet: Port %c (seq %u), Flags 0x%02x, VC %u, %s, Type 0x%02x, Length %u, Data %*ph\n",
+ port_name(port), seq_port, flags, dsi_device->channel,
+ hs_mode ? "HS" : "LP", type, len, (int)len, data);
+
switch (type) {
case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
- mipi_dsi_generic_write(dsi_device, NULL, 0);
+ ret = mipi_dsi_generic_write(dsi_device, NULL, 0);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
- mipi_dsi_generic_write(dsi_device, data, 1);
+ ret = mipi_dsi_generic_write(dsi_device, data, 1);
break;
case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
- mipi_dsi_generic_write(dsi_device, data, 2);
+ ret = mipi_dsi_generic_write(dsi_device, data, 2);
break;
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n");
+ ret = -EOPNOTSUPP;
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
- mipi_dsi_generic_write(dsi_device, data, len);
+ ret = mipi_dsi_generic_write(dsi_device, data, len);
break;
case MIPI_DSI_DCS_SHORT_WRITE:
- mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
+ ret = mipi_dsi_dcs_write_buffer(dsi_device, data, 1);
break;
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
- mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
+ ret = mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n");
+ ret = -EOPNOTSUPP;
break;
case MIPI_DSI_DCS_LONG_WRITE:
- mipi_dsi_dcs_write_buffer(dsi_device, data, len);
+ ret = mipi_dsi_dcs_write_buffer(dsi_device, data, len);
+ break;
+ default:
+ ret = -EINVAL;
break;
}
+ if (ret < 0)
+ drm_err(display->drm, "DSI send packet failed with %pe\n", ERR_PTR(ret));
+
if (DISPLAY_VER(display) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
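
The rework above captures the ssize_t result of each mipi_dsi_*() call and reports a failure once, instead of silently ignoring it. The pattern in isolation, as a hedged sketch:

#include <linux/err.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_print.h>

static void send_dcs_buffer(struct mipi_dsi_device *dsi,
			    struct drm_device *drm,
			    const u8 *data, size_t len)
{
	ssize_t ret = mipi_dsi_dcs_write_buffer(dsi, data, len);

	/* %pe with ERR_PTR() prints the symbolic errno name */
	if (ret < 0)
		drm_err(drm, "DSI send packet failed with %pe\n",
			ERR_PTR(ret));
}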
@@ -777,7 +788,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->init_count = mipi_config->master_init_timer;
intel_dsi->bw_timer = mipi_config->dbi_bw_timer;
intel_dsi->video_frmt_cfg_bits =
- mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0;
+ mipi_config->bta_disable ? DISABLE_VIDEO_BTA : 0;
intel_dsi->bgr_enabled = mipi_config->rgb_flip;
/* Starting point, adjusted depending on dual link and burst mode */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h
new file mode 100644
index 000000000000..edc7331dcca2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h
@@ -0,0 +1,197 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_DSI_VBT_DEFS_H__
+#define __INTEL_DSI_VBT_DEFS_H__
+
+#include <linux/types.h>
+
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note that the VBT spec has AssertReset / DeassertReset swapped from
+ * their usual naming; we use the proper names here to avoid confusion
+ * when reading the code.
+ */
+enum mipi_seq {
+ MIPI_SEQ_END = 0,
+ MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */
+ MIPI_SEQ_INIT_OTP,
+ MIPI_SEQ_DISPLAY_ON,
+ MIPI_SEQ_DISPLAY_OFF,
+ MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */
+ MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */
+ MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_ON, /* sequence block v2+ */
+ MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */
+ MIPI_SEQ_POWER_ON, /* sequence block v3+ */
+ MIPI_SEQ_POWER_OFF, /* sequence block v3+ */
+ MIPI_SEQ_MAX
+};
+
+enum mipi_seq_element {
+ MIPI_SEQ_ELEM_END = 0,
+ MIPI_SEQ_ELEM_SEND_PKT,
+ MIPI_SEQ_ELEM_DELAY,
+ MIPI_SEQ_ELEM_GPIO,
+ MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */
+ MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */
+ MIPI_SEQ_ELEM_MAX
+};
+
+#define MIPI_DSI_UNDEFINED_PANEL_ID 0
+#define MIPI_DSI_GENERIC_PANEL_ID 1
+
+struct mipi_config {
+ u16 panel_id;
+
+ /* General Params */
+ struct {
+ u32 enable_dithering:1;
+ u32 rsvd1:1;
+ u32 is_bridge:1;
+
+ u32 panel_arch_type:2;
+ u32 is_cmd_mode:1;
+
+#define NON_BURST_SYNC_PULSE 0x1
+#define NON_BURST_SYNC_EVENTS 0x2
+#define BURST_MODE 0x3
+ u32 video_transfer_mode:2;
+
+ u32 cabc_supported:1;
+#define PPS_BLC_PMIC 0
+#define PPS_BLC_SOC 1
+ u32 pwm_blc:1;
+
+#define PIXEL_FORMAT_RGB565 0x1
+#define PIXEL_FORMAT_RGB666 0x2
+#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3
+#define PIXEL_FORMAT_RGB888 0x4
+ u32 videomode_color_format:4;
+
+#define ENABLE_ROTATION_0 0x0
+#define ENABLE_ROTATION_90 0x1
+#define ENABLE_ROTATION_180 0x2
+#define ENABLE_ROTATION_270 0x3
+ u32 rotation:2;
+ u32 bta_disable:1;
+ u32 rsvd2:15;
+ } __packed;
+
+ /* Port Desc */
+ struct {
+#define DUAL_LINK_NOT_SUPPORTED 0
+#define DUAL_LINK_FRONT_BACK 1
+#define DUAL_LINK_PIXEL_ALT 2
+ u16 dual_link:2;
+ u16 lane_cnt:2;
+ u16 pixel_overlap:3;
+ u16 rgb_flip:1;
+#define DL_DCS_PORT_A 0x00
+#define DL_DCS_PORT_C 0x01
+#define DL_DCS_PORT_A_AND_C 0x02
+ u16 dl_dcs_cabc_ports:2;
+ u16 dl_dcs_backlight_ports:2;
+ u16 port_sync:1; /* 219-230 */
+ u16 rsvd3:3;
+ } __packed;
+
+ /* DSI Controller Parameters */
+ struct {
+ u16 dsi_usage:1;
+ u16 rsvd4:15;
+ } __packed;
+
+ u8 rsvd5;
+ u32 target_burst_mode_freq;
+ u32 dsi_ddr_clk;
+ u32 bridge_ref_clk;
+
+ /* LP Byte Clock */
+ struct {
+#define BYTE_CLK_SEL_20MHZ 0
+#define BYTE_CLK_SEL_10MHZ 1
+#define BYTE_CLK_SEL_5MHZ 2
+ u8 byte_clk_sel:2;
+ u8 rsvd6:6;
+ } __packed;
+
+ /* DPhy Flags */
+ struct {
+ u16 dphy_param_valid:1;
+ u16 eot_pkt_disabled:1;
+ u16 enable_clk_stop:1;
+ u16 blanking_packets_during_bllp:1; /* 219+ */
+ u16 lp_clock_during_lpm:1; /* 219+ */
+ u16 rsvd7:11;
+ } __packed;
+
+ u32 hs_tx_timeout;
+ u32 lp_rx_timeout;
+ u32 turn_around_timeout;
+ u32 device_reset_timer;
+ u32 master_init_timer;
+ u32 dbi_bw_timer;
+ u32 lp_byte_clk_val;
+
+ /* DPhy Params */
+ struct {
+ u32 prepare_cnt:6;
+ u32 rsvd8:2;
+ u32 clk_zero_cnt:8;
+ u32 trail_cnt:5;
+ u32 rsvd9:3;
+ u32 exit_zero_cnt:6;
+ u32 rsvd10:2;
+ } __packed;
+
+ u32 clk_lane_switch_cnt;
+ u32 hl_switch_cnt;
+
+ u32 rsvd11[6];
+
+ /* timings based on dphy spec */
+ u8 tclk_miss;
+ u8 tclk_post;
+ u8 rsvd12;
+ u8 tclk_pre;
+ u8 tclk_prepare;
+ u8 tclk_settle;
+ u8 tclk_term_enable;
+ u8 tclk_trail;
+ u16 tclk_prepare_clkzero;
+ u8 rsvd13;
+ u8 td_term_enable;
+ u8 teot;
+ u8 ths_exit;
+ u8 ths_prepare;
+ u16 ths_prepare_hszero;
+ u8 rsvd14;
+ u8 ths_settle;
+ u8 ths_skip;
+ u8 ths_trail;
+ u8 tinit;
+ u8 tlpx;
+ u8 rsvd15[3];
+
+ /* GPIOs */
+ u8 panel_enable;
+ u8 bl_enable;
+ u8 pwm_enable;
+ u8 reset_r_n;
+ u8 pwr_down_r;
+ u8 stdby_r_n;
+} __packed;
+
+/* all delays have a unit of 100us */
+struct mipi_pps_data {
+ u16 panel_on_delay;
+ u16 bl_enable_delay;
+ u16 bl_disable_delay;
+ u16 panel_off_delay;
+ u16 panel_power_cycle_delay;
+} __packed;
+
+#endif /* __INTEL_DSI_VBT_DEFS_H__ */
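
Per the comment above struct mipi_pps_data, every delay field is stored in 100us units. A small sketch of converting one such field for use with a millisecond sleep, rounding up so the panel's minimum delay is never undershot:

#include <linux/math.h>
#include <linux/types.h>

static unsigned int pps_delay_ms(u16 delay_100us)
{
	return DIV_ROUND_UP(delay_100us, 10);	/* 100us units -> whole ms */
}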
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 08b48e36aca6..c2663d6e2c92 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -34,12 +34,12 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
#include "intel_dvo_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_encoder.c b/drivers/gpu/drm/i915/display/intel_encoder.c
index 0b7bd26f4339..2ffe1f251ef8 100644
--- a/drivers/gpu/drm/i915/display/intel_encoder.c
+++ b/drivers/gpu/drm/i915/display/intel_encoder.c
@@ -8,6 +8,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_encoder.h"
+#include "intel_hotplug.h"
static void intel_encoder_link_check_work_fn(struct work_struct *work)
{
@@ -37,6 +38,28 @@ void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int dela
&encoder->link_check_work, msecs_to_jiffies(delay_ms));
}
+void intel_encoder_unblock_all_hpds(struct intel_display *display)
+{
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ for_each_intel_encoder(display->drm, encoder)
+ intel_hpd_unblock(encoder);
+}
+
+void intel_encoder_block_all_hpds(struct intel_display *display)
+{
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(display))
+ return;
+
+ for_each_intel_encoder(display->drm, encoder)
+ intel_hpd_block(encoder);
+}
+
void intel_encoder_suspend_all(struct intel_display *display)
{
struct intel_encoder *encoder;
@@ -80,3 +103,21 @@ void intel_encoder_shutdown_all(struct intel_display *display)
if (encoder->shutdown_complete)
encoder->shutdown_complete(encoder);
}
+
+struct intel_digital_port *intel_dig_port_alloc(void)
+{
+ struct intel_digital_port *dig_port;
+
+ dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+ if (!dig_port)
+ return NULL;
+
+ dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
+ dig_port->dp.output_reg = INVALID_MMIO_REG;
+ dig_port->aux_ch = AUX_CH_NONE;
+ dig_port->max_lanes = 4;
+
+ mutex_init(&dig_port->hdcp.mutex);
+
+ return dig_port;
+}
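
A hedged usage sketch for the new intel_dig_port_alloc() helper; the point of the defaults it seeds (INVALID_MMIO_REG, AUX_CH_NONE, 4 lanes) is that encoder init code only has to override what it actually uses. example_init_port() and register_encoder() are hypothetical:

void register_encoder(struct intel_digital_port *dig_port);	/* hypothetical */

static int example_init_port(void)
{
	struct intel_digital_port *dig_port = intel_dig_port_alloc();

	if (!dig_port)
		return -ENOMEM;

	/*
	 * hdmi_reg/output_reg start out invalid and aux_ch is AUX_CH_NONE,
	 * so only the fields this encoder uses need to be set here.
	 */
	register_encoder(dig_port);

	return 0;
}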
diff --git a/drivers/gpu/drm/i915/display/intel_encoder.h b/drivers/gpu/drm/i915/display/intel_encoder.h
index 3fa5589f0b1c..ace0fe1a8f27 100644
--- a/drivers/gpu/drm/i915/display/intel_encoder.h
+++ b/drivers/gpu/drm/i915/display/intel_encoder.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_ENCODER_H__
#define __INTEL_ENCODER_H__
+struct intel_digital_port;
struct intel_display;
struct intel_encoder;
@@ -17,4 +18,9 @@ void intel_encoder_link_check_flush_work(struct intel_encoder *encoder);
void intel_encoder_suspend_all(struct intel_display *display);
void intel_encoder_shutdown_all(struct intel_display *display);
+void intel_encoder_block_all_hpds(struct intel_display *display);
+void intel_encoder_unblock_all_hpds(struct intel_display *display);
+
+struct intel_digital_port *intel_dig_port_alloc(void);
+
#endif /* __INTEL_ENCODER_H__ */
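
The new block/unblock helpers iterate every encoder, so they pair naturally as a bracket around work that must not race with hotplug processing. A hedged sketch; reprogram_outputs() is hypothetical:

void reprogram_outputs(struct intel_display *display);	/* hypothetical */

static void quiesced_reprogram(struct intel_display *display)
{
	intel_encoder_block_all_hpds(display);
	reprogram_outputs(display);	/* work that must not see HPD events */
	intel_encoder_unblock_all_hpds(display);
}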
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 0da842bd2f2f..b34b4961fe1c 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -9,16 +9,18 @@
#include <drm/drm_blend.h>
#include <drm/drm_gem.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
-#include "i915_drv.h"
#include "intel_bo.h"
#include "intel_display.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a))
@@ -545,8 +547,6 @@ static bool plane_has_modifier(struct intel_display *display,
u8 plane_caps,
const struct intel_modifier_desc *md)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (!IS_DISPLAY_VER(display, md->display_ver.from, md->display_ver.until))
return false;
@@ -558,15 +558,15 @@ static bool plane_has_modifier(struct intel_display *display,
* where supported.
*/
if (intel_fb_is_ccs_modifier(md->modifier) &&
- HAS_FLAT_CCS(i915) != !md->ccs.packed_aux_planes)
+ HAS_AUX_CCS(display) != !!md->ccs.packed_aux_planes)
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_BMG_CCS &&
- (GRAPHICS_VER(i915) < 20 || !display->platform.dgfx))
+ (DISPLAY_VER(display) < 14 || !display->platform.dgfx))
return false;
if (md->modifier == I915_FORMAT_MOD_4_TILED_LNL_CCS &&
- (GRAPHICS_VER(i915) < 20 || display->platform.dgfx))
+ (DISPLAY_VER(display) < 20 || display->platform.dgfx))
return false;
return true;
@@ -775,7 +775,6 @@ unsigned int
intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
struct intel_display *display = to_intel_display(fb->dev);
- struct drm_i915_private *i915 = to_i915(display->drm);
unsigned int cpp = fb->format->cpp[color_plane];
switch (fb->modifier) {
@@ -812,7 +811,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 64;
fallthrough;
case I915_FORMAT_MOD_Y_TILED:
- if (DISPLAY_VER(display) == 2 || HAS_128_BYTE_Y_TILING(i915))
+ if (HAS_128B_Y_TILING(display))
return 128;
else
return 512;
@@ -1327,7 +1326,7 @@ static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
* unclear in Bspec, for now no checking.
*/
stride = intel_fb_pitch(fb, 0, rotation);
- max_stride = plane->max_stride(plane, fb->base.format->format,
+ max_stride = plane->max_stride(plane, fb->base.format,
fb->base.modifier, rotation);
return stride > max_stride;
@@ -1973,7 +1972,8 @@ void intel_add_fb_offsets(int *x, int *y,
static
u32 intel_fb_max_stride(struct intel_display *display,
- u32 pixel_format, u64 modifier)
+ const struct drm_format_info *info,
+ u64 modifier)
{
/*
* Arbitrary limit for gen4+ chosen to match the
@@ -1983,7 +1983,7 @@ u32 intel_fb_max_stride(struct intel_display *display,
*/
if (DISPLAY_VER(display) < 4 || intel_fb_is_ccs_modifier(modifier) ||
intel_fb_modifier_uses_dpt(display, modifier))
- return intel_plane_fb_max_stride(display->drm, pixel_format, modifier);
+ return intel_plane_fb_max_stride(display, info, modifier);
else if (DISPLAY_VER(display) >= 7)
return 256 * 1024;
else
@@ -1997,8 +1997,8 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
unsigned int tile_width;
if (is_surface_linear(fb, color_plane)) {
- unsigned int max_stride = intel_plane_fb_max_stride(display->drm,
- fb->format->format,
+ unsigned int max_stride = intel_plane_fb_max_stride(display,
+ fb->format,
fb->modifier);
/*
@@ -2056,7 +2056,7 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
/* FIXME other color planes? */
stride = plane_state->view.color_plane[0].mapping_stride;
- max_stride = plane->max_stride(plane, fb->format->format,
+ max_stride = plane->max_stride(plane, fb->format,
fb->modifier, rotation);
if (stride > max_stride) {
@@ -2111,10 +2111,11 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
if (intel_fb_uses_dpt(fb))
intel_dpt_destroy(intel_fb->dpt_vm);
- intel_frontbuffer_put(intel_fb->frontbuffer);
-
intel_fb_bo_framebuffer_fini(intel_fb_bo(fb));
+ intel_frontbuffer_put(intel_fb->frontbuffer);
+
+ kfree(intel_fb->panic);
kfree(intel_fb);
}
@@ -2194,7 +2195,6 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
return ret;
flush:
- intel_bo_flush_if_display(obj);
intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);
return ret;
}
@@ -2213,38 +2213,46 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct intel_display *display = to_intel_display(obj->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- int ret = -EINVAL;
+ int ret;
int i;
- ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd);
- if (ret)
- return ret;
+ intel_fb->panic = intel_panic_alloc();
+ if (!intel_fb->panic)
+ return -ENOMEM;
+ /*
+ * intel_frontbuffer_get() must be done before
+ * intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
+ */
intel_fb->frontbuffer = intel_frontbuffer_get(obj);
if (!intel_fb->frontbuffer) {
ret = -ENOMEM;
- goto err;
+ goto err_free_panic;
}
- ret = -EINVAL;
+ ret = intel_fb_bo_framebuffer_init(obj, mode_cmd);
+ if (ret)
+ goto err_frontbuffer_put;
+
if (!drm_any_plane_has_format(display->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
drm_dbg_kms(display->drm,
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
- goto err_frontbuffer_put;
+ ret = -EINVAL;
+ goto err_bo_framebuffer_fini;
}
- max_stride = intel_fb_max_stride(display, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
+ max_stride = intel_fb_max_stride(display, info, mode_cmd->modifier[0]);
if (mode_cmd->pitches[0] > max_stride) {
drm_dbg_kms(display->drm,
"%s pitch (%u) must be at most %d\n",
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
mode_cmd->pitches[0], max_stride);
- goto err_frontbuffer_put;
+ ret = -EINVAL;
+ goto err_bo_framebuffer_fini;
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
@@ -2252,7 +2260,8 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(display->drm,
"plane 0 offset (0x%08x) must be 0\n",
mode_cmd->offsets[0]);
- goto err_frontbuffer_put;
+ ret = -EINVAL;
+ goto err_bo_framebuffer_fini;
}
drm_helper_mode_fill_fb_struct(display->drm, fb, info, mode_cmd);
@@ -2262,7 +2271,8 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
drm_dbg_kms(display->drm, "bad plane %d handle\n", i);
- goto err_frontbuffer_put;
+ ret = -EINVAL;
+ goto err_bo_framebuffer_fini;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
@@ -2270,7 +2280,8 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(display->drm,
"plane %d pitch (%d) must be at least %u byte aligned\n",
i, fb->pitches[i], stride_alignment);
- goto err_frontbuffer_put;
+ ret = -EINVAL;
+ goto err_bo_framebuffer_fini;
}
if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) {
@@ -2280,7 +2291,8 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(display->drm,
"ccs aux plane %d pitch (%d) must be %d\n",
i, fb->pitches[i], ccs_aux_stride);
- goto err_frontbuffer_put;
+ ret = -EINVAL;
+ goto err_bo_framebuffer_fini;
}
}
@@ -2289,7 +2301,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = intel_fill_fb_info(display, intel_fb);
if (ret)
- goto err_frontbuffer_put;
+ goto err_bo_framebuffer_fini;
if (intel_fb_uses_dpt(fb)) {
struct i915_address_space *vm;
@@ -2315,10 +2327,13 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
err_free_dpt:
if (intel_fb_uses_dpt(fb))
intel_dpt_destroy(intel_fb->dpt_vm);
+err_bo_framebuffer_fini:
+ intel_fb_bo_framebuffer_fini(obj);
err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
-err:
- intel_fb_bo_framebuffer_fini(obj);
+err_free_panic:
+ kfree(intel_fb->panic);
+
return ret;
}
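
The relabelled error paths above follow the usual kernel goto-unwind idiom: acquire resources in order, release them in reverse order, and name each label after the first cleanup it performs so a new exit point just jumps to the right depth. A minimal generic sketch, with all helpers hypothetical:

void *alloc_a(void);		/* hypothetical */
void *alloc_b(void);		/* hypothetical */
void free_a(void *a);		/* hypothetical */
void free_b(void *b);		/* hypothetical */

struct ctx {
	void *a;
	void *b;
};

int init_hw(struct ctx *c);	/* hypothetical */

static int setup_resources(struct ctx *c)
{
	int ret;

	c->a = alloc_a();
	if (!c->a)
		return -ENOMEM;

	c->b = alloc_b();
	if (!c->b) {
		ret = -ENOMEM;
		goto err_free_a;
	}

	ret = init_hw(c);
	if (ret)
		goto err_free_b;

	return 0;

err_free_b:
	free_b(c->b);
err_free_a:
	free_a(c->a);
	return ret;
}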
@@ -2342,6 +2357,17 @@ intel_user_framebuffer_create(struct drm_device *dev,
return fb;
}
+struct intel_framebuffer *intel_framebuffer_alloc(void)
+{
+ struct intel_framebuffer *intel_fb;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return NULL;
+
+ return intel_fb;
+}
+
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
const struct drm_format_info *info,
@@ -2350,7 +2376,7 @@ intel_framebuffer_create(struct drm_gem_object *obj,
struct intel_framebuffer *intel_fb;
int ret;
- intel_fb = intel_bo_alloc_framebuffer();
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 403b8b63721a..22514d5f2bb6 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -104,6 +104,9 @@ int intel_framebuffer_init(struct intel_framebuffer *ifb,
struct drm_gem_object *obj,
const struct drm_format_info *info,
struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct intel_framebuffer *intel_framebuffer_alloc(void);
+
struct drm_framebuffer *
intel_framebuffer_create(struct drm_gem_object *obj,
const struct drm_format_info *info,
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
index b0e8b89f7ce8..bfecd73d5fa0 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_object.h"
@@ -18,8 +19,7 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
/* Nothing to do for i915 */
}
-int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
- struct drm_gem_object *_obj,
+int intel_fb_bo_framebuffer_init(struct drm_gem_object *_obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
index eefcb05a99f0..d775773c6c03 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h
@@ -14,8 +14,7 @@ struct drm_mode_fb_cmd2;
void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj);
-int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
- struct drm_gem_object *obj,
+int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_gem_object *
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 5a0151775a3a..7249b784fbba 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -7,10 +7,13 @@
* DOC: display pinning helpers
*/
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_domain.h"
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "i915_vma.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
@@ -151,7 +154,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb,
* happy to scanout from anywhere within its global aperture.
*/
pinctl = 0;
- if (HAS_GMCH(dev_priv))
+ if (HAS_GMCH(display))
pinctl |= PIN_MAPPABLE;
i915_gem_ww_ctx_init(&ww, true);
@@ -192,7 +195,7 @@ retry:
* mode that matches the user configuration.
*/
ret = i915_vma_pin_fence(vma);
- if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
+ if (ret != 0 && DISPLAY_VER(display) < 4) {
i915_vma_unpin(vma);
goto err_unpin;
}
@@ -260,6 +263,7 @@ intel_plane_fb_vtd_guard(const struct intel_plane_state *plane_state)
int intel_plane_pin_fb(struct intel_plane_state *plane_state,
const struct intel_plane_state *old_plane_state)
{
+ struct intel_display *display = to_intel_display(plane_state);
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
const struct intel_framebuffer *fb =
to_intel_framebuffer(plane_state->hw.fb);
@@ -277,17 +281,6 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
plane_state->ggtt_vma = vma;
- /*
- * Pre-populate the dma address before we enter the vblank
- * evade critical section as i915_gem_object_get_dma_address()
- * will trigger might_sleep() even if it won't actually sleep,
- * which is the case when the fb has already been pinned.
- */
- if (intel_plane_needs_physical(plane)) {
- struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
-
- plane_state->phys_dma_addr = i915_gem_object_get_dma_address(obj, 0);
- }
} else {
unsigned int alignment = intel_plane_fb_min_alignment(plane_state);
@@ -309,6 +302,28 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
plane_state->dpt_vma = vma;
WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
+
+ /*
+ * The DPT object contains only one vma, and there is no VT-d
+ * guard, so the VMA's offset within the DPT is always 0.
+ */
+ drm_WARN_ON(display->drm, intel_dpt_offset(plane_state->dpt_vma));
+ }
+
+ /*
+ * Pre-populate the dma address before we enter the vblank
+ * evade critical section as i915_gem_object_get_dma_address()
+ * will trigger might_sleep() even if it won't actually sleep,
+ * which is the case when the fb has already been pinned.
+ */
+ if (intel_plane_needs_physical(plane)) {
+ struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+
+ plane_state->surf = i915_gem_object_get_dma_address(obj, 0) +
+ plane->surf_offset(plane_state);
+ } else {
+ plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma) +
+ plane->surf_offset(plane_state);
}
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 685ac98bd001..437d2fda20a7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -43,23 +43,23 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_stolen.h"
#include "gt/intel_gt_types.h"
#include "i915_drv.h"
-#include "i915_utils.h"
#include "i915_vgpu.h"
#include "i915_vma.h"
#include "i9xx_plane_regs.h"
-#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_device.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_display_wa.h"
#include "intel_fbc.h"
#include "intel_fbc_regs.h"
@@ -98,15 +98,12 @@ struct intel_fbc {
struct intel_display *display;
const struct intel_fbc_funcs *funcs;
- /*
- * This is always the inner lock when overlapping with
- * struct_mutex and it's the outer lock when overlapping
- * with stolen_lock.
- */
+ /* This is always the outer lock when overlapping with stolen_lock */
struct mutex lock;
unsigned int busy_bits;
- struct i915_stolen_fb compressed_fb, compressed_llb;
+ struct intel_stolen_node *compressed_fb;
+ struct intel_stolen_node *compressed_llb;
enum intel_fbc_id id;
@@ -145,15 +142,18 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane
return stride;
}
-static unsigned int intel_fbc_cfb_cpp(void)
+static unsigned int intel_fbc_cfb_cpp(const struct intel_plane_state *plane_state)
{
- return 4; /* FBC always 4 bytes per pixel */
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int cpp = fb->format->cpp[0];
+
+ return max(cpp, 4);
}
/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
static unsigned int intel_fbc_plane_cfb_stride(const struct intel_plane_state *plane_state)
{
- unsigned int cpp = intel_fbc_cfb_cpp();
+ unsigned int cpp = intel_fbc_cfb_cpp(plane_state);
return intel_fbc_plane_stride(plane_state) * cpp;
}
@@ -207,7 +207,7 @@ static unsigned int intel_fbc_cfb_stride(const struct intel_plane_state *plane_s
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
unsigned int stride = intel_fbc_plane_cfb_stride(plane_state);
unsigned int width = drm_rect_width(&plane_state->uapi.src) >> 16;
- unsigned int cpp = intel_fbc_cfb_cpp();
+ unsigned int cpp = intel_fbc_cfb_cpp(plane_state);
return _intel_fbc_cfb_stride(display, cpp, width, stride);
}
@@ -328,8 +328,8 @@ static void i8xx_fbc_deactivate(struct intel_fbc *fbc)
intel_de_write(display, FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_de_wait_for_clear(display, FBC_STATUS,
- FBC_STAT_COMPRESSING, 10)) {
+ if (intel_de_wait_for_clear_ms(display, FBC_STATUS,
+ FBC_STAT_COMPRESSING, 10)) {
drm_dbg_kms(display->drm, "FBC idle timed out\n");
return;
}
@@ -380,20 +380,19 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc)
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
drm_WARN_ON(display->drm,
- range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
- i915_gem_stolen_node_offset(&fbc->compressed_fb),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm),
+ i915_gem_stolen_node_offset(fbc->compressed_fb),
U32_MAX));
drm_WARN_ON(display->drm,
- range_overflows_end_t(u64, i915_gem_stolen_area_address(i915),
- i915_gem_stolen_node_offset(&fbc->compressed_llb),
+ range_end_overflows_t(u64, i915_gem_stolen_area_address(display->drm),
+ i915_gem_stolen_node_offset(fbc->compressed_llb),
U32_MAX));
intel_de_write(display, FBC_CFB_BASE,
- i915_gem_stolen_node_address(i915, &fbc->compressed_fb));
+ i915_gem_stolen_node_address(fbc->compressed_fb));
intel_de_write(display, FBC_LL_BASE,
- i915_gem_stolen_node_address(i915, &fbc->compressed_llb));
+ i915_gem_stolen_node_address(fbc->compressed_llb));
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@@ -501,7 +500,7 @@ static void g4x_fbc_program_cfb(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
intel_de_write(display, DPFC_CB_BASE,
- i915_gem_stolen_node_offset(&fbc->compressed_fb));
+ i915_gem_stolen_node_offset(fbc->compressed_fb));
}
static const struct intel_fbc_funcs g4x_fbc_funcs = {
@@ -570,7 +569,7 @@ static void ilk_fbc_program_cfb(struct intel_fbc *fbc)
struct intel_display *display = fbc->display;
intel_de_write(display, ILK_DPFC_CB_BASE(fbc->id),
- i915_gem_stolen_node_offset(&fbc->compressed_fb));
+ i915_gem_stolen_node_offset(fbc->compressed_fb));
}
static const struct intel_fbc_funcs ilk_fbc_funcs = {
@@ -801,7 +800,6 @@ static u64 intel_fbc_cfb_base_max(struct intel_display *display)
static u64 intel_fbc_stolen_end(struct intel_display *display)
{
- struct drm_i915_private __maybe_unused *i915 = to_i915(display->drm);
u64 end;
/* The FBC hardware for BDW/SKL doesn't have access to the stolen
@@ -810,7 +808,7 @@ static u64 intel_fbc_stolen_end(struct intel_display *display)
* underruns, even if that range is not reserved by the BIOS. */
if (display->platform.broadwell ||
(DISPLAY_VER(display) == 9 && !display->platform.broxton))
- end = i915_gem_stolen_area_size(i915) - 8 * 1024 * 1024;
+ end = i915_gem_stolen_area_size(display->drm) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -839,20 +837,19 @@ static int find_compression_limit(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
u64 end = intel_fbc_stolen_end(display);
int ret, limit = min_limit;
size /= limit;
/* Try to over-allocate to reduce reallocations and fragmentation. */
- ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
+ ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size <<= 1, 4096, 0, end);
if (ret == 0)
return limit;
for (; limit <= intel_fbc_max_limit(display); limit <<= 1) {
- ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb,
+ ret = i915_gem_stolen_insert_node_in_range(fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
return limit;
@@ -865,17 +862,15 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
unsigned int size, int min_limit)
{
struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
int ret;
drm_WARN_ON(display->drm,
- i915_gem_stolen_node_allocated(&fbc->compressed_fb));
+ i915_gem_stolen_node_allocated(fbc->compressed_fb));
drm_WARN_ON(display->drm,
- i915_gem_stolen_node_allocated(&fbc->compressed_llb));
+ i915_gem_stolen_node_allocated(fbc->compressed_llb));
if (DISPLAY_VER(display) < 5 && !display->platform.g4x) {
- ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb,
- 4096, 4096);
+ ret = i915_gem_stolen_insert_node(fbc->compressed_llb, 4096, 4096);
if (ret)
goto err;
}
@@ -891,14 +886,14 @@ static int intel_fbc_alloc_cfb(struct intel_fbc *fbc,
drm_dbg_kms(display->drm,
"reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
- i915_gem_stolen_node_size(&fbc->compressed_fb), fbc->limit);
+ i915_gem_stolen_node_size(fbc->compressed_fb), fbc->limit);
return 0;
err_llb:
- if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
- i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
+ if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
+ i915_gem_stolen_remove_node(fbc->compressed_llb);
err:
- if (i915_gem_stolen_initialized(i915))
+ if (i915_gem_stolen_initialized(display->drm))
drm_info_once(display->drm,
"not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
@@ -936,9 +931,12 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
if (IS_DISPLAY_VER(display, 11, 12))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
-
- /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
- if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
+ /*
+ * Wa_22014263786
+ * Fixes: Screen flicker with FBC and Package C state enabled
+ * Workaround: Forced SLB invalidation before start of new frame.
+ */
+ if (intel_display_wa(display, 22014263786))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
@@ -949,16 +947,13 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
{
- struct intel_display *display = fbc->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
-
if (WARN_ON(intel_fbc_hw_is_active(fbc)))
return;
- if (i915_gem_stolen_node_allocated(&fbc->compressed_llb))
- i915_gem_stolen_remove_node(i915, &fbc->compressed_llb);
- if (i915_gem_stolen_node_allocated(&fbc->compressed_fb))
- i915_gem_stolen_remove_node(i915, &fbc->compressed_fb);
+ if (i915_gem_stolen_node_allocated(fbc->compressed_llb))
+ i915_gem_stolen_remove_node(fbc->compressed_llb);
+ if (i915_gem_stolen_node_allocated(fbc->compressed_fb))
+ i915_gem_stolen_remove_node(fbc->compressed_fb);
}
void intel_fbc_cleanup(struct intel_display *display)
@@ -971,6 +966,9 @@ void intel_fbc_cleanup(struct intel_display *display)
__intel_fbc_cleanup_cfb(fbc);
mutex_unlock(&fbc->lock);
+ i915_gem_stolen_node_free(fbc->compressed_fb);
+ i915_gem_stolen_node_free(fbc->compressed_llb);
+
kfree(fbc);
}
}
@@ -1087,11 +1085,57 @@ static bool lnl_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_
}
}
+static bool
+xe3p_lpd_fbc_fp16_format_is_valid(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool xe3p_lpd_fbc_pixel_format_is_valid(const struct intel_plane_state *plane_state)
+{
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+
+ if (lnl_fbc_pixel_format_is_valid(plane_state))
+ return true;
+
+ if (xe3p_lpd_fbc_fp16_format_is_valid(plane_state))
+ return true;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB16161616:
+ case DRM_FORMAT_XBGR16161616:
+ case DRM_FORMAT_ARGB16161616:
+ case DRM_FORMAT_ABGR16161616:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool
+intel_fbc_is_enable_pixel_normalizer(const struct intel_plane_state *plane_state)
+{
+ struct intel_display *display = to_intel_display(plane_state);
+
+ return DISPLAY_VER(display) >= 35 &&
+ xe3p_lpd_fbc_fp16_format_is_valid(plane_state);
+}
+
static bool pixel_format_is_valid(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state->uapi.plane->dev);
- if (DISPLAY_VER(display) >= 20)
+ if (DISPLAY_VER(display) >= 35)
+ return xe3p_lpd_fbc_pixel_format_is_valid(plane_state);
+ else if (DISPLAY_VER(display) >= 20)
return lnl_fbc_pixel_format_is_valid(plane_state);
else if (DISPLAY_VER(display) >= 5 || display->platform.g4x)
return g4x_fbc_pixel_format_is_valid(plane_state);
@@ -1359,7 +1403,7 @@ static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
return intel_fbc_min_limit(plane_state) <= fbc->limit &&
intel_fbc_cfb_size(plane_state) <= fbc->limit *
- i915_gem_stolen_node_size(&fbc->compressed_fb);
+ i915_gem_stolen_node_size(fbc->compressed_fb);
}
static bool intel_fbc_is_ok(const struct intel_plane_state *plane_state)
@@ -1425,6 +1469,18 @@ intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
}
}
+static int _intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /* WaFbcExceedCdClockThreshold:hsw,bdw */
+ if (display->platform.haswell || display->platform.broadwell)
+ return DIV_ROUND_UP(crtc_state->pixel_rate * 100, 95);
+
+ /* no FBC specific limits to worry about */
+ return 0;
+}
+
static int intel_fbc_check_plane(struct intel_atomic_state *state,
struct intel_plane *plane)
{
@@ -1440,7 +1496,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
if (!fbc)
return 0;
- if (!i915_gem_stolen_initialized(i915)) {
+ if (!i915_gem_stolen_initialized(display->drm)) {
plane_state->no_fbc_reason = "stolen memory not initialised";
return 0;
}
@@ -1460,13 +1516,14 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_needs_wa_16023588340(display)) {
+ if (intel_display_wa(display, 16023588340)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
/* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
- if (i915_vtd_active(i915) && (display->platform.skylake || display->platform.broxton)) {
+ if (intel_display_vtd_active(display) &&
+ (display->platform.skylake || display->platform.broxton)) {
plane_state->no_fbc_reason = "VT-d enabled";
return 0;
}
@@ -1550,32 +1607,23 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (DISPLAY_VER(display) >= 9 &&
+ if (IS_DISPLAY_VER(display, 9, 12) &&
plane_state->view.color_plane[0].y & 3) {
plane_state->no_fbc_reason = "plane start Y offset misaligned";
return 0;
}
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
- if (DISPLAY_VER(display) >= 11 &&
+ if (IS_DISPLAY_VER(display, 9, 12) &&
(plane_state->view.color_plane[0].y +
(drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
return 0;
}
- /* WaFbcExceedCdClockThreshold:hsw,bdw */
- if (display->platform.haswell || display->platform.broadwell) {
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- if (crtc_state->pixel_rate >= intel_cdclk_logical(cdclk_state) * 95 / 100) {
- plane_state->no_fbc_reason = "pixel rate too high";
- return 0;
- }
+ if (_intel_fbc_min_cdclk(crtc_state) > display->cdclk.max_cdclk_freq) {
+ plane_state->no_fbc_reason = "pixel rate too high";
+ return 0;
}
plane_state->no_fbc_reason = NULL;
@@ -1583,6 +1631,27 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
+int intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ int min_cdclk;
+
+ if (!plane->fbc)
+ return 0;
+
+ min_cdclk = _intel_fbc_min_cdclk(crtc_state);
+
+ /*
+ * Do not ask for more than the max CDCLK frequency;
+ * if that is not enough, FBC will simply not be used.
+ */
+ if (min_cdclk > display->cdclk.max_cdclk_freq)
+ return 0;
+
+ return min_cdclk;
+}
static bool intel_fbc_can_flip_nuke(struct intel_atomic_state *state,
struct intel_crtc *crtc,
@@ -2087,6 +2156,13 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
if (!fbc)
return NULL;
+ fbc->compressed_fb = i915_gem_stolen_node_alloc(display->drm);
+ if (!fbc->compressed_fb)
+ goto err;
+ fbc->compressed_llb = i915_gem_stolen_node_alloc(display->drm);
+ if (!fbc->compressed_llb)
+ goto err;
+
fbc->id = fbc_id;
fbc->display = display;
INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
@@ -2106,6 +2182,13 @@ static struct intel_fbc *intel_fbc_create(struct intel_display *display,
fbc->funcs = &i8xx_fbc_funcs;
return fbc;
+
+err:
+ i915_gem_stolen_node_free(fbc->compressed_llb);
+ i915_gem_stolen_node_free(fbc->compressed_fb);
+ kfree(fbc);
+
+ return NULL;
}
/**
@@ -2240,10 +2323,9 @@ void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
/* FIXME: remove this once igt is on board with per-crtc stuff */
void intel_fbc_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
struct intel_fbc *fbc;
fbc = display->fbc[INTEL_FBC_A];
if (fbc)
- intel_fbc_debugfs_add(fbc, minor->debugfs_root);
+ intel_fbc_debugfs_add(fbc, display->drm->debugfs_root);
}
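
The new intel_fbc_min_cdclk() exports the HSW/BDW FBC CDCLK threshold that intel_fbc_check_plane() previously evaluated inline against the logical CDCLK. A minimal caller sketch, assuming a hypothetical aggregator (example_plane_min_cdclk() and other_min_cdclk are invented for illustration):

    static int example_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
                                       int other_min_cdclk)
    {
            /*
             * Worked example: on HSW, pixel_rate = 148500 kHz gives
             * DIV_ROUND_UP(148500 * 100, 95) = 156316 kHz. The helper
             * returns 0 when that exceeds max_cdclk_freq, since FBC is
             * then simply not used rather than raising CDCLK further.
             */
            return max(other_min_cdclk, intel_fbc_min_cdclk(crtc_state));
    }
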
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 0e715cb6b4e6..91424563206a 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -28,6 +28,7 @@ enum intel_fbc_id {
};
int intel_fbc_atomic_check(struct intel_atomic_state *state);
+int intel_fbc_min_cdclk(const struct intel_crtc_state *crtc_state);
bool intel_fbc_pre_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_post_update(struct intel_atomic_state *state,
@@ -52,5 +53,7 @@ void intel_fbc_prepare_dirty_rect(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_dirty_rect_update_noarm(struct intel_dsb *dsb,
struct intel_plane *plane);
+bool
+intel_fbc_is_enable_pixel_normalizer(const struct intel_plane_state *plane_state);
#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 7c4709d58aa3..9cd03e2adeb2 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -146,8 +146,6 @@ static void intel_fbdev_fb_destroy(struct fb_info *info)
drm_framebuffer_remove(fb_helper->fb);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
__diag_push();
@@ -207,14 +205,70 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_set_suspend = intelfb_set_suspend,
};
+static void intel_fbdev_fill_mode_cmd(struct drm_fb_helper_surface_size *sizes,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd->flags = DRM_MODE_FB_MODIFIERS;
+ mode_cmd->width = sizes->surface_width;
+ mode_cmd->height = sizes->surface_height;
+
+ mode_cmd->pitches[0] = intel_fbdev_fb_pitch_align(mode_cmd->width * DIV_ROUND_UP(sizes->surface_bpp, 8));
+ mode_cmd->pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ mode_cmd->modifier[0] = DRM_FORMAT_MOD_LINEAR;
+}
+
+static struct intel_framebuffer *
+__intel_fbdev_fb_alloc(struct intel_display *display,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *obj;
+ int size;
+
+ intel_fbdev_fill_mode_cmd(sizes, &mode_cmd);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = PAGE_ALIGN(size);
+
+ obj = intel_fbdev_fb_bo_create(display->drm, size);
+ if (IS_ERR(obj)) {
+ fb = ERR_CAST(obj);
+ goto err;
+ }
+
+ fb = intel_framebuffer_create(obj,
+ drm_get_format_info(display->drm,
+ mode_cmd.pixel_format,
+ mode_cmd.modifier[0]),
+ &mode_cmd);
+ if (IS_ERR(fb)) {
+ intel_fbdev_fb_bo_destroy(obj);
+ goto err;
+ }
+
+ drm_gem_object_put(obj);
+
+ return to_intel_framebuffer(fb);
+
+err:
+ return ERR_CAST(fb);
+}
+
int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_display *display = to_intel_display(helper->dev);
struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
struct intel_framebuffer *fb = ifbdev->fb;
+ struct fb_info *info = helper->info;
struct ref_tracker *wakeref;
- struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
bool prealloc = false;
@@ -237,7 +291,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
if (!fb || drm_WARN_ON(display->drm, !intel_fb_bo(&fb->base))) {
drm_dbg_kms(display->drm,
"no BIOS fb, allocating a new one\n");
- fb = intel_fbdev_fb_alloc(helper, sizes);
+
+ fb = __intel_fbdev_fb_alloc(display, sizes);
if (IS_ERR(fb))
return PTR_ERR(fb);
} else {
@@ -263,13 +318,6 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto out_unlock;
}
- info = drm_fb_helper_alloc_info(helper);
- if (IS_ERR(info)) {
- drm_err(display->drm, "Failed to allocate fb_info (%pe)\n", info);
- ret = PTR_ERR(info);
- goto out_unpin;
- }
-
helper->funcs = &intel_fb_helper_funcs;
helper->fb = &fb->base;
@@ -277,7 +325,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
obj = intel_fb_bo(&fb->base);
- ret = intel_fbdev_fb_fill_info(display, info, obj, vma);
+ ret = intel_fbdev_fb_fill_info(display->drm, info, obj, vma);
if (ret)
goto out_unpin;
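
intel_fbdev_fill_mode_cmd() and the PAGE_ALIGN() in __intel_fbdev_fb_alloc() fully determine the BO size from the requested surface. A worked example with invented numbers (1920x1080 at 32 bpp; 24 bpp requests are bumped to 32 before this point):

    u32 stride = intel_fbdev_fb_pitch_align(1920 * DIV_ROUND_UP(32, 8));
    /* ALIGN(7680, 64) = 7680, already 64-byte aligned */
    int size = PAGE_ALIGN(stride * 1080);
    /* 8294400 bytes, exactly 2025 pages of 4 KiB */
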
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index 210aee9ae88b..c3202ba141c5 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -3,40 +3,24 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/drm_fb_helper.h>
+#include <linux/fb.h>
+
+#include <drm/drm_print.h>
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
-#include "intel_display_core.h"
-#include "intel_display_types.h"
-#include "intel_fb.h"
#include "intel_fbdev_fb.h"
-struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+u32 intel_fbdev_fb_pitch_align(u32 stride)
{
- struct intel_display *display = to_intel_display(helper->dev);
- struct drm_i915_private *dev_priv = to_i915(display->drm);
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd = {};
- struct drm_i915_gem_object *obj;
- int size;
-
- /* we don't do packed 24bpp */
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
- DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
+ return ALIGN(stride, 64);
+}
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = PAGE_ALIGN(size);
+struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
+{
+ struct drm_i915_private *dev_priv = to_i915(drm);
+ struct drm_i915_gem_object *obj;
obj = ERR_PTR(-ENODEV);
if (HAS_LMEM(dev_priv)) {
@@ -51,31 +35,29 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
*
* Also skip stolen on MTL as Wa_22018444074 mitigation.
*/
- if (!display->platform.meteorlake && size * 2 < dev_priv->dsm.usable_size)
+ if (!IS_METEORLAKE(dev_priv) && size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
}
if (IS_ERR(obj)) {
- drm_err(display->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ drm_err(drm, "failed to allocate framebuffer (%pe)\n", obj);
return ERR_PTR(-ENOMEM);
}
- fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj),
- drm_get_format_info(display->drm,
- mode_cmd.pixel_format,
- mode_cmd.modifier[0]),
- &mode_cmd);
- i915_gem_object_put(obj);
+ return &obj->base;
+}
- return to_intel_framebuffer(fb);
+void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj)
+{
+ drm_gem_object_put(obj);
}
-int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
- struct drm_i915_private *i915 = to_i915(display->drm);
+ struct drm_i915_private *i915 = to_i915(drm);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
struct i915_gem_ww_ctx ww;
void __iomem *vaddr;
@@ -107,7 +89,7 @@ int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info
vaddr = i915_vma_pin_iomap(vma);
if (IS_ERR(vaddr)) {
- drm_err(display->drm,
+ drm_err(drm,
"Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
ret = PTR_ERR(vaddr);
continue;
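
intel_fbdev_fb_bo_create() keeps the existing backing-store fallback chain: seed obj with an error pointer, try the preferred region, and keep falling back while obj is still an ERR_PTR. The idiom in isolation, as a sketch (alloc_preferred()/alloc_fallback() are placeholders, not i915 APIs):

    struct drm_i915_gem_object *obj = ERR_PTR(-ENODEV);

    if (preferred_usable)            /* e.g. HAS_LMEM(), or stolen large enough */
            obj = alloc_preferred(size);
    if (IS_ERR(obj))                 /* preferred path absent or failed */
            obj = alloc_fallback(size);
    if (IS_ERR(obj))
            return ERR_PTR(-ENOMEM); /* all paths exhausted */
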
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index cb7957272715..fd0b3775dc1f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -6,16 +6,18 @@
#ifndef __INTEL_FBDEV_FB_H__
#define __INTEL_FBDEV_FB_H__
-struct drm_fb_helper;
-struct drm_fb_helper_surface_size;
+#include <linux/types.h>
+
+struct drm_device;
struct drm_gem_object;
+struct drm_mode_fb_cmd2;
struct fb_info;
struct i915_vma;
-struct intel_display;
-struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes);
-int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+u32 intel_fbdev_fb_pitch_align(u32 stride);
+struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size);
+void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj);
+int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *obj, struct i915_vma *vma);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 8039a84671cc..5bb0090dd5ed 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -9,13 +9,13 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
@@ -292,34 +292,6 @@ int intel_fdi_link_freq(struct intel_display *display,
return display->fdi.pll_freq;
}
-/**
- * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp
- * @crtc_state: the crtc state
- *
- * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
- * call this function during state computation in the simple case where the
- * link bpp will always match the pipe bpp. This is the case for all non-DP
- * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
- * of DSC compression.
- *
- * Returns %true in case of success, %false if pipe bpp would need to be
- * reduced below its valid range.
- */
-bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
-{
- int pipe_bpp = min(crtc_state->pipe_bpp,
- fxp_q4_to_int(crtc_state->max_link_bpp_x16));
-
- pipe_bpp = rounddown(pipe_bpp, 2 * 3);
-
- if (pipe_bpp < 6 * 3)
- return false;
-
- crtc_state->pipe_bpp = pipe_bpp;
-
- return true;
-}
-
int ilk_fdi_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index ad5e103c38a8..1cd08df9b0c2 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -20,7 +20,6 @@ struct intel_link_bw_limits;
int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state);
int intel_fdi_link_freq(struct intel_display *display,
const struct intel_crtc_state *pipe_config);
-bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state);
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config);
int intel_fdi_atomic_check_link(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c
index 6ab2272ab2df..1e9550cb66a3 100644
--- a/drivers/gpu/drm/i915/display/intel_flipq.c
+++ b/drivers/gpu/drm/i915/display/intel_flipq.c
@@ -7,16 +7,16 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
-#include "intel_step.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
-#include "intel_flipq.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dsb.h"
+#include "intel_flipq.h"
+#include "intel_step.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
@@ -163,10 +163,10 @@ static void intel_flipq_preempt(struct intel_crtc *crtc, bool preempt)
PIPEDMC_FQ_CTRL_PREEMPT, preempt ? PIPEDMC_FQ_CTRL_PREEMPT : 0);
if (preempt &&
- intel_de_wait_for_clear(display,
- PIPEDMC_FQ_STATUS(crtc->pipe),
- PIPEDMC_FQ_STATUS_BUSY,
- intel_flipq_preempt_timeout_ms(display)))
+ intel_de_wait_for_clear_ms(display,
+ PIPEDMC_FQ_STATUS(crtc->pipe),
+ PIPEDMC_FQ_STATUS_BUSY,
+ intel_flipq_preempt_timeout_ms(display)))
drm_err(display->drm, "[CRTC:%d:%s] flip queue preempt timeout\n",
crtc->base.base.id, crtc->base.name);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 43be5377ddc1..03c4978fa5ec 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -56,9 +56,8 @@
*/
#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
-#include "i915_active.h"
-#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
@@ -103,51 +102,6 @@ static void frontbuffer_flush(struct intel_display *display,
}
/**
- * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @display: display device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after scheduling a flip on @obj. The actual
- * frontbuffer flushing will be delayed until completion is signalled with
- * intel_frontbuffer_flip_complete. If an invalidate happens in between this
- * flush will be cancelled.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_prepare(struct intel_display *display,
- unsigned frontbuffer_bits)
-{
- spin_lock(&display->fb_tracking.lock);
- display->fb_tracking.flip_bits |= frontbuffer_bits;
- /* Remove stale busy bits due to the old buffer. */
- display->fb_tracking.busy_bits &= ~frontbuffer_bits;
- spin_unlock(&display->fb_tracking.lock);
-}
-
-/**
- * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @display: display device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called after the flip has been latched and will complete
- * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
- *
- * Can be called without any locks held.
- */
-void intel_frontbuffer_flip_complete(struct intel_display *display,
- unsigned frontbuffer_bits)
-{
- spin_lock(&display->fb_tracking.lock);
- /* Mask any cancelled flips. */
- frontbuffer_bits &= display->fb_tracking.flip_bits;
- display->fb_tracking.flip_bits &= ~frontbuffer_bits;
- spin_unlock(&display->fb_tracking.lock);
-
- if (frontbuffer_bits)
- frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
-}
-
-/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
* @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
@@ -173,12 +127,11 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
- struct intel_display *display = to_intel_display(front->obj->dev);
+ struct intel_display *display = front->display;
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
display->fb_tracking.busy_bits |= frontbuffer_bits;
- display->fb_tracking.flip_bits &= ~frontbuffer_bits;
spin_unlock(&display->fb_tracking.lock);
}
@@ -194,7 +147,10 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
- struct intel_display *display = to_intel_display(front->obj->dev);
+ struct intel_display *display = front->display;
+
+ if (origin == ORIGIN_DIRTYFB)
+ intel_bo_frontbuffer_flush_for_display(front);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -208,12 +164,16 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
frontbuffer_flush(display, frontbuffer_bits, origin);
}
+static void intel_frontbuffer_ref(struct intel_frontbuffer *front)
+{
+ intel_bo_frontbuffer_ref(front);
+}
+
static void intel_frontbuffer_flush_work(struct work_struct *work)
{
struct intel_frontbuffer *front =
container_of(work, struct intel_frontbuffer, flush_work);
- intel_bo_flush_if_display(front->obj);
intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);
intel_frontbuffer_put(front);
}
@@ -230,85 +190,31 @@ void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front)
if (!front)
return;
- kref_get(&front->ref);
+ intel_frontbuffer_ref(front);
if (!schedule_work(&front->flush_work))
intel_frontbuffer_put(front);
}
-static int frontbuffer_active(struct i915_active *ref)
+void intel_frontbuffer_init(struct intel_frontbuffer *front, struct drm_device *drm)
{
- struct intel_frontbuffer *front =
- container_of(ref, typeof(*front), write);
-
- kref_get(&front->ref);
- return 0;
+ front->display = to_intel_display(drm);
+ atomic_set(&front->bits, 0);
+ INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work);
}
-static void frontbuffer_retire(struct i915_active *ref)
+void intel_frontbuffer_fini(struct intel_frontbuffer *front)
{
- struct intel_frontbuffer *front =
- container_of(ref, typeof(*front), write);
-
- intel_frontbuffer_flush(front, ORIGIN_CS);
- intel_frontbuffer_put(front);
+ drm_WARN_ON(front->display->drm, atomic_read(&front->bits));
}
-static void frontbuffer_release(struct kref *ref)
- __releases(&to_intel_display(front->obj->dev)->fb_tracking.lock)
+struct intel_frontbuffer *intel_frontbuffer_get(struct drm_gem_object *obj)
{
- struct intel_frontbuffer *ret, *front =
- container_of(ref, typeof(*front), ref);
- struct drm_gem_object *obj = front->obj;
- struct intel_display *display = to_intel_display(obj->dev);
-
- drm_WARN_ON(display->drm, atomic_read(&front->bits));
-
- i915_ggtt_clear_scanout(to_intel_bo(obj));
-
- ret = intel_bo_set_frontbuffer(obj, NULL);
- drm_WARN_ON(display->drm, ret);
- spin_unlock(&display->fb_tracking.lock);
-
- i915_active_fini(&front->write);
- kfree_rcu(front, rcu);
-}
-
-struct intel_frontbuffer *
-intel_frontbuffer_get(struct drm_gem_object *obj)
-{
- struct intel_display *display = to_intel_display(obj->dev);
- struct intel_frontbuffer *front, *cur;
-
- front = intel_bo_get_frontbuffer(obj);
- if (front)
- return front;
-
- front = kmalloc(sizeof(*front), GFP_KERNEL);
- if (!front)
- return NULL;
-
- front->obj = obj;
- kref_init(&front->ref);
- atomic_set(&front->bits, 0);
- i915_active_init(&front->write,
- frontbuffer_active,
- frontbuffer_retire,
- I915_ACTIVE_RETIRE_SLEEPS);
- INIT_WORK(&front->flush_work, intel_frontbuffer_flush_work);
-
- spin_lock(&display->fb_tracking.lock);
- cur = intel_bo_set_frontbuffer(obj, front);
- spin_unlock(&display->fb_tracking.lock);
- if (cur != front)
- kfree(front);
- return cur;
+ return intel_bo_frontbuffer_get(obj);
}
void intel_frontbuffer_put(struct intel_frontbuffer *front)
{
- kref_put_lock(&front->ref,
- frontbuffer_release,
- &to_intel_display(front->obj->dev)->fb_tracking.lock);
+ intel_bo_frontbuffer_put(front);
}
/**
@@ -337,17 +243,13 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE);
if (old) {
- struct intel_display *display = to_intel_display(old->obj->dev);
-
- drm_WARN_ON(display->drm,
+ drm_WARN_ON(old->display->drm,
!(atomic_read(&old->bits) & frontbuffer_bits));
atomic_andnot(frontbuffer_bits, &old->bits);
}
if (new) {
- struct intel_display *display = to_intel_display(new->obj->dev);
-
- drm_WARN_ON(display->drm,
+ drm_WARN_ON(new->display->drm,
atomic_read(&new->bits) & frontbuffer_bits);
atomic_or(frontbuffer_bits, &new->bits);
}
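
With the refcount delegated to intel_bo_frontbuffer_get()/intel_bo_frontbuffer_put(), the exported intel_frontbuffer_init()/intel_frontbuffer_fini() pair lets the GEM side embed the tracking struct in its own object. A minimal layout sketch under that assumption (example_bo is invented):

    struct example_bo {
            struct drm_gem_object base;
            struct intel_frontbuffer front; /* lifetime owned by the bo */
    };

    static void example_bo_init(struct example_bo *bo, struct drm_device *drm)
    {
            intel_frontbuffer_init(&bo->front, drm);
    }

    static void example_bo_release(struct example_bo *bo)
    {
            /* warns if any frontbuffer bits are still set */
            intel_frontbuffer_fini(&bo->front);
    }
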
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 2fee12eaf9b6..22677acb4c06 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -26,10 +26,9 @@
#include <linux/atomic.h>
#include <linux/bits.h>
-#include <linux/kref.h>
-
-#include "i915_active_types.h"
+#include <linux/workqueue_types.h>
+struct drm_device;
struct drm_gem_object;
struct intel_display;
@@ -42,12 +41,8 @@ enum fb_op_origin {
};
struct intel_frontbuffer {
- struct kref ref;
+ struct intel_display *display;
atomic_t bits;
- struct i915_active write;
- struct drm_gem_object *obj;
- struct rcu_head rcu;
-
struct work_struct flush_work;
};
@@ -68,10 +63,6 @@ struct intel_frontbuffer {
GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
-void intel_frontbuffer_flip_prepare(struct intel_display *display,
- unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct intel_display *display,
- unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits);
@@ -144,4 +135,7 @@ void intel_frontbuffer_track(struct intel_frontbuffer *old,
struct intel_frontbuffer *new,
unsigned int frontbuffer_bits);
+void intel_frontbuffer_init(struct intel_frontbuffer *front, struct drm_device *drm);
+void intel_frontbuffer_fini(struct intel_frontbuffer *front);
+
#endif /* __INTEL_FRONTBUFFER_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
index 000a898c9480..30eff6009e87 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.c
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -13,6 +13,36 @@
#include "intel_display_types.h"
#include "intel_global_state.h"
+#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+struct intel_global_objs_state {
+ struct intel_global_obj *ptr;
+ struct intel_global_state *state, *old_state, *new_state;
+};
+
struct intel_global_commit {
struct kref ref;
struct completion done;
@@ -148,7 +178,7 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
int index, num_objs, i;
size_t size;
- struct __intel_global_objs_state *arr;
+ struct intel_global_objs_state *arr;
struct intel_global_state *obj_state;
for (i = 0; i < state->num_global_objs; i++)
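
The for_each_*_global_obj_in_state() iterators are now private to this file. Their usage shape, as a sketch (the loop body is illustrative):

    struct intel_global_obj *obj;
    struct intel_global_state *old_obj_state, *new_obj_state;
    int i;

    for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
                                        new_obj_state, i) {
            /* visit each tracked global object with its old/new state */
    }
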
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
index d42fb2547ee9..e1efa530cc86 100644
--- a/drivers/gpu/drm/i915/display/intel_global_state.h
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -11,6 +11,7 @@
struct intel_atomic_state;
struct intel_display;
+struct intel_global_commit;
struct intel_global_obj;
struct intel_global_state;
@@ -26,36 +27,6 @@ struct intel_global_obj {
const struct intel_global_state_funcs *funcs;
};
-#define intel_for_each_global_obj(obj, dev_priv) \
- list_for_each_entry(obj, &(dev_priv)->display.global.obj_list, head)
-
-#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_global_objs && \
- ((obj) = (__state)->global_objs[__i].ptr, \
- (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
- (__i)++) \
- for_each_if(obj)
-
-#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_global_objs && \
- ((obj) = (__state)->global_objs[__i].ptr, \
- (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
- (__i)++) \
- for_each_if(obj)
-
-#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->num_global_objs && \
- ((obj) = (__state)->global_objs[__i].ptr, \
- (old_obj_state) = (__state)->global_objs[__i].old_state, \
- (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
- (__i)++) \
- for_each_if(obj)
-
-struct intel_global_commit;
-
struct intel_global_state {
struct intel_global_obj *obj;
struct intel_atomic_state *state;
@@ -64,11 +35,6 @@ struct intel_global_state {
bool changed, serialized;
};
-struct __intel_global_objs_state {
- struct intel_global_obj *ptr;
- struct intel_global_state *state, *old_state, *new_state;
-};
-
void intel_atomic_global_obj_init(struct intel_display *display,
struct intel_global_obj *obj,
struct intel_global_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 0d73f32fe7f1..795012d7c24c 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -30,7 +30,9 @@
#include <linux/export.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <drm/drm_print.h>
#include <drm/display/drm_hdcp_helper.h>
#include "i915_drv.h"
@@ -39,6 +41,7 @@
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_gmbus.h"
#include "intel_gmbus_regs.h"
@@ -217,7 +220,7 @@ static void pnv_gmbus_clock_gating(struct intel_display *display,
bool enable)
{
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, DSPCLK_GATE_D,
PNV_GMBUSUNIT_CLOCK_GATE_DISABLE,
!enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0);
}
@@ -240,14 +243,20 @@ static void bxt_gmbus_clock_gating(struct intel_display *display,
static u32 get_reserved(struct intel_gmbus *bus)
{
struct intel_display *display = bus->display;
- u32 reserved = 0;
+ u32 preserve_bits = 0;
+
+ if (display->platform.i830 || display->platform.i845g)
+ return 0;
/* On most chips, these bits must be preserved in software. */
- if (!display->platform.i830 && !display->platform.i845g)
- reserved = intel_de_read_notrace(display, bus->gpio_reg) &
- (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
+ preserve_bits |= GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE;
+
+ /* Wa_16025573575: the mask bits need to be preserved throughout */
+ if (intel_display_wa(display, 16025573575))
+ preserve_bits |= GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK |
+ GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK;
- return reserved;
+ return intel_de_read_notrace(display, bus->gpio_reg) & preserve_bits;
}
static int get_clock(void *data)
@@ -308,6 +317,22 @@ static void set_data(void *data, int state_high)
intel_de_posting_read(display, bus->gpio_reg);
}
+static void
+ptl_handle_mask_bits(struct intel_gmbus *bus, bool set)
+{
+ struct intel_display *display = bus->display;
+ u32 reg_val = intel_de_read_notrace(display, bus->gpio_reg);
+ u32 mask_bits = GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK |
+ GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK;
+
+ if (set)
+ reg_val |= mask_bits;
+ else
+ reg_val &= ~mask_bits;
+
+ intel_de_write_notrace(display, bus->gpio_reg, reg_val);
+ intel_de_posting_read(display, bus->gpio_reg);
+}
+
static int
intel_gpio_pre_xfer(struct i2c_adapter *adapter)
{
@@ -319,6 +344,9 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, false);
+ if (intel_display_wa(display, 16025573575))
+ ptl_handle_mask_bits(bus, true);
+
set_data(bus, 1);
set_clock(bus, 1);
udelay(I2C_RISEFALL_TIME);
@@ -336,6 +364,9 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, true);
+
+ if (intel_display_wa(display, 16025573575))
+ ptl_handle_mask_bits(bus, false);
}
static void
@@ -385,11 +416,14 @@ static int gmbus_wait(struct intel_display *display, u32 status, u32 irq_en)
intel_de_write_fw(display, GMBUS4(display), irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status,
- 2);
+
+ ret = poll_timeout_us_atomic(gmbus2 = intel_de_read_fw(display, GMBUS2(display)),
+ gmbus2 & status,
+ 0, 2, false);
if (ret)
- ret = wait_for((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status,
- 50);
+ ret = poll_timeout_us(gmbus2 = intel_de_read_fw(display, GMBUS2(display)),
+ gmbus2 & status,
+ 500, 50 * 1000, false);
intel_de_write_fw(display, GMBUS4(display), 0);
remove_wait_queue(&display->gmbus.wait_queue, &wait);
@@ -415,7 +449,7 @@ gmbus_wait_idle(struct intel_display *display)
add_wait_queue(&display->gmbus.wait_queue, &wait);
intel_de_write_fw(display, GMBUS4(display), irq_enable);
- ret = intel_de_wait_fw(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10, NULL);
+ ret = intel_de_wait_fw_ms(display, GMBUS2(display), GMBUS_ACTIVE, 0, 10, NULL);
intel_de_write_fw(display, GMBUS4(display), 0);
remove_wait_queue(&display->gmbus.wait_queue, &wait);
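
gmbus_wait() now layers the two iopoll helpers: a short non-sleeping spin first, then a sleeping poll. The call shape, with an invented read_status()/DONE pair (semantics assumed from <linux/iopoll.h>: re-evaluate the first expression, succeed once the condition holds, otherwise return -ETIMEDOUT):

    u32 val;
    int ret;

    /* spin for up to 2 us without sleeping (safe in atomic context) */
    ret = poll_timeout_us_atomic(val = read_status(), val & DONE,
                                 0, 2, false);
    if (ret)
            /* then poll every 500 us for up to 50 ms, allowing sleep */
            ret = poll_timeout_us(val = read_status(), val & DONE,
                                  500, 50 * 1000, false);
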
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 42202c8bb066..5e1a96223a9c 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -11,6 +11,7 @@
#include <linux/component.h>
#include <linux/debugfs.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/random.h>
#include <drm/display/drm_hdcp_helper.h>
@@ -18,9 +19,9 @@
#include <drm/intel/i915_component.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
@@ -326,16 +327,13 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
bool ksv_ready;
/* Poll for ksv list ready (spec says max time allowed is 5s) */
- ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
- &ksv_ready),
- read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
- 100 * 1000);
+ ret = poll_timeout_us(read_ret = shim->read_ksv_ready(dig_port, &ksv_ready),
+ read_ret || ksv_ready,
+ 100 * 1000, 5 * 1000 * 1000, false);
if (ret)
return ret;
if (read_ret)
return read_ret;
- if (!ksv_ready)
- return -ETIMEDOUT;
return 0;
}
@@ -412,9 +410,8 @@ static int intel_hdcp_load_keys(struct intel_display *display)
}
/* Wait for the keys to load (500us) */
- ret = intel_de_wait_custom(display, HDCP_KEY_STATUS,
- HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
- 10, 1, &val);
+ ret = intel_de_wait_ms(display, HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE,
+ HDCP_KEY_LOAD_DONE, 1, &val);
if (ret)
return ret;
else if (!(val & HDCP_KEY_LOAD_STATUS))
@@ -430,7 +427,7 @@ static int intel_hdcp_load_keys(struct intel_display *display)
static int intel_write_sha_text(struct intel_display *display, u32 sha_text)
{
intel_de_write(display, HDCP_SHA_TEXT, sha_text);
- if (intel_de_wait_for_set(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
+ if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
drm_err(display->drm, "Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
@@ -709,8 +706,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
/* Tell the HW we're done with the hash and wait for it to ACK */
intel_de_write(display, HDCP_REP_CTL,
rep_ctl | HDCP_SHA1_COMPLETE_HASH);
- if (intel_de_wait_for_set(display, HDCP_REP_CTL,
- HDCP_SHA1_COMPLETE, 1)) {
+ if (intel_de_wait_for_set_ms(display, HDCP_REP_CTL,
+ HDCP_SHA1_COMPLETE, 1)) {
drm_err(display->drm, "Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
@@ -817,6 +814,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
enum port port = dig_port->base.port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
+ u32 val;
union {
u32 reg[2];
u8 shim[DRM_HDCP_AN_LEN];
@@ -857,9 +855,9 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_de_wait_for_set(display,
- HDCP_STATUS(display, cpu_transcoder, port),
- HDCP_STATUS_AN_READY, 1)) {
+ if (intel_de_wait_for_set_ms(display,
+ HDCP_STATUS(display, cpu_transcoder, port),
+ HDCP_STATUS_AN_READY, 1)) {
drm_err(display->drm, "Timed out waiting for An\n");
return -ETIMEDOUT;
}
@@ -905,8 +903,10 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
- (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
+ ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
+ val & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC),
+ 100, 1000, false);
+ if (ret) {
drm_err(display->drm, "Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
}
@@ -938,24 +938,24 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+ ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)),
+ val & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC),
+ 100, 1000, false);
+ if (!ret)
break;
}
if (i == tries) {
drm_dbg_kms(display->drm,
- "Timed out waiting for Ri prime match (%x)\n",
- intel_de_read(display,
- HDCP_STATUS(display, cpu_transcoder, port)));
+ "Timed out waiting for Ri prime match (%x)\n", val);
return -ETIMEDOUT;
}
/* Wait for encryption confirmation */
- if (intel_de_wait_for_set(display,
- HDCP_STATUS(display, cpu_transcoder, port),
- HDCP_STATUS_ENC,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_for_set_ms(display,
+ HDCP_STATUS(display, cpu_transcoder, port),
+ HDCP_STATUS_ENC,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(display->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -1012,9 +1012,9 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
hdcp->hdcp_encrypted = false;
intel_de_write(display, HDCP_CONF(display, cpu_transcoder, port), 0);
- if (intel_de_wait_for_clear(display,
- HDCP_STATUS(display, cpu_transcoder, port),
- ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
+ if (intel_de_wait_for_clear_ms(display,
+ HDCP_STATUS(display, cpu_transcoder, port),
+ ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
drm_err(display->drm,
"Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -1939,11 +1939,10 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
0, CTL_LINK_ENCRYPTION_REQ);
- ret = intel_de_wait_for_set(display,
- HDCP2_STATUS(display, cpu_transcoder,
- port),
- LINK_ENCRYPTION_STATUS,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ ret = intel_de_wait_for_set_ms(display,
+ HDCP2_STATUS(display, cpu_transcoder, port),
+ LINK_ENCRYPTION_STATUS,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
dig_port->hdcp.auth_status = true;
return ret;
@@ -1965,11 +1964,10 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
intel_de_rmw(display, HDCP2_CTL(display, cpu_transcoder, port),
CTL_LINK_ENCRYPTION_REQ, 0);
- ret = intel_de_wait_for_clear(display,
- HDCP2_STATUS(display, cpu_transcoder,
- port),
- LINK_ENCRYPTION_STATUS,
- HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ ret = intel_de_wait_for_clear_ms(display,
+ HDCP2_STATUS(display, cpu_transcoder, port),
+ LINK_ENCRYPTION_STATUS,
+ HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
drm_dbg_kms(display->drm, "Disable Encryption Timedout");
@@ -2446,12 +2444,6 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
if (!hdcp->shim)
return -ENOENT;
- if (!connector->encoder) {
- drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n",
- connector->base.base.id, connector->base.name);
- return -ENODEV;
- }
-
mutex_lock(&hdcp->mutex);
mutex_lock(&dig_port->hdcp.mutex);
drm_WARN_ON(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
index 6a22862d6be1..3e7b480ee9f1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc.c
@@ -3,13 +3,13 @@
* Copyright 2023, Intel Corporation.
*/
+#include <drm/drm_print.h>
#include <drm/intel/i915_hdcp_interface.h>
#include "gem/i915_gem_region.h"
#include "gt/intel_gt.h"
#include "gt/uc/intel_gsc_uc_heci_cmd_submit.h"
#include "i915_drv.h"
-#include "i915_utils.h"
#include "intel_hdcp_gsc.h"
struct intel_hdcp_gsc_context {
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 9961ff259298..908faf17f93d 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
+#include <linux/iopoll.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
@@ -44,7 +45,6 @@
#include <media/cec-notifier.h>
#include "g4x_hdmi.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
@@ -54,18 +54,34 @@
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_hdmi.h"
+#include "intel_link_bw.h"
#include "intel_lspcon.h"
#include "intel_panel.h"
#include "intel_pfit.h"
#include "intel_snps_phy.h"
#include "intel_vrr.h"
+bool intel_hdmi_is_frl(u32 clock)
+{
+ switch (clock) {
+ case 300000: /* 3 Gbps */
+ case 600000: /* 6 Gbps */
+ case 800000: /* 8 Gbps */
+ case 1000000: /* 10 Gbps */
+ case 1200000: /* 12 Gbps */
+ return true;
+ default:
+ return false;
+ }
+}
+
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
@@ -1582,9 +1598,9 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for((intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
+ ret = intel_de_wait_for_set_ms(display, HDCP_STATUS(display, cpu_transcoder, port),
+ HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC, 1);
+ if (ret) {
drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
intel_de_read(display, HDCP_STATUS(display, cpu_transcoder,
port)));
@@ -1689,11 +1705,10 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
if (timeout < 0)
return timeout;
- ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port,
- msg_id, &msg_ready,
- &msg_sz),
- !ret && msg_ready && msg_sz, timeout * 1000,
- 1000, 5 * 1000);
+ ret = poll_timeout_us(ret = hdcp2_detect_msg_availability(dig_port, msg_id,
+ &msg_ready, &msg_sz),
+ !ret && msg_ready && msg_sz,
+ 4000, timeout * 1000, false);
if (ret)
drm_dbg_kms(display->drm,
"msg_id: %d, ret: %d, timeout: %d\n",
@@ -2053,6 +2068,10 @@ intel_hdmi_mode_valid(struct drm_connector *_connector,
else
sink_format = INTEL_OUTPUT_FORMAT_RGB;
+ status = intel_pfit_mode_valid(display, mode, sink_format, 0);
+ if (status != MODE_OK)
+ return status;
+
status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink, sink_format);
if (status != MODE_OK) {
if (ycbcr_420_only ||
@@ -2341,6 +2360,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
pipe_config->pixel_multiplier = 2;
+ if (!intel_link_bw_compute_pipe_bpp(pipe_config))
+ return -EINVAL;
+
pipe_config->has_audio =
intel_hdmi_has_audio(encoder, pipe_config, conn_state) &&
intel_audio_compute_config(encoder, pipe_config, conn_state);
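
intel_hdmi_is_frl() classifies a link clock (in the same kHz-scaled units as the comments in its switch table) as one of the five fixed FRL rates rather than a TMDS character clock. For instance:

    intel_hdmi_is_frl(600000);  /* true: the 6 Gbps FRL rate */
    intel_hdmi_is_frl(594000);  /* false: a TMDS clock, e.g. 4k60 */
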
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index dec2ad7dd8a2..be2fad57e4ad 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -60,6 +60,7 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
int src_max_slices, int src_max_slice_width,
int hdmi_max_slices, int hdmi_throughput);
int intel_hdmi_dsc_get_slice_height(int vactive);
+bool intel_hdmi_is_frl(u32 clock);
void hsw_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 265aa97fcc75..235706229ffb 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -24,15 +24,17 @@
#include <linux/debugfs.h>
#include <linux/kernel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_connector.h"
-#include "intel_display_power.h"
#include "intel_display_core.h"
+#include "intel_display_power.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
@@ -971,8 +973,6 @@ void intel_hpd_cancel_work(struct intel_display *display)
spin_lock_irq(&display->irq.lock);
- drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display));
-
display->hotplug.long_hpd_pin_mask = 0;
display->hotplug.short_hpd_pin_mask = 0;
display->hotplug.event_bits = 0;
@@ -1333,12 +1333,12 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = {
void intel_hpd_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_hpd_storm_ctl", 0644, debugfs_root,
display, &i915_hpd_storm_ctl_fops);
- debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_hpd_short_storm_ctl", 0644, debugfs_root,
display, &i915_hpd_short_storm_ctl_fops);
- debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root,
+ debugfs_create_bool("i915_ignore_long_hpd", 0644, debugfs_root,
&display->hotplug.ignore_long_hpd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 43aee70597bf..46c47b3d6f42 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -6,11 +6,11 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp_aux.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"
@@ -420,6 +420,9 @@ u32 i9xx_hpd_irq_ack(struct intel_display *display)
u32 hotplug_status = 0, hotplug_status_mask;
int i;
+ if (!HAS_HOTPLUG(display))
+ return 0;
+
if (display->platform.g4x ||
display->platform.valleyview || display->platform.cherryview)
hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
@@ -1025,7 +1028,7 @@ static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- intel_de_rmw(display, SHOTPLUG_CTL_DDI,
+ intel_de_rmw(display, SHOTPLUG_CTL_TC,
mtp_tc_hotplug_mask(encoder->hpd_pin),
mtp_tc_hotplug_enables(encoder));
}
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index 3caef7f9c7c4..d2862de894fa 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -20,6 +20,7 @@
#include "intel_dp_tunnel.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
+#include "intel_vdsc.h"
static int get_forced_link_bpp_x16(struct intel_atomic_state *state,
const struct intel_crtc *crtc)
@@ -55,7 +56,7 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
enum pipe pipe;
- limits->force_fec_pipes = 0;
+ limits->link_dsc_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(display, pipe) {
struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
@@ -65,8 +66,8 @@ void intel_link_bw_init_limits(struct intel_atomic_state *state,
if (state->base.duplicated && crtc_state) {
limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
- if (crtc_state->fec_enable)
- limits->force_fec_pipes |= BIT(pipe);
+ if (intel_dsc_enabled_on_link(crtc_state))
+ limits->link_dsc_pipes |= BIT(pipe);
} else {
limits->max_bpp_x16[pipe] = INT_MAX;
}
@@ -165,6 +166,34 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
}
/**
+ * intel_link_bw_compute_pipe_bpp - compute pipe bpp limited by max link bpp
+ * @crtc_state: the crtc state
+ *
+ * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can
+ * call this function during state computation in the simple case where the
+ * link bpp will always match the pipe bpp. This is the case for all non-DP
+ * encoders, while DP encoders will use a link bpp lower than pipe bpp in case
+ * of DSC compression.
+ *
+ * Returns %true in case of success, %false if pipe bpp would need to be
+ * reduced below its valid range.
+ */
+bool intel_link_bw_compute_pipe_bpp(struct intel_crtc_state *crtc_state)
+{
+ int pipe_bpp = min(crtc_state->pipe_bpp,
+ fxp_q4_to_int(crtc_state->max_link_bpp_x16));
+
+ pipe_bpp = rounddown(pipe_bpp, 2 * 3);
+
+ if (pipe_bpp < 6 * 3)
+ return false;
+
+ crtc_state->pipe_bpp = pipe_bpp;
+
+ return true;
+}
+
+/**
* intel_link_bw_set_bpp_limit_for_pipe - set link bpp limit for a pipe to its minimum
* @state: atomic state
* @old_limits: link BW limits
@@ -237,10 +266,10 @@ assert_link_limit_change_valid(struct intel_display *display,
bool bpps_changed = false;
enum pipe pipe;
- /* FEC can't be forced off after it was forced on. */
+ /* DSC can't be disabled after it was enabled. */
if (drm_WARN_ON(display->drm,
- (old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
- old_limits->force_fec_pipes))
+ (old_limits->link_dsc_pipes & new_limits->link_dsc_pipes) !=
+ old_limits->link_dsc_pipes))
return false;
for_each_pipe(display, pipe) {
@@ -258,8 +287,8 @@ assert_link_limit_change_valid(struct intel_display *display,
/* At least one limit must change. */
if (drm_WARN_ON(display->drm,
!bpps_changed &&
- new_limits->force_fec_pipes ==
- old_limits->force_fec_pipes))
+ new_limits->link_dsc_pipes ==
+ old_limits->link_dsc_pipes))
return false;
return true;
@@ -449,6 +478,7 @@ void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
switch (connector->base.connector_type) {
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_HDMIA:
break;
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_SVIDEO:
@@ -458,11 +488,6 @@ void intel_link_bw_connector_debugfs_add(struct intel_connector *connector)
break;
return;
- case DRM_MODE_CONNECTOR_HDMIA:
- if (HAS_FDI(display) && !HAS_DDI(display))
- break;
-
- return;
default:
return;
}
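
intel_link_bw_compute_pipe_bpp(), moved here from intel_fdi.c, clamps pipe bpp to the link limit and snaps it down to a multiple of 6 (3 components times 2-bit bpc steps). Illustrative arithmetic with invented values:

    /* pipe_bpp = 30 (10 bpc), max_link_bpp_x16 = 424 (26.5 bpp) */
    pipe_bpp = min(30, fxp_q4_to_int(424)); /* 424 / 16 -> 26 */
    pipe_bpp = rounddown(pipe_bpp, 2 * 3);  /* 24, i.e. 8 bpc */
    /* a result below 18 (6 bpc) would make the helper return false */
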
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index b499042e62b1..cb18e171037c 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -15,7 +15,7 @@ struct intel_connector;
struct intel_crtc_state;
struct intel_link_bw_limits {
- u8 force_fec_pipes;
+ u8 link_dsc_pipes;
u8 bpp_limit_reached_pipes;
/* in 1/16 bpp units */
int max_bpp_x16[I915_MAX_PIPES];
@@ -27,6 +27,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
struct intel_link_bw_limits *limits,
u8 pipe_mask,
const char *reason);
+bool intel_link_bw_compute_pipe_bpp(struct intel_crtc_state *crtc_state);
bool intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state,
const struct intel_link_bw_limits *old_limits,
struct intel_link_bw_limits *new_limits,
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 666148a14522..42284e9928f2 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -68,9 +68,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_print.h>
#include <drm/intel/intel_lpe_audio.h>
-#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_audio_regs.h"
#include "intel_de.h"
@@ -170,14 +170,11 @@ static struct irq_chip lpe_audio_irqchip = {
static int lpe_audio_irq_init(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
int irq = display->audio.lpe.irq;
- drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv));
- irq_set_chip_and_handler_name(irq,
- &lpe_audio_irqchip,
- handle_simple_irq,
- "hdmi_lpe_audio_irq_handler");
+ irq_set_chip_and_handler_name(irq, &lpe_audio_irqchip,
+ handle_simple_irq,
+ "hdmi_lpe_audio_irq_handler");
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index abc4b562083d..9ceabbc981a1 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -23,16 +23,18 @@
*
*/
+#include <linux/iopoll.h>
+
#include <drm/display/drm_dp_dual_mode_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_hdmi.h"
#include "intel_lspcon.h"
@@ -181,6 +183,8 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
struct intel_display *display = to_intel_display(intel_dp);
enum drm_lspcon_mode current_mode;
+ int timeout_us;
+ int ret;
current_mode = lspcon_get_current_mode(lspcon);
if (current_mode == mode)
@@ -189,9 +193,12 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
drm_dbg_kms(display->drm, "Waiting for LSPCON mode %s to settle\n",
lspcon_mode_name(mode));
- wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode,
- lspcon_get_mode_settle_timeout(lspcon));
- if (current_mode != mode)
+ timeout_us = lspcon_get_mode_settle_timeout(lspcon) * 1000;
+
+ ret = poll_timeout_us(current_mode = lspcon_get_current_mode(lspcon),
+ current_mode == mode,
+ 5000, timeout_us, false);
+ if (ret)
drm_err(display->drm, "LSPCON mode hasn't settled\n");
out:
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.c b/drivers/gpu/drm/i915/display/intel_lt_phy.c
new file mode 100644
index 000000000000..a67eb4f7f897
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.c
@@ -0,0 +1,2327 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+
+#include "i915_reg.h"
+#include "intel_cx0_phy.h"
+#include "intel_cx0_phy_regs.h"
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
+#include "intel_de.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_display_utils.h"
+#include "intel_dpll_mgr.h"
+#include "intel_hdmi.h"
+#include "intel_lt_phy.h"
+#include "intel_lt_phy_regs.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
+#include "intel_tc.h"
+
+#define for_each_lt_phy_lane_in_mask(__lane_mask, __lane) \
+ for ((__lane) = 0; (__lane) < 2; (__lane)++) \
+ for_each_if((__lane_mask) & BIT(__lane))
+
+#define INTEL_LT_PHY_LANE0 BIT(0)
+#define INTEL_LT_PHY_LANE1 BIT(1)
+#define INTEL_LT_PHY_BOTH_LANES (INTEL_LT_PHY_LANE1 |\
+ INTEL_LT_PHY_LANE0)
+#define MODE_DP 3
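+/*
+ * The PLL math below uses unsigned Q32.32 fixed point: the upper 32 bits
+ * hold the integer part and the lower 32 bits the fraction, e.g. 1.5 is
+ * encoded as 0x180000000.
+ */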
+#define Q32_TO_INT(x) ((x) >> 32)
+#define Q32_TO_FRAC(x) ((x) & 0xFFFFFFFF)
+#define DCO_MIN_FREQ_MHZ 11850
+#define REF_CLK_KHZ 38400
+#define TDC_RES_MULTIPLIER 10000000ULL
+
+struct phy_param_t {
+ u32 val;
+ u32 addr;
+};
+
+struct lt_phy_params {
+ struct phy_param_t pll_reg4;
+ struct phy_param_t pll_reg3;
+ struct phy_param_t pll_reg5;
+ struct phy_param_t pll_reg57;
+ struct phy_param_t lf;
+ struct phy_param_t tdc;
+ struct phy_param_t ssc;
+ struct phy_param_t bias2;
+ struct phy_param_t bias_trim;
+ struct phy_param_t dco_med;
+ struct phy_param_t dco_fine;
+ struct phy_param_t ssc_inj;
+ struct phy_param_t surv_bonus;
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
+ .clock = 162000,
+ .config = {
+ 0x83,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x5, 0xa, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0x4, 0x4, 0x82, 0x28 },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
+ .clock = 270000,
+ .config = {
+ 0x8b,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x3, 0xca, 0x34, 0xa0 },
+ { 0xe0, 0x0, 0x0, 0x0 },
+ { 0x5, 0x4, 0x81, 0xad },
+ { 0xfa, 0x11, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x7, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x43, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0d },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
+ .clock = 540000,
+ .config = {
+ 0x93,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4d, 0x34, 0xa0 },
+ { 0xe0, 0x0, 0x0, 0x0 },
+ { 0xa, 0x4, 0x81, 0xda },
+ { 0xfa, 0x11, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x7, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x43, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0d },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
+ .clock = 810000,
+ .config = {
+ 0x9b,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4a, 0x34, 0xa0 },
+ { 0xe0, 0x0, 0x0, 0x0 },
+ { 0x5, 0x4, 0x80, 0xa8 },
+ { 0xfa, 0x11, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x7, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x43, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0d },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
+ .clock = 1000000,
+ .config = {
+ 0x43,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0xa, 0x20, 0x80 },
+ { 0x6a, 0xaa, 0xaa, 0xab },
+ { 0x0, 0x3, 0x4, 0x94 },
+ { 0xfa, 0x1c, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x4, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x45, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x5b, 0xe0, 0x8 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
+ .clock = 1350000,
+ .config = {
+ 0xcb,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x2, 0x9, 0x2b, 0xe0 },
+ { 0x90, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x80, 0xe0 },
+ { 0xfa, 0x15, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x6, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x49, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x57, 0xe0, 0x0c },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
+ .clock = 2000000,
+ .config = {
+ 0x53,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x85,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ 0x86,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0xa, 0x20, 0x80 },
+ { 0x6a, 0xaa, 0xaa, 0xab },
+ { 0x0, 0x3, 0x4, 0x94 },
+ { 0xfa, 0x1c, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x4, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x45, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x5b, 0xe0, 0x8 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state * const xe3plpd_lt_dp_tables[] = {
+ &xe3plpd_lt_dp_rbr,
+ &xe3plpd_lt_dp_hbr1,
+ &xe3plpd_lt_dp_hbr2,
+ &xe3plpd_lt_dp_hbr3,
+ &xe3plpd_lt_dp_uhbr10,
+ &xe3plpd_lt_dp_uhbr13_5,
+ &xe3plpd_lt_dp_uhbr20,
+ NULL,
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
+ .clock = 216000,
+ .config = {
+ 0xa3,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x3, 0xca, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x81, 0xbc },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
+ .clock = 243000,
+ .config = {
+ 0xab,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x3, 0xca, 0x2f, 0x60 },
+ { 0xb0, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x81, 0xbc },
+ { 0xfa, 0x13, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x6, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x47, 0x48, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0c },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
+ .clock = 324000,
+ .config = {
+ 0xb3,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x2, 0x8a, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x81, 0x28 },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
+ .clock = 432000,
+ .config = {
+ 0xbb,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4d, 0x2a, 0x20 },
+ { 0x80, 0x0, 0x0, 0x0 },
+ { 0xc, 0x4, 0x81, 0xbc },
+ { 0xfa, 0x16, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x5, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x4b, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x5b, 0xe0, 0x0a },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
+ .clock = 675000,
+ .config = {
+ 0xdb,
+ 0x2d,
+ 0x1,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x1, 0x4a, 0x2b, 0xe0 },
+ { 0x90, 0x0, 0x0, 0x0 },
+ { 0x6, 0x4, 0x80, 0xa8 },
+ { 0xfa, 0x15, 0x83, 0x11 },
+ { 0x80, 0x0f, 0xf9, 0x53 },
+ { 0x84, 0x26, 0x6, 0x4 },
+ { 0x0, 0xe0, 0x1, 0x0 },
+ { 0x49, 0x48, 0x0, 0x0 },
+ { 0x27, 0x8, 0x0, 0x0 },
+ { 0x5a, 0x13, 0x29, 0x13 },
+ { 0x0, 0x57, 0xe0, 0x0c },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state * const xe3plpd_lt_edp_tables[] = {
+ &xe3plpd_lt_dp_rbr,
+ &xe3plpd_lt_edp_2_16,
+ &xe3plpd_lt_edp_2_43,
+ &xe3plpd_lt_dp_hbr1,
+ &xe3plpd_lt_edp_3_24,
+ &xe3plpd_lt_edp_4_32,
+ &xe3plpd_lt_dp_hbr2,
+ &xe3plpd_lt_edp_6_75,
+ &xe3plpd_lt_dp_hbr3,
+ NULL,
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
+ .clock = 25200,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x0c, 0x15, 0x27, 0x60 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x98, 0x28 },
+ { 0x42, 0x0, 0x84, 0x10 },
+ { 0x80, 0x0f, 0xd9, 0xb5 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_272 = {
+ .clock = 27200,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x0b, 0x15, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x96, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
+ .clock = 74250,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x4, 0x15, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x88, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
+ .clock = 148500,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x2, 0x15, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x84, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
+ .clock = 594000,
+ .config = {
+ 0x84,
+ 0x2d,
+ 0x0,
+ },
+ .addr_msb = {
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x87,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ 0x88,
+ },
+ .addr_lsb = {
+ 0x10,
+ 0x0c,
+ 0x14,
+ 0xe4,
+ 0x0c,
+ 0x10,
+ 0x14,
+ 0x18,
+ 0x48,
+ 0x40,
+ 0x4c,
+ 0x24,
+ 0x44,
+ },
+ .data = {
+ { 0x0, 0x4c, 0x2, 0x0 },
+ { 0x0, 0x95, 0x26, 0xa0 },
+ { 0x60, 0x0, 0x0, 0x0 },
+ { 0x8, 0x4, 0x81, 0x28 },
+ { 0xfa, 0x0c, 0x84, 0x11 },
+ { 0x80, 0x0f, 0xd9, 0x53 },
+ { 0x86, 0x0, 0x0, 0x0 },
+ { 0x1, 0xa0, 0x1, 0x0 },
+ { 0x4b, 0x0, 0x0, 0x0 },
+ { 0x28, 0x0, 0x0, 0x0 },
+ { 0x0, 0x14, 0x2a, 0x14 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ { 0x0, 0x0, 0x0, 0x0 },
+ },
+};
+
+static const struct intel_lt_phy_pll_state * const xe3plpd_lt_hdmi_tables[] = {
+ &xe3plpd_lt_hdmi_252,
+ &xe3plpd_lt_hdmi_272,
+ &xe3plpd_lt_hdmi_742p5,
+ &xe3plpd_lt_hdmi_1p485,
+ &xe3plpd_lt_hdmi_5p94,
+ NULL,
+};
+
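+/*
+ * In DP alternate mode with at most 2 lanes assigned for DP, the display
+ * owns only PHY LANE0; otherwise it owns both PHY lanes.
+ */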
+static u8 intel_lt_phy_get_owned_lane_mask(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (!intel_tc_port_in_dp_alt_mode(dig_port))
+ return INTEL_LT_PHY_BOTH_LANES;
+
+ return intel_tc_port_max_lane_count(dig_port) > 2
+ ? INTEL_LT_PHY_BOTH_LANES : INTEL_LT_PHY_LANE0;
+}
+
+static u8 intel_lt_phy_read(struct intel_encoder *encoder, u8 lane_mask, u16 addr)
+{
+ return intel_cx0_read(encoder, lane_mask, addr);
+}
+
+static void intel_lt_phy_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data, bool committed)
+{
+ intel_cx0_write(encoder, lane_mask, addr, data, committed);
+}
+
+static void intel_lt_phy_rmw(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 clear, u8 set, bool committed)
+{
+ intel_cx0_rmw(encoder, lane_mask, addr, clear, set, committed);
+}
+
+static void intel_lt_phy_clear_status_p2p(struct intel_encoder *encoder,
+ int lane)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ intel_de_rmw(display,
+ XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(encoder->port, lane),
+ XELPDP_PORT_P2M_RESPONSE_READY, 0);
+}
+
+static void
+assert_dc_off(struct intel_display *display)
+{
+ bool enabled;
+
+ enabled = intel_display_power_is_enabled(display, POWER_DOMAIN_DC_OFF);
+ drm_WARN_ON(display->drm, !enabled);
+}
+
+static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
+ int lane, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+ int ack;
+ u32 val;
+
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_P2P_TRANSACTION_PENDING,
+ XELPDP_MSGBUS_TIMEOUT_MS)) {
+ drm_dbg_kms(display->drm,
+ "PHY %c Timeout waiting for previous transaction to complete. Resetting bus.\n",
+ phy_name(phy));
+ intel_cx0_bus_reset(encoder, lane);
+ return -ETIMEDOUT;
+ }
+
+ intel_de_rmw(display, XELPDP_PORT_P2M_MSGBUS_STATUS(display, port, lane), 0, 0);
+
+ intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
+ XELPDP_PORT_P2P_TRANSACTION_PENDING |
+ XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED |
+ XELPDP_PORT_M2P_DATA(data) |
+ XELPDP_PORT_M2P_ADDRESS(addr));
+
+ ack = intel_cx0_wait_for_ack(encoder, XELPDP_PORT_P2M_COMMAND_WRITE_ACK, lane, &val);
+ if (ack < 0)
+ return ack;
+
+ if (val & XELPDP_PORT_P2M_ERROR_SET) {
+ drm_dbg_kms(display->drm,
+ "PHY %c Error occurred during P2P write command. Status: 0x%x\n",
+ phy_name(phy), val);
+ intel_lt_phy_clear_status_p2p(encoder, lane);
+ intel_cx0_bus_reset(encoder, lane);
+ return -EINVAL;
+ }
+
+ /*
+ * RE-VISIT:
+ * This delay gives the PHY time to set everything up; it was found to
+ * be required to get the display up and running. It is the time the
+ * PHY takes to settle down after being programmed.
+ */
+ udelay(150);
+ intel_clear_response_ready_flag(encoder, lane);
+ intel_lt_phy_clear_status_p2p(encoder, lane);
+
+ return 0;
+}
+
+static void __intel_lt_phy_p2p_write(struct intel_encoder *encoder,
+ int lane, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ int i, status;
+
+ assert_dc_off(display);
+
+ /* 3 tries is assumed to be enough to write successfully */
+ for (i = 0; i < 3; i++) {
+ status = __intel_lt_phy_p2p_write_once(encoder, lane, addr, data, mac_reg_addr,
+ expected_mac_val);
+
+ if (status == 0)
+ return;
+ }
+
+ drm_err_once(display->drm,
+ "PHY %c P2P Write %04x failed after %d retries.\n", phy_name(phy), addr, i);
+}
+
+static void intel_lt_phy_p2p_write(struct intel_encoder *encoder,
+ u8 lane_mask, u16 addr, u8 data,
+ i915_reg_t mac_reg_addr,
+ u8 expected_mac_val)
+{
+ int lane;
+
+ for_each_lt_phy_lane_in_mask(lane_mask, lane)
+ __intel_lt_phy_p2p_write(encoder, lane, addr, data, mac_reg_addr, expected_mac_val);
+}
+
+static void
+intel_lt_phy_setup_powerdown(struct intel_encoder *encoder, u8 lane_count)
+{
+ /*
+ * The new PORT_BUF_CTL6 handling for DC5 entry and exit needs to be
+ * done by the DMC firmware and is not explicitly mentioned in Bspec.
+ * That leaves this function as a plain wrapper for now, but keep it
+ * in anticipation of future changes.
+ */
+ intel_cx0_setup_powerdown(encoder);
+}
+
+static void
+intel_lt_phy_powerdown_change_sequence(struct intel_encoder *encoder,
+ u8 lane_mask, u8 state)
+{
+ intel_cx0_powerdown_change_sequence(encoder, lane_mask, state);
+}
+
+static void
+intel_lt_phy_lane_reset(struct intel_encoder *encoder,
+ u8 lane_count)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum port port = encoder->port;
+ enum phy phy = intel_encoder_to_phy(encoder);
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1)
+ : XELPDP_LANE_PIPE_RESET(0);
+ u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1))
+ : XELPDP_LANE_PHY_CURRENT_STATUS(0);
+ u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
+ XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
+ : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
+
+ intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
+ XE3PLPD_MACCLK_RATE_MASK, XE3PLPD_MACCLK_RATE_DEF);
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, port),
+ XE3PLPDP_PHY_MODE_MASK, XE3PLPDP_PHY_MODE_DP);
+
+ intel_lt_phy_setup_powerdown(encoder, lane_count);
+ intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
+ XELPDP_P2_STATE_RESET);
+
+ intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
+ XE3PLPD_MACCLK_RESET_0, 0);
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0));
+
+ if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNON_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c PLL MacCLK assertion ack not done\n",
+ phy_name(phy));
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_FORWARD_CLOCK_UNGATE,
+ XELPDP_FORWARD_CLOCK_UNGATE);
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_pipe_reset | lane_phy_pulse_status, 0);
+
+ if (intel_de_wait_for_clear_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XE3PLPD_RESET_END_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c failed to bring out of lane reset\n",
+ phy_name(phy));
+
+ if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ XE3PLPD_RATE_CALIB_DONE_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c PLL rate not changed\n",
+ phy_name(phy));
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_phy_pulse_status, 0);
+}
+
+static void
+intel_lt_phy_program_port_clock_ctl(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool lane_reversal)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val = 0;
+
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL1(display, encoder->port),
+ XELPDP_PORT_REVERSAL,
+ lane_reversal ? XELPDP_PORT_REVERSAL : 0);
+
+ val |= XELPDP_FORWARD_CLOCK_UNGATE;
+
+ /*
+ * We actually mean MACCLK here, not MAXPCLK, when using the LT PHY,
+ * but since the register bits remain the same we reuse the same
+ * definition.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
+ intel_hdmi_is_frl(crtc_state->port_clock))
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
+ else
+ val |= XELPDP_DDI_CLOCK_SELECT_PREP(display, XELPDP_DDI_CLOCK_SELECT_MAXPCLK);
+
+ /* DP 2.0 10G and 20G rates enable MPLLA */
+ if (crtc_state->port_clock == 1000000 || crtc_state->port_clock == 2000000)
+ val |= XELPDP_SSC_ENABLE_PLLA;
+ else
+ val |= crtc_state->dpll_hw_state.ltpll.ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
+
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, encoder->port),
+ XELPDP_LANE1_PHY_CLOCK_SELECT | XELPDP_FORWARD_CLOCK_UNGATE |
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_SSC_ENABLE_PLLA |
+ XELPDP_SSC_ENABLE_PLLB, val);
+}
+
+static u32 intel_lt_phy_get_dp_clock(u8 rate)
+{
+ switch (rate) {
+ case 0:
+ return 162000;
+ case 1:
+ return 270000;
+ case 2:
+ return 540000;
+ case 3:
+ return 810000;
+ case 4:
+ return 216000;
+ case 5:
+ return 243000;
+ case 6:
+ return 324000;
+ case 7:
+ return 432000;
+ case 8:
+ return 1000000;
+ case 9:
+ return 1350000;
+ case 10:
+ return 2000000;
+ case 11:
+ return 675000;
+ default:
+ MISSING_CASE(rate);
+ return 0;
+ }
+}
+
+static bool
+intel_lt_phy_config_changed(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 val, rate;
+ u32 clock;
+
+ val = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_0_CONFIG);
+ rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK, val);
+
+ /*
+ * The only time we do not reconfigure the PLL is when we are already
+ * running at the 1.62 Gbps rate, since the PHY PLL defaults to that;
+ * otherwise we always need to reconfigure it.
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ clock = intel_lt_phy_get_dp_clock(rate);
+ if (crtc_state->port_clock == 162000 && crtc_state->port_clock == clock)
+ return false;
+ }
+
+ return true;
+}
+
+static intel_wakeref_t intel_lt_phy_transaction_begin(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_wakeref_t wakeref;
+
+ intel_psr_pause(intel_dp);
+ wakeref = intel_display_power_get(display, POWER_DOMAIN_DC_OFF);
+
+ return wakeref;
+}
+
+static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, intel_wakeref_t wakeref)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ intel_psr_resume(intel_dp);
+ intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
+}
+
+static const struct intel_lt_phy_pll_state * const *
+intel_lt_phy_pll_tables_get(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return xe3plpd_lt_edp_tables;
+
+ return xe3plpd_lt_dp_tables;
+ } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return xe3plpd_lt_hdmi_tables;
+ }
+
+ MISSING_CASE(encoder->type);
+ return NULL;
+}
+
+static bool
+intel_lt_phy_pll_is_ssc_enabled(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_panel_use_ssc(display)) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ return (intel_dp->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5);
+ }
+ }
+
+ return false;
+}
+
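+/*
+ * Multiply a Q32.32 fixed-point value by a u32 without overflowing,
+ * splitting the Q32.32 value into 32-bit halves and recombining the
+ * partial products. For example, 1.5 (0x180000000) * 3 = 4.5
+ * (0x480000000).
+ */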
+static u64 mul_q32_u32(u64 a_q32, u32 b)
+{
+ u64 p0, p1, carry, result;
+ u64 x_hi = a_q32 >> 32;
+ u64 x_lo = a_q32 & 0xFFFFFFFFULL;
+
+ p0 = x_lo * (u64)b;
+ p1 = x_hi * (u64)b;
+ carry = p0 >> 32;
+ result = (p1 << 32) + (carry << 32) + (p0 & 0xFFFFFFFFULL);
+
+ return result;
+}
+
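+/*
+ * Search for an even loop count 'y' such that y * frequency_khz / 200
+ * lands inside a valid DCO band within the allowed ppm error. For
+ * example, a 594000 kHz HDMI clock finds y = 4, giving a target DCO of
+ * 4 * 594000 / 200 = 11880 MHz inside the 11850-16200 MHz band.
+ */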
+static bool
+calculate_target_dco_and_loop_cnt(u32 frequency_khz, u64 *target_dco_mhz, u32 *loop_cnt)
+{
+ u32 ppm_value = 1;
+ u32 dco_min_freq = DCO_MIN_FREQ_MHZ;
+ u32 dco_max_freq = 16200;
+ u32 dco_min_freq_low = 10000;
+ u32 dco_max_freq_low = 12000;
+ u64 val = 0;
+ u64 refclk_khz = REF_CLK_KHZ;
+ u64 m2div = 0;
+ u64 val_with_frac = 0;
+ u64 ppm = 0;
+ u64 temp0 = 0, temp1, scale;
+ int ppm_cnt, dco_count, y;
+
+ for (ppm_cnt = 0; ppm_cnt < 5; ppm_cnt++) {
+ ppm_value = ppm_cnt == 2 ? 2 : 1;
+ for (dco_count = 0; dco_count < 2; dco_count++) {
+ if (dco_count == 1) {
+ dco_min_freq = dco_min_freq_low;
+ dco_max_freq = dco_max_freq_low;
+ }
+ for (y = 2; y <= 255; y += 2) {
+ val = div64_u64((u64)y * frequency_khz, 200);
+ m2div = div64_u64(((u64)(val) << 32), refclk_khz);
+ m2div = mul_q32_u32(m2div, 500);
+ val_with_frac = mul_q32_u32(m2div, refclk_khz);
+ val_with_frac = div64_u64(val_with_frac, 500);
+ temp1 = Q32_TO_INT(val_with_frac);
+ temp0 = (temp1 > val) ? (temp1 - val) :
+ (val - temp1);
+ ppm = div64_u64(temp0, val);
+ if (temp1 >= dco_min_freq &&
+ temp1 <= dco_max_freq &&
+ ppm < ppm_value) {
+ /* Round to two places */
+ scale = (1ULL << 32) / 100;
+ temp0 = DIV_ROUND_UP_ULL(val_with_frac,
+ scale);
+ *target_dco_mhz = temp0 * scale;
+ *loop_cnt = y;
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+static void set_phy_vdr_addresses(struct lt_phy_params *p, int pll_type)
+{
+ p->pll_reg4.addr = PLL_REG_ADDR(PLL_REG4_ADDR, pll_type);
+ p->pll_reg3.addr = PLL_REG_ADDR(PLL_REG3_ADDR, pll_type);
+ p->pll_reg5.addr = PLL_REG_ADDR(PLL_REG5_ADDR, pll_type);
+ p->pll_reg57.addr = PLL_REG_ADDR(PLL_REG57_ADDR, pll_type);
+ p->lf.addr = PLL_REG_ADDR(PLL_LF_ADDR, pll_type);
+ p->tdc.addr = PLL_REG_ADDR(PLL_TDC_ADDR, pll_type);
+ p->ssc.addr = PLL_REG_ADDR(PLL_SSC_ADDR, pll_type);
+ p->bias2.addr = PLL_REG_ADDR(PLL_BIAS2_ADDR, pll_type);
+ p->bias_trim.addr = PLL_REG_ADDR(PLL_BIAS_TRIM_ADDR, pll_type);
+ p->dco_med.addr = PLL_REG_ADDR(PLL_DCO_MED_ADDR, pll_type);
+ p->dco_fine.addr = PLL_REG_ADDR(PLL_DCO_FINE_ADDR, pll_type);
+ p->ssc_inj.addr = PLL_REG_ADDR(PLL_SSC_INJ_ADDR, pll_type);
+ p->surv_bonus.addr = PLL_REG_ADDR(PLL_SURV_BONUS_ADDR, pll_type);
+}
+
+static void compute_ssc(struct lt_phy_params *p, u32 ana_cfg)
+{
+ int ssc_stepsize = 0;
+ int ssc_steplen = 0;
+ int ssc_steplog = 0;
+
+ p->ssc.val = (1 << 31) | (ana_cfg << 24) | (ssc_steplog << 16) |
+ (ssc_stepsize << 8) | ssc_steplen;
+}
+
+static void compute_bias2(struct lt_phy_params *p)
+{
+ u32 ssc_en_local = 0;
+ u64 dynctrl_ovrd_en = 0;
+
+ p->bias2.val = (dynctrl_ovrd_en << 31) | (ssc_en_local << 30) |
+ (1 << 23) | (1 << 24) | (32 << 16) | (1 << 8);
+}
+
+static void compute_tdc(struct lt_phy_params *p, u64 tdc_fine)
+{
+ u32 settling_time = 15;
+ u32 bias_ovr_en = 1;
+ u32 coldstart = 1;
+ u32 true_lock = 2;
+ u32 early_lock = 1;
+ u32 lock_ovr_en = 1;
+ u32 lock_thr = tdc_fine ? 3 : 5;
+ u32 unlock_thr = tdc_fine ? 5 : 11;
+
+ p->tdc.val = (u32)((2 << 30) + (settling_time << 16) + (bias_ovr_en << 15) +
+ (lock_ovr_en << 14) + (coldstart << 12) + (true_lock << 10) +
+ (early_lock << 8) + (unlock_thr << 4) + lock_thr);
+}
+
+static void compute_dco_med(struct lt_phy_params *p)
+{
+ u32 cselmed_en = 0;
+ u32 cselmed_dyn_adj = 0;
+ u32 cselmed_ratio = 39;
+ u32 cselmed_thr = 8;
+
+ p->dco_med.val = (cselmed_en << 31) + (cselmed_dyn_adj << 30) +
+ (cselmed_ratio << 24) + (cselmed_thr << 21);
+}
+
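+/*
+ * dco_12g selects the fine-tune values for the low 10000-12000 MHz DCO
+ * band; otherwise the values target the regular 11850-16200 MHz band.
+ */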
+static void compute_dco_fine(struct lt_phy_params *p, u32 dco_12g)
+{
+ u32 dco_fine0_tune_2_0 = 0;
+ u32 dco_fine1_tune_2_0 = 0;
+ u32 dco_fine2_tune_2_0 = 0;
+ u32 dco_fine3_tune_2_0 = 0;
+ u32 dco_dith0_tune_2_0 = 0;
+ u32 dco_dith1_tune_2_0 = 0;
+
+ dco_fine0_tune_2_0 = dco_12g ? 4 : 3;
+ dco_fine1_tune_2_0 = 2;
+ dco_fine2_tune_2_0 = dco_12g ? 2 : 1;
+ dco_fine3_tune_2_0 = 5;
+ dco_dith0_tune_2_0 = dco_12g ? 4 : 3;
+ dco_dith1_tune_2_0 = 2;
+
+ p->dco_fine.val = (dco_dith1_tune_2_0 << 19) +
+ (dco_dith0_tune_2_0 << 16) +
+ (dco_fine3_tune_2_0 << 11) +
+ (dco_fine2_tune_2_0 << 8) +
+ (dco_fine1_tune_2_0 << 3) +
+ dco_fine0_tune_2_0;
+}
+
+int
+intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
+ u32 frequency_khz)
+{
+#define DATA_ASSIGN(i, pll_reg) \
+ do { \
+ lt_state->data[i][0] = (u8)((((pll_reg).val) & 0xFF000000) >> 24); \
+ lt_state->data[i][1] = (u8)((((pll_reg).val) & 0x00FF0000) >> 16); \
+ lt_state->data[i][2] = (u8)((((pll_reg).val) & 0x0000FF00) >> 8); \
+ lt_state->data[i][3] = (u8)((((pll_reg).val) & 0x000000FF)); \
+ } while (0)
+#define ADDR_ASSIGN(i, pll_reg) \
+ do { \
+ lt_state->addr_msb[i] = ((pll_reg).addr >> 8) & 0xFF; \
+ lt_state->addr_lsb[i] = (pll_reg).addr & 0xFF; \
+ } while (0)
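+/* VDR data bytes are stored MSB first: data[i][0] holds bits 31:24. */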
+
+ bool found = false;
+ struct lt_phy_params p;
+ u32 dco_fmin = DCO_MIN_FREQ_MHZ;
+ u64 refclk_khz = REF_CLK_KHZ;
+ u32 refclk_mhz_int = REF_CLK_KHZ / 1000;
+ u64 m2div = 0;
+ u64 target_dco_mhz = 0;
+ u64 tdc_fine, tdc_targetcnt;
+ u64 feedfwd_gain, feedfwd_cal_en;
+ u64 tdc_res = 30;
+ u32 prop_coeff;
+ u32 int_coeff;
+ u32 ndiv = 1;
+ u32 m1div = 1, m2div_int, m2div_frac;
+ u32 frac_en;
+ u32 ana_cfg;
+ u32 loop_cnt = 0;
+ u32 gain_ctrl = 2;
+ u32 postdiv = 0;
+ u32 dco_12g = 0;
+ u32 pll_type = 0;
+ u32 d1 = 2, d3 = 5, d4 = 0, d5 = 0;
+ u32 d6 = 0, d6_new = 0;
+ u32 d7, d8 = 0;
+ u32 bonus_7_0 = 0;
+ u32 csel2fo = 11;
+ u32 csel2fo_ovrd_en = 1;
+ u64 temp0, temp1, temp2, temp3;
+
+ p.surv_bonus.val = (bonus_7_0 << 16);
+ p.pll_reg4.val = (refclk_mhz_int << 17) +
+ (ndiv << 9) + (1 << 4);
+ p.bias_trim.val = (csel2fo_ovrd_en << 30) + (csel2fo << 24);
+ p.ssc_inj.val = 0;
+ found = calculate_target_dco_and_loop_cnt(frequency_khz, &target_dco_mhz, &loop_cnt);
+ if (!found)
+ return -EINVAL;
+
+ m2div = div64_u64(target_dco_mhz, (refclk_khz * ndiv * m1div));
+ m2div = mul_q32_u32(m2div, 1000);
+ if (Q32_TO_INT(m2div) > 511)
+ return -EINVAL;
+
+ m2div_int = (u32)Q32_TO_INT(m2div);
+ m2div_frac = (u32)(Q32_TO_FRAC(m2div));
+ frac_en = (m2div_frac > 0) ? 1 : 0;
+
+ if (frac_en > 0)
+ tdc_res = 70;
+ else
+ tdc_res = 36;
+ tdc_fine = tdc_res > 50 ? 1 : 0;
+ temp0 = tdc_res * 40 * 11;
+ temp1 = div64_u64(((4 * TDC_RES_MULTIPLIER) + temp0) * 500, temp0 * refclk_khz);
+ temp2 = div64_u64(temp0 * refclk_khz, 1000);
+ temp3 = div64_u64(((8 * TDC_RES_MULTIPLIER) + temp2), temp2);
+ tdc_targetcnt = tdc_res < 50 ? (int)(temp1) : (int)(temp3);
+ tdc_targetcnt = (int)(tdc_targetcnt / 2);
+ temp0 = mul_q32_u32(target_dco_mhz, tdc_res);
+ temp0 >>= 32;
+ feedfwd_gain = (m2div_frac > 0) ? div64_u64(m1div * TDC_RES_MULTIPLIER, temp0) : 0;
+ feedfwd_cal_en = frac_en;
+
+ temp0 = (u32)Q32_TO_INT(target_dco_mhz);
+ prop_coeff = (temp0 >= dco_fmin) ? 3 : 4;
+ int_coeff = (temp0 >= dco_fmin) ? 7 : 8;
+ ana_cfg = (temp0 >= dco_fmin) ? 8 : 6;
+ dco_12g = (temp0 >= dco_fmin) ? 0 : 1;
+
+ if (temp0 > 12960)
+ d7 = 10;
+ else
+ d7 = 8;
+
+ d8 = loop_cnt / 2;
+ d4 = d8 * 2;
+
+ /* Compute pll_reg3,5,57 & lf */
+ p.pll_reg3.val = (u32)((d4 << 21) + (d3 << 18) + (d1 << 15) + (m2div_int << 5));
+ p.pll_reg5.val = m2div_frac;
+ postdiv = (d5 == 0) ? 9 : d5;
+ d6_new = (d6 == 0) ? 40 : d6;
+ p.pll_reg57.val = (d7 << 24) + (postdiv << 15) + (d8 << 7) + d6_new;
+ p.lf.val = (u32)((frac_en << 31) + (1 << 30) + (frac_en << 29) +
+ (feedfwd_cal_en << 28) + (tdc_fine << 27) +
+ (gain_ctrl << 24) + (feedfwd_gain << 16) +
+ (int_coeff << 12) + (prop_coeff << 8) + tdc_targetcnt);
+
+ compute_ssc(&p, ana_cfg);
+ compute_bias2(&p);
+ compute_tdc(&p, tdc_fine);
+ compute_dco_med(&p);
+ compute_dco_fine(&p, dco_12g);
+
+ pll_type = ((frequency_khz == 10000) || (frequency_khz == 20000) ||
+ (frequency_khz == 2500) || (dco_12g == 1)) ? 0 : 1;
+ set_phy_vdr_addresses(&p, pll_type);
+
+ lt_state->config[0] = 0x84;
+ lt_state->config[1] = 0x2d;
+ ADDR_ASSIGN(0, p.pll_reg4);
+ ADDR_ASSIGN(1, p.pll_reg3);
+ ADDR_ASSIGN(2, p.pll_reg5);
+ ADDR_ASSIGN(3, p.pll_reg57);
+ ADDR_ASSIGN(4, p.lf);
+ ADDR_ASSIGN(5, p.tdc);
+ ADDR_ASSIGN(6, p.ssc);
+ ADDR_ASSIGN(7, p.bias2);
+ ADDR_ASSIGN(8, p.bias_trim);
+ ADDR_ASSIGN(9, p.dco_med);
+ ADDR_ASSIGN(10, p.dco_fine);
+ ADDR_ASSIGN(11, p.ssc_inj);
+ ADDR_ASSIGN(12, p.surv_bonus);
+ DATA_ASSIGN(0, p.pll_reg4);
+ DATA_ASSIGN(1, p.pll_reg3);
+ DATA_ASSIGN(2, p.pll_reg5);
+ DATA_ASSIGN(3, p.pll_reg57);
+ DATA_ASSIGN(4, p.lf);
+ DATA_ASSIGN(5, p.tdc);
+ DATA_ASSIGN(6, p.ssc);
+ DATA_ASSIGN(7, p.bias2);
+ DATA_ASSIGN(8, p.bias_trim);
+ DATA_ASSIGN(9, p.dco_med);
+ DATA_ASSIGN(10, p.dco_fine);
+ DATA_ASSIGN(11, p.ssc_inj);
+ DATA_ASSIGN(12, p.surv_bonus);
+
+ return 0;
+}
+
+static int
+intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
+{
+#define REGVAL(i) ( \
+ (lt_state->data[i][3]) | \
+ (lt_state->data[i][2] << 8) | \
+ (lt_state->data[i][1] << 16) | \
+ (lt_state->data[i][0] << 24) \
+)
+
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct intel_lt_phy_pll_state *lt_state =
+ &crtc_state->dpll_hw_state.ltpll;
+ int clk = 0;
+ u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int;
+ u64 temp0, temp1;
+ /*
+ * The algorithm uses '+' to combine bitfields when
+ * constructing PLL_reg3 and PLL_reg57:
+ * PLL_reg57 = (D7 << 24) + (postdiv << 15) + (D8 << 7) + D6_new;
+ * PLL_reg3 = (D4 << 21) + (D3 << 18) + (D1 << 15) + (m2div_int << 5);
+ *
+ * However, this is likely intended to be a bitwise OR operation,
+ * as each field occupies distinct, non-overlapping bits in the register.
+ *
+ * PLL_reg57 is composed of following fields packed into a 32-bit value:
+ * - D7: max value 10 -> fits in 4 bits -> placed at bits 24-27
+ * - postdiv: max value 9 -> fits in 4 bits -> placed at bits 15-18
+ * - D8: derived from loop_cnt / 2, max 127 -> fits in 7 bits
+ * (though 8 bits are given to it) -> placed at bits 7-14
+ * - D6_new: fits in lower 7 bits -> placed at bits 0-6
+ * PLL_reg57 = (D7 << 24) | (postdiv << 15) | (D8 << 7) | D6_new;
+ *
+ * Similarly, PLL_reg3 is packed as:
+ * - D4: max value 254 -> fits in 8 bits -> placed at bits 21-28
+ * - D3: fixed value 5 -> fits in 3 bits -> placed at bits 18-20
+ * - D1: fixed value 2 -> fits in 2 bits -> placed at bits 15-16
+ * - m2div_int: max value 511 -> fits in 9 bits (10 bits allocated)
+ * -> placed at bits 5-14
+ * PLL_reg3 = (D4 << 21) | (D3 << 18) | (D1 << 15) | (m2div_int << 5);
+ */
+ pll_reg_5 = REGVAL(2);
+ pll_reg_3 = REGVAL(1);
+ pll_reg_57 = REGVAL(3);
+ m2div_frac = pll_reg_5;
+
+ /*
+ * From forward algorithm we know
+ * m2div = 2 * m2
+ * val = y * frequency * 5
+ * So now,
+ * frequency = (m2 * 2 * refclk_khz / (d8 * 10))
+ * frequency = (m2div * refclk_khz / (d8 * 10))
+ */
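+ /*
+ * For example, with m2div = 309.375 and d8 = 2 (the values computed
+ * for a 594000 kHz HDMI clock), 309.375 * 38400 / (2 * 10) recovers
+ * 594000 kHz.
+ */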
+ d8 = (pll_reg_57 & REG_GENMASK(14, 7)) >> 7;
+ if (d8 == 0) {
+ drm_warn(display->drm,
+ "Invalid port clock, using lowest HDMI port clock\n");
+ return xe3plpd_lt_hdmi_252.clock;
+ }
+ m2div_int = (pll_reg_3 & REG_GENMASK(14, 5)) >> 5;
+ temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32;
+ temp1 = (u64)m2div_int * REF_CLK_KHZ;
+
+ clk = div_u64((temp1 + temp0), d8 * 10);
+
+ return clk;
+}
+
+int
+intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ int clk;
+ const struct intel_lt_phy_pll_state *lt_state =
+ &crtc_state->dpll_hw_state.ltpll;
+ u8 mode, rate;
+
+ mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK,
+ lt_state->config[0]);
+ /*
+ * For eDP/DP, read the clock value from the tables and return it,
+ * since the algorithm used for calculating the port clock does not
+ * exactly match the eDP/DP clock.
+ */
+ if (mode == MODE_DP) {
+ rate = REG_FIELD_GET8(LT_PHY_VDR_RATE_ENCODING_MASK,
+ lt_state->config[0]);
+ clk = intel_lt_phy_get_dp_clock(rate);
+ } else {
+ clk = intel_lt_phy_calc_hdmi_port_clock(crtc_state);
+ }
+
+ return clk;
+}
+
+int
+intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ const struct intel_lt_phy_pll_state * const *tables;
+ int i;
+
+ tables = intel_lt_phy_pll_tables_get(crtc_state, encoder);
+ if (!tables)
+ return -EINVAL;
+
+ for (i = 0; tables[i]; i++) {
+ if (crtc_state->port_clock == tables[i]->clock) {
+ crtc_state->dpll_hw_state.ltpll = *tables[i];
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ crtc_state->dpll_hw_state.ltpll.config[2] = 1;
+ }
+ crtc_state->dpll_hw_state.ltpll.ssc_enabled =
+ intel_lt_phy_pll_is_ssc_enabled(crtc_state, encoder);
+ return 0;
+ }
+ }
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ return intel_lt_phy_calculate_hdmi_state(&crtc_state->dpll_hw_state.ltpll,
+ crtc_state->port_clock);
+ }
+
+ return -EINVAL;
+}
+
+static void
+intel_lt_phy_program_pll(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ int i, j, k;
+
+ intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_0_CONFIG,
+ crtc_state->dpll_hw_state.ltpll.config[0], MB_WRITE_COMMITTED);
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG,
+ crtc_state->dpll_hw_state.ltpll.config[1], MB_WRITE_COMMITTED);
+ intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_VDR_2_CONFIG,
+ crtc_state->dpll_hw_state.ltpll.config[2], MB_WRITE_COMMITTED);
+
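+ /* Program the 13 VDR address/data sets; data[i][0] (MSB) goes to DATAY(i, 3). */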
+ for (i = 0; i <= 12; i++) {
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_MSB(i),
+ crtc_state->dpll_hw_state.ltpll.addr_msb[i],
+ MB_WRITE_COMMITTED);
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_X_ADDR_LSB(i),
+ crtc_state->dpll_hw_state.ltpll.addr_lsb[i],
+ MB_WRITE_COMMITTED);
+
+ for (j = 3, k = 0; j >= 0; j--, k++)
+ intel_lt_phy_write(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_X_DATAY(i, j),
+ crtc_state->dpll_hw_state.ltpll.data[i][k],
+ MB_WRITE_COMMITTED);
+ }
+}
+
+static void
+intel_lt_phy_enable_disable_tx(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool lane_reversal = dig_port->lane_reversal;
+ u8 lane_count = crtc_state->lane_count;
+ bool is_dp_alt =
+ intel_tc_port_in_dp_alt_mode(dig_port);
+ enum intel_tc_pin_assignment tc_pin =
+ intel_tc_port_get_pin_assignment(dig_port);
+ u8 transmitter_mask = 0;
+
+ /*
+ * We have two transmitters per PHY lane and a total of 2 PHY lanes,
+ * so 4 transmitters overall. Prepare a mask of the lanes that need to
+ * be activated and the transmitters that need to be activated for
+ * each lane: TX 0,1 correspond to LANE0 and TX 2,3 correspond to LANE1.
+ */
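+ /*
+ * For example, outside DP-alt mode a non-reversed 2-lane link enables
+ * TX0/TX1 on LANE0, while a lane-reversed one enables TX2/TX3 on LANE1.
+ */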
+
+ switch (lane_count) {
+ case 1:
+ transmitter_mask = lane_reversal ? REG_BIT8(3) : REG_BIT8(0);
+ if (is_dp_alt) {
+ if (tc_pin == INTEL_TC_PIN_ASSIGNMENT_D)
+ transmitter_mask = REG_BIT8(0);
+ else
+ transmitter_mask = REG_BIT8(1);
+ }
+ break;
+ case 2:
+ transmitter_mask = lane_reversal ? REG_GENMASK8(3, 2) : REG_GENMASK8(1, 0);
+ if (is_dp_alt)
+ transmitter_mask = REG_GENMASK8(1, 0);
+ break;
+ case 3:
+ transmitter_mask = lane_reversal ? REG_GENMASK8(3, 1) : REG_GENMASK8(2, 0);
+ if (is_dp_alt)
+ transmitter_mask = REG_GENMASK8(2, 0);
+ break;
+ case 4:
+ transmitter_mask = REG_GENMASK8(3, 0);
+ break;
+ default:
+ MISSING_CASE(lane_count);
+ transmitter_mask = REG_GENMASK8(3, 0);
+ break;
+ }
+
+ if (transmitter_mask & BIT(0)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(0),
+ 0, LT_PHY_TXY_CTL10_MAC(0), 0);
+ }
+
+ if (transmitter_mask & BIT(1)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE0, LT_PHY_TXY_CTL10(1),
+ 0, LT_PHY_TXY_CTL10_MAC(1), 0);
+ }
+
+ if (transmitter_mask & BIT(2)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(0),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(0),
+ 0, LT_PHY_TXY_CTL10_MAC(0), 0);
+ }
+
+ if (transmitter_mask & BIT(3)) {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
+ LT_PHY_TX_LANE_ENABLE, LT_PHY_TXY_CTL10_MAC(1),
+ LT_PHY_TX_LANE_ENABLE);
+ } else {
+ intel_lt_phy_p2p_write(encoder, INTEL_LT_PHY_LANE1, LT_PHY_TXY_CTL10(1),
+ 0, LT_PHY_TXY_CTL10_MAC(1), 0);
+ }
+}
+
+void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ bool lane_reversal = dig_port->lane_reversal;
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ enum port port = encoder->port;
+ intel_wakeref_t wakeref = 0;
+ u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
+ XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
+ : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
+ u8 rate_update;
+
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ /* 1. Enable MacCLK at default 162 MHz frequency. */
+ intel_lt_phy_lane_reset(encoder, crtc_state->lane_count);
+
+ /* 2. Program PORT_CLOCK_CTL register to configure clock muxes, gating, and SSC. */
+ intel_lt_phy_program_port_clock_ctl(encoder, crtc_state, lane_reversal);
+
+ /* 3. Change owned PHY lanes power to Ready state. */
+ intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
+ XELPDP_P2_STATE_READY);
+
+ /*
+ * 4. Read the PHY message bus VDR register PHY_VDR_0_Config check enabled PLL type,
+ * encoded rate and encoded mode.
+ */
+ if (intel_lt_phy_config_changed(encoder, crtc_state)) {
+ /*
+ * 5. Program the PHY internal PLL registers over PHY message bus for the desired
+ * frequency and protocol type
+ */
+ intel_lt_phy_program_pll(encoder, crtc_state);
+
+ /* 6. Use the P2P transaction flow */
+ /*
+ * 6.1. Set the PHY VDR register 0xCC4[Rate Control VDR Update] = 1 over PHY message
+ * bus for Owned PHY Lanes.
+ */
+ /*
+ * 6.2. Poll for P2P Transaction Ready = "1" and read the MAC message bus VDR
+ * register at offset 0xC00 for Owned PHY Lanes*.
+ */
+ /* 6.3. Clear P2P transaction Ready bit. */
+ intel_lt_phy_p2p_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE,
+ LT_PHY_RATE_CONTROL_VDR_UPDATE, LT_PHY_MAC_VDR,
+ LT_PHY_PCLKIN_GATE);
+
+ /* 7. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0), 0);
+
+ /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNOFF_LATENCY_US))
+ drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n",
+ phy_name(phy));
+
+ /*
+ * 9. Follow the Display Voltage Frequency Switching - Sequence Before Frequency
+ * Change. We handle this step in bxt_set_cdclk().
+ */
+ /* 10. Program DDI_CLK_VALFREQ to match intended DDI clock frequency. */
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port),
+ crtc_state->port_clock);
+
+ /* 11. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 1. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0));
+
+ /* 12. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 1. */
+ if (intel_de_wait_for_set_ms(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNON_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c PLL MacCLK ack assertion timeout\n",
+ phy_name(phy));
+
+ /*
+ * 13. Ungate the forward clock by setting
+ * PORT_CLOCK_CTL[Forward Clock Ungate] = 1.
+ */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_FORWARD_CLOCK_UNGATE,
+ XELPDP_FORWARD_CLOCK_UNGATE);
+
+ /* 14. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+ /*
+ * 15. Clear the PHY VDR register 0xCC4[Rate Control VDR Update] over
+ * PHY message bus for Owned PHY Lanes.
+ */
+ rate_update = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_RATE_UPDATE);
+ rate_update &= ~LT_PHY_RATE_CONTROL_VDR_UPDATE;
+ intel_lt_phy_write(encoder, owned_lane_mask, LT_PHY_RATE_UPDATE,
+ rate_update, MB_WRITE_COMMITTED);
+
+ /* 16. Poll for PORT_BUF_CTL2 register PHY Pulse Status = 1 for Owned PHY Lanes. */
+ if (intel_de_wait_for_set_ms(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ XE3PLPD_RATE_CALIB_DONE_LATENCY_MS))
+ drm_warn(display->drm, "PHY %c PLL rate not changed\n",
+ phy_name(phy));
+
+ /* 17. SW clears PORT_BUF_CTL2 [PHY Pulse Status]. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+ } else {
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), crtc_state->port_clock);
+ }
+
+ /*
+ * 18. Follow the Display Voltage Frequency Switching - Sequence After Frequency Change.
+ * We handle this step in bxt_set_cdclk()
+ */
+ /* 19. Move the PHY powerdown state to Active and program to enable/disable transmitters */
+ intel_lt_phy_powerdown_change_sequence(encoder, owned_lane_mask,
+ XELPDP_P0_STATE_ACTIVE);
+
+ intel_lt_phy_enable_disable_tx(encoder, crtc_state);
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_pll_disable(struct intel_encoder *encoder)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ enum phy phy = intel_encoder_to_phy(encoder);
+ enum port port = encoder->port;
+ intel_wakeref_t wakeref;
+ u8 owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ u32 lane_pipe_reset = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XELPDP_LANE_PIPE_RESET(0) |
+ XELPDP_LANE_PIPE_RESET(1))
+ : XELPDP_LANE_PIPE_RESET(0);
+ u32 lane_phy_current_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XELPDP_LANE_PHY_CURRENT_STATUS(0) |
+ XELPDP_LANE_PHY_CURRENT_STATUS(1))
+ : XELPDP_LANE_PHY_CURRENT_STATUS(0);
+ u32 lane_phy_pulse_status = owned_lane_mask == INTEL_LT_PHY_BOTH_LANES
+ ? (XE3PLPDP_LANE_PHY_PULSE_STATUS(0) |
+ XE3PLPDP_LANE_PHY_PULSE_STATUS(1))
+ : XE3PLPDP_LANE_PHY_PULSE_STATUS(0);
+
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ /* 1. Clear PORT_BUF_CTL2 [PHY Pulse Status]. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+
+ /* 2. Set PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> Pipe Reset to 1. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port), lane_pipe_reset,
+ lane_pipe_reset);
+
+ /* 3. Poll for PORT_BUF_CTL2<port> Lane<PHY Lanes Owned> PHY Current Status == 1. */
+ if (intel_de_wait_for_set_us(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_current_status,
+ XE3PLPD_RESET_START_LATENCY_US))
+ drm_warn(display->drm, "PHY %c failed to reset lane\n",
+ phy_name(phy));
+
+ /* 4. Clear for PHY pulse status on owned PHY lanes. */
+ intel_de_rmw(display, XELPDP_PORT_BUF_CTL2(display, port),
+ lane_phy_pulse_status,
+ lane_phy_pulse_status);
+
+ /*
+ * 5. Follow the Display Voltage Frequency Switching -
+ * Sequence Before Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+ /* 6. Program PORT_CLOCK_CTL[PCLK PLL Request LN0] = 0. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_REQUEST(0), 0);
+
+ /* 7. Program DDI_CLK_VALFREQ to 0. */
+ intel_de_write(display, DDI_CLK_VALFREQ(encoder->port), 0);
+
+ /* 8. Poll for PORT_CLOCK_CTL[PCLK PLL Ack LN0]= 0. */
+ if (intel_de_wait_for_clear_us(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_LANE_PCLK_PLL_ACK(0),
+ XE3PLPD_MACCLK_TURNOFF_LATENCY_US))
+ drm_warn(display->drm, "PHY %c PLL MacCLK ack deassertion timeout\n",
+ phy_name(phy));
+
+ /*
+ * 9. Follow the Display Voltage Frequency Switching -
+ * Sequence After Frequency Change. We handle this step in bxt_set_cdclk().
+ */
+ /* 10. Program PORT_CLOCK_CTL register to disable and gate clocks. */
+ intel_de_rmw(display, XELPDP_PORT_CLOCK_CTL(display, port),
+ XELPDP_DDI_CLOCK_SELECT_MASK(display) | XELPDP_FORWARD_CLOCK_UNGATE, 0);
+
+ /* 11. Program PORT_BUF_CTL5[MacCLK Reset_0] = 1 to assert MacCLK reset. */
+ intel_de_rmw(display, XE3PLPD_PORT_BUF_CTL5(port),
+ XE3PLPD_MACCLK_RESET_0, XE3PLPD_MACCLK_RESET_0);
+
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ const struct intel_ddi_buf_trans *trans;
+ u8 owned_lane_mask;
+ intel_wakeref_t wakeref;
+ int n_entries, ln;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(display->drm, !trans)) {
+ intel_lt_phy_transaction_end(encoder, wakeref);
+ return;
+ }
+
+ for (ln = 0; ln < crtc_state->lane_count; ln++) {
+ int level = intel_ddi_level(encoder, crtc_state, ln);
+ int lane = ln / 2;
+ int tx = ln % 2;
+ u8 lane_mask = lane == 0 ? INTEL_LT_PHY_LANE0 : INTEL_LT_PHY_LANE1;
+
+ if (!(lane_mask & owned_lane_mask))
+ continue;
+
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL8(tx),
+ LT_PHY_TX_SWING_LEVEL_MASK | LT_PHY_TX_SWING_MASK,
+ LT_PHY_TX_SWING_LEVEL(trans->entries[level].lt.txswing_level) |
+ LT_PHY_TX_SWING(trans->entries[level].lt.txswing),
+ MB_WRITE_COMMITTED);
+
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL2(tx),
+ LT_PHY_TX_CURSOR_MASK,
+ LT_PHY_TX_CURSOR(trans->entries[level].lt.pre_cursor),
+ MB_WRITE_COMMITTED);
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL3(tx),
+ LT_PHY_TX_CURSOR_MASK,
+ LT_PHY_TX_CURSOR(trans->entries[level].lt.main_cursor),
+ MB_WRITE_COMMITTED);
+ intel_lt_phy_rmw(encoder, lane_mask, LT_PHY_TXY_CTL4(tx),
+ LT_PHY_TX_CURSOR_MASK,
+ LT_PHY_TX_CURSOR(trans->entries[level].lt.post_cursor),
+ MB_WRITE_COMMITTED);
+ }
+
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_dump_hw_state(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *hw_state)
+{
+ int i, j;
+
+ drm_dbg_kms(display->drm, "lt_phy_pll_hw_state:\n");
+ for (i = 0; i < 3; i++) {
+ drm_dbg_kms(display->drm, "config[%d] = 0x%.4x,\n",
+ i, hw_state->config[i]);
+ }
+
+ for (i = 0; i <= 12; i++)
+ for (j = 3; j >= 0; j--)
+ drm_dbg_kms(display->drm, "vdr_data[%d][%d] = 0x%.4x,\n",
+ i, j, hw_state->data[i][j]);
+}
+
+bool
+intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b)
+{
+ if (memcmp(&a->config, &b->config, sizeof(a->config)) != 0)
+ return false;
+
+ if (memcmp(&a->data, &b->data, sizeof(a->data)) != 0)
+ return false;
+
+ return true;
+}
+
+void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct intel_lt_phy_pll_state *pll_state)
+{
+ u8 owned_lane_mask;
+ u8 lane;
+ intel_wakeref_t wakeref;
+ int i, j, k;
+
+ pll_state->tbt_mode = intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder));
+ if (pll_state->tbt_mode)
+ return;
+
+ owned_lane_mask = intel_lt_phy_get_owned_lane_mask(encoder);
+ lane = owned_lane_mask & INTEL_LT_PHY_LANE0 ? : INTEL_LT_PHY_LANE1;
+ wakeref = intel_lt_phy_transaction_begin(encoder);
+
+ pll_state->config[0] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_0_CONFIG);
+ pll_state->config[1] = intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0, LT_PHY_VDR_1_CONFIG);
+ pll_state->config[2] = intel_lt_phy_read(encoder, lane, LT_PHY_VDR_2_CONFIG);
+
+ for (i = 0; i <= 12; i++) {
+ for (j = 3, k = 0; j >= 0; j--, k++)
+ pll_state->data[i][k] =
+ intel_lt_phy_read(encoder, INTEL_LT_PHY_LANE0,
+ LT_PHY_VDR_X_DATAY(i, j));
+ }
+
+ pll_state->clock =
+ intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_lt_phy_transaction_end(encoder, wakeref);
+}
+
+void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_display *display = to_intel_display(state);
+ struct intel_digital_port *dig_port;
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ struct intel_encoder *encoder;
+ struct intel_lt_phy_pll_state pll_hw_state = {};
+ const struct intel_lt_phy_pll_state *pll_sw_state = &new_crtc_state->dpll_hw_state.ltpll;
+ int clock;
+ int i, j;
+
+ if (DISPLAY_VER(display) < 35)
+ return;
+
+ if (!new_crtc_state->hw.active)
+ return;
+
+ /* intel_get_crtc_new_encoder() only works for modeset/fastset commits */
+ if (!intel_crtc_needs_modeset(new_crtc_state) &&
+ !intel_crtc_needs_fastset(new_crtc_state))
+ return;
+
+ encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+ intel_lt_phy_pll_readout_hw_state(encoder, new_crtc_state, &pll_hw_state);
+ clock = intel_lt_phy_calc_port_clock(encoder, new_crtc_state);
+
+ dig_port = enc_to_dig_port(encoder);
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.clock != clock,
+ "[CRTC:%d:%s] mismatch in LT PHY: Register CLOCK (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ clock, pll_hw_state.clock);
+
+ for (i = 0; i < 3; i++) {
+ INTEL_DISPLAY_STATE_WARN(display, pll_hw_state.config[i] != pll_sw_state->config[i],
+ "[CRTC:%d:%s] mismatch in LT PHY PLL CONFIG%d: (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ pll_sw_state->config[i], pll_hw_state.config[i]);
+ }
+
+ for (i = 0; i <= 12; i++) {
+ for (j = 3; j >= 0; j--)
+ INTEL_DISPLAY_STATE_WARN(display,
+ pll_hw_state.data[i][j] !=
+ pll_sw_state->data[i][j],
+ "[CRTC:%d:%s] mismatch in LT PHY PLL DATA[%d][%d]: (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i, j,
+ pll_sw_state->data[i][j], pll_hw_state.data[i][j]);
+ }
+}
+
+void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_mtl_tbt_pll_enable(encoder, crtc_state);
+ else
+ intel_lt_phy_pll_enable(encoder, crtc_state);
+}
+
+void intel_xe3plpd_pll_disable(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ intel_mtl_tbt_pll_disable(encoder);
+ else
+ intel_lt_phy_pll_disable(encoder);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.h b/drivers/gpu/drm/i915/display/intel_lt_phy.h
new file mode 100644
index 000000000000..b7911acd7dcd
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_LT_PHY_H__
+#define __INTEL_LT_PHY_H__
+
+#include <linux/types.h>
+
+struct intel_atomic_state;
+struct intel_display;
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_crtc;
+struct intel_lt_phy_pll_state;
+
+void intel_lt_phy_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_lt_phy_pll_disable(struct intel_encoder *encoder);
+int
+intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder);
+int intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_lt_phy_dump_hw_state(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *hw_state);
+bool
+intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
+ const struct intel_lt_phy_pll_state *b);
+void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ struct intel_lt_phy_pll_state *pll_state);
+void intel_lt_phy_pll_state_verify(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int
+intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
+ u32 frequency_khz);
+void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+void intel_xe3plpd_pll_disable(struct intel_encoder *encoder);
+
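+/* LT PHY is present from display version 35 onwards */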
+#define HAS_LT_PHY(display) (DISPLAY_VER(display) >= 35)
+
+#endif /* __INTEL_LT_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
new file mode 100644
index 000000000000..98ccc069a69b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __INTEL_LT_PHY_REGS_H__
+#define __INTEL_LT_PHY_REGS_H__
+
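+/* Message bus and MAC clock/reset latency budgets for the Xe3p LPD PHY */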
+#define XE3PLPD_MSGBUS_TIMEOUT_FAST_US 500
+#define XE3PLPD_MACCLK_TURNON_LATENCY_MS 2
+#define XE3PLPD_MACCLK_TURNOFF_LATENCY_US 1
+#define XE3PLPD_RATE_CALIB_DONE_LATENCY_MS 1
+#define XE3PLPD_RESET_START_LATENCY_US 10
+#define XE3PLPD_PWRDN_TO_RDY_LATENCY_US 4
+#define XE3PLPD_RESET_END_LATENCY_MS 2
+
+/* LT Phy MAC Register */
+#define LT_PHY_MAC_VDR _MMIO(0xC00)
+#define LT_PHY_PCLKIN_GATE REG_BIT8(0)
+
+/* LT Phy Pipe Spec Registers */
+#define LT_PHY_TXY_CTL8(idx) (0x408 + (0x200 * (idx)))
+#define LT_PHY_TX_SWING_LEVEL_MASK REG_GENMASK8(7, 4)
+#define LT_PHY_TX_SWING_LEVEL(val) REG_FIELD_PREP8(LT_PHY_TX_SWING_LEVEL_MASK, val)
+#define LT_PHY_TX_SWING_MASK REG_BIT8(3)
+#define LT_PHY_TX_SWING(val) REG_FIELD_PREP8(LT_PHY_TX_SWING_MASK, val)
+
+#define LT_PHY_TXY_CTL2(idx) (0x402 + (0x200 * (idx)))
+#define LT_PHY_TXY_CTL3(idx) (0x403 + (0x200 * (idx)))
+#define LT_PHY_TXY_CTL4(idx) (0x404 + (0x200 * (idx)))
+#define LT_PHY_TX_CURSOR_MASK REG_GENMASK8(5, 0)
+#define LT_PHY_TX_CURSOR(val) REG_FIELD_PREP8(LT_PHY_TX_CURSOR_MASK, val)
+
+#define LT_PHY_TXY_CTL10(idx) (0x40A + (0x200 * (idx)))
+#define LT_PHY_TXY_CTL10_MAC(idx) _MMIO(LT_PHY_TXY_CTL10(idx))
+#define LT_PHY_TX_LANE_ENABLE REG_BIT8(0)
+
+/* LT Phy Vendor Registers */
+#define LT_PHY_VDR_0_CONFIG 0xC02
+#define LT_PHY_VDR_DP_PLL_ENABLE REG_BIT(7)
+#define LT_PHY_VDR_1_CONFIG 0xC03
+#define LT_PHY_VDR_RATE_ENCODING_MASK REG_GENMASK8(6, 3)
+#define LT_PHY_VDR_MODE_ENCODING_MASK REG_GENMASK8(2, 0)
+#define LT_PHY_VDR_2_CONFIG 0xCC3
+
+#define LT_PHY_VDR_X_ADDR_MSB(idx) (0xC04 + 0x6 * (idx))
+#define LT_PHY_VDR_X_ADDR_LSB(idx) (0xC05 + 0x6 * (idx))
+
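+/* DATA bytes for index idx occupy 0xC06..0xC09 + 0x6 * idx; y = 3 maps to the lowest address */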
+#define LT_PHY_VDR_X_DATAY(idx, y) ((0xC06 + (3 - (y))) + 0x6 * (idx))
+
+#define LT_PHY_RATE_UPDATE 0xCC4
+#define LT_PHY_RATE_CONTROL_VDR_UPDATE REG_BIT8(0)
+
+#define _XE3PLPD_PORT_BUF_CTL5(idx) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_A, \
+ _XELPDP_PORT_BUF_CTL1_LN0_B, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC1, \
+ _XELPDP_PORT_BUF_CTL1_LN0_USBC2) \
+ + 0x34)
+#define XE3PLPD_PORT_BUF_CTL5(port) _XE3PLPD_PORT_BUF_CTL5(__xe2lpd_port_idx(port))
+#define XE3PLPD_MACCLK_RESET_0 REG_BIT(11)
+#define XE3PLPD_MACCLK_RATE_MASK REG_GENMASK(4, 0)
+#define XE3PLPD_MACCLK_RATE_DEF REG_FIELD_PREP(XE3PLPD_MACCLK_RATE_MASK, 0x1F)
+
+#define _XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(idx, lane) _MMIO(_PICK_EVEN_2RANGES(idx, PORT_TC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_A, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_B, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC1, \
+ _XELPDP_PORT_M2P_MSGBUS_CTL_LN0_USBC2) \
+ + 0x60 + (lane) * 0x4)
+#define XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(port, lane) _XE3PLPD_PORT_P2M_MSGBUS_STATUS_P2P(__xe2lpd_port_idx(port), \
+ lane)
+#define XE3LPD_PORT_P2M_ADDR_MASK REG_GENMASK(11, 0)
+
+#define PLL_REG4_ADDR 0x8510
+#define PLL_REG3_ADDR 0x850C
+#define PLL_REG5_ADDR 0x8514
+#define PLL_REG57_ADDR 0x85E4
+#define PLL_LF_ADDR 0x860C
+#define PLL_TDC_ADDR 0x8610
+#define PLL_SSC_ADDR 0x8614
+#define PLL_BIAS2_ADDR 0x8618
+#define PLL_BIAS_TRIM_ADDR 0x8648
+#define PLL_DCO_MED_ADDR 0x8640
+#define PLL_DCO_FINE_ADDR 0x864C
+#define PLL_SSC_INJ_ADDR 0x8624
+#define PLL_SURV_BONUS_ADDR 0x8644
+#define PLL_TYPE_OFFSET 0x200
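+/* pll_type selects one of two mirrored register banks spaced 0x200 apart */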
+#define PLL_REG_ADDR(base, pll_type) ((pll_type) ? (base) + PLL_TYPE_OFFSET : (base))
+
+#endif /* __INTEL_LT_PHY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 7e48a235c99f..89aeb4fb340e 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -48,6 +48,7 @@
#include "intel_dpll.h"
#include "intel_fdi.h"
#include "intel_gmbus.h"
+#include "intel_link_bw.h"
#include "intel_lvds.h"
#include "intel_lvds_regs.h"
#include "intel_panel.h"
@@ -328,7 +329,7 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
intel_de_rmw(display, PP_CONTROL(display, 0), 0, PANEL_POWER_ON);
intel_de_posting_read(display, lvds_encoder->reg);
- if (intel_de_wait_for_set(display, PP_STATUS(display, 0), PP_ON, 5000))
+ if (intel_de_wait_for_set_ms(display, PP_STATUS(display, 0), PP_ON, 5000))
drm_err(display->drm,
"timed out waiting for panel to power on\n");
@@ -344,7 +345,7 @@ static void intel_disable_lvds(struct intel_atomic_state *state,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
intel_de_rmw(display, PP_CONTROL(display, 0), PANEL_POWER_ON, 0);
- if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_ON, 1000))
+ if (intel_de_wait_for_clear_ms(display, PP_STATUS(display, 0), PP_ON, 1000))
drm_err(display->drm,
"timed out waiting for panel to power off\n");
@@ -383,7 +384,7 @@ static void intel_lvds_shutdown(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
- if (intel_de_wait_for_clear(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
+ if (intel_de_wait_for_clear_ms(display, PP_STATUS(display, 0), PP_CYCLE_DELAY_ACTIVE, 5000))
drm_err(display->drm,
"timed out waiting for panel power cycle delay\n");
}
@@ -433,7 +434,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(display)) {
crtc_state->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(crtc_state))
+ if (!intel_link_bw_compute_pipe_bpp(crtc_state))
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 8415f3d703ed..0dcb0597879a 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -19,6 +19,7 @@
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_dbuf_bw.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display.h"
@@ -176,6 +177,7 @@ static void intel_crtc_disable_noatomic_complete(struct intel_crtc *crtc)
intel_cdclk_crtc_disable_noatomic(crtc);
skl_wm_crtc_disable_noatomic(crtc);
intel_bw_crtc_disable_noatomic(crtc);
+ intel_dbuf_bw_crtc_disable_noatomic(crtc);
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe, 0);
}
@@ -851,18 +853,23 @@ static void intel_modeset_readout_hw_state(struct intel_display *display)
*/
if (plane_state->uapi.visible && plane->min_cdclk) {
if (crtc_state->double_wide || DISPLAY_VER(display) >= 10)
- crtc_state->min_cdclk[plane->id] =
+ crtc_state->plane_min_cdclk[plane->id] =
DIV_ROUND_UP(crtc_state->pixel_rate, 2);
else
- crtc_state->min_cdclk[plane->id] =
+ crtc_state->plane_min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] min_cdclk %d kHz\n",
plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id]);
+ crtc_state->plane_min_cdclk[plane->id]);
}
+ crtc_state->min_cdclk = intel_crtc_min_cdclk(crtc_state);
+
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] min_cdclk %d kHz\n",
+ crtc->base.base.id, crtc->base.name, crtc_state->min_cdclk);
+
intel_pmdemand_update_port_clock(display, pmdemand_state, pipe,
crtc_state->port_clock);
}
@@ -872,6 +879,7 @@ static void intel_modeset_readout_hw_state(struct intel_display *display)
intel_wm_get_hw_state(display);
intel_bw_update_hw_state(display);
+ intel_dbuf_bw_update_hw_state(display);
intel_cdclk_update_hw_state(display);
intel_pmdemand_init_pmdemand_params(display, pmdemand_state);
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index f2f6b9d9afa1..b361a77cd235 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -16,6 +16,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
+#include "intel_lt_phy.h"
#include "intel_modeset_verify.h"
#include "intel_snps_phy.h"
#include "skl_watermark.h"
@@ -246,6 +247,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
intel_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
intel_cx0pll_state_verify(state, crtc);
+ intel_lt_phy_pll_state_verify(state, crtc);
}
void intel_modeset_verify_disabled(struct intel_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 81efdb17fc0c..cbc220310813 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -28,13 +28,13 @@
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/dmi.h>
+#include <linux/iopoll.h>
#include <acpi/video.h>
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_acpi.h"
#include "intel_backlight.h"
#include "intel_display_core.h"
@@ -357,10 +357,12 @@ static int swsci(struct intel_display *display,
pci_write_config_word(pdev, SWSCI, swsci_val);
/* Poll for the result. */
-#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
- if (wait_for(C, dslp)) {
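+ /* dslp is in ms; poll the SCI indicator every 1 ms until it deasserts */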
+ ret = poll_timeout_us(scic = swsci->scic,
+ (scic & SWSCI_SCIC_INDICATOR) == 0,
+ 1000, dslp * 1000, false);
+ if (ret) {
drm_dbg(display->drm, "SWSCI request timed out\n");
- return -ETIMEDOUT;
+ return ret;
}
scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >>
@@ -1299,8 +1301,6 @@ DEFINE_SHOW_ATTRIBUTE(intel_opregion);
void intel_opregion_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
-
- debugfs_create_file("i915_opregion", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_opregion", 0444, display->drm->debugfs_root,
display, &intel_opregion_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 159a5f998ea0..88eb7ae5765c 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -27,6 +27,7 @@
*/
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_object_frontbuffer.h"
@@ -217,10 +218,9 @@ static void i830_overlay_clock_gating(struct intel_display *display,
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- intel_de_write(display, DSPCLK_GATE_D(display), 0);
+ intel_de_write(display, DSPCLK_GATE_D, 0);
else
- intel_de_write(display, DSPCLK_GATE_D(display),
- OVRUNIT_CLOCK_GATE_DISABLE);
+ intel_de_write(display, DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
pci_bus_read_config_byte(pdev->bus,
@@ -308,8 +308,6 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
intel_frontbuffer_put(overlay->frontbuffer);
overlay->frontbuffer = frontbuffer;
- intel_frontbuffer_flip_prepare(display, INTEL_FRONTBUFFER_OVERLAY(pipe));
-
overlay->old_vma = overlay->vma;
if (vma)
overlay->vma = i915_vma_get(vma);
@@ -366,7 +364,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
if (drm_WARN_ON(display->drm, !vma))
return;
- intel_frontbuffer_flip_complete(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+ intel_frontbuffer_flip(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
i915_vma_unpin(vma);
i915_vma_put(vma);
@@ -822,8 +820,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_pin_section;
}
- i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
-
if (!overlay->active) {
const struct intel_crtc_state *crtc_state =
overlay->crtc->config;
diff --git a/drivers/gpu/drm/i915/display/intel_panic.c b/drivers/gpu/drm/i915/display/intel_panic.c
new file mode 100644
index 000000000000..7311ce4e8b6c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_panic.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_panic.h>
+
+#include "gem/i915_gem_object.h"
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_panic.h"
+
+struct intel_panic *intel_panic_alloc(void)
+{
+ return i915_gem_object_alloc_panic();
+}
+
+int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
+{
+ struct intel_framebuffer *fb = sb->private;
+ struct drm_gem_object *obj = intel_fb_bo(&fb->base);
+
+ return i915_gem_object_panic_setup(panic, sb, obj, fb->panic_tiling);
+}
+
+void intel_panic_finish(struct intel_panic *panic)
+{
+ i915_gem_object_panic_finish(panic);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_panic.h b/drivers/gpu/drm/i915/display/intel_panic.h
new file mode 100644
index 000000000000..afb472e924aa
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_panic.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __INTEL_PANIC_H__
+#define __INTEL_PANIC_H__
+
+struct drm_scanout_buffer;
+struct intel_panic;
+
+struct intel_panic *intel_panic_alloc(void);
+int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb);
+void intel_panic_finish(struct intel_panic *panic);
+
+#endif /* __INTEL_PANIC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pch.c b/drivers/gpu/drm/i915/display/intel_pch.c
index 469e8a3cfb49..65359a36df48 100644
--- a/drivers/gpu/drm/i915/display/intel_pch.c
+++ b/drivers/gpu/drm/i915/display/intel_pch.c
@@ -5,8 +5,8 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_display_core.h"
+#include "intel_display_utils.h"
#include "intel_pch.h"
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
@@ -328,7 +328,7 @@ void intel_pch_detect(struct intel_display *display)
"Display disabled, reverting to NOP PCH\n");
display->pch_type = PCH_NOP;
} else if (!pch) {
- if (i915_run_as_guest() && HAS_DISPLAY(display)) {
+ if (intel_display_run_as_guest(display) && HAS_DISPLAY(display)) {
intel_virt_detect_pch(display, &id, &pch_type);
display->pch_type = pch_type;
} else {
diff --git a/drivers/gpu/drm/i915/display/intel_pch.h b/drivers/gpu/drm/i915/display/intel_pch.h
index cf4dab1b98bf..19cac7412d0a 100644
--- a/drivers/gpu/drm/i915/display/intel_pch.h
+++ b/drivers/gpu/drm/i915/display/intel_pch.h
@@ -6,8 +6,6 @@
#ifndef __INTEL_PCH__
#define __INTEL_PCH__
-#include "intel_display_conversion.h"
-
struct intel_display;
/*
@@ -36,7 +34,7 @@ enum intel_pch {
PCH_LNL,
};
-#define INTEL_PCH_TYPE(_display) (__to_intel_display(_display)->pch_type)
+#define INTEL_PCH_TYPE(_display) ((_display)->pch_type)
#define HAS_PCH_DG2(display) (INTEL_PCH_TYPE(display) == PCH_DG2)
#define HAS_PCH_ADP(display) (INTEL_PCH_TYPE(display) == PCH_ADP)
#define HAS_PCH_DG1(display) (INTEL_PCH_TYPE(display) == PCH_DG1)
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 3456c794e0e7..16619f7be5f8 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -305,7 +305,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
}
intel_de_write(display, reg, val | TRANS_ENABLE);
- if (intel_de_wait_for_set(display, reg, TRANS_STATE_ENABLE, 100))
+ if (intel_de_wait_for_set_ms(display, reg, TRANS_STATE_ENABLE, 100))
drm_err(display->drm, "failed to enable transcoder %c\n",
pipe_name(pipe));
}
@@ -326,7 +326,7 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc)
reg = PCH_TRANSCONF(pipe);
intel_de_rmw(display, reg, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(display, reg, TRANS_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear_ms(display, reg, TRANS_STATE_ENABLE, 50))
drm_err(display->drm, "failed to disable transcoder %c\n",
pipe_name(pipe));
@@ -572,8 +572,8 @@ static void lpt_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_INTERLACE_PROGRESSIVE;
intel_de_write(display, LPT_TRANSCONF, val);
- if (intel_de_wait_for_set(display, LPT_TRANSCONF,
- TRANS_STATE_ENABLE, 100))
+ if (intel_de_wait_for_set_ms(display, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 100))
drm_err(display->drm, "Failed to enable PCH transcoder\n");
}
@@ -581,8 +581,8 @@ static void lpt_disable_pch_transcoder(struct intel_display *display)
{
intel_de_rmw(display, LPT_TRANSCONF, TRANS_ENABLE, 0);
/* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(display, LPT_TRANSCONF,
- TRANS_STATE_ENABLE, 50))
+ if (intel_de_wait_for_clear_ms(display, LPT_TRANSCONF,
+ TRANS_STATE_ENABLE, 50))
drm_err(display->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index d3c5255bf1a8..9a89bb6dcf65 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -6,10 +6,10 @@
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"
@@ -17,16 +17,20 @@
static void lpt_fdi_reset_mphy(struct intel_display *display)
{
+ int ret;
+
intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL);
- if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ ret = intel_de_wait_for_set_us(display, SOUTH_CHICKEN2,
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100);
+ if (ret)
drm_err(display->drm, "FDI mPHY reset assert timeout\n");
intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0);
- if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ ret = intel_de_wait_for_clear_us(display, SOUTH_CHICKEN2,
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100);
+ if (ret)
drm_err(display->drm, "FDI mPHY reset de-assert timeout\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c
index 13541be4d6df..6dda496190e0 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.c
+++ b/drivers/gpu/drm/i915/display/intel_pfit.c
@@ -5,15 +5,16 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_lvds_regs.h"
#include "intel_pfit.h"
#include "intel_pfit_regs.h"
+#include "skl_scaler.h"
static int intel_pch_pfit_check_dst_window(const struct intel_crtc_state *crtc_state)
{
@@ -546,6 +547,16 @@ out:
return intel_gmch_pfit_check_timings(crtc_state);
}
+enum drm_mode_status
+intel_pfit_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes)
+{
+ return skl_scaler_mode_valid(display, mode, output_format,
+ num_joined_pipes);
+}
+
int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
diff --git a/drivers/gpu/drm/i915/display/intel_pfit.h b/drivers/gpu/drm/i915/display/intel_pfit.h
index ef34f9b49d09..c1bb0d1f344e 100644
--- a/drivers/gpu/drm/i915/display/intel_pfit.h
+++ b/drivers/gpu/drm/i915/display/intel_pfit.h
@@ -6,8 +6,12 @@
#ifndef __INTEL_PFIT_H__
#define __INTEL_PFIT_H__
+enum drm_mode_status;
+struct drm_display_mode;
struct drm_connector_state;
struct intel_crtc_state;
+struct intel_display;
+enum intel_output_format;
int intel_pfit_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -17,5 +21,9 @@ void ilk_pfit_get_config(struct intel_crtc_state *crtc_state);
void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state);
void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state);
void i9xx_pfit_get_config(struct intel_crtc_state *crtc_state);
-
+enum drm_mode_status
+intel_pfit_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes);
#endif /* __INTEL_PFIT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index c2b4b2254190..1f27643412f1 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -28,6 +28,8 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_atomic.h"
diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c
index 36fb07471deb..ab6a58530b39 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_plane.c
@@ -43,20 +43,20 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_object.h"
-#include "i915_scheduler_types.h"
-#include "i915_vma.h"
#include "i9xx_plane_regs.h"
-#include "intel_bo.h"
#include "intel_cdclk.h"
#include "intel_cursor.h"
+#include "intel_colorop.h"
#include "intel_display_rps.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "skl_scaler.h"
@@ -293,64 +293,21 @@ intel_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
rel_data_rate);
}
-int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane,
- bool *need_cdclk_calc)
+static void intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane)
{
- struct intel_display *display = to_intel_display(plane);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
- const struct intel_cdclk_state *cdclk_state;
- const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
if (!plane_state->uapi.visible || !plane->min_cdclk)
- return 0;
+ return;
- old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- new_crtc_state->min_cdclk[plane->id] =
+ new_crtc_state->plane_min_cdclk[plane->id] =
plane->min_cdclk(new_crtc_state, plane_state);
-
- /*
- * No need to check against the cdclk state if
- * the min cdclk for the plane doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to plane
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_crtc_state->min_cdclk[plane->id] <=
- old_crtc_state->min_cdclk[plane->id])
- return 0;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state))
- return PTR_ERR(cdclk_state);
-
- /*
- * No need to recalculate the cdclk state if
- * the min cdclk for the pipe doesn't increase.
- *
- * Ie. we only ever increase the cdclk due to plane
- * requirements. This can reduce back and forth
- * display blinking due to constant cdclk changes.
- */
- if (new_crtc_state->min_cdclk[plane->id] <=
- intel_cdclk_min_cdclk(cdclk_state, crtc->pipe))
- return 0;
-
- drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
- plane->base.base.id, plane->base.name,
- new_crtc_state->min_cdclk[plane->id],
- crtc->base.base.id, crtc->base.name,
- intel_cdclk_min_cdclk(cdclk_state, crtc->pipe));
- *need_cdclk_calc = true;
-
- return 0;
}
static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
@@ -380,6 +337,58 @@ intel_plane_copy_uapi_plane_damage(struct intel_plane_state *new_plane_state,
*damage = drm_plane_state_src(&new_uapi_plane_state->uapi);
}
+static bool
+intel_plane_colorop_replace_blob(struct intel_plane_state *plane_state,
+ struct intel_colorop *intel_colorop,
+ struct drm_property_blob *blob)
+{
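+ /* drm_property_replace_blob() reports whether the blob actually changed */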
+ if (intel_colorop->id == INTEL_PLANE_CB_CSC)
+ return drm_property_replace_blob(&plane_state->hw.ctm, blob);
+ else if (intel_colorop->id == INTEL_PLANE_CB_PRE_CSC_LUT)
+ return drm_property_replace_blob(&plane_state->hw.degamma_lut, blob);
+ else if (intel_colorop->id == INTEL_PLANE_CB_POST_CSC_LUT)
+ return drm_property_replace_blob(&plane_state->hw.gamma_lut, blob);
+ else if (intel_colorop->id == INTEL_PLANE_CB_3DLUT)
+ return drm_property_replace_blob(&plane_state->hw.lut_3d, blob);
+
+ return false;
+}
+
+static void
+intel_plane_color_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
+ const struct intel_plane_state *from_plane_state,
+ struct intel_crtc *crtc)
+{
+ struct drm_colorop *iter_colorop, *colorop;
+ struct drm_colorop_state *new_colorop_state;
+ struct drm_atomic_state *state = plane_state->uapi.state;
+ struct intel_colorop *intel_colorop;
+ struct drm_property_blob *blob;
+ struct intel_atomic_state *intel_atomic_state = to_intel_atomic_state(state);
+ struct intel_crtc_state *new_crtc_state = intel_atomic_state ?
+ intel_atomic_get_new_crtc_state(intel_atomic_state, crtc) : NULL;
+ bool changed = false;
+ int i = 0;
+
+ iter_colorop = plane_state->uapi.color_pipeline;
+
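+ /* Walk the plane's color pipeline, picking up colorop state updated in this commit */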
+ while (iter_colorop) {
+ for_each_new_colorop_in_state(state, colorop, new_colorop_state, i) {
+ if (new_colorop_state->colorop == iter_colorop) {
+ blob = new_colorop_state->bypass ? NULL : new_colorop_state->data;
+ intel_colorop = to_intel_colorop(colorop);
+ changed |= intel_plane_colorop_replace_blob(plane_state,
+ intel_colorop,
+ blob);
+ }
+ }
+ iter_colorop = iter_colorop->next;
+ }
+
+ if (new_crtc_state && changed)
+ new_crtc_state->plane_color_changed = true;
+}
+
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
const struct intel_plane_state *from_plane_state,
struct intel_crtc *crtc)
@@ -408,6 +417,8 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
plane_state->uapi.src = drm_plane_state_src(&from_plane_state->uapi);
plane_state->uapi.dst = drm_plane_state_dest(&from_plane_state->uapi);
+
+ intel_plane_color_copy_uapi_to_hw_state(plane_state, from_plane_state, crtc);
}
void intel_plane_copy_hw_state(struct intel_plane_state *plane_state,
@@ -436,7 +447,7 @@ void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
crtc_state->data_rate_y[plane->id] = 0;
crtc_state->rel_data_rate[plane->id] = 0;
crtc_state->rel_data_rate_y[plane->id] = 0;
- crtc_state->min_cdclk[plane->id] = 0;
+ crtc_state->plane_min_cdclk[plane->id] = 0;
plane_state->uapi.visible = false;
}
@@ -1095,6 +1106,9 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
DISPLAY_VERx100(display) == 3002) &&
src_x % 2 != 0)
hsub = 2;
+
+ if (DISPLAY_VER(display) == 35)
+ vsub = 2;
} else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
@@ -1173,7 +1187,6 @@ static int
intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
- struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
struct intel_plane *plane = to_intel_plane(_plane);
struct intel_display *display = to_intel_display(plane);
struct intel_plane_state *new_plane_state =
@@ -1222,8 +1235,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
goto unpin_fb;
if (new_plane_state->uapi.fence) {
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
+ i915_gem_fence_wait_priority_display(new_plane_state->uapi.fence);
intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
new_plane_state->uapi.fence);
@@ -1327,7 +1339,7 @@ static void intel_panic_flush(struct drm_plane *plane)
struct drm_framebuffer *fb = plane_state->hw.fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- intel_bo_panic_finish(intel_fb);
+ intel_panic_finish(intel_fb->panic);
if (crtc_state->enable_psr2_sel_fetch) {
/* Force a full update for psr2 */
@@ -1410,7 +1422,7 @@ static int intel_get_scanout_buffer(struct drm_plane *plane,
return -EOPNOTSUPP;
}
sb->private = intel_fb;
- ret = intel_bo_panic_setup(sb);
+ ret = intel_panic_setup(intel_fb->panic, sb);
if (ret)
return ret;
}
@@ -1747,10 +1759,8 @@ int intel_plane_atomic_check(struct intel_atomic_state *state)
return ret;
}
- return 0;
-}
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i)
+ intel_plane_calc_min_cdclk(state, plane);
-u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state)
-{
- return i915_ggtt_offset(plane_state->ggtt_vma);
+ return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h
index 4ef012c08fa4..4e99df9de3e8 100644
--- a/drivers/gpu/drm/i915/display/intel_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_plane.h
@@ -69,9 +69,6 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *intel_state);
-int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane,
- bool *need_cdclk_calc);
int intel_plane_check_clipping(struct intel_plane_state *plane_state,
struct intel_crtc_state *crtc_state,
int min_scale, int max_scale,
@@ -87,7 +84,6 @@ int intel_plane_add_affected(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_plane_atomic_check(struct intel_atomic_state *state);
-u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state);
bool intel_plane_format_mod_supported_async(struct drm_plane *plane,
u32 format,
u64 modifier);
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 4246173ed311..a1de1ec564d1 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
@@ -131,6 +133,7 @@ initial_plane_vma(struct intel_display *display,
struct drm_mm_node orig_mm = {};
struct i915_vma *vma;
resource_size_t phys_base;
+ unsigned int tiling;
u32 base, size;
u64 pinctl;
@@ -177,17 +180,19 @@ initial_plane_vma(struct intel_display *display,
i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
I915_CACHE_WT : I915_CACHE_NONE);
- switch (plane_config->tiling) {
+ tiling = intel_fb_modifier_to_tiling(plane_config->fb->base.modifier);
+
+ switch (tiling) {
case I915_TILING_NONE:
break;
case I915_TILING_X:
case I915_TILING_Y:
obj->tiling_and_stride =
plane_config->fb->base.pitches[0] |
- plane_config->tiling;
+ tiling;
break;
default:
- MISSING_CASE(plane_config->tiling);
+ MISSING_CASE(tiling);
goto err_obj;
}
@@ -360,6 +365,8 @@ valid_fb:
i915_vma_pin_fence(vma) == 0 && vma->fence)
plane_state->flags |= PLANE_HAS_FENCE;
+ plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma);
+
plane_state->uapi.src_x = 0;
plane_state->uapi.src_y = 0;
plane_state->uapi.src_w = fb->width << 16;
@@ -370,7 +377,7 @@ valid_fb:
plane_state->uapi.crtc_w = fb->width;
plane_state->uapi.crtc_h = fb->height;
- if (plane_config->tiling)
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
dev_priv->preserve_bios_swizzle = true;
plane_state->uapi.fb = fb;
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index d806c15db7ce..dc44a7a169c1 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -7,13 +7,14 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
+#include "intel_display_utils.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"
@@ -389,12 +390,12 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state)
static bool intel_pmdemand_check_prev_transaction(struct intel_display *display)
{
- return !(intel_de_wait_for_clear(display,
- XELPDP_INITIATE_PMDEMAND_REQUEST(1),
- XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
- intel_de_wait_for_clear(display,
- GEN12_DCPR_STATUS_1,
- XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
+ return !(intel_de_wait_for_clear_ms(display,
+ XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ XELPDP_PMDEMAND_REQ_ENABLE, 10) ||
+ intel_de_wait_for_clear_ms(display,
+ GEN12_DCPR_STATUS_1,
+ XELPDP_PMDEMAND_INFLIGHT_STATUS, 10));
}
void
@@ -461,9 +462,9 @@ static void intel_pmdemand_poll(struct intel_display *display)
u32 status;
int ret;
- ret = intel_de_wait_custom(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
- XELPDP_PMDEMAND_REQ_ENABLE, 0,
- 50, timeout_ms, &status);
+ ret = intel_de_wait_ms(display, XELPDP_INITIATE_PMDEMAND_REQUEST(1),
+ XELPDP_PMDEMAND_REQ_ENABLE, 0,
+ timeout_ms, &status);
if (ret == -ETIMEDOUT)
drm_err(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index b64d0b30f5b1..25692a547764 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -4,16 +4,18 @@
*/
#include <linux/debugfs.h>
+#include <linux/iopoll.h>
#include <drm/drm_print.h>
#include "g4x_dp.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_power_well.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
@@ -608,6 +610,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
+ int ret;
+ u32 val;
lockdep_assert_held(&display->pps.mutex);
@@ -624,13 +628,18 @@ static void wait_panel_status(struct intel_dp *intel_dp,
intel_de_read(display, pp_stat_reg),
intel_de_read(display, pp_ctrl_reg));
- if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
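+ /* Poll the panel status every 10 ms, for up to 5 s */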
+ ret = poll_timeout_us(val = intel_de_read(display, pp_stat_reg),
+ (val & mask) == value,
+ 10 * 1000, 5000 * 1000, true);
+ if (ret) {
drm_err(display->drm,
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
pps_name(intel_dp),
intel_de_read(display, pp_stat_reg),
intel_de_read(display, pp_ctrl_reg));
+ return;
+ }
drm_dbg_kms(display->drm, "Wait complete\n");
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 41988e193a41..08bca4573974 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "i915_reg.h"
@@ -39,9 +40,11 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
+#include "intel_dsb.h"
#include "intel_frontbuffer.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
@@ -49,6 +52,7 @@
#include "intel_snps_phy.h"
#include "intel_step.h"
#include "intel_vblank.h"
+#include "intel_vdsc.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"
@@ -233,16 +237,12 @@ bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder,
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- if (display->params.enable_psr == -1)
- return intel_dp_is_edp(intel_dp) ?
- connector->panel.vbt.psr.enable :
- true;
- return display->params.enable_psr;
+ return intel_dp_is_edp(intel_dp) ?
+ connector->panel.vbt.psr.enable : true;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -250,39 +250,23 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
}
}
-static bool psr2_global_enabled(struct intel_dp *intel_dp)
+static bool sel_update_global_enabled(struct intel_dp *intel_dp)
{
- struct intel_display *display = to_intel_display(intel_dp);
-
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
- if (display->params.enable_psr == 1)
- return false;
return true;
}
}
-static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
-{
- struct intel_display *display = to_intel_display(intel_dp);
-
- if (display->params.enable_psr != -1)
- return false;
-
- return true;
-}
-
static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- if ((display->params.enable_psr != -1) ||
- (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
- return false;
- return true;
+ return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) &&
+ display->params.enable_panel_replay;
}
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
@@ -514,12 +498,14 @@ static u8 intel_dp_get_su_capability(struct intel_dp *intel_dp)
{
u8 su_capability = 0;
- if (intel_dp->psr.sink_panel_replay_su_support)
- drm_dp_dpcd_readb(&intel_dp->aux,
- DP_PANEL_REPLAY_CAP_CAPABILITY,
- &su_capability);
- else
+ if (intel_dp->psr.sink_panel_replay_su_support) {
+ if (drm_dp_dpcd_read_byte(&intel_dp->aux,
+ DP_PANEL_REPLAY_CAP_CAPABILITY,
+ &su_capability) < 0)
+ return 0;
+ } else {
su_capability = intel_dp->psr_dpcd[1];
+ }
return su_capability;
}
@@ -597,9 +583,61 @@ exit:
intel_dp->psr.su_y_granularity = y;
}
+static enum intel_panel_replay_dsc_support
+compute_pr_dsc_support(struct intel_dp *intel_dp)
+{
+ u8 pr_dsc_mode;
+ u8 val;
+
+ val = intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_CAPABILITY)];
+ pr_dsc_mode = REG_FIELD_GET8(DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK, val);
+
+ switch (pr_dsc_mode) {
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY:
+ return INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY;
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED:
+ return INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE;
+ default:
+ MISSING_CASE(pr_dsc_mode);
+ fallthrough;
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED:
+ case DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED:
+ return INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED;
+ }
+}
+
+static const char *panel_replay_dsc_support_str(enum intel_panel_replay_dsc_support dsc_support)
+{
+ switch (dsc_support) {
+ case INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED:
+ return "not supported";
+ case INTEL_DP_PANEL_REPLAY_DSC_FULL_FRAME_ONLY:
+ return "full frame only";
+ case INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE:
+ return "selective update";
+ default:
+ MISSING_CASE(dsc_support);
+ return "n/a";
+ }
+}
+
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
+
+ /* TODO: Enable Panel Replay on MST once it's properly implemented. */
+ if (intel_dp->mst_detect == DRM_DP_MST)
+ return;
+
+ ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
+ &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+ if (ret < 0)
+ return;
+
+ if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
+ DP_PANEL_REPLAY_SUPPORT))
+ return;
if (intel_dp_is_edp(intel_dp)) {
if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
@@ -622,15 +660,27 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
+ intel_dp->psr.sink_panel_replay_dsc_support = compute_pr_dsc_support(intel_dp);
+
drm_dbg_kms(display->drm,
- "Panel replay %sis supported by panel\n",
+ "Panel replay %sis supported by panel (in DSC mode: %s)\n",
intel_dp->psr.sink_panel_replay_su_support ?
- "selective_update " : "");
+ "selective_update " : "",
+ panel_replay_dsc_support_str(intel_dp->psr.sink_panel_replay_dsc_support));
}
static void _psr_init_dpcd(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
+
+ ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (ret < 0)
+ return;
+
+ if (!intel_dp->psr_dpcd[0])
+ return;
drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
@@ -676,18 +726,9 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
-
- drm_dp_dpcd_read(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT,
- &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd));
+ _psr_init_dpcd(intel_dp);
- if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
- DP_PANEL_REPLAY_SUPPORT)
- _panel_replay_init_dpcd(intel_dp);
-
- if (intel_dp->psr_dpcd[0])
- _psr_init_dpcd(intel_dp);
+ _panel_replay_init_dpcd(intel_dp);
if (intel_dp->psr.sink_psr2_support ||
intel_dp->psr.sink_panel_replay_su_support)
@@ -742,8 +783,7 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay
return panel_replay ?
intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT :
- intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED &&
- psr2_su_region_et_global_enabled(intel_dp);
+ intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED;
}
static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
@@ -896,7 +936,8 @@ static bool is_dc5_dc6_blocked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
u32 current_dc_state = intel_display_power_get_current_dc_state(display);
- struct drm_vblank_crtc *vblank = &display->drm->vblank[intel_dp->psr.pipe];
+ struct intel_crtc *crtc = intel_crtc_for_pipe(display, intel_dp->psr.pipe);
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(&crtc->base);
return (current_dc_state != DC_STATE_EN_UPTO_DC5 &&
current_dc_state != DC_STATE_EN_UPTO_DC6) ||
@@ -936,7 +977,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
/* Wa_16025596647 */
if ((DISPLAY_VER(display) == 20 ||
IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
- is_dc5_dc6_blocked(intel_dp))
+ is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
intel_dp->psr.pipe,
true);
@@ -964,15 +1005,16 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
return val;
}
-static int psr2_block_count_lines(struct intel_dp *intel_dp)
+static int
+psr2_block_count_lines(u8 io_wake_lines, u8 fast_wake_lines)
{
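+ /* Either wake count at nine lines or more forces the larger 12-line block */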
- return intel_dp->alpm_parameters.io_wake_lines < 9 &&
- intel_dp->alpm_parameters.fast_wake_lines < 9 ? 8 : 12;
+ return io_wake_lines < 9 && fast_wake_lines < 9 ? 8 : 12;
}
static int psr2_block_count(struct intel_dp *intel_dp)
{
- return psr2_block_count_lines(intel_dp) / 4;
+ return psr2_block_count_lines(intel_dp->psr.io_wake_lines,
+ intel_dp->psr.fast_wake_lines) / 4;
}
static u8 frames_before_su_entry(struct intel_dp *intel_dp)
@@ -1026,7 +1068,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
/* Wa_16025596647 */
if ((DISPLAY_VER(display) == 20 ||
IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
- is_dc5_dc6_blocked(intel_dp))
+ is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used)
idle_frames = 0;
else
idle_frames = psr_compute_idle_frames(intel_dp);
@@ -1067,20 +1109,20 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
*/
int tmp;
- tmp = map[intel_dp->alpm_parameters.io_wake_lines -
+ tmp = map[intel_dp->psr.io_wake_lines -
TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(tmp + TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES);
- tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
+ tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
} else if (DISPLAY_VER(display) >= 20) {
- val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
+ val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
} else if (DISPLAY_VER(display) >= 12) {
- val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
- val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
+ val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
} else if (DISPLAY_VER(display) >= 9) {
- val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
- val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
+ val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines);
+ val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines);
}
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
@@ -1259,12 +1301,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (crtc_state->uapi.async_flip) {
- drm_dbg_kms(display->drm,
- "PSR2 sel fetch not enabled, async flip enabled\n");
- return false;
- }
-
return crtc_state->enable_psr2_sel_fetch = true;
}
@@ -1368,22 +1404,54 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
return entry_setup_frames;
}
-static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool aux_less)
+static int
+_intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state,
+ bool needs_panel_replay,
+ bool needs_sel_update)
{
- struct intel_display *display = to_intel_display(intel_dp);
- int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
- crtc_state->hw.adjusted_mode.crtc_vblank_start;
- int wake_lines;
+ struct intel_display *display = to_intel_display(crtc_state);
- if (aux_less)
- wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
+ if (!crtc_state->has_psr)
+ return 0;
+
+ /* Wa_14015401596 */
+ if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
+ return 1;
+
+ /* Rest is for SRD_STATUS needed on LunarLake and onwards */
+ if (DISPLAY_VER(display) < 20)
+ return 0;
+
+ /*
+ * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
+ *
+ * To deterministically capture the transition of the state machine
+ * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
+ * one line after the non-delayed V. Blank.
+ *
+ * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
+ * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
+ * - TRANS_VTOTAL[ Vertical Active ])
+ *
+ * SRD_STATUS is used only by PSR1 on PantherLake.
+ * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
+ */
+
+ if (DISPLAY_VER(display) >= 30 && (needs_panel_replay ||
+ needs_sel_update))
+ return 0;
+ else if (DISPLAY_VER(display) < 30 && (needs_sel_update ||
+ intel_crtc_has_type(crtc_state,
+ INTEL_OUTPUT_EDP)))
+ return 0;
else
- wake_lines = DISPLAY_VER(display) < 20 ?
- psr2_block_count_lines(intel_dp) :
- intel_dp->alpm_parameters.io_wake_lines;
+ return 1;
+}
+
+static bool _wake_lines_fit_into_vblank(const struct intel_crtc_state *crtc_state,
+ int vblank,
+ int wake_lines)
+{
if (crtc_state->req_psr2_sdp_prior_scanline)
vblank -= 1;
@@ -1394,9 +1462,46 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
return true;
}
+static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool aux_less,
+ bool needs_panel_replay,
+ bool needs_sel_update)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
+ crtc_state->hw.adjusted_mode.crtc_vblank_start;
+ int wake_lines;
+ int scl = _intel_psr_min_set_context_latency(crtc_state,
+ needs_panel_replay,
+ needs_sel_update);
+
+ vblank -= scl;
+
+ if (aux_less)
+ wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
+ else
+ wake_lines = DISPLAY_VER(display) < 20 ?
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.fast_wake_lines) :
+ crtc_state->alpm_state.io_wake_lines;
+
+ /*
+ * Guardband has not been computed yet, so we conservatively check if the
+ * full vblank duration is sufficient to accommodate wake line requirements
+ * for PSR features like Panel Replay and Selective Update.
+ *
+ * Once the actual guardband is available, a more accurate validation is
+ * performed in intel_psr_compute_config_late(), and PSR features are
+ * disabled if wake lines exceed the available guardband.
+ */
+ return _wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines);
+}
+
static bool alpm_config_valid(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool aux_less)
+ struct intel_crtc_state *crtc_state,
+ bool aux_less,
+ bool needs_panel_replay,
+ bool needs_sel_update)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1406,7 +1511,8 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
+ if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less,
+ needs_panel_replay, needs_sel_update)) {
drm_dbg_kms(display->drm,
"PSR2/Panel Replay not enabled, too short vblank time\n");
return false;
@@ -1423,7 +1529,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
- if (!intel_dp->psr.sink_psr2_support)
+ if (!intel_dp->psr.sink_psr2_support || display->params.enable_psr == 1)
return false;
/* JSL and EHL only supports eDP 1.3 */
@@ -1498,7 +1604,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- if (!alpm_config_valid(intel_dp, crtc_state, false))
+ if (!alpm_config_valid(intel_dp, crtc_state, false, false, true))
return false;
if (!crtc_state->enable_psr2_sel_fetch &&
@@ -1528,7 +1634,7 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
}
- if (!psr2_global_enabled(intel_dp)) {
+ if (!sel_update_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm,
"Selective update disabled by flag\n");
goto unsupported;
@@ -1543,9 +1649,21 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
}
- if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
- !intel_dp->psr.sink_panel_replay_su_support))
- goto unsupported;
+ if (crtc_state->has_panel_replay) {
+ if (DISPLAY_VER(display) < 14)
+ goto unsupported;
+
+ if (!intel_dp->psr.sink_panel_replay_su_support)
+ goto unsupported;
+
+ if (intel_dsc_enabled_on_link(crtc_state) &&
+ intel_dp->psr.sink_panel_replay_dsc_support !=
+ INTEL_DP_PANEL_REPLAY_DSC_SELECTIVE_UPDATE) {
+ drm_dbg_kms(display->drm,
+ "Selective update with Panel Replay not enabled because it's not supported with DSC\n");
+ goto unsupported;
+ }
+ }
if (crtc_state->crc_enabled) {
drm_dbg_kms(display->drm,
@@ -1576,7 +1694,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int entry_setup_frames;
- if (!CAN_PSR(intel_dp))
+ if (!CAN_PSR(intel_dp) || !display->params.enable_psr)
return false;
/*
@@ -1590,6 +1708,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (entry_setup_frames >= 0) {
intel_dp->psr.entry_setup_frames = entry_setup_frames;
} else {
+ crtc_state->no_psr_reason = "PSR setup timing not met";
drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup timing not met\n");
return false;
@@ -1600,7 +1719,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
static bool
_panel_replay_compute_config(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
+ struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -1622,6 +1741,14 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
+ if (intel_dsc_enabled_on_link(crtc_state) &&
+ intel_dp->psr.sink_panel_replay_dsc_support ==
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED) {
+ drm_dbg_kms(display->drm,
+ "Panel Replay not enabled because it's not supported with DSC\n");
+ return false;
+ }
+
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -1649,7 +1776,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
}
- if (!alpm_config_valid(intel_dp, crtc_state, true))
+ if (!alpm_config_valid(intel_dp, crtc_state, true, true, false))
return false;
return true;
@@ -1664,15 +1791,40 @@ static bool intel_psr_needs_wa_18037818876(struct intel_dp *intel_dp,
!crtc_state->has_sel_update);
}
+static void
+intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc;
+ u8 active_pipes = 0;
+
+ /* Wa_16025596647 */
+ if (DISPLAY_VER(display) != 20 &&
+ !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ return;
+
+ /* Not needed by Panel Replay */
+ if (crtc_state->has_panel_replay)
+ return;
+
+ /* We ignore possible secondary PSR/Panel Replay capable eDP */
+ for_each_intel_crtc(display->drm, crtc)
+ active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
+
+ active_pipes = intel_calc_active_pipes(state, active_pipes);
+
+ crtc_state->active_non_psr_pipes = active_pipes &
+ ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
+}
+
void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
- struct intel_crtc *crtc;
- u8 active_pipes = 0;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(display->drm, "PSR disabled by flag\n");
@@ -1702,6 +1854,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
+ /* Only used for state verification. */
+ crtc_state->panel_replay_dsc_support = intel_dp->psr.sink_panel_replay_dsc_support;
crtc_state->has_panel_replay = _panel_replay_compute_config(intel_dp,
crtc_state,
conn_state);
@@ -1713,31 +1867,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
crtc_state->has_sel_update = intel_sel_update_config_valid(intel_dp, crtc_state);
-
- /* Wa_18037818876 */
- if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
- crtc_state->has_psr = false;
- drm_dbg_kms(display->drm,
- "PSR disabled to workaround PSR FSM hang issue\n");
- }
-
- /* Rest is for Wa_16025596647 */
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
- return;
-
- /* Not needed by Panel Replay */
- if (crtc_state->has_panel_replay)
- return;
-
- /* We ignore possible secondary PSR/Panel Replay capable eDP */
- for_each_intel_crtc(display->drm, crtc)
- active_pipes |= crtc->active ? BIT(crtc->pipe) : 0;
-
- active_pipes = intel_calc_active_pipes(state, active_pipes);
-
- crtc_state->active_non_psr_pipes = active_pipes &
- ~BIT(to_intel_crtc(crtc_state->uapi.crtc)->pipe);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1808,6 +1937,8 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, intel_dp->psr.active);
+ drm_WARN_ON(display->drm, !intel_dp->psr.enabled);
+
lockdep_assert_held(&intel_dp->psr.lock);
/* psr1, psr2 and panel-replay are mutually exclusive. */
@@ -1819,6 +1950,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
hsw_activate_psr1(intel_dp);
intel_dp->psr.active = true;
+ intel_dp->psr.no_psr_reason = NULL;
}
/*
@@ -2027,6 +2159,9 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_dp->psr.req_psr2_sdp_prior_scanline =
crtc_state->req_psr2_sdp_prior_scanline;
intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes;
+ intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used;
+ intel_dp->psr.io_wake_lines = crtc_state->alpm_state.io_wake_lines;
+ intel_dp->psr.fast_wake_lines = crtc_state->alpm_state.fast_wake_lines;
if (!psr_interrupt_error_check(intel_dp))
return;
@@ -2103,8 +2238,9 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
- if (DISPLAY_VER(display) == 20 ||
- IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if ((DISPLAY_VER(display) == 20 ||
+ IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) &&
+ intel_dp->psr.pkg_c_latency_used)
intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display,
intel_dp->psr.pipe,
false);
@@ -2135,8 +2271,8 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
- if (intel_de_wait_for_clear(display, psr_status,
- psr_status_mask, 2000))
+ if (intel_de_wait_for_clear_ms(display, psr_status,
+ psr_status_mask, 2000))
drm_err(display->drm, "Timed out waiting for PSR idle state\n");
}
@@ -2207,6 +2343,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_dp->psr.su_region_et_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
intel_dp->psr.active_non_psr_pipes = 0;
+ intel_dp->psr.pkg_c_latency_used = 0;
}
/**
@@ -2363,50 +2500,17 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
}
/**
- * intel_psr_min_vblank_delay - Minimum vblank delay needed by PSR
+ * intel_psr_min_set_context_latency - Minimum 'set context latency' lines needed by PSR
* @crtc_state: the crtc state
*
- * Return minimum vblank delay needed by PSR.
+ * Return the minimum 'set context latency' (SCL) in lines needed by PSR.
*/
-int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state)
+int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
- if (!crtc_state->has_psr)
- return 0;
-
- /* Wa_14015401596 */
- if (intel_vrr_possible(crtc_state) && IS_DISPLAY_VER(display, 13, 14))
- return 1;
-
- /* Rest is for SRD_STATUS needed on LunarLake and onwards */
- if (DISPLAY_VER(display) < 20)
- return 0;
-
- /*
- * Comment on SRD_STATUS register in Bspec for LunarLake and onwards:
- *
- * To deterministically capture the transition of the state machine
- * going from SRDOFFACK to IDLE, the delayed V. Blank should be at least
- * one line after the non-delayed V. Blank.
- *
- * Legacy TG: TRANS_SET_CONTEXT_LATENCY > 0
- * VRR TG: TRANS_VRR_CTL[ VRR Guardband ] < (TRANS_VRR_VMAX[ VRR Vmax ]
- * - TRANS_VTOTAL[ Vertical Active ])
- *
- * SRD_STATUS is used only by PSR1 on PantherLake.
- * SRD_STATUS is used by PSR1 and Panel Replay DP on LunarLake.
- */
-
- if (DISPLAY_VER(display) >= 30 && (crtc_state->has_panel_replay ||
- crtc_state->has_sel_update))
- return 0;
- else if (DISPLAY_VER(display) < 30 && (crtc_state->has_sel_update ||
- intel_crtc_has_type(crtc_state,
- INTEL_OUTPUT_EDP)))
- return 0;
- else
- return 1;
+ return _intel_psr_min_set_context_latency(crtc_state,
+ crtc_state->has_panel_replay,
+ crtc_state->has_sel_update);
}
static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
@@ -2928,6 +3032,9 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
mutex_lock(&psr->lock);
+ if (!new_crtc_state->has_psr)
+ psr->no_psr_reason = new_crtc_state->no_psr_reason;
+
if (psr->enabled) {
/*
* Reasons to disable:
@@ -2954,6 +3061,20 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
}
}
+static void
+verify_panel_replay_dsc_state(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->has_panel_replay)
+ return;
+
+ drm_WARN_ON(display->drm,
+ intel_dsc_enabled_on_link(crtc_state) &&
+ crtc_state->panel_replay_dsc_support ==
+ INTEL_DP_PANEL_REPLAY_DSC_NOT_SUPPORTED);
+}
+
void intel_psr_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -2965,6 +3086,8 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
if (!crtc_state->has_psr)
return;
+ verify_panel_replay_dsc_state(crtc_state);
+
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2976,12 +3099,19 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
drm_WARN_ON(display->drm,
psr->enabled && !crtc_state->active_planes);
- keep_disabled |= psr->sink_not_reliable;
- keep_disabled |= !crtc_state->active_planes;
+ if (psr->sink_not_reliable)
+ keep_disabled = true;
+
+ if (!crtc_state->active_planes) {
+ psr->no_psr_reason = "All planes inactive";
+ keep_disabled = true;
+ }
/* Display WA #1136: skl, bxt */
- keep_disabled |= DISPLAY_VER(display) < 11 &&
- crtc_state->wm_level_disabled;
+ if (DISPLAY_VER(display) < 11 && crtc_state->wm_level_disabled) {
+ psr->no_psr_reason = "Workaround #1136 for skl, bxt";
+ keep_disabled = true;
+ }
if (!psr->enabled && !keep_disabled)
intel_psr_enable_locked(intel_dp, crtc_state);
@@ -3003,35 +3133,57 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
}
}
-static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+/*
+ * From bspec: Panel Self Refresh (BDW+)
+ * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
+ * exit training time + 1.5 ms of aux channel handshake. 50 ms is
+ * defensive enough to cover everything.
+ */
+#define PSR_IDLE_TIMEOUT_MS 50
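To sanity-check the 50 ms bound derived in the comment above, a minimal sketch assuming a 60 Hz panel (the refresh rate is an illustrative choice, not something stated by the patch):

#include <stdio.h>

int main(void)
{
	double frame_ms = 1000.0 / 60.0; /* inverse of the refresh rate */
	double exit_training_ms = 6.0;
	double aux_handshake_ms = 1.5;

	/* ~24.2 ms worst case, so 50 ms leaves roughly 2x headroom. */
	printf("max PSR idle time: %.1f ms\n",
	       frame_ms + exit_training_ms + aux_handshake_ms);
	return 0;
}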
+
+static int
+_psr2_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
+ struct intel_dsb *dsb)
{
- struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct intel_display *display = to_intel_display(new_crtc_state);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
/*
* Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough.
* As all higher states have bit 4 of the PSR2 state set, we can just wait for
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
*/
- return intel_de_wait_for_clear(display,
+ if (dsb) {
+ intel_dsb_poll(dsb, EDP_PSR2_STATUS(display, cpu_transcoder),
+ EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 0, 200,
+ PSR_IDLE_TIMEOUT_MS * 1000 / 200);
+ return true;
+ }
+
+ return intel_de_wait_for_clear_ms(display,
EDP_PSR2_STATUS(display, cpu_transcoder),
- EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
+ EDP_PSR2_STATUS_STATE_DEEP_SLEEP,
+ PSR_IDLE_TIMEOUT_MS);
}
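Assuming the last two intel_dsb_poll() arguments are a per-poll delay in microseconds and a repeat count (a reading of the call above, not confirmed against the helper's definition), the expression PSR_IDLE_TIMEOUT_MS * 1000 / 200 sizes the DSB poll to the same 50 ms budget as the MMIO path:

#include <stdio.h>

#define PSR_IDLE_TIMEOUT_MS 50

int main(void)
{
	int wait_us = 200;                            /* per-poll delay */
	int count = PSR_IDLE_TIMEOUT_MS * 1000 / 200; /* 250 polls */

	printf("poll budget: %d us\n", wait_us * count); /* 50000 us = 50 ms */
	return 0;
}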
-static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
+static int
+_psr1_ready_for_pipe_update_locked(const struct intel_crtc_state *new_crtc_state,
+ struct intel_dsb *dsb)
{
- struct intel_display *display = to_intel_display(intel_dp);
- enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
+ struct intel_display *display = to_intel_display(new_crtc_state);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
- /*
- * From bspec: Panel Self Refresh (BDW+)
- * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
- * exit training time + 1.5 ms of aux channel handshake. 50 ms is
- * defensive enough to cover everything.
- */
- return intel_de_wait_for_clear(display,
+ if (dsb) {
+ intel_dsb_poll(dsb, psr_status_reg(display, cpu_transcoder),
+ EDP_PSR_STATUS_STATE_MASK, 0, 200,
+ PSR_IDLE_TIMEOUT_MS * 1000 / 200);
+ return true;
+ }
+
+ return intel_de_wait_for_clear_ms(display,
psr_status_reg(display, cpu_transcoder),
- EDP_PSR_STATUS_STATE_MASK, 50);
+ EDP_PSR_STATUS_STATE_MASK,
+ PSR_IDLE_TIMEOUT_MS);
}
/**
@@ -3060,9 +3212,11 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
continue;
if (intel_dp->psr.sel_update_enabled)
- ret = _psr2_ready_for_pipe_update_locked(intel_dp);
+ ret = _psr2_ready_for_pipe_update_locked(new_crtc_state,
+ NULL);
else
- ret = _psr1_ready_for_pipe_update_locked(intel_dp);
+ ret = _psr1_ready_for_pipe_update_locked(new_crtc_state,
+ NULL);
if (ret)
drm_err(display->drm,
@@ -3070,6 +3224,18 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
}
}
+void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->has_psr || new_crtc_state->has_panel_replay)
+ return;
+
+ if (new_crtc_state->has_sel_update)
+ _psr2_ready_for_pipe_update_locked(new_crtc_state, dsb);
+ else
+ _psr1_ready_for_pipe_update_locked(new_crtc_state, dsb);
+}
+
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
@@ -3092,14 +3258,14 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
mutex_unlock(&intel_dp->psr.lock);
- err = intel_de_wait_for_clear(display, reg, mask, 50);
+ err = intel_de_wait_for_clear_ms(display, reg, mask, 50);
if (err)
drm_err(display->drm,
"Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&intel_dp->psr.lock);
- return err == 0 && intel_dp->psr.enabled;
+ return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter;
}
static int intel_psr_fastset_force(struct intel_display *display)
@@ -3228,8 +3394,13 @@ static void intel_psr_work(struct work_struct *work)
if (!intel_dp->psr.enabled)
goto unlock;
- if (READ_ONCE(intel_dp->psr.irq_aux_error))
+ if (READ_ONCE(intel_dp->psr.irq_aux_error)) {
intel_psr_handle_irq(intel_dp);
+ goto unlock;
+ }
+
+ if (intel_dp->psr.pause_counter)
+ goto unlock;
/*
* We have to make sure PSR is ready for re-enable
@@ -3364,6 +3535,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
+ /* Selective fetch prior to LNL */
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0)
@@ -3382,12 +3554,19 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
intel_psr_configure_full_frame_update(intel_dp);
intel_psr_force_update(intel_dp);
+ } else if (!intel_dp->psr.psr2_sel_fetch_enabled) {
+ /*
+ * PSR1 on all platforms
+ * PSR2 HW tracking
+ * Panel Replay Full frame update
+ */
+ intel_psr_force_update(intel_dp);
} else {
+ /* Selective update from LNL onwards */
intel_psr_exit(intel_dp);
}
- if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) &&
- !intel_dp->psr.busy_frontbuffer_bits)
+ if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
queue_work(display->wq.unordered, &intel_dp->psr.work);
}
@@ -3723,7 +3902,7 @@ static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp
struct intel_display *display = to_intel_display(intel_dp);
bool dc5_dc6_blocked;
- if (!intel_dp->psr.active)
+ if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used)
return;
dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp);
@@ -3748,7 +3927,8 @@ static void psr_dc5_dc6_wa_work(struct work_struct *work)
mutex_lock(&intel_dp->psr.lock);
- if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled)
+ if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled &&
+ !intel_dp->psr.pkg_c_latency_used)
intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
mutex_unlock(&intel_dp->psr.lock);
@@ -3826,7 +4006,8 @@ void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
goto unlock;
if ((enable && intel_dp->psr.active_non_psr_pipes) ||
- (!enable && !intel_dp->psr.active_non_psr_pipes)) {
+ (!enable && !intel_dp->psr.active_non_psr_pipes) ||
+ !intel_dp->psr.pkg_c_latency_used) {
intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes;
goto unlock;
}
@@ -3861,7 +4042,7 @@ void intel_psr_notify_vblank_enable_disable(struct intel_display *display,
break;
}
- if (intel_dp->psr.enabled)
+ if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used)
intel_psr_apply_underrun_on_idle_wa_locked(intel_dp);
mutex_unlock(&intel_dp->psr.lock);
@@ -3943,6 +4124,8 @@ static void intel_psr_sink_capability(struct intel_dp *intel_dp,
seq_printf(m, ", Panel Replay = %s", str_yes_no(psr->sink_panel_replay_support));
seq_printf(m, ", Panel Replay Selective Update = %s",
str_yes_no(psr->sink_panel_replay_su_support));
+ seq_printf(m, ", Panel Replay DSC support = %s",
+ panel_replay_dsc_support_str(psr->sink_panel_replay_dsc_support));
if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] &
DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)
seq_printf(m, " (Early Transport)");
@@ -3977,6 +4160,8 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
region_et = "";
seq_printf(m, "PSR mode: %s%s%s\n", mode, status, region_et);
+ if (psr->no_psr_reason)
+ seq_printf(m, " %s\n", psr->no_psr_reason);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
@@ -4157,12 +4342,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
void intel_psr_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root,
display, &i915_edp_psr_debug_fops);
- debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root,
display, &i915_edp_psr_status_fops);
}
@@ -4274,3 +4459,84 @@ bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
{
return intel_dp_is_edp(intel_dp) && crtc_state->has_panel_replay;
}
+
+void intel_psr_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ int vblank = intel_crtc_vblank_length(crtc_state);
+ int wake_lines;
+
+ if (intel_psr_needs_alpm_aux_less(intel_dp, crtc_state))
+ wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
+ else if (intel_psr_needs_alpm(intel_dp, crtc_state))
+ wake_lines = DISPLAY_VER(display) < 20 ?
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.fast_wake_lines) :
+ crtc_state->alpm_state.io_wake_lines;
+ else
+ wake_lines = 0;
+
+ /*
+ * Disable the PSR features if wake lines exceed the available vblank.
+ * Though SCL is computed based on these PSR features, it is not reset
+ * even if the PSR features are disabled, so as to avoid changing the
+ * vblank start at this stage.
+ */
+ if (wake_lines && !_wake_lines_fit_into_vblank(crtc_state, vblank, wake_lines)) {
+ drm_dbg_kms(display->drm,
+ "Adjusting PSR/PR mode: vblank too short for wake lines = %d\n",
+ wake_lines);
+
+ if (crtc_state->has_panel_replay) {
+ crtc_state->has_panel_replay = false;
+ /*
+ * TODO: Add fallback to PSR/PSR2.
+ * Since Panel Replay cannot be supported here, we could fall back to
+ * PSR/PSR2. This would require calling compute_config for PSR and PSR2
+ * with a check against the actual guardband instead of vblank_length.
+ */
+ crtc_state->has_psr = false;
+ }
+
+ crtc_state->has_sel_update = false;
+ crtc_state->enable_psr2_su_region_et = false;
+ crtc_state->enable_psr2_sel_fetch = false;
+ }
+
+ /* Wa_18037818876 */
+ if (intel_psr_needs_wa_18037818876(intel_dp, crtc_state)) {
+ crtc_state->has_psr = false;
+ drm_dbg_kms(display->drm,
+ "PSR disabled to workaround PSR FSM hang issue\n");
+ }
+
+ intel_psr_set_non_psr_pipes(intel_dp, crtc_state);
+}
+
+int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int psr_min_guardband;
+ int wake_lines;
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ return 0;
+
+ if (crtc_state->has_panel_replay)
+ wake_lines = crtc_state->alpm_state.aux_less_wake_lines;
+ else if (crtc_state->has_sel_update)
+ wake_lines = DISPLAY_VER(display) < 20 ?
+ psr2_block_count_lines(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.fast_wake_lines) :
+ crtc_state->alpm_state.io_wake_lines;
+ else
+ return 0;
+
+ psr_min_guardband = wake_lines + crtc_state->set_context_latency;
+
+ if (crtc_state->req_psr2_sdp_prior_scanline)
+ psr_min_guardband++;
+
+ return psr_min_guardband;
+}
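A worked example of the guardband arithmetic above, with hypothetical panel numbers (none of these values come from the patch):

#include <stdio.h>

int main(void)
{
	int wake_lines = 12;                 /* e.g. aux-less ALPM wake lines */
	int set_context_latency = 2;
	int req_psr2_sdp_prior_scanline = 1; /* treated as a flag */

	int guardband = wake_lines + set_context_latency;

	if (req_psr2_sdp_prior_scanline)
		guardband++;

	printf("min guardband: %d lines\n", guardband); /* 15 */
	return 0;
}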
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 9b061a22361f..620b35928832 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -52,6 +52,8 @@ void intel_psr_get_config(struct intel_encoder *encoder,
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
void intel_psr_short_pulse(struct intel_dp *intel_dp);
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state);
+void intel_psr_wait_for_idle_dsb(struct intel_dsb *dsb,
+ const struct intel_crtc_state *new_crtc_state);
bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
@@ -75,11 +77,14 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
struct intel_atomic_state *state,
struct intel_crtc *crtc);
-int intel_psr_min_vblank_delay(const struct intel_crtc_state *crtc_state);
+int intel_psr_min_set_context_latency(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
void intel_psr_debugfs_register(struct intel_display *display);
bool intel_psr_needs_alpm(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state);
bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
+void intel_psr_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+int intel_psr_min_guardband(struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
index 600c815e37e4..c05d4beb91d8 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.c
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -5,7 +5,7 @@
#include <drm/display/drm_dsc.h>
-#include "i915_utils.h"
+#include "intel_display_utils.h"
#include "intel_qp_tables.h"
/* from BPP 6 to 24 in steps of 0.5 */
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index a32fae510ed2..d2e16b79d6be 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -80,6 +80,12 @@ static void quirk_fw_sync_len(struct intel_dp *intel_dp)
drm_info(display->drm, "Applying Fast Wake sync pulse count quirk\n");
}
+static void quirk_edp_limit_rate_hbr2(struct intel_display *display)
+{
+ intel_set_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2);
+ drm_info(display->drm, "Applying eDP Limit rate to HBR2 quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -231,6 +237,9 @@ static struct intel_quirk intel_quirks[] = {
{ 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
/* HP Notebook - 14-r206nv */
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
+
+ /* Dell XPS 13 7390 2-in-1 */
+ { 0x8a12, 0x1028, 0x08b0, quirk_edp_limit_rate_hbr2 },
};
static const struct intel_dpcd_quirk intel_dpcd_quirks[] = {
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index cafdebda7535..06da0e286c67 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -20,6 +20,7 @@ enum intel_quirk_id {
QUIRK_LVDS_SSC_DISABLE,
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
QUIRK_FW_SYNC_LEN,
+ QUIRK_EDP_LIMIT_RATE_HBR2,
};
void intel_init_quirks(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_sbi.c b/drivers/gpu/drm/i915/display/intel_sbi.c
index dfcff924f0ed..b636a0060d39 100644
--- a/drivers/gpu/drm/i915/display/intel_sbi.c
+++ b/drivers/gpu/drm/i915/display/intel_sbi.c
@@ -21,7 +21,8 @@ static int intel_sbi_rw(struct intel_display *display, u16 reg,
lockdep_assert_held(&display->sbi.lock);
- if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) {
+ if (intel_de_wait_fw_ms(display, SBI_CTL_STAT,
+ SBI_STATUS_MASK, SBI_STATUS_READY, 100, NULL)) {
drm_err(display->drm, "timeout waiting for SBI to become ready\n");
return -EBUSY;
}
@@ -37,7 +38,8 @@ static int intel_sbi_rw(struct intel_display *display, u16 reg,
cmd |= SBI_CTL_OP_WR;
intel_de_write_fw(display, SBI_CTL_STAT, cmd | SBI_STATUS_BUSY);
- if (intel_de_wait_fw(display, SBI_CTL_STAT, SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) {
+ if (intel_de_wait_fw_ms(display, SBI_CTL_STAT,
+ SBI_STATUS_MASK, SBI_STATUS_READY, 100, &cmd)) {
drm_err(display->drm, "timeout waiting for SBI to complete read\n");
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 87aff2754f69..6c032d81e7ee 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -47,11 +47,11 @@
#include "intel_display_driver.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
-#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
#include "intel_panel.h"
#include "intel_sdvo.h"
#include "intel_sdvo_regs.h"
@@ -1367,7 +1367,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(display)) {
pipe_config->has_pch_encoder = true;
- if (!intel_fdi_compute_pipe_bpp(pipe_config))
+ if (!intel_link_bw_compute_pipe_bpp(pipe_config))
return -EINVAL;
}
@@ -2052,8 +2052,10 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
- intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
- &intel_sdvo->hotplug_active, 2);
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
+ &intel_sdvo->hotplug_active, 2))
+ drm_warn(intel_sdvo->base.base.dev,
+ "Failed to enable hotplug on SDVO encoder\n");
}
static enum intel_hotplug_state
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index 7fe6b4a18213..a201edceee10 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -332,6 +332,8 @@ void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u6
c10_curve_1, c10_curve_2, prescaler_divider,
&pll_params);
+ pll_state->clock = pixel_clock;
+
pll_state->tx = 0x10;
pll_state->cmn = 0x1;
pll_state->pll[0] = REG_FIELD_PREP(C10_PLL0_DIV5CLK_EN, pll_params.mpll_div5_en) |
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index b2dd69a11124..295030742294 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -7,12 +7,12 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_snps_hdmi_pll.h"
#include "intel_snps_phy.h"
#include "intel_snps_phy_regs.h"
@@ -42,8 +42,8 @@ void intel_snps_phy_wait_for_calibration(struct intel_display *display)
* which phy was affected and skip setup of the corresponding
* output later.
*/
- if (intel_de_wait_for_clear(display, DG2_PHY_MISC(phy),
- DG2_PHY_DP_TX_ACK_MASK, 25))
+ if (intel_de_wait_for_clear_ms(display, DG2_PHY_MISC(phy),
+ DG2_PHY_DP_TX_ACK_MASK, 25))
display->snps.phy_failed_calibration |= BIT(phy);
}
}
@@ -1863,7 +1863,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
* is locked at new settings. This register bit is sampling PHY
* dp_mpllb_state interface signal.
*/
- if (intel_de_wait_for_set(display, enable_reg, PLL_LOCK, 5))
+ if (intel_de_wait_for_set_ms(display, enable_reg, PLL_LOCK, 5))
drm_dbg_kms(display->drm, "Port %c PLL not locked\n", phy_name(phy));
/*
@@ -1903,7 +1903,7 @@ void intel_mpllb_disable(struct intel_encoder *encoder)
* 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment
* (dp_txX_ack) that the new transmitter setting request is completed.
*/
- if (intel_de_wait_for_clear(display, enable_reg, PLL_LOCK, 5))
+ if (intel_de_wait_for_clear_ms(display, enable_reg, PLL_LOCK, 5))
drm_err(display->drm, "Port %c PLL not locked\n", phy_name(phy));
/*
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index e6844df837af..69b6873a6044 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -39,10 +39,10 @@
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
-#include "i915_utils.h"
#include "i9xx_plane.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_frontbuffer.h"
#include "intel_plane.h"
@@ -264,8 +264,7 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return sprctl;
}
-static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 vlv_sprite_ctl(const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
@@ -395,15 +394,12 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
enum pipe pipe = plane->pipe;
enum plane_id plane_id = plane->id;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
- u32 sprctl, linear_offset;
+ u32 sprctl;
sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state);
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
if (display->platform.cherryview && pipe == PIPE_B)
chv_sprite_update_csc(plane_state);
@@ -418,7 +414,8 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, SPCONSTALPHA(pipe, plane_id), 0);
- intel_de_write_fw(display, SPLINOFF(pipe, plane_id), linear_offset);
+ intel_de_write_fw(display, SPLINOFF(pipe, plane_id),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, SPTILEOFF(pipe, plane_id),
SP_OFFSET_Y(y) | SP_OFFSET_X(x));
@@ -428,8 +425,7 @@ vlv_sprite_update_arm(struct intel_dsb *dsb,
* the control register just before the surface register.
*/
intel_de_write_fw(display, SPCNTR(pipe, plane_id), sprctl);
- intel_de_write_fw(display, SPSURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(display, SPSURF(pipe, plane_id), plane_state->surf);
vlv_sprite_update_clrc(plane_state);
vlv_sprite_update_gamma(plane_state);
@@ -663,8 +659,7 @@ static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state)
(display->platform.ivybridge || display->platform.haswell);
}
-static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 ivb_sprite_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -830,15 +825,12 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 sprsurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
- u32 sprctl, linear_offset;
+ u32 sprctl;
sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state);
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
if (key->flags) {
intel_de_write_fw(display, SPRKEYVAL(pipe), key->min_value);
intel_de_write_fw(display, SPRKEYMSK(pipe),
@@ -852,7 +844,8 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, SPROFFSET(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
} else {
- intel_de_write_fw(display, SPRLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, SPRLINOFF(pipe),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, SPRTILEOFF(pipe),
SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x));
}
@@ -863,8 +856,7 @@ ivb_sprite_update_arm(struct intel_dsb *dsb,
* the control register just before the surface register.
*/
intel_de_write_fw(display, SPRCTL(pipe), sprctl);
- intel_de_write_fw(display, SPRSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(display, SPRSURF(pipe), plane_state->surf);
ivb_sprite_update_gamma(plane_state);
}
@@ -966,10 +958,9 @@ static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state,
static unsigned int
g4x_sprite_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 4k pixels to guarantee TILEOFF.x doesn't get too big. */
@@ -981,10 +972,9 @@ g4x_sprite_max_stride(struct intel_plane *plane,
static unsigned int
hsw_sprite_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
/* Limit to 8k pixels to guarantee OFFSET.x doesn't get too big. */
@@ -1016,8 +1006,7 @@ static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
return dvscntr;
}
-static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 g4x_sprite_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1181,15 +1170,12 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum pipe pipe = plane->pipe;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 dvssurf_offset = plane_state->view.color_plane[0].offset;
u32 x = plane_state->view.color_plane[0].x;
u32 y = plane_state->view.color_plane[0].y;
- u32 dvscntr, linear_offset;
+ u32 dvscntr;
dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state);
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
if (key->flags) {
intel_de_write_fw(display, DVSKEYVAL(pipe), key->min_value);
intel_de_write_fw(display, DVSKEYMSK(pipe),
@@ -1197,7 +1183,8 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
intel_de_write_fw(display, DVSKEYMAX(pipe), key->max_value);
}
- intel_de_write_fw(display, DVSLINOFF(pipe), linear_offset);
+ intel_de_write_fw(display, DVSLINOFF(pipe),
+ intel_fb_xy_to_linear(x, y, plane_state, 0));
intel_de_write_fw(display, DVSTILEOFF(pipe),
DVS_OFFSET_Y(y) | DVS_OFFSET_X(x));
@@ -1207,8 +1194,7 @@ g4x_sprite_update_arm(struct intel_dsb *dsb,
* the control register just before the surface register.
*/
intel_de_write_fw(display, DVSCNTR(pipe), dvscntr);
- intel_de_write_fw(display, DVSSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ intel_de_write_fw(display, DVSSURF(pipe), plane_state->surf);
if (display->platform.g4x)
g4x_sprite_update_gamma(plane_state);
@@ -1387,9 +1373,9 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state,
return ret;
if (DISPLAY_VER(display) >= 7)
- plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state);
+ plane_state->ctl = ivb_sprite_ctl(plane_state);
else
- plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state);
+ plane_state->ctl = g4x_sprite_ctl(plane_state);
return 0;
}
@@ -1439,7 +1425,7 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state);
+ plane_state->ctl = vlv_sprite_ctl(plane_state);
return 0;
}
@@ -1624,6 +1610,7 @@ intel_sprite_plane_create(struct intel_display *display,
plane->capture_error = vlv_sprite_capture_error;
plane->get_hw_state = vlv_sprite_get_hw_state;
plane->check_plane = vlv_sprite_check;
+ plane->surf_offset = i965_plane_surf_offset;
plane->max_stride = i965_plane_max_stride;
plane->min_alignment = vlv_plane_min_alignment;
plane->min_cdclk = vlv_plane_min_cdclk;
@@ -1648,6 +1635,7 @@ intel_sprite_plane_create(struct intel_display *display,
plane->capture_error = ivb_sprite_capture_error;
plane->get_hw_state = ivb_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->surf_offset = i965_plane_surf_offset;
if (display->platform.broadwell || display->platform.haswell) {
plane->max_stride = hsw_sprite_max_stride;
@@ -1673,6 +1661,7 @@ intel_sprite_plane_create(struct intel_display *display,
plane->capture_error = g4x_sprite_capture_error;
plane->get_hw_state = g4x_sprite_get_hw_state;
plane->check_plane = g4x_sprite_check;
+ plane->surf_offset = i965_plane_surf_offset;
plane->max_stride = g4x_sprite_max_stride;
plane->min_alignment = g4x_sprite_min_alignment;
plane->min_cdclk = g4x_sprite_min_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 668ef139391b..1e21fd02685d 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -3,10 +3,11 @@
* Copyright © 2019 Intel Corporation
*/
+#include <linux/iopoll.h>
+
#include <drm/drm_print.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -16,6 +17,7 @@
#include "intel_display_power_map.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
@@ -23,11 +25,6 @@
#include "intel_modeset_lock.h"
#include "intel_tc.h"
-#define DP_PIN_ASSIGNMENT_NONE 0x0
-#define DP_PIN_ASSIGNMENT_C 0x3
-#define DP_PIN_ASSIGNMENT_D 0x4
-#define DP_PIN_ASSIGNMENT_E 0x5
-
enum tc_port_mode {
TC_PORT_DISCONNECTED,
TC_PORT_TBT_ALT,
@@ -66,6 +63,7 @@ struct intel_tc_port {
enum tc_port_mode mode;
enum tc_port_mode init_mode;
enum phy_fia phy_fia;
+ enum intel_tc_pin_assignment pin_assignment;
u8 phy_fia_idx;
u8 max_lane_count;
};
@@ -253,6 +251,9 @@ tc_port_power_domain(struct intel_tc_port *tc)
{
enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
+ if (tc_port == TC_PORT_NONE)
+ return POWER_DOMAIN_INVALID;
+
return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1;
}
@@ -265,13 +266,14 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc)
!intel_display_power_is_enabled(display, tc_port_power_domain(tc)));
}
-static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
+static u32 get_lane_mask(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- struct intel_tc_port *tc = to_tc_port(dig_port);
+ struct intel_display *display = to_intel_display(tc->dig_port);
+ intel_wakeref_t wakeref;
u32 lane_mask;
- lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
+ with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
+ lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia));
drm_WARN_ON(display->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(tc);
@@ -280,77 +282,87 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
}
-u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
+static char pin_assignment_name(enum intel_tc_pin_assignment pin_assignment)
{
- struct intel_display *display = to_intel_display(dig_port);
- struct intel_tc_port *tc = to_tc_port(dig_port);
- u32 pin_mask;
-
- pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia));
-
- drm_WARN_ON(display->drm, pin_mask == 0xffffffff);
- assert_tc_cold_blocked(tc);
+ if (pin_assignment == INTEL_TC_PIN_ASSIGNMENT_NONE)
+ return '-';
- return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >>
- DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx);
+ return 'A' + pin_assignment - INTEL_TC_PIN_ASSIGNMENT_A;
}
-static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+static enum intel_tc_pin_assignment
+get_pin_assignment(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base);
+ struct intel_display *display = to_intel_display(tc->dig_port);
+ enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base);
+ enum intel_tc_pin_assignment pin_assignment;
intel_wakeref_t wakeref;
- u32 val, pin_assignment;
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+
+ if (tc->mode == TC_PORT_TBT_ALT)
+ return INTEL_TC_PIN_ASSIGNMENT_NONE;
+
+ if (DISPLAY_VER(display) >= 20) {
+ reg = TCSS_DDI_STATUS(tc_port);
+ mask = TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK;
+ } else {
+ reg = PORT_TX_DFLEXPA1(tc->phy_fia);
+ mask = DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx);
+ }
with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- val = intel_de_read(display, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(display, reg);
+
+ drm_WARN_ON(display->drm, val == 0xffffffff);
+ assert_tc_cold_blocked(tc);
- pin_assignment =
- REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val);
+ pin_assignment = (val & mask) >> (ffs(mask) - 1);
switch (pin_assignment) {
- case DP_PIN_ASSIGNMENT_NONE:
- return 0;
+ case INTEL_TC_PIN_ASSIGNMENT_A:
+ case INTEL_TC_PIN_ASSIGNMENT_B:
+ case INTEL_TC_PIN_ASSIGNMENT_F:
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) > 11);
+ break;
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
+ break;
default:
MISSING_CASE(pin_assignment);
- fallthrough;
- case DP_PIN_ASSIGNMENT_D:
- return 2;
- case DP_PIN_ASSIGNMENT_C:
- case DP_PIN_ASSIGNMENT_E:
- return 4;
}
+
+ return pin_assignment;
}
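The shift by ffs(mask) - 1 above extracts a contiguous register field regardless of its position, the same job FIELD_GET does; a standalone illustration with a made-up register value and mask:

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
	unsigned int val = 0x00000280;  /* hypothetical register readout */
	unsigned int mask = 0x00000f00; /* contiguous field in bits 11:8 */

	unsigned int field = (val & mask) >> (ffs(mask) - 1);

	printf("field = %u\n", field); /* 2 */
	return 0;
}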
-static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+static int mtl_get_max_lane_count(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- intel_wakeref_t wakeref;
- u32 pin_mask;
+ enum intel_tc_pin_assignment pin_assignment;
- with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port);
+ pin_assignment = get_pin_assignment(tc);
- switch (pin_mask) {
+ switch (pin_assignment) {
+ case INTEL_TC_PIN_ASSIGNMENT_NONE:
+ return 0;
default:
- MISSING_CASE(pin_mask);
+ MISSING_CASE(pin_assignment);
fallthrough;
- case DP_PIN_ASSIGNMENT_D:
+ case INTEL_TC_PIN_ASSIGNMENT_D:
return 2;
- case DP_PIN_ASSIGNMENT_C:
- case DP_PIN_ASSIGNMENT_E:
+ case INTEL_TC_PIN_ASSIGNMENT_C:
+ case INTEL_TC_PIN_ASSIGNMENT_E:
return 4;
}
}
-static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
+static int icl_get_max_lane_count(struct intel_tc_port *tc)
{
- struct intel_display *display = to_intel_display(dig_port);
- intel_wakeref_t wakeref;
u32 lane_mask = 0;
- with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref)
- lane_mask = intel_tc_port_get_lane_mask(dig_port);
+ lane_mask = get_lane_mask(tc);
switch (lane_mask) {
default:
@@ -372,41 +384,43 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port)
static int get_max_lane_count(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
- struct intel_digital_port *dig_port = tc->dig_port;
if (tc->mode != TC_PORT_DP_ALT)
return 4;
- assert_tc_cold_blocked(tc);
-
- if (DISPLAY_VER(display) >= 20)
- return lnl_tc_port_get_max_lane_count(dig_port);
-
if (DISPLAY_VER(display) >= 14)
- return mtl_tc_port_get_max_lane_count(dig_port);
+ return mtl_get_max_lane_count(tc);
- return intel_tc_port_get_max_lane_count(dig_port);
+ return icl_get_max_lane_count(tc);
}
static void read_pin_configuration(struct intel_tc_port *tc)
{
+ tc->pin_assignment = get_pin_assignment(tc);
tc->max_lane_count = get_max_lane_count(tc);
}
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port)
{
- struct intel_display *display = to_intel_display(dig_port);
struct intel_tc_port *tc = to_tc_port(dig_port);
if (!intel_encoder_is_tc(&dig_port->base))
return 4;
- if (DISPLAY_VER(display) < 20)
- return get_max_lane_count(tc);
-
return tc->max_lane_count;
}
+enum intel_tc_pin_assignment
+intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port)
+{
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ if (!intel_encoder_is_tc(&dig_port->base))
+ return INTEL_TC_PIN_ASSIGNMENT_NONE;
+
+ return tc->pin_assignment;
+}
+
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes)
{
@@ -1038,8 +1052,13 @@ static bool
xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
{
struct intel_display *display = to_intel_display(tc->dig_port);
+ bool is_enabled;
+ int ret;
- if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) {
+ ret = poll_timeout_us(is_enabled = xelpdp_tc_phy_tcss_power_is_enabled(tc),
+ is_enabled == enabled,
+ 200, 5000, false);
+ if (ret) {
drm_dbg_kms(display->drm,
"Port %s: timeout waiting for TCSS power to get %s\n",
str_enabled_disabled(enabled),
@@ -1057,8 +1076,8 @@ xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled)
static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enable)
{
/* check if mailbox is running busy */
- if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
- TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
drm_dbg_kms(display->drm,
"Timeout waiting for TCSS mailbox run/busy bit to clear\n");
return;
@@ -1070,8 +1089,8 @@ static void xelpdp_tc_power_request_wa(struct intel_display *display, bool enabl
TCSS_DISP_MAILBOX_IN_CMD_DATA(0x1));
/* wait to clear mailbox running busy bit before continuing */
- if (intel_de_wait_for_clear(display, TCSS_DISP_MAILBOX_IN_CMD,
- TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
+ if (intel_de_wait_for_clear_ms(display, TCSS_DISP_MAILBOX_IN_CMD,
+ TCSS_DISP_MAILBOX_IN_CMD_RUN_BUSY, 10)) {
drm_dbg_kms(display->drm,
"Timeout after writing data to mailbox. Mailbox run/busy bit did not clear\n");
return;
@@ -1320,8 +1339,13 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc,
static bool tc_phy_wait_for_ready(struct intel_tc_port *tc)
{
struct intel_display *display = to_intel_display(tc->dig_port);
+ bool is_ready;
+ int ret;
- if (wait_for(tc_phy_is_ready(tc), 500)) {
+ ret = poll_timeout_us(is_ready = tc_phy_is_ready(tc),
+ is_ready,
+ 1000, 500 * 1000, false);
+ if (ret) {
drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n",
tc->port_name);
@@ -1509,10 +1533,13 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc,
if (!force_disconnect)
tc_phy_connect(tc, required_lanes);
- drm_dbg_kms(display->drm, "Port %s: TC port mode reset (%s -> %s)\n",
+ drm_dbg_kms(display->drm,
+ "Port %s: TC port mode reset (%s -> %s) pin assignment: %c max lanes: %d\n",
tc->port_name,
tc_port_mode_name(old_tc_mode),
- tc_port_mode_name(tc->mode));
+ tc_port_mode_name(tc->mode),
+ pin_assignment_name(tc->pin_assignment),
+ tc->max_lane_count);
}
static bool intel_tc_port_needs_reset(struct intel_tc_port *tc)
@@ -1667,13 +1694,28 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port,
__intel_tc_port_put_link(tc);
}
- drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n",
+ drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s) pin assignment: %c max lanes: %d\n",
tc->port_name,
- tc_port_mode_name(tc->mode));
+ tc_port_mode_name(tc->mode),
+ pin_assignment_name(tc->pin_assignment),
+ tc->max_lane_count);
mutex_unlock(&tc->lock);
}
+void intel_tc_info(struct drm_printer *p, struct intel_digital_port *dig_port)
+{
+ struct intel_tc_port *tc = to_tc_port(dig_port);
+
+ intel_tc_port_lock(dig_port);
+ drm_printf(p, "\tTC Port %s: mode: %s, pin assignment: %c, max lanes: %d\n",
+ tc->port_name,
+ tc_port_mode_name(tc->mode),
+ pin_assignment_name(tc->pin_assignment),
+ tc->max_lane_count);
+ intel_tc_port_unlock(dig_port);
+}
+
/*
* The type-C ports are different because even when they are connected, they may
* not be available/usable by the graphics driver: see the comment on
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 26c4265368c1..6719aea5bd58 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -8,10 +8,80 @@
#include <linux/types.h>
+struct drm_printer;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_encoder;
+/*
+ * The following enum values must stay fixed, as they match the corresponding
+ * pin assignment fields in the PORT_TX_DFLEXPA1 and TCSS_DDI_STATUS registers.
+ */
+enum intel_tc_pin_assignment { /* Lanes (a) Signal/ Cable Notes */
+ /* DP USB Rate (b) type */
+ INTEL_TC_PIN_ASSIGNMENT_NONE = 0, /* 4 - - - (c) */
+ INTEL_TC_PIN_ASSIGNMENT_A, /* 2/4 0 GEN2 TC->TC (d,e) */
+ INTEL_TC_PIN_ASSIGNMENT_B, /* 1/2 1 GEN2 TC->TC (d,f,g) */
+ INTEL_TC_PIN_ASSIGNMENT_C, /* 4 0 DP2 TC->TC (h) */
+ INTEL_TC_PIN_ASSIGNMENT_D, /* 2 1 DP2 TC->TC (h,g) */
+ INTEL_TC_PIN_ASSIGNMENT_E, /* 4 0 DP2 TC->DP */
+ INTEL_TC_PIN_ASSIGNMENT_F, /* 2 1 GEN1/DP1 TC->DP (d,g,i) */
+ /*
+ * (a) - DP unidirectional lanes, each lane using 1 differential signal
+ * pair.
+ * - USB SuperSpeed bidirectional lane, using 2 differential (TX and
+ * RX) signal pairs.
+ * - USB 2.0 (HighSpeed) unidirectional lane, using 1 differential
+ * signal pair. Not listed in the table; this lane is always present on pin
+ * assignments A-D and never present on pin assignments E/F.
+ * (b) - GEN1: USB 3.1 GEN1 bit rate (5 Gbps) and signaling. This
+ * is used for transferring only a USB stream.
+ * - GEN2: USB 3.1 GEN2 bit rate (10 Gbps) and signaling. This
+ * allows transferring an HBR3 (8.1 Gbps) DP stream.
+ * - DP1: Display Port signaling defined by the DP v1.3 Standard,
+ * with a maximum bit rate of HBR3.
+ * - DP2: Display Port signaling defined by the DP v2.1 Standard,
+ * with a maximum bit rate defined by the DP Alt Mode
+ * v2.1a Standard depending on the cable type as follows:
+ * - Passive (Full-Featured) USB 3.2 GEN1
+ * TC->TC cables (CC3G1-X) : UHBR10
+ * - Passive (Full-Featured) USB 3.2/4 GEN2 and
+ * Thunderbolt Alt Mode GEN2
+ * TC->TC cables (CC3G2-X) all : UHBR10
+ * DP54 logo : UHBR13.5
+ * - Passive (Full-Featured) USB4 GEN3+ and
+ * Thunderbolt Alt Mode GEN3+
+ * TC->TC cables (CC4G3-X) all : UHBR13.5
+ * DP80 logo : UHBR20
+ * - Active Re-Timed or
+ * Active Linear Re-driven (LRD)
+ * USB3.2 GEN1/2 and USB4 GEN2+
+ * TC->TC cables all : HBR3
+ * with DP_BR CTS : UHBR10
+ * DP54 logo : UHBR13.5
+ * DP80 logo : UHBR20
+ * - Passive/Active Re-Timed or
+ * Active Linear Re-driven (LRD)
+ * TC->DP cables with DP_BR CTS/DP8K logo : HBR3
+ * with DP_BR CTS : UHBR10
+ * DP54 logo : UHBR13.5
+ * DP80 logo : UHBR20
+ * (c) Used in TBT-alt/legacy modes and on LNL+ after the sink
+ * disconnected in DP-alt mode.
+ * (d) Only defined by the DP Alt Standard v1.0a, deprecated by v1.0b,
+ * only supported on ICL.
+ * (e) GEN2 passive 1 m cable: 4 DP lanes, GEN2 active cable: 2 DP lanes.
+ * (f) GEN2 passive 1 m cable: 2 DP lanes, GEN2 active cable: 1 DP lane.
+ * (g) These pin assignments are also referred to as (USB/DP)
+ * multifunction or Multifunction Display Port (MFD) modes.
+ * (h) Also used where one end of the cable is a captive connector,
+ * attached to a DP->HDMI/DVI/VGA converter.
+ * (i) The DP end of the cable is a captive connector attached to a
+ * (DP/USB) multifunction dock as defined by the DockPort v1.0a
+ * specification.
+ */
+};
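Restating the DP lane counts from the table above as a tiny lookup, mirroring the mapping mtl_get_max_lane_count() applies in intel_tc.c (the enum values here are local stand-ins; the ICL-only A/B/F assignments are omitted for brevity):

#include <stdio.h>

enum pin_assignment { PIN_NONE = 0, PIN_A, PIN_B, PIN_C, PIN_D, PIN_E, PIN_F };

static int dp_lanes(enum pin_assignment pin)
{
	switch (pin) {
	case PIN_C:
	case PIN_E:
		return 4; /* all four lanes carry DP */
	case PIN_D:
		return 2; /* USB/DP multifunction: two lanes left for DP */
	default:
		return 0; /* no DP-alt sink connected */
	}
}

int main(void)
{
	printf("assignment D -> %d DP lanes\n", dp_lanes(PIN_D));
	return 0;
}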
+
bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
@@ -19,7 +89,8 @@ bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port);
bool intel_tc_port_connected(struct intel_encoder *encoder);
-u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
+enum intel_tc_pin_assignment
+intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port);
int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port);
void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
int required_lanes);
@@ -43,4 +114,6 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port);
bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port);
+void intel_tc_info(struct drm_printer *p, struct intel_digital_port *dig_port);
+
#endif /* __INTEL_TC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 70ba7aa26bf4..671f357c6563 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -3,14 +3,19 @@
* Copyright © 2022-2023 Intel Corporation
*/
+#include <linux/iopoll.h>
+
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "i915_drv.h"
#include "intel_color.h"
#include "intel_crtc.h"
#include "intel_de.h"
+#include "intel_display_jiffies.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
@@ -492,9 +497,14 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
struct intel_display *display = to_intel_display(crtc);
enum pipe pipe = crtc->pipe;
+ bool is_moving;
+ int ret;
/* Wait for the display line to settle/start moving */
- if (wait_for(pipe_scanline_is_moving(display, pipe) == state, 100))
+ ret = poll_timeout_us(is_moving = pipe_scanline_is_moving(display, pipe),
+ is_moving == state,
+ 500, 100 * 1000, false);
+ if (ret)
drm_err(display->drm,
"pipe %c scanline %s wait timed out\n",
pipe_name(pipe), str_on_off(state));
@@ -673,7 +683,7 @@ void intel_vblank_evade_init(const struct intel_crtc_state *old_crtc_state,
else
evade->vblank_start = intel_vrr_vmax_vblank_start(crtc_state);
- vblank_delay = intel_vrr_vblank_delay(crtc_state);
+ vblank_delay = crtc_state->set_context_latency;
} else {
evade->vblank_start = intel_mode_vblank_start(adjusted_mode);
@@ -724,9 +734,9 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
break;
if (!timeout) {
- drm_err(display->drm,
- "Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(display->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
break;
}
@@ -759,3 +769,13 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade)
return scanline;
}
+
+int intel_crtc_vblank_length(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+
+ if (crtc_state->vrr.enable)
+ return crtc_state->vrr.guardband;
+ else
+ return adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
+}
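A numeric check of the fixed-timing branch above, using the standard CEA 1080p60 vertical timing (vtotal 1125, vblank starting right after the 1080 active lines):

#include <stdio.h>

int main(void)
{
	int crtc_vtotal = 1125;       /* CEA-861 1080p60 */
	int crtc_vblank_start = 1080; /* vblank begins after active video */

	printf("vblank length: %d lines\n",
	       crtc_vtotal - crtc_vblank_start); /* 45 */
	return 0;
}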
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
index 21fbb08d61d5..98d04cacd65f 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.h
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -48,4 +48,6 @@ const struct intel_crtc_state *
intel_pre_commit_crtc_state(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_crtc_vblank_length(const struct intel_crtc_state *crtc_state);
+
#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 92c04811aa28..70e31520c560 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -37,7 +37,7 @@
#ifndef _INTEL_VBT_DEFS_H_
#define _INTEL_VBT_DEFS_H_
-#include "intel_bios.h"
+#include "intel_dsi_vbt_defs.h"
/* EDID derived structures */
struct bdb_edid_pnp_id {
@@ -437,6 +437,22 @@ enum vbt_gmbus_ddi {
#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5 6
#define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20 7
+/* EDP link rate 263+ */
+#define BDB_263_VBT_EDP_LINK_RATE_1_62 BIT_U32(0)
+#define BDB_263_VBT_EDP_LINK_RATE_2_16 BIT_U32(1)
+#define BDB_263_VBT_EDP_LINK_RATE_2_43 BIT_U32(2)
+#define BDB_263_VBT_EDP_LINK_RATE_2_7 BIT_U32(3)
+#define BDB_263_VBT_EDP_LINK_RATE_3_24 BIT_U32(4)
+#define BDB_263_VBT_EDP_LINK_RATE_4_32 BIT_U32(5)
+#define BDB_263_VBT_EDP_LINK_RATE_5_4 BIT_U32(6)
+#define BDB_263_VBT_EDP_LINK_RATE_6_75 BIT_U32(7)
+#define BDB_263_VBT_EDP_LINK_RATE_8_1 BIT_U32(8)
+#define BDB_263_VBT_EDP_LINK_RATE_10 BIT_U32(9)
+#define BDB_263_VBT_EDP_LINK_RATE_13_5 BIT_U32(10)
+#define BDB_263_VBT_EDP_LINK_RATE_20 BIT_U32(11)
+#define BDB_263_VBT_EDP_NUM_RATES 12
+#define BDB_263_VBT_EDP_RATES_MASK GENMASK(BDB_263_VBT_EDP_NUM_RATES - 1, 0)
+
/*
* The child device config, aka the display device data structure, provides a
* description of a port and its configuration on the platform.
@@ -547,6 +563,8 @@ struct child_device_config {
u8 dp_max_link_rate:3; /* 216+ */
u8 dp_max_link_rate_reserved:5; /* 216+ */
u8 efp_index; /* 256+ */
+ u32 edp_data_rate_override:12; /* 263+ */
+ u32 edp_data_rate_override_reserved:20; /* 263+ */
} __packed;
struct bdb_general_definitions {
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 8e799e225af1..0e727fc5e80c 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -11,10 +11,10 @@
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dp.h"
#include "intel_dsi.h"
#include "intel_qp_tables.h"
@@ -372,6 +372,22 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
return 0;
}
+void intel_dsc_enable_on_crtc(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->dsc.compression_enabled_on_link = true;
+ crtc_state->dsc.compression_enable = true;
+}
+
+bool intel_dsc_enabled_on_link(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ drm_WARN_ON(display->drm, crtc_state->dsc.compression_enable &&
+ !crtc_state->dsc.compression_enabled_on_link);
+
+ return crtc_state->dsc.compression_enabled_on_link;
+}
+
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
@@ -1077,3 +1093,11 @@ int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
return min_cdclk;
}
+
+unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsc.compression_enable)
+ return 0;
+
+ return 0x18000; /* 1.5 */
+}
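The magic 0x18000 reads as 1.5 under a 16.16 fixed-point convention (an inference from the /* 1.5 */ comment, not stated by the patch): 0x18000 = 98304 = 1.5 * 65536.

#include <stdio.h>

int main(void)
{
	unsigned int prefill = 0x18000; /* 16.16 fixed point */

	printf("prefill lines: %.1f\n", prefill / 65536.0); /* 1.5 */
	return 0;
}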
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 9e2812f99dd7..99f64ac54b27 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -20,6 +20,8 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
int intel_dsc_compute_params(struct intel_crtc_state *pipe_config);
+void intel_dsc_enable_on_crtc(struct intel_crtc_state *crtc_state);
+bool intel_dsc_enabled_on_link(const struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
@@ -32,5 +34,6 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
+unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 3eed37f271b0..b92c42fde937 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -10,8 +10,11 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
+#include "intel_psr.h"
#include "intel_vrr.h"
#include "intel_vrr_regs.h"
+#include "skl_prefill.h"
+#include "skl_watermark.h"
#define FIXED_POINT_PRECISION 100
#define CMRR_PRECISION_TOLERANCE 10
@@ -22,6 +25,9 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
const struct drm_display_info *info = &connector->base.display_info;
struct intel_dp *intel_dp;
+ if (!HAS_VRR(display))
+ return false;
+
/*
* DP Sink is capable of VRR video timings if
* Ignore MSA bit is set in DPCD.
@@ -46,8 +52,7 @@ bool intel_vrr_is_capable(struct intel_connector *connector)
return false;
}
- return HAS_VRR(display) &&
- info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
+ return info->monitor_range.max_vfreq - info->monitor_range.min_vfreq > 10;
}
bool intel_vrr_is_in_range(struct intel_connector *connector, int vrefresh)
@@ -79,44 +84,42 @@ intel_vrr_check_modeset(struct intel_atomic_state *state)
}
}
-static int intel_vrr_real_vblank_delay(const struct intel_crtc_state *crtc_state)
-{
- return crtc_state->hw.adjusted_mode.crtc_vblank_start -
- crtc_state->hw.adjusted_mode.crtc_vdisplay;
-}
-
static int intel_vrr_extra_vblank_delay(struct intel_display *display)
{
/*
* On ICL/TGL VRR hardware inserts one extra scanline
* just after vactive, which pushes the vmin decision
- * boundary ahead accordingly. We'll include the extra
- * scanline in our vblank delay estimates to make sure
- * that we never underestimate how long we have until
- * the delayed vblank has passed.
+ * boundary ahead accordingly, and thus reduces the
+ * max guardband length by one scanline.
*/
return DISPLAY_VER(display) < 13 ? 1 : 0;
}
-int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_vmin_flipline_offset(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return intel_vrr_real_vblank_delay(crtc_state) +
- intel_vrr_extra_vblank_delay(display);
+ /*
+ * ICL/TGL hardware imposes flipline>=vmin+1
+ *
+ * We reduce the vmin value to compensate when programming the
+ * hardware. This approach allows flipline to remain set at the
+ * original value, and thus the frame will have the desired
+ * minimum vtotal.
+ */
+ return DISPLAY_VER(display) < 13 ? 1 : 0;
}
-static int intel_vrr_flipline_offset(struct intel_display *display)
+static int intel_vrr_guardband_to_pipeline_full(const struct intel_crtc_state *crtc_state,
+ int guardband)
{
- /* ICL/TGL hardware imposes flipline>=vmin+1 */
- return DISPLAY_VER(display) < 13 ? 1 : 0;
+ /* hardware imposes one extra scanline somewhere */
+ return guardband - crtc_state->framestart_delay - 1;
}
-static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_pipeline_full_to_guardband(const struct intel_crtc_state *crtc_state,
+ int pipeline_full)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
- return crtc_state->vrr.vmin + intel_vrr_flipline_offset(display);
+ /* hardware imposes one extra scanline somewhere */
+ return pipeline_full + crtc_state->framestart_delay + 1;
}
/*
@@ -135,48 +138,26 @@ static int intel_vrr_vmin_flipline(const struct intel_crtc_state *crtc_state)
*
* framestart_delay is programmable 1-4.
*/
-static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state)
-{
- struct intel_display *display = to_intel_display(crtc_state);
-
- if (DISPLAY_VER(display) >= 13)
- return crtc_state->vrr.guardband;
- else
- /* hardware imposes one extra scanline somewhere */
- return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
-}
int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
/* Min vblank actually determined by flipline */
- if (DISPLAY_VER(display) >= 13)
- return intel_vrr_vmin_flipline(crtc_state);
- else
- return intel_vrr_vmin_flipline(crtc_state) +
- intel_vrr_real_vblank_delay(crtc_state);
+ return crtc_state->vrr.vmin;
}
int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state)
{
- struct intel_display *display = to_intel_display(crtc_state);
-
- if (DISPLAY_VER(display) >= 13)
- return crtc_state->vrr.vmax;
- else
- return crtc_state->vrr.vmax +
- intel_vrr_real_vblank_delay(crtc_state);
+ return crtc_state->vrr.vmax;
}
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_vmin_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmin_vtotal(crtc_state) - crtc_state->vrr.guardband;
}
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_vmax_vtotal(crtc_state) - intel_vrr_vblank_exit_length(crtc_state);
+ return intel_vrr_vmax_vtotal(crtc_state) - crtc_state->vrr.guardband;
}
static bool
@@ -230,7 +211,6 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required)
static
void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
{
- crtc_state->cmrr.enable = true;
/*
* TODO: Compute precise target refresh rate to determine
* if video_mode_required should be true. Currently set to
@@ -240,52 +220,76 @@ void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false);
crtc_state->vrr.vmin = crtc_state->vrr.vmax;
crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+
+ crtc_state->cmrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
static
-void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state)
+void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state,
+ int vmin, int vmax)
{
+ crtc_state->vrr.vmax = vmax;
+ crtc_state->vrr.vmin = vmin;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+
crtc_state->vrr.enable = true;
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
-/*
- * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to
- * Vtotal value.
- */
static
-int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state)
+void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
+{
+ /* For fixed rr, vmin = vmax = flipline */
+ crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ crtc_state->vrr.vmin = crtc_state->vrr.vmax;
+ crtc_state->vrr.flipline = crtc_state->vrr.vmin;
+}
+
+static int intel_vrr_hw_value(const struct intel_crtc_state *crtc_state,
+ int value)
{
struct intel_display *display = to_intel_display(crtc_state);
- int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ /*
+ * On TGL vmin/vmax/flipline also need to be adjusted by the
+ * set context latency (SCL) to maintain correct vtotals.
+ */
if (DISPLAY_VER(display) >= 13)
- return crtc_vtotal;
+ return value;
else
- return crtc_vtotal -
- intel_vrr_real_vblank_delay(crtc_state);
+ return value - crtc_state->set_context_latency;
+}
+
+/*
+ * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to
+ * Vtotal value.
+ */
+static
+int intel_vrr_fixed_rr_hw_vtotal(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_hw_value(crtc_state, crtc_state->hw.adjusted_mode.crtc_vtotal);
}
static
-int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state)
+int intel_vrr_fixed_rr_hw_vmax(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_fixed_rr_vtotal(crtc_state);
+ return intel_vrr_fixed_rr_hw_vtotal(crtc_state);
}
static
-int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state)
+int intel_vrr_fixed_rr_hw_vmin(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- return intel_vrr_fixed_rr_vtotal(crtc_state) -
- intel_vrr_flipline_offset(display);
+ return intel_vrr_fixed_rr_hw_vtotal(crtc_state) -
+ intel_vrr_vmin_flipline_offset(display);
}
static
-int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state)
+int intel_vrr_fixed_rr_hw_flipline(const struct intel_crtc_state *crtc_state)
{
- return intel_vrr_fixed_rr_vtotal(crtc_state);
+ return intel_vrr_fixed_rr_hw_vtotal(crtc_state);
}
void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
@@ -297,22 +301,11 @@ void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state)
return;
intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- intel_vrr_fixed_rr_vmin(crtc_state) - 1);
+ intel_vrr_fixed_rr_hw_vmin(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- intel_vrr_fixed_rr_vmax(crtc_state) - 1);
+ intel_vrr_fixed_rr_hw_vmax(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- intel_vrr_fixed_rr_flipline(crtc_state) - 1);
-}
-
-static
-void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state)
-{
- /*
- * For fixed rr, vmin = vmax = flipline.
- * vmin is already set to crtc_vtotal set vmax and flipline the same.
- */
- crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal;
- crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal;
+ intel_vrr_fixed_rr_hw_flipline(crtc_state) - 1);
}
static
@@ -384,60 +377,131 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
vmax = vmin;
}
- crtc_state->vrr.vmin = vmin;
- crtc_state->vrr.vmax = vmax;
-
- crtc_state->vrr.flipline = crtc_state->vrr.vmin;
-
if (crtc_state->uapi.vrr_enabled && vmin < vmax)
- intel_vrr_compute_vrr_timings(crtc_state);
+ intel_vrr_compute_vrr_timings(crtc_state, vmin, vmax);
else if (is_cmrr_frac_required(crtc_state) && is_edp)
intel_vrr_compute_cmrr_timings(crtc_state);
else
intel_vrr_compute_fixed_rr_timings(crtc_state);
- /*
- * flipline determines the min vblank length the hardware will
- * generate, and on ICL/TGL flipline>=vmin+1, hence we reduce
- * vmin by one to make sure we can get the actual min vblank length.
- */
- crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display);
-
if (HAS_AS_SDP(display)) {
crtc_state->vrr.vsync_start =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
- crtc_state->hw.adjusted_mode.vsync_start);
+ crtc_state->hw.adjusted_mode.crtc_vsync_start);
crtc_state->vrr.vsync_end =
(crtc_state->hw.adjusted_mode.crtc_vtotal -
- crtc_state->hw.adjusted_mode.vsync_end);
+ crtc_state->hw.adjusted_mode.crtc_vsync_end);
}
}
-void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state)
+static int
+intel_vrr_max_hw_guardband(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ int max_pipeline_full = REG_FIELD_MAX(VRR_CTL_PIPELINE_FULL_MASK);
+
+ if (DISPLAY_VER(display) >= 13)
+ return REG_FIELD_MAX(XELPD_VRR_CTL_VRR_GUARDBAND_MASK);
+ else
+ return intel_vrr_pipeline_full_to_guardband(crtc_state,
+ max_pipeline_full);
+}
+
+static int
+intel_vrr_max_vblank_guardband(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ return crtc_state->vrr.vmin -
+ adjusted_mode->crtc_vdisplay -
+ crtc_state->set_context_latency -
+ intel_vrr_extra_vblank_delay(display);
+}
+
+static int
+intel_vrr_max_guardband(struct intel_crtc_state *crtc_state)
+{
+ return min(intel_vrr_max_hw_guardband(crtc_state),
+ intel_vrr_max_vblank_guardband(crtc_state));
+}
+
+static
+int intel_vrr_compute_optimized_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct skl_prefill_ctx prefill_ctx;
+ int prefill_latency_us;
+ int guardband = 0;
+
+ skl_prefill_init_worst(&prefill_ctx, crtc_state);
+
+ /*
+ * The SoC power controller runs SAGV mutually exclusively with package C
+ * states, so the max of the package C and SAGV latencies is used to compute
+ * the min prefill guardband:
+ * PM delay = max(sagv_latency, pkgc_max_latency (highest enabled wm level 1 and up))
+ */
+ prefill_latency_us = max(display->sagv.block_time_us,
+ skl_watermark_max_latency(display, 1));
+
+ guardband = skl_prefill_min_guardband(&prefill_ctx,
+ crtc_state,
+ prefill_latency_us);
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ guardband = max(guardband, intel_psr_min_guardband(crtc_state));
+ guardband = max(guardband, intel_dp_sdp_min_guardband(crtc_state, true));
+ }
+
+ return guardband;
+}
+
+static bool intel_vrr_use_optimized_guardband(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * TODO: Enable optimized guardband for HDMI.
+ * For HDMI, a lot of infoframes are transmitted a line or two after vsync.
+ * Since with an optimized guardband the double buffering point is at the
+ * delayed vblank, we need to ensure that vsync happens after the delayed
+ * vblank for the HDMI case.
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return false;
+
+ return true;
+}
+
+void intel_vrr_compute_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int guardband;
+
if (!intel_vrr_possible(crtc_state))
return;
- if (DISPLAY_VER(display) >= 13) {
- crtc_state->vrr.guardband =
- crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start;
- } else {
- /* hardware imposes one extra scanline somewhere */
- crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vblank_start -
- crtc_state->framestart_delay - 1);
+ if (intel_vrr_use_optimized_guardband(crtc_state))
+ guardband = intel_vrr_compute_optimized_guardband(crtc_state);
+ else
+ guardband = crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;
+
+ crtc_state->vrr.guardband = min(guardband, intel_vrr_max_guardband(crtc_state));
+ if (intel_vrr_always_use_vrr_tg(display)) {
+ adjusted_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vtotal - crtc_state->vrr.guardband;
/*
- * vmin/vmax/flipline also need to be adjusted by
- * the vblank delay to maintain correct vtotals.
+ * pipe_mode has already been derived from the
+ * original adjusted_mode, keep the two in sync.
*/
- crtc_state->vrr.vmin -= intel_vrr_real_vblank_delay(crtc_state);
- crtc_state->vrr.vmax -= intel_vrr_real_vblank_delay(crtc_state);
- crtc_state->vrr.flipline -= intel_vrr_real_vblank_delay(crtc_state);
+ pipe_mode->crtc_vblank_start =
+ adjusted_mode->crtc_vblank_start;
}
+
+ if (DISPLAY_VER(display) < 13)
+ crtc_state->vrr.pipeline_full =
+ intel_vrr_guardband_to_pipeline_full(crtc_state,
+ crtc_state->vrr.guardband);
}
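[Editor's note: for intuition on the PM delay rule in intel_vrr_compute_optimized_guardband() above, a minimal sketch with invented latencies — the real values come from display->sagv.block_time_us and skl_watermark_max_latency():]

#include <stdio.h>

static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	int sagv_block_time_us = 20;	/* assumed SAGV latency */
	int pkgc_max_latency_us = 36;	/* assumed worst enabled WM level >= 1 */

	/* prefill must cover whichever exclusive PM state is slower */
	printf("prefill PM latency: %d us\n",
	       max_int(sagv_block_time_us, pkgc_max_latency_us));
	return 0;
}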
static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
@@ -461,6 +525,9 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ if (!HAS_VRR(display))
+ return;
+
/*
* This bit seems to have two meanings depending on the platform:
* TGL: generate VRR "safe window" for DSB vblank waits
@@ -489,7 +556,7 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
intel_vrr_set_fixed_rr_timings(crtc_state);
- if (!intel_vrr_always_use_vrr_tg(display) && !crtc_state->vrr.enable)
+ if (!intel_vrr_always_use_vrr_tg(display))
intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
trans_vrr_ctl(crtc_state));
@@ -498,6 +565,18 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
TRANS_VRR_VSYNC(display, cpu_transcoder),
VRR_VSYNC_END(crtc_state->vrr.vsync_end) |
VRR_VSYNC_START(crtc_state->vrr.vsync_start));
+
+ /*
+ * On BMG and LNL+ onwards, EMP_AS_SDP_TL is used to program the
+ * double buffering point and transmission line for VRR packets for
+ * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
+ * Since we currently support VRR only for DP/eDP, this is programmed
+ * for the Adaptive Sync SDP with the transmission line at vsync start.
+ */
+ if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
+ intel_de_write(display,
+ EMP_AS_SDP_TL(display, cpu_transcoder),
+ EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
}
void intel_vrr_send_push(struct intel_dsb *dsb,
@@ -576,126 +655,128 @@ bool intel_vrr_always_use_vrr_tg(struct intel_display *display)
return false;
}
-static
-void intel_vrr_set_db_point_and_transmission_line(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_hw_vmin(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- /*
- * For BMG and LNL+ onwards the EMP_AS_SDP_TL is used for programming
- * double buffering point and transmission line for VRR packets for
- * HDMI2.1/DP/eDP/DP->HDMI2.1 PCON.
- * Since currently we support VRR only for DP/eDP, so this is programmed
- * to for Adaptive Sync SDP to Vsync start.
- */
- if (DISPLAY_VERx100(display) == 1401 || DISPLAY_VER(display) >= 20)
- intel_de_write(display,
- EMP_AS_SDP_TL(display, cpu_transcoder),
- EMP_AS_SDP_DB_TL(crtc_state->vrr.vsync_start));
+ return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmin) -
+ intel_vrr_vmin_flipline_offset(display);
}
-void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
+static int intel_vrr_hw_vmax(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_hw_value(crtc_state, crtc_state->vrr.vmax);
+}
+
+static int intel_vrr_hw_flipline(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_hw_value(crtc_state, crtc_state->vrr.flipline);
+}
+
+static void intel_vrr_set_vrr_timings(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!crtc_state->vrr.enable)
- return;
-
intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder),
- crtc_state->vrr.vmin - 1);
+ intel_vrr_hw_vmin(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder),
- crtc_state->vrr.vmax - 1);
+ intel_vrr_hw_vmax(crtc_state) - 1);
intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder),
- crtc_state->vrr.flipline - 1);
+ intel_vrr_hw_flipline(crtc_state) - 1);
+}
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN);
+static void intel_vrr_tg_enable(const struct intel_crtc_state *crtc_state,
+ bool cmrr_enable)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 vrr_ctl;
- if (!intel_vrr_always_use_vrr_tg(display)) {
- intel_vrr_set_db_point_and_transmission_line(crtc_state);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN);
- if (crtc_state->cmrr.enable) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | VRR_CTL_CMRR_ENABLE |
- trans_vrr_ctl(crtc_state));
- } else {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
- }
- }
+ vrr_ctl = VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state);
+
+ /*
+ * FIXME this might be broken as bspec seems to imply that
+ * even VRR_CTL_CMRR_ENABLE is armed by TRANS_CMRR_N_HI
+ * when enabling CMRR (but not when disabling CMRR?).
+ */
+ if (cmrr_enable)
+ vrr_ctl |= VRR_CTL_CMRR_ENABLE;
+
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), vrr_ctl);
}
-void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+static void intel_vrr_tg_disable(const struct intel_crtc_state *old_crtc_state)
{
struct intel_display *display = to_intel_display(old_crtc_state);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- if (!old_crtc_state->vrr.enable)
- return;
+ intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
- if (!intel_vrr_always_use_vrr_tg(display)) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(old_crtc_state));
- intel_de_wait_for_clear(display,
- TRANS_VRR_STATUS(display, cpu_transcoder),
- VRR_STATUS_VRR_EN_LIVE, 1000);
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
- }
+ if (intel_de_wait_for_clear_ms(display,
+ TRANS_VRR_STATUS(display, cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000))
+ drm_err(display->drm, "Timed out waiting for VRR live status to clear\n");
- intel_vrr_set_fixed_rr_timings(old_crtc_state);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
}
-void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
+void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!HAS_VRR(display))
+ if (!crtc_state->vrr.enable)
return;
- if (!intel_vrr_possible(crtc_state))
- return;
+ intel_vrr_set_vrr_timings(crtc_state);
- if (!intel_vrr_always_use_vrr_tg(display)) {
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- trans_vrr_ctl(crtc_state));
- return;
- }
+ if (!intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_enable(crtc_state, crtc_state->cmrr.enable);
+}
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN);
+void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
- intel_vrr_set_db_point_and_transmission_line(crtc_state);
+ if (!old_crtc_state->vrr.enable)
+ return;
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder),
- VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
+ if (!intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_disable(old_crtc_state);
+
+ intel_vrr_set_fixed_rr_timings(old_crtc_state);
}
-void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state)
+void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
- if (!HAS_VRR(display))
- return;
if (!intel_vrr_possible(crtc_state))
return;
- intel_de_write(display, TRANS_VRR_CTL(display, cpu_transcoder), 0);
+ if (intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_enable(crtc_state, false);
+}
- intel_de_wait_for_clear(display, TRANS_VRR_STATUS(display, cpu_transcoder),
- VRR_STATUS_VRR_EN_LIVE, 1000);
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+void intel_vrr_transcoder_disable(const struct intel_crtc_state *old_crtc_state)
+{
+ struct intel_display *display = to_intel_display(old_crtc_state);
+
+ if (!intel_vrr_possible(old_crtc_state))
+ return;
+
+ if (intel_vrr_always_use_vrr_tg(display))
+ intel_vrr_tg_disable(old_crtc_state);
}
bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
{
return crtc_state->vrr.flipline &&
crtc_state->vrr.flipline == crtc_state->vrr.vmax &&
- crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state);
+ crtc_state->vrr.flipline == crtc_state->vrr.vmin;
}
void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
@@ -720,14 +801,20 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
TRANS_CMRR_M_HI(display, cpu_transcoder));
}
- if (DISPLAY_VER(display) >= 13)
+ if (DISPLAY_VER(display) >= 13) {
crtc_state->vrr.guardband =
REG_FIELD_GET(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, trans_vrr_ctl);
- else
- if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE)
+ } else {
+ if (trans_vrr_ctl & VRR_CTL_PIPELINE_FULL_OVERRIDE) {
crtc_state->vrr.pipeline_full =
REG_FIELD_GET(VRR_CTL_PIPELINE_FULL_MASK, trans_vrr_ctl);
+ crtc_state->vrr.guardband =
+ intel_vrr_pipeline_full_to_guardband(crtc_state,
+ crtc_state->vrr.pipeline_full);
+ }
+ }
+
if (trans_vrr_ctl & VRR_CTL_FLIP_LINE_EN) {
crtc_state->vrr.flipline = intel_de_read(display,
TRANS_VRR_FLIPLINE(display, cpu_transcoder)) + 1;
@@ -736,6 +823,15 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
crtc_state->vrr.vmin = intel_de_read(display,
TRANS_VRR_VMIN(display, cpu_transcoder)) + 1;
+ if (DISPLAY_VER(display) < 13) {
+ /* undo what intel_vrr_hw_value() does when writing the values */
+ crtc_state->vrr.flipline += crtc_state->set_context_latency;
+ crtc_state->vrr.vmax += crtc_state->set_context_latency;
+ crtc_state->vrr.vmin += crtc_state->set_context_latency;
+
+ crtc_state->vrr.vmin += intel_vrr_vmin_flipline_offset(display);
+ }
+
/*
* For platforms that always use VRR Timing Generator, the VTOTAL.Vtotal
* bits are not filled. Since for these platforms TRAN_VMIN is always
@@ -771,4 +867,34 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state)
*/
if (crtc_state->vrr.enable)
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
+
+ /*
+ * For platforms that always use the VRR timing generator, we overwrite
+ * crtc_vblank_start with vtotal - guardband to reflect the delayed
+ * vblank start. This works for both default and optimized guardband values.
+ * On other platforms, we keep the original value from
+ * intel_get_transcoder_timings() and apply adjustments only in VRR-specific
+ * paths as needed.
+ */
+ if (intel_vrr_always_use_vrr_tg(display))
+ crtc_state->hw.adjusted_mode.crtc_vblank_start =
+ crtc_state->hw.adjusted_mode.crtc_vtotal -
+ crtc_state->vrr.guardband;
+}
+
+int intel_vrr_safe_window_start(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (DISPLAY_VER(display) >= 30)
+ return crtc_state->hw.adjusted_mode.crtc_vdisplay -
+ crtc_state->set_context_latency;
+ else
+ return crtc_state->hw.adjusted_mode.crtc_vdisplay;
+}
+
+int intel_vrr_vmin_safe_window_end(const struct intel_crtc_state *crtc_state)
+{
+ return intel_vrr_vmin_vblank_start(crtc_state) -
+ crtc_state->set_context_latency;
}
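[Editor's note: to make the delayed-vblank bookkeeping in intel_vrr_compute_guardband() and intel_vrr_get_config() above concrete: on platforms that always use the VRR timing generator, crtc_vblank_start becomes vtotal - guardband, and the vmin/vmax vblank starts follow the same pattern. A standalone sketch with invented timings:]

#include <stdio.h>

int main(void)
{
	int vtotal = 1125, guardband = 30;	/* invented */
	int vmin = 1125, vmax = 2250;		/* invented vtotals */

	printf("crtc_vblank_start: %d\n", vtotal - guardband);
	printf("vmin vblank start: %d\n", vmin - guardband);
	printf("vmax vblank start: %d\n", vmax - guardband);
	return 0;
}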
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index 38bf9996b883..bc9044621635 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -21,7 +21,7 @@ bool intel_vrr_possible(const struct intel_crtc_state *crtc_state);
void intel_vrr_check_modeset(struct intel_atomic_state *state);
void intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state);
-void intel_vrr_compute_config_late(struct intel_crtc_state *crtc_state);
+void intel_vrr_compute_guardband(struct intel_crtc_state *crtc_state);
void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
void intel_vrr_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_send_push(struct intel_dsb *dsb,
@@ -35,11 +35,12 @@ int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state);
-int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state);
bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state);
void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_transcoder_disable(const struct intel_crtc_state *crtc_state);
void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state);
bool intel_vrr_always_use_vrr_tg(struct intel_display *display);
+int intel_vrr_safe_window_start(const struct intel_crtc_state *crtc_state);
+int intel_vrr_vmin_safe_window_end(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_VRR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c
index bba82e888db2..f887a664fe22 100644
--- a/drivers/gpu/drm/i915/display/intel_wm.c
+++ b/drivers/gpu/drm/i915/display/intel_wm.c
@@ -5,7 +5,6 @@
#include <linux/debugfs.h>
-#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "i9xx_wm.h"
@@ -390,15 +389,15 @@ static const struct file_operations i915_cur_wm_latency_fops = {
void intel_wm_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
- debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_pri_wm_latency", 0644, debugfs_root,
display, &i915_pri_wm_latency_fops);
- debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_spr_wm_latency", 0644, debugfs_root,
display, &i915_spr_wm_latency_fops);
- debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_cur_wm_latency", 0644, debugfs_root,
display, &i915_cur_wm_latency_fops);
skl_watermark_debugfs_register(display);
diff --git a/drivers/gpu/drm/i915/display/skl_prefill.c b/drivers/gpu/drm/i915/display/skl_prefill.c
new file mode 100644
index 000000000000..4707c2e7127a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_prefill.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include <drm/drm_print.h>
+
+#include "intel_cdclk.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "intel_vblank.h"
+#include "intel_vdsc.h"
+#include "skl_prefill.h"
+#include "skl_scaler.h"
+#include "skl_watermark.h"
+
+static unsigned int prefill_usecs_to_lines(const struct intel_crtc_state *crtc_state,
+ unsigned int usecs)
+{
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pipe_mode->crtc_clock, usecs << 16),
+ pipe_mode->crtc_htotal * 1000);
+}
+
+static void prefill_init(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->prefill.fixed = crtc_state->framestart_delay << 16;
+
+ /* 20 usec for translation walks/etc. */
+ ctx->prefill.fixed += prefill_usecs_to_lines(crtc_state, 20);
+
+ ctx->prefill.dsc = intel_vdsc_prefill_lines(crtc_state);
+}
+
+static void prefill_init_nocdclk_worst(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init(ctx, crtc_state);
+
+ ctx->prefill.wm0 = skl_wm0_prefill_lines_worst(crtc_state);
+ ctx->prefill.scaler_1st = skl_scaler_1st_prefill_lines_worst(crtc_state);
+ ctx->prefill.scaler_2nd = skl_scaler_2nd_prefill_lines_worst(crtc_state);
+
+ ctx->adj.scaler_1st = skl_scaler_1st_prefill_adjustment_worst(crtc_state);
+ ctx->adj.scaler_2nd = skl_scaler_2nd_prefill_adjustment_worst(crtc_state);
+}
+
+static void prefill_init_nocdclk(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init(ctx, crtc_state);
+
+ ctx->prefill.wm0 = skl_wm0_prefill_lines(crtc_state);
+ ctx->prefill.scaler_1st = skl_scaler_1st_prefill_lines(crtc_state);
+ ctx->prefill.scaler_2nd = skl_scaler_2nd_prefill_lines(crtc_state);
+
+ ctx->adj.scaler_1st = skl_scaler_1st_prefill_adjustment(crtc_state);
+ ctx->adj.scaler_2nd = skl_scaler_2nd_prefill_adjustment(crtc_state);
+}
+
+static unsigned int prefill_adjust(unsigned int value, unsigned int factor)
+{
+ return DIV_ROUND_UP_ULL(mul_u32_u32(value, factor), 0x10000);
+}
+
+static unsigned int prefill_lines_nocdclk(const struct skl_prefill_ctx *ctx)
+{
+ unsigned int prefill = 0;
+
+ prefill += ctx->prefill.dsc;
+ prefill = prefill_adjust(prefill, ctx->adj.scaler_2nd);
+
+ prefill += ctx->prefill.scaler_2nd;
+ prefill = prefill_adjust(prefill, ctx->adj.scaler_1st);
+
+ prefill += ctx->prefill.scaler_1st;
+ prefill += ctx->prefill.wm0;
+
+ return prefill;
+}
+
+static unsigned int prefill_lines_cdclk(const struct skl_prefill_ctx *ctx)
+{
+ return prefill_adjust(prefill_lines_nocdclk(ctx), ctx->adj.cdclk);
+}
+
+static unsigned int prefill_lines_full(const struct skl_prefill_ctx *ctx)
+{
+ return ctx->prefill.fixed + prefill_lines_cdclk(ctx);
+}
+
+void skl_prefill_init_worst(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init_nocdclk_worst(ctx, crtc_state);
+
+ ctx->adj.cdclk = intel_cdclk_prefill_adjustment_worst(crtc_state);
+
+ ctx->prefill.full = prefill_lines_full(ctx);
+}
+
+void skl_prefill_init(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ prefill_init_nocdclk(ctx, crtc_state);
+
+ ctx->adj.cdclk = intel_cdclk_prefill_adjustment(crtc_state);
+
+ ctx->prefill.full = prefill_lines_full(ctx);
+}
+
+static unsigned int prefill_lines_with_latency(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us)
+{
+ return ctx->prefill.full + prefill_usecs_to_lines(crtc_state, latency_us);
+}
+
+int skl_prefill_min_guardband(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us)
+{
+ unsigned int prefill = prefill_lines_with_latency(ctx, crtc_state, latency_us);
+
+ return DIV_ROUND_UP(prefill, 0x10000);
+}
+
+static unsigned int prefill_guardband(const struct intel_crtc_state *crtc_state)
+{
+ return intel_crtc_vblank_length(crtc_state) << 16;
+}
+
+bool skl_prefill_vblank_too_short(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us)
+{
+ unsigned int guardband = prefill_guardband(crtc_state);
+ unsigned int prefill = prefill_lines_with_latency(ctx, crtc_state, latency_us);
+
+ return guardband < prefill;
+}
+
+int skl_prefill_min_cdclk(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state)
+{
+ unsigned int prefill_unadjusted = prefill_lines_nocdclk(ctx);
+ unsigned int prefill_available = prefill_guardband(crtc_state) - ctx->prefill.fixed;
+
+ return intel_cdclk_min_cdclk_for_prefill(crtc_state, prefill_unadjusted,
+ prefill_available);
+}
diff --git a/drivers/gpu/drm/i915/display/skl_prefill.h b/drivers/gpu/drm/i915/display/skl_prefill.h
new file mode 100644
index 000000000000..028ee19b64ce
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/skl_prefill.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __SKL_PREFILL_H__
+#define __SKL_PREFILL_H__
+
+#include <linux/types.h>
+
+struct intel_crtc_state;
+
+struct skl_prefill_ctx {
+ /* .16 scanlines */
+ struct {
+ unsigned int fixed;
+ unsigned int wm0;
+ unsigned int scaler_1st;
+ unsigned int scaler_2nd;
+ unsigned int dsc;
+ unsigned int full;
+ } prefill;
+
+ /* .16 adjustment factors */
+ struct {
+ unsigned int cdclk;
+ unsigned int scaler_1st;
+ unsigned int scaler_2nd;
+ } adj;
+};
+
+void skl_prefill_init_worst(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state);
+void skl_prefill_init(struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state);
+
+bool skl_prefill_vblank_too_short(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us);
+int skl_prefill_min_guardband(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int latency_us);
+int skl_prefill_min_cdclk(const struct skl_prefill_ctx *ctx,
+ const struct intel_crtc_state *crtc_state);
+
+#endif /* __SKL_PREFILL_H__ */
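[Editor's note: a standalone sketch of how the skl_prefill_ctx terms above combine, mirroring prefill_lines_full() in skl_prefill.c; all values are invented .16 fixed-point numbers and all adjustment factors are 1.0 for simplicity:]

#include <stdio.h>
#include <stdint.h>

static unsigned int adjust(unsigned int v, unsigned int factor_16)
{
	/* multiply two .16 values, rounding up */
	return (unsigned int)(((uint64_t)v * factor_16 + 0xffff) / 0x10000);
}

int main(void)
{
	unsigned int fixed = 3 << 16, wm0 = 2 << 16, dsc = 0x18000;
	unsigned int sc1 = 4 << 16, sc2 = 0, one = 0x10000;
	unsigned int p = dsc;

	p = adjust(p, one);	/* 2nd scaler adjustment */
	p += sc2;
	p = adjust(p, one);	/* 1st scaler adjustment */
	p += sc1 + wm0;
	p = adjust(p, one);	/* cdclk adjustment */
	p += fixed;

	printf("prefill: %u.%04u lines\n",
	       p >> 16, (p & 0xffff) * 10000 / 0x10000);
	return 0;
}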
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index d77798499c57..4c4deac7f9c8 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -5,11 +5,14 @@
#include <drm/drm_print.h>
-#include "i915_utils.h"
+#include "intel_casf.h"
+#include "intel_casf_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_fb.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
@@ -91,11 +94,9 @@ static void skl_scaler_min_src_size(const struct drm_format_info *format,
}
}
-static void skl_scaler_max_src_size(struct intel_crtc *crtc,
+static void skl_scaler_max_src_size(struct intel_display *display,
int *max_w, int *max_h)
{
- struct intel_display *display = to_intel_display(crtc);
-
if (DISPLAY_VER(display) >= 14) {
*max_w = 4096;
*max_h = 8192;
@@ -134,6 +135,23 @@ static void skl_scaler_max_dst_size(struct intel_crtc *crtc,
}
}
+enum drm_mode_status
+skl_scaler_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes)
+{
+ int max_h, max_w;
+
+ if (num_joined_pipes < 2 && output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
+ skl_scaler_max_src_size(display, &max_w, &max_h);
+ if (mode->hdisplay > max_h)
+ return MODE_NO_420;
+ }
+
+ return MODE_OK;
+}
+
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned int scaler_user, int *scaler_id,
@@ -201,7 +219,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
}
skl_scaler_min_src_size(format, modifier, &min_src_w, &min_src_h);
- skl_scaler_max_src_size(crtc, &max_src_w, &max_src_h);
+ skl_scaler_max_src_size(display, &max_src_w, &max_src_h);
skl_scaler_min_dst_size(&min_dst_w, &min_dst_h);
skl_scaler_max_dst_size(crtc, &max_dst_w, &max_dst_h);
@@ -266,7 +284,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state)
drm_rect_width(&crtc_state->pipe_src),
drm_rect_height(&crtc_state->pipe_src),
width, height, NULL, 0,
- crtc_state->pch_pfit.enabled);
+ crtc_state->pch_pfit.enabled ||
+ intel_casf_needs_scaler(crtc_state));
}
/**
@@ -305,7 +324,9 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
}
static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
- struct intel_crtc *crtc)
+ struct intel_crtc *crtc,
+ struct intel_plane_state *plane_state,
+ bool casf_scaler)
{
int i;
@@ -313,6 +334,10 @@ static int intel_allocate_scaler(struct intel_crtc_scaler_state *scaler_state,
if (scaler_state->scalers[i].in_use)
continue;
+ /* CASF needs second scaler */
+ if (!plane_state && casf_scaler && i != 1)
+ continue;
+
scaler_state->scalers[i].in_use = true;
return i;
@@ -363,7 +388,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state,
int num_scalers_need, struct intel_crtc *crtc,
const char *name, int idx,
struct intel_plane_state *plane_state,
- int *scaler_id)
+ int *scaler_id, bool casf_scaler)
{
struct intel_display *display = to_intel_display(crtc);
struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
@@ -372,7 +397,7 @@ static int intel_atomic_setup_scaler(struct intel_crtc_state *crtc_state,
int vscale = 0;
if (*scaler_id < 0)
- *scaler_id = intel_allocate_scaler(scaler_state, crtc);
+ *scaler_id = intel_allocate_scaler(scaler_state, crtc, plane_state, casf_scaler);
if (drm_WARN(display->drm, *scaler_id < 0,
"Cannot find scaler for %s:%d\n", name, idx))
@@ -504,10 +529,14 @@ static int setup_crtc_scaler(struct intel_atomic_state *state,
struct intel_crtc_scaler_state *scaler_state =
&crtc_state->scaler_state;
+ if (intel_casf_needs_scaler(crtc_state) && crtc_state->pch_pfit.enabled)
+ return -EINVAL;
+
return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "CRTC", crtc->base.base.id,
- NULL, &scaler_state->scaler_id);
+ NULL, &scaler_state->scaler_id,
+ intel_casf_needs_scaler(crtc_state));
}
static int setup_plane_scaler(struct intel_atomic_state *state,
@@ -542,7 +571,8 @@ static int setup_plane_scaler(struct intel_atomic_state *state,
return intel_atomic_setup_scaler(crtc_state,
hweight32(scaler_state->scaler_users),
crtc, "PLANE", plane->base.base.id,
- plane_state, &plane_state->scaler_id);
+ plane_state, &plane_state->scaler_id,
+ false);
}
/**
@@ -722,6 +752,52 @@ static void skl_scaler_setup_filter(struct intel_display *display,
}
}
+#define CASF_SCALER_FILTER_SELECT \
+ (PS_FILTER_PROGRAMMED | \
+ PS_Y_VERT_FILTER_SELECT(0) | \
+ PS_Y_HORZ_FILTER_SELECT(0) | \
+ PS_UV_VERT_FILTER_SELECT(0) | \
+ PS_UV_HORZ_FILTER_SELECT(0))
+
+void skl_scaler_setup_casf(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_display *display = to_intel_display(crtc);
+ struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ struct drm_rect src, dest;
+ int id, width, height;
+ int x = 0, y = 0;
+ enum pipe pipe = crtc->pipe;
+ u32 ps_ctrl;
+
+ width = adjusted_mode->crtc_hdisplay;
+ height = adjusted_mode->crtc_vdisplay;
+
+ drm_rect_init(&dest, x, y, width, height);
+
+ width = drm_rect_width(&dest);
+ height = drm_rect_height(&dest);
+ id = scaler_state->scaler_id;
+
+ drm_rect_init(&src, 0, 0,
+ drm_rect_width(&crtc_state->pipe_src) << 16,
+ drm_rect_height(&crtc_state->pipe_src) << 16);
+
+ trace_intel_pipe_scaler_update_arm(crtc, id, x, y, width, height);
+
+ ps_ctrl = PS_SCALER_EN | PS_BINDING_PIPE | scaler_state->scalers[id].mode |
+ CASF_SCALER_FILTER_SELECT;
+
+ intel_de_write_fw(display, SKL_PS_CTRL(pipe, id), ps_ctrl);
+ intel_de_write_fw(display, SKL_PS_WIN_POS(pipe, id),
+ PS_WIN_XPOS(x) | PS_WIN_YPOS(y));
+ intel_de_write_fw(display, SKL_PS_WIN_SZ(pipe, id),
+ PS_WIN_XSIZE(width) | PS_WIN_YSIZE(height));
+}
+
void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
@@ -747,6 +823,9 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
crtc_state->scaler_state.scaler_id < 0))
return;
+ if (intel_display_wa(display, 14011503117))
+ adl_scaler_ecc_mask(crtc_state);
+
drm_rect_init(&src, 0, 0,
drm_rect_width(&crtc_state->pipe_src) << 16,
drm_rect_height(&crtc_state->pipe_src) << 16);
@@ -902,16 +981,23 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
continue;
id = i;
- crtc_state->pch_pfit.enabled = true;
+
+ /* Read CASF regs for second scaler */
+ if (HAS_CASF(display) && id == 1)
+ intel_casf_sharpness_get_config(crtc_state);
+
+ if (!crtc_state->hw.casf_params.casf_enable)
+ crtc_state->pch_pfit.enabled = true;
pos = intel_de_read(display, SKL_PS_WIN_POS(crtc->pipe, i));
size = intel_de_read(display, SKL_PS_WIN_SZ(crtc->pipe, i));
- drm_rect_init(&crtc_state->pch_pfit.dst,
- REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
- REG_FIELD_GET(PS_WIN_YPOS_MASK, pos),
- REG_FIELD_GET(PS_WIN_XSIZE_MASK, size),
- REG_FIELD_GET(PS_WIN_YSIZE_MASK, size));
+ if (!crtc_state->hw.casf_params.casf_enable)
+ drm_rect_init(&crtc_state->pch_pfit.dst,
+ REG_FIELD_GET(PS_WIN_XPOS_MASK, pos),
+ REG_FIELD_GET(PS_WIN_YPOS_MASK, pos),
+ REG_FIELD_GET(PS_WIN_XSIZE_MASK, size),
+ REG_FIELD_GET(PS_WIN_YSIZE_MASK, size));
scaler_state->scalers[i].in_use = true;
break;
@@ -923,3 +1009,170 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state)
else
scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
+
+void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ if (!crtc_state->pch_pfit.enabled)
+ return;
+
+ intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
+}
+
+void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+
+ if (scaler_state->scaler_id < 0)
+ return;
+
+ intel_de_write_fw(display,
+ SKL_PS_ECC_STAT(crtc->pipe, scaler_state->scaler_id),
+ 1);
+ intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, 0);
+}
+
+unsigned int skl_scaler_1st_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * FIXME: scalers have not been assigned yet,
+ * so we can't look up the scale factors
+ */
+ return 0x10000;
+}
+
+unsigned int skl_scaler_2nd_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+{
+ /*
+ * FIXME: scalers have not been assigned yet,
+ * so we can't look up the scale factors
+ */
+ return 0x10000;
+}
+
+unsigned int skl_scaler_1st_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int num_scalers = hweight32(scaler_state->scaler_users);
+
+ if (num_scalers > 0)
+ return 4 << 16;
+
+ return 0;
+}
+
+unsigned int skl_scaler_2nd_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ const struct intel_crtc_scaler_state *scaler_state =
+ &crtc_state->scaler_state;
+ int num_scalers = hweight32(scaler_state->scaler_users);
+
+ if (num_scalers > 1 && crtc_state->pch_pfit.enabled)
+ return 4 << 16;
+
+ return 0;
+}
+
+static unsigned int _skl_scaler_max_scale(const struct intel_crtc_state *crtc_state,
+ unsigned int max_scale)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ /*
+ * Downscaling requires increasing cdclk, so max scale
+ * factor is limited to the max_dotclock/dotclock ratio.
+ *
+ * FIXME find out the max downscale factors properly
+ */
+ return min(max_scale, DIV_ROUND_UP_ULL((u64)display->cdclk.max_dotclk_freq << 16,
+ crtc_state->hw.pipe_mode.crtc_clock));
+}
+
+unsigned int skl_scaler_max_total_scale(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int max_scale;
+
+ if (crtc->num_scalers < 1)
+ return 0x10000;
+
+ /* FIXME find out the max downscale factors properly */
+ max_scale = 9 << 16;
+ if (crtc->num_scalers > 1)
+ max_scale *= 9;
+
+ return _skl_scaler_max_scale(crtc_state, max_scale);
+}
+
+unsigned int skl_scaler_max_hscale(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int max_scale;
+
+ if (crtc->num_scalers < 1)
+ return 0x10000;
+
+ /* FIXME find out the max downscale factors properly */
+ max_scale = 3 << 16;
+
+ return _skl_scaler_max_scale(crtc_state, max_scale);
+}
+
+unsigned int skl_scaler_max_scale(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ unsigned int max_scale;
+
+ if (crtc->num_scalers < 1)
+ return 0x10000;
+
+ /* FIXME find out the max downscale factors properly */
+ max_scale = 9 << 16;
+
+ return _skl_scaler_max_scale(crtc_state, max_scale);
+}
+
+unsigned int skl_scaler_1st_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 0)
+ return skl_scaler_max_scale(crtc_state);
+ else
+ return 0x10000;
+}
+
+unsigned int skl_scaler_2nd_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 1)
+ return skl_scaler_max_scale(crtc_state);
+ else
+ return 0x10000;
+}
+
+unsigned int skl_scaler_1st_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 0)
+ return 4 << 16;
+ else
+ return 0;
+}
+
+unsigned int skl_scaler_2nd_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ if (crtc->num_scalers > 1)
+ return 4 << 16;
+ else
+ return 0;
+}
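[Editor's note: a sketch of the downscale clamp in _skl_scaler_max_scale() above — the nominal .16 max factor is capped by the max_dotclock/dotclock ratio, since downscaling raises the cdclk requirement. Clock values are invented:]

#include <stdio.h>
#include <stdint.h>

static unsigned int clamp_max_scale(unsigned int max_scale_16,
				    unsigned int max_dotclk_khz,
				    unsigned int crtc_clock_khz)
{
	/* .16 ratio of max dotclock to this mode's dotclock, rounded up */
	uint64_t ratio = (((uint64_t)max_dotclk_khz << 16) +
			  crtc_clock_khz - 1) / crtc_clock_khz;

	return max_scale_16 < ratio ? max_scale_16 : (unsigned int)ratio;
}

int main(void)
{
	/* nominal 3x horizontal limit, invented max dotclock, 4k-ish mode */
	unsigned int scale = clamp_max_scale(3 << 16, 652800, 594000);

	printf("max hscale: %u.%02u\n",
	       scale >> 16, (scale & 0xffff) * 100 / 0x10000);
	return 0;
}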
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h
index 355ea15260ca..7e8d819c019d 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.h
+++ b/drivers/gpu/drm/i915/display/skl_scaler.h
@@ -5,10 +5,14 @@
#ifndef INTEL_SCALER_H
#define INTEL_SCALER_H
+enum drm_mode_status;
+struct drm_display_mode;
struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dsb;
+enum intel_output_format;
struct intel_plane;
struct intel_plane_state;
@@ -32,4 +36,30 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state);
void skl_scaler_get_config(struct intel_crtc_state *crtc_state);
+void skl_scaler_setup_casf(struct intel_crtc_state *crtc_state);
+
+enum drm_mode_status
+skl_scaler_mode_valid(struct intel_display *display,
+ const struct drm_display_mode *mode,
+ enum intel_output_format output_format,
+ int num_joined_pipes);
+
+void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state);
+
+void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state);
+
+unsigned int skl_scaler_max_total_scale(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_max_scale(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_max_hscale(const struct intel_crtc_state *crtc_state);
+
+unsigned int skl_scaler_1st_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_adjustment_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_1st_prefill_lines_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_lines_worst(const struct intel_crtc_state *crtc_state);
+
+unsigned int skl_scaler_1st_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_adjustment(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_1st_prefill_lines(const struct intel_crtc_state *crtc_state);
+unsigned int skl_scaler_2nd_prefill_lines(const struct intel_crtc_state *crtc_state);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index e20972ddfa09..ee8e24497d2c 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -7,21 +7,26 @@
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "pxp/intel_pxp.h"
-#include "i915_drv.h"
#include "intel_bo.h"
+#include "intel_color.h"
+#include "intel_color_pipeline.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_panic.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_step.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_universal_plane_regs.h"
@@ -387,44 +392,19 @@ static int glk_plane_max_width(const struct drm_framebuffer *fb,
}
}
+static int adl_plane_min_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 16 / fb->format->cpp[color_plane];
+}
+
static int icl_plane_min_width(const struct drm_framebuffer *fb,
int color_plane,
unsigned int rotation)
{
/* Wa_14011264657, Wa_14011050563: gen11+ */
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- return 18;
- case DRM_FORMAT_RGB565:
- return 10;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_ABGR8888:
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ARGB2101010:
- case DRM_FORMAT_ABGR2101010:
- case DRM_FORMAT_XVYU2101010:
- case DRM_FORMAT_Y212:
- case DRM_FORMAT_Y216:
- return 6;
- case DRM_FORMAT_NV12:
- return 20;
- case DRM_FORMAT_P010:
- case DRM_FORMAT_P012:
- case DRM_FORMAT_P016:
- return 12;
- case DRM_FORMAT_XRGB16161616F:
- case DRM_FORMAT_XBGR16161616F:
- case DRM_FORMAT_ARGB16161616F:
- case DRM_FORMAT_ABGR16161616F:
- case DRM_FORMAT_XVYU12_16161616:
- case DRM_FORMAT_XVYU16161616:
- return 4;
- default:
- return 1;
- }
+ return 16 / fb->format->cpp[color_plane] + 2;
}
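[Editor's note: for reference, evaluating the simplified minimum-width formulas above for a few common cpp values; the format names in the comment are just examples, and cpp is bytes per pixel of color plane 0:]

#include <stdio.h>

int main(void)
{
	int cpps[] = { 1, 2, 4, 8 };	/* e.g. C8, RGB565, XRGB8888, FP16 */

	for (int i = 0; i < 4; i++)
		printf("cpp=%d: adl min width %d, icl min width %d\n",
		       cpps[i], 16 / cpps[i], 16 / cpps[i] + 2);
	return 0;
}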
static int xe3_plane_max_width(const struct drm_framebuffer *fb,
@@ -461,6 +441,23 @@ static int skl_plane_max_height(const struct drm_framebuffer *fb,
return 4096;
}
+static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
+{
+ return pipe - PIPE_A + INTEL_FBC_A;
+}
+
+static bool skl_plane_has_fbc(struct intel_display *display,
+ enum intel_fbc_id fbc_id, enum plane_id plane_id)
+{
+ if ((DISPLAY_RUNTIME_INFO(display)->fbc_mask & BIT(fbc_id)) == 0)
+ return false;
+
+ if (DISPLAY_VER(display) >= 20)
+ return icl_is_hdr_plane(display, plane_id);
+ else
+ return plane_id == PLANE_1;
+}
+
static int icl_plane_max_height(const struct drm_framebuffer *fb,
int color_plane,
unsigned int rotation)
@@ -470,12 +467,11 @@ static int icl_plane_max_height(const struct drm_framebuffer *fb,
static unsigned int
plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation,
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation,
unsigned int max_pixels,
unsigned int max_bytes)
{
- const struct drm_format_info *info = drm_format_info(pixel_format);
int cpp = info->cpp[0];
if (drm_rotation_90_or_270(rotation))
@@ -486,26 +482,26 @@ plane_max_stride(struct intel_plane *plane,
static unsigned int
adl_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
unsigned int max_pixels = 65536; /* PLANE_OFFSET limit */
unsigned int max_bytes = 128 * 1024;
- return plane_max_stride(plane, pixel_format,
+ return plane_max_stride(plane, info,
modifier, rotation,
max_pixels, max_bytes);
}
static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
- u32 pixel_format, u64 modifier,
- unsigned int rotation)
+ const struct drm_format_info *info,
+ u64 modifier, unsigned int rotation)
{
unsigned int max_pixels = 8192; /* PLANE_OFFSET limit */
unsigned int max_bytes = 32 * 1024;
- return plane_max_stride(plane, pixel_format,
+ return plane_max_stride(plane, info,
modifier, rotation,
max_pixels, max_bytes);
}
@@ -896,6 +892,25 @@ static void icl_plane_disable_sel_fetch_arm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, SEL_FETCH_PLANE_CTL(pipe, plane->id), 0);
}
+static void x3p_lpd_plane_update_pixel_normalizer(struct intel_dsb *dsb,
+ struct intel_plane *plane,
+ bool enable)
+{
+ struct intel_display *display = to_intel_display(plane);
+ enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(plane->pipe);
+ u32 val;
+
+ /* Only HDR planes have a pixel normalizer, and it doesn't matter without FBC */
+ if (!skl_plane_has_fbc(display, fbc_id, plane->id))
+ return;
+
+ val = enable ? PLANE_PIXEL_NORMALIZE_NORM_FACTOR(PLANE_PIXEL_NORMALIZE_NORM_FACTOR_1_0) |
+ PLANE_PIXEL_NORMALIZE_ENABLE : 0;
+
+ intel_de_write_dsb(display, dsb,
+ PLANE_PIXEL_NORMALIZE(plane->pipe, plane->id), val);
+}
+
static void
icl_plane_disable_arm(struct intel_dsb *dsb,
struct intel_plane *plane,
@@ -911,6 +926,10 @@ icl_plane_disable_arm(struct intel_dsb *dsb,
skl_write_plane_wm(dsb, plane, crtc_state);
icl_plane_disable_sel_fetch_arm(dsb, plane, crtc_state);
+
+ if (DISPLAY_VER(display) >= 35)
+ x3p_lpd_plane_update_pixel_normalizer(dsb, plane, false);
+
intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1166,8 +1185,7 @@ static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
return plane_ctl;
}
-static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 skl_plane_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1225,8 +1243,7 @@ static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
return plane_color_ctl;
}
-static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 glk_plane_color_ctl(const struct intel_plane_state *plane_state)
{
struct intel_display *display = to_intel_display(plane_state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1260,6 +1277,18 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
if (plane_state->force_black)
plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+ if (plane_state->hw.degamma_lut)
+ plane_color_ctl |= PLANE_COLOR_PRE_CSC_GAMMA_ENABLE;
+
+ if (plane_state->hw.ctm)
+ plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+
+ if (plane_state->hw.gamma_lut) {
+ plane_color_ctl &= ~PLANE_COLOR_PLANE_GAMMA_DISABLE;
+ if (drm_color_lut32_size(plane_state->hw.gamma_lut) != 32)
+ plane_color_ctl |= PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE;
+ }
+
return plane_color_ctl;
}
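
The degamma/CTM/gamma additions above boil down to a small bit-selection table; restated as a self-contained sketch (bit positions copied from skl_universal_plane_regs.h, where PLANE_COLOR_PLANE_GAMMA_DISABLE is bit 13; the helper name is hypothetical):

/* Sketch only; mirrors the PLANE_COLOR_CTL LUT/CSC selection above. */
#define GAMMA_DISABLE			(1u << 13)	/* PLANE_COLOR_PLANE_GAMMA_DISABLE */
#define PRE_CSC_GAMMA_ENABLE		(1u << 14)	/* PLANE_COLOR_PRE_CSC_GAMMA_ENABLE */
#define POST_CSC_GAMMA_MULTSEG_ENABLE	(1u << 15)	/* PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE */
#define PLANE_CSC_ENABLE		(1u << 21)	/* PLANE_COLOR_PLANE_CSC_ENABLE */

static unsigned int example_lut_csc_bits(int has_degamma, int has_ctm,
					 int gamma_lut_size)
{
	unsigned int bits = GAMMA_DISABLE;	/* post-CSC gamma off by default */

	if (has_degamma)
		bits |= PRE_CSC_GAMMA_ENABLE;	/* degamma LUT = pre-CSC gamma */
	if (has_ctm)
		bits |= PLANE_CSC_ENABLE;	/* CTM = plane CSC */
	if (gamma_lut_size) {
		bits &= ~GAMMA_DISABLE;
		/* only a 32-entry LUT fits the plain mode; else multi-segment */
		if (gamma_lut_size != 32)
			bits |= POST_CSC_GAMMA_MULTSEG_ENABLE;
	}

	return bits;
}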
@@ -1271,12 +1300,6 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
u32 offset = plane_state->view.color_plane[color_plane].offset;
if (intel_fb_uses_dpt(fb)) {
- /*
- * The DPT object contains only one vma, so the VMA's offset
- * within the DPT is always 0.
- */
- drm_WARN_ON(display->drm, plane_state->dpt_vma &&
- intel_dpt_offset(plane_state->dpt_vma));
drm_WARN_ON(display->drm, offset & 0x1fffff);
return offset >> 9;
} else {
@@ -1285,13 +1308,20 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
}
}
-static u32 skl_plane_surf(const struct intel_plane_state *plane_state,
- int color_plane)
+static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
{
+ if (plane_state->planar_linked_plane && !plane_state->is_y_plane)
+ return 1;
+ else
+ return 0;
+}
+
+static u32 skl_plane_surf_offset(const struct intel_plane_state *plane_state)
+{
+ int color_plane = icl_plane_color_plane(plane_state);
u32 plane_surf;
- plane_surf = intel_plane_ggtt_offset(plane_state) +
- skl_surf_address(plane_state, color_plane);
+ plane_surf = skl_surf_address(plane_state, color_plane);
if (plane_state->decrypt)
plane_surf |= PLANE_SURF_DECRYPT;
@@ -1373,14 +1403,6 @@ static void icl_plane_csc_load_black(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
}
-static int icl_plane_color_plane(const struct intel_plane_state *plane_state)
-{
- if (plane_state->planar_linked_plane && !plane_state->is_y_plane)
- return 1;
- else
- return 0;
-}
-
static void
skl_plane_update_noarm(struct intel_dsb *dsb,
struct intel_plane *plane,
@@ -1476,7 +1498,7 @@ skl_plane_update_arm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
plane_ctl);
intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
- skl_plane_surf(plane_state, 0));
+ plane_state->surf);
}
static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb,
@@ -1548,6 +1570,8 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
plane_color_ctl = plane_state->color_ctl |
glk_plane_color_ctl_crtc(crtc_state);
+ intel_color_plane_program_pipeline(dsb, plane_state);
+
/* The scaler will handle the output position */
if (plane_state->scaler_id >= 0) {
crtc_x = 0;
@@ -1579,7 +1603,7 @@ icl_plane_update_noarm(struct intel_dsb *dsb,
}
/* FLAT CCS doesn't need to program AUX_DIST */
- if (!HAS_FLAT_CCS(to_i915(display->drm)) && DISPLAY_VER(display) < 20)
+ if (HAS_AUX_CCS(display))
intel_de_write_dsb(display, dsb, PLANE_AUX_DIST(pipe, plane_id),
skl_plane_aux_dist(plane_state, color_plane));
@@ -1632,7 +1656,6 @@ icl_plane_update_arm(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- int color_plane = icl_plane_color_plane(plane_state);
u32 plane_ctl;
plane_ctl = plane_state->ctl |
@@ -1650,6 +1673,16 @@ icl_plane_update_arm(struct intel_dsb *dsb,
icl_plane_update_sel_fetch_arm(dsb, plane, crtc_state, plane_state);
+ intel_color_plane_commit_arm(dsb, plane_state);
+
+ /*
+ * To have FBC with fp16 formats the pixel normalizer block must be
+ * active. Check whether the pixel normalizer needs to be enabled for
+ * FBC and, if so, enable it with a normalization factor of 1.0.
+ */
+ if (intel_fbc_is_enable_pixel_normalizer(plane_state))
+ x3p_lpd_plane_update_pixel_normalizer(dsb, plane, true);
+
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
@@ -1658,7 +1691,7 @@ icl_plane_update_arm(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id),
plane_ctl);
intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id),
- skl_plane_surf(plane_state, color_plane));
+ plane_state->surf);
}
static void skl_plane_capture_error(struct intel_crtc *crtc,
@@ -1682,10 +1715,10 @@ skl_plane_async_flip(struct intel_dsb *dsb,
struct intel_display *display = to_intel_display(plane);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- u32 plane_ctl = plane_state->ctl, plane_surf;
+ u32 plane_ctl = plane_state->ctl;
+ u32 plane_surf = plane_state->surf;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
- plane_surf = skl_plane_surf(plane_state, 0);
if (async_flip) {
if (DISPLAY_VER(display) >= 30)
@@ -1732,7 +1765,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
if (rotation & DRM_MODE_REFLECT_X &&
- fb->modifier == DRM_FORMAT_MOD_LINEAR) {
+ fb->modifier == DRM_FORMAT_MOD_LINEAR &&
+ DISPLAY_VER(display) < 35) {
drm_dbg_kms(display->drm,
"[PLANE:%d:%s] horizontal flip is not supported with linear surface formats\n",
plane->base.base.id, plane->base.name);
@@ -1788,8 +1822,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
}
/* Y-tiling is not supported in IF-ID Interlace mode */
- if (crtc_state->hw.enable &&
- crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
+ if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
fb->modifier != DRM_FORMAT_MOD_LINEAR &&
fb->modifier != I915_FORMAT_MOD_X_TILED) {
drm_dbg_kms(display->drm,
@@ -1892,6 +1925,14 @@ static int intel_plane_min_width(struct intel_plane *plane,
return 1;
}
+static int intel_plane_min_height(struct intel_plane *plane,
+ const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ return 1;
+}
+
static int intel_plane_max_width(struct intel_plane *plane,
const struct drm_framebuffer *fb,
int color_plane,
@@ -2023,6 +2064,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
int w = drm_rect_width(&plane_state->uapi.src) >> 16;
int h = drm_rect_height(&plane_state->uapi.src) >> 16;
int min_width = intel_plane_min_width(plane, fb, 0, rotation);
+ int min_height = intel_plane_min_height(plane, fb, 0, rotation);
int max_width = intel_plane_max_width(plane, fb, 0, rotation);
int max_height = intel_plane_max_height(plane, fb, 0, rotation);
unsigned int alignment = plane->min_alignment(plane, fb, 0);
@@ -2030,11 +2072,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
u32 offset;
int ret;
- if (w > max_width || w < min_width || h > max_height || h < 1) {
+ if (w > max_width || w < min_width || h > max_height || h < min_height) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
+ "[PLANE:%d:%s] requested Y/RGB source size %dx%d outside limits (min: %dx%d max: %dx%d)\n",
plane->base.base.id, plane->base.name,
- w, h, min_width, max_width, max_height);
+ w, h, min_width, min_height, max_width, max_height);
return -EINVAL;
}
@@ -2094,6 +2136,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
int uv_plane = 1;
int ccs_plane = intel_fb_is_ccs_modifier(fb->modifier) ?
skl_main_to_aux_plane(fb, uv_plane) : 0;
+ int min_width = intel_plane_min_width(plane, fb, uv_plane, rotation);
+ int min_height = intel_plane_min_height(plane, fb, uv_plane, rotation);
int max_width = intel_plane_max_width(plane, fb, uv_plane, rotation);
int max_height = intel_plane_max_height(plane, fb, uv_plane, rotation);
int x = plane_state->uapi.src.x1 >> 17;
@@ -2103,11 +2147,11 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
u32 offset;
/* FIXME not quite sure how/if these apply to the chroma plane */
- if (w > max_width || h > max_height) {
+ if (w > max_width || w < min_width || h > max_height || h < min_height) {
drm_dbg_kms(display->drm,
- "[PLANE:%d:%s] CbCr source size %dx%d too big (limit %dx%d)\n",
+ "[PLANE:%d:%s] requested CbCr source size %dx%d outside limits (min: %dx%d max: %dx%d)\n",
plane->base.base.id, plane->base.name,
- w, h, max_width, max_height);
+ w, h, min_width, min_height, max_width, max_height);
return -EINVAL;
}
@@ -2363,11 +2407,10 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
plane_state->damage = DRM_RECT_INIT(0, 0, 0, 0);
}
- plane_state->ctl = skl_plane_ctl(crtc_state, plane_state);
+ plane_state->ctl = skl_plane_ctl(plane_state);
if (DISPLAY_VER(display) >= 10)
- plane_state->color_ctl = glk_plane_color_ctl(crtc_state,
- plane_state);
+ plane_state->color_ctl = glk_plane_color_ctl(plane_state);
if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) &&
icl_is_hdr_plane(display, plane->id))
@@ -2413,23 +2456,6 @@ void icl_link_nv12_planes(struct intel_plane_state *uv_plane_state,
}
}
-static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe)
-{
- return pipe - PIPE_A + INTEL_FBC_A;
-}
-
-static bool skl_plane_has_fbc(struct intel_display *display,
- enum intel_fbc_id fbc_id, enum plane_id plane_id)
-{
- if ((DISPLAY_RUNTIME_INFO(display)->fbc_mask & BIT(fbc_id)) == 0)
- return false;
-
- if (DISPLAY_VER(display) >= 20)
- return icl_is_hdr_plane(display, plane_id);
- else
- return plane_id == PLANE_1;
-}
-
static struct intel_fbc *skl_plane_fbc(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
@@ -2448,13 +2474,10 @@ static bool skl_plane_has_planar(struct intel_display *display,
if (display->platform.skylake || display->platform.broxton)
return false;
- if (DISPLAY_VER(display) == 9 && pipe == PIPE_C)
+ if (pipe == PIPE_C)
return false;
- if (plane_id != PLANE_1 && plane_id != PLANE_2)
- return false;
-
- return true;
+ return plane_id == PLANE_1 || plane_id == PLANE_2;
}
static const u32 *skl_get_plane_formats(struct intel_display *display,
@@ -2470,11 +2493,17 @@ static const u32 *skl_get_plane_formats(struct intel_display *display,
}
}
+static bool glk_plane_has_planar(struct intel_display *display,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ return plane_id == PLANE_1 || plane_id == PLANE_2;
+}
+
static const u32 *glk_get_plane_formats(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id,
int *num_formats)
{
- if (skl_plane_has_planar(display, pipe, plane_id)) {
+ if (glk_plane_has_planar(display, pipe, plane_id)) {
*num_formats = ARRAY_SIZE(glk_planar_formats);
return glk_planar_formats;
} else {
@@ -2714,8 +2743,10 @@ skl_plane_disable_flip_done(struct intel_plane *plane)
static bool skl_plane_has_rc_ccs(struct intel_display *display,
enum pipe pipe, enum plane_id plane_id)
{
- return pipe != PIPE_C &&
- (plane_id == PLANE_1 || plane_id == PLANE_2);
+ if (pipe == PIPE_C)
+ return false;
+
+ return plane_id == PLANE_1 || plane_id == PLANE_2;
}
static u8 skl_plane_caps(struct intel_display *display,
@@ -2814,7 +2845,7 @@ static void skl_disable_tiling(struct intel_plane *plane)
intel_de_write_fw(display, PLANE_CTL(plane->pipe, plane->id), plane_ctl);
intel_de_write_fw(display, PLANE_SURF(plane->pipe, plane->id),
- skl_plane_surf(state, 0));
+ state->surf);
}
struct intel_plane *
@@ -2843,11 +2874,15 @@ skl_universal_plane_create(struct intel_display *display,
intel_fbc_add_plane(skl_plane_fbc(display, pipe, plane_id), plane);
if (DISPLAY_VER(display) >= 30) {
+ plane->min_width = adl_plane_min_width;
plane->max_width = xe3_plane_max_width;
plane->max_height = icl_plane_max_height;
plane->min_cdclk = icl_plane_min_cdclk;
} else if (DISPLAY_VER(display) >= 11) {
- plane->min_width = icl_plane_min_width;
+ if (DISPLAY_VER(display) >= 14 || display->platform.alderlake_p)
+ plane->min_width = adl_plane_min_width;
+ else
+ plane->min_width = icl_plane_min_width;
if (icl_is_hdr_plane(display, plane_id))
plane->max_width = icl_hdr_plane_max_width;
else
@@ -2865,6 +2900,8 @@ skl_universal_plane_create(struct intel_display *display,
}
plane->disable_tiling = skl_disable_tiling;
+ plane->surf_offset = skl_plane_surf_offset;
+
if (DISPLAY_VER(display) >= 13)
plane->max_stride = adl_plane_max_stride;
else
@@ -2937,7 +2974,7 @@ skl_universal_plane_create(struct intel_display *display,
caps = skl_plane_caps(display, pipe, plane_id);
/* FIXME: xe has problems with AUX */
- if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(to_i915(display->drm)))
+ if (!IS_ENABLED(I915) && HAS_AUX_CCS(display))
caps &= ~(INTEL_PLANE_CAP_CCS_RC |
INTEL_PLANE_CAP_CCS_RC_CC |
INTEL_PLANE_CAP_CCS_MC);
@@ -2982,6 +3019,9 @@ skl_universal_plane_create(struct intel_display *display,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
+ if (DISPLAY_VER(display) >= 12)
+ intel_color_pipeline_plane_init(&plane->base, pipe);
+
drm_plane_create_alpha_property(&plane->base);
drm_plane_create_blend_mode_property(&plane->base,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
@@ -3036,7 +3076,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
return;
}
- intel_fb = intel_bo_alloc_framebuffer();
+ intel_fb = intel_framebuffer_alloc();
if (!intel_fb) {
drm_dbg_kms(display->drm, "failed to alloc fb\n");
return;
@@ -3064,7 +3104,6 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX, alpha);
- fb->format = drm_format_info(fourcc);
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
@@ -3072,11 +3111,9 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fb->modifier = DRM_FORMAT_MOD_LINEAR;
break;
case PLANE_CTL_TILED_X:
- plane_config->tiling = I915_TILING_X;
fb->modifier = I915_FORMAT_MOD_X_TILED;
break;
case PLANE_CTL_TILED_Y:
- plane_config->tiling = I915_TILING_Y;
if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
if (DISPLAY_VER(display) >= 14)
fb->modifier = I915_FORMAT_MOD_4_TILED_MTL_RC_CCS;
@@ -3117,6 +3154,8 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
+ fb->format = drm_get_format_info(display->drm, fourcc, fb->modifier);
+
if (!display->params.enable_dpt &&
intel_fb_modifier_uses_dpt(display, fb->modifier)) {
drm_dbg_kms(display->drm, "DPT disabled, skipping initial FB\n");
@@ -3191,21 +3230,18 @@ bool skl_fixup_initial_plane_config(struct intel_crtc *crtc,
to_intel_plane_state(plane->base.state);
enum plane_id plane_id = plane->id;
enum pipe pipe = crtc->pipe;
- u32 base;
if (!plane_state->uapi.visible)
return false;
- base = intel_plane_ggtt_offset(plane_state);
-
/*
* We may have moved the surface to a different
* part of ggtt, make the plane aware of that.
*/
- if (plane_config->base == base)
+ if (plane_config->base == plane_state->surf)
return false;
- intel_de_write(display, PLANE_SURF(pipe, plane_id), base);
+ intel_de_write(display, PLANE_SURF(pipe, plane_id), plane_state->surf);
return true;
}
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
index ca9fdfbbe57c..6fd4da9f63cf 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane_regs.h
@@ -254,6 +254,8 @@
#define PLANE_COLOR_PIPE_CSC_ENABLE REG_BIT(23) /* Pre-ICL */
#define PLANE_COLOR_PLANE_CSC_ENABLE REG_BIT(21) /* ICL+ */
#define PLANE_COLOR_INPUT_CSC_ENABLE REG_BIT(20) /* ICL+ */
+#define PLANE_COLOR_POST_CSC_GAMMA_MULTSEG_ENABLE REG_BIT(15) /* TGL+ */
+#define PLANE_COLOR_PRE_CSC_GAMMA_ENABLE REG_BIT(14)
#define PLANE_COLOR_CSC_MODE_MASK REG_GENMASK(19, 17)
#define PLANE_COLOR_CSC_MODE_BYPASS REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 0)
#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 1)
@@ -290,6 +292,119 @@
_PLANE_INPUT_CSC_POSTOFF_HI_1_A, _PLANE_INPUT_CSC_POSTOFF_HI_1_B, \
_PLANE_INPUT_CSC_POSTOFF_HI_2_A, _PLANE_INPUT_CSC_POSTOFF_HI_2_B)
+#define _MMIO_PLANE_GAMC(plane, i, a, b) _MMIO(_PIPE(plane, a, b) + (i) * 4)
+
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A 0x70160
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B 0x71160
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A 0x70260
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B 0x71260
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_A, \
+ _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_A, \
+ _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_1(pipe), \
+ _PLANE_POST_CSC_GAMC_SEG0_INDEX_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A 0x70164
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B 0x71164
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A 0x70264
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B 0x71264
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_A, \
+ _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_A, \
+ _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_SEG0_DATA_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_1(pipe), \
+ _PLANE_POST_CSC_GAMC_SEG0_DATA_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A 0x701d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1_B 0x711d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A 0x702d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2_B 0x712d8
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_1_A, \
+ _PLANE_POST_CSC_GAMC_INDEX_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_ENH_2_A, \
+ _PLANE_POST_CSC_GAMC_INDEX_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_INDEX_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_ENH_1(pipe), \
+ _PLANE_POST_CSC_GAMC_INDEX_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_A 0x701dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1_B 0x711dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_A 0x702dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2_B 0x712dc
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_1_A, \
+ _PLANE_POST_CSC_GAMC_DATA_ENH_1_B)
+#define _PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_ENH_2_A, \
+ _PLANE_POST_CSC_GAMC_DATA_ENH_2_B)
+#define PLANE_POST_CSC_GAMC_DATA_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_ENH_1(pipe), \
+ _PLANE_POST_CSC_GAMC_DATA_ENH_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_INDEX_1_A 0x704d8
+#define _PLANE_POST_CSC_GAMC_INDEX_1_B 0x714d8
+#define _PLANE_POST_CSC_GAMC_INDEX_2_A 0x705d8
+#define _PLANE_POST_CSC_GAMC_INDEX_2_B 0x715d8
+#define _PLANE_POST_CSC_GAMC_INDEX_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_1_A, \
+ _PLANE_POST_CSC_GAMC_INDEX_1_B)
+#define _PLANE_POST_CSC_GAMC_INDEX_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_INDEX_2_A, \
+ _PLANE_POST_CSC_GAMC_INDEX_2_B)
+#define PLANE_POST_CSC_GAMC_INDEX(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_INDEX_1(pipe), \
+ _PLANE_POST_CSC_GAMC_INDEX_2(pipe))
+
+#define _PLANE_POST_CSC_GAMC_DATA_1_A 0x704dc
+#define _PLANE_POST_CSC_GAMC_DATA_1_B 0x714dc
+#define _PLANE_POST_CSC_GAMC_DATA_2_A 0x705dc
+#define _PLANE_POST_CSC_GAMC_DATA_2_B 0x715dc
+#define _PLANE_POST_CSC_GAMC_DATA_1(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_1_A, \
+ _PLANE_POST_CSC_GAMC_DATA_1_B)
+#define _PLANE_POST_CSC_GAMC_DATA_2(pipe) _PIPE(pipe, _PLANE_POST_CSC_GAMC_DATA_2_A, \
+ _PLANE_POST_CSC_GAMC_DATA_2_B)
+#define PLANE_POST_CSC_GAMC_DATA(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_POST_CSC_GAMC_DATA_1(pipe), \
+ _PLANE_POST_CSC_GAMC_DATA_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A 0x701d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B 0x711d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A 0x702d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B 0x712d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_A, \
+ _PLANE_PRE_CSC_GAMC_INDEX_ENH_1_B)
+#define _PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_A, \
+ _PLANE_PRE_CSC_GAMC_INDEX_ENH_2_B)
+#define PLANE_PRE_CSC_GAMC_INDEX_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_ENH_1(pipe), \
+ _PLANE_PRE_CSC_GAMC_INDEX_ENH_2(pipe))
+#define PLANE_PAL_PREC_AUTO_INCREMENT REG_BIT(10)
+
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A 0x701d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1_B 0x711d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A 0x702d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2_B 0x712d4
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_1_A, \
+ _PLANE_PRE_CSC_GAMC_DATA_ENH_1_B)
+#define _PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_ENH_2_A, \
+ _PLANE_PRE_CSC_GAMC_DATA_ENH_2_B)
+#define PLANE_PRE_CSC_GAMC_DATA_ENH(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_ENH_1(pipe), \
+ _PLANE_PRE_CSC_GAMC_DATA_ENH_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_INDEX_1_A 0x704d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_1_B 0x714d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_2_A 0x705d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_2_B 0x715d0
+#define _PLANE_PRE_CSC_GAMC_INDEX_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_1_A, \
+ _PLANE_PRE_CSC_GAMC_INDEX_1_B)
+#define _PLANE_PRE_CSC_GAMC_INDEX_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_INDEX_2_A, \
+ _PLANE_PRE_CSC_GAMC_INDEX_2_B)
+#define PLANE_PRE_CSC_GAMC_INDEX(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_INDEX_1(pipe), \
+ _PLANE_PRE_CSC_GAMC_INDEX_2(pipe))
+
+#define _PLANE_PRE_CSC_GAMC_DATA_1_A 0x704d4
+#define _PLANE_PRE_CSC_GAMC_DATA_1_B 0x714d4
+#define _PLANE_PRE_CSC_GAMC_DATA_2_A 0x705d4
+#define _PLANE_PRE_CSC_GAMC_DATA_2_B 0x715d4
+#define _PLANE_PRE_CSC_GAMC_DATA_1(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_1_A, \
+ _PLANE_PRE_CSC_GAMC_DATA_1_B)
+#define _PLANE_PRE_CSC_GAMC_DATA_2(pipe) _PIPE(pipe, _PLANE_PRE_CSC_GAMC_DATA_2_A, \
+ _PLANE_PRE_CSC_GAMC_DATA_2_B)
+#define PLANE_PRE_CSC_GAMC_DATA(pipe, plane, i) _MMIO_PLANE_GAMC(plane, i, _PLANE_PRE_CSC_GAMC_DATA_1(pipe), \
+ _PLANE_PRE_CSC_GAMC_DATA_2(pipe))
+
#define _PLANE_CSC_RY_GY_1_A 0x70210
#define _PLANE_CSC_RY_GY_2_A 0x70310
#define _PLANE_CSC_RY_GY_1_B 0x71210
@@ -324,7 +439,7 @@
#define PLANE_WM_IGNORE_LINES REG_BIT(30)
#define PLANE_WM_AUTO_MIN_ALLOC_EN REG_BIT(29)
#define PLANE_WM_LINES_MASK REG_GENMASK(26, 14)
-#define PLANE_WM_BLOCKS_MASK REG_GENMASK(11, 0)
+#define PLANE_WM_BLOCKS_MASK REG_GENMASK(12, 0)
#define _PLANE_WM_SAGV_1_A 0x70258
#define _PLANE_WM_SAGV_1_B 0x71258
@@ -375,10 +490,10 @@
_PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B, \
_PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
-/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */
-#define PLANE_BUF_END_MASK REG_GENMASK(27, 16)
+/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits, xe3p_lpd 13 bits */
+#define PLANE_BUF_END_MASK REG_GENMASK(28, 16)
#define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end))
-#define PLANE_BUF_START_MASK REG_GENMASK(11, 0)
+#define PLANE_BUF_START_MASK REG_GENMASK(12, 0)
#define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start))
#define _PLANE_MIN_BUF_CFG_1_A 0x70274
@@ -389,9 +504,9 @@
_PLANE_MIN_BUF_CFG_1_A, _PLANE_MIN_BUF_CFG_1_B, \
_PLANE_MIN_BUF_CFG_2_A, _PLANE_MIN_BUF_CFG_2_B)
#define PLANE_AUTO_MIN_DBUF_EN REG_BIT(31)
-#define PLANE_MIN_DBUF_BLOCKS_MASK REG_GENMASK(27, 16)
+#define PLANE_MIN_DBUF_BLOCKS_MASK REG_GENMASK(28, 16)
#define PLANE_MIN_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_MIN_DBUF_BLOCKS_MASK, (val))
-#define PLANE_INTERIM_DBUF_BLOCKS_MASK REG_GENMASK(11, 0)
+#define PLANE_INTERIM_DBUF_BLOCKS_MASK REG_GENMASK(12, 0)
#define PLANE_INTERIM_DBUF_BLOCKS(val) REG_FIELD_PREP(PLANE_INTERIM_DBUF_BLOCKS_MASK, (val))
/* tgl+ */
@@ -455,4 +570,16 @@
_SEL_FETCH_PLANE_OFFSET_5_A, _SEL_FETCH_PLANE_OFFSET_5_B, \
_SEL_FETCH_PLANE_OFFSET_6_A, _SEL_FETCH_PLANE_OFFSET_6_B)
+#define _PLANE_PIXEL_NORMALIZE_1_A 0x701a8
+#define _PLANE_PIXEL_NORMALIZE_2_A 0x702a8
+#define _PLANE_PIXEL_NORMALIZE_1_B 0x711a8
+#define _PLANE_PIXEL_NORMALIZE_2_B 0x712a8
+#define PLANE_PIXEL_NORMALIZE(pipe, plane) _MMIO_SKL_PLANE((pipe), (plane), \
+ _PLANE_PIXEL_NORMALIZE_1_A, _PLANE_PIXEL_NORMALIZE_1_B, \
+ _PLANE_PIXEL_NORMALIZE_2_A, _PLANE_PIXEL_NORMALIZE_2_B)
+#define PLANE_PIXEL_NORMALIZE_ENABLE REG_BIT(31)
+#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR_MASK REG_GENMASK(15, 0)
+#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR(val) REG_FIELD_PREP(PLANE_PIXEL_NORMALIZE_NORM_FACTOR_MASK, (val))
+#define PLANE_PIXEL_NORMALIZE_NORM_FACTOR_1_0 0x3c00
+
#endif /* __SKL_UNIVERSAL_PLANE_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 222c069fdadb..54e9e0be019d 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -6,12 +6,10 @@
#include <linux/debugfs.h>
#include <drm/drm_blend.h>
-#include <drm/drm_file.h>
#include <drm/drm_print.h>
#include "soc/intel_dram.h"
#include "i915_reg.h"
-#include "i915_utils.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
@@ -24,12 +22,16 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_fb.h"
#include "intel_fixed.h"
#include "intel_flipq.h"
#include "intel_pcode.h"
#include "intel_plane.h"
+#include "intel_vblank.h"
#include "intel_wm.h"
+#include "skl_prefill.h"
+#include "skl_scaler.h"
#include "skl_universal_plane_regs.h"
#include "skl_watermark.h"
#include "skl_watermark_regs.h"
@@ -633,15 +635,22 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor);
+ const struct drm_mode_config *mode_config = &display->drm->mode_config;
+ const struct drm_format_info *info;
struct skl_wm_level wm = {};
int ret, min_ddb_alloc = 0;
struct skl_wm_params wp;
+ u64 modifier;
+ u32 format;
int level;
- ret = skl_compute_wm_params(crtc_state, 256,
- drm_format_info(DRM_FORMAT_ARGB8888),
- DRM_FORMAT_MOD_LINEAR,
- DRM_MODE_ROTATE_0,
+ format = DRM_FORMAT_ARGB8888;
+ modifier = DRM_FORMAT_MOD_LINEAR;
+
+ info = drm_get_format_info(display->drm, format, modifier);
+
+ ret = skl_compute_wm_params(crtc_state, mode_config->cursor_width,
+ info, modifier, DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0, 0);
drm_WARN_ON(display->drm, ret);
@@ -1389,7 +1398,7 @@ skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
{
u16 size, extra = 0;
- if (data_rate) {
+ if (data_rate && iter->data_rate) {
extra = min_t(u16, iter->size,
DIV64_U64_ROUND_UP(iter->size * data_rate,
iter->data_rate));
@@ -1637,26 +1646,11 @@ skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
return ret;
}
-static uint_fixed_16_16_t
-intel_get_linetime_us(const struct intel_crtc_state *crtc_state)
+static int skl_wm_linetime_us(const struct intel_crtc_state *crtc_state,
+ int pixel_rate)
{
- struct intel_display *display = to_intel_display(crtc_state);
- u32 pixel_rate;
- u32 crtc_htotal;
- uint_fixed_16_16_t linetime_us;
-
- if (!crtc_state->hw.active)
- return u32_to_fixed16(0);
-
- pixel_rate = crtc_state->pixel_rate;
-
- if (drm_WARN_ON(display->drm, pixel_rate == 0))
- return u32_to_fixed16(0);
-
- crtc_htotal = crtc_state->hw.pipe_mode.crtc_htotal;
- linetime_us = div_fixed16(crtc_htotal * 1000, pixel_rate);
-
- return linetime_us;
+ return DIV_ROUND_UP(crtc_state->hw.pipe_mode.crtc_htotal * 1000,
+ pixel_rate);
}
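
The replacement helper is just htotal converted to microseconds at the given pixel rate, rounded up. With hypothetical numbers, a 2200-clock htotal at a 148500 kHz pixel rate:

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static int example_linetime_us(void)
{
	int crtc_htotal = 2200;		/* clocks per scanline (hypothetical) */
	int pixel_rate = 148500;	/* kHz (hypothetical) */

	/* (2200 * 1000) / 148500 = 14.81..., rounded up to 15 us */
	return DIV_ROUND_UP(crtc_htotal * 1000, pixel_rate);
}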
static int
@@ -1744,7 +1738,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
wp->plane_blocks_per_line);
- wp->linetime_us = fixed16_to_u32_round_up(intel_get_linetime_us(crtc_state));
+ wp->linetime_us = skl_wm_linetime_us(crtc_state, plane_pixel_rate);
return 0;
}
@@ -1825,6 +1819,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
if (wp->y_tiled) {
selected_result = max_fixed16(method2, wp->y_tile_minimum);
+ } else if (DISPLAY_VER(display) >= 35) {
+ selected_result = method2;
} else {
if ((wp->cpp * crtc_state->hw.pipe_mode.crtc_htotal /
wp->dbuf_block_size < 1) &&
@@ -1879,18 +1875,21 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
} else {
blocks++;
}
-
- /*
- * Make sure result blocks for higher latency levels are
- * at least as high as level below the current level.
- * Assumption in DDB algorithm optimization for special
- * cases. Also covers Display WA #1125 for RC.
- */
- if (result_prev->blocks > blocks)
- blocks = result_prev->blocks;
}
}
+ /*
+ * Make sure the result blocks for higher latency levels are
+ * at least as high as those of the level below the current one.
+ * This is assumed by the DDB algorithm optimization for special
+ * cases, and also covers Display WA #1125 for RC.
+ *
+ * Let's always do this as the algorithm can give
+ * non-monotonic results on any platform.
+ */
+ blocks = max_t(u32, blocks, result_prev->blocks);
+ lines = max_t(u32, lines, result_prev->lines);
+
if (DISPLAY_VER(display) >= 11) {
if (wp->y_tiled) {
int extra_lines;
@@ -2158,103 +2157,55 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
return 0;
}
-static int
-cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state)
+unsigned int skl_wm0_prefill_lines_worst(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct intel_cdclk_state *cdclk_state;
-
- cdclk_state = intel_atomic_get_cdclk_state(state);
- if (IS_ERR(cdclk_state)) {
- drm_WARN_ON(display->drm, PTR_ERR(cdclk_state));
- return 1;
- }
-
- return min(1, DIV_ROUND_UP(crtc_state->pixel_rate,
- 2 * intel_cdclk_logical(cdclk_state)));
-}
-
-static int
-dsc_prefill_latency(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
- crtc_state->hw.adjusted_mode.clock);
- int num_scaler_users = hweight32(scaler_state->scaler_users);
- int chroma_downscaling_factor =
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
- u32 dsc_prefill_latency = 0;
-
- if (!crtc_state->dsc.compression_enable ||
- !num_scaler_users ||
- num_scaler_users > crtc->num_scalers)
- return dsc_prefill_latency;
-
- dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10);
-
- for (int i = 0; i < num_scaler_users; i++) {
- u64 hscale_k, vscale_k;
-
- hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].hscale, 1000) >> 16);
- vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[i].vscale, 1000) >> 16);
- dsc_prefill_latency = DIV_ROUND_UP_ULL(dsc_prefill_latency * hscale_k * vscale_k,
- 1000000);
- }
-
- dsc_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+ struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->primary);
+ const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
+ int ret, pixel_rate, width, level = 0;
+ const struct drm_format_info *info;
+ struct skl_wm_level wm = {};
+ struct skl_wm_params wp;
+ unsigned int latency;
+ u64 modifier;
+ u32 format;
- return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, dsc_prefill_latency);
-}
+ /* only expected to be used for VRR guardband calculation */
+ drm_WARN_ON(display->drm, !HAS_VRR(display));
-static int
-scaler_prefill_latency(const struct intel_crtc_state *crtc_state)
-{
- const struct intel_crtc_scaler_state *scaler_state =
- &crtc_state->scaler_state;
- int num_scaler_users = hweight32(scaler_state->scaler_users);
- int scaler_prefill_latency = 0;
- int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal,
- crtc_state->hw.adjusted_mode.clock);
+ /* FIXME rather ugly to pick this by hand but maybe no better way? */
+ format = DRM_FORMAT_XBGR16161616F;
+ if (HAS_4TILE(display))
+ modifier = I915_FORMAT_MOD_4_TILED;
+ else
+ modifier = I915_FORMAT_MOD_Y_TILED;
- if (!num_scaler_users)
- return scaler_prefill_latency;
+ info = drm_get_format_info(display->drm, format, modifier);
- scaler_prefill_latency = 4 * linetime;
+ pixel_rate = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_total_scale(crtc_state),
+ pipe_mode->crtc_clock),
+ 0x10000);
- if (num_scaler_users > 1) {
- u64 hscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].hscale, 1000) >> 16);
- u64 vscale_k = max(1000, mul_u32_u32(scaler_state->scalers[0].vscale, 1000) >> 16);
- int chroma_downscaling_factor =
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1;
- int latency;
+ /* FIXME limit to max plane width? */
+ width = DIV_ROUND_UP_ULL(mul_u32_u32(skl_scaler_max_hscale(crtc_state),
+ pipe_mode->crtc_hdisplay),
+ 0x10000);
- latency = DIV_ROUND_UP_ULL((4 * linetime * hscale_k * vscale_k *
- chroma_downscaling_factor), 1000000);
- scaler_prefill_latency += latency;
- }
+ /* FIXME is 90/270 rotation worse than 0/180? */
+ ret = skl_compute_wm_params(crtc_state, width, info,
+ modifier, DRM_MODE_ROTATE_0,
+ pixel_rate, &wp, 0, 1);
+ drm_WARN_ON(display->drm, ret);
- scaler_prefill_latency *= cdclk_prefill_adjustment(crtc_state);
+ latency = skl_wm_latency(display, level, &wp);
- return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, scaler_prefill_latency);
-}
+ skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm);
-static bool
-skl_is_vblank_too_short(const struct intel_crtc_state *crtc_state,
- int wm0_lines, int latency)
-{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ /* FIXME is this sane? */
+ if (wm.min_ddb_alloc == U16_MAX)
+ wm.lines = skl_wm_max_lines(display);
- return crtc_state->framestart_delay +
- intel_usecs_to_scanlines(adjusted_mode, latency) +
- scaler_prefill_latency(crtc_state) +
- dsc_prefill_latency(crtc_state) +
- wm0_lines >
- adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vblank_start;
+ return wm.lines << 16;
}
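
The scale factors used above come back as 16.16 fixed point (hence the divides by 0x10000). With invented values, a 3.0x maximum total downscale on a 148500 kHz pipe clock, the worst-case pixel rate works out as:

/* Hypothetical values; mirrors the pixel_rate computation above. */
static unsigned int example_worst_case_pixel_rate(void)
{
	unsigned long long max_total_scale = 3ull << 16;	/* 3.0 in 16.16 */
	unsigned int crtc_clock = 148500;			/* kHz */

	/* DIV_ROUND_UP_ULL(0x30000 * 148500, 0x10000) = 445500 kHz */
	return (max_total_scale * crtc_clock + 0xffff) >> 16;
}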
static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
@@ -2273,10 +2224,21 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state)
return wm0_lines;
}
+unsigned int skl_wm0_prefill_lines(const struct intel_crtc_state *crtc_state)
+{
+ return skl_max_wm0_lines(crtc_state) << 16;
+}
+
+/*
+ * TODO: If we use PKG_C_LATENCY to allow C-states when the delayed vblank
+ * size is too small for the package C exit latency, we need to notify PSR
+ * about the scenario so that it can apply Wa_16025596647.
+ */
static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
- int wm0_lines)
+ const struct skl_prefill_ctx *ctx)
{
struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int level;
for (level = display->wm.num_levels - 1; level >= 0; level--) {
@@ -2291,10 +2253,13 @@ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state,
if (level == 0)
latency = 0;
- if (!skl_is_vblank_too_short(crtc_state, wm0_lines, latency))
+ if (!skl_prefill_vblank_too_short(ctx, crtc_state, latency))
return level;
}
+ drm_dbg_kms(display->drm, "[CRTC:%d:%s] Not enough time in vblank for prefill\n",
+ crtc->base.base.id, crtc->base.name);
+
return -EINVAL;
}
@@ -2302,14 +2267,15 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- int wm0_lines, level;
+ struct skl_prefill_ctx ctx;
+ int level;
if (!crtc_state->hw.active)
return 0;
- wm0_lines = skl_max_wm0_lines(crtc_state);
+ skl_prefill_init(&ctx, crtc_state);
- level = skl_max_wm_level_for_vblank(crtc_state, wm0_lines);
+ level = skl_max_wm_level_for_vblank(crtc_state, &ctx);
if (level < 0)
return level;
@@ -2319,6 +2285,13 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
*/
crtc_state->wm_level_disabled = level < display->wm.num_levels - 1;
+ /*
+ * TODO: assert that we are in fact using the maximum guardband
+ * if we end up disabling any WM levels here. Otherwise we clearly
+ * failed in using a realistic worst case prefill estimate when
+ * determining the guardband size.
+ */
+
for (level++; level < display->wm.num_levels; level++) {
enum plane_id plane_id;
@@ -2337,8 +2310,8 @@ static int skl_wm_check_vblank(struct intel_crtc_state *crtc_state)
if (DISPLAY_VER(display) >= 12 &&
display->sagv.block_time_us &&
- skl_is_vblank_too_short(crtc_state, wm0_lines,
- display->sagv.block_time_us)) {
+ skl_prefill_vblank_too_short(&ctx, crtc_state,
+ display->sagv.block_time_us)) {
enum plane_id plane_id;
for_each_plane_id_on_crtc(crtc, plane_id) {
@@ -3170,12 +3143,60 @@ void skl_watermark_ipc_init(struct intel_display *display)
skl_watermark_ipc_update(display);
}
-static void
-adjust_wm_latency(struct intel_display *display,
- u16 wm[], int num_levels, int read_latency)
+static void multiply_wm_latency(struct intel_display *display, int mult)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] *= mult;
+}
+
+static void increase_wm_latency(struct intel_display *display, int inc)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ wm[0] += inc;
+
+ for (level = 1; level < num_levels; level++) {
+ if (wm[level] == 0)
+ break;
+
+ wm[level] += inc;
+ }
+}
+
+static bool need_16gb_dimm_wa(struct intel_display *display)
{
const struct dram_info *dram_info = intel_dram_info(display->drm);
- int i, level;
+
+ return (display->platform.skylake || display->platform.kabylake ||
+ display->platform.coffeelake || display->platform.cometlake ||
+ DISPLAY_VER(display) == 11) && dram_info->has_16gb_dimms;
+}
+
+static int wm_read_latency(struct intel_display *display)
+{
+ if (DISPLAY_VER(display) >= 14)
+ return 6;
+ else if (DISPLAY_VER(display) >= 12)
+ return 3;
+ else
+ return 2;
+}
+
+static void sanitize_wm_latency(struct intel_display *display)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ /*
+ * Xe3p and beyond should ignore level 0's reported latency and
+ * always apply WaWmMemoryReadLatency logic.
+ */
+ if (DISPLAY_VER(display) >= 35)
+ wm[0] = 0;
/*
* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
@@ -3183,14 +3204,38 @@ adjust_wm_latency(struct intel_display *display,
* of the punit to satisfy this requirement.
*/
for (level = 1; level < num_levels; level++) {
- if (wm[level] == 0) {
- for (i = level + 1; i < num_levels; i++)
- wm[i] = 0;
+ if (wm[level] == 0)
+ break;
+ }
+
+ for (level = level + 1; level < num_levels; level++)
+ wm[level] = 0;
+}
- num_levels = level;
+static void make_wm_latency_monotonic(struct intel_display *display)
+{
+ u16 *wm = display->wm.skl_latency;
+ int level, num_levels = display->wm.num_levels;
+
+ for (level = 1; level < num_levels; level++) {
+ if (wm[level] == 0)
break;
- }
+
+ wm[level] = max(wm[level], wm[level-1]);
}
+}
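
With invented punit latencies, the combined effect of sanitize_wm_latency() and make_wm_latency_monotonic() looks like this (a standalone restatement of both loops):

static void example_latency_fixup(void)
{
	unsigned short wm[8] = { 2, 4, 3, 9, 0, 7, 0, 0 };	/* hypothetical */
	int level;

	/* sanitize: the first zero level ends the table -> {2,4,3,9,0,0,0,0} */
	for (level = 1; level < 8 && wm[level]; level++)
		;
	for (; level < 8; level++)
		wm[level] = 0;

	/* monotonic: each level >= the one below it -> {2,4,4,9,0,0,0,0} */
	for (level = 1; level < 8 && wm[level]; level++)
		wm[level] = wm[level] > wm[level - 1] ? wm[level] : wm[level - 1];
}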
+
+static void
+adjust_wm_latency(struct intel_display *display)
+{
+ u16 *wm = display->wm.skl_latency;
+
+ if (display->platform.dg2)
+ multiply_wm_latency(display, 2);
+
+ sanitize_wm_latency(display);
+
+ make_wm_latency_monotonic(display);
/*
* WaWmMemoryReadLatency
@@ -3199,24 +3244,22 @@ adjust_wm_latency(struct intel_display *display,
* to add proper adjustment to each valid level we retrieve
* from the punit when level 0 response data is 0us.
*/
- if (wm[0] == 0) {
- for (level = 0; level < num_levels; level++)
- wm[level] += read_latency;
- }
+ if (wm[0] == 0)
+ increase_wm_latency(display, wm_read_latency(display));
/*
- * WA Level-0 adjustment for 16GB DIMMs: SKL+
+ * WA Level-0 adjustment for 16Gb+ DIMMs: SKL+
* If we could not get dimm info enable this WA to prevent from
- * any underrun. If not able to get Dimm info assume 16GB dimm
+ * any underrun. If not able to get DIMM info assume 16Gb+ DIMM
* to avoid any underrun.
*/
- if (!display->platform.dg2 && dram_info->wm_lv_0_adjust_needed)
- wm[0] += 1;
+ if (need_16gb_dimm_wa(display))
+ increase_wm_latency(display, 1);
}
-static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
+static void mtl_read_wm_latency(struct intel_display *display)
{
- int num_levels = display->wm.num_levels;
+ u16 *wm = display->wm.skl_latency;
u32 val;
val = intel_de_read(display, MTL_LATENCY_LP0_LP1);
@@ -3230,15 +3273,11 @@ static void mtl_read_wm_latency(struct intel_display *display, u16 wm[])
val = intel_de_read(display, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
-
- adjust_wm_latency(display, wm, num_levels, 6);
}
-static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
+static void skl_read_wm_latency(struct intel_display *display)
{
- int num_levels = display->wm.num_levels;
- int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2;
- int mult = display->platform.dg2 ? 2 : 1;
+ u16 *wm = display->wm.skl_latency;
u32 val;
int ret;
@@ -3250,10 +3289,10 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
return;
}
- wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
- wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
- wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
- wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
+ wm[0] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val);
+ wm[1] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val);
+ wm[2] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val);
+ wm[3] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val);
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
@@ -3263,12 +3302,10 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[])
return;
}
- wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val) * mult;
- wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val) * mult;
- wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val) * mult;
- wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val) * mult;
-
- adjust_wm_latency(display, wm, num_levels, read_latency);
+ wm[4] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_0_4_MASK, val);
+ wm[5] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_1_5_MASK, val);
+ wm[6] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_2_6_MASK, val);
+ wm[7] = REG_FIELD_GET(GEN9_MEM_LATENCY_LEVEL_3_7_MASK, val);
}
static void skl_setup_wm_latency(struct intel_display *display)
@@ -3279,11 +3316,15 @@ static void skl_setup_wm_latency(struct intel_display *display)
display->wm.num_levels = 8;
if (DISPLAY_VER(display) >= 14)
- mtl_read_wm_latency(display, display->wm.skl_latency);
+ mtl_read_wm_latency(display);
else
- skl_read_wm_latency(display, display->wm.skl_latency);
+ skl_read_wm_latency(display);
- intel_print_wm_latency(display, "Gen9 Plane", display->wm.skl_latency);
+ intel_print_wm_latency(display, "original", display->wm.skl_latency);
+
+ adjust_wm_latency(display);
+
+ intel_print_wm_latency(display, "adjusted", display->wm.skl_latency);
}
static struct intel_global_state *intel_dbuf_duplicate_state(struct intel_global_obj *obj)
@@ -3452,7 +3493,10 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
if (!HAS_MBUS_JOINING(display))
return;
- if (DISPLAY_VER(display) >= 20)
+ if (DISPLAY_VER(display) >= 35)
+ intel_de_rmw(display, MBUS_CTL, XE3P_MBUS_TRANSLATION_THROTTLE_MIN_MASK,
+ XE3P_MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
+ else if (DISPLAY_VER(display) >= 20)
intel_de_rmw(display, MBUS_CTL, MBUS_TRANSLATION_THROTTLE_MIN_MASK,
MBUS_TRANSLATION_THROTTLE_MIN(ratio - 1));
@@ -3463,9 +3507,14 @@ void intel_dbuf_mdclk_cdclk_ratio_update(struct intel_display *display,
ratio, str_yes_no(joined_mbus));
for_each_dbuf_slice(display, slice)
- intel_de_rmw(display, DBUF_CTL_S(slice),
- DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
- DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
+ if (DISPLAY_VER(display) >= 35)
+ intel_de_rmw(display, DBUF_CTL_S(slice),
+ XE3P_DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ XE3P_DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
+ else
+ intel_de_rmw(display, DBUF_CTL_S(slice),
+ DBUF_MIN_TRACKER_STATE_SERVICE_MASK,
+ DBUF_MIN_TRACKER_STATE_SERVICE(ratio - 1));
}
static void intel_dbuf_mdclk_min_tracker_update(struct intel_atomic_state *state)
@@ -4033,14 +4082,14 @@ DEFINE_SHOW_ATTRIBUTE(intel_sagv_status);
void skl_watermark_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = display->drm->primary;
+ struct dentry *debugfs_root = display->drm->debugfs_root;
if (HAS_IPC(display))
- debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root,
+ debugfs_create_file("i915_ipc_status", 0644, debugfs_root,
display, &skl_watermark_ipc_status_fops);
if (HAS_SAGV(display))
- debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root,
+ debugfs_create_file("i915_sagv_status", 0444, debugfs_root,
display, &intel_sagv_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 62790816f030..6bc2ec9164bf 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -79,5 +79,8 @@ void intel_program_dpkgc_latency(struct intel_atomic_state *state);
bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state);
+unsigned int skl_wm0_prefill_lines_worst(const struct intel_crtc_state *crtc_state);
+unsigned int skl_wm0_prefill_lines(const struct intel_crtc_state *crtc_state);
+
#endif /* __SKL_WATERMARK_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_watermark_regs.h b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
index c5572fc0e847..abf56ac31105 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark_regs.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark_regs.h
@@ -32,16 +32,18 @@
#define MBUS_BBOX_CTL_S1 _MMIO(0x45040)
#define MBUS_BBOX_CTL_S2 _MMIO(0x45044)
-#define MBUS_CTL _MMIO(0x4438C)
-#define MBUS_JOIN REG_BIT(31)
-#define MBUS_HASHING_MODE_MASK REG_BIT(30)
-#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
-#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
-#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
-#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
-#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
-#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13)
-#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
+#define MBUS_CTL _MMIO(0x4438C)
+#define MBUS_JOIN REG_BIT(31)
+#define MBUS_HASHING_MODE_MASK REG_BIT(30)
+#define MBUS_HASHING_MODE_2x2 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 0)
+#define MBUS_HASHING_MODE_1x4 REG_FIELD_PREP(MBUS_HASHING_MODE_MASK, 1)
+#define MBUS_JOIN_PIPE_SELECT_MASK REG_GENMASK(28, 26)
+#define MBUS_JOIN_PIPE_SELECT(pipe) REG_FIELD_PREP(MBUS_JOIN_PIPE_SELECT_MASK, pipe)
+#define MBUS_JOIN_PIPE_SELECT_NONE MBUS_JOIN_PIPE_SELECT(7)
+#define XE3P_MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(16, 13)
+#define XE3P_MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(XE3P_MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
+#define MBUS_TRANSLATION_THROTTLE_MIN_MASK REG_GENMASK(15, 13)
+#define MBUS_TRANSLATION_THROTTLE_MIN(val) REG_FIELD_PREP(MBUS_TRANSLATION_THROTTLE_MIN_MASK, val)
/*
* The below are numbered starting from "S1" on gen11/gen12, but starting
@@ -51,20 +53,22 @@
* way things will be named by the hardware team going forward, plus it's more
* consistent with how most of the rest of our registers are named.
*/
-#define _DBUF_CTL_S0 0x45008
-#define _DBUF_CTL_S1 0x44FE8
-#define _DBUF_CTL_S2 0x44300
-#define _DBUF_CTL_S3 0x44304
-#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \
- _DBUF_CTL_S0, \
- _DBUF_CTL_S1, \
- _DBUF_CTL_S2, \
- _DBUF_CTL_S3))
-#define DBUF_POWER_REQUEST REG_BIT(31)
-#define DBUF_POWER_STATE REG_BIT(30)
-#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19)
-#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x)
-#define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */
+#define _DBUF_CTL_S0 0x45008
+#define _DBUF_CTL_S1 0x44FE8
+#define _DBUF_CTL_S2 0x44300
+#define _DBUF_CTL_S3 0x44304
+#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \
+ _DBUF_CTL_S0, \
+ _DBUF_CTL_S1, \
+ _DBUF_CTL_S2, \
+ _DBUF_CTL_S3))
+#define DBUF_POWER_REQUEST REG_BIT(31)
+#define DBUF_POWER_STATE REG_BIT(30)
+#define DBUF_TRACKER_STATE_SERVICE_MASK REG_GENMASK(23, 19)
+#define DBUF_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x)
+#define XE3P_DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(20, 16)
+#define XE3P_DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(XE3P_DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x)
+#define DBUF_MIN_TRACKER_STATE_SERVICE_MASK REG_GENMASK(18, 16) /* ADL-P+ */
#define DBUF_MIN_TRACKER_STATE_SERVICE(x) REG_FIELD_PREP(DBUF_MIN_TRACKER_STATE_SERVICE_MASK, x) /* ADL-P+ */
#define MTL_LATENCY_LP0_LP1 _MMIO(0x45780)
diff --git a/drivers/gpu/drm/i915/display/vlv_clock.c b/drivers/gpu/drm/i915/display/vlv_clock.c
new file mode 100644
index 000000000000..1abdae453514
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_clock.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_print.h>
+
+#include "intel_display_core.h"
+#include "intel_display_types.h"
+#include "vlv_clock.h"
+#include "vlv_sideband.h"
+
+/*
+ * FIXME: The caching of hpll_freq and czclk_freq relies on the first calls
+ * occurring at a time when they can actually be read. This appears to be the
+ * case, but is somewhat fragile. Make the initialization explicit at a point
+ * where they can be reliably read.
+ */
+
+/* returns HPLL frequency in kHz */
+int vlv_clock_get_hpll_vco(struct drm_device *drm)
+{
+ struct intel_display *display = to_intel_display(drm);
+ int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
+
+ if (!display->vlv_clock.hpll_freq) {
+ vlv_cck_get(drm);
+ /* Obtain SKU information */
+ hpll_freq = vlv_cck_read(drm, CCK_FUSE_REG) &
+ CCK_FUSE_HPLL_FREQ_MASK;
+ vlv_cck_put(drm);
+
+ display->vlv_clock.hpll_freq = vco_freq[hpll_freq] * 1000;
+
+ drm_dbg_kms(drm, "HPLL frequency: %d kHz\n", display->vlv_clock.hpll_freq);
+ }
+
+ return display->vlv_clock.hpll_freq;
+}
+
+static int vlv_clock_get_cck(struct drm_device *drm,
+ const char *name, u32 reg, int ref_freq)
+{
+ u32 val;
+ int divider;
+
+ vlv_cck_get(drm);
+ val = vlv_cck_read(drm, reg);
+ vlv_cck_put(drm);
+
+ divider = val & CCK_FREQUENCY_VALUES;
+
+ drm_WARN(drm, (val & CCK_FREQUENCY_STATUS) !=
+ (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ "%s change in progress\n", name);
+
+ return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
+}
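
The divider field encodes twice the actual divide ratio minus one, so the derived clock is ref * 2 / (field + 1). With hypothetical numbers, a 1600000 kHz HPLL VCO and a field value of 7 (an effective divide-by-4):

#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))

static int example_cck_freq(void)
{
	int ref_freq = 1600000;	/* kHz (hypothetical HPLL VCO) */
	int divider = 7;	/* CCK_FREQUENCY_VALUES field (hypothetical) */

	/* (1600000 << 1) / (7 + 1) = 400000 kHz */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}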
+
+int vlv_clock_get_hrawclk(struct drm_device *drm)
+{
+ /* RAWCLK_FREQ_VLV register updated from power well code */
+ return vlv_clock_get_cck(drm, "hrawclk", CCK_DISPLAY_REF_CLOCK_CONTROL,
+ vlv_clock_get_hpll_vco(drm));
+}
+
+int vlv_clock_get_czclk(struct drm_device *drm)
+{
+ struct intel_display *display = to_intel_display(drm);
+
+ if (!display->vlv_clock.czclk_freq) {
+ display->vlv_clock.czclk_freq = vlv_clock_get_cck(drm, "czclk", CCK_CZ_CLOCK_CONTROL,
+ vlv_clock_get_hpll_vco(drm));
+ drm_dbg_kms(drm, "CZ clock rate: %d kHz\n", display->vlv_clock.czclk_freq);
+ }
+
+ return display->vlv_clock.czclk_freq;
+}
+
+int vlv_clock_get_cdclk(struct drm_device *drm)
+{
+ return vlv_clock_get_cck(drm, "cdclk", CCK_DISPLAY_CLOCK_CONTROL,
+ vlv_clock_get_hpll_vco(drm));
+}
+
+int vlv_clock_get_gpll(struct drm_device *drm)
+{
+ return vlv_clock_get_cck(drm, "GPLL ref", CCK_GPLL_CLOCK_CONTROL,
+ vlv_clock_get_czclk(drm));
+}
diff --git a/drivers/gpu/drm/i915/display/vlv_clock.h b/drivers/gpu/drm/i915/display/vlv_clock.h
new file mode 100644
index 000000000000..5742ed3c628d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/vlv_clock.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __VLV_CLOCK_H__
+#define __VLV_CLOCK_H__
+
+struct drm_device;
+
+#ifdef I915
+int vlv_clock_get_hpll_vco(struct drm_device *drm);
+int vlv_clock_get_hrawclk(struct drm_device *drm);
+int vlv_clock_get_czclk(struct drm_device *drm);
+int vlv_clock_get_cdclk(struct drm_device *drm);
+int vlv_clock_get_gpll(struct drm_device *drm);
+#else
+static inline int vlv_clock_get_hpll_vco(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_hrawclk(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_czclk(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_cdclk(struct drm_device *drm)
+{
+ return 0;
+}
+static inline int vlv_clock_get_gpll(struct drm_device *drm)
+{
+ return 0;
+}
+#endif
+
+#endif /* __VLV_CLOCK_H__ */
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 6d9f3312de7e..19bdd8662359 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -34,7 +34,6 @@
#include <drm/drm_probe_helper.h>
#include "i915_reg.h"
-#include "i915_utils.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -42,6 +41,7 @@
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_utils.h"
#include "intel_dsi.h"
#include "intel_dsi_vbt.h"
#include "intel_fifo_underrun.h"
@@ -94,8 +94,8 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_de_wait_for_set(display, MIPI_GEN_FIFO_STAT(display, port),
- mask, 100))
+ if (intel_de_wait_for_set_ms(display, MIPI_GEN_FIFO_STAT(display, port),
+ mask, 100))
drm_err(display->drm, "DPI FIFOs are not empty\n");
}
@@ -162,8 +162,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
- data_mask, 50))
+ if (intel_de_wait_for_clear_ms(display, MIPI_GEN_FIFO_STAT(display, port),
+ data_mask, 50))
drm_err(display->drm,
"Timeout waiting for HS/LP DATA FIFO !full\n");
@@ -176,8 +176,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
GEN_READ_DATA_AVAIL);
}
- if (intel_de_wait_for_clear(display, MIPI_GEN_FIFO_STAT(display, port),
- ctrl_mask, 50)) {
+ if (intel_de_wait_for_clear_ms(display, MIPI_GEN_FIFO_STAT(display, port),
+ ctrl_mask, 50)) {
drm_err(display->drm,
"Timeout waiting for HS/LP CTRL FIFO !full\n");
}
@@ -188,8 +188,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port),
- data_mask, 50))
+ if (intel_de_wait_for_set_ms(display, MIPI_INTR_STAT(display, port),
+ data_mask, 50))
drm_err(display->drm,
"Timeout waiting for read data.\n");
@@ -246,7 +246,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
intel_de_write(display, MIPI_DPI_CONTROL(display, port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_de_wait_for_set(display, MIPI_INTR_STAT(display, port), mask, 100))
+ if (intel_de_wait_for_set_ms(display, MIPI_INTR_STAT(display, port), mask, 100))
drm_err(display->drm,
"Video mode command 0x%08x send failed.\n", cmd);
@@ -352,8 +352,8 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
- GLK_MIPIIO_PORT_POWERED, 20))
+ if (intel_de_wait_for_set_ms(display, MIPI_CTRL(display, port),
+ GLK_MIPIIO_PORT_POWERED, 20))
drm_err(display->drm, "MIPIO port is powergated\n");
}
@@ -374,8 +374,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
- GLK_PHY_STATUS_PORT_READY, 20))
+ if (intel_de_wait_for_set_ms(display, MIPI_CTRL(display, port),
+ GLK_PHY_STATUS_PORT_READY, 20))
drm_err(display->drm, "PHY is not ON\n");
}
@@ -394,8 +394,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY);
/* Wait for ULPS active */
- if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
- GLK_ULPS_NOT_ACTIVE, 20))
+ if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port),
+ GLK_ULPS_NOT_ACTIVE, 20))
drm_err(display->drm, "ULPS not active\n");
/* Exit ULPS */
@@ -413,16 +413,16 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(display, MIPI_CTRL(display, port),
- GLK_DATA_LANE_STOP_STATE, 20))
+ if (intel_de_wait_for_set_ms(display, MIPI_CTRL(display, port),
+ GLK_DATA_LANE_STOP_STATE, 20))
drm_err(display->drm,
"Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_set(display, BXT_MIPI_PORT_CTRL(port),
- AFE_LATCHOUT, 20))
+ if (intel_de_wait_for_set_ms(display, BXT_MIPI_PORT_CTRL(port),
+ AFE_LATCHOUT, 20))
drm_err(display->drm,
"D-PHY not entering LP-11 state\n");
}
@@ -519,15 +519,15 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
- GLK_PHY_STATUS_PORT_READY, 20))
+ if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port),
+ GLK_PHY_STATUS_PORT_READY, 20))
drm_err(display->drm, "PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
- GLK_MIPIIO_PORT_POWERED, 20))
+ if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port),
+ GLK_MIPIIO_PORT_POWERED, 20))
drm_err(display->drm,
"MIPI IO Port is not powergated\n");
}
@@ -544,8 +544,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_de_wait_for_clear(display, MIPI_CTRL(display, port),
- GLK_PHY_STATUS_PORT_READY, 20))
+ if (intel_de_wait_for_clear_ms(display, MIPI_CTRL(display, port),
+ GLK_PHY_STATUS_PORT_READY, 20))
drm_err(display->drm, "PHY is not turning OFF\n");
}
@@ -595,8 +595,8 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((display->platform.broxton || port == PORT_A) &&
- intel_de_wait_for_clear(display, port_ctrl,
- AFE_LATCHOUT, 30))
+ intel_de_wait_for_clear_ms(display, port_ctrl,
+ AFE_LATCHOUT, 30))
drm_err(display->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
@@ -761,7 +761,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
if (display->platform.valleyview || display->platform.cherryview) {
/* Disable DPOunit clock gating, can stall pipe */
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
0, DPOUNIT_CLOCK_GATE_DISABLE);
}
@@ -918,7 +918,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
} else {
vlv_dsi_pll_disable(encoder);
- intel_de_rmw(display, DSPCLK_GATE_D(display),
+ intel_de_rmw(display, VLV_DSPCLK_GATE_D,
DPOUNIT_CLOCK_GATE_DISABLE, 0);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index d42b61e6f076..a2da6285890b 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -25,12 +25,12 @@
* Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com>
*/
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
-#include "i915_utils.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -142,11 +142,9 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder,
pll_div &= DSI_PLL_M1_DIV_MASK;
pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT;
- while (pll_ctl) {
- pll_ctl = pll_ctl >> 1;
- p++;
- }
- p--;
+ p = fls(pll_ctl);
+ if (p)
+ p--;
if (!p) {
drm_err(display->drm, "wrong P1 divisor\n");
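
The replaced loop computed the 1-based index of pll_ctl's most significant set bit and then decremented it; fls() returns exactly that 1-based index (0 when no bit is set), so fls(pll_ctl) - 1 matches the loop for any non-zero input. It also behaves better for zero: the old code left p at -1, which slipped past the !p check, while the new code yields 0 and trips the "wrong P1 divisor" error. A minimal userspace sketch of the equivalence, using __builtin_clz as a stand-in for the kernel's fls():

#include <assert.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the most
 * significant set bit, 0 when no bit is set. */
static int fls_like(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

static int p1_old(unsigned int pll_ctl)
{
        int p = 0;

        while (pll_ctl) {
                pll_ctl >>= 1;
                p++;
        }
        return p - 1;   /* -1 for pll_ctl == 0: the latent bug */
}

int main(void)
{
        for (unsigned int v = 1; v < 1u << 20; v++)
                assert(p1_old(v) == fls_like(v) - 1);
        return 0;
}
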
@@ -216,6 +214,8 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct intel_display *display = to_intel_display(encoder);
+ u32 val;
+ int ret;
drm_dbg_kms(display->drm, "\n");
@@ -233,9 +233,10 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl);
- if (wait_for(vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) &
- DSI_PLL_LOCK, 20)) {
-
+ ret = poll_timeout_us(val = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL),
+ val & DSI_PLL_LOCK,
+ 500, 20 * 1000, false);
+ if (ret) {
vlv_cck_put(display->drm);
drm_err(display->drm, "DSI PLL lock failed\n");
return;
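
The open-coded wait_for() expressed only a 20 ms deadline; the poll_timeout_us() form makes the polling cadence explicit. A hedged sketch of the same pattern against a generic status register, assuming the poll_timeout_us(op, cond, sleep_us, timeout_us, sleep_before_read) helper from <linux/iopoll.h> with the semantics visible in this hunk (0 on success, -ETIMEDOUT on expiry):

#include <linux/io.h>
#include <linux/iopoll.h>

/* Illustrative only: poll a ready bit every 500 us for up to 20 ms,
 * without sleeping before the first read. */
static int wait_for_ready(void __iomem *reg, u32 ready_bit)
{
        u32 val;

        return poll_timeout_us(val = readl(reg), val & ready_bit,
                               500, 20 * 1000, false);
}
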
@@ -262,6 +263,11 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
vlv_cck_put(display->drm);
}
+static bool has_dsic_clock(struct intel_display *display)
+{
+ return display->platform.broxton;
+}
+
bool bxt_dsi_pll_is_enabled(struct intel_display *display)
{
bool enabled;
@@ -284,7 +290,7 @@ bool bxt_dsi_pll_is_enabled(struct intel_display *display)
* causes a system hang.
*/
val = intel_de_read(display, BXT_DSI_PLL_CTL);
- if (display->platform.geminilake) {
+ if (!has_dsic_clock(display)) {
if (!(val & BXT_DSIA_16X_MASK)) {
drm_dbg_kms(display->drm,
"Invalid PLL divider (%08x)\n", val);
@@ -313,8 +319,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED, 1))
+ if (intel_de_wait_for_clear_ms(display, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1))
drm_err(display->drm,
"Timeout waiting for PLL lock deassertion\n");
}
@@ -358,6 +364,8 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
u32 pclk;
config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL);
+ if (!has_dsic_clock(display))
+ config->dsi_pll.ctrl &= ~BXT_DSIC_16X_MASK;
pclk = bxt_dsi_pclk(encoder, config);
@@ -514,7 +522,9 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
* Spec says both have to be programmed, even if one is not getting
* used. Configure MIPI_CLOCK_CTL dividers in modeset
*/
- config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2;
+ config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2;
+ if (has_dsic_clock(display))
+ config->dsi_pll.ctrl |= BXT_DSIC_16X_BY2;
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
@@ -558,8 +568,8 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE);
/* Timeout and fail if PLL not locked */
- if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE,
- BXT_DSI_PLL_LOCKED, 1)) {
+ if (intel_de_wait_for_set_ms(display, BXT_DSI_PLL_ENABLE,
+ BXT_DSI_PLL_LOCKED, 1)) {
drm_err(display->drm,
"Timed out waiting for DSI PLL to lock\n");
return;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 15835952352e..3215ef49c975 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -68,6 +68,7 @@
#include <linux/nospec.h>
#include <drm/drm_cache.h>
+#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include "gt/gen6_ppgtt.h"
@@ -2158,18 +2159,12 @@ static int set_context_image(struct i915_gem_context *ctx,
goto out_ce;
}
- state = kmalloc(ce->engine->context_size, GFP_KERNEL);
- if (!state) {
- ret = -ENOMEM;
+ state = memdup_user(u64_to_user_ptr(user.image), ce->engine->context_size);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
goto out_ce;
}
- if (copy_from_user(state, u64_to_user_ptr(user.image),
- ce->engine->context_size)) {
- ret = -EFAULT;
- goto out_state;
- }
-
shmem_state = shmem_create_from_data(ce->engine->name,
state, ce->engine->context_size);
if (IS_ERR(shmem_state)) {
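
memdup_user() folds the kmalloc()/copy_from_user() pair into a single call whose ERR_PTR already distinguishes -ENOMEM from -EFAULT, which is what lets the hunk drop the extra out_state unwind label. A hedged sketch of the idiom (names are illustrative):

#include <linux/err.h>
#include <linux/string.h>

/* Allocate a kernel copy of a user buffer; on failure the caller
 * unwraps the errno with PTR_ERR(). */
static int load_user_blob(const void __user *uptr, size_t size, void **out)
{
        void *buf = memdup_user(uptr, size);

        if (IS_ERR(buf))
                return PTR_ERR(buf);

        *out = buf;
        return 0;
}
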
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index c3e6a325872d..189ecdd0a9c1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "display/intel_display.h"
#include "gem/i915_gem_ioctls.h"
@@ -193,8 +194,8 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * cpp, 64);
/* align stride to page size so that we can remap */
- if (args->pitch > intel_plane_fb_max_stride(dev, format,
- DRM_FORMAT_MOD_LINEAR))
+ if (args->pitch > intel_dumb_fb_max_stride(dev, format,
+ DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
if (args->pitch < args->width)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index ca7e9216934a..b057c2fa03a4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -9,6 +9,7 @@
#include <linux/uaccess.h>
#include <drm/drm_auth.h>
+#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include "gem/i915_gem_ioctls.h"
@@ -142,7 +143,7 @@ enum {
* we want to leave the object where it is and for all the existing relocations
* to match. If the object is given a new address, or if userspace thinks the
* object is elsewhere, we have to parse all the relocation entries and update
- * the addresses. Userspace can set the I915_EXEC_NORELOC flag to hint that
+ * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
* all the target addresses in all of its objects match the value in the
* relocation entries and that they all match the presumed offsets given by the
* list of execbuffer objects. Using this knowledge, we know that if we haven't
@@ -182,7 +183,7 @@ enum {
* the object. Simple! ... The relocation entries are stored in user memory
* and so to access them we have to copy them into a local buffer. That copy
* has to avoid taking any pagefaults as they may lead back to a GEM object
- * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
+ * requiring the vm->mutex (i.e. recursive deadlock). So once again we split
* the relocation into multiple passes. First we try to do everything within an
* atomic context (avoid the pagefaults) which requires that we never wait. If
* we detect that we may wait, or if we need to fault, then we have to fallback
@@ -1382,8 +1383,9 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
*/
if (flushes & CLFLUSH_AFTER)
drm_clflush_virt_range(addr, sizeof(*addr));
- } else
+ } else {
*addr = value;
+ }
}
static u64
@@ -1567,36 +1569,36 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
do {
u64 offset = eb_relocate_entry(eb, ev, r);
- if (likely(offset == 0)) {
- } else if ((s64)offset < 0) {
+ if (likely(offset == 0))
+ continue;
+
+ if ((s64)offset < 0) {
remain = (int)offset;
goto out;
- } else {
- /*
- * Note that reporting an error now
- * leaves everything in an inconsistent
- * state as we have *already* changed
- * the relocation value inside the
- * object. As we have not changed the
- * reloc.presumed_offset or will not
- * change the execobject.offset, on the
- * call we may not rewrite the value
- * inside the object, leaving it
- * dangling and causing a GPU hang. Unless
- * userspace dynamically rebuilds the
- * relocations on each execbuf rather than
- * presume a static tree.
- *
- * We did previously check if the relocations
- * were writable (access_ok), an error now
- * would be a strange race with mprotect,
- * having already demonstrated that we
- * can read from this userspace address.
- */
- offset = gen8_canonical_addr(offset & ~UPDATE);
- __put_user(offset,
- &urelocs[r - stack].presumed_offset);
}
+ /*
+ * Note that reporting an error now
+ * leaves everything in an inconsistent
+ * state as we have *already* changed
+ * the relocation value inside the
+ * object. As we have not changed the
+ * reloc.presumed_offset or will not
+ * change the execobject.offset, on the
+ * call we may not rewrite the value
+ * inside the object, leaving it
+ * dangling and causing a GPU hang. Unless
+ * userspace dynamically rebuilds the
+ * relocations on each execbuf rather than
+ * presume a static tree.
+ *
+ * We did previously check if the relocations
+ * were writable (access_ok), an error now
+ * would be a strange race with mprotect,
+ * having already demonstrated that we
+ * can read from this userspace address.
+ */
+ offset = gen8_canonical_addr(offset & ~UPDATE);
+ __put_user(offset, &urelocs[r - stack].presumed_offset);
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
} while (remain);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 75f5b0e871ef..4542135b20d5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -16,12 +16,13 @@
#include "i915_gem_evict.h"
#include "i915_gem_gtt.h"
#include "i915_gem_ioctls.h"
-#include "i915_gem_object.h"
#include "i915_gem_mman.h"
+#include "i915_gem_object.h"
+#include "i915_gem_ttm.h"
+#include "i915_jiffies.h"
#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
-#include "i915_gem_ttm.h"
#include "i915_vma.h"
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 1f38e367c60b..3f6f040c359d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -27,6 +27,7 @@
#include <linux/sched/mm.h>
#include <drm/drm_cache.h>
+#include <drm/drm_print.h>
#include "display/intel_frontbuffer.h"
#include "pxp/intel_pxp.h"
@@ -459,8 +460,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
atomic_inc(&i915->mm.free_count);
/*
- * Since we require blocking on struct_mutex to unbind the freed
- * object from the GPU before releasing resources back to the
+ * Since we require blocking on drm_i915_gem_object->vma.lock to unbind
+ * the freed object from the GPU before releasing resources back to the
* system, we can not do that directly from the RCU callback (which may
* be a softirq context), but must instead then defer that work onto a
* kthread. We use the RCU callback rather than move the freed object
@@ -476,24 +477,24 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
- struct intel_frontbuffer *front;
+ struct i915_frontbuffer *front;
- front = i915_gem_object_get_frontbuffer(obj);
+ front = i915_gem_object_frontbuffer_lookup(obj);
if (front) {
- intel_frontbuffer_flush(front, origin);
- intel_frontbuffer_put(front);
+ intel_frontbuffer_flush(&front->base, origin);
+ i915_gem_object_frontbuffer_put(front);
}
}
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
- struct intel_frontbuffer *front;
+ struct i915_frontbuffer *front;
- front = i915_gem_object_get_frontbuffer(obj);
+ front = i915_gem_object_frontbuffer_lookup(obj);
if (front) {
- intel_frontbuffer_invalidate(front, origin);
- intel_frontbuffer_put(front);
+ intel_frontbuffer_invalidate(&front->base, origin);
+ i915_gem_object_frontbuffer_put(front);
}
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 565f8fa330db..8878539c10ed 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -16,9 +16,9 @@
#include "i915_gem_ww.h"
#include "i915_vma_types.h"
-struct drm_scanout_buffer;
enum intel_region_id;
-struct intel_framebuffer;
+struct drm_scanout_buffer;
+struct intel_panic;
#define obj_to_i915(obj__) to_i915((obj__)->base.dev)
@@ -693,9 +693,10 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
int i915_gem_object_truncate(struct drm_i915_gem_object *obj);
-struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void);
-int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb);
-void i915_gem_object_panic_finish(struct intel_framebuffer *fb);
+struct intel_panic *i915_gem_object_alloc_panic(void);
+int i915_gem_object_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb,
+ struct drm_gem_object *_obj, bool panic_tiling);
+void i915_gem_object_panic_finish(struct intel_panic *panic);
/**
* i915_gem_object_pin_map - return a contiguous mapping of the entire object
@@ -801,6 +802,7 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
void i915_gem_fence_wait_priority(struct dma_fence *fence,
const struct i915_sched_attr *attr);
+void i915_gem_fence_wait_priority_display(struct dma_fence *fence);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
new file mode 100644
index 000000000000..aaa15e7b3f17
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "i915_drv.h"
+#include "i915_gem_object_frontbuffer.h"
+
+static int frontbuffer_active(struct i915_active *ref)
+{
+ struct i915_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ kref_get(&front->ref);
+ return 0;
+}
+
+static void frontbuffer_retire(struct i915_active *ref)
+{
+ struct i915_frontbuffer *front =
+ container_of(ref, typeof(*front), write);
+
+ intel_frontbuffer_flush(&front->base, ORIGIN_CS);
+ i915_gem_object_frontbuffer_put(front);
+}
+
+struct i915_frontbuffer *
+i915_gem_object_frontbuffer_get(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_frontbuffer *front, *cur;
+
+ front = i915_gem_object_frontbuffer_lookup(obj);
+ if (front)
+ return front;
+
+ front = kmalloc(sizeof(*front), GFP_KERNEL);
+ if (!front)
+ return NULL;
+
+ intel_frontbuffer_init(&front->base, &i915->drm);
+
+ kref_init(&front->ref);
+ i915_gem_object_get(obj);
+ front->obj = obj;
+
+ i915_active_init(&front->write,
+ frontbuffer_active,
+ frontbuffer_retire,
+ I915_ACTIVE_RETIRE_SLEEPS);
+
+ spin_lock(&i915->frontbuffer_lock);
+ if (rcu_access_pointer(obj->frontbuffer)) {
+ cur = rcu_dereference_protected(obj->frontbuffer, true);
+ kref_get(&cur->ref);
+ } else {
+ cur = front;
+ rcu_assign_pointer(obj->frontbuffer, front);
+ }
+ spin_unlock(&i915->frontbuffer_lock);
+
+ if (cur != front) {
+ i915_gem_object_put(obj);
+ intel_frontbuffer_fini(&front->base);
+ kfree(front);
+ }
+
+ return cur;
+}
+
+void i915_gem_object_frontbuffer_ref(struct i915_frontbuffer *front)
+{
+ kref_get(&front->ref);
+}
+
+static void frontbuffer_release(struct kref *ref)
+ __releases(&i915->frontbuffer_lock)
+{
+ struct i915_frontbuffer *front =
+ container_of(ref, typeof(*front), ref);
+ struct drm_i915_gem_object *obj = front->obj;
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ i915_ggtt_clear_scanout(obj);
+
+ RCU_INIT_POINTER(obj->frontbuffer, NULL);
+
+ spin_unlock(&i915->frontbuffer_lock);
+
+ i915_active_fini(&front->write);
+
+ i915_gem_object_put(obj);
+
+ intel_frontbuffer_fini(&front->base);
+
+ kfree_rcu(front, rcu);
+}
+
+void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front)
+{
+ struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+ kref_put_lock(&front->ref, frontbuffer_release,
+ &i915->frontbuffer_lock);
+}
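
i915_gem_object_frontbuffer_get() follows the optimistic-install pattern: allocate outside the lock, try to publish under the lock, and back off if another thread won; the release side pairs with kref_put_lock() so the slot is cleared atomically with the refcount reaching zero. A generic, hedged sketch of the same shape (types and names are illustrative; the RCU publication of the real code is elided):

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct tracker {
        struct kref ref;
};

static struct tracker *tracker_get(struct tracker **slot, spinlock_t *lock)
{
        struct tracker *fresh, *cur;

        fresh = kzalloc(sizeof(*fresh), GFP_KERNEL);
        if (!fresh)
                return NULL;
        kref_init(&fresh->ref);

        spin_lock(lock);
        if (*slot) {
                cur = *slot;            /* lost the race: ref the winner */
                kref_get(&cur->ref);
        } else {
                cur = *slot = fresh;    /* won: publish our allocation */
        }
        spin_unlock(lock);

        if (cur != fresh)
                kfree(fresh);           /* discard the unused allocation */

        return cur;
}
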
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
index b6dc3d1b9bb1..2133e29047c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -12,6 +12,14 @@
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
+struct i915_frontbuffer {
+ struct intel_frontbuffer base;
+ struct drm_i915_gem_object *obj;
+ struct i915_active write;
+ struct rcu_head rcu;
+ struct kref ref;
+};
+
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
@@ -33,19 +41,23 @@ i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
__i915_gem_object_invalidate_frontbuffer(obj, origin);
}
+struct i915_frontbuffer *i915_gem_object_frontbuffer_get(struct drm_i915_gem_object *obj);
+void i915_gem_object_frontbuffer_ref(struct i915_frontbuffer *front);
+void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front);
+
/**
- * i915_gem_object_get_frontbuffer - Get the object's frontbuffer
- * @obj: The object whose frontbuffer to get.
+ * i915_gem_object_frontbuffer_lookup - Look up the object's frontbuffer
+ * @obj: The object whose frontbuffer to look up.
*
* Get pointer to object's frontbuffer if such exists. Please note that RCU
* mechanism is used to handle e.g. ongoing removal of frontbuffer pointer.
*
* Return: pointer to object's frontbuffer if such exists or NULL
*/
-static inline struct intel_frontbuffer *
-i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
+static inline struct i915_frontbuffer *
+i915_gem_object_frontbuffer_lookup(const struct drm_i915_gem_object *obj)
{
- struct intel_frontbuffer *front;
+ struct i915_frontbuffer *front;
if (likely(!rcu_access_pointer(obj->frontbuffer)))
return NULL;
@@ -62,43 +74,11 @@ i915_gem_object_get_frontbuffer(const struct drm_i915_gem_object *obj)
if (likely(front == rcu_access_pointer(obj->frontbuffer)))
break;
- intel_frontbuffer_put(front);
+ i915_gem_object_frontbuffer_put(front);
} while (1);
rcu_read_unlock();
return front;
}
-/**
- * i915_gem_object_set_frontbuffer - Set the object's frontbuffer
- * @obj: The object whose frontbuffer to set.
- * @front: The frontbuffer to set
- *
- * Set object's frontbuffer pointer. If frontbuffer is already set for the
- * object keep it and return it's pointer to the caller. Please note that RCU
- * mechanism is used to handle e.g. ongoing removal of frontbuffer pointer. This
- * function is protected by i915->display->fb_tracking.lock
- *
- * Return: pointer to frontbuffer which was set.
- */
-static inline struct intel_frontbuffer *
-i915_gem_object_set_frontbuffer(struct drm_i915_gem_object *obj,
- struct intel_frontbuffer *front)
-{
- struct intel_frontbuffer *cur = front;
-
- if (!front) {
- RCU_INIT_POINTER(obj->frontbuffer, NULL);
- drm_gem_object_put(intel_bo_to_drm_bo(obj));
- } else if (rcu_access_pointer(obj->frontbuffer)) {
- cur = rcu_dereference_protected(obj->frontbuffer, true);
- kref_get(&cur->ref);
- } else {
- drm_gem_object_get(intel_bo_to_drm_bo(obj));
- rcu_assign_pointer(obj->frontbuffer, front);
- }
-
- return cur;
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 64600aa8227f..465ce94aee76 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -574,7 +574,7 @@ struct drm_i915_gem_object {
*/
u16 write_domain;
- struct intel_frontbuffer __rcu *frontbuffer;
+ struct i915_frontbuffer __rcu *frontbuffer;
/** Current tiling stride for the object, if it's tiled. */
unsigned int tiling_and_stride;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index c16a57160b26..c2f8e5f95696 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -3,9 +3,11 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include <linux/vmalloc.h>
+
#include <drm/drm_cache.h>
#include <drm/drm_panic.h>
-#include <linux/vmalloc.h>
+#include <drm/drm_print.h>
#include "display/intel_fb.h"
#include "display/intel_display_types.h"
@@ -357,23 +359,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
return vaddr ?: ERR_PTR(-ENOMEM);
}
-struct i915_panic_data {
+struct intel_panic {
struct page **pages;
int page;
void *vaddr;
};
-struct i915_framebuffer {
- struct intel_framebuffer base;
- struct i915_panic_data panic;
-};
-
-static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb)
-{
- return &container_of_const(fb, struct i915_framebuffer, base)->panic;
-}
-
-static void i915_panic_kunmap(struct i915_panic_data *panic)
+static void i915_panic_kunmap(struct intel_panic *panic)
{
if (panic->vaddr) {
drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
@@ -420,7 +412,7 @@ static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb,
unsigned int new_page;
unsigned int offset;
struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct i915_panic_data *panic = to_i915_panic_data(fb);
+ struct intel_panic *panic = fb->panic;
if (fb->panic_tiling)
offset = fb->panic_tiling(sb->width, x, y);
@@ -441,14 +433,13 @@ static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb,
}
}
-struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
+struct intel_panic *i915_gem_object_alloc_panic(void)
{
- struct i915_framebuffer *i915_fb;
+ struct intel_panic *panic;
+
+ panic = kzalloc(sizeof(*panic), GFP_KERNEL);
- i915_fb = kzalloc(sizeof(*i915_fb), GFP_KERNEL);
- if (i915_fb)
- return &i915_fb->base;
- return NULL;
+ return panic;
}
/*
@@ -456,12 +447,11 @@ struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void)
* Use current vaddr if it exists, or setup a list of pages.
* pfn is not supported yet.
*/
-int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
+int i915_gem_object_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb,
+ struct drm_gem_object *_obj, bool panic_tiling)
{
enum i915_map_type has_type;
- struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct i915_panic_data *panic = to_i915_panic_data(fb);
- struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base));
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
void *ptr;
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
@@ -471,7 +461,7 @@ int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
else
iosys_map_set_vaddr(&sb->map[0], ptr);
- if (fb->panic_tiling)
+ if (panic_tiling)
sb->set_pixel = i915_gem_object_panic_map_set_pixel;
return 0;
}
@@ -486,10 +476,8 @@ int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb)
return -EOPNOTSUPP;
}
-void i915_gem_object_panic_finish(struct intel_framebuffer *fb)
+void i915_gem_object_panic_finish(struct intel_panic *panic)
{
- struct i915_panic_data *panic = to_i915_panic_data(fb);
-
i915_panic_kunmap(panic);
panic->page = -1;
kfree(panic->pages);
@@ -779,7 +767,7 @@ __i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
sg = i915_gem_object_get_sg(obj, n, &offset);
- return nth_page(sg_page(sg), offset);
+ return sg_page(sg) + offset;
}
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
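
Replacing nth_page() with plain pointer arithmetic assumes the struct pages backing an sg segment are contiguous in the memmap, which is the premise of the kernel-wide nth_page() removal this hunk follows. The resulting idiom, as a hedged one-liner:

#include <linux/scatterlist.h>

/* Illustrative: the nth page of an sg segment by pointer arithmetic. */
static struct page *sg_nth_page(struct scatterlist *sg, unsigned int n)
{
        return sg_page(sg) + n;
}
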
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index f9e7cab140f8..bc799f182850 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -8,6 +8,7 @@
#include <linux/swap.h>
#include <drm/drm_cache.h>
+#include <drm/drm_print.h>
#include "gt/intel_gt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index e3d188455f67..26dda55a07ff 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -441,11 +441,20 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
written = file->f_op->write_iter(&kiocb, &iter);
BUG_ON(written == -EIOCBQUEUED);
- if (written != size)
- return -EIO;
-
+ /*
+ * First, check if write_iter returned a negative error.
+ * If the write failed, return the real error code immediately.
+ * This prevents it from being overwritten by the short write check below.
+ */
if (written < 0)
return written;
+ /*
+ * Check for a short write (written bytes != requested size).
+ * Even if some data was written, return -EIO to indicate that the
+ * write was not fully completed.
+ */
+ if (written != size)
+ return -EIO;
return 0;
}
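
The reordering matters because a negative written value is also != size, so testing the short-write case first would launder a real errno (say, -ENOSPC) into -EIO. The ordering rule, reduced to a hedged sketch:

#include <linux/errno.h>
#include <linux/types.h>

/* Map a write_iter()-style result to an errno without masking the
 * real error behind -EIO. */
static int pwrite_result(ssize_t written, size_t size)
{
        if (written < 0)
                return written;         /* propagate the real error */
        if ((size_t)written != size)
                return -EIO;            /* short write */
        return 0;
}
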
@@ -514,6 +523,13 @@ static int __create_shmem(struct drm_i915_private *i915,
if (IS_ERR(filp))
return PTR_ERR(filp);
+ /*
+ * Prevent -EFBIG by allowing large writes beyond MAX_NON_LFS on shmem
+ * objects by setting O_LARGEFILE.
+ */
+ if (force_o_largefile())
+ filp->f_flags |= O_LARGEFILE;
+
obj->filp = filp;
return 0;
}
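
Without O_LARGEFILE, an in-kernel shmem file refuses writes past MAX_NON_LFS (2 GiB - 1) with -EFBIG; force_o_largefile() applies the usual architecture/personality rules before the flag is set. The fixup, as a hedged helper:

#include <linux/fcntl.h>
#include <linux/fs.h>

/* Illustrative: mark a kernel-created file large-file capable so
 * writes beyond 2 GiB don't fail with -EFBIG. */
static void mark_largefile(struct file *filp)
{
        if (force_o_largefile())
                filp->f_flags |= O_LARGEFILE;
}
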
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index b81e67504bbe..e0d1f369a163 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -12,6 +12,8 @@
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
+#include <drm/drm_print.h>
+
#include "gt/intel_gt_requests.h"
#include "gt/intel_gt.h"
@@ -170,7 +172,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
* Also note that although these lists do not hold a reference to
* the object we can safely grab one here: The final object
* unreferencing and the bound_list are both protected by the
- * dev->struct_mutex and so we won't ever be able to observe an
+ * i915->mm.obj_lock and so we won't ever be able to observe an
* object on the bound_list with a reference count equals 0.
*/
for (phase = phases; phase->list; phase++) {
@@ -185,7 +187,7 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww,
/*
* We serialize our access to unreferenced objects through
- * the use of the struct_mutex. While the objects are not
+ * the use of the obj_lock. While the objects are not
* yet freed (due to RCU then a workqueue) we still want
* to be able to shrink their pages, so they remain on
* the unbound/bound list until actually freed.
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 3380151edfc1..f859c99f969b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -7,6 +7,7 @@
#include <linux/mutex.h>
#include <drm/drm_mm.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_drm.h>
#include "gem/i915_gem_lmem.h"
@@ -24,6 +25,11 @@
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
+struct intel_stolen_node {
+ struct drm_i915_private *i915;
+ struct drm_mm_node node;
+};
+
/*
* The BIOS typically reserves some of the system's memory for the exclusive
* use of the integrated graphics. This memory is no longer available for
@@ -36,9 +42,9 @@
* for is a boon.
*/
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
- unsigned alignment, u64 start, u64 end)
+static int __i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
+ struct drm_mm_node *node, u64 size,
+ unsigned int alignment, u64 start, u64 end)
{
int ret;
@@ -58,24 +64,43 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
return ret;
}
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
- unsigned alignment)
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
+ unsigned int alignment, u64 start, u64 end)
+{
+ return __i915_gem_stolen_insert_node_in_range(node->i915, &node->node,
+ size, alignment,
+ start, end);
+}
+
+static int __i915_gem_stolen_insert_node(struct drm_i915_private *i915,
+ struct drm_mm_node *node, u64 size,
+ unsigned int alignment)
{
- return i915_gem_stolen_insert_node_in_range(i915, node,
- size, alignment,
- I915_GEM_STOLEN_BIAS,
- U64_MAX);
+ return __i915_gem_stolen_insert_node_in_range(i915, node,
+ size, alignment,
+ I915_GEM_STOLEN_BIAS,
+ U64_MAX);
}
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
- struct drm_mm_node *node)
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
+ unsigned int alignment)
+{
+ return __i915_gem_stolen_insert_node(node->i915, &node->node, size, alignment);
+}
+
+static void __i915_gem_stolen_remove_node(struct drm_i915_private *i915,
+ struct drm_mm_node *node)
{
mutex_lock(&i915->mm.stolen_lock);
drm_mm_remove_node(node);
mutex_unlock(&i915->mm.stolen_lock);
}
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
+{
+ __i915_gem_stolen_remove_node(node->i915, &node->node);
+}
+
static bool valid_stolen_size(struct drm_i915_private *i915, struct resource *dsm)
{
return (dsm->start != 0 || HAS_LMEMBAR_SMEM_STOLEN(i915)) && dsm->end > dsm->start;
@@ -683,7 +708,7 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);
GEM_BUG_ON(!stolen);
- i915_gem_stolen_remove_node(i915, stolen);
+ __i915_gem_stolen_remove_node(i915, stolen);
kfree(stolen);
i915_gem_object_release_memory_region(obj);
@@ -772,8 +797,8 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
mutex_unlock(&i915->mm.stolen_lock);
} else {
- ret = i915_gem_stolen_insert_node(i915, stolen, size,
- mem->min_page_size);
+ ret = __i915_gem_stolen_insert_node(i915, stolen, size,
+ mem->min_page_size);
}
if (ret)
goto err_free;
@@ -785,7 +810,7 @@ static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
return 0;
err_remove:
- i915_gem_stolen_remove_node(i915, stolen);
+ __i915_gem_stolen_remove_node(i915, stolen);
err_free:
kfree(stolen);
return ret;
@@ -1000,38 +1025,64 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj)
return obj->ops == &i915_gem_object_stolen_ops;
}
-bool i915_gem_stolen_initialized(const struct drm_i915_private *i915)
+bool i915_gem_stolen_initialized(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
return drm_mm_initialized(&i915->mm.stolen);
}
-u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915)
+u64 i915_gem_stolen_area_address(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
return i915->dsm.stolen.start;
}
-u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915)
+u64 i915_gem_stolen_area_size(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
+
return resource_size(&i915->dsm.stolen);
}
-u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
- const struct drm_mm_node *node)
+u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node)
{
+ struct drm_i915_private *i915 = node->i915;
+
return i915->dsm.stolen.start + i915_gem_stolen_node_offset(node);
}
-bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node)
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node)
+{
+ return drm_mm_node_allocated(&node->node);
+}
+
+u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node)
{
- return drm_mm_node_allocated(node);
+ return node->node.start;
}
-u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node)
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node)
{
- return node->start;
+ return node->node.size;
+}
+
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_stolen_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ node->i915 = i915;
+
+ return node;
}
-u64 i915_gem_stolen_node_size(const struct drm_mm_node *node)
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node)
{
- return node->size;
+ kfree(node);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index dfe0db8bb1b9..7b0386002ed4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -8,21 +8,17 @@
#include <linux/types.h>
-struct drm_i915_private;
-struct drm_mm_node;
+struct drm_device;
struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_stolen_node;
-#define i915_stolen_fb drm_mm_node
-
-int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
unsigned alignment);
-int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
- struct drm_mm_node *node, u64 size,
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
-void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
- struct drm_mm_node *node);
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node);
struct intel_memory_region *
i915_gem_stolen_smem_setup(struct drm_i915_private *i915, u16 type,
u16 instance);
@@ -38,15 +34,17 @@ bool i915_gem_object_is_stolen(const struct drm_i915_gem_object *obj);
#define I915_GEM_STOLEN_BIAS SZ_128K
-bool i915_gem_stolen_initialized(const struct drm_i915_private *i915);
-u64 i915_gem_stolen_area_address(const struct drm_i915_private *i915);
-u64 i915_gem_stolen_area_size(const struct drm_i915_private *i915);
+bool i915_gem_stolen_initialized(struct drm_device *drm);
+u64 i915_gem_stolen_area_address(struct drm_device *drm);
+u64 i915_gem_stolen_area_size(struct drm_device *drm);
+
+u64 i915_gem_stolen_node_address(const struct intel_stolen_node *node);
-u64 i915_gem_stolen_node_address(const struct drm_i915_private *i915,
- const struct drm_mm_node *node);
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node);
+u64 i915_gem_stolen_node_offset(const struct intel_stolen_node *node);
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node);
-bool i915_gem_stolen_node_allocated(const struct drm_mm_node *node);
-u64 i915_gem_stolen_node_offset(const struct drm_mm_node *node);
-u64 i915_gem_stolen_node_size(const struct drm_mm_node *node);
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm);
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node);
#endif /* __I915_GEM_STOLEN_H__ */
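
With intel_stolen_node now an opaque handle carrying its own i915 back-pointer, callers allocate and free the node instead of embedding a drm_mm_node and threading drm_i915_private through every call. A hedged usage sketch of the reworked API (error values and the alignment are illustrative):

#include <linux/errno.h>
#include <linux/sizes.h>

#include "i915_gem_stolen.h"

static int reserve_stolen(struct drm_device *drm, u64 size,
                          struct intel_stolen_node **out)
{
        struct intel_stolen_node *node;
        int ret;

        if (!i915_gem_stolen_initialized(drm))
                return -ENODEV;

        node = i915_gem_stolen_node_alloc(drm);
        if (!node)
                return -ENOMEM;

        ret = i915_gem_stolen_insert_node(node, size, SZ_4K);
        if (ret) {
                i915_gem_stolen_node_free(node);
                return ret;
        }

        *out = node;
        return 0;
}

Teardown is the mirror image: i915_gem_stolen_remove_node(node) followed by i915_gem_stolen_node_free(node).
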
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 5a296ba3758a..567b97d28d30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -145,8 +145,9 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
return false;
}
- if (GRAPHICS_VER(i915) == 2 ||
- (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
+ if (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915))
+ tile_width = 128;
+ else if (GRAPHICS_VER(i915) == 2)
tile_width = 128;
else
tile_width = 512;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 1f4814968868..f65fe86c02b5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -5,11 +5,13 @@
#include <linux/shmem_fs.h>
+#include <drm/drm_buddy.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/drm_buddy.h>
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_ttm_buddy_manager.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
@@ -1029,7 +1031,7 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
GEM_BUG_ON(!obj->ttm.created);
- ttm_bo_put(i915_gem_to_ttm(obj));
+ ttm_bo_fini(i915_gem_to_ttm(obj));
}
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -1325,7 +1327,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
* If this function fails, it will call the destructor, but
* our caller still owns the object. So no freeing in the
* destructor until obj->ttm.created is true.
- * Similarly, in delayed_destroy, we can't call ttm_bo_put()
+ * Similarly, in delayed_destroy, we can't call ttm_bo_fini()
* until successful initialization.
*/
ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 61596cecce4d..4824f948daed 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -3,6 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 307a18eede72..77cc3af3d518 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -38,6 +38,8 @@
#include <linux/swap.h>
#include <linux/sched/mm.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index 991666fd9f85..2893df65c359 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -138,6 +138,13 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence,
local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}
+void i915_gem_fence_wait_priority_display(struct dma_fence *fence)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
+
+ i915_gem_fence_wait_priority(fence, &attr);
+}
+
int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
@@ -217,10 +224,10 @@ static unsigned long to_wait_timeout(s64 timeout_ns)
*
* The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
* non-zero timeout parameter the wait ioctl will wait for the given number of
- * nanoseconds on an object becoming unbusy. Since the wait itself does so
- * without holding struct_mutex the object may become re-busied before this
- * function completes. A similar but shorter * race condition exists in the busy
- * ioctl
+ * nanoseconds on an object becoming unbusy. Since the wait occurs without
+ * holding a global or exclusive lock the object may become re-busied before
+ * this function completes. A similar but shorter race condition exists
+ * in the busy ioctl.
*/
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index a09e2eb47175..1f1290214031 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -7,15 +7,12 @@
#include <linux/mount.h>
#include <linux/fs_context.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_utils.h"
-static int add_param(struct fs_context *fc, const char *key, const char *val)
-{
- return vfs_parse_fs_string(fc, key, val, strlen(val));
-}
-
void i915_gemfs_init(struct drm_i915_private *i915)
{
struct file_system_type *type;
@@ -48,9 +45,9 @@ void i915_gemfs_init(struct drm_i915_private *i915)
fc = fs_context_for_mount(type, SB_KERNMOUNT);
if (IS_ERR(fc))
goto err;
- ret = add_param(fc, "source", "tmpfs");
+ ret = vfs_parse_fs_string(fc, "source", "tmpfs");
if (!ret)
- ret = add_param(fc, "huge", "within_size");
+ ret = vfs_parse_fs_string(fc, "huge", "within_size");
if (!ret)
gemfs = fc_mount_longterm(fc);
put_fs_context(fc);
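
add_param() existed only to supply strlen(val); with the three-argument vfs_parse_fs_string() used in this hunk, which measures the value itself, the wrapper has nothing left to do. The mount-option setup, as a hedged sketch:

#include <linux/fs_context.h>

/* Illustrative: configure a tmpfs fs_context with the same two
 * parameters this hunk sets. */
static int gemfs_set_params(struct fs_context *fc)
{
        int ret;

        ret = vfs_parse_fs_string(fc, "source", "tmpfs");
        if (!ret)
                ret = vfs_parse_fs_string(fc, "huge", "within_size");

        return ret;
}
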
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 86d9d2fcb6a6..3557e9e6f422 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -3,9 +3,11 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_selftest.h"
-#include "display/intel_display_core.h"
+#include "display/intel_display_device.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_engine_user.h"
@@ -110,6 +112,7 @@ struct tiled_blits {
static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
int gen = GRAPHICS_VER(i915);
/* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */
@@ -121,7 +124,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return false;
- return HAS_DISPLAY(i915);
+ return intel_display_device_present(display);
}
static bool fast_blit_ok(const struct blit_buffer *buf)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index eb0158e43417..1330c0b431a7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -962,13 +962,14 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
if (IS_ERR(rpcs))
return PTR_ERR(rpcs);
+ i915_gem_ww_ctx_init(&ww, false);
+
batch = i915_vma_instance(rpcs, ce->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_put;
}
- i915_gem_ww_ctx_init(&ww, false);
retry:
err = i915_gem_object_lock(obj, &ww);
if (!err)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 9c3f17e51885..0d250d57496a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -7,6 +7,8 @@
#include <linux/highmem.h>
#include <linux/prime_numbers.h>
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
@@ -1096,32 +1098,20 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
unsigned long addr,
bool unfaultable)
{
- struct vm_area_struct *area;
- int err = 0, i;
+ int i;
pr_info("igt_mmap(%s, %d) @ %lx\n",
obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
- mmap_read_lock(current->mm);
- area = vma_lookup(current->mm, addr);
- mmap_read_unlock(current->mm);
- if (!area) {
- pr_err("%s: Did not create a vm_area_struct for the mmap\n",
- obj->mm.region->name);
- err = -EINVAL;
- goto out_unmap;
- }
-
for (i = 0; i < obj->base.size / sizeof(u32); i++) {
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
u32 x;
if (get_user(x, ux)) {
- err = -EFAULT;
if (!unfaultable) {
pr_err("%s: Unable to read from mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x));
- goto out_unmap;
+ return -EFAULT;
}
continue;
@@ -1130,37 +1120,29 @@ static int ___igt_mmap_migrate(struct drm_i915_private *i915,
if (unfaultable) {
pr_err("%s: Faulted unmappable memory\n",
obj->mm.region->name);
- err = -EINVAL;
- goto out_unmap;
+ return -EINVAL;
}
if (x != expand32(POISON_INUSE)) {
pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
obj->mm.region->name,
i * sizeof(x), x, expand32(POISON_INUSE));
- err = -EINVAL;
- goto out_unmap;
+ return -EINVAL;
}
x = expand32(POISON_FREE);
if (put_user(x, ux)) {
pr_err("%s: Unable to write to mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x));
- err = -EFAULT;
- goto out_unmap;
+ return -EFAULT;
}
}
- if (unfaultable) {
- if (err == -EFAULT)
- err = 0;
- } else {
- obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
- err = wc_check(obj);
- }
-out_unmap:
- vm_munmap(addr, obj->base.size);
- return err;
+ if (unfaultable)
+ return 0;
+
+ obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
+ return wc_check(obj);
}
#define IGT_MMAP_MIGRATE_TOPDOWN (1 << 0)
@@ -1176,6 +1158,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
struct drm_i915_private *i915 = placements[0]->i915;
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
+ struct vm_area_struct *area;
unsigned long addr;
LIST_HEAD(objects);
u64 offset;
@@ -1207,20 +1190,30 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
goto out_put;
}
+ mmap_read_lock(current->mm);
+ area = vma_lookup(current->mm, addr);
+ mmap_read_unlock(current->mm);
+ if (!area) {
+ pr_err("%s: Did not create a vm_area_struct for the mmap\n",
+ obj->mm.region->name);
+ err = -EINVAL;
+ goto out_addr;
+ }
+
if (flags & IGT_MMAP_MIGRATE_FILL) {
err = igt_fill_mappable(placements[0], &objects);
if (err)
- goto out_put;
+ goto out_addr;
}
err = i915_gem_object_lock(obj, NULL);
if (err)
- goto out_put;
+ goto out_addr;
err = i915_gem_object_pin_pages(obj);
if (err) {
i915_gem_object_unlock(obj);
- goto out_put;
+ goto out_addr;
}
err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
@@ -1228,7 +1221,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
i915_gem_object_is_lmem(obj),
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj);
- if (rq) {
+ if (rq && !err) {
err = dma_resv_reserve_fences(obj->base.resv, 1);
if (!err)
dma_resv_add_fence(obj->base.resv, &rq->fence,
@@ -1237,7 +1230,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
}
i915_gem_object_unlock(obj);
if (err)
- goto out_put;
+ goto out_addr;
if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
igt_make_evictable(&objects);
@@ -1245,16 +1238,16 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
err = i915_gem_object_lock(obj, NULL);
if (err)
- goto out_put;
+ goto out_addr;
/*
- * Ensure we only simulate the gpu failuire when faulting the
+ * Ensure we only simulate the gpu failure when faulting the
* pages.
*/
err = i915_gem_object_wait_moving_fence(obj, true);
i915_gem_object_unlock(obj);
if (err)
- goto out_put;
+ goto out_addr;
i915_ttm_migrate_set_failure_modes(true, false);
}
@@ -1298,6 +1291,9 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
}
}
+out_addr:
+ vm_munmap(addr, obj->base.size);
+
out_put:
i915_gem_object_put(obj);
igt_close_objects(i915, &objects);
diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
index 8116fd5987e2..8c01fb6d4e7b 100644
--- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c
@@ -292,15 +292,15 @@ int gen4_emit_bb_start(struct i915_request *rq,
void gen2_irq_enable(struct intel_engine_cs *engine)
{
- engine->i915->irq_mask &= ~engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ engine->i915->gen2_imr_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask);
intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}
void gen2_irq_disable(struct intel_engine_cs *engine)
{
- engine->i915->irq_mask |= engine->irq_enable_mask;
- intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ engine->i915->gen2_imr_mask |= engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->gen2_imr_mask);
}
void gen5_irq_enable(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index e9f65f27b53f..071c1cc45257 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "gen8_engine_cs.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index cc866773ba6f..bf6117d5fc57 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -8,6 +8,8 @@
#include <trace/events/dma_fence.h>
#include <uapi/linux/sched/types.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_breadcrumbs.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 98c7f6052069..10070ee4d74c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -14,7 +14,6 @@
#include "i915_active_types.h"
#include "i915_sw_fence.h"
-#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
#include "intel_wakeref.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 8d4bb95f8424..b279878dca29 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -3,7 +3,10 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_request.h"
#include "intel_context.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 833987015b8b..be4bbff1a57c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -7,6 +7,8 @@
#include <linux/list_sort.h>
#include <linux/llist.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_user.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 03baa7fa0a27..3df683b0402a 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -106,14 +106,20 @@
* preemption, but just sampling the new tail pointer).
*
*/
+
#include <linux/interrupt.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
+#include "gen8_engine_cs.h"
#include "i915_drv.h"
+#include "i915_list_util.h"
#include "i915_reg.h"
+#include "i915_timer_util.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "gen8_engine_cs.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_heartbeat.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 46a5aa4ab9c8..08c4e735481b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -9,6 +9,7 @@
#include <linux/stop_machine.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_drm.h>
#include <drm/intel/intel-gtt.h>
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 889e61843ff3..5eda98ebc1ae 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -5,6 +5,8 @@
#include <linux/highmem.h>
+#include <drm/drm_print.h>
+
#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
index c5f5f0bdfb2c..cc5d345c5e29 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -5,6 +5,7 @@
#include "intel_ggtt_gmch.h"
+#include <drm/drm_print.h>
#include <drm/intel/intel-gtt.h>
#include <linux/agp_backend.h>
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index 86b5a9ba323d..c7befc5c20d0 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -7,6 +7,7 @@
#include "gem/i915_gem_object.h"
#include "i915_drv.h"
+#include "i915_list_util.h"
#include "intel_engine_pm.h"
#include "intel_gt_buffer_pool.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 6c499692d61e..c90b35881a26 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -148,7 +148,7 @@ static u32 gen4_read_clock_frequency(struct intel_uncore *uncore)
*
* Testing on actual hardware has shown there is no /16.
*/
- return DIV_ROUND_CLOSEST(i9xx_fsb_freq(uncore->i915), 4) * 1000;
+ return DIV_ROUND_CLOSEST(intel_fsb_freq(uncore->i915), 4) * 1000;
}
static u32 read_clock_frequency(struct intel_uncore *uncore)
@@ -205,7 +205,7 @@ static u64 div_u64_roundup(u64 nom, u32 den)
u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count)
{
- return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency);
+ return mul_u64_u32_div(count, NSEC_PER_SEC, gt->clock_frequency);
}
u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
@@ -215,7 +215,7 @@ u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count)
u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns)
{
- return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC);
+ return mul_u64_u32_div(ns, gt->clock_frequency, NSEC_PER_SEC);
}
u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns)
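
The u64 product count * NSEC_PER_SEC wraps once count exceeds 2^64 / 10^9, roughly 1.8e10 ticks, which is only a few seconds at GHz-range GT clock frequencies; mul_u64_u32_div() keeps the intermediate product in 128 bits. Note the hunk also trades div_u64_roundup()'s rounding up for truncation. A hedged sketch of the overflow-safe form:

#include <linux/math64.h>
#include <linux/time64.h>

/* Illustrative: convert clock ticks to nanoseconds without the u64
 * overflow of the intermediate count * NSEC_PER_SEC. */
static u64 ticks_to_ns(u64 count, u32 clock_hz)
{
        return mul_u64_u32_div(count, NSEC_PER_SEC, clock_hz);
}
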
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index 4dc23b8d3aa2..bd9abbd6d3d4 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -5,6 +5,8 @@
#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_debugfs.h"
@@ -82,14 +84,15 @@ static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
void intel_gt_debugfs_register(struct intel_gt *gt)
{
+ struct dentry *debugfs_root = gt->i915->drm.debugfs_root;
struct dentry *root;
char gtname[4];
- if (!gt->i915->drm.primary->debugfs_root)
+ if (!debugfs_root)
return;
snprintf(gtname, sizeof(gtname), "gt%u", gt->info.id);
- root = debugfs_create_dir(gtname, gt->i915->drm.primary->debugfs_root);
+ root = debugfs_create_dir(gtname, debugfs_root);
if (IS_ERR(root))
return;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index a60822e2b5d4..c3afa321fe30 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_wait_util.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index 87ef85483bae..96411f357f5d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -7,6 +7,8 @@
#include <linux/seq_file.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index c481b56fa67d..e8927ad49142 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_lmem.h"
#include "gen8_engine_cs.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 5dd8121f4b15..e8d93a657ef6 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -3,6 +3,8 @@
* Copyright © 2015 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "intel_engine.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9ca42589da4d..286d49ecc449 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -6,6 +6,9 @@
#include <linux/pm_runtime.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
+#include "display/vlv_clock.h"
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -341,7 +344,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
return PTR_ERR(pctx);
}
- GEM_BUG_ON(range_overflows_end_t(u64,
+ GEM_BUG_ON(range_end_overflows_t(u64,
i915->dsm.stolen.start,
pctx->stolen->start,
U32_MAX));
@@ -802,7 +805,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, enum intel_rc6_res_type id)
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
mul = 1000000;
- div = i915->czclk_freq;
+ div = vlv_clock_get_czclk(&i915->drm);
overflow_hw = BIT_ULL(40);
time_hw = vlv_residency_raw(uncore, reg);
} else {
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index 51bb27e10a4f..a30060fd4429 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_pci.h"
#include "i915_reg.h"
@@ -18,16 +20,6 @@
#include "gt/intel_gt_regs.h"
#ifdef CONFIG_64BIT
-static void _release_bars(struct pci_dev *pdev)
-{
- int resno;
-
- for (resno = PCI_STD_RESOURCES; resno < PCI_STD_RESOURCE_END; resno++) {
- if (pci_resource_len(pdev, resno))
- pci_release_resource(pdev, resno);
- }
-}
-
static void
_resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
{
@@ -35,9 +27,7 @@ _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size)
int bar_size = pci_rebar_bytes_to_size(size);
int ret;
- _release_bars(pdev);
-
- ret = pci_resize_resource(pdev, resno, bar_size);
+ ret = pci_resize_resource(pdev, resno, bar_size, 0);
if (ret) {
drm_info(&i915->drm, "Failed to resize BAR%d to %dM (%pe)\n",
resno, 1 << bar_size, ERR_PTR(ret));
@@ -61,16 +51,12 @@ static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t
current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR));
if (i915->params.lmem_bar_size) {
- u32 bar_sizes;
-
- rebar_size = i915->params.lmem_bar_size *
- (resource_size_t)SZ_1M;
- bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR);
-
+ rebar_size = i915->params.lmem_bar_size * (resource_size_t)SZ_1M;
if (rebar_size == current_size)
return;
- if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) ||
+ if (!pci_rebar_size_supported(pdev, GEN12_LMEM_BAR,
+ pci_rebar_bytes_to_size(rebar_size)) ||
rebar_size >= roundup_pow_of_two(lmem_size)) {
rebar_size = lmem_size;
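
For reference, the BAR-size value passed around above follows the PCIe resizable-BAR encoding, size = log2(bytes) - 20 (0 = 1 MiB, 8 = 256 MiB, 13 = 8 GiB). A sketch mirroring pci_rebar_bytes_to_size() from <linux/pci.h>; note that pci_rebar_size_supported() and the fourth pci_resize_resource() argument are introduced elsewhere in this series, so their exact semantics are assumed here:

#include <linux/log2.h>
#include <linux/minmax.h>

/* Resizable-BAR size encoding: 2^(size + 20) bytes. */
static int rebar_bytes_to_size(u64 bytes)
{
	bytes = roundup_pow_of_two(bytes);

	/* Clamp at the 1 MiB minimum the spec defines. */
	return max(ilog2(bytes), 20) - 20;
}
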
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 4b56ec3743cf..d53766c288f7 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -3,6 +3,8 @@
* Copyright © 2014 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_internal.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 4a1675dea1c7..41b5036dc538 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -9,18 +9,17 @@
#include "display/intel_display_reset.h"
#include "display/intel_overlay.h"
-
#include "gem/i915_gem_context.h"
-
#include "gt/intel_gt_regs.h"
-
#include "gt/uc/intel_gsc_fw.h"
+#include "uc/intel_guc.h"
#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
@@ -32,8 +31,6 @@
#include "intel_pci_config.h"
#include "intel_reset.h"
-#include "uc/intel_guc.h"
-
#define RESET_MAX_RETRIES 3
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h
index 4f5fd393af6f..ee4eb574a219 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h
@@ -20,7 +20,7 @@ struct intel_reset {
* FENCE registers).
*
* #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
- * acquire the struct_mutex to reset an engine, we need an explicit
+ * acquire a global lock to reset an engine, we need an explicit
* flag to prevent two concurrent reset attempts in the same engine.
* As the number of engines continues to grow, allocate the flags from
* the most significant bits.
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 2a6d79abf25b..8314a4b0505e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -15,18 +15,19 @@
#include "i915_irq.h"
#include "i915_mitigations.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
-#include "intel_engine_heartbeat.h"
-#include "intel_engine_pm.h"
-#include "intel_gt_print.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 0b35fdd461d4..b01c837ab646 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -7,11 +7,14 @@
#include <drm/intel/i915_drm.h>
-#include "display/intel_display.h"
#include "display/intel_display_rps.h"
+#include "display/vlv_clock.h"
+#include "soc/intel_dram.h"
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
@@ -276,20 +279,24 @@ static void gen5_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
+ unsigned int fsb_freq, mem_freq;
u8 fmax, fmin, fstart;
u32 rgvmodectl;
int c_m, i;
- if (i915->fsb_freq <= 3200000)
+ fsb_freq = intel_fsb_freq(i915);
+ mem_freq = intel_mem_freq(i915);
+
+ if (fsb_freq <= 3200000)
c_m = 0;
- else if (i915->fsb_freq <= 4800000)
+ else if (fsb_freq <= 4800000)
c_m = 1;
else
c_m = 2;
for (i = 0; i < ARRAY_SIZE(cparams); i++) {
if (cparams[i].i == c_m &&
- cparams[i].t == DIV_ROUND_CLOSEST(i915->mem_freq, 1000)) {
+ cparams[i].t == DIV_ROUND_CLOSEST(mem_freq, 1000)) {
rps->ips.m = cparams[i].m;
rps->ips.c = cparams[i].c;
break;
@@ -1683,10 +1690,7 @@ static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- rps->gpll_ref_freq =
- vlv_get_cck_clock(&i915->drm, "GPLL ref",
- CCK_GPLL_CLOCK_CONTROL,
- i915->czclk_freq);
+ rps->gpll_ref_freq = vlv_clock_get_gpll(&i915->drm);
drm_dbg(&i915->drm, "GPLL reference freq: %d kHz\n",
rps->gpll_ref_freq);
@@ -1696,13 +1700,13 @@ static void vlv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ vlv_init_gpll_ref_freq(rps);
+
vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
- vlv_init_gpll_ref_freq(rps);
-
rps->max_freq = vlv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1730,13 +1734,13 @@ static void chv_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
+ vlv_init_gpll_ref_freq(rps);
+
vlv_iosf_sb_get(&i915->drm,
BIT(VLV_IOSF_SB_PUNIT) |
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
- vlv_init_gpll_ref_freq(rps);
-
rps->max_freq = chv_rps_max_freq(rps);
rps->rp0_freq = rps->max_freq;
drm_dbg(&i915->drm, "max GPU freq: %d MHz (%u)\n",
@@ -1773,6 +1777,7 @@ static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
struct intel_uncore *uncore = rps_to_uncore(rps);
const struct intel_rps_ei *prev = &rps->ei;
struct intel_rps_ei now;
@@ -1789,7 +1794,7 @@ static u32 vlv_wa_c0_ei(struct intel_rps *rps, u32 pm_iir)
time = ktime_us_delta(now.ktime, prev->ktime);
- time *= rps_to_i915(rps)->czclk_freq;
+ time *= vlv_clock_get_czclk(&i915->drm);
/* Workload can be split between render + media,
* e.g. SwapBuffers being blitted in X after being rendered in
diff --git a/drivers/gpu/drm/i915/gt/intel_sa_media.c b/drivers/gpu/drm/i915/gt/intel_sa_media.c
index 2945526d52d1..fb260d1ec360 100644
--- a/drivers/gpu/drm/i915/gt/intel_sa_media.c
+++ b/drivers/gpu/drm/i915/gt/intel_sa_media.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "i915_drv.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index 9501d323d0d3..656a499b2706 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -5,6 +5,8 @@
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_perf_types.h"
#include "intel_engine_regs.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
index c2ee5e1826b5..1dc8205bc64d 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -7,6 +7,8 @@
#include <linux/bitmap.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_regs.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index b9640212d659..843f72829a24 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_cache.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_internal.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index 57308c4d664a..85b43f9b9d95 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -9,6 +9,7 @@
#include <linux/lockdep.h>
#include "i915_active.h"
+#include "i915_list_util.h"
#include "i915_syncmap.h"
#include "intel_timeline_types.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_tlb.h b/drivers/gpu/drm/i915/gt/intel_tlb.h
index 337327af92ac..ec7612216248 100644
--- a/drivers/gpu/drm/i915/gt/intel_tlb.h
+++ b/drivers/gpu/drm/i915/gt/intel_tlb.h
@@ -18,7 +18,7 @@ void intel_gt_fini_tlb(struct intel_gt *gt);
static inline u32 intel_gt_tlb_seqno(const struct intel_gt *gt)
{
- return seqprop_sequence(&gt->tlb.seqno);
+ return raw_read_seqcount(&gt->tlb.seqno);
}
static inline u32 intel_gt_next_invalidate_tlb_full(const struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_wopcm.c b/drivers/gpu/drm/i915/gt/intel_wopcm.c
index 7ebbcc191c2d..1b26ff6488b3 100644
--- a/drivers/gpu/drm/i915/gt/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/gt/intel_wopcm.c
@@ -3,6 +3,8 @@
* Copyright © 2017-2019 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "intel_wopcm.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 5a95f06900b5..ece88c612e27 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_mmio_range.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
@@ -337,12 +338,26 @@ static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
}
static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
+ /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /*
+ * BSpec says this must be set, even though
+ * WaDisable4x2SubspanOptimization:ivb,hsw
+ * WaDisable4x2SubspanOptimization isn't listed for VLV.
+ */
+ wa_masked_en(wal,
+ CACHE_MODE_1,
+ PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
}
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -2567,18 +2582,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
RING_MODE_GEN7(RENDER_RING_BASE),
GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
- /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
- wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
-
- /*
- * BSpec says this must be set, even though
- * WaDisable4x2SubspanOptimization:ivb,hsw
- * WaDisable4x2SubspanOptimization isn't listed for VLV.
- */
- wa_masked_en(wal,
- CACHE_MODE_1,
- PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
-
/*
* BSpec recommends 8x4 when MSAA is used,
* however in practice 16x4 seems fastest.
@@ -2645,9 +2648,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN6_WIZ_HASHING_MASK,
GEN6_WIZ_HASHING_16x4);
- /* WaDisable_RenderCache_OperationalFlush:snb */
- wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
-
/*
* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
@@ -2924,7 +2924,7 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(&engine->wa_list);
}
-static const struct i915_range mcr_ranges_gen8[] = {
+static const struct i915_mmio_range mcr_ranges_gen8[] = {
{ .start = 0x5500, .end = 0x55ff },
{ .start = 0x7000, .end = 0x7fff },
{ .start = 0x9400, .end = 0x97ff },
@@ -2933,7 +2933,7 @@ static const struct i915_range mcr_ranges_gen8[] = {
{},
};
-static const struct i915_range mcr_ranges_gen12[] = {
+static const struct i915_mmio_range mcr_ranges_gen12[] = {
{ .start = 0x8150, .end = 0x815f },
{ .start = 0x9520, .end = 0x955f },
{ .start = 0xb100, .end = 0xb3ff },
@@ -2942,7 +2942,7 @@ static const struct i915_range mcr_ranges_gen12[] = {
{},
};
-static const struct i915_range mcr_ranges_xehp[] = {
+static const struct i915_mmio_range mcr_ranges_xehp[] = {
{ .start = 0x4000, .end = 0x4aff },
{ .start = 0x5200, .end = 0x52ff },
{ .start = 0x5400, .end = 0x7fff },
@@ -2961,7 +2961,7 @@ static const struct i915_range mcr_ranges_xehp[] = {
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
- const struct i915_range *mcr_ranges;
+ const struct i915_mmio_range *mcr_ranges;
int i;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 5eb46700dc4e..ab76703f6e8b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_context.c
+++ b/drivers/gpu/drm/i915/gt/selftest_context.c
@@ -3,6 +3,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_selftest.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 0454eb1814bb..a06b397b6d42 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -5,12 +5,15 @@
#include <linux/prime_numbers.h>
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_reset.h"
#include "gt/selftest_engine_heartbeat.h"
+#include "i915_jiffies.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index f057c16410e7..4f252f704975 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -904,7 +904,7 @@ static void active_engine(struct kthread_work *work)
arg->result = PTR_ERR(ce[count]);
pr_err("[%s] Create context #%ld failed: %d!\n",
engine->name, count, arg->result);
- while (--count)
+ while (count--)
intel_context_put(ce[count]);
return;
}
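
The loop fix above is subtle enough to spell out: on a failure at index count, the contexts ce[0]..ce[count - 1] must be unwound. With the pre-decrement form, ce[0] was never put (and a failure on the very first allocation would step outside the array); the post-decrement form releases exactly the right set:

/*
 * while (--count)  puts ce[count-1] .. ce[1], leaking ce[0];
 * while (count--)  puts ce[count-1] .. ce[0].
 */
while (count--)
	intel_context_put(ce[count]);
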
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 54bc447efce0..fdf0e9858607 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -710,7 +710,14 @@ static int threaded_migrate(struct intel_migrate *migrate,
thread[i].tsk = tsk;
}
- msleep(10 * n_cpus); /* start all threads before we kthread_stop() */
+ /*
+ * Start all threads before we kthread_stop().
+ * In CHV / BXT+VTD environments, where VMA pinning is committed
+ * asynchronously, empirically determined 100ms delay is needed
+ * to avoid stopping threads that may still wait for completion of
+ * intel_ggtt_bind_vma and fail with -ERESTARTSYS when interrupted.
+ */
+ msleep((intel_vm_no_concurrent_access_wa(migrate->context->vm->i915) ? 100 : 10) * n_cpus);
for (i = 0; i < n_cpus; ++i) {
struct task_struct *tsk = thread[i].tsk;
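
intel_vm_no_concurrent_access_wa() is true on CHV and on VT-d-enabled BXT+ parts, i.e. exactly the configurations the comment above describes, so the wait scales to 100 ms per CPU only where GGTT binding is committed asynchronously. The same logic, unfolded as a sketch (names as in i915_drv.h):

/* 10 ms per CPU normally; 100 ms where VMA binding is asynchronous. */
unsigned int per_cpu_ms =
	intel_vm_no_concurrent_access_wa(migrate->context->vm->i915) ? 100 : 10;

msleep(per_cpu_ms * n_cpus);
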
diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c
index 69ed946a39e5..a5184f09d1de 100644
--- a/drivers/gpu/drm/i915/gt/selftest_tlb.c
+++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c
@@ -3,17 +3,17 @@
* Copyright © 2022 Intel Corporation
*/
-#include "i915_selftest.h"
-
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gen8_engine_cs.h"
#include "i915_gem_ww.h"
+#include "i915_selftest.h"
+#include "i915_wait_util.h"
+#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
-#include "intel_context.h"
#include "intel_gt.h"
#include "intel_ring.h"
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index aab2759067d2..4a81bc396b21 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -7,6 +7,7 @@
#include <linux/sysfs.h>
#include "i915_drv.h"
+#include "i915_timer_util.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index d8edd7c054c8..e7444ebc373e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -10,11 +10,13 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_print.h"
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_gsc_proxy.h"
#include "intel_gsc_uc.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
/*
* GSC proxy:
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
index 2fde5c360cff..dabb870dcdb1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
@@ -3,11 +3,15 @@
* Copyright © 2023 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_ring.h"
+
+#include "i915_wait_util.h"
#include "intel_gsc_uc_heci_cmd_submit.h"
struct gsc_heci_pkt {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index f360f020d8f1..52ec4421a211 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -8,15 +8,17 @@
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
+
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
-#include "i915_drv.h"
-#include "i915_irq.h"
-#include "i915_reg.h"
/**
* DOC: GuC
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 0d5197c0824a..2c651ec024ef 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -5,15 +5,16 @@
#include <linux/circ_buf.h>
#include <linux/ktime.h>
-#include <linux/time64.h>
#include <linux/string_helpers.h>
+#include <linux/time64.h>
#include <linux/timekeeping.h>
#include "i915_drv.h"
+#include "i915_wait_util.h"
#include "intel_guc_ct.h"
#include "intel_guc_print.h"
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
enum {
CT_DEAD_ALIVE = 0,
CT_DEAD_SETUP,
@@ -144,7 +145,7 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
spin_lock_init(&ct->requests.lock);
INIT_LIST_HEAD(&ct->requests.pending);
INIT_LIST_HEAD(&ct->requests.incoming);
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
INIT_WORK(&ct->dead_ct_worker, ct_dead_ct_worker_func);
#endif
INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
@@ -373,7 +374,7 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
ct->enabled = true;
ct->stall_time = KTIME_MAX;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
ct->dead_ct_reported = false;
ct->dead_ct_reason = CT_DEAD_ALIVE;
#endif
@@ -1324,9 +1325,16 @@ static int ct_receive(struct intel_guc_ct *ct)
static void ct_try_receive_message(struct intel_guc_ct *ct)
{
+ struct intel_guc *guc = ct_to_guc(ct);
int ret;
- if (GEM_WARN_ON(!ct->enabled))
+ if (!ct->enabled) {
+ GEM_WARN_ON(!guc_to_gt(guc)->uc.reset_in_progress);
+ return;
+ }
+
+ /* When interrupts are disabled, message handling is not expected */
+ if (!guc->interrupts.enabled)
return;
ret = ct_receive(ct);
@@ -1377,7 +1385,7 @@ void intel_guc_ct_print_info(struct intel_guc_ct *ct,
ct->ctbs.recv.desc->tail);
}
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
static void ct_dead_ct_worker_func(struct work_struct *w)
{
struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, dead_ct_worker);
@@ -1386,6 +1394,9 @@ static void ct_dead_ct_worker_func(struct work_struct *w)
if (ct->dead_ct_reported)
return;
+ if (i915_error_injected())
+ return;
+
ct->dead_ct_reported = true;
guc_info(guc, "CTB is dead - reason=0x%X\n", ct->dead_ct_reason);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 2c4bb9a941be..e9a6ec4e6d38 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -97,7 +97,7 @@ struct intel_guc_ct {
/** @stall_time: time of first time a CTB submission is stalled */
ktime_t stall_time;
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC)
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
int dead_ct_reason;
bool dead_ct_reported;
struct work_struct dead_ct_worker;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index e7ccfa520df3..b1bda1b84f0a 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -13,9 +13,11 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"
+
+#include "i915_drv.h"
+#include "i915_wait_util.h"
#include "intel_guc_fw.h"
#include "intel_guc_print.h"
-#include "i915_drv.h"
static void guc_prepare_xfer(struct intel_gt *gt)
{
@@ -46,6 +48,14 @@ static void guc_prepare_xfer(struct intel_gt *gt)
/* allows for 5us (in 10ns units) before GT can go to RC6 */
intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
}
+
+ /*
+ * Starting from IP 12.50, we need to enable the mirroring of GuC
+ * internal state to debug registers. This is always enabled on previous
+ * IPs.
+ */
+ if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50))
+ intel_uncore_rmw(uncore, GUC_SHIM_CONTROL2, 0, GUC_ENABLE_DEBUG_REG);
}
static int guc_xfer_rsa_mmio(struct intel_uc_fw *guc_fw,
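
A note on the helper used above: intel_uncore_rmw(uncore, reg, clear, set) is a read-modify-write that applies the clear mask before OR-ing in the set mask, and only writes back if the value changed; with clear == 0, as here, it purely turns GUC_ENABLE_DEBUG_REG on. The core of the pattern, sketched:

/* new = (old & ~clear) | set; written back only when it differs. */
static u32 rmw_value(u32 old, u32 clear, u32 set)
{
	return (old & ~clear) | set;
}
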
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 09a64f224c49..cdff48920ee6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -6,6 +6,8 @@
#include <linux/debugfs.h>
#include <linux/string_helpers.h>
+#include <drm/drm_managed.h>
+
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_irq.h"
@@ -511,7 +513,11 @@ static void guc_log_relay_unmap(struct intel_guc_log *log)
void intel_guc_log_init_early(struct intel_guc_log *log)
{
- mutex_init(&log->relay.lock);
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
+ drmm_mutex_init(&i915->drm, &log->relay.lock);
+ drmm_mutex_init(&i915->drm, &log->guc_lock);
INIT_WORK(&log->relay.flush_work, copy_debug_logs_work);
log->relay.started = false;
}
@@ -677,7 +683,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX)
return -EINVAL;
- mutex_lock(&i915->drm.struct_mutex);
+ mutex_lock(&log->guc_lock);
if (log->level == level)
goto out_unlock;
@@ -695,7 +701,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
log->level = level;
out_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
+ mutex_unlock(&log->guc_lock);
return ret;
}
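
The drmm_mutex_init() calls above tie both locks to the drm_device lifetime, so no explicit mutex_destroy() is needed on driver teardown. A minimal usage sketch, assuming the <drm/drm_managed.h> API (note the call can fail, since it registers a managed release action; the patch ignores that return value):

#include <linux/mutex.h>
#include <drm/drm_managed.h>

static int example_lock_init(struct drm_device *drm, struct mutex *lock)
{
	/* Destroyed automatically when @drm is released. */
	return drmm_mutex_init(drm, lock);
}
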
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
index 02127703be80..13cb93ad0710 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
@@ -42,6 +42,14 @@ enum {
struct intel_guc_log {
u32 level;
+ /*
+ * Protects concurrent access and modification of intel_guc_log->level.
+ *
+ * This lock replaces the legacy struct_mutex usage in the
+ * intel_guc_log system.
+ */
+ struct mutex guc_lock;
+
/* Allocation settings */
struct {
s32 bytes; /* Size in bytes */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
index 3fd798837502..f73dab527547 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h
@@ -96,6 +96,7 @@
#define GUC_GEN10_SHIM_WC_ENABLE (1<<21)
#define GUC_SHIM_CONTROL2 _MMIO(0xc068)
+#define GUC_ENABLE_DEBUG_REG (1<<11)
#define GUC_IS_PRIVILEGED (1<<29)
#define GSC_LOADS_HUC (1<<30)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index d5ee6e5e1443..fa9af08f9708 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -3,17 +3,20 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/drm_cache.h>
#include <linux/string_helpers.h>
+#include <drm/drm_cache.h>
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_regs.h"
+#include "gt/intel_rps.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_guc_slpc.h"
+#include "i915_wait_util.h"
#include "intel_guc_print.h"
+#include "intel_guc_slpc.h"
#include "intel_mchbar_regs.h"
-#include "gt/intel_gt.h"
-#include "gt/intel_gt_regs.h"
-#include "gt/intel_rps.h"
/**
* DOC: SLPC - Dynamic Frequency management
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 127316d2c8aa..68f2b8d363ac 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -25,16 +25,16 @@
#include "gt/intel_mocs.h"
#include "gt/intel_ring.h"
+#include "i915_drv.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "i915_trace.h"
+#include "i915_wait_util.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_print.h"
#include "intel_guc_submission.h"
-#include "i915_drv.h"
-#include "i915_reg.h"
-#include "i915_irq.h"
-#include "i915_trace.h"
-
/**
* DOC: GuC-based command submission
*
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 62d14f82256f..8cc6e712b0f7 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -34,6 +34,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "gt/intel_ggtt_fencing.h"
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 9bafac1eaf48..295a7b5e1d7c 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -31,6 +31,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "gvt.h"
#include "intel_pci_config.h"
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index a91e23c22ea1..df04e4ead8ea 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -36,6 +36,8 @@
#include <linux/slab.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "display/intel_display_regs.h"
@@ -1921,7 +1923,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (!bb)
return -ENOMEM;
- bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
+ bb->ppgtt = s->buf_addr_type != GTT_BUFFER;
/*
* The start_offset stores the batch buffer's start gma's
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 673534f061ef..415422b5943c 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -194,9 +194,9 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_minor *minor = gvt->gt->i915->drm.primary;
+ struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root;
- if (minor->debugfs_root && gvt->debugfs_root) {
+ if (debugfs_root && gvt->debugfs_root) {
debugfs_remove_recursive(vgpu->debugfs);
vgpu->debugfs = NULL;
}
@@ -208,9 +208,9 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->gt->i915->drm.primary;
+ struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root;
- gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
+ gvt->debugfs_root = debugfs_create_dir("gvt", debugfs_root);
debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
&gvt->mmio.num_tracked_mmio);
@@ -222,9 +222,9 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
*/
void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->gt->i915->drm.primary;
+ struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root;
- if (minor->debugfs_root) {
+ if (debugfs_root) {
debugfs_remove_recursive(gvt->debugfs_root);
gvt->debugfs_root = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 74197e337585..06517d1f07a2 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -33,6 +33,7 @@
*/
#include <drm/display/drm_dp.h>
+#include <drm/drm_print.h>
#include "i915_drv.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 4f599af766b0..92506c80322d 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -33,6 +33,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_dmabuf.h"
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 2031b97de2b7..30e414381af3 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -33,6 +33,7 @@
*/
#include <drm/display/drm_dp.h>
+#include <drm/drm_print.h>
#include "display/intel_dp_aux_regs.h"
#include "display/intel_gmbus.h"
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index ae9b0ded3651..076d9139edc6 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -33,6 +33,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index f446f73f0fe2..36ea12ade849 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -37,6 +37,7 @@
*/
#include <drm/display/drm_dp.h>
+#include <drm/drm_print.h>
#include "i915_drv.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index a956da68e6bd..3e66269bc4ee 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -31,6 +31,8 @@
#include <linux/eventfd.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "display/intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 69830a5c49d3..3abc9206f1a8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -48,6 +48,7 @@
#include <linux/nospec.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "i915_drv.h"
#include "intel_gvt.h"
@@ -1140,6 +1141,122 @@ static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
return func(vgpu, index, start, count, flags, data);
}
+static int intel_vgpu_ioctl_get_region_info(struct vfio_device *vfio_dev,
+ struct vfio_region_info *info,
+ struct vfio_info_cap *caps)
+{
+ struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
+ struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+ int nr_areas = 1;
+ int cap_type_id;
+ unsigned int i;
+ int ret;
+
+ switch (info->index) {
+ case VFIO_PCI_CONFIG_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = vgpu->gvt->device_info.cfg_space_size;
+ info->flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR0_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = vgpu->cfg_space.bar[info->index].size;
+ if (!info->size) {
+ info->flags = 0;
+ break;
+ }
+
+ info->flags = VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ break;
+ case VFIO_PCI_BAR1_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = 0;
+ info->flags = 0;
+ break;
+ case VFIO_PCI_BAR2_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->flags = VFIO_REGION_INFO_FLAG_CAPS |
+ VFIO_REGION_INFO_FLAG_MMAP |
+ VFIO_REGION_INFO_FLAG_READ |
+ VFIO_REGION_INFO_FLAG_WRITE;
+ info->size = gvt_aperture_sz(vgpu->gvt);
+
+ sparse = kzalloc(struct_size(sparse, areas, nr_areas),
+ GFP_KERNEL);
+ if (!sparse)
+ return -ENOMEM;
+
+ sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->header.version = 1;
+ sparse->nr_areas = nr_areas;
+ cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
+ sparse->areas[0].offset =
+ PAGE_ALIGN(vgpu_aperture_offset(vgpu));
+ sparse->areas[0].size = vgpu_aperture_sz(vgpu);
+ break;
+
+ case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = 0;
+ info->flags = 0;
+
+ gvt_dbg_core("get region info bar:%d\n", info->index);
+ break;
+
+ case VFIO_PCI_ROM_REGION_INDEX:
+ case VFIO_PCI_VGA_REGION_INDEX:
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = 0;
+ info->flags = 0;
+
+ gvt_dbg_core("get region info index:%d\n", info->index);
+ break;
+ default: {
+ struct vfio_region_info_cap_type cap_type = {
+ .header.id = VFIO_REGION_INFO_CAP_TYPE,
+ .header.version = 1
+ };
+
+ if (info->index >= VFIO_PCI_NUM_REGIONS + vgpu->num_regions)
+ return -EINVAL;
+ info->index = array_index_nospec(
+ info->index, VFIO_PCI_NUM_REGIONS + vgpu->num_regions);
+
+ i = info->index - VFIO_PCI_NUM_REGIONS;
+
+ info->offset = VFIO_PCI_INDEX_TO_OFFSET(info->index);
+ info->size = vgpu->region[i].size;
+ info->flags = vgpu->region[i].flags;
+
+ cap_type.type = vgpu->region[i].type;
+ cap_type.subtype = vgpu->region[i].subtype;
+
+ ret = vfio_info_add_capability(caps, &cap_type.header,
+ sizeof(cap_type));
+ if (ret)
+ return ret;
+ }
+ }
+
+ if ((info->flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
+ ret = -EINVAL;
+ if (cap_type_id == VFIO_REGION_INFO_CAP_SPARSE_MMAP) {
+ ret = vfio_info_add_capability(
+ caps, &sparse->header,
+ struct_size(sparse, areas, sparse->nr_areas));
+ }
+ if (ret) {
+ kfree(sparse);
+ return ret;
+ }
+ }
+
+ kfree(sparse);
+ return 0;
+}
+
static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
unsigned long arg)
{
@@ -1168,157 +1285,6 @@ static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
- } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
- struct vfio_region_info info;
- struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- unsigned int i;
- int ret;
- struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
- int nr_areas = 1;
- int cap_type_id;
-
- minsz = offsetofend(struct vfio_region_info, offset);
-
- if (copy_from_user(&info, (void __user *)arg, minsz))
- return -EFAULT;
-
- if (info.argsz < minsz)
- return -EINVAL;
-
- switch (info.index) {
- case VFIO_PCI_CONFIG_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->gvt->device_info.cfg_space_size;
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- break;
- case VFIO_PCI_BAR0_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->cfg_space.bar[info.index].size;
- if (!info.size) {
- info.flags = 0;
- break;
- }
-
- info.flags = VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- break;
- case VFIO_PCI_BAR1_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = 0;
- info.flags = 0;
- break;
- case VFIO_PCI_BAR2_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.flags = VFIO_REGION_INFO_FLAG_CAPS |
- VFIO_REGION_INFO_FLAG_MMAP |
- VFIO_REGION_INFO_FLAG_READ |
- VFIO_REGION_INFO_FLAG_WRITE;
- info.size = gvt_aperture_sz(vgpu->gvt);
-
- sparse = kzalloc(struct_size(sparse, areas, nr_areas),
- GFP_KERNEL);
- if (!sparse)
- return -ENOMEM;
-
- sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
- sparse->header.version = 1;
- sparse->nr_areas = nr_areas;
- cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
- sparse->areas[0].offset =
- PAGE_ALIGN(vgpu_aperture_offset(vgpu));
- sparse->areas[0].size = vgpu_aperture_sz(vgpu);
- break;
-
- case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = 0;
- info.flags = 0;
-
- gvt_dbg_core("get region info bar:%d\n", info.index);
- break;
-
- case VFIO_PCI_ROM_REGION_INDEX:
- case VFIO_PCI_VGA_REGION_INDEX:
- info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = 0;
- info.flags = 0;
-
- gvt_dbg_core("get region info index:%d\n", info.index);
- break;
- default:
- {
- struct vfio_region_info_cap_type cap_type = {
- .header.id = VFIO_REGION_INFO_CAP_TYPE,
- .header.version = 1 };
-
- if (info.index >= VFIO_PCI_NUM_REGIONS +
- vgpu->num_regions)
- return -EINVAL;
- info.index =
- array_index_nospec(info.index,
- VFIO_PCI_NUM_REGIONS +
- vgpu->num_regions);
-
- i = info.index - VFIO_PCI_NUM_REGIONS;
-
- info.offset =
- VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->region[i].size;
- info.flags = vgpu->region[i].flags;
-
- cap_type.type = vgpu->region[i].type;
- cap_type.subtype = vgpu->region[i].subtype;
-
- ret = vfio_info_add_capability(&caps,
- &cap_type.header,
- sizeof(cap_type));
- if (ret)
- return ret;
- }
- }
-
- if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
- switch (cap_type_id) {
- case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
- ret = vfio_info_add_capability(&caps,
- &sparse->header,
- struct_size(sparse, areas,
- sparse->nr_areas));
- if (ret) {
- kfree(sparse);
- return ret;
- }
- break;
- default:
- kfree(sparse);
- return -EINVAL;
- }
- }
-
- if (caps.size) {
- info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
- if (info.argsz < sizeof(info) + caps.size) {
- info.argsz = sizeof(info) + caps.size;
- info.cap_offset = 0;
- } else {
- vfio_info_cap_shift(&caps, sizeof(info));
- if (copy_to_user((void __user *)arg +
- sizeof(info), caps.buf,
- caps.size)) {
- kfree(caps.buf);
- kfree(sparse);
- return -EFAULT;
- }
- info.cap_offset = sizeof(info);
- }
-
- kfree(caps.buf);
- }
-
- kfree(sparse);
- return copy_to_user((void __user *)arg, &info, minsz) ?
- -EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
struct vfio_irq_info info;
@@ -1361,21 +1327,27 @@ static long intel_vgpu_ioctl(struct vfio_device *vfio_dev, unsigned int cmd,
if (copy_from_user(&hdr, (void __user *)arg, minsz))
return -EFAULT;
+ if (!is_power_of_2(hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) ||
+ !is_power_of_2(hdr.flags & VFIO_IRQ_SET_ACTION_TYPE_MASK))
+ return -EINVAL;
+
if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
+ if (!hdr.count)
+ return -EINVAL;
+
ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
VFIO_PCI_NUM_IRQS, &data_size);
if (ret) {
- gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
- return -EINVAL;
- }
- if (data_size) {
- data = memdup_user((void __user *)(arg + minsz),
- data_size);
- if (IS_ERR(data))
- return PTR_ERR(data);
+ gvt_vgpu_err("vfio_set_irqs_validate_and_prepare failed\n");
+ return ret;
}
+
+ data = memdup_user((void __user *)(arg + minsz),
+ data_size);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
}
ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
@@ -1475,6 +1447,7 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
.write = intel_vgpu_write,
.mmap = intel_vgpu_mmap,
.ioctl = intel_vgpu_ioctl,
+ .get_region_info_caps = intel_vgpu_ioctl_get_region_info,
.dma_unmap = intel_vgpu_dma_unmap,
.bind_iommufd = vfio_iommufd_emulated_bind,
.unbind_iommufd = vfio_iommufd_emulated_unbind,
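
The is_power_of_2() checks added to the VFIO_DEVICE_SET_IRQS path above enforce that exactly one DATA_* type and one ACTION_* type is selected; since is_power_of_2(0) is false, "no bit set" is rejected as well. Illustrated:

#include <linux/log2.h>
#include <linux/vfio.h>

/* True iff exactly one VFIO_IRQ_SET_DATA_* bit is set in @flags. */
static bool has_one_data_type(u32 flags)
{
	return is_power_of_2(flags & VFIO_IRQ_SET_DATA_TYPE_MASK);
}
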
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index da1135fa7cda..214eb7effa31 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -34,6 +34,9 @@
*/
#include <linux/vmalloc.h>
+
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "display/intel_display_regs.h"
@@ -49,7 +52,7 @@
* @gpa: guest physical address
*
* Returns:
- * Zero on success, negative error code if failed
+ * The MMIO offset of the given GPA
*/
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
{
@@ -58,7 +61,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
}
#define reg_is_mmio(gvt, reg) \
- (reg >= 0 && reg < gvt->device_info.mmio_size)
+ (reg < gvt->device_info.mmio_size)
#define reg_is_gtt(gvt, reg) \
(reg >= gvt->device_info.gtt_start_offset \
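
The reg_is_mmio() change above removes a tautology: the offset is unsigned, so reg >= 0 can never be false, and compilers flag the comparison under -Wtype-limits. The surviving half is the whole check:

/* For an unsigned offset, 'off >= 0' is always true. */
static bool off_in_range(unsigned long off, unsigned long size)
{
	return off < size;
}
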
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 2f7208843367..d4e9d485d382 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -33,14 +33,18 @@
*
*/
-#include "i915_drv.h"
-#include "i915_reg.h"
+#include <drm/drm_print.h>
+
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_ring.h"
+
#include "gvt.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "trace.h"
#define GEN9_MOCS_SIZE 64
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 6e87c10bc454..63ad1fed525a 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,6 +35,8 @@
#include <linux/kthread.h>
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "gt/intel_execlists_submission.h"
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 11260392234a..c49e4bf95a30 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -31,6 +31,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 0dbc4e289300..6b0c1162505a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -257,10 +257,9 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
* claimed the cache and we know that it does not match our
* idx. If, and only if, the timeline is currently zero is it
* worth competing to claim it atomically for ourselves (for
- * only the winner of that race will cmpxchg return the old
- * value of 0).
+ * only the winner of that race will cmpxchg succeed).
*/
- if (!cached && !cmpxchg64(&it->timeline, 0, idx))
+ if (!cached && try_cmpxchg64(&it->timeline, &cached, idx))
return it;
}
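
try_cmpxchg64(ptr, &expected, new) returns true only if *ptr still held the expected value and was swapped; on failure it writes the observed value back through &expected. The branch above is only reached with cached == 0, so the call claims the slot precisely when it->timeline was still 0. The semantics, sketched:

#include <linux/atomic.h>

static bool claim_slot(u64 *timeline, u64 idx)
{
	u64 expected = 0;

	/* On failure, @expected is updated to the observed value. */
	return try_cmpxchg64(timeline, &expected, idx);
}
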
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 2905df83e180..7654f1be8d3b 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -28,6 +28,7 @@
#include <linux/highmem.h>
#include <drm/drm_cache.h>
+#include <drm/drm_print.h>
#include "gt/intel_engine.h"
#include "gt/intel_engine_regs.h"
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
index 24e5bb8a670e..3cb615ffa96d 100644
--- a/drivers/gpu/drm/i915/i915_config.c
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include "i915_config.h"
-#include "i915_utils.h"
+#include "i915_jiffies.h"
unsigned long
i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 967c0501e91e..42f6b44f0027 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,12 +26,13 @@
*
*/
+#include <linux/debugfs.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/string_helpers.h>
-#include <linux/debugfs.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
@@ -54,6 +55,7 @@
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_scheduler.h"
+#include "i915_wait_util.h"
#include "intel_mchbar_regs.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -720,26 +722,24 @@ static const struct i915_debugfs_files {
{"i915_gem_drop_caches", &i915_drop_caches_fops},
};
-void i915_debugfs_register(struct drm_i915_private *dev_priv)
+void i915_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = dev_priv->drm.primary;
+ struct dentry *debugfs_root = i915->drm.debugfs_root;
int i;
- i915_debugfs_params(dev_priv);
+ i915_debugfs_params(i915);
- debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
- to_i915(minor->dev), &i915_forcewake_fops);
+ debugfs_create_file("i915_forcewake_user", S_IRUSR, debugfs_root,
+ i915, &i915_forcewake_fops);
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
- debugfs_create_file(i915_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
- minor->debugfs_root,
- to_i915(minor->dev),
+ debugfs_create_file(i915_debugfs_files[i].name, S_IRUGO | S_IWUSR,
+ debugfs_root, i915,
i915_debugfs_files[i].fops);
}
drm_debugfs_create_files(i915_debugfs_list,
ARRAY_SIZE(i915_debugfs_list),
- minor->debugfs_root, minor);
+ debugfs_root, i915->drm.primary);
- i915_gpu_error_debugfs_register(dev_priv);
+ i915_gpu_error_debugfs_register(i915);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 33d2dcb0de65..89ab5eb14779 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -248,11 +248,11 @@ i915_debugfs_create_charp(const char *name, umode_t mode,
/* add a subdirectory with files for each i915 param */
struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct dentry *debugfs_root = i915->drm.debugfs_root;
struct i915_params *params = &i915->params;
struct dentry *dir;
- dir = debugfs_create_dir("i915_params", minor->debugfs_root);
+ dir = debugfs_create_dir("i915_params", debugfs_root);
if (IS_ERR(dir))
return dir;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index c6263c6d3384..c97b76771917 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -46,18 +46,22 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include <drm/intel/display_member.h>
+#include <drm/intel/display_parent_interface.h>
#include "display/i9xx_display_sr.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_crtc.h"
-#include "display/intel_display_core.h"
+#include "display/intel_display_device.h"
#include "display/intel_display_driver.h"
+#include "display/intel_display_power.h"
#include "display/intel_dmc.h"
#include "display/intel_dp.h"
#include "display/intel_dpt.h"
#include "display/intel_encoder.h"
#include "display/intel_fbdev.h"
+#include "display/intel_gmbus.h"
#include "display/intel_hotplug.h"
#include "display/intel_opregion.h"
#include "display/intel_overlay.h"
@@ -735,6 +739,18 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
"DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}
+static const struct intel_display_parent_interface parent = {
+ .rpm = &i915_display_rpm_interface,
+};
+
+const struct intel_display_parent_interface *i915_driver_parent_interface(void)
+{
+ return &parent;
+}
+
+/* Ensure drm and display members are placed properly. */
+INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct drm_i915_private, drm, display);
+
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -756,7 +772,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, match_info);
- display = intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev, &parent);
if (IS_ERR(display))
return ERR_CAST(display);
@@ -976,8 +992,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_runtime_pm_disable(&i915->runtime_pm);
intel_power_domains_disable(display);
- drm_client_dev_suspend(&i915->drm, false);
- if (HAS_DISPLAY(i915)) {
+ drm_client_dev_suspend(&i915->drm);
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&i915->drm);
intel_display_driver_disable_user_access(display);
@@ -989,7 +1005,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_irq_suspend(i915);
intel_hpd_cancel_work(display);
- if (HAS_DISPLAY(i915))
+ if (intel_display_device_present(display))
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
@@ -1051,7 +1067,6 @@ static int i915_drm_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_display *display = dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
pci_power_t opregion_target_state;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1059,20 +1074,18 @@ static int i915_drm_suspend(struct drm_device *dev)
/* We do a lot of poking in a lot of registers, make sure they work
* properly. */
intel_power_domains_disable(display);
- drm_client_dev_suspend(dev, false);
- if (HAS_DISPLAY(dev_priv)) {
+ drm_client_dev_suspend(dev);
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(dev);
intel_display_driver_disable_user_access(display);
}
- pci_save_state(pdev);
-
intel_display_driver_suspend(display);
intel_irq_suspend(dev_priv);
intel_hpd_cancel_work(display);
- if (HAS_DISPLAY(dev_priv))
+ if (intel_display_device_present(display))
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
@@ -1101,7 +1114,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_display *display = dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
struct intel_gt *gt;
int ret, i;
@@ -1122,11 +1134,21 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
if (ret) {
drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
intel_display_power_resume_early(display);
-
- goto out;
}
- pci_disable_device(pdev);
+ enable_rpm_wakeref_asserts(rpm);
+
+ if (!dev_priv->uncore.user_forcewake_count)
+ intel_runtime_pm_driver_release(rpm);
+
+ return ret;
+}
+
+static int i915_drm_suspend_noirq(struct drm_device *dev, bool hibernation)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+
/*
* During hibernation on some platforms the BIOS may try to access
* the device even though it's already in D3 and hang the machine. So
@@ -1138,21 +1160,20 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
* Lenovo Thinkpad X301, X61s, X60, T60, X41
* Fujitsu FSC S7110
* Acer Aspire 1830T
+ *
+ * pci_save_state() prevents drivers/pci from
+ * automagically putting the device into D3.
*/
- if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
- pci_set_power_state(pdev, PCI_D3hot);
-
-out:
- enable_rpm_wakeref_asserts(rpm);
- if (!dev_priv->uncore.user_forcewake_count)
- intel_runtime_pm_driver_release(rpm);
+ if (hibernation && GRAPHICS_VER(dev_priv) < 6)
+ pci_save_state(pdev);
- return ret;
+ return 0;
}
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
pm_message_t state)
{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int error;
if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
@@ -1166,7 +1187,14 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
if (error)
return error;
- return i915_drm_suspend_late(&i915->drm, false);
+ error = i915_drm_suspend_late(&i915->drm, false);
+ if (error)
+ return error;
+
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
}
static int i915_drm_resume(struct drm_device *dev)
@@ -1219,7 +1247,7 @@ static int i915_drm_resume(struct drm_device *dev)
*/
intel_irq_resume(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ if (intel_display_device_present(display))
drm_mode_config_reset(dev);
i915_gem_resume(dev_priv);
@@ -1228,14 +1256,14 @@ static int i915_drm_resume(struct drm_device *dev)
intel_clock_gating_init(dev_priv);
- if (HAS_DISPLAY(dev_priv))
+ if (intel_display_device_present(display))
intel_display_driver_resume_access(display);
intel_hpd_init(display);
intel_display_driver_resume(display);
- if (HAS_DISPLAY(dev_priv)) {
+ if (intel_display_device_present(display)) {
intel_display_driver_enable_user_access(display);
drm_kms_helper_poll_enable(dev);
}
@@ -1243,7 +1271,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_opregion_resume(display);
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
intel_power_domains_enable(display);
@@ -1258,7 +1286,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_display *display = dev_priv->display;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
struct intel_gt *gt;
int ret, i;
@@ -1272,41 +1299,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
* similar so that power domains can be employed.
*/
- /*
- * Note that we need to set the power state explicitly, since we
- * powered off the device during freeze and the PCI core won't power
- * it back up for us during thaw. Powering off the device during
- * freeze is not a hard requirement though, and during the
- * suspend/resume phases the PCI core makes sure we get here with the
- * device powered on. So in case we change our freeze logic and keep
- * the device powered we can also remove the following set power state
- * call.
- */
- ret = pci_set_power_state(pdev, PCI_D0);
- if (ret) {
- drm_err(&dev_priv->drm,
- "failed to set PCI D0 power state (%d)\n", ret);
- return ret;
- }
-
- /*
- * Note that pci_enable_device() first enables any parent bridge
- * device and only then sets the power state for this device. The
- * bridge enabling is a nop though, since bridge devices are resumed
- * first. The order of enabling power and enabling the device is
- * imposed by the PCI core as described above, so here we preserve the
- * same order for the freeze/thaw phases.
- *
- * TODO: eventually we should remove pci_disable_device() /
- * pci_enable_device() from suspend/resume. Due to how they
- * depend on the device enable refcount we can't anyway depend on them
- * disabling/enabling the device.
- */
- if (pci_enable_device(pdev))
- return -EIO;
-
- pci_set_master(pdev);
-
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
ret = vlv_resume_prepare(dev_priv, false);
@@ -1326,11 +1318,18 @@ static int i915_drm_resume_early(struct drm_device *dev)
int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
int ret;
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (ret)
+ return ret;
+
+ pci_restore_state(pdev);
+
ret = i915_drm_resume_early(&i915->drm);
if (ret)
return ret;
@@ -1387,6 +1386,16 @@ static int i915_pm_suspend_late(struct device *kdev)
return i915_drm_suspend_late(&i915->drm, false);
}
+static int i915_pm_suspend_noirq(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_noirq(&i915->drm, false);
+}
+
static int i915_pm_poweroff_late(struct device *kdev)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
@@ -1397,6 +1406,16 @@ static int i915_pm_poweroff_late(struct device *kdev)
return i915_drm_suspend_late(&i915->drm, true);
}
+static int i915_pm_poweroff_noirq(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_noirq(&i915->drm, true);
+}
+
static int i915_pm_resume_early(struct device *kdev)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
@@ -1662,24 +1681,25 @@ const struct dev_pm_ops i915_pm_ops = {
.prepare = i915_pm_prepare,
.suspend = i915_pm_suspend,
.suspend_late = i915_pm_suspend_late,
+ .suspend_noirq = i915_pm_suspend_noirq,
.resume_early = i915_pm_resume_early,
.resume = i915_pm_resume,
.complete = i915_pm_complete,
/*
* S4 event handlers
- * @freeze, @freeze_late : called (1) before creating the
- * hibernation image [PMSG_FREEZE] and
- * (2) after rebooting, before restoring
- * the image [PMSG_QUIESCE]
- * @thaw, @thaw_early : called (1) after creating the hibernation
- * image, before writing it [PMSG_THAW]
- * and (2) after failing to create or
- * restore the image [PMSG_RECOVER]
- * @poweroff, @poweroff_late: called after writing the hibernation
- * image, before rebooting [PMSG_HIBERNATE]
- * @restore, @restore_early : called after rebooting and restoring the
- * hibernation image [PMSG_RESTORE]
+ * @freeze* : called (1) before creating the
+ * hibernation image [PMSG_FREEZE] and
+ * (2) after rebooting, before restoring
+ * the image [PMSG_QUIESCE]
+ * @thaw* : called (1) after creating the hibernation
+ * image, before writing it [PMSG_THAW]
+ * and (2) after failing to create or
+ * restore the image [PMSG_RECOVER]
+ * @poweroff* : called after writing the hibernation
+ * image, before rebooting [PMSG_HIBERNATE]
+ * @restore* : called after rebooting and restoring the
+ * hibernation image [PMSG_RESTORE]
*/
.freeze = i915_pm_freeze,
.freeze_late = i915_pm_freeze_late,
@@ -1687,6 +1707,7 @@ const struct dev_pm_ops i915_pm_ops = {
.thaw = i915_pm_thaw,
.poweroff = i915_pm_suspend,
.poweroff_late = i915_pm_poweroff_late,
+ .poweroff_noirq = i915_pm_poweroff_noirq,
.restore_early = i915_pm_restore_early,
.restore = i915_pm_restore,
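A minimal sketch of the dev_pm_ops split introduced above, assuming only the ordering matters: device quiescing stays in the ->suspend_late() phase, while pci_save_state() moves to the new ->suspend_noirq() phase so the PCI core performs the final power-state transition. All demo_* names are illustrative, not part of the patch.

static int demo_suspend_late(struct device *dev)
{
	/* Quiesce the device; no PCI power-state handling here anymore. */
	return 0;
}

static int demo_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/*
	 * Marking the state as saved tells the PCI core the driver has
	 * handled it, which stops the core from putting the device into
	 * D3 on its own where that must be avoided.
	 */
	pci_save_state(pdev);
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.suspend_late = demo_suspend_late,
	.suspend_noirq = demo_suspend_noirq,
};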
diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h
index 1e95ecb2a163..9551519ab429 100644
--- a/drivers/gpu/drm/i915/i915_driver.h
+++ b/drivers/gpu/drm/i915/i915_driver.h
@@ -12,6 +12,7 @@ struct pci_dev;
struct pci_device_id;
struct drm_i915_private;
struct drm_printer;
+struct intel_display_parent_interface;
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
@@ -24,6 +25,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915);
int i915_driver_resume_switcheroo(struct drm_i915_private *i915);
int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
+const struct intel_display_parent_interface *i915_driver_parent_interface(void);
void
i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4e4e89746aa6..5381a934a671 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -114,8 +114,7 @@ struct i915_gem_mm {
struct intel_memory_region *stolen_region;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
- /** Protects the usage of the GTT stolen memory allocator. This is
- * always the inner lock when overlapping with struct_mutex. */
+ /** Protects the usage of the GTT stolen memory allocator */
struct mutex stolen_lock;
/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
@@ -175,6 +174,7 @@ struct i915_selftest_stash {
struct drm_i915_private {
struct drm_device drm;
+ /* display device data, must be placed after the drm device member */
struct intel_display *display;
/* FIXME: Device release actions should all be moved to drmm_ */
@@ -222,6 +222,9 @@ struct drm_i915_private {
bool irqs_enabled;
+ /* LPT/WPT IOSF sideband protection */
+ struct mutex sbi_lock;
+
/* VLV/CHV IOSF sideband */
struct {
struct mutex lock; /* protect sideband access */
@@ -232,16 +235,11 @@ struct drm_i915_private {
/* Sideband mailbox protection */
struct mutex sb_lock;
- /** Cached value of IMR to avoid reads in updating the bitfield */
- u32 irq_mask;
+ /* Cached value of gen 2-4 IMR to avoid reads when updating the bitfield */
+ u32 gen2_imr_mask;
bool preserve_bios_swizzle;
- unsigned int fsb_freq, mem_freq, is_ddr3;
-
- unsigned int hpll_freq;
- unsigned int czclk_freq;
-
/**
* wq - Driver workqueue for GEM.
*
@@ -313,6 +311,8 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
+ spinlock_t frontbuffer_lock; /* protects obj->frontbuffer (write-side) */
+
struct intel_pxp *pxp;
struct i915_pmu pmu;
@@ -490,16 +490,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_ALDERLAKE_P(i915) IS_PLATFORM(i915, INTEL_ALDERLAKE_P)
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
-/*
- * Display code shared by i915 and Xe relies on macros like IS_LUNARLAKE,
- * so we need to define these even on platforms that the i915 base driver
- * doesn't support. Ensure the parameter is used in the definition to
- * avoid 'unused variable' warnings when compiling the shared display code
- * for i915.
- */
-#define IS_LUNARLAKE(i915) (0 && i915)
-#define IS_BATTLEMAGE(i915) (0 && i915)
-#define IS_PANTHERLAKE(i915) (0 && i915)
#define IS_ARROWLAKE_H(i915) \
IS_SUBPLATFORM(i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_ARL_H)
@@ -604,8 +594,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(i915) (GRAPHICS_VER(i915) != 2 && \
- !(IS_I915G(i915) || IS_I915GM(i915)))
+#define HAS_128_BYTE_Y_TILING(i915) (!IS_I915G(i915) && !IS_I915GM(i915))
#define HAS_RC6(i915) (INTEL_INFO(i915)->has_rc6)
#define HAS_RC6p(i915) (INTEL_INFO(i915)->has_rc6p)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c8d43451f35..4c82c9544b93 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -37,6 +37,7 @@
#include <linux/mman.h>
#include <drm/drm_cache.h>
+#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
#include "gem/i915_gem_clflush.h"
@@ -847,8 +848,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
/*
* Only called during RPM suspend. All users of the userfault_list
* must be holding an RPM wakeref to ensure that this can not
- * run concurrently with themselves (and use the struct_mutex for
- * protection between themselves).
+ * run concurrently with themselves.
*/
list_for_each_entry_safe(obj, on,
@@ -1299,6 +1299,8 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
+
+ spin_lock_init(&dev_priv->frontbuffer_lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c
index 6fcda6d7b5b7..cf47c2491a0a 100644
--- a/drivers/gpu/drm/i915/i915_getparam.c
+++ b/drivers/gpu/drm/i915/i915_getparam.c
@@ -2,6 +2,8 @@
* SPDX-License-Identifier: MIT
*/
+#include <drm/drm_print.h>
+
#include "display/intel_overlay.h"
#include "gem/i915_gem_mman.h"
#include "gt/intel_engine_user.h"
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0e4b832dff84..7582ef34bf3f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -685,6 +685,74 @@ static void err_print_guc_ctb(struct drm_i915_error_state_buf *m,
ctb->head, ctb->tail, ctb->desc_offset, ctb->cmds_offset, ctb->size);
}
+/* This list includes registers that are useful in debugging GuC hangs. */
+static const struct {
+ u32 start;
+ u32 count;
+} guc_hw_reg_state[] = {
+ { 0xc0b0, 2 },
+ { 0xc000, 65 },
+ { 0xc140, 1 },
+ { 0xc180, 16 },
+ { 0xc1dc, 10 },
+ { 0xc300, 79 },
+ { 0xc4b4, 47 },
+ { 0xc574, 1 },
+ { 0xc57c, 1 },
+ { 0xc584, 11 },
+ { 0xc5c0, 8 },
+ { 0xc5e4, 1 },
+ { 0xc5ec, 103 },
+ { 0xc7c0, 1 },
+ { 0xc0b0, 2 }
+};
+
+static u32 print_range_line(struct drm_i915_error_state_buf *m, u32 start, u32 *dump, u32 count)
+{
+ if (count >= 8) {
+ err_printf(m, "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ start, dump[0], dump[1], dump[2], dump[3],
+ dump[4], dump[5], dump[6], dump[7]);
+ return 8;
+ } else if (count >= 4) {
+ err_printf(m, "[0x%04x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ start, dump[0], dump[1], dump[2], dump[3]);
+ return 4;
+ } else if (count >= 2) {
+ err_printf(m, "[0x%04x] 0x%08x 0x%08x\n", start, dump[0], dump[1]);
+ return 2;
+ }
+
+ err_printf(m, "[0x%04x] 0x%08x\n", start, dump[0]);
+ return 1;
+}
+
+static void err_print_guc_hw_state(struct drm_i915_error_state_buf *m, u32 *hw_state)
+{
+ u32 total = 0;
+ int i;
+
+ if (!hw_state)
+ return;
+
+ err_printf(m, "GuC Register State:\n");
+
+ for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++) {
+ u32 entry = 0;
+
+ while (entry < guc_hw_reg_state[i].count) {
+ u32 start = guc_hw_reg_state[i].start + entry * sizeof(u32);
+ u32 count = guc_hw_reg_state[i].count - entry;
+ u32 *values = hw_state + total + entry;
+
+ entry += print_range_line(m, start, values, count);
+ }
+
+ GEM_BUG_ON(entry != guc_hw_reg_state[i].count);
+ total += entry;
+ }
+}
+
static void err_print_uc(struct drm_i915_error_state_buf *m,
const struct intel_uc_coredump *error_uc)
{
@@ -693,6 +761,7 @@ static void err_print_uc(struct drm_i915_error_state_buf *m,
intel_uc_fw_dump(&error_uc->guc_fw, &p);
intel_uc_fw_dump(&error_uc->huc_fw, &p);
err_printf(m, "GuC timestamp: 0x%08x\n", error_uc->guc.timestamp);
+ err_print_guc_hw_state(m, error_uc->guc.hw_state);
intel_gpu_error_print_vma(m, NULL, error_uc->guc.vma_log);
err_printf(m, "GuC CTB fence: %d\n", error_uc->guc.last_fence);
err_print_guc_ctb(m, "Send", error_uc->guc.ctb + 0);
@@ -1025,6 +1094,7 @@ static void cleanup_uc(struct intel_uc_coredump *uc)
kfree(uc->huc_fw.file_wanted.path);
i915_vma_coredump_free(uc->guc.vma_log);
i915_vma_coredump_free(uc->guc.vma_ctb);
+ kfree(uc->guc.hw_state);
kfree(uc);
}
@@ -1721,6 +1791,37 @@ static void gt_record_guc_ctb(struct intel_ctb_coredump *saved,
saved->cmds_offset = ((void *)ctb->cmds) - blob_ptr;
}
+static u32 read_guc_state_reg(struct intel_uncore *uncore, int range, int count)
+{
+ GEM_BUG_ON(range >= ARRAY_SIZE(guc_hw_reg_state));
+ GEM_BUG_ON(count >= guc_hw_reg_state[range].count);
+
+ return intel_uncore_read(uncore,
+ _MMIO(guc_hw_reg_state[range].start + count * sizeof(u32)));
+}
+
+static void gt_record_guc_hw_state(struct intel_uncore *uncore,
+ struct intel_uc_coredump *error_uc)
+{
+ u32 *hw_state;
+ u32 count = 0;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++)
+ count += guc_hw_reg_state[i].count;
+
+ hw_state = kcalloc(count, sizeof(u32), ALLOW_FAIL);
+ if (!hw_state)
+ return;
+
+ count = 0;
+ for (i = 0; i < ARRAY_SIZE(guc_hw_reg_state); i++)
+ for (j = 0; j < guc_hw_reg_state[i].count; j++)
+ hw_state[count++] = read_guc_state_reg(uncore, i, j);
+
+ error_uc->guc.hw_state = hw_state;
+}
+
static struct intel_uc_coredump *
gt_record_uc(struct intel_gt_coredump *gt,
struct i915_vma_compress *compress)
@@ -1755,6 +1856,7 @@ gt_record_uc(struct intel_gt_coredump *gt,
uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
gt_record_guc_ctb(error_uc->guc.ctb + 1, &uc->guc.ct.ctbs.recv,
uc->guc.ct.ctbs.send.desc, (struct intel_guc *)&uc->guc);
+ gt_record_guc_hw_state(gt->_gt->uncore, error_uc);
return error_uc;
}
@@ -2445,11 +2547,11 @@ static const struct file_operations i915_error_state_fops = {
void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct dentry *debugfs_root = i915->drm.debugfs_root;
- debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915,
+ debugfs_create_file("i915_error_state", 0644, debugfs_root, i915,
&i915_error_state_fops);
- debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915,
+ debugfs_create_file("i915_gpu_info", 0644, debugfs_root, i915,
&i915_gpu_info_fops);
}
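The capture side (gt_record_guc_hw_state()) and the print side (err_print_guc_hw_state()) both walk guc_hw_reg_state[] in order, so each range's values sit at the running total of all earlier counts in the flat hw_state array. Below is a standalone sketch of the greedy 8/4/2/1 chunking that print_range_line() performs, using the { 0xc1dc, 10 } entry as the worked example (illustration only, not part of the patch):

static void demo_chunking(void)
{
	u32 start = 0xc1dc, count = 10, done = 0;

	while (done < count) {
		u32 n = count - done >= 8 ? 8 :
			count - done >= 4 ? 4 :
			count - done >= 2 ? 2 : 1;

		/* One output line of n dwords at this address. */
		pr_info("[0x%04x] %u dwords\n",
			start + done * (u32)sizeof(u32), n);
		done += n;
	}
}

For count == 10 this emits an 8-dword line at 0xc1dc followed by a 2-dword line at 0xc1fc, matching the layout err_print_guc_hw_state() produces.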
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 182324979278..91b3df621a49 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -177,6 +177,7 @@ struct intel_gt_coredump {
struct intel_ctb_coredump ctb[2];
struct i915_vma_coredump *vma_ctb;
struct i915_vma_coredump *vma_log;
+ u32 *hw_state;
u32 timestamp;
u16 last_fence;
bool is_guc_capture;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 191ed8bb1d9c..1898be4ddc8b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -32,13 +32,12 @@
#include <linux/sysrq.h>
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
-#include "display/intel_display_core.h"
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
#include "display/intel_hotplug_irq.h"
#include "display/intel_lpe_audio.h"
-#include "display/intel_psr_regs.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
@@ -163,11 +162,6 @@ static void ivb_parity_work(struct work_struct *work)
u32 misccpctl;
u8 slice = 0;
- /* We must turn off DOP level clock gating to access the L3 registers.
- * In order to prevent a get/put style interface, acquire struct mutex
- * any time we access those registers.
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
@@ -225,7 +219,6 @@ out:
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(gt->irq_lock);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
@@ -421,7 +414,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
struct drm_i915_private *i915 = arg;
struct intel_display *display = i915->display;
void __iomem * const regs = intel_uncore_regs(&i915->uncore);
- u32 de_iir, gt_iir, de_ier, sde_ier = 0;
+ u32 gt_iir, de_ier = 0, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
if (unlikely(!intel_irqs_enabled(i915)))
@@ -430,19 +423,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
disable_rpm_wakeref_asserts(&i915->runtime_pm);
- /* disable master interrupt before clearing iir */
- de_ier = raw_reg_read(regs, DEIER);
- raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-
- /* Disable south interrupts. We'll only write to SDEIIR once, so further
- * interrupts will be stored on its back queue, and then we'll be
- * able to process them after we restore SDEIER (as soon as we restore
- * it, we'll get an interrupt if SDEIIR still has something to process
- * due to its back queue). */
- if (!HAS_PCH_NOP(i915)) {
- sde_ier = raw_reg_read(regs, SDEIER);
- raw_reg_write(regs, SDEIER, 0);
- }
+ /* Disable master and south interrupts */
+ ilk_display_irq_master_disable(display, &de_ier, &sde_ier);
/* Find, clear, then process each source of interrupt */
@@ -456,15 +438,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
}
- de_iir = raw_reg_read(regs, DEIIR);
- if (de_iir) {
- raw_reg_write(regs, DEIIR, de_iir);
- if (DISPLAY_VER(i915) >= 7)
- ivb_display_irq_handler(display, de_iir);
- else
- ilk_display_irq_handler(display, de_iir);
+ if (ilk_display_irq_handler(display))
ret = IRQ_HANDLED;
- }
if (GRAPHICS_VER(i915) >= 6) {
u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
@@ -475,9 +450,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
}
}
- raw_reg_write(regs, DEIER, de_ier);
- if (sde_ier)
- raw_reg_write(regs, SDEIER, sde_ier);
+ /* Re-enable master and south interrupts */
+ ilk_display_irq_master_enable(display, de_ier, sde_ier);
pmu_irq_stats(i915, ret);
@@ -662,22 +636,10 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
static void ilk_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_display *display = dev_priv->display;
- struct intel_uncore *uncore = &dev_priv->uncore;
-
- gen2_irq_reset(uncore, DE_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
-
- if (GRAPHICS_VER(dev_priv) == 7)
- intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
-
- if (IS_HASWELL(dev_priv)) {
- intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
- intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
- }
+ /* The master interrupt enable is in DEIER, reset display irq first */
+ ilk_display_irq_reset(display);
gen5_gt_irq_reset(to_gt(dev_priv));
-
- ibx_display_irq_reset(display);
}
static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
@@ -832,6 +794,8 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}
+#define I9XX_HAS_FBC(i915) (IS_I85X(i915) || IS_I865G(i915) || IS_I915GM(i915) || IS_I945GM(i915))
+
static u32 i9xx_error_mask(struct drm_i915_private *i915)
{
/*
@@ -846,7 +810,7 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915)
* Unfortunately we can't mask off individual PGTBL_ER bits,
* so we just have to mask off all page table errors via EMR.
*/
- if (HAS_FBC(i915))
+ if (I9XX_HAS_FBC(i915))
return I915_ERROR_MEMORY_REFRESH;
else
return I915_ERROR_PAGE_TABLE |
@@ -902,7 +866,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ dev_priv->gen2_imr_mask = ~0u;
}
static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -913,28 +877,14 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_error_init(uncore, GEN2_ERROR_REGS, ~i9xx_error_mask(dev_priv));
- dev_priv->irq_mask =
- ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT);
+ enable_mask = i9xx_display_irq_enable_mask(display) |
+ I915_MASTER_ERROR_INTERRUPT;
- enable_mask =
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT |
- I915_USER_INTERRUPT;
-
- if (DISPLAY_VER(dev_priv) >= 3) {
- dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT;
- enable_mask |= I915_ASLE_INTERRUPT;
- }
+ dev_priv->gen2_imr_mask = ~enable_mask;
- if (HAS_HOTPLUG(dev_priv)) {
- dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
- enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
- }
+ enable_mask |= I915_USER_INTERRUPT;
- gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask);
i915_display_irq_postinstall(display);
}
@@ -963,8 +913,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
ret = IRQ_HANDLED;
- if (HAS_HOTPLUG(dev_priv) &&
- iir & I915_DISPLAY_PORT_INTERRUPT)
+ if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(display);
/* Call regardless, as some status bits might not be
@@ -1004,7 +953,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv)
gen2_error_reset(uncore, GEN2_ERROR_REGS);
gen2_irq_reset(uncore, GEN2_IRQ_REGS);
- dev_priv->irq_mask = ~0u;
+ dev_priv->gen2_imr_mask = ~0u;
}
static u32 i965_error_mask(struct drm_i915_private *i915)
@@ -1034,25 +983,17 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
gen2_error_init(uncore, GEN2_ERROR_REGS, ~i965_error_mask(dev_priv));
- dev_priv->irq_mask =
- ~(I915_ASLE_INTERRUPT |
- I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT);
-
- enable_mask =
- I915_ASLE_INTERRUPT |
- I915_DISPLAY_PORT_INTERRUPT |
- I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
- I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_MASTER_ERROR_INTERRUPT |
- I915_USER_INTERRUPT;
+ enable_mask = i9xx_display_irq_enable_mask(display) |
+ I915_MASTER_ERROR_INTERRUPT;
+
+ dev_priv->gen2_imr_mask = ~enable_mask;
+
+ enable_mask |= I915_USER_INTERRUPT;
if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
- gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->irq_mask, enable_mask);
+ gen2_irq_init(uncore, GEN2_IRQ_REGS, dev_priv->gen2_imr_mask, enable_mask);
i965_display_irq_postinstall(display);
}
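The postinstall hunks above rely on the gen2-4 convention that IER enables an interrupt source while IMR gates whether it reaches the CPU: the user interrupt is OR-ed into enable_mask only after gen2_imr_mask is computed, so it is enabled in IER but starts out masked in IMR and can be unmasked later when a waiter needs it. A condensed sketch of that pattern, with display_bits standing in for i9xx_display_irq_enable_mask():

	u32 enable_mask, imr_mask;

	enable_mask = display_bits | I915_MASTER_ERROR_INTERRUPT;
	imr_mask = ~enable_mask;		/* user irq still masked */
	enable_mask |= I915_USER_INTERRUPT;	/* but enabled in IER */

	gen2_irq_init(uncore, GEN2_IRQ_REGS, imr_mask, enable_mask);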
diff --git a/drivers/gpu/drm/i915/i915_jiffies.h b/drivers/gpu/drm/i915/i915_jiffies.h
new file mode 100644
index 000000000000..18a4eaea897a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_jiffies.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_JIFFIES_H__
+#define __I915_JIFFIES_H__
+
+#include <linux/jiffies.h>
+
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+ unsigned long j = msecs_to_jiffies(m);
+
+ return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+#endif /* __I915_JIFFIES_H__ */
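A usage sketch for the helper above: plain msecs_to_jiffies() can expire up to one tick early because the current tick is already partly elapsed when the timeout is armed, so msecs_to_jiffies_timeout() adds one jiffy to guarantee at least the requested wall time, and the min_t() clamp keeps the result within MAX_JIFFY_OFFSET. In the fragment below (wq and condition are placeholders) the wait therefore sleeps for no less than 10 ms:

	long ret;

	ret = wait_event_timeout(wq, condition,
				 msecs_to_jiffies_timeout(10));
	if (!ret)
		return -ETIMEDOUT;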
diff --git a/drivers/gpu/drm/i915/i915_list_util.h b/drivers/gpu/drm/i915/i915_list_util.h
new file mode 100644
index 000000000000..4e515dc8a3e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_list_util.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_LIST_UTIL_H__
+#define __I915_LIST_UTIL_H__
+
+#include <linux/list.h>
+#include <asm/rwonce.h>
+
+static inline void __list_del_many(struct list_head *head,
+ struct list_head *first)
+{
+ first->prev = head;
+ WRITE_ONCE(head->next, first);
+}
+
+static inline int list_is_last_rcu(const struct list_head *list,
+ const struct list_head *head)
+{
+ return READ_ONCE(list->next) == head;
+}
+
+#endif /* __I915_LIST_UTIL_H__ */
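A usage sketch for __list_del_many(): detach every entry between the head and a cursor in one step, making the cursor the new first element. The skipped entries are unlinked from the head but, as with __list_del(), their own pointers are left stale, so the caller must still own them. entry_is_busy() is a hypothetical predicate:

	struct list_head *pos;

	list_for_each(pos, &head)
		if (entry_is_busy(pos))
			break;

	/* Everything before 'pos' is now off the list. */
	__list_del_many(&head, pos);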
diff --git a/drivers/gpu/drm/i915/i915_mmio_range.c b/drivers/gpu/drm/i915/i915_mmio_range.c
new file mode 100644
index 000000000000..724041e81aa7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mmio_range.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "i915_mmio_range.h"
+
+bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table)
+{
+ while (table->start || table->end) {
+ if (addr >= table->start && addr <= table->end)
+ return true;
+
+ table++;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/i915_mmio_range.h b/drivers/gpu/drm/i915/i915_mmio_range.h
new file mode 100644
index 000000000000..f1c7086d3e3c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mmio_range.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef __I915_MMIO_RANGE_H__
+#define __I915_MMIO_RANGE_H__
+
+#include <linux/types.h>
+
+/* Register ranges (e.g. shadow tables, MCR tables) */
+struct i915_mmio_range {
+ u32 start;
+ u32 end;
+};
+
+bool i915_mmio_range_table_contains(u32 addr, const struct i915_mmio_range *table);
+
+#endif /* __I915_MMIO_RANGE_H__ */
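A usage sketch: tables handed to i915_mmio_range_table_contains() are zero-terminated, so the empty {} sentinel (an entry whose start and end are both zero) is what ends the walk. The demo_* names are illustrative:

static const struct i915_mmio_range demo_ranges[] = {
	{ .start = 0x2710, .end = 0x272c },
	{ .start = 0x9840, .end = 0x9840 },	/* single register */
	{}	/* sentinel */
};

static bool demo_addr_is_valid(u32 addr)
{
	return i915_mmio_range_table_contains(addr, demo_ranges);
}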
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 5862754c662c..5d9c35b5a182 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 1658f1246c6f..0b9d9f3f7813 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -219,6 +219,7 @@
#include "i915_perf.h"
#include "i915_perf_oa_regs.h"
#include "i915_reg.h"
+#include "i915_mmio_range.h"
/* HW requires this to be a power of two, between 128k and 16M, though driver
* is currently generally designed assuming the largest 16M size is used such
@@ -4320,29 +4321,17 @@ static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr)
return false;
}
-static bool reg_in_range_table(u32 addr, const struct i915_range *table)
-{
- while (table->start || table->end) {
- if (addr >= table->start && addr <= table->end)
- return true;
-
- table++;
- }
-
- return false;
-}
-
#define REG_EQUAL(addr, mmio) \
((addr) == i915_mmio_reg_offset(mmio))
-static const struct i915_range gen7_oa_b_counters[] = {
+static const struct i915_mmio_range gen7_oa_b_counters[] = {
{ .start = 0x2710, .end = 0x272c }, /* OASTARTTRIG[1-8] */
{ .start = 0x2740, .end = 0x275c }, /* OAREPORTTRIG[1-8] */
{ .start = 0x2770, .end = 0x27ac }, /* OACEC[0-7][0-1] */
{}
};
-static const struct i915_range gen12_oa_b_counters[] = {
+static const struct i915_mmio_range gen12_oa_b_counters[] = {
{ .start = 0x2b2c, .end = 0x2b2c }, /* GEN12_OAG_OA_PESS */
{ .start = 0xd900, .end = 0xd91c }, /* GEN12_OAG_OASTARTTRIG[1-8] */
{ .start = 0xd920, .end = 0xd93c }, /* GEN12_OAG_OAREPORTTRIG1[1-8] */
@@ -4353,7 +4342,7 @@ static const struct i915_range gen12_oa_b_counters[] = {
{}
};
-static const struct i915_range mtl_oam_b_counters[] = {
+static const struct i915_mmio_range mtl_oam_b_counters[] = {
{ .start = 0x393000, .end = 0x39301c }, /* GEN12_OAM_STARTTRIG1[1-8] */
{ .start = 0x393020, .end = 0x39303c }, /* GEN12_OAM_REPORTTRIG1[1-8] */
{ .start = 0x393040, .end = 0x39307c }, /* GEN12_OAM_CEC[0-7][0-1] */
@@ -4361,43 +4350,43 @@ static const struct i915_range mtl_oam_b_counters[] = {
{}
};
-static const struct i915_range xehp_oa_b_counters[] = {
+static const struct i915_mmio_range xehp_oa_b_counters[] = {
{ .start = 0xdc48, .end = 0xdc48 }, /* OAA_ENABLE_REG */
{ .start = 0xdd00, .end = 0xdd48 }, /* OAG_LCE0_0 - OAA_LENABLE_REG */
{}
};
-static const struct i915_range gen7_oa_mux_regs[] = {
+static const struct i915_mmio_range gen7_oa_mux_regs[] = {
{ .start = 0x91b8, .end = 0x91cc }, /* OA_PERFCNT[1-2], OA_PERFMATRIX */
{ .start = 0x9800, .end = 0x9888 }, /* MICRO_BP0_0 - NOA_WRITE */
{ .start = 0xe180, .end = 0xe180 }, /* HALF_SLICE_CHICKEN2 */
{}
};
-static const struct i915_range hsw_oa_mux_regs[] = {
+static const struct i915_mmio_range hsw_oa_mux_regs[] = {
{ .start = 0x09e80, .end = 0x09ea4 }, /* HSW_MBVID2_NOA[0-9] */
{ .start = 0x09ec0, .end = 0x09ec0 }, /* HSW_MBVID2_MISR0 */
{ .start = 0x25100, .end = 0x2ff90 },
{}
};
-static const struct i915_range chv_oa_mux_regs[] = {
+static const struct i915_mmio_range chv_oa_mux_regs[] = {
{ .start = 0x182300, .end = 0x1823a4 },
{}
};
-static const struct i915_range gen8_oa_mux_regs[] = {
+static const struct i915_mmio_range gen8_oa_mux_regs[] = {
{ .start = 0x0d00, .end = 0x0d2c }, /* RPM_CONFIG[0-1], NOA_CONFIG[0-8] */
{ .start = 0x20cc, .end = 0x20cc }, /* WAIT_FOR_RC6_EXIT */
{}
};
-static const struct i915_range gen11_oa_mux_regs[] = {
+static const struct i915_mmio_range gen11_oa_mux_regs[] = {
{ .start = 0x91c8, .end = 0x91dc }, /* OA_PERFCNT[3-4] */
{}
};
-static const struct i915_range gen12_oa_mux_regs[] = {
+static const struct i915_mmio_range gen12_oa_mux_regs[] = {
{ .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
{ .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
{ .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
@@ -4410,7 +4399,7 @@ static const struct i915_range gen12_oa_mux_regs[] = {
* Ref: 14010536224:
* 0x20cc is repurposed on MTL, so use a separate array for MTL.
*/
-static const struct i915_range mtl_oa_mux_regs[] = {
+static const struct i915_mmio_range mtl_oa_mux_regs[] = {
{ .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
{ .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
{ .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
@@ -4421,61 +4410,61 @@ static const struct i915_range mtl_oa_mux_regs[] = {
static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_b_counters);
+ return i915_mmio_range_table_contains(addr, gen7_oa_b_counters);
}
static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, gen8_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen8_oa_mux_regs);
}
static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, gen8_oa_mux_regs) ||
- reg_in_range_table(addr, gen11_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen8_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, gen11_oa_mux_regs);
}
static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, hsw_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, hsw_oa_mux_regs);
}
static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen7_oa_mux_regs) ||
- reg_in_range_table(addr, chv_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen7_oa_mux_regs) ||
+ i915_mmio_range_table_contains(addr, chv_oa_mux_regs);
}
static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen12_oa_b_counters);
+ return i915_mmio_range_table_contains(addr, gen12_oa_b_counters);
}
static bool mtl_is_valid_oam_b_counter_addr(struct i915_perf *perf, u32 addr)
{
if (HAS_OAM(perf->i915) &&
GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
- return reg_in_range_table(addr, mtl_oam_b_counters);
+ return i915_mmio_range_table_contains(addr, mtl_oam_b_counters);
return false;
}
static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, xehp_oa_b_counters) ||
- reg_in_range_table(addr, gen12_oa_b_counters) ||
+ return i915_mmio_range_table_contains(addr, xehp_oa_b_counters) ||
+ i915_mmio_range_table_contains(addr, gen12_oa_b_counters) ||
mtl_is_valid_oam_b_counter_addr(perf, addr);
}
static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 70))
- return reg_in_range_table(addr, mtl_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, mtl_oa_mux_regs);
else
- return reg_in_range_table(addr, gen12_oa_mux_regs);
+ return i915_mmio_range_table_contains(addr, gen12_oa_mux_regs);
}
static u32 mask_reg_value(u32 reg, u32 val)
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 5bc696bfbb0f..a6697db21c72 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -6,6 +6,8 @@
#include <linux/pm_runtime.h>
+#include <drm/drm_print.h>
+
#include "gt/intel_engine.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_regs.h"
@@ -895,7 +897,7 @@ static ssize_t i915_pmu_format_show(struct device *dev,
struct i915_str_attribute *eattr;
eattr = container_of(attr, struct i915_str_attribute, attr);
- return sprintf(buf, "%s\n", eattr->str);
+ return sysfs_emit(buf, "%s\n", eattr->str);
}
#define I915_PMU_FORMAT_ATTR(_name, _config) \
@@ -925,7 +927,7 @@ static ssize_t i915_pmu_event_show(struct device *dev,
struct i915_ext_attribute *eattr;
eattr = container_of(attr, struct i915_ext_attribute, attr);
- return sprintf(buf, "config=0x%lx\n", eattr->val);
+ return sysfs_emit(buf, "config=0x%lx\n", eattr->val);
}
#define __event(__counter, __name, __unit) \
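The sprintf() to sysfs_emit() conversions above follow current kernel practice for show() callbacks: sysfs_emit() verifies that buf is the page-sized buffer sysfs passed in and refuses to write past PAGE_SIZE, which an unchecked sprintf() cannot guarantee. A minimal sketch (demo_show() is illustrative):

static ssize_t demo_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	/* Bounded to PAGE_SIZE; warns if buf isn't a sysfs page buffer. */
	return sysfs_emit(buf, "%d\n", 42);
}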
diff --git a/drivers/gpu/drm/i915/i915_ptr_util.h b/drivers/gpu/drm/i915/i915_ptr_util.h
new file mode 100644
index 000000000000..9f8931d7d99b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ptr_util.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_PTR_UTIL_H__
+#define __I915_PTR_UTIL_H__
+
+#include <linux/types.h>
+
+#define ptr_mask_bits(ptr, n) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v & -BIT(n)); \
+})
+
+#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))
+
+#define ptr_unpack_bits(ptr, bits, n) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ *(bits) = __v & (BIT(n) - 1); \
+ (typeof(ptr))(__v & -BIT(n)); \
+})
+
+#define ptr_pack_bits(ptr, bits, n) ({ \
+ unsigned long __bits = (bits); \
+ GEM_BUG_ON(__bits & -BIT(n)); \
+ ((typeof(ptr))((unsigned long)(ptr) | __bits)); \
+})
+
+#define ptr_dec(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v - 1); \
+})
+
+#define ptr_inc(ptr) ({ \
+ unsigned long __v = (unsigned long)(ptr); \
+ (typeof(ptr))(__v + 1); \
+})
+
+#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
+#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
+#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
+#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
+
+static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
+{
+ return a - b;
+}
+
+#define u64_to_ptr(T, x) ({ \
+ typecheck(u64, x); \
+ (T *)(uintptr_t)(x); \
+})
+
+/*
+ * container_of_user: Extract the superclass from a pointer to a member.
+ *
+ * Exactly like container_of() with the exception that it plays nicely
+ * with sparse for __user @ptr.
+ */
+#define container_of_user(ptr, type, member) ({ \
+ void __user *__mptr = (void __user *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type __user *)(__mptr - offsetof(type, member))); })
+
+#endif /* __I915_PTR_UTIL_H__ */
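A usage sketch for the pointer-packing helpers above: stash a small value in the low bits of a sufficiently aligned pointer and recover both later. The value must fit in n bits (the GEM_BUG_ON() in ptr_pack_bits() enforces this), and n must not exceed the pointer's alignment guarantee. Here obj is a placeholder for any 4-byte-aligned pointer:

	unsigned long flags;
	void *packed, *orig;

	packed = ptr_pack_bits(obj, 2, 2);	/* stash the value 2 */
	orig = ptr_unpack_bits(packed, &flags, 2);
	/* Now orig == obj and flags == 2. */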
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 14d9ec0ed777..0c55fb6e9727 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -6,6 +6,8 @@
#include <linux/nospec.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_perf.h"
#include "i915_query.h"
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 03b895897f60..5bf3b4ab2baa 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -412,9 +412,9 @@
#define FW_BLC _MMIO(0x20d8)
#define FW_BLC2 _MMIO(0x20dc)
#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
-#define FW_BLC_SELF_EN_MASK (1 << 31)
-#define FW_BLC_SELF_FIFO_MASK (1 << 16) /* 945 only */
-#define FW_BLC_SELF_EN (1 << 15) /* 945 only */
+#define FW_BLC_SELF_EN_MASK REG_BIT(31)
+#define FW_BLC_SELF_FIFO_MASK REG_BIT(16) /* 945 only */
+#define FW_BLC_SELF_EN REG_BIT(15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -613,7 +613,8 @@
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200)
+#define DSPCLK_GATE_D _MMIO(0x6200)
+#define VLV_DSPCLK_GATE_D _MMIO(VLV_DISPLAY_BASE + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -1232,6 +1233,7 @@
#define OROM_OFFSET_MASK REG_GENMASK(20, 16)
#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
+#define XE3P_ECC_IMPACTING_DE REG_BIT(12)
#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index bfe98cb9a038..e81fac8ab51b 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -174,6 +174,16 @@
*/
#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val))
+/**
+ * REG_FIELD_MAX() - produce the maximum value representable by a field
+ * @__mask: shifted mask defining the field's length and position
+ *
+ * Local wrapper for FIELD_MAX() to return the maximum value that can
+ * be held in the field specified by @__mask, cast to u32 for consistency
+ * with other macros.
+ */
+#define REG_FIELD_MAX(__mask) ((u32)FIELD_MAX(__mask))
+
typedef struct {
u32 reg;
} i915_reg_t;
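A usage sketch for REG_FIELD_MAX(): with a field defined via REG_GENMASK(11, 8) the maximum encodable value is 0xf, which makes range checks straightforward before packing with REG_FIELD_PREP(). DEMO_FIELD_MASK and val are illustrative:

#define DEMO_FIELD_MASK	REG_GENMASK(11, 8)

	if (val > REG_FIELD_MAX(DEMO_FIELD_MASK))
		return -EINVAL;

	reg |= REG_FIELD_PREP(DEMO_FIELD_MASK, val);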
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b9a2b2194c8f..4399941236cb 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -31,6 +31,8 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_context.h"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 5f7e8138ec14..b09135301f39 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -31,19 +31,20 @@
#include <linux/llist.h>
#include <linux/lockdep.h>
+#include <uapi/drm/i915_drm.h>
+
#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"
#include "i915_gem.h"
+#include "i915_ptr_util.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include "i915_vma_resource.h"
-#include <uapi/drm/i915_drm.h>
-
struct drm_file;
struct drm_i915_gem_object;
struct drm_printer;
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 4c02a04be681..7e0791024282 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -5,7 +5,9 @@
#include <linux/vga_switcheroo.h>
-#include "display/intel_display_core.h"
+#include <drm/drm_print.h>
+
+#include "display/intel_display_device.h"
#include "i915_driver.h"
#include "i915_drv.h"
@@ -15,13 +17,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
enum vga_switcheroo_state state)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ struct intel_display *display = i915 ? i915->display : NULL;
pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
if (!i915) {
dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
return;
}
- if (!HAS_DISPLAY(i915)) {
+ if (!intel_display_device_present(display)) {
dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
return;
}
@@ -44,13 +47,15 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
+ struct intel_display *display = i915 ? i915->display : NULL;
/*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
+ return i915 && intel_display_device_present(display) &&
+ atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 622c66666935..70e0d8615160 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -30,6 +30,8 @@
#include <linux/stat.h>
#include <linux/sysfs.h>
+#include <drm/drm_print.h>
+
#include "gt/intel_gt_regs.h"
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
diff --git a/drivers/gpu/drm/i915/i915_timer_util.c b/drivers/gpu/drm/i915/i915_timer_util.c
new file mode 100644
index 000000000000..ee4cfd8b3c07
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timer_util.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <linux/jiffies.h>
+
+#include "i915_timer_util.h"
+
+void cancel_timer(struct timer_list *t)
+{
+ if (!timer_active(t))
+ return;
+
+ timer_delete(t);
+ WRITE_ONCE(t->expires, 0);
+}
+
+void set_timer_ms(struct timer_list *t, unsigned long timeout)
+{
+ if (!timeout) {
+ cancel_timer(t);
+ return;
+ }
+
+ timeout = msecs_to_jiffies(timeout);
+
+ /*
+ * Paranoia to make sure the compiler computes the timeout before
+ * loading 'jiffies' as jiffies is volatile and may be updated in
+ * the background by a timer tick. All to reduce the complexity
+ * of the addition and reduce the risk of losing a jiffy.
+ */
+ barrier();
+
+ /* Keep t->expires = 0 reserved to indicate a canceled timer. */
+ mod_timer(t, jiffies + timeout ?: 1);
+}
diff --git a/drivers/gpu/drm/i915/i915_timer_util.h b/drivers/gpu/drm/i915/i915_timer_util.h
new file mode 100644
index 000000000000..f35ad730820c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timer_util.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_TIMER_UTIL_H__
+#define __I915_TIMER_UTIL_H__
+
+#include <linux/timer.h>
+#include <asm/rwonce.h>
+
+void cancel_timer(struct timer_list *t);
+void set_timer_ms(struct timer_list *t, unsigned long timeout);
+
+static inline bool timer_active(const struct timer_list *t)
+{
+ return READ_ONCE(t->expires);
+}
+
+static inline bool timer_expired(const struct timer_list *t)
+{
+ return timer_active(t) && !timer_pending(t);
+}
+
+#endif /* __I915_TIMER_UTIL_H__ */
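A usage sketch for the timer helpers above: set_timer_ms() reserves t->expires == 0 to mean "canceled" (hence the '?: 1' fallback when the computed expiry would be 0), which is what lets timer_active() and timer_expired() work locklessly via READ_ONCE(t->expires). Here t is a timer_list already set up with timer_setup():

	set_timer_ms(&t, 20);	/* arm for ~20 ms; expires is never 0 */

	/* true once the timer was armed and has since fired */
	bool fired = timer_expired(&t);

	cancel_timer(&t);	/* disarm and mark inactive */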
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 942345548bc3..d5c6e6605086 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -5,11 +5,11 @@
#include <linux/slab.h>
+#include <drm/drm_buddy.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo.h>
-#include <drm/drm_buddy.h>
-
#include "i915_ttm_buddy_manager.h"
#include "i915_gem.h"
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index b60c28fbd207..89b920ccbccb 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -47,36 +48,6 @@ bool i915_error_injected(void)
#endif
-void cancel_timer(struct timer_list *t)
-{
- if (!timer_active(t))
- return;
-
- timer_delete(t);
- WRITE_ONCE(t->expires, 0);
-}
-
-void set_timer_ms(struct timer_list *t, unsigned long timeout)
-{
- if (!timeout) {
- cancel_timer(t);
- return;
- }
-
- timeout = msecs_to_jiffies(timeout);
-
- /*
- * Paranoia to make sure the compiler computes the timeout before
- * loading 'jiffies' as jiffies is volatile and may be updated in
- * the background by a timer tick. All to reduce the complexity
- * of the addition and reduce the risk of losing a jiffy.
- */
- barrier();
-
- /* Keep t->expires = 0 reserved to indicate a canceled timer. */
- mod_timer(t, jiffies + timeout ?: 1);
-}
-
bool i915_vtd_active(struct drm_i915_private *i915)
{
if (device_iommu_mapped(i915->drm.dev))
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index f7fb40cfdb70..4f75115b87d6 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -25,7 +25,6 @@
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
-#include <linux/list.h>
#include <linux/overflow.h>
#include <linux/sched.h>
#include <linux/string_helpers.h>
@@ -38,10 +37,11 @@
#endif
struct drm_i915_private;
-struct timer_list;
+#ifndef MISSING_CASE
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
+#endif
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
@@ -67,87 +67,13 @@ bool i915_error_injected(void);
drm_err(&(i915)->drm, fmt, ##__VA_ARGS__); \
})
-#define range_overflows(start, size, max) ({ \
- typeof(start) start__ = (start); \
- typeof(size) size__ = (size); \
- typeof(max) max__ = (max); \
- (void)(&start__ == &size__); \
- (void)(&start__ == &max__); \
- start__ >= max__ || size__ > max__ - start__; \
-})
-
-#define range_overflows_t(type, start, size, max) \
- range_overflows((type)(start), (type)(size), (type)(max))
-
-#define range_overflows_end(start, size, max) ({ \
- typeof(start) start__ = (start); \
- typeof(size) size__ = (size); \
- typeof(max) max__ = (max); \
- (void)(&start__ == &size__); \
- (void)(&start__ == &max__); \
- start__ > max__ || size__ > max__ - start__; \
-})
-
-#define range_overflows_end_t(type, start, size, max) \
- range_overflows_end((type)(start), (type)(size), (type)(max))
-
-#define ptr_mask_bits(ptr, n) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v & -BIT(n)); \
-})
-
-#define ptr_unmask_bits(ptr, n) ((unsigned long)(ptr) & (BIT(n) - 1))
-
-#define ptr_unpack_bits(ptr, bits, n) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- *(bits) = __v & (BIT(n) - 1); \
- (typeof(ptr))(__v & -BIT(n)); \
-})
-
-#define ptr_pack_bits(ptr, bits, n) ({ \
- unsigned long __bits = (bits); \
- GEM_BUG_ON(__bits & -BIT(n)); \
- ((typeof(ptr))((unsigned long)(ptr) | __bits)); \
-})
-
-#define ptr_dec(ptr) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v - 1); \
-})
-
-#define ptr_inc(ptr) ({ \
- unsigned long __v = (unsigned long)(ptr); \
- (typeof(ptr))(__v + 1); \
-})
-
-#define page_mask_bits(ptr) ptr_mask_bits(ptr, PAGE_SHIFT)
-#define page_unmask_bits(ptr) ptr_unmask_bits(ptr, PAGE_SHIFT)
-#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
-#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
-
+#ifndef fetch_and_zero
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
__T; \
})
-
-static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
-{
- return a - b;
-}
-
-/*
- * container_of_user: Extract the superclass from a pointer to a member.
- *
- * Exactly like container_of() with the exception that it plays nicely
- * with sparse for __user @ptr.
- */
-#define container_of_user(ptr, type, member) ({ \
- void __user *__mptr = (void __user *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \
- !__same_type(*(ptr), void), \
- "pointer type mismatch in container_of()"); \
- ((type __user *)(__mptr - offsetof(type, member))); })
+#endif
/*
* check_user_mbz: Check that a user value exists and is zero
@@ -167,11 +93,6 @@ static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
})
-#define u64_to_ptr(T, x) ({ \
- typecheck(u64, x); \
- (T *)(uintptr_t)(x); \
-})
-
#define __mask_next_bit(mask) ({ \
int __idx = ffs(mask) - 1; \
mask &= ~BIT(__idx); \
@@ -183,157 +104,6 @@ static inline bool is_power_of_2_u64(u64 n)
return (n != 0 && ((n & (n - 1)) == 0));
}
-static inline void __list_del_many(struct list_head *head,
- struct list_head *first)
-{
- first->prev = head;
- WRITE_ONCE(head->next, first);
-}
-
-static inline int list_is_last_rcu(const struct list_head *list,
- const struct list_head *head)
-{
- return READ_ONCE(list->next) == head;
-}
-
-static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
-{
- unsigned long j = msecs_to_jiffies(m);
-
- return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
-/*
- * If you need to wait X milliseconds between events A and B, but event B
- * doesn't happen exactly after event A, you record the timestamp (jiffies) of
- * when event A happened, then just before event B you call this function and
- * pass the timestamp as the first argument, and X as the second argument.
- */
-static inline void
-wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
-{
- unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
-
- /*
- * Don't re-read the value of "jiffies" every time since it may change
- * behind our back and break the math.
- */
- tmp_jiffies = jiffies;
- target_jiffies = timestamp_jiffies +
- msecs_to_jiffies_timeout(to_wait_ms);
-
- if (time_after(target_jiffies, tmp_jiffies)) {
- remaining_jiffies = target_jiffies - tmp_jiffies;
- while (remaining_jiffies)
- remaining_jiffies =
- schedule_timeout_uninterruptible(remaining_jiffies);
- }
-}
-
-/*
- * __wait_for - magic wait macro
- *
- * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
- * important that we check the condition again after having timed out, since the
- * timeout could be due to preemption or similar and we've never had a chance to
- * check the condition before the timeout.
- */
-#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
- const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
- long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
- int ret__; \
- might_sleep(); \
- for (;;) { \
- const bool expired__ = ktime_after(ktime_get_raw(), end__); \
- OP; \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret__ = 0; \
- break; \
- } \
- if (expired__) { \
- ret__ = -ETIMEDOUT; \
- break; \
- } \
- usleep_range(wait__, wait__ * 2); \
- if (wait__ < (Wmax)) \
- wait__ <<= 1; \
- } \
- ret__; \
-})
-
-#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
- (Wmax))
-#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
-
-/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
-#else
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
-#endif
-
-#define _wait_for_atomic(COND, US, ATOMIC) \
-({ \
- int cpu, ret, timeout = (US) * 1000; \
- u64 base; \
- _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- cpu = smp_processor_id(); \
- } \
- base = local_clock(); \
- for (;;) { \
- u64 now = local_clock(); \
- if (!(ATOMIC)) \
- preempt_enable(); \
- /* Guarantee COND check prior to timeout */ \
- barrier(); \
- if (COND) { \
- ret = 0; \
- break; \
- } \
- if (now - base >= timeout) { \
- ret = -ETIMEDOUT; \
- break; \
- } \
- cpu_relax(); \
- if (!(ATOMIC)) { \
- preempt_disable(); \
- if (unlikely(cpu != smp_processor_id())) { \
- timeout -= now - base; \
- cpu = smp_processor_id(); \
- base = local_clock(); \
- } \
- } \
- } \
- ret; \
-})
-
-#define wait_for_us(COND, US) \
-({ \
- int ret__; \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- if ((US) > 10) \
- ret__ = _wait_for((COND), (US), 10, 10); \
- else \
- ret__ = _wait_for_atomic((COND), (US), 0); \
- ret__; \
-})
-
-#define wait_for_atomic_us(COND, US) \
-({ \
- BUILD_BUG_ON(!__builtin_constant_p(US)); \
- BUILD_BUG_ON((US) > 50000); \
- _wait_for_atomic((COND), (US), 1); \
-})
-
-#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
-
-#define KHz(x) (1000 * (x))
-#define MHz(x) KHz(1000 * (x))
-
void add_taint_for_CI(struct drm_i915_private *i915, unsigned int taint);
static inline void __add_taint_for_CI(unsigned int taint)
{
@@ -346,19 +116,6 @@ static inline void __add_taint_for_CI(unsigned int taint)
add_taint(taint, LOCKDEP_STILL_OK);
}
-void cancel_timer(struct timer_list *t);
-void set_timer_ms(struct timer_list *t, unsigned long timeout);
-
-static inline bool timer_active(const struct timer_list *t)
-{
- return READ_ONCE(t->expires);
-}
-
-static inline bool timer_expired(const struct timer_list *t)
-{
- return timer_active(t) && !timer_pending(t);
-}
-
static inline bool i915_run_as_guest(void)
{
#if IS_ENABLED(CONFIG_X86)
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index c97323973f9b..d29a06ea51a5 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,6 +21,8 @@
* SOFTWARE.
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 25e97031d76e..2c0a63664e13 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -24,7 +24,9 @@
#include <linux/sched/mm.h>
#include <linux/dma-fence-array.h>
+
#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
#include "display/intel_fb.h"
#include "display/intel_frontbuffer.h"
@@ -1595,8 +1597,20 @@ err_unlock:
err_vma_res:
i915_vma_resource_free(vma_res);
err_fence:
- if (work)
- dma_fence_work_commit_imm(&work->base);
+ if (work) {
+ /*
+ * When pinning VMA to GGTT on CHV or BXT with VTD enabled,
+ * commit VMA binding asynchronously to avoid risk of lock
+ * inversion among reservation_ww locks held here and
+ * cpu_hotplug_lock acquired from stop_machine(), which we
+ * wrap around GGTT updates when running in those environments.
+ */
+ if (i915_vma_is_ggtt(vma) &&
+ intel_vm_no_concurrent_access_wa(vma->vm->i915))
+ dma_fence_work_commit(&work->base);
+ else
+ dma_fence_work_commit_imm(&work->base);
+ }
err_rpm:
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
@@ -1990,13 +2004,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
}
if (flags & EXEC_OBJECT_WRITE) {
- struct intel_frontbuffer *front;
+ struct i915_frontbuffer *front;
- front = i915_gem_object_get_frontbuffer(obj);
+ front = i915_gem_object_frontbuffer_lookup(obj);
if (unlikely(front)) {
- if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+ if (intel_frontbuffer_invalidate(&front->base, ORIGIN_CS))
i915_active_add_request(&front->write, rq);
- intel_frontbuffer_put(front);
+ i915_gem_object_frontbuffer_put(front);
}
}
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 0f9eee6d18d2..8054047840aa 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -30,12 +30,12 @@
#include <drm/drm_mm.h>
-#include "gt/intel_ggtt_fencing.h"
#include "gem/i915_gem_object.h"
-
-#include "i915_gem_gtt.h"
+#include "gt/intel_ggtt_fencing.h"
#include "i915_active.h"
+#include "i915_gem_gtt.h"
+#include "i915_ptr_util.h"
#include "i915_request.h"
#include "i915_vma_resource.h"
#include "i915_vma_types.h"
diff --git a/drivers/gpu/drm/i915/i915_wait_util.h b/drivers/gpu/drm/i915/i915_wait_util.h
new file mode 100644
index 000000000000..7376898e3bf8
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_wait_util.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2025 Intel Corporation */
+
+#ifndef __I915_WAIT_UTIL_H__
+#define __I915_WAIT_UTIL_H__
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/sched/clock.h>
+#include <linux/smp.h>
+
+/*
+ * __wait_for - magic wait macro
+ *
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
+ */
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
+ break; \
+ } \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
+ } \
+ ret__; \
+})
+
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
+/*
+ * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false.
+ * On PREEMPT_RT the context isn't becoming atomic because it is used in an
+ * interrupt handler or because a spinlock_t is acquired. This leads to
+ * warnings which don't occur otherwise and therefore the check is disabled.
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+ int cpu, ret, timeout = (US) * 1000; \
+ u64 base; \
+ _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ cpu = smp_processor_id(); \
+ } \
+ base = local_clock(); \
+ for (;;) { \
+ u64 now = local_clock(); \
+ if (!(ATOMIC)) \
+ preempt_enable(); \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret = 0; \
+ break; \
+ } \
+ if (now - base >= timeout) { \
+ ret = -ETIMEDOUT; \
+ break; \
+ } \
+ cpu_relax(); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ if (unlikely(cpu != smp_processor_id())) { \
+ timeout -= now - base; \
+ cpu = smp_processor_id(); \
+ base = local_clock(); \
+ } \
+ } \
+ } \
+ ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+ int ret__; \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ if ((US) > 10) \
+ ret__ = _wait_for((COND), (US), 10, 10); \
+ else \
+ ret__ = _wait_for_atomic((COND), (US), 0); \
+ ret__; \
+})
+
+#define wait_for_atomic_us(COND, US) \
+({ \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ BUILD_BUG_ON((US) > 50000); \
+ _wait_for_atomic((COND), (US), 1); \
+})
+
+#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
+
+#endif /* __I915_WAIT_UTIL_H__ */
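[Editor's sketch] The new header above is self-contained, so its use is easy to illustrate. A minimal, hypothetical caller follows; EXAMPLE_STATUS_REG and EXAMPLE_READY_BIT are placeholders, not registers from this patch. Note that the sleeping variant re-checks COND once more after the deadline (per the comment on __wait_for), and that _wait_for_atomic() rebases its timeout if the non-atomic busy-wait migrates to another CPU.

/* Hypothetical usage of the wait macros added above; the register and
 * bit names are placeholders, not taken from this patch. */
static int example_wait_device_ready(struct intel_uncore *uncore)
{
	/* May sleep: polls for up to 10 ms with usleep_range() backoff. */
	return wait_for(intel_uncore_read(uncore, EXAMPLE_STATUS_REG) &
			EXAMPLE_READY_BIT, 10);
}

static int example_wait_device_ready_atomic(struct intel_uncore *uncore)
{
	/* Atomic context: pure busy-wait, at most 100 us (must be a
	 * compile-time constant and no larger than 50000). */
	return wait_for_atomic_us(intel_uncore_read(uncore, EXAMPLE_STATUS_REG) &
				  EXAMPLE_READY_BIT, 100);
}

Both return 0 on success or -ETIMEDOUT, matching the macro bodies above.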
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index f86a3629ae9e..175a240ac848 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -25,6 +25,8 @@
*
*/
+#include <drm/drm_print.h>
+
#include "display/i9xx_plane_regs.h"
#include "display/intel_display.h"
#include "display/intel_display_core.h"
@@ -132,16 +134,17 @@ static void ibx_init_clock_gating(struct drm_i915_private *i915)
static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = dev_priv->display;
enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
- intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(dev_priv, pipe),
+ for_each_pipe(display, pipe) {
+ intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(display, pipe),
0, DISP_TRICKLE_FEED_DISABLE);
- intel_uncore_rmw(&dev_priv->uncore, DSPSURF(dev_priv, pipe),
+ intel_uncore_rmw(&dev_priv->uncore, DSPSURF(display, pipe),
0, 0);
intel_uncore_posting_read(&dev_priv->uncore,
- DSPSURF(dev_priv, pipe));
+ DSPSURF(display, pipe));
}
}
@@ -218,7 +221,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915)
/* The below fixes the weird display corruption, a few pixels shifted
* downward, on (only) LVDS of some HP laptops with IVY.
*/
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
val = intel_uncore_read(&i915->uncore, TRANS_CHICKEN2(pipe));
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
@@ -229,7 +232,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write(&i915->uncore, TRANS_CHICKEN2(pipe), val);
}
/* WADP0ClockGatingDisable */
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
intel_uncore_write(&i915->uncore, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
@@ -307,11 +310,13 @@ static void gen6_init_clock_gating(struct drm_i915_private *i915)
static void lpt_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
+
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
- if (HAS_PCH_LPT_LP(i915))
+ if (HAS_PCH_LPT_LP(display))
intel_uncore_rmw(&i915->uncore, SOUTH_DSPCLK_GATE_D,
0, PCH_LP_PARTITION_LEVEL_DISABLE);
@@ -355,7 +360,9 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915)
static void cnp_init_clock_gating(struct drm_i915_private *i915)
{
- if (!HAS_PCH_CNP(i915))
+ struct intel_display *display = i915->display;
+
+ if (!HAS_PCH_CNP(display))
return;
/* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */
@@ -421,6 +428,7 @@ static void skl_init_clock_gating(struct drm_i915_private *i915)
static void bdw_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
@@ -432,7 +440,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
/* WaPsrDPAMaskVBlankInSRD:bdw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe),
0, BDW_UNMASK_VBL_TO_REGS_IN_SRD);
@@ -468,6 +476,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
static void hsw_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
enum pipe pipe;
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
@@ -476,7 +485,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *i915)
/* WaPsrDPAMaskVBlankInSRD:hsw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD);
- for_each_pipe(i915, pipe) {
+ for_each_pipe(display, pipe) {
/* WaPsrDPRSUnmaskVBlankInSRD:hsw */
intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe),
0, HSW_UNMASK_VBL_TO_REGS_IN_SRD);
@@ -494,6 +503,8 @@ static void hsw_init_clock_gating(struct drm_i915_private *i915)
static void ivb_init_clock_gating(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
+
intel_uncore_write(&i915->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
@@ -531,7 +542,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_rmw(&i915->uncore, GEN6_MBCUNIT_SNPCR, GEN6_MBC_SNPCR_MASK,
GEN6_MBC_SNPCR_MED);
- if (!HAS_PCH_NOP(i915))
+ if (!HAS_PCH_NOP(display))
cpt_init_clock_gating(i915);
gen6_check_mch_setup(i915);
@@ -611,7 +622,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *i915)
OVCUNIT_CLOCK_GATE_DISABLE;
if (IS_GM45(i915))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
- intel_uncore_write(&i915->uncore, DSPCLK_GATE_D(i915), dspclk_gate);
+ intel_uncore_write(&i915->uncore, DSPCLK_GATE_D, dspclk_gate);
g4x_disable_trickle_feed(i915);
}
@@ -622,7 +633,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
intel_uncore_write(uncore, RENCLK_GATE_D2, 0);
- intel_uncore_write(uncore, DSPCLK_GATE_D(i915), 0);
+ intel_uncore_write(uncore, DSPCLK_GATE_D, 0);
intel_uncore_write(uncore, RAMCLK_GATE_D, 0);
intel_uncore_write16(uncore, DEUC, 0);
intel_uncore_write(uncore,
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index dae9dce7d1b3..c3efc3454ec2 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -21,6 +21,8 @@
* SOFTWARE.
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_gvt.h"
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 87ac4446d306..ca57a3dd3148 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -62,6 +62,7 @@
static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
+ struct intel_display *display = dev_priv->display;
MMIO_RING_D(RING_IMR);
MMIO_D(SDEIMR);
@@ -133,38 +134,38 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(_MMIO(0x650b4));
MMIO_D(_MMIO(0xc4040));
MMIO_D(DERRMR);
- MMIO_D(PIPEDSL(dev_priv, PIPE_A));
- MMIO_D(PIPEDSL(dev_priv, PIPE_B));
- MMIO_D(PIPEDSL(dev_priv, PIPE_C));
- MMIO_D(PIPEDSL(dev_priv, _PIPE_EDP));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_A));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_B));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_C));
- MMIO_D(TRANSCONF(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPESTAT(dev_priv, PIPE_A));
- MMIO_D(PIPESTAT(dev_priv, PIPE_B));
- MMIO_D(PIPESTAT(dev_priv, PIPE_C));
- MMIO_D(PIPESTAT(dev_priv, _PIPE_EDP));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_A));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_B));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_C));
- MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, _PIPE_EDP));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_A));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_B));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_C));
- MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, _PIPE_EDP));
- MMIO_D(CURCNTR(dev_priv, PIPE_A));
- MMIO_D(CURCNTR(dev_priv, PIPE_B));
- MMIO_D(CURCNTR(dev_priv, PIPE_C));
- MMIO_D(CURPOS(dev_priv, PIPE_A));
- MMIO_D(CURPOS(dev_priv, PIPE_B));
- MMIO_D(CURPOS(dev_priv, PIPE_C));
- MMIO_D(CURBASE(dev_priv, PIPE_A));
- MMIO_D(CURBASE(dev_priv, PIPE_B));
- MMIO_D(CURBASE(dev_priv, PIPE_C));
- MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_A));
- MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_B));
- MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_C));
+ MMIO_D(PIPEDSL(display, PIPE_A));
+ MMIO_D(PIPEDSL(display, PIPE_B));
+ MMIO_D(PIPEDSL(display, PIPE_C));
+ MMIO_D(PIPEDSL(display, _PIPE_EDP));
+ MMIO_D(TRANSCONF(display, TRANSCODER_A));
+ MMIO_D(TRANSCONF(display, TRANSCODER_B));
+ MMIO_D(TRANSCONF(display, TRANSCODER_C));
+ MMIO_D(TRANSCONF(display, TRANSCODER_EDP));
+ MMIO_D(PIPESTAT(display, PIPE_A));
+ MMIO_D(PIPESTAT(display, PIPE_B));
+ MMIO_D(PIPESTAT(display, PIPE_C));
+ MMIO_D(PIPESTAT(display, _PIPE_EDP));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_A));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_B));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_C));
+ MMIO_D(PIPE_FLIPCOUNT_G4X(display, _PIPE_EDP));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_A));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_B));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_C));
+ MMIO_D(PIPE_FRMCOUNT_G4X(display, _PIPE_EDP));
+ MMIO_D(CURCNTR(display, PIPE_A));
+ MMIO_D(CURCNTR(display, PIPE_B));
+ MMIO_D(CURCNTR(display, PIPE_C));
+ MMIO_D(CURPOS(display, PIPE_A));
+ MMIO_D(CURPOS(display, PIPE_B));
+ MMIO_D(CURPOS(display, PIPE_C));
+ MMIO_D(CURBASE(display, PIPE_A));
+ MMIO_D(CURBASE(display, PIPE_B));
+ MMIO_D(CURBASE(display, PIPE_C));
+ MMIO_D(CUR_FBC_CTL(display, PIPE_A));
+ MMIO_D(CUR_FBC_CTL(display, PIPE_B));
+ MMIO_D(CUR_FBC_CTL(display, PIPE_C));
MMIO_D(_MMIO(0x700ac));
MMIO_D(_MMIO(0x710ac));
MMIO_D(_MMIO(0x720ac));
@@ -172,32 +173,32 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(_MMIO(0x70094));
MMIO_D(_MMIO(0x70098));
MMIO_D(_MMIO(0x7009c));
- MMIO_D(DSPCNTR(dev_priv, PIPE_A));
- MMIO_D(DSPADDR(dev_priv, PIPE_A));
- MMIO_D(DSPSTRIDE(dev_priv, PIPE_A));
- MMIO_D(DSPPOS(dev_priv, PIPE_A));
- MMIO_D(DSPSIZE(dev_priv, PIPE_A));
- MMIO_D(DSPSURF(dev_priv, PIPE_A));
- MMIO_D(DSPOFFSET(dev_priv, PIPE_A));
- MMIO_D(DSPSURFLIVE(dev_priv, PIPE_A));
+ MMIO_D(DSPCNTR(display, PIPE_A));
+ MMIO_D(DSPADDR(display, PIPE_A));
+ MMIO_D(DSPSTRIDE(display, PIPE_A));
+ MMIO_D(DSPPOS(display, PIPE_A));
+ MMIO_D(DSPSIZE(display, PIPE_A));
+ MMIO_D(DSPSURF(display, PIPE_A));
+ MMIO_D(DSPOFFSET(display, PIPE_A));
+ MMIO_D(DSPSURFLIVE(display, PIPE_A));
MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY));
- MMIO_D(DSPCNTR(dev_priv, PIPE_B));
- MMIO_D(DSPADDR(dev_priv, PIPE_B));
- MMIO_D(DSPSTRIDE(dev_priv, PIPE_B));
- MMIO_D(DSPPOS(dev_priv, PIPE_B));
- MMIO_D(DSPSIZE(dev_priv, PIPE_B));
- MMIO_D(DSPSURF(dev_priv, PIPE_B));
- MMIO_D(DSPOFFSET(dev_priv, PIPE_B));
- MMIO_D(DSPSURFLIVE(dev_priv, PIPE_B));
+ MMIO_D(DSPCNTR(display, PIPE_B));
+ MMIO_D(DSPADDR(display, PIPE_B));
+ MMIO_D(DSPSTRIDE(display, PIPE_B));
+ MMIO_D(DSPPOS(display, PIPE_B));
+ MMIO_D(DSPSIZE(display, PIPE_B));
+ MMIO_D(DSPSURF(display, PIPE_B));
+ MMIO_D(DSPOFFSET(display, PIPE_B));
+ MMIO_D(DSPSURFLIVE(display, PIPE_B));
MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY));
- MMIO_D(DSPCNTR(dev_priv, PIPE_C));
- MMIO_D(DSPADDR(dev_priv, PIPE_C));
- MMIO_D(DSPSTRIDE(dev_priv, PIPE_C));
- MMIO_D(DSPPOS(dev_priv, PIPE_C));
- MMIO_D(DSPSIZE(dev_priv, PIPE_C));
- MMIO_D(DSPSURF(dev_priv, PIPE_C));
- MMIO_D(DSPOFFSET(dev_priv, PIPE_C));
- MMIO_D(DSPSURFLIVE(dev_priv, PIPE_C));
+ MMIO_D(DSPCNTR(display, PIPE_C));
+ MMIO_D(DSPADDR(display, PIPE_C));
+ MMIO_D(DSPSTRIDE(display, PIPE_C));
+ MMIO_D(DSPPOS(display, PIPE_C));
+ MMIO_D(DSPSIZE(display, PIPE_C));
+ MMIO_D(DSPSURF(display, PIPE_C));
+ MMIO_D(DSPOFFSET(display, PIPE_C));
+ MMIO_D(DSPSURFLIVE(display, PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY));
MMIO_D(SPRCTL(PIPE_A));
MMIO_D(SPRLINOFF(PIPE_A));
@@ -238,73 +239,73 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(SPRSCALE(PIPE_C));
MMIO_D(SPRSURFLIVE(PIPE_C));
MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_A));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_A));
- MMIO_D(PIPESRC(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_B));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_B));
- MMIO_D(PIPESRC(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_C));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_C));
- MMIO_D(PIPESRC(dev_priv, TRANSCODER_C));
- MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_EDP));
- MMIO_D(BCLRPAT(dev_priv, TRANSCODER_EDP));
- MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_A));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_B));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_C));
- MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_EDP));
- MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_EDP));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_A));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_A));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_A));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_A));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_A));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_A));
+ MMIO_D(BCLRPAT(display, TRANSCODER_A));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_A));
+ MMIO_D(PIPESRC(display, TRANSCODER_A));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_B));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_B));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_B));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_B));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_B));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_B));
+ MMIO_D(BCLRPAT(display, TRANSCODER_B));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_B));
+ MMIO_D(PIPESRC(display, TRANSCODER_B));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_C));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_C));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_C));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_C));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_C));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_C));
+ MMIO_D(BCLRPAT(display, TRANSCODER_C));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_C));
+ MMIO_D(PIPESRC(display, TRANSCODER_C));
+ MMIO_D(TRANS_HTOTAL(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_HBLANK(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_HSYNC(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VTOTAL(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VBLANK(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNC(display, TRANSCODER_EDP));
+ MMIO_D(BCLRPAT(display, TRANSCODER_EDP));
+ MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_A));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_A));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_B));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_B));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_C));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_C));
+ MMIO_D(PIPE_DATA_M1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_M2(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_DATA_N2(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N1(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_M2(display, TRANSCODER_EDP));
+ MMIO_D(PIPE_LINK_N2(display, TRANSCODER_EDP));
MMIO_D(PF_CTL(PIPE_A));
MMIO_D(PF_WIN_SZ(PIPE_A));
MMIO_D(PF_WIN_POS(PIPE_A));
@@ -513,12 +514,12 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GAMMA_MODE(PIPE_A));
MMIO_D(GAMMA_MODE(PIPE_B));
MMIO_D(GAMMA_MODE(PIPE_C));
- MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_A));
- MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_B));
- MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_C));
- MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_A));
- MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_B));
- MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_C));
+ MMIO_D(TRANS_MULT(display, TRANSCODER_A));
+ MMIO_D(TRANS_MULT(display, TRANSCODER_B));
+ MMIO_D(TRANS_MULT(display, TRANSCODER_C));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_C));
MMIO_D(SFUSE_STRAP);
MMIO_D(SBI_ADDR);
MMIO_D(SBI_DATA);
@@ -1111,6 +1112,7 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
{
struct drm_i915_private *dev_priv = iter->i915;
+ struct intel_display *display = dev_priv->display;
MMIO_F(_MMIO(0x80000), 0x3000);
MMIO_D(GEN7_SAMPLER_INSTDONE);
@@ -1242,9 +1244,9 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(BXT_DSI_PLL_ENABLE);
MMIO_D(GEN9_CLKGATE_DIS_0);
MMIO_D(GEN9_CLKGATE_DIS_4);
- MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_A));
- MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_B));
- MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_C));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_A));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_B));
+ MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_C));
MMIO_D(RC6_CTX_BASE);
MMIO_D(GEN8_PUSHBUS_CONTROL);
MMIO_D(GEN8_PUSHBUS_ENABLE);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 59bd603e6deb..ce722f20cab1 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -5,6 +5,7 @@
#include <linux/prandom.h>
+#include <drm/drm_print.h>
#include <uapi/drm/i915_drm.h>
#include "intel_memory_region.h"
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 81da75108c60..756652b8ec97 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -3,8 +3,11 @@
* Copyright © 2013-2021 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_wait_util.h"
#include "intel_pcode.h"
static int gen6_check_mailbox_status(u32 mbox)
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index 04525d92bec5..47a69aad5c3f 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -34,7 +34,7 @@ int intel_region_ttm_device_init(struct drm_i915_private *dev_priv)
return ttm_device_init(&dev_priv->bdev, i915_ttm_driver(),
drm->dev, drm->anon_inode->i_mapping,
- drm->vma_offset_manager, false, false);
+ drm->vma_offset_manager, 0);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7ce3e6de0c19..d11c2814b787 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -29,6 +29,7 @@
#include <linux/pm_runtime.h>
#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -177,6 +178,82 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
return track_intel_runtime_pm_wakeref(rpm);
}
+static struct intel_runtime_pm *drm_to_rpm(const struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ return &i915->runtime_pm;
+}
+
+static struct ref_tracker *i915_display_rpm_get(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_raw(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get_raw(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_if_in_use(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get_if_in_use(drm_to_rpm(drm));
+}
+
+static struct ref_tracker *i915_display_rpm_get_noresume(const struct drm_device *drm)
+{
+ return intel_runtime_pm_get_noresume(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put(drm_to_rpm(drm), wakeref);
+}
+
+static void i915_display_rpm_put_raw(const struct drm_device *drm, struct ref_tracker *wakeref)
+{
+ intel_runtime_pm_put_raw(drm_to_rpm(drm), wakeref);
+}
+
+static void i915_display_rpm_put_unchecked(const struct drm_device *drm)
+{
+ intel_runtime_pm_put_unchecked(drm_to_rpm(drm));
+}
+
+static bool i915_display_rpm_suspended(const struct drm_device *drm)
+{
+ return intel_runtime_pm_suspended(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_held(const struct drm_device *drm)
+{
+ assert_rpm_wakelock_held(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_block(const struct drm_device *drm)
+{
+ disable_rpm_wakeref_asserts(drm_to_rpm(drm));
+}
+
+static void i915_display_rpm_assert_unblock(const struct drm_device *drm)
+{
+ enable_rpm_wakeref_asserts(drm_to_rpm(drm));
+}
+
+const struct intel_display_rpm_interface i915_display_rpm_interface = {
+ .get = i915_display_rpm_get,
+ .get_raw = i915_display_rpm_get_raw,
+ .get_if_in_use = i915_display_rpm_get_if_in_use,
+ .get_noresume = i915_display_rpm_get_noresume,
+ .put = i915_display_rpm_put,
+ .put_raw = i915_display_rpm_put_raw,
+ .put_unchecked = i915_display_rpm_put_unchecked,
+ .suspended = i915_display_rpm_suspended,
+ .assert_held = i915_display_rpm_assert_held,
+ .assert_block = i915_display_rpm_assert_block,
+ .assert_unblock = i915_display_rpm_assert_unblock
+};
+
/**
* intel_runtime_pm_get_raw - grab a raw runtime pm reference
* @rpm: the intel_runtime_pm structure
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 7428bd8fa67f..ed6c43b17f9a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -14,6 +14,7 @@
struct device;
struct drm_i915_private;
struct drm_printer;
+struct intel_display_rpm_interface;
/*
* This struct helps tracking the state needed for runtime PM, which puts the
@@ -226,4 +227,6 @@ static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
}
#endif
+extern const struct intel_display_rpm_interface i915_display_rpm_interface;
+
#endif /* __INTEL_RUNTIME_PM_H__ */
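[Editor's sketch] The vtable exported above lets display code take and drop runtime-PM references through the parent driver without seeing struct intel_runtime_pm. A hedged sketch of a consumer; the helper is hypothetical, but the callback signatures follow the static functions in the intel_runtime_pm.c hunk above.

/* Hypothetical display-side consumer of the RPM vtable above. */
static void example_display_rpm_user(const struct drm_device *drm,
				     const struct intel_display_rpm_interface *rpm)
{
	struct ref_tracker *wakeref;

	wakeref = rpm->get(drm);	/* intel_runtime_pm_get() in i915 */

	/* ... touch display hardware while the wakeref is held ... */
	rpm->assert_held(drm);

	rpm->put(drm, wakeref);		/* intel_runtime_pm_put() */
}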
diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c
index 285b96fadfd5..60a2af5307fc 100644
--- a/drivers/gpu/drm/i915/intel_step.c
+++ b/drivers/gpu/drm/i915/intel_step.c
@@ -3,6 +3,8 @@
* Copyright © 2020,2021 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "intel_step.h"
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index c8e29fd72290..4adeb271fcbf 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,19 +21,22 @@
* IN THE SOFTWARE.
*/
-#include <drm/drm_managed.h>
#include <linux/pm_runtime.h>
-#include "display/intel_display_core.h"
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
-#include "gt/intel_gt.h"
+#include "display/intel_display_core.h"
#include "gt/intel_engine_regs.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
#include "i915_vgpu.h"
+#include "i915_wait_util.h"
+#include "i915_mmio_range.h"
#include "intel_uncore_trace.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -998,7 +1001,7 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
* scanned for obvious mistakes or typos by the selftests.
*/
-static const struct i915_range gen8_shadowed_regs[] = {
+static const struct i915_mmio_range gen8_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0xA008, .end = 0xA00C },
{ .start = 0x12030, .end = 0x12030 },
@@ -1006,7 +1009,7 @@ static const struct i915_range gen8_shadowed_regs[] = {
{ .start = 0x22030, .end = 0x22030 },
};
-static const struct i915_range gen11_shadowed_regs[] = {
+static const struct i915_mmio_range gen11_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2550, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1033,7 +1036,7 @@ static const struct i915_range gen11_shadowed_regs[] = {
{ .start = 0x1D8510, .end = 0x1D8550 },
};
-static const struct i915_range gen12_shadowed_regs[] = {
+static const struct i915_mmio_range gen12_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1077,7 +1080,7 @@ static const struct i915_range gen12_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
-static const struct i915_range dg2_shadowed_regs[] = {
+static const struct i915_mmio_range dg2_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1116,7 +1119,7 @@ static const struct i915_range dg2_shadowed_regs[] = {
{ .start = 0x1F8510, .end = 0x1F8550 },
};
-static const struct i915_range mtl_shadowed_regs[] = {
+static const struct i915_mmio_range mtl_shadowed_regs[] = {
{ .start = 0x2030, .end = 0x2030 },
{ .start = 0x2510, .end = 0x2550 },
{ .start = 0xA008, .end = 0xA00C },
@@ -1134,7 +1137,7 @@ static const struct i915_range mtl_shadowed_regs[] = {
{ .start = 0x22510, .end = 0x22550 },
};
-static const struct i915_range xelpmp_shadowed_regs[] = {
+static const struct i915_mmio_range xelpmp_shadowed_regs[] = {
{ .start = 0x1C0030, .end = 0x1C0030 },
{ .start = 0x1C0510, .end = 0x1C0550 },
{ .start = 0x1C8030, .end = 0x1C8030 },
@@ -1155,7 +1158,7 @@ static const struct i915_range xelpmp_shadowed_regs[] = {
{ .start = 0x38CFD4, .end = 0x38CFDC },
};
-static int mmio_range_cmp(u32 key, const struct i915_range *range)
+static int mmio_range_cmp(u32 key, const struct i915_mmio_range *range)
{
if (key < range->start)
return -1;
@@ -2502,6 +2505,7 @@ static int sanity_check_mmio_access(struct intel_uncore *uncore)
int intel_uncore_init_mmio(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
+ struct intel_display *display = i915->display;
int ret;
ret = sanity_check_mmio_access(uncore);
@@ -2536,7 +2540,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
- if (HAS_FPGA_DBG_UNCLAIMED(i915))
+ if (HAS_FPGA_DBG_UNCLAIMED(display))
uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 6048b99b96cb..fafc2ca9a237 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -123,12 +123,6 @@ struct intel_forcewake_range {
enum forcewake_domains domains;
};
-/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
-struct i915_range {
- u32 start;
- u32 end;
-};
-
struct intel_uncore {
void __iomem *regs;
@@ -162,7 +156,7 @@ struct intel_uncore {
* Shadowed registers are special cases where we can safely write
* to the register *without* grabbing forcewake.
*/
- const struct i915_range *shadowed_reg_table;
+ const struct i915_mmio_range *shadowed_reg_table;
unsigned int shadowed_reg_table_entries;
struct notifier_block pmic_bus_access_nb;
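[Editor's sketch] The i915_range to i915_mmio_range rename above is mechanical: the shadow tables remain sorted, non-overlapping {start, end} spans probed via mmio_range_cmp(). An illustrative, self-contained equivalent of that lookup, assuming i915_mmio_range keeps the same start/end fields as the struct it replaces; the helper name is ours, not the driver's.

/* Illustrative binary search over a sorted i915_mmio_range table,
 * equivalent in spirit to the mmio_range_cmp()-based lookup above. */
static bool example_offset_in_table(const struct i915_mmio_range *table,
				    unsigned int count, u32 offset)
{
	unsigned int lo = 0, hi = count;

	while (lo < hi) {
		unsigned int mid = lo + (hi - lo) / 2;

		if (offset < table[mid].start)
			hi = mid;
		else if (offset > table[mid].end)
			lo = mid + 1;
		else
			return true;	/* offset lies within table[mid] */
	}

	return false;
}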
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 7fa194de5d35..b1883dccc22a 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -6,6 +6,8 @@
#include <linux/wait_bit.h>
+#include <drm/drm_print.h>
+
#include "intel_runtime_pm.h"
#include "intel_wakeref.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index f8da693ad3ce..d4b0c76f335b 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -2,15 +2,17 @@
/*
* Copyright(c) 2020 Intel Corporation.
*/
+
#include <linux/workqueue.h>
-#include "gem/i915_gem_context.h"
+#include <drm/drm_print.h>
+#include "gem/i915_gem_context.h"
#include "gt/intel_context.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
-
+#include "i915_wait_util.h"
#include "intel_pxp.h"
#include "intel_pxp_gsccs.h"
#include "intel_pxp_irq.h"
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index e07c5b380789..545f79eb0cc5 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -69,17 +69,17 @@ DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set
void intel_pxp_debugfs_register(struct intel_pxp *pxp)
{
- struct drm_minor *minor;
+ struct dentry *debugfs_root;
struct dentry *pxproot;
if (!intel_pxp_is_supported(pxp))
return;
- minor = pxp->ctrl_gt->i915->drm.primary;
- if (!minor->debugfs_root)
+ debugfs_root = pxp->ctrl_gt->i915->drm.debugfs_root;
+ if (!debugfs_root)
return;
- pxproot = debugfs_create_dir("pxp", minor->debugfs_root);
+ pxproot = debugfs_create_dir("pxp", debugfs_root);
if (IS_ERR(pxproot))
return;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
index 75df959b0aa0..2763773e627d 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_gsccs.c
@@ -3,6 +3,8 @@
* Copyright(c) 2023 Intel Corporation.
*/
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_internal.h"
#include "gt/intel_context.h"
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
index 0e609547bef8..9fc575a3d0d5 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
@@ -3,6 +3,8 @@
* Copyright(c) 2021-2022, Intel Corporation. All rights reserved.
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "gem/i915_gem_region.h"
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 091c86e03d1a..1e63261b620f 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -3,6 +3,8 @@
* Copyright(c) 2020, Intel Corporation. All rights reserved.
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "intel_pxp.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 0d89d70b9c36..36c3a5460221 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -7,6 +7,8 @@
#include <linux/kref.h>
#include <linux/string_helpers.h>
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 7ab4c4e60264..0a86e4857539 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1118,6 +1118,10 @@ static int misaligned_case(struct i915_address_space *vm, struct intel_memory_re
goto err_put;
}
+ /* make sure page_sizes_gtt has been populated before use */
+ if (i915_is_ggtt(vm) && intel_vm_no_concurrent_access_wa(vm->i915))
+ i915_vma_wait_for_bind(vma);
+
expected_vma_size = round_up(size, 1 << (ffs(vma->resource->page_sizes_gtt) - 1));
expected_node_size = expected_vma_size;
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 2fb7a9e7efec..1260601bda1f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -22,14 +22,15 @@
*
*/
-#include <linux/prime_numbers.h>
#include <linux/pm_qos.h>
+#include <linux/prime_numbers.h>
#include <linux/sort.h>
+#include <drm/drm_print.h>
+
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_pm.h"
#include "gem/selftests/mock_context.h"
-
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
@@ -40,11 +41,11 @@
#include "i915_random.h"
#include "i915_selftest.h"
+#include "i915_wait_util.h"
#include "igt_flush_test.h"
#include "igt_live_test.h"
#include "igt_spinner.h"
#include "lib_sw_fence.h"
-
#include "mock_drm.h"
#include "mock_gem_device.h"
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 889281819c5b..8460f0a70d04 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -30,8 +30,9 @@
#include "i915_driver.h"
#include "i915_drv.h"
+#include "i915_jiffies.h"
#include "i915_selftest.h"
-
+#include "i915_wait_util.h"
#include "igt_flush_test.h"
struct i915_selftest i915_selftest __read_mostly = {
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 8c3e1f20e5a1..820364171ebe 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -3,12 +3,13 @@
*
* Copyright © 2018 Intel Corporation
*/
-#include "gt/intel_gpu_commands.h"
-#include "gt/intel_gt.h"
#include "gem/i915_gem_internal.h"
#include "gem/selftests/igt_gem_utils.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "i915_wait_util.h"
#include "igt_spinner.h"
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 41eaa9b7f67d..507bf42a1aaf 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -64,7 +64,7 @@ static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
static int intel_shadow_table_check(void)
{
struct {
- const struct i915_range *regs;
+ const struct i915_mmio_range *regs;
unsigned int size;
} range_lists[] = {
{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
@@ -74,7 +74,7 @@ static int intel_shadow_table_check(void)
{ mtl_shadowed_regs, ARRAY_SIZE(mtl_shadowed_regs) },
{ xelpmp_shadowed_regs, ARRAY_SIZE(xelpmp_shadowed_regs) },
};
- const struct i915_range *range;
+ const struct i915_mmio_range *range;
unsigned int i, j;
s32 prev;
@@ -277,13 +277,15 @@ static int live_forcewake_domains(void *arg)
#define FW_RANGE 0x40000
struct intel_gt *gt = arg;
struct intel_uncore *uncore = gt->uncore;
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_display *display = i915->display;
unsigned long *valid;
u32 offset;
int err;
- if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
- !IS_VALLEYVIEW(gt->i915) &&
- !IS_CHERRYVIEW(gt->i915))
+ if (!HAS_FPGA_DBG_UNCLAIMED(display) &&
+ !IS_VALLEYVIEW(i915) &&
+ !IS_CHERRYVIEW(i915))
return 0;
/*
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index fb8751bd5df0..b59626c4994c 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -33,6 +33,7 @@
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
+#include "i915_driver.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
@@ -183,7 +184,8 @@ struct drm_i915_private *mock_gem_device(void)
/* Set up device info and initial runtime info. */
intel_device_info_driver_create(i915, pdev->device, &mock_info);
- display = intel_display_device_probe(pdev);
+ /* FIXME: Can we run selftests using a mock device without display? */
+ display = intel_display_device_probe(pdev, i915_driver_parent_interface());
if (IS_ERR(display))
goto err_device;
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index deb159548a09..3e588762709a 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
@@ -6,11 +6,13 @@
#include <linux/string_helpers.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "../display/intel_display_core.h" /* FIXME */
#include "i915_drv.h"
#include "i915_reg.h"
+#include "i915_utils.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
@@ -30,10 +32,11 @@ struct dram_channel_info {
#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
-static const char *intel_dram_type_str(enum intel_dram_type type)
+const char *intel_dram_type_str(enum intel_dram_type type)
{
static const char * const str[] = {
DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR2),
DRAM_TYPE_STR(DDR3),
DRAM_TYPE_STR(DDR4),
DRAM_TYPE_STR(LPDDR3),
@@ -54,9 +57,10 @@ static const char *intel_dram_type_str(enum intel_dram_type type)
#undef DRAM_TYPE_STR
-static bool pnv_is_ddr3(struct drm_i915_private *i915)
+static enum intel_dram_type pnv_dram_type(struct drm_i915_private *i915)
{
- return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3;
+ return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3 ?
+ INTEL_DRAM_DDR3 : INTEL_DRAM_DDR2;
}
static unsigned int pnv_mem_freq(struct drm_i915_private *dev_priv)
@@ -135,25 +139,21 @@ static unsigned int vlv_mem_freq(struct drm_i915_private *i915)
return 0;
}
-static void detect_mem_freq(struct drm_i915_private *i915)
+unsigned int intel_mem_freq(struct drm_i915_private *i915)
{
if (IS_PINEVIEW(i915))
- i915->mem_freq = pnv_mem_freq(i915);
+ return pnv_mem_freq(i915);
else if (GRAPHICS_VER(i915) == 5)
- i915->mem_freq = ilk_mem_freq(i915);
+ return ilk_mem_freq(i915);
else if (IS_CHERRYVIEW(i915))
- i915->mem_freq = chv_mem_freq(i915);
+ return chv_mem_freq(i915);
else if (IS_VALLEYVIEW(i915))
- i915->mem_freq = vlv_mem_freq(i915);
-
- if (IS_PINEVIEW(i915))
- i915->is_ddr3 = pnv_is_ddr3(i915);
-
- if (i915->mem_freq)
- drm_dbg(&i915->drm, "DDR speed: %d kHz\n", i915->mem_freq);
+ return vlv_mem_freq(i915);
+ else
+ return 0;
}
-unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
+static unsigned int i9xx_fsb_freq(struct drm_i915_private *i915)
{
u32 fsb;
@@ -235,15 +235,30 @@ static unsigned int ilk_fsb_freq(struct drm_i915_private *dev_priv)
}
}
-static void detect_fsb_freq(struct drm_i915_private *i915)
+unsigned int intel_fsb_freq(struct drm_i915_private *i915)
{
if (GRAPHICS_VER(i915) == 5)
- i915->fsb_freq = ilk_fsb_freq(i915);
+ return ilk_fsb_freq(i915);
else if (GRAPHICS_VER(i915) == 3 || GRAPHICS_VER(i915) == 4)
- i915->fsb_freq = i9xx_fsb_freq(i915);
+ return i9xx_fsb_freq(i915);
+ else
+ return 0;
+}
- if (i915->fsb_freq)
- drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", i915->fsb_freq);
+static int i915_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
+{
+ dram_info->fsb_freq = intel_fsb_freq(i915);
+ if (dram_info->fsb_freq)
+ drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", dram_info->fsb_freq);
+
+ dram_info->mem_freq = intel_mem_freq(i915);
+ if (dram_info->mem_freq)
+ drm_dbg(&i915->drm, "DDR speed: %d kHz\n", dram_info->mem_freq);
+
+ if (IS_PINEVIEW(i915))
+ dram_info->type = pnv_dram_type(i915);
+
+ return 0;
}
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
@@ -321,7 +336,7 @@ static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
/* Convert total Gb to Gb per DRAM device */
- return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+ return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) >= 16;
}
static void
@@ -340,7 +355,7 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915,
}
drm_dbg_kms(&i915->drm,
- "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb+ DIMMs: %s\n",
channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
str_yes_no(skl_is_16gb_dimm(dimm)));
}
@@ -370,7 +385,7 @@ skl_dram_get_channel_info(struct drm_i915_private *i915,
ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
skl_is_16gb_dimm(&ch->dimm_s);
- drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb+ DIMMs: %s\n",
channel, ch->ranks, str_yes_no(ch->is_16gb_dimm));
return 0;
@@ -392,6 +407,9 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
u32 val;
int ret;
+ /* Assume 16Gb+ DIMMs are present until proven otherwise */
+ dram_info->has_16gb_dimms = true;
+
val = intel_uncore_read(&i915->uncore,
SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
@@ -414,13 +432,16 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram
return -EINVAL;
}
- dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+ dram_info->has_16gb_dimms = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
str_yes_no(dram_info->symmetric_memory));
+ drm_dbg_kms(&i915->drm, "16Gb+ DIMMs: %s\n",
+ str_yes_no(dram_info->has_16gb_dimms));
+
return 0;
}
@@ -649,8 +670,9 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv,
static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- int ret = skl_get_dram_info(i915, dram_info);
+ int ret;
+ ret = skl_dram_get_channels_info(i915, dram_info);
if (ret)
return ret;
@@ -659,13 +681,12 @@ static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *
static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
- dram_info->wm_lv_0_adjust_needed = false;
-
return icl_pcode_read_mem_global_info(i915, dram_info);
}
static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info)
{
+ struct intel_display *display = i915->display;
u32 val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_GLOBAL);
switch (REG_FIELD_GET(MTL_DDR_TYPE_MASK, val)) {
@@ -704,18 +725,19 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info
dram_info->num_qgv_points = REG_FIELD_GET(MTL_N_OF_ENABLED_QGV_POINTS_MASK, val);
/* PSF GV points not supported in D14+ */
+ if (DISPLAY_VER(display) >= 35)
+ dram_info->ecc_impacting_de_bw = REG_FIELD_GET(XE3P_ECC_IMPACTING_DE, val);
+
return 0;
}
int intel_dram_detect(struct drm_i915_private *i915)
{
+ struct intel_display *display = i915->display;
struct dram_info *dram_info;
int ret;
- detect_fsb_freq(i915);
- detect_mem_freq(i915);
-
- if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
+ if (IS_DG2(i915) || !intel_display_device_present(display))
return 0;
dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL);
@@ -724,13 +746,7 @@ int intel_dram_detect(struct drm_i915_private *i915)
i915->dram_info = dram_info;
- /*
- * Assume level 0 watermark latency adjustment is needed until proven
- * otherwise, this w/a is not needed by bxt/glk.
- */
- dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915);
-
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
ret = xelpdp_get_dram_info(i915, dram_info);
else if (GRAPHICS_VER(i915) >= 12)
ret = gen12_get_dram_info(i915, dram_info);
@@ -738,23 +754,23 @@ int intel_dram_detect(struct drm_i915_private *i915)
ret = gen11_get_dram_info(i915, dram_info);
else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915))
ret = bxt_get_dram_info(i915, dram_info);
- else
+ else if (GRAPHICS_VER(i915) >= 9)
ret = skl_get_dram_info(i915, dram_info);
+ else
+ ret = i915_get_dram_info(i915, dram_info);
drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
intel_dram_type_str(dram_info->type));
+ drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "Num QGV points %u\n", dram_info->num_qgv_points);
+ drm_dbg_kms(&i915->drm, "Num PSF GV points %u\n", dram_info->num_psf_gv_points);
+
/* TODO: Do we want to abort probe on dram detection failures? */
if (ret)
return 0;
- drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points);
-
- drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);
-
- drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
- str_yes_no(dram_info->wm_lv_0_adjust_needed));
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h
index 2a696e03aad4..8475ee379daa 100644
--- a/drivers/gpu/drm/i915/soc/intel_dram.h
+++ b/drivers/gpu/drm/i915/soc/intel_dram.h
@@ -12,11 +12,9 @@ struct drm_i915_private;
struct drm_device;
struct dram_info {
- bool wm_lv_0_adjust_needed;
- u8 num_channels;
- bool symmetric_memory;
enum intel_dram_type {
INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR2,
INTEL_DRAM_DDR3,
INTEL_DRAM_DDR4,
INTEL_DRAM_LPDDR3,
@@ -27,13 +25,21 @@ struct dram_info {
INTEL_DRAM_GDDR_ECC,
__INTEL_DRAM_TYPE_MAX,
} type;
+ unsigned int fsb_freq;
+ unsigned int mem_freq;
+ u8 num_channels;
u8 num_qgv_points;
u8 num_psf_gv_points;
+ bool ecc_impacting_de_bw; /* Only valid from Xe3p_LPD onward. */
+ bool symmetric_memory;
+ bool has_16gb_dimms;
};
void intel_dram_edram_detect(struct drm_i915_private *i915);
int intel_dram_detect(struct drm_i915_private *i915);
-unsigned int i9xx_fsb_freq(struct drm_i915_private *i915);
+unsigned int intel_fsb_freq(struct drm_i915_private *i915);
+unsigned int intel_mem_freq(struct drm_i915_private *i915);
const struct dram_info *intel_dram_info(struct drm_device *drm);
+const char *intel_dram_type_str(enum intel_dram_type type);
#endif /* __INTEL_DRAM_H__ */
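[Editor's sketch] With fsb_freq and mem_freq now carried in struct dram_info rather than on drm_i915_private, consumers see everything through intel_dram_info(). A hedged sketch of such a consumer; the helper is illustrative, and the NULL check is a defensive assumption, since intel_dram_detect() above returns early without allocating the info on DG2 or when no display is present.

/* Hypothetical consumer of the reworked dram_info above. */
static void example_log_dram_info(struct drm_device *drm)
{
	const struct dram_info *dram = intel_dram_info(drm);

	if (!dram)	/* detection may be skipped, e.g. DG2 / no display */
		return;

	drm_dbg(drm, "DRAM %s: %u channels, FSB %u kHz, mem %u kHz, 16Gb+ DIMMs: %s\n",
		intel_dram_type_str(dram->type), dram->num_channels,
		dram->fsb_freq, dram->mem_freq,
		str_yes_no(dram->has_16gb_dimms));
}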
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index 5346b8dda79a..271da30c8290 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -8,6 +8,7 @@
#include <linux/vgaarb.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/intel/i915_drm.h>
#include "../display/intel_display_core.h" /* FIXME */
@@ -148,7 +149,8 @@ void intel_gmch_bar_teardown(struct drm_i915_private *i915)
int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
{
- unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ struct intel_display *display = i915->display;
+ unsigned int reg = DISPLAY_VER(display) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
diff --git a/drivers/gpu/drm/i915/soc/intel_rom.c b/drivers/gpu/drm/i915/soc/intel_rom.c
index 243d98cab8c3..2f17dc856e7f 100644
--- a/drivers/gpu/drm/i915/soc/intel_rom.c
+++ b/drivers/gpu/drm/i915/soc/intel_rom.c
@@ -39,8 +39,9 @@ static u16 spi_read16(struct intel_rom *rom, loff_t offset)
return spi_read32(rom, offset) & 0xffff;
}
-struct intel_rom *intel_rom_spi(struct drm_i915_private *i915)
+struct intel_rom *intel_rom_spi(struct drm_device *drm)
{
+ struct drm_i915_private *i915 = to_i915(drm);
struct intel_rom *rom;
u32 static_region;
@@ -85,7 +86,7 @@ static void pci_free(struct intel_rom *rom)
pci_unmap_rom(rom->pdev, rom->oprom);
}
-struct intel_rom *intel_rom_pci(struct drm_i915_private *i915)
+struct intel_rom *intel_rom_pci(struct drm_device *drm)
{
struct intel_rom *rom;
@@ -93,7 +94,7 @@ struct intel_rom *intel_rom_pci(struct drm_i915_private *i915)
if (!rom)
return NULL;
- rom->pdev = to_pci_dev(i915->drm.dev);
+ rom->pdev = to_pci_dev(drm->dev);
rom->oprom = pci_map_rom(rom->pdev, &rom->size);
if (!rom->oprom) {
diff --git a/drivers/gpu/drm/i915/soc/intel_rom.h b/drivers/gpu/drm/i915/soc/intel_rom.h
index fb2979c8ef7f..4e59a375787e 100644
--- a/drivers/gpu/drm/i915/soc/intel_rom.h
+++ b/drivers/gpu/drm/i915/soc/intel_rom.h
@@ -8,11 +8,11 @@
#include <linux/types.h>
-struct drm_i915_private;
+struct drm_device;
struct intel_rom;
-struct intel_rom *intel_rom_spi(struct drm_i915_private *i915);
-struct intel_rom *intel_rom_pci(struct drm_i915_private *i915);
+struct intel_rom *intel_rom_spi(struct drm_device *drm);
+struct intel_rom *intel_rom_pci(struct drm_device *drm);
u32 intel_rom_read32(struct intel_rom *rom, loff_t offset);
u16 intel_rom_read16(struct intel_rom *rom, loff_t offset);
diff --git a/drivers/gpu/drm/i915/vlv_iosf_sb.c b/drivers/gpu/drm/i915/vlv_iosf_sb.c
index f4b386933141..38a75651b0dc 100644
--- a/drivers/gpu/drm/i915/vlv_iosf_sb.c
+++ b/drivers/gpu/drm/i915/vlv_iosf_sb.c
@@ -3,6 +3,8 @@
* Copyright © 2013-2021 Intel Corporation
*/
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index fc9f311ea1db..221e4c0b2c58 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -8,16 +8,17 @@
#include <drm/drm_print.h>
+#include "gt/intel_gt_regs.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
#include "i915_utils.h"
+#include "i915_wait_util.h"
#include "intel_clock_gating.h"
#include "intel_uncore_trace.h"
#include "vlv_suspend.h"
-#include "gt/intel_gt_regs.h"
-
struct vlv_s0ix_state {
/* GAM */
u32 wr_watermark;
diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig
index 3bfa2ac212dc..0482bfcefdde 100644
--- a/drivers/gpu/drm/imagination/Kconfig
+++ b/drivers/gpu/drm/imagination/Kconfig
@@ -3,9 +3,11 @@
config DRM_POWERVR
tristate "Imagination Technologies PowerVR (Series 6 and later) & IMG Graphics"
- depends on ARM64
+ depends on (ARM64 || RISCV && 64BIT)
depends on DRM
+ depends on MMU
depends on PM
+ depends on POWER_SEQUENCING || !POWER_SEQUENCING
select DRM_EXEC
select DRM_GEM_SHMEM_HELPER
select DRM_SCHED
diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c
index 2bbdc05a3b97..9294b4ba1de7 100644
--- a/drivers/gpu/drm/imagination/pvr_ccb.c
+++ b/drivers/gpu/drm/imagination/pvr_ccb.c
@@ -10,6 +10,7 @@
#include "pvr_power.h"
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c
index 8b9ba4983c4c..78d6b8a0a450 100644
--- a/drivers/gpu/drm/imagination/pvr_device.c
+++ b/drivers/gpu/drm/imagination/pvr_device.c
@@ -23,6 +23,7 @@
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -47,7 +48,7 @@
*
* Return:
* * 0 on success, or
- * * Any error returned by devm_platform_ioremap_resource().
+ * * Any error returned by devm_platform_get_and_ioremap_resource().
*/
static int
pvr_device_reg_init(struct pvr_device *pvr_dev)
@@ -121,21 +122,6 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev)
return 0;
}
-static int pvr_device_reset_init(struct pvr_device *pvr_dev)
-{
- struct drm_device *drm_dev = from_pvr_device(pvr_dev);
- struct reset_control *reset;
-
- reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
- if (IS_ERR(reset))
- return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
- "failed to get gpu reset line\n");
-
- pvr_dev->reset = reset;
-
- return 0;
-}
-
/**
* pvr_device_process_active_queues() - Process all queue related events.
* @pvr_dev: PowerVR device to check
@@ -618,6 +604,9 @@ pvr_device_init(struct pvr_device *pvr_dev)
struct device *dev = drm_dev->dev;
int err;
+ /* Get the platform-specific data based on the compatible string. */
+ pvr_dev->device_data = of_device_get_match_data(dev);
+
/*
* Setup device parameters. We do this first in case other steps
* depend on them.
@@ -631,8 +620,7 @@ pvr_device_init(struct pvr_device *pvr_dev)
if (err)
return err;
- /* Get the reset line for the GPU */
- err = pvr_device_reset_init(pvr_dev);
+ err = pvr_dev->device_data->pwr_ops->init(pvr_dev);
if (err)
return err;
diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h
index 7cb01c38d2a9..ec53ff275541 100644
--- a/drivers/gpu/drm/imagination/pvr_device.h
+++ b/drivers/gpu/drm/imagination/pvr_device.h
@@ -37,6 +37,9 @@ struct clk;
/* Forward declaration from <linux/firmware.h>. */
struct firmware;
+/* Forward declaration from <linux/pwrseq/consumer.h> */
+struct pwrseq_desc;
+
/**
* struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device
* @b: Branch ID.
@@ -58,6 +61,14 @@ struct pvr_fw_version {
};
/**
+ * struct pvr_device_data - Platform-specific data associated with a compatible string.
+ * @pwr_ops: Pointer to a structure with platform-specific power functions.
+ */
+struct pvr_device_data {
+ const struct pvr_power_sequence_ops *pwr_ops;
+};
+
+/**
* struct pvr_device - powervr-specific wrapper for &struct drm_device
*/
struct pvr_device {
@@ -98,6 +109,9 @@ struct pvr_device {
/** @fw_version: Firmware version detected at runtime. */
struct pvr_fw_version fw_version;
+ /** @device_data: Pointer to platform-specific data. */
+ const struct pvr_device_data *device_data;
+
/** @regs_resource: Resource representing device control registers. */
struct resource *regs_resource;
@@ -132,6 +146,14 @@ struct pvr_device {
*/
struct clk *mem_clk;
+ /**
+ * @power: Optional power domain devices.
+ *
+ * On platforms with more than one power domain for the GPU, they are
+ * stored here in @domain_devs, along with links between them in
+ * @domain_links. The size of @domain_devs is given by @domain_count,
+ * while the size of @domain_links is (2 * @domain_count) - 1.
+ */
struct pvr_device_power {
struct device **domain_devs;
struct device_link **domain_links;
@@ -148,6 +170,9 @@ struct pvr_device {
*/
struct reset_control *reset;
+ /** @pwrseq: Pointer to a power sequencer, if one is used. */
+ struct pwrseq_desc *pwrseq;
+
/** @irq: IRQ number. */
int irq;
diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c
index b058ec183bb3..916b40ced7eb 100644
--- a/drivers/gpu/drm/imagination/pvr_drv.c
+++ b/drivers/gpu/drm/imagination/pvr_drv.c
@@ -1480,15 +1480,33 @@ static void pvr_remove(struct platform_device *plat_dev)
pvr_power_domains_fini(pvr_dev);
}
+static const struct pvr_device_data pvr_device_data_manual = {
+ .pwr_ops = &pvr_power_sequence_ops_manual,
+};
+
+static const struct pvr_device_data pvr_device_data_pwrseq = {
+ .pwr_ops = &pvr_power_sequence_ops_pwrseq,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "img,img-rogue", .data = NULL },
+ {
+ .compatible = "thead,th1520-gpu",
+ .data = &pvr_device_data_pwrseq,
+ },
+ {
+ .compatible = "img,img-rogue",
+ .data = &pvr_device_data_manual,
+ },
/*
* This legacy compatible string was introduced early on before the more generic
* "img,img-rogue" was added. Keep it around here for compatibility, but never use
* "img,img-axe" in new devicetrees.
*/
- { .compatible = "img,img-axe", .data = NULL },
+ {
+ .compatible = "img,img-axe",
+ .data = &pvr_device_data_manual,
+ },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -1513,4 +1531,5 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC);
MODULE_LICENSE("Dual MIT/GPL");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw");
+MODULE_FIRMWARE("powervr/rogue_36.52.104.182_v1.fw");
MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw");
diff --git a/drivers/gpu/drm/imagination/pvr_fw.c b/drivers/gpu/drm/imagination/pvr_fw.c
index b2f8cba77346..779a58fe6ee8 100644
--- a/drivers/gpu/drm/imagination/pvr_fw.c
+++ b/drivers/gpu/drm/imagination/pvr_fw.c
@@ -17,6 +17,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_mm.h>
+#include <drm/drm_print.h>
#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/math.h>
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index 60db3668ad3c..9ff03bc60a08 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -16,6 +16,8 @@
#include <linux/ktime.h>
#include <linux/types.h>
+#include <drm/drm_print.h>
+
#define ROGUE_FW_HEAP_META_SHIFT 25 /* 32 MB */
#define POLL_TIMEOUT_USEC 1000000
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index a1098b521485..8a56952f6730 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -9,6 +9,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <linux/build_bug.h>
#include <linux/dcache.h>
diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c
index 187a07e0bd9a..b9f801c63260 100644
--- a/drivers/gpu/drm/imagination/pvr_power.c
+++ b/drivers/gpu/drm/imagination/pvr_power.c
@@ -10,6 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
@@ -18,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <linux/pwrseq/consumer.h>
#include <linux/reset.h>
#include <linux/timer.h>
#include <linux/types.h>
@@ -234,6 +236,118 @@ pvr_watchdog_init(struct pvr_device *pvr_dev)
return 0;
}
+static int pvr_power_init_manual(struct pvr_device *pvr_dev)
+{
+ struct drm_device *drm_dev = from_pvr_device(pvr_dev);
+ struct reset_control *reset;
+
+ reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL);
+ if (IS_ERR(reset))
+ return dev_err_probe(drm_dev->dev, PTR_ERR(reset),
+ "failed to get gpu reset line\n");
+
+ pvr_dev->reset = reset;
+
+ return 0;
+}
+
+static int pvr_power_on_sequence_manual(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = clk_prepare_enable(pvr_dev->core_clk);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(pvr_dev->sys_clk);
+ if (err)
+ goto err_core_clk_disable;
+
+ err = clk_prepare_enable(pvr_dev->mem_clk);
+ if (err)
+ goto err_sys_clk_disable;
+
+ /*
+ * According to the hardware manual, a delay of at least 32 clock
+ * cycles is required between de-asserting the clkgen reset and
+ * de-asserting the GPU reset. Assuming a worst-case scenario with
+ * a very low GPU clock frequency, a delay of 1 microsecond is
+ * sufficient to ensure this requirement is met across all
+ * feasible GPU clock speeds.
+ */
+ udelay(1);
+
+ err = reset_control_deassert(pvr_dev->reset);
+ if (err)
+ goto err_mem_clk_disable;
+
+ return 0;
+
+err_mem_clk_disable:
+ clk_disable_unprepare(pvr_dev->mem_clk);
+
+err_sys_clk_disable:
+ clk_disable_unprepare(pvr_dev->sys_clk);
+
+err_core_clk_disable:
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+ return err;
+}
+
+static int pvr_power_off_sequence_manual(struct pvr_device *pvr_dev)
+{
+ int err;
+
+ err = reset_control_assert(pvr_dev->reset);
+
+ clk_disable_unprepare(pvr_dev->mem_clk);
+ clk_disable_unprepare(pvr_dev->sys_clk);
+ clk_disable_unprepare(pvr_dev->core_clk);
+
+ return err;
+}
+
+const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual = {
+ .init = pvr_power_init_manual,
+ .power_on = pvr_power_on_sequence_manual,
+ .power_off = pvr_power_off_sequence_manual,
+};
+
+static int pvr_power_init_pwrseq(struct pvr_device *pvr_dev)
+{
+ struct device *dev = from_pvr_device(pvr_dev)->dev;
+
+ pvr_dev->pwrseq = devm_pwrseq_get(dev, "gpu-power");
+ if (IS_ERR(pvr_dev->pwrseq)) {
+ /*
+ * This platform requires a sequencer. If we can't get it, we
+ * must return the error (including -EPROBE_DEFER to wait for
+ * the provider to appear).
+ */
+ return dev_err_probe(dev, PTR_ERR(pvr_dev->pwrseq),
+ "Failed to get required power sequencer\n");
+ }
+
+ return 0;
+}
+
+static int pvr_power_on_sequence_pwrseq(struct pvr_device *pvr_dev)
+{
+ return pwrseq_power_on(pvr_dev->pwrseq);
+}
+
+static int pvr_power_off_sequence_pwrseq(struct pvr_device *pvr_dev)
+{
+ return pwrseq_power_off(pvr_dev->pwrseq);
+}
+
+const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq = {
+ .init = pvr_power_init_pwrseq,
+ .power_on = pvr_power_on_sequence_pwrseq,
+ .power_off = pvr_power_off_sequence_pwrseq,
+};
+
int
pvr_power_device_suspend(struct device *dev)
{
@@ -252,11 +366,7 @@ pvr_power_device_suspend(struct device *dev)
goto err_drm_dev_exit;
}
- clk_disable_unprepare(pvr_dev->mem_clk);
- clk_disable_unprepare(pvr_dev->sys_clk);
- clk_disable_unprepare(pvr_dev->core_clk);
-
- err = reset_control_assert(pvr_dev->reset);
+ err = pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
err_drm_dev_exit:
drm_dev_exit(idx);
@@ -276,53 +386,22 @@ pvr_power_device_resume(struct device *dev)
if (!drm_dev_enter(drm_dev, &idx))
return -EIO;
- err = clk_prepare_enable(pvr_dev->core_clk);
+ err = pvr_dev->device_data->pwr_ops->power_on(pvr_dev);
if (err)
goto err_drm_dev_exit;
- err = clk_prepare_enable(pvr_dev->sys_clk);
- if (err)
- goto err_core_clk_disable;
-
- err = clk_prepare_enable(pvr_dev->mem_clk);
- if (err)
- goto err_sys_clk_disable;
-
- /*
- * According to the hardware manual, a delay of at least 32 clock
- * cycles is required between de-asserting the clkgen reset and
- * de-asserting the GPU reset. Assuming a worst-case scenario with
- * a very high GPU clock frequency, a delay of 1 microsecond is
- * sufficient to ensure this requirement is met across all
- * feasible GPU clock speeds.
- */
- udelay(1);
-
- err = reset_control_deassert(pvr_dev->reset);
- if (err)
- goto err_mem_clk_disable;
-
if (pvr_dev->fw_dev.booted) {
err = pvr_power_fw_enable(pvr_dev);
if (err)
- goto err_reset_assert;
+ goto err_power_off;
}
drm_dev_exit(idx);
return 0;
-err_reset_assert:
- reset_control_assert(pvr_dev->reset);
-
-err_mem_clk_disable:
- clk_disable_unprepare(pvr_dev->mem_clk);
-
-err_sys_clk_disable:
- clk_disable_unprepare(pvr_dev->sys_clk);
-
-err_core_clk_disable:
- clk_disable_unprepare(pvr_dev->core_clk);
+err_power_off:
+ pvr_dev->device_data->pwr_ops->power_off(pvr_dev);
err_drm_dev_exit:
drm_dev_exit(idx);
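The pwrseq consumer API used above comes from <linux/pwrseq/consumer.h>; the ordering of regulators, clocks and resets lives in the provider driver, so the consumer side stays small. A condensed sketch of the lifecycle, reusing the "gpu-power" target name from the code above (foo_* names are illustrative):

static int foo_gpu_power_cycle(struct device *dev)
{
	struct pwrseq_desc *pwrseq;
	int err;

	/* Resolves a matching provider; may return -EPROBE_DEFER. */
	pwrseq = devm_pwrseq_get(dev, "gpu-power");
	if (IS_ERR(pwrseq))
		return PTR_ERR(pwrseq);

	/* Run the provider's power-up sequence for this target. */
	err = pwrseq_power_on(pwrseq);
	if (err)
		return err;

	/* ... device in use ... */

	return pwrseq_power_off(pwrseq);
}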
diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h
index ada85674a7ca..b853d092242c 100644
--- a/drivers/gpu/drm/imagination/pvr_power.h
+++ b/drivers/gpu/drm/imagination/pvr_power.h
@@ -41,4 +41,19 @@ pvr_power_put(struct pvr_device *pvr_dev)
int pvr_power_domains_init(struct pvr_device *pvr_dev);
void pvr_power_domains_fini(struct pvr_device *pvr_dev);
+/**
+ * struct pvr_power_sequence_ops - Platform-specific power sequence operations.
+ * @init: Pointer to the platform-specific initialization function.
+ * @power_on: Pointer to the platform-specific power on function.
+ * @power_off: Pointer to the platform-specific power off function.
+ */
+struct pvr_power_sequence_ops {
+ int (*init)(struct pvr_device *pvr_dev);
+ int (*power_on)(struct pvr_device *pvr_dev);
+ int (*power_off)(struct pvr_device *pvr_dev);
+};
+
+extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual;
+extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq;
+
#endif /* PVR_POWER_H */
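With the vtable in place, supporting another platform with unusual power-up requirements means adding one more ops instance and a matching pvr_device_data entry in dt_match; the shared suspend/resume paths stay untouched. A hypothetical sketch (the xyz names are illustrative only):

static int pvr_power_init_xyz(struct pvr_device *pvr_dev)
{
	/* Acquire whatever resources this platform's sequence needs. */
	return 0;
}

static int pvr_power_on_sequence_xyz(struct pvr_device *pvr_dev)
{
	/* Platform-specific power-up steps. */
	return 0;
}

static int pvr_power_off_sequence_xyz(struct pvr_device *pvr_dev)
{
	/* Platform-specific power-down steps. */
	return 0;
}

const struct pvr_power_sequence_ops pvr_power_sequence_ops_xyz = {
	.init		= pvr_power_init_xyz,
	.power_on	= pvr_power_on_sequence_xyz,
	.power_off	= pvr_power_off_sequence_xyz,
};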
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 2896fa7501b1..48e52c5561be 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -13,6 +13,7 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>
+#include <drm/drm_print.h>
#include <linux/bug.h>
#include <linux/container_of.h>
@@ -185,12 +186,17 @@ struct pvr_vm_bind_op {
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
switch (bind_op->type) {
- case PVR_VM_BIND_TYPE_MAP:
+ case PVR_VM_BIND_TYPE_MAP: {
+ const struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = bind_op->device_addr,
+ .map.va.range = bind_op->size,
+ .map.gem.obj = gem_from_pvr_gem(bind_op->pvr_obj),
+ .map.gem.offset = bind_op->offset,
+ };
+
return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
- bind_op, bind_op->device_addr,
- bind_op->size,
- gem_from_pvr_gem(bind_op->pvr_obj),
- bind_op->offset);
+ bind_op, &map_req);
+ }
case PVR_VM_BIND_TYPE_UNMAP:
return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
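The drm_gpuvm_sm_map() hunk tracks a GPUVM API change: the map parameters formerly passed as separate arguments are now packed into a struct drm_gpuvm_map_req. The fields map one-to-one onto the old argument list:

	const struct drm_gpuvm_map_req map_req = {
		.map.va.addr	= device_addr,	/* GPU VA of the mapping */
		.map.va.range	= size,		/* mapping size in bytes */
		.map.gem.obj	= obj,		/* backing GEM object */
		.map.gem.offset	= offset,	/* offset into that object */
	};

	err = drm_gpuvm_sm_map(gpuvm, priv, &map_req);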
diff --git a/drivers/gpu/drm/imx/dc/dc-ed.c b/drivers/gpu/drm/imx/dc/dc-ed.c
index 86ecc22d0a55..d42f33d6f3fc 100644
--- a/drivers/gpu/drm/imx/dc/dc-ed.c
+++ b/drivers/gpu/drm/imx/dc/dc-ed.c
@@ -15,12 +15,12 @@
#include "dc-pe.h"
#define PIXENGCFG_STATIC 0x8
-#define POWERDOWN BIT(4)
-#define SYNC_MODE BIT(8)
-#define SINGLE 0
#define DIV_MASK GENMASK(23, 16)
#define DIV(x) FIELD_PREP(DIV_MASK, (x))
#define DIV_RESET 0x80
+#define SYNC_MODE BIT(8)
+#define SINGLE 0
+#define POWERDOWN BIT(4)
#define PIXENGCFG_DYNAMIC 0xc
@@ -28,9 +28,9 @@
#define SYNC_TRIGGER BIT(0)
#define STATICCONTROL 0x8
+#define PERFCOUNTMODE BIT(12)
#define KICK_MODE BIT(8)
#define EXTERNAL BIT(8)
-#define PERFCOUNTMODE BIT(12)
#define CONTROL 0xc
#define GAMMAAPPLYENABLE BIT(0)
diff --git a/drivers/gpu/drm/imx/dc/dc-fg.c b/drivers/gpu/drm/imx/dc/dc-fg.c
index 7f6c1852bf72..28f372be9247 100644
--- a/drivers/gpu/drm/imx/dc/dc-fg.c
+++ b/drivers/gpu/drm/imx/dc/dc-fg.c
@@ -56,9 +56,9 @@
#define FGINCTRL 0x5c
#define FGINCTRLPANIC 0x60
-#define FGDM_MASK GENMASK(2, 0)
-#define ENPRIMALPHA BIT(3)
#define ENSECALPHA BIT(4)
+#define ENPRIMALPHA BIT(3)
+#define FGDM_MASK GENMASK(2, 0)
#define FGCCR 0x64
#define CCGREEN(x) FIELD_PREP(GENMASK(19, 10), (x))
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.c b/drivers/gpu/drm/imx/dc/dc-fu.c
index f94c591c8158..1d8f74babef8 100644
--- a/drivers/gpu/drm/imx/dc/dc-fu.c
+++ b/drivers/gpu/drm/imx/dc/dc-fu.c
@@ -18,11 +18,11 @@
#define BASEADDRESSAUTOUPDATE(x) FIELD_PREP(BASEADDRESSAUTOUPDATE_MASK, (x))
/* BURSTBUFFERMANAGEMENT */
+#define LINEMODE_MASK BIT(31)
#define SETBURSTLENGTH_MASK GENMASK(12, 8)
#define SETBURSTLENGTH(x) FIELD_PREP(SETBURSTLENGTH_MASK, (x))
#define SETNUMBUFFERS_MASK GENMASK(7, 0)
#define SETNUMBUFFERS(x) FIELD_PREP(SETNUMBUFFERS_MASK, (x))
-#define LINEMODE_MASK BIT(31)
/* SOURCEBUFFERATTRIBUTES */
#define BITSPERPIXEL_MASK GENMASK(21, 16)
@@ -31,20 +31,20 @@
#define STRIDE(x) FIELD_PREP(STRIDE_MASK, (x) - 1)
/* SOURCEBUFFERDIMENSION */
-#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
#define LINECOUNT(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
/* LAYEROFFSET */
-#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
#define LAYERYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
/* CLIPWINDOWOFFSET */
-#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
#define CLIPWINDOWYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
/* CLIPWINDOWDIMENSIONS */
-#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1)
#define CLIPWINDOWHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x) - 1)
+#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1)
enum dc_linemode {
/*
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.h b/drivers/gpu/drm/imx/dc/dc-fu.h
index e016e1ea5b4e..f678de3ca8c0 100644
--- a/drivers/gpu/drm/imx/dc/dc-fu.h
+++ b/drivers/gpu/drm/imx/dc/dc-fu.h
@@ -33,13 +33,13 @@
#define A_SHIFT(x) FIELD_PREP_CONST(GENMASK(4, 0), (x))
/* LAYERPROPERTY */
+#define SOURCEBUFFERENABLE BIT(31)
#define YUVCONVERSIONMODE_MASK GENMASK(18, 17)
#define YUVCONVERSIONMODE(x) FIELD_PREP(YUVCONVERSIONMODE_MASK, (x))
-#define SOURCEBUFFERENABLE BIT(31)
/* FRAMEDIMENSIONS */
-#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
#define FRAMEHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
/* CONTROL */
#define INPUTSELECT_MASK GENMASK(4, 3)
diff --git a/drivers/gpu/drm/imx/dc/dc-lb.c b/drivers/gpu/drm/imx/dc/dc-lb.c
index 38f966625d38..ca1d714c8d6e 100644
--- a/drivers/gpu/drm/imx/dc/dc-lb.c
+++ b/drivers/gpu/drm/imx/dc/dc-lb.c
@@ -17,12 +17,12 @@
#include "dc-pe.h"
#define PIXENGCFG_DYNAMIC 0x8
-#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0)
-#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \
- FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x))
#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK GENMASK(13, 8)
#define PIXENGCFG_DYNAMIC_SEC_SEL(x) \
FIELD_PREP(PIXENGCFG_DYNAMIC_SEC_SEL_MASK, (x))
+#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0)
+#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x))
#define STATICCONTROL 0x8
#define SHDTOKSEL_MASK GENMASK(4, 3)
@@ -37,24 +37,24 @@
#define BLENDCONTROL 0x10
#define ALPHA_MASK GENMASK(23, 16)
#define ALPHA(x) FIELD_PREP(ALPHA_MASK, (x))
-#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0)
-#define PRIM_C_BLD_FUNC(x) \
- FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x))
-#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4)
-#define SEC_C_BLD_FUNC(x) \
- FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x))
-#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8)
-#define PRIM_A_BLD_FUNC(x) \
- FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x))
#define SEC_A_BLD_FUNC_MASK GENMASK(14, 12)
#define SEC_A_BLD_FUNC(x) \
FIELD_PREP(SEC_A_BLD_FUNC_MASK, (x))
+#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8)
+#define PRIM_A_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x))
+#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4)
+#define SEC_C_BLD_FUNC(x) \
+ FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x))
+#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0)
+#define PRIM_C_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x))
#define POSITION 0x14
-#define XPOS_MASK GENMASK(15, 0)
-#define XPOS(x) FIELD_PREP(XPOS_MASK, (x))
#define YPOS_MASK GENMASK(31, 16)
#define YPOS(x) FIELD_PREP(YPOS_MASK, (x))
+#define XPOS_MASK GENMASK(15, 0)
+#define XPOS(x) FIELD_PREP(XPOS_MASK, (x))
enum dc_lb_blend_func {
DC_LAYERBLEND_BLEND_ZERO,
diff --git a/drivers/gpu/drm/imx/dc/dc-plane.c b/drivers/gpu/drm/imx/dc/dc-plane.c
index d8b946fb90de..e40d5d66c5c1 100644
--- a/drivers/gpu/drm/imx/dc/dc-plane.c
+++ b/drivers/gpu/drm/imx/dc/dc-plane.c
@@ -106,7 +106,7 @@ dc_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
}
crtc_state =
- drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+ drm_atomic_get_new_crtc_state(state, plane_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
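This hunk, and the matching conversions in the drivers below, replaces the deprecated drm_atomic_get_existing_crtc_state() with the accessor that names which half of the atomic state is wanted. In an atomic_check hook it is the new, to-be-committed state that matters:

	struct drm_crtc_state *crtc_state, *old_crtc_state;

	/* The new CRTC state being checked in this atomic update: */
	crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);

	/* The currently committed state has a matching accessor: */
	old_crtc_state = drm_atomic_get_old_crtc_state(state, plane_state->crtc);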
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index ab6d32bad756..0b99b407ac0a 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -10,6 +10,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
@@ -159,8 +160,8 @@ static int dcss_plane_atomic_check(struct drm_plane *plane,
dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
WARN_ON(!dma_obj);
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
hdisplay = crtc_state->adjusted_mode.hdisplay;
vdisplay = crtc_state->adjusted_mode.vdisplay;
diff --git a/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
index 8333c4bf7369..07e5f96202d4 100644
--- a/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
@@ -278,4 +278,3 @@ MODULE_AUTHOR("Andy Yan <andy.yan@rock-chips.com>");
MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
MODULE_DESCRIPTION("IMX6 Specific DW-HDMI Driver Extension");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:dwhdmi-imx");
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
index ec5fd9a01f1e..eddb471119c6 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
@@ -17,7 +17,9 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
@@ -141,17 +143,34 @@ static int imx_drm_dumb_create(struct drm_file *file_priv,
struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- u32 width = args->width;
+ u32 fourcc;
+ u64 pitch_align;
int ret;
- args->width = ALIGN(width, 8);
-
- ret = drm_gem_dma_dumb_create(file_priv, drm, args);
+ /*
+ * Hardware requires the framebuffer width to be aligned to
+ * multiples of 8. The mode-setting code handles this, but
+ * the buffer pitch has to be aligned as well. Set the pitch
+ * alignment accordingly, so that each scanline fits into
+ * the allocated buffer.
+ */
+ fourcc = drm_driver_color_mode_format(drm, args->bpp);
+ if (fourcc != DRM_FORMAT_INVALID) {
+ const struct drm_format_info *info = drm_format_info(fourcc);
+
+ if (!info)
+ return -EINVAL;
+ pitch_align = drm_format_info_min_pitch(info, 0, 8);
+ } else {
+ pitch_align = DIV_ROUND_UP(args->bpp, SZ_8) * 8;
+ }
+ if (!pitch_align || pitch_align > U32_MAX)
+ return -EINVAL;
+ ret = drm_mode_size_dumb(drm, args, pitch_align, 0);
if (ret)
return ret;
- args->width = width;
- return ret;
+ return drm_gem_dma_dumb_create(file_priv, drm, args);
}
static const struct drm_driver imx_drm_driver = {
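To make the alignment arithmetic concrete: drm_format_info_min_pitch(info, 0, 8) returns the byte size of eight pixels, so for a 32 bpp format the pitch is rounded up to 32-byte multiples, which is exactly an 8-pixel boundary. The fallback computes the same quantity from the raw bpp value; two worked cases:

	/* 32 bpp: 4 bytes per pixel, 8 pixels -> 32-byte pitch alignment. */
	pitch_align = DIV_ROUND_UP(32, SZ_8) * 8;	/* == 32 */

	/* 24 bpp: 3 bytes per pixel, 8 pixels -> 24-byte pitch alignment. */
	pitch_align = DIV_ROUND_UP(24, SZ_8) * 8;	/* == 24 */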
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index 6be7a57ad03d..626d410d9150 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
@@ -644,4 +644,3 @@ module_platform_driver(imx_ldb_driver);
MODULE_DESCRIPTION("i.MX LVDS driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
index c5629e155d25..c5c6e070cc06 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
@@ -368,17 +368,20 @@ static unsigned long clk_tve_di_recalc_rate(struct clk_hw *hw,
return 0;
}
-static long clk_tve_di_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_tve_di_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long div;
- div = *prate / rate;
+ div = req->best_parent_rate / req->rate;
if (div >= 4)
- return *prate / 4;
+ req->rate = req->best_parent_rate / 4;
else if (div >= 2)
- return *prate / 2;
- return *prate;
+ req->rate = req->best_parent_rate / 2;
+ else
+ req->rate = req->best_parent_rate;
+
+ return 0;
}
static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -409,7 +412,7 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
}
static const struct clk_ops clk_tve_di_ops = {
- .round_rate = clk_tve_di_round_rate,
+ .determine_rate = clk_tve_di_determine_rate,
.set_rate = clk_tve_di_set_rate,
.recalc_rate = clk_tve_di_recalc_rate,
};
@@ -674,4 +677,3 @@ module_platform_driver(imx_tve_driver);
MODULE_DESCRIPTION("i.MX Television Encoder driver");
MODULE_AUTHOR("Philipp Zabel, Pengutronix");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-tve");
diff --git a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
index 704c549750f9..db50eccea0ca 100644
--- a/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
@@ -14,6 +14,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <video/imx-ipu-v3.h>
@@ -386,8 +387,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
crtc_state =
- drm_atomic_get_existing_crtc_state(state,
- new_state->crtc);
+ drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 6d8325c76697..6fbf505d2801 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -25,19 +25,18 @@
struct imx_parallel_display_encoder {
struct drm_encoder encoder;
- struct drm_bridge bridge;
- struct imx_parallel_display *pd;
};
struct imx_parallel_display {
struct device *dev;
u32 bus_format;
struct drm_bridge *next_bridge;
+ struct drm_bridge bridge;
};
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
- return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
+ return container_of(b, struct imx_parallel_display, bridge);
}
static const u32 imx_pd_bus_fmts[] = {
@@ -134,10 +133,10 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct drm_display_info *di = &conn_state->connector->display_info;
struct drm_bridge_state *next_bridge_state = NULL;
- struct drm_bridge *next_bridge;
u32 bus_flags, bus_fmt;
- next_bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *next_bridge __free(drm_bridge_put) = drm_bridge_get_next_bridge(bridge);
+
if (next_bridge)
next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
next_bridge);
@@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(imxpd_encoder))
return PTR_ERR(imxpd_encoder);
- imxpd_encoder->pd = imxpd;
encoder = &imxpd_encoder->encoder;
- bridge = &imxpd_encoder->bridge;
+ bridge = &imxpd->bridge;
ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
- bridge->funcs = &imx_pd_bridge_funcs;
drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
connector = drm_bridge_connector_init(drm, encoder);
@@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev)
u32 bus_format = 0;
const char *fmt;
- imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
- if (!imxpd)
- return -ENOMEM;
+ imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge,
+ &imx_pd_bridge_funcs);
+ if (IS_ERR(imxpd))
+ return PTR_ERR(imxpd);
/* port@1 is the output port */
imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
@@ -258,6 +256,8 @@ static int imx_pd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imxpd);
+ devm_drm_bridge_add(dev, &imxpd->bridge);
+
return component_add(dev, &imx_pd_ops);
}
@@ -286,4 +286,3 @@ module_platform_driver(imx_pd_driver);
MODULE_DESCRIPTION("i.MX parallel display driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-parallel-display");
diff --git a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
index 8d6a0bb31c48..e200b40f30fe 100644
--- a/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
+++ b/drivers/gpu/drm/imx/lcdc/imx-lcdc.c
@@ -14,6 +14,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 9db1ceaed518..d3213fbf22be 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -247,8 +247,8 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct ingenic_drm_private_state *priv_state;
unsigned int next_id;
- priv_state = ingenic_drm_get_priv_state(priv, state);
- if (WARN_ON(IS_ERR(priv_state)))
+ priv_state = ingenic_drm_get_new_priv_state(priv, state);
+ if (WARN_ON(!priv_state))
return;
/* Set addresses of our DMA descriptor chains */
@@ -340,6 +340,7 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
crtc);
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL;
+ struct ingenic_drm_private_state *priv_state;
if (crtc_state->gamma_lut &&
drm_color_lut_size(crtc_state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
@@ -347,6 +348,11 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ /* We need the private state in atomic_enable, so make sure it is part of this atomic update */
+ priv_state = ingenic_drm_get_priv_state(priv, state);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
if (drm_atomic_crtc_needs_modeset(crtc_state) && priv->soc_info->has_osd) {
f1_state = drm_atomic_get_plane_state(crtc_state->state,
&priv->f1);
@@ -471,8 +477,7 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
if (priv->soc_info->plane_f0_not_working && plane == &priv->f0)
return -EINVAL;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index 26ebf424d63e..32638a713241 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -580,7 +580,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -705,7 +705,7 @@ ingenic_ipu_plane_atomic_set_property(struct drm_plane *plane,
ipu->sharpness = val;
if (state->crtc) {
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 32cda134ae3e..7c2eb1152fc2 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -20,6 +20,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 9e0562aa2bcb..a935ff1503cd 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -12,6 +12,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "kmb_drv.h"
#include "kmb_plane.h"
@@ -129,8 +130,7 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
}
can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
crtc_state =
- drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
DRM_PLANE_NO_SCALING,
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 739e8c6c6d90..9a1e6b9ecbe5 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -8,6 +8,8 @@
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_print.h>
+
#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
diff --git a/drivers/gpu/drm/logicvc/logicvc_layer.c b/drivers/gpu/drm/logicvc/logicvc_layer.c
index 464000aea765..eab4d773f92b 100644
--- a/drivers/gpu/drm/logicvc/logicvc_layer.c
+++ b/drivers/gpu/drm/logicvc/logicvc_layer.c
@@ -96,8 +96,8 @@ static int logicvc_plane_atomic_check(struct drm_plane *drm_plane,
if (!new_state->crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(new_state->state,
- new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
+ new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
diff --git a/drivers/gpu/drm/loongson/lsdc_benchmark.c b/drivers/gpu/drm/loongson/lsdc_benchmark.c
index b088646a2ff9..659173381814 100644
--- a/drivers/gpu/drm/loongson/lsdc_benchmark.c
+++ b/drivers/gpu/drm/loongson/lsdc_benchmark.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_debugfs.h>
+#include <drm/drm_print.h>
#include "lsdc_benchmark.h"
#include "lsdc_drv.h"
diff --git a/drivers/gpu/drm/loongson/lsdc_crtc.c b/drivers/gpu/drm/loongson/lsdc_crtc.c
index 03958b79f251..a5b7d5c5fd20 100644
--- a/drivers/gpu/drm/loongson/lsdc_crtc.c
+++ b/drivers/gpu/drm/loongson/lsdc_crtc.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "lsdc_drv.h"
diff --git a/drivers/gpu/drm/loongson/lsdc_debugfs.c b/drivers/gpu/drm/loongson/lsdc_debugfs.c
index b9c2e6b1701f..19aa7ef577de 100644
--- a/drivers/gpu/drm/loongson/lsdc_debugfs.c
+++ b/drivers/gpu/drm/loongson/lsdc_debugfs.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_debugfs.h>
+#include <drm/drm_print.h>
#include "lsdc_benchmark.h"
#include "lsdc_drv.h"
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
index 12193d2a301a..abf5bf68eec2 100644
--- a/drivers/gpu/drm/loongson/lsdc_drv.c
+++ b/drivers/gpu/drm/loongson/lsdc_drv.c
@@ -15,6 +15,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c
index a720d8f53209..6372db2d3093 100644
--- a/drivers/gpu/drm/loongson/lsdc_gem.c
+++ b/drivers/gpu/drm/loongson/lsdc_gem.c
@@ -6,9 +6,11 @@
#include <linux/dma-buf.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include "lsdc_drv.h"
#include "lsdc_gem.h"
@@ -57,7 +59,7 @@ static void lsdc_gem_object_free(struct drm_gem_object *obj)
struct ttm_buffer_object *tbo = to_ttm_bo(obj);
if (tbo)
- ttm_bo_put(tbo);
+ ttm_bo_fini(tbo);
}
static int lsdc_gem_object_vmap(struct drm_gem_object *obj, struct iosys_map *map)
@@ -204,45 +206,31 @@ int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
const struct lsdc_desc *descp = ldev->descp;
u32 domain = LSDC_GEM_DOMAIN_VRAM;
struct drm_gem_object *gobj;
- size_t size;
- u32 pitch;
- u32 handle;
int ret;
- if (!args->width || !args->height)
- return -EINVAL;
-
- if (args->bpp != 32 && args->bpp != 16)
- return -EINVAL;
-
- pitch = args->width * args->bpp / 8;
- pitch = ALIGN(pitch, descp->pitch_align);
- size = pitch * args->height;
- size = ALIGN(size, PAGE_SIZE);
+ ret = drm_mode_size_dumb(ddev, args, descp->pitch_align, 0);
+ if (ret)
+ return ret;
/* Maximum single bo size allowed is the half vram size available */
- if (size > ldev->vram_size / 2) {
- drm_err(ddev, "Requesting(%zuMiB) failed\n", size >> 20);
+ if (args->size > ldev->vram_size / 2) {
+ drm_err(ddev, "Requesting(%zuMiB) failed\n", (size_t)(args->size >> PAGE_SHIFT));
return -ENOMEM;
}
- gobj = lsdc_gem_object_create(ddev, domain, size, false, NULL, NULL);
+ gobj = lsdc_gem_object_create(ddev, domain, args->size, false, NULL, NULL);
if (IS_ERR(gobj)) {
drm_err(ddev, "Failed to create gem object\n");
return PTR_ERR(gobj);
}
- ret = drm_gem_handle_create(file, gobj, &handle);
+ ret = drm_gem_handle_create(file, gobj, &args->handle);
/* drop reference from allocate, handle holds it now */
drm_gem_object_put(gobj);
if (ret)
return ret;
- args->pitch = pitch;
- args->size = size;
- args->handle = handle;
-
return 0;
}
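drm_mode_size_dumb(), from the <drm/drm_dumb_buffers.h> header added above, centralises the validation and pitch/size computation each driver used to open-code: it checks width, height and bpp, applies the caller's pitch alignment, and fills in args->pitch and args->size. What remains driver-specific is just allocation and handle creation, roughly (foo_bo_create() stands in for the driver's BO allocator):

	/* Validates args and fills args->pitch / args->size. */
	ret = drm_mode_size_dumb(ddev, args, pitch_align, 0);
	if (ret)
		return ret;

	gobj = foo_bo_create(ddev, args->size);
	if (IS_ERR(gobj))
		return PTR_ERR(gobj);

	/* Writes the new handle straight into args->handle. */
	ret = drm_gem_handle_create(file, gobj, &args->handle);
	drm_gem_object_put(gobj);
	return ret;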
diff --git a/drivers/gpu/drm/loongson/lsdc_i2c.c b/drivers/gpu/drm/loongson/lsdc_i2c.c
index ce90c25536d2..012b4761c538 100644
--- a/drivers/gpu/drm/loongson/lsdc_i2c.c
+++ b/drivers/gpu/drm/loongson/lsdc_i2c.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "lsdc_drv.h"
#include "lsdc_output.h"
diff --git a/drivers/gpu/drm/loongson/lsdc_irq.c b/drivers/gpu/drm/loongson/lsdc_irq.c
index efdc4d10792d..e8b7cc327f04 100644
--- a/drivers/gpu/drm/loongson/lsdc_irq.c
+++ b/drivers/gpu/drm/loongson/lsdc_irq.c
@@ -3,6 +3,7 @@
* Copyright (C) 2023 Loongson Technology Corporation Limited
*/
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "lsdc_irq.h"
diff --git a/drivers/gpu/drm/loongson/lsdc_output_7a1000.c b/drivers/gpu/drm/loongson/lsdc_output_7a1000.c
index 600ed4fb0884..ccca67e01fd9 100644
--- a/drivers/gpu/drm/loongson/lsdc_output_7a1000.c
+++ b/drivers/gpu/drm/loongson/lsdc_output_7a1000.c
@@ -5,6 +5,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "lsdc_drv.h"
diff --git a/drivers/gpu/drm/loongson/lsdc_output_7a2000.c b/drivers/gpu/drm/loongson/lsdc_output_7a2000.c
index 2bd797a9b9ff..aa7daee4c065 100644
--- a/drivers/gpu/drm/loongson/lsdc_output_7a2000.c
+++ b/drivers/gpu/drm/loongson/lsdc_output_7a2000.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "lsdc_drv.h"
diff --git a/drivers/gpu/drm/loongson/lsdc_pixpll.c b/drivers/gpu/drm/loongson/lsdc_pixpll.c
index 2609a2256da4..51b9a032cf43 100644
--- a/drivers/gpu/drm/loongson/lsdc_pixpll.c
+++ b/drivers/gpu/drm/loongson/lsdc_pixpll.c
@@ -6,6 +6,7 @@
#include <linux/delay.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "lsdc_drv.h"
diff --git a/drivers/gpu/drm/loongson/lsdc_plane.c b/drivers/gpu/drm/loongson/lsdc_plane.c
index aa9a97f9c4dc..9675344128d0 100644
--- a/drivers/gpu/drm/loongson/lsdc_plane.c
+++ b/drivers/gpu/drm/loongson/lsdc_plane.c
@@ -9,6 +9,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include "lsdc_drv.h"
#include "lsdc_regs.h"
@@ -196,7 +197,7 @@ static int lsdc_cursor_plane_atomic_async_check(struct drm_plane *plane,
return -EINVAL;
}
- crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (!crtc_state->active)
return -EINVAL;
diff --git a/drivers/gpu/drm/loongson/lsdc_ttm.c b/drivers/gpu/drm/loongson/lsdc_ttm.c
index 2e42c6970c9f..5d9075634bf8 100644
--- a/drivers/gpu/drm/loongson/lsdc_ttm.c
+++ b/drivers/gpu/drm/loongson/lsdc_ttm.c
@@ -8,6 +8,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include "lsdc_drv.h"
#include "lsdc_ttm.h"
@@ -544,7 +545,8 @@ int lsdc_ttm_init(struct lsdc_device *ldev)
ret = ttm_device_init(&ldev->bdev, &lsdc_bo_driver, ddev->dev,
ddev->anon_inode->i_mapping,
- ddev->vma_offset_manager, false, true);
+ ddev->vma_offset_manager,
+ TTM_ALLOCATION_POOL_USE_DMA32);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/mcde/mcde_clk_div.c b/drivers/gpu/drm/mcde/mcde_clk_div.c
index 3056ac566473..8c5af2677357 100644
--- a/drivers/gpu/drm/mcde/mcde_clk_div.c
+++ b/drivers/gpu/drm/mcde/mcde_clk_div.c
@@ -71,12 +71,15 @@ static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
return best_div;
}
-static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int mcde_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int div = mcde_clk_div_choose_div(hw, rate, prate, true);
+ int div = mcde_clk_div_choose_div(hw, req->rate,
+ &req->best_parent_rate, true);
- return DIV_ROUND_UP_ULL(*prate, div);
+ req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);
+
+ return 0;
}
static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw,
@@ -132,7 +135,7 @@ static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops mcde_clk_div_ops = {
.enable = mcde_clk_div_enable,
.recalc_rate = mcde_clk_div_recalc_rate,
- .round_rate = mcde_clk_div_round_rate,
+ .determine_rate = mcde_clk_div_determine_rate,
.set_rate = mcde_clk_div_set_rate,
};
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 52043a12a2e8..257a6e84dd58 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -17,6 +17,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index e47debd60619..96188bf9274a 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -30,9 +30,30 @@ config DRM_MEDIATEK_DP
help
DRM/KMS Display Port driver for MediaTek SoCs.
+config DRM_MEDIATEK_HDMI_COMMON
+ tristate
+ depends on DRM_MEDIATEK
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HELPER
+ select SND_SOC_HDMI_CODEC if SND_SOC
+ help
+ MediaTek SoC HDMI common library
+
config DRM_MEDIATEK_HDMI
tristate "DRM HDMI Support for Mediatek SoCs"
depends on DRM_MEDIATEK
- select SND_SOC_HDMI_CODEC if SND_SOC
+ select DRM_MEDIATEK_HDMI_COMMON
help
DRM/KMS HDMI driver for Mediatek SoCs
+
+config DRM_MEDIATEK_HDMI_V2
+ tristate "DRM HDMI v2 IP support for MediaTek SoCs"
+ depends on DRM_MEDIATEK
+ select DRM_MEDIATEK_HDMI_COMMON
+ help
+ Say yes here to enable support for the HDMIv2 IP and related
+ DDCv2 as found in the MediaTek MT8195 and MT8188 SoCs and other
+ variants.
+ This driver can also be built as a module. If so, the HDMIv2
+ module will be called "mtk_hdmi_v2", and the DDCv2 module
+ will be called "mtk_hdmi_ddc_v2".
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 43afd0a26d14..e0ac49b07d50 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -21,8 +21,11 @@ mediatek-drm-y := mtk_crtc.o \
obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI_COMMON) += mtk_hdmi_common.o
obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_cec.o
obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi.o
obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mtk_hdmi_ddc.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI_V2) += mtk_hdmi_v2.o
+obj-$(CONFIG_DRM_MEDIATEK_HDMI_V2) += mtk_hdmi_ddc_v2.o
obj-$(CONFIG_DRM_MEDIATEK_DP) += mtk_dp.o
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index bc7527542fdc..991cdb3d7d5f 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -16,6 +16,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -283,6 +284,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
unsigned int i;
unsigned long flags;
+ /* release GCE HW usage and start autosuspend */
+ pm_runtime_mark_last_busy(cmdq_cl->chan->mbox->dev);
+ pm_runtime_put_autosuspend(cmdq_cl->chan->mbox->dev);
+
if (data->sta < 0)
return;
@@ -618,6 +623,9 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
mtk_crtc->config_updating = false;
spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ if (pm_runtime_resume_and_get(mtk_crtc->cmdq_client.chan->mbox->dev) < 0)
+ goto update_config_out;
+
mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
goto update_config_out;
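The two hunks above bracket every CMDQ submission with a runtime-PM reference on the GCE mailbox device: taken before mbox_send_message(), and dropped in the transmit-done callback so the hardware can autosuspend between flushes. The shape of the pattern, with chan and pkt standing in for the driver's cmdq channel and packet:

	/* Submission path: keep the mailbox hardware awake. */
	ret = pm_runtime_resume_and_get(chan->mbox->dev);
	if (ret < 0)
		return ret;

	mbox_send_message(chan, pkt);
	mbox_client_txdone(chan, 0);

	/* Completion callback: note the activity, drop the reference. */
	pm_runtime_mark_last_busy(chan->mbox->dev);
	pm_runtime_put_autosuspend(chan->mbox->dev);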
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
index ac6620e10262..9672ea1f91a2 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c
@@ -621,15 +621,27 @@ int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev)
return ret;
}
-int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
+static void mtk_ddp_comp_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
+static void mtk_ddp_comp_clk_put(void *_clk)
+{
+ struct clk *clk = _clk;
+
+ clk_put(clk);
+}
+
+int mtk_ddp_comp_init(struct device *dev, struct device_node *node, struct mtk_ddp_comp *comp,
unsigned int comp_id)
{
struct platform_device *comp_pdev;
enum mtk_ddp_comp_type type;
struct mtk_ddp_comp_dev *priv;
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
int ret;
-#endif
if (comp_id >= DDP_COMPONENT_DRM_ID_MAX)
return -EINVAL;
@@ -651,6 +663,10 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
}
comp->dev = &comp_pdev->dev;
+ ret = devm_add_action_or_reset(dev, mtk_ddp_comp_put_device, comp->dev);
+ if (ret)
+ return ret;
+
if (type == MTK_DISP_AAL ||
type == MTK_DISP_BLS ||
type == MTK_DISP_CCORR ||
@@ -666,15 +682,22 @@ int mtk_ddp_comp_init(struct device_node *node, struct mtk_ddp_comp *comp,
type == MTK_DSI)
return 0;
- priv = devm_kzalloc(comp->dev, sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- priv->regs = of_iomap(node, 0);
+ priv->regs = devm_of_iomap(dev, node, 0, NULL);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
priv->clk = of_clk_get(node, 0);
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
+ ret = devm_add_action_or_reset(dev, mtk_ddp_comp_clk_put, priv->clk);
+ if (ret)
+ return ret;
+
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
ret = cmdq_dev_get_client_reg(comp->dev, &priv->cmdq_reg, 0);
if (ret)
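The helper-plus-devm_add_action_or_reset() pairing used here is the usual way to tie a manually taken reference to a devres-managed device's lifetime. If registering the action fails, the callback runs immediately, so no error path has to remember the put. A generic sketch (foo_put_device and comp_dev are illustrative):

static void foo_put_device(void *data)
{
	put_device(data);
}

	/* In probe/init, after taking the device reference: */
	ret = devm_add_action_or_reset(dev, foo_put_device, comp_dev);
	if (ret)
		return ret;	/* foo_put_device() has already run */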
diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
index 7289b3dcf22f..3f3d43f4330d 100644
--- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h
@@ -350,7 +350,7 @@ static inline void mtk_ddp_comp_encoder_index_set(struct mtk_ddp_comp *comp)
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type);
int mtk_find_possible_crtcs(struct drm_device *drm, struct device *dev);
-int mtk_ddp_comp_init(struct device_node *comp_node, struct mtk_ddp_comp *comp,
+int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, struct mtk_ddp_comp *comp,
unsigned int comp_id);
enum mtk_ddp_comp_type mtk_ddp_comp_get_type(unsigned int comp_id);
void mtk_ddp_write(struct cmdq_pkt *cmdq_pkt, unsigned int value,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 10d60d2c2a56..6d7bf4afa78d 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -80,27 +80,6 @@ void mtk_ccorr_stop(struct device *dev)
writel_relaxed(0x0, ccorr->regs + DISP_CCORR_EN);
}
-/* Converts a DRM S31.32 value to the HW S1.n format. */
-static u16 mtk_ctm_s31_32_to_s1_n(u64 in, u32 n)
-{
- u16 r;
-
- /* Sign bit. */
- r = in & BIT_ULL(63) ? BIT(n + 1) : 0;
-
- if ((in & GENMASK_ULL(62, 33)) > 0) {
- /* identity value 0x100000000 -> 0x400(mt8183), */
- /* identity value 0x100000000 -> 0x800(mt8192), */
- /* if bigger this, set it to max 0x7ff. */
- r |= GENMASK(n, 0);
- } else {
- /* take the n+1 most important bits. */
- r |= (in >> (32 - n)) & GENMASK(n, 0);
- }
-
- return r;
-}
-
void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state)
{
struct mtk_disp_ccorr *ccorr = dev_get_drvdata(dev);
@@ -119,7 +98,7 @@ void mtk_ccorr_ctm_set(struct device *dev, struct drm_crtc_state *state)
input = ctm->matrix;
for (i = 0; i < ARRAY_SIZE(coeffs); i++)
- coeffs[i] = mtk_ctm_s31_32_to_s1_n(input[i], matrix_bits);
+ coeffs[i] = drm_color_ctm_s31_32_to_qm_n(input[i], 2, matrix_bits);
mtk_ddp_write(cmdq_pkt, coeffs[0] << 16 | coeffs[1],
&ccorr->cmdq_reg, ccorr->regs, DISP_CCORR_COEF_0);
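drm_color_ctm_s31_32_to_qm_n() is the generic helper that replaces the removed driver-local conversion: DRM CTM coefficients arrive as sign-magnitude S31.32 fixed point and are truncated to the hardware's sign-magnitude Qm.n format. A worked example matching the identity values quoted in the removed comments:

	/* Identity coefficient 1.0 in S31.32: the integer part is bit 32. */
	u64 one = 1ULL << 32;

	/* mt8183: 10 fractional bits, so 1.0 -> 0x400. */
	u16 coeff_8183 = drm_color_ctm_s31_32_to_qm_n(one, 2, 10);

	/* mt8192: 11 fractional bits, so 1.0 -> 0x800. */
	u16 coeff_8192 = drm_color_ctm_s31_32_to_qm_n(one, 2, 11);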
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index fe97bb97e004..c0af3e3b51d5 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -527,6 +527,13 @@ bool mtk_ovl_adaptor_is_comp_present(struct device_node *node)
type == OVL_ADAPTOR_TYPE_PADDING;
}
+static void ovl_adaptor_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match)
{
struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
@@ -560,6 +567,11 @@ static int ovl_adaptor_comp_init(struct device *dev, struct component_match **ma
if (!comp_pdev)
return -EPROBE_DEFER;
+ ret = devm_add_action_or_reset(dev, ovl_adaptor_put_device,
+ &comp_pdev->dev);
+ if (ret)
+ return ret;
+
priv->ovl_adaptor_comp[id] = &comp_pdev->dev;
drm_of_component_match_add(dev, match, component_compare_of, node);
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index bef6eeb30d3e..b0b1e158600f 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2087,6 +2087,7 @@ static int mtk_dp_dt_parse(struct mtk_dp *mtk_dp,
endpoint = of_graph_get_endpoint_by_regs(pdev->dev.of_node, 1, -1);
len = of_property_count_elems_of_size(endpoint,
"data-lanes", sizeof(u32));
+ of_node_put(endpoint);
if (len < 0 || len > 4 || len == 3) {
dev_err(dev, "invalid data lane size: %d\n", len);
return -EINVAL;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index eb5537f0ac90..a94c51a83261 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -686,10 +686,6 @@ err_free:
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = NULL;
err_put_dev:
- for (i = 0; i < private->data->mmsys_dev_num; i++) {
- /* For device_find_child in mtk_drm_get_all_priv() */
- put_device(private->all_drm_private[i]->dev);
- }
put_device(private->mutex_dev);
return ret;
}
@@ -697,18 +693,12 @@ err_put_dev:
static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
- int i;
/* for multi mmsys dev, unregister drm dev in mmsys master */
if (private->drm_master) {
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
-
- for (i = 0; i < private->data->mmsys_dev_num; i++) {
- /* For device_find_child in mtk_drm_get_all_priv() */
- put_device(private->all_drm_private[i]->dev);
- }
put_device(private->mutex_dev);
}
private->mtk_drm_bound = false;
@@ -1133,7 +1123,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
(void *)private->mmsys_dev,
sizeof(*private->mmsys_dev));
private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR].dev = &ovl_adaptor->dev;
- mtk_ddp_comp_init(NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR],
+ mtk_ddp_comp_init(dev, NULL, &private->ddp_comp[DDP_COMPONENT_DRM_OVL_ADAPTOR],
DDP_COMPONENT_DRM_OVL_ADAPTOR);
component_match_add(dev, &match, compare_dev, &ovl_adaptor->dev);
}
@@ -1199,7 +1189,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
node);
}
- ret = mtk_ddp_comp_init(node, &private->ddp_comp[comp_id], comp_id);
+ ret = mtk_ddp_comp_init(dev, node, &private->ddp_comp[comp_id], comp_id);
if (ret) {
of_node_put(node);
goto err_node;
diff --git a/drivers/gpu/drm/mediatek/mtk_gem.c b/drivers/gpu/drm/mediatek/mtk_gem.c
index a172456d1d7b..024cc7e9036c 100644
--- a/drivers/gpu/drm/mediatek/mtk_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_gem.c
@@ -11,6 +11,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include "mtk_drm_drv.h"
#include "mtk_gem.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index b766dd5e6c8d..0face4dcaa36 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -31,6 +31,7 @@
#include <drm/drm_probe_helper.h>
#include "mtk_cec.h"
+#include "mtk_hdmi_common.h"
#include "mtk_hdmi_regs.h"
#define NCTS_BYTES 7
@@ -43,143 +44,6 @@ enum mtk_hdmi_clk_id {
MTK_HDMI_CLK_COUNT
};
-enum hdmi_aud_input_type {
- HDMI_AUD_INPUT_I2S = 0,
- HDMI_AUD_INPUT_SPDIF,
-};
-
-enum hdmi_aud_i2s_fmt {
- HDMI_I2S_MODE_RJT_24BIT = 0,
- HDMI_I2S_MODE_RJT_16BIT,
- HDMI_I2S_MODE_LJT_24BIT,
- HDMI_I2S_MODE_LJT_16BIT,
- HDMI_I2S_MODE_I2S_24BIT,
- HDMI_I2S_MODE_I2S_16BIT
-};
-
-enum hdmi_aud_mclk {
- HDMI_AUD_MCLK_128FS,
- HDMI_AUD_MCLK_192FS,
- HDMI_AUD_MCLK_256FS,
- HDMI_AUD_MCLK_384FS,
- HDMI_AUD_MCLK_512FS,
- HDMI_AUD_MCLK_768FS,
- HDMI_AUD_MCLK_1152FS,
-};
-
-enum hdmi_aud_channel_type {
- HDMI_AUD_CHAN_TYPE_1_0 = 0,
- HDMI_AUD_CHAN_TYPE_1_1,
- HDMI_AUD_CHAN_TYPE_2_0,
- HDMI_AUD_CHAN_TYPE_2_1,
- HDMI_AUD_CHAN_TYPE_3_0,
- HDMI_AUD_CHAN_TYPE_3_1,
- HDMI_AUD_CHAN_TYPE_4_0,
- HDMI_AUD_CHAN_TYPE_4_1,
- HDMI_AUD_CHAN_TYPE_5_0,
- HDMI_AUD_CHAN_TYPE_5_1,
- HDMI_AUD_CHAN_TYPE_6_0,
- HDMI_AUD_CHAN_TYPE_6_1,
- HDMI_AUD_CHAN_TYPE_7_0,
- HDMI_AUD_CHAN_TYPE_7_1,
- HDMI_AUD_CHAN_TYPE_3_0_LRS,
- HDMI_AUD_CHAN_TYPE_3_1_LRS,
- HDMI_AUD_CHAN_TYPE_4_0_CLRS,
- HDMI_AUD_CHAN_TYPE_4_1_CLRS,
- HDMI_AUD_CHAN_TYPE_6_1_CS,
- HDMI_AUD_CHAN_TYPE_6_1_CH,
- HDMI_AUD_CHAN_TYPE_6_1_OH,
- HDMI_AUD_CHAN_TYPE_6_1_CHR,
- HDMI_AUD_CHAN_TYPE_7_1_LH_RH,
- HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR,
- HDMI_AUD_CHAN_TYPE_7_1_LC_RC,
- HDMI_AUD_CHAN_TYPE_7_1_LW_RW,
- HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD,
- HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS,
- HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS,
- HDMI_AUD_CHAN_TYPE_7_1_CS_CH,
- HDMI_AUD_CHAN_TYPE_7_1_CS_OH,
- HDMI_AUD_CHAN_TYPE_7_1_CS_CHR,
- HDMI_AUD_CHAN_TYPE_7_1_CH_OH,
- HDMI_AUD_CHAN_TYPE_7_1_CH_CHR,
- HDMI_AUD_CHAN_TYPE_7_1_OH_CHR,
- HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR,
- HDMI_AUD_CHAN_TYPE_6_0_CS,
- HDMI_AUD_CHAN_TYPE_6_0_CH,
- HDMI_AUD_CHAN_TYPE_6_0_OH,
- HDMI_AUD_CHAN_TYPE_6_0_CHR,
- HDMI_AUD_CHAN_TYPE_7_0_LH_RH,
- HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR,
- HDMI_AUD_CHAN_TYPE_7_0_LC_RC,
- HDMI_AUD_CHAN_TYPE_7_0_LW_RW,
- HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD,
- HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS,
- HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS,
- HDMI_AUD_CHAN_TYPE_7_0_CS_CH,
- HDMI_AUD_CHAN_TYPE_7_0_CS_OH,
- HDMI_AUD_CHAN_TYPE_7_0_CS_CHR,
- HDMI_AUD_CHAN_TYPE_7_0_CH_OH,
- HDMI_AUD_CHAN_TYPE_7_0_CH_CHR,
- HDMI_AUD_CHAN_TYPE_7_0_OH_CHR,
- HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR,
- HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS,
- HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF
-};
-
-enum hdmi_aud_channel_swap_type {
- HDMI_AUD_SWAP_LR,
- HDMI_AUD_SWAP_LFE_CC,
- HDMI_AUD_SWAP_LSRS,
- HDMI_AUD_SWAP_RLS_RRS,
- HDMI_AUD_SWAP_LR_STATUS,
-};
-
-struct hdmi_audio_param {
- enum hdmi_audio_coding_type aud_codec;
- enum hdmi_audio_sample_size aud_sample_size;
- enum hdmi_aud_input_type aud_input_type;
- enum hdmi_aud_i2s_fmt aud_i2s_fmt;
- enum hdmi_aud_mclk aud_mclk;
- enum hdmi_aud_channel_type aud_input_chan_type;
- struct hdmi_codec_params codec_params;
-};
-
-struct mtk_hdmi_conf {
- bool tz_disabled;
- bool cea_modes_only;
- unsigned long max_mode_clock;
-};
-
-struct mtk_hdmi {
- struct drm_bridge bridge;
- struct drm_bridge *next_bridge;
- struct drm_connector *curr_conn;/* current connector (only valid when 'enabled') */
- struct device *dev;
- const struct mtk_hdmi_conf *conf;
- struct phy *phy;
- struct device *cec_dev;
- struct i2c_adapter *ddc_adpt;
- struct clk *clk[MTK_HDMI_CLK_COUNT];
- struct drm_display_mode mode;
- bool dvi_mode;
- struct regmap *sys_regmap;
- unsigned int sys_offset;
- struct regmap *regs;
- struct platform_device *audio_pdev;
- struct hdmi_audio_param aud_param;
- bool audio_enable;
- bool powered;
- bool enabled;
- hdmi_codec_plugged_cb plugged_cb;
- struct device *codec_dev;
- struct mutex update_plugged_status_lock;
-};
-
-static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
-{
- return container_of(b, struct mtk_hdmi, bridge);
-}
-
static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
{
regmap_update_bits(hdmi->regs, VIDEO_CFG_4,
@@ -600,88 +464,6 @@ static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
regmap_write(hdmi->regs, GRL_CFG5, val);
}
-struct hdmi_acr_n {
- unsigned int clock;
- unsigned int n[3];
-};
-
-/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */
-static const struct hdmi_acr_n hdmi_rec_n_table[] = {
- /* Clock, N: 32kHz 44.1kHz 48kHz */
- { 25175, { 4576, 7007, 6864 } },
- { 74176, { 11648, 17836, 11648 } },
- { 148352, { 11648, 8918, 5824 } },
- { 296703, { 5824, 4459, 5824 } },
- { 297000, { 3072, 4704, 5120 } },
- { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */
-};
-
-/**
- * hdmi_recommended_n() - Return N value recommended by HDMI specification
- * @freq: audio sample rate in Hz
- * @clock: rounded TMDS clock in kHz
- */
-static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock)
-{
- const struct hdmi_acr_n *recommended;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) {
- if (clock == hdmi_rec_n_table[i].clock)
- break;
- }
- recommended = hdmi_rec_n_table + i;
-
- switch (freq) {
- case 32000:
- return recommended->n[0];
- case 44100:
- return recommended->n[1];
- case 48000:
- return recommended->n[2];
- case 88200:
- return recommended->n[1] * 2;
- case 96000:
- return recommended->n[2] * 2;
- case 176400:
- return recommended->n[1] * 4;
- case 192000:
- return recommended->n[2] * 4;
- default:
- return (128 * freq) / 1000;
- }
-}
-
-static unsigned int hdmi_mode_clock_to_hz(unsigned int clock)
-{
- switch (clock) {
- case 25175:
- return 25174825; /* 25.2/1.001 MHz */
- case 74176:
- return 74175824; /* 74.25/1.001 MHz */
- case 148352:
- return 148351648; /* 148.5/1.001 MHz */
- case 296703:
- return 296703297; /* 297/1.001 MHz */
- default:
- return clock * 1000;
- }
-}
-
-static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
- unsigned int tmds_clock, unsigned int n)
-{
- return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n,
- 128 * audio_sample_rate);
-}
-
-static void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock,
- unsigned int *n, unsigned int *cts)
-{
- *n = hdmi_recommended_n(sample_rate, clock);
- *cts = hdmi_expected_cts(sample_rate, clock, *n);
-}
-
static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
unsigned int cts)
{
@@ -1072,20 +854,6 @@ static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = {
[MTK_HDMI_CLK_AUD_SPDIF] = "spdif",
};
-static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi,
- struct device_node *np)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mtk_hdmi_clk_names); i++) {
- hdmi->clk[i] = of_clk_get_by_name(np,
- mtk_hdmi_clk_names[i]);
- if (IS_ERR(hdmi->clk[i]))
- return PTR_ERR(hdmi->clk[i]);
- }
- return 0;
-}
-
static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi)
{
int ret;
@@ -1230,13 +998,6 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
return 0;
}
-static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static void mtk_hdmi_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
@@ -1268,28 +1029,6 @@ static void mtk_hdmi_bridge_atomic_post_disable(struct drm_bridge *bridge,
hdmi->powered = false;
}
-static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
-
- dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n",
- adjusted_mode->name, adjusted_mode->hdisplay);
- dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d",
- adjusted_mode->hsync_start, adjusted_mode->hsync_end,
- adjusted_mode->htotal);
- dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n",
- adjusted_mode->hskew, adjusted_mode->vdisplay);
- dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d",
- adjusted_mode->vsync_start, adjusted_mode->vsync_end,
- adjusted_mode->vtotal);
- dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n",
- adjusted_mode->vscan, adjusted_mode->flags);
-
- drm_mode_copy(&hdmi->mode, adjusted_mode);
-}
-
static void mtk_hdmi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
@@ -1345,169 +1084,10 @@ static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
.edid_read = mtk_hdmi_bridge_edid_read,
};
-static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np)
-{
- struct platform_device *cec_pdev;
- struct device_node *cec_np;
- int ret;
-
- ret = mtk_hdmi_get_all_clk(hdmi, np);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get clocks\n");
-
- /* The CEC module handles HDMI hotplug detection */
- cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
- if (!cec_np)
- return dev_err_probe(dev, -EINVAL, "Failed to find CEC node\n");
-
- cec_pdev = of_find_device_by_node(cec_np);
- if (!cec_pdev) {
- dev_err(hdmi->dev, "Waiting for CEC device %pOF\n",
- cec_np);
- of_node_put(cec_np);
- return -EPROBE_DEFER;
- }
- of_node_put(cec_np);
-
- /*
- * The mediatek,syscon-hdmi property contains a phandle link to the
- * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
- * registers it contains.
- */
- hdmi->sys_regmap = syscon_regmap_lookup_by_phandle_args(np, "mediatek,syscon-hdmi",
- 1, &hdmi->sys_offset);
- if (IS_ERR(hdmi->sys_regmap))
- return dev_err_probe(dev, PTR_ERR(hdmi->sys_regmap),
- "Failed to get system configuration registers\n");
-
- hdmi->cec_dev = &cec_pdev->dev;
- return 0;
-}
-
-static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
- struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct device_node *remote, *i2c_np;
- int ret;
-
- ret = mtk_hdmi_get_all_clk(hdmi, np);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to get clocks\n");
-
- hdmi->regs = device_node_to_regmap(dev->of_node);
- if (IS_ERR(hdmi->regs))
- return PTR_ERR(hdmi->regs);
-
- remote = of_graph_get_remote_node(np, 1, 0);
- if (!remote)
- return -EINVAL;
-
- if (!of_device_is_compatible(remote, "hdmi-connector")) {
- hdmi->next_bridge = of_drm_find_bridge(remote);
- if (!hdmi->next_bridge) {
- dev_err(dev, "Waiting for external bridge\n");
- of_node_put(remote);
- return -EPROBE_DEFER;
- }
- }
-
- i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
- of_node_put(remote);
- if (!i2c_np)
- return dev_err_probe(dev, -EINVAL, "No ddc-i2c-bus in connector\n");
-
- hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
- of_node_put(i2c_np);
- if (!hdmi->ddc_adpt)
- return dev_err_probe(dev, -EINVAL, "Failed to get ddc i2c adapter by node\n");
-
- ret = mtk_hdmi_get_cec_dev(hdmi, dev, np);
- if (ret)
- return ret;
-
- return 0;
-}
-
/*
* HDMI audio codec callbacks
*/
-static int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct hdmi_audio_param aud_params = { 0 };
- unsigned int chan = params->cea.channels;
-
- dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
- params->sample_rate, params->sample_width, chan);
-
- if (!hdmi->bridge.encoder)
- return -ENODEV;
-
- switch (chan) {
- case 2:
- aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
- break;
- case 4:
- aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
- break;
- case 6:
- aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
- break;
- case 8:
- aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
- break;
- default:
- dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
- return -EINVAL;
- }
-
- switch (params->sample_rate) {
- case 32000:
- case 44100:
- case 48000:
- case 88200:
- case 96000:
- case 176400:
- case 192000:
- break;
- default:
- dev_err(hdmi->dev, "rate[%d] not supported!\n",
- params->sample_rate);
- return -EINVAL;
- }
-
- switch (daifmt->fmt) {
- case HDMI_I2S:
- aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- aud_params.aud_input_type = HDMI_AUD_INPUT_I2S;
- aud_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- aud_params.aud_mclk = HDMI_AUD_MCLK_128FS;
- break;
- case HDMI_SPDIF:
- aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- aud_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
- break;
- default:
- dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
- daifmt->fmt);
- return -EINVAL;
- }
- memcpy(&aud_params.codec_params, params, sizeof(aud_params.codec_params));
- memcpy(&hdmi->aud_param, &aud_params, sizeof(aud_params));
-
- dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
- aud_params.aud_codec, aud_params.aud_input_type,
- aud_params.aud_input_chan_type, aud_params.codec_params.sample_rate);
-
- return 0;
-}
-
static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
struct hdmi_codec_daifmt *daifmt,
struct hdmi_codec_params *params)
@@ -1555,26 +1135,6 @@ mtk_hdmi_audio_mute(struct device *dev, void *data,
return 0;
}
-static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
-{
- struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
-
- if (hdmi->enabled)
- memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len));
- else
- memset(buf, 0, len);
- return 0;
-}
-
-static void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn,
- struct device *codec_dev)
-{
- mutex_lock(&hdmi->update_plugged_status_lock);
- hdmi->plugged_cb = fn;
- hdmi->codec_dev = codec_dev;
- mutex_unlock(&hdmi->update_plugged_status_lock);
-}
-
static int mtk_hdmi_audio_hook_plugged_cb(struct device *dev, void *data,
hdmi_codec_plugged_cb fn,
struct device *codec_dev)
@@ -1596,92 +1156,21 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
.hook_plugged_cb = mtk_hdmi_audio_hook_plugged_cb,
};
-static void mtk_hdmi_unregister_audio_driver(void *data)
-{
- platform_device_unregister(data);
-}
-
-static int mtk_hdmi_register_audio_driver(struct device *dev)
-{
- struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- struct hdmi_audio_param *aud_param = &hdmi->aud_param;
- struct hdmi_codec_pdata codec_data = {
- .ops = &mtk_hdmi_audio_codec_ops,
- .max_i2s_channels = 2,
- .i2s = 1,
- .data = hdmi,
- .no_capture_mute = 1,
- };
- int ret;
-
- aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
- aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
- aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
- aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
- aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
- aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
-
- hdmi->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- if (IS_ERR(hdmi->audio_pdev))
- return PTR_ERR(hdmi->audio_pdev);
-
- ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver,
- hdmi->audio_pdev);
- if (ret)
- return ret;
-
- return 0;
-}
-
static int mtk_hdmi_probe(struct platform_device *pdev)
{
struct mtk_hdmi *hdmi;
- struct device *dev = &pdev->dev;
int ret;
- hdmi = devm_drm_bridge_alloc(dev, struct mtk_hdmi, bridge,
- &mtk_hdmi_bridge_funcs);
+ hdmi = mtk_hdmi_common_probe(pdev);
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
- hdmi->dev = dev;
- hdmi->conf = of_device_get_match_data(dev);
-
- ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
- if (ret)
- return ret;
-
- hdmi->phy = devm_phy_get(dev, "hdmi");
- if (IS_ERR(hdmi->phy))
- return dev_err_probe(dev, PTR_ERR(hdmi->phy),
- "Failed to get HDMI PHY\n");
-
- mutex_init(&hdmi->update_plugged_status_lock);
- platform_set_drvdata(pdev, hdmi);
-
- ret = mtk_hdmi_register_audio_driver(dev);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to register audio driver\n");
-
- hdmi->bridge.of_node = pdev->dev.of_node;
- hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
- | DRM_BRIDGE_OP_HPD;
- hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- hdmi->bridge.vendor = "MediaTek";
- hdmi->bridge.product = "On-Chip HDMI";
-
- ret = devm_drm_bridge_add(dev, &hdmi->bridge);
- if (ret)
- return dev_err_probe(dev, ret, "Failed to add bridge\n");
+ if (!hdmi->cec_dev)
+ return dev_err_probe(hdmi->dev, -ENODEV, "CEC is required by HDMIv1\n");
ret = mtk_hdmi_clk_enable_audio(hdmi);
if (ret)
- return dev_err_probe(dev, ret,
+ return dev_err_probe(hdmi->dev, ret,
"Failed to enable audio clocks\n");
return 0;
@@ -1712,19 +1201,32 @@ static __maybe_unused int mtk_hdmi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops, mtk_hdmi_suspend, mtk_hdmi_resume);
+static const struct mtk_hdmi_ver_conf mtk_hdmi_v1_ver_conf = {
+ .bridge_funcs = &mtk_hdmi_bridge_funcs,
+ .codec_ops = &mtk_hdmi_audio_codec_ops,
+ .mtk_hdmi_clock_names = mtk_hdmi_clk_names,
+ .num_clocks = ARRAY_SIZE(mtk_hdmi_clk_names)
+};
+
static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = {
.tz_disabled = true,
+ .ver_conf = &mtk_hdmi_v1_ver_conf
};
static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8167 = {
- .max_mode_clock = 148500,
.cea_modes_only = true,
+ .max_mode_clock = 148500,
+ .ver_conf = &mtk_hdmi_v1_ver_conf
+};
+
+static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8173 = {
+ .ver_conf = &mtk_hdmi_v1_ver_conf
};
static const struct of_device_id mtk_hdmi_of_ids[] = {
{ .compatible = "mediatek,mt2701-hdmi", .data = &mtk_hdmi_conf_mt2701 },
{ .compatible = "mediatek,mt8167-hdmi", .data = &mtk_hdmi_conf_mt8167 },
- { .compatible = "mediatek,mt8173-hdmi" },
+ { .compatible = "mediatek,mt8173-hdmi", .data = &mtk_hdmi_conf_mt8173 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mtk_hdmi_of_ids);
@@ -1744,3 +1246,4 @@ MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
MODULE_DESCRIPTION("MediaTek HDMI Driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS("DRM_MTK_HDMI_V1");
+MODULE_IMPORT_NS("DRM_MTK_HDMI");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_common.c b/drivers/gpu/drm/mediatek/mtk_hdmi_common.c
new file mode 100644
index 000000000000..e78eb0876f16
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_common.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <drm/drm_modes.h>
+#include <linux/device.h>
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/math.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <sound/hdmi-codec.h>
+
+#include "mtk_hdmi_common.h"
+
+struct hdmi_acr_n {
+ unsigned int clock;
+ unsigned int n[3];
+};
+
+/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */
+static const struct hdmi_acr_n hdmi_rec_n_table[] = {
+ /* Clock, N: 32kHz 44.1kHz 48kHz */
+ { 25175, { 4576, 7007, 6864 } },
+ { 74176, { 11648, 17836, 11648 } },
+ { 148352, { 11648, 8918, 5824 } },
+ { 296703, { 5824, 4459, 5824 } },
+ { 297000, { 3072, 4704, 5120 } },
+ { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */
+};
+
+/**
+ * hdmi_recommended_n() - Return N value recommended by HDMI specification
+ * @freq: audio sample rate in Hz
+ * @clock: rounded TMDS clock in kHz
+ */
+static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock)
+{
+ const struct hdmi_acr_n *recommended;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) {
+ if (clock == hdmi_rec_n_table[i].clock)
+ break;
+ }
+ recommended = hdmi_rec_n_table + i;
+
+ switch (freq) {
+ case 32000:
+ return recommended->n[0];
+ case 44100:
+ return recommended->n[1];
+ case 48000:
+ return recommended->n[2];
+ case 88200:
+ return recommended->n[1] * 2;
+ case 96000:
+ return recommended->n[2] * 2;
+ case 176400:
+ return recommended->n[1] * 4;
+ case 192000:
+ return recommended->n[2] * 4;
+ default:
+ return (128 * freq) / 1000;
+ }
+}
+
+static unsigned int hdmi_mode_clock_to_hz(unsigned int clock)
+{
+ switch (clock) {
+ case 25175:
+ return 25174825; /* 25.2/1.001 MHz */
+ case 74176:
+ return 74175824; /* 74.25/1.001 MHz */
+ case 148352:
+ return 148351648; /* 148.5/1.001 MHz */
+ case 296703:
+ return 296703297; /* 297/1.001 MHz */
+ default:
+ return clock * 1000;
+ }
+}
+
+static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
+ unsigned int tmds_clock, unsigned int n)
+{
+ return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n,
+ 128 * audio_sample_rate);
+}
+
+void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock,
+ unsigned int *n, unsigned int *cts)
+{
+ *n = hdmi_recommended_n(sample_rate, clock);
+ *cts = hdmi_expected_cts(sample_rate, clock, *n);
+}
+EXPORT_SYMBOL_NS_GPL(mtk_hdmi_get_ncts, "DRM_MTK_HDMI");
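A minimal worked example of what this helper yields, derived from the table and helpers above (the 148.352 MHz / 48 kHz pairing is only an illustration, not taken from the patch):

    unsigned int n, cts;

    mtk_hdmi_get_ncts(48000, 148352, &n, &cts);
    /* n == 5824, from the 148352 kHz row of hdmi_rec_n_table */
    /* cts == DIV_ROUND_CLOSEST_ULL(148351648ULL * 5824, 128 * 48000) == 140625 */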
+
+int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct hdmi_audio_param aud_params = { 0 };
+ unsigned int chan = params->cea.channels;
+
+ dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
+ params->sample_rate, params->sample_width, chan);
+
+ if (!hdmi->bridge.encoder)
+ return -ENODEV;
+
+ switch (chan) {
+ case 2:
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+ break;
+ case 4:
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
+ break;
+ case 6:
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
+ break;
+ case 8:
+ aud_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
+ break;
+ default:
+ dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
+ return -EINVAL;
+ }
+
+ switch (params->sample_rate) {
+ case 32000:
+ case 44100:
+ case 48000:
+ case 88200:
+ case 96000:
+ case 176400:
+ case 192000:
+ break;
+ default:
+ dev_err(hdmi->dev, "rate[%d] not supported!\n",
+ params->sample_rate);
+ return -EINVAL;
+ }
+
+ switch (daifmt->fmt) {
+ case HDMI_I2S:
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_params.aud_mclk = HDMI_AUD_MCLK_128FS;
+ break;
+ case HDMI_SPDIF:
+ aud_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_params.aud_input_type = HDMI_AUD_INPUT_SPDIF;
+ break;
+ default:
+ dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
+ daifmt->fmt);
+ return -EINVAL;
+ }
+ memcpy(&aud_params.codec_params, params, sizeof(aud_params.codec_params));
+ memcpy(&hdmi->aud_param, &aud_params, sizeof(aud_params));
+
+ dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
+ aud_params.aud_codec, aud_params.aud_input_type,
+ aud_params.aud_input_chan_type, aud_params.codec_params.sample_rate);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(mtk_hdmi_audio_params, "DRM_MTK_HDMI");
+
+int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ if (hdmi->enabled)
+ memcpy(buf, hdmi->curr_conn->eld, min(sizeof(hdmi->curr_conn->eld), len));
+ else
+ memset(buf, 0, len);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(mtk_hdmi_audio_get_eld, "DRM_MTK_HDMI");
+
+void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ mutex_lock(&hdmi->update_plugged_status_lock);
+ hdmi->plugged_cb = fn;
+ hdmi->codec_dev = codec_dev;
+ mutex_unlock(&hdmi->update_plugged_status_lock);
+}
+EXPORT_SYMBOL_NS_GPL(mtk_hdmi_audio_set_plugged_cb, "DRM_MTK_HDMI");
+
+static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi, struct device_node *np,
+ const char * const *clock_names, size_t num_clocks)
+{
+ int i;
+
+ for (i = 0; i < num_clocks; i++) {
+ hdmi->clk[i] = of_clk_get_by_name(np, clock_names[i]);
+
+ if (IS_ERR(hdmi->clk[i]))
+ return PTR_ERR(hdmi->clk[i]);
+ }
+
+ return 0;
+}
+
+bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+EXPORT_SYMBOL_NS_GPL(mtk_hdmi_bridge_mode_fixup, "DRM_MTK_HDMI");
+
+void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n",
+ adjusted_mode->name, adjusted_mode->hdisplay);
+ dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d",
+ adjusted_mode->hsync_start, adjusted_mode->hsync_end,
+ adjusted_mode->htotal);
+ dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n",
+ adjusted_mode->hskew, adjusted_mode->vdisplay);
+ dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d",
+ adjusted_mode->vsync_start, adjusted_mode->vsync_end,
+ adjusted_mode->vtotal);
+ dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n",
+ adjusted_mode->vscan, adjusted_mode->flags);
+
+ drm_mode_copy(&hdmi->mode, adjusted_mode);
+}
+EXPORT_SYMBOL_NS_GPL(mtk_hdmi_bridge_mode_set, "DRM_MTK_HDMI");
+
+static void mtk_hdmi_put_device(void *_dev)
+{
+ struct device *dev = _dev;
+
+ put_device(dev);
+}
+
+static int mtk_hdmi_get_cec_dev(struct mtk_hdmi *hdmi, struct device *dev, struct device_node *np)
+{
+ struct platform_device *cec_pdev;
+ struct device_node *cec_np;
+ int ret;
+
+ /* The CEC module handles HDMI hotplug detection */
+ cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
+ if (!cec_np)
+ return dev_err_probe(dev, -EOPNOTSUPP, "Failed to find CEC node\n");
+
+ cec_pdev = of_find_device_by_node(cec_np);
+ if (!cec_pdev) {
+ dev_err(hdmi->dev, "Waiting for CEC device %pOF\n", cec_np);
+ of_node_put(cec_np);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(cec_np);
+
+ ret = devm_add_action_or_reset(dev, mtk_hdmi_put_device, &cec_pdev->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * The mediatek,syscon-hdmi property contains a phandle link to the
+ * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
+ * registers it contains.
+ */
+ hdmi->sys_regmap = syscon_regmap_lookup_by_phandle_args(np, "mediatek,syscon-hdmi",
+ 1, &hdmi->sys_offset);
+ if (IS_ERR(hdmi->sys_regmap))
+ return dev_err_probe(dev, PTR_ERR(hdmi->sys_regmap),
+ "Failed to get system configuration registers\n");
+
+ hdmi->cec_dev = &cec_pdev->dev;
+ return 0;
+}
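For reference, a sketch of the devicetree shape the syscon lookup above expects; the node name, unit address and 0x900 offset are assumptions for illustration, not requirements of this code:

    /*
     *	hdmi: hdmi@14025000 {
     *		mediatek,syscon-hdmi = <&mmsys 0x900>;
     *	};
     */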
+
+static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, struct platform_device *pdev,
+ const char * const *clk_names, size_t num_clocks)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *remote, *i2c_np;
+ int ret;
+
+ ret = mtk_hdmi_get_all_clk(hdmi, np, clk_names, num_clocks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ hdmi->irq = ret;
+
+ hdmi->regs = device_node_to_regmap(dev->of_node);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ remote = of_graph_get_remote_node(np, 1, 0);
+ if (!remote)
+ return -EINVAL;
+
+ if (!of_device_is_compatible(remote, "hdmi-connector")) {
+ hdmi->next_bridge = of_drm_find_bridge(remote);
+ if (!hdmi->next_bridge) {
+ dev_err(dev, "Waiting for external bridge\n");
+ of_node_put(remote);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ of_node_put(remote);
+ if (!i2c_np)
+ return dev_err_probe(dev, -EINVAL, "No ddc-i2c-bus in connector\n");
+
+ hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
+ of_node_put(i2c_np);
+ if (!hdmi->ddc_adpt)
+ return dev_err_probe(dev, -EPROBE_DEFER, "Failed to get ddc i2c adapter by node\n");
+
+ ret = devm_add_action_or_reset(dev, mtk_hdmi_put_device, &hdmi->ddc_adpt->dev);
+ if (ret)
+ return ret;
+
+ ret = mtk_hdmi_get_cec_dev(hdmi, dev, np);
+ if (ret == -EOPNOTSUPP)
+ dev_info(dev, "CEC support unavailable: node not found\n");
+ else if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void mtk_hdmi_unregister_audio_driver(void *data)
+{
+ platform_device_unregister(data);
+}
+
+static int mtk_hdmi_register_audio_driver(struct device *dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ struct hdmi_audio_param *aud_param = &hdmi->aud_param;
+ struct hdmi_codec_pdata codec_data = {
+ .ops = hdmi->conf->ver_conf->codec_ops,
+ .max_i2s_channels = 2,
+ .i2s = 1,
+ .data = hdmi,
+ .no_capture_mute = 1,
+ };
+ int ret;
+
+ aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
+ aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16;
+ aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
+ aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
+ aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
+ aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
+
+ hdmi->audio_pdev = platform_device_register_data(dev,
+ HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data,
+ sizeof(codec_data));
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
+
+ ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver,
+ hdmi->audio_pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct mtk_hdmi *mtk_hdmi_common_probe(struct platform_device *pdev)
+{
+ const struct mtk_hdmi_ver_conf *ver_conf;
+ const struct mtk_hdmi_conf *hdmi_conf;
+ struct device *dev = &pdev->dev;
+ struct mtk_hdmi *hdmi;
+ int ret;
+
+ hdmi_conf = of_device_get_match_data(dev);
+ if (!hdmi_conf)
+ return ERR_PTR(-ENODEV);
+
+ ver_conf = hdmi_conf->ver_conf;
+
+ hdmi = devm_drm_bridge_alloc(dev, struct mtk_hdmi, bridge,
+ ver_conf->bridge_funcs);
+ if (IS_ERR(hdmi))
+ return hdmi;
+
+ hdmi->dev = dev;
+ hdmi->conf = hdmi_conf;
+
+ hdmi->clk = devm_kcalloc(dev, ver_conf->num_clocks, sizeof(*hdmi->clk), GFP_KERNEL);
+ if (!hdmi->clk)
+ return ERR_PTR(-ENOMEM);
+
+ ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev, ver_conf->mtk_hdmi_clock_names,
+ ver_conf->num_clocks);
+ if (ret)
+ return ERR_PTR(ret);
+
+ hdmi->phy = devm_phy_get(dev, "hdmi");
+ if (IS_ERR(hdmi->phy))
+ return dev_err_cast_probe(dev, hdmi->phy, "Failed to get HDMI PHY\n");
+
+ mutex_init(&hdmi->update_plugged_status_lock);
+ platform_set_drvdata(pdev, hdmi);
+
+ ret = mtk_hdmi_register_audio_driver(dev);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret, "Cannot register HDMI Audio driver\n");
+
+ hdmi->bridge.of_node = pdev->dev.of_node;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
+ | DRM_BRIDGE_OP_HPD;
+
+ if (ver_conf->bridge_funcs->hdmi_write_infoframe &&
+ ver_conf->bridge_funcs->hdmi_clear_infoframe)
+ hdmi->bridge.ops |= DRM_BRIDGE_OP_HDMI;
+
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ hdmi->bridge.ddc = hdmi->ddc_adpt;
+ hdmi->bridge.vendor = "MediaTek";
+ hdmi->bridge.product = "On-Chip HDMI";
+ hdmi->bridge.interlace_allowed = ver_conf->interlace_allowed;
+
+ ret = devm_drm_bridge_add(dev, &hdmi->bridge);
+ if (ret)
+ return dev_err_ptr_probe(dev, ret, "Failed to add bridge\n");
+
+ return hdmi;
+}
+EXPORT_SYMBOL_NS_GPL(mtk_hdmi_common_probe, "DRM_MTK_HDMI");
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("MediaTek HDMI Common Library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_common.h b/drivers/gpu/drm/mediatek/mtk_hdmi_common.h
new file mode 100644
index 000000000000..de5e064585f8
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_common.h
@@ -0,0 +1,198 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ */
+
+#ifndef _MTK_HDMI_COMMON_H
+#define _MTK_HDMI_COMMON_H
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mutex.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#include <sound/hdmi-codec.h>
+
+enum hdmi_aud_input_type {
+ HDMI_AUD_INPUT_I2S = 0,
+ HDMI_AUD_INPUT_SPDIF,
+};
+
+enum hdmi_aud_i2s_fmt {
+ HDMI_I2S_MODE_RJT_24BIT = 0,
+ HDMI_I2S_MODE_RJT_16BIT,
+ HDMI_I2S_MODE_LJT_24BIT,
+ HDMI_I2S_MODE_LJT_16BIT,
+ HDMI_I2S_MODE_I2S_24BIT,
+ HDMI_I2S_MODE_I2S_16BIT
+};
+
+enum hdmi_aud_mclk {
+ HDMI_AUD_MCLK_128FS,
+ HDMI_AUD_MCLK_192FS,
+ HDMI_AUD_MCLK_256FS,
+ HDMI_AUD_MCLK_384FS,
+ HDMI_AUD_MCLK_512FS,
+ HDMI_AUD_MCLK_768FS,
+ HDMI_AUD_MCLK_1152FS,
+};
+
+enum hdmi_aud_channel_type {
+ HDMI_AUD_CHAN_TYPE_1_0 = 0,
+ HDMI_AUD_CHAN_TYPE_1_1,
+ HDMI_AUD_CHAN_TYPE_2_0,
+ HDMI_AUD_CHAN_TYPE_2_1,
+ HDMI_AUD_CHAN_TYPE_3_0,
+ HDMI_AUD_CHAN_TYPE_3_1,
+ HDMI_AUD_CHAN_TYPE_4_0,
+ HDMI_AUD_CHAN_TYPE_4_1,
+ HDMI_AUD_CHAN_TYPE_5_0,
+ HDMI_AUD_CHAN_TYPE_5_1,
+ HDMI_AUD_CHAN_TYPE_6_0,
+ HDMI_AUD_CHAN_TYPE_6_1,
+ HDMI_AUD_CHAN_TYPE_7_0,
+ HDMI_AUD_CHAN_TYPE_7_1,
+ HDMI_AUD_CHAN_TYPE_3_0_LRS,
+ HDMI_AUD_CHAN_TYPE_3_1_LRS,
+ HDMI_AUD_CHAN_TYPE_4_0_CLRS,
+ HDMI_AUD_CHAN_TYPE_4_1_CLRS,
+ HDMI_AUD_CHAN_TYPE_6_1_CS,
+ HDMI_AUD_CHAN_TYPE_6_1_CH,
+ HDMI_AUD_CHAN_TYPE_6_1_OH,
+ HDMI_AUD_CHAN_TYPE_6_1_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_LH_RH,
+ HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_7_1_LC_RC,
+ HDMI_AUD_CHAN_TYPE_7_1_LW_RW,
+ HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD,
+ HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS,
+ HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS,
+ HDMI_AUD_CHAN_TYPE_7_1_CS_CH,
+ HDMI_AUD_CHAN_TYPE_7_1_CS_OH,
+ HDMI_AUD_CHAN_TYPE_7_1_CS_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_CH_OH,
+ HDMI_AUD_CHAN_TYPE_7_1_CH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_OH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_6_0_CS,
+ HDMI_AUD_CHAN_TYPE_6_0_CH,
+ HDMI_AUD_CHAN_TYPE_6_0_OH,
+ HDMI_AUD_CHAN_TYPE_6_0_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_LH_RH,
+ HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_7_0_LC_RC,
+ HDMI_AUD_CHAN_TYPE_7_0_LW_RW,
+ HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD,
+ HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS,
+ HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS,
+ HDMI_AUD_CHAN_TYPE_7_0_CS_CH,
+ HDMI_AUD_CHAN_TYPE_7_0_CS_OH,
+ HDMI_AUD_CHAN_TYPE_7_0_CS_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_CH_OH,
+ HDMI_AUD_CHAN_TYPE_7_0_CH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_OH_CHR,
+ HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR,
+ HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS,
+ HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF
+};
+
+enum hdmi_aud_channel_swap_type {
+ HDMI_AUD_SWAP_LR,
+ HDMI_AUD_SWAP_LFE_CC,
+ HDMI_AUD_SWAP_LSRS,
+ HDMI_AUD_SWAP_RLS_RRS,
+ HDMI_AUD_SWAP_LR_STATUS,
+};
+
+struct hdmi_audio_param {
+ enum hdmi_audio_coding_type aud_codec;
+ enum hdmi_audio_sample_size aud_sample_size;
+ enum hdmi_aud_input_type aud_input_type;
+ enum hdmi_aud_i2s_fmt aud_i2s_fmt;
+ enum hdmi_aud_mclk aud_mclk;
+ enum hdmi_aud_channel_type aud_input_chan_type;
+ struct hdmi_codec_params codec_params;
+};
+
+enum hdmi_hpd_state {
+ HDMI_PLUG_OUT = 0,
+ HDMI_PLUG_IN_AND_SINK_POWER_ON,
+ HDMI_PLUG_IN_ONLY,
+};
+
+struct mtk_hdmi_ver_conf {
+ const struct drm_bridge_funcs *bridge_funcs;
+ const struct hdmi_codec_ops *codec_ops;
+ const char * const *mtk_hdmi_clock_names;
+ int num_clocks;
+ bool interlace_allowed;
+};
+
+struct mtk_hdmi_conf {
+ const struct mtk_hdmi_ver_conf *ver_conf;
+ bool tz_disabled;
+ bool cea_modes_only;
+ unsigned long max_mode_clock;
+ u32 reg_hdmi_tx_cfg;
+};
+
+struct mtk_hdmi {
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct drm_connector *curr_conn;/* current connector (only valid when 'enabled') */
+ struct device *dev;
+ const struct mtk_hdmi_conf *conf;
+ struct phy *phy;
+ struct device *cec_dev;
+ struct i2c_adapter *ddc_adpt;
+ struct clk **clk;
+ struct drm_display_mode mode;
+ bool dvi_mode;
+ struct regmap *sys_regmap;
+ unsigned int sys_offset;
+ struct regmap *regs;
+ struct platform_device *audio_pdev;
+ struct hdmi_audio_param aud_param;
+ bool audio_enable;
+ bool powered;
+ bool enabled;
+ unsigned int irq;
+ enum hdmi_hpd_state hpd;
+ hdmi_codec_plugged_cb plugged_cb;
+ struct device *codec_dev;
+ struct mutex update_plugged_status_lock;
+};
+
+static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
+{
+ return container_of(b, struct mtk_hdmi, bridge);
+}
+
+int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len);
+void mtk_hdmi_audio_set_plugged_cb(struct mtk_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev);
+int mtk_hdmi_audio_params(struct mtk_hdmi *hdmi, struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params);
+void mtk_hdmi_get_ncts(unsigned int sample_rate, unsigned int clock,
+ unsigned int *n, unsigned int *cts);
+bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
+struct mtk_hdmi *mtk_hdmi_common_probe(struct platform_device *pdev);
+#endif /* _MTK_HDMI_COMMON_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c
new file mode 100644
index 000000000000..b844e2c10f28
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc_v2.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek HDMI v2 Display Data Channel Driver
+ *
+ * Copyright (c) 2021 MediaTek Inc.
+ * Copyright (c) 2021 BayLibre, SAS
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_edid.h>
+
+#include "mtk_hdmi_common.h"
+#include "mtk_hdmi_regs_v2.h"
+
+#define DDC2_DLY_CNT 572 /* BIM=208M/(v*4) = 90 kHz */
+#define DDC2_DLY_CNT_EDID 832 /* BIM=208M/(v*4) = 62.5 kHz */
+#define SI2C_ADDR_READ 0xf4
+#define SCDC_I2C_SLAVE_ADDRESS 0x54
+
+struct mtk_hdmi_ddc {
+ struct device *dev;
+ struct regmap *regs;
+ struct clk *clk;
+ struct i2c_adapter adap;
+};
+
+static int mtk_ddc_check_and_rise_low_bus(struct mtk_hdmi_ddc *ddc)
+{
+ u32 val;
+
+ regmap_read(ddc->regs, HDCP2X_DDCM_STATUS, &val);
+ if (val & DDC_I2C_BUS_LOW) {
+ regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD,
+ FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_CLOCK_SCL));
+ usleep_range(250, 300);
+ }
+
+ if (val & DDC_I2C_NO_ACK) {
+ u32 ddc_ctrl, hpd_ddc_ctrl, hpd_ddc_status;
+
+ regmap_read(ddc->regs, DDC_CTRL, &ddc_ctrl);
+ regmap_read(ddc->regs, HPD_DDC_CTRL, &hpd_ddc_ctrl);
+ regmap_read(ddc->regs, HPD_DDC_STATUS, &hpd_ddc_status);
+ }
+
+ if (val & DDC_I2C_NO_ACK)
+ return -EIO;
+
+ return 0;
+}
+
+static int mtk_ddc_wr_one(struct mtk_hdmi_ddc *ddc, u16 addr_id,
+ u16 offset_id, u8 *wr_data)
+{
+ u32 val;
+ int ret;
+
+ /* If the bus is low, raise it before the write operation */
+ mtk_ddc_check_and_rise_low_bus(ddc);
+
+ regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT,
+ FIELD_PREP(HPD_DDC_DELAY_CNT, DDC2_DLY_CNT));
+
+ if (wr_data) {
+ regmap_write(ddc->regs, SI2C_CTRL,
+ FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) |
+ FIELD_PREP(SI2C_WDATA, *wr_data) |
+ SI2C_WR);
+ }
+
+ regmap_write(ddc->regs, DDC_CTRL,
+ FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_WRITE) |
+ FIELD_PREP(DDC_CTRL_DIN_CNT, wr_data == NULL ? 0 : 1) |
+ FIELD_PREP(DDC_CTRL_OFFSET, offset_id) |
+ FIELD_PREP(DDC_CTRL_ADDR, addr_id));
+ usleep_range(1000, 1250);
+
+ ret = regmap_read_poll_timeout(ddc->regs, HPD_DDC_STATUS, val,
+ !(val & DDC_I2C_IN_PROG), 500, 1000);
+ if (ret) {
+ dev_err(ddc->dev, "DDC I2C write timeout\n");
+ return ret;
+ }
+
+ /* The I2C bus might be low after the write operation: raise it again */
+ ret = mtk_ddc_check_and_rise_low_bus(ddc);
+ if (ret) {
+ dev_err(ddc->dev, "Error during write operation: No ACK\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mtk_ddcm_read_hdmi(struct mtk_hdmi_ddc *ddc, u16 uc_dev,
+ u8 addr, u8 *puc_value, u16 data_cnt)
+{
+ u16 dly_cnt, i, uc_idx;
+ u32 rem, temp_length, uc_read_count = 0, val;
+ u64 loop_counter;
+ int ret;
+
+ mtk_ddc_check_and_rise_low_bus(ddc);
+
+ regmap_update_bits(ddc->regs, DDC_CTRL, DDC_CTRL_CMD,
+ FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_CLEAR_FIFO));
+
+ if (data_cnt >= 16) {
+ temp_length = 16;
+ loop_counter = data_cnt;
+
+ rem = do_div(loop_counter, temp_length);
+ if (rem)
+ loop_counter++;
+ } else {
+ temp_length = data_cnt;
+ loop_counter = 1;
+ }
+
+ if (uc_dev >= DDC_ADDR)
+ dly_cnt = DDC2_DLY_CNT_EDID;
+ else
+ dly_cnt = DDC2_DLY_CNT;
+
+ regmap_update_bits(ddc->regs, HPD_DDC_CTRL, HPD_DDC_DELAY_CNT,
+ FIELD_PREP(HPD_DDC_DELAY_CNT, dly_cnt));
+
+ for (i = 0; i < loop_counter; i++) {
+ rem = data_cnt % 16;
+
+ if (i > 0 && i == (loop_counter - 1) && rem)
+ temp_length = rem;
+
+ /* 0x51 - 0x53: EDID segment pointer addresses (enhanced read) */
+ if (uc_dev > DDC_ADDR && uc_dev <= 0x53) {
+ regmap_update_bits(ddc->regs, SCDC_CTRL, SCDC_DDC_SEGMENT,
+ FIELD_PREP(SCDC_DDC_SEGMENT, uc_dev - DDC_ADDR));
+
+ regmap_write(ddc->regs, DDC_CTRL,
+ FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_ENH_READ_NOACK) |
+ FIELD_PREP(DDC_CTRL_DIN_CNT, temp_length) |
+ FIELD_PREP(DDC_CTRL_OFFSET, addr + i * temp_length) |
+ FIELD_PREP(DDC_CTRL_ADDR, DDC_ADDR));
+ } else {
+ u16 offset;
+
+ if (addr != 0x43)
+ offset = i * 16;
+ else
+ offset = 0;
+
+ regmap_write(ddc->regs, DDC_CTRL,
+ FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_READ_NOACK) |
+ FIELD_PREP(DDC_CTRL_DIN_CNT, temp_length) |
+ FIELD_PREP(DDC_CTRL_OFFSET, addr + offset) |
+ FIELD_PREP(DDC_CTRL_ADDR, uc_dev));
+ }
+ usleep_range(5000, 5500);
+
+ ret = regmap_read_poll_timeout(ddc->regs, HPD_DDC_STATUS, val,
+ !(val & DDC_I2C_IN_PROG), 1000,
+ 500 * (temp_length + 5));
+ if (ret) {
+ dev_err(ddc->dev, "Timeout waiting for DDC I2C\n");
+ return ret;
+ }
+
+ ret = mtk_ddc_check_and_rise_low_bus(ddc);
+ if (ret) {
+ dev_err(ddc->dev, "Error during read operation: No ACK\n");
+ return ret;
+ }
+
+ for (uc_idx = 0; uc_idx < temp_length; uc_idx++) {
+ unsigned int read_idx = i * 16 + uc_idx;
+
+ regmap_write(ddc->regs, SI2C_CTRL,
+ FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) |
+ SI2C_RD);
+
+ regmap_read(ddc->regs, HPD_DDC_STATUS, &val);
+ puc_value[read_idx] = FIELD_GET(DDC_DATA_OUT, val);
+
+ regmap_write(ddc->regs, SI2C_CTRL,
+ FIELD_PREP(SI2C_ADDR, SI2C_ADDR_READ) |
+ SI2C_CONFIRM_READ);
+
+ /*
+ * If HDMI IP gets reset during EDID read, DDC read
+ * operation will fail and its delay counter will be
+ * reset to 400.
+ */
+ regmap_read(ddc->regs, HPD_DDC_CTRL, &val);
+ if (FIELD_GET(HPD_DDC_DELAY_CNT, val) < DDC2_DLY_CNT)
+ return 0;
+
+ uc_read_count = read_idx + 1;
+ }
+ }
+ if (uc_read_count > U8_MAX)
+ dev_warn(ddc->dev, "Invalid read data count %u\n", uc_read_count);
+
+ return uc_read_count;
+}
+
+static int mtk_hdmi_fg_ddc_data_read(struct mtk_hdmi_ddc *ddc, u16 b_dev,
+ u8 data_addr, u16 data_cnt, u8 *pr_data)
+{
+ int read_data_cnt;
+ u16 req_data_cnt;
+
+ if (!data_cnt) {
+ dev_err(ddc->dev, "Invalid DDCM read request\n");
+ return -EINVAL;
+ }
+
+ req_data_cnt = U8_MAX - data_addr + 1;
+ if (req_data_cnt > data_cnt)
+ req_data_cnt = data_cnt;
+
+ regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN);
+
+ read_data_cnt = mtk_ddcm_read_hdmi(ddc, b_dev, data_addr, pr_data, req_data_cnt);
+
+ if (read_data_cnt < 0)
+ return read_data_cnt;
+ else if (read_data_cnt != req_data_cnt)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mtk_hdmi_ddc_fg_data_write(struct mtk_hdmi_ddc *ddc, u16 b_dev,
+ u8 data_addr, u16 data_cnt, u8 *pr_data)
+{
+ int i, ret;
+
+ regmap_set_bits(ddc->regs, HDCP2X_POL_CTRL, HDCP2X_DIS_POLL_EN);
+ /*
+ * In case there is no payload data, just do a single write for the
+ * address only
+ */
+ if (data_cnt == 0)
+ return mtk_ddc_wr_one(ddc, b_dev, data_addr, NULL);
+
+ i = 0;
+ do {
+ ret = mtk_ddc_wr_one(ddc, b_dev, data_addr + i, pr_data + i);
+ if (ret)
+ return ret;
+ } while (++i < data_cnt);
+
+ return 0;
+}
+
+static int mtk_hdmi_ddc_v2_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+{
+ struct mtk_hdmi_ddc *ddc;
+ u8 offset = 0;
+ int i, ret;
+
+ ddc = adapter->algo_data;
+
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *msg = &msgs[i];
+
+ if (!msg->buf) {
+ dev_err(ddc->dev, "No message buffer\n");
+ return -EINVAL;
+ }
+
+ if (msg->flags & I2C_M_RD) {
+ /*
+ * The underlying DDC hardware always issues a write request
+ * that assigns the read offset as part of the read operation;
+ * therefore, use the `offset` value cached by the previous
+ * write request (e.g. from drm_edid).
+ */
+ ret = mtk_hdmi_fg_ddc_data_read(ddc, msg->addr, offset,
+ msg->len, &msg->buf[0]);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * The HW needs the data offset, found in buf[0], in the
+ * DDC_CTRL register, and each byte of data, starting at
+ * buf[1], goes in the SI2C_WDATA register.
+ */
+ ret = mtk_hdmi_ddc_fg_data_write(ddc, msg->addr, msg->buf[0],
+ msg->len - 1, &msg->buf[1]);
+ if (ret)
+ return ret;
+
+ /*
+ * Store the offset value requested by drm_edid or by
+ * scdc to use in subsequent read requests.
+ */
+ if ((msg->addr == DDC_ADDR || msg->addr == SCDC_I2C_SLAVE_ADDRESS) &&
+ msg->len == 1) {
+ offset = msg->buf[0];
+ }
+ }
+ }
+
+ return i;
+}
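To make the cached-offset handling concrete, a minimal sketch of the classic EDID transaction this xfer serves, assuming DDC_ADDR and EDID_LENGTH from drm_edid.h (variable names are illustrative):

    u8 start = 0; /* offset of the first EDID byte */
    u8 block[EDID_LENGTH];
    struct i2c_msg msgs[] = {
        { .addr = DDC_ADDR, .flags = 0, .len = 1, .buf = &start },
        { .addr = DDC_ADDR, .flags = I2C_M_RD, .len = sizeof(block), .buf = block },
    };

    /* i2c_transfer(&ddc->adap, msgs, 2): msgs[0] caches the offset, msgs[1] reads */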
+
+static u32 mtk_hdmi_ddc_v2_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm mtk_hdmi_ddc_v2_algorithm = {
+ .master_xfer = mtk_hdmi_ddc_v2_xfer,
+ .functionality = mtk_hdmi_ddc_v2_func,
+};
+
+static int mtk_hdmi_ddc_v2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_hdmi_ddc *ddc;
+ int ret;
+
+ ddc = devm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL);
+ if (!ddc)
+ return -ENOMEM;
+
+ ddc->dev = dev;
+ ddc->regs = device_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR_OR_NULL(ddc->regs))
+ return dev_err_probe(dev,
+ IS_ERR(ddc->regs) ? PTR_ERR(ddc->regs) : -EINVAL,
+ "Cannot get regmap\n");
+
+ ddc->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(ddc->clk))
+ return dev_err_probe(dev, PTR_ERR(ddc->clk), "Cannot get DDC clock\n");
+
+ strscpy(ddc->adap.name, "mediatek-hdmi-ddc-v2", sizeof(ddc->adap.name));
+ ddc->adap.owner = THIS_MODULE;
+ ddc->adap.algo = &mtk_hdmi_ddc_v2_algorithm;
+ ddc->adap.retries = 3;
+ ddc->adap.dev.of_node = dev->of_node;
+ ddc->adap.algo_data = ddc;
+ ddc->adap.dev.parent = &pdev->dev;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot enable Runtime PM\n");
+
+ pm_runtime_get_sync(dev);
+
+ ret = devm_i2c_add_adapter(dev, &ddc->adap);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Cannot add DDC I2C adapter\n");
+
+ platform_set_drvdata(pdev, ddc);
+ return 0;
+}
+
+static const struct of_device_id mtk_hdmi_ddc_v2_match[] = {
+ { .compatible = "mediatek,mt8195-hdmi-ddc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_hdmi_ddc_v2_match);
+
+struct platform_driver mtk_hdmi_ddc_v2_driver = {
+ .probe = mtk_hdmi_ddc_v2_probe,
+ .driver = {
+ .name = "mediatek-hdmi-ddc-v2",
+ .of_match_table = mtk_hdmi_ddc_v2_match,
+ },
+};
+module_platform_driver(mtk_hdmi_ddc_v2_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_AUTHOR("Can Zeng <can.zeng@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMIv2 DDC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h
new file mode 100644
index 000000000000..521b35c7e14d
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs_v2.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Copyright (c) 2021 BayLibre, SAS
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#ifndef _MTK_HDMI_REGS_H
+#define _MTK_HDMI_REGS_H
+
+/* HDMI_TOP Config */
+#define TOP_CFG00 0x000
+#define HDMI2_ON BIT(2)
+#define HDMI_MODE_HDMI BIT(3)
+#define SCR_ON BIT(4)
+#define TMDS_PACK_MODE GENMASK(9, 8)
+#define TMDS_PACK_MODE_8BPP 0
+#define TMDS_PACK_MODE_10BPP 1
+#define TMDS_PACK_MODE_12BPP 2
+#define TMDS_PACK_MODE_16BPP 3
+#define DEEPCOLOR_PKT_EN BIT(12)
+#define HDMI_ABIST_VIDEO_FORMAT GENMASK(21, 16)
+#define HDMI_ABIST_ENABLE BIT(31)
+#define TOP_CFG01 0x004
+#define CP_SET_MUTE_EN BIT(0)
+#define CP_CLR_MUTE_EN BIT(1)
+#define NULL_PKT_EN BIT(2)
+#define NULL_PKT_VSYNC_HIGH_EN BIT(3)
+
+/* HDMI_TOP Audio: Channel Mapping */
+#define TOP_AUD_MAP 0x00c
+#define SD0_MAP GENMASK(2, 0)
+#define SD1_MAP GENMASK(6, 4)
+#define SD2_MAP GENMASK(10, 8)
+#define SD3_MAP GENMASK(14, 12)
+#define SD4_MAP GENMASK(18, 16)
+#define SD5_MAP GENMASK(22, 20)
+#define SD6_MAP GENMASK(26, 24)
+#define SD7_MAP GENMASK(30, 28)
+
+/* Auxiliary Video Information (AVI) Infoframe */
+#define TOP_AVI_HEADER 0x024
+#define TOP_AVI_PKT00 0x028
+#define TOP_AVI_PKT01 0x02C
+#define TOP_AVI_PKT02 0x030
+#define TOP_AVI_PKT03 0x034
+#define TOP_AVI_PKT04 0x038
+#define TOP_AVI_PKT05 0x03C
+
+/* Audio Interface Infoframe */
+#define TOP_AIF_HEADER 0x040
+#define TOP_AIF_PKT00 0x044
+#define TOP_AIF_PKT01 0x048
+#define TOP_AIF_PKT02 0x04c
+#define TOP_AIF_PKT03 0x050
+
+/* Audio SPDIF Infoframe */
+#define TOP_SPDIF_HEADER 0x054
+#define TOP_SPDIF_PKT00 0x058
+#define TOP_SPDIF_PKT01 0x05c
+#define TOP_SPDIF_PKT02 0x060
+#define TOP_SPDIF_PKT03 0x064
+#define TOP_SPDIF_PKT04 0x068
+#define TOP_SPDIF_PKT05 0x06c
+#define TOP_SPDIF_PKT06 0x070
+#define TOP_SPDIF_PKT07 0x074
+
+/* Infoframes Configuration */
+#define TOP_INFO_EN 0x01c
+#define AVI_EN BIT(0)
+#define SPD_EN BIT(1)
+#define AUD_EN BIT(2)
+#define CP_EN BIT(5)
+#define VSIF_EN BIT(11)
+#define AVI_EN_WR BIT(16)
+#define SPD_EN_WR BIT(17)
+#define AUD_EN_WR BIT(18)
+#define CP_EN_WR BIT(21)
+#define VSIF_EN_WR BIT(27)
+#define TOP_INFO_RPT 0x020
+#define AVI_RPT_EN BIT(0)
+#define SPD_RPT_EN BIT(1)
+#define AUD_RPT_EN BIT(2)
+#define CP_RPT_EN BIT(5)
+#define VSIF_RPT_EN BIT(11)
+
+/* Vendor Specific Infoframe */
+#define TOP_VSIF_HEADER 0x174
+#define TOP_VSIF_PKT00 0x178
+#define TOP_VSIF_PKT01 0x17c
+#define TOP_VSIF_PKT02 0x180
+#define TOP_VSIF_PKT03 0x184
+#define TOP_VSIF_PKT04 0x188
+#define TOP_VSIF_PKT05 0x18c
+#define TOP_VSIF_PKT06 0x190
+#define TOP_VSIF_PKT07 0x194
+
+/* HDMI_TOP Misc */
+#define TOP_MISC_CTLR 0x1a4
+#define DEEP_COLOR_ADD BIT(4)
+
+/* Hardware interrupts */
+#define TOP_INT_STA00 0x1a8
+#define TOP_INT_ENABLE00 0x1b0
+#define HTPLG_R_INT BIT(0)
+#define HTPLG_F_INT BIT(1)
+#define PORD_R_INT BIT(2)
+#define PORD_F_INT BIT(3)
+#define HDMI_VSYNC_INT BIT(4)
+#define HDMI_AUDIO_INT BIT(5)
+#define HDCP2X_RX_REAUTH_REQ_DDCM_INT BIT(25)
+#define TOP_INT_ENABLE01 0x1b4
+#define TOP_INT_CLR00 0x1b8
+#define TOP_INT_CLR01 0x1bc
+
+/* Video Mute */
+#define TOP_VMUTE_CFG1 0x1c8
+#define REG_VMUTE_EN BIT(16)
+
+/* HDMI Audio IP */
+#define AIP_CTRL 0x400
+#define CTS_SW_SEL BIT(0)
+#define CTS_REQ_EN BIT(1)
+#define MCLK_EN BIT(2)
+#define NO_MCLK_CTSGEN_SEL BIT(3)
+#define AUD_IN_EN BIT(8)
+#define AUD_SEL_OWRT BIT(9)
+#define SPDIF_EN BIT(13)
+#define HBRA_ON BIT(14)
+#define DSD_EN BIT(15)
+#define I2S_EN GENMASK(19, 16)
+#define HBR_FROM_SPDIF BIT(20)
+#define CTS_CAL_N4 BIT(23)
+#define SPDIF_INTERNAL_MODULE BIT(24)
+#define AIP_N_VAL 0x404
+#define AIP_CTS_SVAL 0x408
+#define AIP_SPDIF_CTRL 0x40c
+#define WR_1UI_LOCK BIT(0)
+#define FS_OVERRIDE_WRITE BIT(1)
+#define WR_2UI_LOCK BIT(2)
+#define MAX_1UI_WRITE GENMASK(15, 8)
+#define MAX_2UI_SPDIF_WRITE GENMASK(23, 16)
+#define MAX_2UI_I2S_HI_WRITE GENMASK(23, 20)
+#define MAX_2UI_I2S_LFE_CC_SWAP BIT(1)
+#define MAX_2UI_I2S_LO_WRITE GENMASK(19, 16)
+#define AUD_ERR_THRESH GENMASK(29, 24)
+#define I2S2DSD_EN BIT(30)
+#define AIP_I2S_CTRL 0x410
+#define FIFO0_MAP GENMASK(1, 0)
+#define FIFO1_MAP GENMASK(3, 2)
+#define FIFO2_MAP GENMASK(5, 4)
+#define FIFO3_MAP GENMASK(7, 6)
+#define I2S_1ST_BIT_NOSHIFT BIT(8)
+#define I2S_DATA_DIR_LSB BIT(9)
+#define JUSTIFY_RIGHT BIT(10)
+#define WS_HIGH BIT(11)
+#define VBIT_COMPRESSED BIT(12)
+#define CBIT_ORDER_SAME BIT(13)
+#define SCK_EDGE_RISE BIT(14)
+#define AIP_I2S_CHST0 0x414
+#define AIP_I2S_CHST1 0x418
+#define AIP_TXCTRL 0x424
+#define RST4AUDIO BIT(0)
+#define RST4AUDIO_FIFO BIT(1)
+#define RST4AUDIO_ACR BIT(2)
+#define AUD_LAYOUT_1 BIT(4)
+#define AUD_MUTE_FIFO_EN BIT(5)
+#define AUD_PACKET_DROP BIT(6)
+#define DSD_MUTE_EN BIT(7)
+#define AIP_TPI_CTRL 0x428
+#define TPI_AUDIO_LOOKUP_EN BIT(2)
+
+/* Video downsampling configuration */
+#define VID_DOWNSAMPLE_CONFIG 0x8d0
+#define C444_C422_CONFIG_ENABLE BIT(0)
+#define C422_C420_CONFIG_ENABLE BIT(4)
+#define C422_C420_CONFIG_BYPASS BIT(5)
+#define C422_C420_CONFIG_OUT_CB_OR_CR BIT(6)
+#define VID_OUT_FORMAT 0x8fc
+#define OUTPUT_FORMAT_DEMUX_420_ENABLE BIT(10)
+
+/* HDCP registers */
+#define HDCP_TOP_CTRL 0xc00
+#define HDCP2X_CTRL_0 0xc20
+#define HDCP2X_EN BIT(0)
+#define HDCP2X_ENCRYPT_EN BIT(7)
+#define HDCP2X_HPD_OVR BIT(10)
+#define HDCP2X_HPD_SW BIT(11)
+#define HDCP2X_POL_CTRL 0xc54
+#define HDCP2X_DIS_POLL_EN BIT(16)
+#define HDCP1X_CTRL 0xcd0
+#define HDCP1X_ENC_EN BIT(6)
+
+/* HDMI DDC registers */
+#define HPD_DDC_CTRL 0xc08
+#define HPD_DDC_DELAY_CNT GENMASK(31, 16)
+#define HPD_DDC_HPD_DBNC_EN BIT(2)
+#define HPD_DDC_PORD_DBNC_EN BIT(3)
+#define DDC_CTRL 0xc10
+#define DDC_CTRL_ADDR GENMASK(7, 1)
+#define DDC_CTRL_OFFSET GENMASK(15, 8)
+#define DDC_CTRL_DIN_CNT GENMASK(25, 16)
+#define DDC_CTRL_CMD GENMASK(31, 28)
+#define SCDC_CTRL 0xc18
+#define SCDC_DDC_SEGMENT GENMASK(15, 8)
+#define HPD_DDC_STATUS 0xc60
+#define HPD_STATE GENMASK(1, 0)
+#define HPD_STATE_CONNECTED 2
+#define HPD_PIN_STA BIT(4)
+#define PORD_PIN_STA BIT(5)
+#define DDC_I2C_IN_PROG BIT(13)
+#define DDC_DATA_OUT GENMASK(23, 16)
+#define SI2C_CTRL 0xcac
+#define SI2C_WR BIT(0)
+#define SI2C_RD BIT(1)
+#define SI2C_CONFIRM_READ BIT(2)
+#define SI2C_WDATA GENMASK(15, 8)
+#define SI2C_ADDR GENMASK(23, 16)
+
+/* HDCP DDC registers */
+#define HDCP2X_DDCM_STATUS 0xc68
+#define DDC_I2C_NO_ACK BIT(10)
+#define DDC_I2C_BUS_LOW BIT(11)
+
+/* HDMI TX registers */
+#define HDMITX_CONFIG_MT8188 0xea0
+#define HDMITX_CONFIG_MT8195 0x900
+#define HDMI_YUV420_MODE BIT(10)
+#define HDMITX_SW_HPD BIT(29)
+#define HDMITX_SW_RSTB BIT(31)
+
+/**
+ * enum mtk_hdmi_ddc_v2_cmds - DDC_CMD register commands
+ * @DDC_CMD_READ_NOACK: Current address read with no ACK on last byte
+ * @DDC_CMD_READ: Current address read with ACK on last byte
+ * @DDC_CMD_SEQ_READ_NOACK: Sequential read with no ACK on last byte
+ * @DDC_CMD_SEQ_READ: Sequential read with ACK on last byte
+ * @DDC_CMD_ENH_READ_NOACK: Enhanced read with no ACK on last byte
+ * @DDC_CMD_ENH_READ: Enhanced read with ACK on last byte
+ * @DDC_CMD_SEQ_WRITE_NOACK: Sequential write ignoring ACK on last byte
+ * @DDC_CMD_SEQ_WRITE: Sequential write requiring ACK on last byte
+ * @DDC_CMD_RSVD: Reserved for future use
+ * @DDC_CMD_CLEAR_FIFO: Clear DDC I2C FIFO
+ * @DDC_CMD_CLOCK_SCL: Start clocking DDC I2C SCL
+ * @DDC_CMD_ABORT_XFER: Abort DDC I2C transaction
+ */
+enum mtk_hdmi_ddc_v2_cmds {
+ DDC_CMD_READ_NOACK = 0x0,
+ DDC_CMD_READ,
+ DDC_CMD_SEQ_READ_NOACK,
+ DDC_CMD_SEQ_READ,
+ DDC_CMD_ENH_READ_NOACK,
+ DDC_CMD_ENH_READ,
+ DDC_CMD_SEQ_WRITE_NOACK,
+ DDC_CMD_SEQ_WRITE = 0x07,
+ DDC_CMD_CLEAR_FIFO = 0x09,
+ DDC_CMD_CLOCK_SCL = 0x0a,
+ DDC_CMD_ABORT_XFER = 0x0f
+};
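A sketch of how these command codes combine with the DDC_CTRL fields above, mirroring the v2 DDC driver's usage (the 16-byte read at offset 0 of the 0x50 EDID address is just an example):

    regmap_write(regs, DDC_CTRL,
                 FIELD_PREP(DDC_CTRL_CMD, DDC_CMD_SEQ_READ_NOACK) |
                 FIELD_PREP(DDC_CTRL_DIN_CNT, 16) |
                 FIELD_PREP(DDC_CTRL_OFFSET, 0) |
                 FIELD_PREP(DDC_CTRL_ADDR, 0x50));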
+
+#endif /* _MTK_HDMI_REGS_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c b/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
new file mode 100644
index 000000000000..c272e1e74b7d
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_v2.c
@@ -0,0 +1,1521 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MediaTek HDMI v2 IP driver
+ *
+ * Copyright (c) 2022 MediaTek Inc.
+ * Copyright (c) 2022 BayLibre, SAS
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/suspend.h>
+#include <linux/units.h>
+#include <linux/phy/phy.h>
+
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
+#include <drm/display/drm_scdc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include "mtk_hdmi_common.h"
+#include "mtk_hdmi_regs_v2.h"
+
+#define MTK_HDMI_V2_CLOCK_MIN 27000
+#define MTK_HDMI_V2_CLOCK_MAX 594000
+
+#define HPD_PORD_HWIRQS (HTPLG_R_INT | HTPLG_F_INT | PORD_F_INT | PORD_R_INT)
+
+enum mtk_hdmi_v2_clk_id {
+ MTK_HDMI_V2_CLK_HDCP_SEL,
+ MTK_HDMI_V2_CLK_HDCP_24M_SEL,
+ MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI,
+ MTK_HDMI_V2_CLK_HDMI_APB_SEL,
+ MTK_HDMI_V2_CLK_COUNT,
+};
+
+const char *const mtk_hdmi_v2_clk_names[MTK_HDMI_V2_CLK_COUNT] = {
+ [MTK_HDMI_V2_CLK_HDMI_APB_SEL] = "bus",
+ [MTK_HDMI_V2_CLK_HDCP_SEL] = "hdcp",
+ [MTK_HDMI_V2_CLK_HDCP_24M_SEL] = "hdcp24m",
+ [MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI] = "hdmi-split",
+};
+
+static inline void mtk_hdmi_v2_hwirq_disable(struct mtk_hdmi *hdmi)
+{
+ regmap_write(hdmi->regs, TOP_INT_ENABLE00, 0);
+ regmap_write(hdmi->regs, TOP_INT_ENABLE01, 0);
+}
+
+static inline void mtk_hdmi_v2_enable_hpd_pord_irq(struct mtk_hdmi *hdmi, bool enable)
+{
+ if (enable)
+ regmap_set_bits(hdmi->regs, TOP_INT_ENABLE00, HPD_PORD_HWIRQS);
+ else
+ regmap_clear_bits(hdmi->regs, TOP_INT_ENABLE00, HPD_PORD_HWIRQS);
+}
+
+static inline void mtk_hdmi_v2_set_sw_hpd(struct mtk_hdmi *hdmi, bool enable)
+{
+ if (enable) {
+ regmap_set_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_HPD);
+ regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_OVR);
+ regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_SW);
+ } else {
+ regmap_clear_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_OVR);
+ regmap_clear_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_SW);
+ regmap_clear_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_HPD);
+ }
+}
+
+static inline void mtk_hdmi_v2_enable_scrambling(struct mtk_hdmi *hdmi, bool enable)
+{
+ struct drm_scdc *scdc = &hdmi->curr_conn->display_info.hdmi.scdc;
+
+ if (enable)
+ regmap_set_bits(hdmi->regs, TOP_CFG00, SCR_ON | HDMI2_ON);
+ else
+ regmap_clear_bits(hdmi->regs, TOP_CFG00, SCR_ON | HDMI2_ON);
+
+ if (scdc->supported) {
+ if (scdc->scrambling.supported)
+ drm_scdc_set_scrambling(hdmi->curr_conn, enable);
+ drm_scdc_set_high_tmds_clock_ratio(hdmi->curr_conn, enable);
+ }
+}
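For context, a sketch of the mode-clock threshold behind this helper, assuming the HDMI 2.0 rule that TMDS character rates above 340 MHz require scrambling and the 1/40 clock ratio (this predicate is illustrative, not part of the driver):

    static bool mode_needs_scrambling(const struct drm_display_mode *mode)
    {
        return mode->clock > 340000; /* mode->clock is in kHz; 340 MHz */
    }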
+
+static void mtk_hdmi_v2_hw_vid_mute(struct mtk_hdmi *hdmi, bool enable)
+{
+ /* If enabled, sends a black image */
+ if (enable)
+ regmap_set_bits(hdmi->regs, TOP_VMUTE_CFG1, REG_VMUTE_EN);
+ else
+ regmap_clear_bits(hdmi->regs, TOP_VMUTE_CFG1, REG_VMUTE_EN);
+}
+
+static void mtk_hdmi_v2_hw_aud_mute(struct mtk_hdmi *hdmi, bool enable)
+{
+ u32 aip, val;
+
+ if (!enable) {
+ regmap_clear_bits(hdmi->regs, AIP_TXCTRL, AUD_MUTE_FIFO_EN);
+ return;
+ }
+
+ regmap_read(hdmi->regs, AIP_CTRL, &aip);
+
+ val = AUD_MUTE_FIFO_EN;
+ if (aip & DSD_EN)
+ val |= DSD_MUTE_EN;
+
+ regmap_update_bits(hdmi->regs, AIP_TXCTRL, val, val);
+}
+
+static void mtk_hdmi_v2_hw_reset(struct mtk_hdmi *hdmi)
+{
+ regmap_clear_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_RSTB);
+ udelay(5);
+ regmap_set_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMITX_SW_RSTB);
+}
+
+static inline u32 mtk_hdmi_v2_format_hw_packet(const u8 *buffer, u8 len)
+{
+ unsigned short i;
+ u32 val = 0;
+
+ for (i = 0; i < len; i++)
+ val |= buffer[i] << (i * 8);
+
+ return val;
+}
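A quick worked example of the LSB-first packing performed above (the bytes 0x82/0x02/0x0d are the standard AVI InfoFrame type, version and length, as used by the writers below):

    u8 hdr[3] = { 0x82, 0x02, 0x0d };
    u32 packed = mtk_hdmi_v2_format_hw_packet(hdr, 3); /* == 0x000d0282 */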
+
+static void mtk_hdmi_v2_hw_write_audio_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer)
+{
+ regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AUD_EN | AUD_EN_WR);
+ regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AUD_RPT_EN);
+
+ regmap_write(hdmi->regs, TOP_AIF_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3));
+ regmap_write(hdmi->regs, TOP_AIF_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 3));
+ regmap_write(hdmi->regs, TOP_AIF_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 2));
+ regmap_write(hdmi->regs, TOP_AIF_PKT02, 0);
+ regmap_write(hdmi->regs, TOP_AIF_PKT03, 0);
+
+ regmap_set_bits(hdmi->regs, TOP_INFO_RPT, AUD_RPT_EN);
+ regmap_set_bits(hdmi->regs, TOP_INFO_EN, AUD_EN | AUD_EN_WR);
+}
+
+static void mtk_hdmi_v2_hw_write_avi_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer)
+{
+ regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AVI_EN_WR | AVI_EN);
+ regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AVI_RPT_EN);
+
+ regmap_write(hdmi->regs, TOP_AVI_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3));
+ regmap_write(hdmi->regs, TOP_AVI_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 4));
+ regmap_write(hdmi->regs, TOP_AVI_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 3));
+ regmap_write(hdmi->regs, TOP_AVI_PKT02, mtk_hdmi_v2_format_hw_packet(&buffer[10], 4));
+ regmap_write(hdmi->regs, TOP_AVI_PKT03, mtk_hdmi_v2_format_hw_packet(&buffer[14], 3));
+ regmap_write(hdmi->regs, TOP_AVI_PKT04, 0);
+ regmap_write(hdmi->regs, TOP_AVI_PKT05, 0);
+
+ regmap_set_bits(hdmi->regs, TOP_INFO_RPT, AVI_RPT_EN);
+ regmap_set_bits(hdmi->regs, TOP_INFO_EN, AVI_EN_WR | AVI_EN);
+}
+
+static void mtk_hdmi_v2_hw_write_spd_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer)
+{
+ regmap_clear_bits(hdmi->regs, TOP_INFO_EN, SPD_EN_WR | SPD_EN);
+ regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, SPD_RPT_EN);
+
+ regmap_write(hdmi->regs, TOP_SPDIF_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3));
+ regmap_write(hdmi->regs, TOP_SPDIF_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 4));
+ regmap_write(hdmi->regs, TOP_SPDIF_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 3));
+ regmap_write(hdmi->regs, TOP_SPDIF_PKT02, mtk_hdmi_v2_format_hw_packet(&buffer[10], 4));
+ regmap_write(hdmi->regs, TOP_SPDIF_PKT03, mtk_hdmi_v2_format_hw_packet(&buffer[14], 3));
+ regmap_write(hdmi->regs, TOP_SPDIF_PKT04, mtk_hdmi_v2_format_hw_packet(&buffer[17], 4));
+ regmap_write(hdmi->regs, TOP_SPDIF_PKT05, mtk_hdmi_v2_format_hw_packet(&buffer[21], 3));
+ regmap_write(hdmi->regs, TOP_SPDIF_PKT06, mtk_hdmi_v2_format_hw_packet(&buffer[24], 4));
+ regmap_write(hdmi->regs, TOP_SPDIF_PKT07, buffer[28]);
+
+ regmap_set_bits(hdmi->regs, TOP_INFO_EN, SPD_EN_WR | SPD_EN);
+ regmap_set_bits(hdmi->regs, TOP_INFO_RPT, SPD_RPT_EN);
+}
+
+static void mtk_hdmi_v2_hw_write_vendor_infoframe(struct mtk_hdmi *hdmi, const u8 *buffer)
+{
+ regmap_clear_bits(hdmi->regs, TOP_INFO_EN, VSIF_EN_WR | VSIF_EN);
+ regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, VSIF_RPT_EN);
+
+ regmap_write(hdmi->regs, TOP_VSIF_HEADER, mtk_hdmi_v2_format_hw_packet(&buffer[0], 3));
+ regmap_write(hdmi->regs, TOP_VSIF_PKT00, mtk_hdmi_v2_format_hw_packet(&buffer[3], 4));
+ regmap_write(hdmi->regs, TOP_VSIF_PKT01, mtk_hdmi_v2_format_hw_packet(&buffer[7], 2));
+ regmap_write(hdmi->regs, TOP_VSIF_PKT02, 0);
+ regmap_write(hdmi->regs, TOP_VSIF_PKT03, 0);
+ regmap_write(hdmi->regs, TOP_VSIF_PKT04, 0);
+ regmap_write(hdmi->regs, TOP_VSIF_PKT05, 0);
+ regmap_write(hdmi->regs, TOP_VSIF_PKT06, 0);
+ regmap_write(hdmi->regs, TOP_VSIF_PKT07, 0);
+
+ regmap_set_bits(hdmi->regs, TOP_INFO_EN, VSIF_EN_WR | VSIF_EN);
+ regmap_set_bits(hdmi->regs, TOP_INFO_RPT, VSIF_RPT_EN);
+}
+
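+/*
+ * Configure the chroma downsampling chain: YUV420 output is produced
+ * by enabling both the 4:4:4 -> 4:2:2 and 4:2:2 -> 4:2:0 converters.
+ */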
+static void mtk_hdmi_yuv420_downsampling(struct mtk_hdmi *hdmi, bool enable)
+{
+ u32 val;
+
+ regmap_read(hdmi->regs, VID_DOWNSAMPLE_CONFIG, &val);
+
+ if (enable) {
+ regmap_set_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMI_YUV420_MODE);
+
+ val |= C444_C422_CONFIG_ENABLE | C422_C420_CONFIG_ENABLE;
+ val |= C422_C420_CONFIG_OUT_CB_OR_CR;
+ val &= ~C422_C420_CONFIG_BYPASS;
+ regmap_write(hdmi->regs, VID_DOWNSAMPLE_CONFIG, val);
+
+ regmap_set_bits(hdmi->regs, VID_OUT_FORMAT, OUTPUT_FORMAT_DEMUX_420_ENABLE);
+ } else {
+ regmap_clear_bits(hdmi->regs, hdmi->conf->reg_hdmi_tx_cfg, HDMI_YUV420_MODE);
+
+ val &= ~(C444_C422_CONFIG_ENABLE | C422_C420_CONFIG_ENABLE);
+ val &= ~C422_C420_CONFIG_OUT_CB_OR_CR;
+ val |= C422_C420_CONFIG_BYPASS;
+ regmap_write(hdmi->regs, VID_DOWNSAMPLE_CONFIG, val);
+
+ regmap_clear_bits(hdmi->regs, VID_OUT_FORMAT, OUTPUT_FORMAT_DEMUX_420_ENABLE);
+ }
+}
+
+static int mtk_hdmi_v2_setup_audio_infoframe(struct mtk_hdmi *hdmi)
+{
+ struct hdmi_codec_params *params = &hdmi->aud_param.codec_params;
+ struct hdmi_audio_infoframe frame;
+ u8 buffer[14];
+ ssize_t ret;
+
+ memcpy(&frame, &params->cea, sizeof(frame));
+
+ ret = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (ret < 0)
+ return ret;
+
+ mtk_hdmi_v2_hw_write_audio_infoframe(hdmi, buffer);
+
+ return 0;
+}
+
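+/*
+ * Signal AVMUTE through the General Control Packet (GCP): the sink
+ * blanks audio and video until the mute flag is cleared again.
+ */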
+static inline void mtk_hdmi_v2_hw_gcp_avmute(struct mtk_hdmi *hdmi, bool mute)
+{
+ u32 val;
+
+ regmap_read(hdmi->regs, TOP_CFG01, &val);
+ val &= ~(CP_CLR_MUTE_EN | CP_SET_MUTE_EN);
+
+ if (mute)
+ val |= CP_SET_MUTE_EN;
+ else
+ val |= CP_CLR_MUTE_EN;
+ regmap_write(hdmi->regs, TOP_CFG01, val);
+
+ regmap_set_bits(hdmi->regs, TOP_INFO_RPT, CP_RPT_EN);
+ regmap_set_bits(hdmi->regs, TOP_INFO_EN, CP_EN | CP_EN_WR);
+}
+
+static void mtk_hdmi_v2_hw_ncts_enable(struct mtk_hdmi *hdmi, bool enable)
+{
+ if (enable)
+ regmap_set_bits(hdmi->regs, AIP_CTRL, CTS_SW_SEL);
+ else
+ regmap_clear_bits(hdmi->regs, AIP_CTRL, CTS_SW_SEL);
+}
+
+static void mtk_hdmi_v2_hw_aud_set_channel_status(struct mtk_hdmi *hdmi)
+{
+ u8 *ch_status = hdmi->aud_param.codec_params.iec.status;
+
+ /* Only the first 5 to 7 bytes of Channel Status contain useful information */
+ regmap_write(hdmi->regs, AIP_I2S_CHST0, mtk_hdmi_v2_format_hw_packet(&ch_status[0], 4));
+ regmap_write(hdmi->regs, AIP_I2S_CHST1, mtk_hdmi_v2_format_hw_packet(&ch_status[4], 3));
+}
+
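+/*
+ * Program the N/CTS Audio Clock Regeneration parameters for the given
+ * audio sample rate and TMDS clock.
+ */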
+static void mtk_hdmi_v2_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
+ unsigned int sample_rate,
+ unsigned int clock)
+{
+ unsigned int n, cts;
+
+ mtk_hdmi_get_ncts(sample_rate, clock, &n, &cts);
+
+ regmap_write(hdmi->regs, AIP_N_VAL, n);
+ regmap_write(hdmi->regs, AIP_CTS_SVAL, cts);
+}
+
+static void mtk_hdmi_v2_hw_aud_enable(struct mtk_hdmi *hdmi, bool enable)
+{
+ if (enable)
+ regmap_clear_bits(hdmi->regs, AIP_TXCTRL, AUD_PACKET_DROP);
+ else
+ regmap_set_bits(hdmi->regs, AIP_TXCTRL, AUD_PACKET_DROP);
+}
+
+static u32 mtk_hdmi_v2_aud_output_channel_map(u8 sd0, u8 sd1, u8 sd2, u8 sd3,
+ u8 sd4, u8 sd5, u8 sd6, u8 sd7)
+{
+ u32 val;
+
+ /*
+ * Each of the Output Channels (0-7) can be mapped to get their input
+ * from any of the available Input Channels (0-7): this function
+ * takes input channel numbers and formats a value that must then
+ * be written to the TOP_AUD_MAP hardware register by the caller.
+ */
+ val = FIELD_PREP(SD0_MAP, sd0) | FIELD_PREP(SD1_MAP, sd1);
+ val |= FIELD_PREP(SD2_MAP, sd2) | FIELD_PREP(SD3_MAP, sd3);
+ val |= FIELD_PREP(SD4_MAP, sd4) | FIELD_PREP(SD5_MAP, sd5);
+ val |= FIELD_PREP(SD6_MAP, sd6) | FIELD_PREP(SD7_MAP, sd7);
+
+ return val;
+}
+
+static void mtk_hdmi_audio_dsd_config(struct mtk_hdmi *hdmi,
+ unsigned char chnum, bool dsd_bypass)
+{
+ u32 channel_map;
+
+ regmap_update_bits(hdmi->regs, AIP_CTRL, SPDIF_EN | DSD_EN | HBRA_ON, DSD_EN);
+ regmap_set_bits(hdmi->regs, AIP_TXCTRL, DSD_MUTE_EN);
+
+ if (dsd_bypass)
+ channel_map = mtk_hdmi_v2_aud_output_channel_map(0, 2, 4, 6, 1, 3, 5, 7);
+ else
+ channel_map = mtk_hdmi_v2_aud_output_channel_map(0, 5, 1, 0, 3, 2, 4, 0);
+
+ regmap_write(hdmi->regs, TOP_AUD_MAP, channel_map);
+ regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, I2S2DSD_EN);
+}
+
+static inline void mtk_hdmi_v2_hw_i2s_fifo_map(struct mtk_hdmi *hdmi, u32 fifo_mapping)
+{
+ regmap_update_bits(hdmi->regs, AIP_I2S_CTRL,
+ FIFO0_MAP | FIFO1_MAP | FIFO2_MAP | FIFO3_MAP, fifo_mapping);
+}
+
+static inline void mtk_hdmi_v2_hw_i2s_ch_number(struct mtk_hdmi *hdmi, u8 chnum)
+{
+ regmap_update_bits(hdmi->regs, AIP_CTRL, I2S_EN, FIELD_PREP(I2S_EN, chnum));
+}
+
+static void mtk_hdmi_v2_hw_i2s_ch_mapping(struct mtk_hdmi *hdmi, u8 chnum, u8 mapping)
+{
+ u32 fifo_map;
+ u8 bdata;
+
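+ /*
+ * Compute the I2S_EN field value: a bitmask enabling one bit per
+ * I2S SD input line (0x1 = SD0 only, 0xf = SD0 through SD3).
+ */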
+ switch (chnum) {
+ default:
+ case 2:
+ bdata = 0x1;
+ break;
+ case 3:
+ bdata = 0x3;
+ break;
+ case 6:
+ if (mapping == 0x0e) {
+ bdata = 0xf;
+ break;
+ }
+ fallthrough;
+ case 5:
+ bdata = 0x7;
+ break;
+ case 7:
+ case 8:
+ bdata = 0xf;
+ break;
+ }
+
+ /* Assign default FIFO mapping: SD0 to FIFO0, SD1 to FIFO1, etc. */
+ fifo_map = FIELD_PREP(FIFO0_MAP, 0) | FIELD_PREP(FIFO1_MAP, 1);
+ fifo_map |= FIELD_PREP(FIFO2_MAP, 2) | FIELD_PREP(FIFO3_MAP, 3);
+ mtk_hdmi_v2_hw_i2s_fifo_map(hdmi, fifo_map);
+ mtk_hdmi_v2_hw_i2s_ch_number(hdmi, bdata);
+
+ /*
+ * Set HDMI Audio packet layout indicator:
+ * Layout 0 is for two channels
+ * Layout 1 is for up to eight channels
+ */
+ if (chnum == 2)
+ regmap_clear_bits(hdmi->regs, AIP_TXCTRL, AUD_LAYOUT_1);
+ else
+ regmap_set_bits(hdmi->regs, AIP_TXCTRL, AUD_LAYOUT_1);
+}
+
+static void mtk_hdmi_i2s_data_fmt(struct mtk_hdmi *hdmi, unsigned char fmt)
+{
+ u32 val;
+
+ regmap_read(hdmi->regs, AIP_I2S_CTRL, &val);
+ val &= ~(WS_HIGH | I2S_1ST_BIT_NOSHIFT | JUSTIFY_RIGHT);
+
+ switch (fmt) {
+ case HDMI_I2S_MODE_RJT_24BIT:
+ case HDMI_I2S_MODE_RJT_16BIT:
+ val |= (WS_HIGH | I2S_1ST_BIT_NOSHIFT | JUSTIFY_RIGHT);
+ break;
+ case HDMI_I2S_MODE_LJT_24BIT:
+ case HDMI_I2S_MODE_LJT_16BIT:
+ val |= (WS_HIGH | I2S_1ST_BIT_NOSHIFT);
+ break;
+ case HDMI_I2S_MODE_I2S_24BIT:
+ case HDMI_I2S_MODE_I2S_16BIT:
+ default:
+ break;
+ }
+
+ regmap_write(hdmi->regs, AIP_I2S_CTRL, val);
+}
+
+static inline void mtk_hdmi_i2s_sck_edge_rise(struct mtk_hdmi *hdmi, bool rise)
+{
+ if (rise)
+ regmap_set_bits(hdmi->regs, AIP_I2S_CTRL, SCK_EDGE_RISE);
+ else
+ regmap_clear_bits(hdmi->regs, AIP_I2S_CTRL, SCK_EDGE_RISE);
+}
+
+static inline void mtk_hdmi_i2s_cbit_order(struct mtk_hdmi *hdmi, unsigned int cbit)
+{
+ regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, CBIT_ORDER_SAME, cbit);
+}
+
+static inline void mtk_hdmi_i2s_vbit(struct mtk_hdmi *hdmi, unsigned int vbit)
+{
+ /* V bit: 0 for PCM, 1 for Compressed data */
+ regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, VBIT_COMPRESSED, vbit);
+}
+
+static inline void mtk_hdmi_i2s_data_direction(struct mtk_hdmi *hdmi, unsigned int is_lsb)
+{
+ regmap_update_bits(hdmi->regs, AIP_I2S_CTRL, I2S_DATA_DIR_LSB, is_lsb);
+}
+
+static inline void mtk_hdmi_v2_hw_audio_type(struct mtk_hdmi *hdmi, unsigned int spdif_i2s)
+{
+ regmap_update_bits(hdmi->regs, AIP_CTRL, SPDIF_EN, FIELD_PREP(SPDIF_EN, spdif_i2s));
+}
+
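+/*
+ * Translate the HDMI audio channel allocation to the hardware's I2S
+ * channel mapping code; unhandled layouts fall back to zero, the
+ * default stereo mapping.
+ */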
+static u8 mtk_hdmi_v2_get_i2s_ch_mapping(struct mtk_hdmi *hdmi, u8 channel_type)
+{
+ switch (channel_type) {
+ case HDMI_AUD_CHAN_TYPE_1_1:
+ case HDMI_AUD_CHAN_TYPE_2_1:
+ return 0x01;
+ case HDMI_AUD_CHAN_TYPE_3_0:
+ return 0x02;
+ case HDMI_AUD_CHAN_TYPE_3_1:
+ return 0x03;
+ case HDMI_AUD_CHAN_TYPE_3_0_LRS:
+ case HDMI_AUD_CHAN_TYPE_4_0:
+ return 0x08;
+ case HDMI_AUD_CHAN_TYPE_5_1:
+ return 0x0b;
+ case HDMI_AUD_CHAN_TYPE_4_1_CLRS:
+ case HDMI_AUD_CHAN_TYPE_6_0:
+ case HDMI_AUD_CHAN_TYPE_6_0_CS:
+ case HDMI_AUD_CHAN_TYPE_6_0_CH:
+ case HDMI_AUD_CHAN_TYPE_6_0_OH:
+ case HDMI_AUD_CHAN_TYPE_6_0_CHR:
+ return 0x0e;
+ case HDMI_AUD_CHAN_TYPE_1_0:
+ case HDMI_AUD_CHAN_TYPE_2_0:
+ case HDMI_AUD_CHAN_TYPE_3_1_LRS:
+ case HDMI_AUD_CHAN_TYPE_4_1:
+ case HDMI_AUD_CHAN_TYPE_5_0:
+ case HDMI_AUD_CHAN_TYPE_4_0_CLRS:
+ case HDMI_AUD_CHAN_TYPE_6_1:
+ case HDMI_AUD_CHAN_TYPE_6_1_CS:
+ case HDMI_AUD_CHAN_TYPE_6_1_CH:
+ case HDMI_AUD_CHAN_TYPE_6_1_OH:
+ case HDMI_AUD_CHAN_TYPE_6_1_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0:
+ case HDMI_AUD_CHAN_TYPE_7_0_LH_RH:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR:
+ case HDMI_AUD_CHAN_TYPE_7_0_LC_RC:
+ case HDMI_AUD_CHAN_TYPE_7_0_LW_RW:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS:
+ case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS:
+ case HDMI_AUD_CHAN_TYPE_7_0_CS_CH:
+ case HDMI_AUD_CHAN_TYPE_7_0_CS_OH:
+ case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0_CH_OH:
+ case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0_OH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR:
+ case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS:
+ case HDMI_AUD_CHAN_TYPE_7_1:
+ case HDMI_AUD_CHAN_TYPE_7_1_LH_RH:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR:
+ case HDMI_AUD_CHAN_TYPE_7_1_LC_RC:
+ case HDMI_AUD_CHAN_TYPE_7_1_LW_RW:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS:
+ case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS:
+ case HDMI_AUD_CHAN_TYPE_7_1_CS_CH:
+ case HDMI_AUD_CHAN_TYPE_7_1_CS_OH:
+ case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_1_CH_OH:
+ case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR:
+ case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR:
+ default:
+ return 0;
+ }
+}
+
+static inline void mtk_hdmi_v2_hw_i2s_ch_swap(struct mtk_hdmi *hdmi)
+{
+ regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_2UI_I2S_HI_WRITE,
+ FIELD_PREP(MAX_2UI_I2S_HI_WRITE, MAX_2UI_I2S_LFE_CC_SWAP));
+}
+
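+/*
+ * Configure the audio path for High Bitrate (HBR) audio, carried
+ * either over the I2S inputs (dsd_bypass) or over S/PDIF.
+ */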
+static void mtk_hdmi_hbr_config(struct mtk_hdmi *hdmi, bool dsd_bypass)
+{
+ const u32 hbr_mask = SPDIF_EN | DSD_EN | HBRA_ON;
+
+ if (dsd_bypass) {
+ regmap_update_bits(hdmi->regs, AIP_CTRL, hbr_mask, HBRA_ON);
+ regmap_set_bits(hdmi->regs, AIP_CTRL, I2S_EN);
+ } else {
+ regmap_update_bits(hdmi->regs, AIP_CTRL, hbr_mask, SPDIF_EN);
+ regmap_set_bits(hdmi->regs, AIP_CTRL, SPDIF_INTERNAL_MODULE);
+ regmap_set_bits(hdmi->regs, AIP_CTRL, HBR_FROM_SPDIF);
+ regmap_set_bits(hdmi->regs, AIP_CTRL, CTS_CAL_N4);
+ }
+}
+
+static inline void mtk_hdmi_v2_hw_spdif_config(struct mtk_hdmi *hdmi)
+{
+ regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, WR_1UI_LOCK);
+ regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, FS_OVERRIDE_WRITE);
+ regmap_clear_bits(hdmi->regs, AIP_SPDIF_CTRL, WR_2UI_LOCK);
+
+ regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_1UI_WRITE,
+ FIELD_PREP(MAX_1UI_WRITE, 4));
+ regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_2UI_SPDIF_WRITE,
+ FIELD_PREP(MAX_2UI_SPDIF_WRITE, 9));
+ regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, AUD_ERR_THRESH,
+ FIELD_PREP(AUD_ERR_THRESH, 4));
+
+ regmap_set_bits(hdmi->regs, AIP_SPDIF_CTRL, I2S2DSD_EN);
+}
+
+static void mtk_hdmi_v2_aud_set_input(struct mtk_hdmi *hdmi)
+{
+ struct hdmi_audio_param *aud_param = &hdmi->aud_param;
+ struct hdmi_codec_params *codec_params = &aud_param->codec_params;
+ u8 i2s_ch_map;
+ u32 out_ch_map;
+
+ /* Write the default output channel map. CH0 maps to SD0, CH1 maps to SD1, etc */
+ out_ch_map = mtk_hdmi_v2_aud_output_channel_map(0, 1, 2, 3, 4, 5, 6, 7);
+ regmap_write(hdmi->regs, TOP_AUD_MAP, out_ch_map);
+
+ regmap_update_bits(hdmi->regs, AIP_SPDIF_CTRL, MAX_2UI_I2S_HI_WRITE, 0);
+ regmap_clear_bits(hdmi->regs, AIP_CTRL,
+ SPDIF_EN | DSD_EN | HBRA_ON | CTS_CAL_N4 |
+ HBR_FROM_SPDIF | SPDIF_INTERNAL_MODULE);
+ regmap_clear_bits(hdmi->regs, AIP_TXCTRL, DSD_MUTE_EN | AUD_LAYOUT_1);
+
+ if (aud_param->aud_input_type == HDMI_AUD_INPUT_I2S) {
+ switch (aud_param->aud_codec) {
+ case HDMI_AUDIO_CODING_TYPE_DTS_HD:
+ case HDMI_AUDIO_CODING_TYPE_MLP:
+ mtk_hdmi_i2s_data_fmt(hdmi, aud_param->aud_i2s_fmt);
+ mtk_hdmi_hbr_config(hdmi, true);
+ break;
+ case HDMI_AUDIO_CODING_TYPE_DSD:
+ mtk_hdmi_audio_dsd_config(hdmi, codec_params->channels, false);
+ mtk_hdmi_v2_hw_i2s_ch_mapping(hdmi, codec_params->channels, 1);
+ break;
+ default:
+ mtk_hdmi_i2s_data_fmt(hdmi, aud_param->aud_i2s_fmt);
+ mtk_hdmi_i2s_sck_edge_rise(hdmi, true);
+ mtk_hdmi_i2s_cbit_order(hdmi, CBIT_ORDER_SAME);
+ mtk_hdmi_i2s_vbit(hdmi, 0); /* PCM data */
+ mtk_hdmi_i2s_data_direction(hdmi, 0); /* MSB first */
+ mtk_hdmi_v2_hw_audio_type(hdmi, HDMI_AUD_INPUT_I2S);
+ i2s_ch_map = mtk_hdmi_v2_get_i2s_ch_mapping(hdmi,
+ aud_param->aud_input_chan_type);
+ mtk_hdmi_v2_hw_i2s_ch_mapping(hdmi, codec_params->channels, i2s_ch_map);
+ mtk_hdmi_v2_hw_i2s_ch_swap(hdmi);
+ }
+ } else {
+ if (codec_params->sample_rate == 768000 &&
+ (aud_param->aud_codec == HDMI_AUDIO_CODING_TYPE_DTS_HD ||
+ aud_param->aud_codec == HDMI_AUDIO_CODING_TYPE_MLP)) {
+ mtk_hdmi_hbr_config(hdmi, false);
+ } else {
+ mtk_hdmi_v2_hw_spdif_config(hdmi);
+ mtk_hdmi_v2_hw_i2s_ch_mapping(hdmi, 2, 0);
+ }
+ }
+}
+
+static inline void mtk_hdmi_v2_hw_audio_input_enable(struct mtk_hdmi *hdmi, bool ena)
+{
+ if (ena)
+ regmap_set_bits(hdmi->regs, AIP_CTRL, AUD_IN_EN);
+ else
+ regmap_clear_bits(hdmi->regs, AIP_CTRL, AUD_IN_EN);
+}
+
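+/*
+ * Initialize the AIP control register: enable MCLK and CTS request
+ * generation and disable the TPI audio lookup.
+ */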
+static void mtk_hdmi_v2_aip_ctrl_init(struct mtk_hdmi *hdmi)
+{
+ regmap_set_bits(hdmi->regs, AIP_CTRL,
+ AUD_SEL_OWRT | NO_MCLK_CTSGEN_SEL | MCLK_EN | CTS_REQ_EN);
+ regmap_clear_bits(hdmi->regs, AIP_TPI_CTRL, TPI_AUDIO_LOOKUP_EN);
+}
+
+static void mtk_hdmi_v2_audio_reset(struct mtk_hdmi *hdmi, bool reset)
+{
+ const u32 arst_bits = RST4AUDIO | RST4AUDIO_FIFO | RST4AUDIO_ACR;
+
+ if (reset)
+ regmap_set_bits(hdmi->regs, AIP_TXCTRL, arst_bits);
+ else
+ regmap_clear_bits(hdmi->regs, AIP_TXCTRL, arst_bits);
+}
+
+static void mtk_hdmi_v2_aud_output_config(struct mtk_hdmi *hdmi,
+ struct drm_display_mode *display_mode)
+{
+ /* Shut down and reset the HDMI Audio HW to avoid glitching */
+ mtk_hdmi_v2_hw_aud_mute(hdmi, true);
+ mtk_hdmi_v2_hw_aud_enable(hdmi, false);
+ mtk_hdmi_v2_audio_reset(hdmi, true);
+
+ /* Configure the main hardware params and get out of reset */
+ mtk_hdmi_v2_aip_ctrl_init(hdmi);
+ mtk_hdmi_v2_aud_set_input(hdmi);
+ mtk_hdmi_v2_hw_aud_set_channel_status(hdmi);
+ mtk_hdmi_v2_setup_audio_infoframe(hdmi);
+ mtk_hdmi_v2_hw_audio_input_enable(hdmi, true);
+ mtk_hdmi_v2_audio_reset(hdmi, false);
+
+ /* Keep N/CTS packet transmission disabled while configuring it */
+ mtk_hdmi_v2_hw_ncts_enable(hdmi, false);
+ mtk_hdmi_v2_hw_aud_set_ncts(hdmi, hdmi->aud_param.codec_params.sample_rate,
+ display_mode->clock);
+
+ /* Wait for the HW to apply settings */
+ usleep_range(25, 50);
+
+ /* Hardware is fully configured: enable TX of N/CTS pkts and unmute */
+ mtk_hdmi_v2_hw_ncts_enable(hdmi, true);
+ mtk_hdmi_v2_hw_aud_enable(hdmi, true);
+ mtk_hdmi_v2_hw_aud_mute(hdmi, false);
+}
+
+static void mtk_hdmi_v2_change_video_resolution(struct mtk_hdmi *hdmi,
+ struct drm_connector_state *conn_state)
+{
+ mtk_hdmi_v2_hw_reset(hdmi);
+ mtk_hdmi_v2_set_sw_hpd(hdmi, true);
+ udelay(2);
+
+ regmap_write(hdmi->regs, HDCP_TOP_CTRL, 0);
+
+ /*
+ * Enable HDCP reauthentication interrupt: the HW uses this internally
+ * for the HPD state machine even if HDCP encryption is not enabled.
+ */
+ regmap_set_bits(hdmi->regs, TOP_INT_ENABLE00, HDCP2X_RX_REAUTH_REQ_DDCM_INT);
+
+ /* Enable hotplug and pord interrupts */
+ mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, true);
+
+ /* Force enabling HDCP HPD */
+ regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_OVR);
+ regmap_set_bits(hdmi->regs, HDCP2X_CTRL_0, HDCP2X_HPD_SW);
+
+ /* Set 8 bits per pixel */
+ regmap_update_bits(hdmi->regs, TOP_CFG00, TMDS_PACK_MODE,
+ FIELD_PREP(TMDS_PACK_MODE, TMDS_PACK_MODE_8BPP));
+ /* Disable generating deepcolor packets */
+ regmap_clear_bits(hdmi->regs, TOP_CFG00, DEEPCOLOR_PKT_EN);
+ /* Disable adding deepcolor information to the general packet */
+ regmap_clear_bits(hdmi->regs, TOP_MISC_CTLR, DEEP_COLOR_ADD);
+
+ if (hdmi->curr_conn->display_info.is_hdmi)
+ regmap_set_bits(hdmi->regs, TOP_CFG00, HDMI_MODE_HDMI);
+ else
+ regmap_clear_bits(hdmi->regs, TOP_CFG00, HDMI_MODE_HDMI);
+
+ udelay(5);
+ mtk_hdmi_v2_hw_vid_mute(hdmi, true);
+ mtk_hdmi_v2_hw_aud_mute(hdmi, true);
+ mtk_hdmi_v2_hw_gcp_avmute(hdmi, false);
+
+ regmap_update_bits(hdmi->regs, TOP_CFG01,
+ NULL_PKT_VSYNC_HIGH_EN | NULL_PKT_EN, NULL_PKT_VSYNC_HIGH_EN);
+ usleep_range(100, 150);
+
+ /* Enable scrambling if the TMDS character rate is 340MHz or higher */
+ mtk_hdmi_v2_enable_scrambling(hdmi, hdmi->mode.clock >= 340 * KILO);
+
+ switch (conn_state->hdmi.output_format) {
+ default:
+ case HDMI_COLORSPACE_RGB:
+ case HDMI_COLORSPACE_YUV444:
+ /* Disable YUV420 downsampling for RGB and YUV444 */
+ mtk_hdmi_yuv420_downsampling(hdmi, false);
+ break;
+ case HDMI_COLORSPACE_YUV422:
+ /*
+ * YUV420 downsampling needs a multi-bit configuration, so
+ * first clear all of it, then set the single bit that the
+ * simpler YUV422 downsampling requires.
+ */
+ mtk_hdmi_yuv420_downsampling(hdmi, false);
+ regmap_set_bits(hdmi->regs, VID_DOWNSAMPLE_CONFIG,
+ C444_C422_CONFIG_ENABLE);
+ break;
+ case HDMI_COLORSPACE_YUV420:
+ mtk_hdmi_yuv420_downsampling(hdmi, true);
+ break;
+ }
+}
+
+static void mtk_hdmi_v2_output_set_display_mode(struct mtk_hdmi *hdmi,
+ struct drm_connector_state *conn_state,
+ struct drm_display_mode *mode)
+{
+ union phy_configure_opts opts = {
+ .dp = { .link_rate = hdmi->mode.clock * KILO }
+ };
+ int ret;
+
+ ret = phy_configure(hdmi->phy, &opts);
+ if (ret)
+ dev_err(hdmi->dev, "Setting clock=%d failed: %d\n", mode->clock, ret);
+
+ mtk_hdmi_v2_change_video_resolution(hdmi, conn_state);
+ mtk_hdmi_v2_aud_output_config(hdmi, mode);
+}
+
+static int mtk_hdmi_v2_clk_enable(struct mtk_hdmi *hdmi)
+{
+ int ret;
+
+ ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_SEL]);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_24M_SEL]);
+ if (ret)
+ goto disable_hdcp_clk;
+
+ ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_HDMI_APB_SEL]);
+ if (ret)
+ goto disable_hdcp_24m_clk;
+
+ ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI]);
+ if (ret)
+ goto disable_bus_clk;
+
+ return 0;
+
+disable_bus_clk:
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDMI_APB_SEL]);
+disable_hdcp_24m_clk:
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_24M_SEL]);
+disable_hdcp_clk:
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_SEL]);
+
+ return ret;
+}
+
+static void mtk_hdmi_v2_clk_disable(struct mtk_hdmi *hdmi)
+{
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_VPP_SPLIT_HDMI]);
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDMI_APB_SEL]);
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_24M_SEL]);
+ clk_disable_unprepare(hdmi->clk[MTK_HDMI_V2_CLK_HDCP_SEL]);
+}
+
+static enum hdmi_hpd_state mtk_hdmi_v2_hpd_pord_status(struct mtk_hdmi *hdmi)
+{
+ u8 hpd_pin_sta, pord_pin_sta;
+ u32 hpd_status;
+
+ regmap_read(hdmi->regs, HPD_DDC_STATUS, &hpd_status);
+ hpd_pin_sta = FIELD_GET(HPD_PIN_STA, hpd_status);
+ pord_pin_sta = FIELD_GET(PORD_PIN_STA, hpd_status);
+
+ /*
+ * Inform that the cable is plugged in (hpd_pin_sta) so that the
+ * sink can be powered on by switching the 5V VBUS as required by
+ * the HDMI spec for reading EDID and for HDMI Audio registers to
+ * be accessible.
+ *
+ * PORD detection succeeds only when the cable is plugged in and
+ * the sink is powered on: reaching that state means that the
+ * communication with the sink can be started.
+ *
+ * Note that when the cable is unplugged, the HPD pin is the
+ * first one to fall, while PORD may stay high for a few more
+ * milliseconds: HDMI_PLUG_OUT is thus decided on the falling
+ * HPD alone, without checking PORD at all.
+ */
+ if (hpd_pin_sta && pord_pin_sta)
+ return HDMI_PLUG_IN_AND_SINK_POWER_ON;
+ else if (hpd_pin_sta)
+ return HDMI_PLUG_IN_ONLY;
+ else
+ return HDMI_PLUG_OUT;
+}
+
+static irqreturn_t mtk_hdmi_v2_isr(int irq, void *arg)
+{
+ struct mtk_hdmi *hdmi = arg;
+ unsigned int irq_sta;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ regmap_read(hdmi->regs, TOP_INT_STA00, &irq_sta);
+
+ /* Handle Hotplug Detection interrupts */
+ if (irq_sta & HPD_PORD_HWIRQS) {
+ /*
+ * Disable the HPD/PORD IRQs now, until the threaded handler
+ * is done, to avoid the interrupt storm that bad cables can
+ * cause.
+ */
+ mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, false);
+ ret = IRQ_WAKE_THREAD;
+
+ /* Clear HPD/PORD irqs to avoid unwanted retriggering */
+ regmap_write(hdmi->regs, TOP_INT_CLR00, HPD_PORD_HWIRQS);
+ regmap_write(hdmi->regs, TOP_INT_CLR00, 0);
+ }
+
+ return ret;
+}
+
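+/*
+ * Threaded ISR body: re-read the HPD/PORD state, notify DRM on any
+ * change and re-enable the HPD/PORD interrupts that the hard ISR
+ * masked.
+ */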
+static irqreturn_t __mtk_hdmi_v2_isr_thread(struct mtk_hdmi *hdmi)
+{
+ enum hdmi_hpd_state hpd;
+
+ hpd = mtk_hdmi_v2_hpd_pord_status(hdmi);
+ if (hpd != hdmi->hpd) {
+ struct drm_encoder *encoder = hdmi->bridge.encoder;
+
+ hdmi->hpd = hpd;
+
+ if (encoder && encoder->dev)
+ drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
+ }
+
+ mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, true);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_hdmi_v2_isr_thread(int irq, void *arg)
+{
+ struct mtk_hdmi *hdmi = arg;
+
+ /*
+ * Debounce the HDMI monitor HPD status.
+ * Empirical testing shows that a 30ms wait is enough.
+ */
+ msleep(30);
+
+ return __mtk_hdmi_v2_isr_thread(hdmi);
+}
+
+static int mtk_hdmi_v2_enable(struct mtk_hdmi *hdmi)
+{
+ bool was_active = pm_runtime_active(hdmi->dev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(hdmi->dev);
+ if (ret) {
+ dev_err(hdmi->dev, "Cannot resume HDMI\n");
+ return ret;
+ }
+
+ ret = mtk_hdmi_v2_clk_enable(hdmi);
+ if (ret) {
+ pm_runtime_put(hdmi->dev);
+ return ret;
+ }
+
+ if (!was_active) {
+ mtk_hdmi_v2_hw_reset(hdmi);
+ mtk_hdmi_v2_set_sw_hpd(hdmi, true);
+ }
+
+ return 0;
+}
+
+static void mtk_hdmi_v2_disable(struct mtk_hdmi *hdmi)
+{
+ mtk_hdmi_v2_clk_disable(hdmi);
+ pm_runtime_put_sync(hdmi->dev);
+}
+
+/*
+ * Bridge callbacks
+ */
+
+static int mtk_hdmi_v2_bridge_attach(struct drm_bridge *bridge,
+ struct drm_encoder *encoder,
+ enum drm_bridge_attach_flags flags)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ DRM_ERROR("The flag DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied\n");
+ return -EINVAL;
+ }
+ if (hdmi->next_bridge) {
+ ret = drm_bridge_attach(encoder, hdmi->next_bridge, bridge, flags);
+ if (ret)
+ return ret;
+ }
+
+ ret = mtk_hdmi_v2_enable(hdmi);
+ if (ret)
+ return ret;
+
+ /* Enable Hotplug and Pord pins internal debouncing */
+ regmap_set_bits(hdmi->regs, HPD_DDC_CTRL,
+ HPD_DDC_HPD_DBNC_EN | HPD_DDC_PORD_DBNC_EN);
+
+ irq_clear_status_flags(hdmi->irq, IRQ_NOAUTOEN);
+ enable_irq(hdmi->irq);
+
+ /*
+ * Check if any HDMI monitor was connected before probing this driver
+ * and/or attaching the bridge, without debouncing: if so, we want to
+ * notify the DRM so that we start outputting an image ASAP.
+ * Note that calling the ISR thread function will also perform a HW
+ * registers write that enables both the HPD and Pord interrupts.
+ */
+ __mtk_hdmi_v2_isr_thread(hdmi);
+
+ mtk_hdmi_v2_disable(hdmi);
+
+ return 0;
+}
+
+static void mtk_hdmi_v2_bridge_detach(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ WARN_ON(pm_runtime_active(hdmi->dev));
+
+ /* The controller is already powered off, just disable irq here */
+ disable_irq(hdmi->irq);
+}
+
+static void mtk_hdmi_v2_handle_plugged_change(struct mtk_hdmi *hdmi, bool plugged)
+{
+ mutex_lock(&hdmi->update_plugged_status_lock);
+ if (hdmi->plugged_cb && hdmi->codec_dev)
+ hdmi->plugged_cb(hdmi->codec_dev, plugged);
+ mutex_unlock(&hdmi->update_plugged_status_lock);
+}
+
+static void mtk_hdmi_v2_bridge_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ struct drm_connector_state *conn_state;
+ union phy_configure_opts opts = {
+ .dp = { .link_rate = hdmi->mode.clock * KILO }
+ };
+ int ret;
+
+ /* Power on the controller before trying to write to registers */
+ ret = mtk_hdmi_v2_enable(hdmi);
+ if (WARN_ON(ret))
+ return;
+
+ /* Retrieve the connector through the atomic state */
+ hdmi->curr_conn = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+
+ conn_state = drm_atomic_get_new_connector_state(state, hdmi->curr_conn);
+ if (WARN_ON(!conn_state))
+ return;
+
+ /*
+ * Preconfigure the HDMI controller and the HDMI PHY at pre_enable
+ * stage to make sure that this IP is ready and clocked before the
+ * mtk_dpi gets powered on and before it enables the output.
+ */
+ mtk_hdmi_v2_output_set_display_mode(hdmi, conn_state, &hdmi->mode);
+
+ /* Reconfigure phy clock link with appropriate rate */
+ phy_configure(hdmi->phy, &opts);
+
+ /* Power on the PHY here to make sure that DPI_HDMI is clocked */
+ phy_power_on(hdmi->phy);
+
+ hdmi->powered = true;
+}
+
+static void mtk_hdmi_v2_bridge_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ int ret;
+
+ if (WARN_ON(!hdmi->powered))
+ return;
+
+ ret = drm_atomic_helper_connector_hdmi_update_infoframes(hdmi->curr_conn, state);
+ if (ret)
+ dev_err(hdmi->dev, "Could not update infoframes: %d\n", ret);
+
+ mtk_hdmi_v2_hw_vid_mute(hdmi, false);
+
+ /* signal the connect event to audio codec */
+ mtk_hdmi_v2_handle_plugged_change(hdmi, true);
+
+ hdmi->enabled = true;
+}
+
+static void mtk_hdmi_v2_bridge_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ if (!hdmi->enabled)
+ return;
+
+ mtk_hdmi_v2_hw_gcp_avmute(hdmi, true);
+ msleep(50);
+ mtk_hdmi_v2_hw_vid_mute(hdmi, true);
+ mtk_hdmi_v2_hw_aud_mute(hdmi, true);
+ msleep(50);
+
+ hdmi->enabled = false;
+}
+
+static void mtk_hdmi_v2_bridge_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ if (!hdmi->powered)
+ return;
+
+ phy_power_off(hdmi->phy);
+ hdmi->powered = false;
+
+ /* signal the disconnect event to audio codec */
+ mtk_hdmi_v2_handle_plugged_change(hdmi, false);
+
+ /* Power off */
+ mtk_hdmi_v2_disable(hdmi);
+}
+
+static enum drm_connector_status mtk_hdmi_v2_bridge_detect(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ return hdmi->hpd != HDMI_PLUG_OUT ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static const struct drm_edid *mtk_hdmi_v2_bridge_edid_read(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ return drm_edid_read(connector);
+}
+
+static void mtk_hdmi_v2_hpd_enable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+ int ret;
+
+ ret = mtk_hdmi_v2_enable(hdmi);
+ if (ret) {
+ dev_err(hdmi->dev, "Cannot power on controller for HPD: %d\n", ret);
+ return;
+ }
+
+ mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, true);
+}
+
+static void mtk_hdmi_v2_hpd_disable(struct drm_bridge *bridge)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ mtk_hdmi_v2_enable_hpd_pord_irq(hdmi, false);
+ mtk_hdmi_v2_disable(hdmi);
+}
+
+static enum drm_mode_status
+mtk_hdmi_v2_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
+{
+ if (mode->clock < MTK_HDMI_V2_CLOCK_MIN)
+ return MODE_CLOCK_LOW;
+ else if (mode->clock > MTK_HDMI_V2_CLOCK_MAX)
+ return MODE_CLOCK_HIGH;
+ else
+ return MODE_OK;
+}
+
+static int mtk_hdmi_v2_hdmi_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AUD_EN_WR | AUD_EN);
+ regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AUD_RPT_EN);
+ break;
+ case HDMI_INFOFRAME_TYPE_AVI:
+ regmap_clear_bits(hdmi->regs, TOP_INFO_EN, AVI_EN_WR | AVI_EN);
+ regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, AVI_RPT_EN);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ regmap_clear_bits(hdmi->regs, TOP_INFO_EN, SPD_EN_WR | SPD_EN);
+ regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, SPD_RPT_EN);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ regmap_clear_bits(hdmi->regs, TOP_INFO_EN, VSIF_EN_WR | VSIF_EN);
+ regmap_clear_bits(hdmi->regs, TOP_INFO_RPT, VSIF_RPT_EN);
+ break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mtk_hdmi_v2_hdmi_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ mtk_hdmi_v2_hw_write_audio_infoframe(hdmi, buffer);
+ break;
+ case HDMI_INFOFRAME_TYPE_AVI:
+ mtk_hdmi_v2_hw_write_avi_infoframe(hdmi, buffer);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ mtk_hdmi_v2_hw_write_spd_infoframe(hdmi, buffer);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ mtk_hdmi_v2_hw_write_vendor_infoframe(hdmi, buffer);
+ break;
+ case HDMI_INFOFRAME_TYPE_DRM:
+ default:
+ dev_err(hdmi->dev, "Unsupported HDMI infoframe type %u\n", type);
+ break;
+ }
+
+ return 0;
+}
+
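+/*
+ * Enable the Automated Built-In Self Test (ABIST) pattern generator,
+ * mapping the current display mode to the IP's test pattern format.
+ */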
+static int mtk_hdmi_v2_set_abist(struct mtk_hdmi *hdmi, bool enable)
+{
+ struct drm_display_mode *mode = &hdmi->mode;
+ int abist_format = -EINVAL;
+ bool interlaced;
+
+ if (!enable) {
+ regmap_clear_bits(hdmi->regs, TOP_CFG00, HDMI_ABIST_ENABLE);
+ return 0;
+ }
+
+ if (!mode->hdisplay || !mode->vdisplay)
+ return -EINVAL;
+
+ interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+ switch (mode->hdisplay) {
+ case 720:
+ if (mode->vdisplay == 480)
+ abist_format = 2;
+ else if (mode->vdisplay == 576)
+ abist_format = 11;
+ break;
+ case 1280:
+ if (mode->vdisplay == 720)
+ abist_format = 3;
+ break;
+ case 1440:
+ if (mode->vdisplay == 480)
+ abist_format = interlaced ? 5 : 9;
+ else if (mode->vdisplay == 576)
+ abist_format = interlaced ? 14 : 18;
+ break;
+ case 1920:
+ if (mode->vdisplay == 1080)
+ abist_format = interlaced ? 4 : 10;
+ break;
+ case 3840:
+ if (mode->vdisplay == 2160)
+ abist_format = 25;
+ break;
+ case 4096:
+ if (mode->vdisplay == 2160)
+ abist_format = 26;
+ break;
+ default:
+ break;
+ }
+ if (abist_format < 0)
+ return abist_format;
+
+ regmap_update_bits(hdmi->regs, TOP_CFG00, HDMI_ABIST_VIDEO_FORMAT,
+ FIELD_PREP(HDMI_ABIST_VIDEO_FORMAT, abist_format));
+ regmap_set_bits(hdmi->regs, TOP_CFG00, HDMI_ABIST_ENABLE);
+ return 0;
+}
+
+static int mtk_hdmi_v2_debug_abist_show(struct seq_file *m, void *arg)
+{
+ struct mtk_hdmi *hdmi = m->private;
+ bool en;
+ u32 val;
+ int ret;
+
+ if (!hdmi)
+ return -EINVAL;
+
+ ret = regmap_read(hdmi->regs, TOP_CFG00, &val);
+ if (ret)
+ return ret;
+
+ en = FIELD_GET(HDMI_ABIST_ENABLE, val);
+
+ seq_printf(m, "HDMI Automated Built-In Self Test: %s\n",
+ en ? "Enabled" : "Disabled");
+
+ return 0;
+}
+
+static ssize_t mtk_hdmi_v2_debug_abist_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int ret;
+ u32 en;
+
+ if (!m || !m->private || *offp)
+ return -EINVAL;
+
+ ret = kstrtouint_from_user(ubuf, len, 0, &en);
+ if (ret)
+ return ret;
+
+ if (en > 1)
+ return -EINVAL;
+
+ mtk_hdmi_v2_set_abist(m->private, en);
+ return len;
+}
+
+static int mtk_hdmi_v2_debug_abist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_hdmi_v2_debug_abist_show, inode->i_private);
+}
+
+static const struct file_operations mtk_hdmi_debug_abist_fops = {
+ .owner = THIS_MODULE,
+ .open = mtk_hdmi_v2_debug_abist_open,
+ .read = seq_read,
+ .write = mtk_hdmi_v2_debug_abist_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void mtk_hdmi_v2_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
+{
+ struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
+
+ debugfs_create_file("hdmi_abist", 0640, root, hdmi, &mtk_hdmi_debug_abist_fops);
+}
+
+static const struct drm_bridge_funcs mtk_v2_hdmi_bridge_funcs = {
+ .attach = mtk_hdmi_v2_bridge_attach,
+ .detach = mtk_hdmi_v2_bridge_detach,
+ .mode_fixup = mtk_hdmi_bridge_mode_fixup,
+ .mode_set = mtk_hdmi_bridge_mode_set,
+ .atomic_pre_enable = mtk_hdmi_v2_bridge_pre_enable,
+ .atomic_enable = mtk_hdmi_v2_bridge_enable,
+ .atomic_disable = mtk_hdmi_v2_bridge_disable,
+ .atomic_post_disable = mtk_hdmi_v2_bridge_post_disable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .detect = mtk_hdmi_v2_bridge_detect,
+ .edid_read = mtk_hdmi_v2_bridge_edid_read,
+ .hpd_enable = mtk_hdmi_v2_hpd_enable,
+ .hpd_disable = mtk_hdmi_v2_hpd_disable,
+ .hdmi_tmds_char_rate_valid = mtk_hdmi_v2_hdmi_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = mtk_hdmi_v2_hdmi_clear_infoframe,
+ .hdmi_write_infoframe = mtk_hdmi_v2_hdmi_write_infoframe,
+ .debugfs_init = mtk_hdmi_v2_debugfs_init,
+};
+
+/*
+ * HDMI audio codec callbacks
+ */
+static int mtk_hdmi_v2_audio_hook_plugged_cb(struct device *dev, void *data,
+ hdmi_codec_plugged_cb fn,
+ struct device *codec_dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+ bool plugged;
+
+ if (!hdmi)
+ return -ENODEV;
+
+ mtk_hdmi_audio_set_plugged_cb(hdmi, fn, codec_dev);
+ plugged = (hdmi->hpd == HDMI_PLUG_IN_AND_SINK_POWER_ON);
+ mtk_hdmi_v2_handle_plugged_change(hdmi, plugged);
+
+ return 0;
+}
+
+static int mtk_hdmi_v2_audio_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ if (hdmi->audio_enable) {
+ mtk_hdmi_audio_params(hdmi, daifmt, params);
+ mtk_hdmi_v2_aud_output_config(hdmi, &hdmi->mode);
+ }
+ return 0;
+}
+
+static int mtk_hdmi_v2_audio_startup(struct device *dev, void *data)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ mtk_hdmi_v2_hw_aud_enable(hdmi, true);
+ hdmi->audio_enable = true;
+
+ return 0;
+}
+
+static void mtk_hdmi_v2_audio_shutdown(struct device *dev, void *data)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ hdmi->audio_enable = false;
+ mtk_hdmi_v2_hw_aud_enable(hdmi, false);
+}
+
+static int mtk_hdmi_v2_audio_mute(struct device *dev, void *data, bool enable, int dir)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ mtk_hdmi_v2_hw_aud_mute(hdmi, enable);
+
+ return 0;
+}
+
+static const struct hdmi_codec_ops mtk_hdmi_v2_audio_codec_ops = {
+ .hw_params = mtk_hdmi_v2_audio_hw_params,
+ .audio_startup = mtk_hdmi_v2_audio_startup,
+ .audio_shutdown = mtk_hdmi_v2_audio_shutdown,
+ .mute_stream = mtk_hdmi_v2_audio_mute,
+ .get_eld = mtk_hdmi_audio_get_eld,
+ .hook_plugged_cb = mtk_hdmi_v2_audio_hook_plugged_cb,
+};
+
+static __maybe_unused int mtk_hdmi_v2_suspend(struct device *dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ mtk_hdmi_v2_disable(hdmi);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_hdmi_v2_resume(struct device *dev)
+{
+ struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
+
+ return mtk_hdmi_v2_enable(hdmi);
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_hdmi_v2_pm_ops, mtk_hdmi_v2_suspend, mtk_hdmi_v2_resume);
+
+static const struct mtk_hdmi_ver_conf mtk_hdmi_conf_v2 = {
+ .bridge_funcs = &mtk_v2_hdmi_bridge_funcs,
+ .codec_ops = &mtk_hdmi_v2_audio_codec_ops,
+ .mtk_hdmi_clock_names = mtk_hdmi_v2_clk_names,
+ .num_clocks = MTK_HDMI_V2_CLK_COUNT,
+ .interlace_allowed = true,
+};
+
+static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8188 = {
+ .ver_conf = &mtk_hdmi_conf_v2,
+ .reg_hdmi_tx_cfg = HDMITX_CONFIG_MT8188
+};
+
+static const struct mtk_hdmi_conf mtk_hdmi_conf_mt8195 = {
+ .ver_conf = &mtk_hdmi_conf_v2,
+ .reg_hdmi_tx_cfg = HDMITX_CONFIG_MT8195
+};
+
+static int mtk_hdmi_v2_probe(struct platform_device *pdev)
+{
+ struct mtk_hdmi *hdmi;
+ int ret;
+
+ /* Populate HDMI sub-devices if present */
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret)
+ return ret;
+
+ hdmi = mtk_hdmi_common_probe(pdev);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
+
+ hdmi->hpd = HDMI_PLUG_OUT;
+
+ /* Disable all HW interrupts at probe stage */
+ mtk_hdmi_v2_hwirq_disable(hdmi);
+
+ /*
+ * In case the bootloader left HDMI enabled before booting, make
+ * sure that any pending interrupt is cleared by setting all bits
+ * in the INT_CLR registers for all 32+19 interrupts.
+ */
+ regmap_write(hdmi->regs, TOP_INT_CLR00, GENMASK(31, 0));
+ regmap_write(hdmi->regs, TOP_INT_CLR01, GENMASK(18, 0));
+
+ /* Restore interrupt clearing registers to zero */
+ regmap_write(hdmi->regs, TOP_INT_CLR00, 0);
+ regmap_write(hdmi->regs, TOP_INT_CLR01, 0);
+
+ /*
+ * Install the ISR but keep it disabled: the interrupts are set
+ * up in the .bridge_attach() callback, which will enable both
+ * the right HW IRQs and the ISR.
+ */
+ irq_set_status_flags(hdmi->irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, mtk_hdmi_v2_isr,
+ mtk_hdmi_v2_isr_thread,
+ IRQ_TYPE_LEVEL_HIGH,
+ dev_name(&pdev->dev), hdmi);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot request IRQ\n");
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "Cannot enable Runtime PM\n");
+
+ return 0;
+}
+
+static void mtk_hdmi_v2_remove(struct platform_device *pdev)
+{
+ struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
+
+ i2c_put_adapter(hdmi->ddc_adpt);
+}
+
+static const struct of_device_id mtk_drm_hdmi_v2_of_ids[] = {
+ { .compatible = "mediatek,mt8188-hdmi-tx", .data = &mtk_hdmi_conf_mt8188 },
+ { .compatible = "mediatek,mt8195-hdmi-tx", .data = &mtk_hdmi_conf_mt8195 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_drm_hdmi_v2_of_ids);
+
+static struct platform_driver mtk_hdmi_v2_driver = {
+ .probe = mtk_hdmi_v2_probe,
+ .remove = mtk_hdmi_v2_remove,
+ .driver = {
+ .name = "mediatek-drm-hdmi-v2",
+ .of_match_table = mtk_drm_hdmi_v2_of_ids,
+ .pm = &mtk_hdmi_v2_pm_ops,
+ },
+};
+module_platform_driver(mtk_hdmi_v2_driver);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("MediaTek HDMIv2 Driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("DRM_MTK_HDMI");
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 02349bd44001..5043e0377270 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -11,6 +11,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <linux/align.h>
#include "mtk_crtc.h"
@@ -21,9 +22,6 @@
static const u64 modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
- AFBC_FORMAT_MOD_SPLIT |
- AFBC_FORMAT_MOD_SPARSE),
DRM_FORMAT_MOD_INVALID,
};
@@ -71,26 +69,7 @@ static bool mtk_plane_format_mod_supported(struct drm_plane *plane,
uint32_t format,
uint64_t modifier)
{
- if (modifier == DRM_FORMAT_MOD_LINEAR)
- return true;
-
- if (modifier != DRM_FORMAT_MOD_ARM_AFBC(
- AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
- AFBC_FORMAT_MOD_SPLIT |
- AFBC_FORMAT_MOD_SPARSE))
- return false;
-
- if (format != DRM_FORMAT_XRGB8888 &&
- format != DRM_FORMAT_ARGB8888 &&
- format != DRM_FORMAT_BGRX8888 &&
- format != DRM_FORMAT_BGRA8888 &&
- format != DRM_FORMAT_ABGR8888 &&
- format != DRM_FORMAT_XBGR8888 &&
- format != DRM_FORMAT_RGB888 &&
- format != DRM_FORMAT_BGR888)
- return false;
-
- return true;
+ return modifier == DRM_FORMAT_MOD_LINEAR;
}
static void mtk_plane_destroy_state(struct drm_plane *plane,
@@ -122,7 +101,8 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (ret)
return ret;
- crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
DRM_PLANE_NO_SCALING,
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index 7f98de38842b..783572b16963 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -16,6 +16,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include "meson_overlay.h"
#include "meson_registers.h"
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index b43ac61201f3..f8d0e0874a5d 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -20,6 +20,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include "meson_plane.h"
#include "meson_registers.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 32cd8ac018c0..a32be27c39e8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -20,6 +20,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_module.h>
#include <drm/drm_pciids.h>
+#include <drm/drm_print.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200.c b/drivers/gpu/drm/mgag200/mgag200_g200.c
index f874e2949840..a5e291b344db 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200.c
@@ -7,6 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh.c b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
index e2305f8e00f8..d2aa931f579d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh.c
@@ -7,6 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
index 11ae76eb081d..7bea7a728f56 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh3.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200eh5.c b/drivers/gpu/drm/mgag200/mgag200_g200eh5.c
index e2a2942a80a0..36da6529d74f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200eh5.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200eh5.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index 23debc70dc54..8fa8fe943abf 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -7,6 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index f8796e2b7a0f..3fadbeb10af9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -7,6 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
index 31624c9ab7b7..e387a455eae5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ew3.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index e80da12ba1fe..a0ac19ee0353 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -7,6 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200wb.c b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
index a0e7b9ad46cd..d847fa8ded8c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200wb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200wb.c
@@ -7,6 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_drv.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_vga.c b/drivers/gpu/drm/mgag200/mgag200_vga.c
index 60568f32736d..b07c1362ddd4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_vga.c
+++ b/drivers/gpu/drm/mgag200/mgag200_vga.c
@@ -2,6 +2,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_ddc.h"
diff --git a/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c
index a5a3ac108bd5..a855f1734316 100644
--- a/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c
+++ b/drivers/gpu/drm/mgag200/mgag200_vga_bmc.c
@@ -3,6 +3,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "mgag200_ddc.h"
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 0c0dfb25f01b..8aa7d07303fb 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -24,6 +24,7 @@ adreno-y := \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
adreno/a6xx_preempt.o \
+ adreno/a8xx_gpu.o \
adreno-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
@@ -201,6 +202,7 @@ ADRENO_HEADERS = \
generated/a6xx_perfcntrs.xml.h \
generated/a7xx_enums.xml.h \
generated/a7xx_perfcntrs.xml.h \
+ generated/a8xx_enums.xml.h \
generated/a6xx_gmu.xml.h \
generated/adreno_common.xml.h \
generated/adreno_pm4.xml.h \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
index 5ddd015f930d..e9dbf3ddf89e 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_catalog.c
@@ -7,6 +7,7 @@
*/
#include "adreno_gpu.h"
+#include "a2xx_gpu.h"
static const struct adreno_info a2xx_gpus[] = {
{
@@ -19,7 +20,7 @@ static const struct adreno_info a2xx_gpus[] = {
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a2xx_gpu_init,
+ .funcs = &a2xx_gpu_funcs,
}, { /* a200 on i.mx51 has only 128kib gmem */
.chip_ids = ADRENO_CHIP_IDS(0x02000001),
.family = ADRENO_2XX_GEN1,
@@ -30,7 +31,7 @@ static const struct adreno_info a2xx_gpus[] = {
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a2xx_gpu_init,
+ .funcs = &a2xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x02020000),
.family = ADRENO_2XX_GEN2,
@@ -41,7 +42,7 @@ static const struct adreno_info a2xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a2xx_gpu_init,
+ .funcs = &a2xx_gpu_funcs,
}
};
DECLARE_ADRENO_GPULIST(a2xx);
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index ec38db45d8a3..1b1ee14b65cf 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -234,7 +234,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
* word (0x20xxxx for A200, 0x220xxx for A220, 0x225xxx for A225).
* Older firmware files, which lack protection support, have 0 instead.
*/
- if (ptr[1] == 0) {
+ if (ptr[1] == 0 && !a2xx_gpu->protection_disabled) {
dev_warn(gpu->dev->dev,
"Legacy firmware detected, disabling protection support\n");
a2xx_gpu->protection_disabled = true;
@@ -486,39 +486,18 @@ static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a2xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
- .recover = a2xx_recover,
- .submit = a2xx_submit,
- .active_ring = adreno_active_ring,
- .irq = a2xx_irq,
- .destroy = a2xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = adreno_show,
-#endif
- .gpu_state_get = a2xx_gpu_state_get,
- .gpu_state_put = adreno_gpu_state_put,
- .create_vm = a2xx_create_vm,
- .get_rptr = a2xx_get_rptr,
- },
-};
-
static const struct msm_gpu_perfcntr perfcntrs[] = {
/* TODO */
};
-struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
{
struct a2xx_gpu *a2xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
int ret;
if (!pdev) {
@@ -539,7 +518,7 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
gpu->perfcntrs = perfcntrs;
gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
if (ret)
goto fail;
@@ -558,3 +537,26 @@ fail:
return ERR_PTR(ret);
}
+
+const struct adreno_gpu_funcs a2xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a2xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a2xx_recover,
+ .submit = a2xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a2xx_irq,
+ .destroy = a2xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_state_get = a2xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_vm = a2xx_create_vm,
+ .get_rptr = a2xx_get_rptr,
+ },
+ .init = a2xx_gpu_init,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
index 53702f19990f..162ef98951f5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -19,6 +19,8 @@ struct a2xx_gpu {
};
#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
+extern const struct adreno_gpu_funcs a2xx_gpu_funcs;
+
struct msm_mmu *a2xx_gpummu_new(struct device *dev, struct msm_gpu *gpu);
void a2xx_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
dma_addr_t *tran_error);
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
index 1498e6532f62..6ae8716fc08a 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
@@ -7,6 +7,7 @@
*/
#include "adreno_gpu.h"
+#include "a3xx_gpu.h"
static const struct adreno_info a3xx_gpus[] = {
{
@@ -18,7 +19,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000520),
.family = ADRENO_3XX,
@@ -29,7 +30,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000600),
.family = ADRENO_3XX,
@@ -40,7 +41,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x03000620),
.family = ADRENO_3XX,
@@ -51,7 +52,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,
@@ -66,7 +67,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_512K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x03030000,
@@ -81,7 +82,7 @@ static const struct adreno_info a3xx_gpus[] = {
},
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a3xx_gpu_init,
+ .funcs = &a3xx_gpu_funcs,
}
};
DECLARE_ADRENO_GPULIST(a3xx);
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index a956cd79195e..f22d33e99e81 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -508,29 +508,6 @@ static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a3xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
- .recover = a3xx_recover,
- .submit = a3xx_submit,
- .active_ring = adreno_active_ring,
- .irq = a3xx_irq,
- .destroy = a3xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = adreno_show,
-#endif
- .gpu_busy = a3xx_gpu_busy,
- .gpu_state_get = a3xx_gpu_state_get,
- .gpu_state_put = adreno_gpu_state_put,
- .create_vm = adreno_create_vm,
- .get_rptr = a3xx_get_rptr,
- },
-};
-
static const struct msm_gpu_perfcntr perfcntrs[] = {
{ REG_A3XX_SP_PERFCOUNTER6_SELECT, REG_A3XX_RBBM_PERFCTR_SP_6_LO,
SP_ALU_ACTIVE_CYCLES, "ALUACTIVE" },
@@ -538,13 +515,14 @@ static const struct msm_gpu_perfcntr perfcntrs[] = {
SP_FS_FULL_ALU_INSTRUCTIONS, "ALUFULL" },
};
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
struct a3xx_gpu *a3xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;
@@ -569,7 +547,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
if (ret)
goto fail;
@@ -613,3 +591,27 @@ fail:
return ERR_PTR(ret);
}
+
+const struct adreno_gpu_funcs a3xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a3xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = a3xx_recover,
+ .submit = a3xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a3xx_irq,
+ .destroy = a3xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_busy = a3xx_gpu_busy,
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_vm = adreno_create_vm,
+ .get_rptr = a3xx_get_rptr,
+ },
+ .init = a3xx_gpu_init,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
index c555fb13e0d7..3d4ec9dbd918 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -23,4 +23,6 @@ struct a3xx_gpu {
};
#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
+extern const struct adreno_gpu_funcs a3xx_gpu_funcs;
+
#endif /* __A3XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
index 09f9f228b75e..9192586f7ef0 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_catalog.c
@@ -7,6 +7,7 @@
*/
#include "adreno_gpu.h"
+#include "a4xx_gpu.h"
static const struct adreno_info a4xx_gpus[] = {
{
@@ -19,7 +20,7 @@ static const struct adreno_info a4xx_gpus[] = {
},
.gmem = SZ_256K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a4xx_gpu_init,
+ .funcs = &a4xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04020000),
.family = ADRENO_4XX,
@@ -30,7 +31,7 @@ static const struct adreno_info a4xx_gpus[] = {
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a4xx_gpu_init,
+ .funcs = &a4xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x04030002),
.family = ADRENO_4XX,
@@ -41,7 +42,7 @@ static const struct adreno_info a4xx_gpus[] = {
},
.gmem = (SZ_1M + SZ_512K),
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a4xx_gpu_init,
+ .funcs = &a4xx_gpu_funcs,
}
};
DECLARE_ADRENO_GPULIST(a4xx);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 83f6329accba..db06c06067ae 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -627,37 +627,14 @@ static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr;
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a4xx_hw_init,
- .pm_suspend = a4xx_pm_suspend,
- .pm_resume = a4xx_pm_resume,
- .recover = a4xx_recover,
- .submit = a4xx_submit,
- .active_ring = adreno_active_ring,
- .irq = a4xx_irq,
- .destroy = a4xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = adreno_show,
-#endif
- .gpu_busy = a4xx_gpu_busy,
- .gpu_state_get = a4xx_gpu_state_get,
- .gpu_state_put = adreno_gpu_state_put,
- .create_vm = adreno_create_vm,
- .get_rptr = a4xx_get_rptr,
- },
- .get_timestamp = a4xx_get_timestamp,
-};
-
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
{
struct a4xx_gpu *a4xx_gpu = NULL;
struct adreno_gpu *adreno_gpu;
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
struct icc_path *ocmem_icc_path;
struct icc_path *icc_path;
int ret;
@@ -680,7 +657,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, 1);
if (ret)
goto fail;
@@ -726,3 +703,28 @@ fail:
return ERR_PTR(ret);
}
+
+const struct adreno_gpu_funcs a4xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a4xx_hw_init,
+ .pm_suspend = a4xx_pm_suspend,
+ .pm_resume = a4xx_pm_resume,
+ .recover = a4xx_recover,
+ .submit = a4xx_submit,
+ .active_ring = adreno_active_ring,
+ .irq = a4xx_irq,
+ .destroy = a4xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
+#endif
+ .gpu_busy = a4xx_gpu_busy,
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
+ .create_vm = adreno_create_vm,
+ .get_rptr = a4xx_get_rptr,
+ },
+ .init = a4xx_gpu_init,
+ .get_timestamp = a4xx_get_timestamp,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
index a01448cba2ea..71b164439f62 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -20,4 +20,6 @@ struct a4xx_gpu {
};
#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
+extern const struct adreno_gpu_funcs a4xx_gpu_funcs;
+
#endif /* __A4XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
index b48a636d8237..babd320f3b73 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_catalog.c
@@ -7,6 +7,7 @@
*/
#include "adreno_gpu.h"
+#include "a5xx_gpu.h"
static const struct adreno_info a5xx_gpus[] = {
{
@@ -21,7 +22,7 @@ static const struct adreno_info a5xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000600),
.family = ADRENO_5XX,
@@ -38,7 +39,7 @@ static const struct adreno_info a5xx_gpus[] = {
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a506_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000800),
@@ -55,7 +56,7 @@ static const struct adreno_info a5xx_gpus[] = {
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a508_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05000900),
@@ -72,7 +73,7 @@ static const struct adreno_info a5xx_gpus[] = {
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
/* Adreno 509 uses the same ZAP as 512 */
.zapfw = "a512_zap.mdt",
}, {
@@ -89,7 +90,7 @@ static const struct adreno_info a5xx_gpus[] = {
* the GDSC which appears to make it grumpy
*/
.inactive_period = 250,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05010200),
.family = ADRENO_5XX,
@@ -105,7 +106,7 @@ static const struct adreno_info a5xx_gpus[] = {
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a512_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(
@@ -127,7 +128,7 @@ static const struct adreno_info a5xx_gpus[] = {
.inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a530_zap.mdt",
}, {
.chip_ids = ADRENO_CHIP_IDS(0x05040001),
@@ -145,7 +146,7 @@ static const struct adreno_info a5xx_gpus[] = {
*/
.inactive_period = 250,
.quirks = ADRENO_QUIRK_LMLOADKILL_DISABLE,
- .init = a5xx_gpu_init,
+ .funcs = &a5xx_gpu_funcs,
.zapfw = "a540_zap.mdt",
}
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4a04dc43a8e6..56eaff2ee4e4 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1691,34 +1691,6 @@ static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
}
-static const struct adreno_gpu_funcs funcs = {
- .base = {
- .get_param = adreno_get_param,
- .set_param = adreno_set_param,
- .hw_init = a5xx_hw_init,
- .ucode_load = a5xx_ucode_load,
- .pm_suspend = a5xx_pm_suspend,
- .pm_resume = a5xx_pm_resume,
- .recover = a5xx_recover,
- .submit = a5xx_submit,
- .active_ring = a5xx_active_ring,
- .irq = a5xx_irq,
- .destroy = a5xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
- .show = a5xx_show,
-#endif
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = a5xx_debugfs_init,
-#endif
- .gpu_busy = a5xx_gpu_busy,
- .gpu_state_get = a5xx_gpu_state_get,
- .gpu_state_put = a5xx_gpu_state_put,
- .create_vm = adreno_create_vm,
- .get_rptr = a5xx_get_rptr,
- },
- .get_timestamp = a5xx_get_timestamp,
-};
-
static void check_speed_bin(struct device *dev)
{
struct nvmem_cell *cell;
@@ -1751,7 +1723,7 @@ static void check_speed_bin(struct device *dev)
devm_pm_opp_set_supported_hw(dev, &val, 1);
}
-struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+static struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
@@ -1781,7 +1753,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
if (config->info->revn == 510)
nr_rings = 1;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, nr_rings);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings);
if (ret) {
a5xx_destroy(&(a5xx_gpu->base.base));
return ERR_PTR(ret);
@@ -1806,3 +1778,32 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
return gpu;
}
+
+const struct adreno_gpu_funcs a5xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a5xx_hw_init,
+ .ucode_load = a5xx_ucode_load,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .submit = a5xx_submit,
+ .active_ring = a5xx_active_ring,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
+ .debugfs_init = a5xx_debugfs_init,
+#endif
+ .gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
+ .create_vm = adreno_create_vm,
+ .get_rptr = a5xx_get_rptr,
+ },
+ .init = a5xx_gpu_init,
+ .get_timestamp = a5xx_get_timestamp,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 9c0d701fe4b8..407bb950d350 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -133,6 +133,7 @@ struct a5xx_preempt_record {
*/
#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+extern const struct adreno_gpu_funcs a5xx_gpu_funcs;
int a5xx_power_init(struct msm_gpu *gpu);
void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 00e1afd46b81..29107b362346 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -672,6 +672,14 @@ static const u32 a690_protect_regs[] = {
};
DECLARE_ADRENO_PROTECT(a690_protect, 48);
+static const struct adreno_reglist a640_gbif[] = {
+ { REG_A6XX_GBIF_QSB_SIDE0, 0x00071620 },
+ { REG_A6XX_GBIF_QSB_SIDE1, 0x00071620 },
+ { REG_A6XX_GBIF_QSB_SIDE2, 0x00071620 },
+ { REG_A6XX_GBIF_QSB_SIDE3, 0x00071620 },
+ { },
+};
+
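a640_gbif is a sentinel-terminated adreno_reglist: iteration stops at the zeroed entry rather than at a stored count. This matches the consumption loop this series adds in a6xx_gmu_fw_start(); a condensed sketch of that pattern (gpu_write() is the driver's existing register accessor):

/*
 * Sketch of the sentinel-terminated reglist write pattern used for
 * gbif_cx below in this series.
 */
static void write_reglist(struct msm_gpu *gpu,
			  const struct adreno_reglist *list)
{
	for (int i = 0; list && list[i].offset; i++)
		gpu_write(gpu, list[i].offset, list[i].value);
}
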
static const struct adreno_info a6xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x06010000),
@@ -683,11 +691,12 @@ static const struct adreno_info a6xx_gpus[] = {
.gmem = (SZ_128K + SZ_4K),
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gmuwrapper_funcs,
.zapfw = "a610_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a612_hwcg,
.protect = &a630_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00080000,
},
@@ -706,6 +715,22 @@ static const struct adreno_info a6xx_gpus[] = {
{ 127, 4 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06010200),
+ .family = ADRENO_6XX_GEN1,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a612_rgmu.bin",
+ },
+ .gmem = (SZ_128K + SZ_4K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .funcs = &a6xx_gmuwrapper_funcs,
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a612_hwcg,
+ .protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000022,
+ .prim_fifo_threshold = 0x00080000,
+ },
+ }, {
.chip_ids = ADRENO_CHIP_IDS(0x06010500),
.family = ADRENO_6XX_GEN1,
.revn = 615,
@@ -716,7 +741,7 @@ static const struct adreno_info a6xx_gpus[] = {
.gmem = SZ_512K,
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -747,7 +772,7 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -774,7 +799,7 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
.gmu_cgc_mode = 0x00000222,
@@ -797,7 +822,7 @@ static const struct adreno_info a6xx_gpus[] = {
.gmem = SZ_512K,
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -822,7 +847,7 @@ static const struct adreno_info a6xx_gpus[] = {
.gmem = SZ_512K,
.quirks = ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -847,7 +872,7 @@ static const struct adreno_info a6xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a615_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
@@ -873,11 +898,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a620_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a620_hwcg,
.protect = &a650_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00010000,
},
@@ -896,10 +922,11 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a650_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00010000,
.bcms = (const struct a6xx_bcm[]) {
@@ -913,6 +940,11 @@ static const struct adreno_info a6xx_gpus[] = {
{ /* sentinel */ },
},
},
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 185, 0 },
+ { 127, 1 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
@@ -928,7 +960,7 @@ static const struct adreno_info a6xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a630_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a630_hwcg,
@@ -948,7 +980,7 @@ static const struct adreno_info a6xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
@@ -972,11 +1004,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a650_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a650_hwcg,
.protect = &a650_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00300200,
},
@@ -998,11 +1031,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a660_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020000,
.prim_fifo_threshold = 0x00300200,
},
@@ -1017,13 +1051,19 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a660_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00300200,
},
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 169, 0 },
+ { 113, 1 },
+ ),
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06030500),
.family = ADRENO_6XX_GEN4,
@@ -1035,11 +1075,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a660_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x00200200,
},
@@ -1062,7 +1103,7 @@ static const struct adreno_info a6xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_4GB_VA,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a640_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
@@ -1081,11 +1122,12 @@ static const struct adreno_info a6xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gpu_funcs,
.zapfw = "a690_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a690_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020200,
.prim_fifo_threshold = 0x00800200,
},
@@ -1343,6 +1385,69 @@ static const uint32_t a7xx_pwrup_reglist_regs[] = {
DECLARE_ADRENO_REGLIST_LIST(a7xx_pwrup_reglist);
+/* Applicable for X185, A750 */
+static const u32 a750_ifpc_reglist_regs[] = {
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(0),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(1),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
+ REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
+ REG_A6XX_TPL1_NC_MODE_CNTL,
+ REG_A6XX_SP_NC_MODE_CNTL,
+ REG_A6XX_CP_DBG_ECO_CNTL,
+ REG_A6XX_CP_PROTECT_CNTL,
+ REG_A6XX_CP_PROTECT(0),
+ REG_A6XX_CP_PROTECT(1),
+ REG_A6XX_CP_PROTECT(2),
+ REG_A6XX_CP_PROTECT(3),
+ REG_A6XX_CP_PROTECT(4),
+ REG_A6XX_CP_PROTECT(5),
+ REG_A6XX_CP_PROTECT(6),
+ REG_A6XX_CP_PROTECT(7),
+ REG_A6XX_CP_PROTECT(8),
+ REG_A6XX_CP_PROTECT(9),
+ REG_A6XX_CP_PROTECT(10),
+ REG_A6XX_CP_PROTECT(11),
+ REG_A6XX_CP_PROTECT(12),
+ REG_A6XX_CP_PROTECT(13),
+ REG_A6XX_CP_PROTECT(14),
+ REG_A6XX_CP_PROTECT(15),
+ REG_A6XX_CP_PROTECT(16),
+ REG_A6XX_CP_PROTECT(17),
+ REG_A6XX_CP_PROTECT(18),
+ REG_A6XX_CP_PROTECT(19),
+ REG_A6XX_CP_PROTECT(20),
+ REG_A6XX_CP_PROTECT(21),
+ REG_A6XX_CP_PROTECT(22),
+ REG_A6XX_CP_PROTECT(23),
+ REG_A6XX_CP_PROTECT(24),
+ REG_A6XX_CP_PROTECT(25),
+ REG_A6XX_CP_PROTECT(26),
+ REG_A6XX_CP_PROTECT(27),
+ REG_A6XX_CP_PROTECT(28),
+ REG_A6XX_CP_PROTECT(29),
+ REG_A6XX_CP_PROTECT(30),
+ REG_A6XX_CP_PROTECT(31),
+ REG_A6XX_CP_PROTECT(32),
+ REG_A6XX_CP_PROTECT(33),
+ REG_A6XX_CP_PROTECT(34),
+ REG_A6XX_CP_PROTECT(35),
+ REG_A6XX_CP_PROTECT(36),
+ REG_A6XX_CP_PROTECT(37),
+ REG_A6XX_CP_PROTECT(38),
+ REG_A6XX_CP_PROTECT(39),
+ REG_A6XX_CP_PROTECT(40),
+ REG_A6XX_CP_PROTECT(41),
+ REG_A6XX_CP_PROTECT(42),
+ REG_A6XX_CP_PROTECT(43),
+ REG_A6XX_CP_PROTECT(44),
+ REG_A6XX_CP_PROTECT(45),
+ REG_A6XX_CP_PROTECT(46),
+ REG_A6XX_CP_PROTECT(47),
+};
+
+DECLARE_ADRENO_REGLIST_LIST(a750_ifpc_reglist);
+
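DECLARE_ADRENO_REGLIST_LIST() presumably pairs the _regs array with its element count so consumers never hard-code the list length. A plausible expansion, shown for illustration only (the real macro lives in the adreno headers):

/* plausible expansion, for illustration only */
#define DECLARE_ADRENO_REGLIST_LIST(name)			\
	static const struct adreno_reglist_list name = {	\
		.regs = name ## _regs,				\
		.count = ARRAY_SIZE(name ## _regs),		\
	}
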
static const struct adreno_info a7xx_gpus[] = {
{
.chip_ids = ADRENO_CHIP_IDS(0x07000200),
@@ -1353,11 +1458,12 @@ static const struct adreno_info a7xx_gpus[] = {
.gmem = SZ_128K,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_HW_APRIV,
- .init = a6xx_gpu_init,
+ .funcs = &a6xx_gmuwrapper_funcs,
.zapfw = "a702_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.hwcg = a702_hwcg,
.protect = &a650_protect,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020202,
.prim_fifo_threshold = 0x0000c000,
},
@@ -1379,12 +1485,13 @@ static const struct adreno_info a7xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.zapfw = "a730_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a730_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .gbif_cx = a640_gbif,
.gmu_cgc_mode = 0x00020000,
},
.preempt_record_size = 2860 * SZ_1K,
@@ -1400,12 +1507,13 @@ static const struct adreno_info a7xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.zapfw = "a740_zap.mdt",
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .gbif_cx = a640_gbif,
.gmu_chipid = 0x7020100,
.gmu_cgc_mode = 0x00020202,
.bcms = (const struct a6xx_bcm[]) {
@@ -1432,14 +1540,28 @@ static const struct adreno_info a7xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
- ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ ADRENO_QUIRK_PREEMPTION |
+ ADRENO_QUIRK_IFPC,
+ .funcs = &a7xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .ifpc_reglist = &a750_ifpc_reglist,
+ .gbif_cx = a640_gbif,
.gmu_chipid = 0x7050001,
.gmu_cgc_mode = 0x00020202,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(3),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
},
.preempt_record_size = 4192 * SZ_1K,
.speedbins = ADRENO_SPEEDBINS(
@@ -1460,12 +1582,15 @@ static const struct adreno_info a7xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
- ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ ADRENO_QUIRK_PREEMPTION |
+ ADRENO_QUIRK_IFPC,
+ .funcs = &a7xx_gpu_funcs,
.zapfw = "gen70900_zap.mbn",
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .ifpc_reglist = &a750_ifpc_reglist,
+ .gbif_cx = a640_gbif,
.gmu_chipid = 0x7090100,
.gmu_cgc_mode = 0x00020202,
.bcms = (const struct a6xx_bcm[]) {
@@ -1493,11 +1618,12 @@ static const struct adreno_info a7xx_gpus[] = {
.quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
ADRENO_QUIRK_HAS_HW_APRIV |
ADRENO_QUIRK_PREEMPTION,
- .init = a6xx_gpu_init,
+ .funcs = &a7xx_gpu_funcs,
.a6xx = &(const struct a6xx_info) {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.pwrup_reglist = &a7xx_pwrup_reglist,
+ .gbif_cx = a640_gbif,
.gmu_chipid = 0x70f0000,
.gmu_cgc_mode = 0x00020222,
.bcms = (const struct a6xx_bcm[]) {
@@ -1524,6 +1650,306 @@ static const struct adreno_info a7xx_gpus[] = {
};
DECLARE_ADRENO_GPULIST(a7xx);
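
Several entries above gain ADRENO_SPEEDBINS() tables mapping a fuse value to a speedbin index; the index becomes a supported-hardware mask that restricts the OPP table, much as check_speed_bin() does for a5xx earlier in this patch. A hedged sketch, assuming a { fuse, speedbin } pair type like the driver's adreno_speedbin (names illustrative):

/*
 * Sketch of speedbin handling: translate a fuse value through a
 * fuse->bin table and restrict the OPP table to that bin's rows.
 */
static int set_supported_hw(struct device *dev, u32 fuse,
			    const struct adreno_speedbin *bins, int count)
{
	for (int i = 0; i < count; i++) {
		if (bins[i].fuse == fuse) {
			u32 supp_hw = BIT(bins[i].speedbin);

			return devm_pm_opp_set_supported_hw(dev, &supp_hw, 1);
		}
	}
	return -ENOENT;
}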
+static const struct adreno_reglist_pipe x285_nonctxt_regs[] = {
+ { REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) },
+ { REG_A8XX_GRAS_DBG_ECO_CNTL, 0x00000800, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_GRAS_TSEFE_DBG_ECO_CNTL, 0x00200000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A6XX_PC_AUTO_VERTEX_STRIDE, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_VIS_STREAM_CNTL, 0x10010000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CONTEXT_SWITCH_STABILIZE_CNTL_1, 0x00000002, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_1, 0x00000003, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_2, 0x00000200, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_3, 0x00500000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_4, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_RB_GC_GMEM_PROTECT, 0x15000000, BIT(PIPE_BR) },
+ { REG_A8XX_RB_RESOLVE_PREFETCH_CNTL, 0x00000007, BIT(PIPE_BR) },
+ { REG_A8XX_RB_CMP_DBG_ECO_CNTL, 0x00004000, BIT(PIPE_BR) },
+ { REG_A8XX_RBBM_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_SLICE_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL, 0x00000030, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL2, 0x00000030, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, 0x0fffffff, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x22122212, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_CGC_P2S_CNTL, 0x00000040, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_CHICKEN_BITS_2, 0x00820800, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_CHICKEN_BITS_3, 0x00300000, BIT(PIPE_NONE) },
+ { REG_A6XX_SP_PERFCTR_SHADER_MASK, 0x0000003f, BIT(PIPE_NONE) },
+ /* Disable CS dead batch merge */
+ { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_2, BIT(31), BIT(PIPE_NONE) },
+ { REG_A7XX_SP_HLSQ_TIMEOUT_THRESHOLD_DP, 0x00000080, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_READ_SEL, 0x0001ff00, BIT(PIPE_NONE) },
+ { REG_A6XX_TPL1_DBG_ECO_CNTL, 0x10000000, BIT(PIPE_NONE) },
+ /* BIT(26): Disable final clamp for bicubic filtering */
+ { REG_A6XX_TPL1_DBG_ECO_CNTL1, 0x00000720, BIT(PIPE_NONE) },
+ { REG_A6XX_UCHE_MODE_CNTL, 0x80080000, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_CCHE_MODE_CNTL, 0x00001000, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_CCHE_CACHE_WAYS, 0x00000800, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_GBIF_GX_CONFIG, 0x010240e0, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_VARB_IDLE_TIMEOUT, 0x00000020, BIT(PIPE_NONE) },
+ { REG_A7XX_VFD_DBG_ECO_CNTL, 0x00008000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BV_THRESHOLD, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BR_THRESHOLD, 0x00600060, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BUSY_REQ_CNT, 0x00200020, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_LP_REQ_CNT, 0x00000020, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VPC_FLATSHADE_MODE_CNTL, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { },
+};
+
+static const u32 x285_protect_regs[] = {
+ A6XX_PROTECT_RDONLY(0x00008, 0x039b),
+ A6XX_PROTECT_RDONLY(0x003b4, 0x008b),
+ A6XX_PROTECT_NORDWR(0x00440, 0x001f),
+ A6XX_PROTECT_RDONLY(0x00580, 0x005f),
+ A6XX_PROTECT_NORDWR(0x005e0, 0x011f),
+ A6XX_PROTECT_RDONLY(0x0074a, 0x0005),
+ A6XX_PROTECT_RDONLY(0x00759, 0x0026),
+ A6XX_PROTECT_RDONLY(0x00789, 0x0000),
+ A6XX_PROTECT_RDONLY(0x0078c, 0x0013),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0029),
+ A6XX_PROTECT_NORDWR(0x0082c, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00837, 0x00af),
+ A6XX_PROTECT_RDONLY(0x008e7, 0x00c9),
+ A6XX_PROTECT_NORDWR(0x008ec, 0x00c3),
+ A6XX_PROTECT_NORDWR(0x009b1, 0x0250),
+ A6XX_PROTECT_RDONLY(0x00ce0, 0x0001),
+ A6XX_PROTECT_RDONLY(0x00df0, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00df1, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e01, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c5),
+ A6XX_PROTECT_RDONLY(0x03cc6, 0x0039),
+ A6XX_PROTECT_NORDWR(0x03d00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08600, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x00ff),
+ A6XX_PROTECT_RDONLY(0x08f00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08f01, 0x01be),
+ A6XX_PROTECT_NORDWR(0x09600, 0x01ff),
+ A6XX_PROTECT_RDONLY(0x0981a, 0x02e5),
+ A6XX_PROTECT_NORDWR(0x09e00, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x0a600, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x0a82e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae00, 0x0006),
+ A6XX_PROTECT_NORDWR(0x0ae08, 0x0006),
+ A6XX_PROTECT_NORDWR(0x0ae10, 0x00bf),
+ A6XX_PROTECT_RDONLY(0x0aed0, 0x002f),
+ A6XX_PROTECT_NORDWR(0x0af00, 0x027f),
+ A6XX_PROTECT_NORDWR(0x0b600, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x0dc00, 0x1fff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x003f),
+ A6XX_PROTECT_RDONLY(0x18440, 0x013f),
+ A6XX_PROTECT_NORDWR(0x18580, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1b400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0477),
+ A6XX_PROTECT_RDONLY(0x1f878, 0x0507),
+ A6XX_PROTECT_NORDWR(0x1f930, 0x0329),
+ A6XX_PROTECT_NORDWR(0x1fd80, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x27800, 0x007f),
+ A6XX_PROTECT_RDONLY(0x27880, 0x0385),
+ A6XX_PROTECT_NORDWR(0x27882, 0x000a),
+ A6XX_PROTECT_NORDWR(0x27c06, 0x0000),
+};
+
+DECLARE_ADRENO_PROTECT(x285_protect, 64);
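
Each protect entry packs a base register and a range length into a single CP_PROTECT word; RDONLY leaves the range readable while NORDWR blocks both directions. The helpers and the declaration macro expand roughly as below (field widths as used by the upstream a6xx code, treat as illustrative). The count/count_max pairing is what the BUILD_BUG_ON block at the end of this file checks.

/* roughly how the protect helpers encode a range (illustrative) */
#define A6XX_PROTECT_RDONLY(_reg, _len) \
	((((_len) & 0x3fff) << 18) | ((_reg) & 0x3ffff))
#define A6XX_PROTECT_NORDWR(_reg, _len) \
	(BIT(31) | (((_len) & 0x3fff) << 18) | ((_reg) & 0x3ffff))

/* ...and the declaration macro pairs the table with the HW limit: */
#define DECLARE_ADRENO_PROTECT(name, __count_max)	\
	static const struct adreno_protect name = {	\
		.regs = name ## _regs,			\
		.count = ARRAY_SIZE(name ## _regs),	\
		.count_max = __count_max,		\
	}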
+
+static const struct adreno_reglist_pipe a840_nonctxt_regs[] = {
+ { REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) },
+ { REG_A8XX_GRAS_DBG_ECO_CNTL, 0x00000800, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_GRAS_TSEFE_DBG_ECO_CNTL, 0x00200000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A6XX_PC_AUTO_VERTEX_STRIDE, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_VIS_STREAM_CNTL, 0x10010000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CONTEXT_SWITCH_STABILIZE_CNTL_1, 0x00000002, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_1, 0x00000003, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_2, 0x00000200, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_3, 0x00500000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_PC_CHICKEN_BITS_4, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ /* Disable Dead Draw Merge scheme on RB-HLSQ */
+ { REG_A6XX_RB_RBP_CNTL, BIT(5), BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A7XX_RB_CCU_CNTL, 0x00000068, BIT(PIPE_BR) },
+ /* Partially enable perf clear; disable DINT-to-C/Z-BE data forwarding */
+ { REG_A7XX_RB_CCU_DBG_ECO_CNTL, 0x00002200, BIT(PIPE_BR) },
+ { REG_A8XX_RB_GC_GMEM_PROTECT, 0x12000000, BIT(PIPE_BR) },
+ { REG_A8XX_RB_RESOLVE_PREFETCH_CNTL, 0x00000007, BIT(PIPE_BR) },
+ { REG_A8XX_RB_CMP_DBG_ECO_CNTL, 0x00004000, BIT(PIPE_BR) },
+ { REG_A8XX_RBBM_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_SLICE_NC_MODE_CNTL, 0x00000001, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_POWER_UP_RESET_SW_OVERRIDE, 0x70809060, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_POWER_UP_RESET_SW_BV_OVERRIDE, 0x30000000, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL, 0x00000030, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_WAIT_IDLE_CLOCKS_CNTL2, 0x00000030, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, 0x0fffffff, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x22122212, BIT(PIPE_NONE) },
+ { REG_A8XX_RBBM_CGC_P2S_CNTL, 0x00000040, BIT(PIPE_NONE) },
+ /* Disable mode_switch optimization in UMAS */
+ { REG_A6XX_SP_CHICKEN_BITS, BIT(24) | BIT(26), BIT(PIPE_NONE) },
+ /* Disable LPAC large-LM mode */
+ { REG_A8XX_SP_SS_CHICKEN_BITS_0, BIT(3), BIT(PIPE_NONE) },
+ /* Disable PS out of order retire */
+ { REG_A7XX_SP_CHICKEN_BITS_2, 0x00c21800, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_CHICKEN_BITS_3, 0x00300000, BIT(PIPE_NONE) },
+ /* Disable SP2TP info attribute */
+ { REG_A8XX_SP_CHICKEN_BITS_4, 0x00000002, BIT(PIPE_NONE) },
+ { REG_A6XX_SP_PERFCTR_SHADER_MASK, 0x0000003f, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL, BIT(14), BIT(PIPE_NONE) },
+ /* Ignore HLSQ shared constant feedback from SP */
+ { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_1, BIT(17), BIT(PIPE_NONE) },
+ /* Disable CS dead batch merge */
+ { REG_A7XX_SP_HLSQ_DBG_ECO_CNTL_2, BIT(24), BIT(PIPE_NONE) },
+ { REG_A8XX_SP_HLSQ_DBG_ECO_CNTL_3, BIT(7), BIT(PIPE_NONE) },
+ { REG_A7XX_SP_HLSQ_TIMEOUT_THRESHOLD_DP, 0x00000080, BIT(PIPE_NONE) },
+ { REG_A7XX_SP_READ_SEL, 0x0001ff00, BIT(PIPE_NONE) },
+ { REG_A6XX_TPL1_DBG_ECO_CNTL, 0x10100000, BIT(PIPE_NONE) },
+ /* BIT(26): Disable final clamp for bicubic filtering */
+ { REG_A6XX_TPL1_DBG_ECO_CNTL1, 0x04000720, BIT(PIPE_NONE) },
+ { REG_A6XX_UCHE_MODE_CNTL, 0x80080000, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_CCHE_MODE_CNTL, 0x00001000, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_CCHE_CACHE_WAYS, 0x00000800, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_GBIF_GX_CONFIG, 0x010240e0, BIT(PIPE_NONE) },
+ { REG_A8XX_UCHE_VARB_IDLE_TIMEOUT, 0x00000020, BIT(PIPE_NONE) },
+ { REG_A7XX_VFD_DBG_ECO_CNTL, 0x00008000, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BV_THRESHOLD, 0x00500050, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BR_THRESHOLD, 0x00600060, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_BUSY_REQ_CNT, 0x00200020, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VFD_CB_LP_REQ_CNT, 0x00000020, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { REG_A8XX_VPC_FLATSHADE_MODE_CNTL, 0x00000001, BIT(PIPE_BV) | BIT(PIPE_BR) },
+ { },
+};
+
+static const u32 a840_protect_regs[] = {
+ A6XX_PROTECT_RDONLY(0x00008, 0x039b),
+ A6XX_PROTECT_RDONLY(0x003b4, 0x008b),
+ A6XX_PROTECT_NORDWR(0x00440, 0x001f),
+ A6XX_PROTECT_RDONLY(0x00580, 0x005f),
+ A6XX_PROTECT_NORDWR(0x005e0, 0x011f),
+ A6XX_PROTECT_RDONLY(0x0074a, 0x0005),
+ A6XX_PROTECT_RDONLY(0x00759, 0x001b),
+ A6XX_PROTECT_NORDWR(0x00775, 0x000a),
+ A6XX_PROTECT_RDONLY(0x00789, 0x0000),
+ A6XX_PROTECT_RDONLY(0x0078c, 0x0013),
+ A6XX_PROTECT_NORDWR(0x00800, 0x0029),
+ A6XX_PROTECT_NORDWR(0x00837, 0x00af),
+ A6XX_PROTECT_RDONLY(0x008e7, 0x00c9),
+ A6XX_PROTECT_NORDWR(0x008ec, 0x00c3),
+ A6XX_PROTECT_NORDWR(0x009b1, 0x0250),
+ A6XX_PROTECT_NORDWR(0x00c07, 0x0008),
+ A6XX_PROTECT_RDONLY(0x00ce0, 0x0001),
+ A6XX_PROTECT_RDONLY(0x00df0, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00df1, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e01, 0x0000),
+ A6XX_PROTECT_NORDWR(0x00e03, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x03c00, 0x00c5),
+ A6XX_PROTECT_RDONLY(0x03cc6, 0x0039),
+ A6XX_PROTECT_NORDWR(0x03d00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x08600, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x08e00, 0x00ff),
+ A6XX_PROTECT_RDONLY(0x08f00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x08f01, 0x01be),
+ A6XX_PROTECT_NORDWR(0x09600, 0x01ff),
+ A6XX_PROTECT_RDONLY(0x0981a, 0x02e5),
+ A6XX_PROTECT_NORDWR(0x09e00, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x0a600, 0x01ff),
+ A6XX_PROTECT_NORDWR(0x0a82e, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae00, 0x0000),
+ A6XX_PROTECT_NORDWR(0x0ae02, 0x0004),
+ A6XX_PROTECT_NORDWR(0x0ae08, 0x0006),
+ A6XX_PROTECT_NORDWR(0x0ae10, 0x00bf),
+ A6XX_PROTECT_RDONLY(0x0aed0, 0x002f),
+ A6XX_PROTECT_NORDWR(0x0af00, 0x027f),
+ A6XX_PROTECT_NORDWR(0x0b600, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x0dc00, 0x1fff),
+ A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x18400, 0x003f),
+ A6XX_PROTECT_RDONLY(0x18440, 0x013f),
+ A6XX_PROTECT_NORDWR(0x18580, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1b400, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x1f400, 0x0477),
+ A6XX_PROTECT_RDONLY(0x1f878, 0x0507),
+ A6XX_PROTECT_NORDWR(0x1f930, 0x0329),
+ A6XX_PROTECT_NORDWR(0x1fd80, 0x1fff),
+ A6XX_PROTECT_NORDWR(0x27800, 0x007f),
+ A6XX_PROTECT_RDONLY(0x27880, 0x0385),
+ A6XX_PROTECT_NORDWR(0x27882, 0x0009),
+ A6XX_PROTECT_NORDWR(0x27c06, 0x0000),
+};
+DECLARE_ADRENO_PROTECT(a840_protect, 64);
+
+static const struct adreno_reglist a840_gbif[] = {
+ { REG_A6XX_GBIF_QSB_SIDE0, 0x00071e20 },
+ { REG_A6XX_GBIF_QSB_SIDE1, 0x00071e20 },
+ { REG_A6XX_GBIF_QSB_SIDE2, 0x00071e20 },
+ { REG_A6XX_GBIF_QSB_SIDE3, 0x00071e20 },
+ { REG_A8XX_GBIF_CX_CONFIG, 0x20023000 },
+ { },
+};
+
+static const struct adreno_info a8xx_gpus[] = {
+ {
+ .chip_ids = ADRENO_CHIP_IDS(0x44070001),
+ .family = ADRENO_8XX_GEN2,
+ .fw = {
+ [ADRENO_FW_SQE] = "gen80100_sqe.fw",
+ [ADRENO_FW_GMU] = "gen80100_gmu.bin",
+ },
+ .gmem = 21 * SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .funcs = &a8xx_gpu_funcs,
+ .a6xx = &(const struct a6xx_info) {
+ .protect = &x285_protect,
+ .nonctxt_reglist = x285_nonctxt_regs,
+ .gbif_cx = a840_gbif,
+ .max_slices = 4,
+ .gmu_chipid = 0x8010100,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(2),
+ .perfmode_bw = 16500000,
+ },
+ { /* sentinel */ },
+ },
+ },
+ }, {
+ .chip_ids = ADRENO_CHIP_IDS(0x44050a01),
+ .family = ADRENO_8XX_GEN2,
+ .fw = {
+ [ADRENO_FW_SQE] = "gen80200_sqe.fw",
+ [ADRENO_FW_GMU] = "gen80200_gmu.bin",
+ [ADRENO_FW_AQE] = "gen80200_aqe.fw",
+ },
+ .gmem = 18 * SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .funcs = &a8xx_gpu_funcs,
+ .a6xx = &(const struct a6xx_info) {
+ .protect = &a840_protect,
+ .nonctxt_reglist = a840_nonctxt_regs,
+ .gbif_cx = a840_gbif,
+ .max_slices = 3,
+ .gmu_chipid = 0x8020100,
+ .bcms = (const struct a6xx_bcm[]) {
+ { .name = "SH0", .buswidth = 16 },
+ { .name = "MC0", .buswidth = 4 },
+ {
+ .name = "ACV",
+ .fixed = true,
+ .perfmode = BIT(2),
+ .perfmode_bw = 10687500,
+ },
+ { /* sentinel */ },
+ },
+ },
+ .preempt_record_size = 19708 * SZ_1K,
+ }
+};
+
+DECLARE_ADRENO_GPULIST(a8xx);
+
static inline __always_unused void __build_asserts(void)
{
BUILD_BUG_ON(a630_protect.count > a630_protect.count_max);
@@ -1531,4 +1957,5 @@ static inline __always_unused void __build_asserts(void)
BUILD_BUG_ON(a660_protect.count > a660_protect.count_max);
BUILD_BUG_ON(a690_protect.count > a690_protect.count_max);
BUILD_BUG_ON(a730_protect.count > a730_protect.count_max);
+ BUILD_BUG_ON(a840_protect.count > a840_protect.count_max);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 28e6705c6da6..5903cd891b49 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -93,14 +93,25 @@ bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)
/* Check to see if the GX rail is still powered */
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
u32 val;
/* This can be called from gpu state code so make sure GMU is valid */
if (!gmu->initialized)
return false;
+ /* Without a GMU, the GX power domain is on whenever the GPU is active */
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return true;
+
val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+ if (adreno_is_a7xx(adreno_gpu))
+ return !(val &
+ (A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+ A7XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+
return !(val &
(A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
@@ -213,14 +224,19 @@ unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
{
- u32 val;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int local = gmu->idle_level;
+ u32 val;
/* SPTP and IFPC both report as IFPC */
if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
local = GMU_IDLE_STATE_IFPC;
- val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+ if (adreno_is_a8xx(adreno_gpu))
+ val = gmu_read(gmu, REG_A8XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+ else
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
if (val == local) {
if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
@@ -258,7 +274,9 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
/* Set the log wptr index
* note: downstream saves the value in poweroff and restores it here
*/
- if (adreno_is_a7xx(adreno_gpu))
+ if (adreno_is_a8xx(adreno_gpu))
+ gmu_write(gmu, REG_A8XX_GMU_GENERAL_9, 0);
+ else if (adreno_is_a7xx(adreno_gpu))
gmu_write(gmu, REG_A7XX_GMU_GENERAL_9, 0);
else
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
@@ -272,6 +290,8 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
if (ret)
DRM_DEV_ERROR(gmu->dev, "GMU firmware initialization timed out\n");
+ set_bit(GMU_STATUS_FW_START, &gmu->status);
+
return ret;
}
@@ -337,12 +357,18 @@ static const struct a6xx_gmu_oob_bits a6xx_gmu_oob_bits[] = {
/* Trigger an OOB (out of band) request to the GMU */
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int ret;
u32 val;
int request, ack;
WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+ /* The RGMU does not support OOB requests, so skip them */
+ if (adreno_has_rgmu(adreno_gpu))
+ return 0;
+
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return -EINVAL;
@@ -363,9 +389,23 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Trigger the requested OOB operation */
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
- /* Wait for the acknowledge interrupt */
- ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
- val & (1 << ack), 100, 10000);
+ do {
+ /* Wait for the acknowledge interrupt */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & (1 << ack), 100, 10000);
+
+ if (!ret)
+ break;
+
+ if (completion_done(&a6xx_gpu->base.fault_coredump_done))
+ break;
+
+ /*
+  * We may time out because the GMU is temporarily wedged by pending
+  * GPU faults while we take a devcoredump. Wait until the MMU is
+  * resumed and try again.
+  */
+ wait_for_completion(&a6xx_gpu->base.fault_coredump_done);
+ } while (true);
if (ret)
DRM_DEV_ERROR(gmu->dev,
@@ -382,10 +422,16 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int bit;
WARN_ON_ONCE(!mutex_is_locked(&gmu->lock));
+ /* The RGMU does not support OOB requests, so skip them */
+ if (adreno_has_rgmu(adreno_gpu))
+ return;
+
if (state >= ARRAY_SIZE(a6xx_gmu_oob_bits))
return;
@@ -403,7 +449,10 @@ int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
int ret;
u32 val;
- if (!gmu->legacy)
+ WARN_ON(!gmu->legacy);
+
+ /* Nothing to do if GMU does the power management */
+ if (gmu->idle_level > GMU_IDLE_STATE_ACTIVE)
return 0;
gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
@@ -469,17 +518,25 @@ static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
* in the power down sequence not being fully executed. That in turn can
* prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
*/
- if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu))
- gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0));
+ if (adreno_is_a8xx(adreno_gpu))
+ gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0));
+ else if (adreno_is_a7xx(adreno_gpu) || (adreno_is_a621(adreno_gpu) ||
+ adreno_is_7c3(adreno_gpu)))
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, BIT(0));
}
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
int ret;
/* Disable the power counter so the GMU isn't busy */
- gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+ if (adreno_is_a8xx(adreno_gpu))
+ gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+ else
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
/* Disable SPTP_PC if the CPU is responsible for it */
if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
@@ -506,10 +563,9 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}
out:
- a6xx_gemnoc_workaround(gmu);
-
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ a6xx_gemnoc_workaround(gmu);
return ret;
}
@@ -518,6 +574,9 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (!test_and_clear_bit(GMU_STATUS_PDC_SLEEP, &gmu->status))
+ return 0;
+
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, BIT(1));
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
@@ -542,17 +601,28 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ u32 bitmask = BIT(16);
int ret;
u32 val;
+ if (test_and_clear_bit(GMU_STATUS_FW_START, &gmu->status))
+ return;
+
+ if (adreno_is_a840(adreno_gpu))
+ bitmask = BIT(30);
+
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
- val, val & (1 << 16), 100, 10000);
+ val, val & bitmask, 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ set_bit(GMU_STATUS_PDC_SLEEP, &gmu->status);
}
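
The new GMU_STATUS_FW_START and GMU_STATUS_PDC_SLEEP bits make the RSC start/stop calls idempotent and correctly paired: a6xx_rpmh_start() is a no-op unless a prior stop actually put the PDC to sleep, and a6xx_rpmh_stop() is skipped when the firmware was just started. Condensed for illustration:

/*
 * Pairing introduced by the GMU_STATUS_* bits (logic condensed):
 *
 *   start:  if (!test_and_clear_bit(PDC_SLEEP))  -> nothing to wake
 *   fw ok:  set_bit(FW_START)
 *   stop:   if (test_and_clear_bit(FW_START))    -> start never ran, skip
 *           ...power off the RSC...
 *           set_bit(PDC_SLEEP)
 */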
static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
@@ -560,22 +630,24 @@ static inline void pdc_write(void __iomem *ptr, u32 offset, u32 value)
writel(value, ptr + (offset << 2));
}
-static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
- const char *name);
-
static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
- void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
u32 seqmem0_drv0_reg = REG_A6XX_RSCC_SEQ_MEM_0_DRV0;
void __iomem *seqptr = NULL;
uint32_t pdc_address_offset;
+ void __iomem *pdcptr;
bool pdc_in_aop = false;
+ /* On A8x and above, the RPMh/PDC configuration is handled entirely by AOP */
+ if (adreno_is_a8xx(adreno_gpu))
+ return;
+
+ pdcptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc");
if (IS_ERR(pdcptr))
- goto err;
+ return;
if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu))
@@ -588,9 +660,9 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_address_offset = 0x30080;
if (!pdc_in_aop) {
- seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+ seqptr = devm_platform_ioremap_resource_byname(pdev, "gmu_pdc_seq");
if (IS_ERR(seqptr))
- goto err;
+ return;
}
/* Disable SDE clock gating */
@@ -680,14 +752,6 @@ setup_pdc:
/* ensure no writes happen before the uCode is fully written */
wmb();
-
- a6xx_rpmh_stop(gmu);
-
-err:
- if (!IS_ERR_OR_NULL(pdcptr))
- iounmap(pdcptr);
- if (!IS_ERR_OR_NULL(seqptr))
- iounmap(seqptr);
}
/*
@@ -710,7 +774,7 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
/* A7xx knows better by default! */
- if (adreno_is_a7xx(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
return;
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
@@ -758,6 +822,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
return true;
}
+#define NEXT_BLK(blk) \
+ ((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
+
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
@@ -770,7 +837,9 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
u32 itcm_base = 0x00000000;
u32 dtcm_base = 0x00040000;
- if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu))
+ if (adreno_is_a650_family(adreno_gpu) ||
+ adreno_is_a7xx(adreno_gpu) ||
+ adreno_is_a8xx(adreno_gpu))
dtcm_base = 0x10004000;
if (gmu->legacy) {
@@ -789,7 +858,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
for (blk = (const struct block_header *) fw_image->data;
(const u8*) blk < fw_image->data + fw_image->size;
- blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+ blk = NEXT_BLK(blk)) {
if (blk->size == 0)
continue;
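
NEXT_BLK() advances by sizeof(*blk) + blk->size bytes, whereas the old &blk->data[blk->size >> 2] stepped in u32 units and silently truncated payload sizes that are not a multiple of four. The assumed block layout walked by this loop (field names as in the existing a6xx_gmu.c, shown for illustration):

/* assumed GMU firmware block layout, for illustration */
struct block_header {
	u32 addr;	/* load address of the payload */
	u32 size;	/* payload size in bytes */
	u32 type;
	u32 value;
	u32 data[];	/* 'size' bytes of payload follow */
};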
@@ -825,7 +894,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
const struct a6xx_info *a6xx_info = adreno_gpu->info->a6xx;
+ const struct adreno_reglist *gbif_cx = a6xx_info->gbif_cx;
u32 fence_range_lower, fence_range_upper;
u32 chipid = 0;
int ret;
@@ -834,27 +905,26 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
if (adreno_is_a650_family(adreno_gpu) || adreno_is_a7xx(adreno_gpu)) {
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
+ } else if (adreno_is_a8xx(adreno_gpu)) {
+ gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FALNEXT_INTF, 1);
+ gmu_write(gmu, REG_A8XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
}
/* Turn on TCM (Tightly Coupled Memory) retention */
if (adreno_is_a7xx(adreno_gpu))
a6xx_llc_write(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL, 1);
- else
+ else if (!adreno_is_a8xx(adreno_gpu))
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
- if (state == GMU_WARM_BOOT) {
- ret = a6xx_rpmh_start(gmu);
- if (ret)
- return ret;
- } else {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+
+ if (state == GMU_COLD_BOOT) {
if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
"GMU firmware is not loaded\n"))
return -ENOENT;
- ret = a6xx_rpmh_start(gmu);
- if (ret)
- return ret;
-
ret = a6xx_gmu_fw_load(gmu);
if (ret)
return ret;
@@ -868,7 +938,10 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
- if (adreno_is_a7xx(adreno_gpu)) {
+ if (adreno_is_a8xx(adreno_gpu)) {
+ fence_range_upper = 0x32;
+ fence_range_lower = 0x8c0;
+ } else if (adreno_is_a7xx(adreno_gpu)) {
fence_range_upper = 0x32;
fence_range_lower = 0x8a0;
} else {
@@ -902,7 +975,12 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
chipid |= (adreno_gpu->chip_id << 8) & 0x0f00; /* patchid */
}
- if (adreno_is_a7xx(adreno_gpu)) {
+ if (adreno_is_a8xx(adreno_gpu)) {
+ gmu_write(gmu, REG_A8XX_GMU_GENERAL_10, chipid);
+ gmu_write(gmu, REG_A8XX_GMU_GENERAL_8,
+ (gmu->log.iova & GENMASK(31, 12)) |
+ ((gmu->log.size / SZ_4K - 1) & GENMASK(7, 0)));
+ } else if (adreno_is_a7xx(adreno_gpu)) {
gmu_write(gmu, REG_A7XX_GMU_GENERAL_10, chipid);
gmu_write(gmu, REG_A7XX_GMU_GENERAL_8,
(gmu->log.iova & GENMASK(31, 12)) |
@@ -914,6 +992,15 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu->log.iova | (gmu->log.size / SZ_4K - 1));
}
+ /* For A7x and newer, do the CX GBIF configuration before GMU wake-up */
+ for (int i = 0; (gbif_cx && gbif_cx[i].offset); i++)
+ gpu_write(gpu, gbif_cx[i].offset, gbif_cx[i].value);
+
+ if (adreno_is_a8xx(adreno_gpu)) {
+ gpu_write(gpu, REG_A8XX_GBIF_CX_CONFIG, 0x20023000);
+ gmu_write(gmu, REG_A6XX_GMU_MRC_GBIF_QOS_CTRL, 0x33);
+ }
+
/* Set up the lowest idle level on the GMU */
a6xx_gmu_power_config(gmu);
@@ -925,10 +1012,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
ret = a6xx_gmu_gfx_rail_on(gmu);
if (ret)
return ret;
- }
- /* Enable SPTP_PC if the CPU is responsible for it */
- if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
ret = a6xx_sptprac_enable(gmu);
if (ret)
return ret;
@@ -968,7 +1052,7 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
u32 val, seqmem_off = 0;
/* The second spin of A7xx GPUs messed with some register offsets.. */
- if (adreno_is_a740_family(adreno_gpu))
+ if (adreno_is_a740_family(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
seqmem_off = 4;
/* Make sure there are no outstanding RPMh votes */
@@ -980,6 +1064,22 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
val, (val & 1), 100, 10000);
gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS + seqmem_off,
val, (val & 1), 100, 1000);
+
+ if (!adreno_is_a740_family(adreno_gpu) && !adreno_is_a8xx(adreno_gpu))
+ return;
+
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS4_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS5_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS6_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS7_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 1000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS8_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 10000);
+ gmu_poll_timeout_rscc(gmu, REG_A7XX_RSCC_TCS9_DRV0_STATUS + seqmem_off,
+ val, (val & 1), 100, 1000);
}
/* Force the GMU off in case it isn't responsive */
@@ -993,7 +1093,10 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
* Turn off keep alive that might have been enabled by the hang
* interrupt
*/
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+ if (adreno_is_a8xx(adreno_gpu))
+ gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+ else
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
/* Flush all the queues */
a6xx_hfi_stop(gmu);
@@ -1019,10 +1122,12 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Halt the gmu cm3 core */
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
- a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+ adreno_gpu->funcs->bus_halt(adreno_gpu, true);
/* Reset GPU core blocks */
a6xx_gpu_sw_reset(gpu, true);
+
+ a6xx_rpmh_stop(gmu);
}
static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
@@ -1086,6 +1191,9 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
return ret;
}
+ /* Read the slice info on A8x GPUs */
+ a8xx_gpu_get_slice_info(gpu);
+
/* Set the bus quota to a reasonable value for boot */
a6xx_gmu_set_initial_bw(gpu, gmu);
@@ -1095,7 +1203,7 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->gmu_irq);
/* Check to see if we are doing a cold or warm boot */
- if (adreno_is_a7xx(adreno_gpu)) {
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
status = a6xx_llc_read(a6xx_gpu, REG_A7XX_CX_MISC_TCM_RET_CNTL) == 1 ?
GMU_WARM_BOOT : GMU_COLD_BOOT;
} else if (gmu->legacy) {
@@ -1128,6 +1236,11 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
/* Set the GPU to the current freq */
a6xx_gmu_set_initial_freq(gpu, gmu);
+ if (refcount_read(&gpu->sysprof_active) > 1) {
+ ret = a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+ if (!ret)
+ set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status);
+ }
out:
/* On failure, shut down the GMU to leave it in a good state */
if (ret) {
@@ -1175,13 +1288,16 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
}
+ if (test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+
ret = a6xx_gmu_wait_for_idle(gmu);
/* If the GMU isn't responding assume it is hung */
if (ret)
goto force_off;
- a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
+ adreno_gpu->funcs->bus_halt(adreno_gpu, a6xx_gpu->hung);
/* tell the GMU we want to slumber */
ret = a6xx_gmu_notify_slumber(gmu);
@@ -1318,8 +1434,6 @@ static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu)
struct msm_mmu *mmu;
mmu = msm_iommu_new(gmu->dev, 0);
- if (!mmu)
- return -ENODEV;
if (IS_ERR(mmu))
return PTR_ERR(mmu);
@@ -1418,7 +1532,7 @@ static int a6xx_gmu_rpmh_bw_votes_init(struct adreno_gpu *adreno_gpu,
vote = clamp(peak, 1, BCM_TCS_CMD_VOTE_MASK);
/* GMUs on A7xx votes on both x & y */
- if (adreno_is_a7xx(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
data[bcm_index] = BCM_TCS_CMD(commit, true, vote, vote);
else
data[bcm_index] = BCM_TCS_CMD(commit, true, 0, vote);
@@ -1450,13 +1564,14 @@ static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
}
static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
- unsigned long *freqs, int freqs_count, const char *id)
+ unsigned long *freqs, int freqs_count,
+ const char *pri_id, const char *sec_id)
{
int i, j;
const u16 *pri, *sec;
size_t pri_count, sec_count;
- pri = cmd_db_read_aux_data(id, &pri_count);
+ pri = cmd_db_read_aux_data(pri_id, &pri_count);
if (IS_ERR(pri))
return PTR_ERR(pri);
/*
@@ -1467,13 +1582,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
if (!pri_count)
return -EINVAL;
- /*
- * Some targets have a separate gfx mxc rail. So try to read that first and then fall back
- * to regular mx rail if it is missing
- */
- sec = cmd_db_read_aux_data("gmxc.lvl", &sec_count);
- if (IS_ERR(sec) && sec != ERR_PTR(-EPROBE_DEFER))
- sec = cmd_db_read_aux_data("mx.lvl", &sec_count);
+ sec = cmd_db_read_aux_data(sec_id, &sec_count);
if (IS_ERR(sec))
return PTR_ERR(sec);
@@ -1527,6 +1636,57 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
return 0;
}
+static int a6xx_gmu_rpmh_dep_votes_init(struct device *dev, u32 *votes,
+ unsigned long *freqs, int freqs_count)
+{
+ const u16 *mx;
+ size_t count;
+
+ mx = cmd_db_read_aux_data("mx.lvl", &count);
+ if (IS_ERR(mx))
+ return PTR_ERR(mx);
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ count >>= 1;
+ if (!count)
+ return -EINVAL;
+
+ /* Fix the vote for zero frequency */
+ votes[0] = 0xffffffff;
+
+ /* Construct a vote for rest of the corners */
+ for (int i = 1; i < freqs_count; i++) {
+ unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+ u8 j, index = 0;
+
+ /* Get the primary index that matches the arc level */
+ for (j = 0; j < count; j++) {
+ if (mx[j] >= level) {
+ index = j;
+ break;
+ }
+ }
+
+ if (j == count) {
+ DRM_DEV_ERROR(dev,
+ "Mx Level %u not found in the RPMh list\n",
+ level);
+ DRM_DEV_ERROR(dev, "Available levels:\n");
+ for (j = 0; j < count; j++)
+ DRM_DEV_ERROR(dev, " %u\n", mx[j]);
+
+ return -EINVAL;
+ }
+
+ /* Construct the vote */
+ votes[i] = (0x3fff << 14) | (index << 8) | (0xff);
+ }
+
+ return 0;
+}
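
The packed word built above appears to follow the RPMh TCS vote layout; a minimal decode sketch, with the field meanings assumed (the patch itself only gives the raw shifts):

/*
 * Assumed layout of votes[i] = (0x3fff << 14) | (index << 8) | 0xff:
 *   bits [27:14] = 0x3fff  vote value (pinned to max here)
 *   bits [13:8]  = index   position of the matching mx.lvl entry
 *   bits [7:0]   = 0xff    assumed "all levels" marker
 */
static inline u32 example_dep_vote_mx_index(u32 vote)
{
	return (vote >> 8) & 0x3f;
}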
+
/*
* The GMU votes with the RPMh for itself and on behalf of the GPU but we need
* to construct the list of votes on the CPU and send it over. Query the RPMh
@@ -1541,15 +1701,27 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
const struct a6xx_info *info = adreno_gpu->info->a6xx;
struct msm_gpu *gpu = &adreno_gpu->base;
+ const char *sec_id;
+ const u16 *gmxc;
int ret;
+ gmxc = cmd_db_read_aux_data("gmxc.lvl", NULL);
+ if (gmxc == ERR_PTR(-EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ /* If GMxC is present, prefer that as secondary rail for GX votes */
+ sec_id = IS_ERR_OR_NULL(gmxc) ? "mx.lvl" : "gmxc.lvl";
+
/* Build the GX votes */
ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
- gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl");
+ gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl", sec_id);
/* Build the CX votes */
ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
- gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl");
+ gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl", "mx.lvl");
+
+ ret |= a6xx_gmu_rpmh_dep_votes_init(gmu->dev, gmu->dep_arc_votes,
+ gmu->gpu_freqs, gmu->nr_gpu_freqs);
/* Build the interconnect votes */
if (info->bcms && gmu->nr_gpu_bws > 1)
@@ -1692,6 +1864,7 @@ static int a6xx_gmu_acd_probe(struct a6xx_gmu *gmu)
u32 val;
freq = gmu->gpu_freqs[i];
+ /* This is unlikely to fail because we are passing back a known freq */
opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, freq, true);
np = dev_pm_opp_get_of_node(opp);
@@ -1752,27 +1925,6 @@ static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
return 0;
}
-static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
- const char *name)
-{
- void __iomem *ret;
- struct resource *res = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, name);
-
- if (!res) {
- DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
- return ERR_PTR(-EINVAL);
- }
-
- ret = ioremap(res->start, resource_size(res));
- if (!ret) {
- DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
- return ERR_PTR(-EINVAL);
- }
-
- return ret;
-}
-
static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
const char *name, irq_handler_t handler)
{
@@ -1790,11 +1942,39 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
return irq;
}
+void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int sysprof_active;
+
+ /* Nothing to do if the GPU is suspended. We will handle this during GMU resume */
+ if (!pm_runtime_get_if_active(&gpu->pdev->dev))
+ return;
+
+ mutex_lock(&gmu->lock);
+
+ sysprof_active = refcount_read(&gpu->sysprof_active);
+
+ /*
+ * 'Perfcounter select' register values are lost during IFPC collapse. To avoid that,
+ * use the currently unused perfcounter OOB vote to block IFPC when sysprof is active.
+ */
+ if ((sysprof_active > 1) && !test_and_set_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+ a6xx_gmu_set_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+ else if ((sysprof_active == 1) && test_and_clear_bit(GMU_STATUS_OOB_PERF_SET, &gmu->status))
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ mutex_unlock(&gmu->lock);
+
+ pm_runtime_put(&gpu->pdev->dev);
+}
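
A sketch of the assumed call pattern: a profiler increments gpu->sysprof_active elsewhere in msm (not shown in this patch) and then lets this helper reconcile the OOB vote:

/* Sketch only: hypothetical caller enabling system profiling */
static void example_sysprof_enable(struct msm_gpu *gpu)
{
	refcount_inc(&gpu->sysprof_active);	/* one more active profiler */
	a6xx_gmu_sysprof_setup(gpu);		/* take the perfcounter OOB vote */
}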
+
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- struct platform_device *pdev = to_platform_device(gmu->dev);
mutex_lock(&gmu->lock);
if (!gmu->initialized) {
@@ -1823,12 +2003,11 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
qmp_put(gmu->qmp);
iounmap(gmu->mmio);
- if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
- iounmap(gmu->rscc);
gmu->mmio = NULL;
gmu->rscc = NULL;
- if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+ if (!adreno_has_gmu_wrapper(adreno_gpu) &&
+ !adreno_has_rgmu(adreno_gpu)) {
a6xx_gmu_memory_free(gmu);
free_irq(gmu->gmu_irq, gmu);
@@ -1850,10 +2029,38 @@ static int cxpd_notifier_cb(struct notifier_block *nb,
return 0;
}
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name, resource_size_t *start)
+{
+ void __iomem *ret;
+ struct resource *res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+
+ if (!res) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to find the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = ioremap(res->start, resource_size(res));
+ if (!ret) {
+ DRM_DEV_ERROR(&pdev->dev, "Unable to map the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (start)
+ *start = res->start;
+
+ return ret;
+}
+
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
struct platform_device *pdev = of_find_device_by_node(node);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ resource_size_t start;
+ struct resource *res;
int ret;
if (!pdev)
@@ -1870,13 +2077,29 @@ int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Mark legacy for manual SPTPRAC control */
gmu->legacy = true;
+ /* RGMU requires clocks */
+ ret = devm_clk_bulk_get_all(gmu->dev, &gmu->clocks);
+ if (ret < 0)
+ goto err_clk;
+
+ gmu->nr_clocks = ret;
+
/* Map the GMU registers */
- gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start);
if (IS_ERR(gmu->mmio)) {
ret = PTR_ERR(gmu->mmio);
goto err_mmio;
}
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory");
+ if (!res) {
+ ret = -EINVAL;
+ goto err_mmio;
+ }
+
+ /* Compute the GMU block's offset from the GPU base address */
+ gmu->mmio_offset = (u32)(start - res->start);
+
gmu->cxpd = dev_pm_domain_attach_by_name(gmu->dev, "cx");
if (IS_ERR(gmu->cxpd)) {
ret = PTR_ERR(gmu->cxpd);
@@ -1909,6 +2132,7 @@ detach_cxpd:
err_mmio:
iounmap(gmu->mmio);
+err_clk:
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
@@ -1917,10 +2141,13 @@ err_mmio:
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
+ struct platform_device *pdev = of_find_device_by_node(node);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- struct platform_device *pdev = of_find_device_by_node(node);
struct device_link *link;
+ resource_size_t start;
+ struct resource *res;
int ret;
if (!pdev)
@@ -1932,8 +2159,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
return ret;
- /* Fow now, don't do anything fancy until we get our feet under us */
- gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+ /* Set GMU idle level */
+ gmu->idle_level = (adreno_gpu->info->quirks & ADRENO_QUIRK_IFPC) ?
+ GMU_IDLE_STATE_IFPC : GMU_IDLE_STATE_ACTIVE;
pm_runtime_enable(gmu->dev);
@@ -1955,13 +2183,14 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
*/
gmu->dummy.size = SZ_4K;
if (adreno_is_a660_family(adreno_gpu) ||
- adreno_is_a7xx(adreno_gpu)) {
+ adreno_is_a7xx(adreno_gpu) ||
+ adreno_is_a8xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
0x60400000, "debug");
if (ret)
goto err_memory;
- gmu->dummy.size = SZ_8K;
+ gmu->dummy.size = SZ_16K;
}
/* Allocate memory for the GMU dummy page */
@@ -1972,7 +2201,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
/* Note that a650 family also includes a660 family: */
if (adreno_is_a650_family(adreno_gpu) ||
- adreno_is_a7xx(adreno_gpu)) {
+ adreno_is_a7xx(adreno_gpu) ||
+ adreno_is_a8xx(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
@@ -2014,19 +2244,30 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
goto err_memory;
/* Map the GMU registers */
- gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu", &start);
if (IS_ERR(gmu->mmio)) {
ret = PTR_ERR(gmu->mmio);
goto err_memory;
}
+ res = platform_get_resource_byname(gpu->pdev, IORESOURCE_MEM, "kgsl_3d0_reg_memory");
+ if (!res) {
+ ret = -EINVAL;
+ goto err_mmio;
+ }
+
+ /* Compute the GMU block's offset from the GPU base address */
+ gmu->mmio_offset = (u32)(start - res->start);
+
if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu)) {
- gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+ gmu->rscc = devm_platform_ioremap_resource_byname(pdev, "rscc");
if (IS_ERR(gmu->rscc)) {
ret = -ENODEV;
goto err_mmio;
}
+ } else if (adreno_is_a8xx(adreno_gpu)) {
+ gmu->rscc = gmu->mmio + 0x19000;
} else {
gmu->rscc = gmu->mmio + 0x23000;
}
@@ -2100,8 +2341,6 @@ detach_cxpd:
err_mmio:
iounmap(gmu->mmio);
- if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
- iounmap(gmu->rscc);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index d1ce11131ba6..2af074c8e8cf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -19,8 +19,8 @@ struct a6xx_gmu_bo {
u64 iova;
};
-#define GMU_MAX_GX_FREQS 16
-#define GMU_MAX_CX_FREQS 4
+#define GMU_MAX_GX_FREQS 32
+#define GMU_MAX_CX_FREQS 6
#define GMU_MAX_BCMS 3
struct a6xx_bcm {
@@ -50,6 +50,9 @@ struct a6xx_bcm {
/* The GMU does not do any idle state management */
#define GMU_IDLE_STATE_ACTIVE 0
+/* Unknown power state. Not exposed by the firmware. For documentation purposes only */
+#define GMU_IDLE_STATE_RESERVED 1
+
/* The GMU manages SPTP power collapse */
#define GMU_IDLE_STATE_SPTP 2
@@ -65,6 +68,7 @@ struct a6xx_gmu {
struct drm_gpuvm *vm;
void __iomem *mmio;
+ u32 mmio_offset;
void __iomem *rscc;
int hfi_irq;
@@ -93,6 +97,7 @@ struct a6xx_gmu {
int nr_gpu_freqs;
unsigned long gpu_freqs[GMU_MAX_GX_FREQS];
u32 gx_arc_votes[GMU_MAX_GX_FREQS];
+ u32 dep_arc_votes[GMU_MAX_GX_FREQS];
struct a6xx_hfi_acd_table acd_table;
int nr_gpu_bws;
@@ -117,22 +122,33 @@ struct a6xx_gmu {
struct qmp *qmp;
struct a6xx_hfi_msg_bw_table *bw_table;
+
+/* To check whether we can trigger the sleep sequence at the PDC. Cleared in a6xx_rpmh_stop() */
+#define GMU_STATUS_FW_START 0
+/* To track whether the PDC sleep sequence was done */
+#define GMU_STATUS_PDC_SLEEP 1
+/* To track the perfcounter OOB set status */
+#define GMU_STATUS_OOB_PERF_SET 2
+ unsigned long status;
};
+#define GMU_BYTE_OFFSET(gmu, offset) (((offset) << 2) - (gmu)->mmio_offset)
+
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
{
- return readl(gmu->mmio + (offset << 2));
+ /* The 'offset' is based on GPU's start address. Adjust it */
+ return readl(gmu->mmio + GMU_BYTE_OFFSET(gmu, offset));
}
static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
{
- writel(value, gmu->mmio + (offset << 2));
+ writel(value, gmu->mmio + GMU_BYTE_OFFSET(gmu, offset));
}
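
The adjustment is easier to follow with concrete numbers; the addresses below are illustrative only:

/*
 * Example (made-up addresses): GPU block at 0x5000000, GMU block at
 * 0x506a000, so mmio_offset = 0x6a000. A register at dword offset
 * 0x1a880 from the GPU base then lands at
 *   (0x1a880 << 2) - 0x6a000 = 0x200
 * within the gmu->mmio mapping.
 */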
static inline void
gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
{
- memcpy_toio(gmu->mmio + (offset << 2), data, size);
+ memcpy_toio(gmu->mmio + GMU_BYTE_OFFSET(gmu, offset), data, size);
wmb();
}
@@ -149,14 +165,17 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
{
u64 val;
- val = (u64) readl(gmu->mmio + (lo << 2));
- val |= ((u64) readl(gmu->mmio + (hi << 2)) << 32);
+ val = gmu_read(gmu, lo);
+ val |= ((u64) gmu_read(gmu, hi) << 32);
return val;
}
#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
- readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
+ readl_poll_timeout((gmu)->mmio + (GMU_BYTE_OFFSET(gmu, addr)), val, \
+ cond, interval, timeout)
+#define gmu_poll_timeout_atomic(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout_atomic((gmu)->mmio + (GMU_BYTE_OFFSET(gmu, addr)), val, cond, \
interval, timeout)
static inline u32 gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 45dd5fd1c2bf..0200a7e71cdf 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -16,6 +16,97 @@
#define GPU_PAS_ID 13
+static u64 read_gmu_ao_counter(struct a6xx_gpu *a6xx_gpu)
+{
+ u64 count_hi, count_lo, temp;
+
+ do {
+ count_hi = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H);
+ count_lo = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L);
+ temp = gmu_read(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_H);
+ } while (unlikely(count_hi != temp));
+
+ return (count_hi << 32) | count_lo;
+}
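
The hi/lo/hi sequence guards against the low word wrapping between the two reads; a sketch of the torn read it rules out:

/*
 * Without the retry loop, with the counter at 0x1_ffff_ffff:
 *   read hi -> 0x1
 *   counter ticks over to 0x2_0000_0000
 *   read lo -> 0x0
 * The combined 0x1_0000_0000 is off by ~4 billion ticks; re-reading
 * hi and looping when it changed makes the pair self-consistent.
 */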
+
+static bool fence_status_check(struct msm_gpu *gpu, u32 offset, u32 value, u32 status, u32 mask)
+{
+ /* Success if !writedropped0/1 */
+ if (!(status & mask))
+ return true;
+
+ udelay(10);
+
+ /* Try to update fenced register again */
+ gpu_write(gpu, offset, value);
+
+ /*
+ * We can't do a posted write here because the power domain could be
+ * in a collapsed state. So use the heaviest barrier instead.
+ */
+ mb();
+ return false;
+}
+
+static int fenced_write(struct a6xx_gpu *a6xx_gpu, u32 offset, u32 value, u32 mask)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 status;
+
+ gpu_write(gpu, offset, value);
+
+ /* Nothing else to be done in the case of no-GMU */
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return 0;
+
+ /*
+ * We can't do a posted write here because the power domain could be
+ * in a collapsed state. So use the heaviest barrier instead.
+ */
+ mb();
+
+ if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status,
+ fence_status_check(gpu, offset, value, status, mask), 0, 1000))
+ return 0;
+
+ /* Try again for another 1ms before failing */
+ gpu_write(gpu, offset, value);
+ mb();
+
+ if (!gmu_poll_timeout(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS, status,
+ fence_status_check(gpu, offset, value, status, mask), 0, 1000)) {
+ /*
+ * The 'delay' warning is here because the pause to print this
+ * warning will allow the GPU to move to power collapse, which
+ * defeats the purpose of polling continuously for 2 ms.
+ */
+ dev_err_ratelimited(gmu->dev, "delay in fenced register write (0x%x)\n",
+ offset);
+ return 0;
+ }
+
+ dev_err_ratelimited(gmu->dev, "fenced register write (0x%x) fail\n",
+ offset);
+
+ return -ETIMEDOUT;
+}
+
+int a6xx_fenced_write(struct a6xx_gpu *a6xx_gpu, u32 offset, u64 value, u32 mask, bool is_64b)
+{
+ int ret;
+
+ ret = fenced_write(a6xx_gpu, offset, lower_32_bits(value), mask);
+ if (ret)
+ return ret;
+
+ if (!is_64b)
+ return 0;
+
+ ret = fenced_write(a6xx_gpu, offset + 1, upper_32_bits(value), mask);
+
+ return ret;
+}
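
A minimal usage sketch; the register name is hypothetical, and the BIT(0) mask matches the single writedropped bit polled above:

/* Sketch: a 64-bit fenced update, lo dword at offset, hi at offset + 1 */
static int example_fenced_write64(struct a6xx_gpu *a6xx_gpu, u64 iova)
{
	return a6xx_fenced_write(a6xx_gpu, REG_EXAMPLE_ADDR_LO /* hypothetical */,
				 iova, BIT(0), true);
}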
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -66,7 +157,7 @@ static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
}
}
-static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
@@ -86,7 +177,7 @@ static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Update HW if this is the current ring and we are not in preempt*/
if (!a6xx_in_preempt(a6xx_gpu)) {
if (a6xx_gpu->cur_ring == ring)
- gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false);
else
ring->restore_wptr = true;
} else {
@@ -133,7 +224,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, submit->seqno - 1);
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
- OUT_RING(ring, CP_SET_THREAD_BOTH);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BOTH);
/* Reset state used to synchronize BR and BV */
OUT_PKT7(ring, CP_RESET_CONTEXT_STATE, 1);
@@ -144,18 +235,31 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
CP_RESET_CONTEXT_STATE_0_RESET_GLOBAL_LOCAL_TS);
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
- OUT_RING(ring, CP_SET_THREAD_BR);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BOTH);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, LRZ_FLUSH_INVALIDATE);
+
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
}
if (!sysprof) {
- if (!adreno_is_a7xx(adreno_gpu)) {
+ if (!(adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))) {
/* Turn off protected mode to write to special registers */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 0);
}
- OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
- OUT_RING(ring, 1);
+ if (adreno_is_a8xx(adreno_gpu)) {
+ OUT_PKT4(ring, REG_A8XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ OUT_PKT4(ring, REG_A8XX_RBBM_SLICE_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ } else {
+ OUT_PKT4(ring, REG_A6XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
+ OUT_RING(ring, 1);
+ }
}
/* Execute the table update */
@@ -173,8 +277,8 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
* Needed for preemption
*/
OUT_PKT7(ring, CP_MEM_WRITE, 5);
- OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
- OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
+ OUT_RING(ring, A5XX_CP_MEM_WRITE_ADDR_LO(lower_32_bits(memptr)));
+ OUT_RING(ring, A5XX_CP_MEM_WRITE_ADDR_HI(upper_32_bits(memptr)));
OUT_RING(ring, lower_32_bits(ttbr));
OUT_RING(ring, upper_32_bits(ttbr));
OUT_RING(ring, ctx->seqno);
@@ -184,7 +288,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
* to make sure BV doesn't race ahead while BR is still switching
* pagetables.
*/
- if (adreno_is_a7xx(&a6xx_gpu->base)) {
+ if (adreno_is_a7xx(&a6xx_gpu->base) || adreno_is_a8xx(&a6xx_gpu->base)) {
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);
}
@@ -198,20 +302,22 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_RING(ring, CACHE_INVALIDATE);
if (!sysprof) {
+ u32 reg_status = adreno_is_a8xx(adreno_gpu) ?
+ REG_A8XX_RBBM_PERFCTR_SRAM_INIT_STATUS :
+ REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS;
/*
* Wait for SRAM clear after the pgtable update, so the
* two can happen in parallel:
*/
OUT_PKT7(ring, CP_WAIT_REG_MEM, 6);
OUT_RING(ring, CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ));
- OUT_RING(ring, CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
- REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS));
- OUT_RING(ring, CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0));
+ OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_LO(reg_status));
+ OUT_RING(ring, CP_WAIT_REG_MEM_POLL_ADDR_HI(0));
OUT_RING(ring, CP_WAIT_REG_MEM_3_REF(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_4_MASK(0x1));
OUT_RING(ring, CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0));
- if (!adreno_is_a7xx(adreno_gpu)) {
+ if (!(adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu))) {
/* Re-enable protected mode: */
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
@@ -284,7 +390,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
- OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH(2), 1);
OUT_RING(ring, submit->seqno);
/*
@@ -298,8 +404,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
OUT_RING(ring, submit->seqno);
- trace_msm_gpu_submit_flush(submit,
- gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+ trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu));
a6xx_flush(gpu, ring);
}
@@ -350,6 +455,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
+ u32 rbbm_perfctr_cp0, cp_always_on_counter;
unsigned int i, ibs = 0;
adreno_check_and_reenable_stall(adreno_gpu);
@@ -370,10 +476,16 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
if (gpu->nr_rings > 1)
a6xx_emit_set_pseudo_reg(ring, a6xx_gpu, submit->queue);
- get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
- rbmemptr_stats(ring, index, cpcycles_start));
- get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
- rbmemptr_stats(ring, index, alwayson_start));
+ if (adreno_is_a8xx(adreno_gpu)) {
+ rbbm_perfctr_cp0 = REG_A8XX_RBBM_PERFCTR_CP(0);
+ cp_always_on_counter = REG_A8XX_CP_ALWAYS_ON_COUNTER;
+ } else {
+ rbbm_perfctr_cp0 = REG_A7XX_RBBM_PERFCTR_CP(0);
+ cp_always_on_counter = REG_A6XX_CP_ALWAYS_ON_COUNTER;
+ }
+
+ get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_start));
+ get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_start));
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BOTH);
@@ -420,14 +532,17 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_RING(ring, 0x00e); /* IB1LIST end */
}
- get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
- rbmemptr_stats(ring, index, cpcycles_end));
- get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER,
- rbmemptr_stats(ring, index, alwayson_end));
+ get_stats_counter(ring, rbbm_perfctr_cp0, rbmemptr_stats(ring, index, cpcycles_end));
+ get_stats_counter(ring, cp_always_on_counter, rbmemptr_stats(ring, index, alwayson_end));
/* Write the fence to the scratch register */
- OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
- OUT_RING(ring, submit->seqno);
+ if (adreno_is_a8xx(adreno_gpu)) {
+ OUT_PKT4(ring, REG_A8XX_CP_SCRATCH_GLOBAL(2), 1);
+ OUT_RING(ring, submit->seqno);
+ } else {
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH(2), 1);
+ OUT_RING(ring, submit->seqno);
+ }
OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
OUT_RING(ring, CP_SET_THREAD_BR);
@@ -499,8 +614,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
}
- trace_msm_gpu_submit_flush(submit,
- gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER));
+ trace_msm_gpu_submit_flush(submit, read_gmu_ao_counter(a6xx_gpu));
a6xx_flush(gpu, ring);
@@ -523,15 +637,26 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
if (adreno_is_a630(adreno_gpu))
clock_cntl_on = 0x8aa8aa02;
- else if (adreno_is_a610(adreno_gpu))
+ else if (adreno_is_a610(adreno_gpu) || adreno_is_a612(adreno_gpu))
clock_cntl_on = 0xaaa8aa82;
else if (adreno_is_a702(adreno_gpu))
clock_cntl_on = 0xaaaaaa82;
else
clock_cntl_on = 0x8aa8aa82;
- cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111;
- cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555;
+ if (adreno_is_a612(adreno_gpu))
+ cgc_delay = 0x11;
+ else if (adreno_is_a615_family(adreno_gpu))
+ cgc_delay = 0x111;
+ else
+ cgc_delay = 0x10111;
+
+ if (adreno_is_a612(adreno_gpu))
+ cgc_hyst = 0x55;
+ else if (adreno_is_a615_family(adreno_gpu))
+ cgc_hyst = 0x555;
+ else
+ cgc_hyst = 0x5555;
gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
@@ -617,14 +742,20 @@ static int a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
/* Copy the data into the internal struct to drop the const qualifier (temporarily) */
*cfg = *common_cfg;
- cfg->ubwc_swizzle = 0x6;
- cfg->highest_bank_bit = 15;
+ /* Use common config as is for A8x */
+ if (!adreno_is_a8xx(gpu)) {
+ cfg->ubwc_swizzle = 0x6;
+ cfg->highest_bank_bit = 15;
+ }
if (adreno_is_a610(gpu)) {
cfg->highest_bank_bit = 13;
cfg->ubwc_swizzle = 0x7;
}
+ if (adreno_is_a612(gpu))
+ cfg->highest_bank_bit = 14;
+
if (adreno_is_a618(gpu))
cfg->highest_bank_bit = 14;
@@ -739,11 +870,10 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
u32 *dest = (u32 *)&lock->regs[0];
int i;
- reglist = adreno_gpu->info->a6xx->pwrup_reglist;
-
lock->gpu_req = lock->cpu_req = lock->turn = 0;
- lock->ifpc_list_len = 0;
- lock->preemption_list_len = reglist->count;
+
+ reglist = adreno_gpu->info->a6xx->ifpc_reglist;
+ lock->ifpc_list_len = reglist->count;
/*
* For each entry in each of the lists, write the offset and the current
@@ -754,6 +884,14 @@ static void a7xx_patch_pwrup_reglist(struct msm_gpu *gpu)
*dest++ = gpu_read(gpu, reglist->regs[i]);
}
+ reglist = adreno_gpu->info->a6xx->pwrup_reglist;
+ lock->preemption_list_len = reglist->count;
+
+ for (i = 0; i < reglist->count; i++) {
+ *dest++ = reglist->regs[i];
+ *dest++ = gpu_read(gpu, reglist->regs[i]);
+ }
+
/*
* The overall register list is composed of
* 1. Static IFPC-only registers
@@ -897,7 +1035,7 @@ static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
return false;
/* A7xx is safe! */
- if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu))
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a702(adreno_gpu) || adreno_is_a8xx(adreno_gpu))
return true;
/*
@@ -980,6 +1118,23 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
}
}
+ if (!a6xx_gpu->aqe_bo && adreno_gpu->fw[ADRENO_FW_AQE]) {
+ a6xx_gpu->aqe_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_AQE], &a6xx_gpu->aqe_iova);
+
+ if (IS_ERR(a6xx_gpu->aqe_bo)) {
+ int ret = PTR_ERR(a6xx_gpu->aqe_bo);
+
+ a6xx_gpu->aqe_bo = NULL;
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Could not allocate AQE ucode: %d\n", ret);
+
+ return ret;
+ }
+
+ msm_gem_object_set_name(a6xx_gpu->aqe_bo, "aqefw");
+ }
+
/*
* Expanded APRIV and targets that support WHERE_AM_I both need a
* privileged buffer to store the RPTR shadow
@@ -1011,7 +1166,7 @@ static int a6xx_ucode_load(struct msm_gpu *gpu)
return 0;
}
-static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+int a6xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
int ret;
@@ -1124,17 +1279,20 @@ static int hw_init(struct msm_gpu *gpu)
/* enable hardware clockgating */
a6xx_set_hwcg(gpu, true);
- /* VBIF/GBIF start*/
- if (adreno_is_a610_family(adreno_gpu) ||
- adreno_is_a640_family(adreno_gpu) ||
- adreno_is_a650_family(adreno_gpu) ||
- adreno_is_a7xx(adreno_gpu)) {
+ /* For GMU wrapper implementations, do the VBIF/GBIF CX configuration here */
+ if (adreno_is_a610_family(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
- gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL,
- adreno_is_a7xx(adreno_gpu) ? 0x2120212 : 0x3);
+ }
+
+ if (adreno_is_a610_family(adreno_gpu) ||
+ adreno_is_a640_family(adreno_gpu) ||
+ adreno_is_a650_family(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+ } else if (adreno_is_a7xx(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x2120212);
} else {
gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
}
@@ -1189,10 +1347,10 @@ static int hw_init(struct msm_gpu *gpu)
}
if (adreno_is_a660_family(adreno_gpu))
- gpu_write(gpu, REG_A6XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
+ gpu_write(gpu, REG_A7XX_CP_LPAC_PROG_FIFO_SIZE, 0x00000020);
/* Setting the mem pool size */
- if (adreno_is_a610(adreno_gpu)) {
+ if (adreno_is_a610(adreno_gpu) || adreno_is_a612(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 48);
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_DBG_ADDR, 47);
} else if (adreno_is_a702(adreno_gpu)) {
@@ -1225,7 +1383,8 @@ static int hw_init(struct msm_gpu *gpu)
a6xx_set_ubwc_config(gpu);
/* Enable fault detection */
- if (adreno_is_a730(adreno_gpu) ||
+ if (adreno_is_a612(adreno_gpu) ||
+ adreno_is_a730(adreno_gpu) ||
adreno_is_a740_family(adreno_gpu))
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL, (1 << 30) | 0xcfffff);
else if (adreno_is_a690(adreno_gpu))
@@ -1241,14 +1400,14 @@ static int hw_init(struct msm_gpu *gpu)
/* Set weights for bicubic filtering */
if (adreno_is_a650_family(adreno_gpu) || adreno_is_x185(adreno_gpu)) {
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), 0);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(1),
0x3fe05ff4);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
0x3fa0ebee);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
0x3f5193ed);
- gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
0x3f0243f0);
}
@@ -1444,25 +1603,25 @@ static void a6xx_recover(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- int i, active_submits;
+ int active_submits;
adreno_dump_info(gpu);
- for (i = 0; i < 8; i++)
- DRM_DEV_INFO(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+ if (a6xx_gmu_gx_is_on(&a6xx_gpu->gmu)) {
+ /* Sometimes crashstate capture is skipped, so SQE should be halted here again */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
+ if (hang_debug)
+ a6xx_dump(gpu);
- if (hang_debug)
- a6xx_dump(gpu);
+ }
/*
* To handle recovery specific sequences during the rpm suspend we are
* about to trigger
*/
- a6xx_gpu->hung = true;
- /* Halt SQE first */
- gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+ a6xx_gpu->hung = true;
pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
@@ -1476,9 +1635,9 @@ static void a6xx_recover(struct msm_gpu *gpu)
*/
gpu->active_submits = 0;
- if (adreno_has_gmu_wrapper(adreno_gpu)) {
+ if (adreno_has_gmu_wrapper(adreno_gpu) || adreno_has_rgmu(adreno_gpu)) {
/* Drain the outstanding traffic on memory buses */
- a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+ adreno_gpu->funcs->bus_halt(adreno_gpu, true);
/* Reset the GPU to a clean state */
a6xx_gpu_sw_reset(gpu, true);
@@ -1637,10 +1796,10 @@ static int a6xx_fault_handler(void *arg, unsigned long iova, int flags, void *da
const char *block = "unknown";
u32 scratch[] = {
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
- gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH(7)),
};
if (info)
@@ -1693,8 +1852,6 @@ static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
/*
@@ -1706,13 +1863,6 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
if (gpu_read(gpu, REG_A6XX_RBBM_STATUS3) & A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT)
return;
- /*
- * Force the GPU to stay on until after we finish
- * collecting information
- */
- if (!adreno_has_gmu_wrapper(adreno_gpu))
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
-
DRM_DEV_ERROR(&gpu->pdev->dev,
"gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
@@ -1727,6 +1877,9 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
/* Turn off the hangcheck timer to keep it from bothering us */
timer_delete(&gpu->hangcheck_timer);
+ /* Turn off interrupts to avoid triggering recovery again */
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, 0);
+
kthread_queue_work(gpu->worker, &gpu->recover_work);
}
@@ -1751,9 +1904,49 @@ static void a7xx_sw_fuse_violation_irq(struct msm_gpu *gpu)
}
}
+static void a6xx_gpu_keepalive_vote(struct msm_gpu *gpu, bool on)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, on);
+}
+
+static int irq_poll_fence(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 status;
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return 0;
+
+ if (gmu_poll_timeout_atomic(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, status, !status, 1, 100)) {
+ u32 rbbm_unmasked = gmu_read(gmu, REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "irq fence poll timeout, fence_ctrl=0x%x, unmasked_status=0x%x\n",
+ status, rbbm_unmasked);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
struct msm_drm_private *priv = gpu->dev->dev_private;
+
+ /* Set keepalive vote to avoid power collapse after RBBM_INT_0_STATUS is read */
+ a6xx_gpu_keepalive_vote(gpu, true);
+
+ if (irq_poll_fence(gpu))
+ goto done;
+
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
@@ -1790,6 +1983,9 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
a6xx_preempt_irq(gpu);
+done:
+ a6xx_gpu_keepalive_vote(gpu, false);
+
return IRQ_HANDLED;
}
@@ -1935,7 +2131,7 @@ static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu)
u32 fuse_val;
int ret;
- if (adreno_is_a750(adreno_gpu)) {
+ if (adreno_is_a750(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
/*
* Assume that if qcom scm isn't available, that whatever
* replacement allows writing the fuse register ourselves.
@@ -1961,9 +2157,9 @@ static int a7xx_cx_mem_init(struct a6xx_gpu *a6xx_gpu)
return ret;
/*
- * On a750 raytracing may be disabled by the firmware, find out
- * whether that's the case. The scm call above sets the fuse
- * register.
+ * On A7XX_GEN3 and newer, raytracing may be disabled by the
+ * firmware; find out whether that's the case. The scm call
+ * above sets the fuse register.
*/
fuse_val = a6xx_llc_read(a6xx_gpu,
REG_A7XX_CX_MISC_SW_FUSE_VALUE);
@@ -2024,7 +2220,7 @@ void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert)
{
/* 11nm chips (e.g. ones with A610) have hw issues with the reset line! */
- if (adreno_is_a610(to_adreno_gpu(gpu)))
+ if (adreno_is_a610(to_adreno_gpu(gpu)) || adreno_is_a8xx(to_adreno_gpu(gpu)))
return;
gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, assert);
@@ -2055,7 +2251,12 @@ static int a6xx_gmu_pm_resume(struct msm_gpu *gpu)
msm_devfreq_resume(gpu);
- adreno_is_a7xx(adreno_gpu) ? a7xx_llc_activate(a6xx_gpu) : a6xx_llc_activate(a6xx_gpu);
+ if (adreno_is_a8xx(adreno_gpu))
+ a8xx_llc_activate(a6xx_gpu);
+ else if (adreno_is_a7xx(adreno_gpu))
+ a7xx_llc_activate(a6xx_gpu);
+ else
+ a6xx_llc_activate(a6xx_gpu);
return ret;
}
@@ -2092,6 +2293,12 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
if (ret)
goto err_bulk_clk;
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret) {
+ clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
+ goto err_bulk_clk;
+ }
+
if (adreno_is_a619_holi(adreno_gpu))
a6xx_sptprac_enable(gmu);
@@ -2105,8 +2312,10 @@ err_bulk_clk:
err_set_opp:
mutex_unlock(&a6xx_gpu->gmu.lock);
- if (!ret)
+ if (!ret) {
msm_devfreq_resume(gpu);
+ a6xx_llc_activate(a6xx_gpu);
+ }
return ret;
}
@@ -2147,17 +2356,20 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
trace_msm_gpu_suspend(0);
+ a6xx_llc_deactivate(a6xx_gpu);
+
msm_devfreq_suspend(gpu);
mutex_lock(&a6xx_gpu->gmu.lock);
/* Drain the outstanding traffic on memory buses */
- a6xx_bus_clear_pending_transactions(adreno_gpu, true);
+ adreno_gpu->funcs->bus_halt(adreno_gpu, true);
if (adreno_is_a619_holi(adreno_gpu))
a6xx_sptprac_disable(gmu);
clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
pm_runtime_put_sync(gmu->gxpd);
dev_pm_opp_set_opp(&gpu->pdev->dev, NULL);
@@ -2179,16 +2391,7 @@ static int a6xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- mutex_lock(&a6xx_gpu->gmu.lock);
-
- /* Force the GPU power on so we can read this register */
- a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-
- *value = gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER);
-
- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
-
- mutex_unlock(&a6xx_gpu->gmu.lock);
+ *value = read_gmu_ao_counter(a6xx_gpu);
return 0;
}
@@ -2217,6 +2420,11 @@ static void a6xx_destroy(struct msm_gpu *gpu)
drm_gem_object_put(a6xx_gpu->sqe_bo);
}
+ if (a6xx_gpu->aqe_bo) {
+ msm_gem_unpin_iova(a6xx_gpu->aqe_bo, gpu->vm);
+ drm_gem_object_put(a6xx_gpu->aqe_bo);
+ }
+
if (a6xx_gpu->shadow_bo) {
msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->vm);
drm_gem_object_put(a6xx_gpu->shadow_bo);
@@ -2298,18 +2506,36 @@ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
return a6xx_gpu->shadow[ring->id];
+ /*
+ * This is reached only on an A6XX_GEN1 with a GMU, IFPC enabled and a
+ * super old SQE firmware without 'whereami' support.
+ */
+ WARN_ONCE((to_adreno_gpu(gpu)->info->quirks & ADRENO_QUIRK_IFPC),
+ "Can't read CP_RB_RPTR register reliably\n");
+
return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
}
static bool a6xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
- struct msm_cp_state cp_state = {
+ struct msm_cp_state cp_state;
+ bool progress;
+
+ /*
+ * With IFPC, the KMD doesn't know whether the GX power domain is
+ * collapsed or not, so we can't blindly read the below registers,
+ * which live in the GX domain. Let's trust the hang detection in HW
+ * and lie to the caller that there was progress.
+ */
+ if (to_adreno_gpu(gpu)->info->quirks & ADRENO_QUIRK_IFPC)
+ return true;
+
+ cp_state = (struct msm_cp_state) {
.ib1_base = gpu_read64(gpu, REG_A6XX_CP_IB1_BASE),
.ib2_base = gpu_read64(gpu, REG_A6XX_CP_IB2_BASE),
.ib1_rem = gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
.ib2_rem = gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE),
};
- bool progress;
/*
* Adjust the remaining data to account for what has already been
@@ -2381,7 +2607,105 @@ static int a6xx_set_supported_hw(struct device *dev, const struct adreno_info *i
return 0;
}
-static const struct adreno_gpu_funcs funcs = {
+static struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct device_node *node;
+ struct a6xx_gpu *a6xx_gpu;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ extern int enable_preemption;
+ bool is_a7xx;
+ int ret, nr_rings = 1;
+
+ a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
+ if (!a6xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a6xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ mutex_init(&a6xx_gpu->gmu.lock);
+
+ adreno_gpu->registers = NULL;
+
+ /* Check if there is a GMU phandle and set it up */
+ node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
+ /* FIXME: How do we gracefully handle this? */
+ BUG_ON(!node);
+
+ adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
+
+ adreno_gpu->base.hw_apriv =
+ !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
+
+ /* gpu->info only gets assigned in adreno_gpu_init(). A8xx is included intentionally */
+ is_a7xx = config->info->family >= ADRENO_7XX_GEN1;
+
+ a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
+
+ ret = a6xx_set_supported_hw(&pdev->dev, config->info);
+ if (ret) {
+ a6xx_llc_slices_destroy(a6xx_gpu);
+ kfree(a6xx_gpu);
+ return ERR_PTR(ret);
+ }
+
+ if ((enable_preemption == 1) || (enable_preemption == -1 &&
+ (config->info->quirks & ADRENO_QUIRK_PREEMPTION)))
+ nr_rings = 4;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, config->info->funcs, nr_rings);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * For now only clamp to idle freq for devices where this is known not
+ * to cause power supply issues:
+ */
+ if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
+ priv->gpu_clamp_to_idle = true;
+
+ if (adreno_has_gmu_wrapper(adreno_gpu) || adreno_has_rgmu(adreno_gpu))
+ ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
+ else
+ ret = a6xx_gmu_init(a6xx_gpu, node);
+ of_node_put(node);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (adreno_is_a7xx(adreno_gpu) || adreno_is_a8xx(adreno_gpu)) {
+ ret = a7xx_cx_mem_init(a6xx_gpu);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+ }
+
+ adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
+
+ msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
+ adreno_gpu->funcs->mmu_fault_handler);
+
+ ret = a6xx_calc_ubwc_config(adreno_gpu);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a6xx_preempt_init(gpu);
+
+ return gpu;
+}
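
With the funcs table now reached through config->info->funcs, each catalog entry is expected to carry a pointer to one of the tables exported below; a hedged sketch of the assumed wiring:

/* Sketch: hypothetical catalog entry; only the .funcs wiring is the point */
static const struct adreno_info example_info = {
	.family = ADRENO_7XX_GEN2,
	.funcs = &a7xx_gpu_funcs,	/* consumed via config->info->funcs above */
	/* ... */
};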
+
+const struct adreno_gpu_funcs a6xx_gpu_funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
@@ -2409,10 +2733,13 @@ static const struct adreno_gpu_funcs funcs = {
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
+ .init = a6xx_gpu_init,
.get_timestamp = a6xx_gmu_get_timestamp,
+ .bus_halt = a6xx_bus_clear_pending_transactions,
+ .mmu_fault_handler = a6xx_fault_handler,
};
-static const struct adreno_gpu_funcs funcs_gmuwrapper = {
+const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
@@ -2438,10 +2765,13 @@ static const struct adreno_gpu_funcs funcs_gmuwrapper = {
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
+ .init = a6xx_gpu_init,
.get_timestamp = a6xx_get_timestamp,
+ .bus_halt = a6xx_bus_clear_pending_transactions,
+ .mmu_fault_handler = a6xx_fault_handler,
};
-static const struct adreno_gpu_funcs funcs_a7xx = {
+const struct adreno_gpu_funcs a7xx_gpu_funcs = {
.base = {
.get_param = adreno_get_param,
.set_param = adreno_set_param,
@@ -2469,109 +2799,35 @@ static const struct adreno_gpu_funcs funcs_a7xx = {
.get_rptr = a6xx_get_rptr,
.progress = a6xx_progress,
},
+ .init = a6xx_gpu_init,
.get_timestamp = a6xx_gmu_get_timestamp,
+ .bus_halt = a6xx_bus_clear_pending_transactions,
+ .mmu_fault_handler = a6xx_fault_handler,
};
-struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct platform_device *pdev = priv->gpu_pdev;
- struct adreno_platform_config *config = pdev->dev.platform_data;
- struct device_node *node;
- struct a6xx_gpu *a6xx_gpu;
- struct adreno_gpu *adreno_gpu;
- struct msm_gpu *gpu;
- extern int enable_preemption;
- bool is_a7xx;
- int ret;
-
- a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
- if (!a6xx_gpu)
- return ERR_PTR(-ENOMEM);
-
- adreno_gpu = &a6xx_gpu->base;
- gpu = &adreno_gpu->base;
-
- mutex_init(&a6xx_gpu->gmu.lock);
-
- adreno_gpu->registers = NULL;
-
- /* Check if there is a GMU phandle and set it up */
- node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
- /* FIXME: How do we gracefully handle this? */
- BUG_ON(!node);
-
- adreno_gpu->gmu_is_wrapper = of_device_is_compatible(node, "qcom,adreno-gmu-wrapper");
-
- adreno_gpu->base.hw_apriv =
- !!(config->info->quirks & ADRENO_QUIRK_HAS_HW_APRIV);
-
- /* gpu->info only gets assigned in adreno_gpu_init() */
- is_a7xx = config->info->family == ADRENO_7XX_GEN1 ||
- config->info->family == ADRENO_7XX_GEN2 ||
- config->info->family == ADRENO_7XX_GEN3;
-
- a6xx_llc_slices_init(pdev, a6xx_gpu, is_a7xx);
-
- ret = a6xx_set_supported_hw(&pdev->dev, config->info);
- if (ret) {
- a6xx_llc_slices_destroy(a6xx_gpu);
- kfree(a6xx_gpu);
- return ERR_PTR(ret);
- }
-
- if ((enable_preemption == 1) || (enable_preemption == -1 &&
- (config->info->quirks & ADRENO_QUIRK_PREEMPTION)))
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 4);
- else if (is_a7xx)
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_a7xx, 1);
- else if (adreno_has_gmu_wrapper(adreno_gpu))
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs_gmuwrapper, 1);
- else
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
- if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
- return ERR_PTR(ret);
- }
-
- /*
- * For now only clamp to idle freq for devices where this is known not
- * to cause power supply issues:
- */
- if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
- priv->gpu_clamp_to_idle = true;
-
- if (adreno_has_gmu_wrapper(adreno_gpu))
- ret = a6xx_gmu_wrapper_init(a6xx_gpu, node);
- else
- ret = a6xx_gmu_init(a6xx_gpu, node);
- of_node_put(node);
- if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
- return ERR_PTR(ret);
- }
-
- if (adreno_is_a7xx(adreno_gpu)) {
- ret = a7xx_cx_mem_init(a6xx_gpu);
- if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
- return ERR_PTR(ret);
- }
- }
-
- adreno_gpu->uche_trap_base = 0x1fffffffff000ull;
-
- msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu,
- a6xx_fault_handler);
-
- ret = a6xx_calc_ubwc_config(adreno_gpu);
- if (ret) {
- a6xx_destroy(&(a6xx_gpu->base.base));
- return ERR_PTR(ret);
- }
-
- /* Set up the preemption specific bits and pieces for each ringbuffer */
- a6xx_preempt_init(gpu);
-
- return gpu;
-}
+const struct adreno_gpu_funcs a8xx_gpu_funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .set_param = adreno_set_param,
+ .hw_init = a8xx_hw_init,
+ .ucode_load = a6xx_ucode_load,
+ .pm_suspend = a6xx_gmu_pm_suspend,
+ .pm_resume = a6xx_gmu_pm_resume,
+ .recover = a8xx_recover,
+ .submit = a7xx_submit,
+ .active_ring = a6xx_active_ring,
+ .irq = a8xx_irq,
+ .destroy = a6xx_destroy,
+ .gpu_busy = a8xx_gpu_busy,
+ .gpu_get_freq = a6xx_gmu_get_freq,
+ .gpu_set_freq = a6xx_gpu_set_freq,
+ .create_vm = a6xx_create_vm,
+ .create_private_vm = a6xx_create_private_vm,
+ .get_rptr = a6xx_get_rptr,
+ .progress = a8xx_progress,
+ },
+ .init = a6xx_gpu_init,
+ .get_timestamp = a8xx_gmu_get_timestamp,
+ .bus_halt = a8xx_bus_clear_pending_transactions,
+ .mmu_fault_handler = a8xx_fault_handler,
+};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 6e71f617fc3d..6820216ec5fc 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -45,6 +45,10 @@ struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
const struct adreno_reglist_list *pwrup_reglist;
+ const struct adreno_reglist_list *ifpc_reglist;
+ const struct adreno_reglist *gbif_cx;
+ const struct adreno_reglist_pipe *nonctxt_reglist;
+ u32 max_slices;
u32 gmu_chipid;
u32 gmu_cgc_mode;
u32 prim_fifo_threshold;
@@ -56,6 +60,8 @@ struct a6xx_gpu {
struct drm_gem_object *sqe_bo;
uint64_t sqe_iova;
+ struct drm_gem_object *aqe_bo;
+ uint64_t aqe_iova;
struct msm_ringbuffer *cur_ring;
struct msm_ringbuffer *next_ring;
@@ -100,6 +106,11 @@ struct a6xx_gpu {
void *htw_llc_slice;
bool have_mmu500;
bool hung;
+
+ u32 cached_aperture;
+ spinlock_t aperture_lock;
+
+ u32 slice_mask;
};
#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
@@ -215,6 +226,11 @@ struct a7xx_cp_smmu_info {
#define A6XX_PROTECT_RDONLY(_reg, _len) \
((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+extern const struct adreno_gpu_funcs a6xx_gpu_funcs;
+extern const struct adreno_gpu_funcs a6xx_gmuwrapper_funcs;
+extern const struct adreno_gpu_funcs a7xx_gpu_funcs;
+extern const struct adreno_gpu_funcs a8xx_gpu_funcs;
+
static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
{
if(adreno_is_a630(gpu))
@@ -254,6 +270,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
int a6xx_gmu_wrapper_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+void a6xx_gmu_sysprof_setup(struct msm_gpu *gpu);
void a6xx_preempt_init(struct msm_gpu *gpu);
void a6xx_preempt_hw_init(struct msm_gpu *gpu);
@@ -295,5 +312,20 @@ int a6xx_gpu_state_put(struct msm_gpu_state *state);
void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
void a6xx_gpu_sw_reset(struct msm_gpu *gpu, bool assert);
-
+int a6xx_fenced_write(struct a6xx_gpu *gpu, u32 offset, u64 value, u32 mask, bool is_64b);
+void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+int a6xx_zap_shader_init(struct msm_gpu *gpu);
+
+void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off);
+int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data);
+void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value);
+u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate);
+int a8xx_gpu_feature_probe(struct msm_gpu *gpu);
+void a8xx_gpu_get_slice_info(struct msm_gpu *gpu);
+int a8xx_hw_init(struct msm_gpu *gpu);
+irqreturn_t a8xx_irq(struct msm_gpu *gpu);
+void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu);
+bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+void a8xx_recover(struct msm_gpu *gpu);
#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index d5d1271fce61..d2d6b2fd3cba 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -1255,7 +1255,7 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
return;
/* Set the fence to ALLOW mode so we can access the registers */
- gpu_write(gpu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[2],
&a6xx_state->gmu_registers[3], false);
@@ -1586,8 +1586,7 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gpu_state *a6xx_state = kzalloc(sizeof(*a6xx_state),
GFP_KERNEL);
- bool stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
- A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
+ bool stalled;
if (!a6xx_state)
return ERR_PTR(-ENOMEM);
@@ -1597,7 +1596,8 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
/* Get the generic state from the adreno core */
adreno_gpu_state_get(gpu, &a6xx_state->base);
- if (!adreno_has_gmu_wrapper(adreno_gpu)) {
+ if (!adreno_has_gmu_wrapper(adreno_gpu) &&
+ !adreno_has_rgmu(adreno_gpu)) {
a6xx_get_gmu_registers(gpu, a6xx_state);
a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
@@ -1608,15 +1608,20 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
}
/* If GX isn't on the rest of the data isn't going to be accessible */
- if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
+ if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return &a6xx_state->base;
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
+
/* Get the banks of indexed registers */
if (adreno_is_a7xx(adreno_gpu))
a7xx_get_indexed_registers(gpu, a6xx_state);
else
a6xx_get_indexed_registers(gpu, a6xx_state);
+ stalled = !!(gpu_read(gpu, REG_A6XX_RBBM_STATUS3) &
+ A6XX_RBBM_STATUS3_SMMU_STALLED_ON_FAULT);
/*
* Try to initialize the crashdumper, if we are not dumping state
* with the SMMU stalled. The crashdumper needs memory access to
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 1c18499b60bb..b49d8427b59e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -71,8 +71,8 @@ static const struct a6xx_cluster {
u32 sel_val;
} a6xx_clusters[] = {
CLUSTER(CLUSTER_GRAS, a6xx_gras_cluster, 0, 0),
- CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
- CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rac, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 0x0),
+ CLUSTER(CLUSTER_PS, a6xx_ps_cluster_rbp, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 0x9),
CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
@@ -303,8 +303,8 @@ static const u32 a660_registers[] = {
static const struct a6xx_registers a6xx_reglist[] = {
REGS(a6xx_registers, 0, 0),
REGS(a660_registers, 0, 0),
- REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
- REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
+ REGS(a6xx_rb_rac_registers, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
+ REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
};
static const u32 a6xx_ahb_registers[] = {
@@ -343,48 +343,48 @@ static const struct a6xx_registers a6xx_gbif_reglist =
static const u32 a6xx_gmu_gx_registers[] = {
/* GMU GX */
- 0x0000, 0x0000, 0x0010, 0x0013, 0x0016, 0x0016, 0x0018, 0x001b,
- 0x001e, 0x001e, 0x0020, 0x0023, 0x0026, 0x0026, 0x0028, 0x002b,
- 0x002e, 0x002e, 0x0030, 0x0033, 0x0036, 0x0036, 0x0038, 0x003b,
- 0x003e, 0x003e, 0x0040, 0x0043, 0x0046, 0x0046, 0x0080, 0x0084,
- 0x0100, 0x012b, 0x0140, 0x0140,
+ 0x1a800, 0x1a800, 0x1a810, 0x1a813, 0x1a816, 0x1a816, 0x1a818, 0x1a81b,
+ 0x1a81e, 0x1a81e, 0x1a820, 0x1a823, 0x1a826, 0x1a826, 0x1a828, 0x1a82b,
+ 0x1a82e, 0x1a82e, 0x1a830, 0x1a833, 0x1a836, 0x1a836, 0x1a838, 0x1a83b,
+ 0x1a83e, 0x1a83e, 0x1a840, 0x1a843, 0x1a846, 0x1a846, 0x1a880, 0x1a884,
+ 0x1a900, 0x1a92b, 0x1a940, 0x1a940,
};
static const u32 a6xx_gmu_cx_registers[] = {
/* GMU CX */
- 0x4c00, 0x4c07, 0x4c10, 0x4c12, 0x4d00, 0x4d00, 0x4d07, 0x4d0a,
- 0x5000, 0x5004, 0x5007, 0x5008, 0x500b, 0x500c, 0x500f, 0x501c,
- 0x5024, 0x502a, 0x502d, 0x5030, 0x5040, 0x5053, 0x5087, 0x5089,
- 0x50a0, 0x50a2, 0x50a4, 0x50af, 0x50c0, 0x50c3, 0x50d0, 0x50d0,
- 0x50e4, 0x50e4, 0x50e8, 0x50ec, 0x5100, 0x5103, 0x5140, 0x5140,
- 0x5142, 0x5144, 0x514c, 0x514d, 0x514f, 0x5151, 0x5154, 0x5154,
- 0x5157, 0x5158, 0x515d, 0x515d, 0x5162, 0x5162, 0x5164, 0x5165,
- 0x5180, 0x5186, 0x5190, 0x519e, 0x51c0, 0x51c0, 0x51c5, 0x51cc,
- 0x51e0, 0x51e2, 0x51f0, 0x51f0, 0x5200, 0x5201,
+ 0x1f400, 0x1f407, 0x1f410, 0x1f412, 0x1f500, 0x1f500, 0x1f507, 0x1f50a,
+ 0x1f800, 0x1f804, 0x1f807, 0x1f808, 0x1f80b, 0x1f80c, 0x1f80f, 0x1f81c,
+ 0x1f824, 0x1f82a, 0x1f82d, 0x1f830, 0x1f840, 0x1f853, 0x1f887, 0x1f889,
+ 0x1f8a0, 0x1f8a2, 0x1f8a4, 0x1f8af, 0x1f8c0, 0x1f8c3, 0x1f8d0, 0x1f8d0,
+ 0x1f8e4, 0x1f8e4, 0x1f8e8, 0x1f8ec, 0x1f900, 0x1f903, 0x1f940, 0x1f940,
+ 0x1f942, 0x1f944, 0x1f94c, 0x1f94d, 0x1f94f, 0x1f951, 0x1f954, 0x1f954,
+ 0x1f957, 0x1f958, 0x1f95d, 0x1f95d, 0x1f962, 0x1f962, 0x1f964, 0x1f965,
+ 0x1f980, 0x1f986, 0x1f990, 0x1f99e, 0x1f9c0, 0x1f9c0, 0x1f9c5, 0x1f9cc,
+ 0x1f9e0, 0x1f9e2, 0x1f9f0, 0x1f9f0, 0x1fa00, 0x1fa01,
/* GMU AO */
- 0x9300, 0x9316, 0x9400, 0x9400,
+ 0x23b00, 0x23b16, 0x23c00, 0x23c00,
};
static const u32 a6xx_gmu_gpucc_registers[] = {
/* GPU CC */
- 0x9800, 0x9812, 0x9840, 0x9852, 0x9c00, 0x9c04, 0x9c07, 0x9c0b,
- 0x9c15, 0x9c1c, 0x9c1e, 0x9c2d, 0x9c3c, 0x9c3d, 0x9c3f, 0x9c40,
- 0x9c42, 0x9c49, 0x9c58, 0x9c5a, 0x9d40, 0x9d5e, 0xa000, 0xa002,
- 0xa400, 0xa402, 0xac00, 0xac02, 0xb000, 0xb002, 0xb400, 0xb402,
- 0xb800, 0xb802,
+ 0x24000, 0x24012, 0x24040, 0x24052, 0x24400, 0x24404, 0x24407, 0x2440b,
+ 0x24415, 0x2441c, 0x2441e, 0x2442d, 0x2443c, 0x2443d, 0x2443f, 0x24440,
+ 0x24442, 0x24449, 0x24458, 0x2445a, 0x24540, 0x2455e, 0x24800, 0x24802,
+ 0x24c00, 0x24c02, 0x25400, 0x25402, 0x25800, 0x25802, 0x25c00, 0x25c02,
+ 0x26000, 0x26002,
/* GPU CC ACD */
- 0xbc00, 0xbc16, 0xbc20, 0xbc27,
+ 0x26400, 0x26416, 0x26420, 0x26427,
};
static const u32 a621_gmu_gpucc_registers[] = {
/* GPU CC */
- 0x9800, 0x980e, 0x9c00, 0x9c0e, 0xb000, 0xb004, 0xb400, 0xb404,
- 0xb800, 0xb804, 0xbc00, 0xbc05, 0xbc14, 0xbc1d, 0xbc2a, 0xbc30,
- 0xbc32, 0xbc32, 0xbc41, 0xbc55, 0xbc66, 0xbc68, 0xbc78, 0xbc7a,
- 0xbc89, 0xbc8a, 0xbc9c, 0xbc9e, 0xbca0, 0xbca3, 0xbcb3, 0xbcb5,
- 0xbcc5, 0xbcc7, 0xbcd6, 0xbcd8, 0xbce8, 0xbce9, 0xbcf9, 0xbcfc,
- 0xbd0b, 0xbd0c, 0xbd1c, 0xbd1e, 0xbd40, 0xbd70, 0xbe00, 0xbe16,
- 0xbe20, 0xbe2d,
+ 0x24000, 0x2400e, 0x24400, 0x2440e, 0x25800, 0x25804, 0x25c00, 0x25c04,
+ 0x26000, 0x26004, 0x26400, 0x26405, 0x26414, 0x2641d, 0x2642a, 0x26430,
+ 0x26432, 0x26432, 0x26441, 0x26455, 0x26466, 0x26468, 0x26478, 0x2647a,
+ 0x26489, 0x2648a, 0x2649c, 0x2649e, 0x264a0, 0x264a3, 0x264b3, 0x264b5,
+ 0x264c5, 0x264c7, 0x264d6, 0x264d8, 0x264e8, 0x264e9, 0x264f9, 0x264fc,
+ 0x2650b, 0x2650c, 0x2651c, 0x2651e, 0x26540, 0x26570, 0x26600, 0x26616,
+ 0x26620, 0x2662d,
};
static const u32 a6xx_gmu_cx_rscc_registers[] = {
@@ -575,7 +575,7 @@ struct gen7_sptp_cluster_registers {
/* statetype: SP block state type for the cluster */
enum a7xx_statetype_id statetype;
/* pipe_id: Pipe identifier */
- enum a7xx_pipe pipe_id;
+ enum adreno_pipe pipe_id;
/* context_id: Context identifier */
int context_id;
/* location_id: Location identifier */
@@ -801,10 +801,10 @@ static const char *a7xx_statetype_names[] = {
};
static const char *a7xx_pipe_names[] = {
- A7XX_NAME(A7XX_PIPE_NONE),
- A7XX_NAME(A7XX_PIPE_BR),
- A7XX_NAME(A7XX_PIPE_BV),
- A7XX_NAME(A7XX_PIPE_LPAC),
+ A7XX_NAME(PIPE_NONE),
+ A7XX_NAME(PIPE_BR),
+ A7XX_NAME(PIPE_BV),
+ A7XX_NAME(PIPE_LPAC),
};
static const char *a7xx_cluster_names[] = {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index 8e69b1e84657..53cfdf4e6c34 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -21,7 +21,9 @@ static const char * const a6xx_hfi_msg_id[] = {
HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_TEST),
HFI_MSG_ID(HFI_H2F_MSG_START),
+ HFI_MSG_ID(HFI_H2F_FEATURE_CTRL),
HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
+ HFI_MSG_ID(HFI_H2F_MSG_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};
@@ -104,10 +106,25 @@ static int a6xx_hfi_wait_for_msg_interrupt(struct a6xx_gmu *gmu, u32 id, u32 seq
{
int ret;
u32 val;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+
+ do {
+ /* Wait for a response */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000);
+
+ if (!ret)
+ break;
- /* Wait for a response */
- ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
- val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 1000000);
+ if (completion_done(&a6xx_gpu->base.fault_coredump_done))
+ break;
+
+ /*
+ * We may time out because the GMU is temporarily wedged by pending
+ * faults from the GPU while we are taking a devcoredump. Wait until
+ * the MMU is resumed and try again.
+ */
+ wait_for_completion(&a6xx_gpu->base.fault_coredump_done);
+ } while (true);
if (ret) {
DRM_DEV_ERROR(gmu->dev,
@@ -254,11 +271,63 @@ static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
NULL, 0);
}
+static int a8xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
+{
+ unsigned int num_gx_votes = 3, num_cx_votes = 2;
+ struct a6xx_hfi_table_entry *entry;
+ struct a6xx_hfi_table *tbl;
+ int ret, i;
+ u32 size;
+
+ size = sizeof(*tbl) + (2 * sizeof(tbl->entry[0])) +
+ (gmu->nr_gpu_freqs * num_gx_votes * sizeof(gmu->gx_arc_votes[0])) +
+ (gmu->nr_gmu_freqs * num_cx_votes * sizeof(gmu->cx_arc_votes[0]));
+ tbl = kzalloc(size, GFP_KERNEL);
+ if (!tbl)
+ return -ENOMEM;
+
+ tbl->type = HFI_TABLE_GPU_PERF;
+
+ /* First fill GX votes */
+ entry = &tbl->entry[0];
+ entry->count = gmu->nr_gpu_freqs;
+ entry->stride = num_gx_votes;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ unsigned int base = i * entry->stride;
+
+ entry->data[base+0] = gmu->gx_arc_votes[i];
+ entry->data[base+1] = gmu->dep_arc_votes[i];
+ entry->data[base+2] = gmu->gpu_freqs[i] / 1000;
+ }
+
+ /* Then fill CX votes */
+ entry = (struct a6xx_hfi_table_entry *)
+ &tbl->entry[0].data[gmu->nr_gpu_freqs * num_gx_votes];
+
+ entry->count = gmu->nr_gmu_freqs;
+ entry->stride = num_cx_votes;
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ unsigned int base = i * entry->stride;
+
+ entry->data[base] = gmu->cx_arc_votes[i];
+ entry->data[base+1] = gmu->gmu_freqs[i] / 1000;
+ }
+
+ ret = a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TABLE, tbl, size, NULL, 0);
+
+ kfree(tbl);
+ return ret;
+}
+
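For reference, a minimal sketch of the buffer a8xx_hfi_send_perf_table()
builds, assuming hypothetical counts of nr_gpu_freqs = 2 and
nr_gmu_freqs = 2 (each cell below is one u32):

/*
 * header, version, type = HFI_TABLE_GPU_PERF
 * entry[0]: count = 2, stride = 3 (num_gx_votes)
 *     gx_arc_votes[0], dep_arc_votes[0], gpu_freqs[0] / 1000
 *     gx_arc_votes[1], dep_arc_votes[1], gpu_freqs[1] / 1000
 * entry[1]: count = 2, stride = 2 (num_cx_votes)
 *     cx_arc_votes[0], gmu_freqs[0] / 1000
 *     cx_arc_votes[1], gmu_freqs[1] / 1000
 *
 * The second entry header starts immediately after entry[0]'s data,
 * which is why it is recomputed as &tbl->entry[0].data[2 * 3] above.
 */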
static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_hfi_msg_perf_table msg = { 0 };
int i;
+ if (adreno_is_a8xx(adreno_gpu))
+ return a8xx_hfi_send_perf_table(gmu);
+
msg.num_gpu_levels = gmu->nr_gpu_freqs;
msg.num_gmu_levels = gmu->nr_gmu_freqs;
@@ -765,23 +834,40 @@ send:
NULL, 0);
}
+static int a6xx_hfi_feature_ctrl_msg(struct a6xx_gmu *gmu, u32 feature, u32 enable, u32 data)
+{
+ struct a6xx_hfi_msg_feature_ctrl msg = {
+ .feature = feature,
+ .enable = enable,
+ .data = data,
+ };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+}
+
+#define HFI_FEATURE_IFPC 9
+#define IFPC_LONG_HYST 0x1680
+
+static int a6xx_hfi_enable_ifpc(struct a6xx_gmu *gmu)
+{
+ if (gmu->idle_level != GMU_IDLE_STATE_IFPC)
+ return 0;
+
+ return a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_IFPC, 1, IFPC_LONG_HYST);
+}
+
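The helper is reusable for other feature toggles; a hypothetical sketch
(not part of this patch) of turning IFPC back off with the same message,
assuming the GMU ignores the data word when a feature is disabled:

static int a6xx_hfi_disable_ifpc(struct a6xx_gmu *gmu)
{
	/* Hypothetical: enable = 0, data = 0 (assumed ignored on disable) */
	return a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_IFPC, 0, 0);
}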
#define HFI_FEATURE_ACD 12
static int a6xx_hfi_enable_acd(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_acd_table *acd_table = &gmu->acd_table;
- struct a6xx_hfi_msg_feature_ctrl msg = {
- .feature = HFI_FEATURE_ACD,
- .enable = 1,
- .data = 0,
- };
int ret;
if (!acd_table->enable_by_level)
return 0;
/* Enable ACD feature at GMU */
- ret = a6xx_hfi_send_msg(gmu, HFI_H2F_FEATURE_CTRL, &msg, sizeof(msg), NULL, 0);
+ ret = a6xx_hfi_feature_ctrl_msg(gmu, HFI_FEATURE_ACD, 1, 0);
if (ret) {
DRM_DEV_ERROR(gmu->dev, "Unable to enable ACD (%d)\n", ret);
return ret;
@@ -898,6 +984,10 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
if (ret)
return ret;
+ ret = a6xx_hfi_enable_ifpc(gmu);
+ if (ret)
+ return ret;
+
ret = a6xx_hfi_send_core_fw_start(gmu);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 653ef720e2da..6f9f74a0bc85 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -185,6 +185,23 @@ struct a6xx_hfi_msg_core_fw_start {
u32 handle;
};
+#define HFI_H2F_MSG_TABLE 15
+
+struct a6xx_hfi_table_entry {
+ u32 count;
+ u32 stride;
+ u32 data[];
+};
+
+struct a6xx_hfi_table {
+ u32 header;
+ u32 version;
+ u32 type;
+#define HFI_TABLE_BW_VOTE 0
+#define HFI_TABLE_GPU_PERF 1
+ struct a6xx_hfi_table_entry entry[];
+};
+
#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30
struct a6xx_hfi_gx_bw_perf_vote_cmd {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
index 6a12a35dabff..afc5f4aa3b17 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c
@@ -41,7 +41,7 @@ static inline void set_preempt_state(struct a6xx_gpu *gpu,
}
/* Write the most recent wptr for the given ring into the hardware */
-static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+static inline void update_wptr(struct a6xx_gpu *a6xx_gpu, struct msm_ringbuffer *ring)
{
unsigned long flags;
uint32_t wptr;
@@ -51,7 +51,7 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
if (ring->restore_wptr) {
wptr = get_wptr(ring);
- gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_RB_WPTR, wptr, BIT(0), false);
ring->restore_wptr = false;
}
@@ -111,9 +111,9 @@ static void preempt_prepare_postamble(struct a6xx_gpu *a6xx_gpu)
postamble[count++] = PKT7(CP_WAIT_REG_MEM, 6);
postamble[count++] = CP_WAIT_REG_MEM_0_FUNCTION(WRITE_EQ);
- postamble[count++] = CP_WAIT_REG_MEM_1_POLL_ADDR_LO(
+ postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_LO(
REG_A6XX_RBBM_PERFCTR_SRAM_INIT_STATUS);
- postamble[count++] = CP_WAIT_REG_MEM_2_POLL_ADDR_HI(0);
+ postamble[count++] = CP_WAIT_REG_MEM_POLL_ADDR_HI(0);
postamble[count++] = CP_WAIT_REG_MEM_3_REF(0x1);
postamble[count++] = CP_WAIT_REG_MEM_4_MASK(0x1);
postamble[count++] = CP_WAIT_REG_MEM_5_DELAY_LOOP_CYCLES(0);
@@ -136,6 +136,21 @@ static void preempt_disable_postamble(struct a6xx_gpu *a6xx_gpu)
a6xx_gpu->postamble_enabled = false;
}
+/*
+ * Set the preemption keepalive vote. Note that this vote is different from
+ * the one used in a6xx_irq().
+ */
+static void a6xx_preempt_keepalive_vote(struct msm_gpu *gpu, bool on)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (adreno_has_gmu_wrapper(adreno_gpu))
+ return;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_PWR_COL_PREEMPT_KEEPALIVE, on);
+}
+
void a6xx_preempt_irq(struct msm_gpu *gpu)
{
uint32_t status;
@@ -172,10 +187,12 @@ void a6xx_preempt_irq(struct msm_gpu *gpu)
set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
- update_wptr(gpu, a6xx_gpu->cur_ring);
+ update_wptr(a6xx_gpu, a6xx_gpu->cur_ring);
set_preempt_state(a6xx_gpu, PREEMPT_NONE);
+ a6xx_preempt_keepalive_vote(gpu, false);
+
trace_msm_gpu_preemption_irq(a6xx_gpu->cur_ring->id);
/*
@@ -268,7 +285,7 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
*/
if (!ring || (a6xx_gpu->cur_ring == ring)) {
set_preempt_state(a6xx_gpu, PREEMPT_FINISH);
- update_wptr(gpu, a6xx_gpu->cur_ring);
+ update_wptr(a6xx_gpu, a6xx_gpu->cur_ring);
set_preempt_state(a6xx_gpu, PREEMPT_NONE);
spin_unlock_irqrestore(&a6xx_gpu->eval_lock, flags);
return;
@@ -302,13 +319,16 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
spin_unlock_irqrestore(&ring->preempt_lock, flags);
- gpu_write64(gpu,
- REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO,
- a6xx_gpu->preempt_smmu_iova[ring->id]);
+ /* Set the keepalive bit to keep the GPU ON until preemption is complete */
+ a6xx_preempt_keepalive_vote(gpu, true);
+
+ a6xx_fenced_write(a6xx_gpu,
+ REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO, a6xx_gpu->preempt_smmu_iova[ring->id],
+ BIT(1), true);
- gpu_write64(gpu,
+ a6xx_fenced_write(a6xx_gpu,
REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR,
- a6xx_gpu->preempt_iova[ring->id]);
+ a6xx_gpu->preempt_iova[ring->id], BIT(1), true);
a6xx_gpu->next_ring = ring;
@@ -328,7 +348,7 @@ void a6xx_preempt_trigger(struct msm_gpu *gpu)
set_preempt_state(a6xx_gpu, PREEMPT_TRIGGERED);
/* Trigger the preemption */
- gpu_write(gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl);
+ a6xx_fenced_write(a6xx_gpu, REG_A6XX_CP_CONTEXT_SWITCH_CNTL, cntl, BIT(1), false);
}
static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu,
diff --git a/drivers/gpu/drm/msm/adreno/a8xx_gpu.c b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c
new file mode 100644
index 000000000000..30de078e9dfd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c
@@ -0,0 +1,1201 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */
+
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "msm_gpu_trace.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+#include <linux/bitfield.h>
+#include <linux/devfreq.h>
+#include <linux/firmware/qcom/qcom_scm.h>
+#include <linux/pm_domain.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define GPU_PAS_ID 13
+
+static void a8xx_aperture_slice_set(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 val;
+
+ val = A8XX_CP_APERTURE_CNTL_HOST_PIPEID(pipe) | A8XX_CP_APERTURE_CNTL_HOST_SLICEID(slice);
+
+ if (a6xx_gpu->cached_aperture == val)
+ return;
+
+ gpu_write(gpu, REG_A8XX_CP_APERTURE_CNTL_HOST, val);
+
+ a6xx_gpu->cached_aperture = val;
+}
+
+static void a8xx_aperture_acquire(struct msm_gpu *gpu, enum adreno_pipe pipe, unsigned long *flags)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ spin_lock_irqsave(&a6xx_gpu->aperture_lock, *flags);
+
+ a8xx_aperture_slice_set(gpu, pipe, 0);
+}
+
+static void a8xx_aperture_release(struct msm_gpu *gpu, unsigned long flags)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ spin_unlock_irqrestore(&a6xx_gpu->aperture_lock, flags);
+}
+
+static void a8xx_aperture_clear(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+
+ a8xx_aperture_acquire(gpu, PIPE_NONE, &flags);
+ a8xx_aperture_release(gpu, flags);
+}
+
+static void a8xx_write_pipe(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 offset, u32 data)
+{
+ unsigned long flags;
+
+ a8xx_aperture_acquire(gpu, pipe, &flags);
+ gpu_write(gpu, offset, data);
+ a8xx_aperture_release(gpu, flags);
+}
+
+static u32 a8xx_read_pipe_slice(struct msm_gpu *gpu, enum adreno_pipe pipe, u32 slice, u32 offset)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&a6xx_gpu->aperture_lock, flags);
+ a8xx_aperture_slice_set(gpu, pipe, slice);
+ val = gpu_read(gpu, offset);
+ spin_unlock_irqrestore(&a6xx_gpu->aperture_lock, flags);
+
+ return val;
+}
+
+void a8xx_gpu_get_slice_info(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
+ u32 slice_mask;
+
+ if (adreno_gpu->info->family < ADRENO_8XX_GEN1)
+ return;
+
+ if (a6xx_gpu->slice_mask)
+ return;
+
+ slice_mask = GENMASK(info->max_slices - 1, 0);
+
+ /* GEN1 doesn't support partial slice configurations */
+ if (adreno_gpu->info->family == ADRENO_8XX_GEN1) {
+ a6xx_gpu->slice_mask = slice_mask;
+ return;
+ }
+
+ slice_mask &= a6xx_llc_read(a6xx_gpu,
+ REG_A8XX_CX_MISC_SLICE_ENABLE_FINAL);
+
+ a6xx_gpu->slice_mask = slice_mask;
+
+ /* The chip ID depends on the number of available slices, so update it */
+ adreno_gpu->chip_id |= FIELD_PREP(GENMASK(7, 4), hweight32(slice_mask));
+}
+
+static u32 a8xx_get_first_slice(struct a6xx_gpu *a6xx_gpu)
+{
+ return ffs(a6xx_gpu->slice_mask) - 1;
+}
+
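A worked example of the slice bookkeeping above, with illustrative
values (max_slices = 4, fuse reads back 0b0110):

/*
 * GENMASK(3, 0)                 = 0b1111  (four possible slices)
 * SLICE_ENABLE_FINAL fuse value = 0b0110  (slices 1 and 2 enabled)
 * slice_mask                    = 0b0110
 * hweight32(slice_mask)         = 2       -> chip_id bits [7:4]
 * a8xx_get_first_slice()        = ffs(0b0110) - 1 = 1
 */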
+static inline bool _a8xx_check_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Check that the GMU is idle */
+ if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ return false;
+
+ /* Check that the CX master is idle */
+ if (gpu_read(gpu, REG_A8XX_RBBM_STATUS) &
+ ~A8XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+ return false;
+
+ return !(gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS) &
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+static bool a8xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a8xx_check_idle(gpu))) {
+ DRM_ERROR(
+ "%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A8XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+void a8xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->preempt_lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (!a6xx_in_preempt(a6xx_gpu)) {
+ if (a6xx_gpu->cur_ring == ring)
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+ else
+ ring->restore_wptr = true;
+ } else {
+ ring->restore_wptr = true;
+ }
+
+ spin_unlock_irqrestore(&ring->preempt_lock, flags);
+}
+
+static void a8xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 val;
+
+ if (adreno_is_x285(adreno_gpu) && state)
+ gpu_write(gpu, REG_A8XX_RBBM_CGC_0_PC, 0x00000702);
+
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+ state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
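A sketch of how a sampling consumer could convert these raw counter
values into a busy percentage; a8xx_busy_percent() and its bookkeeping
parameters are hypothetical, not part of the driver:

/* Hypothetical: percent busy between two samples taken dt_us apart */
static u32 a8xx_busy_percent(struct msm_gpu *gpu, u64 prev_cycles,
			     u64 dt_us, u64 *cur_cycles)
{
	unsigned long rate;

	*cur_cycles = a8xx_gpu_busy(gpu, &rate);  /* rate = 19200000 Hz */

	/* cycles actually counted vs. cycles the 19.2 MHz clock ticked;
	 * div64_u64() is from <linux/math64.h>
	 */
	return div64_u64((*cur_cycles - prev_cycles) * 100,
			 dt_us * (rate / 1000000));
}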
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+ state ? 0x110111 : 0);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+ state ? 0x55555 : 0);
+
+ gpu_write(gpu, REG_A8XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
+ gpu_write(gpu, REG_A8XX_RBBM_CGC_GLOBAL_LOAD_CMD, !!state);
+
+ if (state) {
+ gpu_write(gpu, REG_A8XX_RBBM_CGC_P2S_TRIG_CMD, 1);
+
+ if (gpu_poll_timeout(gpu, REG_A8XX_RBBM_CGC_P2S_STATUS, val,
+ val & A8XX_RBBM_CGC_P2S_STATUS_TXDONE, 1, 10)) {
+ dev_err(&gpu->pdev->dev, "RBBM_CGC_P2S_STATUS TXDONE Poll failed\n");
+ return;
+ }
+
+ gpu_write(gpu, REG_A8XX_RBBM_CLOCK_CNTL_GLOBAL, 0);
+ } else {
+ /*
+ * The GMU enables clock gating in GBIF during boot, so override
+ * that here when the hwcg feature is disabled.
+ */
+ gpu_rmw(gpu, REG_A8XX_GBIF_CX_CONFIG, BIT(0), 0);
+ }
+}
+
+static void a8xx_set_cp_protect(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct adreno_protect *protect = adreno_gpu->info->a6xx->protect;
+ u32 cntl, final_cfg;
+ unsigned int i;
+
+ cntl = A8XX_CP_PROTECT_CNTL_PIPE_ACCESS_PROT_EN |
+ A8XX_CP_PROTECT_CNTL_PIPE_ACCESS_FAULT_ON_VIOL_EN |
+ A8XX_CP_PROTECT_CNTL_PIPE_LAST_SPAN_INF_RANGE |
+ A8XX_CP_PROTECT_CNTL_PIPE_HALT_SQE_RANGE__MASK;
+ /*
+ * Enable access protection to privileged registers, fault on an access
+ * protect violation and select the last span to protect from the start
+ * address all the way to the end of the register address space
+ */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_CP_PROTECT_CNTL_PIPE, cntl);
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_CP_PROTECT_CNTL_PIPE, cntl);
+
+ a8xx_aperture_clear(gpu);
+
+ for (i = 0; i < protect->count; i++) {
+ /* Intentionally skip writing to some registers */
+ if (protect->regs[i]) {
+ gpu_write(gpu, REG_A8XX_CP_PROTECT_GLOBAL(i), protect->regs[i]);
+ final_cfg = protect->regs[i];
+ }
+ }
+
+ /*
+ * The last-span feature is only supported on pipe-specific registers,
+ * so update those here.
+ */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_CP_PROTECT_PIPE(protect->count_max), final_cfg);
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_CP_PROTECT_PIPE(protect->count_max), final_cfg);
+
+ a8xx_aperture_clear(gpu);
+}
+
+static void a8xx_set_ubwc_config(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct qcom_ubwc_cfg_data *cfg = adreno_gpu->ubwc_config;
+ u32 level2_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL2);
+ u32 level3_swizzling_dis = !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL3);
+ bool rgba8888_lossless = false, fp16compoptdis = false;
+ bool yuvnotcomptofc = false, min_acc_len_64b = false;
+ bool rgb565_predicator = false, amsbc = false;
+ bool ubwc_mode = qcom_ubwc_get_ubwc_mode(cfg);
+ u32 ubwc_version = cfg->ubwc_enc_version;
+ u32 hbb, hbb_hi, hbb_lo, mode = 1;
+ u8 uavflagprd_inv = 2;
+
+ switch (ubwc_version) {
+ case UBWC_5_0:
+ amsbc = true;
+ rgb565_predicator = true;
+ mode = 4;
+ break;
+ case UBWC_4_0:
+ amsbc = true;
+ rgb565_predicator = true;
+ fp16compoptdis = true;
+ rgba8888_lossless = true;
+ mode = 2;
+ break;
+ case UBWC_3_0:
+ amsbc = true;
+ mode = 1;
+ break;
+ default:
+ dev_err(&gpu->pdev->dev, "Unknown UBWC version: 0x%x\n", ubwc_version);
+ break;
+ }
+
+ /*
+ * We subtract 13 from the highest bank bit (13 is the minimum value
+ * allowed by hw) and write the lowest two bits of the remaining value
+ * as hbb_lo and the one above it as hbb_hi to the hardware.
+ */
+ WARN_ON(cfg->highest_bank_bit < 13);
+ hbb = cfg->highest_bank_bit - 13;
+ hbb_hi = hbb >> 2;
+ hbb_lo = hbb & 3;
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5);
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5);
+
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CCU_NC_MODE_CNTL,
+ yuvnotcomptofc << 6 |
+ hbb_hi << 3 |
+ hbb_lo << 1);
+
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CMP_NC_MODE_CNTL,
+ mode << 15 |
+ yuvnotcomptofc << 6 |
+ rgba8888_lossless << 4 |
+ fp16compoptdis << 3 |
+ rgb565_predicator << 2 |
+ amsbc << 1 |
+ min_acc_len_64b);
+
+ a8xx_aperture_clear(gpu);
+
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+ level3_swizzling_dis << 13 |
+ level2_swizzling_dis << 12 |
+ hbb_hi << 10 |
+ uavflagprd_inv << 4 |
+ min_acc_len_64b << 3 |
+ hbb_lo << 1 | ubwc_mode);
+
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
+ level3_swizzling_dis << 7 |
+ level2_swizzling_dis << 6 |
+ hbb_hi << 4 |
+ min_acc_len_64b << 3 |
+ hbb_lo << 1 | ubwc_mode);
+}
+
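To make the bank-bit encoding concrete, a worked example assuming
cfg->highest_bank_bit = 16 (illustrative value):

/*
 * hbb    = 16 - 13 = 3
 * hbb_hi = 3 >> 2  = 0
 * hbb_lo = 3 & 3   = 3
 *
 * GRAS_NC_MODE_CNTL carries the whole value (hbb << 5), while the
 * RB, SP and TPL1 registers split it into hbb_hi/hbb_lo fields.
 */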
+static void a8xx_nonctxt_config(struct msm_gpu *gpu, u32 *gmem_protect)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ const struct a6xx_info *info = adreno_gpu->info->a6xx;
+ const struct adreno_reglist_pipe *regs = info->nonctxt_reglist;
+ unsigned int pipe_id, i;
+ unsigned long flags;
+
+ for (pipe_id = PIPE_NONE; pipe_id <= PIPE_DDE_BV; pipe_id++) {
+ /* We don't have support for LPAC yet */
+ if (pipe_id == PIPE_LPAC)
+ continue;
+
+ a8xx_aperture_acquire(gpu, pipe_id, &flags);
+
+ for (i = 0; regs[i].offset; i++) {
+ if (!(BIT(pipe_id) & regs[i].pipe))
+ continue;
+
+ if (regs[i].offset == REG_A8XX_RB_GC_GMEM_PROTECT)
+ *gmem_protect = regs[i].value;
+
+ gpu_write(gpu, regs[i].offset, regs[i].value);
+ }
+
+ a8xx_aperture_release(gpu, flags);
+ }
+
+ a8xx_aperture_clear(gpu);
+}
+
+static int a8xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+ u32 mask;
+
+ /* Disable concurrent binning before sending CP init */
+ OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
+ OUT_RING(ring, BIT(27));
+
+ OUT_PKT7(ring, CP_ME_INIT, 4);
+
+ /* Use multiple HW contexts */
+ mask = BIT(0);
+
+ /* Enable error detection */
+ mask |= BIT(1);
+
+ /* Set default reset state */
+ mask |= BIT(3);
+
+ /* Disable save/restore of performance counters across preemption */
+ mask |= BIT(6);
+
+ OUT_RING(ring, mask);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Operation mode mask */
+ OUT_RING(ring, 0x00000002);
+
+ a6xx_flush(gpu, ring);
+ return a8xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+#define A8XX_INT_MASK \
+ (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_SW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_PM4CPINTERRUPT | \
+ A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR | \
+ A6XX_RBBM_INT_0_MASK_TSBWRITEERROR | \
+ A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)
+
+#define A8XX_APRIV_MASK \
+ (A8XX_CP_APRIV_CNTL_PIPE_ICACHE | \
+ A8XX_CP_APRIV_CNTL_PIPE_RBFETCH | \
+ A8XX_CP_APRIV_CNTL_PIPE_RBPRIVLEVEL | \
+ A8XX_CP_APRIV_CNTL_PIPE_RBRPWB)
+
+#define A8XX_BR_APRIV_MASK \
+ (A8XX_APRIV_MASK | \
+ A8XX_CP_APRIV_CNTL_PIPE_CDREAD | \
+ A8XX_CP_APRIV_CNTL_PIPE_CDWRITE)
+
+#define A8XX_CP_GLOBAL_INT_MASK \
+ (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBR | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTBV | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTLPAC | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTAQE0 | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTAQE1 | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTDDEBR | \
+ A8XX_CP_GLOBAL_INT_MASK_HWFAULTDDEBV | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBR | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBV | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTLPAC | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTAQE0 | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTAQE1 | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTDDEBR | \
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTDDEBV)
+
+#define A8XX_CP_INTERRUPT_STATUS_MASK_PIPE \
+ (A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFRBWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB1WRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB2WRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFIB3WRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFSDSWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFMRBWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_CSFVSDWRAP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_OPCODEERROR | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VSDPARITYERROR | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_REGISTERPROTECTIONERROR | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_ILLEGALINSTRUCTION | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_SMMUFAULT | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPCLIENT | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPTYPE | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESPREAD | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_VBIFRESP | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_RTWROVF | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTWROVF | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTREFCNTOVF | \
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE_LRZRTCLRRESMISS)
+
+#define A8XX_CP_HW_FAULT_STATUS_MASK_PIPE \
+ (A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFRBFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB1FAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB2FAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFIB3FAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFSDSFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFMRBFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_CSFVSDFAULT | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_SQEREADBURSTOVF | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_EVENTENGINEOVF | \
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE_UCODEERROR)
+
+static int hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int pipe_id, i;
+ u32 gmem_protect = 0;
+ u64 gmem_range_min;
+ int ret;
+
+ ret = a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ if (ret)
+ return ret;
+
+ /* Clear the cached value to force aperture configuration next time */
+ a6xx_gpu->cached_aperture = UINT_MAX;
+ a8xx_aperture_clear(gpu);
+
+ /* Clear GBIF halt in case GX domain was not collapsed */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+ gpu_read(gpu, REG_A6XX_GBIF_HALT);
+
+ gpu_write(gpu, REG_A8XX_RBBM_GBIF_HALT, 0);
+ gpu_read(gpu, REG_A8XX_RBBM_GBIF_HALT);
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A8XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Setup GMEM Range in UCHE */
+ gmem_range_min = SZ_64M;
+ /* Set the GMEM VA range [SZ_64M : SZ_64M + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A8XX_UCHE_CCHE_GC_GMEM_RANGE_MIN, gmem_range_min);
+ gpu_write64(gpu, REG_A8XX_SP_HLSQ_GC_GMEM_RANGE_MIN, gmem_range_min);
+
+ /* Setup UCHE Trap region */
+ gpu_write64(gpu, REG_A8XX_UCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A8XX_UCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A8XX_UCHE_CCHE_TRAP_BASE, adreno_gpu->uche_trap_base);
+ gpu_write64(gpu, REG_A8XX_UCHE_CCHE_WRITE_THRU_BASE, adreno_gpu->uche_trap_base);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A8XX_RBBM_PERFCTR_CNTL, 0x1);
+ gpu_write(gpu, REG_A8XX_RBBM_SLICE_PERFCTR_CNTL, 0x1);
+
+ /* Turn on the IFPC counter (countable 4 on XOCLK1) */
+ gmu_write(&a6xx_gpu->gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_1,
+ FIELD_PREP(GENMASK(7, 0), 0x4));
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A8XX_CP_PERFCTR_CP_SEL(0), 1);
+
+ a8xx_set_ubwc_config(gpu);
+
+ /* Set weights for bicubic filtering */
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(0), 0);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(1), 0x3fe05ff4);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(2), 0x3fa0ebee);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(3), 0x3f5193ed);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(4), 0x3f0243f0);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(5), 0x00000000);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(6), 0x3fd093e8);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(7), 0x3f4133dc);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(8), 0x3ea1dfdb);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(9), 0x3e0283e0);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(10), 0x0000ac2b);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(11), 0x0000f01d);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(12), 0x00114412);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(13), 0x0021980a);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(14), 0x0051ec05);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(15), 0x0000380e);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(16), 0x3ff09001);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(17), 0x3fc10bfa);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(18), 0x3f9193f7);
+ gpu_write(gpu, REG_A8XX_TPL1_BICUBIC_WEIGHTS_TABLE(19), 0x3f7227f7);
+
+ gpu_write(gpu, REG_A8XX_UCHE_CLIENT_PF, BIT(7) | 0x1);
+
+ a8xx_nonctxt_config(gpu, &gmem_protect);
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A8XX_RBBM_INTERFACE_HANG_INT_CNTL, BIT(30) | 0xcfffff);
+ gpu_write(gpu, REG_A8XX_RBBM_SLICE_INTERFACE_HANG_INT_CNTL, BIT(30));
+
+ /* Set up the CX GMU counter 0 to count busy ticks */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+
+ /* Enable the power counter */
+ gmu_rmw(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_0, 0xff, BIT(5));
+ gmu_write(gmu, REG_A8XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+
+ /* Protect registers from the CP */
+ a8xx_set_cp_protect(gpu);
+
+ /* Enable the GMEM save/restore feature for preemption */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE, 1);
+
+ for (pipe_id = PIPE_BR; pipe_id <= PIPE_DDE_BV; pipe_id++) {
+ u32 apriv_mask = A8XX_APRIV_MASK;
+ unsigned long flags;
+
+ if (pipe_id == PIPE_LPAC)
+ continue;
+
+ if (pipe_id == PIPE_BR)
+ apriv_mask = A8XX_BR_APRIV_MASK;
+
+ a8xx_aperture_acquire(gpu, pipe_id, &flags);
+ gpu_write(gpu, REG_A8XX_CP_APRIV_CNTL_PIPE, apriv_mask);
+ gpu_write(gpu, REG_A8XX_CP_INTERRUPT_STATUS_MASK_PIPE,
+ A8XX_CP_INTERRUPT_STATUS_MASK_PIPE);
+ gpu_write(gpu, REG_A8XX_CP_HW_FAULT_STATUS_MASK_PIPE,
+ A8XX_CP_HW_FAULT_STATUS_MASK_PIPE);
+ a8xx_aperture_release(gpu, flags);
+ }
+
+ a8xx_aperture_clear(gpu);
+
+ /* Enable interrupts */
+ gpu_write(gpu, REG_A8XX_CP_INTERRUPT_STATUS_MASK_GLOBAL, A8XX_CP_GLOBAL_INT_MASK);
+ gpu_write(gpu, REG_A8XX_RBBM_INT_0_MASK, A8XX_INT_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ goto out;
+
+ gpu_write64(gpu, REG_A8XX_CP_SQE_INSTR_BASE, a6xx_gpu->sqe_iova);
+ if (a6xx_gpu->aqe_iova)
+ gpu_write64(gpu, REG_A8XX_CP_AQE_INSTR_BASE_0, a6xx_gpu->aqe_iova);
+
+ /* Set the ringbuffer address */
+ gpu_write64(gpu, REG_A6XX_CP_RB_BASE, gpu->rb[0]->iova);
+ gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
+
+ /* Configure the RPTR shadow if needed: */
+ gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR, shadowptr(a6xx_gpu, gpu->rb[0]));
+ gpu_write64(gpu, REG_A8XX_CP_RB_RPTR_ADDR_BV, rbmemptr(gpu->rb[0], bv_rptr));
+
+ for (i = 0; i < gpu->nr_rings; i++)
+ a6xx_gpu->shadow[i] = 0;
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+
+ for (i = 0; i < gpu->nr_rings; i++)
+ gpu->rb[i]->cur_ctx_seqno = 0;
+
+ /* Enable the SQE to start the CP engine */
+ gpu_write(gpu, REG_A8XX_CP_SQE_CNTL, 1);
+
+ ret = a8xx_cp_init(gpu);
+ if (ret)
+ goto out;
+
+ /*
+ * Try to load a zap shader into the secure world. If successful
+ * we can use the CP to switch out of secure mode. If not then we
+ * have no recourse but to try to switch ourselves out manually. If we
+ * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+ * be blocked and a permissions violation will soon follow.
+ */
+ ret = a6xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a6xx_flush(gpu, gpu->rb[0]);
+ if (!a8xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else if (ret == -ENODEV) {
+ /*
+ * This device does not use zap shader (but print a warning
+ * just in case someone got their dt wrong.. hopefully they
+ * have a debug UART to realize the error of their ways...
+ * if you mess this up you are about to crash horribly)
+ */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ ret = 0;
+ } else {
+ return ret;
+ }
+
+ /*
+ * GMEM_PROTECT register should be programmed after GPU is transitioned to
+ * non-secure mode
+ */
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_GC_GMEM_PROTECT, gmem_protect);
+ WARN_ON(!gmem_protect);
+ a8xx_aperture_clear(gpu);
+
+ /* Enable hardware clockgating */
+ a8xx_set_hwcg(gpu, true);
+out:
+ /*
+ * Tell the GMU that we are done touching the GPU and it can start power
+ * management
+ */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ return ret;
+}
+
+int a8xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+ ret = hw_init(gpu);
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return ret;
+}
+
+static void a8xx_dump(struct msm_gpu *gpu)
+{
+ DRM_DEV_INFO(&gpu->pdev->dev, "status: %08x\n", gpu_read(gpu, REG_A8XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+void a8xx_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int active_submits;
+
+ adreno_dump_info(gpu);
+
+ if (hang_debug)
+ a8xx_dump(gpu);
+
+ /*
+ * Set the hung flag to handle recovery-specific sequences during the
+ * rpm suspend we are about to trigger.
+ */
+ a6xx_gpu->hung = true;
+
+ /* Halt SQE first */
+ gpu_write(gpu, REG_A8XX_CP_SQE_CNTL, 3);
+
+ pm_runtime_dont_use_autosuspend(&gpu->pdev->dev);
+
+ /* active_submits won't change until we make a submission */
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+
+ /*
+ * Temporarily clear active_submits count to silence a WARN() in the
+ * runtime suspend cb
+ */
+ gpu->active_submits = 0;
+
+ reinit_completion(&gmu->pd_gate);
+ dev_pm_genpd_add_notifier(gmu->cxpd, &gmu->pd_nb);
+ dev_pm_genpd_synced_poweroff(gmu->cxpd);
+
+ /* Drop the rpm refcount from active submits */
+ if (active_submits)
+ pm_runtime_put(&gpu->pdev->dev);
+
+ /* And the final one from recover worker */
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ if (!wait_for_completion_timeout(&gmu->pd_gate, msecs_to_jiffies(1000)))
+ DRM_DEV_ERROR(&gpu->pdev->dev, "cx gdsc didn't collapse\n");
+
+ dev_pm_genpd_remove_notifier(gmu->cxpd);
+
+ pm_runtime_use_autosuspend(&gpu->pdev->dev);
+
+ if (active_submits)
+ pm_runtime_get(&gpu->pdev->dev);
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ gpu->active_submits = active_submits;
+ mutex_unlock(&gpu->active_lock);
+
+ msm_gpu_hw_init(gpu);
+ a6xx_gpu->hung = false;
+}
+
+static const char *a8xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
+{
+ static const char * const uche_clients[] = {
+ "BR_VFD", "BR_SP", "BR_VSC", "BR_VPC", "BR_HLSQ", "BR_PC", "BR_LRZ", "BR_TP",
+ "BV_VFD", "BV_SP", "BV_VSC", "BV_VPC", "BV_HLSQ", "BV_PC", "BV_LRZ", "BV_TP",
+ "STCHE",
+ };
+ static const char * const uche_clients_lpac[] = {
+ "-", "SP_LPAC", "-", "-", "HLSQ_LPAC", "-", "-", "TP_LPAC",
+ };
+ u32 val;
+
+ /*
+ * The source of the data depends on the mid ID read from FSYNR1
+ * and the client ID read from the UCHE block.
+ */
+ val = gpu_read(gpu, REG_A8XX_UCHE_CLIENT_PF);
+
+ val &= GENMASK(6, 0);
+
+ /* mid=3 refers to BR or BV */
+ if (mid == 3) {
+ if (val < ARRAY_SIZE(uche_clients))
+ return uche_clients[val];
+ else
+ return "UCHE";
+ }
+
+ /* mid=8 refers to LPAC */
+ if (mid == 8) {
+ if (val < ARRAY_SIZE(uche_clients_lpac))
+ return uche_clients_lpac[val];
+ else
+ return "UCHE_LPAC";
+ }
+
+ return "Unknown";
+}
+
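A worked decode of the logic above, with invented register values:

/*
 * Suppose FSYNR1 reports mid = 3 and UCHE_CLIENT_PF reads back 0x49:
 *   val = 0x49 & GENMASK(6, 0) = 73, out of range -> "UCHE"
 * With val = 9 instead:
 *   uche_clients[9] = "BV_SP"
 */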
+static const char *a8xx_fault_block(struct msm_gpu *gpu, u32 id)
+{
+ switch (id) {
+ case 0x0:
+ return "CP";
+ case 0x1:
+ return "UCHE: Unknown";
+ case 0x2:
+ return "UCHE_LPAC: Unknown";
+ case 0x3:
+ case 0x8:
+ return a8xx_uche_fault_block(gpu, id);
+ case 0x4:
+ return "CCU";
+ case 0x5:
+ return "Flag cache";
+ case 0x6:
+ return "PREFETCH";
+ case 0x7:
+ return "GMU";
+ case 0x9:
+ return "UCHE_HPAC";
+ }
+
+ return "Unknown";
+}
+
+int a8xx_fault_handler(void *arg, unsigned long iova, int flags, void *data)
+{
+ struct msm_gpu *gpu = arg;
+ struct adreno_smmu_fault_info *info = data;
+ const char *block = "unknown";
+
+ u32 scratch[] = {
+ gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(0)),
+ gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(1)),
+ gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(2)),
+ gpu_read(gpu, REG_A8XX_CP_SCRATCH_GLOBAL(3)),
+ };
+
+ if (info)
+ block = a8xx_fault_block(gpu, info->fsynr1 & 0xff);
+
+ return adreno_fault_handler(gpu, iova, flags, info, block, scratch);
+}
+
+static void a8xx_cp_hw_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A8XX_CP_INTERRUPT_STATUS_GLOBAL);
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u32 slice = a8xx_get_first_slice(a6xx_gpu);
+ u32 hw_fault_mask = GENMASK(6, 0);
+ u32 sw_fault_mask = GENMASK(22, 16);
+ u32 pipe = 0;
+
+ dev_err_ratelimited(&gpu->pdev->dev, "CP Fault Global INT status: 0x%x\n", status);
+
+ if (status & (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBR |
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBR))
+ pipe |= BIT(PIPE_BR);
+
+ if (status & (A8XX_CP_GLOBAL_INT_MASK_HWFAULTBV |
+ A8XX_CP_GLOBAL_INT_MASK_SWFAULTBV))
+ pipe |= BIT(PIPE_BV);
+
+ if (!pipe) {
+ dev_err_ratelimited(&gpu->pdev->dev, "CP Fault Unknown pipe\n");
+ goto out;
+ }
+
+ for (unsigned int pipe_id = PIPE_NONE; pipe_id <= PIPE_DDE_BV; pipe_id++) {
+ if (!(BIT(pipe_id) & pipe))
+ continue;
+
+ if (hw_fault_mask & status) {
+ status = a8xx_read_pipe_slice(gpu, pipe_id, slice,
+ REG_A8XX_CP_HW_FAULT_STATUS_PIPE);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP HW FAULT pipe: %u status: 0x%x\n", pipe_id, status);
+ }
+
+ if (sw_fault_mask & status) {
+ status = a8xx_read_pipe_slice(gpu, pipe_id, slice,
+ REG_A8XX_CP_INTERRUPT_STATUS_PIPE);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP SW FAULT pipe: %u status: 0x%x\n", pipe_id, status);
+
+ if (status & BIT(8)) {
+ a8xx_write_pipe(gpu, pipe_id, REG_A8XX_CP_SQE_STAT_ADDR_PIPE, 1);
+ status = a8xx_read_pipe_slice(gpu, pipe_id, slice,
+ REG_A8XX_CP_SQE_STAT_DATA_PIPE);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP Opcode error, opcode=0x%x\n", status);
+ }
+
+ if (status & BIT(10)) {
+ status = a8xx_read_pipe_slice(gpu, pipe_id, slice,
+ REG_A8XX_CP_PROTECT_STATUS_PIPE);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP REG PROTECT error, status=0x%x\n", status);
+ }
+ }
+ }
+
+out:
+ /* Turn off interrupts to avoid triggering recovery again */
+ a8xx_aperture_clear(gpu);
+ gpu_write(gpu, REG_A8XX_CP_INTERRUPT_STATUS_MASK_GLOBAL, 0);
+ gpu_write(gpu, REG_A8XX_RBBM_INT_0_MASK, 0);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static u32 gpu_periph_read(struct msm_gpu *gpu, u32 dbg_offset)
+{
+ gpu_write(gpu, REG_A8XX_CP_SQE_UCODE_DBG_ADDR_PIPE, dbg_offset);
+
+ return gpu_read(gpu, REG_A8XX_CP_SQE_UCODE_DBG_DATA_PIPE);
+}
+
+static u64 gpu_periph_read64(struct msm_gpu *gpu, u32 dbg_offset)
+{
+ u64 lo, hi;
+
+ lo = gpu_periph_read(gpu, dbg_offset);
+ hi = gpu_periph_read(gpu, dbg_offset + 1);
+
+ return (hi << 32) | lo;
+}
+
+#define CP_PERIPH_IB1_BASE_LO 0x7005
+#define CP_PERIPH_IB1_BASE_HI 0x7006
+#define CP_PERIPH_IB1_SIZE 0x7007
+#define CP_PERIPH_IB1_OFFSET 0x7008
+#define CP_PERIPH_IB2_BASE_LO 0x7009
+#define CP_PERIPH_IB2_BASE_HI 0x700a
+#define CP_PERIPH_IB2_SIZE 0x700b
+#define CP_PERIPH_IB2_OFFSET 0x700c
+#define CP_PERIPH_IB3_BASE_LO 0x700d
+#define CP_PERIPH_IB3_BASE_HI 0x700e
+#define CP_PERIPH_IB3_SIZE 0x700f
+#define CP_PERIPH_IB3_OFFSET 0x7010
+
+static void a8xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ unsigned long flags;
+
+ /*
+ * If stalled on SMMU fault, we could trip the GPU's hang detection,
+ * but the fault handler will trigger the devcore dump, and we want
+ * to otherwise resume normally rather than killing the submit, so
+ * just bail.
+ */
+ if (gpu_read(gpu, REG_A8XX_RBBM_MISC_STATUS) & A8XX_RBBM_MISC_STATUS_SMMU_STALLED_ON_FAULT)
+ return;
+
+ /*
+ * Force the GPU to stay on until after we finish
+ * collecting information
+ */
+ if (!adreno_has_gmu_wrapper(adreno_gpu))
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "gpu fault ring %d fence %x status %8.8X gfx_status %8.8X\n",
+ ring ? ring->id : -1, ring ? ring->fctx->last_fence : 0,
+ gpu_read(gpu, REG_A8XX_RBBM_STATUS), gpu_read(gpu, REG_A8XX_RBBM_GFX_STATUS));
+
+ a8xx_aperture_acquire(gpu, PIPE_BR, &flags);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "BR: status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x ib3 %16.16llX/%4.4x\n",
+ gpu_read(gpu, REG_A8XX_RBBM_GFX_BR_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_periph_read64(gpu, CP_PERIPH_IB1_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB1_OFFSET),
+ gpu_periph_read64(gpu, CP_PERIPH_IB2_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB2_OFFSET),
+ gpu_periph_read64(gpu, CP_PERIPH_IB3_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB3_OFFSET));
+
+ a8xx_aperture_release(gpu, flags);
+ a8xx_aperture_acquire(gpu, PIPE_BV, &flags);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "BV: status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x ib3 %16.16llX/%4.4x\n",
+ gpu_read(gpu, REG_A8XX_RBBM_GFX_BV_STATUS),
+ gpu_read(gpu, REG_A8XX_CP_RB_RPTR_BV),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_periph_read64(gpu, CP_PERIPH_IB1_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB1_OFFSET),
+ gpu_periph_read64(gpu, CP_PERIPH_IB2_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB2_OFFSET),
+ gpu_periph_read64(gpu, CP_PERIPH_IB3_BASE_LO),
+ gpu_periph_read(gpu, CP_PERIPH_IB3_OFFSET));
+
+ a8xx_aperture_release(gpu, flags);
+ a8xx_aperture_clear(gpu);
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ timer_delete(&gpu->hangcheck_timer);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+}
+
+static void a8xx_sw_fuse_violation_irq(struct msm_gpu *gpu)
+{
+ u32 status;
+
+ status = gpu_read(gpu, REG_A8XX_RBBM_SW_FUSE_INT_STATUS);
+ gpu_write(gpu, REG_A8XX_RBBM_SW_FUSE_INT_MASK, 0);
+
+ dev_err_ratelimited(&gpu->pdev->dev, "SW fuse violation status=%8.8x\n", status);
+
+ /*
+ * Ignore FASTBLEND violations, because the HW will silently fall back
+ * to legacy blending.
+ */
+ if (status & (A7XX_CX_MISC_SW_FUSE_VALUE_RAYTRACING |
+ A7XX_CX_MISC_SW_FUSE_VALUE_LPAC)) {
+ timer_delete(&gpu->hangcheck_timer);
+
+ kthread_queue_work(gpu->worker, &gpu->recover_work);
+ }
+}
+
+irqreturn_t a8xx_irq(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ u32 status = gpu_read(gpu, REG_A8XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A8XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (priv->disable_err_irq)
+ status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
+ a8xx_fault_detect_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR) {
+ u32 rl0, rl1;
+
+ rl0 = gpu_read(gpu, REG_A8XX_CP_RL_ERROR_DETAILS_0);
+ rl1 = gpu_read(gpu, REG_A8XX_CP_RL_ERROR_DETAILS_1);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | AHB bus error RL_ERROR_0: %x, RL_ERROR_1: %x\n", rl0, rl1);
+ }
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a8xx_cp_hw_err_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Trap interrupt\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_SWFUSEVIOLATION)
+ a8xx_sw_fuse_violation_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
+ msm_gpu_retire(gpu);
+ a6xx_preempt_trigger(gpu);
+ }
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_SW)
+ a6xx_preempt_irq(gpu);
+
+ return IRQ_HANDLED;
+}
+
+void a8xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
+{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
+ u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+
+ gpu_scid &= GENMASK(5, 0);
+
+ gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL1,
+ FIELD_PREP(GENMASK(29, 24), gpu_scid) |
+ FIELD_PREP(GENMASK(23, 18), gpu_scid) |
+ FIELD_PREP(GENMASK(17, 12), gpu_scid) |
+ FIELD_PREP(GENMASK(11, 6), gpu_scid) |
+ FIELD_PREP(GENMASK(5, 0), gpu_scid));
+
+ gpu_write(gpu, REG_A6XX_GBIF_SCACHE_CNTL0,
+ FIELD_PREP(GENMASK(27, 22), gpu_scid) |
+ FIELD_PREP(GENMASK(21, 16), gpu_scid) |
+ FIELD_PREP(GENMASK(15, 10), gpu_scid) |
+ BIT(8));
+ }
+
+ llcc_slice_activate(a6xx_gpu->htw_llc_slice);
+}
+
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+#define VBIF_XIN_HALT_CTRL0_MASK GENMASK(3, 0)
+#define VBIF_RESET_ACK_MASK 0xF0
+#define GPR0_GBIF_HALT_REQUEST 0x1E0
+
+void a8xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (gx_off) {
+ /* Halt the gx side of GBIF */
+ gpu_write(gpu, REG_A8XX_RBBM_GBIF_HALT, 1);
+ spin_until(gpu_read(gpu, REG_A8XX_RBBM_GBIF_HALT_ACK) & 1);
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
+int a8xx_gmu_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ mutex_lock(&a6xx_gpu->gmu.lock);
+
+ /* Force the GPU power on so we can read this register */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ *value = gpu_read64(gpu, REG_A8XX_CP_ALWAYS_ON_COUNTER);
+
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_PERFCOUNTER_SET);
+
+ mutex_unlock(&a6xx_gpu->gmu.lock);
+
+ return 0;
+}
+
+u64 a8xx_gpu_busy(struct msm_gpu *gpu, unsigned long *out_sample_rate)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ u64 busy_cycles;
+
+ /* 19.2MHz */
+ *out_sample_rate = 19200000;
+
+ busy_cycles = gmu_read64(&a6xx_gpu->gmu,
+ REG_A8XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
+ REG_A8XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
+
+ return busy_cycles;
+}
+
+bool a8xx_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ return true;
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 50945bfe9b49..554d746f115b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -24,12 +24,17 @@ bool disable_acd;
MODULE_PARM_DESC(disable_acd, "Forcefully disable GPU ACD");
module_param_unsafe(disable_acd, bool, 0400);
+static bool skip_gpu;
+MODULE_PARM_DESC(skip_gpu, "Skip GPU driver registration (0=register GPU driver (default), 1=skip GPU driver registration)");
+module_param(skip_gpu, bool, 0400);
+
extern const struct adreno_gpulist a2xx_gpulist;
extern const struct adreno_gpulist a3xx_gpulist;
extern const struct adreno_gpulist a4xx_gpulist;
extern const struct adreno_gpulist a5xx_gpulist;
extern const struct adreno_gpulist a6xx_gpulist;
extern const struct adreno_gpulist a7xx_gpulist;
+extern const struct adreno_gpulist a8xx_gpulist;
static const struct adreno_gpulist *gpulists[] = {
&a2xx_gpulist,
@@ -38,6 +43,7 @@ static const struct adreno_gpulist *gpulists[] = {
&a5xx_gpulist,
&a6xx_gpulist,
&a7xx_gpulist,
+ &a8xx_gpulist,
};
static const struct adreno_info *adreno_info(uint32_t chip_id)
@@ -184,6 +190,9 @@ bool adreno_has_gpu(struct device_node *node)
uint32_t chip_id;
int ret;
+ if (skip_gpu)
+ return false;
+
ret = find_chipid(node, &chip_id);
if (ret)
return false;
@@ -228,7 +237,7 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
priv->has_cached_coherent =
!!(info->quirks & ADRENO_QUIRK_HAS_CACHED_COHERENT);
- gpu = info->init(drm);
+ gpu = info->funcs->init(drm);
if (IS_ERR(gpu)) {
dev_warn(drm->dev, "failed to load adreno gpu\n");
return PTR_ERR(gpu);
@@ -404,10 +413,16 @@ static struct platform_driver adreno_driver = {
void __init adreno_register(void)
{
+ if (skip_gpu)
+ return;
+
platform_driver_register(&adreno_driver);
}
void __exit adreno_unregister(void)
{
+ if (skip_gpu)
+ return;
+
platform_driver_unregister(&adreno_driver);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
index 04b49d385f9d..d513e03fef08 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_0_0_snapshot.h
@@ -82,85 +82,85 @@ static const u32 gen7_0_0_debugbus_blocks[] = {
};
static const struct gen7_shader_block gen7_0_0_shader_blocks[] = {
- {A7XX_TP0_TMO_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_TP0_SMO_DATA, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA_1, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_0_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_1_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_2_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_3_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_4_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_5_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_6_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_7_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_CB_RAM, 0x390, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_TAG, 0x90, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA_2, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_TMO_TAG, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_SMO_TAG, 0x80, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_STATE_DATA, 0x40, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_HWAVE_RAM, 0x100, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_L0_INST_BUF, 0x50, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_8_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_9_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_10_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_11_DATA, 0x800, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_12_DATA, 0x200, 4, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_1, 0x200, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_TP0_TMO_DATA, 0x200, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_SMO_DATA, 0x80, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_1, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_0_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_1_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_2_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_3_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_4_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_5_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_6_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_7_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_CB_RAM, 0x390, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_TAG, 0x90, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_2, 0x200, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_TMO_TAG, 0x80, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_SMO_TAG, 0x80, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_STATE_DATA, 0x40, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_HWAVE_RAM, 0x100, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_L0_INST_BUF, 0x50, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_8_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_9_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_10_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_11_DATA, 0x800, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_12_DATA, 0x200, 4, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_1, 0x200, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
};
static const u32 gen7_0_0_pre_crashdumper_gpu_registers[] = {
@@ -303,7 +303,7 @@ static const u32 gen7_0_0_noncontext_rb_rbp_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_noncontext_rb_rbp_pipe_br_registers), 8));
-/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: A7XX_PIPE_BR */
+/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: PIPE_BR */
static const u32 gen7_0_0_gras_cluster_gras_pipe_br_registers[] = {
0x08000, 0x08008, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
@@ -313,7 +313,7 @@ static const u32 gen7_0_0_gras_cluster_gras_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_gras_cluster_gras_pipe_br_registers), 8));
-/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: A7XX_PIPE_BV */
+/* Block: GRAS Cluster: A7XX_CLUSTER_GRAS Pipeline: PIPE_BV */
static const u32 gen7_0_0_gras_cluster_gras_pipe_bv_registers[] = {
0x08000, 0x08008, 0x08010, 0x08092, 0x08094, 0x08099, 0x0809b, 0x0809d,
0x080a0, 0x080a7, 0x080af, 0x080f1, 0x080f4, 0x080f6, 0x080f8, 0x080fa,
@@ -323,7 +323,7 @@ static const u32 gen7_0_0_gras_cluster_gras_pipe_bv_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_gras_cluster_gras_pipe_bv_registers), 8));
-/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */
+/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BR */
static const u32 gen7_0_0_pc_cluster_fe_pipe_br_registers[] = {
0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886,
0x09b00, 0x09b08,
@@ -331,7 +331,7 @@ static const u32 gen7_0_0_pc_cluster_fe_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_pc_cluster_fe_pipe_br_registers), 8));
-/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */
+/* Block: PC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BV */
static const u32 gen7_0_0_pc_cluster_fe_pipe_bv_registers[] = {
0x09800, 0x09804, 0x09806, 0x0980a, 0x09810, 0x09811, 0x09884, 0x09886,
0x09b00, 0x09b08,
@@ -339,7 +339,7 @@ static const u32 gen7_0_0_pc_cluster_fe_pipe_bv_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_pc_cluster_fe_pipe_bv_registers), 8));
-/* Block: RB_RAC Cluster: A7XX_CLUSTER_PS Pipeline: A7XX_PIPE_BR */
+/* Block: RB_RAC Cluster: A7XX_CLUSTER_PS Pipeline: PIPE_BR */
static const u32 gen7_0_0_rb_rac_cluster_ps_pipe_br_registers[] = {
0x08802, 0x08802, 0x08804, 0x08806, 0x08809, 0x0880a, 0x0880e, 0x08811,
0x08818, 0x0881e, 0x08821, 0x08821, 0x08823, 0x08826, 0x08829, 0x08829,
@@ -355,7 +355,7 @@ static const u32 gen7_0_0_rb_rac_cluster_ps_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_rb_rac_cluster_ps_pipe_br_registers), 8));
-/* Block: RB_RBP Cluster: A7XX_CLUSTER_PS Pipeline: A7XX_PIPE_BR */
+/* Block: RB_RBP Cluster: A7XX_CLUSTER_PS Pipeline: PIPE_BR */
static const u32 gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers[] = {
0x08800, 0x08801, 0x08803, 0x08803, 0x0880b, 0x0880d, 0x08812, 0x08812,
0x08820, 0x08820, 0x08822, 0x08822, 0x08827, 0x08828, 0x0882a, 0x0882a,
@@ -370,7 +370,7 @@ static const u32 gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers[] = {
0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a99e, 0x0a9a7, 0x0a9a7,
0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9b0, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9ba,
@@ -381,7 +381,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers[] = {
0x0a9b0, 0x0a9b0, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9ba, 0x0a9bc, 0x0a9bc,
0x0a9c4, 0x0a9c4, 0x0a9cd, 0x0a9cd, 0x0a9e2, 0x0a9e3, 0x0a9e6, 0x0a9fc,
@@ -390,21 +390,21 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: HLSQ_DP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: HLSQ_DP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers[] = {
0x0a9b1, 0x0a9b1, 0x0a9c6, 0x0a9cb, 0x0a9d4, 0x0a9df,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: HLSQ_DP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: HLSQ_DP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers[] = {
0x0a9b1, 0x0a9b1, 0x0a9d4, 0x0a9df,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers[] = {
0x0a980, 0x0a980, 0x0a982, 0x0a984, 0x0a99e, 0x0a9a2, 0x0a9a7, 0x0a9a8,
0x0a9aa, 0x0a9aa, 0x0a9ae, 0x0a9ae, 0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5,
@@ -414,7 +414,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers[] = {
0x0a9b0, 0x0a9b1, 0x0a9b3, 0x0a9b5, 0x0a9ba, 0x0a9bc, 0x0a9e2, 0x0a9e3,
0x0a9e6, 0x0a9f9, 0x0aa00, 0x0aa00, 0x0ab00, 0x0ab00,
@@ -422,7 +422,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers[] = {
0x0a980, 0x0a982, 0x0a985, 0x0a9a6, 0x0a9a8, 0x0a9a9, 0x0a9ab, 0x0a9ae,
0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9bf, 0x0a9c2, 0x0a9c3,
@@ -432,7 +432,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers[] = {
0x0a9b0, 0x0a9b3, 0x0a9b6, 0x0a9b9, 0x0a9bb, 0x0a9be, 0x0a9c2, 0x0a9c3,
0x0a9cd, 0x0a9cd, 0x0a9d0, 0x0a9d3, 0x0aa31, 0x0aa31, 0x0ab00, 0x0ab01,
@@ -440,7 +440,7 @@ static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers[] = {
0x0a800, 0x0a800, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a833, 0x0a835, 0x0a83a, 0x0a83a,
@@ -453,7 +453,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers[] = {
0x0a800, 0x0a800, 0x0a81b, 0x0a81d, 0x0a822, 0x0a822, 0x0a824, 0x0a824,
0x0a827, 0x0a82a, 0x0a830, 0x0a830, 0x0a833, 0x0a835, 0x0a83a, 0x0a83a,
@@ -466,7 +466,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers[] = {
0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a830, 0x0a831,
0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, 0x0a85c, 0x0a85d,
@@ -477,7 +477,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers[] = {
0x0a800, 0x0a800, 0x0a81c, 0x0a81d, 0x0a822, 0x0a824, 0x0a830, 0x0a831,
0x0a834, 0x0a835, 0x0a83a, 0x0a83c, 0x0a840, 0x0a840, 0x0a85c, 0x0a85d,
@@ -488,7 +488,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers[] = {
0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a830, 0x0a833,
0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, 0x0a863, 0x0a867,
@@ -498,7 +498,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers[] = {
0x0a800, 0x0a81b, 0x0a81e, 0x0a821, 0x0a823, 0x0a827, 0x0a830, 0x0a833,
0x0a836, 0x0a839, 0x0a83b, 0x0a85b, 0x0a85e, 0x0a861, 0x0a863, 0x0a867,
@@ -508,7 +508,7 @@ static const u32 gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BR */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BR */
static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers[] = {
0x0b180, 0x0b183, 0x0b190, 0x0b195, 0x0b2c0, 0x0b2d5, 0x0b300, 0x0b307,
0x0b309, 0x0b309, 0x0b310, 0x0b310,
@@ -516,35 +516,35 @@ static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: HLSQ_STATE */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_hlsq_state_registers[] = {
0x0ab00, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20, 0x0ab40, 0x0abbf,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_hlsq_state_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: SP_TOP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV Location: SP_TOP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_sp_top_registers[] = {
0x0ab00, 0x0ab00, 0x0ab02, 0x0ab02, 0x0ab0a, 0x0ab1b, 0x0ab20, 0x0ab20,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_sp_top_registers), 8));
-/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV Location: uSPTP */
+/* Block: SP Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV Location: uSPTP */
static const u32 gen7_0_0_sp_cluster_sp_ps_pipe_bv_usptp_registers[] = {
0x0ab00, 0x0ab02, 0x0ab21, 0x0ab22, 0x0ab40, 0x0abbf,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_cluster_sp_ps_pipe_bv_usptp_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_BV */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_BV */
static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_bv_registers[] = {
0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_bv_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: A7XX_PIPE_LPAC */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_PS Pipeline: PIPE_LPAC */
static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers[] = {
0x0b180, 0x0b181, 0x0b300, 0x0b301, 0x0b307, 0x0b307, 0x0b309, 0x0b309,
0x0b310, 0x0b310,
@@ -552,84 +552,84 @@ static const u32 gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BR */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BR */
static const u32 gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers[] = {
0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers), 8));
-/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: A7XX_PIPE_BV */
+/* Block: TPL1 Cluster: A7XX_CLUSTER_SP_VS Pipeline: PIPE_BV */
static const u32 gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers[] = {
0x0b300, 0x0b307, 0x0b309, 0x0b309, 0x0b310, 0x0b310,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers), 8));
-/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */
+/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BR */
static const u32 gen7_0_0_vfd_cluster_fe_pipe_br_registers[] = {
0x0a000, 0x0a009, 0x0a00e, 0x0a0ef,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vfd_cluster_fe_pipe_br_registers), 8));
-/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */
+/* Block: VFD Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BV */
static const u32 gen7_0_0_vfd_cluster_fe_pipe_bv_registers[] = {
0x0a000, 0x0a009, 0x0a00e, 0x0a0ef,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vfd_cluster_fe_pipe_bv_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BR */
+/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BR */
static const u32 gen7_0_0_vpc_cluster_fe_pipe_br_registers[] = {
0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_fe_pipe_br_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: A7XX_PIPE_BV */
+/* Block: VPC Cluster: A7XX_CLUSTER_FE Pipeline: PIPE_BV */
static const u32 gen7_0_0_vpc_cluster_fe_pipe_bv_registers[] = {
0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_fe_pipe_bv_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: A7XX_PIPE_BR */
+/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: PIPE_BR */
static const u32 gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers[] = {
0x09101, 0x0910c, 0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: A7XX_PIPE_BV */
+/* Block: VPC Cluster: A7XX_CLUSTER_PC_VS Pipeline: PIPE_BV */
static const u32 gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers[] = {
0x09101, 0x0910c, 0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: A7XX_PIPE_BR */
+/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: PIPE_BR */
static const u32 gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers[] = {
0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x09236, 0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers), 8));
-/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: A7XX_PIPE_BV */
+/* Block: VPC Cluster: A7XX_CLUSTER_VPC_PS Pipeline: PIPE_BV */
static const u32 gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers[] = {
0x09200, 0x0920f, 0x09212, 0x09216, 0x09218, 0x09236, 0x09300, 0x09307,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: HLSQ_STATE */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_BR Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers[] = {
0x0ae52, 0x0ae52, 0x0ae60, 0x0ae67, 0x0ae69, 0x0ae73,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: SP_TOP */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_BR Location: SP_TOP */
static const u32 gen7_0_0_sp_noncontext_pipe_br_sp_top_registers[] = {
0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae09, 0x0ae0c, 0x0ae0c,
0x0ae0f, 0x0ae0f, 0x0ae28, 0x0ae2b, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3f,
@@ -638,7 +638,7 @@ static const u32 gen7_0_0_sp_noncontext_pipe_br_sp_top_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_sp_top_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_BR Location: uSPTP */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_BR Location: uSPTP */
static const u32 gen7_0_0_sp_noncontext_pipe_br_usptp_registers[] = {
0x0ae00, 0x0ae00, 0x0ae02, 0x0ae04, 0x0ae06, 0x0ae09, 0x0ae0c, 0x0ae0c,
0x0ae0f, 0x0ae0f, 0x0ae30, 0x0ae32, 0x0ae35, 0x0ae35, 0x0ae3a, 0x0ae3b,
@@ -647,28 +647,28 @@ static const u32 gen7_0_0_sp_noncontext_pipe_br_usptp_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_br_usptp_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: HLSQ_STATE */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_LPAC Location: HLSQ_STATE */
static const u32 gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers[] = {
0x0af88, 0x0af8a,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: SP_TOP */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_LPAC Location: SP_TOP */
static const u32 gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers[] = {
0x0af80, 0x0af84,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers), 8));
-/* Block: SP Cluster: noncontext Pipeline: A7XX_PIPE_LPAC Location: uSPTP */
+/* Block: SP Cluster: noncontext Pipeline: PIPE_LPAC Location: uSPTP */
static const u32 gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers[] = {
0x0af80, 0x0af84, 0x0af90, 0x0af92,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers), 8));
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_NONE */
+/* Block: TPl1 Cluster: noncontext Pipeline: PIPE_NONE */
static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
0x0b600, 0x0b600, 0x0b602, 0x0b602, 0x0b604, 0x0b604, 0x0b608, 0x0b60c,
0x0b60f, 0x0b621, 0x0b630, 0x0b633,
@@ -676,14 +676,14 @@ static const u32 gen7_0_0_tpl1_noncontext_pipe_none_registers[] = {
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_none_registers), 8));
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_BR */
+/* Block: TPl1 Cluster: noncontext Pipeline: PIPE_BR */
static const u32 gen7_0_0_tpl1_noncontext_pipe_br_registers[] = {
0x0b600, 0x0b600,
UINT_MAX, UINT_MAX,
};
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_br_registers), 8));
-/* Block: TPl1 Cluster: noncontext Pipeline: A7XX_PIPE_LPAC */
+/* Block: TPl1 Cluster: noncontext Pipeline: PIPE_LPAC */
static const u32 gen7_0_0_tpl1_noncontext_pipe_lpac_registers[] = {
0x0b780, 0x0b780,
UINT_MAX, UINT_MAX,
@@ -691,184 +691,184 @@ static const u32 gen7_0_0_tpl1_noncontext_pipe_lpac_registers[] = {
static_assert(IS_ALIGNED(sizeof(gen7_0_0_tpl1_noncontext_pipe_lpac_registers), 8));
static const struct gen7_sel_reg gen7_0_0_rb_rac_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x0,
};
static const struct gen7_sel_reg gen7_0_0_rb_rbp_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x9,
};
static const struct gen7_cluster_registers gen7_0_0_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_br_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BV, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_bv_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_LPAC, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_lpac_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_rb_rac_pipe_br_registers, &gen7_0_0_rb_rac_sel, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_0_0_noncontext_rb_rbp_pipe_br_registers, &gen7_0_0_rb_rbp_sel, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_gras_cluster_gras_pipe_br_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_gras_cluster_gras_pipe_bv_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_gras_cluster_gras_pipe_br_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_gras_cluster_gras_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_pc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_pc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rbp_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_0_0_rb_rbp_sel, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
static const struct gen7_sptp_cluster_registers gen7_0_0_sptp_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_0_0_sp_noncontext_pipe_br_sp_top_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_sp_noncontext_pipe_br_usptp_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_lpac_hlsq_state_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_HLSQ_STATE,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_SP_TOP,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_0_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
index 772652eb61f3..7897622ea6f7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_2_0_snapshot.h
@@ -96,87 +96,87 @@ static const u32 gen7_2_0_debugbus_blocks[] = {
};
static const struct gen7_shader_block gen7_2_0_shader_blocks[] = {
- {A7XX_TP0_TMO_DATA, 0x200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_TP0_SMO_DATA, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA_1, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_0_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_1_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_2_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_3_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_4_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_5_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_6_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_7_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_CB_RAM, 0x390, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_13_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_14_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_TAG, 0xc0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_INST_DATA_2, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_TMO_TAG, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_SMO_TAG, 0x80, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_STATE_DATA, 0x40, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_HWAVE_RAM, 0x100, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_L0_INST_BUF, 0x50, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_8_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_9_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_10_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_11_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_SP_LB_12_DATA, 0x800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP},
- {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM, 0x180, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM, 0x200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_1, 0x1c0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM, 0x200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x38, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE},
- {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_TP0_TMO_DATA, 0x200, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_SMO_DATA, 0x80, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_TP0_MIPMAP_BASE_DATA, 0x3c0, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_1, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_0_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_1_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_2_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_3_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_4_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_5_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_6_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_7_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_CB_RAM, 0x390, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_13_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_14_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_TAG, 0xc0, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_INST_DATA_2, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_TMO_TAG, 0x80, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_SMO_TAG, 0x80, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_STATE_DATA, 0x40, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_HWAVE_RAM, 0x100, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_L0_INST_BUF, 0x50, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_8_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_9_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_10_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_11_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_SP_LB_12_DATA, 0x800, 6, 2, PIPE_BR, A7XX_USPTP},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM, 0x1c0, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM, 0x180, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM, 0x280, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM, 0x200, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_1, 0x1c0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM, 0x200, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_TAG, 0x80, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x38, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x64, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x10, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INST_RAM_1, 0x800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_STPROC_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BV_BE_META, 0x10, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_DATAPATH_META, 0x20, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_FRONTEND_META, 0x80, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_INDIRECT_META, 0x10, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BR, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_BV, A7XX_HLSQ_STATE},
+ {A7XX_HLSQ_BACKEND_META, 0x40, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE},
};
static const u32 gen7_2_0_gpu_registers[] = {
@@ -478,182 +478,182 @@ static const u32 gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers[] = {
static_assert(IS_ALIGNED(sizeof(gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers), 8));
static const struct gen7_sel_reg gen7_2_0_rb_rac_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x0,
};
static const struct gen7_sel_reg gen7_2_0_rb_rbp_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x9,
};
static const struct gen7_cluster_registers gen7_2_0_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_pipe_br_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BV, STATE_NON_CONTEXT,
gen7_2_0_noncontext_pipe_bv_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_LPAC, STATE_NON_CONTEXT,
gen7_0_0_noncontext_pipe_lpac_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_rb_rac_pipe_br_registers, &gen7_2_0_rb_rac_sel, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_2_0_noncontext_rb_rbp_pipe_br_registers, &gen7_2_0_rb_rbp_sel, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_2_0_gras_cluster_gras_pipe_br_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_2_0_gras_cluster_gras_pipe_bv_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_2_0_gras_cluster_gras_pipe_br_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_2_0_gras_cluster_gras_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_pc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_pc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_pc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_2_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_2_0_rb_rac_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rbp_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_rb_rbp_cluster_ps_pipe_br_registers, &gen7_2_0_rb_rbp_sel, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vfd_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vfd_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_fe_pipe_br_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_fe_pipe_bv_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_pc_vs_pipe_br_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_pc_vs_pipe_bv_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_vpc_ps_pipe_br_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_0_0_vpc_cluster_vpc_ps_pipe_bv_registers, },
};
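
Each initializer row in this table pairs a context cluster with the pipe to select, the context bank to dump, a register list, and an optional sub-block select. A sketch of the entry layout the rows imply — the field names are illustrative assumptions modeled on the kgsl-derived snapshot headers, not taken from this diff:

struct gen7_cluster_registers {
	unsigned int cluster_id;	/* context cluster, e.g. A7XX_CLUSTER_GRAS */
	unsigned int pipe_id;		/* pipe to program, e.g. PIPE_BR */
	unsigned int context_id;	/* STATE_NON_CONTEXT or STATE_FORCE_CTXT_0/1 */
	const u32 *regs;		/* first/last register pairs; the
					 * IS_ALIGNED(sizeof(...), 8) asserts in
					 * this file check whole u32 pairs */
	const struct gen7_sel_reg *sel;	/* optional sub-block select, may be NULL */
};

Rows that appear twice with STATE_FORCE_CTXT_0 and STATE_FORCE_CTXT_1 but the same register list capture the two hardware context banks separately.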
static const struct gen7_sptp_cluster_registers gen7_2_0_sptp_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_0_0_sp_noncontext_pipe_br_hlsq_state_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_0_0_sp_noncontext_pipe_br_sp_top_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_sp_noncontext_pipe_br_usptp_registers, 0xae00 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_noncontext_pipe_lpac_hlsq_state_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_0_0_sp_noncontext_pipe_lpac_sp_top_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_sp_noncontext_pipe_lpac_usptp_registers, 0xaf80 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_br_registers, 0xb600 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_NONE, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_none_registers, 0xb600 },
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_noncontext_pipe_lpac_registers, 0xb780 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_ps_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_br_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_ps_pipe_lpac_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_DP,
gen7_0_0_sp_cluster_sp_ps_pipe_lpac_hlsq_dp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_ps_pipe_lpac_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_ps_pipe_lpac_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_vs_pipe_br_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_HLSQ_STATE,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_hlsq_state_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_vs_pipe_br_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_SP_TOP,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_sp_top_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_vs_pipe_br_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_2_0_sp_cluster_sp_vs_pipe_bv_usptp_registers, 0xa800 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_TP0_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_ps_pipe_lpac_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_br_registers, 0xb000 },
- { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_TP0_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_0_0_tpl1_cluster_sp_vs_pipe_bv_registers, 0xb000 },
};
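
The SP/TP cluster rows carry two extra coordinates — a crashdumper state type and a read-path location — plus a register base for the block. A sketch of the implied layout, again with illustrative (assumed) field names:

struct gen7_sptp_cluster_registers {
	unsigned int cluster_id;	/* e.g. A7XX_CLUSTER_SP_VS */
	unsigned int statetype;		/* e.g. A7XX_SP_CTX0_3D_CVS_REG */
	unsigned int pipe_id;		/* e.g. PIPE_BR */
	unsigned int context_id;	/* hardware context index, 0..3 */
	unsigned int location_id;	/* e.g. A7XX_HLSQ_STATE or A7XX_USPTP */
	const u32 *regs;		/* first/last register pairs */
	unsigned int regbase;		/* block base, e.g. 0xa800 for SP,
					 * 0xb000 for TPL1 context state */
};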
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index 0956dfca1f05..20125d1aa21d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -118,97 +118,97 @@ static const u32 gen7_9_0_cx_debugbus_blocks[] = {
};
static const struct gen7_shader_block gen7_9_0_shader_blocks[] = {
- { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_INST_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_INST_DATA_1, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_0_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_1_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_2_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_3_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_4_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_5_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_6_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_7_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_CB_RAM, 0x0390, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_13_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_14_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_INST_TAG, 0x00C0, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_INST_DATA_2, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_TMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_SMO_TAG, 0x0080, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_STATE_DATA, 0x0040, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_HWAVE_RAM, 0x0100, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_L0_INST_BUF, 0x0050, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_8_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_9_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_10_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_11_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_SP_LB_12_DATA, 0x0800, 6, 2, A7XX_PIPE_BR, A7XX_USPTP },
- { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_L2STC_TAG_RAM, 0x0200, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_L2STC_INFO_CMD, 0x0474, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0300, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0180, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM, 0x0640, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM, 0x00B0, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM, 0x0200, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0050, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_TAG, 0x0014, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_TAG, 0x0004, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0020, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x03C0, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0280, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0050, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0008, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_1, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_STPROC_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INST_RAM_2, 0x0800, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_DATAPATH_META, 0x0020, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_INDIRECT_META, 0x0010, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BR, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_BV, A7XX_HLSQ_STATE },
- { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, A7XX_PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_TP0_TMO_DATA, 0x0200, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_TP0_SMO_DATA, 0x0080, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_TP0_MIPMAP_BASE_DATA, 0x03C0, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_DATA_1, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_0_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_1_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_2_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_3_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_4_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_5_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_6_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_7_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_CB_RAM, 0x0390, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_13_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_14_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_TAG, 0x00C0, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_INST_DATA_2, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_TMO_TAG, 0x0080, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_SMO_TAG, 0x0080, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_STATE_DATA, 0x0040, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_HWAVE_RAM, 0x0100, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_L0_INST_BUF, 0x0050, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_8_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_9_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_10_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_11_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_SP_LB_12_DATA, 0x0800, 6, 2, PIPE_BR, A7XX_USPTP },
+ { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_DATAPATH_DSTR_META, 0x0010, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_L2STC_TAG_RAM, 0x0200, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_L2STC_INFO_CMD, 0x0474, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG, 0x0080, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM, 0x0400, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM, 0x01C0, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0300, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM, 0x0180, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CVS_RAM_TAG, 0x0040, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CHUNK_CPS_RAM_TAG, 0x0040, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CVS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_ICB_CPS_CB_BASE_TAG, 0x0010, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM, 0x0540, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM, 0x0640, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM, 0x00B0, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM_1, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM, 0x0800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM, 0x0200, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM, 0x0800, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM, 0x0800, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CVS_MISC_RAM_TAG, 0x0050, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0050, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_CPS_MISC_RAM_TAG, 0x0008, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_TAG, 0x0014, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_TAG, 0x0010, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_TAG, 0x0004, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CVS_CONST_RAM_TAG, 0x0040, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_CPS_CONST_RAM_TAG, 0x0020, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x03C0, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0280, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM, 0x0050, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG, 0x0008, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_1, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_STPROC_META, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BV_BE_META, 0x0018, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INST_RAM_2, 0x0800, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_DATAPATH_META, 0x0020, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_FRONTEND_META, 0x0080, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_INDIRECT_META, 0x0010, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, PIPE_BR, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, PIPE_BV, A7XX_HLSQ_STATE },
+ { A7XX_HLSQ_BACKEND_META, 0x0040, 1, 1, PIPE_LPAC, A7XX_HLSQ_STATE },
};
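
The shader-block rows follow a six-field pattern: a block identifier, a per-instance size in dwords, two instance counts, then the pipe and the read location. A sketch under the same naming assumption as above; the total capture per row is presumably size times the two counts:

struct gen7_shader_block {
	u32 statetype;		/* shader memory block, e.g. A7XX_SP_INST_DATA */
	u32 size;		/* size of one instance, in dwords */
	u32 num_sps;		/* SP instances to walk (6 above, 1 for HLSQ) */
	u32 num_usptps;		/* USPTPs per SP to walk */
	u32 pipeid;		/* pipe to select, e.g. PIPE_BR */
	u32 location;		/* read path, e.g. A7XX_USPTP or A7XX_HLSQ_STATE */
};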
/*
@@ -226,7 +226,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_pre_crashdumper_gpu_registers), 8));
* Block : ['BROADCAST', 'CP', 'GRAS', 'GXCLKCTL']
* Block : ['PC', 'RBBM', 'RDVM', 'UCHE']
* Block : ['VFD', 'VPC', 'VSC']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 196 (Regs:1778)
*/
static const u32 gen7_9_0_gpu_registers[] = {
@@ -290,7 +290,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gxclkctl_registers), 8));
/*
* Block : ['GMUAO', 'GMUCX', 'GMUCX_RAM']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 134 (Regs:429)
*/
static const u32 gen7_9_0_gmu_registers[] = {
@@ -334,7 +334,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmu_registers), 8));
/*
* Block : ['GMUGX']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 44 (Regs:454)
*/
static const u32 gen7_9_0_gmugx_registers[] = {
@@ -355,7 +355,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gmugx_registers), 8));
/*
* Block : ['CX_MISC']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 7 (Regs:56)
*/
static const u32 gen7_9_0_cx_misc_registers[] = {
@@ -367,7 +367,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_misc_registers), 8));
/*
* Block : ['DBGC']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 19 (Regs:155)
*/
static const u32 gen7_9_0_dbgc_registers[] = {
@@ -382,7 +382,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_dbgc_registers), 8));
/*
* Block : ['CX_DBGC']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* pairs : 7 (Regs:75)
*/
static const u32 gen7_9_0_cx_dbgc_registers[] = {
@@ -396,7 +396,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_cx_dbgc_registers), 8));
* Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF']
* Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC']
* Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* pairs : 29 (Regs:573)
*/
@@ -417,7 +417,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_br_registers), 8));
* Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF']
* Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC']
* Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_NONE
* pairs : 29 (Regs:573)
*/
@@ -438,7 +438,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_bv_registers), 8));
* Block : ['BROADCAST', 'CP', 'CX_DBGC', 'CX_MISC', 'DBGC', 'GBIF']
* Block : ['GMUAO', 'GMUCX', 'GMUGX', 'GRAS', 'GXCLKCTL', 'PC']
* Block : ['RBBM', 'RDVM', 'UCHE', 'VFD', 'VPC', 'VSC']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* pairs : 2 (Regs:7)
*/
@@ -450,7 +450,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_pipe_lpac_registers), 8));
/*
* Block : ['RB']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* pairs : 5 (Regs:37)
*/
@@ -463,7 +463,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rac_registers),
/*
* Block : ['RB']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* pairs : 15 (Regs:66)
*/
@@ -478,7 +478,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_rb_pipe_br_rbp_registers),
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_HLSQ_STATE
* pairs : 4 (Regs:28)
@@ -491,7 +491,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_state_regis
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_SP_TOP
* pairs : 10 (Regs:61)
@@ -506,7 +506,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_sp_top_registers
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 12 (Regs:62)
@@ -521,7 +521,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_usptp_registers)
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_HLSQ_DP_STR
* pairs : 2 (Regs:5)
@@ -534,7 +534,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_regi
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_HLSQ_STATE
* pairs : 1 (Regs:5)
@@ -547,7 +547,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_reg
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_SP_TOP
* pairs : 1 (Regs:6)
@@ -560,7 +560,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_sp_top_registe
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 2 (Regs:9)
@@ -573,7 +573,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_sp_pipe_lpac_usptp_register
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_NONE
+ * Pipeline: PIPE_NONE
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 5 (Regs:29)
@@ -587,7 +587,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_none_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 1 (Regs:1)
@@ -600,7 +600,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_br_usptp_register
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_NONE
* Location: A7XX_USPTP
* pairs : 1 (Regs:1)
@@ -613,7 +613,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_non_context_tpl1_pipe_lpac_usptp_regist
/*
* Block : ['GRAS']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_GRAS
* pairs : 14 (Regs:293)
*/
@@ -628,7 +628,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_br_cluster_gras_registers), 8
/*
* Block : ['GRAS']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_GRAS
* pairs : 14 (Regs:293)
*/
@@ -643,7 +643,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_gras_pipe_bv_cluster_gras_registers), 8
/*
* Block : ['PC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_FE
* pairs : 6 (Regs:31)
*/
@@ -656,7 +656,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_br_cluster_fe_registers), 8));
/*
* Block : ['PC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_FE
* pairs : 6 (Regs:31)
*/
@@ -669,7 +669,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_pc_pipe_bv_cluster_fe_registers), 8));
/*
* Block : ['VFD']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_FE
* pairs : 2 (Regs:236)
*/
@@ -681,7 +681,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_br_cluster_fe_registers), 8));
/*
* Block : ['VFD']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_FE
* pairs : 2 (Regs:236)
*/
@@ -693,7 +693,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vfd_pipe_bv_cluster_fe_registers), 8));
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_FE
* pairs : 2 (Regs:18)
*/
@@ -705,7 +705,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_fe_registers), 8));
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_PC_VS
* pairs : 3 (Regs:30)
*/
@@ -717,7 +717,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers), 8
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_VPC_PS
* pairs : 5 (Regs:76)
*/
@@ -730,7 +730,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers),
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_FE
* pairs : 2 (Regs:18)
*/
@@ -742,7 +742,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_fe_registers), 8));
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_PC_VS
* pairs : 3 (Regs:30)
*/
@@ -754,7 +754,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers), 8
/*
* Block : ['VPC']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_VPC_PS
* pairs : 5 (Regs:76)
*/
@@ -767,7 +767,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers),
/*
* Block : ['RB']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_PS
* pairs : 39 (Regs:133)
*/
@@ -788,7 +788,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rac_registers), 8
/*
* Block : ['RB']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_PS
* pairs : 34 (Regs:100)
*/
@@ -808,7 +808,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers), 8
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_HLSQ_STATE
* pairs : 29 (Regs:215)
@@ -828,7 +828,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_reg
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_SP_TOP
* pairs : 22 (Regs:73)
@@ -846,7 +846,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registe
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_USPTP
* pairs : 16 (Regs:269)
@@ -862,7 +862,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_register
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_STATE
* pairs : 21 (Regs:334)
@@ -880,7 +880,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_reg
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_DP
* pairs : 3 (Regs:19)
@@ -893,7 +893,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_regist
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_SP_TOP
* pairs : 18 (Regs:77)
@@ -910,7 +910,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registe
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_USPTP
* pairs : 17 (Regs:333)
@@ -927,7 +927,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_register
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_DP_STR
* pairs : 1 (Regs:6)
@@ -940,7 +940,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_re
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_HLSQ_STATE
* pairs : 28 (Regs:213)
@@ -959,7 +959,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_reg
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_SP_TOP
* pairs : 21 (Regs:71)
@@ -977,7 +977,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registe
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_USPTP
* pairs : 16 (Regs:266)
@@ -993,7 +993,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_register
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_STATE
* pairs : 14 (Regs:299)
@@ -1009,7 +1009,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_r
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_HLSQ_DP
* pairs : 2 (Regs:13)
@@ -1022,7 +1022,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_regi
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_SP_TOP
* pairs : 9 (Regs:34)
@@ -1037,7 +1037,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_regis
/*
* Block : ['SP']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_USPTP
* pairs : 11 (Regs:279)
@@ -1052,7 +1052,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_USPTP
* pairs : 3 (Regs:10)
@@ -1065,7 +1065,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_BR
+ * Pipeline: PIPE_BR
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_USPTP
* pairs : 6 (Regs:42)
@@ -1079,7 +1079,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_BV
+ * Pipeline: PIPE_BV
* Cluster : A7XX_CLUSTER_SP_VS
* Location: A7XX_USPTP
* pairs : 3 (Regs:10)
@@ -1092,7 +1092,7 @@ static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_regist
/*
* Block : ['TPL1']
- * Pipeline: A7XX_PIPE_LPAC
+ * Pipeline: PIPE_LPAC
* Cluster : A7XX_CLUSTER_SP_PS
* Location: A7XX_USPTP
* pairs : 5 (Regs:7)
@@ -1105,192 +1105,192 @@ static const u32 gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers[] = {
static_assert(IS_ALIGNED(sizeof(gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers), 8));
static const struct gen7_sel_reg gen7_9_0_rb_rac_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0,
};
static const struct gen7_sel_reg gen7_9_0_rb_rbp_sel = {
- .host_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_HOST,
- .cd_reg = REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD,
+ .host_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_HOST,
+ .cd_reg = REG_A6XX_RB_SUB_BLOCK_SEL_CNTL_CD,
.val = 0x9,
};
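
This hunk also drops the duplicated "RB_" from the REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_* names. The two sel entries steer reads of the same RB addresses to different sub-blocks: val 0 selects the RAC view, 0x9 the RBP view, and there is one select register for host reads and one presumably consumed by the crashdumper's command stream. A hypothetical helper showing the host side of the pattern; gpu_write() is the driver's MMIO write accessor:

/* Program the host-visible sub-block select before walking an RB
 * register list; rows without a sel pointer need no select write. */
static void gen7_rb_select(struct msm_gpu *gpu, const struct gen7_sel_reg *sel)
{
	if (sel)
		gpu_write(gpu, sel->host_reg, sel->val);
}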
static const struct gen7_cluster_registers gen7_9_0_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_br_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BV, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BV, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_bv_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_LPAC, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_LPAC, STATE_NON_CONTEXT,
gen7_9_0_non_context_pipe_lpac_registers, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_rb_pipe_br_rac_registers, &gen7_9_0_rb_rac_sel, },
- { A7XX_CLUSTER_NONE, A7XX_PIPE_BR, STATE_NON_CONTEXT,
+ { A7XX_CLUSTER_NONE, PIPE_BR, STATE_NON_CONTEXT,
gen7_9_0_non_context_rb_pipe_br_rbp_registers, &gen7_9_0_rb_rbp_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_rb_pipe_br_cluster_ps_rac_registers, &gen7_9_0_rb_rac_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, },
- { A7XX_CLUSTER_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_rb_pipe_br_cluster_ps_rbp_registers, &gen7_9_0_rb_rbp_sel, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_gras_pipe_br_cluster_gras_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_gras_pipe_br_cluster_gras_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_gras_pipe_bv_cluster_gras_registers, },
- { A7XX_CLUSTER_GRAS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_GRAS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_gras_pipe_bv_cluster_gras_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_pc_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_pc_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_pc_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_pc_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_vfd_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_vfd_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_vfd_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_vfd_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_br_cluster_fe_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_br_cluster_pc_vs_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BR, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BR, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_br_cluster_vpc_ps_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_FE, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_FE, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_bv_cluster_fe_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, },
- { A7XX_CLUSTER_PC_VS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_PC_VS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_bv_cluster_pc_vs_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_0,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_0,
gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
- { A7XX_CLUSTER_VPC_PS, A7XX_PIPE_BV, STATE_FORCE_CTXT_1,
+ { A7XX_CLUSTER_VPC_PS, PIPE_BV, STATE_FORCE_CTXT_1,
gen7_9_0_vpc_pipe_bv_cluster_vpc_ps_registers, },
};
static const struct gen7_sptp_cluster_registers gen7_9_0_sptp_clusters[] = {
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_non_context_sp_pipe_br_hlsq_state_registers, 0xae00},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_9_0_non_context_sp_pipe_br_sp_top_registers, 0xae00},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_non_context_sp_pipe_br_usptp_registers, 0xae00},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_BR, 0, A7XX_HLSQ_DP_STR,
gen7_9_0_non_context_sp_pipe_br_hlsq_dp_str_registers, 0xae00},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_9_0_non_context_sp_pipe_lpac_hlsq_state_registers, 0xaf80},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_9_0_non_context_sp_pipe_lpac_sp_top_registers, 0xaf80},
- { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_SP_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_9_0_non_context_sp_pipe_lpac_usptp_registers, 0xaf80},
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_NONE, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_NONE, 0, A7XX_USPTP,
gen7_9_0_non_context_tpl1_pipe_none_usptp_registers, 0xb600},
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_non_context_tpl1_pipe_br_usptp_registers, 0xb600},
- { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_NONE, A7XX_TP0_NCTX_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_9_0_non_context_tpl1_pipe_lpac_usptp_registers, 0xb780},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_SP_TOP,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_br_cluster_sp_vs_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_vs_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_vs_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_SP_TOP,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_9_0_sp_pipe_bv_cluster_sp_vs_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_HLSQ_DP_STR,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_lpac_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_SP_TOP,
gen7_9_0_sp_pipe_lpac_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_9_0_sp_pipe_lpac_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_STATE,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_STATE,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_state_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_HLSQ_DP_STR,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_HLSQ_DP_STR,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_SP_TOP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_SP_TOP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_sp_top_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_9_0_sp_pipe_br_cluster_sp_ps_usptp_registers, 0xa800},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_HLSQ_DP_STR,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_HLSQ_DP_STR,
gen7_9_0_sp_pipe_br_cluster_sp_ps_hlsq_dp_str_registers, 0xa800},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, A7XX_PIPE_BV, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX0_3D_CVS_REG, PIPE_BV, 0, A7XX_USPTP,
gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_vs_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, A7XX_PIPE_BV, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_VS, A7XX_SP_CTX1_3D_CVS_REG, PIPE_BV, 1, A7XX_USPTP,
gen7_9_0_tpl1_pipe_bv_cluster_sp_vs_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_BR, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_BR, 0, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, A7XX_PIPE_LPAC, 0, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX0_3D_CPS_REG, PIPE_LPAC, 0, A7XX_USPTP,
gen7_9_0_tpl1_pipe_lpac_cluster_sp_ps_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, A7XX_PIPE_BR, 1, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX1_3D_CPS_REG, PIPE_BR, 1, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, A7XX_PIPE_BR, 2, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX2_3D_CPS_REG, PIPE_BR, 2, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
- { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, A7XX_PIPE_BR, 3, A7XX_USPTP,
+ { A7XX_CLUSTER_SP_PS, A7XX_SP_CTX3_3D_CPS_REG, PIPE_BR, 3, A7XX_USPTP,
gen7_9_0_tpl1_pipe_br_cluster_sp_ps_usptp_registers, 0xb000},
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index f1230465bf0d..1c80909e63ca 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -10,7 +10,7 @@
#include <linux/interconnect.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/kernel.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
@@ -33,7 +33,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
struct device *dev = &gpu->pdev->dev;
const struct firmware *fw;
const char *signed_fwname = NULL;
- struct device_node *np, *mem_np;
+ struct device_node *np;
struct resource r;
phys_addr_t mem_phys;
ssize_t mem_size;
@@ -51,18 +51,11 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
return -ENODEV;
}
- mem_np = of_parse_phandle(np, "memory-region", 0);
- of_node_put(np);
- if (!mem_np) {
+ ret = of_reserved_mem_region_to_resource(np, 0, &r);
+ if (ret) {
zap_available = false;
- return -EINVAL;
- }
-
- ret = of_address_to_resource(mem_np, 0, &r);
- of_node_put(mem_np);
- if (ret)
return ret;
-
+ }
mem_phys = r.start;
/*
@@ -209,9 +202,7 @@ adreno_iommu_create_vm(struct msm_gpu *gpu,
u64 start, size;
mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
- if (!mmu)
- return ERR_PTR(-ENODEV);
- else if (IS_ERR_OR_NULL(mmu))
+ if (IS_ERR(mmu))
return ERR_CAST(mmu);
geometry = msm_iommu_get_geometry(mmu);
@@ -293,6 +284,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
struct adreno_smmu_fault_info *info, const char *block,
u32 scratch[4])
{
+ struct adreno_gpu *adreno_gpu = container_of(gpu, struct adreno_gpu, base);
struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
const char *type = "UNKNOWN";
@@ -345,6 +337,11 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
/* Turn off the hangcheck timer to keep it from bothering us */
timer_delete(&gpu->hangcheck_timer);
+ /* Let any concurrent GMU transactions know that the MMU may be
+ * blocked for a while and they should wait on us.
+ */
+ reinit_completion(&adreno_gpu->fault_coredump_done);
+
fault_info.ttbr0 = info->ttbr0;
fault_info.iova = iova;
fault_info.flags = flags;
@@ -352,18 +349,13 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
fault_info.block = block;
msm_gpu_fault_crashstate_capture(gpu, &fault_info);
+
+ complete_all(&adreno_gpu->fault_coredump_done);
}
return 0;
}
-static bool
-adreno_smmu_has_prr(struct msm_gpu *gpu)
-{
- struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
- return adreno_smmu && adreno_smmu->set_prr_addr;
-}
-
int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{
@@ -1205,6 +1197,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
/* Only handle the core clock when GMU is not in use (or is absent). */
if (adreno_has_gmu_wrapper(adreno_gpu) ||
+ adreno_has_rgmu(adreno_gpu) ||
adreno_gpu->info->family < ADRENO_6XX_GEN1) {
/*
* This can only be done before devm_pm_opp_of_add_table(), or
@@ -1238,6 +1231,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (ret)
return ret;
+ init_completion(&adreno_gpu->fault_coredump_done);
+ complete_all(&adreno_gpu->fault_coredump_done);
+
pm_runtime_set_autosuspend_delay(dev,
adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(dev);
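The fault-handler hunks above gate concurrent GMU traffic on a completion: it is created as already-complete at init time (init_completion() followed by complete_all()), re-armed with reinit_completion() before the crashstate capture, and released with complete_all() once the capture finishes. A minimal sketch of the same gate pattern, with hypothetical names rather than the driver's own:

#include <linux/completion.h>

struct demo_gpu {
	struct completion coredump_done;
};

/* Init: leave the gate open so waiters never block before the first fault. */
static void demo_gpu_init(struct demo_gpu *g)
{
	init_completion(&g->coredump_done);
	complete_all(&g->coredump_done);
}

/* Fault path: close the gate, capture state, then reopen it. */
static void demo_gpu_fault(struct demo_gpu *g)
{
	reinit_completion(&g->coredump_done);
	/* ... crashstate capture runs while other paths wait ... */
	complete_all(&g->coredump_done);
}

/* Concurrent path (e.g. a GMU transaction): wait out any capture in flight. */
static void demo_gpu_transaction(struct demo_gpu *g)
{
	wait_for_completion(&g->coredump_done);
	/* ... safe to touch the hardware now ... */
}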
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 9dc93c247196..0f8d3de97636 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -27,6 +27,7 @@ enum {
ADRENO_FW_PFP = 1,
ADRENO_FW_GMU = 1, /* a6xx */
ADRENO_FW_GPMU = 2,
+ ADRENO_FW_AQE = 3,
ADRENO_FW_MAX,
};
@@ -50,6 +51,8 @@ enum adreno_family {
ADRENO_7XX_GEN1, /* a730 family */
ADRENO_7XX_GEN2, /* a740 family */
ADRENO_7XX_GEN3, /* a750 family */
+ ADRENO_8XX_GEN1, /* a830 family */
+ ADRENO_8XX_GEN2, /* a840 family */
};
#define ADRENO_QUIRK_TWO_PASS_USE_WFI BIT(0)
@@ -59,6 +62,7 @@ enum adreno_family {
#define ADRENO_QUIRK_HAS_CACHED_COHERENT BIT(4)
#define ADRENO_QUIRK_PREEMPTION BIT(5)
#define ADRENO_QUIRK_4GB_VA BIT(6)
+#define ADRENO_QUIRK_IFPC BIT(7)
/* Helper for formating the chip_id in the way that userspace tools like
* crashdec expect.
@@ -70,9 +74,14 @@ enum adreno_family {
(((_c) >> 8) & 0xff), \
((_c) & 0xff)
+struct adreno_gpu;
+
struct adreno_gpu_funcs {
struct msm_gpu_funcs base;
+ struct msm_gpu *(*init)(struct drm_device *dev);
int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
+ void (*bus_halt)(struct adreno_gpu *adreno_gpu, bool gx_off);
+ int (*mmu_fault_handler)(void *arg, unsigned long iova, int flags, void *data);
};
struct adreno_reglist {
@@ -80,6 +89,13 @@ struct adreno_reglist {
u32 value;
};
+/* Reglist with pipe information */
+struct adreno_reglist_pipe {
+ u32 offset;
+ u32 value;
+ u32 pipe;
+};
+
struct adreno_speedbin {
uint16_t fuse;
uint16_t speedbin;
@@ -100,7 +116,7 @@ struct adreno_info {
const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
u64 quirks;
- struct msm_gpu *(*init)(struct drm_device *dev);
+ const struct adreno_gpu_funcs *funcs;
const char *zapfw;
u32 inactive_period;
union {
@@ -179,6 +195,8 @@ struct adreno_gpu {
uint16_t speedbin;
const struct adreno_gpu_funcs *funcs;
+ struct completion fault_coredump_done;
+
/* interesting register offsets to dump: */
const unsigned int *registers;
@@ -391,6 +409,16 @@ static inline int adreno_is_a610(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 610);
}
+static inline int adreno_is_a612(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06010200;
+}
+
+static inline bool adreno_has_rgmu(const struct adreno_gpu *gpu)
+{
+ return adreno_is_a612(gpu);
+}
+
static inline int adreno_is_a618(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 618);
@@ -465,9 +493,9 @@ static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
return false;
-
- /* TODO: A612 */
- return adreno_is_a610(gpu) || adreno_is_a702(gpu);
+ return adreno_is_a610(gpu) ||
+ adreno_is_a612(gpu) ||
+ adreno_is_a702(gpu);
}
/* TODO: 615/616 */
@@ -547,6 +575,21 @@ static inline int adreno_is_a7xx(struct adreno_gpu *gpu)
adreno_is_a740_family(gpu);
}
+static inline int adreno_is_a8xx(struct adreno_gpu *gpu)
+{
+ return gpu->info->family >= ADRENO_8XX_GEN1;
+}
+
+static inline int adreno_is_x285(struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x44070001;
+}
+
+static inline int adreno_is_a840(struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x44050a01;
+}
+
/* Put vm_start above 32b to catch issues with not setting xyz_BASE_HI */
#define ADRENO_VM_START 0x100000000ULL
u64 adreno_private_vm_size(struct msm_gpu *gpu);
@@ -672,12 +715,6 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
OUT_RING(ring, PKT7(opcode, cnt));
}
-struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
-struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
-
static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
{
return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
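The new chip-id helpers above match a packed 32-bit id with one byte per field, the same layout ADRENO_CHIPID_FMT unpacks for printing. A small userspace illustration of that decoding, assuming only the byte layout visible in the macro:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t chip_id = 0x06010200;	/* the A612 id matched above */

	printf("a%u%u%u (patch %u)\n",
	       (chip_id >> 24) & 0xff,	/* core:  6 */
	       (chip_id >> 16) & 0xff,	/* major: 1 */
	       (chip_id >> 8) & 0xff,	/* minor: 2 */
	       chip_id & 0xff);		/* patch: 0 */
	return 0;
}

This prints "a612 (patch 0)".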
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h
new file mode 100644
index 000000000000..13bb43ba67d3
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_12_2_glymur.h
@@ -0,0 +1,541 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2025 Linaro Limited
+ */
+
+#ifndef _DPU_12_2_GLYMUR_H
+#define _DPU_12_2_GLYMUR_H
+
+static const struct dpu_caps glymur_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 8192,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_mdp_cfg glymur_mdp = {
+ .name = "top_0",
+ .base = 0, .len = 0x494,
+ .clk_ctrls = {
+ [DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
+ },
+};
+
+static const struct dpu_ctl_cfg glymur_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ }, {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ }, {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ }, {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ }, {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ }, {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ }, {
+ .name = "ctl_6", .id = CTL_6,
+ .base = 0x1b000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 14),
+ }, {
+ .name = "ctl_7", .id = CTL_7,
+ .base = 0x1c000, .len = 0x1000,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 15),
+ },
+};
+
+static const struct dpu_sspp_cfg glymur_sspp[] = {
+ {
+ .name = "sspp_0", .id = SSPP_VIG0,
+ .base = 0x4000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 0,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_1", .id = SSPP_VIG1,
+ .base = 0x6000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 4,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_2", .id = SSPP_VIG2,
+ .base = 0x8000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 8,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_3", .id = SSPP_VIG3,
+ .base = 0xa000, .len = 0x344,
+ .features = VIG_SDM845_MASK_SDMA,
+ .sblk = &dpu_vig_sblk_qseed3_3_4,
+ .xin_id = 12,
+ .type = SSPP_TYPE_VIG,
+ }, {
+ .name = "sspp_8", .id = SSPP_DMA0,
+ .base = 0x24000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 1,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_9", .id = SSPP_DMA1,
+ .base = 0x26000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 5,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_10", .id = SSPP_DMA2,
+ .base = 0x28000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 9,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_11", .id = SSPP_DMA3,
+ .base = 0x2a000, .len = 0x344,
+ .features = DMA_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 13,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_12", .id = SSPP_DMA4,
+ .base = 0x2c000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 14,
+ .type = SSPP_TYPE_DMA,
+ }, {
+ .name = "sspp_13", .id = SSPP_DMA5,
+ .base = 0x2e000, .len = 0x344,
+ .features = DMA_CURSOR_SDM845_MASK_SDMA,
+ .sblk = &dpu_dma_sblk,
+ .xin_id = 15,
+ .type = SSPP_TYPE_DMA,
+ },
+};
+
+static const struct dpu_lm_cfg glymur_lm[] = {
+ {
+ .name = "lm_0", .id = LM_0,
+ .base = 0x44000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_1,
+ .pingpong = PINGPONG_0,
+ .dspp = DSPP_0,
+ }, {
+ .name = "lm_1", .id = LM_1,
+ .base = 0x45000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_0,
+ .pingpong = PINGPONG_1,
+ .dspp = DSPP_1,
+ }, {
+ .name = "lm_2", .id = LM_2,
+ .base = 0x46000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_3,
+ .pingpong = PINGPONG_2,
+ .dspp = DSPP_2,
+ }, {
+ .name = "lm_3", .id = LM_3,
+ .base = 0x47000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_2,
+ .pingpong = PINGPONG_3,
+ .dspp = DSPP_3,
+ }, {
+ .name = "lm_4", .id = LM_4,
+ .base = 0x48000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_5,
+ .pingpong = PINGPONG_4,
+ }, {
+ .name = "lm_5", .id = LM_5,
+ .base = 0x49000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_4,
+ .pingpong = PINGPONG_5,
+ }, {
+ .name = "lm_6", .id = LM_6,
+ .base = 0x4a000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_7,
+ .pingpong = PINGPONG_6,
+ }, {
+ .name = "lm_7", .id = LM_7,
+ .base = 0x4b000, .len = 0x400,
+ .features = MIXER_MSM8998_MASK,
+ .sblk = &sm8750_lm_sblk,
+ .lm_pair = LM_6,
+ .pingpong = PINGPONG_7,
+ },
+};
+
+static const struct dpu_dspp_cfg glymur_dspp[] = {
+ {
+ .name = "dspp_0", .id = DSPP_0,
+ .base = 0x54000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_1", .id = DSPP_1,
+ .base = 0x56000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_2", .id = DSPP_2,
+ .base = 0x58000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_3", .id = DSPP_3,
+ .base = 0x5a000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_4", .id = DSPP_4,
+ .base = 0x5c000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_5", .id = DSPP_5,
+ .base = 0x5e000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_6", .id = DSPP_6,
+ .base = 0x60000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ }, {
+ .name = "dspp_7", .id = DSPP_7,
+ .base = 0x62000, .len = 0x1800,
+ .sblk = &sm8750_dspp_sblk,
+ },
+};
+
+static const struct dpu_pingpong_cfg glymur_pp[] = {
+ {
+ .name = "pingpong_0", .id = PINGPONG_0,
+ .base = 0x69000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ }, {
+ .name = "pingpong_1", .id = PINGPONG_1,
+ .base = 0x6a000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_0,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ }, {
+ .name = "pingpong_2", .id = PINGPONG_2,
+ .base = 0x6b000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ }, {
+ .name = "pingpong_3", .id = PINGPONG_3,
+ .base = 0x6c000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_1,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ }, {
+ .name = "pingpong_4", .id = PINGPONG_4,
+ .base = 0x6d000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ }, {
+ .name = "pingpong_5", .id = PINGPONG_5,
+ .base = 0x6e000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_2,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ }, {
+ .name = "pingpong_6", .id = PINGPONG_6,
+ .base = 0x6f000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 20),
+ }, {
+ .name = "pingpong_7", .id = PINGPONG_7,
+ .base = 0x70000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_3,
+ .intr_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 21),
+ }, {
+ .name = "pingpong_cwb_0", .id = PINGPONG_CWB_0,
+ .base = 0x66000, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ }, {
+ .name = "pingpong_cwb_1", .id = PINGPONG_CWB_1,
+ .base = 0x66400, .len = 0,
+ .sblk = &sc7280_pp_sblk,
+ .merge_3d = MERGE_3D_4,
+ },
+};
+
+static const struct dpu_merge_3d_cfg glymur_merge_3d[] = {
+ {
+ .name = "merge_3d_0", .id = MERGE_3D_0,
+ .base = 0x4e000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_1", .id = MERGE_3D_1,
+ .base = 0x4f000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_2", .id = MERGE_3D_2,
+ .base = 0x50000, .len = 0x1c,
+ }, {
+ .name = "merge_3d_3", .id = MERGE_3D_3,
+ .base = 0x51000, .len = 0x1c,
+ },
+};
+
+/*
+ * NOTE: Each display compression engine (DCE) contains two hard-slice
+ * DSC encoders, so both share the same base address but each has its
+ * own sub-block address.
+ */
+static const struct dpu_dsc_cfg glymur_dsc[] = {
+ {
+ .name = "dce_0_0", .id = DSC_0,
+ .base = 0x80000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_0_1", .id = DSC_1,
+ .base = 0x80000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_1_0", .id = DSC_2,
+ .base = 0x81000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_1_1", .id = DSC_3,
+ .base = 0x81000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_2_0", .id = DSC_4,
+ .base = 0x82000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_2_1", .id = DSC_5,
+ .base = 0x82000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ }, {
+ .name = "dce_3_0", .id = DSC_6,
+ .base = 0x83000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_0,
+ }, {
+ .name = "dce_3_1", .id = DSC_7,
+ .base = 0x83000, .len = 0x8,
+ .features = BIT(DPU_DSC_NATIVE_42x_EN),
+ .sblk = &sm8750_dsc_sblk_1,
+ },
+};
+
+static const struct dpu_wb_cfg glymur_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb_yuv,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb_yuv),
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
+static const struct dpu_cwb_cfg glymur_cwb[] = {
+ {
+ .name = "cwb_0", .id = CWB_0,
+ .base = 0x66200, .len = 0x20,
+ },
+ {
+ .name = "cwb_1", .id = CWB_1,
+ .base = 0x66600, .len = 0x20,
+ },
+ {
+ .name = "cwb_2", .id = CWB_2,
+ .base = 0x7e200, .len = 0x20,
+ },
+ {
+ .name = "cwb_3", .id = CWB_3,
+ .base = 0x7e600, .len = 0x20,
+ },
+};
+
+static const struct dpu_intf_cfg glymur_intf[] = {
+ {
+ .name = "intf_0", .id = INTF_0,
+ .base = 0x34000, .len = 0x400,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 24),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 25),
+ }, {
+ .name = "intf_1", .id = INTF_1,
+ .base = 0x35000, .len = 0x400,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_0,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 26),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 27),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF1_TEAR_INTR, 2),
+ }, {
+ .name = "intf_2", .id = INTF_2,
+ .base = 0x36000, .len = 0x400,
+ .type = INTF_DSI,
+ .controller_id = MSM_DSI_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 28),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 29),
+ .intr_tear_rd_ptr = DPU_IRQ_IDX(MDP_INTF2_TEAR_INTR, 2),
+ }, {
+ .name = "intf_3", .id = INTF_3,
+ .base = 0x37000, .len = 0x400,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
+ }, {
+ .name = "intf_4", .id = INTF_4,
+ .base = 0x38000, .len = 0x400,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_1,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
+ }, {
+ .name = "intf_5", .id = INTF_5,
+ .base = 0x39000, .len = 0x400,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_3,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+ }, {
+ .name = "intf_6", .id = INTF_6,
+ .base = 0x3a000, .len = 0x400,
+ .type = INTF_DP,
+ .controller_id = MSM_DP_CONTROLLER_2,
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+ }, {
+ .name = "intf_7", .id = INTF_7,
+ .base = 0x3b000, .len = 0x400,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_2, /* pair with intf_6 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
+ }, {
+ .name = "intf_8", .id = INTF_8,
+ .base = 0x3c000, .len = 0x400,
+ .type = INTF_NONE,
+ .controller_id = MSM_DP_CONTROLLER_1, /* pair with intf_4 for DP MST */
+ .prog_fetch_lines_worst_case = 24,
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
+ },
+};
+
+static const struct dpu_perf_cfg glymur_perf_data = {
+ .max_bw_low = 18900000,
+ .max_bw_high = 28500000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_mdss_version glymur_mdss_ver = {
+ .core_major_ver = 12,
+ .core_minor_ver = 2,
+};
+
+const struct dpu_mdss_cfg dpu_glymur_cfg = {
+ .mdss_ver = &glymur_mdss_ver,
+ .caps = &glymur_dpu_caps,
+ .mdp = &glymur_mdp,
+ .cdm = &dpu_cdm_5_x,
+ .ctl_count = ARRAY_SIZE(glymur_ctl),
+ .ctl = glymur_ctl,
+ .sspp_count = ARRAY_SIZE(glymur_sspp),
+ .sspp = glymur_sspp,
+ .mixer_count = ARRAY_SIZE(glymur_lm),
+ .mixer = glymur_lm,
+ .dspp_count = ARRAY_SIZE(glymur_dspp),
+ .dspp = glymur_dspp,
+ .pingpong_count = ARRAY_SIZE(glymur_pp),
+ .pingpong = glymur_pp,
+ .dsc_count = ARRAY_SIZE(glymur_dsc),
+ .dsc = glymur_dsc,
+ .merge_3d_count = ARRAY_SIZE(glymur_merge_3d),
+ .merge_3d = glymur_merge_3d,
+ .wb_count = ARRAY_SIZE(glymur_wb),
+ .wb = glymur_wb,
+ .cwb_count = ARRAY_SIZE(glymur_cwb),
+ .cwb = glymur_cwb,
+ .intf_count = ARRAY_SIZE(glymur_intf),
+ .intf = glymur_intf,
+ .vbif_count = ARRAY_SIZE(sm8650_vbif),
+ .vbif = sm8650_vbif,
+ .perf = &glymur_perf_data,
+};
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 0fb5789c60d0..13cc658065c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -32,6 +32,26 @@ enum dpu_perf_mode {
};
/**
+ * dpu_core_perf_adjusted_mode_clk - Adjust the given mode clock rate
+ * according to the perf clock factor.
+ * @mode_clk_rate: Unadjusted mode clock rate
+ * @perf_cfg: performance configuration
+ */
+u64 dpu_core_perf_adjusted_mode_clk(u64 mode_clk_rate,
+ const struct dpu_perf_cfg *perf_cfg)
+{
+ u32 clk_factor;
+
+ clk_factor = perf_cfg->clk_inefficiency_factor;
+ if (clk_factor) {
+ mode_clk_rate *= clk_factor;
+ do_div(mode_clk_rate, 100);
+ }
+
+ return mode_clk_rate;
+}
+
+/**
* _dpu_core_perf_calc_bw() - to calculate BW per crtc
* @perf_cfg: performance configuration
* @crtc: pointer to a crtc
@@ -75,28 +95,21 @@ static u64 _dpu_core_perf_calc_clk(const struct dpu_perf_cfg *perf_cfg,
struct drm_plane *plane;
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
- u64 crtc_clk;
- u32 clk_factor;
+ u64 mode_clk;
mode = &state->adjusted_mode;
- crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+ mode_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
drm_atomic_crtc_for_each_plane(plane, crtc) {
pstate = to_dpu_plane_state(plane->state);
if (!pstate)
continue;
- crtc_clk = max(pstate->plane_clk, crtc_clk);
- }
-
- clk_factor = perf_cfg->clk_inefficiency_factor;
- if (clk_factor) {
- crtc_clk *= clk_factor;
- do_div(crtc_clk, 100);
+ mode_clk = max(pstate->plane_clk, mode_clk);
}
- return crtc_clk;
+ return dpu_core_perf_adjusted_mode_clk(mode_clk, perf_cfg);
}
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
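The factored-out helper simply scales a mode clock by the catalog's inefficiency factor, expressed in percent. A userspace restatement with a worked example (594000 kHz is the standard 4K@60 pixel clock, and 105 is the clk_inefficiency_factor from the glymur perf data above):

#include <stdint.h>
#include <stdio.h>

static uint64_t adjusted_mode_clk(uint64_t mode_clk_khz, uint32_t clk_factor)
{
	/* clk_factor is a percentage; 0 means "no adjustment" */
	if (clk_factor)
		mode_clk_khz = mode_clk_khz * clk_factor / 100;
	return mode_clk_khz;
}

int main(void)
{
	/* 594000 * 105 / 100 = 623700 kHz */
	printf("%llu\n",
	       (unsigned long long)adjusted_mode_clk(594000, 105));
	return 0;
}

The mode_valid check added to dpu_crtc.c then compares this adjusted rate (halved when 3D merge can split the load) against max_core_clk_rate.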
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
index d2f21d34e501..3740bc97422c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -54,6 +54,9 @@ struct dpu_core_perf {
u32 fix_core_ab_vote;
};
+u64 dpu_core_perf_adjusted_mode_clk(u64 clk_rate,
+ const struct dpu_perf_cfg *perf_cfg);
+
int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 94912b4708fb..c39f1908ea65 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -200,7 +200,7 @@ static int dpu_crtc_get_lm_crc(struct drm_crtc *crtc,
struct dpu_crtc_state *crtc_state)
{
struct dpu_crtc_mixer *m;
- u32 crcs[CRTC_DUAL_MIXERS];
+ u32 crcs[CRTC_QUAD_MIXERS];
int rc = 0;
int i;
@@ -377,11 +377,10 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
struct dpu_crtc_state *crtc_state;
- int lm_idx, lm_horiz_position;
+ int lm_idx;
crtc_state = to_dpu_crtc_state(crtc->state);
- lm_horiz_position = 0;
for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
@@ -392,7 +391,7 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
cfg.out_width = drm_rect_width(lm_roi);
cfg.out_height = drm_rect_height(lm_roi);
- cfg.right_mixer = lm_horiz_position++;
+ cfg.right_mixer = lm_idx & 0x1;
cfg.flags = 0;
hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
}
@@ -401,7 +400,7 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
struct drm_plane *plane,
struct dpu_crtc_mixer *mixer,
- u32 num_mixers,
+ u32 lms_in_pair,
enum dpu_stage stage,
const struct msm_format *format,
uint64_t modifier,
@@ -420,7 +419,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
state, to_dpu_plane_state(state), stage_idx,
- format->pixel_format,
+ format->pixel_format, pipe,
modifier);
DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d multirect_idx %d\n",
@@ -435,7 +434,7 @@ static void _dpu_crtc_blend_setup_pipe(struct drm_crtc *crtc,
stage_cfg->multirect_index[stage][stage_idx] = pipe->multirect_index;
/* blend config update */
- for (lm_idx = 0; lm_idx < num_mixers; lm_idx++)
+ for (lm_idx = 0; lm_idx < lms_in_pair; lm_idx++)
mixer[lm_idx].lm_ctl->ops.update_pending_flush_sspp(mixer[lm_idx].lm_ctl, sspp_idx);
}
@@ -450,7 +449,7 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_plane_state *pstate = NULL;
const struct msm_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
- u32 lm_idx;
+ u32 lm_idx, stage, i, pipe_idx, head_pipe_in_stage, lms_in_pair;
bool bg_alpha_enable = false;
DECLARE_BITMAP(active_fetch, SSPP_MAX);
DECLARE_BITMAP(active_pipes, SSPP_MAX);
@@ -473,22 +472,25 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
bg_alpha_enable = true;
- set_bit(pstate->pipe.sspp->idx, active_fetch);
- set_bit(pstate->pipe.sspp->idx, active_pipes);
- _dpu_crtc_blend_setup_pipe(crtc, plane,
- mixer, cstate->num_mixers,
- pstate->stage,
- format, fb ? fb->modifier : 0,
- &pstate->pipe, 0, stage_cfg);
-
- if (pstate->r_pipe.sspp) {
- set_bit(pstate->r_pipe.sspp->idx, active_fetch);
- set_bit(pstate->r_pipe.sspp->idx, active_pipes);
- _dpu_crtc_blend_setup_pipe(crtc, plane,
- mixer, cstate->num_mixers,
- pstate->stage,
- format, fb ? fb->modifier : 0,
- &pstate->r_pipe, 1, stage_cfg);
+ /* iterate the pipes of each mixer pair, using that stage's config */
+ for (stage = 0; stage < STAGES_PER_PLANE; stage++) {
+ head_pipe_in_stage = stage * PIPES_PER_STAGE;
+ for (i = 0; i < PIPES_PER_STAGE; i++) {
+ pipe_idx = i + head_pipe_in_stage;
+ if (!pstate->pipe[pipe_idx].sspp)
+ continue;
+ lms_in_pair = min(cstate->num_mixers - (stage * PIPES_PER_STAGE),
+ PIPES_PER_STAGE);
+ set_bit(pstate->pipe[pipe_idx].sspp->idx, active_fetch);
+ set_bit(pstate->pipe[pipe_idx].sspp->idx, active_pipes);
+ _dpu_crtc_blend_setup_pipe(crtc, plane,
+ &mixer[head_pipe_in_stage],
+ lms_in_pair,
+ pstate->stage,
+ format, fb ? fb->modifier : 0,
+ &pstate->pipe[pipe_idx], i,
+ &stage_cfg[stage]);
+ }
}
/* blend config update */
@@ -524,7 +526,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
- struct dpu_hw_stage_cfg stage_cfg;
+ struct dpu_hw_stage_cfg stage_cfg[STAGES_PER_PLANE];
DECLARE_BITMAP(active_lms, LM_MAX);
int i;
@@ -545,10 +547,10 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
}
/* initialize stage cfg */
- memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+ memset(&stage_cfg, 0, sizeof(stage_cfg));
memset(active_lms, 0, sizeof(active_lms));
- _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, stage_cfg);
for (i = 0; i < cstate->num_mixers; i++) {
ctl = mixer[i].lm_ctl;
@@ -569,13 +571,17 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].mixer_op_mode,
ctl->idx - CTL_0);
+ /*
+ * Call setup_blendstage() with this mixer's stage cfg. Each stage
+ * cfg is shared by the PIPES_PER_STAGE pipes feeding that stage.
+ */
if (ctl->ops.setup_blendstage)
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
- &stage_cfg);
+ &stage_cfg[i / PIPES_PER_STAGE]);
if (lm->ops.setup_blendstage)
lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx,
- &stage_cfg);
+ &stage_cfg[i / PIPES_PER_STAGE]);
}
}
@@ -1311,7 +1317,7 @@ done:
return ret;
}
-#define MAX_CHANNELS_PER_CRTC 2
+#define MAX_CHANNELS_PER_CRTC PIPES_PER_PLANE
#define MAX_HDISPLAY_SPLIT 1080
static struct msm_display_topology dpu_crtc_get_topology(
@@ -1322,6 +1328,7 @@ static struct msm_display_topology dpu_crtc_get_topology(
struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct msm_display_topology topology = {0};
struct drm_encoder *drm_enc;
+ u32 num_rt_intf;
drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
@@ -1335,11 +1342,14 @@ static struct msm_display_topology dpu_crtc_get_topology(
* Dual display
* 2 LM, 2 INTF ( Split display using 2 interfaces)
*
+ * If DSC is enabled, try to use the 4:4:2 topology if there are
+ * enough resources; otherwise, fall back to the 2:2:2 topology.
+ *
* Single display
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
- * If DSC is enabled, use 2 LMs for 2:2:1 topology
+ * If DSC is enabled, use 2:2:1 topology
*
* Add dspps to the reservation requirements if ctm is requested
*
@@ -1351,14 +1361,23 @@ static struct msm_display_topology dpu_crtc_get_topology(
* (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
*/
- if (topology.num_intf == 2 && !topology.cwb_enabled)
- topology.num_lm = 2;
- else if (topology.num_dsc == 2)
+ num_rt_intf = topology.num_intf;
+ if (topology.cwb_enabled)
+ num_rt_intf--;
+
+ if (topology.num_dsc) {
+ if (dpu_kms->catalog->dsc_count >= num_rt_intf * 2)
+ topology.num_dsc = num_rt_intf * 2;
+ else
+ topology.num_dsc = num_rt_intf;
+ topology.num_lm = topology.num_dsc;
+ } else if (num_rt_intf == 2) {
topology.num_lm = 2;
- else if (dpu_kms->catalog->caps->has_3d_merge)
+ } else if (dpu_kms->catalog->caps->has_3d_merge) {
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
- else
+ } else {
topology.num_lm = 1;
+ }
if (crtc_state->ctm)
topology.num_dspp = topology.num_lm;
@@ -1534,6 +1553,7 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ u64 adjusted_mode_clk;
/* if there is no 3d_mux block we cannot merge LMs so we cannot
* split the large layer into 2 LMs, filter out such modes
@@ -1541,6 +1561,20 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
if (!dpu_kms->catalog->caps->has_3d_merge &&
mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width)
return MODE_BAD_HVALUE;
+
+ adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
+ dpu_kms->perf.perf_cfg);
+
+ if (dpu_kms->catalog->caps->has_3d_merge)
+ adjusted_mode_clk /= 2;
+
+ /*
+ * The given mode, adjusted for the perf clock factor, should not exceed
+ * the max core clock rate
+ */
+ if (dpu_kms->perf.max_core_clk_rate < adjusted_mode_clk * 1000)
+ return MODE_CLOCK_HIGH;
+
/*
* max crtc width is equal to the max mixer width * 2 and max height is 4K
*/
@@ -1586,6 +1620,17 @@ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
return 0;
}
+/**
+ * dpu_crtc_get_num_lm - Get the number of mixers in this CRTC pipeline
+ * @state: Pointer to drm crtc state object
+ */
+unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state)
+{
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+
+ return cstate->num_mixers;
+}
+
#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
@@ -1668,15 +1713,15 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
state->crtc_x, state->crtc_y, state->crtc_w,
state->crtc_h);
- seq_printf(s, "\tsspp[0]:%s\n",
- pstate->pipe.sspp->cap->name);
- seq_printf(s, "\tmultirect[0]: mode: %d index: %d\n",
- pstate->pipe.multirect_mode, pstate->pipe.multirect_index);
- if (pstate->r_pipe.sspp) {
- seq_printf(s, "\tsspp[1]:%s\n",
- pstate->r_pipe.sspp->cap->name);
- seq_printf(s, "\tmultirect[1]: mode: %d index: %d\n",
- pstate->r_pipe.multirect_mode, pstate->r_pipe.multirect_index);
+
+ for (i = 0; i < PIPES_PER_PLANE; i++) {
+ if (!pstate->pipe[i].sspp)
+ continue;
+ seq_printf(s, "\tsspp[%d]:%s\n",
+ i, pstate->pipe[i].sspp->cap->name);
+ seq_printf(s, "\tmultirect[%d]: mode: %d index: %d\n",
+ i, pstate->pipe[i].multirect_mode,
+ pstate->pipe[i].multirect_index);
}
seq_puts(s, "\n");
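The reworked topology selection can be traced with a few concrete inputs. The sketch below restates the LM/DSC branch in isolation; the function and parameter names are illustrative, not the driver's:

#include <stdio.h>

static void pick_topology(int num_intf, int cwb_enabled, int dsc_enabled,
			  int catalog_dsc_count, int has_3d_merge,
			  int hdisplay)
{
	int num_rt_intf = num_intf - (cwb_enabled ? 1 : 0);
	int num_lm, num_dsc = 0;

	if (dsc_enabled) {
		/* prefer 4:4:2 when enough DSC blocks exist, else 2:2:2 */
		if (catalog_dsc_count >= num_rt_intf * 2)
			num_dsc = num_rt_intf * 2;
		else
			num_dsc = num_rt_intf;
		num_lm = num_dsc;
	} else if (num_rt_intf == 2) {
		num_lm = 2;
	} else if (has_3d_merge) {
		num_lm = (hdisplay > 1080) ? 2 : 1;	/* MAX_HDISPLAY_SPLIT */
	} else {
		num_lm = 1;
	}

	printf("lm=%d dsc=%d\n", num_lm, num_dsc);
}

int main(void)
{
	pick_topology(2, 0, 1, 4, 1, 1440); /* dual DSI + DSC, 4 DSC blocks: lm=4 dsc=4 */
	pick_topology(2, 0, 1, 2, 1, 1440); /* same panel, 2 DSC blocks:     lm=2 dsc=2 */
	return 0;
}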
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 94392b9b9245..455073c7025b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -210,7 +210,7 @@ struct dpu_crtc_state {
bool bw_control;
bool bw_split_vote;
- struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+ struct drm_rect lm_bounds[CRTC_QUAD_MIXERS];
uint64_t input_fence_timeout_ns;
@@ -218,10 +218,10 @@ struct dpu_crtc_state {
/* HW Resources reserved for the crtc */
u32 num_mixers;
- struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+ struct dpu_crtc_mixer mixers[CRTC_QUAD_MIXERS];
u32 num_ctls;
- struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
+ struct dpu_hw_ctl *hw_ctls[CRTC_QUAD_MIXERS];
enum dpu_crtc_crc_source crc_source;
int crc_frame_skip_count;
@@ -267,4 +267,6 @@ static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event);
+unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state);
+
#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 258edaa18fc0..d1cfe81a3373 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -55,7 +55,8 @@
#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
(MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
-#define MAX_CHANNELS_PER_ENC 2
+#define MAX_CHANNELS_PER_ENC 4
+#define MAX_CWB_PER_ENC 2
#define IDLE_SHORT_TIMEOUT 1
@@ -182,7 +183,7 @@ struct dpu_encoder_virt {
struct dpu_encoder_phys *cur_master;
struct dpu_encoder_phys *cur_slave;
struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
- struct dpu_hw_cwb *hw_cwb[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_cwb *hw_cwb[MAX_CWB_PER_ENC];
struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
unsigned int dsc_mask;
@@ -660,7 +661,6 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
struct msm_display_info *disp_info = &dpu_enc->disp_info;
- struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_framebuffer *fb;
@@ -674,22 +674,12 @@ void dpu_encoder_update_topology(struct drm_encoder *drm_enc,
dsc = dpu_encoder_get_dsc_config(drm_enc);
- /* We only support 2 DSC mode (with 2 LM and 1 INTF) */
- if (dsc) {
- /*
- * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces
- * when Display Stream Compression (DSC) is enabled,
- * and when enough DSC blocks are available.
- * This is power-optimal and can drive up to (including) 4k
- * screens.
- */
- WARN(topology->num_intf > 2,
- "DSC topology cannot support more than 2 interfaces\n");
- if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
- topology->num_dsc = 2;
- else
- topology->num_dsc = 1;
- }
+ /*
+ * Set num_dsc to 1 only to mark DSC as enabled; the actual count is
+ * adjusted in dpu_crtc_get_topology().
+ */
+ if (dsc)
+ topology->num_dsc = 1;
connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
if (!connector)
@@ -1160,7 +1150,7 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_cwb[MAX_CHANNELS_PER_ENC];
- int num_ctl, num_pp, num_dsc;
+ int num_ctl, num_pp, num_dsc, num_pp_per_intf;
int num_cwb = 0;
bool is_cwb_encoder;
unsigned int dsc_mask = 0;
@@ -1239,10 +1229,16 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
}
+ /*
+ * In the quad-pipe case there may be 4 PPs but only 2 INTFs, so INTFs
+ * are not mapped 1:1 to PPs. Compute the PP stride per INTF instead.
+ */
+ num_pp_per_intf = num_pp / dpu_enc->num_phys_encs;
+
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
- phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->hw_pp = dpu_enc->hw_pp[num_pp_per_intf * i];
if (!phys->hw_pp) {
DPU_ERROR_ENC(dpu_enc,
"no pp block assigned at idx: %d\n", i);
@@ -2171,15 +2167,12 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
- struct dpu_hw_mixer_cfg mixer;
int i, num_lm;
struct dpu_global_state *global_state;
- struct dpu_hw_blk *hw_lm[2];
- struct dpu_hw_mixer *hw_mixer[2];
+ struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+ struct dpu_hw_mixer *hw_mixer[MAX_CHANNELS_PER_ENC];
struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
- memset(&mixer, 0, sizeof(mixer));
-
/* reset all mixers for this encoder */
if (ctl->ops.clear_all_blendstages)
ctl->ops.clear_all_blendstages(ctl);
@@ -2383,7 +2376,7 @@ void dpu_encoder_helper_phys_setup_cwb(struct dpu_encoder_phys *phys_enc,
*/
cwb_cfg.input = INPUT_MODE_LM_OUT;
- for (int i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ for (int i = 0; i < MAX_CWB_PER_ENC; i++) {
hw_cwb = dpu_enc->hw_cwb[i];
if (!hw_cwb)
continue;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index 61b22d949454..09395d7910ac 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -302,7 +302,7 @@ static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
/* Use merge_3d unless DSC MERGE topology is used */
if (phys_enc->split_role == ENC_ROLE_SOLO &&
- dpu_cstate->num_mixers == CRTC_DUAL_MIXERS &&
+ (dpu_cstate->num_mixers != 1) &&
!dpu_encoder_use_dsc_merge(phys_enc->parent))
return BLEND_3D_H_ROW_INT;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 56a5b596554d..46f348972a97 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -446,7 +446,7 @@ static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
static int dpu_encoder_phys_wb_wait_for_commit_done(
struct dpu_encoder_phys *phys_enc)
{
- unsigned long ret;
+ int ret;
struct dpu_encoder_wait_info wait_info;
struct dpu_encoder_phys_wb *wb_enc = to_dpu_encoder_phys_wb(phys_enc);
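The "unsigned long ret" to "int ret" change above matters because the wait helper returns negative errno values, and a negative result stored in an unsigned variable can never satisfy a "< 0" check. A short demonstration (assuming ETIMEDOUT's Linux value of 110):

#include <stdio.h>

int main(void)
{
	unsigned long uret = (unsigned long)-110;	/* -ETIMEDOUT, wrapped */
	int ret = -110;

	/* uret < 0 is always false for an unsigned type, so the error is lost */
	printf("uret<0: %d, ret<0: %d\n", uret < 0 ? 1 : 0, ret < 0 ? 1 : 0);
	return 0;	/* prints: uret<0: 0, ret<0: 1 */
}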
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index e824cd64fd3f..23bb39b471b7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {
.base = 0x200, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
.base = 0x320, .len = 0x100,}, \
- .format_list = plane_formats_yuv, \
- .num_formats = ARRAY_SIZE(plane_formats_yuv), \
+ .format_list = plane_formats, \
+ .num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = NULL, \
}
@@ -338,7 +338,6 @@ static const struct dpu_sspp_sub_blks dpu_dma_sblk = _DMA_SBLK();
*************************************************************/
static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x50, 0x80, 0xb0, 0x230,
@@ -347,7 +346,6 @@ static const struct dpu_lm_sub_blks msm8998_lm_sblk = {
};
static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
@@ -356,7 +354,6 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
};
static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 7, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68, 0x80, 0x98, 0xb0
@@ -364,7 +361,6 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
};
static const struct dpu_lm_sub_blks sm8750_lm_sblk = {
- .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.maxblendstages = 11, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
/* 0x40 + n*0x30 */
@@ -374,7 +370,6 @@ static const struct dpu_lm_sub_blks sm8750_lm_sblk = {
};
static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
- .maxwidth = DEFAULT_DPU_LINE_WIDTH,
.maxblendstages = 4, /* excluding base layer */
.blendstage_base = { /* offsets relative to mixer base */
0x20, 0x38, 0x50, 0x68
@@ -731,3 +726,4 @@ static const struct dpu_qos_lut_entry sc7180_qos_nrt[] = {
#include "catalog/dpu_10_0_sm8650.h"
#include "catalog/dpu_12_0_sm8750.h"
+#include "catalog/dpu_12_2_glymur.h"
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index a78bb2c334e3..336757103b5a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -24,7 +24,7 @@
#define DPU_MAX_IMG_WIDTH 0x3fff
#define DPU_MAX_IMG_HEIGHT 0x3fff
-#define CRTC_DUAL_MIXERS 2
+#define CRTC_QUAD_MIXERS 4
#define MAX_XIN_COUNT 16
@@ -307,7 +307,6 @@ struct dpu_sspp_sub_blks {
* @blendstage_base: Blend-stage register base offset
*/
struct dpu_lm_sub_blks {
- u32 maxwidth;
u32 maxblendstages;
u32 blendstage_base[MAX_BLOCKS];
};
@@ -750,6 +749,7 @@ struct dpu_mdss_cfg {
const struct dpu_format_extended *vig_formats;
};
+extern const struct dpu_mdss_cfg dpu_glymur_cfg;
extern const struct dpu_mdss_cfg dpu_msm8917_cfg;
extern const struct dpu_mdss_cfg dpu_msm8937_cfg;
extern const struct dpu_mdss_cfg dpu_msm8953_cfg;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index b7013c9822d2..cc7cc6f6f7cd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -71,12 +71,6 @@ struct dpu_hw_dsc *dpu_hw_dsc_init_1_2(struct drm_device *dev,
const struct dpu_dsc_cfg *cfg,
void __iomem *addr);
-/**
- * dpu_hw_dsc_destroy - destroys dsc driver context
- * @dsc: Pointer to dsc driver context returned by dpu_hw_dsc_init
- */
-void dpu_hw_dsc_destroy(struct dpu_hw_dsc *dsc);
-
static inline struct dpu_hw_dsc *to_dpu_hw_dsc(struct dpu_hw_blk *hw)
{
return container_of(hw, struct dpu_hw_dsc, base);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 175639c8bfbb..31451241f083 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -34,7 +34,9 @@
#define DPU_MAX_PLANES 4
#endif
+#define STAGES_PER_PLANE 2
#define PIPES_PER_STAGE 2
+#define PIPES_PER_PLANE (PIPES_PER_STAGE * STAGES_PER_PLANE)
#ifndef DPU_MAX_DE_CURVES
#define DPU_MAX_DE_CURVES 3
#endif
@@ -149,6 +151,10 @@ enum dpu_dspp {
DSPP_1,
DSPP_2,
DSPP_3,
+ DSPP_4,
+ DSPP_5,
+ DSPP_6,
+ DSPP_7,
DSPP_MAX
};
@@ -159,6 +165,8 @@ enum dpu_ctl {
CTL_3,
CTL_4,
CTL_5,
+ CTL_6,
+ CTL_7,
CTL_MAX
};
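The three constants above define the quad-pipe geometry: a plane can span up to STAGES_PER_PLANE mixer pairs, each stage driving PIPES_PER_STAGE pipes, for PIPES_PER_PLANE pipes total. The index math used by the crtc/plane loops earlier in the series follows directly:

#include <stdio.h>

#define STAGES_PER_PLANE 2
#define PIPES_PER_STAGE  2
#define PIPES_PER_PLANE  (PIPES_PER_STAGE * STAGES_PER_PLANE)

int main(void)
{
	for (int stage = 0; stage < STAGES_PER_PLANE; stage++)
		for (int i = 0; i < PIPES_PER_STAGE; i++)
			printf("stage %d, rect %d -> pipe_cfg[%d]\n",
			       stage, i, stage * PIPES_PER_STAGE + i);
	return 0;	/* covers indexes 0..PIPES_PER_PLANE-1 */
}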
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index a306077647c3..f4c9767c418d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1110,7 +1110,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
struct drm_gpuvm *vm;
- vm = msm_kms_init_vm(dpu_kms->dev);
+ vm = msm_kms_init_vm(dpu_kms->dev, dpu_kms->dev->dev->parent);
if (IS_ERR(vm))
return PTR_ERR(vm);
@@ -1505,6 +1505,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
};
static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,glymur-dpu", .data = &dpu_glymur_cfg, },
{ .compatible = "qcom,msm8917-mdp5", .data = &dpu_msm8917_cfg, },
{ .compatible = "qcom,msm8937-mdp5", .data = &dpu_msm8937_cfg, },
{ .compatible = "qcom,msm8953-mdp5", .data = &dpu_msm8953_cfg, },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 6859e8ef6b05..d07a6ab6e7ee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
int i;
for (i = 0; i < DPU_MAX_PLANES; i++) {
+ uint32_t w = src_w, h = src_h;
+
if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
- src_w /= chroma_subsmpl_h;
- src_h /= chroma_subsmpl_v;
+ w /= chroma_subsmpl_h;
+ h /= chroma_subsmpl_v;
}
- pixel_ext->num_ext_pxls_top[i] = src_h;
- pixel_ext->num_ext_pxls_left[i] = src_w;
+ pixel_ext->num_ext_pxls_top[i] = h;
+ pixel_ext->num_ext_pxls_left[i] = w;
}
}
@@ -620,6 +622,7 @@ static void _dpu_plane_color_fill(struct dpu_plane *pdpu,
struct msm_drm_private *priv = plane->dev->dev_private;
struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
u32 fill_color = (color & 0xFFFFFF) | ((alpha & 0xFF) << 24);
+ int i;
DPU_DEBUG_PLANE(pdpu, "\n");
@@ -633,12 +636,13 @@ static void _dpu_plane_color_fill(struct dpu_plane *pdpu,
return;
/* update sspp */
- _dpu_plane_color_fill_pipe(pstate, &pstate->pipe, &pstate->pipe_cfg.dst_rect,
- fill_color, fmt);
-
- if (pstate->r_pipe.sspp)
- _dpu_plane_color_fill_pipe(pstate, &pstate->r_pipe, &pstate->r_pipe_cfg.dst_rect,
+ for (i = 0; i < PIPES_PER_PLANE; i++) {
+ if (!pstate->pipe[i].sspp)
+ continue;
+ _dpu_plane_color_fill_pipe(pstate, &pstate->pipe[i],
+ &pstate->pipe_cfg[i].dst_rect,
fill_color, fmt);
+ }
}
static int dpu_plane_prepare_fb(struct drm_plane *plane,
@@ -740,7 +744,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
* We already have verified scaling against platform limitations.
* Now check if the SSPP supports scaling at all.
*/
- if (!sblk->scaler_blk.len &&
+ if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&
((drm_rect_width(&new_plane_state->src) >> 16 !=
drm_rect_width(&new_plane_state->dst)) ||
(drm_rect_height(&new_plane_state->src) >> 16 !=
@@ -820,10 +824,14 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
u64 max_mdp_clk_rate = kms->perf.max_core_clk_rate;
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
- struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
- struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+ struct dpu_sw_pipe_cfg *pipe_cfg;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg;
+ struct dpu_sw_pipe_cfg init_pipe_cfg;
struct drm_rect fb_rect = { 0 };
+ const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
uint32_t max_linewidth;
+ u32 num_lm;
+ int stage_id, num_stages;
min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO);
max_scale = MAX_DOWNSCALE_RATIO << 16;
@@ -846,10 +854,10 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
return -EINVAL;
}
- /* state->src is 16.16, src_rect is not */
- drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
+ num_lm = dpu_crtc_get_num_lm(crtc_state);
- pipe_cfg->dst_rect = new_plane_state->dst;
+ /* state->src is 16.16, src_rect is not */
+ drm_rect_fp_to_int(&init_pipe_cfg.src_rect, &new_plane_state->src);
fb_rect.x2 = new_plane_state->fb->width;
fb_rect.y2 = new_plane_state->fb->height;
@@ -874,35 +882,94 @@ static int dpu_plane_atomic_check_nosspp(struct drm_plane *plane,
max_linewidth = pdpu->catalog->caps->max_linewidth;
- drm_rect_rotate(&pipe_cfg->src_rect,
+ drm_rect_rotate(&init_pipe_cfg.src_rect,
new_plane_state->fb->width, new_plane_state->fb->height,
new_plane_state->rotation);
- if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
- _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
- if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
- DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
- DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
- return -E2BIG;
+ /*
+ * There is one mixer-pair config for the 1:1:1 and 2:2:1 topologies,
+ * and two mixer-pair configs (left and right half of the screen) for
+ * the 4:4:2 topology. Within a single config, a wide plane exceeding
+ * the line-width limit may still have to be split into two rects. So
+ * handle both splits: first clip the destination rectangle to the
+ * left/right half, then split any remaining wide rectangle within
+ * each half.
+ */
+ num_stages = (num_lm + 1) / 2;
+ /* iterate this plane's mixer configs; stage_id selects the left/right half */
+ for (stage_id = 0; stage_id < num_stages; stage_id++) {
+ struct drm_rect mixer_rect = {
+ .x1 = stage_id * mode->hdisplay / num_stages,
+ .y1 = 0,
+ .x2 = (stage_id + 1) * mode->hdisplay / num_stages,
+ .y2 = mode->vdisplay
+ };
+ int cfg_idx = stage_id * PIPES_PER_STAGE;
+
+ pipe_cfg = &pstate->pipe_cfg[cfg_idx];
+ r_pipe_cfg = &pstate->pipe_cfg[cfg_idx + 1];
+
+ drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
+ pipe_cfg->dst_rect = new_plane_state->dst;
+
+ DPU_DEBUG_PLANE(pdpu, "checking src " DRM_RECT_FMT
+ " vs clip window " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect),
+ DRM_RECT_ARG(&mixer_rect));
+
+ /*
+ * If this plane does not fall into mixer rect, check next
+ * mixer rect.
+ */
+ if (!drm_rect_clip_scaled(&pipe_cfg->src_rect,
+ &pipe_cfg->dst_rect,
+ &mixer_rect)) {
+ memset(pipe_cfg, 0, 2 * sizeof(struct dpu_sw_pipe_cfg));
+
+ continue;
}
- *r_pipe_cfg = *pipe_cfg;
- pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
- pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
- r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
- r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
- } else {
- memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg));
- }
+ pipe_cfg->dst_rect.x1 -= mixer_rect.x1;
+ pipe_cfg->dst_rect.x2 -= mixer_rect.x1;
+
+ DPU_DEBUG_PLANE(pdpu, "Got clip src:" DRM_RECT_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect), DRM_RECT_ARG(&pipe_cfg->dst_rect));
+
+ /* Split wide rect into 2 rect */
+ if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
+ _dpu_plane_calc_clk(mode, pipe_cfg) > max_mdp_clk_rate) {
+
+ if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
+ DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
+ return -E2BIG;
+ }
+
+ memcpy(r_pipe_cfg, pipe_cfg, sizeof(struct dpu_sw_pipe_cfg));
+ pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
+ pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
+ r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
+ r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
+ DPU_DEBUG_PLANE(pdpu, "Split wide plane into:"
+ DRM_RECT_FMT " and " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&pipe_cfg->src_rect),
+ DRM_RECT_ARG(&r_pipe_cfg->src_rect));
+ } else {
+ memset(r_pipe_cfg, 0, sizeof(struct dpu_sw_pipe_cfg));
+ }
- drm_rect_rotate_inv(&pipe_cfg->src_rect,
- new_plane_state->fb->width, new_plane_state->fb->height,
- new_plane_state->rotation);
- if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
- drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
- new_plane_state->fb->width, new_plane_state->fb->height,
+ drm_rect_rotate_inv(&pipe_cfg->src_rect,
+ new_plane_state->fb->width,
+ new_plane_state->fb->height,
new_plane_state->rotation);
+ if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
+ drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
+ new_plane_state->fb->width,
+ new_plane_state->fb->height,
+ new_plane_state->rotation);
+ }
+
pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
return 0;
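The midpoint split in the hunk above halves both the source and destination rectangles when a plane exceeds max_linewidth (or the MDP clock limit). A standalone arithmetic check of the split, with an assumed 5120-pixel line-width limit:

#include <stdio.h>

int main(void)
{
	int x1 = 0, x2 = 8192, max_linewidth = 5120;

	if (x2 - x1 > max_linewidth) {
		int mid = (x1 + x2) >> 1;	/* same >> 1 split as above */
		printf("left: [%d, %d)  right: [%d, %d)\n", x1, mid, mid, x2);
	}
	return 0;	/* prints left: [0, 4096)  right: [4096, 8192) */
}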
@@ -922,6 +989,9 @@ static int dpu_plane_is_multirect_capable(struct dpu_hw_sspp *sspp,
if (MSM_FORMAT_IS_YUV(fmt))
return false;
+ if (!sspp)
+ return true;
+
if (!test_bit(DPU_SSPP_SMART_DMA_V1, &sspp->cap->features) &&
!test_bit(DPU_SSPP_SMART_DMA_V2, &sspp->cap->features))
return false;
@@ -949,6 +1019,23 @@ static int dpu_plane_is_multirect_parallel_capable(struct dpu_hw_sspp *sspp,
dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth);
}
+static bool dpu_plane_get_single_pipe_in_stage(struct dpu_plane_state *pstate,
+ struct dpu_sw_pipe **single_pipe,
+ struct dpu_sw_pipe_cfg **single_pipe_cfg,
+ int stage_index)
+{
+ int pipe_idx;
+
+ pipe_idx = stage_index * PIPES_PER_STAGE;
+ if (drm_rect_width(&pstate->pipe_cfg[pipe_idx].src_rect) != 0 &&
+ drm_rect_width(&pstate->pipe_cfg[pipe_idx + 1].src_rect) == 0) {
+ *single_pipe = &pstate->pipe[pipe_idx];
+ *single_pipe_cfg = &pstate->pipe_cfg[pipe_idx];
+ return true;
+ }
+
+ return false;
+}
static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
struct drm_atomic_state *state,
@@ -958,20 +1045,17 @@ static int dpu_plane_atomic_check_sspp(struct drm_plane *plane,
drm_atomic_get_new_plane_state(state, plane);
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
- struct dpu_sw_pipe *pipe = &pstate->pipe;
- struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
- struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
- struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
- int ret = 0;
-
- ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
- &crtc_state->adjusted_mode,
- new_plane_state);
- if (ret)
- return ret;
+ struct dpu_sw_pipe *pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg;
+ int ret = 0, i;
- if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
- ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg,
+ for (i = 0; i < PIPES_PER_PLANE; i++) {
+ pipe = &pstate->pipe[i];
+ pipe_cfg = &pstate->pipe_cfg[i];
+ if (!drm_rect_width(&pipe_cfg->src_rect))
+ continue;
+ DPU_DEBUG_PLANE(pdpu, "pipe %d is in use, validate it\n", i);
+ ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
&crtc_state->adjusted_mode,
new_plane_state);
if (ret)
@@ -1014,20 +1098,24 @@ static bool dpu_plane_try_multirect_parallel(struct dpu_sw_pipe *pipe, struct dp
static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate,
struct dpu_plane_state *prev_adjacent_pstate,
const struct msm_format *fmt,
- uint32_t max_linewidth)
+ uint32_t max_linewidth, int stage_index)
{
- struct dpu_sw_pipe *pipe = &pstate->pipe;
- struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
- struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
- struct dpu_sw_pipe *prev_pipe = &prev_adjacent_pstate->pipe;
- struct dpu_sw_pipe_cfg *prev_pipe_cfg = &prev_adjacent_pstate->pipe_cfg;
+ struct dpu_sw_pipe *pipe, *prev_pipe;
+ struct dpu_sw_pipe_cfg *pipe_cfg, *prev_pipe_cfg;
const struct msm_format *prev_fmt = msm_framebuffer_format(prev_adjacent_pstate->base.fb);
u16 max_tile_height = 1;
- if (prev_adjacent_pstate->r_pipe.sspp != NULL ||
+ if (!dpu_plane_get_single_pipe_in_stage(pstate, &pipe,
+ &pipe_cfg, stage_index))
+ return false;
+
+ if (!dpu_plane_get_single_pipe_in_stage(prev_adjacent_pstate,
+ &prev_pipe, &prev_pipe_cfg,
+ stage_index) ||
prev_pipe->multirect_mode != DPU_SSPP_MULTIRECT_NONE)
return false;
+ /* Do not validate the SSPP of the current plane when it has not been assigned yet */
if (!dpu_plane_is_multirect_capable(pipe->sspp, pipe_cfg, fmt) ||
!dpu_plane_is_multirect_capable(prev_pipe->sspp, prev_pipe_cfg, prev_fmt))
return false;
@@ -1038,11 +1126,6 @@ static int dpu_plane_try_multirect_shared(struct dpu_plane_state *pstate,
if (MSM_FORMAT_IS_UBWC(prev_fmt))
max_tile_height = max(max_tile_height, prev_fmt->tile_height);
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
-
- r_pipe->sspp = NULL;
-
if (dpu_plane_is_parallel_capable(pipe_cfg, fmt, max_linewidth) &&
dpu_plane_is_parallel_capable(prev_pipe_cfg, prev_fmt, max_linewidth) &&
(pipe_cfg->dst_rect.x1 >= prev_pipe_cfg->dst_rect.x2 ||
@@ -1083,10 +1166,10 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
- struct dpu_sw_pipe *pipe = &pstate->pipe;
- struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
- struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
- struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+ struct dpu_sw_pipe *pipe = &pstate->pipe[0];
+ struct dpu_sw_pipe *r_pipe = &pstate->pipe[1];
+ struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg[0];
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->pipe_cfg[1];
const struct drm_crtc_state *crtc_state = NULL;
uint32_t max_linewidth = dpu_kms->catalog->caps->max_linewidth;
@@ -1130,7 +1213,7 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
drm_atomic_get_old_plane_state(state, plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(plane_state);
struct drm_crtc_state *crtc_state = NULL;
- int ret;
+ int ret, i;
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
@@ -1148,8 +1231,8 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
* resources are freed by dpu_crtc_assign_plane_resources(),
* but clean them here.
*/
- pstate->pipe.sspp = NULL;
- pstate->r_pipe.sspp = NULL;
+ for (i = 0; i < PIPES_PER_PLANE; i++)
+ pstate->pipe[i].sspp = NULL;
return 0;
}
@@ -1171,37 +1254,72 @@ static int dpu_plane_virtual_atomic_check(struct drm_plane *plane,
return 0;
}
+static int dpu_plane_assign_resource_in_stage(struct dpu_sw_pipe *pipe,
+ struct dpu_sw_pipe_cfg *pipe_cfg,
+ struct drm_plane_state *plane_state,
+ struct dpu_global_state *global_state,
+ struct drm_crtc *crtc,
+ struct dpu_rm_sspp_requirements *reqs)
+{
+ struct drm_plane *plane = plane_state->plane;
+ struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ struct dpu_sw_pipe *r_pipe = pipe + 1;
+ struct dpu_sw_pipe_cfg *r_pipe_cfg = pipe_cfg + 1;
+
+ if (drm_rect_width(&pipe_cfg->src_rect) == 0)
+ return 0;
+
+ pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, reqs);
+ if (!pipe->sspp)
+ return -ENODEV;
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ if (drm_rect_width(&r_pipe_cfg->src_rect) == 0)
+ return 0;
+
+ if (dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
+ pipe->sspp,
+ msm_framebuffer_format(plane_state->fb),
+ dpu_kms->catalog->caps->max_linewidth))
+ return 0;
+
+ r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, reqs);
+ if (!r_pipe->sspp)
+ return -ENODEV;
+ r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+
+ return 0;
+}
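
The helper above encodes a fixed fallback order per stage: an unused stage is skipped, the first rect always gets an SSPP, the second rect first tries to ride along on the same SSPP in parallel multirect, and only then is a second SSPP reserved. A hedged, stub-typed sketch of that flow (sspp_reserve() and try_multirect_parallel() are stand-ins, not the driver's API):

#include <errno.h>
#include <stdbool.h>

struct stage { int rect_w[2]; void *sspp[2]; };

void *sspp_reserve(void);                    /* stand-in for dpu_rm_reserve_sspp() */
bool try_multirect_parallel(struct stage *st);

static int assign_stage(struct stage *st)
{
	if (!st->rect_w[0])
		return 0;                    /* stage unused */

	st->sspp[0] = sspp_reserve();
	if (!st->sspp[0])
		return -ENODEV;

	if (!st->rect_w[1])
		return 0;                    /* solo mode: one SSPP */

	if (try_multirect_parallel(st))
		return 0;                    /* both rects share one SSPP */

	st->sspp[1] = sspp_reserve();        /* fall back to a second SSPP */
	return st->sspp[1] ? 0 : -ENODEV;
}
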
+
static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
struct dpu_global_state *global_state,
struct drm_atomic_state *state,
struct drm_plane_state *plane_state,
- struct drm_plane_state *prev_adjacent_plane_state)
+ struct drm_plane_state **prev_adjacent_plane_state)
{
const struct drm_crtc_state *crtc_state = NULL;
struct drm_plane *plane = plane_state->plane;
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
struct dpu_rm_sspp_requirements reqs;
- struct dpu_plane_state *pstate, *prev_adjacent_pstate;
+ struct dpu_plane_state *pstate, *prev_adjacent_pstate[STAGES_PER_PLANE];
struct dpu_sw_pipe *pipe;
- struct dpu_sw_pipe *r_pipe;
struct dpu_sw_pipe_cfg *pipe_cfg;
- struct dpu_sw_pipe_cfg *r_pipe_cfg;
const struct msm_format *fmt;
+ int i, ret;
if (plane_state->crtc)
crtc_state = drm_atomic_get_new_crtc_state(state,
plane_state->crtc);
pstate = to_dpu_plane_state(plane_state);
- prev_adjacent_pstate = prev_adjacent_plane_state ?
- to_dpu_plane_state(prev_adjacent_plane_state) : NULL;
- pipe = &pstate->pipe;
- r_pipe = &pstate->r_pipe;
- pipe_cfg = &pstate->pipe_cfg;
- r_pipe_cfg = &pstate->r_pipe_cfg;
-
- pipe->sspp = NULL;
- r_pipe->sspp = NULL;
+ for (i = 0; i < STAGES_PER_PLANE; i++)
+ prev_adjacent_pstate[i] = prev_adjacent_plane_state[i] ?
+ to_dpu_plane_state(prev_adjacent_plane_state[i]) : NULL;
+
+ for (i = 0; i < PIPES_PER_PLANE; i++)
+ pstate->pipe[i].sspp = NULL;
if (!plane_state->fb)
return -EINVAL;
@@ -1213,42 +1331,24 @@ static int dpu_plane_virtual_assign_resources(struct drm_crtc *crtc,
reqs.rot90 = drm_rotation_90_or_270(plane_state->rotation);
- if (drm_rect_width(&r_pipe_cfg->src_rect) == 0) {
- if (!prev_adjacent_pstate ||
- !dpu_plane_try_multirect_shared(pstate, prev_adjacent_pstate, fmt,
- dpu_kms->catalog->caps->max_linewidth)) {
- pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!pipe->sspp)
- return -ENODEV;
-
- r_pipe->sspp = NULL;
+ for (i = 0; i < STAGES_PER_PLANE; i++) {
+ if (prev_adjacent_pstate[i] &&
+ dpu_plane_try_multirect_shared(pstate, prev_adjacent_pstate[i], fmt,
+ dpu_kms->catalog->caps->max_linewidth,
+ i))
+ continue;
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ if (dpu_plane_get_single_pipe_in_stage(pstate, &pipe, &pipe_cfg, i))
+ prev_adjacent_plane_state[i] = plane_state;
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
- }
- } else {
- pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!pipe->sspp)
- return -ENODEV;
-
- if (!dpu_plane_try_multirect_parallel(pipe, pipe_cfg, r_pipe, r_pipe_cfg,
- pipe->sspp,
- msm_framebuffer_format(plane_state->fb),
- dpu_kms->catalog->caps->max_linewidth)) {
- /* multirect is not possible, use two SSPP blocks */
- r_pipe->sspp = dpu_rm_reserve_sspp(&dpu_kms->rm, global_state, crtc, &reqs);
- if (!r_pipe->sspp)
- return -ENODEV;
-
- pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
-
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
- }
+ pipe = &pstate->pipe[i * PIPES_PER_STAGE];
+ pipe_cfg = &pstate->pipe_cfg[i * PIPES_PER_STAGE];
+ ret = dpu_plane_assign_resource_in_stage(pipe, pipe_cfg,
+ plane_state,
+ global_state,
+ crtc, &reqs);
+ if (ret)
+ return ret;
}
return dpu_plane_atomic_check_sspp(plane, state, crtc_state);
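
Note the bookkeeping order in the loop above: each stage first tries shared multirect with the previous adjacent plane, and only a plane that still has a single active pipe in a stage is recorded as the next pairing candidate for that stage. A condensed model with hypothetical helpers:

#include <stdbool.h>

#define NSTAGES 2	/* assumption: STAGES_PER_PLANE */

struct plane;
bool try_share(struct plane *cur, struct plane *prev, int stage);
bool has_single_pipe(struct plane *p, int stage);
int  reserve_stage(struct plane *p, int stage);

/* hedged model of the per-stage pairing bookkeeping */
static int assign_all(struct plane **planes, int n)
{
	struct plane *prev[NSTAGES] = { 0 };

	for (int i = 0; i < n; i++) {
		for (int s = 0; s < NSTAGES; s++) {
			if (prev[s] && try_share(planes[i], prev[s], s))
				continue;	/* paired with the previous plane */
			if (has_single_pipe(planes[i], s))
				prev[s] = planes[i];	/* candidate for the next plane */
			int ret = reserve_stage(planes[i], s);
			if (ret)
				return ret;
		}
	}
	return 0;
}
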
@@ -1261,7 +1361,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
unsigned int num_planes)
{
unsigned int i;
- struct drm_plane_state *prev_adjacent_plane_state = NULL;
+ struct drm_plane_state *prev_adjacent_plane_state[STAGES_PER_PLANE] = { NULL };
for (i = 0; i < num_planes; i++) {
struct drm_plane_state *plane_state = states[i];
@@ -1274,9 +1374,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
state, plane_state,
prev_adjacent_plane_state);
if (ret)
- break;
-
- prev_adjacent_plane_state = plane_state;
+ return ret;
}
return 0;
@@ -1312,6 +1410,7 @@ void dpu_plane_flush(struct drm_plane *plane)
{
struct dpu_plane *pdpu;
struct dpu_plane_state *pstate;
+ int i;
if (!plane || !plane->state) {
DPU_ERROR("invalid plane\n");
@@ -1332,8 +1431,8 @@ void dpu_plane_flush(struct drm_plane *plane)
/* force 100% alpha */
_dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
else {
- dpu_plane_flush_csc(pdpu, &pstate->pipe);
- dpu_plane_flush_csc(pdpu, &pstate->r_pipe);
+ for (i = 0; i < PIPES_PER_PLANE; i++)
+ dpu_plane_flush_csc(pdpu, &pstate->pipe[i]);
}
/* flag h/w flush complete */
@@ -1434,15 +1533,12 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane,
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
- struct dpu_sw_pipe *pipe = &pstate->pipe;
- struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
bool is_rt_pipe;
const struct msm_format *fmt =
msm_framebuffer_format(fb);
- struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
- struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+ int i;
pstate->pending = true;
@@ -1457,12 +1553,11 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane,
crtc->base.id, DRM_RECT_ARG(&state->dst),
&fmt->pixel_format, MSM_FORMAT_IS_UBWC(fmt));
- dpu_plane_sspp_update_pipe(plane, pipe, pipe_cfg, fmt,
- drm_mode_vrefresh(&crtc->mode),
- &pstate->layout);
-
- if (r_pipe->sspp) {
- dpu_plane_sspp_update_pipe(plane, r_pipe, r_pipe_cfg, fmt,
+ for (i = 0; i < PIPES_PER_PLANE; i++) {
+ if (!drm_rect_width(&pstate->pipe_cfg[i].src_rect))
+ continue;
+ dpu_plane_sspp_update_pipe(plane, &pstate->pipe[i],
+ &pstate->pipe_cfg[i], fmt,
drm_mode_vrefresh(&crtc->mode),
&pstate->layout);
}
@@ -1470,15 +1565,17 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane,
if (pstate->needs_qos_remap)
pstate->needs_qos_remap = false;
- pstate->plane_fetch_bw = _dpu_plane_calc_bw(pdpu->catalog, fmt,
- &crtc->mode, pipe_cfg);
-
- pstate->plane_clk = _dpu_plane_calc_clk(&crtc->mode, pipe_cfg);
-
- if (r_pipe->sspp) {
- pstate->plane_fetch_bw += _dpu_plane_calc_bw(pdpu->catalog, fmt, &crtc->mode, r_pipe_cfg);
+ pstate->plane_fetch_bw = 0;
+ pstate->plane_clk = 0;
+ for (i = 0; i < PIPES_PER_PLANE; i++) {
+ if (!drm_rect_width(&pstate->pipe_cfg[i].src_rect))
+ continue;
+ pstate->plane_fetch_bw += _dpu_plane_calc_bw(pdpu->catalog, fmt,
+ &crtc->mode, &pstate->pipe_cfg[i]);
- pstate->plane_clk = max(pstate->plane_clk, _dpu_plane_calc_clk(&crtc->mode, r_pipe_cfg));
+ pstate->plane_clk = max(pstate->plane_clk,
+ _dpu_plane_calc_clk(&crtc->mode,
+ &pstate->pipe_cfg[i]));
}
}
@@ -1486,17 +1583,28 @@ static void _dpu_plane_atomic_disable(struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
- struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
+ struct dpu_sw_pipe *pipe;
+ int i;
- trace_dpu_plane_disable(DRMID(plane), false,
- pstate->pipe.multirect_mode);
+ for (i = 0; i < PIPES_PER_PLANE; i++) {
+ pipe = &pstate->pipe[i];
+ if (!pipe->sspp)
+ continue;
+
+ trace_dpu_plane_disable(DRMID(plane), false,
+ pstate->pipe[i].multirect_mode);
- if (r_pipe->sspp) {
- r_pipe->multirect_index = DPU_SSPP_RECT_SOLO;
- r_pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ if (i % PIPES_PER_STAGE == 0)
+ continue;
- if (r_pipe->sspp->ops.setup_multirect)
- r_pipe->sspp->ops.setup_multirect(r_pipe);
+ /*
+ * Clear multirect on the right pipe so that the SSPP
+ * can later be reused in solo mode.
+ */
+ pipe->multirect_index = DPU_SSPP_RECT_SOLO;
+ pipe->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+ if (pipe->sspp->ops.setup_multirect)
+ pipe->sspp->ops.setup_multirect(pipe);
}
pstate->pending = true;
@@ -1591,31 +1699,26 @@ static void dpu_plane_atomic_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
const struct dpu_plane_state *pstate = to_dpu_plane_state(state);
- const struct dpu_sw_pipe *pipe = &pstate->pipe;
- const struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg;
- const struct dpu_sw_pipe *r_pipe = &pstate->r_pipe;
- const struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->r_pipe_cfg;
+ const struct dpu_sw_pipe *pipe;
+ const struct dpu_sw_pipe_cfg *pipe_cfg;
+ int i;
drm_printf(p, "\tstage=%d\n", pstate->stage);
- if (pipe->sspp) {
- drm_printf(p, "\tsspp[0]=%s\n", pipe->sspp->cap->name);
- drm_printf(p, "\tmultirect_mode[0]=%s\n",
+ for (i = 0; i < PIPES_PER_PLANE; i++) {
+ pipe = &pstate->pipe[i];
+ if (!pipe->sspp)
+ continue;
+ pipe_cfg = &pstate->pipe_cfg[i];
+ drm_printf(p, "\tsspp[%d]=%s\n", i, pipe->sspp->cap->name);
+ drm_printf(p, "\tmultirect_mode[%d]=%s\n", i,
dpu_get_multirect_mode(pipe->multirect_mode));
- drm_printf(p, "\tmultirect_index[0]=%s\n",
+ drm_printf(p, "\tmultirect_index[%d]=%s\n", i,
dpu_get_multirect_index(pipe->multirect_index));
- drm_printf(p, "\tsrc[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->src_rect));
- drm_printf(p, "\tdst[0]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&pipe_cfg->dst_rect));
- }
-
- if (r_pipe->sspp) {
- drm_printf(p, "\tsspp[1]=%s\n", r_pipe->sspp->cap->name);
- drm_printf(p, "\tmultirect_mode[1]=%s\n",
- dpu_get_multirect_mode(r_pipe->multirect_mode));
- drm_printf(p, "\tmultirect_index[1]=%s\n",
- dpu_get_multirect_index(r_pipe->multirect_index));
- drm_printf(p, "\tsrc[1]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&r_pipe_cfg->src_rect));
- drm_printf(p, "\tdst[1]=" DRM_RECT_FMT "\n", DRM_RECT_ARG(&r_pipe_cfg->dst_rect));
+ drm_printf(p, "\tsrc[%d]=" DRM_RECT_FMT "\n", i,
+ DRM_RECT_ARG(&pipe_cfg->src_rect));
+ drm_printf(p, "\tdst[%d]=" DRM_RECT_FMT "\n", i,
+ DRM_RECT_ARG(&pipe_cfg->dst_rect));
}
}
@@ -1653,14 +1756,17 @@ void dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+ int i;
if (!pdpu->is_rt_pipe)
return;
pm_runtime_get_sync(&dpu_kms->pdev->dev);
- _dpu_plane_set_qos_ctrl(plane, &pstate->pipe, enable);
- if (pstate->r_pipe.sspp)
- _dpu_plane_set_qos_ctrl(plane, &pstate->r_pipe, enable);
+ for (i = 0; i < PIPES_PER_PLANE; i++) {
+ if (!pstate->pipe[i].sspp)
+ continue;
+ _dpu_plane_set_qos_ctrl(plane, &pstate->pipe[i], enable);
+ }
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index a3a6e9028333..1ef5a041b8ac 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -17,10 +17,8 @@
/**
* struct dpu_plane_state: Define dpu extension of drm plane state object
* @base: base drm plane state object
- * @pipe: software pipe description
- * @r_pipe: software pipe description of the second pipe
- * @pipe_cfg: software pipe configuration
- * @r_pipe_cfg: software pipe configuration for the second pipe
+ * @pipe: software pipe description array
+ * @pipe_cfg: software pipe configuration array
* @stage: assigned by crtc blender
* @needs_qos_remap: qos remap settings need to be updated
* @multirect_index: index of the rectangle of SSPP
@@ -33,10 +31,8 @@
*/
struct dpu_plane_state {
struct drm_plane_state base;
- struct dpu_sw_pipe pipe;
- struct dpu_sw_pipe r_pipe;
- struct dpu_sw_pipe_cfg pipe_cfg;
- struct dpu_sw_pipe_cfg r_pipe_cfg;
+ struct dpu_sw_pipe pipe[PIPES_PER_PLANE];
+ struct dpu_sw_pipe_cfg pipe_cfg[PIPES_PER_PLANE];
enum dpu_stage stage;
bool needs_qos_remap;
bool pending;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 25382120cb1a..f6568ed8375f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -374,7 +374,11 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!rm->mixer_blks[i])
continue;
- lm_count = 0;
+ /*
+ * Reset lm_count to an even index. This drops the previously
+ * chosen primary mixer if its peer could not be found, while
+ * keeping any already-paired mixers (e.g. 3 -> 2, 2 -> 2).
+ */
+ lm_count &= ~1;
lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
@@ -842,7 +846,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
if (!reqs->scale && !reqs->yuv)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
- if (!hw_sspp && reqs->scale)
+ if (!hw_sspp && !reqs->yuv)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
if (!hw_sspp)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);
@@ -865,6 +869,21 @@ void dpu_rm_release_all_sspp(struct dpu_global_state *global_state,
ARRAY_SIZE(global_state->sspp_to_crtc_id), crtc_id);
}
+static char *dpu_hw_blk_type_name[] = {
+ [DPU_HW_BLK_TOP] = "TOP",
+ [DPU_HW_BLK_SSPP] = "SSPP",
+ [DPU_HW_BLK_LM] = "LM",
+ [DPU_HW_BLK_CTL] = "CTL",
+ [DPU_HW_BLK_PINGPONG] = "PINGPONG",
+ [DPU_HW_BLK_INTF] = "INTF",
+ [DPU_HW_BLK_WB] = "WB",
+ [DPU_HW_BLK_DSPP] = "DSPP",
+ [DPU_HW_BLK_MERGE_3D] = "MERGE_3D",
+ [DPU_HW_BLK_DSC] = "DSC",
+ [DPU_HW_BLK_CDM] = "CDM",
+ [DPU_HW_BLK_MAX] = "unknown",
+};
+
/**
* dpu_rm_get_assigned_resources - Get hw resources of the given type that are
* assigned to this encoder
@@ -946,13 +965,13 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
}
if (num_blks == blks_size) {
- DPU_ERROR("More than %d resources assigned to crtc %d\n",
- blks_size, crtc_id);
+ DPU_ERROR("More than %d %s assigned to crtc %d\n",
+ blks_size, dpu_hw_blk_type_name[type], crtc_id);
break;
}
if (!hw_blks[i]) {
- DPU_ERROR("Allocated resource %d unavailable to assign to crtc %d\n",
- type, crtc_id);
+ DPU_ERROR("%s unavailable to assign to crtc %d\n",
+ dpu_hw_blk_type_name[type], crtc_id);
break;
}
blks[num_blks++] = hw_blks[i];
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index 5307cbc2007c..cb24ad2a6d8d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -651,9 +651,9 @@ TRACE_EVENT(dpu_crtc_setup_mixer,
TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
struct drm_plane_state *state, struct dpu_plane_state *pstate,
uint32_t stage_idx, uint32_t pixel_format,
- uint64_t modifier),
+ struct dpu_sw_pipe *pipe, uint64_t modifier),
TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx,
- pixel_format, modifier),
+ pixel_format, pipe, modifier),
TP_STRUCT__entry(
__field( uint32_t, crtc_id )
__field( uint32_t, plane_id )
@@ -676,9 +676,9 @@ TRACE_EVENT(dpu_crtc_setup_mixer,
__entry->dst_rect = drm_plane_state_dest(state);
__entry->stage_idx = stage_idx;
__entry->stage = pstate->stage;
- __entry->sspp = pstate->pipe.sspp->idx;
- __entry->multirect_idx = pstate->pipe.multirect_index;
- __entry->multirect_mode = pstate->pipe.multirect_mode;
+ __entry->sspp = pipe->sspp->idx;
+ __entry->multirect_idx = pipe->multirect_index;
+ __entry->multirect_mode = pipe->multirect_mode;
__entry->pixel_format = pixel_format;
__entry->modifier = modifier;
),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 8ff496082902..7545c0293efb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
fb->width, dpu_wb_conn->maxlinewidth);
return -EINVAL;
+ } else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+ DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier);
+ return -EINVAL;
}
return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);
@@ -80,7 +83,6 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
static const struct drm_connector_funcs dpu_wb_conn_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -131,12 +133,9 @@ int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
- /* DPU initializes the encoder and sets it up completely for writeback
- * cases and hence should use the new API drm_writeback_connector_init_with_encoder
- * to initialize the writeback connector
- */
- rc = drm_writeback_connector_init_with_encoder(dev, &dpu_wb_conn->base, enc,
- &dpu_wb_conn_funcs, format_list, num_formats);
+ rc = drmm_writeback_connector_init(dev, &dpu_wb_conn->base,
+ &dpu_wb_conn_funcs, enc,
+ format_list, num_formats);
if (!rc)
dpu_wb_conn->wb_enc = enc;
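
Switching to drmm_writeback_connector_init() ties the connector's lifetime to the drm_device, which is also why the explicit .destroy = drm_connector_cleanup hook is removed from dpu_wb_conn_funcs earlier in this patch: DRM-managed teardown runs automatically on device release. The call shape, mirroring the code above:

/* drmm-managed: no .destroy hook, no manual cleanup on error paths */
rc = drmm_writeback_connector_init(dev, &dpu_wb_conn->base,
				   &dpu_wb_conn_funcs, enc,
				   format_list, num_formats);
if (rc)
	return rc;
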
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index da53ca88251e..e8066f9fd534 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -527,13 +527,14 @@ static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc);
int ret;
ret = drm_crtc_vblank_get(crtc);
if (ret)
return;
- ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
+ ret = wait_event_timeout(*queue,
!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
mdp4_crtc->flushed_mask),
msecs_to_jiffies(50));
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 0952c7f18abd..809ca191e9de 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -391,11 +391,9 @@ static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
static int mdp4_kms_init(struct drm_device *dev)
{
- struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms));
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
struct drm_gpuvm *vm;
int ret;
u32 major, minor;
@@ -458,29 +456,14 @@ static int mdp4_kms_init(struct drm_device *dev)
mdp4_disable(mdp4_kms);
mdelay(16);
- mmu = msm_iommu_new(&pdev->dev, 0);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
+ vm = msm_kms_init_vm(mdp4_kms->dev, NULL);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
goto fail;
- } else if (!mmu) {
- DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- vm = NULL;
- } else {
- vm = msm_gem_vm_create(dev, mmu, "mdp4",
- 0x1000, 0x100000000 - 0x1000,
- true);
-
- if (IS_ERR(vm)) {
- if (!IS_ERR(mmu))
- mmu->funcs->destroy(mmu);
- ret = PTR_ERR(vm);
- goto fail;
- }
-
- kms->vm = vm;
}
+ kms->vm = vm;
+
ret = modeset_init(mdp4_kms);
if (ret) {
DRM_DEV_ERROR(dev->dev, "modeset_init failed: %d\n", ret);
@@ -529,7 +512,7 @@ static int mdp4_probe(struct platform_device *pdev)
mdp4_kms = devm_kzalloc(dev, sizeof(*mdp4_kms), GFP_KERNEL);
if (!mdp4_kms)
- return dev_err_probe(dev, -ENOMEM, "failed to allocate kms\n");
+ return -ENOMEM;
mdp4_kms->mmio = msm_ioremap(pdev, NULL);
if (IS_ERR(mdp4_kms->mmio))
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index fb348583dc84..06458d4ee48c 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -202,6 +202,6 @@ static inline struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev)
}
#endif
-struct clk *mpd4_get_lcdc_clock(struct drm_device *dev);
+struct clk *mdp4_get_lcdc_clock(struct drm_device *dev);
#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 06a307c1272d..1051873057f6 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
@@ -375,7 +375,7 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev)
drm_encoder_helper_add(encoder, &mdp4_lcdc_encoder_helper_funcs);
- mdp4_lcdc_encoder->lcdc_clk = mpd4_get_lcdc_clock(dev);
+ mdp4_lcdc_encoder->lcdc_clk = mdp4_get_lcdc_clock(dev);
if (IS_ERR(mdp4_lcdc_encoder->lcdc_clk)) {
DRM_DEV_ERROR(dev->dev, "failed to get lvds_clk\n");
return ERR_CAST(mdp4_lcdc_encoder->lcdc_clk);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index fa2c29470510..04c49bf3d854 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
@@ -54,7 +54,7 @@ static const struct pll_rate *find_rate(unsigned long rate)
return &freqtbl[i-1];
}
-static int mpd4_lvds_pll_enable(struct clk_hw *hw)
+static int mdp4_lvds_pll_enable(struct clk_hw *hw)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
@@ -80,7 +80,7 @@ static int mpd4_lvds_pll_enable(struct clk_hw *hw)
return 0;
}
-static void mpd4_lvds_pll_disable(struct clk_hw *hw)
+static void mdp4_lvds_pll_disable(struct clk_hw *hw)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
struct mdp4_kms *mdp4_kms = get_kms(lvds_pll);
@@ -91,21 +91,24 @@ static void mpd4_lvds_pll_disable(struct clk_hw *hw)
mdp4_write(mdp4_kms, REG_MDP4_LVDS_PHY_PLL_CTRL_0, 0x0);
}
-static unsigned long mpd4_lvds_pll_recalc_rate(struct clk_hw *hw,
+static unsigned long mdp4_lvds_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
return lvds_pll->pixclk;
}
-static long mpd4_lvds_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int mdp4_lvds_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- const struct pll_rate *pll_rate = find_rate(rate);
- return pll_rate->rate;
+ const struct pll_rate *pll_rate = find_rate(req->rate);
+
+ req->rate = pll_rate->rate;
+
+ return 0;
}
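
This is the first of several identical conversions in this series: the legacy .round_rate clk hook returns a long that conflates rates and error codes, while .determine_rate adjusts a struct clk_rate_request in place and returns a status code. The recurring shape, as a hedged template (FOO_MIN/FOO_MAX are placeholders):

static int foo_determine_rate(struct clk_hw *hw,
			      struct clk_rate_request *req)
{
	/* clamp or snap the requested rate in place */
	req->rate = clamp_t(unsigned long, req->rate, FOO_MIN, FOO_MAX);

	/* dividers may also adjust the parent rate:
	 * req->best_parent_rate = ...;
	 */

	return 0;	/* negative errno on failure */
}
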
-static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+static int mdp4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct mdp4_lvds_pll *lvds_pll = to_mdp4_lvds_pll(hw);
@@ -114,26 +117,26 @@ static int mpd4_lvds_pll_set_rate(struct clk_hw *hw, unsigned long rate,
}
-static const struct clk_ops mpd4_lvds_pll_ops = {
- .enable = mpd4_lvds_pll_enable,
- .disable = mpd4_lvds_pll_disable,
- .recalc_rate = mpd4_lvds_pll_recalc_rate,
- .round_rate = mpd4_lvds_pll_round_rate,
- .set_rate = mpd4_lvds_pll_set_rate,
+static const struct clk_ops mdp4_lvds_pll_ops = {
+ .enable = mdp4_lvds_pll_enable,
+ .disable = mdp4_lvds_pll_disable,
+ .recalc_rate = mdp4_lvds_pll_recalc_rate,
+ .determine_rate = mdp4_lvds_pll_determine_rate,
+ .set_rate = mdp4_lvds_pll_set_rate,
};
-static const struct clk_parent_data mpd4_lvds_pll_parents[] = {
+static const struct clk_parent_data mdp4_lvds_pll_parents[] = {
{ .fw_name = "pxo", .name = "pxo", },
};
static struct clk_init_data pll_init = {
- .name = "mpd4_lvds_pll",
- .ops = &mpd4_lvds_pll_ops,
- .parent_data = mpd4_lvds_pll_parents,
- .num_parents = ARRAY_SIZE(mpd4_lvds_pll_parents),
+ .name = "mdp4_lvds_pll",
+ .ops = &mdp4_lvds_pll_ops,
+ .parent_data = mdp4_lvds_pll_parents,
+ .num_parents = ARRAY_SIZE(mdp4_lvds_pll_parents),
};
-static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
+static struct clk_hw *mdp4_lvds_pll_init(struct drm_device *dev)
{
struct mdp4_lvds_pll *lvds_pll;
int ret;
@@ -156,14 +159,14 @@ static struct clk_hw *mpd4_lvds_pll_init(struct drm_device *dev)
return &lvds_pll->pll_hw;
}
-struct clk *mpd4_get_lcdc_clock(struct drm_device *dev)
+struct clk *mdp4_get_lcdc_clock(struct drm_device *dev)
{
struct clk_hw *hw;
struct clk *clk;
/* TODO: do we need different pll in other cases? */
- hw = mpd4_lvds_pll_init(dev);
+ hw = mdp4_lvds_pll_init(dev);
if (IS_ERR(hw)) {
DRM_DEV_ERROR(dev->dev, "failed to register LVDS PLL\n");
return ERR_CAST(hw);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 4c4900a7beda..373ae7d9bf01 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1234,6 +1234,7 @@ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ wait_queue_head_t *queue = drm_crtc_vblank_waitqueue(crtc);
int ret;
/* Should not call this function if crtc is disabled. */
@@ -1244,7 +1245,7 @@ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
if (ret)
return;
- ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
+ ret = wait_event_timeout(*queue,
((mdp5_ctl_get_commit_status(ctl) &
mdp5_crtc->flushed_mask) == 0),
msecs_to_jiffies(50));
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 5b6ca8dd929e..61edf6864092 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -534,7 +534,7 @@ static int mdp5_kms_init(struct drm_device *dev)
}
mdelay(16);
- vm = msm_kms_init_vm(mdp5_kms->dev);
+ vm = msm_kms_init_vm(mdp5_kms->dev, pdev->dev.parent);
if (IS_ERR(vm)) {
ret = PTR_ERR(vm);
goto fail;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 7c790406d533..4ca183fb61a9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -336,8 +336,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -373,8 +372,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
int min_scale, max_scale;
int ret;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
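
drm_atomic_get_existing_crtc_state() predates the old/new state split and is deprecated; both call sites here want the new, to-be-committed CRTC state, so drm_atomic_get_new_crtc_state() is the direct replacement:

/* before: ambiguous accessor, returns whichever state is tracked */
crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);

/* after: explicitly the new state for this commit */
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
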
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
index b5f452bd7ada..53bd1dcde15f 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
@@ -38,6 +38,7 @@
* struct msm_disp_state - structure to store current dpu state
* @dev: device pointer
* @drm_dev: drm device pointer
+ * @blocks: list head for hardware state blocks
* @atomic_state: atomic state duplicated at the time of the error
* @time: timestamp at which the coredump was captured
*/
@@ -55,7 +56,7 @@ struct msm_disp_state {
/**
* struct msm_disp_state_block - structure to store each hardware block state
* @name: name of the block
- * @drm_dev: handle to the linked list head
+ * @node: handle to the linked list head
* @size: size of the register space of this hardware block
* @state: array holding the register dump of this hardware block
* @base_addr: starting address of this hardware block's register space
@@ -88,8 +89,9 @@ void msm_disp_snapshot_destroy(struct drm_device *drm_dev);
* msm_disp_snapshot_state_sync - synchronously snapshot display state
* @kms: the kms object
*
- * Returns state or error
+ * Returns: state or error
*
+ * Context:
* Must be called with &kms->dump_mutex held
*/
struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms);
@@ -97,7 +99,7 @@ struct msm_disp_state *msm_disp_snapshot_state_sync(struct msm_kms *kms);
/**
* msm_disp_snapshot_state - trigger to dump the display snapshot
* @drm_dev: handle to drm device
-
+ *
* Returns: none
*/
void msm_disp_snapshot_state(struct drm_device *drm_dev);
@@ -114,7 +116,7 @@ void msm_disp_state_print(struct msm_disp_state *disp_state, struct drm_printer
/**
* msm_disp_snapshot_capture_state - utility to capture atomic state and hw registers
* @disp_state: handle to msm_disp_state struct
-
+ *
* Returns: none
*/
void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state);
@@ -122,7 +124,7 @@ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state);
/**
* msm_disp_state_free - free the memory after the coredump has been read
* @data: handle to struct msm_disp_state
-
+ *
* Returns: none
*/
void msm_disp_state_free(void *data);
@@ -130,7 +132,6 @@ void msm_disp_state_free(void *data);
/**
* msm_disp_snapshot_add_block - add a hardware block with its register dump
* @disp_state: handle to struct msm_disp_state
- * @name: name of the hardware block
* @len: size of the register space of the hardware block
* @base_addr: starting address of the register space of the hardware block
* @fmt: format in which the block names need to be printed
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index 071bcdea80f7..19b470968f4d 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -82,8 +82,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
drm_printf(p, "kernel: " UTS_RELEASE "\n");
drm_printf(p, "module: " KBUILD_MODNAME "\n");
drm_printf(p, "dpu devcoredump\n");
- drm_printf(p, "time: %lld.%09ld\n",
- state->time.tv_sec, state->time.tv_nsec);
+ drm_printf(p, "time: %ptSp\n", &state->time);
list_for_each_entry_safe(block, tmp, &state->blocks, node) {
drm_printf(p, "====================%s================\n", block->name);
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index c42fd2c17a32..cbcc7c2f0ffc 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -423,13 +423,13 @@ static void msm_dp_ctrl_config_ctrl(struct msm_dp_ctrl_private *ctrl)
static void msm_dp_ctrl_lane_mapping(struct msm_dp_ctrl_private *ctrl)
{
- u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
+ u32 *lane_map = ctrl->link->lane_map;
u32 ln_mapping;
- ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
- ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
- ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
- ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
+ ln_mapping = lane_map[0] << LANE0_MAPPING_SHIFT;
+ ln_mapping |= lane_map[1] << LANE1_MAPPING_SHIFT;
+ ln_mapping |= lane_map[2] << LANE2_MAPPING_SHIFT;
+ ln_mapping |= lane_map[3] << LANE3_MAPPING_SHIFT;
msm_dp_write_link(ctrl, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
ln_mapping);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index d87d47cc7ec3..9bd9cd5c1e03 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -130,6 +130,14 @@ struct msm_dp_desc {
bool wide_bus_supported;
};
+static const struct msm_dp_desc msm_dp_desc_glymur[] = {
+ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x0af5c000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
+ { .io_start = 0x0af64000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
+ { .io_start = 0x0af6c000, .id = MSM_DP_CONTROLLER_3, .wide_bus_supported = true },
+ {}
+};
+
static const struct msm_dp_desc msm_dp_desc_sa8775p[] = {
{ .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{ .io_start = 0x0af5c000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
@@ -187,6 +195,7 @@ static const struct msm_dp_desc msm_dp_desc_x1e80100[] = {
};
static const struct of_device_id msm_dp_dt_match[] = {
+ { .compatible = "qcom,glymur-dp", .data = &msm_dp_desc_glymur },
{ .compatible = "qcom,sa8775p-dp", .data = &msm_dp_desc_sa8775p },
{ .compatible = "qcom,sc7180-dp", .data = &msm_dp_desc_sc7180 },
{ .compatible = "qcom,sc7280-dp", .data = &msm_dp_desc_sc7280 },
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index 66e1bbd80db3..34a91e194a12 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -6,12 +6,14 @@
#define pr_fmt(fmt) "[drm-dp] %s: " fmt, __func__
#include <drm/drm_device.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include "dp_reg.h"
#include "dp_link.h"
#include "dp_panel.h"
+#define DP_LINK_RATE_HBR2 540000 /* kbytes */
#define DP_TEST_REQUEST_MASK 0x7F
enum audio_sample_rate {
@@ -1210,10 +1212,121 @@ u32 msm_dp_link_get_test_bits_depth(struct msm_dp_link *msm_dp_link, u32 bpp)
return tbd;
}
+static u32 msm_dp_link_link_frequencies(struct device_node *of_node)
+{
+ struct device_node *endpoint;
+ u64 frequency = 0;
+ int cnt;
+
+ endpoint = of_graph_get_endpoint_by_regs(of_node, 1, 0); /* port@1 */
+ if (!endpoint)
+ return 0;
+
+ cnt = of_property_count_u64_elems(endpoint, "link-frequencies");
+
+ if (cnt > 0)
+ of_property_read_u64_index(endpoint, "link-frequencies",
+ cnt - 1, &frequency);
+ of_node_put(endpoint);
+
+ do_div(frequency,
+ 10 * /* from symbol rate to link rate */
+ 1000); /* kbytes */
+
+ return frequency;
+}
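
The final division converts the DT link-frequencies value (a symbol rate in Hz) into the per-lane link-rate unit used elsewhere in the driver, so HBR2 comes out as 540000. A worked example, assuming the highest DT entry is 5.4 GHz:

/* 5.4 GHz symbol rate from DT -> DP link-rate unit */
u64 frequency = 5400000000ULL;	/* Hz, last link-frequencies entry */

do_div(frequency, 10 * 1000);	/* /10: symbol -> link rate, /1000: kbytes */
/* frequency == 540000 == DP_LINK_RATE_HBR2 */
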
+
+/*
+ * Always populate msm_dp_link->lane_map with 4 lanes.
+ * - Use DTS "data-lanes" if present; otherwise fall back to default mapping.
+ * - For partial definitions, fill remaining entries with unused lanes in
+ * ascending order.
+ */
+static int msm_dp_link_lane_map(struct device *dev, struct msm_dp_link *msm_dp_link)
+{
+ struct device_node *of_node = dev->of_node;
+ struct device_node *endpoint;
+ int cnt = msm_dp_link->max_dp_lanes;
+ u32 tmp[DP_MAX_NUM_DP_LANES];
+ u32 map[DP_MAX_NUM_DP_LANES] = {0, 1, 2, 3}; /* default 1:1 mapping */
+ bool used[DP_MAX_NUM_DP_LANES] = {false};
+ int i, j = 0, ret = -EINVAL;
+
+ endpoint = of_graph_get_endpoint_by_regs(of_node, 1, -1);
+ if (endpoint) {
+ ret = of_property_read_u32_array(endpoint, "data-lanes", tmp, cnt);
+ if (ret)
+ dev_dbg(dev, "endpoint data-lanes read failed (ret=%d)\n", ret);
+ }
+
+ if (ret) {
+ ret = of_property_read_u32_array(of_node, "data-lanes", tmp, cnt);
+ if (ret) {
+ dev_info(dev, "data-lanes not defined, set to default\n");
+ goto out;
+ }
+ }
+
+ for (i = 0; i < cnt; i++) {
+ if (tmp[i] >= DP_MAX_NUM_DP_LANES) {
+ dev_err(dev, "data-lanes[%d]=%u out of range\n", i, tmp[i]);
+ return -EINVAL;
+ }
+ used[tmp[i]] = true;
+ map[i] = tmp[i];
+ }
+
+ /* Fill the remaining entries with unused physical lanes (ascending) */
+ for (i = cnt; i < DP_MAX_NUM_DP_LANES && j < DP_MAX_NUM_DP_LANES; j++) {
+ if (!used[j])
+ map[i++] = j;
+ }
+
+out:
+ if (endpoint)
+ of_node_put(endpoint);
+
+ dev_dbg(dev, "data-lanes count %d <%d %d %d %d>\n", cnt, map[0], map[1], map[2], map[3]);
+ memcpy(msm_dp_link->lane_map, map, sizeof(map));
+ return 0;
+}
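
The fill loop guarantees lane_map always ends up a permutation of {0, 1, 2, 3}, even when data-lanes only defines some entries. A standalone sketch of the same fill for a hypothetical two-entry property <3 2>:

#include <stdbool.h>
#include <stdio.h>

#define NLANES 4

int main(void)
{
	unsigned int map[NLANES] = { 3, 2 };	/* data-lanes = <3 2>; entries 2..3 pending */
	bool used[NLANES] = { false };
	int cnt = 2, i, j = 0;

	for (i = 0; i < cnt; i++)
		used[map[i]] = true;

	/* append the unused physical lanes in ascending order */
	for (i = cnt; i < NLANES && j < NLANES; j++)
		if (!used[j])
			map[i++] = j;

	printf("<%u %u %u %u>\n", map[0], map[1], map[2], map[3]);	/* <3 2 0 1> */
	return 0;
}
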
+
+static int msm_dp_link_parse_dt(struct device *dev, struct msm_dp_link *msm_dp_link)
+{
+ struct device_node *of_node = dev->of_node;
+ int cnt;
+
+ /* data-lanes is a property of the msm_dp_out endpoint */
+ cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES);
+ if (cnt < 0) {
+ /* legacy code, data-lanes is the property of mdss_dp node */
+ cnt = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
+ }
+
+ if (cnt > 0)
+ msm_dp_link->max_dp_lanes = cnt;
+ else
+ msm_dp_link->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
+
+ if (msm_dp_link_lane_map(dev, msm_dp_link)) {
+ dev_err(dev, "failed to parse data-lanes\n");
+ return -EINVAL;
+ }
+
+ msm_dp_link->max_dp_link_rate = msm_dp_link_link_frequencies(of_node);
+ if (!msm_dp_link->max_dp_link_rate)
+ msm_dp_link->max_dp_link_rate = DP_LINK_RATE_HBR2;
+
+ return 0;
+}
+
struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux)
{
struct msm_dp_link_private *link;
struct msm_dp_link *msm_dp_link;
+ int ret;
if (!dev || !aux) {
DRM_ERROR("invalid input\n");
@@ -1229,5 +1342,9 @@ struct msm_dp_link *msm_dp_link_get(struct device *dev, struct drm_dp_aux *aux)
mutex_init(&link->psm_mutex);
msm_dp_link = &link->msm_dp_link;
+ ret = msm_dp_link_parse_dt(dev, msm_dp_link);
+ if (ret)
+ return ERR_PTR(ret);
+
return msm_dp_link;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
index ba47c6d19fbf..b1eb2de6d2a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.h
+++ b/drivers/gpu/drm/msm/dp/dp_link.h
@@ -12,6 +12,7 @@
#define DS_PORT_STATUS_CHANGED 0x200
#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
+#define DP_MAX_NUM_DP_LANES 4
struct msm_dp_link_info {
unsigned char revision;
@@ -72,6 +73,10 @@ struct msm_dp_link {
struct msm_dp_link_test_audio test_audio;
struct msm_dp_link_phy_params phy_params;
struct msm_dp_link_info link_params;
+
+ u32 lane_map[DP_MAX_NUM_DP_LANES];
+ u32 max_dp_lanes;
+ u32 max_dp_link_rate;
};
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 15b7f6c7146e..ad5d55bf009d 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -16,9 +16,6 @@
#define DP_INTF_CONFIG_DATABUS_WIDEN BIT(4)
-#define DP_MAX_NUM_DP_LANES 4
-#define DP_LINK_RATE_HBR2 540000 /* kbytes */
-
struct msm_dp_panel_private {
struct device *dev;
struct drm_device *drm_dev;
@@ -91,6 +88,7 @@ static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
int rc, max_lttpr_lanes, max_lttpr_rate;
struct msm_dp_panel_private *panel;
struct msm_dp_link_info *link_info;
+ struct msm_dp_link *link;
u8 *dpcd, major, minor;
panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
@@ -105,16 +103,20 @@ static int msm_dp_panel_read_dpcd(struct msm_dp_panel *msm_dp_panel)
major = (link_info->revision >> 4) & 0x0f;
minor = link_info->revision & 0x0f;
+ link = panel->link;
+ drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n",
+ link->max_dp_lanes, link->max_dp_link_rate);
+
link_info->rate = drm_dp_max_link_rate(dpcd);
link_info->num_lanes = drm_dp_max_lane_count(dpcd);
/* Limit data lanes from data-lanes of endpoint property of dtsi */
- if (link_info->num_lanes > msm_dp_panel->max_dp_lanes)
- link_info->num_lanes = msm_dp_panel->max_dp_lanes;
+ if (link_info->num_lanes > link->max_dp_lanes)
+ link_info->num_lanes = link->max_dp_lanes;
/* Limit link rate from link-frequencies of endpoint property of dtsi */
- if (link_info->rate > msm_dp_panel->max_dp_link_rate)
- link_info->rate = msm_dp_panel->max_dp_link_rate;
+ if (link_info->rate > link->max_dp_link_rate)
+ link_info->rate = link->max_dp_link_rate;
/* Limit data lanes from LTTPR capabilities, if any */
max_lttpr_lanes = drm_dp_lttpr_max_lane_count(panel->link->lttpr_common_caps);
@@ -173,9 +175,6 @@ int msm_dp_panel_read_sink_caps(struct msm_dp_panel *msm_dp_panel,
panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- drm_dbg_dp(panel->drm_dev, "max_lanes=%d max_link_rate=%d\n",
- msm_dp_panel->max_dp_lanes, msm_dp_panel->max_dp_link_rate);
-
rc = msm_dp_panel_read_dpcd(msm_dp_panel);
if (rc) {
DRM_ERROR("read dpcd failed %d\n", rc);
@@ -648,60 +647,6 @@ int msm_dp_panel_init_panel_info(struct msm_dp_panel *msm_dp_panel)
return 0;
}
-static u32 msm_dp_panel_link_frequencies(struct device_node *of_node)
-{
- struct device_node *endpoint;
- u64 frequency = 0;
- int cnt;
-
- endpoint = of_graph_get_endpoint_by_regs(of_node, 1, 0); /* port@1 */
- if (!endpoint)
- return 0;
-
- cnt = of_property_count_u64_elems(endpoint, "link-frequencies");
-
- if (cnt > 0)
- of_property_read_u64_index(endpoint, "link-frequencies",
- cnt - 1, &frequency);
- of_node_put(endpoint);
-
- do_div(frequency,
- 10 * /* from symbol rate to link rate */
- 1000); /* kbytes */
-
- return frequency;
-}
-
-static int msm_dp_panel_parse_dt(struct msm_dp_panel *msm_dp_panel)
-{
- struct msm_dp_panel_private *panel;
- struct device_node *of_node;
- int cnt;
-
- panel = container_of(msm_dp_panel, struct msm_dp_panel_private, msm_dp_panel);
- of_node = panel->dev->of_node;
-
- /*
- * data-lanes is the property of msm_dp_out endpoint
- */
- cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES);
- if (cnt < 0) {
- /* legacy code, data-lanes is the property of mdss_dp node */
- cnt = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
- }
-
- if (cnt > 0)
- msm_dp_panel->max_dp_lanes = cnt;
- else
- msm_dp_panel->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
-
- msm_dp_panel->max_dp_link_rate = msm_dp_panel_link_frequencies(of_node);
- if (!msm_dp_panel->max_dp_link_rate)
- msm_dp_panel->max_dp_link_rate = DP_LINK_RATE_HBR2;
-
- return 0;
-}
-
struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux,
struct msm_dp_link *link,
void __iomem *link_base,
@@ -709,7 +654,6 @@ struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux
{
struct msm_dp_panel_private *panel;
struct msm_dp_panel *msm_dp_panel;
- int ret;
if (!dev || !aux || !link) {
DRM_ERROR("invalid input\n");
@@ -729,10 +673,6 @@ struct msm_dp_panel *msm_dp_panel_get(struct device *dev, struct drm_dp_aux *aux
msm_dp_panel = &panel->msm_dp_panel;
msm_dp_panel->max_bw_code = DP_LINK_BW_8_1;
- ret = msm_dp_panel_parse_dt(msm_dp_panel);
- if (ret)
- return ERR_PTR(ret);
-
return msm_dp_panel;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index d2cf401506dc..921a296852d4 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -41,9 +41,6 @@ struct msm_dp_panel {
bool vsc_sdp_supported;
u32 hw_revision;
- u32 max_dp_lanes;
- u32 max_dp_link_rate;
-
u32 max_bw_code;
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
index af2e30f3f842..ec486ff02c9b 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -444,21 +444,19 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
return (unsigned long)vco_rate;
}
-static long dsi_pll_10nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_10nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_10nm *pll_10nm = to_pll_10nm(hw);
- if (rate < pll_10nm->phy->cfg->min_pll_rate)
- return pll_10nm->phy->cfg->min_pll_rate;
- else if (rate > pll_10nm->phy->cfg->max_pll_rate)
- return pll_10nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_10nm->phy->cfg->min_pll_rate, pll_10nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
- .round_rate = dsi_pll_10nm_clk_round_rate,
+ .determine_rate = dsi_pll_10nm_clk_determine_rate,
.set_rate = dsi_pll_10nm_vco_set_rate,
.recalc_rate = dsi_pll_10nm_vco_recalc_rate,
.prepare = dsi_pll_10nm_vco_prepare,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 3a1c8ece6657..fdefcbd9c284 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -578,21 +578,19 @@ static void dsi_pll_14nm_vco_unprepare(struct clk_hw *hw)
pll_14nm->phy->pll_on = false;
}
-static long dsi_pll_14nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_14nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_14nm *pll_14nm = to_pll_14nm(hw);
- if (rate < pll_14nm->phy->cfg->min_pll_rate)
- return pll_14nm->phy->cfg->min_pll_rate;
- else if (rate > pll_14nm->phy->cfg->max_pll_rate)
- return pll_14nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_14nm->phy->cfg->min_pll_rate, pll_14nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_14nm_vco = {
- .round_rate = dsi_pll_14nm_clk_round_rate,
+ .determine_rate = dsi_pll_14nm_clk_determine_rate,
.set_rate = dsi_pll_14nm_vco_set_rate,
.recalc_rate = dsi_pll_14nm_vco_recalc_rate,
.prepare = dsi_pll_14nm_vco_prepare,
@@ -622,18 +620,20 @@ static unsigned long dsi_pll_14nm_postdiv_recalc_rate(struct clk_hw *hw,
postdiv->flags, width);
}
-static long dsi_pll_14nm_postdiv_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *prate)
+static int dsi_pll_14nm_postdiv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_14nm_postdiv *postdiv = to_pll_14nm_postdiv(hw);
struct dsi_pll_14nm *pll_14nm = postdiv->pll;
- DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, rate);
+ DBG("DSI%d PLL parent rate=%lu", pll_14nm->phy->id, req->rate);
- return divider_round_rate(hw, rate, prate, NULL,
- postdiv->width,
- postdiv->flags);
+ req->rate = divider_round_rate(hw, req->rate, &req->best_parent_rate,
+ NULL,
+ postdiv->width,
+ postdiv->flags);
+
+ return 0;
}
static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -680,7 +680,7 @@ static int dsi_pll_14nm_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops clk_ops_dsi_pll_14nm_postdiv = {
.recalc_rate = dsi_pll_14nm_postdiv_recalc_rate,
- .round_rate = dsi_pll_14nm_postdiv_round_rate,
+ .determine_rate = dsi_pll_14nm_postdiv_determine_rate,
.set_rate = dsi_pll_14nm_postdiv_set_rate,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 90348a2af3e9..d00e415b9a99 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -533,21 +533,20 @@ static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
pll_28nm->phy->pll_on = false;
}
-static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
- if (rate < pll_28nm->phy->cfg->min_pll_rate)
- return pll_28nm->phy->cfg->min_pll_rate;
- else if (rate > pll_28nm->phy->cfg->max_pll_rate)
- return pll_28nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_28nm->phy->cfg->min_pll_rate,
+ pll_28nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_hpm,
@@ -556,7 +555,7 @@ static const struct clk_ops clk_ops_dsi_pll_28nm_vco_hpm = {
};
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_lp,
@@ -565,7 +564,7 @@ static const struct clk_ops clk_ops_dsi_pll_28nm_vco_lp = {
};
static const struct clk_ops clk_ops_dsi_pll_28nm_vco_8226 = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare_8226,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index f3643320ff2f..8dcce9581dc3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -231,21 +231,19 @@ static void dsi_pll_28nm_vco_unprepare(struct clk_hw *hw)
pll_28nm->phy->pll_on = false;
}
-static long dsi_pll_28nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_28nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_28nm *pll_28nm = to_pll_28nm(hw);
- if (rate < pll_28nm->phy->cfg->min_pll_rate)
- return pll_28nm->phy->cfg->min_pll_rate;
- else if (rate > pll_28nm->phy->cfg->max_pll_rate)
- return pll_28nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_28nm->phy->cfg->min_pll_rate, pll_28nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
- .round_rate = dsi_pll_28nm_clk_round_rate,
+ .determine_rate = dsi_pll_28nm_clk_determine_rate,
.set_rate = dsi_pll_28nm_clk_set_rate,
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = dsi_pll_28nm_vco_prepare,
@@ -296,18 +294,20 @@ static unsigned int get_vco_mul_factor(unsigned long byte_clk_rate)
return 8;
}
-static long clk_bytediv_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int clk_bytediv_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
unsigned long best_parent;
unsigned int factor;
- factor = get_vco_mul_factor(rate);
+ factor = get_vco_mul_factor(req->rate);
+
+ best_parent = req->rate * factor;
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
- best_parent = rate * factor;
- *prate = clk_hw_round_rate(clk_hw_get_parent(hw), best_parent);
+ req->rate = req->best_parent_rate / factor;
- return *prate / factor;
+ return 0;
}
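
The byte-clock divider works backwards from the requested byte clock: pick the VCO multiplication factor for that rate range, ask the parent to round rate * factor, then report parent / factor. Worked numbers, assuming get_vco_mul_factor() returns 8 for this rate:

/* hedged example: 60 MHz byte clock with an assumed factor of 8 */
unsigned long rate = 60000000UL;
unsigned int factor = 8;                 /* get_vco_mul_factor(rate) */
unsigned long parent = rate * factor;    /* 480 MHz VCO request */

/* after the parent rounds (here: unchanged), the achievable rate is */
rate = parent / factor;                  /* back to 60 MHz */
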
static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -328,7 +328,7 @@ static int clk_bytediv_set_rate(struct clk_hw *hw, unsigned long rate,
/* Our special byte clock divider ops */
static const struct clk_ops clk_bytediv_ops = {
- .round_rate = clk_bytediv_round_rate,
+ .determine_rate = clk_bytediv_determine_rate,
.set_rate = clk_bytediv_set_rate,
.recalc_rate = clk_bytediv_recalc_rate,
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 8c98f91a5930..c5e1d2016bcc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -90,6 +90,13 @@ struct dsi_pll_7nm {
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
spinlock_t pclk_mux_lock;
+ /*
+ * Protects the REG_DSI_7nm_PHY_CMN_CTRL_0 register and the
+ * pll_enable_cnt member.
+ */
+ spinlock_t pll_enable_lock;
+ int pll_enable_cnt;
+
struct pll_7nm_cached_state cached_state;
struct dsi_pll_7nm *slave;
@@ -103,6 +110,9 @@ struct dsi_pll_7nm {
*/
static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll);
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll);
+
static void dsi_pll_setup_config(struct dsi_pll_config *config)
{
config->ssc_freq = 31500;
@@ -340,6 +350,7 @@ static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
struct dsi_pll_config config;
+ dsi_pll_enable_pll_bias(pll_7nm);
DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->phy->id, rate,
parent_rate);
@@ -357,6 +368,7 @@ static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
dsi_pll_ssc_commit(pll_7nm, &config);
+ dsi_pll_disable_pll_bias(pll_7nm);
/* flush, ensure all register writes are done */
wmb();
@@ -385,19 +397,44 @@ static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
{
- u32 data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ unsigned long flags;
+ u32 data;
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ --pll->pll_enable_cnt;
+ if (pll->pll_enable_cnt < 0) {
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+ DRM_DEV_ERROR_RATELIMITED(&pll->phy->pdev->dev,
+ "bug: imbalance in disabling PLL bias\n");
+ return;
+ } else if (pll->pll_enable_cnt > 0) {
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
+ return;
+ } /* else: == 0 */
+
+ data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ data &= ~DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
writel(0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES);
- writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
ndelay(250);
}
static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
{
- u32 data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ unsigned long flags;
+ u32 data;
+
+ spin_lock_irqsave(&pll->pll_enable_lock, flags);
+ pll->pll_enable_cnt++;
+ WARN_ON(pll->pll_enable_cnt == INT_MAX);
+
+ data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+ data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
+ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
- writel(data | BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0xc0, pll->phy->pll_base + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES);
+ spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
ndelay(250);
}
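
The bias enable/disable pair is now reference counted under pll_enable_lock so that nested users (set_rate, recalc_rate, save_state, prepare) can each bracket their register access without one path gating the bias off underneath another. The counting scheme, reduced to a hedged standalone model (locking elided):

/* locking (spinlock irqsave in the driver) elided for brevity */
static int enable_cnt;

static void bias_get(void)
{
	enable_cnt++;
	/* programming the bias on is idempotent, done on every get */
}

static void bias_put(void)
{
	if (--enable_cnt < 0) {
		enable_cnt = 0;	/* imbalance: warn, leave the bias untouched */
		return;
	}
	if (enable_cnt == 0)
		;		/* last user: gate the bias off */
}
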
@@ -491,6 +528,10 @@ static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
if (pll_7nm->slave)
dsi_pll_enable_global_clk(pll_7nm->slave);
+ writel(0x1, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
+ if (pll_7nm->slave)
+ writel(0x1, pll_7nm->slave->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
+
error:
return rc;
}
@@ -534,6 +575,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
u32 dec;
u64 pll_freq, tmp64;
+ dsi_pll_enable_pll_bias(pll_7nm);
dec = readl(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
dec &= 0xff;
@@ -558,24 +600,24 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
+ dsi_pll_disable_pll_bias(pll_7nm);
+
return (unsigned long)vco_rate;
}
-static long dsi_pll_7nm_clk_round_rate(struct clk_hw *hw,
- unsigned long rate, unsigned long *parent_rate)
+static int dsi_pll_7nm_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(hw);
- if (rate < pll_7nm->phy->cfg->min_pll_rate)
- return pll_7nm->phy->cfg->min_pll_rate;
- else if (rate > pll_7nm->phy->cfg->max_pll_rate)
- return pll_7nm->phy->cfg->max_pll_rate;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate,
+ pll_7nm->phy->cfg->min_pll_rate, pll_7nm->phy->cfg->max_pll_rate);
+
+ return 0;
}
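
The round_rate to determine_rate conversions in this patch all follow the same shape: the callback clamps req->rate in place and returns a status code, instead of multiplexing the rounded rate and error codes into one signed return value. A freestanding sketch of that clamp behaviour, with made-up limits:

#include <stdio.h>

struct rate_request {
	unsigned long rate;
};

static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* determine_rate-style callback: edits the request, returns a status */
static int determine_rate(struct rate_request *req, unsigned long min, unsigned long max)
{
	req->rate = clamp_ul(req->rate, min, max);
	return 0;
}

int main(void)
{
	struct rate_request req = { .rate = 4000000UL };

	determine_rate(&req, 1000000UL, 2000000UL);
	printf("clamped rate: %lu\n", req.rate);	/* 2000000 */
	return 0;
}
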
static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
- .round_rate = dsi_pll_7nm_clk_round_rate,
+ .determine_rate = dsi_pll_7nm_clk_determine_rate,
.set_rate = dsi_pll_7nm_vco_set_rate,
.recalc_rate = dsi_pll_7nm_vco_recalc_rate,
.prepare = dsi_pll_7nm_vco_prepare,
@@ -593,6 +635,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
void __iomem *phy_base = pll_7nm->phy->base;
u32 cmn_clk_cfg0, cmn_clk_cfg1;
+ dsi_pll_enable_pll_bias(pll_7nm);
cached->pll_out_div = readl(pll_7nm->phy->pll_base +
REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
cached->pll_out_div &= 0x3;
@@ -604,6 +647,7 @@ static void dsi_7nm_pll_save_state(struct msm_dsi_phy *phy)
cmn_clk_cfg1 = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
cached->pll_mux = FIELD_GET(DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK, cmn_clk_cfg1);
+ dsi_pll_disable_pll_bias(pll_7nm);
DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
pll_7nm->phy->id, cached->pll_out_div, cached->bit_clk_div,
cached->pix_clk_div, cached->pll_mux);
@@ -826,6 +870,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
spin_lock_init(&pll_7nm->postdiv_lock);
spin_lock_init(&pll_7nm->pclk_mux_lock);
+ spin_lock_init(&pll_7nm->pll_enable_lock);
pll_7nm->phy = phy;
@@ -839,6 +884,12 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
/* TODO: Remove this when we have proper display handover support */
msm_dsi_phy_pll_save_state(phy);
+ /*
+ * Also store a proper vco_current_rate, because its value will be used in
+ * dsi_7nm_pll_restore_state().
+ */
+ if (!dsi_pll_7nm_vco_recalc_rate(&pll_7nm->clk_hw, VCO_REF_CLK_RATE))
+ pll_7nm->vco_current_rate = pll_7nm->phy->cfg->min_pll_rate;
return 0;
}
@@ -1034,7 +1085,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
}
/* de-assert digital and pll power down */
- data = BIT(6) | BIT(5);
+ data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
+ DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
/* Assert PLL core reset */
@@ -1177,6 +1229,7 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
/* Turn off all PHY blocks */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
/* make sure phy is turned off */
wmb();
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index 8c8d80b59573..36e928b0fd5a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -629,16 +629,12 @@ static int hdmi_8996_pll_prepare(struct clk_hw *hw)
return 0;
}
-static long hdmi_8996_pll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int hdmi_8996_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < HDMI_PCLK_MIN_FREQ)
- return HDMI_PCLK_MIN_FREQ;
- else if (rate > HDMI_PCLK_MAX_FREQ)
- return HDMI_PCLK_MAX_FREQ;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate, HDMI_PCLK_MIN_FREQ, HDMI_PCLK_MAX_FREQ);
+
+ return 0;
}
static unsigned long hdmi_8996_pll_recalc_rate(struct clk_hw *hw,
@@ -684,7 +680,7 @@ static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops hdmi_8996_pll_ops = {
.set_rate = hdmi_8996_pll_set_clk_rate,
- .round_rate = hdmi_8996_pll_round_rate,
+ .determine_rate = hdmi_8996_pll_determine_rate,
.recalc_rate = hdmi_8996_pll_recalc_rate,
.prepare = hdmi_8996_pll_prepare,
.unprepare = hdmi_8996_pll_unprepare,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
index 33bb48ae58a2..a86ff3706369 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -646,16 +646,12 @@ static int hdmi_8998_pll_prepare(struct clk_hw *hw)
return 0;
}
-static long hdmi_8998_pll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
+static int hdmi_8998_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- if (rate < HDMI_PCLK_MIN_FREQ)
- return HDMI_PCLK_MIN_FREQ;
- else if (rate > HDMI_PCLK_MAX_FREQ)
- return HDMI_PCLK_MAX_FREQ;
- else
- return rate;
+ req->rate = clamp_t(unsigned long, req->rate, HDMI_PCLK_MIN_FREQ, HDMI_PCLK_MAX_FREQ);
+
+ return 0;
}
static unsigned long hdmi_8998_pll_recalc_rate(struct clk_hw *hw,
@@ -688,7 +684,7 @@ static int hdmi_8998_pll_is_enabled(struct clk_hw *hw)
static const struct clk_ops hdmi_8998_pll_ops = {
.set_rate = hdmi_8998_pll_set_clk_rate,
- .round_rate = hdmi_8998_pll_round_rate,
+ .determine_rate = hdmi_8998_pll_determine_rate,
.recalc_rate = hdmi_8998_pll_recalc_rate,
.prepare = hdmi_8998_pll_prepare,
.unprepare = hdmi_8998_pll_unprepare,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index 83c8781fcc3f..6ba6bbdb7e05 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -373,12 +373,14 @@ static unsigned long hdmi_pll_recalc_rate(struct clk_hw *hw,
return pll->pixclk;
}
-static long hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int hdmi_pll_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- const struct pll_rate *pll_rate = find_rate(rate);
+ const struct pll_rate *pll_rate = find_rate(req->rate);
+
+ req->rate = pll_rate->rate;
- return pll_rate->rate;
+ return 0;
}
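
The 8960 PLL variant snaps to a frequency table via find_rate() rather than clamping to a range. A freestanding sketch of one plausible snapping strategy (the driver's actual table and selection policy may differ; the values here are made up):

#include <stdio.h>

static const unsigned long rates[] = {
	25200000, 27030000, 74250000, 148500000,
};

/* return the first table entry at or above the request, else the largest */
static unsigned long snap_rate(unsigned long rate)
{
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (rates[i] >= rate)
			return rates[i];
	return rates[i - 1];
}

int main(void)
{
	printf("%lu\n", snap_rate(30000000UL));	/* -> 74250000 */
	return 0;
}
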
static int hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -402,7 +404,7 @@ static const struct clk_ops hdmi_pll_ops = {
.enable = hdmi_pll_enable,
.disable = hdmi_pll_disable,
.recalc_rate = hdmi_pll_recalc_rate,
- .round_rate = hdmi_pll_round_rate,
+ .determine_rate = hdmi_pll_determine_rate,
.set_rate = hdmi_pll_set_rate,
};
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9dcc7a596a11..7e977fec4100 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -826,6 +826,7 @@ static const struct file_operations fops = {
#define DRIVER_FEATURES_KMS ( \
DRIVER_GEM | \
+ DRIVER_GEM_GPUVA | \
DRIVER_ATOMIC | \
DRIVER_MODESET | \
0 )
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 985db9febd98..6d847d593f1a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -229,7 +229,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev);
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev, struct device *mdss_dev);
bool msm_use_mmu(struct drm_device *dev);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index b5969374d53f..fd19995b12b5 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -52,8 +52,6 @@ static void msm_fbdev_fb_destroy(struct fb_info *info)
drm_framebuffer_remove(fb);
drm_client_release(&helper->client);
- drm_fb_helper_unprepare(helper);
- kfree(helper);
}
static const struct fb_ops msm_fb_ops = {
@@ -93,9 +91,9 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
{
struct drm_device *dev = helper->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct fb_info *fbi = helper->info;
struct drm_framebuffer *fb = NULL;
struct drm_gem_object *bo;
- struct fb_info *fbi = NULL;
uint64_t paddr;
uint32_t format;
int ret, pitch;
@@ -128,13 +126,6 @@ int msm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto fail;
}
- fbi = drm_fb_helper_alloc_info(helper);
- if (IS_ERR(fbi)) {
- DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
- ret = PTR_ERR(fbi);
- goto fail;
- }
-
DBG("fbi=%p, dev=%p", fbi, dev);
helper->funcs = &msm_fbdev_helper_funcs;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index e7631f4ef530..017411a0bf45 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -10,8 +10,10 @@
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
#include <drm/drm_file.h>
+#include <drm/drm_fourcc.h>
#include <trace/events/gpu_mem.h>
@@ -191,7 +193,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
- int npages = obj->size >> PAGE_SHIFT;
+ size_t npages = obj->size >> PAGE_SHIFT;
p = drm_gem_get_pages(obj);
@@ -698,8 +700,32 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- args->pitch = align_pitch(args->width, args->bpp);
- args->size = PAGE_ALIGN(args->pitch * args->height);
+ u32 fourcc;
+ u64 pitch_align;
+ int ret;
+
+ /*
+ * Adreno needs pitch aligned to 32 pixels. Compute the number
+ * of bytes for a block of 32 pixels at the given color format.
+ * Use the result as pitch alignment.
+ */
+ fourcc = drm_driver_color_mode_format(dev, args->bpp);
+ if (fourcc != DRM_FORMAT_INVALID) {
+ const struct drm_format_info *info;
+
+ info = drm_format_info(fourcc);
+ if (!info)
+ return -EINVAL;
+ pitch_align = drm_format_info_min_pitch(info, 0, 32);
+ } else {
+ pitch_align = round_up(args->width, 32) * DIV_ROUND_UP(args->bpp, SZ_8);
+ }
+ if (!pitch_align || pitch_align > U32_MAX)
+ return -EINVAL;
+ ret = drm_mode_size_dumb(dev, args, pitch_align, 0);
+ if (ret)
+ return ret;
+
return msm_gem_new_handle(dev, file, args->size,
MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}
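
The fallback branch derives the pitch alignment by rounding the width up to 32 pixels and multiplying by the rounded-up bytes per pixel. The same arithmetic as a standalone sketch with illustrative values:

#include <stdio.h>

static unsigned long pitch_align_bytes(unsigned long width_px, unsigned long bpp)
{
	unsigned long aligned_px = (width_px + 31) & ~31UL;	/* round_up(width, 32) */
	unsigned long bytes_pp = (bpp + 7) / 8;			/* DIV_ROUND_UP(bpp, 8) */

	return aligned_px * bytes_pp;
}

int main(void)
{
	/* 1043 px wide at 32 bpp: 1056 px * 4 bytes = 4224-byte alignment */
	printf("%lu\n", pitch_align_bytes(1043, 32));
	return 0;
}
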
@@ -1120,12 +1146,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
- if (obj->resv != &obj->_resv) {
+ /*
+ * In error paths, we could end up here before msm_gem_new_handle()
+ * has changed obj->resv to point to the shared resv. In this case,
+ * we don't want to drop a ref to the shared r_obj that we haven't
+ * taken yet.
+ */
+ if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
struct drm_gem_object *r_obj =
container_of(obj->resv, struct drm_gem_object, _resv);
- WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
-
/* Drop reference we hold to shared resv obj: */
drm_gem_object_put(r_obj);
}
@@ -1148,7 +1178,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle,
+ size_t size, uint32_t flags, uint32_t *handle,
char *name)
{
struct drm_gem_object *obj;
@@ -1214,9 +1244,8 @@ static const struct drm_gem_object_funcs msm_gem_object_funcs = {
.vm_ops = &vm_ops,
};
-static int msm_gem_new_impl(struct drm_device *dev,
- uint32_t size, uint32_t flags,
- struct drm_gem_object **obj)
+static int msm_gem_new_impl(struct drm_device *dev, uint32_t flags,
+ struct drm_gem_object **obj)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
@@ -1250,7 +1279,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
return 0;
}
-struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags)
+struct drm_gem_object *msm_gem_new(struct drm_device *dev, size_t size, uint32_t flags)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
@@ -1265,7 +1294,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
if (size == 0)
return ERR_PTR(-EINVAL);
- ret = msm_gem_new_impl(dev, size, flags, &obj);
+ ret = msm_gem_new_impl(dev, flags, &obj);
if (ret)
return ERR_PTR(ret);
@@ -1305,12 +1334,12 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
- uint32_t size;
- int ret, npages;
+ size_t size, npages;
+ int ret;
size = PAGE_ALIGN(dmabuf->size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
+ ret = msm_gem_new_impl(dev, MSM_BO_WC, &obj);
if (ret)
return ERR_PTR(ret);
@@ -1353,7 +1382,7 @@ fail:
return ERR_PTR(ret);
}
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
struct drm_gpuvm *vm, struct drm_gem_object **bo,
uint64_t *iova)
{
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 751c3b4965bc..a4cf31853c50 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -297,10 +297,10 @@ bool msm_gem_active(struct drm_gem_object *obj);
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
int msm_gem_cpu_fini(struct drm_gem_object *obj);
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- uint32_t size, uint32_t flags, uint32_t *handle, char *name);
+ size_t size, uint32_t flags, uint32_t *handle, char *name);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags);
-void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags,
+ size_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, size_t size, uint32_t flags,
struct drm_gpuvm *vm, struct drm_gem_object **bo,
uint64_t *iova);
void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm);
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index c0a33ac839cb..036d34c674d9 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -15,7 +15,7 @@
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
+ size_t npages = obj->size >> PAGE_SHIFT;
if (msm_obj->flags & MSM_BO_NO_SHARE)
return ERR_PTR(-EINVAL);
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3ab3b27134f9..75d9f3574370 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
submit->user_fence,
DMA_RESV_USAGE_BOOKKEEP,
DMA_RESV_USAGE_BOOKKEEP);
+
+ last_fence = vm->last_fence;
+ vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
+ dma_fence_put(last_fence);
+
return;
}
@@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
dma_resv_add_fence(obj->resv, submit->user_fence,
DMA_RESV_USAGE_READ);
}
-
- last_fence = vm->last_fence;
- vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
- dma_fence_put(last_fence);
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 00d0f3b7ba32..71d5238437eb 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -396,7 +396,14 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
if (obj)
GEM_WARN_ON((range_end - range_start) > obj->size);
- drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset);
+ struct drm_gpuva_op_map op_map = {
+ .va.addr = range_start,
+ .va.range = range_end - range_start,
+ .gem.obj = obj,
+ .gem.offset = offset,
+ };
+
+ drm_gpuva_init_from_op(&vma->base, &op_map);
vma->mapped = false;
ret = drm_gpuva_insert(&vm->base, &vma->base);
@@ -455,15 +462,20 @@ struct op_arg {
bool kept;
};
-static void
+static int
vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op)
{
struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return -ENOMEM;
+
*op = _op;
list_add_tail(&op->node, &arg->job->vm_ops);
if (op->obj)
drm_gem_object_get(op->obj);
+
+ return 0;
}
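
vm_op_enqueue() now reports allocation failure instead of dereferencing a NULL op, and every caller below checks the result. A simplified userspace sketch of the pattern (the list is reduced to a fixed array; all names are hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct op {
	int kind;
};

struct op_list {
	struct op *ops[8];
	int count;
};

/* enqueue a copy of tmpl; reports failure instead of ignoring it */
static int op_enqueue(struct op_list *list, struct op tmpl)
{
	struct op *op;

	if (list->count >= 8)
		return -ENOMEM;

	op = malloc(sizeof(*op));
	if (!op)
		return -ENOMEM;

	*op = tmpl;
	list->ops[list->count++] = op;
	return 0;
}

int main(void)
{
	struct op_list list = { .count = 0 };
	int ret = op_enqueue(&list, (struct op){ .kind = 1 });

	if (ret)
		fprintf(stderr, "enqueue failed: %d\n", ret);
	else
		printf("queued %d op(s)\n", list.count);
	return 0;
}
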
static struct drm_gpuva *
@@ -482,6 +494,7 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
struct drm_gpuva *vma;
struct sg_table *sgt;
unsigned prot;
+ int ret;
if (arg->kept)
return 0;
@@ -493,8 +506,6 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
vma->va.addr, vma->va.range);
- vma->flags = ((struct op_arg *)arg)->flags;
-
if (obj) {
sgt = to_msm_bo(obj)->sgt;
prot = msm_gem_prot(obj);
@@ -503,7 +514,7 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
prot = IOMMU_READ | IOMMU_WRITE;
}
- vm_op_enqueue(arg, (struct msm_vm_op){
+ ret = vm_op_enqueue(arg, (struct msm_vm_op){
.op = MSM_VM_OP_MAP,
.map = {
.sgt = sgt,
@@ -516,6 +527,10 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
.obj = vma->gem.obj,
});
+ if (ret)
+ return ret;
+
+ vma->flags = ((struct op_arg *)arg)->flags;
to_msm_vma(vma)->mapped = true;
return 0;
@@ -531,6 +546,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo;
bool mapped = to_msm_vma(orig_vma)->mapped;
unsigned flags;
+ int ret;
vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma,
orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range);
@@ -540,7 +556,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
- vm_op_enqueue(arg, (struct msm_vm_op){
+ ret = vm_op_enqueue(arg, (struct msm_vm_op){
.op = MSM_VM_OP_UNMAP,
.unmap = {
.iova = unmap_start,
@@ -550,6 +566,9 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
.obj = orig_vma->gem.obj,
});
+ if (ret)
+ return ret;
+
/*
* Part of this GEM obj is still mapped, but we're going to kill the
* existing VMA and replace it with one or two new ones (ie. two if
@@ -611,6 +630,7 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
struct msm_vm_bind_job *job = arg->job;
struct drm_gpuva *vma = op->unmap.va;
struct msm_gem_vma *msm_vma = to_msm_vma(vma);
+ int ret;
vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
vma->va.addr, vma->va.range);
@@ -643,7 +663,7 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
if (!msm_vma->mapped)
goto out_close;
- vm_op_enqueue(arg, (struct msm_vm_op){
+ ret = vm_op_enqueue(arg, (struct msm_vm_op){
.op = MSM_VM_OP_UNMAP,
.unmap = {
.iova = vma->va.addr,
@@ -653,6 +673,9 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
.obj = vma->gem.obj,
});
+ if (ret)
+ return ret;
+
msm_vma->mapped = false;
out_close:
@@ -964,6 +987,7 @@ static int
lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
{
struct drm_device *dev = job->vm->drm;
+ struct msm_drm_private *priv = dev->dev_private;
int i = job->nr_ops++;
int ret = 0;
@@ -1010,6 +1034,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
break;
}
+ if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
+ !adreno_smmu_has_prr(priv->gpu))
+ ret = UERR(EINVAL, dev, "PRR not supported\n");
+
return ret;
}
@@ -1023,6 +1052,7 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
struct drm_device *dev = job->vm->drm;
int ret = 0;
int cnt = 0;
+ int i = -1;
if (args->nr_ops == 1) {
/* Single op case, the op is inlined: */
@@ -1056,11 +1086,12 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
spin_lock(&file->table_lock);
- for (unsigned i = 0; i < args->nr_ops; i++) {
+ for (i = 0; i < args->nr_ops; i++) {
+ struct msm_vm_bind_op *op = &job->ops[i];
struct drm_gem_object *obj;
- if (!job->ops[i].handle) {
- job->ops[i].obj = NULL;
+ if (!op->handle) {
+ op->obj = NULL;
continue;
}
@@ -1068,16 +1099,22 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
- obj = idr_find(&file->object_idr, job->ops[i].handle);
+ obj = idr_find(&file->object_idr, op->handle);
if (!obj) {
- ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i);
+ ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i);
goto out_unlock;
}
drm_gem_object_get(obj);
- job->ops[i].obj = obj;
+ op->obj = obj;
cnt++;
+
+ if ((op->range + op->obj_offset) > obj->size) {
+ ret = UERR(EINVAL, dev, "invalid range: %016llx + %016llx > %016zx\n",
+ op->range, op->obj_offset, obj->size);
+ goto out_unlock;
+ }
}
*nr_bos = cnt;
@@ -1085,6 +1122,17 @@ vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args
out_unlock:
spin_unlock(&file->table_lock);
+ if (ret) {
+ for (; i >= 0; i--) {
+ struct msm_vm_bind_op *op = &job->ops[i];
+
+ if (!op->obj)
+ continue;
+
+ drm_gem_object_put(op->obj);
+ op->obj = NULL;
+ }
+ }
out:
return ret;
}
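
The unwind loop drops exactly the references taken before the failing index, which is why i is hoisted out of the loop and starts at -1. A freestanding sketch of the acquire-all-or-roll-back idiom (refcounts modelled as plain counters, failure injected at a fixed index; illustrative only):

#include <stdio.h>

#define NR 4

static int refs[NR];

/* take a reference, failing deterministically at index 2 */
static int lookup(int i)
{
	if (i == 2)
		return -1;
	refs[i]++;
	return 0;
}

int main(void)
{
	int i, ret = 0;

	for (i = 0; i < NR; i++) {
		ret = lookup(i);
		if (ret)
			break;
	}

	if (ret) {
		/* drop only the references the loop actually took */
		for (i--; i >= 0; i--)
			refs[i]--;
	}

	for (i = 0; i < NR; i++)
		printf("refs[%d] = %d\n", i, refs[i]);
	return 0;
}
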
@@ -1200,11 +1248,17 @@ vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
op->obj_offset);
break;
case MSM_VM_BIND_OP_MAP:
- case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1,
- op->iova, op->range,
- op->obj, op->obj_offset);
+ case MSM_VM_BIND_OP_MAP_NULL: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->iova,
+ .map.va.range = op->range,
+ .map.gem.obj = op->obj,
+ .map.gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
@@ -1312,10 +1366,17 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job)
if (op->flags & MSM_VM_BIND_OP_DUMP)
arg.flags |= MSM_VMA_DUMP;
fallthrough;
- case MSM_VM_BIND_OP_MAP_NULL:
- ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova,
- op->range, op->obj, op->obj_offset);
+ case MSM_VM_BIND_OP_MAP_NULL: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->iova,
+ .map.va.range = op->range,
+ .map.gem.obj = op->obj,
+ .map.gem.offset = op->obj_offset,
+ };
+
+ ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
break;
+ }
default:
/*
* lookup_op() should have already thrown an error for
@@ -1382,7 +1443,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
* Maybe we could allow just UNMAP ops? OTOH userspace should just
* immediately close the device file and all will be torn down.
*/
- if (to_msm_vm(ctx->vm)->unusable)
+ if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
return UERR(EPIPE, dev, "context is unusable");
/*
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 26c5ce897cbb..995549d0bbbc 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -197,8 +197,7 @@ static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
drm_printf(&p, "---\n");
drm_printf(&p, "kernel: " UTS_RELEASE "\n");
drm_printf(&p, "module: " KBUILD_MODNAME "\n");
- drm_printf(&p, "time: %lld.%09ld\n",
- state->time.tv_sec, state->time.tv_nsec);
+ drm_printf(&p, "time: %ptSp\n", &state->time);
if (state->comm)
drm_printf(&p, "comm: %s\n", state->comm);
if (state->cmd)
@@ -287,16 +286,17 @@ static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submi
state->bos = kcalloc(cnt, sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
- drm_gpuvm_for_each_va (vma, submit->vm) {
- bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
+ if (state->bos)
+ drm_gpuvm_for_each_va(vma, submit->vm) {
+ bool dump = rd_full || (vma->flags & MSM_VMA_DUMP);
- /* Skip MAP_NULL/PRR VMAs: */
- if (!vma->gem.obj)
- continue;
+ /* Skip MAP_NULL/PRR VMAs: */
+ if (!vma->gem.obj)
+ continue;
- msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
- dump, vma->gem.offset, vma->va.range);
- }
+ msm_gpu_crashstate_get_bo(state, vma->gem.obj, vma->va.addr,
+ dump, vma->gem.offset, vma->va.range);
+ }
drm_exec_fini(&exec);
} else {
@@ -304,7 +304,7 @@ static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submi
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
for (int i = 0; state->bos && i < submit->nr_bos; i++) {
- struct drm_gem_object *obj = submit->bos[i].obj;;
+ struct drm_gem_object *obj = submit->bos[i].obj;
bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP);
msm_gem_lock(obj);
@@ -348,6 +348,10 @@ static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_v
state->vm_logs = kmalloc_array(
state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL);
+ if (!state->vm_logs)
+ state->nr_vm_logs = 0;
+
for (int i = 0; i < state->nr_vm_logs; i++) {
int idx = (i + first) & vm_log_mask;
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index b2a96544f92a..2894fc118485 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -16,6 +16,7 @@
#include "msm_drv.h"
#include "msm_fence.h"
+#include "msm_gpu_trace.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"
@@ -91,6 +92,7 @@ struct msm_gpu_funcs {
* for cmdstream that is buffered in this FIFO upstream of the CP fw.
*/
bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+ void (*sysprof_setup)(struct msm_gpu *gpu);
};
/* Additional state for iommu faults: */
@@ -297,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)
return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
+static inline bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+
+ return adreno_smmu && adreno_smmu->set_prr_addr;
+}
+
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32
@@ -613,16 +626,19 @@ struct msm_gpu_state {
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
+ trace_msm_gpu_regaccess(reg);
writel(data, gpu->mmio + (reg << 2));
}
static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
+ trace_msm_gpu_regaccess(reg);
return readl(gpu->mmio + (reg << 2));
}
static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
+ trace_msm_gpu_regaccess(reg);
msm_rmw(gpu->mmio + (reg << 2), mask, or);
}
@@ -644,7 +660,9 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
* when the lo is read, so make sure to read the lo first to trigger
* that
*/
+ trace_msm_gpu_regaccess(reg);
val = (u64) readl(gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg + 1);
val |= ((u64) readl(gpu->mmio + ((reg + 1) << 2)) << 32);
return val;
@@ -652,8 +670,10 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
{
+ trace_msm_gpu_regaccess(reg);
/* Why not a writeq here? Read the screed above */
writel(lower_32_bits(val), gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg + 1);
writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
}
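
gpu_read64() depends on the hardware latching the high half when the low half is read, hence the fixed lo-then-hi order now visible in the trace stream. A sketch of that access pattern against a fake register pair (the latch is modelled as a plain copy; all names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t reg_lo, reg_hi, reg_hi_latched;

static uint32_t read_lo(void)
{
	reg_hi_latched = reg_hi;	/* hw latches hi when lo is read */
	return reg_lo;
}

static uint32_t read_hi(void)
{
	return reg_hi_latched;
}

static uint64_t read64(void)
{
	/* lo first, to trigger the latch, mirroring gpu_read64() */
	uint64_t val = read_lo();

	return val | ((uint64_t)read_hi() << 32);
}

int main(void)
{
	reg_lo = 0xdeadbeef;
	reg_hi = 0x1;
	printf("0x%llx\n", (unsigned long long)read64());	/* 0x1deadbeef */
	return 0;
}
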
diff --git a/drivers/gpu/drm/msm/msm_gpu_trace.h b/drivers/gpu/drm/msm/msm_gpu_trace.h
index 781bbe5540bd..5417f8d389a3 100644
--- a/drivers/gpu/drm/msm/msm_gpu_trace.h
+++ b/drivers/gpu/drm/msm/msm_gpu_trace.h
@@ -219,6 +219,18 @@ TRACE_EVENT(msm_mmu_prealloc_cleanup,
TP_printk("count=%u, remaining=%u", __entry->count, __entry->remaining)
);
+TRACE_EVENT(msm_gpu_regaccess,
+ TP_PROTO(u32 offset),
+ TP_ARGS(offset),
+ TP_STRUCT__entry(
+ __field(u32, offset)
+ ),
+ TP_fast_assign(
+ __entry->offset = offset;
+ ),
+ TP_printk("offset=0x%x", __entry->offset)
+);
+
#endif
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 76cdd5ea06a0..a188617653e8 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall
ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
if (ret != p->count) {
+ kfree(p->pages);
+ p->pages = NULL;
p->count = ret;
return -ENOMEM;
}
@@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
struct kmem_cache *pt_cache = get_pt_cache(mmu);
uint32_t remaining_pt_count = p->count - p->ptr;
+ if (!p->pages)
+ return;
+
if (p->count > 0)
trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);
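
Together the two hunks form the usual free-and-NULL idiom: the allocation path clears p->pages on failure, and the cleanup path bails out early when it is NULL, so cleanup is safe after a failed or skipped allocation. A standalone sketch (structure and names are illustrative, not the driver's):

#include <stdio.h>
#include <stdlib.h>

struct prealloc {
	void **pages;
	int count;
};

static int prealloc_init(struct prealloc *p, int want)
{
	p->pages = calloc(want, sizeof(*p->pages));
	if (!p->pages) {
		p->count = 0;	/* leave the struct in a cleanup-safe state */
		return -1;
	}
	p->count = want;
	return 0;
}

static void prealloc_cleanup(struct prealloc *p)
{
	if (!p->pages)		/* init failed or never ran: nothing to free */
		return;
	free(p->pages);
	p->pages = NULL;
}

int main(void)
{
	struct prealloc p = { 0 };

	if (prealloc_init(&p, 4))
		return 1;
	prealloc_cleanup(&p);
	prealloc_cleanup(&p);	/* safe: pointer was cleared above */
	printf("ok\n");
	return 0;
}
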
@@ -721,7 +726,7 @@ struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
int ret;
if (!device_iommu_mapped(dev))
- return NULL;
+ return ERR_PTR(-ENODEV);
domain = iommu_paging_domain_alloc(dev);
if (IS_ERR(domain))
@@ -756,7 +761,7 @@ struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
struct msm_mmu *mmu;
mmu = msm_iommu_new(dev, quirks);
- if (IS_ERR_OR_NULL(mmu))
+ if (IS_ERR(mmu))
return mmu;
iommu = to_msm_iommu(mmu);
@@ -772,11 +777,11 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
struct msm_mmu *mmu;
mmu = msm_iommu_new(dev, quirks);
- if (IS_ERR_OR_NULL(mmu))
+ if (IS_ERR(mmu))
return mmu;
iommu = to_msm_iommu(mmu);
- if (adreno_smmu && adreno_smmu->cookie) {
+ if (adreno_smmu->cookie) {
const struct io_pgtable_cfg *cfg =
adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
size_t tblsz = get_tblsz(cfg);
diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c
index 56828d218e88..6e5e94f5c9a7 100644
--- a/drivers/gpu/drm/msm/msm_kms.c
+++ b/drivers/gpu/drm/msm/msm_kms.c
@@ -177,12 +177,11 @@ static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void
return -ENOSYS;
}
-struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
+struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev, struct device *mdss_dev)
{
struct drm_gpuvm *vm;
struct msm_mmu *mmu;
struct device *mdp_dev = dev->dev;
- struct device *mdss_dev = mdp_dev->parent;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct device *iommu_dev;
@@ -193,18 +192,17 @@ struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev)
*/
if (device_iommu_mapped(mdp_dev))
iommu_dev = mdp_dev;
- else
+ else if (mdss_dev && device_iommu_mapped(mdss_dev))
iommu_dev = mdss_dev;
+ else {
+ drm_info(dev, "no IOMMU, bailing out\n");
+ return ERR_PTR(-ENODEV);
+ }
mmu = msm_iommu_disp_new(iommu_dev, 0);
if (IS_ERR(mmu))
return ERR_CAST(mmu);
- if (!mmu) {
- drm_info(dev, "no IOMMU, fallback to phys contig buffers for scanout\n");
- return NULL;
- }
-
vm = msm_gem_vm_create(dev, mmu, "mdp_kms",
0x1000, 0x100000000 - 0x1000, true);
if (IS_ERR(vm)) {
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 39885b333910..bf9a33e925ac 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -154,8 +154,7 @@ static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
dev = msm_mdss->dev;
- domain = irq_domain_create_linear(of_fwnode_handle(dev->of_node), 32,
- &msm_mdss_irqdomain_ops, msm_mdss);
+ domain = irq_domain_create_linear(dev_fwnode(dev), 32, &msm_mdss_irqdomain_ops, msm_mdss);
if (!domain) {
dev_err(dev, "failed to add irq_domain\n");
return -EINVAL;
@@ -554,8 +553,10 @@ static const struct msm_mdss_data data_153k6 = {
static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,mdss", .data = &data_153k6 },
+ { .compatible = "qcom,glymur-mdss", .data = &data_57k },
{ .compatible = "qcom,msm8998-mdss", .data = &data_76k8 },
{ .compatible = "qcom,qcm2290-mdss", .data = &data_76k8 },
+ { .compatible = "qcom,qcs8300-mdss", .data = &data_74k },
{ .compatible = "qcom,sa8775p-mdss", .data = &data_74k },
{ .compatible = "qcom,sar2130p-mdss", .data = &data_74k },
{ .compatible = "qcom,sdm670-mdss", .data = &data_76k8 },
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 8617a82cd6b3..d53dfad16bde 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -40,6 +40,10 @@ int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sy
break;
}
+ /* Some gpu families require additional setup for sysprof */
+ if (gpu->funcs->sysprof_setup)
+ gpu->funcs->sysprof_setup(gpu);
+
ctx->sysprof = sysprof;
return 0;
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 86fab2750ba7..3941e7510754 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -7,9 +7,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<import file="adreno/adreno_pm4.xml"/>
<import file="adreno/a6xx_enums.xml"/>
<import file="adreno/a7xx_enums.xml"/>
+<import file="adreno/a8xx_enums.xml"/>
<import file="adreno/a6xx_perfcntrs.xml"/>
<import file="adreno/a7xx_perfcntrs.xml"/>
<import file="adreno/a6xx_descriptors.xml"/>
+<import file="adreno/a8xx_descriptors.xml"/>
<!--
Each register that is actually being used by driver should have "usage" defined,
@@ -84,29 +86,134 @@ by a particular renderpass/blit.
<bitfield name="CP_ILLEGAL_INSTR_ERROR_BV" pos="17" type="boolean" variants="A7XX-"/>
</bitset>
+ <bitset name="A8XX_CP_GLOBAL_INT_MASK" inline="no" varset="chip">
+ <bitfield name="HWFAULTBR" pos="0" type="boolean"/>
+ <bitfield name="HWFAULTBV" pos="1" type="boolean"/>
+ <bitfield name="HWFAULTLPAC" pos="2" type="boolean"/>
+ <bitfield name="HWFAULTAQE0" pos="3" type="boolean"/>
+ <bitfield name="HWFAULTAQE1" pos="4" type="boolean"/>
+ <bitfield name="HWFAULTDDEBR" pos="5" type="boolean"/>
+ <bitfield name="HWFAULTDDEBV" pos="6" type="boolean"/>
+ <bitfield name="SWFAULTBR" pos="16" type="boolean"/>
+ <bitfield name="SWFAULTBV" pos="17" type="boolean"/>
+ <bitfield name="SWFAULTLPAC" pos="18" type="boolean"/>
+ <bitfield name="SWFAULTAQE0" pos="19" type="boolean"/>
+ <bitfield name="SWFAULTAQE1" pos="20" type="boolean"/>
+ <bitfield name="SWFAULTDDEBR" pos="21" type="boolean"/>
+ <bitfield name="SWFAULTDDEBV" pos="22" type="boolean"/>
+ </bitset>
+
+ <bitset name="A8XX_CP_INTERRUPT_STATUS_MASK_PIPE" inline="no" varset="chip">
+ <bitfield name="CSFRBWRAP" pos="0" type="boolean"/>
+ <bitfield name="CSFIB1WRAP" pos="1" type="boolean"/>
+ <bitfield name="CSFIB2WRAP" pos="2" type="boolean"/>
+ <bitfield name="CSFIB3WRAP" pos="3" type="boolean"/>
+ <bitfield name="CSFSDSWRAP" pos="4" type="boolean"/>
+ <bitfield name="CSFMRBWRAP" pos="5" type="boolean"/>
+ <bitfield name="CSFVSDWRAP" pos="6" type="boolean"/>
+ <bitfield name="OPCODEERROR" pos="8" type="boolean"/>
+ <bitfield name="VSDPARITYERROR" pos="9" type="boolean"/>
+ <bitfield name="REGISTERPROTECTIONERROR" pos="10" type="boolean"/>
+ <bitfield name="ILLEGALINSTRUCTION" pos="11" type="boolean"/>
+ <bitfield name="SMMUFAULT" pos="12" type="boolean"/>
+ <bitfield name="VBIFRESPCLIENT" pos="13" type="boolean"/>
+ <bitfield name="VBIFRESPTYPE" pos="19" type="boolean"/>
+ <bitfield name="VBIFRESPREAD" pos="21" type="boolean"/>
+ <bitfield name="VBIFRESP" pos="22" type="boolean"/>
+ <bitfield name="RTWROVF" pos="23" type="boolean"/>
+ <bitfield name="LRZRTWROVF" pos="24" type="boolean"/>
+ <bitfield name="LRZRTREFCNTOVF" pos="25" type="boolean"/>
+ <bitfield name="LRZRTCLRRESMISS" pos="26" type="boolean"/>
+ </bitset>
+
+ <bitset name="A8XX_CP_HW_FAULT_STATUS_MASK_PIPE" inline="no" varset="chip">
+ <bitfield name="CSFRBFAULT" pos="0" type="boolean"/>
+ <bitfield name="CSFIB1FAULT" pos="1" type="boolean"/>
+ <bitfield name="CSFIB2FAULT" pos="2" type="boolean"/>
+ <bitfield name="CSFIB3FAULT" pos="3" type="boolean"/>
+ <bitfield name="CSFSDSFAULT" pos="4" type="boolean"/>
+ <bitfield name="CSFMRBFAULT" pos="5" type="boolean"/>
+ <bitfield name="CSFVSDFAULT" pos="6" type="boolean"/>
+ <bitfield name="SQEREADBURSTOVF" pos="8" type="boolean"/>
+ <bitfield name="EVENTENGINEOVF" pos="9" type="boolean"/>
+ <bitfield name="UCODEERROR" pos="10" type="boolean"/>
+ </bitset>
+
<reg64 offset="0x0800" name="CP_RB_BASE"/>
<reg32 offset="0x0802" name="CP_RB_CNTL"/>
+ <reg32 offset="0x0803" name="CP_RB_RPTR_WR" variants="A7XX-"/>
<reg64 offset="0x0804" name="CP_RB_RPTR_ADDR"/>
<reg32 offset="0x0806" name="CP_RB_RPTR"/>
<reg32 offset="0x0807" name="CP_RB_WPTR"/>
- <reg32 offset="0x0808" name="CP_SQE_CNTL"/>
- <reg32 offset="0x0812" name="CP_CP2GMU_STATUS">
+ <reg32 offset="0x0808" name="CP_RB_RPTR_ADDR_BV" variants="A8XX-"/>
+ <reg32 offset="0x080a" name="CP_RB_RPTR_BV" variants="A8XX-"/>
+ <reg64 offset="0x080b" name="CP_RB_BASE_LPAC" variants="A8XX-"/>
+ <reg32 offset="0x080d" name="CP_RB_CNTL_LPAC" variants="A8XX-"/>
+ <reg32 offset="0x080e" name="CP_RB_RPTR_WR_LPAC" variants="A8XX-"/>
+ <reg64 offset="0x080f" name="CP_RB_RPTR_ADDR_LPAC" variants="A8XX-"/>
+ <reg32 offset="0x0811" name="CP_RB_RPTR_LPAC" variants="A8XX-"/>
+ <reg32 offset="0x0812" name="CP_RB_WPTR_LPAC" variants="A8XX-"/>
+ <reg32 offset="0x0814" name="CP_SMMU_STREAM_ID_LPAC" variants="A8XX-"/>
+ <reg32 offset="0x0808" name="CP_SQE_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0815" name="CP_SQE_CNTL" variants="A8XX-"/>
+ <reg64 offset="0x0816" name="CP_SQE_INSTR_BASE" variants="A8XX-"/>
+ <reg64 offset="0x0818" name="CP_AQE_INSTR_BASE_0" variants="A8XX-"/>
+ <reg64 offset="0x081a" name="CP_AQE_INSTR_BASE_1" variants="A8XX-"/>
+ <reg32 offset="0x0812" name="CP_CP2GMU_STATUS" variants="A6XX-A7XX">
+ <!-- Note, layout defined by microcode -->
+ <bitfield name="IFPC" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x0822" name="CP_CP2GMU_STATUS" variants="A8XX-">
<bitfield name="IFPC" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0x0821" name="CP_HW_FAULT"/>
- <reg32 offset="0x0823" name="CP_INTERRUPT_STATUS" type="A6XX_CP_INT"/>
- <reg32 offset="0x0824" name="CP_PROTECT_STATUS"/>
- <reg32 offset="0x0825" name="CP_STATUS_1"/>
- <reg64 offset="0x0830" name="CP_SQE_INSTR_BASE"/>
- <reg32 offset="0x0840" name="CP_MISC_CNTL"/>
- <reg32 offset="0x0844" name="CP_APRIV_CNTL">
+ <reg32 offset="0x0821" name="CP_HW_FAULT" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0823" name="CP_INTERRUPT_STATUS" type="A6XX_CP_INT" variants="A6XX-A7XX"/>
+
+ <bitset name="a6xx_cp_protect_status" inline="yes">
+ <bitfield name="ADDR" low="0" high="17"/>
+ <bitfield name="READ" pos="20" type="boolean"/>
+ <bitfield name="CP_HALTED" pos="21" type="boolean"/>
+ <bitfield name="ACCESS_VIOLATION" pos="22" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x0824" name="CP_PROTECT_STATUS" type="a6xx_cp_protect_status" variants="A6XX-A7XX"/>
+ <reg32 offset="0x084f" name="CP_PROTECT_STATUS_PIPE" type="a6xx_cp_protect_status" variants="A8XX-"/>
+ <reg32 offset="0x0825" name="CP_STATUS_1" variants="A6XX-A7XX"/>
+
+ <reg32 offset="0x0825" name="CP_SEMAPHORE_REG_0" variants="A8XX-"/>
+ <array offset="0x082a" name="CP_SCRATCH_GLOBAL" stride="1" length="4" variants="A8XX-">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+ <array offset="0x0830" name="CP_SCRATCH_PIPE" stride="1" length="5" variants="A8XX-">
+ <reg32 offset="0x0" name="REG"/>
+ </array>
+
+ <reg32 offset="0x0840" name="CP_RL_ERROR_DETAILS_0" variants="A8XX-"/>
+ <reg32 offset="0x0841" name="CP_RL_ERROR_DETAILS_1" variants="A8XX-"/>
+
+ <reg64 offset="0x0830" name="CP_SQE_INSTR_BASE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0840" name="CP_MISC_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x084c" name="CP_MISC_CNTL" variants="A8XX-"/>
+
+ <reg32 offset="0x08b0" name="CP_SQE_ICACHE_CNTL_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08b1" name="CP_SQE_DCACHE_CNTL_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08b3" name="CP_HW_FAULT_STATUS_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08b4" name="CP_HW_FAULT_STATUS_MASK_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08b5" name="CP_INTERRUPT_STATUS_GLOBAL" type="A8XX_CP_GLOBAL_INT_MASK" variants="A8XX-"/>
+ <reg32 offset="0x08b6" name="CP_INTERRUPT_STATUS_MASK_GLOBAL" type="A8XX_CP_GLOBAL_INT_MASK" variants="A8XX-"/>
+ <reg32 offset="0x08b7" name="CP_INTERRUPT_STATUS_PIPE" type="A8XX_CP_INTERRUPT_STATUS_MASK_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08b8" name="CP_INTERRUPT_STATUS_MASK_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08b9" name="CP_PIPE_STATUS_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08ba" name="CP_GPU_BATCH_ID_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08bb" name="CP_SQE_STATUS_PIPE" variants="A8XX-"/>
+
+ <bitset name="a6xx_cp_apriv_cntl" inline="yes">
<!-- Crashdumper writes -->
<bitfield pos="6" name="CDWRITE" type="boolean"/>
<!-- Crashdumper reads -->
<bitfield pos="5" name="CDREAD" type="boolean"/>
-
- <!-- 4 is unknown -->
-
+ <!-- CP Scratch reg copy to mem -->
+ <bitfield pos="4" name="SCRATCHWT" type="boolean"/>
<!-- RPTR shadow writes -->
<bitfield pos="3" name="RBRPWB" type="boolean"/>
<!-- Memory accesses from PM4 packets in the ringbuffer -->
@@ -115,11 +222,16 @@ by a particular renderpass/blit.
<bitfield pos="1" name="RBFETCH" type="boolean"/>
<!-- Instruction cache fetches -->
<bitfield pos="0" name="ICACHE" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x0844" name="CP_APRIV_CNTL" type="a6xx_cp_apriv_cntl" variants="A6XX-A7XX"/>
+ <reg32 offset="0x084d" name="CP_APRIV_CNTL_PIPE" type="a6xx_cp_apriv_cntl" variants="A8XX-"/>
+
<!-- Preemptions taking longer than this threshold increment PERF_CP_LONG_PREEMPTIONS: -->
- <reg32 offset="0x08C0" name="CP_PREEMPT_THRESHOLD"/>
+ <reg32 offset="0x08c0" name="CP_PREEMPT_THRESHOLD" variants="A6XX-A7XX"/>
+ <reg32 offset="0x08ec" name="CP_PREEMPT_THRESHOLD" variants="A8XX-"/>
<!-- all the threshold values seem to be in units of quad-dwords: -->
- <reg32 offset="0x08C1" name="CP_ROQ_THRESHOLDS_1">
+ <reg32 offset="0x08c1" name="CP_ROQ_THRESHOLDS_1" variants="A6XX">
<doc>
b0..7 identifies where MRB data starts (and RB data ends)
b8..15 identifies where VSD data starts (and MRB data ends)
@@ -131,7 +243,7 @@ by a particular renderpass/blit.
<bitfield name="IB1_START" low="16" high="23" shr="2"/>
<bitfield name="IB2_START" low="24" high="31" shr="2"/>
</reg32>
- <reg32 offset="0x08C2" name="CP_ROQ_THRESHOLDS_2">
+ <reg32 offset="0x08c2" name="CP_ROQ_THRESHOLDS_2" variants="A6XX">
<doc>
low bits identify where CP_SET_DRAW_STATE stateobj
processing starts (and IB2 data ends). I'm guessing
@@ -147,176 +259,293 @@ by a particular renderpass/blit.
<!-- total ROQ size: -->
<bitfield name="ROQ_SIZE" low="16" high="31" shr="2"/>
</reg32>
- <reg32 offset="0x08C3" name="CP_MEM_POOL_SIZE"/>
- <reg32 offset="0x0841" name="CP_CHICKEN_DBG"/>
- <reg32 offset="0x0842" name="CP_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg32 offset="0x0843" name="CP_DBG_ECO_CNTL"/>
- <reg32 offset="0x084F" name="CP_PROTECT_CNTL">
+ <reg32 offset="0x08C3" name="CP_MEM_POOL_SIZE" variants="A6XX"/>
+ <reg32 offset="0x0841" name="CP_CHICKEN_DBG" variants="A6XX-A7XX"/>
+ <reg32 offset="0x08b2" name="CP_CHICKEN_DBG_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x0842" name="CP_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/>
+ <reg32 offset="0x0843" name="CP_DBG_ECO_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x084b" name="CP_DBG_ECO_CNTL" variants="A8XX-"/>
+
+ <bitset name="a6xx_cp_protect_cntl" inline="yes">
<bitfield pos="3" name="LAST_SPAN_INF_RANGE" type="boolean"/>
<bitfield pos="1" name="ACCESS_FAULT_ON_VIOL_EN" type="boolean"/>
<bitfield pos="0" name="ACCESS_PROT_EN" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x084f" name="CP_PROTECT_CNTL" type="a6xx_cp_protect_cntl" variants="A6XX-A7XX"/>
+ <bitset name="a8xx_cp_protect_cntl" inline="yes">
+ <bitfield name="HALT_SQE_RANGE" low="16" high="31"/>
+ <bitfield name="LAST_SPAN_INF_RANGE" pos="3" type="boolean"/>
+ <bitfield name="ACCESS_FAULT_ON_VIOL_EN" pos="1" type="boolean"/>
+ <bitfield name="ACCESS_PROT_EN" pos="0" type="boolean"/>
+ </bitset>
- <array offset="0x0883" name="CP_SCRATCH" stride="1" length="8">
+ <reg32 offset="0x084e" name="CP_PROTECT_CNTL_PIPE" type="a8xx_cp_protect_cntl" variants="A8XX-"/>
+
+ <array offset="0x0883" name="CP_SCRATCH" stride="1" length="8" variants="A6XX-A7XX">
<reg32 offset="0x0" name="REG" type="uint"/>
</array>
- <array offset="0x0850" name="CP_PROTECT" stride="1" length="32">
+ <array offset="0x0850" name="CP_PROTECT" stride="1" length="32" variants="A6XX-A7XX">
+ <reg32 offset="0x0" name="REG" type="a6x_cp_protect"/>
+ </array>
+ <array offset="0x0850" name="CP_PROTECT_GLOBAL" stride="1" length="64" variants="A8XX-">
+ <reg32 offset="0x0" name="REG" type="a6x_cp_protect"/>
+ </array>
+ <array offset="0x08a0" name="CP_PROTECT_PIPE" stride="1" length="16" variants="A8XX-">
<reg32 offset="0x0" name="REG" type="a6x_cp_protect"/>
</array>
- <reg32 offset="0x08A0" name="CP_CONTEXT_SWITCH_CNTL">
+ <bitset name="a6xx_cp_context_switch_cntl" inline="yes">
<bitfield name="STOP" pos="0" type="boolean"/>
<bitfield name="LEVEL" low="6" high="7"/>
<bitfield name="USES_GMEM" pos="8" type="boolean"/>
<bitfield name="SKIP_SAVE_RESTORE" pos="9" type="boolean"/>
- </reg32>
- <reg64 offset="0x08A1" name="CP_CONTEXT_SWITCH_SMMU_INFO"/>
- <reg64 offset="0x08A3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR"/>
- <reg64 offset="0x08A5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR"/>
- <reg64 offset="0x08A7" name="CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR"/>
- <reg32 offset="0x08ab" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A7XX-"/>
- <array offset="0x08D0" name="CP_PERFCTR_CP_SEL" stride="1" length="14"/>
- <array offset="0x08e0" name="CP_BV_PERFCTR_CP_SEL" stride="1" length="7" variants="A7XX-"/>
- <reg64 offset="0x0900" name="CP_CRASH_DUMP_SCRIPT_BASE"/>
- <reg32 offset="0x0902" name="CP_CRASH_DUMP_CNTL"/>
- <reg32 offset="0x0903" name="CP_CRASH_DUMP_STATUS"/>
- <reg32 offset="0x0908" name="CP_SQE_STAT_ADDR"/>
- <reg32 offset="0x0909" name="CP_SQE_STAT_DATA"/>
- <reg32 offset="0x090A" name="CP_DRAW_STATE_ADDR"/>
- <reg32 offset="0x090B" name="CP_DRAW_STATE_DATA"/>
- <reg32 offset="0x090C" name="CP_ROQ_DBG_ADDR"/>
- <reg32 offset="0x090D" name="CP_ROQ_DBG_DATA"/>
- <reg32 offset="0x090E" name="CP_MEM_POOL_DBG_ADDR"/>
- <reg32 offset="0x090F" name="CP_MEM_POOL_DBG_DATA"/>
- <reg32 offset="0x0910" name="CP_SQE_UCODE_DBG_ADDR"/>
- <reg32 offset="0x0911" name="CP_SQE_UCODE_DBG_DATA"/>
- <reg64 offset="0x0928" name="CP_IB1_BASE"/>
- <reg32 offset="0x092A" name="CP_IB1_REM_SIZE"/>
- <reg64 offset="0x092B" name="CP_IB2_BASE"/>
- <reg32 offset="0x092D" name="CP_IB2_REM_SIZE"/>
+ </bitset>
+
+ <reg32 offset="0x08a0" name="CP_CONTEXT_SWITCH_CNTL" type="a6xx_cp_context_switch_cntl" variants="A6XX-A7XX"/>
+ <reg32 offset="0x08c0" name="CP_CONTEXT_SWITCH_CNTL" type="a6xx_cp_context_switch_cntl" variants="A8XX-"/>
+
+ <reg64 offset="0x08a1" name="CP_CONTEXT_SWITCH_SMMU_INFO" variants="A6XX-A7XX"/>
+ <reg64 offset="0x08a3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR" variants="A6XX-A7XX"/>
+ <reg64 offset="0x08a5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR" variants="A6XX-A7XX"/>
+ <reg64 offset="0x08a7" name="CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR" variants="A6XX-A7XX"/>
+ <reg32 offset="0x08ab" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A7XX"/>
+
+ <reg64 offset="0x08c1" name="CP_CONTEXT_SWITCH_SMMU_INFO" variants="A8XX-"/>
+ <reg64 offset="0x08c3" name="CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR" variants="A8XX-"/>
+ <reg64 offset="0x08c5" name="CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR" variants="A8XX-"/>
+ <reg64 offset="0x08c7" name="CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR" variants="A8XX-"/>
+ <reg32 offset="0x08cb" name="CP_CONTEXT_SWITCH_LEVEL_STATUS" variants="A8XX-"/>
+
+ <array offset="0x08d0" name="CP_PERFCTR_CP_SEL" stride="1" length="14" variants="A6XX-A7XX"/>
+ <array offset="0x08d0" name="CP_PERFCTR_CP_SEL" stride="1" length="21" variants="A8XX-"/>
+ <array offset="0x08e0" name="CP_BV_PERFCTR_CP_SEL" stride="1" length="7" variants="A7XX"/>
+ <reg64 offset="0x0900" name="CP_CRASH_DUMP_SCRIPT_BASE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0902" name="CP_CRASH_DUMP_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0903" name="CP_CRASH_DUMP_STATUS" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0842" name="CP_CRASH_DUMP_SCRIPT_BASE" variants="A8XX-"/>
+ <reg32 offset="0x0844" name="CP_CRASH_DUMP_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x0845" name="CP_CRASH_DUMP_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x0908" name="CP_SQE_STAT_ADDR" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0909" name="CP_SQE_STAT_DATA" variants="A6XX-A7XX"/>
+ <reg32 offset="0x090a" name="CP_DRAW_STATE_ADDR" variants="A6XX-A7XX"/>
+ <reg32 offset="0x090b" name="CP_DRAW_STATE_DATA" variants="A6XX-A7XX"/>
+ <reg32 offset="0x090c" name="CP_ROQ_DBG_ADDR" variants="A6XX-A7XX"/>
+ <reg32 offset="0x090d" name="CP_ROQ_DBG_DATA" variants="A6XX-A7XX"/>
+ <reg32 offset="0x090e" name="CP_MEM_POOL_DBG_ADDR" variants="A6XX-A7XX"/>
+ <reg32 offset="0x090f" name="CP_MEM_POOL_DBG_DATA" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0910" name="CP_SQE_UCODE_DBG_ADDR" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0911" name="CP_SQE_UCODE_DBG_DATA" variants="A6XX-A7XX"/>
+
+ <reg32 offset="0x08f0" name="CP_SQE_STAT_ADDR_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08f1" name="CP_SQE_STAT_DATA_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08f2" name="CP_DRAW_STATE_ADDR_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08f3" name="CP_DRAW_STATE_DATA_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08f4" name="CP_ROQ_DBG_ADDR_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08f5" name="CP_ROQ_DBG_DATA_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08f6" name="CP_MEM_POOL_DBG_ADDR_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08f7" name="CP_MEM_POOL_DBG_DATA_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08f8" name="CP_SQE_UCODE_DBG_ADDR_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08f9" name="CP_SQE_UCODE_DBG_DATA_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08fa" name="CP_RESOURCE_TABLE_DBG_ADDR_BV" variants="A8XX-"/>
+ <reg32 offset="0x08fb" name="CP_RESOURCE_TABLE_DBG_DATA_BV" variants="A8XX-"/>
+ <reg32 offset="0x08fc" name="CP_FIFO_DBG_ADDR_LPAC" variants="A8XX-"/>
+ <reg32 offset="0x08fd" name="CP_FIFO_DBG_DATA_LPAC" variants="A8XX-"/>
+ <reg32 offset="0x08fe" name="CP_FIFO_DBG_ADDR_DDE_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x08ff" name="CP_FIFO_DBG_DATA_DDE_PIPE" variants="A8XX-"/>
+
+ <reg32 offset="0x0b00" name="CP_SLICE_MEM_POOL_DBG_ADDR_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x0b01" name="CP_SLICE_MEM_POOL_DBG_DATA_PIPE" variants="A8XX-"/>
+ <reg32 offset="0x0b93" name="CP_SLICE_CHICKEN_DBG_PIPE" variants="A8XX-"/>
+
+ <reg64 offset="0x0928" name="CP_IB1_BASE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x092a" name="CP_IB1_REM_SIZE" variants="A6XX-A7XX"/>
+ <reg64 offset="0x092b" name="CP_IB2_BASE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x092d" name="CP_IB2_REM_SIZE" variants="A6XX-A7XX"/>
<!-- SDS == CP_SET_DRAW_STATE: -->
- <reg64 offset="0x092e" name="CP_SDS_BASE"/>
- <reg32 offset="0x0930" name="CP_SDS_REM_SIZE"/>
+ <reg64 offset="0x092e" name="CP_SDS_BASE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0930" name="CP_SDS_REM_SIZE" variants="A6XX-A7XX"/>
<!-- MRB == MEM_READ_ADDR/$addr in SQE firmware -->
- <reg64 offset="0x0931" name="CP_MRB_BASE"/>
- <reg32 offset="0x0933" name="CP_MRB_REM_SIZE"/>
+ <reg64 offset="0x0931" name="CP_MRB_BASE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0933" name="CP_MRB_REM_SIZE" variants="A6XX-A7XX"/>
<!--
VSD == Visibility Stream Decode
This is used by CP to read the draw stream and skip empty draws
-->
- <reg64 offset="0x0934" name="CP_VSD_BASE"/>
+ <reg64 offset="0x0934" name="CP_VSD_BASE" variants="A6XX-A7XX"/>
+
+ <reg64 offset="0x0900" name="CP_IB1_BASE" variants="A8XX-"/>
+ <reg32 offset="0x0902" name="CP_IB1_REM_SIZE" variants="A8XX-"/>
+ <reg32 offset="0x0903" name="CP_IB1_INIT_SIZE" variants="A8XX-"/>
+ <reg64 offset="0x0904" name="CP_IB2_BASE" variants="A8XX-"/>
+ <reg32 offset="0x0906" name="CP_IB2_REM_SIZE" variants="A8XX-"/>
+ <reg32 offset="0x0907" name="CP_IB2_INIT_SIZE" variants="A8XX-"/>
+ <reg64 offset="0x0908" name="CP_IB3_BASE" variants="A8XX-"/>
+ <reg32 offset="0x090a" name="CP_IB3_REM_SIZE" variants="A8XX-"/>
+ <reg32 offset="0x090b" name="CP_IB3_INIT_SIZE" variants="A8XX-"/>
+ <reg64 offset="0x090c" name="CP_SDS_BASE" variants="A8XX-"/>
+ <reg32 offset="0x090e" name="CP_SDS_REM_SIZE" variants="A8XX-"/>
+ <reg32 offset="0x090f" name="CP_SDS_INIT_SIZE" variants="A8XX-"/>
+ <reg64 offset="0x0910" name="CP_MRB_BASE" variants="A8XX-"/>
+ <reg32 offset="0x0912" name="CP_MRB_REM_SIZE" variants="A8XX-"/>
+ <reg32 offset="0x0913" name="CP_MRB_INIT_SIZE" variants="A8XX-"/>
+ <reg64 offset="0x0914" name="CP_VSD_BASE" variants="A8XX-"/>
+ <reg32 offset="0x0916" name="CP_VSD_REM_SIZE" variants="A8XX-"/>
+ <reg32 offset="0x0917" name="CP_VSD_INIT_SIZE" variants="A8XX-"/>
<bitset name="a6xx_roq_status" inline="yes">
<bitfield name="RPTR" low="0" high="9"/>
<bitfield name="WPTR" low="16" high="25"/>
</bitset>
- <reg32 offset="0x0939" name="CP_ROQ_RB_STATUS" type="a6xx_roq_status"/>
- <reg32 offset="0x093a" name="CP_ROQ_IB1_STATUS" type="a6xx_roq_status"/>
- <reg32 offset="0x093b" name="CP_ROQ_IB2_STATUS" type="a6xx_roq_status"/>
- <reg32 offset="0x093c" name="CP_ROQ_SDS_STATUS" type="a6xx_roq_status"/>
- <reg32 offset="0x093d" name="CP_ROQ_MRB_STATUS" type="a6xx_roq_status"/>
- <reg32 offset="0x093e" name="CP_ROQ_VSD_STATUS" type="a6xx_roq_status"/>
-
- <reg32 offset="0x0943" name="CP_IB1_INIT_SIZE"/>
- <reg32 offset="0x0944" name="CP_IB2_INIT_SIZE"/>
- <reg32 offset="0x0945" name="CP_SDS_INIT_SIZE"/>
- <reg32 offset="0x0946" name="CP_MRB_INIT_SIZE"/>
- <reg32 offset="0x0947" name="CP_VSD_INIT_SIZE"/>
-
- <reg32 offset="0x0948" name="CP_ROQ_AVAIL_RB">
+ <reg32 offset="0x0939" name="CP_ROQ_RB_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/>
+ <reg32 offset="0x093a" name="CP_ROQ_IB1_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/>
+ <reg32 offset="0x093b" name="CP_ROQ_IB2_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/>
+ <reg32 offset="0x093c" name="CP_ROQ_SDS_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/>
+ <reg32 offset="0x093d" name="CP_ROQ_MRB_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/>
+ <reg32 offset="0x093e" name="CP_ROQ_VSD_STATUS" type="a6xx_roq_status" variants="A6XX-A7XX"/>
+
+ <reg32 offset="0x0920" name="CP_ROQ_RB_STATUS" type="a6xx_roq_status" variants="A8XX-"/>
+ <reg32 offset="0x0921" name="CP_ROQ_IB1_STATUS" type="a6xx_roq_status" variants="A8XX-"/>
+ <reg32 offset="0x0922" name="CP_ROQ_IB2_STATUS" type="a6xx_roq_status" variants="A8XX-"/>
+ <reg32 offset="0x0923" name="CP_ROQ_IB3_STATUS" type="a6xx_roq_status" variants="A8XX-"/>
+ <reg32 offset="0x0924" name="CP_ROQ_SDS_STATUS" type="a6xx_roq_status" variants="A8XX-"/>
+ <reg32 offset="0x0925" name="CP_ROQ_MRB_STATUS" type="a6xx_roq_status" variants="A8XX-"/>
+ <reg32 offset="0x0926" name="CP_ROQ_VSD_STATUS" type="a6xx_roq_status" variants="A8XX-"/>
+
+ <reg32 offset="0x0943" name="CP_IB1_INIT_SIZE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0944" name="CP_IB2_INIT_SIZE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0945" name="CP_SDS_INIT_SIZE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0946" name="CP_MRB_INIT_SIZE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0947" name="CP_VSD_INIT_SIZE" variants="A6XX-A7XX"/>
+
+ <bitset name="a6xx_cp_roq_avail" inline="yes">
<doc>number of remaining dwords incl current dword being consumed?</doc>
<bitfield name="REM" low="16" high="31"/>
- </reg32>
- <reg32 offset="0x0949" name="CP_ROQ_AVAIL_IB1">
- <doc>number of remaining dwords incl current dword being consumed?</doc>
- <bitfield name="REM" low="16" high="31"/>
- </reg32>
- <reg32 offset="0x094a" name="CP_ROQ_AVAIL_IB2">
+ </bitset>
+
+ <reg32 offset="0x0948" name="CP_ROQ_AVAIL_RB" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0949" name="CP_ROQ_AVAIL_IB1" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/>
+ <reg32 offset="0x094a" name="CP_ROQ_AVAIL_IB2" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/>
+ <reg32 offset="0x094b" name="CP_ROQ_AVAIL_SDS" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/>
+ <reg32 offset="0x094c" name="CP_ROQ_AVAIL_MRB" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/>
+ <reg32 offset="0x094d" name="CP_ROQ_AVAIL_VSD" type="a6xx_cp_roq_avail" variants="A6XX-A7XX"/>
+
+ <reg32 offset="0x0918" name="CP_ROQ_AVAIL_RB" type="a6xx_cp_roq_avail" variants="A8XX-"/>
+ <reg32 offset="0x0919" name="CP_ROQ_AVAIL_IB1" type="a6xx_cp_roq_avail" variants="A8XX-"/>
+ <reg32 offset="0x091a" name="CP_ROQ_AVAIL_IB2" type="a6xx_cp_roq_avail" variants="A8XX-"/>
+ <reg32 offset="0x091b" name="CP_ROQ_AVAIL_IB3" type="a6xx_cp_roq_avail" variants="A8XX-"/>
+ <reg32 offset="0x091c" name="CP_ROQ_AVAIL_SDS" type="a6xx_cp_roq_avail" variants="A8XX-"/>
+ <reg32 offset="0x091d" name="CP_ROQ_AVAIL_MRB" type="a6xx_cp_roq_avail" variants="A8XX-"/>
+ <reg32 offset="0x091e" name="CP_ROQ_AVAIL_VSD" type="a6xx_cp_roq_avail" variants="A8XX-"/>
+
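As an aside on the new a6xx_cp_roq_avail bitset above: the remaining-dword count lives in bits [31:16], so a reader has to shift it down before use. A minimal C sketch of the decode, assuming nothing beyond the field layout given in the XML:

    #include <stdint.h>

    /* Extract REM (bits [31:16]) from a CP_ROQ_AVAIL_* register value,
     * per the a6xx_cp_roq_avail bitset above. */
    static inline uint32_t cp_roq_avail_rem(uint32_t val)
    {
        return (val >> 16) & 0xffff;
    }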
+ <bitset name="a7xx_aperture_cntl" inline="yes">
+ <bitfield name="PIPE" low="12" high="13" type="adreno_pipe"/>
+ <bitfield name="CLUSTER" low="8" high="10" type="a7xx_cluster"/>
+ <bitfield name="CONTEXT" low="4" high="5"/>
+ </bitset>
+ <reg64 offset="0x0980" name="CP_ALWAYS_ON_COUNTER" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0982" name="CP_ALWAYS_ON_CONTEXT" variants="A6XX-A7XX"/>
+ <reg64 offset="0x08e7" name="CP_ALWAYS_ON_COUNTER" variants="A8XX-"/>
+ <reg64 offset="0x08e9" name="CP_ALWAYS_ON_CONTEXT" variants="A8XX-"/>
+ <reg32 offset="0x098d" name="CP_AHB_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0838" name="CP_AHB_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" variants="A6XX"/>
+ <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" type="a7xx_aperture_cntl" variants="A7XX"/>
+ <reg32 offset="0x0A01" name="CP_APERTURE_CNTL_SQE" variants="A6XX"/>
+ <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" variants="A6XX"/>
+ <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" type="a7xx_aperture_cntl" variants="A7XX"/>
+
+ <array offset="0x0a9c" name="CP_RESERVED_REG" stride="1" length="4" variants="A7XX"/>
+ <array offset="0x0958" name="CP_RESERVED_REG" stride="1" length="4" variants="A8XX-"/>
+
+ <bitset name="a8xx_aperture_cntl" inline="yes">
+ <bitfield name="CONTEXTID3D" low="4" high="5"/>
+ <bitfield name="CLUSTERID" low="8" high="11"/>
+ <bitfield name="PIPEID" low="12" high="15"/>
+ <bitfield name="SLICEID" low="16" high="18"/>
+ <bitfield name="USESLICEID" pos="23" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x081c" name="CP_APERTURE_CNTL_HOST" type="a8xx_aperture_cntl" variants="A8XX-"/>
+ <reg32 offset="0x081d" name="CP_APERTURE_CNTL_GMU" type="a8xx_aperture_cntl" variants="A8XX-"/>
+ <reg32 offset="0x081e" name="CP_APERTURE_CNTL_CD" type="a8xx_aperture_cntl" variants="A8XX-"/>
+
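To make the difference between the two aperture layouts concrete, here is a hedged C sketch that packs the fields exactly as the a7xx_aperture_cntl and a8xx_aperture_cntl bitsets above lay them out; the helper names are invented for the example, and only the bit positions come from the XML:

    #include <stdbool.h>
    #include <stdint.h>

    /* a7xx_aperture_cntl: CONTEXT [5:4], CLUSTER [10:8], PIPE [13:12]. */
    static inline uint32_t a7xx_aperture(uint32_t pipe, uint32_t cluster,
                                         uint32_t context)
    {
        return (pipe & 0x3) << 12 | (cluster & 0x7) << 8 |
               (context & 0x3) << 4;
    }

    /* a8xx_aperture_cntl adds slicing: CONTEXTID3D [5:4], CLUSTERID [11:8],
     * PIPEID [15:12], SLICEID [18:16], and a USESLICEID enable at bit 23. */
    static inline uint32_t a8xx_aperture(uint32_t pipe, uint32_t cluster,
                                         uint32_t context, uint32_t slice,
                                         bool use_slice)
    {
        return (use_slice ? 1u << 23 : 0) | (slice & 0x7) << 16 |
               (pipe & 0xf) << 12 | (cluster & 0xf) << 8 |
               (context & 0x3) << 4;
    }

The packed value would then be written to CP_APERTURE_CNTL_HOST (or _CD/_GMU) before accessing registers behind the aperture.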
+ <reg32 offset="0x0a61" name="CP_BV_PROTECT_STATUS" variants="A7XX"/>
+ <reg32 offset="0x0a64" name="CP_BV_HW_FAULT" variants="A7XX"/>
+ <reg32 offset="0x0a66" name="CP_BV_RB_RPTR" variants="A7XX"/>
+ <reg64 offset="0x0a6d" name="CP_BV_IB1_BASE" variants="A7XX"/>
+ <reg32 offset="0x0a70" name="CP_BV_IB1_REM_SIZE" variants="A7XX"/>
+ <reg64 offset="0x0a71" name="CP_BV_IB2_BASE" variants="A7XX"/>
+ <reg32 offset="0x0a74" name="CP_BV_IB2_REM_SIZE" variants="A7XX"/>
+ <reg32 offset="0x0a81" name="CP_BV_DRAW_STATE_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0a82" name="CP_BV_DRAW_STATE_DATA" variants="A7XX"/>
+ <reg32 offset="0x0a83" name="CP_BV_ROQ_DBG_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0a84" name="CP_BV_ROQ_DBG_DATA" variants="A7XX"/>
+ <reg32 offset="0x0a85" name="CP_BV_SQE_UCODE_DBG_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0a86" name="CP_BV_SQE_UCODE_DBG_DATA" variants="A7XX"/>
+ <reg32 offset="0x0a87" name="CP_BV_SQE_STAT_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0a88" name="CP_BV_SQE_STAT_DATA" variants="A7XX"/>
+
+ <reg32 offset="0x0a8f" name="CP_BV_ROQ_AVAIL_RB" variants="A7XX">
<doc>number of remaining dwords incl current dword being consumed?</doc>
<bitfield name="REM" low="16" high="31"/>
</reg32>
- <reg32 offset="0x094b" name="CP_ROQ_AVAIL_SDS">
+ <reg32 offset="0x0a90" name="CP_BV_ROQ_AVAIL_IB1" variants="A7XX">
<doc>number of remaining dwords incl current dword being consumed?</doc>
<bitfield name="REM" low="16" high="31"/>
</reg32>
- <reg32 offset="0x094c" name="CP_ROQ_AVAIL_MRB">
- <doc>number of dwords that have already been read but haven't been consumed by $addr</doc>
- <bitfield name="REM" low="16" high="31"/>
- </reg32>
- <reg32 offset="0x094d" name="CP_ROQ_AVAIL_VSD">
+ <reg32 offset="0x0a91" name="CP_BV_ROQ_AVAIL_IB2" variants="A7XX">
<doc>number of remaining dwords incl current dword being consumed?</doc>
<bitfield name="REM" low="16" high="31"/>
</reg32>
- <bitset name="a7xx_aperture_cntl" inline="yes">
- <bitfield name="PIPE" low="12" high="13" type="a7xx_pipe"/>
- <bitfield name="CLUSTER" low="8" high="10" type="a7xx_cluster"/>
- <bitfield name="CONTEXT" low="4" high="5"/>
- </bitset>
- <reg64 offset="0x0980" name="CP_ALWAYS_ON_COUNTER"/>
- <reg32 offset="0x098D" name="CP_AHB_CNTL"/>
- <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" variants="A6XX"/>
- <reg32 offset="0x0A00" name="CP_APERTURE_CNTL_HOST" type="a7xx_aperture_cntl" variants="A7XX-"/>
- <reg32 offset="0x0A01" name="CP_APERTURE_CNTL_SQE" variants="A6XX"/>
- <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" variants="A6XX"/>
- <reg32 offset="0x0A03" name="CP_APERTURE_CNTL_CD" type="a7xx_aperture_cntl" variants="A7XX-"/>
-
- <reg32 offset="0x0a61" name="CP_BV_PROTECT_STATUS" variants="A7XX-"/>
- <reg32 offset="0x0a64" name="CP_BV_HW_FAULT" variants="A7XX-"/>
- <reg32 offset="0x0a81" name="CP_BV_DRAW_STATE_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a82" name="CP_BV_DRAW_STATE_DATA" variants="A7XX-"/>
- <reg32 offset="0x0a83" name="CP_BV_ROQ_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a84" name="CP_BV_ROQ_DBG_DATA" variants="A7XX-"/>
- <reg32 offset="0x0a85" name="CP_BV_SQE_UCODE_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a86" name="CP_BV_SQE_UCODE_DBG_DATA" variants="A7XX-"/>
- <reg32 offset="0x0a87" name="CP_BV_SQE_STAT_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a88" name="CP_BV_SQE_STAT_DATA" variants="A7XX-"/>
- <reg32 offset="0x0a96" name="CP_BV_MEM_POOL_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a97" name="CP_BV_MEM_POOL_DBG_DATA" variants="A7XX-"/>
- <reg64 offset="0x0a98" name="CP_BV_RB_RPTR_ADDR" variants="A7XX-"/>
-
- <reg32 offset="0x0a9a" name="CP_RESOURCE_TABLE_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0a9b" name="CP_RESOURCE_TABLE_DBG_DATA" variants="A7XX-"/>
- <reg32 offset="0x0ad0" name="CP_BV_APRIV_CNTL" variants="A7XX-"/>
- <reg32 offset="0x0ada" name="CP_BV_CHICKEN_DBG" variants="A7XX-"/>
-
- <reg32 offset="0x0b0a" name="CP_LPAC_DRAW_STATE_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0b0b" name="CP_LPAC_DRAW_STATE_DATA" variants="A7XX-"/>
- <reg32 offset="0x0b0c" name="CP_LPAC_ROQ_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0b27" name="CP_SQE_AC_UCODE_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0b28" name="CP_SQE_AC_UCODE_DBG_DATA" variants="A7XX-"/>
- <reg32 offset="0x0b29" name="CP_SQE_AC_STAT_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0b2a" name="CP_SQE_AC_STAT_DATA" variants="A7XX-"/>
-
- <reg32 offset="0x0b31" name="CP_LPAC_APRIV_CNTL" variants="A7XX-"/>
- <reg32 offset="0x0B34" name="CP_LPAC_PROG_FIFO_SIZE"/>
- <reg32 offset="0x0b35" name="CP_LPAC_ROQ_DBG_DATA" variants="A7XX-"/>
- <reg32 offset="0x0b36" name="CP_LPAC_FIFO_DBG_DATA" variants="A7XX-"/>
- <reg32 offset="0x0b40" name="CP_LPAC_FIFO_DBG_ADDR" variants="A7XX-"/>
- <reg32 offset="0x0b81" name="CP_LPAC_SQE_CNTL"/>
- <reg64 offset="0x0b82" name="CP_LPAC_SQE_INSTR_BASE"/>
-
- <reg64 offset="0x0b70" name="CP_AQE_INSTR_BASE_0" variants="A7XX-"/>
- <reg64 offset="0x0b72" name="CP_AQE_INSTR_BASE_1" variants="A7XX-"/>
- <reg32 offset="0x0b78" name="CP_AQE_APRIV_CNTL" variants="A7XX-"/>
-
- <reg32 offset="0x0ba8" name="CP_AQE_ROQ_DBG_ADDR_0" variants="A7XX-"/>
- <reg32 offset="0x0ba9" name="CP_AQE_ROQ_DBG_ADDR_1" variants="A7XX-"/>
- <reg32 offset="0x0bac" name="CP_AQE_ROQ_DBG_DATA_0" variants="A7XX-"/>
- <reg32 offset="0x0bad" name="CP_AQE_ROQ_DBG_DATA_1" variants="A7XX-"/>
- <reg32 offset="0x0bb0" name="CP_AQE_UCODE_DBG_ADDR_0" variants="A7XX-"/>
- <reg32 offset="0x0bb1" name="CP_AQE_UCODE_DBG_ADDR_1" variants="A7XX-"/>
- <reg32 offset="0x0bb4" name="CP_AQE_UCODE_DBG_DATA_0" variants="A7XX-"/>
- <reg32 offset="0x0bb5" name="CP_AQE_UCODE_DBG_DATA_1" variants="A7XX-"/>
- <reg32 offset="0x0bb8" name="CP_AQE_STAT_ADDR_0" variants="A7XX-"/>
- <reg32 offset="0x0bb9" name="CP_AQE_STAT_ADDR_1" variants="A7XX-"/>
- <reg32 offset="0x0bbc" name="CP_AQE_STAT_DATA_0" variants="A7XX-"/>
- <reg32 offset="0x0bbd" name="CP_AQE_STAT_DATA_1" variants="A7XX-"/>
-
- <reg32 offset="0x0C01" name="VSC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg32 offset="0x0018" name="RBBM_GPR0_CNTL"/>
- <reg32 offset="0x0201" name="RBBM_INT_0_STATUS" type="A6XX_RBBM_INT_0_MASK"/>
- <reg32 offset="0x0210" name="RBBM_STATUS">
+ <reg32 offset="0x0a96" name="CP_BV_MEM_POOL_DBG_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0a97" name="CP_BV_MEM_POOL_DBG_DATA" variants="A7XX"/>
+ <reg64 offset="0x0a98" name="CP_BV_RB_RPTR_ADDR" variants="A7XX"/>
+
+ <reg32 offset="0x0a9a" name="CP_RESOURCE_TABLE_DBG_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0a9b" name="CP_RESOURCE_TABLE_DBG_DATA" variants="A7XX"/>
+ <reg32 offset="0x0ad0" name="CP_BV_APRIV_CNTL" variants="A7XX"/>
+ <reg32 offset="0x0ada" name="CP_BV_CHICKEN_DBG" variants="A7XX"/>
+
+ <reg32 offset="0x0b0a" name="CP_LPAC_DRAW_STATE_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0b0b" name="CP_LPAC_DRAW_STATE_DATA" variants="A7XX"/>
+ <reg32 offset="0x0b0c" name="CP_LPAC_ROQ_DBG_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0b27" name="CP_SQE_AC_UCODE_DBG_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0b28" name="CP_SQE_AC_UCODE_DBG_DATA" variants="A7XX"/>
+ <reg32 offset="0x0b29" name="CP_SQE_AC_STAT_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0b2a" name="CP_SQE_AC_STAT_DATA" variants="A7XX"/>
+
+ <reg32 offset="0x0b31" name="CP_LPAC_APRIV_CNTL" variants="A7XX"/>
+ <reg32 offset="0x0b34" name="CP_LPAC_PROG_FIFO_SIZE" variants="A7XX"/>
+ <reg32 offset="0x0b35" name="CP_LPAC_ROQ_DBG_DATA" variants="A7XX"/>
+ <reg32 offset="0x0b36" name="CP_LPAC_FIFO_DBG_DATA" variants="A7XX"/>
+ <reg32 offset="0x0b40" name="CP_LPAC_FIFO_DBG_ADDR" variants="A7XX"/>
+ <reg32 offset="0x0b81" name="CP_LPAC_SQE_CNTL" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0b82" name="CP_LPAC_SQE_INSTR_BASE" variants="A6XX-A7XX"/>
+
+ <reg64 offset="0x0b70" name="CP_AQE_INSTR_BASE_0" variants="A7XX"/>
+ <reg64 offset="0x0b72" name="CP_AQE_INSTR_BASE_1" variants="A7XX"/>
+ <reg32 offset="0x0b78" name="CP_AQE_APRIV_CNTL" variants="A7XX"/>
+
+ <reg32 offset="0x0ba8" name="CP_AQE_ROQ_DBG_ADDR_0" variants="A7XX"/>
+ <reg32 offset="0x0ba9" name="CP_AQE_ROQ_DBG_ADDR_1" variants="A7XX"/>
+ <reg32 offset="0x0bac" name="CP_AQE_ROQ_DBG_DATA_0" variants="A7XX"/>
+ <reg32 offset="0x0bad" name="CP_AQE_ROQ_DBG_DATA_1" variants="A7XX"/>
+ <reg32 offset="0x0bb0" name="CP_AQE_UCODE_DBG_ADDR_0" variants="A7XX"/>
+ <reg32 offset="0x0bb1" name="CP_AQE_UCODE_DBG_ADDR_1" variants="A7XX"/>
+ <reg32 offset="0x0bb4" name="CP_AQE_UCODE_DBG_DATA_0" variants="A7XX"/>
+ <reg32 offset="0x0bb5" name="CP_AQE_UCODE_DBG_DATA_1" variants="A7XX"/>
+ <reg32 offset="0x0bb8" name="CP_AQE_STAT_ADDR_0" variants="A7XX"/>
+ <reg32 offset="0x0bb9" name="CP_AQE_STAT_ADDR_1" variants="A7XX"/>
+ <reg32 offset="0x0bbc" name="CP_AQE_STAT_DATA_0" variants="A7XX"/>
+ <reg32 offset="0x0bbd" name="CP_AQE_STAT_DATA_1" variants="A7XX"/>
+
+ <reg32 offset="0x0C01" name="VSC_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/>
+ <reg32 offset="0x0018" name="RBBM_GPR0_CNTL" variants="A6XX"/>
+ <reg32 offset="0x0201" name="RBBM_INT_0_STATUS" type="A6XX_RBBM_INT_0_MASK" variants="A6XX-A7XX"/>
+ <reg32 offset="0x006a" name="RBBM_INT_0_STATUS" type="A6XX_RBBM_INT_0_MASK" variants="A8XX-"/>
+ <reg32 offset="0x0210" name="RBBM_STATUS" variants="A6XX-A7XX">
<bitfield pos="23" name="GPU_BUSY_IGN_AHB" type="boolean"/>
<bitfield pos="22" name="GPU_BUSY_IGN_AHB_CP" type="boolean"/>
<bitfield pos="21" name="HLSQ_BUSY" type="boolean"/>
@@ -342,22 +571,59 @@ by a particular renderpass/blit.
<bitfield pos="1" name="CP_AHB_BUSY_CP_MASTER" type="boolean"/>
<bitfield pos="0" name="CP_AHB_BUSY_CX_MASTER" type="boolean"/>
</reg32>
- <reg32 offset="0x0211" name="RBBM_STATUS1"/>
- <reg32 offset="0x0212" name="RBBM_STATUS2"/>
- <reg32 offset="0x0213" name="RBBM_STATUS3">
+ <reg32 offset="0x0211" name="RBBM_STATUS1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0212" name="RBBM_STATUS2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0213" name="RBBM_STATUS3" variants="A6XX-A7XX">
<bitfield pos="24" name="SMMU_STALLED_ON_FAULT" type="boolean"/>
</reg32>
- <reg32 offset="0x0215" name="RBBM_VBIF_GX_RESET_STATUS"/>
- <reg32 offset="0x0260" name="RBBM_CLOCK_MODE_CP" variants="A7XX-"/>
- <reg32 offset="0x0284" name="RBBM_CLOCK_MODE_BV_LRZ" variants="A7XX-"/>
- <reg32 offset="0x0285" name="RBBM_CLOCK_MODE_BV_GRAS" variants="A7XX-"/>
- <reg32 offset="0x0286" name="RBBM_CLOCK_MODE2_GRAS" variants="A7XX-"/>
- <reg32 offset="0x0287" name="RBBM_CLOCK_MODE_BV_VFD" variants="A7XX-"/>
- <reg32 offset="0x0288" name="RBBM_CLOCK_MODE_BV_GPC" variants="A7XX-"/>
+ <reg32 offset="0x012" name="RBBM_STATUS" variants="A8XX-">
+ <bitfield pos="23" name="GPU_BUSY_IGN_AHB" type="boolean"/>
+ <bitfield pos="22" name="GPU_BUSY_IGN_AHB_CP" type="boolean"/>
+ <bitfield pos="21" name="SLICE_BUSY_IGN_CP" type="boolean"/>
+ <bitfield pos="20" name="CP_SLICE_BUSY" type="boolean"/>
+ <bitfield pos="19" name="UNSLICE_BUSY_IGN_AHB" type="boolean"/>
+ <bitfield pos="18" name="UNSLICE_BUSY_IGN_AHB_CP" type="boolean"/>
+ <bitfield pos="17" name="CP_SLICE_RL_BUSY" type="boolean"/>
+ <bitfield pos="14" name="UNSLICE_TOP_BUSY" type="boolean"/>
+ <bitfield pos="13" name="UFC_BUSY" type="boolean"/>
+ <bitfield pos="12" name="HLSQ_BUSY" type="boolean"/>
+ <bitfield pos="11" name="VSC_BUSY" type="boolean"/>
+ <bitfield pos="10" name="UCHE_BUSY" type="boolean"/>
+ <bitfield pos="9" name="VPC_BUSY" type="boolean"/>
+ <bitfield pos="8" name="PC_BUSY" type="boolean"/>
+ <bitfield pos="7" name="CMP_BUSY" type="boolean"/>
+ <bitfield pos="6" name="DCMP_BUSY" type="boolean"/>
+ <bitfield pos="5" name="VBIF_GX_BUSY" type="boolean"/>
+ <bitfield pos="4" name="DBGC_PERF_BUSY" type="boolean"/>
+ <bitfield pos="3" name="GFX_DBGC_BUSY" type="boolean"/>
+ <bitfield pos="2" name="CP_BUSY" type="boolean"/>
+ <bitfield pos="1" name="CP_AHB_BUSY_CP_MASTER" type="boolean"/>
+ <bitfield pos="0" name="CP_AHB_BUSY_CX_MASTER" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x013" name="RBBM_STATUS1" variants="A8XX-"/>
+ <reg32 offset="0x015" name="RBBM_GFX_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x016" name="RBBM_GFX_STATUS1" variants="A8XX-"/>
+ <reg32 offset="0x018" name="RBBM_LPAC_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x01a" name="RBBM_GFX_BR_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x01c" name="RBBM_GFX_BV_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x01e" name="RBBM_MISC_STATUS" variants="A8XX-">
+ <bitfield pos="0" name="SMMU_STALLED_ON_FAULT" type="boolean"/>
+ </reg32>
+
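For reference, the usual consumer of RBBM_STATUS is an idle poll. A hedged sketch against the A8XX layout above, where read_reg() stands in for whatever MMIO accessor the driver uses and only the bit position comes from the XML:

    #include <stdbool.h>
    #include <stdint.h>

    #define A8XX_RBBM_STATUS              0x012   /* dword offset, per XML */
    #define RBBM_STATUS_GPU_BUSY_IGN_AHB  (1u << 23)

    extern uint32_t read_reg(uint32_t dword_offset);  /* hypothetical stub */

    /* Spin until the GPU reports idle (ignoring AHB activity) or give up. */
    static bool gpu_wait_idle(unsigned int tries)
    {
        while (tries--) {
            if (!(read_reg(A8XX_RBBM_STATUS) & RBBM_STATUS_GPU_BUSY_IGN_AHB))
                return true;
        }
        return false;
    }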
+ <reg32 offset="0x0215" name="RBBM_VBIF_GX_RESET_STATUS" variants="A6XX"/>
+
+ <reg32 offset="0x0260" name="RBBM_CLOCK_MODE_CP" variants="A7XX"/>
+ <reg32 offset="0x0284" name="RBBM_CLOCK_MODE_BV_LRZ" variants="A7XX"/>
+ <reg32 offset="0x0285" name="RBBM_CLOCK_MODE_BV_GRAS" variants="A7XX"/>
+ <reg32 offset="0x0286" name="RBBM_CLOCK_MODE2_GRAS" variants="A7XX"/>
+ <reg32 offset="0x0287" name="RBBM_CLOCK_MODE_BV_VFD" variants="A7XX"/>
+ <reg32 offset="0x0288" name="RBBM_CLOCK_MODE_BV_GPC" variants="A7XX"/>
- <reg32 offset="0x02c0" name="RBBM_SW_FUSE_INT_STATUS" variants="A7XX-"/>
- <reg32 offset="0x02c1" name="RBBM_SW_FUSE_INT_MASK" variants="A7XX-"/>
+ <reg32 offset="0x02c0" name="RBBM_SW_FUSE_INT_STATUS" variants="A7XX"/>
+ <reg32 offset="0x02c1" name="RBBM_SW_FUSE_INT_MASK" variants="A7XX"/>
+ <reg32 offset="0x0071" name="RBBM_SW_FUSE_INT_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x0072" name="RBBM_SW_FUSE_INT_MASK" variants="A8XX-"/>
<array offset="0x0400" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A6XX"/>
<array offset="0x041c" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A6XX"/>
@@ -376,49 +642,96 @@ by a particular renderpass/blit.
<array offset="0x04ea" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A6XX"/>
<array offset="0x04f2" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A6XX"/>
- <array offset="0x0300" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A7XX-"/>
- <array offset="0x031c" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A7XX-"/>
- <array offset="0x0324" name="RBBM_PERFCTR_PC" stride="2" length="8" variants="A7XX-"/>
- <array offset="0x0334" name="RBBM_PERFCTR_VFD" stride="2" length="8" variants="A7XX-"/>
- <array offset="0x0344" name="RBBM_PERFCTR_HLSQ" stride="2" length="6" variants="A7XX-"/>
- <array offset="0x0350" name="RBBM_PERFCTR_VPC" stride="2" length="6" variants="A7XX-"/>
- <array offset="0x035c" name="RBBM_PERFCTR_CCU" stride="2" length="5" variants="A7XX-"/>
- <array offset="0x0366" name="RBBM_PERFCTR_TSE" stride="2" length="4" variants="A7XX-"/>
- <array offset="0x036e" name="RBBM_PERFCTR_RAS" stride="2" length="4" variants="A7XX-"/>
- <array offset="0x0376" name="RBBM_PERFCTR_UCHE" stride="2" length="12" variants="A7XX-"/>
- <array offset="0x038e" name="RBBM_PERFCTR_TP" stride="2" length="12" variants="A7XX-"/>
- <array offset="0x03a6" name="RBBM_PERFCTR_SP" stride="2" length="24" variants="A7XX-"/>
- <array offset="0x03d6" name="RBBM_PERFCTR_RB" stride="2" length="8" variants="A7XX-"/>
- <array offset="0x03e6" name="RBBM_PERFCTR_VSC" stride="2" length="2" variants="A7XX-"/>
- <array offset="0x03ea" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A7XX-"/>
- <array offset="0x03f2" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A7XX-"/>
- <array offset="0x03fa" name="RBBM_PERFCTR_UFC" stride="2" length="4" variants="A7XX-"/>
- <array offset="0x0410" name="RBBM_PERFCTR2_HLSQ" stride="2" length="6" variants="A7XX-"/>
- <array offset="0x041c" name="RBBM_PERFCTR2_CP" stride="2" length="7" variants="A7XX-"/>
- <array offset="0x042a" name="RBBM_PERFCTR2_SP" stride="2" length="12" variants="A7XX-"/>
- <array offset="0x0442" name="RBBM_PERFCTR2_TP" stride="2" length="6" variants="A7XX-"/>
- <array offset="0x044e" name="RBBM_PERFCTR2_UFC" stride="2" length="2" variants="A7XX-"/>
- <array offset="0x0460" name="RBBM_PERFCTR_BV_PC" stride="2" length="8" variants="A7XX-"/>
- <array offset="0x0470" name="RBBM_PERFCTR_BV_VFD" stride="2" length="8" variants="A7XX-"/>
- <array offset="0x0480" name="RBBM_PERFCTR_BV_VPC" stride="2" length="6" variants="A7XX-"/>
- <array offset="0x048c" name="RBBM_PERFCTR_BV_TSE" stride="2" length="4" variants="A7XX-"/>
- <array offset="0x0494" name="RBBM_PERFCTR_BV_RAS" stride="2" length="4" variants="A7XX-"/>
- <array offset="0x049c" name="RBBM_PERFCTR_BV_LRZ" stride="2" length="4" variants="A7XX-"/>
-
- <reg32 offset="0x0500" name="RBBM_PERFCTR_CNTL"/>
- <reg32 offset="0x0501" name="RBBM_PERFCTR_LOAD_CMD0"/>
- <reg32 offset="0x0502" name="RBBM_PERFCTR_LOAD_CMD1"/>
- <reg32 offset="0x0503" name="RBBM_PERFCTR_LOAD_CMD2"/>
- <reg32 offset="0x0504" name="RBBM_PERFCTR_LOAD_CMD3"/>
- <reg32 offset="0x0505" name="RBBM_PERFCTR_LOAD_VALUE_LO"/>
- <reg32 offset="0x0506" name="RBBM_PERFCTR_LOAD_VALUE_HI"/>
- <array offset="0x0507" name="RBBM_PERFCTR_RBBM_SEL" stride="1" length="4"/>
- <reg32 offset="0x050B" name="RBBM_PERFCTR_GPU_BUSY_MASKED"/>
- <reg32 offset="0x050e" name="RBBM_PERFCTR_SRAM_INIT_CMD"/>
- <reg32 offset="0x050f" name="RBBM_PERFCTR_SRAM_INIT_STATUS"/>
- <reg32 offset="0x0533" name="RBBM_ISDB_CNT"/>
- <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL"/>
- <reg32 offset="0x0535" name="RBBM_SNAPSHOT_STATUS" variants="A7XX-"/>
+ <array offset="0x0300" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A7XX"/>
+ <array offset="0x031c" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A7XX"/>
+ <array offset="0x0324" name="RBBM_PERFCTR_PC" stride="2" length="8" variants="A7XX"/>
+ <array offset="0x0334" name="RBBM_PERFCTR_VFD" stride="2" length="8" variants="A7XX"/>
+ <array offset="0x0344" name="RBBM_PERFCTR_HLSQ" stride="2" length="6" variants="A7XX"/>
+ <array offset="0x0350" name="RBBM_PERFCTR_VPC" stride="2" length="6" variants="A7XX"/>
+ <array offset="0x035c" name="RBBM_PERFCTR_CCU" stride="2" length="5" variants="A7XX"/>
+ <array offset="0x0366" name="RBBM_PERFCTR_TSE" stride="2" length="4" variants="A7XX"/>
+ <array offset="0x036e" name="RBBM_PERFCTR_RAS" stride="2" length="4" variants="A7XX"/>
+ <array offset="0x0376" name="RBBM_PERFCTR_UCHE" stride="2" length="12" variants="A7XX"/>
+ <array offset="0x038e" name="RBBM_PERFCTR_TP" stride="2" length="12" variants="A7XX"/>
+ <array offset="0x03a6" name="RBBM_PERFCTR_SP" stride="2" length="24" variants="A7XX"/>
+ <array offset="0x03d6" name="RBBM_PERFCTR_RB" stride="2" length="8" variants="A7XX"/>
+ <array offset="0x03e6" name="RBBM_PERFCTR_VSC" stride="2" length="2" variants="A7XX"/>
+ <array offset="0x03ea" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A7XX"/>
+ <array offset="0x03f2" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A7XX"/>
+ <array offset="0x03fa" name="RBBM_PERFCTR_UFC" stride="2" length="4" variants="A7XX"/>
+ <array offset="0x0410" name="RBBM_PERFCTR2_HLSQ" stride="2" length="6" variants="A7XX"/>
+ <array offset="0x041c" name="RBBM_PERFCTR2_CP" stride="2" length="7" variants="A7XX"/>
+ <array offset="0x042a" name="RBBM_PERFCTR2_SP" stride="2" length="12" variants="A7XX"/>
+ <array offset="0x0442" name="RBBM_PERFCTR2_TP" stride="2" length="6" variants="A7XX"/>
+ <array offset="0x044e" name="RBBM_PERFCTR2_UFC" stride="2" length="2" variants="A7XX"/>
+ <array offset="0x0460" name="RBBM_PERFCTR_BV_PC" stride="2" length="8" variants="A7XX"/>
+ <array offset="0x0470" name="RBBM_PERFCTR_BV_VFD" stride="2" length="8" variants="A7XX"/>
+ <array offset="0x0480" name="RBBM_PERFCTR_BV_VPC" stride="2" length="6" variants="A7XX"/>
+ <array offset="0x048c" name="RBBM_PERFCTR_BV_TSE" stride="2" length="4" variants="A7XX"/>
+ <array offset="0x0494" name="RBBM_PERFCTR_BV_RAS" stride="2" length="4" variants="A7XX"/>
+ <array offset="0x049c" name="RBBM_PERFCTR_BV_LRZ" stride="2" length="4" variants="A7XX"/>
+
+ <array offset="0x01b0" name="RBBM_PERFCTR_CP" stride="2" length="14" variants="A8XX"/>
+ <array offset="0x01cc" name="RBBM_PERFCTR_RBBM" stride="2" length="4" variants="A8XX"/>
+ <array offset="0x01d4" name="RBBM_PERFCTR_PC" stride="2" length="8" variants="A8XX"/>
+ <array offset="0x01e4" name="RBBM_PERFCTR_VFD" stride="2" length="8" variants="A8XX"/>
+ <array offset="0x01f4" name="RBBM_PERFCTR_HLSQ" stride="2" length="6" variants="A8XX"/>
+ <array offset="0x0200" name="RBBM_PERFCTR_VPC" stride="2" length="6" variants="A8XX"/>
+ <array offset="0x020c" name="RBBM_PERFCTR_CCU" stride="2" length="5" variants="A8XX"/>
+ <array offset="0x0216" name="RBBM_PERFCTR_TSE" stride="2" length="4" variants="A8XX"/>
+ <array offset="0x021e" name="RBBM_PERFCTR_RAS" stride="2" length="4" variants="A8XX"/>
+ <array offset="0x0226" name="RBBM_PERFCTR_UCHE" stride="2" length="24" variants="A8XX"/>
+ <array offset="0x0256" name="RBBM_PERFCTR_TP" stride="2" length="12" variants="A8XX"/>
+ <array offset="0x026e" name="RBBM_PERFCTR_SP" stride="2" length="24" variants="A8XX"/>
+ <array offset="0x029e" name="RBBM_PERFCTR_RB" stride="2" length="8" variants="A8XX"/>
+ <array offset="0x02ae" name="RBBM_PERFCTR_VSC" stride="2" length="2" variants="A8XX"/>
+ <array offset="0x02b2" name="RBBM_PERFCTR_LRZ" stride="2" length="4" variants="A8XX"/>
+ <array offset="0x02ba" name="RBBM_PERFCTR_CMP" stride="2" length="4" variants="A8XX"/>
+ <array offset="0x02c2" name="RBBM_PERFCTR_UFC" stride="2" length="4" variants="A8XX"/>
+ <array offset="0x02e2" name="RBBM_PERFCTR2_HLSQ" stride="2" length="6" variants="A8XX"/>
+ <array offset="0x02ee" name="RBBM_PERFCTR2_CP" stride="2" length="7" variants="A8XX"/>
+ <array offset="0x02fc" name="RBBM_PERFCTR2_SP" stride="2" length="12" variants="A8XX"/>
+ <array offset="0x0314" name="RBBM_PERFCTR2_TP" stride="2" length="8" variants="A8XX"/>
+ <array offset="0x0324" name="RBBM_PERFCTR2_UFC" stride="2" length="2" variants="A8XX"/>
+ <array offset="0x0328" name="RBBM_PERFCTR_BV_PC" stride="2" length="8" variants="A8XX"/>
+ <array offset="0x0338" name="RBBM_PERFCTR_BV_VFD" stride="2" length="8" variants="A8XX"/>
+ <array offset="0x0348" name="RBBM_PERFCTR_BV_VPC" stride="2" length="6" variants="A8XX"/>
+ <array offset="0x0354" name="RBBM_PERFCTR_BV_TSE" stride="2" length="4" variants="A8XX"/>
+ <array offset="0x035c" name="RBBM_PERFCTR_BV_RAS" stride="2" length="4" variants="A8XX"/>
+ <array offset="0x0364" name="RBBM_PERFCTR_BV_LRZ" stride="2" length="4" variants="A8XX"/>
+ <array offset="0x036c" name="RBBM_PERFCTR_BV_CCU" stride="2" length="3" variants="A8XX"/>
+ <array offset="0x0372" name="RBBM_PERFCTR_BV_RB" stride="2" length="6" variants="A8XX"/>
+
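Each entry in these RBBM_PERFCTR_* arrays is a LO/HI pair, which is why the stride is 2. The two halves cannot be read atomically over MMIO, so a common pattern is to re-read HI until it is stable; a hedged C sketch, with read_reg() again a hypothetical accessor and the base taken from the A8XX CP array above:

    #include <stdint.h>

    #define A8XX_RBBM_PERFCTR_CP0_LO  0x01b0  /* array base, stride 2, per XML */

    extern uint32_t read_reg(uint32_t dword_offset);  /* hypothetical stub */

    /* Read one 64-bit counter from a stride-2 LO/HI array, retrying if the
     * high half changed between the two reads (i.e. LO rolled over). */
    static uint64_t read_perfctr(uint32_t base, unsigned int idx)
    {
        uint32_t lo, hi, hi2;

        do {
            hi  = read_reg(base + 2 * idx + 1);
            lo  = read_reg(base + 2 * idx);
            hi2 = read_reg(base + 2 * idx + 1);
        } while (hi != hi2);

        return ((uint64_t)hi << 32) | lo;
    }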
+ <reg32 offset="0x0500" name="RBBM_PERFCTR_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0460" name="RBBM_PERFCTR_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x0501" name="RBBM_PERFCTR_LOAD_CMD0" variants="A6XX"/>
+ <reg32 offset="0x0502" name="RBBM_PERFCTR_LOAD_CMD1" variants="A6XX"/>
+ <reg32 offset="0x0503" name="RBBM_PERFCTR_LOAD_CMD2" variants="A6XX"/>
+ <reg32 offset="0x0504" name="RBBM_PERFCTR_LOAD_CMD3" variants="A6XX"/>
+ <reg64 offset="0x0505" name="RBBM_PERFCTR_LOAD_VALUE" variants="A6XX"/>
+ <array offset="0x0507" name="RBBM_PERFCTR_RBBM_SEL" stride="1" length="4" variants="A6XX-A7XX"/>
+ <array offset="0x0441" name="RBBM_PERFCTR_RBBM_SEL" stride="1" length="4" variants="A8XX-"/>
+ <reg32 offset="0x050B" name="RBBM_PERFCTR_GPU_BUSY_MASKED" variants="A6XX-A7XX"/>
+ <reg32 offset="0x019e" name="RBBM_PERFCTR_GPU_BUSY_MASKED" variants="A8XX-"/>
+ <reg32 offset="0x050e" name="RBBM_PERFCTR_SRAM_INIT_CMD" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0449" name="RBBM_PERFCTR_SRAM_INIT_CMD" variants="A8XX-"/>
+ <reg32 offset="0x050f" name="RBBM_PERFCTR_SRAM_INIT_STATUS" variants="A6XX-A7XX"/>
+ <reg32 offset="0x019f" name="RBBM_PERFCTR_SRAM_INIT_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x01a1" name="RBBM_PERFCTR_FLUSH_HOST_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x044c" name="RBBM_PERFCTR_FLUSH_HOST_CMD" variants="A8XX-"/>
+ <reg32 offset="0x0533" name="RBBM_ISDB_CNT" variants="A6XX-A7XX"/>
+ <reg32 offset="0x002d" name="RBBM_ISDB_CNT" variants="A8XX-"/>
+ <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0440" name="RBBM_NC_MODE_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x0535" name="RBBM_SNAPSHOT_STATUS" variants="A7XX"/>
+ <reg32 offset="0x002e" name="RBBM_SNAPSHOT_STATUS" variants="A8XX-"/>
+
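The SRAM_INIT pair above is a command/status handshake: write the CMD register, then poll STATUS for completion. A hedged sketch using the A8XX offsets from the XML; write_reg()/read_reg() are hypothetical stubs, and treating bit 0 of STATUS as the done flag is an assumption since the XML defines no bitfields for it:

    #include <stdbool.h>
    #include <stdint.h>

    #define A8XX_RBBM_PERFCTR_SRAM_INIT_CMD     0x0449  /* per XML */
    #define A8XX_RBBM_PERFCTR_SRAM_INIT_STATUS  0x019f  /* per XML */

    extern void write_reg(uint32_t dword_offset, uint32_t val);  /* stubs */
    extern uint32_t read_reg(uint32_t dword_offset);

    /* Kick perf-counter SRAM init and wait for it to signal completion.
     * Bit 0 as the completion flag is assumed, not documented above. */
    static bool perfctr_sram_init(unsigned int tries)
    {
        write_reg(A8XX_RBBM_PERFCTR_SRAM_INIT_CMD, 1);
        while (tries--) {
            if (read_reg(A8XX_RBBM_PERFCTR_SRAM_INIT_STATUS) & 1)
                return true;
        }
        return false;
    }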
+ <reg32 offset="0x500" name="RBBM_SLICE_PERFCTR_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x58f" name="RBBM_SLICE_INTERFACE_HANG_INT_CNTL" variants="A8XX-"/>
+ <array offset="0x5e0" name="RBBM_SLICE_PERFCTR_RBBM_SEL" stride="1" length="4" variants="A8XX-"/>
+ <reg32 offset="0x5e8" name="RBBM_SLICE_PERFCTR_SRAM_INIT_CMD" variants="A8XX-"/>
+ <reg32 offset="0x5eb" name="RBBM_SLICE_PERFCTR_FLUSH_HOST_CMD" variants="A8XX-"/>
+ <reg32 offset="0x5ec" name="RBBM_SLICE_NC_MODE_CNTL" variants="A8XX-"/>
<!---
 This block of registers isn't tied to perf counters. They
@@ -426,170 +739,211 @@ by a particular renderpass/blit.
 vertices in, number of primitives assembled, etc.
-->
- <reg64 offset="0x0540" name="RBBM_PIPESTAT_IAVERTICES"/>
- <reg64 offset="0x0542" name="RBBM_PIPESTAT_IAPRIMITIVES"/>
- <reg64 offset="0x0544" name="RBBM_PIPESTAT_VSINVOCATIONS"/>
- <reg64 offset="0x0546" name="RBBM_PIPESTAT_HSINVOCATIONS"/>
- <reg64 offset="0x0548" name="RBBM_PIPESTAT_DSINVOCATIONS"/>
- <reg64 offset="0x054a" name="RBBM_PIPESTAT_GSINVOCATIONS"/>
- <reg64 offset="0x054c" name="RBBM_PIPESTAT_GSPRIMITIVES"/>
- <reg64 offset="0x054e" name="RBBM_PIPESTAT_CINVOCATIONS"/>
- <reg64 offset="0x0550" name="RBBM_PIPESTAT_CPRIMITIVES"/>
- <reg64 offset="0x0552" name="RBBM_PIPESTAT_PSINVOCATIONS"/>
- <reg64 offset="0x0554" name="RBBM_PIPESTAT_CSINVOCATIONS"/>
+ <reg64 offset="0x0540" name="RBBM_PIPESTAT_IAVERTICES" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0542" name="RBBM_PIPESTAT_IAPRIMITIVES" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0544" name="RBBM_PIPESTAT_VSINVOCATIONS" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0546" name="RBBM_PIPESTAT_HSINVOCATIONS" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0548" name="RBBM_PIPESTAT_DSINVOCATIONS" variants="A6XX-A7XX"/>
+ <reg64 offset="0x054a" name="RBBM_PIPESTAT_GSINVOCATIONS" variants="A6XX-A7XX"/>
+ <reg64 offset="0x054c" name="RBBM_PIPESTAT_GSPRIMITIVES" variants="A6XX-A7XX"/>
+ <reg64 offset="0x054e" name="RBBM_PIPESTAT_CINVOCATIONS" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0550" name="RBBM_PIPESTAT_CPRIMITIVES" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0552" name="RBBM_PIPESTAT_PSINVOCATIONS" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0554" name="RBBM_PIPESTAT_CSINVOCATIONS" variants="A6XX-A7XX"/>
+
+ <reg64 offset="0x0380" name="RBBM_PIPESTAT_IAVERTICES" variants="A8XX-"/>
+ <reg64 offset="0x0382" name="RBBM_PIPESTAT_IAPRIMITIVES" variants="A8XX-"/>
+ <reg64 offset="0x0384" name="RBBM_PIPESTAT_VSINVOCATIONS" variants="A8XX-"/>
+ <reg64 offset="0x0386" name="RBBM_PIPESTAT_GSINVOCATIONS" variants="A8XX-"/>
+ <reg64 offset="0x0388" name="RBBM_PIPESTAT_GSPRIMITIVES" variants="A8XX-"/>
+ <reg64 offset="0x038a" name="RBBM_PIPESTAT_CINVOCATIONS" variants="A8XX-"/>
+ <reg64 offset="0x038c" name="RBBM_PIPESTAT_CPRIMITIVES" variants="A8XX-"/>
+ <reg64 offset="0x038e" name="RBBM_PIPESTAT_PSINVOCATIONS" variants="A8XX-"/>
+ <reg64 offset="0x0390" name="RBBM_PIPESTAT_HSINVOCATIONS" variants="A8XX-"/>
+ <reg64 offset="0x0392" name="RBBM_PIPESTAT_DSINVOCATIONS" variants="A8XX-"/>
+ <reg64 offset="0x0394" name="RBBM_PIPESTAT_CSINVOCATIONS" variants="A8XX-"/>
+ <reg64 offset="0x0396" name="RBBM_PIPESTAT_ASINVOCATIONS" variants="A8XX-"/>
+ <reg64 offset="0x0398" name="RBBM_PIPESTAT_MSINVOCATIONS" variants="A8XX-"/>
+ <reg64 offset="0x039a" name="RBBM_PIPESTAT_MSPRIMITIVES" variants="A8XX-"/>
<reg32 offset="0xF400" name="RBBM_SECVID_TRUST_CNTL"/>
<reg64 offset="0xF800" name="RBBM_SECVID_TSB_TRUSTED_BASE"/>
<reg32 offset="0xF802" name="RBBM_SECVID_TSB_TRUSTED_SIZE"/>
<reg32 offset="0xF803" name="RBBM_SECVID_TSB_CNTL"/>
- <reg32 offset="0xF810" name="RBBM_SECVID_TSB_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0xF810" name="RBBM_SECVID_TSB_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/>
<reg64 offset="0xfc00" name="RBBM_SECVID_TSB_STATUS" variants="A7XX-"/>
- <reg32 offset="0x00010" name="RBBM_VBIF_CLIENT_QOS_CNTL"/>
- <reg32 offset="0x00011" name="RBBM_GBIF_CLIENT_QOS_CNTL"/>
- <reg32 offset="0x00016" name="RBBM_GBIF_HALT"/>
- <reg32 offset="0x00017" name="RBBM_GBIF_HALT_ACK"/>
- <reg32 offset="0x0001c" name="RBBM_WAIT_FOR_GPU_IDLE_CMD">
+ <reg32 offset="0x00010" name="RBBM_VBIF_CLIENT_QOS_CNTL" variants="A6XX"/>
+ <reg32 offset="0x00011" name="RBBM_GBIF_CLIENT_QOS_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00008" name="RBBM_GBIF_CLIENT_QOS_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x00016" name="RBBM_GBIF_HALT" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0000a" name="RBBM_GBIF_HALT" variants="A8XX-"/>
+ <reg32 offset="0x00017" name="RBBM_GBIF_HALT_ACK" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0000b" name="RBBM_GBIF_HALT_ACK" variants="A8XX-"/>
+ <reg32 offset="0x0001c" name="RBBM_WAIT_FOR_GPU_IDLE_CMD" variants="A6XX">
<bitfield pos="0" name="WAIT_GPU_IDLE" type="boolean"/>
</reg32>
- <reg32 offset="0x00016" name="RBBM_GBIF_HALT" variants="A7XX-"/>
- <reg32 offset="0x00017" name="RBBM_GBIF_HALT_ACK" variants="A7XX-"/>
- <reg32 offset="0x0001f" name="RBBM_INTERFACE_HANG_INT_CNTL"/>
- <reg32 offset="0x00037" name="RBBM_INT_CLEAR_CMD" type="A6XX_RBBM_INT_0_MASK"/>
- <reg32 offset="0x00038" name="RBBM_INT_0_MASK" type="A6XX_RBBM_INT_0_MASK"/>
- <reg32 offset="0x0003a" name="RBBM_INT_2_MASK" variants="A7XX-"/>
- <reg32 offset="0x00042" name="RBBM_SP_HYST_CNT"/>
- <reg32 offset="0x00043" name="RBBM_SW_RESET_CMD"/>
- <reg32 offset="0x00044" name="RBBM_RAC_THRESHOLD_CNT"/>
- <reg32 offset="0x00045" name="RBBM_BLOCK_SW_RESET_CMD"/>
- <reg32 offset="0x00046" name="RBBM_BLOCK_SW_RESET_CMD2"/>
- <reg32 offset="0x000ad" name="RBBM_CLOCK_CNTL_GLOBAL" variants="A7XX-"/>
- <reg32 offset="0x000ae" name="RBBM_CLOCK_CNTL"/>
- <reg32 offset="0x000b0" name="RBBM_CLOCK_CNTL_SP0"/>
- <reg32 offset="0x000b1" name="RBBM_CLOCK_CNTL_SP1"/>
- <reg32 offset="0x000b2" name="RBBM_CLOCK_CNTL_SP2"/>
- <reg32 offset="0x000b3" name="RBBM_CLOCK_CNTL_SP3"/>
- <reg32 offset="0x000b4" name="RBBM_CLOCK_CNTL2_SP0"/>
- <reg32 offset="0x000b5" name="RBBM_CLOCK_CNTL2_SP1"/>
- <reg32 offset="0x000b6" name="RBBM_CLOCK_CNTL2_SP2"/>
- <reg32 offset="0x000b7" name="RBBM_CLOCK_CNTL2_SP3"/>
- <reg32 offset="0x000b8" name="RBBM_CLOCK_DELAY_SP0"/>
- <reg32 offset="0x000b9" name="RBBM_CLOCK_DELAY_SP1"/>
- <reg32 offset="0x000ba" name="RBBM_CLOCK_DELAY_SP2"/>
- <reg32 offset="0x000bb" name="RBBM_CLOCK_DELAY_SP3"/>
- <reg32 offset="0x000bc" name="RBBM_CLOCK_HYST_SP0"/>
- <reg32 offset="0x000bd" name="RBBM_CLOCK_HYST_SP1"/>
- <reg32 offset="0x000be" name="RBBM_CLOCK_HYST_SP2"/>
- <reg32 offset="0x000bf" name="RBBM_CLOCK_HYST_SP3"/>
- <reg32 offset="0x000c0" name="RBBM_CLOCK_CNTL_TP0"/>
- <reg32 offset="0x000c1" name="RBBM_CLOCK_CNTL_TP1"/>
- <reg32 offset="0x000c2" name="RBBM_CLOCK_CNTL_TP2"/>
- <reg32 offset="0x000c3" name="RBBM_CLOCK_CNTL_TP3"/>
- <reg32 offset="0x000c4" name="RBBM_CLOCK_CNTL2_TP0"/>
- <reg32 offset="0x000c5" name="RBBM_CLOCK_CNTL2_TP1"/>
- <reg32 offset="0x000c6" name="RBBM_CLOCK_CNTL2_TP2"/>
- <reg32 offset="0x000c7" name="RBBM_CLOCK_CNTL2_TP3"/>
- <reg32 offset="0x000c8" name="RBBM_CLOCK_CNTL3_TP0"/>
- <reg32 offset="0x000c9" name="RBBM_CLOCK_CNTL3_TP1"/>
- <reg32 offset="0x000ca" name="RBBM_CLOCK_CNTL3_TP2"/>
- <reg32 offset="0x000cb" name="RBBM_CLOCK_CNTL3_TP3"/>
- <reg32 offset="0x000cc" name="RBBM_CLOCK_CNTL4_TP0"/>
- <reg32 offset="0x000cd" name="RBBM_CLOCK_CNTL4_TP1"/>
- <reg32 offset="0x000ce" name="RBBM_CLOCK_CNTL4_TP2"/>
- <reg32 offset="0x000cf" name="RBBM_CLOCK_CNTL4_TP3"/>
- <reg32 offset="0x000d0" name="RBBM_CLOCK_DELAY_TP0"/>
- <reg32 offset="0x000d1" name="RBBM_CLOCK_DELAY_TP1"/>
- <reg32 offset="0x000d2" name="RBBM_CLOCK_DELAY_TP2"/>
- <reg32 offset="0x000d3" name="RBBM_CLOCK_DELAY_TP3"/>
- <reg32 offset="0x000d4" name="RBBM_CLOCK_DELAY2_TP0"/>
- <reg32 offset="0x000d5" name="RBBM_CLOCK_DELAY2_TP1"/>
- <reg32 offset="0x000d6" name="RBBM_CLOCK_DELAY2_TP2"/>
- <reg32 offset="0x000d7" name="RBBM_CLOCK_DELAY2_TP3"/>
- <reg32 offset="0x000d8" name="RBBM_CLOCK_DELAY3_TP0"/>
- <reg32 offset="0x000d9" name="RBBM_CLOCK_DELAY3_TP1"/>
- <reg32 offset="0x000da" name="RBBM_CLOCK_DELAY3_TP2"/>
- <reg32 offset="0x000db" name="RBBM_CLOCK_DELAY3_TP3"/>
- <reg32 offset="0x000dc" name="RBBM_CLOCK_DELAY4_TP0"/>
- <reg32 offset="0x000dd" name="RBBM_CLOCK_DELAY4_TP1"/>
- <reg32 offset="0x000de" name="RBBM_CLOCK_DELAY4_TP2"/>
- <reg32 offset="0x000df" name="RBBM_CLOCK_DELAY4_TP3"/>
- <reg32 offset="0x000e0" name="RBBM_CLOCK_HYST_TP0"/>
- <reg32 offset="0x000e1" name="RBBM_CLOCK_HYST_TP1"/>
- <reg32 offset="0x000e2" name="RBBM_CLOCK_HYST_TP2"/>
- <reg32 offset="0x000e3" name="RBBM_CLOCK_HYST_TP3"/>
- <reg32 offset="0x000e4" name="RBBM_CLOCK_HYST2_TP0"/>
- <reg32 offset="0x000e5" name="RBBM_CLOCK_HYST2_TP1"/>
- <reg32 offset="0x000e6" name="RBBM_CLOCK_HYST2_TP2"/>
- <reg32 offset="0x000e7" name="RBBM_CLOCK_HYST2_TP3"/>
- <reg32 offset="0x000e8" name="RBBM_CLOCK_HYST3_TP0"/>
- <reg32 offset="0x000e9" name="RBBM_CLOCK_HYST3_TP1"/>
- <reg32 offset="0x000ea" name="RBBM_CLOCK_HYST3_TP2"/>
- <reg32 offset="0x000eb" name="RBBM_CLOCK_HYST3_TP3"/>
- <reg32 offset="0x000ec" name="RBBM_CLOCK_HYST4_TP0"/>
- <reg32 offset="0x000ed" name="RBBM_CLOCK_HYST4_TP1"/>
- <reg32 offset="0x000ee" name="RBBM_CLOCK_HYST4_TP2"/>
- <reg32 offset="0x000ef" name="RBBM_CLOCK_HYST4_TP3"/>
- <reg32 offset="0x000f0" name="RBBM_CLOCK_CNTL_RB0"/>
- <reg32 offset="0x000f1" name="RBBM_CLOCK_CNTL_RB1"/>
- <reg32 offset="0x000f2" name="RBBM_CLOCK_CNTL_RB2"/>
- <reg32 offset="0x000f3" name="RBBM_CLOCK_CNTL_RB3"/>
- <reg32 offset="0x000f4" name="RBBM_CLOCK_CNTL2_RB0"/>
- <reg32 offset="0x000f5" name="RBBM_CLOCK_CNTL2_RB1"/>
- <reg32 offset="0x000f6" name="RBBM_CLOCK_CNTL2_RB2"/>
- <reg32 offset="0x000f7" name="RBBM_CLOCK_CNTL2_RB3"/>
- <reg32 offset="0x000f8" name="RBBM_CLOCK_CNTL_CCU0"/>
- <reg32 offset="0x000f9" name="RBBM_CLOCK_CNTL_CCU1"/>
- <reg32 offset="0x000fa" name="RBBM_CLOCK_CNTL_CCU2"/>
- <reg32 offset="0x000fb" name="RBBM_CLOCK_CNTL_CCU3"/>
- <reg32 offset="0x00100" name="RBBM_CLOCK_HYST_RB_CCU0"/>
- <reg32 offset="0x00101" name="RBBM_CLOCK_HYST_RB_CCU1"/>
- <reg32 offset="0x00102" name="RBBM_CLOCK_HYST_RB_CCU2"/>
- <reg32 offset="0x00103" name="RBBM_CLOCK_HYST_RB_CCU3"/>
- <reg32 offset="0x00104" name="RBBM_CLOCK_CNTL_RAC"/>
- <reg32 offset="0x00105" name="RBBM_CLOCK_CNTL2_RAC"/>
- <reg32 offset="0x00106" name="RBBM_CLOCK_DELAY_RAC"/>
- <reg32 offset="0x00107" name="RBBM_CLOCK_HYST_RAC"/>
- <reg32 offset="0x00108" name="RBBM_CLOCK_CNTL_TSE_RAS_RBBM"/>
- <reg32 offset="0x00109" name="RBBM_CLOCK_DELAY_TSE_RAS_RBBM"/>
- <reg32 offset="0x0010a" name="RBBM_CLOCK_HYST_TSE_RAS_RBBM"/>
- <reg32 offset="0x0010b" name="RBBM_CLOCK_CNTL_UCHE"/>
- <reg32 offset="0x0010c" name="RBBM_CLOCK_CNTL2_UCHE"/>
- <reg32 offset="0x0010d" name="RBBM_CLOCK_CNTL3_UCHE"/>
- <reg32 offset="0x0010e" name="RBBM_CLOCK_CNTL4_UCHE"/>
- <reg32 offset="0x0010f" name="RBBM_CLOCK_DELAY_UCHE"/>
- <reg32 offset="0x00110" name="RBBM_CLOCK_HYST_UCHE"/>
- <reg32 offset="0x00111" name="RBBM_CLOCK_MODE_VFD"/>
- <reg32 offset="0x00112" name="RBBM_CLOCK_DELAY_VFD"/>
- <reg32 offset="0x00113" name="RBBM_CLOCK_HYST_VFD"/>
- <reg32 offset="0x00114" name="RBBM_CLOCK_MODE_GPC"/>
- <reg32 offset="0x00115" name="RBBM_CLOCK_DELAY_GPC"/>
- <reg32 offset="0x00116" name="RBBM_CLOCK_HYST_GPC"/>
- <reg32 offset="0x00117" name="RBBM_CLOCK_DELAY_HLSQ_2"/>
- <reg32 offset="0x00118" name="RBBM_CLOCK_CNTL_GMU_GX"/>
- <reg32 offset="0x00119" name="RBBM_CLOCK_DELAY_GMU_GX"/>
- <reg32 offset="0x0011a" name="RBBM_CLOCK_HYST_GMU_GX"/>
- <reg32 offset="0x0011b" name="RBBM_CLOCK_MODE_HLSQ"/>
- <reg32 offset="0x0011c" name="RBBM_CLOCK_DELAY_HLSQ"/>
- <reg32 offset="0x0011d" name="RBBM_CLOCK_HYST_HLSQ"/>
- <reg32 offset="0x0011e" name="RBBM_CGC_GLOBAL_LOAD_CMD" variants="A7XX-"/>
- <reg32 offset="0x0011f" name="RBBM_CGC_P2S_TRIG_CMD" variants="A7XX-"/>
- <reg32 offset="0x00120" name="RBBM_CLOCK_CNTL_TEX_FCHE"/>
- <reg32 offset="0x00121" name="RBBM_CLOCK_DELAY_TEX_FCHE"/>
+ <reg32 offset="0x01a" name="RBBM_WAIT_IDLE_CLOCKS_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x01b" name="RBBM_WAIT_IDLE_CLOCKS_CNTL2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x010" name="RBBM_WAIT_IDLE_CLOCKS_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x011" name="RBBM_WAIT_IDLE_CLOCKS_CNTL2" variants="A8XX-"/>
+
+ <reg32 offset="0x0001f" name="RBBM_INTERFACE_HANG_INT_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0002f" name="RBBM_INTERFACE_HANG_INT_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x00037" name="RBBM_INT_CLEAR_CMD" type="A6XX_RBBM_INT_0_MASK" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00061" name="RBBM_INT_CLEAR_CMD" type="A6XX_RBBM_INT_0_MASK" variants="A8XX-"/>
+ <reg32 offset="0x00038" name="RBBM_INT_0_MASK" type="A6XX_RBBM_INT_0_MASK" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00062" name="RBBM_INT_0_MASK" type="A6XX_RBBM_INT_0_MASK" variants="A8XX-"/>
+ <reg32 offset="0x0003a" name="RBBM_INT_2_MASK" variants="A7XX"/>
+ <reg32 offset="0x00064" name="RBBM_INT_2_MASK" variants="A8XX-"/>
+ <reg32 offset="0x00042" name="RBBM_SP_HYST_CNT" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00043" name="RBBM_SW_RESET_CMD" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00073" name="RBBM_SW_RESET_CMD" variants="A8XX-"/>
+ <reg32 offset="0x00044" name="RBBM_RAC_THRESHOLD_CNT" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00029" name="RBBM_RAC_THRESHOLD_CNT" variants="A8XX-"/>
+ <reg32 offset="0x00045" name="RBBM_BLOCK_SW_RESET_CMD" variants="A6XX"/>
+ <reg32 offset="0x00046" name="RBBM_BLOCK_SW_RESET_CMD2" variants="A6XX"/>
+ <reg32 offset="0x000ad" name="RBBM_CLOCK_CNTL_GLOBAL" variants="A7XX"/>
+ <reg32 offset="0x0009a" name="RBBM_CLOCK_CNTL_GLOBAL" variants="A8XX-"/>
+ <reg32 offset="0x07d" name="RBBM_POWER_UP_RESET_SW_OVERRIDE" variants="A8XX-"/>
+ <reg32 offset="0x07e" name="RBBM_POWER_UP_RESET_SW_BV_OVERRIDE" variants="A8XX-"/>
+ <reg32 offset="0x000ae" name="RBBM_CLOCK_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b0" name="RBBM_CLOCK_CNTL_SP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b1" name="RBBM_CLOCK_CNTL_SP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b2" name="RBBM_CLOCK_CNTL_SP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b3" name="RBBM_CLOCK_CNTL_SP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b4" name="RBBM_CLOCK_CNTL2_SP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b5" name="RBBM_CLOCK_CNTL2_SP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b6" name="RBBM_CLOCK_CNTL2_SP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b7" name="RBBM_CLOCK_CNTL2_SP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b8" name="RBBM_CLOCK_DELAY_SP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000b9" name="RBBM_CLOCK_DELAY_SP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000ba" name="RBBM_CLOCK_DELAY_SP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000bb" name="RBBM_CLOCK_DELAY_SP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000bc" name="RBBM_CLOCK_HYST_SP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000bd" name="RBBM_CLOCK_HYST_SP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000be" name="RBBM_CLOCK_HYST_SP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000bf" name="RBBM_CLOCK_HYST_SP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c0" name="RBBM_CLOCK_CNTL_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c1" name="RBBM_CLOCK_CNTL_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c2" name="RBBM_CLOCK_CNTL_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c3" name="RBBM_CLOCK_CNTL_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c4" name="RBBM_CLOCK_CNTL2_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c5" name="RBBM_CLOCK_CNTL2_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c6" name="RBBM_CLOCK_CNTL2_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c7" name="RBBM_CLOCK_CNTL2_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c8" name="RBBM_CLOCK_CNTL3_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000c9" name="RBBM_CLOCK_CNTL3_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000ca" name="RBBM_CLOCK_CNTL3_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000cb" name="RBBM_CLOCK_CNTL3_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000cc" name="RBBM_CLOCK_CNTL4_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000cd" name="RBBM_CLOCK_CNTL4_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000ce" name="RBBM_CLOCK_CNTL4_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000cf" name="RBBM_CLOCK_CNTL4_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d0" name="RBBM_CLOCK_DELAY_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d1" name="RBBM_CLOCK_DELAY_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d2" name="RBBM_CLOCK_DELAY_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d3" name="RBBM_CLOCK_DELAY_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d4" name="RBBM_CLOCK_DELAY2_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d5" name="RBBM_CLOCK_DELAY2_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d6" name="RBBM_CLOCK_DELAY2_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d7" name="RBBM_CLOCK_DELAY2_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d8" name="RBBM_CLOCK_DELAY3_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000d9" name="RBBM_CLOCK_DELAY3_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000da" name="RBBM_CLOCK_DELAY3_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000db" name="RBBM_CLOCK_DELAY3_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000dc" name="RBBM_CLOCK_DELAY4_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000dd" name="RBBM_CLOCK_DELAY4_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000de" name="RBBM_CLOCK_DELAY4_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000df" name="RBBM_CLOCK_DELAY4_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e0" name="RBBM_CLOCK_HYST_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e1" name="RBBM_CLOCK_HYST_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e2" name="RBBM_CLOCK_HYST_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e3" name="RBBM_CLOCK_HYST_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e4" name="RBBM_CLOCK_HYST2_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e5" name="RBBM_CLOCK_HYST2_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e6" name="RBBM_CLOCK_HYST2_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e7" name="RBBM_CLOCK_HYST2_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e8" name="RBBM_CLOCK_HYST3_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000e9" name="RBBM_CLOCK_HYST3_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000ea" name="RBBM_CLOCK_HYST3_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000eb" name="RBBM_CLOCK_HYST3_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000ec" name="RBBM_CLOCK_HYST4_TP0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000ed" name="RBBM_CLOCK_HYST4_TP1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000ee" name="RBBM_CLOCK_HYST4_TP2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000ef" name="RBBM_CLOCK_HYST4_TP3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f0" name="RBBM_CLOCK_CNTL_RB0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f1" name="RBBM_CLOCK_CNTL_RB1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f2" name="RBBM_CLOCK_CNTL_RB2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f3" name="RBBM_CLOCK_CNTL_RB3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f4" name="RBBM_CLOCK_CNTL2_RB0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f5" name="RBBM_CLOCK_CNTL2_RB1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f6" name="RBBM_CLOCK_CNTL2_RB2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f7" name="RBBM_CLOCK_CNTL2_RB3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f8" name="RBBM_CLOCK_CNTL_CCU0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000f9" name="RBBM_CLOCK_CNTL_CCU1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000fa" name="RBBM_CLOCK_CNTL_CCU2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x000fb" name="RBBM_CLOCK_CNTL_CCU3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00100" name="RBBM_CLOCK_HYST_RB_CCU0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00101" name="RBBM_CLOCK_HYST_RB_CCU1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00102" name="RBBM_CLOCK_HYST_RB_CCU2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00103" name="RBBM_CLOCK_HYST_RB_CCU3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00104" name="RBBM_CLOCK_CNTL_RAC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00105" name="RBBM_CLOCK_CNTL2_RAC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00106" name="RBBM_CLOCK_DELAY_RAC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00107" name="RBBM_CLOCK_HYST_RAC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00108" name="RBBM_CLOCK_CNTL_TSE_RAS_RBBM" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00109" name="RBBM_CLOCK_DELAY_TSE_RAS_RBBM" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0010a" name="RBBM_CLOCK_HYST_TSE_RAS_RBBM" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0010b" name="RBBM_CLOCK_CNTL_UCHE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0010c" name="RBBM_CLOCK_CNTL2_UCHE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0010d" name="RBBM_CLOCK_CNTL3_UCHE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0010e" name="RBBM_CLOCK_CNTL4_UCHE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0010f" name="RBBM_CLOCK_DELAY_UCHE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00110" name="RBBM_CLOCK_HYST_UCHE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00111" name="RBBM_CLOCK_MODE_VFD" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00112" name="RBBM_CLOCK_DELAY_VFD" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00113" name="RBBM_CLOCK_HYST_VFD" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00114" name="RBBM_CLOCK_MODE_GPC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00115" name="RBBM_CLOCK_DELAY_GPC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00116" name="RBBM_CLOCK_HYST_GPC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00117" name="RBBM_CLOCK_DELAY_HLSQ_2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00118" name="RBBM_CLOCK_CNTL_GMU_GX" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00119" name="RBBM_CLOCK_DELAY_GMU_GX" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0011a" name="RBBM_CLOCK_HYST_GMU_GX" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0011b" name="RBBM_CLOCK_MODE_HLSQ" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0011c" name="RBBM_CLOCK_DELAY_HLSQ" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0011d" name="RBBM_CLOCK_HYST_HLSQ" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0011e" name="RBBM_CGC_GLOBAL_LOAD_CMD" variants="A7XX"/>
+ <reg32 offset="0x0009b" name="RBBM_CGC_GLOBAL_LOAD_CMD" variants="A8XX-"/>
+ <reg32 offset="0x0011f" name="RBBM_CGC_P2S_TRIG_CMD" variants="A7XX"/>
+ <reg32 offset="0x0009c" name="RBBM_CGC_P2S_TRIG_CMD" variants="A8XX-"/>
+ <reg32 offset="0x00120" name="RBBM_CGC_P2S_CNTL" variants="A7XX"/>
+ <reg32 offset="0x0009d" name="RBBM_CGC_P2S_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x00120" name="RBBM_CLOCK_CNTL_TEX_FCHE" variants="A6XX"/>
+ <reg32 offset="0x00121" name="RBBM_CLOCK_DELAY_TEX_FCHE" variants="A6XX-A7XX"/>
<reg32 offset="0x00122" name="RBBM_CLOCK_HYST_TEX_FCHE" variants="A6XX"/>
- <reg32 offset="0x00122" name="RBBM_CGC_P2S_STATUS" variants="A7XX-">
+ <reg32 offset="0x00122" name="RBBM_CGC_P2S_STATUS" variants="A7XX">
+ <bitfield name="TXDONE" pos="0" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x09f" name="RBBM_CGC_P2S_STATUS" variants="A8XX-">
<bitfield name="TXDONE" pos="0" type="boolean"/>
</reg32>
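The P2S registers form a trigger/status pair: trigger the clock-gating state transfer, then poll TXDONE. A hedged sketch of that handshake against the A8XX- offsets above (write_reg()/read_reg() remain hypothetical stubs; the sequence mirrors how the equivalent A7XX registers are commonly driven, but is not copied from any particular driver):

    #include <stdbool.h>
    #include <stdint.h>

    #define A8XX_RBBM_CGC_P2S_TRIG_CMD  0x009c  /* per XML */
    #define A8XX_RBBM_CGC_P2S_STATUS    0x009f  /* per XML */
    #define RBBM_CGC_P2S_STATUS_TXDONE  (1u << 0)

    extern void write_reg(uint32_t dword_offset, uint32_t val);  /* stubs */
    extern uint32_t read_reg(uint32_t dword_offset);

    /* Trigger a P2S transfer and wait for the TXDONE acknowledgement. */
    static bool cgc_p2s_sync(unsigned int tries)
    {
        write_reg(A8XX_RBBM_CGC_P2S_TRIG_CMD, 1);
        while (tries--) {
            if (read_reg(A8XX_RBBM_CGC_P2S_STATUS) &
                RBBM_CGC_P2S_STATUS_TXDONE)
                return true;
        }
        return false;
    }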
- <reg32 offset="0x00123" name="RBBM_CLOCK_CNTL_FCHE"/>
- <reg32 offset="0x00124" name="RBBM_CLOCK_DELAY_FCHE"/>
- <reg32 offset="0x00125" name="RBBM_CLOCK_HYST_FCHE"/>
- <reg32 offset="0x00126" name="RBBM_CLOCK_CNTL_MHUB"/>
- <reg32 offset="0x00127" name="RBBM_CLOCK_DELAY_MHUB"/>
- <reg32 offset="0x00128" name="RBBM_CLOCK_HYST_MHUB"/>
- <reg32 offset="0x00129" name="RBBM_CLOCK_DELAY_GLC"/>
- <reg32 offset="0x0012a" name="RBBM_CLOCK_HYST_GLC"/>
- <reg32 offset="0x0012b" name="RBBM_CLOCK_CNTL_GLC"/>
- <reg32 offset="0x0012f" name="RBBM_CLOCK_HYST2_VFD" variants="A7XX-"/>
- <reg32 offset="0x005ff" name="RBBM_LPAC_GBIF_CLIENT_QOS_CNTL"/>
+ <reg32 offset="0x00123" name="RBBM_CLOCK_CNTL_FCHE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00124" name="RBBM_CLOCK_DELAY_FCHE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00125" name="RBBM_CLOCK_HYST_FCHE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00126" name="RBBM_CLOCK_CNTL_MHUB" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00127" name="RBBM_CLOCK_DELAY_MHUB" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00128" name="RBBM_CLOCK_HYST_MHUB" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00129" name="RBBM_CLOCK_DELAY_GLC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0012a" name="RBBM_CLOCK_HYST_GLC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0012b" name="RBBM_CLOCK_CNTL_GLC" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0012f" name="RBBM_CLOCK_HYST2_VFD" variants="A7XX"/>
+ <reg32 offset="0x00195" name="RBBM_CGC_0_PC" variants="A7XX"/>
+ <reg32 offset="0x0010b" name="RBBM_CGC_0_PC" variants="A8XX-"/>
+
+ <reg32 offset="0x005ff" name="RBBM_LPAC_GBIF_CLIENT_QOS_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x00009" name="RBBM_LPAC_GBIF_CLIENT_QOS_CNTL" variants="A8XX-"/>
<reg32 offset="0x0600" name="DBGC_CFG_DBGBUS_SEL_A"/>
<reg32 offset="0x0601" name="DBGC_CFG_DBGBUS_SEL_B"/>
@@ -610,6 +964,8 @@ by a particular renderpass/blit.
<reg32 offset="0x0605" name="DBGC_CFG_DBGBUS_CNTLM">
<bitfield high="27" low="24" name="ENABLE"/>
</reg32>
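Since DBGC_CFG_DBGBUS_CNTLM keeps its ENABLE field in bits [27:24], a caller has to shift the enable nibble into place; a one-line C sketch, grounded only in the bitfield above:

    #include <stdint.h>

    /* DBGC_CFG_DBGBUS_CNTLM.ENABLE occupies bits [27:24] per the XML. */
    static inline uint32_t dbgbus_cntlm_enable(uint32_t en)
    {
        return (en & 0xf) << 24;
    }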
+ <reg32 offset="0x0606" name="DBGC_CFG_DBGBUS_OPL"/>
+ <reg32 offset="0x0607" name="DBGC_CFG_DBGBUS_OPE"/>
<reg32 offset="0x0608" name="DBGC_CFG_DBGBUS_IVTL_0"/>
<reg32 offset="0x0609" name="DBGC_CFG_DBGBUS_IVTL_1"/>
<reg32 offset="0x060a" name="DBGC_CFG_DBGBUS_IVTL_2"/>
@@ -638,72 +994,276 @@ by a particular renderpass/blit.
<bitfield high="27" low="24" name="BYTEL14"/>
<bitfield high="31" low="28" name="BYTEL15"/>
</reg32>
+ <reg32 offset="0x0612" name="DBGC_CFG_DBGBUS_IVTE_0"/>
+ <reg32 offset="0x0613" name="DBGC_CFG_DBGBUS_IVTE_1"/>
+ <reg32 offset="0x0614" name="DBGC_CFG_DBGBUS_IVTE_2"/>
+ <reg32 offset="0x0615" name="DBGC_CFG_DBGBUS_IVTE_3"/>
+ <reg32 offset="0x0616" name="DBGC_CFG_DBGBUS_MASKE_0"/>
+ <reg32 offset="0x0617" name="DBGC_CFG_DBGBUS_MASKE_1"/>
+ <reg32 offset="0x0618" name="DBGC_CFG_DBGBUS_MASKE_2"/>
+ <reg32 offset="0x0619" name="DBGC_CFG_DBGBUS_MASKE_3"/>
+ <reg32 offset="0x061a" name="DBGC_CFG_DBGBUS_NIBBLEE"/>
+ <reg32 offset="0x061b" name="DBGC_CFG_DBGBUS_PTRC0"/>
+ <reg32 offset="0x061c" name="DBGC_CFG_DBGBUS_PTRC1"/>
+ <reg32 offset="0x061d" name="DBGC_CFG_DBGBUS_LOADREG"/>
+ <reg32 offset="0x061e" name="DBGC_CFG_DBGBUS_IDX"/>
+ <reg32 offset="0x061f" name="DBGC_CFG_DBGBUS_CLRC"/>
+ <reg32 offset="0x0620" name="DBGC_CFG_DBGBUS_LOADIVT"/>
+ <reg32 offset="0x0621" name="DBGC_VBIF_DBG_CNTL"/>
+ <reg32 offset="0x0622" name="DBGC_DBG_LO_HI_GPIO"/>
+ <reg32 offset="0x0623" name="DBGC_EXT_TRACE_BUS_CNTL"/>
+ <reg32 offset="0x0624" name="DBGC_READ_AHB_THROUGH_DBG"/>
+ <reg32 offset="0x0625" name="DBGC_CFG_DBGBUS_EVENT_LOGIC"/>
+ <reg32 offset="0x0626" name="DBGC_CFG_DBGBUS_OVER"/>
+ <reg32 offset="0x0627" name="DBGC_CFG_DBGBUS_COUNT0"/>
+ <reg32 offset="0x0628" name="DBGC_CFG_DBGBUS_COUNT1"/>
+ <reg32 offset="0x0629" name="DBGC_CFG_DBGBUS_COUNT2"/>
+ <reg32 offset="0x062a" name="DBGC_CFG_DBGBUS_COUNT3"/>
+ <reg32 offset="0x062b" name="DBGC_CFG_DBGBUS_COUNT4"/>
+ <reg32 offset="0x062c" name="DBGC_CFG_DBGBUS_COUNT5"/>
+ <reg32 offset="0x062d" name="DBGC_CFG_DBGBUS_TRACE_ADDR"/>
+ <reg32 offset="0x062e" name="DBGC_CFG_DBGBUS_TRACE_BUF0"/>
<reg32 offset="0x062f" name="DBGC_CFG_DBGBUS_TRACE_BUF1"/>
<reg32 offset="0x0630" name="DBGC_CFG_DBGBUS_TRACE_BUF2"/>
- <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2" variants="A6XX"/>
- <reg32 offset="0x0CD8" name="VSC_UNKNOWN_0CD8" variants="A7XX">
- <doc>
- Set to true when binning, isn't changed afterwards
- </doc>
- <bitfield name="BINNING" pos="0" type="boolean"/>
- </reg32>
+ <reg32 offset="0x0631" name="DBGC_CFG_DBGBUS_TRACE_BUF3"/>
+ <reg32 offset="0x0632" name="DBGC_CFG_DBGBUS_TRACE_BUF4"/>
+ <reg32 offset="0x0633" name="DBGC_CFG_DBGBUS_MISR0"/>
+ <reg32 offset="0x0634" name="DBGC_CFG_DBGBUS_MISR1"/>
+ <reg32 offset="0x0635" name="DBGC_EVT_CFG"/>
+ <reg32 offset="0x0636" name="DBGC_EVT_INTF_SEL_0"/>
+ <reg32 offset="0x0637" name="DBGC_EVT_INTF_SEL_1"/>
+ <reg32 offset="0x0638" name="DBGC_EVT_SLICE_CFG"/>
+ <reg32 offset="0x0639" name="DBGC_QDSS_TIMESTAMP_0"/>
+ <reg32 offset="0x063a" name="DBGC_QDSS_TIMESTAMP_1"/>
+ <reg32 offset="0x063b" name="DBGC_ECO_CNTL"/>
+ <reg32 offset="0x063c" name="DBGC_AHB_DBG_CNTL"/>
+ <reg32 offset="0x063d" name="DBGC_EVT_INTF_SEL_2"/>
+ <reg32 offset="0x0640" name="DBGC_CFG_DBGBUS_PONG_SEL_A"/>
+ <reg32 offset="0x0641" name="DBGC_CFG_DBGBUS_PONG_SEL_B"/>
+ <reg32 offset="0x0642" name="DBGC_CFG_DBGBUS_PONG_SEL_C"/>
+ <reg32 offset="0x0643" name="DBGC_CFG_DBGBUS_PONG_SEL_D"/>
+ <reg32 offset="0x0644" name="DBGC_CFG_DBGBUS_MISC_MODE"/>
+ <reg32 offset="0x0650" name="DBGC_EVT_INTF_SEL_3_0"/>
+ <reg32 offset="0x0651" name="DBGC_EVT_INTF_SEL_3_1"/>
+ <reg32 offset="0x0652" name="DBGC_EVT_INTF_SEL_3_2"/>
+ <reg32 offset="0x0653" name="DBGC_EVT_INTF_SEL_3_3"/>
+ <reg32 offset="0x0654" name="DBGC_EVT_INTF_SEL_3_4"/>
+ <reg32 offset="0x0655" name="DBGC_EVT_INTF_SEL_3_5"/>
+ <reg32 offset="0x0660" name="DBGC_TRACE_BUFFER_STATUS"/>
+ <reg32 offset="0x0661" name="DBGC_TRACE_BUFFER_CMD"/>
+ <reg32 offset="0x0662" name="DBGC_DBG_TRACE_BUFFER_RD_ADDR"/>
+ <reg32 offset="0x0663" name="DBGC_DBG_TRACE_BUFFER_RD_DATA"/>
+ <reg32 offset="0x0664" name="DBGC_TRACE_BUFFER_ATB_RD_STATUS"/>
+ <reg32 offset="0x0665" name="DBGC_SMMU_FAULT_BLOCK_HALT_CFG"/>
+ <reg32 offset="0x0666" name="DBGC_DBG_LOPC_SB_RD_ADDR"/>
+ <reg32 offset="0x0667" name="DBGC_DBG_LOPC_SB_RD_DATA"/>
+ <reg32 offset="0x0668" name="DBGC_DBG_LOPC_SB_WR_ADDR"/>
+ <reg32 offset="0x0669" name="DBGC_DBG_LOPC_SB_WR_DATA"/>
+ <reg32 offset="0x066a" name="DBGC_INTERRUPT_STATUS"/>
+ <reg64 offset="0x0680" name="DBGC_GBIF_DBG_BASE"/>
+ <reg32 offset="0x0682" name="DBGC_GBIF_DBG_BUFF_SIZE"/>
+ <reg32 offset="0x0683" name="DBGC_GBIF_DBG_CNTL"/>
+ <reg32 offset="0x0684" name="DBGC_GBIF_DBG_CMD"/>
+ <reg32 offset="0x0685" name="DBGC_GBIF_DBG_STATUS"/>
+
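Several of the DBGC registers added above are indexed ADDR/DATA pairs (e.g. DBGC_DBG_TRACE_BUFFER_RD_ADDR/RD_DATA). A hedged sketch of draining the trace buffer through such a pair; that the data port returns the word selected by the address port is the usual convention for these pairs, assumed here rather than spelled out in the XML:

    #include <stdint.h>

    #define DBGC_DBG_TRACE_BUFFER_RD_ADDR  0x0662  /* per XML */
    #define DBGC_DBG_TRACE_BUFFER_RD_DATA  0x0663  /* per XML */

    extern void write_reg(uint32_t dword_offset, uint32_t val);  /* stubs */
    extern uint32_t read_reg(uint32_t dword_offset);

    /* Read nwords entries out of the trace buffer via the ADDR/DATA pair. */
    static void dump_trace_buffer(uint32_t *out, unsigned int nwords)
    {
        for (unsigned int i = 0; i < nwords; i++) {
            write_reg(DBGC_DBG_TRACE_BUFFER_RD_ADDR, i);
            out[i] = read_reg(DBGC_DBG_TRACE_BUFFER_RD_DATA);
        }
    }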
+ <reg32 offset="0x0700" name="DBGC_SCOPE_PERF_COUNTER_CFG_US" variants="A8XX-"/>
+ <reg32 offset="0x0701" name="DBGC_CFG_PERF_TRIG_CLUSTER_FE_US" variants="A8XX-"/>
+ <reg32 offset="0x0702" name="DBGC_CFG_PERF_TRIG_CLUSTER_VPC_US" variants="A8XX-"/>
+ <reg32 offset="0x0703" name="DBGC_CFG_PERF_TRIG_CLUSTER_SP_VS_US" variants="A8XX-"/>
+ <reg32 offset="0x0704" name="DBGC_CFG_PERF_TRIG_CLUSTER_SP_PS_US" variants="A8XX-"/>
+ <reg32 offset="0x0707" name="DBGC_CFG_PERF_TRIG_CLUSTER_NONE_US" variants="A8XX-"/>
+ <reg32 offset="0x0708" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_FE_US" variants="A8XX-"/>
+ <reg32 offset="0x0709" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_VPC_US" variants="A8XX-"/>
+ <reg32 offset="0x070a" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_SP_VS_US" variants="A8XX-"/>
+ <reg32 offset="0x070f" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_NONE_US" variants="A8XX-"/>
+ <reg32 offset="0x0710" name="DBGC_CFG_PERF_COUNTER_SEL_FE_US" variants="A8XX-"/>
+ <reg32 offset="0x0711" name="DBGC_CFG_PERF_COUNTER_SEL_FE_US_1" variants="A8XX-"/>
+ <reg32 offset="0x0712" name="DBGC_CFG_PERF_COUNTER_SEL_FE_US_2" variants="A8XX-"/>
+ <reg32 offset="0x0713" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_US" variants="A8XX-"/>
+ <reg32 offset="0x0714" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_US_1" variants="A8XX-"/>
+ <reg32 offset="0x0715" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS_US" variants="A8XX-"/>
+ <reg32 offset="0x0716" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS_US" variants="A8XX-"/>
+ <reg32 offset="0x0720" name="DBGC_CFG_PERF_COUNTER_SEL_NONE_US" variants="A8XX-"/>
+ <reg32 offset="0x0721" name="DBGC_CFG_PERF_COUNTER_SEL_NONE_US_1" variants="A8XX-"/>
+ <reg32 offset="0x0722" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_US" variants="A8XX-"/>
+ <reg32 offset="0x0723" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_US_1" variants="A8XX-"/>
+ <reg32 offset="0x0724" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_US_2" variants="A8XX-"/>
+ <reg32 offset="0x0730" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_US" variants="A8XX-"/>
+ <reg32 offset="0x0731" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_US_1" variants="A8XX-"/>
+ <reg32 offset="0x0732" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS_US" variants="A8XX-"/>
+ <reg32 offset="0x0740" name="DBGC_CFG_BV_PERF_COUNTER_SEL_NONE_US" variants="A8XX-"/>
+ <reg32 offset="0x0742" name="DBGC_CFG_PERF_TIMESTAMP_TRIG_SEL_US" variants="A8XX-"/>
+ <reg32 offset="0x0743" name="DBGC_CFG_BV_PERF_TIMESTAMP_TRIG_SEL_US" variants="A8XX-"/>
+ <reg64 offset="0x0744" name="DBGC_CFG_GBIF_BR_PERF_CNTR_BASE" variants="A8XX-"/>
+ <reg32 offset="0x0746" name="DBGC_CFG_GBIF_BR_BUFFER_SIZE" variants="A8XX-"/>
+ <reg64 offset="0x0747" name="DBGC_CFG_GBIF_BV_PERF_CNTR_BASE" variants="A8XX-"/>
+ <reg32 offset="0x0749" name="DBGC_CFG_GBIF_BV_BUFFER_SIZE" variants="A8XX-"/>
+ <reg32 offset="0x074a" name="DBGC_CFG_GBIF_QOS_CTRL" variants="A8XX-"/>
+ <reg32 offset="0x0750" name="DBGC_GBIF_BR_PERF_CNTR_WRITE_POINTER" variants="A8XX-"/>
+ <reg32 offset="0x0751" name="DBGC_GBIF_BV_PERF_CNTR_WRITE_POINTER" variants="A8XX-"/>
+ <reg32 offset="0x0752" name="DBGC_PERF_COUNTER_FE_LOCAL_BATCH_ID" variants="A8XX-"/>
+ <reg32 offset="0x0753" name="DBGC_CFG_PERF_WAIT_IDLE_CLOCKS_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x0754" name="DBGC_PERF_COUNTER_SCOPING_CMD_US" variants="A8XX-"/>
+ <reg32 offset="0x0755" name="DBGC_PERF_SKEW_BUFFER_INIT_CMD" variants="A8XX-"/>
+ <reg32 offset="0x0759" name="DBGC_LOPC_INTERRUPT_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x075a" name="DBGC_LOPC_BUFFER_PTR_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x075b" name="DBGC_PERF_SCOPING_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x075c" name="DBGC_PERF_COUNTER_PKT_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x0760" name="DBGC_GC_LIVE_MBX_PKT_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x0761" name="DBGC_GC_ALW_MBX_PKT_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x0762" name="DBGC_AO_CNTR_LO_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x0763" name="DBGC_AO_CNTR_HI_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x0770" name="DBGC_LOPC_GC_SB_DEPTH_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x0780" name="DBGC_LPAC_SCOPE_PERF_COUNTER_CFG_US" variants="A8XX-"/>
+ <reg32 offset="0x0781" name="DBGC_CFG_PERF_TRIG_LPAC_US" variants="A8XX-"/>
+ <reg32 offset="0x0782" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_US" variants="A8XX-"/>
+ <reg32 offset="0x0783" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_US_1" variants="A8XX-"/>
+ <reg32 offset="0x0784" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_US_2" variants="A8XX-"/>
+ <reg32 offset="0x0785" name="DBGC_CFG_PERF_TIMESTAMP_TRIG_SEL_LPAC_US" variants="A8XX-"/>
+ <reg64 offset="0x0786" name="DBGC_CFG_GBIF_LPAC_PERF_CNTR_BASE" variants="A8XX-"/>
+ <reg32 offset="0x0788" name="DBGC_CFG_GBIF_LPAC_BUFFER_SIZE" variants="A8XX-"/>
+ <reg32 offset="0x0789" name="DBGC_GBIF_LPAC_PERF_CNTR_WRITE_POINTER" variants="A8XX-"/>
+ <reg32 offset="0x078a" name="DBGC_CFG_LPAC_PERF_WAIT_IDLE_CLOCKS_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x078b" name="DBGC_LPAC_PERF_COUNTER_SCOPING_CMD_US" variants="A8XX-"/>
+ <reg32 offset="0x078c" name="DBGC_LPAC_MBX_PKT_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x078d" name="DBGC_LPAC_PERF_SCOPING_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x0790" name="DBGC_LOPC_LPAC_SB_DEPTH_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x07a0" name="DBGC_SCOPE_PERF_COUNTER_CFG_S" variants="A8XX-"/>
+ <reg32 offset="0x07a1" name="DBGC_CFG_PERF_TRIG_CLUSTER_FE_S" variants="A8XX-"/>
+ <reg32 offset="0x07a2" name="DBGC_CFG_PERF_TRIG_CLUSTER_SP_VS" variants="A8XX-"/>
+ <reg32 offset="0x07a3" name="DBGC_CFG_PERF_TRIG_CLUSTER_VPC_VS" variants="A8XX-"/>
+ <reg32 offset="0x07a4" name="DBGC_CFG_PERF_TRIG_CLUSTER_GRAS" variants="A8XX-"/>
+ <reg32 offset="0x07a5" name="DBGC_CFG_PERF_TRIG_CLUSTER_SP_PS" variants="A8XX-"/>
+ <reg32 offset="0x07a6" name="DBGC_CFG_PERF_TRIG_CLUSTER_VPC_PS" variants="A8XX-"/>
+ <reg32 offset="0x07a7" name="DBGC_CFG_PERF_TRIG_CLUSTER_PS" variants="A8XX-"/>
+ <reg32 offset="0x07a8" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_FE_S" variants="A8XX-"/>
+ <reg32 offset="0x07a9" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_SP_VS" variants="A8XX-"/>
+ <reg32 offset="0x07aa" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_VPC_VS" variants="A8XX-"/>
+ <reg32 offset="0x07ab" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_GRAS" variants="A8XX-"/>
+ <reg32 offset="0x07ac" name="DBGC_CFG_BV_PERF_TRIG_CLUSTER_VPC_PS" variants="A8XX-"/>
+ <reg32 offset="0x07ad" name="DBGC_CFG_PERF_COUNTER_SEL_FE_S" variants="A8XX-"/>
+ <reg32 offset="0x07ae" name="DBGC_CFG_PERF_COUNTER_SEL_FE_S_1" variants="A8XX-"/>
+ <reg32 offset="0x07af" name="DBGC_CFG_PERF_COUNTER_SEL_FE_S_2" variants="A8XX-"/>
+ <reg32 offset="0x07b0" name="DBGC_CFG_PERF_COUNTER_SEL_FE_S_3" variants="A8XX-"/>
+ <reg32 offset="0x07b1" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS" variants="A8XX-"/>
+ <reg32 offset="0x07b2" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS_1" variants="A8XX-"/>
+ <reg32 offset="0x07b3" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS_2" variants="A8XX-"/>
+ <reg32 offset="0x07b4" name="DBGC_CFG_PERF_COUNTER_SEL_SP_VS_3" variants="A8XX-"/>
+ <reg32 offset="0x07b5" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_VS" variants="A8XX-"/>
+ <reg32 offset="0x07b6" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_VS_1" variants="A8XX-"/>
+ <reg32 offset="0x07b7" name="DBGC_CFG_PERF_COUNTER_SEL_GRAS" variants="A8XX-"/>
+ <reg32 offset="0x07b8" name="DBGC_CFG_PERF_COUNTER_SEL_GRAS_1" variants="A8XX-"/>
+ <reg32 offset="0x07b9" name="DBGC_CFG_PERF_COUNTER_SEL_GRAS_2" variants="A8XX-"/>
+ <reg32 offset="0x07ba" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS" variants="A8XX-"/>
+ <reg32 offset="0x07bb" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS_1" variants="A8XX-"/>
+ <reg32 offset="0x07bc" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS_2" variants="A8XX-"/>
+ <reg32 offset="0x07bd" name="DBGC_CFG_PERF_COUNTER_SEL_SP_PS_3" variants="A8XX-"/>
+ <reg32 offset="0x07be" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_PS" variants="A8XX-"/>
+ <reg32 offset="0x07bf" name="DBGC_CFG_PERF_COUNTER_SEL_VPC_PS_1" variants="A8XX-"/>
+ <reg32 offset="0x07c0" name="DBGC_CFG_PERF_COUNTER_SEL_PS" variants="A8XX-"/>
+ <reg32 offset="0x07c1" name="DBGC_CFG_PERF_COUNTER_SEL_PS_1" variants="A8XX-"/>
+ <reg32 offset="0x07c2" name="DBGC_CFG_PERF_COUNTER_SEL_PS_2" variants="A8XX-"/>
+ <reg32 offset="0x07c3" name="DBGC_CFG_PERF_COUNTER_SEL_PS_3" variants="A8XX-"/>
+ <reg32 offset="0x07c4" name="DBGC_CFG_PERF_TIMESTAMP_TRIG_SEL_S" variants="A8XX-"/>
+ <reg32 offset="0x07c5" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_S" variants="A8XX-"/>
+ <reg32 offset="0x07c6" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_S_1" variants="A8XX-"/>
+ <reg32 offset="0x07c7" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_S_2" variants="A8XX-"/>
+ <reg32 offset="0x07c8" name="DBGC_CFG_BV_PERF_COUNTER_SEL_FE_S_3" variants="A8XX-"/>
+ <reg32 offset="0x07c9" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS" variants="A8XX-"/>
+ <reg32 offset="0x07ca" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS_1" variants="A8XX-"/>
+ <reg32 offset="0x07cb" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS_2" variants="A8XX-"/>
+ <reg32 offset="0x07cc" name="DBGC_CFG_BV_PERF_COUNTER_SEL_SP_VS_3" variants="A8XX-"/>
+ <reg32 offset="0x07cd" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_VS" variants="A8XX-"/>
+ <reg32 offset="0x07ce" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_VS_1" variants="A8XX-"/>
+ <reg32 offset="0x07cf" name="DBGC_CFG_BV_PERF_COUNTER_SEL_GRAS" variants="A8XX-"/>
+ <reg32 offset="0x07d0" name="DBGC_CFG_BV_PERF_COUNTER_SEL_GRAS_1" variants="A8XX-"/>
+ <reg32 offset="0x07d1" name="DBGC_CFG_BV_PERF_COUNTER_SEL_GRAS_2" variants="A8XX-"/>
+ <reg32 offset="0x07d2" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_PS" variants="A8XX-"/>
+ <reg32 offset="0x07d3" name="DBGC_CFG_BV_PERF_COUNTER_SEL_VPC_PS_1" variants="A8XX-"/>
+ <reg32 offset="0x07d4" name="DBGC_CFG_BV_PERF_TIMESTAMP_TRIG_SEL_S" variants="A8XX-"/>
+ <reg32 offset="0x07d5" name="DBGC_PERF_COUNTER_SCOPING_CMD_S" variants="A8XX-"/>
+ <reg32 offset="0x07e0" name="DBGC_LPAC_SCOPE_PERF_COUNTER_CFG_S" variants="A8XX-"/>
+ <reg32 offset="0x07e1" name="DBGC_CFG_PERF_TRIG_LPAC_S" variants="A8XX-"/>
+ <reg32 offset="0x07e2" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_S" variants="A8XX-"/>
+ <reg32 offset="0x07e3" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_S_1" variants="A8XX-"/>
+ <reg32 offset="0x07e4" name="DBGC_CFG_PERF_COUNTER_SEL_LPAC_S_2" variants="A8XX-"/>
+ <reg32 offset="0x07e5" name="DBGC_CFG_PERF_TIMESTAMP_TRIG_SEL_LPAC_S" variants="A8XX-"/>
+ <reg32 offset="0x07e6" name="DBGC_LPAC_PERF_COUNTER_SCOPING_CMD_S" variants="A8XX-"/>
+
+ <array offset="0x0CD8" name="VSC_PERFCTR_VSC_SEL" stride="1" length="2"/>
<reg32 offset="0xC800" name="HLSQ_DBG_AHB_READ_APERTURE"/>
<reg32 offset="0xD000" name="HLSQ_DBG_READ_SEL"/>
- <reg32 offset="0x0E00" name="UCHE_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0x0E00" name="UCHE_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/>
<reg32 offset="0x0E01" name="UCHE_MODE_CNTL"/>
- <reg64 offset="0x0E05" name="UCHE_WRITE_RANGE_MAX"/>
- <reg64 offset="0x0E07" name="UCHE_WRITE_THRU_BASE"/>
- <reg64 offset="0x0E09" name="UCHE_TRAP_BASE"/>
- <reg64 offset="0x0E0B" name="UCHE_GMEM_RANGE_MIN"/>
- <reg64 offset="0x0E0D" name="UCHE_GMEM_RANGE_MAX"/>
- <reg32 offset="0x0E17" name="UCHE_CACHE_WAYS" usage="cmd"/>
+ <reg64 offset="0x0E05" name="UCHE_WRITE_RANGE_MAX" variants="A6XX"/>
+ <reg64 offset="0x0E07" name="UCHE_WRITE_THRU_BASE" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0E06" name="UCHE_WRITE_THRU_BASE" variants="A8XX-"/>
+ <reg64 offset="0x0E09" name="UCHE_TRAP_BASE" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0E08" name="UCHE_TRAP_BASE" variants="A8XX-"/>
+ <reg64 offset="0x0E0B" name="UCHE_GMEM_RANGE_MIN" variants="A6XX-A7XX"/>
+ <reg64 offset="0x0E0D" name="UCHE_GMEM_RANGE_MAX" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0e17" name="UCHE_CACHE_WAYS" variants="A6XX-A7XX" usage="init"/>
+ <reg32 offset="0x0e04" name="UCHE_CACHE_WAYS" variants="A8XX-"/>
<reg32 offset="0x0E18" name="UCHE_FILTER_CNTL"/>
- <reg32 offset="0x0E19" name="UCHE_CLIENT_PF" usage="cmd">
+ <reg32 offset="0x0e19" name="UCHE_CLIENT_PF" variants="A6XX-A7XX" usage="init">
<bitfield high="7" low="0" name="PERFSEL"/>
</reg32>
- <array offset="0x0E1C" name="UCHE_PERFCTR_UCHE_SEL" stride="1" length="12"/>
- <reg32 offset="0x0e3a" name="UCHE_GBIF_GX_CONFIG"/>
- <reg32 offset="0x0e3c" name="UCHE_CMDQ_CONFIG"/>
+ <array offset="0x0e1c" name="UCHE_PERFCTR_UCHE_SEL" stride="1" length="12" variants="A6XX-A7XX"/>
+ <array offset="0x0e20" name="UCHE_PERFCTR_UCHE_SEL" stride="1" length="24" variants="A8XX-"/>
+ <reg32 offset="0x0e3a" name="UCHE_GBIF_GX_CONFIG" variants="A6XX-A7XX"/>
+ <reg32 offset="0x0e12" name="UCHE_GBIF_GX_CONFIG" variants="A8XX-"/>
+ <reg32 offset="0x0e3c" name="UCHE_CMDQ_CONFIG" variants="A6XX-A7XX"/>
+
+ <reg32 offset="0x0f01" name="UCHE_CCHE_MODE_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x0f02" name="UCHE_CCHE_CACHE_WAYS" variants="A8XX-"/>
+ <reg64 offset="0x0f04" name="UCHE_CCHE_WRITE_THRU_BASE" variants="A8XX-"/>
+ <reg64 offset="0x0f06" name="UCHE_CCHE_TRAP_BASE" variants="A8XX-"/>
+ <reg64 offset="0x0f08" name="UCHE_CCHE_GC_GMEM_RANGE_MIN" variants="A8XX-"/>
+ <reg64 offset="0x0f0a" name="UCHE_CCHE_LPAC_GMEM_RANGE_MIN" variants="A8XX-"/>
+ <reg32 offset="0x0f0c" name="UCHE_CCHE_HW_DBG_CNTL" variants="A8XX-"/>
- <reg32 offset="0x3000" name="VBIF_VERSION"/>
- <reg32 offset="0x3001" name="VBIF_CLKON">
+ <!-- VBIF only existed on early a6xx and was later replaced by GBIF -->
+
+ <reg32 offset="0x3000" name="VBIF_VERSION" variants="A6XX"/>
+ <reg32 offset="0x3001" name="VBIF_CLKON" variants="A6XX">
<bitfield pos="1" name="FORCE_ON_TESTBUS" type="boolean"/>
</reg32>
- <reg32 offset="0x302A" name="VBIF_GATE_OFF_WRREQ_EN"/>
- <reg32 offset="0x3080" name="VBIF_XIN_HALT_CTRL0"/>
- <reg32 offset="0x3081" name="VBIF_XIN_HALT_CTRL1"/>
- <reg32 offset="0x3084" name="VBIF_TEST_BUS_OUT_CTRL"/>
- <reg32 offset="0x3085" name="VBIF_TEST_BUS1_CTRL0"/>
- <reg32 offset="0x3086" name="VBIF_TEST_BUS1_CTRL1">
+ <reg32 offset="0x302A" name="VBIF_GATE_OFF_WRREQ_EN" variants="A6XX"/>
+ <reg32 offset="0x3080" name="VBIF_XIN_HALT_CTRL0" variants="A6XX"/>
+ <reg32 offset="0x3081" name="VBIF_XIN_HALT_CTRL1" variants="A6XX"/>
+ <reg32 offset="0x3084" name="VBIF_TEST_BUS_OUT_CTRL" variants="A6XX"/>
+ <reg32 offset="0x3085" name="VBIF_TEST_BUS1_CTRL0" variants="A6XX"/>
+ <reg32 offset="0x3086" name="VBIF_TEST_BUS1_CTRL1" variants="A6XX">
<bitfield low="0" high="3" name="DATA_SEL"/>
</reg32>
- <reg32 offset="0x3087" name="VBIF_TEST_BUS2_CTRL0"/>
- <reg32 offset="0x3088" name="VBIF_TEST_BUS2_CTRL1">
+ <reg32 offset="0x3087" name="VBIF_TEST_BUS2_CTRL0" variants="A6XX"/>
+ <reg32 offset="0x3088" name="VBIF_TEST_BUS2_CTRL1" variants="A6XX">
<bitfield low="0" high="8" name="DATA_SEL"/>
</reg32>
- <reg32 offset="0x308c" name="VBIF_TEST_BUS_OUT"/>
- <reg32 offset="0x30d0" name="VBIF_PERF_CNT_SEL0"/>
- <reg32 offset="0x30d1" name="VBIF_PERF_CNT_SEL1"/>
- <reg32 offset="0x30d2" name="VBIF_PERF_CNT_SEL2"/>
- <reg32 offset="0x30d3" name="VBIF_PERF_CNT_SEL3"/>
- <reg32 offset="0x30d8" name="VBIF_PERF_CNT_LOW0"/>
- <reg32 offset="0x30d9" name="VBIF_PERF_CNT_LOW1"/>
- <reg32 offset="0x30da" name="VBIF_PERF_CNT_LOW2"/>
- <reg32 offset="0x30db" name="VBIF_PERF_CNT_LOW3"/>
- <reg32 offset="0x30e0" name="VBIF_PERF_CNT_HIGH0"/>
- <reg32 offset="0x30e1" name="VBIF_PERF_CNT_HIGH1"/>
- <reg32 offset="0x30e2" name="VBIF_PERF_CNT_HIGH2"/>
- <reg32 offset="0x30e3" name="VBIF_PERF_CNT_HIGH3"/>
- <reg32 offset="0x3100" name="VBIF_PERF_PWR_CNT_EN0"/>
- <reg32 offset="0x3101" name="VBIF_PERF_PWR_CNT_EN1"/>
- <reg32 offset="0x3102" name="VBIF_PERF_PWR_CNT_EN2"/>
- <reg32 offset="0x3110" name="VBIF_PERF_PWR_CNT_LOW0"/>
- <reg32 offset="0x3111" name="VBIF_PERF_PWR_CNT_LOW1"/>
- <reg32 offset="0x3112" name="VBIF_PERF_PWR_CNT_LOW2"/>
- <reg32 offset="0x3118" name="VBIF_PERF_PWR_CNT_HIGH0"/>
- <reg32 offset="0x3119" name="VBIF_PERF_PWR_CNT_HIGH1"/>
- <reg32 offset="0x311a" name="VBIF_PERF_PWR_CNT_HIGH2"/>
-
+ <reg32 offset="0x308c" name="VBIF_TEST_BUS_OUT" variants="A6XX"/>
+ <reg32 offset="0x30d0" name="VBIF_PERF_CNT_SEL0" variants="A6XX"/>
+ <reg32 offset="0x30d1" name="VBIF_PERF_CNT_SEL1" variants="A6XX"/>
+ <reg32 offset="0x30d2" name="VBIF_PERF_CNT_SEL2" variants="A6XX"/>
+ <reg32 offset="0x30d3" name="VBIF_PERF_CNT_SEL3" variants="A6XX"/>
+ <reg32 offset="0x30d8" name="VBIF_PERF_CNT_LOW0" variants="A6XX"/>
+ <reg32 offset="0x30d9" name="VBIF_PERF_CNT_LOW1" variants="A6XX"/>
+ <reg32 offset="0x30da" name="VBIF_PERF_CNT_LOW2" variants="A6XX"/>
+ <reg32 offset="0x30db" name="VBIF_PERF_CNT_LOW3" variants="A6XX"/>
+ <reg32 offset="0x30e0" name="VBIF_PERF_CNT_HIGH0" variants="A6XX"/>
+ <reg32 offset="0x30e1" name="VBIF_PERF_CNT_HIGH1" variants="A6XX"/>
+ <reg32 offset="0x30e2" name="VBIF_PERF_CNT_HIGH2" variants="A6XX"/>
+ <reg32 offset="0x30e3" name="VBIF_PERF_CNT_HIGH3" variants="A6XX"/>
+ <reg32 offset="0x3100" name="VBIF_PERF_PWR_CNT_EN0" variants="A6XX"/>
+ <reg32 offset="0x3101" name="VBIF_PERF_PWR_CNT_EN1" variants="A6XX"/>
+ <reg32 offset="0x3102" name="VBIF_PERF_PWR_CNT_EN2" variants="A6XX"/>
+ <reg32 offset="0x3110" name="VBIF_PERF_PWR_CNT_LOW0" variants="A6XX"/>
+ <reg32 offset="0x3111" name="VBIF_PERF_PWR_CNT_LOW1" variants="A6XX"/>
+ <reg32 offset="0x3112" name="VBIF_PERF_PWR_CNT_LOW2" variants="A6XX"/>
+ <reg32 offset="0x3118" name="VBIF_PERF_PWR_CNT_HIGH0" variants="A6XX"/>
+ <reg32 offset="0x3119" name="VBIF_PERF_PWR_CNT_HIGH1" variants="A6XX"/>
+ <reg32 offset="0x311a" name="VBIF_PERF_PWR_CNT_HIGH2" variants="A6XX"/>
+
+ <reg32 offset="0x3c00" name="GBIF_CX_CONFIG" variants="A8XX-"/>
<reg32 offset="0x3c01" name="GBIF_SCACHE_CNTL0"/>
<reg32 offset="0x3c02" name="GBIF_SCACHE_CNTL1"/>
<reg32 offset="0x3c03" name="GBIF_QSB_SIDE0"/>
@@ -712,30 +1272,66 @@ by a particular renderpass/blit.
<reg32 offset="0x3c06" name="GBIF_QSB_SIDE3"/>
<reg32 offset="0x3c45" name="GBIF_HALT"/>
<reg32 offset="0x3c46" name="GBIF_HALT_ACK"/>
+ <reg32 offset="0x3c49" name="GBIF_REINIT_ENABLE" variants="A8XX-"/>
+ <reg32 offset="0x3c4a" name="GBIF_REINIT_DONE" variants="A8XX-"/>
<reg32 offset="0x3cc0" name="GBIF_PERF_PWR_CNT_EN"/>
<reg32 offset="0x3cc1" name="GBIF_PERF_PWR_CNT_CLR"/>
<reg32 offset="0x3cc2" name="GBIF_PERF_CNT_SEL"/>
- <reg32 offset="0x3cc3" name="GBIF_PERF_PWR_CNT_SEL"/>
- <reg32 offset="0x3cc4" name="GBIF_PERF_CNT_LOW0"/>
- <reg32 offset="0x3cc5" name="GBIF_PERF_CNT_LOW1"/>
- <reg32 offset="0x3cc6" name="GBIF_PERF_CNT_LOW2"/>
- <reg32 offset="0x3cc7" name="GBIF_PERF_CNT_LOW3"/>
- <reg32 offset="0x3cc8" name="GBIF_PERF_CNT_HIGH0"/>
- <reg32 offset="0x3cc9" name="GBIF_PERF_CNT_HIGH1"/>
- <reg32 offset="0x3cca" name="GBIF_PERF_CNT_HIGH2"/>
- <reg32 offset="0x3ccb" name="GBIF_PERF_CNT_HIGH3"/>
- <reg32 offset="0x3ccc" name="GBIF_PWR_CNT_LOW0"/>
- <reg32 offset="0x3ccd" name="GBIF_PWR_CNT_LOW1"/>
- <reg32 offset="0x3cce" name="GBIF_PWR_CNT_LOW2"/>
- <reg32 offset="0x3ccf" name="GBIF_PWR_CNT_HIGH0"/>
- <reg32 offset="0x3cd0" name="GBIF_PWR_CNT_HIGH1"/>
- <reg32 offset="0x3cd1" name="GBIF_PWR_CNT_HIGH2"/>
+ <reg32 offset="0x3cc3" name="GBIF_PERF_CNT_SEL_1" variants="A8XX-"/>
+
+ <reg32 offset="0x3cc3" name="GBIF_PERF_PWR_CNT_SEL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cc4" name="GBIF_PERF_CNT_LOW0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cc5" name="GBIF_PERF_CNT_LOW1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cc6" name="GBIF_PERF_CNT_LOW2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cc7" name="GBIF_PERF_CNT_LOW3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cc8" name="GBIF_PERF_CNT_HIGH0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cc9" name="GBIF_PERF_CNT_HIGH1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cca" name="GBIF_PERF_CNT_HIGH2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3ccb" name="GBIF_PERF_CNT_HIGH3" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3ccc" name="GBIF_PWR_CNT_LOW0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3ccd" name="GBIF_PWR_CNT_LOW1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cce" name="GBIF_PWR_CNT_LOW2" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3ccf" name="GBIF_PWR_CNT_HIGH0" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cd0" name="GBIF_PWR_CNT_HIGH1" variants="A6XX-A7XX"/>
+ <reg32 offset="0x3cd1" name="GBIF_PWR_CNT_HIGH2" variants="A6XX-A7XX"/>
+
+ <reg32 offset="0x3cc4" name="GBIF_PWR_CNT_SEL" variants="A8XX"/>
+ <reg32 offset="0x3cc6" name="GBIF_PERF_CNT_LO_0" variants="A8XX"/>
+ <reg32 offset="0x3cc7" name="GBIF_PERF_CNT_HI_0" variants="A8XX"/>
+ <reg32 offset="0x3cc8" name="GBIF_PERF_CNT_LO_1" variants="A8XX"/>
+ <reg32 offset="0x3cc9" name="GBIF_PERF_CNT_HI_1" variants="A8XX"/>
+ <reg32 offset="0x3cca" name="GBIF_PERF_CNT_LO_2" variants="A8XX"/>
+ <reg32 offset="0x3ccb" name="GBIF_PERF_CNT_HI_2" variants="A8XX"/>
+ <reg32 offset="0x3ccc" name="GBIF_PERF_CNT_LO_3" variants="A8XX"/>
+ <reg32 offset="0x3ccd" name="GBIF_PERF_CNT_HI_3" variants="A8XX"/>
+ <reg32 offset="0x3cce" name="GBIF_PERF_CNT_LO_4" variants="A8XX"/>
+ <reg32 offset="0x3ccf" name="GBIF_PERF_CNT_HI_4" variants="A8XX"/>
+ <reg32 offset="0x3cd0" name="GBIF_PERF_CNT_LO_5" variants="A8XX"/>
+ <reg32 offset="0x3cd1" name="GBIF_PERF_CNT_HI_5" variants="A8XX"/>
+ <reg32 offset="0x3cd2" name="GBIF_PERF_CNT_LO_6" variants="A8XX"/>
+ <reg32 offset="0x3cd3" name="GBIF_PERF_CNT_HI_6" variants="A8XX"/>
+ <reg32 offset="0x3cd4" name="GBIF_PERF_CNT_LO_7" variants="A8XX"/>
+ <reg32 offset="0x3cd5" name="GBIF_PERF_CNT_HI_7" variants="A8XX"/>
+ <reg32 offset="0x3ce0" name="GBIF_PWR_CNT_LO_0" variants="A8XX"/>
+ <reg32 offset="0x3ce1" name="GBIF_PWR_CNT_LO_1" variants="A8XX"/>
+ <reg32 offset="0x3ce2" name="GBIF_PWR_CNT_LO_2" variants="A8XX"/>
+ <reg32 offset="0x3ce3" name="GBIF_PWR_CNT_HI_0" variants="A8XX"/>
+ <reg32 offset="0x3ce4" name="GBIF_PWR_CNT_HI_1" variants="A8XX"/>
+ <reg32 offset="0x3ce5" name="GBIF_PWR_CNT_HI_2" variants="A8XX"/>
<reg32 offset="0x0c00" name="VSC_DBG_ECO_CNTL"/>
- <reg32 offset="0x0c02" name="VSC_BIN_SIZE" usage="rp_blit">
- <bitfield name="WIDTH" low="0" high="7" shr="5" type="uint"/>
- <bitfield name="HEIGHT" low="8" high="16" shr="4" type="uint"/>
+ <reg32 offset="0x0df0" name="VSC_KMD_DBG_ECO_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x0c02" name="VSC_BIN_SIZE" usage="rp_blit" variants="A6XX-A7XX">
+ <bitfield name="BINW" low="0" high="7" shr="5" type="uint"/>
+ <bitfield name="BINH" low="8" high="16" shr="4" type="uint"/>
</reg32>
+
+ <bitset name="a8xx_bin_size" inline="yes">
+ <bitfield name="BINW" low="0" high="9" shr="5" type="uint"/>
+ <bitfield name="BINH" low="16" high="26" shr="4" type="uint"/>
+ </bitset>
+
+ <reg32 offset="0x0c02" name="VSC_BIN_SIZE" type="a8xx_bin_size" usage="rp_blit" variants="A8XX"/>
<reg64 offset="0x0c03" name="VSC_SIZE_BASE" type="waddress" usage="cmd"/>
<reg32 offset="0x0c06" name="VSC_EXPANDED_BIN_CNTL" usage="rp_blit">
<bitfield name="NX" low="1" high="10" type="uint"/>
@@ -803,10 +1399,14 @@ by a particular renderpass/blit.
<reg32 offset="0x0d08" name="VSC_UNKNOWN_0D08" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x0E10" name="UCHE_UNKNOWN_0E10" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x0E11" name="UCHE_UNKNOWN_0E11" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x0e10" name="UCHE_UNKNOWN_0E10" variants="A7XX" usage="init"/>
+ <reg32 offset="0x0e10" name="UCHE_VARB_IDLE_TIMEOUT" variants="A8XX-"/>
+ <reg32 offset="0x0e11" name="UCHE_UNKNOWN_0E11" variants="A7XX" usage="init"/>
+ <reg32 offset="0x0e11" name="UCHE_CLIENT_PF" variants="A8XX-"/>
<!-- always 0x03200000 ? -->
- <reg32 offset="0x0e12" name="UCHE_UNKNOWN_0E12" usage="cmd"/>
+ <reg32 offset="0x0e12" name="UCHE_UNKNOWN_0E12" variants="A6XX-A7XX" usage="init"/>
+ <reg32 offset="0x0e15" name="UCHE_DBG_ECO_CNTL_0" variants="A8XX-"/>
+ <reg32 offset="0x0e16" name="UCHE_HW_DBG_CNTL" variants="A8XX-"/>
<!-- adreno_reg_xy has 15 bits per coordinate, but a6xx registers only have 14 -->
<bitset name="a6xx_reg_xy" inline="yes">
@@ -814,7 +1414,7 @@ by a particular renderpass/blit.
<bitfield name="Y" low="16" high="29" type="uint"/>
</bitset>
- <reg32 offset="0x8000" name="GRAS_CL_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_cl_cntl" inline="yes">
<bitfield name="CLIP_DISABLE" pos="0" type="boolean"/>
<bitfield name="ZNEAR_CLIP_DISABLE" pos="1" type="boolean"/>
<bitfield name="ZFAR_CLIP_DISABLE" pos="2" type="boolean"/>
@@ -826,18 +1426,33 @@ by a particular renderpass/blit.
<bitfield name="VP_CLIP_CODE_IGNORE" pos="7" type="boolean"/>
<bitfield name="VP_XFORM_DISABLE" pos="8" type="boolean"/>
<bitfield name="PERSP_DIVISION_DISABLE" pos="9" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8000" name="GRAS_CL_CNTL" type="a6xx_gras_cl_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8200" name="GRAS_CL_CNTL" type="a6xx_gras_cl_cntl" variants="A8XX-" usage="rp_blit"/>
<bitset name="a6xx_gras_xs_clip_cull_distance" inline="yes">
<bitfield name="CLIP_MASK" low="0" high="7"/>
<bitfield name="CULL_MASK" low="8" high="15"/>
</bitset>
- <reg32 offset="0x8001" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
- <reg32 offset="0x8002" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
- <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit"/>
- <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit"/>
-
- <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" usage="rp_blit">
+ <reg32 offset="0x8001" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0x8002" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0x8003" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0x8004" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit" variants="A6XX-A7XX" />
+
+ <reg32 offset="0x8201" name="GRAS_CL_VS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A8XX" />
+ <reg32 offset="0x8202" name="GRAS_CL_DS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A8XX" />
+ <reg32 offset="0x8203" name="GRAS_CL_GS_CLIP_CULL_DISTANCE" type="a6xx_gras_xs_clip_cull_distance" usage="rp_blit" variants="A8XX" />
+ <reg32 offset="0x8204" name="GRAS_CL_ARRAY_SIZE" low="0" high="10" type="uint" usage="rp_blit" variants="A8XX" />
+
+ <reg32 offset="0x8228" name="GRAS_UNKNOWN_8228" variants="A8XX-"/>
+ <reg32 offset="0x8229" name="GRAS_UNKNOWN_8229" variants="A8XX-"/>
+ <reg32 offset="0x822a" name="GRAS_UNKNOWN_822A" variants="A8XX-"/>
+ <reg32 offset="0x822b" name="GRAS_UNKNOWN_822B" variants="A8XX-"/>
+ <reg32 offset="0x822c" name="GRAS_UNKNOWN_822C" variants="A8XX-"/>
+ <reg32 offset="0x822d" name="GRAS_UNKNOWN_822D" variants="A8XX-"/>
+
+ <bitset name="a6xx_gras_cl_interp_cntl" inline="yes">
<!-- see also RB_INTERP_CNTL -->
<bitfield name="IJ_PERSP_PIXEL" pos="0" type="boolean"/>
<bitfield name="IJ_PERSP_CENTROID" pos="1" type="boolean"/>
@@ -848,26 +1463,78 @@ by a particular renderpass/blit.
<bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
<bitfield name="UNK10" pos="10" type="boolean" variants="A7XX-"/>
<bitfield name="UNK11" pos="11" type="boolean" variants="A7XX-"/>
- </reg32>
- <reg32 offset="0x8006" name="GRAS_CL_GUARDBAND_CLIP_ADJ" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x8005" name="GRAS_CL_INTERP_CNTL" type="a6xx_gras_cl_interp_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8080" name="GRAS_CL_INTERP_CNTL" type="a6xx_gras_cl_interp_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_cl_guardband_clip_adj" inline="true">
<bitfield name="HORZ" low="0" high="8" type="uint"/>
<bitfield name="VERT" low="10" high="18" type="uint"/>
- </reg32>
+ </bitset>
- <!-- Something connected to depth-stencil attachment size -->
- <reg32 offset="0x8007" name="GRAS_UNKNOWN_8007" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x8006" name="GRAS_CL_GUARDBAND_CLIP_ADJ" type="a6xx_gras_cl_guardband_clip_adj" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8205" name="GRAS_CL_GUARDBAND_CLIP_ADJ" type="a6xx_gras_cl_guardband_clip_adj" variants="A8XX-" usage="rp_blit"/>
- <reg32 offset="0x8008" name="GRAS_UNKNOWN_8008" variants="A7XX-" usage="cmd"/>
+ <!-- the scale/offset is per view, with up to 6 views -->
+ <bitset name="a6xx_gras_bin_foveat" inline="yes">
+ <bitfield name="BINSCALEEN" pos="6" type="boolean"/>
+ <enum name="a7xx_bin_scale">
+ <value value="0" name="NOSCALE"/>
+ <value value="1" name="SCALE2X"/>
+ <value value="2" name="SCALE4X"/>
+ </enum>
+ <bitfield name="XSCALE_0" low="8" high="9" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_0" low="10" high="11" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_1" low="12" high="13" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_1" low="14" high="15" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_2" low="16" high="17" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_2" low="18" high="19" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_3" low="20" high="21" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_3" low="22" high="23" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_4" low="24" high="25" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_4" low="26" high="27" type="a7xx_bin_scale"/>
+ <bitfield name="XSCALE_5" low="28" high="29" type="a7xx_bin_scale"/>
+ <bitfield name="YSCALE_5" low="30" high="31" type="a7xx_bin_scale"/>
+ </bitset>
- <reg32 offset="0x8009" name="GRAS_UNKNOWN_8009" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x800a" name="GRAS_UNKNOWN_800A" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x800b" name="GRAS_UNKNOWN_800B" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x800c" name="GRAS_UNKNOWN_800C" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8008" name="GRAS_BIN_FOVEAT" type="a6xx_gras_bin_foveat" variants="A7XX" usage="cmd"/>
+ <reg32 offset="0x8206" name="GRAS_BIN_FOVEAT" type="a6xx_gras_bin_foveat" variants="A8XX-" usage="cmd"/>
+
+ <reg32 offset="0x8009" name="GRAS_BIN_FOVEAT_OFFSET_0" variants="A7XX-" usage="cmd">
+ <bitfield name="XOFFSET_0" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_1" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_2" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x800a" name="GRAS_BIN_FOVEAT_OFFSET_1" variants="A7XX-" usage="cmd">
+ <bitfield name="XOFFSET_3" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_4" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="XOFFSET_5" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x800b" name="GRAS_BIN_FOVEAT_OFFSET_2" variants="A7XX-" usage="cmd">
+ <bitfield name="YOFFSET_0" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_1" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_2" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
+ <reg32 offset="0x800c" name="GRAS_BIN_FOVEAT_OFFSET_3" variants="A7XX-" usage="cmd">
+ <bitfield name="YOFFSET_3" low="0" high="9" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_4" low="10" high="19" shr="2" type="uint"/>
+ <bitfield name="YOFFSET_5" low="20" high="29" shr="2" type="uint"/>
+ </reg32>
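The four offset registers above pack three 10-bit per-view offsets each, every one stored right-shifted by 2, so offsets must be multiples of 4. A sketch of the packing, assuming that layout; the helper name is mine:

    #include <stdint.h>
    #include <stdio.h>

    /* Three 10-bit per-view offsets per register, each stored >> 2,
     * matching GRAS_BIN_FOVEAT_OFFSET_0..3 above. */
    static uint32_t foveat_offsets(uint32_t o0, uint32_t o1, uint32_t o2)
    {
        return ((o0 >> 2) & 0x3ff) |
               (((o1 >> 2) & 0x3ff) << 10) |
               (((o2 >> 2) & 0x3ff) << 20);
    }

    int main(void)
    {
        /* XOFFSET_0..2 for GRAS_BIN_FOVEAT_OFFSET_0 -> 0x04008010 */
        printf("0x%08x\n", foveat_offsets(64, 128, 256));
        return 0;
    }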
<!-- <reg32 offset="0x80f0" name="GRAS_UNKNOWN_80F0" type="a6xx_reg_xy"/> -->
<!-- 0x8006-0x800f invalid -->
- <array offset="0x8010" name="GRAS_CL_VIEWPORT" stride="6" length="16" usage="rp_blit">
+ <array offset="0x8010" name="GRAS_CL_VIEWPORT" stride="6" length="16" variants="A6XX-A7XX" usage="rp_blit">
+ <reg32 offset="0" name="XOFFSET" type="float"/>
+ <reg32 offset="1" name="XSCALE" type="float"/>
+ <reg32 offset="2" name="YOFFSET" type="float"/>
+ <reg32 offset="3" name="YSCALE" type="float"/>
+ <reg32 offset="4" name="ZOFFSET" type="float"/>
+ <reg32 offset="5" name="ZSCALE" type="float"/>
+ </array>
+
+ <array offset="0x82d0" name="GRAS_CL_VIEWPORT" stride="6" length="16" variants="A8XX-" usage="rp_blit">
<reg32 offset="0" name="XOFFSET" type="float"/>
<reg32 offset="1" name="XSCALE" type="float"/>
<reg32 offset="2" name="YOFFSET" type="float"/>
@@ -875,12 +1542,17 @@ by a particular renderpass/blit.
<reg32 offset="4" name="ZOFFSET" type="float"/>
<reg32 offset="5" name="ZSCALE" type="float"/>
</array>
- <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" usage="rp_blit">
+
+ <array offset="0x8070" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit">
+ <reg32 offset="0" name="MIN" type="float"/>
+ <reg32 offset="1" name="MAX" type="float"/>
+ </array>
+ <array offset="0x80c0" name="GRAS_CL_VIEWPORT_ZCLAMP" stride="2" length="16" variants="A8XX-" usage="rp_blit">
<reg32 offset="0" name="MIN" type="float"/>
<reg32 offset="1" name="MAX" type="float"/>
</array>
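For completeness on the array element syntax: an array with stride S and length N occupies N*S consecutive dwords, and element i's member at relative offset m lives at base + i*S + m. A small sketch using the A8XX viewport and zclamp arrays above (helper name is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Dword address of array element i, member m. */
    static uint32_t array_reg(uint32_t base, uint32_t stride,
                              uint32_t i, uint32_t member)
    {
        return base + i * stride + member;
    }

    int main(void)
    {
        /* A8XX GRAS_CL_VIEWPORT[3].ZSCALE: 0x82d0 + 3*6 + 5 = 0x82e7 */
        printf("0x%04x\n", array_reg(0x82d0, 6, 3, 5));
        /* A8XX GRAS_CL_VIEWPORT_ZCLAMP[3].MAX: 0x80c0 + 3*2 + 1 = 0x80c7 */
        printf("0x%04x\n", array_reg(0x80c0, 2, 3, 1));
        return 0;
    }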
- <reg32 offset="0x8090" name="GRAS_SU_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_su_cntl" varset="chip">
<bitfield name="CULL_FRONT" pos="0" type="boolean"/>
<bitfield name="CULL_BACK" pos="1" type="boolean"/>
<bitfield name="FRONT_CW" pos="2" type="boolean"/>
@@ -890,39 +1562,88 @@ by a particular renderpass/blit.
<bitfield name="LINE_MODE" pos="13" type="a5xx_line_mode"/>
<bitfield name="UNK15" low="15" high="16"/>
<!--
- On gen1 only MULTIVIEW_ENABLE exists. On gen3 we have
- the ability to add the view index to either the RT array
- index or the viewport index, and it seems that
- MULTIVIEW_ENABLE doesn't do anything, instead we need to
- set at least one of RENDERTARGETINDEXINCR or
- VIEWPORTINDEXINCR to enable multiview. The blob still
- sets MULTIVIEW_ENABLE regardless.
- TODO: what about gen2 (a640)?
+ On gen1 only MULTIVIEW_ENABLE exists. On gen3 we have
+ the ability to add the view index to either the RT array
+ index or the viewport index, and it seems that
+ MULTIVIEW_ENABLE doesn't do anything, instead we need to
+ set at least one of RENDERTARGETINDEXINCR or
+ VIEWPORTINDEXINCR to enable multiview. The blob still
+ sets MULTIVIEW_ENABLE regardless.
+ TODO: what about gen2 (a640)?
-->
<bitfield name="MULTIVIEW_ENABLE" pos="17" type="boolean"/>
+ <bitfield name="RENDERTARGETINDEXINCR" pos="18" type="boolean" variants="A6XX-A7XX"/>
+ <bitfield name="VIEWPORTINDEXINCR" pos="19" type="boolean" variants="A6XX-A7XX"/>
+ <bitfield name="UNK20" low="20" high="22" variants="A6XX-A7XX"/>
+ </bitset>
+ <reg32 offset="0x8090" name="GRAS_SU_CNTL" type="a6xx_gras_su_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8209" name="GRAS_SU_CNTL" type="a6xx_gras_su_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <!-- Fields moved from GRAS_SU_CNTL on earlier gens: -->
+ <reg32 offset="0x820c" name="GRAS_SU_STEREO_CNTL" variants="A8XX-" usage="rp_blit">
<bitfield name="RENDERTARGETINDEXINCR" pos="18" type="boolean"/>
<bitfield name="VIEWPORTINDEXINCR" pos="19" type="boolean"/>
- <bitfield name="UNK20" low="20" high="22"/>
</reg32>
- <reg32 offset="0x8091" name="GRAS_SU_POINT_MINMAX" usage="rp_blit">
+
+ <bitset name="a6xx_gras_su_point_minmax" inline="yes">
<bitfield name="MIN" low="0" high="15" type="ufixed" radix="4"/>
<bitfield name="MAX" low="16" high="31" type="ufixed" radix="4"/>
- </reg32>
- <reg32 offset="0x8092" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" usage="rp_blit"/>
+ </bitset>
+
+ <reg32 offset="0x8091" name="GRAS_SU_POINT_MINMAX" type="a6xx_gras_su_point_minmax" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x820a" name="GRAS_SU_POINT_MINMAX" type="a6xx_gras_su_point_minmax" variants="A8XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x8092" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x820b" name="GRAS_SU_POINT_SIZE" low="0" high="15" type="fixed" radix="4" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_depth_cntl" inline="yes">
+ <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8114" name="GRAS_SU_DEPTH_CNTL" variants="A6XX-A7XX" type="a6xx_gras_su_depth_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8086" name="GRAS_SU_DEPTH_CNTL" variants="A8XX-" type="a6xx_gras_su_depth_cntl" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_stencil_cntl" inline="yes">
+ <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" type="a6xx_gras_su_stencil_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8087" name="GRAS_SU_STENCIL_CNTL" type="a6xx_gras_su_stencil_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_render_cntl" inline="yes">
+ <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" type="a6xx_gras_su_render_cntl" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8088" name="GRAS_SU_RENDER_CNTL" type="a6xx_gras_su_render_cntl" variants="A8XX-" usage="rp_blit"/>
+
<!-- 0x8093 invalid -->
- <reg32 offset="0x8094" name="GRAS_SU_DEPTH_PLANE_CNTL" usage="rp_blit">
+ <bitset name="a6xx_depth_plane_cntl" inline="yes">
<bitfield name="Z_MODE" low="0" high="1" type="a6xx_ztest_mode"/>
- </reg32>
- <reg32 offset="0x8095" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" usage="rp_blit"/>
- <reg32 offset="0x8096" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" usage="rp_blit"/>
- <reg32 offset="0x8097" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" usage="rp_blit"/>
- <!-- duplicates RB_DEPTH_BUFFER_INFO: -->
- <reg32 offset="0x8098" name="GRAS_SU_DEPTH_BUFFER_INFO" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x8094" name="GRAS_SU_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8089" name="GRAS_SU_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x8095" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x808a" name="GRAS_SU_POLY_OFFSET_SCALE" type="float" variants="A8XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x8096" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x808b" name="GRAS_SU_POLY_OFFSET_OFFSET" type="float" variants="A8XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x8097" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x808c" name="GRAS_SU_POLY_OFFSET_OFFSET_CLAMP" type="float" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_depth_buffer_info" inline="yes">
<bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
- <bitfield name="UNK3" pos="3"/>
- </reg32>
+ <bitfield name="READ_ONLY" pos="3" type="boolean"/>
+ </bitset>
- <reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" usage="cmd">
+ <!-- duplicates RB_DEPTH_BUFFER_INFO: -->
+ <reg32 offset="0x8098" name="GRAS_SU_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x808d" name="GRAS_SU_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_su_conservative_ras_cntl" inline="yes">
<bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
<enum name="a6xx_shift_amount">
<value value="0" name="NO_SHIFT"/>
@@ -932,7 +1653,11 @@ by a particular renderpass/blit.
<bitfield name="SHIFTAMOUNT" low="1" high="2" type="a6xx_shift_amount"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="5"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8099" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_gras_su_conservative_ras_cntl" variants="A6XX-A7XX" usage="cmd"/>
+ <reg32 offset="0x820d" name="GRAS_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_gras_su_conservative_ras_cntl" variants="A8XX-" usage="cmd"/>
+
<reg32 offset="0x809a" name="GRAS_SU_PATH_RENDERING_CNTL">
<bitfield name="UNK0" pos="0" type="boolean"/>
<bitfield name="LINELENGTHEN" pos="1" type="boolean"/>
@@ -942,10 +1667,19 @@ by a particular renderpass/blit.
<bitfield name="WRITES_LAYER" pos="0" type="boolean"/>
<bitfield name="WRITES_VIEW" pos="1" type="boolean"/>
</bitset>
- <reg32 offset="0x809b" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" usage="rp_blit"/>
- <!-- 0x809e/0x809f invalid -->
+ <reg32 offset="0x809b" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x809c" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x809d" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x820e" name="GRAS_SU_VS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A8XX" usage="rp_blit"/>
+ <reg32 offset="0x820f" name="GRAS_SU_GS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A8XX" usage="rp_blit"/>
+ <reg32 offset="0x8210" name="GRAS_SU_DS_SIV_CNTL" type="a6xx_gras_us_xs_siv_cntl" variants="A8XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_rast_cntl" inline="yes">
+ <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
+ </bitset>
+
+ <reg32 offset="0x8211" name="GRAS_RAST_CNTL" type="a6xx_rast_cntl" variants="A8XX-" usage="rp_blit"/>
<enum name="a6xx_sequenced_thread_dist">
<value value="0x0" name="DIST_SCREEN_COORD"/>
@@ -993,8 +1727,7 @@ by a particular renderpass/blit.
<value value="0x3" name="RB_BT"/>
</enum>
- <reg32 offset="0x80a0" name="GRAS_SC_CNTL" usage="rp_blit">
- <bitfield name="CCUSINGLECACHELINESIZE" low="0" high="2"/>
+ <bitset name="a6xx_gras_sc_cntl" inline="yes">
<bitfield name="SINGLE_PRIM_MODE" low="3" high="4" type="a6xx_single_prim_mode"/>
<bitfield name="RASTER_MODE" pos="5" type="a6xx_raster_mode"/>
<bitfield name="RASTER_DIRECTION" low="6" high="7" type="a6xx_raster_direction"/>
@@ -1003,7 +1736,12 @@ by a particular renderpass/blit.
<bitfield name="UNK9" pos="9" type="boolean"/>
<bitfield name="ROTATION" low="10" high="11" type="uint"/>
<bitfield name="EARLYVIZOUTEN" pos="12" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x80a0" name="GRAS_SC_CNTL" type="a6xx_gras_sc_cntl" variants="A6XX-A7XX" usage="rp_blit">
+ <bitfield name="CCUSINGLECACHELINESIZE" low="0" high="2" variants="A6XX-A7XX"/>
</reg32>
+ <reg32 offset="0x8230" name="GRAS_SC_CNTL" type="a6xx_gras_sc_cntl" variants="A8XX-" usage="rp_blit"/>
<enum name="a6xx_render_mode">
<value value="0x0" name="RENDERING_PASS"/>
@@ -1024,7 +1762,7 @@ by a particular renderpass/blit.
<value value="0x4" name="LRZ_FEEDBACK_LATE_Z"/>
</enum>
- <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" usage="rp_blit">
+ <bitset name="a6xx_bin_cntl" inline="yes">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
@@ -1037,18 +1775,49 @@ by a particular renderpass/blit.
In sysmem mode GRAS_LRZ_CNTL.LRZ_WRITE is not considered.
</doc>
<bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
- <bitfield name="UNK27" pos="27"/>
+ <bitfield name="FORCE_LRZ_DIS" pos="27" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x80a1" name="GRAS_SC_BIN_CNTL" type="a6xx_bin_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <!-- Common fields for RB_CNTL and GRAS_SC_BIN_CNTL -->
+ <bitset name="a8xx_bin_cntl" inline="yes">
+ <bitfield name="BINW" low="0" high="9" shr="5" type="uint"/>
+ <bitfield name="BINH" low="16" high="26" shr="4" type="uint"/>
+ <bitfield name="RENDER_MODE" low="11" high="13" type="a6xx_render_mode"/>
+ <doc>
+ Allows draws that don't have GRAS_LRZ_CNTL.LRZ_WRITE but have
+ GRAS_LRZ_CNTL.ENABLE to contribute to LRZ during RENDERING pass.
+ In sysmem mode GRAS_LRZ_CNTL.LRZ_WRITE is not considered.
+ </doc>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="28" high="30" type="a6xx_lrz_feedback_mask"/>
+ <doc>Disable LRZ feedback writes</doc>
+ <bitfield name="FORCE_LRZ_WRITE_DIS" pos="31" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x8231" name="GRAS_SC_BIN_CNTL" type="a8xx_bin_cntl" variants="A8XX-" usage="rp_blit">
+ <bitfield name="CONS_VIS_IN_BINNING" pos="10" type="boolean"/>
+ <bitfield name="FORCE_BI_DIR_LRZ_DISABLE" pos="14" type="boolean"/>
+ <bitfield name="FORCE_LRZ_DIS" pos="15" type="boolean"/>
+ <bitfield name="BIN_VRS_DIS" pos="27" type="boolean"/>
</reg32>
- <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_sc_ras_msaa_cntl" inline="yes">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" pos="2"/>
<bitfield name="UNK3" pos="3"/>
- </reg32>
- <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80a2" name="GRAS_SC_RAS_MSAA_CNTL" type="a6xx_gras_sc_ras_msaa_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8232" name="GRAS_SC_RAS_MSAA_CNTL" type="a6xx_gras_sc_ras_msaa_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_sc_dest_msaa_cntl" inline="yes">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="MSAA_DISABLE" pos="2" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x80a3" name="GRAS_SC_DEST_MSAA_CNTL" type="a6xx_gras_sc_dest_msaa_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8233" name="GRAS_SC_DEST_MSAA_CNTL" type="a6xx_gras_sc_dest_msaa_cntl" variants="A8XX-" usage="rp_blit"/>
<bitset name="a6xx_msaa_sample_pos_cntl" inline="yes">
<bitfield name="UNK0" pos="0"/>
@@ -1066,30 +1835,55 @@ by a particular renderpass/blit.
<bitfield name="SAMPLE_3_Y" low="28" high="31" radix="4" type="fixed"/>
</bitset>
- <reg32 offset="0x80a4" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
- <reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
- <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0x80a4" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x80a5" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x80a6" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x8237" name="GRAS_SC_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8238" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8239" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x823a" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_2" type="a6xx_programmable_msaa_pos" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x823b" name="GRAS_SC_PROGRAMMABLE_MSAA_POS_3" type="a6xx_programmable_msaa_pos" variants="A8XX-" usage="rp_blit"/>
- <reg32 offset="0x80a7" name="GRAS_UNKNOWN_80A7" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x80a7" name="GRAS_ROTATION_CNTL" variants="A7XX" usage="cmd"/>
+ <reg32 offset="0x8207" name="GRAS_ROTATION_CNTL" variants="A8XX-" usage="cmd"/>
+
+ <bitset name="a6xx_screen_scissor_cntl" inline="yes">
+ <bitfield name="SCISSOR_DISABLE" pos="0" type="boolean"/>
+ </bitset>
- <!-- 0x80a7-0x80ae invalid -->
- <reg32 offset="0x80af" name="GRAS_UNKNOWN_80AF" pos="0" usage="cmd"/>
+ <reg32 offset="0x80af" name="GRAS_SC_SCREEN_SCISSOR_CNTL" type="a6xx_screen_scissor_cntl" variants="A6XX-A7XX" pos="0" usage="cmd"/>
+ <reg32 offset="0x8234" name="GRAS_SC_SCREEN_SCISSOR_CNTL" type="a6xx_screen_scissor_cntl" variants="A8XX-" pos="0" usage="cmd"/>
<bitset name="a6xx_scissor_xy" inline="yes">
<bitfield name="X" low="0" high="15" type="uint"/>
<bitfield name="Y" low="16" high="31" type="uint"/>
</bitset>
- <array offset="0x80b0" name="GRAS_SC_SCREEN_SCISSOR" stride="2" length="16" usage="rp_blit">
+
+ <array offset="0x80b0" name="GRAS_SC_SCREEN_SCISSOR" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit">
+ <reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
+ <reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
+ </array>
+
+ <array offset="0x8240" name="GRAS_SC_SCREEN_SCISSOR" stride="2" length="16" variants="A8XX-" usage="rp_blit">
+ <reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
+ <reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
+ </array>
+
+ <array offset="0x80d0" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" variants="A6XX-A7XX" usage="rp_blit">
<reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
<reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
</array>
- <array offset="0x80d0" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" usage="rp_blit">
+ <array offset="0x8270" name="GRAS_SC_VIEWPORT_SCISSOR" stride="2" length="16" variants="A8XX-" usage="rp_blit">
<reg32 offset="0" name="TL" type="a6xx_scissor_xy"/>
<reg32 offset="1" name="BR" type="a6xx_scissor_xy"/>
</array>
- <reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x80f0" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x80f1" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x8235" name="GRAS_SC_WINDOW_SCISSOR_TL" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8236" name="GRAS_SC_WINDOW_SCISSOR_BR" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/>
<enum name="a6xx_fsr_combiner">
<value value="0" name="FSR_COMBINER_OP_KEEP"/>
@@ -1099,7 +1893,7 @@ by a particular renderpass/blit.
<value value="4" name="FSR_COMBINER_OP_MUL"/>
</enum>
- <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" variants="A7XX-" usage="rp_blit">
+ <bitset name="a6xx_gras_vrs_config">
<bitfield name="PIPELINE_FSR_ENABLE" pos="0" type="boolean"/>
<bitfield name="FRAG_SIZE_X" low="1" high="2" type="uint"/>
<bitfield name="FRAG_SIZE_Y" low="3" high="4" type="uint"/>
@@ -1107,20 +1901,37 @@ by a particular renderpass/blit.
<bitfield name="COMBINER_OP_2" low="8" high="10" type="a6xx_fsr_combiner"/>
<bitfield name="ATTACHMENT_FSR_ENABLE" pos="13" type="boolean"/>
<bitfield name="PRIMITIVE_FSR_ENABLE" pos="20" type="boolean"/>
- </reg32>
- <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80f4" name="GRAS_VRS_CONFIG" type="a6xx_gras_vrs_config" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8208" name="GRAS_VRS_CONFIG" type="a6xx_gras_vrs_config" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_quality_buffer_info" inline="yes">
<bitfield name="LAYERED" pos="0" type="boolean"/>
<bitfield name="TILE_MODE" low="1" high="2" type="a6xx_tile_mode"/>
- </reg32>
- <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" variants="A7XX-" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80f5" name="GRAS_QUALITY_BUFFER_INFO" type="a6xx_gras_quality_buffer_info" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x808e" name="GRAS_QUALITY_BUFFER_INFO" type="a6xx_gras_quality_buffer_info" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_quality_buffer_dimension" inline="yes">
<bitfield name="WIDTH" low="0" high="15" type="uint"/>
<bitfield name="HEIGHT" low="16" high="31" type="uint"/>
- </reg32>
- <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX-" type="waddress" usage="rp_blit"/>
- <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" variants="A7XX-" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x80f6" name="GRAS_QUALITY_BUFFER_DIMENSION" type="a6xx_gras_quality_buffer_dimension" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x808f" name="GRAS_QUALITY_BUFFER_DIMENSION" type="a6xx_gras_quality_buffer_dimension" variants="A8XX-" usage="rp_blit"/>
+
+ <reg64 offset="0x80f8" name="GRAS_QUALITY_BUFFER_BASE" variants="A7XX" type="waddress" usage="rp_blit"/>
+ <reg64 offset="0x8090" name="GRAS_QUALITY_BUFFER_BASE" variants="A8XX-" type="waddress" usage="rp_blit"/>
+
+ <bitset name="a6xx_gras_quality_buffer_pitch" inline="yes">
<bitfield name="PITCH" shr="6" low="0" high="7" type="uint"/>
<bitfield name="ARRAY_PITCH" shr="6" low="10" high="28" type="uint"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x80fa" name="GRAS_QUALITY_BUFFER_PITCH" type="a6xx_gras_quality_buffer_pitch" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8092" name="GRAS_QUALITY_BUFFER_PITCH" type="a6xx_gras_quality_buffer_pitch" variants="A8XX-" usage="rp_blit"/>
<enum name="a6xx_lrz_dir_status">
<value value="0x1" name="LRZ_DIR_LE"/>
@@ -1128,7 +1939,7 @@ by a particular renderpass/blit.
<value value="0x3" name="LRZ_DIR_INVALID"/>
</enum>
- <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_cntl" inline="yes">
<bitfield name="ENABLE" pos="0" type="boolean"/>
<doc>LRZ write also disabled for blend/etc.</doc>
<bitfield name="LRZ_WRITE" pos="1" type="boolean"/>
@@ -1139,7 +1950,6 @@ by a particular renderpass/blit.
- 0.0 if GREATER
- 1.0 if LESS
</doc>
- <bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/>
<!-- set when depth-test + depth-write enabled -->
<bitfield name="Z_WRITE_ENABLE" pos="4" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="5" type="boolean"/>
@@ -1153,8 +1963,26 @@ by a particular renderpass/blit.
Disable LRZ based on previous direction and the current one.
If DIR_WRITE is not enabled - there is no write to direction buffer.
</doc>
- <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean" variants="A6XX"/>
<bitfield name="Z_FUNC" low="11" high="13" type="adreno_compare_func" variants="A7XX-"/>
+ </bitset>
+
+ <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" type="a6xx_gras_lrz_cntl" usage="rp_blit" variants="A6XX">
+ <bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/>
+ <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean" variants="A6XX"/>
+ </reg32>
+ <reg32 offset="0x8100" name="GRAS_LRZ_CNTL" type="a6xx_gras_lrz_cntl" usage="rp_blit" variants="A7XX"/>
+ <reg32 offset="0x8212" name="GRAS_LRZ_CNTL" type="a6xx_gras_lrz_cntl" usage="rp_blit" variants="A8XX-"/>
+
+ <reg32 offset="0x8007" name="GRAS_LRZ_CB_CNTL" variants="A7XX" usage="rp_blit">
+ <doc>
+ The total size of the LRZ image array (not including
+ the fast clear buffer), used as the stride for the
+ double buffering employed with concurrent binning.
+ </doc>
+ <bitfield name="DOUBLE_BUFFER_STRIDE" low="8" high="31" shr="8"/>
+ </reg32>
+ <reg32 offset="0x8101" name="GRAS_LRZ_CB_CNTL" usage="rp_blit" variants="A8XX-">
+ <bitfield name="DOUBLE_BUFFER_PITCH" low="8" high="31" shr="8"/>
</reg32>
<enum name="a6xx_fragcoord_sample_mode">
@@ -1162,19 +1990,33 @@ by a particular renderpass/blit.
<value value="3" name="FRAGCOORD_SAMPLE"/>
</enum>
- <reg32 offset="0x8101" name="GRAS_LRZ_PS_INPUT_CNTL" low="0" high="2" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_ps_input_cntl" inline="yes">
<bitfield name="SAMPLEID" pos="0" type="boolean"/>
<bitfield name="FRAGCOORDSAMPLEMODE" low="1" high="2" type="a6xx_fragcoord_sample_mode"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8101" name="GRAS_LRZ_PS_INPUT_CNTL" type="a6xx_gras_lrz_ps_input_cntl" usage="rp_blit" variants="A6XX-A7XX"/>
+ <reg32 offset="0x8102" name="GRAS_LRZ_PS_INPUT_CNTL" type="a6xx_gras_lrz_ps_input_cntl" usage="rp_blit" variants="A8XX-"/>
- <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" usage="rp_blit">
+ <bitset name="a6xx_gras_lrz_mrt_buffer_info_0" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
- </reg32>
- <reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit"/>
- <reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x8102" name="GRAS_LRZ_MRT_BUFFER_INFO_0" type="a6xx_gras_lrz_mrt_buffer_info_0" usage="rp_blit" variants="A6XX-A7XX"/>
+ <reg32 offset="0x8103" name="GRAS_LRZ_MRT_BUFFER_INFO_0" type="a6xx_gras_lrz_mrt_buffer_info_0" usage="rp_blit" variants="A8XX-"/>
+
+ <reg64 offset="0x8103" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit" variants="A6XX-A7XX"/>
+ <reg64 offset="0x8104" name="GRAS_LRZ_BUFFER_BASE" align="256" type="waddress" usage="rp_blit" variants="A8XX-"/>
+
+ <bitset name="a6xx_gras_lrz_buffer_pitch" inline="yes">
<bitfield name="PITCH" low="0" high="7" shr="5" type="uint"/>
<bitfield name="ARRAY_PITCH" low="10" high="28" shr="8" type="uint"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x8105" name="GRAS_LRZ_BUFFER_PITCH" type="a6xx_gras_lrz_buffer_pitch" usage="rp_blit" variants="A6XX-A7XX"/>
+ <reg32 offset="0x8108" name="GRAS_LRZ_BUFFER_PITCH" type="a6xx_gras_lrz_buffer_pitch" usage="rp_blit" variants="A8XX-"/>
+
+ <reg32 offset="0x810e" name="GRAS_LRZ_BUFFER_STRIDE" usage="rp_blit" low="0" high="16" shr="12" variants="A8XX-"/>
<!--
The LRZ "fast clear" buffer is initialized to zero's by blob, and
@@ -1207,7 +2049,6 @@ by a particular renderpass/blit.
not.
-->
<reg64 offset="0x8106" name="GRAS_LRZ_FAST_CLEAR_BUFFER_BASE" align="64" type="waddress" usage="rp_blit"/>
- <!-- 0x8108 invalid -->
<reg32 offset="0x8109" name="GRAS_LRZ_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
@@ -1232,21 +2073,32 @@ by a particular renderpass/blit.
<!-- 0x810c-0x810f invalid -->
- <reg32 offset="0x8110" name="GRAS_UNKNOWN_8110" low="0" high="1" usage="cmd"/>
+ <reg32 offset="0x8110" name="GRAS_LRZ_BUFFER_SLICE_PITCH" low="0" high="31" shr="8" type="uint" variants="A8XX-"/>
+
+ <reg32 offset="0x8110" name="GRAS_MODE_CNTL" low="0" high="1" variants="A6XX-A7XX" usage="cmd"/>
+ <reg32 offset="0x8213" name="GRAS_MODE_CNTL" low="0" high="1" variants="A8XX-" usage="cmd"/>
<!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
- <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX-"/>
+ <reg32 offset="0x8111" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A7XX"/>
+ <reg32 offset="0x810d" name="GRAS_LRZ_DEPTH_CLEAR" type="float" variants="A8XX-"/>
- <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
- <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
- <bitfield name="UNK3" pos="3"/>
- </reg32>
+ <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x810f" name="GRAS_LRZ_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A8XX" usage="rp_blit"/>
- <!-- Always written together and always equal 09510840 00000a62 -->
- <reg32 offset="0x8120" name="GRAS_UNKNOWN_8120" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0x8121" name="GRAS_UNKNOWN_8121" variants="A7XX-" usage="cmd"/>
+ <doc>LUT used to convert quality buffer values to HW shading rate values. An array of 4-bit values.</doc>
+ <array offset="0x8120" name="GRAS_LRZ_QUALITY_LOOKUP_TABLE" variants="A7XX" stride="1" length="2"/>
+ <array offset="0x8130" name="GRAS_LRZ_QUALITY_LOOKUP_TABLE" variants="A8XX-" stride="1" length="2"/>
- <!-- 0x8112-0x83ff invalid -->
+ <reg32 offset="0x810c" name="GRAS_LRZ_COLOR_COMP_MASK" variants="A8XX-">
+ <bitfield name="MRT0" low="0" high="3"/>
+ <bitfield name="MRT1" low="4" high="7"/>
+ <bitfield name="MRT2" low="8" high="11"/>
+ <bitfield name="MRT3" low="12" high="15"/>
+ <bitfield name="MRT4" low="16" high="19"/>
+ <bitfield name="MRT5" low="20" high="23"/>
+ <bitfield name="MRT6" low="24" high="27"/>
+ <bitfield name="MRT7" low="28" high="31"/>
+ </reg32>
<enum name="a6xx_rotation">
<value value="0x0" name="ROTATE_0"/>
@@ -1257,7 +2109,7 @@ by a particular renderpass/blit.
<value value="0x5" name="ROTATE_VFLIP"/>
</enum>
- <bitset name="a6xx_a2d_bit_cntl" inline="yes">
+ <bitset name="a6xx_a2d_blt_cntl" inline="yes">
<bitfield name="ROTATE" low="0" high="2" type="a6xx_rotation"/>
<bitfield name="OVERWRITEEN" pos="3" type="boolean"/>
<bitfield name="UNK4" low="4" high="6"/>
@@ -1269,61 +2121,67 @@ by a particular renderpass/blit.
<bitfield name="D24S8" pos="19" type="boolean"/>
<!-- some sort of channel mask, disabled channels are set to zero ? -->
<bitfield name="MASK" low="20" high="23"/>
- <bitfield name="IFMT" low="24" high="28" type="a6xx_2d_ifmt"/>
+ <bitfield name="IFMT" low="24" high="26" type="a6xx_2d_ifmt"/>
+ <bitfield name="UNK27" pos="27" type="boolean"/>
+ <bitfield name="UNK28" pos="28" type="boolean"/>
<bitfield name="RASTER_MODE" pos="29" type="a6xx_raster_mode"/>
- <bitfield name="UNK30" pos="30" type="boolean" variants="A7XX-"/>
+ <bitfield name="COPY" pos="30" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8400" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_blt_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<!-- note: the low 8 bits for src coords are valid, probably fixed point
it would be a bit weird though, since we subtract 1 from BR coords
apparently signed, gallium driver uses negative coords and it works?
-->
- <reg32 offset="0x8401" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8402" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8403" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8404" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" usage="rp_blit"/>
- <reg32 offset="0x8405" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x8406" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0x8401" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8402" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8403" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8404" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8405" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x8406" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
<reg32 offset="0x8407" name="GRAS_2D_UNKNOWN_8407" low="0" high="31"/>
<reg32 offset="0x8408" name="GRAS_2D_UNKNOWN_8408" low="0" high="31"/>
<reg32 offset="0x8409" name="GRAS_2D_UNKNOWN_8409" low="0" high="31"/>
- <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" usage="rp_blit"/>
- <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" usage="rp_blit"/>
- <!-- 0x840c-0x85ff invalid -->
+ <reg32 offset="0x840a" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x840b" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x8500" name="GRAS_A2D_BLT_CNTL" type="a6xx_a2d_blt_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8501" name="GRAS_A2D_SRC_XMIN" low="8" high="24" type="int" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8502" name="GRAS_A2D_SRC_XMAX" low="8" high="24" type="int" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8503" name="GRAS_A2D_SRC_YMIN" low="8" high="24" type="int" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8504" name="GRAS_A2D_SRC_YMAX" low="8" high="24" type="int" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8505" name="GRAS_A2D_DEST_TL" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8506" name="GRAS_A2D_DEST_BR" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8507" name="GRAS_A2D_SCISSOR_TL" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x8508" name="GRAS_A2D_SCISSOR_BR" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/>
<!-- always 0x880 ? (and 0 in a640/a650 traces?) -->
- <reg32 offset="0x8600" name="GRAS_DBG_ECO_CNTL" usage="cmd">
+ <reg32 offset="0x8600" name="GRAS_DBG_ECO_CNTL" usage="init" variants="A6XX-A7XX">
<bitfield name="UNK7" pos="7" type="boolean"/>
<bitfield name="LRZCACHELOCKDIS" pos="11" type="boolean"/>
</reg32>
- <reg32 offset="0x8601" name="GRAS_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
- <reg32 offset="0x8602" name="GRAS_NC_MODE_CNTL" variants="A7XX-"/>
- <array offset="0x8610" name="GRAS_PERFCTR_TSE_SEL" stride="1" length="4"/>
- <array offset="0x8614" name="GRAS_PERFCTR_RAS_SEL" stride="1" length="4"/>
- <array offset="0x8618" name="GRAS_PERFCTR_LRZ_SEL" stride="1" length="4"/>
+ <reg32 offset="0x8600" name="GRAS_TSEFE_DBG_ECO_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x8702" name="GRAS_DBG_ECO_CNTL" variants="A8XX"/>
+ <reg32 offset="0x8601" name="GRAS_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" variants="A6XX"/>
+ <reg32 offset="0x8602" name="GRAS_NC_MODE_CNTL" variants="A7XX"/>
+ <reg32 offset="0x8700" name="GRAS_NC_MODE_CNTL" variants="A8XX-"/>
+ <array offset="0x8610" name="GRAS_PERFCTR_TSE_SEL" stride="1" length="4" variants="A6XX-A7XX"/>
+ <array offset="0x8614" name="GRAS_PERFCTR_RAS_SEL" stride="1" length="4" variants="A6XX-A7XX"/>
+ <array offset="0x8618" name="GRAS_PERFCTR_LRZ_SEL" stride="1" length="4" variants="A6XX-A7XX"/>
+
+ <array offset="0x8610" name="GRAS_PERFCTR_TSEFE_SEL" stride="1" length="6" variants="A8XX-"/>
+ <array offset="0x8710" name="GRAS_PERFCTR_TSE_SEL" stride="1" length="6" variants="A8XX-"/>
+ <array offset="0x8720" name="GRAS_PERFCTR_RAS_SEL" stride="1" length="4" variants="A8XX-"/>
+ <array offset="0x8730" name="GRAS_PERFCTR_LRZ_SEL" stride="1" length="6" variants="A8XX-"/>
+
<!-- note 0x8620-0x87ff are not all invalid
(in particular, 0x8631/0x8632 have 0x3fff3fff mask and would be xy coords)
-->
<!-- same as GRAS_BIN_CONTROL, but without bit 27: -->
- <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX" usage="rp_blit">
- <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
- <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
- <bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
- <bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
- <bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
- </reg32>
-
- <reg32 offset="0x8800" name="RB_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
- <bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
- <bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
- <bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
- </reg32>
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A6XX-A7XX" type="a6xx_bin_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8800" name="RB_CNTL" variants="A8XX-" type="a8xx_bin_cntl" usage="rp_blit"/>
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
<bitfield name="CCUSINGLECACHELINESIZE" low="3" high="5"/>
@@ -1347,9 +2205,6 @@ by a particular renderpass/blit.
<bitfield name="CONSERVATIVERASEN" pos="11" type="boolean"/>
<bitfield name="INNERCONSERVATIVERASEN" pos="12" type="boolean"/>
</reg32>
- <reg32 offset="0x8116" name="GRAS_SU_RENDER_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="FS_DISABLE" pos="7" type="boolean"/>
- </reg32>
<reg32 offset="0x8802" name="RB_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
@@ -1364,7 +2219,8 @@ by a particular renderpass/blit.
<reg32 offset="0x8804" name="RB_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
<reg32 offset="0x8805" name="RB_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
<reg32 offset="0x8806" name="RB_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
- <!-- 0x8807-0x8808 invalid -->
+ <reg32 offset="0x8807" name="RB_PROGRAMMABLE_MSAA_POS_2" type="a6xx_programmable_msaa_pos" usage="rp_blit" variants="A8XX-"/>
+ <reg32 offset="0x8808" name="RB_PROGRAMMABLE_MSAA_POS_3" type="a6xx_programmable_msaa_pos" usage="rp_blit" variants="A8XX-"/>
<!--
note: maybe not actually called RB_RENDER_CONTROLn (since RB_RENDER_CNTL
name comes from kernel and is probably right)
@@ -1378,7 +2234,7 @@ by a particular renderpass/blit.
<bitfield name="IJ_LINEAR_CENTROID" pos="4" type="boolean"/>
<bitfield name="IJ_LINEAR_SAMPLE" pos="5" type="boolean"/>
<bitfield name="COORD_MASK" low="6" high="9" type="hex"/>
- <bitfield name="UNK10" pos="10" type="boolean"/>
+ <bitfield name="INTERP_EN" pos="10" type="boolean"/>
</reg32>
<reg32 offset="0x880a" name="RB_PS_INPUT_CNTL" usage="rp_blit">
<!-- enable bits for various FS sysvalue regs: -->
@@ -1436,8 +2292,32 @@ by a particular renderpass/blit.
<reg32 offset="0x8810" name="RB_PS_SAMPLEFREQ_CNTL" usage="rp_blit">
<bitfield name="PER_SAMP_MODE" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0x8811" name="RB_UNKNOWN_8811" low="4" high="6" usage="cmd"/>
- <reg32 offset="0x8812" name="RB_UNKNOWN_8812" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x8811" name="RB_MODE_CNTL" low="4" high="6" usage="cmd"/>
+ <reg32 offset="0x8812" name="RB_BUFFER_CNTL" variants="A7XX-" usage="rp_blit">
+ <bitfield name="Z_SYSMEM" pos="0" type="boolean"/>
+ <bitfield name="S_SYSMEM" pos="1" type="boolean"/>
+ <bitfield name="RT0_SYSMEM" pos="2" type="boolean"/>
+ <bitfield name="RT1_SYSMEM" pos="3" type="boolean"/>
+ <bitfield name="RT2_SYSMEM" pos="4" type="boolean"/>
+ <bitfield name="RT3_SYSMEM" pos="5" type="boolean"/>
+ <bitfield name="RT4_SYSMEM" pos="6" type="boolean"/>
+ <bitfield name="RT5_SYSMEM" pos="7" type="boolean"/>
+ <bitfield name="RT6_SYSMEM" pos="8" type="boolean"/>
+ <bitfield name="RT7_SYSMEM" pos="9" type="boolean"/>
+ <bitfield name="Z_FULL_IN_GMEM" pos="10" type="boolean" variants="A8XX-"/>
+ <bitfield name="S_FULL_IN_GMEM" pos="11" type="boolean" variants="A8XX-"/>
+ <bitfield name="RT0_FULL_IN_GMEM" pos="12" type="boolean" variants="A8XX-"/>
+ <bitfield name="RT1_FULL_IN_GMEM" pos="13" type="boolean" variants="A8XX-"/>
+ <bitfield name="RT2_FULL_IN_GMEM" pos="14" type="boolean" variants="A8XX-"/>
+ <bitfield name="RT3_FULL_IN_GMEM" pos="15" type="boolean" variants="A8XX-"/>
+ <bitfield name="RT4_FULL_IN_GMEM" pos="16" type="boolean" variants="A8XX-"/>
+ <bitfield name="RT5_FULL_IN_GMEM" pos="17" type="boolean" variants="A8XX-"/>
+ <bitfield name="RT6_FULL_IN_GMEM" pos="18" type="boolean" variants="A8XX-"/>
+ <bitfield name="RT7_FULL_IN_GMEM" pos="19" type="boolean" variants="A8XX-"/>
+ </reg32>
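+ <!--
+    e.g. "RT0 in sysmem, depth fully in GMEM" on A8XX would be
+    RT0_SYSMEM | Z_FULL_IN_GMEM = (1 << 2) | (1 << 10) = 0x404.
+ -->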
+
+ <reg32 offset="0x8816" name="RB_RESOLVE_CR_CNTL" variants="A8XX-" usage="rp_blit"/>
+
<!-- 0x8813-0x8817 invalid -->
<!-- always 0x0 ? -->
<reg32 offset="0x8818" name="RB_UNKNOWN_8818" low="0" high="6" usage="cmd"/>
@@ -1448,11 +2328,17 @@ by a particular renderpass/blit.
<reg32 offset="0x881c" name="RB_UNKNOWN_881C" usage="cmd"/>
<reg32 offset="0x881d" name="RB_UNKNOWN_881D" usage="cmd"/>
<reg32 offset="0x881e" name="RB_UNKNOWN_881E" usage="cmd"/>
- <!-- 0x881f invalid -->
+
+ <!-- Duplicates fields from SP_PS_CNTL_0 -->
+ <reg32 offset="0x881f" name="RB_PS_CNTL" variants="A8XX-" usage="rp_blit">
+ <bitfield name="PIXLODENABLE" pos="0" type="boolean"/>
+ <bitfield name="LODPIXMASK" pos="1" type="boolean"/>
+ </reg32>
+
<array offset="0x8820" name="RB_MRT" stride="8" length="8" usage="rp_blit">
<reg32 offset="0x0" name="CONTROL">
- <bitfield name="BLEND" pos="0" type="boolean"/>
- <bitfield name="BLEND2" pos="1" type="boolean"/>
+ <bitfield name="COLOR_BLEND_EN" pos="0" type="boolean"/>
+ <bitfield name="ALPHA_BLEND_EN" pos="1" type="boolean"/>
<bitfield name="ROP_ENABLE" pos="2" type="boolean"/>
<bitfield name="ROP_CODE" low="3" high="6" type="a3xx_rop_code"/>
<bitfield name="COMPONENT_ENABLE" low="7" high="10" type="hex"/>
@@ -1515,10 +2401,10 @@ by a particular renderpass/blit.
<bitfield name="ALPHA_TO_ONE" pos="11" type="boolean"/>
<bitfield name="SAMPLE_MASK" low="16" high="31"/>
</reg32>
- <!-- 0x8866-0x886f invalid -->
- <reg32 offset="0x8870" name="RB_DEPTH_PLANE_CNTL" usage="rp_blit">
- <bitfield name="Z_MODE" low="0" high="1" type="a6xx_ztest_mode"/>
+ <reg32 offset="0x8866" name="RB_LB_PARAM_LIMIT" variants="A8XX-" usage="rp_blit">
+ <bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
</reg32>
+ <reg32 offset="0x8870" name="RB_DEPTH_PLANE_CNTL" type="a6xx_depth_plane_cntl" usage="rp_blit"/>
<reg32 offset="0x8871" name="RB_DEPTH_CNTL" usage="rp_blit">
<bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
@@ -1531,19 +2417,15 @@ by a particular renderpass/blit.
</doc>
<bitfield name="Z_READ_ENABLE" pos="6" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="7" type="boolean"/>
+ <!-- clamp shader depth output to [0, 1] (instead of RB_VIEWPORT_ZCLAMP_MIN/MAX) -->
+ <bitfield name="O_DEPTH_01_CLAMP_EN" pos="8" type="boolean" variants="A8XX-"/>
</reg32>
- <reg32 offset="0x8114" name="GRAS_SU_DEPTH_CNTL" usage="rp_blit">
- <bitfield name="Z_TEST_ENABLE" pos="0" type="boolean"/>
- </reg32>
+
<!-- duplicates GRAS_SU_DEPTH_BUFFER_INFO: -->
- <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A6XX" usage="rp_blit">
- <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
- <bitfield name="UNK3" low="3" high="4"/>
- </reg32>
+ <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A6XX" type="a6xx_depth_buffer_info" usage="rp_blit"/>
<!-- first 4 bits duplicates GRAS_SU_DEPTH_BUFFER_INFO -->
- <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
- <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
- <bitfield name="UNK3" low="3" high="4"/>
+ <reg32 offset="0x8872" name="RB_DEPTH_BUFFER_INFO" type="a6xx_depth_buffer_info" variants="A7XX-" usage="rp_blit">
+ <bitfield name="PRT" low="3" high="4"/>
<bitfield name="TILEMODE" low="5" high="6" type="a6xx_tile_mode"/>
<bitfield name="LOSSLESSCOMPEN" pos="7" type="boolean"/>
</reg32>
@@ -1575,9 +2457,7 @@ by a particular renderpass/blit.
<bitfield name="ZPASS_BF" low="26" high="28" type="adreno_stencil_op"/>
<bitfield name="ZFAIL_BF" low="29" high="31" type="adreno_stencil_op"/>
</reg32>
- <reg32 offset="0x8115" name="GRAS_SU_STENCIL_CNTL" usage="rp_blit">
- <bitfield name="STENCIL_ENABLE" pos="0" type="boolean"/>
- </reg32>
+
<reg32 offset="0x8881" name="RB_STENCIL_BUFFER_INFO" variants="A6XX" usage="rp_blit">
<bitfield name="SEPARATE_STENCIL" pos="0" type="boolean"/>
<bitfield name="UNK1" pos="1" type="boolean"/>
@@ -1613,11 +2493,22 @@ by a particular renderpass/blit.
<reg32 offset="0x8898" name="RB_LRZ_CNTL" usage="rp_blit">
<bitfield name="ENABLE" pos="0" type="boolean"/>
</reg32>
- <reg32 offset="0x8899" name="RB_UNKNOWN_8899" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8899" name="RB_LRZ_CNTL2" variants="A7XX-" usage="cmd">
+ <bitfield name="ENABLE_BIDIRECTIONAL_LRZ" pos="0" type="boolean"/>
+ </reg32>
<!-- 0x8899-0x88bf invalid -->
<!-- clamps depth value for depth test/write -->
- <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit"/>
- <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit"/>
+ <reg32 offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MIN" type="float" usage="rp_blit" variants="A6XX-A7XX"/>
+ <reg32 offset="0x88c1" name="RB_VIEWPORT_ZCLAMP_MAX" type="float" usage="rp_blit" variants="A6XX-A7XX"/>
+
+ <!-- TODO: allow type="float" on an <array/> -->
+ <array offset="0x88b0" name="RB_VIEWPORT_ZCLAMP_MIN" stride="1" length="16" usage="rp_blit" variants="A8XX-">
+ <reg32 offset="0" name="REG" type="float"/>
+ </array>
+ <array offset="0x88c0" name="RB_VIEWPORT_ZCLAMP_MAX" stride="1" length="16" usage="rp_blit" variants="A8XX-">
+ <reg32 offset="0" name="REG" type="float"/>
+ </array>
+
<!-- 0x88c2-0x88cf invalid-->
<reg32 offset="0x88d0" name="RB_RESOLVE_CNTL_0" usage="rp_blit">
<bitfield name="UNK0" low="0" high="12"/>
@@ -1626,10 +2517,15 @@ by a particular renderpass/blit.
<reg32 offset="0x88d1" name="RB_RESOLVE_CNTL_1" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x88d2" name="RB_RESOLVE_CNTL_2" type="a6xx_reg_xy" usage="rp_blit"/>
<!-- weird to duplicate other regs from same block?? -->
- <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" usage="rp_blit">
+ <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" variants="A6XX-A7XX" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
</reg32>
+
+ <reg32 offset="0x88d3" name="RB_RESOLVE_CNTL_3" type="a8xx_bin_size" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x88f0" name="RB_RESOLVE_CNTL_4" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x88f1" name="RB_RESOLVE_CNTL_5" variants="A8XX-" usage="rp_blit"/>
+
<reg32 offset="0x88d4" name="RB_RESOLVE_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
<reg32 offset="0x88d5" name="RB_RESOLVE_GMEM_BUFFER_INFO" usage="rp_blit">
<bitfield name="SAMPLES" low="3" high="4" type="a3xx_msaa_samples"/>
@@ -1650,10 +2546,13 @@ by a particular renderpass/blit.
<!-- array-pitch is size of layer -->
<reg32 offset="0x88db" name="RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH" low="0" high="28" shr="6" type="uint" usage="rp_blit"/>
<reg64 offset="0x88dc" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x88de" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH" usage="rp_blit">
+
+ <bitset name="a6xx_flag_buffer_pitch" inline="yes">
<bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
- </reg32>
+ <bitfield name="ARRAY_PITCH" low="11" high="28" shr="7" type="uint"/>
+ </bitset>
+
+ <reg32 offset="0x88de" name="RB_RESOLVE_SYSTEM_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch" usage="rp_blit"/>
<reg32 offset="0x88df" name="RB_RESOLVE_CLEAR_COLOR_DW0" usage="rp_blit"/>
<reg32 offset="0x88e0" name="RB_RESOLVE_CLEAR_COLOR_DW1" usage="rp_blit"/>
@@ -1705,8 +2604,10 @@ by a particular renderpass/blit.
<value value="0x1" name="CCU_CACHE_SIZE_HALF"/>
<value value="0x2" name="CCU_CACHE_SIZE_QUARTER"/>
<value value="0x3" name="CCU_CACHE_SIZE_EIGHTH"/>
+ <!-- for DEPTH_CACHE_SIZE, 3 == THREE_QUARTER from KNP -->
+ <value value="0x3" name="CCU_CACHE_SIZE_THREE_QUARTER"/>
</enum>
- <reg32 offset="0x88e5" name="RB_CCU_CACHE_CNTL" variants="A7XX-" usage="cmd">
+ <reg32 offset="0x88e5" name="RB_CCU_CACHE_CNTL" variants="A7XX" usage="cmd">
<bitfield name="DEPTH_OFFSET_HI" pos="0" type="hex"/>
<bitfield name="COLOR_OFFSET_HI" pos="2" type="hex"/>
<bitfield name="DEPTH_CACHE_SIZE" low="10" high="11" type="a6xx_ccu_cache_size"/>
@@ -1721,15 +2622,30 @@ by a particular renderpass/blit.
-->
<bitfield name="COLOR_OFFSET" low="23" high="31" shr="12" type="hex"/>
</reg32>
- <!-- 0x88e6-0x88ef invalid -->
+
+ <reg32 offset="0x88e5" name="RB_CCU_CACHE_CNTL" variants="A8XX-" usage="cmd">
+ <!--
+ For color cache, full is 128KB per CCU. For depth cache,
+ full is 256KB per CCU.
+
+ For attr/pos caches (see VPC_{ATTR,POS,BV_POS}_BUF_GMEM_SIZE),
+ the sizes are per CCU as well.
+ -->
+ <bitfield name="COLOR_OFFSET" low="0" high="13" shr="12" type="hex"/>
+ <bitfield name="COLOR_CACHE_SIZE" low="14" high="15" type="a6xx_ccu_cache_size"/>
+ <bitfield name="DEPTH_OFFSET" low="16" high="29" shr="12" type="hex"/>
+ <bitfield name="DEPTH_CACHE_SIZE" low="30" high="31" type="a6xx_ccu_cache_size"/>
+ </reg32>
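+ <!--
+    shr="12" puts the offsets in 4KB units, e.g. a color base 0x20000
+    bytes into GMEM is programmed as COLOR_OFFSET = 0x20000 >> 12 = 0x20.
+ -->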
+
+ <reg32 offset="0x88e6" name="RB_RESOLVE_GMEM_BUFFER_CNTL" variants="A8XX-">
+ <bitfield name="FULL_IN_GMEM" pos="0" type="boolean"/>
+ </reg32>
+
<!-- always 0x0 ? -->
- <reg32 offset="0x88f0" name="RB_UNKNOWN_88F0" low="0" high="11" usage="cmd"/>
+ <reg32 offset="0x88f0" name="RB_UNKNOWN_88F0" low="0" high="11" variants="A6XX" usage="cmd"/>
<!-- could be for separate stencil? (or may not be a flag buffer at all) -->
- <reg64 offset="0x88f1" name="RB_UNK_FLAG_BUFFER_BASE" type="waddress" align="64"/>
- <reg32 offset="0x88f3" name="RB_UNK_FLAG_BUFFER_PITCH">
- <bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="11" high="23" shr="7" type="uint"/>
- </reg32>
+ <reg64 offset="0x88f1" name="RB_UNK_FLAG_BUFFER_BASE" type="waddress" align="64" variants="A6XX"/>
+ <reg32 offset="0x88f3" name="RB_UNK_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch" variants="A6XX"/>
<reg32 offset="0x88f4" name="RB_VRS_CONFIG" usage="rp_blit">
<bitfield name="UNK2" pos="2" type="boolean"/>
@@ -1737,8 +2653,9 @@ by a particular renderpass/blit.
<bitfield name="ATTACHMENT_FSR_ENABLE" pos="5" type="boolean"/>
<bitfield name="PRIMITIVE_FSR_ENABLE" pos="18" type="boolean"/>
</reg32>
- <!-- Connected to VK_EXT_fragment_density_map? -->
- <reg32 offset="0x88f5" name="RB_UNKNOWN_88F5" variants="A7XX-"/>
+ <reg32 offset="0x88f5" name="RB_BIN_FOVEAT" variants="A7XX-" usage="cmd">
+ <bitfield name="BINSCALEEN" pos="6" type="boolean"/>
+ </reg32>
<!-- 0x88f6-0x88ff invalid -->
<reg64 offset="0x8900" name="RB_DEPTH_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x8902" name="RB_DEPTH_FLAG_BUFFER_PITCH" usage="rp_blit">
@@ -1747,12 +2664,10 @@ by a particular renderpass/blit.
<bitfield name="UNK8" low="8" high="10"/>
<bitfield name="ARRAY_PITCH" low="11" high="27" shr="7" type="uint"/>
</reg32>
+
<array offset="0x8903" name="RB_COLOR_FLAG_BUFFER" stride="3" length="8" usage="rp_blit">
<reg64 offset="0" name="ADDR" type="waddress" align="64"/>
- <reg32 offset="2" name="PITCH">
- <bitfield name="PITCH" low="0" high="10" shr="6" type="uint"/>
- <bitfield name="ARRAY_PITCH" low="11" high="28" shr="7" type="uint"/>
- </reg32>
+ <reg32 offset="2" name="PITCH" type="a6xx_flag_buffer_pitch"/>
</array>
<!-- 0x891b-0x8926 invalid -->
<doc>
@@ -1760,9 +2675,19 @@ by a particular renderpass/blit.
the address is specified through CP_EVENT_WRITE7::WRITE_SAMPLE_COUNT.
</doc>
<reg64 offset="0x8927" name="RB_SAMPLE_COUNTER_BASE" type="waddress" align="16" usage="cmd"/>
- <!-- 0x8929-0x89ff invalid -->
- <!-- TODO: there are some registers in the 0x8a00-0x8bff range -->
+ <bitset name="a8xx_gmem_dimension" inline="yes">
+ <bitfield name="WIDTH" low="0" high="14" type="uint"/>
+ <bitfield name="HEIGHT" low="16" high="30" type="uint"/>
+ </bitset>
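+ <!--
+    Plain packing of WIDTH into [0:14] and HEIGHT into [16:30], e.g. a
+    1344x768 GMEM region (units unconfirmed) packs to
+    0x540 | (0x300 << 16) = 0x03000540.
+ -->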
+
+ <reg32 offset="0x8813" name="RB_DEPTH_GMEM_DIMENSION" type="a8xx_gmem_dimension" variants="A8XX-"/>
+ <reg32 offset="0x8814" name="RB_STENCIL_GMEM_DIMENSION" type="a8xx_gmem_dimension" variants="A8XX-"/>
+ <reg32 offset="0x8815" name="RB_RESOLVE_GMEM_DIMENSION" type="a8xx_gmem_dimension" variants="A8XX-"/>
+
+ <array offset="0x8930" name="RB_MRT_GMEM_DIMENSION" variants="A8XX-" stride="1" length="8">
+ <reg32 offset="0" name="REG" type="a8xx_gmem_dimension"/>
+ </array>
<!--
These show up in a6xx gen3+ but so far haven't found an example of
@@ -1773,7 +2698,7 @@ by a particular renderpass/blit.
<reg32 offset="0x8a20" name="RB_UNKNOWN_8A20" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0x8a30" name="RB_UNKNOWN_8A30" variants="A6XX" usage="rp_blit"/>
- <reg32 offset="0x8c00" name="RB_A2D_BLT_CNTL" type="a6xx_a2d_bit_cntl" usage="rp_blit"/>
+ <reg32 offset="0x8c00" name="RB_A2D_BLT_CNTL" type="a6xx_a2d_blt_cntl" usage="rp_blit"/>
<reg32 offset="0x8c01" name="RB_A2D_PIXEL_CNTL" low="0" high="31" usage="rp_blit"/>
<bitset name="a6xx_a2d_src_texture_info" inline="yes">
@@ -1815,7 +2740,7 @@ by a particular renderpass/blit.
<reg64 offset="0x8c1e" name="RB_A2D_DEST_BUFFER_BASE_2" type="waddress" align="64" usage="rp_blit"/>
<reg64 offset="0x8c20" name="RB_A2D_DEST_FLAG_BUFFER_BASE" type="waddress" align="64" usage="rp_blit"/>
- <reg32 offset="0x8c22" name="RB_A2D_DEST_FLAG_BUFFER_PITCH" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
+ <reg32 offset="0x8c22" name="RB_A2D_DEST_FLAG_BUFFER_PITCH" type="a6xx_flag_buffer_pitch" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12 with UBWC): -->
<reg64 offset="0x8c23" name="RB_A2D_DEST_FLAG_BUFFER_BASE_1" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x8c25" name="RB_A2D_DEST_FLAG_BUFFER_PITCH_1" low="0" high="7" shr="6" type="uint" usage="rp_blit"/>
@@ -1832,10 +2757,10 @@ by a particular renderpass/blit.
<!-- 0x8c35-0x8dff invalid -->
<!-- always 0x1 ? either doesn't exist for a650 or write-only: -->
- <reg32 offset="0x8e01" name="RB_UNKNOWN_8E01" usage="cmd"/>
+ <reg32 offset="0x8e01" name="RB_RBP_CNTL" usage="cmd"/>
<!-- 0x8e00-0x8e03 invalid -->
<reg32 offset="0x8e04" name="RB_DBG_ECO_CNTL" usage="cmd"/> <!-- TODO: valid mask 0xfffffeff -->
- <reg32 offset="0x8e05" name="RB_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
+ <reg32 offset="0x8e05" name="RB_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" variants="A6XX"/>
<!-- 0x02080000 in GMEM, zero otherwise? -->
<reg32 offset="0x8e06" name="RB_CCU_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
@@ -1874,7 +2799,7 @@ by a particular renderpass/blit.
<bitfield name="CONCURRENT_UNRESOLVE_MODE" low="5" high="6" type="a7xx_concurrent_unresolve_mode"/>
<!-- rest of the bits were moved to RB_CCU_CACHE_CNTL -->
</reg32>
- <reg32 offset="0x8e08" name="RB_NC_MODE_CNTL">
+ <reg32 offset="0x8e08" name="RB_NC_MODE_CNTL" variants="A6XX-A7XX">
<bitfield name="MODE" pos="0" type="boolean"/>
<bitfield name="LOWER_BIT" low="1" high="2" type="uint"/>
<bitfield name="MIN_ACCESS_LENGTH" pos="3" type="boolean"/> <!-- true=64b false=32b -->
@@ -1883,26 +2808,40 @@ by a particular renderpass/blit.
<bitfield name="RGB565_PREDICATOR" pos="11" type="boolean"/>
<bitfield name="UNK12" low="12" high="13"/>
</reg32>
- <reg32 offset="0x8e09" name="RB_UNKNOWN_8E09" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8e08" name="RB_CCU_NC_MODE_CNTL" variants="A8XX-"/>
+
+ <reg32 offset="0x8e09" name="RB_UNKNOWN_8E09" variants="A7XX" usage="cmd"/>
+ <reg32 offset="0x8e09" name="RB_GC_GMEM_PROTECT" variants="A8XX-"/>
+ <reg32 offset="0x8e0a" name="RB_LPAC_GMEM_PROTECT" variants="A8XX-"/>
<!-- 0x8e09-0x8e0f invalid -->
<array offset="0x8e10" name="RB_PERFCTR_RB_SEL" stride="1" length="8"/>
<array offset="0x8e18" name="RB_PERFCTR_CCU_SEL" stride="1" length="5"/>
<!-- 0x8e1d-0x8e1f invalid -->
<!-- 0x8e20-0x8e25 more perfcntr sel? -->
<!-- 0x8e26-0x8e27 invalid -->
- <reg32 offset="0x8e28" name="RB_CMP_DBG_ECO_CNTL"/>
+
+ <reg32 offset="0x8f00" name="RB_CMP_NC_MODE_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x8f01" name="RB_RESOLVE_PREFETCH_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x8f02" name="RB_CMP_DBG_ECO_CNTL" variants="A8XX-"/>
+
+ <reg32 offset="0x8f03" name="RB_UNSLICE_STATUS" variants="A8XX-"/>
+ <reg32 offset="0x8e28" name="RB_CMP_DBG_ECO_CNTL" variants="A6XX-A7XX"/>
<!-- 0x8e29-0x8e2b invalid -->
- <array offset="0x8e2c" name="RB_PERFCTR_CMP_SEL" stride="1" length="4"/>
- <array offset="0x8e30" name="RB_PERFCTR_UFC_SEL" stride="1" length="6" variants="A7XX-"/>
- <reg32 offset="0x8e3b" name="RB_RB_SUB_BLOCK_SEL_CNTL_HOST"/>
- <reg32 offset="0x8e3d" name="RB_RB_SUB_BLOCK_SEL_CNTL_CD"/>
+ <array offset="0x8e2c" name="RB_PERFCTR_CMP_SEL" stride="1" length="4" variants="A6XX-A7XX"/>
+ <array offset="0x8e30" name="RB_PERFCTR_UFC_SEL" stride="1" length="6" variants="A7XX"/>
+ <array offset="0x8f04" name="RB_PERFCTR_CMP_SEL" stride="1" length="4" variants="A8XX-"/>
+ <array offset="0x8f10" name="RB_PERFCTR_UFC_SEL" stride="1" length="6" variants="A8XX-"/>
+ <reg32 offset="0x8e3b" name="RB_SUB_BLOCK_SEL_CNTL_HOST"/>
+ <reg32 offset="0x8e3d" name="RB_SUB_BLOCK_SEL_CNTL_CD"/>
+ <reg32 offset="0x8f29" name="RB_UFC_DBG_CNTL" variants="A8XX-"/>
<!-- 0x8e3e-0x8e4f invalid -->
<!-- GMEM save/restore for preemption: -->
<reg32 offset="0x8e50" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE" pos="0" type="boolean"/>
<!-- address for GMEM save/restore? -->
<reg32 offset="0x8e51" name="RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ADDR" type="waddress" align="1"/>
- <!-- 0x8e53-0x8e7f invalid -->
- <reg32 offset="0x8e79" name="RB_UNKNOWN_8E79" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x8e77" name="RB_SLICE_UFC_PREFETCH_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x8e78" name="RB_SLICE_UFC_DBG_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x8e79" name="RB_UNKNOWN_8E79" variants="A7XX" usage="init"/>
<!-- 0x8e80-0x8e83 are valid -->
<!-- 0x8e84-0x90ff invalid -->
@@ -1921,13 +2860,17 @@ by a particular renderpass/blit.
<bitfield name="CLIP_DIST_03_LOC" low="8" high="15" type="uint"/>
<bitfield name="CLIP_DIST_47_LOC" low="16" high="23" type="uint"/>
</bitset>
- <reg32 offset="0x9101" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9101" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9102" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9103" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9307" name="VPC_VS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A8XX" usage="rp_blit"/>
+ <reg32 offset="0x9308" name="VPC_GS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A8XX" usage="rp_blit"/>
+ <reg32 offset="0x9309" name="VPC_DS_CLIP_CULL_CNTL" type="a6xx_vpc_xs_clip_cntl" variants="A8XX" usage="rp_blit"/>
- <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
- <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9311" name="VPC_VS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9312" name="VPC_GS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9313" name="VPC_DS_CLIP_CULL_CNTL_V2" type="a6xx_vpc_xs_clip_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
<bitset name="a6xx_vpc_xs_siv_cntl" inline="yes">
<bitfield name="LAYERLOC" low="0" high="7" type="uint"/>
@@ -1935,23 +2878,38 @@ by a particular renderpass/blit.
<bitfield name="SHADINGRATELOC" low="16" high="23" type="uint" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9104" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9104" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9105" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9106" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x930a" name="VPC_VS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x930b" name="VPC_GS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x930c" name="VPC_DS_SIV_CNTL" type="a6xx_vpc_xs_siv_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9316" name="VPC_DS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_rast_stream_cntl" inline="yes">
+ <!-- which stream to send to GRAS -->
+ <bitfield name="STREAM" low="0" high="1" type="uint"/>
+ <!-- discard primitives before rasterization -->
+ <bitfield name="DISCARD" pos="2" type="boolean"/>
+ </bitset>
- <reg32 offset="0x9314" name="VPC_VS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9315" name="VPC_GS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
- <reg32 offset="0x9316" name="VPC_DS_SIV_CNTL_V2" type="a6xx_vpc_xs_siv_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x930d" name="VPC_RAST_STREAM_CNTL" type="a6xx_vpc_rast_stream_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" type="a6xx_vpc_rast_stream_cntl" variants="A7XX" usage="rp_blit"/>
<reg32 offset="0x9107" name="VPC_UNKNOWN_9107" variants="A6XX" usage="rp_blit">
<!-- this mirrors VPC_RAST_STREAM_CNTL::DISCARD, although it seems it's unused -->
<bitfield name="RASTER_DISCARD" pos="0" type="boolean"/>
<bitfield name="UNK2" pos="2" type="boolean"/>
</reg32>
- <reg32 offset="0x9108" name="VPC_RAST_CNTL" usage="rp_blit">
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
+ <reg32 offset="0x9108" name="VPC_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x930e" name="VPC_RAST_CNTL" type="a6xx_rast_cntl" variants="A8XX-" usage="rp_blit"/>
<bitset name="a6xx_pc_cntl" inline="yes">
<bitfield name="PRIMITIVE_RESTART" pos="0" type="boolean"/>
<bitfield name="PROVOKING_VTX_LAST" pos="1" type="boolean"/>
@@ -1991,10 +2949,14 @@ by a particular renderpass/blit.
<bitfield name="VIEWS" low="2" high="6" type="uint"/>
</bitset>
- <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x9109" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x930f" name="VPC_PC_CNTL" type="a6xx_pc_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x910a" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x90c0" name="VPC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x910b" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x90c1" name="VPC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x910c" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x931a" name="VPC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A8XX-" usage="rp_blit"/>
<enum name="a6xx_varying_interp_mode">
<value value="0" name="INTERP_SMOOTH"/>
@@ -2011,11 +2973,20 @@ by a particular renderpass/blit.
</enum>
<!-- 0x9109-0x91ff invalid -->
- <array offset="0x9200" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9200" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" variants="A6XX-A7XX" usage="rp_blit">
+ <doc>Packed array of a6xx_varying_interp_mode</doc>
+ <reg32 offset="0x0" name="MODE"/>
+ </array>
+ <array offset="0x9208" name="VPC_VARYING_REPLACE_MODE" stride="1" length="8" variants="A6XX-A7XX" usage="rp_blit">
+ <doc>Packed array of a6xx_varying_ps_repl_mode</doc>
+ <reg32 offset="0x0" name="MODE"/>
+ </array>
+
+ <array offset="0x9240" name="VPC_VARYING_INTERP_MODE" stride="1" length="8" variants="A8XX-" usage="rp_blit">
<doc>Packed array of a6xx_varying_interp_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
- <array offset="0x9208" name="VPC_VARYING_REPLACE_MODE_0" stride="1" length="8" usage="rp_blit">
+ <array offset="0x9248" name="VPC_VARYING_REPLACE_MODE" stride="1" length="8" variants="A8XX-" usage="rp_blit">
<doc>Packed array of a6xx_varying_ps_repl_mode</doc>
<reg32 offset="0x0" name="MODE"/>
</array>
@@ -2024,12 +2995,17 @@ by a particular renderpass/blit.
<reg32 offset="0x9210" name="VPC_UNKNOWN_9210" low="0" high="31" variants="A6XX" usage="cmd"/>
<reg32 offset="0x9211" name="VPC_UNKNOWN_9211" low="0" high="31" variants="A6XX" usage="cmd"/>
- <array offset="0x9212" name="VPC_VARYING_LM_TRANSFER_CNTL_0" stride="1" length="4" usage="rp_blit">
+ <array offset="0x9212" name="VPC_VARYING_LM_TRANSFER_CNTL" stride="1" length="4" variants="A6XX-A7XX" usage="rp_blit">
<!-- one bit per varying component: -->
<reg32 offset="0" name="DISABLE"/>
</array>
- <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" usage="rp_blit">
+ <array offset="0x9252" name="VPC_VARYING_LM_TRANSFER_CNTL" stride="1" length="4" variants="A8XX-" usage="rp_blit">
+ <!-- one bit per varying component: -->
+ <reg32 offset="0" name="DISABLE"/>
+ </array>
+
+ <bitset name="a6xx_vpc_so_mapping_wptr" inline="yes">
<!--
Choose which DWORD to write to. There is an array of
(4 * 64) DWORDs, dumped in the devcoredump at
@@ -2056,20 +3032,28 @@ by a particular renderpass/blit.
<bitfield name="ADDR" low="0" high="7" type="hex"/>
<!-- clear all A_EN and B_EN bits for all DWORD's -->
<bitfield name="RESET" pos="16" type="boolean"/>
- </reg32>
- <!-- special register, write multiple times to load SO program (not readable) -->
- <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x9216" name="VPC_SO_MAPPING_WPTR" type="a6xx_vpc_so_mapping_wptr" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9180" name="VPC_SO_MAPPING_WPTR" type="a6xx_vpc_so_mapping_wptr" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_so_mapping_port" inline="yes">
<bitfield name="A_BUF" low="0" high="1" type="uint"/>
<bitfield name="A_OFF" low="2" high="10" shr="2" type="uint"/>
<bitfield name="A_EN" pos="11" type="boolean"/>
<bitfield name="B_BUF" low="12" high="13" type="uint"/>
<bitfield name="B_OFF" low="14" high="22" shr="2" type="uint"/>
<bitfield name="B_EN" pos="23" type="boolean"/>
- </reg32>
+ </bitset>
+
+ <!-- special register, write multiple times to load SO program (not readable) -->
+ <reg32 offset="0x9217" name="VPC_SO_MAPPING_PORT" type="a6xx_vpc_so_mapping_port" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9181" name="VPC_SO_MAPPING_PORT" type="a6xx_vpc_so_mapping_port" variants="A8XX-" usage="rp_blit"/>
- <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9218" name="VPC_SO_QUERY_BASE" type="waddress" align="32" variants="A6XX-A7XX" usage="cmd"/>
+ <reg64 offset="0x9182" name="VPC_SO_QUERY_BASE" type="waddress" align="32" variants="A8XX-" usage="cmd"/>
- <array offset="0x921a" name="VPC_SO" stride="7" length="4" usage="cmd">
+ <array offset="0x921a" name="VPC_SO" stride="7" length="4" variants="A6XX-A7XX" usage="cmd">
<reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/>
<reg32 offset="2" name="BUFFER_SIZE" low="2" high="31" shr="2"/>
<reg32 offset="3" name="BUFFER_STRIDE" low="0" high="9" shr="2"/>
@@ -2077,12 +3061,23 @@ by a particular renderpass/blit.
<reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/>
</array>
- <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" usage="cmd">
+ <array offset="0x9184" name="VPC_SO" stride="7" length="4" variants="A8XX-" usage="cmd">
+ <reg64 offset="0" name="BUFFER_BASE" type="waddress" align="32"/>
+ <reg32 offset="2" name="BUFFER_SIZE" low="2" high="31" shr="2"/>
+ <reg32 offset="3" name="BUFFER_STRIDE" low="0" high="9" shr="2"/>
+ <reg32 offset="4" name="BUFFER_OFFSET" low="2" high="31" shr="2"/>
+ <reg64 offset="5" name="FLUSH_BASE" type="waddress" align="32"/>
+ </array>
+
+ <bitset name="a6xx_vpc_replace_mode_cntl" inline="yes">
<bitfield name="INVERT" pos="0" type="boolean"/>
- </reg32>
- <!-- 0x9237-0x92ff invalid -->
- <!-- always 0x0 ? -->
- <reg32 offset="0x9300" name="VPC_UNKNOWN_9300" low="0" high="2" usage="cmd"/>
+ </bitset>
+
+ <reg32 offset="0x9236" name="VPC_REPLACE_MODE_CNTL" type="a6xx_vpc_replace_mode_cntl" variants="A6XX-A7XX" usage="cmd"/>
+ <reg32 offset="0x9310" name="VPC_REPLACE_MODE_CNTL" type="a6xx_vpc_replace_mode_cntl" variants="A8XX-" usage="cmd"/>
+
+ <reg32 offset="0x9300" name="VPC_ROTATION_CNTL" low="0" high="2" variants="A6XX-A7XX" usage="cmd"/>
+ <reg32 offset="0x9312" name="VPC_ROTATION_CNTL" low="0" high="2" variants="A8XX-" usage="cmd"/>
<bitset name="a6xx_vpc_xs_cntl" inline="yes">
<doc>
@@ -2101,11 +3096,16 @@ by a particular renderpass/blit.
</doc>
</bitfield>
</bitset>
- <reg32 offset="0x9301" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9304" name="VPC_PS_CNTL" usage="rp_blit">
+ <reg32 offset="0x9301" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9302" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9303" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9300" name="VPC_VS_CNTL" type="a6xx_vpc_xs_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x9301" name="VPC_GS_CNTL" type="a6xx_vpc_xs_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x9302" name="VPC_DS_CNTL" type="a6xx_vpc_xs_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_ps_cntl" inline="yes">
<bitfield name="NUMNONPOSVAR" low="0" high="7" type="uint"/>
<!-- for fixed-function (i.e. no GS) gl_PrimitiveID in FS -->
<bitfield name="PRIMIDLOC" low="8" high="15" type="uint"/>
@@ -2122,9 +3122,12 @@ by a particular renderpass/blit.
ViewID through the VS.
</doc>
</bitfield>
- </reg32>
+ </bitset>
- <reg32 offset="0x9305" name="VPC_SO_CNTL" usage="rp_blit">
+ <reg32 offset="0x9304" name="VPC_PS_CNTL" type="a6xx_vpc_ps_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9303" name="VPC_PS_CNTL" type="a6xx_vpc_ps_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_vpc_so_cntl" inline="yes">
<!--
It's offset by 1, and 0 means "disabled"
-->
@@ -2133,82 +3136,147 @@ by a particular renderpass/blit.
<bitfield name="BUF2_STREAM" low="6" high="8" type="uint"/>
<bitfield name="BUF3_STREAM" low="9" high="11" type="uint"/>
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
- </reg32>
- <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" usage="rp_blit">
+ </bitset>
+
+ <reg32 offset="0x9305" name="VPC_SO_CNTL" type="a6xx_vpc_so_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9304" name="VPC_SO_CNTL" type="a6xx_vpc_so_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_so_override" inline="yes">
<bitfield name="DISABLE" pos="0" type="boolean"/>
- </reg32>
- <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" variants="A6XX-" usage="rp_blit"> <!-- A702 + A7xx -->
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
- <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
- <bitfield name="SIZE_GMEM" low="0" high="31"/>
- </reg32>
- <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX-" usage="rp_blit">
- <bitfield name="BASE_GMEM" low="0" high="31"/>
- </reg32>
- <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX-" usage="rp_blit">
- <bitfield name="SIZE_GMEM" low="0" high="31"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9306" name="VPC_SO_OVERRIDE" type="a6xx_so_override" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9305" name="VPC_SO_OVERRIDE" type="a6xx_so_override" variants="A8XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x9807" name="PC_DGEN_SO_OVERRIDE" type="a6xx_so_override" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b0a" name="PC_DGEN_SO_OVERRIDE" type="a6xx_so_override" variants="A8XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x9307" name="VPC_PS_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9306" name="VPC_PS_RAST_CNTL" type="a6xx_rast_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x9308" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="cmd"/>
+ <reg32 offset="0x9309" name="VPC_ATTR_BUF_GMEM_BASE" variants="A7XX" type="hex" usage="cmd"/>
+
+ <reg32 offset="0x9314" name="VPC_ATTR_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/>
+ <reg32 offset="0x9315" name="VPC_ATTR_BUF_GMEM_BASE" variants="A8XX-" type="hex" usage="cmd"/>
+
+ <reg32 offset="0x9316" name="VPC_POS_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/>
+ <reg32 offset="0x9317" name="VPC_POS_BUF_GMEM_BASE" variants="A8XX-" type="hex" usage="cmd"/>
+
+ <reg32 offset="0x9318" name="VPC_BV_POS_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/>
+ <reg32 offset="0x9319" name="VPC_BV_POS_BUF_GMEM_BASE" variants="A8XX-" type="hex" usage="cmd"/>
+
+ <reg32 offset="0x9b09" name="PC_ATTR_BUF_GMEM_SIZE" variants="A7XX" type="uint" usage="cmd"/>
+ <reg32 offset="0x9b16" name="PC_ATTR_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/>
+
+ <reg32 offset="0x9b17" name="PC_POS_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/>
+ <reg32 offset="0x9b18" name="PC_BV_POS_BUF_GMEM_SIZE" variants="A8XX-" type="uint" usage="cmd"/>
+
+ <reg32 offset="0x930a" name="VPC_UNKNOWN_930A" variants="A7XX"/>
+
+ <reg32 offset="0x9313" name="VPC_UNKNOWN_9313" variants="A8XX-"/>
+ <reg32 offset="0x9e17" name="PC_UNKNOWN_9E17" variants="A8XX-"/>
+
+ <reg32 offset="0x960a" name="VPC_FLATSHADE_MODE_CNTL" variants="A7XX"/>
+ <reg32 offset="0x9741" name="VPC_FLATSHADE_MODE_CNTL" variants="A8XX-"/>
<!-- 0x9307-0x95ff invalid -->
<!-- TODO: 0x9600-0x97ff range -->
- <reg32 offset="0x9600" name="VPC_DBG_ECO_CNTL" usage="cmd"/> <!-- always 0x0 ? TODO: 0x1fbf37ff valid mask -->
- <reg32 offset="0x9601" name="VPC_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" usage="cmd"/>
- <reg32 offset="0x9602" name="VPC_UNKNOWN_9602" pos="0" usage="cmd"/> <!-- always 0x0 ? -->
- <reg32 offset="0x9603" name="VPC_UNKNOWN_9603" low="0" high="26"/>
+ <reg32 offset="0x9600" name="VPC_DBG_ECO_CNTL" variants="A6XX-A7XX" usage="cmd"/> <!-- always 0x0 ? TODO: 0x1fbf37ff valid mask -->
+ <reg32 offset="0x9601" name="VPC_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" usage="cmd" variants="A6XX"/>
+ <reg32 offset="0x9680" name="VPC_DBG_ECO_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x9604" name="VPC_DBG_ECO_CNTL_2" variants="A8XX-"/>
+ <reg32 offset="0x9742" name="VPC_DBG_ECO_CNTL_1" variants="A8XX-"/>
+ <reg32 offset="0x9745" name="VPC_DBG_ECO_CNTL_3" variants="A8XX-"/>
+ <reg32 offset="0x9602" name="VPC_LB_MODE_CNTL" pos="0" variants="A6XX-A7XX" usage="init"/> <!-- always 0x0 ? -->
+ <reg32 offset="0x9740" name="VPC_LB_MODE_CNTL" pos="0" variants="A8XX-"/>
+ <reg32 offset="0x9603" name="VPC_STATUS" low="0" high="26" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9600" name="VPC_STATUS" low="0" high="26" variants="A8XX-"/>
<array offset="0x9604" name="VPC_PERFCTR_VPC_SEL" stride="1" length="6" variants="A6XX"/>
- <array offset="0x960b" name="VPC_PERFCTR_VPC_SEL" stride="1" length="12" variants="A7XX-"/>
- <!-- 0x960a-0x9623 invalid -->
- <!-- TODO: regs from 0x9624-0x963a -->
- <!-- 0x963b-0x97ff invalid -->
+ <array offset="0x960b" name="VPC_PERFCTR_VPC_SEL" stride="1" length="12" variants="A7XX"/>
+ <array offset="0x9670" name="VPC_PERFCTR_VPC_SEL_2" stride="1" length="12" variants="A8XX-"/>
+ <array offset="0x9690" name="VPC_PERFCTR_VPC_SEL" stride="1" length="12" variants="A8XX-"/>
+ <array offset="0x9750" name="VPC_PERFCTR_VPC_SEL_1" stride="1" length="12" variants="A8XX-"/>
- <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" usage="rp_blit"/>
+ <reg64 offset="0x9634" name="VPC_CONTEXT_SWITCH_SO_SAVE_ADDR" type="waddress" variants="A6XX-A7XX"/>
+ <reg64 offset="0x9602" name="VPC_CONTEXT_SWITCH_SO_SAVE_ADDR" type="waddress" variants="A8XX-"/>
- <!-- always 0x0 ? -->
- <reg32 offset="0x9801" name="PC_HS_PARAM_1" usage="rp_blit">
+ <reg32 offset="0x980b" name="PC_UNKNOWN_980B" variants="A8XX-"/>
+
+ <reg32 offset="0x9800" name="PC_HS_PARAM_0" low="0" high="5" type="uint" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b10" name="PC_HS_PARAM_0" low="0" high="5" type="uint" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_pc_hs_param_1" inline="yes">
<bitfield name="SIZE" low="0" high="10" type="uint"/>
<bitfield name="UNK13" pos="13"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9801" name="PC_HS_PARAM_1" type="a6xx_pc_hs_param_1" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b11" name="PC_HS_PARAM_1" type="a6xx_pc_hs_param_1" variants="A8XX-" usage="rp_blit"/>
- <reg32 offset="0x9802" name="PC_DS_PARAM" usage="rp_blit">
+ <bitset name="a6xx_pc_ds_param" inline="yes">
<bitfield name="SPACING" low="0" high="1" type="a6xx_tess_spacing"/>
<bitfield name="OUTPUT" low="2" high="3" type="a6xx_tess_output"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9802" name="PC_DS_PARAM" type="a6xx_pc_ds_param" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b12" name="PC_DS_PARAM" type="a6xx_pc_ds_param" variants="A8XX-" usage="rp_blit"/>
+
+ <reg32 offset="0x9803" name="PC_RESTART_INDEX" low="0" high="31" type="uint" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b15" name="PC_RESTART_INDEX" low="0" high="31" type="uint" variants="A8XX-" usage="rp_blit"/>
- <reg32 offset="0x9803" name="PC_RESTART_INDEX" low="0" high="31" type="uint" usage="rp_blit"/>
- <reg32 offset="0x9804" name="PC_MODE_CNTL" low="0" high="7" usage="rp_blit"/>
+ <reg32 offset="0x9804" name="PC_MODE_CNTL" low="0" high="7" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b00" name="PC_MODE_CNTL" low="0" high="14" variants="A8XX" usage="rp_blit"/>
<reg32 offset="0x9805" name="PC_POWER_CNTL" low="0" high="2" usage="rp_blit"/>
- <reg32 offset="0x9806" name="PC_PS_CNTL" usage="rp_blit">
+ <bitset name="a6xx_pc_ps_cntl" inline="yes">
<bitfield name="PRIMITIVEIDEN" pos="0" type="boolean"/>
- </reg32>
+ </bitset>
- <!-- New in a6xx gen3+ -->
- <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" usage="rp_blit">
+ <reg32 offset="0x9806" name="PC_PS_CNTL" type="a6xx_pc_ps_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b06" name="PC_PS_CNTL" type="a6xx_pc_ps_cntl" variants="A8XX-" usage="rp_blit"/>
+
+ <bitset name="a6xx_pc_dgen_so_cntl" inline="yes">
<bitfield name="STREAM_ENABLE" low="15" high="18" type="hex"/>
- </reg32>
+ </bitset>
+
+ <!-- New in a6xx gen3+ -->
+ <reg32 offset="0x9808" name="PC_DGEN_SO_CNTL" type="a6xx_pc_dgen_so_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b0b" name="PC_DGEN_SO_CNTL" type="a6xx_pc_dgen_so_cntl" variants="A8XX-" usage="rp_blit"/>
- <reg32 offset="0x980a" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL">
+ <bitset name="a6xx_pc_dgen_su_conservative_ras_cntl" inline="yes">
<bitfield name="CONSERVATIVERASEN" pos="0" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0x980a" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_pc_dgen_su_conservative_ras_cntl" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9b08" name="PC_DGEN_SU_CONSERVATIVE_RAS_CNTL" type="a6xx_pc_dgen_su_conservative_ras_cntl" variants="A8XX-"/>
+
+ <reg32 offset="0x9b0c" name="PC_VS_INPUT_CNTL" variants="A8XX-" usage="rp_blit">
+ <bitfield name="INSTR_CNT" low="0" high="5" type="uint"/>
+ <bitfield name="SIDEBAND_CNT" low="6" high="8" type="uint"/>
</reg32>
- <!-- 0x980b-0x983f invalid -->
<!-- 0x9840-0x9842 are not readable -->
- <reg32 offset="0x9840" name="PC_DRAW_INITIATOR">
+ <bitset name="a6xx_draw_initiator" inline="yes">
<bitfield name="STATE_ID" low="0" high="7"/>
- </reg32>
+ </bitset>
- <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR">
- <bitfield name="STATE_ID" low="0" high="7"/>
- </reg32>
+ <reg32 offset="0x9840" name="PC_DRAW_INITIATOR" type="a6xx_draw_initiator" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9841" name="PC_KERNEL_INITIATOR" type="a6xx_draw_initiator" variants="A6XX-A7XX"/>
- <reg32 offset="0x9842" name="PC_EVENT_INITIATOR">
+ <reg32 offset="0x9800" name="PC_DRAW_INITIATOR" type="a6xx_draw_initiator" variants="A8XX-"/>
+ <reg32 offset="0x9801" name="PC_KERNEL_INITIATOR" type="a6xx_draw_initiator" variants="A8XX-"/>
+
+ <bitset name="a6xx_event_initiator" inline="yes">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
- </reg32>
+ </bitset>
+
+ <reg32 offset="0x9842" name="PC_EVENT_INITIATOR" type="a6xx_event_initiator" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9802" name="PC_EVENT_INITIATOR" type="a6xx_event_initiator" variants="A8XX-"/>
<!--
0x9880 written in a lot of places by SQE, same value gets written
@@ -2219,45 +3287,25 @@ by a particular renderpass/blit.
<!-- 0x9843-0x997f invalid -->
- <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" variants="A6XX" usage="rp_blit">
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
- <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" variants="A7XX-" usage="rp_blit">
- <bitfield name="MODE" low="0" high="1" type="a6xx_polygon_mode"/>
- </reg32>
-
- <reg32 offset="0x9980" name="VPC_RAST_STREAM_CNTL" variants="A6XX" usage="rp_blit">
- <!-- which stream to send to GRAS -->
- <bitfield name="STREAM" low="0" high="1" type="uint"/>
- <!-- discard primitives before rasterization -->
- <bitfield name="DISCARD" pos="2" type="boolean"/>
- </reg32>
- <!-- VPC_RAST_STREAM_CNTL -->
- <reg32 offset="0x9107" name="VPC_RAST_STREAM_CNTL" variants="A7XX-" usage="rp_blit">
- <!-- which stream to send to GRAS -->
- <bitfield name="STREAM" low="0" high="1" type="uint"/>
- <!-- discard primitives before rasterization -->
- <bitfield name="DISCARD" pos="2" type="boolean"/>
- </reg32>
- <reg32 offset="0x9317" name="VPC_RAST_STREAM_CNTL_V2" variants="A7XX-" usage="rp_blit">
- <!-- which stream to send to GRAS -->
- <bitfield name="STREAM" low="0" high="1" type="uint"/>
- <!-- discard primitives before rasterization -->
- <bitfield name="DISCARD" pos="2" type="boolean"/>
- </reg32>
+ <reg32 offset="0x9981" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0x9809" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9812" name="PC_DGEN_RAST_CNTL" type="a6xx_rast_cntl" variants="A8XX" usage="rp_blit"/>
<!-- Both are a750+.
Probably needed to correctly overlap execution of several draws.
-->
- <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9885" name="PC_HS_BUFFER_SIZE" variants="A7XX" usage="cmd"/>
+ <reg32 offset="0x9814" name="PC_HS_BUFFER_SIZE" variants="A8XX-" usage="cmd"/>
<!-- Blob adds a bit more space {0x10, 0x20, 0x30, 0x40} bytes, but the meaning of
this additional space is not known.
-->
- <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9886" name="PC_TF_BUFFER_SIZE" variants="A7XX" usage="cmd"/>
+ <reg32 offset="0x9815" name="PC_TF_BUFFER_SIZE" variants="A8XX-" usage="cmd"/>
<!-- 0x9982-0x9aff invalid -->
- <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b00" name="PC_CNTL" type="a6xx_pc_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b01" name="PC_CNTL" type="a6xx_pc_cntl" variants="A8XX-" usage="rp_blit"/>
<bitset name="a6xx_pc_xs_cntl" inline="yes">
<doc>
@@ -2270,18 +3318,24 @@ by a particular renderpass/blit.
<bitfield name="LAYER" pos="9" type="boolean"/>
<bitfield name="VIEW" pos="10" type="boolean"/>
<!-- note: PC_VS_CNTL doesn't have the PRIMITIVE_ID bit -->
+ <!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
<bitfield name="PRIMITIVE_ID" pos="11" type="boolean"/>
<bitfield name="CLIP_MASK" low="16" high="23" type="uint"/>
<bitfield name="SHADINGRATE" pos="24" type="boolean" variants="A7XX-"/>
</bitset>
- <reg32 offset="0x9b01" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b02" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <!-- since HS can't output anything, only PRIMITIVE_ID is valid -->
- <reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
- <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b01" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b02" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b03" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b04" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+
+ <reg32 offset="0x9b02" name="PC_VS_CNTL" type="a6xx_pc_xs_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x9b03" name="PC_GS_CNTL" type="a6xx_pc_xs_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x9b04" name="PC_HS_CNTL" type="a6xx_pc_xs_cntl" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0x9b05" name="PC_DS_CNTL" type="a6xx_pc_xs_cntl" variants="A8XX-" usage="rp_blit"/>
- <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" usage="rp_blit"/>
+ <reg32 offset="0x9b05" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b13" name="PC_GS_PARAM_0" type="a6xx_gs_param_0" variants="A8XX-" usage="rp_blit"/>
<reg32 offset="0x9b06" name="PC_PRIMITIVE_CNTL_6" variants="A6XX" usage="rp_blit">
<doc>
@@ -2290,55 +3344,87 @@ by a particular renderpass/blit.
<bitfield name="STRIDE_IN_VPC" low="0" high="10" type="uint"/>
</reg32>
- <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" usage="rp_blit"/>
+ <reg32 offset="0x9b07" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b09" name="PC_STEREO_RENDERING_CNTL" type="a6xx_stereo_rendering_cntl" variants="A8XX-" usage="rp_blit"/>
<!-- mask of enabled views, doesn't exist on A630 -->
- <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" usage="rp_blit"/>
+ <reg32 offset="0x9b08" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0x9b0d" name="PC_STEREO_RENDERING_VIEWMASK" type="hex" low="0" high="15" variants="A8XX-" usage="rp_blit"/>
<!-- 0x9b09-0x9bff invalid -->
<reg32 offset="0x9c00" name="PC_2D_EVENT_CMD">
<!-- special register (but note first 8 bits can be written/read) -->
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
<bitfield name="STATE_ID" low="8" high="15"/>
</reg32>
- <!-- 0x9c01-0x9dff invalid -->
- <!-- TODO: 0x9e00-0xa000 range incomplete -->
- <reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL"/>
- <reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg64 offset="0x9e04" name="PC_DMA_BASE"/>
- <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint"/>
- <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint"/>
+
+ <reg32 offset="0x9e50" name="PC_CHICKEN_BITS_1" variants="A8XX-"/>
+ <reg32 offset="0x9f20" name="PC_CHICKEN_BITS_2" variants="A8XX-"/>
+ <reg32 offset="0x9e22" name="PC_CHICKEN_BITS_3" variants="A8XX-"/>
+ <reg32 offset="0x9e23" name="PC_CHICKEN_BITS_4" variants="A8XX-"/>
+ <reg32 offset="0x9f23" name="PC_CHICKEN_BITS_5" variants="A8XX-"/>
+
+ <reg32 offset="0x9e00" name="PC_DBG_ECO_CNTL" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e53" name="PC_DBG_ECO_CNTL" variants="A8XX-"/>
+ <reg32 offset="0x9e01" name="PC_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/>
+ <reg64 offset="0x9e04" name="PC_DMA_BASE" type="address" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e06" name="PC_DMA_OFFSET" type="uint" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e07" name="PC_DMA_SIZE" type="uint" variants="A6XX-A7XX"/>
+
+ <reg64 offset="0x9e06" name="PC_DMA_BASE" type="address" variants="A8XX-"/>
+ <reg32 offset="0x9e08" name="PC_DMA_OFFSET" type="uint" variants="A8XX-"/>
+ <reg32 offset="0x9e09" name="PC_DMA_SIZE" type="uint" variants="A8XX-"/>
+
<reg64 offset="0x9e08" name="PC_TESS_BASE" variants="A6XX" type="waddress" align="32" usage="cmd"/>
- <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX-" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9810" name="PC_TESS_BASE" variants="A7XX" type="waddress" align="32" usage="cmd"/>
+ <reg64 offset="0x9816" name="PC_TESS_BASE" variants="A8XX-" type="waddress" align="32" usage="cmd"/>
- <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx">
+ <reg32 offset="0x9e0b" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx" variants="A6XX-A7XX">
<doc>
Possibly not really "initiating" the draw but the layout is similar
to VGT_DRAW_INITIATOR on older gens
</doc>
</reg32>
- <reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint"/>
- <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint"/>
+ <reg32 offset="0x9e0c" name="PC_DRAWCALL_INSTANCE_NUM" type="uint" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e0d" name="PC_DRAWCALL_SIZE" type="uint" variants="A6XX-A7XX"/>
+
+ <reg32 offset="0x9e00" name="PC_DRAWCALL_CNTL" type="vgt_draw_initiator_a4xx" variants="A8XX-"/>
+ <reg32 offset="0x9e01" name="PC_DRAWCALL_INSTANCE_NUM" type="uint" variants="A8XX-"/>
+ <reg32 offset="0x9e02" name="PC_DRAWCALL_SIZE" type="uint" variants="A8XX-"/>
<!-- These match the contents of CP_SET_BIN_DATA (not written directly) -->
- <reg32 offset="0x9e11" name="PC_VIS_STREAM_CNTL">
+ <bitset name="a6xx_pc_vis_stream_cntl" inline="yes">
<bitfield name="UNK0" low="0" high="15"/>
<bitfield name="VSC_SIZE" low="16" high="21" type="uint"/>
<bitfield name="VSC_N" low="22" high="26" type="uint"/>
- </reg32>
- <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
- <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32"/>
+ </bitset>
+
+ <reg32 offset="0x9e11" name="PC_VIS_STREAM_CNTL" type="a6xx_pc_vis_stream_cntl" variants="A6XX-A7XX"/>
+ <reg64 offset="0x9e12" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A6XX-A7XX"/>
+ <reg64 offset="0x9e14" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A6XX-A7XX"/>
+
+ <reg32 offset="0x9e0a" name="PC_AUTO_VERTEX_STRIDE"/>
+ <reg32 offset="0x9e0d" name="PC_VIS_STREAM_CNTL" type="a6xx_pc_vis_stream_cntl" variants="A8XX-"/>
+ <reg64 offset="0x9e0e" name="PC_PVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A8XX-"/>
+ <reg64 offset="0x9e10" name="PC_DVIS_STREAM_BIN_BASE" type="waddress" align="32" variants="A8XX-"/>
- <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE">
+ <bitset name="a6xx_pc_drawcall_cntl_override" inline="yes">
<doc>Written by CP_SET_VISIBILITY_OVERRIDE handler</doc>
<bitfield name="OVERRIDE" pos="0" type="boolean"/>
- </reg32>
+ </bitset>
- <reg32 offset="0x9e24" name="PC_UNKNOWN_9E24" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x9e1c" name="PC_DRAWCALL_CNTL_OVERRIDE" type="a6xx_pc_drawcall_cntl_override" variants="A6XX-A7XX"/>
+ <reg32 offset="0x9e04" name="PC_DRAWCALL_CNTL_OVERRIDE" type="a6xx_pc_drawcall_cntl_override" variants="A8XX-"/>
+
+ <reg32 offset="0x9e24" name="PC_UNKNOWN_9E24" variants="A7XX-" usage="init"/>
<array offset="0x9e34" name="PC_PERFCTR_PC_SEL" stride="1" length="8" variants="A6XX"/>
- <array offset="0x9e42" name="PC_PERFCTR_PC_SEL" stride="1" length="16" variants="A7XX-"/>
+ <array offset="0x9e42" name="PC_PERFCTR_PC_SEL" stride="1" length="16" variants="A7XX"/>
+ <array offset="0x9e30" name="PC_PERFCTR_PC_SEL" stride="1" length="16" variants="A8XX-"/>
+ <array offset="0x9f00" name="PC_SLICE_PERFCTR_PC_SEL" stride="1" length="16" variants="A8XX-"/>
<!-- always 0x0 -->
- <reg32 offset="0x9e72" name="PC_UNKNOWN_9E72" usage="cmd"/>
+ <reg32 offset="0x9e72" name="PC_CONTEXT_SWITCH_GFX_PREEMPTION_MODE" variants="A6XX-A7XX" usage="init"/>
+ <reg32 offset="0x9e63" name="PC_CONTEXT_SWITCH_GFX_PREEMPTION_MODE" variants="A8XX-"/>
+ <reg32 offset="0x9e64" name="PC_CONTEXT_SWITCH_STABILIZE_CNTL_1" variants="A8XX-"/>
<reg32 offset="0xa000" name="VFD_CNTL_0" usage="rp_blit">
<bitfield name="FETCH_CNT" low="0" high="5" type="uint"/>
@@ -2425,11 +3511,15 @@ by a particular renderpass/blit.
<reg32 offset="0xa0f8" name="VFD_POWER_CNTL" low="0" high="2" usage="rp_blit"/>
- <reg32 offset="0xa600" name="VFD_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa600" name="VFD_DBG_ECO_CNTL" variants="A7XX-" usage="init"/>
- <reg32 offset="0xa601" name="VFD_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0xa601" name="VFD_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/>
<array offset="0xa610" name="VFD_PERFCTR_VFD_SEL" stride="1" length="8" variants="A6XX"/>
<array offset="0xa610" name="VFD_PERFCTR_VFD_SEL" stride="1" length="16" variants="A7XX-"/>
+ <reg32 offset="0xa639" name="VFD_CB_BV_THRESHOLD" variants="A8XX-"/>
+ <reg32 offset="0xa63a" name="VFD_CB_BR_THRESHOLD" variants="A8XX-"/>
+ <reg32 offset="0xa63b" name="VFD_CB_BUSY_REQ_CNT" variants="A8XX-"/>
+ <reg32 offset="0xa63c" name="VFD_CB_LP_REQ_CNT" variants="A8XX-"/>
<!--
Note: this seems to always be paired with another bit in another
@@ -2488,8 +3578,6 @@ by a particular renderpass/blit.
<bitset name="a6xx_sp_xs_output_cntl" inline="yes">
<!-- # of VS outputs including pos/psize -->
<bitfield name="OUT" low="0" high="5" type="uint"/>
- <!-- FLAGS_REGID only for GS -->
- <bitfield name="FLAGS_REGID" low="6" high="13" type="a3xx_regid"/>
</bitset>
<reg32 offset="0xa800" name="SP_VS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
@@ -2615,6 +3703,15 @@ by a particular renderpass/blit.
<bitfield name="OFFSET" low="0" high="18" shr="11"/>
</bitset>
+ <bitset name="a6xx_sp_xs_hysteresis" inline="yes">
+		<doc>Same on a6xx/a7xx; the UMD should not need to write this</doc>
+ </bitset>
+
+ <bitset name="a8xx_sp_xs_hysteresis" inline="yes">
+		<doc>The UMD needs to write this in some cases</doc>
+ <!-- seen 0x400, 0xc00, 0x1000, 0x1c00, 0x1000, 0x2000, 0x3000 -->
+ </bitset>
+
<reg32 offset="0xa81b" name="SP_VS_PROGRAM_COUNTER_OFFSET" type="uint" usage="rp_blit"/>
<reg64 offset="0xa81c" name="SP_VS_BASE" type="address" align="32" usage="rp_blit"/>
<reg32 offset="0xa81e" name="SP_VS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="rp_blit"/>
@@ -2624,6 +3721,8 @@ by a particular renderpass/blit.
<reg32 offset="0xa823" name="SP_VS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
<reg32 offset="0xa824" name="SP_VS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
<reg32 offset="0xa825" name="SP_VS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa826" name="SP_VS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/>
+ <reg32 offset="0xa826" name="SP_VS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/>
<reg32 offset="0xa82d" name="SP_VS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xa830" name="SP_HS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
@@ -2649,6 +3748,8 @@ by a particular renderpass/blit.
<reg32 offset="0xa83b" name="SP_HS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
<reg32 offset="0xa83c" name="SP_HS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
<reg32 offset="0xa83d" name="SP_HS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa83e" name="SP_HS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/>
+ <reg32 offset="0xa83e" name="SP_HS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/>
<reg32 offset="0xa82f" name="SP_HS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xa840" name="SP_DS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
@@ -2686,6 +3787,8 @@ by a particular renderpass/blit.
<reg32 offset="0xa863" name="SP_DS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
<reg32 offset="0xa864" name="SP_DS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
<reg32 offset="0xa865" name="SP_DS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa866" name="SP_DS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/>
+ <reg32 offset="0xa866" name="SP_DS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/>
<reg32 offset="0xa868" name="SP_DS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
<reg32 offset="0xa870" name="SP_GS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="rp_blit">
@@ -2709,7 +3812,10 @@ by a particular renderpass/blit.
<reg32 offset="0xa872" name="SP_GS_BOOLEAN_CF_MASK" type="hex" usage="rp_blit"/>
<!-- TODO: exact same layout as 0xa802-0xa81a -->
- <reg32 offset="0xa873" name="SP_GS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit"/>
+ <reg32 offset="0xa873" name="SP_GS_OUTPUT_CNTL" type="a6xx_sp_xs_output_cntl" usage="rp_blit">
+ <!-- FLAGS_REGID only for GS -->
+ <bitfield name="FLAGS_REGID" low="6" high="13" type="a3xx_regid"/>
+ </reg32>
<array offset="0xa874" name="SP_GS_OUTPUT" stride="1" length="16" usage="rp_blit">
<reg32 offset="0x0" name="REG">
<bitfield name="A_REGID" low="0" high="7" type="a3xx_regid"/>
@@ -2738,6 +3844,8 @@ by a particular renderpass/blit.
<reg32 offset="0xa894" name="SP_GS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
<reg32 offset="0xa895" name="SP_GS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
<reg32 offset="0xa896" name="SP_GS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
+ <reg32 offset="0xa897" name="SP_GS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/>
+ <reg32 offset="0xa897" name="SP_GS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/>
<reg32 offset="0xa899" name="SP_GS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
<reg64 offset="0xa8a0" name="SP_VS_SAMPLER_BASE" type="address" align="16" usage="cmd"/>
@@ -2781,13 +3889,19 @@ by a particular renderpass/blit.
<reg64 offset="0xa986" name="SP_PS_PVT_MEM_BASE" type="waddress" align="32" usage="rp_blit"/>
<reg32 offset="0xa988" name="SP_PS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="rp_blit"/>
- <reg32 offset="0xa989" name="SP_BLEND_CNTL" usage="rp_blit">
+ <bitset name="a6xx_sp_blend_cntl" inline="yes">
<!-- per-mrt enable bit -->
<bitfield name="ENABLE_BLEND" low="0" high="7"/>
- <bitfield name="UNK8" pos="8" type="boolean"/>
+ <bitfield name="INDEPENDENT_BLEND_EN" pos="8" type="boolean"/>
<bitfield name="DUAL_COLOR_IN_ENABLE" pos="9" type="boolean"/>
<bitfield name="ALPHA_TO_COVERAGE" pos="10" type="boolean"/>
+ </bitset>
+
+ <reg32 offset="0xa989" name="SP_BLEND_CNTL" type="a6xx_sp_blend_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0xa989" name="SP_BLEND_CNTL" type="a6xx_sp_blend_cntl" variants="A8XX-" usage="rp_blit">
+ <bitfield name="ALPHA_TO_ONE" pos="11" type="boolean" variants="A8XX-"/>
</reg32>
+
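
The a6xx_sp_blend_cntl layout above packs straightforwardly. A minimal sketch of a helper a UMD might use, with made-up macro names (the real names come out of headergen and may differ):

	#include <stdbool.h>
	#include <stdint.h>

	/* Field helpers derived from the a6xx_sp_blend_cntl bitset above;
	 * illustrative names, not the generated ones. */
	#define SP_BLEND_CNTL_ENABLE_BLEND(x)      ((uint32_t)(x) & 0xff) /* bits 0..7 */
	#define SP_BLEND_CNTL_INDEPENDENT_BLEND_EN (1u << 8)
	#define SP_BLEND_CNTL_DUAL_COLOR_IN_ENABLE (1u << 9)
	#define SP_BLEND_CNTL_ALPHA_TO_COVERAGE    (1u << 10)
	#define SP_BLEND_CNTL_ALPHA_TO_ONE         (1u << 11) /* A8XX- only */

	static uint32_t pack_sp_blend_cntl(uint8_t mrt_enable_mask, bool a2c)
	{
		return SP_BLEND_CNTL_ENABLE_BLEND(mrt_enable_mask) |
		       SP_BLEND_CNTL_INDEPENDENT_BLEND_EN |
		       (a2c ? SP_BLEND_CNTL_ALPHA_TO_COVERAGE : 0);
	}
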
<reg32 offset="0xa98a" name="SP_SRGB_CNTL" usage="rp_blit">
<!-- Same as RB_SRGB_CNTL -->
<bitfield name="SRGB_MRT0" pos="0" type="boolean"/>
@@ -2888,13 +4002,11 @@ by a particular renderpass/blit.
<reg32 offset="0xa9a7" name="SP_PS_TSIZE" low="0" high="7" type="uint" usage="rp_blit"/>
<reg32 offset="0xa9a8" name="SP_UNKNOWN_A9A8" low="0" high="16" usage="cmd"/> <!-- always 0x0 ? -->
<reg32 offset="0xa9a9" name="SP_PS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="rp_blit"/>
- <reg32 offset="0xa9ab" name="SP_PS_UNKNOWN_A9AB" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9ab" name="SP_PS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/>
+ <reg32 offset="0xa9ab" name="SP_PS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/>
<!-- TODO: unknown bool register at 0xa9aa, likely same as 0xa8c0-0xa8c3 but for FS -->
-
-
-
<reg32 offset="0xa9b0" name="SP_CS_CNTL_0" type="a6xx_sp_xs_cntl_0" usage="cmd">
<bitfield name="THREADSIZE" pos="20" type="a6xx_threadsize"/>
<!-- seems to make SP use less concurrent threads when possible? -->
@@ -2931,18 +4043,20 @@ by a particular renderpass/blit.
must be at least the actual CONSTLEN.
</doc>
</bitfield>
+ <bitfield name="ALT_LM_ENCODE" pos="26" type="boolean"/>
</reg32>
<reg32 offset="0xa9b2" name="SP_CS_BOOLEAN_CF_MASK" type="hex" usage="cmd"/>
<reg32 offset="0xa9b3" name="SP_CS_PROGRAM_COUNTER_OFFSET" type="uint" usage="cmd"/>
<reg64 offset="0xa9b4" name="SP_CS_BASE" type="address" align="32" usage="cmd"/>
<reg32 offset="0xa9b6" name="SP_CS_PVT_MEM_PARAM" type="a6xx_sp_xs_pvt_mem_param" usage="cmd"/>
- <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_BASE" align="32" usage="cmd"/>
+ <reg64 offset="0xa9b7" name="SP_CS_PVT_MEM_BASE" type="waddress" align="32" usage="cmd"/>
<reg32 offset="0xa9b9" name="SP_CS_PVT_MEM_SIZE" type="a6xx_sp_xs_pvt_mem_size" usage="cmd"/>
<reg32 offset="0xa9ba" name="SP_CS_TSIZE" low="0" high="7" type="uint" usage="cmd"/>
<reg32 offset="0xa9bb" name="SP_CS_CONFIG" type="a6xx_sp_xs_config" usage="cmd"/>
<reg32 offset="0xa9bc" name="SP_CS_INSTR_SIZE" low="0" high="27" type="uint" usage="cmd"/>
<reg32 offset="0xa9bd" name="SP_CS_PVT_MEM_STACK_OFFSET" type="a6xx_sp_xs_pvt_mem_stack_offset" usage="cmd"/>
- <reg32 offset="0xa9be" name="SP_CS_UNKNOWN_A9BE" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xa9be" name="SP_CS_HYSTERESIS" type="a6xx_sp_xs_hysteresis" variants="A6XX-A7XX"/>
+ <reg32 offset="0xa9be" name="SP_CS_HYSTERESIS" type="a8xx_sp_xs_hysteresis" variants="A8XX-"/>
<reg32 offset="0xa9c5" name="SP_CS_VGS_CNTL" variants="A7XX-" usage="cmd"/>
<!-- new in a6xx gen4, matches SP_CS_CONST_CONFIG_0 -->
@@ -3021,7 +4135,7 @@ by a particular renderpass/blit.
UAV state for compute shader:
-->
<reg64 offset="0xa9f2" name="SP_CS_UAV_BASE" type="address" align="16" variants="A6XX"/>
- <reg64 offset="0xa9f8" name="SP_CS_UAV_BASE" type="address" align="16" variants="A7XX"/>
+ <reg64 offset="0xa9f8" name="SP_CS_UAV_BASE" type="address" align="16" variants="A7XX-"/>
<reg32 offset="0xaa00" name="SP_CS_USIZE" low="0" high="6" type="uint"/>
<!-- Correlated with avgs/uvgs usage in FS -->
@@ -3053,6 +4167,18 @@ by a particular renderpass/blit.
<bitfield name="RT7" low="28" high="31"/>
</reg32>
+ <array offset="0xaa04" name="SP_MRT_BLEND_CNTL" stride="1" length="8" variants="A8XX-">
+ <reg32 offset="0" name="REG">
+ <bitfield name="COLOR_BLEND_EN" pos="0" type="boolean"/>
+ <bitfield name="ALPHA_BLEND_EN" pos="1" type="boolean"/>
+ <bitfield name="COMPONENT_WRITE_MASK" low="7" high="10"/>
+ </reg32>
+ </array>
+
+ <reg32 offset="0xaa0c" name="SP_ALPHA_TEST_CNTL" variants="A8XX-">
+ <bitfield name="ALPHA_TEST" pos="8" type="boolean"/>
+ </reg32>
+
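
A sketch of what per-MRT programming could look like with the new a8xx array, using the offsets and field positions above (emit_reg() is a stand-in for whatever pkt4-style write helper the command stream uses):

	#include <stdint.h>

	#define SP_MRT_BLEND_CNTL(i)        (0xaa04 + (i))   /* stride 1, 8 entries */
	#define MRT_COLOR_BLEND_EN          (1u << 0)
	#define MRT_ALPHA_BLEND_EN          (1u << 1)
	#define MRT_COMPONENT_WRITE_MASK(m) (((uint32_t)(m) & 0xf) << 7) /* bits 7..10 */

	static void emit_mrt_blend(void (*emit_reg)(uint32_t reg, uint32_t val),
	                           unsigned nr_mrts, uint8_t wrmask)
	{
		for (unsigned i = 0; i < nr_mrts; i++)
			emit_reg(SP_MRT_BLEND_CNTL(i),
			         MRT_COLOR_BLEND_EN | MRT_ALPHA_BLEND_EN |
			         MRT_COMPONENT_WRITE_MASK(wrmask));
	}
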
<reg32 offset="0xaaf2" name="SP_UNKNOWN_AAF2" type="uint" usage="cmd"/>
<!--
@@ -3077,15 +4203,19 @@ by a particular renderpass/blit.
-->
<bitfield name="CONSTANT_DEMOTION_ENABLE" pos="0" type="boolean"/>
<bitfield name="ISAMMODE" low="1" high="2" type="a6xx_isam_mode"/>
- <bitfield name="SHARED_CONSTS_ENABLE" pos="3" type="boolean"/> <!-- see HLSQ_SHARED_CONSTS -->
+ <bitfield name="SHARED_CONSTS_ENABLE" pos="3" type="boolean"/> <!-- see SP_SHARED_CONSTANT -->
</reg32>
<reg32 offset="0xab01" name="SP_UNKNOWN_AB01" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xab02" name="SP_UNKNOWN_AB02" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xab02" name="SP_HLSQ_MODE_CNTL" variants="A7XX-" usage="cmd">
+ <bitfield name="SHARED_CONSTS_ENABLE" pos="0" type="boolean"/> <!-- see SP_SHARED_CONSTANT -->
+ </reg32>
<reg32 offset="0xab04" name="SP_PS_CONFIG" type="a6xx_sp_xs_config" usage="rp_blit"/>
<reg32 offset="0xab05" name="SP_PS_INSTR_SIZE" low="0" high="27" type="uint" usage="rp_blit"/>
+ <reg32 offset="0xab06" name="SP_BIN_SIZE" type="a8xx_bin_size" variants="A8XX-" usage="rp_blit"/>
+
<array offset="0xab10" name="SP_GFX_BINDLESS_BASE" stride="2" length="5" variants="A6XX" usage="rp_blit">
<reg64 offset="0" name="DESCRIPTOR" variants="A6XX">
<bitfield name="DESC_SIZE" low="0" high="1" type="a6xx_bindless_descriptor_size"/>
@@ -3104,14 +4234,22 @@ by a particular renderpass/blit.
instructions VS/HS/DS/GS/FS. See SP_CS_UAV_BASE_* for compute shaders.
-->
<reg64 offset="0xab1a" name="SP_GFX_UAV_BASE" type="address" align="16" usage="cmd"/>
- <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" usage="cmd"/>
+ <reg32 offset="0xab20" name="SP_GFX_USIZE" low="0" high="6" type="uint" variants="A6XX-A7XX" usage="cmd"/>
+ <reg32 offset="0xab09" name="SP_GFX_USIZE" low="0" high="6" type="uint" variants="A8XX-" usage="cmd"/>
+
+ <reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xab22" name="SP_UNKNOWN_AB22" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xab23" name="SP_UNKNOWN_AB23" variants="A8XX-"/>
+
+ <enum name="a6xx_sp_a2d_output_ifmt_type">
+ <value name="OUTPUT_IFMT_2D_FLOAT" value="0"/>
+ <value name="OUTPUT_IFMT_2D_SINT" value="1"/>
+ <value name="OUTPUT_IFMT_2D_UINT" value="2"/>
+ </enum>
<bitset name="a6xx_sp_a2d_output_info" inline="yes">
- <bitfield name="NORM" pos="0" type="boolean"/>
- <bitfield name="SINT" pos="1" type="boolean"/>
- <bitfield name="UINT" pos="2" type="boolean"/>
+ <bitfield name="HALF_PRECISION" pos="0" type="boolean"/>
+ <bitfield name="IFMT_TYPE" low="1" high="2" type="a6xx_sp_a2d_output_ifmt_type"/>
<!-- looks like HW only cares about the base type of this format,
which matches the ifmt? -->
<bitfield name="COLOR_FORMAT" low="3" high="10" type="a6xx_format"/>
@@ -3124,22 +4262,27 @@ by a particular renderpass/blit.
<reg32 offset="0xacc0" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xa9bf" name="SP_A2D_OUTPUT_INFO" type="a6xx_sp_a2d_output_info" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xae00" name="SP_DBG_ECO_CNTL" usage="cmd"/>
- <reg32 offset="0xae01" name="SP_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode"/>
+ <reg32 offset="0xae00" name="SP_DBG_ECO_CNTL" usage="init"/>
+ <reg32 offset="0xae01" name="SP_ADDR_MODE_CNTL" pos="0" type="a5xx_address_mode" variants="A6XX"/>
+ <reg32 offset="0xae01" name="SP_SHADER_PROFILING" variants="A8XX-"/>
<reg32 offset="0xae02" name="SP_NC_MODE_CNTL">
<!-- TODO: valid bits 0x3c3f, see kernel -->
</reg32>
- <reg32 offset="0xae03" name="SP_CHICKEN_BITS" usage="cmd"/>
- <reg32 offset="0xae04" name="SP_NC_MODE_CNTL_2" usage="cmd">
+ <reg32 offset="0xae03" name="SP_CHICKEN_BITS" usage="init"/>
+ <reg32 offset="0xae04" name="SP_NC_MODE_CNTL_2" usage="init">
<bitfield name="F16_NO_INF" pos="3" type="boolean"/>
</reg32>
- <reg32 offset="0xae06" name="SP_UNKNOWN_AE06" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae08" name="SP_CHICKEN_BITS_1" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae09" name="SP_CHICKEN_BITS_2" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae0a" name="SP_CHICKEN_BITS_3" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae05" name="SP_SS_CHICKEN_BITS_0" variants="A8XX-"/>
+ <reg32 offset="0xae06" name="SP_ISDB_CNTL" variants="A7XX-" usage="init"/>
+ <reg32 offset="0xae07" name="SP_PERFCTR_CNTL"/>
+ <reg32 offset="0xae08" name="SP_CHICKEN_BITS_1" variants="A7XX-" usage="init"/>
+ <reg32 offset="0xae09" name="SP_CHICKEN_BITS_2" variants="A7XX-" usage="init"/>
+ <reg32 offset="0xae0a" name="SP_CHICKEN_BITS_3" variants="A7XX-" usage="init"/>
+ <reg32 offset="0xae0b" name="SP_CHICKEN_BITS_4" variants="A8XX-"/>
+ <reg32 offset="0xae0c" name="SP_STATUS"/>
- <reg32 offset="0xae0f" name="SP_PERFCTR_SHADER_MASK" usage="cmd">
+ <reg32 offset="0xae0f" name="SP_PERFCTR_SHADER_MASK" usage="init">
<!-- some perfcntrs are affected by a per-stage enable bit
(PERF_SP_ALU_WORKING_CYCLES for example)
TODO: verify position of HS/DS/GS bits -->
@@ -3150,24 +4293,47 @@ by a particular renderpass/blit.
<bitfield name="FS" pos="4" type="boolean"/>
<bitfield name="CS" pos="5" type="boolean"/>
</reg32>
- <array offset="0xae10" name="SP_PERFCTR_SP_SEL" stride="1" length="24"/>
+ <array offset="0xae10" name="SP_PERFCTR_SP_SEL" stride="1" length="24" variants="A6XX"/>
<array offset="0xae60" name="SP_PERFCTR_HLSQ_SEL" stride="1" length="6" variants="A7XX-"/>
- <reg32 offset="0xae6a" name="SP_UNKNOWN_AE6A" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae6b" name="SP_UNKNOWN_AE6B" variants="A7XX-" usage="cmd"/>
- <reg32 offset="0xae6c" name="SP_HLSQ_DBG_ECO_CNTL" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae6a" name="SP_UNKNOWN_AE6A" variants="A7XX-" usage="init"/>
+ <reg32 offset="0xae6b" name="SP_HLSQ_TIMEOUT_THRESHOLD_DP" variants="A7XX-" usage="init"/>
+ <reg32 offset="0xae6c" name="SP_HLSQ_DBG_ECO_CNTL" variants="A7XX-" usage="init"/>
<reg32 offset="0xae6d" name="SP_READ_SEL" variants="A7XX-">
- <bitfield name="LOCATION" low="18" high="19" type="a7xx_state_location"/>
- <bitfield name="PIPE" low="16" high="17" type="a7xx_pipe"/>
+ <bitfield name="CONTEXT" low="26" high="30"/>
+ <bitfield name="SLICE" low="21" high="25"/>
+ <bitfield name="LOCATION" low="18" high="20" type="a7xx_state_location"/>
+ <bitfield name="PIPE" low="16" high="17" type="adreno_pipe"/>
<bitfield name="STATETYPE" low="8" high="15" type="a7xx_statetype_id"/>
<bitfield name="USPTP" low="4" high="7"/>
<bitfield name="SPTP" low="0" high="3"/>
</reg32>
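
The corrected layout adds CONTEXT and SLICE selects and widens LOCATION to three bits. A purely illustrative sketch of packing the value per the fields above, e.g. for state-dump style reads:

	#include <stdint.h>

	/* Pack SP_READ_SEL from the field layout above. */
	static uint32_t sp_read_sel(uint32_t context, uint32_t slice,
	                            uint32_t location, uint32_t pipe,
	                            uint32_t statetype, uint32_t usptp,
	                            uint32_t sptp)
	{
		return (context << 26) | (slice << 21) | (location << 18) |
		       (pipe << 16) | (statetype << 8) | (usptp << 4) | sptp;
	}
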
<reg32 offset="0xae71" name="SP_DBG_CNTL" variants="A7XX-"/>
- <reg32 offset="0xae73" name="SP_UNKNOWN_AE73" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0xae73" name="SP_HLSQ_DBG_ECO_CNTL_1" variants="A7XX-"/>
+ <reg32 offset="0xae74" name="SP_HLSQ_DBG_ECO_CNTL_2" variants="A7XX-"/>
+ <reg32 offset="0xae76" name="SP_HLSQ_DBG_ECO_CNTL_3" variants="A8XX-"/>
<array offset="0xae80" name="SP_PERFCTR_SP_SEL" stride="1" length="36" variants="A7XX-"/>
<!-- TODO: there are 4 more percntr select registers (0xae28-0xae2b) -->
<!-- TODO: there are a few unknown registers in the 0xae30-0xae52 range -->
- <reg32 offset="0xbe22" name="SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE"/>
+ <reg32 offset="0xae52" name="SP_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE"/>
+
+ <reg64 offset="0xae10" name="SP_HLSQ_GC_GMEM_RANGE_MIN" variants="A8XX-"/>
+ <reg64 offset="0xae12" name="SP_HLSQ_LPAC_GMEM_RANGE_MIN" variants="A8XX-"/>
+ <reg32 offset="0xae15" name="SP_LPAC_CPI_STATUS" variants="A8XX-"/>
+ <reg32 offset="0xae16" name="SP_LPAC_DBG_STATUS" variants="A8XX-"/>
+ <reg32 offset="0xae17" name="SP_LPAC_ISDB_BATCH_COUNT" variants="A8XX-"/>
+ <reg32 offset="0xae18" name="SP_LPAC_ISDB_BATCH_COUNT_INCR_EN" variants="A8XX-"/>
+ <reg32 offset="0xae19" name="SP_LPAC_ISDB_BATCH_COUNT_SHADERS" variants="A8XX-"/>
+ <reg32 offset="0xae30" name="SP_ISDB_BATCH_COUNT" variants="A7XX-"/>
+ <reg32 offset="0xae31" name="SP_ISDB_BATCH_COUNT_INCR_EN" variants="A7XX-"/>
+ <reg32 offset="0xae32" name="SP_ISDB_BATCH_COUNT_SHADERS" variants="A7XX-"/>
+ <reg32 offset="0xae35" name="SP_ISDB_DEBUG_CONFIG" variants="A7XX-"/>
+
+ <reg32 offset="0xae3a" name="SP_SELF_THROTTLE_CONTROL" variants="A7XX-"/>
+ <reg32 offset="0xae3b" name="SP_DISPATCH_CNTL" variants="A7XX-"/>
+ <reg64 offset="0xae3c" name="SP_SW_DEBUG_ADDR" variants="A7XX-"/>
+ <reg64 offset="0xae3e" name="SP_ISDB_DEBUG_ADDR" variants="A7XX-"/>
+
+ <array offset="0xaec0" name="SP_PERFCTR_HLSQ_SEL_2_0" stride="1" length="6" variants="A7XX-"/>
<!--
The downstream kernel calls the debug cluster of registers
@@ -3175,12 +4341,15 @@ by a particular renderpass/blit.
color base for compute shaders.
-->
<reg64 offset="0xb180" name="TPL1_CS_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
- <reg32 offset="0xb182" name="SP_UNKNOWN_B182" low="0" high="2" usage="cmd"/>
- <reg32 offset="0xb183" name="SP_UNKNOWN_B183" low="0" high="23" usage="cmd"/>
+ <reg32 offset="0xb182" name="TPL1_PS_ROTATION_CNTL" low="0" high="2" usage="cmd"/>
+ <reg32 offset="0xb183" name="TPL1_PS_SWIZZLE_CNTL" low="0" high="23" usage="cmd"/>
<reg32 offset="0xb190" name="SP_UNKNOWN_B190"/>
<reg32 offset="0xb191" name="SP_UNKNOWN_B191"/>
+ <reg32 offset="0xb2d6" name="TPL1_A2D_BIN_SIZE" type="a8xx_bin_size" variants="A8XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2d7" name="TPL1_A2D_FILTER_CNTL" variants="A8XX-" usage="rp_blit"/>
+
<reg32 offset="0xb300" name="TPL1_RAS_MSAA_CNTL" usage="rp_blit">
<bitfield name="SAMPLES" low="0" high="1" type="a3xx_msaa_samples"/>
<bitfield name="UNK2" low="2" high="3"/>
@@ -3192,11 +4361,13 @@ by a particular renderpass/blit.
<!-- looks to work in the same way as a5xx: -->
<reg64 offset="0xb302" name="TPL1_GFX_BORDER_COLOR_BASE" type="address" align="128" usage="cmd"/>
- <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" usage="rp_blit"/>
- <reg32 offset="0xb305" name="TPL1_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
- <reg32 offset="0xb306" name="TPL1_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit"/>
+ <reg32 offset="0xb304" name="TPL1_MSAA_SAMPLE_POS_CNTL" type="a6xx_msaa_sample_pos_cntl" variants="A6XX-A7XX" usage="rp_blit"/>
+ <reg32 offset="0xb305" name="TPL1_PROGRAMMABLE_MSAA_POS_0" type="a6xx_programmable_msaa_pos" usage="rp_blit" variants="A6XX-A7XX" />
+ <reg32 offset="0xb306" name="TPL1_PROGRAMMABLE_MSAA_POS_1" type="a6xx_programmable_msaa_pos" usage="rp_blit" variants="A6XX-A7XX" />
<reg32 offset="0xb307" name="TPL1_WINDOW_OFFSET" type="a6xx_reg_xy" usage="rp_blit"/>
+ <reg32 offset="0xb304" name="TPL1_BIN_SIZE" type="a8xx_bin_size" variants="A8XX-" usage="rp_blit"/>
+
<enum name="a6xx_coord_round">
<value value="0" name="COORD_TRUNCATE"/>
<value value="1" name="COORD_ROUND_NEAREST_EVEN"/>
@@ -3205,13 +4376,17 @@ by a particular renderpass/blit.
<enum name="a6xx_nearest_mode">
<value value="0" name="ROUND_CLAMP_TRUNCATE"/>
<value value="1" name="CLAMP_ROUND_TRUNCATE"/>
+ <value value="2" name="ROUND_FLOAT_TO_INT"/> <!-- only ARRAYCOORDROUNDMODE -->
</enum>
<reg32 offset="0xb309" name="TPL1_MODE_CNTL" usage="cmd">
<bitfield name="ISAMMODE" low="0" high="1" type="a6xx_isam_mode"/>
<bitfield name="TEXCOORDROUNDMODE" pos="2" type="a6xx_coord_round"/>
+ <bitfield name="ARRAYCOORDROUNDMODE" low="3" high="4" type="a6xx_coord_round"/>
<bitfield name="NEARESTMIPSNAP" pos="5" type="a6xx_nearest_mode"/>
+ <bitfield name="SAMPLEREPLICATE" pos="6" type="boolean"/>
<bitfield name="DESTDATATYPEOVERRIDE" pos="7" type="boolean"/>
+ <bitfield name="PACK_SAMP_REDUCED_PRECISION" pos="8" type="boolean"/>
</reg32>
<reg32 offset="0xb310" name="SP_UNKNOWN_B310" variants="A7XX-" usage="cmd"/>
@@ -3232,12 +4407,12 @@ by a particular renderpass/blit.
</reg32>
<reg32 offset="0xb2c0" name="TPL1_A2D_SRC_TEXTURE_INFO" type="a6xx_a2d_src_texture_info" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A7XX">
+ <reg32 offset="0xb2c1" name="TPL1_A2D_SRC_TEXTURE_SIZE" variants="A7XX-">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
</reg32>
<reg64 offset="0xb2c2" name="TPL1_A2D_SRC_TEXTURE_BASE" type="address" align="16" variants="A7XX-" usage="rp_blit"/>
- <reg32 offset="0xb2c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A7XX">
+ <reg32 offset="0xb2c4" name="TPL1_A2D_SRC_TEXTURE_PITCH" variants="A7XX-">
<!--
Bits from 3..9 must be zero unless 'TPL1_A2D_BLT_CNTL::TYPE'
is A6XX_TEX_IMG_BUFFER, which allows for lower alignment.
@@ -3270,18 +4445,19 @@ by a particular renderpass/blit.
<reg32 offset="0xb2ce" name="SP_PS_UNKNOWN_B4CE" low="0" high="31" variants="A7XX"/>
<reg32 offset="0xb2cf" name="SP_PS_UNKNOWN_B4CF" low="0" high="30" variants="A7XX"/>
<reg32 offset="0xb2d0" name="SP_PS_UNKNOWN_B4D0" low="0" high="29" variants="A7XX"/>
- <reg32 offset="0xb2d1" name="TPL1_A2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX"/>
+ <reg32 offset="0xb2d1" name="TPL1_A2D_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-"/>
<reg32 offset="0xb2d2" name="TPL1_A2D_BLT_CNTL" variants="A7XX-" usage="rp_blit">
<bitfield name="RAW_COPY" pos="0" type="boolean"/>
<bitfield name="START_OFFSET_TEXELS" low="16" high="21"/>
<bitfield name="TYPE" low="29" high="31" type="a6xx_tex_type"/>
</reg32>
- <reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xab21" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0xab07" name="SP_WINDOW_OFFSET" type="a6xx_reg_xy" variants="A8XX-" usage="rp_blit"/>
<!-- always 0x100000 or 0x1000000? -->
- <reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="cmd"/>
- <reg32 offset="0xb601" name="TPL1_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="cmd">
+ <reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="init"/>
+ <reg32 offset="0xb601" name="TPL1_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/>
+ <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="init">
		<!-- Affects UBWC in some way: if BLIT_OP_SCALE is done with this bit set
		     and another blit is then done without it, the UBWC image may be
		     copied incorrectly.
		-->
@@ -3294,22 +4470,23 @@ by a particular renderpass/blit.
<bitfield name="UPPER_BIT" pos="4" type="uint"/>
<bitfield name="UNK6" low="6" high="7"/>
</reg32>
- <reg32 offset="0xb605" name="TPL1_UNKNOWN_B605" low="0" high="7" type="uint" variants="A6XX" usage="cmd"/> <!-- always 0x0 or 0x44 ? -->
+ <reg32 offset="0xb605" name="TPL1_UNKNOWN_B605" low="0" high="7" type="uint" variants="A6XX" usage="init"/> <!-- always 0x0 or 0x44 ? -->
- <reg32 offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE_0" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb609" name="TPL1_BICUBIC_WEIGHTS_TABLE_1" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb60a" name="TPL1_BICUBIC_WEIGHTS_TABLE_2" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A6XX"/>
- <reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A6XX"/>
+ <array offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE" stride="1" length="5" variants="A6XX">
+ <reg32 offset="0" name="REG" low="0" high="29"/>
+ </array>
- <reg32 offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE_0" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb609" name="TPL1_BICUBIC_WEIGHTS_TABLE_1" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb60a" name="TPL1_BICUBIC_WEIGHTS_TABLE_2" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A7XX" usage="cmd"/>
- <reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A7XX" usage="cmd"/>
+ <array offset="0xb608" name="TPL1_BICUBIC_WEIGHTS_TABLE" stride="1" length="5" variants="A7XX">
+ <reg32 offset="0" name="REG" low="0" high="29" usage="cmd"/>
+ </array>
+
+ <array offset="0xb606" name="TPL1_BICUBIC_WEIGHTS_TABLE" stride="1" length="25" variants="A8XX">
+ <reg32 offset="0" name="REG" low="0" high="29"/>
+ </array>
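
Note the a8xx table grows from 5 to 25 entries and moves down to 0xb606, ending just below the relocated TPL1_PERFCTR_TP_SEL block at 0xb620. A sketch of uploading it (emit_reg() again stands in for the command-stream write helper):

	#include <stdint.h>

	#define TPL1_BICUBIC_WEIGHTS_TABLE_A8XX(i) (0xb606 + (i)) /* 25 dwords */

	static void write_bicubic_weights(void (*emit_reg)(uint32_t reg, uint32_t val),
	                                  const uint32_t weights[25])
	{
		for (unsigned i = 0; i < 25; i++)
			emit_reg(TPL1_BICUBIC_WEIGHTS_TABLE_A8XX(i),
			         weights[i] & 0x3fffffff); /* low 30 bits valid */
	}
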
<array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12" variants="A6XX"/>
<array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="18" variants="A7XX"/>
+ <array offset="0xb620" name="TPL1_PERFCTR_TP_SEL" stride="1" length="20" variants="A8XX"/>
<!-- TODO: 4 more perfcntr sel at 0xb620 ? -->
@@ -3352,10 +4529,8 @@ by a particular renderpass/blit.
<reg32 offset="0xa9ae" name="SP_PS_CNTL_1" variants="A7XX-" usage="rp_blit">
<bitfield name="SYSVAL_REGS_COUNT" low="0" high="7" type="uint"/>
- <!-- UNK8 is set on a730/a740 -->
- <bitfield name="UNK8" pos="8" type="boolean"/>
- <!-- UNK9 is set on a750 -->
- <bitfield name="UNK9" pos="9" type="boolean"/>
+ <bitfield name="DEFER_WAVE_ALLOC_DIS" pos="8" type="boolean"/>
+ <bitfield name="EVICT_BUF_MODE" low="9" high="10"/>
</reg32>
<reg32 offset="0xb820" name="HLSQ_LOAD_STATE_GEOM_CMD"/>
@@ -3406,9 +4581,12 @@ by a particular renderpass/blit.
<reg32 offset="0xb985" type="a6xx_sp_reg_prog_id_2" name="SP_REG_PROG_ID_2" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xb986" type="a6xx_sp_reg_prog_id_3" name="SP_REG_PROG_ID_3" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xb987" name="SP_CS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="cmd"/>
- <reg32 offset="0xa9c6" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xa9c6" type="a6xx_sp_ps_wave_cntl" name="SP_PS_WAVE_CNTL" variants="A7XX" usage="rp_blit"/>
+ <reg32 offset="0xa9c6" name="SP_PS_WAVE_CNTL" variants="A8XX-" usage="rp_blit">
+ <bitfield name="VARYINGS" pos="1" type="boolean"/>
+ </reg32>
<reg32 offset="0xa9c7" name="SP_LB_PARAM_LIMIT" low="0" high="2" variants="A7XX-" usage="rp_blit">
- <bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
+ <bitfield name="PRIMALLOCTHRESHOLD" low="0" high="2" type="uint"/>
</reg32>
<reg32 offset="0xa9c8" name="SP_REG_PROG_ID_0" variants="A7XX-" usage="rp_blit">
<bitfield name="FACEREGID" low="0" high="7" type="a3xx_regid"/>
@@ -3612,7 +4790,7 @@ by a particular renderpass/blit.
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
</reg32>
- <reg32 offset="0xab1f" name="SP_UPDATE_CNTL" variants="A7XX-" usage="cmd">
+ <reg32 offset="0xab1f" name="SP_UPDATE_CNTL" variants="A7XX" usage="cmd">
<doc>
This register clears pending loads queued up by
CP_LOAD_STATE6. Each bit resets a particular kind(s) of
@@ -3635,10 +4813,30 @@ by a particular renderpass/blit.
<bitfield name="GFX_BINDLESS" low="17" high="24" type="hex"/>
</reg32>
+ <reg32 offset="0xab1f" name="SP_UPDATE_CNTL" variants="A8XX" usage="cmd">
+ <doc>
+ This register clears pending loads queued up by
+		CP_LOAD_STATE6. Each bit resets a particular set of
+		CP_LOAD_STATE6 state.
+ </doc>
+
+ <!-- per-stage state: shader, non-bindless UBO, textures, and samplers -->
+ <bitfield name="VS_STATE" pos="0" type="boolean"/>
+ <bitfield name="HS_STATE" pos="1" type="boolean"/>
+ <bitfield name="DS_STATE" pos="2" type="boolean"/>
+ <bitfield name="GS_STATE" pos="3" type="boolean"/>
+ <bitfield name="FS_STATE" pos="4" type="boolean"/>
+ <bitfield name="CS_STATE" pos="5" type="boolean"/>
+ </reg32>
+
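
So on a8xx this reduces to a plain per-stage invalidate, presumably with bindless invalidation handled by the dedicated *_BINDLESS_INVALIDATE registers just below. A sketch, with the register offset taken from above and a hypothetical write helper:

	#include <stdint.h>

	#define SP_UPDATE_CNTL  0xab1f
	#define FS_STATE        (1u << 4) /* shader, non-bindless UBO, tex, samp */

	static void invalidate_fs_state(void (*emit_reg)(uint32_t reg, uint32_t val))
	{
		emit_reg(SP_UPDATE_CNTL, FS_STATE);
	}
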
+ <reg32 offset="0xa9c0" name="SP_CS_BINDLESS_INVALIDATE"/>
+ <reg32 offset="0xab08" name="SP_GFX_BINDLESS_INVALIDATE"/>
+
<reg32 offset="0xbb10" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xab03" name="SP_PS_CONST_CONFIG" type="a6xx_xs_const_config" variants="A7XX-" usage="rp_blit"/>
- <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX_0" stride="1" length="64" variants="A7XX-"/>
+ <array offset="0xab40" name="SP_SHARED_CONSTANT_GFX" stride="1" length="64" variants="A7XX"/>
+ <array offset="0xab30" name="SP_SHARED_CONSTANT_GFX" stride="1" length="128" variants="A8XX-"/>
<reg32 offset="0xbb11" name="HLSQ_SHARED_CONSTS" variants="A6XX" usage="cmd">
<doc>
@@ -3675,15 +4873,15 @@ by a particular renderpass/blit.
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
</reg32>
- <reg32 offset="0xbe00" name="HLSQ_UNKNOWN_BE00" variants="A6XX" usage="cmd"/> <!-- all bits valid except bit 29 -->
- <reg32 offset="0xbe01" name="HLSQ_UNKNOWN_BE01" low="4" high="6" variants="A6XX" usage="cmd"/>
- <reg32 offset="0xbe04" name="HLSQ_DBG_ECO_CNTL" variants="A6XX" usage="cmd"/>
- <reg32 offset="0xbe05" name="HLSQ_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
+ <reg32 offset="0xbe00" name="HLSQ_UNKNOWN_BE00" variants="A6XX" usage="init"/> <!-- all bits valid except bit 29 -->
+ <reg32 offset="0xbe01" name="HLSQ_UNKNOWN_BE01" low="4" high="6" variants="A6XX" usage="init"/>
+ <reg32 offset="0xbe04" name="HLSQ_DBG_ECO_CNTL" variants="A6XX" usage="init"/>
+ <reg32 offset="0xbe05" name="HLSQ_ADDR_MODE_CNTL" type="a5xx_address_mode" variants="A6XX"/>
<reg32 offset="0xbe08" name="HLSQ_UNKNOWN_BE08" low="0" high="15"/>
<array offset="0xbe10" name="HLSQ_PERFCTR_HLSQ_SEL" stride="1" length="6"/>
<!-- TODO: some valid registers between 0xbe20 and 0xbe33 -->
- <reg32 offset="0xbe22" name="HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE"/>
+ <reg32 offset="0xbe22" name="HLSQ_CONTEXT_SWITCH_GFX_PREEMPTION_SAFE_MODE" variants="A6XX"/>
<reg32 offset="0xc000" name="SP_AHB_READ_APERTURE" variants="A7XX-"/>
@@ -3800,7 +4998,7 @@ by a particular renderpass/blit.
<reg32 offset="0x0030" name="CFG_DBGBUS_TRACE_BUF2"/>
</domain>
-<domain name="A7XX_CX_DBGC" width="32">
+<domain name="A7XX_CX_DBGC" width="32" varset="chip">
<!-- Bitfields shifted, but otherwise the same: -->
<reg32 offset="0x0000" name="CFG_DBGBUS_SEL_A" variants="A7XX-">
<bitfield high="7" low="0" name="PING_INDEX"/>
@@ -3812,6 +5010,7 @@ by a particular renderpass/blit.
<reg32 offset="0x0001" name="SYSTEM_CACHE_CNTL_0"/>
<reg32 offset="0x0002" name="SYSTEM_CACHE_CNTL_1"/>
<reg32 offset="0x0039" name="CX_MISC_TCM_RET_CNTL" variants="A7XX-"/>
+ <reg32 offset="0x0087" name="CX_MISC_SLICE_ENABLE_FINAL" variants="A8XX"/>
<reg32 offset="0x0400" name="CX_MISC_SW_FUSE_VALUE" variants="A7XX-">
<bitfield pos="0" name="FASTBLEND" type="boolean"/>
<bitfield pos="1" name="LPAC" type="boolean"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
index 307d43dda8a2..56cfaff614a4 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_descriptors.xml
@@ -9,38 +9,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<domain name="A6XX_TEX_SAMP" width="32">
<doc>Texture sampler dwords</doc>
- <enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_NEAREST" value="0"/>
- <value name="A6XX_TEX_LINEAR" value="1"/>
- <value name="A6XX_TEX_ANISO" value="2"/>
- <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
- </enum>
- <enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_REPEAT" value="0"/>
- <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
- <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
- <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
- <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
- </enum>
- <enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_ANISO_1" value="0"/>
- <value name="A6XX_TEX_ANISO_2" value="1"/>
- <value name="A6XX_TEX_ANISO_4" value="2"/>
- <value name="A6XX_TEX_ANISO_8" value="3"/>
- <value name="A6XX_TEX_ANISO_16" value="4"/>
- </enum>
- <enum name="a6xx_reduction_mode">
- <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
- <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
- <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
- </enum>
- <enum name="a6xx_fast_border_color">
- <!-- R B G A -->
- <value name="A6XX_BORDER_COLOR_0_0_0_0" value="0"/>
- <value name="A6XX_BORDER_COLOR_0_0_0_1" value="1"/>
- <value name="A6XX_BORDER_COLOR_1_1_1_0" value="2"/>
- <value name="A6XX_BORDER_COLOR_1_1_1_1" value="3"/>
- </enum>
<reg32 offset="0" name="0">
<bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
@@ -79,14 +47,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<domain name="A6XX_TEX_CONST" width="32" varset="chip">
<doc>Texture constant dwords</doc>
- <enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
- <value name="A6XX_TEX_X" value="0"/>
- <value name="A6XX_TEX_Y" value="1"/>
- <value name="A6XX_TEX_Z" value="2"/>
- <value name="A6XX_TEX_W" value="3"/>
- <value name="A6XX_TEX_ZERO" value="4"/>
- <value name="A6XX_TEX_ONE" value="5"/>
- </enum>
<reg32 offset="0" name="0">
<bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
<bitfield name="SRGB" pos="2" type="boolean"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
index 665539b098c6..81538831dc19 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_enums.xml
@@ -303,7 +303,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
</enum>
<!--
-Used in a6xx_a2d_bit_cntl.. the value mostly seems to correlate to the
+Used in a6xx_a2d_blt_cntl.. the value mostly seems to correlate to the
component type/size, so I think it relates to internal format used for
blending? The one exception is that 16b unorm and 32b float use the
same value... maybe 16b unorm is uncommon enough that it was just easier
@@ -320,14 +320,14 @@ to upconvert to 32b float internally?
16b float: 3
-->
<enum name="a6xx_2d_ifmt">
- <value value="0x10" name="R2D_UNORM8"/>
<value value="0x7" name="R2D_INT32"/>
<value value="0x6" name="R2D_INT16"/>
<value value="0x5" name="R2D_INT8"/>
<value value="0x4" name="R2D_FLOAT32"/>
<value value="0x3" name="R2D_FLOAT16"/>
+ <value value="0x2" name="R2D_SNORM8"/>
<value value="0x1" name="R2D_UNORM8_SRGB"/>
- <value value="0x0" name="R2D_RAW"/>
+ <value value="0x0" name="R2D_UNORM8"/>
</enum>
<enum name="a6xx_tex_type">
@@ -380,4 +380,50 @@ to upconvert to 32b float internally?
<value value="0x3" name="TESS_CCW_TRIS"/>
</enum>
+<enum name="a6xx_tex_filter"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_NEAREST" value="0"/>
+ <value name="A6XX_TEX_LINEAR" value="1"/>
+ <value name="A6XX_TEX_ANISO" value="2"/>
+ <value name="A6XX_TEX_CUBIC" value="3"/> <!-- a650 only -->
+</enum>
+
+<enum name="a6xx_tex_clamp"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_REPEAT" value="0"/>
+ <value name="A6XX_TEX_CLAMP_TO_EDGE" value="1"/>
+ <value name="A6XX_TEX_MIRROR_REPEAT" value="2"/>
+ <value name="A6XX_TEX_CLAMP_TO_BORDER" value="3"/>
+ <value name="A6XX_TEX_MIRROR_CLAMP" value="4"/>
+</enum>
+
+<enum name="a6xx_tex_aniso"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_ANISO_1" value="0"/>
+ <value name="A6XX_TEX_ANISO_2" value="1"/>
+ <value name="A6XX_TEX_ANISO_4" value="2"/>
+ <value name="A6XX_TEX_ANISO_8" value="3"/>
+ <value name="A6XX_TEX_ANISO_16" value="4"/>
+</enum>
+
+<enum name="a6xx_reduction_mode">
+ <value name="A6XX_REDUCTION_MODE_AVERAGE" value="0"/>
+ <value name="A6XX_REDUCTION_MODE_MIN" value="1"/>
+ <value name="A6XX_REDUCTION_MODE_MAX" value="2"/>
+</enum>
+
+<enum name="a6xx_fast_border_color">
+ <!-- R B G A -->
+ <value name="A6XX_BORDER_COLOR_0_0_0_0" value="0"/>
+ <value name="A6XX_BORDER_COLOR_0_0_0_1" value="1"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_0" value="2"/>
+ <value name="A6XX_BORDER_COLOR_1_1_1_1" value="3"/>
+</enum>
+
+<enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
+ <value name="A6XX_TEX_X" value="0"/>
+ <value name="A6XX_TEX_Y" value="1"/>
+ <value name="A6XX_TEX_Z" value="2"/>
+ <value name="A6XX_TEX_W" value="3"/>
+ <value name="A6XX_TEX_ZERO" value="4"/>
+ <value name="A6XX_TEX_ONE" value="5"/>
+</enum>
+
</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
index 3d2cc339b8f1..c4e00b1263cd 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx_gmu.xml
@@ -40,56 +40,62 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="IRQ_MASK_BIT" pos="0" />
</bitset>
- <reg32 offset="0x80" name="GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL"/>
- <reg32 offset="0x81" name="GMU_GX_SPTPRAC_POWER_CONTROL"/>
- <reg32 offset="0xc00" name="GMU_CM3_ITCM_START"/>
- <reg32 offset="0x1c00" name="GMU_CM3_DTCM_START"/>
- <reg32 offset="0x23f0" name="GMU_NMI_CONTROL_STATUS"/>
- <reg32 offset="0x23f8" name="GMU_BOOT_SLUMBER_OPTION"/>
- <reg32 offset="0x23f9" name="GMU_GX_VOTE_IDX"/>
- <reg32 offset="0x23fa" name="GMU_MX_VOTE_IDX"/>
- <reg32 offset="0x23fc" name="GMU_DCVS_ACK_OPTION"/>
- <reg32 offset="0x23fd" name="GMU_DCVS_PERF_SETTING"/>
- <reg32 offset="0x23fe" name="GMU_DCVS_BW_SETTING"/>
- <reg32 offset="0x23ff" name="GMU_DCVS_RETURN"/>
- <reg32 offset="0x2bf8" name="GMU_CORE_FW_VERSION">
+ <reg32 offset="0x1a880" name="GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL"/>
+ <reg32 offset="0x1a881" name="GMU_GX_SPTPRAC_POWER_CONTROL"/>
+ <reg32 offset="0x1b400" name="GMU_CM3_ITCM_START"/>
+ <reg32 offset="0x1c400" name="GMU_CM3_DTCM_START"/>
+ <reg32 offset="0x1cbf0" name="GMU_NMI_CONTROL_STATUS"/>
+ <reg32 offset="0x1cbf8" name="GMU_BOOT_SLUMBER_OPTION"/>
+ <reg32 offset="0x1cbf9" name="GMU_GX_VOTE_IDX"/>
+ <reg32 offset="0x1cbfa" name="GMU_MX_VOTE_IDX"/>
+ <reg32 offset="0x1cbfc" name="GMU_DCVS_ACK_OPTION"/>
+ <reg32 offset="0x1cbfd" name="GMU_DCVS_PERF_SETTING"/>
+ <reg32 offset="0x1cbfe" name="GMU_DCVS_BW_SETTING"/>
+ <reg32 offset="0x1cbff" name="GMU_DCVS_RETURN"/>
+ <reg32 offset="0x1d3f8" name="GMU_CORE_FW_VERSION">
<bitfield name="MAJOR" low="28" high="31"/>
<bitfield name="MINOR" low="16" high="27"/>
<bitfield name="STEP" low="0" high="15"/>
</reg32>
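
All of the relocations in this hunk follow one pattern: the new offset is the old GMU-block-relative dword offset plus 0x1a800 (0x80 -> 0x1a880, 0x5000 -> 0x1f800, 0x5190 -> 0x1f990, and so on), i.e. the registers become absolute in the larger map. A sketch of the rebase, with a made-up constant name:

	#include <stdint.h>

	#define GMU_BASE_DWORDS 0x1a800 /* inferred from this diff, not from docs */

	static uint32_t gmu_abs_offset(uint32_t rel)
	{
		return GMU_BASE_DWORDS + rel; /* e.g. 0x5000 -> 0x1f800 (GMU_CM3_SYSRESET) */
	}
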
- <reg32 offset="0x4c00" name="GMU_ICACHE_CONFIG"/>
- <reg32 offset="0x4c01" name="GMU_DCACHE_CONFIG"/>
- <reg32 offset="0x4c0f" name="GMU_SYS_BUS_CONFIG"/>
- <reg32 offset="0x5000" name="GMU_CM3_SYSRESET"/>
- <reg32 offset="0x5001" name="GMU_CM3_BOOT_CONFIG"/>
- <reg32 offset="0x501a" name="GMU_CM3_FW_BUSY"/>
- <reg32 offset="0x501c" name="GMU_CM3_FW_INIT_RESULT"/>
- <reg32 offset="0x502d" name="GMU_CM3_CFG"/>
- <reg32 offset="0x5040" name="GMU_CX_GMU_POWER_COUNTER_ENABLE"/>
- <reg32 offset="0x5041" name="GMU_CX_GMU_POWER_COUNTER_SELECT_0"/>
- <reg32 offset="0x5042" name="GMU_CX_GMU_POWER_COUNTER_SELECT_1"/>
- <reg32 offset="0x5044" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L"/>
- <reg32 offset="0x5045" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H"/>
- <reg32 offset="0x5046" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L"/>
- <reg32 offset="0x5047" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H"/>
- <reg32 offset="0x5048" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L"/>
- <reg32 offset="0x5049" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H"/>
- <reg32 offset="0x504a" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L"/>
- <reg32 offset="0x504b" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H"/>
- <reg32 offset="0x504c" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L"/>
- <reg32 offset="0x504d" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H"/>
- <reg32 offset="0x504e" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L"/>
- <reg32 offset="0x504f" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H"/>
- <reg32 offset="0x50c0" name="GMU_PWR_COL_INTER_FRAME_CTRL">
+ <reg32 offset="0x1f400" name="GMU_ICACHE_CONFIG"/>
+ <reg32 offset="0x1f401" name="GMU_DCACHE_CONFIG"/>
+ <reg32 offset="0x1f40f" name="GMU_SYS_BUS_CONFIG"/>
+ <reg32 offset="0x1f50b" name="GMU_MRC_GBIF_QOS_CTRL"/>
+ <reg32 offset="0x1f800" name="GMU_CM3_SYSRESET"/>
+ <reg32 offset="0x1f801" name="GMU_CM3_BOOT_CONFIG"/>
+ <reg32 offset="0x1f81a" name="GMU_CM3_FW_BUSY"/>
+ <reg32 offset="0x1f81c" name="GMU_CM3_FW_INIT_RESULT"/>
+ <reg32 offset="0x1f82d" name="GMU_CM3_CFG"/>
+ <reg32 offset="0x1f840" name="GMU_CX_GMU_POWER_COUNTER_ENABLE"/>
+ <reg32 offset="0x1fc10" name="GMU_CX_GMU_POWER_COUNTER_ENABLE" variants="A8XX"/>
+ <reg32 offset="0x1f841" name="GMU_CX_GMU_POWER_COUNTER_SELECT_0"/>
+ <reg32 offset="0x1f842" name="GMU_CX_GMU_POWER_COUNTER_SELECT_1"/>
+ <reg32 offset="0x1fc40" name="GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_0" variants="A8XX-"/>
+ <reg32 offset="0x1fc41" name="GMU_CX_GMU_POWER_COUNTER_SELECT_XOCLK_1" variants="A8XX-"/>
+ <reg32 offset="0x1f844" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L"/>
+ <reg32 offset="0x1fca0" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L" variants="A8XX-"/>
+ <reg32 offset="0x1f845" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H"/>
+ <reg32 offset="0x1fca1" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H" variants="A8XX-"/>
+ <reg32 offset="0x1f846" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L"/>
+ <reg32 offset="0x1f847" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H"/>
+ <reg32 offset="0x1f848" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L"/>
+ <reg32 offset="0x1f849" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H"/>
+ <reg32 offset="0x1f84a" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L"/>
+ <reg32 offset="0x1f84b" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H"/>
+ <reg32 offset="0x1f84c" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L"/>
+ <reg32 offset="0x1f84d" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H"/>
+ <reg32 offset="0x1f84e" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L"/>
+ <reg32 offset="0x1f84f" name="GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H"/>
+ <reg32 offset="0x1f8c0" name="GMU_PWR_COL_INTER_FRAME_CTRL">
<bitfield name="IFPC_ENABLE" pos="0" type="boolean"/>
<bitfield name="HM_POWER_COLLAPSE_ENABLE" pos="1" type="boolean"/>
<bitfield name="SPTPRAC_POWER_CONTROL_ENABLE" pos="2" type="boolean"/>
<bitfield name="NUM_PASS_SKIPS" low="10" high="13"/>
<bitfield name="MIN_PASS_LENGTH" low="14" high="31"/>
</reg32>
- <reg32 offset="0x50c1" name="GMU_PWR_COL_INTER_FRAME_HYST"/>
- <reg32 offset="0x50c2" name="GMU_PWR_COL_SPTPRAC_HYST"/>
- <reg32 offset="0x50d0" name="GMU_SPTPRAC_PWR_CLK_STATUS">
+ <reg32 offset="0x1f8c1" name="GMU_PWR_COL_INTER_FRAME_HYST"/>
+ <reg32 offset="0x1f8c2" name="GMU_PWR_COL_SPTPRAC_HYST"/>
+ <reg32 offset="0x1f8d0" name="GMU_SPTPRAC_PWR_CLK_STATUS" variants="A6XX">
<bitfield name="SPTPRAC_GDSC_POWERING_OFF" pos="0" type="boolean"/>
<bitfield name="SPTPRAC_GDSC_POWERING_ON" pos="1" type="boolean"/>
<bitfield name="SPTPRAC_GDSC_POWER_OFF" pos="2" type="boolean"/>
@@ -99,11 +105,19 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="GX_HM_GDSC_POWER_OFF" pos="6" type="boolean"/>
<bitfield name="GX_HM_CLK_OFF" pos="7" type="boolean"/>
</reg32>
- <reg32 offset="0x50e4" name="GMU_GPU_NAP_CTRL">
+ <reg32 offset="0x1f8d0" name="GMU_SPTPRAC_PWR_CLK_STATUS" variants="A7XX">
+ <bitfield name="GX_HM_GDSC_POWER_OFF" pos="0" type="boolean"/>
+ <bitfield name="GX_HM_CLK_OFF" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x1f7e8" name="GMU_PWR_CLK_STATUS" variants="A8XX-">
+ <bitfield name="GX_HM_GDSC_POWER_OFF" pos="0" type="boolean"/>
+ <bitfield name="GX_HM_CLK_OFF" pos="1" type="boolean"/>
+ </reg32>
+ <reg32 offset="0x1f8e4" name="GMU_GPU_NAP_CTRL">
<bitfield name="HW_NAP_ENABLE" pos="0"/>
<bitfield name="SID" low="4" high="8"/>
</reg32>
- <reg32 offset="0x50e8" name="GMU_RPMH_CTRL">
+ <reg32 offset="0x1f8e8" name="GMU_RPMH_CTRL">
<bitfield name="RPMH_INTERFACE_ENABLE" pos="0" type="boolean"/>
<bitfield name="LLC_VOTE_ENABLE" pos="4" type="boolean"/>
<bitfield name="DDR_VOTE_ENABLE" pos="8" type="boolean"/>
@@ -115,70 +129,84 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="CX_MIN_VOTE_ENABLE" pos="14" type="boolean"/>
<bitfield name="GFX_MIN_VOTE_ENABLE" pos="15" type="boolean"/>
</reg32>
- <reg32 offset="0x50e9" name="GMU_RPMH_HYST_CTRL"/>
- <reg32 offset="0x50ec" name="GPU_GMU_CX_GMU_RPMH_POWER_STATE"/>
- <reg32 offset="0x50f0" name="GPU_GMU_CX_GMU_CX_FAL_INTF"/>
- <reg32 offset="0x50f1" name="GPU_GMU_CX_GMU_CX_FALNEXT_INTF"/>
- <reg32 offset="0x5100" name="GPU_GMU_CX_GMU_PWR_COL_CP_MSG"/>
- <reg32 offset="0x5101" name="GPU_GMU_CX_GMU_PWR_COL_CP_RESP"/>
- <reg32 offset="0x51f0" name="GMU_BOOT_KMD_LM_HANDSHAKE"/>
- <reg32 offset="0x5157" name="GMU_LLM_GLM_SLEEP_CTRL"/>
- <reg32 offset="0x5158" name="GMU_LLM_GLM_SLEEP_STATUS"/>
- <reg32 offset="0x5088" name="GMU_ALWAYS_ON_COUNTER_L"/>
- <reg32 offset="0x5089" name="GMU_ALWAYS_ON_COUNTER_H"/>
- <reg32 offset="0x50c3" name="GMU_GMU_PWR_COL_KEEPALIVE"/>
- <reg32 offset="0x5180" name="GMU_HFI_CTRL_STATUS"/>
- <reg32 offset="0x5181" name="GMU_HFI_VERSION_INFO"/>
- <reg32 offset="0x5182" name="GMU_HFI_SFR_ADDR"/>
- <reg32 offset="0x5183" name="GMU_HFI_MMAP_ADDR"/>
- <reg32 offset="0x5184" name="GMU_HFI_QTBL_INFO"/>
- <reg32 offset="0x5185" name="GMU_HFI_QTBL_ADDR"/>
- <reg32 offset="0x5186" name="GMU_HFI_CTRL_INIT"/>
- <reg32 offset="0x5190" name="GMU_GMU2HOST_INTR_SET"/>
- <reg32 offset="0x5191" name="GMU_GMU2HOST_INTR_CLR"/>
- <reg32 offset="0x5192" name="GMU_GMU2HOST_INTR_INFO">
+ <reg32 offset="0x1f8e9" name="GMU_RPMH_HYST_CTRL"/>
+ <reg32 offset="0x1f8ec" name="GPU_GMU_CX_GMU_RPMH_POWER_STATE" variants="A6XX"/>
+ <reg32 offset="0x1f7e9" name="GPU_GMU_CX_GMU_RPMH_POWER_STATE" variants="A8XX-"/>
+ <reg32 offset="0x1f8f0" name="GPU_GMU_CX_GMU_CX_FAL_INTF" variants="A6XX"/>
+ <reg32 offset="0x1f7ec" name="GPU_GMU_CX_GMU_CX_FAL_INTF" variants="A8XX-"/>
+ <reg32 offset="0x1f8f1" name="GPU_GMU_CX_GMU_CX_FALNEXT_INTF" variants="A6XX"/>
+ <reg32 offset="0x1f7ed" name="GPU_GMU_CX_GMU_CX_FALNEXT_INTF" variants="A8XX-"/>
+ <reg32 offset="0x1f900" name="GPU_GMU_CX_GMU_PWR_COL_CP_MSG"/>
+ <reg32 offset="0x1f901" name="GPU_GMU_CX_GMU_PWR_COL_CP_RESP"/>
+ <reg32 offset="0x1f9f0" name="GMU_BOOT_KMD_LM_HANDSHAKE"/>
+ <reg32 offset="0x1f957" name="GMU_LLM_GLM_SLEEP_CTRL"/>
+ <reg32 offset="0x1f958" name="GMU_LLM_GLM_SLEEP_STATUS"/>
+ <reg32 offset="0x1f888" name="GMU_ALWAYS_ON_COUNTER_L"/>
+ <reg32 offset="0x1f889" name="GMU_ALWAYS_ON_COUNTER_H"/>
+ <reg32 offset="0x1f8c3" name="GMU_GMU_PWR_COL_KEEPALIVE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x1f7e4" name="GMU_GMU_PWR_COL_KEEPALIVE" variants="A8XX-"/>
+ <reg32 offset="0x1f8c4" name="GMU_PWR_COL_PREEMPT_KEEPALIVE" variants="A6XX-A7XX"/>
+ <reg32 offset="0x1f7e5" name="GMU_PWR_COL_PREEMPT_KEEPALIVE" variants="A8XX-"/>
+ <reg32 offset="0x1f980" name="GMU_HFI_CTRL_STATUS"/>
+ <reg32 offset="0x1f981" name="GMU_HFI_VERSION_INFO"/>
+ <reg32 offset="0x1f982" name="GMU_HFI_SFR_ADDR"/>
+ <reg32 offset="0x1f983" name="GMU_HFI_MMAP_ADDR"/>
+ <reg32 offset="0x1f984" name="GMU_HFI_QTBL_INFO"/>
+ <reg32 offset="0x1f985" name="GMU_HFI_QTBL_ADDR"/>
+ <reg32 offset="0x1f986" name="GMU_HFI_CTRL_INIT"/>
+ <reg32 offset="0x1f990" name="GMU_GMU2HOST_INTR_SET"/>
+ <reg32 offset="0x1f991" name="GMU_GMU2HOST_INTR_CLR"/>
+ <reg32 offset="0x1f992" name="GMU_GMU2HOST_INTR_INFO">
<bitfield name="MSGQ" pos="0" type="boolean"/>
<bitfield name="CM3_FAULT" pos="23" type="boolean"/>
</reg32>
- <reg32 offset="0x5193" name="GMU_GMU2HOST_INTR_MASK"/>
- <reg32 offset="0x5194" name="GMU_HOST2GMU_INTR_SET"/>
- <reg32 offset="0x5195" name="GMU_HOST2GMU_INTR_CLR"/>
- <reg32 offset="0x5196" name="GMU_HOST2GMU_INTR_RAW_INFO"/>
- <reg32 offset="0x5197" name="GMU_HOST2GMU_INTR_EN_0"/>
- <reg32 offset="0x5198" name="GMU_HOST2GMU_INTR_EN_1"/>
- <reg32 offset="0x5199" name="GMU_HOST2GMU_INTR_EN_2"/>
- <reg32 offset="0x519a" name="GMU_HOST2GMU_INTR_EN_3"/>
- <reg32 offset="0x519b" name="GMU_HOST2GMU_INTR_INFO_0"/>
- <reg32 offset="0x519c" name="GMU_HOST2GMU_INTR_INFO_1"/>
- <reg32 offset="0x519d" name="GMU_HOST2GMU_INTR_INFO_2"/>
- <reg32 offset="0x519e" name="GMU_HOST2GMU_INTR_INFO_3"/>
- <reg32 offset="0x51c5" name="GMU_GENERAL_0"/>
- <reg32 offset="0x51c6" name="GMU_GENERAL_1"/>
- <reg32 offset="0x51cb" name="GMU_GENERAL_6"/>
- <reg32 offset="0x51cc" name="GMU_GENERAL_7"/>
- <reg32 offset="0x51cd" name="GMU_GENERAL_8" variants="A7XX"/>
- <reg32 offset="0x51ce" name="GMU_GENERAL_9" variants="A7XX"/>
- <reg32 offset="0x51cf" name="GMU_GENERAL_10" variants="A7XX"/>
- <reg32 offset="0x515d" name="GMU_ISENSE_CTRL"/>
- <reg32 offset="0x8920" name="GPU_CS_ENABLE_REG"/>
- <reg32 offset="0x515d" name="GPU_GMU_CX_GMU_ISENSE_CTRL"/>
- <reg32 offset="0x8578" name="GPU_CS_AMP_CALIBRATION_CONTROL3"/>
- <reg32 offset="0x8558" name="GPU_CS_AMP_CALIBRATION_CONTROL2"/>
- <reg32 offset="0x8580" name="GPU_CS_A_SENSOR_CTRL_0"/>
- <reg32 offset="0x27ada" name="GPU_CS_A_SENSOR_CTRL_2"/>
- <reg32 offset="0x881a" name="GPU_CS_SENSOR_GENERAL_STATUS"/>
- <reg32 offset="0x8957" name="GPU_CS_AMP_CALIBRATION_CONTROL1"/>
- <reg32 offset="0x881a" name="GPU_CS_SENSOR_GENERAL_STATUS"/>
- <reg32 offset="0x881d" name="GPU_CS_AMP_CALIBRATION_STATUS1_0"/>
- <reg32 offset="0x881f" name="GPU_CS_AMP_CALIBRATION_STATUS1_2"/>
- <reg32 offset="0x8821" name="GPU_CS_AMP_CALIBRATION_STATUS1_4"/>
- <reg32 offset="0x8965" name="GPU_CS_AMP_CALIBRATION_DONE"/>
- <reg32 offset="0x896d" name="GPU_CS_AMP_PERIOD_CTRL"/>
- <reg32 offset="0x8965" name="GPU_CS_AMP_CALIBRATION_DONE"/>
- <reg32 offset="0x514d" name="GPU_GMU_CX_GMU_PWR_THRESHOLD"/>
- <reg32 offset="0x9303" name="GMU_AO_INTERRUPT_EN"/>
- <reg32 offset="0x9304" name="GMU_AO_HOST_INTERRUPT_CLR"/>
- <reg32 offset="0x9305" name="GMU_AO_HOST_INTERRUPT_STATUS">
+ <reg32 offset="0x1f993" name="GMU_GMU2HOST_INTR_MASK"/>
+ <reg32 offset="0x1f994" name="GMU_HOST2GMU_INTR_SET"/>
+ <reg32 offset="0x1f995" name="GMU_HOST2GMU_INTR_CLR"/>
+ <reg32 offset="0x1f996" name="GMU_HOST2GMU_INTR_RAW_INFO"/>
+ <reg32 offset="0x1f997" name="GMU_HOST2GMU_INTR_EN_0"/>
+ <reg32 offset="0x1f998" name="GMU_HOST2GMU_INTR_EN_1"/>
+ <reg32 offset="0x1f999" name="GMU_HOST2GMU_INTR_EN_2"/>
+ <reg32 offset="0x1f99a" name="GMU_HOST2GMU_INTR_EN_3"/>
+ <reg32 offset="0x1f99b" name="GMU_HOST2GMU_INTR_INFO_0"/>
+ <reg32 offset="0x1f99c" name="GMU_HOST2GMU_INTR_INFO_1"/>
+ <reg32 offset="0x1f99d" name="GMU_HOST2GMU_INTR_INFO_2"/>
+ <reg32 offset="0x1f99e" name="GMU_HOST2GMU_INTR_INFO_3"/>
+ <reg32 offset="0x1f9c5" name="GMU_GENERAL_0"/>
+ <reg32 offset="0x1f9c6" name="GMU_GENERAL_1"/>
+ <reg32 offset="0x1f9cb" name="GMU_GENERAL_6"/>
+ <reg32 offset="0x1f9cc" name="GMU_GENERAL_7"/>
+ <reg32 offset="0x1f9cd" name="GMU_GENERAL_8" variants="A7XX"/>
+ <reg32 offset="0x1f9ce" name="GMU_GENERAL_9" variants="A7XX"/>
+ <reg32 offset="0x1f9cf" name="GMU_GENERAL_10" variants="A7XX"/>
+ <reg32 offset="0x1f9c0" name="GMU_GENERAL_0" variants="A8XX"/>
+ <reg32 offset="0x1f9c1" name="GMU_GENERAL_1" variants="A8XX"/>
+ <reg32 offset="0x1f9c6" name="GMU_GENERAL_6" variants="A8XX"/>
+ <reg32 offset="0x1f9c7" name="GMU_GENERAL_7" variants="A8XX"/>
+ <reg32 offset="0x1f9c8" name="GMU_GENERAL_8" variants="A8XX"/>
+ <reg32 offset="0x1f9c9" name="GMU_GENERAL_9" variants="A8XX"/>
+ <reg32 offset="0x1f9ca" name="GMU_GENERAL_10" variants="A8XX"/>
+ <reg32 offset="0x1f9cb" name="GMU_GENERAL_11" variants="A8XX"/>
+ <reg32 offset="0x1f95d" name="GMU_ISENSE_CTRL"/>
+ <reg32 offset="0x23120" name="GPU_CS_ENABLE_REG"/>
+ <reg32 offset="0x1f95d" name="GPU_GMU_CX_GMU_ISENSE_CTRL"/>
+ <reg32 offset="0x22d78" name="GPU_CS_AMP_CALIBRATION_CONTROL3"/>
+ <reg32 offset="0x22d58" name="GPU_CS_AMP_CALIBRATION_CONTROL2"/>
+ <reg32 offset="0x22d80" name="GPU_CS_A_SENSOR_CTRL_0"/>
+ <reg32 offset="0x422da" name="GPU_CS_A_SENSOR_CTRL_2"/>
+ <reg32 offset="0x2301a" name="GPU_CS_SENSOR_GENERAL_STATUS"/>
+ <reg32 offset="0x23157" name="GPU_CS_AMP_CALIBRATION_CONTROL1"/>
+ <reg32 offset="0x2301a" name="GPU_CS_SENSOR_GENERAL_STATUS"/>
+ <reg32 offset="0x2301d" name="GPU_CS_AMP_CALIBRATION_STATUS1_0"/>
+ <reg32 offset="0x2301f" name="GPU_CS_AMP_CALIBRATION_STATUS1_2"/>
+ <reg32 offset="0x23021" name="GPU_CS_AMP_CALIBRATION_STATUS1_4"/>
+ <reg32 offset="0x23165" name="GPU_CS_AMP_CALIBRATION_DONE"/>
+ <reg32 offset="0x2316d" name="GPU_CS_AMP_PERIOD_CTRL"/>
+ <reg32 offset="0x23165" name="GPU_CS_AMP_CALIBRATION_DONE"/>
+ <reg32 offset="0x1f94d" name="GPU_GMU_CX_GMU_PWR_THRESHOLD"/>
+ <reg32 offset="0x23b03" name="GMU_AO_INTERRUPT_EN"/>
+ <reg32 offset="0x23b04" name="GMU_AO_HOST_INTERRUPT_CLR"/>
+ <reg32 offset="0x23b05" name="GMU_AO_HOST_INTERRUPT_STATUS">
<bitfield name="WDOG_BITE" pos="0" type="boolean"/>
<bitfield name="RSCC_COMP" pos="1" type="boolean"/>
<bitfield name="VDROOP" pos="2" type="boolean"/>
@@ -186,27 +214,27 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<bitfield name="DBD_WAKEUP" pos="4" type="boolean"/>
<bitfield name="HOST_AHB_BUS_ERROR" pos="5" type="boolean"/>
</reg32>
- <reg32 offset="0x9306" name="GMU_AO_HOST_INTERRUPT_MASK"/>
- <reg32 offset="0x9309" name="GPU_GMU_AO_GMU_CGC_MODE_CNTL"/>
- <reg32 offset="0x930a" name="GPU_GMU_AO_GMU_CGC_DELAY_CNTL"/>
- <reg32 offset="0x930b" name="GPU_GMU_AO_GMU_CGC_HYST_CNTL"/>
- <reg32 offset="0x930c" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS">
+ <reg32 offset="0x23b06" name="GMU_AO_HOST_INTERRUPT_MASK"/>
+ <reg32 offset="0x23b09" name="GPU_GMU_AO_GMU_CGC_MODE_CNTL"/>
+ <reg32 offset="0x23b0a" name="GPU_GMU_AO_GMU_CGC_DELAY_CNTL"/>
+ <reg32 offset="0x23b0b" name="GPU_GMU_AO_GMU_CGC_HYST_CNTL"/>
+ <reg32 offset="0x23b0c" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS">
<bitfield name = "GPUBUSYIGNAHB" pos="23" type="boolean"/>
</reg32>
- <reg32 offset="0x930d" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS2"/>
- <reg32 offset="0x930e" name="GPU_GMU_AO_GPU_CX_BUSY_MASK"/>
- <reg32 offset="0x9310" name="GMU_AO_AHB_FENCE_CTRL"/>
- <reg32 offset="0x9313" name="GMU_AHB_FENCE_STATUS"/>
- <reg32 offset="0x9314" name="GMU_AHB_FENCE_STATUS_CLR"/>
- <reg32 offset="0x9315" name="GMU_RBBM_INT_UNMASKED_STATUS"/>
- <reg32 offset="0x9316" name="GMU_AO_SPARE_CNTL"/>
- <reg32 offset="0x9307" name="GMU_RSCC_CONTROL_REQ"/>
- <reg32 offset="0x9308" name="GMU_RSCC_CONTROL_ACK"/>
- <reg32 offset="0x9311" name="GMU_AHB_FENCE_RANGE_0"/>
- <reg32 offset="0x9312" name="GMU_AHB_FENCE_RANGE_1"/>
- <reg32 offset="0x9c03" name="GPU_CC_GX_GDSCR"/>
- <reg32 offset="0x9d42" name="GPU_CC_GX_DOMAIN_MISC"/>
- <reg32 offset="0xc001" name="GPU_CPR_FSM_CTL"/>
+ <reg32 offset="0x23b0d" name="GPU_GMU_AO_GPU_CX_BUSY_STATUS2"/>
+ <reg32 offset="0x23b0e" name="GPU_GMU_AO_GPU_CX_BUSY_MASK"/>
+ <reg32 offset="0x23b10" name="GMU_AO_AHB_FENCE_CTRL"/>
+ <reg32 offset="0x23b13" name="GMU_AHB_FENCE_STATUS"/>
+ <reg32 offset="0x23b14" name="GMU_AHB_FENCE_STATUS_CLR"/>
+ <reg32 offset="0x23b15" name="GMU_RBBM_INT_UNMASKED_STATUS"/>
+ <reg32 offset="0x23b16" name="GMU_AO_SPARE_CNTL"/>
+ <reg32 offset="0x23b07" name="GMU_RSCC_CONTROL_REQ"/>
+ <reg32 offset="0x23b08" name="GMU_RSCC_CONTROL_ACK"/>
+ <reg32 offset="0x23b11" name="GMU_AHB_FENCE_RANGE_0"/>
+ <reg32 offset="0x23b12" name="GMU_AHB_FENCE_RANGE_1"/>
+ <reg32 offset="0x24403" name="GPU_CC_GX_GDSCR"/>
+ <reg32 offset="0x24542" name="GPU_CC_GX_DOMAIN_MISC"/>
+ <reg32 offset="0x26801" name="GPU_CPR_FSM_CTL"/>
<!-- starts at offset 0x8c00 on most gpus -->
<reg32 offset="0x0004" name="GPU_RSCC_RSC_STATUS0_DRV0"/>
@@ -228,6 +256,12 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x03ee" name="RSCC_TCS1_DRV0_STATUS"/>
<reg32 offset="0x0496" name="RSCC_TCS2_DRV0_STATUS"/>
<reg32 offset="0x053e" name="RSCC_TCS3_DRV0_STATUS"/>
+ <reg32 offset="0x05e6" name="RSCC_TCS4_DRV0_STATUS" variants="A7XX-"/>
+ <reg32 offset="0x068e" name="RSCC_TCS5_DRV0_STATUS" variants="A7XX-"/>
+ <reg32 offset="0x0736" name="RSCC_TCS6_DRV0_STATUS" variants="A7XX-"/>
+ <reg32 offset="0x07de" name="RSCC_TCS7_DRV0_STATUS" variants="A7XX-"/>
+ <reg32 offset="0x0886" name="RSCC_TCS8_DRV0_STATUS" variants="A7XX-"/>
+ <reg32 offset="0x092e" name="RSCC_TCS9_DRV0_STATUS" variants="A7XX-"/>
</domain>
</database>
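
Offsets in these register files count 32-bit dwords, not bytes; every GMU register above moves by the same delta of 0x1a800 dwords (e.g. 0x5191 -> 0x1f991, 0x9304 -> 0x23b04). A minimal sketch of the convention, with hypothetical helper names (the real driver wraps MMIO in its own accessors); the RSCC stride is an observation from the listed values, not a documented rule:

#include <stdint.h>

#define GMU_GMU2HOST_INTR_CLR 0x1f991   /* dword offset from the XML above */

/* Byte address = base + (dword_off << 2); indexing a uint32_t pointer
 * does the shift implicitly. */
static inline uint32_t gmu_read(volatile uint32_t *mmio, uint32_t dword_off)
{
        return mmio[dword_off];
}

/* RSCC_TCSn_DRV0_STATUS sits at a fixed stride of 0xa8 dwords
 * (0x03ee, 0x0496, 0x053e, ...), so the A7XX+ additions follow
 * base + n * 0xa8, with n = 0 for TCS1 through n = 8 for TCS9. */
static inline uint32_t rscc_tcs_status_offset(unsigned n)
{
        return 0x03ee + n * 0xa8;
}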
diff --git a/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml
index 661b0dd0f675..8d195ee5d284 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a7xx_enums.xml
@@ -93,13 +93,6 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value value="4" name="A7XX_HLSQ_DP_STR"/>
</enum>
-<enum name="a7xx_pipe">
- <value value="0" name="A7XX_PIPE_NONE"/>
- <value value="1" name="A7XX_PIPE_BR"/>
- <value value="2" name="A7XX_PIPE_BV"/>
- <value value="3" name="A7XX_PIPE_LPAC"/>
-</enum>
-
<enum name="a7xx_cluster">
<value value="0" name="A7XX_CLUSTER_NONE"/>
<value value="1" name="A7XX_CLUSTER_FE"/>
diff --git a/drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml b/drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml
new file mode 100644
index 000000000000..edcbdb3b6921
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a8xx_descriptors.xml
@@ -0,0 +1,121 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+<import file="adreno/a6xx_enums.xml"/>
+<import file="adreno/a8xx_enums.xml"/>
+
+<domain name="A8XX_TEX_SAMP" width="32">
+ <doc>Texture sampler dwords</doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="MIPFILTER_LINEAR_NEAR" pos="0" type="boolean"/>
+ <bitfield name="MIPMAPING_DIS" pos="1" type="boolean"/>
+ <bitfield name="XY_MAG" low="2" high="3" type="a6xx_tex_filter"/>
+ <bitfield name="XY_MIN" low="4" high="5" type="a6xx_tex_filter"/>
+ <bitfield name="WRAP_S" low="6" high="8" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_T" low="9" high="11" type="a6xx_tex_clamp"/>
+ <bitfield name="WRAP_R" low="12" high="14" type="a6xx_tex_clamp"/>
+ <bitfield name="MSAA_BOX_FILTERING" pos="15" type="boolean"/>
+ <bitfield name="LOD_BIAS" low="16" high="28" type="fixed" radix="8"/>
+ <bitfield name="ANISO" low="29" high="31" type="a6xx_tex_aniso"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="MAX_LOD" low="0" high="11" type="ufixed" radix="8"/>
+ <bitfield name="MIN_LOD" low="12" high="23" type="ufixed" radix="8"/>
+ <bitfield name="REDUCTION_MODE" low="24" high="25" type="a6xx_reduction_mode"/>
+ <bitfield name="COMPARE_FUNC" low="26" high="28" type="adreno_compare_func"/>
+ <bitfield name="CHROMA_LINEAR" pos="29" type="boolean"/>
+ <bitfield name="CUBEMAPSEAMLESSFILTOFF" pos="30" type="boolean"/>
+ <bitfield name="UNNORM_COORDS" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="FASTBORDERCOLOREN" pos="0" type="boolean"/>
+ <bitfield name="FASTBORDERCOLOR" low="1" high="2" type="a6xx_fast_border_color"/>
+ <bitfield name="BCOLOR" low="7" high="31"/>
+ </reg32>
+ <reg32 offset="3" name="3"/>
+</domain>
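
LOD_BIAS above is declared type="fixed" radix="8": a signed fixed-point value with 8 fractional bits occupying bits [28:16]. A hedged sketch of packing dword 0 from these fields; the filter/clamp arguments stand in for a6xx_tex_filter / a6xx_tex_clamp enum values and the helper name is illustrative:

#include <stdint.h>

static uint32_t a8xx_tex_samp0(unsigned xy_mag, unsigned xy_min,
                               unsigned wrap_s, unsigned wrap_t,
                               unsigned wrap_r, float lod_bias)
{
        /* 13-bit signed fixed point, 8 fractional bits (radix="8") */
        uint32_t bias = (uint32_t)(int32_t)(lod_bias * 256.0f) & 0x1fff;

        return (xy_mag & 0x3u) << 2 |   /* XY_MAG,   bits [3:2]   */
               (xy_min & 0x3u) << 4 |   /* XY_MIN,   bits [5:4]   */
               (wrap_s & 0x7u) << 6 |   /* WRAP_S,   bits [8:6]   */
               (wrap_t & 0x7u) << 9 |   /* WRAP_T,   bits [11:9]  */
               (wrap_r & 0x7u) << 12 |  /* WRAP_R,   bits [14:12] */
               bias << 16;              /* LOD_BIAS, bits [28:16] */
}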
+
+<domain name="A8XX_TEX_MEMOBJ" width="32" varset="chip">
+ <doc>Texture memobj dwords</doc>
+ <reg32 offset="0" name="0">
+ <bitfield name="BASE_LO" low="6" high="31" shr="6"/>
+ </reg32>
+ <reg32 offset="1" name="1">
+ <bitfield name="BASE_HI" low="0" high="16"/>
+ <bitfield name="TYPE" low="17" high="19" type="a6xx_tex_type"/>
+ <bitfield name="DEPTH" low="20" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="2" name="2">
+ <bitfield name="WIDTH" low="0" high="14" type="uint"/>
+ <bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ <bitfield name="SAMPLES" low="30" high="31" type="a3xx_msaa_samples"/>
+ </reg32>
+ <reg32 offset="3" name="3">
+ <bitfield name="FMT" low="0" high="7" type="a6xx_format"/>
+ <bitfield name="SWAP" low="8" high="9" type="a3xx_color_swap"/>
+ <bitfield name="SWIZ_X" low="10" high="12" type="a8xx_tex_swiz"/>
+ <bitfield name="SWIZ_Y" low="13" high="15" type="a8xx_tex_swiz"/>
+ <bitfield name="SWIZ_Z" low="16" high="18" type="a8xx_tex_swiz"/>
+ <bitfield name="SWIZ_W" low="19" high="21" type="a8xx_tex_swiz"/>
+ </reg32>
+ <reg32 offset="4" name="4">
+ <bitfield name="TILE_MODE" low="0" high="1" type="a6xx_tile_mode"/>
+ <bitfield name="FLAG" pos="2" type="boolean"/>
+ <bitfield name="PRT_EN" pos="3" type="boolean"/>
+ <bitfield name="TILE_ALL" pos="4" type="boolean"/>
+ <bitfield name="SRGB" pos="5" type="boolean"/>
+ <bitfield name="FLAG_LO" low="6" high="31" shr="6"/>
+ <!-- For multiplanar: -->
+ <bitfield name="BASE_U_LO" low="6" high="31" shr="6"/>
+ </reg32>
+ <reg32 offset="5" name="5">
+ <bitfield name="FLAG_HI" low="0" high="16"/>
+ <!-- For multiplanar: -->
+ <bitfield name="BASE_U_HI" low="0" high="16"/>
+ <bitfield name="FLAG_BUFFER_PITCH" low="17" high="24" shr="6" type="uint"/>
+ <bitfield name="ALL_SAMPLES_CENTER" pos="29" type="boolean"/>
+ <bitfield name="MUTABLEEN" pos="31" type="boolean"/>
+ </reg32>
+ <reg32 offset="6" name="6">
+ <bitfield name="TEX_LINE_OFFSET" low="0" high="23" type="uint"/> <!-- PITCH -->
+ <bitfield name="MIN_LINE_OFFSET" low="24" high="27" type="uint"/> <!-- PITCHALIGN -->
+ <bitfield name="MIPLVLS" low="28" high="31" type="uint"/>
+ </reg32>
+ <reg32 offset="7" name="7">
+ <bitfield name="ARRAY_SLICE_OFFSET" low="0" high="22" shr="12" type="uint"/> <!-- ARRAY_PITCH -->
+ <bitfield name="ASO_UNIT" pos="23"/> <!-- 4KB or 32B ? -->
+ <bitfield name="MIN_ARRAY_SLIZE_OFFSET" low="24" high="27" shr="12"/> <!-- MIN_LAYERSZ -->
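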
+ <bitfield name="GMEM_TILING_FALLBACK_EN" pos="28" type="boolean"/>
+ <bitfield name="CORNER_BASED_EN" pos="30" type="boolean"/>
+ <bitfield name="GMEM_FULL_SURF" pos="31" type="boolean"/>
+ <!-- For multiplanar. This overlaps other single-planar fields: -->
+ <bitfield name="UV_OFFSET_H" low="24" high="25" type="ufixed" radix="2"/> <!-- CHROMA_MIDPOINT_X -->
+ <bitfield name="UV_OFFSET_V" low="26" high="27" type="ufixed" radix="2"/> <!-- CHROMA_MIDPOINT_Y -->
+ </reg32>
+ <reg32 offset="8" name="8">
+ <bitfield name="FLAG_ARRAY_PITCH" low="0" high="14" shr="12" type="uint"/> <!-- FLAG_BUFFER_ARRAY_PITCH -->
+ <!-- log2 size of the first level, required for mipmapping -->
+ <bitfield name="FLAG_BUFFER_LOGW" low="24" high="27" type="uint"/>
+ <bitfield name="FLAG_BUFFER_LOGH" low="28" high="31" type="uint"/>
+ <!-- For multiplanar. This overlaps other single-planar fields: -->
+ <bitfield name="BASE_V_LO" low="6" high="31" shr="6"/>
+ </reg32>
+ <reg32 offset="9" name="9">
+ <bitfield name="MIN_LOD_CLAMP" low="19" high="30" type="ufixed" radix="8"/>
+ <!-- For multiplanar, this overlaps other fields: -->
+ <bitfield name="BASE_V_HI" low="0" high="16"/>
+ <bitfield name="UV_PITCH" low="17" high="26"/> <!-- PLANE_PITCH -->
+ </reg32>
+ <reg32 offset="10" name="10"/>
+ <reg32 offset="11" name="11"/>
+ <reg32 offset="12" name="12"/>
+ <reg32 offset="13" name="13"/>
+ <reg32 offset="14" name="14"/>
+ <reg32 offset="15" name="15"/>
+</domain>
+
+</database>
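
shr="6" on BASE_LO means the descriptor stores the address pre-shifted: bits [31:6] of dword 0 hold iova[31:6], forcing 64-byte alignment, and BASE_HI carries iova[48:32] in dword 1. A minimal sketch under that reading; the helper is illustrative:

#include <stdint.h>

static void a8xx_tex_pack_base(uint64_t iova, uint32_t *dw0, uint32_t *dw1)
{
        /* BASE_LO: (iova >> 6) placed at bit 6 == iova with the low
         * 6 bits masked off; iova must be 64-byte aligned. */
        *dw0 = (uint32_t)iova & ~0x3fu;
        /* BASE_HI: bits [16:0] of dword 1 hold iova[48:32]. */
        *dw1 = (uint32_t)(iova >> 32) & 0x1ffffu;
}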
diff --git a/drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml b/drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml
new file mode 100644
index 000000000000..c842db8c78d6
--- /dev/null
+++ b/drivers/gpu/drm/msm/registers/adreno/a8xx_enums.xml
@@ -0,0 +1,299 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<database xmlns="http://nouveau.freedesktop.org/"
+xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
+<import file="freedreno_copyright.xml"/>
+<import file="adreno/adreno_common.xml"/>
+<import file="adreno/adreno_pm4.xml"/>
+
+<enum name="a8xx_statetype_id">
+ <value value="0" name="A8XX_TP0_NCTX_REG"/>
+ <value value="1" name="A8XX_TP0_CTX0_3D_CVS_REG"/>
+ <value value="2" name="A8XX_TP0_CTX0_3D_CPS_REG"/>
+ <value value="3" name="A8XX_TP0_CTX1_3D_CVS_REG"/>
+ <value value="4" name="A8XX_TP0_CTX1_3D_CPS_REG"/>
+ <value value="5" name="A8XX_TP0_CTX2_3D_CPS_REG"/>
+ <value value="6" name="A8XX_TP0_CTX3_3D_CPS_REG"/>
+ <value value="9" name="A8XX_TP0_TMO_DATA"/>
+ <value value="10" name="A8XX_TP0_SMO_DATA"/>
+ <value value="11" name="A8XX_TP0_MIPMAP_BASE_DATA"/>
+ <value value="12" name="A8XX_TP_3D_CVS_REG"/>
+ <value value="13" name="A8XX_TP_3D_CPS_REG"/>
+ <value value="16" name="A8XX_SP_3D_CVS_REG"/>
+ <value value="17" name="A8XX_SP_3D_CPS_REG"/>
+ <value value="22" name="A8XX_SP_LB_DATA_RAM"/>
+ <value value="23" name="A8XX_SP_INST_DATA_RAM"/>
+ <value value="24" name="A8XX_SP_STH"/>
+ <value value="25" name="A8XX_SP_EVQ"/>
+ <value value="26" name="A8XX_SP_CONSMNG"/>
+ <value value="30" name="A8XX_HLSQ_INST_DATA_RAM"/>
+ <value value="31" name="A8XX_SP_INST_DATA_3"/>
+ <value value="32" name="A8XX_SP_NCTX_REG"/>
+ <value value="33" name="A8XX_SP_CTX0_3D_CVS_REG"/>
+ <value value="34" name="A8XX_SP_CTX0_3D_CPS_REG"/>
+ <value value="35" name="A8XX_SP_CTX1_3D_CVS_REG"/>
+ <value value="36" name="A8XX_SP_CTX1_3D_CPS_REG"/>
+ <value value="37" name="A8XX_SP_CTX2_3D_CPS_REG"/>
+ <value value="38" name="A8XX_SP_CTX3_3D_CPS_REG"/>
+ <value value="39" name="A8XX_SP_INST_DATA"/>
+ <value value="40" name="A8XX_SP_INST_DATA_1"/>
+ <value value="41" name="A8XX_SP_LB_0_DATA"/>
+ <value value="42" name="A8XX_SP_LB_1_DATA"/>
+ <value value="43" name="A8XX_SP_LB_2_DATA"/>
+ <value value="44" name="A8XX_SP_LB_3_DATA"/>
+ <value value="45" name="A8XX_SP_LB_4_DATA"/>
+ <value value="46" name="A8XX_SP_LB_5_DATA"/>
+ <value value="47" name="A8XX_SP_LB_6_DATA"/>
+ <value value="48" name="A8XX_SP_LB_7_DATA"/>
+ <value value="49" name="A8XX_SP_CB_RAM"/>
+ <value value="50" name="A8XX_SP_LB_13_DATA"/>
+ <value value="51" name="A8XX_SP_LB_14_DATA"/>
+ <value value="52" name="A8XX_SP_INST_TAG"/>
+ <value value="53" name="A8XX_SP_INST_DATA_2"/>
+ <value value="54" name="A8XX_SP_TMO_TAG"/>
+ <value value="55" name="A8XX_SP_SMO_TAG"/>
+ <value value="56" name="A8XX_SP_STATE_DATA"/>
+ <value value="57" name="A8XX_SP_HWAVE_RAM"/>
+ <value value="58" name="A8XX_SP_L0_INST_BUF"/>
+ <value value="59" name="A8XX_SP_LB_8_DATA"/>
+ <value value="60" name="A8XX_SP_LB_9_DATA"/>
+ <value value="61" name="A8XX_SP_LB_10_DATA"/>
+ <value value="62" name="A8XX_SP_LB_11_DATA"/>
+ <value value="63" name="A8XX_SP_LB_12_DATA"/>
+ <value value="64" name="A8XX_HLSQ_DATAPATH_DSTR_META"/>
+ <value value="65" name="A8XX_HLSQ_DESC_REMAP_META"/>
+ <value value="66" name="A8XX_HLSQ_SLICE_TOP_META"/>
+ <value value="67" name="A8XX_HLSQ_L2STC_TAG_RAM"/>
+ <value value="68" name="A8XX_HLSQ_L2STC_INFO_CMD"/>
+ <value value="69" name="A8XX_HLSQ_CVS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="70" name="A8XX_HLSQ_CPS_BE_CTXT_BUF_RAM_TAG"/>
+ <value value="71" name="A8XX_HLSQ_GFX_CVS_BE_CTXT_BUF_RAM"/>
+ <value value="72" name="A8XX_HLSQ_GFX_CPS_BE_CTXT_BUF_RAM"/>
+ <value value="73" name="A8XX_HLSQ_CHUNK_CVS_RAM"/>
+ <value value="74" name="A8XX_HLSQ_CHUNK_CPS_RAM"/>
+ <value value="75" name="A8XX_HLSQ_CHUNK_CVS_RAM_TAG"/>
+ <value value="76" name="A8XX_HLSQ_CHUNK_CPS_RAM_TAG"/>
+ <value value="77" name="A8XX_HLSQ_ICB_CVS_CB_BASE_TAG"/>
+ <value value="78" name="A8XX_HLSQ_ICB_CPS_CB_BASE_TAG"/>
+ <value value="79" name="A8XX_HLSQ_CVS_MISC_RAM"/>
+ <value value="80" name="A8XX_HLSQ_CPS_MISC_RAM"/>
+ <value value="81" name="A8XX_HLSQ_CPS_MISC_RAM_1"/>
+ <value value="82" name="A8XX_HLSQ_INST_RAM"/>
+ <value value="83" name="A8XX_HLSQ_GFX_CVS_CONST_RAM"/>
+ <value value="84" name="A8XX_HLSQ_GFX_CPS_CONST_RAM"/>
+ <value value="85" name="A8XX_HLSQ_CVS_MISC_RAM_TAG"/>
+ <value value="86" name="A8XX_HLSQ_CPS_MISC_RAM_TAG"/>
+ <value value="87" name="A8XX_HLSQ_INST_RAM_TAG"/>
+ <value value="88" name="A8XX_HLSQ_GFX_CVS_CONST_RAM_TAG"/>
+ <value value="89" name="A8XX_HLSQ_GFX_CPS_CONST_RAM_TAG"/>
+ <value value="90" name="A8XX_HLSQ_GFX_LOCAL_MISC_RAM"/>
+ <value value="91" name="A8XX_HLSQ_GFX_LOCAL_MISC_RAM_TAG"/>
+ <value value="92" name="A8XX_HLSQ_INST_RAM_1"/>
+ <value value="93" name="A8XX_HLSQ_STPROC_META"/>
+ <value value="94" name="A8XX_HLSQ_SLICE_BACKEND_META"/>
+ <value value="95" name="A8XX_HLSQ_INST_RAM_2"/>
+ <value value="96" name="A8XX_HLSQ_DATAPATH_META"/>
+ <value value="97" name="A8XX_HLSQ_FRONTEND_META"/>
+ <value value="98" name="A8XX_HLSQ_INDIRECT_META"/>
+ <value value="99" name="A8XX_HLSQ_BACKEND_META"/>
+</enum>
+
+<enum name="a8xx_state_location">
+ <value value="0" name="A8XX_HLSQ_STATE"/>
+ <value value="1" name="A8XX_HLSQ_DP"/>
+ <value value="2" name="A8XX_SP_TOP"/>
+ <value value="3" name="A8XX_USPTP"/>
+ <value value="4" name="A8XX_HLSQ_DP_STR"/>
+</enum>
+
+<enum name="a8xx_cluster">
+ <value value="0" name="A8XX_CLUSTER_NONE"/>
+ <value value="1" name="A8XX_CLUSTER_FE_US"/>
+ <value value="2" name="A8XX_CLUSTER_FE_S"/>
+ <value value="3" name="A8XX_CLUSTER_SP_VS"/>
+ <value value="4" name="A8XX_CLUSTER_VPC_VS"/>
+ <value value="5" name="A8XX_CLUSTER_VPC_US"/>
+ <value value="6" name="A8XX_CLUSTER_GRAS"/>
+ <value value="7" name="A8XX_CLUSTER_SP_PS"/>
+ <value value="8" name="A8XX_CLUSTER_VPC_PS"/>
+ <value value="9" name="A8XX_CLUSTER_PS"/>
+</enum>
+
+<enum name="a8xx_debugbus_id">
+ <value value="1" name="A8XX_DEBUGBUS_GBIF_CX_GC_US_I_0"/>
+ <value value="2" name="A8XX_DEBUGBUS_GMU_CX_GC_US_I_0"/>
+ <value value="3" name="A8XX_DEBUGBUS_CX_GC_US_I_0"/>
+ <value value="8" name="A8XX_DEBUGBUS_GBIF_GX_GC_US_I_0"/>
+ <value value="9" name="A8XX_DEBUGBUS_GMU_GX_GC_US_I_0"/>
+ <value value="10" name="A8XX_DEBUGBUS_DBGC_GC_US_I_0"/>
+ <value value="11" name="A8XX_DEBUGBUS_RBBM_GC_US_I_0"/>
+ <value value="12" name="A8XX_DEBUGBUS_LARC_GC_US_I_0"/>
+ <value value="13" name="A8XX_DEBUGBUS_COM_GC_US_I_0"/>
+ <value value="14" name="A8XX_DEBUGBUS_HLSQ_GC_US_I_0"/>
+ <value value="15" name="A8XX_DEBUGBUS_CGC_GC_US_I_0"/>
+ <value value="20" name="A8XX_DEBUGBUS_VSC_GC_US_I_0_0"/>
+ <value value="21" name="A8XX_DEBUGBUS_VSC_GC_US_I_0_1"/>
+ <value value="24" name="A8XX_DEBUGBUS_UFC_GC_US_I_0"/>
+ <value value="25" name="A8XX_DEBUGBUS_UFC_GC_US_I_1"/>
+ <value value="40" name="A8XX_DEBUGBUS_CP_GC_US_I_0_0"/>
+ <value value="41" name="A8XX_DEBUGBUS_CP_GC_US_I_0_1"/>
+ <value value="42" name="A8XX_DEBUGBUS_CP_GC_US_I_0_2"/>
+ <value value="56" name="A8XX_DEBUGBUS_PC_BR_US_I_0"/>
+ <value value="57" name="A8XX_DEBUGBUS_PC_BV_US_I_0"/>
+ <value value="58" name="A8XX_DEBUGBUS_GPC_BR_US_I_0"/>
+ <value value="59" name="A8XX_DEBUGBUS_GPC_BV_US_I_0"/>
+ <value value="60" name="A8XX_DEBUGBUS_VPC_BR_US_I_0"/>
+ <value value="61" name="A8XX_DEBUGBUS_VPC_BV_US_I_0"/>
+ <value value="80" name="A8XX_DEBUGBUS_UCHE_WRAPPER_GC_US_I_0"/>
+ <value value="81" name="A8XX_DEBUGBUS_UCHE_GC_US_I_0"/>
+ <value value="82" name="A8XX_DEBUGBUS_UCHE_GC_US_I_1"/>
+ <value value="83" name="A8XX_DEBUGBUS_UCHE_GC_US_I_0_1"/>
+ <value value="84" name="A8XX_DEBUGBUS_UCHE_GC_US_I_1_1"/>
+ <value value="128" name="A8XX_DEBUGBUS_CP_GC_S_0_I_0"/>
+ <value value="129" name="A8XX_DEBUGBUS_PC_BR_S_0_I_0"/>
+ <value value="130" name="A8XX_DEBUGBUS_PC_BV_S_0_I_0"/>
+ <value value="131" name="A8XX_DEBUGBUS_TESS_GC_S_0_I_0"/>
+ <value value="132" name="A8XX_DEBUGBUS_TSEFE_GC_S_0_I_0"/>
+ <value value="133" name="A8XX_DEBUGBUS_TSEBE_GC_S_0_I_0"/>
+ <value value="134" name="A8XX_DEBUGBUS_RAS_GC_S_0_I_0"/>
+ <value value="135" name="A8XX_DEBUGBUS_LRZ_BR_S_0_I_0"/>
+ <value value="136" name="A8XX_DEBUGBUS_LRZ_BV_S_0_I_0"/>
+ <value value="137" name="A8XX_DEBUGBUS_VFDP_GC_S_0_I_0"/>
+ <value value="138" name="A8XX_DEBUGBUS_GPC_BR_S_0_I_0"/>
+ <value value="139" name="A8XX_DEBUGBUS_GPC_BV_S_0_I_0"/>
+ <value value="140" name="A8XX_DEBUGBUS_VPCFE_BR_S_0_I_0"/>
+ <value value="141" name="A8XX_DEBUGBUS_VPCFE_BV_S_0_I_0"/>
+ <value value="142" name="A8XX_DEBUGBUS_VPCBE_BR_S_0_I_0"/>
+ <value value="143" name="A8XX_DEBUGBUS_VPCBE_BV_S_0_I_0"/>
+ <value value="144" name="A8XX_DEBUGBUS_CCHE_GC_S_0_I_0"/>
+ <value value="145" name="A8XX_DEBUGBUS_DBGC_GC_S_0_I_0"/>
+ <value value="146" name="A8XX_DEBUGBUS_LARC_GC_S_0_I_0"/>
+ <value value="147" name="A8XX_DEBUGBUS_RBBM_GC_S_0_I_0"/>
+ <value value="148" name="A8XX_DEBUGBUS_CCRE_GC_S_0_I_0"/>
+ <value value="149" name="A8XX_DEBUGBUS_CGC_GC_S_0_I_0"/>
+ <value value="150" name="A8XX_DEBUGBUS_GMU_GC_S_0_I_0"/>
+ <value value="151" name="A8XX_DEBUGBUS_SLICE_GC_S_0_I_0"/>
+ <value value="152" name="A8XX_DEBUGBUS_HLSQ_SPTP_STAR_GC_S_0_I_0"/>
+ <value value="160" name="A8XX_DEBUGBUS_USP_GC_S_0_I_0"/>
+ <value value="161" name="A8XX_DEBUGBUS_USP_GC_S_0_I_1"/>
+ <value value="166" name="A8XX_DEBUGBUS_USPTP_GC_S_0_I_0"/>
+ <value value="167" name="A8XX_DEBUGBUS_USPTP_GC_S_0_I_1"/>
+ <value value="168" name="A8XX_DEBUGBUS_USPTP_GC_S_0_I_2"/>
+ <value value="169" name="A8XX_DEBUGBUS_USPTP_GC_S_0_I_3"/>
+ <value value="178" name="A8XX_DEBUGBUS_TP_GC_S_0_I_0"/>
+ <value value="179" name="A8XX_DEBUGBUS_TP_GC_S_0_I_1"/>
+ <value value="180" name="A8XX_DEBUGBUS_TP_GC_S_0_I_2"/>
+ <value value="181" name="A8XX_DEBUGBUS_TP_GC_S_0_I_3"/>
+ <value value="190" name="A8XX_DEBUGBUS_RB_GC_S_0_I_0"/>
+ <value value="191" name="A8XX_DEBUGBUS_RB_GC_S_0_I_1"/>
+ <value value="196" name="A8XX_DEBUGBUS_CCU_GC_S_0_I_0"/>
+ <value value="197" name="A8XX_DEBUGBUS_CCU_GC_S_0_I_1"/>
+ <value value="202" name="A8XX_DEBUGBUS_HLSQ_GC_S_0_I_0"/>
+ <value value="203" name="A8XX_DEBUGBUS_HLSQ_GC_S_0_I_1"/>
+ <value value="208" name="A8XX_DEBUGBUS_VFD_GC_S_0_I_0"/>
+ <value value="209" name="A8XX_DEBUGBUS_VFD_GC_S_0_I_1"/>
+ <value value="256" name="A8XX_DEBUGBUS_CP_GC_S_1_I_0"/>
+ <value value="257" name="A8XX_DEBUGBUS_PC_BR_S_1_I_0"/>
+ <value value="258" name="A8XX_DEBUGBUS_PC_BV_S_1_I_0"/>
+ <value value="259" name="A8XX_DEBUGBUS_TESS_GC_S_1_I_0"/>
+ <value value="260" name="A8XX_DEBUGBUS_TSEFE_GC_S_1_I_0"/>
+ <value value="261" name="A8XX_DEBUGBUS_TSEBE_GC_S_1_I_0"/>
+ <value value="262" name="A8XX_DEBUGBUS_RAS_GC_S_1_I_0"/>
+ <value value="263" name="A8XX_DEBUGBUS_LRZ_BR_S_1_I_0"/>
+ <value value="264" name="A8XX_DEBUGBUS_LRZ_BV_S_1_I_0"/>
+ <value value="265" name="A8XX_DEBUGBUS_VFDP_GC_S_1_I_0"/>
+ <value value="266" name="A8XX_DEBUGBUS_GPC_BR_S_1_I_0"/>
+ <value value="267" name="A8XX_DEBUGBUS_GPC_BV_S_1_I_0"/>
+ <value value="268" name="A8XX_DEBUGBUS_VPCFE_BR_S_1_I_0"/>
+ <value value="269" name="A8XX_DEBUGBUS_VPCFE_BV_S_1_I_0"/>
+ <value value="270" name="A8XX_DEBUGBUS_VPCBE_BR_S_1_I_0"/>
+ <value value="271" name="A8XX_DEBUGBUS_VPCBE_BV_S_1_I_0"/>
+ <value value="272" name="A8XX_DEBUGBUS_CCHE_GC_S_1_I_0"/>
+ <value value="273" name="A8XX_DEBUGBUS_DBGC_GC_S_1_I_0"/>
+ <value value="274" name="A8XX_DEBUGBUS_LARC_GC_S_1_I_0"/>
+ <value value="275" name="A8XX_DEBUGBUS_RBBM_GC_S_1_I_0"/>
+ <value value="276" name="A8XX_DEBUGBUS_CCRE_GC_S_1_I_0"/>
+ <value value="277" name="A8XX_DEBUGBUS_CGC_GC_S_1_I_0"/>
+ <value value="278" name="A8XX_DEBUGBUS_GMU_GC_S_1_I_0"/>
+ <value value="279" name="A8XX_DEBUGBUS_SLICE_GC_S_1_I_0"/>
+ <value value="280" name="A8XX_DEBUGBUS_HLSQ_SPTP_STAR_GC_S_1_I_0"/>
+ <value value="288" name="A8XX_DEBUGBUS_USP_GC_S_1_I_0"/>
+ <value value="289" name="A8XX_DEBUGBUS_USP_GC_S_1_I_1"/>
+ <value value="294" name="A8XX_DEBUGBUS_USPTP_GC_S_1_I_0"/>
+ <value value="295" name="A8XX_DEBUGBUS_USPTP_GC_S_1_I_1"/>
+ <value value="296" name="A8XX_DEBUGBUS_USPTP_GC_S_1_I_2"/>
+ <value value="297" name="A8XX_DEBUGBUS_USPTP_GC_S_1_I_3"/>
+ <value value="306" name="A8XX_DEBUGBUS_TP_GC_S_1_I_0"/>
+ <value value="307" name="A8XX_DEBUGBUS_TP_GC_S_1_I_1"/>
+ <value value="308" name="A8XX_DEBUGBUS_TP_GC_S_1_I_2"/>
+ <value value="309" name="A8XX_DEBUGBUS_TP_GC_S_1_I_3"/>
+ <value value="318" name="A8XX_DEBUGBUS_RB_GC_S_1_I_0"/>
+ <value value="319" name="A8XX_DEBUGBUS_RB_GC_S_1_I_1"/>
+ <value value="324" name="A8XX_DEBUGBUS_CCU_GC_S_1_I_0"/>
+ <value value="325" name="A8XX_DEBUGBUS_CCU_GC_S_1_I_1"/>
+ <value value="330" name="A8XX_DEBUGBUS_HLSQ_GC_S_1_I_0"/>
+ <value value="331" name="A8XX_DEBUGBUS_HLSQ_GC_S_1_I_1"/>
+ <value value="336" name="A8XX_DEBUGBUS_VFD_GC_S_1_I_0"/>
+ <value value="337" name="A8XX_DEBUGBUS_VFD_GC_S_1_I_1"/>
+ <value value="384" name="A8XX_DEBUGBUS_CP_GC_S_2_I_0"/>
+ <value value="385" name="A8XX_DEBUGBUS_PC_BR_S_2_I_0"/>
+ <value value="386" name="A8XX_DEBUGBUS_PC_BV_S_2_I_0"/>
+ <value value="387" name="A8XX_DEBUGBUS_TESS_GC_S_2_I_0"/>
+ <value value="388" name="A8XX_DEBUGBUS_TSEFE_GC_S_2_I_0"/>
+ <value value="389" name="A8XX_DEBUGBUS_TSEBE_GC_S_2_I_0"/>
+ <value value="390" name="A8XX_DEBUGBUS_RAS_GC_S_2_I_0"/>
+ <value value="391" name="A8XX_DEBUGBUS_LRZ_BR_S_2_I_0"/>
+ <value value="392" name="A8XX_DEBUGBUS_LRZ_BV_S_2_I_0"/>
+ <value value="393" name="A8XX_DEBUGBUS_VFDP_GC_S_2_I_0"/>
+ <value value="394" name="A8XX_DEBUGBUS_GPC_BR_S_2_I_0"/>
+ <value value="395" name="A8XX_DEBUGBUS_GPC_BV_S_2_I_0"/>
+ <value value="396" name="A8XX_DEBUGBUS_VPCFE_BR_S_2_I_0"/>
+ <value value="397" name="A8XX_DEBUGBUS_VPCFE_BV_S_2_I_0"/>
+ <value value="398" name="A8XX_DEBUGBUS_VPCBE_BR_S_2_I_0"/>
+ <value value="399" name="A8XX_DEBUGBUS_VPCBE_BV_S_2_I_0"/>
+ <value value="400" name="A8XX_DEBUGBUS_CCHE_GC_S_2_I_0"/>
+ <value value="401" name="A8XX_DEBUGBUS_DBGC_GC_S_2_I_0"/>
+ <value value="402" name="A8XX_DEBUGBUS_LARC_GC_S_2_I_0"/>
+ <value value="403" name="A8XX_DEBUGBUS_RBBM_GC_S_2_I_0"/>
+ <value value="404" name="A8XX_DEBUGBUS_CCRE_GC_S_2_I_0"/>
+ <value value="405" name="A8XX_DEBUGBUS_CGC_GC_S_2_I_0"/>
+ <value value="406" name="A8XX_DEBUGBUS_GMU_GC_S_2_I_0"/>
+ <value value="407" name="A8XX_DEBUGBUS_SLICE_GC_S_2_I_0"/>
+ <value value="408" name="A8XX_DEBUGBUS_HLSQ_SPTP_STAR_GC_S_2_I_0"/>
+ <value value="416" name="A8XX_DEBUGBUS_USP_GC_S_2_I_0"/>
+ <value value="417" name="A8XX_DEBUGBUS_USP_GC_S_2_I_1"/>
+ <value value="422" name="A8XX_DEBUGBUS_USPTP_GC_S_2_I_0"/>
+ <value value="423" name="A8XX_DEBUGBUS_USPTP_GC_S_2_I_1"/>
+ <value value="424" name="A8XX_DEBUGBUS_USPTP_GC_S_2_I_2"/>
+ <value value="425" name="A8XX_DEBUGBUS_USPTP_GC_S_2_I_3"/>
+ <value value="434" name="A8XX_DEBUGBUS_TP_GC_S_2_I_0"/>
+ <value value="435" name="A8XX_DEBUGBUS_TP_GC_S_2_I_1"/>
+ <value value="436" name="A8XX_DEBUGBUS_TP_GC_S_2_I_2"/>
+ <value value="437" name="A8XX_DEBUGBUS_TP_GC_S_2_I_3"/>
+ <value value="446" name="A8XX_DEBUGBUS_RB_GC_S_2_I_0"/>
+ <value value="447" name="A8XX_DEBUGBUS_RB_GC_S_2_I_1"/>
+ <value value="452" name="A8XX_DEBUGBUS_CCU_GC_S_2_I_0"/>
+ <value value="453" name="A8XX_DEBUGBUS_CCU_GC_S_2_I_1"/>
+ <value value="458" name="A8XX_DEBUGBUS_HLSQ_GC_S_2_I_0"/>
+ <value value="459" name="A8XX_DEBUGBUS_HLSQ_GC_S_2_I_1"/>
+ <value value="464" name="A8XX_DEBUGBUS_VFD_GC_S_2_I_0"/>
+ <value value="465" name="A8XX_DEBUGBUS_VFD_GC_S_2_I_1"/>
+</enum>
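+
+<!-- The sliced blocks above repeat at a stride of 128 IDs per slice
+     (CP_GC_S_0_I_0 = 128, S_1 = 256, S_2 = 384, and likewise for the
+     other blocks), so per-slice IDs can be derived rather than
+     tabulated. An observation from the listed values, not a
+     documented rule; in C terms:
+
+         /* ID of the same block/instance on another slice. */
+         static inline unsigned
+         a8xx_debugbus_on_slice(unsigned slice0_id, unsigned slice)
+         {
+                 return slice0_id + slice * 128;
+         }
+
+         /* a8xx_debugbus_on_slice(A8XX_DEBUGBUS_CP_GC_S_0_I_0, 2) == 384 */
+-->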
+
+<enum name="a8xx_usptp_id">
+ <value value="0" name="A8XX_uSPTP0"/>
+ <value value="1" name="A8XX_uSPTP1"/>
+ <value value="15" name="A8XX_SPTOP"/>
+</enum>
+
+<enum name="a8xx_tex_swiz">
+ <value name="A8XX_SWIZ_IDENTITY" value="0"/>
+ <value name="A8XX_SWIZ_ZERO" value="1"/>
+ <value name="A8XX_SWIZ_ONE" value="2"/>
+ <value name="A8XX_SWIZ_X" value="3"/>
+ <value name="A8XX_SWIZ_Y" value="4"/>
+ <value name="A8XX_SWIZ_Z" value="5"/>
+ <value name="A8XX_SWIZ_W" value="6"/>
+</enum>
+
+</database>
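
a8xx_tex_swiz feeds the SWIZ_X..W fields of A8XX_TEX_MEMOBJ dword 3, bits [12:10] through [21:19] (see a8xx_descriptors.xml above). A hedged sketch composing the four fields, here building a BGRA view of an RGBA texture; the helper name is illustrative:

#include <stdint.h>

enum a8xx_tex_swiz {
        A8XX_SWIZ_IDENTITY, A8XX_SWIZ_ZERO, A8XX_SWIZ_ONE,
        A8XX_SWIZ_X, A8XX_SWIZ_Y, A8XX_SWIZ_Z, A8XX_SWIZ_W,
};

static uint32_t a8xx_swiz_dw3(enum a8xx_tex_swiz x, enum a8xx_tex_swiz y,
                              enum a8xx_tex_swiz z, enum a8xx_tex_swiz w)
{
        return (uint32_t)x << 10 | (uint32_t)y << 13 |
               (uint32_t)z << 16 | (uint32_t)w << 19;
}

/* BGRA view: a8xx_swiz_dw3(A8XX_SWIZ_Z, A8XX_SWIZ_Y, A8XX_SWIZ_X, A8XX_SWIZ_W) */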
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml
index 218ec8bb966e..79d204f1e400 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_common.xml
@@ -11,6 +11,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="A5XX" value="5"/>
<value name="A6XX" value="6"/>
<value name="A7XX" value="7"/>
+ <value name="A8XX" value="8"/>
</enum>
<enum name="adreno_pa_su_sc_draw">
@@ -397,4 +398,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value value="0x7" name="TEX_PREFETCH_UNK7"/>
</enum>
+<enum name="adreno_pipe">
+ <value value="0" name="PIPE_NONE"/>
+ <value value="1" name="PIPE_BR"/>
+ <value value="2" name="PIPE_BV"/>
+ <value value="3" name="PIPE_LPAC"/>
+ <value value="4" name="PIPE_AQE0"/>
+ <value value="5" name="PIPE_AQE1"/>
+ <value value="6" name="PIPE_DDE_BR"/>
+ <value value="7" name="PIPE_DDE_BV"/>
+</enum>
+
</database>
diff --git a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
index 7abc08635495..51e9c94f5e37 100644
--- a/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/adreno_pm4.xml
@@ -6,103 +6,102 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<import file="adreno/adreno_common.xml"/>
<enum name="vgt_event_type" varset="chip">
- <value name="VS_DEALLOC" value="0"/>
- <value name="PS_DEALLOC" value="1" variants="A2XX-A6XX"/>
- <value name="VS_DONE_TS" value="2"/>
- <value name="PS_DONE_TS" value="3"/>
+ <value name="VS_DEALLOC" value="0x00" variants="A2XX-A5XX"/>
+ <value name="PS_DEALLOC" value="0x01" variants="A2XX-A5XX"/>
+ <value name="VS_DONE_TS" value="0x02" variants="A2XX-A5XX"/>
+ <value name="PS_DONE_TS" value="0x03" variants="A2XX-A5XX"/>
<doc>
Flushes dirty data from UCHE, and also writes a GPU timestamp to
the address if one is provided.
</doc>
- <value name="CACHE_FLUSH_TS" value="4"/>
- <value name="CONTEXT_DONE" value="5"/>
- <value name="CACHE_FLUSH" value="6" variants="A2XX-A4XX"/>
- <value name="VIZQUERY_START" value="7" variants="A2XX"/>
- <value name="HLSQ_FLUSH" value="7" variants="A3XX-A4XX"/>
- <value name="VIZQUERY_END" value="8" variants="A2XX"/>
- <value name="SC_WAIT_WC" value="9" variants="A2XX"/>
- <value name="WRITE_PRIMITIVE_COUNTS" value="9" variants="A6XX-"/>
- <value name="START_PRIMITIVE_CTRS" value="11" variants="A6XX-"/>
- <value name="STOP_PRIMITIVE_CTRS" value="12" variants="A6XX-"/>
+ <value name="CACHE_FLUSH_TS" value="0x04"/>
+ <value name="CONTEXT_DONE" value="0x05"/>
+ <value name="CACHE_FLUSH" value="0x06" variants="A2XX-A4XX"/>
+ <value name="VIZQUERY_START" value="0x07" variants="A2XX"/>
+ <value name="HLSQ_FLUSH" value="0x07" variants="A3XX-A4XX"/>
+ <value name="VIZQUERY_END" value="0x08" variants="A2XX"/>
+ <value name="SC_WAIT_WC" value="0x09" variants="A2XX"/>
+ <value name="WRITE_PRIMITIVE_COUNTS" value="0x09" variants="A6XX-"/>
+ <value name="START_PRIMITIVE_CTRS" value="0x0b" variants="A6XX-"/>
+ <value name="STOP_PRIMITIVE_CTRS" value="0x0c" variants="A6XX-"/>
<!-- Not sure that these 4 events don't have the same meaning as on A5XX+ -->
- <value name="RST_PIX_CNT" value="13" variants="A2XX-A4XX"/>
- <value name="RST_VTX_CNT" value="14" variants="A2XX-A4XX"/>
- <value name="TILE_FLUSH" value="15" variants="A2XX-A4XX"/>
- <value name="STAT_EVENT" value="16" variants="A2XX-A4XX"/>
- <value name="CACHE_FLUSH_AND_INV_TS_EVENT" value="20" variants="A2XX-A4XX"/>
+ <value name="RST_PIX_CNT" value="0x0d" variants="A2XX-A4XX"/>
+ <value name="RST_VTX_CNT" value="0x0e" variants="A2XX-A4XX"/>
+ <value name="TILE_FLUSH" value="0x0f" variants="A2XX-A4XX"/>
+ <value name="STAT_EVENT" value="0x10" variants="A2XX-A4XX"/>
+ <value name="CACHE_FLUSH_AND_INV_TS_EVENT" value="0x14" variants="A2XX-A4XX"/>
<doc>
If A6XX_RB_SAMPLE_COUNTER_CNTL.copy is true, writes OQ Z passed
sample counts to RB_SAMPLE_COUNTER_BASE. This writes to main
memory, skipping UCHE.
</doc>
- <value name="ZPASS_DONE" value="21"/>
- <value name="CACHE_FLUSH_AND_INV_EVENT" value="22" variants="A2XX"/>
+ <value name="ZPASS_DONE" value="0x15"/>
+ <value name="CACHE_FLUSH_AND_INV_EVENT" value="0x16" variants="A2XX"/>
<doc>
Writes the GPU timestamp to the address that follows, once RB
access and flushes are complete.
</doc>
- <value name="RB_DONE_TS" value="22" variants="A3XX-"/>
+ <value name="RB_DONE_TS" value="0x16" variants="A3XX-"/>
- <value name="PERFCOUNTER_START" value="23" variants="A2XX-A4XX"/>
- <value name="PERFCOUNTER_STOP" value="24" variants="A2XX-A4XX"/>
- <value name="VS_FETCH_DONE" value="27"/>
- <value name="FACENESS_FLUSH" value="28" variants="A2XX-A4XX"/>
+ <value name="PERFCOUNTER_START" value="0x17" variants="A2XX-A4XX"/>
+ <value name="PERFCOUNTER_STOP" value="0x18" variants="A2XX-A4XX"/>
+ <value name="VS_FETCH_DONE" value="0x1b" variants="A2XX-A5XX"/>
+ <value name="FACENESS_FLUSH" value="0x1c" variants="A2XX-A4XX"/>
<!-- a5xx events -->
- <value name="WT_DONE_TS" value="8" variants="A5XX-"/>
- <value name="START_FRAGMENT_CTRS" value="13" variants="A5XX-"/>
- <value name="STOP_FRAGMENT_CTRS" value="14" variants="A5XX-"/>
- <value name="START_COMPUTE_CTRS" value="15" variants="A5XX-"/>
- <value name="STOP_COMPUTE_CTRS" value="16" variants="A5XX-"/>
- <value name="FLUSH_SO_0" value="17" variants="A5XX-"/>
- <value name="FLUSH_SO_1" value="18" variants="A5XX-"/>
- <value name="FLUSH_SO_2" value="19" variants="A5XX-"/>
- <value name="FLUSH_SO_3" value="20" variants="A5XX-"/>
+ <value name="WT_DONE_TS" value="0x08" variants="A5XX-A6XX"/>
+ <value name="START_FRAGMENT_CTRS" value="0x0d" variants="A5XX-"/>
+ <value name="STOP_FRAGMENT_CTRS" value="0x0e" variants="A5XX-"/>
+ <value name="START_COMPUTE_CTRS" value="0x0f" variants="A5XX-"/>
+ <value name="STOP_COMPUTE_CTRS" value="0x10" variants="A5XX-"/>
+ <value name="FLUSH_SO_0" value="0x11" variants="A5XX-"/>
+ <value name="FLUSH_SO_1" value="0x12" variants="A5XX-"/>
+ <value name="FLUSH_SO_2" value="0x13" variants="A5XX-"/>
+ <value name="FLUSH_SO_3" value="0x14" variants="A5XX-"/>
<doc>
Invalidates depth attachment data from the CCU. We assume this
happens in the last stage.
</doc>
- <value name="PC_CCU_INVALIDATE_DEPTH" value="24" variants="A5XX-"/>
+ <value name="PC_CCU_INVALIDATE_DEPTH" value="0x18" variants="A5XX-A6XX"/>
<doc>
Invalidates color attachment data from the CCU. We assume this
happens in the last stage.
</doc>
- <value name="PC_CCU_INVALIDATE_COLOR" value="25" variants="A5XX-"/>
+ <value name="PC_CCU_INVALIDATE_COLOR" value="0x19" variants="A5XX-A6XX"/>
<doc>
Flushes the small cache used by CP_EVENT_WRITE::BLIT (which,
along with its registers, would be better named RESOLVE).
</doc>
- <value name="PC_CCU_RESOLVE_TS" value="26" variants="A6XX"/>
+ <value name="PC_CCU_RESOLVE_TS" value="0x1a" variants="A6XX"/>
<doc>
Flushes depth attachment data from the CCU. We assume this
happens in the last stage.
</doc>
- <value name="PC_CCU_FLUSH_DEPTH_TS" value="28" variants="A5XX-"/>
+ <value name="PC_CCU_FLUSH_DEPTH_TS" value="0x1c" variants="A5XX-A6XX"/>
<doc>
Flushes color attachment data from the CCU. We assume this
happens in the last stage.
</doc>
- <value name="PC_CCU_FLUSH_COLOR_TS" value="29" variants="A5XX-"/>
+ <value name="PC_CCU_FLUSH_COLOR_TS" value="0x1d" variants="A5XX-A6XX"/>
<doc>
- 2D blit to resolve GMEM to system memory (skipping CCU) at the
- end of a render pass. Compare to CP_BLIT's BLIT_OP_SCALE for
- more general blitting.
+ Triggers a resolve (GMEM to sysmem) or unresolve (sysmem to
+ GMEM) or clear blit, depending on CCU programming.
</doc>
- <value name="BLIT" value="30" variants="A5XX-"/>
+ <value name="CCU_RESOLVE" value="0x1e" variants="A5XX-"/>
<doc>
Flip between the primary and secondary LRZ buffers. This is used
for concurrent binning, so that BV can write to one buffer while
BR reads from the other.
</doc>
- <value name="LRZ_FLIP_BUFFER" value="36" variants="A7XX"/>
+ <value name="LRZ_FLIP_BUFFER" value="0x24" variants="A7XX-"/>
<doc>
Clears based on GRAS_LRZ_CNTL configuration, could clear
@@ -115,44 +114,46 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
CUR_DIR_UNSET = 0x3
Clear of direction means setting the direction to CUR_DIR_UNSET.
</doc>
- <value name="LRZ_CLEAR" value="37" variants="A5XX-"/>
-
- <value name="LRZ_FLUSH" value="38" variants="A5XX-"/>
- <value name="BLIT_OP_FILL_2D" value="39" variants="A5XX-"/>
- <value name="BLIT_OP_COPY_2D" value="40" variants="A5XX-A6XX"/>
- <value name="UNK_40" value="40" variants="A7XX"/>
- <value name="LRZ_Q_CACHE_INVALIDATE" value="41" variants="A7XX"/>
- <value name="BLIT_OP_SCALE_2D" value="42" variants="A5XX-"/>
- <value name="CONTEXT_DONE_2D" value="43" variants="A5XX-"/>
- <value name="UNK_2C" value="44" variants="A5XX-"/>
- <value name="UNK_2D" value="45" variants="A5XX-"/>
+ <value name="LRZ_CLEAR" value="0x25" variants="A5XX-"/>
+
+ <value name="LRZ_FLUSH_INVALIDATE" value="0x26" variants="A5XX-A6XX"/>
+ <value name="LRZ_CACHE_FLUSH" value="0x26" variants="A7XX-"/>
+ <value name="BLIT_OP_FILL_2D" value="0x27" variants="A5XX-A6XX"/>
+ <value name="BLIT_OP_COPY_2D" value="0x28" variants="A5XX-A6XX"/>
+ <value name="LRZ_CACHE_INVALIDATE" value="0x28" variants="A7XX-"/>
+ <value name="LRZ_Q_CACHE_INVALIDATE" value="0x29" variants="A7XX-"/>
+ <value name="BLIT_OP_SCALE_2D" value="0x2a" variants="A5XX-"/>
+ <value name="CONTEXT_DONE_2D" value="0x2b" variants="A5XX-"/>
+ <value name="VSC_BINNING_START" value="0x2c" variants="A5XX-"/>
+ <value name="VSC_BINNING_END" value="0x2d" variants="A5XX-"/>
<!-- a6xx events -->
<doc>
Invalidates UCHE.
</doc>
- <value name="CACHE_INVALIDATE" value="49" variants="A6XX"/>
+ <value name="CACHE_INVALIDATE" value="0x31" variants="A6XX"/>
- <value name="LABEL" value="63" variants="A6XX-"/>
+ <value name="DEBUG_LABEL" value="0x3f" variants="A6XX-"/>
<!-- note, some of these are the same as a6xx, just named differently -->
<doc> Doesn't seem to do anything </doc>
- <value name="DUMMY_EVENT" value="1" variants="A7XX"/>
- <value name="CCU_INVALIDATE_DEPTH" value="24" variants="A7XX"/>
- <value name="CCU_INVALIDATE_COLOR" value="25" variants="A7XX"/>
- <value name="CCU_RESOLVE_CLEAN" value="26" variants="A7XX"/>
- <value name="CCU_FLUSH_DEPTH" value="28" variants="A7XX"/>
- <value name="CCU_FLUSH_COLOR" value="29" variants="A7XX"/>
- <value name="CCU_RESOLVE" value="30" variants="A7XX"/>
- <value name="CCU_END_RESOLVE_GROUP" value="31" variants="A7XX"/>
- <value name="CCU_CLEAN_DEPTH" value="32" variants="A7XX"/>
- <value name="CCU_CLEAN_COLOR" value="33" variants="A7XX"/>
- <value name="CACHE_RESET" value="48" variants="A7XX"/>
- <value name="CACHE_CLEAN" value="49" variants="A7XX"/>
+ <value name="DUMMY_EVENT" value="0x01" variants="A7XX-"/>
+ <value name="CCU_INVALIDATE_DEPTH" value="0x18" variants="A7XX-"/>
+ <value name="CCU_INVALIDATE_COLOR" value="0x19" variants="A7XX-"/>
+ <value name="CCU_RESOLVE_CLEAN" value="0x1a" variants="A7XX-"/>
+ <value name="CCU_FLUSH_DEPTH" value="0x1c" variants="A7XX-"/>
+ <value name="CCU_FLUSH_COLOR" value="0x1d" variants="A7XX-"/>
+ <value name="CCU_END_RESOLVE_GROUP" value="0x1f" variants="A7XX-"/>
+ <value name="CCU_CLEAN_DEPTH" value="0x20" variants="A7XX-"/>
+ <value name="CCU_CLEAN_COLOR" value="0x21" variants="A7XX-"/>
+ <value name="CACHE_RESET" value="0x30" variants="A7XX-"/>
+ <value name="CACHE_CLEAN" value="0x31" variants="A7XX-"/>
<!-- TODO: deal with name conflicts with other gens -->
- <value name="CACHE_FLUSH7" value="50" variants="A7XX"/>
- <value name="CACHE_INVALIDATE7" value="51" variants="A7XX"/>
+ <value name="CACHE_FLUSH7" value="0x32" variants="A7XX-"/>
+ <value name="CACHE_INVALIDATE7" value="0x33" variants="A7XX-"/>
+ <value name="DEPTH_BUFFER_FLIP" value="0x3d" variants="A8XX-"/>
+ <value name="CCH_FAST_CLEAR_CLEAN" value="0x1b" variants="A8XX-"/>
</enum>
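
With the event IDs above renumbered in hex, per-variant comparison is easier; usage is unchanged: the event goes in the first payload dword of a CP_EVENT_WRITE (CP_EVENT_WRITE7 on A7XX+, same 0x46 opcode) type-7 packet. A standalone sketch, assuming the usual type-7 header encoding with odd-parity bits as in the mesa/freedreno helpers; the buffer-based emit helper is hypothetical:

#include <stdint.h>

#define CP_EVENT_WRITE  0x46
#define CACHE_FLUSH_TS  0x04   /* same value on every variant above */

/* Odd-parity bit over the low bits, the inverted-0x6996 trick used by
 * type-7 packet headers. */
static uint32_t odd_parity(uint32_t v)
{
        v ^= v >> 16; v ^= v >> 8; v ^= v >> 4; v &= 0xf;
        return (~0x6996u >> v) & 1;
}

/* Type-7 packet header: opcode, payload dword count, parity bits. */
static uint32_t pkt7(uint32_t opcode, uint32_t cnt)
{
        return 0x70000000u | (cnt & 0x3fff) | odd_parity(cnt) << 15 |
               (opcode & 0x7f) << 16 | odd_parity(opcode) << 23;
}

/* CACHE_FLUSH_TS: flush dirty UCHE data and write seqno to ts_iova once
 * the flush lands (see the doc text on the value above). */
static void emit_cache_flush_ts(uint32_t *out, uint64_t ts_iova, uint32_t seqno)
{
        out[0] = pkt7(CP_EVENT_WRITE, 4);
        out[1] = CACHE_FLUSH_TS;
        out[2] = (uint32_t)ts_iova;         /* write address, low 32 bits  */
        out[3] = (uint32_t)(ts_iova >> 32); /* write address, high 32 bits */
        out[4] = seqno;                     /* value written on completion */
}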
<enum name="pc_di_primtype">
@@ -310,11 +311,11 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_EVENT_WRITE" value="0x46" variants="A2XX-A6XX"/>
<value name="CP_EVENT_WRITE7" value="0x46" variants="A7XX-"/>
<doc>generate a VS|PS_done event</doc>
- <value name="CP_EVENT_WRITE_SHD" value="0x58"/>
+ <value name="CP_EVENT_WRITE_SHD" value="0x58" variants="A2XX"/>
<doc>generate a cache flush done event</doc>
- <value name="CP_EVENT_WRITE_CFL" value="0x59"/>
+ <value name="CP_EVENT_WRITE_CFL" value="0x59" variants="A2XX"/>
<doc>generate a z_pass done event</doc>
- <value name="CP_EVENT_WRITE_ZPD" value="0x5b"/>
+ <value name="CP_EVENT_WRITE_ZPD" value="0x5b" variants="A2XX"/>
<doc>
not sure the real name, but this seems to be what is used for
opencl, instead of CP_DRAW_INDX..
@@ -335,9 +336,9 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<doc>load constant into chip and to memory</doc>
<value name="CP_SET_CONSTANT" value="0x2d" variants="A2XX"/>
<doc>load sequencer instruction memory (pointer-based)</doc>
- <value name="CP_IM_LOAD" value="0x27"/>
+ <value name="CP_IM_LOAD" value="0x27" variants="A2XX"/>
<doc>load sequencer instruction memory (code embedded in packet)</doc>
- <value name="CP_IM_LOAD_IMMEDIATE" value="0x2b"/>
+ <value name="CP_IM_LOAD_IMMEDIATE" value="0x2b" variants="A2XX"/>
<doc>load constants from a location in memory</doc>
<value name="CP_LOAD_CONSTANT_CONTEXT" value="0x2e" variants="A2XX"/>
<doc>selective invalidation of state pointers</doc>
@@ -523,7 +524,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<!--
Seems to set the mode flags which control which CP_SET_DRAW_STATE
packets are executed, based on their ENABLE_MASK values
-
+
CP_SET_MODE w/ payload of 0x1 seems to cause CP_SET_DRAW_STATE
packets w/ ENABLE_MASK & 0x6 to execute immediately
-->
@@ -640,8 +641,7 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_BV_BR_COUNT_OPS" value="0x1b" variants="A7XX-"/>
<doc> Clears, adds to local, or adds to global timestamp </doc>
<value name="CP_MODIFY_TIMESTAMP" value="0x1c" variants="A7XX-"/>
- <!-- similar to CP_CONTEXT_REG_BUNCH, but discards first two dwords?? -->
- <value name="CP_CONTEXT_REG_BUNCH2" value="0x5d" variants="A7XX-"/>
+ <value name="CP_NON_CONTEXT_REG_BUNCH" value="0x5d" variants="A7XX-"/>
<doc>
Write to a scratch memory that is read by CP_REG_TEST with
SOURCE_SCRATCH_MEM set. It's not the same scratch as scratch registers.
@@ -663,6 +663,12 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<value name="CP_CCHE_INVALIDATE" value="0x3a" variants="A7XX-"/>
<value name="CP_SCOPE_CNTL" value="0x6c" variants="A7XX-"/>
+
+ <value name="CP_SKIP_IB_MODE" value="0x27" variants="A7XX-"/>
+
+ <value name="CP_MEMORY_MAP_UPDATE" value="0x58" variants="A8XX-"/>
+
+ <value name="CP_BARRIER" value="0x59" variants="A8XX-"/>
</enum>
@@ -918,12 +924,6 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
<stripe varset="chip" variants="A5XX-">
- <reg32 offset="4" name="4">
- <bitfield name="INDX_BASE_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="5" name="5">
- <bitfield name="INDX_BASE_HI" low="0" high="31"/>
- </reg32>
<reg64 offset="4" name="INDX_BASE" type="address"/>
<reg32 offset="6" name="6">
<!-- max # of elements in index buffer -->
@@ -1099,8 +1099,10 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="BINNING" pos="20" varset="chip" variants="A6XX-" type="boolean"/>
<bitfield name="GMEM" pos="21" varset="chip" variants="A6XX-" type="boolean"/>
<bitfield name="SYSMEM" pos="22" varset="chip" variants="A6XX-" type="boolean"/>
- <bitfield name="GROUP_ID" low="24" high="28" type="uint"/>
+ <!-- high bit is 28 until a750: -->
+ <bitfield name="GROUP_ID" low="24" high="29" type="uint"/>
</reg32>
+ <reg64 offset="1" name="ADDR" type="address"/>
<reg32 offset="1" name="1">
<bitfield name="ADDR_LO" low="0" high="31" type="hex"/>
</reg32>
@@ -1166,26 +1168,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
<stripe varset="a7xx_abs_mask_mode" variants="NO_ABS_MASK">
<!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="1" name="1">
- <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="BIN_DATA_ADDR" type="address"/>
<!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="3" name="3">
- <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="4" name="4">
- <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="3" name="BIN_SIZE_ADDR" type="address"/>
<!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
- <reg32 offset="5" name="5">
- <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="6" name="6">
- <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="5" name="BIN_PRIM_STRM" type="address"/>
<!--
a7xx adds a few more addresses to the end of the pkt
-->
@@ -1195,26 +1182,11 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<stripe varset="a7xx_abs_mask_mode" variants="ABS_MASK">
<reg32 offset="1" name="ABS_MASK"/>
<!-- BIN_DATA_ADDR -> VSC_PIPE[p].DATA_ADDRESS -->
- <reg32 offset="2" name="2">
- <bitfield name="BIN_DATA_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="3" name="3">
- <bitfield name="BIN_DATA_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="2" name="BIN_DATA_ADDR" type="address"/>
<!-- BIN_SIZE_ADDRESS -> VSC_SIZE_ADDRESS + (p * 4)-->
- <reg32 offset="4" name="4">
- <bitfield name="BIN_SIZE_ADDRESS_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="5" name="5">
- <bitfield name="BIN_SIZE_ADDRESS_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="4" name="BIN_SIZE_ADDR" type="address"/>
<!-- new on a6xx, where BIN_DATA_ADDR is the DRAW_STRM: -->
- <reg32 offset="6" name="6">
- <bitfield name="BIN_PRIM_STRM_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="7" name="7">
- <bitfield name="BIN_PRIM_STRM_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="6" name="BIN_PRIM_STRM" type="address"/>
<!--
a7xx adds a few more addresses to the end of the pkt
-->
@@ -1300,7 +1272,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
-<domain name="CP_REG_TO_MEM" width="32">
+<domain name="CP_REG_TO_MEM" width="32" prefix="chip">
<reg32 offset="0" name="0">
<bitfield name="REG" low="0" high="17" type="hex"/>
<!-- number of registers/dwords copied is max(CNT, 1). -->
@@ -1308,12 +1280,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="64B" pos="30" type="boolean"/>
<bitfield name="ACCUMULATE" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="DEST" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="DEST_HI" low="0" high="31"/>
- </reg32>
+ <stripe varset="chip" variants="A2XX-A4XX">
+ <reg32 offset="1" name="DEST" type="address"/>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg64 offset="1" name="DEST" type="address"/>
+ </stripe>
</domain>
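
The stripes above fold the old DEST/DEST_HI dword pair into one reg64 without changing the wire format: on A5XX+ the packet is still dword 0 followed by two address dwords. A hedged sketch; the CNT field lives in context lines outside this hunk, and its [29:18] placement here is an assumption:

#include <stdint.h>

static void cp_reg_to_mem_a5xx(uint32_t *out, uint32_t reg, uint32_t cnt,
                               uint64_t dest, int is_64b)
{
        out[0] = (reg & 0x3ffffu) |            /* REG, bits [17:0]         */
                 (cnt & 0xfffu) << 18 |        /* CNT (assumed [29:18])    */
                 (is_64b ? 1u << 30 : 0);      /* 64B: copy as 64-bit data */
        out[1] = (uint32_t)dest;               /* DEST, low 32 bits        */
        out[2] = (uint32_t)(dest >> 32);       /* DEST, high 32 bits       */
}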
<domain name="CP_REG_TO_MEM_OFFSET_REG" width="32">
@@ -1329,12 +1301,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="64B" pos="30" type="boolean"/>
<bitfield name="ACCUMULATE" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="DEST" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="DEST_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="DEST" type="waddress"/>
<reg32 offset="3" name="3">
<bitfield name="OFFSET0" low="0" high="17" type="hex"/>
<bitfield name="OFFSET0_SCRATCH" pos="19" type="boolean"/>
@@ -1354,18 +1321,8 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="64B" pos="30" type="boolean"/>
<bitfield name="ACCUMULATE" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="DEST" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="DEST_HI" low="0" high="31"/>
- </reg32>
- <reg32 offset="3" name="3">
- <bitfield name="OFFSET_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="4" name="4">
- <bitfield name="OFFSET_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="DEST" type="waddress"/>
+ <reg64 offset="3" name="OFFSET" type="waddress"/>
</domain>
<domain name="CP_MEM_TO_REG" width="32">
@@ -1378,12 +1335,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<!-- does the same thing as CP_MEM_TO_MEM::UNK31 -->
<bitfield name="UNK31" pos="31" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="SRC" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2" varset="chip" variants="A5XX-">
- <bitfield name="SRC_HI" low="0" high="31"/>
- </reg32>
+ <stripe varset="chip" variants="A2XX-A4XX">
+ <reg32 offset="1" name="SRC" type="address"/>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg64 offset="1" name="SRC" type="address"/>
+ </stripe>
</domain>
<domain name="CP_MEM_TO_MEM" width="32">
@@ -1403,6 +1360,10 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<!-- some other kind of wait -->
<bitfield name="UNK31" pos="31" type="boolean"/>
</reg32>
+ <reg64 offset="1" name="DST" type="waddress"/>
+ <reg64 offset="3" name="SRC_A" type="address"/>
+ <reg64 offset="5" name="SRC_B" type="address"/>
+ <reg64 offset="7" name="SRC_C" type="address"/>
<!--
followed by sequence of addresses.. the first is the
destination and the rest are N src addresses which are
@@ -1461,12 +1422,12 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</domain>
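
The CP_MEM_TO_MEM reg64s added above likewise only name what the trailing comment already described as a destination followed by source addresses; the layout itself is unchanged. A minimal sketch of the dword packing, leaving dword 0 symbolic since its flag bits (DOUBLE, WAIT_FOR_MEM_WRITES, ...) are defined outside the visible hunk:

#include <stdint.h>

static void cp_mem_to_mem_body(uint32_t *out, uint32_t flags,
                               uint64_t dst, uint64_t src_a, uint64_t src_b)
{
        out[0] = flags;                     /* DOUBLE / WAIT_FOR_MEM_WRITES / ... */
        out[1] = (uint32_t)dst;             /* DST,   dwords 1-2 */
        out[2] = (uint32_t)(dst >> 32);
        out[3] = (uint32_t)src_a;           /* SRC_A, dwords 3-4 */
        out[4] = (uint32_t)(src_a >> 32);
        out[5] = (uint32_t)src_b;           /* SRC_B, dwords 5-6 */
        out[6] = (uint32_t)(src_b >> 32);
}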
<domain name="CP_MEM_WRITE" width="32">
- <reg32 offset="0" name="0">
- <bitfield name="ADDR_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_HI" low="0" high="31"/>
- </reg32>
+ <stripe varset="chip" variants="A2XX-A4XX">
+ <reg32 offset="0" name="ADDR" type="address"/>
+ </stripe>
+ <stripe varset="chip" variants="A5XX-">
+ <reg64 offset="0" name="ADDR" type="address"/>
+ </stripe>
<!-- followed by the DWORDs to write -->
</domain>
@@ -1518,24 +1479,14 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="POLL" low="4" high="5" type="poll_memory_type"/>
<bitfield name="WRITE_MEMORY" pos="8" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="POLL_ADDR" type="address"/>
<reg32 offset="3" name="3">
<bitfield name="REF" low="0" high="31"/>
</reg32>
<reg32 offset="4" name="4">
<bitfield name="MASK" low="0" high="31"/>
</reg32>
- <reg32 offset="5" name="5">
- <bitfield name="WRITE_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="6" name="6">
- <bitfield name="WRITE_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="5" name="WRITE_ADDR" type="waddress"/>
<reg32 offset="7" name="7">
<bitfield name="WRITE_DATA" low="0" high="31"/>
</reg32>
@@ -1550,12 +1501,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<!-- Reserved for flags, presumably? Unused in FW -->
<bitfield name="RESERVED" low="0" high="31" type="hex"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="POLL_ADDR" type="address"/>
<reg32 offset="3" name="3">
<bitfield name="REF" low="0" high="31"/>
</reg32>
@@ -1573,12 +1519,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<bitfield name="POLL" low="4" high="5" type="poll_memory_type"/>
<bitfield name="WRITE_MEMORY" pos="8" type="boolean"/>
</reg32>
- <reg32 offset="1" name="1">
- <bitfield name="POLL_ADDR_LO" low="0" high="31" type="hex"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="POLL_ADDR_HI" low="0" high="31" type="hex"/>
- </reg32>
+ <reg64 offset="1" name="POLL_ADDR" type="address"/>
<reg32 offset="3" name="3">
<bitfield name="REF" low="0" high="31"/>
</reg32>
@@ -1712,12 +1653,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
TODO what is gpuaddr for, seems to be all 0's.. maybe needed for
context switch?
-->
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_0_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="ADDR_0_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="ADDR" type="waddress"/>
<reg32 offset="3" name="3">
<!-- ??? -->
</reg32>
@@ -1832,9 +1768,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<reg32 offset="0" name="0">
</reg32>
<stripe varset="chip" variants="A4XX">
- <reg32 offset="1" name="1">
- <bitfield name="ADDR" low="0" high="31"/>
- </reg32>
+ <reg32 offset="1" name="ADDR" type="address"/>
<reg32 offset="2" name="2">
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
@@ -1843,12 +1777,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</stripe>
<stripe varset="chip" variants="A5XX-">
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="2" name="2">
- <bitfield name="ADDR_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="1" name="ADDR" type="address"/>
<reg32 offset="3" name="3">
<!-- localsize is value minus one: -->
<bitfield name="LOCALSIZEX" low="2" high="11" type="uint"/>
@@ -1878,49 +1807,73 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
<value value="6" name="RM6_BIN_RESOLVE"/>
<value value="7" name="RM6_BIN_RENDER_END"/>
<value value="8" name="RM6_COMPUTE"/>
- <value value="0xc" name="RM6_BLIT2DSCALE"/> <!-- no-op (at least on current sqe fw) -->
+ <value value="12" name="RM6_BLIT2DSCALE"/> <!-- no-op (at least on current sqe fw) -->
<!--
These values come from a6xx_set_marker() in the
downstream kernel, and they can only be set by the kernel
-->
- <value value="0xd" name="RM6_IB1LIST_START"/>
- <value value="0xe" name="RM6_IB1LIST_END"/>
+ <value value="13" name="RM6_IB1LIST_START"/>
+ <value value="14" name="RM6_IB1LIST_END"/>
+ <value value="15" name="RM7_BIN_VISIBILITY_END"/>
+
+ <!-- new in a8xx: -->
+ <value value="32" name="RM8_DEPTH_PASS_START"/>
+ <value value="33" name="RM8_DEPTH_PASS_END"/>
+ <value value="34" name="RM8_SET_RENDER_TARGET"/>
+ <value value="35" name="RM8_PGMEM_ON"/>
+ <value value="36" name="RM8_PGMEM_OFF"/>
</enum>
- <reg32 offset="0" name="0">
- <!-- if b8 is set, the low bits are interpreted differently (and b4 ignored) -->
- <bitfield name="MARKER_MODE" pos="8" type="set_marker_mode" addvariant="yes"/>
-
- <bitfield name="MODE" low="0" high="3" type="a6xx_marker" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
- <!-- used by preemption to determine if GMEM needs to be saved or not -->
- <bitfield name="USES_GMEM" pos="4" type="boolean" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
-
- <bitfield name="IFPC_MODE" pos="0" type="a6xx_ifpc_mode" varset="set_marker_mode" variants="SET_IFPC_MODE"/>
-
- <!--
- CP_SET_MARKER is used with these bits to create a
- critical section around a workaround for ray tracing.
- The workaround happens after BVH building, and appears
- to invalidate the RTU's BVH node cache. It makes sure
- that only one of BR/BV/LPAC is executing the
- workaround at a time, and no draws using RT on BV/LPAC
- are executing while the workaround is executed on BR (or
- vice versa, that no draws on BV/BR using RT are executed
- while the workaround executes on LPAC), by
- hooking subsequent CP_EVENT_WRITE/CP_DRAW_*/CP_EXEC_CS.
- The blob usage is:
-
- CP_SET_MARKER(RT_WA_START)
- ... workaround here ...
- CP_SET_MARKER(RT_WA_END)
- ...
- CP_SET_MARKER(SHADER_USES_RT)
- CP_DRAW_INDX(...) or CP_EXEC_CS(...)
- -->
- <bitfield name="SHADER_USES_RT" pos="9" type="boolean" variants="A7XX-"/>
- <bitfield name="RT_WA_START" pos="10" type="boolean" variants="A7XX-"/>
- <bitfield name="RT_WA_END" pos="11" type="boolean" variants="A7XX-"/>
- </reg32>
+ <stripe varset="chip" variants="A6XX-A7XX">
+ <reg32 offset="0" name="0">
+ <!-- if b8 is set, the low bits are interpreted differently (and b4 ignored) -->
+ <bitfield name="MARKER_MODE" pos="8" type="set_marker_mode" addvariant="yes"/>
+
+ <bitfield name="MODE" low="0" high="3" type="a6xx_marker" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+ <!-- used by preemption to determine if GMEM needs to be saved or not -->
+ <bitfield name="USES_GMEM" pos="4" type="boolean" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+
+ <bitfield name="IFPC_MODE" pos="0" type="a6xx_ifpc_mode" varset="set_marker_mode" variants="SET_IFPC_MODE"/>
+
+ <!--
+ CP_SET_MARKER is used with these bits to create a
+ critical section around a workaround for ray tracing.
+ The workaround happens after BVH building, and appears
+ to invalidate the RTU's BVH node cache. It makes sure
+ that only one of BR/BV/LPAC is executing the
+ workaround at a time, and no draws using RT on BV/LPAC
+ are executing while the workaround is executed on BR (or
+ vice versa, that no draws on BV/BR using RT are executed
+ while the workaround executes on LPAC), by
+ hooking subsequent CP_EVENT_WRITE/CP_DRAW_*/CP_EXEC_CS.
+ The blob usage is:
+
+ CP_SET_MARKER(RT_WA_START)
+ ... workaround here ...
+ CP_SET_MARKER(RT_WA_END)
+ ...
+ CP_SET_MARKER(SHADER_USES_RT)
+ CP_DRAW_INDX(...) or CP_EXEC_CS(...)
+ -->
+ <bitfield name="SHADER_USES_RT" pos="9" type="boolean" variants="A7XX-"/>
+ <bitfield name="RT_WA_START" pos="10" type="boolean" variants="A7XX-"/>
+ <bitfield name="RT_WA_END" pos="11" type="boolean" variants="A7XX-"/>
+ </reg32>
+ </stripe>
+ <stripe varset="chip" variants="A8XX-">
+ <reg32 offset="0" name="0">
+ <!-- if b8 is set, the low bits are interpreted differently (and b4 ignored) -->
+ <bitfield name="MARKER_MODE" pos="8" type="set_marker_mode" addvariant="yes"/>
+ <bitfield name="MODE" low="0" high="6" type="a6xx_marker" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+ <bitfield name="USES_GMEM" pos="7" type="boolean" varset="set_marker_mode" variants="SET_RENDER_MODE"/>
+ <bitfield name="IFPC_MODE" pos="0" type="a6xx_ifpc_mode" varset="set_marker_mode" variants="SET_IFPC_MODE"/>
+ <!-- idk if the RT w/a fields apply to a8xx as well -->
+ </reg32>
+ </stripe>
</domain>
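The a8xx stripe widens MODE from bits 0..3 to bits 0..6 and moves USES_GMEM from bit 4 to bit 7; MARKER_MODE stays at bit 8. A hedged Python sketch of packing the first dword under each layout (positions are read off the XML above; RM6_COMPUTE comes from the enum, the rest is illustrative):

RM6_COMPUTE = 8  # from the a6xx_marker enum above

def set_marker_a6xx(mode, uses_gmem):
    # a6xx/a7xx layout: MODE in bits 0..3, USES_GMEM at bit 4
    return (mode & 0xf) | (int(uses_gmem) << 4)

def set_marker_a8xx(mode, uses_gmem):
    # a8xx layout: MODE widened to bits 0..6, USES_GMEM at bit 7
    return (mode & 0x7f) | (int(uses_gmem) << 7)

MARKER_MODE = 1 << 8  # selects the SET_IFPC_MODE interpretation instead

assert set_marker_a6xx(RM6_COMPUTE, True) == 0x18
assert set_marker_a8xx(RM6_COMPUTE, True) == 0x88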
<domain name="CP_SET_PSEUDO_REG" width="32" varset="chip" prefix="chip" variants="A6XX-">
@@ -2144,6 +2097,14 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
payload *and* skipsaverestore is set. This is
expected to restore static register values not
saved when skipsaverestore is set.
+
+ On BV, a skipsaverestore preemption is triggered
+ and this preamble type is executed whenever a
+ CP_THREAD_CONTROL that synchronizes threads
+ happens. This can be explicitly via
+ SYNC_THREADS, or implicitly when the value of
+ CONCURRENT_BIN_DISABLE changes from the previous
+ thread control.
</doc>
</value>
<value name="POSTAMBLE_AMBLE_TYPE" value="2">
@@ -2161,12 +2122,7 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</doc>
</value>
</enum>
- <reg32 offset="0" name="0">
- <bitfield name="ADDR_LO" low="0" high="31"/>
- </reg32>
- <reg32 offset="1" name="1">
- <bitfield name="ADDR_HI" low="0" high="31"/>
- </reg32>
+ <reg64 offset="0" name="ADDR" type="address"/>
<reg32 offset="2" name="2">
<bitfield name="DWORDS" low="0" high="19" type="uint"/>
<bitfield name="TYPE" low="20" high="21" type="amble_type"/>
@@ -2391,5 +2347,99 @@ opcode: CP_LOAD_STATE4 (30) (4 dwords)
</reg32>
</domain>
+<domain name="CP_RESOURCE_LIST" width="32">
+ <doc>
+ A7xx introduces the "resource table" which is managed by
+ CP_RESOURCE_LIST. It is used to synchronize BR and BV access
+ to resources such as LRZ buffers.
+
+ The resource table consists of resources that are in use by BR.
+ Each "resource" has a base address (usually a pointer, but
+ treated by the HW as an opaque handle), a read/write bit, and a
+ timestamp of when it was last used.
+ Resources are removed from the table upon event completion when
+ a special CP_EVENT_WRITE::CLEAR_RENDER_RESOURCE bit is set, which
+ will remove all resources with a timestamp up to the current
+ timestamp.
+
+ CP_RESOURCE_LIST first specifies a list of BV resources. For
+ each BV resource, the HW will check if there is a corresponding
+ BR resource in the table, and if at least one of the BV and BR
+ resources is marked WRITE then it will stall until the BR
+ resource is removed.
+
+ It then specifies a list of BR resources. These will be added to
+ the resource table, unless there is an overflow in which case
+ the designated overflow register will have bit 0 set. Overflow
+ should cause the next binning pass to stall until BR is done,
+ effectively disabling concurrent binning.
+
+ CP_RESOURCE_LIST must be executed by BV. BR resources are added
+ by BV and removed by BR.
+
+ There is a separate table for "LRZ resources." These behave a
+ bit differently: specifying an LRZ resource via BV_RES_LRZ
+ stalls while any matching resource exists and then adds it to the
+ table, making it both a BV and a BR resource in one. There is a
+ separate CLEAR_LRZ_RESOURCE bit for removing resources from the
+ LRZ table, and it only removes one resource given by a base
+ address passed to CP_EVENT_WRITE. Therefore timestamps are
+ unnecessary.
+ </doc>
+ <reg32 offset="0" name="BV_COUNT" type="uint"/>
+ <doc>
+ What follows is a list of CP_BV_RESOURCE and then CP_RESOURCE_LIST_BR.
+ </doc>
+</domain>
+
+<domain name="CP_BV_RESOURCE" width="32">
+ <doc>
+ BV resources don't go in the table. Instead, the CP waits until any
+ corresponding BR resources with the same base pointer are
+ finished before the packet completes.
+ </doc>
+ <enum name="cp_bv_resource_encoding">
+ <value value="0" name="BV_RES_DIRECT"/>
+ <doc>
+ INDIRECT resources are encoded as a 32b offset + 3b
+ bindless base selector. The offset is added to the given
+ BINDLESS_BASE pseudoregister and then the 64b value
+ fetched there is used as the pointer.
+ </doc>
+ <value value="1" name="BV_RES_INDIRECT_READ"/>
+ <value value="2" name="BV_RES_LRZ"/>
+ <value value="3" name="BV_RES_INDIRECT_WRITE"/>
+ </enum>
+ <reg64 offset="0" name="0">
+ <bitfield name="BASE_ADDR" low="1" high="61" shr="1" type="address"/>
+ <bitfield name="WRITE" pos="0" type="boolean"/>
+ <bitfield name="ENCODING" low="62" high="63" type="cp_bv_resource_encoding"/>
+ </reg64>
+</domain>
+
+<domain name="CP_RESOURCE_LIST_BR" width="32">
+ <reg32 offset="0" name="0">
+ <bitfield name="BR_COUNT" low="0" high="23" type="uint"/>
+ <bitfield name="OVERFLOW_ONCHIP_ADDR" low="24" high="26"/>
+ <bitfield name="OVERFLOW" pos="31" type="boolean"/>
+ </reg32>
+ <doc>
+ What follows is a list of CP_BR_RESOURCE.
+ </doc>
+</domain>
+
+<domain name="CP_BR_RESOURCE" width="32">
+ <enum name="cp_br_resource_encoding">
+ <value value="0" name="BR_RES_DIRECT"/>
+ <value value="2" name="BR_RES_INDIRECT_READ"/>
+ <value value="3" name="BR_RES_INDIRECT_WRITE"/> <!-- set WRITE bit -->
+ </enum>
+ <reg64 offset="0" name="0">
+ <bitfield name="BASE_ADDR" low="1" high="61" shr="1" type="address"/>
+ <bitfield name="WRITE" pos="0" type="boolean"/>
+ <bitfield name="ENCODING" low="62" high="63" type="cp_br_resource_encoding"/>
+ </reg64>
+</domain>
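Both descriptor domains share one 64-bit layout: WRITE at bit 0, the base address stored shifted right by one (shr="1") in bits 1..61, and the encoding selector in bits 62..63. A sketch of the packing under that reading of shr (the enum values are the ones listed above):

BR_RES_DIRECT = 0
BR_RES_INDIRECT_READ = 2
BR_RES_INDIRECT_WRITE = 3  # pairs with the WRITE bit

def pack_resource(base_addr, write, encoding):
    assert (base_addr & 1) == 0      # shr="1": bit 0 of the address is dropped
    qword = int(write) & 0x1         # WRITE at bit 0
    qword |= ((base_addr >> 1) & ((1 << 61) - 1)) << 1  # BASE_ADDR, bits 1..61
    qword |= (encoding & 0x3) << 62  # ENCODING, bits 62..63
    return qword

desc = pack_resource(0x10000000, write=True, encoding=BR_RES_INDIRECT_WRITE)
assert desc >> 62 == BR_RES_INDIRECT_WRITE and desc & 1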
+
</database>
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index 4e5ac0f25dea..f41516dd0567 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -22,7 +22,16 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
<reg32 offset="0x00020" name="VREG_CTRL_0"/>
- <reg32 offset="0x00024" name="CTRL_0"/>
+ <reg32 offset="0x00024" name="CTRL_0">
+ <bitfield name="CLKSL_SHUTDOWNB" pos="7" type="boolean"/>
+ <bitfield name="DIGTOP_PWRDN_B" pos="6" type="boolean"/>
+ <bitfield name="PLL_SHUTDOWNB" pos="5" type="boolean"/>
+ <bitfield name="DLN3_SHUTDOWNB" pos="4" type="boolean"/>
+ <bitfield name="DLN2_SHUTDOWNB" pos="3" type="boolean"/>
+ <bitfield name="CLK_SHUTDOWNB" pos="2" type="boolean"/>
+ <bitfield name="DLN1_SHUTDOWNB" pos="1" type="boolean"/>
+ <bitfield name="DLN0_SHUTDOWNB" pos="0" type="boolean"/>
+ </reg32>
<reg32 offset="0x00028" name="CTRL_1"/>
<reg32 offset="0x0002c" name="CTRL_2"/>
<reg32 offset="0x00030" name="CTRL_3"/>
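With the shutdown bits named, the usual all-lanes-enabled CTRL_0 value can be built symbolically instead of written as a magic number. A sketch (0x7f as the conventional enable value is an assumption here, not taken from this patch):

# Active-low "shutdown" bits: setting a bit takes that block *out* of shutdown.
DLN0_SHUTDOWNB  = 1 << 0
DLN1_SHUTDOWNB  = 1 << 1
CLK_SHUTDOWNB   = 1 << 2
DLN2_SHUTDOWNB  = 1 << 3
DLN3_SHUTDOWNB  = 1 << 4
PLL_SHUTDOWNB   = 1 << 5
DIGTOP_PWRDN_B  = 1 << 6
CLKSL_SHUTDOWNB = 1 << 7

ctrl0 = (DLN0_SHUTDOWNB | DLN1_SHUTDOWNB | DLN2_SHUTDOWNB | DLN3_SHUTDOWNB |
         CLK_SHUTDOWNB | PLL_SHUTDOWNB | DIGTOP_PWRDN_B)
assert ctrl0 == 0x7f  # everything but CLKSL taken out of shutdown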
diff --git a/drivers/gpu/drm/msm/registers/gen_header.py b/drivers/gpu/drm/msm/registers/gen_header.py
index a409404627c7..2acad951f1e2 100644
--- a/drivers/gpu/drm/msm/registers/gen_header.py
+++ b/drivers/gpu/drm/msm/registers/gen_header.py
@@ -11,7 +11,6 @@ import collections
import argparse
import time
import datetime
-import re
class Error(Exception):
def __init__(self, message):
@@ -31,7 +30,7 @@ class Enum(object):
def names(self):
return [n for (n, value) in self.values]
- def dump(self):
+ def dump(self, is_deprecated):
use_hex = False
for (name, value) in self.values:
if value > 0x1000:
@@ -45,7 +44,7 @@ class Enum(object):
print("\t%s = %d," % (name, value))
print("};\n")
- def dump_pack_struct(self):
+ def dump_pack_struct(self, is_deprecated):
pass
class Field(object):
@@ -70,11 +69,11 @@ class Field(object):
raise parser.error("booleans should be 1 bit fields")
elif self.type == "float" and not (high - low == 31 or high - low == 15):
raise parser.error("floats should be 16 or 32 bit fields")
- elif not self.type in builtin_types and not self.type in parser.enums:
+ elif self.type not in builtin_types and self.type not in parser.enums:
raise parser.error("unknown type '%s'" % self.type)
def ctype(self, var_name):
- if self.type == None:
+ if self.type is None:
type = "uint32_t"
val = var_name
elif self.type == "boolean":
@@ -124,7 +123,7 @@ def field_name(reg, f):
name = f.name.lower()
else:
# We hit this path when a reg is defined with no bitset fields, ie.
- # <reg32 offset="0x88db" name="RB_BLIT_DST_ARRAY_PITCH" low="0" high="28" shr="6" type="uint"/>
+ # <reg32 offset="0x88db" name="RB_RESOLVE_SYSTEM_BUFFER_ARRAY_PITCH" low="0" high="28" shr="6" type="uint"/>
name = reg.name.lower()
if (name in [ "double", "float", "int" ]) or not (name[0].isalpha()):
@@ -146,10 +145,23 @@ def indices_strides(indices):
"%s(i%d)" % (offset, idx)
for (idx, (ctype, stride, offset)) in enumerate(indices)])
+def is_number(str):
+ try:
+ int(str)
+ return True
+ except ValueError:
+ return False
+
+def sanitize_variant(variant):
+ if variant and "-" in variant:
+ return variant[:variant.index("-")]
+ return variant
+
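These two helpers are small but load-bearing: is_number() lets later code skip purely numeric register names, and sanitize_variant() strips the range suffix so "A5XX-" can be used as an identifier prefix. Restated as a runnable demo (definitions duplicated from above):

def is_number(s):
    try:
        int(s)
        return True
    except ValueError:
        return False

def sanitize_variant(variant):
    if variant and "-" in variant:
        return variant[:variant.index("-")]
    return variant

assert sanitize_variant("A5XX-") == "A5XX"      # open-ended range
assert sanitize_variant("A2XX-A4XX") == "A2XX"  # closed range keeps the start
assert sanitize_variant("A6XX") == "A6XX"       # plain variants pass through
assert is_number("3") and not is_number("A7XX")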
class Bitset(object):
def __init__(self, name, template):
self.name = name
self.inline = False
+ self.reg = None
if template:
self.fields = template.fields[:]
else:
@@ -175,18 +187,15 @@ class Bitset(object):
print("#endif\n")
print(" return (struct fd_reg_pair) {")
- if reg.array:
- print(" .reg = REG_%s(__i)," % reg.full_name)
- else:
- print(" .reg = REG_%s," % reg.full_name)
-
+ print(" .reg = (uint32_t)%s," % reg.reg_offset())
print(" .value =")
+ cast = "(uint64_t)" if reg.bit_size == 64 else ""
for f in self.fields:
if f.type in [ "address", "waddress" ]:
continue
else:
type, val = f.ctype("fields.%s" % field_name(reg, f))
- print(" (%-40s << %2d) |" % (val, f.low))
+ print(" (%s%-40s << %2d) |" % (cast, val, f.low))
value_name = "dword"
if reg.bit_size == 64:
value_name = "qword"
@@ -204,7 +213,7 @@ class Bitset(object):
print(" };")
- def dump_pack_struct(self, reg=None):
+ def dump_pack_struct(self, is_deprecated, reg=None):
if not reg:
return
@@ -229,12 +238,15 @@ class Bitset(object):
tab_to(" uint32_t", "dword;")
print("};\n")
+ depcrstr = ""
+ if is_deprecated:
+ depcrstr = " FD_DEPRECATED"
if reg.array:
- print("static inline struct fd_reg_pair\npack_%s(uint32_t __i, struct %s fields)\n{" %
- (prefix, prefix))
+ print("static inline%s struct fd_reg_pair\npack_%s(uint32_t __i, struct %s fields)\n{" %
+ (depcrstr, prefix, prefix))
else:
- print("static inline struct fd_reg_pair\npack_%s(struct %s fields)\n{" %
- (prefix, prefix))
+ print("static inline%s struct fd_reg_pair\npack_%s(struct %s fields)\n{" %
+ (depcrstr, prefix, prefix))
self.dump_regpair_builder(reg)
@@ -253,28 +265,37 @@ class Bitset(object):
(prefix, prefix, prefix, skip))
- def dump(self, prefix=None):
- if prefix == None:
+ def dump(self, is_deprecated, prefix=None, reg=None):
+ if prefix is None:
prefix = self.name
+ reg64 = reg and self.reg and self.reg.bit_size == 64
+ if reg64:
+ print("static inline uint32_t %s_LO(uint32_t val)\n{" % prefix)
+ print("\treturn val;\n}")
+ print("static inline uint32_t %s_HI(uint32_t val)\n{" % prefix)
+ print("\treturn val;\n}")
for f in self.fields:
if f.name:
name = prefix + "_" + f.name
else:
name = prefix
- if not f.name and f.low == 0 and f.shr == 0 and not f.type in ["float", "fixed", "ufixed"]:
+ if not f.name and f.low == 0 and f.shr == 0 and f.type not in ["float", "fixed", "ufixed"]:
pass
- elif f.type == "boolean" or (f.type == None and f.low == f.high):
+ elif f.type == "boolean" or (f.type is None and f.low == f.high):
tab_to("#define %s" % name, "0x%08x" % (1 << f.low))
else:
- tab_to("#define %s__MASK" % name, "0x%08x" % mask(f.low, f.high))
+ typespec = "ull" if reg64 else "u"
+ tab_to("#define %s__MASK" % name, "0x%08x%s" % (mask(f.low, f.high), typespec))
tab_to("#define %s__SHIFT" % name, "%d" % f.low)
type, val = f.ctype("val")
+ ret_type = "uint64_t" if reg64 else "uint32_t"
+ cast = "(uint64_t)" if reg64 else ""
- print("static inline uint32_t %s(%s val)\n{" % (name, type))
+ print("static inline %s %s(%s val)\n{" % (ret_type, name, type))
if f.shr > 0:
print("\tassert(!(val & 0x%x));" % mask(0, f.shr - 1))
- print("\treturn ((%s) << %s__SHIFT) & %s__MASK;\n}" % (val, name, name))
+ print("\treturn (%s(%s) << %s__SHIFT) & %s__MASK;\n}" % (cast, val, name, name))
print()
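The "ull" suffix and the (uint64_t) cast are the point of this hunk: for a reg64 field above bit 31, a plain 0x...u mask is a 32-bit constant, so the shifted value would be truncated before it ever reached the high dword. A sketch of why (mask() here is a stand-in for the generator's own helper):

def mask(low, high):
    # all-ones across bit positions low..high inclusive
    return ((1 << (high - low + 1)) - 1) << low

m = mask(32, 61)        # a field living entirely in the high dword
assert m > 0xffffffff   # no longer representable as a 32-bit literal
print("#define FIELD__MASK\t0x%x%s" % (m, "ull"))
print("#define FIELD__SHIFT\t32")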
class Array(object):
@@ -286,6 +307,7 @@ class Array(object):
self.domain = domain
self.variant = variant
self.parent = parent
+ self.children = []
if self.parent:
self.name = self.parent.name + "_" + self.local_name
else:
@@ -337,12 +359,15 @@ class Array(object):
offset += self.parent.total_offset()
return offset
- def dump(self):
+ def dump(self, is_deprecated):
+ depcrstr = ""
+ if is_deprecated:
+ depcrstr = " FD_DEPRECATED"
proto = indices_varlist(self.indices())
strides = indices_strides(self.indices())
array_offset = self.total_offset()
if self.fixed_offsets:
- print("static inline uint32_t __offset_%s(%s idx)" % (self.local_name, self.index_ctype()))
+ print("static inline%s uint32_t __offset_%s(%s idx)" % (depcrstr, self.local_name, self.index_ctype()))
print("{\n\tswitch (idx) {")
if self.index_type:
for val, offset in zip(self.index_type.names(), self.offsets):
@@ -357,7 +382,7 @@ class Array(object):
else:
tab_to("#define REG_%s_%s(%s)" % (self.domain, self.name, proto), "(0x%08x + %s )\n" % (array_offset, strides))
- def dump_pack_struct(self):
+ def dump_pack_struct(self, is_deprecated):
pass
def dump_regpair_builder(self):
@@ -373,6 +398,7 @@ class Reg(object):
self.bit_size = bit_size
if array:
self.name = array.name + "_" + self.name
+ array.children.append(self)
self.full_name = self.domain + "_" + self.name
if "stride" in attrs:
self.stride = int(attrs["stride"], 0)
@@ -397,25 +423,34 @@ class Reg(object):
else:
return self.offset
- def dump(self):
+ def reg_offset(self):
+ if self.array:
+ offset = self.array.offset + self.offset
+ return "(0x%08x + 0x%x*__i)" % (offset, self.array.stride)
+ return "0x%08x" % self.offset
+
+ def dump(self, is_deprecated):
+ depcrstr = ""
+ if is_deprecated:
+ depcrstr = " FD_DEPRECATED "
proto = indices_prototype(self.indices())
strides = indices_strides(self.indices())
offset = self.total_offset()
if proto == '':
tab_to("#define REG_%s" % self.full_name, "0x%08x" % offset)
else:
- print("static inline uint32_t REG_%s(%s) { return 0x%08x + %s; }" % (self.full_name, proto, offset, strides))
+ print("static inline%s uint32_t REG_%s(%s) { return 0x%08x + %s; }" % (depcrstr, self.full_name, proto, offset, strides))
if self.bitset.inline:
- self.bitset.dump(self.full_name)
+ self.bitset.dump(is_deprecated, self.full_name, self)
+ print("")
- def dump_pack_struct(self):
+ def dump_pack_struct(self, is_deprecated):
if self.bitset.inline:
- self.bitset.dump_pack_struct(self)
+ self.bitset.dump_pack_struct(is_deprecated, self)
def dump_regpair_builder(self):
- if self.bitset.inline:
- self.bitset.dump_regpair_builder(self)
+ self.bitset.dump_regpair_builder(self)
def dump_py(self):
print("\tREG_%s = 0x%08x" % (self.full_name, self.offset))
@@ -444,9 +479,6 @@ class Parser(object):
self.variants = set()
self.file = []
self.xml_files = []
- self.copyright_year = None
- self.authors = []
- self.license = None
def error(self, message):
parser, filename = self.stack[-1]
@@ -454,7 +486,7 @@ class Parser(object):
def prefix(self, variant=None):
if self.current_prefix_type == "variant" and variant:
- return variant
+ return sanitize_variant(variant)
elif self.current_stripe:
return self.current_stripe + "_" + self.current_domain
elif self.current_prefix:
@@ -500,15 +532,22 @@ class Parser(object):
return varset
def parse_variants(self, attrs):
- if not "variants" in attrs:
+ if "variants" not in attrs:
return None
- variant = attrs["variants"].split(",")[0]
- if "-" in variant:
- variant = variant[:variant.index("-")]
+ variant = attrs["variants"].split(",")[0]
varset = self.parse_varset(attrs)
- assert varset.has_name(variant)
+ if "-" in variant:
+ # if we have a range, validate that both the start and end
+ # of the range are valid enums:
+ start = variant[:variant.index("-")]
+ end = variant[variant.index("-") + 1:]
+ assert varset.has_name(start)
+ if end != "":
+ assert varset.has_name(end)
+ else:
+ assert varset.has_name(variant)
return variant
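parse_variants() now keeps the full range string and validates both endpoints against the varset instead of only the start. The same check, restated standalone (known stands in for the varset enum; names are illustrative):

def validate_variant(variant, known):
    if "-" in variant:
        start, _, end = variant.partition("-")
        assert start in known
        if end:
            assert end in known  # closed range: both ends must be enum names
    else:
        assert variant in known

known = {"A2XX", "A3XX", "A4XX", "A5XX", "A6XX", "A7XX"}
validate_variant("A5XX-", known)      # open-ended: only the start is checked
validate_variant("A2XX-A4XX", known)  # closed: both ends are checked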
@@ -572,9 +611,6 @@ class Parser(object):
error_str = str(xmlschema.error_log.filter_from_errors()[0])
raise self.error("Schema validation failed for: " + filename + "\n" + error_str)
except ImportError as e:
- if self.validate:
- raise e
-
print("lxml not found, skipping validation", file=sys.stderr)
def do_parse(self, filename):
@@ -620,6 +656,7 @@ class Parser(object):
self.current_reg = Reg(attrs, self.prefix(variant), self.current_array, bit_size)
self.current_reg.bitset = self.current_bitset
+ self.current_bitset.reg = self.current_reg
if len(self.stack) == 1:
self.file.append(self.current_reg)
@@ -643,7 +680,7 @@ class Parser(object):
elif name == "domain":
self.current_domain = attrs["name"]
if "prefix" in attrs:
- self.current_prefix = self.parse_variants(attrs)
+ self.current_prefix = sanitize_variant(self.parse_variants(attrs))
self.current_prefix_type = attrs["prefix"]
else:
self.current_prefix = None
@@ -651,7 +688,7 @@ class Parser(object):
if "varset" in attrs:
self.current_varset = self.enums[attrs["varset"]]
elif name == "stripe":
- self.current_stripe = self.parse_variants(attrs)
+ self.current_stripe = sanitize_variant(self.parse_variants(attrs))
elif name == "enum":
self.current_enum_value = 0
self.current_enum = Enum(attrs["name"])
@@ -686,10 +723,6 @@ class Parser(object):
self.parse_field(attrs["name"], attrs)
elif name == "database":
self.do_validate(attrs["xsi:schemaLocation"])
- elif name == "copyright":
- self.copyright_year = attrs["year"]
- elif name == "author":
- self.authors.append(attrs["name"] + " <" + attrs["email"] + "> " + attrs["name"])
def end_element(self, name):
if name == "domain":
@@ -703,11 +736,16 @@ class Parser(object):
elif name == "reg32":
self.current_reg = None
elif name == "array":
+ # if the array has no Reg children, push an implicit reg32:
+ if len(self.current_array.children) == 0:
+ attrs = {
+ "name": "REG",
+ "offset": "0",
+ }
+ self.parse_reg(attrs, 32)
self.current_array = self.current_array.parent
elif name == "enum":
self.current_enum = None
- elif name == "license":
- self.license = self.cdata
def character_data(self, data):
self.cdata += data
@@ -720,10 +758,10 @@ class Parser(object):
if variants:
for variant, vreg in variants.items():
if reg == vreg:
- d[(usage, variant)].append(reg)
+ d[(usage, sanitize_variant(variant))].append(reg)
else:
for variant in self.variants:
- d[(usage, variant)].append(reg)
+ d[(usage, sanitize_variant(variant))].append(reg)
print("#ifdef __cplusplus")
@@ -753,6 +791,9 @@ class Parser(object):
print("#endif")
+ def has_variants(self, reg):
+ return reg.name in self.variant_regs and not is_number(reg.name) and not is_number(reg.name[1:])
+
def dump(self):
enums = []
bitsets = []
@@ -766,7 +807,7 @@ class Parser(object):
regs.append(e)
for e in enums + bitsets + regs:
- e.dump()
+ e.dump(self.has_variants(e))
self.dump_reg_usages()
@@ -782,8 +823,7 @@ class Parser(object):
def dump_reg_variants(self, regname, variants):
- # Don't bother for things that only have a single variant:
- if len(variants) == 1:
+ if is_number(regname) or is_number(regname[1:]):
return
print("#ifdef __cplusplus")
print("struct __%s {" % regname)
@@ -834,11 +874,20 @@ class Parser(object):
xtravar = "__i, "
print("__%s(%sstruct __%s fields) {" % (regname, xtra, regname))
for variant in variants.keys():
- print(" if (%s == %s) {" % (varenum.upper(), variant))
+ if "-" in variant:
+ start = variant[:variant.index("-")]
+ end = variant[variant.index("-") + 1:]
+ if end != "":
+ print(" if ((%s >= %s) && (%s <= %s)) {" % (varenum.upper(), start, varenum.upper(), end))
+ else:
+ print(" if (%s >= %s) {" % (varenum.upper(), start))
+ else:
+ print(" if (%s == %s) {" % (varenum.upper(), variant))
reg = variants[variant]
reg.dump_regpair_builder()
print(" } else")
print(" assert(!\"invalid variant\");")
+ print(" return (struct fd_reg_pair){};")
print("}")
if bit_size == 64:
@@ -851,7 +900,7 @@ class Parser(object):
def dump_structs(self):
for e in self.file:
- e.dump_pack_struct()
+ e.dump_pack_struct(self.has_variants(e))
for regname in self.variant_regs:
self.dump_reg_variants(regname, self.variant_regs[regname])
@@ -868,33 +917,7 @@ def dump_c(args, guard, func):
print("#ifndef %s\n#define %s\n" % (guard, guard))
- print("""/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng gen_header.py tool in this git repository:
-http://gitlab.freedesktop.org/mesa/mesa/
-git clone https://gitlab.freedesktop.org/mesa/mesa.git
-
-The rules-ng-ng source files this header was generated from are:
-""")
- maxlen = 0
- for filepath in p.xml_files:
- new_filepath = re.sub("^.+drivers","drivers",filepath)
- maxlen = max(maxlen, len(new_filepath))
- for filepath in p.xml_files:
- pad = " " * (maxlen - len(new_filepath))
- filesize = str(os.path.getsize(filepath))
- filesize = " " * (7 - len(filesize)) + filesize
- filetime = time.ctime(os.path.getmtime(filepath))
- print("- " + new_filepath + pad + " (" + filesize + " bytes, from <stripped>)")
- if p.copyright_year:
- current_year = str(datetime.date.today().year)
- print()
- print("Copyright (C) %s-%s by the following authors:" % (p.copyright_year, current_year))
- for author in p.authors:
- print("- " + author)
- if p.license:
- print(p.license)
- print("*/")
+ print("/* Autogenerated file, DO NOT EDIT manually! */")
print()
print("#ifdef __KERNEL__")
@@ -912,9 +935,20 @@ The rules-ng-ng source files this header was generated from are:
print("#endif")
print()
+ print("#ifndef FD_NO_DEPRECATED_PACK")
+ print("#define FD_DEPRECATED __attribute__((deprecated))")
+ print("#else")
+ print("#define FD_DEPRECATED")
+ print("#endif")
+ print()
+
func(p)
- print("\n#endif /* %s */" % guard)
+ print()
+ print("#undef FD_DEPRECATED")
+ print()
+
+ print("#endif /* %s */" % guard)
def dump_c_defines(args):
@@ -931,7 +965,7 @@ def dump_py_defines(args):
p = Parser()
try:
- p.parse(args.rnn, args.xml)
+ p.parse(args.rnn, args.xml, args.validate)
except Error as e:
print(e, file=sys.stderr)
exit(1)
diff --git a/drivers/gpu/drm/mxsfb/lcdif_kms.c b/drivers/gpu/drm/mxsfb/lcdif_kms.c
index dbd42cc1da87..72eb0de46b54 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_kms.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_kms.c
@@ -26,6 +26,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "lcdif_drv.h"
@@ -433,7 +434,6 @@ static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge_state *bridge_state;
- struct drm_bridge *bridge;
u32 bus_format, bus_flags;
bool format_set = false, flags_set = false;
int ret, i;
@@ -453,7 +453,8 @@ static int lcdif_crtc_atomic_check(struct drm_crtc *crtc,
encoder = connector_state->best_encoder;
- bridge = drm_bridge_chain_get_first_bridge(encoder);
+ struct drm_bridge *bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_first_bridge(encoder);
if (!bridge)
continue;
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index 7ed2516b6de0..8cac0a275b7d 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -26,6 +26,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "mxsfb_drv.h"
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index d1587639ebb0..3b5757aed9c8 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -28,6 +28,7 @@ config DRM_NOUVEAU
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
select SND_HDA_COMPONENT if SND_HDA_CORE
+ select PM_DEVFREQ if ARCH_TEGRA
help
Choose this option for open-source NVIDIA support.
@@ -102,14 +103,6 @@ config DRM_NOUVEAU_SVM
Say Y here if you want to enable experimental support for
Shared Virtual Memory (SVM).
-config DRM_NOUVEAU_GSP_DEFAULT
- bool "Use GSP firmware for Turing/Ampere (needs firmware installed)"
- depends on DRM_NOUVEAU
- default n
- help
- Say Y here if you want to use the GSP codepaths by default on
- Turing and Ampere GPUs.
-
config DRM_NOUVEAU_CH7006
tristate "Chrontel ch7006 TV encoder"
depends on DRM_NOUVEAU
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index e97e39abf3a2..12b1dba8e05d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -2867,7 +2867,9 @@ nv50_display_create(struct drm_device *dev)
}
/* Assign the correct format modifiers */
- if (disp->disp->object.oclass >= TU102_DISP)
+ if (disp->disp->object.oclass >= GB202_DISP)
+ nouveau_display(dev)->format_modifiers = wndwca7e_modifiers;
+ else if (disp->disp->object.oclass >= TU102_DISP)
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
else
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 15f9242b72ac..5d998f0319dc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -104,4 +104,5 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
extern const u64 disp50xx_modifiers[];
extern const u64 disp90xx_modifiers[];
extern const u64 wndwc57e_modifiers[];
+extern const u64 wndwca7e_modifiers[];
#endif
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index e2c55f4b9c5a..ef9e410babbf 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -786,13 +786,14 @@ nv50_wndw_destroy(struct drm_plane *plane)
}
/* This function assumes the format has already been validated against the plane
- * and the modifier was validated against the device-wides modifier list at FB
+ * and the modifier was validated against the device-wide modifier list at FB
* creation time.
*/
static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
u32 format, u64 modifier)
{
struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ const struct drm_format_info *info = drm_format_info(format);
uint8_t i;
/* All chipsets can display all formats in linear layout */
@@ -800,13 +801,32 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
return true;
if (drm->client.device.info.chipset < 0xc0) {
- const struct drm_format_info *info = drm_format_info(format);
const uint8_t kind = (modifier >> 12) & 0xff;
if (!format) return false;
for (i = 0; i < info->num_planes; i++)
if ((info->cpp[i] != 4) && kind != 0x70) return false;
+ } else if (drm->client.device.info.chipset >= 0x1b2) {
+ const uint8_t slayout = ((modifier >> 22) & 0x1) |
+ ((modifier >> 25) & 0x6);
+
+ if (!format)
+ return false;
+
+ /*
+	 * Note that in practice this implies only formats where cpp is
+	 * equal across planes, or >= 4 for all planes, are supported.
+ */
+ for (i = 0; i < info->num_planes; i++) {
+ if (((info->cpp[i] == 2) && slayout != 3) ||
+ ((info->cpp[i] == 1) && slayout != 2) ||
+ ((info->cpp[i] >= 4) && slayout != 1))
+ return false;
+
+ /* 24-bit not supported. It has yet another layout */
+ WARN_ON(info->cpp[i] == 3);
+ }
}
return true;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
index 0d8e9a9d1a57..2cec8cfbd546 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwca7e.c
@@ -179,6 +179,39 @@ wndwca7e_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
return 0;
}
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 wndwca7e_modifiers[] = { /* | | | | | */
+ /* 4cpp+ modifiers */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
+ /* 1cpp/8bpp modifiers */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 2, 2, 0x06, 5),
+ /* 2cpp/16bpp modifiers */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 3, 2, 0x06, 5),
+ /* All formats support linear */
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
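These three groups line up with the sector-layout check added in wndw.c above: slayout 1 for 4cpp+, 2 for 1cpp, 3 for 2cpp. Restating the extraction shows the mapping; only the sector-layout bits of the modifier are modeled here, and their packing (low bit at 22, high bits at 26..27) is an assumption inferred from that check:

def mod_slayout_bits(s):
    # assumed packing of the sector-layout field inside the modifier
    return ((s & 0x1) << 22) | (((s >> 1) & 0x3) << 26)

def slayout(modifier):
    # same extraction as nv50_plane_format_mod_supported() above
    return ((modifier >> 22) & 0x1) | ((modifier >> 25) & 0x6)

for s in (1, 2, 3):  # the 4cpp+, 1cpp and 2cpp groups in this table
    assert slayout(mod_slayout_bits(s)) == s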
static const struct nv50_wndw_func
wndwca7e = {
.acquire = wndwc37e_acquire,
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
index 8b58b668fc0c..c78ab11ec3ac 100644
--- a/drivers/gpu/drm/nouveau/include/nvfw/hs.h
+++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
@@ -52,7 +52,9 @@ struct nvfw_hs_load_header_v2 {
struct {
u32 offset;
u32 size;
- } app[];
+ u32 data_offset;
+ u32 data_size;
+ } app[] __counted_by(num_apps);
};
const struct nvfw_hs_load_header_v2 *nvfw_hs_load_header_v2(struct nvkm_subdev *, const void *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index 22f74fc88cd7..57bc542780bb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -9,6 +9,8 @@ struct nvkm_device_tegra {
struct nvkm_device device;
struct platform_device *pdev;
+ void __iomem *regs;
+
struct reset_control *rst;
struct clk *clk;
struct clk *clk_ref;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
index d5d8877064a7..6a09d397c651 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/clk.h
@@ -134,4 +134,5 @@ int gf100_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
int gk104_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
int gk20a_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
int gm20b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
+int gp10b_clk_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_clk **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b96f0555ca14..f26562eafffc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -929,7 +929,7 @@ done:
nvif_vmm_put(vmm, &old_mem->vma[1]);
nvif_vmm_put(vmm, &old_mem->vma[0]);
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index d59fd12268b9..6c26beeb427f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -57,7 +57,7 @@ nouveau_bo(struct ttm_buffer_object *bo)
static inline void
nouveau_bo_fini(struct nouveau_bo *bo)
{
- ttm_bo_put(&bo->bo);
+ ttm_bo_fini(&bo->bo);
}
extern struct ttm_device_funcs nouveau_bo_driver;
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
index 561877725aac..bb34b0a6082d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.h
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -31,8 +31,6 @@ struct nouveau_channel {
u64 addr;
} push;
- /* TODO: this will be reworked in the near future */
- bool accel_done;
void *fence;
struct {
int max;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 805d0a87aa54..00515623a2cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
@@ -764,7 +765,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
- drm_client_dev_suspend(dev, false);
+ drm_client_dev_suspend(dev);
if (drm_drv_uses_atomic_modeset(dev)) {
if (!runtime) {
@@ -795,7 +796,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
}
}
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
}
int
@@ -807,9 +808,9 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
uint32_t domain;
int ret;
- args->pitch = roundup(args->width * (args->bpp / 8), 256);
- args->size = args->pitch * args->height;
- args->size = roundup(args->size, PAGE_SIZE);
+ ret = drm_mode_size_dumb(dev, args, SZ_256, 0);
+ if (ret)
+ return ret;
/* Use VRAM if there is any ; otherwise fallback to system memory */
if (nouveau_drm(dev)->client.device.info.ram_size != 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 0e27b76d1e1c..c25ef9a54b9f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -90,7 +90,6 @@ FIRE_RING(struct nouveau_channel *chan)
{
if (chan->dma.cur == chan->dma.put)
return;
- chan->accel_done = true;
WRITE_PUT(chan->dma.cur);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ca4932a150e3..58071652679d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -50,6 +50,7 @@
*/
#define DMEM_CHUNK_SIZE (2UL << 20)
#define DMEM_CHUNK_NPAGES (DMEM_CHUNK_SIZE >> PAGE_SHIFT)
+#define NR_CHUNKS (128)
enum nouveau_aper {
NOUVEAU_APER_VIRT,
@@ -83,9 +84,15 @@ struct nouveau_dmem {
struct list_head chunks;
struct mutex mutex;
struct page *free_pages;
+ struct folio *free_folios;
spinlock_t lock;
};
+struct nouveau_dmem_dma_info {
+ dma_addr_t dma_addr;
+ size_t size;
+};
+
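With NR_CHUNKS, one large region is reserved up front instead of a 2 MiB region per chunk, and each chunk is exactly one PMD-sized unit for the THP path. The arithmetic, assuming 4 KiB pages (PAGE_SHIFT is arch-dependent; 12 is an assumption here):

PAGE_SHIFT = 12                     # assumed 4 KiB pages
DMEM_CHUNK_SIZE = 2 << 20           # 2 MiB, as defined above
DMEM_CHUNK_NPAGES = DMEM_CHUNK_SIZE >> PAGE_SHIFT
NR_CHUNKS = 128

assert DMEM_CHUNK_NPAGES == 512                  # one PMD/THP worth of pages
assert NR_CHUNKS * DMEM_CHUNK_SIZE == 256 << 20  # 256 MiB reserved in one go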
static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
return container_of(page_pgmap(page), struct nouveau_dmem_chunk,
@@ -108,14 +115,20 @@ unsigned long nouveau_dmem_page_addr(struct page *page)
return chunk->bo->offset + off;
}
-static void nouveau_dmem_page_free(struct page *page)
+static void nouveau_dmem_folio_free(struct folio *folio)
{
+ struct page *page = &folio->page;
struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
struct nouveau_dmem *dmem = chunk->drm->dmem;
spin_lock(&dmem->lock);
- page->zone_device_data = dmem->free_pages;
- dmem->free_pages = page;
+ if (folio_order(folio)) {
+ page->zone_device_data = dmem->free_folios;
+ dmem->free_folios = folio;
+ } else {
+ page->zone_device_data = dmem->free_pages;
+ dmem->free_pages = page;
+ }
WARN_ON(!chunk->callocated);
chunk->callocated--;
@@ -139,20 +152,28 @@ static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
}
}
-static int nouveau_dmem_copy_one(struct nouveau_drm *drm, struct page *spage,
- struct page *dpage, dma_addr_t *dma_addr)
+static int nouveau_dmem_copy_folio(struct nouveau_drm *drm,
+ struct folio *sfolio, struct folio *dfolio,
+ struct nouveau_dmem_dma_info *dma_info)
{
struct device *dev = drm->dev->dev;
+ struct page *dpage = folio_page(dfolio, 0);
+ struct page *spage = folio_page(sfolio, 0);
- lock_page(dpage);
+ folio_lock(dfolio);
- *dma_addr = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, *dma_addr))
+ dma_info->dma_addr = dma_map_page(dev, dpage, 0, page_size(dpage),
+ DMA_BIDIRECTIONAL);
+ dma_info->size = page_size(dpage);
+ if (dma_mapping_error(dev, dma_info->dma_addr))
return -EIO;
- if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_HOST, *dma_addr,
- NOUVEAU_APER_VRAM, nouveau_dmem_page_addr(spage))) {
- dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (drm->dmem->migrate.copy_func(drm, folio_nr_pages(sfolio),
+ NOUVEAU_APER_HOST, dma_info->dma_addr,
+ NOUVEAU_APER_VRAM,
+ nouveau_dmem_page_addr(spage))) {
+ dma_unmap_page(dev, dma_info->dma_addr, page_size(dpage),
+ DMA_BIDIRECTIONAL);
return -EIO;
}
@@ -165,21 +186,48 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
struct nouveau_svmm *svmm;
- struct page *spage, *dpage;
- unsigned long src = 0, dst = 0;
- dma_addr_t dma_addr = 0;
+ struct page *dpage;
vm_fault_t ret = 0;
+ int err;
struct migrate_vma args = {
.vma = vmf->vma,
- .start = vmf->address,
- .end = vmf->address + PAGE_SIZE,
- .src = &src,
- .dst = &dst,
.pgmap_owner = drm->dev,
.fault_page = vmf->page,
- .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
+ .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
+ MIGRATE_VMA_SELECT_COMPOUND,
+ .src = NULL,
+ .dst = NULL,
};
+ unsigned int order, nr;
+ struct folio *sfolio, *dfolio;
+ struct nouveau_dmem_dma_info dma_info;
+
+ sfolio = page_folio(vmf->page);
+ order = folio_order(sfolio);
+ nr = 1 << order;
+
+ /*
+ * Handle partial unmap faults, where the folio is large, but
+ * the pmd is split.
+ */
+ if (vmf->pte) {
+ order = 0;
+ nr = 1;
+ }
+
+ if (order)
+ args.flags |= MIGRATE_VMA_SELECT_COMPOUND;
+ args.start = ALIGN_DOWN(vmf->address, (PAGE_SIZE << order));
+ args.vma = vmf->vma;
+ args.end = args.start + (PAGE_SIZE << order);
+ args.src = kcalloc(nr, sizeof(*args.src), GFP_KERNEL);
+ args.dst = kcalloc(nr, sizeof(*args.dst), GFP_KERNEL);
+
+ if (!args.src || !args.dst) {
+ ret = VM_FAULT_OOM;
+ goto err;
+ }
/*
* FIXME what we really want is to find some heuristic to migrate more
* than just one page on CPU fault. When such fault happens it is very
@@ -190,22 +238,28 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
if (!args.cpages)
return 0;
- spage = migrate_pfn_to_page(src);
- if (!spage || !(src & MIGRATE_PFN_MIGRATE))
- goto done;
-
- dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
- if (!dpage)
+ if (order)
+ dpage = folio_page(vma_alloc_folio(GFP_HIGHUSER | __GFP_ZERO,
+ order, vmf->vma, vmf->address), 0);
+ else
+ dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma,
+ vmf->address);
+ if (!dpage) {
+ ret = VM_FAULT_OOM;
goto done;
+ }
- dst = migrate_pfn(page_to_pfn(dpage));
+ args.dst[0] = migrate_pfn(page_to_pfn(dpage));
+ if (order)
+ args.dst[0] |= MIGRATE_PFN_COMPOUND;
+ dfolio = page_folio(dpage);
- svmm = spage->zone_device_data;
+ svmm = folio_zone_device_data(sfolio);
mutex_lock(&svmm->mutex);
nouveau_svmm_invalidate(svmm, args.start, args.end);
- ret = nouveau_dmem_copy_one(drm, spage, dpage, &dma_addr);
+ err = nouveau_dmem_copy_folio(drm, sfolio, dfolio, &dma_info);
mutex_unlock(&svmm->mutex);
- if (ret) {
+ if (err) {
ret = VM_FAULT_SIGBUS;
goto done;
}
@@ -213,25 +267,40 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
nouveau_fence_new(&fence, dmem->migrate.chan);
migrate_vma_pages(&args);
nouveau_dmem_fence_done(&fence);
- dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(drm->dev->dev, dma_info.dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
done:
migrate_vma_finalize(&args);
+err:
+ kfree(args.src);
+ kfree(args.dst);
return ret;
}
+static void nouveau_dmem_folio_split(struct folio *head, struct folio *tail)
+{
+ if (tail == NULL)
+ return;
+ tail->pgmap = head->pgmap;
+ tail->mapping = head->mapping;
+ folio_set_zone_device_data(tail, folio_zone_device_data(head));
+}
+
static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
- .page_free = nouveau_dmem_page_free,
+ .folio_free = nouveau_dmem_folio_free,
.migrate_to_ram = nouveau_dmem_migrate_to_ram,
+ .folio_split = nouveau_dmem_folio_split,
};
static int
-nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage,
+ bool is_large)
{
struct nouveau_dmem_chunk *chunk;
struct resource *res;
struct page *page;
void *ptr;
- unsigned long i, pfn_first;
+ unsigned long i, pfn_first, pfn;
int ret;
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
@@ -241,7 +310,7 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
}
/* Allocate unused physical address space for device private pages. */
- res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
+ res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE * NR_CHUNKS,
"nouveau_dmem");
if (IS_ERR(res)) {
ret = PTR_ERR(res);
@@ -274,16 +343,40 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
pfn_first = chunk->pagemap.range.start >> PAGE_SHIFT;
page = pfn_to_page(pfn_first);
spin_lock(&drm->dmem->lock);
- for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
- page->zone_device_data = drm->dmem->free_pages;
- drm->dmem->free_pages = page;
+
+ pfn = pfn_first;
+ for (i = 0; i < NR_CHUNKS; i++) {
+ int j;
+
+ if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) || !is_large) {
+ for (j = 0; j < DMEM_CHUNK_NPAGES - 1; j++, pfn++) {
+ page = pfn_to_page(pfn);
+ page->zone_device_data = drm->dmem->free_pages;
+ drm->dmem->free_pages = page;
+ }
+ } else {
+ page = pfn_to_page(pfn);
+ page->zone_device_data = drm->dmem->free_folios;
+ drm->dmem->free_folios = page_folio(page);
+ pfn += DMEM_CHUNK_NPAGES;
+ }
+ }
+
+ /* Move to next page */
+ if (is_large) {
+ *ppage = &drm->dmem->free_folios->page;
+ drm->dmem->free_folios = (*ppage)->zone_device_data;
+ } else {
+ *ppage = drm->dmem->free_pages;
+ drm->dmem->free_pages = (*ppage)->zone_device_data;
}
- *ppage = page;
+
chunk->callocated++;
spin_unlock(&drm->dmem->lock);
- NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
- DMEM_CHUNK_SIZE >> 20);
+ NV_INFO(drm, "DMEM: registered %ldMB of %sdevice memory %lx %lx\n",
+ NR_CHUNKS * DMEM_CHUNK_SIZE >> 20, is_large ? "THP " : "", pfn_first,
+ nouveau_dmem_page_addr(page));
return 0;
@@ -298,27 +391,41 @@ out:
}
static struct page *
-nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
+nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm, bool is_large)
{
struct nouveau_dmem_chunk *chunk;
struct page *page = NULL;
+ struct folio *folio = NULL;
int ret;
+ unsigned int order = 0;
spin_lock(&drm->dmem->lock);
- if (drm->dmem->free_pages) {
+ if (is_large && drm->dmem->free_folios) {
+ folio = drm->dmem->free_folios;
+ page = &folio->page;
+ drm->dmem->free_folios = page->zone_device_data;
+ chunk = nouveau_page_to_chunk(&folio->page);
+ chunk->callocated++;
+ spin_unlock(&drm->dmem->lock);
+ order = ilog2(DMEM_CHUNK_NPAGES);
+ } else if (!is_large && drm->dmem->free_pages) {
page = drm->dmem->free_pages;
drm->dmem->free_pages = page->zone_device_data;
chunk = nouveau_page_to_chunk(page);
chunk->callocated++;
spin_unlock(&drm->dmem->lock);
+ folio = page_folio(page);
} else {
spin_unlock(&drm->dmem->lock);
- ret = nouveau_dmem_chunk_alloc(drm, &page);
+ ret = nouveau_dmem_chunk_alloc(drm, &page, is_large);
if (ret)
return NULL;
+ folio = page_folio(page);
+ if (is_large)
+ order = ilog2(DMEM_CHUNK_NPAGES);
}
- zone_device_page_init(page);
+ zone_device_folio_init(folio, order);
return page;
}
@@ -369,12 +476,12 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
{
unsigned long i, npages = range_len(&chunk->pagemap.range) >> PAGE_SHIFT;
unsigned long *src_pfns, *dst_pfns;
- dma_addr_t *dma_addrs;
+ struct nouveau_dmem_dma_info *dma_info;
struct nouveau_fence *fence;
src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
- dma_addrs = kvcalloc(npages, sizeof(*dma_addrs), GFP_KERNEL | __GFP_NOFAIL);
+ dma_info = kvcalloc(npages, sizeof(*dma_info), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, chunk->pagemap.range.start >> PAGE_SHIFT,
npages);
@@ -382,17 +489,28 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
for (i = 0; i < npages; i++) {
if (src_pfns[i] & MIGRATE_PFN_MIGRATE) {
struct page *dpage;
+ struct folio *folio = page_folio(
+ migrate_pfn_to_page(src_pfns[i]));
+ unsigned int order = folio_order(folio);
+
+ if (src_pfns[i] & MIGRATE_PFN_COMPOUND) {
+ dpage = folio_page(
+ folio_alloc(
+ GFP_HIGHUSER_MOVABLE, order), 0);
+ } else {
+ /*
+ * _GFP_NOFAIL because the GPU is going away and there
+ * is nothing sensible we can do if we can't copy the
+ * data back.
+ */
+ dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
+ }
- /*
- * _GFP_NOFAIL because the GPU is going away and there
- * is nothing sensible we can do if we can't copy the
- * data back.
- */
- dpage = alloc_page(GFP_HIGHUSER | __GFP_NOFAIL);
dst_pfns[i] = migrate_pfn(page_to_pfn(dpage));
- nouveau_dmem_copy_one(chunk->drm,
- migrate_pfn_to_page(src_pfns[i]), dpage,
- &dma_addrs[i]);
+ nouveau_dmem_copy_folio(chunk->drm,
+ page_folio(migrate_pfn_to_page(src_pfns[i])),
+ page_folio(dpage),
+ &dma_info[i]);
}
}
@@ -403,8 +521,9 @@ nouveau_dmem_evict_chunk(struct nouveau_dmem_chunk *chunk)
kvfree(src_pfns);
kvfree(dst_pfns);
for (i = 0; i < npages; i++)
- dma_unmap_page(chunk->drm->dev->dev, dma_addrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
- kvfree(dma_addrs);
+ dma_unmap_page(chunk->drm->dev->dev, dma_info[i].dma_addr,
+ dma_info[i].size, DMA_BIDIRECTIONAL);
+ kvfree(dma_info);
}
void
@@ -607,31 +726,36 @@ nouveau_dmem_init(struct nouveau_drm *drm)
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
struct nouveau_svmm *svmm, unsigned long src,
- dma_addr_t *dma_addr, u64 *pfn)
+ struct nouveau_dmem_dma_info *dma_info, u64 *pfn)
{
struct device *dev = drm->dev->dev;
struct page *dpage, *spage;
unsigned long paddr;
+ bool is_large = false;
+ unsigned long mpfn;
spage = migrate_pfn_to_page(src);
if (!(src & MIGRATE_PFN_MIGRATE))
goto out;
- dpage = nouveau_dmem_page_alloc_locked(drm);
+ is_large = src & MIGRATE_PFN_COMPOUND;
+ dpage = nouveau_dmem_page_alloc_locked(drm, is_large);
if (!dpage)
goto out;
paddr = nouveau_dmem_page_addr(dpage);
if (spage) {
- *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
+ dma_info->dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, *dma_addr))
+ dma_info->size = page_size(spage);
+ if (dma_mapping_error(dev, dma_info->dma_addr))
goto out_free_page;
- if (drm->dmem->migrate.copy_func(drm, 1,
- NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
+ if (drm->dmem->migrate.copy_func(drm, folio_nr_pages(page_folio(spage)),
+ NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST,
+ dma_info->dma_addr))
goto out_dma_unmap;
} else {
- *dma_addr = DMA_MAPPING_ERROR;
+ dma_info->dma_addr = DMA_MAPPING_ERROR;
if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
NOUVEAU_APER_VRAM, paddr))
goto out_free_page;
@@ -642,10 +766,13 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
if (src & MIGRATE_PFN_WRITE)
*pfn |= NVIF_VMM_PFNMAP_V0_W;
- return migrate_pfn(page_to_pfn(dpage));
+ mpfn = migrate_pfn(page_to_pfn(dpage));
+ if (folio_order(page_folio(dpage)))
+ mpfn |= MIGRATE_PFN_COMPOUND;
+ return mpfn;
out_dma_unmap:
- dma_unmap_page(dev, *dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_info->dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
out_free_page:
nouveau_dmem_page_free_locked(drm, dpage);
out:
@@ -655,27 +782,38 @@ out:
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
struct nouveau_svmm *svmm, struct migrate_vma *args,
- dma_addr_t *dma_addrs, u64 *pfns)
+ struct nouveau_dmem_dma_info *dma_info, u64 *pfns)
{
struct nouveau_fence *fence;
unsigned long addr = args->start, nr_dma = 0, i;
+ unsigned long order = 0;
+
+ for (i = 0; addr < args->end; ) {
+ struct folio *folio;
- for (i = 0; addr < args->end; i++) {
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
- args->src[i], dma_addrs + nr_dma, pfns + i);
- if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
+ args->src[i], dma_info + nr_dma, pfns + i);
+ if (!args->dst[i]) {
+ i++;
+ addr += PAGE_SIZE;
+ continue;
+ }
+ if (!dma_mapping_error(drm->dev->dev, dma_info[nr_dma].dma_addr))
nr_dma++;
- addr += PAGE_SIZE;
+ folio = page_folio(migrate_pfn_to_page(args->dst[i]));
+ order = folio_order(folio);
+ i += 1 << order;
+ addr += (1 << order) * PAGE_SIZE;
}
nouveau_fence_new(&fence, drm->dmem->migrate.chan);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
- nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
+ nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i, order);
while (nr_dma--) {
- dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(drm->dev->dev, dma_info[nr_dma].dma_addr,
+ dma_info[nr_dma].size, DMA_BIDIRECTIONAL);
}
migrate_vma_finalize(args);
}
@@ -688,20 +826,27 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
unsigned long end)
{
unsigned long npages = (end - start) >> PAGE_SHIFT;
- unsigned long max = min(SG_MAX_SINGLE_ALLOC, npages);
- dma_addr_t *dma_addrs;
+ unsigned long max = npages;
struct migrate_vma args = {
.vma = vma,
.start = start,
.pgmap_owner = drm->dev,
- .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ .flags = MIGRATE_VMA_SELECT_SYSTEM
+ | MIGRATE_VMA_SELECT_COMPOUND,
};
unsigned long i;
u64 *pfns;
int ret = -ENOMEM;
+ struct nouveau_dmem_dma_info *dma_info;
- if (drm->dmem == NULL)
- return -ENODEV;
+ if (drm->dmem == NULL) {
+ ret = -ENODEV;
+ goto out;
+ }
+
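+ /* Clamp each chunk to one PMD worth of pages so a whole THP fits. */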
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ max = min(max, (unsigned long)HPAGE_PMD_NR);
args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
if (!args.src)
@@ -710,8 +855,8 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
if (!args.dst)
goto out_free_src;
- dma_addrs = kmalloc_array(max, sizeof(*dma_addrs), GFP_KERNEL);
- if (!dma_addrs)
+ dma_info = kmalloc_array(max, sizeof(*dma_info), GFP_KERNEL);
+ if (!dma_info)
goto out_free_dst;
pfns = nouveau_pfns_alloc(max);
@@ -729,7 +874,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
goto out_free_pfns;
if (args.cpages)
- nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
+ nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_info,
pfns);
args.start = args.end;
}
@@ -738,7 +883,7 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
out_free_pfns:
nouveau_pfns_free(pfns);
out_free_dma:
- kfree(dma_addrs);
+ kfree(dma_info);
out_free_dst:
kfree(args.dst);
out_free_src:
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 55abc510067b..0e409414f44d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -10,7 +10,7 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 4
-#define DRIVER_PATCHLEVEL 0
+#define DRIVER_PATCHLEVEL 1
/*
* 1.1.1:
@@ -35,6 +35,8 @@
* programs that get directly linked with NVKM.
* 1.3.1:
* - implemented limited ABI16/NVIF interop
+ * 1.4.1:
+ * - add variable page sizes and compression for Turing+
*/
#include <linux/notifier.h>
@@ -49,6 +51,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 690e10fbf0bd..395d92ab6271 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -87,7 +87,7 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
return;
}
- ttm_bo_put(&nvbo->bo);
+ ttm_bo_fini(&nvbo->bo);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index a5ce8eb4a3be..9fd351273236 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -21,6 +21,8 @@
*/
#include "nouveau_platform.h"
+#include <nvkm/subdev/clk/gk20a_devfreq.h>
+
static int nouveau_platform_probe(struct platform_device *pdev)
{
const struct nvkm_device_tegra_func *func;
@@ -30,10 +32,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
func = of_device_get_match_data(&pdev->dev);
drm = nouveau_platform_device_create(func, pdev, &device);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- return 0;
+ return PTR_ERR_OR_ZERO(drm);
}
static void nouveau_platform_remove(struct platform_device *pdev)
@@ -43,6 +42,21 @@ static void nouveau_platform_remove(struct platform_device *pdev)
nouveau_drm_device_remove(drm);
}
+#ifdef CONFIG_PM_SLEEP
+static int nouveau_platform_suspend(struct device *dev)
+{
+ return gk20a_devfreq_suspend(dev);
+}
+
+static int nouveau_platform_resume(struct device *dev)
+{
+ return gk20a_devfreq_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(nouveau_pm_ops, nouveau_platform_suspend,
+ nouveau_platform_resume);
+#endif
+
#if IS_ENABLED(CONFIG_OF)
static const struct nvkm_device_tegra_func gk20a_platform_data = {
.iommu_bit = 34,
@@ -84,6 +98,9 @@ struct platform_driver nouveau_platform_driver = {
.driver = {
.name = "nouveau",
.of_match_table = of_match_ptr(nouveau_platform_match),
+#ifdef CONFIG_PM_SLEEP
+ .pm = &nouveau_pm_ops,
+#endif
},
.probe = nouveau_platform_probe,
.remove = nouveau_platform_remove,
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index cd95446d6851..caab60fc62f6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -108,9 +108,21 @@ struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj,
int flags)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gobj);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ /* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (nvbo->no_share)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&nvbo->bo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
return drm_gem_prime_export(gobj, flags);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index e60f7892f5ce..a7bf539e5d86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
return 0;
}
+static bool
+nouveau_sched_job_list_empty(struct nouveau_sched *sched)
+{
+ bool empty;
+
+ spin_lock(&sched->job.list.lock);
+ empty = list_empty(&sched->job.list.head);
+ spin_unlock(&sched->job.list.lock);
+
+ return empty;
+}
static void
nouveau_sched_fini(struct nouveau_sched *sched)
@@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched)
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
- rmb(); /* for list_empty to work without lock */
- wait_event(sched->job.wq, list_empty(&sched->job.list.head));
+ wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));
drm_sched_entity_fini(entity);
drm_sched_fini(drm_sched);
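
The rework above replaces a barrier plus a lockless list_empty() with a check taken under the job list lock, so wait_event() re-evaluates a properly serialised predicate after every wakeup. A generic sketch of that pattern, assuming nothing nouveau-specific (types and names here are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct job_queue {
	spinlock_t lock;
	struct list_head head;
	wait_queue_head_t wq;
};

static bool job_queue_empty(struct job_queue *q)
{
	bool empty;

	/* Take the same lock writers hold when adding/removing jobs. */
	spin_lock(&q->lock);
	empty = list_empty(&q->head);
	spin_unlock(&q->lock);

	return empty;
}

static void job_queue_drain(struct job_queue *q)
{
	/* The predicate is re-checked under the lock on each wakeup. */
	wait_event(q->wq, job_queue_empty(q));
}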
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 6fa387da0637..b8a3378154d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -921,12 +921,14 @@ nouveau_pfns_free(u64 *pfns)
void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
- unsigned long addr, u64 *pfns, unsigned long npages)
+ unsigned long addr, u64 *pfns, unsigned long npages,
+ unsigned int page_shift)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
args->p.addr = addr;
- args->p.size = npages << PAGE_SHIFT;
+ args->p.size = npages << page_shift;
+ args->p.page = page_shift;
mutex_lock(&svmm->mutex);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
index e7d63d7f0c2d..3fd78662f17e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -33,7 +33,8 @@ void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit);
u64 *nouveau_pfns_alloc(unsigned long npages);
void nouveau_pfns_free(u64 *pfns);
void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
- unsigned long addr, u64 *pfns, unsigned long npages);
+ unsigned long addr, u64 *pfns, unsigned long npages,
+ unsigned int page_shift);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 7d2436e5d50d..0a55babdf667 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -302,8 +302,10 @@ nouveau_ttm_init(struct nouveau_drm *drm)
ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
- drm_need_swiotlb(drm->client.mmu.dmabits),
- drm->client.mmu.dmabits <= 32);
+ (drm_need_swiotlb(drm->client.mmu.dmabits) ?
+ TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+ (drm->client.mmu.dmabits <= 32 ?
+ TTM_ALLOCATION_POOL_USE_DMA32 : 0));
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index 48f105239f42..f10809115c56 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -107,34 +107,34 @@ nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
static int
nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
- u64 addr, u64 range)
+ u64 addr, u64 range, u8 page_shift)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
- return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT);
+ return nvif_vmm_raw_get(vmm, addr, range, page_shift);
}
static int
nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
- u64 addr, u64 range)
+ u64 addr, u64 range, u8 page_shift)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
- return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT);
+ return nvif_vmm_raw_put(vmm, addr, range, page_shift);
}
static int
nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
- u64 addr, u64 range, bool sparse)
+ u64 addr, u64 range, u8 page_shift, bool sparse)
{
struct nvif_vmm *vmm = &uvmm->vmm.vmm;
- return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse);
+ return nvif_vmm_raw_unmap(vmm, addr, range, page_shift, sparse);
}
static int
nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
- u64 addr, u64 range,
+ u64 addr, u64 range, u8 page_shift,
u64 bo_offset, u8 kind,
struct nouveau_mem *mem)
{
@@ -163,7 +163,7 @@ nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
return -ENOSYS;
}
- return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT,
+ return nvif_vmm_raw_map(vmm, addr, range, page_shift,
&args, argc,
&mem->mem, bo_offset);
}
@@ -182,8 +182,9 @@ nouveau_uvma_vmm_put(struct nouveau_uvma *uvma)
{
u64 addr = uvma->va.va.addr;
u64 range = uvma->va.va.range;
+ u8 page_shift = uvma->page_shift;
- return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range);
+ return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range, page_shift);
}
static int
@@ -193,9 +194,11 @@ nouveau_uvma_map(struct nouveau_uvma *uvma,
u64 addr = uvma->va.va.addr;
u64 offset = uvma->va.gem.offset;
u64 range = uvma->va.va.range;
+ u8 page_shift = uvma->page_shift;
return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
- offset, uvma->kind, mem);
+ page_shift, offset, uvma->kind,
+ mem);
}
static int
@@ -203,12 +206,13 @@ nouveau_uvma_unmap(struct nouveau_uvma *uvma)
{
u64 addr = uvma->va.va.addr;
u64 range = uvma->va.va.range;
+ u8 page_shift = uvma->page_shift;
bool sparse = !!uvma->region;
if (drm_gpuva_invalidated(&uvma->va))
return 0;
- return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+ return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
}
static int
@@ -450,6 +454,62 @@ op_unmap_prepare_unwind(struct drm_gpuva *va)
drm_gpuva_insert(va->vm, va);
}
+static bool
+op_map_aligned_to_page_shift(const struct drm_gpuva_op_map *op, u8 page_shift)
+{
+ u64 non_page_bits = (1ULL << page_shift) - 1;
+
+ return (op->va.addr & non_page_bits) == 0 &&
+ (op->va.range & non_page_bits) == 0 &&
+ (op->gem.offset & non_page_bits) == 0;
+}
+
+static u8
+select_page_shift(struct nouveau_uvmm *uvmm, struct drm_gpuva_op_map *op)
+{
+ struct nouveau_bo *nvbo = nouveau_gem_object(op->gem.obj);
+
+ /* nouveau_bo_fixup_align() guarantees an aligned page size in most
+ * cases, but it can't cover userspace allocating a BO with one size
+ * and then binding it with a smaller granularity. To avoid breaking
+ * old userspace, verify that the VA is actually aligned before using
+ * the BO's preferred page size; if it isn't, downgrade to the largest
+ * granularity that still fits, which keeps the mapping correct at the
+ * best remaining performance.
+ */
+ if (op_map_aligned_to_page_shift(op, nvbo->page))
+ return nvbo->page;
+
+ struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
+ struct nvif_vmm *vmm = &uvmm->vmm.vmm;
+ int i;
+
+ /* If the given granularity doesn't fit, let's find one that will fit. */
+ for (i = 0; i < vmm->page_nr; i++) {
+ /* Ignore anything that is bigger or identical to the BO preference. */
+ if (vmm->page[i].shift >= nvbo->page)
+ continue;
+
+ /* Skip incompatible domains. */
+ if ((mem->mem.type & NVIF_MEM_VRAM) && !vmm->page[i].vram)
+ continue;
+ if ((mem->mem.type & NVIF_MEM_HOST) &&
+ (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
+ continue;
+
+ /* If it fits, return the proposed shift. */
+ if (op_map_aligned_to_page_shift(op, vmm->page[i].shift))
+ return vmm->page[i].shift;
+ }
+
+ /* If we get here then nothing can reconcile the requirements. This should never
+ * happen.
+ */
+ drm_WARN_ONCE(op->gem.obj->dev, 1, "Could not find an appropriate page size.\n");
+
+ return PAGE_SHIFT;
+}
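
select_page_shift() only trusts a candidate page size when the VA, the range, and the GEM offset are all multiples of it. A hedged userspace illustration of that predicate, using made-up addresses:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool aligned_to_shift(uint64_t addr, uint64_t range,
			     uint64_t offset, unsigned int shift)
{
	uint64_t mask = (1ULL << shift) - 1;

	/* all three values must share the candidate page-size alignment */
	return !(addr & mask) && !(range & mask) && !(offset & mask);
}

int main(void)
{
	/* A 2 MiB-aligned mapping can use the 21-bit (2 MiB) shift... */
	assert(aligned_to_shift(0x200000, 0x400000, 0, 21));
	/* ...but a 64 KiB-aligned one must fall back to a smaller shift. */
	assert(!aligned_to_shift(0x210000, 0x400000, 0, 21));
	assert(aligned_to_shift(0x210000, 0x400000, 0, 16));
	return 0;
}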
+
static void
nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_prealloc *new,
@@ -501,7 +561,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
if (vmm_get_range)
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
- vmm_get_range);
+ vmm_get_range,
+ select_page_shift(uvmm, &op->map));
break;
}
case DRM_GPUVA_OP_REMAP: {
@@ -528,6 +589,7 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
u64 ustart = va->va.addr;
u64 urange = va->va.range;
u64 uend = ustart + urange;
+ u8 page_shift = uvma_from_va(va)->page_shift;
/* Nothing to do for mappings we merge with. */
if (uend == vmm_get_start ||
@@ -538,7 +600,8 @@ nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
u64 vmm_get_range = ustart - vmm_get_start;
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
- vmm_get_range);
+ vmm_get_range,
+ page_shift);
}
vmm_get_start = uend;
break;
@@ -592,6 +655,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
uvma->region = args->region;
uvma->kind = args->kind;
+ uvma->page_shift = select_page_shift(uvmm, op);
drm_gpuva_map(&uvmm->base, &uvma->va, op);
@@ -633,7 +697,8 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
if (vmm_get_range) {
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
- vmm_get_range);
+ vmm_get_range,
+ new->map->page_shift);
if (ret) {
op_map_prepare_unwind(new->map);
goto unwind;
@@ -689,6 +754,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
u64 ustart = va->va.addr;
u64 urange = va->va.range;
u64 uend = ustart + urange;
+ u8 page_shift = uvma_from_va(va)->page_shift;
op_unmap_prepare(u);
@@ -704,7 +770,7 @@ nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
u64 vmm_get_range = ustart - vmm_get_start;
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
- vmm_get_range);
+ vmm_get_range, page_shift);
if (ret) {
op_unmap_prepare_unwind(va);
goto unwind;
@@ -799,10 +865,11 @@ op_unmap_range(struct drm_gpuva_op_unmap *u,
u64 addr, u64 range)
{
struct nouveau_uvma *uvma = uvma_from_va(u->va);
+ u8 page_shift = uvma->page_shift;
bool sparse = !!uvma->region;
if (!drm_gpuva_invalidated(u->va))
- nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse);
+ nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, page_shift, sparse);
}
static void
@@ -882,6 +949,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
struct drm_gpuva_op_map *n = r->next;
struct drm_gpuva *va = r->unmap->va;
struct nouveau_uvma *uvma = uvma_from_va(va);
+ u8 page_shift = uvma->page_shift;
if (unmap) {
u64 addr = va->va.addr;
@@ -893,7 +961,7 @@ nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
if (n)
end = n->va.addr;
- nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
+ nouveau_uvmm_vmm_put(uvmm, addr, end - addr, page_shift);
}
nouveau_uvma_gem_put(uvma);
@@ -1276,6 +1344,12 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
break;
case OP_MAP: {
struct nouveau_uvma_region *reg;
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->gem.obj,
+ .map.gem.offset = op->gem.offset,
+ };
reg = nouveau_uvma_region_find_first(uvmm,
op->va.addr,
@@ -1301,10 +1375,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job,
}
op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
- op->va.addr,
- op->va.range,
- op->gem.obj,
- op->gem.offset);
+ &map_req);
if (IS_ERR(op->ops)) {
ret = PTR_ERR(op->ops);
goto unwind_continue;
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
index 9d3c348581eb..51925711ae90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -33,6 +33,7 @@ struct nouveau_uvma {
struct nouveau_uvma_region *region;
u8 kind;
+ u8 page_shift;
};
#define uvmm_from_gpuvm(x) container_of((x), struct nouveau_uvmm, base)
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
index b9581feb24cc..a23b40b27b81 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/enum.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c
@@ -44,7 +44,7 @@ nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value)
bool space = false;
while (size >= 1 && bf->name) {
if (value & bf->mask) {
- int this = snprintf(data, size, "%s%s",
+ int this = scnprintf(data, size, "%s%s",
space ? " " : "", bf->name);
size -= this;
data += this;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 3375a59ebf1a..2517b65d8faa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2280,6 +2280,7 @@ nv13b_chipset = {
.acr = { 0x00000001, gp10b_acr_new },
.bar = { 0x00000001, gm20b_bar_new },
.bus = { 0x00000001, gf100_bus_new },
+ .clk = { 0x00000001, gp10b_clk_new },
.fault = { 0x00000001, gp10b_fault_new },
.fb = { 0x00000001, gp10b_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 114e50ca1827..03aa6f09ec89 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -259,6 +259,10 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->func = func;
tdev->pdev = pdev;
+ tdev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tdev->regs))
+ return PTR_ERR(tdev->regs);
+
if (func->require_vdd) {
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
if (IS_ERR(tdev->vdd)) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
index cac6d64ab67d..4e8b3f1c7e25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c
@@ -159,6 +159,8 @@ nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw)
nvkm_memory_unref(&fw->inst);
nvkm_falcon_fw_dtor_sigs(fw);
nvkm_firmware_dtor(&fw->fw);
+ kfree(fw->boot);
+ fw->boot = NULL;
}
static const struct nvkm_firmware_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
index dcecd499d8df..be8f3283ee16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/Kbuild
@@ -10,6 +10,8 @@ nvkm-y += nvkm/subdev/clk/gf100.o
nvkm-y += nvkm/subdev/clk/gk104.o
nvkm-y += nvkm/subdev/clk/gk20a.o
nvkm-y += nvkm/subdev/clk/gm20b.o
+nvkm-y += nvkm/subdev/clk/gp10b.o
+nvkm-$(CONFIG_PM_DEVFREQ) += nvkm/subdev/clk/gk20a_devfreq.o
nvkm-y += nvkm/subdev/clk/pllnv04.o
nvkm-y += nvkm/subdev/clk/pllgt215.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index d573fb0917fc..65f5d0f1f3bf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -23,6 +23,7 @@
*
*/
#include "priv.h"
+#include "gk20a_devfreq.h"
#include "gk20a.h"
#include <core/tegra.h>
@@ -589,6 +590,10 @@ gk20a_clk_init(struct nvkm_clk *base)
return ret;
}
+ ret = gk20a_devfreq_init(base, &clk->devfreq);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
index 286413ff4a9e..ea5b0bab4cce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -118,6 +118,7 @@ struct gk20a_clk {
const struct gk20a_clk_pllg_params *params;
struct gk20a_pll pll;
u32 parent_rate;
+ struct gk20a_devfreq *devfreq;
u32 (*div_to_pl)(u32);
u32 (*pl_to_div)(u32);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c
new file mode 100644
index 000000000000..41003cbcdbfa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: MIT
+#include <linux/clk.h>
+#include <linux/math64.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+
+#include <drm/drm_managed.h>
+
+#include <subdev/clk.h>
+
+#include "nouveau_drv.h"
+#include "nouveau_chan.h"
+#include "priv.h"
+#include "gk20a_devfreq.h"
+#include "gk20a.h"
+#include "gp10b.h"
+
+#define PMU_BUSY_CYCLES_NORM_MAX 1000U
+
+#define PWR_PMU_IDLE_COUNTER_TOTAL 0U
+#define PWR_PMU_IDLE_COUNTER_BUSY 4U
+
+#define PWR_PMU_IDLE_COUNT_REG_OFFSET 0x0010A508U
+#define PWR_PMU_IDLE_COUNT_REG_SIZE 16U
+#define PWR_PMU_IDLE_COUNT_MASK 0x7FFFFFFFU
+#define PWR_PMU_IDLE_COUNT_RESET_VALUE (0x1U << 31U)
+
+#define PWR_PMU_IDLE_INTR_REG_OFFSET 0x0010A9E8U
+#define PWR_PMU_IDLE_INTR_ENABLE_VALUE 0U
+
+#define PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET 0x0010A9ECU
+#define PWR_PMU_IDLE_INTR_STATUS_MASK 0x00000001U
+#define PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE 0x1U
+
+#define PWR_PMU_IDLE_THRESHOLD_REG_OFFSET 0x0010A8A0U
+#define PWR_PMU_IDLE_THRESHOLD_REG_SIZE 4U
+#define PWR_PMU_IDLE_THRESHOLD_MAX_VALUE 0x7FFFFFFFU
+
+#define PWR_PMU_IDLE_CTRL_REG_OFFSET 0x0010A50CU
+#define PWR_PMU_IDLE_CTRL_REG_SIZE 16U
+#define PWR_PMU_IDLE_CTRL_VALUE_MASK 0x3U
+#define PWR_PMU_IDLE_CTRL_VALUE_BUSY 0x2U
+#define PWR_PMU_IDLE_CTRL_VALUE_ALWAYS 0x3U
+#define PWR_PMU_IDLE_CTRL_FILTER_MASK (0x1U << 2)
+#define PWR_PMU_IDLE_CTRL_FILTER_DISABLED 0x0U
+
+#define PWR_PMU_IDLE_MASK_REG_OFFSET 0x0010A504U
+#define PWR_PMU_IDLE_MASK_REG_SIZE 16U
+#define PWM_PMU_IDLE_MASK_GR_ENABLED 0x1U
+#define PWM_PMU_IDLE_MASK_CE_2_ENABLED 0x200000U
+
+/**
+ * struct gk20a_devfreq - Device frequency management
+ */
+struct gk20a_devfreq {
+ /** @devfreq: devfreq device. */
+ struct devfreq *devfreq;
+
+ /** @regs: Device registers. */
+ void __iomem *regs;
+
+ /** @gov_data: Governor data. */
+ struct devfreq_simple_ondemand_data gov_data;
+
+ /** @busy_time: Busy time. */
+ ktime_t busy_time;
+
+ /** @total_time: Total time. */
+ ktime_t total_time;
+
+ /** @time_last_update: Last update time. */
+ ktime_t time_last_update;
+};
+
+static struct gk20a_devfreq *dev_to_gk20a_devfreq(struct device *dev)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
+ struct nvkm_clk *base = nvkm_clk(subdev);
+
+ switch (drm->nvkm->chipset) {
+ case 0x13b: return gp10b_clk(base)->devfreq;
+ default: return gk20a_clk(base)->devfreq;
+ }
+}
+
+static void gk20a_pmu_init_perfmon_counter(struct gk20a_devfreq *gdevfreq)
+{
+ u32 data;
+
+ // Set pmu idle intr status bit on total counter overflow
+ writel(PWR_PMU_IDLE_INTR_ENABLE_VALUE,
+ gdevfreq->regs + PWR_PMU_IDLE_INTR_REG_OFFSET);
+
+ writel(PWR_PMU_IDLE_THRESHOLD_MAX_VALUE,
+ gdevfreq->regs + PWR_PMU_IDLE_THRESHOLD_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_THRESHOLD_REG_SIZE));
+
+ // Setup counter for total cycles
+ data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));
+ data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
+ data |= PWR_PMU_IDLE_CTRL_VALUE_ALWAYS | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
+ writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_TOTAL * PWR_PMU_IDLE_CTRL_REG_SIZE));
+
+ // Setup counter for busy cycles
+ writel(PWM_PMU_IDLE_MASK_GR_ENABLED | PWM_PMU_IDLE_MASK_CE_2_ENABLED,
+ gdevfreq->regs + PWR_PMU_IDLE_MASK_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_MASK_REG_SIZE));
+
+ data = readl(gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
+ data &= ~(PWR_PMU_IDLE_CTRL_VALUE_MASK | PWR_PMU_IDLE_CTRL_FILTER_MASK);
+ data |= PWR_PMU_IDLE_CTRL_VALUE_BUSY | PWR_PMU_IDLE_CTRL_FILTER_DISABLED;
+ writel(data, gdevfreq->regs + PWR_PMU_IDLE_CTRL_REG_OFFSET +
+ (PWR_PMU_IDLE_COUNTER_BUSY * PWR_PMU_IDLE_CTRL_REG_SIZE));
+}
+
+static u32 gk20a_pmu_read_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
+{
+ u32 ret;
+
+ ret = readl(gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
+ (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));
+
+ return ret & PWR_PMU_IDLE_COUNT_MASK;
+}
+
+static void gk20a_pmu_reset_idle_counter(struct gk20a_devfreq *gdevfreq, u32 counter_id)
+{
+ writel(PWR_PMU_IDLE_COUNT_RESET_VALUE, gdevfreq->regs + PWR_PMU_IDLE_COUNT_REG_OFFSET +
+ (counter_id * PWR_PMU_IDLE_COUNT_REG_SIZE));
+}
+
+static u32 gk20a_pmu_read_idle_intr_status(struct gk20a_devfreq *gdevfreq)
+{
+ u32 ret;
+
+ ret = readl(gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);
+
+ return ret & PWR_PMU_IDLE_INTR_STATUS_MASK;
+}
+
+static void gk20a_pmu_clear_idle_intr_status(struct gk20a_devfreq *gdevfreq)
+{
+ writel(PWR_PMU_IDLE_INTR_STATUS_RESET_VALUE,
+ gdevfreq->regs + PWR_PMU_IDLE_INTR_STATUS_REG_OFFSET);
+}
+
+static void gk20a_devfreq_update_utilization(struct gk20a_devfreq *gdevfreq)
+{
+ ktime_t now, last;
+ u64 busy_cycles, total_cycles;
+ u32 norm, intr_status;
+
+ now = ktime_get();
+ last = gdevfreq->time_last_update;
+ gdevfreq->total_time = ktime_us_delta(now, last);
+
+ busy_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
+ total_cycles = gk20a_pmu_read_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
+ intr_status = gk20a_pmu_read_idle_intr_status(gdevfreq);
+
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
+
+ if (intr_status != 0UL) {
+ norm = PMU_BUSY_CYCLES_NORM_MAX;
+ gk20a_pmu_clear_idle_intr_status(gdevfreq);
+ } else if (total_cycles == 0ULL || busy_cycles > total_cycles) {
+ norm = PMU_BUSY_CYCLES_NORM_MAX;
+ } else {
+ norm = (u32)div64_u64(busy_cycles * PMU_BUSY_CYCLES_NORM_MAX,
+ total_cycles);
+ }
+
+ gdevfreq->busy_time = div_u64(gdevfreq->total_time * norm, PMU_BUSY_CYCLES_NORM_MAX);
+ gdevfreq->time_last_update = now;
+}
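
The utilisation math above scales busy cycles into a fixed 0..1000 range and applies that ratio to the elapsed wall time, treating counter overflow as fully busy. A standalone sketch with invented sample numbers:

#include <stdint.h>
#include <stdio.h>

#define NORM_MAX 1000U

int main(void)
{
	uint64_t busy_cycles = 300000, total_cycles = 1200000;
	uint64_t total_us = 50000; /* one 50 ms polling window */
	uint32_t norm;

	if (total_cycles == 0 || busy_cycles > total_cycles)
		norm = NORM_MAX; /* overflow: assume fully busy */
	else
		norm = (uint32_t)(busy_cycles * NORM_MAX / total_cycles);

	/* norm=250 here, i.e. 12500 us of the 50000 us window was busy */
	printf("norm=%u -> busy %llu us of %llu us\n", norm,
	       (unsigned long long)(total_us * norm / NORM_MAX),
	       (unsigned long long)total_us);
	return 0;
}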
+
+static int gk20a_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
+ struct nvkm_clk *base = nvkm_clk(subdev);
+ struct nvkm_pstate *pstates = base->func->pstates;
+ int nr_pstates = base->func->nr_pstates;
+ int i, ret;
+
+ for (i = 0; i < nr_pstates - 1; i++)
+ if (pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV >= *freq)
+ break;
+
+ ret = nvkm_clk_ustate(base, pstates[i].pstate, 0);
+ ret |= nvkm_clk_ustate(base, pstates[i].pstate, 1);
+ if (ret) {
+ nvkm_error(subdev, "cannot update clock\n");
+ return ret;
+ }
+
+ *freq = pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
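
The target callback rounds the requested frequency up to the first pstate that satisfies it, falling back to the fastest entry. A minimal userspace sketch of that selection, with an invented rate table:

#include <stdio.h>

static const unsigned long rates[] = { 114750, 522750, 1300500 };

static unsigned long pick_rate(unsigned long freq)
{
	unsigned int i, n = sizeof(rates) / sizeof(rates[0]);

	/* stop at the first entry that meets the request */
	for (i = 0; i < n - 1; i++)
		if (rates[i] >= freq)
			break;

	return rates[i]; /* falls back to the highest entry */
}

int main(void)
{
	/* prints 114750 522750 1300500 */
	printf("%lu %lu %lu\n", pick_rate(1), pick_rate(200000),
	       pick_rate(9999999));
	return 0;
}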
+
+static int gk20a_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct nvkm_subdev *subdev = nvkm_device_subdev(drm->nvkm, NVKM_SUBDEV_CLK, 0);
+ struct nvkm_clk *base = nvkm_clk(subdev);
+
+ *freq = nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static void gk20a_devfreq_reset(struct gk20a_devfreq *gdevfreq)
+{
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_BUSY);
+ gk20a_pmu_reset_idle_counter(gdevfreq, PWR_PMU_IDLE_COUNTER_TOTAL);
+ gk20a_pmu_clear_idle_intr_status(gdevfreq);
+
+ gdevfreq->busy_time = 0;
+ gdevfreq->total_time = 0;
+ gdevfreq->time_last_update = ktime_get();
+}
+
+static int gk20a_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct nouveau_drm *drm = dev_get_drvdata(dev);
+ struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);
+
+ gk20a_devfreq_get_cur_freq(dev, &status->current_frequency);
+
+ gk20a_devfreq_update_utilization(gdevfreq);
+
+ status->busy_time = ktime_to_ns(gdevfreq->busy_time);
+ status->total_time = ktime_to_ns(gdevfreq->total_time);
+
+ gk20a_devfreq_reset(gdevfreq);
+
+ NV_DEBUG(drm, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ status->busy_time, status->total_time,
+ status->total_time >= 100 ?
+ status->busy_time / (status->total_time / 100) : 0,
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile gk20a_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
+ .polling_ms = 50,
+ .target = gk20a_devfreq_target,
+ .get_cur_freq = gk20a_devfreq_get_cur_freq,
+ .get_dev_status = gk20a_devfreq_get_dev_status,
+};
+
+int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **gdevfreq)
+{
+ struct nvkm_device *device = base->subdev.device;
+ struct nouveau_drm *drm = dev_get_drvdata(device->dev);
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ struct nvkm_pstate *pstates = base->func->pstates;
+ int nr_pstates = base->func->nr_pstates;
+ struct gk20a_devfreq *new_gdevfreq;
+ int i;
+
+ new_gdevfreq = drmm_kzalloc(drm->dev, sizeof(struct gk20a_devfreq), GFP_KERNEL);
+ if (!new_gdevfreq)
+ return -ENOMEM;
+
+ new_gdevfreq->regs = tdev->regs;
+
+ for (i = 0; i < nr_pstates; i++)
+ dev_pm_opp_add(base->subdev.device->dev,
+ pstates[i].base.domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV, 0);
+
+ gk20a_pmu_init_perfmon_counter(new_gdevfreq);
+ gk20a_devfreq_reset(new_gdevfreq);
+
+ gk20a_devfreq_profile.initial_freq =
+ nvkm_clk_read(base, nv_clk_src_gpc) * GK20A_CLK_GPC_MDIV;
+
+ new_gdevfreq->gov_data.upthreshold = 45;
+ new_gdevfreq->gov_data.downdifferential = 5;
+
+ new_gdevfreq->devfreq = devm_devfreq_add_device(device->dev,
+ &gk20a_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &new_gdevfreq->gov_data);
+ if (IS_ERR(new_gdevfreq->devfreq))
+ return PTR_ERR(new_gdevfreq->devfreq);
+
+ *gdevfreq = new_gdevfreq;
+
+ return 0;
+}
+
+int gk20a_devfreq_resume(struct device *dev)
+{
+ struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);
+
+ if (!gdevfreq || !gdevfreq->devfreq)
+ return 0;
+
+ return devfreq_resume_device(gdevfreq->devfreq);
+}
+
+int gk20a_devfreq_suspend(struct device *dev)
+{
+ struct gk20a_devfreq *gdevfreq = dev_to_gk20a_devfreq(dev);
+
+ if (!gdevfreq || !gdevfreq->devfreq)
+ return 0;
+
+ return devfreq_suspend_device(gdevfreq->devfreq);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h
new file mode 100644
index 000000000000..5b7ca8a7a5cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a_devfreq.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __GK20A_DEVFREQ_H__
+#define __GK20A_DEVFREQ_H__
+
+#include <linux/devfreq.h>
+
+struct gk20a_devfreq;
+
+#if defined(CONFIG_PM_DEVFREQ)
+int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq);
+
+int gk20a_devfreq_resume(struct device *dev);
+int gk20a_devfreq_suspend(struct device *dev);
+#else
+static inline int gk20a_devfreq_init(struct nvkm_clk *base, struct gk20a_devfreq **devfreq)
+{
+ return 0;
+}
+
+static inline int gk20a_devfreq_resume(struct device *dev) { return 0; }
+static inline int gk20a_devfreq_suspend(struct device *dev) { return 0; }
+#endif /* CONFIG_PM_DEVFREQ */
+
+#endif /* __GK20A_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index 7c33542f651b..fa8ca53acbd1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -27,6 +27,7 @@
#include <core/tegra.h>
#include "priv.h"
+#include "gk20a_devfreq.h"
#include "gk20a.h"
#define GPCPLL_CFG_SYNC_MODE BIT(2)
@@ -869,6 +870,10 @@ gm20b_clk_init(struct nvkm_clk *base)
return ret;
}
+ ret = gk20a_devfreq_init(base, &clk->devfreq);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c
new file mode 100644
index 000000000000..492b62c0ee96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: MIT
+#include <subdev/clk.h>
+#include <subdev/timer.h>
+#include <core/device.h>
+#include <core/tegra.h>
+
+#include "priv.h"
+#include "gk20a_devfreq.h"
+#include "gk20a.h"
+#include "gp10b.h"
+
+static int
+gp10b_clk_init(struct nvkm_clk *base)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+ int ret;
+
+ /* Start with the highest frequency, matching the BPMP default */
+ base->func->calc(base, &base->func->pstates[base->func->nr_pstates - 1].base);
+ ret = base->func->prog(base);
+ if (ret) {
+ nvkm_error(subdev, "cannot initialize clock\n");
+ return ret;
+ }
+
+ ret = gk20a_devfreq_init(base, &clk->devfreq);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+gp10b_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ struct nvkm_subdev *subdev = &clk->base.subdev;
+
+ switch (src) {
+ case nv_clk_src_gpc:
+ return clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV;
+ default:
+ nvkm_error(subdev, "invalid clock source %d\n", src);
+ return -EINVAL;
+ }
+}
+
+static int
+gp10b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ u32 target_rate = cstate->domain[nv_clk_src_gpc] * GK20A_CLK_GPC_MDIV;
+
+ clk->new_rate = clk_round_rate(clk->clk, target_rate) / GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static int
+gp10b_clk_prog(struct nvkm_clk *base)
+{
+ struct gp10b_clk *clk = gp10b_clk(base);
+ int ret;
+
+ ret = clk_set_rate(clk->clk, clk->new_rate * GK20A_CLK_GPC_MDIV);
+ if (ret < 0)
+ return ret;
+
+ clk->rate = clk_get_rate(clk->clk) / GK20A_CLK_GPC_MDIV;
+
+ return 0;
+}
+
+static struct nvkm_pstate
+gp10b_pstates[] = {
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 114750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 216750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 318750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 420750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 522750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 624750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 726750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 828750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 930750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1032750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1134750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1236750,
+ },
+ },
+ {
+ .base = {
+ .domain[nv_clk_src_gpc] = 1300500,
+ },
+ },
+};
+
+static const struct nvkm_clk_func
+gp10b_clk = {
+ .init = gp10b_clk_init,
+ .read = gp10b_clk_read,
+ .calc = gp10b_clk_calc,
+ .prog = gp10b_clk_prog,
+ .tidy = gk20a_clk_tidy,
+ .pstates = gp10b_pstates,
+ .nr_pstates = ARRAY_SIZE(gp10b_pstates),
+ .domains = {
+ { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
+ { nv_clk_src_max }
+ }
+};
+
+int
+gp10b_clk_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+ struct nvkm_clk **pclk)
+{
+ struct nvkm_device_tegra *tdev = device->func->tegra(device);
+ const struct nvkm_clk_func *func = &gp10b_clk;
+ struct gp10b_clk *clk;
+ int ret, i;
+
+ clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+ if (!clk)
+ return -ENOMEM;
+ *pclk = &clk->base;
+ clk->clk = tdev->clk;
+
+ /* Finish initializing the pstates */
+ for (i = 0; i < func->nr_pstates; i++) {
+ INIT_LIST_HEAD(&func->pstates[i].list);
+ func->pstates[i].pstate = i + 1;
+ }
+
+ ret = nvkm_clk_ctor(func, device, type, inst, true, &clk->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h
new file mode 100644
index 000000000000..178e3bcdbbf7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gp10b.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_CLK_GP10B_H__
+#define __NVKM_CLK_GP10B_H__
+
+struct gp10b_clk {
+ /* currently applied parameters */
+ struct nvkm_clk base;
+ struct gk20a_devfreq *devfreq;
+ struct clk *clk;
+ u32 rate;
+
+ /* new parameters to apply */
+ u32 new_rate;
+};
+
+#define gp10b_clk(p) container_of((p), struct gp10b_clk, base)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 8a286a9349ac..7ce1b65e2c1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -279,7 +279,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
mutex_init(&fb->tags.mutex);
if (func->sysmem.flush_page_init) {
- fb->sysmem.flush_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ fb->sysmem.flush_page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (!fb->sysmem.flush_page)
return -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
index 1c78c8853617..170776cc82fb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb100.c
@@ -15,6 +15,9 @@ gb100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
const u32 hshub = DRF_LO(NV_PFB_HSHUB0);
struct nvkm_device *device = fb->subdev.device;
+ // Ensure that the address is within hardware limits
+ WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(52));
+
nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
nvkm_wr32(device, hshub + NV_PFB_HSHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, addr_lo);
nvkm_wr32(device, hshub + NV_PFB_HSHUB_EG_PCIE_FLUSH_SYSMEM_ADDR_HI, addr_hi);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
index 848505026d02..a21bf19e1041 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gb202.c
@@ -13,6 +13,9 @@ gb202_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
struct nvkm_device *device = fb->subdev.device;
const u64 addr = fb->sysmem.flush_page_addr;
+ // Ensure that the address is within hardware limits
+ WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(52));
+
nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
nvkm_wr32(device, NV_PFB_FBHUB0_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index 07db9b397ac1..64281a09fb39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -80,6 +80,9 @@ gf100_fb_init_page(struct nvkm_fb *fb)
void
gf100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
{
+ // Ensure that the address can actually fit in the register
+ WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(40));
+
nvkm_wr32(fb->subdev.device, 0x100c10, fb->sysmem.flush_page_addr >> 8);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
index 2d8c51f882d5..8c9394048f25 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gh100.c
@@ -13,6 +13,9 @@ gh100_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
const u64 addr = fb->sysmem.flush_page_addr >> NV_PFB_NISO_FLUSH_SYSMEM_ADDR_SHIFT;
struct nvkm_device *device = fb->subdev.device;
+ // Ensure that the address is within hardware limits
+ WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(52));
+
nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_HI, upper_32_bits(addr));
nvkm_wr32(device, NV_PFB_FBHUB_PCIE_FLUSH_SYSMEM_ADDR_LO, lower_32_bits(addr));
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
index a6efbd913c13..076d968b7297 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/nv50.c
@@ -214,6 +214,9 @@ nv50_fb_tags(struct nvkm_fb *base)
static void
nv50_fb_sysmem_flush_page_init(struct nvkm_fb *fb)
{
+ // Ensure that the address can actually fit in the register
+ WARN_ON(fb->sysmem.flush_page_addr > DMA_BIT_MASK(40));
+
nvkm_wr32(fb->subdev.device, 0x100c08, fb->sysmem.flush_page_addr >> 8);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
index eb765da0876e..35d1fcef520b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
@@ -41,8 +41,8 @@ ad102_gsp = {
static struct nvkm_gsp_fwif
ad102_gsps[] = {
- { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144", true },
- { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01", true },
+ { 1, tu102_gsp_load, &ad102_gsp, &r570_rm_ga102, "570.144" },
+ { 0, tu102_gsp_load, &ad102_gsp, &r535_rm_ga102, "535.113.01" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
index d23243a83a4c..7ccb41761066 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
@@ -138,8 +138,10 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
nvkm_subdev_ctor(&nvkm_gsp, device, type, inst, &gsp->subdev);
fwif = nvkm_firmware_load(&gsp->subdev, fwif, "Gsp", gsp);
- if (IS_ERR(fwif))
+ if (IS_ERR(fwif)) {
+ nvkm_error(&gsp->subdev, "failed to load required firmware for device\n");
return PTR_ERR(fwif);
+ }
gsp->func = fwif->func;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
index 12a3f2c1ed82..1b3b31b95ce4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb100.c
@@ -20,7 +20,7 @@ gb100_gsp = {
static struct nvkm_gsp_fwif
gb100_gsps[] = {
- { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144", true },
+ { 0, gh100_gsp_load, &gb100_gsp, &r570_rm_gb10x, "570.144" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
index c1d718172ddf..51384c63148c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gb202.c
@@ -20,7 +20,7 @@ gb202_gsp = {
static struct nvkm_gsp_fwif
gb202_gsps[] = {
- { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144", true },
+ { 0, gh100_gsp_load, &gb202_gsp, &r570_rm_gb20x, "570.144" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
index ce31e8248807..b0dd5fce7bad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gh100.c
@@ -344,7 +344,7 @@ done:
static struct nvkm_gsp_fwif
gh100_gsps[] = {
- { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144", true },
+ { 0, gh100_gsp_load, &gh100_gsp, &r570_rm_gh100, "570.144" },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index 4f14e85fc69e..c3494b7ac572 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -14,7 +14,6 @@ struct nvkm_gsp_fwif {
const struct nvkm_gsp_func *func;
const struct nvkm_rm_impl *rm;
const char *ver;
- bool enable;
};
int nvkm_gsp_load_fw(struct nvkm_gsp *, const char *name, const char *ver,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
index 588cb4ab85cb..32e6a065d6d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
@@ -582,10 +582,13 @@ struct nv_gsp_registry_entries {
* RMSecBusResetEnable - enables PCI secondary bus reset
* RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration
* registers on any PCI reset.
+ * RMDevidCheckIgnore - allows GSP-RM to boot even if the PCI dev ID
+ * is not found in the internal product name database.
*/
static const struct nv_gsp_registry_entries r535_registry_entries[] = {
{ "RMSecBusResetEnable", 1 },
{ "RMForcePcieConfigSave", 1 },
+ { "RMDevidCheckIgnore", 1 },
};
#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
index 58e233bc53b1..81e56da0474a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
@@ -383,13 +383,9 @@ int
tu102_gsp_load_rm(struct nvkm_gsp *gsp, const struct nvkm_gsp_fwif *fwif)
{
struct nvkm_subdev *subdev = &gsp->subdev;
- bool enable_gsp = fwif->enable;
int ret;
-#if IS_ENABLED(CONFIG_DRM_NOUVEAU_GSP_DEFAULT)
- enable_gsp = true;
-#endif
- if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", enable_gsp))
+ if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", true))
return -EINVAL;
ret = nvkm_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index 851fd847a2a9..ed15a4475181 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -21,9 +21,7 @@
*/
#include "vmm.h"
-#include <core/client.h>
#include <subdev/fb.h>
-#include <subdev/ltc.h>
#include <subdev/timer.h>
#include <engine/gr.h>
@@ -111,13 +109,33 @@ gp100_vmm_pgt_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
nvkm_done(pt->memory);
}
+static inline u64
+gp100_vmm_comptag_nr(u64 size)
+{
+ return size >> 16; /* One comptag per 64KiB VRAM. */
+}
+
+static inline u64
+gp100_vmm_pte_comptagline_base(u64 addr)
+{
+ /* RM allocates enough comptags for all of VRAM, so use a 1:1 mapping. */
+ return (1 + gp100_vmm_comptag_nr(addr)) << 36; /* NV_MMU_VER2_PTE_COMPTAGLINE */
+}
+
+static inline u64
+gp100_vmm_pte_comptagline_incr(u32 page_size)
+{
+ return gp100_vmm_comptag_nr(page_size) << 36; /* NV_MMU_VER2_PTE_COMPTAGLINE */
+}
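
The helpers above implement a 1:1 comptag mapping: one tag per 64 KiB of VRAM, stored in the PTE field at bit 36, with tag 0 left unused. Illustrative arithmetic for a made-up VRAM offset (the field position and 64 KiB granule come from the helpers; the address is invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_addr = 0x300000; /* 3 MiB into VRAM */
	uint64_t tag = 1 + (vram_addr >> 16); /* tag 0 stays unused */
	uint64_t pte_bits = tag << 36; /* NV_MMU_VER2_PTE_COMPTAGLINE */
	/* increment applied per PTE for a 64 KiB page walk */
	uint64_t incr_64k = (0x10000ULL >> 16) << 36;

	printf("tag %llu, PTE field 0x%llx, per-64KiB increment 0x%llx\n",
	       (unsigned long long)tag, (unsigned long long)pte_bits,
	       (unsigned long long)incr_64k);
	return 0;
}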
+
static inline void
gp100_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
{
u64 data = (addr >> 4) | map->type;
- map->type += ptes * map->ctag;
+ if (map->ctag)
+ data |= gp100_vmm_pte_comptagline_base(addr);
while (ptes--) {
VMM_WO064(pt, vmm, ptei++ * 8, data);
@@ -142,7 +160,6 @@ gp100_vmm_pgt_dma(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
while (ptes--) {
const u64 data = (*map->dma++ >> 4) | map->type;
VMM_WO064(pt, vmm, ptei++ * 8, data);
- map->type += map->ctag;
}
nvkm_done(pt->memory);
return;
@@ -200,7 +217,8 @@ gp100_vmm_pd0_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
{
u64 data = (addr >> 4) | map->type;
- map->type += ptes * map->ctag;
+ if (map->ctag)
+ data |= gp100_vmm_pte_comptagline_base(addr);
while (ptes--) {
VMM_WO128(pt, vmm, ptei++ * 0x10, data, 0ULL);
@@ -411,8 +429,6 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
struct gp100_vmm_map_vn vn;
struct gp100_vmm_map_v0 v0;
} *args = argv;
- struct nvkm_device *device = vmm->mmu->subdev.device;
- struct nvkm_memory *memory = map->memory;
u8 kind, kind_inv, priv, ro, vol;
int kindn, aper, ret = -ENOSYS;
const u8 *kindm;
@@ -449,29 +465,24 @@ gp100_vmm_valid(struct nvkm_vmm *vmm, void *argv, u32 argc,
return -EINVAL;
}
+ /* Handle compression. */
if (kindm[kind] != kind) {
- u64 tags = nvkm_memory_size(memory) >> 16;
- if (aper != 0 || !(page->type & NVKM_VMM_PAGE_COMP)) {
- VMM_DEBUG(vmm, "comp %d %02x", aper, page->type);
- return -EINVAL;
- }
-
- if (!map->no_comp) {
- ret = nvkm_memory_tags_get(memory, device, tags,
- nvkm_ltc_tags_clear,
- &map->tags);
- if (ret) {
- VMM_DEBUG(vmm, "comp %d", ret);
- return ret;
+ struct nvkm_device *device = vmm->mmu->subdev.device;
+
+ /* Compression is only supported when using GSP-RM, as
+ * PMU firmware is required in order to initialise the
+ * compbit backing store.
+ */
+ if (nvkm_gsp_rm(device->gsp)) {
+ /* Turing GPUs require PTE_COMPTAGLINE to be filled,
+ * in addition to specifying a compressed kind.
+ */
+ if (device->card_type < GA100) {
+ map->ctag = gp100_vmm_pte_comptagline_incr(1 << map->page->shift);
+ map->next |= map->ctag;
}
- }
-
- if (!map->no_comp && map->tags->mn) {
- tags = map->tags->mn->offset + (map->offset >> 16);
- map->ctag |= ((1ULL << page->shift) >> 16) << 36;
- map->type |= tags << 36;
- map->next |= map->ctag;
} else {
+ /* Revert to non-compressed kind. */
kind = kindm[kind];
}
}
@@ -592,8 +603,8 @@ gp100_vmm = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
- { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxC },
- { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxC },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SVxx },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SVxx },
{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SVHx },
{}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
index e081239afe58..5791d134962b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp10b.c
@@ -34,8 +34,8 @@ gp10b_vmm = {
{ 47, &gp100_vmm_desc_16[4], NVKM_VMM_PAGE_Sxxx },
{ 38, &gp100_vmm_desc_16[3], NVKM_VMM_PAGE_Sxxx },
{ 29, &gp100_vmm_desc_16[2], NVKM_VMM_PAGE_Sxxx },
- { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SxHC },
- { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SxHC },
+ { 21, &gp100_vmm_desc_16[1], NVKM_VMM_PAGE_SxHx },
+ { 16, &gp100_vmm_desc_16[0], NVKM_VMM_PAGE_SxHx },
{ 12, &gp100_vmm_desc_12[0], NVKM_VMM_PAGE_SxHx },
{}
}
diff --git a/drivers/gpu/drm/nova/Kconfig b/drivers/gpu/drm/nova/Kconfig
index cca6a3fea879..3e637ad7b5ba 100644
--- a/drivers/gpu/drm/nova/Kconfig
+++ b/drivers/gpu/drm/nova/Kconfig
@@ -1,9 +1,11 @@
config DRM_NOVA
tristate "Nova DRM driver"
+ depends on 64BIT
depends on DRM=y
depends on PCI
depends on RUST
select AUXILIARY_BUS
+ select NOVA_CORE
default n
help
Choose this if you want to build the Nova DRM driver for Nvidia
diff --git a/drivers/gpu/drm/nova/driver.rs b/drivers/gpu/drm/nova/driver.rs
index b28b2e05cc15..2246d8e104e0 100644
--- a/drivers/gpu/drm/nova/driver.rs
+++ b/drivers/gpu/drm/nova/driver.rs
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, types::ARef};
+use kernel::{
+ auxiliary, c_str, device::Core, drm, drm::gem, drm::ioctl, prelude::*, sync::aref::ARef,
+};
use crate::file::File;
use crate::gem::NovaObject;
@@ -43,13 +45,13 @@ impl auxiliary::Driver for NovaDriver {
type IdInfo = ();
const ID_TABLE: auxiliary::IdTable<Self::IdInfo> = &AUX_TABLE;
- fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
+ fn probe(adev: &auxiliary::Device<Core>, _info: &Self::IdInfo) -> impl PinInit<Self, Error> {
let data = try_pin_init!(NovaData { adev: adev.into() });
let drm = drm::Device::<Self>::new(adev.as_ref(), data)?;
drm::Registration::new_foreign_owned(&drm, adev.as_ref(), 0)?;
- Ok(KBox::new(Self { drm }, GFP_KERNEL)?.into())
+ Ok(Self { drm })
}
}
diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
index 4fe62cf98a23..a3b7bd36792c 100644
--- a/drivers/gpu/drm/nova/file.rs
+++ b/drivers/gpu/drm/nova/file.rs
@@ -2,13 +2,11 @@
use crate::driver::{NovaDevice, NovaDriver};
use crate::gem::NovaObject;
-use crate::uapi::{GemCreate, GemInfo, Getparam};
use kernel::{
alloc::flags::*,
drm::{self, gem::BaseObject},
pci,
prelude::*,
- types::Opaque,
uapi,
};
@@ -26,21 +24,19 @@ impl File {
/// IOCTL: get_param: Query GPU / driver metadata.
pub(crate) fn get_param(
dev: &NovaDevice,
- getparam: &Opaque<uapi::drm_nova_getparam>,
+ getparam: &mut uapi::drm_nova_getparam,
_file: &drm::File<File>,
) -> Result<u32> {
let adev = &dev.adev;
- let parent = adev.parent().ok_or(ENOENT)?;
+ let parent = adev.parent();
let pdev: &pci::Device = parent.try_into()?;
- let getparam: &Getparam = getparam.into();
- let value = match getparam.param() as u32 {
+ let value = match getparam.param as u32 {
uapi::NOVA_GETPARAM_VRAM_BAR_SIZE => pdev.resource_len(1)?,
_ => return Err(EINVAL),
};
- #[allow(clippy::useless_conversion)]
- getparam.set_value(value.into());
+ getparam.value = Into::<u64>::into(value);
Ok(0)
}
@@ -48,13 +44,12 @@ impl File {
/// IOCTL: gem_create: Create a new DRM GEM object.
pub(crate) fn gem_create(
dev: &NovaDevice,
- req: &Opaque<uapi::drm_nova_gem_create>,
+ req: &mut uapi::drm_nova_gem_create,
file: &drm::File<File>,
) -> Result<u32> {
- let req: &GemCreate = req.into();
- let obj = NovaObject::new(dev, req.size().try_into()?)?;
+ let obj = NovaObject::new(dev, req.size.try_into()?)?;
- req.set_handle(obj.create_handle(file)?);
+ req.handle = obj.create_handle(file)?;
Ok(0)
}
@@ -62,13 +57,12 @@ impl File {
/// IOCTL: gem_info: Query GEM metadata.
pub(crate) fn gem_info(
_dev: &NovaDevice,
- req: &Opaque<uapi::drm_nova_gem_info>,
+ req: &mut uapi::drm_nova_gem_info,
file: &drm::File<File>,
) -> Result<u32> {
- let req: &GemInfo = req.into();
- let bo = NovaObject::lookup_handle(file, req.handle())?;
+ let bo = NovaObject::lookup_handle(file, req.handle)?;
- req.set_size(bo.size().try_into()?);
+ req.size = bo.size().try_into()?;
Ok(0)
}
diff --git a/drivers/gpu/drm/nova/gem.rs b/drivers/gpu/drm/nova/gem.rs
index 33b62d21400c..2760ba4f3450 100644
--- a/drivers/gpu/drm/nova/gem.rs
+++ b/drivers/gpu/drm/nova/gem.rs
@@ -4,7 +4,7 @@ use kernel::{
drm,
drm::{gem, gem::BaseObject},
prelude::*,
- types::ARef,
+ sync::aref::ARef,
};
use crate::{
@@ -16,16 +16,14 @@ use crate::{
#[pin_data]
pub(crate) struct NovaObject {}
-impl gem::BaseDriverObject<gem::Object<NovaObject>> for NovaObject {
+impl gem::DriverObject for NovaObject {
+ type Driver = NovaDriver;
+
fn new(_dev: &NovaDevice, _size: usize) -> impl PinInit<Self, Error> {
try_pin_init!(NovaObject {})
}
}
-impl gem::DriverObject for NovaObject {
- type Driver = NovaDriver;
-}
-
impl NovaObject {
/// Create a new DRM GEM object.
pub(crate) fn new(dev: &NovaDevice, size: usize) -> Result<ARef<gem::Object<Self>>> {
diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs
index 64fd670e99e1..8893e58ee0db 100644
--- a/drivers/gpu/drm/nova/nova.rs
+++ b/drivers/gpu/drm/nova/nova.rs
@@ -5,7 +5,6 @@
mod driver;
mod file;
mod gem;
-mod uapi;
use crate::driver::NovaDriver;
diff --git a/drivers/gpu/drm/nova/uapi.rs b/drivers/gpu/drm/nova/uapi.rs
deleted file mode 100644
index eb228a58d423..000000000000
--- a/drivers/gpu/drm/nova/uapi.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-use kernel::uapi;
-
-// TODO Work out some common infrastructure to avoid boilerplate code for uAPI abstractions.
-
-macro_rules! define_uapi_abstraction {
- ($name:ident <= $inner:ty) => {
- #[repr(transparent)]
- pub struct $name(::kernel::types::Opaque<$inner>);
-
- impl ::core::convert::From<&::kernel::types::Opaque<$inner>> for &$name {
- fn from(value: &::kernel::types::Opaque<$inner>) -> Self {
- // SAFETY: `Self` is a transparent wrapper of `$inner`.
- unsafe { ::core::mem::transmute(value) }
- }
- }
- };
-}
-
-define_uapi_abstraction!(Getparam <= uapi::drm_nova_getparam);
-
-impl Getparam {
- pub fn param(&self) -> u64 {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
- unsafe { (*self.0.get()).param }
- }
-
- pub fn set_value(&self, v: u64) {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_getparam`.
- unsafe { (*self.0.get()).value = v };
- }
-}
-
-define_uapi_abstraction!(GemCreate <= uapi::drm_nova_gem_create);
-
-impl GemCreate {
- pub fn size(&self) -> u64 {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
- unsafe { (*self.0.get()).size }
- }
-
- pub fn set_handle(&self, handle: u32) {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_create`.
- unsafe { (*self.0.get()).handle = handle };
- }
-}
-
-define_uapi_abstraction!(GemInfo <= uapi::drm_nova_gem_info);
-
-impl GemInfo {
- pub fn handle(&self) -> u32 {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
- unsafe { (*self.0.get()).handle }
- }
-
- pub fn set_size(&self, size: u64) {
- // SAFETY: `self.get()` is a valid pointer to a `struct drm_nova_gem_info`.
- unsafe { (*self.0.get()).size = size };
- }
-}
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 63ddc5127f7b..1c2a1920c0a6 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index a3d470468e5b..9edc1b3f9f95 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -11,6 +11,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 3fff32c000a6..bbe427ab43c1 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -26,6 +26,8 @@
#include <linux/vmalloc.h>
#include <linux/wait.h>
+#include <drm/drm_print.h>
+
#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 054b71dba6a7..1b96343226a5 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -19,6 +19,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_panel.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -378,10 +379,8 @@ static int omap_display_id(struct omap_dss_device *output)
struct device_node *node = NULL;
if (output->bridge) {
- struct drm_bridge *bridge = output->bridge;
-
- while (drm_bridge_get_next_bridge(bridge))
- bridge = drm_bridge_get_next_bridge(bridge);
+ struct drm_bridge *bridge __free(drm_bridge_put) =
+ drm_bridge_chain_get_last_bridge(output->bridge->encoder);
node = bridge->of_node;
}
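
A note on the pattern adopted above: __free(drm_bridge_put) attaches scope-based cleanup from <linux/cleanup.h> to the bridge pointer, so the reference returned by drm_bridge_chain_get_last_bridge() is dropped automatically on every exit path. A minimal sketch under those assumptions (the helper name and its NULL behaviour are taken from the hunk, not verified beyond it):

/* Sketch: fetch the OF node of the last bridge in an encoder's
 * chain without leaking the reference taken by the lookup. */
static struct device_node *last_bridge_of_node(struct drm_encoder *encoder)
{
	struct drm_bridge *bridge __free(drm_bridge_put) =
		drm_bridge_chain_get_last_bridge(encoder);

	/* drm_bridge_put(bridge) runs automatically when 'bridge'
	 * goes out of scope, on both return paths below. */
	return bridge ? bridge->of_node : NULL;
}
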
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 4dd05bc732da..195715b162e3 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -77,7 +77,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct omap_dss_device *output = omap_encoder->output;
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
- struct drm_bridge *bridge;
struct videomode vm = { 0 };
u32 bus_flags;
@@ -97,8 +96,7 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
- for (bridge = output->bridge; bridge;
- bridge = drm_bridge_get_next_bridge(bridge)) {
+ drm_for_each_bridge_in_chain_from(output->bridge, bridge) {
if (!bridge->timings)
continue;
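
Here the open-coded walk via drm_bridge_get_next_bridge() becomes a single iterator macro; since the local bridge declaration is removed in the hunk above, this assumes drm_for_each_bridge_in_chain_from() declares and reference-manages its own cursor. A sketch of the resulting loop shape:

/* Sketch: pick up input bus flags from any bridge in the chain,
 * starting at output->bridge, that provides timing information. */
drm_for_each_bridge_in_chain_from(output->bridge, bridge) {
	if (!bridge->timings)
		continue;

	bus_flags = bridge->timings->input_bus_flags;
}
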
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index bb3105556f19..b8c249ec1891 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -12,6 +12,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 948af7ec1130..ca3fb186bf19 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -15,6 +15,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/drm_util.h>
#include "omap_drv.h"
@@ -103,8 +104,6 @@ static void omap_fbdev_fb_destroy(struct fb_info *info)
drm_framebuffer_remove(fb);
drm_client_release(&helper->client);
- drm_fb_helper_unprepare(helper);
- kfree(helper);
}
/*
@@ -155,9 +154,9 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct omap_drm_private *priv = dev->dev_private;
struct omap_fbdev *fbdev = priv->fbdev;
+ struct fb_info *fbi = helper->info;
struct drm_framebuffer *fb = NULL;
union omap_gem_size gsize;
- struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
struct drm_gem_object *bo;
dma_addr_t dma_addr;
@@ -226,13 +225,6 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
goto fail;
}
- fbi = drm_fb_helper_alloc_info(helper);
- if (IS_ERR(fbi)) {
- dev_err(dev->dev, "failed to allocate fb info\n");
- ret = PTR_ERR(fbi);
- goto fail;
- }
-
DBG("fbi=%p, dev=%p", fbi, dev);
helper->funcs = &omap_fbdev_helper_funcs;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 381552bfb409..71e79f53489a 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -10,7 +10,9 @@
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
#include "omap_drv.h"
@@ -580,15 +582,13 @@ static int omap_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- union omap_gem_size gsize;
-
- args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
-
- args->size = PAGE_ALIGN(args->pitch * args->height);
+ union omap_gem_size gsize = { };
+ int ret;
- gsize = (union omap_gem_size){
- .bytes = args->size,
- };
+ ret = drm_mode_size_dumb(dev, args, SZ_8, 0);
+ if (ret)
+ return ret;
+ gsize.bytes = args->size;
return omap_gem_new_handle(dev, file, gsize,
OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
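
For reference, the two removed lines were the open-coded version of what drm_mode_size_dumb() now computes centrally, with the SZ_8 argument presumably the pitch alignment and 0 the default size alignment; treat the following as a sketch of the intended math, not of the helper's implementation:

/* Pitch in bytes, rounded up from width * bpp bits per scanline,
 * then a page-aligned total allocation size: */
args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
args->size = PAGE_ALIGN(args->pitch * args->height);
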
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index a6f0bbc879d2..943c5307da00 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -5,6 +5,7 @@
*/
#include <drm/drm_vblank.h>
+#include <drm/drm_print.h>
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_overlay.c b/drivers/gpu/drm/omapdrm/omap_overlay.c
index fb97c74386f2..6fb7510cbebb 100644
--- a/drivers/gpu/drm/omapdrm/omap_overlay.c
+++ b/drivers/gpu/drm/omapdrm/omap_overlay.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_print.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 24a2ded08b45..f9698890c989 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -10,6 +10,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -229,7 +230,7 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
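
drm_atomic_get_existing_crtc_state() is the older, ambiguous accessor; the atomic helpers distinguish explicitly between the state being replaced and the state being applied. In an atomic_check hook the new state is the one to validate, which is what the change above selects:

/* Both accessors take the in-flight drm_atomic_state and a CRTC: */
struct drm_crtc_state *new_state =
	drm_atomic_get_new_crtc_state(state, crtc);	/* being applied */
struct drm_crtc_state *old_state =
	drm_atomic_get_old_crtc_state(state, crtc);	/* being replaced */
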
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 09b9f7ff9340..76f6af819037 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -215,6 +215,19 @@ config DRM_PANEL_HIMAX_HX8394
If M is selected the module will be called panel-himax-hx8394.
+config DRM_PANEL_HYDIS_HV101HD1
+ tristate "Hydis HV101HD1 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Hydis HV101HD1
+ 2-lane 1366x768 MIPI DSI panel found in ASUS VivoTab RT TF600T.
+ HV101HD1 is a color active matrix TFT LCD module using amorphous
+ silicon TFTs (Thin Film Transistors) as active switching devices.
+
+ If M is selected the module will be called panel-hydis-hv101hd1.
+
config DRM_PANEL_ILITEK_IL9322
tristate "Ilitek ILI9322 320x240 QVGA panels"
depends on OF && SPI
@@ -395,6 +408,19 @@ config DRM_PANEL_LG_LB035Q02
(found on the Gumstix Overo Palo35 board). To compile this driver as
a module, choose M here.
+config DRM_PANEL_LG_LD070WX3
+ tristate "LG LD070WX3 MIPI DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable support for the LD070WX3 MIPI DSI
+ panel found in the NVIDIA Tegra Note 7 tablet.
+
+ To compile this driver as a module, choose M here: the module will
+ be called panel-lg-ld070wx3.
+
config DRM_PANEL_LG_LG4573
tristate "LG4573 RGB/SPI panel"
depends on OF && SPI
@@ -788,6 +814,19 @@ config DRM_PANEL_SAMSUNG_S6D7AA0
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E3FC2X01
+ tristate "Samsung S6E3FC2X01 DSI panel controller"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y or M here if you want to enable support for the
+ Samsung S6E3FC2 DDIC and the connected MIPI DSI panel.
+ Currently supported panels:
+
+ Samsung AMS641RW (found in the OnePlus 6T smartphone)
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
@@ -843,17 +882,29 @@ config DRM_PANEL_SAMSUNG_S6E8AA0
select DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01
+ tristate "Samsung AMS561RA01 panel with S6E8AA5X01 controller"
+ depends on GPIOLIB && OF && REGULATOR
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Samsung AMS561RA01
+ panel, which uses Samsung's S6E8AA5X01 controller. The panel has a
+ ~5.6 inch AMOLED display, and the controller is driven by the MIPI
+ DSI protocol with 4 lanes.
+
config DRM_PANEL_SAMSUNG_SOFEF00
- tristate "Samsung sofef00/s6e3fc2x01 OnePlus 6/6T DSI cmd mode panels"
+ tristate "Samsung SOFEF00 DSI panel controller"
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
select VIDEOMODE_HELPERS
help
Say Y or M here if you want to enable support for the Samsung AMOLED
- command mode panels found in the OnePlus 6/6T smartphones.
+ SOFEF00 DDIC and the connected panel.
+ Currently supported panels:
- The panels are 2280x1080@60Hz and 2340x1080@60Hz respectively
+ Samsung AMS628NW01 (found in OnePlus 6, 1080x2280@60Hz)
config DRM_PANEL_SEIKO_43WVF1G
tristate "Seiko 43WVF1G panel"
@@ -864,6 +915,21 @@ config DRM_PANEL_SEIKO_43WVF1G
Say Y here if you want to enable support for the Seiko
43WVF1G controller for 800x480 LCD panels
+config DRM_PANEL_SHARP_LQ079L1SX01
+ tristate "Sharp LQ079L1SX01 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable support for Sharp LQ079L1SX01
+ TFT-LCD modules. The panel has a 2560x1600 resolution and uses
+ 24 bit RGB per pixel. It provides a dual MIPI DSI interface to
+ the host.
+
+ To compile this driver as a module, choose M here: the module
+ will be called panel-sharp-lq079l1sx01.
+
config DRM_PANEL_SHARP_LQ101R1SX01
tristate "Sharp LQ101R1SX01 panel"
depends on OF
@@ -971,7 +1037,7 @@ config DRM_PANEL_STARTEK_KD070FHFID015
depends on BACKLIGHT_CLASS_DEVICE
help
Say Y here if you want to enable support for STARTEK KD070FHFID015 DSI panel
- based on RENESAS-R69429 controller. The pannel is a 7-inch TFT LCD display
+ based on RENESAS-R69429 controller. The panel is a 7-inch TFT LCD display
with a resolution of 1024 x 600 pixels. It provides a MIPI DSI interface to
the host, a built-in LED backlight and touch controller.
@@ -1021,6 +1087,17 @@ config DRM_PANEL_SYNAPTICS_R63353
Say Y if you want to enable support for panels based on the
Synaptics R63353 controller.
+config DRM_PANEL_SYNAPTICS_TDDI
+ tristate "Synaptics TDDI display panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for the Synaptics TDDI display
+ panels. There are multiple MIPI DSI panels manufactured under the TDDI
+ name, with varying resolutions and data lanes. They also have a
+ built-in LED backlight and a touch controller.
+
config DRM_PANEL_TDO_TL070WSH30
tristate "TDO TL070WSH30 DSI panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 957555b49996..b9562a6fdcb3 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112B) += panel-himax-hx83112b.o
obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
+obj-$(CONFIG_DRM_PANEL_HYDIS_HV101HD1) += panel-hydis-hv101hd1.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9805) += panel-ilitek-ili9805.o
@@ -40,6 +41,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LINCOLNTECH_LCD197) += panel-lincolntech-lcd197.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
+obj-$(CONFIG_DRM_PANEL_LG_LD070WX3) += panel-lg-ld070wx3.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_LG_SW43408) += panel-lg-sw43408.o
obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o
@@ -78,6 +80,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0) += panel-samsung-s6d7aa0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FA7) += panel-samsung-s6e3fa7.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3FC2X01) += panel-samsung-s6e3fc2x01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA8) += panel-samsung-s6e3ha8.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
@@ -87,8 +90,10 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0_DSI) += panel-samsung-s6e63m0-dsi.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS427AP24) += panel-samsung-s6e88a0-ams427ap24.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA5X01_AMS561RA01) += panel-samsung-s6e8aa5x01-ams561ra01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_SOFEF00) += panel-samsung-sofef00.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LQ079L1SX01) += panel-sharp-lq079l1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
@@ -98,6 +103,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
obj-$(CONFIG_DRM_PANEL_SUMMIT) += panel-summit.o
obj-$(CONFIG_DRM_PANEL_SYNAPTICS_R63353) += panel-synaptics-r63353.o
+obj-$(CONFIG_DRM_PANEL_SYNAPTICS_TDDI) += panel-synaptics-tddi.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_PANEL_SONY_TD4353_JDI) += panel-sony-td4353-jdi.o
obj-$(CONFIG_DRM_PANEL_SONY_TULIP_TRULY_NT35521) += panel-sony-tulip-truly-nt35521.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 9a56e208cbdd..415b894890ad 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1736,10 +1736,11 @@ static const struct panel_delay delay_200_500_e50 = {
.enable = 50,
};
-static const struct panel_delay delay_200_500_e50_p2e200 = {
+static const struct panel_delay delay_200_500_e50_d50_p2e200 = {
.hpd_absent = 200,
.unprepare = 500,
.enable = 50,
+ .disable = 50,
.prepare_to_enable = 200,
};
@@ -1795,6 +1796,13 @@ static const struct panel_delay delay_200_500_e200_d10 = {
.disable = 10,
};
+static const struct panel_delay delay_200_500_e200_d50 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 200,
+ .disable = 50,
+};
+
static const struct panel_delay delay_200_150_e200 = {
.hpd_absent = 200,
.unprepare = 150,
@@ -1828,6 +1836,20 @@ static const struct panel_delay delay_50_500_e200_d200_po2e335 = {
.powered_on_to_enable = 335,
};
+static const struct panel_delay delay_200_500_e50_d100 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+ .disable = 100,
+};
+
+static const struct panel_delay delay_80_500_e50_d50 = {
+ .hpd_absent = 80,
+ .unprepare = 500,
+ .enable = 50,
+ .disable = 50,
+};
+
#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
{ \
.ident = { \
@@ -1857,6 +1879,7 @@ static const struct panel_delay delay_50_500_e200_d200_po2e335 = {
* Sort first by vendor, then by product ID.
*/
static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x04a4, &delay_200_500_e50, "B122UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x105c, &delay_200_500_e50, "B116XTN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1062, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x125c, &delay_200_500_e50, "Unknown"),
@@ -1865,6 +1888,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1e9b, &delay_200_500_e50, "B133UAN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x1ea5, &delay_200_500_e50, "B116XAK01.6"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x203d, &delay_200_500_e50, "B140HTN02.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x205c, &delay_200_500_e50, "B116XAN02.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x208d, &delay_200_500_e50, "B140HTN02.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
@@ -1875,6 +1899,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
&auo_b116xa3_mode),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x435c, &delay_200_500_e50, "Unknown"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x52b0, &delay_200_500_e50, "B116XAK02.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x582d, &delay_200_500_e50, "B133UAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x635c, &delay_200_500_e50, "B116XAN06.3"),
@@ -1882,10 +1907,13 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x723c, &delay_200_500_e50, "B140XTN07.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x73aa, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x8594, &delay_200_500_e50, "B133UAN01.0"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x8bba, &delay_200_500_e50, "B140UAN08.5"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa199, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xa7b3, &delay_200_500_e50, "B140UAN04.4"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xb7a9, &delay_200_500_e50, "B140HAK03.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc4b4, &delay_200_500_e50, "B116XAT04.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xc9a8, &delay_200_500_e50, "B140QAN08.H"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0xcdba, &delay_200_500_e50, "B140UAX01.2"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xd497, &delay_200_500_e50, "B120XAN01.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0xf390, &delay_200_500_e50, "B140XTN07.7"),
@@ -1934,21 +1962,27 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x09dd, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a1b, &delay_200_500_e50, "NV133WUM-N63"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"),
- EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80, "NV116WHM-N49"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a6a, &delay_200_500_e80, "NV140WUM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a84, &delay_200_500_e50, "NV133WUM-T01"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"),
- EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80_d50, "NV122WUM-N41"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b66, &delay_200_500_e80, "NE140WUM-N6G"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c20, &delay_200_500_e80, "NT140FHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0c93, &delay_200_500_e200, "Unknown"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cb6, &delay_200_500_e200, "NT116WHM-N44"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf2, &delay_200_500_e200, "NV156FHM-N4S"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cf6, &delay_200_500_e200, "NV140WUM-N64"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0cfa, &delay_200_500_e50, "NV116WHM-A4D"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d45, &delay_200_500_e80, "NV116WHM-N4B"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0d73, &delay_200_500_e80, "NE140WUM-N6S"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ddf, &delay_200_500_e80, "NV116WHM-T01"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1130, &delay_200_500_e50, "N116BGE-EB2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1132, &delay_200_500_e80_d50, "N116BGE-EA2"),
@@ -1966,27 +2000,40 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115b, &delay_200_500_e80_d50, "N116BCN-EB1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115d, &delay_200_500_e80_d50, "N116BCA-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x115f, &delay_200_500_e80_d50, "N116BCL-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1163, &delay_200_500_e80_d50, "N116BCJ-EAK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x124c, &delay_200_500_e80_d50, "N122JCA-ENK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1441, &delay_200_500_e80_d50, "N140JCA-ELK"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x148f, &delay_200_500_e80, "N140HCA-EAC"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x1565, &delay_200_500_e80, "N156HCA-EAB"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x7402, &delay_200_500_e200_d50, "N116BCA-EAK"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
- EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_d50_p2e200, "MNC207QS1-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_d50_p2e200, "MNE007JA1-2"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"),
- EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50, "MNB601LS1-4"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x143f, &delay_200_500_e50, "MNE007QS3-6"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x144b, &delay_200_500_e80, "MNE001BS1-4"),
EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x146e, &delay_80_500_e50_d50, "MNE007QB3-1"),
+ EDP_PANEL_ENTRY('C', 'S', 'W', 0x1519, &delay_200_500_e80_d50, "MNF601BS1-3"),
EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"),
@@ -2007,6 +2054,8 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1212, &delay_200_500_e50, "KD116N0930A16"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1707, &delay_200_150_e50, "KD116N2130B12"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x0110, &delay_200_500_e50, "KD116N3730A07"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x0397, &delay_200_500_e50, "KD116N3730A12"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
@@ -2027,12 +2076,16 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &delay_80_500_e50, "LQ140M1JW46"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x153a, &delay_200_500_e50, "LQ140T1JH01"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+ EDP_PANEL_ENTRY('S', 'H', 'P', 0x158f, &delay_200_500_p2e100, "LQ134Z1"),
EDP_PANEL_ENTRY('S', 'H', 'P', 0x1593, &delay_200_500_p2e100, "LQ134N1"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0009, &delay_200_500_e250, "116QHD024002"),
EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"),
+ EDP_PANEL_ENTRY('T', 'M', 'A', 0x0811, &delay_200_500_e80_d50, "TM140VDXP01-04"),
+ EDP_PANEL_ENTRY('T', 'M', 'A', 0x2094, &delay_200_500_e50_d100, "TL140VDMS03-01"),
+
{ /* sentinel */ }
};
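
The naming convention for the delay structures added above encodes their fields: delay_<hpd_absent>_<unprepare>_e<enable>_d<disable>, all in milliseconds, with p2e and po2e marking prepare_to_enable and powered_on_to_enable. A hypothetical new entry (values and ident purely illustrative) follows the same two steps:

/* 1) Define (or reuse) a delay structure named after its fields: */
static const struct panel_delay delay_200_500_e50_d50 = {
	.hpd_absent = 200,
	.unprepare = 500,
	.enable = 50,
	.disable = 50,
};

/* 2) Add the panel to edp_panels[], which is sorted first by vendor,
 *    then by product ID: */
EDP_PANEL_ENTRY('X', 'Y', 'Z', 0x0000, &delay_200_500_e50_d50, "EXAMPLE01.0"),
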
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8279.c b/drivers/gpu/drm/panel/panel-himax-hx8279.c
index fb302d1f91b9..9e443c719843 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8279.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8279.c
@@ -935,7 +935,7 @@ static int hx8279_check_dig_gamma(struct hx8279 *hx, struct device *dev, const u
j++;
x++;
} while (x < 4);
- };
+ }
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c b/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c
new file mode 100644
index 000000000000..46426c388932
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-hydis-hv101hd1.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct hv101hd1 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+};
+
+static const struct regulator_bulk_data hv101hd1_supplies[] = {
+ { .supply = "vdd" },
+ { .supply = "vio" },
+};
+
+static inline struct hv101hd1 *to_hv101hd1(struct drm_panel *panel)
+{
+ return container_of(panel, struct hv101hd1, panel);
+}
+
+static int hv101hd1_prepare(struct drm_panel *panel)
+{
+ struct hv101hd1 *hv = to_hv101hd1(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = hv->dsi };
+ struct device *dev = &hv->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(hv101hd1_supplies), hv->supplies);
+ if (ret) {
+ dev_err(dev, "error enabling regulators (%d)\n", ret);
+ return ret;
+ }
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ return 0;
+}
+
+static int hv101hd1_disable(struct drm_panel *panel)
+{
+ struct hv101hd1 *hv = to_hv101hd1(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = hv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 120);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ return 0;
+}
+
+static int hv101hd1_unprepare(struct drm_panel *panel)
+{
+ struct hv101hd1 *hv = to_hv101hd1(panel);
+
+ return regulator_bulk_disable(ARRAY_SIZE(hv101hd1_supplies),
+ hv->supplies);
+}
+
+static const struct drm_display_mode hv101hd1_mode = {
+ .clock = (1366 + 74 + 36 + 24) * (768 + 21 + 7 + 4) * 60 / 1000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 74,
+ .hsync_end = 1366 + 74 + 36,
+ .htotal = 1366 + 74 + 36 + 24,
+ .vdisplay = 768,
+ .vsync_start = 768 + 21,
+ .vsync_end = 768 + 21 + 7,
+ .vtotal = 768 + 21 + 7 + 4,
+ .width_mm = 140,
+ .height_mm = 220,
+};
+
+static int hv101hd1_get_modes(struct drm_panel *panel, struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &hv101hd1_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs hv101hd1_panel_funcs = {
+ .prepare = hv101hd1_prepare,
+ .disable = hv101hd1_disable,
+ .unprepare = hv101hd1_unprepare,
+ .get_modes = hv101hd1_get_modes,
+};
+
+static int hv101hd1_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hv101hd1 *hv;
+ int ret;
+
+ hv = devm_drm_panel_alloc(dev, struct hv101hd1, panel,
+ &hv101hd1_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(hv))
+ return PTR_ERR(hv);
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(hv101hd1_supplies),
+ hv101hd1_supplies, &hv->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ hv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, hv);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&hv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&hv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&hv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void hv101hd1_remove(struct mipi_dsi_device *dsi)
+{
+ struct hv101hd1 *hv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev,
+ "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&hv->panel);
+}
+
+static const struct of_device_id hv101hd1_of_match[] = {
+ { .compatible = "hydis,hv101hd1" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hv101hd1_of_match);
+
+static struct mipi_dsi_driver hv101hd1_driver = {
+ .driver = {
+ .name = "panel-hv101hd1",
+ .of_match_table = hv101hd1_of_match,
+ },
+ .probe = hv101hd1_probe,
+ .remove = hv101hd1_remove,
+};
+module_mipi_dsi_driver(hv101hd1_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for Hydis HV101HD1 panel");
+MODULE_LICENSE("GPL");
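
A quick consistency check on the fixed mode in the new driver, assuming the usual convention that drm_display_mode.clock is in kHz:

/* htotal = 1366 + 74 + 36 + 24 = 1500 pixels per line
 * vtotal =  768 + 21 +  7 +  4 =  800 lines per frame
 * clock  = 1500 * 800 * 60 / 1000 = 72000 kHz,
 * i.e. a 72 MHz pixel clock for 1366x768 at 60 Hz refresh.
 */
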
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index ac433345a179..947b47841b01 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -100,7 +100,7 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x13, 0x00),
ILI9881C_COMMAND_INSTR(0x14, 0x00),
ILI9881C_COMMAND_INSTR(0x15, 0x00),
- ILI9881C_COMMAND_INSTR(0x16, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x16, 0x0c),
ILI9881C_COMMAND_INSTR(0x17, 0x00),
ILI9881C_COMMAND_INSTR(0x18, 0x00),
ILI9881C_COMMAND_INSTR(0x19, 0x00),
@@ -108,7 +108,7 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x1b, 0x00),
ILI9881C_COMMAND_INSTR(0x1c, 0x00),
ILI9881C_COMMAND_INSTR(0x1d, 0x00),
- ILI9881C_COMMAND_INSTR(0x1e, 0xC0),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
ILI9881C_COMMAND_INSTR(0x1f, 0x80),
ILI9881C_COMMAND_INSTR(0x20, 0x04),
ILI9881C_COMMAND_INSTR(0x21, 0x01),
@@ -134,7 +134,7 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x35, 0x00),
ILI9881C_COMMAND_INSTR(0x36, 0x00),
ILI9881C_COMMAND_INSTR(0x37, 0x00),
- ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
ILI9881C_COMMAND_INSTR(0x3a, 0x00),
ILI9881C_COMMAND_INSTR(0x3b, 0x00),
@@ -173,11 +173,11 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x67, 0x02),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
ILI9881C_COMMAND_INSTR(0x69, 0x02),
- ILI9881C_COMMAND_INSTR(0x6a, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x0c),
ILI9881C_COMMAND_INSTR(0x6b, 0x02),
- ILI9881C_COMMAND_INSTR(0x6c, 0x0F),
- ILI9881C_COMMAND_INSTR(0x6d, 0x0E),
- ILI9881C_COMMAND_INSTR(0x6e, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x0d),
ILI9881C_COMMAND_INSTR(0x6f, 0x06),
ILI9881C_COMMAND_INSTR(0x70, 0x07),
ILI9881C_COMMAND_INSTR(0x71, 0x02),
@@ -195,74 +195,74 @@ static const struct ili9881c_instr lhr050h41_init[] = {
ILI9881C_COMMAND_INSTR(0x7d, 0x02),
ILI9881C_COMMAND_INSTR(0x7e, 0x02),
ILI9881C_COMMAND_INSTR(0x7f, 0x02),
- ILI9881C_COMMAND_INSTR(0x80, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x80, 0x0c),
ILI9881C_COMMAND_INSTR(0x81, 0x02),
- ILI9881C_COMMAND_INSTR(0x82, 0x0F),
- ILI9881C_COMMAND_INSTR(0x83, 0x0E),
- ILI9881C_COMMAND_INSTR(0x84, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x82, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x83, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x84, 0x0d),
ILI9881C_COMMAND_INSTR(0x85, 0x06),
ILI9881C_COMMAND_INSTR(0x86, 0x07),
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
- ILI9881C_COMMAND_INSTR(0x6C, 0x15),
- ILI9881C_COMMAND_INSTR(0x6E, 0x22),
- ILI9881C_COMMAND_INSTR(0x6F, 0x33),
- ILI9881C_COMMAND_INSTR(0x3A, 0xA4),
- ILI9881C_COMMAND_INSTR(0x8D, 0x0D),
- ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x22),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3a, 0xa4),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
ILI9881C_COMMAND_INSTR(0x26, 0x76),
- ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
- ILI9881C_COMMAND_INSTR(0x53, 0xDC),
- ILI9881C_COMMAND_INSTR(0x55, 0xA7),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x53, 0xdc),
+ ILI9881C_COMMAND_INSTR(0x55, 0xa7),
ILI9881C_COMMAND_INSTR(0x50, 0x78),
ILI9881C_COMMAND_INSTR(0x51, 0x78),
ILI9881C_COMMAND_INSTR(0x31, 0x02),
ILI9881C_COMMAND_INSTR(0x60, 0x14),
- ILI9881C_COMMAND_INSTR(0xA0, 0x2A),
- ILI9881C_COMMAND_INSTR(0xA1, 0x39),
- ILI9881C_COMMAND_INSTR(0xA2, 0x46),
- ILI9881C_COMMAND_INSTR(0xA3, 0x0e),
- ILI9881C_COMMAND_INSTR(0xA4, 0x12),
- ILI9881C_COMMAND_INSTR(0xA5, 0x25),
- ILI9881C_COMMAND_INSTR(0xA6, 0x19),
- ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
- ILI9881C_COMMAND_INSTR(0xA8, 0xa6),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
- ILI9881C_COMMAND_INSTR(0xAA, 0x29),
- ILI9881C_COMMAND_INSTR(0xAB, 0x85),
- ILI9881C_COMMAND_INSTR(0xAC, 0x1C),
- ILI9881C_COMMAND_INSTR(0xAD, 0x1B),
- ILI9881C_COMMAND_INSTR(0xAE, 0x51),
- ILI9881C_COMMAND_INSTR(0xAF, 0x22),
- ILI9881C_COMMAND_INSTR(0xB0, 0x2d),
- ILI9881C_COMMAND_INSTR(0xB1, 0x4f),
- ILI9881C_COMMAND_INSTR(0xB2, 0x59),
- ILI9881C_COMMAND_INSTR(0xB3, 0x3F),
- ILI9881C_COMMAND_INSTR(0xC0, 0x2A),
- ILI9881C_COMMAND_INSTR(0xC1, 0x3a),
- ILI9881C_COMMAND_INSTR(0xC2, 0x45),
- ILI9881C_COMMAND_INSTR(0xC3, 0x0e),
- ILI9881C_COMMAND_INSTR(0xC4, 0x11),
- ILI9881C_COMMAND_INSTR(0xC5, 0x24),
- ILI9881C_COMMAND_INSTR(0xC6, 0x1a),
- ILI9881C_COMMAND_INSTR(0xC7, 0x1c),
- ILI9881C_COMMAND_INSTR(0xC8, 0xaa),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
- ILI9881C_COMMAND_INSTR(0xCA, 0x29),
- ILI9881C_COMMAND_INSTR(0xCB, 0x96),
- ILI9881C_COMMAND_INSTR(0xCC, 0x1C),
- ILI9881C_COMMAND_INSTR(0xCD, 0x1B),
- ILI9881C_COMMAND_INSTR(0xCE, 0x51),
- ILI9881C_COMMAND_INSTR(0xCF, 0x22),
- ILI9881C_COMMAND_INSTR(0xD0, 0x2b),
- ILI9881C_COMMAND_INSTR(0xD1, 0x4b),
- ILI9881C_COMMAND_INSTR(0xD2, 0x59),
- ILI9881C_COMMAND_INSTR(0xD3, 0x3F),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x39),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x46),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x12),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x25),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x19),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa8, 0xa6),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29),
+ ILI9881C_COMMAND_INSTR(0xab, 0x85),
+ ILI9881C_COMMAND_INSTR(0xac, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xad, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xae, 0x51),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x22),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x2d),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x4f),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x3f),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x3a),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x45),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x0e),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x11),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x24),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc8, 0xaa),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xca, 0x29),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x96),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xce, 0x51),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x22),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x3f),
};
static const struct ili9881c_instr k101_im2byl02_init[] = {
@@ -276,12 +276,12 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x07, 0x00),
ILI9881C_COMMAND_INSTR(0x08, 0x00),
ILI9881C_COMMAND_INSTR(0x09, 0x00),
- ILI9881C_COMMAND_INSTR(0x0A, 0x01),
- ILI9881C_COMMAND_INSTR(0x0B, 0x01),
- ILI9881C_COMMAND_INSTR(0x0C, 0x00),
- ILI9881C_COMMAND_INSTR(0x0D, 0x01),
- ILI9881C_COMMAND_INSTR(0x0E, 0x01),
- ILI9881C_COMMAND_INSTR(0x0F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
ILI9881C_COMMAND_INSTR(0x10, 0x00),
ILI9881C_COMMAND_INSTR(0x11, 0x00),
ILI9881C_COMMAND_INSTR(0x12, 0x00),
@@ -292,12 +292,12 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x17, 0x00),
ILI9881C_COMMAND_INSTR(0x18, 0x00),
ILI9881C_COMMAND_INSTR(0x19, 0x00),
- ILI9881C_COMMAND_INSTR(0x1A, 0x00),
- ILI9881C_COMMAND_INSTR(0x1B, 0x00),
- ILI9881C_COMMAND_INSTR(0x1C, 0x00),
- ILI9881C_COMMAND_INSTR(0x1D, 0x00),
- ILI9881C_COMMAND_INSTR(0x1E, 0x40),
- ILI9881C_COMMAND_INSTR(0x1F, 0xC0),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0xc0),
ILI9881C_COMMAND_INSTR(0x20, 0x06),
ILI9881C_COMMAND_INSTR(0x21, 0x01),
ILI9881C_COMMAND_INSTR(0x22, 0x06),
@@ -306,14 +306,14 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x25, 0x88),
ILI9881C_COMMAND_INSTR(0x26, 0x00),
ILI9881C_COMMAND_INSTR(0x27, 0x00),
- ILI9881C_COMMAND_INSTR(0x28, 0x3B),
+ ILI9881C_COMMAND_INSTR(0x28, 0x3b),
ILI9881C_COMMAND_INSTR(0x29, 0x03),
- ILI9881C_COMMAND_INSTR(0x2A, 0x00),
- ILI9881C_COMMAND_INSTR(0x2B, 0x00),
- ILI9881C_COMMAND_INSTR(0x2C, 0x00),
- ILI9881C_COMMAND_INSTR(0x2D, 0x00),
- ILI9881C_COMMAND_INSTR(0x2E, 0x00),
- ILI9881C_COMMAND_INSTR(0x2F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
ILI9881C_COMMAND_INSTR(0x30, 0x00),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x32, 0x00),
@@ -324,12 +324,12 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x37, 0x00),
ILI9881C_COMMAND_INSTR(0x38, 0x00),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
- ILI9881C_COMMAND_INSTR(0x3A, 0x00),
- ILI9881C_COMMAND_INSTR(0x3B, 0x00),
- ILI9881C_COMMAND_INSTR(0x3C, 0x00),
- ILI9881C_COMMAND_INSTR(0x3D, 0x00),
- ILI9881C_COMMAND_INSTR(0x3E, 0x00),
- ILI9881C_COMMAND_INSTR(0x3F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
ILI9881C_COMMAND_INSTR(0x40, 0x00),
ILI9881C_COMMAND_INSTR(0x41, 0x00),
ILI9881C_COMMAND_INSTR(0x42, 0x00),
@@ -340,17 +340,17 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x52, 0x45),
ILI9881C_COMMAND_INSTR(0x53, 0x67),
ILI9881C_COMMAND_INSTR(0x54, 0x89),
- ILI9881C_COMMAND_INSTR(0x55, 0xAB),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
ILI9881C_COMMAND_INSTR(0x56, 0x01),
ILI9881C_COMMAND_INSTR(0x57, 0x23),
ILI9881C_COMMAND_INSTR(0x58, 0x45),
ILI9881C_COMMAND_INSTR(0x59, 0x67),
- ILI9881C_COMMAND_INSTR(0x5A, 0x89),
- ILI9881C_COMMAND_INSTR(0x5B, 0xAB),
- ILI9881C_COMMAND_INSTR(0x5C, 0xCD),
- ILI9881C_COMMAND_INSTR(0x5D, 0xEF),
- ILI9881C_COMMAND_INSTR(0x5E, 0x00),
- ILI9881C_COMMAND_INSTR(0x5F, 0x01),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x01),
ILI9881C_COMMAND_INSTR(0x60, 0x01),
ILI9881C_COMMAND_INSTR(0x61, 0x06),
ILI9881C_COMMAND_INSTR(0x62, 0x06),
@@ -361,101 +361,101 @@ static const struct ili9881c_instr k101_im2byl02_init[] = {
ILI9881C_COMMAND_INSTR(0x67, 0x02),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
ILI9881C_COMMAND_INSTR(0x69, 0x05),
- ILI9881C_COMMAND_INSTR(0x6A, 0x05),
- ILI9881C_COMMAND_INSTR(0x6B, 0x02),
- ILI9881C_COMMAND_INSTR(0x6C, 0x0D),
- ILI9881C_COMMAND_INSTR(0x6D, 0x0D),
- ILI9881C_COMMAND_INSTR(0x6E, 0x0C),
- ILI9881C_COMMAND_INSTR(0x6F, 0x0C),
- ILI9881C_COMMAND_INSTR(0x70, 0x0F),
- ILI9881C_COMMAND_INSTR(0x71, 0x0F),
- ILI9881C_COMMAND_INSTR(0x72, 0x0E),
- ILI9881C_COMMAND_INSTR(0x73, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x05),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x70, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x71, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x72, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x73, 0x0e),
ILI9881C_COMMAND_INSTR(0x74, 0x02),
ILI9881C_COMMAND_INSTR(0x75, 0x01),
ILI9881C_COMMAND_INSTR(0x76, 0x01),
ILI9881C_COMMAND_INSTR(0x77, 0x06),
ILI9881C_COMMAND_INSTR(0x78, 0x06),
ILI9881C_COMMAND_INSTR(0x79, 0x07),
- ILI9881C_COMMAND_INSTR(0x7A, 0x07),
- ILI9881C_COMMAND_INSTR(0x7B, 0x00),
- ILI9881C_COMMAND_INSTR(0x7C, 0x00),
- ILI9881C_COMMAND_INSTR(0x7D, 0x02),
- ILI9881C_COMMAND_INSTR(0x7E, 0x02),
- ILI9881C_COMMAND_INSTR(0x7F, 0x05),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x05),
ILI9881C_COMMAND_INSTR(0x80, 0x05),
ILI9881C_COMMAND_INSTR(0x81, 0x02),
- ILI9881C_COMMAND_INSTR(0x82, 0x0D),
- ILI9881C_COMMAND_INSTR(0x83, 0x0D),
- ILI9881C_COMMAND_INSTR(0x84, 0x0C),
- ILI9881C_COMMAND_INSTR(0x85, 0x0C),
- ILI9881C_COMMAND_INSTR(0x86, 0x0F),
- ILI9881C_COMMAND_INSTR(0x87, 0x0F),
- ILI9881C_COMMAND_INSTR(0x88, 0x0E),
- ILI9881C_COMMAND_INSTR(0x89, 0x0E),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x83, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x84, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x85, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x86, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x87, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x88, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x89, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
- ILI9881C_COMMAND_INSTR(0x3B, 0xC0), /* ILI4003D sel */
- ILI9881C_COMMAND_INSTR(0x6C, 0x15), /* Set VCORE voltage = 1.5V */
- ILI9881C_COMMAND_INSTR(0x6E, 0x2A), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */
- ILI9881C_COMMAND_INSTR(0x6F, 0x33), /* pumping ratio VGH=5x VGL=-3x */
- ILI9881C_COMMAND_INSTR(0x8D, 0x1B), /* VGL clamp -10V */
- ILI9881C_COMMAND_INSTR(0x87, 0xBA), /* ESD */
- ILI9881C_COMMAND_INSTR(0x3A, 0x24), /* POWER SAVING */
+ ILI9881C_COMMAND_INSTR(0x3b, 0xc0), /* ILI4003D sel */
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15), /* Set VCORE voltage = 1.5V */
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a), /* di_pwr_reg=0 for power mode 2A, VGH clamp 18V */
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33), /* pumping ratio VGH=5x VGL=-3x */
+ ILI9881C_COMMAND_INSTR(0x8d, 0x1b), /* VGL clamp -10V */
+ ILI9881C_COMMAND_INSTR(0x87, 0xba), /* ESD */
+ ILI9881C_COMMAND_INSTR(0x3a, 0x24), /* POWER SAVING */
ILI9881C_COMMAND_INSTR(0x26, 0x76),
- ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A), /* BGR, SS */
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a), /* BGR, SS */
ILI9881C_COMMAND_INSTR(0x31, 0x00), /* Zigzag type3 inversion */
ILI9881C_COMMAND_INSTR(0x40, 0x53), /* ILI4003D sel */
ILI9881C_COMMAND_INSTR(0x43, 0x66),
- ILI9881C_COMMAND_INSTR(0x53, 0x4C),
+ ILI9881C_COMMAND_INSTR(0x53, 0x4c),
ILI9881C_COMMAND_INSTR(0x50, 0x87),
ILI9881C_COMMAND_INSTR(0x51, 0x82),
ILI9881C_COMMAND_INSTR(0x60, 0x15),
ILI9881C_COMMAND_INSTR(0x61, 0x01),
- ILI9881C_COMMAND_INSTR(0x62, 0x0C),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0c),
ILI9881C_COMMAND_INSTR(0x63, 0x00),
- ILI9881C_COMMAND_INSTR(0xA0, 0x00),
- ILI9881C_COMMAND_INSTR(0xA1, 0x13), /* VP251 */
- ILI9881C_COMMAND_INSTR(0xA2, 0x23), /* VP247 */
- ILI9881C_COMMAND_INSTR(0xA3, 0x14), /* VP243 */
- ILI9881C_COMMAND_INSTR(0xA4, 0x16), /* VP239 */
- ILI9881C_COMMAND_INSTR(0xA5, 0x29), /* VP231 */
- ILI9881C_COMMAND_INSTR(0xA6, 0x1E), /* VP219 */
- ILI9881C_COMMAND_INSTR(0xA7, 0x1D), /* VP203 */
- ILI9881C_COMMAND_INSTR(0xA8, 0x86), /* VP175 */
- ILI9881C_COMMAND_INSTR(0xA9, 0x1E), /* VP144 */
- ILI9881C_COMMAND_INSTR(0xAA, 0x29), /* VP111 */
- ILI9881C_COMMAND_INSTR(0xAB, 0x74), /* VP80 */
- ILI9881C_COMMAND_INSTR(0xAC, 0x19), /* VP52 */
- ILI9881C_COMMAND_INSTR(0xAD, 0x17), /* VP36 */
- ILI9881C_COMMAND_INSTR(0xAE, 0x4B), /* VP24 */
- ILI9881C_COMMAND_INSTR(0xAF, 0x20), /* VP16 */
- ILI9881C_COMMAND_INSTR(0xB0, 0x26), /* VP12 */
- ILI9881C_COMMAND_INSTR(0xB1, 0x4C), /* VP8 */
- ILI9881C_COMMAND_INSTR(0xB2, 0x5D), /* VP4 */
- ILI9881C_COMMAND_INSTR(0xB3, 0x3F), /* VP0 */
- ILI9881C_COMMAND_INSTR(0xC0, 0x00), /* VN255 GAMMA N */
- ILI9881C_COMMAND_INSTR(0xC1, 0x13), /* VN251 */
- ILI9881C_COMMAND_INSTR(0xC2, 0x23), /* VN247 */
- ILI9881C_COMMAND_INSTR(0xC3, 0x14), /* VN243 */
- ILI9881C_COMMAND_INSTR(0xC4, 0x16), /* VN239 */
- ILI9881C_COMMAND_INSTR(0xC5, 0x29), /* VN231 */
- ILI9881C_COMMAND_INSTR(0xC6, 0x1E), /* VN219 */
- ILI9881C_COMMAND_INSTR(0xC7, 0x1D), /* VN203 */
- ILI9881C_COMMAND_INSTR(0xC8, 0x86), /* VN175 */
- ILI9881C_COMMAND_INSTR(0xC9, 0x1E), /* VN144 */
- ILI9881C_COMMAND_INSTR(0xCA, 0x29), /* VN111 */
- ILI9881C_COMMAND_INSTR(0xCB, 0x74), /* VN80 */
- ILI9881C_COMMAND_INSTR(0xCC, 0x19), /* VN52 */
- ILI9881C_COMMAND_INSTR(0xCD, 0x17), /* VN36 */
- ILI9881C_COMMAND_INSTR(0xCE, 0x4B), /* VN24 */
- ILI9881C_COMMAND_INSTR(0xCF, 0x20), /* VN16 */
- ILI9881C_COMMAND_INSTR(0xD0, 0x26), /* VN12 */
- ILI9881C_COMMAND_INSTR(0xD1, 0x4C), /* VN8 */
- ILI9881C_COMMAND_INSTR(0xD2, 0x5D), /* VN4 */
- ILI9881C_COMMAND_INSTR(0xD3, 0x3F), /* VN0 */
+ ILI9881C_COMMAND_INSTR(0xa0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x13), /* VP251 */
+ ILI9881C_COMMAND_INSTR(0xa2, 0x23), /* VP247 */
+ ILI9881C_COMMAND_INSTR(0xa3, 0x14), /* VP243 */
+ ILI9881C_COMMAND_INSTR(0xa4, 0x16), /* VP239 */
+ ILI9881C_COMMAND_INSTR(0xa5, 0x29), /* VP231 */
+ ILI9881C_COMMAND_INSTR(0xa6, 0x1e), /* VP219 */
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1d), /* VP203 */
+ ILI9881C_COMMAND_INSTR(0xa8, 0x86), /* VP175 */
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1e), /* VP144 */
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29), /* VP111 */
+ ILI9881C_COMMAND_INSTR(0xab, 0x74), /* VP80 */
+ ILI9881C_COMMAND_INSTR(0xac, 0x19), /* VP52 */
+ ILI9881C_COMMAND_INSTR(0xad, 0x17), /* VP36 */
+ ILI9881C_COMMAND_INSTR(0xae, 0x4b), /* VP24 */
+ ILI9881C_COMMAND_INSTR(0xaf, 0x20), /* VP16 */
+ ILI9881C_COMMAND_INSTR(0xb0, 0x26), /* VP12 */
+ ILI9881C_COMMAND_INSTR(0xb1, 0x4c), /* VP8 */
+ ILI9881C_COMMAND_INSTR(0xb2, 0x5d), /* VP4 */
+ ILI9881C_COMMAND_INSTR(0xb3, 0x3f), /* VP0 */
+ ILI9881C_COMMAND_INSTR(0xc0, 0x00), /* VN255 GAMMA N */
+ ILI9881C_COMMAND_INSTR(0xc1, 0x13), /* VN251 */
+ ILI9881C_COMMAND_INSTR(0xc2, 0x23), /* VN247 */
+ ILI9881C_COMMAND_INSTR(0xc3, 0x14), /* VN243 */
+ ILI9881C_COMMAND_INSTR(0xc4, 0x16), /* VN239 */
+ ILI9881C_COMMAND_INSTR(0xc5, 0x29), /* VN231 */
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1e), /* VN219 */
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1d), /* VN203 */
+ ILI9881C_COMMAND_INSTR(0xc8, 0x86), /* VN175 */
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1e), /* VN144 */
+ ILI9881C_COMMAND_INSTR(0xca, 0x29), /* VN111 */
+ ILI9881C_COMMAND_INSTR(0xcb, 0x74), /* VN80 */
+ ILI9881C_COMMAND_INSTR(0xcc, 0x19), /* VN52 */
+ ILI9881C_COMMAND_INSTR(0xcd, 0x17), /* VN36 */
+ ILI9881C_COMMAND_INSTR(0xce, 0x4b), /* VN24 */
+ ILI9881C_COMMAND_INSTR(0xcf, 0x20), /* VN16 */
+ ILI9881C_COMMAND_INSTR(0xd0, 0x26), /* VN12 */
+ ILI9881C_COMMAND_INSTR(0xd1, 0x4c), /* VN8 */
+ ILI9881C_COMMAND_INSTR(0xd2, 0x5d), /* VN4 */
+ ILI9881C_COMMAND_INSTR(0xd3, 0x3f), /* VN0 */
};
static const struct ili9881c_instr kd050hdfia020_init[] = {
@@ -517,7 +517,7 @@ static const struct ili9881c_instr kd050hdfia020_init[] = {
ILI9881C_COMMAND_INSTR(0x35, 0x00),
ILI9881C_COMMAND_INSTR(0x36, 0x00),
ILI9881C_COMMAND_INSTR(0x37, 0x00),
- ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
ILI9881C_COMMAND_INSTR(0x3a, 0x40),
ILI9881C_COMMAND_INSTR(0x3b, 0x40),
@@ -549,10 +549,10 @@ static const struct ili9881c_instr kd050hdfia020_init[] = {
ILI9881C_COMMAND_INSTR(0x60, 0x00),
ILI9881C_COMMAND_INSTR(0x61, 0x15),
ILI9881C_COMMAND_INSTR(0x62, 0x14),
- ILI9881C_COMMAND_INSTR(0x63, 0x0E),
- ILI9881C_COMMAND_INSTR(0x64, 0x0F),
- ILI9881C_COMMAND_INSTR(0x65, 0x0C),
- ILI9881C_COMMAND_INSTR(0x66, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x66, 0x0d),
ILI9881C_COMMAND_INSTR(0x67, 0x06),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
ILI9881C_COMMAND_INSTR(0x69, 0x07),
@@ -571,10 +571,10 @@ static const struct ili9881c_instr kd050hdfia020_init[] = {
ILI9881C_COMMAND_INSTR(0x76, 0x00),
ILI9881C_COMMAND_INSTR(0x77, 0x14),
ILI9881C_COMMAND_INSTR(0x78, 0x15),
- ILI9881C_COMMAND_INSTR(0x79, 0x0E),
- ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
- ILI9881C_COMMAND_INSTR(0x7b, 0x0C),
- ILI9881C_COMMAND_INSTR(0x7c, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x0d),
ILI9881C_COMMAND_INSTR(0x7d, 0x06),
ILI9881C_COMMAND_INSTR(0x7e, 0x02),
ILI9881C_COMMAND_INSTR(0x7f, 0x07),
@@ -587,71 +587,71 @@ static const struct ili9881c_instr kd050hdfia020_init[] = {
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(0x4),
- ILI9881C_COMMAND_INSTR(0x6C, 0x15),
- ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
- ILI9881C_COMMAND_INSTR(0x6F, 0x33),
- ILI9881C_COMMAND_INSTR(0x3A, 0x94),
- ILI9881C_COMMAND_INSTR(0x8D, 0x15),
- ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x94),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x15),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
ILI9881C_COMMAND_INSTR(0x26, 0x76),
- ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
- ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_COMMAND_INSTR(0xb5, 0x06),
ILI9881C_SWITCH_PAGE_INSTR(0x1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x53, 0x90),
- ILI9881C_COMMAND_INSTR(0x55, 0xA2),
- ILI9881C_COMMAND_INSTR(0x50, 0xB7),
- ILI9881C_COMMAND_INSTR(0x51, 0xB7),
+ ILI9881C_COMMAND_INSTR(0x55, 0xa2),
+ ILI9881C_COMMAND_INSTR(0x50, 0xb7),
+ ILI9881C_COMMAND_INSTR(0x51, 0xb7),
ILI9881C_COMMAND_INSTR(0x60, 0x22),
ILI9881C_COMMAND_INSTR(0x61, 0x00),
ILI9881C_COMMAND_INSTR(0x62, 0x19),
ILI9881C_COMMAND_INSTR(0x63, 0x10),
- ILI9881C_COMMAND_INSTR(0xA0, 0x08),
- ILI9881C_COMMAND_INSTR(0xA1, 0x1A),
- ILI9881C_COMMAND_INSTR(0xA2, 0x27),
- ILI9881C_COMMAND_INSTR(0xA3, 0x15),
- ILI9881C_COMMAND_INSTR(0xA4, 0x17),
- ILI9881C_COMMAND_INSTR(0xA5, 0x2A),
- ILI9881C_COMMAND_INSTR(0xA6, 0x1E),
- ILI9881C_COMMAND_INSTR(0xA7, 0x1F),
- ILI9881C_COMMAND_INSTR(0xA8, 0x8B),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1B),
- ILI9881C_COMMAND_INSTR(0xAA, 0x27),
- ILI9881C_COMMAND_INSTR(0xAB, 0x78),
- ILI9881C_COMMAND_INSTR(0xAC, 0x18),
- ILI9881C_COMMAND_INSTR(0xAD, 0x18),
- ILI9881C_COMMAND_INSTR(0xAE, 0x4C),
- ILI9881C_COMMAND_INSTR(0xAF, 0x21),
- ILI9881C_COMMAND_INSTR(0xB0, 0x27),
- ILI9881C_COMMAND_INSTR(0xB1, 0x54),
- ILI9881C_COMMAND_INSTR(0xB2, 0x67),
- ILI9881C_COMMAND_INSTR(0xB3, 0x39),
- ILI9881C_COMMAND_INSTR(0xC0, 0x08),
- ILI9881C_COMMAND_INSTR(0xC1, 0x1A),
- ILI9881C_COMMAND_INSTR(0xC2, 0x27),
- ILI9881C_COMMAND_INSTR(0xC3, 0x15),
- ILI9881C_COMMAND_INSTR(0xC4, 0x17),
- ILI9881C_COMMAND_INSTR(0xC5, 0x2A),
- ILI9881C_COMMAND_INSTR(0xC6, 0x1E),
- ILI9881C_COMMAND_INSTR(0xC7, 0x1F),
- ILI9881C_COMMAND_INSTR(0xC8, 0x8B),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1B),
- ILI9881C_COMMAND_INSTR(0xCA, 0x27),
- ILI9881C_COMMAND_INSTR(0xCB, 0x78),
- ILI9881C_COMMAND_INSTR(0xCC, 0x18),
- ILI9881C_COMMAND_INSTR(0xCD, 0x18),
- ILI9881C_COMMAND_INSTR(0xCE, 0x4C),
- ILI9881C_COMMAND_INSTR(0xCF, 0x21),
- ILI9881C_COMMAND_INSTR(0xD0, 0x27),
- ILI9881C_COMMAND_INSTR(0xD1, 0x54),
- ILI9881C_COMMAND_INSTR(0xD2, 0x67),
- ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x27),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x17),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x8b),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x27),
+ ILI9881C_COMMAND_INSTR(0xab, 0x78),
+ ILI9881C_COMMAND_INSTR(0xac, 0x18),
+ ILI9881C_COMMAND_INSTR(0xad, 0x18),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4c),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x21),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x54),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x67),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x27),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x17),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x8b),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xca, 0x27),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x78),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x18),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x18),
+ ILI9881C_COMMAND_INSTR(0xce, 0x4c),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x21),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x54),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x67),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x39),
ILI9881C_SWITCH_PAGE_INSTR(0),
ILI9881C_COMMAND_INSTR(0x35, 0x00),
- ILI9881C_COMMAND_INSTR(0x3A, 0x7),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x7),
};
static const struct ili9881c_instr tl050hdv35_init[] = {
@@ -696,7 +696,7 @@ static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_COMMAND_INSTR(0x35, 0x00),
ILI9881C_COMMAND_INSTR(0x36, 0x00),
ILI9881C_COMMAND_INSTR(0x37, 0x00),
- ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
ILI9881C_COMMAND_INSTR(0x3a, 0x40),
ILI9881C_COMMAND_INSTR(0x3b, 0x40),
@@ -750,7 +750,7 @@ static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_COMMAND_INSTR(0x7f, 0x07),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
ILI9881C_COMMAND_INSTR(0x38, 0x01),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
@@ -820,6 +820,202 @@ static const struct ili9881c_instr tl050hdv35_init[] = {
ILI9881C_COMMAND_INSTR(0xd3, 0x39),
};
+static const struct ili9881c_instr w552946aaa_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x53),
+ ILI9881C_COMMAND_INSTR(0x04, 0x53),
+ ILI9881C_COMMAND_INSTR(0x05, 0x13),
+ ILI9881C_COMMAND_INSTR(0x06, 0x04),
+ ILI9881C_COMMAND_INSTR(0x07, 0x02),
+ ILI9881C_COMMAND_INSTR(0x08, 0x02),
+ ILI9881C_COMMAND_INSTR(0x09, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x10, 0x00),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x08),
+ ILI9881C_COMMAND_INSTR(0x16, 0x10),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x08),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x02),
+ ILI9881C_COMMAND_INSTR(0x21, 0x09),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x55),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x05),
+ ILI9881C_COMMAND_INSTR(0x36, 0x05),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
+ ILI9881C_COMMAND_INSTR(0x39, 0x35),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x88),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x1f),
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x03),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x14),
+ ILI9881C_COMMAND_INSTR(0x60, 0x15),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x65, 0x10),
+ ILI9881C_COMMAND_INSTR(0x66, 0x11),
+ ILI9881C_COMMAND_INSTR(0x67, 0x08),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x06),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x14),
+ ILI9881C_COMMAND_INSTR(0x76, 0x15),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x11),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x10),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x80, 0x02),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x02),
+ ILI9881C_COMMAND_INSTR(0x83, 0x02),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x08),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x00, 0x80),
+ ILI9881C_COMMAND_INSTR(0x70, 0x00),
+ ILI9881C_COMMAND_INSTR(0x71, 0x00),
+ ILI9881C_COMMAND_INSTR(0x66, 0xfe),
+ ILI9881C_COMMAND_INSTR(0x82, 0x15),
+ ILI9881C_COMMAND_INSTR(0x84, 0x15),
+ ILI9881C_COMMAND_INSTR(0x85, 0x15),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x24),
+ ILI9881C_COMMAND_INSTR(0x32, 0xac),
+ ILI9881C_COMMAND_INSTR(0x8c, 0x80),
+ ILI9881C_COMMAND_INSTR(0x3c, 0xf5),
+ ILI9881C_COMMAND_INSTR(0x88, 0x33),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x78),
+ ILI9881C_COMMAND_INSTR(0x55, 0x7b),
+ ILI9881C_COMMAND_INSTR(0x60, 0x20),
+ ILI9881C_COMMAND_INSTR(0x61, 0x00),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x10),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x13),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x26),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x67),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29),
+ ILI9881C_COMMAND_INSTR(0xab, 0x5b),
+ ILI9881C_COMMAND_INSTR(0xac, 0x26),
+ ILI9881C_COMMAND_INSTR(0xad, 0x28),
+ ILI9881C_COMMAND_INSTR(0xae, 0x5c),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x30),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x31),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x32),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x00),
+ ILI9881C_COMMAND_INSTR(0xb6, 0x02),
+ ILI9881C_COMMAND_INSTR(0xb7, 0x03),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x10),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x13),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x26),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x67),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xca, 0x29),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x5b),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x26),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x28),
+ ILI9881C_COMMAND_INSTR(0xce, 0x5c),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x30),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x31),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x32),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x00),
+ ILI9881C_SWITCH_PAGE_INSTR(0),
+};
+
static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
@@ -831,12 +1029,12 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x07, 0x02),
ILI9881C_COMMAND_INSTR(0x08, 0x02),
ILI9881C_COMMAND_INSTR(0x09, 0x00),
- ILI9881C_COMMAND_INSTR(0x0A, 0x00),
- ILI9881C_COMMAND_INSTR(0x0B, 0x00),
- ILI9881C_COMMAND_INSTR(0x0C, 0x00),
- ILI9881C_COMMAND_INSTR(0x0D, 0x00),
- ILI9881C_COMMAND_INSTR(0x0E, 0x00),
- ILI9881C_COMMAND_INSTR(0x0F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
ILI9881C_COMMAND_INSTR(0x10, 0x00),
ILI9881C_COMMAND_INSTR(0x11, 0x00),
@@ -848,12 +1046,12 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x17, 0x00),
ILI9881C_COMMAND_INSTR(0x18, 0x08),
ILI9881C_COMMAND_INSTR(0x19, 0x00),
- ILI9881C_COMMAND_INSTR(0x1A, 0x00),
- ILI9881C_COMMAND_INSTR(0x1B, 0x00),
- ILI9881C_COMMAND_INSTR(0x1C, 0x00),
- ILI9881C_COMMAND_INSTR(0x1D, 0x00),
- ILI9881C_COMMAND_INSTR(0x1E, 0xC0),
- ILI9881C_COMMAND_INSTR(0x1F, 0x80),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
ILI9881C_COMMAND_INSTR(0x20, 0x02),
ILI9881C_COMMAND_INSTR(0x21, 0x09),
@@ -865,12 +1063,12 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x27, 0x00),
ILI9881C_COMMAND_INSTR(0x28, 0x55),
ILI9881C_COMMAND_INSTR(0x29, 0x03),
- ILI9881C_COMMAND_INSTR(0x2A, 0x00),
- ILI9881C_COMMAND_INSTR(0x2B, 0x00),
- ILI9881C_COMMAND_INSTR(0x2C, 0x00),
- ILI9881C_COMMAND_INSTR(0x2D, 0x00),
- ILI9881C_COMMAND_INSTR(0x2E, 0x00),
- ILI9881C_COMMAND_INSTR(0x2F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
ILI9881C_COMMAND_INSTR(0x30, 0x00),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
@@ -880,54 +1078,54 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x35, 0x05),
ILI9881C_COMMAND_INSTR(0x36, 0x05),
ILI9881C_COMMAND_INSTR(0x37, 0x00),
- ILI9881C_COMMAND_INSTR(0x38, 0x3C),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
ILI9881C_COMMAND_INSTR(0x39, 0x35),
- ILI9881C_COMMAND_INSTR(0x3A, 0x00),
- ILI9881C_COMMAND_INSTR(0x3B, 0x40),
- ILI9881C_COMMAND_INSTR(0x3C, 0x00),
- ILI9881C_COMMAND_INSTR(0x3D, 0x00),
- ILI9881C_COMMAND_INSTR(0x3E, 0x00),
- ILI9881C_COMMAND_INSTR(0x3F, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x40),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
ILI9881C_COMMAND_INSTR(0x40, 0x00),
ILI9881C_COMMAND_INSTR(0x41, 0x88),
ILI9881C_COMMAND_INSTR(0x42, 0x00),
ILI9881C_COMMAND_INSTR(0x43, 0x00),
- ILI9881C_COMMAND_INSTR(0x44, 0x1F),
+ ILI9881C_COMMAND_INSTR(0x44, 0x1f),
ILI9881C_COMMAND_INSTR(0x50, 0x01),
ILI9881C_COMMAND_INSTR(0x51, 0x23),
ILI9881C_COMMAND_INSTR(0x52, 0x45),
ILI9881C_COMMAND_INSTR(0x53, 0x67),
ILI9881C_COMMAND_INSTR(0x54, 0x89),
- ILI9881C_COMMAND_INSTR(0x55, 0xaB),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
ILI9881C_COMMAND_INSTR(0x56, 0x01),
ILI9881C_COMMAND_INSTR(0x57, 0x23),
ILI9881C_COMMAND_INSTR(0x58, 0x45),
ILI9881C_COMMAND_INSTR(0x59, 0x67),
- ILI9881C_COMMAND_INSTR(0x5A, 0x89),
- ILI9881C_COMMAND_INSTR(0x5B, 0xAB),
- ILI9881C_COMMAND_INSTR(0x5C, 0xCD),
- ILI9881C_COMMAND_INSTR(0x5D, 0xEF),
- ILI9881C_COMMAND_INSTR(0x5E, 0x03),
- ILI9881C_COMMAND_INSTR(0x5F, 0x14),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x03),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x14),
ILI9881C_COMMAND_INSTR(0x60, 0x15),
- ILI9881C_COMMAND_INSTR(0x61, 0x0C),
- ILI9881C_COMMAND_INSTR(0x62, 0x0D),
- ILI9881C_COMMAND_INSTR(0x63, 0x0E),
- ILI9881C_COMMAND_INSTR(0x64, 0x0F),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0f),
ILI9881C_COMMAND_INSTR(0x65, 0x10),
ILI9881C_COMMAND_INSTR(0x66, 0x11),
ILI9881C_COMMAND_INSTR(0x67, 0x08),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
- ILI9881C_COMMAND_INSTR(0x69, 0x0A),
- ILI9881C_COMMAND_INSTR(0x6A, 0x02),
- ILI9881C_COMMAND_INSTR(0x6B, 0x02),
- ILI9881C_COMMAND_INSTR(0x6C, 0x02),
- ILI9881C_COMMAND_INSTR(0x6D, 0x02),
- ILI9881C_COMMAND_INSTR(0x6E, 0x02),
- ILI9881C_COMMAND_INSTR(0x6F, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
ILI9881C_COMMAND_INSTR(0x70, 0x02),
ILI9881C_COMMAND_INSTR(0x71, 0x02),
@@ -936,15 +1134,15 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x74, 0x02),
ILI9881C_COMMAND_INSTR(0x75, 0x14),
ILI9881C_COMMAND_INSTR(0x76, 0x15),
- ILI9881C_COMMAND_INSTR(0x77, 0x0F),
- ILI9881C_COMMAND_INSTR(0x78, 0x0E),
- ILI9881C_COMMAND_INSTR(0x79, 0x0D),
- ILI9881C_COMMAND_INSTR(0x7A, 0x0C),
- ILI9881C_COMMAND_INSTR(0x7B, 0x11),
- ILI9881C_COMMAND_INSTR(0x7C, 0x10),
- ILI9881C_COMMAND_INSTR(0x7D, 0x06),
- ILI9881C_COMMAND_INSTR(0x7E, 0x02),
- ILI9881C_COMMAND_INSTR(0x7F, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x11),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x10),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x0a),
ILI9881C_COMMAND_INSTR(0x80, 0x02),
ILI9881C_COMMAND_INSTR(0x81, 0x02),
@@ -956,74 +1154,74 @@ static const struct ili9881c_instr w552946ab_init[] = {
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x08),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
ILI9881C_COMMAND_INSTR(0x00, 0x80),
ILI9881C_COMMAND_INSTR(0x70, 0x00),
ILI9881C_COMMAND_INSTR(0x71, 0x00),
- ILI9881C_COMMAND_INSTR(0x66, 0xFE),
+ ILI9881C_COMMAND_INSTR(0x66, 0xfe),
ILI9881C_COMMAND_INSTR(0x82, 0x15),
ILI9881C_COMMAND_INSTR(0x84, 0x15),
ILI9881C_COMMAND_INSTR(0x85, 0x15),
ILI9881C_COMMAND_INSTR(0x3a, 0x24),
- ILI9881C_COMMAND_INSTR(0x32, 0xAC),
- ILI9881C_COMMAND_INSTR(0x8C, 0x80),
- ILI9881C_COMMAND_INSTR(0x3C, 0xF5),
+ ILI9881C_COMMAND_INSTR(0x32, 0xac),
+ ILI9881C_COMMAND_INSTR(0x8c, 0x80),
+ ILI9881C_COMMAND_INSTR(0x3c, 0xf5),
ILI9881C_COMMAND_INSTR(0x88, 0x33),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x53, 0x78),
- ILI9881C_COMMAND_INSTR(0x50, 0x5B),
- ILI9881C_COMMAND_INSTR(0x51, 0x5B),
+ ILI9881C_COMMAND_INSTR(0x50, 0x5b),
+ ILI9881C_COMMAND_INSTR(0x51, 0x5b),
ILI9881C_COMMAND_INSTR(0x60, 0x20),
ILI9881C_COMMAND_INSTR(0x61, 0x00),
- ILI9881C_COMMAND_INSTR(0x62, 0x0D),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
ILI9881C_COMMAND_INSTR(0x63, 0x00),
- ILI9881C_COMMAND_INSTR(0xA0, 0x00),
- ILI9881C_COMMAND_INSTR(0xA1, 0x10),
- ILI9881C_COMMAND_INSTR(0xA2, 0x1C),
- ILI9881C_COMMAND_INSTR(0xA3, 0x13),
- ILI9881C_COMMAND_INSTR(0xA4, 0x15),
- ILI9881C_COMMAND_INSTR(0xA5, 0x26),
- ILI9881C_COMMAND_INSTR(0xA6, 0x1A),
- ILI9881C_COMMAND_INSTR(0xA7, 0x1D),
- ILI9881C_COMMAND_INSTR(0xA8, 0x67),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1C),
- ILI9881C_COMMAND_INSTR(0xAA, 0x29),
- ILI9881C_COMMAND_INSTR(0xAB, 0x5B),
- ILI9881C_COMMAND_INSTR(0xAC, 0x26),
- ILI9881C_COMMAND_INSTR(0xAD, 0x28),
- ILI9881C_COMMAND_INSTR(0xAE, 0x5C),
- ILI9881C_COMMAND_INSTR(0xAF, 0x30),
- ILI9881C_COMMAND_INSTR(0xB0, 0x31),
- ILI9881C_COMMAND_INSTR(0xB1, 0x2E),
- ILI9881C_COMMAND_INSTR(0xB2, 0x32),
- ILI9881C_COMMAND_INSTR(0xB3, 0x00),
-
- ILI9881C_COMMAND_INSTR(0xC0, 0x00),
- ILI9881C_COMMAND_INSTR(0xC1, 0x10),
- ILI9881C_COMMAND_INSTR(0xC2, 0x1C),
- ILI9881C_COMMAND_INSTR(0xC3, 0x13),
- ILI9881C_COMMAND_INSTR(0xC4, 0x15),
- ILI9881C_COMMAND_INSTR(0xC5, 0x26),
- ILI9881C_COMMAND_INSTR(0xC6, 0x1A),
- ILI9881C_COMMAND_INSTR(0xC7, 0x1D),
- ILI9881C_COMMAND_INSTR(0xC8, 0x67),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1C),
- ILI9881C_COMMAND_INSTR(0xCA, 0x29),
- ILI9881C_COMMAND_INSTR(0xCB, 0x5B),
- ILI9881C_COMMAND_INSTR(0xCC, 0x26),
- ILI9881C_COMMAND_INSTR(0xCD, 0x28),
- ILI9881C_COMMAND_INSTR(0xCE, 0x5C),
- ILI9881C_COMMAND_INSTR(0xCF, 0x30),
- ILI9881C_COMMAND_INSTR(0xD0, 0x31),
- ILI9881C_COMMAND_INSTR(0xD1, 0x2E),
- ILI9881C_COMMAND_INSTR(0xD2, 0x32),
- ILI9881C_COMMAND_INSTR(0xD3, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x10),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x13),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x26),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x67),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29),
+ ILI9881C_COMMAND_INSTR(0xab, 0x5b),
+ ILI9881C_COMMAND_INSTR(0xac, 0x26),
+ ILI9881C_COMMAND_INSTR(0xad, 0x28),
+ ILI9881C_COMMAND_INSTR(0xae, 0x5c),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x30),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x31),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x32),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x00),
+
+ ILI9881C_COMMAND_INSTR(0xc0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x10),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x13),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x26),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x67),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xca, 0x29),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x5b),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x26),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x28),
+ ILI9881C_COMMAND_INSTR(0xce, 0x5c),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x30),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x31),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x32),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x00),
ILI9881C_SWITCH_PAGE_INSTR(0),
};
@@ -1032,10 +1230,10 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0x01, 0x00),
ILI9881C_COMMAND_INSTR(0x02, 0x00),
ILI9881C_COMMAND_INSTR(0x03, 0x73),
- ILI9881C_COMMAND_INSTR(0x04, 0xD3),
+ ILI9881C_COMMAND_INSTR(0x04, 0xd3),
ILI9881C_COMMAND_INSTR(0x05, 0x00),
- ILI9881C_COMMAND_INSTR(0x06, 0x0A),
- ILI9881C_COMMAND_INSTR(0x07, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x07, 0x0e),
ILI9881C_COMMAND_INSTR(0x08, 0x00),
ILI9881C_COMMAND_INSTR(0x09, 0x01),
ILI9881C_COMMAND_INSTR(0x0a, 0x01),
@@ -1117,10 +1315,10 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0x5f, 0x02),
ILI9881C_COMMAND_INSTR(0x60, 0x00),
ILI9881C_COMMAND_INSTR(0x61, 0x01),
- ILI9881C_COMMAND_INSTR(0x62, 0x0D),
- ILI9881C_COMMAND_INSTR(0x63, 0x0C),
- ILI9881C_COMMAND_INSTR(0x64, 0x0F),
- ILI9881C_COMMAND_INSTR(0x65, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x65, 0x0e),
ILI9881C_COMMAND_INSTR(0x66, 0x06),
ILI9881C_COMMAND_INSTR(0x67, 0x07),
ILI9881C_COMMAND_INSTR(0x68, 0x02),
@@ -1139,10 +1337,10 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0x75, 0x02),
ILI9881C_COMMAND_INSTR(0x76, 0x00),
ILI9881C_COMMAND_INSTR(0x77, 0x01),
- ILI9881C_COMMAND_INSTR(0x78, 0x0D),
- ILI9881C_COMMAND_INSTR(0x79, 0x0C),
- ILI9881C_COMMAND_INSTR(0x7a, 0x0F),
- ILI9881C_COMMAND_INSTR(0x7b, 0x0E),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x0e),
ILI9881C_COMMAND_INSTR(0x7c, 0x06),
ILI9881C_COMMAND_INSTR(0x7d, 0x07),
ILI9881C_COMMAND_INSTR(0x7e, 0x02),
@@ -1157,7 +1355,7 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
ILI9881C_COMMAND_INSTR(0x6c, 0x15),
@@ -1170,60 +1368,248 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
- ILI9881C_COMMAND_INSTR(0x31, 0x0B),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x31, 0x0b),
ILI9881C_COMMAND_INSTR(0x50, 0xa5),
ILI9881C_COMMAND_INSTR(0x51, 0xa0),
ILI9881C_COMMAND_INSTR(0x53, 0x70),
- ILI9881C_COMMAND_INSTR(0x55, 0x7A),
+ ILI9881C_COMMAND_INSTR(0x55, 0x7a),
ILI9881C_COMMAND_INSTR(0x60, 0x14),
- ILI9881C_COMMAND_INSTR(0xA0, 0x00),
- ILI9881C_COMMAND_INSTR(0xA1, 0x53),
- ILI9881C_COMMAND_INSTR(0xA2, 0x50),
- ILI9881C_COMMAND_INSTR(0xA3, 0x20),
- ILI9881C_COMMAND_INSTR(0xA4, 0x27),
- ILI9881C_COMMAND_INSTR(0xA5, 0x33),
- ILI9881C_COMMAND_INSTR(0xA6, 0x25),
- ILI9881C_COMMAND_INSTR(0xA7, 0x25),
- ILI9881C_COMMAND_INSTR(0xA8, 0xD4),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1A),
- ILI9881C_COMMAND_INSTR(0xAA, 0x2B),
- ILI9881C_COMMAND_INSTR(0xAB, 0xB5),
- ILI9881C_COMMAND_INSTR(0xAC, 0x19),
- ILI9881C_COMMAND_INSTR(0xAD, 0x18),
- ILI9881C_COMMAND_INSTR(0xAE, 0x53),
- ILI9881C_COMMAND_INSTR(0xAF, 0x1A),
- ILI9881C_COMMAND_INSTR(0xB0, 0x25),
- ILI9881C_COMMAND_INSTR(0xB1, 0x62),
- ILI9881C_COMMAND_INSTR(0xB2, 0x6A),
- ILI9881C_COMMAND_INSTR(0xB3, 0x31),
-
- ILI9881C_COMMAND_INSTR(0xC0, 0x00),
- ILI9881C_COMMAND_INSTR(0xC1, 0x53),
- ILI9881C_COMMAND_INSTR(0xC2, 0x50),
- ILI9881C_COMMAND_INSTR(0xC3, 0x20),
- ILI9881C_COMMAND_INSTR(0xC4, 0x27),
- ILI9881C_COMMAND_INSTR(0xC5, 0x33),
- ILI9881C_COMMAND_INSTR(0xC6, 0x25),
- ILI9881C_COMMAND_INSTR(0xC7, 0x25),
- ILI9881C_COMMAND_INSTR(0xC8, 0xD4),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1A),
- ILI9881C_COMMAND_INSTR(0xCA, 0x2B),
- ILI9881C_COMMAND_INSTR(0xCB, 0xB5),
- ILI9881C_COMMAND_INSTR(0xCC, 0x19),
- ILI9881C_COMMAND_INSTR(0xCD, 0x18),
- ILI9881C_COMMAND_INSTR(0xCE, 0x53),
- ILI9881C_COMMAND_INSTR(0xCF, 0x1A),
- ILI9881C_COMMAND_INSTR(0xD0, 0x25),
- ILI9881C_COMMAND_INSTR(0xD1, 0x62),
- ILI9881C_COMMAND_INSTR(0xD2, 0x6A),
- ILI9881C_COMMAND_INSTR(0xD3, 0x31),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xa8, 0xd4),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xab, 0xb5),
+ ILI9881C_COMMAND_INSTR(0xac, 0x19),
+ ILI9881C_COMMAND_INSTR(0xad, 0x18),
+ ILI9881C_COMMAND_INSTR(0xae, 0x53),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x6a),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x31),
+
+ ILI9881C_COMMAND_INSTR(0xc0, 0x00),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x53),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x50),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x20),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x27),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x33),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x25),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x25),
+ ILI9881C_COMMAND_INSTR(0xc8, 0xd4),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xca, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xcb, 0xb5),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x19),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x18),
+ ILI9881C_COMMAND_INSTR(0xce, 0x53),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x25),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x62),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x6a),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x31),
ILI9881C_SWITCH_PAGE_INSTR(0),
ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c),
ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
};
+static const struct ili9881c_instr rpi_5inch_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x73),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x06),
+ ILI9881C_COMMAND_INSTR(0x07, 0x02),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x10, 0x01),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x01),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x04),
+ ILI9881C_COMMAND_INSTR(0x21, 0x03),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x03),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x03),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x00),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x01),
+ ILI9881C_COMMAND_INSTR(0x51, 0x23),
+ ILI9881C_COMMAND_INSTR(0x52, 0x45),
+ ILI9881C_COMMAND_INSTR(0x53, 0x67),
+ ILI9881C_COMMAND_INSTR(0x54, 0x89),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x01),
+ ILI9881C_COMMAND_INSTR(0x57, 0x23),
+ ILI9881C_COMMAND_INSTR(0x58, 0x45),
+ ILI9881C_COMMAND_INSTR(0x59, 0x67),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x89),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xab),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xcd),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xef),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x10),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x09),
+ ILI9881C_COMMAND_INSTR(0x60, 0x08),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x63, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x64, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x65, 0x02),
+ ILI9881C_COMMAND_INSTR(0x66, 0x02),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x06),
+ ILI9881C_COMMAND_INSTR(0x72, 0x07),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x06),
+ ILI9881C_COMMAND_INSTR(0x76, 0x07),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x79, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x80, 0x02),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x02),
+ ILI9881C_COMMAND_INSTR(0x83, 0x02),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x09),
+ ILI9881C_COMMAND_INSTR(0x88, 0x08),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x57),
+ ILI9881C_COMMAND_INSTR(0x3a, 0xa4),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x1a),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x35),
+ ILI9881C_COMMAND_INSTR(0x55, 0x50),
+ ILI9881C_COMMAND_INSTR(0x50, 0xaf),
+ ILI9881C_COMMAND_INSTR(0x51, 0xaf),
+ ILI9881C_COMMAND_INSTR(0x60, 0x14),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x2c),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x14),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x19),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x22),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x23),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x97),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29),
+ ILI9881C_COMMAND_INSTR(0xab, 0x7b),
+ ILI9881C_COMMAND_INSTR(0xac, 0x18),
+ ILI9881C_COMMAND_INSTR(0xad, 0x17),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x52),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x63),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x2c),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x14),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x19),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x2e),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x22),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x23),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x97),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xca, 0x29),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x7b),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x18),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x17),
+ ILI9881C_COMMAND_INSTR(0xce, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x52),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x63),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x39),
+};
+
static const struct ili9881c_instr rpi_7inch_init[] = {
ILI9881C_SWITCH_PAGE_INSTR(3),
ILI9881C_COMMAND_INSTR(0x01, 0x00),
@@ -1352,22 +1738,22 @@ static const struct ili9881c_instr rpi_7inch_init[] = {
ILI9881C_COMMAND_INSTR(0x87, 0x02),
ILI9881C_COMMAND_INSTR(0x88, 0x02),
ILI9881C_COMMAND_INSTR(0x89, 0x02),
- ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
ILI9881C_SWITCH_PAGE_INSTR(4),
- ILI9881C_COMMAND_INSTR(0x6C, 0x15),
- ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
- ILI9881C_COMMAND_INSTR(0x6F, 0x33),
- ILI9881C_COMMAND_INSTR(0x3B, 0x98),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x98),
ILI9881C_COMMAND_INSTR(0x3a, 0x94),
- ILI9881C_COMMAND_INSTR(0x8D, 0x14),
- ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x14),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
ILI9881C_COMMAND_INSTR(0x26, 0x76),
- ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
- ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_COMMAND_INSTR(0xb5, 0x06),
ILI9881C_COMMAND_INSTR(0x38, 0x01),
ILI9881C_COMMAND_INSTR(0x39, 0x00),
ILI9881C_SWITCH_PAGE_INSTR(1),
- ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
ILI9881C_COMMAND_INSTR(0x31, 0x00),
ILI9881C_COMMAND_INSTR(0x53, 0x7d),
ILI9881C_COMMAND_INSTR(0x55, 0x8f),
@@ -1375,46 +1761,240 @@ static const struct ili9881c_instr rpi_7inch_init[] = {
ILI9881C_COMMAND_INSTR(0x50, 0x96),
ILI9881C_COMMAND_INSTR(0x51, 0x96),
ILI9881C_COMMAND_INSTR(0x60, 0x23),
- ILI9881C_COMMAND_INSTR(0xA0, 0x08),
- ILI9881C_COMMAND_INSTR(0xA1, 0x1d),
- ILI9881C_COMMAND_INSTR(0xA2, 0x2a),
- ILI9881C_COMMAND_INSTR(0xA3, 0x10),
- ILI9881C_COMMAND_INSTR(0xA4, 0x15),
- ILI9881C_COMMAND_INSTR(0xA5, 0x28),
- ILI9881C_COMMAND_INSTR(0xA6, 0x1c),
- ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
- ILI9881C_COMMAND_INSTR(0xA8, 0x7e),
- ILI9881C_COMMAND_INSTR(0xA9, 0x1d),
- ILI9881C_COMMAND_INSTR(0xAA, 0x29),
- ILI9881C_COMMAND_INSTR(0xAB, 0x6b),
- ILI9881C_COMMAND_INSTR(0xAC, 0x1a),
- ILI9881C_COMMAND_INSTR(0xAD, 0x18),
- ILI9881C_COMMAND_INSTR(0xAE, 0x4b),
- ILI9881C_COMMAND_INSTR(0xAF, 0x20),
- ILI9881C_COMMAND_INSTR(0xB0, 0x27),
- ILI9881C_COMMAND_INSTR(0xB1, 0x50),
- ILI9881C_COMMAND_INSTR(0xB2, 0x64),
- ILI9881C_COMMAND_INSTR(0xB3, 0x39),
- ILI9881C_COMMAND_INSTR(0xC0, 0x08),
- ILI9881C_COMMAND_INSTR(0xC1, 0x1d),
- ILI9881C_COMMAND_INSTR(0xC2, 0x2a),
- ILI9881C_COMMAND_INSTR(0xC3, 0x10),
- ILI9881C_COMMAND_INSTR(0xC4, 0x15),
- ILI9881C_COMMAND_INSTR(0xC5, 0x28),
- ILI9881C_COMMAND_INSTR(0xC6, 0x1c),
- ILI9881C_COMMAND_INSTR(0xC7, 0x1d),
- ILI9881C_COMMAND_INSTR(0xC8, 0x7e),
- ILI9881C_COMMAND_INSTR(0xC9, 0x1d),
- ILI9881C_COMMAND_INSTR(0xCA, 0x29),
- ILI9881C_COMMAND_INSTR(0xCB, 0x6b),
- ILI9881C_COMMAND_INSTR(0xCC, 0x1a),
- ILI9881C_COMMAND_INSTR(0xCD, 0x18),
- ILI9881C_COMMAND_INSTR(0xCE, 0x4b),
- ILI9881C_COMMAND_INSTR(0xCF, 0x20),
- ILI9881C_COMMAND_INSTR(0xD0, 0x27),
- ILI9881C_COMMAND_INSTR(0xD1, 0x50),
- ILI9881C_COMMAND_INSTR(0xD2, 0x64),
- ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xa8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x29),
+ ILI9881C_COMMAND_INSTR(0xab, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xac, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xad, 0x18),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x20),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xc8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xca, 0x29),
+ ILI9881C_COMMAND_INSTR(0xcb, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x18),
+ ILI9881C_COMMAND_INSTR(0xce, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x20),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x39),
+};
+
+static const struct ili9881c_instr bsd1218_a101kl68_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x55),
+ ILI9881C_COMMAND_INSTR(0x04, 0x55),
+ ILI9881C_COMMAND_INSTR(0x05, 0x03),
+ ILI9881C_COMMAND_INSTR(0x06, 0x06),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x07),
+ ILI9881C_COMMAND_INSTR(0x09, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x10, 0x00),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0xc0),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x04),
+ ILI9881C_COMMAND_INSTR(0x21, 0x03),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x33),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x00),
+ ILI9881C_COMMAND_INSTR(0x51, 0x11),
+ ILI9881C_COMMAND_INSTR(0x52, 0x44),
+ ILI9881C_COMMAND_INSTR(0x53, 0x55),
+ ILI9881C_COMMAND_INSTR(0x54, 0x88),
+ ILI9881C_COMMAND_INSTR(0x55, 0xab),
+ ILI9881C_COMMAND_INSTR(0x56, 0x00),
+ ILI9881C_COMMAND_INSTR(0x57, 0x11),
+ ILI9881C_COMMAND_INSTR(0x58, 0x22),
+ ILI9881C_COMMAND_INSTR(0x59, 0x33),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x44),
+ ILI9881C_COMMAND_INSTR(0x5b, 0x55),
+ ILI9881C_COMMAND_INSTR(0x5c, 0x66),
+ ILI9881C_COMMAND_INSTR(0x5d, 0x77),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x60, 0x02),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x62, 0x09),
+ ILI9881C_COMMAND_INSTR(0x63, 0x08),
+ ILI9881C_COMMAND_INSTR(0x64, 0x13),
+ ILI9881C_COMMAND_INSTR(0x65, 0x12),
+ ILI9881C_COMMAND_INSTR(0x66, 0x11),
+ ILI9881C_COMMAND_INSTR(0x67, 0x10),
+ ILI9881C_COMMAND_INSTR(0x68, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x69, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x06),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x07),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x02),
+ ILI9881C_COMMAND_INSTR(0x76, 0x02),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x78, 0x06),
+ ILI9881C_COMMAND_INSTR(0x79, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x10),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x11),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x12),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x13),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x80, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x81, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x82, 0x09),
+ ILI9881C_COMMAND_INSTR(0x83, 0x08),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8a, 0x02),
+
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x2a),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x37),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x24),
+ ILI9881C_COMMAND_INSTR(0x8d, 0x19),
+ ILI9881C_COMMAND_INSTR(0x87, 0xba),
+ ILI9881C_COMMAND_INSTR(0xb2, 0xd1),
+ ILI9881C_COMMAND_INSTR(0x88, 0x0b),
+ ILI9881C_COMMAND_INSTR(0x38, 0x01),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0xb5, 0x02),
+ ILI9881C_COMMAND_INSTR(0x31, 0x25),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x98),
+
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x31, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x53, 0x40),
+ ILI9881C_COMMAND_INSTR(0x55, 0x45),
+ ILI9881C_COMMAND_INSTR(0x50, 0xb7),
+ ILI9881C_COMMAND_INSTR(0x51, 0xb2),
+ ILI9881C_COMMAND_INSTR(0x60, 0x07),
+ ILI9881C_COMMAND_INSTR(0xa0, 0x22),
+ ILI9881C_COMMAND_INSTR(0xa1, 0x3f),
+ ILI9881C_COMMAND_INSTR(0xa2, 0x4e),
+ ILI9881C_COMMAND_INSTR(0xa3, 0x17),
+ ILI9881C_COMMAND_INSTR(0xa4, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xa5, 0x2d),
+ ILI9881C_COMMAND_INSTR(0xa6, 0x21),
+ ILI9881C_COMMAND_INSTR(0xa7, 0x22),
+ ILI9881C_COMMAND_INSTR(0xa8, 0xc4),
+ ILI9881C_COMMAND_INSTR(0xa9, 0x1b),
+ ILI9881C_COMMAND_INSTR(0xaa, 0x25),
+ ILI9881C_COMMAND_INSTR(0xab, 0xa7),
+ ILI9881C_COMMAND_INSTR(0xac, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xad, 0x19),
+ ILI9881C_COMMAND_INSTR(0xae, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xaf, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xb0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xb1, 0x59),
+ ILI9881C_COMMAND_INSTR(0xb2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xb3, 0x3f),
+ ILI9881C_COMMAND_INSTR(0xc0, 0x22),
+ ILI9881C_COMMAND_INSTR(0xc1, 0x48),
+ ILI9881C_COMMAND_INSTR(0xc2, 0x59),
+ ILI9881C_COMMAND_INSTR(0xc3, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xc5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xc6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xc7, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xc8, 0xc4),
+ ILI9881C_COMMAND_INSTR(0xc9, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xca, 0x2b),
+ ILI9881C_COMMAND_INSTR(0xcb, 0xa3),
+ ILI9881C_COMMAND_INSTR(0xcc, 0x1f),
+ ILI9881C_COMMAND_INSTR(0xcd, 0x1e),
+ ILI9881C_COMMAND_INSTR(0xce, 0x52),
+ ILI9881C_COMMAND_INSTR(0xcf, 0x24),
+ ILI9881C_COMMAND_INSTR(0xd0, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xd1, 0x58),
+ ILI9881C_COMMAND_INSTR(0xd2, 0x68),
+ ILI9881C_COMMAND_INSTR(0xd3, 0x3f),
};
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
@@ -1433,33 +2013,24 @@ static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
 * So before any attempt at sending a command or data, we have to make
 * sure we're on the right page.
*/
-static int ili9881c_switch_page(struct ili9881c *ctx, u8 page)
+static void ili9881c_switch_page(struct mipi_dsi_multi_context *mctx, u8 page)
{
u8 buf[4] = { 0xff, 0x98, 0x81, page };
- int ret;
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
- if (ret < 0)
- return ret;
- return 0;
+ mipi_dsi_dcs_write_buffer_multi(mctx, buf, sizeof(buf));
}
-static int ili9881c_send_cmd_data(struct ili9881c *ctx, u8 cmd, u8 data)
+static void ili9881c_send_cmd_data(struct mipi_dsi_multi_context *mctx, u8 cmd, u8 data)
{
u8 buf[2] = { cmd, data };
- int ret;
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, buf, sizeof(buf));
- if (ret < 0)
- return ret;
- return 0;
+ mipi_dsi_dcs_write_buffer_multi(mctx, buf, sizeof(buf));
}
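
The two converted helpers above lean on the mipi_dsi_multi_context idiom: the context carries an accum_err field, and every *_multi() call becomes a no-op once an earlier call has failed, so a long command sequence needs a single error check at the end instead of one per write. A minimal sketch of the idiom, independent of this patch:

static int example_panel_on(struct mipi_dsi_device *dsi)
{
	struct mipi_dsi_multi_context mctx = { .dsi = dsi };

	/* Each call is skipped automatically once mctx.accum_err is set */
	mipi_dsi_dcs_exit_sleep_mode_multi(&mctx);
	mipi_dsi_msleep(&mctx, 120);
	mipi_dsi_dcs_set_display_on_multi(&mctx);

	return mctx.accum_err;	/* 0 on success, first error code otherwise */
}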
static int ili9881c_prepare(struct drm_panel *panel)
{
struct ili9881c *ctx = panel_to_ili9881c(panel);
+ struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
unsigned int i;
int ret;
@@ -1480,61 +2051,39 @@ static int ili9881c_prepare(struct drm_panel *panel)
const struct ili9881c_instr *instr = &ctx->desc->init[i];
if (instr->op == ILI9881C_SWITCH_PAGE)
- ret = ili9881c_switch_page(ctx, instr->arg.page);
+ ili9881c_switch_page(&mctx, instr->arg.page);
else if (instr->op == ILI9881C_COMMAND)
- ret = ili9881c_send_cmd_data(ctx, instr->arg.cmd.cmd,
- instr->arg.cmd.data);
-
- if (ret)
- return ret;
- }
-
- ret = ili9881c_switch_page(ctx, 0);
- if (ret)
- return ret;
-
- if (ctx->address_mode) {
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_ADDRESS_MODE,
- &ctx->address_mode,
- sizeof(ctx->address_mode));
- if (ret < 0)
- return ret;
+ ili9881c_send_cmd_data(&mctx, instr->arg.cmd.cmd,
+ instr->arg.cmd.data);
}
- ret = mipi_dsi_dcs_set_tear_on(ctx->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret)
- return ret;
-
- ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int ili9881c_enable(struct drm_panel *panel)
-{
- struct ili9881c *ctx = panel_to_ili9881c(panel);
+ ili9881c_switch_page(&mctx, 0);
- msleep(120);
+ if (ctx->address_mode)
+ ili9881c_send_cmd_data(&mctx, MIPI_DCS_SET_ADDRESS_MODE,
+ ctx->address_mode);
- mipi_dsi_dcs_set_display_on(ctx->dsi);
+ mipi_dsi_dcs_set_tear_on_multi(&mctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&mctx);
+ mipi_dsi_msleep(&mctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&mctx);
+ if (mctx.accum_err)
+ goto disable_power;
return 0;
-}
-
-static int ili9881c_disable(struct drm_panel *panel)
-{
- struct ili9881c *ctx = panel_to_ili9881c(panel);
- return mipi_dsi_dcs_set_display_off(ctx->dsi);
+disable_power:
+ regulator_disable(ctx->power);
+ return mctx.accum_err;
}
static int ili9881c_unprepare(struct drm_panel *panel)
{
struct ili9881c *ctx = panel_to_ili9881c(panel);
+ struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi };
- mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ mipi_dsi_dcs_set_display_off_multi(&mctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&mctx);
regulator_disable(ctx->power);
gpiod_set_value_cansleep(ctx->reset, 1);
@@ -1609,6 +2158,23 @@ static const struct drm_display_mode tl050hdv35_default_mode = {
.height_mm = 110,
};
+static const struct drm_display_mode w552946aaa_default_mode = {
+ .clock = 65000,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 52,
+ .hsync_end = 720 + 52 + 8,
+ .htotal = 720 + 52 + 8 + 48,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 6,
+ .vtotal = 1280 + 16 + 6 + 15,
+
+ .width_mm = 68,
+ .height_mm = 121,
+};
+
static const struct drm_display_mode w552946aba_default_mode = {
.clock = 64000,
@@ -1643,6 +2209,23 @@ static const struct drm_display_mode am8001280g_default_mode = {
.height_mm = 151,
};
+static const struct drm_display_mode rpi_5inch_default_mode = {
+ .clock = 83333,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 110,
+ .hsync_end = 720 + 110 + 12,
+ .htotal = 720 + 110 + 12 + 95,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 100,
+ .vsync_end = 1280 + 100 + 2,
+ .vtotal = 1280 + 100 + 2 + 100,
+
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
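As a sanity check on the timings above (illustrative arithmetic, not part of the patch): DRM derives the refresh rate as clock * 1000 / (htotal * vtotal), so this mode lands at the expected 60 Hz:

/* rpi_5inch_default_mode refresh-rate check */
htotal  = 720 + 110 + 12 + 95;          /* = 937  */
vtotal  = 1280 + 100 + 2 + 100;         /* = 1482 */
refresh = 83333 * 1000 / (937 * 1482);  /* ~60.0 Hz */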
static const struct drm_display_mode rpi_7inch_default_mode = {
.clock = 83330,
@@ -1660,6 +2243,23 @@ static const struct drm_display_mode rpi_7inch_default_mode = {
.height_mm = 151,
};
+static const struct drm_display_mode bsd1218_a101kl68_default_mode = {
+ .clock = 70000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 20,
+ .htotal = 800 + 40 + 20 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 4,
+ .vtotal = 1280 + 20 + 4 + 20,
+
+ .width_mm = 120,
+ .height_mm = 170,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1706,8 +2306,6 @@ static enum drm_panel_orientation ili9881c_get_orientation(struct drm_panel *pan
static const struct drm_panel_funcs ili9881c_funcs = {
.prepare = ili9881c_prepare,
.unprepare = ili9881c_unprepare,
- .enable = ili9881c_enable,
- .disable = ili9881c_disable,
.get_modes = ili9881c_get_modes,
.get_orientation = ili9881c_get_orientation,
};
@@ -1805,6 +2403,15 @@ static const struct ili9881c_desc tl050hdv35_desc = {
.default_address_mode = 0x03,
};
+static const struct ili9881c_desc w552946aaa_desc = {
+ .init = w552946aaa_init,
+ .init_length = ARRAY_SIZE(w552946aaa_init),
+ .mode = &w552946aaa_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .lanes = 2,
+};
+
static const struct ili9881c_desc w552946aba_desc = {
.init = w552946ab_init,
.init_length = ARRAY_SIZE(w552946ab_init),
@@ -1822,6 +2429,14 @@ static const struct ili9881c_desc am8001280g_desc = {
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
};
+static const struct ili9881c_desc rpi_5inch_desc = {
+ .init = rpi_5inch_init,
+ .init_length = ARRAY_SIZE(rpi_5inch_init),
+ .mode = &rpi_5inch_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM,
+ .lanes = 2,
+};
+
static const struct ili9881c_desc rpi_7inch_desc = {
.init = rpi_7inch_init,
.init_length = ARRAY_SIZE(rpi_7inch_init),
@@ -1830,13 +2445,25 @@ static const struct ili9881c_desc rpi_7inch_desc = {
.lanes = 2,
};
+static const struct ili9881c_desc bsd1218_a101kl68_desc = {
+ .init = bsd1218_a101kl68_init,
+ .init_length = ARRAY_SIZE(bsd1218_a101kl68_init),
+ .mode = &bsd1218_a101kl68_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .lanes = 4,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
+ { .compatible = "bestar,bsd1218-a101kl68", .data = &bsd1218_a101kl68_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
{ .compatible = "startek,kd050hdfia020", .data = &kd050hdfia020_desc },
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
+ { .compatible = "wanchanglong,w552946aaa", .data = &w552946aaa_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
+ { .compatible = "raspberrypi,dsi-5inch", &rpi_5inch_desc },
{ .compatible = "raspberrypi,dsi-7inch", &rpi_7inch_desc },
{ }
};
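
Both new entries route their ili9881c_desc through the of_device_id data pointer; at probe time the driver retrieves it and programs the DSI host from the matched descriptor. A hypothetical probe excerpt (field and variable names assumed, not taken from this patch):

/* Sketch: consuming the .data pointer selected by the compatible match */
ctx->desc = of_device_get_match_data(&dsi->dev);
if (!ctx->desc)
	return -EINVAL;

dsi->mode_flags = ctx->desc->mode_flags;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->lanes = ctx->desc->lanes;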
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
index 85c7059be214..c52f20863fc7 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
@@ -61,6 +61,13 @@ struct ili9882t {
mipi_dsi_dcs_write_seq_multi(ctx, ILI9882T_DCS_SWITCH_PAGE, \
0x98, 0x82, (page))
+/* IL79900A-specific commands, add new commands as you decode them */
+#define IL79900A_DCS_SWITCH_PAGE 0xFF
+
+#define il79900a_switch_page(ctx, page) \
+ mipi_dsi_dcs_write_seq_multi(ctx, IL79900A_DCS_SWITCH_PAGE, \
+ 0x5a, 0xa5, (page))
+
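The macro wraps the page number in the vendor's 0x5a/0xa5 unlock bytes, so a call such as il79900a_switch_page(&ctx, 0x06) in the init sequence below is equivalent to:

mipi_dsi_dcs_write_seq_multi(&ctx, 0xff, 0x5a, 0xa5, 0x06);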
static int starry_ili9882t_init(struct ili9882t *ili)
{
struct mipi_dsi_multi_context ctx = { .dsi = ili->dsi };
@@ -413,6 +420,38 @@ static int starry_ili9882t_init(struct ili9882t *ili)
return ctx.accum_err;
};
+static int tianma_il79900a_init(struct ili9882t *ili)
+{
+ struct mipi_dsi_multi_context ctx = { .dsi = ili->dsi };
+
+ mipi_dsi_usleep_range(&ctx, 5000, 5100);
+
+ il79900a_switch_page(&ctx, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0x3e, 0x62);
+
+ il79900a_switch_page(&ctx, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0x1b, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0x5d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0x5e, 0x40);
+
+ il79900a_switch_page(&ctx, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0X29, 0x00);
+
+ il79900a_switch_page(&ctx, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0x92, 0x22);
+
+ il79900a_switch_page(&ctx, 0x00);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+
+ mipi_dsi_msleep(&ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+
+ mipi_dsi_msleep(&ctx, 80);
+
+ return ctx.accum_err;
+};
+
static inline struct ili9882t *to_ili9882t(struct drm_panel *panel)
{
return container_of(panel, struct ili9882t, base);
@@ -529,6 +568,19 @@ static const struct drm_display_mode starry_ili9882t_default_mode = {
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
+static const struct drm_display_mode tianma_il79900a_default_mode = {
+ .clock = 264355,
+ .hdisplay = 1600,
+ .hsync_start = 1600 + 20,
+ .hsync_end = 1600 + 20 + 4,
+ .htotal = 1600 + 20 + 4 + 20,
+ .vdisplay = 2560,
+ .vsync_start = 2560 + 82,
+ .vsync_end = 2560 + 82 + 2,
+ .vtotal = 2560 + 82 + 2 + 36,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
static const struct panel_desc starry_ili9882t_desc = {
.modes = &starry_ili9882t_default_mode,
.bpc = 8,
@@ -543,6 +595,20 @@ static const struct panel_desc starry_ili9882t_desc = {
.init = starry_ili9882t_init,
};
+static const struct panel_desc tianma_tl121bvms07_desc = {
+ .modes = &tianma_il79900a_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 163,
+ .height_mm = 260,
+ },
+ .lanes = 3,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init = tianma_il79900a_init,
+};
+
static int ili9882t_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -680,6 +746,9 @@ static const struct of_device_id ili9882t_of_match[] = {
{ .compatible = "starry,ili9882t",
.data = &starry_ili9882t_desc
},
+ { .compatible = "tianma,tl121bvms07-00",
+ .data = &tianma_tl121bvms07_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ili9882t_of_match);
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 5c2530598ddb..aa05316dc57b 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -1132,22 +1132,19 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
dsi->lanes = desc->lanes;
jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(jadard->reset)) {
- DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
- return PTR_ERR(jadard->reset);
- }
+ if (IS_ERR(jadard->reset))
+ return dev_err_probe(&dsi->dev, PTR_ERR(jadard->reset),
+ "failed to get our reset GPIO\n");
jadard->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(jadard->vdd)) {
- DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n");
- return PTR_ERR(jadard->vdd);
- }
+ if (IS_ERR(jadard->vdd))
+ return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vdd),
+ "failed to get vdd regulator\n");
jadard->vccio = devm_regulator_get(dev, "vccio");
- if (IS_ERR(jadard->vccio)) {
- DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n");
- return PTR_ERR(jadard->vccio);
- }
+ if (IS_ERR(jadard->vccio))
+ return dev_err_probe(&dsi->dev, PTR_ERR(jadard->vccio),
+ "failed to get vccio regulator\n");
ret = of_drm_get_panel_orientation(dev->of_node, &jadard->orientation);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
index 5f897e143758..23462065d726 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c
@@ -81,25 +81,25 @@ static int jdi_panel_disable(struct drm_panel *panel)
static int jdi_panel_unprepare(struct drm_panel *panel)
{
struct jdi_panel *jdi = to_panel_jdi(panel);
- int ret;
- ret = mipi_dsi_dcs_set_display_off(jdi->link1);
- if (ret < 0)
- dev_err(panel->dev, "failed to set display off: %d\n", ret);
+ /*
+ * One context per panel since we'll continue trying to shut down the
+ * other panel even if one isn't responding.
+ */
+ struct mipi_dsi_multi_context dsi_ctx1 = { .dsi = jdi->link1 };
+ struct mipi_dsi_multi_context dsi_ctx2 = { .dsi = jdi->link2 };
- ret = mipi_dsi_dcs_set_display_off(jdi->link2);
- if (ret < 0)
- dev_err(panel->dev, "failed to set display off: %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx1);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx2);
/* Specified by JDI @ 50ms, subject to change */
msleep(50);
- ret = mipi_dsi_dcs_enter_sleep_mode(jdi->link1);
- if (ret < 0)
- dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);
- ret = mipi_dsi_dcs_enter_sleep_mode(jdi->link2);
- if (ret < 0)
- dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret);
+ /* Doesn't hurt to try sleep mode even if display off fails */
+ dsi_ctx1.accum_err = 0;
+ dsi_ctx2.accum_err = 0;
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx1);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx2);
/* Specified by JDI @ 150ms, subject to change */
msleep(150);
@@ -123,72 +123,46 @@ static int jdi_panel_unprepare(struct drm_panel *panel)
/* Specified by JDI @ 20ms, subject to change */
msleep(20);
- return ret;
+ return 0;
}
-static int jdi_setup_symmetrical_split(struct mipi_dsi_device *left,
- struct mipi_dsi_device *right,
- const struct drm_display_mode *mode)
+static void jdi_setup_symmetrical_split(struct mipi_dsi_multi_context *dsi_ctx,
+ struct mipi_dsi_device *left,
+ struct mipi_dsi_device *right,
+ const struct drm_display_mode *mode)
{
- int err;
-
- err = mipi_dsi_dcs_set_column_address(left, 0, mode->hdisplay / 2 - 1);
- if (err < 0) {
- dev_err(&left->dev, "failed to set column address: %d\n", err);
- return err;
- }
-
- err = mipi_dsi_dcs_set_column_address(right, 0, mode->hdisplay / 2 - 1);
- if (err < 0) {
- dev_err(&right->dev, "failed to set column address: %d\n", err);
- return err;
- }
-
- err = mipi_dsi_dcs_set_page_address(left, 0, mode->vdisplay - 1);
- if (err < 0) {
- dev_err(&left->dev, "failed to set page address: %d\n", err);
- return err;
- }
-
- err = mipi_dsi_dcs_set_page_address(right, 0, mode->vdisplay - 1);
- if (err < 0) {
- dev_err(&right->dev, "failed to set page address: %d\n", err);
- return err;
- }
-
- return 0;
+ mipi_dsi_dual(mipi_dsi_dcs_set_column_address_multi,
+ dsi_ctx, left, right,
+ 0, mode->hdisplay / 2 - 1);
+ mipi_dsi_dual(mipi_dsi_dcs_set_page_address_multi,
+ dsi_ctx, left, right,
+ 0, mode->vdisplay - 1);
}
-static int jdi_write_dcdc_registers(struct jdi_panel *jdi)
+static void jdi_write_dcdc_registers(struct mipi_dsi_multi_context *dsi_ctx,
+ struct jdi_panel *jdi)
{
/* Clear the manufacturer command access protection */
- mipi_dsi_generic_write_seq(jdi->link1, MCS_CMD_ACS_PROT,
- MCS_CMD_ACS_PROT_OFF);
- mipi_dsi_generic_write_seq(jdi->link2, MCS_CMD_ACS_PROT,
- MCS_CMD_ACS_PROT_OFF);
+ mipi_dsi_dual_generic_write_seq_multi(dsi_ctx, jdi->link1, jdi->link2,
+ MCS_CMD_ACS_PROT,
+ MCS_CMD_ACS_PROT_OFF);
/*
- * Change the VGH/VGL divide rations to move the noise generated by the
+ * Change the VGH/VGL divide ratios to move the noise generated by the
* TCONN. This should hopefully avoid interaction with the backlight
* controller.
*/
- mipi_dsi_generic_write_seq(jdi->link1, MCS_PWR_CTRL_FUNC,
- MCS_PWR_CTRL_PARAM1_VGH_330_DIV |
- MCS_PWR_CTRL_PARAM1_DEFAULT,
- MCS_PWR_CTRL_PARAM2_VGL_410_DIV |
- MCS_PWR_CTRL_PARAM2_DEFAULT);
-
- mipi_dsi_generic_write_seq(jdi->link2, MCS_PWR_CTRL_FUNC,
- MCS_PWR_CTRL_PARAM1_VGH_330_DIV |
- MCS_PWR_CTRL_PARAM1_DEFAULT,
- MCS_PWR_CTRL_PARAM2_VGL_410_DIV |
- MCS_PWR_CTRL_PARAM2_DEFAULT);
-
- return 0;
+ mipi_dsi_dual_generic_write_seq_multi(dsi_ctx, jdi->link1, jdi->link2,
+ MCS_PWR_CTRL_FUNC,
+ MCS_PWR_CTRL_PARAM1_VGH_330_DIV |
+ MCS_PWR_CTRL_PARAM1_DEFAULT,
+ MCS_PWR_CTRL_PARAM2_VGL_410_DIV |
+ MCS_PWR_CTRL_PARAM2_DEFAULT);
}
static int jdi_panel_prepare(struct drm_panel *panel)
{
struct jdi_panel *jdi = to_panel_jdi(panel);
+ struct mipi_dsi_multi_context dsi_ctx = {};
int err;
/* Disable backlight to avoid showing random pixels
@@ -231,86 +205,36 @@ static int jdi_panel_prepare(struct drm_panel *panel)
* put in place to communicate the configuration back to the DSI host
* controller.
*/
- err = jdi_setup_symmetrical_split(jdi->link1, jdi->link2,
- jdi->mode);
- if (err < 0) {
- dev_err(panel->dev, "failed to set up symmetrical split: %d\n",
- err);
- goto poweroff;
- }
+ jdi_setup_symmetrical_split(&dsi_ctx, jdi->link1, jdi->link2,
+ jdi->mode);
- err = mipi_dsi_dcs_set_tear_scanline(jdi->link1,
- jdi->mode->vdisplay - 16);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear scanline: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_tear_scanline_multi,
+ &dsi_ctx, jdi->link1, jdi->link2,
+ jdi->mode->vdisplay - 16);
- err = mipi_dsi_dcs_set_tear_scanline(jdi->link2,
- jdi->mode->vdisplay - 16);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear scanline: %d\n", err);
- goto poweroff;
- }
-
- err = mipi_dsi_dcs_set_tear_on(jdi->link1,
- MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear on: %d\n", err);
- goto poweroff;
- }
-
- err = mipi_dsi_dcs_set_tear_on(jdi->link2,
- MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (err < 0) {
- dev_err(panel->dev, "failed to set tear on: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_tear_on_multi,
+ &dsi_ctx, jdi->link1, jdi->link2,
+ MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- err = mipi_dsi_dcs_set_pixel_format(jdi->link1, MIPI_DCS_PIXEL_FMT_24BIT);
- if (err < 0) {
- dev_err(panel->dev, "failed to set pixel format: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_pixel_format_multi,
+ &dsi_ctx, jdi->link1, jdi->link2,
+ MIPI_DCS_PIXEL_FMT_24BIT);
- err = mipi_dsi_dcs_set_pixel_format(jdi->link2, MIPI_DCS_PIXEL_FMT_24BIT);
- if (err < 0) {
- dev_err(panel->dev, "failed to set pixel format: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_exit_sleep_mode_multi,
+ &dsi_ctx, jdi->link1, jdi->link2);
- err = mipi_dsi_dcs_exit_sleep_mode(jdi->link1);
- if (err < 0) {
- dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
- goto poweroff;
- }
-
- err = mipi_dsi_dcs_exit_sleep_mode(jdi->link2);
- if (err < 0) {
- dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
- goto poweroff;
- }
-
- err = jdi_write_dcdc_registers(jdi);
- if (err < 0) {
- dev_err(panel->dev, "failed to write dcdc registers: %d\n", err);
- goto poweroff;
- }
+ jdi_write_dcdc_registers(&dsi_ctx, jdi);
/*
- * We need to wait 150ms between mipi_dsi_dcs_exit_sleep_mode() and
- * mipi_dsi_dcs_set_display_on().
+ * We need to wait 150ms between mipi_dsi_dcs_exit_sleep_mode_multi()
+ * and mipi_dsi_dcs_set_display_on_multi().
*/
- msleep(150);
+ mipi_dsi_msleep(&dsi_ctx, 150);
- err = mipi_dsi_dcs_set_display_on(jdi->link1);
- if (err < 0) {
- dev_err(panel->dev, "failed to set display on: %d\n", err);
- goto poweroff;
- }
+ mipi_dsi_dual(mipi_dsi_dcs_set_display_on_multi,
+ &dsi_ctx, jdi->link1, jdi->link2);
- err = mipi_dsi_dcs_set_display_on(jdi->link2);
- if (err < 0) {
- dev_err(panel->dev, "failed to set display on: %d\n", err);
+ if (dsi_ctx.accum_err < 0) {
+ err = dsi_ctx.accum_err;
goto poweroff;
}
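
The conversion above leans on two helpers: the mipi_dsi_multi_context
pattern, where each *_multi call checks the context's accum_err and becomes a
no-op once an error has been recorded, and the mipi_dsi_dual() macro, which
runs the same *_multi helper once per link by pointing ctx->dsi at each
device in turn. A simplified sketch of the *_multi shape (the real helpers
live in drm_mipi_dsi; example_dcs_op is hypothetical):

	static void example_dcs_op_multi(struct mipi_dsi_multi_context *ctx)
	{
		int ret;

		/* An earlier failure short-circuits the rest of the sequence */
		if (ctx->accum_err)
			return;

		ret = example_dcs_op(ctx->dsi);	/* hypothetical single-device op */
		if (ret < 0)
			ctx->accum_err = ret;
	}

This is also why jdi_panel_unprepare() resets accum_err between phases:
clearing it lets the sleep-mode commands be attempted even after a
display-off failure.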
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index 2fc7b0779b37..893af9b16756 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
&kingdisplay_panel_funcs,
diff --git a/drivers/gpu/drm/panel/panel-lg-ld070wx3.c b/drivers/gpu/drm/panel/panel-lg-ld070wx3.c
new file mode 100644
index 000000000000..00cbfc5518a5
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lg-ld070wx3.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+static const struct regulator_bulk_data lg_ld070wx3_supplies[] = {
+ { .supply = "vdd" }, { .supply = "vcc" },
+};
+
+struct lg_ld070wx3 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator_bulk_data *supplies;
+};
+
+static inline struct lg_ld070wx3 *to_lg_ld070wx3(struct drm_panel *panel)
+{
+ return container_of(panel, struct lg_ld070wx3, panel);
+}
+
+static int lg_ld070wx3_prepare(struct drm_panel *panel)
+{
+ struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+ struct device *dev = panel->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable power supplies: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * According to the spec no delay is needed between enabling the
+ * supplies, but the regulators need ~5ms to reach their required
+ * voltage. MIPI interface signal setup requires an additional
+ * 110ms, for a total of 115ms.
+ */
+ mdelay(115);
+
+ mipi_dsi_dcs_soft_reset_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ /* Differential input impedance selection */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xae, 0x0b);
+
+ /* Enter test mode 1 and 2 */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0xea);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x5f);
+
+ /* Increased MIPI CLK driving ability */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xf2, 0x68);
+
+ /* Exit test mode 1 and 2 */
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xee, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&ctx, 0xef, 0x00);
+
+ return ctx.accum_err;
+}
+
+static int lg_ld070wx3_unprepare(struct drm_panel *panel)
+{
+ struct lg_ld070wx3 *priv = to_lg_ld070wx3(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+
+ msleep(50);
+
+ regulator_bulk_disable(ARRAY_SIZE(lg_ld070wx3_supplies), priv->supplies);
+
+ /* power supply must be off for at least 1s after panel disable */
+ msleep(1000);
+
+ return 0;
+}
+
+static const struct drm_display_mode lg_ld070wx3_mode = {
+ .clock = (800 + 32 + 48 + 8) * (1280 + 5 + 3 + 1) * 60 / 1000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 32,
+ .hsync_end = 800 + 32 + 48,
+ .htotal = 800 + 32 + 48 + 8,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 5,
+ .vsync_end = 1280 + 5 + 3,
+ .vtotal = 1280 + 5 + 3 + 1,
+ .width_mm = 94,
+ .height_mm = 151,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int lg_ld070wx3_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &lg_ld070wx3_mode);
+}
+
+static const struct drm_panel_funcs lg_ld070wx3_panel_funcs = {
+ .prepare = lg_ld070wx3_prepare,
+ .unprepare = lg_ld070wx3_unprepare,
+ .get_modes = lg_ld070wx3_get_modes,
+};
+
+static int lg_ld070wx3_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct lg_ld070wx3 *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct lg_ld070wx3, panel,
+ &lg_ld070wx3_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(lg_ld070wx3_supplies),
+ lg_ld070wx3_supplies, &priv->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get supplies\n");
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void lg_ld070wx3_remove(struct mipi_dsi_device *dsi)
+{
+ struct lg_ld070wx3 *priv = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id lg_ld070wx3_of_match[] = {
+ { .compatible = "lg,ld070wx3-sl01" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lg_ld070wx3_of_match);
+
+static struct mipi_dsi_driver lg_ld070wx3_driver = {
+ .driver = {
+ .name = "panel-lg-ld070wx3",
+ .of_match_table = lg_ld070wx3_of_match,
+ },
+ .probe = lg_ld070wx3_probe,
+ .remove = lg_ld070wx3_remove,
+};
+module_mipi_dsi_driver(lg_ld070wx3_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("LG LD070WX3-SL01 DSI panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 23fd535d8f47..46b07f38559f 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -28,8 +28,6 @@ struct panel_lvds {
struct device *dev;
const char *label;
- unsigned int width;
- unsigned int height;
struct drm_display_mode dmode;
u32 bus_flags;
unsigned int bus_format;
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index 0db9cadd868e..18130bc14201 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -43,59 +43,12 @@ struct nv3052c {
struct gpio_desc *reset_gpio;
};
-static const struct nv3052c_reg ltk035c5444t_panel_regs[] = {
- // EXTC Command set enable, select page 1
- { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x01 },
- // Mostly unknown registers
- { 0xe3, 0x00 },
- { 0x40, 0x00 },
- { 0x03, 0x40 },
- { 0x04, 0x00 },
- { 0x05, 0x03 },
- { 0x08, 0x00 },
- { 0x09, 0x07 },
- { 0x0a, 0x01 },
- { 0x0b, 0x32 },
- { 0x0c, 0x32 },
- { 0x0d, 0x0b },
- { 0x0e, 0x00 },
- { 0x23, 0xa0 },
- { 0x24, 0x0c },
- { 0x25, 0x06 },
- { 0x26, 0x14 },
- { 0x27, 0x14 },
- { 0x38, 0xcc }, // VCOM_ADJ1
- { 0x39, 0xd7 }, // VCOM_ADJ2
- { 0x3a, 0x4a }, // VCOM_ADJ3
- { 0x28, 0x40 },
- { 0x29, 0x01 },
- { 0x2a, 0xdf },
- { 0x49, 0x3c },
- { 0x91, 0x77 }, // EXTPW_CTRL2
- { 0x92, 0x77 }, // EXTPW_CTRL3
- { 0xa0, 0x55 },
- { 0xa1, 0x50 },
- { 0xa4, 0x9c },
- { 0xa7, 0x02 },
- { 0xa8, 0x01 },
- { 0xa9, 0x01 },
- { 0xaa, 0xfc },
- { 0xab, 0x28 },
- { 0xac, 0x06 },
- { 0xad, 0x06 },
- { 0xae, 0x06 },
- { 0xaf, 0x03 },
- { 0xb0, 0x08 },
- { 0xb1, 0x26 },
- { 0xb2, 0x28 },
- { 0xb3, 0x28 },
- { 0xb4, 0x33 },
- { 0xb5, 0x08 },
- { 0xb6, 0x26 },
- { 0xb7, 0x08 },
- { 0xb8, 0x26 },
- { 0xf0, 0x00 },
- { 0xf6, 0xc0 },
+/*
+ * Common initialization registers shared by all currently supported
+ * displays. These mostly appear to be related to gamma correction
+ * curves and output pad mappings.
+ */
+static const struct nv3052c_reg common_init_regs[] = {
// EXTC Command set enable, select page 2
{ 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
// Set gray scale voltage to adjust gamma
@@ -215,7 +168,7 @@ static const struct nv3052c_reg ltk035c5444t_panel_regs[] = {
{ 0xa0, 0x01 }, // PANELU2D33
// EXTC Command set enable, select page 2
{ 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
- // Unknown registers
+ // Page 2 register values (0x01..0x10) are the same for nv3051d and nv3052c
{ 0x01, 0x01 },
{ 0x02, 0xda },
{ 0x03, 0xba },
@@ -236,6 +189,62 @@ static const struct nv3052c_reg ltk035c5444t_panel_regs[] = {
{ 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 },
// Display Access Control
{ 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0
+};
+
+static const struct nv3052c_reg ltk035c5444t_panel_regs[] = {
+ // EXTC Command set enable, select page 1
+ { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x01 },
+ // Mostly unknown registers
+ { 0xe3, 0x00 },
+ { 0x40, 0x00 },
+ { 0x03, 0x40 },
+ { 0x04, 0x00 },
+ { 0x05, 0x03 },
+ { 0x08, 0x00 },
+ { 0x09, 0x07 },
+ { 0x0a, 0x01 },
+ { 0x0b, 0x32 },
+ { 0x0c, 0x32 },
+ { 0x0d, 0x0b },
+ { 0x0e, 0x00 },
+ { 0x23, 0xa0 },
+ { 0x24, 0x0c },
+ { 0x25, 0x06 },
+ { 0x26, 0x14 },
+ { 0x27, 0x14 },
+ { 0x38, 0xcc }, // VCOM_ADJ1
+ { 0x39, 0xd7 }, // VCOM_ADJ2
+ { 0x3a, 0x4a }, // VCOM_ADJ3
+ { 0x28, 0x40 },
+ { 0x29, 0x01 },
+ { 0x2a, 0xdf },
+ { 0x49, 0x3c },
+ { 0x91, 0x77 }, // EXTPW_CTRL2
+ { 0x92, 0x77 }, // EXTPW_CTRL3
+ { 0xa0, 0x55 },
+ { 0xa1, 0x50 },
+ { 0xa4, 0x9c },
+ { 0xa7, 0x02 },
+ { 0xa8, 0x01 },
+ { 0xa9, 0x01 },
+ { 0xaa, 0xfc },
+ { 0xab, 0x28 },
+ { 0xac, 0x06 },
+ { 0xad, 0x06 },
+ { 0xae, 0x06 },
+ { 0xaf, 0x03 },
+ { 0xb0, 0x08 },
+ { 0xb1, 0x26 },
+ { 0xb2, 0x28 },
+ { 0xb3, 0x28 },
+ { 0xb4, 0x33 },
+ { 0xb5, 0x08 },
+ { 0xb6, 0x26 },
+ { 0xb7, 0x08 },
+ { 0xb8, 0x26 },
+ { 0xf0, 0x00 },
+ { 0xf6, 0xc0 },
};
static const struct nv3052c_reg fs035vg158_panel_regs[] = {
@@ -291,146 +300,6 @@ static const struct nv3052c_reg fs035vg158_panel_regs[] = {
{ 0xb8, 0x26 },
{ 0xf0, 0x00 },
{ 0xf6, 0xc0 },
- // EXTC Command set enable, select page 0
- { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
- // Set gray scale voltage to adjust gamma
- { 0xb0, 0x0b }, // PGAMVR0
- { 0xb1, 0x16 }, // PGAMVR1
- { 0xb2, 0x17 }, // PGAMVR2
- { 0xb3, 0x2c }, // PGAMVR3
- { 0xb4, 0x32 }, // PGAMVR4
- { 0xb5, 0x3b }, // PGAMVR5
- { 0xb6, 0x29 }, // PGAMPR0
- { 0xb7, 0x40 }, // PGAMPR1
- { 0xb8, 0x0d }, // PGAMPK0
- { 0xb9, 0x05 }, // PGAMPK1
- { 0xba, 0x12 }, // PGAMPK2
- { 0xbb, 0x10 }, // PGAMPK3
- { 0xbc, 0x12 }, // PGAMPK4
- { 0xbd, 0x15 }, // PGAMPK5
- { 0xbe, 0x19 }, // PGAMPK6
- { 0xbf, 0x0e }, // PGAMPK7
- { 0xc0, 0x16 }, // PGAMPK8
- { 0xc1, 0x0a }, // PGAMPK9
- // Set gray scale voltage to adjust gamma
- { 0xd0, 0x0c }, // NGAMVR0
- { 0xd1, 0x17 }, // NGAMVR0
- { 0xd2, 0x14 }, // NGAMVR1
- { 0xd3, 0x2e }, // NGAMVR2
- { 0xd4, 0x32 }, // NGAMVR3
- { 0xd5, 0x3c }, // NGAMVR4
- { 0xd6, 0x22 }, // NGAMPR0
- { 0xd7, 0x3d }, // NGAMPR1
- { 0xd8, 0x0d }, // NGAMPK0
- { 0xd9, 0x07 }, // NGAMPK1
- { 0xda, 0x13 }, // NGAMPK2
- { 0xdb, 0x13 }, // NGAMPK3
- { 0xdc, 0x11 }, // NGAMPK4
- { 0xdd, 0x15 }, // NGAMPK5
- { 0xde, 0x19 }, // NGAMPK6
- { 0xdf, 0x10 }, // NGAMPK7
- { 0xe0, 0x17 }, // NGAMPK8
- { 0xe1, 0x0a }, // NGAMPK9
- // EXTC Command set enable, select page 3
- { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x03 },
- // Set various timing settings
- { 0x00, 0x2a }, // GIP_VST_1
- { 0x01, 0x2a }, // GIP_VST_2
- { 0x02, 0x2a }, // GIP_VST_3
- { 0x03, 0x2a }, // GIP_VST_4
- { 0x04, 0x61 }, // GIP_VST_5
- { 0x05, 0x80 }, // GIP_VST_6
- { 0x06, 0xc7 }, // GIP_VST_7
- { 0x07, 0x01 }, // GIP_VST_8
- { 0x08, 0x03 }, // GIP_VST_9
- { 0x09, 0x04 }, // GIP_VST_10
- { 0x70, 0x22 }, // GIP_ECLK1
- { 0x71, 0x80 }, // GIP_ECLK2
- { 0x30, 0x2a }, // GIP_CLK_1
- { 0x31, 0x2a }, // GIP_CLK_2
- { 0x32, 0x2a }, // GIP_CLK_3
- { 0x33, 0x2a }, // GIP_CLK_4
- { 0x34, 0x61 }, // GIP_CLK_5
- { 0x35, 0xc5 }, // GIP_CLK_6
- { 0x36, 0x80 }, // GIP_CLK_7
- { 0x37, 0x23 }, // GIP_CLK_8
- { 0x40, 0x03 }, // GIP_CLKA_1
- { 0x41, 0x04 }, // GIP_CLKA_2
- { 0x42, 0x05 }, // GIP_CLKA_3
- { 0x43, 0x06 }, // GIP_CLKA_4
- { 0x44, 0x11 }, // GIP_CLKA_5
- { 0x45, 0xe8 }, // GIP_CLKA_6
- { 0x46, 0xe9 }, // GIP_CLKA_7
- { 0x47, 0x11 }, // GIP_CLKA_8
- { 0x48, 0xea }, // GIP_CLKA_9
- { 0x49, 0xeb }, // GIP_CLKA_10
- { 0x50, 0x07 }, // GIP_CLKB_1
- { 0x51, 0x08 }, // GIP_CLKB_2
- { 0x52, 0x09 }, // GIP_CLKB_3
- { 0x53, 0x0a }, // GIP_CLKB_4
- { 0x54, 0x11 }, // GIP_CLKB_5
- { 0x55, 0xec }, // GIP_CLKB_6
- { 0x56, 0xed }, // GIP_CLKB_7
- { 0x57, 0x11 }, // GIP_CLKB_8
- { 0x58, 0xef }, // GIP_CLKB_9
- { 0x59, 0xf0 }, // GIP_CLKB_10
- // Map internal GOA signals to GOA output pad
- { 0xb1, 0x01 }, // PANELD2U2
- { 0xb4, 0x15 }, // PANELD2U5
- { 0xb5, 0x16 }, // PANELD2U6
- { 0xb6, 0x09 }, // PANELD2U7
- { 0xb7, 0x0f }, // PANELD2U8
- { 0xb8, 0x0d }, // PANELD2U9
- { 0xb9, 0x0b }, // PANELD2U10
- { 0xba, 0x00 }, // PANELD2U11
- { 0xc7, 0x02 }, // PANELD2U24
- { 0xca, 0x17 }, // PANELD2U27
- { 0xcb, 0x18 }, // PANELD2U28
- { 0xcc, 0x0a }, // PANELD2U29
- { 0xcd, 0x10 }, // PANELD2U30
- { 0xce, 0x0e }, // PANELD2U31
- { 0xcf, 0x0c }, // PANELD2U32
- { 0xd0, 0x00 }, // PANELD2U33
- // Map internal GOA signals to GOA output pad
- { 0x81, 0x00 }, // PANELU2D2
- { 0x84, 0x15 }, // PANELU2D5
- { 0x85, 0x16 }, // PANELU2D6
- { 0x86, 0x10 }, // PANELU2D7
- { 0x87, 0x0a }, // PANELU2D8
- { 0x88, 0x0c }, // PANELU2D9
- { 0x89, 0x0e }, // PANELU2D10
- { 0x8a, 0x02 }, // PANELU2D11
- { 0x97, 0x00 }, // PANELU2D24
- { 0x9a, 0x17 }, // PANELU2D27
- { 0x9b, 0x18 }, // PANELU2D28
- { 0x9c, 0x0f }, // PANELU2D29
- { 0x9d, 0x09 }, // PANELU2D30
- { 0x9e, 0x0b }, // PANELU2D31
- { 0x9f, 0x0d }, // PANELU2D32
- { 0xa0, 0x01 }, // PANELU2D33
- // EXTC Command set enable, select page 2
- { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
- // Unknown registers
- { 0x01, 0x01 },
- { 0x02, 0xda },
- { 0x03, 0xba },
- { 0x04, 0xa8 },
- { 0x05, 0x9a },
- { 0x06, 0x70 },
- { 0x07, 0xff },
- { 0x08, 0x91 },
- { 0x09, 0x90 },
- { 0x0a, 0xff },
- { 0x0b, 0x8f },
- { 0x0c, 0x60 },
- { 0x0d, 0x58 },
- { 0x0e, 0x48 },
- { 0x0f, 0x38 },
- { 0x10, 0x2b },
- // EXTC Command set enable, select page 0
- { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 },
- // Display Access Control
- { 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0
};
@@ -487,146 +356,6 @@ static const struct nv3052c_reg wl_355608_a8_panel_regs[] = {
{ 0xb8, 0x26 },
{ 0xf0, 0x00 },
{ 0xf6, 0xc0 },
- // EXTC Command set enable, select page 2
- { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
- // Set gray scale voltage to adjust gamma
- { 0xb0, 0x0b }, // PGAMVR0
- { 0xb1, 0x16 }, // PGAMVR1
- { 0xb2, 0x17 }, // PGAMVR2
- { 0xb3, 0x2c }, // PGAMVR3
- { 0xb4, 0x32 }, // PGAMVR4
- { 0xb5, 0x3b }, // PGAMVR5
- { 0xb6, 0x29 }, // PGAMPR0
- { 0xb7, 0x40 }, // PGAMPR1
- { 0xb8, 0x0d }, // PGAMPK0
- { 0xb9, 0x05 }, // PGAMPK1
- { 0xba, 0x12 }, // PGAMPK2
- { 0xbb, 0x10 }, // PGAMPK3
- { 0xbc, 0x12 }, // PGAMPK4
- { 0xbd, 0x15 }, // PGAMPK5
- { 0xbe, 0x19 }, // PGAMPK6
- { 0xbf, 0x0e }, // PGAMPK7
- { 0xc0, 0x16 }, // PGAMPK8
- { 0xc1, 0x0a }, // PGAMPK9
- // Set gray scale voltage to adjust gamma
- { 0xd0, 0x0c }, // NGAMVR0
- { 0xd1, 0x17 }, // NGAMVR0
- { 0xd2, 0x14 }, // NGAMVR1
- { 0xd3, 0x2e }, // NGAMVR2
- { 0xd4, 0x32 }, // NGAMVR3
- { 0xd5, 0x3c }, // NGAMVR4
- { 0xd6, 0x22 }, // NGAMPR0
- { 0xd7, 0x3d }, // NGAMPR1
- { 0xd8, 0x0d }, // NGAMPK0
- { 0xd9, 0x07 }, // NGAMPK1
- { 0xda, 0x13 }, // NGAMPK2
- { 0xdb, 0x13 }, // NGAMPK3
- { 0xdc, 0x11 }, // NGAMPK4
- { 0xdd, 0x15 }, // NGAMPK5
- { 0xde, 0x19 }, // NGAMPK6
- { 0xdf, 0x10 }, // NGAMPK7
- { 0xe0, 0x17 }, // NGAMPK8
- { 0xe1, 0x0a }, // NGAMPK9
- // EXTC Command set enable, select page 3
- { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x03 },
- // Set various timing settings
- { 0x00, 0x2a }, // GIP_VST_1
- { 0x01, 0x2a }, // GIP_VST_2
- { 0x02, 0x2a }, // GIP_VST_3
- { 0x03, 0x2a }, // GIP_VST_4
- { 0x04, 0x61 }, // GIP_VST_5
- { 0x05, 0x80 }, // GIP_VST_6
- { 0x06, 0xc7 }, // GIP_VST_7
- { 0x07, 0x01 }, // GIP_VST_8
- { 0x08, 0x03 }, // GIP_VST_9
- { 0x09, 0x04 }, // GIP_VST_10
- { 0x70, 0x22 }, // GIP_ECLK1
- { 0x71, 0x80 }, // GIP_ECLK2
- { 0x30, 0x2a }, // GIP_CLK_1
- { 0x31, 0x2a }, // GIP_CLK_2
- { 0x32, 0x2a }, // GIP_CLK_3
- { 0x33, 0x2a }, // GIP_CLK_4
- { 0x34, 0x61 }, // GIP_CLK_5
- { 0x35, 0xc5 }, // GIP_CLK_6
- { 0x36, 0x80 }, // GIP_CLK_7
- { 0x37, 0x23 }, // GIP_CLK_8
- { 0x40, 0x03 }, // GIP_CLKA_1
- { 0x41, 0x04 }, // GIP_CLKA_2
- { 0x42, 0x05 }, // GIP_CLKA_3
- { 0x43, 0x06 }, // GIP_CLKA_4
- { 0x44, 0x11 }, // GIP_CLKA_5
- { 0x45, 0xe8 }, // GIP_CLKA_6
- { 0x46, 0xe9 }, // GIP_CLKA_7
- { 0x47, 0x11 }, // GIP_CLKA_8
- { 0x48, 0xea }, // GIP_CLKA_9
- { 0x49, 0xeb }, // GIP_CLKA_10
- { 0x50, 0x07 }, // GIP_CLKB_1
- { 0x51, 0x08 }, // GIP_CLKB_2
- { 0x52, 0x09 }, // GIP_CLKB_3
- { 0x53, 0x0a }, // GIP_CLKB_4
- { 0x54, 0x11 }, // GIP_CLKB_5
- { 0x55, 0xec }, // GIP_CLKB_6
- { 0x56, 0xed }, // GIP_CLKB_7
- { 0x57, 0x11 }, // GIP_CLKB_8
- { 0x58, 0xef }, // GIP_CLKB_9
- { 0x59, 0xf0 }, // GIP_CLKB_10
- // Map internal GOA signals to GOA output pad
- { 0xb1, 0x01 }, // PANELD2U2
- { 0xb4, 0x15 }, // PANELD2U5
- { 0xb5, 0x16 }, // PANELD2U6
- { 0xb6, 0x09 }, // PANELD2U7
- { 0xb7, 0x0f }, // PANELD2U8
- { 0xb8, 0x0d }, // PANELD2U9
- { 0xb9, 0x0b }, // PANELD2U10
- { 0xba, 0x00 }, // PANELD2U11
- { 0xc7, 0x02 }, // PANELD2U24
- { 0xca, 0x17 }, // PANELD2U27
- { 0xcb, 0x18 }, // PANELD2U28
- { 0xcc, 0x0a }, // PANELD2U29
- { 0xcd, 0x10 }, // PANELD2U30
- { 0xce, 0x0e }, // PANELD2U31
- { 0xcf, 0x0c }, // PANELD2U32
- { 0xd0, 0x00 }, // PANELD2U33
- // Map internal GOA signals to GOA output pad
- { 0x81, 0x00 }, // PANELU2D2
- { 0x84, 0x15 }, // PANELU2D5
- { 0x85, 0x16 }, // PANELU2D6
- { 0x86, 0x10 }, // PANELU2D7
- { 0x87, 0x0a }, // PANELU2D8
- { 0x88, 0x0c }, // PANELU2D9
- { 0x89, 0x0e }, // PANELU2D10
- { 0x8a, 0x02 }, // PANELU2D11
- { 0x97, 0x00 }, // PANELU2D24
- { 0x9a, 0x17 }, // PANELU2D27
- { 0x9b, 0x18 }, // PANELU2D28
- { 0x9c, 0x0f }, // PANELU2D29
- { 0x9d, 0x09 }, // PANELU2D30
- { 0x9e, 0x0b }, // PANELU2D31
- { 0x9f, 0x0d }, // PANELU2D32
- { 0xa0, 0x01 }, // PANELU2D33
- // EXTC Command set enable, select page 2
- { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x02 },
- // Unknown registers
- { 0x01, 0x01 },
- { 0x02, 0xda },
- { 0x03, 0xba },
- { 0x04, 0xa8 },
- { 0x05, 0x9a },
- { 0x06, 0x70 },
- { 0x07, 0xff },
- { 0x08, 0x91 },
- { 0x09, 0x90 },
- { 0x0a, 0xff },
- { 0x0b, 0x8f },
- { 0x0c, 0x60 },
- { 0x0d, 0x58 },
- { 0x0e, 0x48 },
- { 0x0f, 0x38 },
- { 0x10, 0x2b },
- // EXTC Command set enable, select page 0
- { 0xff, 0x30 }, { 0xff, 0x52 }, { 0xff, 0x00 },
- // Display Access Control
- { 0x36, 0x0a }, // bgr = 1, ss = 1, gs = 0
};
static inline struct nv3052c *to_nv3052c(struct drm_panel *panel)
@@ -655,6 +384,7 @@ static int nv3052c_prepare(struct drm_panel *panel)
gpiod_set_value_cansleep(priv->reset_gpio, 0);
usleep_range(5000, 20000);
+ /* Apply panel-specific initialization registers */
for (i = 0; i < panel_regs_len; i++) {
err = mipi_dbi_command(dbi, panel_regs[i].cmd,
panel_regs[i].val);
@@ -665,6 +395,16 @@ static int nv3052c_prepare(struct drm_panel *panel)
}
}
+ /* Apply common initialization registers */
+ for (i = 0; i < ARRAY_SIZE(common_init_regs); i++) {
+ err = mipi_dbi_command(dbi, common_init_regs[i].cmd,
+ common_init_regs[i].val);
+ if (err) {
+ dev_err(priv->dev, "Unable to set register: %d\n", err);
+ goto err_disable_regulator;
+ }
+ }
+
err = mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
if (err) {
dev_err(priv->dev, "Unable to exit sleep mode: %d\n", err);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 98f0782c8411..561e6643dcbb 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -148,24 +148,20 @@ static inline struct nt35560 *panel_to_nt35560(struct drm_panel *panel)
static int nt35560_set_brightness(struct backlight_device *bl)
{
struct nt35560 *nt = bl_get_data(bl);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
- int period_ns = 1023;
+ struct mipi_dsi_multi_context dsi_ctx = {
+ .dsi = to_mipi_dsi_device(nt->dev)
+ };
int duty_ns = bl->props.brightness;
+ int period_ns = 1023;
u8 pwm_ratio;
u8 pwm_div;
- u8 par;
- int ret;
if (backlight_is_blank(bl)) {
/* Disable backlight */
- par = 0x00;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- &par, 1);
- if (ret) {
- dev_err(nt->dev, "failed to disable display backlight (%d)\n", ret);
- return ret;
- }
- return 0;
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx,
+ MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x00);
+ return dsi_ctx.accum_err;
}
/* Calculate the PWM duty cycle in n/256's */
@@ -176,12 +172,6 @@ static int nt35560_set_brightness(struct backlight_device *bl)
/* Set up PWM dutycycle ONE byte (differs from the standard) */
dev_dbg(nt->dev, "calculated duty cycle %02x\n", pwm_ratio);
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
- &pwm_ratio, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to set display PWM ratio (%d)\n", ret);
- return ret;
- }
/*
* Sequence to write PWMDIV:
@@ -192,46 +182,23 @@ static int nt35560_set_brightness(struct backlight_device *bl)
* 0x22 PWMDIV
* 0x7F 0xAA CMD2 page 1 lock
*/
- par = 0xaa;
- ret = mipi_dsi_dcs_write(dsi, 0xf3, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to unlock CMD 2 (%d)\n", ret);
- return ret;
- }
- par = 0x01;
- ret = mipi_dsi_dcs_write(dsi, 0x00, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to enter page 1 (%d)\n", ret);
- return ret;
- }
- par = 0x01;
- ret = mipi_dsi_dcs_write(dsi, 0x7d, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to disable MTP reload (%d)\n", ret);
- return ret;
- }
- ret = mipi_dsi_dcs_write(dsi, 0x22, &pwm_div, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to set PWM divisor (%d)\n", ret);
- return ret;
- }
- par = 0xaa;
- ret = mipi_dsi_dcs_write(dsi, 0x7f, &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to lock CMD 2 (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx,
+ MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ pwm_ratio);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xaa);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x01);
+
+ mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, 0x22, pwm_div);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0xaa);
/* Enable backlight */
- par = 0x24;
- ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY,
- &par, 1);
- if (ret < 0) {
- dev_err(nt->dev, "failed to enable display backlight (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY,
+ 0x24);
- return 0;
+ return dsi_ctx.accum_err;
}
static const struct backlight_ops nt35560_bl_ops = {
@@ -244,32 +211,23 @@ static const struct backlight_properties nt35560_bl_props = {
.max_brightness = 1023,
};
-static int nt35560_read_id(struct nt35560 *nt)
+static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ struct device *dev = &dsi_ctx->dsi->dev;
u8 vendor, version, panel;
u16 val;
- int ret;
- ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID1, &vendor, 1);
- if (ret < 0) {
- dev_err(nt->dev, "could not vendor ID byte\n");
- return ret;
- }
- ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID2, &version, 1);
- if (ret < 0) {
- dev_err(nt->dev, "could not read device version byte\n");
- return ret;
- }
- ret = mipi_dsi_dcs_read(dsi, NT35560_DCS_READ_ID3, &panel, 1);
- if (ret < 0) {
- dev_err(nt->dev, "could not read panel ID byte\n");
- return ret;
- }
+ mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID1, &vendor, 1);
+ mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID2, &version, 1);
+ mipi_dsi_dcs_read_multi(dsi_ctx, NT35560_DCS_READ_ID3, &panel, 1);
+
+ if (dsi_ctx->accum_err < 0)
+ return;
if (vendor == 0x00) {
- dev_err(nt->dev, "device vendor ID is zero\n");
- return -ENODEV;
+ dev_err(dev, "device vendor ID is zero\n");
+ dsi_ctx->accum_err = -ENODEV;
+ return;
}
val = (vendor << 8) | panel;
@@ -278,16 +236,16 @@ static int nt35560_read_id(struct nt35560 *nt)
case DISPLAY_SONY_ACX424AKP_ID2:
case DISPLAY_SONY_ACX424AKP_ID3:
case DISPLAY_SONY_ACX424AKP_ID4:
- dev_info(nt->dev, "MTP vendor: %02x, version: %02x, panel: %02x\n",
+ dev_info(dev,
+ "MTP vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
default:
- dev_info(nt->dev, "unknown vendor: %02x, version: %02x, panel: %02x\n",
+ dev_info(dev,
+ "unknown vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
}
-
- return 0;
}
static int nt35560_power_on(struct nt35560 *nt)
@@ -322,92 +280,56 @@ static void nt35560_power_off(struct nt35560 *nt)
static int nt35560_prepare(struct drm_panel *panel)
{
struct nt35560 *nt = panel_to_nt35560(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
- const u8 mddi = 3;
+ struct mipi_dsi_multi_context dsi_ctx = {
+ .dsi = to_mipi_dsi_device(nt->dev)
+ };
int ret;
ret = nt35560_power_on(nt);
if (ret)
return ret;
- ret = nt35560_read_id(nt);
- if (ret) {
- dev_err(nt->dev, "failed to read panel ID (%d)\n", ret);
- goto err_power_off;
- }
+ nt35560_read_id(&dsi_ctx);
- /* Enabe tearing mode: send TE (tearing effect) at VBLANK */
- ret = mipi_dsi_dcs_set_tear_on(dsi,
+ /* Enable tearing mode: send TE (tearing effect) at VBLANK */
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx,
MIPI_DSI_DCS_TEAR_MODE_VBLANK);
- if (ret) {
- dev_err(nt->dev, "failed to enable vblank TE (%d)\n", ret);
- goto err_power_off;
- }
/*
* Set MDDI
*
* This presumably deactivates the Qualcomm MDDI interface and
* selects DSI, similar code is found in other drivers such as the
- * Sharp LS043T1LE01 which makes us suspect that this panel may be
- * using a Novatek NT35565 or similar display driver chip that shares
- * this command. Due to the lack of documentation we cannot know for
- * sure.
+ * Sharp LS043T1LE01.
*/
- ret = mipi_dsi_dcs_write(dsi, NT35560_DCS_SET_MDDI,
- &mddi, sizeof(mddi));
- if (ret < 0) {
- dev_err(nt->dev, "failed to set MDDI (%d)\n", ret);
- goto err_power_off;
- }
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, NT35560_DCS_SET_MDDI, 3);
- /* Exit sleep mode */
- ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to exit sleep mode (%d)\n", ret);
- goto err_power_off;
- }
- msleep(140);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 140);
- ret = mipi_dsi_dcs_set_display_on(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to turn display on (%d)\n", ret);
- goto err_power_off;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
if (nt->video_mode) {
- /* In video mode turn peripheral on */
- ret = mipi_dsi_turn_on_peripheral(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to turn on peripheral\n");
- goto err_power_off;
- }
+ mipi_dsi_turn_on_peripheral_multi(&dsi_ctx);
}
- return 0;
-
-err_power_off:
- nt35560_power_off(nt);
- return ret;
+ if (dsi_ctx.accum_err < 0)
+ nt35560_power_off(nt);
+ return dsi_ctx.accum_err;
}
static int nt35560_unprepare(struct drm_panel *panel)
{
struct nt35560 *nt = panel_to_nt35560(panel);
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = {
+ .dsi = to_mipi_dsi_device(nt->dev)
+ };
- ret = mipi_dsi_dcs_set_display_off(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to turn display off (%d)\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ if (dsi_ctx.accum_err < 0)
+ return dsi_ctx.accum_err;
- /* Enter sleep mode */
- ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
- if (ret) {
- dev_err(nt->dev, "failed to enter sleep mode (%d)\n", ret);
- return ret;
- }
msleep(85);
nt35560_power_off(nt);
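
One detail worth noting in the brightness conversion above:
mipi_dsi_dcs_write_seq_multi() packs its payload into a static const buffer,
so it only accepts compile-time constant bytes, while
mipi_dsi_dcs_write_var_seq_multi() builds the buffer at runtime and therefore
also takes computed values such as pwm_ratio and pwm_div. An illustrative
contrast (ratio is a hypothetical runtime value):

	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0xaa);		/* constants only */
	mipi_dsi_dcs_write_var_seq_multi(&dsi_ctx, 0x22, ratio);	/* runtime values OK */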
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 32cf64c7c18b..226d91daf8c7 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -23,14 +23,6 @@
#define DSI_NUM_MIN 1
-#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) \
- do { \
- dsi_ctx.dsi = dsi0; \
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
- dsi_ctx.dsi = dsi1; \
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \
- } while (0)
-
struct panel_info {
struct drm_panel panel;
struct mipi_dsi_device *dsi[2];
@@ -71,217 +63,217 @@ static int elish_boe_init_sequence(struct panel_info *pinfo)
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x25, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0x47);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd2, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x74, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x76, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x77, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xba, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbe, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbf, 0x49);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc0, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc1, 0x59);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc6, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc7, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xca, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcb, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcf, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd3, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdc, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdd, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xe1, 0x43);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xe2, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf3, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xf4, 0x48);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x13, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x14, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbd, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x97, 0x3c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x98, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x99, 0x95);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9b, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9c, 0x0b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9d, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9e, 0x90);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9f, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa3, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x14, 0x60);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11);
mipi_dsi_msleep(&dsi_ctx, 70);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29);
return dsi_ctx.accum_err;
}
@@ -292,195 +284,195 @@ static int elish_csot_init_sequence(struct panel_info *pinfo)
struct mipi_dsi_device *dsi1 = pinfo->dsi[1];
struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
/* No datasheet, so write magic init sequence directly */
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x05);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x18, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xd0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x02, 0xaf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x30);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0xee);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1d, 0x09);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x23);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0x84);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x05, 0x2d);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x06, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x07, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x08, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x09, 0x45);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x12, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x15, 0x83);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x16, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29, 0x0a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x31, 0xfe);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x32, 0xfd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x33, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x34, 0xf8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x35, 0xf5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x36, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x37, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x38, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0xf2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3a, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0xec);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3d, 0xe9);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3f, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x40, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x41, 0xe5);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x45, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x46, 0xf4);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x47, 0xe7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x48, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x49, 0xcd);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4a, 0xc0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4b, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4c, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4d, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4e, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x4f, 0x99);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x50, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x68);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x52, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x54, 0x66);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0x0e);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x58, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x59, 0xfb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5a, 0xf7);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5b, 0xf3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5c, 0xef);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5d, 0xe3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5e, 0xda);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x5f, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x60, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x61, 0xd8);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x62, 0xcb);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x63, 0xbf);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x64, 0xb3);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x65, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x66, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x67, 0xb2);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x53, 0x2c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x55, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x13);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x25, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x30, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x39, 0x46);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1a, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1c, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2a, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x2b, 0xe0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0xf0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x84, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x85, 0x0c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x51, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x91, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x92, 0x0f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x93, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x94, 0x18);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x95, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x96, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb0, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x19, 0x1f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x1b, 0x1b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x24);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x27);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x31);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd1, 0x20);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xde, 0x80);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xdf, 0x02);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x26);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x00, 0x81);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x01, 0xb0);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x22);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x6f, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x70, 0x11);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x73, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x74, 0x4d);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa0, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xa9, 0x50);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xaa, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xab, 0x28);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xad, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb8, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xb9, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xba, 0x96);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbb, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbe, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbf, 0x4b);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc0, 0x07);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc1, 0x5c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc2, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc5, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc6, 0x3f);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xc7, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xca, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcb, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xce, 0x00);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xcf, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd0, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd3, 0x08);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xd4, 0x40);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x25);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbc, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xbd, 0x1c);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x2a);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xfb, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x9a, 0x03);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0xff, 0x10);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x11);
mipi_dsi_msleep(&dsi_ctx, 70);
- mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, 0x29);
return dsi_ctx.accum_err;
}
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
index 3231e84dc66c..8a608972fc41 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -276,11 +276,8 @@ static int ota5601a_probe(struct spi_device *spi)
}
err = drm_panel_of_backlight(&panel->drm_panel);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(dev, "Failed to get backlight handle\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to get backlight handle\n");
drm_panel_add(&panel->drm_panel);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index ad35d0fb0a16..c3fbc459c7e0 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -54,9 +54,9 @@ static int rb070d30_panel_prepare(struct drm_panel *panel)
}
msleep(20);
- gpiod_set_value(ctx->gpios.power, 1);
+ gpiod_set_value_cansleep(ctx->gpios.power, 1);
msleep(20);
- gpiod_set_value(ctx->gpios.reset, 1);
+ gpiod_set_value_cansleep(ctx->gpios.reset, 1);
msleep(20);
return 0;
}
@@ -65,8 +65,8 @@ static int rb070d30_panel_unprepare(struct drm_panel *panel)
{
struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
- gpiod_set_value(ctx->gpios.reset, 0);
- gpiod_set_value(ctx->gpios.power, 0);
+ gpiod_set_value_cansleep(ctx->gpios.reset, 0);
+ gpiod_set_value_cansleep(ctx->gpios.power, 0);
regulator_disable(ctx->supply);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c
new file mode 100644
index 000000000000..e63080204af7
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fc2x01.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Nia Espera <a5b6@riseup.net>
+ * Copyright (c) 2025 David Heidelberg <david@ixit.cz>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+#define MCS_ELVSS_ON 0xb1
+
+struct samsung_s6e3fc2x01 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+};
+
+static const struct regulator_bulk_data s6e3fc2x01_supplies[] = {
+ { .supply = "vddio" },
+ { .supply = "vci" },
+ { .supply = "poc" },
+};
+
+static inline
+struct samsung_s6e3fc2x01 *to_samsung_s6e3fc2x01(struct drm_panel *panel)
+{
+ return container_of(panel, struct samsung_s6e3fc2x01, panel);
+}
+
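+/*
+ * Samsung "test key" sequences gate access to the manufacturer command
+ * levels; each level is unlocked before use and locked again afterwards.
+ */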
+#define s6e3fc2x01_test_key_on_lvl1(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0xa5, 0xa5)
+#define s6e3fc2x01_test_key_off_lvl1(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0x9f, 0x5a, 0x5a)
+#define s6e3fc2x01_test_key_on_lvl2(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a)
+#define s6e3fc2x01_test_key_off_lvl2(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5)
+#define s6e3fc2x01_test_key_on_lvl3(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0x5a, 0x5a)
+#define s6e3fc2x01_test_key_off_lvl3(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xfc, 0xa5, 0xa5)
+
+static void s6e3fc2x01_reset(struct samsung_s6e3fc2x01 *ctx)
+{
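+	/* pulse reset: assert for ~5 ms, then release and let the DDIC settle */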
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+}
+
+static int s6e3fc2x01_on(struct samsung_s6e3fc2x01 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ s6e3fc2x01_test_key_on_lvl1(&dsi_ctx);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x0a);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+
+ s6e3fc2x01_test_key_off_lvl1(&dsi_ctx);
+
+ s6e3fc2x01_test_key_on_lvl2(&dsi_ctx);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x01);
+ s6e3fc2x01_test_key_off_lvl2(&dsi_ctx);
+
+ mipi_dsi_usleep_range(&dsi_ctx, 15000, 16000);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x0f);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+
+ s6e3fc2x01_test_key_on_lvl1(&dsi_ctx);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ s6e3fc2x01_test_key_off_lvl1(&dsi_ctx);
+
+ s6e3fc2x01_test_key_on_lvl2(&dsi_ctx);
+	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xeb, 0x17, 0x41, 0x92,
+				     0x0e, 0x10, 0x82, 0x5a);
+ s6e3fc2x01_test_key_off_lvl2(&dsi_ctx);
+
+ /* Column & Page Address Setting */
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 0x0437);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 0x0923);
+
+ /* Horizontal & Vertical sync Setting */
+ s6e3fc2x01_test_key_on_lvl2(&dsi_ctx);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe8, 0x10, 0x30);
+ s6e3fc2x01_test_key_off_lvl2(&dsi_ctx);
+
+ s6e3fc2x01_test_key_on_lvl3(&dsi_ctx);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xed, 0x67);
+ s6e3fc2x01_test_key_off_lvl3(&dsi_ctx);
+
+ s6e3fc2x01_test_key_on_lvl2(&dsi_ctx);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x12);
+ s6e3fc2x01_test_key_off_lvl2(&dsi_ctx);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_usleep_range(&dsi_ctx, 1000, 2000);
+
+ s6e3fc2x01_test_key_on_lvl2(&dsi_ctx);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ELVSS_ON, 0x00, 0x01);
+ s6e3fc2x01_test_key_off_lvl2(&dsi_ctx);
+
+ s6e3fc2x01_test_key_on_lvl2(&dsi_ctx);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x00, 0xc1);
+ s6e3fc2x01_test_key_off_lvl2(&dsi_ctx);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x78);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x90);
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+
+ s6e3fc2x01_test_key_on_lvl2(&dsi_ctx);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ELVSS_ON, 0xc6, 0x00, 0x00,
+ 0x21, 0xed, 0x02, 0x08, 0x06, 0xc1, 0x27,
+ 0xfc, 0xdc, 0xe4, 0x00, 0xd9, 0xe6, 0xe7,
+ 0x00, 0xfc, 0xff, 0xea);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MCS_ELVSS_ON, 0x00, 0x00);
+ s6e3fc2x01_test_key_off_lvl2(&dsi_ctx);
+
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+
+ return dsi_ctx.accum_err;
+}
+
+static int s6e3fc2x01_enable(struct drm_panel *panel)
+{
+ struct samsung_s6e3fc2x01 *ctx = to_samsung_s6e3fc2x01(panel);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ s6e3fc2x01_test_key_on_lvl1(&dsi_ctx);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+ s6e3fc2x01_test_key_off_lvl1(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int s6e3fc2x01_off(struct samsung_s6e3fc2x01 *ctx)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
+ s6e3fc2x01_test_key_on_lvl1(&dsi_ctx);
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+
+ mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
+
+ s6e3fc2x01_test_key_on_lvl2(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 16000, 17000);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x82);
+ s6e3fc2x01_test_key_off_lvl2(&dsi_ctx);
+ mipi_dsi_usleep_range(&dsi_ctx, 16000, 17000);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+
+ s6e3fc2x01_test_key_off_lvl1(&dsi_ctx);
+
+ s6e3fc2x01_test_key_on_lvl2(&dsi_ctx);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, 0x01);
+ s6e3fc2x01_test_key_off_lvl2(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 160);
+
+ return dsi_ctx.accum_err;
+}
+
+static int s6e3fc2x01_disable(struct drm_panel *panel)
+{
+	struct samsung_s6e3fc2x01 *ctx = to_samsung_s6e3fc2x01(panel);
+
+	return s6e3fc2x01_off(ctx);
+}
+
+static int s6e3fc2x01_prepare(struct drm_panel *panel)
+{
+ struct samsung_s6e3fc2x01 *ctx = to_samsung_s6e3fc2x01(panel);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(s6e3fc2x01_supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ s6e3fc2x01_reset(ctx);
+
+ ret = s6e3fc2x01_on(ctx);
+ if (ret < 0) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(s6e3fc2x01_supplies), ctx->supplies);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e3fc2x01_unprepare(struct drm_panel *panel)
+{
+ struct samsung_s6e3fc2x01 *ctx = to_samsung_s6e3fc2x01(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(s6e3fc2x01_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode ams641rw_mode = {
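+	/* htotal (1204) * vtotal (2394) * 60 Hz / 1000 = pixel clock in kHz */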
+ .clock = (1080 + 72 + 16 + 36) * (2340 + 32 + 4 + 18) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 72,
+ .hsync_end = 1080 + 72 + 16,
+ .htotal = 1080 + 72 + 16 + 36,
+ .vdisplay = 2340,
+ .vsync_start = 2340 + 32,
+ .vsync_end = 2340 + 32 + 4,
+ .vtotal = 2340 + 32 + 4 + 18,
+ .width_mm = 68,
+ .height_mm = 145,
+};
+
+static int s6e3fc2x01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &ams641rw_mode);
+}
+
+static const struct drm_panel_funcs samsung_s6e3fc2x01_panel_funcs = {
+ .prepare = s6e3fc2x01_prepare,
+ .enable = s6e3fc2x01_enable,
+ .disable = s6e3fc2x01_disable,
+ .unprepare = s6e3fc2x01_unprepare,
+ .get_modes = s6e3fc2x01_get_modes,
+};
+
+static int s6e3fc2x01_panel_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int err;
+
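+	/* clear LPM so the brightness command goes out in high-speed mode */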
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ err = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (err < 0)
+ return err;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops s6e3fc2x01_panel_bl_ops = {
+ .update_status = s6e3fc2x01_panel_bl_update_status,
+};
+
+static struct backlight_device *
+s6e3fc2x01_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_PLATFORM,
+ .brightness = 512,
+ .max_brightness = 1023,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &s6e3fc2x01_panel_bl_ops, &props);
+}
+
+static int s6e3fc2x01_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct samsung_s6e3fc2x01 *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct samsung_s6e3fc2x01, panel,
+ &samsung_s6e3fc2x01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(s6e3fc2x01_supplies),
+ s6e3fc2x01_supplies,
+ &ctx->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+	/* leave reset deasserted so a display lit by the bootloader stays on */
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
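+	/* the DSI host must be initialized before this panel is prepared */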
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = s6e3fc2x01_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void s6e3fc2x01_remove(struct mipi_dsi_device *dsi)
+{
+ struct samsung_s6e3fc2x01 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id s6e3fc2x01_of_match[] = {
+ { .compatible = "samsung,s6e3fc2x01-ams641rw", .data = &ams641rw_mode },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s6e3fc2x01_of_match);
+
+static struct mipi_dsi_driver s6e3fc2x01_driver = {
+ .probe = s6e3fc2x01_probe,
+ .remove = s6e3fc2x01_remove,
+ .driver = {
+ .name = "panel-samsung-s6e3fc2x01",
+ .of_match_table = s6e3fc2x01_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e3fc2x01_driver);
+
+MODULE_AUTHOR("David Heidelberg <david@ixit.cz>");
+MODULE_DESCRIPTION("DRM driver for Samsung S6E3FC2X01 DDIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
index e91f50662997..7e2f4e043d62 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
@@ -7,7 +7,9 @@
#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c
new file mode 100644
index 000000000000..56e10c7c3a76
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa5x01-ams561ra01.c
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Samsung AMS561RA01 panel with S6E8AA5X01 controller.
+ *
+ * Copyright (C) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer Command Set */
+#define MCS_AIDCTL 0xb2
+#define MCS_ADAPTIVECTL 0xb5
+#define MCS_ELVSS 0xb6
+#define MCS_TEMPERCTL 0xb8
+#define MCS_PENTILE 0xc0
+#define MCS_GAMMACTL 0xca
+#define MCS_LTPSCTL 0xcb
+#define MCS_PCD 0xcc
+#define MCS_ERRFLAG 0xe7
+#define MCS_ACCESSPROT 0xf0
+#define MCS_DISPCTL 0xf2
+#define MCS_GAMMAUPD 0xf7
+
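+/* one opcode byte plus 33 gamma / 2 AID parameter bytes per command */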
+#define GAMMA_CMD_LEN 34
+#define AID_CMD_LEN 3
+
+static const struct {
+ u8 gamma[GAMMA_CMD_LEN];
+ u8 aid[AID_CMD_LEN];
+} s6e8aa5x01_ams561ra01_cmds[] = {
+ {
+ /* 5 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x94,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x8d, 0x8c, 0x8d, 0x89, 0x8c, 0x8e,
+ 0x8e, 0x8f, 0x90, 0xa3, 0xa2, 0x9a,
+ 0xcf, 0xca, 0x9f, 0xe6, 0xff, 0xb4,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0xa5 },
+ }, {
+ /* 6 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8c, 0x8a, 0x8c, 0x85, 0x88, 0x8c,
+ 0x8b, 0x8c, 0x8e, 0xa2, 0xa2, 0x9a,
+ 0xd0, 0xcc, 0xa2, 0xed, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x95 },
+ }, {
+ /* 7 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8c, 0x8a, 0x8c, 0x85, 0x88, 0x8c,
+ 0x8b, 0x8c, 0x8e, 0xa2, 0xa2, 0x99,
+ 0xc8, 0xc4, 0x9d, 0xed, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x89 },
+ }, {
+ /* 8 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x8a, 0x88, 0x8b, 0x83, 0x86, 0x8b,
+ 0x8c, 0x8b, 0x8d, 0x9d, 0x9f, 0x97,
+ 0xc7, 0xc3, 0x9c, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x7e },
+ }, {
+ /* 9 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88,
+ 0x90, 0x8f, 0x91, 0x95, 0x97, 0x94,
+ 0xc6, 0xc2, 0x9d, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x73 },
+ }, {
+ /* 10 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88,
+ 0x90, 0x8f, 0x91, 0x94, 0x97, 0x93,
+ 0xc6, 0xc2, 0x9e, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x67 },
+ }, {
+ /* 11 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8a, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x8a, 0x82, 0x84, 0x88,
+ 0x8b, 0x8b, 0x8d, 0x90, 0x93, 0x92,
+ 0xc5, 0xc1, 0x9c, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x56 },
+ }, {
+ /* 12 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x8a, 0x8c, 0x90, 0x8f,
+ 0xcd, 0xc9, 0xa1, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x4a },
+ }, {
+ /* 13 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x8a, 0x8c, 0x90, 0x8e,
+ 0xc4, 0xbf, 0x9c, 0xf5, 0xff, 0xbb,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x3b },
+ }, {
+ /* 14 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x89, 0x8c, 0x90, 0x8f,
+ 0xc2, 0xbf, 0x9c, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x35 },
+ }, {
+ /* 15 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x87, 0x86, 0x89, 0x8c, 0x90, 0x8f,
+ 0xb7, 0xb6, 0x96, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x25 },
+ }, {
+ /* 16 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x82, 0x84, 0x88,
+ 0x88, 0x86, 0x89, 0x8c, 0x90, 0x8f,
+ 0xb7, 0xb6, 0x96, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x20 },
+ }, {
+ /* 17 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86,
+ 0x86, 0x85, 0x89, 0x88, 0x8c, 0x8e,
+ 0xbf, 0xbe, 0x9c, 0xec, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x05, 0x11 },
+ }, {
+ /* 19 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86,
+ 0x87, 0x85, 0x89, 0x88, 0x8c, 0x8e,
+ 0xb3, 0xb4, 0x97, 0xeb, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xf2 },
+ }, {
+ /* 20 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x95,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x89, 0x86, 0x89, 0x7f, 0x80, 0x86,
+ 0x87, 0x85, 0x89, 0x89, 0x8c, 0x8e,
+ 0xb3, 0xb4, 0x97, 0xeb, 0xff, 0xb7,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xe4 },
+ }, {
+ /* 21 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x96,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8a, 0x88, 0x8b, 0x7d, 0x7e, 0x84,
+ 0x8c, 0x8a, 0x8c, 0x8e, 0x90, 0x8f,
+ 0xb6, 0xb6, 0x97, 0xe3, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xd5 },
+ }, {
+ /* 22 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x97,
+ 0x88, 0x89, 0x8b, 0x87, 0x87, 0x89,
+ 0x8a, 0x88, 0x8b, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x88, 0x8e, 0x90, 0x8f,
+ 0xb6, 0xb6, 0x95, 0xe3, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xc5 },
+ }, {
+ /* 24 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x97,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x88, 0x8e, 0x90, 0x8f,
+ 0xb6, 0xb6, 0x94, 0xe3, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0xa7 },
+ }, {
+ /* 25 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x98,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x87, 0x8e, 0x90, 0x8f,
+ 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x95 },
+ }, {
+ /* 27 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x99,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x83, 0x86, 0x8a,
+ 0x88, 0x87, 0x87, 0x88, 0x8b, 0x8c,
+ 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x76 },
+ }, {
+ /* 29 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x99,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x83, 0x86, 0x89,
+ 0x88, 0x87, 0x88, 0x88, 0x8b, 0x8b,
+ 0xbf, 0xbf, 0x9a, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x54 },
+ }, {
+ /* 30 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9a,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x84, 0x86, 0x8a,
+ 0x87, 0x87, 0x87, 0x88, 0x8b, 0x8b,
+ 0xbf, 0xbf, 0x99, 0xda, 0xfa, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x44 },
+ }, {
+ /* 32 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9a,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x89, 0x87, 0x8a, 0x84, 0x86, 0x8a,
+ 0x87, 0x87, 0x87, 0x89, 0x8b, 0x8b,
+ 0xbf, 0xbf, 0x98, 0xd2, 0xf2, 0xac,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x04, 0x1f },
+ }, {
+ /* 34 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x88, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8b, 0x87, 0x8b, 0x83, 0x86, 0x89,
+ 0x87, 0x87, 0x88, 0x88, 0x8b, 0x8a,
+ 0xbf, 0xbf, 0x98, 0xd2, 0xf2, 0xac,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0xff },
+ }, {
+ /* 37 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x86, 0x86, 0x86, 0x8d, 0x90, 0x8d,
+ 0xc0, 0xbf, 0x9a, 0xd2, 0xf2, 0xac,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0xd3 },
+ }, {
+ /* 39 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8a, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x87, 0x8d, 0x90, 0x8d,
+ 0xb6, 0xb6, 0x93, 0xda, 0xf9, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0xb3 },
+ }, {
+ /* 41 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x85,
+ 0x87, 0x86, 0x87, 0x8d, 0x90, 0x8d,
+ 0xb6, 0xb6, 0x94, 0xda, 0xf9, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x93 },
+ }, {
+ /* 44 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x87, 0x86, 0x86, 0x85, 0x87, 0x8a,
+ 0xbe, 0xbe, 0x99, 0xda, 0xf9, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x66 },
+ }, {
+ /* 47 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9b,
+ 0x89, 0x89, 0x8c, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x84, 0x87, 0x89,
+ 0xb4, 0xb4, 0x94, 0xe2, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x40 },
+ }, {
+ /* 50 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x84, 0x87, 0x89,
+ 0xb4, 0xb4, 0x95, 0xe2, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x03, 0x0e },
+ }, {
+ /* 53 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x85, 0x87, 0x8a,
+ 0xb4, 0xb4, 0x96, 0xe2, 0xff, 0xb3,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0xe2 },
+ }, {
+ /* 56 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x81, 0x82, 0x86,
+ 0x88, 0x86, 0x87, 0x85, 0x87, 0x8a,
+ 0xab, 0xab, 0x90, 0xdd, 0xf7, 0xaf,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0xb5 },
+ }, {
+ /* 60 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x82, 0x82, 0x87,
+ 0x83, 0x81, 0x84, 0x81, 0x84, 0x88,
+ 0xb3, 0xb3, 0x96, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x77 },
+ }, {
+ /* 64 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x98, 0x00, 0xa4, 0x00, 0x9c,
+ 0x89, 0x89, 0x8b, 0x88, 0x88, 0x8a,
+ 0x8a, 0x87, 0x8b, 0x82, 0x82, 0x87,
+ 0x83, 0x81, 0x84, 0x82, 0x84, 0x88,
+ 0xb2, 0xb3, 0x97, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x36 },
+ }, {
+ /* 68 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x9d,
+ 0x88, 0x88, 0x89, 0x89, 0x89, 0x8b,
+ 0x8a, 0x88, 0x8b, 0x7f, 0x80, 0x86,
+ 0x88, 0x86, 0x87, 0x7d, 0x7f, 0x85,
+ 0xb2, 0xb3, 0x97, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 72 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0x9c, 0x00, 0xa9, 0x00, 0xa0,
+ 0x88, 0x88, 0x89, 0x88, 0x88, 0x8a,
+ 0x8c, 0x8a, 0x8d, 0x7f, 0x81, 0x85,
+ 0x84, 0x82, 0x84, 0x85, 0x87, 0x8a,
+ 0xaa, 0xab, 0x93, 0xcf, 0xe5, 0xa8,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 77 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xa1, 0x00, 0xad, 0x00, 0xa5,
+ 0x89, 0x89, 0x8a, 0x88, 0x87, 0x89,
+ 0x8c, 0x89, 0x8d, 0x7f, 0x81, 0x85,
+ 0x84, 0x83, 0x84, 0x81, 0x83, 0x86,
+ 0xaa, 0xab, 0x93, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 82 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xa5, 0x00, 0xb0, 0x00, 0xa9,
+ 0x88, 0x89, 0x89, 0x85, 0x86, 0x89,
+ 0x8a, 0x88, 0x8b, 0x82, 0x82, 0x87,
+ 0x81, 0x80, 0x82, 0x89, 0x8b, 0x8b,
+ 0xa2, 0xa3, 0x8e, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 87 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xab, 0x00, 0xb4, 0x00, 0xad,
+ 0x88, 0x89, 0x8a, 0x84, 0x86, 0x88,
+ 0x8a, 0x88, 0x8b, 0x7f, 0x7f, 0x84,
+ 0x86, 0x84, 0x85, 0x85, 0x86, 0x88,
+ 0xa2, 0xa3, 0x8f, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 93 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xaf, 0x00, 0xb9, 0x00, 0xb1,
+ 0x88, 0x89, 0x8a, 0x84, 0x85, 0x87,
+ 0x8a, 0x89, 0x8b, 0x7e, 0x7e, 0x83,
+ 0x87, 0x86, 0x86, 0x88, 0x8a, 0x89,
+ 0x9c, 0x9c, 0x8b, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 98 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xb3, 0x00, 0xbc, 0x00, 0xb5,
+ 0x88, 0x88, 0x88, 0x84, 0x84, 0x86,
+ 0x8a, 0x88, 0x8a, 0x7f, 0x7f, 0x84,
+ 0x84, 0x83, 0x84, 0x88, 0x8a, 0x89,
+ 0x9c, 0x9c, 0x8b, 0xc0, 0xd3, 0xa1,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 105 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xb7, 0x00, 0xc0, 0x00, 0xba,
+ 0x87, 0x87, 0x88, 0x85, 0x85, 0x87,
+ 0x89, 0x88, 0x89, 0x7f, 0x7f, 0x83,
+ 0x81, 0x80, 0x82, 0x88, 0x8a, 0x89,
+ 0x9c, 0x9c, 0x8c, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 111 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xbb, 0x00, 0xc3, 0x00, 0xbe,
+ 0x87, 0x87, 0x88, 0x85, 0x85, 0x88,
+ 0x88, 0x87, 0x89, 0x80, 0x80, 0x84,
+ 0x81, 0x81, 0x82, 0x85, 0x86, 0x87,
+ 0x9c, 0x9c, 0x8b, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x15 },
+ }, {
+ /* 119 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x9c, 0x9c, 0x8c, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x02, 0x14 },
+ }, {
+ /* 126 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x9c, 0x9c, 0x8d, 0xb2, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x01, 0xde },
+ }, {
+ /* 134 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc4,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x9c, 0x9c, 0x8d, 0xa4, 0xb0, 0x92,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x01, 0x94 },
+ }, {
+ /* 143 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x82, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x82, 0x81, 0x85,
+ 0x83, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x92, 0x92, 0x89, 0xab, 0xb6, 0x96,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x01, 0x46 },
+ }, {
+ /* 152 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x83, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x81, 0x81, 0x85,
+ 0x84, 0x82, 0x83, 0x80, 0x81, 0x83,
+ 0x92, 0x92, 0x8b, 0xab, 0xb6, 0x96,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0xfa },
+ }, {
+ /* 162 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x83, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x81, 0x81, 0x84,
+ 0x84, 0x82, 0x84, 0x80, 0x81, 0x83,
+ 0x92, 0x92, 0x8b, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0xac },
+ }, {
+ /* 172 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc0, 0x00, 0xc8, 0x00, 0xc3,
+ 0x87, 0x87, 0x88, 0x83, 0x84, 0x86,
+ 0x87, 0x85, 0x87, 0x81, 0x81, 0x84,
+ 0x84, 0x82, 0x83, 0x80, 0x81, 0x84,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x57 },
+ }, {
+ /* 183 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc2, 0x00, 0xca, 0x00, 0xc5,
+ 0x86, 0x86, 0x87, 0x85, 0x84, 0x87,
+ 0x87, 0x86, 0x88, 0x7e, 0x80, 0x83,
+ 0x84, 0x82, 0x83, 0x80, 0x81, 0x83,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 195 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xc7, 0x00, 0xce, 0x00, 0xc9,
+ 0x86, 0x87, 0x86, 0x83, 0x83, 0x85,
+ 0x85, 0x84, 0x86, 0x82, 0x82, 0x85,
+ 0x80, 0x80, 0x81, 0x81, 0x81, 0x84,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 207 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xcc, 0x00, 0xd2, 0x00, 0xce,
+ 0x86, 0x86, 0x87, 0x81, 0x83, 0x84,
+ 0x84, 0x82, 0x84, 0x83, 0x83, 0x85,
+ 0x81, 0x81, 0x82, 0x7c, 0x7d, 0x81,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 220 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xd1, 0x00, 0xd6, 0x00, 0xd3,
+ 0x86, 0x86, 0x86, 0x81, 0x83, 0x84,
+ 0x84, 0x82, 0x84, 0x80, 0x80, 0x83,
+ 0x81, 0x81, 0x82, 0x7c, 0x7d, 0x81,
+ 0x93, 0x92, 0x8c, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 234 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xd6, 0x00, 0xdb, 0x00, 0xd8,
+ 0x85, 0x85, 0x85, 0x81, 0x83, 0x84,
+ 0x83, 0x82, 0x83, 0x80, 0x80, 0x82,
+ 0x84, 0x82, 0x83, 0x79, 0x79, 0x7e,
+ 0x93, 0x92, 0x8d, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 249 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xdc, 0x00, 0xe0, 0x00, 0xdd,
+ 0x84, 0x84, 0x84, 0x81, 0x82, 0x83,
+ 0x84, 0x82, 0x84, 0x7f, 0x7f, 0x82,
+ 0x81, 0x80, 0x81, 0x80, 0x81, 0x82,
+ 0x8c, 0x8c, 0x86, 0x9d, 0xa4, 0x8e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 265 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xe2, 0x00, 0xe5, 0x00, 0xe3,
+ 0x83, 0x83, 0x83, 0x81, 0x82, 0x83,
+ 0x82, 0x82, 0x83, 0x82, 0x81, 0x83,
+ 0x7f, 0x7e, 0x80, 0x7c, 0x7d, 0x80,
+ 0x8c, 0x8c, 0x86, 0x8e, 0x92, 0x87,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 282 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xe8, 0x00, 0xea, 0x00, 0xe9,
+ 0x83, 0x83, 0x83, 0x80, 0x82, 0x82,
+ 0x81, 0x82, 0x82, 0x82, 0x81, 0x82,
+ 0x81, 0x80, 0x81, 0x80, 0x80, 0x81,
+ 0x85, 0x85, 0x83, 0x8e, 0x92, 0x87,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 300 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xed, 0x00, 0xef, 0x00, 0xed,
+ 0x81, 0x82, 0x81, 0x81, 0x81, 0x82,
+ 0x82, 0x82, 0x83, 0x80, 0x80, 0x81,
+ 0x81, 0x81, 0x82, 0x83, 0x83, 0x83,
+ 0x80, 0x80, 0x7f, 0x8e, 0x92, 0x87,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 316 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xf3, 0x00, 0xf4, 0x00, 0xf3,
+ 0x80, 0x81, 0x80, 0x81, 0x81, 0x81,
+ 0x82, 0x82, 0x82, 0x81, 0x80, 0x81,
+ 0x82, 0x82, 0x83, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x7f, 0x80, 0x80, 0x80,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 333 nits */
+ { MCS_GAMMACTL,
+ 0x00, 0xf8, 0x00, 0xf8, 0x00, 0xf8,
+ 0x80, 0x81, 0x80, 0x81, 0x80, 0x81,
+ 0x81, 0x82, 0x82, 0x81, 0x80, 0x81,
+ 0x83, 0x83, 0x83, 0x7e, 0x7d, 0x7e,
+ 0x80, 0x80, 0x7f, 0x80, 0x80, 0x80,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 360 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 378 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x04, 0x01, 0x03, 0x01, 0x04,
+ 0x7f, 0x7f, 0x80, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 395 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x09, 0x01, 0x07, 0x01, 0x08,
+ 0x7e, 0x7f, 0x80, 0x7f, 0x7f, 0x7f,
+ 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7e, 0x7e, 0x7e,
+ 0x80, 0x80, 0x7f, 0x7e, 0x7e, 0x7f,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 413 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x0e, 0x01, 0x0b, 0x01, 0x0c,
+ 0x7e, 0x7f, 0x80, 0x7e, 0x7e, 0x7e,
+ 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7d, 0x7d, 0x7d,
+ 0x80, 0x80, 0x7f, 0x7d, 0x7e, 0x7e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 430 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x13, 0x01, 0x0f, 0x01, 0x10,
+ 0x7d, 0x7f, 0x80, 0x7e, 0x7e, 0x7e,
+ 0x7e, 0x7e, 0x7e, 0x7f, 0x7f, 0x7f,
+ 0x80, 0x7f, 0x7f, 0x7d, 0x7d, 0x7d,
+ 0x80, 0x80, 0x7f, 0x7c, 0x7d, 0x7e,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 448 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x18, 0x01, 0x13, 0x01, 0x14,
+ 0x7c, 0x7e, 0x80, 0x7e, 0x7e, 0x7e,
+ 0x7e, 0x7e, 0x7d, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7c, 0x7c, 0x7c,
+ 0x80, 0x80, 0x7e, 0x7b, 0x7c, 0x7d,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 465 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x1d, 0x01, 0x17, 0x01, 0x18,
+ 0x7c, 0x7e, 0x80, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7d, 0x7d, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7b, 0x7b, 0x7b,
+ 0x80, 0x80, 0x7e, 0x7a, 0x7c, 0x7d,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 483 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x22, 0x01, 0x1b, 0x01, 0x1c,
+ 0x7b, 0x7e, 0x80, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7d, 0x7c, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7a, 0x7a, 0x7a,
+ 0x80, 0x80, 0x7e, 0x79, 0x7b, 0x7c,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ }, {
+ /* 500 nits */
+ { MCS_GAMMACTL,
+ 0x01, 0x27, 0x01, 0x1f, 0x01, 0x20,
+ 0x7b, 0x7e, 0x80, 0x7d, 0x7d, 0x7d,
+ 0x7d, 0x7d, 0x7c, 0x7e, 0x7f, 0x7e,
+ 0x80, 0x7f, 0x7f, 0x7a, 0x7a, 0x7a,
+ 0x81, 0x80, 0x7e, 0x79, 0x7b, 0x7c,
+ 0x00, 0x00, 0x00, },
+ { MCS_AIDCTL, 0x00, 0x10 },
+ },
+};
+
+struct s6e8aa5x01_ams561ra01_ctx {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct backlight_device *bl;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+ u32 nr_supplies;
+};
+
+static const struct regulator_bulk_data s6e8aa5x01_ams561ra01_supplies[] = {
+ { .supply = "vdd" },
+ { .supply = "vci" },
+};
+
+static inline struct s6e8aa5x01_ams561ra01_ctx *to_ctx(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e8aa5x01_ams561ra01_ctx, panel);
+}
+
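+/*
+ * Note: the backlight brightness value is used directly as an index into
+ * s6e8aa5x01_ams561ra01_cmds[], so each level selects one gamma/AID pair
+ * from the table above and latches it with MCS_GAMMAUPD. The writes are
+ * bracketed by the level-2 test key (MCS_ACCESSPROT 0x5a 0x5a unlocks,
+ * 0xa5 0xa5 re-locks), which the DDIC appears to require for MCS access.
+ */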
+static int s6e8aa5x01_ams561ra01_update_status(struct backlight_device *bl)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = bl_get_data(bl);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+ u16 lvl = backlight_get_brightness(bl);
+
+ if (!ctx->panel.enabled)
+ return 0;
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0x5a, 0x5a);
+
+ mipi_dsi_dcs_write_buffer_multi(&dsi,
+ s6e8aa5x01_ams561ra01_cmds[lvl].gamma,
+ GAMMA_CMD_LEN);
+ mipi_dsi_dcs_write_buffer_multi(&dsi,
+ s6e8aa5x01_ams561ra01_cmds[lvl].aid,
+ AID_CMD_LEN);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_GAMMAUPD, 0x03);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0xa5, 0xa5);
+
+ return dsi.accum_err;
+}
+
+static int s6e8aa5x01_ams561ra01_prepare(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ctx->nr_supplies, ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+
+ return 0;
+}
+
+static int s6e8aa5x01_ams561ra01_unprepare(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ regulator_bulk_disable(ctx->nr_supplies, ctx->supplies);
+
+ return 0;
+}
+
+static int s6e8aa5x01_ams561ra01_enable(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi);
+ mipi_dsi_msleep(&dsi, 100);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0x5a, 0x5a);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_PENTILE, 0xd8, 0xd8, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_PCD, 0x5c);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ERRFLAG, 0xed, 0xc7, 0x23, 0x67);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_DISPCTL, 0x0c, 0x0c, 0xb9, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_LTPSCTL,
+ 0x00, 0x45, 0x10, 0x10, 0x08, 0x32, 0x54, 0x00,
+ 0x00, 0x00, 0x00, 0x07, 0x06, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x48, 0x5e, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x03, 0x00, 0x00, 0x00, 0xad, 0x00, 0x00,
+ 0x08, 0x05, 0x2a, 0x54, 0x03, 0xcc, 0x00, 0xff,
+ 0xfb, 0x03, 0x0d, 0x00, 0x11, 0x0f, 0x02, 0x03,
+ 0x0b, 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13, 0x13, 0x13, 0x00, 0x02, 0x03, 0x0b,
+ 0x0c, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13,
+ 0x13, 0x13);
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MCS_ACCESSPROT, 0xa5, 0xa5);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi);
+
+ return dsi.accum_err;
+}
+
+static int s6e8aa5x01_ams561ra01_disable(struct drm_panel *panel)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = to_ctx(panel);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi);
+ mipi_dsi_msleep(&dsi, 100);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi);
+ mipi_dsi_msleep(&dsi, 150);
+
+ return dsi.accum_err;
+}
+
+static const struct drm_display_mode s6e8aa5x01_ams561ra01_mode = {
+ .clock = (720 + 62 + 2 + 26) * (1480 + 12 + 2 + 10) * 60 / 1000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 62,
+ .hsync_end = 720 + 62 + 2,
+ .htotal = 720 + 62 + 2 + 26,
+ .vdisplay = 1480,
+ .vsync_start = 1480 + 12,
+ .vsync_end = 1480 + 12 + 2,
+ .vtotal = 1480 + 12 + 2 + 10,
+ .width_mm = 62,
+ .height_mm = 128,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
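+/*
+ * Sanity check for the pixel clock above (kHz):
+ * htotal * vtotal * refresh / 1000 = 810 * 1504 * 60 / 1000 = 73094,
+ * i.e. about 73.1 MHz for 720x1480 at 60 Hz.
+ */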
+
+static int s6e8aa5x01_ams561ra01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector,
+ &s6e8aa5x01_ams561ra01_mode);
+}
+
+static const struct backlight_ops s6e8aa5x01_ams561ra01_bl_ops = {
+ .update_status = s6e8aa5x01_ams561ra01_update_status,
+};
+
+static const struct drm_panel_funcs s6e8aa5x01_ams561ra01_panel_funcs = {
+ .prepare = s6e8aa5x01_ams561ra01_prepare,
+ .unprepare = s6e8aa5x01_ams561ra01_unprepare,
+ .enable = s6e8aa5x01_ams561ra01_enable,
+ .disable = s6e8aa5x01_ams561ra01_disable,
+ .get_modes = s6e8aa5x01_ams561ra01_get_modes,
+};
+
+static int s6e8aa5x01_ams561ra01_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e8aa5x01_ams561ra01_ctx *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct s6e8aa5x01_ams561ra01_ctx, panel,
+ &s6e8aa5x01_ams561ra01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->nr_supplies = ARRAY_SIZE(s6e8aa5x01_ams561ra01_supplies);
+ ret = devm_regulator_bulk_get_const(dev, ctx->nr_supplies,
+ s6e8aa5x01_ams561ra01_supplies,
+ &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "failed to get reset-gpios\n");
+
+ ctx->bl = devm_backlight_device_register(dev, dev_name(dev), dev, ctx,
+ &s6e8aa5x01_ams561ra01_bl_ops,
+ NULL);
+ if (IS_ERR(ctx->bl))
+ return dev_err_probe(dev, PTR_ERR(ctx->bl),
+ "failed to register backlight device\n");
+
+ ctx->bl->props.type = BACKLIGHT_PLATFORM;
+ ctx->bl->props.brightness = ARRAY_SIZE(s6e8aa5x01_ams561ra01_cmds) - 1;
+ ctx->bl->props.max_brightness = ctx->bl->props.brightness;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_NO_HFP;
+
+ ctx->panel.prepare_prev_first = true;
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void s6e8aa5x01_ams561ra01_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e8aa5x01_ams561ra01_ctx *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id s6e8aa5x01_ams561ra01_of_device_id[] = {
+ { .compatible = "samsung,s6e8aa5x01-ams561ra01" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6e8aa5x01_ams561ra01_of_device_id);
+
+static struct mipi_dsi_driver s6e8aa5x01_ams561ra01_dsi_driver = {
+ .probe = s6e8aa5x01_ams561ra01_probe,
+ .remove = s6e8aa5x01_ams561ra01_remove,
+ .driver = {
+ .name = "panel-samsung-s6e8aa5x01-ams561ra01",
+ .of_match_table = s6e8aa5x01_ams561ra01_of_device_id,
+ },
+};
+module_mipi_dsi_driver(s6e8aa5x01_ams561ra01_dsi_driver);
+
+MODULE_AUTHOR("Kaustabh Chakraborty <kauschluss@disroot.org>");
+MODULE_DESCRIPTION("Samsung AMS561RA01 Panel with S6E8AA5X01 Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 064258217d50..e00a497a7c96 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -16,20 +16,32 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
struct sofef00_panel {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
- struct regulator *supply;
+ struct regulator_bulk_data *supplies;
struct gpio_desc *reset_gpio;
};
+static const struct regulator_bulk_data sofef00_supplies[] = {
+ { .supply = "vddio" },
+ { .supply = "vci" },
+ { .supply = "poc" },
+};
+
static inline
struct sofef00_panel *to_sofef00_panel(struct drm_panel *panel)
{
return container_of(panel, struct sofef00_panel, panel);
}
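+/*
+ * Helpers for the Samsung level-2 "test key": 0xf0, 0x5a 0x5a unlocks and
+ * 0xf0, 0xa5 0xa5 re-locks vendor register access. The 0xb0/0xb6 writes
+ * below presumably only take effect while the key is held on.
+ */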
+#define sofef00_test_key_on_lvl2(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0x5a, 0x5a)
+#define sofef00_test_key_off_lvl2(ctx) \
+ mipi_dsi_dcs_write_seq_multi(ctx, 0xf0, 0xa5, 0xa5)
+
static void sofef00_panel_reset(struct sofef00_panel *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
@@ -50,18 +62,26 @@ static int sofef00_panel_on(struct sofef00_panel *ctx)
mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
-
+ sofef00_test_key_on_lvl2(&dsi_ctx);
mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ sofef00_test_key_off_lvl2(&dsi_ctx);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a);
+ sofef00_test_key_on_lvl2(&dsi_ctx);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x07);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x12);
- mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5);
+ sofef00_test_key_off_lvl2(&dsi_ctx);
+
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ return dsi_ctx.accum_err;
+}
+
+static int sofef00_enable(struct drm_panel *panel)
+{
+ struct sofef00_panel *ctx = to_sofef00_panel(panel);
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+
mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
return dsi_ctx.accum_err;
@@ -72,8 +92,6 @@ static int sofef00_panel_off(struct sofef00_panel *ctx)
struct mipi_dsi_device *dsi = ctx->dsi;
struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
- dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
-
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
mipi_dsi_msleep(&dsi_ctx, 40);
@@ -86,70 +104,70 @@ static int sofef00_panel_off(struct sofef00_panel *ctx)
static int sofef00_panel_prepare(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
- struct device *dev = &ctx->dsi->dev;
int ret;
- ret = regulator_enable(ctx->supply);
- if (ret < 0) {
- dev_err(dev, "Failed to enable regulator: %d\n", ret);
+ ret = regulator_bulk_enable(ARRAY_SIZE(sofef00_supplies), ctx->supplies);
+ if (ret < 0)
return ret;
- }
sofef00_panel_reset(ctx);
ret = sofef00_panel_on(ctx);
if (ret < 0) {
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies);
return ret;
}
return 0;
}
-static int sofef00_panel_unprepare(struct drm_panel *panel)
+static int sofef00_disable(struct drm_panel *panel)
{
struct sofef00_panel *ctx = to_sofef00_panel(panel);
sofef00_panel_off(ctx);
- regulator_disable(ctx->supply);
return 0;
}
-static const struct drm_display_mode enchilada_panel_mode = {
+static int sofef00_panel_unprepare(struct drm_panel *panel)
+{
+ struct sofef00_panel *ctx = to_sofef00_panel(panel);
+
+ regulator_bulk_disable(ARRAY_SIZE(sofef00_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode ams628nw01_panel_mode = {
.clock = (1080 + 112 + 16 + 36) * (2280 + 36 + 8 + 12) * 60 / 1000,
+
.hdisplay = 1080,
.hsync_start = 1080 + 112,
.hsync_end = 1080 + 112 + 16,
.htotal = 1080 + 112 + 16 + 36,
+
.vdisplay = 2280,
.vsync_start = 2280 + 36,
.vsync_end = 2280 + 36 + 8,
.vtotal = 2280 + 36 + 8 + 12,
+
.width_mm = 68,
.height_mm = 145,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static int sofef00_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector)
{
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &enchilada_panel_mode);
- if (!mode)
- return -ENOMEM;
-
- drm_mode_set_name(mode);
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
- drm_mode_probed_add(connector, mode);
-
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, &ams628nw01_panel_mode);
}
static const struct drm_panel_funcs sofef00_panel_panel_funcs = {
.prepare = sofef00_panel_prepare,
+ .enable = sofef00_enable,
+ .disable = sofef00_disable,
.unprepare = sofef00_panel_unprepare,
.get_modes = sofef00_panel_get_modes,
};
@@ -160,10 +178,14 @@ static int sofef00_panel_bl_update_status(struct backlight_device *bl)
int err;
u16 brightness = (u16)backlight_get_brightness(bl);
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
err = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
if (err < 0)
return err;
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
return 0;
}
@@ -177,7 +199,7 @@ sofef00_create_backlight(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
const struct backlight_properties props = {
.type = BACKLIGHT_PLATFORM,
- .brightness = 1023,
+ .brightness = 512,
.max_brightness = 1023,
};
@@ -197,10 +219,12 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- ctx->supply = devm_regulator_get(dev, "vddio");
- if (IS_ERR(ctx->supply))
- return dev_err_probe(dev, PTR_ERR(ctx->supply),
- "Failed to get vddio regulator\n");
+ ret = devm_regulator_bulk_get_const(dev,
+ ARRAY_SIZE(sofef00_supplies),
+ sofef00_supplies,
+ &ctx->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -212,6 +236,10 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = sofef00_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
@@ -243,7 +271,8 @@ static void sofef00_panel_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id sofef00_panel_of_match[] = {
- { .compatible = "samsung,sofef00" },
+ { .compatible = "samsung,sofef00" }, /* legacy */
+ { .compatible = "samsung,sofef00-ams628nw01" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sofef00_panel_of_match);
@@ -252,7 +281,7 @@ static struct mipi_dsi_driver sofef00_panel_driver = {
.probe = sofef00_panel_probe,
.remove = sofef00_panel_remove,
.driver = {
- .name = "panel-oneplus6",
+ .name = "panel-samsung-sofef00",
.of_match_table = sofef00_panel_of_match,
},
};
@@ -260,5 +289,5 @@ static struct mipi_dsi_driver sofef00_panel_driver = {
module_mipi_dsi_driver(sofef00_panel_driver);
MODULE_AUTHOR("Casey Connolly <casey.connolly@linaro.org>");
-MODULE_DESCRIPTION("DRM driver for Samsung AMOLED DSI panels found in OnePlus 6/6T phones");
+MODULE_DESCRIPTION("DRM driver for Samsung SOFEF00 DDIC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c
new file mode 100644
index 000000000000..8c00fde1c4a9
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-lq079l1sx01.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 XiaoMi, Inc.
+ * Copyright (c) 2024 Svyatoslav Ryhel <clamor95@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+static const struct regulator_bulk_data sharp_supplies[] = {
+ { .supply = "avdd" }, { .supply = "vddio" },
+ { .supply = "vsp" }, { .supply = "vsn" },
+};
+
+struct sharp_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi[2];
+
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data *supplies;
+
+ const struct drm_display_mode *mode;
+};
+
+static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct sharp_panel, panel);
+}
+
+static void sharp_panel_reset(struct sharp_panel *sharp)
+{
+ gpiod_set_value_cansleep(sharp->reset_gpio, 1);
+ usleep_range(2000, 3000);
+ gpiod_set_value_cansleep(sharp->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int sharp_panel_prepare(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+ struct device *dev = panel->dev;
+ struct mipi_dsi_device *dsi0 = sharp->dsi[0];
+ struct mipi_dsi_device *dsi1 = sharp->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(sharp_supplies), sharp->supplies);
+ if (ret) {
+ dev_err(dev, "error enabling regulators (%d)\n", ret);
+ return ret;
+ }
+
+ msleep(24);
+
+ if (sharp->reset_gpio)
+ sharp_panel_reset(sharp);
+
+ msleep(32);
+
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1,
+ MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1,
+ MIPI_DCS_WRITE_POWER_SAVE, 0x01);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1,
+ MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c);
+
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_SET_DISPLAY_ON);
+
+	return dsi_ctx.accum_err;
+}
+
+static int sharp_panel_unprepare(struct drm_panel *panel)
+{
+ struct sharp_panel *sharp = to_sharp_panel(panel);
+ struct mipi_dsi_device *dsi0 = sharp->dsi[0];
+ struct mipi_dsi_device *dsi1 = sharp->dsi[1];
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL };
+
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_SET_DISPLAY_OFF);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+ mipi_dsi_dual_dcs_write_seq_multi(&dsi_ctx, dsi0, dsi1, MIPI_DCS_ENTER_SLEEP_MODE);
+ mipi_dsi_msleep(&dsi_ctx, 150);
+
+ if (sharp->reset_gpio)
+ gpiod_set_value_cansleep(sharp->reset_gpio, 1);
+
+ return regulator_bulk_disable(ARRAY_SIZE(sharp_supplies), sharp->supplies);
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = (1536 + 136 + 28 + 28) * (2048 + 14 + 8 + 2) * 60 / 1000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 136,
+ .hsync_end = 1536 + 136 + 28,
+ .htotal = 1536 + 136 + 28 + 28,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 14,
+ .vsync_end = 2048 + 14 + 8,
+ .vtotal = 2048 + 14 + 8 + 2,
+ .width_mm = 120,
+ .height_mm = 160,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int sharp_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &default_mode);
+}
+
+static const struct drm_panel_funcs sharp_panel_funcs = {
+ .unprepare = sharp_panel_unprepare,
+ .prepare = sharp_panel_prepare,
+ .get_modes = sharp_panel_get_modes,
+};
+
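+/*
+ * The panel is driven over two DSI links (one per half, DSI0 left, DSI1
+ * right). probe() binds to the first link and registers a second
+ * peripheral on the host found through OF graph port 1, so both links
+ * are configured and attached together.
+ */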
+static int sharp_panel_probe(struct mipi_dsi_device *dsi)
+{
+ const struct mipi_dsi_device_info info = { "sharp-link1", 0, NULL };
+ struct device *dev = &dsi->dev;
+ struct device_node *dsi_r;
+ struct mipi_dsi_host *dsi_r_host;
+ struct sharp_panel *sharp;
+ int i, ret;
+
+ sharp = devm_drm_panel_alloc(dev, struct sharp_panel, panel,
+ &sharp_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(sharp))
+ return PTR_ERR(sharp);
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(sharp_supplies),
+ sharp_supplies, &sharp->supplies);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get supplies\n");
+
+ sharp->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(sharp->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(sharp->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ /* Panel is always connected to two DSI hosts, DSI0 is left, DSI1 is right */
+ dsi_r = of_graph_get_remote_node(dsi->dev.of_node, 1, -1);
+ if (!dsi_r)
+ return dev_err_probe(dev, -ENODEV, "failed to find second DSI host node\n");
+
+ dsi_r_host = of_find_mipi_dsi_host_by_node(dsi_r);
+ of_node_put(dsi_r);
+ if (!dsi_r_host)
+ return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
+
+ sharp->dsi[1] = devm_mipi_dsi_device_register_full(dev, dsi_r_host, &info);
+ if (IS_ERR(sharp->dsi[1]))
+ return dev_err_probe(dev, PTR_ERR(sharp->dsi[1]),
+ "second link registration failed\n");
+
+ sharp->dsi[0] = dsi;
+ mipi_dsi_set_drvdata(dsi, sharp);
+
+ ret = drm_panel_of_backlight(&sharp->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&sharp->panel);
+
+ for (i = 0; i < ARRAY_SIZE(sharp->dsi); i++) {
+ if (!sharp->dsi[i])
+ continue;
+
+ sharp->dsi[i]->lanes = 4;
+ sharp->dsi[i]->format = MIPI_DSI_FMT_RGB888;
+ sharp->dsi[i]->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM;
+
+ ret = devm_mipi_dsi_attach(dev, sharp->dsi[i]);
+ if (ret < 0) {
+ drm_panel_remove(&sharp->panel);
+ return dev_err_probe(dev, ret, "failed to attach to DSI%d\n", i);
+ }
+ }
+
+ return 0;
+}
+
+static void sharp_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&sharp->panel);
+}
+
+static const struct of_device_id sharp_of_match[] = {
+ { .compatible = "sharp,lq079l1sx01" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sharp_of_match);
+
+static struct mipi_dsi_driver sharp_panel_driver = {
+ .driver = {
+ .name = "panel-sharp-lq079l1sx01",
+ .of_match_table = sharp_of_match,
+ },
+ .probe = sharp_panel_probe,
+ .remove = sharp_panel_remove,
+};
+module_mipi_dsi_driver(sharp_panel_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Sharp LQ079L1SX01 panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3333d4a07504..b26b682826bc 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2889,6 +2889,38 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
+static const struct display_timing jutouch_jt101tm023_timing = {
+ .pixelclock = { 66300000, 72400000, 78900000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 12, 72, 132 },
+ .hback_porch = { 88, 88, 88 },
+ .hsync_len = { 10, 10, 48 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 1, 15, 49 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 5, 6, 13 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH,
+};
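+/*
+ * Each display_timing field above is a { min, typical, max } triple; the
+ * typical values are what get programmed when the timing is turned into
+ * a fixed mode.
+ */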
+
+static const struct panel_desc jutouch_jt101tm023 = {
+ .timings = &jutouch_jt101tm023_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .enable = 50,
+ .disable = 50,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing koe_tx14d24vm1bpa_timing = {
.pixelclock = { 5580000, 5850000, 6200000 },
.hactive = { 320, 320, 320 },
@@ -3716,6 +3748,29 @@ static const struct panel_desc olimex_lcd_olinuxino_43ts = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode olimex_lcd_olinuxino_5cts_mode = {
+ .clock = 33300,
+ .hdisplay = 800,
+ .hsync_start = 800 + 210,
+ .hsync_end = 800 + 210 + 20,
+ .htotal = 800 + 210 + 20 + 26,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 10,
+ .vtotal = 480 + 22 + 10 + 13,
+};
+
+static const struct panel_desc olimex_lcd_olinuxino_5cts = {
+ .modes = &olimex_lcd_olinuxino_5cts_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct display_timing ontat_kd50g21_40nt_a1_timing = {
.pixelclock = { 30000000, 30000000, 50000000 },
.hactive = { 800, 800, 800 },
@@ -4051,6 +4106,30 @@ static const struct panel_desc qishenglong_gopher2b_lcd = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing raystar_rff500f_awh_dnn_timing = {
+ .pixelclock = { 23000000, 25000000, 27000000 },
+ .hactive = { 800, 800, 800 },
+ .hback_porch = { 4, 8, 48 },
+ .hfront_porch = { 4, 8, 48 },
+ .hsync_len = { 2, 4, 8 },
+ .vactive = { 480, 480, 480 },
+ .vback_porch = { 4, 8, 12 },
+ .vfront_porch = { 4, 8, 12 },
+ .vsync_len = { 2, 4, 8 },
+};
+
+static const struct panel_desc raystar_rff500f_awh_dnn = {
+ .timings = &raystar_rff500f_awh_dnn_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 108,
+ .height = 65,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing rocktech_rk043fn48h_timing = {
.pixelclock = { 6000000, 9000000, 12000000 },
.hactive = { 480, 480, 480 },
@@ -4168,6 +4247,37 @@ static const struct panel_desc samsung_ltl101al01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing samsung_ltl106al01_timing = {
+ .pixelclock = { 71980000, 71980000, 71980000 },
+ .hactive = { 1366, 1366, 1366 },
+ .hfront_porch = { 56, 56, 56 },
+ .hback_porch = { 106, 106, 106 },
+ .hsync_len = { 14, 14, 14 },
+ .vactive = { 768, 768, 768 },
+ .vfront_porch = { 3, 3, 3 },
+ .vback_porch = { 6, 6, 6 },
+ .vsync_len = { 1, 1, 1 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
+};
+
+static const struct panel_desc samsung_ltl106al01 = {
+ .timings = &samsung_ltl106al01_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 235,
+ .height = 132,
+ },
+ .delay = {
+ .prepare = 5,
+ .enable = 10,
+ .disable = 10,
+ .unprepare = 5,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -5186,6 +5296,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
+ .compatible = "jutouch,jt101tm023",
+ .data = &jutouch_jt101tm023,
+ }, {
.compatible = "koe,tx14d24vm1bpa",
.data = &koe_tx14d24vm1bpa,
}, {
@@ -5279,6 +5392,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "olimex,lcd-olinuxino-43-ts",
.data = &olimex_lcd_olinuxino_43ts,
}, {
+ .compatible = "olimex,lcd-olinuxino-5-cts",
+ .data = &olimex_lcd_olinuxino_5cts,
+ }, {
.compatible = "ontat,kd50g21-40nt-a1",
.data = &ontat_kd50g21_40nt_a1,
}, {
@@ -5318,6 +5434,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "qishenglong,gopher2b-lcd",
.data = &qishenglong_gopher2b_lcd,
}, {
+ .compatible = "raystar,rff500f-awh-dnn",
+ .data = &raystar_rff500f_awh_dnn,
+ }, {
.compatible = "rocktech,rk043fn48h",
.data = &rocktech_rk043fn48h,
}, {
@@ -5330,6 +5449,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "samsung,ltl101al01",
.data = &samsung_ltl101al01,
}, {
+ .compatible = "samsung,ltl106al01",
+ .data = &samsung_ltl106al01,
+ }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
@@ -5539,34 +5661,6 @@ static const struct panel_desc_dsi boe_tv080wum_nl0 = {
.lanes = 4,
};
-static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
- .clock = 71000,
- .hdisplay = 800,
- .hsync_start = 800 + 32,
- .hsync_end = 800 + 32 + 1,
- .htotal = 800 + 32 + 1 + 57,
- .vdisplay = 1280,
- .vsync_start = 1280 + 28,
- .vsync_end = 1280 + 28 + 1,
- .vtotal = 1280 + 28 + 1 + 14,
-};
-
-static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
- .desc = {
- .modes = &lg_ld070wx3_sl01_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 94,
- .height = 151,
- },
- .connector_type = DRM_MODE_CONNECTOR_DSI,
- },
- .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
- .format = MIPI_DSI_FMT_RGB888,
- .lanes = 4,
-};
-
static const struct drm_display_mode lg_lh500wx1_sd03_mode = {
.clock = 67000,
.hdisplay = 720,
@@ -5691,9 +5785,6 @@ static const struct of_device_id dsi_of_match[] = {
.compatible = "boe,tv080wum-nl0",
.data = &boe_tv080wum_nl0
}, {
- .compatible = "lg,ld070wx3-sl01",
- .data = &lg_ld070wx3_sl01
- }, {
.compatible = "lg,lh500wx1-sd03",
.data = &lg_lh500wx1_sd03
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 1a007a244d84..6c348fe28955 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Driver for panels based on Sitronix ST7703 controller, souch as:
+ * Driver for panels based on Sitronix ST7703 controller, such as:
*
* - Rocktech jh057n00900 5.5" MIPI-DSI panel
*
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 04d91929eedd..d5f821d6b23c 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = {
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
+/*
+ * The mode data for this panel has been reverse engineered without access
+ * to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all
+ * other panels results in garbage data on the display.
+ */
static const struct drm_display_mode t28cp45tn89_mode = {
.clock = 6008,
.hdisplay = 240,
@@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = {
.vtotal = 320 + 8 + 4 + 4,
.width_mm = 43,
.height_mm = 57,
- .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct drm_display_mode et028013dma_mode = {
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
index 4854437e2899..6d40b9ddfe02 100644
--- a/drivers/gpu/drm/panel/panel-summit.c
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/backlight.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <drm/drm_device.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_mode.h>
diff --git a/drivers/gpu/drm/panel/panel-synaptics-tddi.c b/drivers/gpu/drm/panel/panel-synaptics-tddi.c
new file mode 100644
index 000000000000..0aea1854710e
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-synaptics-tddi.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synaptics TDDI display panel driver.
+ *
+ * Copyright (C) 2025 Kaustabh Chakraborty <kauschluss@disroot.org>
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct tddi_panel_data {
+ u8 lanes;
+ /* wait timings for panel enable */
+ u8 delay_ms_sleep_exit;
+ u8 delay_ms_display_on;
+ /* wait timings for panel disable */
+ u8 delay_ms_display_off;
+ u8 delay_ms_sleep_enter;
+};
+
+struct tddi_ctx {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct drm_display_mode mode;
+ struct backlight_device *backlight;
+ const struct tddi_panel_data *data;
+ struct regulator_bulk_data *supplies;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *backlight_gpio;
+};
+
+static const struct regulator_bulk_data tddi_supplies[] = {
+ { .supply = "vio" },
+ { .supply = "vsn" },
+ { .supply = "vsp" },
+};
+
+static inline struct tddi_ctx *to_tddi_ctx(struct drm_panel *panel)
+{
+ return container_of(panel, struct tddi_ctx, panel);
+}
+
+static int tddi_update_status(struct backlight_device *backlight)
+{
+ struct tddi_ctx *ctx = bl_get_data(backlight);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+ u8 brightness = backlight_get_brightness(backlight);
+
+ if (!ctx->panel.enabled)
+ return 0;
+
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi, brightness);
+
+ return dsi.accum_err;
+}
+
+static int tddi_prepare(struct drm_panel *panel)
+{
+ struct tddi_ctx *ctx = to_tddi_ctx(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(tddi_supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(ctx->backlight_gpio, 0);
+ usleep_range(5000, 6000);
+
+ return 0;
+}
+
+static int tddi_unprepare(struct drm_panel *panel)
+{
+ struct tddi_ctx *ctx = to_tddi_ctx(panel);
+
+ gpiod_set_value_cansleep(ctx->backlight_gpio, 1);
+ usleep_range(5000, 6000);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ regulator_bulk_disable(ARRAY_SIZE(tddi_supplies), ctx->supplies);
+
+ return 0;
+}
+
+static int tddi_enable(struct drm_panel *panel)
+{
+ struct tddi_ctx *ctx = to_tddi_ctx(panel);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+ u8 brightness = ctx->backlight->props.brightness;
+
+ mipi_dsi_dcs_write_seq_multi(&dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x0c);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi);
+ mipi_dsi_msleep(&dsi, ctx->data->delay_ms_sleep_exit);
+
+ /* sync the panel with the backlight's brightness level */
+ mipi_dsi_dcs_set_display_brightness_multi(&dsi, brightness);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi);
+ mipi_dsi_msleep(&dsi, ctx->data->delay_ms_display_on);
+
+ return dsi.accum_err;
+}
+
+static int tddi_disable(struct drm_panel *panel)
+{
+ struct tddi_ctx *ctx = to_tddi_ctx(panel);
+ struct mipi_dsi_multi_context dsi = { .dsi = ctx->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi);
+ mipi_dsi_msleep(&dsi, ctx->data->delay_ms_display_off);
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi);
+ mipi_dsi_msleep(&dsi, ctx->data->delay_ms_sleep_enter);
+
+ return dsi.accum_err;
+}
+
+static int tddi_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct tddi_ctx *ctx = to_tddi_ctx(panel);
+
+ return drm_connector_helper_get_modes_fixed(connector, &ctx->mode);
+}
+
+static const struct backlight_ops tddi_bl_ops = {
+ .update_status = tddi_update_status,
+};
+
+static const struct backlight_properties tddi_bl_props = {
+ .type = BACKLIGHT_PLATFORM,
+ .brightness = 255,
+ .max_brightness = 255,
+};
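+/*
+ * Brightness is forwarded unscaled as a standard DCS set_display_brightness
+ * command, hence the fixed 0-255 range above; no per-level command table is
+ * needed for these controllers.
+ */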
+
+static const struct drm_panel_funcs tddi_drm_panel_funcs = {
+ .prepare = tddi_prepare,
+ .unprepare = tddi_unprepare,
+ .enable = tddi_enable,
+ .disable = tddi_disable,
+ .get_modes = tddi_get_modes,
+};
+
+static int tddi_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct tddi_ctx *ctx;
+ int ret;
+
+ ctx = devm_drm_panel_alloc(dev, struct tddi_ctx, panel,
+ &tddi_drm_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->data = of_device_get_match_data(dev);
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(tddi_supplies),
+ tddi_supplies, &ctx->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ ctx->backlight_gpio = devm_gpiod_get_optional(dev, "backlight", GPIOD_ASIS);
+ if (IS_ERR(ctx->backlight_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->backlight_gpio),
+ "failed to get backlight-gpios\n");
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "failed to get reset-gpios\n");
+
+ ret = of_get_drm_panel_display_mode(dev->of_node, &ctx->mode, NULL);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get panel timings\n");
+
+ ctx->backlight = devm_backlight_device_register(dev, dev_name(dev), dev,
+ ctx, &tddi_bl_ops,
+ &tddi_bl_props);
+ if (IS_ERR(ctx->backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->backlight),
+ "failed to register backlight device");
+
+ dsi->lanes = ctx->data->lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_NO_HFP;
+
+ ctx->panel.prepare_prev_first = true;
+ drm_panel_add(&ctx->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void tddi_remove(struct mipi_dsi_device *dsi)
+{
+ struct tddi_ctx *ctx = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct tddi_panel_data td4101_panel_data = {
+ .lanes = 2,
+ /* wait timings for panel enable */
+ .delay_ms_sleep_exit = 100,
+ .delay_ms_display_on = 0,
+ /* wait timings for panel disable */
+ .delay_ms_display_off = 20,
+ .delay_ms_sleep_enter = 90,
+};
+
+static const struct tddi_panel_data td4300_panel_data = {
+ .lanes = 4,
+ /* wait timings for panel enable */
+ .delay_ms_sleep_exit = 100,
+ .delay_ms_display_on = 0,
+ /* wait timings for panel disable */
+ .delay_ms_display_off = 0,
+ .delay_ms_sleep_enter = 0,
+};
+
+static const struct of_device_id tddi_of_device_id[] = {
+ {
+ .compatible = "syna,td4101-panel",
+ .data = &td4101_panel_data,
+ }, {
+ .compatible = "syna,td4300-panel",
+ .data = &td4300_panel_data,
+ }, { }
+};
+MODULE_DEVICE_TABLE(of, tddi_of_device_id);
+
+static struct mipi_dsi_driver tddi_dsi_driver = {
+ .probe = tddi_probe,
+ .remove = tddi_remove,
+ .driver = {
+ .name = "panel-synaptics-tddi",
+ .of_match_table = tddi_of_device_id,
+ },
+};
+module_mipi_dsi_driver(tddi_dsi_driver);
+
+MODULE_AUTHOR("Kaustabh Chakraborty <kauschluss@disroot.org>");
+MODULE_DESCRIPTION("Synaptics TDDI Display Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index 909c280eab1f..e5e688cf98fd 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -3,6 +3,7 @@
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
+#include <linux/backlight.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/property.h>
@@ -20,6 +21,8 @@ struct visionox_rm69299_panel_desc {
const struct drm_display_mode *mode;
const u8 *init_seq;
unsigned int init_seq_len;
+ int max_brightness;
+ int initial_brightness;
};
struct visionox_rm69299 {
@@ -192,7 +195,7 @@ static int visionox_rm69299_unprepare(struct drm_panel *panel)
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
- ctx->dsi->mode_flags = 0;
+ ctx->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
@@ -247,7 +250,7 @@ static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
};
static const struct drm_display_mode visionox_rm69299_1080x2160_60hz = {
- .clock = 158695,
+ .clock = (2160 + 8 + 4 + 4) * (1080 + 26 + 2 + 36) * 60 / 1000,
.hdisplay = 1080,
.hsync_start = 1080 + 26,
.hsync_end = 1080 + 26 + 2,
@@ -285,6 +288,63 @@ static const struct drm_panel_funcs visionox_rm69299_drm_funcs = {
.get_modes = visionox_rm69299_get_modes,
};
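+/*
+ * The DCS brightness transfers below are issued with MIPI_DSI_MODE_LPM
+ * temporarily cleared, i.e. in high-speed mode; the controller apparently
+ * does not accept them as low-power transfers.
+ */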
+static int visionox_rm69299_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness;
+}
+
+static int visionox_rm69299_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static const struct backlight_ops visionox_rm69299_bl_ops = {
+ .update_status = visionox_rm69299_bl_update_status,
+ .get_brightness = visionox_rm69299_bl_get_brightness,
+};
+
+static struct backlight_device *
+visionox_rm69299_create_backlight(struct visionox_rm69299 *ctx)
+{
+ struct device *dev = &ctx->dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = ctx->desc->initial_brightness,
+ .max_brightness = ctx->desc->max_brightness,
+ };
+
+ if (!ctx->desc->max_brightness)
+ return NULL;
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, ctx->dsi,
+ &visionox_rm69299_bl_ops,
+ &props);
+}
+
static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
{
struct device *dev = &dsi->dev;
@@ -316,6 +376,11 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
+ ctx->panel.backlight = visionox_rm69299_create_backlight(ctx);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
drm_panel_add(&ctx->panel);
dsi->lanes = 4;
@@ -353,6 +418,8 @@ const struct visionox_rm69299_panel_desc visionox_rm69299_shift_desc = {
.mode = &visionox_rm69299_1080x2160_60hz,
.init_seq = (const u8 *)visionox_rm69299_1080x2160_60hz_init_seq,
.init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2160_60hz_init_seq),
+ .max_brightness = 255,
+ .initial_brightness = 50,
};
static const struct of_device_id visionox_rm69299_of_match[] = {
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 5d0dce10336b..b51c30778811 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -8,6 +8,8 @@
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
+#include <drm/drm_print.h>
+
#include "panfrost_device.h"
#include "panfrost_devfreq.h"
@@ -74,7 +76,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
- dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ dev_dbg(pfdev->base.dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
status->busy_time, status->total_time,
status->busy_time / (status->total_time / 100),
status->current_frequency / 1000 / 1000);
@@ -119,7 +121,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
int ret;
struct dev_pm_opp *opp;
unsigned long cur_freq;
- struct device *dev = &pfdev->pdev->dev;
+ struct device *dev = pfdev->base.dev;
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 04bec27449cb..c61b97af120c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -20,9 +20,9 @@
static int panfrost_reset_init(struct panfrost_device *pfdev)
{
- pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev);
+ pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->base.dev);
if (IS_ERR(pfdev->rstc)) {
- dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
+ dev_err(pfdev->base.dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
return PTR_ERR(pfdev->rstc);
}
@@ -39,22 +39,22 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
int err;
unsigned long rate;
- pfdev->clock = devm_clk_get(pfdev->dev, NULL);
+ pfdev->clock = devm_clk_get(pfdev->base.dev, NULL);
if (IS_ERR(pfdev->clock)) {
- dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock));
+ dev_err(pfdev->base.dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock));
return PTR_ERR(pfdev->clock);
}
rate = clk_get_rate(pfdev->clock);
- dev_info(pfdev->dev, "clock rate = %lu\n", rate);
+ dev_info(pfdev->base.dev, "clock rate = %lu\n", rate);
err = clk_prepare_enable(pfdev->clock);
if (err)
return err;
- pfdev->bus_clock = devm_clk_get_optional(pfdev->dev, "bus");
+ pfdev->bus_clock = devm_clk_get_optional(pfdev->base.dev, "bus");
if (IS_ERR(pfdev->bus_clock)) {
- dev_err(pfdev->dev, "get bus_clock failed %ld\n",
+ dev_err(pfdev->base.dev, "get bus_clock failed %ld\n",
PTR_ERR(pfdev->bus_clock));
err = PTR_ERR(pfdev->bus_clock);
goto disable_clock;
@@ -62,7 +62,7 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
if (pfdev->bus_clock) {
rate = clk_get_rate(pfdev->bus_clock);
- dev_info(pfdev->dev, "bus_clock rate = %lu\n", rate);
+ dev_info(pfdev->base.dev, "bus_clock rate = %lu\n", rate);
err = clk_prepare_enable(pfdev->bus_clock);
if (err)
@@ -87,7 +87,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
int ret, i;
- pfdev->regulators = devm_kcalloc(pfdev->dev, pfdev->comp->num_supplies,
+ pfdev->regulators = devm_kcalloc(pfdev->base.dev, pfdev->comp->num_supplies,
sizeof(*pfdev->regulators),
GFP_KERNEL);
if (!pfdev->regulators)
@@ -96,12 +96,12 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
for (i = 0; i < pfdev->comp->num_supplies; i++)
pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
- ret = devm_regulator_bulk_get(pfdev->dev,
+ ret = devm_regulator_bulk_get(pfdev->base.dev,
pfdev->comp->num_supplies,
pfdev->regulators);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
- dev_err(pfdev->dev, "failed to get regulators: %d\n",
+ dev_err(pfdev->base.dev, "failed to get regulators: %d\n",
ret);
return ret;
}
@@ -109,7 +109,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
ret = regulator_bulk_enable(pfdev->comp->num_supplies,
pfdev->regulators);
if (ret < 0) {
- dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret);
+ dev_err(pfdev->base.dev, "failed to enable regulators: %d\n", ret);
return ret;
}
@@ -144,7 +144,7 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
int err;
int i, num_domains;
- num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
+ num_domains = of_count_phandle_with_args(pfdev->base.dev->of_node,
"power-domains",
"#power-domain-cells");
@@ -156,7 +156,7 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
return 0;
if (num_domains != pfdev->comp->num_pm_domains) {
- dev_err(pfdev->dev,
+ dev_err(pfdev->base.dev,
"Incorrect number of power domains: %d provided, %d needed\n",
num_domains, pfdev->comp->num_pm_domains);
return -EINVAL;
@@ -168,20 +168,21 @@ static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
for (i = 0; i < num_domains; i++) {
pfdev->pm_domain_devs[i] =
- dev_pm_domain_attach_by_name(pfdev->dev,
- pfdev->comp->pm_domain_names[i]);
+ dev_pm_domain_attach_by_name(pfdev->base.dev,
+ pfdev->comp->pm_domain_names[i]);
if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) {
err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA;
pfdev->pm_domain_devs[i] = NULL;
- dev_err(pfdev->dev,
+ dev_err(pfdev->base.dev,
"failed to get pm-domain %s(%d): %d\n",
pfdev->comp->pm_domain_names[i], i, err);
goto err;
}
- pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
- pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
- DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
+ pfdev->pm_domain_links[i] =
+ device_link_add(pfdev->base.dev,
+ pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
if (!pfdev->pm_domain_links[i]) {
dev_err(pfdev->pm_domain_devs[i],
"adding device link failed!\n");
@@ -220,20 +221,20 @@ int panfrost_device_init(struct panfrost_device *pfdev)
err = panfrost_reset_init(pfdev);
if (err) {
- dev_err(pfdev->dev, "reset init failed %d\n", err);
+ dev_err(pfdev->base.dev, "reset init failed %d\n", err);
goto out_pm_domain;
}
err = panfrost_clk_init(pfdev);
if (err) {
- dev_err(pfdev->dev, "clk init failed %d\n", err);
+ dev_err(pfdev->base.dev, "clk init failed %d\n", err);
goto out_reset;
}
err = panfrost_devfreq_init(pfdev);
if (err) {
if (err != -EPROBE_DEFER)
- dev_err(pfdev->dev, "devfreq init failed %d\n", err);
+ dev_err(pfdev->base.dev, "devfreq init failed %d\n", err);
goto out_clk;
}
@@ -244,7 +245,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto out_devfreq;
}
- pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
+ pfdev->iomem = devm_platform_ioremap_resource(to_platform_device(pfdev->base.dev), 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
goto out_regulator;
@@ -258,7 +259,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
if (err)
goto out_gpu;
- err = panfrost_job_init(pfdev);
+ err = panfrost_jm_init(pfdev);
if (err)
goto out_mmu;
@@ -268,7 +269,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
return 0;
out_job:
- panfrost_job_fini(pfdev);
+ panfrost_jm_fini(pfdev);
out_mmu:
panfrost_mmu_fini(pfdev);
out_gpu:
@@ -289,7 +290,7 @@ out_pm_domain:
void panfrost_device_fini(struct panfrost_device *pfdev)
{
panfrost_perfcnt_fini(pfdev);
- panfrost_job_fini(pfdev);
+ panfrost_jm_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
panfrost_devfreq_fini(pfdev);
@@ -399,13 +400,16 @@ bool panfrost_exception_needs_reset(const struct panfrost_device *pfdev,
return false;
}
-void panfrost_device_reset(struct panfrost_device *pfdev)
+void panfrost_device_reset(struct panfrost_device *pfdev, bool enable_job_int)
{
panfrost_gpu_soft_reset(pfdev);
panfrost_gpu_power_on(pfdev);
panfrost_mmu_reset(pfdev);
- panfrost_job_enable_interrupts(pfdev);
+
+ panfrost_jm_reset_interrupts(pfdev);
+ if (enable_job_int)
+ panfrost_jm_enable_interrupts(pfdev);
}
static int panfrost_device_runtime_resume(struct device *dev)
@@ -429,7 +433,7 @@ static int panfrost_device_runtime_resume(struct device *dev)
}
}
- panfrost_device_reset(pfdev);
+ panfrost_device_reset(pfdev, true);
panfrost_devfreq_resume(pfdev);
return 0;
@@ -447,11 +451,11 @@ static int panfrost_device_runtime_suspend(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
- if (!panfrost_job_is_idle(pfdev))
+ if (!panfrost_jm_is_idle(pfdev))
return -EBUSY;
panfrost_devfreq_suspend(pfdev);
- panfrost_job_suspend_irq(pfdev);
+ panfrost_jm_suspend_irq(pfdev);
panfrost_mmu_suspend_irq(pfdev);
panfrost_gpu_suspend_irq(pfdev);
panfrost_gpu_power_off(pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 077525a3ad68..e61c4329fd07 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -10,11 +10,13 @@
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
+#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
#include <drm/gpu_scheduler.h>
#include "panfrost_devfreq.h"
+#include "panfrost_job.h"
struct panfrost_device;
struct panfrost_mmu;
@@ -22,9 +24,12 @@ struct panfrost_job_slot;
struct panfrost_job;
struct panfrost_perfcnt;
-#define NUM_JOB_SLOTS 3
#define MAX_PM_DOMAINS 5
+#define ALL_JS_INT_MASK \
+ (GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | \
+ GENMASK(NUM_JOB_SLOTS - 1, 0))
+
enum panfrost_drv_comp_bits {
PANFROST_COMP_BIT_GPU,
PANFROST_COMP_BIT_JOB,
@@ -123,9 +128,7 @@ struct panfrost_device_debugfs {
};
struct panfrost_device {
- struct device *dev;
- struct drm_device *ddev;
- struct platform_device *pdev;
+ struct drm_device base;
int gpu_irq;
int mmu_irq;
@@ -144,7 +147,6 @@ struct panfrost_device {
DECLARE_BITMAP(is_suspended, PANFROST_COMP_BIT_MAX);
spinlock_t as_lock;
- unsigned long as_in_use_mask;
unsigned long as_alloc_mask;
unsigned long as_faulty_mask;
struct list_head as_lru_list;
@@ -206,16 +208,22 @@ struct panfrost_engine_usage {
struct panfrost_file_priv {
struct panfrost_device *pfdev;
- struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
+ struct xarray jm_ctxs;
struct panfrost_mmu *mmu;
struct panfrost_engine_usage engine_usage;
};
+static inline bool panfrost_high_prio_allowed(struct drm_file *file)
+{
+ /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
+ return (capable(CAP_SYS_NICE) || drm_is_current_master(file));
+}
+
static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
{
- return ddev->dev_private;
+ return container_of(ddev, struct panfrost_device, base);
}
static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id)
@@ -241,7 +249,7 @@ int panfrost_unstable_ioctl_check(void);
int panfrost_device_init(struct panfrost_device *pfdev);
void panfrost_device_fini(struct panfrost_device *pfdev);
-void panfrost_device_reset(struct panfrost_device *pfdev);
+void panfrost_device_reset(struct panfrost_device *pfdev, bool enable_job_int);
extern const struct dev_pm_ops panfrost_pm_ops;
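For reference, with NUM_JOB_SLOTS == 3 (the #define moves into panfrost_job.h later in this diff) the new ALL_JS_INT_MASK covers the per-slot DONE interrupt bits and, 16 bits up, the per-slot FAIL bits, replacing the open-coded GENMASK pairs in panfrost_job.c. A quick illustrative check, not part of the patch:

#include <linux/bits.h>
#include <linux/build_bug.h>

#define NUM_JOB_SLOTS 3
#define ALL_JS_INT_MASK \
	(GENMASK(16 + NUM_JOB_SLOTS - 1, 16) | \
	 GENMASK(NUM_JOB_SLOTS - 1, 0))

/* DONE interrupts live in bits 2:0, FAIL interrupts in bits 18:16,
 * so the combined mask is 0x00070007. */
static_assert(ALL_JS_INT_MASK == 0x00070007);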
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1ea6c509a5d5..7d8c7c337606 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -16,6 +16,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
@@ -36,7 +37,7 @@ static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev,
{
int ret;
- ret = pm_runtime_resume_and_get(pfdev->dev);
+ ret = pm_runtime_resume_and_get(pfdev->base.dev);
if (ret)
return ret;
@@ -44,14 +45,14 @@ static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev,
*arg = panfrost_timestamp_read(pfdev);
panfrost_cycle_counter_put(pfdev);
- pm_runtime_put(pfdev->dev);
+ pm_runtime_put(pfdev->base.dev);
return 0;
}
static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
struct drm_panfrost_get_param *param = data;
- struct panfrost_device *pfdev = ddev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(ddev);
int ret;
if (param->pad != 0)
@@ -109,6 +110,14 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
#endif
break;
+ case DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES:
+ param->value = BIT(PANFROST_JM_CTX_PRIORITY_LOW) |
+ BIT(PANFROST_JM_CTX_PRIORITY_MEDIUM);
+
+ if (panfrost_high_prio_allowed(file))
+ param->value |= BIT(PANFROST_JM_CTX_PRIORITY_HIGH);
+ break;
+
default:
return -EINVAL;
}
@@ -275,13 +284,17 @@ fail:
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_file_priv *file_priv = file->driver_priv;
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
+ struct panfrost_jm_ctx *jm_ctx;
struct panfrost_job *job;
int ret = 0, slot;
+ if (args->pad)
+ return -EINVAL;
+
if (!args->jc)
return -EINVAL;
@@ -294,10 +307,16 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
return -ENODEV;
}
+ jm_ctx = panfrost_jm_ctx_from_handle(file, args->jm_ctx_handle);
+ if (!jm_ctx) {
+ ret = -EINVAL;
+ goto out_put_syncout;
+ }
+
job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job) {
ret = -ENOMEM;
- goto out_put_syncout;
+ goto out_put_jm_ctx;
}
kref_init(&job->refcount);
@@ -307,12 +326,13 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
job->mmu = file_priv->mmu;
+ job->ctx = panfrost_jm_ctx_get(jm_ctx);
job->engine_usage = &file_priv->engine_usage;
slot = panfrost_job_get_slot(job);
ret = drm_sched_job_init(&job->base,
- &file_priv->sched_entity[slot],
+ &jm_ctx->slot_entity[slot],
1, NULL, file->client_id);
if (ret)
goto out_put_job;
@@ -338,6 +358,8 @@ out_cleanup_job:
drm_sched_job_cleanup(&job->base);
out_put_job:
panfrost_job_put(job);
+out_put_jm_ctx:
+ panfrost_jm_ctx_put(jm_ctx);
out_put_syncout:
if (sync_out)
drm_syncobj_put(sync_out);
@@ -436,7 +458,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
{
struct panfrost_file_priv *priv = file_priv->driver_priv;
struct drm_panfrost_madvise *args = data;
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct drm_gem_object *gem_obj;
struct panfrost_gem_object *bo;
int ret = 0;
@@ -536,6 +558,27 @@ err_put_obj:
return ret;
}
+static int panfrost_ioctl_jm_ctx_create(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return panfrost_jm_ctx_create(file, data);
+}
+
+static int panfrost_ioctl_jm_ctx_destroy(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ const struct drm_panfrost_jm_ctx_destroy *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ /* We can't destroy the default context created when the file is opened. */
+ if (!args->handle)
+ return -EINVAL;
+
+ return panfrost_jm_ctx_destroy(file, args->handle);
+}
+
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -548,7 +591,7 @@ static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
int ret;
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_file_priv *panfrost_priv;
panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
@@ -564,7 +607,7 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
goto err_free;
}
- ret = panfrost_job_open(panfrost_priv);
+ ret = panfrost_jm_open(file);
if (ret)
goto err_job;
@@ -583,7 +626,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
struct panfrost_file_priv *panfrost_priv = file->driver_priv;
panfrost_perfcnt_close(file);
- panfrost_job_close(panfrost_priv);
+ panfrost_jm_close(file);
panfrost_mmu_ctx_put(panfrost_priv->mmu);
kfree(panfrost_priv);
@@ -603,6 +646,8 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(JM_CTX_CREATE, jm_ctx_create, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(JM_CTX_DESTROY, jm_ctx_destroy, DRM_RENDER_ALLOW),
};
static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
@@ -624,30 +669,25 @@ static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
* job spent on the GPU.
*/
- static const char * const engine_names[] = {
- "fragment", "vertex-tiler", "compute-only"
- };
-
- BUILD_BUG_ON(ARRAY_SIZE(engine_names) != NUM_JOB_SLOTS);
-
for (i = 0; i < NUM_JOB_SLOTS - 1; i++) {
if (pfdev->profile_mode) {
drm_printf(p, "drm-engine-%s:\t%llu ns\n",
- engine_names[i], panfrost_priv->engine_usage.elapsed_ns[i]);
+ panfrost_engine_names[i],
+ panfrost_priv->engine_usage.elapsed_ns[i]);
drm_printf(p, "drm-cycles-%s:\t%llu\n",
- engine_names[i], panfrost_priv->engine_usage.cycles[i]);
+ panfrost_engine_names[i],
+ panfrost_priv->engine_usage.cycles[i]);
}
drm_printf(p, "drm-maxfreq-%s:\t%lu Hz\n",
- engine_names[i], pfdev->pfdevfreq.fast_rate);
+ panfrost_engine_names[i], pfdev->pfdevfreq.fast_rate);
drm_printf(p, "drm-curfreq-%s:\t%lu Hz\n",
- engine_names[i], pfdev->pfdevfreq.current_frequency);
+ panfrost_engine_names[i], pfdev->pfdevfreq.current_frequency);
}
}
static void panfrost_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
- struct drm_device *dev = file->minor->dev;
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(file->minor->dev);
panfrost_gpu_show_fdinfo(pfdev, file->driver_priv, p);
@@ -664,16 +704,57 @@ static const struct file_operations panfrost_drm_driver_fops = {
static int panthor_gems_show(struct seq_file *m, void *data)
{
struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(node->minor->dev);
panfrost_gem_debugfs_print_bos(pfdev, m);
return 0;
}
+static void show_panfrost_jm_ctx(struct panfrost_jm_ctx *jm_ctx, u32 handle,
+ struct seq_file *m)
+{
+ struct drm_device *ddev = ((struct drm_info_node *)m->private)->minor->dev;
+ const char *prio = "UNKNOWN";
+
+ static const char * const prios[] = {
+ [DRM_SCHED_PRIORITY_HIGH] = "HIGH",
+ [DRM_SCHED_PRIORITY_NORMAL] = "NORMAL",
+ [DRM_SCHED_PRIORITY_LOW] = "LOW",
+ };
+
+ if (jm_ctx->slot_entity[0].priority !=
+ jm_ctx->slot_entity[1].priority)
+ drm_warn(ddev, "Slot priorities should be the same in a single context");
+
+ if (jm_ctx->slot_entity[0].priority < ARRAY_SIZE(prios))
+ prio = prios[jm_ctx->slot_entity[0].priority];
+
+ seq_printf(m, " JM context %u: priority %s\n", handle, prio);
+}
+
+static int show_file_jm_ctxs(struct panfrost_file_priv *pfile,
+ struct seq_file *m)
+{
+ struct panfrost_jm_ctx *jm_ctx;
+ unsigned long i;
+
+ xa_lock(&pfile->jm_ctxs);
+ xa_for_each(&pfile->jm_ctxs, i, jm_ctx) {
+ jm_ctx = panfrost_jm_ctx_get(jm_ctx);
+ xa_unlock(&pfile->jm_ctxs);
+ show_panfrost_jm_ctx(jm_ctx, i, m);
+ panfrost_jm_ctx_put(jm_ctx);
+ xa_lock(&pfile->jm_ctxs);
+ }
+ xa_unlock(&pfile->jm_ctxs);
+
+ return 0;
+}
+
static struct drm_info_list panthor_debugfs_list[] = {
- {"gems", panthor_gems_show, 0, NULL},
+ {"gems",
+ panthor_gems_show, 0, NULL},
};
static int panthor_gems_debugfs_init(struct drm_minor *minor)
@@ -685,9 +766,64 @@ static int panthor_gems_debugfs_init(struct drm_minor *minor)
return 0;
}
+static int show_each_file(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *ddev = node->minor->dev;
+ int (*show)(struct panfrost_file_priv *, struct seq_file *) =
+ node->info_ent->data;
+ struct drm_file *file;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ddev->filelist_mutex);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(file, &ddev->filelist, lhead) {
+ struct task_struct *task;
+ struct panfrost_file_priv *pfile = file->driver_priv;
+ struct pid *pid;
+
+ /*
+ * Although we have a valid reference on file->pid, that does
+ * not guarantee that the task_struct who called get_pid() is
+ * still alive (e.g. get_pid(current) => fork() => exit()).
+ * Therefore, we need to protect this ->comm access using RCU.
+ */
+ rcu_read_lock();
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+ seq_printf(m, "client_id %8llu pid %8d command %s:\n",
+ file->client_id, pid_nr(pid),
+ task ? task->comm : "<unknown>");
+ rcu_read_unlock();
+
+ ret = show(pfile, m);
+ if (ret < 0)
+ break;
+
+ seq_puts(m, "\n");
+ }
+
+ mutex_unlock(&ddev->filelist_mutex);
+ return ret;
+}
+
+static struct drm_info_list panfrost_sched_debugfs_list[] = {
+ { "sched_ctxs", show_each_file, 0, show_file_jm_ctxs },
+};
+
+static void panfrost_sched_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panfrost_sched_debugfs_list,
+ ARRAY_SIZE(panfrost_sched_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
static void panfrost_debugfs_init(struct drm_minor *minor)
{
panthor_gems_debugfs_init(minor);
+ panfrost_sched_debugfs_init(minor);
}
#endif
@@ -699,6 +835,8 @@ static void panfrost_debugfs_init(struct drm_minor *minor)
* - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
* - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
* - 1.4 - adds SET_LABEL_BO
+ * - 1.5 - adds JM_CTX_{CREATE,DESTROY} ioctls to allow context creation
+ * with configurable priorities/affinity, and extends SUBMIT to take a
+ * JM context handle
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -711,7 +849,7 @@ static const struct drm_driver panfrost_drm_driver = {
.name = "panfrost",
.desc = "panfrost DRM",
.major = 1,
- .minor = 4,
+ .minor = 5,
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
@@ -723,15 +861,12 @@ static const struct drm_driver panfrost_drm_driver = {
static int panfrost_probe(struct platform_device *pdev)
{
struct panfrost_device *pfdev;
- struct drm_device *ddev;
int err;
- pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
- if (!pfdev)
- return -ENOMEM;
-
- pfdev->pdev = pdev;
- pfdev->dev = &pdev->dev;
+ pfdev = devm_drm_dev_alloc(&pdev->dev, &panfrost_drm_driver,
+ struct panfrost_device, base);
+ if (IS_ERR(pfdev))
+ return PTR_ERR(pfdev);
platform_set_drvdata(pdev, pfdev);
@@ -741,14 +876,6 @@ static int panfrost_probe(struct platform_device *pdev)
pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
- /* Allocate and initialize the DRM device. */
- ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
- if (IS_ERR(ddev))
- return PTR_ERR(ddev);
-
- ddev->dev_private = pfdev;
- pfdev->ddev = ddev;
-
mutex_init(&pfdev->shrinker_lock);
INIT_LIST_HEAD(&pfdev->shrinker_list);
@@ -759,51 +886,47 @@ static int panfrost_probe(struct platform_device *pdev)
goto err_out0;
}
- pm_runtime_set_active(pfdev->dev);
- pm_runtime_mark_last_busy(pfdev->dev);
- pm_runtime_enable(pfdev->dev);
- pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
- pm_runtime_use_autosuspend(pfdev->dev);
+ pm_runtime_set_active(pfdev->base.dev);
+ pm_runtime_mark_last_busy(pfdev->base.dev);
+ pm_runtime_enable(pfdev->base.dev);
+ pm_runtime_set_autosuspend_delay(pfdev->base.dev, 50); /* ~3 frames */
+ pm_runtime_use_autosuspend(pfdev->base.dev);
/*
* Register the DRM device with the core and the connectors with
* sysfs
*/
- err = drm_dev_register(ddev, 0);
+ err = drm_dev_register(&pfdev->base, 0);
if (err < 0)
goto err_out1;
- err = panfrost_gem_shrinker_init(ddev);
+ err = panfrost_gem_shrinker_init(&pfdev->base);
if (err)
goto err_out2;
return 0;
err_out2:
- drm_dev_unregister(ddev);
+ drm_dev_unregister(&pfdev->base);
err_out1:
- pm_runtime_disable(pfdev->dev);
+ pm_runtime_disable(pfdev->base.dev);
panfrost_device_fini(pfdev);
- pm_runtime_set_suspended(pfdev->dev);
+ pm_runtime_set_suspended(pfdev->base.dev);
err_out0:
- drm_dev_put(ddev);
return err;
}
static void panfrost_remove(struct platform_device *pdev)
{
struct panfrost_device *pfdev = platform_get_drvdata(pdev);
- struct drm_device *ddev = pfdev->ddev;
- drm_dev_unregister(ddev);
- panfrost_gem_shrinker_cleanup(ddev);
+ drm_dev_unregister(&pfdev->base);
+ panfrost_gem_shrinker_cleanup(&pfdev->base);
- pm_runtime_get_sync(pfdev->dev);
- pm_runtime_disable(pfdev->dev);
+ pm_runtime_get_sync(pfdev->base.dev);
+ pm_runtime_disable(pfdev->base.dev);
panfrost_device_fini(pfdev);
- pm_runtime_set_suspended(pfdev->dev);
-
- drm_dev_put(ddev);
+ pm_runtime_set_suspended(pfdev->base.dev);
}
static ssize_t profiling_show(struct device *dev,
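Taken together, the bump to driver version 1.5 gives userspace per-file job-manager contexts: create one with a priority, pass its handle in the extended SUBMIT args, and destroy it when done (handle 0 is the default context created at open time and cannot be destroyed). A hedged userspace sketch; the DRM_IOCTL_PANFROST_JM_CTX_* macro names and the exact struct layouts are inferred from the kernel-side hunks, not shown in this diff:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/panfrost_drm.h>

/* Sketch only: field and macro names inferred from the kernel-side diff. */
static int submit_high_prio(int fd, uint64_t jc)
{
	struct drm_panfrost_jm_ctx_create ctx_args = {
		/* HIGH needs CAP_SYS_NICE or DRM master, per
		 * jm_ctx_prio_to_drm_sched_prio(). */
		.priority = PANFROST_JM_CTX_PRIORITY_HIGH,
	};
	int ret;

	ret = ioctl(fd, DRM_IOCTL_PANFROST_JM_CTX_CREATE, &ctx_args);
	if (ret)
		return ret;

	struct drm_panfrost_submit submit = {
		.jc = jc,
		.jm_ctx_handle = ctx_args.handle, /* 0 selects the default ctx */
	};
	ret = ioctl(fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);

	struct drm_panfrost_jm_ctx_destroy destroy_args = {
		.handle = ctx_args.handle,
	};
	ioctl(fd, DRM_IOCTL_PANFROST_JM_CTX_DESTROY, &destroy_args);
	return ret;
}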
diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c
index 4042afe2fbf4..3ed6c902d0a1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_dump.c
+++ b/drivers/gpu/drm/panfrost/panfrost_dump.c
@@ -163,7 +163,7 @@ void panfrost_core_dump(struct panfrost_job *job)
iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
__GFP_NORETRY);
if (!iter.start) {
- dev_warn(pfdev->dev, "failed to allocate devcoredump file\n");
+ dev_warn(pfdev->base.dev, "failed to allocate devcoredump file\n");
return;
}
@@ -204,14 +204,14 @@ void panfrost_core_dump(struct panfrost_job *job)
mapping = job->mappings[i];
if (!bo->base.sgt) {
- dev_err(pfdev->dev, "Panfrost Dump: BO has no sgt, cannot dump\n");
+ dev_err(pfdev->base.dev, "Panfrost Dump: BO has no sgt, cannot dump\n");
iter.hdr->bomap.valid = 0;
goto dump_header;
}
ret = drm_gem_vmap(&bo->base.base, &map);
if (ret) {
- dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
+ dev_err(pfdev->base.dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
goto dump_header;
}
@@ -237,5 +237,5 @@ dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data +
}
panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_TRAILER, iter.data);
- dev_coredumpv(pfdev->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
+ dev_coredumpv(pfdev->base.dev, iter.start, iter.data - iter.start, GFP_KERNEL);
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 85d6289a6eda..8041b65c6609 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -8,6 +8,7 @@
#include <linux/dma-mapping.h>
#include <drm/panfrost_drm.h>
+#include <drm/drm_print.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
@@ -26,7 +27,7 @@ static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo)
{
- struct panfrost_device *pfdev = bo->base.base.dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(bo->base.base.dev);
if (list_empty(&bo->debugfs.node))
return;
@@ -48,7 +49,7 @@ static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {}
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
- struct panfrost_device *pfdev = obj->dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
/*
* Make sure the BO is no longer inserted in the shrinker list before
@@ -76,7 +77,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
for (i = 0; i < n_sgt; i++) {
if (bo->sgts[i].sgl) {
- dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
+ dma_unmap_sgtable(pfdev->base.dev, &bo->sgts[i],
DMA_BIDIRECTIONAL, 0);
sg_free_table(&bo->sgts[i]);
}
@@ -284,7 +285,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
*/
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 02b60ea1433a..2fe967a90bcb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -97,7 +97,7 @@ panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
*/
int panfrost_gem_shrinker_init(struct drm_device *dev)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
pfdev->shrinker = shrinker_alloc(0, "drm-panfrost");
if (!pfdev->shrinker)
@@ -120,7 +120,7 @@ int panfrost_gem_shrinker_init(struct drm_device *dev)
*/
void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
if (pfdev->shrinker)
shrinker_free(pfdev->shrinker);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 174e190ba40f..483d278eb154 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -12,6 +12,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_print.h>
+
#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
@@ -36,12 +38,12 @@ static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);
- dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
+ dev_warn(pfdev->base.dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
fault_status, panfrost_exception_name(fault_status & 0xFF),
address);
if (state & GPU_IRQ_MULTIPLE_FAULT)
- dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");
+ dev_warn(pfdev->base.dev, "There were multiple GPU faults - some have not been reported\n");
gpu_write(pfdev, GPU_INT_MASK, 0);
}
@@ -72,13 +74,13 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
val, val & GPU_IRQ_RESET_COMPLETED, 10, 10000);
if (ret) {
- dev_err(pfdev->dev, "gpu soft reset timed out, attempting hard reset\n");
+ dev_err(pfdev->base.dev, "gpu soft reset timed out, attempting hard reset\n");
gpu_write(pfdev, GPU_CMD, GPU_CMD_HARD_RESET);
ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT, val,
val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
if (ret) {
- dev_err(pfdev->dev, "gpu hard reset timed out\n");
+ dev_err(pfdev->base.dev, "gpu hard reset timed out\n");
return ret;
}
}
@@ -95,7 +97,7 @@ int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
* All in-flight jobs should have released their cycle
* counter references upon reset, but let us make sure
*/
- if (drm_WARN_ON(pfdev->ddev, atomic_read(&pfdev->cycle_counter.use_count) != 0))
+ if (drm_WARN_ON(&pfdev->base, atomic_read(&pfdev->cycle_counter.use_count) != 0))
atomic_set(&pfdev->cycle_counter.use_count, 0);
return 0;
@@ -240,9 +242,10 @@ static const struct panfrost_model gpu_models[] = {
/* MediaTek MT8188 Mali-G57 MC3 */
GPU_MODEL(g57, 0x9093,
GPU_REV(g57, 0, 0)),
+ {0},
};
-static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
+static int panfrost_gpu_init_features(struct panfrost_device *pfdev)
{
u32 gpu_id, num_js, major, minor, status, rev;
const char *name = "unknown";
@@ -327,16 +330,22 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
break;
}
+ if (!model->name) {
+ dev_err(pfdev->base.dev, "GPU model not found: mali-%s id rev %#x %#x\n",
+ name, gpu_id, rev);
+ return -ENODEV;
+ }
+
bitmap_from_u64(pfdev->features.hw_features, hw_feat);
bitmap_from_u64(pfdev->features.hw_issues, hw_issues);
- dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ dev_info(pfdev->base.dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
name, gpu_id, major, minor, status);
- dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
+ dev_info(pfdev->base.dev, "features: %64pb, issues: %64pb",
pfdev->features.hw_features,
pfdev->features.hw_issues);
- dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
+ dev_info(pfdev->base.dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
pfdev->features.l2_features,
pfdev->features.core_features,
pfdev->features.tiler_features,
@@ -345,8 +354,10 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
pfdev->features.as_present,
pfdev->features.js_present);
- dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
+ dev_info(pfdev->base.dev, "shader_present=0x%0llx l2_present=0x%0llx",
pfdev->features.shader_present, pfdev->features.l2_present);
+
+ return 0;
}
void panfrost_cycle_counter_get(struct panfrost_device *pfdev)
@@ -411,7 +422,7 @@ static u64 panfrost_get_core_mask(struct panfrost_device *pfdev)
*/
core_mask = ~(pfdev->features.l2_present - 1) &
(pfdev->features.l2_present - 2);
- dev_info_once(pfdev->dev, "using only 1st core group (%lu cores from %lu)\n",
+ dev_info_once(pfdev->base.dev, "using only 1st core group (%lu cores from %lu)\n",
hweight64(core_mask),
hweight64(pfdev->features.shader_present));
@@ -432,7 +443,7 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
val, val == (pfdev->features.l2_present & core_mask),
10, 20000);
if (ret)
- dev_err(pfdev->dev, "error powering up gpu L2");
+ dev_err(pfdev->base.dev, "error powering up gpu L2");
gpu_write(pfdev, SHADER_PWRON_LO,
pfdev->features.shader_present & core_mask);
@@ -440,13 +451,13 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
val, val == (pfdev->features.shader_present & core_mask),
10, 20000);
if (ret)
- dev_err(pfdev->dev, "error powering up gpu shader");
+ dev_err(pfdev->base.dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
val, val == pfdev->features.tiler_present, 10, 1000);
if (ret)
- dev_err(pfdev->dev, "error powering up gpu tiler");
+ dev_err(pfdev->base.dev, "error powering up gpu tiler");
}
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
@@ -458,19 +469,19 @@ void panfrost_gpu_power_off(struct panfrost_device *pfdev)
ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_PWRTRANS_LO,
val, !val, 1, 2000);
if (ret)
- dev_err(pfdev->dev, "shader power transition timeout");
+ dev_err(pfdev->base.dev, "shader power transition timeout");
gpu_write(pfdev, TILER_PWROFF_LO, pfdev->features.tiler_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_PWRTRANS_LO,
val, !val, 1, 2000);
if (ret)
- dev_err(pfdev->dev, "tiler power transition timeout");
+ dev_err(pfdev->base.dev, "tiler power transition timeout");
gpu_write(pfdev, L2_PWROFF_LO, pfdev->features.l2_present);
ret = readl_poll_timeout(pfdev->iomem + L2_PWRTRANS_LO,
val, !val, 0, 2000);
if (ret)
- dev_err(pfdev->dev, "l2 power transition timeout");
+ dev_err(pfdev->base.dev, "l2 power transition timeout");
}
void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev)
@@ -489,23 +500,26 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
if (err)
return err;
- panfrost_gpu_init_features(pfdev);
+ err = panfrost_gpu_init_features(pfdev);
+ if (err)
+ return err;
- err = dma_set_mask_and_coherent(pfdev->dev,
- DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
+ err = dma_set_mask_and_coherent(pfdev->base.dev,
+ DMA_BIT_MASK(FIELD_GET(0xff00,
+ pfdev->features.mmu_features)));
if (err)
return err;
- dma_set_max_seg_size(pfdev->dev, UINT_MAX);
+ dma_set_max_seg_size(pfdev->base.dev, UINT_MAX);
- pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
+ pfdev->gpu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "gpu");
if (pfdev->gpu_irq < 0)
return pfdev->gpu_irq;
- err = devm_request_irq(pfdev->dev, pfdev->gpu_irq, panfrost_gpu_irq_handler,
+ err = devm_request_irq(pfdev->base.dev, pfdev->gpu_irq, panfrost_gpu_irq_handler,
IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
- dev_err(pfdev->dev, "failed to request gpu irq");
+ dev_err(pfdev->base.dev, "failed to request gpu irq");
return err;
}
@@ -525,9 +539,9 @@ u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION)) {
/* Flush reduction only makes sense when the GPU is kept powered on between jobs */
- if (pm_runtime_get_if_in_use(pfdev->dev)) {
+ if (pm_runtime_get_if_in_use(pfdev->base.dev)) {
flush_id = gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
- pm_runtime_put(pfdev->dev);
+ pm_runtime_put(pfdev->base.dev);
return flush_id;
}
}
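Two behavioural changes land in panfrost_gpu.c: probing an unrecognised GPU model now fails with -ENODEV instead of carrying on with bogus feature bits (hence the new {0} sentinel terminating gpu_models), and panfrost_gpu_init() propagates that error. The DMA-mask derivation above is only re-wrapped, but it is worth unpacking; a worked example, assuming a typical Midgard/Bifrost MMU_FEATURES value of 0x2830 (VA bits in [7:0], PA bits in [15:8]):

#include <linux/bitfield.h>
#include <linux/dma-mapping.h>

/* Illustrative only: 0x2830 is an assumed-typical register value. */
static u64 example_dma_mask(void)
{
	u32 mmu_features = 0x2830;

	/* FIELD_GET(0xff00, 0x2830) == 0x28 == 40 physical address bits,
	 * so the device gets a 40-bit DMA mask. */
	return DMA_BIT_MASK(FIELD_GET(0xff00, mmu_features));
}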
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 82acabb21b27..11894a6b9fcc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -22,11 +22,16 @@
#include "panfrost_mmu.h"
#include "panfrost_dump.h"
+#define MAX_JM_CTX_PER_FILE 64
#define JOB_TIMEOUT_MS 500
#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
+const char * const panfrost_engine_names[] = {
+ "fragment", "vertex-tiler", "compute-only"
+};
+
struct panfrost_queue_state {
struct drm_gpu_scheduler sched;
u64 fence_context;
@@ -94,7 +99,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in
if (!fence)
return ERR_PTR(-ENOMEM);
- fence->dev = pfdev->ddev;
+ fence->dev = &pfdev->base;
fence->queue = js_num;
fence->seqno = ++js->queue[js_num].emit_seqno;
dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
@@ -195,7 +200,7 @@ panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
return 1;
}
-static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
+static int panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
struct panfrost_device *pfdev = job->pfdev;
unsigned int subslot;
@@ -203,17 +208,22 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
u64 jc_head = job->jc;
int ret;
- panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
-
- ret = pm_runtime_get_sync(pfdev->dev);
+ ret = pm_runtime_get_sync(pfdev->base.dev);
if (ret < 0)
- return;
+ goto err_hwsubmit;
if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
- return;
+ ret = -EINVAL;
+ goto err_hwsubmit;
}
- cfg = panfrost_mmu_as_get(pfdev, job->mmu);
+ ret = panfrost_mmu_as_get(pfdev, job->mmu);
+ if (ret < 0)
+ goto err_hwsubmit;
+
+ cfg = ret;
+
+ panfrost_devfreq_record_busy(&pfdev->pfdevfreq);
job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
@@ -256,11 +266,17 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
}
job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
- dev_dbg(pfdev->dev,
+ dev_dbg(pfdev->base.dev,
"JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d",
job, js, subslot, jc_head, cfg & 0xf);
}
spin_unlock(&pfdev->js->job_lock);
+
+ return 0;
+
+err_hwsubmit:
+ pm_runtime_put_autosuspend(pfdev->base.dev);
+ return ret;
}
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
@@ -359,6 +375,7 @@ static void panfrost_job_cleanup(struct kref *ref)
kvfree(job->bos);
}
+ panfrost_jm_ctx_put(job->ctx);
kfree(job);
}
@@ -382,6 +399,10 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
struct panfrost_device *pfdev = job->pfdev;
int slot = panfrost_job_get_slot(job);
struct dma_fence *fence = NULL;
+ int ret;
+
+ if (job->ctx->destroyed)
+ return ERR_PTR(-ECANCELED);
if (unlikely(job->base.s_fence->finished.error))
return NULL;
@@ -400,27 +421,27 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
dma_fence_put(job->done_fence);
job->done_fence = dma_fence_get(fence);
- panfrost_job_hw_submit(job, slot);
+ ret = panfrost_job_hw_submit(job, slot);
+ if (ret) {
+ dma_fence_put(fence);
+ return ERR_PTR(ret);
+ }
return fence;
}
-void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
+void panfrost_jm_reset_interrupts(struct panfrost_device *pfdev)
{
- int j;
- u32 irq_mask = 0;
+ job_write(pfdev, JOB_INT_CLEAR, ALL_JS_INT_MASK);
+}
+void panfrost_jm_enable_interrupts(struct panfrost_device *pfdev)
+{
clear_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
-
- for (j = 0; j < NUM_JOB_SLOTS; j++) {
- irq_mask |= MK_JS_MASK(j);
- }
-
- job_write(pfdev, JOB_INT_CLEAR, irq_mask);
- job_write(pfdev, JOB_INT_MASK, irq_mask);
+ job_write(pfdev, JOB_INT_MASK, ALL_JS_INT_MASK);
}
-void panfrost_job_suspend_irq(struct panfrost_device *pfdev)
+void panfrost_jm_suspend_irq(struct panfrost_device *pfdev)
{
set_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended);
@@ -437,12 +458,12 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
bool signal_fence = true;
if (!panfrost_exception_is_fault(js_status)) {
- dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
+ dev_dbg(pfdev->base.dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
js, exception_name,
job_read(pfdev, JS_HEAD_LO(js)),
job_read(pfdev, JS_TAIL_LO(js)));
} else {
- dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
+ dev_err(pfdev->base.dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
js, exception_name,
job_read(pfdev, JS_HEAD_LO(js)),
job_read(pfdev, JS_TAIL_LO(js)));
@@ -474,7 +495,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
if (signal_fence)
dma_fence_signal_locked(job->done_fence);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
if (panfrost_exception_needs_reset(pfdev, js_status)) {
atomic_set(&pfdev->reset.pending, 1);
@@ -482,8 +503,8 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev,
}
}
-static void panfrost_job_handle_done(struct panfrost_device *pfdev,
- struct panfrost_job *job)
+static void panfrost_jm_handle_done(struct panfrost_device *pfdev,
+ struct panfrost_job *job)
{
/* Set ->jc to 0 to avoid re-submitting an already finished job (can
* happen when we receive the DONE interrupt while doing a GPU reset).
@@ -493,10 +514,10 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev,
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
dma_fence_signal_locked(job->done_fence);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
}
-static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
+static void panfrost_jm_handle_irq(struct panfrost_device *pfdev, u32 status)
{
struct panfrost_job *done[NUM_JOB_SLOTS][2] = {};
struct panfrost_job *failed[NUM_JOB_SLOTS] = {};
@@ -571,7 +592,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
}
for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++)
- panfrost_job_handle_done(pfdev, done[j][i]);
+ panfrost_jm_handle_done(pfdev, done[j][i]);
}
/* And finally we requeue jobs that were waiting in the second slot
@@ -589,7 +610,7 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j);
dma_fence_set_error(canceled->done_fence, -ECANCELED);
- panfrost_job_handle_done(pfdev, canceled);
+ panfrost_jm_handle_done(pfdev, canceled);
} else if (!atomic_read(&pfdev->reset.pending)) {
/* Requeue the job we removed if no reset is pending */
job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START);
@@ -597,15 +618,15 @@ static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
}
}
-static void panfrost_job_handle_irqs(struct panfrost_device *pfdev)
+static void panfrost_jm_handle_irqs(struct panfrost_device *pfdev)
{
u32 status = job_read(pfdev, JOB_INT_RAWSTAT);
while (status) {
- pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_mark_last_busy(pfdev->base.dev);
spin_lock(&pfdev->js->job_lock);
- panfrost_job_handle_irq(pfdev, status);
+ panfrost_jm_handle_irq(pfdev, status);
spin_unlock(&pfdev->js->job_lock);
status = job_read(pfdev, JOB_INT_RAWSTAT);
}
@@ -683,10 +704,10 @@ panfrost_reset(struct panfrost_device *pfdev,
10, 10000);
if (ret)
- dev_err(pfdev->dev, "Soft-stop failed\n");
+ dev_err(pfdev->base.dev, "Soft-stop failed\n");
/* Handle the remaining interrupts before we reset. */
- panfrost_job_handle_irqs(pfdev);
+ panfrost_jm_handle_irqs(pfdev);
/* Remaining interrupts have been handled, but we might still have
* stuck jobs. Let's make sure the PM counters stay balanced by
@@ -701,7 +722,7 @@ panfrost_reset(struct panfrost_device *pfdev,
if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT ||
pfdev->jobs[i][j]->is_profiled)
panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev);
- pm_runtime_put_noidle(pfdev->dev);
+ pm_runtime_put_noidle(pfdev->base.dev);
panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
}
}
@@ -709,12 +730,7 @@ panfrost_reset(struct panfrost_device *pfdev,
spin_unlock(&pfdev->js->job_lock);
/* Proceed with reset now. */
- panfrost_device_reset(pfdev);
-
- /* panfrost_device_reset() unmasks job interrupts, but we want to
- * keep them masked a bit longer.
- */
- job_write(pfdev, JOB_INT_MASK, 0);
+ panfrost_device_reset(pfdev, false);
/* GPU has been reset, we can clear the reset pending bit. */
atomic_set(&pfdev->reset.pending, 0);
@@ -736,9 +752,7 @@ panfrost_reset(struct panfrost_device *pfdev,
drm_sched_start(&pfdev->js->queue[i].sched, 0);
/* Re-enable job interrupts now that everything has been restarted. */
- job_write(pfdev, JOB_INT_MASK,
- GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
- GENMASK(NUM_JOB_SLOTS - 1, 0));
+ panfrost_jm_enable_interrupts(pfdev);
dma_fence_end_signalling(cookie);
}
@@ -769,11 +783,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
synchronize_irq(pfdev->js->irq);
if (dma_fence_is_signaled(job->done_fence)) {
- dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n");
+ dev_warn(pfdev->base.dev, "unexpectedly high interrupt latency\n");
return DRM_GPU_SCHED_STAT_NO_HANG;
}
- dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
+ dev_err(pfdev->base.dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
job_read(pfdev, JS_CONFIG(js)),
job_read(pfdev, JS_STATUS(js)),
@@ -803,22 +817,20 @@ static const struct drm_sched_backend_ops panfrost_sched_ops = {
.free_job = panfrost_job_free
};
-static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
+static irqreturn_t panfrost_jm_irq_handler_thread(int irq, void *data)
{
struct panfrost_device *pfdev = data;
- panfrost_job_handle_irqs(pfdev);
+ panfrost_jm_handle_irqs(pfdev);
/* Enable interrupts only if we're not about to get suspended */
if (!test_bit(PANFROST_COMP_BIT_JOB, pfdev->is_suspended))
- job_write(pfdev, JOB_INT_MASK,
- GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
- GENMASK(NUM_JOB_SLOTS - 1, 0));
+ job_write(pfdev, JOB_INT_MASK, ALL_JS_INT_MASK);
return IRQ_HANDLED;
}
-static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
+static irqreturn_t panfrost_jm_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
u32 status;
@@ -834,19 +846,20 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
return IRQ_WAKE_THREAD;
}
-int panfrost_job_init(struct panfrost_device *pfdev)
+int panfrost_jm_init(struct panfrost_device *pfdev)
{
struct drm_sched_init_args args = {
.ops = &panfrost_sched_ops,
.num_rqs = DRM_SCHED_PRIORITY_COUNT,
.credit_limit = 2,
.timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
- .name = "pan_js",
- .dev = pfdev->dev,
+ .dev = pfdev->base.dev,
};
struct panfrost_job_slot *js;
int ret, j;
+ BUILD_BUG_ON(ARRAY_SIZE(panfrost_engine_names) != NUM_JOB_SLOTS);
+
/* All GPUs have two entries per queue, but without jobchain
* disambiguation stopping the right job in the close path is tricky,
* so let's just advertise one entry in that case.
@@ -854,24 +867,25 @@ int panfrost_job_init(struct panfrost_device *pfdev)
if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
args.credit_limit = 1;
- pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
+ js = devm_kzalloc(pfdev->base.dev, sizeof(*js), GFP_KERNEL);
if (!js)
return -ENOMEM;
+ pfdev->js = js;
INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
spin_lock_init(&js->job_lock);
- js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
+ js->irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "job");
if (js->irq < 0)
return js->irq;
- ret = devm_request_threaded_irq(pfdev->dev, js->irq,
- panfrost_job_irq_handler,
- panfrost_job_irq_handler_thread,
+ ret = devm_request_threaded_irq(pfdev->base.dev, js->irq,
+ panfrost_jm_irq_handler,
+ panfrost_jm_irq_handler_thread,
IRQF_SHARED, KBUILD_MODNAME "-job",
pfdev);
if (ret) {
- dev_err(pfdev->dev, "failed to request job irq");
+ dev_err(pfdev->base.dev, "failed to request job irq");
return ret;
}
@@ -882,15 +896,17 @@ int panfrost_job_init(struct panfrost_device *pfdev)
for (j = 0; j < NUM_JOB_SLOTS; j++) {
js->queue[j].fence_context = dma_fence_context_alloc(1);
+ args.name = panfrost_engine_names[j];
ret = drm_sched_init(&js->queue[j].sched, &args);
if (ret) {
- dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
+ dev_err(pfdev->base.dev, "Failed to create scheduler: %d.", ret);
goto err_sched;
}
}
- panfrost_job_enable_interrupts(pfdev);
+ panfrost_jm_reset_interrupts(pfdev);
+ panfrost_jm_enable_interrupts(pfdev);
return 0;
@@ -902,7 +918,7 @@ err_sched:
return ret;
}
-void panfrost_job_fini(struct panfrost_device *pfdev)
+void panfrost_jm_fini(struct panfrost_device *pfdev)
{
struct panfrost_job_slot *js = pfdev->js;
int j;
@@ -917,39 +933,176 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
destroy_workqueue(pfdev->reset.wq);
}
-int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
+int panfrost_jm_open(struct drm_file *file)
+{
+ struct panfrost_file_priv *panfrost_priv = file->driver_priv;
+ int ret;
+
+ struct drm_panfrost_jm_ctx_create default_jm_ctx = {
+ .priority = PANFROST_JM_CTX_PRIORITY_MEDIUM,
+ };
+
+ xa_init_flags(&panfrost_priv->jm_ctxs, XA_FLAGS_ALLOC);
+
+ ret = panfrost_jm_ctx_create(file, &default_jm_ctx);
+ if (ret)
+ return ret;
+
+ /* We expect the default context to be assigned handle 0. */
+ if (WARN_ON(default_jm_ctx.handle))
+ return -EINVAL;
+
+ return 0;
+}
+
+void panfrost_jm_close(struct drm_file *file)
+{
+ struct panfrost_file_priv *panfrost_priv = file->driver_priv;
+ struct panfrost_jm_ctx *jm_ctx;
+ unsigned long i;
+
+ xa_for_each(&panfrost_priv->jm_ctxs, i, jm_ctx)
+ panfrost_jm_ctx_destroy(file, i);
+
+ xa_destroy(&panfrost_priv->jm_ctxs);
+}
+
+int panfrost_jm_is_idle(struct panfrost_device *pfdev)
{
- struct panfrost_device *pfdev = panfrost_priv->pfdev;
struct panfrost_job_slot *js = pfdev->js;
- struct drm_gpu_scheduler *sched;
- int ret, i;
+ int i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
- sched = &js->queue[i].sched;
- ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
- DRM_SCHED_PRIORITY_NORMAL, &sched,
- 1, NULL);
- if (WARN_ON(ret))
- return ret;
+ /* If there are any jobs in the HW queue, we're not idle */
+ if (atomic_read(&js->queue[i].sched.credit_count))
+ return false;
+ }
+
+ return true;
+}
+
+static void panfrost_jm_ctx_release(struct kref *kref)
+{
+ struct panfrost_jm_ctx *jm_ctx = container_of(kref, struct panfrost_jm_ctx, refcnt);
+
+ WARN_ON(!jm_ctx->destroyed);
+
+ for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++)
+ drm_sched_entity_destroy(&jm_ctx->slot_entity[i]);
+
+ kfree(jm_ctx);
+}
+
+void
+panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx)
+{
+ if (jm_ctx)
+ kref_put(&jm_ctx->refcnt, panfrost_jm_ctx_release);
+}
+
+struct panfrost_jm_ctx *
+panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx)
+{
+ if (jm_ctx)
+ kref_get(&jm_ctx->refcnt);
+
+ return jm_ctx;
+}
+
+struct panfrost_jm_ctx *
+panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle)
+{
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_jm_ctx *jm_ctx;
+
+ xa_lock(&priv->jm_ctxs);
+ jm_ctx = panfrost_jm_ctx_get(xa_load(&priv->jm_ctxs, handle));
+ xa_unlock(&priv->jm_ctxs);
+
+ return jm_ctx;
+}
+
+static int jm_ctx_prio_to_drm_sched_prio(struct drm_file *file,
+ enum drm_panfrost_jm_ctx_priority in,
+ enum drm_sched_priority *out)
+{
+ switch (in) {
+ case PANFROST_JM_CTX_PRIORITY_LOW:
+ *out = DRM_SCHED_PRIORITY_LOW;
+ return 0;
+ case PANFROST_JM_CTX_PRIORITY_MEDIUM:
+ *out = DRM_SCHED_PRIORITY_NORMAL;
+ return 0;
+ case PANFROST_JM_CTX_PRIORITY_HIGH:
+ if (!panfrost_high_prio_allowed(file))
+ return -EACCES;
+
+ *out = DRM_SCHED_PRIORITY_HIGH;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int panfrost_jm_ctx_create(struct drm_file *file,
+ struct drm_panfrost_jm_ctx_create *args)
+{
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_device *pfdev = priv->pfdev;
+ enum drm_sched_priority sched_prio;
+ struct panfrost_jm_ctx *jm_ctx;
+ int ret;
+
+ jm_ctx = kzalloc(sizeof(*jm_ctx), GFP_KERNEL);
+ if (!jm_ctx)
+ return -ENOMEM;
+
+ kref_init(&jm_ctx->refcnt);
+
+ ret = jm_ctx_prio_to_drm_sched_prio(file, args->priority, &sched_prio);
+ if (ret)
+ goto err_put_jm_ctx;
+
+ for (u32 i = 0; i < NUM_JOB_SLOTS; i++) {
+ struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
+
+ ret = drm_sched_entity_init(&jm_ctx->slot_entity[i], sched_prio,
+ &sched, 1, NULL);
+ if (ret)
+ goto err_put_jm_ctx;
}
+
+ ret = xa_alloc(&priv->jm_ctxs, &args->handle, jm_ctx,
+ XA_LIMIT(0, MAX_JM_CTX_PER_FILE), GFP_KERNEL);
+ if (ret)
+ goto err_put_jm_ctx;
+
return 0;
+
+err_put_jm_ctx:
+ jm_ctx->destroyed = true;
+ panfrost_jm_ctx_put(jm_ctx);
+ return ret;
}
-void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
+int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle)
{
- struct panfrost_device *pfdev = panfrost_priv->pfdev;
- int i;
+ struct panfrost_file_priv *priv = file->driver_priv;
+ struct panfrost_device *pfdev = priv->pfdev;
+ struct panfrost_jm_ctx *jm_ctx;
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
+ jm_ctx = xa_erase(&priv->jm_ctxs, handle);
+ if (!jm_ctx)
+ return -EINVAL;
+
+ jm_ctx->destroyed = true;
/* Kill in-flight jobs */
spin_lock(&pfdev->js->job_lock);
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
- int j;
+ for (u32 i = 0; i < ARRAY_SIZE(jm_ctx->slot_entity); i++) {
+ struct drm_sched_entity *entity = &jm_ctx->slot_entity[i];
- for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
+ for (int j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
struct panfrost_job *job = pfdev->jobs[i][j];
u32 cmd;
@@ -980,18 +1133,7 @@ void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
}
}
spin_unlock(&pfdev->js->job_lock);
-}
-
-int panfrost_job_is_idle(struct panfrost_device *pfdev)
-{
- struct panfrost_job_slot *js = pfdev->js;
- int i;
-
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- /* If there are any jobs in the HW queue, we're not idle */
- if (atomic_read(&js->queue[i].sched.credit_count))
- return false;
- }
- return true;
+ panfrost_jm_ctx_put(jm_ctx);
+ return 0;
}
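The context lifetime rules in the hunks above follow the usual xarray-plus-kref pattern: the xarray owns one reference per live handle, panfrost_jm_ctx_from_handle() takes its own reference under xa_lock so a racing JM_CTX_DESTROY (xa_erase plus final put) cannot free the context under a submitter, jobs pin the context through job->ctx until panfrost_job_cleanup(), and the destroyed flag lets panfrost_job_run() return -ECANCELED for jobs whose context was torn down while queued. Distilled to its essentials, with generic names:

#include <linux/kref.h>
#include <linux/xarray.h>

struct obj {
	struct kref refcnt;
};

/* Lookup takes a reference while xa_lock is held, mirroring
 * panfrost_jm_ctx_from_handle() above. */
static struct obj *obj_lookup(struct xarray *xa, unsigned long handle)
{
	struct obj *o;

	xa_lock(xa);
	o = xa_load(xa, handle);
	if (o)
		kref_get(&o->refcnt);
	xa_unlock(xa);

	return o;
}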
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index ec581b97852b..c3f57e41a571 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -18,6 +18,7 @@ struct panfrost_job {
struct panfrost_device *pfdev;
struct panfrost_mmu *mmu;
+ struct panfrost_jm_ctx *ctx;
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
@@ -39,15 +40,38 @@ struct panfrost_job {
u64 start_cycles;
};
-int panfrost_job_init(struct panfrost_device *pfdev);
-void panfrost_job_fini(struct panfrost_device *pfdev);
-int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
-void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+struct panfrost_js_ctx {
+ struct drm_sched_entity sched_entity;
+ bool enabled;
+};
+
+#define NUM_JOB_SLOTS 3
+
+struct panfrost_jm_ctx {
+ struct kref refcnt;
+ bool destroyed;
+ struct drm_sched_entity slot_entity[NUM_JOB_SLOTS];
+};
+
+extern const char * const panfrost_engine_names[];
+
+int panfrost_jm_ctx_create(struct drm_file *file,
+ struct drm_panfrost_jm_ctx_create *args);
+int panfrost_jm_ctx_destroy(struct drm_file *file, u32 handle);
+void panfrost_jm_ctx_put(struct panfrost_jm_ctx *jm_ctx);
+struct panfrost_jm_ctx *panfrost_jm_ctx_get(struct panfrost_jm_ctx *jm_ctx);
+struct panfrost_jm_ctx *panfrost_jm_ctx_from_handle(struct drm_file *file, u32 handle);
+
+int panfrost_jm_init(struct panfrost_device *pfdev);
+void panfrost_jm_fini(struct panfrost_device *pfdev);
+int panfrost_jm_open(struct drm_file *file);
+void panfrost_jm_close(struct drm_file *file);
+void panfrost_jm_reset_interrupts(struct panfrost_device *pfdev);
+void panfrost_jm_enable_interrupts(struct panfrost_device *pfdev);
+void panfrost_jm_suspend_irq(struct panfrost_device *pfdev);
+int panfrost_jm_is_idle(struct panfrost_device *pfdev);
int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
-void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
-void panfrost_job_suspend_irq(struct panfrost_device *pfdev);
-int panfrost_job_is_idle(struct panfrost_device *pfdev);
#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index f6b91c052cfb..8f3b7a7b6ad0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -2,6 +2,7 @@
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <drm/panfrost_drm.h>
+#include <drm/drm_print.h>
#include <linux/atomic.h>
#include <linux/bitfield.h>
@@ -81,7 +82,7 @@ static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
if (ret) {
/* The GPU hung, let's trigger a reset */
panfrost_device_schedule_reset(pfdev);
- dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
+ dev_err(pfdev->base.dev, "AS_ACTIVE bit stuck\n");
}
return ret;
@@ -222,7 +223,7 @@ static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
struct panfrost_device *pfdev = mmu->pfdev;
- if (drm_WARN_ON(pfdev->ddev, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
+ if (drm_WARN_ON(&pfdev->base, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
~AS_TRANSTAB_AARCH64_4K_ADDR_MASK))
return -EINVAL;
@@ -253,12 +254,12 @@ static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
return mmu_cfg_init_mali_lpae(mmu);
default:
/* This should never happen */
- drm_WARN(pfdev->ddev, 1, "Invalid pgtable format");
+ drm_WARN(&pfdev->base, 1, "Invalid pgtable format");
return -EINVAL;
}
}
-u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+int panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
int as;
@@ -300,7 +301,10 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
if (!atomic_read(&lru_mmu->as_count))
break;
}
- WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);
+ if (WARN_ON(&lru_mmu->list == &pfdev->as_lru_list)) {
+ as = -EBUSY;
+ goto out;
+ }
list_del_init(&lru_mmu->list);
as = lru_mmu->as;
@@ -315,7 +319,9 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
atomic_set(&mmu->as_count, 1);
list_add(&mmu->list, &pfdev->as_lru_list);
- dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
+ dev_dbg(pfdev->base.dev,
+ "Assigned AS%d to mmu %p, alloc_mask=%lx",
+ as, mmu, pfdev->as_alloc_mask);
panfrost_mmu_enable(pfdev, mmu);
@@ -381,13 +387,30 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
if (mmu->as < 0)
return;
- pm_runtime_get_noresume(pfdev->dev);
+ pm_runtime_get_noresume(pfdev->base.dev);
/* Flush the PTs only if we're already awake */
- if (pm_runtime_active(pfdev->dev))
+ if (pm_runtime_active(pfdev->base.dev))
mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
+}
+
+static void mmu_unmap_range(struct panfrost_mmu *mmu, u64 iova, size_t len)
+{
+ struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+ size_t pgsize, unmapped_len = 0;
+ size_t unmapped_page, pgcount;
+
+ while (unmapped_len < len) {
+ pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);
+
+ unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
+ WARN_ON(unmapped_page != pgsize * pgcount);
+
+ iova += pgsize * pgcount;
+ unmapped_len += pgsize * pgcount;
+ }
}
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
@@ -396,22 +419,30 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
unsigned int count;
struct scatterlist *sgl;
struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+ size_t total_mapped = 0;
u64 start_iova = iova;
+ int ret;
for_each_sgtable_dma_sg(sgt, sgl, count) {
unsigned long paddr = sg_dma_address(sgl);
size_t len = sg_dma_len(sgl);
- dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
+ dev_dbg(pfdev->base.dev,
+ "map: as=%d, iova=%llx, paddr=%lx, len=%zx",
+ mmu->as, iova, paddr, len);
while (len) {
size_t pgcount, mapped = 0;
size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
- ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
+ ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
GFP_KERNEL, &mapped);
+ if (ret)
+ goto err_unmap_pages;
+
/* Don't get stuck if things have gone wrong */
mapped = max(mapped, pgsize);
+ total_mapped += mapped;
iova += mapped;
paddr += mapped;
len -= mapped;
@@ -421,6 +452,10 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
return 0;
+
+err_unmap_pages:
+ mmu_unmap_range(mmu, start_iova, total_mapped);
+ return ret;
}
int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
@@ -431,6 +466,7 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct sg_table *sgt;
int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
+ int ret;
if (WARN_ON(mapping->active))
return 0;
@@ -442,11 +478,18 @@ int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
- mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
- prot, sgt);
+ ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+ prot, sgt);
+ if (ret)
+ goto err_put_pages;
+
mapping->active = true;
return 0;
+
+err_put_pages:
+ drm_gem_shmem_put_pages_locked(shmem);
+ return ret;
}
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
@@ -462,7 +505,7 @@ void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
if (WARN_ON(!mapping->active))
return;
- dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
+ dev_dbg(pfdev->base.dev, "unmap: as=%d, iova=%llx, len=%zx",
mapping->mmu->as, iova, len);
while (unmapped_len < len) {
@@ -559,7 +602,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
bo = bomapping->obj;
if (!bo->is_heap) {
- dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
+ dev_WARN(pfdev->base.dev, "matching BO is not heap type (GPU VA = %llx)",
bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL;
goto err_bo;
@@ -595,10 +638,12 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
refcount_set(&bo->base.pages_use_count, 1);
} else {
pages = bo->base.pages;
- if (pages[page_offset]) {
- /* Pages are already mapped, bail out. */
- goto out;
- }
+ }
+
+ sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
+ if (sgt->sgl) {
+ /* Pages are already mapped, bail out. */
+ goto out;
}
mapping = bo->base.base.filp->f_mapping;
@@ -620,23 +665,24 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
}
}
- sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
if (ret)
goto err_unlock;
- ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ ret = dma_map_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0);
if (ret)
goto err_map;
- mmu_map_sg(pfdev, bomapping->mmu, addr,
- IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
+ ret = mmu_map_sg(pfdev, bomapping->mmu, addr,
+ IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
+ if (ret)
+ goto err_mmu_map_sg;
bomapping->active = true;
bo->heap_rss_size += SZ_2M;
- dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+ dev_dbg(pfdev->base.dev, "mapped page fault @ AS%d %llx", as, addr);
out:
dma_resv_unlock(obj->resv);
@@ -645,6 +691,8 @@ out:
return 0;
+err_mmu_map_sg:
+ dma_unmap_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0);
err_map:
sg_free_table(sgt);
err_unlock:
@@ -662,13 +710,12 @@ static void panfrost_mmu_release_ctx(struct kref *kref)
spin_lock(&pfdev->as_lock);
if (mmu->as >= 0) {
- pm_runtime_get_noresume(pfdev->dev);
- if (pm_runtime_active(pfdev->dev))
+ pm_runtime_get_noresume(pfdev->base.dev);
+ if (pm_runtime_active(pfdev->base.dev))
panfrost_mmu_disable(pfdev, mmu->as);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
clear_bit(mmu->as, &pfdev->as_alloc_mask);
- clear_bit(mmu->as, &pfdev->as_in_use_mask);
list_del(&mmu->list);
}
spin_unlock(&pfdev->as_lock);
@@ -726,7 +773,7 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
- dev_err_once(pfdev->dev,
+ dev_err_once(pfdev->base.dev,
"AARCH64_4K page table not supported\n");
return ERR_PTR(-EINVAL);
}
@@ -755,7 +802,7 @@ struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
.oas = pa_bits,
.coherent_walk = pfdev->coherent,
.tlb = &mmu_tlb_ops,
- .iommu_dev = pfdev->dev,
+ .iommu_dev = pfdev->base.dev,
};
mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
@@ -848,7 +895,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
if (ret) {
/* terminal fault, print info about the fault */
- dev_err(pfdev->dev,
+ dev_err(pfdev->base.dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
"Reason: %s\n"
"raw fault status: 0x%X\n"
@@ -896,18 +943,18 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
{
int err;
- pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
+ pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "mmu");
if (pfdev->mmu_irq < 0)
return pfdev->mmu_irq;
- err = devm_request_threaded_irq(pfdev->dev, pfdev->mmu_irq,
+ err = devm_request_threaded_irq(pfdev->base.dev, pfdev->mmu_irq,
panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
IRQF_SHARED, KBUILD_MODNAME "-mmu",
pfdev);
if (err) {
- dev_err(pfdev->dev, "failed to request mmu irq");
+ dev_err(pfdev->base.dev, "failed to request mmu irq");
return err;
}
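Since panfrost_mmu_as_get() now returns an int, callers must treat negative values as errors rather than as an address-space index. A condensed sketch of the expected calling pattern (illustrative, not lifted from the patch):

	int as = panfrost_mmu_as_get(pfdev, mmu);

	if (as < 0)
		return as;	/* e.g. -EBUSY when every AS is held by active jobs */

	/* 'as' is a valid address-space index from here on. */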
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index 022a9a74a114..27c3c65ed074 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -4,6 +4,7 @@
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__
+struct panfrost_device;
struct panfrost_gem_mapping;
struct panfrost_file_priv;
struct panfrost_mmu;
@@ -16,7 +17,7 @@ void panfrost_mmu_fini(struct panfrost_device *pfdev);
void panfrost_mmu_reset(struct panfrost_device *pfdev);
void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev);
-u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+int panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu);
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 563f16bae543..7020c0192e18 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -84,11 +84,11 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
else if (perfcnt->user)
return -EBUSY;
- ret = pm_runtime_get_sync(pfdev->dev);
+ ret = pm_runtime_get_sync(pfdev->base.dev);
if (ret < 0)
goto err_put_pm;
- bo = drm_gem_shmem_create(pfdev->ddev, perfcnt->bosize);
+ bo = drm_gem_shmem_create(&pfdev->base, perfcnt->bosize);
if (IS_ERR(bo)) {
ret = PTR_ERR(bo);
goto err_put_pm;
@@ -130,9 +130,11 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_vunmap;
}
- perfcnt->user = user;
+ ret = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
+ if (ret < 0)
+ goto err_vunmap;
- as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
+ as = ret;
cfg = GPU_PERFCNT_CFG_AS(as) |
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
@@ -164,6 +166,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
/* The BO ref is retained by the mapping. */
drm_gem_object_put(&bo->base);
+ perfcnt->user = user;
+
return 0;
err_vunmap:
@@ -175,7 +179,7 @@ err_close_bo:
err_put_bo:
drm_gem_object_put(&bo->base);
err_put_pm:
- pm_runtime_put(pfdev->dev);
+ pm_runtime_put(pfdev->base.dev);
return ret;
}
@@ -203,8 +207,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
panfrost_gem_mapping_put(perfcnt->mapping);
perfcnt->mapping = NULL;
- pm_runtime_mark_last_busy(pfdev->dev);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
return 0;
}
@@ -212,7 +215,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_panfrost_perfcnt_enable *req = data;
int ret;
@@ -239,7 +242,7 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_device *pfdev = to_panfrost_device(dev);
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_panfrost_perfcnt_dump *req = data;
void __user *user_ptr = (void __user *)(uintptr_t)req->buf_ptr;
@@ -274,13 +277,12 @@ void panfrost_perfcnt_close(struct drm_file *file_priv)
struct panfrost_device *pfdev = pfile->pfdev;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
- pm_runtime_get_sync(pfdev->dev);
+ pm_runtime_get_sync(pfdev->base.dev);
mutex_lock(&perfcnt->lock);
if (perfcnt->user == pfile)
panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
- pm_runtime_mark_last_busy(pfdev->dev);
- pm_runtime_put_autosuspend(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->base.dev);
}
int panfrost_perfcnt_init(struct panfrost_device *pfdev)
@@ -318,7 +320,7 @@ int panfrost_perfcnt_init(struct panfrost_device *pfdev)
COUNTERS_PER_BLOCK * BYTES_PER_COUNTER;
}
- perfcnt = devm_kzalloc(pfdev->dev, sizeof(*perfcnt), GFP_KERNEL);
+ perfcnt = devm_kzalloc(pfdev->base.dev, sizeof(*perfcnt), GFP_KERNEL);
if (!perfcnt)
return -ENOMEM;
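The enable path above now claims an address space before publishing perfcnt->user, so a failed enable can no longer leave a stale user pointer behind. Condensed, the resulting ordering is (error unwinding elided; a sketch of the flow, not the literal function):

	ret = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
	if (ret < 0)
		goto err_vunmap;
	as = ret;				/* AS index on success */

	/* ... program GPU_PERFCNT_CFG and kick the dump using 'as' ... */

	perfcnt->user = user;			/* publish after the last failure point */
	return 0;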
diff --git a/drivers/gpu/drm/panthor/Makefile b/drivers/gpu/drm/panthor/Makefile
index 15294719b09c..753a32c446df 100644
--- a/drivers/gpu/drm/panthor/Makefile
+++ b/drivers/gpu/drm/panthor/Makefile
@@ -8,7 +8,9 @@ panthor-y := \
panthor_gem.o \
panthor_gpu.o \
panthor_heap.o \
+ panthor_hw.o \
panthor_mmu.o \
+ panthor_pwr.o \
panthor_sched.o
obj-$(CONFIG_DRM_PANTHOR) += panthor.o
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.c b/drivers/gpu/drm/panthor/panthor_devfreq.c
index 3686515d368d..2249b41ca4af 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.c
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.c
@@ -8,6 +8,7 @@
#include <linux/pm_opp.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "panthor_devfreq.h"
#include "panthor_device.h"
@@ -62,7 +63,6 @@ static void panthor_devfreq_update_utilization(struct panthor_devfreq *pdevfreq)
static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panthor_device *ptdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
int err;
@@ -72,8 +72,6 @@ static int panthor_devfreq_target(struct device *dev, unsigned long *freq,
dev_pm_opp_put(opp);
err = dev_pm_opp_set_rate(dev, *freq);
- if (!err)
- ptdev->current_frequency = *freq;
return err;
}
@@ -115,11 +113,21 @@ static int panthor_devfreq_get_dev_status(struct device *dev,
return 0;
}
+static int panthor_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct panthor_device *ptdev = dev_get_drvdata(dev);
+
+ *freq = clk_get_rate(ptdev->clks.core);
+
+ return 0;
+}
+
static struct devfreq_dev_profile panthor_devfreq_profile = {
.timer = DEVFREQ_TIMER_DELAYED,
.polling_ms = 50, /* ~3 frames */
.target = panthor_devfreq_target,
.get_dev_status = panthor_devfreq_get_dev_status,
+ .get_cur_freq = panthor_devfreq_get_cur_freq,
};
int panthor_devfreq_init(struct panthor_device *ptdev)
@@ -134,6 +142,7 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
struct thermal_cooling_device *cooling;
struct device *dev = ptdev->base.dev;
struct panthor_devfreq *pdevfreq;
+ struct opp_table *table;
struct dev_pm_opp *opp;
unsigned long cur_freq;
unsigned long freq = ULONG_MAX;
@@ -145,18 +154,30 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
ptdev->devfreq = pdevfreq;
- ret = devm_pm_opp_set_regulators(dev, reg_names);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
-
- return ret;
+ /*
+ * The power domain associated with the GPU may have already added an
+ * OPP table, complete with OPPs, as part of the platform bus
+ * initialization. If this is the case, the power domain is also in
+ * charge of controlling performance, via a set_performance callback.
+ * Only add a new OPP table from DT if there isn't such a table present
+ * already.
+ */
+ table = dev_pm_opp_get_opp_table(dev);
+ if (IS_ERR_OR_NULL(table)) {
+ ret = devm_pm_opp_set_regulators(dev, reg_names);
+ if (ret && ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
+ return ret;
+ }
+
+ ret = devm_pm_opp_of_add_table(dev);
+ if (ret)
+ return ret;
+ } else {
+ dev_pm_opp_put_opp_table(table);
}
- ret = devm_pm_opp_of_add_table(dev);
- if (ret)
- return ret;
-
spin_lock_init(&pdevfreq->lock);
panthor_devfreq_reset(pdevfreq);
@@ -198,7 +219,6 @@ int panthor_devfreq_init(struct panthor_device *ptdev)
return PTR_ERR(opp);
panthor_devfreq_profile.initial_freq = cur_freq;
- ptdev->current_frequency = cur_freq;
/*
* Set the recommended OPP; this will enable and configure the regulator
@@ -296,3 +316,19 @@ void panthor_devfreq_record_idle(struct panthor_device *ptdev)
spin_unlock_irqrestore(&pdevfreq->lock, irqflags);
}
+
+unsigned long panthor_devfreq_get_freq(struct panthor_device *ptdev)
+{
+ struct panthor_devfreq *pdevfreq = ptdev->devfreq;
+ unsigned long freq = 0;
+ int ret;
+
+ if (!pdevfreq->devfreq)
+ return 0;
+
+ ret = pdevfreq->devfreq->profile->get_cur_freq(ptdev->base.dev, &freq);
+ if (ret)
+ return 0;
+
+ return freq;
+}
diff --git a/drivers/gpu/drm/panthor/panthor_devfreq.h b/drivers/gpu/drm/panthor/panthor_devfreq.h
index b7631de695f7..f8e29e02f66c 100644
--- a/drivers/gpu/drm/panthor/panthor_devfreq.h
+++ b/drivers/gpu/drm/panthor/panthor_devfreq.h
@@ -18,4 +18,6 @@ void panthor_devfreq_suspend(struct panthor_device *ptdev);
void panthor_devfreq_record_busy(struct panthor_device *ptdev);
void panthor_devfreq_record_idle(struct panthor_device *ptdev);
+unsigned long panthor_devfreq_get_freq(struct panthor_device *ptdev);
+
#endif /* __PANTHOR_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_device.c b/drivers/gpu/drm/panthor/panthor_device.c
index f0b2da5b2b96..e133b1e0ad6d 100644
--- a/drivers/gpu/drm/panthor/panthor_device.c
+++ b/drivers/gpu/drm/panthor/panthor_device.c
@@ -13,12 +13,15 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
+#include "panthor_hw.h"
#include "panthor_mmu.h"
+#include "panthor_pwr.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
@@ -64,6 +67,16 @@ static int panthor_clk_init(struct panthor_device *ptdev)
return 0;
}
+static int panthor_init_power(struct device *dev)
+{
+ struct dev_pm_domain_list *pd_list = NULL;
+
+ if (dev->pm_domain)
+ return 0;
+
+ return devm_pm_domain_attach_list(dev, NULL, &pd_list);
+}
+
void panthor_device_unplug(struct panthor_device *ptdev)
{
/* This function can be called from two different paths: the reset work
@@ -82,6 +95,8 @@ void panthor_device_unplug(struct panthor_device *ptdev)
return;
}
+ drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);
+
/* Call drm_dev_unplug() so any access to HW blocks happening after
* that point get rejected.
*/
@@ -92,8 +107,6 @@ void panthor_device_unplug(struct panthor_device *ptdev)
*/
mutex_unlock(&ptdev->unplug.lock);
- drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);
-
/* Now, try to cleanly shutdown the GPU before the device resources
* get reclaimed.
*/
@@ -101,6 +114,7 @@ void panthor_device_unplug(struct panthor_device *ptdev)
panthor_fw_unplug(ptdev);
panthor_mmu_unplug(ptdev);
panthor_gpu_unplug(ptdev);
+ panthor_pwr_unplug(ptdev);
pm_runtime_dont_use_autosuspend(ptdev->base.dev);
pm_runtime_put_sync_suspend(ptdev->base.dev);
@@ -119,7 +133,7 @@ static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
- cancel_work_sync(&ptdev->reset.work);
+ disable_work_sync(&ptdev->reset.work);
destroy_workqueue(ptdev->reset.wq);
}
@@ -140,8 +154,8 @@ static void panthor_device_reset_work(struct work_struct *work)
panthor_sched_pre_reset(ptdev);
panthor_fw_pre_reset(ptdev, true);
panthor_mmu_pre_reset(ptdev);
- panthor_gpu_soft_reset(ptdev);
- panthor_gpu_l2_power_on(ptdev);
+ panthor_hw_soft_reset(ptdev);
+ panthor_hw_l2_power_on(ptdev);
panthor_mmu_post_reset(ptdev);
ret = panthor_fw_post_reset(ptdev);
atomic_set(&ptdev->reset.pending, 0);
@@ -171,6 +185,8 @@ int panthor_device_init(struct panthor_device *ptdev)
struct page *p;
int ret;
+ ptdev->soc_data = of_device_get_match_data(ptdev->base.dev);
+
init_completion(&ptdev->unplug.done);
ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
if (ret)
@@ -218,6 +234,12 @@ int panthor_device_init(struct panthor_device *ptdev)
if (ret)
return ret;
+ ret = panthor_init_power(ptdev->base.dev);
+ if (ret < 0) {
+ drm_err(&ptdev->base, "init power domains failed, ret=%d", ret);
+ return ret;
+ }
+
ret = panthor_devfreq_init(ptdev);
if (ret)
return ret;
@@ -244,10 +266,18 @@ int panthor_device_init(struct panthor_device *ptdev)
return ret;
}
- ret = panthor_gpu_init(ptdev);
+ ret = panthor_hw_init(ptdev);
if (ret)
goto err_rpm_put;
+ ret = panthor_pwr_init(ptdev);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = panthor_gpu_init(ptdev);
+ if (ret)
+ goto err_unplug_pwr;
+
ret = panthor_gpu_coherency_init(ptdev);
if (ret)
goto err_unplug_gpu;
@@ -288,6 +318,9 @@ err_unplug_mmu:
err_unplug_gpu:
panthor_gpu_unplug(ptdev);
+err_unplug_pwr:
+ panthor_pwr_unplug(ptdev);
+
err_rpm_put:
pm_runtime_put_sync_suspend(ptdev->base.dev);
return ret;
@@ -441,6 +474,7 @@ static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
{
int ret;
+ panthor_pwr_resume(ptdev);
panthor_gpu_resume(ptdev);
panthor_mmu_resume(ptdev);
@@ -450,6 +484,7 @@ static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
panthor_mmu_suspend(ptdev);
panthor_gpu_suspend(ptdev);
+ panthor_pwr_suspend(ptdev);
return ret;
}
@@ -563,6 +598,7 @@ int panthor_device_suspend(struct device *dev)
panthor_fw_suspend(ptdev);
panthor_mmu_suspend(ptdev);
panthor_gpu_suspend(ptdev);
+ panthor_pwr_suspend(ptdev);
drm_dev_exit(cookie);
}
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 4fc7cf2aeed5..f35e52b9546a 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -24,14 +24,27 @@ struct panthor_device;
struct panthor_gpu;
struct panthor_group_pool;
struct panthor_heap_pool;
+struct panthor_hw;
struct panthor_job;
struct panthor_mmu;
struct panthor_fw;
struct panthor_perfcnt;
+struct panthor_pwr;
struct panthor_vm;
struct panthor_vm_pool;
/**
+ * struct panthor_soc_data - Panthor SoC Data
+ */
+struct panthor_soc_data {
+ /** @asn_hash_enable: True if GPU_L2_CONFIG_ASN_HASH_ENABLE must be set. */
+ bool asn_hash_enable;
+
+ /** @asn_hash: ASN_HASH values when asn_hash_enable is true. */
+ u32 asn_hash[3];
+};
+
+/**
* enum panthor_device_pm_state - PM state
*/
enum panthor_device_pm_state {
@@ -93,6 +106,9 @@ struct panthor_device {
/** @base: Base drm_device. */
struct drm_device base;
+ /** @soc_data: Optional SoC data. */
+ const struct panthor_soc_data *soc_data;
+
/** @phys_addr: Physical address of the iomem region. */
phys_addr_t phys_addr;
@@ -120,6 +136,12 @@ struct panthor_device {
/** @csif_info: Command stream interface information. */
struct drm_panthor_csif_info csif_info;
+ /** @hw: GPU-specific data. */
+ struct panthor_hw *hw;
+
+ /** @pwr: Power control management data. */
+ struct panthor_pwr *pwr;
+
/** @gpu: GPU management data. */
struct panthor_gpu *gpu;
@@ -200,9 +222,6 @@ struct panthor_device {
/** @profile_mask: User-set profiling flags for job accounting. */
u32 profile_mask;
- /** @current_frequency: Device clock frequency at present. Set by DVFS*/
- unsigned long current_frequency;
-
/** @fast_rate: Maximum device clock frequency. Set by DVFS */
unsigned long fast_rate;
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 4d8e9b34702a..d1d4c50da5bf 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -20,11 +20,13 @@
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>
+#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
@@ -1103,14 +1105,15 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
ret = group_priority_permit(file, args->priority);
if (ret)
- return ret;
+ goto out;
- ret = panthor_group_create(pfile, args, queue_args);
- if (ret >= 0) {
- args->group_handle = ret;
- ret = 0;
- }
+ ret = panthor_group_create(pfile, args, queue_args, file->client_id);
+ if (ret < 0)
+ goto out;
+ args->group_handle = ret;
+ ret = 0;
+out:
kvfree(queue_args);
return ret;
}
@@ -1400,14 +1403,9 @@ panthor_open(struct drm_device *ddev, struct drm_file *file)
struct panthor_file *pfile;
int ret;
- if (!try_module_get(THIS_MODULE))
- return -EINVAL;
-
pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
- if (!pfile) {
- ret = -ENOMEM;
- goto err_put_mod;
- }
+ if (!pfile)
+ return -ENOMEM;
pfile->ptdev = ptdev;
pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;
@@ -1439,9 +1437,6 @@ err_destroy_vm_pool:
err_free_file:
kfree(pfile);
-
-err_put_mod:
- module_put(THIS_MODULE);
return ret;
}
@@ -1454,7 +1449,6 @@ panthor_postclose(struct drm_device *ddev, struct drm_file *file)
panthor_vm_pool_destroy(pfile);
kfree(pfile);
- module_put(THIS_MODULE);
}
static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
@@ -1527,7 +1521,8 @@ static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles);
drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
- drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
+ drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n",
+ panthor_devfreq_get_freq(ptdev));
}
static void panthor_show_internal_memory_stats(struct drm_printer *p, struct drm_file *file)
@@ -1555,6 +1550,7 @@ static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
}
static const struct file_operations panthor_drm_driver_fops = {
+ .owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
@@ -1689,7 +1685,13 @@ static struct attribute *panthor_attrs[] = {
ATTRIBUTE_GROUPS(panthor);
+static const struct panthor_soc_data soc_data_mediatek_mt8196 = {
+ .asn_hash_enable = true,
+ .asn_hash = { 0xb, 0xe, 0x0, },
+};
+
static const struct of_device_id dt_match[] = {
+ { .compatible = "mediatek,mt8196-mali", .data = &soc_data_mediatek_mt8196, },
{ .compatible = "rockchip,rk3588-mali" },
{ .compatible = "arm,mali-valhall-csf" },
{}
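Dropping the manual try_module_get()/module_put() in open/postclose is safe because the fops now carry .owner = THIS_MODULE, letting the VFS pin the module for the lifetime of each open file. Roughly, and only as a sketch of the core idea rather than the exact fs/ code:

	/* on open(2), the VFS effectively does: */
	if (!try_module_get(file->f_op->owner))
		return -ENODEV;

	/* ... and the final fput() effectively does: */
	module_put(file->f_op->owner);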
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 36f1034839c2..1a5e3c1a27fb 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -16,11 +16,13 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
+#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"
@@ -32,6 +34,7 @@
#define PROGRESS_TIMEOUT_SCALE_SHIFT 10
#define IDLE_HYSTERESIS_US 800
#define PWROFF_HYSTERESIS_US 10000
+#define MCU_HALT_TIMEOUT_US (1ULL * USEC_PER_SEC)
/**
* struct panthor_fw_binary_hdr - Firmware binary header.
@@ -316,6 +319,49 @@ panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot)
return &ptdev->fw->iface.streams[csg_slot][cs_slot];
}
+static bool panthor_fw_has_glb_state(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ return glb_iface->control->version >= CSF_IFACE_VERSION(4, 1, 0);
+}
+
+static bool panthor_fw_has_64bit_ep_req(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ return glb_iface->control->version >= CSF_IFACE_VERSION(4, 0, 0);
+}
+
+u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev,
+ struct panthor_fw_csg_iface *csg_iface)
+{
+ if (panthor_fw_has_64bit_ep_req(ptdev))
+ return csg_iface->input->endpoint_req2;
+ else
+ return csg_iface->input->endpoint_req;
+}
+
+void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev,
+ struct panthor_fw_csg_iface *csg_iface, u64 value)
+{
+ if (panthor_fw_has_64bit_ep_req(ptdev))
+ csg_iface->input->endpoint_req2 = value;
+ else
+ csg_iface->input->endpoint_req = lower_32_bits(value);
+}
+
+void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev,
+ struct panthor_fw_csg_iface *csg_iface, u64 value,
+ u64 mask)
+{
+ if (panthor_fw_has_64bit_ep_req(ptdev))
+ panthor_fw_update_reqs64(csg_iface, endpoint_req2, value, mask);
+ else
+ panthor_fw_update_reqs(csg_iface, endpoint_req, lower_32_bits(value),
+ lower_32_bits(mask));
+}
+
/**
* panthor_fw_conv_timeout() - Convert a timeout into a cycle-count
* @ptdev: Device.
@@ -995,6 +1041,9 @@ static void panthor_fw_init_global_iface(struct panthor_device *ptdev)
GLB_IDLE_EN |
GLB_IDLE;
+ if (panthor_fw_has_glb_state(ptdev))
+ glb_iface->input->ack_irq_mask |= GLB_STATE_MASK;
+
panthor_fw_update_reqs(glb_iface, req, GLB_IDLE_EN, GLB_IDLE_EN);
panthor_fw_toggle_reqs(glb_iface, req, ack,
GLB_CFG_ALLOC_EN |
@@ -1068,6 +1117,54 @@ static void panthor_fw_stop(struct panthor_device *ptdev)
drm_err(&ptdev->base, "Failed to stop MCU");
}
+static bool panthor_fw_mcu_halted(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+ bool halted;
+
+ halted = gpu_read(ptdev, MCU_STATUS) == MCU_STATUS_HALT;
+
+ if (panthor_fw_has_glb_state(ptdev))
+ halted &= (GLB_STATE_GET(glb_iface->output->ack) == GLB_STATE_HALT);
+
+ return halted;
+}
+
+static void panthor_fw_halt_mcu(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ if (panthor_fw_has_glb_state(ptdev))
+ panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_HALT), GLB_STATE_MASK);
+ else
+ panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
+
+ gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
+}
+
+static bool panthor_fw_wait_mcu_halted(struct panthor_device *ptdev)
+{
+ bool halted = false;
+
+ if (read_poll_timeout_atomic(panthor_fw_mcu_halted, halted, halted, 10,
+ MCU_HALT_TIMEOUT_US, 0, ptdev)) {
+ drm_warn(&ptdev->base, "Timed out waiting for MCU to halt");
+ return false;
+ }
+
+ return true;
+}
+
+static void panthor_fw_mcu_set_active(struct panthor_device *ptdev)
+{
+ struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
+
+ if (panthor_fw_has_glb_state(ptdev))
+ panthor_fw_update_reqs(glb_iface, req, GLB_STATE(GLB_STATE_ACTIVE), GLB_STATE_MASK);
+ else
+ panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
+}
+
/**
* panthor_fw_pre_reset() - Call before a reset.
* @ptdev: Device.
@@ -1084,21 +1181,16 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
ptdev->reset.fast = false;
if (!on_hang) {
- struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
- u32 status;
-
- panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
- gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
- if (!gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
- status == MCU_STATUS_HALT, 10,
- 100000)) {
- ptdev->reset.fast = true;
- } else {
+ panthor_fw_halt_mcu(ptdev);
+ if (!panthor_fw_wait_mcu_halted(ptdev))
drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
- }
+ else
+ ptdev->reset.fast = true;
}
panthor_job_irq_suspend(&ptdev->fw->irq);
+ panthor_fw_stop(ptdev);
}
/**
@@ -1124,14 +1216,14 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
*/
panthor_reload_fw_sections(ptdev, true);
} else {
- /* The FW detects 0 -> 1 transitions. Make sure we reset
- * the HALT bit before the FW is rebooted.
+ /*
+ * If the FW was previously successfully halted in the pre-reset
+ * operation, we need to transition it to active again before
+ * the FW is rebooted.
* This is not needed on a slow reset because FW sections are
* re-initialized.
*/
- struct panthor_fw_global_iface *glb_iface = panthor_fw_get_glb_iface(ptdev);
-
- panthor_fw_update_reqs(glb_iface, req, 0, GLB_HALT);
+ panthor_fw_mcu_set_active(ptdev);
}
ret = panthor_fw_start(ptdev);
@@ -1162,13 +1254,17 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
{
struct panthor_fw_section *section;
- cancel_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
+ disable_delayed_work_sync(&ptdev->fw->watchdog.ping_work);
if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) {
/* Make sure the IRQ handler cannot be called after that point. */
if (ptdev->fw->irq.irq)
panthor_job_irq_suspend(&ptdev->fw->irq);
+ panthor_fw_halt_mcu(ptdev);
+ if (!panthor_fw_wait_mcu_halted(ptdev))
+ drm_warn(&ptdev->base, "Failed to halt MCU on unplug");
+
panthor_fw_stop(ptdev);
}
@@ -1184,7 +1280,7 @@ void panthor_fw_unplug(struct panthor_device *ptdev)
ptdev->fw->vm = NULL;
if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
- panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
+ panthor_hw_l2_power_off(ptdev);
}
/**
@@ -1363,7 +1459,7 @@ int panthor_fw_init(struct panthor_device *ptdev)
return ret;
}
- ret = panthor_gpu_l2_power_on(ptdev);
+ ret = panthor_hw_l2_power_on(ptdev);
if (ret)
return ret;
@@ -1402,3 +1498,9 @@ err_unplug_fw:
}
MODULE_FIRMWARE("arm/mali/arch10.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch10.10/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch10.12/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch11.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch12.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch13.8/mali_csffw.bin");
+MODULE_FIRMWARE("arm/mali/arch14.8/mali_csffw.bin");
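panthor_fw_wait_mcu_halted() relies on read_poll_timeout_atomic(), which keeps re-evaluating panthor_fw_mcu_halted(ptdev) until the condition holds or the time budget runs out. Open-coded, the call above behaves roughly like this sketch:

	bool halted = false;
	u64 waited_us = 0;

	for (;;) {
		halted = panthor_fw_mcu_halted(ptdev);	/* op(args...) */
		if (halted)
			break;				/* condition met: returns 0 */
		if (waited_us >= MCU_HALT_TIMEOUT_US)
			break;				/* returns -ETIMEDOUT */
		udelay(10);				/* delay_us argument */
		waited_us += 10;
	}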
diff --git a/drivers/gpu/drm/panthor/panthor_fw.h b/drivers/gpu/drm/panthor/panthor_fw.h
index 6598d96c6d2a..fbdc21469ba3 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.h
+++ b/drivers/gpu/drm/panthor/panthor_fw.h
@@ -167,10 +167,11 @@ struct panthor_fw_csg_input_iface {
#define CSG_EP_REQ_TILER(x) (((x) << 16) & GENMASK(19, 16))
#define CSG_EP_REQ_EXCL_COMPUTE BIT(20)
#define CSG_EP_REQ_EXCL_FRAGMENT BIT(21)
-#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & GENMASK(31, 28))
#define CSG_EP_REQ_PRIORITY_MASK GENMASK(31, 28)
+#define CSG_EP_REQ_PRIORITY(x) (((x) << 28) & CSG_EP_REQ_PRIORITY_MASK)
+#define CSG_EP_REQ_PRIORITY_GET(x) (((x) & CSG_EP_REQ_PRIORITY_MASK) >> 28)
u32 endpoint_req;
- u32 reserved2[2];
+ u64 endpoint_req2;
u64 suspend_buf;
u64 protm_suspend_buf;
u32 config;
@@ -214,6 +215,13 @@ struct panthor_fw_global_input_iface {
#define GLB_FWCFG_UPDATE BIT(9)
#define GLB_IDLE_EN BIT(10)
#define GLB_SLEEP BIT(12)
+#define GLB_STATE_MASK GENMASK(14, 12)
+#define GLB_STATE_ACTIVE 0
+#define GLB_STATE_HALT 1
+#define GLB_STATE_SLEEP 2
+#define GLB_STATE_SUSPEND 3
+#define GLB_STATE(x) (((x) << 12) & GLB_STATE_MASK)
+#define GLB_STATE_GET(x) (((x) & GLB_STATE_MASK) >> 12)
#define GLB_INACTIVE_COMPUTE BIT(20)
#define GLB_INACTIVE_FRAGMENT BIT(21)
#define GLB_INACTIVE_TILER BIT(22)
@@ -457,6 +465,16 @@ struct panthor_fw_global_iface {
spin_unlock(&(__iface)->lock); \
} while (0)
+#define panthor_fw_update_reqs64(__iface, __in_reg, __val, __mask) \
+ do { \
+ u64 __cur_val, __new_val; \
+ spin_lock(&(__iface)->lock); \
+ __cur_val = READ_ONCE((__iface)->input->__in_reg); \
+ __new_val = (__cur_val & ~(__mask)) | ((__val) & (__mask)); \
+ WRITE_ONCE((__iface)->input->__in_reg, __new_val); \
+ spin_unlock(&(__iface)->lock); \
+ } while (0)
+
struct panthor_fw_global_iface *
panthor_fw_get_glb_iface(struct panthor_device *ptdev);
@@ -466,6 +484,16 @@ panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot);
struct panthor_fw_cs_iface *
panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot);
+u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev,
+ struct panthor_fw_csg_iface *csg_iface);
+
+void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev,
+ struct panthor_fw_csg_iface *csg_iface, u64 value);
+
+void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev,
+ struct panthor_fw_csg_iface *csg_iface, u64 value,
+ u64 mask);
+
int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_id, u32 req_mask,
u32 *acked, u32 timeout_ms);
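As a usage illustration (a hypothetical caller, not taken from this series), bumping only the CSG priority field goes through the masked helper so the remaining endpoint-request bits survive, regardless of whether the firmware exposes the 32-bit or 64-bit register:

	u64 ep_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface);
	u8 prio = CSG_EP_REQ_PRIORITY_GET(ep_req);

	panthor_fw_csg_endpoint_req_update(ptdev, csg_iface,
					   CSG_EP_REQ_PRIORITY(prio + 1),
					   CSG_EP_REQ_PRIORITY_MASK);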
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
index a123bc740ba1..fbde78db270a 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/slab.h>
+#include <drm/drm_print.h>
#include <drm/panthor_drm.h>
#include "panthor_device.h"
@@ -74,7 +75,6 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
mutex_destroy(&bo->label.lock);
drm_gem_free_mmap_offset(&bo->base.base);
- mutex_destroy(&bo->gpuva_list_lock);
drm_gem_shmem_free(&bo->base);
drm_gem_object_put(vm_root_gem);
}
@@ -87,7 +87,6 @@ static void panthor_gem_free_object(struct drm_gem_object *obj)
void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
{
struct panthor_vm *vm;
- int ret;
if (IS_ERR_OR_NULL(bo))
return;
@@ -95,18 +94,11 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
vm = bo->vm;
panthor_kernel_bo_vunmap(bo);
- if (drm_WARN_ON(bo->obj->dev,
- to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
- goto out_free_bo;
-
- ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
- if (ret)
- goto out_free_bo;
-
+ drm_WARN_ON(bo->obj->dev,
+ to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm));
+ panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
panthor_vm_free_va(vm, &bo->va_node);
drm_gem_object_put(bo->obj);
-
-out_free_bo:
panthor_vm_put(vm);
kfree(bo);
}
@@ -153,6 +145,9 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
bo = to_panthor_bo(&obj->base);
kbo->obj = &obj->base;
bo->flags = bo_flags;
+ bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
+ drm_gem_object_get(bo->exclusive_vm_root_gem);
+ bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
if (vm == panthor_fw_vm(ptdev))
debug_flags |= PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED;
@@ -176,9 +171,6 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
goto err_free_va;
kbo->vm = panthor_vm_get(vm);
- bo->exclusive_vm_root_gem = panthor_vm_root_gem(vm);
- drm_gem_object_get(bo->exclusive_vm_root_gem);
- bo->base.base.resv = bo->exclusive_vm_root_gem->resv;
return kbo;
err_free_va:
@@ -246,8 +238,6 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t
obj->base.base.funcs = &panthor_gem_funcs;
obj->base.map_wc = !ptdev->coherent;
- mutex_init(&obj->gpuva_list_lock);
- drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock);
mutex_init(&obj->label.lock);
panthor_gem_debugfs_bo_init(obj);
@@ -291,6 +281,23 @@ panthor_gem_create_with_handle(struct drm_file *file,
panthor_gem_debugfs_set_usage_flags(bo, 0);
+ /* If this is a write-combine mapping, we query the sgt to force a CPU
+ * cache flush (dma_map_sgtable() is called when the sgt is created).
+ * This ensures the zeroing is visible to any uncached mapping created
+ * by vmap/mmap.
+ * FIXME: Ideally this should be done when pages are allocated, not at
+ * BO creation time.
+ */
+ if (shmem->map_wc) {
+ struct sg_table *sgt;
+
+ sgt = drm_gem_shmem_get_pages_sgt(shmem);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto out_put_gem;
+ }
+ }
+
/*
* Allocate an id of idr table where the obj is registered
* and handle has the id what user can see.
@@ -299,6 +306,7 @@ panthor_gem_create_with_handle(struct drm_file *file,
if (!ret)
*size = bo->base.base.size;
+out_put_gem:
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h
index 8fc7215e9b90..80c6e24112d0 100644
--- a/drivers/gpu/drm/panthor/panthor_gem.h
+++ b/drivers/gpu/drm/panthor/panthor_gem.h
@@ -79,18 +79,6 @@ struct panthor_gem_object {
*/
struct drm_gem_object *exclusive_vm_root_gem;
- /**
- * @gpuva_list_lock: Custom GPUVA lock.
- *
- * Used to protect insertion of drm_gpuva elements to the
- * drm_gem_object.gpuva.list list.
- *
- * We can't use the GEM resv for that, because drm_gpuva_link() is
- * called in a dma-signaling path, where we're not allowed to take
- * resv locks.
- */
- struct mutex gpuva_list_lock;
-
/** @flags: Combination of drm_panthor_bo_flags flags. */
u32 flags;
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index cb7a335e07d7..06b231b2460a 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -15,9 +15,11 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "panthor_device.h"
#include "panthor_gpu.h"
+#include "panthor_hw.h"
#include "panthor_regs.h"
/**
@@ -35,40 +37,9 @@ struct panthor_gpu {
/** @reqs_acked: GPU request wait queue. */
wait_queue_head_t reqs_acked;
-};
-
-/**
- * struct panthor_model - GPU model description
- */
-struct panthor_model {
- /** @name: Model name. */
- const char *name;
-
- /** @arch_major: Major version number of architecture. */
- u8 arch_major;
-
- /** @product_major: Major version number of product. */
- u8 product_major;
-};
-
-/**
- * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
- * by a combination of the major architecture version and the major product
- * version.
- * @_name: Name for the GPU model.
- * @_arch_major: Architecture major.
- * @_product_major: Product major.
- */
-#define GPU_MODEL(_name, _arch_major, _product_major) \
-{\
- .name = __stringify(_name), \
- .arch_major = _arch_major, \
- .product_major = _product_major, \
-}
-static const struct panthor_model gpu_models[] = {
- GPU_MODEL(g610, 10, 7),
- {},
+ /** @cache_flush_lock: Lock to serialize cache flushes */
+ struct mutex cache_flush_lock;
};
#define GPU_INTERRUPTS_MASK \
@@ -83,64 +54,26 @@ static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
}
-static void panthor_gpu_init_info(struct panthor_device *ptdev)
+static void panthor_gpu_l2_config_set(struct panthor_device *ptdev)
{
- const struct panthor_model *model;
- u32 arch_major, product_major;
- u32 major, minor, status;
- unsigned int i;
-
- ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
- ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
- ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
- ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
- ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
- ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
- ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
- ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
- ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
- ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
- ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
- ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
- ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
- for (i = 0; i < 4; i++)
- ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
-
- ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
-
- ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
- ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
- ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
-
- arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
- product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
- major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
- minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
- status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
-
- for (model = gpu_models; model->name; model++) {
- if (model->arch_major == arch_major &&
- model->product_major == product_major)
- break;
+ const struct panthor_soc_data *data = ptdev->soc_data;
+ u32 l2_config;
+ u32 i;
+
+ if (!data || !data->asn_hash_enable)
+ return;
+
+ if (GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) < 11) {
+ drm_err(&ptdev->base, "Custom ASN hash not supported by the device");
+ return;
}
- drm_info(&ptdev->base,
- "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
- model->name ?: "unknown", ptdev->gpu_info.gpu_id >> 16,
- major, minor, status);
-
- drm_info(&ptdev->base,
- "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
- ptdev->gpu_info.l2_features,
- ptdev->gpu_info.tiler_features,
- ptdev->gpu_info.mem_features,
- ptdev->gpu_info.mmu_features,
- ptdev->gpu_info.as_present);
-
- drm_info(&ptdev->base,
- "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
- ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
- ptdev->gpu_info.tiler_present);
+ for (i = 0; i < ARRAY_SIZE(data->asn_hash); i++)
+ gpu_write(ptdev, GPU_ASN_HASH(i), data->asn_hash[i]);
+
+ l2_config = gpu_read(ptdev, GPU_L2_CONFIG);
+ l2_config |= GPU_L2_CONFIG_ASN_HASH_ENABLE;
+ gpu_write(ptdev, GPU_L2_CONFIG, l2_config);
}
static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
@@ -204,8 +137,8 @@ int panthor_gpu_init(struct panthor_device *ptdev)
spin_lock_init(&gpu->reqs_lock);
init_waitqueue_head(&gpu->reqs_acked);
+ mutex_init(&gpu->cache_flush_lock);
ptdev->gpu = gpu;
- panthor_gpu_init_info(ptdev);
dma_set_max_seg_size(ptdev->base.dev, UINT_MAX);
pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
@@ -309,6 +242,11 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev,
return 0;
}
+void panthor_gpu_l2_power_off(struct panthor_device *ptdev)
+{
+ panthor_gpu_power_off(ptdev, L2, ptdev->gpu_info.l2_present, 20000);
+}
+
/**
* panthor_gpu_l2_power_on() - Power-on the L2-cache
* @ptdev: Device.
@@ -332,8 +270,9 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev)
hweight64(ptdev->gpu_info.shader_present));
}
- /* Set the desired coherency mode before the power up of L2 */
+ /* Set the desired coherency mode and L2 config before the power up of L2 */
panthor_gpu_coherency_set(ptdev);
+ panthor_gpu_l2_config_set(ptdev);
return panthor_gpu_power_on(ptdev, L2, 1, 20000);
}
@@ -353,6 +292,9 @@ int panthor_gpu_flush_caches(struct panthor_device *ptdev,
bool timedout = false;
unsigned long flags;
+ /* Serialize cache flush operations. */
+ guard(mutex)(&ptdev->gpu->cache_flush_lock);
+
spin_lock_irqsave(&ptdev->gpu->reqs_lock, flags);
if (!drm_WARN_ON(&ptdev->base,
ptdev->gpu->pending_reqs & GPU_IRQ_CLEAN_CACHES_COMPLETED)) {
@@ -432,9 +374,9 @@ void panthor_gpu_suspend(struct panthor_device *ptdev)
{
/* On a fast reset, simply power down the L2. */
if (!ptdev->reset.fast)
- panthor_gpu_soft_reset(ptdev);
+ panthor_hw_soft_reset(ptdev);
else
- panthor_gpu_power_off(ptdev, L2, 1, 20000);
+ panthor_hw_l2_power_off(ptdev);
panthor_gpu_irq_suspend(&ptdev->gpu->irq);
}
@@ -449,6 +391,6 @@ void panthor_gpu_suspend(struct panthor_device *ptdev)
void panthor_gpu_resume(struct panthor_device *ptdev)
{
panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK);
- panthor_gpu_l2_power_on(ptdev);
+ panthor_hw_l2_power_on(ptdev);
}
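The new cache_flush_lock is taken through guard(mutex)() from <linux/cleanup.h>, which releases the lock automatically when the scope exits and so removes the need for an unlock on every return path. A minimal illustration of the idiom (the demo function and its failing condition are hypothetical):

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static int demo(struct mutex *m)
	{
		guard(mutex)(m);	/* mutex_lock(m) happens here */

		if (!try_something())	/* hypothetical failure */
			return -EBUSY;	/* mutex_unlock(m) runs automatically */

		return 0;		/* ... on every exit from this scope */
	}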
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h
index 7c17a8c06858..12e66f48ced1 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.h
+++ b/drivers/gpu/drm/panthor/panthor_gpu.h
@@ -46,6 +46,7 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
type ## _PWRTRANS, \
mask, timeout_us)
+void panthor_gpu_l2_power_off(struct panthor_device *ptdev);
int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
u32 l2, u32 lsc, u32 other);
diff --git a/drivers/gpu/drm/panthor/panthor_heap.c b/drivers/gpu/drm/panthor/panthor_heap.c
index d236e9ceade4..0b6ff4c0a11b 100644
--- a/drivers/gpu/drm/panthor/panthor_heap.c
+++ b/drivers/gpu/drm/panthor/panthor_heap.c
@@ -4,6 +4,7 @@
#include <linux/iosys-map.h>
#include <linux/rwsem.h>
+#include <drm/drm_print.h>
#include <drm/panthor_drm.h>
#include "panthor_device.h"
diff --git a/drivers/gpu/drm/panthor/panthor_hw.c b/drivers/gpu/drm/panthor/panthor_hw.c
new file mode 100644
index 000000000000..87ebb7ae42c4
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_hw.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2025 ARM Limited. All rights reserved. */
+
+#include <drm/drm_print.h>
+
+#include "panthor_device.h"
+#include "panthor_gpu.h"
+#include "panthor_hw.h"
+#include "panthor_pwr.h"
+#include "panthor_regs.h"
+
+#define GPU_PROD_ID_MAKE(arch_major, prod_major) \
+ (((arch_major) << 24) | (prod_major))
+
+/** struct panthor_hw_entry - HW arch major to panthor_hw binding entry */
+struct panthor_hw_entry {
+ /** @arch_min: Minimum supported architecture major value (inclusive) */
+ u8 arch_min;
+
+ /** @arch_max: Maximum supported architecture major value (inclusive) */
+ u8 arch_max;
+
+ /** @hwdev: Pointer to panthor_hw structure */
+ struct panthor_hw *hwdev;
+};
+
+static struct panthor_hw panthor_hw_arch_v10 = {
+ .ops = {
+ .soft_reset = panthor_gpu_soft_reset,
+ .l2_power_off = panthor_gpu_l2_power_off,
+ .l2_power_on = panthor_gpu_l2_power_on,
+ },
+};
+
+static struct panthor_hw panthor_hw_arch_v14 = {
+ .ops = {
+ .soft_reset = panthor_pwr_reset_soft,
+ .l2_power_off = panthor_pwr_l2_power_off,
+ .l2_power_on = panthor_pwr_l2_power_on,
+ },
+};
+
+static struct panthor_hw_entry panthor_hw_match[] = {
+ {
+ .arch_min = 10,
+ .arch_max = 13,
+ .hwdev = &panthor_hw_arch_v10,
+ },
+ {
+ .arch_min = 14,
+ .arch_max = 14,
+ .hwdev = &panthor_hw_arch_v14,
+ },
+};
+
+static char *get_gpu_model_name(struct panthor_device *ptdev)
+{
+ const u32 gpu_id = ptdev->gpu_info.gpu_id;
+ const u32 product_id = GPU_PROD_ID_MAKE(GPU_ARCH_MAJOR(gpu_id),
+ GPU_PROD_MAJOR(gpu_id));
+ const bool ray_intersection = !!(ptdev->gpu_info.gpu_features &
+ GPU_FEATURES_RAY_INTERSECTION);
+ const u8 shader_core_count = hweight64(ptdev->gpu_info.shader_present);
+
+ switch (product_id) {
+ case GPU_PROD_ID_MAKE(10, 2):
+ return "Mali-G710";
+ case GPU_PROD_ID_MAKE(10, 3):
+ return "Mali-G510";
+ case GPU_PROD_ID_MAKE(10, 4):
+ return "Mali-G310";
+ case GPU_PROD_ID_MAKE(10, 7):
+ return "Mali-G610";
+ case GPU_PROD_ID_MAKE(11, 2):
+ if (shader_core_count >= 10 && ray_intersection)
+ return "Mali-G715-Immortalis";
+ else if (shader_core_count >= 7)
+ return "Mali-G715";
+
+ fallthrough;
+ case GPU_PROD_ID_MAKE(11, 3):
+ return "Mali-G615";
+ case GPU_PROD_ID_MAKE(12, 0):
+ if (shader_core_count >= 10 && ray_intersection)
+ return "Mali-G720-Immortalis";
+ else if (shader_core_count >= 6)
+ return "Mali-G720";
+
+ fallthrough;
+ case GPU_PROD_ID_MAKE(12, 1):
+ return "Mali-G620";
+ case GPU_PROD_ID_MAKE(13, 0):
+ if (shader_core_count >= 10 && ray_intersection)
+ return "Mali-G925-Immortalis";
+ else if (shader_core_count >= 6)
+ return "Mali-G725";
+
+ fallthrough;
+ case GPU_PROD_ID_MAKE(13, 1):
+ return "Mali-G625";
+ case GPU_PROD_ID_MAKE(14, 0):
+ return "Mali-G1-Ultra";
+ case GPU_PROD_ID_MAKE(14, 1):
+ return "Mali-G1-Premium";
+ case GPU_PROD_ID_MAKE(14, 3):
+ return "Mali-G1-Pro";
+ }
+
+ return "(Unknown Mali GPU)";
+}
+
+static void panthor_gpu_info_init(struct panthor_device *ptdev)
+{
+ unsigned int i;
+
+ ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
+ ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
+ ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
+ ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
+ ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
+ ptdev->gpu_info.mem_features = gpu_read(ptdev, GPU_MEM_FEATURES);
+ ptdev->gpu_info.mmu_features = gpu_read(ptdev, GPU_MMU_FEATURES);
+ ptdev->gpu_info.thread_features = gpu_read(ptdev, GPU_THREAD_FEATURES);
+ ptdev->gpu_info.max_threads = gpu_read(ptdev, GPU_THREAD_MAX_THREADS);
+ ptdev->gpu_info.thread_max_workgroup_size = gpu_read(ptdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+ ptdev->gpu_info.thread_max_barrier_size = gpu_read(ptdev, GPU_THREAD_MAX_BARRIER_SIZE);
+ ptdev->gpu_info.coherency_features = gpu_read(ptdev, GPU_COHERENCY_FEATURES);
+ for (i = 0; i < 4; i++)
+ ptdev->gpu_info.texture_features[i] = gpu_read(ptdev, GPU_TEXTURE_FEATURES(i));
+
+ ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
+
+ /* Introduced in arch 11.x */
+ ptdev->gpu_info.gpu_features = gpu_read64(ptdev, GPU_FEATURES);
+
+ if (panthor_hw_has_pwr_ctrl(ptdev)) {
+ /* Introduced in arch 14.x */
+ ptdev->gpu_info.l2_present = gpu_read64(ptdev, PWR_L2_PRESENT);
+ ptdev->gpu_info.tiler_present = gpu_read64(ptdev, PWR_TILER_PRESENT);
+ ptdev->gpu_info.shader_present = gpu_read64(ptdev, PWR_SHADER_PRESENT);
+ } else {
+ ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
+ ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
+ ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
+ }
+}
+
+static void panthor_hw_info_init(struct panthor_device *ptdev)
+{
+ u32 major, minor, status;
+
+ panthor_gpu_info_init(ptdev);
+
+ major = GPU_VER_MAJOR(ptdev->gpu_info.gpu_id);
+ minor = GPU_VER_MINOR(ptdev->gpu_info.gpu_id);
+ status = GPU_VER_STATUS(ptdev->gpu_info.gpu_id);
+
+ drm_info(&ptdev->base,
+ "%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ get_gpu_model_name(ptdev), ptdev->gpu_info.gpu_id >> 16,
+ major, minor, status);
+
+ drm_info(&ptdev->base,
+ "Features: L2:%#x Tiler:%#x Mem:%#x MMU:%#x AS:%#x",
+ ptdev->gpu_info.l2_features,
+ ptdev->gpu_info.tiler_features,
+ ptdev->gpu_info.mem_features,
+ ptdev->gpu_info.mmu_features,
+ ptdev->gpu_info.as_present);
+
+ drm_info(&ptdev->base,
+ "shader_present=0x%0llx l2_present=0x%0llx tiler_present=0x%0llx",
+ ptdev->gpu_info.shader_present, ptdev->gpu_info.l2_present,
+ ptdev->gpu_info.tiler_present);
+}
+
+static int panthor_hw_bind_device(struct panthor_device *ptdev)
+{
+ struct panthor_hw *hdev = NULL;
+ const u32 arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(panthor_hw_match); i++) {
+ struct panthor_hw_entry *entry = &panthor_hw_match[i];
+
+ if (arch_major >= entry->arch_min && arch_major <= entry->arch_max) {
+ hdev = entry->hwdev;
+ break;
+ }
+ }
+
+ if (!hdev)
+ return -EOPNOTSUPP;
+
+ ptdev->hw = hdev;
+
+ return 0;
+}
+
+static int panthor_hw_gpu_id_init(struct panthor_device *ptdev)
+{
+ ptdev->gpu_info.gpu_id = gpu_read(ptdev, GPU_ID);
+ if (!ptdev->gpu_info.gpu_id)
+ return -ENXIO;
+
+ return 0;
+}
+
+int panthor_hw_init(struct panthor_device *ptdev)
+{
+ int ret = 0;
+
+ ret = panthor_hw_gpu_id_init(ptdev);
+ if (ret)
+ return ret;
+
+ ret = panthor_hw_bind_device(ptdev);
+ if (ret)
+ return ret;
+
+ panthor_hw_info_init(ptdev);
+
+ return 0;
+}
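For reference, the product-ID key used by get_gpu_model_name() packs the architecture major into the top byte. A quick worked example, assuming the usual Mali GPU_ID layout (arch major in bits [31:28], product major in bits [19:16]) and a hypothetical register value:

	const u32 gpu_id = 0xb0020000;	/* hypothetical: arch major 11, product major 2 */
	const u32 key = GPU_PROD_ID_MAKE(GPU_ARCH_MAJOR(gpu_id),
					 GPU_PROD_MAJOR(gpu_id));

	/* key == (11 << 24) | 2 == 0x0b000002, which lands on the
	 * GPU_PROD_ID_MAKE(11, 2) case (Mali-G715 family) above. */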
diff --git a/drivers/gpu/drm/panthor/panthor_hw.h b/drivers/gpu/drm/panthor/panthor_hw.h
new file mode 100644
index 000000000000..56c68c1e9c26
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_hw.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2025 ARM Limited. All rights reserved. */
+
+#ifndef __PANTHOR_HW_H__
+#define __PANTHOR_HW_H__
+
+#include "panthor_device.h"
+#include "panthor_regs.h"
+
+/**
+ * struct panthor_hw_ops - HW operations that are specific to a GPU
+ */
+struct panthor_hw_ops {
+ /** @soft_reset: Soft reset function pointer */
+ int (*soft_reset)(struct panthor_device *ptdev);
+
+ /** @l2_power_off: L2 power off function pointer */
+ void (*l2_power_off)(struct panthor_device *ptdev);
+
+ /** @l2_power_on: L2 power on function pointer */
+ int (*l2_power_on)(struct panthor_device *ptdev);
+};
+
+/**
+ * struct panthor_hw - GPU specific register mapping and functions
+ */
+struct panthor_hw {
+ /** @ops: Panthor HW specific operations */
+ struct panthor_hw_ops ops;
+};
+
+int panthor_hw_init(struct panthor_device *ptdev);
+
+static inline int panthor_hw_soft_reset(struct panthor_device *ptdev)
+{
+ return ptdev->hw->ops.soft_reset(ptdev);
+}
+
+static inline int panthor_hw_l2_power_on(struct panthor_device *ptdev)
+{
+ return ptdev->hw->ops.l2_power_on(ptdev);
+}
+
+static inline void panthor_hw_l2_power_off(struct panthor_device *ptdev)
+{
+ ptdev->hw->ops.l2_power_off(ptdev);
+}
+
+static inline bool panthor_hw_has_pwr_ctrl(struct panthor_device *ptdev)
+{
+ return GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) >= 14;
+}
+
+#endif /* __PANTHOR_HW_H__ */
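The static inlines above keep call sites independent of the GPU generation; per the dispatch table in panthor_hw.c, a caller such as the reset path simply does:

	ret = panthor_hw_soft_reset(ptdev);	/* arch v10..v13 -> panthor_gpu_soft_reset,
						 * arch v14 -> panthor_pwr_reset_soft */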
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 4140f697ba5a..d4839d282689 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -7,6 +7,7 @@
#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>
@@ -29,6 +30,7 @@
#include "panthor_device.h"
#include "panthor_gem.h"
+#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
@@ -180,20 +182,6 @@ struct panthor_vm_op_ctx {
u64 range;
} va;
- /**
- * @returned_vmas: List of panthor_vma objects returned after a VM operation.
- *
- * For unmap operations, this will contain all VMAs that were covered by the
- * specified VA range.
- *
- * For map operations, this will contain all VMAs that previously mapped to
- * the specified VA range.
- *
- * Those VMAs, and the resources they point to will be released as part of
- * the op_ctx cleanup operation.
- */
- struct list_head returned_vmas;
-
/** @map: Fields specific to a map operation. */
struct {
/** @map.vm_bo: Buffer object to map. */
@@ -571,8 +559,24 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr,
static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
u64 iova, u64 size, u32 op)
{
+ const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV;
+ u32 lsc_flush_op;
+ int ret;
+
lockdep_assert_held(&ptdev->mmu->as.slots_lock);
+ switch (op) {
+ case AS_COMMAND_FLUSH_MEM:
+ lsc_flush_op = CACHE_CLEAN | CACHE_INV;
+ break;
+ case AS_COMMAND_FLUSH_PT:
+ lsc_flush_op = 0;
+ break;
+ default:
+ drm_WARN(&ptdev->base, 1, "Unexpected AS_COMMAND: %d", op);
+ return -EINVAL;
+ }
+
if (as_nr < 0)
return 0;
@@ -582,13 +586,24 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
* power it up
*/
- if (op != AS_COMMAND_UNLOCK)
- lock_region(ptdev, as_nr, iova, size);
+ lock_region(ptdev, as_nr, iova, size);
- /* Run the MMU operation */
- write_cmd(ptdev, as_nr, op);
+ ret = wait_ready(ptdev, as_nr);
+ if (ret)
+ return ret;
+
+ ret = panthor_gpu_flush_caches(ptdev, l2_flush_op, lsc_flush_op, 0);
+ if (ret)
+ return ret;
+
+ /*
+ * Explicitly unlock the region as the AS is not unlocked automatically
+ * at the end of the GPU_CONTROL cache flush command, unlike
+ * AS_COMMAND_FLUSH_MEM or AS_COMMAND_FLUSH_PT.
+ */
+ write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK);
- /* Wait for the flush to complete */
+ /* Wait for the unlock command to complete */
return wait_ready(ptdev, as_nr);
}
@@ -889,10 +904,9 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
struct panthor_device *ptdev = vm->ptdev;
struct io_pgtable_ops *ops = vm->pgtbl_ops;
+ u64 start_iova = iova;
u64 offset = 0;
- drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size);
-
while (offset < size) {
size_t unmapped_sz = 0, pgcount;
size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);
@@ -907,6 +921,12 @@ static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
panthor_vm_flush_range(vm, iova, offset + unmapped_sz);
return -EINVAL;
}
+
+ drm_dbg(&ptdev->base,
+ "unmap: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pgcnt=%zu, pgsz=%zu",
+ vm->as.id, start_iova, size, iova + offset,
+ unmapped_sz / pgsize, pgsize);
+
offset += unmapped_sz;
}
@@ -922,6 +942,7 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
struct scatterlist *sgl;
struct io_pgtable_ops *ops = vm->pgtbl_ops;
u64 start_iova = iova;
+ u64 start_size = size;
int ret;
if (!size)
@@ -941,15 +962,18 @@ panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
len = min_t(size_t, len, size);
size -= len;
- drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
- vm->as.id, iova, &paddr, len);
-
while (len) {
size_t pgcount, mapped = 0;
size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);
ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
GFP_KERNEL, &mapped);
+
+ drm_dbg(&ptdev->base,
+ "map: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pa=%pad, pgcnt=%zu, pgsz=%zu",
+ vm->as.id, start_iova, start_size, iova, &paddr,
+ mapped / pgsize, pgsize);
+
iova += mapped;
paddr += mapped;
len -= mapped;
@@ -1053,47 +1077,18 @@ void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
mutex_unlock(&vm->mm_lock);
}
-static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
+static void panthor_vm_bo_free(struct drm_gpuvm_bo *vm_bo)
{
struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
- struct drm_gpuvm *vm = vm_bo->vm;
- bool unpin;
- /* We must retain the GEM before calling drm_gpuvm_bo_put(),
- * otherwise the mutex might be destroyed while we hold it.
- * Same goes for the VM, since we take the VM resv lock.
- */
- drm_gem_object_get(&bo->base.base);
- drm_gpuvm_get(vm);
-
- /* We take the resv lock to protect against concurrent accesses to the
- * gpuvm evicted/extobj lists that are modified in
- * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
- * releases sthe last vm_bo reference.
- * We take the BO GPUVA list lock to protect the vm_bo removal from the
- * GEM vm_bo list.
- */
- dma_resv_lock(drm_gpuvm_resv(vm), NULL);
- mutex_lock(&bo->gpuva_list_lock);
- unpin = drm_gpuvm_bo_put(vm_bo);
- mutex_unlock(&bo->gpuva_list_lock);
- dma_resv_unlock(drm_gpuvm_resv(vm));
-
- /* If the vm_bo object was destroyed, release the pin reference that
- * was hold by this object.
- */
- if (unpin && !drm_gem_is_imported(&bo->base.base))
+ if (!drm_gem_is_imported(&bo->base.base))
drm_gem_shmem_unpin(&bo->base);
-
- drm_gpuvm_put(vm);
- drm_gem_object_put(&bo->base.base);
+ kfree(vm_bo);
}
static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
struct panthor_vm *vm)
{
- struct panthor_vma *vma, *tmp_vma;
-
u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
op_ctx->rsvd_page_tables.ptr;
@@ -1106,16 +1101,26 @@ static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
kfree(op_ctx->rsvd_page_tables.pages);
if (op_ctx->map.vm_bo)
- panthor_vm_bo_put(op_ctx->map.vm_bo);
+ drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
kfree(op_ctx->preallocated_vmas[i]);
- list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
- list_del(&vma->node);
- panthor_vm_bo_put(vma->base.vm_bo);
- kfree(vma);
+ drm_gpuvm_bo_deferred_cleanup(&vm->base);
+}
+
+static void
+panthor_vm_op_ctx_return_vma(struct panthor_vm_op_ctx *op_ctx,
+ struct panthor_vma *vma)
+{
+ for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
+ if (!op_ctx->preallocated_vmas[i]) {
+ op_ctx->preallocated_vmas[i] = vma;
+ return;
+ }
}
+
+ WARN_ON_ONCE(1);
}
static struct panthor_vma *
@@ -1147,10 +1152,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
break;
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
- /* Partial unmaps might trigger a remap with either a prev or a next VA,
- * but not both.
+ /* Two VMAs can be needed for an unmap, as an unmap can happen
+ * in the middle of a drm_gpuva, requiring a remap with both
+ * prev & next VA. Or an unmap can span more than one drm_gpuva
+ * where the first and last ones are covered partially, requiring
+ * a remap for the first with a prev VA and a remap for the last
+ * with a next VA.
*/
- vma_count = 1;
+ vma_count = 2;
break;
default:
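/*
 * Editor's illustration (not part of the patch) of why two preallocated
 * VMAs may be needed for one unmap, per the comment above:
 *
 *   Unmap inside a single drm_gpuva:
 *     before: [AAAAAAAAAAAA]
 *     unmap :     [xxxx]
 *     after : [AAA]    [AAA]   -> one remap needing prev + next VMAs
 *
 *   Unmap spanning several drm_gpuvas, partial at both ends:
 *     before: [AAAA][BBBB][CCCC]
 *     unmap :   [xxxxxxxxxxx]
 *     after : [AA]        [CC]  -> prev VMA for A's remap, next for C's
 */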
@@ -1194,7 +1203,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
(flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
return -EINVAL;
- /* Make sure the VA and size are aligned and in-bounds. */
+ /* Make sure the VA and size are in-bounds. */
if (size > bo->base.base.size || offset > bo->base.base.size - size)
return -EINVAL;
@@ -1204,7 +1213,6 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
return -EINVAL;
memset(op_ctx, 0, sizeof(*op_ctx));
- INIT_LIST_HEAD(&op_ctx->returned_vmas);
op_ctx->flags = flags;
op_ctx->va.range = size;
op_ctx->va.addr = va;
@@ -1215,7 +1223,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
if (!drm_gem_is_imported(&bo->base.base)) {
/* Pre-reserve the BO pages, so the map operation doesn't have to
- * allocate.
+ * allocate. This pin is dropped in panthor_vm_bo_free(), so
+ * once we have successfully called drm_gpuvm_bo_create(),
+ * GPUVM will take care of dropping the pin for us.
*/
ret = drm_gem_shmem_pin(&bo->base);
if (ret)
@@ -1249,21 +1259,11 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
* calling this function.
*/
dma_resv_lock(panthor_vm_resv(vm), NULL);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
dma_resv_unlock(panthor_vm_resv(vm));
- /* If the a vm_bo for this <VM,BO> combination exists, it already
- * retains a pin ref, and we can release the one we took earlier.
- *
- * If our pre-allocated vm_bo is picked, it now retains the pin ref,
- * which will be released in panthor_vm_bo_put().
- */
- if (preallocated_vm_bo != op_ctx->map.vm_bo &&
- !drm_gem_is_imported(&bo->base.base))
- drm_gem_shmem_unpin(&bo->base);
-
op_ctx->map.bo_offset = offset;
/* L1, L2 and L3 page tables.
@@ -1311,7 +1311,6 @@ static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
int ret;
memset(op_ctx, 0, sizeof(*op_ctx));
- INIT_LIST_HEAD(&op_ctx->returned_vmas);
op_ctx->va.range = size;
op_ctx->va.addr = va;
op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;
@@ -1359,7 +1358,6 @@ static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx
struct panthor_vm *vm)
{
memset(op_ctx, 0, sizeof(*op_ctx));
- INIT_LIST_HEAD(&op_ctx->returned_vmas);
op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
}
@@ -2003,28 +2001,15 @@ static void panthor_vma_link(struct panthor_vm *vm,
{
struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
- mutex_lock(&bo->gpuva_list_lock);
+ mutex_lock(&bo->base.base.gpuva.lock);
drm_gpuva_link(&vma->base, vm_bo);
- drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
- mutex_unlock(&bo->gpuva_list_lock);
+ mutex_unlock(&bo->base.base.gpuva.lock);
}
-static void panthor_vma_unlink(struct panthor_vm *vm,
- struct panthor_vma *vma)
+static void panthor_vma_unlink(struct panthor_vma *vma)
{
- struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
- struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);
-
- mutex_lock(&bo->gpuva_list_lock);
- drm_gpuva_unlink(&vma->base);
- mutex_unlock(&bo->gpuva_list_lock);
-
- /* drm_gpuva_unlink() release the vm_bo, but we manually retained it
- * when entering this function, so we can implement deferred VMA
- * destruction. Re-assign it here.
- */
- vma->base.vm_bo = vm_bo;
- list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
+ drm_gpuva_unlink_defer(&vma->base);
+ kfree(vma);
}
static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
@@ -2053,15 +2038,17 @@ static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
op_ctx->map.sgt, op->map.gem.offset,
op->map.va.range);
- if (ret)
+ if (ret) {
+ panthor_vm_op_ctx_return_vma(op_ctx, vma);
return ret;
+ }
- /* Ref owned by the mapping now, clear the obj field so we don't release the
- * pinning/obj ref behind GPUVA's back.
- */
drm_gpuva_map(&vm->base, &vma->base, &op->map);
panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
+
+ drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo);
op_ctx->map.vm_bo = NULL;
+
return 0;
}
@@ -2100,16 +2087,14 @@ static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
* owned by the old mapping which will be released when this
* mapping is destroyed, we need to grab a ref here.
*/
- panthor_vma_link(vm, prev_vma,
- drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+ panthor_vma_link(vm, prev_vma, op->remap.unmap->va->vm_bo);
}
if (next_vma) {
- panthor_vma_link(vm, next_vma,
- drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
+ panthor_vma_link(vm, next_vma, op->remap.unmap->va->vm_bo);
}
- panthor_vma_unlink(vm, unmap_vma);
+ panthor_vma_unlink(unmap_vma);
return 0;
}
@@ -2126,12 +2111,13 @@ static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
return ret;
drm_gpuva_unmap(&op->unmap);
- panthor_vma_unlink(vm, unmap_vma);
+ panthor_vma_unlink(unmap_vma);
return 0;
}
static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
.vm_free = panthor_vm_free,
+ .vm_bo_free = panthor_vm_bo_free,
.sm_step_map = panthor_gpuva_sm_step_map,
.sm_step_remap = panthor_gpuva_sm_step_remap,
.sm_step_unmap = panthor_gpuva_sm_step_unmap,
@@ -2169,15 +2155,22 @@ panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
mutex_lock(&vm->op_lock);
vm->op_ctx = op;
switch (op_type) {
- case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
+ case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
+ const struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = op->va.addr,
+ .map.va.range = op->va.range,
+ .map.gem.obj = op->map.vm_bo->obj,
+ .map.gem.offset = op->map.bo_offset,
+ };
+
if (vm->unusable) {
ret = -EINVAL;
break;
}
- ret = drm_gpuvm_sm_map(&vm->base, vm, op->va.addr, op->va.range,
- op->map.vm_bo->obj, op->map.bo_offset);
+ ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
break;
+ }
case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
@@ -2380,8 +2373,9 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
* to be handled the same way user VMAs are.
*/
drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
- DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
- min_va, va_range, 0, 0, &panthor_gpuvm_ops);
+ DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE,
+ &ptdev->base, dummy_gem, min_va, va_range, 0, 0,
+ &panthor_gpuvm_ops);
drm_gem_object_put(dummy_gem);
return vm;
@@ -2411,7 +2405,7 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
int ret;
/* Aligned on page size. */
- if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
+ if (!IS_ALIGNED(op->va | op->size | op->bo_offset, vm_pgsz))
return -EINVAL;
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
diff --git a/drivers/gpu/drm/panthor/panthor_pwr.c b/drivers/gpu/drm/panthor/panthor_pwr.c
new file mode 100644
index 000000000000..57cfc7ce715b
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_pwr.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+/* Copyright 2025 ARM Limited. All rights reserved. */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/cleanup.h>
+#include <linux/iopoll.h>
+#include <linux/wait.h>
+
+#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
+
+#include "panthor_device.h"
+#include "panthor_hw.h"
+#include "panthor_pwr.h"
+#include "panthor_regs.h"
+
+#define PWR_INTERRUPTS_MASK \
+ (PWR_IRQ_POWER_CHANGED_SINGLE | \
+ PWR_IRQ_POWER_CHANGED_ALL | \
+ PWR_IRQ_DELEGATION_CHANGED | \
+ PWR_IRQ_RESET_COMPLETED | \
+ PWR_IRQ_RETRACT_COMPLETED | \
+ PWR_IRQ_INSPECT_COMPLETED | \
+ PWR_IRQ_COMMAND_NOT_ALLOWED | \
+ PWR_IRQ_COMMAND_INVALID)
+
+#define PWR_ALL_CORES_MASK GENMASK_U64(63, 0)
+
+#define PWR_DOMAIN_MAX_BITS 16
+
+#define PWR_TRANSITION_TIMEOUT_US (2ULL * USEC_PER_SEC)
+
+#define PWR_RETRACT_TIMEOUT_US (2ULL * USEC_PER_MSEC)
+
+#define PWR_RESET_TIMEOUT_MS 500
+
+/**
+ * struct panthor_pwr - PWR_CONTROL block management data.
+ */
+struct panthor_pwr {
+ /** @irq: PWR irq. */
+ struct panthor_irq irq;
+
+ /** @reqs_lock: Lock protecting access to pending_reqs. */
+ spinlock_t reqs_lock;
+
+ /** @pending_reqs: Pending PWR requests. */
+ u32 pending_reqs;
+
+ /** @reqs_acked: PWR request wait queue. */
+ wait_queue_head_t reqs_acked;
+};
+
+static void panthor_pwr_irq_handler(struct panthor_device *ptdev, u32 status)
+{
+ spin_lock(&ptdev->pwr->reqs_lock);
+ gpu_write(ptdev, PWR_INT_CLEAR, status);
+
+ if (unlikely(status & PWR_IRQ_COMMAND_NOT_ALLOWED))
+ drm_err(&ptdev->base, "PWR_IRQ: COMMAND_NOT_ALLOWED");
+
+ if (unlikely(status & PWR_IRQ_COMMAND_INVALID))
+ drm_err(&ptdev->base, "PWR_IRQ: COMMAND_INVALID");
+
+ if (status & ptdev->pwr->pending_reqs) {
+ ptdev->pwr->pending_reqs &= ~status;
+ wake_up_all(&ptdev->pwr->reqs_acked);
+ }
+ spin_unlock(&ptdev->pwr->reqs_lock);
+}
+PANTHOR_IRQ_HANDLER(pwr, PWR, panthor_pwr_irq_handler);
+
+static void panthor_pwr_write_command(struct panthor_device *ptdev, u32 command, u64 args)
+{
+ if (args)
+ gpu_write64(ptdev, PWR_CMDARG, args);
+
+ gpu_write(ptdev, PWR_COMMAND, command);
+}
+
+static bool reset_irq_raised(struct panthor_device *ptdev)
+{
+ return gpu_read(ptdev, PWR_INT_RAWSTAT) & PWR_IRQ_RESET_COMPLETED;
+}
+
+static bool reset_pending(struct panthor_device *ptdev)
+{
+ return (ptdev->pwr->pending_reqs & PWR_IRQ_RESET_COMPLETED);
+}
+
+static int panthor_pwr_reset(struct panthor_device *ptdev, u32 reset_cmd)
+{
+ scoped_guard(spinlock_irqsave, &ptdev->pwr->reqs_lock) {
+ if (reset_pending(ptdev)) {
+ drm_WARN(&ptdev->base, 1, "Reset already pending");
+ } else {
+ ptdev->pwr->pending_reqs |= PWR_IRQ_RESET_COMPLETED;
+ gpu_write(ptdev, PWR_INT_CLEAR, PWR_IRQ_RESET_COMPLETED);
+ panthor_pwr_write_command(ptdev, reset_cmd, 0);
+ }
+ }
+
+ if (!wait_event_timeout(ptdev->pwr->reqs_acked, !reset_pending(ptdev),
+ msecs_to_jiffies(PWR_RESET_TIMEOUT_MS))) {
+ guard(spinlock_irqsave)(&ptdev->pwr->reqs_lock);
+
+ if (reset_pending(ptdev) && !reset_irq_raised(ptdev)) {
+ drm_err(&ptdev->base, "RESET timed out (0x%x)", reset_cmd);
+ return -ETIMEDOUT;
+ }
+
+ ptdev->pwr->pending_reqs &= ~PWR_IRQ_RESET_COMPLETED;
+ }
+
+ return 0;
+}
+
+static const char *get_domain_name(u8 domain)
+{
+ switch (domain) {
+ case PWR_COMMAND_DOMAIN_L2:
+ return "L2";
+ case PWR_COMMAND_DOMAIN_TILER:
+ return "Tiler";
+ case PWR_COMMAND_DOMAIN_SHADER:
+ return "Shader";
+ case PWR_COMMAND_DOMAIN_BASE:
+ return "Base";
+ case PWR_COMMAND_DOMAIN_STACK:
+ return "Stack";
+ }
+ return "Unknown";
+}
+
+static u32 get_domain_base(u8 domain)
+{
+ switch (domain) {
+ case PWR_COMMAND_DOMAIN_L2:
+ return PWR_L2_PRESENT;
+ case PWR_COMMAND_DOMAIN_TILER:
+ return PWR_TILER_PRESENT;
+ case PWR_COMMAND_DOMAIN_SHADER:
+ return PWR_SHADER_PRESENT;
+ case PWR_COMMAND_DOMAIN_BASE:
+ return PWR_BASE_PRESENT;
+ case PWR_COMMAND_DOMAIN_STACK:
+ return PWR_STACK_PRESENT;
+ }
+ return 0;
+}
+
+static u32 get_domain_ready_reg(u32 domain)
+{
+ return get_domain_base(domain) + (PWR_L2_READY - PWR_L2_PRESENT);
+}
+
+static u32 get_domain_pwrtrans_reg(u32 domain)
+{
+ return get_domain_base(domain) + (PWR_L2_PWRTRANS - PWR_L2_PRESENT);
+}
+
+static bool is_valid_domain(u32 domain)
+{
+ return get_domain_base(domain) != 0;
+}
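/*
 * Editor's note (not part of the patch): the helpers above assume each
 * domain's READY and PWRTRANS registers sit at fixed offsets from its
 * PRESENT register. Checking against the panthor_regs.h values in this
 * patch, e.g. for the shader domain:
 *
 *   PWR_SHADER_PRESENT  = PWR_CTRL_REG(0x200)
 *   PWR_SHADER_READY    = 0x200 + (0x108 - 0x100) = PWR_CTRL_REG(0x208)
 *   PWR_SHADER_PWRTRANS = 0x200 + (0x110 - 0x100) = PWR_CTRL_REG(0x210)
 */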
+
+static bool has_rtu(struct panthor_device *ptdev)
+{
+ return ptdev->gpu_info.gpu_features & GPU_FEATURES_RAY_TRAVERSAL;
+}
+
+static u8 get_domain_subdomain(struct panthor_device *ptdev, u32 domain)
+{
+ if (domain == PWR_COMMAND_DOMAIN_SHADER && has_rtu(ptdev))
+ return PWR_COMMAND_SUBDOMAIN_RTU;
+
+ return 0;
+}
+
+static int panthor_pwr_domain_wait_transition(struct panthor_device *ptdev, u32 domain,
+ u32 timeout_us)
+{
+ u32 pwrtrans_reg = get_domain_pwrtrans_reg(domain);
+ u64 val;
+ int ret = 0;
+
+ ret = gpu_read64_poll_timeout(ptdev, pwrtrans_reg, val, !(PWR_ALL_CORES_MASK & val), 100,
+ timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "%s domain power in transition, pwrtrans(0x%llx)",
+ get_domain_name(domain), val);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void panthor_pwr_debug_info_show(struct panthor_device *ptdev)
+{
+ drm_info(&ptdev->base, "GPU_FEATURES: 0x%016llx", gpu_read64(ptdev, GPU_FEATURES));
+ drm_info(&ptdev->base, "PWR_STATUS: 0x%016llx", gpu_read64(ptdev, PWR_STATUS));
+ drm_info(&ptdev->base, "L2_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_L2_PRESENT));
+ drm_info(&ptdev->base, "L2_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_L2_PWRTRANS));
+ drm_info(&ptdev->base, "L2_READY: 0x%016llx", gpu_read64(ptdev, PWR_L2_READY));
+ drm_info(&ptdev->base, "TILER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PRESENT));
+ drm_info(&ptdev->base, "TILER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_TILER_PWRTRANS));
+ drm_info(&ptdev->base, "TILER_READY: 0x%016llx", gpu_read64(ptdev, PWR_TILER_READY));
+ drm_info(&ptdev->base, "SHADER_PRESENT: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PRESENT));
+ drm_info(&ptdev->base, "SHADER_PWRTRANS: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_PWRTRANS));
+ drm_info(&ptdev->base, "SHADER_READY: 0x%016llx", gpu_read64(ptdev, PWR_SHADER_READY));
+}
+
+static int panthor_pwr_domain_transition(struct panthor_device *ptdev, u32 cmd, u32 domain,
+ u64 mask, u32 timeout_us)
+{
+ u32 ready_reg = get_domain_ready_reg(domain);
+ u32 pwr_cmd = PWR_COMMAND_DEF(cmd, domain, get_domain_subdomain(ptdev, domain));
+ u64 expected_val = 0;
+ u64 val;
+ int ret = 0;
+
+ if (drm_WARN_ON(&ptdev->base, !is_valid_domain(domain)))
+ return -EINVAL;
+
+ switch (cmd) {
+ case PWR_COMMAND_POWER_DOWN:
+ expected_val = 0;
+ break;
+ case PWR_COMMAND_POWER_UP:
+ expected_val = mask;
+ break;
+ default:
+ drm_err(&ptdev->base, "Invalid power domain transition command (0x%x)", cmd);
+ return -EINVAL;
+ }
+
+ ret = panthor_pwr_domain_wait_transition(ptdev, domain, timeout_us);
+ if (ret)
+ return ret;
+
+ /* domain already in target state, return early */
+ if ((gpu_read64(ptdev, ready_reg) & mask) == expected_val)
+ return 0;
+
+ panthor_pwr_write_command(ptdev, pwr_cmd, mask);
+
+ ret = gpu_read64_poll_timeout(ptdev, ready_reg, val, (mask & val) == expected_val, 100,
+ timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s power domain transition, cmd(0x%x), arg(0x%llx)",
+ get_domain_name(domain), pwr_cmd, mask);
+ panthor_pwr_debug_info_show(ptdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define panthor_pwr_domain_power_off(__ptdev, __domain, __mask, __timeout_us) \
+ panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_DOWN, __domain, __mask, \
+ __timeout_us)
+
+#define panthor_pwr_domain_power_on(__ptdev, __domain, __mask, __timeout_us) \
+ panthor_pwr_domain_transition(__ptdev, PWR_COMMAND_POWER_UP, __domain, __mask, __timeout_us)
+
+/**
+ * retract_domain() - Retract control of a domain from MCU
+ * @ptdev: Device.
+ * @domain: Domain to retract control of
+ *
+ * Retracting the L2 domain is not expected since it is never delegated.
+ *
+ * Return:
+ * * 0 on success or if the domain was already retracted.
+ * * -EPERM if the domain is L2.
+ * * A negative error code otherwise.
+ */
+static int retract_domain(struct panthor_device *ptdev, u32 domain)
+{
+ const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_RETRACT, domain, 0);
+ const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
+ const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain);
+ const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain);
+ u64 val;
+ int ret;
+
+ if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2))
+ return -EPERM;
+
+ ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val, !(PWR_STATUS_RETRACT_PENDING & val),
+ 0, PWR_RETRACT_TIMEOUT_US);
+ if (ret) {
+ drm_err(&ptdev->base, "%s domain retract pending", get_domain_name(domain));
+ return ret;
+ }
+
+ if (!(pwr_status & delegated_mask)) {
+ drm_dbg(&ptdev->base, "%s domain already retracted", get_domain_name(domain));
+ return 0;
+ }
+
+ panthor_pwr_write_command(ptdev, pwr_cmd, 0);
+
+ /*
+ * On successful retraction, the allow flag is set and the
+ * delegated flag is cleared.
+ */
+ ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val,
+ ((delegated_mask | allow_mask) & val) == allow_mask, 10,
+ PWR_TRANSITION_TIMEOUT_US);
+ if (ret) {
+ drm_err(&ptdev->base, "Retracting %s domain timeout, cmd(0x%x)",
+ get_domain_name(domain), pwr_cmd);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * delegate_domain() - Delegate control of a domain to MCU
+ * @ptdev: Device.
+ * @domain: Domain to delegate control of
+ *
+ * Delegating the L2 domain is prohibited.
+ *
+ * Return:
+ * * 0 on success or if the domain was already delegated.
+ * * -EPERM if the domain is L2.
+ * * A negative error code otherwise.
+ */
+static int delegate_domain(struct panthor_device *ptdev, u32 domain)
+{
+ const u32 pwr_cmd = PWR_COMMAND_DEF(PWR_COMMAND_DELEGATE, domain, 0);
+ const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
+ const u64 allow_mask = PWR_STATUS_DOMAIN_ALLOWED(domain);
+ const u64 delegated_mask = PWR_STATUS_DOMAIN_DELEGATED(domain);
+ u64 val;
+ int ret;
+
+ if (drm_WARN_ON(&ptdev->base, domain == PWR_COMMAND_DOMAIN_L2))
+ return -EPERM;
+
+ /* Already delegated, exit early */
+ if (pwr_status & delegated_mask)
+ return 0;
+
+ /* Check if the command is allowed before delegating. */
+ if (!(pwr_status & allow_mask)) {
+ drm_warn(&ptdev->base, "Delegating %s domain not allowed", get_domain_name(domain));
+ return -EPERM;
+ }
+
+ ret = panthor_pwr_domain_wait_transition(ptdev, domain, PWR_TRANSITION_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ panthor_pwr_write_command(ptdev, pwr_cmd, 0);
+
+ /*
+ * On successful delegation, the allow flag is cleared and the
+ * delegated flag is set.
+ */
+ ret = gpu_read64_poll_timeout(ptdev, PWR_STATUS, val,
+ ((delegated_mask | allow_mask) & val) == delegated_mask,
+ 10, PWR_TRANSITION_TIMEOUT_US);
+ if (ret) {
+ drm_err(&ptdev->base, "Delegating %s domain timeout, cmd(0x%x)",
+ get_domain_name(domain), pwr_cmd);
+ return ret;
+ }
+
+ return 0;
+}
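/*
 * Editor's summary (not part of the patch): PWR_STATUS transitions for
 * a domain x driven by retract_domain()/delegate_domain() above:
 *
 *   host-owned : DOMAIN_ALLOWED(x) = 1, DOMAIN_DELEGATED(x) = 0
 *   DELEGATE   : ALLOWED -> 0, DELEGATED -> 1 (MCU owns the domain)
 *   RETRACT    : DELEGATED -> 0, ALLOWED -> 1 (host owns it again)
 */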
+
+static int panthor_pwr_delegate_domains(struct panthor_device *ptdev)
+{
+ int ret;
+
+ if (!ptdev->pwr)
+ return 0;
+
+ ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER);
+ if (ret)
+ return ret;
+
+ ret = delegate_domain(ptdev, PWR_COMMAND_DOMAIN_TILER);
+ if (ret)
+ goto err_retract_shader;
+
+ return 0;
+
+err_retract_shader:
+ retract_domain(ptdev, PWR_COMMAND_DOMAIN_SHADER);
+
+ return ret;
+}
+
+/**
+ * panthor_pwr_domain_force_off() - Forcefully power down a domain.
+ * @ptdev: Device.
+ * @domain: Domain to forcefully power down.
+ *
+ * This function will attempt to retract and power off the requested power
+ * domain. However, if retraction fails, the operation is aborted. If power off
+ * fails, the domain will remain retracted and under host control.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static int panthor_pwr_domain_force_off(struct panthor_device *ptdev, u32 domain)
+{
+ const u64 domain_ready = gpu_read64(ptdev, get_domain_ready_reg(domain));
+ int ret;
+
+ /* Domain already powered down, early exit. */
+ if (!domain_ready)
+ return 0;
+
+ /* Domain has to be in host control to issue power off command. */
+ ret = retract_domain(ptdev, domain);
+ if (ret)
+ return ret;
+
+ return panthor_pwr_domain_power_off(ptdev, domain, domain_ready, PWR_TRANSITION_TIMEOUT_US);
+}
+
+void panthor_pwr_unplug(struct panthor_device *ptdev)
+{
+ unsigned long flags;
+
+ if (!ptdev->pwr)
+ return;
+
+ /* Make sure the IRQ handler is not running after that point. */
+ panthor_pwr_irq_suspend(&ptdev->pwr->irq);
+
+ /* Wake-up all waiters. */
+ spin_lock_irqsave(&ptdev->pwr->reqs_lock, flags);
+ ptdev->pwr->pending_reqs = 0;
+ wake_up_all(&ptdev->pwr->reqs_acked);
+ spin_unlock_irqrestore(&ptdev->pwr->reqs_lock, flags);
+}
+
+int panthor_pwr_init(struct panthor_device *ptdev)
+{
+ struct panthor_pwr *pwr;
+ int err, irq;
+
+ if (!panthor_hw_has_pwr_ctrl(ptdev))
+ return 0;
+
+ pwr = drmm_kzalloc(&ptdev->base, sizeof(*pwr), GFP_KERNEL);
+ if (!pwr)
+ return -ENOMEM;
+
+ spin_lock_init(&pwr->reqs_lock);
+ init_waitqueue_head(&pwr->reqs_acked);
+ ptdev->pwr = pwr;
+
+ irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "gpu");
+ if (irq < 0)
+ return irq;
+
+ err = panthor_request_pwr_irq(ptdev, &pwr->irq, irq, PWR_INTERRUPTS_MASK);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int panthor_pwr_reset_soft(struct panthor_device *ptdev)
+{
+ if (!(gpu_read64(ptdev, PWR_STATUS) & PWR_STATUS_ALLOW_SOFT_RESET)) {
+ drm_err(&ptdev->base, "RESET_SOFT not allowed");
+ return -EOPNOTSUPP;
+ }
+
+ return panthor_pwr_reset(ptdev, PWR_COMMAND_RESET_SOFT);
+}
+
+void panthor_pwr_l2_power_off(struct panthor_device *ptdev)
+{
+ const u64 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2);
+ const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
+
+ /* Abort if L2 power off constraints are not satisfied */
+ if (!(pwr_status & l2_allow_mask)) {
+ drm_warn(&ptdev->base, "Power off L2 domain not allowed");
+ return;
+ }
+
+ /* When halted, the MCU is expected to power down its delegated
+ * domains. However, an unresponsive or hung MCU may not do so,
+ * which is why we need to retract the domains back into host
+ * control so they can be powered down in the right order before
+ * powering down the L2.
+ */
+ if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_TILER))
+ return;
+
+ if (panthor_pwr_domain_force_off(ptdev, PWR_COMMAND_DOMAIN_SHADER))
+ return;
+
+ panthor_pwr_domain_power_off(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present,
+ PWR_TRANSITION_TIMEOUT_US);
+}
+
+int panthor_pwr_l2_power_on(struct panthor_device *ptdev)
+{
+ const u64 pwr_status = gpu_read64(ptdev, PWR_STATUS);
+ const u64 l2_allow_mask = PWR_STATUS_DOMAIN_ALLOWED(PWR_COMMAND_DOMAIN_L2);
+ int ret;
+
+ if ((pwr_status & l2_allow_mask) == 0) {
+ drm_warn(&ptdev->base, "Power on L2 domain not allowed");
+ return -EPERM;
+ }
+
+ ret = panthor_pwr_domain_power_on(ptdev, PWR_COMMAND_DOMAIN_L2, ptdev->gpu_info.l2_present,
+ PWR_TRANSITION_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ /* Delegate control of the shader and tiler power domains to the MCU as
+ * it can better manage which shader/tiler cores need to be powered up
+ * or can be powered down based on currently running jobs.
+ *
+ * If the shader and tiler domains are already delegated to the MCU,
+ * this call would just return early.
+ */
+ return panthor_pwr_delegate_domains(ptdev);
+}
+
+void panthor_pwr_suspend(struct panthor_device *ptdev)
+{
+ if (!ptdev->pwr)
+ return;
+
+ panthor_pwr_irq_suspend(&ptdev->pwr->irq);
+}
+
+void panthor_pwr_resume(struct panthor_device *ptdev)
+{
+ if (!ptdev->pwr)
+ return;
+
+ panthor_pwr_irq_resume(&ptdev->pwr->irq, PWR_INTERRUPTS_MASK);
+}
diff --git a/drivers/gpu/drm/panthor/panthor_pwr.h b/drivers/gpu/drm/panthor/panthor_pwr.h
new file mode 100644
index 000000000000..adf1f6136abc
--- /dev/null
+++ b/drivers/gpu/drm/panthor/panthor_pwr.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 or MIT */
+/* Copyright 2025 ARM Limited. All rights reserved. */
+
+#ifndef __PANTHOR_PWR_H__
+#define __PANTHOR_PWR_H__
+
+struct panthor_device;
+
+void panthor_pwr_unplug(struct panthor_device *ptdev);
+
+int panthor_pwr_init(struct panthor_device *ptdev);
+
+int panthor_pwr_reset_soft(struct panthor_device *ptdev);
+
+void panthor_pwr_l2_power_off(struct panthor_device *ptdev);
+
+int panthor_pwr_l2_power_on(struct panthor_device *ptdev);
+
+void panthor_pwr_suspend(struct panthor_device *ptdev);
+
+void panthor_pwr_resume(struct panthor_device *ptdev);
+
+#endif /* __PANTHOR_PWR_H__ */
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index 48bbfd40138c..08bf06c452d6 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -64,12 +64,18 @@
#define GPU_FAULT_STATUS 0x3C
#define GPU_FAULT_ADDR 0x40
+#define GPU_L2_CONFIG 0x48
+#define GPU_L2_CONFIG_ASN_HASH_ENABLE BIT(24)
#define GPU_PWR_KEY 0x50
#define GPU_PWR_KEY_UNLOCK 0x2968A819
#define GPU_PWR_OVERRIDE0 0x54
#define GPU_PWR_OVERRIDE1 0x58
+#define GPU_FEATURES 0x60
+#define GPU_FEATURES_RAY_INTERSECTION BIT(2)
+#define GPU_FEATURES_RAY_TRAVERSAL BIT(5)
+
#define GPU_TIMESTAMP_OFFSET 0x88
#define GPU_CYCLE_COUNT 0x90
#define GPU_TIMESTAMP 0x98
@@ -107,6 +113,8 @@
#define GPU_REVID 0x280
+#define GPU_ASN_HASH(n) (0x2C0 + ((n) * 4))
+
#define GPU_COHERENCY_FEATURES 0x300
#define GPU_COHERENCY_PROT_BIT(name) BIT(GPU_COHERENCY_ ## name)
@@ -202,4 +210,82 @@
#define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000))
#define CSF_GLB_DOORBELL_ID 0
+/* PWR Control registers */
+
+#define PWR_CONTROL_BASE 0x800
+#define PWR_CTRL_REG(x) (PWR_CONTROL_BASE + (x))
+
+#define PWR_INT_RAWSTAT PWR_CTRL_REG(0x0)
+#define PWR_INT_CLEAR PWR_CTRL_REG(0x4)
+#define PWR_INT_MASK PWR_CTRL_REG(0x8)
+#define PWR_INT_STAT PWR_CTRL_REG(0xc)
+#define PWR_IRQ_POWER_CHANGED_SINGLE BIT(0)
+#define PWR_IRQ_POWER_CHANGED_ALL BIT(1)
+#define PWR_IRQ_DELEGATION_CHANGED BIT(2)
+#define PWR_IRQ_RESET_COMPLETED BIT(3)
+#define PWR_IRQ_RETRACT_COMPLETED BIT(4)
+#define PWR_IRQ_INSPECT_COMPLETED BIT(5)
+#define PWR_IRQ_COMMAND_NOT_ALLOWED BIT(30)
+#define PWR_IRQ_COMMAND_INVALID BIT(31)
+
+#define PWR_STATUS PWR_CTRL_REG(0x20)
+#define PWR_STATUS_ALLOW_L2 BIT_U64(0)
+#define PWR_STATUS_ALLOW_TILER BIT_U64(1)
+#define PWR_STATUS_ALLOW_SHADER BIT_U64(8)
+#define PWR_STATUS_ALLOW_BASE BIT_U64(14)
+#define PWR_STATUS_ALLOW_STACK BIT_U64(15)
+#define PWR_STATUS_DOMAIN_ALLOWED(x) BIT_U64(x)
+#define PWR_STATUS_DELEGATED_L2 BIT_U64(16)
+#define PWR_STATUS_DELEGATED_TILER BIT_U64(17)
+#define PWR_STATUS_DELEGATED_SHADER BIT_U64(24)
+#define PWR_STATUS_DELEGATED_BASE BIT_U64(30)
+#define PWR_STATUS_DELEGATED_STACK BIT_U64(31)
+#define PWR_STATUS_DELEGATED_SHIFT 16
+#define PWR_STATUS_DOMAIN_DELEGATED(x) BIT_U64((x) + PWR_STATUS_DELEGATED_SHIFT)
+#define PWR_STATUS_ALLOW_SOFT_RESET BIT_U64(33)
+#define PWR_STATUS_ALLOW_FAST_RESET BIT_U64(34)
+#define PWR_STATUS_POWER_PENDING BIT_U64(41)
+#define PWR_STATUS_RESET_PENDING BIT_U64(42)
+#define PWR_STATUS_RETRACT_PENDING BIT_U64(43)
+#define PWR_STATUS_INSPECT_PENDING BIT_U64(44)
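/*
 * Editor's check (not part of the patch): PWR_STATUS_DOMAIN_ALLOWED()
 * and PWR_STATUS_DOMAIN_DELEGATED() take a PWR_COMMAND_DOMAIN_* ID
 * (defined below) as the bit position, matching the named bits above:
 *
 *   PWR_STATUS_DOMAIN_ALLOWED(8)   = BIT_U64(8)  = PWR_STATUS_ALLOW_SHADER
 *   PWR_STATUS_DOMAIN_DELEGATED(8) = BIT_U64(24) = PWR_STATUS_DELEGATED_SHADER
 */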
+
+#define PWR_COMMAND PWR_CTRL_REG(0x28)
+#define PWR_COMMAND_POWER_UP 0x10
+#define PWR_COMMAND_POWER_DOWN 0x11
+#define PWR_COMMAND_DELEGATE 0x20
+#define PWR_COMMAND_RETRACT 0x21
+#define PWR_COMMAND_RESET_SOFT 0x31
+#define PWR_COMMAND_RESET_FAST 0x32
+#define PWR_COMMAND_INSPECT 0xF0
+#define PWR_COMMAND_DOMAIN_L2 0
+#define PWR_COMMAND_DOMAIN_TILER 1
+#define PWR_COMMAND_DOMAIN_SHADER 8
+#define PWR_COMMAND_DOMAIN_BASE 14
+#define PWR_COMMAND_DOMAIN_STACK 15
+#define PWR_COMMAND_SUBDOMAIN_RTU BIT(0)
+#define PWR_COMMAND_DEF(cmd, domain, subdomain) \
+ (((subdomain) << 16) | ((domain) << 8) | (cmd))
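/*
 * Editor's example (not part of the patch): PWR_COMMAND_DEF() packs the
 * fields into one 32-bit command word; powering up the shader domain
 * together with its RTU subdomain encodes as:
 *
 *   PWR_COMMAND_DEF(PWR_COMMAND_POWER_UP, PWR_COMMAND_DOMAIN_SHADER,
 *                   PWR_COMMAND_SUBDOMAIN_RTU)
 *     = (1 << 16) | (8 << 8) | 0x10
 *     = 0x00010810
 */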
+
+#define PWR_CMDARG PWR_CTRL_REG(0x30)
+
+#define PWR_L2_PRESENT PWR_CTRL_REG(0x100)
+#define PWR_L2_READY PWR_CTRL_REG(0x108)
+#define PWR_L2_PWRTRANS PWR_CTRL_REG(0x110)
+#define PWR_L2_PWRACTIVE PWR_CTRL_REG(0x118)
+#define PWR_TILER_PRESENT PWR_CTRL_REG(0x140)
+#define PWR_TILER_READY PWR_CTRL_REG(0x148)
+#define PWR_TILER_PWRTRANS PWR_CTRL_REG(0x150)
+#define PWR_TILER_PWRACTIVE PWR_CTRL_REG(0x158)
+#define PWR_SHADER_PRESENT PWR_CTRL_REG(0x200)
+#define PWR_SHADER_READY PWR_CTRL_REG(0x208)
+#define PWR_SHADER_PWRTRANS PWR_CTRL_REG(0x210)
+#define PWR_SHADER_PWRACTIVE PWR_CTRL_REG(0x218)
+#define PWR_BASE_PRESENT PWR_CTRL_REG(0x380)
+#define PWR_BASE_READY PWR_CTRL_REG(0x388)
+#define PWR_BASE_PWRTRANS PWR_CTRL_REG(0x390)
+#define PWR_BASE_PWRACTIVE PWR_CTRL_REG(0x398)
+#define PWR_STACK_PRESENT PWR_CTRL_REG(0x3c0)
+#define PWR_STACK_READY PWR_CTRL_REG(0x3c8)
+#define PWR_STACK_PWRTRANS PWR_CTRL_REG(0x3d0)
+
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 8f17394cc82a..b834123a6560 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -5,6 +5,7 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>
@@ -360,17 +361,23 @@ struct panthor_queue {
/** @entity: DRM scheduling entity used for this queue. */
struct drm_sched_entity entity;
- /**
- * @remaining_time: Time remaining before the job timeout expires.
- *
- * The job timeout is suspended when the queue is not scheduled by the
- * FW. Every time we suspend the timer, we need to save the remaining
- * time so we can restore it later on.
- */
- unsigned long remaining_time;
+ /** @name: DRM scheduler name for this queue. */
+ char *name;
- /** @timeout_suspended: True if the job timeout was suspended. */
- bool timeout_suspended;
+ /** @timeout: Queue timeout related fields. */
+ struct {
+ /** @timeout.work: Work executed when a queue timeout occurs. */
+ struct delayed_work work;
+
+ /**
+ * @timeout.remaining: Time remaining before a queue timeout.
+ *
+ * When the timer is running, this value is set to MAX_SCHEDULE_TIMEOUT.
+ * When the timer is suspended, it's set to the time remaining when the
+ * timer was suspended.
+ */
+ unsigned long remaining;
+ } timeout;
/**
* @doorbell_id: Doorbell assigned to this queue.
@@ -641,6 +648,15 @@ struct panthor_group {
size_t kbo_sizes;
} fdinfo;
+ /** @task_info: Info of current->group_leader that created the group. */
+ struct {
+ /** @task_info.pid: pid of current->group_leader */
+ pid_t pid;
+
+ /** @task_info.comm: comm of current->group_leader */
+ char comm[TASK_COMM_LEN];
+ } task_info;
+
/** @state: Group state. */
enum panthor_group_state state;
@@ -886,12 +902,18 @@ static void group_free_queue(struct panthor_group *group, struct panthor_queue *
if (IS_ERR_OR_NULL(queue))
return;
+ /* The timeout work should have been disabled before this point. */
+ drm_WARN_ON(&group->ptdev->base,
+ disable_delayed_work_sync(&queue->timeout.work));
+
if (queue->entity.fence_context)
drm_sched_entity_destroy(&queue->entity);
if (queue->scheduler.ops)
drm_sched_fini(&queue->scheduler);
+ kfree(queue->name);
+
panthor_queue_put_syncwait_obj(queue);
panthor_kernel_bo_destroy(queue->ringbuf);
@@ -1031,6 +1053,115 @@ group_unbind_locked(struct panthor_group *group)
return 0;
}
+static bool
+group_is_idle(struct panthor_group *group)
+{
+ struct panthor_device *ptdev = group->ptdev;
+ u32 inactive_queues;
+
+ if (group->csg_id >= 0)
+ return ptdev->scheduler->csg_slots[group->csg_id].idle;
+
+ inactive_queues = group->idle_queues | group->blocked_queues;
+ return hweight32(inactive_queues) == group->queue_count;
+}
+
+static void
+queue_reset_timeout_locked(struct panthor_queue *queue)
+{
+ lockdep_assert_held(&queue->fence_ctx.lock);
+
+ if (queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT) {
+ mod_delayed_work(queue->scheduler.timeout_wq,
+ &queue->timeout.work,
+ msecs_to_jiffies(JOB_TIMEOUT_MS));
+ }
+}
+
+static bool
+group_can_run(struct panthor_group *group)
+{
+ return group->state != PANTHOR_CS_GROUP_TERMINATED &&
+ group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
+ !group->destroyed && group->fatal_queues == 0 &&
+ !group->timedout;
+}
+
+static bool
+queue_timeout_is_suspended(struct panthor_queue *queue)
+{
+ /* When running, the remaining time is set to MAX_SCHEDULE_TIMEOUT. */
+ return queue->timeout.remaining != MAX_SCHEDULE_TIMEOUT;
+}
+
+static void
+queue_suspend_timeout_locked(struct panthor_queue *queue)
+{
+ unsigned long qtimeout, now;
+ struct panthor_group *group;
+ struct panthor_job *job;
+ bool timer_was_active;
+
+ lockdep_assert_held(&queue->fence_ctx.lock);
+
+ /* Already suspended, nothing to do. */
+ if (queue_timeout_is_suspended(queue))
+ return;
+
+ job = list_first_entry_or_null(&queue->fence_ctx.in_flight_jobs,
+ struct panthor_job, node);
+ group = job ? job->group : NULL;
+
+ /* If the queue is blocked and the group is idle, we want the timer to
+ * keep running because the group can't be unblocked by other queues,
+ * so it has to come from an external source, and we want to timebox
+ * this external signalling.
+ */
+ if (group && group_can_run(group) &&
+ (group->blocked_queues & BIT(job->queue_idx)) &&
+ group_is_idle(group))
+ return;
+
+ now = jiffies;
+ qtimeout = queue->timeout.work.timer.expires;
+
+ /* Cancel the timer. */
+ timer_was_active = cancel_delayed_work(&queue->timeout.work);
+ if (!timer_was_active || !job)
+ queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
+ else if (time_after(qtimeout, now))
+ queue->timeout.remaining = qtimeout - now;
+ else
+ queue->timeout.remaining = 0;
+
+ if (WARN_ON_ONCE(queue->timeout.remaining > msecs_to_jiffies(JOB_TIMEOUT_MS)))
+ queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
+}
+
+static void
+queue_suspend_timeout(struct panthor_queue *queue)
+{
+ spin_lock(&queue->fence_ctx.lock);
+ queue_suspend_timeout_locked(queue);
+ spin_unlock(&queue->fence_ctx.lock);
+}
+
+static void
+queue_resume_timeout(struct panthor_queue *queue)
+{
+ spin_lock(&queue->fence_ctx.lock);
+
+ if (queue_timeout_is_suspended(queue)) {
+ mod_delayed_work(queue->scheduler.timeout_wq,
+ &queue->timeout.work,
+ queue->timeout.remaining);
+
+ queue->timeout.remaining = MAX_SCHEDULE_TIMEOUT;
+ }
+
+ spin_unlock(&queue->fence_ctx.lock);
+}
+
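/*
 * Editor's summary (not part of the patch): queue->timeout.remaining
 * doubles as the "timer armed" flag, with MAX_SCHEDULE_TIMEOUT as the
 * sentinel, per the helpers above:
 *
 *   armed   : remaining == MAX_SCHEDULE_TIMEOUT, delayed work pending
 *   suspend : cancel the work, remaining = time left (clamped to
 *             JOB_TIMEOUT_MS)
 *   resume  : re-arm the work with remaining, then set remaining back
 *             to MAX_SCHEDULE_TIMEOUT
 */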
/**
* cs_slot_prog_locked() - Program a queue slot
* @ptdev: Device.
@@ -1069,10 +1200,8 @@ cs_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
CS_IDLE_EMPTY |
CS_STATE_MASK |
CS_EXTRACT_EVENT);
- if (queue->iface.input->insert != queue->iface.input->extract && queue->timeout_suspended) {
- drm_sched_resume_timeout(&queue->scheduler, queue->remaining_time);
- queue->timeout_suspended = false;
- }
+ if (queue->iface.input->insert != queue->iface.input->extract)
+ queue_resume_timeout(queue);
}
/**
@@ -1099,14 +1228,7 @@ cs_slot_reset_locked(struct panthor_device *ptdev, u32 csg_id, u32 cs_id)
CS_STATE_STOP,
CS_STATE_MASK);
- /* If the queue is blocked, we want to keep the timeout running, so
- * we can detect unbounded waits and kill the group when that happens.
- */
- if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) {
- queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
- queue->timeout_suspended = true;
- WARN_ON(queue->remaining_time > msecs_to_jiffies(JOB_TIMEOUT_MS));
- }
+ queue_suspend_timeout(queue);
return 0;
}
@@ -1125,11 +1247,13 @@ csg_slot_sync_priority_locked(struct panthor_device *ptdev, u32 csg_id)
{
struct panthor_csg_slot *csg_slot = &ptdev->scheduler->csg_slots[csg_id];
struct panthor_fw_csg_iface *csg_iface;
+ u64 endpoint_req;
lockdep_assert_held(&ptdev->scheduler->lock);
csg_iface = panthor_fw_get_csg_iface(ptdev, csg_id);
- csg_slot->priority = (csg_iface->input->endpoint_req & CSG_EP_REQ_PRIORITY_MASK) >> 28;
+ endpoint_req = panthor_fw_csg_endpoint_req_get(ptdev, csg_iface);
+ csg_slot->priority = CSG_EP_REQ_PRIORITY_GET(endpoint_req);
}
/**
@@ -1289,6 +1413,7 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
struct panthor_csg_slot *csg_slot;
struct panthor_group *group;
u32 queue_mask = 0, i;
+ u64 endpoint_req;
lockdep_assert_held(&ptdev->scheduler->lock);
@@ -1315,10 +1440,12 @@ csg_slot_prog_locked(struct panthor_device *ptdev, u32 csg_id, u32 priority)
csg_iface->input->allow_compute = group->compute_core_mask;
csg_iface->input->allow_fragment = group->fragment_core_mask;
csg_iface->input->allow_other = group->tiler_core_mask;
- csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
- CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
- CSG_EP_REQ_TILER(group->max_tiler_cores) |
- CSG_EP_REQ_PRIORITY(priority);
+ endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) |
+ CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) |
+ CSG_EP_REQ_TILER(group->max_tiler_cores) |
+ CSG_EP_REQ_PRIORITY(priority);
+ panthor_fw_csg_endpoint_req_set(ptdev, csg_iface, endpoint_req);
+
csg_iface->input->config = panthor_vm_as(group->vm);
if (group->suspend_buf)
@@ -1355,8 +1482,12 @@ cs_slot_process_fatal_event_locked(struct panthor_device *ptdev,
fatal = cs_iface->output->fatal;
info = cs_iface->output->fatal_info;
- if (group)
+ if (group) {
+ drm_warn(&ptdev->base, "CS_FATAL: pid=%d, comm=%s\n",
+ group->task_info.pid, group->task_info.comm);
+
group->fatal_queues |= BIT(cs_id);
+ }
if (CS_EXCEPTION_TYPE(fatal) == DRM_PANTHOR_EXCEPTION_CS_UNRECOVERABLE) {
/* If this exception is unrecoverable, queue a reset, and make
@@ -1399,7 +1530,7 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
fault = cs_iface->output->fault;
info = cs_iface->output->fault_info;
- if (queue && CS_EXCEPTION_TYPE(fault) == DRM_PANTHOR_EXCEPTION_CS_INHERIT_FAULT) {
+ if (queue) {
u64 cs_extract = queue->iface.output->extract;
struct panthor_job *job;
@@ -1416,6 +1547,11 @@ cs_slot_process_fault_event_locked(struct panthor_device *ptdev,
spin_unlock(&queue->fence_ctx.lock);
}
+ if (group) {
+ drm_warn(&ptdev->base, "CS_FAULT: pid=%d, comm=%s\n",
+ group->task_info.pid, group->task_info.comm);
+ }
+
drm_warn(&ptdev->base,
"CSG slot %d CS slot: %d\n"
"CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
@@ -1632,11 +1768,15 @@ csg_slot_process_progress_timer_event_locked(struct panthor_device *ptdev, u32 c
lockdep_assert_held(&sched->lock);
- drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
-
group = csg_slot->group;
- if (!drm_WARN_ON(&ptdev->base, !group))
+ if (!drm_WARN_ON(&ptdev->base, !group)) {
+ drm_warn(&ptdev->base, "CSG_PROGRESS_TIMER_EVENT: pid=%d, comm=%s\n",
+ group->task_info.pid, group->task_info.comm);
+
group->timedout = true;
+ }
+
+ drm_warn(&ptdev->base, "CSG slot %d progress timeout\n", csg_id);
sched_queue_delayed_work(sched, tick, 0);
}
@@ -1888,28 +2028,6 @@ tick_ctx_is_full(const struct panthor_scheduler *sched,
return ctx->group_count == sched->csg_slot_count;
}
-static bool
-group_is_idle(struct panthor_group *group)
-{
- struct panthor_device *ptdev = group->ptdev;
- u32 inactive_queues;
-
- if (group->csg_id >= 0)
- return ptdev->scheduler->csg_slots[group->csg_id].idle;
-
- inactive_queues = group->idle_queues | group->blocked_queues;
- return hweight32(inactive_queues) == group->queue_count;
-}
-
-static bool
-group_can_run(struct panthor_group *group)
-{
- return group->state != PANTHOR_CS_GROUP_TERMINATED &&
- group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE &&
- !group->destroyed && group->fatal_queues == 0 &&
- !group->timedout;
-}
-
static void
tick_ctx_pick_groups_from_list(const struct panthor_scheduler *sched,
struct panthor_sched_tick_ctx *ctx,
@@ -2203,9 +2321,9 @@ tick_ctx_apply(struct panthor_scheduler *sched, struct panthor_sched_tick_ctx *c
continue;
}
- panthor_fw_update_reqs(csg_iface, endpoint_req,
- CSG_EP_REQ_PRIORITY(new_csg_prio),
- CSG_EP_REQ_PRIORITY_MASK);
+ panthor_fw_csg_endpoint_req_update(ptdev, csg_iface,
+ CSG_EP_REQ_PRIORITY(new_csg_prio),
+ CSG_EP_REQ_PRIORITY_MASK);
csgs_upd_ctx_queue_reqs(ptdev, &upd_ctx, csg_id,
csg_iface->output->ack ^ CSG_ENDPOINT_CONFIG,
CSG_ENDPOINT_CONFIG);
@@ -2591,6 +2709,7 @@ static void group_schedule_locked(struct panthor_group *group, u32 queue_mask)
static void queue_stop(struct panthor_queue *queue,
struct panthor_job *bad_job)
{
+ disable_delayed_work_sync(&queue->timeout.work);
drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL);
}
@@ -2602,6 +2721,7 @@ static void queue_start(struct panthor_queue *queue)
list_for_each_entry(job, &queue->scheduler.pending_list, base.list)
job->base.s_fence->parent = dma_fence_get(job->done_fence);
+ enable_delayed_work(&queue->timeout.work);
drm_sched_start(&queue->scheduler, 0);
}
@@ -2668,7 +2788,6 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
{
struct panthor_scheduler *sched = ptdev->scheduler;
struct panthor_csg_slots_upd_ctx upd_ctx;
- struct panthor_group *group;
u32 suspended_slots;
u32 i;
@@ -2722,13 +2841,23 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
while (slot_mask) {
u32 csg_id = ffs(slot_mask) - 1;
struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id];
+ struct panthor_group *group = csg_slot->group;
/* Terminate command timedout, but the soft-reset will
* automatically terminate all active groups, so let's
* force the state to halted here.
*/
- if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED)
- csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED;
+ if (group->state != PANTHOR_CS_GROUP_TERMINATED) {
+ group->state = PANTHOR_CS_GROUP_TERMINATED;
+
+ /* Reset the queue slots manually if the termination
+ * request failed.
+ */
+ for (i = 0; i < group->queue_count; i++) {
+ if (group->queues[i])
+ cs_slot_reset_locked(ptdev, csg_id, i);
+ }
+ }
slot_mask &= ~BIT(csg_id);
}
}
@@ -2758,8 +2887,8 @@ void panthor_sched_suspend(struct panthor_device *ptdev)
for (i = 0; i < sched->csg_slot_count; i++) {
struct panthor_csg_slot *csg_slot = &sched->csg_slots[i];
+ struct panthor_group *group = csg_slot->group;
- group = csg_slot->group;
if (!group)
continue;
@@ -2888,35 +3017,47 @@ void panthor_fdinfo_gather_group_samples(struct panthor_file *pfile)
xa_unlock(&gpool->xa);
}
-static void group_sync_upd_work(struct work_struct *work)
+static bool queue_check_job_completion(struct panthor_queue *queue)
{
- struct panthor_group *group =
- container_of(work, struct panthor_group, sync_upd_work);
+ struct panthor_syncobj_64b *syncobj = NULL;
struct panthor_job *job, *job_tmp;
+ bool cookie, progress = false;
LIST_HEAD(done_jobs);
- u32 queue_idx;
- bool cookie;
cookie = dma_fence_begin_signalling();
- for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
- struct panthor_queue *queue = group->queues[queue_idx];
- struct panthor_syncobj_64b *syncobj;
+ spin_lock(&queue->fence_ctx.lock);
+ list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
+ if (!syncobj) {
+ struct panthor_group *group = job->group;
- if (!queue)
- continue;
+ syncobj = group->syncobjs->kmap +
+ (job->queue_idx * sizeof(*syncobj));
+ }
- syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj));
+ if (syncobj->seqno < job->done_fence->seqno)
+ break;
- spin_lock(&queue->fence_ctx.lock);
- list_for_each_entry_safe(job, job_tmp, &queue->fence_ctx.in_flight_jobs, node) {
- if (syncobj->seqno < job->done_fence->seqno)
- break;
+ list_move_tail(&job->node, &done_jobs);
+ dma_fence_signal_locked(job->done_fence);
+ }
- list_move_tail(&job->node, &done_jobs);
- dma_fence_signal_locked(job->done_fence);
- }
- spin_unlock(&queue->fence_ctx.lock);
+ if (list_empty(&queue->fence_ctx.in_flight_jobs)) {
+ /* If we have no job left, we cancel the timer, and reset remaining
+ * time to its default so it can be restarted next time
+ * queue_resume_timeout() is called.
+ */
+ queue_suspend_timeout_locked(queue);
+
+ /* If there's no job pending, we consider it progress to avoid a
+ * spurious timeout if the timeout handler and the sync update
+ * handler raced.
+ */
+ progress = true;
+ } else if (!list_empty(&done_jobs)) {
+ queue_reset_timeout_locked(queue);
+ progress = true;
}
+ spin_unlock(&queue->fence_ctx.lock);
dma_fence_end_signalling(cookie);
list_for_each_entry_safe(job, job_tmp, &done_jobs, node) {
@@ -2926,6 +3067,27 @@ static void group_sync_upd_work(struct work_struct *work)
panthor_job_put(&job->base);
}
+ return progress;
+}
+
+static void group_sync_upd_work(struct work_struct *work)
+{
+ struct panthor_group *group =
+ container_of(work, struct panthor_group, sync_upd_work);
+ u32 queue_idx;
+ bool cookie;
+
+ cookie = dma_fence_begin_signalling();
+ for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) {
+ struct panthor_queue *queue = group->queues[queue_idx];
+
+ if (!queue)
+ continue;
+
+ queue_check_job_completion(queue);
+ }
+ dma_fence_end_signalling(cookie);
+
group_put(group);
}
@@ -3173,17 +3335,6 @@ queue_run_job(struct drm_sched_job *sched_job)
queue->iface.input->insert = job->ringbuf.end;
if (group->csg_id < 0) {
- /* If the queue is blocked, we want to keep the timeout running, so we
- * can detect unbounded waits and kill the group when that happens.
- * Otherwise, we suspend the timeout so the time we spend waiting for
- * a CSG slot is not counted.
- */
- if (!(group->blocked_queues & BIT(job->queue_idx)) &&
- !queue->timeout_suspended) {
- queue->remaining_time = drm_sched_suspend_timeout(&queue->scheduler);
- queue->timeout_suspended = true;
- }
-
group_schedule_locked(group, BIT(job->queue_idx));
} else {
gpu_write(ptdev, CSF_DOORBELL(queue->doorbell_id), 1);
@@ -3192,6 +3343,7 @@ queue_run_job(struct drm_sched_job *sched_job)
pm_runtime_get(ptdev->base.dev);
sched->pm.has_ref = true;
}
+ queue_resume_timeout(queue);
panthor_devfreq_record_busy(sched->ptdev);
}
@@ -3218,7 +3370,8 @@ queue_timedout_job(struct drm_sched_job *sched_job)
struct panthor_scheduler *sched = ptdev->scheduler;
struct panthor_queue *queue = group->queues[job->queue_idx];
- drm_warn(&ptdev->base, "job timeout\n");
+ drm_warn(&ptdev->base, "job timeout: pid=%d, comm=%s, seqno=%llu\n",
+ group->task_info.pid, group->task_info.comm, job->done_fence->seqno);
drm_WARN_ON(&ptdev->base, atomic_read(&sched->reset.in_progress));
@@ -3240,7 +3393,6 @@ queue_timedout_job(struct drm_sched_job *sched_job)
mutex_unlock(&sched->lock);
queue_start(queue);
-
return DRM_GPU_SCHED_STAT_RESET;
}
@@ -3283,11 +3435,23 @@ static u32 calc_profiling_ringbuf_num_slots(struct panthor_device *ptdev,
return DIV_ROUND_UP(cs_ringbuf_size, min_profiled_job_instrs * sizeof(u64));
}
+static void queue_timeout_work(struct work_struct *work)
+{
+ struct panthor_queue *queue = container_of(work, struct panthor_queue,
+ timeout.work.work);
+ bool progress;
+
+ progress = queue_check_job_completion(queue);
+ if (!progress)
+ drm_sched_fault(&queue->scheduler);
+}
+
static struct panthor_queue *
group_create_queue(struct panthor_group *group,
- const struct drm_panthor_queue_create *args)
+ const struct drm_panthor_queue_create *args,
+ u64 drm_client_id, u32 gid, u32 qid)
{
- const struct drm_sched_init_args sched_args = {
+ struct drm_sched_init_args sched_args = {
.ops = &panthor_queue_sched_ops,
.submit_wq = group->ptdev->scheduler->wq,
.num_rqs = 1,
@@ -3298,9 +3462,8 @@ group_create_queue(struct panthor_group *group,
* their profiling status.
*/
.credit_limit = args->ringbuf_size / sizeof(u64),
- .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS),
+ .timeout = MAX_SCHEDULE_TIMEOUT,
.timeout_wq = group->ptdev->reset.wq,
- .name = "panthor-queue",
.dev = group->ptdev->base.dev,
};
struct drm_gpu_scheduler *drm_sched;
@@ -3321,6 +3484,8 @@ group_create_queue(struct panthor_group *group,
if (!queue)
return ERR_PTR(-ENOMEM);
+ queue->timeout.remaining = msecs_to_jiffies(JOB_TIMEOUT_MS);
+ INIT_DELAYED_WORK(&queue->timeout.work, queue_timeout_work);
queue->fence_ctx.id = dma_fence_context_alloc(1);
spin_lock_init(&queue->fence_ctx.lock);
INIT_LIST_HEAD(&queue->fence_ctx.in_flight_jobs);
@@ -3375,12 +3540,23 @@ group_create_queue(struct panthor_group *group,
if (ret)
goto err_free_queue;
+ /* Assign a unique name. */
+ queue->name = kasprintf(GFP_KERNEL, "panthor-queue-%llu-%u-%u", drm_client_id, gid, qid);
+ if (!queue->name) {
+ ret = -ENOMEM;
+ goto err_free_queue;
+ }
+
+ sched_args.name = queue->name;
+
ret = drm_sched_init(&queue->scheduler, &sched_args);
if (ret)
goto err_free_queue;
drm_sched = &queue->scheduler;
ret = drm_sched_entity_init(&queue->entity, 0, &drm_sched, 1, NULL);
+ if (ret)
+ goto err_free_queue;
return queue;
@@ -3389,6 +3565,14 @@ err_free_queue:
return ERR_PTR(ret);
}
+static void group_init_task_info(struct panthor_group *group)
+{
+ struct task_struct *task = current->group_leader;
+
+ group->task_info.pid = task->pid;
+ get_task_comm(group->task_info.comm, task);
+}
+
static void add_group_kbo_sizes(struct panthor_device *ptdev,
struct panthor_group *group)
{
@@ -3416,7 +3600,8 @@ static void add_group_kbo_sizes(struct panthor_device *ptdev,
int panthor_group_create(struct panthor_file *pfile,
const struct drm_panthor_group_create *group_args,
- const struct drm_panthor_queue_create *queue_args)
+ const struct drm_panthor_queue_create *queue_args,
+ u64 drm_client_id)
{
struct panthor_device *ptdev = pfile->ptdev;
struct panthor_group_pool *gpool = pfile->groups;
@@ -3509,12 +3694,16 @@ int panthor_group_create(struct panthor_file *pfile,
memset(group->syncobjs->kmap, 0,
group_args->queues.count * sizeof(struct panthor_syncobj_64b));
+ ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
+ if (ret)
+ goto err_put_group;
+
for (i = 0; i < group_args->queues.count; i++) {
- group->queues[i] = group_create_queue(group, &queue_args[i]);
+ group->queues[i] = group_create_queue(group, &queue_args[i], drm_client_id, gid, i);
if (IS_ERR(group->queues[i])) {
ret = PTR_ERR(group->queues[i]);
group->queues[i] = NULL;
- goto err_put_group;
+ goto err_erase_gid;
}
group->queue_count++;
@@ -3522,10 +3711,6 @@ int panthor_group_create(struct panthor_file *pfile,
group->idle_queues = GENMASK(group->queue_count - 1, 0);
- ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL);
- if (ret)
- goto err_put_group;
-
mutex_lock(&sched->reset.lock);
if (atomic_read(&sched->reset.in_progress)) {
panthor_group_stop(group);
@@ -3540,8 +3725,13 @@ int panthor_group_create(struct panthor_file *pfile,
add_group_kbo_sizes(group->ptdev, group);
spin_lock_init(&group->fdinfo.lock);
+ group_init_task_info(group);
+
return gid;
+err_erase_gid:
+ xa_erase(&gpool->xa, gid);
+
err_put_group:
group_put(group);
return ret;
@@ -3558,11 +3748,6 @@ int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle)
if (!group)
return -EINVAL;
- for (u32 i = 0; i < group->queue_count; i++) {
- if (group->queues[i])
- drm_sched_entity_destroy(&group->queues[i]->entity);
- }
-
mutex_lock(&sched->reset.lock);
mutex_lock(&sched->lock);
group->destroyed = true;
@@ -3828,7 +4013,9 @@ void panthor_sched_unplug(struct panthor_device *ptdev)
{
struct panthor_scheduler *sched = ptdev->scheduler;
- cancel_delayed_work_sync(&sched->tick_work);
+ disable_delayed_work_sync(&sched->tick_work);
+ disable_work_sync(&sched->fw_events_work);
+ disable_work_sync(&sched->sync_upd_work);
mutex_lock(&sched->lock);
if (sched->pm.has_ref) {
@@ -3846,8 +4033,6 @@ static void panthor_sched_fini(struct drm_device *ddev, void *res)
if (!sched || !sched->csg_slot_count)
return;
- cancel_delayed_work_sync(&sched->tick_work);
-
if (sched->wq)
destroy_workqueue(sched->wq);
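disable_delayed_work_sync()/disable_work_sync() go further than the cancel_*_sync() calls they replace: besides cancelling and waiting for any running instance, they leave the item disabled, so a racing interrupt or FW event cannot re-queue it mid-unplug. Roughly:

disable_work_sync(&sched->fw_events_work);
/* from here on, queue_work() on this item is a no-op until a
 * matching enable_work(&sched->fw_events_work) */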
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index 742b0b4ff3a3..f4a475aa34c0 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -21,7 +21,8 @@ struct panthor_job;
int panthor_group_create(struct panthor_file *pfile,
const struct drm_panthor_group_create *group_args,
- const struct drm_panthor_queue_create *queue_args);
+ const struct drm_panthor_queue_create *queue_args,
+ u64 drm_client_id);
int panthor_group_destroy(struct panthor_file *pfile, u32 group_handle);
int panthor_group_get_state(struct panthor_file *pfile,
struct drm_panthor_group_get_state *get_state);
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index b9fe926a49e8..3a9661b9b1fc 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -20,6 +20,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "pl111_drm.h"
@@ -473,12 +474,15 @@ static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
return best_div;
}
-static long pl111_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int pl111_clk_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- int div = pl111_clk_div_choose_div(hw, rate, prate, true);
+ int div = pl111_clk_div_choose_div(hw, req->rate,
+ &req->best_parent_rate, true);
- return DIV_ROUND_UP_ULL(*prate, div);
+ req->rate = DIV_ROUND_UP_ULL(req->best_parent_rate, div);
+
+ return 0;
}
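The clk framework is retiring .round_rate in favour of .determine_rate; the conversion is mechanical. Mapping between the two signatures:

/* .round_rate(hw, rate, &prate)       .determine_rate(hw, req)
 *   rate (requested)             ->     req->rate on entry
 *   *prate (parent rate, in/out) ->     req->best_parent_rate
 *   return the chosen rate       ->     write req->rate, return 0
 *   (a negative return signals an error in both variants)
 */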
static unsigned long pl111_clk_div_recalc_rate(struct clk_hw *hw,
@@ -528,7 +532,7 @@ static int pl111_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops pl111_clk_div_ops = {
.recalc_rate = pl111_clk_div_recalc_rate,
- .round_rate = pl111_clk_div_round_rate,
+ .determine_rate = pl111_clk_div_determine_rate,
.set_rate = pl111_clk_div_set_rate,
};
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index d6ea01f3797b..2e3200db2f39 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
+#include <drm/drm_print.h>
#include <drm/drm_util.h>
#include "qxl_drv.h"
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 2d9ed3b94574..b66b14b08b61 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -30,6 +30,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index ae7e572b1b4a..a134820aac58 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -34,9 +34,12 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -382,7 +385,25 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_pending_vblank_event *event;
+
qxl_crtc_update_monitors_config(crtc, "flush");
+
+ spin_lock_irq(&dev->event_lock);
+
+ event = crtc_state->event;
+ crtc_state->event = NULL;
+
+ if (event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+
+ spin_unlock_irq(&dev->event_lock);
}
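This is the standard pattern for completing flip/modeset events on hardware without a real vblank interrupt: a successful drm_crtc_vblank_get() means vblank emulation is ticking, so the event is armed for the next (timer-generated) vblank and the reference is consumed when it fires; otherwise the event is signalled immediately. DRM_CRTC_VBLANK_TIMER_FUNCS below plugs in that timer-based emulation, and drm_vblank_init() (added to qxl_modeset_init() further down) must run before any of it.

/* event completion contract:
 * drm_crtc_vblank_get(crtc) == 0 -> vblank ticking: arm for next vblank
 * otherwise                      -> vblank off: signal userspace now
 */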
static void qxl_crtc_destroy(struct drm_crtc *crtc)
@@ -401,6 +422,7 @@ static const struct drm_crtc_funcs qxl_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
@@ -455,11 +477,15 @@ static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "enable");
+
+ drm_crtc_vblank_on(crtc);
}
static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ drm_crtc_vblank_off(crtc);
+
qxl_crtc_update_monitors_config(crtc, "disable");
}
@@ -1276,6 +1302,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
qxl_display_read_client_monitors_config(qdev);
+ ret = drm_vblank_init(&qdev->ddev, qxl_num_crtc);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(&qdev->ddev);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 417061ae59eb..2bbb1168a3ff 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -44,6 +44,7 @@
#include <drm/drm_module.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index fc5e3763c359..4939b57a2a48 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -24,6 +24,7 @@
*/
#include <drm/drm.h>
+#include <drm/drm_print.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -39,7 +40,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
qxl_surface_evict(qdev, qobj, false);
tbo = &qobj->tbo;
- ttm_bo_put(tbo);
+ ttm_bo_fini(tbo);
}
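TTM now distinguishes dropping a reference (ttm_bo_put()) from tearing down an embedded BO at the end of its life; with this series, ttm_bo_fini() is the terminal call in a GEM object's release path. The shape in a driver's free callback, with hypothetical my_* names:

static void my_gem_free(struct drm_gem_object *obj)
{
	struct my_bo *bo = to_my_bo(obj);	/* hypothetical wrapper */

	ttm_bo_fini(&bo->tbo);	/* final teardown, replaces the last
				 * ttm_bo_put() */
}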
int qxl_gem_object_create(struct qxl_device *qdev, int size,
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index ffff54e5fb31..3cc45997533d 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -26,6 +26,8 @@
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <drm/drm_print.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 506ae1f5e099..336cbff26089 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -26,6 +26,8 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
+#include <drm/drm_print.h>
+
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 665278ee3b6d..4018bcf808e5 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include "qxl_drv.h"
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index dc3828db1991..461b7ab9ad5c 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -28,6 +28,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "qxl_drv.h"
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 05204a6a3fa8..7b3c9a6016db 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -22,6 +22,8 @@
#include <linux/delay.h>
+#include <drm/drm_print.h>
+
#include <trace/events/dma_fence.h>
#include "qxl_drv.h"
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 765a144cea14..1a40590077dd 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -28,6 +28,7 @@
#include <drm/drm.h>
#include <drm/drm_file.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_print.h>
#include <drm/qxl_drm.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
@@ -196,7 +197,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
qdev->ddev.anon_inode->i_mapping,
qdev->ddev.vma_offset_manager,
- false, false);
+ 0);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
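ttm_device_init() replaced its two trailing booleans (use_dma_alloc, use_dma32) with a single flags word; qxl passed false for both, so the mechanical conversion is simply 0. Drivers that enabled either behaviour OR in the matching flag constants instead (see the TTM headers in this tree for the exact names):

/* before: ttm_device_init(bdev, drv, dev, mapping, vma_mgr, false, false);
 * after:  ttm_device_init(bdev, drv, dev, mapping, vma_mgr, 0);
 */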
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index d1c5e471bdca..3d9f47bc807a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1832,7 +1832,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
return;
}
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 7c3a960f486a..ba8db1d07c07 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -2457,7 +2457,7 @@ static void ci_register_patching_mc_arb(struct radeon_device *rdev,
u32 tmp, tmp2;
tmp = RREG32(MC_SEQ_MISC0);
- patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+ patch = (tmp & 0x0000f00) == 0x300;
if (patch &&
((rdev->pdev->device == 0x67B0) ||
@@ -3238,7 +3238,8 @@ static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
SMU7_MAX_LEVELS_GRAPHICS;
SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
- u32 i, ret;
+ int ret;
+ u32 i;
memset(levels, 0, level_array_size);
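Splitting `u32 i, ret` into separate declarations is more than style: ret receives negative errno values, and in an unsigned variable a sign test silently stops working even though `if (ret)` still fires. For instance:

u32 ret = -EINVAL;	/* stored as 0xffffffea */
if (ret < 0)		/* never true: unsigned comparison */
	return ret;	/* the errno only survives by wrap-around */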
@@ -3285,7 +3286,8 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
SMU7_MAX_LEVELS_MEMORY;
SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
- u32 i, ret;
+ int ret;
+ u32 i;
memset(levels, 0, level_array_size);
@@ -3436,7 +3438,7 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
allowed_sclk_vddc_table->entries[i].clk;
pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
pi->dpm_table.sclk_table.count++;
}
}
@@ -3449,7 +3451,7 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
allowed_mclk_table->entries[i].clk;
pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
+ i == 0;
pi->dpm_table.mclk_table.count++;
}
}
@@ -4487,7 +4489,7 @@ static int ci_register_patching_mc_seq(struct radeon_device *rdev,
bool patch;
tmp = RREG32(MC_SEQ_MISC0);
- patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
+ patch = (tmp & 0x0000f00) == 0x300;
if (patch &&
((rdev->pdev->device == 0x67B0) ||
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 266c57733136..1162cb5d75ed 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -951,13 +951,13 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
(u64)track->vgt_strmout_size[i];
if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
- DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
- i, offset,
- radeon_bo_size(track->vgt_strmout_bo[i]));
+ dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
return -EINVAL;
}
} else {
- dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ dev_warn_once(p->dev, "No buffer for streamout %d\n", i);
return -EINVAL;
}
}
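The CS checker validates untrusted userspace command streams, so a misbehaving client could previously flood dmesg through DRM_ERROR/dev_warn on every bad packet. dev_warn_once() emits one diagnostic per call site for the whole uptime while still identifying the device, which is the pattern applied throughout the rest of this file:

dev_warn_once(p->dev, "bad packet\n");	/* first hit prints; later hits
					 * at this call site are dropped */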
@@ -979,8 +979,8 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
(tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
- dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
- __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
return -EINVAL;
}
/* check cb */
@@ -1056,8 +1056,8 @@ static int evergreen_packet0_check(struct radeon_cs_parser *p,
case EVERGREEN_VLINE_START_END:
r = evergreen_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
return r;
}
break;
@@ -1143,8 +1143,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_VSTMP_RING_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1155,15 +1155,15 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case CAYMAN_DB_EQAA:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
break;
case CAYMAN_DB_DEPTH_INFO:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
break;
@@ -1172,8 +1172,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
@@ -1214,8 +1214,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_READ_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_z_read_offset = radeon_get_ib_value(p, idx);
@@ -1226,8 +1226,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_Z_WRITE_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_z_write_offset = radeon_get_ib_value(p, idx);
@@ -1238,8 +1238,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_STENCIL_READ_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_s_read_offset = radeon_get_ib_value(p, idx);
@@ -1250,8 +1250,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_STENCIL_WRITE_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_s_write_offset = radeon_get_ib_value(p, idx);
@@ -1273,8 +1273,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_3:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
@@ -1295,8 +1295,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CP_COHER_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1311,8 +1311,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case PA_SC_AA_CONFIG:
if (p->rdev->family >= CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
@@ -1320,8 +1320,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case CAYMAN_PA_SC_AA_CONFIG:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
@@ -1360,8 +1360,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
@@ -1378,8 +1378,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags));
@@ -1439,8 +1439,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_ATTRIB:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1467,8 +1467,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_ATTRIB:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1555,8 +1555,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 0x3c;
@@ -1571,8 +1571,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
@@ -1584,8 +1584,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_DATA_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->htile_offset = radeon_get_ib_value(p, idx);
@@ -1702,36 +1702,36 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_LS_15:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case SX_MEMORY_EXPORT_BASE:
if (p->rdev->family >= CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
break;
case CAYMAN_SX_SCATTER_EXPORT_BASE:
if (p->rdev->family < CHIP_CAYMAN) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1740,7 +1740,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
break;
default:
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
return 0;
@@ -1795,7 +1795,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 1) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1807,13 +1807,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return 0;
if (pred_op > 2) {
- DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1827,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
- DRM_ERROR("bad CONTEXT_CONTROL\n");
+ dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n");
return -EINVAL;
}
break;
@@ -1835,17 +1835,17 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NUM_INSTANCES:
case PACKET3_CLEAR_STATE:
if (pkt->count) {
- DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
return -EINVAL;
}
break;
case CAYMAN_PACKET3_DEALLOC_STATE:
if (p->rdev->family < CHIP_CAYMAN) {
- DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
+ dev_warn_once(p->dev, "bad PACKET3_DEALLOC_STATE\n");
return -EINVAL;
}
if (pkt->count) {
- DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
+ dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
return -EINVAL;
}
break;
@@ -1854,12 +1854,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 1) {
- DRM_ERROR("bad INDEX_BASE\n");
+ dev_warn_once(p->dev, "bad INDEX_BASE\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad INDEX_BASE\n");
+ dev_warn_once(p->dev, "bad INDEX_BASE\n");
return -EINVAL;
}
@@ -1872,7 +1872,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
@@ -1880,7 +1880,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case PACKET3_INDEX_BUFFER_SIZE:
{
if (pkt->count != 0) {
- DRM_ERROR("bad INDEX_BUFFER_SIZE\n");
+ dev_warn_once(p->dev, "bad INDEX_BUFFER_SIZE\n");
return -EINVAL;
}
break;
@@ -1889,12 +1889,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset;
if (pkt->count != 3) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
@@ -1907,7 +1907,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
@@ -1917,12 +1917,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 4) {
- DRM_ERROR("bad DRAW_INDEX_2\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_2\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad DRAW_INDEX_2\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_2\n");
return -EINVAL;
}
@@ -1935,63 +1935,63 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
}
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DRAW_INDEX_MULTI_AUTO:
if (pkt->count != 2) {
- DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_MULTI_AUTO\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DRAW_INDEX_IMMD:
if (pkt->count < 2) {
- DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_DRAW_INDEX_OFFSET:
if (pkt->count != 2) {
- DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_DRAW_INDEX_OFFSET_2:
if (pkt->count != 3) {
- DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET_2\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
@@ -2005,19 +2005,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32]
*/
if (pkt->count != 2) {
- DRM_ERROR("bad SET_BASE\n");
+ dev_warn_once(p->dev, "bad SET_BASE\n");
return -EINVAL;
}
/* currently only supporting setting indirect draw buffer base address */
if (idx_value != 1) {
- DRM_ERROR("bad SET_BASE\n");
+ dev_warn_once(p->dev, "bad SET_BASE\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_BASE\n");
+ dev_warn_once(p->dev, "bad SET_BASE\n");
return -EINVAL;
}
@@ -2039,54 +2039,54 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
3 DRAW_INITIATOR Draw Initiator Register. Written to the VGT_DRAW_INITIATOR register for the assigned context
*/
if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDIRECT\n");
+ dev_warn_once(p->dev, "bad DRAW_INDIRECT\n");
return -EINVAL;
}
if (idx_value + size > track->indirect_draw_buffer_size) {
- dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
- idx_value, size, track->indirect_draw_buffer_size);
+ dev_warn_once(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n",
+ idx_value, size, track->indirect_draw_buffer_size);
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
}
case PACKET3_DISPATCH_DIRECT:
if (pkt->count != 3) {
- DRM_ERROR("bad DISPATCH_DIRECT\n");
+ dev_warn_once(p->dev, "bad DISPATCH_DIRECT\n");
return -EINVAL;
}
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DISPATCH_INDIRECT:
if (pkt->count != 1) {
- DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad DISPATCH_INDIRECT\n");
+ dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n");
return -EINVAL;
}
ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff);
r = evergreen_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
/* bit 4 is reg (0) or mem (1) */
@@ -2095,7 +2095,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
@@ -2106,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else if (idx_value & 0x100) {
- DRM_ERROR("cannot use PFP on REG wait\n");
+ dev_warn_once(p->dev, "cannot use PFP on REG wait\n");
return -EINVAL;
}
break;
@@ -2115,7 +2115,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u32 command, size, info;
u64 offset, tmp;
if (pkt->count != 4) {
- DRM_ERROR("bad CP DMA\n");
+ dev_warn_once(p->dev, "bad CP DMA\n");
return -EINVAL;
}
command = radeon_get_ib_value(p, idx+4);
@@ -2129,7 +2129,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
(command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
/* non mem to mem copies requires dw aligned count */
if (size % 4) {
- DRM_ERROR("CP DMA command requires dw count alignment\n");
+ dev_warn_once(p->dev, "CP DMA command requires dw count alignment\n");
return -EINVAL;
}
}
@@ -2137,19 +2137,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* src address space is register */
/* GDS is ok */
if (((info & 0x60000000) >> 29) != 1) {
- DRM_ERROR("CP DMA SAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA SAS not supported\n");
return -EINVAL;
}
} else {
if (command & PACKET3_CP_DMA_CMD_SAIC) {
- DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n");
return -EINVAL;
}
/* src address space is memory */
if (((info & 0x60000000) >> 29) == 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad CP DMA SRC\n");
+ dev_warn_once(p->dev, "bad CP DMA SRC\n");
return -EINVAL;
}
@@ -2159,15 +2159,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
ib[idx] = offset;
ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
} else if (((info & 0x60000000) >> 29) != 2) {
- DRM_ERROR("bad CP DMA SRC_SEL\n");
+ dev_warn_once(p->dev, "bad CP DMA SRC_SEL\n");
return -EINVAL;
}
}
@@ -2175,19 +2175,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* dst address space is register */
/* GDS is ok */
if (((info & 0x00300000) >> 20) != 1) {
- DRM_ERROR("CP DMA DAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA DAS not supported\n");
return -EINVAL;
}
} else {
/* dst address space is memory */
if (command & PACKET3_CP_DMA_CMD_DAIC) {
- DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
if (((info & 0x00300000) >> 20) == 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad CP DMA DST\n");
+ dev_warn_once(p->dev, "bad CP DMA DST\n");
return -EINVAL;
}
@@ -2197,15 +2197,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
ib[idx+2] = offset;
ib[idx+3] = upper_32_bits(offset) & 0xff;
} else {
- DRM_ERROR("bad CP DMA DST_SEL\n");
+ dev_warn_once(p->dev, "bad CP DMA DST_SEL\n");
return -EINVAL;
}
}
@@ -2213,13 +2213,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_PFP_SYNC_ME:
if (pkt->count) {
- DRM_ERROR("bad PFP_SYNC_ME\n");
+ dev_warn_once(p->dev, "bad PFP_SYNC_ME\n");
return -EINVAL;
}
break;
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
/* 0xffffffff/0x0 is flush all cache flag */
@@ -2227,7 +2227,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
radeon_get_ib_value(p, idx + 2) != 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -2235,7 +2235,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_EVENT_WRITE:
if (pkt->count != 2 && pkt->count != 0) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
if (pkt->count) {
@@ -2243,7 +2243,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
offset = reloc->gpu_offset +
@@ -2259,12 +2259,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 4) {
- DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
@@ -2281,12 +2281,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 3) {
- DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad EVENT_WRITE_EOS\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n");
return -EINVAL;
}
@@ -2304,7 +2304,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
@@ -2321,7 +2321,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
(start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
(end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n");
return -EINVAL;
}
for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) {
@@ -2334,7 +2334,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_SET_RESOURCE:
if (pkt->count % 8) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
@@ -2342,7 +2342,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_RESOURCE_START) ||
(start_reg >= PACKET3_SET_RESOURCE_END) ||
(end_reg >= PACKET3_SET_RESOURCE_END)) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
for (i = 0; i < (pkt->count / 8); i++) {
@@ -2355,7 +2355,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* tex base */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -2392,7 +2392,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
} else {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_RESOURCE (tex)\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n");
return -EINVAL;
}
moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -2411,14 +2411,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* vtx base */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_RESOURCE (vtx)\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE (vtx)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
size = radeon_get_ib_value(p, idx+1+(i*8)+1);
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
- dev_warn_ratelimited(p->dev, "vbo resource seems too big for the bo\n");
+ dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ size + offset, radeon_bo_size(reloc->robj));
ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
}
@@ -2431,7 +2432,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
}
@@ -2445,7 +2446,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
(start_reg >= PACKET3_SET_BOOL_CONST_END) ||
(end_reg >= PACKET3_SET_BOOL_CONST_END)) {
- DRM_ERROR("bad SET_BOOL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_BOOL_CONST\n");
return -EINVAL;
}
break;
@@ -2455,7 +2456,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
(start_reg >= PACKET3_SET_LOOP_CONST_END) ||
(end_reg >= PACKET3_SET_LOOP_CONST_END)) {
- DRM_ERROR("bad SET_LOOP_CONST\n");
+ dev_warn_once(p->dev, "bad SET_LOOP_CONST\n");
return -EINVAL;
}
break;
@@ -2465,13 +2466,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
(start_reg >= PACKET3_SET_CTL_CONST_END) ||
(end_reg >= PACKET3_SET_CTL_CONST_END)) {
- DRM_ERROR("bad SET_CTL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_CTL_CONST\n");
return -EINVAL;
}
break;
case PACKET3_SET_SAMPLER:
if (pkt->count % 3) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
@@ -2479,13 +2480,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_SAMPLER_START) ||
(start_reg >= PACKET3_SET_SAMPLER_END) ||
(end_reg >= PACKET3_SET_SAMPLER_END)) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
break;
case PACKET3_STRMOUT_BUFFER_UPDATE:
if (pkt->count != 4) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
return -EINVAL;
}
/* Updating memory at DST_ADDRESS. */
@@ -2493,14 +2494,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2512,14 +2513,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2532,23 +2533,23 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
if (pkt->count != 3) {
- DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+0);
offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
if (offset & 0x7) {
- DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n");
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2558,7 +2559,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_COPY_DW:
if (pkt->count != 4) {
- DRM_ERROR("bad COPY_DW (invalid count)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n");
return -EINVAL;
}
if (idx_value & 0x1) {
@@ -2566,14 +2567,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* SRC is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2583,8 +2584,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* SRC is a reg. */
reg = radeon_get_ib_value(p, idx+1) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 1);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 1);
return -EINVAL;
}
}
@@ -2593,14 +2594,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* DST is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2610,8 +2611,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* DST is a reg. */
reg = radeon_get_ib_value(p, idx+3) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 3);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 3);
return -EINVAL;
}
}
@@ -2622,7 +2623,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint32_t allowed_reg_base;
uint32_t source_sel;
if (pkt->count != 2) {
- DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+ dev_warn_once(p->dev, "bad SET_APPEND_CNT (invalid count)\n");
return -EINVAL;
}
@@ -2632,8 +2633,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
areg = idx_value >> 16;
if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
- dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
- areg, idx);
+ dev_warn_once(p->dev, "forbidden register for append cnt 0x%08x at %d\n",
+ areg, idx);
return -EINVAL;
}
@@ -2643,7 +2644,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
uint32_t swap;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n");
+ dev_warn_once(p->dev, "bad SET_APPEND_CNT (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 1);
@@ -2656,7 +2657,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (offset & 0xfffffffc) | swap;
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else {
- DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n");
+ dev_warn_once(p->dev, "bad SET_APPEND_CNT (unsupported operation)\n");
return -EINVAL;
}
break;
@@ -2666,23 +2667,23 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
u64 offset;
if (pkt->count != 2) {
- DRM_ERROR("bad COND_EXEC (invalid count)\n");
+ dev_warn_once(p->dev, "bad COND_EXEC (invalid count)\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COND_EXEC (missing reloc)\n");
+ dev_warn_once(p->dev, "bad COND_EXEC (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 0);
offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL;
if (offset & 0x7) {
- DRM_ERROR("bad COND_EXEC (address not qwords aligned)\n");
+ dev_warn_once(p->dev, "bad COND_EXEC (address not qwords aligned)\n");
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COND_EXEC bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COND_EXEC bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2692,7 +2693,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_COND_WRITE:
if (pkt->count != 7) {
- DRM_ERROR("bad COND_WRITE (invalid count)\n");
+ dev_warn_once(p->dev, "bad COND_WRITE (invalid count)\n");
return -EINVAL;
}
if (idx_value & 0x10) {
@@ -2700,14 +2701,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* POLL is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COND_WRITE (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad COND_WRITE (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 1);
offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32;
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2717,8 +2718,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* POLL is a reg. */
reg = radeon_get_ib_value(p, idx + 1) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 1);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 1);
return -EINVAL;
}
}
@@ -2727,14 +2728,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* WRITE is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("bad COND_WRITE (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad COND_WRITE (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx + 5);
offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32;
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2744,8 +2745,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
/* WRITE is a reg. */
reg = radeon_get_ib_value(p, idx + 5) << 2;
if (!evergreen_is_safe_reg(p, reg)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n",
- reg, idx + 5);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n",
+ reg, idx + 5);
return -EINVAL;
}
}
@@ -2753,7 +2754,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -2853,7 +2854,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
r = evergreen_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type);
kfree(p->track);
p->track = NULL;
return -EINVAL;
@@ -2896,8 +2897,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
do {
if (p->idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- p->idx, ib_chunk->length_dw);
+ dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
return -EINVAL;
}
idx = p->idx;
@@ -2910,7 +2911,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case DMA_PACKET_WRITE:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
switch (sub_cmd) {
@@ -2932,24 +2933,24 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += count + 3;
break;
default:
- DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
- dst_offset, radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_COPY:
r = r600_dma_cs_next_reloc(p, &src_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
switch (sub_cmd) {
@@ -2961,13 +2962,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -3001,13 +3002,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
p->idx += 9;
@@ -3020,13 +3021,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
- src_offset + count, radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+ src_offset + count, radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
- dst_offset + count, radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+ dst_offset + count, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff);
@@ -3039,7 +3040,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x41:
/* L2L, partial */
if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2L Partial is cayman only !\n");
+ dev_warn_once(p->dev, "L2L Partial is cayman only !\n");
return -EINVAL;
}
ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff);
@@ -3054,7 +3055,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* L2L, dw, broadcast */
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2L, dw, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3064,18 +3065,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+3);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -3089,12 +3090,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
/* Copy L2T Frame to Field */
case 0x48:
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3104,18 +3105,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
@@ -3128,7 +3129,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x49:
/* L2T, T2L partial */
if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n");
return -EINVAL;
}
/* detile bit */
@@ -3151,12 +3152,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x4b:
/* L2T, broadcast */
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3166,18 +3167,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
@@ -3212,13 +3213,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
p->idx += 9;
@@ -3227,7 +3228,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x4d:
/* T2T partial */
if (p->family < CHIP_CAYMAN) {
- DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+ dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n");
return -EINVAL;
}
ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8);
@@ -3238,12 +3239,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
case 0x4f:
/* L2T, broadcast */
if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst2_reloc);
if (r) {
- DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
@@ -3253,18 +3254,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
src_offset = radeon_get_ib_value(p, idx+8);
src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
- dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
- dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+ dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+ dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8);
@@ -3274,21 +3275,21 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += 10;
break;
default:
- DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_CONSTANT_FILL\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
- dst_offset, radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset, radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
@@ -3299,7 +3300,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += 1;
break;
default:
- DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (p->idx < p->chunk_ib->length_dw);
@@ -3430,7 +3431,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
return true;
default:
- DRM_ERROR("Invalid register 0x%x in CS\n", reg);
+ DRM_DEBUG("Invalid register 0x%x in CS\n", reg);
return false;
}
}
@@ -3448,7 +3449,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
break;
case PACKET3_SET_BASE:
if (idx_value != 1) {
- DRM_ERROR("bad SET_BASE");
+ dev_warn_once(rdev->dev, "bad SET_BASE");
return -EINVAL;
}
break;
@@ -3519,7 +3520,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ dev_warn_once(rdev->dev, "bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
@@ -3539,7 +3540,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
(command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
/* non mem to mem copies requires dw aligned count */
if ((command & 0x1fffff) % 4) {
- DRM_ERROR("CP DMA command requires dw count alignment\n");
+ dev_warn_once(rdev->dev, "CP DMA command requires dw count alignment\n");
return -EINVAL;
}
}
@@ -3550,14 +3551,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
if (command & PACKET3_CP_DMA_CMD_SAIC) {
reg = start_reg;
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n");
return -EINVAL;
}
}
@@ -3571,14 +3572,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
if (command & PACKET3_CP_DMA_CMD_DAIC) {
reg = start_reg;
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad DST register\n");
return -EINVAL;
}
} else {
for (i = 0; i < (command & 0x1fffff); i++) {
reg = start_reg + (4 * i);
if (!evergreen_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
+ dev_warn_once(rdev->dev, "CP DMA Bad DST register\n");
return -EINVAL;
}
}
@@ -3591,7 +3592,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
uint32_t allowed_reg_base;
if (pkt->count != 2) {
- DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n");
+ dev_warn_once(rdev->dev, "bad SET_APPEND_CNT (invalid count)\n");
return -EINVAL;
}
@@ -3601,8 +3602,8 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
areg = idx_value >> 16;
if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) {
- DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n",
- areg, idx);
+ dev_warn_once(rdev->dev, "forbidden register for append cnt 0x%08x at %d\n",
+ areg, idx);
return -EINVAL;
}
break;
@@ -3681,7 +3682,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
idx += count + 3;
break;
default:
- DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+ dev_warn_once(rdev->dev,
+ "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n",
+ idx, ib->ptr[idx]);
return -EINVAL;
}
break;
@@ -3732,7 +3735,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
idx += 10;
break;
default:
- DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+ dev_warn_once(rdev->dev,
+ "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n",
+ idx, ib->ptr[idx]);
return -EINVAL;
}
break;
@@ -3743,7 +3748,7 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
idx += 1;
break;
default:
- DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ dev_warn_once(rdev->dev, "Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (idx < ib->length_dw);
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index e08559c44a5c..82edbfb259bf 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -3397,7 +3397,7 @@ static int ni_enable_smc_cac(struct radeon_device *rdev,
if (PPSMC_Result_OK != smc_result)
ret = -EINVAL;
- ni_pi->cac_enabled = (PPSMC_Result_OK == smc_result) ? true : false;
+ ni_pi->cac_enabled = PPSMC_Result_OK == smc_result;
}
} else if (ni_pi->cac_enabled) {
smc_result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
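
The ni_dpm.c hunk is an unrelated micro-cleanup: an == comparison in C already evaluates to 0 or 1, and assigning it to a bool normalizes it, so the "? true : false" ternary was dead weight. A minimal illustration (enabled is a made-up name):

	bool enabled = (smc_result == PPSMC_Result_OK);	/* identical to the ternary form */
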
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 80703417d8a1..07a9c523a17a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1298,8 +1298,8 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1313,7 +1313,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
tile_flags |= RADEON_DST_TILE_MACRO;
if (reloc->tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
- DRM_ERROR("Cannot src blit from microtiled surface\n");
+ dev_warn_once(p->dev, "Cannot src blit from microtiled surface\n");
radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
@@ -1342,8 +1342,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
if (c > 16) {
- DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "Only 16 vertex buffers are allowed %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return -EINVAL;
}
@@ -1351,8 +1351,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
for (i = 0; i < (c - 1); i += 2, idx += 3) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1364,8 +1364,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
track->arrays[i + 0].esize &= 0x7F;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1377,8 +1377,8 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
if (c & 1) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n",
- pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n",
+ pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1470,12 +1470,12 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
/* check its a wait until and only 1 count */
if (waitreloc.reg != RADEON_WAIT_UNTIL ||
waitreloc.count != 0) {
- DRM_ERROR("vline wait had illegal wait until segment\n");
+ dev_warn_once(p->dev, "vline wait had illegal wait until segment\n");
return -EINVAL;
}
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
- DRM_ERROR("vline wait had illegal wait until\n");
+ dev_warn_once(p->dev, "vline wait had illegal wait until\n");
return -EINVAL;
}
@@ -1493,7 +1493,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
reg = R100_CP_PACKET0_GET_REG(header);
crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
radeon_crtc = to_radeon_crtc(crtc);
@@ -1514,7 +1514,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
break;
default:
- DRM_ERROR("unknown crtc reloc\n");
+ dev_warn_once(p->dev, "unknown crtc reloc\n");
return -EINVAL;
}
ib[h_idx] = header;
@@ -1599,7 +1599,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
@@ -1616,8 +1616,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1629,8 +1629,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1645,8 +1645,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_TXOFFSET_0) / 24;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1672,8 +1672,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1690,8 +1690,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1708,8 +1708,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1726,8 +1726,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLORPITCH:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1768,8 +1768,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->cb[0].cpp = 4;
break;
default:
- DRM_ERROR("Invalid color buffer format (%d) !\n",
- ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n",
+ ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL;
}
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
@@ -1797,8 +1797,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_ZPASS_ADDR:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1927,10 +1927,10 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
idx = pkt->idx + 1;
value = radeon_get_ib_value(p, idx + 2);
if ((value + 1) > radeon_bo_size(robj)) {
- DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
- "(need %u have %lu) !\n",
- value + 1,
- radeon_bo_size(robj));
+ dev_warn_once(p->dev, "[drm] Buffer too small for PACKET3 INDX_BUFFER "
+ "(need %u have %lu) !\n",
+ value + 1,
+ radeon_bo_size(robj));
return -EINVAL;
}
return 0;
@@ -1957,7 +1957,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
case PACKET3_INDX_BUFFER:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1971,7 +1971,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1992,7 +1992,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_3D_DRAW_IMMD:
if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 0));
@@ -2005,7 +2005,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
/* triggers drawing using in-packet vertex data */
case PACKET3_3D_DRAW_IMMD_2:
if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
@@ -2051,7 +2051,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -2093,8 +2093,8 @@ int r100_cs_parse(struct radeon_cs_parser *p)
r = r100_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n",
- pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n",
+ pkt.type);
return -EINVAL;
}
if (r)
@@ -2105,19 +2105,19 @@ int r100_cs_parse(struct radeon_cs_parser *p)
static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
- DRM_ERROR("pitch %d\n", t->pitch);
- DRM_ERROR("use_pitch %d\n", t->use_pitch);
- DRM_ERROR("width %d\n", t->width);
- DRM_ERROR("width_11 %d\n", t->width_11);
- DRM_ERROR("height %d\n", t->height);
- DRM_ERROR("height_11 %d\n", t->height_11);
- DRM_ERROR("num levels %d\n", t->num_levels);
- DRM_ERROR("depth %d\n", t->txdepth);
- DRM_ERROR("bpp %d\n", t->cpp);
- DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
- DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
- DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
- DRM_ERROR("compress format %d\n", t->compress_format);
+ DRM_DEBUG("pitch %d\n", t->pitch);
+ DRM_DEBUG("use_pitch %d\n", t->use_pitch);
+ DRM_DEBUG("width %d\n", t->width);
+ DRM_DEBUG("width_11 %d\n", t->width_11);
+ DRM_DEBUG("height %d\n", t->height);
+ DRM_DEBUG("height_11 %d\n", t->height_11);
+ DRM_DEBUG("num levels %d\n", t->num_levels);
+ DRM_DEBUG("depth %d\n", t->txdepth);
+ DRM_DEBUG("bpp %d\n", t->cpp);
+ DRM_DEBUG("coordinate type %d\n", t->tex_coord_type);
+ DRM_DEBUG("width round to power of 2 %d\n", t->roundup_w);
+ DRM_DEBUG("height round to power of 2 %d\n", t->roundup_h);
+ DRM_DEBUG("compress format %d\n", t->compress_format);
}
static int r100_track_compress_size(int compress_format, int w, int h)
@@ -2172,8 +2172,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
size += track->textures[idx].cube_info[face].offset;
if (size > radeon_bo_size(cube_robj)) {
- DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
- size, radeon_bo_size(cube_robj));
+ dev_warn_once(rdev->dev,
+ "Cube texture offset greater than object size %lu %lu\n",
+ size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
@@ -2196,7 +2197,7 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
continue;
robj = track->textures[u].robj;
if (robj == NULL) {
- DRM_ERROR("No texture bound to unit %u\n", u);
+ dev_warn_once(rdev->dev, "No texture bound to unit %u\n", u);
return -EINVAL;
}
size = 0;
@@ -2249,13 +2250,13 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
size *= 6;
break;
default:
- DRM_ERROR("Invalid texture coordinate type %u for unit "
- "%u\n", track->textures[u].tex_coord_type, u);
+ dev_warn_once(rdev->dev, "Invalid texture coordinate type %u for unit "
+ "%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
if (size > radeon_bo_size(robj)) {
- DRM_ERROR("Texture of unit %u needs %lu bytes but is "
- "%lu\n", u, size, radeon_bo_size(robj));
+ dev_warn_once(rdev->dev, "Texture of unit %u needs %lu bytes but is "
+ "%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
@@ -2277,18 +2278,18 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
- DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
+ dev_warn_once(rdev->dev, "[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
if (size > radeon_bo_size(track->cb[i].robj)) {
- DRM_ERROR("[drm] Buffer too small for color buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->cb[i].robj));
- DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
- i, track->cb[i].pitch, track->cb[i].cpp,
- track->cb[i].offset, track->maxy);
+ dev_warn_once(rdev->dev, "[drm] Buffer too small for color buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->cb[i].robj));
+ dev_warn_once(rdev->dev, "[drm] color buffer %d (%u %u %u %u)\n",
+ i, track->cb[i].pitch, track->cb[i].cpp,
+ track->cb[i].offset, track->maxy);
return -EINVAL;
}
}
@@ -2296,18 +2297,18 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
if (track->zb_dirty && track->z_enabled) {
if (track->zb.robj == NULL) {
- DRM_ERROR("[drm] No buffer for z buffer !\n");
+ dev_warn_once(rdev->dev, "[drm] No buffer for z buffer !\n");
return -EINVAL;
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
if (size > radeon_bo_size(track->zb.robj)) {
- DRM_ERROR("[drm] Buffer too small for z buffer "
- "(need %lu have %lu) !\n", size,
- radeon_bo_size(track->zb.robj));
- DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
- track->zb.pitch, track->zb.cpp,
- track->zb.offset, track->maxy);
+ dev_warn_once(rdev->dev, "[drm] Buffer too small for z buffer "
+ "(need %lu have %lu) !\n", size,
+ radeon_bo_size(track->zb.robj));
+ dev_warn_once(rdev->dev, "[drm] zbuffer (%u %u %u %u)\n",
+ track->zb.pitch, track->zb.cpp,
+ track->zb.offset, track->maxy);
return -EINVAL;
}
}
@@ -2315,19 +2316,19 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
if (track->aa_dirty && track->aaresolve) {
if (track->aa.robj == NULL) {
- DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
+ dev_warn_once(rdev->dev, "[drm] No buffer for AA resolve buffer %d !\n", i);
return -EINVAL;
}
/* I believe the format comes from colorbuffer0. */
size = track->aa.pitch * track->cb[0].cpp * track->maxy;
size += track->aa.offset;
if (size > radeon_bo_size(track->aa.robj)) {
- DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
- "(need %lu have %lu) !\n", i, size,
- radeon_bo_size(track->aa.robj));
- DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
- i, track->aa.pitch, track->cb[0].cpp,
- track->aa.offset, track->maxy);
+ dev_warn_once(rdev->dev, "[drm] Buffer too small for AA resolve buffer %d "
+ "(need %lu have %lu) !\n", i, size,
+ radeon_bo_size(track->aa.robj));
+ dev_warn_once(rdev->dev, "[drm] AA resolve buffer %d (%u %u %u %u)\n",
+ i, track->aa.pitch, track->cb[0].cpp,
+ track->aa.offset, track->maxy);
return -EINVAL;
}
}
@@ -2344,17 +2345,17 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * track->max_indx * 4UL;
if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
- DRM_ERROR("Max indices %u\n", track->max_indx);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
+ dev_warn_once(rdev->dev, "Max indices %u\n", track->max_indx);
return -EINVAL;
}
}
@@ -2363,16 +2364,16 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * (nverts - 1) * 4UL;
if (track->arrays[i].robj == NULL) {
- DRM_ERROR("(PW %u) Vertex array %u no buffer "
- "bound\n", prim_walk, i);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u no buffer "
+ "bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
- dev_err(rdev->dev, "(PW %u) Vertex array %u "
- "need %lu dwords have %lu dwords\n",
- prim_walk, i, size >> 2,
- radeon_bo_size(track->arrays[i].robj)
- >> 2);
+ dev_warn_once(rdev->dev, "(PW %u) Vertex array %u "
+ "need %lu dwords have %lu dwords\n",
+ prim_walk, i, size >> 2,
+ radeon_bo_size(track->arrays[i].robj)
+ >> 2);
return -EINVAL;
}
}
@@ -2380,16 +2381,16 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
case 3:
size = track->vtx_size * nverts;
if (size != track->immd_dwords) {
- DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
- track->immd_dwords, size);
- DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
- nverts, track->vtx_size);
+ dev_warn_once(rdev->dev, "IMMD draw %u dwors but needs %lu dwords\n",
+ track->immd_dwords, size);
+ dev_warn_once(rdev->dev, "VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
+ nverts, track->vtx_size);
return -EINVAL;
}
break;
default:
- DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
- prim_walk);
+ dev_warn_once(rdev->dev, "[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
+ prim_walk);
return -EINVAL;
}
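
Note the two demotions to DRM_DEBUG in this file and in evergreen_cs.c: r100_cs_track_texture_print() and evergreen_vm_reg_valid() are helpers whose callers already report the failure, so their verbose output now only appears when DRM core debugging is enabled (e.g. booting with drm.debug=0x1, assuming the usual mapping of DRM_DEBUG to the core debug category). The resulting split, taken from the texture-check path above:

	if (size > radeon_bo_size(robj)) {
		/* always recorded, but at most once per call site */
		dev_warn_once(rdev->dev, "Texture of unit %u needs %lu bytes but is %lu\n",
			      u, size, radeon_bo_size(robj));
		/* full dump only when drm.debug is enabled */
		r100_cs_track_texture_print(&track->textures[u]);
		return -EINVAL;
	}
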
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index f5f2ffea5ab2..10a65a71de31 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -163,8 +163,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -180,8 +180,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -193,8 +193,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLOROFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -212,8 +212,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
i = (reg - R200_PP_TXOFFSET_0) / 24;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -265,8 +265,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -283,8 +283,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_COLORPITCH:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -326,12 +326,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->cb[0].cpp = 4;
break;
default:
- DRM_ERROR("Invalid color buffer format (%d) !\n",
- ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n",
+ ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL;
}
if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
- DRM_ERROR("No support for depth xy offset in kms\n");
+ dev_warn_once(p->dev, "No support for depth xy offset in kms\n");
return -EINVAL;
}
@@ -360,8 +360,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case RADEON_RB3D_ZPASS_ADDR:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d22889fbfa9c..d2ee6deec039 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -645,8 +645,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -664,8 +664,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -677,8 +677,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_ZB_DEPTHOFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -706,8 +706,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
i = (reg - R300_TX_OFFSET_0) >> 2;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -762,7 +762,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_CCTL */
if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
p->rdev->cmask_filp != p->filp) {
- DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+ dev_warn_once(p->dev, "Invalid RB3D_CCTL: Cannot enable CMASK.\n");
return -EINVAL;
}
track->num_cb = ((idx_value >> 5) & 0x3) + 1;
@@ -779,8 +779,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -812,8 +812,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 5:
if (p->rdev->family < CHIP_RV515) {
- DRM_ERROR("Invalid color buffer format (%d)!\n",
- ((idx_value >> 21) & 0xF));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d)!\n",
+ ((idx_value >> 21) & 0xF));
return -EINVAL;
}
fallthrough;
@@ -827,8 +827,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->cb[i].cpp = 16;
break;
default:
- DRM_ERROR("Invalid color buffer format (%d) !\n",
- ((idx_value >> 21) & 0xF));
+ dev_warn_once(p->dev, "Invalid color buffer format (%d) !\n",
+ ((idx_value >> 21) & 0xF));
return -EINVAL;
}
track->cb_dirty = true;
@@ -853,8 +853,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->zb.cpp = 4;
break;
default:
- DRM_ERROR("Invalid z buffer format (%d) !\n",
- (idx_value & 0xF));
+ dev_warn_once(p->dev, "Invalid z buffer format (%d) !\n",
+ (idx_value & 0xF));
return -EINVAL;
}
track->zb_dirty = true;
@@ -864,8 +864,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -962,8 +962,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case R300_TX_FORMAT_ATI2N:
if (p->rdev->family < CHIP_R420) {
- DRM_ERROR("Invalid texture format %u\n",
- (idx_value & 0x1F));
+ dev_warn_once(p->dev, "Invalid texture format %u\n",
+ (idx_value & 0x1F));
return -EINVAL;
}
/* The same rules apply as for DXT3/5. */
@@ -974,8 +974,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
break;
default:
- DRM_ERROR("Invalid texture format %u\n",
- (idx_value & 0x1F));
+ dev_warn_once(p->dev, "Invalid texture format %u\n",
+ (idx_value & 0x1F));
return -EINVAL;
}
track->tex_dirty = true;
@@ -1041,7 +1041,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
R100_TRACK_COMP_DXT1;
}
} else if (idx_value & (1 << 14)) {
- DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
+ dev_warn_once(p->dev, "Forbidden bit TXFORMAT_MSB\n");
return -EINVAL;
}
track->tex_dirty = true;
@@ -1079,8 +1079,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_ZB_ZPASS_ADDR:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1121,8 +1121,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case R300_RB3D_AARESOLVE_OFFSET:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1191,7 +1191,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
case PACKET3_INDX_BUFFER:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
+ dev_warn_once(p->dev, "No reloc for packet3 %d\n", pkt->opcode);
radeon_cs_dump_packet(p, pkt);
return r;
}
@@ -1207,7 +1207,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
* PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */
if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
@@ -1222,7 +1222,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
* PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */
if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
- DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+ dev_warn_once(p->dev, "PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
@@ -1272,7 +1272,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -1308,7 +1308,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
r = r300_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type);
return -EINVAL;
}
if (r) {
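
Besides suppressing repeats, the dev_*() helpers tie each message to a specific device, which matters on multi-GPU systems where plain DRM_ERROR() output names only the calling function. Roughly (illustrative log lines, hypothetical PCI address):

	/*
	 * DRM_ERROR("Unknown packet type %d !\n", ...):
	 *   [drm:r300_cs_parse [radeon]] *ERROR* Unknown packet type 5 !
	 *
	 * dev_warn_once(p->dev, "Unknown packet type %d !\n", ...):
	 *   radeon 0000:01:00.0: Unknown packet type 5 !   (printed once)
	 */
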
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index ac77d1246b94..8eeceeeca362 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -361,9 +361,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
format = G_0280A0_FORMAT(track->cb_color_info[i]);
if (!r600_fmt_is_valid_color(format)) {
- dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
- __func__, __LINE__, format,
- i, track->cb_color_info[i]);
+ dev_warn_once(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+ __func__, __LINE__, format,
+ i, track->cb_color_info[i]);
return -EINVAL;
}
/* pitch in pixels */
@@ -384,9 +384,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
array_check.blocksize = r600_fmt_get_blocksize(format);
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
- G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
- track->cb_color_info[i]);
+ dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
return -EINVAL;
}
switch (array_mode) {
@@ -402,25 +402,26 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
case V_0280A0_ARRAY_2D_TILED_THIN1:
break;
default:
- dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
- G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
- track->cb_color_info[i]);
+ dev_warn_once(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
return -EINVAL;
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, array_mode);
+ dev_warn_once(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, height, height_align, array_mode);
+ dev_warn_once(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, height, height_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
- base_offset, base_align, array_mode);
+ dev_warn_once(p->dev,
+ "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
+ base_offset, base_align, array_mode);
return -EINVAL;
}
@@ -447,13 +448,14 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
* broken userspace.
*/
} else {
- dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
- __func__, i, array_mode,
- track->cb_color_bo_offset[i], tmp,
- radeon_bo_size(track->cb_color_bo[i]),
- pitch, height, r600_fmt_get_nblocksx(format, pitch),
- r600_fmt_get_nblocksy(format, height),
- r600_fmt_get_blocksize(format));
+ dev_warn_once(p->dev,
+ "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n",
+ __func__, i, array_mode,
+ track->cb_color_bo_offset[i], tmp,
+ radeon_bo_size(track->cb_color_bo[i]),
+ pitch, height, r600_fmt_get_nblocksx(format, pitch),
+ r600_fmt_get_nblocksy(format, height),
+ r600_fmt_get_blocksize(format));
return -EINVAL;
}
}
@@ -478,11 +480,11 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (bytes + track->cb_color_frag_offset[i] >
radeon_bo_size(track->cb_color_frag_bo[i])) {
- dev_warn(p->dev, "%s FMASK_TILE_MAX too large "
- "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
- __func__, tile_max, bytes,
- track->cb_color_frag_offset[i],
- radeon_bo_size(track->cb_color_frag_bo[i]));
+ dev_warn_once(p->dev, "%s FMASK_TILE_MAX too large "
+ "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, tile_max, bytes,
+ track->cb_color_frag_offset[i],
+ radeon_bo_size(track->cb_color_frag_bo[i]));
return -EINVAL;
}
}
@@ -496,17 +498,17 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (bytes + track->cb_color_tile_offset[i] >
radeon_bo_size(track->cb_color_tile_bo[i])) {
- dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large "
- "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
- __func__, block_max, bytes,
- track->cb_color_tile_offset[i],
- radeon_bo_size(track->cb_color_tile_bo[i]));
+ dev_warn_once(p->dev, "%s CMASK_BLOCK_MAX too large "
+ "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n",
+ __func__, block_max, bytes,
+ track->cb_color_tile_offset[i],
+ radeon_bo_size(track->cb_color_tile_bo[i]));
return -EINVAL;
}
break;
}
default:
- dev_warn(p->dev, "%s invalid tile mode\n", __func__);
+ dev_warn_once(p->dev, "%s invalid tile mode\n", __func__);
return -EINVAL;
}
return 0;
@@ -526,7 +528,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
if (track->db_bo == NULL) {
- dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ dev_warn_once(p->dev, "z/stencil with no depth buffer\n");
return -EINVAL;
}
switch (G_028010_FORMAT(track->db_depth_info)) {
@@ -544,20 +546,22 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
bpe = 8;
break;
default:
- dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ dev_warn_once(p->dev,
+ "z/stencil with invalid format %d\n",
+ G_028010_FORMAT(track->db_depth_info));
return -EINVAL;
}
if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
if (!track->db_depth_size_idx) {
- dev_warn(p->dev, "z/stencil buffer size not set\n");
+ dev_warn_once(p->dev, "z/stencil buffer size not set\n");
return -EINVAL;
}
tmp = radeon_bo_size(track->db_bo) - track->db_offset;
tmp = (tmp / bpe) >> 6;
if (!tmp) {
- dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
- track->db_depth_size, bpe, track->db_offset,
- radeon_bo_size(track->db_bo));
+ dev_warn_once(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
return -EINVAL;
}
ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
@@ -579,9 +583,9 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
array_check.blocksize = bpe;
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
+ dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
return -EINVAL;
}
switch (array_mode) {
@@ -592,24 +596,24 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
case V_028010_ARRAY_2D_TILED_THIN1:
break;
default:
- dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
- G_028010_ARRAY_MODE(track->db_depth_info),
- track->db_depth_info);
+ dev_warn_once(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
return -EINVAL;
}
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
+ dev_warn_once(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
__func__, __LINE__, pitch, pitch_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(height, height_align)) {
- dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
+ dev_warn_once(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
__func__, __LINE__, height, height_align, array_mode);
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
+ dev_warn_once(p->dev, "%s offset 0x%llx, 0x%llx, %d not aligned\n", __func__,
base_offset, base_align, array_mode);
return -EINVAL;
}
@@ -618,10 +622,11 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews * track->nsamples;
if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
- dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
- array_mode,
- track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
- radeon_bo_size(track->db_bo));
+ dev_warn_once(p->dev,
+ "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
+ array_mode,
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
return -EINVAL;
}
}
@@ -632,13 +637,13 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
unsigned nbx, nby;
if (track->htile_bo == NULL) {
- dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
- __func__, __LINE__, track->db_depth_info);
+ dev_warn_once(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
+ __func__, __LINE__, track->db_depth_info);
return -EINVAL;
}
if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
- dev_warn(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
- __func__, __LINE__, track->db_depth_size);
+ dev_warn_once(p->dev, "%s:%d htile can't be enabled with bogus db_depth_size 0x%08x\n",
+ __func__, __LINE__, track->db_depth_size);
return -EINVAL;
}
@@ -676,8 +681,8 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
nby = round_up(nby, 16 * 8);
break;
default:
- dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
- __func__, __LINE__, track->npipes);
+ dev_warn_once(p->dev, "%s:%d invalid num pipes %d\n",
+ __func__, __LINE__, track->npipes);
return -EINVAL;
}
}
@@ -689,9 +694,9 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
size += track->htile_offset;
if (size > radeon_bo_size(track->htile_bo)) {
- dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
- __func__, __LINE__, radeon_bo_size(track->htile_bo),
- size, nbx, nby);
+ dev_warn_once(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
+ __func__, __LINE__, radeon_bo_size(track->htile_bo),
+ size, nbx, nby);
return -EINVAL;
}
}
@@ -718,13 +723,13 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
(u64)track->vgt_strmout_size[i];
if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
- DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
- i, offset,
- radeon_bo_size(track->vgt_strmout_bo[i]));
+ dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n",
+ i, offset,
+ radeon_bo_size(track->vgt_strmout_bo[i]));
return -EINVAL;
}
} else {
- dev_warn(p->dev, "No buffer for streamout %d\n", i);
+ dev_warn_once(p->dev, "No buffer for streamout %d\n", i);
return -EINVAL;
}
}
@@ -753,8 +758,8 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
(tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
- dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
- __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
return -EINVAL;
}
/* perform rewrite of CB_COLOR[0-7]_SIZE */
@@ -841,33 +846,33 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
/* check its a WAIT_REG_MEM */
if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
- DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
+ dev_warn_once(p->dev, "vline wait missing WAIT_REG_MEM segment\n");
return -EINVAL;
}
wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
/* bit 4 is reg (0) or mem (1) */
if (wait_reg_mem_info & 0x10) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on MEM instead of REG\n");
return -EINVAL;
}
/* bit 8 is me (0) or pfp (1) */
if (wait_reg_mem_info & 0x100) {
- DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM waiting on PFP instead of ME\n");
return -EINVAL;
}
/* waiting for value to be equal */
if ((wait_reg_mem_info & 0x7) != 0x3) {
- DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM function not equal\n");
return -EINVAL;
}
if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
- DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM bad reg\n");
return -EINVAL;
}
if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
- DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
+ dev_warn_once(p->dev, "vline WAIT_REG_MEM bad bit mask\n");
return -EINVAL;
}
@@ -886,7 +891,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
crtc = drm_crtc_find(rdev_to_drm(p->rdev), p->filp, crtc_id);
if (!crtc) {
- DRM_ERROR("cannot find crtc %d\n", crtc_id);
+ dev_warn_once(p->dev, "cannot find crtc %d\n", crtc_id);
return -ENOENT;
}
radeon_crtc = to_radeon_crtc(crtc);
@@ -907,7 +912,7 @@ int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
ib[h_idx] = header;
ib[h_idx + 4] = vline_status[crtc_id] >> 2;
} else {
- DRM_ERROR("unknown crtc reloc\n");
+ dev_warn_once(p->dev, "unknown crtc reloc\n");
return -EINVAL;
}
return 0;
@@ -923,8 +928,8 @@ static int r600_packet0_check(struct radeon_cs_parser *p,
case AVIVO_D1MODE_VLINE_START_END:
r = r600_cs_packet_parse_vline(p);
if (r) {
- DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
- idx, reg);
+ dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n",
+ idx, reg);
return r;
}
break;
@@ -972,7 +977,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
i = (reg >> 7);
if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
m = 1 << ((reg >> 2) & 31);
@@ -1013,8 +1018,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_VSTMP_RING_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1031,8 +1036,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
radeon_cs_packet_next_is_pkt3_nop(p)) {
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_depth_info = radeon_get_ib_value(p, idx);
@@ -1073,8 +1078,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case VGT_STRMOUT_BUFFER_BASE_3:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
@@ -1096,8 +1101,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CP_COHER_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1270,8 +1275,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 4;
@@ -1285,8 +1290,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_DEPTH_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->db_offset = radeon_get_ib_value(p, idx) << 8;
@@ -1298,8 +1303,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case DB_HTILE_DATA_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
track->htile_offset = (u64)radeon_get_ib_value(p, idx) << 8;
@@ -1368,8 +1373,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SQ_ALU_CONST_CACHE_VS_15:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1377,8 +1382,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case SX_MEMORY_EXPORT_BASE:
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- dev_warn(p->dev, "bad SET_CONFIG_REG "
- "0x%04X\n", reg);
+ dev_warn_once(p->dev, "bad SET_CONFIG_REG "
+ "0x%04X\n", reg);
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1387,7 +1392,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
break;
default:
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return -EINVAL;
}
return 0;
@@ -1408,7 +1413,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
unsigned block_align, unsigned height_align, unsigned base_align,
unsigned *l0_size, unsigned *mipmap_size)
{
- unsigned offset, i, level;
+ unsigned offset, i;
unsigned width, height, depth, size;
unsigned blocksize;
unsigned nbx, nby;
@@ -1420,7 +1425,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel,
w0 = r600_mip_minify(w0, 0);
h0 = r600_mip_minify(h0, 0);
d0 = r600_mip_minify(d0, 0);
- for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+ for (i = 0, offset = 0; i < nlevels; i++) {
width = r600_mip_minify(w0, i);
nbx = r600_fmt_get_nblocksx(format, width);
@@ -1543,43 +1548,43 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
llevel = 0;
break;
default:
- dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+ dev_warn_once(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
return -EINVAL;
}
if (!r600_fmt_is_valid_texture(format, p->family)) {
- dev_warn(p->dev, "%s:%d texture invalid format %d\n",
- __func__, __LINE__, format);
+ dev_warn_once(p->dev, "%s:%d texture invalid format %d\n",
+ __func__, __LINE__, format);
return -EINVAL;
}
if (r600_get_array_mode_alignment(&array_check,
&pitch_align, &height_align, &depth_align, &base_align)) {
- dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n",
- __func__, __LINE__, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex array mode (%d) invalid\n",
+ __func__, __LINE__, G_038000_TILE_MODE(word0));
return -EINVAL;
}
/* XXX check height as well... */
if (!IS_ALIGNED(pitch, pitch_align)) {
- dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
- __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
+ __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (!IS_ALIGNED(base_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
- __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (!IS_ALIGNED(mip_offset, base_align)) {
- dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
- __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
+ dev_warn_once(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
+ __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
return -EINVAL;
}
if (blevel > llevel) {
- dev_warn(p->dev, "texture blevel %d > llevel %d\n",
- blevel, llevel);
+ dev_warn_once(p->dev, "texture blevel %d > llevel %d\n",
+ blevel, llevel);
}
if (is_array) {
barray = G_038014_BASE_ARRAY(word5);
@@ -1592,16 +1597,16 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
&l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
if ((l0_size + word2) > radeon_bo_size(texture)) {
- dev_warn(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
- w0, h0, pitch_align, height_align,
- array_check.array_mode, format, word2,
- l0_size, radeon_bo_size(texture));
- dev_warn(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
+ dev_warn_once(p->dev, "texture bo too small ((%d %d) (%d %d) %d %d %d -> %d have %ld)\n",
+ w0, h0, pitch_align, height_align,
+ array_check.array_mode, format, word2,
+ l0_size, radeon_bo_size(texture));
+ dev_warn_once(p->dev, "alignments %d %d %d %lld\n", pitch, pitch_align, height_align, base_align);
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
if ((mipmap_size + word3) > radeon_bo_size(mipmap)) {
- /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ /*dev_warn_once(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/
}
return 0;
@@ -1613,13 +1618,13 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
i = (reg >> 7);
if (i >= ARRAY_SIZE(r600_reg_safe_bm)) {
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return false;
}
m = 1 << ((reg >> 2) & 31);
if (!(r600_reg_safe_bm[i] & m))
return true;
- dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
return false;
}
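
The whitelist lookup above is a bitmap indexed by register offset: registers
are 4 bytes apart, so reg >> 2 is the register index, and shifting 5 more
bits (reg >> 7 in total) selects the 32-bit bitmap word; a set bit flags the
register for extra handling or rejection. A freestanding sketch of the same
indexing, with illustrative names:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool reg_is_safe(const uint32_t *safe_bm, size_t nwords, uint32_t reg)
{
	uint32_t word = reg >> 7;              /* 32 registers per word */
	uint32_t bit = 1u << ((reg >> 2) & 31); /* register's bit in the word */

	if (word >= nwords)
		return false; /* out of range: treat as forbidden */
	return !(safe_bm[word] & bit); /* clear bit means safe to write */
}
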
@@ -1648,7 +1653,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 1) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1660,13 +1665,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return 0;
if (pred_op > 2) {
- DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
+ dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op);
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET PREDICATION\n");
+ dev_warn_once(p->dev, "bad SET PREDICATION\n");
return -EINVAL;
}
@@ -1681,20 +1686,20 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
case PACKET3_START_3D_CMDBUF:
if (p->family >= CHIP_RV770 || pkt->count) {
- DRM_ERROR("bad START_3D\n");
+ dev_warn_once(p->dev, "bad START_3D\n");
return -EINVAL;
}
break;
case PACKET3_CONTEXT_CONTROL:
if (pkt->count != 1) {
- DRM_ERROR("bad CONTEXT_CONTROL\n");
+ dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n");
return -EINVAL;
}
break;
case PACKET3_INDEX_TYPE:
case PACKET3_NUM_INSTANCES:
if (pkt->count) {
- DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
+ dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES\n");
return -EINVAL;
}
break;
@@ -1702,12 +1707,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
{
uint64_t offset;
if (pkt->count != 3) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad DRAW_INDEX\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX\n");
return -EINVAL;
}
@@ -1720,37 +1725,37 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = r600_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
}
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
- DRM_ERROR("bad DRAW_INDEX_AUTO\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n");
return -EINVAL;
}
r = r600_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
return r;
}
break;
case PACKET3_DRAW_INDEX_IMMD_BE:
case PACKET3_DRAW_INDEX_IMMD:
if (pkt->count < 2) {
- DRM_ERROR("bad DRAW_INDEX_IMMD\n");
+ dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n");
return -EINVAL;
}
r = r600_cs_track_check(p);
if (r) {
- dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
return r;
}
break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
/* bit 4 is reg (0) or mem (1) */
@@ -1759,7 +1764,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad WAIT_REG_MEM\n");
+ dev_warn_once(p->dev, "bad WAIT_REG_MEM\n");
return -EINVAL;
}
@@ -1770,7 +1775,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
ib[idx+2] = upper_32_bits(offset) & 0xff;
} else if (idx_value & 0x100) {
- DRM_ERROR("cannot use PFP on REG wait\n");
+ dev_warn_once(p->dev, "cannot use PFP on REG wait\n");
return -EINVAL;
}
break;
@@ -1779,24 +1784,24 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u32 command, size;
u64 offset, tmp;
if (pkt->count != 4) {
- DRM_ERROR("bad CP DMA\n");
+ dev_warn_once(p->dev, "bad CP DMA\n");
return -EINVAL;
}
command = radeon_get_ib_value(p, idx+4);
size = command & 0x1fffff;
if (command & PACKET3_CP_DMA_CMD_SAS) {
/* src address space is register */
- DRM_ERROR("CP DMA SAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA SAS not supported\n");
return -EINVAL;
} else {
if (command & PACKET3_CP_DMA_CMD_SAIC) {
- DRM_ERROR("CP DMA SAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n");
return -EINVAL;
}
/* src address space is memory */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad CP DMA SRC\n");
+ dev_warn_once(p->dev, "bad CP DMA SRC\n");
return -EINVAL;
}
@@ -1806,8 +1811,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@@ -1816,17 +1821,17 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
if (command & PACKET3_CP_DMA_CMD_DAS) {
/* dst address space is register */
- DRM_ERROR("CP DMA DAS not supported\n");
+ dev_warn_once(p->dev, "CP DMA DAS not supported\n");
return -EINVAL;
} else {
/* dst address space is memory */
if (command & PACKET3_CP_DMA_CMD_DAIC) {
- DRM_ERROR("CP DMA DAIC only supported for registers\n");
+ dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad CP DMA DST\n");
+ dev_warn_once(p->dev, "bad CP DMA DST\n");
return -EINVAL;
}
@@ -1836,8 +1841,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
offset = reloc->gpu_offset + tmp;
if ((tmp + size) > radeon_bo_size(reloc->robj)) {
- dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
- tmp + size, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
+ tmp + size, radeon_bo_size(reloc->robj));
return -EINVAL;
}
@@ -1848,7 +1853,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_SURFACE_SYNC:
if (pkt->count != 3) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
/* 0xffffffff/0x0 is flush all cache flag */
@@ -1856,7 +1861,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
radeon_get_ib_value(p, idx + 2) != 0) {
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SURFACE_SYNC\n");
+ dev_warn_once(p->dev, "bad SURFACE_SYNC\n");
return -EINVAL;
}
ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1864,7 +1869,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_EVENT_WRITE:
if (pkt->count != 2 && pkt->count != 0) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
if (pkt->count) {
@@ -1872,7 +1877,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
offset = reloc->gpu_offset +
@@ -1888,12 +1893,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
uint64_t offset;
if (pkt->count != 4) {
- DRM_ERROR("bad EVENT_WRITE_EOP\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad EVENT_WRITE\n");
+ dev_warn_once(p->dev, "bad EVENT_WRITE\n");
return -EINVAL;
}
@@ -1911,7 +1916,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
@@ -1927,7 +1932,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
(start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
(end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
- DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
+ dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
@@ -1939,7 +1944,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_SET_RESOURCE:
if (pkt->count % 7) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
@@ -1947,7 +1952,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
(start_reg >= PACKET3_SET_RESOURCE_END) ||
(end_reg >= PACKET3_SET_RESOURCE_END)) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
for (i = 0; i < (pkt->count / 7); i++) {
@@ -1959,7 +1964,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* tex base */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
base_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1973,7 +1978,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* tex mip base */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
mip_offset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -1994,15 +1999,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* vtx base */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
- dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
- size + offset, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ size + offset, radeon_bo_size(reloc->robj));
ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj) - offset;
}
@@ -2015,7 +2020,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
case SQ_TEX_VTX_INVALID_TEXTURE:
case SQ_TEX_VTX_INVALID_BUFFER:
default:
- DRM_ERROR("bad SET_RESOURCE\n");
+ dev_warn_once(p->dev, "bad SET_RESOURCE\n");
return -EINVAL;
}
}
@@ -2027,7 +2032,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_ALU_CONST_END) ||
(end_reg >= PACKET3_SET_ALU_CONST_END)) {
- DRM_ERROR("bad SET_ALU_CONST\n");
+ dev_warn_once(p->dev, "bad SET_ALU_CONST\n");
return -EINVAL;
}
}
@@ -2038,7 +2043,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_BOOL_CONST_END) ||
(end_reg >= PACKET3_SET_BOOL_CONST_END)) {
- DRM_ERROR("bad SET_BOOL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_BOOL_CONST\n");
return -EINVAL;
}
break;
@@ -2048,7 +2053,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_LOOP_CONST_END) ||
(end_reg >= PACKET3_SET_LOOP_CONST_END)) {
- DRM_ERROR("bad SET_LOOP_CONST\n");
+ dev_warn_once(p->dev, "bad SET_LOOP_CONST\n");
return -EINVAL;
}
break;
@@ -2058,13 +2063,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
(start_reg >= PACKET3_SET_CTL_CONST_END) ||
(end_reg >= PACKET3_SET_CTL_CONST_END)) {
- DRM_ERROR("bad SET_CTL_CONST\n");
+ dev_warn_once(p->dev, "bad SET_CTL_CONST\n");
return -EINVAL;
}
break;
case PACKET3_SET_SAMPLER:
if (pkt->count % 3) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
@@ -2072,22 +2077,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
(start_reg >= PACKET3_SET_SAMPLER_END) ||
(end_reg >= PACKET3_SET_SAMPLER_END)) {
- DRM_ERROR("bad SET_SAMPLER\n");
+ dev_warn_once(p->dev, "bad SET_SAMPLER\n");
return -EINVAL;
}
break;
case PACKET3_STRMOUT_BASE_UPDATE:
/* RS780 and RS880 also need this */
if (p->family < CHIP_RS780) {
- DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+ dev_warn_once(p->dev, "STRMOUT_BASE_UPDATE only supported on 7xx\n");
return -EINVAL;
}
if (pkt->count != 1) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE packet count\n");
return -EINVAL;
}
if (idx_value > 3) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE index\n");
return -EINVAL;
}
{
@@ -2095,25 +2100,27 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE reloc\n");
return -EINVAL;
}
if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BASE_UPDATE, bo does not match\n");
return -EINVAL;
}
offset = (u64)radeon_get_ib_value(p, idx+1) << 8;
if (offset != track->vgt_strmout_bo_offset[idx_value]) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
- offset, track->vgt_strmout_bo_offset[idx_value]);
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+ offset, track->vgt_strmout_bo_offset[idx_value]);
return -EINVAL;
}
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
@@ -2121,17 +2128,17 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
break;
case PACKET3_SURFACE_BASE_UPDATE:
if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
- DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+ dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n");
return -EINVAL;
}
if (pkt->count) {
- DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
+ dev_warn_once(p->dev, "bad SURFACE_BASE_UPDATE\n");
return -EINVAL;
}
break;
case PACKET3_STRMOUT_BUFFER_UPDATE:
if (pkt->count != 4) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
return -EINVAL;
}
/* Updating memory at DST_ADDRESS. */
@@ -2139,14 +2146,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2158,14 +2166,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u64 offset;
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev,
+ "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2178,23 +2187,23 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
u64 offset;
if (pkt->count != 3) {
- DRM_ERROR("bad MEM_WRITE (invalid count)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n");
return -EINVAL;
}
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+0);
offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
if (offset & 0x7) {
- DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
+ dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n");
return -EINVAL;
}
if ((offset + 8) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
- offset + 8, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
+ offset + 8, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2204,7 +2213,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
case PACKET3_COPY_DW:
if (pkt->count != 4) {
- DRM_ERROR("bad COPY_DW (invalid count)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n");
return -EINVAL;
}
if (idx_value & 0x1) {
@@ -2212,14 +2221,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* SRC is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad COPY_DW (missing src reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1);
offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2236,14 +2245,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
/* DST is memory. */
r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
if (r) {
- DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
+ dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n");
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+3);
offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
if ((offset + 4) > radeon_bo_size(reloc->robj)) {
- DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
- offset + 4, radeon_bo_size(reloc->robj));
+ dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
return -EINVAL;
}
offset += reloc->gpu_offset;
@@ -2259,7 +2268,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
case PACKET3_NOP:
break;
default:
- DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
+ dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode);
return -EINVAL;
}
return 0;
@@ -2306,7 +2315,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
r = r600_packet3_check(p, &pkt);
break;
default:
- DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type);
kfree(p->track);
p->track = NULL;
return -EINVAL;
@@ -2346,13 +2355,13 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
*cs_reloc = NULL;
if (p->chunk_relocs == NULL) {
- DRM_ERROR("No relocation chunk !\n");
+ dev_warn_once(p->dev, "No relocation chunk !\n");
return -EINVAL;
}
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
- DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
- idx, p->nrelocs);
+ dev_warn_once(p->dev, "Relocs at %d after relocations chunk end %d !\n",
+ idx, p->nrelocs);
return -EINVAL;
}
*cs_reloc = &p->relocs[idx];
@@ -2385,8 +2394,8 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
do {
if (p->idx >= ib_chunk->length_dw) {
- DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
- p->idx, ib_chunk->length_dw);
+ dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n",
+ p->idx, ib_chunk->length_dw);
return -EINVAL;
}
idx = p->idx;
@@ -2399,7 +2408,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
case DMA_PACKET_WRITE:
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
if (tiled) {
@@ -2417,20 +2426,20 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += count + 3;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_COPY:
r = r600_dma_cs_next_reloc(p, &src_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_COPY\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n");
return -EINVAL;
}
if (tiled) {
@@ -2484,31 +2493,31 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
}
}
if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
- dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
- src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+ dev_warn_once(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
+ src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
return -EINVAL;
}
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
break;
case DMA_PACKET_CONSTANT_FILL:
if (p->family < CHIP_RV770) {
- DRM_ERROR("Constant Fill is 7xx only !\n");
+ dev_warn_once(p->dev, "Constant Fill is 7xx only !\n");
return -EINVAL;
}
r = r600_dma_cs_next_reloc(p, &dst_reloc);
if (r) {
- DRM_ERROR("bad DMA_PACKET_WRITE\n");
+ dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n");
return -EINVAL;
}
dst_offset = radeon_get_ib_value(p, idx+1);
dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16;
if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
- dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+ dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
+ dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
return -EINVAL;
}
ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc);
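
The constant-fill destination above is a 40-bit address split across two
dwords: the low 32 bits come from one dword, and bits 39:32 are carried in
bits 23:16 of a later dword, hence the 0x00ff0000 mask and the 16-bit shift.
A minimal sketch of the reassembly, helper name illustrative:

#include <stdint.h>

static uint64_t dma_fill_dst_offset(uint32_t dw_lo, uint32_t dw_hi)
{
	uint64_t offset = dw_lo;

	/* bits 23:16 of dw_hi land at bits 39:32 of the address */
	offset |= ((uint64_t)(dw_hi & 0x00ff0000)) << 16;
	return offset;
}
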
@@ -2519,7 +2528,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
p->idx += 1;
break;
default:
- DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
+ dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
} while (p->idx < p->chunk_ib->length_dw);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 63c47585afbc..527b9d19d730 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -80,6 +80,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_audio_component.h>
#include <drm/drm_suballoc.h>
+#include <drm/drm_print.h>
#include "radeon_family.h"
#include "radeon_mode.h"
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 22ce61bdfc06..08f8ba4fd148 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -408,7 +408,6 @@ static int radeon_atif_handler(struct radeon_device *rdev,
pm_runtime_get_sync(rdev_to_drm(rdev)->dev);
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(rdev_to_drm(rdev));
- pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 9f6a3df951ba..012d8b2295b8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -875,10 +875,8 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1066,10 +1064,8 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1154,10 +1150,8 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1402,10 +1396,8 @@ out:
}
exit:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
@@ -1714,10 +1706,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker())
pm_runtime_put_autosuspend(connector->dev->dev);
- }
return ret;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index b8e6202f1d5b..3f9c0011244f 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -834,7 +834,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++)
- DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+ dev_dbg(p->dev, "ib[%d]=0x%08X\n", idx, ib[idx]);
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7a3e510327b7..60afaa8e56b4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -554,7 +554,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* cover the whole aperture even if VRAM size is inferior to aperture size
* Novell bug 204882, along with lots of Ubuntu ones
*
- * Note 3: when limiting vram it's safe to overwritte real_vram_size because
+ * Note 3: when limiting vram it's safe to overwrite real_vram_size because
* we are not in the case where real_vram_size is inferior to mc_vram_size (i.e.
* not affected by the bogus hw of Novell bug 204882, along with lots of Ubuntu
* ones)
@@ -562,7 +562,7 @@ int radeon_wb_init(struct radeon_device *rdev)
* Note 4: IGP TOM addr should be the same as the aperture addr; we don't
* explicitly check for that, though.
*
- * FIXME: when reducing VRAM size align new size on power of 2.
+ * FIXME: when reducing VRAM size, align new size on power of 2.
*/
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
@@ -1635,7 +1635,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
}
if (notify_clients)
- drm_client_dev_suspend(dev, false);
+ drm_client_dev_suspend(dev);
return 0;
}
@@ -1739,7 +1739,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients)
radeon_pm_compute_clocks(rdev);
if (notify_clients)
- drm_client_dev_resume(dev, false);
+ drm_client_dev_resume(dev);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 4dc77c398617..35fb99bcd9a7 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -644,8 +644,6 @@ radeon_crtc_set_config(struct drm_mode_set *set,
if (crtc->enabled)
active = true;
- pm_runtime_mark_last_busy(dev->dev);
-
rdev = dev->dev_private;
/* if we have active crtcs and we don't have a power ref,
take the current one */
@@ -926,10 +924,10 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned *fb_div, unsigned *ref_div)
{
/* limit reference * post divider to a maximum */
- ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
+ ref_div_max = clamp(100 / post_div, 1u, ref_div_max);
/* get matching reference and feedback divider */
- *ref_div = min(max(den/post_div, 1u), ref_div_max);
+ *ref_div = clamp(den / post_div, 1u, ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
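
clamp(val, lo, hi) bounds a value from both sides in one call, so
clamp(100 / post_div, 1u, ref_div_max) keeps the reference divider at least
1 even for large post dividers. A freestanding sketch of the semantics (the
kernel macro additionally type-checks its arguments):

static unsigned int clamp_uint(unsigned int val, unsigned int lo,
			       unsigned int hi)
{
	if (val < lo)
		return lo;
	if (val > hi)
		return hi;
	return val;
}
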
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 88e821d67af7..87fd6255c114 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -241,12 +241,12 @@ module_param_named(uvd, radeon_uvd, int, 0444);
MODULE_PARM_DESC(vce, "vce enable/disable vce support (1 = enable, 0 = disable)");
module_param_named(vce, radeon_vce, int, 0444);
-int radeon_si_support = 1;
-MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
+int radeon_si_support = -1;
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(si_support, radeon_si_support, int, 0444);
-int radeon_cik_support = 1;
-MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
+int radeon_cik_support = -1;
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled, -1 = default)");
module_param_named(cik_support, radeon_cik_support, int, 0444);
static const struct pci_device_id pciidlist[] = {
@@ -256,12 +256,60 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static const struct drm_driver kms_driver;
+static bool radeon_support_enabled(struct device *dev,
+ const enum radeon_family family)
+{
+ const char *gen;
+ int module_param = -1;
+ bool amdgpu_support_built = IS_ENABLED(CONFIG_DRM_AMDGPU);
+ bool support_by_default = true;
+
+ switch (family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+ gen = "SI";
+ module_param = radeon_si_support;
+ amdgpu_support_built &= IS_ENABLED(CONFIG_DRM_AMDGPU_SI);
+ support_by_default = false;
+ break;
+
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ support_by_default = false;
+ fallthrough;
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ gen = "CIK";
+ module_param = radeon_cik_support;
+ amdgpu_support_built &= IS_ENABLED(CONFIG_DRM_AMDGPU_CIK);
+ break;
+
+ default:
+ /* All other chips are supported by radeon only */
+ return true;
+ }
+
+ if ((module_param == -1 && (support_by_default || !amdgpu_support_built)) ||
+ module_param == 1)
+ return true;
+
+ if (!module_param)
+ dev_info(dev, "%s support disabled by module param\n", gen);
+
+ return false;
+}
+
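+ A compact restatement of the tri-state resolution above, assuming the
+ module parameter only ever takes -1, 0, or 1 (illustrative helper, not
+ driver code): 1 forces the driver on, 0 forces it off, and -1 defers to
+ whether amdgpu was built for this chip generation.
+
+ #include <stdbool.h>
+
+ static bool support_enabled(int param, bool by_default, bool amdgpu_built)
+ {
+ 	if (param == 1)
+ 		return true;
+ 	if (param == 0)
+ 		return false;
+ 	/* param == -1: bind unless amdgpu is the preferred, built-in choice */
+ 	return by_default || !amdgpu_built;
+ }
+
+ For example, with CONFIG_DRM_AMDGPU_SI enabled and si_support left at -1,
+ SI chips fall through to amdgpu; booting with radeon.si_support=1 still
+ forces radeon to bind.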
static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long flags = 0;
struct drm_device *ddev;
struct radeon_device *rdev;
+ struct device *dev = &pdev->dev;
const struct drm_format_info *format;
int ret;
@@ -270,30 +318,8 @@ static int radeon_pci_probe(struct pci_dev *pdev,
flags = ent->driver_data;
- if (!radeon_si_support) {
- switch (flags & RADEON_FAMILY_MASK) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- dev_info(&pdev->dev,
- "SI support disabled by module param\n");
- return -ENODEV;
- }
- }
- if (!radeon_cik_support) {
- switch (flags & RADEON_FAMILY_MASK) {
- case CHIP_KAVERI:
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- dev_info(&pdev->dev,
- "CIK support disabled by module param\n");
- return -ENODEV;
- }
- }
+ if (!radeon_support_enabled(dev, flags & RADEON_FAMILY_MASK))
+ return -ENODEV;
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
@@ -303,28 +329,28 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- rdev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*rdev), ddev);
+ rdev = devm_drm_dev_alloc(dev, &kms_driver, typeof(*rdev), ddev);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
- rdev->dev = &pdev->dev;
+ rdev->dev = dev;
rdev->pdev = pdev;
ddev = rdev_to_drm(rdev);
ddev->dev_private = rdev;
ret = pci_enable_device(pdev);
if (ret)
- goto err_free;
+ return ret;
pci_set_drvdata(pdev, ddev);
ret = radeon_driver_load_kms(ddev, flags);
if (ret)
- goto err_agp;
+ goto err;
ret = drm_dev_register(ddev, flags);
if (ret)
- goto err_agp;
+ goto err;
if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
format = drm_format_info(DRM_FORMAT_C8);
@@ -337,30 +363,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return 0;
-err_agp:
+err:
pci_disable_device(pdev);
-err_free:
- drm_dev_put(ddev);
return ret;
}
static void
-radeon_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_put_dev(dev);
-}
-
-static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
- /* if we are running in a VM, make sure the device
- * torn down properly on reboot/shutdown
- */
- if (radeon_device_is_virtual())
- radeon_pci_remove(pdev);
-
#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
/*
* Some adapters need to be suspended before a
@@ -477,7 +487,6 @@ static int radeon_pmops_runtime_idle(struct device *dev)
}
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
return 1;
@@ -499,7 +508,6 @@ long radeon_drm_ioctl(struct file *filp,
ret = drm_ioctl(filp, cmd, arg);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
@@ -613,7 +621,6 @@ static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = radeon_pci_probe,
- .remove = radeon_pci_remove,
.shutdown = radeon_pci_shutdown,
.driver.pm = &radeon_pm_ops,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index dc81b0c2dbff..fd083aaa91bb 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -154,7 +154,6 @@ static int radeon_fbdev_fb_open(struct fb_info *info, int user)
return 0;
err_pm_runtime_mark_last_busy:
- pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
return ret;
}
@@ -164,7 +163,6 @@ static int radeon_fbdev_fb_release(struct fb_info *info, int user)
struct drm_fb_helper *fb_helper = info->par;
struct radeon_device *rdev = fb_helper->dev->dev_private;
- pm_runtime_mark_last_busy(rdev_to_drm(rdev)->dev);
pm_runtime_put_autosuspend(rdev_to_drm(rdev)->dev);
return 0;
@@ -184,8 +182,6 @@ static void radeon_fbdev_fb_destroy(struct fb_info *info)
radeon_fbdev_destroy_pinned_object(gobj);
drm_client_release(&fb_helper->client);
- drm_fb_helper_unprepare(fb_helper);
- kfree(fb_helper);
}
static const struct fb_ops radeon_fbdev_fb_ops = {
@@ -206,7 +202,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct radeon_device *rdev = fb_helper->dev->dev_private;
const struct drm_format_info *format_info;
struct drm_mode_fb_cmd2 mode_cmd = { };
- struct fb_info *info;
+ struct fb_info *info = fb_helper->info;
struct drm_gem_object *gobj;
struct radeon_bo *rbo;
struct drm_framebuffer *fb;
@@ -247,13 +243,6 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
fb_helper->funcs = &radeon_fbdev_fb_helper_funcs;
fb_helper->fb = fb;
- /* okay we have an object now allocate the framebuffer */
- info = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_drm_framebuffer_unregister_private;
- }
-
info->fbops = &radeon_fbdev_fb_ops;
/* radeon resume is fragile and needs a vt switch to help it along */
@@ -279,10 +268,6 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
return 0;
-err_drm_framebuffer_unregister_private:
- fb_helper->fb = NULL;
- drm_framebuffer_unregister_private(fb);
- drm_framebuffer_cleanup(fb);
err_kfree:
kfree(fb);
err_radeon_fbdev_destroy_pinned_object:
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5b5b54e876d4..167d6f122b8e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(struct dma_fence *f)
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
return true;
- if (down_read_trylock(&rdev->exclusive_lock)) {
- radeon_fence_process(rdev, ring);
- up_read(&rdev->exclusive_lock);
-
- if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
- return true;
- }
return false;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4bb242437ff6..acd89a20f272 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -346,14 +346,14 @@ int radeon_gart_init(struct radeon_device *rdev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
- rdev->gart.pages = vzalloc(array_size(sizeof(void *),
- rdev->gart.num_cpu_pages));
+ rdev->gart.pages = vcalloc(rdev->gart.num_cpu_pages,
+ sizeof(void *));
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_entry = vmalloc(array_size(sizeof(uint64_t),
- rdev->gart.num_gpu_pages));
+ rdev->gart.pages_entry = vmalloc_array(rdev->gart.num_gpu_pages,
+ sizeof(uint64_t));
if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
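
vcalloc() and vmalloc_array() fold the count-times-size multiplication and
its overflow check into the allocator, instead of open-coding array_size()
at every call site. A userspace analogue of the guarded multiply, purely
illustrative (calloc in common libcs performs the same check):

#include <stdint.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL; /* n * size would wrap to a short allocation */
	return calloc(n, size);
}
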
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f86773f3db20..18ca1bcfd2f9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -86,7 +86,7 @@ static void radeon_gem_object_free(struct drm_gem_object *gobj)
if (robj) {
radeon_mn_unregister(robj);
- ttm_bo_put(&robj->tbo);
+ ttm_bo_fini(&robj->tbo);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 645e33bf7947..7cbe02ffb193 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)
rdev->agp = NULL;
done_free:
- kfree(rdev);
dev->dev_private = NULL;
}
@@ -170,7 +169,6 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
pm_runtime_allow(dev->dev);
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
@@ -677,7 +675,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
file_priv->driver_priv = fpriv;
}
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
@@ -687,7 +684,6 @@ err_fpriv:
kfree(fpriv);
err_suspend:
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
}
@@ -737,7 +733,6 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
kfree(fpriv);
file_priv->driver_priv = NULL;
}
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index d6aa1a3012a8..d1e8b9757a65 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -136,9 +136,9 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
}
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -545,9 +545,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -742,9 +742,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl);
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -908,9 +908,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -1113,9 +1113,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
}
if (rdev->is_atom_bios)
- radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
else
- radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+ radeon_combios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index b4fb7e70320b..a855a96dd2ea 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -907,8 +907,7 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
static bool radeon_dpm_single_display(struct radeon_device *rdev)
{
- bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ bool single_display = rdev->pm.dpm.new_active_crtc_count < 2;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && rdev->asic->dpm.vblank_too_short) {
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index c9fef9b61ced..818554e60537 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -455,7 +455,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringC, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ DRM_ERROR("Failed to lock ring C %p\n", ringC);
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
@@ -481,7 +481,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,
r = radeon_ring_lock(rdev, ringC, 64);
if (r) {
- DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ DRM_ERROR("Failed to lock ring C %p\n", ringC);
goto out_cleanup;
}
radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 616d25c8c2de..695ac32f7535 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -683,8 +683,10 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
rdev_to_drm(rdev)->anon_inode->i_mapping,
rdev_to_drm(rdev)->vma_offset_manager,
- rdev->need_swiotlb,
- dma_addressing_limited(&rdev->pdev->dev));
+ (rdev->need_swiotlb ?
+ TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0) |
+ (dma_addressing_limited(&rdev->pdev->dev) ?
+ TTM_ALLOCATION_POOL_USE_DMA32 : 0));
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
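
The two booleans that used to be separate arguments are now folded into one
flags word, each selecting its pool flag via a conditional OR. A sketch of
the composition; the TTM_ALLOCATION_POOL_* names come from the diff, the
values and helper here are illustrative:

#include <stdint.h>

#define POOL_USE_DMA_ALLOC (1u << 0) /* illustrative values */
#define POOL_USE_DMA32     (1u << 1)

static uint32_t pool_flags(int need_swiotlb, int dma_limited)
{
	return (need_swiotlb ? POOL_USE_DMA_ALLOC : 0) |
	       (dma_limited ? POOL_USE_DMA32 : 0);
}
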
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 2355a78e1b69..bdbc1bbe8a9b 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -86,7 +86,7 @@ int radeon_vce_init(struct radeon_device *rdev)
r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
if (r) {
- dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
+ dev_err(rdev->dev, "radeon_vce: can't load firmware \"%s\"\n",
fw_name);
return r;
}
@@ -126,7 +126,7 @@ int radeon_vce_init(struct radeon_device *rdev)
rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
- /* we can only work with this fw version for now */
+ /* we can only work with these fw versions for now */
if ((rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8))) &&
(rdev->vce.fw_version != ((50 << 24) | (0 << 16) | (1 << 8))) &&
(rdev->vce.fw_version != ((50 << 24) | (1 << 16) | (2 << 8))))
@@ -281,7 +281,7 @@ static void radeon_vce_idle_work_handler(struct work_struct *work)
*
* @rdev: radeon_device pointer
*
- * Make sure VCE is powerd up when we want to use it
+ * Make sure VCE is powered up when we want to use it
*/
void radeon_vce_note_usage(struct radeon_device *rdev)
{
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c
index 7e175dbfd892..2e2906ab750b 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_crtc.c
@@ -17,6 +17,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "rcar_cmm.h"
@@ -993,7 +994,7 @@ static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
rcar_du_crtc_crc_cleanup(rcrtc);
- return drm_crtc_cleanup(crtc);
+ drm_crtc_cleanup(crtc);
}
static void rcar_du_crtc_reset(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
index d948ff3594c4..031d07f4508e 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_drv.c
@@ -24,6 +24,7 @@
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "rcar_du_drv.h"
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
index 216219accfd9..6294443f6068 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c
@@ -11,6 +11,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -407,8 +408,8 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct rcar_du_device *rcdu = to_rcar_du_device(dev);
- unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
+ int ret;
/*
* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
@@ -419,7 +420,9 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
else
align = 16 * args->bpp / 8;
- args->pitch = roundup(min_pitch, align);
+ ret = drm_mode_size_dumb(dev, args, align, 0);
+ if (ret)
+ return ret;
return drm_gem_dma_dumb_create_internal(file, dev, args);
}
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index af58b814e588..001b3543924a 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -1013,7 +1013,7 @@ err_reset_assert:
}
static const struct dev_pm_ops rcar_lvds_pm_ops = {
- SET_RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL)
+ RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL)
};
static struct platform_driver rcar_lvds_platform_driver = {
@@ -1021,7 +1021,7 @@ static struct platform_driver rcar_lvds_platform_driver = {
.remove = rcar_lvds_remove,
.driver = {
.name = "rcar-lvds",
- .pm = &rcar_lvds_pm_ops,
+ .pm = pm_ptr(&rcar_lvds_pm_ops),
.of_match_table = rcar_lvds_of_table,
},
};
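RUNTIME_PM_OPS() plus pm_ptr() is the modern replacement for SET_RUNTIME_PM_OPS(): when CONFIG_PM is disabled, pm_ptr() evaluates to NULL and both the ops table and the callbacks can be discarded, without tagging the callbacks __maybe_unused. A minimal sketch with hypothetical names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_runtime_suspend(struct device *dev) { return 0; }
static int foo_runtime_resume(struct device *dev) { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
	RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		/* NULL when CONFIG_PM=n, letting the linker drop the ops. */
		.pm = pm_ptr(&foo_pm_ops),
	},
};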
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 1af4c73f7a88..9413b76d0bfc 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -5,6 +5,7 @@
* Copyright (C) 2020 Renesas Electronics Corporation
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -71,6 +72,7 @@ struct rcar_mipi_dsi {
} clocks;
enum mipi_dsi_pixel_format format;
+ unsigned long mode_flags;
unsigned int num_data_lanes;
unsigned int lanes;
};
@@ -316,8 +318,8 @@ rcar_mipi_dsi_post_init_phtw_v4h(struct rcar_mipi_dsi *dsi,
WRITE_PHTW(0x01020100, 0x00000180);
ret = read_poll_timeout(rcar_mipi_dsi_read, status,
- status & PHTR_TEST, 2000, 10000, false,
- dsi, PHTR);
+ status & PHTR_TESTDOUT_TEST,
+ 2000, 10000, false, dsi, PHTR);
if (ret < 0) {
dev_err(dsi->dev, "failed to test PHTR\n");
return ret;
@@ -457,29 +459,43 @@ static void rcar_mipi_dsi_set_display_timing(struct rcar_mipi_dsi *dsi,
u32 vprmset4r;
/* Configuration for Pixel Stream and Packet Header */
- if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 24)
+ switch (mipi_dsi_pixel_format_to_bpp(dsi->format)) {
+ case 24:
rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB24);
- else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 18)
+ break;
+ case 18:
rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB18);
- else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 16)
+ break;
+ case 16:
rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB16);
- else {
+ break;
+ default:
dev_warn(dsi->dev, "unsupported format");
return;
}
/* Configuration for Blanking sequence and Input Pixel */
- setr = TXVMSETR_HSABPEN_EN | TXVMSETR_HBPBPEN_EN
- | TXVMSETR_HFPBPEN_EN | TXVMSETR_SYNSEQ_PULSES
- | TXVMSETR_PIXWDTH | TXVMSETR_VSTPM;
+ setr = TXVMSETR_PIXWDTH | TXVMSETR_VSTPM;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
+ setr |= TXVMSETR_SYNSEQ_EVENTS;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP))
+ setr |= TXVMSETR_HFPBPEN;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP))
+ setr |= TXVMSETR_HBPBPEN;
+ if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA))
+ setr |= TXVMSETR_HSABPEN;
+ }
+
rcar_mipi_dsi_write(dsi, TXVMSETR, setr);
- /* Configuration for Video Parameters */
- vprmset0r = (mode->flags & DRM_MODE_FLAG_PVSYNC ?
- TXVMVPRMSET0R_VSPOL_HIG : TXVMVPRMSET0R_VSPOL_LOW)
- | (mode->flags & DRM_MODE_FLAG_PHSYNC ?
- TXVMVPRMSET0R_HSPOL_HIG : TXVMVPRMSET0R_HSPOL_LOW)
- | TXVMVPRMSET0R_CSPC_RGB | TXVMVPRMSET0R_BPP_24;
+ /* Configuration for Video Parameters, input is always RGB888 */
+ vprmset0r = TXVMVPRMSET0R_BPP_24;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ vprmset0r |= TXVMVPRMSET0R_VSPOL_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ vprmset0r |= TXVMVPRMSET0R_HSPOL_LOW;
vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay)
| TXVMVPRMSET1R_VSA(mode->vsync_end - mode->vsync_start);
@@ -576,7 +592,10 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
udelay(10);
rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL);
- ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN;
+ rcar_mipi_dsi_clr(dsi, TXSETR, TXSETR_LANECNT_MASK);
+ rcar_mipi_dsi_set(dsi, TXSETR, dsi->lanes - 1);
+
+ ppisetr = ((BIT(dsi->lanes) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN;
rcar_mipi_dsi_write(dsi, PPISETR, ppisetr);
rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ);
@@ -617,6 +636,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
vclkset = VCLKSET_CKEN;
rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
+ /* Output is always RGB, never YCbCr */
if (dsi_format == 24)
vclkset |= VCLKSET_BPP_24;
else if (dsi_format == 18)
@@ -628,7 +648,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
return -EINVAL;
}
- vclkset |= VCLKSET_COLOR_RGB | VCLKSET_LANE(dsi->lanes - 1);
+ vclkset |= VCLKSET_LANE(dsi->lanes - 1);
switch (dsi->info->model) {
case RCAR_DSI_V3U:
@@ -908,6 +928,7 @@ static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host,
dsi->lanes = device->lanes;
dsi->format = device->format;
+ dsi->mode_flags = device->mode_flags;
dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node,
1, 0);
@@ -934,9 +955,234 @@ static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
+static ssize_t rcar_mipi_dsi_host_tx_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg,
+ bool is_rx_xfer)
+{
+ const bool is_tx_long = mipi_dsi_packet_format_is_long(msg->type);
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ struct mipi_dsi_packet packet;
+ u8 payload[16] = { 0 };
+ u32 status;
+ int ret;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret)
+ return ret;
+
+ /* Configure LP or HS command transfer. */
+ rcar_mipi_dsi_write(dsi, TXCMSETR, (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
+ TXCMSETR_SPDTYP : 0);
+
+ /* Register access mode for RX transfer. */
+ if (is_rx_xfer)
+ rcar_mipi_dsi_write(dsi, RXPSETR, 0);
+
+ /* Do not use the IRQ; poll for completion instead, which is quick. */
+ rcar_mipi_dsi_write(dsi, TXCMIER, 0);
+
+ /*
+ * Send the header:
+ * header[0] = Virtual Channel + Data Type
+ * header[1] = Word Count LSB (LP) or first param (SP)
+ * header[2] = Word Count MSB (LP) or second param (SP)
+ */
+ rcar_mipi_dsi_write(dsi, TXCMPHDR,
+ (is_tx_long ? TXCMPHDR_FMT : 0) |
+ TXCMPHDR_VC(msg->channel) |
+ TXCMPHDR_DT(msg->type) |
+ TXCMPHDR_DATA1(packet.header[2]) |
+ TXCMPHDR_DATA0(packet.header[1]));
+
+ if (is_tx_long) {
+ memcpy(payload, packet.payload,
+ min(msg->tx_len, sizeof(payload)));
+
+ rcar_mipi_dsi_write(dsi, TXCMPPD0R,
+ (payload[3] << 24) | (payload[2] << 16) |
+ (payload[1] << 8) | payload[0]);
+ rcar_mipi_dsi_write(dsi, TXCMPPD1R,
+ (payload[7] << 24) | (payload[6] << 16) |
+ (payload[5] << 8) | payload[4]);
+ rcar_mipi_dsi_write(dsi, TXCMPPD2R,
+ (payload[11] << 24) | (payload[10] << 16) |
+ (payload[9] << 8) | payload[8]);
+ rcar_mipi_dsi_write(dsi, TXCMPPD3R,
+ (payload[15] << 24) | (payload[14] << 16) |
+ (payload[13] << 8) | payload[12]);
+ }
+
+ /* Start the transfer, RX with BTA, TX without BTA. */
+ if (is_rx_xfer) {
+ rcar_mipi_dsi_write(dsi, TXCMCR, TXCMCR_BTAREQ);
+
+ /* Wait until the transmission, BTA and reception have completed. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ (status & RXPSR_BTAREQEND),
+ 2000, 50000, false, dsi, RXPSR);
+ } else {
+ rcar_mipi_dsi_write(dsi, TXCMCR, TXCMCR_TXREQ);
+
+ /* Wait until the transmission has completed. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ (status & TXCMSR_TXREQEND),
+ 2000, 50000, false, dsi, TXCMSR);
+ }
+
+ if (ret < 0) {
+ dev_err(dsi->dev, "Command transfer timeout (0x%08x)\n",
+ status);
+ return ret;
+ }
+
+ return packet.size;
+}
+
+static ssize_t rcar_mipi_dsi_host_rx_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ u8 *rx_buf = (u8 *)(msg->rx_buf);
+ u32 reg, data, status, wc;
+ int i, ret;
+
+ /* Validation and parsing of the received RX data starts here. */
+ reg = rcar_mipi_dsi_read(dsi, TOSR);
+ if (reg & TOSR_TATO) { /* Turn-Around TimeOut. */
+ /* Clear TATO Turn-Around TimeOut bit. */
+ rcar_mipi_dsi_write(dsi, TOSR, TOSR_TATO);
+ return -ETIMEDOUT;
+ }
+
+ reg = rcar_mipi_dsi_read(dsi, RXPSR);
+
+ if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
+ /* Transfer with zero-length RX. */
+ if (!(reg & RXPSR_RCVACK)) {
+ /* No ACK on RX response received. */
+ return -EINVAL;
+ }
+ } else {
+ /* Transfer with non-zero-length RX. */
+ if (!(reg & RXPSR_RCVRESP)) {
+ /* No packet header of RX response received. */
+ return -EINVAL;
+ }
+
+ if (reg & (RXPSR_CRCERR | RXPSR_WCERR | RXPSR_AXIERR | RXPSR_OVRERR)) {
+ /* Incorrect response payload. */
+ return -ENODATA;
+ }
+
+ data = rcar_mipi_dsi_read(dsi, RXPHDR);
+ if (data & RXPHDR_FMT) { /* Long Packet Response. */
+ /* Read Long Packet Response length from packet header. */
+ wc = data & 0xffff;
+ if (wc > msg->rx_len) {
+ dev_warn(dsi->dev,
+ "Long Packet Response longer than RX buffer (%d), limited to %zu Bytes\n",
+ wc, msg->rx_len);
+ wc = msg->rx_len;
+ }
+
+ if (wc > 16) {
+ dev_warn(dsi->dev,
+ "Long Packet Response too long (%d), limited to 16 Bytes\n",
+ wc);
+ wc = 16;
+ }
+
+ for (i = 0; i < wc; i++) {
+ if (!(i % 4))
+ data = rcar_mipi_dsi_read(dsi, RXPPD0R + i);
+
+ rx_buf[i] = data & 0xff;
+ data >>= 8;
+ }
+ } else { /* Short Packet Response. */
+ if (msg->rx_len >= 1)
+ rx_buf[0] = data & 0xff;
+ if (msg->rx_len >= 2)
+ rx_buf[1] = (data >> 8) & 0xff;
+ if (msg->rx_len >= 3) {
+ dev_warn(dsi->dev,
+ "Expected Short Packet Response too long (%zu), limited to 2 Bytes\n",
+ msg->rx_len);
+ }
+ }
+ }
+
+ if (reg & RXPSR_RCVAKE) {
+ /* Acknowledge and Error report received. */
+ return -EFAULT;
+ }
+
+ /* Wait until the bus handover to the host processor has completed. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ !(status & PPIDL0SR_DIR),
+ 2000, 50000, false, dsi, PPIDL0SR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Command RX DIR timeout (0x%08x)\n", status);
+ return ret;
+ }
+
+ /* Wait until the data lane is in LP11 stop state. */
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & PPIDL0SR_STPST,
+ 2000, 50000, false, dsi, PPIDL0SR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "Command RX STPST timeout (0x%08x)\n", status);
+ return ret;
+ }
+
+ return 0;
+}
+
+static ssize_t rcar_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ const bool is_rx_xfer = (msg->flags & MIPI_DSI_MSG_REQ_ACK) || msg->rx_len;
+ struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host);
+ int ret;
+
+ if (msg->tx_len > 16 || msg->rx_len > 16) {
+ /* TODO: Implement the memory-on-AXI-bus command mode. */
+ dev_warn(dsi->dev,
+ "Register-based command mode supports only payloads of up to 16 bytes\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = rcar_mipi_dsi_host_tx_transfer(host, msg, is_rx_xfer);
+
+ /* If the TX transfer succeeded and this transfer has an RX part. */
+ if (ret >= 0 && is_rx_xfer) {
+ ret = rcar_mipi_dsi_host_rx_transfer(host, msg);
+ if (ret)
+ return ret;
+
+ ret = msg->rx_len;
+ }
+
+ /*
+ * Wait a bit between commands, otherwise panels based on ILI9881C
+ * TCON may fail to correctly receive all commands sent to them.
+ * Until we can actually test with another DSI device, keep the
+ * delay here, but eventually this delay might have to be moved
+ * into the ILI9881C panel driver.
+ */
+ usleep_range(1000, 2000);
+
+ /* Clear the completion interrupt. */
+ if (!msg->rx_len)
+ rcar_mipi_dsi_write(dsi, TXCMSR, TXCMSR_TXREQEND);
+
+ return ret;
+}
+
static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = {
.attach = rcar_mipi_dsi_host_attach,
.detach = rcar_mipi_dsi_host_detach,
+ .transfer = rcar_mipi_dsi_host_transfer
};
/* -----------------------------------------------------------------------------
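With .transfer implemented, panel drivers attached to this host can use the generic MIPI DSI helpers, which all funnel into rcar_mipi_dsi_host_transfer(). A hypothetical panel-side sequence exercising both the TX-only and the BTA/RX paths:

#include <drm/drm_mipi_dsi.h>

static int panel_setup(struct mipi_dsi_device *dsi)
{
	u8 id = 0;
	int ret;

	/* DCS short write: a TX-only, register-mode transfer. */
	ret = mipi_dsi_dcs_set_display_on(dsi);
	if (ret < 0)
		return ret;

	/*
	 * DCS read: TX of the read request, then BTA and RX parsing.
	 * 0xda is an illustrative vendor ID register.
	 */
	ret = mipi_dsi_dcs_read(dsi, 0xda, &id, sizeof(id));
	if (ret < 0)
		return ret;

	return 0;
}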
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
index a6b276f1d6ee..b6fb58c2f9f6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h
@@ -9,167 +9,311 @@
#define __RCAR_MIPI_DSI_REGS_H__
#define LINKSR 0x010
-#define LINKSR_LPBUSY (1 << 1)
-#define LINKSR_HSBUSY (1 << 0)
+#define LINKSR_LPBUSY BIT_U32(1)
+#define LINKSR_HSBUSY BIT_U32(0)
+
+#define TXSETR 0x100
+#define TXSETR_LANECNT_MASK GENMASK_U32(1, 0)
+
+/*
+ * DSI Command Transfer Registers
+ */
+#define TXCMSETR 0x110
+#define TXCMSETR_SPDTYP BIT_U32(8) /* 0:HS 1:LP */
+#define TXCMSETR_LPPDACC BIT_U32(0)
+#define TXCMCR 0x120
+#define TXCMCR_BTATYP BIT_U32(2)
+#define TXCMCR_BTAREQ BIT_U32(1)
+#define TXCMCR_TXREQ BIT_U32(0)
+#define TXCMSR 0x130
+#define TXCMSR_CLSNERR BIT_U32(18)
+#define TXCMSR_AXIERR BIT_U32(16)
+#define TXCMSR_TXREQEND BIT_U32(0)
+#define TXCMSCR 0x134
+#define TXCMSCR_CLSNERR BIT_U32(18)
+#define TXCMSCR_AXIERR BIT_U32(16)
+#define TXCMSCR_TXREQEND BIT_U32(0)
+#define TXCMIER 0x138
+#define TXCMIER_CLSNERR BIT_U32(18)
+#define TXCMIER_AXIERR BIT_U32(16)
+#define TXCMIER_TXREQEND BIT_U32(0)
+#define TXCMADDRSET0R 0x140
+#define TXCMPHDR 0x150
+#define TXCMPHDR_FMT BIT_U32(24) /* 0:SP 1:LP */
+#define TXCMPHDR_VC_MASK GENMASK_U32(23, 22)
+#define TXCMPHDR_VC(n) FIELD_PREP(TXCMPHDR_VC_MASK, (n))
+#define TXCMPHDR_DT_MASK GENMASK_U32(21, 16)
+#define TXCMPHDR_DT(n) FIELD_PREP(TXCMPHDR_DT_MASK, (n))
+#define TXCMPHDR_DATA1_MASK GENMASK_U32(15, 8)
+#define TXCMPHDR_DATA1(n) FIELD_PREP(TXCMPHDR_DATA1_MASK, (n))
+#define TXCMPHDR_DATA0_MASK GENMASK_U32(7, 0)
+#define TXCMPHDR_DATA0(n) FIELD_PREP(TXCMPHDR_DATA0_MASK, (n))
+#define TXCMPPD0R 0x160
+#define TXCMPPD1R 0x164
+#define TXCMPPD2R 0x168
+#define TXCMPPD3R 0x16c
+
+#define RXSETR 0x200
+#define RXSETR_CRCEN_MASK GENMASK_U32(27, 24)
+#define RXSETR_ECCEN_MASK GENMASK_U32(19, 16)
+#define RXPSETR 0x210
+#define RXPSETR_LPPDACC BIT_U32(0)
+#define RXPSR 0x220
+#define RXPSR_ECCERR1B BIT_U32(28)
+#define RXPSR_UEXTRGERR BIT_U32(25)
+#define RXPSR_RESPTOERR BIT_U32(24)
+#define RXPSR_OVRERR BIT_U32(23)
+#define RXPSR_AXIERR BIT_U32(22)
+#define RXPSR_CRCERR BIT_U32(21)
+#define RXPSR_WCERR BIT_U32(20)
+#define RXPSR_UEXDTERR BIT_U32(19)
+#define RXPSR_UEXPKTERR BIT_U32(18)
+#define RXPSR_ECCERR BIT_U32(17)
+#define RXPSR_MLFERR BIT_U32(16)
+#define RXPSR_RCVACK BIT_U32(14)
+#define RXPSR_RCVEOT BIT_U32(10)
+#define RXPSR_RCVAKE BIT_U32(9)
+#define RXPSR_RCVRESP BIT_U32(8)
+#define RXPSR_BTAREQEND BIT_U32(0)
+#define RXPSCR 0x224
+#define RXPSCR_ECCERR1B BIT_U32(28)
+#define RXPSCR_UEXTRGERR BIT_U32(25)
+#define RXPSCR_RESPTOERR BIT_U32(24)
+#define RXPSCR_OVRERR BIT_U32(23)
+#define RXPSCR_AXIERR BIT_U32(22)
+#define RXPSCR_CRCERR BIT_U32(21)
+#define RXPSCR_WCERR BIT_U32(20)
+#define RXPSCR_UEXDTERR BIT_U32(19)
+#define RXPSCR_UEXPKTERR BIT_U32(18)
+#define RXPSCR_ECCERR BIT_U32(17)
+#define RXPSCR_MLFERR BIT_U32(16)
+#define RXPSCR_RCVACK BIT_U32(14)
+#define RXPSCR_RCVEOT BIT_U32(10)
+#define RXPSCR_RCVAKE BIT_U32(9)
+#define RXPSCR_RCVRESP BIT_U32(8)
+#define RXPSCR_BTAREQEND BIT_U32(0)
+#define RXPIER 0x228
+#define RXPIER_ECCERR1B BIT_U32(28)
+#define RXPIER_UEXTRGERR BIT_U32(25)
+#define RXPIER_RESPTOERR BIT_U32(24)
+#define RXPIER_OVRERR BIT_U32(23)
+#define RXPIER_AXIERR BIT_U32(22)
+#define RXPIER_CRCERR BIT_U32(21)
+#define RXPIER_WCERR BIT_U32(20)
+#define RXPIER_UEXDTERR BIT_U32(19)
+#define RXPIER_UEXPKTERR BIT_U32(18)
+#define RXPIER_ECCERR BIT_U32(17)
+#define RXPIER_MLFERR BIT_U32(16)
+#define RXPIER_RCVACK BIT_U32(14)
+#define RXPIER_RCVEOT BIT_U32(10)
+#define RXPIER_RCVAKE BIT_U32(9)
+#define RXPIER_RCVRESP BIT_U32(8)
+#define RXPIER_BTAREQEND BIT_U32(0)
+#define RXPADDRSET0R 0x230
+#define RXPSIZESETR 0x238
+#define RXPSIZESETR_SIZE_MASK GENMASK_U32(6, 3)
+#define RXPHDR 0x240
+#define RXPHDR_FMT BIT_U32(24) /* 0:SP 1:LP */
+#define RXPHDR_VC_MASK GENMASK_U32(23, 22)
+#define RXPHDR_DT_MASK GENMASK_U32(21, 16)
+#define RXPHDR_DATA1_MASK GENMASK_U32(15, 8)
+#define RXPHDR_DATA0_MASK GENMASK_U32(7, 0)
+#define RXPPD0R 0x250
+#define RXPPD1R 0x254
+#define RXPPD2R 0x258
+#define RXPPD3R 0x25c
+#define AKEPR 0x300
+#define AKEPR_VC_MASK GENMASK_U32(23, 22)
+#define AKEPR_DT_MASK GENMASK_U32(21, 16)
+#define AKEPR_ERRRPT_MASK GENMASK_U32(15, 0)
+#define RXRESPTOSETR 0x400
+#define TACR 0x500
+#define TASR 0x510
+#define TASCR 0x514
+#define TAIER 0x518
+#define TOSR 0x610
+#define TOSR_TATO BIT_U32(2)
+#define TOSR_LRXHTO BIT_U32(1)
+#define TOSR_HRXTO BIT_U32(0)
+#define TOSCR 0x614
+#define TOSCR_TATO BIT_U32(2)
+#define TOSCR_LRXHTO BIT_U32(1)
+#define TOSCR_HRXTO BIT_U32(0)
/*
* Video Mode Register
*/
#define TXVMSETR 0x180
-#define TXVMSETR_SYNSEQ_PULSES (0 << 16)
-#define TXVMSETR_SYNSEQ_EVENTS (1 << 16)
-#define TXVMSETR_VSTPM (1 << 15)
-#define TXVMSETR_PIXWDTH (1 << 8)
-#define TXVMSETR_VSEN_EN (1 << 4)
-#define TXVMSETR_VSEN_DIS (0 << 4)
-#define TXVMSETR_HFPBPEN_EN (1 << 2)
-#define TXVMSETR_HFPBPEN_DIS (0 << 2)
-#define TXVMSETR_HBPBPEN_EN (1 << 1)
-#define TXVMSETR_HBPBPEN_DIS (0 << 1)
-#define TXVMSETR_HSABPEN_EN (1 << 0)
-#define TXVMSETR_HSABPEN_DIS (0 << 0)
+#define TXVMSETR_SYNSEQ_EVENTS BIT_U32(16) /* 0:Pulses 1:Events */
+#define TXVMSETR_VSTPM BIT_U32(15)
+#define TXVMSETR_PIXWDTH_MASK GENMASK_U32(10, 8)
+#define TXVMSETR_PIXWDTH BIT_U32(8) /* Only allowed value */
+#define TXVMSETR_VSEN BIT_U32(4)
+#define TXVMSETR_HFPBPEN BIT_U32(2)
+#define TXVMSETR_HBPBPEN BIT_U32(1)
+#define TXVMSETR_HSABPEN BIT_U32(0)
#define TXVMCR 0x190
-#define TXVMCR_VFCLR (1 << 12)
-#define TXVMCR_EN_VIDEO (1 << 0)
+#define TXVMCR_VFCLR BIT_U32(12)
+#define TXVMCR_EN_VIDEO BIT_U32(0)
#define TXVMSR 0x1a0
-#define TXVMSR_STR (1 << 16)
-#define TXVMSR_VFRDY (1 << 12)
-#define TXVMSR_ACT (1 << 8)
-#define TXVMSR_RDY (1 << 0)
+#define TXVMSR_STR BIT_U32(16)
+#define TXVMSR_VFRDY BIT_U32(12)
+#define TXVMSR_ACT BIT_U32(8)
+#define TXVMSR_RDY BIT_U32(0)
#define TXVMSCR 0x1a4
-#define TXVMSCR_STR (1 << 16)
+#define TXVMSCR_STR BIT_U32(16)
#define TXVMPSPHSETR 0x1c0
-#define TXVMPSPHSETR_DT_RGB16 (0x0e << 16)
-#define TXVMPSPHSETR_DT_RGB18 (0x1e << 16)
-#define TXVMPSPHSETR_DT_RGB18_LS (0x2e << 16)
-#define TXVMPSPHSETR_DT_RGB24 (0x3e << 16)
-#define TXVMPSPHSETR_DT_YCBCR16 (0x2c << 16)
+#define TXVMPSPHSETR_DT_MASK (0x3f << 16)
+#define TXVMPSPHSETR_DT_RGB16 FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x0e)
+#define TXVMPSPHSETR_DT_RGB18 FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x1e)
+#define TXVMPSPHSETR_DT_RGB18_LS FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x2e)
+#define TXVMPSPHSETR_DT_RGB24 FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x3e)
+#define TXVMPSPHSETR_DT_YCBCR16 FIELD_PREP(TXVMPSPHSETR_DT_MASK, 0x2c)
#define TXVMVPRMSET0R 0x1d0
-#define TXVMVPRMSET0R_HSPOL_HIG (0 << 17)
-#define TXVMVPRMSET0R_HSPOL_LOW (1 << 17)
-#define TXVMVPRMSET0R_VSPOL_HIG (0 << 16)
-#define TXVMVPRMSET0R_VSPOL_LOW (1 << 16)
-#define TXVMVPRMSET0R_CSPC_RGB (0 << 4)
-#define TXVMVPRMSET0R_CSPC_YCbCr (1 << 4)
-#define TXVMVPRMSET0R_BPP_16 (0 << 0)
-#define TXVMVPRMSET0R_BPP_18 (1 << 0)
-#define TXVMVPRMSET0R_BPP_24 (2 << 0)
+#define TXVMVPRMSET0R_HSPOL_LOW BIT_U32(17) /* 0:High 1:Low */
+#define TXVMVPRMSET0R_VSPOL_LOW BIT_U32(16) /* 0:High 1:Low */
+#define TXVMVPRMSET0R_CSPC_YCbCr BIT_U32(4) /* 0:RGB 1:YCbCr */
+#define TXVMVPRMSET0R_BPP_MASK GENMASK_U32(2, 0)
+#define TXVMVPRMSET0R_BPP_16 FIELD_PREP(TXVMVPRMSET0R_BPP_MASK, 0)
+#define TXVMVPRMSET0R_BPP_18 FIELD_PREP(TXVMVPRMSET0R_BPP_MASK, 1)
+#define TXVMVPRMSET0R_BPP_24 FIELD_PREP(TXVMVPRMSET0R_BPP_MASK, 2)
#define TXVMVPRMSET1R 0x1d4
-#define TXVMVPRMSET1R_VACTIVE(x) (((x) & 0x7fff) << 16)
-#define TXVMVPRMSET1R_VSA(x) (((x) & 0xfff) << 0)
+#define TXVMVPRMSET1R_VACTIVE_MASK GENMASK_U32(30, 16)
+#define TXVMVPRMSET1R_VACTIVE(n) FIELD_PREP(TXVMVPRMSET1R_VACTIVE_MASK, (n))
+#define TXVMVPRMSET1R_VSA_MASK GENMASK_U32(11, 0)
+#define TXVMVPRMSET1R_VSA(n) FIELD_PREP(TXVMVPRMSET1R_VSA_MASK, (n))
#define TXVMVPRMSET2R 0x1d8
-#define TXVMVPRMSET2R_VFP(x) (((x) & 0x1fff) << 16)
-#define TXVMVPRMSET2R_VBP(x) (((x) & 0x1fff) << 0)
+#define TXVMVPRMSET2R_VFP_MASK GENMASK_U32(28, 16)
+#define TXVMVPRMSET2R_VFP(n) FIELD_PREP(TXVMVPRMSET2R_VFP_MASK, (n))
+#define TXVMVPRMSET2R_VBP_MASK GENMASK_U32(12, 0)
+#define TXVMVPRMSET2R_VBP(n) FIELD_PREP(TXVMVPRMSET2R_VBP_MASK, (n))
#define TXVMVPRMSET3R 0x1dc
-#define TXVMVPRMSET3R_HACTIVE(x) (((x) & 0x7fff) << 16)
-#define TXVMVPRMSET3R_HSA(x) (((x) & 0xfff) << 0)
+#define TXVMVPRMSET3R_HACTIVE_MASK GENMASK_U32(30, 16)
+#define TXVMVPRMSET3R_HACTIVE(n) FIELD_PREP(TXVMVPRMSET3R_HACTIVE_MASK, (n))
+#define TXVMVPRMSET3R_HSA_MASK GENMASK_U32(11, 0)
+#define TXVMVPRMSET3R_HSA(n) FIELD_PREP(TXVMVPRMSET3R_HSA_MASK, (n))
#define TXVMVPRMSET4R 0x1e0
-#define TXVMVPRMSET4R_HFP(x) (((x) & 0x1fff) << 16)
-#define TXVMVPRMSET4R_HBP(x) (((x) & 0x1fff) << 0)
+#define TXVMVPRMSET4R_HFP_MASK GENMASK_U32(28, 16)
+#define TXVMVPRMSET4R_HFP(n) FIELD_PREP(TXVMVPRMSET4R_HFP_MASK, (n))
+#define TXVMVPRMSET4R_HBP_MASK GENMASK_U32(12, 0)
+#define TXVMVPRMSET4R_HBP(n) FIELD_PREP(TXVMVPRMSET4R_HBP_MASK, (n))
/*
* PHY-Protocol Interface (PPI) Registers
*/
#define PPISETR 0x700
-#define PPISETR_DLEN_0 (0x1 << 0)
-#define PPISETR_DLEN_1 (0x3 << 0)
-#define PPISETR_DLEN_2 (0x7 << 0)
-#define PPISETR_DLEN_3 (0xf << 0)
-#define PPISETR_CLEN (1 << 8)
+#define PPISETR_DLEN_MASK GENMASK_U32(3, 0)
+#define PPISETR_CLEN BIT_U32(8)
#define PPICLCR 0x710
-#define PPICLCR_TXREQHS (1 << 8)
-#define PPICLCR_TXULPSEXT (1 << 1)
-#define PPICLCR_TXULPSCLK (1 << 0)
+#define PPICLCR_TXREQHS BIT_U32(8)
+#define PPICLCR_TXULPSEXT BIT_U32(1)
+#define PPICLCR_TXULPSCLK BIT_U32(0)
#define PPICLSR 0x720
-#define PPICLSR_HSTOLP (1 << 27)
-#define PPICLSR_TOHS (1 << 26)
-#define PPICLSR_STPST (1 << 0)
+#define PPICLSR_HSTOLP BIT_U32(27)
+#define PPICLSR_TOHS BIT_U32(26)
+#define PPICLSR_STPST BIT_U32(0)
#define PPICLSCR 0x724
-#define PPICLSCR_HSTOLP (1 << 27)
-#define PPICLSCR_TOHS (1 << 26)
+#define PPICLSCR_HSTOLP BIT_U32(27)
+#define PPICLSCR_TOHS BIT_U32(26)
+
+#define PPIDL0SR 0x740
+#define PPIDL0SR_DIR BIT_U32(10)
+#define PPIDL0SR_STPST BIT_U32(6)
#define PPIDLSR 0x760
-#define PPIDLSR_STPST (0xf << 0)
+#define PPIDLSR_STPST GENMASK_U32(3, 0)
/*
* Clocks registers
*/
#define LPCLKSET 0x1000
-#define LPCLKSET_CKEN (1 << 8)
-#define LPCLKSET_LPCLKDIV(x) (((x) & 0x3f) << 0)
+#define LPCLKSET_CKEN BIT_U32(8)
+#define LPCLKSET_LPCLKDIV_MASK GENMASK_U32(5, 0)
#define CFGCLKSET 0x1004
-#define CFGCLKSET_CKEN (1 << 8)
-#define CFGCLKSET_CFGCLKDIV(x) (((x) & 0x3f) << 0)
+#define CFGCLKSET_CKEN BIT_U32(8)
+#define CFGCLKSET_CFGCLKDIV_MASK GENMASK_U32(5, 0)
#define DOTCLKDIV 0x1008
-#define DOTCLKDIV_CKEN (1 << 8)
-#define DOTCLKDIV_DOTCLKDIV(x) (((x) & 0x3f) << 0)
+#define DOTCLKDIV_CKEN BIT_U32(8)
+#define DOTCLKDIV_DOTCLKDIV_MASK GENMASK_U32(5, 0)
#define VCLKSET 0x100c
-#define VCLKSET_CKEN (1 << 16)
-#define VCLKSET_COLOR_RGB (0 << 8)
-#define VCLKSET_COLOR_YCC (1 << 8)
-#define VCLKSET_DIV_V3U(x) (((x) & 0x3) << 4)
-#define VCLKSET_DIV_V4H(x) (((x) & 0x7) << 4)
-#define VCLKSET_BPP_16 (0 << 2)
-#define VCLKSET_BPP_18 (1 << 2)
-#define VCLKSET_BPP_18L (2 << 2)
-#define VCLKSET_BPP_24 (3 << 2)
-#define VCLKSET_LANE(x) (((x) & 0x3) << 0)
+#define VCLKSET_CKEN BIT_U32(16)
+#define VCLKSET_COLOR_YCC BIT_U32(8) /* 0:RGB 1:YCbCr */
+#define VCLKSET_DIV_V3U_MASK GENMASK_U32(5, 4)
+#define VCLKSET_DIV_V3U(n) FIELD_PREP(VCLKSET_DIV_V3U_MASK, (n))
+#define VCLKSET_DIV_V4H_MASK GENMASK_U32(6, 4)
+#define VCLKSET_DIV_V4H(n) FIELD_PREP(VCLKSET_DIV_V4H_MASK, (n))
+#define VCLKSET_BPP_MASK GENMASK_U32(3, 2)
+#define VCLKSET_BPP_16 FIELD_PREP(VCLKSET_BPP_MASK, 0)
+#define VCLKSET_BPP_18 FIELD_PREP(VCLKSET_BPP_MASK, 1)
+#define VCLKSET_BPP_18L FIELD_PREP(VCLKSET_BPP_MASK, 2)
+#define VCLKSET_BPP_24 FIELD_PREP(VCLKSET_BPP_MASK, 3)
+#define VCLKSET_LANE_MASK GENMASK_U32(1, 0)
+#define VCLKSET_LANE(n) FIELD_PREP(VCLKSET_LANE_MASK, (n))
#define VCLKEN 0x1010
-#define VCLKEN_CKEN (1 << 0)
+#define VCLKEN_CKEN BIT_U32(0)
#define PHYSETUP 0x1014
-#define PHYSETUP_HSFREQRANGE(x) (((x) & 0x7f) << 16)
-#define PHYSETUP_HSFREQRANGE_MASK (0x7f << 16)
-#define PHYSETUP_CFGCLKFREQRANGE(x) (((x) & 0x3f) << 8)
-#define PHYSETUP_SHUTDOWNZ (1 << 1)
-#define PHYSETUP_RSTZ (1 << 0)
+#define PHYSETUP_HSFREQRANGE_MASK GENMASK_U32(22, 16)
+#define PHYSETUP_HSFREQRANGE(n) FIELD_PREP(PHYSETUP_HSFREQRANGE_MASK, (n))
+#define PHYSETUP_CFGCLKFREQRANGE_MASK GENMASK_U32(13, 8)
+#define PHYSETUP_SHUTDOWNZ BIT_U32(1)
+#define PHYSETUP_RSTZ BIT_U32(0)
#define CLOCKSET1 0x101c
-#define CLOCKSET1_LOCK_PHY (1 << 17)
-#define CLOCKSET1_CLKSEL (1 << 8)
-#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2)
-#define CLOCKSET1_CLKINSEL_DIG (1 << 2)
-#define CLOCKSET1_CLKINSEL_DU (1 << 3)
-#define CLOCKSET1_SHADOW_CLEAR (1 << 1)
-#define CLOCKSET1_UPDATEPLL (1 << 0)
+#define CLOCKSET1_LOCK_PHY BIT_U32(17)
+#define CLOCKSET1_CLKSEL BIT_U32(8)
+#define CLOCKSET1_CLKINSEL_MASK GENMASK_U32(3, 2)
+#define CLOCKSET1_CLKINSEL_EXTAL FIELD_PREP(CLOCKSET1_CLKINSEL_MASK, 0)
+#define CLOCKSET1_CLKINSEL_DIG FIELD_PREP(CLOCKSET1_CLKINSEL_MASK, 1)
+#define CLOCKSET1_CLKINSEL_DU FIELD_PREP(CLOCKSET1_CLKINSEL_MASK, 2)
+#define CLOCKSET1_SHADOW_CLEAR BIT_U32(1)
+#define CLOCKSET1_UPDATEPLL BIT_U32(0)
#define CLOCKSET2 0x1020
-#define CLOCKSET2_M(x) (((x) & 0xfff) << 16)
-#define CLOCKSET2_VCO_CNTRL(x) (((x) & 0x3f) << 8)
-#define CLOCKSET2_N(x) (((x) & 0xf) << 0)
+#define CLOCKSET2_M_MASK GENMASK_U32(27, 16)
+#define CLOCKSET2_M(n) FIELD_PREP(CLOCKSET2_M_MASK, (n))
+#define CLOCKSET2_VCO_CNTRL_MASK GENMASK_U32(13, 8)
+#define CLOCKSET2_VCO_CNTRL(n) FIELD_PREP(CLOCKSET2_VCO_CNTRL_MASK, (n))
+#define CLOCKSET2_N_MASK GENMASK_U32(3, 0)
+#define CLOCKSET2_N(n) FIELD_PREP(CLOCKSET2_N_MASK, (n))
#define CLOCKSET3 0x1024
-#define CLOCKSET3_PROP_CNTRL(x) (((x) & 0x3f) << 24)
-#define CLOCKSET3_INT_CNTRL(x) (((x) & 0x3f) << 16)
-#define CLOCKSET3_CPBIAS_CNTRL(x) (((x) & 0x7f) << 8)
-#define CLOCKSET3_GMP_CNTRL(x) (((x) & 0x3) << 0)
+#define CLOCKSET3_PROP_CNTRL_MASK GENMASK_U32(29, 24)
+#define CLOCKSET3_PROP_CNTRL(n) FIELD_PREP(CLOCKSET3_PROP_CNTRL_MASK, (n))
+#define CLOCKSET3_INT_CNTRL_MASK GENMASK_U32(21, 16)
+#define CLOCKSET3_INT_CNTRL(n) FIELD_PREP(CLOCKSET3_INT_CNTRL_MASK, (n))
+#define CLOCKSET3_CPBIAS_CNTRL_MASK GENMASK_U32(14, 8)
+#define CLOCKSET3_CPBIAS_CNTRL(n) FIELD_PREP(CLOCKSET3_CPBIAS_CNTRL_MASK, (n))
+#define CLOCKSET3_GMP_CNTRL_MASK GENMASK_U32(1, 0)
+#define CLOCKSET3_GMP_CNTRL(n) FIELD_PREP(CLOCKSET3_GMP_CNTRL_MASK, (n))
#define PHTW 0x1034
-#define PHTW_DWEN (1 << 24)
-#define PHTW_TESTDIN_DATA(x) (((x) & 0xff) << 16)
-#define PHTW_CWEN (1 << 8)
-#define PHTW_TESTDIN_CODE(x) (((x) & 0xff) << 0)
+#define PHTW_DWEN BIT_U32(24)
+#define PHTW_TESTDIN_DATA_MASK GENMASK_U32(23, 16)
+#define PHTW_CWEN BIT_U32(8)
+#define PHTW_TESTDIN_CODE_MASK GENMASK_U32(7, 0)
#define PHTR 0x1038
-#define PHTR_TEST (1 << 16)
+#define PHTR_TESTDOUT GENMASK_U32(23, 16)
+#define PHTR_TESTDOUT_TEST BIT_U32(16)
#define PHTC 0x103c
-#define PHTC_TESTCLR (1 << 0)
+#define PHTC_TESTCLR BIT_U32(0)
#endif /* __RCAR_MIPI_DSI_REGS_H__ */
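The register header conversion above trades open-coded shifts for BIT_U32()/GENMASK_U32() masks and FIELD_PREP()-style accessors, so each field's position lives in one place and constant values are checked against the mask width at compile time. A small sketch of the idiom with hypothetical fields, using the generic GENMASK/FIELD_PREP forms:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_VC_MASK		GENMASK(23, 22)
#define EXAMPLE_DT_MASK		GENMASK(21, 16)

static u32 example_pack_header(u8 vc, u8 dt)
{
	/* FIELD_PREP() shifts each value into its mask's position. */
	return FIELD_PREP(EXAMPLE_VC_MASK, vc) |
	       FIELD_PREP(EXAMPLE_DT_MASK, dt);
}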
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index e57536fd6f4d..7f2ef7137ae5 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_RZG2L_DU
tristate "DRM Support for RZ/G2L Display Unit"
- depends on ARCH_RZG2L || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
depends on DRM && OF
depends on VIDEO_RENESAS_VSP1
select DRM_CLIENT_SELECTION
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index e1aa6a719529..0fef33a5a089 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "rzg2l_du_drv.h"
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index f87337c3cbb5..3b52dfc0ea1e 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -913,7 +913,7 @@ static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = {
* Power Management
*/
-static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev)
+static int rzg2l_mipi_pm_runtime_suspend(struct device *dev)
{
struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev);
@@ -923,7 +923,7 @@ static int __maybe_unused rzg2l_mipi_pm_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev)
+static int rzg2l_mipi_pm_runtime_resume(struct device *dev)
{
struct rzg2l_mipi_dsi *dsi = dev_get_drvdata(dev);
int ret;
@@ -940,7 +940,7 @@ static int __maybe_unused rzg2l_mipi_pm_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops rzg2l_mipi_pm_ops = {
- SET_RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL)
+ RUNTIME_PM_OPS(rzg2l_mipi_pm_runtime_suspend, rzg2l_mipi_pm_runtime_resume, NULL)
};
/* -----------------------------------------------------------------------------
@@ -1072,7 +1072,7 @@ static struct platform_driver rzg2l_mipi_dsi_platform_driver = {
.remove = rzg2l_mipi_dsi_remove,
.driver = {
.name = "rzg2l-mipi-dsi",
- .pm = &rzg2l_mipi_pm_ops,
+ .pm = pm_ptr(&rzg2l_mipi_pm_ops),
.of_match_table = rzg2l_mipi_dsi_of_table,
},
};
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index faf50d872be3..b7b025814e72 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -10,6 +10,7 @@ config DRM_ROCKCHIP
select VIDEOMODE_HELPERS
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP
+ select DRM_DW_DP if ROCKCHIP_DW_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
@@ -61,6 +62,14 @@ config ROCKCHIP_CDN_DP
RK3399 based SoC, you should select this
option.
+config ROCKCHIP_DW_DP
+ bool "Rockchip specific extensions for Synopsys DW DP"
+ help
+ This selects support for Rockchip SoC specific extensions
+ to enable the Synopsys DesignWare Cores based DisplayPort
+ transmit controller on Rockchip SoCs. If you want to enable DP
+ on RK3588 based SoCs, you should select this option.
+
config ROCKCHIP_DW_HDMI
bool "Rockchip specific extensions for Synopsys DW HDMI"
help
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index 2b867cebbc12..097f062399c7 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -14,6 +14,7 @@ rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI2) += dw-mipi-dsi2-rockchip.o
+rockchipdrm-$(CONFIG_ROCKCHIP_DW_DP) += dw_dp-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index d30f0983a53a..fdab71d51e2a 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -28,6 +28,7 @@
#include <drm/bridge/analogix_dp.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -330,38 +331,29 @@ static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
struct device_node *np = dev->of_node;
dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
- if (IS_ERR(dp->grf)) {
- DRM_DEV_ERROR(dev, "failed to get rockchip,grf property\n");
- return PTR_ERR(dp->grf);
- }
+ if (IS_ERR(dp->grf))
+ return dev_err_probe(dev, PTR_ERR(dp->grf),
+ "failed to get rockchip,grf property\n");
- dp->grfclk = devm_clk_get(dev, "grf");
- if (PTR_ERR(dp->grfclk) == -ENOENT) {
- dp->grfclk = NULL;
- } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
- return -EPROBE_DEFER;
- } else if (IS_ERR(dp->grfclk)) {
- DRM_DEV_ERROR(dev, "failed to get grf clock\n");
- return PTR_ERR(dp->grfclk);
- }
+ dp->grfclk = devm_clk_get_optional(dev, "grf");
+ if (IS_ERR(dp->grfclk))
+ return dev_err_probe(dev, PTR_ERR(dp->grfclk),
+ "failed to get grf clock\n");
dp->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(dp->pclk)) {
- DRM_DEV_ERROR(dev, "failed to get pclk property\n");
- return PTR_ERR(dp->pclk);
- }
+ if (IS_ERR(dp->pclk))
+ return dev_err_probe(dev, PTR_ERR(dp->pclk),
+ "failed to get pclk property\n");
dp->rst = devm_reset_control_get(dev, "dp");
- if (IS_ERR(dp->rst)) {
- DRM_DEV_ERROR(dev, "failed to get dp reset control\n");
- return PTR_ERR(dp->rst);
- }
+ if (IS_ERR(dp->rst))
+ return dev_err_probe(dev, PTR_ERR(dp->rst),
+ "failed to get dp reset control\n");
dp->apbrst = devm_reset_control_get_optional(dev, "apb");
- if (IS_ERR(dp->apbrst)) {
- DRM_DEV_ERROR(dev, "failed to get apb reset control\n");
- return PTR_ERR(dp->apbrst);
- }
+ if (IS_ERR(dp->apbrst))
+ return dev_err_probe(dev, PTR_ERR(dp->apbrst),
+ "failed to get apb reset control\n");
return 0;
}
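The probe conversion above relies on two idioms: devm_clk_get_optional(), which returns NULL rather than -ENOENT when the clock is simply absent, and dev_err_probe(), which stays silent on -EPROBE_DEFER and records the deferral reason. A minimal sketch with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct foo { struct clk *optclk; };

static int foo_get_clocks(struct device *dev, struct foo *priv)
{
	priv->optclk = devm_clk_get_optional(dev, "opt");
	if (IS_ERR(priv->optclk))
		/* Logs (except on -EPROBE_DEFER) and returns the error. */
		return dev_err_probe(dev, PTR_ERR(priv->optclk),
				     "failed to get opt clock\n");

	return 0;
}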
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index b7e3f5dcf8d5..177e30445ee8 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -21,6 +21,7 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 924fb1d3ece2..0dc3804051a9 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -11,6 +11,8 @@
#include <linux/iopoll.h>
#include <linux/reset.h>
+#include <drm/drm_print.h>
+
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 3398160ad75e..2dad6b7b61b2 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/mfd/syscon.h>
@@ -23,6 +24,7 @@
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
@@ -148,7 +150,7 @@
#define DW_MIPI_NEEDS_GRF_CLK BIT(1)
#define PX30_GRF_PD_VO_CON1 0x0438
-#define PX30_DSI_FORCETXSTOPMODE (0xf << 7)
+#define PX30_DSI_FORCETXSTOPMODE (0xfUL << 7)
#define PX30_DSI_FORCERXMODE BIT(6)
#define PX30_DSI_TURNDISABLE BIT(5)
#define PX30_DSI_LCDC_SEL BIT(0)
@@ -162,21 +164,26 @@
#define RK3288_DSI0_LCDC_SEL BIT(6)
#define RK3288_DSI1_LCDC_SEL BIT(9)
+#define RK3368_GRF_SOC_CON7 0x41c
+#define RK3368_DSI_FORCETXSTOPMODE (0xf << 7)
+#define RK3368_DSI_FORCERXMODE BIT(6)
+#define RK3368_DSI_TURNDISABLE BIT(5)
+
#define RK3399_GRF_SOC_CON20 0x6250
#define RK3399_DSI0_LCDC_SEL BIT(0)
#define RK3399_DSI1_LCDC_SEL BIT(4)
#define RK3399_GRF_SOC_CON22 0x6258
-#define RK3399_DSI0_TURNREQUEST (0xf << 12)
-#define RK3399_DSI0_TURNDISABLE (0xf << 8)
-#define RK3399_DSI0_FORCETXSTOPMODE (0xf << 4)
-#define RK3399_DSI0_FORCERXMODE (0xf << 0)
+#define RK3399_DSI0_TURNREQUEST (0xfUL << 12)
+#define RK3399_DSI0_TURNDISABLE (0xfUL << 8)
+#define RK3399_DSI0_FORCETXSTOPMODE (0xfUL << 4)
+#define RK3399_DSI0_FORCERXMODE (0xfUL << 0)
#define RK3399_GRF_SOC_CON23 0x625c
-#define RK3399_DSI1_TURNDISABLE (0xf << 12)
-#define RK3399_DSI1_FORCETXSTOPMODE (0xf << 8)
-#define RK3399_DSI1_FORCERXMODE (0xf << 4)
-#define RK3399_DSI1_ENABLE (0xf << 0)
+#define RK3399_DSI1_TURNDISABLE (0xfUL << 12)
+#define RK3399_DSI1_FORCETXSTOPMODE (0xfUL << 8)
+#define RK3399_DSI1_FORCERXMODE (0xfUL << 4)
+#define RK3399_DSI1_ENABLE (0xfUL << 0)
#define RK3399_GRF_SOC_CON24 0x6260
#define RK3399_TXRX_MASTERSLAVEZ BIT(7)
@@ -186,8 +193,8 @@
#define RK3399_TXRX_TURNREQUEST GENMASK(3, 0)
#define RK3568_GRF_VO_CON2 0x0368
-#define RK3568_DSI0_SKEWCALHS (0x1f << 11)
-#define RK3568_DSI0_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI0_SKEWCALHS (0x1fUL << 11)
+#define RK3568_DSI0_FORCETXSTOPMODE (0xfUL << 4)
#define RK3568_DSI0_TURNDISABLE BIT(2)
#define RK3568_DSI0_FORCERXMODE BIT(0)
@@ -197,18 +204,16 @@
* come from. Name GRF_VO_CON3 is assumed.
*/
#define RK3568_GRF_VO_CON3 0x36c
-#define RK3568_DSI1_SKEWCALHS (0x1f << 11)
-#define RK3568_DSI1_FORCETXSTOPMODE (0xf << 4)
+#define RK3568_DSI1_SKEWCALHS (0x1fUL << 11)
+#define RK3568_DSI1_FORCETXSTOPMODE (0xfUL << 4)
#define RK3568_DSI1_TURNDISABLE BIT(2)
#define RK3568_DSI1_FORCERXMODE BIT(0)
#define RV1126_GRF_DSIPHY_CON 0x10220
-#define RV1126_DSI_FORCETXSTOPMODE (0xf << 4)
+#define RV1126_DSI_FORCETXSTOPMODE (0xfUL << 4)
#define RV1126_DSI_TURNDISABLE BIT(2)
#define RV1126_DSI_FORCERXMODE BIT(0)
-#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
-
enum {
DW_DSI_USAGE_IDLE,
DW_DSI_USAGE_DSI,
@@ -1484,14 +1489,13 @@ static const struct rockchip_dw_dsi_chip_data px30_chip_data[] = {
{
.reg = 0xff450000,
.lcdsel_grf_reg = PX30_GRF_PD_VO_CON1,
- .lcdsel_big = HIWORD_UPDATE(0, PX30_DSI_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(PX30_DSI_LCDC_SEL,
- PX30_DSI_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(PX30_DSI_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(PX30_DSI_LCDC_SEL, 1),
.lanecfg1_grf_reg = PX30_GRF_PD_VO_CON1,
- .lanecfg1 = HIWORD_UPDATE(0, PX30_DSI_TURNDISABLE |
- PX30_DSI_FORCERXMODE |
- PX30_DSI_FORCETXSTOPMODE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((PX30_DSI_TURNDISABLE |
+ PX30_DSI_FORCERXMODE |
+ PX30_DSI_FORCETXSTOPMODE), 0),
.max_data_lanes = 4,
},
@@ -1502,9 +1506,9 @@ static const struct rockchip_dw_dsi_chip_data rk3128_chip_data[] = {
{
.reg = 0x10110000,
.lanecfg1_grf_reg = RK3128_GRF_LVDS_CON0,
- .lanecfg1 = HIWORD_UPDATE(0, RK3128_DSI_TURNDISABLE |
- RK3128_DSI_FORCERXMODE |
- RK3128_DSI_FORCETXSTOPMODE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3128_DSI_TURNDISABLE |
+ RK3128_DSI_FORCERXMODE |
+ RK3128_DSI_FORCETXSTOPMODE), 0),
.max_data_lanes = 4,
},
{ /* sentinel */ }
@@ -1514,22 +1518,34 @@ static const struct rockchip_dw_dsi_chip_data rk3288_chip_data[] = {
{
.reg = 0xff960000,
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI0_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI0_LCDC_SEL, RK3288_DSI0_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3288_DSI0_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3288_DSI0_LCDC_SEL, 1),
.max_data_lanes = 4,
},
{
.reg = 0xff964000,
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_DSI1_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_DSI1_LCDC_SEL, RK3288_DSI1_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3288_DSI1_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3288_DSI1_LCDC_SEL, 1),
.max_data_lanes = 4,
},
{ /* sentinel */ }
};
+static const struct rockchip_dw_dsi_chip_data rk3368_chip_data[] = {
+ {
+ .reg = 0xff960000,
+ .lanecfg1_grf_reg = RK3368_GRF_SOC_CON7,
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3368_DSI_TURNDISABLE |
+ RK3368_DSI_FORCETXSTOPMODE |
+ RK3368_DSI_FORCERXMODE), 0),
+ .max_data_lanes = 4,
+ },
+ { /* sentinel */ }
+};
+
static int rk3399_dphy_tx1rx1_init(struct phy *phy)
{
struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
@@ -1539,13 +1555,13 @@ static int rk3399_dphy_tx1rx1_init(struct phy *phy)
* Assume ISP0 is supplied by the RX0 dphy.
*/
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_SRC_SEL_ISP0));
+ FIELD_PREP_WM16(RK3399_TXRX_SRC_SEL_ISP0, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ));
+ FIELD_PREP_WM16(RK3399_TXRX_MASTERSLAVEZ, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_BASEDIR));
+ FIELD_PREP_WM16(RK3399_TXRX_BASEDIR, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_ENABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_ENABLE, 0));
return 0;
}
@@ -1559,21 +1575,20 @@ static int rk3399_dphy_tx1rx1_power_on(struct phy *phy)
usleep_range(100, 150);
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_MASTERSLAVEZ));
+ FIELD_PREP_WM16(RK3399_TXRX_MASTERSLAVEZ, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(RK3399_TXRX_BASEDIR, RK3399_TXRX_BASEDIR));
+ FIELD_PREP_WM16(RK3399_TXRX_BASEDIR, 1));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_FORCERXMODE));
+ FIELD_PREP_WM16(RK3399_DSI1_FORCERXMODE, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_FORCETXSTOPMODE));
+ FIELD_PREP_WM16(RK3399_DSI1_FORCETXSTOPMODE, 0));
/* Disable lane turn around, which is ignored in receive mode */
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON24,
- HIWORD_UPDATE(0, RK3399_TXRX_TURNREQUEST));
+ FIELD_PREP_WM16(RK3399_TXRX_TURNREQUEST, 0));
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(RK3399_DSI1_TURNDISABLE,
- RK3399_DSI1_TURNDISABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_TURNDISABLE, 0xf));
usleep_range(100, 150);
dsi_write(dsi, DSI_PHY_TST_CTRL0, PHY_TESTCLK | PHY_UNTESTCLR);
@@ -1581,8 +1596,8 @@ static int rk3399_dphy_tx1rx1_power_on(struct phy *phy)
/* Enable dphy lanes */
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(GENMASK(dsi->dphy_config.lanes - 1, 0),
- RK3399_DSI1_ENABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_ENABLE,
+ GENMASK(dsi->dphy_config.lanes - 1, 0)));
usleep_range(100, 150);
@@ -1594,7 +1609,7 @@ static int rk3399_dphy_tx1rx1_power_off(struct phy *phy)
struct dw_mipi_dsi_rockchip *dsi = phy_get_drvdata(phy);
regmap_write(dsi->grf_regmap, RK3399_GRF_SOC_CON23,
- HIWORD_UPDATE(0, RK3399_DSI1_ENABLE));
+ FIELD_PREP_WM16(RK3399_DSI1_ENABLE, 0));
return 0;
}
@@ -1603,15 +1618,14 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
{
.reg = 0xff960000,
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI0_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI0_LCDC_SEL,
- RK3399_DSI0_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3399_DSI0_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3399_DSI0_LCDC_SEL, 1),
.lanecfg1_grf_reg = RK3399_GRF_SOC_CON22,
- .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI0_TURNREQUEST |
- RK3399_DSI0_TURNDISABLE |
- RK3399_DSI0_FORCETXSTOPMODE |
- RK3399_DSI0_FORCERXMODE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3399_DSI0_TURNREQUEST |
+ RK3399_DSI0_TURNDISABLE |
+ RK3399_DSI0_FORCETXSTOPMODE |
+ RK3399_DSI0_FORCERXMODE), 0),
.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
.max_data_lanes = 4,
@@ -1619,25 +1633,23 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = {
{
.reg = 0xff968000,
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_DSI1_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_DSI1_LCDC_SEL,
- RK3399_DSI1_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3399_DSI1_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3399_DSI1_LCDC_SEL, 1),
+
.lanecfg1_grf_reg = RK3399_GRF_SOC_CON23,
- .lanecfg1 = HIWORD_UPDATE(0, RK3399_DSI1_TURNDISABLE |
- RK3399_DSI1_FORCETXSTOPMODE |
- RK3399_DSI1_FORCERXMODE |
- RK3399_DSI1_ENABLE),
+ .lanecfg1 = FIELD_PREP_WM16_CONST((RK3399_DSI1_TURNDISABLE |
+ RK3399_DSI1_FORCETXSTOPMODE |
+ RK3399_DSI1_FORCERXMODE |
+ RK3399_DSI1_ENABLE), 0),
.lanecfg2_grf_reg = RK3399_GRF_SOC_CON24,
- .lanecfg2 = HIWORD_UPDATE(RK3399_TXRX_MASTERSLAVEZ |
- RK3399_TXRX_ENABLECLK,
- RK3399_TXRX_MASTERSLAVEZ |
- RK3399_TXRX_ENABLECLK |
- RK3399_TXRX_BASEDIR),
+ .lanecfg2 = (FIELD_PREP_WM16_CONST(RK3399_TXRX_MASTERSLAVEZ, 1) |
+ FIELD_PREP_WM16_CONST(RK3399_TXRX_ENABLECLK, 1) |
+ FIELD_PREP_WM16_CONST(RK3399_TXRX_BASEDIR, 0)),
.enable_grf_reg = RK3399_GRF_SOC_CON23,
- .enable = HIWORD_UPDATE(RK3399_DSI1_ENABLE, RK3399_DSI1_ENABLE),
+ .enable = FIELD_PREP_WM16_CONST(RK3399_DSI1_ENABLE, RK3399_DSI1_ENABLE),
.flags = DW_MIPI_NEEDS_PHY_CFG_CLK | DW_MIPI_NEEDS_GRF_CLK,
.max_data_lanes = 4,
@@ -1653,19 +1665,19 @@ static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = {
{
.reg = 0xfe060000,
.lanecfg1_grf_reg = RK3568_GRF_VO_CON2,
- .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS |
- RK3568_DSI0_FORCETXSTOPMODE |
- RK3568_DSI0_TURNDISABLE |
- RK3568_DSI0_FORCERXMODE),
+ .lanecfg1 = (FIELD_PREP_WM16_CONST(RK3568_DSI0_SKEWCALHS, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI0_FORCETXSTOPMODE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI0_TURNDISABLE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI0_FORCERXMODE, 0)),
.max_data_lanes = 4,
},
{
.reg = 0xfe070000,
.lanecfg1_grf_reg = RK3568_GRF_VO_CON3,
- .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS |
- RK3568_DSI1_FORCETXSTOPMODE |
- RK3568_DSI1_TURNDISABLE |
- RK3568_DSI1_FORCERXMODE),
+ .lanecfg1 = (FIELD_PREP_WM16_CONST(RK3568_DSI1_SKEWCALHS, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI1_FORCETXSTOPMODE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI1_TURNDISABLE, 0) |
+ FIELD_PREP_WM16_CONST(RK3568_DSI1_FORCERXMODE, 0)),
.max_data_lanes = 4,
},
{ /* sentinel */ }
@@ -1675,9 +1687,9 @@ static const struct rockchip_dw_dsi_chip_data rv1126_chip_data[] = {
{
.reg = 0xffb30000,
.lanecfg1_grf_reg = RV1126_GRF_DSIPHY_CON,
- .lanecfg1 = HIWORD_UPDATE(0, RV1126_DSI_TURNDISABLE |
- RV1126_DSI_FORCERXMODE |
- RV1126_DSI_FORCETXSTOPMODE),
+ .lanecfg1 = (FIELD_PREP_WM16_CONST(RV1126_DSI_TURNDISABLE, 0) |
+ FIELD_PREP_WM16_CONST(RV1126_DSI_FORCERXMODE, 0) |
+ FIELD_PREP_WM16_CONST(RV1126_DSI_FORCETXSTOPMODE, 0)),
.max_data_lanes = 4,
},
{ /* sentinel */ }
@@ -1694,6 +1706,9 @@ static const struct of_device_id dw_mipi_dsi_rockchip_dt_ids[] = {
.compatible = "rockchip,rk3288-mipi-dsi",
.data = &rk3288_chip_data,
}, {
+ .compatible = "rockchip,rk3368-mipi-dsi",
+ .data = &rk3368_chip_data,
+ }, {
.compatible = "rockchip,rk3399-mipi-dsi",
.data = &rk3399_chip_data,
}, {
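The FIELD_PREP_WM16() conversions throughout this file target Rockchip's write-masked GRF registers: the upper 16 bits of a write select which of the lower 16 bits actually change, so no read-modify-write cycle is needed. This is also why the field masks gain a UL suffix, since shifting an int mask left by 16 could otherwise overflow into the sign bit. An illustrative open-coded equivalent:

#include <linux/bitops.h>
#include <linux/types.h>

/*
 * Equivalent of a write-masked GRF update; mask must be non-zero
 * and confined to bits 15:0.
 */
static u32 hiword_update(u32 mask, u32 val)
{
	/* High half: bits to modify; low half: their new values. */
	return (mask << 16) | ((val << __ffs(mask)) & mask);
}

/* e.g. regmap_write(grf, reg, hiword_update(SOME_FIELD_MASK, 1)); */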
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
index cdd490778756..0aea764e29b2 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi2-rockchip.c
@@ -437,6 +437,15 @@ static void dw_mipi_dsi2_rockchip_remove(struct platform_device *pdev)
dw_mipi_dsi2_remove(dsi2->dmd);
}
+static const struct dsigrf_reg rk3576_dsi_grf_reg_fields[MAX_FIELDS] = {
+ [TXREQCLKHS_EN] = { 0x0028, 1, 1 },
+ [GATING_EN] = { 0x0028, 0, 0 },
+ [IPI_SHUTDN] = { 0x0028, 3, 3 },
+ [IPI_COLORM] = { 0x0028, 2, 2 },
+ [IPI_COLOR_DEPTH] = { 0x0028, 8, 11 },
+ [IPI_FORMAT] = { 0x0028, 4, 7 },
+};
+
static const struct dsigrf_reg rk3588_dsi0_grf_reg_fields[MAX_FIELDS] = {
[TXREQCLKHS_EN] = { 0x0000, 11, 11 },
[GATING_EN] = { 0x0000, 10, 10 },
@@ -455,6 +464,15 @@ static const struct dsigrf_reg rk3588_dsi1_grf_reg_fields[MAX_FIELDS] = {
[IPI_FORMAT] = { 0x0004, 0, 3 },
};
+static const struct rockchip_dw_dsi2_chip_data rk3576_chip_data[] = {
+ {
+ .reg = 0x27d80000,
+ .grf_regs = rk3576_dsi_grf_reg_fields,
+ .max_bit_rate_per_lane = 2500000ULL,
+ },
+ { /* sentinel */ }
+};
+
static const struct rockchip_dw_dsi2_chip_data rk3588_chip_data[] = {
{
.reg = 0xfde20000,
@@ -470,6 +488,9 @@ static const struct rockchip_dw_dsi2_chip_data rk3588_chip_data[] = {
static const struct of_device_id dw_mipi_dsi2_rockchip_dt_ids[] = {
{
+ .compatible = "rockchip,rk3576-mipi-dsi2",
+ .data = &rk3576_chip_data,
+ }, {
.compatible = "rockchip,rk3588-mipi-dsi2",
.data = &rk3588_chip_data,
},
diff --git a/drivers/gpu/drm/rockchip/dw_dp-rockchip.c b/drivers/gpu/drm/rockchip/dw_dp-rockchip.c
new file mode 100644
index 000000000000..25ab4e46301e
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/dw_dp-rockchip.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 Rockchip Electronics Co., Ltd.
+ *
+ * Author: Zhang Yubing <yubing.zhang@rock-chips.com>
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ */
+
+#include <linux/component.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <drm/bridge/dw_dp.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include <linux/media-bus-format.h>
+#include <linux/videodev2.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+struct rockchip_dw_dp {
+ struct dw_dp *base;
+ struct device *dev;
+ struct rockchip_encoder encoder;
+};
+
+static int dw_dp_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct drm_atomic_state *state = conn_state->state;
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_bridge *bridge = drm_bridge_chain_get_first_bridge(encoder);
+ struct drm_bridge_state *bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+ u32 bus_format = bridge_state->input_bus_cfg.format;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ s->output_mode = ROCKCHIP_OUT_MODE_YUV420;
+ break;
+ case MEDIA_BUS_FMT_YUYV10_1X20:
+ case MEDIA_BUS_FMT_YUYV8_1X16:
+ s->output_mode = ROCKCHIP_OUT_MODE_S888_DUMMY;
+ break;
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ default:
+ s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+ break;
+ }
+
+ s->output_type = DRM_MODE_CONNECTOR_DisplayPort;
+ s->bus_format = bus_format;
+ s->bus_flags = di->bus_flags;
+ s->color_space = V4L2_COLORSPACE_DEFAULT;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs dw_dp_encoder_helper_funcs = {
+ .atomic_check = dw_dp_encoder_atomic_check,
+};
+
+static int dw_dp_rockchip_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dw_dp_plat_data plat_data;
+ struct drm_device *drm_dev = data;
+ struct rockchip_dw_dp *dp;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ int ret;
+
+ dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+ if (!dp)
+ return -ENOMEM;
+
+ dp->dev = dev;
+ platform_set_drvdata(pdev, dp);
+
+ plat_data.max_link_rate = 810000;
+ encoder = &dp->encoder.encoder;
+ encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, dev->of_node);
+ rockchip_drm_encoder_set_crtc_endpoint_id(&dp->encoder, dev->of_node, 0, 0);
+
+ ret = drmm_encoder_init(drm_dev, encoder, NULL, DRM_MODE_ENCODER_TMDS, NULL);
+ if (ret)
+ return ret;
+ drm_encoder_helper_add(encoder, &dw_dp_encoder_helper_funcs);
+
+ dp->base = dw_dp_bind(dev, encoder, &plat_data);
+ if (IS_ERR(dp->base)) {
+ ret = PTR_ERR(dp->base);
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ return dev_err_probe(dev, ret, "Failed to init bridge connector\n");
+ }
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static const struct component_ops dw_dp_rockchip_component_ops = {
+ .bind = dw_dp_rockchip_bind,
+};
+
+static int dw_dp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return component_add(dev, &dw_dp_rockchip_component_ops);
+}
+
+static void dw_dp_remove(struct platform_device *pdev)
+{
+ struct rockchip_dw_dp *dp = platform_get_drvdata(pdev);
+
+ component_del(dp->dev, &dw_dp_rockchip_component_ops);
+}
+
+static const struct of_device_id dw_dp_of_match[] = {
+ { .compatible = "rockchip,rk3588-dp", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dw_dp_of_match);
+
+struct platform_driver dw_dp_driver = {
+ .probe = dw_dp_probe,
+ .remove = dw_dp_remove,
+ .driver = {
+ .name = "dw-dp",
+ .of_match_table = dw_dp_of_match,
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index acb59b25d928..727cdf768161 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -54,8 +55,6 @@
#define RK3568_HDMI_SDAIN_MSK BIT(15)
#define RK3568_HDMI_SCLIN_MSK BIT(14)
-#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
-
/**
* struct rockchip_hdmi_chip_data - split out the GRF settings of different kinds of chips
* @lcdsel_grf_reg: grf register offset of lcdc select
@@ -355,17 +354,14 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
dw_hdmi_phy_setup_hpd(dw_hdmi, data);
- regmap_write(hdmi->regmap,
- RK3228_GRF_SOC_CON6,
- HIWORD_UPDATE(RK3228_HDMI_HPD_VSEL | RK3228_HDMI_SDA_VSEL |
- RK3228_HDMI_SCL_VSEL,
- RK3228_HDMI_HPD_VSEL | RK3228_HDMI_SDA_VSEL |
- RK3228_HDMI_SCL_VSEL));
-
- regmap_write(hdmi->regmap,
- RK3228_GRF_SOC_CON2,
- HIWORD_UPDATE(RK3228_HDMI_SDAIN_MSK | RK3228_HDMI_SCLIN_MSK,
- RK3228_HDMI_SDAIN_MSK | RK3228_HDMI_SCLIN_MSK));
+ regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON6,
+ FIELD_PREP_WM16(RK3228_HDMI_HPD_VSEL, 1) |
+ FIELD_PREP_WM16(RK3228_HDMI_SDA_VSEL, 1) |
+ FIELD_PREP_WM16(RK3228_HDMI_SCL_VSEL, 1));
+
+ regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2,
+ FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1));
}
static enum drm_connector_status
@@ -377,15 +373,13 @@ dw_hdmi_rk3328_read_hpd(struct dw_hdmi *dw_hdmi, void *data)
status = dw_hdmi_phy_read_hpd(dw_hdmi, data);
if (status == connector_status_connected)
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON4,
- HIWORD_UPDATE(RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V,
- RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON4,
+ FIELD_PREP_WM16(RK3328_HDMI_SDA_5V, 1) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL_5V, 1));
else
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON4,
- HIWORD_UPDATE(0, RK3328_HDMI_SDA_5V |
- RK3328_HDMI_SCL_5V));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON4,
+ FIELD_PREP_WM16(RK3328_HDMI_SDA_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL_5V, 0));
return status;
}
@@ -396,21 +390,21 @@ static void dw_hdmi_rk3328_setup_hpd(struct dw_hdmi *dw_hdmi, void *data)
dw_hdmi_phy_setup_hpd(dw_hdmi, data);
/* Enable and map pins to 3V grf-controlled io-voltage */
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON4,
- HIWORD_UPDATE(0, RK3328_HDMI_HPD_SARADC | RK3328_HDMI_CEC_5V |
- RK3328_HDMI_SDA_5V | RK3328_HDMI_SCL_5V |
- RK3328_HDMI_HPD_5V));
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON3,
- HIWORD_UPDATE(0, RK3328_HDMI_SDA5V_GRF | RK3328_HDMI_SCL5V_GRF |
- RK3328_HDMI_HPD5V_GRF |
- RK3328_HDMI_CEC5V_GRF));
- regmap_write(hdmi->regmap,
- RK3328_GRF_SOC_CON2,
- HIWORD_UPDATE(RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK,
- RK3328_HDMI_SDAIN_MSK | RK3328_HDMI_SCLIN_MSK |
- RK3328_HDMI_HPD_IOE));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON4,
+ FIELD_PREP_WM16(RK3328_HDMI_HPD_SARADC, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_CEC_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SDA_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL_5V, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_HPD_5V, 0));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON3,
+ FIELD_PREP_WM16(RK3328_HDMI_SDA5V_GRF, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCL5V_GRF, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_HPD5V_GRF, 0) |
+ FIELD_PREP_WM16(RK3328_HDMI_CEC5V_GRF, 0));
+ regmap_write(hdmi->regmap, RK3328_GRF_SOC_CON2,
+ FIELD_PREP_WM16(RK3328_HDMI_SDAIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3328_HDMI_HPD_IOE, 0));
dw_hdmi_rk3328_read_hpd(dw_hdmi, data);
}
@@ -438,8 +432,8 @@ static const struct dw_hdmi_plat_data rk3228_hdmi_drv_data = {
static struct rockchip_hdmi_chip_data rk3288_chip_data = {
.lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
- .lcdsel_big = HIWORD_UPDATE(0, RK3288_HDMI_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3288_HDMI_LCDC_SEL, RK3288_HDMI_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3288_HDMI_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3288_HDMI_LCDC_SEL, 1),
.max_tmds_clock = 340000,
};
@@ -475,8 +469,8 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = {
static struct rockchip_hdmi_chip_data rk3399_chip_data = {
.lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
- .lcdsel_big = HIWORD_UPDATE(0, RK3399_HDMI_LCDC_SEL),
- .lcdsel_lit = HIWORD_UPDATE(RK3399_HDMI_LCDC_SEL, RK3399_HDMI_LCDC_SEL),
+ .lcdsel_big = FIELD_PREP_WM16_CONST(RK3399_HDMI_LCDC_SEL, 0),
+ .lcdsel_lit = FIELD_PREP_WM16_CONST(RK3399_HDMI_LCDC_SEL, 1),
.max_tmds_clock = 594000,
};
@@ -589,10 +583,8 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
if (hdmi->chip_data == &rk3568_chip_data) {
regmap_write(hdmi->regmap, RK3568_GRF_VO_CON1,
- HIWORD_UPDATE(RK3568_HDMI_SDAIN_MSK |
- RK3568_HDMI_SCLIN_MSK,
- RK3568_HDMI_SDAIN_MSK |
- RK3568_HDMI_SCLIN_MSK));
+ FIELD_PREP_WM16(RK3568_HDMI_SDAIN_MSK, 1) |
+ FIELD_PREP_WM16(RK3568_HDMI_SCLIN_MSK, 1));
}
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
index 7d531b6f4c09..c9fe6aa3e3e3 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi_qp-rockchip.c
@@ -9,10 +9,12 @@
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/phy/phy-hdmi.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>
@@ -37,21 +39,16 @@
#define RK3576_HDMI_HDCP14_MEM_EN BIT(15)
#define RK3576_VO0_GRF_SOC_CON8 0x0020
-#define RK3576_COLOR_FORMAT_MASK (0xf << 4)
-#define RK3576_COLOR_DEPTH_MASK (0xf << 8)
-#define RK3576_RGB (0 << 4)
-#define RK3576_YUV422 (0x1 << 4)
-#define RK3576_YUV444 (0x2 << 4)
-#define RK3576_YUV420 (0x3 << 4)
-#define RK3576_8BPC (0x0 << 8)
-#define RK3576_10BPC (0x6 << 8)
+#define RK3576_COLOR_DEPTH_MASK GENMASK(11, 8)
+#define RK3576_8BPC 0x0
+#define RK3576_10BPC 0x6
+#define RK3576_COLOR_FORMAT_MASK GENMASK(7, 4)
+#define RK3576_RGB 0x9
+#define RK3576_YUV422 0x1
+#define RK3576_YUV444 0x2
+#define RK3576_YUV420 0x3
#define RK3576_CECIN_MASK BIT(3)
-#define RK3576_VO0_GRF_SOC_CON12 0x0030
-#define RK3576_GRF_OSDA_DLYN (0xf << 12)
-#define RK3576_GRF_OSDA_DIV (0x7f << 1)
-#define RK3576_GRF_OSDA_DLY_EN BIT(0)
-
#define RK3576_VO0_GRF_SOC_CON14 0x0038
#define RK3576_I2S_SEL_MASK BIT(0)
#define RK3576_SPDIF_SEL_MASK BIT(1)
@@ -66,12 +63,19 @@
#define RK3588_HDMI1_HPD_INT_MSK BIT(15)
#define RK3588_HDMI1_HPD_INT_CLR BIT(14)
#define RK3588_GRF_SOC_CON7 0x031c
-#define RK3588_SET_HPD_PATH_MASK GENMASK(13, 12)
+#define RK3588_HPD_HDMI0_IO_EN_MASK BIT(12)
+#define RK3588_HPD_HDMI1_IO_EN_MASK BIT(13)
#define RK3588_GRF_SOC_STATUS1 0x0384
#define RK3588_HDMI0_LEVEL_INT BIT(16)
#define RK3588_HDMI1_LEVEL_INT BIT(24)
#define RK3588_GRF_VO1_CON3 0x000c
#define RK3588_GRF_VO1_CON6 0x0018
+#define RK3588_COLOR_DEPTH_MASK GENMASK(7, 4)
+#define RK3588_8BPC 0x0
+#define RK3588_10BPC 0x6
+#define RK3588_COLOR_FORMAT_MASK GENMASK(3, 0)
+#define RK3588_RGB 0x0
+#define RK3588_YUV420 0x3
#define RK3588_SCLIN_MASK BIT(9)
#define RK3588_SDAIN_MASK BIT(10)
#define RK3588_MODE_MASK BIT(11)
@@ -80,7 +84,6 @@
#define RK3588_HDMI0_GRANT_SEL BIT(10)
#define RK3588_HDMI1_GRANT_SEL BIT(12)
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
#define HOTPLUG_DEBOUNCE_MS 150
#define MAX_HDMI_PORT_NUM 2
@@ -91,14 +94,16 @@ struct rockchip_hdmi_qp {
struct rockchip_encoder encoder;
struct dw_hdmi_qp *hdmi;
struct phy *phy;
- struct gpio_desc *enable_gpio;
+ struct gpio_desc *frl_enable_gpio;
struct delayed_work hpd_work;
int port_id;
const struct rockchip_hdmi_qp_ctrl_ops *ctrl_ops;
+ unsigned long long tmds_char_rate;
};
struct rockchip_hdmi_qp_ctrl_ops {
void (*io_init)(struct rockchip_hdmi_qp *hdmi);
+ void (*enc_init)(struct rockchip_hdmi_qp *hdmi, struct rockchip_crtc_state *state);
irqreturn_t (*irq_callback)(int irq, void *dev_id);
irqreturn_t (*hardirq_callback)(int irq, void *dev_id);
};
@@ -114,23 +119,15 @@ static void dw_hdmi_qp_rockchip_encoder_enable(struct drm_encoder *encoder)
{
struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder);
struct drm_crtc *crtc = encoder->crtc;
- unsigned long long rate;
/* Unconditionally switch to TMDS as FRL is not yet supported */
- gpiod_set_value(hdmi->enable_gpio, 1);
-
- if (crtc && crtc->state) {
- rate = drm_hdmi_compute_mode_clock(&crtc->state->adjusted_mode,
- 8, HDMI_COLORSPACE_RGB);
- /*
- * FIXME: Temporary workaround to pass pixel clock rate
- * to the PHY driver until phy_configure_opts_hdmi
- * becomes available in the PHY API. See also the related
- * comment in rk_hdptx_phy_power_on() from
- * drivers/phy/rockchip/phy-rockchip-samsung-hdptx.c
- */
- phy_set_bus_width(hdmi->phy, div_u64(rate, 100));
- }
+ gpiod_set_value(hdmi->frl_enable_gpio, 0);
+
+ if (!crtc || !crtc->state)
+ return;
+
+ if (hdmi->ctrl_ops->enc_init)
+ hdmi->ctrl_ops->enc_init(hdmi, to_rockchip_crtc_state(crtc->state));
}
static int
@@ -138,12 +135,29 @@ dw_hdmi_qp_rockchip_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct rockchip_hdmi_qp *hdmi = to_rockchip_hdmi_qp(encoder);
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ union phy_configure_opts phy_cfg = {};
+ int ret;
+
+ if (hdmi->tmds_char_rate == conn_state->hdmi.tmds_char_rate &&
+ s->output_bpc == conn_state->hdmi.output_bpc)
+ return 0;
+
+ phy_cfg.hdmi.tmds_char_rate = conn_state->hdmi.tmds_char_rate;
+ phy_cfg.hdmi.bpc = conn_state->hdmi.output_bpc;
+
+ ret = phy_configure(hdmi->phy, &phy_cfg);
+ if (!ret) {
+ hdmi->tmds_char_rate = conn_state->hdmi.tmds_char_rate;
+ s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
+ s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+ s->output_bpc = conn_state->hdmi.output_bpc;
+ } else {
+ dev_err(hdmi->dev, "Failed to configure phy: %d\n", ret);
+ }
- s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
- s->output_type = DRM_MODE_CONNECTOR_HDMIA;
-
- return 0;
+ return ret;
}
static const struct
@@ -185,11 +199,11 @@ static void dw_hdmi_qp_rk3588_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
u32 val;
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
- RK3588_HDMI1_HPD_INT_CLR | RK3588_HDMI1_HPD_INT_MSK);
+ val = (FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_CLR, 1) |
+ FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 0));
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR | RK3588_HDMI0_HPD_INT_MSK);
+ val = (FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_CLR, 1) |
+ FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 0));
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
}
@@ -218,8 +232,8 @@ static void dw_hdmi_qp_rk3576_setup_hpd(struct dw_hdmi_qp *dw_hdmi, void *data)
struct rockchip_hdmi_qp *hdmi = (struct rockchip_hdmi_qp *)data;
u32 val;
- val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR,
- RK3576_HDMI_HPD_INT_CLR | RK3576_HDMI_HPD_INT_MSK);
+ val = (FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_CLR, 1) |
+ FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 0));
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
regmap_write(hdmi->regmap, 0xa404, 0xffff0102);
@@ -254,7 +268,7 @@ static irqreturn_t dw_hdmi_qp_rk3576_hardirq(int irq, void *dev_id)
regmap_read(hdmi->regmap, RK3576_IOC_HDMI_HPD_STATUS, &intr_stat);
if (intr_stat) {
- val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_MSK, RK3576_HDMI_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 1);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
return IRQ_WAKE_THREAD;
@@ -273,12 +287,12 @@ static irqreturn_t dw_hdmi_qp_rk3576_irq(int irq, void *dev_id)
if (!intr_stat)
return IRQ_NONE;
- val = HIWORD_UPDATE(RK3576_HDMI_HPD_INT_CLR, RK3576_HDMI_HPD_INT_CLR);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_CLR, 1);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
mod_delayed_work(system_wq, &hdmi->hpd_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
- val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 0);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
return IRQ_HANDLED;
@@ -293,11 +307,9 @@ static irqreturn_t dw_hdmi_qp_rk3588_hardirq(int irq, void *dev_id)
if (intr_stat) {
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK,
- RK3588_HDMI1_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK,
- RK3588_HDMI0_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_WAKE_THREAD;
}
@@ -315,20 +327,18 @@ static irqreturn_t dw_hdmi_qp_rk3588_irq(int irq, void *dev_id)
return IRQ_NONE;
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_CLR,
- RK3588_HDMI1_HPD_INT_CLR);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_CLR, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_CLR,
- RK3588_HDMI0_HPD_INT_CLR);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_CLR, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
mod_delayed_work(system_wq, &hdmi->hpd_work,
msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
if (hdmi->port_id)
- val |= HIWORD_UPDATE(0, RK3588_HDMI1_HPD_INT_MSK);
+ val |= FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 0);
else
- val |= HIWORD_UPDATE(0, RK3588_HDMI0_HPD_INT_MSK);
+ val |= FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 0);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
return IRQ_HANDLED;
@@ -338,14 +348,14 @@ static void dw_hdmi_qp_rk3576_io_init(struct rockchip_hdmi_qp *hdmi)
{
u32 val;
- val = HIWORD_UPDATE(RK3576_SCLIN_MASK, RK3576_SCLIN_MASK) |
- HIWORD_UPDATE(RK3576_SDAIN_MASK, RK3576_SDAIN_MASK) |
- HIWORD_UPDATE(RK3576_HDMI_GRANT_SEL, RK3576_HDMI_GRANT_SEL) |
- HIWORD_UPDATE(RK3576_I2S_SEL_MASK, RK3576_I2S_SEL_MASK);
+ val = FIELD_PREP_WM16(RK3576_SCLIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3576_SDAIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3576_HDMI_GRANT_SEL, 1) |
+ FIELD_PREP_WM16(RK3576_I2S_SEL_MASK, 1);
regmap_write(hdmi->vo_regmap, RK3576_VO0_GRF_SOC_CON14, val);
- val = HIWORD_UPDATE(0, RK3576_HDMI_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3576_HDMI_HPD_INT_MSK, 0);
regmap_write(hdmi->regmap, RK3576_IOC_MISC_CON0, val);
}
@@ -353,39 +363,70 @@ static void dw_hdmi_qp_rk3588_io_init(struct rockchip_hdmi_qp *hdmi)
{
u32 val;
- val = HIWORD_UPDATE(RK3588_SCLIN_MASK, RK3588_SCLIN_MASK) |
- HIWORD_UPDATE(RK3588_SDAIN_MASK, RK3588_SDAIN_MASK) |
- HIWORD_UPDATE(RK3588_MODE_MASK, RK3588_MODE_MASK) |
- HIWORD_UPDATE(RK3588_I2S_SEL_MASK, RK3588_I2S_SEL_MASK);
+ val = FIELD_PREP_WM16(RK3588_SCLIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_SDAIN_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_MODE_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_I2S_SEL_MASK, 1);
regmap_write(hdmi->vo_regmap,
hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
val);
- val = HIWORD_UPDATE(RK3588_SET_HPD_PATH_MASK, RK3588_SET_HPD_PATH_MASK);
+ val = FIELD_PREP_WM16(RK3588_HPD_HDMI0_IO_EN_MASK, 1) |
+ FIELD_PREP_WM16(RK3588_HPD_HDMI1_IO_EN_MASK, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON7, val);
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_GRANT_SEL, RK3588_HDMI1_GRANT_SEL);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_GRANT_SEL, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_GRANT_SEL, RK3588_HDMI0_GRANT_SEL);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_GRANT_SEL, 1);
regmap_write(hdmi->vo_regmap, RK3588_GRF_VO1_CON9, val);
if (hdmi->port_id)
- val = HIWORD_UPDATE(RK3588_HDMI1_HPD_INT_MSK, RK3588_HDMI1_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI1_HPD_INT_MSK, 1);
else
- val = HIWORD_UPDATE(RK3588_HDMI0_HPD_INT_MSK, RK3588_HDMI0_HPD_INT_MSK);
+ val = FIELD_PREP_WM16(RK3588_HDMI0_HPD_INT_MSK, 1);
regmap_write(hdmi->regmap, RK3588_GRF_SOC_CON2, val);
}
+static void dw_hdmi_qp_rk3576_enc_init(struct rockchip_hdmi_qp *hdmi,
+ struct rockchip_crtc_state *state)
+{
+ u32 val;
+
+ if (state->output_bpc == 10)
+ val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_10BPC);
+ else
+ val = FIELD_PREP_WM16(RK3576_COLOR_DEPTH_MASK, RK3576_8BPC);
+
+ regmap_write(hdmi->vo_regmap, RK3576_VO0_GRF_SOC_CON8, val);
+}
+
+static void dw_hdmi_qp_rk3588_enc_init(struct rockchip_hdmi_qp *hdmi,
+ struct rockchip_crtc_state *state)
+{
+ u32 val;
+
+ if (state->output_bpc == 10)
+ val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_10BPC);
+ else
+ val = FIELD_PREP_WM16(RK3588_COLOR_DEPTH_MASK, RK3588_8BPC);
+
+ regmap_write(hdmi->vo_regmap,
+ hdmi->port_id ? RK3588_GRF_VO1_CON6 : RK3588_GRF_VO1_CON3,
+ val);
+}
+
static const struct rockchip_hdmi_qp_ctrl_ops rk3576_hdmi_ctrl_ops = {
.io_init = dw_hdmi_qp_rk3576_io_init,
- .irq_callback = dw_hdmi_qp_rk3576_irq,
+ .enc_init = dw_hdmi_qp_rk3576_enc_init,
+ .irq_callback = dw_hdmi_qp_rk3576_irq,
.hardirq_callback = dw_hdmi_qp_rk3576_hardirq,
};
static const struct rockchip_hdmi_qp_ctrl_ops rk3588_hdmi_ctrl_ops = {
.io_init = dw_hdmi_qp_rk3588_io_init,
- .irq_callback = dw_hdmi_qp_rk3588_irq,
+ .enc_init = dw_hdmi_qp_rk3588_enc_init,
+ .irq_callback = dw_hdmi_qp_rk3588_irq,
.hardirq_callback = dw_hdmi_qp_rk3588_hardirq,
};
@@ -431,14 +472,15 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct dw_hdmi_qp_plat_data plat_data = {};
const struct rockchip_hdmi_qp_cfg *cfg;
- struct dw_hdmi_qp_plat_data plat_data;
struct drm_device *drm = data;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct rockchip_hdmi_qp *hdmi;
struct resource *res;
struct clk_bulk_data *clks;
+ struct clk *ref_clk;
int ret, irq, i;
if (!pdev->dev.of_node)
@@ -457,10 +499,8 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
return -ENODEV;
if (!cfg->ctrl_ops || !cfg->ctrl_ops->io_init ||
- !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback) {
- dev_err(dev, "Missing platform ctrl ops\n");
- return -ENODEV;
- }
+ !cfg->ctrl_ops->irq_callback || !cfg->ctrl_ops->hardirq_callback)
+ return dev_err_probe(dev, -ENODEV, "Missing platform ctrl ops\n");
hdmi->ctrl_ops = cfg->ctrl_ops;
hdmi->dev = &pdev->dev;
@@ -473,13 +513,13 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
break;
}
}
- if (hdmi->port_id < 0) {
- dev_err(hdmi->dev, "Failed to match HDMI port ID\n");
- return hdmi->port_id;
- }
+ if (hdmi->port_id < 0)
+ return dev_err_probe(hdmi->dev, hdmi->port_id,
+ "Failed to match HDMI port ID\n");
plat_data.phy_ops = cfg->phy_ops;
plat_data.phy_data = hdmi;
+ plat_data.max_bpc = 10;
encoder = &hdmi->encoder.encoder;
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -497,39 +537,38 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
hdmi->regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
- if (IS_ERR(hdmi->regmap)) {
- dev_err(hdmi->dev, "Unable to get rockchip,grf\n");
- return PTR_ERR(hdmi->regmap);
- }
+ if (IS_ERR(hdmi->regmap))
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->regmap),
+ "Unable to get rockchip,grf\n");
hdmi->vo_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,vo-grf");
- if (IS_ERR(hdmi->vo_regmap)) {
- dev_err(hdmi->dev, "Unable to get rockchip,vo-grf\n");
- return PTR_ERR(hdmi->vo_regmap);
- }
+ if (IS_ERR(hdmi->vo_regmap))
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->vo_regmap),
+ "Unable to get rockchip,vo-grf\n");
ret = devm_clk_bulk_get_all_enabled(hdmi->dev, &clks);
- if (ret < 0) {
- dev_err(hdmi->dev, "Failed to get clocks: %d\n", ret);
- return ret;
- }
+ if (ret < 0)
+ return dev_err_probe(hdmi->dev, ret, "Failed to get clocks\n");
- hdmi->enable_gpio = devm_gpiod_get_optional(hdmi->dev, "enable",
- GPIOD_OUT_HIGH);
- if (IS_ERR(hdmi->enable_gpio)) {
- ret = PTR_ERR(hdmi->enable_gpio);
- dev_err(hdmi->dev, "Failed to request enable GPIO: %d\n", ret);
- return ret;
- }
+ ref_clk = clk_get(hdmi->dev, "ref");
+ if (IS_ERR(ref_clk))
+ return dev_err_probe(hdmi->dev, PTR_ERR(ref_clk),
+ "Failed to get ref clock\n");
+
+ plat_data.ref_clk_rate = clk_get_rate(ref_clk);
+ clk_put(ref_clk);
+
+ hdmi->frl_enable_gpio = devm_gpiod_get_optional(hdmi->dev, "frl-enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(hdmi->frl_enable_gpio))
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->frl_enable_gpio),
+ "Failed to request FRL enable GPIO\n");
hdmi->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
- if (IS_ERR(hdmi->phy)) {
- ret = PTR_ERR(hdmi->phy);
- if (ret != -EPROBE_DEFER)
- dev_err(hdmi->dev, "failed to get phy: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hdmi->phy))
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->phy),
+ "Failed to get phy\n");
cfg->ctrl_ops->io_init(hdmi);
@@ -539,6 +578,10 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
if (plat_data.main_irq < 0)
return plat_data.main_irq;
+ plat_data.cec_irq = platform_get_irq_byname(pdev, "cec");
+ if (plat_data.cec_irq < 0)
+ return plat_data.cec_irq;
+
irq = platform_get_irq_byname(pdev, "hpd");
if (irq < 0)
return irq;
@@ -558,17 +601,15 @@ static int dw_hdmi_qp_rockchip_bind(struct device *dev, struct device *master,
hdmi->hdmi = dw_hdmi_qp_bind(pdev, encoder, &plat_data);
if (IS_ERR(hdmi->hdmi)) {
- ret = PTR_ERR(hdmi->hdmi);
drm_encoder_cleanup(encoder);
- return ret;
+ return dev_err_probe(hdmi->dev, PTR_ERR(hdmi->hdmi),
+ "Failed to bind dw-hdmi-qp");
}
connector = drm_bridge_connector_init(drm, encoder);
- if (IS_ERR(connector)) {
- ret = PTR_ERR(connector);
- dev_err(hdmi->dev, "failed to init bridge connector: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(connector))
+ return dev_err_probe(hdmi->dev, PTR_ERR(connector),
+ "Failed to init bridge connector\n");
return drm_connector_attach_encoder(connector, encoder);
}
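For context, a sketch of the dev_err_probe() idiom this bind path was converted to: the helper logs the message with the error code appended, stays silent on -EPROBE_DEFER (recording the deferral reason instead), and returns the code, so each call site collapses to a single statement. The struct and function names below are illustrative only:

#include <linux/dev_printk.h>
#include <linux/phy/phy.h>

struct my_priv {			/* hypothetical driver state */
	struct phy *phy;
};

static int my_get_phy(struct device *dev, struct my_priv *priv)
{
	priv->phy = devm_of_phy_get_by_index(dev, dev->of_node, 0);
	if (IS_ERR(priv->phy))
		/* logs, defers quietly on -EPROBE_DEFER, returns the code */
		return dev_err_probe(dev, PTR_ERR(priv->phy),
				     "Failed to get phy\n");
	return 0;
}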
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 1ab3ad4bde9e..9f7a8cf0ab44 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/hdmi.h>
+#include <linux/hw_bitfield.h>
#include <linux/mfd/syscon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -21,6 +22,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -382,8 +384,6 @@ enum {
#define HDMI_CEC_BUSFREETIME_H 0xdd
#define HDMI_CEC_LOGICADDR 0xde
-#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16)
-
#define RK3036_GRF_SOC_CON2 0x148
#define RK3036_HDMI_PHSYNC BIT(4)
#define RK3036_HDMI_PVSYNC BIT(5)
@@ -756,10 +756,10 @@ static int inno_hdmi_config_video_timing(struct inno_hdmi *hdmi,
int value, psync;
if (hdmi->variant->dev_type == RK3036_HDMI) {
- psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? RK3036_HDMI_PHSYNC : 0;
- value = HIWORD_UPDATE(psync, RK3036_HDMI_PHSYNC);
- psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? RK3036_HDMI_PVSYNC : 0;
- value |= HIWORD_UPDATE(psync, RK3036_HDMI_PVSYNC);
+ psync = mode->flags & DRM_MODE_FLAG_PHSYNC ? 1 : 0;
+ value = FIELD_PREP_WM16(RK3036_HDMI_PHSYNC, psync);
+ psync = mode->flags & DRM_MODE_FLAG_PVSYNC ? 1 : 0;
+ value |= FIELD_PREP_WM16(RK3036_HDMI_PVSYNC, psync);
regmap_write(hdmi->grf, RK3036_GRF_SOC_CON2, value);
}
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index ae4a5ac2299a..997429115068 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -10,6 +10,7 @@
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 180fad5d49ad..3099408e9d05 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -22,6 +22,7 @@
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -96,6 +97,9 @@ void rockchip_drm_dma_init_device(struct drm_device *drm_dev,
private->iommu_dev = ERR_PTR(-ENODEV);
else if (!private->iommu_dev)
private->iommu_dev = dev;
+
+ if (!IS_ERR(private->iommu_dev))
+ drm_dev_set_dma_dev(drm_dev, private->iommu_dev);
}
static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
@@ -529,6 +533,7 @@ static int __init rockchip_drm_init(void)
ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver,
CONFIG_ROCKCHIP_ANALOGIX_DP);
ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP);
+ ADD_ROCKCHIP_SUB_DRIVER(dw_dp_driver, CONFIG_ROCKCHIP_DW_DP);
ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver,
CONFIG_ROCKCHIP_DW_HDMI);
ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_qp_rockchip_pltfm_driver,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index c183e82a42a5..2e86ad00979c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -87,6 +87,7 @@ int rockchip_drm_encoder_set_crtc_endpoint_id(struct rockchip_encoder *rencoder,
struct device_node *np, int port, int reg);
int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
extern struct platform_driver cdn_dp_driver;
+extern struct platform_driver dw_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_rockchip_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 6330b883efc3..df9a8bff2e22 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -9,10 +9,12 @@
#include <linux/vmalloc.h>
#include <drm/drm.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
#include "rockchip_drm_drv.h"
@@ -403,13 +405,12 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct rockchip_gem_object *rk_obj;
- int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ int ret;
- /*
- * align to 64 bytes since Mali requires it.
- */
- args->pitch = ALIGN(min_pitch, 64);
- args->size = args->pitch * args->height;
+ /* 64-byte alignment required by Mali */
+ ret = drm_mode_size_dumb(dev, args, SZ_64, 0);
+ if (ret)
+ return ret;
rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
&args->handle);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index ba6b0528d1e5..ad4ab894391a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -27,6 +27,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_self_refresh_helper.h>
#include <drm/drm_vblank.h>
@@ -826,8 +827,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -1092,7 +1092,8 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
- crtc_state = drm_atomic_get_existing_crtc_state(state, new_plane_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
/* Special case for asynchronous cursor updates. */
if (!crtc_state)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index b50927a824b4..498df0ce4680 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -29,6 +29,7 @@
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -101,7 +102,7 @@ enum vop2_afbc_format {
VOP2_AFBC_FMT_INVALID = -1,
};
-#define VOP2_MAX_DCLK_RATE 600000000
+#define VOP2_MAX_DCLK_RATE 600000000UL
/*
* bus-format types.
@@ -1003,6 +1004,8 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
struct drm_rect *src = &pstate->src;
int min_scale = FRAC_16_16(1, 8);
int max_scale = FRAC_16_16(8, 1);
+ int src_x, src_w, src_h;
+ int dest_w, dest_h;
int format;
int ret;
@@ -1013,7 +1016,7 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
vop2 = vp->vop2;
vop2_data = vop2->data;
- cstate = drm_atomic_get_existing_crtc_state(pstate->state, crtc);
+ cstate = drm_atomic_get_new_crtc_state(pstate->state, crtc);
if (WARN_ON(!cstate))
return -EINVAL;
@@ -1030,22 +1033,25 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
if (format < 0)
return format;
- if (drm_rect_width(src) >> 16 < 4 || drm_rect_height(src) >> 16 < 4 ||
- drm_rect_width(dest) < 4 || drm_rect_width(dest) < 4) {
- drm_err(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n",
- drm_rect_width(src) >> 16, drm_rect_height(src) >> 16,
- drm_rect_width(dest), drm_rect_height(dest));
- pstate->visible = false;
- return 0;
+ /* Co-ordinates have now been clipped */
+ src_x = src->x1 >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_h = drm_rect_height(src) >> 16;
+ dest_w = drm_rect_width(dest);
+ dest_h = drm_rect_height(dest);
+
+ if (src_w < 4 || src_h < 4 || dest_w < 4 || dest_h < 4) {
+ drm_dbg_kms(vop2->drm, "Invalid size: %dx%d->%dx%d, min size is 4x4\n",
+ src_w, src_h, dest_w, dest_h);
+ return -EINVAL;
}
- if (drm_rect_width(src) >> 16 > vop2_data->max_input.width ||
- drm_rect_height(src) >> 16 > vop2_data->max_input.height) {
- drm_err(vop2->drm, "Invalid source: %dx%d. max input: %dx%d\n",
- drm_rect_width(src) >> 16,
- drm_rect_height(src) >> 16,
- vop2_data->max_input.width,
- vop2_data->max_input.height);
+ if (src_w > vop2_data->max_input.width ||
+ src_h > vop2_data->max_input.height) {
+ drm_dbg_kms(vop2->drm, "Invalid source: %dx%d. max input: %dx%d\n",
+ src_w, src_h,
+ vop2_data->max_input.width,
+ vop2_data->max_input.height);
return -EINVAL;
}
@@ -1053,8 +1059,8 @@ static int vop2_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd after clipping, but the start point of a YUV
* plane needs to be aligned to 2 pixels.
*/
- if (fb->format->is_yuv && ((pstate->src.x1 >> 16) % 2)) {
- drm_err(vop2->drm, "Invalid Source: Yuv format not support odd xpos\n");
+ if (fb->format->is_yuv && src_x % 2) {
+ drm_dbg_kms(vop2->drm, "Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
}
@@ -1140,7 +1146,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
struct vop2 *vop2 = win->vop2;
struct drm_framebuffer *fb = pstate->fb;
u32 bpp = vop2_get_bpp(fb->format);
- u32 actual_w, actual_h, dsp_w, dsp_h;
+ u32 src_w, src_h, dsp_w, dsp_h;
u32 act_info, dsp_info;
u32 format;
u32 afbc_format;
@@ -1204,8 +1210,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
uv_mst = rk_obj->dma_addr + offset + fb->offsets[1];
}
- actual_w = drm_rect_width(src) >> 16;
- actual_h = drm_rect_height(src) >> 16;
+ src_w = drm_rect_width(src) >> 16;
+ src_h = drm_rect_height(src) >> 16;
dsp_w = drm_rect_width(dest);
if (dest->x1 + dsp_w > adjusted_mode->hdisplay) {
@@ -1215,7 +1221,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_w = adjusted_mode->hdisplay - dest->x1;
if (dsp_w < 4)
dsp_w = 4;
- actual_w = dsp_w * actual_w / drm_rect_width(dest);
+ src_w = dsp_w * src_w / drm_rect_width(dest);
}
dsp_h = drm_rect_height(dest);
@@ -1227,35 +1233,35 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
dsp_h = adjusted_mode->vdisplay - dest->y1;
if (dsp_h < 4)
dsp_h = 4;
- actual_h = dsp_h * actual_h / drm_rect_height(dest);
+ src_h = dsp_h * src_h / drm_rect_height(dest);
}
/*
* This is a workaround for an IC design limitation:
- * esmart can't support scale down when actual_w % 16 == 1.
+ * esmart can't support scale down when src_w % 16 == 1.
*/
if (!(win->data->feature & WIN_FEATURE_AFBDC)) {
- if (actual_w > dsp_w && (actual_w & 0xf) == 1) {
+ if (src_w > dsp_w && (src_w & 0xf) == 1) {
drm_dbg_kms(vop2->drm, "vp%d %s act_w[%d] MODE 16 == 1\n",
- vp->id, win->data->name, actual_w);
- actual_w -= 1;
+ vp->id, win->data->name, src_w);
+ src_w -= 1;
}
}
- if (afbc_en && actual_w % 4) {
- drm_dbg_kms(vop2->drm, "vp%d %s actual_w[%d] not 4 pixel aligned\n",
- vp->id, win->data->name, actual_w);
- actual_w = ALIGN_DOWN(actual_w, 4);
+ if (afbc_en && src_w % 4) {
+ drm_dbg_kms(vop2->drm, "vp%d %s src_w[%d] not 4 pixel aligned\n",
+ vp->id, win->data->name, src_w);
+ src_w = ALIGN_DOWN(src_w, 4);
}
- act_info = (actual_h - 1) << 16 | ((actual_w - 1) & 0xffff);
+ act_info = (src_h - 1) << 16 | ((src_w - 1) & 0xffff);
dsp_info = (dsp_h - 1) << 16 | ((dsp_w - 1) & 0xffff);
format = vop2_convert_format(fb->format->format);
half_block_en = vop2_half_block_enable(pstate);
drm_dbg(vop2->drm, "vp%d update %s[%dx%d->%dx%d@%dx%d] fmt[%p4cc_%s] addr[%pad]\n",
- vp->id, win->data->name, actual_w, actual_h, dsp_w, dsp_h,
+ vp->id, win->data->name, src_w, src_h, dsp_w, dsp_h,
dest->x1, dest->y1,
&fb->format->format,
afbc_en ? "AFBC" : "", &yrgb_mst);
@@ -1284,7 +1290,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
if (fb->modifier & AFBC_FORMAT_MOD_YTR)
afbc_format |= (1 << 4);
- afbc_tile_num = ALIGN(actual_w, block_w) / block_w;
+ afbc_tile_num = ALIGN(src_w, block_w) / block_w;
/*
* AFBC pic_vir_width is count by pixel, this is different
@@ -1362,8 +1368,8 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
if (rotate_90 || rotate_270) {
act_info = swahw32(act_info);
- actual_w = drm_rect_height(src) >> 16;
- actual_h = drm_rect_width(src) >> 16;
+ src_w = drm_rect_height(src) >> 16;
+ src_h = drm_rect_width(src) >> 16;
}
vop2_win_write(win, VOP2_WIN_FORMAT, format);
@@ -1379,7 +1385,7 @@ static void vop2_plane_atomic_update(struct drm_plane *plane,
vop2_win_write(win, VOP2_WIN_UV_MST, uv_mst);
}
- vop2_setup_scale(vop2, win, actual_w, actual_h, dsp_w, dsp_h, fb->format->format);
+ vop2_setup_scale(vop2, win, src_w, src_h, dsp_w, dsp_h, fb->format->format);
if (!vop2_cluster_window(win))
vop2_plane_setup_color_key(plane, 0);
vop2_win_write(win, VOP2_WIN_ACT_INFO, act_info);
@@ -1737,36 +1743,42 @@ static void vop2_crtc_atomic_enable(struct drm_crtc *crtc,
* Switch to HDMI PHY PLL as DCLK source for display modes up
* to 4K@60Hz, if available, otherwise keep using the system CRU.
*/
- if ((vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) && clock <= VOP2_MAX_DCLK_RATE) {
- drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
- struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
-
- if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) {
- if (!vop2->pll_hdmiphy0)
+ if (vop2->pll_hdmiphy0 || vop2->pll_hdmiphy1) {
+ unsigned long max_dclk = DIV_ROUND_CLOSEST_ULL(VOP2_MAX_DCLK_RATE * 8,
+ vcstate->output_bpc);
+ if (clock <= max_dclk) {
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc_state->encoder_mask) {
+ struct rockchip_encoder *rkencoder = to_rockchip_encoder(encoder);
+
+ if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI0) {
+ if (!vop2->pll_hdmiphy0)
+ break;
+
+ if (!vp->dclk_src)
+ vp->dclk_src = clk_get_parent(vp->dclk);
+
+ ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0);
+ if (ret < 0)
+ drm_warn(vop2->drm,
+ "Could not switch to HDMI0 PHY PLL: %d\n",
+ ret);
break;
+ }
- if (!vp->dclk_src)
- vp->dclk_src = clk_get_parent(vp->dclk);
+ if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) {
+ if (!vop2->pll_hdmiphy1)
+ break;
- ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy0);
- if (ret < 0)
- drm_warn(vop2->drm,
- "Could not switch to HDMI0 PHY PLL: %d\n", ret);
- break;
- }
+ if (!vp->dclk_src)
+ vp->dclk_src = clk_get_parent(vp->dclk);
- if (rkencoder->crtc_endpoint_id == ROCKCHIP_VOP2_EP_HDMI1) {
- if (!vop2->pll_hdmiphy1)
+ ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1);
+ if (ret < 0)
+ drm_warn(vop2->drm,
+ "Could not switch to HDMI1 PHY PLL: %d\n",
+ ret);
break;
-
- if (!vp->dclk_src)
- vp->dclk_src = clk_get_parent(vp->dclk);
-
- ret = clk_set_parent(vp->dclk, vop2->pll_hdmiphy1);
- if (ret < 0)
- drm_warn(vop2->drm,
- "Could not switch to HDMI1 PHY PLL: %d\n", ret);
- break;
+ }
}
}
}
@@ -2647,6 +2659,12 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(vop2->map))
return PTR_ERR(vop2->map);
+ /* Set the bounds for framebuffer creation */
+ drm->mode_config.min_width = 4;
+ drm->mode_config.min_height = 4;
+ drm->mode_config.max_width = vop2_data->max_input.width;
+ drm->mode_config.max_height = vop2_data->max_input.height;
+
ret = vop2_win_init(vop2);
if (ret)
return ret;
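The PHY PLL gate in vop2_crtc_atomic_enable() now scales the fixed 600 MHz cap by output depth, since a 10-bpc stream serializes 10/8 as many bits per pixel clock. A worked check of that division:

#include <linux/math64.h>

/* Same computation as max_dclk above, values in Hz. */
static unsigned long max_dclk_for_bpc(u32 output_bpc)
{
	return DIV_ROUND_CLOSEST_ULL(600000000ULL * 8, output_bpc);
	/* 8 bpc -> 600000000 (full budget), 10 bpc -> 480000000 */
}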
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
index fa5c56f16047..9124191899ba 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h
@@ -33,7 +33,6 @@
#define WIN_FEATURE_AFBDC BIT(0)
#define WIN_FEATURE_CLUSTER BIT(1)
-#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
/*
* the delay number of a window in different mode.
*/
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 2411260db51d..75f898a10cbc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -22,6 +22,7 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.h b/drivers/gpu/drm/rockchip/rockchip_lvds.h
index ca83d7b6bea7..2d92447d819b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.h
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.h
@@ -9,6 +9,9 @@
#ifndef _ROCKCHIP_LVDS_
#define _ROCKCHIP_LVDS_
+#include <linux/bits.h>
+#include <linux/hw_bitfield.h>
+
#define RK3288_LVDS_CH0_REG0 0x00
#define RK3288_LVDS_CH0_REG0_LVDS_EN BIT(7)
#define RK3288_LVDS_CH0_REG0_TTL_EN BIT(6)
@@ -106,18 +109,16 @@
#define LVDS_VESA_18 2
#define LVDS_JEIDA_18 3
-#define HIWORD_UPDATE(v, h, l) ((GENMASK(h, l) << 16) | ((v) << (l)))
-
#define PX30_LVDS_GRF_PD_VO_CON0 0x434
-#define PX30_LVDS_TIE_CLKS(val) HIWORD_UPDATE(val, 8, 8)
-#define PX30_LVDS_INVERT_CLKS(val) HIWORD_UPDATE(val, 9, 9)
-#define PX30_LVDS_INVERT_DCLK(val) HIWORD_UPDATE(val, 5, 5)
+#define PX30_LVDS_TIE_CLKS(val) FIELD_PREP_WM16(BIT(8), (val))
+#define PX30_LVDS_INVERT_CLKS(val) FIELD_PREP_WM16(BIT(9), (val))
+#define PX30_LVDS_INVERT_DCLK(val) FIELD_PREP_WM16(BIT(5), (val))
#define PX30_LVDS_GRF_PD_VO_CON1 0x438
-#define PX30_LVDS_FORMAT(val) HIWORD_UPDATE(val, 14, 13)
-#define PX30_LVDS_MODE_EN(val) HIWORD_UPDATE(val, 12, 12)
-#define PX30_LVDS_MSBSEL(val) HIWORD_UPDATE(val, 11, 11)
-#define PX30_LVDS_P2S_EN(val) HIWORD_UPDATE(val, 6, 6)
-#define PX30_LVDS_VOP_SEL(val) HIWORD_UPDATE(val, 1, 1)
+#define PX30_LVDS_FORMAT(val) FIELD_PREP_WM16(GENMASK(14, 13), (val))
+#define PX30_LVDS_MODE_EN(val) FIELD_PREP_WM16(BIT(12), (val))
+#define PX30_LVDS_MSBSEL(val) FIELD_PREP_WM16(BIT(11), (val))
+#define PX30_LVDS_P2S_EN(val) FIELD_PREP_WM16(BIT(6), (val))
+#define PX30_LVDS_VOP_SEL(val) FIELD_PREP_WM16(BIT(1), (val))
#endif /* _ROCKCHIP_LVDS_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 811020665120..5c0c6e2cc28d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -15,6 +15,7 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
index 45c5e3987813..cd8380f0eddc 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c
@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/component.h>
+#include <linux/hw_bitfield.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -1368,6 +1369,25 @@ static const struct vop2_regs_dump rk3588_regs_dump[] = {
},
};
+/*
+ * phys_id identifies a main window (a Cluster or Smart win, not the
+ * sub-win of a cluster or a multi-area win) that can do overlay at
+ * the main overlay stage.
+ */
+static struct vop2_win *vop2_find_win_by_phys_id(struct vop2 *vop2, uint8_t phys_id)
+{
+ struct vop2_win *win;
+ int i;
+
+ for (i = 0; i < vop2->data->win_size; i++) {
+ win = &vop2->win[i];
+ if (win->data->phys_id == phys_id)
+ return win;
+ }
+
+ return NULL;
+}
+
static unsigned long rk3568_set_intf_mux(struct vop2_video_port *vp, int id, u32 polflags)
{
struct vop2 *vop2 = vp->vop2;
@@ -1695,8 +1715,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die |= RK3588_SYS_DSP_INFACE_EN_HDMI0 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 1, 1));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 6, 5));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(1), 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0,
+ FIELD_PREP_WM16(GENMASK(6, 5), val));
break;
case ROCKCHIP_VOP2_EP_HDMI1:
div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
@@ -1707,8 +1728,9 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die |= RK3588_SYS_DSP_INFACE_EN_HDMI1 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
val = rk3588_get_hdmi_pol(polflags);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 4, 4));
- regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0, HIWORD_UPDATE(val, 8, 7));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(4), 1));
+ regmap_write(vop2->vo1_grf, RK3588_GRF_VO1_CON0,
+ FIELD_PREP_WM16(GENMASK(8, 7), val));
break;
case ROCKCHIP_VOP2_EP_EDP0:
div &= ~RK3588_DSP_IF_EDP_HDMI0_DCLK_DIV;
@@ -1718,7 +1740,7 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX;
die |= RK3588_SYS_DSP_INFACE_EN_EDP0 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI0_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 0, 0));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(0), 1));
break;
case ROCKCHIP_VOP2_EP_EDP1:
div &= ~RK3588_DSP_IF_EDP_HDMI1_DCLK_DIV;
@@ -1728,7 +1750,7 @@ static unsigned long rk3588_set_intf_mux(struct vop2_video_port *vp, int id, u32
die &= ~RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX;
die |= RK3588_SYS_DSP_INFACE_EN_EDP1 |
FIELD_PREP(RK3588_SYS_DSP_INFACE_EN_EDP_HDMI1_MUX, vp->id);
- regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, HIWORD_UPDATE(1, 3, 3));
+ regmap_write(vop2->vop_grf, RK3588_GRF_VOP_CON2, FIELD_PREP_WM16(BIT(3), 1));
break;
case ROCKCHIP_VOP2_EP_MIPI0:
div &= ~RK3588_DSP_IF_MIPI0_PCLK_DIV;
@@ -1839,15 +1861,31 @@ static void vop2_parse_alpha(struct vop2_alpha_config *alpha_config,
alpha->dst_alpha_ctrl.bits.factor_mode = ALPHA_SRC_INVERSE;
}
-static int vop2_find_start_mixer_id_for_vp(struct vop2 *vop2, u8 port_id)
+static int vop2_find_start_mixer_id_for_vp(struct vop2_video_port *vp)
{
- struct vop2_video_port *vp;
- int used_layer = 0;
+ struct vop2 *vop2 = vp->vop2;
+ struct vop2_win *win;
+ u32 layer_sel = vop2->old_layer_sel;
+ u32 used_layer = 0;
+ unsigned long win_mask = vp->win_mask;
+ unsigned long phys_id;
+ bool match;
int i;
- for (i = 0; i < port_id; i++) {
- vp = &vop2->vps[i];
- used_layer += hweight32(vp->win_mask);
+ for (i = 0; i < 31; i += 4) {
+ match = false;
+ for_each_set_bit(phys_id, &win_mask, ROCKCHIP_VOP2_ESMART3) {
+ win = vop2_find_win_by_phys_id(vop2, phys_id);
+ if (win->data->layer_sel_id[vp->id] == ((layer_sel >> i) & 0xf)) {
+ match = true;
+ break;
+ }
+ }
+
+ if (!match)
+ used_layer += 1;
+ else
+ break;
}
return used_layer;
@@ -1932,7 +1970,7 @@ static void vop2_setup_alpha(struct vop2_video_port *vp)
u32 dst_global_alpha = DRM_BLEND_ALPHA_OPAQUE;
if (vop2->version <= VOP_VERSION_RK3588)
- mixer_id = vop2_find_start_mixer_id_for_vp(vop2, vp->id);
+ mixer_id = vop2_find_start_mixer_id_for_vp(vp);
else
mixer_id = 0;
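vop2_find_start_mixer_id_for_vp() now derives the first mixer from the hardware's previous LAYER_SEL programming rather than from static port order: it walks the register one 4-bit slot at a time and counts slots until one names a window this port owns. A worked example with hypothetical values:

/*
 * Hypothetical: old_layer_sel = 0x00543210 encodes window ids 0..5 in
 * nibbles 0..5. If this VP's windows have layer_sel_id 3, 4 and 5,
 * the nibbles at i = 0, 4 and 8 (ids 0, 1, 2) fail to match, the
 * nibble at i = 12 matches id 3, and the function returns
 * used_layer = 3: mixing for this port starts at mixer 3.
 */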
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index d1f788763318..219f8c2fa88e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -880,6 +880,7 @@ static const struct vop_data rk3368_vop = {
.win = rk3368_vop_win_data,
.win_size = ARRAY_SIZE(rk3368_vop_win_data),
.max_output = { 4096, 2160 },
+ .lut_size = 1024,
};
static const struct vop_intr rk3366_vop_intr = {
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 3d06f72531ba..fe174a4857be 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -70,6 +70,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
+ entity->last_user = current->group_leader;
/*
* It's perfectly valid to initialize an entity without having a valid
* scheduler attached. It's just not valid to use the scheduler before it
@@ -172,26 +173,15 @@ int drm_sched_entity_error(struct drm_sched_entity *entity)
}
EXPORT_SYMBOL(drm_sched_entity_error);
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb);
+
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
-
- drm_sched_fence_scheduled(job->s_fence, NULL);
- drm_sched_fence_finished(job->s_fence, -ESRCH);
- WARN_ON(job->s_fence->parent);
- job->sched->ops->free_job(job);
-}
-
-/* Signal the scheduler finished fence when the entity in question is killed. */
-static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
- struct dma_fence_cb *cb)
-{
- struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
- finish_cb);
+ struct dma_fence *f;
unsigned long index;
- dma_fence_put(f);
-
/* Wait for all dependencies to avoid data corruption */
xa_for_each(&job->dependencies, index, f) {
struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
@@ -219,6 +209,21 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
dma_fence_put(f);
}
+ drm_sched_fence_scheduled(job->s_fence, NULL);
+ drm_sched_fence_finished(job->s_fence, -ESRCH);
+ WARN_ON(job->s_fence->parent);
+ job->sched->ops->free_job(job);
+}
+
+/* Signal the scheduler finished fence when the entity in question is killed. */
+static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
+ struct dma_fence_cb *cb)
+{
+ struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
+ finish_cb);
+
+ dma_fence_put(f);
+
INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
schedule_work(&job->work);
}
@@ -285,9 +290,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
return 0;
sched = entity->rq->sched;
- /**
- * The client will not queue more IBs during this fini, consume existing
- * queued IBs or discard them on SIGKILL
+ /*
+ * The client will not queue more jobs during this fini - consume
+ * existing queued ones, or discard them on SIGKILL.
*/
if (current->flags & PF_EXITING) {
if (timeout)
@@ -300,9 +305,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
drm_sched_entity_is_idle(entity));
}
- /* For killed process disable any more IBs enqueue right now */
+ /* For a killed process disallow further enqueueing of jobs. */
last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
- if ((!last_user || last_user == current->group_leader) &&
+ if (last_user == current->group_leader &&
(current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
drm_sched_entity_kill(entity);
@@ -324,9 +329,9 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
/*
- * If consumption of existing IBs wasn't completed. Forcefully remove
- * them here. Also makes sure that the scheduler won't touch this entity
- * any more.
+ * If consumption of existing jobs wasn't completed forcefully remove
+ * them. Also makes sure that the scheduler won't touch this entity any
+ * more.
*/
drm_sched_entity_kill(entity);
@@ -552,10 +557,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
}
- spin_unlock(&entity->lock);
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
+
+ spin_unlock(&entity->lock);
}
/**
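The flush change above tightens the entity->last_user handshake: init now seeds it with the creating group leader, so a NULL read can only mean another task already claimed teardown, and the old "!last_user" escape hatch goes away. A compact model of the claim, with an illustrative helper name:

#include <linux/atomic.h>

struct task_struct;

/*
 * Sketch: only the task whose group leader is still recorded in
 * @last_user may kill the entity; cmpxchg() makes the claim exclusive
 * by swapping in NULL exactly once.
 */
static bool may_kill_entity(struct task_struct **last_user,
			    struct task_struct *leader)
{
	return cmpxchg(last_user, leader, NULL) == leader;
}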
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index e2cda28a1af4..1d4f1b822e7b 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -349,37 +349,16 @@ static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
}
/**
- * __drm_sched_run_free_queue - enqueue free-job work
+ * drm_sched_run_free_queue - enqueue free-job work
* @sched: scheduler instance
*/
-static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
{
if (!READ_ONCE(sched->pause_submit))
queue_work(sched->submit_wq, &sched->work_free_job);
}
/**
- * drm_sched_run_free_queue - enqueue free-job work if ready
- * @sched: scheduler instance
- */
-static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
-{
- struct drm_sched_job *job;
-
- job = list_first_entry_or_null(&sched->pending_list,
- struct drm_sched_job, list);
- if (job && dma_fence_is_signaled(&job->s_fence->finished))
- __drm_sched_run_free_queue(sched);
-}
-
-static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched)
-{
- spin_lock(&sched->job_list_lock);
- drm_sched_run_free_queue(sched);
- spin_unlock(&sched->job_list_lock);
-}
-
-/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
@@ -398,7 +377,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
dma_fence_put(&s_fence->finished);
- __drm_sched_run_free_queue(sched);
+ drm_sched_run_free_queue(sched);
}
/**
@@ -986,13 +965,14 @@ int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
dma_resv_assert_held(resv);
dma_resv_for_each_fence(&cursor, resv, usage, fence) {
- /* Make sure to grab an additional ref on the added fence */
- dma_fence_get(fence);
- ret = drm_sched_job_add_dependency(job, fence);
- if (ret) {
- dma_fence_put(fence);
+ /*
+ * As drm_sched_job_add_dependency always consumes the fence
+ * reference (even when it fails), and dma_resv_for_each_fence
+ * is not obtaining one, we need to grab one before calling.
+ */
+ ret = drm_sched_job_add_dependency(job, dma_fence_get(fence));
+ if (ret)
return ret;
- }
}
return 0;
}
@@ -1134,12 +1114,16 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
* drm_sched_get_finished_job - fetch the next finished job to be destroyed
*
* @sched: scheduler instance
+ * @have_more: are there more finished jobs on the list
+ *
+ * Informs the caller through @have_more whether there are more finished jobs
+ * besides the returned one.
*
* Returns the next finished job from the pending list (if there is one)
* ready for it to be destroyed.
*/
static struct drm_sched_job *
-drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
+drm_sched_get_finished_job(struct drm_gpu_scheduler *sched, bool *have_more)
{
struct drm_sched_job *job, *next;
@@ -1147,22 +1131,25 @@ drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
job = list_first_entry_or_null(&sched->pending_list,
struct drm_sched_job, list);
-
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
/* cancel this job's TO timer */
cancel_delayed_work(&sched->work_tdr);
- /* make the scheduled timestamp more accurate */
+
+ *have_more = false;
next = list_first_entry_or_null(&sched->pending_list,
typeof(*next), list);
-
if (next) {
+ /* make the scheduled timestamp more accurate */
if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
&next->s_fence->scheduled.flags))
next->s_fence->scheduled.timestamp =
dma_fence_timestamp(&job->s_fence->finished);
+
+ *have_more = dma_fence_is_signaled(&next->s_fence->finished);
+
/* start TO timer for next job */
drm_sched_start_timeout(sched);
}
@@ -1221,12 +1208,15 @@ static void drm_sched_free_job_work(struct work_struct *w)
struct drm_gpu_scheduler *sched =
container_of(w, struct drm_gpu_scheduler, work_free_job);
struct drm_sched_job *job;
+ bool have_more;
- job = drm_sched_get_finished_job(sched);
- if (job)
+ job = drm_sched_get_finished_job(sched, &have_more);
+ if (job) {
sched->ops->free_job(job);
+ if (have_more)
+ drm_sched_run_free_queue(sched);
+ }
- drm_sched_run_free_queue_unlocked(sched);
drm_sched_run_job_queue(sched);
}
@@ -1247,8 +1237,13 @@ static void drm_sched_run_job_work(struct work_struct *w)
/* Find entity with a ready job */
entity = drm_sched_select_entity(sched);
- if (!entity)
- return; /* No more work */
+ if (!entity) {
+ /*
+ * Either no more work to do, or the next ready job needs more
+ * credits than the scheduler has currently available.
+ */
+ return;
+ }
sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job) {
@@ -1325,7 +1320,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
sched->name = args->name;
sched->timeout = args->timeout;
sched->hang_limit = args->hang_limit;
- sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_wq;
+ sched->timeout_wq = args->timeout_wq ? args->timeout_wq : system_percpu_wq;
sched->score = args->score ? args->score : &sched->_score;
sched->dev = args->dev;
@@ -1430,13 +1425,36 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
struct drm_sched_rq *rq = sched->sched_rq[i];
spin_lock(&rq->lock);
- list_for_each_entry(s_entity, &rq->entities, list)
+ list_for_each_entry(s_entity, &rq->entities, list) {
/*
* Prevents reinsertion and marks job_queue as idle,
* it will be removed from the rq in drm_sched_entity_fini()
* eventually
+ *
+ * FIXME:
+ * This lacks the proper spin_lock(&s_entity->lock) and
+ * is, therefore, a race condition. Most notably, it
+ * can race with drm_sched_entity_push_job(). The lock
+ * cannot be taken here, however, because this would
+ * lead to lock inversion -> deadlock.
+ *
+ * The best solution probably is to enforce the lifetime
+ * rule that all entities have to be torn down before
+ * their scheduler. Then, however, locking could be
+ * dropped altogether from this function.
+ *
+ * For now, this remains a potential race in all
+ * drivers that keep entities alive for longer than
+ * the scheduler.
+ *
+ * The READ_ONCE() is there to make the lockless read
+ * (warning about the lockless write below) slightly
+ * less broken...
*/
+ if (!READ_ONCE(s_entity->stopped))
+ dev_warn(sched->dev, "Tearing down scheduler with active entities!\n");
s_entity->stopped = true;
+ }
spin_unlock(&rq->lock);
kfree(sched->sched_rq[i]);
}
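With the rework above the free-job worker self-arms: drm_sched_get_finished_job() hands back one job plus a hint whether the next one is already signaled, replacing the old locked re-scan of the pending list. Simplified shape of the resulting flow (locking and the run-job kick omitted):

/* Sketch of drm_sched_free_job_work() after the change. */
static void sketch_free_job_once(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job;
	bool have_more;

	job = drm_sched_get_finished_job(sched, &have_more);
	if (job) {
		sched->ops->free_job(job);
		if (have_more)
			drm_sched_run_free_queue(sched);	/* re-arm */
	}
}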
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
index 65acffc3fea8..8e9ae7d980eb 100644
--- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -219,7 +219,7 @@ mock_sched_timedout_job(struct drm_sched_job *sched_job)
unsigned long flags;
if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) {
- job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET;
+ job->flags |= DRM_MOCK_SCHED_JOB_RESET_SKIPPED;
return DRM_GPU_SCHED_STAT_NO_HANG;
}
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index 63d4f2ac7074..553d45abd057 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -11,7 +11,6 @@
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/list.h>
-#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/types.h>
@@ -32,9 +31,8 @@
*
* @base: DRM scheduler base class
* @test: Backpointer to owning the kunit test case
- * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list
+ * @lock: Lock to protect the simulated @hw_timeline and @job_list
* @job_list: List of jobs submitted to the mock GPU
- * @done_list: List of jobs completed by the mock GPU
* @hw_timeline: Simulated hardware timeline has a @context, @next_seqno and
* @cur_seqno for implementing a struct dma_fence signaling the
* simulated job completion.
@@ -95,9 +93,10 @@ struct drm_mock_sched_job {
struct completion done;
-#define DRM_MOCK_SCHED_JOB_DONE 0x1
-#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
-#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
+#define DRM_MOCK_SCHED_JOB_DONE 0x1
+#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2
+#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4
+#define DRM_MOCK_SCHED_JOB_RESET_SKIPPED 0x8
unsigned long flags;
struct list_head link;
diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c
index 55eb142bd7c5..82a41a456b0a 100644
--- a/drivers/gpu/drm/scheduler/tests/tests_basic.c
+++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c
@@ -317,8 +317,8 @@ static void drm_sched_skip_reset(struct kunit *test)
KUNIT_ASSERT_FALSE(test, done);
KUNIT_ASSERT_EQ(test,
- job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET,
- 0);
+ job->flags & DRM_MOCK_SCHED_JOB_RESET_SKIPPED,
+ DRM_MOCK_SCHED_JOB_RESET_SKIPPED);
i = drm_mock_sched_advance(sched, 1);
KUNIT_ASSERT_EQ(test, i, 1);
diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c
index 453eb7e045e5..4e73c8b415d6 100644
--- a/drivers/gpu/drm/sitronix/st7571-i2c.c
+++ b/drivers/gpu/drm/sitronix/st7571-i2c.c
@@ -151,6 +151,7 @@ struct st7571_device {
bool ignore_nak;
bool grayscale;
+ bool inverted;
u32 height_mm;
u32 width_mm;
u32 startline;
@@ -218,10 +219,11 @@ static int st7571_send_command_list(struct st7571_device *st7571,
return ret;
}
-static inline u8 st7571_transform_xy(const char *p, int x, int y)
+static inline u8 st7571_transform_xy(const char *p, int x, int y, u8 bpp)
{
int xrest = x % 8;
u8 result = 0;
+ u8 row_len = 16 * bpp;
/*
* Transforms an (x, y) pixel coordinate into a vertical 8-bit
@@ -236,7 +238,7 @@ static inline u8 st7571_transform_xy(const char *p, int x, int y)
for (int i = 0; i < 8; i++) {
int row_idx = y + i;
- u8 byte = p[row_idx * 16 + x];
+ u8 byte = p[row_idx * row_len + x];
u8 bit = (byte >> xrest) & 1;
result |= (bit << i);
@@ -261,6 +263,7 @@ static int st7571_fb_clear_screen(struct st7571_device *st7571)
u32 npixels = st7571->ncols * round_up(st7571->nlines, ST7571_PAGE_HEIGHT) * st7571->bpp;
char pixelvalue = 0x00;
+ st7571_set_position(st7571, 0, 0);
for (int i = 0; i < npixels; i++)
regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, &pixelvalue, 1);
@@ -303,11 +306,11 @@ static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571,
struct iosys_map dst;
switch (fb->format->format) {
- case DRM_FORMAT_XRGB8888: /* Only support XRGB8888 in monochrome mode */
- dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+ case DRM_FORMAT_XRGB8888:
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 4);
iosys_map_set_vaddr(&dst, st7571->hwbuf);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
+ drm_fb_xrgb8888_to_gray2(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
break;
case DRM_FORMAT_R1:
@@ -319,7 +322,7 @@ static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571,
size = (rect->x2 - rect->x1) * (rect->y2 - rect->y1) / 4;
memcpy(st7571->hwbuf, vmap->vaddr, size);
break;
- };
+ }
}
static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct drm_rect *rect)
@@ -333,7 +336,7 @@ static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct d
for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
for (int x = rect->x1; x < rect->x2; x++)
- row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 1);
st7571_set_position(st7571, rect->x1, y);
@@ -358,14 +361,13 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr
rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines);
switch (format) {
- case DRM_FORMAT_XRGB8888:
- /* Threated as monochrome (R1) */
- fallthrough;
case DRM_FORMAT_R1:
- x1 = rect->x1;
- x2 = rect->x2;
+ x1 = rect->x1 * 1;
+ x2 = rect->x2 * 1;
break;
case DRM_FORMAT_R2:
+ fallthrough;
+ case DRM_FORMAT_XRGB8888:
x1 = rect->x1 * 2;
x2 = rect->x2 * 2;
break;
@@ -373,7 +375,7 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr
for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) {
for (int x = x1; x < x2; x++)
- row[x] = st7571_transform_xy(st7571->hwbuf, x, y);
+ row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 2);
st7571_set_position(st7571, rect->x1, y);
@@ -386,15 +388,15 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr
* even if the format is monochrome.
*
* The bit values map to the following grayscale:
- * 0 0 = White
- * 0 1 = Light gray
- * 1 0 = Dark gray
- * 1 1 = Black
+ * 0 0 = Black
+ * 0 1 = Dark gray
+ * 1 0 = Light gray
+ * 1 1 = White
*
* For monochrome formats, write the same value twice to get
* either a black or white pixel.
*/
- if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888)
+ if (format == DRM_FORMAT_R1)
regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1);
}
}
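
With the corrected table, each pixel is two bits where 0b00 is black and 0b11 is white; a monochrome source therefore just repeats its single bit in both positions, which is what writing each packed R1 byte twice achieves at the bus level. A tiny illustrative sketch of that duplication (not from the patch):

#include <stdint.h>

/* Expand a 1-bit monochrome pixel to the 2-bit grayscale encoding. */
static uint8_t mono_to_gray2(uint8_t bit)
{
	return bit ? 0x3 : 0x0;	/* 11 = white, 00 = black */
}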
@@ -792,6 +794,7 @@ static int st7567_parse_dt(struct st7571_device *st7567)
of_property_read_u32(np, "width-mm", &st7567->width_mm);
of_property_read_u32(np, "height-mm", &st7567->height_mm);
+ st7567->inverted = of_property_read_bool(np, "sitronix,inverted");
st7567->pformat = &st7571_monochrome;
st7567->bpp = 1;
@@ -819,6 +822,7 @@ static int st7571_parse_dt(struct st7571_device *st7571)
of_property_read_u32(np, "width-mm", &st7571->width_mm);
of_property_read_u32(np, "height-mm", &st7571->height_mm);
st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale");
+ st7571->inverted = of_property_read_bool(np, "sitronix,inverted");
if (st7571->grayscale) {
st7571->pformat = &st7571_grayscale;
@@ -873,7 +877,7 @@ static int st7567_lcd_init(struct st7571_device *st7567)
ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */
ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */
- ST7571_SET_REVERSE(0),
+ ST7571_SET_REVERSE(st7567->inverted ? 1 : 0),
ST7571_SET_ENTIRE_DISPLAY_ON(0),
};
@@ -917,7 +921,7 @@ static int st7571_lcd_init(struct st7571_device *st7571)
ST7571_SET_COLOR_MODE(st7571->pformat->mode),
ST7571_COMMAND_SET_NORMAL,
- ST7571_SET_REVERSE(0),
+ ST7571_SET_REVERSE(st7571->inverted ? 1 : 0),
ST7571_SET_ENTIRE_DISPLAY_ON(0),
};
@@ -1024,7 +1028,7 @@ static void st7571_remove(struct i2c_client *client)
drm_dev_unplug(&st7571->dev);
}
-struct st7571_panel_data st7567_config = {
+static const struct st7571_panel_data st7567_config = {
.init = st7567_lcd_init,
.parse_dt = st7567_parse_dt,
.constraints = {
@@ -1036,7 +1040,7 @@ struct st7571_panel_data st7567_config = {
},
};
-struct st7571_panel_data st7571_config = {
+static const struct st7571_panel_data st7571_config = {
.init = st7571_lcd_init,
.parse_dt = st7571_parse_dt,
.constraints = {
diff --git a/drivers/gpu/drm/sitronix/st7586.c b/drivers/gpu/drm/sitronix/st7586.c
index a29672d84ede..b57ebf37a664 100644
--- a/drivers/gpu/drm/sitronix/st7586.c
+++ b/drivers/gpu/drm/sitronix/st7586.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
/* controller-specific commands */
diff --git a/drivers/gpu/drm/sitronix/st7735r.c b/drivers/gpu/drm/sitronix/st7735r.c
index 1d60f6e5b3bc..c1f8228495f6 100644
--- a/drivers/gpu/drm/sitronix/st7735r.c
+++ b/drivers/gpu/drm/sitronix/st7735r.c
@@ -24,6 +24,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_print.h>
#define ST7735R_FRMCTR1 0xb1
#define ST7735R_FRMCTR2 0xb2
diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c
index 7c935870f7d2..b52f5fd592a1 100644
--- a/drivers/gpu/drm/solomon/ssd130x-spi.c
+++ b/drivers/gpu/drm/solomon/ssd130x-spi.c
@@ -74,8 +74,7 @@ static int ssd130x_spi_probe(struct spi_device *spi)
t = devm_kzalloc(dev, sizeof(*t), GFP_KERNEL);
if (!t)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate SPI transport data\n");
+ return -ENOMEM;
t->spi = spi;
t->dc = dc;
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index eec43d1a5595..96cf39320137 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -33,6 +33,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "ssd130x.h"
@@ -1016,15 +1017,9 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return ret;
-
iosys_map_set_vaddr(&dst, buf);
drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-
ssd130x_update_rect(ssd130x, rect, buf, data_array);
return ret;
@@ -1048,15 +1043,9 @@ static int ssd132x_fb_blit_rect(struct drm_framebuffer *fb,
dst_pitch = drm_rect_width(rect);
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return ret;
-
iosys_map_set_vaddr(&dst, buf);
drm_fb_xrgb8888_to_gray8(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-
ssd132x_update_rect(ssd130x, rect, buf, data_array);
return ret;
@@ -1078,15 +1067,9 @@ static int ssd133x_fb_blit_rect(struct drm_framebuffer *fb,
dst_pitch = drm_format_info_min_pitch(fi, 0, drm_rect_width(rect));
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- return ret;
-
iosys_map_set_vaddr(&dst, data_array);
drm_fb_xrgb8888_to_rgb332(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state);
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-
ssd133x_update_rect(ssd130x, rect, data_array, dst_pitch);
return ret;
@@ -1232,6 +1215,9 @@ static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(drm, &idx))
return;
+ if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE))
+ goto out_drm_dev_exit;
+
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
dst_clip = plane_state->dst;
@@ -1245,6 +1231,9 @@ static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane,
&shadow_plane_state->fmtcnv_state);
}
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+out_drm_dev_exit:
drm_dev_exit(idx);
}
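
This hunk and the two matching ones below hoist the DMA sync out of the per-rect blit helpers: one begin/end pair now brackets the whole damage walk instead of firing once per dirty rectangle. The resulting shape, as a simplified hypothetical driver function:

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect damage;
	int idx;

	if (!drm_dev_enter(plane->dev, &idx))
		return;

	/* One CPU-access bracket for the whole update. */
	if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE))
		goto out_drm_dev_exit;

	drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
	drm_atomic_for_each_plane_damage(&iter, &damage) {
		/* blit each dirty rect; CPU access is already synced */
	}

	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);

out_drm_dev_exit:
	drm_dev_exit(idx);
}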
@@ -1267,6 +1256,9 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(drm, &idx))
return;
+ if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE))
+ goto out_drm_dev_exit;
+
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
dst_clip = plane_state->dst;
@@ -1280,6 +1272,9 @@ static void ssd132x_primary_plane_atomic_update(struct drm_plane *plane,
&shadow_plane_state->fmtcnv_state);
}
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+out_drm_dev_exit:
drm_dev_exit(idx);
}
@@ -1301,6 +1296,9 @@ static void ssd133x_primary_plane_atomic_update(struct drm_plane *plane,
if (!drm_dev_enter(drm, &idx))
return;
+ if (drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE))
+ goto out_drm_dev_exit;
+
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
dst_clip = plane_state->dst;
@@ -1313,6 +1311,9 @@ static void ssd133x_primary_plane_atomic_update(struct drm_plane *plane,
&shadow_plane_state->fmtcnv_state);
}
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+out_drm_dev_exit:
drm_dev_exit(idx);
}
@@ -1393,7 +1394,7 @@ static void ssd130x_primary_plane_reset(struct drm_plane *plane)
{
struct ssd130x_plane_state *ssd130x_state;
- WARN_ON(plane->state);
+ drm_WARN_ON_ONCE(plane->dev, plane->state);
ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL);
if (!ssd130x_state)
@@ -1408,7 +1409,7 @@ static struct drm_plane_state *ssd130x_primary_plane_duplicate_state(struct drm_
struct ssd130x_plane_state *old_ssd130x_state;
struct ssd130x_plane_state *ssd130x_state;
- if (WARN_ON(!plane->state))
+ if (drm_WARN_ON_ONCE(plane->dev, !plane->state))
return NULL;
old_ssd130x_state = to_ssd130x_plane_state(plane->state);
@@ -1473,15 +1474,7 @@ static enum drm_mode_status ssd130x_crtc_mode_valid(struct drm_crtc *crtc,
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(crtc->dev);
- if (mode->hdisplay != ssd130x->mode.hdisplay &&
- mode->vdisplay != ssd130x->mode.vdisplay)
- return MODE_ONE_SIZE;
- else if (mode->hdisplay != ssd130x->mode.hdisplay)
- return MODE_ONE_WIDTH;
- else if (mode->vdisplay != ssd130x->mode.vdisplay)
- return MODE_ONE_HEIGHT;
-
- return MODE_OK;
+ return drm_crtc_helper_mode_valid_fixed(crtc, mode, &ssd130x->mode);
}
static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc,
@@ -1498,7 +1491,7 @@ static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
- ssd130x_state->data_array = kmalloc(ssd130x->width * pages, GFP_KERNEL);
+ ssd130x_state->data_array = kmalloc_array(ssd130x->width, pages, GFP_KERNEL);
if (!ssd130x_state->data_array)
return -ENOMEM;
@@ -1519,7 +1512,7 @@ static int ssd132x_crtc_atomic_check(struct drm_crtc *crtc,
if (ret)
return ret;
- ssd130x_state->data_array = kmalloc(columns * ssd130x->height, GFP_KERNEL);
+ ssd130x_state->data_array = kmalloc_array(columns, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->data_array)
return -ENOMEM;
@@ -1546,7 +1539,7 @@ static int ssd133x_crtc_atomic_check(struct drm_crtc *crtc,
pitch = drm_format_info_min_pitch(fi, 0, ssd130x->width);
- ssd130x_state->data_array = kmalloc(pitch * ssd130x->height, GFP_KERNEL);
+ ssd130x_state->data_array = kmalloc_array(pitch, ssd130x->height, GFP_KERNEL);
if (!ssd130x_state->data_array)
return -ENOMEM;
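
The three kmalloc_array() conversions above matter because kmalloc(a * b, ...) can silently overflow the multiplication on 32-bit targets; kmalloc_array() rejects that case. Roughly (a sketch of the check, not the allocator's actual source):

#include <linux/overflow.h>
#include <linux/slab.h>

/* Overflow-checked two-factor allocation, as kmalloc_array() does it. */
static inline void *alloc_array_sketch(size_t n, size_t size, gfp_t gfp)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kmalloc(bytes, gfp);
}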
@@ -1558,7 +1551,7 @@ static void ssd130x_crtc_reset(struct drm_crtc *crtc)
{
struct ssd130x_crtc_state *ssd130x_state;
- WARN_ON(crtc->state);
+ drm_WARN_ON_ONCE(crtc->dev, crtc->state);
ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL);
if (!ssd130x_state)
@@ -1572,7 +1565,7 @@ static struct drm_crtc_state *ssd130x_crtc_duplicate_state(struct drm_crtc *crtc
struct ssd130x_crtc_state *old_ssd130x_state;
struct ssd130x_crtc_state *ssd130x_state;
- if (WARN_ON(!crtc->state))
+ if (drm_WARN_ON_ONCE(crtc->dev, !crtc->state))
return NULL;
old_ssd130x_state = to_ssd130x_crtc_state(crtc->state);
@@ -1740,20 +1733,8 @@ static const struct drm_encoder_funcs ssd130x_encoder_funcs = {
static int ssd130x_connector_get_modes(struct drm_connector *connector)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev);
- struct drm_display_mode *mode;
- struct device *dev = ssd130x->dev;
-
- mode = drm_mode_duplicate(connector->dev, &ssd130x->mode);
- if (!mode) {
- dev_err(dev, "Failed to duplicated mode\n");
- return 0;
- }
-
- drm_mode_probed_add(connector, mode);
- drm_set_preferred_mode(connector, mode->hdisplay, mode->vdisplay);
- /* There is only a single mode */
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, &ssd130x->mode);
}
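
drm_connector_helper_get_modes_fixed() performs, in essence, the duplicate/probed-add/preferred dance the removed lines open-coded. A simplified sketch of what the helper does (not its actual source):

static int get_modes_fixed_sketch(struct drm_connector *connector,
				  const struct drm_display_mode *fixed_mode)
{
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, fixed_mode);
	if (!mode)
		return 0;

	if (mode->name[0] == '\0')
		drm_mode_set_name(mode);

	mode->type |= DRM_MODE_TYPE_PREFERRED;
	drm_mode_probed_add(connector, mode);

	return 1;	/* there is only the single fixed mode */
}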
static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = {
@@ -1887,10 +1868,14 @@ static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
mode->type = DRM_MODE_TYPE_DRIVER;
mode->clock = 1;
- mode->hdisplay = mode->htotal = ssd130x->width;
- mode->hsync_start = mode->hsync_end = ssd130x->width;
- mode->vdisplay = mode->vtotal = ssd130x->height;
- mode->vsync_start = mode->vsync_end = ssd130x->height;
+ mode->hdisplay = ssd130x->width;
+ mode->htotal = ssd130x->width;
+ mode->hsync_start = ssd130x->width;
+ mode->hsync_end = ssd130x->width;
+ mode->vdisplay = ssd130x->height;
+ mode->vtotal = ssd130x->height;
+ mode->vsync_start = ssd130x->height;
+ mode->vsync_end = ssd130x->height;
mode->width_mm = 27;
mode->height_mm = 27;
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index c59fcb4dca32..4e12a465be7f 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -14,6 +14,7 @@
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include "sti_compositor.h"
#include "sti_cursor.h"
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 5e9332df21df..f16345f01065 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -22,6 +22,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sti_drv.h"
@@ -231,23 +232,15 @@ static const struct component_master_ops sti_ops = {
static int sti_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
- struct device_node *child_np;
- struct component_match *match = NULL;
+ int ret;
- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
devm_of_platform_populate(dev);
- child_np = of_get_next_available_child(node, NULL);
-
- while (child_np) {
- drm_of_component_match_add(dev, &match, component_compare_of,
- child_np);
- child_np = of_get_next_available_child(node, child_np);
- }
-
- return component_master_add_with_match(dev, &sti_ops, match);
+ return drm_of_component_probe(dev, component_compare_of, &sti_ops);
}
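
drm_of_component_probe() absorbs the open-coded child walk removed above. In outline it does the following (simplified sketch; the real helper additionally walks the OF graph to pick up remote encoder nodes):

static int component_probe_sketch(struct device *dev,
				  const struct component_master_ops *ops)
{
	struct component_match *match = NULL;
	struct device_node *child;

	for_each_available_child_of_node(dev->of_node, child)
		drm_of_component_match_add(dev, &match, component_compare_of,
					   child);

	return component_master_add_with_match(dev, ops, match);
}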
static void sti_platform_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index f046f5f7ad25..1e5aa8c30645 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -16,6 +16,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include "sti_compositor.h"
#include "sti_gdp.h"
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 2c015f563de9..b7397827889c 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -779,6 +779,8 @@ static int sti_hda_probe(struct platform_device *pdev)
return PTR_ERR(hda->clk_hddac);
}
+ drm_bridge_add(&hda->bridge);
+
platform_set_drvdata(pdev, hda);
return component_add(&pdev->dev, &sti_hda_ops);
@@ -786,7 +788,10 @@ static int sti_hda_probe(struct platform_device *pdev)
static void sti_hda_remove(struct platform_device *pdev)
{
+ struct sti_hda *hda = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &sti_hda_ops);
+ drm_bridge_remove(&hda->bridge);
}
static const struct of_device_id hda_of_match[] = {
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 4e7c3d78b2b9..f8222e60b1e0 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1459,6 +1459,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hdmi);
+ drm_bridge_add(&hdmi->bridge);
return component_add(&pdev->dev, &sti_hdmi_ops);
release_adapter:
@@ -1475,6 +1476,7 @@ static void sti_hdmi_remove(struct platform_device *pdev)
if (hdmi->audio_pdev)
platform_device_unregister(hdmi->audio_pdev);
component_del(&pdev->dev, &sti_hdmi_ops);
+ drm_bridge_remove(&hdmi->bridge);
}
struct platform_driver sti_hdmi_driver = {
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 03684062309b..57ef4ba3554e 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -20,6 +20,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include "sti_compositor.h"
#include "sti_drv.h"
@@ -744,7 +745,7 @@ static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp,
inv_zy = DIV_ROUND_UP(src_h, dst_h);
- return (inv_zy <= lfw) ? true : false;
+ return inv_zy <= lfw;
}
/**
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index 29e669ccec5b..948f947b5cad 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -12,6 +12,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include "sti_compositor.h"
#include "sti_drv.h"
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index ee81691b3203..ce6bc7e7b135 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -143,12 +143,17 @@ struct sti_vtg {
struct sti_vtg *of_vtg_find(struct device_node *np)
{
struct platform_device *pdev;
+ struct sti_vtg *vtg;
pdev = of_find_device_by_node(np);
if (!pdev)
return NULL;
- return (struct sti_vtg *)platform_get_drvdata(pdev);
+ vtg = platform_get_drvdata(pdev);
+
+ put_device(&pdev->dev);
+
+ return vtg;
}
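
of_find_device_by_node() takes a reference on the embedded struct device, so every successful lookup must be balanced with put_device(), which is what the fix above adds. The corrected pattern in its general form:

static void *drvdata_from_node(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);
	void *data;

	if (!pdev)
		return NULL;

	data = platform_get_drvdata(pdev);
	put_device(&pdev->dev);		/* balance the lookup's reference */

	return data;
}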
static void vtg_reset(struct sti_vtg *vtg)
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 8ebcaf953782..56d53ac3082d 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_managed.h>
@@ -236,8 +237,18 @@ static void stm_drm_platform_shutdown(struct platform_device *pdev)
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
}
+static struct ltdc_plat_data stm_drm_plat_data = {
+ .pad_max_freq_hz = 90000000,
+};
+
+static struct ltdc_plat_data stm_drm_plat_data_mp25 = {
+ .pad_max_freq_hz = 150000000,
+};
+
static const struct of_device_id drv_dt_ids[] = {
- { .compatible = "st,stm32-ltdc"},
+ { .compatible = "st,stm32-ltdc", .data = &stm_drm_plat_data, },
+ { .compatible = "st,stm32mp251-ltdc", .data = &stm_drm_plat_data_mp25, },
+ { .compatible = "st,stm32mp255-ltdc", .data = &stm_drm_plat_data_mp25, },
{ /* end node */ },
};
MODULE_DEVICE_TABLE(of, drv_dt_ids);
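
Each compatible now carries a per-SoC struct with the pad frequency limit; the consumer side fetches it with of_device_get_match_data() (sketch below; the real lookup is in ltdc_get_caps() further down in this series):

const struct ltdc_plat_data *pdata = of_device_get_match_data(dev);

if (!pdata)
	return -EINVAL;	/* defensive: every compatible entry sets .data */

ldev->caps.pad_max_freq_hz = pdata->pad_max_freq_hz;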
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 2c7bc064bc66..58eae6804cc8 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -274,8 +274,8 @@ static unsigned long dw_mipi_dsi_clk_recalc_rate(struct clk_hw *hw,
return (unsigned long)pll_out_khz * 1000;
}
-static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int dw_mipi_dsi_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct dw_mipi_dsi_stm *dsi = clk_to_dw_mipi_dsi_stm(hw);
unsigned int idf, ndiv, odf, pll_in_khz, pll_out_khz;
@@ -283,14 +283,14 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
DRM_DEBUG_DRIVER("\n");
- pll_in_khz = (unsigned int)(*parent_rate / 1000);
+ pll_in_khz = (unsigned int)(req->best_parent_rate / 1000);
/* Compute best pll parameters */
idf = 0;
ndiv = 0;
odf = 0;
- ret = dsi_pll_get_params(dsi, pll_in_khz, rate / 1000,
+ ret = dsi_pll_get_params(dsi, pll_in_khz, req->rate / 1000,
&idf, &ndiv, &odf);
if (ret)
DRM_WARN("Warning dsi_pll_get_params(): bad params\n");
@@ -298,7 +298,9 @@ static long dw_mipi_dsi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
/* Get the adjusted pll out value */
pll_out_khz = dsi_pll_get_clkout_khz(pll_in_khz, idf, ndiv, odf);
- return pll_out_khz * 1000;
+ req->rate = pll_out_khz * 1000;
+
+ return 0;
}
static int dw_mipi_dsi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -351,7 +353,7 @@ static const struct clk_ops dw_mipi_dsi_stm_clk_ops = {
.disable = dw_mipi_dsi_clk_disable,
.is_enabled = dw_mipi_dsi_clk_is_enabled,
.recalc_rate = dw_mipi_dsi_clk_recalc_rate,
- .round_rate = dw_mipi_dsi_clk_round_rate,
+ .determine_rate = dw_mipi_dsi_clk_determine_rate,
.set_rate = dw_mipi_dsi_clk_set_rate,
};
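
This is the first of several identical .round_rate to .determine_rate migrations in this section: instead of returning a long rate and writing through *parent_rate, the op fills in a struct clk_rate_request and returns an errno. The general shape of the conversion (hypothetical clock driver; foo_pick_rate() is a made-up stand-in for the driver's rate selection):

static int foo_clk_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	/* pick the achievable rate closest to the request */
	unsigned long best = foo_pick_rate(req->rate, req->best_parent_rate);

	req->rate = best;	/* result goes into the request... */

	return 0;		/* ...and the return value is an errno */
}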
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index ba315c66a04d..f7e847cfa38f 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
@@ -33,6 +34,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -51,6 +53,7 @@
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
#define HWVER_40100 0x040100
+#define HWVER_40101 0x040101
/*
* The address of some registers depends on the HW version: such registers have
@@ -641,7 +644,7 @@ static inline void ltdc_set_ycbcr_config(struct drm_plane *plane, u32 drm_pix_fm
break;
default:
/* RGB or not a YCbCr supported format */
- DRM_ERROR("Unsupported pixel format: %u\n", drm_pix_fmt);
+ drm_err(plane->dev, "Unsupported pixel format: %u\n", drm_pix_fmt);
return;
}
@@ -664,18 +667,19 @@ static inline void ltdc_set_ycbcr_coeffs(struct drm_plane *plane)
u32 lofs = plane->index * LAY_OFS;
if (enc != DRM_COLOR_YCBCR_BT601 && enc != DRM_COLOR_YCBCR_BT709) {
- DRM_ERROR("color encoding %d not supported, use bt601 by default\n", enc);
+ drm_err(plane->dev, "color encoding %d not supported, use bt601 by default\n", enc);
/* set by default color encoding to DRM_COLOR_YCBCR_BT601 */
enc = DRM_COLOR_YCBCR_BT601;
}
if (ran != DRM_COLOR_YCBCR_LIMITED_RANGE && ran != DRM_COLOR_YCBCR_FULL_RANGE) {
- DRM_ERROR("color range %d not supported, use limited range by default\n", ran);
+ drm_err(plane->dev,
+ "color range %d not supported, use limited range by default\n", ran);
/* set by default color range to DRM_COLOR_YCBCR_LIMITED_RANGE */
ran = DRM_COLOR_YCBCR_LIMITED_RANGE;
}
- DRM_DEBUG_DRIVER("Color encoding=%d, range=%d\n", enc, ran);
+ drm_dbg_driver(plane->dev, "Color encoding=%d, range=%d\n", enc, ran);

regmap_write(ldev->regmap, LTDC_L1CYR0R + lofs,
ltdc_ycbcr2rgb_coeffs[enc][ran][0]);
regmap_write(ldev->regmap, LTDC_L1CYR1R + lofs,
@@ -774,7 +778,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
pm_runtime_get_sync(ddev->dev);
@@ -798,7 +802,7 @@ static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_device *ddev = crtc->dev;
int layer_index = 0;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
drm_crtc_vblank_off(crtc);
@@ -835,9 +839,15 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc,
int target_max = target + CLK_TOLERANCE_HZ;
int result;
+ if (ldev->lvds_clk) {
+ result = clk_round_rate(ldev->lvds_clk, target);
+ drm_dbg_driver(crtc->dev, "lvds pixclk rate target %d, available %d\n",
+ target, result);
+ }
+
result = clk_round_rate(ldev->pixel_clk, target);
- DRM_DEBUG_DRIVER("clk rate target %d, available %d\n", target, result);
+ drm_dbg_driver(crtc->dev, "clk rate target %d, available %d\n", target, result);
/* Filter modes according to the max frequency supported by the pads */
if (result > ldev->caps.pad_max_freq_hz)
@@ -872,14 +882,14 @@ static bool ltdc_crtc_mode_fixup(struct drm_crtc *crtc,
int rate = mode->clock * 1000;
if (clk_set_rate(ldev->pixel_clk, rate) < 0) {
- DRM_ERROR("Cannot set rate (%dHz) for pixel clk\n", rate);
+ drm_err(crtc->dev, "Cannot set rate (%dHz) for pixel clk\n", rate);
return false;
}
adjusted_mode->clock = clk_get_rate(ldev->pixel_clk) / 1000;
- DRM_DEBUG_DRIVER("requested clock %dkHz, adjusted clock %dkHz\n",
- mode->clock, adjusted_mode->clock);
+ drm_dbg_driver(crtc->dev, "requested clock %dkHz, adjusted clock %dkHz\n",
+ mode->clock, adjusted_mode->clock);
return true;
}
@@ -934,20 +944,20 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (!pm_runtime_active(ddev->dev)) {
ret = pm_runtime_get_sync(ddev->dev);
if (ret) {
- DRM_ERROR("Failed to set mode, cannot get sync\n");
+ drm_err(crtc->dev, "Failed to set mode, cannot get sync\n");
return;
}
}
- DRM_DEBUG_DRIVER("CRTC:%d mode:%s\n", crtc->base.id, mode->name);
- DRM_DEBUG_DRIVER("Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
- DRM_DEBUG_DRIVER(" hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
- mode->hsync_start - mode->hdisplay,
- mode->htotal - mode->hsync_end,
- mode->hsync_end - mode->hsync_start,
- mode->vsync_start - mode->vdisplay,
- mode->vtotal - mode->vsync_end,
- mode->vsync_end - mode->vsync_start);
+ drm_dbg_driver(crtc->dev, "CRTC:%d mode:%s\n", crtc->base.id, mode->name);
+ drm_dbg_driver(crtc->dev, "Video mode: %dx%d", mode->hdisplay, mode->vdisplay);
+ drm_dbg_driver(crtc->dev, " hfp %d hbp %d hsl %d vfp %d vbp %d vsl %d\n",
+ mode->hsync_start - mode->hdisplay,
+ mode->htotal - mode->hsync_end,
+ mode->hsync_end - mode->hsync_start,
+ mode->vsync_start - mode->vdisplay,
+ mode->vtotal - mode->vsync_end,
+ mode->vsync_end - mode->vsync_start);
/* Convert video timings to ltdc timings */
hsync = mode->hsync_end - mode->hsync_start - 1;
@@ -1033,7 +1043,7 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *ddev = crtc->dev;
struct drm_pending_vblank_event *event = crtc->state->event;
- DRM_DEBUG_ATOMIC("\n");
+ drm_dbg_atomic(crtc->dev, "\n");
ltdc_crtc_update_clut(crtc);
@@ -1121,7 +1131,7 @@ static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_crtc_state *state = crtc->state;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
if (state->enable)
regmap_set_bits(ldev->regmap, LTDC_IER, IER_LIE);
@@ -1135,7 +1145,7 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(crtc->dev, "\n");
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_LIE);
}
@@ -1144,11 +1154,11 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
struct ltdc_device *ldev;
int ret;
- DRM_DEBUG_DRIVER("\n");
-
if (!crtc)
return -ENODEV;
+ drm_dbg_driver(crtc->dev, "\n");
+
ldev = crtc_to_ltdc(crtc);
if (source && strcmp(source, "auto") == 0) {
@@ -1168,14 +1178,14 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
static int ltdc_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source, size_t *values_cnt)
{
- DRM_DEBUG_DRIVER("\n");
-
if (!crtc)
return -ENODEV;
+ drm_dbg_driver(crtc->dev, "\n");
+
if (source && strcmp(source, "auto") != 0) {
- DRM_DEBUG_DRIVER("Unknown CRC source %s for %s\n",
- source, crtc->name);
+ drm_dbg_driver(crtc->dev, "Unknown CRC source %s for %s\n",
+ source, crtc->name);
return -EINVAL;
}
@@ -1233,7 +1243,7 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
struct drm_framebuffer *fb = new_plane_state->fb;
u32 src_w, src_h;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(plane->dev, "\n");
if (!fb)
return 0;
@@ -1244,7 +1254,7 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
/* Reject scaling */
if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) {
- DRM_DEBUG_DRIVER("Scaling is not supported");
+ drm_dbg_driver(plane->dev, "Scaling is not supported");
return -EINVAL;
}
@@ -1270,7 +1280,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
enum ltdc_pix_fmt pf;
if (!newstate->crtc || !fb) {
- DRM_DEBUG_DRIVER("fb or crtc NULL");
+ drm_dbg_driver(plane->dev, "fb or crtc NULL");
return;
}
@@ -1280,11 +1290,11 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
src_w = newstate->src_w >> 16;
src_h = newstate->src_h >> 16;
- DRM_DEBUG_DRIVER("plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
- plane->base.id, fb->base.id,
- src_w, src_h, src_x, src_y,
- newstate->crtc_w, newstate->crtc_h,
- newstate->crtc_x, newstate->crtc_y);
+ drm_dbg_driver(plane->dev, "plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
+ plane->base.id, fb->base.id,
+ src_w, src_h, src_x, src_y,
+ newstate->crtc_w, newstate->crtc_h,
+ newstate->crtc_x, newstate->crtc_y);
regmap_read(ldev->regmap, LTDC_BPCR, &bpcr);
@@ -1312,8 +1322,8 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
val = ltdc_set_flexible_pixel_format(plane, pf);
if (val == NB_PF) {
- DRM_ERROR("Pixel format %.4s not supported\n",
- (char *)&fb->format->format);
+ drm_err(fb->dev, "Pixel format %.4s not supported\n",
+ (char *)&fb->format->format);
val = 0; /* set by default ARGB 32 bits */
}
regmap_write_bits(ldev->regmap, LTDC_L1PFCR + lofs, LXPFCR_PF, val);
@@ -1350,7 +1360,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
if (newstate->rotation & DRM_MODE_REFLECT_Y)
paddr += (fb->pitches[0] * (y1 - y0));
- DRM_DEBUG_DRIVER("fb: phys 0x%08x", paddr);
+ drm_dbg_driver(fb->dev, "fb: phys 0x%08x", paddr);
regmap_write(ldev->regmap, LTDC_L1CFBAR + lofs, paddr);
/* Configures the color frame buffer pitch in bytes & line length */
@@ -1517,8 +1527,8 @@ static void ltdc_plane_atomic_disable(struct drm_plane *plane,
regmap_write_bits(ldev->regmap, LTDC_L1RCR + lofs,
LXRCR_IMR | LXRCR_VBR | LXRCR_GRMSK, LXRCR_VBR);
- DRM_DEBUG_DRIVER("CRTC:%d plane:%d\n",
- oldstate->crtc->base.id, plane->base.id);
+ drm_dbg_driver(plane->dev, "CRTC:%d plane:%d\n",
+ oldstate->crtc->base.id, plane->base.id);
}
static void ltdc_plane_atomic_print_state(struct drm_printer *p,
@@ -1632,7 +1642,7 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
drm_plane_create_alpha_property(plane);
- DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
+ drm_dbg_driver(plane->dev, "plane:%d created\n", plane->base.id);
return plane;
}
@@ -1647,7 +1657,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
primary = ltdc_plane_create(ddev, DRM_PLANE_TYPE_PRIMARY, 0);
if (!primary) {
- DRM_ERROR("Can not create primary plane\n");
+ drm_err(ddev, "Can not create primary plane\n");
return -EINVAL;
}
@@ -1668,7 +1678,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
ret = drmm_crtc_init_with_planes(ddev, crtc, primary, NULL,
&ltdc_crtc_funcs, NULL);
if (ret) {
- DRM_ERROR("Can not initialize CRTC\n");
+ drm_err(ddev, "Can not initialize CRTC\n");
return ret;
}
@@ -1677,13 +1687,13 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
drm_mode_crtc_set_gamma_size(crtc, CLUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, CLUT_SIZE);
- DRM_DEBUG_DRIVER("CRTC:%d created\n", crtc->base.id);
+ drm_dbg_driver(ddev, "CRTC:%d created\n", crtc->base.id);
/* Add planes. Note : the first layer is used by primary plane */
for (i = 1; i < ldev->caps.nb_layers; i++) {
overlay = ltdc_plane_create(ddev, DRM_PLANE_TYPE_OVERLAY, i);
if (!overlay) {
- DRM_ERROR("Can not create overlay plane %d\n", i);
+ drm_err(ddev, "Can not create overlay plane %d\n", i);
return -ENOMEM;
}
if (ldev->caps.dynamic_zorder)
@@ -1704,7 +1714,7 @@ static void ltdc_encoder_disable(struct drm_encoder *encoder)
struct drm_device *ddev = encoder->dev;
struct ltdc_device *ldev = ddev->dev_private;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(encoder->dev, "\n");
/* Disable LTDC */
regmap_clear_bits(ldev->regmap, LTDC_GCR, GCR_LTDCEN);
@@ -1718,7 +1728,7 @@ static void ltdc_encoder_enable(struct drm_encoder *encoder)
struct drm_device *ddev = encoder->dev;
struct ltdc_device *ldev = ddev->dev_private;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(encoder->dev, "\n");
/* set fifo underrun threshold register */
if (ldev->caps.fifo_threshold)
@@ -1734,7 +1744,7 @@ static void ltdc_encoder_mode_set(struct drm_encoder *encoder,
{
struct drm_device *ddev = encoder->dev;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(encoder->dev, "\n");
/*
* Set to default state the pinctrl only with DPI type.
@@ -1770,7 +1780,7 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
if (ret)
return ret;
- DRM_DEBUG_DRIVER("Bridge encoder:%d created\n", encoder->base.id);
+ drm_dbg_driver(encoder->dev, "Bridge encoder:%d created\n", encoder->base.id);
return 0;
}
@@ -1779,6 +1789,7 @@ static int ltdc_get_caps(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
u32 bus_width_log2, lcr, gc2r;
+ const struct ltdc_plat_data *pdata = of_device_get_match_data(ddev->dev);
/*
* at least 1 layer must be managed & the number of layers
@@ -1794,6 +1805,8 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.bus_width = 8 << bus_width_log2;
regmap_read(ldev->regmap, LTDC_IDR, &ldev->caps.hw_version);
+ ldev->caps.pad_max_freq_hz = pdata->pad_max_freq_hz;
+
switch (ldev->caps.hw_version) {
case HWVER_10200:
case HWVER_10300:
@@ -1811,7 +1824,6 @@ static int ltdc_get_caps(struct drm_device *ddev)
* does not work on 2nd layer.
*/
ldev->caps.non_alpha_only_l1 = true;
- ldev->caps.pad_max_freq_hz = 90000000;
if (ldev->caps.hw_version == HWVER_10200)
ldev->caps.pad_max_freq_hz = 65000000;
ldev->caps.nb_irq = 2;
@@ -1842,6 +1854,7 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.fifo_threshold = false;
break;
case HWVER_40100:
+ case HWVER_40101:
ldev->caps.layer_ofs = LAY_OFS_1;
ldev->caps.layer_regs = ltdc_layer_regs_a2;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a2;
@@ -1849,7 +1862,6 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a2);
ldev->caps.pix_fmt_flex = true;
ldev->caps.non_alpha_only_l1 = false;
- ldev->caps.pad_max_freq_hz = 90000000;
ldev->caps.nb_irq = 2;
ldev->caps.ycbcr_input = true;
ldev->caps.ycbcr_output = true;
@@ -1870,8 +1882,12 @@ void ltdc_suspend(struct drm_device *ddev)
{
struct ltdc_device *ldev = ddev->dev_private;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
clk_disable_unprepare(ldev->pixel_clk);
+ if (ldev->bus_clk)
+ clk_disable_unprepare(ldev->bus_clk);
+ if (ldev->lvds_clk)
+ clk_disable_unprepare(ldev->lvds_clk);
}
int ltdc_resume(struct drm_device *ddev)
@@ -1879,15 +1895,29 @@ int ltdc_resume(struct drm_device *ddev)
struct ltdc_device *ldev = ddev->dev_private;
int ret;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
ret = clk_prepare_enable(ldev->pixel_clk);
if (ret) {
- DRM_ERROR("failed to enable pixel clock (%d)\n", ret);
+ drm_err(ddev, "failed to enable pixel clock (%d)\n", ret);
return ret;
}
- return 0;
+ if (ldev->bus_clk) {
+ ret = clk_prepare_enable(ldev->bus_clk);
+ if (ret) {
+ drm_err(ddev, "failed to enable bus clock (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ if (ldev->lvds_clk) {
+ ret = clk_prepare_enable(ldev->lvds_clk);
+ if (ret)
+ drm_err(ddev, "failed to prepare lvds clock\n");
+ }
+
+ return ret;
}
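
The bus and lvds clocks only exist on some SoCs, hence the NULL checks around them. For reference, the common kernel idiom for such optional clocks is devm_clk_get_optional(), which returns NULL when the clock is absent, and the clk API treats a NULL clk as a no-op (sketch of the alternative, not what this patch does):

ldev->lvds_clk = devm_clk_get_optional(dev, "lvds");
if (IS_ERR(ldev->lvds_clk))
	return PTR_ERR(ldev->lvds_clk);

ret = clk_prepare_enable(ldev->lvds_clk);	/* no-op when NULL */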
int ltdc_load(struct drm_device *ddev)
@@ -1903,7 +1933,7 @@ int ltdc_load(struct drm_device *ddev)
int irq, i, nb_endpoints;
int ret = -ENODEV;
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
/* Get number of endpoints */
nb_endpoints = of_graph_get_endpoint_count(np);
@@ -1913,15 +1943,29 @@ int ltdc_load(struct drm_device *ddev)
ldev->pixel_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(ldev->pixel_clk)) {
if (PTR_ERR(ldev->pixel_clk) != -EPROBE_DEFER)
- DRM_ERROR("Unable to get lcd clock\n");
+ drm_err(ddev, "Unable to get lcd clock\n");
return PTR_ERR(ldev->pixel_clk);
}
if (clk_prepare_enable(ldev->pixel_clk)) {
- DRM_ERROR("Unable to prepare pixel clock\n");
+ drm_err(ddev, "Unable to prepare pixel clock\n");
return -ENODEV;
}
+ if (of_device_is_compatible(np, "st,stm32mp251-ltdc") ||
+ of_device_is_compatible(np, "st,stm32mp255-ltdc")) {
+ ldev->bus_clk = devm_clk_get(dev, "bus");
+ if (IS_ERR(ldev->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(ldev->bus_clk),
+ "Unable to get bus clock\n");
+
+ ret = clk_prepare_enable(ldev->bus_clk);
+ if (ret) {
+ drm_err(ddev, "Unable to prepare bus clock\n");
+ return ret;
+ }
+ }
+
/* Get endpoints if any */
for (i = 0; i < nb_endpoints; i++) {
ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
@@ -1939,7 +1983,7 @@ int ltdc_load(struct drm_device *ddev)
if (panel) {
bridge = drmm_panel_bridge_add(ddev, panel);
if (IS_ERR(bridge)) {
- DRM_ERROR("panel-bridge endpoint %d\n", i);
+ drm_err(ddev, "panel-bridge endpoint %d\n", i);
ret = PTR_ERR(bridge);
goto err;
}
@@ -1949,12 +1993,16 @@ int ltdc_load(struct drm_device *ddev)
ret = ltdc_encoder_init(ddev, bridge);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_ERROR("init encoder endpoint %d\n", i);
+ drm_err(ddev, "init encoder endpoint %d\n", i);
goto err;
}
}
}
+ ldev->lvds_clk = devm_clk_get(dev, "lvds");
+ if (IS_ERR(ldev->lvds_clk))
+ ldev->lvds_clk = NULL;
+
rstc = devm_reset_control_get_exclusive(dev, NULL);
mutex_init(&ldev->err_lock);
@@ -1967,29 +2015,29 @@ int ltdc_load(struct drm_device *ddev)
ldev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ldev->regs)) {
- DRM_ERROR("Unable to get ltdc registers\n");
+ drm_err(ddev, "Unable to get ltdc registers\n");
ret = PTR_ERR(ldev->regs);
goto err;
}
ldev->regmap = devm_regmap_init_mmio(&pdev->dev, ldev->regs, &stm32_ltdc_regmap_cfg);
if (IS_ERR(ldev->regmap)) {
- DRM_ERROR("Unable to regmap ltdc registers\n");
+ drm_err(ddev, "Unable to regmap ltdc registers\n");
ret = PTR_ERR(ldev->regmap);
goto err;
}
ret = ltdc_get_caps(ddev);
if (ret) {
- DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
- ldev->caps.hw_version);
+ drm_err(ddev, "hardware identifier (0x%08x) not supported!\n",
+ ldev->caps.hw_version);
goto err;
}
/* Disable all interrupts */
regmap_clear_bits(ldev->regmap, LTDC_IER, IER_MASK);
- DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
+ drm_dbg_driver(ddev, "ltdc hw version 0x%08x\n", ldev->caps.hw_version);
/* initialize default value for fifo underrun threshold & clear interrupt error counters */
ldev->transfer_err = 0;
@@ -2008,32 +2056,35 @@ int ltdc_load(struct drm_device *ddev)
ltdc_irq_thread, IRQF_ONESHOT,
dev_name(dev), ddev);
if (ret) {
- DRM_ERROR("Failed to register LTDC interrupt\n");
+ drm_err(ddev, "Failed to register LTDC interrupt\n");
goto err;
}
}
crtc = drmm_kzalloc(ddev, sizeof(*crtc), GFP_KERNEL);
if (!crtc) {
- DRM_ERROR("Failed to allocate crtc\n");
+ drm_err(ddev, "Failed to allocate crtc\n");
ret = -ENOMEM;
goto err;
}
ret = ltdc_crtc_init(ddev, crtc);
if (ret) {
- DRM_ERROR("Failed to init crtc\n");
+ drm_err(ddev, "Failed to init crtc\n");
goto err;
}
ret = drm_vblank_init(ddev, NB_CRTC);
if (ret) {
- DRM_ERROR("Failed calling drm_vblank_init()\n");
+ drm_err(ddev, "Failed calling drm_vblank_init()\n");
goto err;
}
clk_disable_unprepare(ldev->pixel_clk);
+ if (ldev->bus_clk)
+ clk_disable_unprepare(ldev->bus_clk);
+
pinctrl_pm_select_sleep_state(ddev->dev);
pm_runtime_enable(ddev->dev);
@@ -2042,12 +2093,15 @@ int ltdc_load(struct drm_device *ddev)
err:
clk_disable_unprepare(ldev->pixel_clk);
+ if (ldev->bus_clk)
+ clk_disable_unprepare(ldev->bus_clk);
+
return ret;
}
void ltdc_unload(struct drm_device *ddev)
{
- DRM_DEBUG_DRIVER("\n");
+ drm_dbg_driver(ddev, "\n");
pm_runtime_disable(ddev->dev);
}
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index 9d488043ffdb..17b51a7ce28e 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -40,10 +40,16 @@ struct fps_info {
ktime_t last_timestamp;
};
+struct ltdc_plat_data {
+ int pad_max_freq_hz; /* max frequency supported by pad */
+};
+
struct ltdc_device {
void __iomem *regs;
struct regmap *regmap;
struct clk *pixel_clk; /* lcd pixel clock */
+ struct clk *lvds_clk; /* lvds pixel clock */
+ struct clk *bus_clk; /* bus clock */
struct mutex err_lock; /* protecting error_status */
struct ltdc_caps caps;
u32 irq_status;
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index 07788e8d3d83..fe38c0984b2b 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -682,8 +682,8 @@ static unsigned long lvds_pixel_clk_recalc_rate(struct clk_hw *hw,
return (unsigned long)lvds->pixel_clock_rate;
}
-static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int lvds_pixel_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct stm_lvds *lvds = container_of(hw, struct stm_lvds, lvds_ck_px);
unsigned int pll_in_khz, bdiv = 0, mdiv = 0, ndiv = 0;
@@ -703,7 +703,7 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate,
mode = list_first_entry(&connector->modes,
struct drm_display_mode, head);
- pll_in_khz = (unsigned int)(*parent_rate / 1000);
+ pll_in_khz = (unsigned int)(req->best_parent_rate / 1000);
if (lvds_is_dual_link(lvds->link_type))
multiplier = 2;
@@ -719,14 +719,16 @@ static long lvds_pixel_clk_round_rate(struct clk_hw *hw, unsigned long rate,
lvds->pixel_clock_rate = (unsigned long)pll_get_clkout_khz(pll_in_khz, bdiv, mdiv, ndiv)
* 1000 * multiplier / 7;
- return lvds->pixel_clock_rate;
+ req->rate = lvds->pixel_clock_rate;
+
+ return 0;
}
static const struct clk_ops lvds_pixel_clk_ops = {
.enable = lvds_pixel_clk_enable,
.disable = lvds_pixel_clk_disable,
.recalc_rate = lvds_pixel_clk_recalc_rate,
- .round_rate = lvds_pixel_clk_round_rate,
+ .determine_rate = lvds_pixel_clk_determine_rate,
};
static const struct clk_init_data clk_data = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 2dded3b828df..40405a52a073 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -23,6 +23,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_backend.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index c11dfb2739fa..8a409eee1dca 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -22,6 +22,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_of.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index 5ab1604f12dd..5e9c4b97c84c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -19,6 +19,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 12430b9d4e93..b1beadb9bb59 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -59,13 +59,15 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
return best_rate;
}
-static long sun4i_ddc_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *prate)
+static int sun4i_ddc_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sun4i_ddc *ddc = hw_to_ddc(hw);
- return sun4i_ddc_calc_divider(rate, *prate, ddc->pre_div,
- ddc->m_offset, NULL, NULL);
+ req->rate = sun4i_ddc_calc_divider(req->rate, req->best_parent_rate,
+ ddc->pre_div, ddc->m_offset, NULL, NULL);
+
+ return 0;
}
static unsigned long sun4i_ddc_recalc_rate(struct clk_hw *hw,
@@ -101,7 +103,7 @@ static int sun4i_ddc_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops sun4i_ddc_ops = {
.recalc_rate = sun4i_ddc_recalc_rate,
- .round_rate = sun4i_ddc_round_rate,
+ .determine_rate = sun4i_ddc_determine_rate,
.set_rate = sun4i_ddc_set_rate,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
index 03d7de1911cd..4afb12bd5281 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon_dclk.c
@@ -67,8 +67,8 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
return parent_rate / val;
}
-static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int sun4i_dclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct sun4i_dclk *dclk = hw_to_dclk(hw);
struct sun4i_tcon *tcon = dclk->tcon;
@@ -77,7 +77,7 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
int i;
for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
- u64 ideal = (u64)rate * i;
+ u64 ideal = (u64)req->rate * i;
unsigned long rounded;
/*
@@ -99,17 +99,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
goto out;
}
- if (abs(rate - rounded / i) <
- abs(rate - best_parent / best_div)) {
+ if (abs(req->rate - rounded / i) <
+ abs(req->rate - best_parent / best_div)) {
best_parent = rounded;
best_div = i;
}
}
out:
- *parent_rate = best_parent;
+ req->best_parent_rate = best_parent;
- return best_parent / best_div;
+ req->rate = best_parent / best_div;
+
+ return 0;
}
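
Reduced to its core, the converted op is a divider search: for each candidate divider it asks the parent for the nearest rate to target * div, keeps the combination minimizing the error, and writes the winning parent rate back into the request. A standalone sketch of that loop (assumes 64-bit rates and omits the exact-match early exit and overflow guard of the real code):

static unsigned long pick_divider(struct clk_hw *parent,
				  unsigned long target,
				  int min_div, int max_div,
				  unsigned long *best_parent_rate)
{
	unsigned long best_parent = 0;
	int best_div = min_div;

	for (int i = min_div; i <= max_div; i++) {
		/* nearest rate the parent can deliver for target * i */
		unsigned long rounded = clk_hw_round_rate(parent, target * i);

		if (abs(target - rounded / i) <
		    abs(target - best_parent / best_div)) {
			best_parent = rounded;
			best_div = i;
		}
	}

	*best_parent_rate = best_parent;

	return best_parent / best_div;
}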
static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -155,7 +157,7 @@ static const struct clk_ops sun4i_dclk_ops = {
.is_enabled = sun4i_dclk_is_enabled,
.recalc_rate = sun4i_dclk_recalc_rate,
- .round_rate = sun4i_dclk_round_rate,
+ .determine_rate = sun4i_dclk_determine_rate,
.set_rate = sun4i_dclk_set_rate,
.get_phase = sun4i_dclk_get_phase,
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index c100d29b1a89..ce81c12f511d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -3,11 +3,20 @@
* Copyright (C) Jernej Skrabec <jernej.skrabec@siol.net>
*/
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_plane.h>
#include <drm/drm_print.h>
#include "sun8i_csc.h"
#include "sun8i_mixer.h"
+enum sun8i_csc_mode {
+ SUN8I_CSC_MODE_OFF,
+ SUN8I_CSC_MODE_YUV2RGB,
+ SUN8I_CSC_MODE_YVU2RGB,
+};
+
static const u32 ccsc_base[][2] = {
[CCSC_MIXER0_LAYOUT] = {CCSC00_OFFSET, CCSC01_OFFSET},
[CCSC_MIXER1_LAYOUT] = {CCSC10_OFFSET, CCSC11_OFFSET},
@@ -107,23 +116,28 @@ static const u32 yuv2rgb_de3[2][3][12] = {
},
};
-static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
- enum sun8i_csc_mode mode,
- enum drm_color_encoding encoding,
- enum drm_color_range range)
+static void sun8i_csc_setup(struct regmap *map, u32 base,
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range)
{
+ u32 base_reg, val;
const u32 *table;
- u32 base_reg;
int i;
table = yuv2rgb[range][encoding];
switch (mode) {
+ case SUN8I_CSC_MODE_OFF:
+ val = 0;
+ break;
case SUN8I_CSC_MODE_YUV2RGB:
+ val = SUN8I_CSC_CTRL_EN;
base_reg = SUN8I_CSC_COEFF(base, 0);
regmap_bulk_write(map, base_reg, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
+ val = SUN8I_CSC_CTRL_EN;
for (i = 0; i < 12; i++) {
if ((i & 3) == 1)
base_reg = SUN8I_CSC_COEFF(base, i + 1);
@@ -135,28 +149,37 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base,
}
break;
default:
+ val = 0;
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
+
+ regmap_write(map, SUN8I_CSC_CTRL(base), val);
}
-static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
- enum sun8i_csc_mode mode,
- enum drm_color_encoding encoding,
- enum drm_color_range range)
+static void sun8i_de3_ccsc_setup(struct regmap *map, int layer,
+ enum sun8i_csc_mode mode,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range)
{
+ u32 addr, val, mask;
const u32 *table;
- u32 addr;
int i;
+ mask = SUN50I_MIXER_BLEND_CSC_CTL_EN(layer);
table = yuv2rgb_de3[range][encoding];
switch (mode) {
+ case SUN8I_CSC_MODE_OFF:
+ val = 0;
+ break;
case SUN8I_CSC_MODE_YUV2RGB:
+ val = mask;
addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0);
regmap_bulk_write(map, addr, table, 12);
break;
case SUN8I_CSC_MODE_YVU2RGB:
+ val = mask;
for (i = 0; i < 12; i++) {
if ((i & 3) == 1)
addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE,
@@ -173,67 +196,53 @@ static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer,
}
break;
default:
+ val = 0;
DRM_WARN("Wrong CSC mode specified.\n");
return;
}
-}
-
-static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable)
-{
- u32 val;
-
- if (enable)
- val = SUN8I_CSC_CTRL_EN;
- else
- val = 0;
-
- regmap_update_bits(map, SUN8I_CSC_CTRL(base), SUN8I_CSC_CTRL_EN, val);
-}
-
-static void sun8i_de3_ccsc_enable(struct regmap *map, int layer, bool enable)
-{
- u32 val, mask;
-
- mask = SUN50I_MIXER_BLEND_CSC_CTL_EN(layer);
-
- if (enable)
- val = mask;
- else
- val = 0;
regmap_update_bits(map, SUN50I_MIXER_BLEND_CSC_CTL(DE3_BLD_BASE),
mask, val);
}
-void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
- enum sun8i_csc_mode mode,
- enum drm_color_encoding encoding,
- enum drm_color_range range)
+static u32 sun8i_csc_get_mode(struct drm_plane_state *state)
{
- u32 base;
+ const struct drm_format_info *format;
- if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
- sun8i_de3_ccsc_set_coefficients(mixer->engine.regs, layer,
- mode, encoding, range);
- return;
- }
+ if (!state->crtc || !state->visible)
+ return SUN8I_CSC_MODE_OFF;
- base = ccsc_base[mixer->cfg->ccsc][layer];
+ format = state->fb->format;
+ if (!format->is_yuv)
+ return SUN8I_CSC_MODE_OFF;
- sun8i_csc_set_coefficients(mixer->engine.regs, base,
- mode, encoding, range);
+ switch (format->format) {
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ return SUN8I_CSC_MODE_YVU2RGB;
+ default:
+ return SUN8I_CSC_MODE_YUV2RGB;
+ }
}
-void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
+void sun8i_csc_config(struct sun8i_layer *layer,
+ struct drm_plane_state *state)
{
+ u32 mode = sun8i_csc_get_mode(state);
u32 base;
- if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
- sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable);
+ if (layer->cfg->de_type == SUN8I_MIXER_DE3) {
+ sun8i_de3_ccsc_setup(layer->regs, layer->channel,
+ mode, state->color_encoding,
+ state->color_range);
return;
}
- base = ccsc_base[mixer->cfg->ccsc][layer];
+ base = ccsc_base[layer->cfg->ccsc][layer->channel];
- sun8i_csc_enable(mixer->engine.regs, base, enable);
+ sun8i_csc_setup(layer->regs, base,
+ mode, state->color_encoding,
+ state->color_range);
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
index 828b86fd0cab..2a4b79599610 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
@@ -8,7 +8,8 @@
#include <drm/drm_color_mgmt.h>
-struct sun8i_mixer;
+struct drm_plane_state;
+struct sun8i_layer;
/* VI channel CSC units offsets */
#define CCSC00_OFFSET 0xAA050
@@ -22,16 +23,7 @@ struct sun8i_mixer;
#define SUN8I_CSC_CTRL_EN BIT(0)
-enum sun8i_csc_mode {
- SUN8I_CSC_MODE_OFF,
- SUN8I_CSC_MODE_YUV2RGB,
- SUN8I_CSC_MODE_YVU2RGB,
-};
-
-void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
- enum sun8i_csc_mode mode,
- enum drm_color_encoding encoding,
- enum drm_color_range range);
-void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable);
+void sun8i_csc_config(struct sun8i_layer *layer,
+ struct drm_plane_state *state);
#endif
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 31a8409b98f4..ce9c155bfad7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -21,6 +21,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun4i_drv.h"
@@ -250,24 +251,6 @@ int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format)
return -EINVAL;
}
-static void sun8i_layer_enable(struct sun8i_layer *layer, bool enable)
-{
- u32 ch_base = sun8i_channel_base(layer->mixer, layer->channel);
- u32 val, reg, mask;
-
- if (layer->type == SUN8I_LAYER_TYPE_UI) {
- val = enable ? SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN : 0;
- mask = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN;
- reg = SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, layer->overlay);
- } else {
- val = enable ? SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN : 0;
- mask = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN;
- reg = SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, layer->overlay);
- }
-
- regmap_update_bits(layer->mixer->engine.regs, reg, mask, val);
-}
-
static void sun8i_mixer_commit(struct sunxi_engine *engine,
struct drm_crtc *crtc,
struct drm_atomic_state *state)
@@ -283,10 +266,10 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
drm_for_each_plane(plane, state->dev) {
struct sun8i_layer *layer = plane_to_sun8i_layer(plane);
+ int w, h, x, y, zpos;
bool enable;
- int zpos;
- if (!(plane->possible_crtcs & drm_crtc_mask(crtc)) || layer->mixer != mixer)
+ if (!(plane->possible_crtcs & drm_crtc_mask(crtc)))
continue;
plane_state = drm_atomic_get_new_plane_state(state, plane);
@@ -295,23 +278,28 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
enable = plane_state->crtc && plane_state->visible;
zpos = plane_state->normalized_zpos;
+ x = plane_state->dst.x1;
+ y = plane_state->dst.y1;
+ w = drm_rect_width(&plane_state->dst);
+ h = drm_rect_height(&plane_state->dst);
- DRM_DEBUG_DRIVER(" plane %d: chan=%d ovl=%d en=%d zpos=%d\n",
- plane->base.id, layer->channel, layer->overlay,
- enable, zpos);
-
- /*
- * We always update the layer enable bit, because it can clear
- * spontaneously for unknown reasons.
- */
- sun8i_layer_enable(layer, enable);
+ DRM_DEBUG_DRIVER(" plane %d: chan=%d ovl=%d en=%d zpos=%d x=%d y=%d w=%d h=%d\n",
+ plane->base.id, layer->index, layer->overlay,
+ enable, zpos, x, y, w, h);
if (!enable)
continue;
/* Route layer to pipe based on zpos */
- route |= layer->channel << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
+ route |= layer->index << SUN8I_MIXER_BLEND_ROUTE_PIPE_SHIFT(zpos);
pipe_en |= SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
+
+ regmap_write(bld_regs,
+ SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
+ SUN8I_MIXER_COORD(x, y));
+ regmap_write(bld_regs,
+ SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
+ SUN8I_MIXER_SIZE(w, h));
}
regmap_write(bld_regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
@@ -328,18 +316,30 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
{
struct drm_plane **planes;
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
+ int plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+ enum drm_plane_type type;
+ unsigned int phy_index;
int i;
- planes = devm_kcalloc(drm->dev,
- mixer->cfg->vi_num + mixer->cfg->ui_num + 1,
- sizeof(*planes), GFP_KERNEL);
+ planes = devm_kcalloc(drm->dev, plane_cnt, sizeof(*planes), GFP_KERNEL);
if (!planes)
return ERR_PTR(-ENOMEM);
for (i = 0; i < mixer->cfg->vi_num; i++) {
struct sun8i_layer *layer;
- layer = sun8i_vi_layer_init_one(drm, mixer, i);
+ if (i == 0 && !mixer->cfg->ui_num)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ phy_index = i;
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ phy_index = mixer->cfg->map[i];
+
+ layer = sun8i_vi_layer_init_one(drm, type, mixer->engine.regs,
+ i, phy_index, plane_cnt,
+ &mixer->cfg->lay_cfg);
if (IS_ERR(layer)) {
dev_err(drm->dev,
"Couldn't initialize overlay plane\n");
@@ -350,16 +350,28 @@ static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
}
for (i = 0; i < mixer->cfg->ui_num; i++) {
+ unsigned int index = mixer->cfg->vi_num + i;
struct sun8i_layer *layer;
- layer = sun8i_ui_layer_init_one(drm, mixer, i);
+ if (i == 0)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+
+ phy_index = index;
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ phy_index = mixer->cfg->map[index];
+
+ layer = sun8i_ui_layer_init_one(drm, type, mixer->engine.regs,
+ index, phy_index, plane_cnt,
+ &mixer->cfg->lay_cfg);
if (IS_ERR(layer)) {
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
return ERR_CAST(layer);
}
- planes[mixer->cfg->vi_num + i] = &layer->plane;
+ planes[index] = &layer->plane;
}
return planes;
@@ -692,119 +704,173 @@ static void sun8i_mixer_remove(struct platform_device *pdev)
}
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
- .ccsc = CCSC_MIXER0_LAYOUT,
+ .lay_cfg = {
+ .ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 2048,
+ .de2_fcc_alpha = 1,
+ },
.de_type = SUN8I_MIXER_DE2,
- .scaler_mask = 0xf,
- .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
- .ccsc = CCSC_MIXER1_LAYOUT,
+ .lay_cfg = {
+ .ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0x3,
+ .scanline_yuv = 2048,
+ .de2_fcc_alpha = 1,
+ },
.de_type = SUN8I_MIXER_DE2,
- .scaler_mask = 0x3,
- .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
- .ccsc = CCSC_MIXER0_LAYOUT,
+ .lay_cfg = {
+ .ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 2048,
+ .de2_fcc_alpha = 1,
+ },
.de_type = SUN8I_MIXER_DE2,
.mod_rate = 432000000,
- .scaler_mask = 0xf,
- .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
- .ccsc = CCSC_MIXER0_LAYOUT,
+ .lay_cfg = {
+ .ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 2048,
+ .de2_fcc_alpha = 1,
+ },
.de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
- .scaler_mask = 0xf,
- .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
- .ccsc = CCSC_MIXER1_LAYOUT,
+ .lay_cfg = {
+ .ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0x3,
+ .scanline_yuv = 2048,
+ .de2_fcc_alpha = 1,
+ },
.de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
- .scaler_mask = 0x3,
- .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
- .de_type = SUN8I_MIXER_DE2,
- .vi_num = 2,
- .ui_num = 1,
- .scaler_mask = 0x3,
- .scanline_yuv = 2048,
- .ccsc = CCSC_MIXER0_LAYOUT,
- .mod_rate = 150000000,
+ .lay_cfg = {
+ .ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 2,
+ .scaler_mask = 0x3,
+ .scanline_yuv = 2048,
+ },
+ .de_type = SUN8I_MIXER_DE2,
+ .mod_rate = 150000000,
+ .vi_num = 2,
+ .ui_num = 1,
};
static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
- .ccsc = CCSC_D1_MIXER0_LAYOUT,
+ .lay_cfg = {
+ .ccsc = CCSC_D1_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0x3,
+ .scanline_yuv = 2048,
+ .de2_fcc_alpha = 1,
+ },
.de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
- .scaler_mask = 0x3,
- .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
- .ccsc = CCSC_MIXER1_LAYOUT,
+ .lay_cfg = {
+ .ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0x1,
+ .scanline_yuv = 1024,
+ .de2_fcc_alpha = 1,
+ },
.de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
- .scaler_mask = 0x1,
- .scanline_yuv = 1024,
.ui_num = 0,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
- .ccsc = CCSC_MIXER0_LAYOUT,
+ .lay_cfg = {
+ .ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 4096,
+ .de2_fcc_alpha = 1,
+ },
.de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
- .scaler_mask = 0xf,
- .scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
- .ccsc = CCSC_MIXER1_LAYOUT,
+ .lay_cfg = {
+ .ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0x3,
+ .scanline_yuv = 2048,
+ .de2_fcc_alpha = 1,
+ },
.de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
- .scaler_mask = 0x3,
- .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
- .ccsc = CCSC_MIXER0_LAYOUT,
+ .lay_cfg = {
+ .de_type = SUN8I_MIXER_DE3,
+ .vi_scaler_num = 1,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 4096,
+ },
.de_type = SUN8I_MIXER_DE3,
.mod_rate = 600000000,
- .scaler_mask = 0xf,
- .scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
static const struct sun8i_mixer_cfg sun50i_h616_mixer0_cfg = {
- .ccsc = CCSC_MIXER0_LAYOUT,
+ .lay_cfg = {
+ .de_type = SUN8I_MIXER_DE33,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 4096,
+ },
.de_type = SUN8I_MIXER_DE33,
.mod_rate = 600000000,
- .scaler_mask = 0xf,
- .scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
.map = {0, 6, 7, 8},
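
The tables above all follow the same split: per-layer knobs move into lay_cfg, while mixer-wide properties stay at the top level (de_type is intentionally duplicated so that layer code no longer needs a pointer to the mixer). As a sketch, a hypothetical new DE2 variant would be described like this; the SoC and its values are invented purely for illustration:

static const struct sun8i_mixer_cfg example_soc_mixer_cfg = {
	.lay_cfg = {
		.ccsc		= CCSC_MIXER0_LAYOUT,
		.de_type	= SUN8I_MIXER_DE2,
		.vi_scaler_num	= 1,
		.scaler_mask	= 0x7,	/* VI0, UI0 and UI1 can scale */
		.scanline_yuv	= 2048,
		.de2_fcc_alpha	= 1,	/* single VI plane, so use FCC alpha */
	},
	.de_type	= SUN8I_MIXER_DE2,
	.mod_rate	= 297000000,
	.vi_num		= 1,
	.ui_num		= 2,
};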
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index a1c1cbccc654..e2f83301aae8 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -39,6 +39,9 @@
#define DE3_CH_BASE 0x1000
#define DE3_CH_SIZE 0x0800
+#define DE33_CH_BASE 0x1000
+#define DE33_CH_SIZE 0x20000
+
#define SUN8I_MIXER_BLEND_PIPE_CTL(base) ((base) + 0)
#define SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, x) ((base) + 0x4 + 0x10 * (x))
#define SUN8I_MIXER_BLEND_ATTR_INSIZE(base, x) ((base) + 0x8 + 0x10 * (x))
@@ -161,29 +164,45 @@ enum sun8i_mixer_type {
};
/**
- * struct sun8i_mixer_cfg - mixer HW configuration
- * @vi_num: number of VI channels
- * @ui_num: number of UI channels
+ * struct sun8i_layer_cfg - layer configuration
+ * @vi_scaler_num: Number of VI scalers. Used on DE2 and DE3.
* @scaler_mask: bitmask which tells which channels support scaling
* First, scaler support for VI channels is defined and after that, scaler
* support for UI channels. For example, if a mixer has 2 VI channels without
* scalers and 2 UI channels with scalers, the bitmask would be 0xC.
* @ccsc: select set of CCSC base addresses from the enumeration above.
- * @mod_rate: module clock rate that needs to be set in order to have
- * a functional block.
* @de_type: sun8i_mixer_type enum representing the display engine generation.
* @scanline_yuv: size of a scanline for the VI scaler for YUV formats.
- * @map: channel map for DE variants processing YUV separately (DE33)
+ * @de2_fcc_alpha: use FCC to make up for the missing DE2 VI alpha capability
+ * Most DE2 cores have FCC. If the number of VI planes is one, enable this.
*/
-struct sun8i_mixer_cfg {
- int vi_num;
- int ui_num;
+struct sun8i_layer_cfg {
+ unsigned int vi_scaler_num;
int scaler_mask;
int ccsc;
- unsigned long mod_rate;
unsigned int de_type;
unsigned int scanline_yuv;
- unsigned int map[6];
+ unsigned int de2_fcc_alpha : 1;
+};
+
+/**
+ * struct sun8i_mixer_cfg - mixer HW configuration
+ * @lay_cfg: layer configuration
+ * @vi_num: number of VI channels
+ * @ui_num: number of UI channels
+ * @de_type: sun8i_mixer_type enum representing the display engine generation.
+ * @mod_rate: module clock rate that needs to be set in order to have
+ * a functional block.
+ * @map: channel map for DE variants processing YUV separately (DE33)
+ */
+struct sun8i_mixer_cfg {
+ struct sun8i_layer_cfg lay_cfg;
+ int vi_num;
+ int ui_num;
+ unsigned int de_type;
+ unsigned long mod_rate;
+ unsigned int map[6];
};
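
The scaler_mask layout documented above is easy to get wrong; the worked example from the comment (2 VI channels without scalers, 2 UI channels with scalers, giving 0xC) can be checked with a few lines of plain C. The helper below is purely illustrative, not part of the driver:

#include <stdio.h>

/* VI channel bits occupy the low positions, UI channel bits follow. */
static unsigned int build_scaler_mask(unsigned int vi_num,
				      unsigned int vi_bits,
				      unsigned int ui_bits)
{
	return vi_bits | (ui_bits << vi_num);
}

int main(void)
{
	/* 2 VI channels without scalers, 2 UI channels with scalers */
	printf("scaler_mask = 0x%x\n", build_scaler_mask(2, 0x0, 0x3));
	return 0;	/* prints 0xc */
}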
struct sun8i_mixer {
@@ -206,11 +225,13 @@ enum {
};
struct sun8i_layer {
- struct drm_plane plane;
- struct sun8i_mixer *mixer;
- int type;
- int channel;
- int overlay;
+ struct drm_plane plane;
+ int type;
+ int index;
+ int channel;
+ int overlay;
+ struct regmap *regs;
+ const struct sun8i_layer_cfg *cfg;
};
static inline struct sun8i_layer *
@@ -239,14 +260,14 @@ sun8i_blender_regmap(struct sun8i_mixer *mixer)
}
static inline u32
-sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
+sun8i_channel_base(struct sun8i_layer *layer)
{
- if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
- return mixer->cfg->map[channel] * 0x20000 + DE2_CH_SIZE;
- else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
- return DE3_CH_BASE + channel * DE3_CH_SIZE;
+ if (layer->cfg->de_type == SUN8I_MIXER_DE33)
+ return DE33_CH_BASE + layer->channel * DE33_CH_SIZE;
+ else if (layer->cfg->de_type == SUN8I_MIXER_DE3)
+ return DE3_CH_BASE + layer->channel * DE3_CH_SIZE;
else
- return DE2_CH_BASE + channel * DE2_CH_SIZE;
+ return DE2_CH_BASE + layer->channel * DE2_CH_SIZE;
}
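
With layer->channel now holding the physical index, the per-generation base computation above becomes uniform. The check below reproduces it for the H616 (DE33) map; the DE2 constants are not shown in this header, so the values used here are assumptions taken from the kernel tree:

#include <stdio.h>

#define DE2_CH_BASE	0x1000	/* assumed */
#define DE2_CH_SIZE	0x1000	/* assumed */
#define DE3_CH_BASE	0x1000
#define DE3_CH_SIZE	0x0800
#define DE33_CH_BASE	0x1000
#define DE33_CH_SIZE	0x20000

enum de_type { DE2, DE3, DE33 };

static unsigned int channel_base(enum de_type type, unsigned int channel)
{
	switch (type) {
	case DE33:
		return DE33_CH_BASE + channel * DE33_CH_SIZE;
	case DE3:
		return DE3_CH_BASE + channel * DE3_CH_SIZE;
	default:
		return DE2_CH_BASE + channel * DE2_CH_SIZE;
	}
}

int main(void)
{
	/* H616 physical channels from the DE33 map {0, 6, 7, 8} */
	const unsigned int phy[] = { 0, 6, 7, 8 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("channel %u -> base 0x%x\n", phy[i],
		       channel_base(DE33, phy[i]));
	return 0;
}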
int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format);
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index f97be0040aab..f08f6da55dd0 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -18,6 +18,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "sun8i_mixer.h"
@@ -25,44 +26,49 @@
#include "sun8i_ui_scaler.h"
#include "sun8i_vi_scaler.h"
-static void sun8i_ui_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+static void sun8i_ui_layer_disable(struct sun8i_layer *layer)
{
- u32 mask, val, ch_base;
+ u32 ch_base = sun8i_channel_base(layer);
- ch_base = sun8i_channel_base(mixer, channel);
+ regmap_write(layer->regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, layer->overlay), 0);
+}
- mask = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK |
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK;
+static void sun8i_ui_layer_update_attributes(struct sun8i_layer *layer,
+ struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ const struct drm_format_info *fmt;
+ u32 val, ch_base, hw_fmt;
- val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(plane->state->alpha >> 8);
+ ch_base = sun8i_channel_base(layer);
+ fmt = state->fb->format;
+ sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
- val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ?
+ val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(state->alpha >> 8);
+ val |= (state->alpha == DRM_BLEND_ALPHA_OPAQUE) ?
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_PIXEL :
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_COMBINED;
+ val |= hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
+ val |= SUN8I_MIXER_CHAN_UI_LAYER_ATTR_EN;
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
- mask, val);
+ regmap_write(layer->regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, layer->overlay), val);
}
-static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane,
- unsigned int zpos)
+static void sun8i_ui_layer_update_coord(struct sun8i_layer *layer,
+ struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
- struct regmap *bld_regs;
- u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
+ u32 ch_base;
DRM_DEBUG_DRIVER("Updating UI channel %d overlay %d\n",
- channel, overlay);
+ layer->channel, layer->overlay);
- bld_base = sun8i_blender_base(mixer);
- bld_regs = sun8i_blender_regmap(mixer);
- ch_base = sun8i_channel_base(mixer, channel);
+ ch_base = sun8i_channel_base(layer);
src_w = drm_rect_width(&state->src) >> 16;
src_h = drm_rect_height(&state->src) >> 16;
@@ -79,10 +85,10 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
DRM_DEBUG_DRIVER("Layer source offset X: %d Y: %d\n",
state->src.x1 >> 16, state->src.y1 >> 16);
DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch_base, overlay),
+ regmap_write(layer->regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_SIZE(ch_base, layer->overlay),
insize);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_MIXER_CHAN_UI_OVL_SIZE(ch_base),
insize);
@@ -94,67 +100,27 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
hscale = state->src_w / state->crtc_w;
vscale = state->src_h / state->crtc_h;
- if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
- sun8i_vi_scaler_setup(mixer, channel, src_w, src_h,
- dst_w, dst_h, hscale, vscale,
- hphase, vphase,
+ if (layer->cfg->de_type == SUN8I_MIXER_DE33) {
+ sun8i_vi_scaler_setup(layer, src_w, src_h, dst_w, dst_h,
+ hscale, vscale, hphase, vphase,
state->fb->format);
- sun8i_vi_scaler_enable(mixer, channel, true);
+ sun8i_vi_scaler_enable(layer, true);
} else {
- sun8i_ui_scaler_setup(mixer, channel, src_w, src_h,
- dst_w, dst_h, hscale, vscale,
- hphase, vphase);
- sun8i_ui_scaler_enable(mixer, channel, true);
+ sun8i_ui_scaler_setup(layer, src_w, src_h, dst_w, dst_h,
+ hscale, vscale, hphase, vphase);
+ sun8i_ui_scaler_enable(layer, true);
}
} else {
DRM_DEBUG_DRIVER("HW scaling is not needed\n");
- if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
- sun8i_vi_scaler_enable(mixer, channel, false);
+ if (layer->cfg->de_type == SUN8I_MIXER_DE33)
+ sun8i_vi_scaler_enable(layer, false);
else
- sun8i_ui_scaler_enable(mixer, channel, false);
+ sun8i_ui_scaler_enable(layer, false);
}
-
- /* Set base coordinates */
- DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
- state->dst.x1, state->dst.y1);
- DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(bld_regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
- SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(bld_regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
- outsize);
-
- return 0;
}
-static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
-{
- struct drm_plane_state *state = plane->state;
- const struct drm_format_info *fmt;
- u32 val, ch_base, hw_fmt;
- int ret;
-
- ch_base = sun8i_channel_base(mixer, channel);
-
- fmt = state->fb->format;
- ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
- if (ret || fmt->is_yuv) {
- DRM_DEBUG_DRIVER("Invalid format\n");
- return -EINVAL;
- }
-
- val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
- SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
-
- return 0;
-}
-
-static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+static void sun8i_ui_layer_update_buffer(struct sun8i_layer *layer,
+ struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
@@ -163,7 +129,7 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
u32 ch_base;
int bpp;
- ch_base = sun8i_channel_base(mixer, channel);
+ ch_base = sun8i_channel_base(layer);
/* Get the physical address of the buffer in memory */
gem = drm_fb_dma_get_gem_obj(fb, 0);
@@ -180,17 +146,15 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
/* Set the line width */
DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, overlay),
+ regmap_write(layer->regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, layer->overlay),
fb->pitches[0]);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, overlay),
+ regmap_write(layer->regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, layer->overlay),
lower_32_bits(dma_addr));
-
- return 0;
}
static int sun8i_ui_layer_atomic_check(struct drm_plane *plane,
@@ -201,20 +165,28 @@ static int sun8i_ui_layer_atomic_check(struct drm_plane *plane,
struct sun8i_layer *layer = plane_to_sun8i_layer(plane);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
- int min_scale, max_scale;
+ const struct drm_format_info *fmt;
+ int min_scale, max_scale, ret;
+ u32 hw_fmt;
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
+ fmt = new_plane_state->fb->format;
+ ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+ if (ret || fmt->is_yuv) {
+ DRM_DEBUG_DRIVER("Invalid plane format\n");
+ return -EINVAL;
+ }
+
min_scale = DRM_PLANE_NO_SCALING;
max_scale = DRM_PLANE_NO_SCALING;
- if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
+ if (layer->cfg->scaler_mask & BIT(layer->channel)) {
min_scale = SUN8I_UI_SCALER_SCALE_MIN;
max_scale = SUN8I_UI_SCALER_SCALE_MAX;
}
@@ -232,20 +204,15 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct sun8i_layer *layer = plane_to_sun8i_layer(plane);
- unsigned int zpos = new_state->normalized_zpos;
- struct sun8i_mixer *mixer = layer->mixer;
- if (!new_state->crtc || !new_state->visible)
+ if (!new_state->crtc || !new_state->visible) {
+ sun8i_ui_layer_disable(layer);
return;
+ }
- sun8i_ui_layer_update_coord(mixer, layer->channel,
- layer->overlay, plane, zpos);
- sun8i_ui_layer_update_alpha(mixer, layer->channel,
- layer->overlay, plane);
- sun8i_ui_layer_update_formats(mixer, layer->channel,
- layer->overlay, plane);
- sun8i_ui_layer_update_buffer(mixer, layer->channel,
- layer->overlay, plane);
+ sun8i_ui_layer_update_attributes(layer, plane);
+ sun8i_ui_layer_update_coord(layer, plane);
+ sun8i_ui_layer_update_buffer(layer, plane);
}
static const struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
@@ -291,21 +258,25 @@ static const uint64_t sun8i_layer_modifiers[] = {
};
struct sun8i_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
- struct sun8i_mixer *mixer,
- int index)
+ enum drm_plane_type type,
+ struct regmap *regs,
+ int index, int phy_index,
+ int plane_cnt,
+ const struct sun8i_layer_cfg *cfg)
{
- enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
- int channel = mixer->cfg->vi_num + index;
struct sun8i_layer *layer;
- unsigned int plane_cnt;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
if (!layer)
return ERR_PTR(-ENOMEM);
- if (index == 0)
- type = DRM_PLANE_TYPE_PRIMARY;
+ layer->type = SUN8I_LAYER_TYPE_UI;
+ layer->index = index;
+ layer->channel = phy_index;
+ layer->overlay = 0;
+ layer->regs = regs;
+ layer->cfg = cfg;
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
@@ -318,15 +289,13 @@ struct sun8i_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
- plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
-
ret = drm_plane_create_alpha_property(&layer->plane);
if (ret) {
dev_err(drm->dev, "Couldn't add alpha property\n");
return ERR_PTR(ret);
}
- ret = drm_plane_create_zpos_property(&layer->plane, channel,
+ ret = drm_plane_create_zpos_property(&layer->plane, index,
0, plane_cnt - 1);
if (ret) {
dev_err(drm->dev, "Couldn't add zpos property\n");
@@ -334,10 +303,6 @@ struct sun8i_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
}
drm_plane_helper_add(&layer->plane, &sun8i_ui_layer_helper_funcs);
- layer->mixer = mixer;
- layer->type = SUN8I_LAYER_TYPE_UI;
- layer->channel = channel;
- layer->overlay = 0;
return layer;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
index 83892f6ff211..1581ffc6d4e5 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
@@ -50,6 +50,9 @@ struct sun8i_mixer;
struct sun8i_layer;
struct sun8i_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
- struct sun8i_mixer *mixer,
- int index);
+ enum drm_plane_type type,
+ struct regmap *regs,
+ int index, int phy_index,
+ int plane_cnt,
+ const struct sun8i_layer_cfg *cfg);
#endif /* _SUN8I_UI_LAYER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
index 8b7a58e27517..a178da8f532a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
@@ -89,18 +89,18 @@ static const u32 lan2coefftab16[240] = {
0x0b1c1603, 0x0d1c1502, 0x0e1d1401, 0x0f1d1301,
};
-static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel)
+static u32 sun8i_ui_scaler_base(struct sun8i_layer *layer)
{
- int vi_num = mixer->cfg->vi_num;
+ int offset = layer->cfg->vi_scaler_num;
- if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
+ if (layer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
- DE3_VI_SCALER_UNIT_SIZE * vi_num +
- DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num);
+ DE3_VI_SCALER_UNIT_SIZE * offset +
+ DE3_UI_SCALER_UNIT_SIZE * (layer->channel - offset);
else
return DE2_VI_SCALER_UNIT_BASE +
- DE2_VI_SCALER_UNIT_SIZE * vi_num +
- DE2_UI_SCALER_UNIT_SIZE * (channel - vi_num);
+ DE2_VI_SCALER_UNIT_SIZE * offset +
+ DE2_UI_SCALER_UNIT_SIZE * (layer->channel - offset);
}
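
The change above is subtle: UI scaler register blocks sit immediately after all VI scaler blocks, and the offset now comes from vi_scaler_num (the number of VI scaler units) rather than vi_num (the number of VI channels). On DE2/DE3 the two happen to match, which is why the existing configs set vi_scaler_num to the old vi_num. A sketch of the address arithmetic; the unit base and sizes are parameters here because only the DE3 VI constants appear in this patch, and the UI unit size used below (0x4000) is an assumption:

#include <stdio.h>

static unsigned int ui_scaler_base(unsigned int vi_units, unsigned int channel,
				   unsigned int vi_base, unsigned int vi_size,
				   unsigned int ui_size)
{
	/* VI scaler units first, then UI units indexed from zero. */
	return vi_base + vi_size * vi_units + ui_size * (channel - vi_units);
}

int main(void)
{
	/* DE3: VI unit base 0x20000, VI unit size 0x08000, one VI unit */
	unsigned int channel;

	for (channel = 1; channel <= 3; channel++)
		printf("UI channel %u -> scaler base 0x%x\n", channel,
		       ui_scaler_base(1, channel, 0x20000, 0x08000, 0x4000));
	return 0;
}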
static int sun8i_ui_scaler_coef_index(unsigned int step)
@@ -127,14 +127,11 @@ static int sun8i_ui_scaler_coef_index(unsigned int step)
}
}
-void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
+void sun8i_ui_scaler_enable(struct sun8i_layer *layer, bool enable)
{
u32 val, base;
- if (WARN_ON(layer < mixer->cfg->vi_num))
- return;
-
- base = sun8i_ui_scaler_base(mixer, layer);
+ base = sun8i_ui_scaler_base(layer);
if (enable)
val = SUN8I_SCALER_GSU_CTRL_EN |
@@ -142,10 +139,10 @@ void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
else
val = 0;
- regmap_write(mixer->engine.regs, SUN8I_SCALER_GSU_CTRL(base), val);
+ regmap_write(layer->regs, SUN8I_SCALER_GSU_CTRL(base), val);
}
-void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
+void sun8i_ui_scaler_setup(struct sun8i_layer *layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
u32 hscale, u32 vscale, u32 hphase, u32 vphase)
{
@@ -153,10 +150,7 @@ void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
int i, offset;
u32 base;
- if (WARN_ON(layer < mixer->cfg->vi_num))
- return;
-
- base = sun8i_ui_scaler_base(mixer, layer);
+ base = sun8i_ui_scaler_base(layer);
hphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
vphase <<= SUN8I_UI_SCALER_PHASE_FRAC - 16;
@@ -166,22 +160,22 @@ void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
insize = SUN8I_UI_SCALER_SIZE(src_w, src_h);
outsize = SUN8I_UI_SCALER_SIZE(dst_w, dst_h);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_GSU_OUTSIZE(base), outsize);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_GSU_INSIZE(base), insize);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_GSU_HSTEP(base), hscale);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_GSU_VSTEP(base), vscale);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_GSU_HPHASE(base), hphase);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_GSU_VPHASE(base), vphase);
offset = sun8i_ui_scaler_coef_index(hscale) *
SUN8I_UI_SCALER_COEFF_COUNT;
for (i = 0; i < SUN8I_UI_SCALER_COEFF_COUNT; i++)
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_GSU_HCOEFF(base, i),
lan2coefftab16[offset + i]);
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
index 1ef4bd6f2718..872d88a58e7e 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.h
@@ -35,8 +35,8 @@
#define SUN8I_SCALER_GSU_CTRL_EN BIT(0)
#define SUN8I_SCALER_GSU_CTRL_COEFF_RDY BIT(4)
-void sun8i_ui_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable);
-void sun8i_ui_scaler_setup(struct sun8i_mixer *mixer, int layer,
+void sun8i_ui_scaler_enable(struct sun8i_layer *layer, bool enable);
+void sun8i_ui_scaler_setup(struct sun8i_layer *layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
u32 hscale, u32 vscale, u32 hphase, u32 vphase);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index a09ee4097537..ca3ab59e108d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -11,64 +11,74 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include "sun4i_crtc.h"
#include "sun8i_csc.h"
#include "sun8i_mixer.h"
#include "sun8i_vi_layer.h"
#include "sun8i_vi_scaler.h"
-static void sun8i_vi_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+static void sun8i_vi_layer_disable(struct sun8i_layer *layer)
{
- u32 mask, val, ch_base;
+ u32 ch_base = sun8i_channel_base(layer);
- ch_base = sun8i_channel_base(mixer, channel);
+ regmap_write(layer->regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, layer->overlay), 0);
+}
- if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
- mask = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK |
- SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK;
- val = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA
- (plane->state->alpha >> 8);
+static void sun8i_vi_layer_update_attributes(struct sun8i_layer *layer,
+ struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ const struct drm_format_info *fmt;
+ u32 val, ch_base, hw_fmt;
- val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ?
+ ch_base = sun8i_channel_base(layer);
+ fmt = state->fb->format;
+ sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+
+ val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
+ if (!fmt->is_yuv)
+ val |= SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE;
+ val |= SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN;
+ if (layer->cfg->de_type >= SUN8I_MIXER_DE3) {
+ val |= SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(state->alpha >> 8);
+ val |= (state->alpha == DRM_BLEND_ALPHA_OPAQUE) ?
SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_PIXEL :
SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_COMBINED;
+ }
+
+ regmap_write(layer->regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, layer->overlay), val);
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base,
- overlay),
- mask, val);
- } else if (mixer->cfg->vi_num == 1) {
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_FCC_GLOBAL_ALPHA_REG,
- SUN8I_MIXER_FCC_GLOBAL_ALPHA_MASK,
- SUN8I_MIXER_FCC_GLOBAL_ALPHA
- (plane->state->alpha >> 8));
+ if (layer->cfg->de2_fcc_alpha) {
+ regmap_write(layer->regs,
+ SUN8I_MIXER_FCC_GLOBAL_ALPHA_REG,
+ SUN8I_MIXER_FCC_GLOBAL_ALPHA(state->alpha >> 8));
}
}
-static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane,
- unsigned int zpos)
+static void sun8i_vi_layer_update_coord(struct sun8i_layer *layer,
+ struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
+ struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(state->crtc);
+ struct sun8i_mixer *mixer = engine_to_sun8i_mixer(scrtc->engine);
const struct drm_format_info *format = state->fb->format;
u32 src_w, src_h, dst_w, dst_h;
- struct regmap *bld_regs;
- u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
u32 hn = 0, hm = 0;
u32 vn = 0, vm = 0;
bool subsampled;
+ u32 ch_base;
DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
- channel, overlay);
+ layer->channel, layer->overlay);
- bld_base = sun8i_blender_base(mixer);
- bld_regs = sun8i_blender_regmap(mixer);
- ch_base = sun8i_channel_base(mixer, channel);
+ ch_base = sun8i_channel_base(layer);
src_w = drm_rect_width(&state->src) >> 16;
src_h = drm_rect_height(&state->src) >> 16;
@@ -105,10 +115,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
(state->src.x1 >> 16) & ~(format->hsub - 1),
(state->src.y1 >> 16) & ~(format->vsub - 1));
DRM_DEBUG_DRIVER("Layer source size W: %d H: %d\n", src_w, src_h);
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch_base, overlay),
+ regmap_write(layer->regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_SIZE(ch_base, layer->overlay),
insize);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_MIXER_CHAN_VI_OVL_SIZE(ch_base),
insize);
@@ -143,7 +153,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
}
/* it seems that every RGB scaler has a buffer for 2048 pixels */
- scanline = subsampled ? mixer->cfg->scanline_yuv : 2048;
+ scanline = subsampled ? layer->cfg->scanline_yuv : 2048;
if (src_w > scanline) {
DRM_DEBUG_DRIVER("Using horizontal coarse scaling\n");
@@ -155,108 +165,34 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
hscale = (src_w << 16) / dst_w;
vscale = (src_h << 16) / dst_h;
- sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w,
- dst_h, hscale, vscale, hphase, vphase,
- format);
- sun8i_vi_scaler_enable(mixer, channel, true);
+ sun8i_vi_scaler_setup(layer, src_w, src_h, dst_w, dst_h,
+ hscale, vscale, hphase, vphase, format);
+ sun8i_vi_scaler_enable(layer, true);
} else {
DRM_DEBUG_DRIVER("HW scaling is not needed\n");
- sun8i_vi_scaler_enable(mixer, channel, false);
+ sun8i_vi_scaler_enable(layer, false);
}
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_MIXER_CHAN_VI_HDS_Y(ch_base),
SUN8I_MIXER_CHAN_VI_DS_N(hn) |
SUN8I_MIXER_CHAN_VI_DS_M(hm));
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_MIXER_CHAN_VI_HDS_UV(ch_base),
SUN8I_MIXER_CHAN_VI_DS_N(hn) |
SUN8I_MIXER_CHAN_VI_DS_M(hm));
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_MIXER_CHAN_VI_VDS_Y(ch_base),
SUN8I_MIXER_CHAN_VI_DS_N(vn) |
SUN8I_MIXER_CHAN_VI_DS_M(vm));
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_MIXER_CHAN_VI_VDS_UV(ch_base),
SUN8I_MIXER_CHAN_VI_DS_N(vn) |
SUN8I_MIXER_CHAN_VI_DS_M(vm));
-
- /* Set base coordinates */
- DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
- state->dst.x1, state->dst.y1);
- DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(bld_regs,
- SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
- SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(bld_regs,
- SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
- outsize);
-
- return 0;
-}
-
-static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
-{
- if (!format->is_yuv)
- return SUN8I_CSC_MODE_OFF;
-
- switch (format->format) {
- case DRM_FORMAT_YVU411:
- case DRM_FORMAT_YVU420:
- case DRM_FORMAT_YVU422:
- case DRM_FORMAT_YVU444:
- return SUN8I_CSC_MODE_YVU2RGB;
- default:
- return SUN8I_CSC_MODE_YUV2RGB;
- }
-}
-
-static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
-{
- struct drm_plane_state *state = plane->state;
- u32 val, ch_base, csc_mode, hw_fmt;
- const struct drm_format_info *fmt;
- int ret;
-
- ch_base = sun8i_channel_base(mixer, channel);
-
- fmt = state->fb->format;
- ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
- if (ret) {
- DRM_DEBUG_DRIVER("Invalid format\n");
- return ret;
- }
-
- val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
-
- csc_mode = sun8i_vi_layer_get_csc_mode(fmt);
- if (csc_mode != SUN8I_CSC_MODE_OFF) {
- sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode,
- state->color_encoding,
- state->color_range);
- sun8i_csc_enable_ccsc(mixer, channel, true);
- } else {
- sun8i_csc_enable_ccsc(mixer, channel, false);
- }
-
- if (!fmt->is_yuv)
- val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE;
- else
- val = 0;
-
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE, val);
-
- return 0;
}
-static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
- int overlay, struct drm_plane *plane)
+static void sun8i_vi_layer_update_buffer(struct sun8i_layer *layer,
+ struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
@@ -267,7 +203,7 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
u32 ch_base;
int i;
- ch_base = sun8i_channel_base(mixer, channel);
+ ch_base = sun8i_channel_base(layer);
/* Adjust x and y to be dividable by subsampling factor */
src_x = (state->src.x1 >> 16) & ~(format->hsub - 1);
@@ -297,21 +233,19 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
/* Set the line width */
DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n",
i + 1, fb->pitches[i]);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_MIXER_CHAN_VI_LAYER_PITCH(ch_base,
- overlay, i),
+ layer->overlay, i),
fb->pitches[i]);
DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n",
i + 1, &dma_addr);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch_base,
- overlay, i),
+ layer->overlay, i),
lower_32_bits(dma_addr));
}
-
- return 0;
}
static int sun8i_vi_layer_atomic_check(struct drm_plane *plane,
@@ -322,20 +256,28 @@ static int sun8i_vi_layer_atomic_check(struct drm_plane *plane,
struct sun8i_layer *layer = plane_to_sun8i_layer(plane);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
- int min_scale, max_scale;
+ const struct drm_format_info *fmt;
+ int min_scale, max_scale, ret;
+ u32 hw_fmt;
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
+ fmt = new_plane_state->fb->format;
+ ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Invalid plane format\n");
+ return ret;
+ }
+
min_scale = DRM_PLANE_NO_SCALING;
max_scale = DRM_PLANE_NO_SCALING;
- if (layer->mixer->cfg->scaler_mask & BIT(layer->channel)) {
+ if (layer->cfg->scaler_mask & BIT(layer->channel)) {
min_scale = SUN8I_VI_SCALER_SCALE_MIN;
max_scale = SUN8I_VI_SCALER_SCALE_MAX;
}
@@ -352,20 +294,16 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct sun8i_layer *layer = plane_to_sun8i_layer(plane);
- unsigned int zpos = new_state->normalized_zpos;
- struct sun8i_mixer *mixer = layer->mixer;
- if (!new_state->crtc || !new_state->visible)
+ if (!new_state->crtc || !new_state->visible) {
+ sun8i_vi_layer_disable(layer);
return;
+ }
- sun8i_vi_layer_update_coord(mixer, layer->channel,
- layer->overlay, plane, zpos);
- sun8i_vi_layer_update_alpha(mixer, layer->channel,
- layer->overlay, plane);
- sun8i_vi_layer_update_formats(mixer, layer->channel,
- layer->overlay, plane);
- sun8i_vi_layer_update_buffer(mixer, layer->channel,
- layer->overlay, plane);
+ sun8i_vi_layer_update_attributes(layer, plane);
+ sun8i_vi_layer_update_coord(layer, plane);
+ sun8i_csc_config(layer, new_state);
+ sun8i_vi_layer_update_buffer(layer, plane);
}
static const struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
@@ -471,12 +409,14 @@ static const uint64_t sun8i_layer_modifiers[] = {
};
struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
- struct sun8i_mixer *mixer,
- int index)
+ enum drm_plane_type type,
+ struct regmap *regs,
+ int index, int phy_index,
+ int plane_cnt,
+ const struct sun8i_layer_cfg *cfg)
{
- enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
u32 supported_encodings, supported_ranges;
- unsigned int plane_cnt, format_count;
+ unsigned int format_count;
struct sun8i_layer *layer;
const u32 *formats;
int ret;
@@ -485,7 +425,14 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
if (!layer)
return ERR_PTR(-ENOMEM);
- if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
+ layer->type = SUN8I_LAYER_TYPE_VI;
+ layer->index = index;
+ layer->channel = phy_index;
+ layer->overlay = 0;
+ layer->regs = regs;
+ layer->cfg = cfg;
+
+ if (layer->cfg->de_type >= SUN8I_MIXER_DE3) {
formats = sun8i_vi_layer_de3_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
} else {
@@ -493,9 +440,6 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
}
- if (!mixer->cfg->ui_num && index == 0)
- type = DRM_PLANE_TYPE_PRIMARY;
-
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_vi_layer_funcs,
@@ -507,9 +451,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
return ERR_PTR(ret);
}
- plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
-
- if (mixer->cfg->vi_num == 1 || mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
+ if (layer->cfg->de2_fcc_alpha || layer->cfg->de_type >= SUN8I_MIXER_DE3) {
ret = drm_plane_create_alpha_property(&layer->plane);
if (ret) {
dev_err(drm->dev, "Couldn't add alpha property\n");
@@ -526,7 +468,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709);
- if (mixer->cfg->de_type >= SUN8I_MIXER_DE3)
+ if (layer->cfg->de_type >= SUN8I_MIXER_DE3)
supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
@@ -543,10 +485,6 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
}
drm_plane_helper_add(&layer->plane, &sun8i_vi_layer_helper_funcs);
- layer->mixer = mixer;
- layer->type = SUN8I_LAYER_TYPE_VI;
- layer->channel = index;
- layer->overlay = 0;
return layer;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
index 655440cdc78f..29cc5573691f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -55,6 +55,9 @@ struct sun8i_mixer;
struct sun8i_layer;
struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
- struct sun8i_mixer *mixer,
- int index);
+ enum drm_plane_type type,
+ struct regmap *regs,
+ int index, int phy_index,
+ int plane_cnt,
+ const struct sun8i_layer_cfg *cfg);
#endif /* _SUN8I_VI_LAYER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
index 82df6244af88..3dec4eeb1ba2 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
@@ -833,16 +833,17 @@ static const u32 bicubic4coefftab32[480] = {
0x1012110d, 0x1012110d, 0x1013110c, 0x1013110c,
};
-static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel)
+static u32 sun8i_vi_scaler_base(struct sun8i_layer *layer)
{
- if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
- return sun8i_channel_base(mixer, channel) + 0x3000;
- else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
+ if (layer->cfg->de_type == SUN8I_MIXER_DE33)
+ return DE33_VI_SCALER_UNIT_BASE +
+ DE33_CH_SIZE * layer->channel;
+ else if (layer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
- DE3_VI_SCALER_UNIT_SIZE * channel;
+ DE3_VI_SCALER_UNIT_SIZE * layer->channel;
else
return DE2_VI_SCALER_UNIT_BASE +
- DE2_VI_SCALER_UNIT_SIZE * channel;
+ DE2_VI_SCALER_UNIT_SIZE * layer->channel;
}
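
The DE33 branch looks different but computes the same address as before: the old code took the mapped channel window (map[channel] * 0x20000 + DE2_CH_SIZE) and added 0x3000, while layer->channel now already holds the mapped physical index, so DE33_VI_SCALER_UNIT_BASE + DE33_CH_SIZE * channel lands on the identical offset. A quick equivalence check in plain C, assuming DE2_CH_SIZE is 0x1000:

#include <assert.h>
#include <stdio.h>

#define DE2_CH_SIZE			0x1000	/* assumed */
#define DE33_CH_SIZE			0x20000
#define DE33_VI_SCALER_UNIT_BASE	0x4000

int main(void)
{
	const unsigned int map[] = { 0, 6, 7, 8 };	/* H616 channel map */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int before = map[i] * DE33_CH_SIZE + DE2_CH_SIZE + 0x3000;
		unsigned int after = DE33_VI_SCALER_UNIT_BASE +
				     DE33_CH_SIZE * map[i];

		assert(before == after);
		printf("physical channel %u: scaler base 0x%x\n", map[i], after);
	}
	return 0;
}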
static int sun8i_vi_scaler_coef_index(unsigned int step)
@@ -909,11 +910,11 @@ static void sun8i_vi_scaler_set_coeff(struct regmap *map, u32 base,
}
}
-void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
+void sun8i_vi_scaler_enable(struct sun8i_layer *layer, bool enable)
{
u32 val, base;
- base = sun8i_vi_scaler_base(mixer, layer);
+ base = sun8i_vi_scaler_base(layer);
if (enable)
val = SUN8I_SCALER_VSU_CTRL_EN |
@@ -921,11 +922,11 @@ void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable)
else
val = 0;
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_CTRL(base), val);
}
-void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
+void sun8i_vi_scaler_setup(struct sun8i_layer *layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
u32 hscale, u32 vscale, u32 hphase, u32 vphase,
const struct drm_format_info *format)
@@ -934,7 +935,7 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
u32 insize, outsize;
u32 base;
- base = sun8i_vi_scaler_base(mixer, layer);
+ base = sun8i_vi_scaler_base(layer);
hphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
vphase <<= SUN8I_VI_SCALER_PHASE_FRAC - 16;
@@ -958,7 +959,7 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
cvphase = vphase;
}
- if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
+ if (layer->cfg->de_type >= SUN8I_MIXER_DE3) {
u32 val;
if (format->hsub == 1 && format->vsub == 1)
@@ -966,36 +967,36 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
else
val = SUN50I_SCALER_VSU_SCALE_MODE_NORMAL;
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN50I_SCALER_VSU_SCALE_MODE(base), val);
}
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_OUTSIZE(base), outsize);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_YINSIZE(base), insize);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_YHSTEP(base), hscale);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_YVSTEP(base), vscale);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_YHPHASE(base), hphase);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_YVPHASE(base), vphase);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_CINSIZE(base),
SUN8I_VI_SCALER_SIZE(src_w / format->hsub,
src_h / format->vsub));
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_CHSTEP(base),
hscale / format->hsub);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_CVSTEP(base),
vscale / format->vsub);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_CHPHASE(base), chphase);
- regmap_write(mixer->engine.regs,
+ regmap_write(layer->regs,
SUN8I_SCALER_VSU_CVPHASE(base), cvphase);
- sun8i_vi_scaler_set_coeff(mixer->engine.regs, base,
+ sun8i_vi_scaler_set_coeff(layer->regs, base,
hscale, vscale, format);
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
index 68f6593b369a..245fe2f431c3 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.h
@@ -18,6 +18,8 @@
#define DE3_VI_SCALER_UNIT_BASE 0x20000
#define DE3_VI_SCALER_UNIT_SIZE 0x08000
+#define DE33_VI_SCALER_UNIT_BASE 0x4000
+
/* these two macros assume 16 fractional bits, which is standard in DRM */
#define SUN8I_VI_SCALER_SCALE_MIN 1
#define SUN8I_VI_SCALER_SCALE_MAX ((1UL << 20) - 1)
@@ -69,8 +71,8 @@
#define SUN50I_SCALER_VSU_ANGLE_SHIFT(x) (((x) << 16) & 0xF)
#define SUN50I_SCALER_VSU_ANGLE_OFFSET(x) ((x) & 0xFF)
-void sun8i_vi_scaler_enable(struct sun8i_mixer *mixer, int layer, bool enable);
-void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
+void sun8i_vi_scaler_enable(struct sun8i_layer *layer, bool enable);
+void sun8i_vi_scaler_setup(struct sun8i_layer *layer,
u32 src_w, u32 src_h, u32 dst_w, u32 dst_h,
u32 hscale, u32 vscale, u32 hphase, u32 vphase,
const struct drm_format_info *format);
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index 1424b63dde99..da670d7eeb2e 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -10,12 +10,19 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modes.h>
struct drm_format_info;
struct drm_scanout_buffer;
struct screen_info;
+typedef void (*drm_sysfb_blit_func)(struct iosys_map *, const unsigned int *,
+ const struct iosys_map *,
+ const struct drm_framebuffer *,
+ const struct drm_rect *,
+ struct drm_format_conv_state *);
+
/*
* Input parsing
*/
@@ -93,10 +100,25 @@ static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *de
* Plane
*/
+struct drm_sysfb_plane_state {
+ struct drm_shadow_plane_state base;
+
+ /* transfers framebuffer data to scanout buffer in CRTC format */
+ drm_sysfb_blit_func blit_to_crtc;
+};
+
+static inline struct drm_sysfb_plane_state *
+to_drm_sysfb_plane_state(struct drm_plane_state *base)
+{
+ return container_of(to_drm_shadow_plane_state(base), struct drm_sysfb_plane_state, base);
+}
+
size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
u32 *fourccs_out, size_t nfourccs_out);
+int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state);
void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
@@ -114,16 +136,24 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
DRM_FORMAT_MOD_INVALID
#define DRM_SYSFB_PLANE_HELPER_FUNCS \
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
+ .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access, \
+ .end_fb_access = drm_gem_end_shadow_fb_access, \
.atomic_check = drm_sysfb_plane_helper_atomic_check, \
.atomic_update = drm_sysfb_plane_helper_atomic_update, \
.atomic_disable = drm_sysfb_plane_helper_atomic_disable, \
.get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer
+void drm_sysfb_plane_reset(struct drm_plane *plane);
+struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane);
+void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
+
#define DRM_SYSFB_PLANE_FUNCS \
+ .reset = drm_sysfb_plane_reset, \
.update_plane = drm_atomic_helper_update_plane, \
.disable_plane = drm_atomic_helper_disable_plane, \
- DRM_GEM_SHADOW_PLANE_FUNCS
+ .atomic_duplicate_state = drm_sysfb_plane_atomic_duplicate_state, \
+ .atomic_destroy_state = drm_sysfb_plane_atomic_destroy_state
/*
* CRTC
@@ -132,7 +162,7 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
struct drm_sysfb_crtc_state {
struct drm_crtc_state base;
- /* Primary-plane format; required for color mgmt. */
+ /* CRTC input color format; required for color mgmt. */
const struct drm_format_info *format;
};
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
index 1bcdb5ee8f09..6214b7709b37 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -11,7 +11,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_print.h>
@@ -185,6 +184,104 @@ size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_sysfb_build_fourcc_list);
+static void drm_sysfb_plane_state_destroy(struct drm_sysfb_plane_state *sysfb_plane_state)
+{
+ __drm_gem_destroy_shadow_plane_state(&sysfb_plane_state->base);
+
+ kfree(sysfb_plane_state);
+}
+
+static void drm_sysfb_memcpy(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state)
+{
+ drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
+}
+
+static drm_sysfb_blit_func drm_sysfb_get_blit_func(u32 dst_format, u32 src_format)
+{
+ if (src_format == dst_format) {
+ return drm_sysfb_memcpy;
+ } else if (src_format == DRM_FORMAT_XRGB8888) {
+ switch (dst_format) {
+ case DRM_FORMAT_RGB565:
+ return drm_fb_xrgb8888_to_rgb565;
+ case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
+ return drm_fb_xrgb8888_to_rgb565be;
+ case DRM_FORMAT_XRGB1555:
+ return drm_fb_xrgb8888_to_xrgb1555;
+ case DRM_FORMAT_ARGB1555:
+ return drm_fb_xrgb8888_to_argb1555;
+ case DRM_FORMAT_RGBA5551:
+ return drm_fb_xrgb8888_to_rgba5551;
+ case DRM_FORMAT_RGB888:
+ return drm_fb_xrgb8888_to_rgb888;
+ case DRM_FORMAT_BGR888:
+ return drm_fb_xrgb8888_to_bgr888;
+ case DRM_FORMAT_ARGB8888:
+ return drm_fb_xrgb8888_to_argb8888;
+ case DRM_FORMAT_XBGR8888:
+ return drm_fb_xrgb8888_to_xbgr8888;
+ case DRM_FORMAT_ABGR8888:
+ return drm_fb_xrgb8888_to_abgr8888;
+ case DRM_FORMAT_XRGB2101010:
+ return drm_fb_xrgb8888_to_xrgb2101010;
+ case DRM_FORMAT_ARGB2101010:
+ return drm_fb_xrgb8888_to_argb2101010;
+ case DRM_FORMAT_BGRX8888:
+ return drm_fb_xrgb8888_to_bgrx8888;
+ case DRM_FORMAT_RGB332:
+ return drm_fb_xrgb8888_to_rgb332;
+ }
+ }
+
+ return NULL;
+}
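
The helper above is a plain format-pair dispatch; the point of returning a function pointer is that the (potentially failing) decision happens once, in begin_fb_access(), while the per-damage-rectangle blit in atomic_update() just calls through it. A reduced model of the pattern in plain C, with made-up format codes and a single conversion (XRGB8888 to RGB565):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { FMT_XRGB8888, FMT_RGB565 };

typedef void (*blit_func)(void *dst, const void *src, size_t pixels);

static void blit_copy32(void *dst, const void *src, size_t pixels)
{
	memcpy(dst, src, pixels * 4);	/* identical formats: plain copy */
}

static void blit_xrgb8888_to_rgb565(void *dst, const void *src, size_t pixels)
{
	const uint32_t *s = src;
	uint16_t *d = dst;
	size_t i;

	for (i = 0; i < pixels; i++)
		d[i] = ((s[i] >> 8) & 0xf800) |	/* top 5 red bits */
		       ((s[i] >> 5) & 0x07e0) |	/* top 6 green bits */
		       ((s[i] >> 3) & 0x001f);	/* top 5 blue bits */
}

/* Decide once; NULL means the pair is unsupported and the commit fails. */
static blit_func get_blit(int dst_fmt, int src_fmt)
{
	if (dst_fmt == src_fmt)
		return blit_copy32;
	if (src_fmt == FMT_XRGB8888 && dst_fmt == FMT_RGB565)
		return blit_xrgb8888_to_rgb565;
	return NULL;
}

int main(void)
{
	uint32_t src = 0x00ff8040;	/* XRGB: R=0xff G=0x80 B=0x40 */
	uint16_t dst;
	blit_func blit = get_blit(FMT_RGB565, FMT_XRGB8888);

	blit(&dst, &src, 1);
	printf("0x%08x -> 0x%04x\n", src, dst);	/* prints 0xfc08 */
	return 0;
}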
+
+int drm_sysfb_plane_helper_begin_fb_access(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc_state *crtc_state;
+ struct drm_sysfb_crtc_state *sysfb_crtc_state;
+ drm_sysfb_blit_func blit_to_crtc;
+ int ret;
+
+ ret = drm_gem_begin_shadow_fb_access(plane, plane_state);
+ if (ret)
+ return ret;
+
+ if (!fb)
+ return 0;
+
+ ret = -EINVAL;
+
+ crtc_state = drm_atomic_get_new_crtc_state(plane_state->state, plane_state->crtc);
+ if (drm_WARN_ON_ONCE(dev, !crtc_state))
+ goto err_drm_gem_end_shadow_fb_access;
+ sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+
+ if (drm_WARN_ON_ONCE(dev, !sysfb_crtc_state->format))
+ goto err_drm_gem_end_shadow_fb_access;
+ blit_to_crtc = drm_sysfb_get_blit_func(sysfb_crtc_state->format->format,
+ fb->format->format);
+ if (!blit_to_crtc) {
+ drm_warn_once(dev, "No blit helper from %p4cc to %p4cc found.\n",
+ &fb->format->format, &sysfb_crtc_state->format->format);
+ goto err_drm_gem_end_shadow_fb_access;
+ }
+ sysfb_plane_state->blit_to_crtc = blit_to_crtc;
+
+ return 0;
+
+err_drm_gem_end_shadow_fb_access:
+ drm_gem_end_shadow_fb_access(plane, plane_state);
+ return ret;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_helper_begin_fb_access);
+
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state)
{
@@ -210,7 +307,12 @@ int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
else if (!new_plane_state->visible)
return 0;
- if (new_fb->format != sysfb->fb_format) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+ new_sysfb_crtc_state->format = sysfb->fb_format;
+
+ if (new_fb->format != new_sysfb_crtc_state->format) {
void *buf;
/* format conversion necessary; reserve buffer */
@@ -220,11 +322,6 @@ int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
return -ENOMEM;
}
- new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
-
- new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
- new_sysfb_crtc_state->format = new_fb->format;
-
return 0;
}
EXPORT_SYMBOL(drm_sysfb_plane_helper_atomic_check);
@@ -235,10 +332,14 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at
struct drm_sysfb_device *sysfb = to_drm_sysfb_device(dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_sysfb_plane_state *sysfb_plane_state = to_drm_sysfb_plane_state(plane_state);
+ struct drm_shadow_plane_state *shadow_plane_state = &sysfb_plane_state->base;
struct drm_framebuffer *fb = plane_state->fb;
unsigned int dst_pitch = sysfb->fb_pitch;
- const struct drm_format_info *dst_format = sysfb->fb_format;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state);
+ const struct drm_format_info *dst_format = sysfb_crtc_state->format;
+ drm_sysfb_blit_func blit_to_crtc = sysfb_plane_state->blit_to_crtc;
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
int ret, idx;
@@ -259,8 +360,8 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at
continue;
iosys_map_incr(&dst, drm_fb_clip_offset(dst_pitch, dst_format, &dst_clip));
- drm_fb_blit(&dst, &dst_pitch, dst_format->format, shadow_plane_state->data, fb,
- &damage, &shadow_plane_state->fmtcnv_state);
+ blit_to_crtc(&dst, &dst_pitch, shadow_plane_state->data, fb, &damage,
+ &shadow_plane_state->fmtcnv_state);
}
drm_dev_exit(idx);
@@ -319,6 +420,52 @@ int drm_sysfb_plane_helper_get_scanout_buffer(struct drm_plane *plane,
}
EXPORT_SYMBOL(drm_sysfb_plane_helper_get_scanout_buffer);
+void drm_sysfb_plane_reset(struct drm_plane *plane)
+{
+ struct drm_sysfb_plane_state *sysfb_plane_state;
+
+ if (plane->state)
+ drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane->state));
+
+ sysfb_plane_state = kzalloc(sizeof(*sysfb_plane_state), GFP_KERNEL);
+ if (sysfb_plane_state)
+ __drm_gem_reset_shadow_plane(plane, &sysfb_plane_state->base);
+ else
+ __drm_gem_reset_shadow_plane(plane, NULL);
+}
+EXPORT_SYMBOL(drm_sysfb_plane_reset);
+
+struct drm_plane_state *drm_sysfb_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_sysfb_plane_state *sysfb_plane_state;
+ struct drm_sysfb_plane_state *new_sysfb_plane_state;
+ struct drm_shadow_plane_state *new_shadow_plane_state;
+
+ if (drm_WARN_ON(dev, !plane_state))
+ return NULL;
+ sysfb_plane_state = to_drm_sysfb_plane_state(plane_state);
+
+ new_sysfb_plane_state = kzalloc(sizeof(*new_sysfb_plane_state), GFP_KERNEL);
+ if (!new_sysfb_plane_state)
+ return NULL;
+ new_shadow_plane_state = &new_sysfb_plane_state->base;
+
+ __drm_gem_duplicate_shadow_plane_state(plane, new_shadow_plane_state);
+ new_sysfb_plane_state->blit_to_crtc = sysfb_plane_state->blit_to_crtc;
+
+ return &new_shadow_plane_state->base;
+}
+EXPORT_SYMBOL(drm_sysfb_plane_atomic_duplicate_state);
+
+void drm_sysfb_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ drm_sysfb_plane_state_destroy(to_drm_sysfb_plane_state(plane_state));
+}
+EXPORT_SYMBOL(drm_sysfb_plane_atomic_destroy_state);
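
Taken together, reset/duplicate/destroy above follow the standard recipe for subclassing a helper-managed plane state: allocate the subclass, delegate the base part to the shadow-plane helpers, and hand-copy any derived fields (here, the cached blit_to_crtc pointer). The shape, reduced to plain C with a stand-in base type; the kernel versions use the __drm_gem_*_shadow_plane_state() helpers instead of the direct assignments shown here:

#include <stdlib.h>

struct base_state { int placeholder; };

struct sub_state {
	struct base_state base;	/* embedded base state */
	void *cached;		/* derived field, e.g. a blit function */
};

static struct sub_state *sub_duplicate(const struct sub_state *old)
{
	struct sub_state *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->base = old->base;		/* base helper would do this part */
	s->cached = old->cached;	/* derived fields copied explicitly */
	return s;
}

static void sub_destroy(struct sub_state *s)
{
	/* base helper teardown would run here before the free */
	free(s);
}

int main(void)
{
	struct sub_state first = { .cached = (void *)0x1 };
	struct sub_state *copy = sub_duplicate(&first);

	sub_destroy(copy);
	return 0;
}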
+
/*
* CRTC
*/
@@ -370,16 +517,19 @@ EXPORT_SYMBOL(drm_sysfb_crtc_helper_atomic_check);
void drm_sysfb_crtc_reset(struct drm_crtc *crtc)
{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(crtc->dev);
struct drm_sysfb_crtc_state *sysfb_crtc_state;
if (crtc->state)
drm_sysfb_crtc_state_destroy(to_drm_sysfb_crtc_state(crtc->state));
sysfb_crtc_state = kzalloc(sizeof(*sysfb_crtc_state), GFP_KERNEL);
- if (sysfb_crtc_state)
+ if (sysfb_crtc_state) {
+ sysfb_crtc_state->format = sysfb->fb_format;
__drm_atomic_helper_crtc_reset(crtc, &sysfb_crtc_state->base);
- else
+ } else {
__drm_atomic_helper_crtc_reset(crtc, NULL);
+ }
}
EXPORT_SYMBOL(drm_sysfb_crtc_reset);
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
index 0b3fb874a51f..885864168c54 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_screen_info.c
@@ -79,22 +79,19 @@ const struct drm_format_info *drm_sysfb_get_format_si(struct drm_device *dev,
const struct screen_info *si)
{
const struct drm_format_info *format = NULL;
- u32 bits_per_pixel;
+ struct pixel_format pixel;
size_t i;
+ int ret;
- bits_per_pixel = __screen_info_lfb_bits_per_pixel(si);
+ ret = screen_info_pixel_format(si, &pixel);
+ if (ret)
+ return NULL;
for (i = 0; i < nformats; ++i) {
- const struct pixel_format *f = &formats[i].pixel;
-
- if (bits_per_pixel == f->bits_per_pixel &&
- si->red_size == f->red.length &&
- si->red_pos == f->red.offset &&
- si->green_size == f->green.length &&
- si->green_pos == f->green.offset &&
- si->blue_size == f->blue.length &&
- si->blue_pos == f->blue.offset) {
- format = drm_format_info(formats[i].fourcc);
+ const struct drm_sysfb_format *f = &formats[i];
+
+ if (pixel_format_equal(&pixel, &f->pixel)) {
+ format = drm_format_info(f->fourcc);
break;
}
}
diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c
index 1883c4a8604c..1b683d55d6ea 100644
--- a/drivers/gpu/drm/sysfb/efidrm.c
+++ b/drivers/gpu/drm/sysfb/efidrm.c
@@ -21,6 +21,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <video/edid.h>
diff --git a/drivers/gpu/drm/sysfb/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c
index 8d8ab39c5f36..d38ba70f4e0d 100644
--- a/drivers/gpu/drm/sysfb/ofdrm.c
+++ b/drivers/gpu/drm/sysfb/ofdrm.c
@@ -21,6 +21,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "drm_sysfb_helper.h"
diff --git a/drivers/gpu/drm/sysfb/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index 8530a3ef8a7a..7a95d2dacd9d 100644
--- a/drivers/gpu/drm/sysfb/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -2,9 +2,10 @@
#include <linux/aperture.h>
#include <linux/clk.h>
-#include <linux/of_clk.h>
#include <linux/minmax.h>
#include <linux/of_address.h>
+#include <linux/of_clk.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -24,6 +25,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "drm_sysfb_helper.h"
@@ -179,22 +181,17 @@ simplefb_get_format_of(struct drm_device *dev, struct device_node *of_node)
static struct resource *
simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
{
- struct device_node *np;
- struct resource *res;
+ struct resource r, *res;
int err;
- np = of_parse_phandle(of_node, "memory-region", 0);
- if (!np)
+ err = of_reserved_mem_region_to_resource(of_node, 0, &r);
+ if (err)
return NULL;
- res = devm_kzalloc(dev->dev, sizeof(*res), GFP_KERNEL);
+ res = devm_kmemdup(dev->dev, &r, sizeof(r), GFP_KERNEL);
if (!res)
return ERR_PTR(-ENOMEM);
- err = of_address_to_resource(np, 0, res);
- if (err)
- return ERR_PTR(err);
-
if (of_property_present(of_node, "reg"))
drm_warn(dev, "preferring \"memory-region\" over \"reg\" property\n");
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
index 90615e9ac86b..7b7b5ba26317 100644
--- a/drivers/gpu/drm/sysfb/vesadrm.c
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -22,6 +22,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <video/edid.h>
@@ -46,6 +47,7 @@ static const struct drm_format_info *vesadrm_get_format_si(struct drm_device *de
{ PIXEL_FORMAT_RGB888, DRM_FORMAT_RGB888, },
{ PIXEL_FORMAT_XRGB8888, DRM_FORMAT_XRGB8888, },
{ PIXEL_FORMAT_XBGR8888, DRM_FORMAT_XBGR8888, },
+ { PIXEL_FORMAT_C8, DRM_FORMAT_C8, },
};
return drm_sysfb_get_format_si(dev, formats, ARRAY_SIZE(formats), si);
@@ -82,7 +84,7 @@ static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
}
/*
- * Palette
+ * Color LUT
*/
static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
@@ -128,7 +130,7 @@ static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int ind
}
#endif
-static void vesadrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+static void vesadrm_set_color_lut(struct drm_crtc *crtc, unsigned int index,
u16 red, u16 green, u16 blue)
{
struct drm_device *dev = crtc->dev;
@@ -149,15 +151,15 @@ static void vesadrm_fill_gamma_lut(struct vesadrm_device *vesa,
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- drm_crtc_fill_gamma_555(crtc, vesadrm_set_gamma_lut);
+ drm_crtc_fill_gamma_555(crtc, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB565:
- drm_crtc_fill_gamma_565(crtc, vesadrm_set_gamma_lut);
+ drm_crtc_fill_gamma_565(crtc, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- drm_crtc_fill_gamma_888(crtc, vesadrm_set_gamma_lut);
+ drm_crtc_fill_gamma_888(crtc, vesadrm_set_color_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -175,15 +177,53 @@ static void vesadrm_load_gamma_lut(struct vesadrm_device *vesa,
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_gamma_lut);
+ drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB565:
- drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_gamma_lut);
+ drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_color_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_gamma_lut);
+ drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_color_lut);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
+ &format->format);
+ break;
+ }
+}
+
+static void vesadrm_fill_palette_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ struct drm_crtc *crtc = &vesa->crtc;
+
+ switch (format->format) {
+ case DRM_FORMAT_C8:
+ drm_crtc_fill_palette_8(crtc, vesadrm_set_color_lut);
+ break;
+ case DRM_FORMAT_RGB332:
+ drm_crtc_fill_palette_332(crtc, vesadrm_set_color_lut);
+ break;
+ default:
+ drm_warn_once(dev, "Unsupported format %p4cc for palette\n",
+ &format->format);
+ break;
+ }
+}
+
+static void vesadrm_load_palette_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
+{
+ struct drm_device *dev = &vesa->sysfb.dev;
+ struct drm_crtc *crtc = &vesa->crtc;
+
+ switch (format->format) {
+ case DRM_FORMAT_C8:
+ drm_crtc_load_palette_8(crtc, lut, vesadrm_set_color_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -200,8 +240,68 @@ static const u64 vesadrm_primary_plane_format_modifiers[] = {
DRM_SYSFB_PLANE_FORMAT_MODIFIERS,
};
+static int vesadrm_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *new_state)
+{
+ struct drm_sysfb_device *sysfb = to_drm_sysfb_device(plane->dev);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(new_state, plane);
+ struct drm_framebuffer *new_fb = new_plane_state->fb;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_sysfb_crtc_state *new_sysfb_crtc_state;
+ int ret;
+
+ ret = drm_sysfb_plane_helper_atomic_check(plane, new_state);
+ if (ret)
+ return ret;
+ else if (!new_plane_state->visible)
+ return 0;
+
+ /*
+ * Fix up format conversion for specific cases
+ */
+
+ switch (sysfb->fb_format->format) {
+ case DRM_FORMAT_C8:
+ new_crtc_state = drm_atomic_get_new_crtc_state(new_state, new_plane_state->crtc);
+ new_sysfb_crtc_state = to_drm_sysfb_crtc_state(new_crtc_state);
+
+ switch (new_fb->format->format) {
+ case DRM_FORMAT_XRGB8888:
+ /*
+ * Reduce XRGB8888 to RGB332. Each resulting pixel is an index
+ * into the C8 hardware palette, which stores RGB332 colors.
+ */
+ if (new_sysfb_crtc_state->format->format != DRM_FORMAT_RGB332) {
+ new_sysfb_crtc_state->format =
+ drm_format_info(DRM_FORMAT_RGB332);
+ new_crtc_state->color_mgmt_changed = true;
+ }
+ break;
+ case DRM_FORMAT_C8:
+ /*
+ * Restore original output. Emulation of XRGB8888 sets RGB332
+ * output format and hardware palette. This needs to be undone
+ * when we switch back to DRM_FORMAT_C8.
+ */
+ if (new_sysfb_crtc_state->format->format == DRM_FORMAT_RGB332) {
+ new_sysfb_crtc_state->format = sysfb->fb_format;
+ new_crtc_state->color_mgmt_changed = true;
+ }
+ break;
+ }
+ break;
+ }
+
+ return 0;
+}
+
static const struct drm_plane_helper_funcs vesadrm_primary_plane_helper_funcs = {
- DRM_SYSFB_PLANE_HELPER_FUNCS,
+ .begin_fb_access = drm_sysfb_plane_helper_begin_fb_access,
+ .end_fb_access = drm_gem_end_shadow_fb_access,
+ .atomic_check = vesadrm_primary_plane_helper_atomic_check,
+ .atomic_update = drm_sysfb_plane_helper_atomic_update,
+ .atomic_disable = drm_sysfb_plane_helper_atomic_disable,
+ .get_scanout_buffer = drm_sysfb_plane_helper_get_scanout_buffer,
};
static const struct drm_plane_funcs vesadrm_primary_plane_funcs = {
@@ -223,15 +323,36 @@ static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
* plane's color format.
*/
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
- if (sysfb_crtc_state->format == sysfb->fb_format) {
- if (crtc_state->gamma_lut)
- vesadrm_load_gamma_lut(vesa,
- sysfb_crtc_state->format,
- crtc_state->gamma_lut->data);
- else
+ switch (sysfb->fb_format->format) {
+ /*
+ * Index formats
+ */
+ case DRM_FORMAT_C8:
+ if (sysfb_crtc_state->format->format == DRM_FORMAT_RGB332) {
+ vesadrm_fill_palette_lut(vesa, sysfb_crtc_state->format);
+ } else if (crtc_state->gamma_lut) {
+ vesadrm_load_palette_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ } else {
+ vesadrm_fill_palette_lut(vesa, sysfb_crtc_state->format);
+ }
+ break;
+ /*
+ * Component formats
+ */
+ default:
+ if (sysfb_crtc_state->format == sysfb->fb_format) {
+ if (crtc_state->gamma_lut)
+ vesadrm_load_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
+ else
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
+ } else {
vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
- } else {
- vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
+ }
+ break;
}
}
}
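
With the C8 support wired up above, XRGB8888 emulation programs the hardware palette with an RGB332 ramp and reduces each 32-bit pixel to an 8-bit index. Purely for illustration (this helper is not part of the driver), the reduction keeps the top 3/3/2 bits of each channel:

static u8 xrgb8888_to_rgb332(u32 px)
{
	u8 r = (px >> 16) & 0xff;
	u8 g = (px >> 8) & 0xff;
	u8 b = px & 0xff;

	/* rrrgggbb: the high bits of each channel form the palette index */
	return (r & 0xe0) | ((g & 0xe0) >> 3) | (b >> 6);
}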
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
index 6fc4b504e786..e399b40d64a1 100644
--- a/drivers/gpu/drm/tegra/Makefile
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -25,6 +25,7 @@ tegra-drm-y := \
falcon.o \
vic.o \
nvdec.o \
+ nvjpg.o \
riscv.o
tegra-drm-y += trace.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 59d5c1ba145a..01e9d5011dd8 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -27,6 +27,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "dc.h"
@@ -1033,7 +1034,7 @@ static int tegra_cursor_atomic_async_check(struct drm_plane *plane, struct drm_a
int min_scale, max_scale;
int err;
- crtc_state = drm_atomic_get_existing_crtc_state(state, new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -3148,6 +3149,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
dc->client.parent = &parent->client;
dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
+ put_device(companion);
}
return 0;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 4596073fe28f..1d18d43292dc 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -22,6 +22,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
@@ -1383,6 +1384,7 @@ static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra210-sor1", },
{ .compatible = "nvidia,tegra210-vic", },
{ .compatible = "nvidia,tegra210-nvdec", },
+ { .compatible = "nvidia,tegra210-nvjpg", },
{ .compatible = "nvidia,tegra186-display", },
{ .compatible = "nvidia,tegra186-dc", },
{ .compatible = "nvidia,tegra186-sor", },
@@ -1421,6 +1423,7 @@ static struct platform_driver * const drivers[] = {
&tegra_gr3d_driver,
&tegra_vic_driver,
&tegra_nvdec_driver,
+ &tegra_nvjpg_driver,
};
static int __init host1x_drm_init(void)
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 1dd3670f37db..ae68b03d8483 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -214,5 +214,6 @@ extern struct platform_driver tegra_gr2d_driver;
extern struct platform_driver tegra_gr3d_driver;
extern struct platform_driver tegra_vic_driver;
extern struct platform_driver tegra_nvdec_driver;
+extern struct platform_driver tegra_nvjpg_driver;
#endif /* HOST1X_DRM_H */
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index b5089b772267..175f5f9937b0 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -22,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
@@ -545,12 +546,19 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
/* horizontal back porch */
hbp = (mode->htotal - mode->hsync_end) * mul / div;
- if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
- hbp += hsw;
-
/* horizontal front porch */
hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+ if (dsi->master || dsi->slave) {
+ hact /= 2;
+ hsw /= 2;
+ hbp /= 2;
+ hfp /= 2;
+ }
+
+ if ((dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) == 0)
+ hbp += hsw;
+
/* subtract packet overhead */
hsw -= 10;
hbp -= 14;
@@ -560,11 +568,6 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
-
- /* set SOL delay (for non-burst mode only) */
- tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
-
- /* TODO: implement ganged mode */
} else {
u16 bytes;
@@ -586,29 +589,28 @@ static void tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
value = MIPI_DCS_WRITE_MEMORY_START << 8 |
MIPI_DCS_WRITE_MEMORY_CONTINUE;
tegra_dsi_writel(dsi, value, DSI_DCS_CMDS);
+ }
- /* set SOL delay */
- if (dsi->master || dsi->slave) {
- unsigned long delay, bclk, bclk_ganged;
- unsigned int lanes = state->lanes;
-
- /* SOL to valid, valid to FIFO and FIFO write delay */
- delay = 4 + 4 + 2;
- delay = DIV_ROUND_UP(delay * mul, div * lanes);
- /* FIFO read delay */
- delay = delay + 6;
-
- bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
- bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
- value = bclk - bclk_ganged + delay + 20;
- } else {
- /* TODO: revisit for non-ganged mode */
- value = 8 * mul / div;
- }
+ /* set SOL delay */
+ if (dsi->master || dsi->slave) {
+ unsigned long delay, bclk, bclk_ganged;
+ unsigned int lanes = state->lanes;
+
+ /* SOL to valid, valid to FIFO and FIFO write delay */
+ delay = 4 + 4 + 2;
+ delay = DIV_ROUND_UP(delay * mul, div * lanes);
+ /* FIFO read delay */
+ delay = delay + 6;
- tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
+ bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
+ bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
+ value = bclk - bclk_ganged + delay + 20;
+ } else {
+ value = 8 * mul / div;
}
+ tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
+
if (dsi->slave) {
tegra_dsi_configure(dsi->slave, pipe, mode);
@@ -913,15 +915,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
u32 value;
int err;
- /* If the bootloader enabled DSI it needs to be disabled
- * in order for the panel initialization commands to be
- * properly sent.
- */
- value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
-
- if (value & DSI_POWER_CONTROL_ENABLE)
- tegra_dsi_disable(dsi);
-
err = tegra_dsi_prepare(dsi);
if (err < 0) {
dev_err(dsi->dev, "failed to prepare: %d\n", err);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index dd041089f797..1cef8c5cac50 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -13,6 +13,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#include "drm.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c
index 1b70f5e164af..8f40882aa76e 100644
--- a/drivers/gpu/drm/tegra/fbdev.c
+++ b/drivers/gpu/drm/tegra/fbdev.c
@@ -53,8 +53,6 @@ static void tegra_fbdev_fb_destroy(struct fb_info *info)
drm_framebuffer_remove(fb);
drm_client_release(&helper->client);
- drm_fb_helper_unprepare(helper);
- kfree(helper);
}
static const struct fb_ops tegra_fb_ops = {
@@ -75,10 +73,10 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
struct tegra_drm *tegra = helper->dev->dev_private;
struct drm_device *drm = helper->dev;
struct drm_mode_fb_cmd2 cmd = { 0 };
+ struct fb_info *info = helper->info;
unsigned int bytes_per_pixel;
struct drm_framebuffer *fb;
unsigned long offset;
- struct fb_info *info;
struct tegra_bo *bo;
size_t size;
int err;
@@ -99,13 +97,6 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
if (IS_ERR(bo))
return PTR_ERR(bo);
- info = drm_fb_helper_alloc_info(helper);
- if (IS_ERR(info)) {
- dev_err(drm->dev, "failed to allocate framebuffer info\n");
- drm_gem_object_put(&bo->gem);
- return PTR_ERR(info);
- }
-
fb = tegra_fb_alloc(drm,
drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]),
&cmd, &bo, 1);
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 8ede07fb7a21..6b14f1e919eb 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -16,6 +16,7 @@
#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_prime.h>
#include "drm.h"
@@ -542,12 +543,13 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args)
{
- unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_bo *bo;
+ int ret;
- args->pitch = round_up(min_pitch, tegra->pitch_align);
- args->size = args->pitch * args->height;
+ ret = drm_mode_size_dumb(drm, args, tegra->pitch_align, 0);
+ if (ret)
+ return ret;
bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
&args->handle);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 8cd2969e7d4b..0adcd4244a42 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -28,6 +28,7 @@
#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -658,7 +659,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
{
const u8 *ptr = data;
unsigned long offset;
- size_t i, j;
+ size_t i;
u32 value;
switch (ptr[0]) {
@@ -691,7 +692,7 @@ static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
- for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ for (i = 3; i < size; i += 7) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
value = tegra_hdmi_subpack(&ptr[i], num);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 8f779f23dc09..c924ffba4094 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -20,6 +20,7 @@
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "drm.h"
diff --git a/drivers/gpu/drm/tegra/nvjpg.c b/drivers/gpu/drm/tegra/nvjpg.c
new file mode 100644
index 000000000000..94503fd0d52d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/nvjpg.c
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/host1x.h>
+#include <linux/iommu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "drm.h"
+#include "falcon.h"
+
+struct nvjpg_config {
+ const char *firmware;
+ unsigned int version;
+};
+
+struct nvjpg {
+ struct falcon falcon;
+
+ void __iomem *regs;
+ struct tegra_drm_client client;
+ struct device *dev;
+ struct clk *clk;
+
+ /* Platform configuration */
+ const struct nvjpg_config *config;
+};
+
+static inline struct nvjpg *to_nvjpg(struct tegra_drm_client *client)
+{
+ return container_of(client, struct nvjpg, client);
+}
+
+static int nvjpg_init(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct nvjpg *nvjpg = to_nvjpg(drm);
+ int err;
+
+ err = host1x_client_iommu_attach(client);
+ if (err < 0 && err != -ENODEV) {
+ dev_err(nvjpg->dev, "failed to attach to domain: %d\n", err);
+ return err;
+ }
+
+ err = tegra_drm_register_client(tegra, drm);
+ if (err < 0)
+ goto detach;
+
+ /*
+ * Inherit the DMA parameters (such as maximum segment size) from the
+ * parent host1x device.
+ */
+ client->dev->dma_parms = client->host->dma_parms;
+
+ return 0;
+
+detach:
+ host1x_client_iommu_detach(client);
+
+ return err;
+}
+
+static int nvjpg_exit(struct host1x_client *client)
+{
+ struct tegra_drm_client *drm = host1x_to_drm_client(client);
+ struct drm_device *dev = dev_get_drvdata(client->host);
+ struct tegra_drm *tegra = dev->dev_private;
+ struct nvjpg *nvjpg = to_nvjpg(drm);
+ int err;
+
+ /* avoid a dangling pointer just in case this disappears */
+ client->dev->dma_parms = NULL;
+
+ err = tegra_drm_unregister_client(tegra, drm);
+ if (err < 0)
+ return err;
+
+ pm_runtime_dont_use_autosuspend(client->dev);
+ pm_runtime_force_suspend(client->dev);
+
+ host1x_client_iommu_detach(client);
+
+ if (client->group) {
+ dma_unmap_single(nvjpg->dev, nvjpg->falcon.firmware.phys,
+ nvjpg->falcon.firmware.size, DMA_TO_DEVICE);
+ tegra_drm_free(tegra, nvjpg->falcon.firmware.size,
+ nvjpg->falcon.firmware.virt,
+ nvjpg->falcon.firmware.iova);
+ } else {
+ dma_free_coherent(nvjpg->dev, nvjpg->falcon.firmware.size,
+ nvjpg->falcon.firmware.virt,
+ nvjpg->falcon.firmware.iova);
+ }
+
+ return 0;
+}
+
+static const struct host1x_client_ops nvjpg_client_ops = {
+ .init = nvjpg_init,
+ .exit = nvjpg_exit,
+};
+
+static int nvjpg_load_falcon_firmware(struct nvjpg *nvjpg)
+{
+ struct host1x_client *client = &nvjpg->client.base;
+ struct tegra_drm *tegra = nvjpg->client.drm;
+ dma_addr_t iova;
+ size_t size;
+ void *virt;
+ int err;
+
+ if (nvjpg->falcon.firmware.virt)
+ return 0;
+
+ err = falcon_read_firmware(&nvjpg->falcon, nvjpg->config->firmware);
+ if (err < 0)
+ return err;
+
+ size = nvjpg->falcon.firmware.size;
+
+ if (!client->group) {
+ virt = dma_alloc_coherent(nvjpg->dev, size, &iova, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+ } else {
+ virt = tegra_drm_alloc(tegra, size, &iova);
+ if (IS_ERR(virt))
+ return PTR_ERR(virt);
+ }
+
+ nvjpg->falcon.firmware.virt = virt;
+ nvjpg->falcon.firmware.iova = iova;
+
+ err = falcon_load_firmware(&nvjpg->falcon);
+ if (err < 0)
+ goto cleanup;
+
+ /*
+ * In this case we have received an IOVA from the shared domain, so we
+ * need to make sure to get the physical address so that the DMA API
+ * knows what memory pages to flush the cache for.
+ */
+ if (client->group) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(nvjpg->dev, virt, size, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(nvjpg->dev, phys);
+ if (err < 0)
+ goto cleanup;
+
+ nvjpg->falcon.firmware.phys = phys;
+ }
+
+ return 0;
+
+cleanup:
+ if (!client->group)
+ dma_free_coherent(nvjpg->dev, size, virt, iova);
+ else
+ tegra_drm_free(tegra, size, virt, iova);
+
+ return err;
+}
+
+static __maybe_unused int nvjpg_runtime_resume(struct device *dev)
+{
+ struct nvjpg *nvjpg = dev_get_drvdata(dev);
+ int err;
+
+ err = clk_prepare_enable(nvjpg->clk);
+ if (err < 0)
+ return err;
+
+ usleep_range(20, 30);
+
+ err = nvjpg_load_falcon_firmware(nvjpg);
+ if (err < 0)
+ goto disable_clk;
+
+ err = falcon_boot(&nvjpg->falcon);
+ if (err < 0)
+ goto disable_clk;
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(nvjpg->clk);
+ return err;
+}
+
+static __maybe_unused int nvjpg_runtime_suspend(struct device *dev)
+{
+ struct nvjpg *nvjpg = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(nvjpg->clk);
+
+ return 0;
+}
+
+static int nvjpg_can_use_memory_ctx(struct tegra_drm_client *client, bool *supported)
+{
+ *supported = false;
+
+ return 0;
+}
+
+static const struct tegra_drm_client_ops nvjpg_ops = {
+ .get_streamid_offset = NULL,
+ .can_use_memory_ctx = nvjpg_can_use_memory_ctx,
+};
+
+#define NVIDIA_TEGRA_210_NVJPG_FIRMWARE "nvidia/tegra210/nvjpg.bin"
+
+static const struct nvjpg_config tegra210_nvjpg_config = {
+ .firmware = NVIDIA_TEGRA_210_NVJPG_FIRMWARE,
+ .version = 0x21,
+};
+
+static const struct of_device_id tegra_nvjpg_of_match[] = {
+ { .compatible = "nvidia,tegra210-nvjpg", .data = &tegra210_nvjpg_config },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_nvjpg_of_match);
+
+static int nvjpg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nvjpg *nvjpg;
+ int err;
+
+ /* inherit DMA mask from host1x parent */
+ err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
+ return err;
+ }
+
+ nvjpg = devm_kzalloc(dev, sizeof(*nvjpg), GFP_KERNEL);
+ if (!nvjpg)
+ return -ENOMEM;
+
+ nvjpg->config = of_device_get_match_data(dev);
+
+ nvjpg->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(nvjpg->regs))
+ return PTR_ERR(nvjpg->regs);
+
+ nvjpg->clk = devm_clk_get(dev, "nvjpg");
+ if (IS_ERR(nvjpg->clk)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(nvjpg->clk);
+ }
+
+ err = clk_set_rate(nvjpg->clk, ULONG_MAX);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to set clock rate\n");
+ return err;
+ }
+
+ nvjpg->falcon.dev = dev;
+ nvjpg->falcon.regs = nvjpg->regs;
+
+ err = falcon_init(&nvjpg->falcon);
+ if (err < 0)
+ return err;
+
+ platform_set_drvdata(pdev, nvjpg);
+
+ INIT_LIST_HEAD(&nvjpg->client.base.list);
+ nvjpg->client.base.ops = &nvjpg_client_ops;
+ nvjpg->client.base.dev = dev;
+ nvjpg->client.base.class = HOST1X_CLASS_NVJPG;
+ nvjpg->dev = dev;
+
+ INIT_LIST_HEAD(&nvjpg->client.list);
+ nvjpg->client.version = nvjpg->config->version;
+ nvjpg->client.ops = &nvjpg_ops;
+
+ err = host1x_client_register(&nvjpg->client.base);
+ if (err < 0) {
+ dev_err(dev, "failed to register host1x client: %d\n", err);
+ goto exit_falcon;
+ }
+
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 500);
+ devm_pm_runtime_enable(dev);
+
+ return 0;
+
+exit_falcon:
+ falcon_exit(&nvjpg->falcon);
+
+ return err;
+}
+
+static void nvjpg_remove(struct platform_device *pdev)
+{
+ struct nvjpg *nvjpg = platform_get_drvdata(pdev);
+
+ host1x_client_unregister(&nvjpg->client.base);
+ falcon_exit(&nvjpg->falcon);
+}
+
+static const struct dev_pm_ops nvjpg_pm_ops = {
+ RUNTIME_PM_OPS(nvjpg_runtime_suspend, nvjpg_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
+struct platform_driver tegra_nvjpg_driver = {
+ .driver = {
+ .name = "tegra-nvjpg",
+ .of_match_table = tegra_nvjpg_of_match,
+ .pm = &nvjpg_pm_ops
+ },
+ .probe = nvjpg_probe,
+ .remove = nvjpg_remove,
+};
+
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+MODULE_FIRMWARE(NVIDIA_TEGRA_210_NVJPG_FIRMWARE);
+#endif
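
The probe path above enables autosuspend with a 500 ms delay and defers firmware load and Falcon boot to runtime resume. A sketch of how a caller would bracket engine access under this scheme (illustrative only; real submission goes through the shared tegra-drm job paths):

	err = pm_runtime_resume_and_get(dev);	/* first use boots the Falcon */
	if (err < 0)
		return err;

	/* ... program NVJPG and run the job ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* engine clock-gates after 500 ms idle */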
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 21f3dfdcc5c9..4023cb5998f1 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -24,6 +24,7 @@
#include <drm/drm_eld.h>
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
@@ -1864,7 +1865,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
{
const u8 *ptr = data;
unsigned long offset;
- size_t i, j;
+ size_t i;
u32 value;
switch (ptr[0]) {
@@ -1897,7 +1898,7 @@ static void tegra_sor_hdmi_write_infopack(struct tegra_sor *sor,
* - subpack_low: bytes 0 - 3
* - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
*/
- for (i = 3, j = 0; i < size; i += 7, j += 8) {
+ for (i = 3; i < size; i += 7) {
size_t rem = size - i, num = min_t(size_t, rem, 4);
value = tegra_sor_hdmi_subpack(&ptr[i], num);
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
index 5adab6b22916..d0b6a1fa6efa 100644
--- a/drivers/gpu/drm/tegra/uapi.c
+++ b/drivers/gpu/drm/tegra/uapi.c
@@ -114,9 +114,12 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_
if (err)
goto put_channel;
- if (supported)
+ if (supported) {
+ struct pid *pid = get_task_pid(current, PIDTYPE_TGID);
context->memory_context = host1x_memory_context_alloc(
- host, client->base.dev, get_task_pid(current, PIDTYPE_TGID));
+ host, client->base.dev, pid);
+ put_pid(pid);
+ }
if (IS_ERR(context->memory_context)) {
if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
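
get_task_pid() returns a counted reference; before this change, the reference taken for host1x_memory_context_alloc() was never dropped, leaking a pid reference on every channel open. The balanced pattern in general form (use_pid() is a hypothetical consumer, which takes its own reference if it needs to keep the pid):

	struct pid *pid = get_task_pid(current, PIDTYPE_TGID);	/* +1 on the pid */

	use_pid(pid);	/* hypothetical consumer */
	put_pid(pid);	/* drop our reference */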
diff --git a/drivers/gpu/drm/tests/.kunitconfig b/drivers/gpu/drm/tests/.kunitconfig
index 6ec04b4c979d..5be8e71f45d5 100644
--- a/drivers/gpu/drm/tests/.kunitconfig
+++ b/drivers/gpu/drm/tests/.kunitconfig
@@ -1,3 +1,5 @@
CONFIG_KUNIT=y
CONFIG_DRM=y
+CONFIG_DRM_VKMS=y
+CONFIG_DRM_FBDEV_EMULATION=y
CONFIG_DRM_KUNIT_TEST=y
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index c0e952293ad0..87d5d5f9332a 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
drm_rect_test.o \
- drm_sysfb_modeset_test.o
+ drm_sysfb_modeset_test.o \
+ drm_fixp_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
index 7a0e523651f0..5f40b5343bd8 100644
--- a/drivers/gpu/drm/tests/drm_buddy_test.c
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -21,6 +21,110 @@ static inline u64 get_size(int order, u64 chunk_size)
return (1 << order) * chunk_size;
}
+static void drm_test_buddy_fragmentation_performance(struct kunit *test)
+{
+ struct drm_buddy_block *block, *tmp;
+ int num_blocks, i, ret, count = 0;
+ LIST_HEAD(allocated_blocks);
+ unsigned long elapsed_ms;
+ LIST_HEAD(reverse_list);
+ LIST_HEAD(test_blocks);
+ LIST_HEAD(clear_list);
+ LIST_HEAD(dirty_list);
+ LIST_HEAD(free_list);
+ struct drm_buddy mm;
+ u64 mm_size = SZ_4G;
+ ktime_t start, end;
+
+ /*
+ * Allocation under severe fragmentation
+ *
+ * Create severe fragmentation by allocating the entire 4 GiB address space
+ * as tiny 8 KiB blocks but forcing a 64 KiB alignment. The resulting pattern
+ * leaves many scattered holes. Split the allocations into two groups and
+ * return them with different flags to block coalescing, then repeatedly
+ * allocate and free 64 KiB blocks while timing the loop. This stresses how
+ * quickly the allocator can satisfy larger, aligned requests from a pool of
+ * highly fragmented space.
+ */
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
+ "buddy_init failed\n");
+
+ num_blocks = mm_size / SZ_64K;
+
+ start = ktime_get();
+ /* Allocate with maximum fragmentation - 8K blocks with 64K alignment */
+ for (i = 0; i < num_blocks; i++)
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K,
+ &allocated_blocks, 0),
+ "buddy_alloc hit an error size=%u\n", SZ_8K);
+
+ list_for_each_entry_safe(block, tmp, &allocated_blocks, link) {
+ if (count % 4 == 0 || count % 4 == 3)
+ list_move_tail(&block->link, &clear_list);
+ else
+ list_move_tail(&block->link, &dirty_list);
+ count++;
+ }
+
+ /* Free with different flags to ensure no coalescing */
+ drm_buddy_free_list(&mm, &clear_list, DRM_BUDDY_CLEARED);
+ drm_buddy_free_list(&mm, &dirty_list, 0);
+
+ for (i = 0; i < num_blocks; i++)
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_64K, SZ_64K,
+ &test_blocks, 0),
+ "buddy_alloc hit an error size=%u\n", SZ_64K);
+ drm_buddy_free_list(&mm, &test_blocks, 0);
+
+ end = ktime_get();
+ elapsed_ms = ktime_to_ms(ktime_sub(end, start));
+
+ kunit_info(test, "Fragmented allocation took %lu ms\n", elapsed_ms);
+
+ drm_buddy_fini(&mm);
+
+ /*
+ * Reverse free order under fragmentation
+ *
+ * Construct a fragmented 4 GiB space by allocating every 8 KiB block with
+ * 64 KiB alignment, creating a dense scatter of small regions. Half of the
+ * blocks are selectively freed to form sparse gaps, while the remaining
+ * allocations are preserved, reordered in reverse, and released back with
+ * the cleared flag. This models a pathological reverse-ordered free pattern
+ * and measures how quickly the allocator can merge and reclaim space when
+ * deallocation occurs in the opposite order of allocation, exposing the
+ * cost difference between a linear freelist scan and an ordered tree lookup.
+ */
+ ret = drm_buddy_init(&mm, mm_size, SZ_4K);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ start = ktime_get();
+ /* Allocate maximum fragmentation */
+ for (i = 0; i < num_blocks; i++)
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_64K,
+ &allocated_blocks, 0),
+ "buddy_alloc hit an error size=%u\n", SZ_8K);
+
+ list_for_each_entry_safe(block, tmp, &allocated_blocks, link) {
+ if (count % 2 == 0)
+ list_move_tail(&block->link, &free_list);
+ count++;
+ }
+ drm_buddy_free_list(&mm, &free_list, DRM_BUDDY_CLEARED);
+
+ list_for_each_entry_safe_reverse(block, tmp, &allocated_blocks, link)
+ list_move(&block->link, &reverse_list);
+ drm_buddy_free_list(&mm, &reverse_list, DRM_BUDDY_CLEARED);
+
+ end = ktime_get();
+ elapsed_ms = ktime_to_ms(ktime_sub(end, start));
+
+ kunit_info(test, "Reverse-ordered free took %lu ms\n", elapsed_ms);
+
+ drm_buddy_fini(&mm);
+}
+
static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
@@ -772,6 +876,7 @@ static struct kunit_case drm_buddy_tests[] = {
KUNIT_CASE(drm_test_buddy_alloc_contiguous),
KUNIT_CASE(drm_test_buddy_alloc_clear),
KUNIT_CASE(drm_test_buddy_alloc_range_bias),
+ KUNIT_CASE(drm_test_buddy_fragmentation_performance),
{}
};
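
Assuming the usual KUnit tooling, the new case should be runnable with something like ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests 'drm_buddy', with the two kunit_info() lines reporting the elapsed milliseconds for each scenario.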
diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c
index d6c4dd1194a0..3a20c788c51f 100644
--- a/drivers/gpu/drm/tests/drm_exec_test.c
+++ b/drivers/gpu/drm/tests/drm_exec_test.c
@@ -150,14 +150,22 @@ static void test_prepare(struct kunit *test)
static void test_prepare_array(struct kunit *test)
{
struct drm_exec_priv *priv = test->priv;
- struct drm_gem_object gobj1 = { };
- struct drm_gem_object gobj2 = { };
- struct drm_gem_object *array[] = { &gobj1, &gobj2 };
+ struct drm_gem_object *gobj1;
+ struct drm_gem_object *gobj2;
+ struct drm_gem_object *array[] = {
+ (gobj1 = kunit_kzalloc(test, sizeof(*gobj1), GFP_KERNEL)),
+ (gobj2 = kunit_kzalloc(test, sizeof(*gobj2), GFP_KERNEL)),
+ };
struct drm_exec exec;
int ret;
- drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE);
- drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE);
+ if (!gobj1 || !gobj2) {
+ KUNIT_FAIL(test, "Failed to allocate GEM objects.\n");
+ return;
+ }
+
+ drm_gem_private_object_init(priv->drm, gobj1, PAGE_SIZE);
+ drm_gem_private_object_init(priv->drm, gobj2, PAGE_SIZE);
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec)
@@ -166,8 +174,8 @@ static void test_prepare_array(struct kunit *test)
KUNIT_EXPECT_EQ(test, ret, 0);
drm_exec_fini(&exec);
- drm_gem_private_object_fini(&gobj1);
- drm_gem_private_object_fini(&gobj2);
+ drm_gem_private_object_fini(gobj1);
+ drm_gem_private_object_fini(gobj2);
}
static void test_multiple_loops(struct kunit *test)
diff --git a/drivers/gpu/drm/tests/drm_fixp_test.c b/drivers/gpu/drm/tests/drm_fixp_test.c
new file mode 100644
index 000000000000..dd77fdedb2a9
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_fixp_test.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <kunit/test.h>
+#include <drm/drm_fixed.h>
+
+static void drm_test_sm2fixp(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, 0x7fffffffffffffffll, ((1ull << 63) - 1));
+
+ /* 1 */
+ KUNIT_EXPECT_EQ(test, drm_int2fixp(1), drm_sm2fixp(1ull << DRM_FIXED_POINT));
+
+ /* -1 */
+ KUNIT_EXPECT_EQ(test, drm_int2fixp(-1),
+ drm_sm2fixp((1ull << 63) | (1ull << DRM_FIXED_POINT)));
+
+ /* 0.5 */
+ KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(1, 2),
+ drm_sm2fixp(1ull << (DRM_FIXED_POINT - 1)));
+
+ /* -0.5 */
+ KUNIT_EXPECT_EQ(test, drm_fixp_from_fraction(-1, 2),
+ drm_sm2fixp((1ull << 63) | (1ull << (DRM_FIXED_POINT - 1))));
+}
+
+static void drm_test_int2fixp(struct kunit *test)
+{
+ /* 1 */
+ KUNIT_EXPECT_EQ(test, 1ll << 32, drm_int2fixp(1));
+
+ /* -1 */
+ KUNIT_EXPECT_EQ(test, -(1ll << 32), drm_int2fixp(-1));
+
+ /* 1 + (-1) = 0 */
+ KUNIT_EXPECT_EQ(test, 0, drm_int2fixp(1) + drm_int2fixp(-1));
+
+ /* 1 / 2 */
+ KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(1, 2));
+
+ /* -0.5 */
+ KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(-1, 2));
+
+ /* (-1 / 2) + 1 = 0.5 */
+ KUNIT_EXPECT_EQ(test, 1ll << 31, drm_fixp_from_fraction(-1, 2) + drm_int2fixp(1));
+
+ /* (1 / 2) + (-1) = -0.5 */
+ KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) + drm_int2fixp(-1));
+
+ /* (1 / 2) - 1 = -0.5 */
+ KUNIT_EXPECT_EQ(test, -(1ll << 31), drm_fixp_from_fraction(1, 2) - drm_int2fixp(1));
+}
+
+static struct kunit_case drm_fixp_tests[] = {
+ KUNIT_CASE(drm_test_int2fixp),
+ KUNIT_CASE(drm_test_sm2fixp),
+ { }
+};
+
+static struct kunit_suite drm_fixp_test_suite = {
+ .name = "drm_fixp",
+ .test_cases = drm_fixp_tests,
+};
+
+kunit_test_suite(drm_fixp_test_suite);
+
+MODULE_AUTHOR("AMD");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_DESCRIPTION("Unit tests for drm_fixed.h");
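
The type under test is DRM's signed 32.32 fixed point: drm_int2fixp(x) is x shifted left by 32, drm_fixp_from_fraction(a, b) represents a/b, and drm_sm2fixp() converts a sign-magnitude input (sign in bit 63, magnitude in the low 63 bits) to that two's-complement form. One worked value matching the cases above:

	/* one half in signed 32.32: only the top fractional bit is set */
	s64 half = drm_fixp_from_fraction(1, 2);	/* == 1ll << 31 */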
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
index 6174d0929020..aec9eccdeae9 100644
--- a/drivers/gpu/drm/tests/drm_mm_test.c
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -14,6 +14,7 @@
#include <linux/ktime.h>
#include <drm/drm_mm.h>
+#include <drm/drm_print.h>
#include "../lib/drm_random.h"
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index a2f40a5c7703..8f81eb560b9e 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "tidss_crtc.h"
@@ -91,11 +92,9 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
struct dispc_device *dispc = tidss->dispc;
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
- const struct drm_display_mode *mode;
+ struct drm_display_mode *mode;
enum drm_mode_status ok;
- dev_dbg(ddev->dev, "%s\n", __func__);
-
if (!crtc_state->enable)
return 0;
@@ -103,11 +102,14 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
ok = dispc_vp_mode_valid(dispc, hw_videoport, mode);
if (ok != MODE_OK) {
- dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n",
+ drm_dbg(ddev, "%s: bad mode: %ux%u pclk %u kHz\n",
__func__, mode->hdisplay, mode->vdisplay, mode->clock);
return -EINVAL;
}
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ drm_mode_set_crtcinfo(mode, 0);
+
return dispc_vp_bus_check(dispc, hw_videoport, crtc_state);
}
@@ -169,7 +171,7 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
- dev_dbg(ddev->dev, "%s: %s is %sactive, %s modeset, event %p\n",
+ drm_dbg(ddev, "%s: %s is %sactive, %s modeset, event %p\n",
__func__, crtc->name, crtc->state->active ? "" : "not ",
drm_atomic_crtc_needs_modeset(crtc->state) ? "needs" : "doesn't need",
crtc->state->event);
@@ -225,7 +227,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
tidss_runtime_get(tidss);
r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport,
- mode->clock * 1000);
+ mode->crtc_clock * 1000);
if (r != 0)
return;
@@ -241,11 +243,15 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
dispc_vp_prepare(tidss->dispc, tcrtc->hw_videoport, crtc->state);
- dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport, crtc->state);
-
spin_lock_irqsave(&ddev->event_lock, flags);
+ dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport);
+
if (crtc->state->event) {
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+
+ vblank->time = ktime_get();
+
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
@@ -325,8 +331,6 @@ static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
struct drm_device *ddev = crtc->dev;
struct tidss_device *tidss = to_tidss(ddev);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
tidss_runtime_get(tidss);
tidss_irq_enable_vblank(crtc);
@@ -339,29 +343,34 @@ static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
struct drm_device *ddev = crtc->dev;
struct tidss_device *tidss = to_tidss(ddev);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
tidss_irq_disable_vblank(crtc);
tidss_runtime_put(tidss);
}
+static void tidss_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(&tstate->base);
+ kfree(tstate);
+}
+
static void tidss_crtc_reset(struct drm_crtc *crtc)
{
- struct tidss_crtc_state *tcrtc;
+ struct tidss_crtc_state *tstate;
if (crtc->state)
- __drm_atomic_helper_crtc_destroy_state(crtc->state);
-
- kfree(crtc->state);
+ tidss_crtc_destroy_state(crtc, crtc->state);
- tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
- if (!tcrtc) {
+ tstate = kzalloc(sizeof(*tstate), GFP_KERNEL);
+ if (!tstate) {
crtc->state = NULL;
return;
}
- __drm_atomic_helper_crtc_reset(crtc, &tcrtc->base);
+ __drm_atomic_helper_crtc_reset(crtc, &tstate->base);
}
static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
@@ -401,7 +410,7 @@ static const struct drm_crtc_funcs tidss_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = tidss_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_destroy_state = tidss_crtc_destroy_state,
.enable_vblank = tidss_crtc_enable_vblank,
.disable_vblank = tidss_crtc_disable_vblank,
};
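
The dedicated destroy_state callback pairs the kfree() with the tidss_crtc_state allocations made in reset() and duplicate_state(); the generic helper freed the same memory only because the base drm_crtc_state happens to be the first member of the subclass.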
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index c0277fa36425..58d5eb033bdb 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -4,6 +4,7 @@
* Author: Jyri Sarha <jsarha@ti.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -26,6 +27,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include "tidss_crtc.h"
#include "tidss_dispc.h"
@@ -56,12 +58,6 @@ static const u16 tidss_k2g_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
};
const struct dispc_features dispc_k2g_feats = {
- .min_pclk_khz = 4375,
-
- .max_pclk_khz = {
- [DISPC_VP_DPI] = 150000,
- },
-
/*
* XXX According TRM the RGB input buffer width up to 2560 should
* work on 3 taps, but in practice it only works up to 1280.
@@ -144,11 +140,6 @@ static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
};
const struct dispc_features dispc_am65x_feats = {
- .max_pclk_khz = {
- [DISPC_VP_DPI] = 165000,
- [DISPC_VP_OLDI_AM65X] = 165000,
- },
-
.scaling = {
.in_width_max_5tap_rgb = 1280,
.in_width_max_3tap_rgb = 2560,
@@ -244,11 +235,6 @@ static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
};
const struct dispc_features dispc_j721e_feats = {
- .max_pclk_khz = {
- [DISPC_VP_DPI] = 170000,
- [DISPC_VP_INTERNAL] = 600000,
- },
-
.scaling = {
.in_width_max_5tap_rgb = 2048,
.in_width_max_3tap_rgb = 4096,
@@ -315,11 +301,6 @@ const struct dispc_features dispc_j721e_feats = {
};
const struct dispc_features dispc_am625_feats = {
- .max_pclk_khz = {
- [DISPC_VP_DPI] = 165000,
- [DISPC_VP_INTERNAL] = 170000,
- },
-
.scaling = {
.in_width_max_5tap_rgb = 1280,
.in_width_max_3tap_rgb = 2560,
@@ -376,15 +357,6 @@ const struct dispc_features dispc_am625_feats = {
};
const struct dispc_features dispc_am62a7_feats = {
- /*
- * if the code reaches dispc_mode_valid with VP1,
- * it should return MODE_BAD.
- */
- .max_pclk_khz = {
- [DISPC_VP_TIED_OFF] = 0,
- [DISPC_VP_DPI] = 165000,
- },
-
.scaling = {
.in_width_max_5tap_rgb = 1280,
.in_width_max_3tap_rgb = 2560,
@@ -441,10 +413,6 @@ const struct dispc_features dispc_am62a7_feats = {
};
const struct dispc_features dispc_am62l_feats = {
- .max_pclk_khz = {
- [DISPC_VP_DPI] = 165000,
- },
-
.subrev = DISPC_AM62L,
.common = "common",
@@ -594,79 +562,53 @@ void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport)
* number. For example 7:0
*/
-static u32 FLD_MASK(u32 start, u32 end)
-{
- return ((1 << (start - end + 1)) - 1) << end;
-}
-
-static u32 FLD_VAL(u32 val, u32 start, u32 end)
-{
- return (val << end) & FLD_MASK(start, end);
-}
-
-static u32 FLD_GET(u32 val, u32 start, u32 end)
-{
- return (val & FLD_MASK(start, end)) >> end;
-}
-
-static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end)
-{
- return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end);
-}
-
-static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end)
-{
- return FLD_GET(dispc_read(dispc, idx), start, end);
-}
-
-static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val,
- u32 start, u32 end)
-{
- dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val,
- start, end));
-}
-
-static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx,
- u32 start, u32 end)
-{
- return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end);
-}
-
-static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx,
- u32 val, u32 start, u32 end)
-{
- dispc_vid_write(dispc, hw_plane, idx,
- FLD_MOD(dispc_vid_read(dispc, hw_plane, idx),
- val, start, end));
-}
-
-static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx,
- u32 start, u32 end)
-{
- return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end);
-}
-
-static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val,
- u32 start, u32 end)
-{
- dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx),
- val, start, end));
-}
-
-__maybe_unused
-static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx,
- u32 start, u32 end)
-{
- return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end);
-}
-
-static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx,
- u32 val, u32 start, u32 end)
-{
- dispc_ovr_write(dispc, ovr, idx,
- FLD_MOD(dispc_ovr_read(dispc, ovr, idx),
- val, start, end));
-}
+#define REG_GET(dispc, idx, mask) \
+ ((u32)FIELD_GET((mask), dispc_read((dispc), (idx))))
+
+#define REG_FLD_MOD(dispc, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_read(_dispc, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_write(_dispc, _idx, _reg); \
+ })
+
+#define VID_REG_GET(dispc, hw_plane, idx, mask) \
+ ((u32)FIELD_GET((mask), dispc_vid_read((dispc), (hw_plane), (idx))))
+
+#define VID_REG_FLD_MOD(dispc, hw_plane, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _hw_plane = (hw_plane); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_vid_read(_dispc, _hw_plane, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_vid_write(_dispc, _hw_plane, _idx, _reg); \
+ })
+
+#define VP_REG_GET(dispc, vp, idx, mask) \
+ ((u32)FIELD_GET((mask), dispc_vp_read((dispc), (vp), (idx))))
+
+#define VP_REG_FLD_MOD(dispc, vp, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _vp = (vp); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_vp_read(_dispc, _vp, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_vp_write(_dispc, _vp, _idx, _reg); \
+ })
+
+#define OVR_REG_FLD_MOD(dispc, ovr, idx, val, mask) \
+ ({ \
+ struct dispc_device *_dispc = (dispc); \
+ u32 _ovr = (ovr); \
+ u32 _idx = (idx); \
+ u32 _reg = dispc_ovr_read(_dispc, _ovr, _idx); \
+ FIELD_MODIFY((mask), &_reg, (val)); \
+ dispc_ovr_write(_dispc, _ovr, _idx, _reg); \
+ })
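
The open-coded FLD_* helpers are replaced with <linux/bitfield.h> operations keyed on a mask rather than start/end bit numbers, so an old call like REG_FLD_MOD(dispc, idx, val, 7, 0) becomes REG_FLD_MOD(dispc, idx, val, GENMASK(7, 0)). FIELD_MODIFY() performs the clear-and-insert, for example:

	u32 reg = 0xffffff00;

	FIELD_MODIFY(GENMASK(7, 0), &reg, 0x2a);	/* reg == 0xffffff2a */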
static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport)
{
@@ -1076,20 +1018,22 @@ struct dispc_bus_format *dispc_vp_find_bus_fmt(struct dispc_device *dispc,
int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
const struct drm_crtc_state *state)
{
+ struct tidss_device *tidss = dispc->tidss;
+ struct drm_device *dev = &tidss->ddev;
const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
const struct dispc_bus_format *fmt;
fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
tstate->bus_flags);
if (!fmt) {
- dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n",
+ drm_dbg(dev, "%s: Unsupported bus format: %u\n",
__func__, tstate->bus_format);
return -EINVAL;
}
if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI_AM65X &&
fmt->is_oldi_fmt) {
- dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
+ drm_dbg(dev, "%s: %s is not OLDI-port\n",
__func__, dispc->feat->vp_name[hw_videoport]);
return -EINVAL;
}
@@ -1139,7 +1083,8 @@ static void dispc_set_num_datalines(struct dispc_device *dispc,
v = 3;
}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v,
+ DISPC_VP_CONTROL_DATALINES_MASK);
}
static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport,
@@ -1162,7 +1107,8 @@ static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport
oldi_cfg |= BIT(7); /* DEPOL */
- oldi_cfg = FLD_MOD(oldi_cfg, fmt->am65x_oldi_mode_reg_val, 3, 1);
+ FIELD_MODIFY(DISPC_VP_DSS_OLDI_CFG_MAP_MASK, &oldi_cfg,
+ fmt->am65x_oldi_mode_reg_val);
oldi_cfg |= BIT(12); /* SOFTRST */
@@ -1184,6 +1130,9 @@ void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
{
const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
const struct dispc_bus_format *fmt;
+ const struct drm_display_mode *mode = &state->adjusted_mode;
+ bool align, onoff, rf, ieo, ipc, ihs, ivs;
+ u32 hsw, hfp, hbp, vsw, vfp, vbp;
fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
tstate->bus_flags);
@@ -1196,42 +1145,26 @@ void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
dispc_enable_am65x_oldi(dispc, hw_videoport, fmt);
}
-}
-
-void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
- const struct drm_crtc_state *state)
-{
- const struct drm_display_mode *mode = &state->adjusted_mode;
- const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
- bool align, onoff, rf, ieo, ipc, ihs, ivs;
- const struct dispc_bus_format *fmt;
- u32 hsw, hfp, hbp, vsw, vfp, vbp;
-
- fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
- tstate->bus_flags);
-
- if (WARN_ON(!fmt))
- return;
dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width);
- hfp = mode->hsync_start - mode->hdisplay;
- hsw = mode->hsync_end - mode->hsync_start;
- hbp = mode->htotal - mode->hsync_end;
+ hfp = mode->crtc_hsync_start - mode->crtc_hdisplay;
+ hsw = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ hbp = mode->crtc_htotal - mode->crtc_hsync_end;
- vfp = mode->vsync_start - mode->vdisplay;
- vsw = mode->vsync_end - mode->vsync_start;
- vbp = mode->vtotal - mode->vsync_end;
+ vfp = mode->crtc_vsync_start - mode->crtc_vdisplay;
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H,
- FLD_VAL(hsw - 1, 7, 0) |
- FLD_VAL(hfp - 1, 19, 8) |
- FLD_VAL(hbp - 1, 31, 20));
+ FIELD_PREP(DISPC_VP_TIMING_H_SYNC_PULSE_MASK, hsw - 1) |
+ FIELD_PREP(DISPC_VP_TIMING_H_FRONT_PORCH_MASK, hfp - 1) |
+ FIELD_PREP(DISPC_VP_TIMING_H_BACK_PORCH_MASK, hbp - 1));
dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V,
- FLD_VAL(vsw - 1, 7, 0) |
- FLD_VAL(vfp, 19, 8) |
- FLD_VAL(vbp, 31, 20));
+ FIELD_PREP(DISPC_VP_TIMING_V_SYNC_PULSE_MASK, vsw - 1) |
+ FIELD_PREP(DISPC_VP_TIMING_V_FRONT_PORCH_MASK, vfp) |
+ FIELD_PREP(DISPC_VP_TIMING_V_BACK_PORCH_MASK, vbp));
ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
@@ -1254,24 +1187,31 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
ieo = false;
dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
- FLD_VAL(align, 18, 18) |
- FLD_VAL(onoff, 17, 17) |
- FLD_VAL(rf, 16, 16) |
- FLD_VAL(ieo, 15, 15) |
- FLD_VAL(ipc, 14, 14) |
- FLD_VAL(ihs, 13, 13) |
- FLD_VAL(ivs, 12, 12));
+ FIELD_PREP(DISPC_VP_POL_FREQ_ALIGN_MASK, align) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_ONOFF_MASK, onoff) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_RF_MASK, rf) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IEO_MASK, ieo) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IPC_MASK, ipc) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IHS_MASK, ihs) |
+ FIELD_PREP(DISPC_VP_POL_FREQ_IVS_MASK, ivs));
dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN,
- FLD_VAL(mode->hdisplay - 1, 11, 0) |
- FLD_VAL(mode->vdisplay - 1, 27, 16));
+ FIELD_PREP(DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK,
+ mode->crtc_hdisplay - 1) |
+ FIELD_PREP(DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK,
+ mode->crtc_vdisplay - 1));
+}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0);
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport)
+{
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1,
+ DISPC_VP_CONTROL_ENABLE_MASK);
}
void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
{
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0,
+ DISPC_VP_CONTROL_ENABLE_MASK);
}
void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
@@ -1285,13 +1225,16 @@ void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport)
{
- return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5);
+ return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL,
+ DISPC_VP_CONTROL_GOBIT_MASK);
}
void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport)
{
- WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5));
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5);
+ WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL,
+ DISPC_VP_CONTROL_GOBIT_MASK));
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1,
+ DISPC_VP_CONTROL_GOBIT_MASK);
}
enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN };
@@ -1347,33 +1290,61 @@ static void dispc_vp_set_default_color(struct dispc_device *dispc,
DISPC_OVR_DEFAULT_COLOR2, (v >> 32) & 0xffff);
}
+/*
+ * Calculate the percentage difference between the requested pixel clock rate
+ * and the effective rate resulting from calculating the clock divider value.
+ */
+unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
+{
+ int r = rate / 100, rr = real_rate / 100;
+
+ return (unsigned int)(abs(((rr - r) * 100) / r));
+}
+
+static int check_pixel_clock(struct dispc_device *dispc, u32 hw_videoport,
+ unsigned long clock)
+{
+ unsigned long round_clock;
+
+ /*
+ * For VPs with external clocking, clock operations must be
+ * delegated to the respective driver, so we skip the check here.
+ */
+ if (dispc->tidss->is_ext_vp_clk[hw_videoport])
+ return 0;
+
+ round_clock = clk_round_rate(dispc->vp_clk[hw_videoport], clock);
+ /*
+ * To keep the check consistent with dispc_vp_set_clk_rate(), we
+ * use the same 5% check here.
+ */
+ if (dispc_pclk_diff(clock, round_clock) > 5)
+ return -EINVAL;
+
+ return 0;
+}
+
enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
u32 hw_videoport,
const struct drm_display_mode *mode)
{
u32 hsw, hfp, hbp, vsw, vfp, vbp;
enum dispc_vp_bus_type bus_type;
- int max_pclk;
bus_type = dispc->feat->vp_bus_type[hw_videoport];
- max_pclk = dispc->feat->max_pclk_khz[bus_type];
-
- if (WARN_ON(max_pclk == 0))
+ if (WARN_ON(bus_type == DISPC_VP_TIED_OFF))
return MODE_BAD;
- if (mode->clock < dispc->feat->min_pclk_khz)
- return MODE_CLOCK_LOW;
-
- if (mode->clock > max_pclk)
- return MODE_CLOCK_HIGH;
-
if (mode->hdisplay > 4096)
return MODE_BAD;
if (mode->vdisplay > 4096)
return MODE_BAD;
+ if (check_pixel_clock(dispc, hw_videoport, mode->clock * 1000))
+ return MODE_CLOCK_RANGE;
+
/* TODO: add interlace support */
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
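
As a worked example of the tolerance: requesting 74250 kHz when the clock rounds to 72000 kHz gives dispc_pclk_diff() = |(72000 - 74250) * 100 / 74250| = 3, within the 5% bound, so the mode is accepted; a rounded rate of 65000 kHz yields 12, and the mode is rejected as MODE_CLOCK_RANGE.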
@@ -1437,17 +1408,6 @@ void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport)
clk_disable_unprepare(dispc->vp_clk[hw_videoport]);
}
-/*
- * Calculate the percentage difference between the requested pixel clock rate
- * and the effective rate resulting from calculating the clock divider value.
- */
-unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
-{
- int r = rate / 100, rr = real_rate / 100;
-
- return (unsigned int)(abs(((rr - r) * 100) / r));
-}
-
int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
unsigned long rate)
{
@@ -1491,11 +1451,11 @@ static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_id, 4, 1);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- x, 17, 6);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- y, 30, 19);
+ hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), x,
+ DISPC_OVR_ATTRIBUTES_POSX_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), y,
+ DISPC_OVR_ATTRIBUTES_POSY_MASK);
}
static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
@@ -1505,11 +1465,11 @@ static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_id, 4, 1);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
- x, 13, 0);
- OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
- y, 29, 16);
+ hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), x,
+ DISPC_OVR_ATTRIBUTES2_POSX_MASK);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), y,
+ DISPC_OVR_ATTRIBUTES2_POSY_MASK);
}
void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
@@ -1544,7 +1504,7 @@ void dispc_ovr_enable_layer(struct dispc_device *dispc,
return;
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- !!enable, 0, 0);
+ !!enable, DISPC_OVR_ATTRIBUTES_ENABLE_MASK);
}
/* CSC */
@@ -1580,14 +1540,14 @@ struct dispc_csc_coef {
static
void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval)
{
-#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19))
+#define OVAL(x, y) (FIELD_PREP(GENMASK(15, 3), x) | FIELD_PREP(GENMASK(31, 19), y))
regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]);
regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]);
regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]);
#undef OVAL
}
-#define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16))
+#define CVAL(x, y) (FIELD_PREP(GENMASK(10, 0), x) | FIELD_PREP(GENMASK(26, 16), y))
static
void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval)
{
@@ -1767,7 +1727,8 @@ static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
bool enable)
{
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable,
+ DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK);
}
/* SCALER */
@@ -1826,7 +1787,8 @@ static void dispc_vid_write_fir_coefs(struct dispc_device *dispc,
c1 = coefs->c1[phase];
c2 = coefs->c2[phase];
- c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20);
+ c12 = FIELD_PREP(GENMASK(19, 10), c1) | FIELD_PREP(GENMASK(29, 20),
+ c2);
dispc_vid_write(dispc, hw_plane, reg, c12);
}
@@ -2023,20 +1985,20 @@ static void dispc_vid_set_scaling(struct dispc_device *dispc,
u32 fourcc)
{
/* HORIZONTAL RESIZE ENABLE */
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
- sp->scale_x, 7, 7);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_x,
+ DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK);
/* VERTICAL RESIZE ENABLE */
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
- sp->scale_y, 8, 8);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_y,
+ DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK);
/* Skip the rest if no scaling is used */
if (!sp->scale_x && !sp->scale_y)
return;
/* VERTICAL 5-TAPS */
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
- sp->five_taps, 21, 21);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->five_taps,
+ DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK);
if (dispc_fourcc_is_yuv(fourcc)) {
if (sp->scale_x) {
@@ -2126,7 +2088,7 @@ static void dispc_plane_set_pixel_format(struct dispc_device *dispc,
if (dispc_color_formats[i].fourcc == fourcc) {
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
dispc_color_formats[i].dss_code,
- 6, 1);
+ DISPC_VID_ATTRIBUTES_FORMAT_MASK);
return;
}
}
@@ -2248,7 +2210,8 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)dma_addr >> 32);
dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
- (scale.in_w - 1) | ((scale.in_h - 1) << 16));
+ FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK, scale.in_h - 1) |
+ FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK, scale.in_w - 1));
/* For YUV422 format we use the macropixel size for pixel inc */
if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
@@ -2285,8 +2248,10 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
if (!lite) {
dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE,
- (state->crtc_w - 1) |
- ((state->crtc_h - 1) << 16));
+ FIELD_PREP(DISPC_VID_SIZE_SIZEY_MASK,
+ state->crtc_h - 1) |
+ FIELD_PREP(DISPC_VID_SIZE_SIZEX_MASK,
+ state->crtc_w - 1));
dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc);
}
@@ -2300,38 +2265,45 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
}
dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA,
- 0xFF & (state->alpha >> 8));
+ FIELD_PREP(DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK,
+ state->alpha >> 8));
if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
- 28, 28);
+ DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK);
else
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
- 28, 28);
+ DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK);
}
void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
{
- VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable,
+ DISPC_VID_ATTRIBUTES_ENABLE_MASK);
}
static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
{
- return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
+ return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS,
+ DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK);
}
static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc,
u32 hw_plane, u32 low, u32 high)
{
dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD,
- FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+ FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK, high) |
+ FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK, low));
}
static void dispc_vid_set_buf_threshold(struct dispc_device *dispc,
u32 hw_plane, u32 low, u32 high)
{
dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
- FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+ FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK,
+ high) |
+ FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK,
+ low));
}
static void dispc_k2g_plane_init(struct dispc_device *dispc)
@@ -2341,9 +2313,11 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev, "%s()\n", __func__);
/* MFLAG_CTRL = ENABLED */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK);
/* MFLAG_START = MFLAGNORMALSTARTMODE */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK);
for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
@@ -2380,7 +2354,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
* register is ignored.
*/
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
- 19, 19);
+ DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK);
}
}
@@ -2392,13 +2366,15 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev, "%s()\n", __func__);
- REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
- REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, DSS_CBA_CFG_PRI_LO_MASK);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, DSS_CBA_CFG_PRI_HI_MASK);
/* MFLAG_CTRL = ENABLED */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK);
/* MFLAG_START = MFLAGNORMALSTARTMODE */
- REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK);
for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
@@ -2431,7 +2407,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
/* Prefetch up to PRELOAD value */
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
- 19, 19);
+ DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK);
}
}
@@ -2461,7 +2437,8 @@ static void dispc_vp_init(struct dispc_device *dispc)
/* Enable the gamma shadow bit-field for all VPs */
for (i = 0; i < dispc->feat->num_vps; i++)
- VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2);
+ VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1,
+ DISPC_VP_CONFIG_GAMMAENABLE_MASK);
}
static void dispc_initial_config(struct dispc_device *dispc)
@@ -2472,8 +2449,8 @@ static void dispc_initial_config(struct dispc_device *dispc)
/* Note: Hardcoded DPI routing on J721E for now */
if (dispc->feat->subrev == DISPC_J721E) {
dispc_write(dispc, DISPC_CONNECTIONS,
- FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */
- FLD_VAL(8, 7, 4) /* VP3 to DPI1 */
+ FIELD_PREP(DISPC_CONNECTIONS_DPI_0_CONN_MASK, 2) | /* VP1 to DPI0 */
+ FIELD_PREP(DISPC_CONNECTIONS_DPI_1_CONN_MASK, 8) /* VP3 to DPI1 */
);
}
}
@@ -2651,8 +2628,8 @@ static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm,
cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]);
}
-#define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \
- FLD_VAL(xB, 31, 22))
+#define CVAL(xR, xG, xB) (FIELD_PREP(GENMASK(9, 0), xR) | FIELD_PREP(GENMASK(20, 11), xG) | \
+ FIELD_PREP(GENMASK(31, 22), xB))
static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc,
u32 *regval)
@@ -2694,8 +2671,8 @@ static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
cprenable = 1;
}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
- cprenable, 15, 15);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, cprenable,
+ DISPC_VP_CONFIG_CPR_MASK);
}
static s16 dispc_S31_32_to_s3_8(s64 coef)
@@ -2760,8 +2737,8 @@ static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
colorconvenable = 1;
}
- VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
- colorconvenable, 24, 24);
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, colorconvenable,
+ DISPC_VP_CONFIG_COLORCONVENABLE_MASK);
}
static void dispc_vp_set_color_mgmt(struct dispc_device *dispc,
@@ -2816,26 +2793,26 @@ int dispc_runtime_resume(struct dispc_device *dispc)
clk_prepare_enable(dispc->fclk);
- if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+ if (REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_FUNC_RESETDONE) == 0)
dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n",
dispc_read(dispc, DSS_REVISION));
dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n",
- REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
- REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
- REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(1, 1)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(2, 2)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(3, 3)));
if (dispc->feat->subrev == DISPC_AM625 ||
dispc->feat->subrev == DISPC_AM65X)
dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
- REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
- REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
- REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(5, 5)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(6, 6)),
+ REG_GET(dispc, DSS_SYSSTATUS, GENMASK(7, 7)));
dev_dbg(dispc->dev, "DISPC IDLE %d\n",
- REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
+ REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_IDLE_STATUS));
dispc_initial_config(dispc);
@@ -2848,8 +2825,6 @@ int dispc_runtime_resume(struct dispc_device *dispc)
void dispc_remove(struct tidss_device *tidss)
{
- dev_dbg(tidss->dev, "%s\n", __func__);
-
tidss->dispc = NULL;
}
@@ -2912,7 +2887,8 @@ static void dispc_softreset_k2g(struct dispc_device *dispc)
spin_unlock_irqrestore(&dispc->tidss->irq_lock, flags);
for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx)
- VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0);
+ VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0,
+ DISPC_VP_CONTROL_ENABLE_MASK);
}
static int dispc_softreset(struct dispc_device *dispc)
@@ -2926,7 +2902,7 @@ static int dispc_softreset(struct dispc_device *dispc)
}
/* Soft reset */
- REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1);
+ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, DSS_SYSCONFIG_SOFTRESET_MASK);
/* Wait for reset to complete */
ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS,
val, val & 1, 100, 5000);
@@ -2990,8 +2966,6 @@ int dispc_init(struct tidss_device *tidss)
unsigned int i, num_fourccs;
int r = 0;
- dev_dbg(dev, "%s\n", __func__);
-
feat = tidss->feat;
if (feat->subrev != DISPC_K2G) {
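The dispc.c hunks above all apply one pattern: open-coded FLD_VAL()/REG_GET() bit positions are replaced by named GENMASK() masks consumed by FIELD_PREP(). A minimal sketch of the equivalence, mirroring the DISPC_CONNECTIONS hunk above (FLD_VAL() is the driver-local helper taking high and low bit numbers; FIELD_PREP() and GENMASK() come from <linux/bitfield.h> and <linux/bits.h>):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define DISPC_CONNECTIONS_DPI_0_CONN_MASK	GENMASK(3, 0)

static inline u32 dpi0_conn_regval(void)
{
	/* Old style: FLD_VAL(2, 3, 0) shifts the value into bits 3..0. */
	/* New style: FIELD_PREP() derives the shift from the named mask. */
	return FIELD_PREP(DISPC_CONNECTIONS_DPI_0_CONN_MASK, 2);	/* == 0x2 */
}

Reads move the same way: the (high, low) bit pair passed to REG_GET() becomes a single mask argument, with the extraction presumably done via FIELD_GET().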
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index b8614f62186c..739d211d0018 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -7,11 +7,14 @@
#ifndef __TIDSS_DISPC_H__
#define __TIDSS_DISPC_H__
+#include <drm/drm_color_mgmt.h>
+
#include "tidss_drv.h"
struct dispc_device;
struct drm_crtc_state;
+struct drm_plane_state;
enum tidss_gamma_type { TIDSS_GAMMA_8BIT, TIDSS_GAMMA_10BIT };
@@ -74,9 +77,6 @@ enum dispc_dss_subrevision {
};
struct dispc_features {
- int min_pclk_khz;
- int max_pclk_khz[DISPC_VP_MAX_BUS_TYPE];
-
struct dispc_features_scaling scaling;
enum dispc_dss_subrevision subrev;
@@ -116,8 +116,7 @@ void dispc_ovr_enable_layer(struct dispc_device *dispc,
void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
const struct drm_crtc_state *state);
-void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
- const struct drm_crtc_state *state);
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport);
void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport);
void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport);
bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
index 50a3f28250ef..382027dddce8 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -56,7 +56,12 @@ enum dispc_common_regs {
#define DSS_REVISION REG(DSS_REVISION)
#define DSS_SYSCONFIG REG(DSS_SYSCONFIG)
+#define DSS_SYSCONFIG_SOFTRESET_MASK GENMASK(1, 1)
+
#define DSS_SYSSTATUS REG(DSS_SYSSTATUS)
+#define DSS_SYSSTATUS_DISPC_IDLE_STATUS GENMASK(9, 9)
+#define DSS_SYSSTATUS_DISPC_FUNC_RESETDONE GENMASK(0, 0)
+
#define DISPC_IRQ_EOI REG(DISPC_IRQ_EOI)
#define DISPC_IRQSTATUS_RAW REG(DISPC_IRQSTATUS_RAW)
#define DISPC_IRQSTATUS REG(DISPC_IRQSTATUS)
@@ -70,9 +75,15 @@ enum dispc_common_regs {
#define WB_IRQSTATUS REG(WB_IRQSTATUS)
#define DISPC_GLOBAL_MFLAG_ATTRIBUTE REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE)
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK GENMASK(6, 6)
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK GENMASK(1, 0)
+
#define DISPC_GLOBAL_OUTPUT_ENABLE REG(DISPC_GLOBAL_OUTPUT_ENABLE)
#define DISPC_GLOBAL_BUFFER REG(DISPC_GLOBAL_BUFFER)
#define DSS_CBA_CFG REG(DSS_CBA_CFG)
+#define DSS_CBA_CFG_PRI_HI_MASK GENMASK(5, 3)
+#define DSS_CBA_CFG_PRI_LO_MASK GENMASK(2, 0)
+
#define DISPC_DBG_CONTROL REG(DISPC_DBG_CONTROL)
#define DISPC_DBG_STATUS REG(DISPC_DBG_STATUS)
#define DISPC_CLKGATING_DISABLE REG(DISPC_CLKGATING_DISABLE)
@@ -88,6 +99,9 @@ enum dispc_common_regs {
#define FBDC_CONSTANT_COLOR_0 REG(FBDC_CONSTANT_COLOR_0)
#define FBDC_CONSTANT_COLOR_1 REG(FBDC_CONSTANT_COLOR_1)
#define DISPC_CONNECTIONS REG(DISPC_CONNECTIONS)
+#define DISPC_CONNECTIONS_DPI_1_CONN_MASK GENMASK(7, 4)
+#define DISPC_CONNECTIONS_DPI_0_CONN_MASK GENMASK(3, 0)
+
#define DISPC_MSS_VP1 REG(DISPC_MSS_VP1)
#define DISPC_MSS_VP3 REG(DISPC_MSS_VP3)
@@ -102,13 +116,27 @@ enum dispc_common_regs {
#define DISPC_VID_ACCUV2_0 0x18
#define DISPC_VID_ACCUV2_1 0x1c
#define DISPC_VID_ATTRIBUTES 0x20
+#define DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK GENMASK(28, 28)
+#define DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK GENMASK(21, 21)
+#define DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK GENMASK(19, 19)
+#define DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK GENMASK(9, 9)
+#define DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK GENMASK(8, 8)
+#define DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK GENMASK(7, 7)
+#define DISPC_VID_ATTRIBUTES_FORMAT_MASK GENMASK(6, 1)
+#define DISPC_VID_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0)
+
#define DISPC_VID_ATTRIBUTES2 0x24
#define DISPC_VID_BA_0 0x28
#define DISPC_VID_BA_1 0x2c
#define DISPC_VID_BA_UV_0 0x30
#define DISPC_VID_BA_UV_1 0x34
#define DISPC_VID_BUF_SIZE_STATUS 0x38
+#define DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK GENMASK(15, 0)
+
#define DISPC_VID_BUF_THRESHOLD 0x3c
+#define DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK GENMASK(31, 16)
+#define DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK GENMASK(15, 0)
+
#define DISPC_VID_CSC_COEF(n) (0x40 + (n) * 4)
#define DISPC_VID_FIRH 0x5c
@@ -137,15 +165,26 @@ enum dispc_common_regs {
#define DISPC_VID_FIR_COEF_V12_C(phase) (0x1bc + (phase) * 4)
#define DISPC_VID_GLOBAL_ALPHA 0x1fc
+#define DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK GENMASK(7, 0)
+
#define DISPC_VID_K2G_IRQENABLE 0x200 /* K2G */
#define DISPC_VID_K2G_IRQSTATUS 0x204 /* K2G */
#define DISPC_VID_MFLAG_THRESHOLD 0x208
+#define DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK GENMASK(31, 16)
+#define DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK GENMASK(15, 0)
+
#define DISPC_VID_PICTURE_SIZE 0x20c
+#define DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK GENMASK(27, 16)
+#define DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK GENMASK(11, 0)
+
#define DISPC_VID_PIXEL_INC 0x210
#define DISPC_VID_K2G_POSITION 0x214 /* K2G */
#define DISPC_VID_PRELOAD 0x218
#define DISPC_VID_ROW_INC 0x21c
#define DISPC_VID_SIZE 0x220
+#define DISPC_VID_SIZE_SIZEY_MASK GENMASK(27, 16)
+#define DISPC_VID_SIZE_SIZEX_MASK GENMASK(11, 0)
+
#define DISPC_VID_BA_EXT_0 0x22c
#define DISPC_VID_BA_EXT_1 0x230
#define DISPC_VID_BA_UV_EXT_0 0x234
@@ -173,11 +212,27 @@ enum dispc_common_regs {
#define DISPC_OVR_TRANS_COLOR_MIN 0x18
#define DISPC_OVR_TRANS_COLOR_MIN2 0x1c
#define DISPC_OVR_ATTRIBUTES(n) (0x20 + (n) * 4)
+#define DISPC_OVR_ATTRIBUTES_POSY_MASK GENMASK(30, 19)
+#define DISPC_OVR_ATTRIBUTES_POSX_MASK GENMASK(17, 6)
+#define DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK GENMASK(4, 1)
+#define DISPC_OVR_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0)
+
#define DISPC_OVR_ATTRIBUTES2(n) (0x34 + (n) * 4) /* J721E */
+#define DISPC_OVR_ATTRIBUTES2_POSY_MASK GENMASK(29, 16)
+#define DISPC_OVR_ATTRIBUTES2_POSX_MASK GENMASK(13, 0)
+
/* VP */
#define DISPC_VP_CONFIG 0x0
+#define DISPC_VP_CONFIG_COLORCONVENABLE_MASK GENMASK(24, 24)
+#define DISPC_VP_CONFIG_CPR_MASK GENMASK(15, 15)
+#define DISPC_VP_CONFIG_GAMMAENABLE_MASK GENMASK(2, 2)
+
#define DISPC_VP_CONTROL 0x4
+#define DISPC_VP_CONTROL_DATALINES_MASK GENMASK(10, 8)
+#define DISPC_VP_CONTROL_GOBIT_MASK GENMASK(5, 5)
+#define DISPC_VP_CONTROL_ENABLE_MASK GENMASK(0, 0)
+
#define DISPC_VP_CSC_COEF0 0x8
#define DISPC_VP_CSC_COEF1 0xc
#define DISPC_VP_CSC_COEF2 0x10
@@ -189,9 +244,28 @@ enum dispc_common_regs {
#define DISPC_VP_DATA_CYCLE_2 0x1c
#define DISPC_VP_LINE_NUMBER 0x44
#define DISPC_VP_POL_FREQ 0x4c
+#define DISPC_VP_POL_FREQ_ALIGN_MASK GENMASK(18, 18)
+#define DISPC_VP_POL_FREQ_ONOFF_MASK GENMASK(17, 17)
+#define DISPC_VP_POL_FREQ_RF_MASK GENMASK(16, 16)
+#define DISPC_VP_POL_FREQ_IEO_MASK GENMASK(15, 15)
+#define DISPC_VP_POL_FREQ_IPC_MASK GENMASK(14, 14)
+#define DISPC_VP_POL_FREQ_IHS_MASK GENMASK(13, 13)
+#define DISPC_VP_POL_FREQ_IVS_MASK GENMASK(12, 12)
+
#define DISPC_VP_SIZE_SCREEN 0x50
+#define DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK GENMASK(11, 0)
+#define DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK GENMASK(27, 16)
+
#define DISPC_VP_TIMING_H 0x54
+#define DISPC_VP_TIMING_H_SYNC_PULSE_MASK GENMASK(7, 0)
+#define DISPC_VP_TIMING_H_FRONT_PORCH_MASK GENMASK(19, 8)
+#define DISPC_VP_TIMING_H_BACK_PORCH_MASK GENMASK(31, 20)
+
#define DISPC_VP_TIMING_V 0x58
+#define DISPC_VP_TIMING_V_SYNC_PULSE_MASK GENMASK(7, 0)
+#define DISPC_VP_TIMING_V_FRONT_PORCH_MASK GENMASK(19, 8)
+#define DISPC_VP_TIMING_V_BACK_PORCH_MASK GENMASK(31, 20)
+
#define DISPC_VP_CSC_COEF3 0x5c
#define DISPC_VP_CSC_COEF4 0x60
#define DISPC_VP_CSC_COEF5 0x64
@@ -220,6 +294,8 @@ enum dispc_common_regs {
#define DISPC_VP_SAFETY_LFSR_SEED 0x110
#define DISPC_VP_GAMMA_TABLE 0x120
#define DISPC_VP_DSS_OLDI_CFG 0x160
+#define DISPC_VP_DSS_OLDI_CFG_MAP_MASK GENMASK(3, 1)
+
#define DISPC_VP_DSS_OLDI_STATUS 0x164
#define DISPC_VP_DSS_OLDI_LB 0x168
#define DISPC_VP_DSS_MERGE_SPLIT 0x16c /* J721E */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index a1b12e52aca4..1c8cc18bc53c 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/aperture.h>
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
@@ -32,8 +33,6 @@ int tidss_runtime_get(struct tidss_device *tidss)
{
int r;
- dev_dbg(tidss->dev, "%s\n", __func__);
-
r = pm_runtime_resume_and_get(tidss->dev);
WARN_ON(r < 0);
return r;
@@ -43,8 +42,6 @@ void tidss_runtime_put(struct tidss_device *tidss)
{
int r;
- dev_dbg(tidss->dev, "%s\n", __func__);
-
pm_runtime_mark_last_busy(tidss->dev);
r = pm_runtime_put_autosuspend(tidss->dev);
@@ -55,8 +52,6 @@ static int __maybe_unused tidss_pm_runtime_suspend(struct device *dev)
{
struct tidss_device *tidss = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
return dispc_runtime_suspend(tidss->dispc);
}
@@ -65,8 +60,6 @@ static int __maybe_unused tidss_pm_runtime_resume(struct device *dev)
struct tidss_device *tidss = dev_get_drvdata(dev);
int r;
- dev_dbg(dev, "%s\n", __func__);
-
r = dispc_runtime_resume(tidss->dispc);
if (r)
return r;
@@ -78,8 +71,6 @@ static int __maybe_unused tidss_suspend(struct device *dev)
{
struct tidss_device *tidss = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
return drm_mode_config_helper_suspend(&tidss->ddev);
}
@@ -87,8 +78,6 @@ static int __maybe_unused tidss_resume(struct device *dev)
{
struct tidss_device *tidss = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
return drm_mode_config_helper_resume(&tidss->ddev);
}
@@ -126,8 +115,6 @@ static int tidss_probe(struct platform_device *pdev)
int ret;
int irq;
- dev_dbg(dev, "%s\n", __func__);
-
tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver,
struct tidss_device, ddev);
if (IS_ERR(tidss))
@@ -192,12 +179,20 @@ static int tidss_probe(struct platform_device *pdev)
goto err_irq_uninstall;
}
+ /* Remove possible early fb before setting up the fbdev */
+ ret = aperture_remove_all_conflicting_devices(tidss_driver.name);
+ if (ret)
+ goto err_drm_dev_unreg;
+
drm_client_setup(ddev, NULL);
dev_dbg(dev, "%s done\n", __func__);
return 0;
+err_drm_dev_unreg:
+ drm_dev_unregister(ddev);
+
err_irq_uninstall:
tidss_irq_uninstall(ddev);
@@ -219,8 +214,6 @@ static void tidss_remove(struct platform_device *pdev)
struct tidss_device *tidss = platform_get_drvdata(pdev);
struct drm_device *ddev = &tidss->ddev;
- dev_dbg(dev, "%s\n", __func__);
-
drm_dev_unregister(ddev);
drm_atomic_helper_shutdown(ddev);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index d14d5d28f0a3..e1c1f41d8b4b 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -9,6 +9,8 @@
#include <linux/spinlock.h>
+#include <drm/drm_device.h>
+
#define TIDSS_MAX_PORTS 4
#define TIDSS_MAX_PLANES 4
#define TIDSS_MAX_OLDI_TXES 2
@@ -22,6 +24,8 @@ struct tidss_device {
const struct dispc_features *feat;
struct dispc_device *dispc;
+ bool is_ext_vp_clk[TIDSS_MAX_PORTS];
+
unsigned int num_crtcs;
struct drm_crtc *crtcs[TIDSS_MAX_PORTS];
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index c34eb90cddbe..86eb5d97410b 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -24,8 +24,6 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
struct drm_device *ddev = old_state->dev;
struct tidss_device *tidss = to_tidss(ddev);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
tidss_runtime_get(tidss);
drm_atomic_helper_commit_modeset_disables(ddev, old_state);
@@ -245,8 +243,6 @@ int tidss_modeset_init(struct tidss_device *tidss)
struct drm_device *ddev = &tidss->ddev;
int ret;
- dev_dbg(tidss->dev, "%s\n", __func__);
-
ret = drmm_mode_config_init(ddev);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tidss/tidss_oldi.c b/drivers/gpu/drm/tidss/tidss_oldi.c
index 8f25159d0666..17c535bfa057 100644
--- a/drivers/gpu/drm/tidss/tidss_oldi.c
+++ b/drivers/gpu/drm/tidss/tidss_oldi.c
@@ -309,6 +309,25 @@ static u32 *tidss_oldi_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+static enum drm_mode_status
+tidss_oldi_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge);
+ unsigned long round_clock;
+
+ round_clock = clk_round_rate(oldi->serial, mode->clock * 7 * 1000);
+ /*
+ * To keep the check consistent with dispc_vp_set_clk_rate(),
+ * we use the same 5% check here.
+ */
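+	/*
+	 * Hypothetical numbers for illustration (not from the datasheet):
+	 * a 44.1 MHz pixel clock needs a 308.7 MHz serial clock (7x). If
+	 * clk_round_rate() can only deliver 300 MHz, the difference is
+	 * about 2% and the mode is accepted; a best effort of 280 MHz
+	 * (roughly 9%) would be rejected.
+	 */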
+ if (dispc_pclk_diff(mode->clock * 7 * 1000, round_clock) > 5)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
static const struct drm_bridge_funcs tidss_oldi_bridge_funcs = {
.attach = tidss_oldi_bridge_attach,
.atomic_pre_enable = tidss_oldi_atomic_pre_enable,
@@ -317,6 +336,7 @@ static const struct drm_bridge_funcs tidss_oldi_bridge_funcs = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+ .mode_valid = tidss_oldi_mode_valid,
};
static int get_oldi_mode(struct device_node *oldi_tx, int *companion_instance)
@@ -430,6 +450,7 @@ void tidss_oldi_deinit(struct tidss_device *tidss)
for (int i = 0; i < tidss->num_oldis; i++) {
if (tidss->oldis[i]) {
drm_bridge_remove(&tidss->oldis[i]->bridge);
+ tidss->is_ext_vp_clk[tidss->oldis[i]->parent_vp] = false;
tidss->oldis[i] = NULL;
}
}
@@ -464,7 +485,6 @@ int tidss_oldi_init(struct tidss_device *tidss)
* which may still be connected.
* Continue to search for that.
*/
- ret = 0;
continue;
}
goto err_put_node;
@@ -581,6 +601,7 @@ int tidss_oldi_init(struct tidss_device *tidss)
oldi->bridge.timings = &default_tidss_oldi_timings;
tidss->oldis[tidss->num_oldis++] = oldi;
+ tidss->is_ext_vp_clk[oldi->parent_vp] = true;
oldi->tidss = tidss;
drm_bridge_add(&oldi->bridge);
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 142ae81951a0..bd10bc1b9961 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -42,8 +42,6 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
u32 hw_videoport;
int ret;
- dev_dbg(ddev->dev, "%s\n", __func__);
-
if (!new_plane_state->crtc) {
/*
* The visible field is not reset by the DRM core but only
@@ -124,8 +122,6 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
plane);
u32 hw_videoport;
- dev_dbg(ddev->dev, "%s\n", __func__);
-
if (!new_state->visible) {
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
return;
@@ -143,8 +139,6 @@ static void tidss_plane_atomic_enable(struct drm_plane *plane,
struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
}
@@ -155,8 +149,6 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane,
struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
- dev_dbg(ddev->dev, "%s\n", __func__);
-
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
}
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
index aecaf2728406..92c560c3a621 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.h
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -7,6 +7,8 @@
#ifndef __TIDSS_PLANE_H__
#define __TIDSS_PLANE_H__
+#include <drm/drm_plane.h>
+
#define to_tidss_plane(p) container_of((p), struct tidss_plane, plane)
struct tidss_device;
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.h b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
index 9c560d0fdac0..9824d02d9d1f 100644
--- a/drivers/gpu/drm/tidss/tidss_scale_coefs.h
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
+struct device;
+
struct tidss_scale_coefs {
s16 c2[16];
s16 c1[16];
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index b5f60b2b2d0e..5718d9d83a49 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -676,14 +676,7 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
if (!crtc_state->active)
return 0;
- if (state->planes[0].ptr != crtc->primary ||
- state->planes[0].state == NULL ||
- state->planes[0].state->crtc != crtc) {
- dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
- return -EINVAL;
- }
-
- return 0;
+ return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
}
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index cf77a8ce7398..aa72ca679598 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -42,8 +42,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 06e54694a7f2..f0e72d4b6a47 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -82,6 +82,22 @@ config DRM_PANEL_MIPI_DBI
https://github.com/notro/panel-mipi-dbi/wiki.
To compile this driver as a module, choose M here.
+config DRM_PIXPAPER
+ tristate "DRM support for PIXPAPER display panels"
+ depends on DRM && SPI
+ depends on MMU
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ help
+ DRM driver for the Mayqueen Pixpaper e-ink display panel.
+
+ This driver supports small e-paper displays connected over SPI,
+ with a resolution of 122x250 and XRGB8888 framebuffer format.
+ It is intended for low-power embedded applications.
+
+ If M is selected, the module will be built as pixpaper.ko.
+
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 4a9ff61ec254..48d30bf6152f 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
+obj-$(CONFIG_DRM_PIXPAPER) += pixpaper.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 8d3b7c4fa6a4..222e4ae1abbd 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -21,7 +21,10 @@
#include <drm/drm_module.h>
#include <drm/drm_panic.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include <video/vga.h>
@@ -252,7 +255,7 @@ static int bochs_hw_init(struct bochs_device *bochs)
}
bochs->ioports = 1;
} else {
- dev_err(dev->dev, "I/O ports are not supported\n");
+ drm_err(dev, "I/O ports are not supported\n");
return -EIO;
}
@@ -526,6 +529,7 @@ static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct bochs_device *bochs = to_bochs_device(crtc->dev);
bochs_hw_blank(bochs, false);
+ drm_crtc_vblank_on(crtc);
}
static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
@@ -533,12 +537,14 @@ static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
{
struct bochs_device *bochs = to_bochs_device(crtc->dev);
+ drm_crtc_vblank_off(crtc);
bochs_hw_blank(bochs, true);
}
static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = {
.mode_set_nofb = bochs_crtc_helper_mode_set_nofb,
.atomic_check = bochs_crtc_helper_atomic_check,
+ .atomic_flush = drm_crtc_vblank_atomic_flush,
.atomic_enable = bochs_crtc_helper_atomic_enable,
.atomic_disable = bochs_crtc_helper_atomic_disable,
};
@@ -550,6 +556,7 @@ static const struct drm_crtc_funcs bochs_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static const struct drm_encoder_funcs bochs_encoder_funcs = {
@@ -670,6 +677,10 @@ static int bochs_kms_init(struct bochs_device *bochs)
drm_connector_attach_edid_property(connector);
drm_connector_attach_encoder(connector, encoder);
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(dev);
return 0;
diff --git a/drivers/gpu/drm/tiny/cirrus-qemu.c b/drivers/gpu/drm/tiny/cirrus-qemu.c
index 97a93adc5669..9ba0eab489bb 100644
--- a/drivers/gpu/drm/tiny/cirrus-qemu.c
+++ b/drivers/gpu/drm/tiny/cirrus-qemu.c
@@ -44,7 +44,10 @@
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_module.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#define DRIVER_NAME "cirrus-qemu"
#define DRIVER_DESC "qemu cirrus vga"
@@ -404,11 +407,15 @@ static void cirrus_crtc_helper_atomic_enable(struct drm_crtc *crtc,
#endif
drm_dev_exit(idx);
+
+ drm_crtc_vblank_on(crtc);
}
static const struct drm_crtc_helper_funcs cirrus_crtc_helper_funcs = {
.atomic_check = cirrus_crtc_helper_atomic_check,
+ .atomic_flush = drm_crtc_vblank_atomic_flush,
.atomic_enable = cirrus_crtc_helper_atomic_enable,
+ .atomic_disable = drm_crtc_vblank_atomic_disable,
};
static const struct drm_crtc_funcs cirrus_crtc_funcs = {
@@ -418,6 +425,7 @@ static const struct drm_crtc_funcs cirrus_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static const struct drm_encoder_funcs cirrus_encoder_funcs = {
@@ -493,6 +501,10 @@ static int cirrus_pipe_init(struct cirrus_device *cirrus)
if (ret)
return ret;
+ ret = drm_vblank_init(dev, 1);
+ if (ret)
+ return ret;
+
return 0;
}
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index fb0004166f4a..d73dfebb4353 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -25,6 +25,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index df263818f45f..9f26aaca0bfa 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -25,6 +25,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
#define HX8357D_SETOSC 0xb0
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index 62cadf5e033d..7c154c008344 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -15,6 +15,7 @@
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 6de44ff69b51..d32538b1eb09 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -29,6 +29,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#define ILI9225_DRIVER_READ_CODE 0x00
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index e55029433509..2ab750cba505 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -24,6 +24,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
#define ILI9341_FRMCTR1 0xb1
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 093661c771a0..1e411a0f4567 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -23,6 +23,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#define ILI9486_ITFCTR1 0xb0
#define ILI9486_PWCTRL1 0xc2
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index b6b4664908ae..a063eff77624 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -22,6 +22,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
#define ILI9341_FRMCTR1 0xb1
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index 23914a9f7fd3..82dfa169f762 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -25,6 +25,7 @@
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
#include <drm/drm_modeset_helper.h>
+#include <drm/drm_print.h>
#include <video/mipi_display.h>
diff --git a/drivers/gpu/drm/tiny/pixpaper.c b/drivers/gpu/drm/tiny/pixpaper.c
new file mode 100644
index 000000000000..df3ec42edd57
--- /dev/null
+++ b/drivers/gpu/drm/tiny/pixpaper.c
@@ -0,0 +1,1166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DRM driver for PIXPAPER e-ink panel
+ *
+ * Author: LiangCheng Wang <zaq14760@gmail.com>
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_shmem.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/*
+ * Note on Undocumented Commands/Registers:
+ *
+ * Several commands and register parameters defined in this header are not
+ * documented in the datasheet. Their values and usage have been derived
+ * through analysis of existing userspace example programs.
+ *
+ * These 'unknown' definitions are crucial for the proper initialization
+ * and stable operation of the panel. Modifying these values without
+ * thorough understanding may lead to display anomalies, panel damage,
+ * or unexpected behavior.
+ */
+
+/* Command definitions */
+#define PIXPAPER_CMD_PANEL_SETTING 0x00 /* R00H: Panel settings */
+#define PIXPAPER_CMD_POWER_SETTING 0x01 /* R01H: Power settings */
+#define PIXPAPER_CMD_POWER_OFF 0x02 /* R02H: Power off */
+#define PIXPAPER_CMD_POWER_OFF_SEQUENCE 0x03 /* R03H: Power off sequence */
+#define PIXPAPER_CMD_POWER_ON 0x04 /* R04H: Power on */
+#define PIXPAPER_CMD_BOOSTER_SOFT_START 0x06 /* R06H: Booster soft start */
+#define PIXPAPER_CMD_DEEP_SLEEP 0x07 /* R07H: Deep sleep */
+#define PIXPAPER_CMD_DATA_START_TRANSMISSION 0x10
+/* R10H: Data transmission start */
+#define PIXPAPER_CMD_DISPLAY_REFRESH 0x12 /* R12H: Display refresh */
+#define PIXPAPER_CMD_PLL_CONTROL 0x30 /* R30H: PLL control */
+#define PIXPAPER_CMD_TEMP_SENSOR_CALIB 0x41
+/* R41H: Temperature sensor calibration */
+#define PIXPAPER_CMD_UNKNOWN_4D 0x4D /* R4DH: Unknown command */
+#define PIXPAPER_CMD_VCOM_INTERVAL 0x50 /* R50H: VCOM interval */
+#define PIXPAPER_CMD_UNKNOWN_60 0x60 /* R60H: Unknown command */
+#define PIXPAPER_CMD_RESOLUTION_SETTING 0x61 /* R61H: Resolution settings */
+#define PIXPAPER_CMD_GATE_SOURCE_START 0x65 /* R65H: Gate/source start */
+#define PIXPAPER_CMD_UNKNOWN_B4 0xB4 /* RB4H: Unknown command */
+#define PIXPAPER_CMD_UNKNOWN_B5 0xB5 /* RB5H: Unknown command */
+#define PIXPAPER_CMD_UNKNOWN_E0 0xE0 /* RE0H: Unknown command */
+#define PIXPAPER_CMD_POWER_SAVING 0xE3 /* RE3H: Power saving */
+#define PIXPAPER_CMD_UNKNOWN_E7 0xE7 /* RE7H: Unknown command */
+#define PIXPAPER_CMD_UNKNOWN_E9 0xE9 /* RE9H: Unknown command */
+
+/* R00H PSR - First Parameter */
+#define PIXPAPER_PSR_RST_N BIT(0)
+/* Bit 0: RST_N, 1=no effect (default), 0=reset with booster OFF */
+#define PIXPAPER_PSR_SHD_N BIT(1)
+/* Bit 1: SHD_N, 1=booster ON (default), 0=booster OFF */
+#define PIXPAPER_PSR_SHL BIT(2)
+/* Bit 2: SHL, 1=shift right (default), 0=shift left */
+#define PIXPAPER_PSR_UD BIT(3)
+/* Bit 3: UD, 1=scan up (default), 0=scan down */
+#define PIXPAPER_PSR_PST_MODE BIT(5)
+/* Bit 5: PST_MODE, 0=frame scanning (default), 1=external */
+#define PIXPAPER_PSR_RES_MASK (3 << 6)
+/* Bits 7-6: RES[1:0], resolution setting */
+#define PIXPAPER_PSR_RES_176x296 (0x0 << 6) /* 00: 176x296 */
+#define PIXPAPER_PSR_RES_128x296 (0x1 << 6) /* 01: 128x296 */
+#define PIXPAPER_PSR_RES_128x250 (0x2 << 6) /* 10: 128x250 */
+#define PIXPAPER_PSR_RES_112x204 (0x3 << 6) /* 11: 112x204 */
+#define PIXPAPER_PSR_CONFIG \
+ (PIXPAPER_PSR_RST_N | PIXPAPER_PSR_SHD_N | PIXPAPER_PSR_SHL | \
+ PIXPAPER_PSR_UD)
+/* 0x0F: Default settings, resolution set by R61H */
+
+/* R00H PSR - Second Parameter */
+#define PIXPAPER_PSR2_VC_LUTZ \
+ (1 << 0) /* Bit 0: VC_LUTZ, 1=VCOM float after refresh (default), 0=no effect */
+#define PIXPAPER_PSR2_NORG \
+ (1 << 1) /* Bit 1: NORG, 1=VCOM to GND before power off, 0=no effect (default) */
+#define PIXPAPER_PSR2_TIEG \
+ (1 << 2) /* Bit 2: TIEG, 1=VGN to GND on power off, 0=no effect (default) */
+#define PIXPAPER_PSR2_TS_AUTO \
+ (1 << 3) /* Bit 3: TS_AUTO, 1=sensor on RST_N low to high (default), 0=on booster */
+#define PIXPAPER_PSR2_VCMZ \
+ (1 << 4) /* Bit 4: VCMZ, 1=VCOM always floating, 0=no effect (default) */
+#define PIXPAPER_PSR2_FOPT \
+ (1 << 5) /* Bit 5: FOPT, 0=scan 1 frame (default), 1=no scan, HiZ */
+#define PIXPAPER_PSR_CONFIG2 \
+ (PIXPAPER_PSR2_VC_LUTZ | \
+ PIXPAPER_PSR2_TS_AUTO) /* 0x09: Default VCOM and temp sensor settings */
+
+/* R01H PWR - Power Setting Register */
+/* First Parameter */
+#define PIXPAPER_PWR_VDG_EN \
+ (1 << 0) /* Bit 0: VDG_EN, 1=internal DCDC for VGP/VGN (default), 0=external */
+#define PIXPAPER_PWR_VDS_EN \
+ (1 << 1) /* Bit 1: VDS_EN, 1=internal regulator for VSP/VSN (default), 0=external */
+#define PIXPAPER_PWR_VSC_EN \
+ (1 << 2) /* Bit 2: VSC_EN, 1=internal regulator for VSPL (default), 0=external */
+#define PIXPAPER_PWR_V_MODE \
+ (1 << 3) /* Bit 3: V_MODE, 0=Mode0 (default), 1=Mode1 */
+#define PIXPAPER_PWR_CONFIG1 \
+ (PIXPAPER_PWR_VDG_EN | PIXPAPER_PWR_VDS_EN | \
+ PIXPAPER_PWR_VSC_EN) /* 0x07: Internal power for VGP/VGN, VSP/VSN, VSPL */
+
+/* Second Parameter */
+#define PIXPAPER_PWR_VGPN_MASK \
+ (3 << 0) /* Bits 1-0: VGPN, VGP/VGN voltage levels */
+#define PIXPAPER_PWR_VGPN_20V (0x0 << 0) /* 00: VGP=20V, VGN=-20V (default) */
+#define PIXPAPER_PWR_VGPN_17V (0x1 << 0) /* 01: VGP=17V, VGN=-17V */
+#define PIXPAPER_PWR_VGPN_15V (0x2 << 0) /* 10: VGP=15V, VGN=-15V */
+#define PIXPAPER_PWR_VGPN_10V (0x3 << 0) /* 11: VGP=10V, VGN=-10V */
+#define PIXPAPER_PWR_CONFIG2 PIXPAPER_PWR_VGPN_20V /* 0x00: VGP=20V, VGN=-20V */
+
+/* Third, Fourth, Sixth Parameters (VSP_1, VSPL_0, VSPL_1) */
+#define PIXPAPER_PWR_VSP_8_2V 0x22 /* VSP_1/VSPL_1: 8.2V (34 decimal) */
+#define PIXPAPER_PWR_VSPL_15V 0x78 /* VSPL_0: 15V (120 decimal) */
+
+/* Fifth Parameter (VSN_1) */
+#define PIXPAPER_PWR_VSN_4V 0x0A /* VSN_1: -4V (10 decimal) */
+
+/* R03H PFS - Power Off Sequence Setting Register */
+/* First Parameter */
+#define PIXPAPER_PFS_T_VDS_OFF_MASK \
+ (3 << 0) /* Bits 1-0: T_VDS_OFF, VSP/VSN power-off sequence */
+#define PIXPAPER_PFS_T_VDS_OFF_20MS (0x0 << 0) /* 00: 20 ms (default) */
+#define PIXPAPER_PFS_T_VDS_OFF_40MS (0x1 << 0) /* 01: 40 ms */
+#define PIXPAPER_PFS_T_VDS_OFF_60MS (0x2 << 0) /* 10: 60 ms */
+#define PIXPAPER_PFS_T_VDS_OFF_80MS (0x3 << 0) /* 11: 80 ms */
+#define PIXPAPER_PFS_T_VDPG_OFF_MASK \
+ (3 << 4) /* Bits 5-4: T_VDPG_OFF, VGP/VGN power-off sequence */
+#define PIXPAPER_PFS_T_VDPG_OFF_20MS (0x0 << 4) /* 00: 20 ms (default) */
+#define PIXPAPER_PFS_T_VDPG_OFF_40MS (0x1 << 4) /* 01: 40 ms */
+#define PIXPAPER_PFS_T_VDPG_OFF_60MS (0x2 << 4) /* 10: 60 ms */
+#define PIXPAPER_PFS_T_VDPG_OFF_80MS (0x3 << 4) /* 11: 80 ms */
+#define PIXPAPER_PFS_CONFIG1 \
+ (PIXPAPER_PFS_T_VDS_OFF_20MS | \
+	 PIXPAPER_PFS_T_VDPG_OFF_20MS) /* 0x00: Default 20 ms for VSP/VSN and VGP/VGN */
+
+/* Second Parameter */
+#define PIXPAPER_PFS_VGP_EXT_MASK \
+ (0xF << 0) /* Bits 3-0: VGP_EXT, VGP extension time */
+#define PIXPAPER_PFS_VGP_EXT_0MS (0x0 << 0) /* 0000: 0 ms */
+#define PIXPAPER_PFS_VGP_EXT_500MS (0x1 << 0) /* 0001: 500 ms */
+#define PIXPAPER_PFS_VGP_EXT_1000MS (0x2 << 0) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_VGP_EXT_1500MS (0x3 << 0) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_VGP_EXT_2000MS (0x4 << 0) /* 0100: 2000 ms (default) */
+#define PIXPAPER_PFS_VGP_EXT_2500MS (0x5 << 0) /* 0101: 2500 ms */
+#define PIXPAPER_PFS_VGP_EXT_3000MS (0x6 << 0) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_VGP_EXT_3500MS (0x7 << 0) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_VGP_EXT_4000MS (0x8 << 0) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_VGP_EXT_4500MS (0x9 << 0) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_VGP_EXT_5000MS (0xA << 0) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_VGP_EXT_5500MS (0xB << 0) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_VGP_EXT_6000MS (0xC << 0) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_VGP_EXT_6500MS (0xD << 0) /* 1101: 6500 ms */
+#define PIXPAPER_PFS_VGP_LEN_MASK \
+ (0xF << 4) /* Bits 7-4: VGP_LEN, VGP at 10V during power-off */
+#define PIXPAPER_PFS_VGP_LEN_0MS (0x0 << 4) /* 0000: 0 ms */
+#define PIXPAPER_PFS_VGP_LEN_500MS (0x1 << 4) /* 0001: 500 ms */
+#define PIXPAPER_PFS_VGP_LEN_1000MS (0x2 << 4) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_VGP_LEN_1500MS (0x3 << 4) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_VGP_LEN_2000MS (0x4 << 4) /* 0100: 2000 ms */
+#define PIXPAPER_PFS_VGP_LEN_2500MS (0x5 << 4) /* 0101: 2500 ms (default) */
+#define PIXPAPER_PFS_VGP_LEN_3000MS (0x6 << 4) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_VGP_LEN_3500MS (0x7 << 4) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_VGP_LEN_4000MS (0x8 << 4) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_VGP_LEN_4500MS (0x9 << 4) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_VGP_LEN_5000MS (0xA << 4) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_VGP_LEN_5500MS (0xB << 4) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_VGP_LEN_6000MS (0xC << 4) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_VGP_LEN_6500MS (0xD << 4) /* 1101: 6500 ms */
+#define PIXPAPER_PFS_CONFIG2 \
+ (PIXPAPER_PFS_VGP_EXT_1000MS | \
+	 PIXPAPER_PFS_VGP_LEN_2500MS) /* 0x52: VGP extension 1000 ms, VGP at 10V for 2500 ms */
+
+/* Third Parameter */
+#define PIXPAPER_PFS_XON_LEN_MASK \
+ (0xF << 0) /* Bits 3-0: XON_LEN, XON enable time */
+#define PIXPAPER_PFS_XON_LEN_0MS (0x0 << 0) /* 0000: 0 ms */
+#define PIXPAPER_PFS_XON_LEN_500MS (0x1 << 0) /* 0001: 500 ms */
+#define PIXPAPER_PFS_XON_LEN_1000MS (0x2 << 0) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_XON_LEN_1500MS (0x3 << 0) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_XON_LEN_2000MS (0x4 << 0) /* 0100: 2000 ms (default) */
+#define PIXPAPER_PFS_XON_LEN_2500MS (0x5 << 0) /* 0101: 2500 ms */
+#define PIXPAPER_PFS_XON_LEN_3000MS (0x6 << 0) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_XON_LEN_3500MS (0x7 << 0) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_XON_LEN_4000MS (0x8 << 0) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_XON_LEN_4500MS (0x9 << 0) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_XON_LEN_5000MS (0xA << 0) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_XON_LEN_5500MS (0xB << 0) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_XON_LEN_6000MS (0xC << 0) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_XON_DLY_MASK \
+ (0xF << 4) /* Bits 7-4: XON_DLY, XON delay time */
+#define PIXPAPER_PFS_XON_DLY_0MS (0x0 << 4) /* 0000: 0 ms */
+#define PIXPAPER_PFS_XON_DLY_500MS (0x1 << 4) /* 0001: 500 ms */
+#define PIXPAPER_PFS_XON_DLY_1000MS (0x2 << 4) /* 0010: 1000 ms */
+#define PIXPAPER_PFS_XON_DLY_1500MS (0x3 << 4) /* 0011: 1500 ms */
+#define PIXPAPER_PFS_XON_DLY_2000MS (0x4 << 4) /* 0100: 2000 ms (default) */
+#define PIXPAPER_PFS_XON_DLY_2500MS (0x5 << 4) /* 0101: 2500 ms */
+#define PIXPAPER_PFS_XON_DLY_3000MS (0x6 << 4) /* 0110: 3000 ms */
+#define PIXPAPER_PFS_XON_DLY_3500MS (0x7 << 4) /* 0111: 3500 ms */
+#define PIXPAPER_PFS_XON_DLY_4000MS (0x8 << 4) /* 1000: 4000 ms */
+#define PIXPAPER_PFS_XON_DLY_4500MS (0x9 << 4) /* 1001: 4500 ms */
+#define PIXPAPER_PFS_XON_DLY_5000MS (0xA << 4) /* 1010: 5000 ms */
+#define PIXPAPER_PFS_XON_DLY_5500MS (0xB << 4) /* 1011: 5500 ms */
+#define PIXPAPER_PFS_XON_DLY_6000MS (0xC << 4) /* 1100: 6000 ms */
+#define PIXPAPER_PFS_CONFIG3 \
+ (PIXPAPER_PFS_XON_LEN_2000MS | \
+ PIXPAPER_PFS_XON_DLY_2000MS) /* 0x44: XON enable and delay at 2000 ms */
+
+/* R06H BTST - Booster Soft Start Command */
+/* First Parameter */
+#define PIXPAPER_BTST_PHA_SFT_MASK \
+ (3 << 0) /* Bits 1-0: PHA_SFT, soft start period for phase A */
+#define PIXPAPER_BTST_PHA_SFT_10MS (0x0 << 0) /* 00: 10 ms (default) */
+#define PIXPAPER_BTST_PHA_SFT_20MS (0x1 << 0) /* 01: 20 ms */
+#define PIXPAPER_BTST_PHA_SFT_30MS (0x2 << 0) /* 10: 30 ms */
+#define PIXPAPER_BTST_PHA_SFT_40MS (0x3 << 0) /* 11: 40 ms */
+#define PIXPAPER_BTST_PHB_SFT_MASK \
+ (3 << 2) /* Bits 3-2: PHB_SFT, soft start period for phase B */
+#define PIXPAPER_BTST_PHB_SFT_10MS (0x0 << 2) /* 00: 10 ms (default) */
+#define PIXPAPER_BTST_PHB_SFT_20MS (0x1 << 2) /* 01: 20 ms */
+#define PIXPAPER_BTST_PHB_SFT_30MS (0x2 << 2) /* 10: 30 ms */
+#define PIXPAPER_BTST_PHB_SFT_40MS (0x3 << 2) /* 11: 40 ms */
+#define PIXPAPER_BTST_CONFIG1 \
+ (PIXPAPER_BTST_PHA_SFT_40MS | \
+ PIXPAPER_BTST_PHB_SFT_40MS) /* 0x0F: 40 ms for phase A and B */
+
+/* Second to Seventh Parameters (Driving Strength or Minimum OFF Time) */
+#define PIXPAPER_BTST_CONFIG2 0x0A /* Strength11 */
+#define PIXPAPER_BTST_CONFIG3 0x2F /* Period48 */
+#define PIXPAPER_BTST_CONFIG4 0x25 /* Strength38 */
+#define PIXPAPER_BTST_CONFIG5 0x22 /* Period35 */
+#define PIXPAPER_BTST_CONFIG6 0x2E /* Strength47 */
+#define PIXPAPER_BTST_CONFIG7 0x21 /* Period34 */
+
+/* R12H: DRF (Display Refresh) */
+#define PIXPAPER_DRF_VCOM_AC 0x00 /* AC VCOM: VCOM follows LUTC (default) */
+#define PIXPAPER_DRF_VCOM_DC 0x01 /* DC VCOM: VCOM fixed to VCOMDC */
+
+/* R30H PLL - PLL Control Register */
+/* First Parameter */
+#define PIXPAPER_PLL_FR_MASK (0x7 << 0) /* Bits 2-0: FR, frame rate */
+#define PIXPAPER_PLL_FR_12_5HZ (0x0 << 0) /* 000: 12.5 Hz */
+#define PIXPAPER_PLL_FR_25HZ (0x1 << 0) /* 001: 25 Hz */
+#define PIXPAPER_PLL_FR_50HZ (0x2 << 0) /* 010: 50 Hz (default) */
+#define PIXPAPER_PLL_FR_65HZ (0x3 << 0) /* 011: 65 Hz */
+#define PIXPAPER_PLL_FR_75HZ (0x4 << 0) /* 100: 75 Hz */
+#define PIXPAPER_PLL_FR_85HZ (0x5 << 0) /* 101: 85 Hz */
+#define PIXPAPER_PLL_FR_100HZ (0x6 << 0) /* 110: 100 Hz */
+#define PIXPAPER_PLL_FR_120HZ (0x7 << 0) /* 111: 120 Hz */
+#define PIXPAPER_PLL_DFR \
+ (1 << 3) /* Bit 3: Dynamic frame rate, 0=disabled (default), 1=enabled */
+#define PIXPAPER_PLL_CONFIG \
+ (PIXPAPER_PLL_FR_50HZ) /* 0x02: 50 Hz, dynamic frame rate disabled */
+
+/* R41H TSE - Temperature Sensor Calibration Register */
+/* First Parameter */
+#define PIXPAPER_TSE_TO_MASK \
+ (0xF << 0) /* Bits 3-0: TO[3:0], temperature offset */
+#define PIXPAPER_TSE_TO_POS_0C (0x0 << 0) /* 0000: +0°C (default) */
+#define PIXPAPER_TSE_TO_POS_0_5C (0x1 << 0) /* 0001: +0.5°C */
+#define PIXPAPER_TSE_TO_POS_1C (0x2 << 0) /* 0010: +1°C */
+#define PIXPAPER_TSE_TO_POS_1_5C (0x3 << 0) /* 0011: +1.5°C */
+#define PIXPAPER_TSE_TO_POS_2C (0x4 << 0) /* 0100: +2°C */
+#define PIXPAPER_TSE_TO_POS_2_5C (0x5 << 0) /* 0101: +2.5°C */
+#define PIXPAPER_TSE_TO_POS_3C (0x6 << 0) /* 0110: +3°C */
+#define PIXPAPER_TSE_TO_POS_3_5C (0x7 << 0) /* 0111: +3.5°C */
+#define PIXPAPER_TSE_TO_NEG_4C (0x8 << 0) /* 1000: -4°C */
+#define PIXPAPER_TSE_TO_NEG_3_5C (0x9 << 0) /* 1001: -3.5°C */
+#define PIXPAPER_TSE_TO_NEG_3C (0xA << 0) /* 1010: -3°C */
+#define PIXPAPER_TSE_TO_NEG_2_5C (0xB << 0) /* 1011: -2.5°C */
+#define PIXPAPER_TSE_TO_NEG_2C (0xC << 0) /* 1100: -2°C */
+#define PIXPAPER_TSE_TO_NEG_1_5C (0xD << 0) /* 1101: -1.5°C */
+#define PIXPAPER_TSE_TO_NEG_1C (0xE << 0) /* 1110: -1°C */
+#define PIXPAPER_TSE_TO_NEG_0_5C (0xF << 0) /* 1111: -0.5°C */
+#define PIXPAPER_TSE_TO_FINE_MASK \
+ (0x3 << 4) /* Bits 5-4: TO[5:4], fine adjustment for positive offsets */
+#define PIXPAPER_TSE_TO_FINE_0C (0x0 << 4) /* 00: +0.0°C (default) */
+#define PIXPAPER_TSE_TO_FINE_0_25C (0x1 << 4) /* 01: +0.25°C */
+#define PIXPAPER_TSE_ENABLE \
+ (0 << 7) /* Bit 7: TSE, 0=internal sensor enabled (default), 1=disabled (external) */
+#define PIXPAPER_TSE_DISABLE \
+ (1 << 7) /* Bit 7: TSE, 1=internal sensor disabled, use external */
+#define PIXPAPER_TSE_CONFIG \
+ (PIXPAPER_TSE_TO_POS_0C | PIXPAPER_TSE_TO_FINE_0C | \
+ PIXPAPER_TSE_ENABLE) /* 0x00: Internal sensor enabled, +0°C offset */
+
+/* R4DH */
+#define PIXPAPER_UNKNOWN_4D_CONFIG \
+ 0x78 /* This value is essential for initialization, derived from userspace examples. */
+
+/* R50H CDI - VCOM and DATA Interval Setting Register */
+/* First Parameter */
+#define PIXPAPER_CDI_INTERVAL_MASK \
+ (0xF << 0) /* Bits 3-0: CDI[3:0], VCOM and data interval (hsync) */
+#define PIXPAPER_CDI_17_HSYNC (0x0 << 0) /* 0000: 17 hsync */
+#define PIXPAPER_CDI_16_HSYNC (0x1 << 0) /* 0001: 16 hsync */
+#define PIXPAPER_CDI_15_HSYNC (0x2 << 0) /* 0010: 15 hsync */
+#define PIXPAPER_CDI_14_HSYNC (0x3 << 0) /* 0011: 14 hsync */
+#define PIXPAPER_CDI_13_HSYNC (0x4 << 0) /* 0100: 13 hsync */
+#define PIXPAPER_CDI_12_HSYNC (0x5 << 0) /* 0101: 12 hsync */
+#define PIXPAPER_CDI_11_HSYNC (0x6 << 0) /* 0110: 11 hsync */
+#define PIXPAPER_CDI_10_HSYNC (0x7 << 0) /* 0111: 10 hsync (default) */
+#define PIXPAPER_CDI_9_HSYNC (0x8 << 0) /* 1000: 9 hsync */
+#define PIXPAPER_CDI_8_HSYNC (0x9 << 0) /* 1001: 8 hsync */
+#define PIXPAPER_CDI_7_HSYNC (0xA << 0) /* 1010: 7 hsync */
+#define PIXPAPER_CDI_6_HSYNC (0xB << 0) /* 1011: 6 hsync */
+#define PIXPAPER_CDI_5_HSYNC (0xC << 0) /* 1100: 5 hsync */
+#define PIXPAPER_CDI_4_HSYNC (0xD << 0) /* 1101: 4 hsync */
+#define PIXPAPER_CDI_3_HSYNC (0xE << 0) /* 1110: 3 hsync */
+#define PIXPAPER_CDI_2_HSYNC (0xF << 0) /* 1111: 2 hsync */
+#define PIXPAPER_CDI_DDX \
+ (1 << 4) /* Bit 4: DDX, 0=grayscale mapping 0, 1=grayscale mapping 1 (default) */
+#define PIXPAPER_CDI_VBD_MASK \
+ (0x7 << 5) /* Bits 7-5: VBD[2:0], border data selection */
+#define PIXPAPER_CDI_VBD_FLOAT (0x0 << 5) /* 000: Floating (DDX=0 or 1) */
+#define PIXPAPER_CDI_VBD_GRAY3_DDX0 \
+ (0x1 << 5) /* 001: Gray3 (border_buf=011) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY2_DDX0 \
+ (0x2 << 5) /* 010: Gray2 (border_buf=010) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY1_DDX0 \
+ (0x3 << 5) /* 011: Gray1 (border_buf=001) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY0_DDX0 \
+ (0x4 << 5) /* 100: Gray0 (border_buf=000) when DDX=0 */
+#define PIXPAPER_CDI_VBD_GRAY0_DDX1 \
+ (0x0 << 5) /* 000: Gray0 (border_buf=000) when DDX=1 */
+#define PIXPAPER_CDI_VBD_GRAY1_DDX1 \
+ (0x1 << 5) /* 001: Gray1 (border_buf=001) when DDX=1 */
+#define PIXPAPER_CDI_VBD_GRAY2_DDX1 \
+ (0x2 << 5) /* 010: Gray2 (border_buf=010) when DDX=1 */
+#define PIXPAPER_CDI_VBD_GRAY3_DDX1 \
+ (0x3 << 5) /* 011: Gray3 (border_buf=011) when DDX=1 */
+#define PIXPAPER_CDI_VBD_FLOAT_DDX1 (0x4 << 5) /* 100: Floating when DDX=1 */
+#define PIXPAPER_CDI_CONFIG \
+ (PIXPAPER_CDI_10_HSYNC | PIXPAPER_CDI_DDX | \
+ PIXPAPER_CDI_VBD_GRAY1_DDX1) /* 0x37: 10 hsync, DDX=1, border Gray1 */
+
+/* R60H */
+#define PIXPAPER_UNKNOWN_60_CONFIG1 \
+ 0x02 /* This value is essential for initialization, derived from userspace examples. */
+#define PIXPAPER_UNKNOWN_60_CONFIG2 \
+ 0x02 /* This value is essential for initialization, derived from userspace examples. */
+
+/* R61H TRES - Resolution Setting Register */
+#define PIXPAPER_TRES_HRES_H \
+ ((PIXPAPER_PANEL_BUFFER_WIDTH >> 8) & \
+ 0xFF) /* HRES[9:8]: High byte of horizontal resolution (128) */
+#define PIXPAPER_TRES_HRES_L \
+ (PIXPAPER_PANEL_BUFFER_WIDTH & \
+ 0xFF) /* HRES[7:0]: Low byte of horizontal resolution (128 = 0x80) */
+#define PIXPAPER_TRES_VRES_H \
+ ((PIXPAPER_HEIGHT >> 8) & \
+ 0xFF) /* VRES[9:8]: High byte of vertical resolution (250) */
+#define PIXPAPER_TRES_VRES_L \
+ (PIXPAPER_HEIGHT & \
+ 0xFF) /* VRES[7:0]: Low byte of vertical resolution (250 = 0xFA) */
+
+/* R65H GSST - Gate/Source Start Setting Register */
+#define PIXPAPER_GSST_S_START 0x00 /* S_Start[7:0]: First source line (S0) */
+#define PIXPAPER_GSST_RESERVED 0x00 /* Reserved byte */
+#define PIXPAPER_GSST_G_START_H \
+ 0x00 /* G_Start[8]: High bit of first gate line (G0) */
+#define PIXPAPER_GSST_G_START_L \
+ 0x00 /* G_Start[7:0]: Low byte of first gate line (G0) */
+
+/* RB4H */
+#define PIXPAPER_UNKNOWN_B4_CONFIG \
+ 0xD0 /* This value is essential for initialization, derived from userspace examples. */
+
+/* RB5H */
+#define PIXPAPER_UNKNOWN_B5_CONFIG \
+ 0x03 /* This value is essential for initialization, derived from userspace examples. */
+
+/* RE0H */
+#define PIXPAPER_UNKNOWN_E0_CONFIG \
+ 0x00 /* This value is essential for initialization, derived from userspace examples. */
+
+/* RE3H PWS - Power Saving Register */
+/* First Parameter */
+#define PIXPAPER_PWS_VCOM_W_MASK \
+	(0xF << 4) /* Bits 7-4: VCOM_W[3:0], VCOM power-saving width (line periods) */
+#define PIXPAPER_PWS_VCOM_W_0 (0x0 << 4) /* 0000: 0 line periods */
+#define PIXPAPER_PWS_VCOM_W_1 (0x1 << 4) /* 0001: 1 line period */
+#define PIXPAPER_PWS_VCOM_W_2 (0x2 << 4) /* 0010: 2 line periods */
+#define PIXPAPER_PWS_VCOM_W_3 (0x3 << 4) /* 0011: 3 line periods */
+#define PIXPAPER_PWS_VCOM_W_4 (0x4 << 4) /* 0100: 4 line periods */
+#define PIXPAPER_PWS_VCOM_W_5 (0x5 << 4) /* 0101: 5 line periods */
+#define PIXPAPER_PWS_VCOM_W_6 (0x6 << 4) /* 0110: 6 line periods */
+#define PIXPAPER_PWS_VCOM_W_7 (0x7 << 4) /* 0111: 7 line periods */
+#define PIXPAPER_PWS_VCOM_W_8 (0x8 << 4) /* 1000: 8 line periods */
+#define PIXPAPER_PWS_VCOM_W_9 (0x9 << 4) /* 1001: 9 line periods */
+#define PIXPAPER_PWS_VCOM_W_10 (0xA << 4) /* 1010: 10 line periods */
+#define PIXPAPER_PWS_VCOM_W_11 (0xB << 4) /* 1011: 11 line periods */
+#define PIXPAPER_PWS_VCOM_W_12 (0xC << 4) /* 1100: 12 line periods */
+#define PIXPAPER_PWS_VCOM_W_13 (0xD << 4) /* 1101: 13 line periods */
+#define PIXPAPER_PWS_VCOM_W_14 (0xE << 4) /* 1110: 14 line periods */
+#define PIXPAPER_PWS_VCOM_W_15 (0xF << 4) /* 1111: 15 line periods */
+#define PIXPAPER_PWS_SD_W_MASK \
+ (0xF << 0) /* Bits 3-0: SD_W[3:0], source power-saving width (660 ns units) */
+#define PIXPAPER_PWS_SD_W_0 (0x0 << 0) /* 0000: 0 ns */
+#define PIXPAPER_PWS_SD_W_1 (0x1 << 0) /* 0001: 660 ns */
+#define PIXPAPER_PWS_SD_W_2 (0x2 << 0) /* 0010: 1320 ns */
+#define PIXPAPER_PWS_SD_W_3 (0x3 << 0) /* 0011: 1980 ns */
+#define PIXPAPER_PWS_SD_W_4 (0x4 << 0) /* 0100: 2640 ns */
+#define PIXPAPER_PWS_SD_W_5 (0x5 << 0) /* 0101: 3300 ns */
+#define PIXPAPER_PWS_SD_W_6 (0x6 << 0) /* 0110: 3960 ns */
+#define PIXPAPER_PWS_SD_W_7 (0x7 << 0) /* 0111: 4620 ns */
+#define PIXPAPER_PWS_SD_W_8 (0x8 << 0) /* 1000: 5280 ns */
+#define PIXPAPER_PWS_SD_W_9 (0x9 << 0) /* 1001: 5940 ns */
+#define PIXPAPER_PWS_SD_W_10 (0xA << 0) /* 1010: 6600 ns */
+#define PIXPAPER_PWS_SD_W_11 (0xB << 0) /* 1011: 7260 ns */
+#define PIXPAPER_PWS_SD_W_12 (0xC << 0) /* 1100: 7920 ns */
+#define PIXPAPER_PWS_SD_W_13 (0xD << 0) /* 1101: 8580 ns */
+#define PIXPAPER_PWS_SD_W_14 (0xE << 0) /* 1110: 9240 ns */
+#define PIXPAPER_PWS_SD_W_15 (0xF << 0) /* 1111: 9900 ns */
+#define PIXPAPER_PWS_CONFIG \
+ (PIXPAPER_PWS_VCOM_W_2 | \
+ PIXPAPER_PWS_SD_W_2) /* 0x22: VCOM 2 line periods (160 µs), source 1320 ns */
+
+/* RE7H */
+#define PIXPAPER_UNKNOWN_E7_CONFIG \
+ 0x1C /* This value is essential for initialization, derived from userspace examples. */
+
+/* RE9H */
+#define PIXPAPER_UNKNOWN_E9_CONFIG \
+ 0x01 /* This value is essential for initialization, derived from userspace examples. */
+
+MODULE_IMPORT_NS("DMA_BUF");
+
+/*
+ * The panel has a visible resolution of 122x250.
+ * However, the controller requires the horizontal resolution to be aligned to 128 pixels.
+ * No porch or sync timing values are provided in the datasheet, so we define minimal
+ * placeholder values to satisfy the DRM framework.
+ */
+
+/* Panel visible resolution */
+#define PIXPAPER_WIDTH 122
+#define PIXPAPER_HEIGHT 250
+
+/* Controller requires 128 horizontal pixels total (for memory alignment) */
+#define PIXPAPER_HTOTAL 128
+#define PIXPAPER_HFP 2
+#define PIXPAPER_HSYNC 2
+#define PIXPAPER_HBP (PIXPAPER_HTOTAL - PIXPAPER_WIDTH - PIXPAPER_HFP - PIXPAPER_HSYNC)
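+/* With the values above, HBP evaluates to 2: 122 + 2 + 2 + 2 = 128. */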
+
+/*
+ * According to the datasheet, the total vertical blanking must be 55 lines,
+ * regardless of how it is split between porches and sync.
+ * Here we allocate VFP=2, VSYNC=2, and VBP=51 to sum up to 55 lines.
+ * Total vertical lines = 250 (visible) + 55 (blanking) = 305.
+ */
+#define PIXPAPER_VTOTAL (250 + 55)
+#define PIXPAPER_VFP 2
+#define PIXPAPER_VSYNC 2
+#define PIXPAPER_VBP (55 - PIXPAPER_VFP - PIXPAPER_VSYNC)
+
+/*
+ * Pixel clock calculation:
+ * pixel_clock = htotal * vtotal * refresh_rate
+ * = 128 * 305 * 50
+ * = 1,952,000 Hz = 1952 kHz
+ */
+#define PIXPAPER_PIXEL_CLOCK 1952
+
+#define PIXPAPER_WIDTH_MM 24 /* approximate from 23.7046mm */
+#define PIXPAPER_HEIGHT_MM 49 /* approximate from 48.55mm */
+
+#define PIXPAPER_SPI_BITS_PER_WORD 8
+#define PIXPAPER_SPI_SPEED_DEFAULT 1000000
+
+#define PIXPAPER_PANEL_BUFFER_WIDTH 128
+#define PIXPAPER_PANEL_BUFFER_TWO_BYTES_PER_ROW (PIXPAPER_PANEL_BUFFER_WIDTH / 4)
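+/* 128 pixels at 2 bits each, 4 pixels per byte: 32 data bytes per row. */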
+
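+/*
+ * Channel thresholds used by pack_pixels_to_byte(): a channel below 60 is
+ * treated as dark and above 200 as bright; green above 180 (with red
+ * bright and blue dark) selects yellow instead of red.
+ */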
+#define PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL 60
+#define PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL 200
+#define PIXPAPER_COLOR_THRESHOLD_YELLOW_MIN_GREEN 180
+
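+/*
+ * Sticky error context passed through the SPI helpers: once errno_code is
+ * set, every subsequent helper call becomes a no-op, so long command/data
+ * sequences can be written linearly and checked once per logical step.
+ */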
+struct pixpaper_error_ctx {
+ int errno_code;
+};
+
+struct pixpaper_panel {
+ struct drm_device drm;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+
+ struct spi_device *spi;
+ struct gpio_desc *reset;
+ struct gpio_desc *busy;
+ struct gpio_desc *dc;
+};
+
+static inline struct pixpaper_panel *to_pixpaper_panel(struct drm_device *drm)
+{
+ return container_of(drm, struct pixpaper_panel, drm);
+}
+
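+/*
+ * Poll the BUSY GPIO until the controller reports ready. The logical line
+ * value reads 1 when the panel is idle, so we poll until it does, giving
+ * up after 10 seconds.
+ */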
+static void pixpaper_wait_for_panel(struct pixpaper_panel *panel)
+{
+ unsigned int timeout_ms = 10000;
+ unsigned long timeout_jiffies = jiffies + msecs_to_jiffies(timeout_ms);
+
+ usleep_range(1000, 1500);
+ while (gpiod_get_value_cansleep(panel->busy) != 1) {
+ if (time_after(jiffies, timeout_jiffies)) {
+ drm_warn(&panel->drm, "Busy wait timed out\n");
+ return;
+ }
+ usleep_range(100, 200);
+ }
+}
+
+static void pixpaper_spi_sync(struct spi_device *spi, struct spi_message *msg,
+ struct pixpaper_error_ctx *err)
+{
+ if (err->errno_code)
+ return;
+
+ int ret = spi_sync(spi, msg);
+
+ if (ret < 0)
+ err->errno_code = ret;
+}
+
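+/*
+ * 4-wire SPI protocol: the D/C GPIO selects the transfer phase, driven low
+ * for a command byte and high for the data bytes that follow it.
+ */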
+static void pixpaper_send_cmd(struct pixpaper_panel *panel, u8 cmd,
+ struct pixpaper_error_ctx *err)
+{
+ if (err->errno_code)
+ return;
+
+ struct spi_transfer xfer = {
+ .tx_buf = &cmd,
+ .len = 1,
+ };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ gpiod_set_value_cansleep(panel->dc, 0);
+ usleep_range(1, 5);
+ pixpaper_spi_sync(panel->spi, &msg, err);
+}
+
+static void pixpaper_send_data(struct pixpaper_panel *panel, u8 data,
+ struct pixpaper_error_ctx *err)
+{
+ if (err->errno_code)
+ return;
+
+ struct spi_transfer xfer = {
+ .tx_buf = &data,
+ .len = 1,
+ };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ gpiod_set_value_cansleep(panel->dc, 1);
+ usleep_range(1, 5);
+ pixpaper_spi_sync(panel->spi, &msg, err);
+}
+
+static int pixpaper_panel_hw_init(struct pixpaper_panel *panel)
+{
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+
+ gpiod_set_value_cansleep(panel->reset, 0);
+ msleep(50);
+ gpiod_set_value_cansleep(panel->reset, 1);
+ msleep(50);
+
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_4D, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_4D_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_PANEL_SETTING, &err);
+ pixpaper_send_data(panel, PIXPAPER_PSR_CONFIG, &err);
+ pixpaper_send_data(panel, PIXPAPER_PSR_CONFIG2, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_SETTING, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_CONFIG2, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSP_8_2V, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSPL_15V, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSN_4V, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWR_VSP_8_2V, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_OFF_SEQUENCE, &err);
+ pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG2, &err);
+ pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG3, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_BOOSTER_SOFT_START, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG2, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG3, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG4, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG5, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG6, &err);
+ pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG7, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_PLL_CONTROL, &err);
+ pixpaper_send_data(panel, PIXPAPER_PLL_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_TEMP_SENSOR_CALIB, &err);
+ pixpaper_send_data(panel, PIXPAPER_TSE_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_VCOM_INTERVAL, &err);
+ pixpaper_send_data(panel, PIXPAPER_CDI_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_60, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_60_CONFIG1, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_60_CONFIG2, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_RESOLUTION_SETTING, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_HRES_H, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_HRES_L, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_VRES_H, &err);
+ pixpaper_send_data(panel, PIXPAPER_TRES_VRES_L, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_GATE_SOURCE_START, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_S_START, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_RESERVED, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_G_START_H, &err);
+ pixpaper_send_data(panel, PIXPAPER_GSST_G_START_L, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E7, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E7_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_SAVING, &err);
+ pixpaper_send_data(panel, PIXPAPER_PWS_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E0, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E0_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_B4, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_B4_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_B5, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_B5_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E9, &err);
+ pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E9_CONFIG, &err);
+ if (err.errno_code)
+ goto init_fail;
+ pixpaper_wait_for_panel(panel);
+
+ return 0;
+
+init_fail:
+ drm_err(&panel->drm, "Hardware initialization failed (err=%d)\n",
+ err.errno_code);
+ return err.errno_code;
+}
+
+/*
+ * Convert framebuffer pixels to the panel's 2-bit format:
+ * 00 - Black
+ * 01 - White
+ * 10 - Yellow
+ * 11 - Red
+ */
+static u8 pack_pixels_to_byte(__le32 *src_pixels, int i, int j,
+ struct drm_framebuffer *fb)
+{
+ u8 packed_byte = 0;
+ int k;
+
+ for (k = 0; k < 4; k++) {
+ int current_pixel_x = j * 4 + k;
+ u8 two_bit_val;
+
+ if (current_pixel_x < PIXPAPER_WIDTH) {
+ u32 pixel_offset =
+ (i * (fb->pitches[0] / 4)) + current_pixel_x;
+ u32 pixel = le32_to_cpu(src_pixels[pixel_offset]);
+ u32 r = (pixel >> 16) & 0xFF;
+ u32 g = (pixel >> 8) & 0xFF;
+ u32 b = pixel & 0xFF;
+
+ if (r < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL &&
+ g < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL &&
+ b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) {
+ two_bit_val = 0b00;
+ } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ g > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ b > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL) {
+ two_bit_val = 0b01;
+ } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ g < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL &&
+ b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) {
+ two_bit_val = 0b11;
+ } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL &&
+ g > PIXPAPER_COLOR_THRESHOLD_YELLOW_MIN_GREEN &&
+ b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) {
+ two_bit_val = 0b10;
+ } else {
+ two_bit_val = 0b01;
+ }
+ } else {
+ two_bit_val = 0b01;
+ }
+
+ packed_byte |= two_bit_val << ((3 - k) * 2);
+ }
+
+ return packed_byte;
+}
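+
+/*
+ * Illustrative packing example (not executed): for a row starting with
+ * black, white, yellow, red, the first byte is 00 01 10 11 = 0x1B, with
+ * pixel 0 in bits 7:6 and pixel 3 in bits 1:0.
+ */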
+
+static int pixpaper_plane_helper_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *new_crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state = NULL;
+ int ret;
+
+ if (new_crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state,
+ new_crtc_state, DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING, false, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pixpaper_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+
+ if (!crtc_state->enable)
+ return 0;
+
+ return drm_atomic_helper_check_crtc_primary_plane(crtc_state);
+}
+
+static void pixpaper_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev);
+ struct drm_device *drm = &panel->drm;
+ int idx;
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_ON, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed to send PON command: %d\n", err.errno_code);
+ goto exit_drm_dev;
+ }
+
+ pixpaper_wait_for_panel(panel);
+
+ drm_dbg(drm, "Panel enabled and powered on\n");
+
+exit_drm_dev:
+ drm_dev_exit(idx);
+}
+
+static void pixpaper_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev);
+ struct drm_device *drm = &panel->drm;
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_OFF, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed to send POF command: %d\n", err.errno_code);
+ goto exit_drm_dev;
+ }
+ pixpaper_wait_for_panel(panel);
+
+ drm_dbg(drm, "Panel disabled\n");
+
+exit_drm_dev:
+ drm_dev_exit(idx);
+}
+
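+/*
+ * Full-frame update path: damage clipping is not used, each atomic update
+ * converts and retransmits the complete 128x250 window as one data start
+ * transmission followed by PON and DRF.
+ */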
+static void pixpaper_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state =
+ to_drm_shadow_plane_state(plane_state);
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev);
+
+ struct drm_device *drm = &panel->drm;
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct iosys_map map = shadow_plane_state->data[0];
+ void *vaddr = map.vaddr;
+ int i, j, idx;
+ __le32 *src_pixels = NULL;
+ struct pixpaper_error_ctx err = { .errno_code = 0 };
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ drm_dbg(drm, "Starting frame update (phys=%dx%d, buf_w=%d)\n",
+ PIXPAPER_WIDTH, PIXPAPER_HEIGHT, PIXPAPER_PANEL_BUFFER_WIDTH);
+
+ if (!fb || !plane_state->visible) {
+ drm_err_once(drm, "No framebuffer or plane not visible, skipping update\n");
+ goto update_cleanup;
+ }
+
+ src_pixels = (__le32 *)vaddr;
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_DATA_START_TRANSMISSION, &err);
+ if (err.errno_code)
+ goto update_cleanup;
+
+ pixpaper_wait_for_panel(panel);
+
+ for (i = 0; i < PIXPAPER_HEIGHT; i++) {
+ for (j = 0; j < PIXPAPER_PANEL_BUFFER_TWO_BYTES_PER_ROW; j++) {
+ u8 packed_byte =
+ pack_pixels_to_byte(src_pixels, i, j, fb);
+
+ pixpaper_wait_for_panel(panel);
+ pixpaper_send_data(panel, packed_byte, &err);
+ }
+ }
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_ON, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed to send PON command: %d\n", err.errno_code);
+ goto update_cleanup;
+ }
+ pixpaper_wait_for_panel(panel);
+
+ pixpaper_send_cmd(panel, PIXPAPER_CMD_DISPLAY_REFRESH, &err);
+ pixpaper_send_data(panel, PIXPAPER_DRF_VCOM_AC, &err);
+ if (err.errno_code) {
+ drm_err_once(drm, "Failed sending data after DRF: %d\n", err.errno_code);
+ goto update_cleanup;
+ }
+ pixpaper_wait_for_panel(panel);
+
+update_cleanup:
+ if (err.errno_code && err.errno_code != -ETIMEDOUT)
+ drm_err_once(drm, "Frame update function failed with error %d\n", err.errno_code);
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_display_mode pixpaper_mode = {
+ .clock = PIXPAPER_PIXEL_CLOCK,
+ .hdisplay = PIXPAPER_WIDTH,
+ .hsync_start = PIXPAPER_WIDTH + PIXPAPER_HFP,
+ .hsync_end = PIXPAPER_WIDTH + PIXPAPER_HFP + PIXPAPER_HSYNC,
+ .htotal = PIXPAPER_HTOTAL,
+ .vdisplay = PIXPAPER_HEIGHT,
+ .vsync_start = PIXPAPER_HEIGHT + PIXPAPER_VFP,
+ .vsync_end = PIXPAPER_HEIGHT + PIXPAPER_VFP + PIXPAPER_VSYNC,
+ .vtotal = PIXPAPER_VTOTAL,
+ .width_mm = PIXPAPER_WIDTH_MM,
+ .height_mm = PIXPAPER_HEIGHT_MM,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int pixpaper_connector_get_modes(struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &pixpaper_mode);
+}
+
+static const struct drm_plane_funcs pixpaper_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
+};
+
+static const struct drm_plane_helper_funcs pixpaper_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = pixpaper_plane_helper_atomic_check,
+ .atomic_update = pixpaper_plane_atomic_update,
+};
+
+static const struct drm_crtc_funcs pixpaper_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static enum drm_mode_status
+pixpaper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ if (mode->hdisplay == PIXPAPER_WIDTH &&
+ mode->vdisplay == PIXPAPER_HEIGHT) {
+ return MODE_OK;
+ }
+ return MODE_BAD;
+}
+
+static const struct drm_crtc_helper_funcs pixpaper_crtc_helper_funcs = {
+ .mode_valid = pixpaper_mode_valid,
+ .atomic_check = pixpaper_crtc_helper_atomic_check,
+ .atomic_enable = pixpaper_crtc_atomic_enable,
+ .atomic_disable = pixpaper_crtc_atomic_disable,
+};
+
+static const struct drm_encoder_funcs pixpaper_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static const struct drm_connector_funcs pixpaper_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs pixpaper_connector_helper_funcs = {
+ .get_modes = pixpaper_connector_get_modes,
+};
+
+DEFINE_DRM_GEM_FOPS(pixpaper_fops);
+
+static struct drm_driver pixpaper_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &pixpaper_fops,
+ .name = "pixpaper",
+ .desc = "DRM driver for PIXPAPER e-ink",
+ .major = 1,
+ .minor = 0,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_FBDEV_SHMEM_DRIVER_OPS,
+};
+
+static const struct drm_mode_config_funcs pixpaper_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int pixpaper_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct pixpaper_panel *panel;
+ struct drm_device *drm;
+ int ret;
+
+ panel = devm_drm_dev_alloc(dev, &pixpaper_drm_driver,
+ struct pixpaper_panel, drm);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
+ drm = &panel->drm;
+ panel->spi = spi;
+ spi_set_drvdata(spi, panel);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = PIXPAPER_SPI_BITS_PER_WORD;
+
+ if (!spi->max_speed_hz) {
+ drm_warn(drm,
+ "spi-max-frequency not specified in DT, using default %u Hz\n",
+ PIXPAPER_SPI_SPEED_DEFAULT);
+ spi->max_speed_hz = PIXPAPER_SPI_SPEED_DEFAULT;
+ }
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ drm_err(drm, "SPI setup failed: %d\n", ret);
+ return ret;
+ }
+
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ drm_err(drm, "Failed to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ panel->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->reset))
+ return PTR_ERR(panel->reset);
+
+ panel->busy = devm_gpiod_get(dev, "busy", GPIOD_IN);
+ if (IS_ERR(panel->busy))
+ return PTR_ERR(panel->busy);
+
+ panel->dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->dc))
+ return PTR_ERR(panel->dc);
+
+ ret = pixpaper_panel_hw_init(panel);
+ if (ret) {
+ drm_err(drm, "Panel hardware initialization failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+ drm->mode_config.funcs = &pixpaper_mode_config_funcs;
+ drm->mode_config.min_width = PIXPAPER_WIDTH;
+ drm->mode_config.max_width = PIXPAPER_WIDTH;
+ drm->mode_config.min_height = PIXPAPER_HEIGHT;
+ drm->mode_config.max_height = PIXPAPER_HEIGHT;
+
+ ret = drm_universal_plane_init(drm, &panel->plane, 1,
+ &pixpaper_plane_funcs,
+ (const uint32_t[]){ DRM_FORMAT_XRGB8888 },
+ 1, NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+ drm_plane_helper_add(&panel->plane, &pixpaper_plane_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &panel->crtc, &panel->plane, NULL,
+ &pixpaper_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(&panel->crtc, &pixpaper_crtc_helper_funcs);
+
+ ret = drm_encoder_init(drm, &panel->encoder, &pixpaper_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+ panel->encoder.possible_crtcs = drm_crtc_mask(&panel->crtc);
+
+ ret = drm_connector_init(drm, &panel->connector,
+ &pixpaper_connector_funcs,
+ DRM_MODE_CONNECTOR_SPI);
+ if (ret)
+ return ret;
+
+ drm_connector_helper_add(&panel->connector,
+ &pixpaper_connector_helper_funcs);
+ drm_connector_attach_encoder(&panel->connector, &panel->encoder);
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ drm_client_setup(drm, NULL);
+
+ return 0;
+}
+
+static void pixpaper_remove(struct spi_device *spi)
+{
+ struct pixpaper_panel *panel = spi_get_drvdata(spi);
+
+ if (!panel)
+ return;
+
+ drm_dev_unplug(&panel->drm);
+ drm_atomic_helper_shutdown(&panel->drm);
+}
+
+static const struct spi_device_id pixpaper_ids[] = { { "pixpaper", 0 }, {} };
+MODULE_DEVICE_TABLE(spi, pixpaper_ids);
+
+static const struct of_device_id pixpaper_dt_ids[] = {
+ { .compatible = "mayqueen,pixpaper" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pixpaper_dt_ids);
+
+static struct spi_driver pixpaper_spi_driver = {
+ .driver = {
+ .name = "pixpaper",
+ .of_match_table = pixpaper_dt_ids,
+ },
+ .id_table = pixpaper_ids,
+ .probe = pixpaper_probe,
+ .remove = pixpaper_remove,
+};
+
+module_spi_driver(pixpaper_spi_driver);
+
+MODULE_AUTHOR("LiangCheng Wang");
+MODULE_DESCRIPTION("DRM SPI driver for PIXPAPER e-ink panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 5c3b51eb0a97..c8270591afc7 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -36,6 +36,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -510,13 +511,12 @@ static void repaper_get_temperature(struct repaper_epd *epd)
epd->factored_stage_time = epd->stage_time * factor10x / 10;
}
-static int repaper_fb_dirty(struct drm_framebuffer *fb,
+static int repaper_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap,
struct drm_format_conv_state *fmtcnv_state)
{
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct repaper_epd *epd = drm_to_epd(fb->dev);
unsigned int dst_pitch = 0;
- struct iosys_map dst, vmap;
+ struct iosys_map dst;
struct drm_rect clip;
int idx, ret = 0;
u8 *buf = NULL;
@@ -546,8 +546,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb,
goto out_free;
iosys_map_set_vaddr(&dst, buf);
- iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
- drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, &vmap, fb, &clip, fmtcnv_state);
+ drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, &clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
@@ -832,16 +831,15 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_rect rect;
if (!pipe->crtc.state->active)
return;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- repaper_fb_dirty(state->fb, &fmtcnv_state);
-
- drm_format_conv_state_release(&fmtcnv_state);
+ repaper_fb_dirty(state->fb, shadow_plane_state->data,
+ &shadow_plane_state->fmtcnv_state);
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
@@ -849,6 +847,7 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.enable = repaper_pipe_enable,
.disable = repaper_pipe_disable,
.update = repaper_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
static int repaper_connector_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/tiny/sharp-memory.c b/drivers/gpu/drm/tiny/sharp-memory.c
index 03d2850310c4..64272cd0f6e2 100644
--- a/drivers/gpu/drm/tiny/sharp-memory.c
+++ b/drivers/gpu/drm/tiny/sharp-memory.c
@@ -126,28 +126,28 @@ static inline void sharp_memory_set_tx_buffer_addresses(u8 *buffer,
static void sharp_memory_set_tx_buffer_data(u8 *buffer,
struct drm_framebuffer *fb,
+ const struct iosys_map *vmap,
struct drm_rect clip,
u32 pitch,
struct drm_format_conv_state *fmtcnv_state)
{
int ret;
- struct iosys_map dst, vmap;
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
+ struct iosys_map dst;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return;
iosys_map_set_vaddr(&dst, buffer);
- iosys_map_set_vaddr(&vmap, dma_obj->vaddr);
- drm_fb_xrgb8888_to_mono(&dst, &pitch, &vmap, fb, &clip, fmtcnv_state);
+ drm_fb_xrgb8888_to_mono(&dst, &pitch, vmap, fb, &clip, fmtcnv_state);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
static int sharp_memory_update_display(struct sharp_memory_device *smd,
struct drm_framebuffer *fb,
+ const struct iosys_map *vmap,
struct drm_rect clip,
struct drm_format_conv_state *fmtcnv_state)
{
@@ -163,7 +163,7 @@ static int sharp_memory_update_display(struct sharp_memory_device *smd,
sharp_memory_set_tx_buffer_mode(&tx_buffer[0],
SHARP_MEMORY_DISPLAY_UPDATE_MODE, vcom);
sharp_memory_set_tx_buffer_addresses(&tx_buffer[1], clip, pitch);
- sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, clip, pitch, fmtcnv_state);
+ sharp_memory_set_tx_buffer_data(&tx_buffer[2], fb, vmap, clip, pitch, fmtcnv_state);
ret = sharp_memory_spi_write(smd->spi, tx_buffer, tx_buffer_size);
@@ -206,7 +206,8 @@ static int sharp_memory_clear_display(struct sharp_memory_device *smd)
return ret;
}
-static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect,
+static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, const struct iosys_map *vmap,
+ struct drm_rect *rect,
struct drm_format_conv_state *fmtconv_state)
{
struct drm_rect clip;
@@ -218,7 +219,7 @@ static void sharp_memory_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *r
clip.y1 = rect->y1;
clip.y2 = rect->y2;
- sharp_memory_update_display(smd, fb, clip, fmtconv_state);
+ sharp_memory_update_display(smd, fb, vmap, clip, fmtconv_state);
}
static int sharp_memory_plane_atomic_check(struct drm_plane *plane,
@@ -242,7 +243,7 @@ static void sharp_memory_plane_atomic_update(struct drm_plane *plane,
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_plane_state *plane_state = plane->state;
- struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct sharp_memory_device *smd;
struct drm_rect rect;
@@ -251,15 +252,15 @@ static void sharp_memory_plane_atomic_update(struct drm_plane *plane,
return;
if (drm_atomic_helper_damage_merged(old_state, plane_state, &rect))
- sharp_memory_fb_dirty(plane_state->fb, &rect, &fmtcnv_state);
-
- drm_format_conv_state_release(&fmtcnv_state);
+ sharp_memory_fb_dirty(plane_state->fb, shadow_plane_state->data,
+ &rect, &shadow_plane_state->fmtcnv_state);
}
static const struct drm_plane_helper_funcs sharp_memory_plane_helper_funcs = {
.prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = sharp_memory_plane_atomic_check,
.atomic_update = sharp_memory_plane_atomic_update,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
};
static bool sharp_memory_format_mod_supported(struct drm_plane *plane,
@@ -273,9 +274,7 @@ static const struct drm_plane_funcs sharp_memory_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
.format_mod_supported = sharp_memory_format_mod_supported,
};
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
index 6c77550c51af..d468f8322072 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c
@@ -251,7 +251,7 @@ static void ttm_bo_unreserve_basic(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
@@ -290,7 +290,7 @@ static void ttm_bo_unreserve_pinned(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
@@ -342,7 +342,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, resv);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
@@ -379,7 +379,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test)
dma_resv_fini(resv);
}
-static void ttm_bo_put_basic(struct kunit *test)
+static void ttm_bo_fini_basic(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_buffer_object *bo;
@@ -394,7 +394,7 @@ static void ttm_bo_put_basic(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
@@ -410,7 +410,7 @@ static void ttm_bo_put_basic(struct kunit *test)
dma_resv_unlock(bo->base.resv);
KUNIT_EXPECT_EQ(test, err, 0);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static const char *mock_name(struct dma_fence *f)
@@ -423,7 +423,7 @@ static const struct dma_fence_ops mock_fence_ops = {
.get_timeline_name = mock_name,
};
-static void ttm_bo_put_shared_resv(struct kunit *test)
+static void ttm_bo_fini_shared_resv(struct kunit *test)
{
struct ttm_test_devices *priv = test->priv;
struct ttm_buffer_object *bo;
@@ -437,7 +437,7 @@ static void ttm_bo_put_shared_resv(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
@@ -463,7 +463,7 @@ static void ttm_bo_put_shared_resv(struct kunit *test)
bo->type = ttm_bo_type_device;
bo->base.resv = external_resv;
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_pin_basic(struct kunit *test)
@@ -477,7 +477,7 @@ static void ttm_bo_pin_basic(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
@@ -512,7 +512,7 @@ static void ttm_bo_pin_unpin_resource(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
@@ -563,7 +563,7 @@ static void ttm_bo_multiple_pin_one_unpin(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
priv->ttm_dev = ttm_dev;
@@ -616,8 +616,8 @@ static struct kunit_case ttm_bo_test_cases[] = {
KUNIT_CASE(ttm_bo_unreserve_basic),
KUNIT_CASE(ttm_bo_unreserve_pinned),
KUNIT_CASE(ttm_bo_unreserve_bulk),
- KUNIT_CASE(ttm_bo_put_basic),
- KUNIT_CASE(ttm_bo_put_shared_resv),
+ KUNIT_CASE(ttm_bo_fini_basic),
+ KUNIT_CASE(ttm_bo_fini_shared_resv),
KUNIT_CASE(ttm_bo_pin_basic),
KUNIT_CASE(ttm_bo_pin_unpin_resource),
KUNIT_CASE(ttm_bo_multiple_pin_one_unpin),
diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
index 1bcc67977f48..2eda87882e65 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c
@@ -144,7 +144,7 @@ static void ttm_bo_init_reserved_sys_man(struct kunit *test)
drm_mm_node_allocated(&bo->base.vma_node.vm_node));
ttm_resource_free(bo, &bo->resource);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_init_reserved_mock_man(struct kunit *test)
@@ -186,7 +186,7 @@ static void ttm_bo_init_reserved_mock_man(struct kunit *test)
drm_mm_node_allocated(&bo->base.vma_node.vm_node));
ttm_resource_free(bo, &bo->resource);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}
@@ -221,7 +221,7 @@ static void ttm_bo_init_reserved_resv(struct kunit *test)
KUNIT_EXPECT_PTR_EQ(test, bo->base.resv, &resv);
ttm_resource_free(bo, &bo->resource);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_validate_basic(struct kunit *test)
@@ -265,7 +265,7 @@ static void ttm_bo_validate_basic(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo->resource->placement,
DRM_BUDDY_TOPDOWN_ALLOCATION);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
@@ -292,7 +292,7 @@ static void ttm_bo_validate_invalid_placement(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, -ENOMEM);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_validate_failed_alloc(struct kunit *test)
@@ -321,7 +321,7 @@ static void ttm_bo_validate_failed_alloc(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, -ENOMEM);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_bad_manager_fini(priv->ttm_dev, mem_type);
}
@@ -353,7 +353,7 @@ static void ttm_bo_validate_pinned(struct kunit *test)
ttm_bo_unpin(bo);
dma_resv_unlock(bo->base.resv);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static const struct ttm_bo_validate_test_case ttm_mem_type_cases[] = {
@@ -403,7 +403,7 @@ static void ttm_bo_validate_same_placement(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, 0);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
if (params->mem_type != TTM_PL_SYSTEM)
ttm_mock_manager_fini(priv->ttm_dev, params->mem_type);
@@ -452,7 +452,7 @@ static void ttm_bo_validate_busy_placement(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem);
KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo->priority]));
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_bad_manager_fini(priv->ttm_dev, fst_mem);
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
@@ -495,7 +495,7 @@ static void ttm_bo_validate_multihop(struct kunit *test)
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2);
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, final_mem);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
ttm_mock_manager_fini(priv->ttm_dev, tmp_mem);
@@ -567,7 +567,7 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test)
KUNIT_ASSERT_TRUE(test, flags & TTM_TT_FLAG_ZERO_ALLOC);
}
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static int threaded_dma_resv_signal(void *arg)
@@ -635,7 +635,7 @@ static void ttm_bo_validate_no_placement_not_signaled(struct kunit *test)
/* Make sure we have an idle object at this point */
dma_resv_wait_timeout(bo->base.resv, usage, false, MAX_SCHEDULE_TIMEOUT);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
@@ -652,7 +652,7 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
int err;
man = ttm_manager_type(priv->ttm_dev, mem_type);
- man->move = dma_fence_get_stub();
+ man->eviction_fences[0] = dma_fence_get_stub();
bo = ttm_bo_kunit_init(test, test->priv, size, NULL);
bo->type = bo_type;
@@ -668,8 +668,8 @@ static void ttm_bo_validate_move_fence_signaled(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, mem_type);
KUNIT_EXPECT_EQ(test, ctx.bytes_moved, size);
- ttm_bo_put(bo);
- dma_fence_put(man->move);
+ ttm_bo_fini(bo);
+ dma_fence_put(man->eviction_fences[0]);
}
static const struct ttm_bo_validate_test_case ttm_bo_validate_wait_cases[] = {
@@ -733,9 +733,9 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
spin_lock_init(&fence_lock);
man = ttm_manager_type(priv->ttm_dev, fst_mem);
- man->move = alloc_mock_fence(test);
+ man->eviction_fences[0] = alloc_mock_fence(test);
- task = kthread_create(threaded_fence_signal, man->move, "move-fence-signal");
+ task = kthread_create(threaded_fence_signal, man->eviction_fences[0], "move-fence-signal");
if (IS_ERR(task))
KUNIT_FAIL(test, "Couldn't create move fence signal task\n");
@@ -743,7 +743,8 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
err = ttm_bo_validate(bo, placement_val, &ctx_val);
dma_resv_unlock(bo->base.resv);
- dma_fence_wait_timeout(man->move, false, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_wait_timeout(man->eviction_fences[0], false, MAX_SCHEDULE_TIMEOUT);
+ man->eviction_fences[0] = NULL;
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size);
@@ -753,7 +754,7 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test)
else
KUNIT_EXPECT_EQ(test, bo->resource->mem_type, fst_mem);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, fst_mem);
ttm_mock_manager_fini(priv->ttm_dev, snd_mem);
}
@@ -807,8 +808,8 @@ static void ttm_bo_validate_happy_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, bos[1].resource->mem_type, mem_type);
for (i = 0; i < bo_no; i++)
- ttm_bo_put(&bos[i]);
- ttm_bo_put(bo_val);
+ ttm_bo_fini(&bos[i]);
+ ttm_bo_fini(bo_val);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
@@ -852,12 +853,12 @@ static void ttm_bo_validate_all_pinned_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, err, -ENOMEM);
- ttm_bo_put(bo_small);
+ ttm_bo_fini(bo_small);
ttm_bo_reserve(bo_big, false, false, NULL);
ttm_bo_unpin(bo_big);
dma_resv_unlock(bo_big->base.resv);
- ttm_bo_put(bo_big);
+ ttm_bo_fini(bo_big);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
@@ -916,13 +917,13 @@ static void ttm_bo_validate_allowed_only_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo_evictable->resource->mem_type, mem_type_evict);
KUNIT_EXPECT_EQ(test, ctx_val.bytes_moved, size * 2 + BO_SIZE);
- ttm_bo_put(bo);
- ttm_bo_put(bo_evictable);
+ ttm_bo_fini(bo);
+ ttm_bo_fini(bo_evictable);
ttm_bo_reserve(bo_pinned, false, false, NULL);
ttm_bo_unpin(bo_pinned);
dma_resv_unlock(bo_pinned->base.resv);
- ttm_bo_put(bo_pinned);
+ ttm_bo_fini(bo_pinned);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_multihop);
@@ -973,8 +974,8 @@ static void ttm_bo_validate_deleted_evict(struct kunit *test)
KUNIT_EXPECT_NULL(test, bo_big->ttm);
KUNIT_EXPECT_NULL(test, bo_big->resource);
- ttm_bo_put(bo_small);
- ttm_bo_put(bo_big);
+ ttm_bo_fini(bo_small);
+ ttm_bo_fini(bo_big);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}
@@ -995,7 +996,7 @@ static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
*/
ttm_device_fini(priv->ttm_dev);
- err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev, false, false);
+ err = ttm_device_kunit_init_bad_evict(test->priv, priv->ttm_dev);
KUNIT_ASSERT_EQ(test, err, 0);
ttm_mock_manager_init(priv->ttm_dev, mem_type, MANAGER_SIZE);
@@ -1025,8 +1026,8 @@ static void ttm_bo_validate_busy_domain_evict(struct kunit *test)
KUNIT_EXPECT_EQ(test, bo_init->resource->mem_type, mem_type);
KUNIT_EXPECT_NULL(test, bo_val->resource);
- ttm_bo_put(bo_init);
- ttm_bo_put(bo_val);
+ ttm_bo_fini(bo_init);
+ ttm_bo_fini(bo_val);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_bad_manager_fini(priv->ttm_dev, mem_type_evict);
@@ -1070,8 +1071,8 @@ static void ttm_bo_validate_evict_gutting(struct kunit *test)
KUNIT_ASSERT_NULL(test, bo_evict->resource);
KUNIT_ASSERT_TRUE(test, bo_evict->ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC);
- ttm_bo_put(bo_evict);
- ttm_bo_put(bo);
+ ttm_bo_fini(bo_evict);
+ ttm_bo_fini(bo);
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
}
@@ -1128,9 +1129,9 @@ static void ttm_bo_validate_recrusive_evict(struct kunit *test)
ttm_mock_manager_fini(priv->ttm_dev, mem_type);
ttm_mock_manager_fini(priv->ttm_dev, mem_type_evict);
- ttm_bo_put(bo_val);
- ttm_bo_put(bo_tt);
- ttm_bo_put(bo_mock);
+ ttm_bo_fini(bo_val);
+ ttm_bo_fini(bo_tt);
+ ttm_bo_fini(bo_mock);
}
static struct kunit_case ttm_bo_validate_test_cases[] = {
diff --git a/drivers/gpu/drm/ttm/tests/ttm_device_test.c b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
index 1621903818e5..2d55ad34fe48 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_device_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_device_test.c
@@ -7,11 +7,11 @@
#include <drm/ttm/ttm_placement.h>
#include "ttm_kunit_helpers.h"
+#include "../ttm_pool_internal.h"
struct ttm_device_test_case {
const char *description;
- bool use_dma_alloc;
- bool use_dma32;
+ unsigned int alloc_flags;
bool pools_init_expected;
};
@@ -25,7 +25,7 @@ static void ttm_device_init_basic(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_EXPECT_PTR_EQ(test, ttm_dev->funcs, &ttm_dev_funcs);
@@ -55,7 +55,7 @@ static void ttm_device_init_multiple(struct kunit *test)
KUNIT_ASSERT_NOT_NULL(test, ttm_devs);
for (i = 0; i < num_dev; i++) {
- err = ttm_device_kunit_init(priv, &ttm_devs[i], false, false);
+ err = ttm_device_kunit_init(priv, &ttm_devs[i], 0);
KUNIT_ASSERT_EQ(test, err, 0);
KUNIT_EXPECT_PTR_EQ(test, ttm_devs[i].dev_mapping,
@@ -81,7 +81,7 @@ static void ttm_device_fini_basic(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
man = ttm_manager_type(ttm_dev, TTM_PL_SYSTEM);
@@ -109,7 +109,7 @@ static void ttm_device_init_no_vma_man(struct kunit *test)
vma_man = drm->vma_offset_manager;
drm->vma_offset_manager = NULL;
- err = ttm_device_kunit_init(priv, ttm_dev, false, false);
+ err = ttm_device_kunit_init(priv, ttm_dev, 0);
KUNIT_EXPECT_EQ(test, err, -EINVAL);
/* Bring the manager back for a graceful cleanup */
@@ -119,26 +119,22 @@ static void ttm_device_init_no_vma_man(struct kunit *test)
static const struct ttm_device_test_case ttm_device_cases[] = {
{
.description = "No DMA allocations, no DMA32 required",
- .use_dma_alloc = false,
- .use_dma32 = false,
.pools_init_expected = false,
},
{
.description = "DMA allocations, DMA32 required",
- .use_dma_alloc = true,
- .use_dma32 = true,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC |
+ TTM_ALLOCATION_POOL_USE_DMA32,
.pools_init_expected = true,
},
{
.description = "No DMA allocations, DMA32 required",
- .use_dma_alloc = false,
- .use_dma32 = true,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA32,
.pools_init_expected = false,
},
{
.description = "DMA allocations, no DMA32 required",
- .use_dma_alloc = true,
- .use_dma32 = false,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
.pools_init_expected = true,
},
};
@@ -162,16 +158,13 @@ static void ttm_device_init_pools(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(priv, ttm_dev,
- params->use_dma_alloc,
- params->use_dma32);
+ err = ttm_device_kunit_init(priv, ttm_dev, params->alloc_flags);
KUNIT_ASSERT_EQ(test, err, 0);
pool = &ttm_dev->pool;
KUNIT_ASSERT_NOT_NULL(test, pool);
KUNIT_EXPECT_PTR_EQ(test, pool->dev, priv->dev);
- KUNIT_EXPECT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
- KUNIT_EXPECT_EQ(test, pool->use_dma32, params->use_dma32);
+ KUNIT_EXPECT_EQ(test, pool->alloc_flags, params->alloc_flags);
if (params->pools_init_expected) {
for (int i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
@@ -181,7 +174,7 @@ static void ttm_device_init_pools(struct kunit *test)
KUNIT_EXPECT_EQ(test, pt.caching, i);
KUNIT_EXPECT_EQ(test, pt.order, j);
- if (params->use_dma_alloc)
+ if (ttm_pool_uses_dma_alloc(pool))
KUNIT_ASSERT_FALSE(test,
list_empty(&pt.pages));
}
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
index 7aaf0d1395ff..7b533e4e1e04 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -117,8 +117,7 @@ static void bad_evict_flags(struct ttm_buffer_object *bo,
static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
struct ttm_device *ttm,
- bool use_dma_alloc,
- bool use_dma32,
+ unsigned int alloc_flags,
struct ttm_device_funcs *funcs)
{
struct drm_device *drm = priv->drm;
@@ -127,7 +126,7 @@ static int ttm_device_kunit_init_with_funcs(struct ttm_test_devices *priv,
err = ttm_device_init(ttm, funcs, drm->dev,
drm->anon_inode->i_mapping,
drm->vma_offset_manager,
- use_dma_alloc, use_dma32);
+ alloc_flags);
return err;
}
@@ -143,11 +142,10 @@ EXPORT_SYMBOL_GPL(ttm_dev_funcs);
int ttm_device_kunit_init(struct ttm_test_devices *priv,
struct ttm_device *ttm,
- bool use_dma_alloc,
- bool use_dma32)
+ unsigned int alloc_flags)
{
- return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
- use_dma32, &ttm_dev_funcs);
+ return ttm_device_kunit_init_with_funcs(priv, ttm, alloc_flags,
+ &ttm_dev_funcs);
}
EXPORT_SYMBOL_GPL(ttm_device_kunit_init);
@@ -161,12 +159,10 @@ struct ttm_device_funcs ttm_dev_funcs_bad_evict = {
EXPORT_SYMBOL_GPL(ttm_dev_funcs_bad_evict);
int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
- struct ttm_device *ttm,
- bool use_dma_alloc,
- bool use_dma32)
+ struct ttm_device *ttm)
{
- return ttm_device_kunit_init_with_funcs(priv, ttm, use_dma_alloc,
- use_dma32, &ttm_dev_funcs_bad_evict);
+ return ttm_device_kunit_init_with_funcs(priv, ttm, 0,
+ &ttm_dev_funcs_bad_evict);
}
EXPORT_SYMBOL_GPL(ttm_device_kunit_init_bad_evict);
@@ -252,7 +248,7 @@ struct ttm_test_devices *ttm_test_devices_all(struct kunit *test)
ttm_dev = kunit_kzalloc(test, sizeof(*ttm_dev), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, ttm_dev);
- err = ttm_device_kunit_init(devs, ttm_dev, false, false);
+ err = ttm_device_kunit_init(devs, ttm_dev, 0);
KUNIT_ASSERT_EQ(test, err, 0);
devs->ttm_dev = ttm_dev;
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
index c7da23232ffa..f8402b979d05 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.h
@@ -28,12 +28,9 @@ struct ttm_test_devices {
/* Building blocks for test-specific init functions */
int ttm_device_kunit_init(struct ttm_test_devices *priv,
struct ttm_device *ttm,
- bool use_dma_alloc,
- bool use_dma32);
+ unsigned int alloc_flags);
int ttm_device_kunit_init_bad_evict(struct ttm_test_devices *priv,
- struct ttm_device *ttm,
- bool use_dma_alloc,
- bool use_dma32);
+ struct ttm_device *ttm);
struct ttm_buffer_object *ttm_bo_kunit_init(struct kunit *test,
struct ttm_test_devices *devs,
size_t size,
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
index d7eb6471f2ed..dd395229e388 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
@@ -4,6 +4,7 @@
*/
#include <linux/export.h>
+#include <linux/module.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index 8ade53371f72..11c92bd75779 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -8,11 +8,12 @@
#include <drm/ttm/ttm_pool.h>
#include "ttm_kunit_helpers.h"
+#include "../ttm_pool_internal.h"
struct ttm_pool_test_case {
const char *description;
unsigned int order;
- bool use_dma_alloc;
+ unsigned int alloc_flags;
};
struct ttm_pool_test_priv {
@@ -86,7 +87,7 @@ static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -113,12 +114,12 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
{
.description = "One page, with coherent DMA mappings enabled",
.order = 0,
- .use_dma_alloc = true,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
},
{
.description = "Above the allocation limit, with coherent DMA mappings enabled",
.order = MAX_PAGE_ORDER + 1,
- .use_dma_alloc = true,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
},
};
@@ -150,12 +151,11 @@ static void ttm_pool_alloc_basic(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
- false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->alloc_flags);
KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
- KUNIT_ASSERT_EQ(test, pool->use_dma_alloc, params->use_dma_alloc);
+ KUNIT_ASSERT_EQ(test, pool->alloc_flags, params->alloc_flags);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -165,14 +165,14 @@ static void ttm_pool_alloc_basic(struct kunit *test)
last_page = tt->pages[tt->num_pages - 1];
if (params->order <= MAX_PAGE_ORDER) {
- if (params->use_dma_alloc) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
} else {
KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
}
} else {
- if (params->use_dma_alloc) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NULL(test, (void *)last_page->private);
} else {
@@ -218,7 +218,7 @@ static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -348,7 +348,7 @@ static void ttm_pool_free_dma_alloc(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
ttm_pool_alloc(pool, tt, &simple_ctx);
pt = &pool->caching[caching].orders[order];
@@ -379,7 +379,7 @@ static void ttm_pool_free_no_dma_alloc(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, 0);
ttm_pool_alloc(pool, tt, &simple_ctx);
pt = &pool->caching[caching].orders[order];
diff --git a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
index e6ea2bd01f07..c0e4e35e0442 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_resource_test.c
@@ -207,6 +207,7 @@ static void ttm_resource_manager_init_basic(struct kunit *test)
struct ttm_resource_test_priv *priv = test->priv;
struct ttm_resource_manager *man;
size_t size = SZ_16K;
+ int i;
man = kunit_kzalloc(test, sizeof(*man), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, man);
@@ -216,8 +217,8 @@ static void ttm_resource_manager_init_basic(struct kunit *test)
KUNIT_ASSERT_PTR_EQ(test, man->bdev, priv->devs->ttm_dev);
KUNIT_ASSERT_EQ(test, man->size, size);
KUNIT_ASSERT_EQ(test, man->usage, 0);
- KUNIT_ASSERT_NULL(test, man->move);
- KUNIT_ASSERT_NOT_NULL(test, &man->move_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++)
+ KUNIT_ASSERT_NULL(test, man->eviction_fences[i]);
for (int i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[i]));
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f4d9e68b21e7..bd27607f8076 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -31,6 +31,8 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <drm/drm_print.h>
+#include <drm/ttm/ttm_allocation.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
@@ -318,18 +320,17 @@ static void ttm_bo_release(struct kref *kref)
bo->destroy(bo);
}
-/**
- * ttm_bo_put
- *
- * @bo: The buffer object.
- *
- * Unreference a buffer object.
- */
+/* TODO: remove! */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
kref_put(&bo->kref, ttm_bo_release);
}
-EXPORT_SYMBOL(ttm_bo_put);
+
+void ttm_bo_fini(struct ttm_buffer_object *bo)
+{
+ ttm_bo_put(bo);
+}
+EXPORT_SYMBOL(ttm_bo_fini);
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
@@ -658,34 +659,35 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unpin);
/*
- * Add the last move fence to the BO as kernel dependency and reserve a new
- * fence slot.
+ * Add the pipelined eviction fences to the BO as a kernel dependency and reserve new
+ * fence slots.
*/
-static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
- struct ttm_resource_manager *man,
- bool no_wait_gpu)
+static int ttm_bo_add_pipelined_eviction_fences(struct ttm_buffer_object *bo,
+ struct ttm_resource_manager *man,
+ bool no_wait_gpu)
{
struct dma_fence *fence;
- int ret;
+ int i;
- spin_lock(&man->move_lock);
- fence = dma_fence_get(man->move);
- spin_unlock(&man->move_lock);
-
- if (!fence)
- return 0;
+ spin_lock(&man->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ fence = man->eviction_fences[i];
+ if (!fence)
+ continue;
- if (no_wait_gpu) {
- ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
- dma_fence_put(fence);
- return ret;
+ if (no_wait_gpu) {
+ if (!dma_fence_is_signaled(fence)) {
+ spin_unlock(&man->eviction_lock);
+ return -EBUSY;
+ }
+ } else {
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
+ }
}
+ spin_unlock(&man->eviction_lock);
- dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
-
- ret = dma_resv_reserve_fences(bo->base.resv, 1);
- dma_fence_put(fence);
- return ret;
+ /* TODO: this call should be removed. */
+ return dma_resv_reserve_fences(bo->base.resv, 1);
}
/**
@@ -718,7 +720,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
int i, ret;
ticket = dma_resv_locking_ctx(bo->base.resv);
- ret = dma_resv_reserve_fences(bo->base.resv, 1);
+ ret = dma_resv_reserve_fences(bo->base.resv, TTM_NUM_MOVE_FENCES);
if (unlikely(ret))
return ret;
@@ -757,7 +759,7 @@ static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
return ret;
}
- ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
+ ret = ttm_bo_add_pipelined_eviction_fences(bo, man, ctx->no_wait_gpu);
if (unlikely(ret)) {
ttm_resource_free(bo, res);
if (ret == -EBUSY)
@@ -878,7 +880,8 @@ bounce:
/* For backward compatibility with userspace */
if (ret == -ENOSPC)
- return -ENOMEM;
+ return bo->bdev->alloc_flags & TTM_ALLOCATION_PROPAGATE_ENOSPC ?
+ ret : -ENOMEM;
/*
* We might need to add a TTM.
@@ -1283,3 +1286,18 @@ int ttm_bo_populate(struct ttm_buffer_object *bo,
return 0;
}
EXPORT_SYMBOL(ttm_bo_populate);
+
+int ttm_bo_setup_export(struct ttm_buffer_object *bo,
+ struct ttm_operation_ctx *ctx)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(bo, false, false, NULL);
+ if (ret != 0)
+ return ret;
+
+ ret = ttm_bo_populate(bo, ctx);
+ ttm_bo_unreserve(bo);
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_setup_export);
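With ttm_bo_put() unexported (and marked for removal), ttm_bo_fini() becomes the call drivers use to drop their final reference. A minimal sketch of a driver's GEM free callback under this API; my_gem_free() and struct my_bo are hypothetical names, not part of this series:

	#include <drm/ttm/ttm_bo.h>

	static void my_gem_free(struct drm_gem_object *obj)
	{
		struct my_bo *bo = container_of(obj, struct my_bo, ttm.base);

		/* Drops the final reference; ttm_bo_release() runs at zero. */
		ttm_bo_fini(&bo->ttm);
	}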
diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h
index 9d8b747a34db..e0d48eac74b0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_internal.h
+++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h
@@ -55,4 +55,6 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
return bo;
}
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index acbbca9d5c92..2ff35d55e462 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -258,7 +258,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
- ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
+ ret = dma_resv_reserve_fences(&fbo->base.base._resv, TTM_NUM_MOVE_FENCES);
if (ret) {
dma_resv_unlock(&fbo->base.base._resv);
kfree(fbo);
@@ -646,20 +646,44 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
{
struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *from;
+ struct dma_fence *tmp;
+ int i;
from = ttm_manager_type(bdev, bo->resource->mem_type);
/**
* BO doesn't have a TTM we need to bind/unbind. Just remember
- * this eviction and free up the allocation
+ * this eviction and free up the allocation.
+ * The fence will be saved in the first free slot or in the slot
+ * already used to store a fence from the same context. Since
+ * drivers can't use more than TTM_NUM_MOVE_FENCES fence contexts
+ * for evictions, we should always find a slot to use.
*/
- spin_lock(&from->move_lock);
- if (!from->move || dma_fence_is_later(fence, from->move)) {
- dma_fence_put(from->move);
- from->move = dma_fence_get(fence);
+ spin_lock(&from->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ tmp = from->eviction_fences[i];
+ if (!tmp)
+ break;
+ if (fence->context != tmp->context)
+ continue;
+ if (dma_fence_is_later(fence, tmp)) {
+ dma_fence_put(tmp);
+ break;
+ }
+ goto unlock;
+ }
+ if (i < TTM_NUM_MOVE_FENCES) {
+ from->eviction_fences[i] = dma_fence_get(fence);
+ } else {
+ WARN(1, "not enough fence slots for all fence contexts");
+ spin_unlock(&from->eviction_lock);
+ dma_fence_wait(fence, false);
+ goto end;
}
- spin_unlock(&from->move_lock);
+unlock:
+ spin_unlock(&from->eviction_lock);
+end:
ttm_resource_free(bo, &bo->resource);
}
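The slot policy above is easier to see in isolation. A simplified userspace model, assuming NUM_SLOTS stands in for TTM_NUM_MOVE_FENCES and that slots fill from the front without holes (fences are replaced, never cleared, so that invariant holds); within a context the kernel additionally keeps only the later fence via dma_fence_is_later():

	#define NUM_SLOTS 2 /* stands in for TTM_NUM_MOVE_FENCES */

	struct fence { unsigned long context; };

	/* Returns the first free slot, or the slot already holding a fence
	 * from f's context. -1 means no usable slot: the kernel side WARNs
	 * and falls back to waiting, since drivers promise to use at most
	 * NUM_SLOTS fence contexts for evictions. */
	static int pick_slot(struct fence *slots[NUM_SLOTS], const struct fence *f)
	{
		int i;

		for (i = 0; i < NUM_SLOTS; i++)
			if (!slots[i] || slots[i]->context == f->context)
				return i;
		return -1;
	}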
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index c3e2fcbdd2cc..9a51afaf0749 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -31,6 +31,7 @@
#include <linux/export.h>
#include <linux/mm.h>
+#include <drm/ttm/ttm_allocation.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
@@ -198,8 +199,7 @@ EXPORT_SYMBOL(ttm_device_swapout);
* @dev: The core kernel device pointer for DMA mappings and allocations.
* @mapping: The address space to use for this bo.
* @vma_manager: A pointer to a vma manager.
- * @use_dma_alloc: If coherent DMA allocation API should be used.
- * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
+ * @alloc_flags: TTM_ALLOCATION_* flags.
*
* Initializes a struct ttm_device:
* Returns:
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(ttm_device_swapout);
int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
struct device *dev, struct address_space *mapping,
struct drm_vma_offset_manager *vma_manager,
- bool use_dma_alloc, bool use_dma32)
+ unsigned int alloc_flags)
{
struct ttm_global *glob = &ttm_glob;
int ret, nid;
@@ -227,6 +227,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
return -ENOMEM;
}
+ bdev->alloc_flags = alloc_flags;
bdev->funcs = funcs;
ttm_sys_man_init(bdev);
@@ -236,7 +237,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
else
nid = NUMA_NO_NODE;
- ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);
+ ttm_pool_init(&bdev->pool, dev, nid, alloc_flags);
bdev->vma_manager = vma_manager;
spin_lock_init(&bdev->lru_lock);
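At a call site the conversion is mechanical: the two booleans collapse into one flags word. A hedged sketch (xdev and my_ttm_funcs are placeholder driver names; the flag name comes from this series' ttm_allocation.h):

	/* Before: ttm_device_init(&xdev->bdev, &my_ttm_funcs, dev, mapping,
	 *                         vma_manager, true, false); */
	ret = ttm_device_init(&xdev->bdev, &my_ttm_funcs, dev, mapping,
			      vma_manager,
			      TTM_ALLOCATION_POOL_USE_DMA_ALLOC);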
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index b3fffe7b5062..aa137ead5cc5 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -74,7 +74,8 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
#endif /* CONFIG_UML */
#endif /* __i386__ || __x86_64__ */
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
- defined(__powerpc__) || defined(__mips__) || defined(__loongarch__)
+ defined(__powerpc__) || defined(__mips__) || defined(__loongarch__) || \
+ defined(__riscv)
if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index baf27c70a419..18b6db015619 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -48,6 +48,7 @@
#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
+#include "ttm_pool_internal.h"
#ifdef CONFIG_FAULT_INJECTION
#include <linux/fault-inject.h>
@@ -135,6 +136,7 @@ static DECLARE_RWSEM(pool_shrink_rwsem);
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
unsigned int order)
{
+ const unsigned int beneficial_order = ttm_pool_beneficial_order(pool);
unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
struct ttm_pool_dma *dma;
struct page *p;
@@ -148,7 +150,14 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN |
__GFP_THISNODE;
- if (!pool->use_dma_alloc) {
+ /*
+ * Do not add latency to the allocation path for allocation orders
+ * the device told us bring no additional performance gain.
+ */
+ if (beneficial_order && order > beneficial_order)
+ gfp_flags &= ~__GFP_DIRECT_RECLAIM;
+
+ if (!ttm_pool_uses_dma_alloc(pool)) {
p = alloc_pages_node(pool->nid, gfp_flags, order);
if (p)
p->private = order;
@@ -200,7 +209,7 @@ static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
set_pages_wb(p, 1 << order);
#endif
- if (!pool || !pool->use_dma_alloc) {
+ if (!pool || !ttm_pool_uses_dma_alloc(pool)) {
__free_pages(p, order);
return;
}
@@ -243,7 +252,7 @@ static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
{
dma_addr_t addr;
- if (pool->use_dma_alloc) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
struct ttm_pool_dma *dma = (void *)p->private;
addr = dma->addr;
@@ -265,7 +274,7 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
unsigned int num_pages)
{
/* Unmapped while freeing the page */
- if (pool->use_dma_alloc)
+ if (ttm_pool_uses_dma_alloc(pool))
return;
dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
@@ -339,7 +348,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
enum ttm_caching caching,
unsigned int order)
{
- if (pool->use_dma_alloc)
+ if (ttm_pool_uses_dma_alloc(pool))
return &pool->caching[caching].orders[order];
#ifdef CONFIG_X86
@@ -348,7 +357,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
- if (pool->use_dma32)
+ if (ttm_pool_uses_dma32(pool))
return &global_dma32_write_combined[order];
return &global_write_combined[order];
@@ -356,7 +365,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
if (pool->nid != NUMA_NO_NODE)
return &pool->caching[caching].orders[order];
- if (pool->use_dma32)
+ if (ttm_pool_uses_dma32(pool))
return &global_dma32_uncached[order];
return &global_uncached[order];
@@ -396,7 +405,7 @@ static unsigned int ttm_pool_shrink(void)
/* Return the allocation order based for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
- if (pool->use_dma_alloc) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
struct ttm_pool_dma *dma = (void *)p->private;
return dma->vaddr & ~PAGE_MASK;
@@ -719,7 +728,7 @@ static int __ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
if (ctx->gfp_retry_mayfail)
gfp_flags |= __GFP_RETRY_MAYFAIL;
- if (pool->use_dma32)
+ if (ttm_pool_uses_dma32(pool))
gfp_flags |= GFP_DMA32;
else
gfp_flags |= GFP_HIGHUSER;
@@ -977,7 +986,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
return -EINVAL;
if ((!ttm_backup_bytes_avail() && !flags->purge) ||
- pool->use_dma_alloc || ttm_tt_is_backed_up(tt))
+ ttm_pool_uses_dma_alloc(pool) || ttm_tt_is_backed_up(tt))
return -EBUSY;
#ifdef CONFIG_X86
@@ -1014,7 +1023,7 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
if (flags->purge)
return shrunken;
- if (pool->use_dma32)
+ if (ttm_pool_uses_dma32(pool))
gfp = GFP_DMA32;
else
gfp = GFP_HIGHUSER;
@@ -1058,22 +1067,20 @@ long ttm_pool_backup(struct ttm_pool *pool, struct ttm_tt *tt,
* @pool: the pool to initialize
* @dev: device for DMA allocations and mappings
* @nid: NUMA node to use for allocations
- * @use_dma_alloc: true if coherent DMA alloc should be used
- * @use_dma32: true if GFP_DMA32 should be used
+ * @alloc_flags: TTM_ALLOCATION_POOL_* flags
*
* Initialize the pool and its pool types.
*/
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
- int nid, bool use_dma_alloc, bool use_dma32)
+ int nid, unsigned int alloc_flags)
{
unsigned int i, j;
- WARN_ON(!dev && use_dma_alloc);
+ WARN_ON(!dev && ttm_pool_uses_dma_alloc(pool));
pool->dev = dev;
pool->nid = nid;
- pool->use_dma_alloc = use_dma_alloc;
- pool->use_dma32 = use_dma32;
+ pool->alloc_flags = alloc_flags;
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
for (j = 0; j < NR_PAGE_ORDERS; ++j) {
@@ -1239,7 +1246,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) {
+ if (!ttm_pool_uses_dma_alloc(pool) && pool->nid == NUMA_NO_NODE) {
seq_puts(m, "unused\n");
return 0;
}
@@ -1250,7 +1257,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
if (!ttm_pool_select_type(pool, i, 0))
continue;
- if (pool->use_dma_alloc)
+ if (ttm_pool_uses_dma_alloc(pool))
seq_puts(m, "DMA ");
else
seq_printf(m, "N%d ", pool->nid);
diff --git a/drivers/gpu/drm/ttm/ttm_pool_internal.h b/drivers/gpu/drm/ttm/ttm_pool_internal.h
new file mode 100644
index 000000000000..82c4b7e56a99
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_pool_internal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright (c) 2025 Valve Corporation */
+
+#ifndef _TTM_POOL_INTERNAL_H_
+#define _TTM_POOL_INTERNAL_H_
+
+#include <drm/ttm/ttm_allocation.h>
+#include <drm/ttm/ttm_pool.h>
+
+static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
+{
+ return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC;
+}
+
+static inline bool ttm_pool_uses_dma32(struct ttm_pool *pool)
+{
+ return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA32;
+}
+
+static inline unsigned int ttm_pool_beneficial_order(struct ttm_pool *pool)
+{
+ return pool->alloc_flags & 0xff;
+}
+
+#endif
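These helpers imply a packing worth spelling out: the low byte of alloc_flags carries the largest page order the device reports as beneficial (consumed in ttm_pool_alloc_page() above), so the TTM_ALLOCATION_POOL_* bits presumably live above bit 7. A hedged sketch of composing the word, under that assumption:

	/* Order 4 (64 KiB with 4 KiB pages) as the largest useful order;
	 * the 0xff mask matches ttm_pool_beneficial_order() above. */
	unsigned int alloc_flags = TTM_ALLOCATION_POOL_USE_DMA32 | 4;

	ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, alloc_flags);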
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index e2c82ad07eb4..f5aa29dc6ec0 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -34,6 +34,7 @@
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_print.h>
#include <drm/drm_util.h>
/* Detach the cursor from the bulk move list*/
@@ -523,14 +524,15 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
{
unsigned i;
- spin_lock_init(&man->move_lock);
man->bdev = bdev;
man->size = size;
man->usage = 0;
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
INIT_LIST_HEAD(&man->lru[i]);
- man->move = NULL;
+ spin_lock_init(&man->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++)
+ man->eviction_fences[i] = NULL;
}
EXPORT_SYMBOL(ttm_resource_manager_init);
@@ -551,7 +553,7 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
.no_wait_gpu = false,
};
struct dma_fence *fence;
- int ret;
+ int ret, i;
do {
ret = ttm_bo_evict_first(bdev, man, &ctx);
@@ -561,18 +563,24 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
if (ret && ret != -ENOENT)
return ret;
- spin_lock(&man->move_lock);
- fence = dma_fence_get(man->move);
- spin_unlock(&man->move_lock);
-
- if (fence) {
- ret = dma_fence_wait(fence, false);
- dma_fence_put(fence);
- if (ret)
- return ret;
+ ret = 0;
+
+ spin_lock(&man->eviction_lock);
+ for (i = 0; i < TTM_NUM_MOVE_FENCES; i++) {
+ fence = man->eviction_fences[i];
+ if (fence && !dma_fence_is_signaled(fence)) {
+ dma_fence_get(fence);
+ spin_unlock(&man->eviction_lock);
+ ret = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ if (ret)
+ return ret;
+ spin_lock(&man->eviction_lock);
+ }
}
+ spin_unlock(&man->eviction_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ttm_resource_manager_evict_all);
@@ -587,6 +595,9 @@ uint64_t ttm_resource_manager_usage(struct ttm_resource_manager *man)
{
uint64_t usage;
+ if (WARN_ON_ONCE(!man->bdev))
+ return 0;
+
spin_lock(&man->bdev->lru_lock);
usage = man->usage;
spin_unlock(&man->bdev->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 506e257dfba8..611d20ab966d 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -40,12 +40,14 @@
#include <linux/shmem_fs.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
+#include <drm/drm_print.h>
#include <drm/drm_util.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
#include "ttm_module.h"
+#include "ttm_pool_internal.h"
static unsigned long ttm_pages_limit;
@@ -93,7 +95,8 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
* mapped TT pages need to be decrypted or otherwise the drivers
* will end up sending encrypted mem to the gpu.
*/
- if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
+ if (ttm_pool_uses_dma_alloc(&bdev->pool) &&
+ cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
page_flags |= TTM_TT_FLAG_DECRYPTED;
drm_info_once(ddev, "TT memory decryption enabled.");
}
@@ -378,7 +381,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
+ if (ttm_pool_uses_dma32(&bdev->pool))
atomic_long_add(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
@@ -416,7 +419,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
error:
if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
+ if (ttm_pool_uses_dma32(&bdev->pool))
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
@@ -439,7 +442,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
+ if (ttm_pool_uses_dma32(&bdev->pool))
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index 37bdd976ae59..26b6c65ef6fd 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -21,6 +21,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include "tve200_drm.h"
diff --git a/drivers/gpu/drm/tyr/Kconfig b/drivers/gpu/drm/tyr/Kconfig
new file mode 100644
index 000000000000..4b55308fd2eb
--- /dev/null
+++ b/drivers/gpu/drm/tyr/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0 or MIT
+
+config DRM_TYR
+ tristate "Tyr (Rust DRM support for ARM Mali CSF-based GPUs)"
+ depends on DRM=y
+ depends on RUST
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
+ default n
+ help
+ Rust DRM driver for ARM Mali CSF-based GPUs.
+
+ This driver is for Mali (or Immortalis) Valhall Gxxx GPUs.
+
+ Note that the Mali-G68 and Mali-G78, while Valhall architecture, will
+ be supported with the panfrost driver as they are not CSF GPUs.
+
+ If M is selected, the module will be called tyr. This driver is a
+ work in progress and may not be functional.
diff --git a/drivers/gpu/drm/tyr/Makefile b/drivers/gpu/drm/tyr/Makefile
new file mode 100644
index 000000000000..ba545f65f2c0
--- /dev/null
+++ b/drivers/gpu/drm/tyr/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0 or MIT
+
+obj-$(CONFIG_DRM_TYR) += tyr.o
diff --git a/drivers/gpu/drm/tyr/driver.rs b/drivers/gpu/drm/tyr/driver.rs
new file mode 100644
index 000000000000..0389c558c036
--- /dev/null
+++ b/drivers/gpu/drm/tyr/driver.rs
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+use kernel::c_str;
+use kernel::clk::Clk;
+use kernel::clk::OptionalClk;
+use kernel::device::Bound;
+use kernel::device::Core;
+use kernel::device::Device;
+use kernel::devres::Devres;
+use kernel::drm;
+use kernel::drm::ioctl;
+use kernel::new_mutex;
+use kernel::of;
+use kernel::platform;
+use kernel::prelude::*;
+use kernel::regulator;
+use kernel::regulator::Regulator;
+use kernel::sizes::SZ_2M;
+use kernel::sync::Arc;
+use kernel::sync::Mutex;
+use kernel::time;
+use kernel::types::ARef;
+
+use crate::file::File;
+use crate::gem::TyrObject;
+use crate::gpu;
+use crate::gpu::GpuInfo;
+use crate::regs;
+
+pub(crate) type IoMem = kernel::io::mem::IoMem<SZ_2M>;
+
+/// Convenience type alias for the DRM device type for this driver.
+pub(crate) type TyrDevice = drm::Device<TyrDriver>;
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct TyrDriver {
+ device: ARef<TyrDevice>,
+}
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct TyrData {
+ pub(crate) pdev: ARef<platform::Device>,
+
+ #[pin]
+ clks: Mutex<Clocks>,
+
+ #[pin]
+ regulators: Mutex<Regulators>,
+
+ /// Some information on the GPU.
+ ///
+ /// This is mainly queried by userspace, i.e.: Mesa.
+ pub(crate) gpu_info: GpuInfo,
+}
+
+// Both `Clk` and `Regulator` do not implement `Send` or `Sync`, but they
+// should. There are patches on the mailing list to address this, but they have
+// not landed yet.
+//
+// For now, add this workaround so that this patch compiles with the promise
+// that it will be removed in a future patch.
+//
+// SAFETY: This will be removed in a future patch.
+unsafe impl Send for TyrData {}
+// SAFETY: This will be removed in a future patch.
+unsafe impl Sync for TyrData {}
+
+fn issue_soft_reset(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result {
+ regs::GPU_CMD.write(dev, iomem, regs::GPU_CMD_SOFT_RESET)?;
+
+ // TODO: We cannot poll, as there is no support in Rust currently, so we
+ // sleep. Change this when read_poll_timeout() is implemented in Rust.
+ kernel::time::delay::fsleep(time::Delta::from_millis(100));
+
+ if regs::GPU_IRQ_RAWSTAT.read(dev, iomem)? & regs::GPU_IRQ_RAWSTAT_RESET_COMPLETED == 0 {
+ dev_err!(dev, "GPU reset failed with errno\n");
+ dev_err!(
+ dev,
+ "GPU_INT_RAWSTAT is {}\n",
+ regs::GPU_IRQ_RAWSTAT.read(dev, iomem)?
+ );
+
+ return Err(EIO);
+ }
+
+ Ok(())
+}
+
+kernel::of_device_table!(
+ OF_TABLE,
+ MODULE_OF_TABLE,
+ <TyrDriver as platform::Driver>::IdInfo,
+ [
+ (of::DeviceId::new(c_str!("rockchip,rk3588-mali")), ()),
+ (of::DeviceId::new(c_str!("arm,mali-valhall-csf")), ())
+ ]
+);
+
+impl platform::Driver for TyrDriver {
+ type IdInfo = ();
+ const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = Some(&OF_TABLE);
+
+ fn probe(
+ pdev: &platform::Device<Core>,
+ _info: Option<&Self::IdInfo>,
+ ) -> impl PinInit<Self, Error> {
+ let core_clk = Clk::get(pdev.as_ref(), Some(c_str!("core")))?;
+ let stacks_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("stacks")))?;
+ let coregroup_clk = OptionalClk::get(pdev.as_ref(), Some(c_str!("coregroup")))?;
+
+ core_clk.prepare_enable()?;
+ stacks_clk.prepare_enable()?;
+ coregroup_clk.prepare_enable()?;
+
+ let mali_regulator = Regulator::<regulator::Enabled>::get(pdev.as_ref(), c_str!("mali"))?;
+ let sram_regulator = Regulator::<regulator::Enabled>::get(pdev.as_ref(), c_str!("sram"))?;
+
+ let request = pdev.io_request_by_index(0).ok_or(ENODEV)?;
+ let iomem = Arc::pin_init(request.iomap_sized::<SZ_2M>(), GFP_KERNEL)?;
+
+ issue_soft_reset(pdev.as_ref(), &iomem)?;
+ gpu::l2_power_on(pdev.as_ref(), &iomem)?;
+
+ let gpu_info = GpuInfo::new(pdev.as_ref(), &iomem)?;
+ gpu_info.log(pdev);
+
+ let platform: ARef<platform::Device> = pdev.into();
+
+ let data = try_pin_init!(TyrData {
+ pdev: platform.clone(),
+ clks <- new_mutex!(Clocks {
+ core: core_clk,
+ stacks: stacks_clk,
+ coregroup: coregroup_clk,
+ }),
+ regulators <- new_mutex!(Regulators {
+ mali: mali_regulator,
+ sram: sram_regulator,
+ }),
+ gpu_info,
+ });
+
+ let tdev: ARef<TyrDevice> = drm::Device::new(pdev.as_ref(), data)?;
+ drm::driver::Registration::new_foreign_owned(&tdev, pdev.as_ref(), 0)?;
+
+ let driver = TyrDriver { device: tdev };
+
+ // We need this to be dev_info!() because dev_dbg!() does not work at
+ // all in Rust for now, and we need to see whether probe succeeded.
+ dev_info!(pdev.as_ref(), "Tyr initialized correctly.\n");
+ Ok(driver)
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for TyrDriver {
+ fn drop(self: Pin<&mut Self>) {}
+}
+
+#[pinned_drop]
+impl PinnedDrop for TyrData {
+ fn drop(self: Pin<&mut Self>) {
+ // TODO: the type-state pattern for Clks will fix this.
+ let clks = self.clks.lock();
+ clks.core.disable_unprepare();
+ clks.stacks.disable_unprepare();
+ clks.coregroup.disable_unprepare();
+ }
+}
+
+// We need to retain the name "panthor" to achieve drop-in compatibility with
+// the C driver in the userspace stack.
+const INFO: drm::DriverInfo = drm::DriverInfo {
+ major: 1,
+ minor: 5,
+ patchlevel: 0,
+ name: c_str!("panthor"),
+ desc: c_str!("ARM Mali Tyr DRM driver"),
+};
+
+#[vtable]
+impl drm::Driver for TyrDriver {
+ type Data = TyrData;
+ type File = File;
+ type Object = drm::gem::Object<TyrObject>;
+
+ const INFO: drm::DriverInfo = INFO;
+
+ kernel::declare_drm_ioctls! {
+ (PANTHOR_DEV_QUERY, drm_panthor_dev_query, ioctl::RENDER_ALLOW, File::dev_query),
+ }
+}
+
+#[pin_data]
+struct Clocks {
+ core: Clk,
+ stacks: OptionalClk,
+ coregroup: OptionalClk,
+}
+
+#[pin_data]
+struct Regulators {
+ mali: Regulator<regulator::Enabled>,
+ sram: Regulator<regulator::Enabled>,
+}
diff --git a/drivers/gpu/drm/tyr/file.rs b/drivers/gpu/drm/tyr/file.rs
new file mode 100644
index 000000000000..0ef432947b73
--- /dev/null
+++ b/drivers/gpu/drm/tyr/file.rs
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+use kernel::drm;
+use kernel::prelude::*;
+use kernel::uaccess::UserSlice;
+use kernel::uapi;
+
+use crate::driver::TyrDevice;
+use crate::TyrDriver;
+
+#[pin_data]
+pub(crate) struct File {}
+
+/// Convenience type alias for our DRM `File` type
+pub(crate) type DrmFile = drm::file::File<File>;
+
+impl drm::file::DriverFile for File {
+ type Driver = TyrDriver;
+
+ fn open(_dev: &drm::Device<Self::Driver>) -> Result<Pin<KBox<Self>>> {
+ KBox::try_pin_init(try_pin_init!(Self {}), GFP_KERNEL)
+ }
+}
+
+impl File {
+ pub(crate) fn dev_query(
+ tdev: &TyrDevice,
+ devquery: &mut uapi::drm_panthor_dev_query,
+ _file: &DrmFile,
+ ) -> Result<u32> {
+ if devquery.pointer == 0 {
+ match devquery.type_ {
+ uapi::drm_panthor_dev_query_type_DRM_PANTHOR_DEV_QUERY_GPU_INFO => {
+ devquery.size = core::mem::size_of_val(&tdev.gpu_info) as u32;
+ Ok(0)
+ }
+ _ => Err(EINVAL),
+ }
+ } else {
+ match devquery.type_ {
+ uapi::drm_panthor_dev_query_type_DRM_PANTHOR_DEV_QUERY_GPU_INFO => {
+ let mut writer = UserSlice::new(
+ UserPtr::from_addr(devquery.pointer as usize),
+ devquery.size as usize,
+ )
+ .writer();
+
+ writer.write(&tdev.gpu_info)?;
+
+ Ok(0)
+ }
+ _ => Err(EINVAL),
+ }
+ }
+ }
+}
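The two-step protocol implemented above (pointer == 0 reports the size, a second call fills the buffer) looks like this from userspace. A hedged C sketch; names are from the panthor uapi header, and error handling is omitted:

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <drm/panthor_drm.h>

	static struct drm_panthor_gpu_info *query_gpu_info(int fd)
	{
		struct drm_panthor_dev_query q = {
			.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
		};
		struct drm_panthor_gpu_info *info;

		/* pointer == 0: the driver only reports the required size. */
		ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);

		info = malloc(q.size);
		q.pointer = (uint64_t)(uintptr_t)info;

		/* Second call fills the buffer. */
		ioctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &q);
		return info;
	}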
diff --git a/drivers/gpu/drm/tyr/gem.rs b/drivers/gpu/drm/tyr/gem.rs
new file mode 100644
index 000000000000..1273bf89dbd5
--- /dev/null
+++ b/drivers/gpu/drm/tyr/gem.rs
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+use crate::driver::TyrDevice;
+use crate::driver::TyrDriver;
+use kernel::drm::gem;
+use kernel::prelude::*;
+
+/// GEM Object inner driver data
+#[pin_data]
+pub(crate) struct TyrObject {}
+
+impl gem::DriverObject for TyrObject {
+ type Driver = TyrDriver;
+
+ fn new(_dev: &TyrDevice, _size: usize) -> impl PinInit<Self, Error> {
+ try_pin_init!(TyrObject {})
+ }
+}
diff --git a/drivers/gpu/drm/tyr/gpu.rs b/drivers/gpu/drm/tyr/gpu.rs
new file mode 100644
index 000000000000..6c582910dd5d
--- /dev/null
+++ b/drivers/gpu/drm/tyr/gpu.rs
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+use kernel::bits::genmask_u32;
+use kernel::device::Bound;
+use kernel::device::Device;
+use kernel::devres::Devres;
+use kernel::platform;
+use kernel::prelude::*;
+use kernel::time;
+use kernel::transmute::AsBytes;
+
+use crate::driver::IoMem;
+use crate::regs;
+
+/// Struct containing information that can be queried by userspace. This is read from
+/// the GPU's registers.
+///
+/// # Invariants
+///
+/// - The layout of this struct is identical to the C `struct drm_panthor_gpu_info`.
+#[repr(C)]
+pub(crate) struct GpuInfo {
+ pub(crate) gpu_id: u32,
+ pub(crate) gpu_rev: u32,
+ pub(crate) csf_id: u32,
+ pub(crate) l2_features: u32,
+ pub(crate) tiler_features: u32,
+ pub(crate) mem_features: u32,
+ pub(crate) mmu_features: u32,
+ pub(crate) thread_features: u32,
+ pub(crate) max_threads: u32,
+ pub(crate) thread_max_workgroup_size: u32,
+ pub(crate) thread_max_barrier_size: u32,
+ pub(crate) coherency_features: u32,
+ pub(crate) texture_features: [u32; 4],
+ pub(crate) as_present: u32,
+ pub(crate) pad0: u32,
+ pub(crate) shader_present: u64,
+ pub(crate) l2_present: u64,
+ pub(crate) tiler_present: u64,
+ pub(crate) core_features: u32,
+ pub(crate) pad: u32,
+}
+
+impl GpuInfo {
+ pub(crate) fn new(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result<Self> {
+ let gpu_id = regs::GPU_ID.read(dev, iomem)?;
+ let csf_id = regs::GPU_CSF_ID.read(dev, iomem)?;
+ let gpu_rev = regs::GPU_REVID.read(dev, iomem)?;
+ let core_features = regs::GPU_CORE_FEATURES.read(dev, iomem)?;
+ let l2_features = regs::GPU_L2_FEATURES.read(dev, iomem)?;
+ let tiler_features = regs::GPU_TILER_FEATURES.read(dev, iomem)?;
+ let mem_features = regs::GPU_MEM_FEATURES.read(dev, iomem)?;
+ let mmu_features = regs::GPU_MMU_FEATURES.read(dev, iomem)?;
+ let thread_features = regs::GPU_THREAD_FEATURES.read(dev, iomem)?;
+ let max_threads = regs::GPU_THREAD_MAX_THREADS.read(dev, iomem)?;
+ let thread_max_workgroup_size = regs::GPU_THREAD_MAX_WORKGROUP_SIZE.read(dev, iomem)?;
+ let thread_max_barrier_size = regs::GPU_THREAD_MAX_BARRIER_SIZE.read(dev, iomem)?;
+ let coherency_features = regs::GPU_COHERENCY_FEATURES.read(dev, iomem)?;
+
+ let texture_features = regs::GPU_TEXTURE_FEATURES0.read(dev, iomem)?;
+
+ let as_present = regs::GPU_AS_PRESENT.read(dev, iomem)?;
+
+ let shader_present = u64::from(regs::GPU_SHADER_PRESENT_LO.read(dev, iomem)?);
+ let shader_present =
+ shader_present | u64::from(regs::GPU_SHADER_PRESENT_HI.read(dev, iomem)?) << 32;
+
+ let tiler_present = u64::from(regs::GPU_TILER_PRESENT_LO.read(dev, iomem)?);
+ let tiler_present =
+ tiler_present | u64::from(regs::GPU_TILER_PRESENT_HI.read(dev, iomem)?) << 32;
+
+ let l2_present = u64::from(regs::GPU_L2_PRESENT_LO.read(dev, iomem)?);
+ let l2_present = l2_present | u64::from(regs::GPU_L2_PRESENT_HI.read(dev, iomem)?) << 32;
+
+ Ok(Self {
+ gpu_id,
+ gpu_rev,
+ csf_id,
+ l2_features,
+ tiler_features,
+ mem_features,
+ mmu_features,
+ thread_features,
+ max_threads,
+ thread_max_workgroup_size,
+ thread_max_barrier_size,
+ coherency_features,
+ // TODO: Add texture_features_{1,2,3}.
+ texture_features: [texture_features, 0, 0, 0],
+ as_present,
+ pad0: 0,
+ shader_present,
+ l2_present,
+ tiler_present,
+ core_features,
+ pad: 0,
+ })
+ }
+
+ pub(crate) fn log(&self, pdev: &platform::Device) {
+ let major = (self.gpu_id >> 16) & 0xff;
+ let minor = (self.gpu_id >> 8) & 0xff;
+ let status = self.gpu_id & 0xff;
+
+ let model_name = if let Some(model) = GPU_MODELS
+ .iter()
+ .find(|&f| f.major == major && f.minor == minor)
+ {
+ model.name
+ } else {
+ "unknown"
+ };
+
+ dev_info!(
+ pdev.as_ref(),
+ "mali-{} id 0x{:x} major 0x{:x} minor 0x{:x} status 0x{:x}",
+ model_name,
+ self.gpu_id >> 16,
+ major,
+ minor,
+ status
+ );
+
+ dev_info!(
+ pdev.as_ref(),
+ "Features: L2:{:#x} Tiler:{:#x} Mem:{:#x} MMU:{:#x} AS:{:#x}",
+ self.l2_features,
+ self.tiler_features,
+ self.mem_features,
+ self.mmu_features,
+ self.as_present
+ );
+
+ dev_info!(
+ pdev.as_ref(),
+ "shader_present=0x{:016x} l2_present=0x{:016x} tiler_present=0x{:016x}",
+ self.shader_present,
+ self.l2_present,
+ self.tiler_present
+ );
+ }
+
+ /// Returns the number of virtual address bits supported by the GPU.
+ #[expect(dead_code)]
+ pub(crate) fn va_bits(&self) -> u32 {
+ self.mmu_features & genmask_u32(0..=7)
+ }
+
+ /// Returns the number of physical address bits supported by the GPU.
+ #[expect(dead_code)]
+ pub(crate) fn pa_bits(&self) -> u32 {
+ (self.mmu_features >> 8) & genmask_u32(0..=7)
+ }
+}
+
+// SAFETY: `GpuInfo`'s invariant guarantees that it is the same type that is
+// already exposed to userspace by the C driver. This implies that it fulfills
+// the requirements for `AsBytes`.
+//
+// This means:
+//
+// - No implicit padding,
+// - No kernel pointers,
+// - No interior mutability.
+unsafe impl AsBytes for GpuInfo {}
+
+struct GpuModels {
+ name: &'static str,
+ major: u32,
+ minor: u32,
+}
+
+const GPU_MODELS: [GpuModels; 1] = [GpuModels {
+ name: "g610",
+ major: 10,
+ minor: 7,
+}];
+
+#[allow(dead_code)]
+pub(crate) struct GpuId {
+ pub(crate) arch_major: u32,
+ pub(crate) arch_minor: u32,
+ pub(crate) arch_rev: u32,
+ pub(crate) prod_major: u32,
+ pub(crate) ver_major: u32,
+ pub(crate) ver_minor: u32,
+ pub(crate) ver_status: u32,
+}
+
+impl From<u32> for GpuId {
+ fn from(value: u32) -> Self {
+ GpuId {
+ arch_major: (value & genmask_u32(28..=31)) >> 28,
+ arch_minor: (value & genmask_u32(24..=27)) >> 24,
+ arch_rev: (value & genmask_u32(20..=23)) >> 20,
+ prod_major: (value & genmask_u32(16..=19)) >> 16,
+ ver_major: (value & genmask_u32(12..=15)) >> 12,
+ ver_minor: (value & genmask_u32(4..=11)) >> 4,
+ ver_status: value & genmask_u32(0..=3),
+ }
+ }
+}
+
+/// Powers on the l2 block.
+pub(crate) fn l2_power_on(dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result {
+ regs::L2_PWRON_LO.write(dev, iomem, 1)?;
+
+ // TODO: We cannot poll, as there is no support in Rust currently, so we
+ // sleep. Change this when read_poll_timeout() is implemented in Rust.
+ kernel::time::delay::fsleep(time::Delta::from_millis(100));
+
+ if regs::L2_READY_LO.read(dev, iomem)? != 1 {
+ dev_err!(dev, "Failed to power on the GPU\n");
+ return Err(EIO);
+ }
+
+ Ok(())
+}
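For readers more used to the C side, the GPU_ID decode in the From<u32> impl above maps directly onto GENMASK(). A reading aid only, not part of the series:

	#include <linux/bits.h>
	#include <linux/types.h>

	static inline u32 gpu_arch_major(u32 id) { return (id & GENMASK(31, 28)) >> 28; }
	static inline u32 gpu_prod_major(u32 id) { return (id & GENMASK(19, 16)) >> 16; }
	static inline u32 gpu_ver_minor(u32 id)  { return (id & GENMASK(11, 4)) >> 4; }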
diff --git a/drivers/gpu/drm/tyr/regs.rs b/drivers/gpu/drm/tyr/regs.rs
new file mode 100644
index 000000000000..f46933aaa221
--- /dev/null
+++ b/drivers/gpu/drm/tyr/regs.rs
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+// We don't expect that all the registers and fields will be used, even in the
+// future.
+//
+// Nevertheless, it is useful to have most of them defined, like the C driver
+// does.
+#![allow(dead_code)]
+
+use kernel::bits::bit_u32;
+use kernel::device::Bound;
+use kernel::device::Device;
+use kernel::devres::Devres;
+use kernel::prelude::*;
+
+use crate::driver::IoMem;
+
+/// Represents a register in the Register Set
+///
+/// TODO: Replace this with the Nova `register!()` macro when it is available.
+/// In particular, this will automatically give us 64bit register reads and
+/// writes.
+pub(crate) struct Register<const OFFSET: usize>;
+
+impl<const OFFSET: usize> Register<OFFSET> {
+ #[inline]
+ pub(crate) fn read(&self, dev: &Device<Bound>, iomem: &Devres<IoMem>) -> Result<u32> {
+ let value = (*iomem).access(dev)?.read32(OFFSET);
+ Ok(value)
+ }
+
+ #[inline]
+ pub(crate) fn write(&self, dev: &Device<Bound>, iomem: &Devres<IoMem>, value: u32) -> Result {
+ (*iomem).access(dev)?.write32(value, OFFSET);
+ Ok(())
+ }
+}
+
+pub(crate) const GPU_ID: Register<0x0> = Register;
+pub(crate) const GPU_L2_FEATURES: Register<0x4> = Register;
+pub(crate) const GPU_CORE_FEATURES: Register<0x8> = Register;
+pub(crate) const GPU_CSF_ID: Register<0x1c> = Register;
+pub(crate) const GPU_REVID: Register<0x280> = Register;
+pub(crate) const GPU_TILER_FEATURES: Register<0xc> = Register;
+pub(crate) const GPU_MEM_FEATURES: Register<0x10> = Register;
+pub(crate) const GPU_MMU_FEATURES: Register<0x14> = Register;
+pub(crate) const GPU_AS_PRESENT: Register<0x18> = Register;
+pub(crate) const GPU_IRQ_RAWSTAT: Register<0x20> = Register;
+
+pub(crate) const GPU_IRQ_RAWSTAT_FAULT: u32 = bit_u32(0);
+pub(crate) const GPU_IRQ_RAWSTAT_PROTECTED_FAULT: u32 = bit_u32(1);
+pub(crate) const GPU_IRQ_RAWSTAT_RESET_COMPLETED: u32 = bit_u32(8);
+pub(crate) const GPU_IRQ_RAWSTAT_POWER_CHANGED_SINGLE: u32 = bit_u32(9);
+pub(crate) const GPU_IRQ_RAWSTAT_POWER_CHANGED_ALL: u32 = bit_u32(10);
+pub(crate) const GPU_IRQ_RAWSTAT_CLEAN_CACHES_COMPLETED: u32 = bit_u32(17);
+pub(crate) const GPU_IRQ_RAWSTAT_DOORBELL_STATUS: u32 = bit_u32(18);
+pub(crate) const GPU_IRQ_RAWSTAT_MCU_STATUS: u32 = bit_u32(19);
+
+pub(crate) const GPU_IRQ_CLEAR: Register<0x24> = Register;
+pub(crate) const GPU_IRQ_MASK: Register<0x28> = Register;
+pub(crate) const GPU_IRQ_STAT: Register<0x2c> = Register;
+pub(crate) const GPU_CMD: Register<0x30> = Register;
+pub(crate) const GPU_CMD_SOFT_RESET: u32 = 1 | (1 << 8);
+pub(crate) const GPU_CMD_HARD_RESET: u32 = 1 | (2 << 8);
+pub(crate) const GPU_THREAD_FEATURES: Register<0xac> = Register;
+pub(crate) const GPU_THREAD_MAX_THREADS: Register<0xa0> = Register;
+pub(crate) const GPU_THREAD_MAX_WORKGROUP_SIZE: Register<0xa4> = Register;
+pub(crate) const GPU_THREAD_MAX_BARRIER_SIZE: Register<0xa8> = Register;
+pub(crate) const GPU_TEXTURE_FEATURES0: Register<0xb0> = Register;
+pub(crate) const GPU_SHADER_PRESENT_LO: Register<0x100> = Register;
+pub(crate) const GPU_SHADER_PRESENT_HI: Register<0x104> = Register;
+pub(crate) const GPU_TILER_PRESENT_LO: Register<0x110> = Register;
+pub(crate) const GPU_TILER_PRESENT_HI: Register<0x114> = Register;
+pub(crate) const GPU_L2_PRESENT_LO: Register<0x120> = Register;
+pub(crate) const GPU_L2_PRESENT_HI: Register<0x124> = Register;
+pub(crate) const L2_READY_LO: Register<0x160> = Register;
+pub(crate) const L2_READY_HI: Register<0x164> = Register;
+pub(crate) const L2_PWRON_LO: Register<0x1a0> = Register;
+pub(crate) const L2_PWRON_HI: Register<0x1a4> = Register;
+pub(crate) const L2_PWRTRANS_LO: Register<0x220> = Register;
+pub(crate) const L2_PWRTRANS_HI: Register<0x204> = Register;
+pub(crate) const L2_PWRACTIVE_LO: Register<0x260> = Register;
+pub(crate) const L2_PWRACTIVE_HI: Register<0x264> = Register;
+
+pub(crate) const MCU_CONTROL: Register<0x700> = Register;
+pub(crate) const MCU_CONTROL_ENABLE: u32 = 1;
+pub(crate) const MCU_CONTROL_AUTO: u32 = 2;
+pub(crate) const MCU_CONTROL_DISABLE: u32 = 0;
+
+pub(crate) const MCU_STATUS: Register<0x704> = Register;
+pub(crate) const MCU_STATUS_DISABLED: u32 = 0;
+pub(crate) const MCU_STATUS_ENABLED: u32 = 1;
+pub(crate) const MCU_STATUS_HALT: u32 = 2;
+pub(crate) const MCU_STATUS_FATAL: u32 = 3;
+
+pub(crate) const GPU_COHERENCY_FEATURES: Register<0x300> = Register;
+
+pub(crate) const JOB_IRQ_RAWSTAT: Register<0x1000> = Register;
+pub(crate) const JOB_IRQ_CLEAR: Register<0x1004> = Register;
+pub(crate) const JOB_IRQ_MASK: Register<0x1008> = Register;
+pub(crate) const JOB_IRQ_STAT: Register<0x100c> = Register;
+
+pub(crate) const JOB_IRQ_GLOBAL_IF: u32 = bit_u32(31);
+
+pub(crate) const MMU_IRQ_RAWSTAT: Register<0x2000> = Register;
+pub(crate) const MMU_IRQ_CLEAR: Register<0x2004> = Register;
+pub(crate) const MMU_IRQ_MASK: Register<0x2008> = Register;
+pub(crate) const MMU_IRQ_STAT: Register<0x200c> = Register;
diff --git a/drivers/gpu/drm/tyr/tyr.rs b/drivers/gpu/drm/tyr/tyr.rs
new file mode 100644
index 000000000000..861d1db43072
--- /dev/null
+++ b/drivers/gpu/drm/tyr/tyr.rs
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0 or MIT
+
+//! Arm Mali Tyr DRM driver.
+//!
+//! The name "Tyr" is inspired by Norse mythology, reflecting Arm's tradition of
+//! naming their GPUs after Nordic mythological figures and places.
+
+use crate::driver::TyrDriver;
+
+mod driver;
+mod file;
+mod gem;
+mod gpu;
+mod regs;
+
+kernel::module_platform_driver! {
+ type: TyrDriver,
+ name: "tyr",
+ authors: ["The Tyr driver authors"],
+ description: "Arm Mali Tyr DRM driver",
+ license: "Dual MIT/GPL",
+}
diff --git a/drivers/gpu/drm/udl/udl_edid.c b/drivers/gpu/drm/udl/udl_edid.c
index 12f48ae17073..af4cff2a7c51 100644
--- a/drivers/gpu/drm/udl/udl_edid.c
+++ b/drivers/gpu/drm/udl/udl_edid.c
@@ -4,6 +4,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "udl_drv.h"
#include "udl_edid.h"
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index c41476ddde68..d9547f5117b9 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -18,6 +18,8 @@
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
+#include <drm/drm_print.h>
+
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 7e789e181af0..89f24eec62a7 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -8,6 +8,7 @@
#include <linux/string_helpers.h>
#include <drm/drm_debugfs.h>
+#include <drm/drm_print.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 5e997ae8bc9c..e8a46c8bad8a 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <uapi/drm/v3d_drm.h>
#include "v3d_drv.h"
@@ -46,6 +47,7 @@ MODULE_PARM_DESC(super_pages, "Enable/Disable Super Pages support.");
static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct v3d_dev *v3d = to_v3d_dev(dev);
struct drm_v3d_get_param *args = data;
static const u32 reg_map[] = {
@@ -107,6 +109,16 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES:
args->value = !!v3d->gemfs;
return 0;
+ case DRM_V3D_PARAM_GLOBAL_RESET_COUNTER:
+ mutex_lock(&v3d->reset_lock);
+ args->value = v3d->reset_counter;
+ mutex_unlock(&v3d->reset_lock);
+ return 0;
+ case DRM_V3D_PARAM_CONTEXT_RESET_COUNTER:
+ mutex_lock(&v3d->reset_lock);
+ args->value = v3d_priv->reset_counter;
+ mutex_unlock(&v3d->reset_lock);
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -146,12 +158,24 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
+ struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
+ unsigned long irqflags;
enum v3d_queue q;
- for (q = 0; q < V3D_MAX_QUEUES; q++)
+ for (q = 0; q < V3D_MAX_QUEUES; q++) {
+ struct v3d_queue_state *queue = &v3d->queue[q];
+ struct v3d_job *job = queue->active_job;
+
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
+ if (job && job->base.entity == &v3d_priv->sched_entity[q]) {
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ job->file_priv = NULL;
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
+ }
+ }
+
v3d_perfmon_close_file(v3d_priv);
kfree(v3d_priv);
}
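The new counters are read through the existing GET_PARAM ioctl, as the v3d_get_param_ioctl() hunk above shows. A hedged userspace sketch; the param names are this series' uapi additions, and error handling is omitted:

	#include <sys/ioctl.h>
	#include <drm/v3d_drm.h>

	static __u64 v3d_global_reset_count(int fd)
	{
		struct drm_v3d_get_param gp = {
			.param = DRM_V3D_PARAM_GLOBAL_RESET_COUNTER,
		};

		ioctl(fd, DRM_IOCTL_V3D_GET_PARAM, &gp);
		return gp.value;
	}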
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 411e47702f8a..1884686985b8 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -58,6 +58,12 @@ struct v3d_queue_state {
/* Stores the GPU stats for this queue in the global context. */
struct v3d_stats stats;
+
+ /* Currently active job for this queue */
+ struct v3d_job *active_job;
+ spinlock_t queue_lock;
+ /* Protect dma fence for signalling job completion */
+ spinlock_t fence_lock;
};
/* Performance monitor object. The perform lifetime is controlled by userspace
@@ -159,18 +165,8 @@ struct v3d_dev {
struct work_struct overflow_mem_work;
- struct v3d_bin_job *bin_job;
- struct v3d_render_job *render_job;
- struct v3d_tfu_job *tfu_job;
- struct v3d_csd_job *csd_job;
-
struct v3d_queue_state queue[V3D_MAX_QUEUES];
- /* Spinlock used to synchronize the overflow memory
- * management against bin job submission.
- */
- spinlock_t job_lock;
-
/* Used to track the active perfmon if any. */
struct v3d_perfmon *active_perfmon;
@@ -204,6 +200,11 @@ struct v3d_dev {
* all jobs.
*/
struct v3d_perfmon *global_perfmon;
+
+ /* Global reset counter. The counter must be incremented when
+ * a GPU reset happens. It must be protected by @reset_lock.
+ */
+ unsigned int reset_counter;
};
static inline struct v3d_dev *
@@ -233,6 +234,12 @@ struct v3d_file_priv {
/* Stores the GPU stats for a specific queue for this fd. */
struct v3d_stats stats[V3D_MAX_QUEUES];
+
+ /* Per-fd reset counter, must be incremented when a job submitted
+ * by this fd causes a GPU reset. It must be protected by
+ * &struct v3d_dev->reset_lock.
+ */
+ unsigned int reset_counter;
};
struct v3d_bo {
@@ -316,9 +323,9 @@ struct v3d_job {
struct v3d_perfmon *perfmon;
/* File descriptor of the process that submitted the job that could be used
- * for collecting stats by process of GPU usage.
+ * to collect per-process information about the GPU.
*/
- struct drm_file *file;
+ struct v3d_file_priv *file_priv;
/* Callback for the freeing of the job on refcount going to 0. */
void (*free)(struct kref *ref);
@@ -559,7 +566,7 @@ void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp,
/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
-struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue);
+struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q);
/* v3d_gem.c */
int v3d_gem_init(struct drm_device *dev);
@@ -603,7 +610,7 @@ void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info,
unsigned int count);
void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info,
unsigned int count);
-void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue);
+void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q);
int v3d_sched_init(struct v3d_dev *v3d);
void v3d_sched_fini(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c
index 89840ed212c0..c82500a1df73 100644
--- a/drivers/gpu/drm/v3d/v3d_fence.c
+++ b/drivers/gpu/drm/v3d/v3d_fence.c
@@ -3,8 +3,9 @@
#include "v3d_drv.h"
-struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
+struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q)
{
+ struct v3d_queue_state *queue = &v3d->queue[q];
struct v3d_fence *fence;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
@@ -12,10 +13,10 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue)
return ERR_PTR(-ENOMEM);
fence->dev = &v3d->drm;
- fence->queue = queue;
- fence->seqno = ++v3d->queue[queue].emit_seqno;
- dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock,
- v3d->queue[queue].fence_context, fence->seqno);
+ fence->queue = q;
+ fence->seqno = ++queue->emit_seqno;
+ dma_fence_init(&fence->base, &v3d_fence_ops, &queue->fence_lock,
+ queue->fence_context, fence->seqno);
return &fence->base;
}
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 37bf5eecdd2c..5a180dc6c452 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "v3d_drv.h"
#include "v3d_regs.h"
@@ -271,10 +272,12 @@ v3d_gem_init(struct drm_device *dev)
queue->fence_context = dma_fence_context_alloc(1);
memset(&queue->stats, 0, sizeof(queue->stats));
seqcount_init(&queue->stats.lock);
+
+ spin_lock_init(&queue->queue_lock);
+ spin_lock_init(&queue->fence_lock);
}
spin_lock_init(&v3d->mm_lock);
- spin_lock_init(&v3d->job_lock);
ret = drmm_mutex_init(dev, &v3d->bo_lock);
if (ret)
return ret;
@@ -324,6 +327,7 @@ void
v3d_gem_destroy(struct drm_device *dev)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
+ enum v3d_queue q;
v3d_sched_fini(v3d);
v3d_gemfs_fini(v3d);
@@ -331,10 +335,8 @@ v3d_gem_destroy(struct drm_device *dev)
/* Waiting for jobs to finish would need to be done before
* unregistering V3D.
*/
- WARN_ON(v3d->bin_job);
- WARN_ON(v3d->render_job);
- WARN_ON(v3d->tfu_job);
- WARN_ON(v3d->csd_job);
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
+ WARN_ON(v3d->queue[q].active_job);
drm_mm_takedown(&v3d->mm);
diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c
index 8ec6ed82b3d9..bf351fc0d488 100644
--- a/drivers/gpu/drm/v3d/v3d_gemfs.c
+++ b/drivers/gpu/drm/v3d/v3d_gemfs.c
@@ -5,12 +5,9 @@
#include <linux/mount.h>
#include <linux/fs_context.h>
-#include "v3d_drv.h"
+#include <drm/drm_print.h>
-static int add_param(struct fs_context *fc, const char *key, const char *val)
-{
- return vfs_parse_fs_string(fc, key, val, strlen(val));
-}
+#include "v3d_drv.h"
void v3d_gemfs_init(struct v3d_dev *v3d)
{
@@ -38,9 +35,9 @@ void v3d_gemfs_init(struct v3d_dev *v3d)
fc = fs_context_for_mount(type, SB_KERNMOUNT);
if (IS_ERR(fc))
goto err;
- ret = add_param(fc, "source", "tmpfs");
+ ret = vfs_parse_fs_string(fc, "source", "tmpfs");
if (!ret)
- ret = add_param(fc, "huge", "within_size");
+ ret = vfs_parse_fs_string(fc, "huge", "within_size");
if (!ret)
gemfs = fc_mount_longterm(fc);
put_fs_context(fc);
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index a515a301e480..b55880fd6c50 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -16,6 +16,8 @@
#include <linux/platform_device.h>
#include <linux/sched/clock.h>
+#include <drm/drm_print.h>
+
#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
@@ -42,6 +44,8 @@ v3d_overflow_mem_work(struct work_struct *work)
container_of(work, struct v3d_dev, overflow_mem_work);
struct drm_device *dev = &v3d->drm;
struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+ struct v3d_queue_state *queue = &v3d->queue[V3D_BIN];
+ struct v3d_bin_job *bin_job;
struct drm_gem_object *obj;
unsigned long irqflags;
@@ -60,15 +64,17 @@ v3d_overflow_mem_work(struct work_struct *work)
* bin job got scheduled, that's fine. We'll just give them
* some binner pool anyway.
*/
- spin_lock_irqsave(&v3d->job_lock, irqflags);
- if (!v3d->bin_job) {
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ bin_job = (struct v3d_bin_job *)queue->active_job;
+
+ if (!bin_job) {
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
goto out;
}
drm_gem_object_get(obj);
- list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ list_add_tail(&bo->unref_head, &bin_job->render->unref_list);
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
v3d_mmu_flush_all(v3d);
@@ -79,6 +85,20 @@ out:
drm_gem_object_put(obj);
}
+static void
+v3d_irq_signal_fence(struct v3d_dev *v3d, enum v3d_queue q,
+ void (*trace_irq)(struct drm_device *, uint64_t))
+{
+ struct v3d_queue_state *queue = &v3d->queue[q];
+ struct v3d_fence *fence = to_v3d_fence(queue->active_job->irq_fence);
+
+ v3d_job_update_stats(queue->active_job, q);
+ trace_irq(&v3d->drm, fence->seqno);
+
+ queue->active_job = NULL;
+ dma_fence_signal(&fence->base);
+}
+
static irqreturn_t
v3d_irq(int irq, void *arg)
{
@@ -102,41 +122,17 @@ v3d_irq(int irq, void *arg)
}
if (intsts & V3D_INT_FLDONE) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->bin_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN);
- trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
-
- v3d->bin_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_BIN, trace_v3d_bcl_irq);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_FRDONE) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->render_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER);
- trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
-
- v3d->render_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_RENDER, trace_v3d_rcl_irq);
status = IRQ_HANDLED;
}
if (intsts & V3D_INT_CSDDONE(v3d->ver)) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->csd_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD);
- trace_v3d_csd_irq(&v3d->drm, fence->seqno);
-
- v3d->csd_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_CSD, trace_v3d_csd_irq);
status = IRQ_HANDLED;
}
@@ -168,15 +164,7 @@ v3d_hub_irq(int irq, void *arg)
V3D_WRITE(V3D_HUB_INT_CLR, intsts);
if (intsts & V3D_HUB_INT_TFUC) {
- struct v3d_fence *fence =
- to_v3d_fence(v3d->tfu_job->base.irq_fence);
-
- v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU);
- trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
-
- v3d->tfu_job = NULL;
- dma_fence_signal(&fence->base);
-
+ v3d_irq_signal_fence(v3d, V3D_TFU, trace_v3d_tfu_irq);
status = IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index cb9df8822472..0867250db7a6 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -21,6 +21,7 @@
#include <linux/sched/clock.h>
#include <linux/kthread.h>
+#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include "v3d_drv.h"
@@ -139,7 +140,7 @@ static void
v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
{
struct v3d_dev *v3d = job->v3d;
- struct v3d_file_priv *file = job->file->driver_priv;
+ struct v3d_file_priv *file = job->file_priv;
struct v3d_stats *global_stats = &v3d->queue[queue].stats;
struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
@@ -194,11 +195,11 @@ v3d_stats_update(struct v3d_stats *stats, u64 now)
}
void
-v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
+v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q)
{
struct v3d_dev *v3d = job->v3d;
- struct v3d_file_priv *file = job->file->driver_priv;
- struct v3d_stats *global_stats = &v3d->queue[queue].stats;
+ struct v3d_queue_state *queue = &v3d->queue[q];
+ struct v3d_stats *global_stats = &queue->stats;
u64 now = local_clock();
unsigned long flags;
@@ -209,10 +210,10 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
preempt_disable();
/* Don't update the local stats if the file context has already closed */
- if (file)
- v3d_stats_update(&file->stats[queue], now);
- else
- drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n");
+ spin_lock(&queue->queue_lock);
+ if (job->file_priv)
+ v3d_stats_update(&job->file_priv->stats[q], now);
+ spin_unlock(&queue->queue_lock);
v3d_stats_update(global_stats, now);
@@ -226,27 +227,28 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
struct v3d_dev *v3d = job->base.v3d;
+ struct v3d_queue_state *queue = &v3d->queue[V3D_BIN];
struct drm_device *dev = &v3d->drm;
struct dma_fence *fence;
unsigned long irqflags;
if (unlikely(job->base.base.s_fence->finished.error)) {
- spin_lock_irqsave(&v3d->job_lock, irqflags);
- v3d->bin_job = NULL;
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ queue->active_job = NULL;
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
return NULL;
}
/* Lock required around bin_job update vs
* v3d_overflow_mem_work().
*/
- spin_lock_irqsave(&v3d->job_lock, irqflags);
- v3d->bin_job = job;
+ spin_lock_irqsave(&queue->queue_lock, irqflags);
+ queue->active_job = &job->base;
/* Clear out the overflow allocation, so we don't
* reuse the overflow attached to a previous job.
*/
V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);
- spin_unlock_irqrestore(&v3d->job_lock, irqflags);
+ spin_unlock_irqrestore(&queue->queue_lock, irqflags);
v3d_invalidate_caches(v3d);
@@ -290,11 +292,11 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
if (unlikely(job->base.base.s_fence->finished.error)) {
- v3d->render_job = NULL;
+ v3d->queue[V3D_RENDER].active_job = NULL;
return NULL;
}
- v3d->render_job = job;
+ v3d->queue[V3D_RENDER].active_job = &job->base;
/* Can we avoid this flush? We need to be careful of
* scheduling, though -- imagine job0 rendering to texture and
@@ -338,11 +340,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
if (unlikely(job->base.base.s_fence->finished.error)) {
- v3d->tfu_job = NULL;
+ v3d->queue[V3D_TFU].active_job = NULL;
return NULL;
}
- v3d->tfu_job = job;
+ v3d->queue[V3D_TFU].active_job = &job->base;
fence = v3d_fence_create(v3d, V3D_TFU);
if (IS_ERR(fence))
@@ -386,11 +388,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job)
int i, csd_cfg0_reg;
if (unlikely(job->base.base.s_fence->finished.error)) {
- v3d->csd_job = NULL;
+ v3d->queue[V3D_CSD].active_job = NULL;
return NULL;
}
- v3d->csd_job = job;
+ v3d->queue[V3D_CSD].active_job = &job->base;
v3d_invalidate_caches(v3d);
@@ -574,7 +576,7 @@ static void
v3d_reset_performance_queries(struct v3d_cpu_job *job)
{
struct v3d_performance_query_info *performance_query = &job->performance_query;
- struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_file_priv *v3d_priv = job->base.file_priv;
struct v3d_dev *v3d = job->base.v3d;
struct v3d_perfmon *perfmon;
@@ -604,7 +606,7 @@ v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data,
{
struct v3d_performance_query_info *performance_query =
&job->performance_query;
- struct v3d_file_priv *v3d_priv = job->base.file->driver_priv;
+ struct v3d_file_priv *v3d_priv = job->base.file_priv;
struct v3d_performance_query *perf_query =
&performance_query->queries[query];
struct v3d_dev *v3d = job->base.v3d;
@@ -700,6 +702,7 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job)
trace_v3d_cpu_job_end(&v3d->drm, job->job_type);
v3d_job_update_stats(&job->base, V3D_CPU);
+ /* Synchronous operation, so no fence to wait on. */
return NULL;
}
@@ -715,19 +718,24 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
v3d_job_update_stats(job, V3D_CACHE_CLEAN);
+ /* Synchronous operation, so no fence to wait on. */
return NULL;
}
static enum drm_gpu_sched_stat
-v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
+v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job,
+ enum v3d_queue q)
{
- enum v3d_queue q;
+ struct v3d_job *job = to_v3d_job(sched_job);
+ struct v3d_file_priv *v3d_priv = job->file_priv;
+ unsigned long irqflags;
+ enum v3d_queue i;
mutex_lock(&v3d->reset_lock);
/* block scheduler */
- for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_stop(&v3d->queue[q].sched, sched_job);
+ for (i = 0; i < V3D_MAX_QUEUES; i++)
+ drm_sched_stop(&v3d->queue[i].sched, sched_job);
if (sched_job)
drm_sched_increase_karma(sched_job);
@@ -735,13 +743,18 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
/* get the GPU back into the init state */
v3d_reset(v3d);
- for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_resubmit_jobs(&v3d->queue[q].sched);
+ v3d->reset_counter++;
+ spin_lock_irqsave(&v3d->queue[q].queue_lock, irqflags);
+ if (v3d_priv)
+ v3d_priv->reset_counter++;
+ spin_unlock_irqrestore(&v3d->queue[q].queue_lock, irqflags);
+
+ for (i = 0; i < V3D_MAX_QUEUES; i++)
+ drm_sched_resubmit_jobs(&v3d->queue[i].sched);
/* Unblock schedulers and restart their jobs. */
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_start(&v3d->queue[q].sched, 0);
- }
+ for (i = 0; i < V3D_MAX_QUEUES; i++)
+ drm_sched_start(&v3d->queue[i].sched, 0);
mutex_unlock(&v3d->reset_lock);
@@ -769,7 +782,7 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
return DRM_GPU_SCHED_STAT_NO_HANG;
}
- return v3d_gpu_reset_for_timeout(v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(v3d, sched_job, q);
}
static enum drm_gpu_sched_stat
@@ -791,11 +804,11 @@ v3d_render_job_timedout(struct drm_sched_job *sched_job)
}
static enum drm_gpu_sched_stat
-v3d_generic_job_timedout(struct drm_sched_job *sched_job)
+v3d_tfu_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
- return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(job->v3d, sched_job, V3D_TFU);
}
static enum drm_gpu_sched_stat
@@ -814,7 +827,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NO_HANG;
}
- return v3d_gpu_reset_for_timeout(v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(v3d, sched_job, V3D_CSD);
}
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
@@ -831,7 +844,7 @@ static const struct drm_sched_backend_ops v3d_render_sched_ops = {
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
.run_job = v3d_tfu_job_run,
- .timedout_job = v3d_generic_job_timedout,
+ .timedout_job = v3d_tfu_job_timedout,
.free_job = v3d_sched_job_free,
};
@@ -843,13 +856,11 @@ static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
.run_job = v3d_cache_clean_job_run,
- .timedout_job = v3d_generic_job_timedout,
.free_job = v3d_sched_job_free
};
static const struct drm_sched_backend_ops v3d_cpu_sched_ops = {
.run_job = v3d_cpu_job_run,
- .timedout_job = v3d_generic_job_timedout,
.free_job = v3d_cpu_job_free
};
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 5171ffe9012d..7de5a95ee7ca 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -4,6 +4,7 @@
* Copyright (C) 2023 Raspberry Pi
*/
+#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include "v3d_drv.h"
@@ -166,7 +167,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->v3d = v3d;
job->free = free;
- job->file = file_priv;
+ job->file_priv = v3d_priv;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
1, v3d_priv, file_priv->client_id);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 903a6c48ee8b..37c66668df57 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "vbox_drv.h"
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 7f686a0190e6..aa6664542b20 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -12,6 +12,7 @@
#include <linux/vbox_err.h>
#include <drm/drm_damage_helper.h>
+#include <drm/drm_print.h>
#include "vbox_drv.h"
#include "vboxvideo_guest.h"
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 9ff3bade9795..d363c3f0afdf 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -22,6 +22,7 @@
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "hgsmi_channels.h"
@@ -262,8 +263,8 @@ static int vbox_primary_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state = NULL;
if (new_state->crtc) {
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
}
@@ -344,8 +345,8 @@ static int vbox_cursor_atomic_check(struct drm_plane *plane,
int ret;
if (new_state->crtc) {
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- new_state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index dc24c2172fd4..19bf8d023dc8 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -8,6 +8,7 @@
*/
#include <linux/pci.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "vbox_drv.h"
int vbox_mm_init(struct vbox_private *vbox)
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 123ab0ce1781..bb8c40be3250 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -35,6 +35,7 @@ config DRM_VC4_HDMI_CEC
bool "Broadcom VC4 HDMI CEC Support"
depends on DRM_VC4
select CEC_CORE
+ select DRM_DISPLAY_HDMI_CEC_HELPER
help
Choose this option if you have a Broadcom VC4 GPU
and want to use CEC.
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 4aaa587be3a5..46b4474ac41d 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -19,6 +19,7 @@
#include <linux/dma-buf.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index fac624a663ea..e765904e13f3 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include <linux/seq_file.h>
#include <linux/circ_buf.h>
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 960550c166d9..2afc88394d64 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -17,6 +17,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index c7cb1e3a6434..3846996f9028 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -36,6 +36,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 458e5d987964..deeeaebc702f 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -36,6 +36,7 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 255e5817618e..ab16164b5eda 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -30,6 +30,7 @@
#include <linux/dma-fence-array.h>
#include <drm/drm_exec.h>
+#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
#include "uapi/drm/vc4_drm.h"
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 07c91b450f93..1798d1156d10 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -32,12 +32,14 @@
*/
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
#include <drm/display/drm_scdc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
@@ -375,14 +377,6 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
drm_atomic_helper_connector_hdmi_hotplug(connector, status);
- if (status == connector_status_disconnected) {
- cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
- return;
- }
-
- cec_s_phys_addr(vc4_hdmi->cec_adap,
- connector->display_info.source_physical_address, false);
-
if (status != connector_status_connected)
return;
@@ -2384,8 +2378,8 @@ static irqreturn_t vc4_cec_irq_handler_rx_thread(int irq, void *priv)
struct vc4_hdmi *vc4_hdmi = priv;
if (vc4_hdmi->cec_rx_msg.len)
- cec_received_msg(vc4_hdmi->cec_adap,
- &vc4_hdmi->cec_rx_msg);
+ drm_connector_hdmi_cec_received_msg(&vc4_hdmi->connector,
+ &vc4_hdmi->cec_rx_msg);
return IRQ_HANDLED;
}
@@ -2395,15 +2389,17 @@ static irqreturn_t vc4_cec_irq_handler_tx_thread(int irq, void *priv)
struct vc4_hdmi *vc4_hdmi = priv;
if (vc4_hdmi->cec_tx_ok) {
- cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_OK,
- 0, 0, 0, 0);
+ drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector,
+ CEC_TX_STATUS_OK,
+ 0, 0, 0, 0);
} else {
/*
* This CEC implementation makes 1 retry, so if we
* get a NACK, then that means it made 2 attempts.
*/
- cec_transmit_done(vc4_hdmi->cec_adap, CEC_TX_STATUS_NACK,
- 0, 2, 0, 0);
+ drm_connector_hdmi_cec_transmit_done(&vc4_hdmi->connector,
+ CEC_TX_STATUS_NACK,
+ 0, 2, 0, 0);
}
return IRQ_HANDLED;
}
@@ -2560,9 +2556,9 @@ static irqreturn_t vc4_cec_irq_handler(int irq, void *priv)
return ret;
}
-static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
+static int vc4_hdmi_cec_enable(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
/* clock period in microseconds */
const u32 usecs = 1000000 / CEC_CLOCK_FREQ;
@@ -2627,9 +2623,9 @@ static int vc4_hdmi_cec_enable(struct cec_adapter *adap)
return 0;
}
-static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
+static int vc4_hdmi_cec_disable(struct drm_connector *connector)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
@@ -2663,17 +2659,17 @@ static int vc4_hdmi_cec_disable(struct cec_adapter *adap)
return 0;
}
-static int vc4_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+static int vc4_hdmi_cec_adap_enable(struct drm_connector *connector, bool enable)
{
if (enable)
- return vc4_hdmi_cec_enable(adap);
+ return vc4_hdmi_cec_enable(connector);
else
- return vc4_hdmi_cec_disable(adap);
+ return vc4_hdmi_cec_disable(connector);
}
-static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
+static int vc4_hdmi_cec_adap_log_addr(struct drm_connector *connector, u8 log_addr)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *drm = vc4_hdmi->connector.dev;
unsigned long flags;
int idx;
@@ -2699,10 +2695,10 @@ static int vc4_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
return 0;
}
-static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+static int vc4_hdmi_cec_adap_transmit(struct drm_connector *connector, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
- struct vc4_hdmi *vc4_hdmi = cec_get_drvdata(adap);
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct drm_device *dev = vc4_hdmi->connector.dev;
unsigned long flags;
u32 val;
@@ -2745,84 +2741,65 @@ static int vc4_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
return 0;
}
-static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = {
- .adap_enable = vc4_hdmi_cec_adap_enable,
- .adap_log_addr = vc4_hdmi_cec_adap_log_addr,
- .adap_transmit = vc4_hdmi_cec_adap_transmit,
-};
-
-static void vc4_hdmi_cec_release(void *ptr)
-{
- struct vc4_hdmi *vc4_hdmi = ptr;
-
- cec_unregister_adapter(vc4_hdmi->cec_adap);
- vc4_hdmi->cec_adap = NULL;
-}
-
-static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+static int vc4_hdmi_cec_init(struct drm_connector *connector)
{
- struct cec_connector_info conn_info;
+ struct vc4_hdmi *vc4_hdmi = connector_to_vc4_hdmi(connector);
struct platform_device *pdev = vc4_hdmi->pdev;
struct device *dev = &pdev->dev;
int ret;
- if (!of_property_present(dev->of_node, "interrupts")) {
- dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
- return 0;
- }
-
- vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
- vc4_hdmi,
- vc4_hdmi->variant->card_name,
- CEC_CAP_DEFAULTS |
- CEC_CAP_CONNECTOR_INFO, 1);
- ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
- if (ret < 0)
- return ret;
-
- cec_fill_conn_info_from_drm(&conn_info, &vc4_hdmi->connector);
- cec_s_conn_info(vc4_hdmi->cec_adap, &conn_info);
-
if (vc4_hdmi->variant->external_irq_controller) {
ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-rx"),
vc4_cec_irq_handler_rx_bare,
vc4_cec_irq_handler_rx_thread, 0,
"vc4 hdmi cec rx", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ return ret;
ret = devm_request_threaded_irq(dev, platform_get_irq_byname(pdev, "cec-tx"),
vc4_cec_irq_handler_tx_bare,
vc4_cec_irq_handler_tx_thread, 0,
"vc4 hdmi cec tx", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ return ret;
} else {
ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0),
vc4_cec_irq_handler,
vc4_cec_irq_handler_thread, 0,
"vc4 hdmi cec", vc4_hdmi);
if (ret)
- goto err_delete_cec_adap;
+ return ret;
}
- ret = cec_register_adapter(vc4_hdmi->cec_adap, &pdev->dev);
- if (ret < 0)
- goto err_delete_cec_adap;
+ return 0;
+}
+
+static const struct drm_connector_hdmi_cec_funcs vc4_hdmi_cec_funcs = {
+ .init = vc4_hdmi_cec_init,
+ .enable = vc4_hdmi_cec_adap_enable,
+ .log_addr = vc4_hdmi_cec_adap_log_addr,
+ .transmit = vc4_hdmi_cec_adap_transmit,
+};
+
+static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi)
+{
+ struct platform_device *pdev = vc4_hdmi->pdev;
+ struct device *dev = &pdev->dev;
+
+ if (!of_property_present(dev->of_node, "interrupts")) {
+ dev_warn(dev, "'interrupts' DT property is missing, no CEC\n");
+ return 0;
+ }
/*
- * NOTE: Strictly speaking, we should probably use a DRM-managed
- * registration there to avoid removing the CEC adapter by the
- * time the DRM driver doesn't have any user anymore.
+ * NOTE: the CEC adapter will be unregistered by drmm cleanup from
+ * drm_managed_release(), which is called from drm_dev_release()
+ * during device unbind.
*
* However, the CEC framework already cleans up the CEC adapter
* only when the last user has closed its file descriptor, so we
* don't need to handle it in DRM.
*
- * By the time the device-managed hook is executed, we will give
- * up our reference to the CEC adapter and therefore don't
- * really care when it's actually freed.
- *
* There's still a problematic sequence: if we unregister our
* CEC adapter, but the userspace keeps a handle on the CEC
* adapter but not the DRM device for some reason. In such a
@@ -2833,19 +2810,14 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
* the CEC framework already handles this too, by calling
* cec_is_registered() in cec_ioctl() and cec_poll().
*/
- ret = devm_add_action_or_reset(dev, vc4_hdmi_cec_release, vc4_hdmi);
- if (ret)
- return ret;
-
- return 0;
-
-err_delete_cec_adap:
- cec_delete_adapter(vc4_hdmi->cec_adap);
-
- return ret;
+ return drmm_connector_hdmi_cec_register(&vc4_hdmi->connector,
+ &vc4_hdmi_cec_funcs,
+ vc4_hdmi->variant->card_name,
+ 1,
+ &pdev->dev);
}
#else
-static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
+static int vc4_hdmi_cec_register(struct vc4_hdmi *vc4_hdmi)
{
return 0;
}
@@ -3250,7 +3222,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_put_runtime_pm;
- ret = vc4_hdmi_cec_init(vc4_hdmi);
+ ret = vc4_hdmi_cec_register(vc4_hdmi);
if (ret)
goto err_put_runtime_pm;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index a31157c99bee..8d069718df00 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -147,7 +147,6 @@ struct vc4_hdmi {
*/
bool disable_wifi_frequencies;
- struct cec_adapter *cec_adap;
struct cec_msg cec_rx_msg;
bool cec_tx_ok;
bool cec_irq_was_rx;
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 4811d794001f..ee8d0738501b 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 69b399f3b802..63e88f90eef7 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -48,6 +48,7 @@
#include <linux/platform_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 8f983edb81ff..e563c1210937 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index f1342f917cf7..1ac80c0b258f 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -9,6 +9,8 @@
* The V3D block provides 16 hardware counters which can count various events.
*/
+#include <drm/drm_print.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 056d344c5411..f00d4076ba07 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -24,6 +24,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_print.h>
#include "uapi/drm/vc4_drm.h"
@@ -497,8 +498,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
u32 v_subsample = fb->format->vsub;
int ret;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
if (!crtc_state) {
DRM_DEBUG_KMS("Invalid crtc state\n");
return -EINVAL;
@@ -875,8 +875,7 @@ static void vc4_plane_calc_load(struct drm_plane_state *state)
unsigned int vscale_factor;
vc4_state = to_vc4_plane_state(state);
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);
/* The HVS is able to process 2 pixels/cycle when scaling the source,
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 14079853338e..edc471e71c0e 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -35,6 +35,8 @@
* actually fairly low.
*/
+#include <drm/drm_print.h>
+
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_packet.h"
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 42acac05fe47..9082902100e4 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -21,6 +21,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index bb09df5000bd..3ffe09bc89d2 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -10,6 +10,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_print.h>
+
#include "vc4_drv.h"
#include "vc4_regs.h"
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 1e7bdda55698..545c4c3608f5 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -43,6 +43,8 @@
* to use) happens.
*/
+#include <drm/drm_print.h>
+
#include "uapi/drm/vc4_drm.h"
#include "vc4_drv.h"
#include "vc4_packet.h"
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 2d74e786914c..b50b6cdac3f4 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -41,6 +41,8 @@
* this validation is only performed at BO creation time.
*/
+#include <drm/drm_print.h>
+
#include "vc4_drv.h"
#include "vc4_qpu_defines.h"
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 06d702e879b0..b84fad2a5b23 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -17,6 +17,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index fd76730fd38c..07db319c3d7f 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -79,7 +79,7 @@ static struct dma_fence *vgem_fence_create(struct vgem_file *vfile,
dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
dma_fence_context_alloc(1), 1);
- timer_setup(&fence->timer, vgem_fence_timeout, 0);
+ timer_setup(&fence->timer, vgem_fence_timeout, TIMER_IRQSAFE);
/* We force the fence to expire within 10s to prevent driver hangs */
mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 853dd9aa397e..3a68a16b58ae 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -27,6 +27,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index e5805ca646c7..6a962c1d6e95 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,8 +30,11 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "virtgpu_drv.h"
@@ -55,6 +58,7 @@ static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
};
static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = {
@@ -99,6 +103,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ drm_crtc_vblank_on(crtc);
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -108,6 +113,8 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+ drm_crtc_vblank_off(crtc);
+
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
virtio_gpu_notify(vgdev);
}
@@ -121,9 +128,10 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
- crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+ struct drm_pending_vblank_event *event;
/*
* virtio-gpu can't do modeset and plane update operations
@@ -131,9 +139,22 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
* in the plane update callback, and here we just check
* whether we must force the modeset.
*/
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
output->needs_modeset = true;
+
+ spin_lock_irq(&dev->event_lock);
+
+ event = crtc_state->event;
+ crtc_state->event = NULL;
+
+ if (event) {
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
}
+
+ spin_unlock_irq(&dev->event_lock);
}
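For reference, a minimal sketch of the event-delivery rule used in the flush above, under the standard drm_vblank contract (drm_crtc_vblank_get() succeeds only while vblank interrupts can be enabled; an armed event is completed, and its reference dropped, by the core at the next vblank). The helper name is hypothetical:

	static void send_or_arm_vblank_event(struct drm_crtc *crtc,
					     struct drm_pending_vblank_event *event)
	{
		/* If a vblank reference can be taken, arm the event so it
		 * signals at the next vblank; otherwise complete it
		 * immediately so userspace never waits on a flip that
		 * cannot signal. Caller holds dev->event_lock.
		 */
		if (drm_crtc_vblank_get(crtc) == 0)
			drm_crtc_arm_vblank_event(crtc, event);
		else
			drm_crtc_send_vblank_event(crtc, event);
	}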
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -258,6 +279,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
struct drm_encoder *encoder = &output->enc;
struct drm_crtc *crtc = &output->crtc;
struct drm_plane *primary, *cursor;
+ int ret;
output->index = index;
if (index == 0) {
@@ -272,8 +294,10 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
if (IS_ERR(cursor))
return PTR_ERR(cursor);
- drm_crtc_init_with_planes(dev, crtc, primary, cursor,
- &virtio_gpu_crtc_funcs, NULL);
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+ &virtio_gpu_crtc_funcs, NULL);
+ if (ret)
+ return ret;
drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
@@ -357,6 +381,10 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
vgdev_output_init(vgdev, i);
+ ret = drm_vblank_init(vgdev->ddev, vgdev->num_scanouts);
+ if (ret)
+ return ret;
+
drm_mode_config_reset(vgdev->ddev);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 71c6ccad4b99..a5ce96fb8a1d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -39,6 +39,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 7dfb2006c561..f3594695bb82 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -29,6 +29,7 @@
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
@@ -162,18 +163,18 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL))
vgdev->has_virgl_3d = true;
#endif
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID))
vgdev->has_edid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC))
vgdev->has_indirect = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID))
vgdev->has_resource_assign_uuid = true;
- }
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB))
vgdev->has_resource_blob = true;
- }
+
if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
if (!devm_request_mem_region(&vgdev->vdev->dev,
@@ -193,9 +194,9 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
(unsigned long)vgdev->host_visible_region.addr,
(unsigned long)vgdev->host_visible_region.len);
}
- if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
+
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT))
vgdev->has_context_init = true;
- }
DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
vgdev->has_virgl_3d ? '+' : '-',
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 5517cff8715c..4270bfede7b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -26,6 +26,8 @@
#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
+#include <drm/drm_print.h>
+
#include "virtgpu_drv.h"
static int virtio_gpu_virglrenderer_workaround = 1;
@@ -47,6 +49,7 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
+
if (handle < 0)
return handle;
*resid = handle + 1;
@@ -56,9 +59,8 @@ int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
- if (!virtio_gpu_virglrenderer_workaround) {
+ if (!virtio_gpu_virglrenderer_workaround)
ida_free(&vgdev->resource_ida, id - 1);
- }
}
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 698ea7adb951..a7863f8ee4ee 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -30,6 +30,7 @@
#include <linux/virtio_dma_buf.h>
#include <drm/drm_managed.h>
#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
@@ -120,7 +121,7 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
crtc_state = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_state))
- return PTR_ERR(crtc_state);
+ return PTR_ERR(crtc_state);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_NO_SCALING,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 55a15e247dd1..0c194b4e9488 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -32,6 +32,7 @@
#include <linux/virtio_ring.h>
#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
#include "virtgpu_drv.h"
#include "virtgpu_trace.h"
@@ -248,6 +249,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
+
cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
le32_to_cpu(resp->type),
@@ -468,6 +470,7 @@ static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
int sg_ents;
+
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
&sg_ents);
if (!sgt) {
diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig
index 3c02f928ffe6..3977bbb99f7d 100644
--- a/drivers/gpu/drm/vkms/Kconfig
+++ b/drivers/gpu/drm/vkms/Kconfig
@@ -7,6 +7,7 @@ config DRM_VKMS
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select CRC32
+ select CONFIGFS_FS
default n
help
Virtual Kernel Mode-Setting (VKMS) is used for testing or for
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index d657865e573f..9bb264091c38 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -8,7 +8,10 @@ vkms-y := \
vkms_composer.o \
vkms_writeback.o \
vkms_connector.o \
- vkms_config.o
+ vkms_config.o \
+ vkms_configfs.o \
+ vkms_colorop.o \
+ vkms_luts.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/
diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile
index 5750f0bd9d40..d4d9ba8d4c54 100644
--- a/drivers/gpu/drm/vkms/tests/Makefile
+++ b/drivers/gpu/drm/vkms/tests/Makefile
@@ -2,6 +2,7 @@
vkms-kunit-tests-y := \
vkms_config_test.o \
- vkms_format_test.o
+ vkms_format_test.o \
+ vkms_color_test.o
obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms-kunit-tests.o
diff --git a/drivers/gpu/drm/vkms/tests/vkms_color_test.c b/drivers/gpu/drm/vkms/tests/vkms_color_test.c
new file mode 100644
index 000000000000..1a1c7cac2f15
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_color_test.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_fixed.h>
+#include <drm/drm_mode.h>
+#include "../vkms_composer.h"
+#include "../vkms_drv.h"
+#include "../vkms_luts.h"
+
+#define TEST_LUT_SIZE 16
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+static struct drm_color_lut test_linear_array[TEST_LUT_SIZE] = {
+ { 0x0, 0x0, 0x0, 0 },
+ { 0x1111, 0x1111, 0x1111, 0 },
+ { 0x2222, 0x2222, 0x2222, 0 },
+ { 0x3333, 0x3333, 0x3333, 0 },
+ { 0x4444, 0x4444, 0x4444, 0 },
+ { 0x5555, 0x5555, 0x5555, 0 },
+ { 0x6666, 0x6666, 0x6666, 0 },
+ { 0x7777, 0x7777, 0x7777, 0 },
+ { 0x8888, 0x8888, 0x8888, 0 },
+ { 0x9999, 0x9999, 0x9999, 0 },
+ { 0xaaaa, 0xaaaa, 0xaaaa, 0 },
+ { 0xbbbb, 0xbbbb, 0xbbbb, 0 },
+ { 0xcccc, 0xcccc, 0xcccc, 0 },
+ { 0xdddd, 0xdddd, 0xdddd, 0 },
+ { 0xeeee, 0xeeee, 0xeeee, 0 },
+ { 0xffff, 0xffff, 0xffff, 0 },
+};
+
+/* lerp test parameters */
+struct vkms_color_test_lerp_params {
+ s64 t;
+ __u16 a;
+ __u16 b;
+ __u16 expected;
+};
+
+/* lerp test cases */
+static const struct vkms_color_test_lerp_params color_test_lerp_cases[] = {
+ /* Half-way round down */
+ { 0x80000000 - 1, 0x0, 0x10, 0x8 },
+ { 0x80000000 - 1, 0x1, 0x10, 0x8 }, /* Odd a */
+ { 0x80000000 - 1, 0x1, 0xf, 0x8 }, /* Odd b */
+ { 0x80000000 - 1, 0x10, 0x10, 0x10 }, /* b = a */
+ { 0x80000000 - 1, 0x10, 0x11, 0x10 }, /* b = a + 1 */
+ /* Half-way round up */
+ { 0x80000000, 0x0, 0x10, 0x8 },
+ { 0x80000000, 0x1, 0x10, 0x9 }, /* Odd a */
+ { 0x80000000, 0x1, 0xf, 0x8 }, /* Odd b */
+ { 0x80000000, 0x10, 0x10, 0x10 }, /* b = a */
+ { 0x80000000, 0x10, 0x11, 0x11 }, /* b = a + 1 */
+ /* t = 0.0 */
+ { 0x0, 0x0, 0x10, 0x0 },
+ { 0x0, 0x1, 0x10, 0x1 }, /* Odd a */
+ { 0x0, 0x1, 0xf, 0x1 }, /* Odd b */
+ { 0x0, 0x10, 0x10, 0x10 }, /* b = a */
+ { 0x0, 0x10, 0x11, 0x10 }, /* b = a + 1 */
+ /* t = 1.0 */
+ { 0x100000000, 0x0, 0x10, 0x10 },
+ { 0x100000000, 0x1, 0x10, 0x10 }, /* Odd a */
+ { 0x100000000, 0x1, 0xf, 0xf }, /* Odd b */
+ { 0x100000000, 0x10, 0x10, 0x10 }, /* b = a */
+ { 0x100000000, 0x10, 0x11, 0x11 }, /* b = a + 1 */
+ /* t = 0.0 + 1 */
+ { 0x0 + 1, 0x0, 0x10, 0x0 },
+ { 0x0 + 1, 0x1, 0x10, 0x1 }, /* Odd a */
+ { 0x0 + 1, 0x1, 0xf, 0x1 }, /* Odd b */
+ { 0x0 + 1, 0x10, 0x10, 0x10 }, /* b = a */
+ { 0x0 + 1, 0x10, 0x11, 0x10 }, /* b = a + 1 */
+ /* t = 1.0 - 1 */
+ { 0x100000000 - 1, 0x0, 0x10, 0x10 },
+ { 0x100000000 - 1, 0x1, 0x10, 0x10 }, /* Odd a */
+ { 0x100000000 - 1, 0x1, 0xf, 0xf }, /* Odd b */
+ { 0x100000000 - 1, 0x10, 0x10, 0x10 }, /* b = a */
+ { 0x100000000 - 1, 0x10, 0x11, 0x11 }, /* b = a + 1 */
+ /* t chosen to verify the point where the result flips from a (or b) to a+1 (or b-1) */
+ { 0x80000000 - 1, 0x0, 0x1, 0x0 },
+ { 0x80000000, 0x0, 0x1, 0x1 },
+};
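The t column is a 32.32 fixed-point fraction (0x80000000 is 0.5, 0x100000000 is 1.0). A standalone sketch consistent with the table -- an assumption, not necessarily the driver's lerp_u16 implementation -- interpolates and rounds ties upward:

	#include <assert.h>
	#include <stdint.h>

	/* Sketch: a + (b - a) * t with t in 32.32 fixed point,
	 * rounding ties up, as the half-way cases above require. */
	static uint16_t lerp_u16_sketch(uint16_t a, uint16_t b, int64_t t)
	{
		int64_t delta = (int64_t)b - (int64_t)a;

		return (uint16_t)(a + ((delta * t + 0x80000000LL) >> 32));
	}

	int main(void)
	{
		assert(lerp_u16_sketch(0x0, 0x1, 0x7fffffffLL) == 0x0);   /* just under half */
		assert(lerp_u16_sketch(0x0, 0x1, 0x80000000LL) == 0x1);   /* half rounds up */
		assert(lerp_u16_sketch(0x1, 0x10, 0x80000000LL) == 0x9);
		assert(lerp_u16_sketch(0x10, 0x11, 0x100000000LL) == 0x11);
		return 0;
	}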
+
+static const struct vkms_color_lut test_linear_lut = {
+ .base = test_linear_array,
+ .lut_length = TEST_LUT_SIZE,
+ .channel_value2index_ratio = 0xf000fll
+};
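The magic channel_value2index_ratio is (lut_length - 1) << 32 divided by 0xffff: the 32.32 fixed-point factor mapping a full-scale u16 channel value onto the last LUT index. For 16 entries that is 15 * 0x10001 == 0xf000f. The same arithmetic explains the srgb_eotf expectations in the test below if those LUTs have 256 entries (an assumption): value 0x101 lands just under index 1, so its ceiling is 1. A quick standalone check:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t ratio16 = ((uint64_t)(16 - 1) << 32) / 0xffff;
		uint64_t ratio256 = ((uint64_t)(256 - 1) << 32) / 0xffff;
		uint64_t idx = 0x101 * ratio256;            /* index in 32.32 fixed point */

		assert(ratio16 == 0xf000fULL);              /* == 15 * 0x10001 */
		assert((idx >> 32) == 0);                   /* floor(index) == 0 */
		assert(((idx + 0xffffffffULL) >> 32) == 1); /* ceil(index) == 1 */
		return 0;
	}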
+
+static void vkms_color_test_get_lut_index(struct kunit *test)
+{
+ s64 lut_index;
+ int i;
+
+ lut_index = get_lut_index(&test_linear_lut, test_linear_array[0].red);
+ KUNIT_EXPECT_EQ(test, drm_fixp2int(lut_index), 0);
+
+ for (i = 0; i < TEST_LUT_SIZE; i++) {
+ lut_index = get_lut_index(&test_linear_lut, test_linear_array[i].red);
+ KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(lut_index), i);
+ }
+
+ KUNIT_EXPECT_EQ(test, drm_fixp2int(get_lut_index(&srgb_eotf, 0x0)), 0x0);
+ KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x0)), 0x0);
+ KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x101)), 0x1);
+ KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0x202)), 0x2);
+
+ KUNIT_EXPECT_EQ(test, drm_fixp2int(get_lut_index(&srgb_inv_eotf, 0x0)), 0x0);
+ KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x0)), 0x0);
+ KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x101)), 0x1);
+ KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_inv_eotf, 0x202)), 0x2);
+
+ KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0xfefe)), 0xfe);
+ KUNIT_EXPECT_EQ(test, drm_fixp2int_ceil(get_lut_index(&srgb_eotf, 0xffff)), 0xff);
+}
+
+static void vkms_color_test_lerp(struct kunit *test)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(color_test_lerp_cases); i++) {
+ const struct vkms_color_test_lerp_params *params = &color_test_lerp_cases[i];
+
+ KUNIT_EXPECT_EQ(test, lerp_u16(params->a, params->b, params->t), params->expected);
+ }
+}
+
+static void vkms_color_test_linear(struct kunit *test)
+{
+ for (int i = 0; i < LUT_SIZE; i++) {
+ int linear = apply_lut_to_channel_value(&linear_eotf, i * 0x101, LUT_RED);
+
+ KUNIT_EXPECT_EQ(test, DIV_ROUND_CLOSEST(linear, 0x101), i);
+ }
+}
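The i * 0x101 factor is the usual 8-to-16-bit widening by bit replication (0xff * 0x101 == 0xffff), and DIV_ROUND_CLOSEST(x, 0x101) inverts it exactly, so a truly linear LUT must reproduce i:

	#include <assert.h>
	#include <stdint.h>

	/* 8-bit -> 16-bit widening: 0x00 -> 0x0000, 0x7f -> 0x7f7f, 0xff -> 0xffff */
	static uint16_t expand8to16(uint8_t v)
	{
		return (uint16_t)(v * 0x101);
	}

	int main(void)
	{
		for (unsigned int i = 0; i <= 0xff; i++)
			assert((expand8to16(i) + 0x80) / 0x101 == i); /* DIV_ROUND_CLOSEST */
		return 0;
	}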
+
+static void vkms_color_srgb_inv_srgb(struct kunit *test)
+{
+ u16 srgb, final;
+
+ for (int i = 0; i < LUT_SIZE; i++) {
+ srgb = apply_lut_to_channel_value(&srgb_eotf, i * 0x101, LUT_RED);
+ final = apply_lut_to_channel_value(&srgb_inv_eotf, srgb, LUT_RED);
+
+ KUNIT_EXPECT_GE(test, final / 0x101, i - 1);
+ KUNIT_EXPECT_LE(test, final / 0x101, i + 1);
+ }
+}
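The ±1 tolerance is roughly what composing two sampled transfer-function LUTs can be expected to cost. For reference, the analytic sRGB curves the LUTs approximate (standard IEC 61966-2-1 formulas) round-trip exactly in floating point:

	#include <assert.h>
	#include <math.h>

	static double srgb_eotf(double v)     /* encoded -> linear */
	{
		return v <= 0.04045 ? v / 12.92 : pow((v + 0.055) / 1.055, 2.4);
	}

	static double srgb_inv_eotf(double v) /* linear -> encoded */
	{
		return v <= 0.0031308 ? v * 12.92 : 1.055 * pow(v, 1.0 / 2.4) - 0.055;
	}

	int main(void)
	{
		for (int i = 0; i <= 255; i++) {
			double v = i / 255.0;

			assert(fabs(srgb_inv_eotf(srgb_eotf(v)) - v) < 1e-9);
		}
		return 0;
	}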
+
+#define FIXPT_HALF (DRM_FIXED_ONE >> 1)
+#define FIXPT_QUARTER (DRM_FIXED_ONE >> 2)
+
+static const struct drm_color_ctm_3x4 test_matrix_3x4_50_desat = { {
+ FIXPT_HALF, FIXPT_QUARTER, FIXPT_QUARTER, 0,
+ FIXPT_QUARTER, FIXPT_HALF, FIXPT_QUARTER, 0,
+ FIXPT_QUARTER, FIXPT_QUARTER, FIXPT_HALF, 0
+} };
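Each row of this matrix sums to one, so white, black and any grey are fixed points -- which the first three checks in the test below rely on. For pure red, row-times-vector arithmetic gives the "full red to 50% desat" reference values (with round-to-nearest, 0.5 * 0xffff = 32767.5 rounds up to 0x8000):

	#include <stdio.h>

	int main(void)
	{
		double r = 0xffff, g = 0x0, b = 0x0;

		printf("r' = %.2f\n", 0.50 * r + 0.25 * g + 0.25 * b); /* 32767.50 -> 0x8000 */
		printf("g' = %.2f\n", 0.25 * r + 0.50 * g + 0.25 * b); /* 16383.75 -> 0x4000 */
		printf("b' = %.2f\n", 0.25 * r + 0.25 * g + 0.50 * b); /* 16383.75 -> 0x4000 */
		return 0;
	}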
+
+static void vkms_color_ctm_3x4_50_desat(struct kunit *test)
+{
+ struct pixel_argb_s32 ref, out;
+
+ /* full white */
+ ref.a = 0xffff;
+ ref.r = 0xffff;
+ ref.g = 0xffff;
+ ref.b = 0xffff;
+
+ memcpy(&out, &ref, sizeof(out));
+ apply_3x4_matrix(&out, &test_matrix_3x4_50_desat);
+
+ KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out));
+
+ /* full black */
+ ref.a = 0xffff;
+ ref.r = 0x0;
+ ref.g = 0x0;
+ ref.b = 0x0;
+
+ memcpy(&out, &ref, sizeof(out));
+ apply_3x4_matrix(&out, &test_matrix_3x4_50_desat);
+
+ KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out));
+
+ /* 50% grey */
+ ref.a = 0xffff;
+ ref.r = 0x8000;
+ ref.g = 0x8000;
+ ref.b = 0x8000;
+
+ memcpy(&out, &ref, sizeof(out));
+ apply_3x4_matrix(&out, &test_matrix_3x4_50_desat);
+
+ KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out));
+
+ /* full red to 50% desat */
+ ref.a = 0xffff;
+ ref.r = 0x8000;
+ ref.g = 0x4000;
+ ref.b = 0x4000;
+
+ out.a = 0xffff;
+ out.r = 0xffff;
+ out.g = 0x0;
+ out.b = 0x0;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_50_desat);
+
+ KUNIT_EXPECT_MEMEQ(test, &ref, &out, sizeof(out));
+}
+
+/*
+ * BT.709 encoding matrix
+ *
+ * Values printed from within IGT when converting
+ * igt_matrix_3x4_bt709_enc to the fixed-point format expected
+ * by DRM/KMS.
+ */
+static const struct drm_color_ctm_3x4 test_matrix_3x4_bt709_enc = { {
+ 0x00000000366cf400ull, 0x00000000b7175900ull, 0x0000000127bb300ull, 0,
+ 0x800000001993b3a0ull, 0x800000005609fe80ull, 0x000000006f9db200ull, 0,
+ 0x000000009d70a400ull, 0x800000008f011100ull, 0x800000000e6f9330ull, 0
+} };
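drm_color_ctm coefficients use sign-magnitude S31.32 fixed point: bit 63 is the sign, the low 32 bits are the fraction. A small decoder makes the table legible -- the first row comes out as the BT.709 luma weights (about 0.2126, 0.7152, 0.0722):

	#include <stdint.h>
	#include <stdio.h>

	/* Decode a sign-magnitude S31.32 drm_color_ctm coefficient. */
	static double ctm_to_double(uint64_t c)
	{
		double v = (double)(c & ~(1ULL << 63)) / 4294967296.0; /* / 2^32 */

		return (c & (1ULL << 63)) ? -v : v;
	}

	int main(void)
	{
		printf("%f\n", ctm_to_double(0x00000000366cf400ULL)); /* ~0.2126  */
		printf("%f\n", ctm_to_double(0x00000000b7175900ULL)); /* ~0.7152  */
		printf("%f\n", ctm_to_double(0x800000001993b3a0ULL)); /* ~-0.0999 */
		return 0;
	}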
+
+static void vkms_color_ctm_3x4_bt709(struct kunit *test)
+{
+ struct pixel_argb_s32 out;
+
+ /* full white to bt709 */
+ out.a = 0xffff;
+ out.r = 0xffff;
+ out.g = 0xffff;
+ out.b = 0xffff;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc);
+
+ /* Y 255 */
+ KUNIT_EXPECT_GT(test, out.r, 0xfe00);
+ KUNIT_EXPECT_LT(test, out.r, 0x10000);
+
+ /* U 0 */
+ KUNIT_EXPECT_LT(test, out.g, 0x0100);
+
+ /* V 0 */
+ KUNIT_EXPECT_LT(test, out.b, 0x0100);
+
+ /* full black to bt709 */
+ out.a = 0xffff;
+ out.r = 0x0;
+ out.g = 0x0;
+ out.b = 0x0;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc);
+
+ /* Y 0 */
+ KUNIT_EXPECT_LT(test, out.r, 0x100);
+
+ /* U 0 */
+ KUNIT_EXPECT_LT(test, out.g, 0x0100);
+
+ /* V 0 */
+ KUNIT_EXPECT_LT(test, out.b, 0x0100);
+
+ /* gray to bt709 */
+ out.a = 0xffff;
+ out.r = 0x7fff;
+ out.g = 0x7fff;
+ out.b = 0x7fff;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc);
+
+ /* Y 127 */
+ KUNIT_EXPECT_GT(test, out.r, 0x7e00);
+ KUNIT_EXPECT_LT(test, out.r, 0x8000);
+
+ /* U 0 */
+ KUNIT_EXPECT_LT(test, out.g, 0x0100);
+
+ /* V 0 */
+ KUNIT_EXPECT_LT(test, out.b, 0x0100);
+
+ /* == red 255 - bt709 enc == */
+ out.a = 0xffff;
+ out.r = 0xffff;
+ out.g = 0x0;
+ out.b = 0x0;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc);
+
+ /* Y 54 */
+ KUNIT_EXPECT_GT(test, out.r, 0x3500);
+ KUNIT_EXPECT_LT(test, out.r, 0x3700);
+
+ /* U 0 */
+ KUNIT_EXPECT_LT(test, out.g, 0x0100);
+
+ /* V 157 */
+ KUNIT_EXPECT_GT(test, out.b, 0x9C00);
+ KUNIT_EXPECT_LT(test, out.b, 0x9E00);
+
+ /* == green 255 - bt709 enc == */
+ out.a = 0xffff;
+ out.r = 0x0;
+ out.g = 0xffff;
+ out.b = 0x0;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc);
+
+ /* Y 182 */
+ KUNIT_EXPECT_GT(test, out.r, 0xB500);
+ KUNIT_EXPECT_LT(test, out.r, 0xB780); /* relaxed by half */
+
+ /* U 0 */
+ KUNIT_EXPECT_LT(test, out.g, 0x0100);
+
+ /* V 0 */
+ KUNIT_EXPECT_LT(test, out.b, 0x0100);
+
+ /* == blue 255 - bt709 enc == */
+ out.a = 0xffff;
+ out.r = 0x0;
+ out.g = 0x0;
+ out.b = 0xffff;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc);
+
+ /* Y 18 */
+ KUNIT_EXPECT_GT(test, out.r, 0x1100);
+ KUNIT_EXPECT_LT(test, out.r, 0x1300);
+
+ /* U 111 */
+ KUNIT_EXPECT_GT(test, out.g, 0x6E00);
+ KUNIT_EXPECT_LT(test, out.g, 0x7000);
+
+ /* V 0 */
+ KUNIT_EXPECT_LT(test, out.b, 0x0100);
+
+ /* == red 140 - bt709 enc == */
+ out.a = 0xffff;
+ out.r = 0x8c8c;
+ out.g = 0x0;
+ out.b = 0x0;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc);
+
+ /* Y 30 */
+ KUNIT_EXPECT_GT(test, out.r, 0x1D00);
+ KUNIT_EXPECT_LT(test, out.r, 0x1F00);
+
+ /* U 0 */
+ KUNIT_EXPECT_LT(test, out.g, 0x100);
+
+ /* V 87 */
+ KUNIT_EXPECT_GT(test, out.b, 0x5600);
+ KUNIT_EXPECT_LT(test, out.b, 0x5800);
+
+ /* == green 140 - bt709 enc == */
+ out.a = 0xffff;
+ out.r = 0x0;
+ out.g = 0x8c8c;
+ out.b = 0x0;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc);
+
+ /* Y 100 */
+ KUNIT_EXPECT_GT(test, out.r, 0x6400);
+ KUNIT_EXPECT_LT(test, out.r, 0x6600);
+
+ /* U 0 */
+ KUNIT_EXPECT_LT(test, out.g, 0x100);
+
+ /* V 0 */
+ KUNIT_EXPECT_LT(test, out.b, 0x100);
+
+ /* == blue 140 - bt709 enc == */
+ out.a = 0xffff;
+ out.r = 0x0;
+ out.g = 0x0;
+ out.b = 0x8c8c;
+
+ apply_3x4_matrix(&out, &test_matrix_3x4_bt709_enc);
+
+ /* Y 10 */
+ KUNIT_EXPECT_GT(test, out.r, 0x900);
+ KUNIT_EXPECT_LT(test, out.r, 0xB00);
+
+ /* U 61 */
+ KUNIT_EXPECT_GT(test, out.g, 0x3C00);
+ KUNIT_EXPECT_LT(test, out.g, 0x3E00);
+
+ /* V 0 */
+ KUNIT_EXPECT_LT(test, out.b, 0x100);
+}
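Every Y expectation in this test follows from the BT.709 luma weights in the first matrix row: scale the weight by the 8-bit input value and the result sits inside the bracketing bounds. A quick cross-check (assuming full-range encoding):

	#include <stdio.h>

	int main(void)
	{
		const double kr = 0.2126, kg = 0.7152, kb = 0.0722;

		printf("red 255:   Y ~ %5.1f\n", kr * 255); /* ~54.2  */
		printf("green 255: Y ~ %5.1f\n", kg * 255); /* ~182.4 */
		printf("blue 255:  Y ~ %5.1f\n", kb * 255); /* ~18.4  */
		printf("red 140:   Y ~ %5.1f\n", kr * 140); /* ~29.8  */
		printf("green 140: Y ~ %5.1f\n", kg * 140); /* ~100.1 */
		printf("blue 140:  Y ~ %5.1f\n", kb * 140); /* ~10.1  */
		return 0;
	}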
+
+static struct kunit_case vkms_color_test_cases[] = {
+ KUNIT_CASE(vkms_color_test_get_lut_index),
+ KUNIT_CASE(vkms_color_test_lerp),
+ KUNIT_CASE(vkms_color_test_linear),
+ KUNIT_CASE(vkms_color_srgb_inv_srgb),
+ KUNIT_CASE(vkms_color_ctm_3x4_50_desat),
+ KUNIT_CASE(vkms_color_ctm_3x4_bt709),
+ {}
+};
+
+static struct kunit_suite vkms_color_test_suite = {
+ .name = "vkms-color",
+ .test_cases = vkms_color_test_cases,
+};
+
+kunit_test_suite(vkms_color_test_suite);
+
+MODULE_DESCRIPTION("Kunit test for VKMS LUT handling");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
index ff4566cf9925..1e4ea1863420 100644
--- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c
+++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c
@@ -83,6 +83,7 @@ struct default_config_case {
bool enable_cursor;
bool enable_writeback;
bool enable_overlay;
+ bool enable_plane_pipeline;
};
static void vkms_config_test_empty_config(struct kunit *test)
@@ -108,14 +109,22 @@ static void vkms_config_test_empty_config(struct kunit *test)
}
static struct default_config_case default_config_cases[] = {
- { false, false, false },
- { true, false, false },
- { true, true, false },
- { true, false, true },
- { false, true, false },
- { false, true, true },
- { false, false, true },
- { true, true, true },
+ { false, false, false, false },
+ { true, false, false, false },
+ { true, true, false, false },
+ { true, false, true, false },
+ { false, true, false, false },
+ { false, true, true, false },
+ { false, false, true, false },
+ { true, true, true, false },
+ { false, false, false, true },
+ { true, false, false, true },
+ { true, true, false, true },
+ { true, false, true, true },
+ { false, true, false, true },
+ { false, true, true, true },
+ { false, false, true, true },
+ { true, true, true, true },
};
KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL);
@@ -132,11 +141,15 @@ static void vkms_config_test_default_config(struct kunit *test)
config = vkms_config_default_create(params->enable_cursor,
params->enable_writeback,
- params->enable_overlay);
+ params->enable_overlay,
+ params->enable_plane_pipeline);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
/* Planes */
vkms_config_for_each_plane(config, plane_cfg) {
+ KUNIT_EXPECT_EQ(test,
+ vkms_config_plane_get_default_pipeline(plane_cfg),
+ params->enable_plane_pipeline);
switch (vkms_config_plane_get_type(plane_cfg)) {
case DRM_PLANE_TYPE_PRIMARY:
n_primaries++;
@@ -200,6 +213,7 @@ static void vkms_config_test_get_planes(struct kunit *test)
KUNIT_ASSERT_EQ(test, n_planes, 0);
plane_cfg1 = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
vkms_config_for_each_plane(config, plane_cfg) {
n_planes++;
if (plane_cfg != plane_cfg1)
@@ -209,6 +223,7 @@ static void vkms_config_test_get_planes(struct kunit *test)
n_planes = 0;
plane_cfg2 = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
vkms_config_for_each_plane(config, plane_cfg) {
n_planes++;
if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2)
@@ -242,6 +257,7 @@ static void vkms_config_test_get_crtcs(struct kunit *test)
KUNIT_FAIL(test, "Unexpected CRTC");
crtc_cfg1 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1);
vkms_config_for_each_crtc(config, crtc_cfg) {
if (crtc_cfg != crtc_cfg1)
@@ -249,6 +265,7 @@ static void vkms_config_test_get_crtcs(struct kunit *test)
}
crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2);
vkms_config_for_each_crtc(config, crtc_cfg) {
if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2)
@@ -280,6 +297,7 @@ static void vkms_config_test_get_encoders(struct kunit *test)
KUNIT_ASSERT_EQ(test, n_encoders, 0);
encoder_cfg1 = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
vkms_config_for_each_encoder(config, encoder_cfg) {
n_encoders++;
if (encoder_cfg != encoder_cfg1)
@@ -289,6 +307,7 @@ static void vkms_config_test_get_encoders(struct kunit *test)
n_encoders = 0;
encoder_cfg2 = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
vkms_config_for_each_encoder(config, encoder_cfg) {
n_encoders++;
if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2)
@@ -324,6 +343,7 @@ static void vkms_config_test_get_connectors(struct kunit *test)
KUNIT_ASSERT_EQ(test, n_connectors, 0);
connector_cfg1 = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
vkms_config_for_each_connector(config, connector_cfg) {
n_connectors++;
if (connector_cfg != connector_cfg1)
@@ -333,6 +353,7 @@ static void vkms_config_test_get_connectors(struct kunit *test)
n_connectors = 0;
connector_cfg2 = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
vkms_config_for_each_connector(config, connector_cfg) {
n_connectors++;
if (connector_cfg != connector_cfg1 &&
@@ -360,7 +381,7 @@ static void vkms_config_test_invalid_plane_number(struct kunit *test)
struct vkms_config_plane *plane_cfg;
int n;
- config = vkms_config_default_create(false, false, false);
+ config = vkms_config_default_create(false, false, false, false);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
/* Invalid: No planes */
@@ -370,7 +391,7 @@ static void vkms_config_test_invalid_plane_number(struct kunit *test)
/* Invalid: Too many planes */
for (n = 0; n <= 32; n++)
- vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_plane(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -385,7 +406,7 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
struct vkms_config_encoder *encoder_cfg;
int err;
- config = vkms_config_default_create(false, false, false);
+ config = vkms_config_default_create(false, false, false, false);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
plane_cfg = get_first_plane(config);
@@ -395,6 +416,7 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: No primary plane */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -402,11 +424,13 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: Multiple primary planes */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -419,11 +443,13 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: Multiple cursor planes */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -437,12 +463,16 @@ static void vkms_config_test_valid_plane_type(struct kunit *test)
/* Invalid: Second CRTC without primary plane */
crtc_cfg = vkms_config_create_crtc(config);
encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg);
+
err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
/* Valid: Second CRTC with a primary plane */
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -457,7 +487,7 @@ static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test)
struct vkms_config_plane *plane_cfg;
struct vkms_config_crtc *crtc_cfg;
- config = vkms_config_default_create(false, false, false);
+ config = vkms_config_default_create(false, false, false, false);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
plane_cfg = get_first_plane(config);
@@ -476,7 +506,7 @@ static void vkms_config_test_invalid_crtc_number(struct kunit *test)
struct vkms_config_crtc *crtc_cfg;
int n;
- config = vkms_config_default_create(false, false, false);
+ config = vkms_config_default_create(false, false, false, false);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
/* Invalid: No CRTCs */
@@ -486,7 +516,7 @@ static void vkms_config_test_invalid_crtc_number(struct kunit *test)
/* Invalid: Too many CRTCs */
for (n = 0; n <= 32; n++)
- vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_crtc(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -499,7 +529,7 @@ static void vkms_config_test_invalid_encoder_number(struct kunit *test)
struct vkms_config_encoder *encoder_cfg;
int n;
- config = vkms_config_default_create(false, false, false);
+ config = vkms_config_default_create(false, false, false, false);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
/* Invalid: No encoders */
@@ -509,7 +539,7 @@ static void vkms_config_test_invalid_encoder_number(struct kunit *test)
/* Invalid: Too many encoders */
for (n = 0; n <= 32; n++)
- vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_encoder(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -524,19 +554,22 @@ static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test)
struct vkms_config_encoder *encoder_cfg;
int err;
- config = vkms_config_default_create(false, false, false);
+ config = vkms_config_default_create(false, false, false, false);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
crtc_cfg1 = get_first_crtc(config);
/* Invalid: Encoder without a possible CRTC */
encoder_cfg = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg);
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
/* Valid: Second CRTC with shared encoder */
crtc_cfg2 = vkms_config_create_crtc(config);
-
plane_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg);
+
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY);
err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2);
KUNIT_EXPECT_EQ(test, err, 0);
@@ -567,7 +600,7 @@ static void vkms_config_test_invalid_connector_number(struct kunit *test)
struct vkms_config_connector *connector_cfg;
int n;
- config = vkms_config_default_create(false, false, false);
+ config = vkms_config_default_create(false, false, false, false);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
/* Invalid: No connectors */
@@ -577,7 +610,7 @@ static void vkms_config_test_invalid_connector_number(struct kunit *test)
/* Invalid: Too many connectors */
for (n = 0; n <= 32; n++)
- connector_cfg = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_connector(config));
KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config));
@@ -590,7 +623,7 @@ static void vkms_config_test_valid_connector_possible_encoders(struct kunit *tes
struct vkms_config_encoder *encoder_cfg;
struct vkms_config_connector *connector_cfg;
- config = vkms_config_default_create(false, false, false);
+ config = vkms_config_default_create(false, false, false, false);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
encoder_cfg = get_first_encoder(config);
@@ -669,13 +702,19 @@ static void vkms_config_test_plane_attach_crtc(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
overlay_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, overlay_cfg);
vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY);
+
primary_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, primary_cfg);
vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY);
+
cursor_cfg = vkms_config_create_plane(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cursor_cfg);
vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR);
crtc_cfg = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg);
/* No primary or cursor planes */
KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg));
@@ -735,6 +774,11 @@ static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test)
crtc_cfg1 = vkms_config_create_crtc(config);
crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+
/* No possible CRTCs */
vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc)
KUNIT_FAIL(test, "Unexpected possible CRTC");
@@ -799,6 +843,11 @@ static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test)
crtc_cfg1 = vkms_config_create_crtc(config);
crtc_cfg2 = vkms_config_create_crtc(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2);
+
/* No possible CRTCs */
vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc)
KUNIT_FAIL(test, "Unexpected possible CRTC");
@@ -863,6 +912,11 @@ static void vkms_config_test_connector_get_possible_encoders(struct kunit *test)
encoder_cfg1 = vkms_config_create_encoder(config);
encoder_cfg2 = vkms_config_create_encoder(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2);
+
/* No possible encoders */
vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx,
possible_encoder)
@@ -916,6 +970,29 @@ static void vkms_config_test_connector_get_possible_encoders(struct kunit *test)
vkms_config_destroy(config);
}
+static void vkms_config_test_connector_status(struct kunit *test)
+{
+ struct vkms_config *config;
+ struct vkms_config_connector *connector_cfg;
+ enum drm_connector_status status;
+
+ config = vkms_config_create("test");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config);
+
+ connector_cfg = vkms_config_create_connector(config);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg);
+
+ status = vkms_config_connector_get_status(connector_cfg);
+ KUNIT_EXPECT_EQ(test, status, connector_status_connected);
+
+ vkms_config_connector_set_status(connector_cfg,
+ connector_status_disconnected);
+ status = vkms_config_connector_get_status(connector_cfg);
+ KUNIT_EXPECT_EQ(test, status, connector_status_disconnected);
+
+ vkms_config_destroy(config);
+}
+
static struct kunit_case vkms_config_test_cases[] = {
KUNIT_CASE(vkms_config_test_empty_config),
KUNIT_CASE_PARAM(vkms_config_test_default_config,
@@ -937,6 +1014,7 @@ static struct kunit_case vkms_config_test_cases[] = {
KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs),
KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs),
KUNIT_CASE(vkms_config_test_connector_get_possible_encoders),
+ KUNIT_CASE(vkms_config_test_connector_status),
{}
};
diff --git a/drivers/gpu/drm/vkms/tests/vkms_format_test.c b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
index 2e1daef94831..a7788fbc45dc 100644
--- a/drivers/gpu/drm/vkms/tests/vkms_format_test.c
+++ b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
@@ -14,20 +14,20 @@
MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
/**
- * struct pixel_yuv_u8 - Internal representation of a pixel color.
- * @y: Luma value, stored in 8 bits, without padding, using
+ * struct pixel_yuv_u16 - Internal representation of a pixel color.
+ * @y: Luma value, stored in 16 bits, without padding, using
* machine endianness
- * @u: Blue difference chroma value, stored in 8 bits, without padding, using
+ * @u: Blue difference chroma value, stored in 16 bits, without padding, using
* machine endianness
- * @v: Red difference chroma value, stored in 8 bits, without padding, using
+ * @v: Red difference chroma value, stored in 16 bits, without padding, using
* machine endianness
*/
-struct pixel_yuv_u8 {
- u8 y, u, v;
+struct pixel_yuv_u16 {
+ u16 y, u, v;
};
/*
- * struct yuv_u8_to_argb_u16_case - Reference values to test the color
+ * struct yuv_u16_to_argb_u16_case - Reference values to test the color
* conversions in VKMS between YUV to ARGB
*
* @encoding: Encoding used to convert RGB to YUV
@@ -39,13 +39,13 @@ struct pixel_yuv_u8 {
* @format_pair.yuv: Same color as @format_pair.rgb, but converted to
* YUV using @encoding and @range.
*/
-struct yuv_u8_to_argb_u16_case {
+struct yuv_u16_to_argb_u16_case {
enum drm_color_encoding encoding;
enum drm_color_range range;
size_t n_colors;
struct format_pair {
char *name;
- struct pixel_yuv_u8 yuv;
+ struct pixel_yuv_u16 yuv;
struct pixel_argb_u16 argb;
} colors[TEST_BUFF_SIZE];
};
@@ -57,14 +57,14 @@ struct yuv_u8_to_argb_u16_case {
* For more information, go to the docs:
* https://colour.readthedocs.io/en/master/generated/colour.RGB_to_YCbCr.html
*/
-static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
+static struct yuv_u16_to_argb_u16_case yuv_u16_to_argb_u16_cases[] = {
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
* K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = False,
* out_int = True)
*
@@ -76,13 +76,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_FULL_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x4c, 0x55, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0x96, 0x2c, 0x15 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x1d, 0xff, 0x6b }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4c8b, 0x54ce, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x9645, 0x2b33, 0x14d1 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d2f, 0xffff, 0x6b2f }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -90,7 +90,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = True,
* out_int = True)
* Test cases for color conversion generated by converting RGB
@@ -101,13 +101,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x51, 0x5a, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0x91, 0x36, 0x22 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x29, 0xf0, 0x6e }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x517b, 0x5a34, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x908e, 0x35cc, 0x2237 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x28f7, 0xf000, 0x6dc9 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -115,7 +115,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = False,
* out_int = True)
* Test cases for color conversion generated by converting RGB
@@ -126,21 +126,21 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_FULL_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x36, 0x63, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xb6, 0x1e, 0x0c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x12, 0xff, 0x74 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x366d, 0x62ac, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xb717, 0x1d55, 0x0bbd }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x127c, 0xffff, 0x7443 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
* K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
* in_bits = 16,
- * int_legal = False,
+ * in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = True,
* out_int = True)
* Test cases for color conversion generated by converting RGB
@@ -151,13 +151,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x3f, 0x66, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xad, 0x2a, 0x1a }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x20, 0xf0, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x3e8f, 0x6656, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xaca1, 0x29aa, 0x1a45 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1fd0, 0xf000, 0x75bb }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -165,7 +165,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = False,
* out_int = True)
* Test cases for color conversion generated by converting RGB
@@ -176,13 +176,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_FULL_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x43, 0x5c, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xad, 0x24, 0x0b }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x0f, 0xff, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4340, 0x5c41, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad91, 0x23bf, 0x0a4c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x0f2e, 0xffff, 0x75b5 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
/*
* colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
@@ -190,7 +190,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
* in_bits = 16,
* in_legal = False,
* in_int = True,
- * out_bits = 8,
+ * out_bits = 16,
* out_legal = True,
* out_int = True)
* Test cases for color conversion generated by converting RGB
@@ -201,32 +201,30 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
.range = DRM_COLOR_YCBCR_LIMITED_RANGE,
.n_colors = 6,
.colors = {
- { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
- { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
- { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
- { "red", { 0x4a, 0x61, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
- { "green", { 0xa4, 0x2f, 0x19 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
- { "blue", { 0x1d, 0xf0, 0x77 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
- },
+ { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4988, 0x60b9, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xa47b, 0x2f47, 0x1902 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1cfd, 0xf000, 0x76fe }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ }
},
};
/*
- * vkms_format_test_yuv_u8_to_argb_u16 - Testing the conversion between YUV
+ * vkms_format_test_yuv_u16_to_argb_u16 - Testing the conversion between YUV
* colors to ARGB colors in VKMS
*
* This test will use the functions get_conversion_matrix_to_argb_u16 and
- * argb_u16_from_yuv888 to convert YUV colors (stored in
- * yuv_u8_to_argb_u16_cases) into ARGB colors.
+ * argb_u16_from_yuv161616 to convert YUV colors (stored in
+ * yuv_u16_to_argb_u16_cases) into ARGB colors.
*
* The conversion between YUV and RGB is not totally reversible, so there may be
* some difference between the expected value and the result.
- * In addition, there may be some rounding error as the input color is 8 bits
- * and output color is 16 bits.
*/
-static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
+static void vkms_format_test_yuv_u16_to_argb_u16(struct kunit *test)
{
- const struct yuv_u8_to_argb_u16_case *param = test->param_value;
+ const struct yuv_u16_to_argb_u16_case *param = test->param_value;
struct pixel_argb_u16 argb;
for (size_t i = 0; i < param->n_colors; i++) {
@@ -236,7 +234,8 @@ static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
get_conversion_matrix_to_argb_u16
(DRM_FORMAT_NV12, param->encoding, param->range, &matrix);
- argb = argb_u16_from_yuv888(color->yuv.y, color->yuv.u, color->yuv.v, &matrix);
+ argb = argb_u16_from_yuv161616(&matrix, color->yuv.y, color->yuv.u,
+ color->yuv.v);
KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.a, color->argb.a), 0x1ff,
"On the A channel of the color %s expected 0x%04x, got 0x%04x",
@@ -253,19 +252,19 @@ static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
}
}
-static void vkms_format_test_yuv_u8_to_argb_u16_case_desc(struct yuv_u8_to_argb_u16_case *t,
- char *desc)
+static void vkms_format_test_yuv_u16_to_argb_u16_case_desc(struct yuv_u16_to_argb_u16_case *t,
+ char *desc)
{
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s - %s",
drm_get_color_encoding_name(t->encoding), drm_get_color_range_name(t->range));
}
-KUNIT_ARRAY_PARAM(yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_cases,
- vkms_format_test_yuv_u8_to_argb_u16_case_desc
+KUNIT_ARRAY_PARAM(yuv_u16_to_argb_u16, yuv_u16_to_argb_u16_cases,
+ vkms_format_test_yuv_u16_to_argb_u16_case_desc
);
static struct kunit_case vkms_format_test_cases[] = {
- KUNIT_CASE_PARAM(vkms_format_test_yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_gen_params),
+ KUNIT_CASE_PARAM(vkms_format_test_yuv_u16_to_argb_u16, yuv_u16_to_argb_u16_gen_params),
{}
};
diff --git a/drivers/gpu/drm/vkms/vkms_colorop.c b/drivers/gpu/drm/vkms/vkms_colorop.c
new file mode 100644
index 000000000000..5c3ffc78aea0
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_colorop.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/slab.h>
+#include <drm/drm_colorop.h>
+#include <drm/drm_print.h>
+#include <drm/drm_property.h>
+#include <drm/drm_plane.h>
+
+#include "vkms_drv.h"
+
+static const u64 supported_tfs =
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_EOTF) |
+ BIT(DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF);
+
+#define MAX_COLOR_PIPELINE_OPS 4
+
+static int vkms_initialize_color_pipeline(struct drm_plane *plane, struct drm_prop_enum_list *list)
+{
+ struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS];
+ struct drm_device *dev = plane->dev;
+ int ret;
+ int i = 0, j = 0;
+
+ memset(ops, 0, sizeof(ops));
+
+ /* 1st op: 1d curve */
+ ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL);
+ if (!ops[i]) {
+ drm_err(dev, "KMS: Failed to allocate colorop\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ list->type = ops[i]->base.id;
+ list->name = kasprintf(GFP_KERNEL, "Color Pipeline %d", ops[i]->base.id);
+
+ i++;
+
+ /* 2nd op: 3x4 matrix */
+ ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL);
+ if (!ops[i]) {
+ drm_err(dev, "KMS: Failed to allocate colorop\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 3rd op: 3x4 matrix */
+ ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL);
+ if (!ops[i]) {
+ drm_err(dev, "KMS: Failed to allocate colorop\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_ctm_3x4_init(dev, ops[i], plane, DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ i++;
+
+ /* 4th op: 1d curve */
+ ops[i] = kzalloc(sizeof(*ops[i]), GFP_KERNEL);
+ if (!ops[i]) {
+ drm_err(dev, "KMS: Failed to allocate colorop\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = drm_plane_colorop_curve_1d_init(dev, ops[i], plane, supported_tfs,
+ DRM_COLOROP_FLAG_ALLOW_BYPASS);
+ if (ret)
+ goto cleanup;
+
+ drm_colorop_set_next_property(ops[i - 1], ops[i]);
+
+ return 0;
+
+cleanup:
+ for (j = 0; j < i; j++) {
+ if (ops[j]) {
+ drm_colorop_cleanup(ops[j]);
+ kfree(ops[j]);
+ }
+ }
+
+ return ret;
+}
+
+int vkms_initialize_colorops(struct drm_plane *plane)
+{
+ struct drm_prop_enum_list pipeline;
+ int ret;
+
+ /* Add color pipeline */
+ ret = vkms_initialize_color_pipeline(plane, &pipeline);
+ if (ret)
+ return ret;
+
+ /* Create COLOR_PIPELINE property and attach */
+ ret = drm_plane_create_color_pipeline_property(plane, &pipeline, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
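For orientation, userspace reaches this pipeline through an enum property on the plane created by drm_plane_create_color_pipeline_property(). Below is a minimal discovery sketch using stock libdrm calls; the property name "COLOR_PIPELINE" is an assumption based on the helper's name, not something this diff defines.

/*
 * Hypothetical userspace sketch: locate a plane's color pipeline
 * property with standard libdrm calls. Only the "COLOR_PIPELINE"
 * name is assumed; everything else is stock libdrm API.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static uint32_t find_color_pipeline_prop(int fd, uint32_t plane_id)
{
	drmModeObjectProperties *props;
	uint32_t prop_id = 0;

	props = drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	if (!props)
		return 0;

	for (uint32_t i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "COLOR_PIPELINE"))
			prop_id = prop->prop_id;
		drmModeFreeProperty(prop);
	}

	drmModeFreeObjectProperties(props);
	return prop_id;
}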
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index fa269d279e25..3cf3f26e0d8e 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -8,10 +8,13 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_fixed.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <linux/minmax.h>
+#include <kunit/visibility.h>
-#include "vkms_drv.h"
+#include "vkms_composer.h"
+#include "vkms_luts.h"
static u16 pre_mul_blend_channel(u16 src, u16 dst, u16 alpha)
{
@@ -60,7 +63,7 @@ static void fill_background(const struct pixel_argb_u16 *background_color,
}
// lerp(a, b, t) = a + (b - a) * t
-static u16 lerp_u16(u16 a, u16 b, s64 t)
+VISIBLE_IF_KUNIT u16 lerp_u16(u16 a, u16 b, s64 t)
{
s64 a_fp = drm_int2fixp(a);
s64 b_fp = drm_int2fixp(b);
@@ -69,27 +72,18 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)
return drm_fixp2int_round(a_fp + delta);
}
+EXPORT_SYMBOL_IF_KUNIT(lerp_u16);
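For readers unfamiliar with <drm/drm_fixed.h>: t is a 32.32 fixed-point fraction, so 0.5 is 1 << 31 and 1.0 is 1 << 32. A minimal sketch with illustrative values (not part of this diff):

#include <drm/drm_fixed.h>

/* Illustrative only: 0xffff * 0.5 = 32767.5, rounded to 0x8000 */
static u16 lerp_u16_midpoint_example(void)
{
	s64 half = 1LL << (DRM_FIXED_POINT - 1); /* 0.5 in 32.32 fixed point */

	/* t == 0 would return the first argument, t == 1 << 32 the second */
	return lerp_u16(0x0000, 0xffff, half);
}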
-static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
+VISIBLE_IF_KUNIT s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
{
s64 color_channel_fp = drm_int2fixp(channel_value);
return drm_fixp_mul(color_channel_fp, lut->channel_value2index_ratio);
}
+EXPORT_SYMBOL_IF_KUNIT(get_lut_index);
-/*
- * This enum is related to the positions of the variables inside
- * `struct drm_color_lut`, so the order of both needs to be the same.
- */
-enum lut_channel {
- LUT_RED = 0,
- LUT_GREEN,
- LUT_BLUE,
- LUT_RESERVED
-};
-
-static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value,
- enum lut_channel channel)
+VISIBLE_IF_KUNIT u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value,
+ enum lut_channel channel)
{
s64 lut_index = get_lut_index(lut, channel_value);
u16 *floor_lut_value, *ceil_lut_value;
@@ -114,6 +108,8 @@ static u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 chan
return lerp_u16(floor_channel_value, ceil_channel_value,
lut_index & DRM_FIXED_DECIMAL_MASK);
}
+EXPORT_SYMBOL_IF_KUNIT(apply_lut_to_channel_value);
+
static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buffer *output_buffer)
{
@@ -132,6 +128,112 @@ static void apply_lut(const struct vkms_crtc_state *crtc_state, struct line_buff
}
}
+VISIBLE_IF_KUNIT void apply_3x4_matrix(struct pixel_argb_s32 *pixel,
+ const struct drm_color_ctm_3x4 *matrix)
+{
+ s64 rf, gf, bf;
+ s64 r, g, b;
+
+ r = drm_int2fixp(pixel->r);
+ g = drm_int2fixp(pixel->g);
+ b = drm_int2fixp(pixel->b);
+
+ rf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[0]), r) +
+ drm_fixp_mul(drm_sm2fixp(matrix->matrix[1]), g) +
+ drm_fixp_mul(drm_sm2fixp(matrix->matrix[2]), b) +
+ drm_sm2fixp(matrix->matrix[3]);
+
+ gf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[4]), r) +
+ drm_fixp_mul(drm_sm2fixp(matrix->matrix[5]), g) +
+ drm_fixp_mul(drm_sm2fixp(matrix->matrix[6]), b) +
+ drm_sm2fixp(matrix->matrix[7]);
+
+ bf = drm_fixp_mul(drm_sm2fixp(matrix->matrix[8]), r) +
+ drm_fixp_mul(drm_sm2fixp(matrix->matrix[9]), g) +
+ drm_fixp_mul(drm_sm2fixp(matrix->matrix[10]), b) +
+ drm_sm2fixp(matrix->matrix[11]);
+
+ pixel->r = drm_fixp2int_round(rf);
+ pixel->g = drm_fixp2int_round(gf);
+ pixel->b = drm_fixp2int_round(bf);
+}
+EXPORT_SYMBOL_IF_KUNIT(apply_3x4_matrix);
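The matrix entries are decoded with drm_sm2fixp(), i.e. S31.32 sign-magnitude fixed point, and indices 3, 7 and 11 act as a translation column in channel units (0..0xffff). A hedged sketch of a well-formed matrix, with illustrative values that are not part of this diff:

/*
 * Illustrative only: an identity 3x4 CTM in S31.32 sign-magnitude,
 * matching the row layout of apply_3x4_matrix() above:
 *   r' = m[0]*r + m[1]*g + m[2]*b + m[3], and so on per row.
 * A negative coefficient would set bit 63 (sign-magnitude).
 */
static const struct drm_color_ctm_3x4 example_ctm = {
	.matrix = {
		1ULL << 32, 0, 0, 0x1000ULL << 32,	/* r' = r + 0x1000 */
		0, 1ULL << 32, 0, 0,			/* g' = g */
		0, 0, 1ULL << 32, 0,			/* b' = b */
	},
};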
+
+static void apply_colorop(struct pixel_argb_s32 *pixel, struct drm_colorop *colorop)
+{
+ struct drm_colorop_state *colorop_state = colorop->state;
+ struct drm_device *dev = colorop->dev;
+
+ if (colorop->type == DRM_COLOROP_1D_CURVE) {
+ switch (colorop_state->curve_1d_type) {
+ case DRM_COLOROP_1D_CURVE_SRGB_INV_EOTF:
+ pixel->r = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->r, LUT_RED);
+ pixel->g = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->g, LUT_GREEN);
+ pixel->b = apply_lut_to_channel_value(&srgb_inv_eotf, pixel->b, LUT_BLUE);
+ break;
+ case DRM_COLOROP_1D_CURVE_SRGB_EOTF:
+ pixel->r = apply_lut_to_channel_value(&srgb_eotf, pixel->r, LUT_RED);
+ pixel->g = apply_lut_to_channel_value(&srgb_eotf, pixel->g, LUT_GREEN);
+ pixel->b = apply_lut_to_channel_value(&srgb_eotf, pixel->b, LUT_BLUE);
+ break;
+ default:
+ drm_WARN_ONCE(dev, true,
+ "unknown colorop 1D curve type %d\n",
+ colorop_state->curve_1d_type);
+ break;
+ }
+ } else if (colorop->type == DRM_COLOROP_CTM_3X4) {
+ if (colorop_state->data)
+ apply_3x4_matrix(pixel,
+ (struct drm_color_ctm_3x4 *)colorop_state->data->data);
+ }
+}
+
+static void pre_blend_color_transform(const struct vkms_plane_state *plane_state,
+ struct line_buffer *output_buffer)
+{
+ struct pixel_argb_s32 pixel;
+
+ for (size_t x = 0; x < output_buffer->n_pixels; x++) {
+ struct drm_colorop *colorop = plane_state->base.base.color_pipeline;
+
+ /*
+ * Some operations, such as applying a BT709 encoding matrix
+ * followed by a decoding matrix, require that we preserve
+ * values above 1.0 and below 0.0 until the end of the pipeline.
+ *
+ * Pack the 16-bit UNORM values into s32 to give us head-room to
+ * avoid clipping until we're at the end of the pipeline. Clip
+ * intentionally at the end of the pipeline before packing
+ * UNORM values back into u16.
+ */
+ pixel.a = output_buffer->pixels[x].a;
+ pixel.r = output_buffer->pixels[x].r;
+ pixel.g = output_buffer->pixels[x].g;
+ pixel.b = output_buffer->pixels[x].b;
+
+ while (colorop) {
+ struct drm_colorop_state *colorop_state;
+
+ colorop_state = colorop->state;
+
+ if (!colorop_state)
+ return;
+
+ if (!colorop_state->bypass)
+ apply_colorop(&pixel, colorop);
+
+ colorop = colorop->next;
+ }
+
+ /* clamp values */
+ output_buffer->pixels[x].a = clamp_val(pixel.a, 0, 0xffff);
+ output_buffer->pixels[x].r = clamp_val(pixel.r, 0, 0xffff);
+ output_buffer->pixels[x].g = clamp_val(pixel.g, 0, 0xffff);
+ output_buffer->pixels[x].b = clamp_val(pixel.b, 0, 0xffff);
+ }
+}
+
/**
* direction_for_rotation() - Get the correct reading direction for a given rotation
*
@@ -347,7 +449,7 @@ static void blend_line(struct vkms_plane_state *current_plane, int y,
*/
current_plane->pixel_read_line(current_plane, src_x_start, src_y_start, direction,
pixel_count, &stage_buffer->pixels[dst_x_start]);
-
+ pre_blend_color_transform(current_plane, stage_buffer);
pre_mul_alpha_blend(stage_buffer, output_buffer,
dst_x_start, pixel_count);
}
diff --git a/drivers/gpu/drm/vkms/vkms_composer.h b/drivers/gpu/drm/vkms/vkms_composer.h
new file mode 100644
index 000000000000..04dd5646f672
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_composer.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_COMPOSER_H_
+#define _VKMS_COMPOSER_H_
+
+#include <kunit/visibility.h>
+#include "vkms_drv.h"
+
+/*
+ * This enum is related to the positions of the variables inside
+ * `struct drm_color_lut`, so the order of both needs to be the same.
+ */
+enum lut_channel {
+ LUT_RED = 0,
+ LUT_GREEN,
+ LUT_BLUE,
+ LUT_RESERVED
+};
+
+#if IS_ENABLED(CONFIG_KUNIT)
+u16 lerp_u16(u16 a, u16 b, s64 t);
+s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value);
+u16 apply_lut_to_channel_value(const struct vkms_color_lut *lut, u16 channel_value,
+ enum lut_channel channel);
+void apply_3x4_matrix(struct pixel_argb_s32 *pixel, const struct drm_color_ctm_3x4 *matrix);
+#endif
+
+#endif /* _VKMS_COMPOSER_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c
index a1df5659b0fb..8788df9edb7c 100644
--- a/drivers/gpu/drm/vkms/vkms_config.c
+++ b/drivers/gpu/drm/vkms/vkms_config.c
@@ -33,7 +33,8 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_create);
struct vkms_config *vkms_config_default_create(bool enable_cursor,
bool enable_writeback,
- bool enable_overlay)
+ bool enable_overlay,
+ bool enable_plane_pipeline)
{
struct vkms_config *config;
struct vkms_config_plane *plane_cfg;
@@ -58,6 +59,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor,
if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
goto err_alloc;
+ vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline);
if (enable_overlay) {
for (n = 0; n < NUM_OVERLAY_PLANES; n++) {
@@ -67,6 +69,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor,
vkms_config_plane_set_type(plane_cfg,
DRM_PLANE_TYPE_OVERLAY);
+ vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline);
if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
goto err_alloc;
@@ -79,6 +82,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor,
goto err_alloc;
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR);
+ vkms_config_plane_set_default_pipeline(plane_cfg, enable_plane_pipeline);
if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg))
goto err_alloc;
@@ -361,8 +365,11 @@ static int vkms_config_show(struct seq_file *m, void *data)
vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg)
seq_puts(m, "encoder\n");
- vkms_config_for_each_connector(vkmsdev->config, connector_cfg)
- seq_puts(m, "connector\n");
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg) {
+ seq_puts(m, "connector:\n");
+ seq_printf(m, "\tstatus=%d\n",
+ vkms_config_connector_get_status(connector_cfg));
+ }
return 0;
}
@@ -386,6 +393,7 @@ struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config)
return ERR_PTR(-ENOMEM);
plane_cfg->config = config;
+ plane_cfg->default_pipeline = false;
vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY);
xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC);
@@ -588,6 +596,7 @@ struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *c
return ERR_PTR(-ENOMEM);
connector_cfg->config = config;
+ connector_cfg->status = connector_status_connected;
xa_init_flags(&connector_cfg->possible_encoders, XA_FLAGS_ALLOC);
list_add_tail(&connector_cfg->link, &config->connectors);
diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h
index 0118e3f99706..8f7f286a4bdd 100644
--- a/drivers/gpu/drm/vkms/vkms_config.h
+++ b/drivers/gpu/drm/vkms/vkms_config.h
@@ -7,6 +7,8 @@
#include <linux/types.h>
#include <linux/xarray.h>
+#include <drm/drm_connector.h>
+
#include "vkms_drv.h"
/**
@@ -47,6 +49,7 @@ struct vkms_config_plane {
enum drm_plane_type type;
struct xarray possible_crtcs;
+ bool default_pipeline;
/* Internal usage */
struct vkms_plane *plane;
@@ -99,6 +102,7 @@ struct vkms_config_encoder {
*
* @link: Link to the others connector in vkms_config
* @config: The vkms_config this connector belongs to
+ * @status: Status (connected, disconnected...) of the connector
* @possible_encoders: Array of encoders that can be used with this connector
* @connector: Internal usage. This pointer should never be considered as valid.
* It can be used to store a temporary reference to a VKMS connector
@@ -109,6 +113,7 @@ struct vkms_config_connector {
struct list_head link;
struct vkms_config *config;
+ enum drm_connector_status status;
struct xarray possible_encoders;
/* Internal usage */
@@ -199,7 +204,8 @@ struct vkms_config *vkms_config_create(const char *dev_name);
*/
struct vkms_config *vkms_config_default_create(bool enable_cursor,
bool enable_writeback,
- bool enable_overlay);
+ bool enable_overlay,
+ bool enable_plane_pipeline);
/**
* vkms_config_destroy() - Free a VKMS configuration
@@ -285,6 +291,30 @@ vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg,
}
/**
+ * vkms_config_plane_get_default_pipeline() - Return whether the plane will
+ * be created with the default pipeline
+ * @plane_cfg: Plane to get the information from
+ */
+static inline bool
+vkms_config_plane_get_default_pipeline(struct vkms_config_plane *plane_cfg)
+{
+ return plane_cfg->default_pipeline;
+}
+
+/**
+ * vkms_config_plane_set_default_pipeline() - Set whether the plane will
+ * be created with the default pipeline
+ * @plane_cfg: Plane to configure the pipeline
+ * @default_pipeline: New default pipeline value
+ */
+static inline void
+vkms_config_plane_set_default_pipeline(struct vkms_config_plane *plane_cfg,
+ bool default_pipeline)
+{
+ plane_cfg->default_pipeline = default_pipeline;
+}
+
+/**
* vkms_config_plane_attach_crtc - Attach a plane to a CRTC
* @plane_cfg: Plane to attach
* @crtc_cfg: CRTC to attach @plane_cfg to
@@ -434,4 +464,26 @@ int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connect
void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg,
struct vkms_config_encoder *encoder_cfg);
+/**
+ * vkms_config_connector_get_status() - Return the status of the connector
+ * @connector_cfg: Connector to get the status from
+ */
+static inline enum drm_connector_status
+vkms_config_connector_get_status(struct vkms_config_connector *connector_cfg)
+{
+ return connector_cfg->status;
+}
+
+/**
+ * vkms_config_connector_set_status() - Set the status of the connector
+ * @connector_cfg: Connector to set the status to
+ * @status: New connector status
+ */
+static inline void
+vkms_config_connector_set_status(struct vkms_config_connector *connector_cfg,
+ enum drm_connector_status status)
+{
+ connector_cfg->status = status;
+}
+
#endif /* _VKMS_CONFIG_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_configfs.c b/drivers/gpu/drm/vkms/vkms_configfs.c
new file mode 100644
index 000000000000..506666e21c91
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_configfs.c
@@ -0,0 +1,843 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/cleanup.h>
+#include <linux/configfs.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "vkms_drv.h"
+#include "vkms_config.h"
+#include "vkms_configfs.h"
+#include "vkms_connector.h"
+
+/* Avoid registering configfs more than once or unregistering it on error */
+static bool is_configfs_registered;
+
+/**
+ * struct vkms_configfs_device - Configfs representation of a VKMS device
+ *
+ * @group: Top level configuration group that represents a VKMS device.
+ * Initialized when a new directory is created under "/config/vkms/"
+ * @planes_group: Default subgroup of @group at "/config/vkms/<device>/planes"
+ * @crtcs_group: Default subgroup of @group at "/config/vkms/<device>/crtcs"
+ * @encoders_group: Default subgroup of @group at "/config/vkms/<device>/encoders"
+ * @connectors_group: Default subgroup of @group at "/config/vkms/<device>/connectors"
+ * @lock: Lock used to protect concurrent access to the configuration attributes
+ * @config: Protected by @lock. Configuration of the VKMS device
+ * @enabled: Protected by @lock. The device is created or destroyed when this
+ * option changes
+ */
+struct vkms_configfs_device {
+ struct config_group group;
+ struct config_group planes_group;
+ struct config_group crtcs_group;
+ struct config_group encoders_group;
+ struct config_group connectors_group;
+
+ struct mutex lock;
+ struct vkms_config *config;
+ bool enabled;
+};
+
+/**
+ * struct vkms_configfs_plane - Configfs representation of a plane
+ *
+ * @group: Top level configuration group that represents a plane.
+ * Initialized when a new directory is created under "/config/vkms/<device>/planes"
+ * @possible_crtcs_group: Default subgroup of @group at "plane/possible_crtcs"
+ * @dev: The vkms_configfs_device this plane belongs to
+ * @config: Configuration of the VKMS plane
+ */
+struct vkms_configfs_plane {
+ struct config_group group;
+ struct config_group possible_crtcs_group;
+ struct vkms_configfs_device *dev;
+ struct vkms_config_plane *config;
+};
+
+/**
+ * struct vkms_configfs_crtc - Configfs representation of a CRTC
+ *
+ * @group: Top level configuration group that represents a CRTC.
+ * Initialized when a new directory is created under "/config/vkms/<device>/crtcs"
+ * @dev: The vkms_configfs_device this CRTC belongs to
+ * @config: Configuration of the VKMS CRTC
+ */
+struct vkms_configfs_crtc {
+ struct config_group group;
+ struct vkms_configfs_device *dev;
+ struct vkms_config_crtc *config;
+};
+
+/**
+ * struct vkms_configfs_encoder - Configfs representation of an encoder
+ *
+ * @group: Top level configuration group that represents an encoder.
+ * Initialized when a new directory is created under "/config/vkms/<device>/encoders"
+ * @possible_crtcs_group: Default subgroup of @group at "encoder/possible_crtcs"
+ * @dev: The vkms_configfs_device this encoder belongs to
+ * @config: Configuration of the VKMS encoder
+ */
+struct vkms_configfs_encoder {
+ struct config_group group;
+ struct config_group possible_crtcs_group;
+ struct vkms_configfs_device *dev;
+ struct vkms_config_encoder *config;
+};
+
+/**
+ * struct vkms_configfs_connector - Configfs representation of a connector
+ *
+ * @group: Top level configuration group that represents a connector.
+ * Initialized when a new directory is created under "/config/vkms/<device>/connectors"
+ * @possible_encoders_group: Default subgroup of @group at
+ * "connector/possible_encoders"
+ * @dev: The vkms_configfs_device this connector belongs to
+ * @config: Configuration of the VKMS connector
+ */
+struct vkms_configfs_connector {
+ struct config_group group;
+ struct config_group possible_encoders_group;
+ struct vkms_configfs_device *dev;
+ struct vkms_config_connector *config;
+};
+
+#define device_item_to_vkms_configfs_device(item) \
+ container_of(to_config_group((item)), struct vkms_configfs_device, \
+ group)
+
+#define child_group_to_vkms_configfs_device(group) \
+ device_item_to_vkms_configfs_device((&(group)->cg_item)->ci_parent)
+
+#define plane_item_to_vkms_configfs_plane(item) \
+ container_of(to_config_group((item)), struct vkms_configfs_plane, group)
+
+#define plane_possible_crtcs_item_to_vkms_configfs_plane(item) \
+ container_of(to_config_group((item)), struct vkms_configfs_plane, \
+ possible_crtcs_group)
+
+#define crtc_item_to_vkms_configfs_crtc(item) \
+ container_of(to_config_group((item)), struct vkms_configfs_crtc, group)
+
+#define encoder_item_to_vkms_configfs_encoder(item) \
+ container_of(to_config_group((item)), struct vkms_configfs_encoder, \
+ group)
+
+#define encoder_possible_crtcs_item_to_vkms_configfs_encoder(item) \
+ container_of(to_config_group((item)), struct vkms_configfs_encoder, \
+ possible_crtcs_group)
+
+#define connector_item_to_vkms_configfs_connector(item) \
+ container_of(to_config_group((item)), struct vkms_configfs_connector, \
+ group)
+
+#define connector_possible_encoders_item_to_vkms_configfs_connector(item) \
+ container_of(to_config_group((item)), struct vkms_configfs_connector, \
+ possible_encoders_group)
+
+static ssize_t crtc_writeback_show(struct config_item *item, char *page)
+{
+ struct vkms_configfs_crtc *crtc;
+ bool writeback;
+
+ crtc = crtc_item_to_vkms_configfs_crtc(item);
+
+ scoped_guard(mutex, &crtc->dev->lock)
+ writeback = vkms_config_crtc_get_writeback(crtc->config);
+
+ return sprintf(page, "%d\n", writeback);
+}
+
+static ssize_t crtc_writeback_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct vkms_configfs_crtc *crtc;
+ bool writeback;
+
+ crtc = crtc_item_to_vkms_configfs_crtc(item);
+
+ if (kstrtobool(page, &writeback))
+ return -EINVAL;
+
+ scoped_guard(mutex, &crtc->dev->lock) {
+ if (crtc->dev->enabled)
+ return -EBUSY;
+
+ vkms_config_crtc_set_writeback(crtc->config, writeback);
+ }
+
+ return (ssize_t)count;
+}
+
+CONFIGFS_ATTR(crtc_, writeback);
+
+static struct configfs_attribute *crtc_item_attrs[] = {
+ &crtc_attr_writeback,
+ NULL,
+};
+
+static void crtc_release(struct config_item *item)
+{
+ struct vkms_configfs_crtc *crtc;
+ struct mutex *lock;
+
+ crtc = crtc_item_to_vkms_configfs_crtc(item);
+ lock = &crtc->dev->lock;
+
+ scoped_guard(mutex, lock) {
+ vkms_config_destroy_crtc(crtc->dev->config, crtc->config);
+ kfree(crtc);
+ }
+}
+
+static struct configfs_item_operations crtc_item_operations = {
+ .release = &crtc_release,
+};
+
+static const struct config_item_type crtc_item_type = {
+ .ct_attrs = crtc_item_attrs,
+ .ct_item_ops = &crtc_item_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *make_crtc_group(struct config_group *group,
+ const char *name)
+{
+ struct vkms_configfs_device *dev;
+ struct vkms_configfs_crtc *crtc;
+ int ret;
+
+ dev = child_group_to_vkms_configfs_device(group);
+
+ scoped_guard(mutex, &dev->lock) {
+ if (dev->enabled)
+ return ERR_PTR(-EBUSY);
+
+ crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
+ if (!crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc->dev = dev;
+
+ crtc->config = vkms_config_create_crtc(dev->config);
+ if (IS_ERR(crtc->config)) {
+ ret = PTR_ERR(crtc->config);
+ kfree(crtc);
+ return ERR_PTR(ret);
+ }
+
+ config_group_init_type_name(&crtc->group, name, &crtc_item_type);
+ }
+
+ return &crtc->group;
+}
+
+static struct configfs_group_operations crtcs_group_operations = {
+ .make_group = &make_crtc_group,
+};
+
+static const struct config_item_type crtc_group_type = {
+ .ct_group_ops = &crtcs_group_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static int plane_possible_crtcs_allow_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct vkms_configfs_plane *plane;
+ struct vkms_configfs_crtc *crtc;
+ int ret;
+
+ if (target->ci_type != &crtc_item_type)
+ return -EINVAL;
+
+ plane = plane_possible_crtcs_item_to_vkms_configfs_plane(src);
+ crtc = crtc_item_to_vkms_configfs_crtc(target);
+
+ scoped_guard(mutex, &plane->dev->lock) {
+ if (plane->dev->enabled)
+ return -EBUSY;
+
+ ret = vkms_config_plane_attach_crtc(plane->config, crtc->config);
+ }
+
+ return ret;
+}
+
+static void plane_possible_crtcs_drop_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct vkms_configfs_plane *plane;
+ struct vkms_configfs_crtc *crtc;
+
+ plane = plane_possible_crtcs_item_to_vkms_configfs_plane(src);
+ crtc = crtc_item_to_vkms_configfs_crtc(target);
+
+ scoped_guard(mutex, &plane->dev->lock)
+ vkms_config_plane_detach_crtc(plane->config, crtc->config);
+}
+
+static struct configfs_item_operations plane_possible_crtcs_item_operations = {
+ .allow_link = plane_possible_crtcs_allow_link,
+ .drop_link = plane_possible_crtcs_drop_link,
+};
+
+static const struct config_item_type plane_possible_crtcs_group_type = {
+ .ct_item_ops = &plane_possible_crtcs_item_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static ssize_t plane_type_show(struct config_item *item, char *page)
+{
+ struct vkms_configfs_plane *plane;
+ enum drm_plane_type type;
+
+ plane = plane_item_to_vkms_configfs_plane(item);
+
+ scoped_guard(mutex, &plane->dev->lock)
+ type = vkms_config_plane_get_type(plane->config);
+
+ return sprintf(page, "%u", type);
+}
+
+static ssize_t plane_type_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct vkms_configfs_plane *plane;
+ enum drm_plane_type type;
+
+ plane = plane_item_to_vkms_configfs_plane(item);
+
+ if (kstrtouint(page, 10, &type))
+ return -EINVAL;
+
+ if (type != DRM_PLANE_TYPE_OVERLAY && type != DRM_PLANE_TYPE_PRIMARY &&
+ type != DRM_PLANE_TYPE_CURSOR)
+ return -EINVAL;
+
+ scoped_guard(mutex, &plane->dev->lock) {
+ if (plane->dev->enabled)
+ return -EBUSY;
+
+ vkms_config_plane_set_type(plane->config, type);
+ }
+
+ return (ssize_t)count;
+}
+
+CONFIGFS_ATTR(plane_, type);
+
+static struct configfs_attribute *plane_item_attrs[] = {
+ &plane_attr_type,
+ NULL,
+};
+
+static void plane_release(struct config_item *item)
+{
+ struct vkms_configfs_plane *plane;
+ struct mutex *lock;
+
+ plane = plane_item_to_vkms_configfs_plane(item);
+ lock = &plane->dev->lock;
+
+ scoped_guard(mutex, lock) {
+ vkms_config_destroy_plane(plane->config);
+ kfree(plane);
+ }
+}
+
+static struct configfs_item_operations plane_item_operations = {
+ .release = &plane_release,
+};
+
+static const struct config_item_type plane_item_type = {
+ .ct_attrs = plane_item_attrs,
+ .ct_item_ops = &plane_item_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *make_plane_group(struct config_group *group,
+ const char *name)
+{
+ struct vkms_configfs_device *dev;
+ struct vkms_configfs_plane *plane;
+ int ret;
+
+ dev = child_group_to_vkms_configfs_device(group);
+
+ scoped_guard(mutex, &dev->lock) {
+ if (dev->enabled)
+ return ERR_PTR(-EBUSY);
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ plane->dev = dev;
+
+ plane->config = vkms_config_create_plane(dev->config);
+ if (IS_ERR(plane->config)) {
+ ret = PTR_ERR(plane->config);
+ kfree(plane);
+ return ERR_PTR(ret);
+ }
+
+ config_group_init_type_name(&plane->group, name, &plane_item_type);
+
+ config_group_init_type_name(&plane->possible_crtcs_group,
+ "possible_crtcs",
+ &plane_possible_crtcs_group_type);
+ configfs_add_default_group(&plane->possible_crtcs_group,
+ &plane->group);
+ }
+
+ return &plane->group;
+}
+
+static struct configfs_group_operations planes_group_operations = {
+ .make_group = &make_plane_group,
+};
+
+static const struct config_item_type plane_group_type = {
+ .ct_group_ops = &planes_group_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static int encoder_possible_crtcs_allow_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct vkms_configfs_encoder *encoder;
+ struct vkms_configfs_crtc *crtc;
+ int ret;
+
+ if (target->ci_type != &crtc_item_type)
+ return -EINVAL;
+
+ encoder = encoder_possible_crtcs_item_to_vkms_configfs_encoder(src);
+ crtc = crtc_item_to_vkms_configfs_crtc(target);
+
+ scoped_guard(mutex, &encoder->dev->lock) {
+ if (encoder->dev->enabled)
+ return -EBUSY;
+
+ ret = vkms_config_encoder_attach_crtc(encoder->config, crtc->config);
+ }
+
+ return ret;
+}
+
+static void encoder_possible_crtcs_drop_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct vkms_configfs_encoder *encoder;
+ struct vkms_configfs_crtc *crtc;
+
+ encoder = encoder_possible_crtcs_item_to_vkms_configfs_encoder(src);
+ crtc = crtc_item_to_vkms_configfs_crtc(target);
+
+ scoped_guard(mutex, &encoder->dev->lock)
+ vkms_config_encoder_detach_crtc(encoder->config, crtc->config);
+}
+
+static struct configfs_item_operations encoder_possible_crtcs_item_operations = {
+ .allow_link = encoder_possible_crtcs_allow_link,
+ .drop_link = encoder_possible_crtcs_drop_link,
+};
+
+static const struct config_item_type encoder_possible_crtcs_group_type = {
+ .ct_item_ops = &encoder_possible_crtcs_item_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static void encoder_release(struct config_item *item)
+{
+ struct vkms_configfs_encoder *encoder;
+ struct mutex *lock;
+
+ encoder = encoder_item_to_vkms_configfs_encoder(item);
+ lock = &encoder->dev->lock;
+
+ scoped_guard(mutex, lock) {
+ vkms_config_destroy_encoder(encoder->dev->config, encoder->config);
+ kfree(encoder);
+ }
+}
+
+static struct configfs_item_operations encoder_item_operations = {
+ .release = &encoder_release,
+};
+
+static const struct config_item_type encoder_item_type = {
+ .ct_item_ops = &encoder_item_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *make_encoder_group(struct config_group *group,
+ const char *name)
+{
+ struct vkms_configfs_device *dev;
+ struct vkms_configfs_encoder *encoder;
+ int ret;
+
+ dev = child_group_to_vkms_configfs_device(group);
+
+ scoped_guard(mutex, &dev->lock) {
+ if (dev->enabled)
+ return ERR_PTR(-EBUSY);
+
+ encoder = kzalloc(sizeof(*encoder), GFP_KERNEL);
+ if (!encoder)
+ return ERR_PTR(-ENOMEM);
+
+ encoder->dev = dev;
+
+ encoder->config = vkms_config_create_encoder(dev->config);
+ if (IS_ERR(encoder->config)) {
+ ret = PTR_ERR(encoder->config);
+ kfree(encoder);
+ return ERR_PTR(ret);
+ }
+
+ config_group_init_type_name(&encoder->group, name,
+ &encoder_item_type);
+
+ config_group_init_type_name(&encoder->possible_crtcs_group,
+ "possible_crtcs",
+ &encoder_possible_crtcs_group_type);
+ configfs_add_default_group(&encoder->possible_crtcs_group,
+ &encoder->group);
+ }
+
+ return &encoder->group;
+}
+
+static struct configfs_group_operations encoders_group_operations = {
+ .make_group = &make_encoder_group,
+};
+
+static const struct config_item_type encoder_group_type = {
+ .ct_group_ops = &encoders_group_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static ssize_t connector_status_show(struct config_item *item, char *page)
+{
+ struct vkms_configfs_connector *connector;
+ enum drm_connector_status status;
+
+ connector = connector_item_to_vkms_configfs_connector(item);
+
+ scoped_guard(mutex, &connector->dev->lock)
+ status = vkms_config_connector_get_status(connector->config);
+
+ return sprintf(page, "%u", status);
+}
+
+static ssize_t connector_status_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct vkms_configfs_connector *connector;
+ enum drm_connector_status status;
+
+ connector = connector_item_to_vkms_configfs_connector(item);
+
+ if (kstrtouint(page, 10, &status))
+ return -EINVAL;
+
+ if (status != connector_status_connected &&
+ status != connector_status_disconnected &&
+ status != connector_status_unknown)
+ return -EINVAL;
+
+ scoped_guard(mutex, &connector->dev->lock) {
+ vkms_config_connector_set_status(connector->config, status);
+
+ if (connector->dev->enabled)
+ vkms_trigger_connector_hotplug(connector->dev->config->dev);
+ }
+
+ return (ssize_t)count;
+}
+
+CONFIGFS_ATTR(connector_, status);
+
+static struct configfs_attribute *connector_item_attrs[] = {
+ &connector_attr_status,
+ NULL,
+};
+
+static void connector_release(struct config_item *item)
+{
+ struct vkms_configfs_connector *connector;
+ struct mutex *lock;
+
+ connector = connector_item_to_vkms_configfs_connector(item);
+ lock = &connector->dev->lock;
+
+ scoped_guard(mutex, lock) {
+ vkms_config_destroy_connector(connector->config);
+ kfree(connector);
+ }
+}
+
+static struct configfs_item_operations connector_item_operations = {
+ .release = &connector_release,
+};
+
+static const struct config_item_type connector_item_type = {
+ .ct_attrs = connector_item_attrs,
+ .ct_item_ops = &connector_item_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static int connector_possible_encoders_allow_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct vkms_configfs_connector *connector;
+ struct vkms_configfs_encoder *encoder;
+ int ret;
+
+ if (target->ci_type != &encoder_item_type)
+ return -EINVAL;
+
+ connector = connector_possible_encoders_item_to_vkms_configfs_connector(src);
+ encoder = encoder_item_to_vkms_configfs_encoder(target);
+
+ scoped_guard(mutex, &connector->dev->lock) {
+ if (connector->dev->enabled)
+ return -EBUSY;
+
+ ret = vkms_config_connector_attach_encoder(connector->config,
+ encoder->config);
+ }
+
+ return ret;
+}
+
+static void connector_possible_encoders_drop_link(struct config_item *src,
+ struct config_item *target)
+{
+ struct vkms_configfs_connector *connector;
+ struct vkms_configfs_encoder *encoder;
+
+ connector = connector_possible_encoders_item_to_vkms_configfs_connector(src);
+ encoder = encoder_item_to_vkms_configfs_encoder(target);
+
+ scoped_guard(mutex, &connector->dev->lock) {
+ vkms_config_connector_detach_encoder(connector->config,
+ encoder->config);
+ }
+}
+
+static struct configfs_item_operations connector_possible_encoders_item_operations = {
+ .allow_link = connector_possible_encoders_allow_link,
+ .drop_link = connector_possible_encoders_drop_link,
+};
+
+static const struct config_item_type connector_possible_encoders_group_type = {
+ .ct_item_ops = &connector_possible_encoders_item_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *make_connector_group(struct config_group *group,
+ const char *name)
+{
+ struct vkms_configfs_device *dev;
+ struct vkms_configfs_connector *connector;
+ int ret;
+
+ dev = child_group_to_vkms_configfs_device(group);
+
+ scoped_guard(mutex, &dev->lock) {
+ if (dev->enabled)
+ return ERR_PTR(-EBUSY);
+
+ connector = kzalloc(sizeof(*connector), GFP_KERNEL);
+ if (!connector)
+ return ERR_PTR(-ENOMEM);
+
+ connector->dev = dev;
+
+ connector->config = vkms_config_create_connector(dev->config);
+ if (IS_ERR(connector->config)) {
+ ret = PTR_ERR(connector->config);
+ kfree(connector);
+ return ERR_PTR(ret);
+ }
+
+ config_group_init_type_name(&connector->group, name,
+ &connector_item_type);
+
+ config_group_init_type_name(&connector->possible_encoders_group,
+ "possible_encoders",
+ &connector_possible_encoders_group_type);
+ configfs_add_default_group(&connector->possible_encoders_group,
+ &connector->group);
+ }
+
+ return &connector->group;
+}
+
+static struct configfs_group_operations connectors_group_operations = {
+ .make_group = &make_connector_group,
+};
+
+static const struct config_item_type connector_group_type = {
+ .ct_group_ops = &connectors_group_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static ssize_t device_enabled_show(struct config_item *item, char *page)
+{
+ struct vkms_configfs_device *dev;
+ bool enabled;
+
+ dev = device_item_to_vkms_configfs_device(item);
+
+ scoped_guard(mutex, &dev->lock)
+ enabled = dev->enabled;
+
+ return sprintf(page, "%d\n", enabled);
+}
+
+static ssize_t device_enabled_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct vkms_configfs_device *dev;
+ bool enabled;
+ int ret = 0;
+
+ dev = device_item_to_vkms_configfs_device(item);
+
+ if (kstrtobool(page, &enabled))
+ return -EINVAL;
+
+ scoped_guard(mutex, &dev->lock) {
+ if (!dev->enabled && enabled) {
+ if (!vkms_config_is_valid(dev->config))
+ return -EINVAL;
+
+ ret = vkms_create(dev->config);
+ if (ret)
+ return ret;
+ } else if (dev->enabled && !enabled) {
+ vkms_destroy(dev->config);
+ }
+
+ dev->enabled = enabled;
+ }
+
+ return (ssize_t)count;
+}
+
+CONFIGFS_ATTR(device_, enabled);
+
+static struct configfs_attribute *device_item_attrs[] = {
+ &device_attr_enabled,
+ NULL,
+};
+
+static void device_release(struct config_item *item)
+{
+ struct vkms_configfs_device *dev;
+
+ dev = device_item_to_vkms_configfs_device(item);
+
+ if (dev->enabled)
+ vkms_destroy(dev->config);
+
+ mutex_destroy(&dev->lock);
+ vkms_config_destroy(dev->config);
+ kfree(dev);
+}
+
+static struct configfs_item_operations device_item_operations = {
+ .release = &device_release,
+};
+
+static const struct config_item_type device_item_type = {
+ .ct_attrs = device_item_attrs,
+ .ct_item_ops = &device_item_operations,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *make_device_group(struct config_group *group,
+ const char *name)
+{
+ struct vkms_configfs_device *dev;
+ int ret;
+
+ if (strcmp(name, DEFAULT_DEVICE_NAME) == 0)
+ return ERR_PTR(-EINVAL);
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->config = vkms_config_create(name);
+ if (IS_ERR(dev->config)) {
+ ret = PTR_ERR(dev->config);
+ kfree(dev);
+ return ERR_PTR(ret);
+ }
+
+ config_group_init_type_name(&dev->group, name, &device_item_type);
+ mutex_init(&dev->lock);
+
+ config_group_init_type_name(&dev->planes_group, "planes",
+ &plane_group_type);
+ configfs_add_default_group(&dev->planes_group, &dev->group);
+
+ config_group_init_type_name(&dev->crtcs_group, "crtcs",
+ &crtc_group_type);
+ configfs_add_default_group(&dev->crtcs_group, &dev->group);
+
+ config_group_init_type_name(&dev->encoders_group, "encoders",
+ &encoder_group_type);
+ configfs_add_default_group(&dev->encoders_group, &dev->group);
+
+ config_group_init_type_name(&dev->connectors_group, "connectors",
+ &connector_group_type);
+ configfs_add_default_group(&dev->connectors_group, &dev->group);
+
+ return &dev->group;
+}
+
+static struct configfs_group_operations device_group_ops = {
+ .make_group = &make_device_group,
+};
+
+static const struct config_item_type device_group_type = {
+ .ct_group_ops = &device_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem vkms_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_name = "vkms",
+ .ci_type = &device_group_type,
+ },
+ },
+ .su_mutex = __MUTEX_INITIALIZER(vkms_subsys.su_mutex),
+};
+
+int vkms_configfs_register(void)
+{
+ int ret;
+
+ if (is_configfs_registered)
+ return 0;
+
+ config_group_init(&vkms_subsys.su_group);
+ ret = configfs_register_subsystem(&vkms_subsys);
+
+ is_configfs_registered = ret == 0;
+
+ return ret;
+}
+
+void vkms_configfs_unregister(void)
+{
+ if (is_configfs_registered)
+ configfs_unregister_subsystem(&vkms_subsys);
+}
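Taken together, the intended lifecycle is: create a device directory under the subsystem, populate the planes/crtcs/encoders/connectors subgroups, wire them up with symlinks, then write 1 to "enabled". A hedged userspace sketch using plain POSIX calls; the /config mount point and the "my-vkms" name are assumptions, and error handling is elided:

/*
 * Hypothetical usage sketch, mirroring the group/attribute layout
 * registered above. Assumes configfs is mounted at /config.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static void write_attr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		write(fd, val, strlen(val));
		close(fd);
	}
}

static void create_minimal_vkms(void)
{
	mkdir("/config/vkms/my-vkms", 0755);
	mkdir("/config/vkms/my-vkms/planes/plane0", 0755);
	mkdir("/config/vkms/my-vkms/crtcs/crtc0", 0755);
	mkdir("/config/vkms/my-vkms/encoders/encoder0", 0755);
	mkdir("/config/vkms/my-vkms/connectors/connector0", 0755);

	/* DRM_PLANE_TYPE_PRIMARY == 1, as checked in plane_type_store() */
	write_attr("/config/vkms/my-vkms/planes/plane0/type", "1");

	/* Links created under possible_* trigger the allow_link callbacks */
	symlink("/config/vkms/my-vkms/crtcs/crtc0",
		"/config/vkms/my-vkms/planes/plane0/possible_crtcs/crtc0");
	symlink("/config/vkms/my-vkms/crtcs/crtc0",
		"/config/vkms/my-vkms/encoders/encoder0/possible_crtcs/crtc0");
	symlink("/config/vkms/my-vkms/encoders/encoder0",
		"/config/vkms/my-vkms/connectors/connector0/possible_encoders/encoder0");

	/* Validates the config and registers the DRM device */
	write_attr("/config/vkms/my-vkms/enabled", "1");
}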
diff --git a/drivers/gpu/drm/vkms/vkms_configfs.h b/drivers/gpu/drm/vkms/vkms_configfs.h
new file mode 100644
index 000000000000..e9020b0043db
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_configfs.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef _VKMS_CONFIGFS_H_
+#define _VKMS_CONFIGFS_H_
+
+int vkms_configfs_register(void);
+void vkms_configfs_unregister(void);
+
+#endif /* _VKMS_CONFIGFS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c
index 48b10cba322a..b0a6b212d3f4 100644
--- a/drivers/gpu/drm/vkms/vkms_connector.c
+++ b/drivers/gpu/drm/vkms/vkms_connector.c
@@ -5,9 +5,37 @@
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include "vkms_config.h"
#include "vkms_connector.h"
+static enum drm_connector_status vkms_connector_detect(struct drm_connector *connector,
+ bool force)
+{
+ struct drm_device *dev = connector->dev;
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
+ struct vkms_connector *vkms_connector;
+ enum drm_connector_status status;
+ struct vkms_config_connector *connector_cfg;
+
+ vkms_connector = drm_connector_to_vkms_connector(connector);
+
+ /*
+ * The connector configuration might not exist if its configfs directory
+ * was deleted. Therefore, use the configuration if present or keep the
+ * current status if we cannot access it anymore.
+ */
+ status = connector->status;
+
+ vkms_config_for_each_connector(vkmsdev->config, connector_cfg) {
+ if (connector_cfg->connector == vkms_connector)
+ status = vkms_config_connector_get_status(connector_cfg);
+ }
+
+ return status;
+}
+
static const struct drm_connector_funcs vkms_connector_funcs = {
+ .detect = vkms_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -59,3 +87,10 @@ struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev)
return connector;
}
+
+void vkms_trigger_connector_hotplug(struct vkms_device *vkmsdev)
+{
+ struct drm_device *dev = &vkmsdev->drm;
+
+ drm_kms_helper_hotplug_event(dev);
+}
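drm_kms_helper_hotplug_event() ultimately emits a uevent on the DRM device, which is how a compositor learns about the status change written through configfs. A hedged sketch of the listening side with stock libudev calls (illustrative; not part of this diff):

#include <libudev.h>
#include <poll.h>
#include <stdio.h>

static void wait_for_drm_hotplug(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
	struct pollfd pfd;

	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);

	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	while (poll(&pfd, 1, -1) > 0) {
		struct udev_device *dev = udev_monitor_receive_device(mon);

		if (!dev)
			continue;
		/* Re-probe connector state here, e.g. via drmModeGetConnector() */
		printf("drm uevent: %s\n", udev_device_get_action(dev));
		udev_device_unref(dev);
	}
}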
diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h
index c9149c1b7af0..ed312f4eff3a 100644
--- a/drivers/gpu/drm/vkms/vkms_connector.h
+++ b/drivers/gpu/drm/vkms/vkms_connector.h
@@ -5,6 +5,9 @@
#include "vkms_drv.h"
+#define drm_connector_to_vkms_connector(target) \
+ container_of(target, struct vkms_connector, base)
+
/**
* struct vkms_connector - VKMS custom type wrapping around the DRM connector
*
@@ -23,4 +26,10 @@ struct vkms_connector {
*/
struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev);
+/**
+ * vkms_trigger_connector_hotplug() - Signal a change in the device's connector status
+ * @vkmsdev: VKMS device to update
+ */
+void vkms_trigger_connector_hotplug(struct vkms_device *vkmsdev);
+
#endif /* _VKMS_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index e60573e0f3e9..9a7db1d51022 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -5,27 +5,21 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
+#include <drm/drm_vblank_helper.h>
#include "vkms_drv.h"
-static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+static bool vkms_crtc_handle_vblank_timeout(struct drm_crtc *crtc)
{
- struct vkms_output *output = container_of(timer, struct vkms_output,
- vblank_hrtimer);
- struct drm_crtc *crtc = &output->crtc;
+ struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
struct vkms_crtc_state *state;
- u64 ret_overrun;
bool ret, fence_cookie;
fence_cookie = dma_fence_begin_signalling();
- ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
- output->period_ns);
- if (ret_overrun != 1)
- pr_warn("%s: vblank timer overrun\n", __func__);
-
spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
@@ -57,55 +51,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
dma_fence_end_signalling(fence_cookie);
- return HRTIMER_RESTART;
-}
-
-static int vkms_enable_vblank(struct drm_crtc *crtc)
-{
- struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
- struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
-
- hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- out->period_ns = ktime_set(0, vblank->framedur_ns);
- hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
-
- return 0;
-}
-
-static void vkms_disable_vblank(struct drm_crtc *crtc)
-{
- struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
-
- hrtimer_cancel(&out->vblank_hrtimer);
-}
-
-static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq)
-{
- struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
- struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
-
- if (!READ_ONCE(vblank->enabled)) {
- *vblank_time = ktime_get();
- return true;
- }
-
- *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
-
- if (WARN_ON(*vblank_time == vblank->time))
- return true;
-
- /*
- * To prevent races we roll the hrtimer forward before we do any
- * interrupt processing - this is how real hw works (the interrupt is
- * only generated after all the vblank registers are updated) and what
- * the vblank core expects. Therefore we need to always correct the
- * timestampe by one frame.
- */
- *vblank_time -= output->period_ns;
-
return true;
}
@@ -159,9 +104,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
.reset = vkms_atomic_crtc_reset,
.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
.atomic_destroy_state = vkms_atomic_crtc_destroy_state,
- .enable_vblank = vkms_enable_vblank,
- .disable_vblank = vkms_disable_vblank,
- .get_vblank_timestamp = vkms_get_vblank_timestamp,
+ DRM_CRTC_VBLANK_TIMER_FUNCS,
.get_crc_sources = vkms_get_crc_sources,
.set_crc_source = vkms_set_crc_source,
.verify_crc_source = vkms_verify_crc_source,
@@ -185,7 +128,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
+ plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane);
WARN_ON(!plane_state);
if (!plane_state->visible)
@@ -201,7 +144,7 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
i = 0;
drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
- plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
+ plane_state = drm_atomic_get_new_plane_state(crtc_state->state, plane);
if (!plane_state->visible)
continue;
@@ -213,18 +156,6 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
-static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- drm_crtc_vblank_on(crtc);
-}
-
-static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- drm_crtc_vblank_off(crtc);
-}
-
static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_atomic_state *state)
__acquires(&vkms_output->lock)
@@ -265,8 +196,9 @@ static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
.atomic_check = vkms_crtc_atomic_check,
.atomic_begin = vkms_crtc_atomic_begin,
.atomic_flush = vkms_crtc_atomic_flush,
- .atomic_enable = vkms_crtc_atomic_enable,
- .atomic_disable = vkms_crtc_atomic_disable,
+ .atomic_enable = drm_crtc_vblank_atomic_enable,
+ .atomic_disable = drm_crtc_vblank_atomic_disable,
+ .handle_vblank_timeout = vkms_crtc_handle_vblank_timeout,
};
struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *primary,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index e8472d9b6e3b..dd1402f43773 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -23,11 +23,13 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
#include "vkms_config.h"
+#include "vkms_configfs.h"
#include "vkms_drv.h"
#define DRIVER_NAME "vkms"
@@ -49,6 +51,14 @@ static bool enable_overlay;
module_param_named(enable_overlay, enable_overlay, bool, 0444);
MODULE_PARM_DESC(enable_overlay, "Enable/Disable overlay support");
+static bool enable_plane_pipeline;
+module_param_named(enable_plane_pipeline, enable_plane_pipeline, bool, 0444);
+MODULE_PARM_DESC(enable_plane_pipeline, "Enable/Disable plane pipeline support");
+
+static bool create_default_dev = true;
+module_param_named(create_default_dev, create_default_dev, bool, 0444);
+MODULE_PARM_DESC(create_default_dev, "Whether to create the default VKMS device");
+
DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
@@ -146,7 +156,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
return vkms_output_init(vkmsdev);
}
-static int vkms_create(struct vkms_config *config)
+int vkms_create(struct vkms_config *config)
{
int ret;
struct faux_device *fdev;
@@ -214,7 +224,15 @@ static int __init vkms_init(void)
int ret;
struct vkms_config *config;
- config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay);
+ ret = vkms_configfs_register();
+ if (ret)
+ return ret;
+
+ if (!create_default_dev)
+ return 0;
+
+ config = vkms_config_default_create(enable_cursor, enable_writeback,
+ enable_overlay, enable_plane_pipeline);
if (IS_ERR(config))
return PTR_ERR(config);
@@ -229,7 +247,7 @@ static int __init vkms_init(void)
return 0;
}
-static void vkms_destroy(struct vkms_config *config)
+void vkms_destroy(struct vkms_config *config)
{
struct faux_device *fdev;
@@ -240,6 +258,7 @@ static void vkms_destroy(struct vkms_config *config)
fdev = config->dev->faux_dev;
+ drm_colorop_pipeline_destroy(&config->dev->drm);
drm_dev_unregister(&config->dev->drm);
drm_atomic_helper_shutdown(&config->dev->drm);
devres_release_group(&fdev->dev, NULL);
@@ -250,6 +269,8 @@ static void vkms_destroy(struct vkms_config *config)
static void __exit vkms_exit(void)
{
+ vkms_configfs_unregister();
+
if (!default_config)
return;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 8013c31efe3b..0933e4ce0ff0 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -45,6 +45,10 @@ struct vkms_frame_info {
unsigned int rotation;
};
+struct pixel_argb_s32 {
+ s32 a, r, g, b;
+};
+
/**
* struct pixel_argb_u16 - Internal representation of a pixel color.
* @a: Alpha component value, stored in 16 bits, without padding, using
@@ -215,8 +219,6 @@ struct vkms_output {
struct drm_crtc crtc;
struct drm_writeback_connector wb_connector;
struct drm_encoder wb_encoder;
- struct hrtimer vblank_hrtimer;
- ktime_t period_ns;
struct workqueue_struct *composer_workq;
spinlock_t lock;
@@ -227,6 +229,7 @@ struct vkms_output {
};
struct vkms_config;
+struct vkms_config_plane;
/**
* struct vkms_device - Description of a VKMS device
@@ -259,6 +262,26 @@ struct vkms_device {
container_of(target, struct vkms_plane_state, base.base)
/**
+ * vkms_create() - Create a device from a configuration
+ * @config: Config used to configure the new device
+ *
+ * A pointer to the created vkms_device is stored in @config
+ *
+ * Returns:
+ * 0 on success or an error.
+ */
+int vkms_create(struct vkms_config *config);
+
+/**
+ * vkms_destroy() - Destroy a device
+ * @config: Config from which the device was created
+ *
+ * The device is completely removed, but the @config is not freed. It can be
+ * reused or destroyed with vkms_config_destroy().
+ */
+void vkms_destroy(struct vkms_config *config);
+
+/**
* vkms_crtc_init() - Initialize a CRTC for VKMS
* @dev: DRM device associated with the VKMS buffer
* @crtc: uninitialized CRTC device
@@ -280,10 +303,10 @@ int vkms_output_init(struct vkms_device *vkmsdev);
* vkms_plane_init() - Initialize a plane
*
* @vkmsdev: VKMS device containing the plane
- * @type: type of plane to initialize
+ * @plane_cfg: plane configuration
*/
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type);
+ struct vkms_config_plane *plane_cfg);
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
@@ -300,4 +323,7 @@ void vkms_writeback_row(struct vkms_writeback_job *wb, const struct line_buffer
/* Writeback */
int vkms_enable_writeback_connector(struct vkms_device *vkmsdev, struct vkms_output *vkms_out);
+/* Colorops */
+int vkms_initialize_colorops(struct drm_plane *plane);
+
#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 6d0227c6635a..dfb8e13cba87 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -259,16 +259,27 @@ static struct pixel_argb_u16 argb_u16_from_grayu16(u16 gray)
return argb_u16_from_u16161616(0xFFFF, gray, gray, gray);
}
-VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
- const struct conversion_matrix *matrix)
+static struct pixel_argb_u16 argb_u16_from_BGR565(const __le16 *pixel)
+{
+ struct pixel_argb_u16 out_pixel;
+
+ out_pixel = argb_u16_from_RGB565(pixel);
+ swap(out_pixel.r, out_pixel.b);
+
+ return out_pixel;
+}
+
+VISIBLE_IF_KUNIT
+struct pixel_argb_u16 argb_u16_from_yuv161616(const struct conversion_matrix *matrix,
+ u16 y, u16 channel_1, u16 channel_2)
{
u16 r, g, b;
s64 fp_y, fp_channel_1, fp_channel_2;
s64 fp_r, fp_g, fp_b;
- fp_y = drm_int2fixp(((int)y - matrix->y_offset) * 257);
- fp_channel_1 = drm_int2fixp(((int)channel_1 - 128) * 257);
- fp_channel_2 = drm_int2fixp(((int)channel_2 - 128) * 257);
+ fp_y = drm_int2fixp((int)y - matrix->y_offset * 257);
+ fp_channel_1 = drm_int2fixp((int)channel_1 - 128 * 257);
+ fp_channel_2 = drm_int2fixp((int)channel_2 - 128 * 257);
fp_r = drm_fixp_mul(matrix->matrix[0][0], fp_y) +
drm_fixp_mul(matrix->matrix[0][1], fp_channel_1) +
@@ -290,7 +301,65 @@ VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1,
return argb_u16_from_u16161616(0xffff, r, g, b);
}
-EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv888);
+EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv161616);
+
+/**
+ * READ_LINE() - Generic generator for a read_line function which can be used for formats with one
+ * plane and block_h == block_w == 1.
+ *
+ * @function_name: Function name to generate
+ * @pixel_name: Temporary pixel name used in the @__VA_ARGS__ parameters
+ * @pixel_type: Type to cast the pixel pointer to
+ * @callback: Callback to call for each pixel. This function should take @__VA_ARGS__ as parameters
+ * and return a pixel_argb_u16
+ * __VA_ARGS__: Arguments to pass to the callback. You can use @pixel_name to access the current
+ * pixel.
+ */
+#define READ_LINE(function_name, pixel_name, pixel_type, callback, ...) \
+static void function_name(const struct vkms_plane_state *plane, int x_start, \
+ int y_start, enum pixel_read_direction direction, int count, \
+ struct pixel_argb_u16 out_pixel[]) \
+{ \
+ struct pixel_argb_u16 *end = out_pixel + count; \
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); \
+ u8 *src_pixels; \
+ \
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); \
+ \
+ while (out_pixel < end) { \
+ pixel_type *(pixel_name) = (pixel_type *)src_pixels; \
+ *out_pixel = (callback)(__VA_ARGS__); \
+ out_pixel += 1; \
+ src_pixels += step; \
+ } \
+}
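For reference, expanding READ_LINE(R8_read_line, px, u8, argb_u16_from_gray8, *px) (instantiated further down in this patch) yields roughly the function below, which matches the open-coded R8_read_line that this series removes:

static void R8_read_line(const struct vkms_plane_state *plane, int x_start,
			 int y_start, enum pixel_read_direction direction, int count,
			 struct pixel_argb_u16 out_pixel[])
{
	struct pixel_argb_u16 *end = out_pixel + count;
	int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
	u8 *src_pixels;

	packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);

	while (out_pixel < end) {
		u8 *px = (u8 *)src_pixels;

		*out_pixel = argb_u16_from_gray8(*px);
		out_pixel += 1;
		src_pixels += step;
	}
}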
+
+/**
+ * READ_LINE_ARGB8888() - Generic generator for ARGB8888 formats.
+ * The pixel type used is u8, so pixel_name[0]..pixel_name[n] are the components of the pixel.
+ *
+ * @function_name: Function name to generate
+ * @pixel_name: temporary pixel to use in @a, @r, @g and @b parameters
+ * @a: alpha value
+ * @r: red value
+ * @g: green value
+ * @b: blue value
+ */
+#define READ_LINE_ARGB8888(function_name, pixel_name, a, r, g, b) \
+ READ_LINE(function_name, pixel_name, u8, argb_u16_from_u8888, a, r, g, b)
+/**
+ * READ_LINE_le16161616() - Generic generator for ARGB16161616 formats.
+ * The pixel type used is __le16, so pixel_name[0]..pixel_name[n] are the components of the pixel.
+ *
+ * @function_name: Function name to generate
+ * @pixel_name: temporary pixel to use in @a, @r, @g and @b parameters
+ * @a: alpha value
+ * @r: red value
+ * @g: green value
+ * @b: blue value
+ */
+#define READ_LINE_le16161616(function_name, pixel_name, a, r, g, b) \
+ READ_LINE(function_name, pixel_name, __le16, argb_u16_from_le16161616, a, r, g, b)
/*
* The following functions are read_line function for each pixel format supported by VKMS.
@@ -378,138 +447,27 @@ static void R4_read_line(const struct vkms_plane_state *plane, int x_start,
Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
}
-static void R8_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- while (out_pixel < end) {
- *out_pixel = argb_u16_from_gray8(*src_pixels);
- src_pixels += step;
- out_pixel += 1;
- }
-}
-
-static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
- enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- u8 *px = (u8 *)src_pixels;
- *out_pixel = argb_u16_from_u8888(px[3], px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
- enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- u8 *px = (u8 *)src_pixels;
- *out_pixel = argb_u16_from_u8888(255, px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void ABGR8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
- enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- u8 *px = (u8 *)src_pixels;
- /* Switch blue and red pixels. */
- *out_pixel = argb_u16_from_u8888(px[3], px[0], px[1], px[2]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
- while (out_pixel < end) {
- u16 *px = (u16 *)src_pixels;
- *out_pixel = argb_u16_from_u16161616(px[3], px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
+READ_LINE_ARGB8888(XRGB8888_read_line, px, 0xFF, px[2], px[1], px[0])
+READ_LINE_ARGB8888(XBGR8888_read_line, px, 0xFF, px[0], px[1], px[2])
-static void XRGB16161616_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
-
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
-
- while (out_pixel < end) {
- __le16 *px = (__le16 *)src_pixels;
- *out_pixel = argb_u16_from_le16161616(cpu_to_le16(0xFFFF), px[2], px[1], px[0]);
- out_pixel += 1;
- src_pixels += step;
- }
-}
-
-static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- struct pixel_argb_u16 *end = out_pixel + count;
- u8 *src_pixels;
+READ_LINE_ARGB8888(ARGB8888_read_line, px, px[3], px[2], px[1], px[0])
+READ_LINE_ARGB8888(ABGR8888_read_line, px, px[3], px[0], px[1], px[2])
+READ_LINE_ARGB8888(RGBA8888_read_line, px, px[0], px[3], px[2], px[1])
+READ_LINE_ARGB8888(BGRA8888_read_line, px, px[0], px[1], px[2], px[3])
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+READ_LINE_ARGB8888(RGB888_read_line, px, 0xFF, px[2], px[1], px[0])
+READ_LINE_ARGB8888(BGR888_read_line, px, 0xFF, px[0], px[1], px[2])
- int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+READ_LINE_le16161616(ARGB16161616_read_line, px, px[3], px[2], px[1], px[0])
+READ_LINE_le16161616(ABGR16161616_read_line, px, px[3], px[0], px[1], px[2])
+READ_LINE_le16161616(XRGB16161616_read_line, px, cpu_to_le16(0xFFFF), px[2], px[1], px[0])
+READ_LINE_le16161616(XBGR16161616_read_line, px, cpu_to_le16(0xFFFF), px[0], px[1], px[2])
- while (out_pixel < end) {
- __le16 *px = (__le16 *)src_pixels;
+READ_LINE(RGB565_read_line, px, __le16, argb_u16_from_RGB565, px)
+READ_LINE(BGR565_read_line, px, __le16, argb_u16_from_BGR565, px)
- *out_pixel = argb_u16_from_RGB565(px);
- out_pixel += 1;
- src_pixels += step;
- }
-}
+READ_LINE(R8_read_line, px, u8, argb_u16_from_gray8, *px)
/*
* This callback can be used for YUV formats where U and V values are
@@ -521,35 +479,57 @@ static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
* - Convert YUV and YVU with the same function (a column swap is needed when setting up
* plane->conversion_matrix)
*/
-static void semi_planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
- int y_start, enum pixel_read_direction direction, int count,
- struct pixel_argb_u16 out_pixel[])
-{
- u8 *y_plane;
- u8 *uv_plane;
-
- packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
- &y_plane);
- packed_pixels_addr_1x1(plane->frame_info,
- x_start / plane->frame_info->fb->format->hsub,
- y_start / plane->frame_info->fb->format->vsub, 1,
- &uv_plane);
- int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
- int step_uv = get_block_step_bytes(plane->frame_info->fb, direction, 1);
- int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
- int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
- const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
-
- for (int i = 0; i < count; i++) {
- *out_pixel = argb_u16_from_yuv888(y_plane[0], uv_plane[0], uv_plane[1],
- conversion_matrix);
- out_pixel += 1;
- y_plane += step_y;
- if ((i + subsampling_offset + 1) % subsampling == 0)
- uv_plane += step_uv;
- }
-}
+/**
+ * READ_LINE_YUV_SEMIPLANAR() - Generic generator for a read_line function which can be used for YUV
+ * formats with two planes and block_w == block_h == 1.
+ *
+ * @function_name: Function name to generate
+ * @pixel_1_name: Temporary pixel name for the first plane used in the @__VA_ARGS__ parameters
+ * @pixel_2_name: Temporary pixel name for the second plane used in the @__VA_ARGS__ parameters
+ * @pixel_1_type: Type to cast the pixel pointer to on plane 1
+ * @pixel_2_type: Type to cast the pixel pointer to on plane 2
+ * @callback: Callback to call for each pixel. This function should take
+ * (struct conversion_matrix *, @__VA_ARGS__) as parameters and return a pixel_argb_u16
+ * __VA_ARGS__: Arguments to pass to the callback. You can use @pixel_1_name and @pixel_2_name
+ * to access the current pixel values
+ */
+#define READ_LINE_YUV_SEMIPLANAR(function_name, pixel_1_name, pixel_2_name, pixel_1_type, \
+ pixel_2_type, callback, ...) \
+static void function_name(const struct vkms_plane_state *plane, int x_start, \
+ int y_start, enum pixel_read_direction direction, int count, \
+ struct pixel_argb_u16 out_pixel[]) \
+{ \
+ u8 *plane_1; \
+ u8 *plane_2; \
+ \
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, \
+ &plane_1); \
+ packed_pixels_addr_1x1(plane->frame_info, \
+ x_start / plane->frame_info->fb->format->hsub, \
+ y_start / plane->frame_info->fb->format->vsub, 1, \
+ &plane_2); \
+ int step_1 = get_block_step_bytes(plane->frame_info->fb, direction, 0); \
+ int step_2 = get_block_step_bytes(plane->frame_info->fb, direction, 1); \
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction); \
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start); \
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix; \
+ \
+ for (int i = 0; i < count; i++) { \
+ pixel_1_type *(pixel_1_name) = (pixel_1_type *)plane_1; \
+ pixel_2_type *(pixel_2_name) = (pixel_2_type *)plane_2; \
+ *out_pixel = (callback)(conversion_matrix, __VA_ARGS__); \
+ out_pixel += 1; \
+ plane_1 += step_1; \
+ if ((i + subsampling_offset + 1) % subsampling == 0) \
+ plane_2 += step_2; \
+ } \
+}
+
+READ_LINE_YUV_SEMIPLANAR(YUV888_semiplanar_read_line, y, uv, u8, u8, argb_u16_from_yuv161616,
+ y[0] * 257, uv[0] * 257, uv[1] * 257)
+READ_LINE_YUV_SEMIPLANAR(YUV161616_semiplanar_read_line, y, uv, u16, u16, argb_u16_from_yuv161616,
+ y[0], uv[0], uv[1])
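The * 257 factors above lift 8-bit samples into the 16-bit domain expected by argb_u16_from_yuv161616(): 257 is 0x101, so the multiplication replicates the byte into both halves of the u16, and 255 * 257 == 65535. A one-line illustration (helper name hypothetical):

/* 8-bit -> 16-bit range expansion, as used by the YUV read_line callers. */
static inline u16 vkms_u8_to_u16(u8 v)
{
	return v * 257;	/* 0x00 -> 0x0000, 0x80 -> 0x8080, 0xff -> 0xffff */
}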
/*
* This callback can be used for YUV format where each color component is
* stored in a different plane (often called planar formats). It will
@@ -586,8 +566,9 @@ static void planar_yuv_read_line(const struct vkms_plane_state *plane, int x_sta
const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
for (int i = 0; i < count; i++) {
- *out_pixel = argb_u16_from_yuv888(*y_plane, *channel_1_plane, *channel_2_plane,
- conversion_matrix);
+ *out_pixel = argb_u16_from_yuv161616(conversion_matrix,
+ *y_plane * 257, *channel_1_plane * 257,
+ *channel_2_plane * 257);
out_pixel += 1;
y_plane += step_y;
if ((i + subsampling_offset + 1) % subsampling == 0) {
@@ -712,23 +693,43 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
switch (format) {
case DRM_FORMAT_ARGB8888:
return &ARGB8888_read_line;
- case DRM_FORMAT_XRGB8888:
- return &XRGB8888_read_line;
case DRM_FORMAT_ABGR8888:
return &ABGR8888_read_line;
+ case DRM_FORMAT_BGRA8888:
+ return &BGRA8888_read_line;
+ case DRM_FORMAT_RGBA8888:
+ return &RGBA8888_read_line;
+ case DRM_FORMAT_XRGB8888:
+ return &XRGB8888_read_line;
+ case DRM_FORMAT_XBGR8888:
+ return &XBGR8888_read_line;
+ case DRM_FORMAT_RGB888:
+ return &RGB888_read_line;
+ case DRM_FORMAT_BGR888:
+ return &BGR888_read_line;
case DRM_FORMAT_ARGB16161616:
return &ARGB16161616_read_line;
+ case DRM_FORMAT_ABGR16161616:
+ return &ABGR16161616_read_line;
case DRM_FORMAT_XRGB16161616:
return &XRGB16161616_read_line;
+ case DRM_FORMAT_XBGR16161616:
+ return &XBGR16161616_read_line;
case DRM_FORMAT_RGB565:
return &RGB565_read_line;
+ case DRM_FORMAT_BGR565:
+ return &BGR565_read_line;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV42:
- return &semi_planar_yuv_read_line;
+ return &YUV888_semiplanar_read_line;
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return &YUV161616_semiplanar_read_line;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YUV444:
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index b4fe62ab9c65..eeb208cdd6b1 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -14,8 +14,8 @@ void get_conversion_matrix_to_argb_u16(u32 format, enum drm_color_encoding encod
struct conversion_matrix *matrix);
#if IS_ENABLED(CONFIG_KUNIT)
-struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
- const struct conversion_matrix *matrix);
+struct pixel_argb_u16 argb_u16_from_yuv161616(const struct conversion_matrix *matrix,
+ u16 y, u16 channel_1, u16 channel_2);
#endif
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_luts.c b/drivers/gpu/drm/vkms/vkms_luts.c
new file mode 100644
index 000000000000..82cb792f10d8
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_luts.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <drm/drm_mode.h>
+
+#include "vkms_drv.h"
+#include "vkms_luts.h"
+
+/*
+ * These LUTs were generated with a LUT generator based on
+ * skia's transfer function code. The LUT generator can be
+ * found at
+ * https://gitlab.freedesktop.org/hwentland/lutgen
+ */
+
+static struct drm_color_lut linear_array[LUT_SIZE] = {
+ { 0x0, 0x0, 0x0, 0 },
+ { 0x101, 0x101, 0x101, 0 },
+ { 0x202, 0x202, 0x202, 0 },
+ { 0x303, 0x303, 0x303, 0 },
+ { 0x404, 0x404, 0x404, 0 },
+ { 0x505, 0x505, 0x505, 0 },
+ { 0x606, 0x606, 0x606, 0 },
+ { 0x707, 0x707, 0x707, 0 },
+ { 0x808, 0x808, 0x808, 0 },
+ { 0x909, 0x909, 0x909, 0 },
+ { 0xa0a, 0xa0a, 0xa0a, 0 },
+ { 0xb0b, 0xb0b, 0xb0b, 0 },
+ { 0xc0c, 0xc0c, 0xc0c, 0 },
+ { 0xd0d, 0xd0d, 0xd0d, 0 },
+ { 0xe0e, 0xe0e, 0xe0e, 0 },
+ { 0xf0f, 0xf0f, 0xf0f, 0 },
+ { 0x1010, 0x1010, 0x1010, 0 },
+ { 0x1111, 0x1111, 0x1111, 0 },
+ { 0x1212, 0x1212, 0x1212, 0 },
+ { 0x1313, 0x1313, 0x1313, 0 },
+ { 0x1414, 0x1414, 0x1414, 0 },
+ { 0x1515, 0x1515, 0x1515, 0 },
+ { 0x1616, 0x1616, 0x1616, 0 },
+ { 0x1717, 0x1717, 0x1717, 0 },
+ { 0x1818, 0x1818, 0x1818, 0 },
+ { 0x1919, 0x1919, 0x1919, 0 },
+ { 0x1a1a, 0x1a1a, 0x1a1a, 0 },
+ { 0x1b1b, 0x1b1b, 0x1b1b, 0 },
+ { 0x1c1c, 0x1c1c, 0x1c1c, 0 },
+ { 0x1d1d, 0x1d1d, 0x1d1d, 0 },
+ { 0x1e1e, 0x1e1e, 0x1e1e, 0 },
+ { 0x1f1f, 0x1f1f, 0x1f1f, 0 },
+ { 0x2020, 0x2020, 0x2020, 0 },
+ { 0x2121, 0x2121, 0x2121, 0 },
+ { 0x2222, 0x2222, 0x2222, 0 },
+ { 0x2323, 0x2323, 0x2323, 0 },
+ { 0x2424, 0x2424, 0x2424, 0 },
+ { 0x2525, 0x2525, 0x2525, 0 },
+ { 0x2626, 0x2626, 0x2626, 0 },
+ { 0x2727, 0x2727, 0x2727, 0 },
+ { 0x2828, 0x2828, 0x2828, 0 },
+ { 0x2929, 0x2929, 0x2929, 0 },
+ { 0x2a2a, 0x2a2a, 0x2a2a, 0 },
+ { 0x2b2b, 0x2b2b, 0x2b2b, 0 },
+ { 0x2c2c, 0x2c2c, 0x2c2c, 0 },
+ { 0x2d2d, 0x2d2d, 0x2d2d, 0 },
+ { 0x2e2e, 0x2e2e, 0x2e2e, 0 },
+ { 0x2f2f, 0x2f2f, 0x2f2f, 0 },
+ { 0x3030, 0x3030, 0x3030, 0 },
+ { 0x3131, 0x3131, 0x3131, 0 },
+ { 0x3232, 0x3232, 0x3232, 0 },
+ { 0x3333, 0x3333, 0x3333, 0 },
+ { 0x3434, 0x3434, 0x3434, 0 },
+ { 0x3535, 0x3535, 0x3535, 0 },
+ { 0x3636, 0x3636, 0x3636, 0 },
+ { 0x3737, 0x3737, 0x3737, 0 },
+ { 0x3838, 0x3838, 0x3838, 0 },
+ { 0x3939, 0x3939, 0x3939, 0 },
+ { 0x3a3a, 0x3a3a, 0x3a3a, 0 },
+ { 0x3b3b, 0x3b3b, 0x3b3b, 0 },
+ { 0x3c3c, 0x3c3c, 0x3c3c, 0 },
+ { 0x3d3d, 0x3d3d, 0x3d3d, 0 },
+ { 0x3e3e, 0x3e3e, 0x3e3e, 0 },
+ { 0x3f3f, 0x3f3f, 0x3f3f, 0 },
+ { 0x4040, 0x4040, 0x4040, 0 },
+ { 0x4141, 0x4141, 0x4141, 0 },
+ { 0x4242, 0x4242, 0x4242, 0 },
+ { 0x4343, 0x4343, 0x4343, 0 },
+ { 0x4444, 0x4444, 0x4444, 0 },
+ { 0x4545, 0x4545, 0x4545, 0 },
+ { 0x4646, 0x4646, 0x4646, 0 },
+ { 0x4747, 0x4747, 0x4747, 0 },
+ { 0x4848, 0x4848, 0x4848, 0 },
+ { 0x4949, 0x4949, 0x4949, 0 },
+ { 0x4a4a, 0x4a4a, 0x4a4a, 0 },
+ { 0x4b4b, 0x4b4b, 0x4b4b, 0 },
+ { 0x4c4c, 0x4c4c, 0x4c4c, 0 },
+ { 0x4d4d, 0x4d4d, 0x4d4d, 0 },
+ { 0x4e4e, 0x4e4e, 0x4e4e, 0 },
+ { 0x4f4f, 0x4f4f, 0x4f4f, 0 },
+ { 0x5050, 0x5050, 0x5050, 0 },
+ { 0x5151, 0x5151, 0x5151, 0 },
+ { 0x5252, 0x5252, 0x5252, 0 },
+ { 0x5353, 0x5353, 0x5353, 0 },
+ { 0x5454, 0x5454, 0x5454, 0 },
+ { 0x5555, 0x5555, 0x5555, 0 },
+ { 0x5656, 0x5656, 0x5656, 0 },
+ { 0x5757, 0x5757, 0x5757, 0 },
+ { 0x5858, 0x5858, 0x5858, 0 },
+ { 0x5959, 0x5959, 0x5959, 0 },
+ { 0x5a5a, 0x5a5a, 0x5a5a, 0 },
+ { 0x5b5b, 0x5b5b, 0x5b5b, 0 },
+ { 0x5c5c, 0x5c5c, 0x5c5c, 0 },
+ { 0x5d5d, 0x5d5d, 0x5d5d, 0 },
+ { 0x5e5e, 0x5e5e, 0x5e5e, 0 },
+ { 0x5f5f, 0x5f5f, 0x5f5f, 0 },
+ { 0x6060, 0x6060, 0x6060, 0 },
+ { 0x6161, 0x6161, 0x6161, 0 },
+ { 0x6262, 0x6262, 0x6262, 0 },
+ { 0x6363, 0x6363, 0x6363, 0 },
+ { 0x6464, 0x6464, 0x6464, 0 },
+ { 0x6565, 0x6565, 0x6565, 0 },
+ { 0x6666, 0x6666, 0x6666, 0 },
+ { 0x6767, 0x6767, 0x6767, 0 },
+ { 0x6868, 0x6868, 0x6868, 0 },
+ { 0x6969, 0x6969, 0x6969, 0 },
+ { 0x6a6a, 0x6a6a, 0x6a6a, 0 },
+ { 0x6b6b, 0x6b6b, 0x6b6b, 0 },
+ { 0x6c6c, 0x6c6c, 0x6c6c, 0 },
+ { 0x6d6d, 0x6d6d, 0x6d6d, 0 },
+ { 0x6e6e, 0x6e6e, 0x6e6e, 0 },
+ { 0x6f6f, 0x6f6f, 0x6f6f, 0 },
+ { 0x7070, 0x7070, 0x7070, 0 },
+ { 0x7171, 0x7171, 0x7171, 0 },
+ { 0x7272, 0x7272, 0x7272, 0 },
+ { 0x7373, 0x7373, 0x7373, 0 },
+ { 0x7474, 0x7474, 0x7474, 0 },
+ { 0x7575, 0x7575, 0x7575, 0 },
+ { 0x7676, 0x7676, 0x7676, 0 },
+ { 0x7777, 0x7777, 0x7777, 0 },
+ { 0x7878, 0x7878, 0x7878, 0 },
+ { 0x7979, 0x7979, 0x7979, 0 },
+ { 0x7a7a, 0x7a7a, 0x7a7a, 0 },
+ { 0x7b7b, 0x7b7b, 0x7b7b, 0 },
+ { 0x7c7c, 0x7c7c, 0x7c7c, 0 },
+ { 0x7d7d, 0x7d7d, 0x7d7d, 0 },
+ { 0x7e7e, 0x7e7e, 0x7e7e, 0 },
+ { 0x7f7f, 0x7f7f, 0x7f7f, 0 },
+ { 0x8080, 0x8080, 0x8080, 0 },
+ { 0x8181, 0x8181, 0x8181, 0 },
+ { 0x8282, 0x8282, 0x8282, 0 },
+ { 0x8383, 0x8383, 0x8383, 0 },
+ { 0x8484, 0x8484, 0x8484, 0 },
+ { 0x8585, 0x8585, 0x8585, 0 },
+ { 0x8686, 0x8686, 0x8686, 0 },
+ { 0x8787, 0x8787, 0x8787, 0 },
+ { 0x8888, 0x8888, 0x8888, 0 },
+ { 0x8989, 0x8989, 0x8989, 0 },
+ { 0x8a8a, 0x8a8a, 0x8a8a, 0 },
+ { 0x8b8b, 0x8b8b, 0x8b8b, 0 },
+ { 0x8c8c, 0x8c8c, 0x8c8c, 0 },
+ { 0x8d8d, 0x8d8d, 0x8d8d, 0 },
+ { 0x8e8e, 0x8e8e, 0x8e8e, 0 },
+ { 0x8f8f, 0x8f8f, 0x8f8f, 0 },
+ { 0x9090, 0x9090, 0x9090, 0 },
+ { 0x9191, 0x9191, 0x9191, 0 },
+ { 0x9292, 0x9292, 0x9292, 0 },
+ { 0x9393, 0x9393, 0x9393, 0 },
+ { 0x9494, 0x9494, 0x9494, 0 },
+ { 0x9595, 0x9595, 0x9595, 0 },
+ { 0x9696, 0x9696, 0x9696, 0 },
+ { 0x9797, 0x9797, 0x9797, 0 },
+ { 0x9898, 0x9898, 0x9898, 0 },
+ { 0x9999, 0x9999, 0x9999, 0 },
+ { 0x9a9a, 0x9a9a, 0x9a9a, 0 },
+ { 0x9b9b, 0x9b9b, 0x9b9b, 0 },
+ { 0x9c9c, 0x9c9c, 0x9c9c, 0 },
+ { 0x9d9d, 0x9d9d, 0x9d9d, 0 },
+ { 0x9e9e, 0x9e9e, 0x9e9e, 0 },
+ { 0x9f9f, 0x9f9f, 0x9f9f, 0 },
+ { 0xa0a0, 0xa0a0, 0xa0a0, 0 },
+ { 0xa1a1, 0xa1a1, 0xa1a1, 0 },
+ { 0xa2a2, 0xa2a2, 0xa2a2, 0 },
+ { 0xa3a3, 0xa3a3, 0xa3a3, 0 },
+ { 0xa4a4, 0xa4a4, 0xa4a4, 0 },
+ { 0xa5a5, 0xa5a5, 0xa5a5, 0 },
+ { 0xa6a6, 0xa6a6, 0xa6a6, 0 },
+ { 0xa7a7, 0xa7a7, 0xa7a7, 0 },
+ { 0xa8a8, 0xa8a8, 0xa8a8, 0 },
+ { 0xa9a9, 0xa9a9, 0xa9a9, 0 },
+ { 0xaaaa, 0xaaaa, 0xaaaa, 0 },
+ { 0xabab, 0xabab, 0xabab, 0 },
+ { 0xacac, 0xacac, 0xacac, 0 },
+ { 0xadad, 0xadad, 0xadad, 0 },
+ { 0xaeae, 0xaeae, 0xaeae, 0 },
+ { 0xafaf, 0xafaf, 0xafaf, 0 },
+ { 0xb0b0, 0xb0b0, 0xb0b0, 0 },
+ { 0xb1b1, 0xb1b1, 0xb1b1, 0 },
+ { 0xb2b2, 0xb2b2, 0xb2b2, 0 },
+ { 0xb3b3, 0xb3b3, 0xb3b3, 0 },
+ { 0xb4b4, 0xb4b4, 0xb4b4, 0 },
+ { 0xb5b5, 0xb5b5, 0xb5b5, 0 },
+ { 0xb6b6, 0xb6b6, 0xb6b6, 0 },
+ { 0xb7b7, 0xb7b7, 0xb7b7, 0 },
+ { 0xb8b8, 0xb8b8, 0xb8b8, 0 },
+ { 0xb9b9, 0xb9b9, 0xb9b9, 0 },
+ { 0xbaba, 0xbaba, 0xbaba, 0 },
+ { 0xbbbb, 0xbbbb, 0xbbbb, 0 },
+ { 0xbcbc, 0xbcbc, 0xbcbc, 0 },
+ { 0xbdbd, 0xbdbd, 0xbdbd, 0 },
+ { 0xbebe, 0xbebe, 0xbebe, 0 },
+ { 0xbfbf, 0xbfbf, 0xbfbf, 0 },
+ { 0xc0c0, 0xc0c0, 0xc0c0, 0 },
+ { 0xc1c1, 0xc1c1, 0xc1c1, 0 },
+ { 0xc2c2, 0xc2c2, 0xc2c2, 0 },
+ { 0xc3c3, 0xc3c3, 0xc3c3, 0 },
+ { 0xc4c4, 0xc4c4, 0xc4c4, 0 },
+ { 0xc5c5, 0xc5c5, 0xc5c5, 0 },
+ { 0xc6c6, 0xc6c6, 0xc6c6, 0 },
+ { 0xc7c7, 0xc7c7, 0xc7c7, 0 },
+ { 0xc8c8, 0xc8c8, 0xc8c8, 0 },
+ { 0xc9c9, 0xc9c9, 0xc9c9, 0 },
+ { 0xcaca, 0xcaca, 0xcaca, 0 },
+ { 0xcbcb, 0xcbcb, 0xcbcb, 0 },
+ { 0xcccc, 0xcccc, 0xcccc, 0 },
+ { 0xcdcd, 0xcdcd, 0xcdcd, 0 },
+ { 0xcece, 0xcece, 0xcece, 0 },
+ { 0xcfcf, 0xcfcf, 0xcfcf, 0 },
+ { 0xd0d0, 0xd0d0, 0xd0d0, 0 },
+ { 0xd1d1, 0xd1d1, 0xd1d1, 0 },
+ { 0xd2d2, 0xd2d2, 0xd2d2, 0 },
+ { 0xd3d3, 0xd3d3, 0xd3d3, 0 },
+ { 0xd4d4, 0xd4d4, 0xd4d4, 0 },
+ { 0xd5d5, 0xd5d5, 0xd5d5, 0 },
+ { 0xd6d6, 0xd6d6, 0xd6d6, 0 },
+ { 0xd7d7, 0xd7d7, 0xd7d7, 0 },
+ { 0xd8d8, 0xd8d8, 0xd8d8, 0 },
+ { 0xd9d9, 0xd9d9, 0xd9d9, 0 },
+ { 0xdada, 0xdada, 0xdada, 0 },
+ { 0xdbdb, 0xdbdb, 0xdbdb, 0 },
+ { 0xdcdc, 0xdcdc, 0xdcdc, 0 },
+ { 0xdddd, 0xdddd, 0xdddd, 0 },
+ { 0xdede, 0xdede, 0xdede, 0 },
+ { 0xdfdf, 0xdfdf, 0xdfdf, 0 },
+ { 0xe0e0, 0xe0e0, 0xe0e0, 0 },
+ { 0xe1e1, 0xe1e1, 0xe1e1, 0 },
+ { 0xe2e2, 0xe2e2, 0xe2e2, 0 },
+ { 0xe3e3, 0xe3e3, 0xe3e3, 0 },
+ { 0xe4e4, 0xe4e4, 0xe4e4, 0 },
+ { 0xe5e5, 0xe5e5, 0xe5e5, 0 },
+ { 0xe6e6, 0xe6e6, 0xe6e6, 0 },
+ { 0xe7e7, 0xe7e7, 0xe7e7, 0 },
+ { 0xe8e8, 0xe8e8, 0xe8e8, 0 },
+ { 0xe9e9, 0xe9e9, 0xe9e9, 0 },
+ { 0xeaea, 0xeaea, 0xeaea, 0 },
+ { 0xebeb, 0xebeb, 0xebeb, 0 },
+ { 0xecec, 0xecec, 0xecec, 0 },
+ { 0xeded, 0xeded, 0xeded, 0 },
+ { 0xeeee, 0xeeee, 0xeeee, 0 },
+ { 0xefef, 0xefef, 0xefef, 0 },
+ { 0xf0f0, 0xf0f0, 0xf0f0, 0 },
+ { 0xf1f1, 0xf1f1, 0xf1f1, 0 },
+ { 0xf2f2, 0xf2f2, 0xf2f2, 0 },
+ { 0xf3f3, 0xf3f3, 0xf3f3, 0 },
+ { 0xf4f4, 0xf4f4, 0xf4f4, 0 },
+ { 0xf5f5, 0xf5f5, 0xf5f5, 0 },
+ { 0xf6f6, 0xf6f6, 0xf6f6, 0 },
+ { 0xf7f7, 0xf7f7, 0xf7f7, 0 },
+ { 0xf8f8, 0xf8f8, 0xf8f8, 0 },
+ { 0xf9f9, 0xf9f9, 0xf9f9, 0 },
+ { 0xfafa, 0xfafa, 0xfafa, 0 },
+ { 0xfbfb, 0xfbfb, 0xfbfb, 0 },
+ { 0xfcfc, 0xfcfc, 0xfcfc, 0 },
+ { 0xfdfd, 0xfdfd, 0xfdfd, 0 },
+ { 0xfefe, 0xfefe, 0xfefe, 0 },
+ { 0xffff, 0xffff, 0xffff, 0 },
+};
+
+const struct vkms_color_lut linear_eotf = {
+ .base = linear_array,
+ .lut_length = LUT_SIZE,
+ .channel_value2index_ratio = 0xff00ffll
+};
+EXPORT_SYMBOL(linear_eotf);
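The linear table above is an identity ramp: entry i holds i * 0x101 in every channel, the same 8-bit-to-16-bit replication used by the format readers. A sketch of an equivalent generator (illustrative only; the table is committed pre-expanded, so no init-time code is needed):

static void fill_linear_lut(struct drm_color_lut lut[LUT_SIZE])
{
	for (int i = 0; i < LUT_SIZE; i++) {
		u16 v = i * 0x101;	/* 0xff * 0x101 == 0xffff */

		lut[i].red = v;
		lut[i].green = v;
		lut[i].blue = v;
		lut[i].reserved = 0;
	}
}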
+
+static struct drm_color_lut srgb_array[LUT_SIZE] = {
+ { 0x0, 0x0, 0x0, 0 },
+ { 0x13, 0x13, 0x13, 0 },
+ { 0x27, 0x27, 0x27, 0 },
+ { 0x3b, 0x3b, 0x3b, 0 },
+ { 0x4f, 0x4f, 0x4f, 0 },
+ { 0x63, 0x63, 0x63, 0 },
+ { 0x77, 0x77, 0x77, 0 },
+ { 0x8b, 0x8b, 0x8b, 0 },
+ { 0x9f, 0x9f, 0x9f, 0 },
+ { 0xb3, 0xb3, 0xb3, 0 },
+ { 0xc6, 0xc6, 0xc6, 0 },
+ { 0xdb, 0xdb, 0xdb, 0 },
+ { 0xf0, 0xf0, 0xf0, 0 },
+ { 0x107, 0x107, 0x107, 0 },
+ { 0x11f, 0x11f, 0x11f, 0 },
+ { 0x139, 0x139, 0x139, 0 },
+ { 0x153, 0x153, 0x153, 0 },
+ { 0x16f, 0x16f, 0x16f, 0 },
+ { 0x18c, 0x18c, 0x18c, 0 },
+ { 0x1aa, 0x1aa, 0x1aa, 0 },
+ { 0x1ca, 0x1ca, 0x1ca, 0 },
+ { 0x1eb, 0x1eb, 0x1eb, 0 },
+ { 0x20d, 0x20d, 0x20d, 0 },
+ { 0x231, 0x231, 0x231, 0 },
+ { 0x256, 0x256, 0x256, 0 },
+ { 0x27d, 0x27d, 0x27d, 0 },
+ { 0x2a4, 0x2a4, 0x2a4, 0 },
+ { 0x2ce, 0x2ce, 0x2ce, 0 },
+ { 0x2f9, 0x2f9, 0x2f9, 0 },
+ { 0x325, 0x325, 0x325, 0 },
+ { 0x352, 0x352, 0x352, 0 },
+ { 0x381, 0x381, 0x381, 0 },
+ { 0x3b2, 0x3b2, 0x3b2, 0 },
+ { 0x3e4, 0x3e4, 0x3e4, 0 },
+ { 0x418, 0x418, 0x418, 0 },
+ { 0x44d, 0x44d, 0x44d, 0 },
+ { 0x484, 0x484, 0x484, 0 },
+ { 0x4bc, 0x4bc, 0x4bc, 0 },
+ { 0x4f6, 0x4f6, 0x4f6, 0 },
+ { 0x531, 0x531, 0x531, 0 },
+ { 0x56e, 0x56e, 0x56e, 0 },
+ { 0x5ad, 0x5ad, 0x5ad, 0 },
+ { 0x5ed, 0x5ed, 0x5ed, 0 },
+ { 0x62f, 0x62f, 0x62f, 0 },
+ { 0x672, 0x672, 0x672, 0 },
+ { 0x6b7, 0x6b7, 0x6b7, 0 },
+ { 0x6fe, 0x6fe, 0x6fe, 0 },
+ { 0x746, 0x746, 0x746, 0 },
+ { 0x791, 0x791, 0x791, 0 },
+ { 0x7dc, 0x7dc, 0x7dc, 0 },
+ { 0x82a, 0x82a, 0x82a, 0 },
+ { 0x879, 0x879, 0x879, 0 },
+ { 0x8ca, 0x8ca, 0x8ca, 0 },
+ { 0x91d, 0x91d, 0x91d, 0 },
+ { 0x971, 0x971, 0x971, 0 },
+ { 0x9c7, 0x9c7, 0x9c7, 0 },
+ { 0xa1f, 0xa1f, 0xa1f, 0 },
+ { 0xa79, 0xa79, 0xa79, 0 },
+ { 0xad4, 0xad4, 0xad4, 0 },
+ { 0xb32, 0xb32, 0xb32, 0 },
+ { 0xb91, 0xb91, 0xb91, 0 },
+ { 0xbf2, 0xbf2, 0xbf2, 0 },
+ { 0xc54, 0xc54, 0xc54, 0 },
+ { 0xcb9, 0xcb9, 0xcb9, 0 },
+ { 0xd1f, 0xd1f, 0xd1f, 0 },
+ { 0xd88, 0xd88, 0xd88, 0 },
+ { 0xdf2, 0xdf2, 0xdf2, 0 },
+ { 0xe5e, 0xe5e, 0xe5e, 0 },
+ { 0xecc, 0xecc, 0xecc, 0 },
+ { 0xf3c, 0xf3c, 0xf3c, 0 },
+ { 0xfad, 0xfad, 0xfad, 0 },
+ { 0x1021, 0x1021, 0x1021, 0 },
+ { 0x1096, 0x1096, 0x1096, 0 },
+ { 0x110e, 0x110e, 0x110e, 0 },
+ { 0x1187, 0x1187, 0x1187, 0 },
+ { 0x1203, 0x1203, 0x1203, 0 },
+ { 0x1280, 0x1280, 0x1280, 0 },
+ { 0x12ff, 0x12ff, 0x12ff, 0 },
+ { 0x1380, 0x1380, 0x1380, 0 },
+ { 0x1404, 0x1404, 0x1404, 0 },
+ { 0x1489, 0x1489, 0x1489, 0 },
+ { 0x1510, 0x1510, 0x1510, 0 },
+ { 0x1599, 0x1599, 0x1599, 0 },
+ { 0x1624, 0x1624, 0x1624, 0 },
+ { 0x16b2, 0x16b2, 0x16b2, 0 },
+ { 0x1741, 0x1741, 0x1741, 0 },
+ { 0x17d2, 0x17d2, 0x17d2, 0 },
+ { 0x1865, 0x1865, 0x1865, 0 },
+ { 0x18fb, 0x18fb, 0x18fb, 0 },
+ { 0x1992, 0x1992, 0x1992, 0 },
+ { 0x1a2c, 0x1a2c, 0x1a2c, 0 },
+ { 0x1ac8, 0x1ac8, 0x1ac8, 0 },
+ { 0x1b65, 0x1b65, 0x1b65, 0 },
+ { 0x1c05, 0x1c05, 0x1c05, 0 },
+ { 0x1ca7, 0x1ca7, 0x1ca7, 0 },
+ { 0x1d4b, 0x1d4b, 0x1d4b, 0 },
+ { 0x1df1, 0x1df1, 0x1df1, 0 },
+ { 0x1e99, 0x1e99, 0x1e99, 0 },
+ { 0x1f44, 0x1f44, 0x1f44, 0 },
+ { 0x1ff0, 0x1ff0, 0x1ff0, 0 },
+ { 0x209f, 0x209f, 0x209f, 0 },
+ { 0x2150, 0x2150, 0x2150, 0 },
+ { 0x2203, 0x2203, 0x2203, 0 },
+ { 0x22b8, 0x22b8, 0x22b8, 0 },
+ { 0x2370, 0x2370, 0x2370, 0 },
+ { 0x2429, 0x2429, 0x2429, 0 },
+ { 0x24e5, 0x24e5, 0x24e5, 0 },
+ { 0x25a3, 0x25a3, 0x25a3, 0 },
+ { 0x2663, 0x2663, 0x2663, 0 },
+ { 0x2726, 0x2726, 0x2726, 0 },
+ { 0x27ea, 0x27ea, 0x27ea, 0 },
+ { 0x28b1, 0x28b1, 0x28b1, 0 },
+ { 0x297a, 0x297a, 0x297a, 0 },
+ { 0x2a45, 0x2a45, 0x2a45, 0 },
+ { 0x2b13, 0x2b13, 0x2b13, 0 },
+ { 0x2be3, 0x2be3, 0x2be3, 0 },
+ { 0x2cb5, 0x2cb5, 0x2cb5, 0 },
+ { 0x2d89, 0x2d89, 0x2d89, 0 },
+ { 0x2e60, 0x2e60, 0x2e60, 0 },
+ { 0x2f39, 0x2f39, 0x2f39, 0 },
+ { 0x3014, 0x3014, 0x3014, 0 },
+ { 0x30f2, 0x30f2, 0x30f2, 0 },
+ { 0x31d2, 0x31d2, 0x31d2, 0 },
+ { 0x32b4, 0x32b4, 0x32b4, 0 },
+ { 0x3398, 0x3398, 0x3398, 0 },
+ { 0x347f, 0x347f, 0x347f, 0 },
+ { 0x3569, 0x3569, 0x3569, 0 },
+ { 0x3654, 0x3654, 0x3654, 0 },
+ { 0x3742, 0x3742, 0x3742, 0 },
+ { 0x3832, 0x3832, 0x3832, 0 },
+ { 0x3925, 0x3925, 0x3925, 0 },
+ { 0x3a1a, 0x3a1a, 0x3a1a, 0 },
+ { 0x3b11, 0x3b11, 0x3b11, 0 },
+ { 0x3c0b, 0x3c0b, 0x3c0b, 0 },
+ { 0x3d07, 0x3d07, 0x3d07, 0 },
+ { 0x3e05, 0x3e05, 0x3e05, 0 },
+ { 0x3f06, 0x3f06, 0x3f06, 0 },
+ { 0x400a, 0x400a, 0x400a, 0 },
+ { 0x410f, 0x410f, 0x410f, 0 },
+ { 0x4218, 0x4218, 0x4218, 0 },
+ { 0x4322, 0x4322, 0x4322, 0 },
+ { 0x442f, 0x442f, 0x442f, 0 },
+ { 0x453f, 0x453f, 0x453f, 0 },
+ { 0x4650, 0x4650, 0x4650, 0 },
+ { 0x4765, 0x4765, 0x4765, 0 },
+ { 0x487c, 0x487c, 0x487c, 0 },
+ { 0x4995, 0x4995, 0x4995, 0 },
+ { 0x4ab1, 0x4ab1, 0x4ab1, 0 },
+ { 0x4bcf, 0x4bcf, 0x4bcf, 0 },
+ { 0x4cf0, 0x4cf0, 0x4cf0, 0 },
+ { 0x4e13, 0x4e13, 0x4e13, 0 },
+ { 0x4f39, 0x4f39, 0x4f39, 0 },
+ { 0x5061, 0x5061, 0x5061, 0 },
+ { 0x518b, 0x518b, 0x518b, 0 },
+ { 0x52b9, 0x52b9, 0x52b9, 0 },
+ { 0x53e8, 0x53e8, 0x53e8, 0 },
+ { 0x551b, 0x551b, 0x551b, 0 },
+ { 0x5650, 0x5650, 0x5650, 0 },
+ { 0x5787, 0x5787, 0x5787, 0 },
+ { 0x58c1, 0x58c1, 0x58c1, 0 },
+ { 0x59fd, 0x59fd, 0x59fd, 0 },
+ { 0x5b3c, 0x5b3c, 0x5b3c, 0 },
+ { 0x5c7e, 0x5c7e, 0x5c7e, 0 },
+ { 0x5dc2, 0x5dc2, 0x5dc2, 0 },
+ { 0x5f09, 0x5f09, 0x5f09, 0 },
+ { 0x6052, 0x6052, 0x6052, 0 },
+ { 0x619e, 0x619e, 0x619e, 0 },
+ { 0x62ec, 0x62ec, 0x62ec, 0 },
+ { 0x643d, 0x643d, 0x643d, 0 },
+ { 0x6591, 0x6591, 0x6591, 0 },
+ { 0x66e7, 0x66e7, 0x66e7, 0 },
+ { 0x6840, 0x6840, 0x6840, 0 },
+ { 0x699b, 0x699b, 0x699b, 0 },
+ { 0x6afa, 0x6afa, 0x6afa, 0 },
+ { 0x6c5a, 0x6c5a, 0x6c5a, 0 },
+ { 0x6dbe, 0x6dbe, 0x6dbe, 0 },
+ { 0x6f24, 0x6f24, 0x6f24, 0 },
+ { 0x708c, 0x708c, 0x708c, 0 },
+ { 0x71f8, 0x71f8, 0x71f8, 0 },
+ { 0x7366, 0x7366, 0x7366, 0 },
+ { 0x74d6, 0x74d6, 0x74d6, 0 },
+ { 0x764a, 0x764a, 0x764a, 0 },
+ { 0x77c0, 0x77c0, 0x77c0, 0 },
+ { 0x7938, 0x7938, 0x7938, 0 },
+ { 0x7ab4, 0x7ab4, 0x7ab4, 0 },
+ { 0x7c32, 0x7c32, 0x7c32, 0 },
+ { 0x7db3, 0x7db3, 0x7db3, 0 },
+ { 0x7f36, 0x7f36, 0x7f36, 0 },
+ { 0x80bc, 0x80bc, 0x80bc, 0 },
+ { 0x8245, 0x8245, 0x8245, 0 },
+ { 0x83d1, 0x83d1, 0x83d1, 0 },
+ { 0x855f, 0x855f, 0x855f, 0 },
+ { 0x86f0, 0x86f0, 0x86f0, 0 },
+ { 0x8884, 0x8884, 0x8884, 0 },
+ { 0x8a1a, 0x8a1a, 0x8a1a, 0 },
+ { 0x8bb4, 0x8bb4, 0x8bb4, 0 },
+ { 0x8d50, 0x8d50, 0x8d50, 0 },
+ { 0x8eee, 0x8eee, 0x8eee, 0 },
+ { 0x9090, 0x9090, 0x9090, 0 },
+ { 0x9234, 0x9234, 0x9234, 0 },
+ { 0x93db, 0x93db, 0x93db, 0 },
+ { 0x9585, 0x9585, 0x9585, 0 },
+ { 0x9732, 0x9732, 0x9732, 0 },
+ { 0x98e1, 0x98e1, 0x98e1, 0 },
+ { 0x9a93, 0x9a93, 0x9a93, 0 },
+ { 0x9c48, 0x9c48, 0x9c48, 0 },
+ { 0x9e00, 0x9e00, 0x9e00, 0 },
+ { 0x9fbb, 0x9fbb, 0x9fbb, 0 },
+ { 0xa178, 0xa178, 0xa178, 0 },
+ { 0xa338, 0xa338, 0xa338, 0 },
+ { 0xa4fb, 0xa4fb, 0xa4fb, 0 },
+ { 0xa6c1, 0xa6c1, 0xa6c1, 0 },
+ { 0xa88a, 0xa88a, 0xa88a, 0 },
+ { 0xaa56, 0xaa56, 0xaa56, 0 },
+ { 0xac24, 0xac24, 0xac24, 0 },
+ { 0xadf5, 0xadf5, 0xadf5, 0 },
+ { 0xafc9, 0xafc9, 0xafc9, 0 },
+ { 0xb1a0, 0xb1a0, 0xb1a0, 0 },
+ { 0xb37a, 0xb37a, 0xb37a, 0 },
+ { 0xb557, 0xb557, 0xb557, 0 },
+ { 0xb736, 0xb736, 0xb736, 0 },
+ { 0xb919, 0xb919, 0xb919, 0 },
+ { 0xbafe, 0xbafe, 0xbafe, 0 },
+ { 0xbce6, 0xbce6, 0xbce6, 0 },
+ { 0xbed2, 0xbed2, 0xbed2, 0 },
+ { 0xc0c0, 0xc0c0, 0xc0c0, 0 },
+ { 0xc2b0, 0xc2b0, 0xc2b0, 0 },
+ { 0xc4a4, 0xc4a4, 0xc4a4, 0 },
+ { 0xc69b, 0xc69b, 0xc69b, 0 },
+ { 0xc895, 0xc895, 0xc895, 0 },
+ { 0xca91, 0xca91, 0xca91, 0 },
+ { 0xcc91, 0xcc91, 0xcc91, 0 },
+ { 0xce93, 0xce93, 0xce93, 0 },
+ { 0xd098, 0xd098, 0xd098, 0 },
+ { 0xd2a1, 0xd2a1, 0xd2a1, 0 },
+ { 0xd4ac, 0xd4ac, 0xd4ac, 0 },
+ { 0xd6ba, 0xd6ba, 0xd6ba, 0 },
+ { 0xd8cb, 0xd8cb, 0xd8cb, 0 },
+ { 0xdadf, 0xdadf, 0xdadf, 0 },
+ { 0xdcf7, 0xdcf7, 0xdcf7, 0 },
+ { 0xdf11, 0xdf11, 0xdf11, 0 },
+ { 0xe12e, 0xe12e, 0xe12e, 0 },
+ { 0xe34e, 0xe34e, 0xe34e, 0 },
+ { 0xe571, 0xe571, 0xe571, 0 },
+ { 0xe796, 0xe796, 0xe796, 0 },
+ { 0xe9bf, 0xe9bf, 0xe9bf, 0 },
+ { 0xebeb, 0xebeb, 0xebeb, 0 },
+ { 0xee1a, 0xee1a, 0xee1a, 0 },
+ { 0xf04c, 0xf04c, 0xf04c, 0 },
+ { 0xf281, 0xf281, 0xf281, 0 },
+ { 0xf4b9, 0xf4b9, 0xf4b9, 0 },
+ { 0xf6f4, 0xf6f4, 0xf6f4, 0 },
+ { 0xf932, 0xf932, 0xf932, 0 },
+ { 0xfb73, 0xfb73, 0xfb73, 0 },
+ { 0xfdb7, 0xfdb7, 0xfdb7, 0 },
+ { 0xffff, 0xffff, 0xffff, 0 },
+};
+
+const struct vkms_color_lut srgb_eotf = {
+ .base = srgb_array,
+ .lut_length = LUT_SIZE,
+ .channel_value2index_ratio = 0xff00ffll
+};
+EXPORT_SYMBOL(srgb_eotf);
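srgb_array samples the standard sRGB EOTF at i / 255 and scales the result to the 16-bit range. A userspace-style float sketch (not kernel code; the committed entries come from the fixed-point generator linked above and may differ in the last bit):

#include <math.h>
#include <stdint.h>

static uint16_t srgb_eotf_sample(int i)
{
	double c = i / 255.0;
	double l = (c <= 0.04045) ? c / 12.92
				  : pow((c + 0.055) / 1.055, 2.4);

	return (uint16_t)(l * 0xffff + 0.5);	/* e.g. i == 12 -> 0xf0 */
}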
+
+static struct drm_color_lut srgb_inv_array[LUT_SIZE] = {
+ { 0x0, 0x0, 0x0, 0 },
+ { 0xcc2, 0xcc2, 0xcc2, 0 },
+ { 0x15be, 0x15be, 0x15be, 0 },
+ { 0x1c56, 0x1c56, 0x1c56, 0 },
+ { 0x21bd, 0x21bd, 0x21bd, 0 },
+ { 0x2666, 0x2666, 0x2666, 0 },
+ { 0x2a8a, 0x2a8a, 0x2a8a, 0 },
+ { 0x2e4c, 0x2e4c, 0x2e4c, 0 },
+ { 0x31c0, 0x31c0, 0x31c0, 0 },
+ { 0x34f6, 0x34f6, 0x34f6, 0 },
+ { 0x37f9, 0x37f9, 0x37f9, 0 },
+ { 0x3acf, 0x3acf, 0x3acf, 0 },
+ { 0x3d80, 0x3d80, 0x3d80, 0 },
+ { 0x4010, 0x4010, 0x4010, 0 },
+ { 0x4284, 0x4284, 0x4284, 0 },
+ { 0x44dd, 0x44dd, 0x44dd, 0 },
+ { 0x4720, 0x4720, 0x4720, 0 },
+ { 0x494e, 0x494e, 0x494e, 0 },
+ { 0x4b69, 0x4b69, 0x4b69, 0 },
+ { 0x4d73, 0x4d73, 0x4d73, 0 },
+ { 0x4f6e, 0x4f6e, 0x4f6e, 0 },
+ { 0x5159, 0x5159, 0x5159, 0 },
+ { 0x5337, 0x5337, 0x5337, 0 },
+ { 0x5509, 0x5509, 0x5509, 0 },
+ { 0x56cf, 0x56cf, 0x56cf, 0 },
+ { 0x588a, 0x588a, 0x588a, 0 },
+ { 0x5a3b, 0x5a3b, 0x5a3b, 0 },
+ { 0x5be2, 0x5be2, 0x5be2, 0 },
+ { 0x5d80, 0x5d80, 0x5d80, 0 },
+ { 0x5f16, 0x5f16, 0x5f16, 0 },
+ { 0x60a4, 0x60a4, 0x60a4, 0 },
+ { 0x6229, 0x6229, 0x6229, 0 },
+ { 0x63a8, 0x63a8, 0x63a8, 0 },
+ { 0x6520, 0x6520, 0x6520, 0 },
+ { 0x6691, 0x6691, 0x6691, 0 },
+ { 0x67fc, 0x67fc, 0x67fc, 0 },
+ { 0x6961, 0x6961, 0x6961, 0 },
+ { 0x6ac0, 0x6ac0, 0x6ac0, 0 },
+ { 0x6c19, 0x6c19, 0x6c19, 0 },
+ { 0x6d6e, 0x6d6e, 0x6d6e, 0 },
+ { 0x6ebd, 0x6ebd, 0x6ebd, 0 },
+ { 0x7008, 0x7008, 0x7008, 0 },
+ { 0x714d, 0x714d, 0x714d, 0 },
+ { 0x728f, 0x728f, 0x728f, 0 },
+ { 0x73cc, 0x73cc, 0x73cc, 0 },
+ { 0x7504, 0x7504, 0x7504, 0 },
+ { 0x7639, 0x7639, 0x7639, 0 },
+ { 0x776a, 0x776a, 0x776a, 0 },
+ { 0x7897, 0x7897, 0x7897, 0 },
+ { 0x79c1, 0x79c1, 0x79c1, 0 },
+ { 0x7ae7, 0x7ae7, 0x7ae7, 0 },
+ { 0x7c09, 0x7c09, 0x7c09, 0 },
+ { 0x7d28, 0x7d28, 0x7d28, 0 },
+ { 0x7e44, 0x7e44, 0x7e44, 0 },
+ { 0x7f5d, 0x7f5d, 0x7f5d, 0 },
+ { 0x8073, 0x8073, 0x8073, 0 },
+ { 0x8186, 0x8186, 0x8186, 0 },
+ { 0x8296, 0x8296, 0x8296, 0 },
+ { 0x83a4, 0x83a4, 0x83a4, 0 },
+ { 0x84ae, 0x84ae, 0x84ae, 0 },
+ { 0x85b6, 0x85b6, 0x85b6, 0 },
+ { 0x86bc, 0x86bc, 0x86bc, 0 },
+ { 0x87bf, 0x87bf, 0x87bf, 0 },
+ { 0x88bf, 0x88bf, 0x88bf, 0 },
+ { 0x89be, 0x89be, 0x89be, 0 },
+ { 0x8ab9, 0x8ab9, 0x8ab9, 0 },
+ { 0x8bb3, 0x8bb3, 0x8bb3, 0 },
+ { 0x8cab, 0x8cab, 0x8cab, 0 },
+ { 0x8da0, 0x8da0, 0x8da0, 0 },
+ { 0x8e93, 0x8e93, 0x8e93, 0 },
+ { 0x8f84, 0x8f84, 0x8f84, 0 },
+ { 0x9073, 0x9073, 0x9073, 0 },
+ { 0x9161, 0x9161, 0x9161, 0 },
+ { 0x924c, 0x924c, 0x924c, 0 },
+ { 0x9335, 0x9335, 0x9335, 0 },
+ { 0x941d, 0x941d, 0x941d, 0 },
+ { 0x9503, 0x9503, 0x9503, 0 },
+ { 0x95e7, 0x95e7, 0x95e7, 0 },
+ { 0x96c9, 0x96c9, 0x96c9, 0 },
+ { 0x97aa, 0x97aa, 0x97aa, 0 },
+ { 0x9889, 0x9889, 0x9889, 0 },
+ { 0x9966, 0x9966, 0x9966, 0 },
+ { 0x9a42, 0x9a42, 0x9a42, 0 },
+ { 0x9b1c, 0x9b1c, 0x9b1c, 0 },
+ { 0x9bf5, 0x9bf5, 0x9bf5, 0 },
+ { 0x9ccc, 0x9ccc, 0x9ccc, 0 },
+ { 0x9da1, 0x9da1, 0x9da1, 0 },
+ { 0x9e76, 0x9e76, 0x9e76, 0 },
+ { 0x9f49, 0x9f49, 0x9f49, 0 },
+ { 0xa01a, 0xa01a, 0xa01a, 0 },
+ { 0xa0ea, 0xa0ea, 0xa0ea, 0 },
+ { 0xa1b9, 0xa1b9, 0xa1b9, 0 },
+ { 0xa286, 0xa286, 0xa286, 0 },
+ { 0xa352, 0xa352, 0xa352, 0 },
+ { 0xa41d, 0xa41d, 0xa41d, 0 },
+ { 0xa4e7, 0xa4e7, 0xa4e7, 0 },
+ { 0xa5af, 0xa5af, 0xa5af, 0 },
+ { 0xa676, 0xa676, 0xa676, 0 },
+ { 0xa73c, 0xa73c, 0xa73c, 0 },
+ { 0xa801, 0xa801, 0xa801, 0 },
+ { 0xa8c5, 0xa8c5, 0xa8c5, 0 },
+ { 0xa987, 0xa987, 0xa987, 0 },
+ { 0xaa48, 0xaa48, 0xaa48, 0 },
+ { 0xab09, 0xab09, 0xab09, 0 },
+ { 0xabc8, 0xabc8, 0xabc8, 0 },
+ { 0xac86, 0xac86, 0xac86, 0 },
+ { 0xad43, 0xad43, 0xad43, 0 },
+ { 0xadff, 0xadff, 0xadff, 0 },
+ { 0xaeba, 0xaeba, 0xaeba, 0 },
+ { 0xaf74, 0xaf74, 0xaf74, 0 },
+ { 0xb02d, 0xb02d, 0xb02d, 0 },
+ { 0xb0e5, 0xb0e5, 0xb0e5, 0 },
+ { 0xb19c, 0xb19c, 0xb19c, 0 },
+ { 0xb252, 0xb252, 0xb252, 0 },
+ { 0xb307, 0xb307, 0xb307, 0 },
+ { 0xb3bb, 0xb3bb, 0xb3bb, 0 },
+ { 0xb46f, 0xb46f, 0xb46f, 0 },
+ { 0xb521, 0xb521, 0xb521, 0 },
+ { 0xb5d3, 0xb5d3, 0xb5d3, 0 },
+ { 0xb683, 0xb683, 0xb683, 0 },
+ { 0xb733, 0xb733, 0xb733, 0 },
+ { 0xb7e2, 0xb7e2, 0xb7e2, 0 },
+ { 0xb890, 0xb890, 0xb890, 0 },
+ { 0xb93d, 0xb93d, 0xb93d, 0 },
+ { 0xb9ea, 0xb9ea, 0xb9ea, 0 },
+ { 0xba96, 0xba96, 0xba96, 0 },
+ { 0xbb40, 0xbb40, 0xbb40, 0 },
+ { 0xbbea, 0xbbea, 0xbbea, 0 },
+ { 0xbc94, 0xbc94, 0xbc94, 0 },
+ { 0xbd3c, 0xbd3c, 0xbd3c, 0 },
+ { 0xbde4, 0xbde4, 0xbde4, 0 },
+ { 0xbe8b, 0xbe8b, 0xbe8b, 0 },
+ { 0xbf31, 0xbf31, 0xbf31, 0 },
+ { 0xbfd7, 0xbfd7, 0xbfd7, 0 },
+ { 0xc07b, 0xc07b, 0xc07b, 0 },
+ { 0xc120, 0xc120, 0xc120, 0 },
+ { 0xc1c3, 0xc1c3, 0xc1c3, 0 },
+ { 0xc266, 0xc266, 0xc266, 0 },
+ { 0xc308, 0xc308, 0xc308, 0 },
+ { 0xc3a9, 0xc3a9, 0xc3a9, 0 },
+ { 0xc449, 0xc449, 0xc449, 0 },
+ { 0xc4e9, 0xc4e9, 0xc4e9, 0 },
+ { 0xc589, 0xc589, 0xc589, 0 },
+ { 0xc627, 0xc627, 0xc627, 0 },
+ { 0xc6c5, 0xc6c5, 0xc6c5, 0 },
+ { 0xc763, 0xc763, 0xc763, 0 },
+ { 0xc7ff, 0xc7ff, 0xc7ff, 0 },
+ { 0xc89b, 0xc89b, 0xc89b, 0 },
+ { 0xc937, 0xc937, 0xc937, 0 },
+ { 0xc9d2, 0xc9d2, 0xc9d2, 0 },
+ { 0xca6c, 0xca6c, 0xca6c, 0 },
+ { 0xcb06, 0xcb06, 0xcb06, 0 },
+ { 0xcb9f, 0xcb9f, 0xcb9f, 0 },
+ { 0xcc37, 0xcc37, 0xcc37, 0 },
+ { 0xcccf, 0xcccf, 0xcccf, 0 },
+ { 0xcd66, 0xcd66, 0xcd66, 0 },
+ { 0xcdfd, 0xcdfd, 0xcdfd, 0 },
+ { 0xce93, 0xce93, 0xce93, 0 },
+ { 0xcf29, 0xcf29, 0xcf29, 0 },
+ { 0xcfbe, 0xcfbe, 0xcfbe, 0 },
+ { 0xd053, 0xd053, 0xd053, 0 },
+ { 0xd0e7, 0xd0e7, 0xd0e7, 0 },
+ { 0xd17a, 0xd17a, 0xd17a, 0 },
+ { 0xd20d, 0xd20d, 0xd20d, 0 },
+ { 0xd2a0, 0xd2a0, 0xd2a0, 0 },
+ { 0xd331, 0xd331, 0xd331, 0 },
+ { 0xd3c3, 0xd3c3, 0xd3c3, 0 },
+ { 0xd454, 0xd454, 0xd454, 0 },
+ { 0xd4e4, 0xd4e4, 0xd4e4, 0 },
+ { 0xd574, 0xd574, 0xd574, 0 },
+ { 0xd603, 0xd603, 0xd603, 0 },
+ { 0xd692, 0xd692, 0xd692, 0 },
+ { 0xd720, 0xd720, 0xd720, 0 },
+ { 0xd7ae, 0xd7ae, 0xd7ae, 0 },
+ { 0xd83c, 0xd83c, 0xd83c, 0 },
+ { 0xd8c9, 0xd8c9, 0xd8c9, 0 },
+ { 0xd955, 0xd955, 0xd955, 0 },
+ { 0xd9e1, 0xd9e1, 0xd9e1, 0 },
+ { 0xda6d, 0xda6d, 0xda6d, 0 },
+ { 0xdaf8, 0xdaf8, 0xdaf8, 0 },
+ { 0xdb83, 0xdb83, 0xdb83, 0 },
+ { 0xdc0d, 0xdc0d, 0xdc0d, 0 },
+ { 0xdc97, 0xdc97, 0xdc97, 0 },
+ { 0xdd20, 0xdd20, 0xdd20, 0 },
+ { 0xdda9, 0xdda9, 0xdda9, 0 },
+ { 0xde31, 0xde31, 0xde31, 0 },
+ { 0xdeb9, 0xdeb9, 0xdeb9, 0 },
+ { 0xdf41, 0xdf41, 0xdf41, 0 },
+ { 0xdfc8, 0xdfc8, 0xdfc8, 0 },
+ { 0xe04f, 0xe04f, 0xe04f, 0 },
+ { 0xe0d5, 0xe0d5, 0xe0d5, 0 },
+ { 0xe15b, 0xe15b, 0xe15b, 0 },
+ { 0xe1e0, 0xe1e0, 0xe1e0, 0 },
+ { 0xe266, 0xe266, 0xe266, 0 },
+ { 0xe2ea, 0xe2ea, 0xe2ea, 0 },
+ { 0xe36f, 0xe36f, 0xe36f, 0 },
+ { 0xe3f3, 0xe3f3, 0xe3f3, 0 },
+ { 0xe476, 0xe476, 0xe476, 0 },
+ { 0xe4f9, 0xe4f9, 0xe4f9, 0 },
+ { 0xe57c, 0xe57c, 0xe57c, 0 },
+ { 0xe5fe, 0xe5fe, 0xe5fe, 0 },
+ { 0xe680, 0xe680, 0xe680, 0 },
+ { 0xe702, 0xe702, 0xe702, 0 },
+ { 0xe783, 0xe783, 0xe783, 0 },
+ { 0xe804, 0xe804, 0xe804, 0 },
+ { 0xe884, 0xe884, 0xe884, 0 },
+ { 0xe905, 0xe905, 0xe905, 0 },
+ { 0xe984, 0xe984, 0xe984, 0 },
+ { 0xea04, 0xea04, 0xea04, 0 },
+ { 0xea83, 0xea83, 0xea83, 0 },
+ { 0xeb02, 0xeb02, 0xeb02, 0 },
+ { 0xeb80, 0xeb80, 0xeb80, 0 },
+ { 0xebfe, 0xebfe, 0xebfe, 0 },
+ { 0xec7b, 0xec7b, 0xec7b, 0 },
+ { 0xecf9, 0xecf9, 0xecf9, 0 },
+ { 0xed76, 0xed76, 0xed76, 0 },
+ { 0xedf2, 0xedf2, 0xedf2, 0 },
+ { 0xee6f, 0xee6f, 0xee6f, 0 },
+ { 0xeeeb, 0xeeeb, 0xeeeb, 0 },
+ { 0xef66, 0xef66, 0xef66, 0 },
+ { 0xefe2, 0xefe2, 0xefe2, 0 },
+ { 0xf05d, 0xf05d, 0xf05d, 0 },
+ { 0xf0d7, 0xf0d7, 0xf0d7, 0 },
+ { 0xf152, 0xf152, 0xf152, 0 },
+ { 0xf1cc, 0xf1cc, 0xf1cc, 0 },
+ { 0xf245, 0xf245, 0xf245, 0 },
+ { 0xf2bf, 0xf2bf, 0xf2bf, 0 },
+ { 0xf338, 0xf338, 0xf338, 0 },
+ { 0xf3b0, 0xf3b0, 0xf3b0, 0 },
+ { 0xf429, 0xf429, 0xf429, 0 },
+ { 0xf4a1, 0xf4a1, 0xf4a1, 0 },
+ { 0xf519, 0xf519, 0xf519, 0 },
+ { 0xf590, 0xf590, 0xf590, 0 },
+ { 0xf608, 0xf608, 0xf608, 0 },
+ { 0xf67e, 0xf67e, 0xf67e, 0 },
+ { 0xf6f5, 0xf6f5, 0xf6f5, 0 },
+ { 0xf76b, 0xf76b, 0xf76b, 0 },
+ { 0xf7e1, 0xf7e1, 0xf7e1, 0 },
+ { 0xf857, 0xf857, 0xf857, 0 },
+ { 0xf8cd, 0xf8cd, 0xf8cd, 0 },
+ { 0xf942, 0xf942, 0xf942, 0 },
+ { 0xf9b7, 0xf9b7, 0xf9b7, 0 },
+ { 0xfa2b, 0xfa2b, 0xfa2b, 0 },
+ { 0xfaa0, 0xfaa0, 0xfaa0, 0 },
+ { 0xfb14, 0xfb14, 0xfb14, 0 },
+ { 0xfb88, 0xfb88, 0xfb88, 0 },
+ { 0xfbfb, 0xfbfb, 0xfbfb, 0 },
+ { 0xfc6e, 0xfc6e, 0xfc6e, 0 },
+ { 0xfce1, 0xfce1, 0xfce1, 0 },
+ { 0xfd54, 0xfd54, 0xfd54, 0 },
+ { 0xfdc6, 0xfdc6, 0xfdc6, 0 },
+ { 0xfe39, 0xfe39, 0xfe39, 0 },
+ { 0xfeaa, 0xfeaa, 0xfeaa, 0 },
+ { 0xff1c, 0xff1c, 0xff1c, 0 },
+ { 0xff8d, 0xff8d, 0xff8d, 0 },
+ { 0xffff, 0xffff, 0xffff, 0 },
+};
+
+const struct vkms_color_lut srgb_inv_eotf = {
+ .base = srgb_inv_array,
+ .lut_length = LUT_SIZE,
+ .channel_value2index_ratio = 0xff00ffll
+};
+EXPORT_SYMBOL(srgb_inv_eotf);
diff --git a/drivers/gpu/drm/vkms/vkms_luts.h b/drivers/gpu/drm/vkms/vkms_luts.h
new file mode 100644
index 000000000000..925a4a7b84e2
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_luts.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _VKMS_LUTS_H_
+#define _VKMS_LUTS_H_
+
+#define LUT_SIZE 256
+
+extern const struct vkms_color_lut linear_eotf;
+extern const struct vkms_color_lut srgb_eotf;
+extern const struct vkms_color_lut srgb_inv_eotf;
+
+#endif /* _VKMS_LUTS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 8d7ca0cdd79f..86ce07a617f5 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -4,6 +4,7 @@
#include "vkms_connector.h"
#include "vkms_drv.h"
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
int vkms_output_init(struct vkms_device *vkmsdev)
{
@@ -19,11 +20,7 @@ int vkms_output_init(struct vkms_device *vkmsdev)
return -EINVAL;
vkms_config_for_each_plane(vkmsdev->config, plane_cfg) {
- enum drm_plane_type type;
-
- type = vkms_config_plane_get_type(plane_cfg);
-
- plane_cfg->plane = vkms_plane_init(vkmsdev, type);
+ plane_cfg->plane = vkms_plane_init(vkmsdev, plane_cfg);
if (IS_ERR(plane_cfg->plane)) {
DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n");
return PTR_ERR(plane_cfg->plane);
@@ -77,9 +74,22 @@ int vkms_output_init(struct vkms_device *vkmsdev)
return ret;
}
+ encoder_cfg->encoder->possible_clones |=
+ drm_encoder_mask(encoder_cfg->encoder);
+
vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) {
encoder_cfg->encoder->possible_crtcs |=
drm_crtc_mask(&possible_crtc->crtc->crtc);
+
+ if (vkms_config_crtc_get_writeback(possible_crtc)) {
+ struct drm_encoder *wb_encoder =
+ &possible_crtc->crtc->wb_encoder;
+
+ encoder_cfg->encoder->possible_clones |=
+ drm_encoder_mask(wb_encoder);
+ wb_encoder->possible_clones |=
+ drm_encoder_mask(encoder_cfg->encoder);
+ }
}
}
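possible_clones is a bitmask over encoder indices, and the DRM core expects every encoder to list itself, which is why both paths above start by OR-ing in the encoder's own mask. With hypothetical indices (connector encoder at index 1, writeback encoder at index 0 on the same CRTC), the loop above produces:

	/*
	 * drm_encoder_mask(e) is BIT(drm_encoder_index(e)), so:
	 *   encoder->possible_clones    == BIT(1) | BIT(0)  (itself + writeback)
	 *   wb_encoder->possible_clones == BIT(0) | BIT(1)  (itself + encoder)
	 */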
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e3fdd161d0f0..19fe6acad306 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
+#include "vkms_config.h"
#include <linux/iosys-map.h>
#include <drm/drm_atomic.h>
@@ -8,17 +9,26 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include "vkms_drv.h"
#include "vkms_formats.h"
static const u32 vkms_formats[] = {
DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
DRM_FORMAT_XRGB16161616,
+ DRM_FORMAT_XBGR16161616,
DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_ABGR16161616,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
DRM_FORMAT_NV12,
DRM_FORMAT_NV16,
DRM_FORMAT_NV24,
@@ -31,6 +41,9 @@ static const u32 vkms_formats[] = {
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
DRM_FORMAT_YVU444,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
DRM_FORMAT_R1,
DRM_FORMAT_R2,
DRM_FORMAT_R4,
@@ -206,7 +219,7 @@ static const struct drm_plane_helper_funcs vkms_plane_helper_funcs = {
};
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type)
+ struct vkms_config_plane *plane_cfg)
{
struct drm_device *dev = &vkmsdev->drm;
struct vkms_plane *plane;
@@ -214,7 +227,8 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 0,
&vkms_plane_funcs,
vkms_formats, ARRAY_SIZE(vkms_formats),
- NULL, type, NULL);
+ NULL, vkms_config_plane_get_type(plane_cfg),
+ NULL);
if (IS_ERR(plane))
return plane;
@@ -232,5 +246,8 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_FULL_RANGE);
+ if (vkms_config_plane_get_default_pipeline(plane_cfg))
+ vkms_initialize_colorops(&plane->base);
+
return plane;
}
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index fe163271d5b5..097ae1f0a230 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -6,6 +6,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_writeback.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -174,6 +175,8 @@ int vkms_enable_writeback_connector(struct vkms_device *vkmsdev,
if (ret)
return ret;
vkms_output->wb_encoder.possible_crtcs |= drm_crtc_mask(&vkms_output->crtc);
+ vkms_output->wb_encoder.possible_clones |=
+ drm_encoder_mask(&vkms_output->wb_encoder);
drm_connector_helper_add(&wb->base, &vkms_wb_conn_helper_funcs);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
index 718832b08d96..c46f17ba7236 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c
@@ -100,8 +100,10 @@ vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps)
if (vmw->has_mob) {
if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0)
return VMW_CURSOR_UPDATE_MOB;
+ else
+ return VMW_CURSOR_UPDATE_GB_ONLY;
}
-
+ drm_warn_once(&vmw->drm, "Unknown Cursor Type!\n");
return VMW_CURSOR_UPDATE_NONE;
}
@@ -139,6 +141,7 @@ static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type,
{
switch (update_type) {
case VMW_CURSOR_UPDATE_LEGACY:
+ case VMW_CURSOR_UPDATE_GB_ONLY:
case VMW_CURSOR_UPDATE_NONE:
return 0;
case VMW_CURSOR_UPDATE_MOB:
@@ -623,6 +626,7 @@ int vmw_cursor_plane_prepare_fb(struct drm_plane *plane,
if (!surface || vps->cursor.legacy.id == surface->snooper.id)
vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE;
break;
+ case VMW_CURSOR_UPDATE_GB_ONLY:
case VMW_CURSOR_UPDATE_MOB: {
bo = vmw_user_object_buffer(&vps->uo);
if (bo) {
@@ -737,6 +741,7 @@ void
vmw_cursor_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
+ struct vmw_bo *bo;
struct drm_plane_state *new_state =
drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_state =
@@ -762,6 +767,15 @@ vmw_cursor_plane_atomic_update(struct drm_plane *plane,
case VMW_CURSOR_UPDATE_MOB:
vmw_cursor_update_mob(dev_priv, vps);
break;
+ case VMW_CURSOR_UPDATE_GB_ONLY:
+ bo = vmw_user_object_buffer(&vps->uo);
+ if (bo)
+ vmw_send_define_cursor_cmd(dev_priv, bo->map.virtual,
+ vps->base.crtc_w,
+ vps->base.crtc_h,
+ vps->base.hotspot_x,
+ vps->base.hotspot_y);
+ break;
case VMW_CURSOR_UPDATE_NONE:
/* do nothing */
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
index 40694925a70e..0c2cc0699b0d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h
@@ -33,6 +33,7 @@ static const u32 __maybe_unused vmw_cursor_plane_formats[] = {
enum vmw_cursor_update_type {
VMW_CURSOR_UPDATE_NONE = 0,
VMW_CURSOR_UPDATE_LEGACY,
+ VMW_CURSOR_UPDATE_GB_ONLY,
VMW_CURSOR_UPDATE_MOB,
};
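
For reference, the decision ladder the cursor hunks above implement, paraphrased
as a sketch (GB = guest-backed; the warning fires only when no update path applies):

	/*
	 * SVGA_CAP2_CURSOR_MOB set     -> VMW_CURSOR_UPDATE_MOB
	 * has_mob, no CURSOR_MOB cap   -> VMW_CURSOR_UPDATE_GB_ONLY
	 * otherwise                    -> VMW_CURSOR_UPDATE_NONE (+ drm_warn_once)
	 */
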
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8ff958d119be..599052d07ae8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1023,8 +1023,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
dev_priv->drm.dev,
dev_priv->drm.anon_inode->i_mapping,
dev_priv->drm.vma_offset_manager,
- dev_priv->map_mode == vmw_dma_alloc_coherent,
- false);
+ (dev_priv->map_mode == vmw_dma_alloc_coherent) ?
+ TTM_ALLOCATION_POOL_USE_DMA_ALLOC : 0);
if (unlikely(ret != 0)) {
drm_err(&dev_priv->drm,
"Failed initializing TTM buffer object driver.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index eda5b6f8f4c4..f2abaf1bda6a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -16,6 +16,7 @@
#include <drm/drm_auth.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
+#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#include <drm/ttm/ttm_execbuf_util.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 819704ac675d..3057f8baa7d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1497,6 +1497,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_bo *vmw_bo = NULL;
+ struct vmw_resource *res;
struct vmw_surface *srf = NULL;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
int ret;
@@ -1532,18 +1533,24 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
VMW_RES_DIRTY_SET : 0;
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- dirty, user_surface_converter,
- &cmd->body.host.sid, NULL);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, dirty,
+ user_surface_converter, &cmd->body.host.sid,
+ NULL);
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
VMW_DEBUG_USER("could not find surface for DMA.\n");
return ret;
}
- srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
+ res = sw_context->res_cache[vmw_res_surface].res;
+ if (!res) {
+ VMW_DEBUG_USER("Invalid DMA surface.\n");
+ return -EINVAL;
+ }
- vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
+ srf = vmw_res_to_srf(res);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo,
+ header);
return 0;
}
@@ -3661,6 +3668,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
cmd_id = header->id;
+ if (header->size > SVGA_CMD_MAX_DATASIZE) {
+ VMW_DEBUG_USER("SVGA3D command: %d is too big.\n",
+ cmd_id + SVGA_3D_CMD_BASE);
+ return -E2BIG;
+ }
*size = header->size + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
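
The added -E2BIG check follows the usual pattern for guest-supplied command
streams: bound the untrusted header before using its size to walk the buffer. A
minimal sketch of that pattern, using the header layout and limit from the
vmwgfx device headers (illustration only, not the patch's exact code):

	static int check_cmd_size(const SVGA3dCmdHeader *header, size_t *size)
	{
		/* Reject oversized commands before trusting header->size. */
		if (header->size > SVGA_CMD_MAX_DATASIZE)
			return -E2BIG;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		return 0;
	}
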
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c2294abbe753..00be92da5509 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
if (likely(eaction->tv_sec != NULL)) {
struct timespec64 ts;
- ktime_to_timespec64(f->timestamp);
+ ts = ktime_to_timespec64(f->timestamp);
/* monotonic time, so no y2038 overflow */
*eaction->tv_sec = ts.tv_sec;
*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index eedf1fe60be7..39f8c46550c2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -37,7 +37,7 @@ static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
if (bo)
- ttm_bo_put(bo);
+ ttm_bo_fini(bo);
}
static int vmw_gem_object_open(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 54ea1b513950..d32ce1cb579e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -553,6 +553,9 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
memcpy(&vfbs->uo, uo, sizeof(vfbs->uo));
vmw_user_object_ref(&vfbs->uo);
+ if (vfbs->uo.buffer)
+ vfbs->base.base.obj[0] = &vfbs->uo.buffer->tbo.base;
+
*out = &vfbs->base;
ret = drm_framebuffer_init(dev, &vfbs->base.base,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index 7de20e56082c..fd4e76486f2d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -32,22 +32,22 @@ enum vmw_bo_dirty_method {
/**
* struct vmw_bo_dirty - Dirty information for buffer objects
+ * @ref_count: Reference count for this structure. Must be first member!
* @start: First currently dirty bit
* @end: Last currently dirty bit + 1
* @method: The currently used dirty method
* @change_count: Number of consecutive method change triggers
- * @ref_count: Reference count for this structure
* @bitmap_size: The size of the bitmap in bits. Typically equal to the
* number of pages in the bo.
* @bitmap: A bitmap where each bit represents a page. A set bit means a
* dirty page.
*/
struct vmw_bo_dirty {
+ struct kref ref_count;
unsigned long start;
unsigned long end;
enum vmw_bo_dirty_method method;
unsigned int change_count;
- unsigned int ref_count;
unsigned long bitmap_size;
unsigned long bitmap[];
};
@@ -221,7 +221,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
int ret;
if (dirty) {
- dirty->ref_count++;
+ kref_get(&dirty->ref_count);
return 0;
}
@@ -235,7 +235,7 @@ int vmw_bo_dirty_add(struct vmw_bo *vbo)
dirty->bitmap_size = num_pages;
dirty->start = dirty->bitmap_size;
dirty->end = 0;
- dirty->ref_count = 1;
+ kref_init(&dirty->ref_count);
if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
dirty->method = VMW_BO_DIRTY_PAGETABLE;
} else {
@@ -274,10 +274,8 @@ void vmw_bo_dirty_release(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- if (dirty && --dirty->ref_count == 0) {
- kvfree(dirty);
+ if (dirty && kref_put(&dirty->ref_count, (void *)kvfree))
vbo->dirty = NULL;
- }
}
/**
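
A note on the conversion above: kref_put(&dirty->ref_count, (void *)kvfree)
only works because ref_count is the first member, so the struct and kref share
an address. A CFI-safe alternative, sketched here under the same struct layout
(illustration, not part of this patch), uses a dedicated release callback:

	static void vmw_bo_dirty_free(struct kref *kref)
	{
		struct vmw_bo_dirty *dirty =
			container_of(kref, struct vmw_bo_dirty, ref_count);

		kvfree(dirty);
	}

	/* ... kref_put(&dirty->ref_count, vmw_bo_dirty_free) ... */
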
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7e281c3c6bc5..c4ac9b47e23a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -15,6 +15,7 @@
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"
+#include <drm/drm_dumb_buffers.h>
#include <drm/ttm/ttm_placement.h>
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
@@ -2267,23 +2268,9 @@ int vmw_dumb_create(struct drm_file *file_priv,
* contents is going to be rendered guest side.
*/
if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
- int cpp = DIV_ROUND_UP(args->bpp, 8);
-
- switch (cpp) {
- case 1: /* DRM_FORMAT_C8 */
- case 2: /* DRM_FORMAT_RGB565 */
- case 4: /* DRM_FORMAT_XRGB8888 */
- break;
- default:
- /*
- * Dumb buffers don't allow anything else.
- * This is tested via IGT's dumb_buffers
- */
- return -EINVAL;
- }
-
- args->pitch = args->width * cpp;
- args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);
+ ret = drm_mode_size_dumb(dev, args, 0, 0);
+ if (ret)
+ return ret;
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
args->size, &args->handle,
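
For context, this is roughly what the deleted open-coded math did and what
drm_mode_size_dumb() is expected to compute for the 0/0 alignment arguments
passed above (a sketch; the real helper also validates the arithmetic against
overflow):

	u32 cpp = DIV_ROUND_UP(args->bpp, 8);

	args->pitch = args->width * cpp;
	args->size = ALIGN((u64)args->pitch * args->height, PAGE_SIZE);
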
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index 7ee93e7191c7..35dc94c3db39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -308,8 +308,10 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
hash_add_rcu(ctx->sw_context->res_ht, &node->hash.head, node->hash.key);
}
node->res = vmw_resource_reference_unless_doomed(res);
- if (!node->res)
+ if (!node->res) {
+ hash_del_rcu(&node->hash.head);
return -ESRCH;
+ }
node->first_usage = 1;
if (!res->dev_priv->has_mob) {
@@ -636,7 +638,7 @@ void vmw_validation_drop_ht(struct vmw_validation_context *ctx)
hash_del_rcu(&val->hash.head);
list_for_each_entry(val, &ctx->resource_ctx_list, head)
- hash_del_rcu(&entry->hash.head);
+ hash_del_rcu(&val->hash.head);
ctx->sw_context = NULL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
index aec774fa4d7b..5abd7f5ad2db 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c
@@ -247,9 +247,8 @@ vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vmw_private *vmw = vmw_priv(dev);
- unsigned int pipe = crtc->index;
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
if (!vmw->vkms_enabled)
return false;
@@ -281,8 +280,7 @@ vmw_vkms_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct vmw_private *vmw = vmw_priv(dev);
- unsigned int pipe = drm_crtc_index(crtc);
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
if (!vmw->vkms_enabled)
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index 714d5702dfd7..4b288eb3f5b0 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -13,7 +13,6 @@ config DRM_XE
select TMPFS
select DRM_BUDDY
select DRM_CLIENT_SELECTION
- select DRM_EXEC
select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
select DRM_PANEL
@@ -40,12 +39,12 @@ config DRM_XE
select DRM_TTM
select DRM_TTM_HELPER
select DRM_EXEC
+ select DRM_GPUSVM if !UML && DEVICE_PRIVATE
select DRM_GPUVM
select DRM_SCHED
select MMU_NOTIFIER
select WANT_DEV_COREDUMP
select AUXILIARY_BUS
- select HMM_MIRROR
select REGMAP if I2C
help
Driver for Intel Xe2 series GPUs and later. Experimental support
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 01735c6ece8b..01227c77f6d7 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -40,23 +40,23 @@ config DRM_XE_DEBUG_VM
If in doubt, say "N".
-config DRM_XE_DEBUG_MEMIRQ
- bool "Enable extra memirq debugging"
+config DRM_XE_DEBUG_SRIOV
+ bool "Enable extra SR-IOV debugging"
default n
+ imply DRM_XE_DEBUG_MEMIRQ
help
- Choose this option to enable additional debugging info for
- memory based interrupts.
+ Enable extra SR-IOV debugging info.
Recommended for driver developers only.
If in doubt, say "N".
-config DRM_XE_DEBUG_SRIOV
- bool "Enable extra SR-IOV debugging"
+config DRM_XE_DEBUG_MEMIRQ
+ bool "Enable extra memirq debugging"
default n
- select DRM_XE_DEBUG_MEMIRQ
help
- Enable extra SR-IOV debugging info.
+ Choose this option to enable additional debugging info for
+ memory based interrupts.
Recommended for driver developers only.
@@ -104,6 +104,7 @@ config DRM_XE_DEBUG_GUC
config DRM_XE_USERPTR_INVAL_INJECT
bool "Inject userptr invalidation -EINVAL errors"
+ depends on DRM_GPUSVM
default n
help
Choose this option when debugging error paths that
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 07c71a29963d..62be4a5227e4 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -35,6 +35,7 @@ $(obj)/generated/%_device_wa_oob.c $(obj)/generated/%_device_wa_oob.h: $(obj)/xe
xe-y += xe_bb.o \
xe_bo.o \
xe_bo_evict.o \
+ xe_dep_scheduler.o \
xe_devcoredump.o \
xe_device.o \
xe_device_sysfs.o \
@@ -57,10 +58,8 @@ xe-y += xe_bb.o \
xe_gt_freq.o \
xe_gt_idle.o \
xe_gt_mcr.o \
- xe_gt_pagefault.o \
xe_gt_sysfs.o \
xe_gt_throttle.o \
- xe_gt_tlb_invalidation.o \
xe_gt_topology.o \
xe_guc.o \
xe_guc_ads.o \
@@ -73,28 +72,35 @@ xe-y += xe_bb.o \
xe_guc_id_mgr.o \
xe_guc_klv_helpers.o \
xe_guc_log.o \
+ xe_guc_pagefault.o \
xe_guc_pc.o \
xe_guc_submit.o \
+ xe_guc_tlb_inval.o \
xe_heci_gsc.o \
xe_huc.o \
xe_hw_engine.o \
xe_hw_engine_class_sysfs.o \
xe_hw_engine_group.o \
+ xe_hw_error.o \
xe_hw_fence.o \
xe_irq.o \
+ xe_late_bind_fw.o \
xe_lrc.o \
xe_migrate.o \
xe_mmio.o \
+ xe_mmio_gem.o \
xe_mocs.o \
xe_module.o \
xe_nvm.o \
xe_oa.o \
xe_observation.o \
+ xe_pagefault.o \
xe_pat.o \
xe_pci.o \
xe_pcode.o \
xe_pm.o \
xe_preempt_fence.o \
+ xe_psmi.o \
xe_pt.o \
xe_pt_walk.o \
xe_pxp.o \
@@ -114,6 +120,8 @@ xe-y += xe_bb.o \
xe_sync.o \
xe_tile.o \
xe_tile_sysfs.o \
+ xe_tlb_inval.o \
+ xe_tlb_inval_job.o \
xe_trace.o \
xe_trace_bo.o \
xe_trace_guc.o \
@@ -124,7 +132,9 @@ xe-y += xe_bb.o \
xe_tuning.o \
xe_uc.o \
xe_uc_fw.o \
+ xe_validation.o \
xe_vm.o \
+ xe_vm_madvise.o \
xe_vram.o \
xe_vram_freq.o \
xe_vsec.o \
@@ -133,8 +143,8 @@ xe-y += xe_bb.o \
xe_wopcm.o
xe-$(CONFIG_I2C) += xe_i2c.o
-xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o
xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o
+xe-$(CONFIG_DRM_GPUSVM) += xe_userptr.o
# graphics hardware monitoring (HWMON) support
xe-$(CONFIG_HWMON) += xe_hwmon.o
@@ -149,6 +159,7 @@ xe-y += \
xe_memirq.o \
xe_sriov.o \
xe_sriov_vf.o \
+ xe_sriov_vf_ccs.o \
xe_tile_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
@@ -163,8 +174,19 @@ xe-$(CONFIG_PCI_IOV) += \
xe_lmtt_2l.o \
xe_lmtt_ml.o \
xe_pci_sriov.o \
+ xe_sriov_packet.o \
xe_sriov_pf.o \
- xe_sriov_pf_service.o
+ xe_sriov_pf_control.o \
+ xe_sriov_pf_debugfs.o \
+ xe_sriov_pf_migration.o \
+ xe_sriov_pf_provision.o \
+ xe_sriov_pf_service.o \
+ xe_sriov_pf_sysfs.o \
+ xe_tile_sriov_pf_debugfs.o
+
+ifdef CONFIG_XE_VFIO_PCI
+ xe-$(CONFIG_PCI_IOV) += xe_sriov_vfio.o
+endif
# include helpers for tests even when XE is built-in
ifdef CONFIG_DRM_XE_KUNIT_TEST
@@ -191,7 +213,6 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
# Display code specific to xe
xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/ext/i915_irq.o \
- display/ext/i915_utils.o \
display/intel_bo.o \
display/intel_fb_bo.o \
display/intel_fbdev_fb.o \
@@ -202,7 +223,9 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
display/xe_hdcp_gsc.o \
+ display/xe_panic.o \
display/xe_plane_initial.o \
+ display/xe_stolen.o \
display/xe_tdf.o
# SOC code shared with i915
@@ -219,15 +242,19 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_backlight.o \
i915-display/intel_bios.o \
i915-display/intel_bw.o \
+ i915-display/intel_casf.o \
i915-display/intel_cdclk.o \
i915-display/intel_cmtg.o \
i915-display/intel_color.o \
+ i915-display/intel_colorop.o \
+ i915-display/intel_color_pipeline.o \
i915-display/intel_combo_phy.o \
i915-display/intel_connector.o \
i915-display/intel_crtc.o \
i915-display/intel_crtc_state_dump.o \
i915-display/intel_cursor.o \
i915-display/intel_cx0_phy.o \
+ i915-display/intel_dbuf_bw.o \
i915-display/intel_ddi.o \
i915-display/intel_ddi_buf_trans.o \
i915-display/intel_display.o \
@@ -239,7 +266,9 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_display_power.o \
i915-display/intel_display_power_map.o \
i915-display/intel_display_power_well.o \
+ i915-display/intel_display_rpm.o \
i915-display/intel_display_trace.o \
+ i915-display/intel_display_utils.o \
i915-display/intel_display_wa.o \
i915-display/intel_dkl_phy.o \
i915-display/intel_dmc.o \
@@ -276,6 +305,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_hti.o \
i915-display/intel_link_bw.o \
i915-display/intel_lspcon.o \
+ i915-display/intel_lt_phy.o \
i915-display/intel_modeset_lock.o \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
@@ -296,6 +326,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_vga.o \
i915-display/intel_vrr.o \
i915-display/intel_wm.o \
+ i915-display/skl_prefill.o \
i915-display/skl_scaler.o \
i915-display/skl_universal_plane.o \
i915-display/skl_watermark.o
@@ -317,6 +348,7 @@ ifeq ($(CONFIG_DEBUG_FS),y)
xe_gt_stats.o \
xe_guc_debugfs.o \
xe_huc_debugfs.o \
+ xe_tile_debugfs.o \
xe_uc_debugfs.o
xe-$(CONFIG_PCI_IOV) += xe_gt_sriov_pf_debugfs.o
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 81eb046aeebf..47756e4674a1 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -117,6 +117,7 @@ enum xe_guc_action {
XE_GUC_ACTION_ENTER_S_STATE = 0x501,
XE_GUC_ACTION_EXIT_S_STATE = 0x502,
XE_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+ XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
XE_GUC_ACTION_SCHED_CONTEXT = 0x1000,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
XE_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
@@ -154,6 +155,8 @@ enum xe_guc_action {
XE_GUC_ACTION_NOTIFY_FLUSH_LOG_BUFFER_TO_FILE = 0x8003,
XE_GUC_ACTION_NOTIFY_CRASH_DUMP_POSTED = 0x8004,
XE_GUC_ACTION_NOTIFY_EXCEPTION = 0x8005,
+ XE_GUC_ACTION_TEST_G2G_SEND = 0xF001,
+ XE_GUC_ACTION_TEST_G2G_RECV = 0xF002,
XE_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
index b28c8fa061f7..ce5c59517528 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_slpc_abi.h
@@ -210,6 +210,11 @@ struct slpc_shared_data {
u8 reserved_mode_definition[4096];
} __packed;
+enum slpc_power_profile {
+ SLPC_POWER_PROFILE_BASE = 0x0,
+ SLPC_POWER_PROFILE_POWER_SAVING = 0x1
+};
+
/**
* DOC: SLPC H2G MESSAGE FORMAT
*
diff --git a/drivers/gpu/drm/xe/abi/guc_errors_abi.h b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
index ecf748fd87df..ad76b4baf42e 100644
--- a/drivers/gpu/drm/xe/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
@@ -63,6 +63,7 @@ enum xe_guc_load_status {
XE_GUC_LOAD_STATUS_HWCONFIG_START = 0x05,
XE_GUC_LOAD_STATUS_HWCONFIG_DONE = 0x06,
XE_GUC_LOAD_STATUS_HWCONFIG_ERROR = 0x07,
+ XE_GUC_LOAD_STATUS_BOOTROM_VERSION_MISMATCH = 0x08,
XE_GUC_LOAD_STATUS_GDT_DONE = 0x10,
XE_GUC_LOAD_STATUS_IDT_DONE = 0x20,
XE_GUC_LOAD_STATUS_LAPIC_DONE = 0x30,
@@ -75,6 +76,8 @@ enum xe_guc_load_status {
XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_START,
XE_GUC_LOAD_STATUS_MPU_DATA_INVALID = 0x73,
XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID = 0x74,
+ XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR = 0x75,
+ XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG = 0x76,
XE_GUC_LOAD_STATUS_INVALID_INIT_DATA_RANGE_END,
XE_GUC_LOAD_STATUS_READY = 0xF0,
diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
index 0366a9da5977..265a135e7061 100644
--- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h
@@ -17,6 +17,7 @@
* | 0 | 31:16 | **KEY** - KLV key identifier |
* | | | - `GuC Self Config KLVs`_ |
* | | | - `GuC Opt In Feature KLVs`_ |
+ * | | | - `GuC Scheduling Policies KLVs`_ |
* | | | - `GuC VGT Policy KLVs`_ |
* | | | - `GuC VF Configuration KLVs`_ |
* | | | |
@@ -153,6 +154,30 @@ enum {
#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u
/**
+ * DOC: GuC Scheduling Policies KLVs
+ *
+ * `GuC KLV`_ keys available for use with UPDATE_SCHEDULING_POLICIES_KLV.
+ *
+ * _`GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD` : 0x1001
+ * Some platforms do not allow concurrent execution of RCS and CCS
+ * workloads from different address spaces. By default, the GuC prioritizes
+ * RCS submissions over CCS ones, which can lead to CCS workloads being
+ * significantly (or completely) starved of execution time. This KLV allows
+ * the driver to specify a quantum (in ms) and a ratio (percentage value
+ * between 0 and 100), and the GuC will prioritize the CCS for that
+ * percentage of each quantum. For example, specifying 100ms and 30% will
+ * make the GuC prioritize the CCS for 30ms of every 100ms.
+ * Note that this does not necessarily mean that RCS and CCS engines will
+ * only be active for their percentage of the quantum, as the restriction
+ * only kicks in if both classes are fully busy with non-compatible address
+ * spaces; i.e., if one engine is idle or running the same address space,
+ * a pending job on the other engine will still be submitted to the HW no
+ * matter what the ratio is.
+ */
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY 0x1001
+#define GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN 2u
+
+/**
* DOC: GuC VGT Policy KLVs
*
* `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY.
@@ -390,12 +415,14 @@ enum {
*/
enum xe_guc_klv_ids {
GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED = 0x9002,
+ GUC_WORKAROUND_KLV_DISABLE_PSMI_INTERRUPTS_AT_C6_ENTRY_RESTORE_AT_EXIT = 0x9004,
GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING = 0x9005,
GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE = 0x9007,
GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE = 0x9008,
GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET = 0x9009,
GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO = 0x900a,
GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH = 0x900b,
+ GUC_WA_KLV_RESTORE_UNSAVED_MEDIA_CONTROL_REG = 0x900c,
};
#endif
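
As a rough illustration of packing the yield policy documented above: per the
`GuC KLV`_ format, dword 0 carries the key in bits 31:16 and the value length
(in dwords) in bits 15:0. This sketch assumes the two value dwords are the
quantum in ms followed by the ratio in percent (that ordering is an assumption,
not stated by the header); FIELD_PREP comes from <linux/bitfield.h>:

	u32 klv[3];

	klv[0] = FIELD_PREP(GENMASK(31, 16),
			    GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_KEY) |
		 FIELD_PREP(GENMASK(15, 0),
			    GUC_KLV_SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD_LEN);
	klv[1] = 100;	/* assumed: quantum in ms */
	klv[2] = 30;	/* assumed: CCS share of each quantum, percent */
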
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
index 8a048980ea38..0548b2e0316f 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
@@ -5,10 +5,8 @@
#define __I915_GEM_OBJECT_H__
struct dma_fence;
-struct i915_sched_attr;
-static inline void i915_gem_fence_wait_priority(struct dma_fence *fence,
- const struct i915_sched_attr *attr)
+static inline void i915_gem_fence_wait_priority_display(struct dma_fence *fence)
{
}
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
index 41d39d67817a..48e3256ba37e 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h
@@ -6,87 +6,35 @@
#ifndef _I915_GEM_STOLEN_H_
#define _I915_GEM_STOLEN_H_
-#include "xe_ttm_stolen_mgr.h"
-#include "xe_res_cursor.h"
-
-struct xe_bo;
-
-struct i915_stolen_fb {
- struct xe_bo *bo;
-};
-
-static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe,
- struct i915_stolen_fb *fb,
- u32 size, u32 align,
- u32 start, u32 end)
-{
- struct xe_bo *bo;
- int err;
- u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
-
- if (start < SZ_4K)
- start = SZ_4K;
-
- if (align) {
- size = ALIGN(size, align);
- start = ALIGN(start, align);
- }
-
- bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe),
- NULL, size, start, end,
- ttm_bo_type_kernel, flags, 0);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- bo = NULL;
- return err;
- }
- err = xe_bo_pin(bo);
- xe_bo_unlock_vm_held(bo);
-
- if (err) {
- xe_bo_put(fb->bo);
- bo = NULL;
- }
-
- fb->bo = bo;
-
- return err;
-}
-
-static inline int i915_gem_stolen_insert_node(struct xe_device *xe,
- struct i915_stolen_fb *fb,
- u32 size, u32 align)
-{
- /* Not used on xe */
- BUG_ON(1);
- return -ENODEV;
-}
-
-static inline void i915_gem_stolen_remove_node(struct xe_device *xe,
- struct i915_stolen_fb *fb)
-{
- xe_bo_unpin_map_no_vm(fb->bo);
- fb->bo = NULL;
-}
-
-#define i915_gem_stolen_initialized(xe) (!!ttm_manager_type(&(xe)->ttm, XE_PL_STOLEN))
-#define i915_gem_stolen_node_allocated(fb) (!!((fb)->bo))
-
-static inline u32 i915_gem_stolen_node_offset(struct i915_stolen_fb *fb)
-{
- struct xe_res_cursor res;
-
- xe_res_first(fb->bo->ttm.resource, 0, 4096, &res);
- return res.start;
-}
-
-/* Used for < gen4. These are not supported by Xe */
-#define i915_gem_stolen_area_address(xe) (!WARN_ON(1))
-/* Used for gen9 specific WA. Gen9 is not supported by Xe */
-#define i915_gem_stolen_area_size(xe) (!WARN_ON(1))
-
-#define i915_gem_stolen_node_address(xe, fb) (xe_ttm_stolen_gpu_offset(xe) + \
- i915_gem_stolen_node_offset(fb))
-#define i915_gem_stolen_node_size(fb) ((u64)((fb)->bo->ttm.base.size))
+#include <linux/types.h>
+
+struct drm_device;
+struct intel_stolen_node;
+
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
+ unsigned int align, u64 start, u64 end);
+
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size,
+ unsigned int align);
+
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node);
+
+bool i915_gem_stolen_initialized(struct drm_device *drm);
+
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node);
+
+u32 i915_gem_stolen_node_offset(struct intel_stolen_node *node);
+
+u64 i915_gem_stolen_area_address(struct drm_device *drm);
+
+u64 i915_gem_stolen_area_size(struct drm_device *drm);
+
+u64 i915_gem_stolen_node_address(struct intel_stolen_node *node);
+
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node);
+
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm);
+
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node);
#endif
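
A hedged usage sketch of the node-based interface declared above (error
handling elided; whether node_free accepts NULL is not specified by the header,
so this assumes a valid node):

	struct intel_stolen_node *node = i915_gem_stolen_node_alloc(drm);

	if (node && !i915_gem_stolen_insert_node_in_range(node, size, align,
							  start, end)) {
		u32 offset = i915_gem_stolen_node_offset(node);

		/* ... program hardware with offset ... */
		i915_gem_stolen_remove_node(node);
	}
	i915_gem_stolen_node_free(node);
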
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index 9b7572e06f34..3e79a74ff7de 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -12,8 +12,6 @@
#include <drm/drm_drv.h>
-#include "i915_utils.h"
-#include "xe_device.h" /* for xe_device_has_flat_ccs() */
#include "xe_device_types.h"
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
@@ -26,38 +24,14 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
-#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
#define IS_BROADWELL(dev_priv) (dev_priv && 0)
-#define IS_SKYLAKE(dev_priv) (dev_priv && 0)
#define IS_BROXTON(dev_priv) (dev_priv && 0)
-#define IS_KABYLAKE(dev_priv) (dev_priv && 0)
#define IS_GEMINILAKE(dev_priv) (dev_priv && 0)
-#define IS_COFFEELAKE(dev_priv) (dev_priv && 0)
-#define IS_COMETLAKE(dev_priv) (dev_priv && 0)
-#define IS_ICELAKE(dev_priv) (dev_priv && 0)
-#define IS_JASPERLAKE(dev_priv) (dev_priv && 0)
-#define IS_ELKHARTLAKE(dev_priv) (dev_priv && 0)
-#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_TIGERLAKE)
-#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
-#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
-#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
-#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \
- IS_PLATFORM(dev_priv, XE_ALDERLAKE_N))
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
-#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
-#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)
-#define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE)
-#define IS_PANTHERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_PANTHERLAKE)
-
-#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
-#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_MOBILE(xe) (xe && 0)
-#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
-#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)
-
#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
deleted file mode 100644
index c11130440d31..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_scheduler_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/* Copyright © 2025 Intel Corporation */
-
-#ifndef __I915_SCHEDULER_TYPES_H__
-#define __I915_SCHEDULER_TYPES_H__
-
-#define I915_PRIORITY_DISPLAY 0
-
-struct i915_sched_attr {
- int priority;
-};
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
index 1d7c4360e5c0..bcd441dc0fce 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
@@ -3,4 +3,11 @@
* Copyright © 2023 Intel Corporation
*/
-#include "../../i915/i915_utils.h"
+/* for soc/ */
+#ifndef MISSING_CASE
+#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
+ __stringify(x), (long)(x))
+#endif
+
+/* for a couple of users under i915/display */
+#define i915_inject_probe_failure(unused) ((unused) && 0)
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
index 4465c40f8134..b17e3bab23d5 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
@@ -26,8 +26,6 @@ struct i915_vma {
struct xe_ggtt_node *node;
};
-#define i915_ggtt_clear_scanout(bo) do { } while (0)
-
#define i915_vma_fence_id(vma) -1
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index d012f02bc84f..d93ddacdf743 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -91,27 +91,6 @@ static inline u32 intel_uncore_rmw(struct intel_uncore *uncore,
return xe_mmio_rmw32(__compat_uncore_to_mmio(uncore), reg, clear, set);
}
-static inline int intel_wait_for_register(struct intel_uncore *uncore,
- i915_reg_t i915_reg, u32 mask,
- u32 value, unsigned int timeout)
-{
- struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
-
- return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
- timeout * USEC_PER_MSEC, NULL, false);
-}
-
-static inline int intel_wait_for_register_fw(struct intel_uncore *uncore,
- i915_reg_t i915_reg, u32 mask,
- u32 value, unsigned int timeout,
- u32 *out_value)
-{
- struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
-
- return xe_mmio_wait32(__compat_uncore_to_mmio(uncore), reg, mask, value,
- timeout * USEC_PER_MSEC, out_value, false);
-}
-
static inline int
__intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
u32 mask, u32 value, unsigned int fast_timeout_us,
@@ -133,6 +112,16 @@ __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg,
out_value, atomic);
}
+static inline int
+__intel_wait_for_register_fw(struct intel_uncore *uncore, i915_reg_t i915_reg,
+ u32 mask, u32 value, unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms, u32 *out_value)
+{
+ return __intel_wait_for_register(uncore, i915_reg, mask, value,
+ fast_timeout_us, slow_timeout_ms,
+ out_value);
+}
+
static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
diff --git a/drivers/gpu/drm/xe/display/ext/i915_utils.c b/drivers/gpu/drm/xe/display/ext/i915_utils.c
deleted file mode 100644
index 43b10a2cc508..000000000000
--- a/drivers/gpu/drm/xe/display/ext/i915_utils.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "i915_drv.h"
-
-bool i915_vtd_active(struct drm_i915_private *i915)
-{
- if (device_iommu_mapped(i915->drm.dev))
- return true;
-
- /* Running as a guest, we assume the host is enforcing VT'd */
- return i915_run_as_guest();
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
-
-/* i915 specific, just put here for shutting it up */
-int __i915_inject_probe_error(struct drm_i915_private *i915, int err,
- const char *func, int line)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
index 910632f57c3d..bad2243b9114 100644
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_bo.c
@@ -1,15 +1,11 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */
-#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
-#include <drm/drm_panic.h>
-
-#include "intel_fb.h"
-#include "intel_display_types.h"
#include "xe_bo.h"
#include "intel_bo.h"
+#include "intel_frontbuffer.h"
bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
@@ -33,10 +29,6 @@ bool intel_bo_is_protected(struct drm_gem_object *obj)
return xe_bo_is_protected(gem_to_xe_bo(obj));
}
-void intel_bo_flush_if_display(struct drm_gem_object *obj)
-{
-}
-
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
return drm_gem_prime_mmap(obj, vma);
@@ -49,104 +41,63 @@ int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, i
return xe_bo_read(bo, offset, dst, size);
}
-struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
-{
- return NULL;
-}
+struct xe_frontbuffer {
+ struct intel_frontbuffer base;
+ struct drm_gem_object *obj;
+ struct kref ref;
+};
-struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
- struct intel_frontbuffer *front)
+struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj)
{
- return front;
-}
+ struct xe_frontbuffer *front;
-void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
-{
- /* FIXME */
-}
+ front = kmalloc(sizeof(*front), GFP_KERNEL);
+ if (!front)
+ return NULL;
-struct xe_panic_data {
- struct page **pages;
- int page;
- void *vaddr;
-};
+ intel_frontbuffer_init(&front->base, obj->dev);
-struct xe_framebuffer {
- struct intel_framebuffer base;
- struct xe_panic_data panic;
-};
+ kref_init(&front->ref);
-static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
-{
- return &container_of_const(fb, struct xe_framebuffer, base)->panic;
-}
+ drm_gem_object_get(obj);
+ front->obj = obj;
-static void xe_panic_kunmap(struct xe_panic_data *panic)
-{
- if (panic->vaddr) {
- drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
- kunmap_local(panic->vaddr);
- panic->vaddr = NULL;
- }
+ return &front->base;
}
-/*
- * The scanout buffer pages are not mapped, so for each pixel,
- * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
- * Try to keep the map from the previous pixel, to avoid too much map/unmap.
- */
-static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
- unsigned int y, u32 color)
+void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
{
- struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct xe_panic_data *panic = to_xe_panic_data(fb);
- struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
- unsigned int new_page;
- unsigned int offset;
-
- if (fb->panic_tiling)
- offset = fb->panic_tiling(sb->width, x, y);
- else
- offset = y * sb->pitch[0] + x * sb->format->cpp[0];
-
- new_page = offset >> PAGE_SHIFT;
- offset = offset % PAGE_SIZE;
- if (new_page != panic->page) {
- xe_panic_kunmap(panic);
- panic->page = new_page;
- panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
- panic->page);
- }
- if (panic->vaddr) {
- u32 *pix = panic->vaddr + offset;
- *pix = color;
- }
+ struct xe_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ kref_get(&front->ref);
}
-struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
+static void frontbuffer_release(struct kref *ref)
{
- struct xe_framebuffer *xe_fb;
+ struct xe_frontbuffer *front =
+ container_of(ref, typeof(*front), ref);
- xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL);
- if (xe_fb)
- return &xe_fb->base;
- return NULL;
+ intel_frontbuffer_fini(&front->base);
+
+ drm_gem_object_put(front->obj);
+
+ kfree(front);
}
-int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
+void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
{
- struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
- struct xe_panic_data *panic = to_xe_panic_data(fb);
+ struct xe_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
- panic->page = -1;
- sb->set_pixel = xe_panic_page_set_pixel;
- return 0;
+ kref_put(&front->ref, frontbuffer_release);
}
-void intel_bo_panic_finish(struct intel_framebuffer *fb)
+void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front)
{
- struct xe_panic_data *panic = to_xe_panic_data(fb);
+}
- xe_panic_kunmap(panic);
- panic->page = -1;
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+{
+ /* FIXME */
}
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index ebdb22c9499d..db8b1a27b4de 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -24,8 +24,7 @@ void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
xe_bo_put(bo);
}
-int intel_fb_bo_framebuffer_init(struct drm_framebuffer *fb,
- struct drm_gem_object *obj,
+int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index fba9617a75a5..7ad76022cb14 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -3,50 +3,39 @@
* Copyright © 2023 Intel Corporation
*/
-#include <drm/drm_fb_helper.h>
+#include <linux/fb.h>
-#include "intel_display_core.h"
-#include "intel_display_types.h"
-#include "intel_fb.h"
#include "intel_fbdev_fb.h"
#include "xe_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_wa.h"
-#include <generated/xe_wa_oob.h>
+#include <generated/xe_device_wa_oob.h>
-struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
+/*
+ * FIXME: There shouldn't be any reason to have XE_PAGE_SIZE stride
+ * alignment. The same 64 as i915 uses should be fine, and we shouldn't need to
+ * have driver specific values. However, dropping the stride alignment to 64
+ * leads to underflowing the bo pin count in the atomic cleanup work.
+ */
+u32 intel_fbdev_fb_pitch_align(u32 stride)
{
- struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct xe_device *xe = to_xe_device(dev);
- struct drm_mode_fb_cmd2 mode_cmd = {};
- struct xe_bo *obj;
- int size;
-
- /* we don't do packed 24bpp */
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
+ return ALIGN(stride, XE_PAGE_SIZE);
+}
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
- DIV_ROUND_UP(sizes->surface_bpp, 8), XE_PAGE_SIZE);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
+struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_bo *obj;
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = PAGE_ALIGN(size);
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe) && !XE_WA(xe_root_mmio_gt(xe), 22019338487_display)) {
- obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
- NULL, size,
- ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT);
+ if (!IS_DGFX(xe) && !XE_DEVICE_WA(xe, 22019338487_display)) {
+ obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
+ size,
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT, false);
if (!IS_ERR(obj))
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
@@ -54,41 +43,30 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
}
if (IS_ERR(obj)) {
- obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, size,
- ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
- XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_GGTT);
+ obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), size,
+ ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+ XE_BO_FLAG_GGTT, false);
}
if (IS_ERR(obj)) {
drm_err(&xe->drm, "failed to allocate framebuffer (%pe)\n", obj);
- fb = ERR_PTR(-ENOMEM);
- goto err;
- }
-
- fb = intel_framebuffer_create(&obj->ttm.base,
- drm_get_format_info(dev,
- mode_cmd.pixel_format,
- mode_cmd.modifier[0]),
- &mode_cmd);
- if (IS_ERR(fb)) {
- xe_bo_unpin_map_no_vm(obj);
- goto err;
+ return ERR_PTR(-ENOMEM);
}
- drm_gem_object_put(&obj->ttm.base);
-
- return to_intel_framebuffer(fb);
+ return &obj->ttm.base;
+}
-err:
- return ERR_CAST(fb);
+void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj)
+{
+ xe_bo_unpin_map_no_vm(gem_to_xe_bo(obj));
}
-int intel_fbdev_fb_fill_info(struct intel_display *display, struct fb_info *info,
+int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *_obj, struct i915_vma *vma)
{
struct xe_bo *obj = gem_to_xe_bo(_obj);
- struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ struct pci_dev *pdev = to_pci_dev(drm->dev);
if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
if (obj->flags & XE_BO_FLAG_STOLEN)
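
Sketch of how a caller is expected to combine the new fbdev helpers when sizing
the buffer (names from the definitions above; the generic fbdev code that
actually makes these calls lives outside this diff, so treat this as an
illustration):

	u32 pitch = intel_fbdev_fb_pitch_align(width * DIV_ROUND_UP(bpp, 8));
	int size = PAGE_ALIGN(pitch * height);
	struct drm_gem_object *obj = intel_fbdev_fb_bo_create(drm, size);
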
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index e2e0771cf274..8b0afa270216 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -13,6 +13,8 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include <drm/intel/display_member.h>
+#include <drm/intel/display_parent_interface.h>
#include <uapi/drm/xe_drm.h>
#include "soc/intel_dram.h"
@@ -20,7 +22,7 @@
#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_display.h"
-#include "intel_display_core.h"
+#include "intel_display_device.h"
#include "intel_display_driver.h"
#include "intel_display_irq.h"
#include "intel_display_types.h"
@@ -33,16 +35,13 @@
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "skl_watermark.h"
+#include "xe_display_rpm.h"
#include "xe_module.h"
-/* Xe device functions */
+/* Ensure drm and display members are placed properly. */
+INTEL_DISPLAY_MEMBER_STATIC_ASSERT(struct xe_device, drm, display);
-static bool has_display(struct xe_device *xe)
-{
- struct intel_display *display = xe->display;
-
- return HAS_DISPLAY(display);
-}
+/* Xe device functions */
/**
* xe_display_driver_probe_defer - Detect if we need to wait for other drivers
@@ -96,6 +95,7 @@ static void xe_display_fini_early(void *arg)
if (!xe->info.probe_display)
return;
+ intel_hpd_cancel_work(display);
intel_display_driver_remove_nogem(display);
intel_display_driver_remove_noirq(display);
intel_opregion_cleanup(display);
@@ -229,15 +229,14 @@ void xe_display_irq_reset(struct xe_device *xe)
gen11_display_irq_reset(display);
}
-void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
+void xe_display_irq_postinstall(struct xe_device *xe)
{
struct intel_display *display = xe->display;
if (!xe->info.probe_display)
return;
- if (gt->info.id == XE_GT0)
- gen11_de_irq_postinstall(display);
+ gen11_de_irq_postinstall(display);
}
static bool suspend_to_idle(void)
@@ -289,7 +288,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe)
intel_dmc_suspend(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_hpd_poll_enable(display);
}
@@ -302,14 +301,14 @@ static void xe_display_disable_d3cold(struct xe_device *xe)
intel_dmc_resume(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
drm_mode_config_reset(&xe->drm);
intel_display_driver_init_hw(display);
intel_hpd_init(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_hpd_poll_disable(display);
intel_opregion_resume(display);
@@ -330,9 +329,9 @@ void xe_display_pm_suspend(struct xe_device *xe)
* properly.
*/
intel_power_domains_disable(display);
- drm_client_dev_suspend(&xe->drm, false);
+ drm_client_dev_suspend(&xe->drm);
- if (has_display(xe)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_disable_user_access(display);
intel_display_driver_suspend(display);
@@ -340,9 +339,11 @@ void xe_display_pm_suspend(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
+ intel_encoder_block_all_hpds(display);
+
intel_hpd_cancel_work(display);
- if (has_display(xe)) {
+ if (intel_display_device_present(display)) {
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
}
@@ -360,9 +361,9 @@ void xe_display_pm_shutdown(struct xe_device *xe)
return;
intel_power_domains_disable(display);
- drm_client_dev_suspend(&xe->drm, false);
+ drm_client_dev_suspend(&xe->drm);
- if (has_display(xe)) {
+ if (intel_display_device_present(display)) {
drm_kms_helper_poll_disable(&xe->drm);
intel_display_driver_disable_user_access(display);
intel_display_driver_suspend(display);
@@ -370,9 +371,10 @@ void xe_display_pm_shutdown(struct xe_device *xe)
xe_display_flush_cleanup_work(xe);
intel_dp_mst_suspend(display);
+ intel_encoder_block_all_hpds(display);
intel_hpd_cancel_work(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_display_driver_suspend_access(display);
intel_encoder_suspend_all(display);
@@ -461,28 +463,30 @@ void xe_display_pm_resume(struct xe_device *xe)
intel_dmc_resume(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
drm_mode_config_reset(&xe->drm);
intel_display_driver_init_hw(display);
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_display_driver_resume_access(display);
intel_hpd_init(display);
- if (has_display(xe)) {
+ intel_encoder_unblock_all_hpds(display);
+
+ if (intel_display_device_present(display)) {
intel_display_driver_resume(display);
drm_kms_helper_poll_enable(&xe->drm);
intel_display_driver_enable_user_access(display);
}
- if (has_display(xe))
+ if (intel_display_device_present(display))
intel_hpd_poll_disable(display);
intel_opregion_resume(display);
- drm_client_dev_resume(&xe->drm, false);
+ drm_client_dev_resume(&xe->drm);
intel_power_domains_enable(display);
}
@@ -512,6 +516,10 @@ static void display_device_remove(struct drm_device *dev, void *arg)
intel_display_device_remove(display);
}
+static const struct intel_display_parent_interface parent = {
+ .rpm = &xe_display_rpm_interface,
+};
+
/**
* xe_display_probe - probe display and create display struct
* @xe: XE device instance
@@ -532,7 +540,7 @@ int xe_display_probe(struct xe_device *xe)
if (!xe->info.probe_display)
goto no_display;
- display = intel_display_device_probe(pdev);
+ display = intel_display_device_probe(pdev, &parent);
if (IS_ERR(display))
return PTR_ERR(display);
@@ -542,7 +550,7 @@ int xe_display_probe(struct xe_device *xe)
xe->display = display;
- if (has_display(xe))
+ if (intel_display_device_present(display))
return 0;
no_display:
diff --git a/drivers/gpu/drm/xe/display/xe_display.h b/drivers/gpu/drm/xe/display/xe_display.h
index e533aa4750bc..76db95c25f7e 100644
--- a/drivers/gpu/drm/xe/display/xe_display.h
+++ b/drivers/gpu/drm/xe/display/xe_display.h
@@ -26,7 +26,7 @@ void xe_display_unregister(struct xe_device *xe);
void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl);
void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir);
void xe_display_irq_reset(struct xe_device *xe);
-void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt);
+void xe_display_irq_postinstall(struct xe_device *xe);
void xe_display_pm_suspend(struct xe_device *xe);
void xe_display_pm_shutdown(struct xe_device *xe);
@@ -55,7 +55,7 @@ static inline void xe_display_unregister(struct xe_device *xe) {}
static inline void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) {}
static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) {}
static inline void xe_display_irq_reset(struct xe_device *xe) {}
-static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {}
+static inline void xe_display_irq_postinstall(struct xe_device *xe) {}
static inline void xe_display_pm_suspend(struct xe_device *xe) {}
static inline void xe_display_pm_shutdown(struct xe_device *xe) {}
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c
index 3825376e98cc..340f65884812 100644
--- a/drivers/gpu/drm/xe/display/xe_display_rpm.c
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c
@@ -1,73 +1,74 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
+#include <drm/intel/display_parent_interface.h>
+
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_pm.h"
-static struct xe_device *display_to_xe(struct intel_display *display)
-{
- return to_xe_device(display->drm);
-}
-
-struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display)
+static struct ref_tracker *xe_display_rpm_get(const struct drm_device *drm)
{
- return intel_display_rpm_get(display);
+ return xe_pm_runtime_resume_and_get(to_xe_device(drm)) ? INTEL_WAKEREF_DEF : NULL;
}
-void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref)
+static struct ref_tracker *xe_display_rpm_get_if_in_use(const struct drm_device *drm)
{
- intel_display_rpm_put(display, wakeref);
+ return xe_pm_runtime_get_if_in_use(to_xe_device(drm)) ? INTEL_WAKEREF_DEF : NULL;
}
-struct ref_tracker *intel_display_rpm_get(struct intel_display *display)
+static struct ref_tracker *xe_display_rpm_get_noresume(const struct drm_device *drm)
{
- return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display)
-{
- return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL;
-}
-
-struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display)
-{
- xe_pm_runtime_get_noresume(display_to_xe(display));
+ xe_pm_runtime_get_noresume(to_xe_device(drm));
return INTEL_WAKEREF_DEF;
}
-void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref)
+static void xe_display_rpm_put(const struct drm_device *drm, struct ref_tracker *wakeref)
{
if (wakeref)
- xe_pm_runtime_put(display_to_xe(display));
+ xe_pm_runtime_put(to_xe_device(drm));
}
-void intel_display_rpm_put_unchecked(struct intel_display *display)
+static void xe_display_rpm_put_unchecked(const struct drm_device *drm)
{
- xe_pm_runtime_put(display_to_xe(display));
+ xe_pm_runtime_put(to_xe_device(drm));
}
-bool intel_display_rpm_suspended(struct intel_display *display)
+static bool xe_display_rpm_suspended(const struct drm_device *drm)
{
- struct xe_device *xe = display_to_xe(display);
+ struct xe_device *xe = to_xe_device(drm);
return pm_runtime_suspended(xe->drm.dev);
}
-void assert_display_rpm_held(struct intel_display *display)
+static void xe_display_rpm_assert_held(const struct drm_device *drm)
{
/* FIXME */
}
-void intel_display_rpm_assert_block(struct intel_display *display)
+static void xe_display_rpm_assert_block(const struct drm_device *drm)
{
/* FIXME */
}
-void intel_display_rpm_assert_unblock(struct intel_display *display)
+static void xe_display_rpm_assert_unblock(const struct drm_device *drm)
{
/* FIXME */
}
+
+const struct intel_display_rpm_interface xe_display_rpm_interface = {
+ .get = xe_display_rpm_get,
+ .get_raw = xe_display_rpm_get,
+ .get_if_in_use = xe_display_rpm_get_if_in_use,
+ .get_noresume = xe_display_rpm_get_noresume,
+ .put = xe_display_rpm_put,
+ .put_raw = xe_display_rpm_put,
+ .put_unchecked = xe_display_rpm_put_unchecked,
+ .suspended = xe_display_rpm_suspended,
+ .assert_held = xe_display_rpm_assert_held,
+ .assert_block = xe_display_rpm_assert_block,
+ .assert_unblock = xe_display_rpm_assert_unblock
+};
diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.h b/drivers/gpu/drm/xe/display/xe_display_rpm.h
new file mode 100644
index 000000000000..0bf9d31e87c1
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_rpm.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_DISPLAY_RPM_H_
+#define _XE_DISPLAY_RPM_H_
+
+extern const struct intel_display_rpm_interface xe_display_rpm_interface;
+
+#endif /* _XE_DISPLAY_RPM_H_ */
diff --git a/drivers/gpu/drm/xe/display/xe_display_wa.c b/drivers/gpu/drm/xe/display/xe_display_wa.c
index 68d1387d81a0..2aa1b8c03411 100644
--- a/drivers/gpu/drm/xe/display/xe_display_wa.c
+++ b/drivers/gpu/drm/xe/display/xe_display_wa.c
@@ -13,6 +13,7 @@
bool intel_display_needs_wa_16023588340(struct intel_display *display)
{
struct xe_device *xe = to_xe_device(display->drm);
+ struct xe_gt *wa_gt = xe_root_mmio_gt(xe);
- return XE_WA(xe_root_mmio_gt(xe), 16023588340);
+ return wa_gt && XE_GT_WA(wa_gt, 16023588340);
}
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index 9f941fc2e36b..58581d7aaae6 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -43,11 +43,11 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
return false;
/* Set scanout flag for WC mapping */
- obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe),
- NULL, PAGE_ALIGN(size),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
- XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT);
+ obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
+ PAGE_ALIGN(size),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
+ XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT, false);
if (IS_ERR(obj)) {
kfree(vma);
return false;
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index c38fba18effe..1fd4a815e784 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -16,6 +16,7 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_pm.h"
+#include "xe_vram_types.h"
static void
write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs,
@@ -101,29 +102,29 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
XE_PAGE_SIZE);
if (IS_DGFX(xe))
- dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
- dpt_size, ~0ull,
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM0 |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE,
- alignment);
+ dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment, false);
else
- dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
- dpt_size, ~0ull,
- ttm_bo_type_kernel,
- XE_BO_FLAG_STOLEN |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE,
- alignment);
+ dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_STOLEN |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment, false);
if (IS_ERR(dpt))
- dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL,
- dpt_size, ~0ull,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_PAGETABLE,
- alignment);
+ dpt = xe_bo_create_pin_map_at_novm(xe, tile0,
+ dpt_size, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_PAGETABLE,
+ alignment, false);
if (IS_ERR(dpt))
return PTR_ERR(dpt);
@@ -280,7 +281,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct xe_bo *bo = gem_to_xe_bo(obj);
- int ret;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ int ret = 0;
if (!vma)
return ERR_PTR(-ENODEV);
@@ -289,7 +292,7 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
- struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_vram_region *vram = xe_device_get_root_tile(xe)->mem.vram;
/*
* If we need to be able to access the clear-color value stored in
@@ -297,7 +300,7 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
* accessible. This is important on small-bar systems where
* only some subset of VRAM is CPU accessible.
*/
- if (tile->mem.vram.io_size < tile->mem.vram.usable_size) {
+ if (xe_vram_region_io_size(vram) < xe_vram_region_usable_size(vram)) {
ret = -EINVAL;
goto err;
}
@@ -307,17 +310,22 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
* Pin the framebuffer, we can't use xe_bo_(un)pin functions as the
* assumptions are incorrect for framebuffers
*/
- ret = ttm_bo_reserve(&bo->ttm, false, false, NULL);
- if (ret)
- goto err;
-
- if (IS_DGFX(xe))
- ret = xe_bo_migrate(bo, XE_PL_VRAM0);
- else
- ret = xe_bo_validate(bo, NULL, true);
- if (!ret)
- ttm_bo_pin(&bo->ttm);
- ttm_bo_unreserve(&bo->ttm);
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+
+ if (IS_DGFX(xe))
+ ret = xe_bo_migrate(bo, XE_PL_VRAM0, NULL, &exec);
+ else
+ ret = xe_bo_validate(bo, NULL, true, &exec);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ if (!ret)
+ ttm_bo_pin(&bo->ttm);
+ }
if (ret)
goto err;
@@ -382,6 +390,7 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
const struct intel_plane_state *old_plane_state)
{
struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
struct xe_device *xe = to_xe_device(fb->base.dev);
struct intel_display *display = xe->display;
struct i915_vma *vma;
@@ -405,6 +414,10 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state,
found:
refcount_inc(&vma->ref);
new_plane_state->ggtt_vma = vma;
+
+ new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ plane->surf_offset(new_plane_state);
+
return true;
}
@@ -431,6 +444,10 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
return PTR_ERR(vma);
new_plane_state->ggtt_vma = vma;
+
+ new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ plane->surf_offset(new_plane_state);
+
return 0;
}
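For readers tracking the locking change: the __xe_pin_fb_vma() hunk above replaces the open-coded ttm_bo_reserve()/ttm_bo_pin()/ttm_bo_unreserve() sequence with a drm_exec-based validation transaction. A minimal sketch of that transaction shape, assuming (as the hunk suggests, though it is not a documented contract here) that xe_validation_guard() re-runs its body whenever the retry helpers request another pass:

	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	xe_validation_guard(&ctx, &xe->val, &exec,
			    (struct xe_val_flags) {.interruptible = true}, ret) {
		/* Lock the GEM object; on contention, unwind and retry the body. */
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		/* Validate under the transaction; retry on contention or OOM. */
		ret = xe_bo_validate(bo, NULL, true, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);
		if (!ret)
			ttm_bo_pin(&bo->ttm);
	}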
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 30f1073141fc..4ae847b628e2 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -72,10 +72,10 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
int ret = 0;
/* allocate an object of two pages for HDCP command memory and store it */
- bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe), PAGE_SIZE * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT, false);
if (IS_ERR(bo)) {
drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
diff --git a/drivers/gpu/drm/xe/display/xe_panic.c b/drivers/gpu/drm/xe/display/xe_panic.c
new file mode 100644
index 000000000000..df663286092a
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_panic.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include <drm/drm_cache.h>
+#include <drm/drm_panic.h>
+
+#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_panic.h"
+#include "xe_bo.h"
+#include "xe_res_cursor.h"
+
+struct intel_panic {
+ struct xe_res_cursor res;
+ struct iosys_map vmap;
+
+ int page;
+};
+
+static void xe_panic_kunmap(struct intel_panic *panic)
+{
+ if (!panic->vmap.is_iomem && iosys_map_is_set(&panic->vmap)) {
+ drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE);
+ kunmap_local(panic->vmap.vaddr);
+ }
+ iosys_map_clear(&panic->vmap);
+ panic->page = -1;
+}
+
+/*
+ * The scanout buffer pages are not mapped, so for each pixel,
+ * use kmap_local_page_try_from_panic() to map the page, and write the pixel.
+ * Try to keep the map from the previous pixel, to avoid too much map/unmap.
+ */
+static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
+ unsigned int y, u32 color)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct intel_panic *panic = fb->panic;
+ struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
+ unsigned int new_page;
+ unsigned int offset;
+
+ if (fb->panic_tiling)
+ offset = fb->panic_tiling(sb->width, x, y);
+ else
+ offset = y * sb->pitch[0] + x * sb->format->cpp[0];
+
+ new_page = offset >> PAGE_SHIFT;
+ offset = offset % PAGE_SIZE;
+ if (new_page != panic->page) {
+ if (xe_bo_is_vram(bo)) {
+ /* Display is always mapped on root tile */
+ struct xe_vram_region *vram = xe_bo_device(bo)->mem.vram;
+
+ if (panic->page < 0 || new_page < panic->page) {
+ xe_res_first(bo->ttm.resource, new_page * PAGE_SIZE,
+ bo->ttm.base.size - new_page * PAGE_SIZE, &panic->res);
+ } else {
+ xe_res_next(&panic->res, PAGE_SIZE * (new_page - panic->page));
+ }
+ iosys_map_set_vaddr_iomem(&panic->vmap,
+ vram->mapping + panic->res.start);
+ } else {
+ xe_panic_kunmap(panic);
+ iosys_map_set_vaddr(&panic->vmap,
+ ttm_bo_kmap_try_from_panic(&bo->ttm,
+ new_page));
+ }
+ panic->page = new_page;
+ }
+
+ if (iosys_map_is_set(&panic->vmap))
+ iosys_map_wr(&panic->vmap, offset, u32, color);
+}
+
+struct intel_panic *intel_panic_alloc(void)
+{
+ struct intel_panic *panic;
+
+ panic = kzalloc(sizeof(*panic), GFP_KERNEL);
+
+ return panic;
+}
+
+int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
+{
+ struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
+ struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
+
+ if (xe_bo_is_vram(bo) && !xe_bo_is_visible_vram(bo))
+ return -ENODEV;
+
+ panic->page = -1;
+ sb->set_pixel = xe_panic_page_set_pixel;
+ return 0;
+}
+
+void intel_panic_finish(struct intel_panic *panic)
+{
+ xe_panic_kunmap(panic);
+}
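A quick worked example of the linear-layout offset math in xe_panic_page_set_pixel() above (numbers illustrative; the tiled path goes through fb->panic_tiling() instead):

	/* pitch = 16384 bytes, cpp = 4: pixel (x = 5, y = 3) lands at
	 * offset = 3 * 16384 + 5 * 4 = 49172, which is page 49172 >> 12 = 12
	 * (4 KiB pages) at in-page offset 49172 % 4096 = 20. The function only
	 * remaps when the page index changes; consecutive pixels on the same
	 * page reuse the cached panic->vmap.
	 */
	unsigned int offset = 3 * 16384 + 5 * 4;	/* 49172 */
	unsigned int page = offset >> PAGE_SHIFT;	/* 12 */
	unsigned int in_page = offset % PAGE_SIZE;	/* 20 */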
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index dcbc4b2d3fd9..12d25c5290fd 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -10,6 +10,7 @@
#include "xe_ggtt.h"
#include "xe_mmio.h"
+#include "i915_vma.h"
#include "intel_crtc.h"
#include "intel_display.h"
#include "intel_display_core.h"
@@ -21,9 +22,10 @@
#include "intel_plane.h"
#include "intel_plane_initial.h"
#include "xe_bo.h"
+#include "xe_vram_types.h"
#include "xe_wa.h"
-#include <generated/xe_wa_oob.h>
+#include <generated/xe_device_wa_oob.h>
void intel_plane_initial_vblank_wait(struct intel_crtc *crtc)
{
@@ -103,7 +105,7 @@ initial_plane_bo(struct xe_device *xe,
* We don't currently expect this to ever be placed in the
* stolen portion.
*/
- if (phys_base >= tile0->mem.vram.usable_size) {
+ if (phys_base >= xe_vram_region_usable_size(tile0->mem.vram)) {
drm_err(&xe->drm,
"Initial plane programming using invalid range, phys_base=%pa\n",
&phys_base);
@@ -121,7 +123,7 @@ initial_plane_bo(struct xe_device *xe,
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
- if (XE_WA(xe_root_mmio_gt(xe), 22019338487_display))
+ if (XE_DEVICE_WA(xe, 22019338487_display))
return NULL;
/*
@@ -138,8 +140,8 @@ initial_plane_bo(struct xe_device *xe,
page_size);
size -= base;
- bo = xe_bo_create_pin_map_at(xe, tile0, NULL, size, phys_base,
- ttm_bo_type_kernel, flags);
+ bo = xe_bo_create_pin_map_at_novm(xe, tile0, size, phys_base,
+ ttm_bo_type_kernel, flags, 0, false);
if (IS_ERR(bo)) {
drm_dbg(&xe->drm,
"Failed to create bo phys_base=%pa size %u with flags %x: %li\n",
@@ -234,6 +236,9 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
goto nofb;
plane_state->ggtt_vma = vma;
+
+ plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma);
+
plane_state->uapi.src_x = 0;
plane_state->uapi.src_y = 0;
plane_state->uapi.src_w = fb->width << 16;
diff --git a/drivers/gpu/drm/xe/display/xe_stolen.c b/drivers/gpu/drm/xe/display/xe_stolen.c
new file mode 100644
index 000000000000..9f04ba36e930
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_stolen.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2025 Intel Corporation */
+
+#include "gem/i915_gem_stolen.h"
+#include "xe_res_cursor.h"
+#include "xe_ttm_stolen_mgr.h"
+#include "xe_validation.h"
+
+struct intel_stolen_node {
+ struct xe_device *xe;
+ struct xe_bo *bo;
+};
+
+int i915_gem_stolen_insert_node_in_range(struct intel_stolen_node *node, u64 size,
+ unsigned int align, u64 start, u64 end)
+{
+ struct xe_device *xe = node->xe;
+
+ struct xe_bo *bo;
+ int err = 0;
+ u32 flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_STOLEN;
+
+ if (start < SZ_4K)
+ start = SZ_4K;
+
+ if (align) {
+ size = ALIGN(size, align);
+ start = ALIGN(start, align);
+ }
+
+ bo = xe_bo_create_pin_range_novm(xe, xe_device_get_root_tile(xe),
+ size, start, end, ttm_bo_type_kernel, flags);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ bo = NULL;
+ return err;
+ }
+
+ node->bo = bo;
+
+ return err;
+}
+
+int i915_gem_stolen_insert_node(struct intel_stolen_node *node, u64 size, unsigned int align)
+{
+ /* Not used on xe */
+ WARN_ON(1);
+
+ return -ENODEV;
+}
+
+void i915_gem_stolen_remove_node(struct intel_stolen_node *node)
+{
+ xe_bo_unpin_map_no_vm(node->bo);
+ node->bo = NULL;
+}
+
+bool i915_gem_stolen_initialized(struct drm_device *drm)
+{
+ struct xe_device *xe = to_xe_device(drm);
+
+ return ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
+}
+
+bool i915_gem_stolen_node_allocated(const struct intel_stolen_node *node)
+{
+ return node->bo;
+}
+
+u32 i915_gem_stolen_node_offset(struct intel_stolen_node *node)
+{
+ struct xe_res_cursor res;
+
+ xe_res_first(node->bo->ttm.resource, 0, 4096, &res);
+ return res.start;
+}
+
+/* Used for < gen4. These are not supported by Xe */
+u64 i915_gem_stolen_area_address(struct drm_device *drm)
+{
+ WARN_ON(1);
+
+ return 0;
+}
+
+/* Used for gen9 specific WA. Gen9 is not supported by Xe */
+u64 i915_gem_stolen_area_size(struct drm_device *drm)
+{
+ WARN_ON(1);
+
+ return 0;
+}
+
+u64 i915_gem_stolen_node_address(struct intel_stolen_node *node)
+{
+ struct xe_device *xe = node->xe;
+
+ return xe_ttm_stolen_gpu_offset(xe) + i915_gem_stolen_node_offset(node);
+}
+
+u64 i915_gem_stolen_node_size(const struct intel_stolen_node *node)
+{
+ return node->bo->ttm.base.size;
+}
+
+struct intel_stolen_node *i915_gem_stolen_node_alloc(struct drm_device *drm)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct intel_stolen_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return NULL;
+
+ node->xe = xe;
+
+ return node;
+}
+
+void i915_gem_stolen_node_free(const struct intel_stolen_node *node)
+{
+ kfree(node);
+}
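For orientation, a hedged sketch of how a display-side caller would be expected to drive this shim (error handling abbreviated; the size and alignment values are illustrative, not taken from the patch):

	struct intel_stolen_node *node;
	int err;

	node = i915_gem_stolen_node_alloc(drm);
	if (!node)
		return -ENOMEM;

	/* Reserve SZ_1M of stolen memory, 4 KiB aligned, anywhere in range. */
	err = i915_gem_stolen_insert_node_in_range(node, SZ_1M, SZ_4K, 0, U64_MAX);
	if (!err) {
		u32 offset = i915_gem_stolen_node_offset(node);		/* within stolen */
		u64 gpu_addr = i915_gem_stolen_node_address(node);	/* GPU view */

		/* ... program hardware using offset/gpu_addr ... */

		i915_gem_stolen_remove_node(node);
	}
	i915_gem_stolen_node_free(node);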
diff --git a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
index 8cfcd3360896..5d41ca297447 100644
--- a/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_gpu_commands.h
@@ -31,6 +31,12 @@
#define XY_FAST_COPY_BLT_D1_DST_TILE4 REG_BIT(30)
#define XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK GENMASK(23, 20)
+#define MEM_COPY_CMD (2 << 29 | 0x5a << 22 | 0x8)
+#define MEM_COPY_PAGE_COPY_MODE REG_BIT(19)
+#define MEM_COPY_MATRIX_COPY REG_BIT(17)
+#define MEM_COPY_SRC_MOCS_INDEX_MASK GENMASK(31, 28)
+#define MEM_COPY_DST_MOCS_INDEX_MASK GENMASK(6, 3)
+
#define PVC_MEM_SET_CMD (2 << 29 | 0x5b << 22)
#define PVC_MEM_SET_CMD_LEN_DW 7
#define PVC_MEM_SET_MATRIX REG_BIT(17)
diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
index e3f5e8bb3ebc..c47b290e0e9f 100644
--- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
+++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h
@@ -65,6 +65,7 @@
#define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4))
#define MI_LRM_USE_GGTT REG_BIT(22)
+#define MI_LRM_ASYNC REG_BIT(21)
#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3))
#define MI_LRR_DST_CS_MMIO REG_BIT(19)
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index 7ade41e2b7b3..68172b0248a6 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -111,6 +111,9 @@
#define PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS REG_BIT(14)
#define CS_PRIORITY_MEM_READ REG_BIT(7)
+#define CS_DEBUG_MODE2(base) XE_REG((base) + 0xd8, XE_REG_OPTION_MASKED)
+#define INSTRUCTION_STATE_CACHE_INVALIDATE REG_BIT(6)
+
#define FF_SLICE_CS_CHICKEN1(base) XE_REG((base) + 0xe0, XE_REG_OPTION_MASKED)
#define FFSC_PERCTX_PREEMPT_CTRL REG_BIT(14)
@@ -138,6 +141,8 @@
#define INHIBIT_SWITCH_UNTIL_PREEMPTED REG_BIT(31)
#define IDLE_DELAY REG_GENMASK(20, 0)
+#define RING_CURRENT_LRCA(base) XE_REG((base) + 0x240)
+
#define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
#define CTX_CTRL_PXP_ENABLE REG_BIT(10)
#define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8)
@@ -150,6 +155,8 @@
#define GFX_DISABLE_LEGACY_MODE REG_BIT(3)
#define GFX_MSIX_INTERRUPT_ENABLE REG_BIT(13)
+#define RING_CSMQDEBUG(base) XE_REG((base) + 0x2b0)
+
#define RING_TIMESTAMP(base) XE_REG((base) + 0x358)
#define RING_TIMESTAMP_UDW(base) XE_REG((base) + 0x358 + 4)
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index 9b66cc972a63..180be82672ab 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -13,6 +13,8 @@
/* Definitions of GSC H/W registers, bits, etc */
+#define BMG_GSC_HECI1_BASE 0x373000
+
#define MTL_GSC_HECI1_BASE 0x00116000
#define MTL_GSC_HECI2_BASE 0x00117000
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 5cd5ab8529c5..917a088c28f2 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -37,12 +37,18 @@
#define GMD_ID XE_REG(0xd8c)
#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
+/*
+ * The spec defines these bits as "Reserved", but then makes them assume a
+ * meaning that depends on the ARCH. To avoid any confusion, call them
+ * SUBIP_FLAG_MASK.
+ */
+#define GMD_ID_SUBIP_FLAG_MASK REG_GENMASK(13, 6)
#define GMD_ID_REVID REG_GENMASK(5, 0)
#define FORCEWAKE_ACK_GSC XE_REG(0xdf8)
#define FORCEWAKE_ACK_GT_MTL XE_REG(0xdfc)
-#define MCFG_MCR_SELECTOR XE_REG(0xfd0)
+#define STEER_SEMAPHORE XE_REG(0xfd0)
#define MTL_MCR_SELECTOR XE_REG(0xfd4)
#define SF_MCR_SELECTOR XE_REG(0xfd8)
#define MCR_SELECTOR XE_REG(0xfdc)
@@ -95,7 +101,6 @@
#define XE2_LMEM_CFG XE_REG(0x48b0)
-#define XEHP_TILE_ADDR_RANGE(_idx) XE_REG_MCR(0x4900 + (_idx) * 4)
#define XEHP_FLAT_CCS_BASE_ADDR XE_REG_MCR(0x4910)
#define XEHP_FLAT_CCS_PTR REG_GENMASK(31, 8)
@@ -168,6 +173,7 @@
#define XEHP_SLICE_COMMON_ECO_CHICKEN1 XE_REG_MCR(0x731c, XE_REG_OPTION_MASKED)
#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14)
+#define FAST_CLEAR_VALIGN_FIX REG_BIT(13)
#define XE2LPM_CCCHKNREG1 XE_REG(0x82a8)
@@ -239,6 +245,9 @@
#define XE2_GT_GEOMETRY_DSS_1 XE_REG(0x9150)
#define XE2_GT_GEOMETRY_DSS_2 XE_REG(0x9154)
+#define SERVICE_COPY_ENABLE XE_REG(0x9170)
+#define FUSE_SERVICE_COPY_ENABLE_MASK REG_GENMASK(7, 0)
+
#define GDRST XE_REG(0x941c)
#define GRDOM_GUC REG_BIT(3)
#define GRDOM_FULL REG_BIT(0)
@@ -342,13 +351,10 @@
#define POWERGATE_ENABLE XE_REG(0xa210)
#define RENDER_POWERGATE_ENABLE REG_BIT(0)
#define MEDIA_POWERGATE_ENABLE REG_BIT(1)
+#define MEDIA_SAMPLERS_POWERGATE_ENABLE REG_BIT(2)
#define VDN_HCP_POWERGATE_ENABLE(n) REG_BIT(3 + 2 * (n))
#define VDN_MFXVDENC_POWERGATE_ENABLE(n) REG_BIT(4 + 2 * (n))
-#define CTC_MODE XE_REG(0xa26c)
-#define CTC_SHIFT_PARAMETER_MASK REG_GENMASK(2, 1)
-#define CTC_SOURCE_DIVIDE_LOGIC REG_BIT(0)
-
#define FORCEWAKE_RENDER XE_REG(0xa278)
#define POWERGATE_DOMAIN_STATUS XE_REG(0xa2a0)
@@ -522,6 +528,7 @@
#define TDL_CHICKEN XE_REG_MCR(0xe5f4, XE_REG_OPTION_MASKED)
#define QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE REG_BIT(12)
+#define EUSTALL_PERF_SAMPLING_DISABLE REG_BIT(5)
#define LSC_CHICKEN_BIT_0 XE_REG_MCR(0xe7c8)
#define DISABLE_D8_D16_COASLESCE REG_BIT(30)
@@ -543,6 +550,9 @@
#define SARB_CHICKEN1 XE_REG_MCR(0xe90c)
#define COMP_CKN_IN REG_GENMASK(30, 29)
+#define MAIN_GAMCTRL_MODE XE_REG(0xef00)
+#define MAIN_GAMCTRL_QUEUE_SELECT REG_BIT(0)
+
#define RCU_MODE XE_REG(0x14800, XE_REG_OPTION_MASKED)
#define RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
#define RCU_MODE_CCS_ENABLE REG_BIT(0)
@@ -579,6 +589,7 @@
#define GT_GFX_RC6 XE_REG(0x138108)
#define GT0_PERF_LIMIT_REASONS XE_REG(0x1381a8)
+/* Common performance limit reason bits - available on all platforms */
#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
#define PROCHOT_MASK REG_BIT(0)
#define THERMAL_LIMIT_MASK REG_BIT(1)
@@ -588,6 +599,18 @@
#define POWER_LIMIT_4_MASK REG_BIT(8)
#define POWER_LIMIT_1_MASK REG_BIT(10)
#define POWER_LIMIT_2_MASK REG_BIT(11)
+/* Platform-specific performance limit reason bits - for Crescent Island */
+#define CRI_PERF_LIMIT_REASONS_MASK 0xfdff
+#define SOC_THERMAL_LIMIT_MASK REG_BIT(1)
+#define MEM_THERMAL_MASK REG_BIT(2)
+#define VR_THERMAL_MASK REG_BIT(3)
+#define ICCMAX_MASK REG_BIT(4)
+#define SOC_AVG_THERMAL_MASK REG_BIT(6)
+#define FASTVMODE_MASK REG_BIT(7)
+#define PSYS_PL1_MASK REG_BIT(12)
+#define PSYS_PL2_MASK REG_BIT(13)
+#define P0_FREQ_MASK REG_BIT(14)
+#define PSYS_CRIT_MASK REG_BIT(15)
#define GT_PERF_STATUS XE_REG(0x1381b4)
#define VOLTAGE_MASK REG_GENMASK(10, 0)
diff --git a/drivers/gpu/drm/xe/regs/xe_hw_error_regs.h b/drivers/gpu/drm/xe/regs/xe_hw_error_regs.h
new file mode 100644
index 000000000000..c146b9ef44eb
--- /dev/null
+++ b/drivers/gpu/drm/xe/regs/xe_hw_error_regs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_HW_ERROR_REGS_H_
+#define _XE_HW_ERROR_REGS_H_
+
+#define HEC_UNCORR_ERR_STATUS(base) XE_REG((base) + 0x118)
+#define UNCORR_FW_REPORTED_ERR BIT(6)
+
+#define HEC_UNCORR_FW_ERR_DW0(base) XE_REG((base) + 0x124)
+
+#define DEV_ERR_STAT_NONFATAL 0x100178
+#define DEV_ERR_STAT_CORRECTABLE 0x10017c
+#define DEV_ERR_STAT_REG(x) XE_REG(_PICK_EVEN((x), \
+ DEV_ERR_STAT_CORRECTABLE, \
+ DEV_ERR_STAT_NONFATAL))
+#define XE_CSC_ERROR BIT(17)
+#endif
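The DEV_ERR_STAT_REG() indexing leans on _PICK_EVEN(); assuming the driver's _PICK_EVEN(i, a, b) evaluates to a + i * (b - a) (the macro is defined elsewhere, so treat this as an assumption), index 0 resolves to 0x10017c (correctable) and index 1 to 0x100178 (non-fatal). A handler could then scan both banks with one loop; the xe_mmio_read32() usage below is likewise illustrative:

	int i;

	for (i = 0; i < 2; i++) {
		/* i == 0: DEV_ERR_STAT_CORRECTABLE, i == 1: DEV_ERR_STAT_NONFATAL */
		u32 err_stat = xe_mmio_read32(mmio, DEV_ERR_STAT_REG(i));

		if (err_stat & XE_CSC_ERROR)
			; /* a CSC error is pending in this bank */
	}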
diff --git a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
index af781c8e4a80..f2e455e2bfe4 100644
--- a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h
@@ -14,6 +14,9 @@
#define REG_SG_REMAP_ADDR_PREFIX XE_REG(SOC_BASE + 0x0164)
#define REG_SG_REMAP_ADDR_POSTFIX XE_REG(SOC_BASE + 0x0168)
+#define I2C_BRIDGE_PCICFGCTL XE_REG(I2C_BRIDGE_OFFSET + 0x200)
+#define ACPI_INTR_EN REG_BIT(1)
+
#define I2C_CONFIG_CMD XE_REG(I2C_CONFIG_SPACE_OFFSET + PCI_COMMAND)
#define I2C_CONFIG_PMCSR XE_REG(I2C_CONFIG_SPACE_OFFSET + 0x84)
diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
index 13635e4331d4..2f97662d958d 100644
--- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h
@@ -18,6 +18,7 @@
#define GFX_MSTR_IRQ XE_REG(0x190010, XE_REG_OPTION_VF)
#define MASTER_IRQ REG_BIT(31)
#define GU_MISC_IRQ REG_BIT(29)
+#define ERROR_IRQ(x) REG_BIT(26 + (x))
#define DISPLAY_IRQ REG_BIT(16)
#define I2C_IRQ REG_BIT(12)
#define GT_DW_IRQ(x) REG_BIT(x)
@@ -64,7 +65,10 @@
#define BCS_RSVD_INTR_MASK XE_REG(0x1900a0, XE_REG_OPTION_VF)
#define VCS0_VCS1_INTR_MASK XE_REG(0x1900a8, XE_REG_OPTION_VF)
#define VCS2_VCS3_INTR_MASK XE_REG(0x1900ac, XE_REG_OPTION_VF)
+#define VCS4_VCS5_INTR_MASK XE_REG(0x1900b0, XE_REG_OPTION_VF)
+#define VCS6_VCS7_INTR_MASK XE_REG(0x1900b4, XE_REG_OPTION_VF)
#define VECS0_VECS1_INTR_MASK XE_REG(0x1900d0, XE_REG_OPTION_VF)
+#define VECS2_VECS3_INTR_MASK XE_REG(0x1900d4, XE_REG_OPTION_VF)
#define HECI2_RSVD_INTR_MASK XE_REG(0x1900e4)
#define GUC_SG_INTR_MASK XE_REG(0x1900e8, XE_REG_OPTION_VF)
#define GPM_WGBOXPERF_INTR_MASK XE_REG(0x1900ec, XE_REG_OPTION_VF)
@@ -79,9 +83,10 @@
#define GT_WAIT_SEMAPHORE_INTERRUPT REG_BIT(11)
#define GT_CONTEXT_SWITCH_INTERRUPT REG_BIT(8)
#define GSC_ER_COMPLETE REG_BIT(5)
-#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT REG_BIT(4)
+#define GT_FLUSH_COMPLETE_INTERRUPT REG_BIT(4)
#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
-#define GT_RENDER_USER_INTERRUPT REG_BIT(0)
+#define GT_COMPUTE_WALKER_INTERRUPT REG_BIT(2)
+#define GT_MI_USER_INTERRUPT REG_BIT(0)
/* irqs for OTHER_KCR_INSTANCE */
#define KCR_PXP_STATE_TERMINATED_INTERRUPT REG_BIT(1)
diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
index 1b101edb838b..b5eff383902c 100644
--- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
+++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h
@@ -40,7 +40,4 @@
#define INDIRECT_CTX_RING_START_UDW (0x08 + 1)
#define INDIRECT_CTX_RING_CTL (0x0a + 1)
-#define CTX_INDIRECT_CTX_OFFSET_MASK REG_GENMASK(15, 6)
-#define CTX_INDIRECT_CTX_OFFSET_DEFAULT REG_FIELD_PREP(CTX_INDIRECT_CTX_OFFSET_MASK, 0xd)
-
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
index 2995d72c3f78..0f79c0714454 100644
--- a/drivers/gpu/drm/xe/regs/xe_pmt.h
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -21,4 +21,15 @@
#define SG_REMAP_INDEX1 XE_REG(SOC_BASE + 0x08)
#define SG_REMAP_BITS REG_GENMASK(31, 24)
+#define BMG_MODS_RESIDENCY_OFFSET (0x4D0)
+#define BMG_G2_RESIDENCY_OFFSET (0x530)
+#define BMG_G6_RESIDENCY_OFFSET (0x538)
+#define BMG_G7_RESIDENCY_OFFSET (0x4B0)
+#define BMG_G8_RESIDENCY_OFFSET (0x540)
+#define BMG_G10_RESIDENCY_OFFSET (0x548)
+
+#define BMG_PCIE_LINK_L0_RESIDENCY_OFFSET (0x570)
+#define BMG_PCIE_LINK_L1_RESIDENCY_OFFSET (0x578)
+#define BMG_PCIE_LINK_L1_2_RESIDENCY_OFFSET (0x580)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h
index 1926b4044314..ad93c57edd17 100644
--- a/drivers/gpu/drm/xe/regs/xe_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_regs.h
@@ -40,6 +40,8 @@
#define STOLEN_RESERVED XE_REG(0x1082c0)
#define WOPCM_SIZE_MASK REG_GENMASK64(9, 7)
+#define SG_TILE_ADDR_RANGE(_idx) XE_REG(0x1083a0 + (_idx) * 4)
+
#define MTL_RP_STATE_CAP XE_REG(0x138000)
#define MTL_GT_RPA_FREQUENCY XE_REG(0x138008)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 7b40cc8be1c9..2294cf89f3e1 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -23,7 +23,7 @@
static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
bool clear, u64 get_val, u64 assign_val,
- struct kunit *test)
+ struct kunit *test, struct drm_exec *exec)
{
struct dma_fence *fence;
struct ttm_tt *ttm;
@@ -35,7 +35,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
u32 offset;
/* Move bo to VRAM if not already there. */
- ret = xe_bo_validate(bo, NULL, false);
+ ret = xe_bo_validate(bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate bo.\n");
return ret;
@@ -60,7 +60,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo,
}
/* Evict to system. CCS data should be copied. */
- ret = xe_bo_evict(bo);
+ ret = xe_bo_evict(bo, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return ret;
@@ -132,14 +132,15 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
/* TODO: Sanity check */
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
if (IS_DGFX(xe))
kunit_info(test, "Testing vram id %u\n", tile->id);
else
kunit_info(test, "Testing system memory\n");
- bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags);
+ bo = xe_bo_create_user(xe, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
+ bo_flags, exec);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "Failed to create bo.\n");
return;
@@ -149,18 +150,18 @@ static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile,
kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
- test);
+ test, exec);
if (ret)
goto out_unlock;
kunit_info(test, "Verifying that CCS data survives migration.\n");
ret = ccs_test_migrate(tile, bo, false, 0xdeadbeefdeadbeefULL,
- 0xdeadbeefdeadbeefULL, test);
+ 0xdeadbeefdeadbeefULL, test, exec);
if (ret)
goto out_unlock;
kunit_info(test, "Verifying that CCS data can be properly cleared.\n");
- ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test);
+ ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test, exec);
out_unlock:
xe_bo_unlock(bo);
@@ -210,6 +211,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
struct xe_bo *bo, *external;
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate);
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
struct xe_gt *__gt;
int err, i, id;
@@ -218,25 +220,25 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
for (i = 0; i < 2; ++i) {
xe_vm_lock(vm, false);
- bo = xe_bo_create_user(xe, NULL, vm, 0x10000,
+ bo = xe_bo_create_user(xe, vm, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags);
+ bo_flags, exec);
xe_vm_unlock(vm);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "bo create err=%pe\n", bo);
break;
}
- external = xe_bo_create_user(xe, NULL, NULL, 0x10000,
+ external = xe_bo_create_user(xe, NULL, 0x10000,
DRM_XE_GEM_CPU_CACHING_WC,
- bo_flags);
+ bo_flags, NULL);
if (IS_ERR(external)) {
KUNIT_FAIL(test, "external bo create err=%pe\n", external);
goto cleanup_bo;
}
xe_bo_lock(external, false);
- err = xe_bo_pin_external(external, false);
+ err = xe_bo_pin_external(external, false, exec);
xe_bo_unlock(external);
if (err) {
KUNIT_FAIL(test, "external bo pin err=%pe\n",
@@ -294,7 +296,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
if (i) {
down_read(&vm->lock);
xe_vm_lock(vm, false);
- err = xe_bo_validate(bo, bo->vm, false);
+ err = xe_bo_validate(bo, bo->vm, false, exec);
xe_vm_unlock(vm);
up_read(&vm->lock);
if (err) {
@@ -303,7 +305,7 @@ static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struc
goto cleanup_all;
}
xe_bo_lock(external, false);
- err = xe_bo_validate(external, NULL, false);
+ err = xe_bo_validate(external, NULL, false, exec);
xe_bo_unlock(external);
if (err) {
KUNIT_FAIL(test, "external bo valid err=%pe\n",
@@ -495,9 +497,9 @@ static int shrink_test_run_device(struct xe_device *xe)
INIT_LIST_HEAD(&link->link);
/* We can create bos using WC caching here. But it is slower. */
- bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE,
+ bo = xe_bo_create_user(xe, NULL, XE_BO_SHRINK_SIZE,
DRM_XE_GEM_CPU_CACHING_WB,
- XE_BO_FLAG_SYSTEM);
+ XE_BO_FLAG_SYSTEM, NULL);
if (IS_ERR(bo)) {
if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index 121f17c112ec..5df98de5ba3c 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -27,9 +27,11 @@ static bool is_dynamic(struct dma_buf_test_params *params)
}
static void check_residency(struct kunit *test, struct xe_bo *exported,
- struct xe_bo *imported, struct dma_buf *dmabuf)
+ struct xe_bo *imported, struct dma_buf *dmabuf,
+ struct drm_exec *exec)
{
struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv);
+ struct dma_buf_attachment *attach;
u32 mem_type;
int ret;
@@ -45,7 +47,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
mem_type = XE_PL_TT;
else if (params->force_different_devices && !is_dynamic(params) &&
(params->mem_mask & XE_BO_FLAG_SYSTEM))
- /* Pin migrated to TT */
+ /* Pin migrated to TT on non-dynamic attachments. */
mem_type = XE_PL_TT;
if (!xe_bo_is_mem_type(exported, mem_type)) {
@@ -57,16 +59,12 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
return;
/*
- * Evict exporter. Note that the gem object dma_buf member isn't
- * set from xe_gem_prime_export(), and it's needed for the move_notify()
- * functionality, so hack that up here. Evicting the exported bo will
+ * Evict exporter. Evicting the exported bo will
* evict also the imported bo through the move_notify() functionality if
* importer is on a different device. If they're on the same device,
* the exporter and the importer should be the same bo.
*/
- swap(exported->ttm.base.dma_buf, dmabuf);
- ret = xe_bo_evict(exported);
- swap(exported->ttm.base.dma_buf, dmabuf);
+ ret = xe_bo_evict(exported, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n",
@@ -81,7 +79,7 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
}
/* Re-validate the importer. This should move also exporter in. */
- ret = xe_bo_validate(imported, NULL, false);
+ ret = xe_bo_validate(imported, NULL, false, exec);
if (ret) {
if (ret != -EINTR && ret != -ERESTARTSYS)
KUNIT_FAIL(test, "Validating importer failed with err=%d.\n",
@@ -91,6 +89,18 @@ static void check_residency(struct kunit *test, struct xe_bo *exported,
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+ /* Check that we can pin without migrating. */
+ attach = list_first_entry_or_null(&dmabuf->attachments, typeof(*attach), node);
+ if (attach) {
+ int err = dma_buf_pin(attach);
+
+ if (!err) {
+ KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type));
+ dma_buf_unpin(attach);
+ }
+ KUNIT_EXPECT_EQ(test, err, 0);
+ }
+
if (params->force_different_devices)
KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT));
else
@@ -117,8 +127,8 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
size = SZ_64K;
kunit_info(test, "running %s\n", __func__);
- bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
- params->mem_mask);
+ bo = xe_bo_create_user(xe, NULL, size, DRM_XE_GEM_CPU_CACHING_WC,
+ params->mem_mask, NULL);
if (IS_ERR(bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(bo));
@@ -131,6 +141,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
PTR_ERR(dmabuf));
goto out;
}
+ bo->ttm.base.dma_buf = dmabuf;
import = xe_gem_prime_import(&xe->drm, dmabuf);
if (!IS_ERR(import)) {
@@ -145,13 +156,14 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
KUNIT_FAIL(test,
"xe_gem_prime_import() succeeded when it shouldn't have\n");
} else {
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
int err;
/* Is everything where we expect it to be? */
xe_bo_lock(import_bo, false);
- err = xe_bo_validate(import_bo, NULL, false);
+ err = xe_bo_validate(import_bo, NULL, false, exec);
- /* Pinning in VRAM is not allowed. */
+ /* Pinning in VRAM is not allowed for non-dynamic attachments */
if (!is_dynamic(params) &&
params->force_different_devices &&
!(params->mem_mask & XE_BO_FLAG_SYSTEM))
@@ -162,7 +174,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
err == -ERESTARTSYS);
if (!err)
- check_residency(test, bo, import_bo, dmabuf);
+ check_residency(test, bo, import_bo, dmabuf, exec);
xe_bo_unlock(import_bo);
}
drm_gem_object_put(import);
@@ -178,6 +190,7 @@ static void xe_test_dmabuf_import_same_driver(struct xe_device *xe)
KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n",
PTR_ERR(import));
}
+ bo->ttm.base.dma_buf = NULL;
dma_buf_put(dmabuf);
out:
drm_gem_object_put(&bo->ttm.base);
@@ -198,7 +211,7 @@ static const struct dma_buf_attach_ops nop2p_attach_ops = {
static const struct dma_buf_test_params test_params[] = {
{.mem_mask = XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_FLAG_VRAM0,
+ {.mem_mask = XE_BO_FLAG_VRAM0 | XE_BO_FLAG_NEEDS_CPU_ACCESS,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
@@ -230,7 +243,8 @@ static const struct dma_buf_test_params test_params[] = {
{.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
.attach_ops = &xe_dma_buf_attach_ops},
- {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0,
+ {.mem_mask = XE_BO_FLAG_SYSTEM | XE_BO_FLAG_VRAM0 |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS,
.attach_ops = &xe_dma_buf_attach_ops,
.force_different_devices = true},
diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c
new file mode 100644
index 000000000000..42bfc4bcfbcf
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_config_kunit.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <kunit/static_stub.h>
+#include <kunit/test.h>
+#include <kunit/test-bug.h>
+
+#include "xe_kunit_helpers.h"
+#include "xe_pci_test.h"
+
+#define TEST_MAX_VFS 63
+
+static void pf_set_admin_mode(struct xe_device *xe, bool enable)
+{
+ /* should match logic of xe_sriov_pf_admin_only() */
+ xe->info.probe_display = !enable;
+ KUNIT_EXPECT_EQ(kunit_get_current_test(), enable, xe_sriov_pf_admin_only(xe));
+}
+
+static const void *num_vfs_gen_param(struct kunit *test, const void *prev, char *desc)
+{
+ unsigned long next = 1 + (unsigned long)prev;
+
+ if (next > TEST_MAX_VFS)
+ return NULL;
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%lu VF%s",
+ next, str_plural(next));
+ return (void *)next;
+}
+
+static int pf_gt_config_test_init(struct kunit *test)
+{
+ struct xe_pci_fake_data fake = {
+ .sriov_mode = XE_SRIOV_MODE_PF,
+ .platform = XE_TIGERLAKE, /* any random platform with SR-IOV */
+ .subplatform = XE_SUBPLATFORM_NONE,
+ };
+ struct xe_device *xe;
+ struct xe_gt *gt;
+
+ test->priv = &fake;
+ xe_kunit_helper_xe_device_test_init(test);
+
+ xe = test->priv;
+ KUNIT_ASSERT_TRUE(test, IS_SRIOV_PF(xe));
+
+ gt = xe_root_mmio_gt(xe);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt);
+ test->priv = gt;
+
+ /* pretend it can support up to 63 VFs */
+ xe->sriov.pf.device_total_vfs = TEST_MAX_VFS;
+ xe->sriov.pf.driver_max_vfs = TEST_MAX_VFS;
+ KUNIT_ASSERT_EQ(test, xe_sriov_pf_get_totalvfs(xe), 63);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0);
+
+ /* more sanity checks */
+ KUNIT_EXPECT_EQ(test, GUC_ID_MAX + 1, SZ_64K);
+ KUNIT_EXPECT_EQ(test, GUC_NUM_DOORBELLS, SZ_256);
+
+ return 0;
+}
+
+static void fair_contexts_1vf(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, 1));
+
+ pf_set_admin_mode(xe, true);
+ KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, SZ_64K - SZ_1K, pf_profile_fair_ctxs(gt, 1));
+}
+
+static void fair_contexts(struct kunit *test)
+{
+ unsigned int num_vfs = (unsigned long)test->param_value;
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+
+ KUNIT_EXPECT_TRUE(test, is_power_of_2(pf_profile_fair_ctxs(gt, num_vfs)));
+ KUNIT_EXPECT_GT(test, GUC_ID_MAX, num_vfs * pf_profile_fair_ctxs(gt, num_vfs));
+
+ if (num_vfs > 31)
+ KUNIT_ASSERT_EQ(test, SZ_1K, pf_profile_fair_ctxs(gt, num_vfs));
+ else if (num_vfs > 15)
+ KUNIT_ASSERT_EQ(test, SZ_2K, pf_profile_fair_ctxs(gt, num_vfs));
+ else if (num_vfs > 7)
+ KUNIT_ASSERT_EQ(test, SZ_4K, pf_profile_fair_ctxs(gt, num_vfs));
+ else if (num_vfs > 3)
+ KUNIT_ASSERT_EQ(test, SZ_8K, pf_profile_fair_ctxs(gt, num_vfs));
+ else if (num_vfs > 1)
+ KUNIT_ASSERT_EQ(test, SZ_16K, pf_profile_fair_ctxs(gt, num_vfs));
+ else
+ KUNIT_ASSERT_EQ(test, SZ_32K, pf_profile_fair_ctxs(gt, num_vfs));
+}
+
+static void fair_doorbells_1vf(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, 128, pf_profile_fair_dbs(gt, 1));
+
+ pf_set_admin_mode(xe, true);
+ KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, 240, pf_profile_fair_dbs(gt, 1));
+}
+
+static void fair_doorbells(struct kunit *test)
+{
+ unsigned int num_vfs = (unsigned long)test->param_value;
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+
+ KUNIT_EXPECT_TRUE(test, is_power_of_2(pf_profile_fair_dbs(gt, num_vfs)));
+ KUNIT_EXPECT_GE(test, GUC_NUM_DOORBELLS, (num_vfs + 1) * pf_profile_fair_dbs(gt, num_vfs));
+
+ if (num_vfs > 31)
+ KUNIT_ASSERT_EQ(test, SZ_4, pf_profile_fair_dbs(gt, num_vfs));
+ else if (num_vfs > 15)
+ KUNIT_ASSERT_EQ(test, SZ_8, pf_profile_fair_dbs(gt, num_vfs));
+ else if (num_vfs > 7)
+ KUNIT_ASSERT_EQ(test, SZ_16, pf_profile_fair_dbs(gt, num_vfs));
+ else if (num_vfs > 3)
+ KUNIT_ASSERT_EQ(test, SZ_32, pf_profile_fair_dbs(gt, num_vfs));
+ else if (num_vfs > 1)
+ KUNIT_ASSERT_EQ(test, SZ_64, pf_profile_fair_dbs(gt, num_vfs));
+ else
+ KUNIT_ASSERT_EQ(test, SZ_128, pf_profile_fair_dbs(gt, num_vfs));
+}
+
+static void fair_ggtt_1vf(struct kunit *test)
+{
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, SZ_2G, pf_profile_fair_ggtt(gt, 1));
+
+ pf_set_admin_mode(xe, true);
+ KUNIT_ASSERT_TRUE(test, xe_sriov_pf_admin_only(xe));
+ KUNIT_EXPECT_EQ(test, SZ_2G + SZ_1G + SZ_512M, pf_profile_fair_ggtt(gt, 1));
+}
+
+static void fair_ggtt(struct kunit *test)
+{
+ unsigned int num_vfs = (unsigned long)test->param_value;
+ struct xe_gt *gt = test->priv;
+ struct xe_device *xe = gt_to_xe(gt);
+ u64 alignment = pf_get_ggtt_alignment(gt);
+ u64 shareable = SZ_2G + SZ_1G + SZ_512M;
+
+ pf_set_admin_mode(xe, false);
+ KUNIT_ASSERT_FALSE(test, xe_sriov_pf_admin_only(xe));
+
+ KUNIT_EXPECT_TRUE(test, IS_ALIGNED(pf_profile_fair_ggtt(gt, num_vfs), alignment));
+ KUNIT_EXPECT_GE(test, shareable, num_vfs * pf_profile_fair_ggtt(gt, num_vfs));
+
+ if (num_vfs > 56)
+ KUNIT_ASSERT_EQ(test, SZ_64M - SZ_8M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 28)
+ KUNIT_ASSERT_EQ(test, SZ_64M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 14)
+ KUNIT_ASSERT_EQ(test, SZ_128M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 7)
+ KUNIT_ASSERT_EQ(test, SZ_256M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 3)
+ KUNIT_ASSERT_EQ(test, SZ_512M, pf_profile_fair_ggtt(gt, num_vfs));
+ else if (num_vfs > 1)
+ KUNIT_ASSERT_EQ(test, SZ_1G, pf_profile_fair_ggtt(gt, num_vfs));
+ else
+ KUNIT_ASSERT_EQ(test, SZ_2G, pf_profile_fair_ggtt(gt, num_vfs));
+}
+
+static struct kunit_case pf_gt_config_test_cases[] = {
+ KUNIT_CASE(fair_contexts_1vf),
+ KUNIT_CASE(fair_doorbells_1vf),
+ KUNIT_CASE(fair_ggtt_1vf),
+ KUNIT_CASE_PARAM(fair_contexts, num_vfs_gen_param),
+ KUNIT_CASE_PARAM(fair_doorbells, num_vfs_gen_param),
+ KUNIT_CASE_PARAM(fair_ggtt, num_vfs_gen_param),
+ {}
+};
+
+static struct kunit_suite pf_gt_config_suite = {
+ .name = "pf_gt_config",
+ .test_cases = pf_gt_config_test_cases,
+ .init = pf_gt_config_test_init,
+};
+
+kunit_test_suite(pf_gt_config_suite);
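The parameterized expectations above encode a halving schedule: each VF's share is a power of two and halves whenever num_vfs crosses the next power of two. pf_profile_fair_ctxs() itself is not part of this hunk, so the following is an inference that reproduces every expectation, not the driver's code:

	/* Hypothetical reconstruction: split the context ID space evenly and
	 * round each share down to a power of two. GUC_ID_MAX / 1 rounds down
	 * to SZ_32K, / 2..3 to SZ_16K, ..., / 32..63 to SZ_1K, matching the
	 * fair_contexts() cases; the doorbell and GGTT schedules look analogous.
	 */
	static u32 example_fair_ctxs(unsigned int num_vfs)
	{
		return rounddown_pow_of_two(GUC_ID_MAX / num_vfs);
	}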
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c b/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c
new file mode 100644
index 000000000000..3b213fcae916
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_guc_g2g_test.c
@@ -0,0 +1,776 @@
+// SPDX-License-Identifier: GPL-2.0 AND MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/delay.h>
+
+#include <kunit/test.h>
+#include <kunit/visibility.h>
+
+#include "tests/xe_kunit_helpers.h"
+#include "tests/xe_pci_test.h"
+#include "tests/xe_test.h"
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_pm.h"
+
+/*
+ * There are different ways to allocate the G2G buffers. The plan for this test
+ * is to make sure that all the possible options work. The particular option
+ * chosen by the driver may vary from one platform to another, and it may also
+ * change over time. So, to ensure consistent testing, the relevant driver code
+ * is replicated here, guaranteeing it cannot change without the test being
+ * updated to keep exercising the other options.
+ *
+ * In order to test the actual code being used by the driver, there is also the
+ * 'default' scheme. That will use the official driver routines to test whatever
+ * method the driver is using on the current platform at the current time.
+ */
+enum {
+ /* Driver defined allocation scheme */
+ G2G_CTB_TYPE_DEFAULT,
+ /* Single buffer in host memory */
+ G2G_CTB_TYPE_HOST,
+ /* Single buffer in a specific tile, loops across all tiles */
+ G2G_CTB_TYPE_TILE,
+};
+
+/*
+ * The payload is opaque to the GuC, so the KMD can define any structure or size it wants.
+ */
+struct g2g_test_payload {
+ u32 tx_dev;
+ u32 tx_tile;
+ u32 rx_dev;
+ u32 rx_tile;
+ u32 seqno;
+};
+
+static void g2g_test_send(struct kunit *test, struct xe_guc *guc,
+ u32 far_tile, u32 far_dev,
+ struct g2g_test_payload *payload)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 *action, total;
+ size_t payload_len;
+ int ret;
+
+ static_assert(IS_ALIGNED(sizeof(*payload), sizeof(u32)));
+ payload_len = sizeof(*payload) / sizeof(u32);
+
+ total = 4 + payload_len;
+ action = kunit_kmalloc_array(test, total, sizeof(*action), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, action);
+
+ action[0] = XE_GUC_ACTION_TEST_G2G_SEND;
+ action[1] = far_tile;
+ action[2] = far_dev;
+ action[3] = payload_len;
+ memcpy(action + 4, payload, payload_len * sizeof(u32));
+
+ atomic_inc(&xe->g2g_test_count);
+
+ /*
+	 * Ideally, the expected response notification would be specified here,
+	 * but the response will come from a different GuC. By the end it should
+	 * all add up, as long as an equal number of messages is sent from and to
+	 * each GuC. In the middle, however, transient errors (such as negative
+	 * reservation space) can occur. Rather than add intrusive changes to the
+	 * CT layer, it is simpler to just not count it at all. The system should
+	 * be idle when running the selftest, and the selftest's total notification
+	 * size is well within the G2H allocation size, so there should be no need
+	 * to block for space, which is all the tracking code is really for.
+ */
+ ret = xe_guc_ct_send(&guc->ct, action, total, 0, 0);
+ kunit_kfree(test, action);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G send failed: %d [%d:%d -> %d:%d]\n", ret,
+ gt_to_tile(gt)->id, G2G_DEV(gt), far_tile, far_dev);
+}
+
+/*
+ * NB: Can't use KUNIT_ASSERT and friends in here as this is called asynchronously
+ * from the G2H notification handler. Need that to actually complete rather than
+ * thread-abort in order to keep the rest of the driver alive!
+ */
+int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *rx_gt = guc_to_gt(guc), *test_gt, *tx_gt = NULL;
+ u32 tx_tile, tx_dev, rx_tile, rx_dev, idx, got_len;
+ struct g2g_test_payload *payload;
+ size_t payload_len;
+ int ret = 0, i;
+
+ payload_len = sizeof(*payload) / sizeof(u32);
+
+ if (unlikely(len != (G2H_LEN_DW_G2G_NOTIFY_MIN + payload_len))) {
+ xe_gt_err(rx_gt, "G2G test notification invalid length %u", len);
+ ret = -EPROTO;
+ goto done;
+ }
+
+ tx_tile = msg[0];
+ tx_dev = msg[1];
+ got_len = msg[2];
+ payload = (struct g2g_test_payload *)(msg + 3);
+
+ rx_tile = gt_to_tile(rx_gt)->id;
+ rx_dev = G2G_DEV(rx_gt);
+
+ if (got_len != payload_len) {
+ xe_gt_err(rx_gt, "G2G: Invalid payload length: %u vs %zu\n", got_len, payload_len);
+ ret = -EPROTO;
+ goto done;
+ }
+
+ if (payload->tx_dev != tx_dev || payload->tx_tile != tx_tile ||
+ payload->rx_dev != rx_dev || payload->rx_tile != rx_tile) {
+ xe_gt_err(rx_gt, "G2G: Invalid payload: %d:%d -> %d:%d vs %d:%d -> %d:%d! [%d]\n",
+ payload->tx_tile, payload->tx_dev, payload->rx_tile, payload->rx_dev,
+ tx_tile, tx_dev, rx_tile, rx_dev, payload->seqno);
+ ret = -EPROTO;
+ goto done;
+ }
+
+ if (!xe->g2g_test_array) {
+ xe_gt_err(rx_gt, "G2G: Missing test array!\n");
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for_each_gt(test_gt, xe, i) {
+ if (gt_to_tile(test_gt)->id != tx_tile)
+ continue;
+
+ if (G2G_DEV(test_gt) != tx_dev)
+ continue;
+
+ if (tx_gt) {
+ xe_gt_err(rx_gt, "G2G: Got duplicate TX GTs: %d vs %d for %d:%d!\n",
+ tx_gt->info.id, test_gt->info.id, tx_tile, tx_dev);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ tx_gt = test_gt;
+ }
+ if (!tx_gt) {
+ xe_gt_err(rx_gt, "G2G: Failed to find a TX GT for %d:%d!\n", tx_tile, tx_dev);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ idx = (tx_gt->info.id * xe->info.gt_count) + rx_gt->info.id;
+
+ if (xe->g2g_test_array[idx] != payload->seqno - 1) {
+ xe_gt_err(rx_gt, "G2G: Seqno mismatch %d vs %d for %d:%d -> %d:%d!\n",
+ xe->g2g_test_array[idx], payload->seqno - 1,
+ tx_tile, tx_dev, rx_tile, rx_dev);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ xe->g2g_test_array[idx] = payload->seqno;
+
+done:
+ atomic_dec(&xe->g2g_test_count);
+ return ret;
+}
+
+/*
+ * Send the given seqno from all GuCs to all other GuCs in tile/GT order
+ */
+static void g2g_test_in_order(struct kunit *test, struct xe_device *xe, u32 seqno)
+{
+ struct xe_gt *near_gt, *far_gt;
+ int i, j;
+
+ for_each_gt(near_gt, xe, i) {
+ u32 near_tile = gt_to_tile(near_gt)->id;
+ u32 near_dev = G2G_DEV(near_gt);
+
+ for_each_gt(far_gt, xe, j) {
+ u32 far_tile = gt_to_tile(far_gt)->id;
+ u32 far_dev = G2G_DEV(far_gt);
+ struct g2g_test_payload payload;
+
+ if (far_gt->info.id == near_gt->info.id)
+ continue;
+
+ payload.tx_dev = near_dev;
+ payload.tx_tile = near_tile;
+ payload.rx_dev = far_dev;
+ payload.rx_tile = far_tile;
+ payload.seqno = seqno;
+ g2g_test_send(test, &near_gt->uc.guc, far_tile, far_dev, &payload);
+ }
+ }
+}
+
+#define WAIT_TIME_MS 100
+#define WAIT_COUNT (1000 / WAIT_TIME_MS)
+
+static void g2g_wait_for_complete(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+ struct kunit *test = kunit_get_current_test();
+ int wait = 0;
+
+ /* Wait for all G2H messages to be received */
+ while (atomic_read(&xe->g2g_test_count)) {
+ if (++wait > WAIT_COUNT)
+ break;
+
+ msleep(WAIT_TIME_MS);
+ }
+
+ KUNIT_ASSERT_EQ_MSG(test, 0, atomic_read(&xe->g2g_test_count),
+ "Timed out waiting for notifications\n");
+ kunit_info(test, "Got all notifications back\n");
+}
+
+#undef WAIT_TIME_MS
+#undef WAIT_COUNT
+
+static void g2g_clean_array(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+
+ xe->g2g_test_array = NULL;
+}
+
+#define NUM_LOOPS 16
+
+static void g2g_run_test(struct kunit *test, struct xe_device *xe)
+{
+ u32 seqno, max_array;
+ int ret, i, j;
+
+ max_array = xe->info.gt_count * xe->info.gt_count;
+ xe->g2g_test_array = kunit_kcalloc(test, max_array, sizeof(u32), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe->g2g_test_array);
+
+ ret = kunit_add_action_or_reset(test, g2g_clean_array, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n");
+
+ /*
+ * Send incrementing seqnos from all GuCs to all other GuCs in tile/GT order.
+ * Tile/GT order doesn't really mean anything to the hardware but it is going
+ * to be a fixed sequence every time.
+ *
+ * Verify that each one comes back having taken the correct route.
+ */
+ ret = kunit_add_action(test, g2g_wait_for_complete, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register clean up action\n");
+ for (seqno = 1; seqno < NUM_LOOPS; seqno++)
+ g2g_test_in_order(test, xe, seqno);
+ seqno--;
+
+ kunit_release_action(test, &g2g_wait_for_complete, xe);
+
+ /* Check for the final seqno in each slot */
+ for (i = 0; i < xe->info.gt_count; i++) {
+ for (j = 0; j < xe->info.gt_count; j++) {
+ u32 idx = (j * xe->info.gt_count) + i;
+
+ if (i == j)
+ KUNIT_ASSERT_EQ_MSG(test, 0, xe->g2g_test_array[idx],
+ "identity seqno modified: %d for %dx%d!\n",
+ xe->g2g_test_array[idx], i, j);
+ else
+ KUNIT_ASSERT_EQ_MSG(test, seqno, xe->g2g_test_array[idx],
+ "invalid seqno: %d vs %d for %dx%d!\n",
+ xe->g2g_test_array[idx], seqno, i, j);
+ }
+ }
+
+ kunit_kfree(test, xe->g2g_test_array);
+ kunit_release_action(test, &g2g_clean_array, xe);
+
+ kunit_info(test, "Test passed\n");
+}
+
+#undef NUM_LOOPS
+
+static void g2g_ct_stop(struct xe_guc *guc)
+{
+ struct xe_gt *remote_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ int i, t;
+
+ for_each_gt(remote_gt, xe, i) {
+ u32 tile, dev;
+
+ if (remote_gt->info.id == gt->info.id)
+ continue;
+
+ tile = gt_to_tile(remote_gt)->id;
+ dev = G2G_DEV(remote_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++)
+ guc_g2g_deregister(guc, tile, dev, t);
+ }
+}
+
+/* Size of a single allocation that contains all G2G CTBs across all GTs */
+static u32 g2g_ctb_size(struct kunit *test, struct xe_device *xe)
+{
+ unsigned int count = xe->info.gt_count;
+ u32 num_channels = (count * (count - 1)) / 2;
+
+ kunit_info(test, "Size: (%d * %d / 2) * %d * 0x%08X + 0x%08X => 0x%08X [%d]\n",
+ count, count - 1, XE_G2G_TYPE_LIMIT, G2G_BUFFER_SIZE, G2G_DESC_AREA_SIZE,
+ num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE,
+ num_channels * XE_G2G_TYPE_LIMIT);
+
+ return num_channels * XE_G2G_TYPE_LIMIT * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+}
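+
+/*
+ * Hedged worked example of the sizing above: with gt_count = 4 there are
+ * (4 * 3) / 2 = 6 bi-directional channel pairs, each needing
+ * XE_G2G_TYPE_LIMIT CTBs, so the single allocation holds
+ * 6 * XE_G2G_TYPE_LIMIT buffers of G2G_BUFFER_SIZE plus the shared
+ * G2G_DESC_AREA_SIZE descriptor area.
+ */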
+
+/*
+ * Use the driver's regular CTB allocation scheme.
+ */
+static void g2g_alloc_default(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ int i;
+
+ kunit_info(test, "Default [tiles = %d, GTs = %d]\n",
+ xe->info.tile_count, xe->info.gt_count);
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+ int ret;
+
+ ret = guc_g2g_alloc(guc);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G alloc failed: %pe", ERR_PTR(ret));
+ }
+}
+
+static void g2g_distribute(struct kunit *test, struct xe_device *xe, struct xe_bo *bo)
+{
+ struct xe_gt *root_gt, *gt;
+ int i;
+
+ root_gt = xe_device_get_gt(xe, 0);
+ root_gt->uc.guc.g2g.bo = bo;
+ root_gt->uc.guc.g2g.owned = true;
+ kunit_info(test, "[%d.%d] Assigned 0x%p\n", gt_to_tile(root_gt)->id, root_gt->info.id, bo);
+
+ for_each_gt(gt, xe, i) {
+ if (gt->info.id != 0) {
+ gt->uc.guc.g2g.owned = false;
+ gt->uc.guc.g2g.bo = xe_bo_get(bo);
+ kunit_info(test, "[%d.%d] Pinned 0x%p\n",
+ gt_to_tile(gt)->id, gt->info.id, gt->uc.guc.g2g.bo);
+ }
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gt->uc.guc.g2g.bo);
+ }
+}
+
+/*
+ * Allocate a single blob on the host and split between all G2G CTBs.
+ */
+static void g2g_alloc_host(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ kunit_info(test, "Host [tiles = %d, GTs = %d]\n", xe->info.tile_count, xe->info.gt_count);
+
+ g2g_size = g2g_ctb_size(test, xe);
+ bo = xe_managed_bo_create_pin_map(xe, xe_device_get_root_tile(xe), g2g_size,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+ kunit_info(test, "[HST] G2G buffer create: 0x%p\n", bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+
+ g2g_distribute(test, xe, bo);
+}
+
+/*
+ * Allocate a single blob on the given tile and split between all G2G CTBs.
+ */
+static void g2g_alloc_tile(struct kunit *test, struct xe_device *xe, struct xe_tile *tile)
+{
+ struct xe_bo *bo;
+ u32 g2g_size;
+
+ KUNIT_ASSERT_TRUE(test, IS_DGFX(xe));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tile);
+
+ kunit_info(test, "Tile %d [tiles = %d, GTs = %d]\n",
+ tile->id, xe->info.tile_count, xe->info.gt_count);
+
+ g2g_size = g2g_ctb_size(test, xe);
+ bo = xe_managed_bo_create_pin_map(xe, tile, g2g_size,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT |
+ XE_BO_FLAG_GGTT_ALL |
+ XE_BO_FLAG_GGTT_INVALIDATE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);
+ kunit_info(test, "[%d.*] G2G buffer create: 0x%p\n", tile->id, bo);
+
+ xe_map_memset(xe, &bo->vmap, 0, 0, g2g_size);
+
+ g2g_distribute(test, xe, bo);
+}
+
+static void g2g_free(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ struct xe_bo *bo;
+ int i;
+
+ for_each_gt(gt, xe, i) {
+ bo = gt->uc.guc.g2g.bo;
+ if (!bo)
+ continue;
+
+ if (gt->uc.guc.g2g.owned) {
+ xe_managed_bo_unpin_map_no_vm(bo);
+ kunit_info(test, "[%d.%d] Unmapped 0x%p\n",
+ gt_to_tile(gt)->id, gt->info.id, bo);
+ } else {
+ xe_bo_put(bo);
+ kunit_info(test, "[%d.%d] Unpinned 0x%p\n",
+ gt_to_tile(gt)->id, gt->info.id, bo);
+ }
+
+ gt->uc.guc.g2g.bo = NULL;
+ }
+}
+
+static void g2g_stop(struct kunit *test, struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ int i;
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ if (!guc->g2g.bo)
+ continue;
+
+ g2g_ct_stop(guc);
+ }
+
+ g2g_free(test, xe);
+}
+
+/*
+ * Generate a unique id for each bi-directional CTB for each pair of
+ * near and far tiles/devices. The id can then be used as an index into
+ * a single allocation that is sub-divided into multiple CTBs.
+ *
+ * For example, with two devices per tile and two tiles, the table should
+ * look like:
+ * Far <tile>.<dev>
+ * 0.0 0.1 1.0 1.1
+ * N 0.0 --/-- 00/01 02/03 04/05
+ * e 0.1 01/00 --/-- 06/07 08/09
+ * a 1.0 03/02 07/06 --/-- 10/11
+ * r 1.1 05/04 09/08 11/10 --/--
+ *
+ * Where each entry is Rx/Tx channel id.
+ *
+ * So GuC #3 (tile 1, dev 1) talking to GuC #2 (tile 1, dev 0) would
+ * be reading from channel #11 and writing to channel #10. Whereas,
+ * GuC #2 talking to GuC #3 would be read on #10 and write to #11.
+ */
+static int g2g_slot_flat(u32 near_tile, u32 near_dev, u32 far_tile, u32 far_dev,
+ u32 type, u32 max_inst, bool have_dev)
+{
+ u32 near = near_tile, far = far_tile;
+ u32 idx = 0, x, y, direction;
+ int i;
+
+ if (have_dev) {
+ near = (near << 1) | near_dev;
+ far = (far << 1) | far_dev;
+ }
+
+ /* No need to send to one's self */
+ if (far == near)
+ return -1;
+
+ if (far > near) {
+ /* Top right table half */
+ x = far;
+ y = near;
+
+ /* T/R is 'forwards' direction */
+ direction = type;
+ } else {
+ /* Bottom left table half */
+ x = near;
+ y = far;
+
+ /* B/L is 'backwards' direction */
+ direction = (1 - type);
+ }
+
+ /* Count the rows prior to the target */
+ for (i = y; i > 0; i--)
+ idx += max_inst - i;
+
+ /* Count this row up to the target */
+ idx += (x - 1 - y);
+
+ /* Slots are in Rx/Tx pairs */
+ idx *= 2;
+
+ /* Pick Rx/Tx direction */
+ idx += direction;
+
+ return idx;
+}
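+
+/*
+ * Hedged worked example against the table above (two tiles, two devices,
+ * so max_inst = 4 and have_dev = true): near = 1.1 -> (1 << 1) | 1 = 3,
+ * far = 1.0 -> (1 << 1) | 0 = 2. Since far < near: x = 3, y = 2 and
+ * direction = 1 - type. Rows before the target contribute
+ * (4 - 2) + (4 - 1) = 5, this row adds 3 - 1 - 2 = 0, doubling for the
+ * Rx/Tx pairing gives 10, and direction selects channel 11 for type 0 or
+ * channel 10 for type 1: the 11/10 entry at the table's bottom-right.
+ */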
+
+static int g2g_register_flat(struct xe_guc *guc, u32 far_tile, u32 far_dev, u32 type, bool have_dev)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ u32 near_tile = gt_to_tile(gt)->id;
+ u32 near_dev = G2G_DEV(gt);
+ u32 max = xe->info.gt_count;
+ int idx;
+ u32 base, desc, buf;
+
+ if (!guc->g2g.bo)
+ return -ENODEV;
+
+ idx = g2g_slot_flat(near_tile, near_dev, far_tile, far_dev, type, max, have_dev);
+ xe_assert(xe, idx >= 0);
+
+ base = guc_bo_ggtt_addr(guc, guc->g2g.bo);
+ desc = base + idx * G2G_DESC_SIZE;
+ buf = base + idx * G2G_BUFFER_SIZE + G2G_DESC_AREA_SIZE;
+
+ xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);
+ xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(guc->g2g.bo));
+
+ return guc_action_register_g2g_buffer(guc, type, far_tile, far_dev,
+ desc, buf, G2G_BUFFER_SIZE);
+}
+
+static void g2g_start(struct kunit *test, struct xe_guc *guc)
+{
+ struct xe_gt *remote_gt, *gt = guc_to_gt(guc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int i;
+ int t, ret;
+ bool have_dev;
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, guc->g2g.bo);
+
+ /* GuC interface will need extending if more GT device types are ever created. */
+ KUNIT_ASSERT_TRUE(test,
+ (gt->info.type == XE_GT_TYPE_MAIN) ||
+ (gt->info.type == XE_GT_TYPE_MEDIA));
+
+ /* Channel numbering depends on whether there are multiple GTs per tile */
+ have_dev = xe->info.gt_count > xe->info.tile_count;
+
+ for_each_gt(remote_gt, xe, i) {
+ u32 tile, dev;
+
+ if (remote_gt->info.id == gt->info.id)
+ continue;
+
+ tile = gt_to_tile(remote_gt)->id;
+ dev = G2G_DEV(remote_gt);
+
+ for (t = 0; t < XE_G2G_TYPE_LIMIT; t++) {
+ ret = g2g_register_flat(guc, tile, dev, t, have_dev);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "G2G register failed: %pe", ERR_PTR(ret));
+ }
+ }
+}
+
+static void g2g_reinit(struct kunit *test, struct xe_device *xe, int ctb_type, struct xe_tile *tile)
+{
+ struct xe_gt *gt;
+ int i, found = 0;
+
+ g2g_stop(test, xe);
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ KUNIT_ASSERT_NULL(test, guc->g2g.bo);
+ }
+
+ switch (ctb_type) {
+ case G2G_CTB_TYPE_DEFAULT:
+ g2g_alloc_default(test, xe);
+ break;
+
+ case G2G_CTB_TYPE_HOST:
+ g2g_alloc_host(test, xe);
+ break;
+
+ case G2G_CTB_TYPE_TILE:
+ g2g_alloc_tile(test, xe, tile);
+ break;
+
+ default:
+ KUNIT_ASSERT_TRUE(test, false);
+ }
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ if (!guc->g2g.bo)
+ continue;
+
+ if (ctb_type == G2G_CTB_TYPE_DEFAULT)
+ guc_g2g_start(guc);
+ else
+ g2g_start(test, guc);
+ found++;
+ }
+
+ KUNIT_ASSERT_GT_MSG(test, found, 1, "insufficient G2G channels running: %d", found);
+
+ kunit_info(test, "Testing across %d GTs\n", found);
+}
+
+static void g2g_recreate_ctb(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+ struct kunit *test = kunit_get_current_test();
+
+ g2g_stop(test, xe);
+
+ if (xe_guc_g2g_wanted(xe))
+ g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL);
+}
+
+static void g2g_pm_runtime_put(void *_xe)
+{
+ struct xe_device *xe = (struct xe_device *)_xe;
+
+ xe_pm_runtime_put(xe);
+}
+
+static void g2g_pm_runtime_get(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = kunit_add_action_or_reset(test, g2g_pm_runtime_put, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register runtime PM action\n");
+}
+
+static void g2g_check_skip(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ struct xe_gt *gt;
+ int i;
+
+ if (IS_SRIOV_VF(xe))
+ kunit_skip(test, "not supported from a VF");
+
+ if (xe->info.gt_count <= 1)
+ kunit_skip(test, "not enough GTs");
+
+ for_each_gt(gt, xe, i) {
+ struct xe_guc *guc = &gt->uc.guc;
+
+ if (guc->fw.build_type == CSS_UKERNEL_INFO_BUILDTYPE_PROD)
+ kunit_skip(test,
+ "G2G test interface not available in production firmware builds\n");
+ }
+}
+
+/*
+ * Simple test that does not try to recreate the CTBs.
+ * It requires that the platform already enables G2G comms,
+ * but it has no risk of leaving the system in a broken state
+ * afterwards.
+ */
+static void xe_live_guc_g2g_kunit_default(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+
+ if (!xe_guc_g2g_wanted(xe))
+ kunit_skip(test, "G2G not enabled");
+
+ g2g_check_skip(test);
+
+ g2g_pm_runtime_get(test);
+
+ kunit_info(test, "Testing default CTBs\n");
+ g2g_run_test(test, xe);
+
+ kunit_release_action(test, &g2g_pm_runtime_put, xe);
+}
+
+/*
+ * More complex test that recreates the CTBs in various locations to
+ * test access to each location from each GuC. Can be run even on
+ * systems that do not enable G2G by default. On the other hand,
+ * because it recreates the CTBs, if something goes wrong it could
+ * leave the system with broken G2G comms.
+ */
+static void xe_live_guc_g2g_kunit_allmem(struct kunit *test)
+{
+ struct xe_device *xe = test->priv;
+ int ret;
+
+ g2g_check_skip(test);
+
+ g2g_pm_runtime_get(test);
+
+ /* Make sure to leave the system as we found it */
+ ret = kunit_add_action_or_reset(test, g2g_recreate_ctb, xe);
+ KUNIT_ASSERT_EQ_MSG(test, 0, ret, "Failed to register CTB re-creation action\n");
+
+ kunit_info(test, "Testing CTB type 'default'...\n");
+ g2g_reinit(test, xe, G2G_CTB_TYPE_DEFAULT, NULL);
+ g2g_run_test(test, xe);
+
+ kunit_info(test, "Testing CTB type 'host'...\n");
+ g2g_reinit(test, xe, G2G_CTB_TYPE_HOST, NULL);
+ g2g_run_test(test, xe);
+
+ if (IS_DGFX(xe)) {
+ struct xe_tile *tile;
+ int id;
+
+ for_each_tile(tile, xe, id) {
+ kunit_info(test, "Testing CTB type 'tile: #%d'...\n", id);
+
+ g2g_reinit(test, xe, G2G_CTB_TYPE_TILE, tile);
+ g2g_run_test(test, xe);
+ }
+ } else {
+ kunit_info(test, "Skipping local memory on integrated platform\n");
+ }
+
+ kunit_release_action(test, g2g_recreate_ctb, xe);
+ kunit_release_action(test, g2g_pm_runtime_put, xe);
+}
+
+static struct kunit_case xe_guc_g2g_tests[] = {
+ KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_default, xe_pci_live_device_gen_param),
+ KUNIT_CASE_PARAM(xe_live_guc_g2g_kunit_allmem, xe_pci_live_device_gen_param),
+ {}
+};
+
+VISIBLE_IF_KUNIT
+struct kunit_suite xe_guc_g2g_test_suite = {
+ .name = "xe_guc_g2g",
+ .test_cases = xe_guc_g2g_tests,
+ .init = xe_kunit_helper_xe_device_live_test_init,
+};
+EXPORT_SYMBOL_IF_KUNIT(xe_guc_g2g_test_suite);
diff --git a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
index 81277c77016d..c55e46f1ae92 100644
--- a/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
+++ b/drivers/gpu/drm/xe/tests/xe_live_test_mod.c
@@ -10,12 +10,14 @@ extern struct kunit_suite xe_bo_shrink_test_suite;
extern struct kunit_suite xe_dma_buf_test_suite;
extern struct kunit_suite xe_migrate_test_suite;
extern struct kunit_suite xe_mocs_test_suite;
+extern struct kunit_suite xe_guc_g2g_test_suite;
kunit_test_suite(xe_bo_test_suite);
kunit_test_suite(xe_bo_shrink_test_suite);
kunit_test_suite(xe_dma_buf_test_suite);
kunit_test_suite(xe_migrate_test_suite);
kunit_test_suite(xe_mocs_test_suite);
+kunit_test_suite(xe_guc_g2g_test_suite);
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c
index edd1e701aa1c..5904d658d1f2 100644
--- a/drivers/gpu/drm/xe/tests/xe_migrate.c
+++ b/drivers/gpu/drm/xe/tests/xe_migrate.c
@@ -70,7 +70,7 @@ static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe,
} } while (0)
static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
- struct kunit *test, u32 region)
+ struct kunit *test, u32 region, struct drm_exec *exec)
{
struct xe_device *xe = tile_to_xe(m->tile);
u64 retval, expected = 0;
@@ -84,14 +84,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo,
ttm_bo_type_kernel,
region |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED,
+ exec);
if (IS_ERR(remote)) {
KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n",
str, remote);
return;
}
- err = xe_bo_validate(remote, NULL, false);
+ err = xe_bo_validate(remote, NULL, false, exec);
if (err) {
KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n",
str, err);
@@ -161,13 +162,13 @@ out_unlock:
}
static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo,
- struct kunit *test)
+ struct drm_exec *exec, struct kunit *test)
{
- test_copy(m, bo, test, XE_BO_FLAG_SYSTEM);
+ test_copy(m, bo, test, XE_BO_FLAG_SYSTEM, exec);
}
static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
- struct kunit *test)
+ struct drm_exec *exec, struct kunit *test)
{
u32 region;
@@ -178,10 +179,11 @@ static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo,
region = XE_BO_FLAG_VRAM1;
else
region = XE_BO_FLAG_VRAM0;
- test_copy(m, bo, test, region);
+ test_copy(m, bo, test, region, exec);
}
-static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
+static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test,
+ struct drm_exec *exec)
{
struct xe_tile *tile = m->tile;
struct xe_device *xe = tile_to_xe(tile);
@@ -202,7 +204,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile));
+ XE_BO_FLAG_VRAM_IF_DGFX(tile),
+ exec);
if (IS_ERR(big)) {
KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big));
goto vunmap;
@@ -210,7 +213,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile));
+ XE_BO_FLAG_VRAM_IF_DGFX(tile),
+ exec);
if (IS_ERR(pt)) {
KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n",
PTR_ERR(pt));
@@ -220,7 +224,8 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
tiny = xe_bo_create_pin_map(xe, tile, m->q->vm,
2 * SZ_4K,
ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile));
+ XE_BO_FLAG_VRAM_IF_DGFX(tile),
+ exec);
if (IS_ERR(tiny)) {
KUNIT_FAIL(test, "Failed to allocate tiny fake pt: %li\n",
PTR_ERR(tiny));
@@ -290,10 +295,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
check(retval, expected, "Command clear small last value", test);
kunit_info(test, "Copying small buffer object to system\n");
- test_copy_sysmem(m, tiny, test);
+ test_copy_sysmem(m, tiny, exec, test);
if (xe->info.tile_count > 1) {
kunit_info(test, "Copying small buffer object to other vram\n");
- test_copy_vram(m, tiny, test);
+ test_copy_vram(m, tiny, exec, test);
}
/* Clear a big bo */
@@ -312,10 +317,10 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
check(retval, expected, "Command clear big last value", test);
kunit_info(test, "Copying big buffer object to system\n");
- test_copy_sysmem(m, big, test);
+ test_copy_sysmem(m, big, exec, test);
if (xe->info.tile_count > 1) {
kunit_info(test, "Copying big buffer object to other vram\n");
- test_copy_vram(m, big, test);
+ test_copy_vram(m, big, exec, test);
}
out:
@@ -343,10 +348,11 @@ static int migrate_test_run_device(struct xe_device *xe)
for_each_tile(tile, xe, id) {
struct xe_migrate *m = tile->migrate;
+ struct drm_exec *exec = XE_VALIDATION_OPT_OUT;
kunit_info(test, "Testing tile id %d.\n", id);
xe_vm_lock(m->q->vm, false);
- xe_migrate_sanity_test(m, test);
+ xe_migrate_sanity_test(m, test, exec);
xe_vm_unlock(m->q->vm);
}
@@ -490,7 +496,7 @@ err_sync:
static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
struct xe_bo *sys_bo, struct xe_bo *vram_bo, struct xe_bo *ccs_bo,
- struct kunit *test)
+ struct drm_exec *exec, struct kunit *test)
{
struct dma_fence *fence;
u64 expected, retval;
@@ -509,7 +515,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Evict vram buffer object\n");
- ret = xe_bo_evict(vram_bo);
+ ret = xe_bo_evict(vram_bo, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to evict bo.\n");
return;
@@ -538,7 +544,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile,
dma_fence_put(fence);
kunit_info(test, "Restore vram buffer object\n");
- ret = xe_bo_validate(vram_bo, NULL, false);
+ ret = xe_bo_validate(vram_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
return;
@@ -636,13 +642,14 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
{
struct xe_bo *sys_bo, *vram_bo = NULL, *ccs_bo = NULL;
unsigned int bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile);
+ struct drm_exec *exec;
long ret;
- sys_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ sys_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
XE_BO_FLAG_SYSTEM |
XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(sys_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -650,8 +657,9 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
return;
}
+ exec = XE_VALIDATION_OPT_OUT;
xe_bo_lock(sys_bo, false);
- ret = xe_bo_validate(sys_bo, NULL, false);
+ ret = xe_bo_validate(sys_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
goto free_sysbo;
@@ -664,10 +672,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_unlock(sys_bo);
- ccs_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ ccs_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(ccs_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
@@ -676,7 +684,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_lock(ccs_bo, false);
- ret = xe_bo_validate(ccs_bo, NULL, false);
+ ret = xe_bo_validate(ccs_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate system bo for: %li\n", ret);
goto free_ccsbo;
@@ -689,10 +697,10 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_unlock(ccs_bo);
- vram_bo = xe_bo_create_user(xe, NULL, NULL, SZ_4M,
+ vram_bo = xe_bo_create_user(xe, NULL, SZ_4M,
DRM_XE_GEM_CPU_CACHING_WC,
bo_flags | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
+ XE_BO_FLAG_PINNED, NULL);
if (IS_ERR(vram_bo)) {
KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n",
PTR_ERR(vram_bo));
@@ -700,7 +708,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
xe_bo_lock(vram_bo, false);
- ret = xe_bo_validate(vram_bo, NULL, false);
+ ret = xe_bo_validate(vram_bo, NULL, false, exec);
if (ret) {
KUNIT_FAIL(test, "Failed to validate vram bo for: %li\n", ret);
goto free_vrambo;
@@ -713,7 +721,7 @@ static void validate_ccs_test_run_tile(struct xe_device *xe, struct xe_tile *til
}
test_clear(xe, tile, sys_bo, vram_bo, test);
- test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, test);
+ test_migrate(xe, tile, sys_bo, vram_bo, ccs_bo, exec, test);
xe_bo_unlock(vram_bo);
xe_bo_lock(vram_bo, false);
diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c
index 0e502feaca81..6bb278167aaf 100644
--- a/drivers/gpu/drm/xe/tests/xe_mocs.c
+++ b/drivers/gpu/drm/xe/tests/xe_mocs.c
@@ -49,7 +49,7 @@ static void read_l3cc_table(struct xe_gt *gt,
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
xe_force_wake_put(gt_to_fw(gt), fw_ref);
- KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
+ KUNIT_FAIL_AND_ABORT(test, "Forcewake Failed.\n");
}
for (i = 0; i < info->num_mocs_regs; i++) {
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 9c715e59f030..f3179b31f13e 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,12 +12,220 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
+#define PLATFORM_CASE(platform__, graphics_step__) \
+ { \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_NONE, \
+ .step = { .graphics = STEP_ ## graphics_step__ } \
+ }
+
+#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \
+ { \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \
+ .step = { .graphics = STEP_ ## graphics_step__ } \
+ }
+
+#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \
+ media_verx100__, media_step__) \
+ { \
+ .platform = XE_ ## platform__, \
+ .subplatform = XE_SUBPLATFORM_NONE, \
+ .graphics_verx100 = graphics_verx100__, \
+ .media_verx100 = media_verx100__, \
+ .step = { .graphics = STEP_ ## graphics_step__, \
+ .media = STEP_ ## media_step__ } \
+ }
+
+static const struct xe_pci_fake_data cases[] = {
+ PLATFORM_CASE(TIGERLAKE, B0),
+ PLATFORM_CASE(DG1, A0),
+ PLATFORM_CASE(DG1, B0),
+ PLATFORM_CASE(ALDERLAKE_S, A0),
+ PLATFORM_CASE(ALDERLAKE_S, B0),
+ PLATFORM_CASE(ALDERLAKE_S, C0),
+ PLATFORM_CASE(ALDERLAKE_S, D0),
+ PLATFORM_CASE(ALDERLAKE_P, A0),
+ PLATFORM_CASE(ALDERLAKE_P, B0),
+ PLATFORM_CASE(ALDERLAKE_P, C0),
+ SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
+ SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
+ SUBPLATFORM_CASE(DG2, G10, C0),
+ SUBPLATFORM_CASE(DG2, G11, B1),
+ SUBPLATFORM_CASE(DG2, G12, A1),
+ GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
+ GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
+ GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
+ GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
+ GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
+ GMDID_CASE(PANTHERLAKE, 3000, A0, 3000, A0),
+};
+
+KUNIT_ARRAY_PARAM(platform, cases, xe_pci_fake_data_desc);
+
+/**
+ * xe_pci_fake_data_gen_params - Generate struct xe_pci_fake_data parameters
+ * @test: test context object
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a struct xe_pci_fake_data parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
+ */
+const void *xe_pci_fake_data_gen_params(struct kunit *test, const void *prev, char *desc)
+{
+ return platform_gen_params(test, prev, desc);
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_gen_params);
+
+static const struct xe_device_desc *lookup_desc(enum xe_platform p)
+{
+ const struct xe_device_desc *desc;
+ const struct pci_device_id *ids;
+
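+	/* pciidlist is zero-terminated; each driver_data is a device desc. */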
+ for (ids = pciidlist; ids->driver_data; ids++) {
+ desc = (const void *)ids->driver_data;
+ if (desc->platform == p)
+ return desc;
+ }
+ return NULL;
+}
+
+static const struct xe_subplatform_desc *lookup_sub_desc(enum xe_platform p, enum xe_subplatform s)
+{
+ const struct xe_device_desc *desc = lookup_desc(p);
+ const struct xe_subplatform_desc *spd;
+
+ if (desc && desc->subplatforms)
+ for (spd = desc->subplatforms; spd->subplatform; spd++)
+ if (spd->subplatform == s)
+ return spd;
+ return NULL;
+}
+
+static const char *lookup_platform_name(enum xe_platform p)
+{
+ const struct xe_device_desc *desc = lookup_desc(p);
+
+ return desc ? desc->platform_name : "INVALID";
+}
+
+static const char *__lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s)
+{
+ const struct xe_subplatform_desc *desc = lookup_sub_desc(p, s);
+
+ return desc ? desc->name : "INVALID";
+}
+
+static const char *lookup_subplatform_name(enum xe_platform p, enum xe_subplatform s)
+{
+ return s == XE_SUBPLATFORM_NONE ? "" : __lookup_subplatform_name(p, s);
+}
+
+static const char *subplatform_prefix(enum xe_subplatform s)
+{
+ return s == XE_SUBPLATFORM_NONE ? "" : " ";
+}
+
+static const char *step_prefix(enum xe_step step)
+{
+ return step == STEP_NONE ? "" : " ";
+}
+
+static const char *step_name(enum xe_step step)
+{
+ return step == STEP_NONE ? "" : xe_step_name(step);
+}
+
+static const char *sriov_prefix(enum xe_sriov_mode mode)
+{
+ return mode <= XE_SRIOV_MODE_NONE ? "" : " ";
+}
+
+static const char *sriov_name(enum xe_sriov_mode mode)
+{
+ return mode <= XE_SRIOV_MODE_NONE ? "" : xe_sriov_mode_to_string(mode);
+}
+
+static const char *lookup_graphics_name(unsigned int verx100)
+{
+ const struct xe_ip *ip = find_graphics_ip(verx100);
+
+ return ip ? ip->name : "";
+}
+
+static const char *lookup_media_name(unsigned int verx100)
+{
+ const struct xe_ip *ip = find_media_ip(verx100);
+
+ return ip ? ip->name : "";
+}
+
+/**
+ * xe_pci_fake_data_desc - Describe struct xe_pci_fake_data parameter
+ * @param: the &struct xe_pci_fake_data parameter to describe
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a description of the struct xe_pci_fake_data parameter.
+ *
+ * It is tailored for use in parameterized KUnit tests where the parameter
+ * generator is based on struct xe_pci_fake_data arrays.
+ */
+void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc)
+{
+ if (param->graphics_verx100 || param->media_verx100)
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s %u.%02u(%s)%s%s %u.%02u(%s)%s%s%s%s",
+ lookup_platform_name(param->platform),
+ subplatform_prefix(param->subplatform),
+ lookup_subplatform_name(param->platform, param->subplatform),
+ param->graphics_verx100 / 100, param->graphics_verx100 % 100,
+ lookup_graphics_name(param->graphics_verx100),
+ step_prefix(param->step.graphics), step_name(param->step.graphics),
+ param->media_verx100 / 100, param->media_verx100 % 100,
+ lookup_media_name(param->media_verx100),
+ step_prefix(param->step.media), step_name(param->step.media),
+ sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode));
+ else
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s%s%s%s%s%s%s",
+ lookup_platform_name(param->platform),
+ subplatform_prefix(param->subplatform),
+ lookup_subplatform_name(param->platform, param->subplatform),
+ step_prefix(param->step.graphics), step_name(param->step.graphics),
+ sriov_prefix(param->sriov_mode), sriov_name(param->sriov_mode));
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_data_desc);
+
static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc)
{
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s",
param->verx100 / 100, param->verx100 % 100, param->name);
}
+/*
+ * Pre-GMDID Graphics and Media IP definitions.
+ *
+ * Mimic the way GMDID IPs are declared so the same
+ * param generator can be used for both.
+ */
+static const struct xe_ip pre_gmdid_graphics_ips[] = {
+ { 1200, "Xe_LP", &graphics_xelp },
+ { 1210, "Xe_LP+", &graphics_xelp },
+ { 1255, "Xe_HPG", &graphics_xehpg },
+ { 1260, "Xe_HPC", &graphics_xehpc },
+};
+
+static const struct xe_ip pre_gmdid_media_ips[] = {
+ { 1200, "Xe_M", &media_xem },
+ { 1255, "Xe_HPM", &media_xem },
+};
+
+KUNIT_ARRAY_PARAM(pre_gmdid_graphics_ip, pre_gmdid_graphics_ips, xe_ip_kunit_desc);
+KUNIT_ARRAY_PARAM(pre_gmdid_media_ip, pre_gmdid_media_ips, xe_ip_kunit_desc);
+
KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc);
KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc);
@@ -35,6 +243,7 @@ KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);
/**
* xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters
+ * @test: test context object
* @prev: the pointer to the previous parameter to iterate from or NULL
* @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
@@ -44,14 +253,22 @@ KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc);
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
-const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc)
+const void *xe_pci_graphics_ip_gen_param(struct kunit *test, const void *prev, char *desc)
{
- return graphics_ip_gen_params(prev, desc);
+ const void *next = pre_gmdid_graphics_ip_gen_params(test, prev, desc);
+
+ if (next)
+ return next;
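+	/*
+	 * The pre-GMDID list is exhausted; if @prev still points into it,
+	 * restart the GMDID-based generator from its first entry.
+	 */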
+ if (is_insidevar(prev, pre_gmdid_graphics_ips))
+ prev = NULL;
+
+ return graphics_ip_gen_params(test, prev, desc);
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
/**
* xe_pci_media_ip_gen_param - Generate media struct xe_ip parameters
+ * @test: test context object
* @prev: the pointer to the previous parameter to iterate from or NULL
* @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
@@ -61,14 +278,22 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
-const void *xe_pci_media_ip_gen_param(const void *prev, char *desc)
+const void *xe_pci_media_ip_gen_param(struct kunit *test, const void *prev, char *desc)
{
- return media_ip_gen_params(prev, desc);
+ const void *next = pre_gmdid_media_ip_gen_params(test, prev, desc);
+
+ if (next)
+ return next;
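+	/* Same hand-over from pre-GMDID to GMDID entries as for graphics. */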
+ if (is_insidevar(prev, pre_gmdid_media_ips))
+ prev = NULL;
+
+ return media_ip_gen_params(test, prev, desc);
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
/**
* xe_pci_id_gen_param - Generate struct pci_device_id parameters
+ * @test: test context object
* @prev: the pointer to the previous parameter to iterate from or NULL
* @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
@@ -78,27 +303,34 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
*
* Return: pointer to the next parameter or NULL if no more parameters
*/
-const void *xe_pci_id_gen_param(const void *prev, char *desc)
+const void *xe_pci_id_gen_param(struct kunit *test, const void *prev, char *desc)
{
- const struct pci_device_id *pci = pci_id_gen_params(prev, desc);
+ const struct pci_device_id *pci = pci_id_gen_params(test, prev, desc);
return pci->driver_data ? pci : NULL;
}
EXPORT_SYMBOL_IF_KUNIT(xe_pci_id_gen_param);
-static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
- u32 *ver, u32 *revid)
+static int fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
+ u32 *ver, u32 *revid)
{
struct kunit *test = kunit_get_current_test();
struct xe_pci_fake_data *data = test->priv;
if (type == GMDID_MEDIA) {
*ver = data->media_verx100;
- *revid = xe_step_to_gmdid(data->media_step);
+ *revid = xe_step_to_gmdid(data->step.media);
} else {
*ver = data->graphics_verx100;
- *revid = xe_step_to_gmdid(data->graphics_step);
+ *revid = xe_step_to_gmdid(data->step.graphics);
}
+
+ return 0;
+}
+
+static void fake_xe_info_probe_tile_count(struct xe_device *xe)
+{
+ /* Nothing to do, just use the statically defined value. */
}
int xe_pci_fake_device_init(struct xe_device *xe)
@@ -138,6 +370,8 @@ done:
data->sriov_mode : XE_SRIOV_MODE_NONE;
kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid);
+ kunit_activate_static_stub(test, xe_info_probe_tile_count,
+ fake_xe_info_probe_tile_count);
xe_info_init_early(xe, desc, subplatform_desc);
xe_info_init(xe, desc);
@@ -148,6 +382,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
/**
* xe_pci_live_device_gen_param - Helper to iterate Xe devices as KUnit parameters
+ * @test: test context object
* @prev: the previously returned value, or NULL for the first iteration
* @desc: the buffer for a parameter name
*
@@ -159,7 +394,7 @@ EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init);
* Return: pointer to the next &struct xe_device ready to be used as a parameter
* or NULL if there are no more Xe devices on the system.
*/
-const void *xe_pci_live_device_gen_param(const void *prev, char *desc)
+const void *xe_pci_live_device_gen_param(struct kunit *test, const void *prev, char *desc)
{
const struct xe_device *xe = prev;
struct device *dev = xe ? xe->drm.dev : NULL;
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index 37b344df2dc3..4d10a7e2b570 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -44,21 +44,27 @@ static void check_media_ip(struct kunit *test)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void check_platform_gt_count(struct kunit *test)
+static void check_platform_desc(struct kunit *test)
{
const struct pci_device_id *pci = test->param_value;
const struct xe_device_desc *desc =
(const struct xe_device_desc *)pci->driver_data;
- int max_gt = desc->max_gt_per_tile;
- KUNIT_ASSERT_GT(test, max_gt, 0);
- KUNIT_ASSERT_LE(test, max_gt, XE_MAX_GT_PER_TILE);
+ KUNIT_EXPECT_GT(test, desc->dma_mask_size, 0);
+
+ KUNIT_EXPECT_GT(test, (unsigned int)desc->max_gt_per_tile, 0);
+ KUNIT_EXPECT_LE(test, (unsigned int)desc->max_gt_per_tile, XE_MAX_GT_PER_TILE);
+
+ KUNIT_EXPECT_GT(test, desc->va_bits, 0);
+ KUNIT_EXPECT_LE(test, desc->va_bits, 64);
+
+ KUNIT_EXPECT_GT(test, desc->vm_max_level, 0);
}
static struct kunit_case xe_pci_tests[] = {
KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param),
KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param),
- KUNIT_CASE_PARAM(check_platform_gt_count, xe_pci_id_gen_param),
+ KUNIT_CASE_PARAM(check_platform_desc, xe_pci_id_gen_param),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index ce4d2b86b778..30505d1cbefc 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -7,9 +7,11 @@
#define _XE_PCI_TEST_H_
#include <linux/types.h>
+#include <kunit/test.h>
#include "xe_platform_types.h"
#include "xe_sriov_types.h"
+#include "xe_step_types.h"
struct xe_device;
@@ -17,17 +19,18 @@ struct xe_pci_fake_data {
enum xe_sriov_mode sriov_mode;
enum xe_platform platform;
enum xe_subplatform subplatform;
+ struct xe_step_info step;
u32 graphics_verx100;
u32 media_verx100;
- u32 graphics_step;
- u32 media_step;
};
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_fake_data_gen_params(struct kunit *test, const void *prev, char *desc);
+void xe_pci_fake_data_desc(const struct xe_pci_fake_data *param, char *desc);
-const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc);
-const void *xe_pci_media_ip_gen_param(const void *prev, char *desc);
-const void *xe_pci_id_gen_param(const void *prev, char *desc);
-const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
+const void *xe_pci_graphics_ip_gen_param(struct kunit *test, const void *prev, char *desc);
+const void *xe_pci_media_ip_gen_param(struct kunit *test, const void *prev, char *desc);
+const void *xe_pci_id_gen_param(struct kunit *test, const void *prev, char *desc);
+const void *xe_pci_live_device_gen_param(struct kunit *test, const void *prev, char *desc);
#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
index b0254b014fe4..d2255a59e58f 100644
--- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c
@@ -48,12 +48,14 @@ struct rtp_test_case {
const struct xe_rtp_entry *entries;
};
-static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
+static bool match_yes(const struct xe_device *xe, const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
{
return true;
}
-static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe)
+static bool match_no(const struct xe_device *xe, const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
{
return false;
}
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
index c96d1fe34151..49d191043dfa 100644
--- a/drivers/gpu/drm/xe/tests/xe_wa_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -15,86 +15,10 @@
#include "xe_tuning.h"
#include "xe_wa.h"
-struct platform_test_case {
- const char *name;
- enum xe_platform platform;
- enum xe_subplatform subplatform;
- u32 graphics_verx100;
- u32 media_verx100;
- struct xe_step_info step;
-};
-
-#define PLATFORM_CASE(platform__, graphics_step__) \
- { \
- .name = #platform__ " (" #graphics_step__ ")", \
- .platform = XE_ ## platform__, \
- .subplatform = XE_SUBPLATFORM_NONE, \
- .step = { .graphics = STEP_ ## graphics_step__ } \
- }
-
-
-#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__) \
- { \
- .name = #platform__ "_" #subplatform__ " (" #graphics_step__ ")", \
- .platform = XE_ ## platform__, \
- .subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__, \
- .step = { .graphics = STEP_ ## graphics_step__ } \
- }
-
-#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__, \
- media_verx100__, media_step__) \
- { \
- .name = #platform__ " (g:" #graphics_step__ ", m:" #media_step__ ")",\
- .platform = XE_ ## platform__, \
- .subplatform = XE_SUBPLATFORM_NONE, \
- .graphics_verx100 = graphics_verx100__, \
- .media_verx100 = media_verx100__, \
- .step = { .graphics = STEP_ ## graphics_step__, \
- .media = STEP_ ## media_step__ } \
- }
-
-static const struct platform_test_case cases[] = {
- PLATFORM_CASE(TIGERLAKE, B0),
- PLATFORM_CASE(DG1, A0),
- PLATFORM_CASE(DG1, B0),
- PLATFORM_CASE(ALDERLAKE_S, A0),
- PLATFORM_CASE(ALDERLAKE_S, B0),
- PLATFORM_CASE(ALDERLAKE_S, C0),
- PLATFORM_CASE(ALDERLAKE_S, D0),
- PLATFORM_CASE(ALDERLAKE_P, A0),
- PLATFORM_CASE(ALDERLAKE_P, B0),
- PLATFORM_CASE(ALDERLAKE_P, C0),
- SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
- SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
- SUBPLATFORM_CASE(DG2, G10, C0),
- SUBPLATFORM_CASE(DG2, G11, B1),
- SUBPLATFORM_CASE(DG2, G12, A1),
- GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
- GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
- GMDID_CASE(METEORLAKE, 1274, A0, 1300, A0),
- GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
- GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
- GMDID_CASE(BATTLEMAGE, 2001, A0, 1301, A1),
-};
-
-static void platform_desc(const struct platform_test_case *t, char *desc)
-{
- strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
-}
-
-KUNIT_ARRAY_PARAM(platform, cases, platform_desc);
-
static int xe_wa_test_init(struct kunit *test)
{
- const struct platform_test_case *param = test->param_value;
- struct xe_pci_fake_data data = {
- .platform = param->platform,
- .subplatform = param->subplatform,
- .graphics_verx100 = param->graphics_verx100,
- .media_verx100 = param->media_verx100,
- .graphics_step = param->step.graphics,
- .media_step = param->step.media,
- };
+ const struct xe_pci_fake_data *param = test->param_value;
+ struct xe_pci_fake_data data = *param;
struct xe_device *xe;
struct device *dev;
int ret;
@@ -119,13 +43,6 @@ static int xe_wa_test_init(struct kunit *test)
return 0;
}
-static void xe_wa_test_exit(struct kunit *test)
-{
- struct xe_device *xe = test->priv;
-
- drm_kunit_helper_free_device(test, xe->drm.dev);
-}
-
static void xe_wa_gt(struct kunit *test)
{
struct xe_device *xe = test->priv;
@@ -143,14 +60,13 @@ static void xe_wa_gt(struct kunit *test)
}
static struct kunit_case xe_wa_tests[] = {
- KUNIT_CASE_PARAM(xe_wa_gt, platform_gen_params),
+ KUNIT_CASE_PARAM(xe_wa_gt, xe_pci_fake_data_gen_params),
{}
};
static struct kunit_suite xe_rtp_test_suite = {
.name = "xe_wa",
.init = xe_wa_test_init,
- .exit = xe_wa_test_exit,
.test_cases = xe_wa_tests,
};
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index 68fe70ce2be3..a818eaa05b7d 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -12,6 +12,7 @@
#include "xe_gt_types.h"
#include "xe_step.h"
+#include "xe_vram.h"
/**
* DOC: Xe Asserts
@@ -145,7 +146,8 @@
const struct xe_tile *__tile = (tile); \
char __buf[10] __maybe_unused; \
xe_assert_msg(tile_to_xe(__tile), condition, "tile: %u VRAM %s\n" msg, \
- __tile->id, ({ string_get_size(__tile->mem.vram.actual_physical_size, 1, \
+ __tile->id, ({ string_get_size( \
+ xe_vram_region_actual_physical_size(__tile->mem.vram), 1, \
STRING_UNITS_2, __buf, sizeof(__buf)); __buf; }), ## arg); \
})
diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c
index 5ce0e26822f2..6d20229c11de 100644
--- a/drivers/gpu/drm/xe/xe_bb.c
+++ b/drivers/gpu/drm/xe/xe_bb.c
@@ -60,6 +60,41 @@ err:
return ERR_PTR(err);
}
+struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords,
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id)
+{
+ struct xe_bb *bb = kmalloc(sizeof(*bb), GFP_KERNEL);
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_sa_manager *bb_pool;
+ int err;
+
+ if (!bb)
+ return ERR_PTR(-ENOMEM);
+	/*
+	 * We need to allocate space for the requested number of dwords and
+	 * one additional MI_BATCH_BUFFER_END dword. Since the whole SA
+	 * is submitted to HW, we need to make sure that the last instruction
+	 * is not overwritten when the last chunk of the SA is allocated for
+	 * a BB. So, this extra DW acts as a guard here.
+	 */
+
+ bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
+ bb->bo = xe_sa_bo_new(bb_pool, 4 * (dwords + 1));
+
+ if (IS_ERR(bb->bo)) {
+ err = PTR_ERR(bb->bo);
+ goto err;
+ }
+
+ bb->cs = xe_sa_bo_cpu_addr(bb->bo);
+ bb->len = 0;
+
+ return bb;
+err:
+ kfree(bb);
+ return ERR_PTR(err);
+}
+
static struct xe_sched_job *
__xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
{
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h
index b5cc65506696..2a8adc9a6dee 100644
--- a/drivers/gpu/drm/xe/xe_bb.h
+++ b/drivers/gpu/drm/xe/xe_bb.h
@@ -13,8 +13,11 @@ struct dma_fence;
struct xe_gt;
struct xe_exec_queue;
struct xe_sched_job;
+enum xe_sriov_vf_ccs_rw_ctxs;
struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm);
+struct xe_bb *xe_bb_ccs_new(struct xe_gt *gt, u32 dwords,
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id);
struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
struct xe_bb *bb);
struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index bae7ff2e5927..b0bd31d14bb9 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -9,6 +9,7 @@
#include <linux/nospec.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_backup.h>
@@ -33,9 +34,12 @@
#include "xe_pxp.h"
#include "xe_res_cursor.h"
#include "xe_shrinker.h"
+#include "xe_sriov_vf_ccs.h"
+#include "xe_tile.h"
#include "xe_trace_bo.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_vm.h"
+#include "xe_vram_types.h"
const char *const xe_mem_type_to_name[TTM_NUM_MEM_TYPES] = {
[XE_PL_SYSTEM] = "system",
@@ -79,6 +83,10 @@ static struct ttm_placement tt_placement = {
.placement = tt_placement_flags,
};
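+/*
+ * Iterate over each VRAM placement flag set in @bo_flags__, assigning
+ * the current flag to @bit__ on every iteration.
+ */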
+#define for_each_set_bo_vram_flag(bit__, bo_flags__) \
+ for (unsigned int __bit_tmp = BIT(0); __bit_tmp <= XE_BO_FLAG_VRAM_MASK; __bit_tmp <<= 1) \
+ for_each_if(((bit__) = __bit_tmp) & (bo_flags__) & XE_BO_FLAG_VRAM_MASK)
+
bool mem_type_is_vram(u32 mem_type)
{
return mem_type >= XE_PL_VRAM0 && mem_type != XE_PL_STOLEN;
@@ -200,6 +208,8 @@ static bool force_contiguous(u32 bo_flags)
else if (bo_flags & XE_BO_FLAG_PINNED &&
!(bo_flags & XE_BO_FLAG_PINNED_LATE_RESTORE))
return true; /* needs vmap */
+ else if (bo_flags & XE_BO_FLAG_CPU_ADDR_MIRROR)
+ return true;
/*
* For eviction / restore on suspend / resume objects pinned in VRAM
@@ -209,6 +219,27 @@ static bool force_contiguous(u32 bo_flags)
bo_flags & XE_BO_FLAG_PINNED;
}
+static u8 vram_bo_flag_to_tile_id(struct xe_device *xe, u32 vram_bo_flag)
+{
+ xe_assert(xe, vram_bo_flag & XE_BO_FLAG_VRAM_MASK);
+ xe_assert(xe, (vram_bo_flag & (vram_bo_flag - 1)) == 0);
+
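+	/*
+	 * With exactly one VRAM bit set (asserted above), shifting so that
+	 * XE_BO_FLAG_VRAM0 lands on bit 1 makes __ffs() - 1 yield the tile
+	 * id: 0 for VRAM0, 1 for VRAM1, and so on.
+	 */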
+ return __ffs(vram_bo_flag >> (__ffs(XE_BO_FLAG_VRAM0) - 1)) - 1;
+}
+
+static u32 bo_vram_flags_to_vram_placement(struct xe_device *xe, u32 bo_flags, u32 vram_flag,
+ enum ttm_bo_type type)
+{
+ u8 tile_id = vram_bo_flag_to_tile_id(xe, vram_flag);
+
+ xe_assert(xe, tile_id < xe->info.tile_count);
+
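+	/* Kernel bos use the reserved kernel VRAM region unless forced to user VRAM. */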
+ if (type == ttm_bo_type_kernel && !(bo_flags & XE_BO_FLAG_FORCE_USER_VRAM))
+ return xe->tiles[tile_id].mem.kernel_vram->placement;
+ else
+ return xe->tiles[tile_id].mem.vram->placement;
+}
+
static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
{
@@ -241,12 +272,15 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
}
static void try_add_vram(struct xe_device *xe, struct xe_bo *bo,
- u32 bo_flags, u32 *c)
+ u32 bo_flags, enum ttm_bo_type type, u32 *c)
{
- if (bo_flags & XE_BO_FLAG_VRAM0)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM0, c);
- if (bo_flags & XE_BO_FLAG_VRAM1)
- add_vram(xe, bo, bo->placements, bo_flags, XE_PL_VRAM1, c);
+ u32 vram_flag;
+
+ for_each_set_bo_vram_flag(vram_flag, bo_flags) {
+ u32 pl = bo_vram_flags_to_vram_placement(xe, bo_flags, vram_flag, type);
+
+ add_vram(xe, bo, bo->placements, bo_flags, pl, c);
+ }
}
static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
@@ -265,11 +299,11 @@ static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
}
static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
- u32 bo_flags)
+ u32 bo_flags, enum ttm_bo_type type)
{
u32 c = 0;
- try_add_vram(xe, bo, bo_flags, &c);
+ try_add_vram(xe, bo, bo_flags, type, &c);
try_add_system(xe, bo, bo_flags, &c);
try_add_stolen(xe, bo, bo_flags, &c);
@@ -285,10 +319,10 @@ static int __xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
}
int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
- u32 bo_flags)
+ u32 bo_flags, enum ttm_bo_type type)
{
xe_bo_assert_held(bo);
- return __xe_bo_placement_for_flags(xe, bo, bo_flags);
+ return __xe_bo_placement_for_flags(xe, bo, bo_flags, type);
}
static void xe_evict_flags(struct ttm_buffer_object *tbo,
@@ -576,6 +610,23 @@ static bool xe_ttm_resource_visible(struct ttm_resource *mem)
return vres->used_visible_size == mem->size;
}
+/**
+ * xe_bo_is_visible_vram - check if BO is placed entirely in visible VRAM.
+ * @bo: The BO
+ *
+ * This function checks whether a given BO resides entirely in memory visible from the CPU.
+ *
+ * Returns: true if the BO is entirely visible, false otherwise.
+ *
+ */
+bool xe_bo_is_visible_vram(struct xe_bo *bo)
+{
+ if (drm_WARN_ON(bo->ttm.base.dev, !xe_bo_is_vram(bo)))
+ return false;
+
+ return xe_ttm_resource_visible(bo->ttm.resource);
+}
+
static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
@@ -966,6 +1017,20 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
dma_fence_put(fence);
xe_pm_runtime_put(xe);
+	/*
+	 * CCS metadata is migrated from TT -> SMEM, so detach the
+	 * BBs from the BO as they are no longer needed.
+	 */
+ if (IS_VF_CCS_READY(xe) && old_mem_type == XE_PL_TT &&
+ new_mem->mem_type == XE_PL_SYSTEM)
+ xe_sriov_vf_ccs_detach_bo(bo);
+
+ if (IS_VF_CCS_READY(xe) &&
+ ((move_lacks_source && new_mem->mem_type == XE_PL_TT) ||
+ (old_mem_type == XE_PL_SYSTEM && new_mem->mem_type == XE_PL_TT)) &&
+ handle_system_ccs)
+ ret = xe_sriov_vf_ccs_attach_bo(bo);
+
out:
if ((!ttm_bo->resource || ttm_bo->resource->mem_type == XE_PL_SYSTEM) &&
ttm_bo->ttm) {
@@ -976,6 +1041,9 @@ out:
if (timeout < 0)
ret = timeout;
+ if (IS_VF_CCS_READY(xe))
+ xe_sriov_vf_ccs_detach_bo(bo);
+
xe_tt_unmap_sg(xe, ttm_bo->ttm);
}
@@ -1120,42 +1188,47 @@ out_unref:
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo)
{
struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
struct xe_bo *backup;
int ret = 0;
- xe_bo_lock(bo, false);
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ xe_assert(xe, !ret);
+ xe_assert(xe, !bo->backup_obj);
- xe_assert(xe, !bo->backup_obj);
+ /*
+ * Since this is called from the PM notifier we might have raced with
+ * someone unpinning this after we dropped the pinned list lock and
+ * grabbing the above bo lock.
+ */
+ if (!xe_bo_is_pinned(bo))
+ break;
- /*
- * Since this is called from the PM notifier we might have raced with
- * someone unpinning this after we dropped the pinned list lock and
- * grabbing the above bo lock.
- */
- if (!xe_bo_is_pinned(bo))
- goto out_unlock_bo;
+ if (!xe_bo_is_vram(bo))
+ break;
- if (!xe_bo_is_vram(bo))
- goto out_unlock_bo;
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ break;
- if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
- goto out_unlock_bo;
+ backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED, &exec);
+ if (IS_ERR(backup)) {
+ drm_exec_retry_on_contention(&exec);
+ ret = PTR_ERR(backup);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ break;
+ }
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo),
- DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
- if (IS_ERR(backup)) {
- ret = PTR_ERR(backup);
- goto out_unlock_bo;
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ ttm_bo_pin(&backup->ttm);
+ bo->backup_obj = backup;
}
- backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
- ttm_bo_pin(&backup->ttm);
- bo->backup_obj = backup;
-
-out_unlock_bo:
- xe_bo_unlock(bo);
return ret;
}
@@ -1181,57 +1254,12 @@ int xe_bo_notifier_unprepare_pinned(struct xe_bo *bo)
return 0;
}
-/**
- * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
- * @bo: The buffer object to move.
- *
- * On successful completion, the object memory will be moved to system memory.
- *
- * This is needed to for special handling of pinned VRAM object during
- * suspend-resume.
- *
- * Return: 0 on success. Negative error code on failure.
- */
-int xe_bo_evict_pinned(struct xe_bo *bo)
+static int xe_bo_evict_pinned_copy(struct xe_bo *bo, struct xe_bo *backup)
{
- struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
- struct xe_bo *backup = bo->backup_obj;
- bool backup_created = false;
+ struct xe_device *xe = xe_bo_device(bo);
bool unmap = false;
int ret = 0;
- xe_bo_lock(bo, false);
-
- if (WARN_ON(!bo->ttm.resource)) {
- ret = -EINVAL;
- goto out_unlock_bo;
- }
-
- if (WARN_ON(!xe_bo_is_pinned(bo))) {
- ret = -EINVAL;
- goto out_unlock_bo;
- }
-
- if (!xe_bo_is_vram(bo))
- goto out_unlock_bo;
-
- if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
- goto out_unlock_bo;
-
- if (!backup) {
- backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv,
- NULL, xe_bo_size(bo),
- DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
- XE_BO_FLAG_PINNED);
- if (IS_ERR(backup)) {
- ret = PTR_ERR(backup);
- goto out_unlock_bo;
- }
- backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
- backup_created = true;
- }
-
if (xe_bo_is_user(bo) || (bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE)) {
struct xe_migrate *migrate;
struct dma_fence *fence;
@@ -1241,14 +1269,11 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
else
migrate = mem_type_to_migrate(xe, bo->ttm.resource->mem_type);
+ xe_assert(xe, bo->ttm.base.resv == backup->ttm.base.resv);
ret = dma_resv_reserve_fences(bo->ttm.base.resv, 1);
if (ret)
goto out_backup;
- ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
- if (ret)
- goto out_backup;
-
fence = xe_migrate_copy(migrate, bo, backup, bo->ttm.resource,
backup->ttm.resource, false);
if (IS_ERR(fence)) {
@@ -1258,8 +1283,6 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
dma_resv_add_fence(bo->ttm.base.resv, fence,
DMA_RESV_USAGE_KERNEL);
- dma_resv_add_fence(backup->ttm.base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
} else {
ret = xe_bo_vmap(backup);
@@ -1269,7 +1292,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (iosys_map_is_null(&bo->vmap)) {
ret = xe_bo_vmap(bo);
if (ret)
- goto out_backup;
+ goto out_vunmap;
unmap = true;
}
@@ -1279,15 +1302,78 @@ int xe_bo_evict_pinned(struct xe_bo *bo)
if (!bo->backup_obj)
bo->backup_obj = backup;
-
-out_backup:
+out_vunmap:
xe_bo_vunmap(backup);
- if (ret && backup_created)
- xe_bo_put(backup);
-out_unlock_bo:
+out_backup:
if (unmap)
xe_bo_vunmap(bo);
- xe_bo_unlock(bo);
+
+ return ret;
+}
+
+/**
+ * xe_bo_evict_pinned() - Evict a pinned VRAM object to system memory
+ * @bo: The buffer object to move.
+ *
+ * On successful completion, the object memory will be moved to system memory.
+ *
+ * This is needed for special handling of pinned VRAM objects during
+ * suspend-resume.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_bo_evict_pinned(struct xe_bo *bo)
+{
+ struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *backup = bo->backup_obj;
+ bool backup_created = false;
+ int ret = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.exclusive = true}, ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ xe_assert(xe, !ret);
+
+ if (WARN_ON(!bo->ttm.resource)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (WARN_ON(!xe_bo_is_pinned(bo))) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!xe_bo_is_vram(bo))
+ break;
+
+ if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE)
+ break;
+
+ if (!backup) {
+ backup = xe_bo_init_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL,
+ xe_bo_size(bo),
+ DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS |
+ XE_BO_FLAG_PINNED, &exec);
+ if (IS_ERR(backup)) {
+ drm_exec_retry_on_contention(&exec);
+ ret = PTR_ERR(backup);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ break;
+ }
+ backup->parent_obj = xe_bo_get(bo); /* Released by bo_destroy */
+ backup_created = true;
+ }
+
+ ret = xe_bo_evict_pinned_copy(bo, backup);
+ }
+
+ if (ret && backup_created)
+ xe_bo_put(backup);
+
return ret;
}
@@ -1337,10 +1423,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
if (ret)
goto out_unlock_bo;
- ret = dma_resv_reserve_fences(backup->ttm.base.resv, 1);
- if (ret)
- goto out_unlock_bo;
-
fence = xe_migrate_copy(migrate, backup, bo,
backup->ttm.resource, bo->ttm.resource,
false);
@@ -1351,8 +1433,6 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
dma_resv_add_fence(bo->ttm.base.resv, fence,
DMA_RESV_USAGE_KERNEL);
- dma_resv_add_fence(backup->ttm.base.resv, fence,
- DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
} else {
ret = xe_bo_vmap(backup);
@@ -1503,9 +1583,14 @@ static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
{
+ struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
+
if (!xe_bo_is_xe_bo(ttm_bo))
return;
+ if (IS_VF_CCS_READY(ttm_to_xe_device(ttm_bo->bdev)))
+ xe_sriov_vf_ccs_detach_bo(bo);
+
/*
* Object is idle and about to be destroyed. Release the
* dma-buf attachment.
@@ -1567,7 +1652,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo,
if (!mem_type_is_vram(ttm_bo->resource->mem_type))
return -EIO;
- if (!xe_ttm_resource_visible(ttm_bo->resource) || len >= SZ_16K) {
+ if (!xe_bo_is_visible_vram(bo) || len >= SZ_16K) {
struct xe_migrate *migrate =
mem_type_to_migrate(xe, ttm_bo->resource->mem_type);
@@ -1670,7 +1755,7 @@ static void xe_gem_object_free(struct drm_gem_object *obj)
* refcount directly if needed.
*/
__xe_bo_vunmap(gem_to_xe_bo(obj));
- ttm_bo_put(container_of(obj, struct ttm_buffer_object, base));
+ ttm_bo_fini(container_of(obj, struct ttm_buffer_object, base));
}
static void xe_gem_object_close(struct drm_gem_object *obj,
@@ -1687,50 +1772,258 @@ static void xe_gem_object_close(struct drm_gem_object *obj,
}
}
-static vm_fault_t xe_gem_fault(struct vm_fault *vmf)
+static bool should_migrate_to_smem(struct xe_bo *bo)
+{
+ /*
+ * NOTE: The following atomic checks are platform-specific. For example,
+ * if a device supports CXL atomics, these may not be necessary or
+ * may behave differently.
+ */
+
+ return bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL ||
+ bo->attr.atomic_access == DRM_XE_ATOMIC_CPU;
+}
+
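+/*
+ * Wait for the bo's DMA_RESV_USAGE_KERNEL fences to signal, honouring
+ * ctx->no_wait_gpu by testing instead of blocking. Returns 0 when idle,
+ * -EBUSY when still busy, or a negative error from the wait.
+ */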
+static int xe_bo_wait_usage_kernel(struct xe_bo *bo, struct ttm_operation_ctx *ctx)
+{
+ long lerr;
+
+ if (ctx->no_wait_gpu)
+ return dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL) ?
+ 0 : -EBUSY;
+
+ lerr = dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ ctx->interruptible, MAX_SCHEDULE_TIMEOUT);
+ if (lerr < 0)
+ return lerr;
+ if (lerr == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Populate the bo if swapped out, or migrate if the access mode requires that. */
+static int xe_bo_fault_migrate(struct xe_bo *bo, struct ttm_operation_ctx *ctx,
+ struct drm_exec *exec)
+{
+ struct ttm_buffer_object *tbo = &bo->ttm;
+ int err = 0;
+
+ if (ttm_manager_type(tbo->bdev, tbo->resource->mem_type)->use_tt) {
+ err = xe_bo_wait_usage_kernel(bo, ctx);
+ if (!err)
+ err = ttm_bo_populate(&bo->ttm, ctx);
+ } else if (should_migrate_to_smem(bo)) {
+ xe_assert(xe_bo_device(bo), bo->flags & XE_BO_FLAG_SYSTEM);
+ err = xe_bo_migrate(bo, XE_PL_TT, ctx, exec);
+ }
+
+ return err;
+}
+
+/* Call into TTM to populate PTEs, and register bo for PTE removal on runtime suspend. */
+static vm_fault_t __xe_bo_cpu_fault(struct vm_fault *vmf, struct xe_device *xe, struct xe_bo *bo)
+{
+ vm_fault_t ret;
+
+ trace_xe_bo_cpu_fault(bo);
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
+	/*
+	 * By the time TTM is called to insert PTEs, no blocking conditions
+	 * should remain; otherwise TTM may drop locks and return
+	 * VM_FAULT_RETRY, which the assert below rules out.
+	 */
+ xe_assert(xe, ret != VM_FAULT_RETRY);
+
+ if (ret == VM_FAULT_NOPAGE &&
+ mem_type_is_vram(bo->ttm.resource->mem_type)) {
+ mutex_lock(&xe->mem_access.vram_userfault.lock);
+ if (list_empty(&bo->vram_userfault_link))
+ list_add(&bo->vram_userfault_link,
+ &xe->mem_access.vram_userfault.list);
+ mutex_unlock(&xe->mem_access.vram_userfault.lock);
+ }
+
+ return ret;
+}
+
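+/*
+ * Map an errno from the fault path onto a vm_fault_t: interrupted or
+ * transient errors become VM_FAULT_NOPAGE so the fault is retried,
+ * memory pressure becomes VM_FAULT_OOM, anything else is SIGBUS.
+ */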
+static vm_fault_t xe_err_to_fault_t(int err)
+{
+ switch (err) {
+ case 0:
+ case -EINTR:
+ case -ERESTARTSYS:
+ case -EAGAIN:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ case -ENOSPC:
+ return VM_FAULT_OOM;
+ default:
+ break;
+ }
+ return VM_FAULT_SIGBUS;
+}
+
+static bool xe_ttm_bo_is_imported(struct ttm_buffer_object *tbo)
+{
+ dma_resv_assert_held(tbo->base.resv);
+
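+	/*
+	 * dma-buf imports carry TTM_TT_FLAG_EXTERNAL without the
+	 * TTM_TT_FLAG_EXTERNAL_MAPPABLE override, hence masking both bits.
+	 */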
+ return tbo->ttm &&
+ (tbo->ttm->page_flags & (TTM_TT_FLAG_EXTERNAL | TTM_TT_FLAG_EXTERNAL_MAPPABLE)) ==
+ TTM_TT_FLAG_EXTERNAL;
+}
+
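+/*
+ * Attempt to service the fault without blocking: runtime PM is only
+ * taken if already active, the validation context is entered with
+ * .no_block set and the reservation is trylocked, falling back to
+ * VM_FAULT_RETRY whenever any of those would have to wait.
+ */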
+static vm_fault_t xe_bo_cpu_fault_fastpath(struct vm_fault *vmf, struct xe_device *xe,
+ struct xe_bo *bo, bool needs_rpm)
+{
+ struct ttm_buffer_object *tbo = &bo->ttm;
+ vm_fault_t ret = VM_FAULT_RETRY;
+ struct xe_validation_ctx ctx;
+ struct ttm_operation_ctx tctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ .gfp_retry_mayfail = true,
+
+ };
+ int err;
+
+ if (needs_rpm && !xe_pm_runtime_get_if_active(xe))
+ return VM_FAULT_RETRY;
+
+ err = xe_validation_ctx_init(&ctx, &xe->val, NULL,
+ (struct xe_val_flags) {
+ .interruptible = true,
+ .no_block = true
+ });
+ if (err)
+ goto out_pm;
+
+ if (!dma_resv_trylock(tbo->base.resv))
+ goto out_validation;
+
+ if (xe_ttm_bo_is_imported(tbo)) {
+ ret = VM_FAULT_SIGBUS;
+ drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
+ goto out_unlock;
+ }
+
+ err = xe_bo_fault_migrate(bo, &tctx, NULL);
+ if (err) {
+		/* Keep VM_FAULT_RETRY for -ENOMEM/-ENOSPC/-EBUSY; convert anything else. */
+ if (err != -ENOMEM && err != -ENOSPC && err != -EBUSY)
+ ret = xe_err_to_fault_t(err);
+ goto out_unlock;
+ }
+
+ if (dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL))
+ ret = __xe_bo_cpu_fault(vmf, xe, bo);
+
+out_unlock:
+ dma_resv_unlock(tbo->base.resv);
+out_validation:
+ xe_validation_ctx_fini(&ctx);
+out_pm:
+ if (needs_rpm)
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static vm_fault_t xe_bo_cpu_fault(struct vm_fault *vmf)
{
struct ttm_buffer_object *tbo = vmf->vma->vm_private_data;
struct drm_device *ddev = tbo->base.dev;
struct xe_device *xe = to_xe_device(ddev);
struct xe_bo *bo = ttm_to_xe_bo(tbo);
bool needs_rpm = bo->flags & XE_BO_FLAG_VRAM_MASK;
+ bool retry_after_wait = false;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
vm_fault_t ret;
+ int err = 0;
int idx;
- if (needs_rpm)
- xe_pm_runtime_get(xe);
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
- ret = ttm_bo_vm_reserve(tbo, vmf);
- if (ret)
+ ret = xe_bo_cpu_fault_fastpath(vmf, xe, bo, needs_rpm);
+ if (ret != VM_FAULT_RETRY)
goto out;
- if (drm_dev_enter(ddev, &idx)) {
- trace_xe_bo_cpu_fault(bo);
-
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
- drm_dev_exit(idx);
+ if (fault_flag_allow_retry_first(vmf->flags)) {
+ if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
+ goto out;
+ retry_after_wait = true;
+ xe_bo_get(bo);
+ mmap_read_unlock(vmf->vma->vm_mm);
} else {
- ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+ ret = VM_FAULT_NOPAGE;
}
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- goto out;
/*
- * ttm_bo_vm_reserve() already has dma_resv_lock.
+ * The fastpath failed and we were not required to return and retry immediately.
+ * We're now running in one of two modes:
+ *
+	 * 1) retry_after_wait == true: The mmap_read_lock() has been dropped and we
+	 * only resolve the blocking waits here; the fault itself can't be serviced
+	 * without that lock. After the fault is retried, the aim is for the fastpath
+	 * to succeed, but it may still fail since the bo lock is dropped in between.
+ *
+ * 2) retry_after_wait == false: The fastpath failed, typically even after
+ * a retry. Do whatever's necessary to resolve the fault.
+ *
+ * This construct is recommended to avoid excessive waits under the mmap_lock.
*/
- if (ret == VM_FAULT_NOPAGE && mem_type_is_vram(tbo->resource->mem_type)) {
- mutex_lock(&xe->mem_access.vram_userfault.lock);
- if (list_empty(&bo->vram_userfault_link))
- list_add(&bo->vram_userfault_link, &xe->mem_access.vram_userfault.list);
- mutex_unlock(&xe->mem_access.vram_userfault.lock);
+
+ if (needs_rpm)
+ xe_pm_runtime_get(xe);
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ err) {
+ struct ttm_operation_ctx tctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ .gfp_retry_mayfail = retry_after_wait,
+ };
+
+ err = drm_exec_lock_obj(&exec, &tbo->base);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+
+ if (xe_ttm_bo_is_imported(tbo)) {
+ err = -EFAULT;
+ drm_dbg(&xe->drm, "CPU trying to access an imported buffer object.\n");
+ break;
+ }
+
+ err = xe_bo_fault_migrate(bo, &tctx, &exec);
+ if (err) {
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+
+ err = xe_bo_wait_usage_kernel(bo, &tctx);
+ if (err)
+ break;
+
+ if (!retry_after_wait)
+ ret = __xe_bo_cpu_fault(vmf, xe, bo);
}
+	/* If retry_after_wait == true, we *must* return VM_FAULT_RETRY. */
+ if (err && !retry_after_wait)
+ ret = xe_err_to_fault_t(err);
- dma_resv_unlock(tbo->base.resv);
-out:
if (needs_rpm)
xe_pm_runtime_put(xe);
+ if (retry_after_wait)
+ xe_bo_put(bo);
+out:
+ drm_dev_exit(idx);
+
return ret;
}
@@ -1774,7 +2067,7 @@ int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size)
}
static const struct vm_operations_struct xe_gem_vm_ops = {
- .fault = xe_gem_fault,
+ .fault = xe_bo_cpu_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
.access = xe_bo_vm_access,
@@ -1822,11 +2115,32 @@ void xe_bo_free(struct xe_bo *bo)
kfree(bo);
}
-struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
- struct xe_tile *tile, struct dma_resv *resv,
- struct ttm_lru_bulk_move *bulk, size_t size,
- u16 cpu_caching, enum ttm_bo_type type,
- u32 flags)
+/**
+ * xe_bo_init_locked() - Initialize or create an xe_bo.
+ * @xe: The xe device.
+ * @bo: An already allocated buffer object or NULL
+ * if the function should allocate a new one.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @resv: Pointer to a locked shared reservation object to use for this bo,
+ * or NULL for the xe_bo to use its own.
+ * @bulk: The bulk move to use for LRU bumping, or NULL for external bos.
+ * @size: The storage size to use for the bo.
+ * @cpu_caching: The cpu caching used for system memory backing store.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Initialize or create an xe buffer object. On failure, any allocated buffer
+ * object passed in @bo will have been unreferenced.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
+ struct xe_tile *tile, struct dma_resv *resv,
+ struct ttm_lru_bulk_move *bulk, size_t size,
+ u16 cpu_caching, enum ttm_bo_type type,
+ u32 flags, struct drm_exec *exec)
{
struct ttm_operation_ctx ctx = {
.interruptible = true,
@@ -1895,8 +2209,9 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
ctx.resv = resv;
}
+ xe_validation_assert_exec(xe, exec, &bo->ttm.base);
if (!(flags & XE_BO_FLAG_FIXED_PLACEMENT)) {
- err = __xe_bo_placement_for_flags(xe, bo, bo->flags);
+ err = __xe_bo_placement_for_flags(xe, bo, bo->flags, type);
if (WARN_ON(err)) {
xe_ttm_bo_destroy(&bo->ttm);
return ERR_PTR(err);
@@ -1954,34 +2269,37 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
}
static int __xe_bo_fixed_placement(struct xe_device *xe,
- struct xe_bo *bo,
+ struct xe_bo *bo, enum ttm_bo_type type,
u32 flags,
u64 start, u64 end, u64 size)
{
struct ttm_place *place = bo->placements;
+ u32 vram_flag, vram_stolen_flags;
+
+ /*
+ * to allow fixed placement in GGTT of a VF, post-migration fixups would have to
+ * include selecting a new fixed offset and shifting the page ranges for it
+ */
+ xe_assert(xe, !IS_SRIOV_VF(xe) || !(bo->flags & XE_BO_FLAG_GGTT));
if (flags & (XE_BO_FLAG_USER | XE_BO_FLAG_SYSTEM))
return -EINVAL;
+ vram_flag = flags & XE_BO_FLAG_VRAM_MASK;
+ vram_stolen_flags = (flags & (XE_BO_FLAG_STOLEN)) | vram_flag;
+
+ /* check if more than one VRAM/STOLEN flag is set */
+ if (hweight32(vram_stolen_flags) > 1)
+ return -EINVAL;
+
place->flags = TTM_PL_FLAG_CONTIGUOUS;
place->fpfn = start >> PAGE_SHIFT;
place->lpfn = end >> PAGE_SHIFT;
- switch (flags & (XE_BO_FLAG_STOLEN | XE_BO_FLAG_VRAM_MASK)) {
- case XE_BO_FLAG_VRAM0:
- place->mem_type = XE_PL_VRAM0;
- break;
- case XE_BO_FLAG_VRAM1:
- place->mem_type = XE_PL_VRAM1;
- break;
- case XE_BO_FLAG_STOLEN:
+ if (flags & XE_BO_FLAG_STOLEN)
place->mem_type = XE_PL_STOLEN;
- break;
-
- default:
- /* 0 or multiple of the above set */
- return -EINVAL;
- }
+ else
+ place->mem_type = bo_vram_flags_to_vram_placement(xe, flags, vram_flag, type);
bo->placement = (struct ttm_placement) {
.num_placement = 1,
@@ -1996,7 +2314,7 @@ __xe_bo_create_locked(struct xe_device *xe,
struct xe_tile *tile, struct xe_vm *vm,
size_t size, u64 start, u64 end,
u16 cpu_caching, enum ttm_bo_type type, u32 flags,
- u64 alignment)
+ u64 alignment, struct drm_exec *exec)
{
struct xe_bo *bo = NULL;
int err;
@@ -2010,18 +2328,18 @@ __xe_bo_create_locked(struct xe_device *xe,
return bo;
flags |= XE_BO_FLAG_FIXED_PLACEMENT;
- err = __xe_bo_fixed_placement(xe, bo, flags, start, end, size);
+ err = __xe_bo_fixed_placement(xe, bo, type, flags, start, end, size);
if (err) {
xe_bo_free(bo);
return ERR_PTR(err);
}
}
- bo = ___xe_bo_create_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
- vm && !xe_vm_in_fault_mode(vm) &&
- flags & XE_BO_FLAG_USER ?
- &vm->lru_bulk_move : NULL, size,
- cpu_caching, type, flags);
+ bo = xe_bo_init_locked(xe, bo, tile, vm ? xe_vm_resv(vm) : NULL,
+ vm && !xe_vm_in_fault_mode(vm) &&
+ flags & XE_BO_FLAG_USER ?
+ &vm->lru_bulk_move : NULL, size,
+ cpu_caching, type, flags, exec);
if (IS_ERR(bo))
return bo;
@@ -2055,9 +2373,10 @@ __xe_bo_create_locked(struct xe_device *xe,
if (flags & XE_BO_FLAG_FIXED_PLACEMENT) {
err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo,
- start + xe_bo_size(bo), U64_MAX);
+ start + xe_bo_size(bo), U64_MAX,
+ exec);
} else {
- err = xe_ggtt_insert_bo(t->mem.ggtt, bo);
+ err = xe_ggtt_insert_bo(t->mem.ggtt, bo, exec);
}
if (err)
goto err_unlock_put_bo;
@@ -2074,82 +2393,166 @@ err_unlock_put_bo:
return ERR_PTR(err);
}
-struct xe_bo *
-xe_bo_create_locked_range(struct xe_device *xe,
- struct xe_tile *tile, struct xe_vm *vm,
- size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags, u64 alignment)
-{
- return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
- flags, alignment);
-}
-
+/**
+ * xe_bo_create_locked() - Create a BO
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The local vm or NULL for external objects.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Create a locked xe BO with no range- nor alignment restrictions.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags)
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec)
{
return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type,
- flags, 0);
+ flags, 0, exec);
}
-struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- u16 cpu_caching,
- u32 flags)
+static struct xe_bo *xe_bo_create_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u16 cpu_caching,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment, bool intr)
{
- struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL,
- cpu_caching, ttm_bo_type_device,
- flags | XE_BO_FLAG_USER, 0);
- if (!IS_ERR(bo))
- xe_bo_unlock_vm_held(bo);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *bo;
+ int ret = 0;
- return bo;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
+ ret) {
+ bo = __xe_bo_create_locked(xe, tile, NULL, size, 0, ~0ULL,
+ cpu_caching, type, flags, alignment, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ } else {
+ xe_bo_unlock(bo);
+ }
+ }
+
+ return ret ? ERR_PTR(ret) : bo;
}
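The xe_validation_guard() transaction used here, and in the other *_novm helpers below, follows this general shape (a minimal sketch; alloc_and_validate() is an illustrative placeholder):

	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int err = 0;

	xe_validation_guard(&ctx, &xe->val, &exec,
			    (struct xe_val_flags) {.interruptible = true}, err) {
		err = alloc_and_validate(&exec);	/* may contend or hit OOM */
		drm_exec_retry_on_contention(&exec);	/* restarts the guard body */
		xe_validation_retry_on_oom(&ctx, &err);	/* ditto, after OOM backoff */
	}
	return err;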
-struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags)
+/**
+ * xe_bo_create_user() - Create a user BO
+ * @xe: The xe device.
+ * @vm: The local vm or NULL for external objects.
+ * @size: The storage size to use for the bo.
+ * @cpu_caching: The caching mode to be used for system backing store.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, or NULL
+ * if such a transaction should be initiated by the call.
+ *
+ * Create a bo on behalf of user-space.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_user(struct xe_device *xe,
+ struct xe_vm *vm, size_t size,
+ u16 cpu_caching,
+ u32 flags, struct drm_exec *exec)
{
- struct xe_bo *bo = xe_bo_create_locked(xe, tile, vm, size, type, flags);
+ struct xe_bo *bo;
- if (!IS_ERR(bo))
- xe_bo_unlock_vm_held(bo);
+ flags |= XE_BO_FLAG_USER;
+
+ if (vm || exec) {
+ xe_assert(xe, exec);
+ bo = __xe_bo_create_locked(xe, NULL, vm, size, 0, ~0ULL,
+ cpu_caching, ttm_bo_type_device,
+ flags, 0, exec);
+ if (!IS_ERR(bo))
+ xe_bo_unlock_vm_held(bo);
+ } else {
+ bo = xe_bo_create_novm(xe, NULL, size, cpu_caching,
+ ttm_bo_type_device, flags, 0, true);
+ }
return bo;
}
-struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm,
- size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags)
+/**
+ * xe_bo_create_pin_range_novm() - Create and pin a BO with range options.
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @start: Start of fixed VRAM range or 0.
+ * @end: End of fixed VRAM range or ~0ULL.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ *
+ * Create and pin an Xe BO with range options. If @start and @end indicate
+ * a fixed VRAM range, this must be a ttm_bo_type_kernel bo with VRAM placement
+ * only.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ */
+struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 start, u64 end,
+ enum ttm_bo_type type, u32 flags)
{
- return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset,
- type, flags, 0);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *bo;
+ int err = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ bo = __xe_bo_create_locked(xe, tile, NULL, size, start, end,
+ 0, type, flags, 0, &exec);
+ if (IS_ERR(bo)) {
+ drm_exec_retry_on_contention(&exec);
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+
+ err = xe_bo_pin(bo, &exec);
+ xe_bo_unlock(bo);
+ if (err) {
+ xe_bo_put(bo);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+ }
+
+ return err ? ERR_PTR(err) : bo;
}
-struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
- struct xe_tile *tile,
- struct xe_vm *vm,
- size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags,
- u64 alignment)
+static struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
+ struct xe_tile *tile,
+ struct xe_vm *vm,
+ size_t size, u64 offset,
+ enum ttm_bo_type type, u32 flags,
+ u64 alignment, struct drm_exec *exec)
{
struct xe_bo *bo;
int err;
u64 start = offset == ~0ull ? 0 : offset;
- u64 end = offset == ~0ull ? offset : start + size;
+ u64 end = offset == ~0ull ? ~0ull : start + size;
if (flags & XE_BO_FLAG_STOLEN &&
xe_ttm_stolen_cpu_access_needs_ggtt(xe))
flags |= XE_BO_FLAG_GGTT;
- bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type,
- flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
- alignment);
+ bo = __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type,
+ flags | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED,
+ alignment, exec);
if (IS_ERR(bo))
return bo;
- err = xe_bo_pin(bo);
+ err = xe_bo_pin(bo, exec);
if (err)
goto err_put;
@@ -2169,11 +2572,100 @@ err_put:
return ERR_PTR(err);
}
+/**
+ * xe_bo_create_pin_map_at_novm() - Create pinned and mapped bo at optional VRAM offset
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @offset: Optional VRAM offset or %~0ull for don't care.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @alignment: GGTT alignment.
+ * @intr: Whether waits for backing store should be interruptible.
+ *
+ * Create a pinned and optionally mapped bo with VRAM offset and GGTT alignment
+ * options. The bo will be external and not associated with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *
+xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 offset, enum ttm_bo_type type, u32 flags,
+ u64 alignment, bool intr)
+{
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct xe_bo *bo;
+ int ret = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = intr},
+ ret) {
+ bo = xe_bo_create_pin_map_at_aligned(xe, tile, NULL, size, offset,
+ type, flags, alignment, &exec);
+ if (IS_ERR(bo)) {
+ drm_exec_retry_on_contention(&exec);
+ ret = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ }
+ }
+
+ return ret ? ERR_PTR(ret) : bo;
+}
+
+/**
+ * xe_bo_create_pin_map() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @vm: The vm to associate the buffer object with. The vm's resv must be locked
+ * with the transaction represented by @exec.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @exec: The drm_exec transaction to use for exhaustive eviction, and
+ * previously used for locking @vm's resv.
+ *
+ * Create a pinned and mapped bo. If @vm is non-NULL, the bo is associated
+ * with @vm and shares its reservation object.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @exec was
+ * configured for interruptible locking.
+ */
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags)
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec)
{
- return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags);
+ return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, ~0ull, type, flags,
+ 0, exec);
+}
+
+/**
+ * xe_bo_create_pin_map_novm() - Create pinned and mapped bo
+ * @xe: The xe device.
+ * @tile: The tile to select for migration of this bo, and the tile used for
+ * GGTT binding if any. Only to be non-NULL for ttm_bo_type_kernel bos.
+ * @size: The storage size to use for the bo.
+ * @type: The TTM buffer object type.
+ * @flags: XE_BO_FLAG_ flags.
+ * @intr: Whether waits for backing store should be interruptible.
+ *
+ * Create a pinned and mapped bo. The bo will be external and not associated
+ * with a VM.
+ *
+ * Return: The buffer object on success. Negative error pointer on failure.
+ * In particular, the function may return ERR_PTR(%-EINTR) if @intr was set
+ * to true on entry.
+ */
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, enum ttm_bo_type type, u32 flags,
+ bool intr)
+{
+ return xe_bo_create_pin_map_at_novm(xe, tile, size, ~0ull, type, flags, 0, intr);
}
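A hypothetical caller of the novm variant might look like this (sketch; the size and flags are illustrative):

	struct xe_bo *bo;

	bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
				       XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				       XE_BO_FLAG_GGTT, true);
	if (IS_ERR(bo))
		return PTR_ERR(bo);	/* may be -EINTR since intr == true */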
static void __xe_bo_unpin_map_no_vm(void *arg)
@@ -2188,8 +2680,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
int ret;
KUNIT_STATIC_STUB_REDIRECT(xe_managed_bo_create_pin_map, xe, tile, size, flags);
-
- bo = xe_bo_create_pin_map(xe, tile, NULL, size, ttm_bo_type_kernel, flags);
+ bo = xe_bo_create_pin_map_novm(xe, tile, size, ttm_bo_type_kernel, flags, true);
if (IS_ERR(bo))
return bo;
@@ -2200,6 +2691,11 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
return bo;
}
+void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo)
+{
+ devm_release_action(xe_bo_device(bo)->drm.dev, __xe_bo_unpin_map_no_vm, bo);
+}
+
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size, u32 flags)
{
@@ -2272,6 +2768,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
* xe_bo_pin_external - pin an external BO
* @bo: buffer object to be pinned
* @in_place: Pin in current placement, don't attempt to migrate.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Pin an external (not tied to a VM, can be exported via dma-buf / prime FD)
* BO. Unique call compared to xe_bo_pin as this function has it own set of
@@ -2279,7 +2776,7 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res)
*
* Returns 0 for success, negative error code otherwise.
*/
-int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
+int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec)
{
struct xe_device *xe = xe_bo_device(bo);
int err;
@@ -2289,7 +2786,7 @@ int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
if (!xe_bo_is_pinned(bo)) {
if (!in_place) {
- err = xe_bo_validate(bo, NULL, false);
+ err = xe_bo_validate(bo, NULL, false, exec);
if (err)
return err;
}
@@ -2312,7 +2809,17 @@ int xe_bo_pin_external(struct xe_bo *bo, bool in_place)
return 0;
}
-int xe_bo_pin(struct xe_bo *bo)
+/**
+ * xe_bo_pin() - Pin a kernel bo after potentially migrating it
+ * @bo: The kernel bo to pin.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
+ *
+ * Attempts to migrate a bo to @bo->placement. If that succeeds,
+ * pins the bo.
+ *
+ * Return: %0 on success, negative error code on migration failure.
+ */
+int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec)
{
struct ttm_place *place = &bo->placements[0];
struct xe_device *xe = xe_bo_device(bo);
@@ -2334,7 +2841,7 @@ int xe_bo_pin(struct xe_bo *bo)
/* We only expect at most 1 pin */
xe_assert(xe, !xe_bo_is_pinned(bo));
- err = xe_bo_validate(bo, NULL, false);
+ err = xe_bo_validate(bo, NULL, false, exec);
if (err)
return err;
@@ -2427,6 +2934,7 @@ void xe_bo_unpin(struct xe_bo *bo)
* NULL. Used together with @allow_res_evict.
* @allow_res_evict: Whether it's allowed to evict bos sharing @vm's
* reservation object.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Make sure the bo is in allowed placement, migrating it if necessary. If
* needed, other bos will be evicted. If bos selected for eviction shares
@@ -2436,7 +2944,8 @@ void xe_bo_unpin(struct xe_bo *bo)
* Return: 0 on success, negative error code on failure. May return
* -EINTR or -ERESTARTSYS if internal waits are interrupted by a signal.
*/
-int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
+ struct drm_exec *exec)
{
struct ttm_operation_ctx ctx = {
.interruptible = true,
@@ -2458,6 +2967,7 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict)
xe_vm_set_validating(vm, allow_res_evict);
trace_xe_bo_validate(bo);
+ xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx);
xe_vm_clear_validating(vm, allow_res_evict);
@@ -2653,8 +3163,9 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_gem_create *args = data;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
struct xe_vm *vm = NULL;
- ktime_t end = 0;
struct xe_bo *bo;
unsigned int bo_flags;
u32 handle;
@@ -2728,25 +3239,26 @@ int xe_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
-retry:
- if (vm) {
- err = xe_vm_lock(vm, true);
- if (err)
- goto out_vm;
+ err = 0;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ err) {
+ if (vm) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+ }
+ bo = xe_bo_create_user(xe, vm, args->size, args->cpu_caching,
+ bo_flags, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
}
-
- bo = xe_bo_create_user(xe, NULL, vm, args->size, args->cpu_caching,
- bo_flags);
-
- if (vm)
- xe_vm_unlock(vm);
-
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &end))
- goto retry;
+ if (err)
goto out_vm;
- }
if (args->extensions) {
err = gem_create_user_extensions(xe, bo, args->extensions, 0);
@@ -2895,6 +3407,9 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
* xe_bo_migrate - Migrate an object to the desired region id
* @bo: The buffer object to migrate.
* @mem_type: The TTM region type to migrate to.
+ * @tctx: A pointer to a struct ttm_operation_ctx or NULL if
+ * a default interruptible ctx is to be used.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Attempt to migrate the buffer object to the desired memory region. The
* buffer object may not be pinned, and must be locked.
@@ -2906,7 +3421,8 @@ static void xe_place_from_ttm_type(u32 mem_type, struct ttm_place *place)
* Return: 0 on success. Negative error code on failure. In particular may
* return -EINTR or -ERESTARTSYS if signal pending.
*/
-int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx,
+ struct drm_exec *exec)
{
struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
struct ttm_operation_ctx ctx = {
@@ -2918,6 +3434,7 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
struct ttm_place requested;
xe_bo_assert_held(bo);
+ tctx = tctx ? tctx : &ctx;
if (bo->ttm.resource->mem_type == mem_type)
return 0;
@@ -2944,19 +3461,22 @@ int xe_bo_migrate(struct xe_bo *bo, u32 mem_type)
add_vram(xe, bo, &requested, bo->flags, mem_type, &c);
}
- return ttm_bo_validate(&bo->ttm, &placement, &ctx);
+ if (!tctx->no_wait_gpu)
+ xe_validation_assert_exec(xe_bo_device(bo), exec, &bo->ttm.base);
+ return ttm_bo_validate(&bo->ttm, &placement, tctx);
}
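A minimal locked-caller sketch (the placement XE_PL_TT and the surrounding locking are assumptions; @bo must already be held, as documented above):

	/* NULL tctx selects the default interruptible context */
	err = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
	if (err == -EINTR || err == -ERESTARTSYS)
		return err;	/* signal pending, as documented above */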
/**
* xe_bo_evict - Evict an object to evict placement
* @bo: The buffer object to migrate.
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* On successful completion, the object memory will be moved to evict
* placement. This function blocks until the object has been fully moved.
*
* Return: 0 on success. Negative error code on failure.
*/
-int xe_bo_evict(struct xe_bo *bo)
+int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
@@ -3107,20 +3627,19 @@ int xe_bo_dumb_create(struct drm_file *file_priv,
struct xe_device *xe = to_xe_device(dev);
struct xe_bo *bo;
uint32_t handle;
- int cpp = DIV_ROUND_UP(args->bpp, 8);
int err;
u32 page_size = max_t(u32, PAGE_SIZE,
xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K);
- args->pitch = ALIGN(args->width * cpp, 64);
- args->size = ALIGN(mul_u32_u32(args->pitch, args->height),
- page_size);
+ err = drm_mode_size_dumb(dev, args, SZ_64, page_size);
+ if (err)
+ return err;
- bo = xe_bo_create_user(xe, NULL, NULL, args->size,
+ bo = xe_bo_create_user(xe, NULL, args->size,
DRM_XE_GEM_CPU_CACHING_WC,
XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) |
XE_BO_FLAG_SCANOUT |
- XE_BO_FLAG_NEEDS_CPU_ACCESS);
+ XE_BO_FLAG_NEEDS_CPU_ACCESS, NULL);
if (IS_ERR(bo))
return PTR_ERR(bo);
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 9ce94d252015..911d5b90461a 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -10,8 +10,10 @@
#include "xe_bo_types.h"
#include "xe_macros.h"
+#include "xe_validation.h"
#include "xe_vm_types.h"
#include "xe_vm.h"
+#include "xe_vram_types.h"
#define XE_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
@@ -23,8 +25,9 @@
#define XE_BO_FLAG_VRAM_MASK (XE_BO_FLAG_VRAM0 | XE_BO_FLAG_VRAM1)
/* -- */
#define XE_BO_FLAG_STOLEN BIT(4)
+#define XE_BO_FLAG_VRAM(vram) (XE_BO_FLAG_VRAM0 << ((vram)->id))
#define XE_BO_FLAG_VRAM_IF_DGFX(tile) (IS_DGFX(tile_to_xe(tile)) ? \
- XE_BO_FLAG_VRAM0 << (tile)->id : \
+ XE_BO_FLAG_VRAM((tile)->mem.vram) : \
XE_BO_FLAG_SYSTEM)
#define XE_BO_FLAG_GGTT BIT(5)
#define XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE BIT(6)
@@ -46,6 +49,7 @@
#define XE_BO_FLAG_GGTT2 BIT(22)
#define XE_BO_FLAG_GGTT3 BIT(23)
#define XE_BO_FLAG_CPU_ADDR_MIRROR BIT(24)
+#define XE_BO_FLAG_FORCE_USER_VRAM BIT(25)
/* this one is trigger internally only */
#define XE_BO_FLAG_INTERNAL_TEST BIT(30)
@@ -86,46 +90,40 @@ struct sg_table;
struct xe_bo *xe_bo_alloc(void);
void xe_bo_free(struct xe_bo *bo);
-struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
- struct xe_tile *tile, struct dma_resv *resv,
- struct ttm_lru_bulk_move *bulk, size_t size,
- u16 cpu_caching, enum ttm_bo_type type,
- u32 flags);
-struct xe_bo *
-xe_bo_create_locked_range(struct xe_device *xe,
- struct xe_tile *tile, struct xe_vm *vm,
- size_t size, u64 start, u64 end,
- enum ttm_bo_type type, u32 flags, u64 alignment);
+struct xe_bo *xe_bo_init_locked(struct xe_device *xe, struct xe_bo *bo,
+ struct xe_tile *tile, struct dma_resv *resv,
+ struct ttm_lru_bulk_move *bulk, size_t size,
+ u16 cpu_caching, enum ttm_bo_type type,
+ u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size,
- u16 cpu_caching,
- u32 flags);
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec);
+struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_vm *vm, size_t size,
+ u16 cpu_caching, u32 flags, struct drm_exec *exec);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
struct xe_vm *vm, size_t size,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm, size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags);
-struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe,
- struct xe_tile *tile,
- struct xe_vm *vm,
- size_t size, u64 offset,
- enum ttm_bo_type type, u32 flags,
- u64 alignment);
+ enum ttm_bo_type type, u32 flags,
+ struct drm_exec *exec);
+struct xe_bo *xe_bo_create_pin_map_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, enum ttm_bo_type type, u32 flags,
+ bool intr);
+struct xe_bo *xe_bo_create_pin_range_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 start, u64 end,
+ enum ttm_bo_type type, u32 flags);
+struct xe_bo *
+xe_bo_create_pin_map_at_novm(struct xe_device *xe, struct xe_tile *tile,
+ size_t size, u64 offset, enum ttm_bo_type type,
+ u32 flags, u64 alignment, bool intr);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
size_t size, u32 flags);
+void xe_managed_bo_unpin_map_no_vm(struct xe_bo *bo);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
const void *data, size_t size, u32 flags);
int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, struct xe_bo **src);
int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
- u32 bo_flags);
+ u32 bo_flags, enum ttm_bo_type type);
static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo)
{
@@ -198,11 +196,12 @@ static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
}
}
-int xe_bo_pin_external(struct xe_bo *bo, bool in_place);
-int xe_bo_pin(struct xe_bo *bo);
+int xe_bo_pin_external(struct xe_bo *bo, bool in_place, struct drm_exec *exec);
+int xe_bo_pin(struct xe_bo *bo, struct drm_exec *exec);
void xe_bo_unpin_external(struct xe_bo *bo);
void xe_bo_unpin(struct xe_bo *bo);
-int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);
+int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict,
+ struct drm_exec *exec);
static inline bool xe_bo_is_pinned(struct xe_bo *bo)
{
@@ -275,6 +274,7 @@ int xe_bo_read(struct xe_bo *bo, u64 offset, void *dst, int size);
bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
+bool xe_bo_is_visible_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
bool xe_bo_is_vm_bound(struct xe_bo *bo);
@@ -283,8 +283,9 @@ uint64_t vram_region_gpu_offset(struct ttm_resource *res);
bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
-int xe_bo_evict(struct xe_bo *bo);
+int xe_bo_migrate(struct xe_bo *bo, u32 mem_type, struct ttm_operation_ctx *tctx,
+ struct drm_exec *exec);
+int xe_bo_evict(struct xe_bo *bo, struct drm_exec *exec);
int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_notifier_prepare_pinned(struct xe_bo *bo);
@@ -313,6 +314,21 @@ static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
return PAGE_ALIGN(xe_bo_size(bo));
}
+/**
+ * xe_bo_has_valid_ccs_bb - Check if CCS's BBs were setup for the BO.
+ * @bo: the &xe_bo to check
+ *
+ * The CCS's BBs are only set up by the VF driver, but it is safe
+ * to call this function from a non-VF driver as well.
+ *
+ * Return: true iff the CCS's BBs are set up, false otherwise.
+ */
+static inline bool xe_bo_has_valid_ccs_bb(struct xe_bo *bo)
+{
+ return bo->bb_ccs[XE_SRIOV_VF_CCS_READ_CTX] &&
+ bo->bb_ccs[XE_SRIOV_VF_CCS_WRITE_CTX];
+}
+
static inline bool xe_bo_has_pages(struct xe_bo *bo)
{
if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
diff --git a/drivers/gpu/drm/xe/xe_bo_doc.h b/drivers/gpu/drm/xe/xe_bo_doc.h
index 25a884c64bf1..401e7dd26ef3 100644
--- a/drivers/gpu/drm/xe/xe_bo_doc.h
+++ b/drivers/gpu/drm/xe/xe_bo_doc.h
@@ -12,7 +12,7 @@
* BO management
* =============
*
- * TTM manages (placement, eviction, etc...) all BOs in XE.
+ * TTM manages (placement, eviction, etc...) all BOs in Xe.
*
* BO creation
* ===========
@@ -29,7 +29,7 @@
* a kernel BO (e.g. engine state, memory for page tables, etc...). These BOs
* are typically mapped in the GGTT (any kernel BOs aside memory for page tables
* are in the GGTT), are pinned (can't move or be evicted at runtime), have a
- * vmap (XE can access the memory via xe_map layer) and have contiguous physical
+ * vmap (Xe can access the memory via xe_map layer) and have contiguous physical
* memory.
*
* More details of why kernel BOs are pinned and contiguous below.
@@ -40,7 +40,7 @@
* A user BO is created via the DRM_IOCTL_XE_GEM_CREATE IOCTL. Once it is
* created the BO can be mmap'd (via DRM_IOCTL_XE_GEM_MMAP_OFFSET) for user
* access and it can be bound for GPU access (via DRM_IOCTL_XE_VM_BIND). All
- * user BOs are evictable and user BOs are never pinned by XE. The allocation of
+ * user BOs are evictable and user BOs are never pinned by Xe. The allocation of
* the backing store can be deferred from creation time until first use which is
* either mmap, bind, or pagefault.
*
@@ -84,7 +84,7 @@
* ====================
*
* All eviction (or in other words, moving a BO from one memory location to
- * another) is routed through TTM with a callback into XE.
+ * another) is routed through TTM with a callback into Xe.
*
* Runtime eviction
* ----------------
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index 7484ce55a303..7661fca7f278 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -73,6 +73,11 @@ int xe_bo_notifier_prepare_all_pinned(struct xe_device *xe)
&xe->pinned.late.kernel_bo_present,
xe_bo_notifier_prepare_pinned);
+ if (!ret)
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_notifier_prepare_pinned);
+
return ret;
}
@@ -93,6 +98,10 @@ void xe_bo_notifier_unprepare_all_pinned(struct xe_device *xe)
(void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
&xe->pinned.late.kernel_bo_present,
xe_bo_notifier_unprepare_pinned);
+
+ (void)xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external,
+ xe_bo_notifier_unprepare_pinned);
}
/**
@@ -158,8 +167,8 @@ int xe_bo_evict_all(struct xe_device *xe)
if (ret)
return ret;
- ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
- &xe->pinned.late.evicted, xe_bo_evict_pinned);
+ ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.external,
+ &xe->pinned.late.external, xe_bo_evict_pinned);
if (!ret)
ret = xe_bo_apply_to_pinned(xe, &xe->pinned.late.kernel_bo_present,
@@ -182,7 +191,6 @@ int xe_bo_evict_all(struct xe_device *xe)
static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
{
- struct xe_device *xe = xe_bo_device(bo);
int ret;
ret = xe_bo_restore_pinned(bo);
@@ -201,13 +209,6 @@ static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
}
}
- /*
- * We expect validate to trigger a move VRAM and our move code
- * should setup the iosys map.
- */
- xe_assert(xe, !(bo->flags & XE_BO_FLAG_PINNED_LATE_RESTORE) ||
- !iosys_map_is_null(&bo->vmap));
-
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h
index ff560d82496f..d4fe3c8dca5b 100644
--- a/drivers/gpu/drm/xe/xe_bo_types.h
+++ b/drivers/gpu/drm/xe/xe_bo_types.h
@@ -9,6 +9,7 @@
#include <linux/iosys-map.h>
#include <drm/drm_gpusvm.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
@@ -24,7 +25,9 @@ struct xe_vm;
/* TODO: To be selected with VM_MADVISE */
#define XE_BO_PRIORITY_NORMAL 1
-/** @xe_bo: XE buffer object */
+/**
+ * struct xe_bo - Xe buffer object
+ */
struct xe_bo {
/** @ttm: TTM base buffer object */
struct ttm_buffer_object ttm;
@@ -46,7 +49,7 @@ struct xe_bo {
struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE];
/** @vmap: iosys map of this buffer */
struct iosys_map vmap;
- /** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
+ /** @kmap: TTM bo kmap object for internal use only. Keep off. */
struct ttm_bo_kmap_obj kmap;
/** @pinned_link: link to present / evicted list of pinned BO */
struct list_head pinned_link;
@@ -60,6 +63,14 @@ struct xe_bo {
*/
struct list_head client_link;
#endif
+ /** @attr: User controlled attributes for bo */
+ struct {
+ /**
+ * @atomic_access: type of atomic access bo needs
+ * protected by bo dma-resv lock
+ */
+ u32 atomic_access;
+ } attr;
/**
* @pxp_key_instance: PXP key instance this BO was created against. A
* 0 in this variable indicates that the BO does not use PXP encryption.
@@ -73,9 +84,12 @@ struct xe_bo {
/** @created: Whether the bo has passed initial creation */
bool created;
- /** @ccs_cleared */
+ /** @ccs_cleared: true means that CCS region of BO is already cleared */
bool ccs_cleared;
+ /** @bb_ccs: BB instructions of CCS read/write. Valid only for VF */
+ struct xe_bb *bb_ccs[XE_SRIOV_VF_CCS_CTX_COUNT];
+
/**
* @cpu_caching: CPU caching mode. Currently only used for userspace
* objects. Exceptions are system memory on DGFX, which is always
@@ -87,9 +101,10 @@ struct xe_bo {
struct drm_pagemap_devmem devmem_allocation;
/** @vram_userfault_link: Link into @mem_access.vram_userfault.list */
- struct list_head vram_userfault_link;
+ struct list_head vram_userfault_link;
- /** @min_align: minimum alignment needed for this BO if different
+ /**
+ * @min_align: minimum alignment needed for this BO if different
* from default
*/
u64 min_align;
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index e9b46a2d0019..9f6251b1008b 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -4,42 +4,70 @@
*/
#include <linux/bitops.h>
+#include <linux/ctype.h>
#include <linux/configfs.h>
+#include <linux/cleanup.h>
#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
+#include "instructions/xe_mi_commands.h"
#include "xe_configfs.h"
-#include "xe_module.h"
-
+#include "xe_gt_types.h"
#include "xe_hw_engine_types.h"
+#include "xe_module.h"
+#include "xe_pci_types.h"
+#include "xe_sriov_types.h"
/**
* DOC: Xe Configfs
*
* Overview
- * =========
+ * ========
*
- * Configfs is a filesystem-based manager of kernel objects. XE KMD registers a
- * configfs subsystem called ``'xe'`` that creates a directory in the mounted configfs directory
- * The user can create devices under this directory and configure them as necessary
- * See Documentation/filesystems/configfs.rst for more information about how configfs works.
+ * Configfs is a filesystem-based manager of kernel objects. Xe KMD registers a
+ * configfs subsystem called ``xe`` that creates a directory in the mounted
+ * configfs directory. The user can create devices under this directory and
+ * configure them as necessary. See Documentation/filesystems/configfs.rst for
+ * more information about how configfs works.
*
* Create devices
- * ===============
+ * ==============
+ *
+ * To create a device, the ``xe`` module should already be loaded, but some
+ * attributes can only be set before binding the device. This can be
+ * accomplished by blocking driver autoprobe::
+ *
+ * # echo 0 > /sys/bus/pci/drivers_autoprobe
+ * # modprobe xe
*
- * In order to create a device, the user has to create a directory inside ``'xe'``::
+ * In order to create a device, the user has to create a directory inside ``xe``::
*
- * mkdir /sys/kernel/config/xe/0000:03:00.0/
+ * # mkdir /sys/kernel/config/xe/0000:03:00.0/
*
* Every device created is populated by the driver with entries that can be
* used to configure it::
*
* /sys/kernel/config/xe/
- * .. 0000:03:00.0/
- * ... survivability_mode
+ * ├── 0000:00:02.0
+ * │   └── ...
+ * ├── 0000:00:02.1
+ * │   └── ...
+ * :
+ * └── 0000:03:00.0
+ * ├── survivability_mode
+ * ├── gt_types_allowed
+ * ├── engines_allowed
+ * └── enable_psmi
+ *
+ * After configuring the attributes as per next section, the device can be
+ * probed with::
+ *
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind
+ * # # or
+ * # echo 0000:03:00.0 > /sys/bus/pci/drivers_probe
*
* Configure Attributes
* ====================
@@ -51,7 +79,46 @@
* effect when probing the device. Example to enable it::
*
* # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
- * # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
+ *
+ * This attribute can only be set before binding to the device.
+ *
+ * Allowed GT types:
+ * -----------------
+ *
+ * Allow only specific types of GTs to be detected and initialized by the
+ * driver. Any combination of GT types can be enabled/disabled, although
+ * some settings will cause the device to fail to probe.
+ *
+ * Writes support both comma- and newline-separated input formats. Reads
+ * will always return one GT type per line. "primary" and "media" are the
+ * GT type names supported by this interface.
+ *
+ * This attribute can only be set before binding to the device.
+ *
+ * Examples:
+ *
+ * Allow both primary and media GTs to be initialized and used. This matches
+ * the driver's default behavior::
+ *
+ * # echo 'primary,media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
+ *
+ * Allow only the primary GT of each tile to be initialized and used,
+ * effectively disabling the media GT if it exists on the platform::
+ *
+ * # echo 'primary' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
+ *
+ * Allow only the media GT of each tile to be initialized and used,
+ * effectively disabling the primary GT. **This configuration will cause
+ * device probe failure on all current platforms, but may be allowed on
+ * igpu platforms in the future**::
+ *
+ * # echo 'media' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
+ *
+ * Disable all GTs. Only other GPU IP (such as display) is potentially usable.
+ * **This configuration will cause device probe failure on all current
+ * platforms, but may be allowed on igpu platforms in the future**::
+ *
+ * # echo '' > /sys/kernel/config/xe/0000:03:00.0/gt_types_allowed
*
* Allowed engines:
* ----------------
@@ -77,27 +144,158 @@
* available for migrations, but it's disabled. This is intended for debugging
* purposes only.
*
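+ * Example, allowing a single render instance plus all copy engines
+ * (illustrative; the available engine list is device-specific)::
+ *
+ * # echo 'rcs0,bcs*' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *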
+ * This attribute can only be set before binding to the device.
+ *
+ * PSMI
+ * ----
+ *
+ * Enable extra debugging capabilities to trace engine execution. This is only
+ * useful during early platform enabling and requires additional hardware to be
+ * connected. Once enabled, additional WAs are added and runtime configuration
+ * is done via debugfs. Example to enable it::
+ *
+ * # echo 1 > /sys/kernel/config/xe/0000:03:00.0/enable_psmi
+ *
+ * This attribute can only be set before binding to the device.
+ *
+ * Context restore BB
+ * ------------------
+ *
+ * Allow a batch buffer to be executed during context switches. When the
+ * GPU is restoring the context, it executes additional commands. It's useful
+ * for testing additional workarounds and validating certain HW behaviors: it's
+ * not intended for normal execution and will taint the kernel with TAINT_TEST
+ * when used.
+ *
+ * The syntax allows passing raw instructions to be executed by the engine
+ * in a batch buffer, or setting specific registers.
+ *
+ * #. Generic instruction::
+ *
+ * <engine-class> cmd <instr> [[dword0] [dword1] [...]]
+ *
+ * #. Simple register setting::
+ *
+ * <engine-class> reg <address> <value>
+ *
+ * Commands are saved per engine class: all instances of that class will execute
+ * those commands during context switch. The instruction, dword arguments,
+ * addresses and values are in hex format like in the examples below.
+ *
+ * #. Execute a LRI command to write 0xDEADBEEF to register 0x4f10 after the
+ * normal context restore::
+ *
+ * # echo 'rcs cmd 11000001 4F100 DEADBEEF' \
+ * > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb
+ *
+ * #. Execute a LRI command to write 0xDEADBEEF to register 0x4f10 at the
+ * beginning of the context restore::
+ *
+ * # echo 'rcs cmd 11000001 4F100 DEADBEEF' \
+ * > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_mid_bb
+ *
+ * #. Load certain values in a couple of registers (this can be used as a
+ * simpler alternative to the `cmd` action)::
+ *
+ * # cat > /sys/kernel/config/xe/0000:03:00.0/ctx_restore_post_bb <<EOF
+ * rcs reg 4F100 DEADBEEF
+ * rcs reg 4F104 FFFFFFFF
+ * EOF
+ *
+ * .. note::
+ *
+ * When using multiple lines, make sure to use a command that is
+ * implemented with a single write syscall, like HEREDOC.
+ *
+ * Currently this is implemented only for post and mid context restore and
+ * these attributes can only be set before binding to the device.
+ *
+ * Max SR-IOV Virtual Functions
+ * ----------------------------
+ *
+ * This config allows limiting the number of Virtual Functions (VFs) that can
+ * be managed by the Physical Function (PF) driver, where the value 0 disables
+ * PF mode (no VFs).
+ *
+ * The default max_vfs config value is taken from the max_vfs modparam.
+ *
+ * How to enable PF with support for an unlimited (up to the HW limit) number of VFs::
+ *
+ * # echo unlimited > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
+ * # echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
+ *
+ * How to enable PF with support for up to 3 VFs::
+ *
+ * # echo 3 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
+ * # echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
+ *
+ * How to disable PF mode and always run as native::
+ *
+ * # echo 0 > /sys/kernel/config/xe/0000:00:02.0/sriov/max_vfs
+ * # echo 0000:00:02.0 > /sys/bus/pci/drivers/xe/bind
+ *
+ * This setting only takes effect when probing the device.
+ *
* Remove devices
* ==============
*
* The created device directories can be removed using ``rmdir``::
*
- * rmdir /sys/kernel/config/xe/0000:03:00.0/
+ * # rmdir /sys/kernel/config/xe/0000:03:00.0/
*/
-struct xe_config_device {
- struct config_group group;
+/* Similar to struct xe_bb, but not tied to HW (yet) */
+struct wa_bb {
+ u32 *cs;
+ u32 len; /* in dwords */
+};
- bool survivability_mode;
- u64 engines_allowed;
+struct xe_config_group_device {
+ struct config_group group;
+ struct config_group sriov;
+
+ struct xe_config_device {
+ u64 gt_types_allowed;
+ u64 engines_allowed;
+ struct wa_bb ctx_restore_post_bb[XE_ENGINE_CLASS_MAX];
+ struct wa_bb ctx_restore_mid_bb[XE_ENGINE_CLASS_MAX];
+ bool survivability_mode;
+ bool enable_psmi;
+ struct {
+ unsigned int max_vfs;
+ } sriov;
+ } config;
/* protects attributes */
struct mutex lock;
+ /* matching descriptor */
+ const struct xe_device_desc *desc;
+ /* tentative SR-IOV mode */
+ enum xe_sriov_mode mode;
};
+static const struct xe_config_device device_defaults = {
+ .gt_types_allowed = U64_MAX,
+ .engines_allowed = U64_MAX,
+ .survivability_mode = false,
+ .enable_psmi = false,
+ .sriov = {
+ .max_vfs = UINT_MAX,
+ },
+};
+
+static void set_device_defaults(struct xe_config_device *config)
+{
+ *config = device_defaults;
+#ifdef CONFIG_PCI_IOV
+ config->sriov.max_vfs = xe_modparam.max_vfs;
+#endif
+}
+
struct engine_info {
const char *cls;
u64 mask;
+ enum xe_engine_class engine_class;
};
/* Some helpful macros to aid on the sizing of buffer allocation when parsing */
@@ -105,17 +303,56 @@ struct engine_info {
#define MAX_ENGINE_INSTANCE_CHARS 2
static const struct engine_info engine_info[] = {
- { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
- { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
- { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
- { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
- { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
- { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
+ { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK, .engine_class = XE_ENGINE_CLASS_RENDER },
+ { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK, .engine_class = XE_ENGINE_CLASS_COPY },
+ { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_DECODE },
+ { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK, .engine_class = XE_ENGINE_CLASS_VIDEO_ENHANCE },
+ { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK, .engine_class = XE_ENGINE_CLASS_COMPUTE },
+ { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK, .engine_class = XE_ENGINE_CLASS_OTHER },
};
+static const struct {
+ const char *name;
+ enum xe_gt_type type;
+} gt_types[] = {
+ { .name = "primary", .type = XE_GT_TYPE_MAIN },
+ { .name = "media", .type = XE_GT_TYPE_MEDIA },
+};
+
+static struct xe_config_group_device *to_xe_config_group_device(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct xe_config_group_device, group);
+}
+
static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
- return container_of(to_config_group(item), struct xe_config_device, group);
+ return &to_xe_config_group_device(item)->config;
+}
+
+static bool is_bound(struct xe_config_group_device *dev)
+{
+ unsigned int domain, bus, slot, function;
+ struct pci_dev *pdev;
+ const char *name;
+ bool ret;
+
+ lockdep_assert_held(&dev->lock);
+
+ name = dev->group.cg_item.ci_name;
+ if (sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function) != 4)
+ return false;
+
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ if (!pdev)
+ return false;
+
+ ret = pci_get_drvdata(pdev);
+ if (ret)
+ pci_dbg(pdev, "Already bound to driver\n");
+ pci_dev_put(pdev);
+
+ return ret;
}
static ssize_t survivability_mode_show(struct config_item *item, char *page)
@@ -127,7 +364,7 @@ static ssize_t survivability_mode_show(struct config_item *item, char *page)
static ssize_t survivability_mode_store(struct config_item *item, const char *page, size_t len)
{
- struct xe_config_device *dev = to_xe_config_device(item);
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
bool survivability_mode;
int ret;
@@ -135,9 +372,62 @@ static ssize_t survivability_mode_store(struct config_item *item, const char *pa
if (ret)
return ret;
- mutex_lock(&dev->lock);
- dev->survivability_mode = survivability_mode;
- mutex_unlock(&dev->lock);
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.survivability_mode = survivability_mode;
+
+ return len;
+}
+
+static ssize_t gt_types_allowed_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ char *p = page;
+
+ for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++)
+ if (dev->gt_types_allowed & BIT_ULL(gt_types[i].type))
+ p += sprintf(p, "%s\n", gt_types[i].name);
+
+ return p - page;
+}
+
+static ssize_t gt_types_allowed_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+ char *buf __free(kfree) = kstrdup(page, GFP_KERNEL);
+ char *p = buf;
+ u64 typemask = 0;
+
+ if (!buf)
+ return -ENOMEM;
+
+ while (p) {
+ char *typename = strsep(&p, ",\n");
+ bool matched = false;
+
+ if (typename[0] == '\0')
+ continue;
+
+ for (size_t i = 0; i < ARRAY_SIZE(gt_types); i++) {
+ if (strcmp(typename, gt_types[i].name) == 0) {
+ typemask |= BIT_ULL(gt_types[i].type);
+ matched = true;
+ break;
+ }
+ }
+
+ if (!matched)
+ return -EINVAL;
+ }
+
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.gt_types_allowed = typemask;
return len;
}
@@ -166,7 +456,18 @@ static ssize_t engines_allowed_show(struct config_item *item, char *page)
return p - page;
}
-static bool lookup_engine_mask(const char *pattern, u64 *mask)
+/*
+ * Lookup engine_info. If @mask is not NULL, reduce the mask according to the
+ * instance in @pattern.
+ *
+ * Examples of inputs:
+ * - lookup_engine_info("rcs0", &mask): return "rcs" entry from @engine_info and
+ * mask == BIT_ULL(XE_HW_ENGINE_RCS0)
+ * - lookup_engine_info("rcs*", &mask): return "rcs" entry from @engine_info and
+ * mask == XE_HW_ENGINE_RCS_MASK
+ * - lookup_engine_info("rcs", NULL): return "rcs" entry from @engine_info
+ */
+static const struct engine_info *lookup_engine_info(const char *pattern, u64 *mask)
{
for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
u8 instance;
@@ -176,70 +477,361 @@ static bool lookup_engine_mask(const char *pattern, u64 *mask)
continue;
pattern += strlen(engine_info[i].cls);
+ if (!mask)
+ return *pattern ? NULL : &engine_info[i];
if (!strcmp(pattern, "*")) {
*mask = engine_info[i].mask;
- return true;
+ return &engine_info[i];
}
if (kstrtou8(pattern, 10, &instance))
- return false;
+ return NULL;
bit = __ffs64(engine_info[i].mask) + instance;
if (bit >= fls64(engine_info[i].mask))
- return false;
+ return NULL;
*mask = BIT_ULL(bit);
- return true;
+ return &engine_info[i];
}
- return false;
+ return NULL;
+}
+
+static int parse_engine(const char *s, const char *end_chars, u64 *mask,
+ const struct engine_info **pinfo)
+{
+ char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
+ const struct engine_info *info;
+ size_t len;
+
+ len = strcspn(s, end_chars);
+ if (len >= sizeof(buf))
+ return -EINVAL;
+
+ memcpy(buf, s, len);
+ buf[len] = '\0';
+
+ info = lookup_engine_info(buf, mask);
+ if (!info)
+ return -ENOENT;
+
+ if (pinfo)
+ *pinfo = info;
+
+ return len;
}
static ssize_t engines_allowed_store(struct config_item *item, const char *page,
size_t len)
{
- struct xe_config_device *dev = to_xe_config_device(item);
- size_t patternlen, p;
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+ ssize_t patternlen, p;
u64 mask, val = 0;
for (p = 0; p < len; p += patternlen + 1) {
- char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
-
- patternlen = strcspn(page + p, ",\n");
- if (patternlen >= sizeof(buf))
+ patternlen = parse_engine(page + p, ",\n", &mask, NULL);
+ if (patternlen < 0)
return -EINVAL;
- memcpy(buf, page + p, patternlen);
- buf[patternlen] = '\0';
+ val |= mask;
+ }
+
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.engines_allowed = val;
+
+ return len;
+}
+
+static ssize_t enable_psmi_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+
+ return sprintf(page, "%d\n", dev->enable_psmi);
+}
+
+static ssize_t enable_psmi_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+ bool val;
+ int ret;
+
+ ret = kstrtobool(page, &val);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&dev->lock);
+ if (is_bound(dev))
+ return -EBUSY;
+
+ dev->config.enable_psmi = val;
+
+ return len;
+}
+
+static bool wa_bb_read_advance(bool dereference, char **p,
+ const char *append, size_t len,
+ size_t *max_size)
+{
+ if (dereference) {
+ if (len >= *max_size)
+ return false;
+ *max_size -= len;
+ if (append)
+ memcpy(*p, append, len);
+ }
+
+ *p += len;
+
+ return true;
+}
- if (!lookup_engine_mask(buf, &mask))
+static ssize_t wa_bb_show(struct xe_config_group_device *dev,
+ struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
+ char *data, size_t sz)
+{
+ char *p = data;
+
+ guard(mutex)(&dev->lock);
+
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ enum xe_engine_class ec = engine_info[i].engine_class;
+ size_t len;
+
+ if (!wa_bb[ec].len)
+ continue;
+
+ len = snprintf(p, sz, "%s:", engine_info[i].cls);
+ if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
+ return -ENOBUFS;
+
+ for (size_t j = 0; j < wa_bb[ec].len; j++) {
+ len = snprintf(p, sz, " %08x", wa_bb[ec].cs[j]);
+ if (!wa_bb_read_advance(data, &p, NULL, len, &sz))
+ return -ENOBUFS;
+ }
+
+ if (!wa_bb_read_advance(data, &p, "\n", 1, &sz))
+ return -ENOBUFS;
+ }
+
+ if (!wa_bb_read_advance(data, &p, "", 1, &sz))
+ return -ENOBUFS;
+
+ /* Reserve one more byte to match the check for '\0' */
+ if (!data)
+ p++;
+
+ return p - data;
+}
+
+static ssize_t ctx_restore_mid_bb_show(struct config_item *item, char *page)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_show(dev, dev->config.ctx_restore_mid_bb, page, SZ_4K);
+}
+
+static ssize_t ctx_restore_post_bb_show(struct config_item *item, char *page)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_show(dev, dev->config.ctx_restore_post_bb, page, SZ_4K);
+}
+
+static void wa_bb_append(struct wa_bb *wa_bb, u32 val)
+{
+ if (wa_bb->cs)
+ wa_bb->cs[wa_bb->len] = val;
+
+ wa_bb->len++;
+}
+
+static ssize_t parse_hex(const char *line, u32 *pval)
+{
+ char numstr[12];
+ const char *p;
+ ssize_t numlen;
+
+ p = line + strspn(line, " \t");
+ if (!*p || *p == '\n')
+ return 0;
+
+ numlen = strcspn(p, " \t\n");
+ if (!numlen || numlen >= sizeof(numstr) - 1)
+ return -EINVAL;
+
+ memcpy(numstr, p, numlen);
+ numstr[numlen] = '\0';
+ p += numlen;
+
+ if (kstrtou32(numstr, 16, pval))
+ return -EINVAL;
+
+ return p - line;
+}
+
+/*
+ * Parse lines with the format
+ *
+ * <engine-class> cmd <u32> <u32...>
+ * <engine-class> reg <u32_addr> <u32_val>
+ *
+ * and optionally save them in @wa_bb[i] if @wa_bb[i].cs is non-NULL.
+ *
+ * Return the number of dwords parsed.
+ */
+static ssize_t parse_wa_bb_lines(const char *lines,
+ struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX])
+{
+ ssize_t dwords = 0, ret;
+ const char *p;
+
+ for (p = lines; *p; p++) {
+ const struct engine_info *info = NULL;
+ u32 val, val2;
+
+ /* Also allow empty lines */
+ p += strspn(p, " \t\n");
+ if (!*p)
+ break;
+
+ ret = parse_engine(p, " \t\n", NULL, &info);
+ if (ret < 0)
+ return ret;
+
+ p += ret;
+ p += strspn(p, " \t");
+
+ if (str_has_prefix(p, "cmd")) {
+ for (p += strlen("cmd"); *p;) {
+ ret = parse_hex(p, &val);
+ if (ret < 0)
+ return -EINVAL;
+ if (!ret)
+ break;
+
+ p += ret;
+ dwords++;
+ wa_bb_append(&wa_bb[info->engine_class], val);
+ }
+ } else if (str_has_prefix(p, "reg")) {
+ p += strlen("reg");
+ ret = parse_hex(p, &val);
+ if (ret <= 0)
+ return -EINVAL;
+
+ p += ret;
+ ret = parse_hex(p, &val2);
+ if (ret <= 0)
+ return -EINVAL;
+
+ p += ret;
+ dwords += 3;
+ wa_bb_append(&wa_bb[info->engine_class],
+ MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1));
+ wa_bb_append(&wa_bb[info->engine_class], val);
+ wa_bb_append(&wa_bb[info->engine_class], val2);
+ } else {
return -EINVAL;
+ }
+ }
- val |= mask;
+ return dwords;
+}
+
+static ssize_t wa_bb_store(struct wa_bb wa_bb[static XE_ENGINE_CLASS_MAX],
+ struct xe_config_group_device *dev,
+ const char *page, size_t len)
+{
+ /* tmp_wa_bb must match wa_bb's size */
+ struct wa_bb tmp_wa_bb[XE_ENGINE_CLASS_MAX] = { };
+ ssize_t count, class;
+ u32 *tmp;
+
+ /* 1. Count dwords - tmp_wa_bb[i].cs is NULL for all classes */
+ count = parse_wa_bb_lines(page, tmp_wa_bb);
+ if (count < 0)
+ return count;
+
+ guard(mutex)(&dev->lock);
+
+ if (is_bound(dev))
+ return -EBUSY;
+
+ /*
+ * 2. Allocate a u32 array and set the pointers to the right positions
+ * according to the length of each class' wa_bb
+ */
+ tmp = krealloc(wa_bb[0].cs, count * sizeof(u32), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (!count) {
+ memset(wa_bb, 0, sizeof(tmp_wa_bb));
+ return len;
+ }
+
+ for (class = 0, count = 0; class < XE_ENGINE_CLASS_MAX; ++class) {
+ tmp_wa_bb[class].cs = tmp + count;
+ count += tmp_wa_bb[class].len;
+ tmp_wa_bb[class].len = 0;
}
- mutex_lock(&dev->lock);
- dev->engines_allowed = val;
- mutex_unlock(&dev->lock);
+ /* 3. Parse wa_bb lines again, this time saving the values */
+ count = parse_wa_bb_lines(page, tmp_wa_bb);
+ if (count < 0)
+ return count;
+
+ memcpy(wa_bb, tmp_wa_bb, sizeof(tmp_wa_bb));
return len;
}
-CONFIGFS_ATTR(, survivability_mode);
+static ssize_t ctx_restore_mid_bb_store(struct config_item *item,
+ const char *data, size_t sz)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_store(dev->config.ctx_restore_mid_bb, dev, data, sz);
+}
+
+static ssize_t ctx_restore_post_bb_store(struct config_item *item,
+ const char *data, size_t sz)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ return wa_bb_store(dev->config.ctx_restore_post_bb, dev, data, sz);
+}
+
+CONFIGFS_ATTR(, ctx_restore_mid_bb);
+CONFIGFS_ATTR(, ctx_restore_post_bb);
+CONFIGFS_ATTR(, enable_psmi);
CONFIGFS_ATTR(, engines_allowed);
+CONFIGFS_ATTR(, gt_types_allowed);
+CONFIGFS_ATTR(, survivability_mode);
static struct configfs_attribute *xe_config_device_attrs[] = {
- &attr_survivability_mode,
+ &attr_ctx_restore_mid_bb,
+ &attr_ctx_restore_post_bb,
+ &attr_enable_psmi,
&attr_engines_allowed,
+ &attr_gt_types_allowed,
+ &attr_survivability_mode,
NULL,
};
static void xe_config_device_release(struct config_item *item)
{
- struct xe_config_device *dev = to_xe_config_device(item);
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
mutex_destroy(&dev->lock);
+
+ kfree(dev->config.ctx_restore_post_bb[0].cs);
kfree(dev);
}
@@ -247,49 +839,192 @@ static struct configfs_item_operations xe_config_device_ops = {
.release = xe_config_device_release,
};
+static bool xe_config_device_is_visible(struct config_item *item,
+ struct configfs_attribute *attr, int n)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item);
+
+ if (attr == &attr_survivability_mode) {
+ if (!dev->desc->is_dgfx || dev->desc->platform < XE_BATTLEMAGE)
+ return false;
+ }
+
+ return true;
+}
+
+static struct configfs_group_operations xe_config_device_group_ops = {
+ .is_visible = xe_config_device_is_visible,
+};
+
static const struct config_item_type xe_config_device_type = {
.ct_item_ops = &xe_config_device_ops,
+ .ct_group_ops = &xe_config_device_group_ops,
.ct_attrs = xe_config_device_attrs,
.ct_owner = THIS_MODULE,
};
+static ssize_t sriov_max_vfs_show(struct config_item *item, char *page)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
+
+ guard(mutex)(&dev->lock);
+
+ if (dev->config.sriov.max_vfs == UINT_MAX)
+ return sprintf(page, "%s\n", "unlimited");
+ else
+ return sprintf(page, "%u\n", dev->config.sriov.max_vfs);
+}
+
+static ssize_t sriov_max_vfs_store(struct config_item *item, const char *page, size_t len)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
+ unsigned int max_vfs;
+ int ret;
+
+ guard(mutex)(&dev->lock);
+
+ if (is_bound(dev))
+ return -EBUSY;
+
+ ret = kstrtouint(page, 0, &max_vfs);
+ if (ret) {
+ if (!sysfs_streq(page, "unlimited"))
+ return ret;
+ max_vfs = UINT_MAX;
+ }
+
+ dev->config.sriov.max_vfs = max_vfs;
+ return len;
+}
+
+CONFIGFS_ATTR(sriov_, max_vfs);
+
+static struct configfs_attribute *xe_config_sriov_attrs[] = {
+ &sriov_attr_max_vfs,
+ NULL,
+};
+
+static bool xe_config_sriov_is_visible(struct config_item *item,
+ struct configfs_attribute *attr, int n)
+{
+ struct xe_config_group_device *dev = to_xe_config_group_device(item->ci_parent);
+
+ if (attr == &sriov_attr_max_vfs && dev->mode != XE_SRIOV_MODE_PF)
+ return false;
+
+ return true;
+}
+
+static struct configfs_group_operations xe_config_sriov_group_ops = {
+ .is_visible = xe_config_sriov_is_visible,
+};
+
+static const struct config_item_type xe_config_sriov_type = {
+ .ct_owner = THIS_MODULE,
+ .ct_group_ops = &xe_config_sriov_group_ops,
+ .ct_attrs = xe_config_sriov_attrs,
+};
+
+static const struct xe_device_desc *xe_match_desc(struct pci_dev *pdev)
+{
+ struct device_driver *driver = driver_find("xe", &pci_bus_type);
+ struct pci_driver *drv = to_pci_driver(driver);
+ const struct pci_device_id *ids = drv ? drv->id_table : NULL;
+ const struct pci_device_id *found = pci_match_id(ids, pdev);
+
+ return found ? (const void *)found->driver_data : NULL;
+}
+
+static struct pci_dev *get_physfn_instead(struct pci_dev *virtfn)
+{
+ struct pci_dev *physfn = pci_physfn(virtfn);
+
+ pci_dev_get(physfn);
+ pci_dev_put(virtfn);
+ return physfn;
+}
+
static struct config_group *xe_config_make_device_group(struct config_group *group,
const char *name)
{
unsigned int domain, bus, slot, function;
- struct xe_config_device *dev;
+ struct xe_config_group_device *dev;
+ const struct xe_device_desc *match;
+ enum xe_sriov_mode mode;
struct pci_dev *pdev;
+ char canonical[16];
+ int vfnumber = 0;
int ret;
- ret = sscanf(name, "%04x:%02x:%02x.%x", &domain, &bus, &slot, &function);
+ ret = sscanf(name, "%x:%x:%x.%x", &domain, &bus, &slot, &function);
if (ret != 4)
return ERR_PTR(-EINVAL);
+ ret = scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
+ PCI_SLOT(PCI_DEVFN(slot, function)),
+ PCI_FUNC(PCI_DEVFN(slot, function)));
+ if (ret != 12 || strcmp(name, canonical))
+ return ERR_PTR(-EINVAL);
+
pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function));
+ mode = pdev ? dev_is_pf(&pdev->dev) ?
+ XE_SRIOV_MODE_PF : XE_SRIOV_MODE_NONE : XE_SRIOV_MODE_VF;
+
+ if (!pdev && function)
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, 0));
+ if (!pdev && slot)
+ pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(0, 0));
if (!pdev)
return ERR_PTR(-ENODEV);
+
+ if (PCI_DEVFN(slot, function) != pdev->devfn) {
+ pdev = get_physfn_instead(pdev);
+ vfnumber = PCI_DEVFN(slot, function) - pdev->devfn;
+ if (!dev_is_pf(&pdev->dev) || vfnumber > pci_sriov_get_totalvfs(pdev)) {
+ pci_dev_put(pdev);
+ return ERR_PTR(-ENODEV);
+ }
+ }
+
+ match = xe_match_desc(pdev);
+ if (match && vfnumber && !match->has_sriov) {
+ pci_info(pdev, "xe driver does not support VFs on this device\n");
+ match = NULL;
+ } else if (!match) {
+ pci_info(pdev, "xe driver does not support configuration of this device\n");
+ }
+
pci_dev_put(pdev);
+ if (!match)
+ return ERR_PTR(-ENOENT);
+
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return ERR_PTR(-ENOMEM);
- /* Default values */
- dev->engines_allowed = U64_MAX;
+ dev->desc = match;
+ dev->mode = match->has_sriov ? mode : XE_SRIOV_MODE_NONE;
+
+ set_device_defaults(&dev->config);
config_group_init_type_name(&dev->group, name, &xe_config_device_type);
+ if (dev->mode != XE_SRIOV_MODE_NONE) {
+ config_group_init_type_name(&dev->sriov, "sriov", &xe_config_sriov_type);
+ configfs_add_default_group(&dev->sriov, &dev->group);
+ }
mutex_init(&dev->lock);
return &dev->group;
}
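
The sscanf()/scnprintf() round-trip above means only canonically spelled BDF
names create a group; a sketch with a hypothetical address:

	unsigned int domain, bus, slot, function;
	char canonical[16];

	sscanf("0:3:0.0", "%x:%x:%x.%x", &domain, &bus, &slot, &function);
	scnprintf(canonical, sizeof(canonical), "%04x:%02x:%02x.%d", domain, bus,
		  PCI_SLOT(PCI_DEVFN(slot, function)),
		  PCI_FUNC(PCI_DEVFN(slot, function)));
	/* canonical is now "0000:03:00.0"; strcmp() against the user-supplied
	 * "0:3:0.0" fails, so the mkdir is rejected with -EINVAL. */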
-static struct configfs_group_operations xe_config_device_group_ops = {
+static struct configfs_group_operations xe_config_group_ops = {
.make_group = xe_config_make_device_group,
};
static const struct config_item_type xe_configfs_type = {
- .ct_group_ops = &xe_config_device_group_ops,
+ .ct_group_ops = &xe_config_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -302,110 +1037,255 @@ static struct configfs_subsystem xe_configfs = {
},
};
-static struct xe_config_device *configfs_find_group(struct pci_dev *pdev)
+static struct xe_config_group_device *find_xe_config_group_device(struct pci_dev *pdev)
{
struct config_item *item;
- char name[64];
-
- snprintf(name, sizeof(name), "%04x:%02x:%02x.%x", pci_domain_nr(pdev->bus),
- pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
mutex_lock(&xe_configfs.su_mutex);
- item = config_group_find_item(&xe_configfs.su_group, name);
+ item = config_group_find_item(&xe_configfs.su_group, pci_name(pdev));
mutex_unlock(&xe_configfs.su_mutex);
if (!item)
return NULL;
- return to_xe_config_device(item);
+ return to_xe_config_group_device(item);
+}
+
+static void dump_custom_dev_config(struct pci_dev *pdev,
+ struct xe_config_group_device *dev)
+{
+#define PRI_CUSTOM_ATTR(fmt_, attr_) do { \
+ if (dev->config.attr_ != device_defaults.attr_) \
+ pci_info(pdev, "configfs: " __stringify(attr_) " = " fmt_ "\n", \
+ dev->config.attr_); \
+ } while (0)
+
+ PRI_CUSTOM_ATTR("%llx", gt_types_allowed);
+ PRI_CUSTOM_ATTR("%llx", engines_allowed);
+ PRI_CUSTOM_ATTR("%d", enable_psmi);
+ PRI_CUSTOM_ATTR("%d", survivability_mode);
+
+#undef PRI_CUSTOM_ATTR
+}
+
+/**
+ * xe_configfs_check_device() - Test if device was configured by configfs
+ * @pdev: the &pci_dev device to test
+ *
+ * Try to find the configfs group that belongs to the specified pci device
+ * and print a diagnostic message if its settings differ from the defaults.
+ */
+void xe_configfs_check_device(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+
+ if (!dev)
+ return;
+
+ /* memcmp here is safe as both are zero-initialized */
+ if (memcmp(&dev->config, &device_defaults, sizeof(dev->config))) {
+ pci_info(pdev, "Found custom settings in configfs\n");
+ dump_custom_dev_config(pdev, dev);
+ }
+
+ config_group_put(&dev->group);
}
/**
* xe_configfs_get_survivability_mode - get configfs survivability mode attribute
* @pdev: pci device
*
- * find the configfs group that belongs to the pci device and return
- * the survivability mode attribute
- *
- * Return: survivability mode if config group is found, false otherwise
+ * Return: survivability_mode attribute in configfs
*/
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev)
{
- struct xe_config_device *dev = configfs_find_group(pdev);
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
bool mode;
if (!dev)
- return false;
+ return device_defaults.survivability_mode;
- mode = dev->survivability_mode;
- config_item_put(&dev->group.cg_item);
+ mode = dev->config.survivability_mode;
+ config_group_put(&dev->group);
return mode;
}
+static u64 get_gt_types_allowed(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u64 mask;
+
+ if (!dev)
+ return device_defaults.gt_types_allowed;
+
+ mask = dev->config.gt_types_allowed;
+ config_group_put(&dev->group);
+
+ return mask;
+}
+
/**
- * xe_configfs_clear_survivability_mode - clear configfs survivability mode attribute
+ * xe_configfs_primary_gt_allowed - determine whether primary GTs are supported
* @pdev: pci device
*
- * find the configfs group that belongs to the pci device and clear survivability
- * mode attribute
+ * Return: True if primary GTs are enabled, false if they have been disabled via
+ * configfs.
*/
-void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
+bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev)
{
- struct xe_config_device *dev = configfs_find_group(pdev);
-
- if (!dev)
- return;
-
- mutex_lock(&dev->lock);
- dev->survivability_mode = 0;
- mutex_unlock(&dev->lock);
+ return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MAIN);
+}
- config_item_put(&dev->group.cg_item);
+/**
+ * xe_configfs_media_gt_allowed - determine whether media GTs are supported
+ * @pdev: pci device
+ *
+ * Return: True if the media GTs are enabled, false if they have been disabled
+ * via configfs.
+ */
+bool xe_configfs_media_gt_allowed(struct pci_dev *pdev)
+{
+ return get_gt_types_allowed(pdev) & BIT_ULL(XE_GT_TYPE_MEDIA);
}
/**
* xe_configfs_get_engines_allowed - get engine allowed mask from configfs
* @pdev: pci device
*
- * Find the configfs group that belongs to the pci device and return
- * the mask of engines allowed to be used.
- *
- * Return: engine mask with allowed engines
+ * Return: engine mask with allowed engines set in configfs
*/
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
{
- struct xe_config_device *dev = configfs_find_group(pdev);
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
u64 engines_allowed;
if (!dev)
- return U64_MAX;
+ return device_defaults.engines_allowed;
- engines_allowed = dev->engines_allowed;
- config_item_put(&dev->group.cg_item);
+ engines_allowed = dev->config.engines_allowed;
+ config_group_put(&dev->group);
return engines_allowed;
}
+/**
+ * xe_configfs_get_psmi_enabled - get configfs enable_psmi setting
+ * @pdev: pci device
+ *
+ * Return: enable_psmi setting in configfs
+ */
+bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ bool ret;
+
+ if (!dev)
+ return false;
+
+ ret = dev->config.enable_psmi;
+ config_group_put(&dev->group);
+
+ return ret;
+}
+
+/**
+ * xe_configfs_get_ctx_restore_mid_bb - get configfs ctx_restore_mid_bb setting
+ * @pdev: pci device
+ * @class: hw engine class
+ * @cs: pointer to the bb to use - only valid during probe
+ *
+ * Return: Number of dwords used in the mid_ctx_restore setting in configfs
+ */
+u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev,
+ enum xe_engine_class class,
+ const u32 **cs)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u32 len;
+
+ if (!dev)
+ return 0;
+
+ if (cs)
+ *cs = dev->config.ctx_restore_mid_bb[class].cs;
+
+ len = dev->config.ctx_restore_mid_bb[class].len;
+ config_group_put(&dev->group);
+
+ return len;
+}
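
A hypothetical probe-time caller (the destination buffer "dst" is assumed, not
part of this patch) would copy the returned dwords immediately, since @cs is
only valid during probe:

	const u32 *cs;
	u32 len = xe_configfs_get_ctx_restore_mid_bb(pdev, XE_ENGINE_CLASS_RENDER,
						     &cs);

	if (len)
		memcpy(dst, cs, len * sizeof(u32));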
+
+/**
+ * xe_configfs_get_ctx_restore_post_bb - get configfs ctx_restore_post_bb setting
+ * @pdev: pci device
+ * @class: hw engine class
+ * @cs: pointer to the bb to use - only valid during probe
+ *
+ * Return: Number of dwords used in the post_ctx_restore setting in configfs
+ */
+u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev,
+ enum xe_engine_class class,
+ const u32 **cs)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ u32 len;
+
+ if (!dev)
+ return 0;
+
+ *cs = dev->config.ctx_restore_post_bb[class].cs;
+ len = dev->config.ctx_restore_post_bb[class].len;
+ config_group_put(&dev->group);
+
+ return len;
+}
+
+#ifdef CONFIG_PCI_IOV
+/**
+ * xe_configfs_get_max_vfs() - Get number of VFs that could be managed
+ * @pdev: the &pci_dev device
+ *
+ * Find the configfs group that belongs to the PCI device and return the
+ * maximum number of Virtual Functions (VFs) that could be managed by this
+ * device. If the configfs group is not present, the value of the max_vfs
+ * module parameter is used.
+ *
+ * Return: maximum number of VFs that could be managed.
+ */
+unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev)
+{
+ struct xe_config_group_device *dev = find_xe_config_group_device(pdev);
+ unsigned int max_vfs;
+
+ if (!dev)
+ return xe_modparam.max_vfs;
+
+ scoped_guard(mutex, &dev->lock)
+ max_vfs = dev->config.sriov.max_vfs;
+
+ config_group_put(&dev->group);
+
+ return max_vfs;
+}
+#endif
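
A sketch of a hypothetical PF enablement path combining this getter with the
SR-IOV core (names outside this patch are assumptions):

	unsigned int num_vfs = min_t(unsigned int,
				     pci_sriov_get_totalvfs(pdev),
				     xe_configfs_get_max_vfs(pdev));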
+
int __init xe_configfs_init(void)
{
- struct config_group *root = &xe_configfs.su_group;
int ret;
- config_group_init(root);
+ config_group_init(&xe_configfs.su_group);
mutex_init(&xe_configfs.su_mutex);
ret = configfs_register_subsystem(&xe_configfs);
if (ret) {
- pr_err("Error %d while registering %s subsystem\n",
- ret, root->cg_item.ci_namebuf);
+ mutex_destroy(&xe_configfs.su_mutex);
return ret;
}
return 0;
}
-void __exit xe_configfs_exit(void)
+void xe_configfs_exit(void)
{
configfs_unregister_subsystem(&xe_configfs);
+ mutex_destroy(&xe_configfs.su_mutex);
}
-
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
index fb8764008089..fed57be0b90e 100644
--- a/drivers/gpu/drm/xe/xe_configfs.h
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -8,20 +8,40 @@
#include <linux/limits.h>
#include <linux/types.h>
+#include <xe_hw_engine_types.h>
+
struct pci_dev;
#if IS_ENABLED(CONFIG_CONFIGFS_FS)
int xe_configfs_init(void);
void xe_configfs_exit(void);
+void xe_configfs_check_device(struct pci_dev *pdev);
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
-void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
+bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev);
+bool xe_configfs_media_gt_allowed(struct pci_dev *pdev);
u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
+bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev);
+u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs);
+u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs);
+#ifdef CONFIG_PCI_IOV
+unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev);
+#endif
#else
static inline int xe_configfs_init(void) { return 0; }
static inline void xe_configfs_exit(void) { }
+static inline void xe_configfs_check_device(struct pci_dev *pdev) { }
static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
-static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { }
+static inline bool xe_configfs_primary_gt_allowed(struct pci_dev *pdev) { return true; }
+static inline bool xe_configfs_media_gt_allowed(struct pci_dev *pdev) { return true; }
static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
+static inline bool xe_configfs_get_psmi_enabled(struct pci_dev *pdev) { return false; }
+static inline u32 xe_configfs_get_ctx_restore_mid_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs) { return 0; }
+static inline u32 xe_configfs_get_ctx_restore_post_bb(struct pci_dev *pdev, enum xe_engine_class,
+ const u32 **cs) { return 0; }
+static inline unsigned int xe_configfs_get_max_vfs(struct pci_dev *pdev) { return UINT_MAX; }
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index 26e9d146ccbf..e91da9589c5f 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -11,17 +11,23 @@
#include <drm/drm_debugfs.h>
+#include "regs/xe_pmt.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt_debugfs.h"
#include "xe_gt_printk.h"
#include "xe_guc_ads.h"
+#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_psmi.h"
#include "xe_pxp_debugfs.h"
#include "xe_sriov.h"
-#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_debugfs.h"
+#include "xe_sriov_vf.h"
#include "xe_step.h"
+#include "xe_tile_debugfs.h"
+#include "xe_vsec.h"
#include "xe_wa.h"
#ifdef CONFIG_DRM_XE_DEBUG
@@ -31,6 +37,24 @@
#endif
DECLARE_FAULT_ATTR(gt_reset_failure);
+DECLARE_FAULT_ATTR(inject_csc_hw_error);
+
+static void read_residency_counter(struct xe_device *xe, struct xe_mmio *mmio,
+ u32 offset, const char *name, struct drm_printer *p)
+{
+ u64 residency = 0;
+ int ret;
+
+ ret = xe_pmt_telem_read(to_pci_dev(xe->drm.dev),
+ xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
+ &residency, offset, sizeof(residency));
+ if (ret != sizeof(residency)) {
+ drm_warn(&xe->drm, "%s counter failed to read, ret %d\n", name, ret);
+ return;
+ }
+
+ drm_printf(p, "%s : %llu\n", name, residency);
+}
static struct xe_device *node_to_xe(struct drm_info_node *node)
{
@@ -102,12 +126,73 @@ static int workaround_info(struct seq_file *m, void *data)
return 0;
}
+static int dgfx_pkg_residencies_show(struct seq_file *m, void *data)
+{
+ struct xe_device *xe;
+ struct xe_mmio *mmio;
+ struct drm_printer p;
+
+ xe = node_to_xe(m->private);
+ p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(xe);
+ mmio = xe_root_tile_mmio(xe);
+ static const struct {
+ u32 offset;
+ const char *name;
+ } residencies[] = {
+ {BMG_G2_RESIDENCY_OFFSET, "Package G2"},
+ {BMG_G6_RESIDENCY_OFFSET, "Package G6"},
+ {BMG_G7_RESIDENCY_OFFSET, "Package G7"},
+ {BMG_G8_RESIDENCY_OFFSET, "Package G8"},
+ {BMG_G10_RESIDENCY_OFFSET, "Package G10"},
+ {BMG_MODS_RESIDENCY_OFFSET, "Package ModS"}
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(residencies); i++)
+ read_residency_counter(xe, mmio, residencies[i].offset, residencies[i].name, &p);
+
+ xe_pm_runtime_put(xe);
+ return 0;
+}
+
+static int dgfx_pcie_link_residencies_show(struct seq_file *m, void *data)
+{
+ struct xe_device *xe;
+ struct xe_mmio *mmio;
+ struct drm_printer p;
+
+ xe = node_to_xe(m->private);
+ p = drm_seq_file_printer(m);
+ xe_pm_runtime_get(xe);
+ mmio = xe_root_tile_mmio(xe);
+
+ static const struct {
+ u32 offset;
+ const char *name;
+ } residencies[] = {
+ {BMG_PCIE_LINK_L0_RESIDENCY_OFFSET, "PCIE LINK L0 RESIDENCY"},
+ {BMG_PCIE_LINK_L1_RESIDENCY_OFFSET, "PCIE LINK L1 RESIDENCY"},
+ {BMG_PCIE_LINK_L1_2_RESIDENCY_OFFSET, "PCIE LINK L1.2 RESIDENCY"}
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(residencies); i++)
+ read_residency_counter(xe, mmio, residencies[i].offset, residencies[i].name, &p);
+
+ xe_pm_runtime_put(xe);
+ return 0;
+}
+
static const struct drm_info_list debugfs_list[] = {
{"info", info, 0},
{ .name = "sriov_info", .show = sriov_info, },
{ .name = "workarounds", .show = workaround_info, },
};
+static const struct drm_info_list debugfs_residencies[] = {
+ { .name = "dgfx_pkg_residencies", .show = dgfx_pkg_residencies_show, },
+ { .name = "dgfx_pcie_link_residencies", .show = dgfx_pcie_link_residencies_show, },
+};
+
static int forcewake_open(struct inode *inode, struct file *file)
{
struct xe_device *xe = inode->i_private;
@@ -247,20 +332,65 @@ static const struct file_operations atomic_svm_timeslice_ms_fops = {
.write = atomic_svm_timeslice_ms_set,
};
+static ssize_t disable_late_binding_show(struct file *f, char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ struct xe_late_bind *late_bind = &xe->late_bind;
+ char buf[32];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", late_bind->disable);
+
+ return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t disable_late_binding_set(struct file *f, const char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ struct xe_late_bind *late_bind = &xe->late_bind;
+ bool val;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, size, &val);
+ if (ret)
+ return ret;
+
+ late_bind->disable = val;
+ return size;
+}
+
+static const struct file_operations disable_late_binding_fops = {
+ .owner = THIS_MODULE,
+ .read = disable_late_binding_show,
+ .write = disable_late_binding_set,
+};
+
void xe_debugfs_register(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
struct drm_minor *minor = xe->drm.primary;
struct dentry *root = minor->debugfs_root;
struct ttm_resource_manager *man;
+ struct xe_tile *tile;
struct xe_gt *gt;
u32 mem_type;
+ u8 tile_id;
u8 id;
drm_debugfs_create_files(debugfs_list,
ARRAY_SIZE(debugfs_list),
root, minor);
+ if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) {
+ drm_debugfs_create_files(debugfs_residencies,
+ ARRAY_SIZE(debugfs_residencies),
+ root, minor);
+ fault_create_debugfs_attr("inject_csc_hw_error", root,
+ &inject_csc_hw_error);
+ }
+
debugfs_create_file("forcewake_all", 0400, root, xe,
&forcewake_all_fops);
@@ -270,6 +400,9 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe,
&atomic_svm_timeslice_ms_fops);
+ debugfs_create_file("disable_late_binding", 0600, root, xe,
+ &disable_late_binding_fops);
+
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
man = ttm_manager_type(bdev, mem_type);
@@ -288,13 +421,20 @@ void xe_debugfs_register(struct xe_device *xe)
if (man)
ttm_resource_manager_create_debugfs(man, root, "stolen_mm");
+ for_each_tile(tile, xe, tile_id)
+ xe_tile_debugfs_register(tile);
+
for_each_gt(gt, xe, id)
xe_gt_debugfs_register(gt);
xe_pxp_debugfs_register(xe->pxp);
+ xe_psmi_debugfs_register(xe);
+
fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure);
if (IS_SRIOV_PF(xe))
xe_sriov_pf_debugfs_register(xe, root);
+ else if (IS_SRIOV_VF(xe))
+ xe_sriov_vf_debugfs_register(xe, root);
}
diff --git a/drivers/gpu/drm/xe/xe_dep_job_types.h b/drivers/gpu/drm/xe/xe_dep_job_types.h
new file mode 100644
index 000000000000..c6a484f24c8c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dep_job_types.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_DEP_JOB_TYPES_H_
+#define _XE_DEP_JOB_TYPES_H_
+
+#include <drm/gpu_scheduler.h>
+
+struct xe_dep_job;
+
+/** struct xe_dep_job_ops - Generic Xe dependency job operations */
+struct xe_dep_job_ops {
+ /** @run_job: Run generic Xe dependency job */
+ struct dma_fence *(*run_job)(struct xe_dep_job *job);
+ /** @free_job: Free generic Xe dependency job */
+ void (*free_job)(struct xe_dep_job *job);
+};
+
+/** struct xe_dep_job - Generic dependency Xe job */
+struct xe_dep_job {
+ /** @drm: base DRM scheduler job */
+ struct drm_sched_job drm;
+ /** @ops: dependency job operations */
+ const struct xe_dep_job_ops *ops;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_dep_scheduler.c b/drivers/gpu/drm/xe/xe_dep_scheduler.c
new file mode 100644
index 000000000000..9bd3bfd2e526
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dep_scheduler.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include <drm/gpu_scheduler.h>
+
+#include "xe_dep_job_types.h"
+#include "xe_dep_scheduler.h"
+#include "xe_device_types.h"
+
+/**
+ * DOC: Xe Dependency Scheduler
+ *
+ * The Xe dependency scheduler is a simple wrapper built around the DRM
+ * scheduler to execute jobs once their dependencies are resolved (i.e., all
+ * input fences specified as dependencies are signaled). The jobs that are
+ * executed contain virtual functions to run (execute) and free the job,
+ * allowing a single dependency scheduler to handle jobs performing different
+ * operations.
+ *
+ * Example use cases include deferred resource freeing, TLB invalidations after
+ * bind jobs, etc.
+ */
+
+/** struct xe_dep_scheduler - Generic Xe dependency scheduler */
+struct xe_dep_scheduler {
+ /** @sched: DRM GPU scheduler */
+ struct drm_gpu_scheduler sched;
+ /** @entity: DRM scheduler entity */
+ struct drm_sched_entity entity;
+ /** @rcu: For safe freeing of exported dma fences */
+ struct rcu_head rcu;
+};
+
+static struct dma_fence *xe_dep_scheduler_run_job(struct drm_sched_job *drm_job)
+{
+ struct xe_dep_job *dep_job =
+ container_of(drm_job, typeof(*dep_job), drm);
+
+ return dep_job->ops->run_job(dep_job);
+}
+
+static void xe_dep_scheduler_free_job(struct drm_sched_job *drm_job)
+{
+ struct xe_dep_job *dep_job =
+ container_of(drm_job, typeof(*dep_job), drm);
+
+ dep_job->ops->free_job(dep_job);
+}
+
+static const struct drm_sched_backend_ops sched_ops = {
+ .run_job = xe_dep_scheduler_run_job,
+ .free_job = xe_dep_scheduler_free_job,
+};
+
+/**
+ * xe_dep_scheduler_create() - Generic Xe dependency scheduler create
+ * @xe: Xe device
+ * @submit_wq: Submit workqueue struct (can be NULL)
+ * @name: Name of dependency scheduler
+ * @job_limit: Max dependency jobs that can be scheduled
+ *
+ * Create a generic Xe dependency scheduler and initialize internal DRM
+ * scheduler objects.
+ *
+ * Return: Generic Xe dependency scheduler object on success, ERR_PTR on failure
+ */
+struct xe_dep_scheduler *
+xe_dep_scheduler_create(struct xe_device *xe,
+ struct workqueue_struct *submit_wq,
+ const char *name, u32 job_limit)
+{
+ struct xe_dep_scheduler *dep_scheduler;
+ struct drm_gpu_scheduler *sched;
+ const struct drm_sched_init_args args = {
+ .ops = &sched_ops,
+ .submit_wq = submit_wq,
+ .num_rqs = 1,
+ .credit_limit = job_limit,
+ .timeout = MAX_SCHEDULE_TIMEOUT,
+ .name = name,
+ .dev = xe->drm.dev,
+ };
+ int err;
+
+ dep_scheduler = kzalloc(sizeof(*dep_scheduler), GFP_KERNEL);
+ if (!dep_scheduler)
+ return ERR_PTR(-ENOMEM);
+
+ err = drm_sched_init(&dep_scheduler->sched, &args);
+ if (err)
+ goto err_free;
+
+ sched = &dep_scheduler->sched;
+ err = drm_sched_entity_init(&dep_scheduler->entity, 0, &sched, 1, NULL);
+ if (err)
+ goto err_sched;
+
+ init_rcu_head(&dep_scheduler->rcu);
+
+ return dep_scheduler;
+
+err_sched:
+ drm_sched_fini(&dep_scheduler->sched);
+err_free:
+ kfree(dep_scheduler);
+
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_dep_scheduler_fini() - Generic Xe dependency scheduler finalize
+ * @dep_scheduler: Generic Xe dependency scheduler object
+ *
+ * Finalize internal DRM scheduler objects and free generic Xe dependency
+ * scheduler object
+ */
+void xe_dep_scheduler_fini(struct xe_dep_scheduler *dep_scheduler)
+{
+ drm_sched_entity_fini(&dep_scheduler->entity);
+ drm_sched_fini(&dep_scheduler->sched);
+ /*
+ * RCU free due to the sched being exported via DRM scheduler fences
+ * (timeline name).
+ */
+ kfree_rcu(dep_scheduler, rcu);
+}
+
+/**
+ * xe_dep_scheduler_entity() - Retrieve a generic Xe dependency scheduler
+ * DRM scheduler entity
+ * @dep_scheduler: Generic Xe dependency scheduler object
+ *
+ * Return: The generic Xe dependency scheduler's DRM scheduler entity
+ */
+struct drm_sched_entity *
+xe_dep_scheduler_entity(struct xe_dep_scheduler *dep_scheduler)
+{
+ return &dep_scheduler->entity;
+}
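
To make the run/free indirection concrete, a minimal job type built on this
scheduler might look as follows; every name here is hypothetical and not part
of this patch. Such a job would be submitted against the entity returned by
xe_dep_scheduler_entity() through the usual DRM scheduler job calls.

	struct free_job {
		struct xe_dep_job dep;
		void *payload;
	};

	static struct dma_fence *free_job_run(struct xe_dep_job *job)
	{
		struct free_job *fj = container_of(job, struct free_job, dep);

		kfree(fj->payload);	/* deferred free, runs once deps signal */
		return NULL;		/* no hardware fence to chain */
	}

	static void free_job_free(struct xe_dep_job *job)
	{
		kfree(container_of(job, struct free_job, dep));
	}

	static const struct xe_dep_job_ops free_job_ops = {
		.run_job = free_job_run,
		.free_job = free_job_free,
	};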
diff --git a/drivers/gpu/drm/xe/xe_dep_scheduler.h b/drivers/gpu/drm/xe/xe_dep_scheduler.h
new file mode 100644
index 000000000000..853961eec64b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_dep_scheduler.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+struct drm_sched_entity;
+struct workqueue_struct;
+struct xe_dep_scheduler;
+struct xe_device;
+
+struct xe_dep_scheduler *
+xe_dep_scheduler_create(struct xe_device *xe,
+ struct workqueue_struct *submit_wq,
+ const char *name, u32 job_limit);
+
+void xe_dep_scheduler_fini(struct xe_dep_scheduler *dep_scheduler);
+
+struct drm_sched_entity *
+xe_dep_scheduler_entity(struct xe_dep_scheduler *dep_scheduler);
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 203e3038cc81..d444eda65ca6 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -106,9 +106,9 @@ static ssize_t __xe_devcoredump_read(char *buffer, ssize_t count,
drm_puts(&p, "module: " KBUILD_MODNAME "\n");
ts = ktime_to_timespec64(ss->snapshot_time);
- drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
+ drm_printf(&p, "Snapshot time: %ptSp\n", &ts);
ts = ktime_to_timespec64(ss->boot_time);
- drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
+ drm_printf(&p, "Uptime: %ptSp\n", &ts);
drm_printf(&p, "Process: %s [%d]\n", ss->process_name, ss->pid);
xe_device_snapshot_print(xe, &p);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 6ece4defa9df..c7d373c70f0f 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -8,6 +8,7 @@
#include <linux/aperture.h>
#include <linux/delay.h>
#include <linux/fault-inject.h>
+#include <linux/iopoll.h>
#include <linux/units.h>
#include <drm/drm_atomic_helper.h>
@@ -45,15 +46,18 @@
#include "xe_hwmon.h"
#include "xe_i2c.h"
#include "xe_irq.h"
+#include "xe_late_bind_fw.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_nvm.h"
#include "xe_oa.h"
#include "xe_observation.h"
+#include "xe_pagefault.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_pmu.h"
+#include "xe_psmi.h"
#include "xe_pxp.h"
#include "xe_query.h"
#include "xe_shrinker.h"
@@ -63,7 +67,9 @@
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
+#include "xe_vm_madvise.h"
#include "xe_vram.h"
+#include "xe_vram_types.h"
#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"
@@ -200,6 +206,9 @@ static const struct drm_ioctl_desc xe_ioctls[] = {
DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(XE_VM_QUERY_MEM_RANGE_ATTRS, xe_vm_query_vmas_attrs_ioctl,
+ DRM_RENDER_ALLOW),
};
static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -429,7 +438,7 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
xe->drm.anon_inode->i_mapping,
- xe->drm.vma_offset_manager, false, false);
+ xe->drm.vma_offset_manager, 0);
if (WARN_ON(err))
goto err;
@@ -451,6 +460,8 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
+ xe_validation_device_init(&xe->val);
+
init_waitqueue_head(&xe->ufence_wq);
init_rwsem(&xe->usm.lock);
@@ -524,7 +535,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe)
* re-init and saving/restoring (or re-populating) the wiped memory. Since we
* perform the FLR as the very last action before releasing access to the HW
* during the driver release flow, we don't attempt recovery at all, because
- * if/when a new instance of i915 is bound to the device it will do a full
+ * if/when a new instance of Xe is bound to the device it will do a full
* re-init anyway.
*/
static void __xe_driver_flr(struct xe_device *xe)
@@ -621,16 +632,22 @@ mask_err:
return err;
}
-static bool verify_lmem_ready(struct xe_device *xe)
+static int lmem_initializing(struct xe_device *xe)
{
- u32 val = xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT;
+ if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT)
+ return 0;
- return !!val;
+ if (signal_pending(current))
+ return -EINTR;
+
+ return 1;
}
static int wait_for_lmem_ready(struct xe_device *xe)
{
- unsigned long timeout, start;
+ const unsigned long TIMEOUT_SEC = 60;
+ unsigned long prev_jiffies;
+ int initializing;
if (!IS_DGFX(xe))
return 0;
@@ -638,54 +655,65 @@ static int wait_for_lmem_ready(struct xe_device *xe)
if (IS_SRIOV_VF(xe))
return 0;
- if (verify_lmem_ready(xe))
+ if (!lmem_initializing(xe))
return 0;
drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
+ prev_jiffies = jiffies;
- start = jiffies;
- timeout = start + secs_to_jiffies(60); /* 60 sec! */
-
- do {
- if (signal_pending(current))
- return -EINTR;
-
- /*
- * The boot firmware initializes local memory and
- * assesses its health. If memory training fails,
- * the punit will have been instructed to keep the GT powered
- * down.we won't be able to communicate with it
- *
- * If the status check is done before punit updates the register,
- * it can lead to the system being unusable.
- * use a timeout and defer the probe to prevent this.
- */
- if (time_after(jiffies, timeout)) {
- drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
- return -EPROBE_DEFER;
- }
-
- msleep(20);
-
- } while (!verify_lmem_ready(xe));
+ /*
+ * The boot firmware initializes local memory and
+ * assesses its health. If memory training fails,
+ * the punit will have been instructed to keep the GT powered
+ * down and we won't be able to communicate with it.
+ *
+ * If the status check is done before the punit updates the register,
+ * it can lead to the system being unusable.
+ * Use a timeout and defer the probe to prevent this.
+ */
+ poll_timeout_us(initializing = lmem_initializing(xe),
+ initializing <= 0,
+ 20 * USEC_PER_MSEC, TIMEOUT_SEC * USEC_PER_SEC, true);
+ if (initializing < 0)
+ return initializing;
+
+ if (initializing) {
+ drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
+ return -EPROBE_DEFER;
+ }
drm_dbg(&xe->drm, "lmem ready after %ums",
- jiffies_to_msecs(jiffies - start));
+ jiffies_to_msecs(jiffies - prev_jiffies));
return 0;
}
ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
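
The poll_timeout_us() semantics, as inferred from this call site alone (sleep
first because the last argument is true, re-evaluate the op, stop when the
condition holds or the timeout elapses), are roughly equivalent to:

	unsigned long timeout = jiffies + secs_to_jiffies(TIMEOUT_SEC);
	int initializing;

	do {
		msleep(20);	/* 20 * USEC_PER_MSEC, sleep_before == true */
		initializing = lmem_initializing(xe);
	} while (initializing > 0 && time_before(jiffies, timeout));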
-static void sriov_update_device_info(struct xe_device *xe)
+static void vf_update_device_info(struct xe_device *xe)
{
+ xe_assert(xe, IS_SRIOV_VF(xe));
/* disable features that are not available/applicable to VFs */
- if (IS_SRIOV_VF(xe)) {
- xe->info.probe_display = 0;
- xe->info.has_heci_cscfi = 0;
- xe->info.has_heci_gscfi = 0;
- xe->info.skip_guc_pc = 1;
- xe->info.skip_pcode = 1;
- }
+ xe->info.probe_display = 0;
+ xe->info.has_heci_cscfi = 0;
+ xe->info.has_heci_gscfi = 0;
+ xe->info.has_late_bind = 0;
+ xe->info.skip_guc_pc = 1;
+ xe->info.skip_pcode = 1;
+}
+
+static int xe_device_vram_alloc(struct xe_device *xe)
+{
+ struct xe_vram_region *vram;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ xe->mem.vram = vram;
+ return 0;
}
/**
@@ -711,7 +739,8 @@ int xe_device_probe_early(struct xe_device *xe)
xe_sriov_probe_early(xe);
- sriov_update_device_info(xe);
+ if (IS_SRIOV_VF(xe))
+ vf_update_device_info(xe);
err = xe_pcode_probe_early(xe);
if (err || xe_survivability_mode_is_requested(xe)) {
@@ -722,7 +751,7 @@ int xe_device_probe_early(struct xe_device *xe)
* possible, but still return the previous error for error
* propagation
*/
- err = xe_survivability_mode_enable(xe);
+ err = xe_survivability_mode_boot_enable(xe);
if (err)
return err;
@@ -735,6 +764,10 @@ int xe_device_probe_early(struct xe_device *xe)
xe->wedged.mode = xe_modparam.wedged_mode;
+ err = xe_device_vram_alloc(xe);
+ if (err)
+ return err;
+
return 0;
}
ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
@@ -750,6 +783,8 @@ static int probe_has_flat_ccs(struct xe_device *xe)
return 0;
gt = xe_root_mmio_gt(xe);
+ if (!gt)
+ return 0;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
@@ -862,8 +897,12 @@ int xe_device_probe(struct xe_device *xe)
return err;
}
+ err = xe_pagefault_init(xe);
+ if (err)
+ return err;
+
if (xe->tiles->media_gt &&
- XE_WA(xe->tiles->media_gt, 15015404425_disable))
+ XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
XE_DEVICE_WA_DISABLE(xe, 15015404425);
err = xe_devcoredump_init(xe);
@@ -876,6 +915,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
+ err = xe_late_bind_init(&xe->late_bind);
+ if (err)
+ return err;
+
err = xe_oa_init(xe);
if (err)
return err;
@@ -888,6 +931,10 @@ int xe_device_probe(struct xe_device *xe)
if (err)
return err;
+ err = xe_psmi_init(xe);
+ if (err)
+ return err;
+
err = drm_dev_register(&xe->drm, 0);
if (err)
return err;
@@ -921,6 +968,10 @@ int xe_device_probe(struct xe_device *xe)
xe_vsec_init(xe);
+ err = xe_sriov_init_late(xe);
+ if (err)
+ goto err_unregister_display;
+
return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
err_unregister_display:
@@ -947,16 +998,16 @@ void xe_device_shutdown(struct xe_device *xe)
drm_dbg(&xe->drm, "Shutting down device\n");
- if (xe_driver_flr_disabled(xe)) {
- xe_display_pm_shutdown(xe);
+ xe_display_pm_shutdown(xe);
- xe_irq_suspend(xe);
+ xe_irq_suspend(xe);
- for_each_gt(gt, xe, id)
- xe_gt_shutdown(gt);
+ for_each_gt(gt, xe, id)
+ xe_gt_shutdown(gt);
- xe_display_pm_shutdown_late(xe);
- } else {
+ xe_display_pm_shutdown_late(xe);
+
+ if (!xe_driver_flr_disabled(xe)) {
/* BOOM! */
__xe_driver_flr(xe);
}
@@ -1018,8 +1069,10 @@ void xe_device_l2_flush(struct xe_device *xe)
unsigned int fw_ref;
gt = xe_root_mmio_gt(xe);
+ if (!gt)
+ return;
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
@@ -1029,7 +1082,7 @@ void xe_device_l2_flush(struct xe_device *xe)
spin_lock(&gt->global_invl_lock);
xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
- if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
+ if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 1000, NULL, true))
xe_gt_err_once(gt, "Global invalidation timeout\n");
spin_unlock(&gt->global_invl_lock);
@@ -1063,7 +1116,10 @@ void xe_device_td_flush(struct xe_device *xe)
return;
root_gt = xe_root_mmio_gt(xe);
- if (XE_WA(root_gt, 16023588340)) {
+ if (!root_gt)
+ return;
+
+ if (XE_GT_WA(root_gt, 16023588340)) {
/* A transient flush is not sufficient: flush the L2 */
xe_device_l2_flush(xe);
} else {
@@ -1134,11 +1190,63 @@ static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
}
/**
+ * DOC: Xe Device Wedging
+ *
+ * The Xe driver uses the drm device wedged uevent as documented in
+ * Documentation/gpu/drm-uapi.rst. When the device is in a wedged state, every
+ * IOCTL is blocked and the GT cannot be used. Certain critical errors, such as
+ * a GT reset failure or a firmware failure, can cause the device to be wedged.
+ * The default recovery method for a wedged state is rebind/bus-reset.
+ *
+ * Another recovery method is vendor-specific. Below are the cases that send
+ * the ``WEDGED=vendor-specific`` recovery method in the drm device wedged
+ * uevent.
+ *
+ * Case: Firmware Flash
+ * --------------------
+ *
+ * Identification Hint
+ * +++++++++++++++++++
+ *
+ * ``WEDGED=vendor-specific`` drm device wedged uevent with
+ * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
+ * admin/userspace consumer about the need for a firmware flash.
+ *
+ * Recovery Procedure
+ * ++++++++++++++++++
+ *
+ * Once the ``WEDGED=vendor-specific`` drm device wedged uevent is received,
+ * follow the steps below:
+ *
+ * - Check the Runtime Survivability mode sysfs.
+ * If enabled, a firmware flash is required to recover the device.
+ *
+ * /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * - Admin/userspace consumers can use firmware flashing tools like fwupd to
+ * flash the firmware and restore the device to normal operation.
+ */
+
+/**
+ * xe_device_set_wedged_method - Set wedged recovery method
+ * @xe: xe device instance
+ * @method: recovery method to set
+ *
+ * Set wedged recovery method to be sent in drm wedged uevent.
+ */
+void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method)
+{
+ xe->wedged.method = method;
+}
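
Per the kerneldoc above, a caller wanting the vendor-specific recovery hint
would set the method before wedging; a sketch, assuming the DRM core exposes
DRM_WEDGE_RECOVERY_VENDOR for this purpose:

	xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_VENDOR);
	xe_device_declare_wedged(xe);	/* uevent carries WEDGED=vendor-specific */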
+
+/**
* xe_device_declare_wedged - Declare device wedged
* @xe: xe device instance
*
- * This is a final state that can only be cleared with a module
- * re-probe (unbind + bind).
+ * This is a final state that can only be cleared with the recovery method
+ * specified in the drm wedged uevent. The method can be set using
+ * xe_device_set_wedged_method before declaring the device as wedged. If no method
+ * is set, reprobe (unbind/re-bind) will be sent by default.
+ *
* In this state every IOCTL will be blocked so the GT cannot be used.
* In general it will be called upon any critical error such as gt reset
* failure or guc loading failure. Userspace will be notified of this state
@@ -1172,13 +1280,18 @@ void xe_device_declare_wedged(struct xe_device *xe)
"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
dev_name(xe->drm.dev));
-
- /* Notify userspace of wedged device */
- drm_dev_wedged_event(&xe->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
- NULL);
}
for_each_gt(gt, xe, id)
xe_gt_declare_wedged(gt);
+
+ if (xe_device_wedged(xe)) {
+ /* If no wedge recovery method is set, use default */
+ if (!xe->wedged.method)
+ xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND |
+ DRM_WEDGE_RECOVERY_BUS_RESET);
+
+ /* Notify userspace of wedged device */
+ drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL);
+ }
}
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index bc802e066a7d..32cc6323b7f6 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -187,6 +187,7 @@ static inline bool xe_device_wedged(struct xe_device *xe)
return atomic_read(&xe->wedged.flag);
}
+void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method);
void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c
index bd9015761aa0..ec9c06b06fb5 100644
--- a/drivers/gpu/drm/xe/xe_device_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_device_sysfs.c
@@ -38,13 +38,8 @@ vram_d3cold_threshold_show(struct device *dev,
{
struct pci_dev *pdev = to_pci_dev(dev);
struct xe_device *xe = pdev_to_xe_device(pdev);
- int ret;
-
- xe_pm_runtime_get(xe);
- ret = sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
- xe_pm_runtime_put(xe);
- return ret;
+ return sysfs_emit(buf, "%d\n", xe->d3cold.vram_threshold);
}
static ssize_t
@@ -71,12 +66,21 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RW(vram_d3cold_threshold);
+static struct attribute *vram_attrs[] = {
+ &dev_attr_vram_d3cold_threshold.attr,
+ NULL
+};
+
+static const struct attribute_group vram_attr_group = {
+ .attrs = vram_attrs,
+};
+
static ssize_t
lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
+ u32 cap = 0, ver_low = FAN_TABLE, ver_high = FAN_TABLE;
u16 major = 0, minor = 0, hotfix = 0, build = 0;
int ret;
@@ -115,7 +119,7 @@ lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *a
{
struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
+ u32 cap = 0, ver_low = VR_CONFIG, ver_high = VR_CONFIG;
u16 major = 0, minor = 0, hotfix = 0, build = 0;
int ret;
@@ -149,62 +153,41 @@ out:
}
static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version);
-static int late_bind_create_files(struct device *dev)
-{
- struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
- struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap;
- int ret;
-
- xe_pm_runtime_get(xe);
-
- ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
- &cap, NULL);
- if (ret) {
- if (ret == -ENXIO) {
- drm_dbg(&xe->drm, "Late binding not supported by firmware\n");
- ret = 0;
- }
- goto out;
- }
-
- if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) {
- ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
- if (ret)
- goto out;
- }
-
- if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
- ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
-out:
- xe_pm_runtime_put(xe);
-
- return ret;
-}
+static struct attribute *late_bind_attrs[] = {
+ &dev_attr_lb_fan_control_version.attr,
+ &dev_attr_lb_voltage_regulator_version.attr,
+ NULL
+};
-static void late_bind_remove_files(struct device *dev)
+static umode_t late_bind_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
+ struct device *dev = kobj_to_dev(kobj);
struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));
struct xe_tile *root = xe_device_get_root_tile(xe);
- u32 cap;
+ u32 cap = 0;
int ret;
- xe_pm_runtime_get(xe);
-
ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0),
&cap, NULL);
if (ret)
- goto out;
+ return 0;
- if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
- sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr);
+ if (attr == &dev_attr_lb_fan_control_version.attr &&
+ REG_FIELD_GET(V1_FAN_SUPPORTED, cap))
+ return attr->mode;
+ if (attr == &dev_attr_lb_voltage_regulator_version.attr &&
+ REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
+ return attr->mode;
- if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap))
- sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr);
-out:
- xe_pm_runtime_put(xe);
+ return 0;
}
+static const struct attribute_group late_bind_attr_group = {
+ .attrs = late_bind_attrs,
+ .is_visible = late_bind_attr_is_visible,
+};
+
/**
* DOC: PCIe Gen5 Limitations
*
@@ -278,24 +261,15 @@ auto_link_downgrade_status_show(struct device *dev, struct device_attribute *att
}
static DEVICE_ATTR_ADMIN_RO(auto_link_downgrade_status);
-static const struct attribute *auto_link_downgrade_attrs[] = {
+static struct attribute *auto_link_downgrade_attrs[] = {
&dev_attr_auto_link_downgrade_capable.attr,
&dev_attr_auto_link_downgrade_status.attr,
NULL
};
-static void xe_device_sysfs_fini(void *arg)
-{
- struct xe_device *xe = arg;
-
- if (xe->d3cold.capable)
- sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
-
- if (xe->info.platform == XE_BATTLEMAGE) {
- sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs);
- late_bind_remove_files(xe->drm.dev);
- }
-}
+static const struct attribute_group auto_link_downgrade_attr_group = {
+ .attrs = auto_link_downgrade_attrs,
+};
int xe_device_sysfs_init(struct xe_device *xe)
{
@@ -303,20 +277,20 @@ int xe_device_sysfs_init(struct xe_device *xe)
int ret;
if (xe->d3cold.capable) {
- ret = sysfs_create_file(&dev->kobj, &dev_attr_vram_d3cold_threshold.attr);
+ ret = devm_device_add_group(dev, &vram_attr_group);
if (ret)
return ret;
}
- if (xe->info.platform == XE_BATTLEMAGE) {
- ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs);
+ if (xe->info.platform == XE_BATTLEMAGE && !IS_SRIOV_VF(xe)) {
+ ret = devm_device_add_group(dev, &auto_link_downgrade_attr_group);
if (ret)
return ret;
- ret = late_bind_create_files(dev);
+ ret = devm_device_add_group(dev, &late_bind_attr_group);
if (ret)
return ret;
}
- return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe);
+ return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 7ceb0c90f391..0b2fa7c56d38 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -10,23 +10,26 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_device.h>
#include "xe_devcoredump_types.h"
#include "xe_heci_gsc.h"
+#include "xe_late_bind_fw_types.h"
#include "xe_lmtt_types.h"
#include "xe_memirq_types.h"
#include "xe_oa_types.h"
+#include "xe_pagefault_types.h"
#include "xe_platform_types.h"
#include "xe_pmu_types.h"
#include "xe_pt_types.h"
#include "xe_sriov_pf_types.h"
#include "xe_sriov_types.h"
#include "xe_sriov_vf_types.h"
+#include "xe_sriov_vf_ccs_types.h"
#include "xe_step_types.h"
#include "xe_survivability_mode_types.h"
-#include "xe_ttm_vram_mgr_types.h"
+#include "xe_tile_sriov_vf_types.h"
+#include "xe_validation.h"
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define TEST_VM_OPS_ERROR
@@ -39,6 +42,7 @@ struct xe_ggtt;
struct xe_i2c;
struct xe_pat_ops;
struct xe_pxp;
+struct xe_vram_region;
#define XE_BO_INVALID_OFFSET LONG_MAX
@@ -72,61 +76,6 @@ struct xe_pxp;
struct xe_tile * : (tile__)->xe)
/**
- * struct xe_vram_region - memory region structure
- * This is used to describe a memory region in xe
- * device, such as HBM memory or CXL extension memory.
- */
-struct xe_vram_region {
- /** @io_start: IO start address of this VRAM instance */
- resource_size_t io_start;
- /**
- * @io_size: IO size of this VRAM instance
- *
- * This represents how much of this VRAM we can access
- * via the CPU through the VRAM BAR. This can be smaller
- * than @usable_size, in which case only part of VRAM is CPU
- * accessible (typically the first 256M). This
- * configuration is known as small-bar.
- */
- resource_size_t io_size;
- /** @dpa_base: This memory regions's DPA (device physical address) base */
- resource_size_t dpa_base;
- /**
- * @usable_size: usable size of VRAM
- *
- * Usable size of VRAM excluding reserved portions
- * (e.g stolen mem)
- */
- resource_size_t usable_size;
- /**
- * @actual_physical_size: Actual VRAM size
- *
- * Actual VRAM size including reserved portions
- * (e.g stolen mem)
- */
- resource_size_t actual_physical_size;
- /** @mapping: pointer to VRAM mappable space */
- void __iomem *mapping;
- /** @ttm: VRAM TTM manager */
- struct xe_ttm_vram_mgr ttm;
-#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
- /** @pagemap: Used to remap device memory as ZONE_DEVICE */
- struct dev_pagemap pagemap;
- /**
- * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
- * pages of this tile.
- */
- struct drm_pagemap dpagemap;
- /**
- * @hpa_base: base host physical address
- *
- * This is generated when remap device memory as ZONE_DEVICE
- */
- resource_size_t hpa_base;
-#endif
-};
-
-/**
* struct xe_mmio - register mmio structure
*
* Represents an MMIO region that the CPU may use to access registers. A
@@ -211,12 +160,20 @@ struct xe_tile {
/** @mem: memory management info for tile */
struct {
/**
- * @mem.vram: VRAM info for tile.
+ * @mem.kernel_vram: kernel-dedicated VRAM info for tile.
*
* Although VRAM is associated with a specific tile, it can
* still be accessed by all tiles' GTs.
*/
- struct xe_vram_region vram;
+ struct xe_vram_region *kernel_vram;
+
+ /**
+ * @mem.vram: general purpose VRAM info for tile.
+ *
+ * Although VRAM is associated with a specific tile, it can
+ * still be accessed by all tiles' GTs.
+ */
+ struct xe_vram_region *vram;
/** @mem.ggtt: Global graphics translation table */
struct xe_ggtt *ggtt;
@@ -238,12 +195,17 @@ struct xe_tile {
struct {
/** @sriov.vf.ggtt_balloon: GGTT regions excluded from use. */
struct xe_ggtt_node *ggtt_balloon[2];
+ /** @sriov.vf.self_config: VF configuration data */
+ struct xe_tile_sriov_vf_selfconfig self_config;
} vf;
} sriov;
/** @memirq: Memory Based Interrupts. */
struct xe_memirq memirq;
+ /** @csc_hw_error_work: worker to report CSC HW errors */
+ struct work_struct csc_hw_error_work;
+
/** @pcode: tile's PCODE */
struct {
/** @pcode.lock: protecting tile's PCODE mailbox data */
@@ -255,15 +217,23 @@ struct xe_tile {
/** @sysfs: sysfs' kobj used by xe_tile_sysfs */
struct kobject *sysfs;
+
+ /** @debugfs: debugfs directory associated with this tile */
+ struct dentry *debugfs;
};
/**
- * struct xe_device - Top level struct of XE device
+ * struct xe_device - Top level struct of Xe device
*/
struct xe_device {
/** @drm: drm device */
struct drm_device drm;
+#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
+ /** @display: display device data, must be placed after drm device member */
+ struct intel_display *display;
+#endif
+
/** @devcoredump: device coredump */
struct xe_devcoredump devcoredump;
@@ -281,9 +251,9 @@ struct xe_device {
u32 media_verx100;
/** @info.mem_region_mask: mask of valid memory regions */
u32 mem_region_mask;
- /** @info.platform: XE platform enum */
+ /** @info.platform: Xe platform enum */
enum xe_platform platform;
- /** @info.subplatform: XE subplatform enum */
+ /** @info.subplatform: Xe subplatform enum */
enum xe_subplatform subplatform;
/** @info.devid: device ID */
u16 devid;
@@ -328,16 +298,20 @@ struct xe_device {
u8 has_heci_cscfi:1;
/** @info.has_heci_gscfi: device has heci gscfi */
u8 has_heci_gscfi:1;
+ /** @info.has_late_bind: Device has firmware late binding support */
+ u8 has_late_bind:1;
/** @info.has_llc: Device has a shared CPU+GPU last level cache */
u8 has_llc:1;
/** @info.has_mbx_power_limits: Device has support to manage power limits using
* pcode mailbox commands.
*/
u8 has_mbx_power_limits:1;
+ /** @info.has_mem_copy_instr: Device supports MEM_COPY instruction */
+ u8 has_mem_copy_instr:1;
/** @info.has_pxp: Device has PXP support */
u8 has_pxp:1;
- /** @info.has_range_tlb_invalidation: Has range based TLB invalidations */
- u8 has_range_tlb_invalidation:1;
+ /** @info.has_range_tlb_inval: Has range based TLB invalidations */
+ u8 has_range_tlb_inval:1;
/** @info.has_sriov: Supports SR-IOV */
u8 has_sriov:1;
/** @info.has_usm: Device has unified shared memory support */
@@ -363,6 +337,8 @@ struct xe_device {
u8 skip_mtcfg:1;
/** @info.skip_pcode: skip access to PCODE uC */
u8 skip_pcode:1;
+ /** @info.needs_shared_vf_gt_wq: needs shared GT WQ on VF */
+ u8 needs_shared_vf_gt_wq:1;
} info;
/** @wa_active: keep track of active workarounds */
@@ -412,7 +388,7 @@ struct xe_device {
/** @mem: memory info for device */
struct {
/** @mem.vram: VRAM info for device */
- struct xe_vram_region vram;
+ struct xe_vram_region *vram;
/** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
/** @mem.sys_mgr: system memory shrinker. */
@@ -443,6 +419,16 @@ struct xe_device {
u32 next_asid;
/** @usm.lock: protects UM state */
struct rw_semaphore lock;
+ /** @usm.pf_wq: page fault work queue, unbound, high priority */
+ struct workqueue_struct *pf_wq;
+ /*
+ * We pick 4 here because, in the current implementation, it
+ * yields the best bandwidth utilization of the kernel paging
+ * engine.
+ */
+#define XE_PAGEFAULT_QUEUE_COUNT 4
+ /** @usm.pf_queue: Page fault queues */
+ struct xe_pagefault_queue pf_queue[XE_PAGEFAULT_QUEUE_COUNT];
} usm;
/** @pinned: pinned BO state */
@@ -476,7 +462,7 @@ struct xe_device {
/** @ordered_wq: used to serialize compute mode resume */
struct workqueue_struct *ordered_wq;
- /** @unordered_wq: used to serialize unordered work, mostly display */
+ /** @unordered_wq: used to serialize unordered work */
struct workqueue_struct *unordered_wq;
/** @destroy_wq: used to serialize user destroy work, like queue */
@@ -581,6 +567,9 @@ struct xe_device {
/** @nvm: discrete graphics non-volatile memory */
struct intel_dg_nvm_dev *nvm;
+ /** @late_bind: xe mei late bind interface */
+ struct xe_late_bind late_bind;
+
/** @oa: oa observation subsystem */
struct xe_oa oa;
@@ -596,6 +585,8 @@ struct xe_device {
atomic_t flag;
/** @wedged.mode: Mode controlled by kernel parameter and debugfs */
int mode;
+ /** @wedged.method: Recovery method to be sent in the drm device wedged uevent */
+ unsigned long method;
} wedged;
/** @bo_device: Struct to control async free of BOs */
@@ -630,6 +621,23 @@ struct xe_device {
*/
atomic64_t global_total_pages;
#endif
+ /** @val: The domain for exhaustive eviction, which is currently per device. */
+ struct xe_validation_device val;
+
+ /** @psmi: GPU debugging via additional validation HW */
+ struct {
+ /** @psmi.capture_obj: PSMI buffer for VRAM */
+ struct xe_bo *capture_obj[XE_MAX_TILES_PER_DEVICE + 1];
+ /** @psmi.region_mask: Mask of valid memory regions */
+ u8 region_mask;
+ } psmi;
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ /** @g2g_test_array: for testing G2G communications */
+ u32 *g2g_test_array;
+ /** @g2g_test_count: for testing G2G communications */
+ atomic_t g2g_test_count;
+#endif
/* private: */
@@ -640,8 +648,6 @@ struct xe_device {
* drm_i915_private during build. After cleanup these should go away,
* migrating to the right sub-structs
*/
- struct intel_display *display;
-
const struct dram_info *dram_info;
/*
@@ -650,27 +656,14 @@ struct xe_device {
*/
u32 edram_size_mb;
- /* To shut up runtime pm macros.. */
- struct xe_runtime_pm {} runtime_pm;
-
- /* only to allow build, not used functionally */
- u32 irq_mask;
-
struct intel_uncore {
spinlock_t lock;
} uncore;
-
- /* only to allow build, not used functionally */
- struct {
- unsigned int hpll_freq;
- unsigned int czclk_freq;
- unsigned int fsb_freq, mem_freq, is_ddr3;
- };
#endif
};
/**
- * struct xe_file - file handle for XE driver
+ * struct xe_file - file handle for Xe driver
*/
struct xe_file {
/** @xe: xe device */
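
A note on the fixed queue count in the usm hunk above: since XE_PAGEFAULT_QUEUE_COUNT is a compile-time constant, fault servicing needs some policy to spread incoming faults across the four queues. The helper below is purely a hypothetical sketch of such a policy; the function name and the use of an ASID as the hash key are assumptions for illustration, not part of this series.

/* Hypothetical sketch: keep faults from one context ordered on a single
 * queue by hashing its ASID, while unrelated contexts use the others. */
static struct xe_pagefault_queue *
pf_queue_pick(struct xe_device *xe, u32 asid)
{
	return &xe->usm.pf_queue[asid % XE_PAGEFAULT_QUEUE_COUNT];
}
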
diff --git a/drivers/gpu/drm/xe/xe_device_wa_oob.rules b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
index 3a0c4ccc4224..55ba01bc8f38 100644
--- a/drivers/gpu/drm/xe/xe_device_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_device_wa_oob.rules
@@ -1,2 +1,5 @@
+22010954014 PLATFORM(DG2)
15015404425 PLATFORM(LUNARLAKE)
PLATFORM(PANTHERLAKE)
+22019338487_display PLATFORM(LUNARLAKE)
+14022085890 SUBPLATFORM(BATTLEMAGE, G21)
diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c
index af64baf872ef..54e42960daad 100644
--- a/drivers/gpu/drm/xe/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/xe_dma_buf.c
@@ -48,31 +48,43 @@ static void xe_dma_buf_detach(struct dma_buf *dmabuf,
static int xe_dma_buf_pin(struct dma_buf_attachment *attach)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct dma_buf *dmabuf = attach->dmabuf;
+ struct drm_gem_object *obj = dmabuf->priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = xe_bo_device(bo);
+ struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
+ bool allow_vram = true;
int ret;
- /*
- * For now only support pinning in TT memory, for two reasons:
- * 1) Avoid pinning in a placement not accessible to some importers.
- * 2) Pinning in VRAM requires PIN accounting which is a to-do.
- */
- if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) {
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ allow_vram = false;
+ } else {
+ list_for_each_entry(attach, &dmabuf->attachments, node) {
+ if (!attach->peer2peer) {
+ allow_vram = false;
+ break;
+ }
+ }
+ }
+
+ if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT) &&
+ !(xe_bo_is_vram(bo) && allow_vram)) {
drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n");
return -EINVAL;
}
- ret = xe_bo_migrate(bo, XE_PL_TT);
- if (ret) {
- if (ret != -EINTR && ret != -ERESTARTSYS)
- drm_dbg(&xe->drm,
- "Failed migrating dma-buf to TT memory: %pe\n",
- ERR_PTR(ret));
- return ret;
+ if (!allow_vram) {
+ ret = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
+ if (ret) {
+ if (ret != -EINTR && ret != -ERESTARTSYS)
+ drm_dbg(&xe->drm,
+ "Failed migrating dma-buf to TT memory: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
}
- ret = xe_bo_pin_external(bo, true);
+ ret = xe_bo_pin_external(bo, !allow_vram, exec);
xe_assert(xe, !ret);
return 0;
@@ -92,6 +104,7 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
struct sg_table *sgt;
int r = 0;
@@ -100,9 +113,9 @@ static struct sg_table *xe_dma_buf_map(struct dma_buf_attachment *attach,
if (!xe_bo_is_pinned(bo)) {
if (!attach->peer2peer)
- r = xe_bo_migrate(bo, XE_PL_TT);
+ r = xe_bo_migrate(bo, XE_PL_TT, NULL, exec);
else
- r = xe_bo_validate(bo, NULL, false);
+ r = xe_bo_validate(bo, NULL, false, exec);
if (r)
return ERR_PTR(r);
}
@@ -161,15 +174,26 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
struct xe_bo *bo = gem_to_xe_bo(obj);
bool reads = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_FROM_DEVICE);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ int ret = 0;
if (!reads)
return 0;
/* Can we do interruptible lock here? */
- xe_bo_lock(bo, false);
- (void)xe_bo_migrate(bo, XE_PL_TT);
- xe_bo_unlock(bo);
+ xe_validation_guard(&ctx, &xe_bo_device(bo)->val, &exec, (struct xe_val_flags) {}, ret) {
+ ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+
+ ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ }
+ /* If we failed, cpu-access takes place in current placement. */
return 0;
}
@@ -191,10 +215,22 @@ struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+ /* We opt to avoid OOM on system pages allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
if (bo->vm)
return ERR_PTR(-EPERM);
+ ret = ttm_bo_setup_export(&bo->ttm, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
buf = drm_gem_prime_export(obj, flags);
if (!IS_ERR(buf))
buf->ops = &xe_dmabuf_ops;
@@ -208,32 +244,45 @@ xe_dma_buf_init_obj(struct drm_device *dev, struct xe_bo *storage,
{
struct dma_resv *resv = dma_buf->resv;
struct xe_device *xe = to_xe_device(dev);
+ struct xe_validation_ctx ctx;
+ struct drm_gem_object *dummy_obj;
+ struct drm_exec exec;
struct xe_bo *bo;
- int ret;
-
- dma_resv_lock(resv, NULL);
- bo = ___xe_bo_create_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
- 0, /* Will require 1way or 2way for vm_bind */
- ttm_bo_type_sg, XE_BO_FLAG_SYSTEM);
- if (IS_ERR(bo)) {
- ret = PTR_ERR(bo);
- goto error;
+ int ret = 0;
+
+ dummy_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
+ if (!dummy_obj)
+ return ERR_PTR(-ENOMEM);
+
+ dummy_obj->resv = resv;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) {
+ ret = drm_exec_lock_obj(&exec, dummy_obj);
+ drm_exec_retry_on_contention(&exec);
+ if (ret)
+ break;
+
+ bo = xe_bo_init_locked(xe, storage, NULL, resv, NULL, dma_buf->size,
+ 0, /* Will require 1way or 2way for vm_bind */
+ ttm_bo_type_sg, XE_BO_FLAG_SYSTEM, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ ret = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &ret);
+ break;
+ }
}
- dma_resv_unlock(resv);
-
- return &bo->ttm.base;
+ drm_gem_object_put(dummy_obj);
-error:
- dma_resv_unlock(resv);
- return ERR_PTR(ret);
+ return ret ? ERR_PTR(ret) : &bo->ttm.base;
}
static void xe_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = attach->importer_priv;
struct xe_bo *bo = gem_to_xe_bo(obj);
+ struct drm_exec *exec = XE_VALIDATION_UNSUPPORTED;
- XE_WARN_ON(xe_bo_evict(bo));
+ XE_WARN_ON(xe_bo_evict(bo, exec));
}
static const struct dma_buf_attach_ops xe_dma_buf_attach_ops = {
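
A note on the pattern used throughout the hunks above: xe_validation_guard() opens a validation transaction whose body is re-run on drm_exec lock contention and retried on validation OOM. Condensed into a minimal sketch, where all calls appear in this diff and only the framing around a generic bo/xe pair is illustrative:

	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	/* Body re-executes on drm_exec contention and on validation OOM. */
	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) {
		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			break;

		ret = xe_bo_migrate(bo, XE_PL_TT, NULL, &exec);
		drm_exec_retry_on_contention(&exec);
		xe_validation_retry_on_oom(&ctx, &ret);
	}
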
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index af7916315ac6..97dfb7945b7a 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -49,6 +49,7 @@ struct xe_eu_stall_data_stream {
wait_queue_head_t poll_wq;
size_t data_record_size;
size_t per_xecore_buf_size;
+ unsigned int fw_ref;
struct xe_gt *gt;
struct xe_bo *bo;
@@ -124,6 +125,27 @@ struct xe_eu_stall_data_xe2 {
__u64 unused[6];
} __packed;
+/*
+ * EU stall data format for Xe3p arch GPUs.
+ */
+struct xe_eu_stall_data_xe3p {
+ __u64 ip_addr:61; /* Bits 0 to 60 */
+ __u64 tdr_count:8; /* Bits 61 to 68 */
+ __u64 other_count:8; /* Bits 69 to 76 */
+ __u64 control_count:8; /* Bits 77 to 84 */
+ __u64 pipestall_count:8; /* Bits 85 to 92 */
+ __u64 send_count:8; /* Bits 93 to 100 */
+ __u64 dist_acc_count:8; /* Bits 101 to 108 */
+ __u64 sbid_count:8; /* Bits 109 to 116 */
+ __u64 sync_count:8; /* Bits 117 to 124 */
+ __u64 inst_fetch_count:8; /* Bits 125 to 132 */
+ __u64 active_count:8; /* Bits 133 to 140 */
+ __u64 ex_id:3; /* Bits 141 to 143 */
+ __u64 end_flag:1; /* Bit 144 */
+ __u64 unused_bits:47;
+ __u64 unused[5];
+} __packed;
+
const u64 eu_stall_sampling_rates[] = {251, 251 * 2, 251 * 3, 251 * 4, 251 * 5, 251 * 6, 251 * 7};
/**
@@ -167,10 +189,13 @@ size_t xe_eu_stall_data_record_size(struct xe_device *xe)
{
size_t record_size = 0;
- if (xe->info.platform == XE_PVC)
- record_size = sizeof(struct xe_eu_stall_data_pvc);
+ if (GRAPHICS_VER(xe) >= 35)
+ record_size = sizeof(struct xe_eu_stall_data_xe3p);
else if (GRAPHICS_VER(xe) >= 20)
record_size = sizeof(struct xe_eu_stall_data_xe2);
+ else if (xe->info.platform == XE_PVC)
+ record_size = sizeof(struct xe_eu_stall_data_pvc);
+
xe_assert(xe, is_power_of_2(record_size));
@@ -617,9 +642,8 @@ static int xe_eu_stall_data_buf_alloc(struct xe_eu_stall_data_stream *stream,
size = stream->per_xecore_buf_size * last_xecore;
- bo = xe_bo_create_pin_map_at_aligned(tile->xe, tile, NULL,
- size, ~0ull, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64);
+ bo = xe_bo_create_pin_map_at_novm(tile->xe, tile, size, ~0ull, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, SZ_64, false);
if (IS_ERR(bo)) {
kfree(stream->xecore_buf);
return PTR_ERR(bo);
@@ -637,19 +661,18 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
struct per_xecore_buf *xecore_buf;
struct xe_gt *gt = stream->gt;
u16 group, instance;
- unsigned int fw_ref;
int xecore;
/* Take runtime pm ref and forcewake to disable RC6 */
xe_pm_runtime_get(gt_to_xe(gt));
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_RENDER)) {
+ stream->fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER);
+ if (!xe_force_wake_ref_has_domain(stream->fw_ref, XE_FW_RENDER)) {
xe_gt_err(gt, "Failed to get RENDER forcewake\n");
xe_pm_runtime_put(gt_to_xe(gt));
return -ETIMEDOUT;
}
- if (XE_WA(gt, 22016596838))
+ if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
@@ -805,11 +828,11 @@ static int xe_eu_stall_disable_locked(struct xe_eu_stall_data_stream *stream)
cancel_delayed_work_sync(&stream->buf_poll_work);
- if (XE_WA(gt, 22016596838))
+ if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
_MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
- xe_force_wake_put(gt_to_fw(gt), XE_FW_RENDER);
+ xe_force_wake_put(gt_to_fw(gt), stream->fw_ref);
xe_pm_runtime_put(gt_to_xe(gt));
return 0;
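
The xe_eu_stall change above moves the forcewake reference into the stream so that the put releases exactly the domains that were granted at enable time. The underlying get/check/put pattern, as used in this file:

	unsigned int fw_ref;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_RENDER);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FW_RENDER))
		return -ETIMEDOUT;	/* wake request was not acknowledged */

	/* ... access RENDER registers ... */

	xe_force_wake_put(gt_to_fw(gt), fw_ref);	/* release what was granted */
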
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 374c831e691b..4d81210e41f5 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -16,9 +16,12 @@
#include "xe_exec_queue.h"
#include "xe_hw_engine_group.h"
#include "xe_macros.h"
+#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
+#include "xe_svm.h"
+#include "xe_trace.h"
#include "xe_vm.h"
/**
@@ -31,7 +34,7 @@
* - Binding at exec time
* - Flow controlling the ring at exec time
*
- * In XE we avoid all of this complication by not allowing a BO list to be
+ * In Xe we avoid all of this complication by not allowing a BO list to be
* passed into an exec, using the dma-buf implicit sync uAPI, have binds as
* separate operations, and using the DRM scheduler to flow control the ring.
* Let's deep dive on each of these.
@@ -97,9 +100,13 @@
static int xe_exec_fn(struct drm_gpuvm_exec *vm_exec)
{
struct xe_vm *vm = container_of(vm_exec->vm, struct xe_vm, gpuvm);
+ int ret;
/* The fence slot added here is intended for the exec sched job. */
- return xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
+ xe_vm_set_validation_exec(vm, &vm_exec->exec);
+ ret = xe_vm_validate_rebind(vm, &vm_exec->exec, 1);
+ xe_vm_set_validation_exec(vm, NULL);
+ return ret;
}
int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
@@ -115,10 +122,10 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_gpuvm_exec vm_exec = {.extra.fn = xe_exec_fn};
struct drm_exec *exec = &vm_exec.exec;
u32 i, num_syncs, num_ufence = 0;
+ struct xe_validation_ctx ctx;
struct xe_sched_job *job;
struct xe_vm *vm;
- bool write_locked, skip_retry = false;
- ktime_t end = 0;
+ bool write_locked;
int err = 0;
struct xe_hw_engine_group *group;
enum xe_hw_engine_group_execution_mode mode, previous_mode;
@@ -148,6 +155,12 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto err_exec_queue;
}
+ if (atomic_read(&q->job_cnt) >= XE_MAX_JOB_COUNT_PER_EXEC_QUEUE) {
+ trace_xe_exec_queue_reach_max_job_count(q, XE_MAX_JOB_COUNT_PER_EXEC_QUEUE);
+ err = -EAGAIN;
+ goto err_exec_queue;
+ }
+
if (args->num_syncs) {
syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
if (!syncs) {
@@ -160,7 +173,8 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
- &syncs_user[num_syncs], SYNC_PARSE_FLAG_EXEC |
+ &syncs_user[num_syncs], NULL, 0,
+ SYNC_PARSE_FLAG_EXEC |
(xe_vm_in_lr_mode(vm) ?
SYNC_PARSE_FLAG_LR_MODE : 0));
if (err)
@@ -242,21 +256,16 @@ retry:
* on task freezing during suspend / hibernate, the call will
* return -ERESTARTSYS and the IOCTL will be rerun.
*/
- err = wait_for_completion_interruptible(&xe->pm_block);
+ err = xe_pm_block_on_suspend(xe);
if (err)
goto err_unlock_list;
- vm_exec.vm = &vm->gpuvm;
- vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
- if (xe_vm_in_lr_mode(vm)) {
- drm_exec_init(exec, vm_exec.flags, 0);
- } else {
- err = drm_gpuvm_exec_lock(&vm_exec);
- if (err) {
- if (xe_vm_validate_should_retry(exec, err, &end))
- err = -EAGAIN;
+ if (!xe_vm_in_lr_mode(vm)) {
+ vm_exec.vm = &vm->gpuvm;
+ vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
+ err = xe_validation_exec_lock(&ctx, &vm_exec, &xe->val);
+ if (err)
goto err_unlock_list;
- }
}
if (xe_vm_is_closed_or_banned(q->vm)) {
@@ -265,12 +274,6 @@ retry:
goto err_exec;
}
- if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
- err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
- skip_retry = true;
- goto err_exec;
- }
-
if (xe_exec_queue_uses_pxp(q)) {
err = xe_vm_validate_protected(q->vm);
if (err)
@@ -299,11 +302,7 @@ retry:
goto err_put_job;
if (!xe_vm_in_lr_mode(vm)) {
- err = xe_sched_job_last_fence_add_dep(job, vm);
- if (err)
- goto err_put_job;
-
- err = down_read_interruptible(&vm->userptr.notifier_lock);
+ err = xe_svm_notifier_lock_interruptible(vm);
if (err)
goto err_put_job;
@@ -327,8 +326,6 @@ retry:
xe_sched_job_init_user_fence(job, &syncs[i]);
}
- if (xe_exec_queue_is_lr(q))
- q->ring_ops->emit_job(job);
if (!xe_vm_in_lr_mode(vm))
xe_exec_queue_last_fence_set(q, vm, &job->drm.s_fence->finished);
xe_sched_job_push(job);
@@ -345,15 +342,16 @@ retry:
err_repin:
if (!xe_vm_in_lr_mode(vm))
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
err_put_job:
if (err)
xe_sched_job_put(job);
err_exec:
- drm_exec_fini(exec);
+ if (!xe_vm_in_lr_mode(vm))
+ xe_validation_ctx_fini(&ctx);
err_unlock_list:
up_read(&vm->lock);
- if (err == -EAGAIN && !skip_retry)
+ if (err == -EAGAIN)
goto retry;
err_hw_exec_mode:
if (mode == EXEC_MODE_DMA_FENCE)
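
For completeness: with the ring-full -EWOULDBLOCK path removed above, back-pressure now comes from the per-queue job counter, which fails the ioctl with -EAGAIN once XE_MAX_JOB_COUNT_PER_EXEC_QUEUE jobs are in flight. A hypothetical userspace submitter would simply retry; the loop below is an illustration, not part of this series:

	/* Hypothetical userspace retry loop; drmIoctl() already restarts on EINTR. */
	int ret;

	do {
		ret = drmIoctl(fd, DRM_IOCTL_XE_EXEC, &exec);
	} while (ret == -1 && (errno == EAGAIN || errno == EWOULDBLOCK));
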
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index 8991b4aed440..8724f8de67e2 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -10,10 +10,13 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
#include <uapi/drm/xe_drm.h>
+#include "xe_dep_scheduler.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_hw_engine_group.h"
#include "xe_hw_fence.h"
@@ -27,6 +30,29 @@
#include "xe_vm.h"
#include "xe_pxp.h"
+/**
+ * DOC: Execution Queue
+ *
+ * An execution queue is an interface to a HW context of execution. The
+ * user creates an execution queue, submits GPU jobs through it, and
+ * destroys it when done.
+ *
+ * Execution queues can also be created by XeKMD itself for driver-internal
+ * operations such as object migration.
+ *
+ * An execution queue is associated with a specific HW engine or a group of
+ * engines (belonging to the same tile and engine class), and any GPU job
+ * submitted on the queue will run on one of these engines.
+ *
+ * An execution queue is tied to an address space (VM). It holds a reference
+ * to the associated VM and to the underlying Logical Ring Context(s) (LRCs)
+ * until the queue is destroyed.
+ *
+ * The execution queue sits on top of the submission backend. It transparently
+ * handles whichever backend the platform uses (GuC or Execlist) and the ring
+ * operations the different engine classes support.
+ */
+
enum xe_exec_queue_sched_prop {
XE_EXEC_QUEUE_JOB_TIMEOUT = 0,
XE_EXEC_QUEUE_TIMESLICE = 1,
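
As a rough illustration of the lifecycle in the DOC comment above, a kernel-internal user might do something like the following. Error handling is trimmed, the final argument is the extensions value, and the exact flags depend on the use case; treat this as a sketch rather than a canonical recipe:

	struct xe_exec_queue *q;

	q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
				 EXEC_QUEUE_FLAG_KERNEL, 0);
	if (IS_ERR(q))
		return PTR_ERR(q);

	/* ... create xe_sched_job instances and push them on q ... */

	xe_exec_queue_put(q);	/* final put triggers the backend ->destroy */
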
@@ -39,6 +65,12 @@ static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue
static void __xe_exec_queue_free(struct xe_exec_queue *q)
{
+ int i;
+
+ for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i)
+ if (q->tlb_inval[i].dep_scheduler)
+ xe_dep_scheduler_fini(q->tlb_inval[i].dep_scheduler);
+
if (xe_exec_queue_uses_pxp(q))
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
if (q->vm)
@@ -50,6 +82,39 @@ static void __xe_exec_queue_free(struct xe_exec_queue *q)
kfree(q);
}
+static int alloc_dep_schedulers(struct xe_device *xe, struct xe_exec_queue *q)
+{
+ struct xe_tile *tile = gt_to_tile(q->gt);
+ int i;
+
+ for (i = 0; i < XE_EXEC_QUEUE_TLB_INVAL_COUNT; ++i) {
+ struct xe_dep_scheduler *dep_scheduler;
+ struct xe_gt *gt;
+ struct workqueue_struct *wq;
+
+ if (i == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT)
+ gt = tile->primary_gt;
+ else
+ gt = tile->media_gt;
+
+ if (!gt)
+ continue;
+
+ wq = gt->tlb_inval.job_wq;
+
+#define MAX_TLB_INVAL_JOBS 16 /* Picking a reasonable value */
+ dep_scheduler = xe_dep_scheduler_create(xe, wq, q->name,
+ MAX_TLB_INVAL_JOBS);
+ if (IS_ERR(dep_scheduler))
+ return PTR_ERR(dep_scheduler);
+
+ q->tlb_inval[i].dep_scheduler = dep_scheduler;
+ }
+#undef MAX_TLB_INVAL_JOBS
+
+ return 0;
+}
+
static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
struct xe_vm *vm,
u32 logical_mask,
@@ -94,6 +159,14 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
else
q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL;
+ if (q->flags & (EXEC_QUEUE_FLAG_MIGRATE | EXEC_QUEUE_FLAG_VM)) {
+ err = alloc_dep_schedulers(xe, q);
+ if (err) {
+ __xe_exec_queue_free(q);
+ return ERR_PTR(err);
+ }
+ }
+
if (vm)
q->vm = xe_vm_get(vm);
@@ -112,7 +185,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
return q;
}
-static int __xe_exec_queue_init(struct xe_exec_queue *q)
+static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
{
int i, err;
u32 flags = 0;
@@ -131,17 +204,37 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q)
flags |= XE_LRC_CREATE_RUNALONE;
}
+ if (!(exec_queue_flags & EXEC_QUEUE_FLAG_KERNEL))
+ flags |= XE_LRC_CREATE_USER_CTX;
+
+ err = q->ops->init(q);
+ if (err)
+ return err;
+
+ /*
+ * This must occur after q->ops->init to avoid race conditions during VF
+ * post-migration recovery, as the fixups for the LRC GGTT addresses
+ * depend on the queue being present in the backend tracking structure.
+ *
+ * In addition to the above, we must wait on in-flight GGTT changes to avoid
+ * writing out stale values here. Such a wait provides a race-free solution
+ * only if the function can detect migration instantly from the moment the
+ * vCPU resumes execution.
+ */
for (i = 0; i < q->width; ++i) {
- q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K, q->msix_vec, flags);
- if (IS_ERR(q->lrc[i])) {
- err = PTR_ERR(q->lrc[i]);
+ struct xe_lrc *lrc;
+
+ xe_gt_sriov_vf_wait_valid_ggtt(q->gt);
+ lrc = xe_lrc_create(q->hwe, q->vm, xe_lrc_ring_size(),
+ q->msix_vec, flags);
+ if (IS_ERR(lrc)) {
+ err = PTR_ERR(lrc);
goto err_lrc;
}
- }
- err = q->ops->init(q);
- if (err)
- goto err_lrc;
+ /* Pairs with the READ_ONCE in xe_exec_queue_contexts_hwsp_rebase() */
+ WRITE_ONCE(q->lrc[i], lrc);
+ }
return 0;
@@ -151,6 +244,16 @@ err_lrc:
return err;
}
+static void __xe_exec_queue_fini(struct xe_exec_queue *q)
+{
+ int i;
+
+ q->ops->fini(q);
+
+ for (i = 0; i < q->width; ++i)
+ xe_lrc_put(q->lrc[i]);
+}
+
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hwe, u32 flags,
@@ -167,7 +270,7 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (IS_ERR(q))
return q;
- err = __xe_exec_queue_init(q);
+ err = __xe_exec_queue_init(q, flags);
if (err)
goto err_post_alloc;
@@ -181,11 +284,13 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
if (xe_exec_queue_uses_pxp(q)) {
err = xe_pxp_exec_queue_add(xe->pxp, q);
if (err)
- goto err_post_alloc;
+ goto err_post_init;
}
return q;
+err_post_init:
+ __xe_exec_queue_fini(q);
err_post_alloc:
__xe_exec_queue_free(q);
return ERR_PTR(err);
@@ -264,6 +369,16 @@ struct xe_exec_queue *xe_exec_queue_create_bind(struct xe_device *xe,
}
xe_vm_put(migrate_vm);
+ if (!IS_ERR(q)) {
+ int err = drm_syncobj_create(&q->ufence_syncobj,
+ DRM_SYNCOBJ_CREATE_SIGNALED,
+ NULL);
+ if (err) {
+ xe_exec_queue_put(q);
+ return ERR_PTR(err);
+ }
+ }
+
return q;
}
ALLOW_ERROR_INJECTION(xe_exec_queue_create_bind, ERRNO);
@@ -272,24 +387,31 @@ void xe_exec_queue_destroy(struct kref *ref)
{
struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
struct xe_exec_queue *eq, *next;
+ int i;
+
+ xe_assert(gt_to_xe(q->gt), atomic_read(&q->job_cnt) == 0);
+
+ if (q->ufence_syncobj)
+ drm_syncobj_put(q->ufence_syncobj);
if (xe_exec_queue_uses_pxp(q))
xe_pxp_exec_queue_remove(gt_to_xe(q->gt)->pxp, q);
xe_exec_queue_last_fence_put_unlocked(q);
+ for_each_tlb_inval(i)
+ xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, i);
+
if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
list_for_each_entry_safe(eq, next, &q->multi_gt_list,
multi_gt_link)
xe_exec_queue_put(eq);
}
- q->ops->fini(q);
+ q->ops->destroy(q);
}
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
- int i;
-
/*
* Before releasing our ref to lrc and xef, accumulate our run ticks
* and wakeup any waiters.
@@ -298,9 +420,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
if (q->xef && atomic_dec_and_test(&q->xef->exec_queue.pending_removal))
wake_up_var(&q->xef->exec_queue.pending_removal);
- for (i = 0; i < q->width; ++i)
- xe_lrc_put(q->lrc[i]);
-
+ __xe_exec_queue_fini(q);
__xe_exec_queue_free(q);
}
@@ -742,34 +862,30 @@ int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
}
/**
- * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
- * @q: The exec_queue
+ * xe_exec_queue_lrc() - Get the LRC from exec queue.
+ * @q: The exec_queue.
*
- * Return: True if the exec_queue is long-running, false otherwise.
+ * Retrieves the primary LRC for the exec queue. Note that this function
+ * returns only the first LRC instance, even when multiple parallel LRCs
+ * are configured.
+ *
+ * Return: Pointer to the queue's primary LRC
*/
-bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
+struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q)
{
- return q->vm && xe_vm_in_lr_mode(q->vm) &&
- !(q->flags & EXEC_QUEUE_FLAG_VM);
-}
-
-static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
-{
- return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1;
+ return q->lrc[0];
}
/**
- * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
+ * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
* @q: The exec_queue
*
- * Return: True if the exec_queue's ring is full, false otherwise.
+ * Return: True if the exec_queue is long-running, false otherwise.
*/
-bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
+bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
- struct xe_lrc *lrc = q->lrc[0];
- s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
-
- return xe_exec_queue_num_job_inflight(q) >= max_job;
+ return q->vm && xe_vm_in_lr_mode(q->vm) &&
+ !(q->flags & EXEC_QUEUE_FLAG_VM);
}
/**
@@ -902,7 +1018,9 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q,
struct xe_vm *vm)
{
- if (q->flags & EXEC_QUEUE_FLAG_VM) {
+ if (q->flags & EXEC_QUEUE_FLAG_MIGRATE) {
+ xe_migrate_job_lock_assert(q);
+ } else if (q->flags & EXEC_QUEUE_FLAG_VM) {
lockdep_assert_held(&vm->lock);
} else {
xe_vm_assert_held(vm);
@@ -1001,29 +1119,132 @@ void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm,
struct dma_fence *fence)
{
xe_exec_queue_last_fence_lockdep_assert(q, vm);
+ xe_assert(vm->xe, !dma_fence_is_container(fence));
xe_exec_queue_last_fence_put(q, vm);
q->last_fence = dma_fence_get(fence);
}
/**
- * xe_exec_queue_last_fence_test_dep - Test last fence dependency of queue
+ * xe_exec_queue_tlb_inval_last_fence_put() - Drop ref to last TLB invalidation fence
* @q: The exec queue
- * @vm: The VM the engine does a bind or exec for
+ * @vm: The VM the engine does a bind for
+ * @type: Either primary or media GT
+ */
+void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
+ struct xe_vm *vm,
+ unsigned int type)
+{
+ xe_exec_queue_last_fence_lockdep_assert(q, vm);
+ xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
+ type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+
+ xe_exec_queue_tlb_inval_last_fence_put_unlocked(q, type);
+}
+
+/**
+ * xe_exec_queue_tlb_inval_last_fence_put_unlocked() - Drop ref to last TLB
+ * invalidation fence unlocked
+ * @q: The exec queue
+ * @type: Either primary or media GT
+ *
+ * Only safe to be called from xe_exec_queue_destroy().
+ */
+void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
+ unsigned int type)
+{
+ xe_assert(q->vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
+ type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+
+ dma_fence_put(q->tlb_inval[type].last_fence);
+ q->tlb_inval[type].last_fence = NULL;
+}
+
+/**
+ * xe_exec_queue_tlb_inval_last_fence_get() - Get last fence for TLB invalidation
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind for
+ * @type: Either primary or media GT
*
- * Returns:
- * -ETIME if there exists an unsignalled last fence dependency, zero otherwise.
+ * Gets the last fence and takes a reference to it.
+ *
+ * Returns: the last fence if not yet signaled, the dma-fence stub otherwise
*/
-int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm)
+struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
+ struct xe_vm *vm,
+ unsigned int type)
{
struct dma_fence *fence;
+
+ xe_exec_queue_last_fence_lockdep_assert(q, vm);
+ xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
+ type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+ xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
+ EXEC_QUEUE_FLAG_MIGRATE));
+
+ if (q->tlb_inval[type].last_fence &&
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &q->tlb_inval[type].last_fence->flags))
+ xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
+
+ fence = q->tlb_inval[type].last_fence ?: dma_fence_get_stub();
+ dma_fence_get(fence);
+ return fence;
+}
+
+/**
+ * xe_exec_queue_tlb_inval_last_fence_set() - Set last fence for TLB invalidation
+ * @q: The exec queue
+ * @vm: The VM the engine does a bind for
+ * @fence: The fence
+ * @type: Either primary or media GT
+ *
+ * Sets the last fence for the given TLB invalidation type on the queue and
+ * takes a reference to it. xe_exec_queue_tlb_inval_last_fence_put() should
+ * be called when closing the queue.
+ */
+void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
+ struct xe_vm *vm,
+ struct dma_fence *fence,
+ unsigned int type)
+{
+ xe_exec_queue_last_fence_lockdep_assert(q, vm);
+ xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
+ type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+ xe_assert(vm->xe, q->flags & (EXEC_QUEUE_FLAG_VM |
+ EXEC_QUEUE_FLAG_MIGRATE));
+ xe_assert(vm->xe, !dma_fence_is_container(fence));
+
+ xe_exec_queue_tlb_inval_last_fence_put(q, vm, type);
+ q->tlb_inval[type].last_fence = dma_fence_get(fence);
+}
+
+/**
+ * xe_exec_queue_contexts_hwsp_rebase - Re-compute GGTT references
+ * within all LRCs of a queue.
+ * @q: the &xe_exec_queue struct instance containing target LRCs
+ * @scratch: scratch buffer to be used as temporary storage
+ *
+ * Returns: zero on success, negative error code on failure
+ */
+int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch)
+{
+ int i;
int err = 0;
- fence = xe_exec_queue_last_fence_get(q, vm);
- if (fence) {
- err = test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ?
- 0 : -ETIME;
- dma_fence_put(fence);
+ for (i = 0; i < q->width; ++i) {
+ struct xe_lrc *lrc;
+
+ /* Pairs with WRITE_ONCE in __xe_exec_queue_init */
+ lrc = READ_ONCE(q->lrc[i]);
+ if (!lrc)
+ continue;
+
+ xe_lrc_update_memirq_regs_with_address(lrc, q->hwe, scratch);
+ xe_lrc_update_hwctx_regs_with_address(lrc);
+ err = xe_lrc_setup_wa_bb_with_scratch(lrc, q->hwe, scratch);
+ if (err)
+ break;
}
return err;
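
The four tlb_inval last-fence helpers added above follow the same ownership rule as the existing last-fence API: set takes a reference, get returns a referenced fence (or the stub once signaled), and put drops it. A sketch of a bind path using them, running under vm->lock in write mode; new_fence stands in for the fence of a freshly created invalidation job:

	struct dma_fence *fence;

	/* Order this invalidation after the previous one on the queue. */
	fence = xe_exec_queue_tlb_inval_last_fence_get(q, vm,
						       XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
	/* ... add 'fence' as a dependency of the new invalidation job ... */
	dma_fence_put(fence);

	/* Publish the new job's fence as the latest for this type. */
	xe_exec_queue_tlb_inval_last_fence_set(q, vm, new_fence,
					       XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
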
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.h b/drivers/gpu/drm/xe/xe_exec_queue.h
index 17bc50a7f05a..fda4d4f9bda8 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue.h
@@ -14,6 +14,10 @@ struct drm_file;
struct xe_device;
struct xe_file;
+#define for_each_tlb_inval(__i) \
+ for (__i = XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT; \
+ __i <= XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT; ++__i)
+
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
u32 logical_mask, u16 width,
struct xe_hw_engine *hw_engine, u32 flags,
@@ -64,8 +68,6 @@ static inline bool xe_exec_queue_uses_pxp(struct xe_exec_queue *q)
bool xe_exec_queue_is_lr(struct xe_exec_queue *q);
-bool xe_exec_queue_ring_full(struct xe_exec_queue *q);
-
bool xe_exec_queue_is_idle(struct xe_exec_queue *q);
void xe_exec_queue_kill(struct xe_exec_queue *q);
@@ -86,8 +88,27 @@ struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *
struct xe_vm *vm);
void xe_exec_queue_last_fence_set(struct xe_exec_queue *e, struct xe_vm *vm,
struct dma_fence *fence);
-int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q,
- struct xe_vm *vm);
+
+void xe_exec_queue_tlb_inval_last_fence_put(struct xe_exec_queue *q,
+ struct xe_vm *vm,
+ unsigned int type);
+
+void xe_exec_queue_tlb_inval_last_fence_put_unlocked(struct xe_exec_queue *q,
+ unsigned int type);
+
+struct dma_fence *xe_exec_queue_tlb_inval_last_fence_get(struct xe_exec_queue *q,
+ struct xe_vm *vm,
+ unsigned int type);
+
+void xe_exec_queue_tlb_inval_last_fence_set(struct xe_exec_queue *q,
+ struct xe_vm *vm,
+ struct dma_fence *fence,
+ unsigned int type);
+
void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q);
+int xe_exec_queue_contexts_hwsp_rebase(struct xe_exec_queue *q, void *scratch);
+
+struct xe_lrc *xe_exec_queue_lrc(struct xe_exec_queue *q);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_exec_queue_types.h b/drivers/gpu/drm/xe/xe_exec_queue_types.h
index cc1cffb5c87f..771ffe35cd0c 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_exec_queue_types.h
@@ -15,6 +15,7 @@
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"
+struct drm_syncobj;
struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_exec_queue;
@@ -87,6 +88,8 @@ struct xe_exec_queue {
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY BIT(4)
/* flag to indicate low latency hint to guc */
#define EXEC_QUEUE_FLAG_LOW_LATENCY BIT(5)
+/* for migration (kernel copy, clear, bind) jobs */
+#define EXEC_QUEUE_FLAG_MIGRATE BIT(6)
/**
* @flags: flags for this exec queue, should statically setup aside from ban
@@ -132,6 +135,24 @@ struct xe_exec_queue {
struct list_head link;
} lr;
+#define XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT 0
+#define XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT 1
+#define XE_EXEC_QUEUE_TLB_INVAL_COUNT (XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT + 1)
+
+ /** @tlb_inval: TLB invalidations exec queue state */
+ struct {
+ /**
+ * @tlb_inval.dep_scheduler: The TLB invalidation
+ * dependency scheduler
+ */
+ struct xe_dep_scheduler *dep_scheduler;
+ /**
+ * @tlb_inval.last_fence: last fence for TLB invalidation,
+ * protected by vm->lock in write mode
+ */
+ struct dma_fence *last_fence;
+ } tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_COUNT];
+
/** @pxp: PXP info tracking */
struct {
/** @pxp.type: PXP session type used by this queue */
@@ -140,6 +161,12 @@ struct xe_exec_queue {
struct list_head link;
} pxp;
+ /** @ufence_syncobj: User fence syncobj */
+ struct drm_syncobj *ufence_syncobj;
+
+ /** @ufence_timeline_value: User fence timeline value */
+ u64 ufence_timeline_value;
+
/** @ops: submission backend exec queue operations */
const struct xe_exec_queue_ops *ops;
@@ -147,6 +174,11 @@ struct xe_exec_queue {
const struct xe_ring_ops *ring_ops;
/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
struct drm_sched_entity *entity;
+
+#define XE_MAX_JOB_COUNT_PER_EXEC_QUEUE 1000
+ /** @job_cnt: number of drm jobs in this exec queue */
+ atomic_t job_cnt;
+
/**
* @tlb_flush_seqno: The seqno of the last rebind tlb flush performed
* Protected by @vm's resv. Unused if @vm == NULL.
@@ -166,8 +198,14 @@ struct xe_exec_queue_ops {
int (*init)(struct xe_exec_queue *q);
/** @kill: Kill inflight submissions for backend */
void (*kill)(struct xe_exec_queue *q);
- /** @fini: Fini exec queue for submission backend */
+ /** @fini: Undoes the init() for the submission backend */
void (*fini)(struct xe_exec_queue *q);
+ /**
+ * @destroy: Destroy exec queue for submission backend. The backend
+ * function must call xe_exec_queue_fini() (which will in turn call the
+ * fini() backend function) to ensure the queue is properly cleaned up.
+ */
+ void (*destroy)(struct xe_exec_queue *q);
/** @set_priority: Set priority for exec queue */
int (*set_priority)(struct xe_exec_queue *q,
enum xe_exec_queue_priority priority);
@@ -186,6 +224,9 @@ struct xe_exec_queue_ops {
* call after suspend. In dma-fencing path thus must return within a
* reasonable amount of time. -ETIME return shall indicate an error
* waiting for suspend resulting in associated VM getting killed.
+ * An -EAGAIN return indicates the wait should be retried; if the wait is
+ * within a work item, the work item should be requeued as a deadlock
+ * avoidance mechanism.
*/
int (*suspend_wait)(struct xe_exec_queue *q);
/**
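
The fini/destroy split above gives backends a fixed teardown order: the core drops the last reference, the backend's ->destroy tears down backend state (possibly asynchronously) and must end by calling xe_exec_queue_fini(), which invokes ->fini and releases the LRCs. A schematic backend, modeled on the execlist changes later in this diff:

static void backend_exec_queue_fini(struct xe_exec_queue *q)
{
	/* Undo backend init(): free scheduler entities, etc. */
}

static void backend_exec_queue_destroy(struct xe_exec_queue *q)
{
	/* May defer to a worker; must eventually reach xe_exec_queue_fini(). */
	xe_exec_queue_fini(q);	/* calls ->fini, puts the LRCs, frees q */
}
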
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 788f56b066b6..769d05517f93 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -339,7 +339,7 @@ static int execlist_exec_queue_init(struct xe_exec_queue *q)
const struct drm_sched_init_args args = {
.ops = &drm_sched_ops,
.num_rqs = 1,
- .credit_limit = q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES,
+ .credit_limit = xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES,
.hang_limit = XE_SCHED_HANG_LIMIT,
.timeout = XE_SCHED_JOB_TIMEOUT,
.name = q->hwe->name,
@@ -385,10 +385,20 @@ err_free:
return err;
}
-static void execlist_exec_queue_fini_async(struct work_struct *w)
+static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_execlist_exec_queue *exl = q->execlist;
+
+ drm_sched_entity_fini(&exl->entity);
+ drm_sched_fini(&exl->sched);
+
+ kfree(exl);
+}
+
+static void execlist_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_execlist_exec_queue *ee =
- container_of(w, struct xe_execlist_exec_queue, fini_async);
+ container_of(w, struct xe_execlist_exec_queue, destroy_async);
struct xe_exec_queue *q = ee->q;
struct xe_execlist_exec_queue *exl = q->execlist;
struct xe_device *xe = gt_to_xe(q->gt);
@@ -401,10 +411,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
list_del(&exl->active_link);
spin_unlock_irqrestore(&exl->port->lock, flags);
- drm_sched_entity_fini(&exl->entity);
- drm_sched_fini(&exl->sched);
- kfree(exl);
-
xe_exec_queue_fini(q);
}
@@ -413,10 +419,10 @@ static void execlist_exec_queue_kill(struct xe_exec_queue *q)
/* NIY */
}
-static void execlist_exec_queue_fini(struct xe_exec_queue *q)
+static void execlist_exec_queue_destroy(struct xe_exec_queue *q)
{
- INIT_WORK(&q->execlist->fini_async, execlist_exec_queue_fini_async);
- queue_work(system_unbound_wq, &q->execlist->fini_async);
+ INIT_WORK(&q->execlist->destroy_async, execlist_exec_queue_destroy_async);
+ queue_work(system_unbound_wq, &q->execlist->destroy_async);
}
static int execlist_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -467,6 +473,7 @@ static const struct xe_exec_queue_ops execlist_exec_queue_ops = {
.init = execlist_exec_queue_init,
.kill = execlist_exec_queue_kill,
.fini = execlist_exec_queue_fini,
+ .destroy = execlist_exec_queue_destroy,
.set_priority = execlist_exec_queue_set_priority,
.set_timeslice = execlist_exec_queue_set_timeslice,
.set_preempt_timeout = execlist_exec_queue_set_preempt_timeout,
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
index 415140936f11..92c4ba52db0c 100644
--- a/drivers/gpu/drm/xe/xe_execlist_types.h
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -42,7 +42,7 @@ struct xe_execlist_exec_queue {
bool has_run;
- struct work_struct fini_async;
+ struct work_struct destroy_async;
enum xe_exec_queue_priority active_priority;
struct list_head active_link;
diff --git a/drivers/gpu/drm/xe/xe_force_wake_types.h b/drivers/gpu/drm/xe/xe_force_wake_types.h
index 899fbbcb3ea9..14b7b86e801b 100644
--- a/drivers/gpu/drm/xe/xe_force_wake_types.h
+++ b/drivers/gpu/drm/xe/xe_force_wake_types.h
@@ -52,7 +52,22 @@ enum xe_force_wake_domains {
};
/**
- * struct xe_force_wake_domain - XE force wake domains
+ * struct xe_force_wake_domain - Xe force wake power domain
+ *
+ * Represents an individual device-internal power domain. The driver must
+ * ensure the power domain is awake before accessing registers or other
+ * hardware functionality that is part of the power domain. Since different
+ * driver threads may access hardware units simultaneously, a reference count
+ * is used to ensure that the domain remains awake as long as any software
+ * is using the part of the hardware covered by the power domain.
+ *
+ * Hardware provides a register interface to allow the driver to request
+ * wake/sleep of power domains, although in most cases the actual action of
+ * powering the hardware up/down is handled by firmware (and may be subject to
+ * requirements and constraints outside of the driver's visibility) so the
+ * driver needs to wait for an acknowledgment that a wake request has been
+ * acted upon before accessing the parts of the hardware that reside within the
+ * power domain.
*/
struct xe_force_wake_domain {
/** @id: domain force wake id */
@@ -70,7 +85,14 @@ struct xe_force_wake_domain {
};
/**
- * struct xe_force_wake - XE force wake
+ * struct xe_force_wake - Xe force wake collection
+ *
+ * Represents a collection of related power domains (struct
+ * xe_force_wake_domain) associated with a subunit of the device.
+ *
+ * Currently only used for GT power domains (where the term "forcewake" is used
+ * in the hardware documentation), although the interface could be extended to
+ * power wells in other parts of the hardware in the future.
*/
struct xe_force_wake {
/** @gt: back pointer to GT */
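
Since the wake/sleep handshake is mediated by firmware, the reference count described in the kernel-doc above is what keeps a domain up across concurrent users: hardware is asked to sleep only when the last reference is dropped. The snippet below is a schematic of that rule only, not the driver's actual code or field names:

static int wake_ref;	/* per-domain reference count (schematic) */

static void domain_get(void)
{
	if (wake_ref++ == 0) {
		/* write wake request, then poll for the firmware ack */
	}
}

static void domain_put(void)
{
	if (--wake_ref == 0) {
		/* write sleep request; firmware may defer the power-down */
	}
}
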
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 29d4d3f51da1..ef481b334af4 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -23,13 +23,14 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
+#include "xe_tile_printk.h"
#include "xe_tile_sriov_vf.h"
+#include "xe_tlb_inval.h"
#include "xe_wa.h"
#include "xe_wopcm.h"
@@ -106,10 +107,23 @@ static unsigned int probe_gsm_size(struct pci_dev *pdev)
static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
{
struct xe_tile *tile = ggtt->tile;
- struct xe_gt *affected_gt = XE_WA(tile->primary_gt, 22019338487) ?
- tile->primary_gt : tile->media_gt;
- struct xe_mmio *mmio = &affected_gt->mmio;
- u32 max_gtt_writes = XE_WA(ggtt->tile->primary_gt, 22019338487) ? 1100 : 63;
+ struct xe_gt *affected_gt;
+ u32 max_gtt_writes;
+
+ if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 22019338487)) {
+ affected_gt = tile->primary_gt;
+ max_gtt_writes = 1100;
+
+ /* Only expected to apply to primary GT on dgpu platforms */
+ xe_tile_assert(tile, IS_DGFX(tile_to_xe(tile)));
+ } else {
+ affected_gt = tile->media_gt;
+ max_gtt_writes = 63;
+
+ /* Only expected to apply to media GT on igpu platforms */
+ xe_tile_assert(tile, !IS_DGFX(tile_to_xe(tile)));
+ }
+
/*
* Wa_22019338487: GMD_ID is a RO register, a dummy write forces gunit
* to wait for completion of prior GTT writes before letting this through.
@@ -118,7 +132,7 @@ static void ggtt_update_access_counter(struct xe_ggtt *ggtt)
lockdep_assert_held(&ggtt->lock);
if ((++ggtt->access_count % max_gtt_writes) == 0) {
- xe_mmio_write32(mmio, GMD_ID, 0x0);
+ xe_mmio_write32(&affected_gt->mmio, GMD_ID, 0x0);
ggtt->access_count = 0;
}
}
@@ -137,6 +151,14 @@ static void xe_ggtt_set_pte_and_flush(struct xe_ggtt *ggtt, u64 addr, u64 pte)
ggtt_update_access_counter(ggtt);
}
+static u64 xe_ggtt_get_pte(struct xe_ggtt *ggtt, u64 addr)
+{
+ xe_tile_assert(ggtt->tile, !(addr & XE_PTE_MASK));
+ xe_tile_assert(ggtt->tile, addr < ggtt->size);
+
+ return readq(&ggtt->gsm[addr >> XE_PTE_SHIFT]);
+}
+
static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
{
u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
@@ -158,6 +180,16 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
}
}
+static void primelockdep(struct xe_ggtt *ggtt)
+{
+ if (!IS_ENABLED(CONFIG_LOCKDEP))
+ return;
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ might_lock(&ggtt->lock);
+ fs_reclaim_release(GFP_KERNEL);
+}
+
/**
* xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
* @tile: &xe_tile
@@ -168,9 +200,19 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
*/
struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
{
- struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL);
- if (ggtt)
- ggtt->tile = tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_ggtt *ggtt;
+
+ ggtt = drmm_kzalloc(&xe->drm, sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt)
+ return NULL;
+
+ if (drmm_mutex_init(&xe->drm, &ggtt->lock))
+ return NULL;
+
+ primelockdep(ggtt);
+ ggtt->tile = tile;
+
return ggtt;
}
@@ -179,7 +221,6 @@ static void ggtt_fini_early(struct drm_device *drm, void *arg)
struct xe_ggtt *ggtt = arg;
destroy_workqueue(ggtt->wq);
- mutex_destroy(&ggtt->lock);
drm_mm_takedown(&ggtt->mm);
}
@@ -197,37 +238,28 @@ void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
}
#endif
-static void primelockdep(struct xe_ggtt *ggtt)
-{
- if (!IS_ENABLED(CONFIG_LOCKDEP))
- return;
-
- fs_reclaim_acquire(GFP_KERNEL);
- might_lock(&ggtt->lock);
- fs_reclaim_release(GFP_KERNEL);
-}
-
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
.pte_encode_flags = xelp_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
+ .ggtt_get_pte = xe_ggtt_get_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
.pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
+ .ggtt_get_pte = xe_ggtt_get_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
.pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
+ .ggtt_get_pte = xe_ggtt_get_pte,
};
static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
{
drm_mm_init(&ggtt->mm, reserved,
ggtt->size - reserved);
- mutex_init(&ggtt->lock);
- primelockdep(ggtt);
}
int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
@@ -269,7 +301,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
gsm_size = probe_gsm_size(pdev);
if (gsm_size == 0) {
- drm_err(&xe->drm, "Hardware reported no preallocated GSM\n");
+ xe_tile_err(ggtt->tile, "Hardware reported no preallocated GSM\n");
return -ENOMEM;
}
@@ -283,14 +315,17 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->size = GUC_GGTT_TOP;
if (GRAPHICS_VERx100(xe) >= 1270)
- ggtt->pt_ops = (ggtt->tile->media_gt &&
- XE_WA(ggtt->tile->media_gt, 22019338487)) ||
- XE_WA(ggtt->tile->primary_gt, 22019338487) ?
- &xelpg_pt_wa_ops : &xelpg_pt_ops;
+ ggtt->pt_ops =
+ (ggtt->tile->media_gt && XE_GT_WA(ggtt->tile->media_gt, 22019338487)) ||
+ (ggtt->tile->primary_gt && XE_GT_WA(ggtt->tile->primary_gt, 22019338487)) ?
+ &xelpg_pt_wa_ops : &xelpg_pt_ops;
else
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
+ if (!ggtt->wq)
+ return -ENOMEM;
+
__xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
@@ -438,9 +473,8 @@ static void ggtt_invalidate_gt_tlb(struct xe_gt *gt)
if (!gt)
return;
- err = xe_gt_tlb_invalidation_ggtt(gt);
- if (err)
- drm_warn(&gt_to_xe(gt)->drm, "xe_gt_tlb_invalidation_ggtt error=%d", err);
+ err = xe_tlb_inval_ggtt(&gt->tlb_inval);
+ xe_gt_WARN(gt, err, "Failed to invalidate GGTT (%pe)", ERR_PTR(err));
}
static void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
@@ -467,8 +501,8 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
string_get_size(node->size, 1, STRING_UNITS_2, buf, sizeof(buf));
- xe_gt_dbg(ggtt->tile->primary_gt, "GGTT %#llx-%#llx (%s) %s\n",
- node->start, node->start + node->size, buf, description);
+ xe_tile_dbg(ggtt->tile, "GGTT %#llx-%#llx (%s) %s\n",
+ node->start, node->start + node->size, buf, description);
}
}
@@ -500,9 +534,8 @@ int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64
err = drm_mm_reserve_node(&ggtt->mm, &node->base);
- if (xe_gt_WARN(ggtt->tile->primary_gt, err,
- "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
- node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
+ if (xe_tile_WARN(ggtt->tile, err, "Failed to balloon GGTT %#llx-%#llx (%pe)\n",
+ node->base.start, node->base.start + node->base.size, ERR_PTR(err)))
return err;
xe_ggtt_dump_node(ggtt, &node->base, "balloon");
@@ -676,6 +709,20 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
}
/**
+ * xe_ggtt_node_pt_size() - Get the size of page table entries needed to map a GGTT node.
+ * @node: the &xe_ggtt_node
+ *
+ * Return: GGTT node page table entries size in bytes.
+ */
+size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node)
+{
+ if (!node)
+ return 0;
+
+ return node->base.size / XE_PAGE_SIZE * sizeof(u64);
+}
+
+/**
* xe_ggtt_map_bo - Map the BO into GGTT
* @ggtt: the &xe_ggtt where node will be mapped
* @node: the &xe_ggtt_node where this BO is mapped
@@ -732,7 +779,7 @@ void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
}
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
- u64 start, u64 end)
+ u64 start, u64 end, struct drm_exec *exec)
{
u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE;
u8 tile_id = ggtt->tile->id;
@@ -747,7 +794,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
return 0;
}
- err = xe_bo_validate(bo, NULL, false);
+ err = xe_bo_validate(bo, NULL, false, exec);
if (err)
return err;
@@ -789,25 +836,28 @@ out:
* @bo: the &xe_bo to be inserted
* @start: address where it will be inserted
* @end: end of the range where it will be inserted
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Return: 0 on success or a negative error code on failure.
*/
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
- u64 start, u64 end)
+ u64 start, u64 end, struct drm_exec *exec)
{
- return __xe_ggtt_insert_bo_at(ggtt, bo, start, end);
+ return __xe_ggtt_insert_bo_at(ggtt, bo, start, end, exec);
}
/**
* xe_ggtt_insert_bo - Insert BO into GGTT
* @ggtt: the &xe_ggtt where bo will be inserted
* @bo: the &xe_bo to be inserted
+ * @exec: The drm_exec transaction to use for exhaustive eviction.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo,
+ struct drm_exec *exec)
{
- return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX);
+ return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX, exec);
}
/**
@@ -905,6 +955,85 @@ void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid)
xe_ggtt_assign_locked(node->ggtt, &node->base, vfid);
mutex_unlock(&node->ggtt->lock);
}
+
+/**
+ * xe_ggtt_node_save() - Save a &xe_ggtt_node to a buffer.
+ * @node: the &xe_ggtt_node to be saved
+ * @dst: destination buffer
+ * @size: destination buffer size in bytes
+ * @vfid: VF identifier
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid)
+{
+ struct xe_ggtt *ggtt;
+ u64 start, end;
+ u64 *buf = dst;
+ u64 pte;
+
+ if (!node)
+ return -ENOENT;
+
+ guard(mutex)(&node->ggtt->lock);
+
+ if (xe_ggtt_node_pt_size(node) != size)
+ return -EINVAL;
+
+ ggtt = node->ggtt;
+ start = node->base.start;
+ end = start + node->base.size - 1;
+
+ while (start < end) {
+ pte = ggtt->pt_ops->ggtt_get_pte(ggtt, start);
+ if (vfid != u64_get_bits(pte, GGTT_PTE_VFID))
+ return -EPERM;
+
+ *buf++ = u64_replace_bits(pte, 0, GGTT_PTE_VFID);
+ start += XE_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/**
+ * xe_ggtt_node_load() - Load a &xe_ggtt_node from a buffer.
+ * @node: the &xe_ggtt_node to be loaded
+ * @src: source buffer
+ * @size: source buffer size in bytes
+ * @vfid: VF identifier
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid)
+{
+ u64 vfid_pte = xe_encode_vfid_pte(vfid);
+ const u64 *buf = src;
+ struct xe_ggtt *ggtt;
+ u64 start, end;
+
+ if (!node)
+ return -ENOENT;
+
+ guard(mutex)(&node->ggtt->lock);
+
+ if (xe_ggtt_node_pt_size(node) != size)
+ return -EINVAL;
+
+ ggtt = node->ggtt;
+ start = node->base.start;
+ end = start + node->base.size - 1;
+
+ while (start < end) {
+ vfid_pte = u64_replace_bits(*buf++, vfid, GGTT_PTE_VFID);
+ ggtt->pt_ops->ggtt_set_pte(ggtt, start, vfid_pte);
+ start += XE_PAGE_SIZE;
+ }
+ xe_ggtt_invalidate(ggtt);
+
+ return 0;
+}
+
#endif
/**
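
xe_ggtt_node_save() and xe_ggtt_node_load() expect the caller to size the buffer with xe_ggtt_node_pt_size(), i.e. one u64 PTE per GGTT page. A sketch of a VF migration path using them; the kvmalloc bookkeeping is illustrative, and node/vfid are assumed to come from the caller:

	size_t size = xe_ggtt_node_pt_size(node);
	u64 *buf = kvmalloc(size, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;

	err = xe_ggtt_node_save(node, buf, size, vfid);	/* strips VFID bits */
	if (!err)
		err = xe_ggtt_node_load(node, buf, size, vfid);	/* re-stamps VFID */

	kvfree(buf);
	return err;
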
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index fbe1e397d05d..93fea4b6079c 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -10,6 +10,7 @@
struct drm_printer;
struct xe_tile;
+struct drm_exec;
struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
@@ -28,12 +29,13 @@ int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
u32 size, u32 align, u32 mm_flags);
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate);
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
+size_t xe_ggtt_node_pt_size(const struct xe_ggtt_node *node);
void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
struct xe_bo *bo, u16 pat_index);
void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo);
-int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo, struct drm_exec *exec);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
- u64 start, u64 end);
+ u64 start, u64 end, struct drm_exec *exec);
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
u64 xe_ggtt_largest_hole(struct xe_ggtt *ggtt, u64 alignment, u64 *spare);
@@ -42,6 +44,8 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
#ifdef CONFIG_PCI_IOV
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
+int xe_ggtt_node_save(struct xe_ggtt_node *node, void *dst, size_t size, u16 vfid);
+int xe_ggtt_node_load(struct xe_ggtt_node *node, const void *src, size_t size, u16 vfid);
#endif
#ifndef CONFIG_LOCKDEP
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
index c5e999d58ff2..dacd796f8184 100644
--- a/drivers/gpu/drm/xe/xe_ggtt_types.h
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -78,6 +78,8 @@ struct xe_ggtt_pt_ops {
u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index);
/** @ggtt_set_pte: Directly write into GGTT's PTE */
void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
+ /** @ggtt_get_pte: Directly read from GGTT's PTE */
+ u64 (*ggtt_get_pte)(struct xe_ggtt *ggtt, u64 addr);
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.c b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
index 869b43a4151d..f91e06d03511 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.c
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.c
@@ -122,3 +122,17 @@ void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
list_add_tail(&msg->link, &sched->msgs);
xe_sched_process_msg_queue(sched);
}
+
+/**
+ * xe_sched_add_msg_head() - Xe GPU scheduler add message to head of list
+ * @sched: Xe GPU scheduler
+ * @msg: Message to add
+ */
+void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg)
+{
+ lockdep_assert_held(&sched->base.job_list_lock);
+
+ list_add(&msg->link, &sched->msgs);
+ xe_sched_process_msg_queue(sched);
+}
diff --git a/drivers/gpu/drm/xe/xe_gpu_scheduler.h b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
index 308061f0cf37..c7a77a3a9681 100644
--- a/drivers/gpu/drm/xe/xe_gpu_scheduler.h
+++ b/drivers/gpu/drm/xe/xe_gpu_scheduler.h
@@ -7,7 +7,7 @@
#define _XE_GPU_SCHEDULER_H_
#include "xe_gpu_scheduler_types.h"
-#include "xe_sched_job_types.h"
+#include "xe_sched_job.h"
int xe_sched_init(struct xe_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
@@ -28,6 +28,8 @@ void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg);
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
struct xe_sched_msg *msg);
+void xe_sched_add_msg_head(struct xe_gpu_scheduler *sched,
+ struct xe_sched_msg *msg);
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
{
@@ -52,12 +54,14 @@ static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
struct drm_sched_job *s_job;
+ bool restore_replay = false;
list_for_each_entry(s_job, &sched->base.pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
struct dma_fence *hw_fence = s_fence->parent;
- if (hw_fence && !dma_fence_is_signaled(hw_fence))
+ restore_replay |= to_xe_sched_job(s_job)->restore_replay;
+ if (restore_replay || (hw_fence && !dma_fence_is_signaled(hw_fence)))
sched->base.ops->run_job(s_job);
}
}
@@ -76,17 +80,30 @@ static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
spin_unlock(&sched->base.job_list_lock);
}
+/**
+ * xe_sched_first_pending_job() - Find first pending job which is unsignaled
+ * @sched: Xe GPU scheduler
+ *
+ * Return: first unsignaled job in the pending list, or NULL
+ */
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
- struct xe_sched_job *job;
+ struct xe_sched_job *job, *r_job = NULL;
spin_lock(&sched->base.job_list_lock);
- job = list_first_entry_or_null(&sched->base.pending_list,
- struct xe_sched_job, drm.list);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ struct drm_sched_fence *s_fence = job->drm.s_fence;
+ struct dma_fence *hw_fence = s_fence->parent;
+
+ if (hw_fence && !dma_fence_is_signaled(hw_fence)) {
+ r_job = job;
+ break;
+ }
+ }
spin_unlock(&sched->base.job_list_lock);
- return job;
+ return r_job;
}
static inline int
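
The resubmission change above keys off restore_replay: once any pending job is marked for replay, that job and everything after it is re-run, even jobs whose hardware fences already signaled, so ring state is rebuilt in order after VF migration. The predicate reduces to the following, where job_hw_fence_unsignaled() is shorthand for the hw_fence && !dma_fence_is_signaled(hw_fence) test in the hunk:

	/* Condensed from xe_sched_resubmit_jobs() above. */
	struct drm_sched_job *s_job;
	bool replay = false;

	list_for_each_entry(s_job, &sched->base.pending_list, list) {
		replay |= to_xe_sched_job(s_job)->restore_replay;
		if (replay || job_hw_fence_unsignaled(s_job))
			sched->base.ops->run_job(s_job);
	}
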
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 1d84bf2f2cef..dd69cb834f8e 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -136,10 +136,10 @@ static int query_compatibility_version(struct xe_gsc *gsc)
u64 ggtt_offset;
int err;
- bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_VER_PKT_SZ * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(xe, tile, GSC_VER_PKT_SZ * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT, false);
if (IS_ERR(bo)) {
xe_gt_err(gt, "failed to allocate bo for GSC version query\n");
return PTR_ERR(bo);
@@ -266,7 +266,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
unsigned int fw_ref;
int ret;
- if (XE_WA(tile->primary_gt, 14018094691)) {
+ if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 14018094691)) {
fw_ref = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
/*
@@ -281,7 +281,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
ret = gsc_upload(gsc);
- if (XE_WA(tile->primary_gt, 14018094691))
+ if (tile->primary_gt && XE_GT_WA(tile->primary_gt, 14018094691))
xe_force_wake_put(gt_to_fw(tile->primary_gt), fw_ref);
if (ret)
@@ -593,7 +593,7 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
u32 gs1_clr = prep ? 0 : HECI_H_GS1_ER_PREP;
/* WA only applies if the GSC is loaded */
- if (!XE_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
+ if (!XE_GT_WA(gt, 14015076503) || !gsc_fw_is_loaded(gt))
return;
xe_mmio_rmw32(&gt->mmio, HECI_H_GS1(MTL_GSC_HECI2_BASE), gs1_clr, gs1_set);
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index c8eda36546d3..dbb5e7a9bc6a 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -32,15 +32,14 @@
#include "xe_gt_freq.h"
#include "xe_gt_idle.h"
#include "xe_gt_mcr.h"
-#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sysfs.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_gt_topology.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_pc.h"
+#include "xe_guc_submit.h"
#include "xe_hw_fence.h"
#include "xe_hw_engine_class_sysfs.h"
#include "xe_irq.h"
@@ -49,6 +48,7 @@
#include "xe_map.h"
#include "xe_migrate.h"
#include "xe_mmio.h"
+#include "xe_pagefault.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_mocs.h"
@@ -57,6 +57,7 @@
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sriov.h"
+#include "xe_tlb_inval.h"
#include "xe_tuning.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
@@ -64,29 +65,29 @@
#include "xe_wa.h"
#include "xe_wopcm.h"
-static void gt_fini(struct drm_device *drm, void *arg)
-{
- struct xe_gt *gt = arg;
-
- destroy_workqueue(gt->ordered_wq);
-}
-
struct xe_gt *xe_gt_alloc(struct xe_tile *tile)
{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct drm_device *drm = &xe->drm;
+ bool shared_wq = xe->info.needs_shared_vf_gt_wq && tile->primary_gt &&
+ IS_SRIOV_VF(xe);
+ struct workqueue_struct *ordered_wq;
struct xe_gt *gt;
- int err;
- gt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*gt), GFP_KERNEL);
+ gt = drmm_kzalloc(drm, sizeof(*gt), GFP_KERNEL);
if (!gt)
return ERR_PTR(-ENOMEM);
gt->tile = tile;
- gt->ordered_wq = alloc_ordered_workqueue("gt-ordered-wq",
- WQ_MEM_RECLAIM);
+ if (shared_wq && tile->primary_gt->ordered_wq)
+ ordered_wq = tile->primary_gt->ordered_wq;
+ else
+ ordered_wq = drmm_alloc_ordered_workqueue(drm, "gt-ordered-wq",
+ WQ_MEM_RECLAIM);
+ if (IS_ERR(ordered_wq))
+ return ERR_CAST(ordered_wq);
- err = drmm_add_action_or_reset(&gt_to_xe(gt)->drm, gt_fini, gt);
- if (err)
- return ERR_PTR(err);
+ gt->ordered_wq = ordered_wq;
return gt;
}
@@ -97,7 +98,7 @@ void xe_gt_sanitize(struct xe_gt *gt)
* FIXME: if xe_uc_sanitize is called here, on TGL driver will not
* reload
*/
- gt->uc.guc.submission_state.enabled = false;
+ xe_guc_submit_disable(&gt->uc.guc);
}
static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
@@ -105,7 +106,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
unsigned int fw_ref;
u32 reg;
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
@@ -127,7 +128,7 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
unsigned int fw_ref;
u32 reg;
- if (!XE_WA(gt, 16023588340))
+ if (!XE_GT_WA(gt, 16023588340))
return;
if (xe_gt_is_media_type(gt))
@@ -397,9 +398,15 @@ int xe_gt_init_early(struct xe_gt *gt)
return err;
}
+ if (IS_SRIOV_VF(gt_to_xe(gt))) {
+ err = xe_gt_sriov_vf_init_early(gt);
+ if (err)
+ return err;
+ }
+
xe_reg_sr_init(&gt->reg_sr, "GT", gt_to_xe(gt));
- err = xe_wa_init(gt);
+ err = xe_wa_gt_init(gt);
if (err)
return err;
@@ -407,12 +414,12 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- xe_wa_process_oob(gt);
+ xe_wa_process_gt_oob(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
- err = xe_gt_tlb_invalidation_init_early(gt);
+ err = xe_gt_tlb_inval_init_early(gt);
if (err)
return err;
@@ -564,11 +571,9 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt)
if (xe_gt_is_main_type(gt)) {
struct xe_tile *tile = gt_to_tile(gt);
- tile->migrate = xe_migrate_init(tile);
- if (IS_ERR(tile->migrate)) {
- err = PTR_ERR(tile->migrate);
+ err = xe_migrate_init(tile->migrate);
+ if (err)
goto err_force_wake;
- }
}
err = xe_uc_load_hw(&gt->uc);
@@ -584,10 +589,8 @@ static int gt_init_with_all_forcewake(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
- if (IS_SRIOV_PF(gt_to_xe(gt))) {
- xe_gt_sriov_pf_init(gt);
+ if (IS_SRIOV_PF(gt_to_xe(gt)))
xe_gt_sriov_pf_init_hw(gt);
- }
xe_force_wake_put(gt_to_fw(gt), fw_ref);
@@ -604,6 +607,13 @@ static void xe_gt_fini(void *arg)
struct xe_gt *gt = arg;
int i;
+ if (disable_work_sync(&gt->reset.worker))
+ /*
+ * If a pending gt_reset_worker() was prevented from running, release
+ * the rpm reference taken on its behalf here.
+ */
+ xe_pm_runtime_put(gt_to_xe(gt));
+
for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
xe_hw_fence_irq_finish(&gt->fence_irq[i]);
@@ -634,10 +644,6 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
- err = xe_gt_pagefault_init(gt);
- if (err)
- return err;
-
err = xe_gt_idle_init(&gt->gtidle);
if (err)
return err;
@@ -658,6 +664,12 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
+ if (IS_SRIOV_VF(gt_to_xe(gt))) {
+ err = xe_gt_sriov_vf_init(gt);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -804,22 +816,21 @@ static int do_gt_restart(struct xe_gt *gt)
return 0;
}
-static int gt_reset(struct xe_gt *gt)
+static void gt_reset_worker(struct work_struct *w)
{
+ struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
unsigned int fw_ref;
int err;
if (xe_device_wedged(gt_to_xe(gt)))
- return -ECANCELED;
+ goto err_pm_put;
/* We only support GT resets with GuC submission */
if (!xe_device_uc_enabled(gt_to_xe(gt)))
- return -ENODEV;
+ goto err_pm_put;
xe_gt_info(gt, "reset started\n");
- xe_pm_runtime_get(gt_to_xe(gt));
-
if (xe_fault_inject_gt_reset()) {
err = -ECANCELED;
goto err_fail;
@@ -838,11 +849,11 @@ static int gt_reset(struct xe_gt *gt)
xe_uc_gucrc_disable(&gt->uc);
xe_uc_stop_prepare(&gt->uc);
- xe_gt_pagefault_reset(gt);
+ xe_pagefault_reset(gt_to_xe(gt), gt);
xe_uc_stop(&gt->uc);
- xe_gt_tlb_invalidation_reset(gt);
+ xe_tlb_inval_reset(&gt->tlb_inval);
err = do_gt_reset(gt);
if (err)
@@ -853,29 +864,23 @@ static int gt_reset(struct xe_gt *gt)
goto err_out;
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ /* Pair with get while enqueueing the work in xe_gt_reset_async() */
xe_pm_runtime_put(gt_to_xe(gt));
xe_gt_info(gt, "reset done\n");
- return 0;
+ return;
err_out:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
XE_WARN_ON(xe_uc_start(&gt->uc));
+
err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
-
xe_device_declare_wedged(gt_to_xe(gt));
+err_pm_put:
xe_pm_runtime_put(gt_to_xe(gt));
-
- return err;
-}
-
-static void gt_reset_worker(struct work_struct *w)
-{
- struct xe_gt *gt = container_of(w, typeof(*gt), reset.worker);
-
- gt_reset(gt);
}
void xe_gt_reset_async(struct xe_gt *gt)
@@ -887,7 +892,11 @@ void xe_gt_reset_async(struct xe_gt *gt)
return;
xe_gt_info(gt, "reset queued\n");
- queue_work(gt->ordered_wq, &gt->reset.worker);
+
+ /* Pair with put in gt_reset_worker() if work is enqueued */
+ xe_pm_runtime_get_noresume(gt_to_xe(gt));
+ if (!queue_work(gt->ordered_wq, &gt->reset.worker))
+ xe_pm_runtime_put(gt_to_xe(gt));
}
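
The reset path now follows a strict pairing rule for runtime pm: the queuing side takes a reference on the worker's behalf without resuming, and exactly one matching put happens wherever the work is consumed — on every exit path of gt_reset_worker(), on a failed queue_work() above, or in xe_gt_fini() when a still-pending worker is disabled. A condensed sketch of the pattern, reusing the names from this patch:

    /* Queue side: reference taken on behalf of the worker. */
    xe_pm_runtime_get_noresume(gt_to_xe(gt));
    if (!queue_work(gt->ordered_wq, &gt->reset.worker))
            xe_pm_runtime_put(gt_to_xe(gt));    /* already queued, drop ours */

    /* Teardown side: worker was pending but never ran. */
    if (disable_work_sync(&gt->reset.worker))
            xe_pm_runtime_put(gt_to_xe(gt));
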
void xe_gt_suspend_prepare(struct xe_gt *gt)
@@ -958,7 +967,7 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
- XE_WA(gt, 22019338487))
+ XE_GT_WA(gt, 22019338487))
ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
return ret;
@@ -1056,5 +1065,5 @@ void xe_gt_declare_wedged(struct xe_gt *gt)
xe_gt_assert(gt, gt_to_xe(gt)->wedged.mode);
xe_uc_declare_wedged(&gt->uc);
- xe_gt_tlb_invalidation_reset(gt);
+ xe_tlb_inval_reset(&gt->tlb_inval);
}
diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h
index 41880979f4de..9d710049da45 100644
--- a/drivers/gpu/drm/xe/xe_gt.h
+++ b/drivers/gpu/drm/xe/xe_gt.h
@@ -12,6 +12,7 @@
#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_hw_engine.h"
#define for_each_hw_engine(hwe__, gt__, id__) \
@@ -21,6 +22,12 @@
#define CCS_MASK(gt) (((gt)->info.engine_mask & XE_HW_ENGINE_CCS_MASK) >> XE_HW_ENGINE_CCS0)
+#define GT_VER(gt) ({ \
+ typeof(gt) gt_ = (gt); \
+ struct xe_device *xe = gt_to_xe(gt_); \
+ xe_gt_is_media_type(gt_) ? MEDIA_VER(xe) : GRAPHICS_VER(xe); \
+})
+
extern struct fault_attr gt_reset_failure;
static inline bool xe_fault_inject_gt_reset(void)
{
@@ -124,4 +131,16 @@ static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
hwe->instance == gt->usm.reserved_bcs_instance;
}
+/**
+ * xe_gt_recovery_pending() - GT recovery pending
+ * @gt: the &xe_gt
+ *
+ * Return: True if GT recovery is pending, False otherwise
+ */
+static inline bool xe_gt_recovery_pending(struct xe_gt *gt)
+{
+ return IS_SRIOV_VF(gt_to_xe(gt)) &&
+ xe_gt_sriov_vf_recovery_pending(gt);
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c
index 4f011d1573c6..bfc25c46f798 100644
--- a/drivers/gpu/drm/xe/xe_gt_clock.c
+++ b/drivers/gpu/drm/xe/xe_gt_clock.c
@@ -55,30 +55,11 @@ static void read_crystal_clock(struct xe_gt *gt, u32 rpm_config_reg, u32 *freq,
}
}
-static void check_ctc_mode(struct xe_gt *gt)
-{
- /*
- * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later
- * platforms. In theory it could be a valid setting for pre-Xe2
- * platforms, but there's no documentation on how to properly handle
- * this case. Reading TIMESTAMP_OVERRIDE, as the driver attempted in
- * the past has been confirmed as incorrect by the hardware architects.
- *
- * For now just warn if we ever encounter hardware in the wild that
- * has this setting and move on as if it hadn't been set.
- */
- if (xe_mmio_read32(&gt->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC)
- xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n");
-}
-
int xe_gt_clock_init(struct xe_gt *gt)
{
u32 freq;
u32 c0;
- if (!IS_SRIOV_VF(gt_to_xe(gt)))
- check_ctc_mode(gt);
-
c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
read_crystal_clock(gt, c0, &freq, &gt->info.timestamp_base);
@@ -93,11 +74,6 @@ int xe_gt_clock_init(struct xe_gt *gt)
return 0;
}
-static u64 div_u64_roundup(u64 n, u32 d)
-{
- return div_u64(n + d - 1, d);
-}
-
/**
* xe_gt_clock_interval_to_ms - Convert sampled GT clock ticks to msec
*
@@ -108,5 +84,5 @@ static u64 div_u64_roundup(u64 n, u32 d)
*/
u64 xe_gt_clock_interval_to_ms(struct xe_gt *gt, u64 count)
{
- return div_u64_roundup(count * MSEC_PER_SEC, gt->info.reference_clock);
+ return mul_u64_u32_div(count, MSEC_PER_SEC, gt->info.reference_clock);
}
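
The rewrite trades div_u64_roundup(count * MSEC_PER_SEC, ...) for mul_u64_u32_div(), which keeps the count * 1000 product in a wide intermediate instead of letting it wrap at 2^64 for very large tick counts (the result now truncates rather than rounds up, a sub-millisecond difference). A hedged user-space sketch of the overflow, using GCC's __int128 as a stand-in for the kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t count = 1ULL << 55;    /* large GT tick count */
            uint32_t ref_clk = 19200000;    /* example 19.2 MHz reference clock */

            uint64_t wrapped = count * 1000 / ref_clk;          /* product wraps */
            uint64_t wide = (uint64_t)((unsigned __int128)count * 1000 / ref_clk);

            printf("wrapped=%llu wide=%llu\n",
                   (unsigned long long)wrapped, (unsigned long long)wide);
            return 0;
    }
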
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 848618acdca8..e4fd632f43cf 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -12,7 +12,6 @@
#include "xe_device.h"
#include "xe_force_wake.h"
-#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_idle.h"
@@ -29,11 +28,18 @@
#include "xe_pm.h"
#include "xe_reg_sr.h"
#include "xe_reg_whitelist.h"
+#include "xe_sa.h"
#include "xe_sriov.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_tuning.h"
#include "xe_uc_debugfs.h"
#include "xe_wa.h"
+static struct xe_gt *node_to_gt(struct drm_info_node *node)
+{
+ return node->dent->d_parent->d_inode->i_private;
+}
+
/**
* xe_gt_debugfs_simple_show - A show callback for struct drm_info_list
* @m: the &seq_file
@@ -76,8 +82,7 @@ int xe_gt_debugfs_simple_show(struct seq_file *m, void *data)
{
struct drm_printer p = drm_seq_file_printer(m);
struct drm_info_node *node = m->private;
- struct dentry *parent = node->dent->d_parent;
- struct xe_gt *gt = parent->d_inode->i_private;
+ struct xe_gt *gt = node_to_gt(node);
int (*print)(struct xe_gt *, struct drm_printer *) = node->info_ent->data;
if (WARN_ON(!print))
@@ -86,15 +91,36 @@ int xe_gt_debugfs_simple_show(struct seq_file *m, void *data)
return print(gt, &p);
}
-static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_gt_debugfs_show_with_rpm - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * Similar to xe_gt_debugfs_simple_show() but implicitly takes an RPM ref.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_debugfs_show_with_rpm(struct seq_file *m, void *data)
{
+ struct drm_info_node *node = m->private;
+ struct xe_gt *gt = node_to_gt(node);
struct xe_device *xe = gt_to_xe(gt);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_gt_debugfs_simple_show(m, data);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
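
With the runtime-pm handling hoisted into the dispatch helper, the individual callbacks reduce to pure dump functions. A sketch of how an entry is wired up (example_info and example_list are hypothetical; the real entries follow later in this patch):

    static int example_info(struct xe_gt *gt, struct drm_printer *p)
    {
            drm_printf(p, "GT%u\n", gt->info.id);   /* no rpm calls needed */
            return 0;
    }

    static const struct drm_info_list example_list[] = {
            { "example", .show = xe_gt_debugfs_show_with_rpm, .data = example_info },
    };
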
+
+static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
+{
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
unsigned int fw_ref;
int ret = 0;
- xe_pm_runtime_get(xe);
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
ret = -ETIMEDOUT;
@@ -106,70 +132,21 @@ static int hw_engines(struct xe_gt *gt, struct drm_printer *p)
fw_put:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
- xe_pm_runtime_put(xe);
return ret;
}
-static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
-{
- int ret;
-
- xe_pm_runtime_get(gt_to_xe(gt));
- ret = xe_gt_idle_pg_print(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return ret;
-}
-
-static int sa_info(struct xe_gt *gt, struct drm_printer *p)
-{
- struct xe_tile *tile = gt_to_tile(gt);
-
- xe_pm_runtime_get(gt_to_xe(gt));
- drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
- tile->mem.kernel_bb_pool->gpu_addr);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int topology(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_topology_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
static int steering(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_mcr_steering_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
-static int ggtt(struct xe_gt *gt, struct drm_printer *p)
-{
- int ret;
-
- xe_pm_runtime_get(gt_to_xe(gt));
- ret = xe_ggtt_dump(gt_to_tile(gt)->mem.ggtt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return ret;
-}
-
static int register_save_restore(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_hw_engine *hwe;
enum xe_hw_engine_id id;
- xe_pm_runtime_get(gt_to_xe(gt));
-
xe_reg_sr_dump(&gt->reg_sr, p);
drm_printf(p, "\n");
@@ -187,98 +164,42 @@ static int register_save_restore(struct xe_gt *gt, struct drm_printer *p)
for_each_hw_engine(hwe, gt, id)
xe_reg_whitelist_dump(&hwe->reg_whitelist, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int workarounds(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_wa_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int tunings(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_tuning_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int pat(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_pat_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int mocs(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_mocs_dump(gt, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int rcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_RENDER);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int ccs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_COMPUTE);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int bcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_COPY);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int vcs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_DECODE);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int vecs_default_lrc(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_lrc_dump_default(p, gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
{
- xe_pm_runtime_get(gt_to_xe(gt));
xe_guc_hwconfig_dump(&gt->uc.guc, p);
- xe_pm_runtime_put(gt_to_xe(gt));
-
return 0;
}
@@ -288,28 +209,26 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
* - without access to the PF specific data
*/
static const struct drm_info_list vf_safe_debugfs_list[] = {
- {"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
- {"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
- {"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
- {"register-save-restore", .show = xe_gt_debugfs_simple_show, .data = register_save_restore},
- {"workarounds", .show = xe_gt_debugfs_simple_show, .data = workarounds},
- {"tunings", .show = xe_gt_debugfs_simple_show, .data = tunings},
- {"default_lrc_rcs", .show = xe_gt_debugfs_simple_show, .data = rcs_default_lrc},
- {"default_lrc_ccs", .show = xe_gt_debugfs_simple_show, .data = ccs_default_lrc},
- {"default_lrc_bcs", .show = xe_gt_debugfs_simple_show, .data = bcs_default_lrc},
- {"default_lrc_vcs", .show = xe_gt_debugfs_simple_show, .data = vcs_default_lrc},
- {"default_lrc_vecs", .show = xe_gt_debugfs_simple_show, .data = vecs_default_lrc},
- {"stats", .show = xe_gt_debugfs_simple_show, .data = xe_gt_stats_print_info},
- {"hwconfig", .show = xe_gt_debugfs_simple_show, .data = hwconfig},
+ { "topology", .show = xe_gt_debugfs_show_with_rpm, .data = xe_gt_topology_dump },
+ { "register-save-restore",
+ .show = xe_gt_debugfs_show_with_rpm, .data = register_save_restore },
+ { "workarounds", .show = xe_gt_debugfs_show_with_rpm, .data = xe_wa_gt_dump },
+ { "tunings", .show = xe_gt_debugfs_show_with_rpm, .data = xe_tuning_dump },
+ { "default_lrc_rcs", .show = xe_gt_debugfs_show_with_rpm, .data = rcs_default_lrc },
+ { "default_lrc_ccs", .show = xe_gt_debugfs_show_with_rpm, .data = ccs_default_lrc },
+ { "default_lrc_bcs", .show = xe_gt_debugfs_show_with_rpm, .data = bcs_default_lrc },
+ { "default_lrc_vcs", .show = xe_gt_debugfs_show_with_rpm, .data = vcs_default_lrc },
+ { "default_lrc_vecs", .show = xe_gt_debugfs_show_with_rpm, .data = vecs_default_lrc },
+ { "hwconfig", .show = xe_gt_debugfs_show_with_rpm, .data = hwconfig },
};
/* everything else should be added here */
static const struct drm_info_list pf_only_debugfs_list[] = {
- {"hw_engines", .show = xe_gt_debugfs_simple_show, .data = hw_engines},
- {"mocs", .show = xe_gt_debugfs_simple_show, .data = mocs},
- {"pat", .show = xe_gt_debugfs_simple_show, .data = pat},
- {"powergate_info", .show = xe_gt_debugfs_simple_show, .data = powergate_info},
- {"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
+ { "hw_engines", .show = xe_gt_debugfs_show_with_rpm, .data = hw_engines },
+ { "mocs", .show = xe_gt_debugfs_show_with_rpm, .data = xe_mocs_dump },
+ { "pat", .show = xe_gt_debugfs_show_with_rpm, .data = xe_pat_dump },
+ { "powergate_info", .show = xe_gt_debugfs_show_with_rpm, .data = xe_gt_idle_pg_print },
+ { "steering", .show = xe_gt_debugfs_show_with_rpm, .data = steering },
};
static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t *ppos,
@@ -328,6 +247,24 @@ static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t
return count;
}
+static ssize_t stats_write(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, xe_gt_stats_clear, gt);
+}
+
+static int stats_show(struct seq_file *s, void *unused)
+{
+ struct drm_printer p = drm_seq_file_printer(s);
+ struct xe_gt *gt = s->private;
+
+ return xe_gt_stats_print_info(gt, &p);
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(stats);
+
static void force_reset(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -388,13 +325,18 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
struct drm_minor *minor = gt_to_xe(gt)->drm.primary;
+ struct dentry *parent = gt->tile->debugfs;
struct dentry *root;
+ char symlink[16];
char name[8];
xe_gt_assert(gt, minor->debugfs_root);
+ if (IS_ERR(parent))
+ return;
+
snprintf(name, sizeof(name), "gt%d", gt->info.id);
- root = debugfs_create_dir(name, minor->debugfs_root);
+ root = debugfs_create_dir(name, parent);
if (IS_ERR(root)) {
drm_warn(&xe->drm, "Create GT directory failed");
return;
@@ -408,6 +350,7 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
root->d_inode->i_private = gt;
/* VF safe */
+ debugfs_create_file("stats", 0600, root, gt, &stats_fops);
debugfs_create_file("force_reset", 0600, root, gt, &force_reset_fops);
debugfs_create_file("force_reset_sync", 0600, root, gt, &force_reset_sync_fops);
@@ -426,4 +369,11 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
xe_gt_sriov_pf_debugfs_register(gt, root);
else if (IS_SRIOV_VF(xe))
xe_gt_sriov_vf_debugfs_register(gt, root);
+
+ /*
+ * Backwards compatibility only: create a link for the legacy clients
+ * who may expect gt/ directory at the root level, not the tile level.
+ */
+ snprintf(symlink, sizeof(symlink), "tile%u/%s", gt->tile->id, name);
+ debugfs_create_symlink(name, minor->debugfs_root, symlink);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.h b/drivers/gpu/drm/xe/xe_gt_debugfs.h
index 05a6cc93c78c..32ee3264051b 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.h
@@ -11,5 +11,6 @@ struct xe_gt;
void xe_gt_debugfs_register(struct xe_gt *gt);
int xe_gt_debugfs_simple_show(struct seq_file *m, void *data);
+int xe_gt_debugfs_show_with_rpm(struct seq_file *m, void *data);
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_freq.c b/drivers/gpu/drm/xe/xe_gt_freq.c
index 60d9354e7dbf..849ea6c86e8e 100644
--- a/drivers/gpu/drm/xe/xe_gt_freq.c
+++ b/drivers/gpu/drm/xe/xe_gt_freq.c
@@ -29,24 +29,26 @@
* PCODE is the ultimate decision maker of the actual running frequency, based
* on thermal and other running conditions.
*
- * Xe's Freq provides a sysfs API for frequency management:
+ * Xe's Freq provides a sysfs API for frequency management under
+ * ``<device>/tile#/gt#/freq0/`` directory.
*
- * device/tile#/gt#/freq0/<item>_freq *read-only* files:
+ * **Read-only** attributes:
*
- * - act_freq: The actual resolved frequency decided by PCODE.
- * - cur_freq: The current one requested by GuC PC to the PCODE.
- * - rpn_freq: The Render Performance (RP) N level, which is the minimal one.
- * - rpa_freq: The Render Performance (RP) A level, which is the achiveable one.
- * Calculated by PCODE at runtime based on multiple running conditions
- * - rpe_freq: The Render Performance (RP) E level, which is the efficient one.
- * Calculated by PCODE at runtime based on multiple running conditions
- * - rp0_freq: The Render Performance (RP) 0 level, which is the maximum one.
+ * - ``act_freq``: The actual resolved frequency decided by PCODE.
+ * - ``cur_freq``: The current one requested by GuC PC to the PCODE.
+ * - ``rpn_freq``: The Render Performance (RP) N level, which is the minimal one.
+ * - ``rpa_freq``: The Render Performance (RP) A level, which is the achievable one.
+ * Calculated by PCODE at runtime based on multiple running conditions
+ * - ``rpe_freq``: The Render Performance (RP) E level, which is the efficient one.
+ * Calculated by PCODE at runtime based on multiple running conditions
+ * - ``rp0_freq``: The Render Performance (RP) 0 level, which is the maximum one.
*
- * device/tile#/gt#/freq0/<item>_freq *read-write* files:
+ * **Read-write** attributes:
*
- * - min_freq: Min frequency request.
- * - max_freq: Max frequency request.
- * If max <= min, then freq_min becomes a fixed frequency request.
+ * - ``min_freq``: Min frequency request.
+ * - ``max_freq``: Max frequency request.
+ * If max <= min, then freq_min becomes a fixed frequency
+ * request.
*/
static struct xe_guc_pc *
@@ -99,13 +101,8 @@ static ssize_t rp0_freq_show(struct kobject *kobj,
{
struct device *dev = kobj_to_dev(kobj);
struct xe_guc_pc *pc = dev_to_pc(dev);
- u32 freq;
-
- xe_pm_runtime_get(dev_to_xe(dev));
- freq = xe_guc_pc_get_rp0_freq(pc);
- xe_pm_runtime_put(dev_to_xe(dev));
- return sysfs_emit(buf, "%d\n", freq);
+ return sysfs_emit(buf, "%d\n", xe_guc_pc_get_rp0_freq(pc));
}
static struct kobj_attribute attr_rp0_freq = __ATTR_RO(rp0_freq);
@@ -227,6 +224,33 @@ static ssize_t max_freq_store(struct kobject *kobj,
}
static struct kobj_attribute attr_max_freq = __ATTR_RW(max_freq);
+static ssize_t power_profile_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buff)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ xe_guc_pc_get_power_profile(dev_to_pc(dev), buff);
+
+ return strlen(buff);
+}
+
+static ssize_t power_profile_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buff, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct xe_guc_pc *pc = dev_to_pc(dev);
+ int err;
+
+ xe_pm_runtime_get(dev_to_xe(dev));
+ err = xe_guc_pc_set_power_profile(pc, buff);
+ xe_pm_runtime_put(dev_to_xe(dev));
+
+ return err ?: count;
+}
+static struct kobj_attribute attr_power_profile = __ATTR_RW(power_profile);
+
static const struct attribute *freq_attrs[] = {
&attr_act_freq.attr,
&attr_cur_freq.attr,
@@ -236,6 +260,7 @@ static const struct attribute *freq_attrs[] = {
&attr_rpn_freq.attr,
&attr_min_freq.attr,
&attr_max_freq.attr,
+ &attr_power_profile.attr,
NULL
};
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c
index ffb210216aa9..bdc9d9877ec4 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.c
+++ b/drivers/gpu/drm/xe/xe_gt_idle.c
@@ -124,6 +124,9 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt)
if (xe_gt_is_main_type(gt))
gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE;
+ if (MEDIA_VERx100(xe) >= 1100 && MEDIA_VERx100(xe) < 1255)
+ gtidle->powergate_enable |= MEDIA_SAMPLERS_POWERGATE_ENABLE;
+
if (xe->info.platform != XE_DG1) {
for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
if ((gt->info.engine_mask & BIT(i)))
@@ -246,6 +249,11 @@ int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "Media Slice%d Power Gate Status: %s\n", n,
str_up_down(pg_status & media_slices[n].status_bit));
}
+
+ if (MEDIA_VERx100(xe) >= 1100 && MEDIA_VERx100(xe) < 1255)
+ drm_printf(p, "Media Samplers Power Gating Enabled: %s\n",
+ str_yes_no(pg_enabled & MEDIA_SAMPLERS_POWERGATE_ENABLE));
+
return 0;
}
@@ -322,15 +330,11 @@ static void gt_idle_fini(void *arg)
{
struct kobject *kobj = arg;
struct xe_gt *gt = kobj_to_gt(kobj->parent);
- unsigned int fw_ref;
xe_gt_idle_disable_pg(gt);
- if (gt_to_xe(gt)->info.skip_guc_pc) {
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (gt_to_xe(gt)->info.skip_guc_pc)
xe_gt_idle_disable_c6(gt);
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- }
sysfs_remove_files(kobj, gt_idle_attrs);
kobject_put(kobj);
@@ -390,14 +394,23 @@ void xe_gt_idle_enable_c6(struct xe_gt *gt)
RC_CTL_HW_ENABLE | RC_CTL_TO_MODE | RC_CTL_RC6_ENABLE);
}
-void xe_gt_idle_disable_c6(struct xe_gt *gt)
+int xe_gt_idle_disable_c6(struct xe_gt *gt)
{
+ unsigned int fw_ref;
+
xe_device_assert_mem_access(gt_to_xe(gt));
- xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
if (IS_SRIOV_VF(gt_to_xe(gt)))
- return;
+ return 0;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
xe_mmio_write32(&gt->mmio, RC_CONTROL, 0);
xe_mmio_write32(&gt->mmio, RC_STATE, 0);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.h b/drivers/gpu/drm/xe/xe_gt_idle.h
index 591a01e181bc..9c34a155e102 100644
--- a/drivers/gpu/drm/xe/xe_gt_idle.h
+++ b/drivers/gpu/drm/xe/xe_gt_idle.h
@@ -13,7 +13,7 @@ struct xe_gt;
int xe_gt_idle_init(struct xe_gt_idle *gtidle);
void xe_gt_idle_enable_c6(struct xe_gt *gt);
-void xe_gt_idle_disable_c6(struct xe_gt *gt);
+int xe_gt_idle_disable_c6(struct xe_gt *gt);
void xe_gt_idle_enable_pg(struct xe_gt *gt);
void xe_gt_idle_disable_pg(struct xe_gt *gt);
int xe_gt_idle_pg_print(struct xe_gt *gt, struct drm_printer *p);
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c
index 64a2f0d6aaf9..164010860664 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.c
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.c
@@ -46,8 +46,6 @@
* MCR registers are not available on Virtual Function (VF).
*/
-#define STEER_SEMAPHORE XE_REG(0xFD0)
-
static inline struct xe_reg to_xe_reg(struct xe_reg_mcr reg_mcr)
{
return reg_mcr.__reg;
@@ -171,6 +169,15 @@ static const struct xe_mmio_range xelpg_dss_steering_table[] = {
{},
};
+static const struct xe_mmio_range xe3p_xpc_xecore_steering_table[] = {
+ { 0x008140, 0x00817F }, /* SLICE, XeCore, SLICE */
+ { 0x009480, 0x00955F }, /* SLICE, XeCore */
+ { 0x00D800, 0x00D87F }, /* SLICE */
+ { 0x00DC00, 0x00E9FF }, /* SLICE, rsvd, XeCore, rsvd, XeCore, rsvd, XeCore */
+ { 0x013000, 0x0135FF }, /* XeCore, SLICE */
+ {},
+};
+
static const struct xe_mmio_range xelpmp_oaddrm_steering_table[] = {
{ 0x393200, 0x39323F },
{ 0x393400, 0x3934FF },
@@ -238,21 +245,60 @@ static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = {
};
static const struct xe_mmio_range xe3lpm_instance0_steering_table[] = {
- { 0x384000, 0x3847DF }, /* GAM, rsvd, GAM */
+ { 0x384000, 0x3841FF }, /* GAM */
+ { 0x384400, 0x3847DF }, /* GAM */
{ 0x384900, 0x384AFF }, /* GAM */
{ 0x389560, 0x3895FF }, /* MEDIAINF */
{ 0x38B600, 0x38B8FF }, /* L3BANK */
{ 0x38C800, 0x38D07F }, /* GAM, MEDIAINF */
- { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, GAM */
+ { 0x38D0D0, 0x38F0FF }, /* MEDIAINF, rsvd, GAM */
{ 0x393C00, 0x393C7F }, /* MEDIAINF */
{},
};
+/*
+ * Different "GAM" ranges have different rules; GAMWKRS, STLB, and GAMREQSTRM
+ * range subtypes need to be steered to (1,0), while all other GAM subtypes
+ * are steered to (0,0) and are included in the "INSTANCE0" table farther
+ * down.
+ */
+static const struct xe_mmio_range xe3p_xpc_gam_grp1_steering_table[] = {
+ { 0x004000, 0x004AFF }, /* GAMREQSTRM, rsvd, STLB, GAMWKRS, GAMREQSTRM */
+ { 0x00F100, 0x00FFFF }, /* GAMWKRS */
+ {},
+};
+
+static const struct xe_mmio_range xe3p_xpc_node_steering_table[] = {
+ { 0x00B000, 0x00B0FF },
+ { 0x00D880, 0x00D8FF },
+ {},
+};
+
+static const struct xe_mmio_range xe3p_xpc_instance0_steering_table[] = {
+ { 0x00B500, 0x00B6FF }, /* PSMI */
+ { 0x00C800, 0x00CFFF }, /* GAMCTRL */
+ { 0x00F000, 0x00F0FF }, /* GAMCTRL */
+ {},
+};
+
static void init_steering_l3bank(struct xe_gt *gt)
{
+ struct xe_device *xe = gt_to_xe(gt);
struct xe_mmio *mmio = &gt->mmio;
- if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
+ if (GRAPHICS_VER(xe) >= 35) {
+ unsigned int first_bank = xe_l3_bank_mask_ffs(gt->fuse_topo.l3_bank_mask);
+ const int banks_per_node = 4;
+ unsigned int node = first_bank / banks_per_node;
+
+ /* L3BANK ranges place node in grpid, bank in instanceid */
+ gt->steering[L3BANK].group_target = node;
+ gt->steering[L3BANK].instance_target = first_bank % banks_per_node;
+
+ /* NODE ranges split the node across grpid and instanceid */
+ gt->steering[NODE].group_target = node >> 1;
+ gt->steering[NODE].instance_target = node & 1;
+ } else if (GRAPHICS_VERx100(xe) >= 1270) {
u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
xe_mmio_read32(mmio, MIRROR_FUSE3));
u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK,
@@ -265,7 +311,7 @@ static void init_steering_l3bank(struct xe_gt *gt)
gt->steering[L3BANK].group_target = __ffs(mslice_mask);
gt->steering[L3BANK].instance_target =
bank_mask & BIT(0) ? 0 : 2;
- } else if (gt_to_xe(gt)->info.platform == XE_DG2) {
+ } else if (xe->info.platform == XE_DG2) {
u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
xe_mmio_read32(mmio, MIRROR_FUSE3));
u32 bank = __ffs(mslice_mask) * 8;
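
Worked example for the GRAPHICS_VER >= 35 branch above: if the lowest enabled L3 bank is fuse bit 5, then with four banks per node the bank sits in node 1, so L3BANK steers to (group 1, instance 1) and NODE to (group 0, instance 1). The same arithmetic as a sketch (first_bank = 5 is an assumed fuse value):

    unsigned int first_bank = 5;                        /* assumed fuse bit */
    const int banks_per_node = 4;
    unsigned int node = first_bank / banks_per_node;            /* 1 */

    unsigned int l3bank_group = node;                           /* 1 */
    unsigned int l3bank_instance = first_bank % banks_per_node; /* 1 */
    unsigned int node_group = node >> 1;                        /* 0 */
    unsigned int node_instance = node & 1;                      /* 1 */
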
@@ -364,7 +410,7 @@ fallback:
* @group: pointer to storage for steering group ID
* @instance: pointer to storage for steering instance ID
*/
-void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
+void xe_gt_mcr_get_dss_steering(const struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
{
xe_gt_assert(gt, dss < XE_MAX_DSS_FUSE_BITS);
@@ -420,16 +466,24 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt)
gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
}
+static void init_steering_gam1(struct xe_gt *gt)
+{
+ gt->steering[GAM1].group_target = 1;
+ gt->steering[GAM1].instance_target = 0;
+}
+
static const struct {
const char *name;
void (*init)(struct xe_gt *gt);
} xe_steering_types[] = {
[L3BANK] = { "L3BANK", init_steering_l3bank },
+ [NODE] = { "NODE", NULL }, /* initialized by l3bank init */
[MSLICE] = { "MSLICE", init_steering_mslice },
[LNCF] = { "LNCF", NULL }, /* initialized by mslice init */
- [DSS] = { "DSS", init_steering_dss },
+ [DSS] = { "DSS / XeCore", init_steering_dss },
[OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm },
[SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi },
+ [GAM1] = { "GAMWKRS / STLB / GAMREQSTRM", init_steering_gam1 },
[INSTANCE0] = { "INSTANCE 0", NULL },
[IMPLICIT_STEERING] = { "IMPLICIT", NULL },
};
@@ -468,7 +522,19 @@ void xe_gt_mcr_init_early(struct xe_gt *gt)
gt->steering[OADDRM].ranges = xelpmp_oaddrm_steering_table;
}
} else {
- if (GRAPHICS_VER(xe) >= 20) {
+ if (GRAPHICS_VERx100(xe) == 3511) {
+ /*
+ * TODO: there are some ranges in bspec with missing
+ * termination: [0x00B000, 0x00B0FF] and
+ * [0x00D880, 0x00D8FF] (NODE); [0x00B100, 0x00B3FF]
+ * (L3BANK). Update them here once bspec is updated.
+ */
+ gt->steering[DSS].ranges = xe3p_xpc_xecore_steering_table;
+ gt->steering[GAM1].ranges = xe3p_xpc_gam_grp1_steering_table;
+ gt->steering[INSTANCE0].ranges = xe3p_xpc_instance0_steering_table;
+ gt->steering[L3BANK].ranges = xelpg_l3bank_steering_table;
+ gt->steering[NODE].ranges = xe3p_xpc_node_steering_table;
+ } else if (GRAPHICS_VER(xe) >= 20) {
gt->steering[DSS].ranges = xe2lpg_dss_steering_table;
gt->steering[SQIDI_PSMI].ranges = xe2lpg_sqidi_psmi_steering_table;
gt->steering[INSTANCE0].ranges = xe2lpg_instance0_steering_table;
@@ -533,7 +599,7 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);
- xe_mmio_write32(&gt->mmio, MCFG_MCR_SELECTOR, steer_val);
+ xe_mmio_write32(&gt->mmio, STEER_SEMAPHORE, steer_val);
xe_mmio_write32(&gt->mmio, SF_MCR_SELECTOR, steer_val);
/*
* For GAM registers, all reads should be directed to instance 1
diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.h b/drivers/gpu/drm/xe/xe_gt_mcr.h
index bc06520befab..283a1c9770e2 100644
--- a/drivers/gpu/drm/xe/xe_gt_mcr.h
+++ b/drivers/gpu/drm/xe/xe_gt_mcr.h
@@ -31,7 +31,8 @@ bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
u8 *group, u8 *instance);
void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p);
-void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance);
+void xe_gt_mcr_get_dss_steering(const struct xe_gt *gt,
+ unsigned int dss, u16 *group, u16 *instance);
u32 xe_gt_mcr_steering_info_to_dss_id(struct xe_gt *gt, u16 group, u16 instance);
/*
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
deleted file mode 100644
index 5a75d56d8558..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ /dev/null
@@ -1,688 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#include "xe_gt_pagefault.h"
-
-#include <linux/bitfield.h>
-#include <linux/circ_buf.h>
-
-#include <drm/drm_exec.h>
-#include <drm/drm_managed.h>
-
-#include "abi/guc_actions_abi.h"
-#include "xe_bo.h"
-#include "xe_gt.h"
-#include "xe_gt_printk.h"
-#include "xe_gt_stats.h"
-#include "xe_gt_tlb_invalidation.h"
-#include "xe_guc.h"
-#include "xe_guc_ct.h"
-#include "xe_migrate.h"
-#include "xe_svm.h"
-#include "xe_trace_bo.h"
-#include "xe_vm.h"
-
-struct pagefault {
- u64 page_addr;
- u32 asid;
- u16 pdata;
- u8 vfid;
- u8 access_type;
- u8 fault_type;
- u8 fault_level;
- u8 engine_class;
- u8 engine_instance;
- u8 fault_unsuccessful;
- bool trva_fault;
-};
-
-enum access_type {
- ACCESS_TYPE_READ = 0,
- ACCESS_TYPE_WRITE = 1,
- ACCESS_TYPE_ATOMIC = 2,
- ACCESS_TYPE_RESERVED = 3,
-};
-
-enum fault_type {
- NOT_PRESENT = 0,
- WRITE_ACCESS_VIOLATION = 1,
- ATOMIC_ACCESS_VIOLATION = 2,
-};
-
-struct acc {
- u64 va_range_base;
- u32 asid;
- u32 sub_granularity;
- u8 granularity;
- u8 vfid;
- u8 access_type;
- u8 engine_class;
- u8 engine_instance;
-};
-
-static bool access_is_atomic(enum access_type access_type)
-{
- return access_type == ACCESS_TYPE_ATOMIC;
-}
-
-static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
-{
- return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
- vma->tile_invalidated);
-}
-
-static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
- bool atomic, unsigned int id)
-{
- struct xe_bo *bo = xe_vma_bo(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- int err;
-
- err = xe_vm_lock_vma(exec, vma);
- if (err)
- return err;
-
- if (atomic && IS_DGFX(vm->xe)) {
- if (xe_vma_is_userptr(vma)) {
- err = -EACCES;
- return err;
- }
-
- /* Migrate to VRAM, move should invalidate the VMA first */
- err = xe_bo_migrate(bo, XE_PL_VRAM0 + id);
- if (err)
- return err;
- } else if (bo) {
- /* Create backing store if needed */
- err = xe_bo_validate(bo, vm, true);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
- bool atomic)
-{
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_tile *tile = gt_to_tile(gt);
- struct drm_exec exec;
- struct dma_fence *fence;
- ktime_t end = 0;
- int err;
-
- lockdep_assert_held_write(&vm->lock);
-
- xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
- xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, xe_vma_size(vma) / 1024);
-
- trace_xe_vma_pagefault(vma);
-
- /* Check if VMA is valid, opportunistic check only */
- if (vma_is_valid(tile, vma) && !atomic)
- return 0;
-
-retry_userptr:
- if (xe_vma_is_userptr(vma) &&
- xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
- struct xe_userptr_vma *uvma = to_userptr_vma(vma);
-
- err = xe_vma_userptr_pin_pages(uvma);
- if (err)
- return err;
- }
-
- /* Lock VM and BOs dma-resv */
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- err = xe_pf_begin(&exec, vma, atomic, tile->id);
- drm_exec_retry_on_contention(&exec);
- if (xe_vm_validate_should_retry(&exec, err, &end))
- err = -EAGAIN;
- if (err)
- goto unlock_dma_resv;
-
- /* Bind VMA only to the GT that has faulted */
- trace_xe_vma_pf_bind(vma);
- fence = xe_vma_rebind(vm, vma, BIT(tile->id));
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- if (xe_vm_validate_should_retry(&exec, err, &end))
- err = -EAGAIN;
- goto unlock_dma_resv;
- }
- }
-
- dma_fence_wait(fence, false);
- dma_fence_put(fence);
-
-unlock_dma_resv:
- drm_exec_fini(&exec);
- if (err == -EAGAIN)
- goto retry_userptr;
-
- return err;
-}
-
-static struct xe_vm *asid_to_vm(struct xe_device *xe, u32 asid)
-{
- struct xe_vm *vm;
-
- down_read(&xe->usm.lock);
- vm = xa_load(&xe->usm.asid_to_vm, asid);
- if (vm && xe_vm_in_fault_mode(vm))
- xe_vm_get(vm);
- else
- vm = ERR_PTR(-EINVAL);
- up_read(&xe->usm.lock);
-
- return vm;
-}
-
-static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
-{
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_vm *vm;
- struct xe_vma *vma = NULL;
- int err;
- bool atomic;
-
- /* SW isn't expected to handle TRTT faults */
- if (pf->trva_fault)
- return -EFAULT;
-
- vm = asid_to_vm(xe, pf->asid);
- if (IS_ERR(vm))
- return PTR_ERR(vm);
-
- /*
- * TODO: Change to read lock? Using write lock for simplicity.
- */
- down_write(&vm->lock);
-
- if (xe_vm_is_closed(vm)) {
- err = -ENOENT;
- goto unlock_vm;
- }
-
- vma = xe_vm_find_vma_by_addr(vm, pf->page_addr);
- if (!vma) {
- err = -EINVAL;
- goto unlock_vm;
- }
-
- atomic = access_is_atomic(pf->access_type);
-
- if (xe_vma_is_cpu_addr_mirror(vma))
- err = xe_svm_handle_pagefault(vm, vma, gt,
- pf->page_addr, atomic);
- else
- err = handle_vma_pagefault(gt, vma, atomic);
-
-unlock_vm:
- if (!err)
- vm->usm.last_fault_vma = vma;
- up_write(&vm->lock);
- xe_vm_put(vm);
-
- return err;
-}
-
-static int send_pagefault_reply(struct xe_guc *guc,
- struct xe_guc_pagefault_reply *reply)
-{
- u32 action[] = {
- XE_GUC_ACTION_PAGE_FAULT_RES_DESC,
- reply->dw0,
- reply->dw1,
- };
-
- return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
-}
-
-static void print_pagefault(struct xe_gt *gt, struct pagefault *pf)
-{
- xe_gt_dbg(gt, "\n\tASID: %d\n"
- "\tVFID: %d\n"
- "\tPDATA: 0x%04x\n"
- "\tFaulted Address: 0x%08x%08x\n"
- "\tFaultType: %d\n"
- "\tAccessType: %d\n"
- "\tFaultLevel: %d\n"
- "\tEngineClass: %d %s\n"
- "\tEngineInstance: %d\n",
- pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
- lower_32_bits(pf->page_addr),
- pf->fault_type, pf->access_type, pf->fault_level,
- pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
- pf->engine_instance);
-}
-
-#define PF_MSG_LEN_DW 4
-
-static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
-{
- const struct xe_guc_pagefault_desc *desc;
- bool ret = false;
-
- spin_lock_irq(&pf_queue->lock);
- if (pf_queue->tail != pf_queue->head) {
- desc = (const struct xe_guc_pagefault_desc *)
- (pf_queue->data + pf_queue->tail);
-
- pf->fault_level = FIELD_GET(PFD_FAULT_LEVEL, desc->dw0);
- pf->trva_fault = FIELD_GET(XE2_PFD_TRVA_FAULT, desc->dw0);
- pf->engine_class = FIELD_GET(PFD_ENG_CLASS, desc->dw0);
- pf->engine_instance = FIELD_GET(PFD_ENG_INSTANCE, desc->dw0);
- pf->pdata = FIELD_GET(PFD_PDATA_HI, desc->dw1) <<
- PFD_PDATA_HI_SHIFT;
- pf->pdata |= FIELD_GET(PFD_PDATA_LO, desc->dw0);
- pf->asid = FIELD_GET(PFD_ASID, desc->dw1);
- pf->vfid = FIELD_GET(PFD_VFID, desc->dw2);
- pf->access_type = FIELD_GET(PFD_ACCESS_TYPE, desc->dw2);
- pf->fault_type = FIELD_GET(PFD_FAULT_TYPE, desc->dw2);
- pf->page_addr = (u64)(FIELD_GET(PFD_VIRTUAL_ADDR_HI, desc->dw3)) <<
- PFD_VIRTUAL_ADDR_HI_SHIFT;
- pf->page_addr |= FIELD_GET(PFD_VIRTUAL_ADDR_LO, desc->dw2) <<
- PFD_VIRTUAL_ADDR_LO_SHIFT;
-
- pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
- pf_queue->num_dw;
- ret = true;
- }
- spin_unlock_irq(&pf_queue->lock);
-
- return ret;
-}
-
-static bool pf_queue_full(struct pf_queue *pf_queue)
-{
- lockdep_assert_held(&pf_queue->lock);
-
- return CIRC_SPACE(pf_queue->head, pf_queue->tail,
- pf_queue->num_dw) <=
- PF_MSG_LEN_DW;
-}
-
-int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- struct pf_queue *pf_queue;
- unsigned long flags;
- u32 asid;
- bool full;
-
- if (unlikely(len != PF_MSG_LEN_DW))
- return -EPROTO;
-
- asid = FIELD_GET(PFD_ASID, msg[1]);
- pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
-
- /*
- * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
- */
- xe_gt_assert(gt, !(pf_queue->num_dw % PF_MSG_LEN_DW));
-
- spin_lock_irqsave(&pf_queue->lock, flags);
- full = pf_queue_full(pf_queue);
- if (!full) {
- memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
- pf_queue->head = (pf_queue->head + len) %
- pf_queue->num_dw;
- queue_work(gt->usm.pf_wq, &pf_queue->worker);
- } else {
- xe_gt_warn(gt, "PageFault Queue full, shouldn't be possible\n");
- }
- spin_unlock_irqrestore(&pf_queue->lock, flags);
-
- return full ? -ENOSPC : 0;
-}
-
-#define USM_QUEUE_MAX_RUNTIME_MS 20
-
-static void pf_queue_work_func(struct work_struct *w)
-{
- struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
- struct xe_gt *gt = pf_queue->gt;
- struct xe_guc_pagefault_reply reply = {};
- struct pagefault pf = {};
- unsigned long threshold;
- int ret;
-
- threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);
-
- while (get_pagefault(pf_queue, &pf)) {
- ret = handle_pagefault(gt, &pf);
- if (unlikely(ret)) {
- print_pagefault(gt, &pf);
- pf.fault_unsuccessful = 1;
- xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(ret));
- }
-
- reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
- FIELD_PREP(PFR_SUCCESS, pf.fault_unsuccessful) |
- FIELD_PREP(PFR_REPLY, PFR_ACCESS) |
- FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) |
- FIELD_PREP(PFR_ASID, pf.asid);
-
- reply.dw1 = FIELD_PREP(PFR_VFID, pf.vfid) |
- FIELD_PREP(PFR_ENG_INSTANCE, pf.engine_instance) |
- FIELD_PREP(PFR_ENG_CLASS, pf.engine_class) |
- FIELD_PREP(PFR_PDATA, pf.pdata);
-
- send_pagefault_reply(&gt->uc.guc, &reply);
-
- if (time_after(jiffies, threshold) &&
- pf_queue->tail != pf_queue->head) {
- queue_work(gt->usm.pf_wq, w);
- break;
- }
- }
-}
-
-static void acc_queue_work_func(struct work_struct *w);
-
-static void pagefault_fini(void *arg)
-{
- struct xe_gt *gt = arg;
- struct xe_device *xe = gt_to_xe(gt);
-
- if (!xe->info.has_usm)
- return;
-
- destroy_workqueue(gt->usm.acc_wq);
- destroy_workqueue(gt->usm.pf_wq);
-}
-
-static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
-{
- struct xe_device *xe = gt_to_xe(gt);
- xe_dss_mask_t all_dss;
- int num_dss, num_eus;
-
- bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
- XE_MAX_DSS_FUSE_BITS);
-
- num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
- num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
- XE_MAX_EU_FUSE_BITS) * num_dss;
-
- /*
- * user can issue separate page faults per EU and per CS
- *
- * XXX: Multiplier required as compute UMD are getting PF queue errors
- * without it. Follow on why this multiplier is required.
- */
-#define PF_MULTIPLIER 8
- pf_queue->num_dw =
- (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER;
- pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw);
-#undef PF_MULTIPLIER
-
- pf_queue->gt = gt;
- pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
- sizeof(u32), GFP_KERNEL);
- if (!pf_queue->data)
- return -ENOMEM;
-
- spin_lock_init(&pf_queue->lock);
- INIT_WORK(&pf_queue->worker, pf_queue_work_func);
-
- return 0;
-}
-
-int xe_gt_pagefault_init(struct xe_gt *gt)
-{
- struct xe_device *xe = gt_to_xe(gt);
- int i, ret = 0;
-
- if (!xe->info.has_usm)
- return 0;
-
- for (i = 0; i < NUM_PF_QUEUE; ++i) {
- ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
- if (ret)
- return ret;
- }
- for (i = 0; i < NUM_ACC_QUEUE; ++i) {
- gt->usm.acc_queue[i].gt = gt;
- spin_lock_init(&gt->usm.acc_queue[i].lock);
- INIT_WORK(&gt->usm.acc_queue[i].worker, acc_queue_work_func);
- }
-
- gt->usm.pf_wq = alloc_workqueue("xe_gt_page_fault_work_queue",
- WQ_UNBOUND | WQ_HIGHPRI, NUM_PF_QUEUE);
- if (!gt->usm.pf_wq)
- return -ENOMEM;
-
- gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
- WQ_UNBOUND | WQ_HIGHPRI,
- NUM_ACC_QUEUE);
- if (!gt->usm.acc_wq) {
- destroy_workqueue(gt->usm.pf_wq);
- return -ENOMEM;
- }
-
- return devm_add_action_or_reset(xe->drm.dev, pagefault_fini, gt);
-}
-
-void xe_gt_pagefault_reset(struct xe_gt *gt)
-{
- struct xe_device *xe = gt_to_xe(gt);
- int i;
-
- if (!xe->info.has_usm)
- return;
-
- for (i = 0; i < NUM_PF_QUEUE; ++i) {
- spin_lock_irq(&gt->usm.pf_queue[i].lock);
- gt->usm.pf_queue[i].head = 0;
- gt->usm.pf_queue[i].tail = 0;
- spin_unlock_irq(&gt->usm.pf_queue[i].lock);
- }
-
- for (i = 0; i < NUM_ACC_QUEUE; ++i) {
- spin_lock(&gt->usm.acc_queue[i].lock);
- gt->usm.acc_queue[i].head = 0;
- gt->usm.acc_queue[i].tail = 0;
- spin_unlock(&gt->usm.acc_queue[i].lock);
- }
-}
-
-static int granularity_in_byte(int val)
-{
- switch (val) {
- case 0:
- return SZ_128K;
- case 1:
- return SZ_2M;
- case 2:
- return SZ_16M;
- case 3:
- return SZ_64M;
- default:
- return 0;
- }
-}
-
-static int sub_granularity_in_byte(int val)
-{
- return (granularity_in_byte(val) / 32);
-}
-
-static void print_acc(struct xe_gt *gt, struct acc *acc)
-{
- xe_gt_warn(gt, "Access counter request:\n"
- "\tType: %s\n"
- "\tASID: %d\n"
- "\tVFID: %d\n"
- "\tEngine: %d:%d\n"
- "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
- "\tSub_Granularity Vector: 0x%08x\n"
- "\tVA Range base: 0x%016llx\n",
- acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
- acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
- granularity_in_byte(acc->granularity) / SZ_1K,
- sub_granularity_in_byte(acc->granularity) / SZ_1K,
- acc->sub_granularity, acc->va_range_base);
-}
-
-static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
-{
- u64 page_va = acc->va_range_base + (ffs(acc->sub_granularity) - 1) *
- sub_granularity_in_byte(acc->granularity);
-
- return xe_vm_find_overlapping_vma(vm, page_va, SZ_4K);
-}
-
-static int handle_acc(struct xe_gt *gt, struct acc *acc)
-{
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_tile *tile = gt_to_tile(gt);
- struct drm_exec exec;
- struct xe_vm *vm;
- struct xe_vma *vma;
- int ret = 0;
-
- /* We only support ACC_TRIGGER at the moment */
- if (acc->access_type != ACC_TRIGGER)
- return -EINVAL;
-
- vm = asid_to_vm(xe, acc->asid);
- if (IS_ERR(vm))
- return PTR_ERR(vm);
-
- down_read(&vm->lock);
-
- /* Lookup VMA */
- vma = get_acc_vma(vm, acc);
- if (!vma) {
- ret = -EINVAL;
- goto unlock_vm;
- }
-
- trace_xe_vma_acc(vma);
-
- /* Userptr or null can't be migrated, nothing to do */
- if (xe_vma_has_no_bo(vma))
- goto unlock_vm;
-
- /* Lock VM and BOs dma-resv */
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- ret = xe_pf_begin(&exec, vma, true, tile->id);
- drm_exec_retry_on_contention(&exec);
- if (ret)
- break;
- }
-
- drm_exec_fini(&exec);
-unlock_vm:
- up_read(&vm->lock);
- xe_vm_put(vm);
-
- return ret;
-}
-
-#define make_u64(hi__, low__) ((u64)(hi__) << 32 | (u64)(low__))
-
-#define ACC_MSG_LEN_DW 4
-
-static bool get_acc(struct acc_queue *acc_queue, struct acc *acc)
-{
- const struct xe_guc_acc_desc *desc;
- bool ret = false;
-
- spin_lock(&acc_queue->lock);
- if (acc_queue->tail != acc_queue->head) {
- desc = (const struct xe_guc_acc_desc *)
- (acc_queue->data + acc_queue->tail);
-
- acc->granularity = FIELD_GET(ACC_GRANULARITY, desc->dw2);
- acc->sub_granularity = FIELD_GET(ACC_SUBG_HI, desc->dw1) << 31 |
- FIELD_GET(ACC_SUBG_LO, desc->dw0);
- acc->engine_class = FIELD_GET(ACC_ENG_CLASS, desc->dw1);
- acc->engine_instance = FIELD_GET(ACC_ENG_INSTANCE, desc->dw1);
- acc->asid = FIELD_GET(ACC_ASID, desc->dw1);
- acc->vfid = FIELD_GET(ACC_VFID, desc->dw2);
- acc->access_type = FIELD_GET(ACC_TYPE, desc->dw0);
- acc->va_range_base = make_u64(desc->dw3 & ACC_VIRTUAL_ADDR_RANGE_HI,
- desc->dw2 & ACC_VIRTUAL_ADDR_RANGE_LO);
-
- acc_queue->tail = (acc_queue->tail + ACC_MSG_LEN_DW) %
- ACC_QUEUE_NUM_DW;
- ret = true;
- }
- spin_unlock(&acc_queue->lock);
-
- return ret;
-}
-
-static void acc_queue_work_func(struct work_struct *w)
-{
- struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
- struct xe_gt *gt = acc_queue->gt;
- struct acc acc = {};
- unsigned long threshold;
- int ret;
-
- threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);
-
- while (get_acc(acc_queue, &acc)) {
- ret = handle_acc(gt, &acc);
- if (unlikely(ret)) {
- print_acc(gt, &acc);
- xe_gt_warn(gt, "ACC: Unsuccessful %pe\n", ERR_PTR(ret));
- }
-
- if (time_after(jiffies, threshold) &&
- acc_queue->tail != acc_queue->head) {
- queue_work(gt->usm.acc_wq, w);
- break;
- }
- }
-}
-
-static bool acc_queue_full(struct acc_queue *acc_queue)
-{
- lockdep_assert_held(&acc_queue->lock);
-
- return CIRC_SPACE(acc_queue->head, acc_queue->tail, ACC_QUEUE_NUM_DW) <=
- ACC_MSG_LEN_DW;
-}
-
-int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- struct acc_queue *acc_queue;
- u32 asid;
- bool full;
-
- /*
- * The below logic doesn't work unless ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW == 0
- */
- BUILD_BUG_ON(ACC_QUEUE_NUM_DW % ACC_MSG_LEN_DW);
-
- if (unlikely(len != ACC_MSG_LEN_DW))
- return -EPROTO;
-
- asid = FIELD_GET(ACC_ASID, msg[1]);
- acc_queue = &gt->usm.acc_queue[asid % NUM_ACC_QUEUE];
-
- spin_lock(&acc_queue->lock);
- full = acc_queue_full(acc_queue);
- if (!full) {
- memcpy(acc_queue->data + acc_queue->head, msg,
- len * sizeof(u32));
- acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
- queue_work(gt->usm.acc_wq, &acc_queue->worker);
- } else {
- xe_gt_warn(gt, "ACC Queue full, dropping ACC\n");
- }
- spin_unlock(&acc_queue->lock);
-
- return full ? -ENOSPC : 0;
-}
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.h b/drivers/gpu/drm/xe/xe_gt_pagefault.h
deleted file mode 100644
index 839c065a5e4c..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2022 Intel Corporation
- */
-
-#ifndef _XE_GT_PAGEFAULT_H_
-#define _XE_GT_PAGEFAULT_H_
-
-#include <linux/types.h>
-
-struct xe_gt;
-struct xe_guc;
-
-int xe_gt_pagefault_init(struct xe_gt *gt);
-void xe_gt_pagefault_reset(struct xe_gt *gt);
-int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len);
-int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len);
-
-#endif /* _XE_GT_PAGEFAULT_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_printk.h b/drivers/gpu/drm/xe/xe_gt_printk.h
index 11da0228cea7..1313d32862db 100644
--- a/drivers/gpu/drm/xe/xe_gt_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_printk.h
@@ -6,18 +6,22 @@
#ifndef _XE_GT_PRINTK_H_
#define _XE_GT_PRINTK_H_
-#include <drm/drm_print.h>
-
#include "xe_gt_types.h"
+#include "xe_tile_printk.h"
+
+#define __XE_GT_PRINTK_FMT(_gt, _fmt, _args...) "GT%u: " _fmt, (_gt)->info.id, ##_args
#define xe_gt_printk(_gt, _level, _fmt, ...) \
- drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ xe_tile_printk((_gt)->tile, _level, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
+
+#define xe_gt_err(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__)
#define xe_gt_err_once(_gt, _fmt, ...) \
xe_gt_printk((_gt), err_once, _fmt, ##__VA_ARGS__)
-#define xe_gt_err(_gt, _fmt, ...) \
- xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__)
+#define xe_gt_err_ratelimited(_gt, _fmt, ...) \
+ xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__)
#define xe_gt_warn(_gt, _fmt, ...) \
xe_gt_printk((_gt), warn, _fmt, ##__VA_ARGS__)
@@ -31,20 +35,20 @@
#define xe_gt_dbg(_gt, _fmt, ...) \
xe_gt_printk((_gt), dbg, _fmt, ##__VA_ARGS__)
-#define xe_gt_err_ratelimited(_gt, _fmt, ...) \
- xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__)
+#define xe_gt_WARN_type(_gt, _type, _condition, _fmt, ...) \
+ xe_tile_WARN##_type((_gt)->tile, _condition, _fmt, ## __VA_ARGS__)
#define xe_gt_WARN(_gt, _condition, _fmt, ...) \
- drm_WARN(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ xe_gt_WARN_type((_gt),, _condition, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
#define xe_gt_WARN_ONCE(_gt, _condition, _fmt, ...) \
- drm_WARN_ONCE(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+ xe_gt_WARN_type((_gt), _ONCE, _condition, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
#define xe_gt_WARN_ON(_gt, _condition) \
- xe_gt_WARN((_gt), _condition, "%s(%s)", "gt_WARN_ON", __stringify(_condition))
+ xe_gt_WARN((_gt), _condition, "%s(%s)", "WARN_ON", __stringify(_condition))
#define xe_gt_WARN_ON_ONCE(_gt, _condition) \
- xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "gt_WARN_ON_ONCE", __stringify(_condition))
+ xe_gt_WARN_ONCE((_gt), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition))
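
After this rework every GT message funnels through the tile printer, with __XE_GT_PRINTK_FMT() prepending the "GT%u: " prefix. Roughly, a call such as the first line below expands to the second (illustration only, ignoring macro hygiene):

    xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
    /* expands to approximately: */
    xe_tile_printk(gt->tile, err, "GT%u: reset failed (%pe)\n",
                   gt->info.id, ERR_PTR(err));
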
static inline void __xe_gt_printfn_err(struct drm_printer *p, struct va_format *vaf)
{
@@ -67,12 +71,12 @@ static inline void __xe_gt_printfn_dbg(struct drm_printer *p, struct va_format *
/*
* The original xe_gt_dbg() callsite annotations are useless here,
- * redirect to the tweaked drm_dbg_printer() instead.
+ * redirect to the tweaked xe_tile_dbg_printer() instead.
*/
- dbg = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, NULL);
+ dbg = xe_tile_dbg_printer((gt)->tile);
dbg.origin = p->origin;
- drm_printf(&dbg, "GT%u: %pV", gt->info.id, vaf);
+ drm_printf(&dbg, __XE_GT_PRINTK_FMT(gt, "%pV", vaf));
}
/**
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index bdbd15f3afe3..0714c758b9c1 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -55,7 +55,12 @@ static void pf_init_workers(struct xe_gt *gt)
static void pf_fini_workers(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- disable_work_sync(&gt->sriov.pf.workers.restart);
+
+ if (disable_work_sync(&gt->sriov.pf.workers.restart)) {
+ xe_gt_sriov_dbg_verbose(gt, "pending restart disabled!\n");
+ /* release an rpm reference taken on the worker's behalf */
+ xe_pm_runtime_put(gt_to_xe(gt));
+ }
}
/**
@@ -153,39 +158,19 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
xe_gt_sriov_pf_service_update(gt);
}
-static u32 pf_get_vf_regs_stride(struct xe_device *xe)
-{
- return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
-}
-
-static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride)
-{
- struct xe_reg pf_reg = vf_reg;
-
- pf_reg.vf = 0;
- pf_reg.addr += stride * vfid;
-
- return pf_reg;
-}
-
static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid)
{
- u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt));
- struct xe_reg scratch;
- int n, count;
+ struct xe_mmio mmio;
+ int n;
+
+ xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid);
if (xe_gt_is_media_type(gt)) {
- count = MED_VF_SW_FLAG_COUNT;
- for (n = 0; n < count; n++) {
- scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride);
- xe_mmio_write32(&gt->mmio, scratch, 0);
- }
+ for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
+ xe_mmio_write32(&mmio, MED_VF_SW_FLAG(n), 0);
} else {
- count = VF_SW_FLAG_COUNT;
- for (n = 0; n < count; n++) {
- scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride);
- xe_mmio_write32(&gt->mmio, scratch, 0);
- }
+ for (n = 0; n < VF_SW_FLAG_COUNT; n++)
+ xe_mmio_write32(&mmio, VF_SW_FLAG(n), 0);
}
}
@@ -207,8 +192,11 @@ static void pf_cancel_restart(struct xe_gt *gt)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- if (cancel_work_sync(&gt->sriov.pf.workers.restart))
+ if (cancel_work_sync(&gt->sriov.pf.workers.restart)) {
xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n");
+ /* release an rpm reference taken on the worker's behalf */
+ xe_pm_runtime_put(gt_to_xe(gt));
+ }
}
/**
@@ -226,9 +214,12 @@ static void pf_restart(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- xe_pm_runtime_get(xe);
+ xe_gt_assert(gt, !xe_pm_runtime_suspended(xe));
+
xe_gt_sriov_pf_config_restart(gt);
xe_gt_sriov_pf_control_restart(gt);
+
+ /* release an rpm reference taken on our behalf */
xe_pm_runtime_put(xe);
xe_gt_sriov_dbg(gt, "restart completed\n");
@@ -247,8 +238,13 @@ static void pf_queue_restart(struct xe_gt *gt)
xe_gt_assert(gt, IS_SRIOV_PF(xe));
- if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart))
+ /* take an rpm reference on behalf of the worker */
+ xe_pm_runtime_get_noresume(xe);
+
+ if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart)) {
xe_gt_sriov_dbg(gt, "restart already in queue!\n");
+ xe_pm_runtime_put(xe);
+ }
}
/**
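
The hunks above change who owns the runtime-PM reference for the restart worker: pf_queue_restart() now takes a noresume reference on the worker's behalf, and whichever path retires the work (the worker itself, a cancel, or a disable) releases it. A toy single-threaded sketch of that ownership pattern, assuming simplified stand-ins for queue_work()/cancel_work_sync() and the runtime-PM counter:

/* sketch only: the flags and counters below are not driver state */
#include <stdbool.h>
#include <stdio.h>

static int rpm_refs;
static bool work_pending_flag;

static void rpm_get_noresume(void) { rpm_refs++; }
static void rpm_put(void) { rpm_refs--; }

static bool queue_work_once(void)
{
	if (work_pending_flag)
		return false;		/* already queued */
	work_pending_flag = true;
	return true;
}

static bool cancel_work(void)
{
	bool was_pending = work_pending_flag;

	work_pending_flag = false;
	return was_pending;
}

static void worker_fn(void)
{
	work_pending_flag = false;
	/* ... perform the restart ... */
	rpm_put();			/* release the queue-time reference */
}

static void queue_restart(void)
{
	rpm_get_noresume();		/* pin the device awake for the worker */
	if (!queue_work_once())
		rpm_put();		/* a queued worker already owns one */
}

static void cancel_restart(void)
{
	if (cancel_work())
		rpm_put();		/* worker never ran, drop its reference */
}

int main(void)
{
	queue_restart();
	worker_fn();
	queue_restart();
	cancel_restart();
	printf("rpm_refs = %d\n", rpm_refs);	/* balanced: 0 */
	return 0;
}

The invariant is that exactly one rpm_put() happens per successful queue, no matter which path consumes the pending work.
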
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 494909f74eb2..59c5c6b4d994 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -9,6 +9,7 @@
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
+#include "regs/xe_gtt_defs.h"
#include "regs/xe_guc_regs.h"
#include "xe_bo.h"
@@ -33,6 +34,7 @@
#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
#include "xe_wopcm.h"
#define make_u64_from_u32(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
@@ -696,6 +698,22 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
return fair;
}
+static u64 pf_profile_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
+{
+ bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
+ u64 shareable = ALIGN_DOWN(GUC_GGTT_TOP, SZ_512M);
+ u64 alignment = pf_get_ggtt_alignment(gt);
+
+ if (admin_only_pf && num_vfs == 1)
+ return ALIGN_DOWN(shareable, alignment);
+
+ /* need to hardcode due to ~512M of GGTT being reserved */
+ if (num_vfs > 56)
+ return SZ_64M - SZ_8M;
+
+ return rounddown_pow_of_two(div_u64(shareable, num_vfs));
+}
+
/**
* xe_gt_sriov_pf_config_set_fair_ggtt - Provision many VFs with fair GGTT.
* @gt: the &xe_gt (can't be media)
@@ -709,6 +727,7 @@ static u64 pf_estimate_fair_ggtt(struct xe_gt *gt, unsigned int num_vfs)
int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
unsigned int num_vfs)
{
+ u64 profile = pf_profile_fair_ggtt(gt, num_vfs);
u64 fair;
xe_gt_assert(gt, vfid);
@@ -722,9 +741,71 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid,
if (!fair)
return -ENOSPC;
+ fair = min(fair, profile);
+ if (fair < profile)
+ xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %llu vs %llu)\n",
+ "GGTT", fair, profile);
+
return xe_gt_sriov_pf_config_bulk_set_ggtt(gt, vfid, num_vfs, fair);
}
+/**
+ * xe_gt_sriov_pf_config_ggtt_save() - Save a VF provisioned GGTT data into a buffer.
+ * @gt: the &xe_gt
+ * @vfid: VF identifier (can't be 0)
+ * @buf: the GGTT data destination buffer (or NULL to query the buf size)
+ * @size: the size of the buffer (or 0 to query the buf size)
+ *
+ * This function can only be called on PF.
+ *
+ * Return: size of the buffer needed to save GGTT data if querying,
+ * 0 on successful save or a negative error code on failure.
+ */
+ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
+ void *buf, size_t size)
+{
+ struct xe_ggtt_node *node;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+ xe_gt_assert(gt, !(!buf ^ !size));
+
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ node = pf_pick_vf_config(gt, vfid)->ggtt_region;
+
+ if (!buf)
+ return xe_ggtt_node_pt_size(node);
+
+ return xe_ggtt_node_save(node, buf, size, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_ggtt_restore() - Restore a VF provisioned GGTT data from a buffer.
+ * @gt: the &xe_gt
+ * @vfid: VF identifier (can't be 0)
+ * @buf: the GGTT data source buffer
+ * @size: the size of the buffer
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
+ const void *buf, size_t size)
+{
+ struct xe_ggtt_node *node;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid);
+
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ node = pf_pick_vf_config(gt, vfid)->ggtt_region;
+
+ return xe_ggtt_node_load(node, buf, size, vfid);
+}
+
static u32 pf_get_min_spare_ctxs(struct xe_gt *gt)
{
/* XXX: preliminary */
@@ -923,7 +1004,8 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns
const char *what, const char *(*unit)(u32),
unsigned int last, int err)
{
- xe_gt_assert(gt, first);
+ char name[8];
+
xe_gt_assert(gt, num_vfs);
xe_gt_assert(gt, first <= last);
@@ -931,8 +1013,9 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns
return pf_config_set_u32_done(gt, first, value, get(gt, first), what, unit, err);
if (unlikely(err)) {
- xe_gt_sriov_notice(gt, "Failed to bulk provision VF%u..VF%u with %s\n",
- first, first + num_vfs - 1, what);
+ xe_gt_sriov_notice(gt, "Failed to bulk provision %s..VF%u with %s\n",
+ xe_sriov_function_name(first, name, sizeof(name)),
+ first + num_vfs - 1, what);
if (last > first)
pf_config_bulk_set_u32_done(gt, first, last - first, value,
get, what, unit, last, 0);
@@ -941,8 +1024,9 @@ static int pf_config_bulk_set_u32_done(struct xe_gt *gt, unsigned int first, uns
/* pick actual value from first VF - bulk provisioning shall be equal across all VFs */
value = get(gt, first);
- xe_gt_sriov_info(gt, "VF%u..VF%u provisioned with %u%s %s\n",
- first, first + num_vfs - 1, value, unit(value), what);
+ xe_gt_sriov_info(gt, "%s..VF%u provisioned with %u%s %s\n",
+ xe_sriov_function_name(first, name, sizeof(name)),
+ first + num_vfs - 1, value, unit(value), what);
return 0;
}
@@ -981,6 +1065,16 @@ int xe_gt_sriov_pf_config_bulk_set_ctxs(struct xe_gt *gt, unsigned int vfid,
"GuC context IDs", no_unit, n, err);
}
+static u32 pf_profile_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
+{
+ bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
+
+ if (admin_only_pf && num_vfs == 1)
+ return ALIGN_DOWN(GUC_ID_MAX, SZ_1K);
+
+ return rounddown_pow_of_two(GUC_ID_MAX / num_vfs);
+}
+
static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
{
struct xe_guc_id_mgr *idm = &gt->uc.guc.submission_state.idm;
@@ -1013,6 +1107,7 @@ static u32 pf_estimate_fair_ctxs(struct xe_gt *gt, unsigned int num_vfs)
int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
unsigned int num_vfs)
{
+ u32 profile = pf_profile_fair_ctxs(gt, num_vfs);
u32 fair;
xe_gt_assert(gt, vfid);
@@ -1025,6 +1120,11 @@ int xe_gt_sriov_pf_config_set_fair_ctxs(struct xe_gt *gt, unsigned int vfid,
if (!fair)
return -ENOSPC;
+ fair = min(fair, profile);
+ if (fair < profile)
+ xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
+ "GuC context IDs", fair, profile);
+
return xe_gt_sriov_pf_config_bulk_set_ctxs(gt, vfid, num_vfs, fair);
}
@@ -1229,6 +1329,17 @@ int xe_gt_sriov_pf_config_bulk_set_dbs(struct xe_gt *gt, unsigned int vfid,
"GuC doorbell IDs", no_unit, n, err);
}
+static u32 pf_profile_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
+{
+ bool admin_only_pf = xe_sriov_pf_admin_only(gt_to_xe(gt));
+
+ /* XXX: preliminary */
+ if (admin_only_pf && num_vfs == 1)
+ return GUC_NUM_DOORBELLS - SZ_16;
+
+ return rounddown_pow_of_two(GUC_NUM_DOORBELLS / (num_vfs + 1));
+}
+
static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
{
struct xe_guc_db_mgr *dbm = &gt->uc.guc.dbm;
@@ -1261,6 +1372,7 @@ static u32 pf_estimate_fair_dbs(struct xe_gt *gt, unsigned int num_vfs)
int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
unsigned int num_vfs)
{
+ u32 profile = pf_profile_fair_dbs(gt, num_vfs);
u32 fair;
xe_gt_assert(gt, vfid);
@@ -1273,6 +1385,11 @@ int xe_gt_sriov_pf_config_set_fair_dbs(struct xe_gt *gt, unsigned int vfid,
if (!fair)
return -ENOSPC;
+ fair = min(fair, profile);
+ if (fair < profile)
+ xe_gt_sriov_info(gt, "Using non-profile provisioning (%s %u vs %u)\n",
+ "GuC doorbell IDs", fair, profile);
+
return xe_gt_sriov_pf_config_bulk_set_dbs(gt, vfid, num_vfs, fair);
}
@@ -1433,7 +1550,8 @@ fail:
return err;
}
-static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+/* Return: %true if there was an LMEM provisioned, %false otherwise */
+static bool pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config)
{
xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt)));
xe_gt_assert(gt, xe_gt_is_main_type(gt));
@@ -1442,7 +1560,9 @@ static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_confi
if (config->lmem_obj) {
xe_bo_unpin_map_no_vm(config->lmem_obj);
config->lmem_obj = NULL;
+ return true;
}
+ return false;
}
static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
@@ -1474,23 +1594,17 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size)
return 0;
xe_gt_assert(gt, pf_get_lmem_alignment(gt) == SZ_2M);
- bo = xe_bo_create_locked(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_NEEDS_2M |
- XE_BO_FLAG_PINNED |
- XE_BO_FLAG_PINNED_LATE_RESTORE);
+ bo = xe_bo_create_pin_range_novm(xe, tile,
+ ALIGN(size, PAGE_SIZE), 0, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_NEEDS_2M |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE |
+ XE_BO_FLAG_FORCE_USER_VRAM);
if (IS_ERR(bo))
return PTR_ERR(bo);
- err = xe_bo_pin(bo);
- xe_bo_unlock(bo);
- if (unlikely(err)) {
- xe_bo_put(bo);
- return err;
- }
-
config->lmem_obj = bo;
if (xe_device_has_lmtt(xe)) {
@@ -1550,7 +1664,8 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
- xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));
+ if (!xe_device_has_lmtt(gt_to_xe(gt)))
+ return -EPERM;
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
@@ -1600,11 +1715,37 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid,
"LMEM", n, err);
}
+static struct xe_bo *pf_get_vf_config_lmem_obj(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+
+ return config->lmem_obj;
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_lmem_obj() - Take a reference to the struct &xe_bo backing VF LMEM.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function can only be called on PF.
+ * The caller is responsible for calling xe_bo_put() on the returned object.
+ *
+ * Return: pointer to struct &xe_bo backing VF LMEM (if any).
+ */
+struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, vfid);
+
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ return xe_bo_get(pf_get_vf_config_lmem_obj(gt, vfid));
+}
+
static u64 pf_query_free_lmem(struct xe_gt *gt)
{
struct xe_tile *tile = gt->tile;
- return xe_ttm_vram_get_avail(&tile->mem.vram.ttm.manager);
+ return xe_ttm_vram_get_avail(&tile->mem.vram->ttm.manager);
}
static u64 pf_query_max_lmem(struct xe_gt *gt)
@@ -1632,7 +1773,6 @@ static u64 pf_estimate_fair_lmem(struct xe_gt *gt, unsigned int num_vfs)
u64 fair;
fair = div_u64(available, num_vfs);
- fair = rounddown_pow_of_two(fair); /* XXX: ttm_vram_mgr & drm_buddy limitation */
fair = ALIGN_DOWN(fair, alignment);
#ifdef MAX_FAIR_LMEM
fair = min_t(u64, MAX_FAIR_LMEM, fair);
@@ -1726,7 +1866,7 @@ static int pf_provision_exec_quantum(struct xe_gt *gt, unsigned int vfid,
return 0;
}
-static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
+static u32 pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
@@ -1734,47 +1874,107 @@ static int pf_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
}
/**
- * xe_gt_sriov_pf_config_set_exec_quantum - Configure execution quantum for the VF.
+ * xe_gt_sriov_pf_config_set_exec_quantum_locked() - Configure PF/VF execution quantum.
* @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
* @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
*
- * This function can only be called on PF.
+ * This function can only be called on PF with the master mutex held.
+ * It will log the provisioned value or an error in case of failure.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
- u32 exec_quantum)
+int xe_gt_sriov_pf_config_set_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid,
+ u32 exec_quantum)
{
int err;
- mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
err = pf_provision_exec_quantum(gt, vfid, exec_quantum);
- mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
return pf_config_set_u32_done(gt, vfid, exec_quantum,
- xe_gt_sriov_pf_config_get_exec_quantum(gt, vfid),
+ pf_get_exec_quantum(gt, vfid),
"execution quantum", exec_quantum_unit, err);
}
/**
- * xe_gt_sriov_pf_config_get_exec_quantum - Get VF's execution quantum.
+ * xe_gt_sriov_pf_config_set_exec_quantum() - Configure PF/VF execution quantum.
* @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
+ * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
+ *
+ * This function can only be called on PF.
+ * It will log the provisioned value or an error in case of the failure.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid,
+ u32 exec_quantum)
+{
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ return xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, exec_quantum);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_exec_quantum_locked() - Get PF/VF execution quantum.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: execution quantum in milliseconds (or 0 if infinity).
+ */
+u32 xe_gt_sriov_pf_config_get_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_get_exec_quantum(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_exec_quantum() - Get PF/VF execution quantum.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
*
* This function can only be called on PF.
*
- * Return: VF's (or PF's) execution quantum in milliseconds.
+ * Return: execution quantum in milliseconds (or 0 if infinity).
*/
u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid)
{
- u32 exec_quantum;
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
- mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
- exec_quantum = pf_get_exec_quantum(gt, vfid);
- mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return pf_get_exec_quantum(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked() - Configure EQ for PF and VFs.
+ * @gt: the &xe_gt to configure
+ * @exec_quantum: requested execution quantum in milliseconds (0 is infinity)
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(struct xe_gt *gt, u32 exec_quantum)
+{
+ unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int n;
+ int err = 0;
+
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 0; n <= totalvfs; n++) {
+ err = pf_provision_exec_quantum(gt, VFID(n), exec_quantum);
+ if (err)
+ break;
+ }
- return exec_quantum;
+ return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, exec_quantum,
+ pf_get_exec_quantum, "execution quantum",
+ exec_quantum_unit, n, err);
}
static const char *preempt_timeout_unit(u32 preempt_timeout)
@@ -1797,7 +1997,7 @@ static int pf_provision_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
return 0;
}
-static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
+static u32 pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
@@ -1805,47 +2005,106 @@ static int pf_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
}
/**
- * xe_gt_sriov_pf_config_set_preempt_timeout - Configure preemption timeout for the VF.
+ * xe_gt_sriov_pf_config_set_preempt_timeout_locked() - Configure PF/VF preemption timeout.
* @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
* @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
*
- * This function can only be called on PF.
+ * This function can only be called on PF with the master mutex held.
+ * It will log the provisioned value or an error in case of failure.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
- u32 preempt_timeout)
+int xe_gt_sriov_pf_config_set_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout)
{
int err;
- mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
err = pf_provision_preempt_timeout(gt, vfid, preempt_timeout);
- mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
return pf_config_set_u32_done(gt, vfid, preempt_timeout,
- xe_gt_sriov_pf_config_get_preempt_timeout(gt, vfid),
+ pf_get_preempt_timeout(gt, vfid),
"preemption timeout", preempt_timeout_unit, err);
}
/**
- * xe_gt_sriov_pf_config_get_preempt_timeout - Get VF's preemption timeout.
+ * xe_gt_sriov_pf_config_set_preempt_timeout() - Configure PF/VF preemption timeout.
* @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the PF or VF identifier
+ * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
*
* This function can only be called on PF.
*
- * Return: VF's (or PF's) preemption timeout in microseconds.
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout)
+{
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
+
+ return xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, preempt_timeout);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_preempt_timeout_locked() - Get PF/VF preemption timeout.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: preemption timeout in microseconds (or 0 if infinity).
+ */
+u32 xe_gt_sriov_pf_config_get_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ return pf_get_preempt_timeout(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_config_get_preempt_timeout() - Get PF/VF preemption timeout.
+ * @gt: the &xe_gt
+ * @vfid: the PF or VF identifier
+ *
+ * This function can only be called on PF.
+ *
+ * Return: preemption timeout in microseconds (or 0 if infinity).
*/
u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid)
{
- u32 preempt_timeout;
+ guard(mutex)(xe_gt_sriov_pf_master_mutex(gt));
- mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
- preempt_timeout = pf_get_preempt_timeout(gt, vfid);
- mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+ return pf_get_preempt_timeout(gt, vfid);
+}
- return preempt_timeout;
+/**
+ * xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked() - Configure PT for PF and VFs.
+ * @gt: the &xe_gt to configure
+ * @preempt_timeout: requested preemption timeout in microseconds (0 is infinity)
+ *
+ * This function can only be called on PF with the master mutex held.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(struct xe_gt *gt, u32 preempt_timeout)
+{
+ unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int n;
+ int err = 0;
+
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+ for (n = 0; n <= totalvfs; n++) {
+ err = pf_provision_preempt_timeout(gt, VFID(n), preempt_timeout);
+ if (err)
+ break;
+ }
+
+ return pf_config_bulk_set_u32_done(gt, 0, 1 + totalvfs, preempt_timeout,
+ pf_get_preempt_timeout, "preemption timeout",
+ preempt_timeout_unit, n, err);
}
static const char *sched_priority_unit(u32 priority)
@@ -2020,12 +2279,13 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
struct xe_device *xe = gt_to_xe(gt);
+ bool released;
if (xe_gt_is_main_type(gt)) {
pf_release_vf_config_ggtt(gt, config);
if (IS_DGFX(xe)) {
- pf_release_vf_config_lmem(gt, config);
- if (xe_device_has_lmtt(xe))
+ released = pf_release_vf_config_lmem(gt, config);
+ if (released && xe_device_has_lmtt(xe))
pf_update_vf_lmtt(xe, vfid);
}
}
@@ -2672,3 +2932,7 @@ int xe_gt_sriov_pf_config_print_available_ggtt(struct xe_gt *gt, struct drm_prin
return 0;
}
+
+#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_gt_sriov_pf_config_kunit.c"
+#endif
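
The pf_profile_fair_*() helpers added above cap the estimated fair share of GGTT, context IDs, and doorbells with a precomputed per-profile value before bulk provisioning. A small sketch of that capped power-of-two split; the sizes below are made up for the demo and do not reflect the real GGTT or GuC limits:

/* sketch only: illustrative sizes, not the driver's constants */
#include <stdint.h>
#include <stdio.h>

static uint64_t rounddown_pow_of_two(uint64_t n)
{
	uint64_t p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static uint64_t profile_fair(uint64_t shareable, unsigned int num_vfs)
{
	return rounddown_pow_of_two(shareable / num_vfs);
}

int main(void)
{
	uint64_t shareable = 3ull << 29;	/* assume ~1.5 GiB shareable */
	uint64_t estimate = 500ull << 20;	/* assume 500 MiB actually free */

	for (unsigned int vfs = 1; vfs <= 4; vfs *= 2) {
		uint64_t profile = profile_fair(shareable, vfs);
		uint64_t fair = estimate < profile ? estimate : profile;

		printf("%u VF(s): profile %llu MiB, fair %llu MiB\n", vfs,
		       (unsigned long long)(profile >> 20),
		       (unsigned long long)(fair >> 20));
	}
	return 0;
}

Taking min(fair, profile) keeps provisioning on the profile value whenever resources allow, and falls back (with an info message) to the smaller estimate when they do not.
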
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index 513e6512a575..4975730423d7 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -36,14 +36,25 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs,
u64 size);
+struct xe_bo *xe_gt_sriov_pf_config_get_lmem_obj(struct xe_gt *gt, unsigned int vfid);
u32 xe_gt_sriov_pf_config_get_exec_quantum(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_config_set_exec_quantum(struct xe_gt *gt, unsigned int vfid, u32 exec_quantum);
+u32 xe_gt_sriov_pf_config_get_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_exec_quantum_locked(struct xe_gt *gt, unsigned int vfid,
+ u32 exec_quantum);
+int xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(struct xe_gt *gt, u32 exec_quantum);
+
u32 xe_gt_sriov_pf_config_get_preempt_timeout(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_config_set_preempt_timeout(struct xe_gt *gt, unsigned int vfid,
u32 preempt_timeout);
+u32 xe_gt_sriov_pf_config_get_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_config_set_preempt_timeout_locked(struct xe_gt *gt, unsigned int vfid,
+ u32 preempt_timeout);
+int xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(struct xe_gt *gt, u32 preempt_timeout);
+
u32 xe_gt_sriov_pf_config_get_sched_priority(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_config_set_sched_priority(struct xe_gt *gt, unsigned int vfid, u32 priority);
@@ -61,6 +72,11 @@ ssize_t xe_gt_sriov_pf_config_save(struct xe_gt *gt, unsigned int vfid, void *bu
int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid,
const void *buf, size_t size);
+ssize_t xe_gt_sriov_pf_config_ggtt_save(struct xe_gt *gt, unsigned int vfid,
+ void *buf, size_t size);
+int xe_gt_sriov_pf_config_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
+ const void *buf, size_t size);
+
bool xe_gt_sriov_pf_config_is_empty(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_config_init(struct xe_gt *gt);
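
The header now exposes _locked variants next to the mutex-taking wrappers so that bulk callers can hold the master mutex once across many updates, while one-off callers keep the convenient form. A compact pthread-based sketch of that layering, with toy names standing in for the driver's accessors and a plain mutex standing in for guard(mutex)/lockdep:

/* sketch only: set_quantum*() are invented stand-ins */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t master = PTHREAD_MUTEX_INITIALIZER;
static unsigned int quantum_ms;

/* _locked variant: caller must already hold the master mutex */
static int set_quantum_locked(unsigned int ms)
{
	/* in the kernel this would be lockdep_assert_held() */
	quantum_ms = ms;
	return 0;
}

/* unlocked wrapper: takes the mutex for one-off callers */
static int set_quantum(unsigned int ms)
{
	pthread_mutex_lock(&master);
	int ret = set_quantum_locked(ms);
	pthread_mutex_unlock(&master);
	return ret;
}

/* bulk caller: take the lock once, reuse the _locked variant */
static int set_quantum_bulk(const unsigned int *ms, int n)
{
	int ret = 0;

	pthread_mutex_lock(&master);
	for (int i = 0; i < n && !ret; i++)
		ret = set_quantum_locked(ms[i]);
	pthread_mutex_unlock(&master);
	return ret;
}

int main(void)
{
	unsigned int vals[] = { 10, 20, 0 };

	set_quantum(5);
	set_quantum_bulk(vals, 3);
	printf("final quantum: %u ms (0 means infinity)\n", quantum_ms);
	return 0;
}
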
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index 4f7fff892bc0..bf48b05797de 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -18,6 +18,10 @@
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"
+#include "xe_sriov_packet.h"
+#include "xe_sriov_packet_types.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_migration.h"
#include "xe_sriov_pf_service.h"
#include "xe_tile.h"
@@ -170,6 +174,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
CASE2STR(FLR_SEND_START);
CASE2STR(FLR_WAIT_GUC);
CASE2STR(FLR_GUC_DONE);
+ CASE2STR(FLR_SYNC);
CASE2STR(FLR_RESET_CONFIG);
CASE2STR(FLR_RESET_DATA);
CASE2STR(FLR_RESET_MMIO);
@@ -179,9 +184,20 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
CASE2STR(PAUSE_SEND_PAUSE);
CASE2STR(PAUSE_WAIT_GUC);
CASE2STR(PAUSE_GUC_DONE);
- CASE2STR(PAUSE_SAVE_GUC);
CASE2STR(PAUSE_FAILED);
CASE2STR(PAUSED);
+ CASE2STR(SAVE_WIP);
+ CASE2STR(SAVE_PROCESS_DATA);
+ CASE2STR(SAVE_WAIT_DATA);
+ CASE2STR(SAVE_DATA_DONE);
+ CASE2STR(SAVE_FAILED);
+ CASE2STR(SAVED);
+ CASE2STR(RESTORE_WIP);
+ CASE2STR(RESTORE_PROCESS_DATA);
+ CASE2STR(RESTORE_WAIT_DATA);
+ CASE2STR(RESTORE_DATA_DONE);
+ CASE2STR(RESTORE_FAILED);
+ CASE2STR(RESTORED);
CASE2STR(RESUME_WIP);
CASE2STR(RESUME_SEND_RESUME);
CASE2STR(RESUME_FAILED);
@@ -206,6 +222,8 @@ static unsigned long pf_get_default_timeout(enum xe_gt_sriov_control_bits bit)
case XE_GT_SRIOV_STATE_FLR_WIP:
case XE_GT_SRIOV_STATE_FLR_RESET_CONFIG:
return 5 * HZ;
+ case XE_GT_SRIOV_STATE_RESTORE_WIP:
+ return 20 * HZ;
default:
return HZ;
}
@@ -223,7 +241,7 @@ static unsigned long *pf_peek_vf_state(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
- return &cs->state;
+ return cs->state;
}
static bool pf_check_vf_state(struct xe_gt *gt, unsigned int vfid,
@@ -271,12 +289,19 @@ static bool pf_expect_vf_not_state(struct xe_gt *gt, unsigned int vfid,
return result;
}
+static void pf_track_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit,
+ const char *what)
+{
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) %s\n",
+ vfid, control_bit_to_string(bit), bit, what);
+}
+
static bool pf_enter_vf_state(struct xe_gt *gt, unsigned int vfid,
enum xe_gt_sriov_control_bits bit)
{
if (!test_and_set_bit(bit, pf_peek_vf_state(gt, vfid))) {
- xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) enter\n",
- vfid, control_bit_to_string(bit), bit);
+ pf_track_vf_state(gt, vfid, bit, "enter");
return true;
}
return false;
@@ -286,8 +311,7 @@ static bool pf_exit_vf_state(struct xe_gt *gt, unsigned int vfid,
enum xe_gt_sriov_control_bits bit)
{
if (test_and_clear_bit(bit, pf_peek_vf_state(gt, vfid))) {
- xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) exit\n",
- vfid, control_bit_to_string(bit), bit);
+ pf_track_vf_state(gt, vfid, bit, "exit");
return true;
}
return false;
@@ -321,6 +345,8 @@ static void pf_exit_vf_mismatch(struct xe_gt *gt, unsigned int vfid)
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED);
}
#define pf_enter_vf_state_machine_bug(gt, vfid) ({ \
@@ -351,6 +377,8 @@ static void pf_queue_vf(struct xe_gt *gt, unsigned int vfid)
static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid);
static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid);
static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid);
static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid);
@@ -372,6 +400,8 @@ static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid)
pf_exit_vf_flr_wip(gt, vfid);
pf_exit_vf_stop_wip(gt, vfid);
+ pf_exit_vf_save_wip(gt, vfid);
+ pf_exit_vf_restore_wip(gt, vfid);
pf_exit_vf_pause_wip(gt, vfid);
pf_exit_vf_resume_wip(gt, vfid);
@@ -391,6 +421,8 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid)
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED);
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED);
pf_exit_vf_mismatch(gt, vfid);
pf_exit_vf_wip(gt, vfid);
}
@@ -421,8 +453,7 @@ static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid)
* : PAUSE_GUC_DONE o-----restart
* : | :
* : | o---<--busy :
- * : v / / :
- * : PAUSE_SAVE_GUC :
+ * : / :
* : / :
* : / :
* :....o..............o...............o...........:
@@ -442,7 +473,6 @@ static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE);
- pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC);
}
}
@@ -473,41 +503,12 @@ static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid)
pf_enter_vf_pause_failed(gt, vfid);
}
-static void pf_enter_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid)
-{
- if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC))
- pf_enter_vf_state_machine_bug(gt, vfid);
-}
-
-static bool pf_exit_vf_pause_save_guc(struct xe_gt *gt, unsigned int vfid)
-{
- int err;
-
- if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC))
- return false;
-
- err = xe_gt_sriov_pf_migration_save_guc_state(gt, vfid);
- if (err) {
- /* retry if busy */
- if (err == -EBUSY) {
- pf_enter_vf_pause_save_guc(gt, vfid);
- return true;
- }
- /* give up on error */
- if (err == -EIO)
- pf_enter_vf_mismatch(gt, vfid);
- }
-
- pf_enter_vf_pause_completed(gt, vfid);
- return true;
-}
-
static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
{
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
return false;
- pf_enter_vf_pause_save_guc(gt, vfid);
+ pf_enter_vf_pause_completed(gt, vfid);
return true;
}
@@ -616,7 +617,7 @@ int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
}
if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
- xe_gt_sriov_info(gt, "VF%u paused!\n", vfid);
+ xe_gt_sriov_dbg(gt, "VF%u paused!\n", vfid);
return 0;
}
@@ -667,6 +668,8 @@ static void pf_enter_vf_resumed(struct xe_gt *gt, unsigned int vfid)
{
pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED);
pf_exit_vf_mismatch(gt, vfid);
pf_exit_vf_wip(gt, vfid);
}
@@ -745,6 +748,16 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
return -EPERM;
}
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
+ xe_gt_sriov_dbg(gt, "VF%u save is in progress!\n", vfid);
+ return -EBUSY;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
+ xe_gt_sriov_dbg(gt, "VF%u restore is in progress!\n", vfid);
+ return -EBUSY;
+ }
+
if (!pf_enter_vf_resume_wip(gt, vfid)) {
xe_gt_sriov_dbg(gt, "VF%u resume already in progress!\n", vfid);
return -EALREADY;
@@ -755,7 +768,7 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
return err;
if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED)) {
- xe_gt_sriov_info(gt, "VF%u resumed!\n", vfid);
+ xe_gt_sriov_dbg(gt, "VF%u resumed!\n", vfid);
return 0;
}
@@ -769,6 +782,562 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
}
/**
+ * DOC: The VF SAVE state machine
+ *
+ * SAVE extends the PAUSED state.
+ *
+ * The VF SAVE state machine looks like::
+ *
+ * ....PAUSED....................................................
+ * : :
+ * : (O)<---------o :
+ * : | \ :
+ * : save (SAVED) (SAVE_FAILED) :
+ * : | ^ ^ :
+ * : | | | :
+ * : ....V...............o...........o......SAVE_WIP......... :
+ * : : | | | : :
+ * : : | empty | : :
+ * : : | | | : :
+ * : : | | | : :
+ * : : | DATA_DONE | : :
+ * : : | ^ | : :
+ * : : | | error : :
+ * : : | no_data / : :
+ * : : | / / : :
+ * : : | / / : :
+ * : : | / / : :
+ * : : o---------->PROCESS_DATA<----consume : :
+ * : : \ \ : :
+ * : : \ \ : :
+ * : : \ \ : :
+ * : : ring_full----->WAIT_DATA : :
+ * : : : :
+ * : :......................................................: :
+ * :............................................................:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
+ xe_gt_sriov_pf_migration_ring_free(gt, vfid);
+
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
+ }
+}
+
+static void pf_enter_vf_saved(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ xe_gt_sriov_dbg(gt, "VF%u saved!\n", vfid);
+
+ pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_save_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ wake_up_all(xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid));
+
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static int pf_handle_vf_save_data(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid,
+ XE_SRIOV_PACKET_TYPE_GUC)) {
+ ret = xe_gt_sriov_pf_migration_guc_save(gt, vfid);
+ if (ret)
+ return ret;
+
+ xe_gt_sriov_pf_migration_save_data_complete(gt, vfid,
+ XE_SRIOV_PACKET_TYPE_GUC);
+
+ return -EAGAIN;
+ }
+
+ if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid,
+ XE_SRIOV_PACKET_TYPE_GGTT)) {
+ ret = xe_gt_sriov_pf_migration_ggtt_save(gt, vfid);
+ if (ret)
+ return ret;
+
+ xe_gt_sriov_pf_migration_save_data_complete(gt, vfid,
+ XE_SRIOV_PACKET_TYPE_GGTT);
+
+ return -EAGAIN;
+ }
+
+ if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid,
+ XE_SRIOV_PACKET_TYPE_MMIO)) {
+ ret = xe_gt_sriov_pf_migration_mmio_save(gt, vfid);
+ if (ret)
+ return ret;
+
+ xe_gt_sriov_pf_migration_save_data_complete(gt, vfid,
+ XE_SRIOV_PACKET_TYPE_MMIO);
+
+ return -EAGAIN;
+ }
+
+ if (xe_gt_sriov_pf_migration_save_data_pending(gt, vfid,
+ XE_SRIOV_PACKET_TYPE_VRAM)) {
+ ret = xe_gt_sriov_pf_migration_vram_save(gt, vfid);
+ if (ret == -EAGAIN)
+ return -EAGAIN;
+ else if (ret)
+ return ret;
+
+ xe_gt_sriov_pf_migration_save_data_complete(gt, vfid,
+ XE_SRIOV_PACKET_TYPE_VRAM);
+
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static bool pf_handle_vf_save(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA))
+ return false;
+
+ if (xe_gt_sriov_pf_migration_ring_full(gt, vfid)) {
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA);
+ return true;
+ }
+
+ ret = pf_handle_vf_save_data(gt, vfid);
+ if (ret == -EAGAIN)
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
+ else if (ret)
+ pf_enter_vf_save_failed(gt, vfid);
+ else
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
+
+ return true;
+}
+
+static void pf_exit_vf_save_wait_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA))
+ return;
+
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_enter_vf_save_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
+ xe_gt_sriov_pf_migration_save_init(gt, vfid);
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA);
+ pf_queue_vf(gt, vfid);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * xe_gt_sriov_pf_control_check_save_data_done() - Check if all save migration data was produced.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: true if all migration data was produced, false otherwise.
+ */
+bool xe_gt_sriov_pf_control_check_save_data_done(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
+}
+
+/**
+ * xe_gt_sriov_pf_control_check_save_failed() - Check if save processing has failed.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: true if save processing failed, false otherwise.
+ */
+bool xe_gt_sriov_pf_control_check_save_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED);
+}
+
+/**
+ * xe_gt_sriov_pf_control_process_save_data() - Queue VF save migration data processing.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_process_save_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_FAILED))
+ return -EIO;
+
+ pf_exit_vf_save_wait_data(gt, vfid);
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_trigger_save_vf() - Start an SR-IOV VF migration data save sequence.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid);
+ return -EPERM;
+ }
+
+ if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid);
+ return -EPERM;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
+ xe_gt_sriov_dbg(gt, "VF%u restore is in progress!\n", vfid);
+ return -EBUSY;
+ }
+
+ if (!pf_enter_vf_save_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u save already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_finish_save_vf() - Complete a VF migration data save sequence.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE)) {
+ xe_gt_sriov_err(gt, "VF%u save is still in progress!\n", vfid);
+ return -EIO;
+ }
+
+ pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_DATA_DONE);
+ pf_enter_vf_saved(gt, vfid);
+
+ return 0;
+}
+
+/**
+ * DOC: The VF RESTORE state machine
+ *
+ * RESTORE extends the PAUSED state.
+ *
+ * The VF RESTORE state machine looks like::
+ *
+ * ....PAUSED....................................................
+ * : :
+ * : (O)<---------o :
+ * : | \ :
+ * : restore (RESTORED) (RESTORE_FAILED) :
+ * : | ^ ^ :
+ * : | | | :
+ * : ....V...............o...........o......RESTORE_WIP...... :
+ * : : | | | : :
+ * : : | empty | : :
+ * : : | | | : :
+ * : : | | | : :
+ * : : | DATA_DONE | : :
+ * : : | ^ | : :
+ * : : | | error : :
+ * : : | trailer / : :
+ * : : | / / : :
+ * : : | / / : :
+ * : : | / / : :
+ * : : o---------->PROCESS_DATA<----produce : :
+ * : : \ \ : :
+ * : : \ \ : :
+ * : : \ \ : :
+ * : : ring_empty---->WAIT_DATA : :
+ * : : : :
+ * : :......................................................: :
+ * :............................................................:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_restore_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
+ xe_gt_sriov_pf_migration_ring_free(gt, vfid);
+
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE);
+ }
+}
+
+static void pf_enter_vf_restored(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ xe_gt_sriov_dbg(gt, "VF%u restored!\n", vfid);
+
+ pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_restore_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ wake_up_all(xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid));
+
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static int pf_handle_vf_restore_data(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_sriov_packet *data = xe_gt_sriov_pf_migration_restore_consume(gt, vfid);
+ int ret = 0;
+
+ switch (data->hdr.type) {
+ case XE_SRIOV_PACKET_TYPE_GGTT:
+ ret = xe_gt_sriov_pf_migration_ggtt_restore(gt, vfid, data);
+ break;
+ case XE_SRIOV_PACKET_TYPE_MMIO:
+ ret = xe_gt_sriov_pf_migration_mmio_restore(gt, vfid, data);
+ break;
+ case XE_SRIOV_PACKET_TYPE_GUC:
+ ret = xe_gt_sriov_pf_migration_guc_restore(gt, vfid, data);
+ break;
+ case XE_SRIOV_PACKET_TYPE_VRAM:
+ ret = xe_gt_sriov_pf_migration_vram_restore(gt, vfid, data);
+ break;
+ default:
+ xe_gt_sriov_notice(gt, "Skipping VF%u unknown data type: %d\n",
+ vfid, data->hdr.type);
+ break;
+ }
+
+ xe_sriov_packet_free(data);
+
+ return ret;
+}
+
+static bool pf_handle_vf_restore(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA))
+ return false;
+
+ if (xe_gt_sriov_pf_migration_ring_empty(gt, vfid)) {
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE))
+ pf_enter_vf_restored(gt, vfid);
+ else
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA);
+
+ return true;
+ }
+
+ ret = pf_handle_vf_restore_data(gt, vfid);
+ if (ret)
+ pf_enter_vf_restore_failed(gt, vfid);
+ else
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
+
+ return true;
+}
+
+static void pf_exit_vf_restore_wait_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA))
+ return;
+
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_enter_vf_restore_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA);
+ pf_queue_vf(gt, vfid);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * xe_gt_sriov_pf_control_check_restore_failed() - Check if restore processing has failed.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: true if restore processing failed, false otherwise.
+ */
+bool xe_gt_sriov_pf_control_check_restore_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED);
+}
+
+/**
+ * xe_gt_sriov_pf_control_restore_data_done() - Indicate the end of VF migration data stream.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_restore_data_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_DATA_DONE)) {
+ pf_enter_vf_state_machine_bug(gt, vfid);
+ return -EIO;
+ }
+
+ return xe_gt_sriov_pf_control_process_restore_data(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_control_process_restore_data() - Queue VF restore migration data processing.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED)) {
+ xe_gt_sriov_pf_migration_ring_free(gt, vfid);
+ return -EIO;
+ }
+
+ pf_exit_vf_restore_wait_data(gt, vfid);
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_trigger_restore_vf() - Start an SR-IOV VF migration data restore sequence.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid);
+ return -EPERM;
+ }
+
+ if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid);
+ return -EPERM;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WIP)) {
+ xe_gt_sriov_dbg(gt, "VF%u save is in progress!\n", vfid);
+ return -EBUSY;
+ }
+
+ if (!pf_enter_vf_restore_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u restore already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ return 0;
+}
+
+static int pf_wait_vf_restore_done(struct xe_gt *gt, unsigned int vfid)
+{
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_RESTORE_WIP);
+ int err;
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err) {
+ xe_gt_sriov_notice(gt, "VF%u RESTORE didn't finish in %u ms (%pe)\n",
+ vfid, jiffies_to_msecs(timeout), ERR_PTR(err));
+ return err;
+ }
+
+ if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_FAILED))
+ return -EIO;
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_finish_restore_vf() - Complete a VF migration data restore sequence.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ ret = pf_wait_vf_restore_done(gt, vfid);
+ if (ret)
+ return ret;
+
+ if (!pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED)) {
+ pf_enter_vf_mismatch(gt, vfid);
+ return -EIO;
+ }
+
+ pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+
+ return 0;
+}
+
+/**
* DOC: The VF STOP state machine
*
* The VF STOP state machine looks like::
@@ -809,6 +1378,8 @@ static void pf_enter_vf_stopped(struct xe_gt *gt, unsigned int vfid)
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORED);
pf_exit_vf_mismatch(gt, vfid);
pf_exit_vf_wip(gt, vfid);
}
@@ -896,7 +1467,7 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
return err;
if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
- xe_gt_sriov_info(gt, "VF%u stopped!\n", vfid);
+ xe_gt_sriov_dbg(gt, "VF%u stopped!\n", vfid);
return 0;
}
@@ -934,6 +1505,10 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
* : v : | |
* : FLR_GUC_DONE : | |
* : | : | |
+ * : | o--<--sync : | |
+ * : |/ / : | |
+ * : FLR_SYNC--o : | |
+ * : | : | |
* : FLR_RESET_CONFIG---failed--->-----------o--------+-----------o
* : | : | |
* : FLR_RESET_DATA : | |
@@ -985,6 +1560,8 @@ static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid)
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC);
pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START);
+
+ xe_sriov_pf_control_sync_flr(gt_to_xe(gt), vfid);
}
}
@@ -1141,12 +1718,38 @@ static bool pf_exit_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid)
return true;
}
+static bool pf_exit_vf_flr_sync(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC))
+ return false;
+
+ pf_enter_vf_flr_reset_config(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_sync(struct xe_gt *gt, unsigned int vfid)
+{
+ int ret;
+
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ ret = xe_sriov_pf_control_sync_flr(gt_to_xe(gt), vfid);
+ if (ret < 0) {
+ xe_gt_sriov_dbg_verbose(gt, "FLR checkpoint %pe\n", ERR_PTR(ret));
+ pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC);
+ } else {
+ xe_gt_sriov_dbg_verbose(gt, "FLR checkpoint pass\n");
+ pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC);
+ }
+}
+
static bool pf_exit_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
{
if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE))
return false;
- pf_enter_vf_flr_reset_config(gt, vfid);
+ pf_enter_vf_flr_sync(gt, vfid);
return true;
}
@@ -1167,10 +1770,52 @@ static void pf_enter_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid)
{
+ pf_enter_vf_flr_wip(gt, vfid);
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_sync_flr() - Synchronize on the VF FLR checkpoint.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @sync: if true, allow the FLR sequence to exit the checkpoint
+ *
+ * Return: non-zero if the FLR checkpoint has been reached, zero if there is
+ * no FLR in progress, or a negative error code if the FLR is busy or failed.
+ */
+int xe_gt_sriov_pf_control_sync_flr(struct xe_gt *gt, unsigned int vfid, bool sync)
+{
+ if (sync && pf_exit_vf_flr_sync(gt, vfid))
+ return 1;
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SYNC))
+ return 1;
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP))
+ return -EBUSY;
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ return -EIO;
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_control_wait_flr() - Wait for a VF FLR to complete.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_wait_flr(struct xe_gt *gt, unsigned int vfid)
+{
unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_WIP);
int err;
- pf_enter_vf_flr_wip(gt, vfid);
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ return -EIO;
+
+ if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP))
+ return 0;
err = pf_wait_vf_wip_done(gt, vfid, timeout);
if (err) {
@@ -1378,7 +2023,22 @@ static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid)
if (pf_exit_vf_pause_guc_done(gt, vfid))
return true;
- if (pf_exit_vf_pause_save_guc(gt, vfid))
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_SAVE_WAIT_DATA)) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
+ control_bit_to_string(XE_GT_SRIOV_STATE_SAVE_WAIT_DATA));
+ return false;
+ }
+
+ if (pf_handle_vf_save(gt, vfid))
+ return true;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA)) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
+ control_bit_to_string(XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA));
+ return false;
+ }
+
+ if (pf_handle_vf_restore(gt, vfid))
return true;
if (pf_exit_vf_resume_send_resume(gt, vfid))
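
The SAVE/RESTORE machinery added above is a producer-consumer loop over a migration data ring: the worker produces packets while the ring has space, parks in WAIT_DATA when it is full, and gets re-queued once the consumer drains it. A toy single-threaded sketch of the SAVE flow, with an invented ring size and packet count (the real ring, packet types, and queueing live in the migration code):

/* sketch only: RING_SLOTS and the packet count are made up */
#include <stdbool.h>
#include <stdio.h>

enum { RING_SLOTS = 2 };

static int ring_used;
static int packets_left = 5;	/* pretend 5 migration packets to produce */

static bool ring_full(void) { return ring_used == RING_SLOTS; }

enum save_state { PROCESS_DATA, WAIT_DATA, DATA_DONE };

static enum save_state step(enum save_state s)
{
	switch (s) {
	case PROCESS_DATA:
		if (ring_full())
			return WAIT_DATA;	/* park until consumer drains */
		if (!packets_left)
			return DATA_DONE;	/* everything produced */
		ring_used++;
		packets_left--;
		return PROCESS_DATA;
	case WAIT_DATA:
		return WAIT_DATA;		/* only the consumer unparks us */
	case DATA_DONE:
		return DATA_DONE;
	}
	return s;
}

int main(void)
{
	enum save_state s = PROCESS_DATA;

	while (s != DATA_DONE) {
		s = step(s);
		if (s == WAIT_DATA) {
			ring_used = 0;		/* consumer drains the ring... */
			s = PROCESS_DATA;	/* ...and re-queues the worker */
		}
	}
	printf("all packets produced\n");
	return 0;
}

The RESTORE direction mirrors this with the roles swapped: the user produces packets into the ring and the worker consumes them until it sees DATA_DONE on an empty ring.
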
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
index c85e64f099cc..c36c8767f3ad 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
@@ -16,8 +16,20 @@ void xe_gt_sriov_pf_control_restart(struct xe_gt *gt);
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
+bool xe_gt_sriov_pf_control_check_save_data_done(struct xe_gt *gt, unsigned int vfid);
+bool xe_gt_sriov_pf_control_check_save_failed(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_process_save_data(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_trigger_save_vf(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_finish_save_vf(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_restore_data_done(struct xe_gt *gt, unsigned int vfid);
+bool xe_gt_sriov_pf_control_check_restore_failed(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_control_sync_flr(struct xe_gt *gt, unsigned int vfid, bool sync);
+int xe_gt_sriov_pf_control_wait_flr(struct xe_gt *gt, unsigned int vfid);
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32 len);
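
The new sync_flr entry point has a tri-state contract: positive once the FLR checkpoint is reached, zero when no FLR is in flight, and negative while the FLR is busy or has failed. A minimal sketch of that contract, with local boolean flags standing in for the driver's per-VF state bits:

/* sketch only: struct flr is a local stand-in for VF control state */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct flr { bool sync; bool wip; bool failed; };

static int sync_flr(struct flr *f, bool release)
{
	if (release && f->sync) {
		f->sync = false;	/* let the FLR leave the checkpoint */
		return 1;
	}
	if (f->sync)
		return 1;		/* checkpoint reached, still parked */
	if (f->wip)
		return -EBUSY;		/* FLR running, checkpoint not yet hit */
	if (f->failed)
		return -EIO;
	return 0;			/* no FLR in progress */
}

int main(void)
{
	struct flr f = { .wip = true };

	printf("%d\n", sync_flr(&f, false));	/* -EBUSY */
	f.sync = true;
	printf("%d\n", sync_flr(&f, true));	/* 1: checkpoint released */
	f.wip = false;
	printf("%d\n", sync_flr(&f, false));	/* 0: idle */
	return 0;
}
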
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
index f02f941b4ad2..6027ba05a7f2 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
@@ -18,6 +18,7 @@
* @XE_GT_SRIOV_STATE_FLR_SEND_START: indicates that the PF wants to send a FLR START command.
* @XE_GT_SRIOV_STATE_FLR_WAIT_GUC: indicates that the PF awaits for a response from the GuC.
* @XE_GT_SRIOV_STATE_FLR_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_FLR_SYNC: indicates that the PF waits to synchronize with other GuCs.
* @XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: indicates that the PF needs to clear VF's resources.
* @XE_GT_SRIOV_STATE_FLR_RESET_DATA: indicates that the PF needs to clear VF's data.
* @XE_GT_SRIOV_STATE_FLR_RESET_MMIO: indicates that the PF needs to reset VF's registers.
@@ -27,9 +28,20 @@
* @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command.
* @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits for a response from the GuC.
* @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC.
- * @XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC: indicates that the PF needs to save the VF GuC state.
* @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed.
* @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused.
+ * @XE_GT_SRIOV_STATE_SAVE_WIP: indicates that VF save operation is in progress.
+ * @XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA: indicates that VF migration data is being produced.
+ * @XE_GT_SRIOV_STATE_SAVE_WAIT_DATA: indicates that the PF waits for space in the migration data ring.
+ * @XE_GT_SRIOV_STATE_SAVE_DATA_DONE: indicates that all migration data was produced by Xe.
+ * @XE_GT_SRIOV_STATE_SAVE_FAILED: indicates that VF save operation has failed.
+ * @XE_GT_SRIOV_STATE_SAVED: indicates that VF data is saved.
+ * @XE_GT_SRIOV_STATE_RESTORE_WIP: indicates that VF restore operation is in progress.
+ * @XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA: indicates that VF migration data is being consumed.
+ * @XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA: indicates that the PF waits for data in the migration data ring.
+ * @XE_GT_SRIOV_STATE_RESTORE_DATA_DONE: indicates that all migration data was produced by the user.
+ * @XE_GT_SRIOV_STATE_RESTORE_FAILED: indicates that VF restore operation has failed.
+ * @XE_GT_SRIOV_STATE_RESTORED: indicates that VF data is restored.
 * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates that a VF resume operation is in progress.
* @XE_GT_SRIOV_STATE_RESUME_SEND_RESUME: indicates that the PF is about to send RESUME command.
* @XE_GT_SRIOV_STATE_RESUME_FAILED: indicates that a VF resume operation has failed.
@@ -47,6 +59,7 @@ enum xe_gt_sriov_control_bits {
XE_GT_SRIOV_STATE_FLR_SEND_START,
XE_GT_SRIOV_STATE_FLR_WAIT_GUC,
XE_GT_SRIOV_STATE_FLR_GUC_DONE,
+ XE_GT_SRIOV_STATE_FLR_SYNC,
XE_GT_SRIOV_STATE_FLR_RESET_CONFIG,
XE_GT_SRIOV_STATE_FLR_RESET_DATA,
XE_GT_SRIOV_STATE_FLR_RESET_MMIO,
@@ -57,10 +70,23 @@ enum xe_gt_sriov_control_bits {
XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE,
XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC,
XE_GT_SRIOV_STATE_PAUSE_GUC_DONE,
- XE_GT_SRIOV_STATE_PAUSE_SAVE_GUC,
XE_GT_SRIOV_STATE_PAUSE_FAILED,
XE_GT_SRIOV_STATE_PAUSED,
+ XE_GT_SRIOV_STATE_SAVE_WIP,
+ XE_GT_SRIOV_STATE_SAVE_PROCESS_DATA,
+ XE_GT_SRIOV_STATE_SAVE_WAIT_DATA,
+ XE_GT_SRIOV_STATE_SAVE_DATA_DONE,
+ XE_GT_SRIOV_STATE_SAVE_FAILED,
+ XE_GT_SRIOV_STATE_SAVED,
+
+ XE_GT_SRIOV_STATE_RESTORE_WIP,
+ XE_GT_SRIOV_STATE_RESTORE_PROCESS_DATA,
+ XE_GT_SRIOV_STATE_RESTORE_WAIT_DATA,
+ XE_GT_SRIOV_STATE_RESTORE_DATA_DONE,
+ XE_GT_SRIOV_STATE_RESTORE_FAILED,
+ XE_GT_SRIOV_STATE_RESTORED,
+
XE_GT_SRIOV_STATE_RESUME_WIP,
XE_GT_SRIOV_STATE_RESUME_SEND_RESUME,
XE_GT_SRIOV_STATE_RESUME_FAILED,
@@ -71,9 +97,11 @@ enum xe_gt_sriov_control_bits {
XE_GT_SRIOV_STATE_STOP_FAILED,
XE_GT_SRIOV_STATE_STOPPED,
- XE_GT_SRIOV_STATE_MISMATCH = BITS_PER_LONG - 1,
+ XE_GT_SRIOV_STATE_MISMATCH, /* always keep as last */
};
+#define XE_GT_SRIOV_NUM_STATES (XE_GT_SRIOV_STATE_MISMATCH + 1)
+
/**
* struct xe_gt_sriov_control_state - GT-level per-VF control state.
*
@@ -81,7 +109,7 @@ enum xe_gt_sriov_control_bits {
*/
struct xe_gt_sriov_control_state {
/** @state: VF state bits */
- unsigned long state;
+ DECLARE_BITMAP(state, XE_GT_SRIOV_NUM_STATES);
/** @done: completion of async operations */
struct completion done;
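Switching @state from a single unsigned long to DECLARE_BITMAP() lifts the BITS_PER_LONG ceiling that previously forced XE_GT_SRIOV_STATE_MISMATCH onto the last bit. A minimal illustration of the bitmap usage (assumption: the ordinary atomic bit helpers, as used elsewhere in the driver):

#include <linux/bitmap.h>
#include <linux/bitops.h>

static void demo(void)
{
	/* an array of longs sized from the enum, so new control bits
	 * can be appended without hitting the single-word limit */
	DECLARE_BITMAP(state, XE_GT_SRIOV_NUM_STATES);

	bitmap_zero(state, XE_GT_SRIOV_NUM_STATES);
	set_bit(XE_GT_SRIOV_STATE_SAVE_WIP, state);
	if (test_and_clear_bit(XE_GT_SRIOV_STATE_SAVE_WIP, state))
		; /* save was in progress, now cleared */
}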
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index 3ed245e04d0c..5278ea4fd655 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -23,14 +23,25 @@
#include "xe_gt_sriov_pf_service.h"
#include "xe_pm.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_provision.h"
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0 # d_inode->i_private = gt
- * │   ├── pf # d_inode->i_private = gt
- * │   ├── vf1 # d_inode->i_private = VFID(1)
- * :   :
- * │   ├── vfN # d_inode->i_private = VFID(N)
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── pf # d_inode->i_private = (xe_device*)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
+ * │ │ │ ├── gt1 # d_inode->i_private = (xe_gt*)
+ * │ │ ├── tile1
+ * │ │ │ :
+ * │ ├── vf1 # d_inode->i_private = VFID(1)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
+ * │ │ │ ├── gt1 # d_inode->i_private = (xe_gt*)
+ * │ │ ├── tile1
+ * │ │ │ :
+ * : :
+ * │ ├── vfN # d_inode->i_private = VFID(N)
*/
static void *extract_priv(struct dentry *d)
@@ -40,26 +51,31 @@ static void *extract_priv(struct dentry *d)
static struct xe_gt *extract_gt(struct dentry *d)
{
- return extract_priv(d->d_parent);
+ return extract_priv(d);
+}
+
+static struct xe_device *extract_xe(struct dentry *d)
+{
+ return extract_priv(d->d_parent->d_parent->d_parent);
}
static unsigned int extract_vfid(struct dentry *d)
{
- return extract_priv(d) == extract_gt(d) ? PFID : (uintptr_t)extract_priv(d);
+ void *priv = extract_priv(d->d_parent->d_parent);
+
+ return priv == extract_xe(d) ? PFID : (uintptr_t)priv;
}
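The new dentry layout makes the helpers above walk fixed ancestor levels; the mapping, spelled out (illustration only, not part of the patch):

/*
 * For a gt dentry at .../sriov/<func>/tile<T>/gt<G>:
 *
 *   d                                  i_private = (xe_gt *)
 *   d->d_parent                        i_private = (xe_tile *)
 *   d->d_parent->d_parent              i_private = VFID(n), or (xe_device *) for "pf"
 *   d->d_parent->d_parent->d_parent    i_private = (xe_device *)
 *
 * extract_vfid() therefore compares the grandparent's private data with
 * the device pointer to tell the "pf" directory apart from "vf%u".
 */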
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── contexts_provisioned
- * │   │   ├── doorbells_provisioned
- * │   │   ├── runtime_registers
- * │   │   ├── negotiated_versions
- * │   │   ├── adverse_events
- * ├── gt1
- * │   ├── pf
- * │   │   ├── ...
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * : ├── tile0
+ * : ├── gt0
+ * : ├── contexts_provisioned
+ * ├── doorbells_provisioned
+ * ├── runtime_registers
+ * ├── adverse_events
*/
static const struct drm_info_list pf_info[] = {
@@ -86,48 +102,14 @@ static const struct drm_info_list pf_info[] = {
};
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── ggtt_available
- * │   │   ├── ggtt_provisioned
- */
-
-static const struct drm_info_list pf_ggtt_info[] = {
- {
- "ggtt_available",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_available_ggtt,
- },
- {
- "ggtt_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_ggtt,
- },
-};
-
-/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── lmem_provisioned
- */
-
-static const struct drm_info_list pf_lmem_info[] = {
- {
- "lmem_provisioned",
- .show = xe_gt_debugfs_simple_show,
- .data = xe_gt_sriov_pf_config_print_lmem,
- },
-};
-
-/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── reset_engine
- * │   │   ├── sample_period
- * │   │   ├── sched_if_idle
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * : ├── tile0
+ * : ├── gt0
+ * : ├── reset_engine
+ * ├── sample_period
+ * ├── sched_if_idle
*/
#define DEFINE_SRIOV_GT_POLICY_DEBUGFS_ATTRIBUTE(POLICY, TYPE, FORMAT) \
@@ -143,6 +125,8 @@ static int POLICY##_set(void *data, u64 val) \
\
xe_pm_runtime_get(xe); \
err = xe_gt_sriov_pf_policy_set_##POLICY(gt, val); \
+ if (!err) \
+ xe_sriov_pf_provision_set_custom_mode(xe); \
xe_pm_runtime_put(xe); \
\
return err; \
@@ -173,24 +157,24 @@ static void pf_add_policy_attrs(struct xe_gt *gt, struct dentry *parent)
}
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── ggtt_spare
- * │   │   ├── lmem_spare
- * │   │   ├── doorbells_spare
- * │   │   ├── contexts_spare
- * │   │   ├── exec_quantum_ms
- * │   │   ├── preempt_timeout_us
- * │   │   ├── sched_priority
- * │   ├── vf1
- * │   │   ├── ggtt_quota
- * │   │   ├── lmem_quota
- * │   │   ├── doorbells_quota
- * │   │   ├── contexts_quota
- * │   │   ├── exec_quantum_ms
- * │   │   ├── preempt_timeout_us
- * │   │   ├── sched_priority
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * │ ├── tile0
+ * │ : ├── gt0
+ * │ : ├── doorbells_spare
+ * │ ├── contexts_spare
+ * │ ├── exec_quantum_ms
+ * │ ├── preempt_timeout_us
+ * │ ├── sched_priority
+ * ├── vf1
+ * : ├── tile0
+ * : ├── gt0
+ * : ├── doorbells_quota
+ * ├── contexts_quota
+ * ├── exec_quantum_ms
+ * ├── preempt_timeout_us
+ * ├── sched_priority
*/
#define DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(CONFIG, TYPE, FORMAT) \
@@ -208,6 +192,8 @@ static int CONFIG##_set(void *data, u64 val) \
xe_pm_runtime_get(xe); \
err = xe_sriov_pf_wait_ready(xe) ?: \
xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
+ if (!err) \
+ xe_sriov_pf_provision_set_custom_mode(xe); \
xe_pm_runtime_put(xe); \
\
return err; \
@@ -224,8 +210,6 @@ static int CONFIG##_get(void *data, u64 *val) \
\
DEFINE_DEBUGFS_ATTRIBUTE(CONFIG##_fops, CONFIG##_get, CONFIG##_set, FORMAT)
-DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ggtt, u64, "%llu\n");
-DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(lmem, u64, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(ctxs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(dbs, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(exec_quantum, u32, "%llu\n");
@@ -233,22 +217,26 @@ DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(preempt_timeout, u32, "%llu\n");
DEFINE_SRIOV_GT_CONFIG_DEBUGFS_ATTRIBUTE(sched_priority, u32, "%llu\n");
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- * │   │   ├── threshold_cat_error_count
- * │   │   ├── threshold_doorbell_time_us
- * │   │   ├── threshold_engine_reset_count
- * │   │   ├── threshold_guc_time_us
- * │   │   ├── threshold_irq_time_us
- * │   │   ├── threshold_page_fault_count
- * │   ├── vf1
- * │   │   ├── threshold_cat_error_count
- * │   │   ├── threshold_doorbell_time_us
- * │   │   ├── threshold_engine_reset_count
- * │   │   ├── threshold_guc_time_us
- * │   │   ├── threshold_irq_time_us
- * │   │   ├── threshold_page_fault_count
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * │ ├── tile0
+ * │ : ├── gt0
+ * │ : ├── threshold_cat_error_count
+ * │ ├── threshold_doorbell_time_us
+ * │ ├── threshold_engine_reset_count
+ * │ ├── threshold_guc_time_us
+ * │ ├── threshold_irq_time_us
+ * │ ├── threshold_page_fault_count
+ * ├── vf1
+ * : ├── tile0
+ * : ├── gt0
+ * : ├── threshold_cat_error_count
+ * ├── threshold_doorbell_time_us
+ * ├── threshold_engine_reset_count
+ * ├── threshold_guc_time_us
+ * ├── threshold_irq_time_us
+ * ├── threshold_page_fault_count
*/
static int set_threshold(void *data, u64 val, enum xe_guc_klv_threshold_index index)
@@ -263,6 +251,8 @@ static int set_threshold(void *data, u64 val, enum xe_guc_klv_threshold_index in
xe_pm_runtime_get(xe);
err = xe_gt_sriov_pf_config_set_threshold(gt, vfid, index, val);
+ if (!err)
+ xe_sriov_pf_provision_set_custom_mode(xe);
xe_pm_runtime_put(xe);
return err;
@@ -302,13 +292,6 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
xe_gt_assert(gt, gt == extract_gt(parent));
xe_gt_assert(gt, vfid == extract_vfid(parent));
- if (xe_gt_is_main_type(gt)) {
- debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare",
- 0644, parent, parent, &ggtt_fops);
- if (xe_device_has_lmtt(gt_to_xe(gt)))
- debugfs_create_file_unsafe(vfid ? "lmem_quota" : "lmem_spare",
- 0644, parent, parent, &lmem_fops);
- }
debugfs_create_file_unsafe(vfid ? "doorbells_quota" : "doorbells_spare",
0644, parent, parent, &dbs_fops);
debugfs_create_file_unsafe(vfid ? "contexts_quota" : "contexts_spare",
@@ -329,10 +312,12 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
}
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── vf1
- * │   │   ├── control { stop, pause, resume }
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── vf1
+ * : ├── tile0
+ * : ├── gt0
+ * : ├── control { stop, pause, resume }
*/
static const struct {
@@ -342,9 +327,6 @@ static const struct {
{ "stop", xe_gt_sriov_pf_control_stop_vf },
{ "pause", xe_gt_sriov_pf_control_pause_vf },
{ "resume", xe_gt_sriov_pf_control_resume_vf },
-#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
- { "restore!", xe_gt_sriov_pf_migration_restore_guc_state },
-#endif
};
static ssize_t control_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
@@ -409,58 +391,27 @@ static const struct file_operations control_ops = {
};
/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── vf1
- * │   │   ├── guc_state
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── vf1
+ * : ├── tile0
+ * : ├── gt0
+ * : ├── config_blob
*/
-static ssize_t guc_state_read(struct file *file, char __user *buf,
- size_t count, loff_t *pos)
-{
- struct dentry *dent = file_dentry(file);
- struct dentry *parent = dent->d_parent;
- struct xe_gt *gt = extract_gt(parent);
- unsigned int vfid = extract_vfid(parent);
- return xe_gt_sriov_pf_migration_read_guc_state(gt, vfid, buf, count, pos);
-}
-
-static ssize_t guc_state_write(struct file *file, const char __user *buf,
- size_t count, loff_t *pos)
-{
- struct dentry *dent = file_dentry(file);
- struct dentry *parent = dent->d_parent;
- struct xe_gt *gt = extract_gt(parent);
- unsigned int vfid = extract_vfid(parent);
-
- if (*pos)
- return -EINVAL;
-
- return xe_gt_sriov_pf_migration_write_guc_state(gt, vfid, buf, count);
-}
-
-static const struct file_operations guc_state_ops = {
- .owner = THIS_MODULE,
- .read = guc_state_read,
- .write = guc_state_write,
- .llseek = default_llseek,
+struct config_blob_data {
+ size_t size;
+ u8 blob[];
};
-/*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── vf1
- * │   │   ├── config_blob
- */
-static ssize_t config_blob_read(struct file *file, char __user *buf,
- size_t count, loff_t *pos)
+static int config_blob_open(struct inode *inode, struct file *file)
{
struct dentry *dent = file_dentry(file);
struct dentry *parent = dent->d_parent;
struct xe_gt *gt = extract_gt(parent);
unsigned int vfid = extract_vfid(parent);
+ struct config_blob_data *cbd;
ssize_t ret;
- void *tmp;
ret = xe_gt_sriov_pf_config_save(gt, vfid, NULL, 0);
if (!ret)
@@ -468,16 +419,27 @@ static ssize_t config_blob_read(struct file *file, char __user *buf,
if (ret < 0)
return ret;
- tmp = kzalloc(ret, GFP_KERNEL);
- if (!tmp)
+ cbd = kzalloc(struct_size(cbd, blob, ret), GFP_KERNEL);
+ if (!cbd)
return -ENOMEM;
- ret = xe_gt_sriov_pf_config_save(gt, vfid, tmp, ret);
- if (ret > 0)
- ret = simple_read_from_buffer(buf, count, pos, tmp, ret);
+ ret = xe_gt_sriov_pf_config_save(gt, vfid, cbd->blob, ret);
+ if (ret < 0) {
+ kfree(cbd);
+ return ret;
+ }
+
+ cbd->size = ret;
+ file->private_data = cbd;
+ return nonseekable_open(inode, file);
+}
- kfree(tmp);
- return ret;
+static ssize_t config_blob_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct config_blob_data *cbd = file->private_data;
+
+ return simple_read_from_buffer(buf, count, pos, cbd->blob, cbd->size);
}
static ssize_t config_blob_write(struct file *file, const char __user *buf,
@@ -514,80 +476,147 @@ static ssize_t config_blob_write(struct file *file, const char __user *buf,
return ret;
}
+static int config_blob_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
static const struct file_operations config_blob_ops = {
.owner = THIS_MODULE,
+ .open = config_blob_open,
.read = config_blob_read,
.write = config_blob_write,
- .llseek = default_llseek,
+ .release = config_blob_release,
};
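Taking the snapshot in open() rather than in read() means every read() on one file descriptor sees a single consistent copy held in ->private_data until release(). A hypothetical userspace reader (the BDF, VF and tile/gt indices in the path are placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0000:03:00.0/sriov/vf1/tile0/gt0/config_blob",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	/* all reads below come from the snapshot taken at open() time */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}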
-/**
- * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs.
- * @gt: the &xe_gt to register
- * @root: the &dentry that represents the GT directory
- *
- * Register SR-IOV PF entries that are GT related and must be shown under GT debugfs.
- */
-void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
+static void pf_add_compat_attrs(struct xe_gt *gt, struct dentry *dent, unsigned int vfid)
{
struct xe_device *xe = gt_to_xe(gt);
- struct drm_minor *minor = xe->drm.primary;
- int n, totalvfs = xe_sriov_pf_get_totalvfs(xe);
- struct dentry *pfdentry;
- struct dentry *vfdentry;
- char buf[14]; /* should be enough up to "vf%u\0" for 2^32 - 1 */
-
- xe_gt_assert(gt, IS_SRIOV_PF(xe));
- xe_gt_assert(gt, root->d_inode->i_private == gt);
- /*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── pf
- */
- pfdentry = debugfs_create_dir("pf", root);
- if (IS_ERR(pfdentry))
+ if (!xe_gt_is_main_type(gt))
return;
- pfdentry->d_inode->i_private = gt;
-
- drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor);
- if (xe_gt_is_main_type(gt)) {
- drm_debugfs_create_files(pf_ggtt_info,
- ARRAY_SIZE(pf_ggtt_info),
- pfdentry, minor);
- if (xe_device_has_lmtt(gt_to_xe(gt)))
- drm_debugfs_create_files(pf_lmem_info,
- ARRAY_SIZE(pf_lmem_info),
- pfdentry, minor);
+
+ if (vfid) {
+ debugfs_create_symlink("ggtt_quota", dent, "../ggtt_quota");
+ if (xe_device_has_lmtt(xe))
+ debugfs_create_symlink("lmem_quota", dent, "../vram_quota");
+ } else {
+ debugfs_create_symlink("ggtt_spare", dent, "../ggtt_spare");
+ debugfs_create_symlink("ggtt_available", dent, "../ggtt_available");
+ debugfs_create_symlink("ggtt_provisioned", dent, "../ggtt_provisioned");
+ if (xe_device_has_lmtt(xe)) {
+ debugfs_create_symlink("lmem_spare", dent, "../vram_spare");
+ debugfs_create_symlink("lmem_provisioned", dent, "../vram_provisioned");
+ }
}
+}
- pf_add_policy_attrs(gt, pfdentry);
- pf_add_config_attrs(gt, pfdentry, PFID);
-
- for (n = 1; n <= totalvfs; n++) {
- /*
- * /sys/kernel/debug/dri/0/
- * ├── gt0
- * │   ├── vf1
- * │   ├── vf2
- */
- snprintf(buf, sizeof(buf), "vf%u", n);
- vfdentry = debugfs_create_dir(buf, root);
- if (IS_ERR(vfdentry))
- break;
- vfdentry->d_inode->i_private = (void *)(uintptr_t)n;
+static void pf_populate_gt(struct xe_gt *gt, struct dentry *dent, unsigned int vfid)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct drm_minor *minor = xe->drm.primary;
- pf_add_config_attrs(gt, vfdentry, VFID(n));
- debugfs_create_file("control", 0600, vfdentry, NULL, &control_ops);
+ if (vfid) {
+ pf_add_config_attrs(gt, dent, vfid);
+
+ debugfs_create_file("control", 0600, dent, NULL, &control_ops);
/* for testing/debugging purposes only! */
if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
- debugfs_create_file("guc_state",
- IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
- vfdentry, NULL, &guc_state_ops);
debugfs_create_file("config_blob",
IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) ? 0600 : 0400,
- vfdentry, NULL, &config_blob_ops);
+ dent, NULL, &config_blob_ops);
}
+
+ } else {
+ pf_add_config_attrs(gt, dent, PFID);
+ pf_add_policy_attrs(gt, dent);
+
+ drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), dent, minor);
}
+
+ /* for backward compatibility only */
+ pf_add_compat_attrs(gt, dent, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_debugfs_populate() - Create SR-IOV GT-level debugfs directories and files.
+ * @gt: the &xe_gt to register
+ * @parent: the parent &dentry that represents a &xe_tile
+ * @vfid: the VF identifier
+ *
+ * Add a new debugfs directory under @parent to represent the @gt and populate
+ * it with GT files related to the SR-IOV @vfid function.
+ *
+ * This function can only be called on PF.
+ */
+void xe_gt_sriov_pf_debugfs_populate(struct xe_gt *gt, struct dentry *parent, unsigned int vfid)
+{
+ struct dentry *dent;
+ char name[8]; /* should be enough up to "gt%u\0" for 2^8 - 1 */
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, extract_priv(parent) == gt->tile);
+ xe_gt_assert(gt, extract_priv(parent->d_parent) == gt_to_xe(gt) ||
+ (uintptr_t)extract_priv(parent->d_parent) == vfid);
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── pf
+ * │ │ ├── tile0 # parent
+ * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
+ * │ │ │ ├── gt1
+ * │ │ : :
+ * │ ├── vf1
+ * │ │ ├── tile0 # parent
+ * │ │ │ ├── gt0 # d_inode->i_private = (xe_gt*)
+ * │ │ │ ├── gt1
+ * │ : : :
+ */
+ snprintf(name, sizeof(name), "gt%u", gt->info.id);
+ dent = debugfs_create_dir(name, parent);
+ if (IS_ERR(dent))
+ return;
+ dent->d_inode->i_private = gt;
+
+ xe_gt_assert(gt, extract_gt(dent) == gt);
+ xe_gt_assert(gt, extract_vfid(dent) == vfid);
+
+ pf_populate_gt(gt, dent, vfid);
+}
+
+static void pf_add_links(struct xe_gt *gt, struct dentry *dent)
+{
+ unsigned int totalvfs = xe_gt_sriov_pf_get_totalvfs(gt);
+ unsigned int vfid;
+ char name[16]; /* should be more than enough for "vf%u\0" and VFID(UINT_MAX) */
+ char symlink[64]; /* should be more than enough for "../../sriov/vf%u/tile%u/gt%u\0" */
+
+ for (vfid = 0; vfid <= totalvfs; vfid++) {
+ if (vfid)
+ snprintf(name, sizeof(name), "vf%u", vfid);
+ else
+ snprintf(name, sizeof(name), "pf");
+ snprintf(symlink, sizeof(symlink), "../../sriov/%s/tile%u/gt%u",
+ name, gt->tile->id, gt->info.id);
+ debugfs_create_symlink(name, dent, symlink);
+ }
+}
+
+/**
+ * xe_gt_sriov_pf_debugfs_register - Register SR-IOV PF specific entries in GT debugfs.
+ * @gt: the &xe_gt to register
+ * @dent: the &dentry that represents the GT directory
+ *
+ * Instead of actual files, create symlinks for the PF and each VF to their GT
+ * specific attributes, which should already be exposed in the dedicated debugfs SR-IOV tree.
+ */
+void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *dent)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, dent->d_inode->i_private == gt);
+
+ pf_add_links(gt, dent);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h
index 038cc8ddc244..82ff3b7f0532 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.h
@@ -11,6 +11,7 @@ struct dentry;
#ifdef CONFIG_PCI_IOV
void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root);
+void xe_gt_sriov_pf_debugfs_populate(struct xe_gt *gt, struct dentry *parent, unsigned int vfid);
#else
static inline void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) { }
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
index c712111aa30d..3174a8dee779 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.c
@@ -5,14 +5,150 @@
#include <drm/drm_managed.h>
+#include "regs/xe_guc_regs.h"
+
#include "abi/guc_actions_sriov_abi.h"
#include "xe_bo.h"
+#include "xe_ggtt.h"
+#include "xe_gt.h"
+#include "xe_gt_sriov_pf.h"
+#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_migration.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc.h"
+#include "xe_guc_buf.h"
#include "xe_guc_ct.h"
+#include "xe_migrate.h"
+#include "xe_mmio.h"
#include "xe_sriov.h"
+#include "xe_sriov_packet.h"
+#include "xe_sriov_packet_types.h"
+#include "xe_sriov_pf_migration.h"
+
+#define XE_GT_SRIOV_PF_MIGRATION_RING_SIZE 5
+
+static struct xe_gt_sriov_migration_data *pf_pick_gt_migration(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ return &gt->sriov.pf.vfs[vfid].migration;
+}
+
+static void pf_dump_mig_data(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data,
+ const char *what)
+{
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
+ struct drm_printer p = xe_gt_dbg_printer(gt);
+
+ drm_printf(&p, "VF%u %s (%llu bytes)\n", vfid, what, data->hdr.size);
+ drm_print_hex_dump(&p, "mig_hdr: ", (void *)&data->hdr, sizeof(data->hdr));
+ drm_print_hex_dump(&p, "mig_data: ", data->vaddr, min(SZ_64, data->hdr.size));
+ }
+}
+
+static ssize_t pf_migration_ggtt_size(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!xe_gt_is_main_type(gt))
+ return 0;
+
+ return xe_gt_sriov_pf_config_ggtt_save(gt, vfid, NULL, 0);
+}
+
+static int pf_save_vf_ggtt_mig_data(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_sriov_packet *data;
+ size_t size;
+ int ret;
+
+ size = pf_migration_ggtt_size(gt, vfid);
+ xe_gt_assert(gt, size);
+
+ data = xe_sriov_packet_alloc(gt_to_xe(gt));
+ if (!data)
+ return -ENOMEM;
+
+ ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id,
+ XE_SRIOV_PACKET_TYPE_GGTT, 0, size);
+ if (ret)
+ goto fail;
+
+ ret = xe_gt_sriov_pf_config_ggtt_save(gt, vfid, data->vaddr, size);
+ if (ret)
+ goto fail;
+
+ pf_dump_mig_data(gt, vfid, data, "GGTT data save");
+
+ ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ xe_sriov_packet_free(data);
+ xe_gt_sriov_err(gt, "Failed to save VF%u GGTT data (%pe)\n", vfid, ERR_PTR(ret));
+ return ret;
+}
+
+static int pf_restore_vf_ggtt_mig_data(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ int ret;
+
+ pf_dump_mig_data(gt, vfid, data, "GGTT data restore");
+
+ ret = xe_gt_sriov_pf_config_ggtt_restore(gt, vfid, data->vaddr, data->hdr.size);
+ if (ret) {
+ xe_gt_sriov_err(gt, "Failed to restore VF%u GGTT data (%pe)\n",
+ vfid, ERR_PTR(ret));
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_ggtt_save() - Save VF GGTT migration data.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ return pf_save_vf_ggtt_mig_data(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_ggtt_restore() - Restore VF GGTT migration data.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ * @data: the &xe_sriov_packet containing migration data
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+
+ return pf_restore_vf_ggtt_mig_data(gt, vfid, data);
+}
/* Return: number of dwords saved/restored/required or a negative error code on failure */
static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
@@ -33,7 +169,7 @@ static int guc_action_vf_save_restore(struct xe_guc *guc, u32 vfid, u32 opcode,
}
/* Return: size of the state in dwords or a negative error code on failure */
-static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
+static int pf_send_guc_query_vf_mig_data_size(struct xe_gt *gt, unsigned int vfid)
{
int ret;
@@ -42,353 +178,856 @@ static int pf_send_guc_query_vf_state_size(struct xe_gt *gt, unsigned int vfid)
}
/* Return: number of state dwords saved or a negative error code on failure */
-static int pf_send_guc_save_vf_state(struct xe_gt *gt, unsigned int vfid,
- void *buff, size_t size)
+static int pf_send_guc_save_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
+ void *dst, size_t size)
{
const int ndwords = size / sizeof(u32);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
struct xe_guc *guc = &gt->uc.guc;
- struct xe_bo *bo;
+ CLASS(xe_guc_buf, buf)(&guc->buf, ndwords);
int ret;
xe_gt_assert(gt, size % sizeof(u32) == 0);
xe_gt_assert(gt, size == ndwords * sizeof(u32));
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ /* FW expects this buffer to be zero-initialized */
+ memset(xe_guc_buf_cpu_ptr(buf), 0, size);
ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_SAVE,
- xe_bo_ggtt_addr(bo), ndwords);
+ xe_guc_buf_flush(buf), ndwords);
if (!ret)
ret = -ENODATA;
else if (ret > ndwords)
ret = -EPROTO;
else if (ret > 0)
- xe_map_memcpy_from(xe, buff, &bo->vmap, 0, ret * sizeof(u32));
+ memcpy(dst, xe_guc_buf_sync_read(buf), ret * sizeof(u32));
- xe_bo_unpin_map_no_vm(bo);
return ret;
}
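The CLASS(xe_guc_buf, ...) form above replaces the manual BO create/pin/copy/unpin dance with the kernel's scope-based cleanup helpers. A generic sketch of that pattern with a made-up resource type (see <linux/cleanup.h>; only the demo_buf names are invented):

#include <linux/cleanup.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* constructor kzalloc()s, destructor kfree()s, both tied to the scope */
DEFINE_CLASS(demo_buf, void *, kfree(_T), kzalloc(size, GFP_KERNEL), size_t size);

static int demo(void)
{
	CLASS(demo_buf, buf)(SZ_4K);	/* released automatically on any return */

	if (!buf)
		return -ENOMEM;
	/* ... use buf; no explicit free on the error or success paths ... */
	return 0;
}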
/* Return: number of state dwords restored or a negative error code on failure */
-static int pf_send_guc_restore_vf_state(struct xe_gt *gt, unsigned int vfid,
- const void *buff, size_t size)
+static int pf_send_guc_restore_vf_mig_data(struct xe_gt *gt, unsigned int vfid,
+ const void *src, size_t size)
{
const int ndwords = size / sizeof(u32);
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
struct xe_guc *guc = &gt->uc.guc;
- struct xe_bo *bo;
+ CLASS(xe_guc_buf_from_data, buf)(&guc->buf, src, size);
int ret;
xe_gt_assert(gt, size % sizeof(u32) == 0);
xe_gt_assert(gt, size == ndwords * sizeof(u32));
- bo = xe_bo_create_pin_map(xe, tile, NULL,
- ALIGN(size, PAGE_SIZE),
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT |
- XE_BO_FLAG_GGTT_INVALIDATE);
- if (IS_ERR(bo))
- return PTR_ERR(bo);
-
- xe_map_memcpy_to(xe, &bo->vmap, 0, buff, size);
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
ret = guc_action_vf_save_restore(guc, vfid, GUC_PF_OPCODE_VF_RESTORE,
- xe_bo_ggtt_addr(bo), ndwords);
+ xe_guc_buf_flush(buf), ndwords);
if (!ret)
ret = -ENODATA;
else if (ret > ndwords)
ret = -EPROTO;
- xe_bo_unpin_map_no_vm(bo);
return ret;
}
static bool pf_migration_supported(struct xe_gt *gt)
{
- xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- return gt->sriov.pf.migration.supported;
+ return xe_sriov_pf_migration_supported(gt_to_xe(gt));
}
-static struct mutex *pf_migration_mutex(struct xe_gt *gt)
+static int pf_save_vf_guc_mig_data(struct xe_gt *gt, unsigned int vfid)
{
- xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- return &gt->sriov.pf.migration.snapshot_lock;
+ struct xe_sriov_packet *data;
+ size_t size;
+ int ret;
+
+ ret = pf_send_guc_query_vf_mig_data_size(gt, vfid);
+ if (ret < 0)
+ goto fail;
+
+ size = ret * sizeof(u32);
+
+ data = xe_sriov_packet_alloc(gt_to_xe(gt));
+ if (!data) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id,
+ XE_SRIOV_PACKET_TYPE_GUC, 0, size);
+ if (ret)
+ goto fail_free;
+
+ ret = pf_send_guc_save_vf_mig_data(gt, vfid, data->vaddr, size);
+ if (ret < 0)
+ goto fail_free;
+ size = ret * sizeof(u32);
+ xe_gt_assert(gt, size);
+ xe_gt_assert(gt, size <= data->hdr.size);
+ data->hdr.size = size;
+ data->remaining = size;
+
+ pf_dump_mig_data(gt, vfid, data, "GuC data save");
+
+ ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data);
+ if (ret)
+ goto fail_free;
+
+ return 0;
+
+fail_free:
+ xe_sriov_packet_free(data);
+fail:
+ xe_gt_sriov_err(gt, "Failed to save VF%u GuC data (%pe)\n",
+ vfid, ERR_PTR(ret));
+ return ret;
}
-static struct xe_gt_sriov_state_snapshot *pf_pick_vf_snapshot(struct xe_gt *gt,
- unsigned int vfid)
+static ssize_t pf_migration_guc_size(struct xe_gt *gt, unsigned int vfid)
+{
+ ssize_t size;
+
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ size = pf_send_guc_query_vf_mig_data_size(gt, vfid);
+ if (size >= 0)
+ size *= sizeof(u32);
+
+ return size;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_guc_save() - Save VF GuC migration data.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid)
{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
- lockdep_assert_held(pf_migration_mutex(gt));
- return &gt->sriov.pf.vfs[vfid].snapshot;
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ return pf_save_vf_guc_mig_data(gt, vfid);
}
-static unsigned int pf_snapshot_index(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
+static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
{
- return container_of(snapshot, struct xe_gt_sriov_metadata, snapshot) - gt->sriov.pf.vfs;
+ int ret;
+
+ xe_gt_assert(gt, data->hdr.size);
+
+ pf_dump_mig_data(gt, vfid, data, "GuC data restore");
+
+ ret = pf_send_guc_restore_vf_mig_data(gt, vfid, data->vaddr, data->hdr.size);
+ if (ret < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ xe_gt_sriov_err(gt, "Failed to restore VF%u GuC data (%pe)\n",
+ vfid, ERR_PTR(ret));
+ return ret;
}
-static void pf_free_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
+/**
+ * xe_gt_sriov_pf_migration_guc_restore() - Restore VF GuC migration data.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @data: the &xe_sriov_packet containing migration data
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
{
- struct xe_device *xe = gt_to_xe(gt);
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
- drmm_kfree(&xe->drm, snapshot->guc.buff);
- snapshot->guc.buff = NULL;
- snapshot->guc.size = 0;
+ if (!pf_migration_supported(gt))
+ return -ENOPKG;
+
+ return pf_restore_vf_guc_state(gt, vfid, data);
}
-static int pf_alloc_guc_state(struct xe_gt *gt,
- struct xe_gt_sriov_state_snapshot *snapshot,
- size_t size)
+static ssize_t pf_migration_mmio_size(struct xe_gt *gt, unsigned int vfid)
{
- struct xe_device *xe = gt_to_xe(gt);
- void *p;
-
- pf_free_guc_state(gt, snapshot);
+ if (xe_gt_is_media_type(gt))
+ return MED_VF_SW_FLAG_COUNT * sizeof(u32);
+ else
+ return VF_SW_FLAG_COUNT * sizeof(u32);
+}
- if (!size)
- return -ENODATA;
+static int pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid, void *buf, size_t size)
+{
+ struct xe_mmio mmio;
+ u32 *regs = buf;
+ int n;
- if (size % sizeof(u32))
+ if (size != pf_migration_mmio_size(gt, vfid))
return -EINVAL;
- if (size > SZ_2M)
- return -EFBIG;
+ xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid);
- p = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
- if (!p)
- return -ENOMEM;
+ if (xe_gt_is_media_type(gt))
+ for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
+ regs[n] = xe_mmio_read32(&mmio, MED_VF_SW_FLAG(n));
+ else
+ for (n = 0; n < VF_SW_FLAG_COUNT; n++)
+ regs[n] = xe_mmio_read32(&mmio, VF_SW_FLAG(n));
- snapshot->guc.buff = p;
- snapshot->guc.size = size;
return 0;
}
-static void pf_dump_guc_state(struct xe_gt *gt, struct xe_gt_sriov_state_snapshot *snapshot)
+static int pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
+ const void *buf, size_t size)
{
- if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) {
- unsigned int vfid __maybe_unused = pf_snapshot_index(gt, snapshot);
+ const u32 *regs = buf;
+ struct xe_mmio mmio;
+ int n;
- xe_gt_sriov_dbg_verbose(gt, "VF%u GuC state is %zu dwords:\n",
- vfid, snapshot->guc.size / sizeof(u32));
- print_hex_dump_bytes("state: ", DUMP_PREFIX_OFFSET,
- snapshot->guc.buff, min(SZ_64, snapshot->guc.size));
- }
+ if (size != pf_migration_mmio_size(gt, vfid))
+ return -EINVAL;
+
+ xe_mmio_init_vf_view(&mmio, &gt->mmio, vfid);
+
+ if (xe_gt_is_media_type(gt))
+ for (n = 0; n < MED_VF_SW_FLAG_COUNT; n++)
+ xe_mmio_write32(&mmio, MED_VF_SW_FLAG(n), regs[n]);
+ else
+ for (n = 0; n < VF_SW_FLAG_COUNT; n++)
+ xe_mmio_write32(&mmio, VF_SW_FLAG(n), regs[n]);
+
+ return 0;
}
-static int pf_save_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
+static int pf_save_vf_mmio_mig_data(struct xe_gt *gt, unsigned int vfid)
{
- struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
+ struct xe_sriov_packet *data;
size_t size;
int ret;
- ret = pf_send_guc_query_vf_state_size(gt, vfid);
- if (ret < 0)
+ size = pf_migration_mmio_size(gt, vfid);
+ xe_gt_assert(gt, size);
+
+ data = xe_sriov_packet_alloc(gt_to_xe(gt));
+ if (!data)
+ return -ENOMEM;
+
+ ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id,
+ XE_SRIOV_PACKET_TYPE_MMIO, 0, size);
+ if (ret)
goto fail;
- size = ret * sizeof(u32);
- xe_gt_sriov_dbg_verbose(gt, "VF%u state size is %d dwords (%zu bytes)\n", vfid, ret, size);
- ret = pf_alloc_guc_state(gt, snapshot, size);
- if (ret < 0)
+ ret = pf_migration_mmio_save(gt, vfid, data->vaddr, size);
+ if (ret)
goto fail;
- ret = pf_send_guc_save_vf_state(gt, vfid, snapshot->guc.buff, size);
- if (ret < 0)
+ pf_dump_mig_data(gt, vfid, data, "MMIO data save");
+
+ ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data);
+ if (ret)
goto fail;
- size = ret * sizeof(u32);
- xe_gt_assert(gt, size);
- xe_gt_assert(gt, size <= snapshot->guc.size);
- snapshot->guc.size = size;
- pf_dump_guc_state(gt, snapshot);
return 0;
fail:
- xe_gt_sriov_dbg(gt, "Unable to save VF%u state (%pe)\n", vfid, ERR_PTR(ret));
- pf_free_guc_state(gt, snapshot);
+ xe_sriov_packet_free(data);
+ xe_gt_sriov_err(gt, "Failed to save VF%u MMIO data (%pe)\n", vfid, ERR_PTR(ret));
return ret;
}
+static int pf_restore_vf_mmio_mig_data(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ int ret;
+
+ pf_dump_mig_data(gt, vfid, data, "MMIO data restore");
+
+ ret = pf_migration_mmio_restore(gt, vfid, data->vaddr, data->hdr.size);
+ if (ret) {
+ xe_gt_sriov_err(gt, "Failed to restore VF%u MMIO data (%pe)\n",
+ vfid, ERR_PTR(ret));
+
+ return ret;
+ }
+
+ return 0;
+}
+
/**
- * xe_gt_sriov_pf_migration_save_guc_state() - Take a GuC VF state snapshot.
+ * xe_gt_sriov_pf_migration_mmio_save() - Save VF MMIO migration data.
* @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the VF identifier (can't be 0)
*
* This function is for PF only.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid)
+int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid)
{
- int err;
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+ return pf_save_vf_mmio_mig_data(gt, vfid);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_mmio_restore() - Restore VF MMIO migration data.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ * @data: the &xe_sriov_packet containing migration data
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
- if (!pf_migration_supported(gt))
- return -ENOPKG;
+ return pf_restore_vf_mmio_mig_data(gt, vfid, data);
+}
- mutex_lock(pf_migration_mutex(gt));
- err = pf_save_vf_guc_state(gt, vfid);
- mutex_unlock(pf_migration_mutex(gt));
+static ssize_t pf_migration_vram_size(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!xe_gt_is_main_type(gt))
+ return 0;
- return err;
+ return xe_gt_sriov_pf_config_get_lmem(gt, vfid);
+}
+
+static struct dma_fence *__pf_save_restore_vram(struct xe_gt *gt, unsigned int vfid,
+ struct xe_bo *vram, u64 vram_offset,
+ struct xe_bo *sysmem, u64 sysmem_offset,
+ size_t size, bool save)
+{
+ struct dma_fence *ret = NULL;
+ struct drm_exec exec;
+ int err;
+
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ err = drm_exec_lock_obj(&exec, &vram->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err;
+ }
+
+ err = drm_exec_lock_obj(&exec, &sysmem->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err;
+ }
+ }
+
+ ret = xe_migrate_vram_copy_chunk(vram, vram_offset, sysmem, sysmem_offset, size,
+ save ? XE_MIGRATE_COPY_TO_SRAM : XE_MIGRATE_COPY_TO_VRAM);
+
+err:
+ drm_exec_fini(&exec);
+
+ return ret;
}
-static int pf_restore_vf_guc_state(struct xe_gt *gt, unsigned int vfid)
+#define PF_VRAM_SAVE_RESTORE_TIMEOUT (5 * HZ)
+static int pf_save_vram_chunk(struct xe_gt *gt, unsigned int vfid,
+ struct xe_bo *src_vram, u64 src_vram_offset,
+ size_t size)
{
- struct xe_gt_sriov_state_snapshot *snapshot = pf_pick_vf_snapshot(gt, vfid);
+ struct xe_sriov_packet *data;
+ struct dma_fence *fence;
int ret;
- if (!snapshot->guc.size)
- return -ENODATA;
+ data = xe_sriov_packet_alloc(gt_to_xe(gt));
+ if (!data)
+ return -ENOMEM;
- xe_gt_sriov_dbg_verbose(gt, "restoring %zu dwords of VF%u GuC state\n",
- snapshot->guc.size / sizeof(u32), vfid);
- ret = pf_send_guc_restore_vf_state(gt, vfid, snapshot->guc.buff, snapshot->guc.size);
- if (ret < 0)
+ ret = xe_sriov_packet_init(data, gt->tile->id, gt->info.id,
+ XE_SRIOV_PACKET_TYPE_VRAM, src_vram_offset,
+ size);
+ if (ret)
+ goto fail;
+
+ fence = __pf_save_restore_vram(gt, vfid,
+ src_vram, src_vram_offset,
+ data->bo, 0, size, true);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto fail;
+ }
+
+ ret = dma_fence_wait_timeout(fence, false, PF_VRAM_SAVE_RESTORE_TIMEOUT);
+ dma_fence_put(fence);
+ if (!ret) {
+ ret = -ETIME;
+ goto fail;
+ }
+
+ pf_dump_mig_data(gt, vfid, data, "VRAM data save");
+
+ ret = xe_gt_sriov_pf_migration_save_produce(gt, vfid, data);
+ if (ret)
goto fail;
- xe_gt_sriov_dbg_verbose(gt, "restored %d dwords of VF%u GuC state\n", ret, vfid);
return 0;
fail:
- xe_gt_sriov_dbg(gt, "Failed to restore VF%u GuC state (%pe)\n", vfid, ERR_PTR(ret));
+ xe_sriov_packet_free(data);
+ return ret;
+}
+
+#define VF_VRAM_STATE_CHUNK_MAX_SIZE SZ_512M
+static int pf_save_vf_vram_mig_data(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid);
+ loff_t *offset = &migration->save.vram_offset;
+ struct xe_bo *vram;
+ size_t vram_size, chunk_size;
+ int ret;
+
+ vram = xe_gt_sriov_pf_config_get_lmem_obj(gt, vfid);
+ if (!vram)
+ return -ENXIO;
+
+ vram_size = xe_bo_size(vram);
+
+ xe_gt_assert(gt, *offset < vram_size);
+
+ chunk_size = min(vram_size - *offset, VF_VRAM_STATE_CHUNK_MAX_SIZE);
+
+ ret = pf_save_vram_chunk(gt, vfid, vram, *offset, chunk_size);
+ if (ret)
+ goto fail;
+
+ *offset += chunk_size;
+
+ xe_bo_put(vram);
+
+ if (*offset < vram_size)
+ return -EAGAIN;
+
+ return 0;
+
+fail:
+ xe_bo_put(vram);
+ xe_gt_sriov_err(gt, "Failed to save VF%u VRAM data (%pe)\n", vfid, ERR_PTR(ret));
+ return ret;
+}
+
+static int pf_restore_vf_vram_mig_data(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ u64 end = data->hdr.offset + data->hdr.size;
+ struct dma_fence *fence;
+ struct xe_bo *vram;
+ size_t size;
+ int ret = 0;
+
+ vram = xe_gt_sriov_pf_config_get_lmem_obj(gt, vfid);
+ if (!vram)
+ return -ENXIO;
+
+ size = xe_bo_size(vram);
+
+ if (end > size || end < data->hdr.size) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ pf_dump_mig_data(gt, vfid, data, "VRAM data restore");
+
+ fence = __pf_save_restore_vram(gt, vfid, vram, data->hdr.offset,
+ data->bo, 0, data->hdr.size, false);
+ if (IS_ERR(fence)) {
+ ret = PTR_ERR(fence);
+ goto err;
+ }
+
+ ret = dma_fence_wait_timeout(fence, false, PF_VRAM_SAVE_RESTORE_TIMEOUT);
+ dma_fence_put(fence);
+ if (!ret) {
+ ret = -ETIME;
+ goto err;
+ }
+
+ xe_bo_put(vram);
+
+ return 0;
+err:
+ xe_bo_put(vram);
+ xe_gt_sriov_err(gt, "Failed to restore VF%u VRAM data (%pe)\n", vfid, ERR_PTR(ret));
return ret;
}
/**
- * xe_gt_sriov_pf_migration_restore_guc_state() - Restore a GuC VF state.
+ * xe_gt_sriov_pf_migration_vram_save() - Save VF VRAM migration data.
* @gt: the &xe_gt
- * @vfid: the VF identifier
+ * @vfid: the VF identifier (can't be 0)
*
* This function is for PF only.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid)
+int xe_gt_sriov_pf_migration_vram_save(struct xe_gt *gt, unsigned int vfid)
{
- int ret;
-
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
- if (!pf_migration_supported(gt))
- return -ENOPKG;
+ return pf_save_vf_vram_mig_data(gt, vfid);
+}
- mutex_lock(pf_migration_mutex(gt));
- ret = pf_restore_vf_guc_state(gt, vfid);
- mutex_unlock(pf_migration_mutex(gt));
+/**
+ * xe_gt_sriov_pf_migration_vram_restore() - Restore VF VRAM migration data.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ * @data: the &xe_sriov_packet containing migration data
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_vram_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid != PFID);
+ xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
- return ret;
+ return pf_restore_vf_vram_mig_data(gt, vfid, data);
}
-#ifdef CONFIG_DEBUG_FS
/**
- * xe_gt_sriov_pf_migration_read_guc_state() - Read a GuC VF state.
+ * xe_gt_sriov_pf_migration_size() - Total size of migration data from all components within a GT.
* @gt: the &xe_gt
- * @vfid: the VF identifier
- * @buf: the user space buffer to read to
- * @count: the maximum number of bytes to read
- * @pos: the current position in the buffer
+ * @vfid: the VF identifier (can't be 0)
*
* This function is for PF only.
*
- * This function reads up to @count bytes from the saved VF GuC state buffer
- * at offset @pos into the user space address starting at @buf.
- *
- * Return: the number of bytes read or a negative error code on failure.
+ * Return: total migration data size in bytes or a negative error code on failure.
*/
-ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
- char __user *buf, size_t count, loff_t *pos)
+ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid)
{
- struct xe_gt_sriov_state_snapshot *snapshot;
- ssize_t ret;
+ ssize_t total = 0;
+ ssize_t size;
xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
xe_gt_assert(gt, vfid != PFID);
xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
- if (!pf_migration_supported(gt))
- return -ENOPKG;
+ size = pf_migration_guc_size(gt, vfid);
+ if (size < 0)
+ return size;
+ if (size > 0)
+ size += sizeof(struct xe_sriov_packet_hdr);
+ total += size;
+
+ size = pf_migration_ggtt_size(gt, vfid);
+ if (size < 0)
+ return size;
+ if (size > 0)
+ size += sizeof(struct xe_sriov_packet_hdr);
+ total += size;
+
+ size = pf_migration_mmio_size(gt, vfid);
+ if (size < 0)
+ return size;
+ if (size > 0)
+ size += sizeof(struct xe_sriov_packet_hdr);
+ total += size;
+
+ size = pf_migration_vram_size(gt, vfid);
+ if (size < 0)
+ return size;
+ if (size > 0)
+ size += sizeof(struct xe_sriov_packet_hdr);
+ total += size;
+
+ return total;
+}
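In short, the accounting above charges one packet header per non-empty component:

/*
 * Illustration only:
 *
 *   total = sum over c in {GuC, GGTT, MMIO, VRAM} with size(c) > 0 of
 *           size(c) + sizeof(struct xe_sriov_packet_hdr)
 *
 * so a component that reports zero bytes contributes neither payload
 * nor header to the migration stream.
 */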
- mutex_lock(pf_migration_mutex(gt));
- snapshot = pf_pick_vf_snapshot(gt, vfid);
- if (snapshot->guc.size)
- ret = simple_read_from_buffer(buf, count, pos, snapshot->guc.buff,
- snapshot->guc.size);
- else
- ret = -ENODATA;
- mutex_unlock(pf_migration_mutex(gt));
+/**
+ * xe_gt_sriov_pf_migration_ring_empty() - Check if a migration ring is empty.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * Return: true if the ring is empty, otherwise false.
+ */
+bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid)
+{
+ return ptr_ring_empty(&pf_pick_gt_migration(gt, vfid)->ring);
+}
- return ret;
+/**
+ * xe_gt_sriov_pf_migration_ring_full() - Check if a migration ring is full.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * Return: true if the ring is full, otherwise false.
+ */
+bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid)
+{
+ return ptr_ring_full(&pf_pick_gt_migration(gt, vfid)->ring);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_ring_free() - Consume and free all data in the migration ring
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ */
+void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid);
+ struct xe_sriov_packet *data;
+
+ if (ptr_ring_empty(&migration->ring))
+ return;
+
+ xe_gt_sriov_notice(gt, "VF%u unprocessed migration data left in the ring!\n", vfid);
+
+ while ((data = ptr_ring_consume(&migration->ring)))
+ xe_sriov_packet_free(data);
+}
+
+static void pf_migration_save_data_todo(struct xe_gt *gt, unsigned int vfid,
+ enum xe_sriov_packet_type type)
+{
+ set_bit(type, &pf_pick_gt_migration(gt, vfid)->save.data_remaining);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_save_init() - Initialize per-GT migration related data.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ */
+void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid);
+
+ migration->save.data_remaining = 0;
+ migration->save.vram_offset = 0;
+
+ xe_gt_assert(gt, pf_migration_guc_size(gt, vfid) > 0);
+ pf_migration_save_data_todo(gt, vfid, XE_SRIOV_PACKET_TYPE_GUC);
+
+ if (pf_migration_ggtt_size(gt, vfid) > 0)
+ pf_migration_save_data_todo(gt, vfid, XE_SRIOV_PACKET_TYPE_GGTT);
+
+ xe_gt_assert(gt, pf_migration_mmio_size(gt, vfid) > 0);
+ pf_migration_save_data_todo(gt, vfid, XE_SRIOV_PACKET_TYPE_MMIO);
+
+ if (pf_migration_vram_size(gt, vfid) > 0)
+ pf_migration_save_data_todo(gt, vfid, XE_SRIOV_PACKET_TYPE_VRAM);
}
/**
- * xe_gt_sriov_pf_migration_write_guc_state() - Write a GuC VF state.
+ * xe_gt_sriov_pf_migration_save_data_pending() - Check if migration data type needs to be saved.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ * @type: the &xe_sriov_packet_type of data to be checked
+ *
+ * Return: true if the data needs saving, otherwise false.
+ */
+bool xe_gt_sriov_pf_migration_save_data_pending(struct xe_gt *gt, unsigned int vfid,
+ enum xe_sriov_packet_type type)
+{
+ return test_bit(type, &pf_pick_gt_migration(gt, vfid)->save.data_remaining);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_save_data_complete() - Complete migration data type save.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be 0)
+ * @type: the &xe_sriov_packet_type to be marked as completed.
+ */
+void xe_gt_sriov_pf_migration_save_data_complete(struct xe_gt *gt, unsigned int vfid,
+ enum xe_sriov_packet_type type)
+{
+ clear_bit(type, &pf_pick_gt_migration(gt, vfid)->save.data_remaining);
+}
+
+/**
+ * xe_gt_sriov_pf_migration_save_produce() - Add VF save data packet to migration ring.
* @gt: the &xe_gt
* @vfid: the VF identifier
- * @buf: the user space buffer with GuC VF state
- * @size: the size of GuC VF state (in bytes)
+ * @data: the &xe_sriov_packet
*
- * This function is for PF only.
+ * Called by the save migration data producer (PF SR-IOV Control worker) when
+ * processing migration data.
+ * Wakes up the save migration data consumer (userspace), which may be
+ * waiting for data after finding the ring empty.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ int ret;
+
+ ret = ptr_ring_produce(&pf_pick_gt_migration(gt, vfid)->ring, data);
+ if (ret)
+ return ret;
+
+ wake_up_all(xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid));
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_restore_consume() - Get VF restore data packet from migration ring.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
*
- * This function reads @size bytes of the VF GuC state stored at user space
- * address @buf and writes it into a internal VF state buffer.
+ * Called by the restore migration data consumer (PF SR-IOV Control worker) when
+ * processing migration data.
+ * Wakes up the restore migration data producer (userspace), which may be
+ * waiting to add more data while the ring is full.
*
- * Return: the number of bytes used or a negative error code on failure.
+ * Return: Pointer to &xe_sriov_packet on success,
+ * NULL if the ring is empty.
*/
-ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
- const char __user *buf, size_t size)
+struct xe_sriov_packet *
+xe_gt_sriov_pf_migration_restore_consume(struct xe_gt *gt, unsigned int vfid)
{
- struct xe_gt_sriov_state_snapshot *snapshot;
- loff_t pos = 0;
- ssize_t ret;
+ struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid);
+ struct wait_queue_head *wq = xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid);
+ struct xe_sriov_packet *data;
- xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
- xe_gt_assert(gt, vfid != PFID);
- xe_gt_assert(gt, vfid <= xe_sriov_pf_get_totalvfs(gt_to_xe(gt)));
+ data = ptr_ring_consume(&migration->ring);
+ if (data)
+ wake_up_all(wq);
- if (!pf_migration_supported(gt))
- return -ENOPKG;
+ return data;
+}
- mutex_lock(pf_migration_mutex(gt));
- snapshot = pf_pick_vf_snapshot(gt, vfid);
- ret = pf_alloc_guc_state(gt, snapshot, size);
- if (!ret) {
- ret = simple_write_to_buffer(snapshot->guc.buff, size, &pos, buf, size);
- if (ret < 0)
- pf_free_guc_state(gt, snapshot);
- else
- pf_dump_guc_state(gt, snapshot);
+static bool pf_restore_data_ready(struct xe_gt *gt, unsigned int vfid)
+{
+ if (xe_gt_sriov_pf_control_check_restore_failed(gt, vfid) ||
+ !ptr_ring_full(&pf_pick_gt_migration(gt, vfid)->ring))
+ return true;
+
+ return false;
+}
+
+/**
+ * xe_gt_sriov_pf_migration_restore_produce() - Add VF restore data packet to migration ring.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ * @data: the &xe_sriov_packet
+ *
+ * Called by the restore migration data producer (userspace) when processing
+ * migration data.
+ * If the ring is full, waits until there is space.
+ * Queues the restore migration data consumer (PF SR-IOV Control worker),
+ * which may be waiting for data after finding the ring empty.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ int ret;
+
+ xe_gt_assert(gt, data->hdr.tile_id == gt->tile->id);
+ xe_gt_assert(gt, data->hdr.gt_id == gt->info.id);
+
+ for (;;) {
+ if (xe_gt_sriov_pf_control_check_restore_failed(gt, vfid))
+ return -EIO;
+
+ ret = ptr_ring_produce(&pf_pick_gt_migration(gt, vfid)->ring, data);
+ if (!ret)
+ break;
+
+ ret = wait_event_interruptible(*xe_sriov_pf_migration_waitqueue(gt_to_xe(gt), vfid),
+ pf_restore_data_ready(gt, vfid));
+ if (ret)
+ return ret;
}
- mutex_unlock(pf_migration_mutex(gt));
- return ret;
+ return xe_gt_sriov_pf_control_process_restore_data(gt, vfid);
}
-#endif /* CONFIG_DEBUG_FS */
-static bool pf_check_migration_support(struct xe_gt *gt)
+/**
+ * xe_gt_sriov_pf_migration_save_consume() - Get VF save data packet from migration ring.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier
+ *
+ * Called by the save migration data consumer (userspace) when
+ * processing migration data.
+ * Queues the save migration data producer (PF SR-IOV Control worker),
+ * which may be waiting to add more data while the ring is full.
+ *
+ * Return: Pointer to &xe_sriov_packet on success,
+ * NULL if the ring is empty and no more data will be produced,
+ * ERR_PTR(-EAGAIN) if the ring is empty but data is still being produced.
+ */
+struct xe_sriov_packet *
+xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid)
{
- /* GuC 70.25 with save/restore v2 is required */
- xe_gt_assert(gt, GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 25, 0));
+ struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, vfid);
+ struct xe_sriov_packet *data;
+ int ret;
+
+ data = ptr_ring_consume(&migration->ring);
+ if (data) {
+ ret = xe_gt_sriov_pf_control_process_save_data(gt, vfid);
+ if (ret) {
+ xe_sriov_packet_free(data);
+ return ERR_PTR(ret);
+ }
- /* XXX: for now this is for feature enabling only */
- return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+ return data;
+ }
+
+ if (xe_gt_sriov_pf_control_check_save_data_done(gt, vfid))
+ return NULL;
+
+ if (xe_gt_sriov_pf_control_check_save_failed(gt, vfid))
+ return ERR_PTR(-EIO);
+
+ return ERR_PTR(-EAGAIN);
+}
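The three-way return contract above implies a retry loop on the consumer side. A hypothetical driver-internal drain loop (the copy-out step is elided; a real caller would sleep on the migration waitqueue instead of spinning on -EAGAIN):

static int example_drain_save_ring(struct xe_gt *gt, unsigned int vfid)
{
	struct xe_sriov_packet *data;

	for (;;) {
		data = xe_gt_sriov_pf_migration_save_consume(gt, vfid);
		if (!data)
			return 0;	/* all migration data consumed */
		if (data == ERR_PTR(-EAGAIN))
			continue;	/* ring empty, producer still running */
		if (IS_ERR(data))
			return PTR_ERR(data);	/* e.g. -EIO after a failed save */

		/* ... hand data->hdr and data->vaddr to the user ... */
		xe_sriov_packet_free(data);
	}
}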
+
+static void destroy_pf_packet(void *ptr)
+{
+ struct xe_sriov_packet *data = ptr;
+
+ xe_sriov_packet_free(data);
+}
+
+static void action_ring_cleanup(void *arg)
+{
+ struct ptr_ring *r = arg;
+
+ ptr_ring_cleanup(r, destroy_pf_packet);
+}
+
+static void pf_gt_migration_check_support(struct xe_gt *gt)
+{
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) < MAKE_GUC_VER(70, 54, 0))
+ xe_sriov_pf_migration_disable(gt_to_xe(gt), "requires GuC version >= 70.54.0");
}
/**
@@ -402,18 +1041,29 @@ static bool pf_check_migration_support(struct xe_gt *gt)
int xe_gt_sriov_pf_migration_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, totalvfs;
int err;
xe_gt_assert(gt, IS_SRIOV_PF(xe));
- gt->sriov.pf.migration.supported = pf_check_migration_support(gt);
+ pf_gt_migration_check_support(gt);
if (!pf_migration_supported(gt))
return 0;
- err = drmm_mutex_init(&xe->drm, &gt->sriov.pf.migration.snapshot_lock);
- if (err)
- return err;
+ totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ for (n = 1; n <= totalvfs; n++) {
+ struct xe_gt_sriov_migration_data *migration = pf_pick_gt_migration(gt, n);
+
+ err = ptr_ring_init(&migration->ring,
+ XE_GT_SRIOV_PF_MIGRATION_RING_SIZE, GFP_KERNEL);
+ if (err)
+ return err;
+
+ err = devm_add_action_or_reset(xe->drm.dev, action_ring_cleanup, &migration->ring);
+ if (err)
+ return err;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
index 09faeae00ddb..181207a637b9 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration.h
@@ -9,16 +9,46 @@
#include <linux/types.h>
struct xe_gt;
+struct xe_sriov_packet;
+enum xe_sriov_packet_type;
+
+/* TODO: get this information by querying GuC in the future */
+#define XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE SZ_8M
int xe_gt_sriov_pf_migration_init(struct xe_gt *gt);
-int xe_gt_sriov_pf_migration_save_guc_state(struct xe_gt *gt, unsigned int vfid);
-int xe_gt_sriov_pf_migration_restore_guc_state(struct xe_gt *gt, unsigned int vfid);
-
-#ifdef CONFIG_DEBUG_FS
-ssize_t xe_gt_sriov_pf_migration_read_guc_state(struct xe_gt *gt, unsigned int vfid,
- char __user *buf, size_t count, loff_t *pos);
-ssize_t xe_gt_sriov_pf_migration_write_guc_state(struct xe_gt *gt, unsigned int vfid,
- const char __user *buf, size_t count);
-#endif
+int xe_gt_sriov_pf_migration_guc_save(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_guc_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data);
+int xe_gt_sriov_pf_migration_ggtt_save(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_ggtt_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data);
+int xe_gt_sriov_pf_migration_mmio_save(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_mmio_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data);
+int xe_gt_sriov_pf_migration_vram_save(struct xe_gt *gt, unsigned int vfid);
+int xe_gt_sriov_pf_migration_vram_restore(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data);
+
+ssize_t xe_gt_sriov_pf_migration_size(struct xe_gt *gt, unsigned int vfid);
+
+bool xe_gt_sriov_pf_migration_ring_empty(struct xe_gt *gt, unsigned int vfid);
+bool xe_gt_sriov_pf_migration_ring_full(struct xe_gt *gt, unsigned int vfid);
+void xe_gt_sriov_pf_migration_ring_free(struct xe_gt *gt, unsigned int vfid);
+
+void xe_gt_sriov_pf_migration_save_init(struct xe_gt *gt, unsigned int vfid);
+bool xe_gt_sriov_pf_migration_save_data_pending(struct xe_gt *gt, unsigned int vfid,
+ enum xe_sriov_packet_type type);
+void xe_gt_sriov_pf_migration_save_data_complete(struct xe_gt *gt, unsigned int vfid,
+ enum xe_sriov_packet_type type);
+
+int xe_gt_sriov_pf_migration_save_produce(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data);
+struct xe_sriov_packet *
+xe_gt_sriov_pf_migration_restore_consume(struct xe_gt *gt, unsigned int vfid);
+
+int xe_gt_sriov_pf_migration_restore_produce(struct xe_gt *gt, unsigned int vfid,
+ struct xe_sriov_packet *data);
+struct xe_sriov_packet *
+xe_gt_sriov_pf_migration_save_consume(struct xe_gt *gt, unsigned int vfid);
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
index 1f3110b6d44f..f50c64241e9c 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_migration_types.h
@@ -6,35 +6,23 @@
#ifndef _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
#define _XE_GT_SRIOV_PF_MIGRATION_TYPES_H_
-#include <linux/mutex.h>
-#include <linux/types.h>
+#include <linux/ptr_ring.h>
/**
- * struct xe_gt_sriov_state_snapshot - GT-level per-VF state snapshot data.
+ * struct xe_gt_sriov_migration_data - GT-level per-VF migration data.
*
* Used by the PF driver to maintain per-VF migration data.
*/
-struct xe_gt_sriov_state_snapshot {
- /** @guc: GuC VF state snapshot */
+struct xe_gt_sriov_migration_data {
+ /** @ring: queue containing VF save / restore migration data */
+ struct ptr_ring ring;
+ /** @save: structure for currently processed save migration data */
struct {
- /** @guc.buff: buffer with the VF state */
- u32 *buff;
- /** @guc.size: size of the buffer (must be dwords aligned) */
- u32 size;
- } guc;
-};
-
-/**
- * struct xe_gt_sriov_pf_migration - GT-level data.
- *
- * Used by the PF driver to maintain non-VF specific per-GT data.
- */
-struct xe_gt_sriov_pf_migration {
- /** @supported: indicates whether the feature is supported */
- bool supported;
-
- /** @snapshot_lock: protects all VFs snapshots */
- struct mutex snapshot_lock;
+ /** @save.data_remaining: bitmap of migration types that need to be saved */
+ unsigned long data_remaining;
+ /** @save.vram_offset: last saved offset within VRAM, used for chunked VRAM save */
+ loff_t vram_offset;
+ } save;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
index 76dd9233ef9f..2eb21610e5a0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c
@@ -99,11 +99,30 @@ static const struct xe_reg ver_3000_runtime_regs[] = {
HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */
};
+static const struct xe_reg ver_35_runtime_regs[] = {
+ RPM_CONFIG0, /* _MMIO(0x0d00) */
+ XEHP_FUSE4, /* _MMIO(0x9114) */
+ MIRROR_FUSE3, /* _MMIO(0x9118) */
+ MIRROR_L3BANK_ENABLE, /* _MMIO(0x9130) */
+ XELP_EU_ENABLE, /* _MMIO(0x9134) */
+ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */
+ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */
+ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */
+ XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */
+ XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */
+ XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */
+ SERVICE_COPY_ENABLE, /* _MMIO(0x9170) */
+};
+
static const struct xe_reg *pick_runtime_regs(struct xe_device *xe, unsigned int *count)
{
const struct xe_reg *regs;
- if (GRAPHICS_VERx100(xe) >= 3000) {
+ if (GRAPHICS_VER(xe) >= 35) {
+ *count = ARRAY_SIZE(ver_35_runtime_regs);
+ regs = ver_35_runtime_regs;
+ } else if (GRAPHICS_VERx100(xe) >= 3000) {
*count = ARRAY_SIZE(ver_3000_runtime_regs);
regs = ver_3000_runtime_regs;
} else if (GRAPHICS_VERx100(xe) >= 2000) {
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index a64a6835ad65..667b8310478d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -31,8 +31,8 @@ struct xe_gt_sriov_metadata {
/** @version: negotiated VF/PF ABI version */
struct xe_gt_sriov_pf_service_version version;
- /** @snapshot: snapshot of the VF state data */
- struct xe_gt_sriov_state_snapshot snapshot;
+ /** @migration: per-VF migration data. */
+ struct xe_gt_sriov_migration_data migration;
};
/**
@@ -58,7 +58,6 @@ struct xe_gt_sriov_pf {
struct xe_gt_sriov_pf_service service;
struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
- struct xe_gt_sriov_pf_migration migration;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;
};
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_printk.h b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
index 17624b16300a..d3457d608db8 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_printk.h
@@ -7,10 +7,13 @@
#define _XE_GT_SRIOV_PRINTK_H_
#include "xe_gt_printk.h"
-#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_printk.h"
+
+#define __XE_GT_SRIOV_PRINTK_FMT(_gt, _fmt, ...) \
+ __XE_TILE_SRIOV_PRINTK_FMT((_gt)->tile, __XE_GT_PRINTK_FMT((_gt), _fmt, ##__VA_ARGS__))
#define __xe_gt_sriov_printk(gt, _level, fmt, ...) \
- xe_gt_printk((gt), _level, "%s" fmt, xe_sriov_printk_prefix(gt_to_xe(gt)), ##__VA_ARGS__)
+ xe_sriov_##_level(gt_to_xe(gt), __XE_GT_SRIOV_PRINTK_FMT((gt), fmt, ##__VA_ARGS__))
#define xe_gt_sriov_err(_gt, _fmt, ...) \
__xe_gt_sriov_printk(_gt, err, _fmt, ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index b282838d59e6..4c73a077d314 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -23,11 +23,19 @@
#include "xe_gt_sriov_vf.h"
#include "xe_gt_sriov_vf_types.h"
#include "xe_guc.h"
+#include "xe_guc_ct.h"
#include "xe_guc_hxg_helpers.h"
#include "xe_guc_relay.h"
+#include "xe_guc_submit.h"
+#include "xe_irq.h"
+#include "xe_lrc.h"
+#include "xe_memirq.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
+#include "xe_tile_sriov_vf.h"
+#include "xe_tlb_inval.h"
#include "xe_uc_fw.h"
#include "xe_wopcm.h"
@@ -306,13 +314,13 @@ static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
}
/**
- * xe_gt_sriov_vf_notify_resfix_done - Notify GuC about resource fixups apply completed.
+ * vf_notify_resfix_done - Notify GuC about resource fixups apply completed.
* @gt: the &xe_gt struct instance linked to target GuC
*
* Returns: 0 if the operation completed successfully, or a negative error
* code otherwise.
*/
-int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt)
+static int vf_notify_resfix_done(struct xe_gt *gt)
{
struct xe_guc *guc = &gt->uc.guc;
int err;
@@ -432,13 +440,17 @@ u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt)
static int vf_get_ggtt_info(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
struct xe_guc *guc = &gt->uc.guc;
- u64 start, size;
+ u64 start, size, ggtt_size;
+ s64 shift;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ guard(mutex)(&ggtt->lock);
+
err = guc_action_query_single_klv64(guc, GUC_KLV_VF_CFG_GGTT_START_KEY, &start);
if (unlikely(err))
return err;
@@ -447,28 +459,44 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
if (unlikely(err))
return err;
- if (config->ggtt_size && config->ggtt_size != size) {
+ if (!size)
+ return -ENODATA;
+
+ ggtt_size = xe_tile_sriov_vf_ggtt(tile);
+ if (ggtt_size && ggtt_size != size) {
xe_gt_sriov_err(gt, "Unexpected GGTT reassignment: %lluK != %lluK\n",
- size / SZ_1K, config->ggtt_size / SZ_1K);
+ size / SZ_1K, ggtt_size / SZ_1K);
return -EREMCHG;
}
xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
start, start + size - 1, size / SZ_1K);
- config->ggtt_shift = start - (s64)config->ggtt_base;
- config->ggtt_base = start;
- config->ggtt_size = size;
+ shift = start - (s64)xe_tile_sriov_vf_ggtt_base(tile);
+ xe_tile_sriov_vf_ggtt_base_store(tile, start);
+ xe_tile_sriov_vf_ggtt_store(tile, size);
+
+ if (shift && shift != start) {
+ xe_gt_sriov_info(gt, "Shifting GGTT base by %lld to 0x%016llx\n",
+ shift, start);
+ xe_tile_sriov_vf_fixup_ggtt_nodes_locked(gt_to_tile(gt), shift);
+ }
+
+ if (xe_sriov_vf_migration_supported(gt_to_xe(gt))) {
+ WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false);
+ smp_wmb(); /* Ensure above write visible before wake */
+ wake_up_all(&gt->sriov.vf.migration.wq);
+ }
- return config->ggtt_size ? 0 : -ENODATA;
+ return 0;
}
static int vf_get_lmem_info(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
+ struct xe_tile *tile = gt_to_tile(gt);
struct xe_guc *guc = &gt->uc.guc;
char size_str[10];
- u64 size;
+ u64 size, lmem_size;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
@@ -477,18 +505,19 @@ static int vf_get_lmem_info(struct xe_gt *gt)
if (unlikely(err))
return err;
- if (config->lmem_size && config->lmem_size != size) {
+ lmem_size = xe_tile_sriov_vf_lmem(tile);
+ if (lmem_size && lmem_size != size) {
xe_gt_sriov_err(gt, "Unexpected LMEM reassignment: %lluM != %lluM\n",
- size / SZ_1M, config->lmem_size / SZ_1M);
+ size / SZ_1M, lmem_size / SZ_1M);
return -EREMCHG;
}
string_get_size(size, 1, STRING_UNITS_2, size_str, sizeof(size_str));
xe_gt_sriov_dbg_verbose(gt, "LMEM %lluM %s\n", size / SZ_1M, size_str);
- config->lmem_size = size;
+ xe_tile_sriov_vf_lmem_store(tile, size);
- return config->lmem_size ? 0 : -ENODATA;
+ return size ? 0 : -ENODATA;
}
static int vf_get_submission_cfg(struct xe_gt *gt)
@@ -539,7 +568,9 @@ static void vf_cache_gmdid(struct xe_gt *gt)
* xe_gt_sriov_vf_query_config - Query SR-IOV config data over MMIO.
* @gt: the &xe_gt
*
- * This function is for VF use only.
+ * This function is for VF use only. It may shift the GGTT and runs under the
+ * GGTT lock, making the shift visible to all GTs that share a GGTT.
*
* Return: 0 on success or a negative error code on failure.
*/
@@ -585,75 +616,6 @@ u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt)
return gt->sriov.vf.self_config.num_ctxs;
}
-/**
- * xe_gt_sriov_vf_lmem - VF LMEM configuration.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: size of the LMEM assigned to VF.
- */
-u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
-{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
- xe_gt_assert(gt, gt->sriov.vf.self_config.lmem_size);
-
- return gt->sriov.vf.self_config.lmem_size;
-}
-
-/**
- * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: size of the GGTT assigned to VF.
- */
-u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
-{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
- xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
-
- return gt->sriov.vf.self_config.ggtt_size;
-}
-
-/**
- * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
- * @gt: the &xe_gt
- *
- * This function is for VF use only.
- *
- * Return: base offset of the GGTT assigned to VF.
- */
-u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
-{
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
- xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
-
- return gt->sriov.vf.self_config.ggtt_base;
-}
-
-/**
- * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
- * @gt: the &xe_gt struct instance
- *
- * This function is for VF use only.
- *
- * Return: The shift value; could be negative
- */
-s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
-{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
-
- xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- xe_gt_assert(gt, xe_gt_is_main_type(gt));
-
- return config->ggtt_shift;
-}
-
static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
{
u32 request[VF2PF_HANDSHAKE_REQUEST_MSG_LEN] = {
@@ -751,6 +713,44 @@ failed:
}
/**
+ * xe_gt_sriov_vf_default_lrcs_hwsp_rebase - Update GGTT references in HWSP of default LRCs.
+ * @gt: the &xe_gt struct instance
+ */
+static void xe_gt_sriov_vf_default_lrcs_hwsp_rebase(struct xe_gt *gt)
+{
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+
+ for_each_hw_engine(hwe, gt, id)
+ xe_default_lrc_update_memirq_regs_with_address(hwe);
+}
+
+static void vf_start_migration_recovery(struct xe_gt *gt)
+{
+ bool started;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ spin_lock(&gt->sriov.vf.migration.lock);
+
+ if (!gt->sriov.vf.migration.recovery_queued &&
+ !gt->sriov.vf.migration.recovery_teardown) {
+ gt->sriov.vf.migration.recovery_queued = true;
+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, true);
+ WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, true);
+ smp_wmb(); /* Ensure above writes visible before wake */
+
+ xe_guc_ct_wake_waiters(&gt->uc.guc.ct);
+
+ started = queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
+ xe_gt_sriov_info(gt, "VF migration recovery %s\n", started ?
+ "scheduled" : "already in progress");
+ }
+
+ spin_unlock(&gt->sriov.vf.migration.lock);
+}
+
+/**
* xe_gt_sriov_vf_migrated_event_handler - Start a VF migration recovery,
* or just mark that a GuC is ready for it.
* @gt: the &xe_gt struct instance linked to target GuC
@@ -762,16 +762,15 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt)
struct xe_device *xe = gt_to_xe(gt);
xe_gt_assert(gt, IS_SRIOV_VF(xe));
+ xe_gt_assert(gt, xe_gt_sriov_vf_recovery_pending(gt));
- set_bit(gt->info.id, &xe->sriov.vf.migration.gt_flags);
- /*
- * We need to be certain that if all flags were set, at least one
- * thread will notice that and schedule the recovery.
- */
- smp_mb__after_atomic();
+ if (!xe_sriov_vf_migration_supported(xe)) {
+ xe_gt_sriov_err(gt, "migration not supported\n");
+ return;
+ }
xe_gt_sriov_info(gt, "ready for recovery after migration\n");
- xe_sriov_vf_start_migration_recovery(xe);
+ vf_start_migration_recovery(gt);
}
static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor)
@@ -1026,22 +1025,25 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
struct xe_device *xe = gt_to_xe(gt);
+ u64 lmem_size;
char buf[10];
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
- drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
- config->ggtt_base,
- config->ggtt_base + config->ggtt_size - 1);
-
- string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
- drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
+ if (xe_gt_is_main_type(gt)) {
+ u64 ggtt_size = xe_tile_sriov_vf_ggtt(gt_to_tile(gt));
+ u64 ggtt_base = xe_tile_sriov_vf_ggtt_base(gt_to_tile(gt));
- drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
+ drm_printf(p, "GGTT range:\t%#llx-%#llx\n",
+ ggtt_base, ggtt_base + ggtt_size - 1);
+ string_get_size(ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "GGTT size:\t%llu (%s)\n", ggtt_size, buf);
- if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) {
- string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
- drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
+ if (IS_DGFX(xe)) {
+ lmem_size = xe_tile_sriov_vf_lmem(gt_to_tile(gt));
+ string_get_size(lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
+ drm_printf(p, "LMEM size:\t%llu (%s)\n", lmem_size, buf);
+ }
}
drm_printf(p, "GuC contexts:\t%u\n", config->num_ctxs);
@@ -1104,3 +1106,272 @@ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "\thandshake:\t%u.%u\n",
pf_version->major, pf_version->minor);
}
+
+static bool vf_post_migration_shutdown(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ /*
+ * On platforms where CCS must be restored by the primary GT, the media
+ * GT's VF post-migration recovery must run afterward. Detect this case
+ * and re-queue the media GT's restore work item if necessary.
+ */
+ if (xe->info.needs_shared_vf_gt_wq && xe_gt_is_media_type(gt)) {
+ struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt;
+
+ if (xe_gt_sriov_vf_recovery_pending(primary_gt))
+ return true;
+ }
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ gt->sriov.vf.migration.recovery_queued = false;
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ xe_guc_ct_flush_and_stop(&gt->uc.guc.ct);
+ xe_guc_submit_pause(&gt->uc.guc);
+ xe_tlb_inval_reset(&gt->tlb_inval);
+
+ return false;
+}
+
+static size_t post_migration_scratch_size(struct xe_device *xe)
+{
+ return max(xe_lrc_reg_size(xe), LRC_WA_BB_SIZE);
+}
+
+static int vf_post_migration_fixups(struct xe_gt *gt)
+{
+ void *buf = gt->sriov.vf.migration.scratch;
+ int err;
+
+ /* xe_gt_sriov_vf_query_config will fixup the GGTT addresses */
+ err = xe_gt_sriov_vf_query_config(gt);
+ if (err)
+ return err;
+
+ if (xe_gt_is_main_type(gt))
+ xe_sriov_vf_ccs_rebase(gt_to_xe(gt));
+
+ xe_gt_sriov_vf_default_lrcs_hwsp_rebase(gt);
+ err = xe_guc_contexts_hwsp_rebase(&gt->uc.guc, buf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void vf_post_migration_rearm(struct xe_gt *gt)
+{
+ xe_guc_ct_restart(&gt->uc.guc.ct);
+ xe_guc_submit_unpause_prepare(&gt->uc.guc);
+}
+
+static void vf_post_migration_kickstart(struct xe_gt *gt)
+{
+ xe_guc_submit_unpause(&gt->uc.guc);
+}
+
+static void vf_post_migration_abort(struct xe_gt *gt)
+{
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false);
+ WRITE_ONCE(gt->sriov.vf.migration.ggtt_need_fixes, false);
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ wake_up_all(&gt->sriov.vf.migration.wq);
+
+ xe_guc_submit_pause_abort(&gt->uc.guc);
+}
+
+static int vf_post_migration_notify_resfix_done(struct xe_gt *gt)
+{
+ bool skip_resfix = false;
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ if (gt->sriov.vf.migration.recovery_queued) {
+ skip_resfix = true;
+ xe_gt_sriov_dbg(gt, "another recovery imminent, resfix skipped\n");
+ } else {
+ WRITE_ONCE(gt->sriov.vf.migration.recovery_inprogress, false);
+ }
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ if (skip_resfix)
+ return -EAGAIN;
+
+ /*
+ * Make sure interrupts on the new HW are properly set. The GuC IRQ
+ * must be working at this point, since the recovery has started,
+ * but the rest was not yet enabled using the procedure from the spec.
+ */
+ xe_irq_resume(gt_to_xe(gt));
+
+ return vf_notify_resfix_done(gt);
+}
+
+static void vf_post_migration_recovery(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ int err;
+ bool retry;
+
+ xe_gt_sriov_dbg(gt, "migration recovery in progress\n");
+
+ retry = vf_post_migration_shutdown(gt);
+ if (retry)
+ goto queue;
+
+ if (!xe_sriov_vf_migration_supported(xe)) {
+ xe_gt_sriov_err(gt, "migration is not supported\n");
+ err = -ENOTRECOVERABLE;
+ goto fail;
+ }
+
+ err = vf_post_migration_fixups(gt);
+ if (err)
+ goto fail;
+
+ vf_post_migration_rearm(gt);
+
+ err = vf_post_migration_notify_resfix_done(gt);
+ if (err && err != -EAGAIN)
+ goto fail;
+
+ vf_post_migration_kickstart(gt);
+
+ xe_gt_sriov_notice(gt, "migration recovery ended\n");
+ return;
+fail:
+ vf_post_migration_abort(gt);
+ xe_gt_sriov_err(gt, "migration recovery failed (%pe)\n", ERR_PTR(err));
+ xe_device_declare_wedged(xe);
+ return;
+
+queue:
+ xe_gt_sriov_info(gt, "Re-queuing migration recovery\n");
+ queue_work(gt->ordered_wq, &gt->sriov.vf.migration.worker);
+}
+
+static void migration_worker_func(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, struct xe_gt,
+ sriov.vf.migration.worker);
+
+ vf_post_migration_recovery(gt);
+}
+
+static void vf_migration_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+
+ spin_lock_irq(&gt->sriov.vf.migration.lock);
+ gt->sriov.vf.migration.recovery_teardown = true;
+ spin_unlock_irq(&gt->sriov.vf.migration.lock);
+
+ cancel_work_sync(&gt->sriov.vf.migration.worker);
+}
+
+/**
+ * xe_gt_sriov_vf_init_early() - GT VF init early
+ * @gt: the &xe_gt
+ *
+ * Return: 0 on success, errno on failure
+ */
+int xe_gt_sriov_vf_init_early(struct xe_gt *gt)
+{
+ void *buf;
+
+ if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return 0;
+
+ buf = drmm_kmalloc(&gt_to_xe(gt)->drm,
+ post_migration_scratch_size(gt_to_xe(gt)),
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ gt->sriov.vf.migration.scratch = buf;
+ spin_lock_init(&gt->sriov.vf.migration.lock);
+ INIT_WORK(&gt->sriov.vf.migration.worker, migration_worker_func);
+ init_waitqueue_head(&gt->sriov.vf.migration.wq);
+
+ return 0;
+}
+
+/**
+ * xe_gt_sriov_vf_init() - GT VF init
+ * @gt: the &xe_gt
+ *
+ * Return: 0 on success, errno on failure
+ */
+int xe_gt_sriov_vf_init(struct xe_gt *gt)
+{
+ if (!xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return 0;
+
+ /*
+ * We want to tear down VF post-migration recovery early during driver
+ * unload; therefore, we add this finalization action late during
+ * driver load.
+ */
+ return devm_add_action_or_reset(gt_to_xe(gt)->drm.dev,
+ vf_migration_fini, gt);
+}
+
+/**
+ * xe_gt_sriov_vf_recovery_pending() - VF post migration recovery pending
+ * @gt: the &xe_gt
+ *
+ * The return value of this function must be immediately visible upon vCPU
+ * unhalt and must persist until RESFIX_DONE is issued. This guarantee is
+ * currently implemented only for platforms that support memirq. If non-memirq
+ * platforms begin to support VF migration, this function will need to be
+ * updated accordingly.
+ *
+ * Return: True if VF post migration recovery is pending, False otherwise
+ */
+bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt)
+{
+ struct xe_memirq *memirq = &gt_to_tile(gt)->memirq;
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ /* early detection until recovery starts */
+ if (xe_device_uses_memirq(gt_to_xe(gt)) &&
+ xe_memirq_guc_sw_int_0_irq_pending(memirq, &gt->uc.guc))
+ return true;
+
+ return READ_ONCE(gt->sriov.vf.migration.recovery_inprogress);
+}
+
+static bool vf_valid_ggtt(struct xe_gt *gt)
+{
+ struct xe_memirq *memirq = &gt_to_tile(gt)->memirq;
+ bool irq_pending = xe_device_uses_memirq(gt_to_xe(gt)) &&
+ xe_memirq_guc_sw_int_0_irq_pending(memirq, &gt->uc.guc);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+
+ if (irq_pending || READ_ONCE(gt->sriov.vf.migration.ggtt_need_fixes))
+ return false;
+
+ return true;
+}
+
+/**
+ * xe_gt_sriov_vf_wait_valid_ggtt() - VF wait for valid GGTT addresses
+ * @gt: the &xe_gt
+ */
+void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt)
+{
+ int ret;
+
+ if (!IS_SRIOV_VF(gt_to_xe(gt)) ||
+ !xe_sriov_vf_migration_supported(gt_to_xe(gt)))
+ return;
+
+ ret = wait_event_interruptible_timeout(gt->sriov.vf.migration.wq,
+ vf_valid_ggtt(gt),
+ HZ * 5);
+ xe_gt_WARN_ON(gt, !ret);
+}
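
A hedged usage sketch (the caller below is illustrative, not from this patch):
code that programs GGTT addresses on a VF can gate on the post-migration
fixups completing first.

	/* Illustrative caller only; xe_gt_sriov_vf_wait_valid_ggtt() returns
	 * immediately when not a VF or when VF migration is not supported.
	 */
	static void example_program_ggtt(struct xe_gt *gt, u64 ggtt_addr)
	{
		xe_gt_sriov_vf_wait_valid_ggtt(gt);
		/* ... ggtt_addr is now safe to hand to the hardware ... */
	}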
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index e0357f341a2d..af40276790fa 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -21,15 +21,15 @@ void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
-int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
+int xe_gt_sriov_vf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_vf_init(struct xe_gt *gt);
+bool xe_gt_sriov_vf_recovery_pending(struct xe_gt *gt);
+
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
-u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt);
-u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt);
-s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
@@ -38,4 +38,6 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p);
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_sriov_vf_wait_valid_ggtt(struct xe_gt *gt);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index 298dedf4b009..420b0e6089de 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -7,20 +7,14 @@
#define _XE_GT_SRIOV_VF_TYPES_H_
#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
#include "xe_uc_fw_types.h"
/**
* struct xe_gt_sriov_vf_selfconfig - VF configuration data.
*/
struct xe_gt_sriov_vf_selfconfig {
- /** @ggtt_base: assigned base offset of the GGTT region. */
- u64 ggtt_base;
- /** @ggtt_size: assigned size of the GGTT region. */
- u64 ggtt_size;
- /** @ggtt_shift: difference in ggtt_base on last migration */
- s64 ggtt_shift;
- /** @lmem_size: assigned size of the LMEM. */
- u64 lmem_size;
/** @num_ctxs: assigned number of GuC submission context IDs. */
u16 num_ctxs;
/** @num_dbs: assigned number of GuC doorbells IDs. */
@@ -47,6 +41,28 @@ struct xe_gt_sriov_vf_runtime {
};
/**
+ * struct xe_gt_sriov_vf_migration - VF migration data.
+ */
+struct xe_gt_sriov_vf_migration {
+ /** @worker: VF migration recovery worker */
+ struct work_struct worker;
+ /** @lock: Protects recovery_queued and recovery_teardown */
+ spinlock_t lock;
+ /** @wq: wait queue for migration fixes */
+ wait_queue_head_t wq;
+ /** @scratch: Scratch memory for VF recovery */
+ void *scratch;
+ /** @recovery_teardown: VF post migration recovery is being torn down */
+ bool recovery_teardown;
+ /** @recovery_queued: VF post migration recovery is queued */
+ bool recovery_queued;
+ /** @recovery_inprogress: VF post migration recovery in progress */
+ bool recovery_inprogress;
+ /** @ggtt_need_fixes: VF GGTT needs fixes */
+ bool ggtt_need_fixes;
+};
+
+/**
* struct xe_gt_sriov_vf - GT level VF virtualization data.
*/
struct xe_gt_sriov_vf {
@@ -58,6 +74,8 @@ struct xe_gt_sriov_vf {
struct xe_gt_sriov_vf_selfconfig self_config;
/** @runtime: runtime data retrieved from the PF. */
struct xe_gt_sriov_vf_runtime runtime;
+ /** @migration: migration data for the VF. */
+ struct xe_gt_sriov_vf_migration migration;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c
index 30f942671c2b..5f74706bab81 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.c
+++ b/drivers/gpu/drm/xe/xe_gt_stats.c
@@ -26,11 +26,46 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr)
atomic64_add(incr, &gt->stats.counters[id]);
}
+#define DEF_STAT_STR(ID, name) [XE_GT_STATS_ID_##ID] = name
+
static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = {
- "svm_pagefault_count",
- "tlb_inval_count",
- "vma_pagefault_count",
- "vma_pagefault_kb",
+ DEF_STAT_STR(SVM_PAGEFAULT_COUNT, "svm_pagefault_count"),
+ DEF_STAT_STR(TLB_INVAL, "tlb_inval_count"),
+ DEF_STAT_STR(SVM_TLB_INVAL_COUNT, "svm_tlb_inval_count"),
+ DEF_STAT_STR(SVM_TLB_INVAL_US, "svm_tlb_inval_us"),
+ DEF_STAT_STR(VMA_PAGEFAULT_COUNT, "vma_pagefault_count"),
+ DEF_STAT_STR(VMA_PAGEFAULT_KB, "vma_pagefault_kb"),
+ DEF_STAT_STR(SVM_4K_PAGEFAULT_COUNT, "svm_4K_pagefault_count"),
+ DEF_STAT_STR(SVM_64K_PAGEFAULT_COUNT, "svm_64K_pagefault_count"),
+ DEF_STAT_STR(SVM_2M_PAGEFAULT_COUNT, "svm_2M_pagefault_count"),
+ DEF_STAT_STR(SVM_4K_VALID_PAGEFAULT_COUNT, "svm_4K_valid_pagefault_count"),
+ DEF_STAT_STR(SVM_64K_VALID_PAGEFAULT_COUNT, "svm_64K_valid_pagefault_count"),
+ DEF_STAT_STR(SVM_2M_VALID_PAGEFAULT_COUNT, "svm_2M_valid_pagefault_count"),
+ DEF_STAT_STR(SVM_4K_PAGEFAULT_US, "svm_4K_pagefault_us"),
+ DEF_STAT_STR(SVM_64K_PAGEFAULT_US, "svm_64K_pagefault_us"),
+ DEF_STAT_STR(SVM_2M_PAGEFAULT_US, "svm_2M_pagefault_us"),
+ DEF_STAT_STR(SVM_4K_MIGRATE_COUNT, "svm_4K_migrate_count"),
+ DEF_STAT_STR(SVM_64K_MIGRATE_COUNT, "svm_64K_migrate_count"),
+ DEF_STAT_STR(SVM_2M_MIGRATE_COUNT, "svm_2M_migrate_count"),
+ DEF_STAT_STR(SVM_4K_MIGRATE_US, "svm_4K_migrate_us"),
+ DEF_STAT_STR(SVM_64K_MIGRATE_US, "svm_64K_migrate_us"),
+ DEF_STAT_STR(SVM_2M_MIGRATE_US, "svm_2M_migrate_us"),
+ DEF_STAT_STR(SVM_DEVICE_COPY_US, "svm_device_copy_us"),
+ DEF_STAT_STR(SVM_4K_DEVICE_COPY_US, "svm_4K_device_copy_us"),
+ DEF_STAT_STR(SVM_64K_DEVICE_COPY_US, "svm_64K_device_copy_us"),
+ DEF_STAT_STR(SVM_2M_DEVICE_COPY_US, "svm_2M_device_copy_us"),
+ DEF_STAT_STR(SVM_CPU_COPY_US, "svm_cpu_copy_us"),
+ DEF_STAT_STR(SVM_4K_CPU_COPY_US, "svm_4K_cpu_copy_us"),
+ DEF_STAT_STR(SVM_64K_CPU_COPY_US, "svm_64K_cpu_copy_us"),
+ DEF_STAT_STR(SVM_2M_CPU_COPY_US, "svm_2M_cpu_copy_us"),
+ DEF_STAT_STR(SVM_DEVICE_COPY_KB, "svm_device_copy_kb"),
+ DEF_STAT_STR(SVM_CPU_COPY_KB, "svm_cpu_copy_kb"),
+ DEF_STAT_STR(SVM_4K_GET_PAGES_US, "svm_4K_get_pages_us"),
+ DEF_STAT_STR(SVM_64K_GET_PAGES_US, "svm_64K_get_pages_us"),
+ DEF_STAT_STR(SVM_2M_GET_PAGES_US, "svm_2M_get_pages_us"),
+ DEF_STAT_STR(SVM_4K_BIND_US, "svm_4K_bind_us"),
+ DEF_STAT_STR(SVM_64K_BIND_US, "svm_64K_bind_us"),
+ DEF_STAT_STR(SVM_2M_BIND_US, "svm_2M_bind_us"),
};
/**
@@ -50,3 +85,17 @@ int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p)
return 0;
}
+
+/**
+ * xe_gt_stats_clear - Clear the GT stats
+ * @gt: GT structure
+ *
+ * This clears (zeros) all the available GT stats.
+ */
+void xe_gt_stats_clear(struct xe_gt *gt)
+{
+ int id;
+
+ for (id = 0; id < ARRAY_SIZE(gt->stats.counters); ++id)
+ atomic64_set(&gt->stats.counters[id], 0);
+}
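
A minimal sketch of intended use (assumed; no caller is added in this diff):
a debugfs hook could zero the counters at the start of a measurement window,
so that xe_gt_stats_print_info() later shows only the activity since then.

	/* Hypothetical hook; only xe_gt_stats_clear() comes from this patch. */
	static void example_stats_measurement_begin(struct xe_gt *gt)
	{
		xe_gt_stats_clear(gt);
	}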
diff --git a/drivers/gpu/drm/xe/xe_gt_stats.h b/drivers/gpu/drm/xe/xe_gt_stats.h
index 38325ef53617..e8aea32bc971 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats.h
@@ -13,6 +13,7 @@ struct drm_printer;
#ifdef CONFIG_DEBUG_FS
int xe_gt_stats_print_info(struct xe_gt *gt, struct drm_printer *p);
+void xe_gt_stats_clear(struct xe_gt *gt);
void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr);
#else
static inline void
diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h
index be3244d7133c..d8348a8de2e1 100644
--- a/drivers/gpu/drm/xe/xe_gt_stats_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h
@@ -9,8 +9,41 @@
enum xe_gt_stats_id {
XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT,
XE_GT_STATS_ID_TLB_INVAL,
+ XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT,
+ XE_GT_STATS_ID_SVM_TLB_INVAL_US,
XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT,
XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
+ XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_64K_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_2M_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_4K_VALID_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_64K_VALID_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_2M_VALID_PAGEFAULT_COUNT,
+ XE_GT_STATS_ID_SVM_4K_PAGEFAULT_US,
+ XE_GT_STATS_ID_SVM_64K_PAGEFAULT_US,
+ XE_GT_STATS_ID_SVM_2M_PAGEFAULT_US,
+ XE_GT_STATS_ID_SVM_4K_MIGRATE_COUNT,
+ XE_GT_STATS_ID_SVM_64K_MIGRATE_COUNT,
+ XE_GT_STATS_ID_SVM_2M_MIGRATE_COUNT,
+ XE_GT_STATS_ID_SVM_4K_MIGRATE_US,
+ XE_GT_STATS_ID_SVM_64K_MIGRATE_US,
+ XE_GT_STATS_ID_SVM_2M_MIGRATE_US,
+ XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
+ XE_GT_STATS_ID_SVM_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
+ XE_GT_STATS_ID_SVM_DEVICE_COPY_KB,
+ XE_GT_STATS_ID_SVM_CPU_COPY_KB,
+ XE_GT_STATS_ID_SVM_4K_GET_PAGES_US,
+ XE_GT_STATS_ID_SVM_64K_GET_PAGES_US,
+ XE_GT_STATS_ID_SVM_2M_GET_PAGES_US,
+ XE_GT_STATS_ID_SVM_4K_BIND_US,
+ XE_GT_STATS_ID_SVM_64K_BIND_US,
+ XE_GT_STATS_ID_SVM_2M_BIND_US,
/* must be the last entry */
__XE_GT_STATS_NUM_IDS,
};
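
A usage sketch under stated assumptions (the callsite and timing code are
illustrative; only the stat id comes from this patch). The *_US ids appear
intended to accumulate durations in microseconds via xe_gt_stats_incr().

	/* Illustrative only: account the duration of a 2M SVM bind. */
	static void example_account_bind_us(struct xe_gt *gt)
	{
		ktime_t start = ktime_get();

		/* ... perform the bind here ... */

		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_BIND_US,
				 (int)ktime_us_delta(ktime_get(), start));
	}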
diff --git a/drivers/gpu/drm/xe/xe_gt_throttle.c b/drivers/gpu/drm/xe/xe_gt_throttle.c
index aa962c783cdf..82c5fbcdfbe3 100644
--- a/drivers/gpu/drm/xe/xe_gt_throttle.c
+++ b/drivers/gpu/drm/xe/xe_gt_throttle.c
@@ -8,221 +8,222 @@
#include <regs/xe_gt_regs.h>
#include "xe_device.h"
#include "xe_gt.h"
-#include "xe_gt_printk.h"
#include "xe_gt_sysfs.h"
#include "xe_gt_throttle.h"
#include "xe_mmio.h"
+#include "xe_platform_types.h"
#include "xe_pm.h"
/**
* DOC: Xe GT Throttle
*
- * Provides sysfs entries and other helpers for frequency throttle reasons in GT
+ * The GT frequency may be throttled by hardware/firmware for various reasons
+ * that are provided through attributes under the ``freq0/throttle/`` directory.
+ * Their availability depends on the platform, and an attribute may not be
+ * visible if the corresponding reason is not available.
*
- * device/gt#/freq0/throttle/status - Overall status
- * device/gt#/freq0/throttle/reason_pl1 - Frequency throttle due to PL1
- * device/gt#/freq0/throttle/reason_pl2 - Frequency throttle due to PL2
- * device/gt#/freq0/throttle/reason_pl4 - Frequency throttle due to PL4, Iccmax etc.
- * device/gt#/freq0/throttle/reason_thermal - Frequency throttle due to thermal
- * device/gt#/freq0/throttle/reason_prochot - Frequency throttle due to prochot
- * device/gt#/freq0/throttle/reason_ratl - Frequency throttle due to RATL
- * device/gt#/freq0/throttle/reason_vr_thermalert - Frequency throttle due to VR THERMALERT
- * device/gt#/freq0/throttle/reason_vr_tdc - Frequency throttle due to VR TDC
+ * The ``reasons`` attribute can be used by a sysadmin to monitor all possible
+ * reasons for throttling and report them. It's preferred over monitoring
+ * ``status`` and then reading the reasons from individual attributes, since
+ * that is racy. If no throttling is happening, "none" is returned; otherwise
+ * the names of the active reasons are listed, e.g. ``reason_pl1 reason_prochot``.
+ *
+ * The following attributes are available on Crescent Island platform:
+ *
+ * - ``status``: Overall throttle status (0: no throttling, 1: throttling)
+ * - ``reasons``: Array of reasons causing throttling separated by space
+ * - ``reason_pl1``: package PL1
+ * - ``reason_pl2``: package PL2
+ * - ``reason_pl4``: package PL4
+ * - ``reason_prochot``: prochot
+ * - ``reason_soc_thermal``: SoC thermal
+ * - ``reason_mem_thermal``: Memory thermal
+ * - ``reason_vr_thermal``: VR thermal
+ * - ``reason_iccmax``: ICCMAX
+ * - ``reason_ratl``: RATL thermal algorithm
+ * - ``reason_soc_avg_thermal``: SoC average temp
+ * - ``reason_fastvmode``: VR is hitting FastVMode
+ * - ``reason_psys_pl1``: PSYS PL1
+ * - ``reason_psys_pl2``: PSYS PL2
+ * - ``reason_p0_freq``: P0 frequency
+ * - ``reason_psys_crit``: PSYS critical
+ *
+ * Other platforms support the following reasons:
+ *
+ * - ``status``: Overall throttle status (0: no throttling, 1: throttling)
+ * - ``reasons``: Array of reasons causing throttling separated by space
+ * - ``reason_pl1``: package PL1
+ * - ``reason_pl2``: package PL2
+ * - ``reason_pl4``: package PL4, Iccmax etc.
+ * - ``reason_thermal``: thermal
+ * - ``reason_prochot``: prochot
+ * - ``reason_ratl``: RATL thermal algorithm
+ * - ``reason_vr_thermalert``: VR THERMALERT
+ * - ``reason_vr_tdc``: VR TDC
*/
-static struct xe_gt *
-dev_to_gt(struct device *dev)
-{
- return kobj_to_gt(dev->kobj.parent);
-}
-
-u32 xe_gt_throttle_get_limit_reasons(struct xe_gt *gt)
-{
- u32 reg;
-
- xe_pm_runtime_get(gt_to_xe(gt));
- if (xe_gt_is_media_type(gt))
- reg = xe_mmio_read32(&gt->mmio, MTL_MEDIA_PERF_LIMIT_REASONS);
- else
- reg = xe_mmio_read32(&gt->mmio, GT0_PERF_LIMIT_REASONS);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return reg;
-}
-
-static u32 read_status(struct xe_gt *gt)
-{
- u32 status = xe_gt_throttle_get_limit_reasons(gt) & GT0_PERF_LIMIT_REASONS_MASK;
-
- xe_gt_dbg(gt, "throttle reasons: 0x%08x\n", status);
- return status;
-}
+struct throttle_attribute {
+ struct kobj_attribute attr;
+ u32 mask;
+};
-static u32 read_reason_pl1(struct xe_gt *gt)
+static struct xe_gt *dev_to_gt(struct device *dev)
{
- u32 pl1 = xe_gt_throttle_get_limit_reasons(gt) & POWER_LIMIT_1_MASK;
-
- return pl1;
+ return kobj_to_gt(dev->kobj.parent);
}
-static u32 read_reason_pl2(struct xe_gt *gt)
+static struct xe_gt *throttle_to_gt(struct kobject *kobj)
{
- u32 pl2 = xe_gt_throttle_get_limit_reasons(gt) & POWER_LIMIT_2_MASK;
-
- return pl2;
+ return dev_to_gt(kobj_to_dev(kobj));
}
-static u32 read_reason_pl4(struct xe_gt *gt)
+static struct throttle_attribute *kobj_attribute_to_throttle(struct kobj_attribute *attr)
{
- u32 pl4 = xe_gt_throttle_get_limit_reasons(gt) & POWER_LIMIT_4_MASK;
-
- return pl4;
+ return container_of(attr, struct throttle_attribute, attr);
}
-static u32 read_reason_thermal(struct xe_gt *gt)
-{
- u32 thermal = xe_gt_throttle_get_limit_reasons(gt) & THERMAL_LIMIT_MASK;
-
- return thermal;
-}
-
-static u32 read_reason_prochot(struct xe_gt *gt)
+u32 xe_gt_throttle_get_limit_reasons(struct xe_gt *gt)
{
- u32 prochot = xe_gt_throttle_get_limit_reasons(gt) & PROCHOT_MASK;
-
- return prochot;
-}
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_reg reg;
+ u32 val, mask;
-static u32 read_reason_ratl(struct xe_gt *gt)
-{
- u32 ratl = xe_gt_throttle_get_limit_reasons(gt) & RATL_MASK;
+ if (xe_gt_is_media_type(gt))
+ reg = MTL_MEDIA_PERF_LIMIT_REASONS;
+ else
+ reg = GT0_PERF_LIMIT_REASONS;
- return ratl;
-}
+ if (xe->info.platform == XE_CRESCENTISLAND)
+ mask = CRI_PERF_LIMIT_REASONS_MASK;
+ else
+ mask = GT0_PERF_LIMIT_REASONS_MASK;
-static u32 read_reason_vr_thermalert(struct xe_gt *gt)
-{
- u32 thermalert = xe_gt_throttle_get_limit_reasons(gt) & VR_THERMALERT_MASK;
+ xe_pm_runtime_get(xe);
+ val = xe_mmio_read32(&gt->mmio, reg) & mask;
+ xe_pm_runtime_put(xe);
- return thermalert;
+ return val;
}
-static u32 read_reason_vr_tdc(struct xe_gt *gt)
+static bool is_throttled_by(struct xe_gt *gt, u32 mask)
{
- u32 tdc = xe_gt_throttle_get_limit_reasons(gt) & VR_TDC_MASK;
-
- return tdc;
+ return xe_gt_throttle_get_limit_reasons(gt) & mask;
}
-static ssize_t status_show(struct kobject *kobj,
+static ssize_t reason_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buff)
{
- struct device *dev = kobj_to_dev(kobj);
- struct xe_gt *gt = dev_to_gt(dev);
- bool status = !!read_status(gt);
+ struct throttle_attribute *ta = kobj_attribute_to_throttle(attr);
+ struct xe_gt *gt = throttle_to_gt(kobj);
- return sysfs_emit(buff, "%u\n", status);
+ return sysfs_emit(buff, "%u\n", is_throttled_by(gt, ta->mask));
}
-static struct kobj_attribute attr_status = __ATTR_RO(status);
-static ssize_t reason_pl1_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buff)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct xe_gt *gt = dev_to_gt(dev);
- bool pl1 = !!read_reason_pl1(gt);
+static const struct attribute_group *get_platform_throttle_group(struct xe_device *xe);
- return sysfs_emit(buff, "%u\n", pl1);
-}
-static struct kobj_attribute attr_reason_pl1 = __ATTR_RO(reason_pl1);
-
-static ssize_t reason_pl2_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buff)
+static ssize_t reasons_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
{
- struct device *dev = kobj_to_dev(kobj);
- struct xe_gt *gt = dev_to_gt(dev);
- bool pl2 = !!read_reason_pl2(gt);
+ struct xe_gt *gt = throttle_to_gt(kobj);
+ struct xe_device *xe = gt_to_xe(gt);
+ const struct attribute_group *group;
+ struct attribute **pother;
+ ssize_t ret = 0;
+ u32 reasons;
- return sysfs_emit(buff, "%u\n", pl2);
-}
-static struct kobj_attribute attr_reason_pl2 = __ATTR_RO(reason_pl2);
+ reasons = xe_gt_throttle_get_limit_reasons(gt);
+ if (!reasons)
+ goto ret_none;
-static ssize_t reason_pl4_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buff)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct xe_gt *gt = dev_to_gt(dev);
- bool pl4 = !!read_reason_pl4(gt);
+ group = get_platform_throttle_group(xe);
+ for (pother = group->attrs; *pother; pother++) {
+ struct kobj_attribute *kattr = container_of(*pother, struct kobj_attribute, attr);
+ struct throttle_attribute *other_ta = kobj_attribute_to_throttle(kattr);
- return sysfs_emit(buff, "%u\n", pl4);
-}
-static struct kobj_attribute attr_reason_pl4 = __ATTR_RO(reason_pl4);
+ if (other_ta->mask != U32_MAX && reasons & other_ta->mask)
+ ret += sysfs_emit_at(buff, ret, "%s ", (*pother)->name);
+ }
-static ssize_t reason_thermal_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buff)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct xe_gt *gt = dev_to_gt(dev);
- bool thermal = !!read_reason_thermal(gt);
+ if (drm_WARN_ONCE(&xe->drm, !ret, "Unknown reason: %#x\n", reasons))
+ goto ret_none;
- return sysfs_emit(buff, "%u\n", thermal);
-}
-static struct kobj_attribute attr_reason_thermal = __ATTR_RO(reason_thermal);
+ /* Drop extra space from last iteration above */
+ ret--;
+ ret += sysfs_emit_at(buff, ret, "\n");
-static ssize_t reason_prochot_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buff)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct xe_gt *gt = dev_to_gt(dev);
- bool prochot = !!read_reason_prochot(gt);
+ return ret;
- return sysfs_emit(buff, "%u\n", prochot);
+ret_none:
+ return sysfs_emit(buff, "none\n");
}
-static struct kobj_attribute attr_reason_prochot = __ATTR_RO(reason_prochot);
-static ssize_t reason_ratl_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buff)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct xe_gt *gt = dev_to_gt(dev);
- bool ratl = !!read_reason_ratl(gt);
-
- return sysfs_emit(buff, "%u\n", ratl);
-}
-static struct kobj_attribute attr_reason_ratl = __ATTR_RO(reason_ratl);
-
-static ssize_t reason_vr_thermalert_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buff)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct xe_gt *gt = dev_to_gt(dev);
- bool thermalert = !!read_reason_vr_thermalert(gt);
-
- return sysfs_emit(buff, "%u\n", thermalert);
-}
-static struct kobj_attribute attr_reason_vr_thermalert = __ATTR_RO(reason_vr_thermalert);
-
-static ssize_t reason_vr_tdc_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buff)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct xe_gt *gt = dev_to_gt(dev);
- bool tdc = !!read_reason_vr_tdc(gt);
-
- return sysfs_emit(buff, "%u\n", tdc);
-}
-static struct kobj_attribute attr_reason_vr_tdc = __ATTR_RO(reason_vr_tdc);
+#define THROTTLE_ATTR_RO(name, _mask) \
+ struct throttle_attribute attr_##name = { \
+ .attr = __ATTR(name, 0444, reason_show, NULL), \
+ .mask = _mask, \
+ }
+
+#define THROTTLE_ATTR_RO_FUNC(name, _mask, _show) \
+ struct throttle_attribute attr_##name = { \
+ .attr = __ATTR(name, 0444, _show, NULL), \
+ .mask = _mask, \
+ }
+
+static THROTTLE_ATTR_RO_FUNC(reasons, 0, reasons_show);
+static THROTTLE_ATTR_RO(status, U32_MAX);
+static THROTTLE_ATTR_RO(reason_pl1, POWER_LIMIT_1_MASK);
+static THROTTLE_ATTR_RO(reason_pl2, POWER_LIMIT_2_MASK);
+static THROTTLE_ATTR_RO(reason_pl4, POWER_LIMIT_4_MASK);
+static THROTTLE_ATTR_RO(reason_thermal, THERMAL_LIMIT_MASK);
+static THROTTLE_ATTR_RO(reason_prochot, PROCHOT_MASK);
+static THROTTLE_ATTR_RO(reason_ratl, RATL_MASK);
+static THROTTLE_ATTR_RO(reason_vr_thermalert, VR_THERMALERT_MASK);
+static THROTTLE_ATTR_RO(reason_vr_tdc, VR_TDC_MASK);
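
For reference, an expansion sketch (not extra code in the patch):
static THROTTLE_ATTR_RO(status, U32_MAX) expands to roughly

	static struct throttle_attribute attr_status = {
		.attr = __ATTR(status, 0444, reason_show, NULL),
		.mask = U32_MAX,
	};

so one reason_show() serves every attribute: ``status`` reports 1 whenever any
limit-reason bit is set, while each ``reason_*`` attribute tests only its own
mask. reasons_show() walks the platform group and skips ``status`` (its
U32_MAX mask is filtered explicitly) and ``reasons`` itself (a zero mask never
matches).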
static struct attribute *throttle_attrs[] = {
- &attr_status.attr,
- &attr_reason_pl1.attr,
- &attr_reason_pl2.attr,
- &attr_reason_pl4.attr,
- &attr_reason_thermal.attr,
- &attr_reason_prochot.attr,
- &attr_reason_ratl.attr,
- &attr_reason_vr_thermalert.attr,
- &attr_reason_vr_tdc.attr,
+ &attr_reasons.attr.attr,
+ &attr_status.attr.attr,
+ &attr_reason_pl1.attr.attr,
+ &attr_reason_pl2.attr.attr,
+ &attr_reason_pl4.attr.attr,
+ &attr_reason_thermal.attr.attr,
+ &attr_reason_prochot.attr.attr,
+ &attr_reason_ratl.attr.attr,
+ &attr_reason_vr_thermalert.attr.attr,
+ &attr_reason_vr_tdc.attr.attr,
+ NULL
+};
+
+static THROTTLE_ATTR_RO(reason_vr_thermal, VR_THERMAL_MASK);
+static THROTTLE_ATTR_RO(reason_soc_thermal, SOC_THERMAL_LIMIT_MASK);
+static THROTTLE_ATTR_RO(reason_mem_thermal, MEM_THERMAL_MASK);
+static THROTTLE_ATTR_RO(reason_iccmax, ICCMAX_MASK);
+static THROTTLE_ATTR_RO(reason_soc_avg_thermal, SOC_AVG_THERMAL_MASK);
+static THROTTLE_ATTR_RO(reason_fastvmode, FASTVMODE_MASK);
+static THROTTLE_ATTR_RO(reason_psys_pl1, PSYS_PL1_MASK);
+static THROTTLE_ATTR_RO(reason_psys_pl2, PSYS_PL2_MASK);
+static THROTTLE_ATTR_RO(reason_p0_freq, P0_FREQ_MASK);
+static THROTTLE_ATTR_RO(reason_psys_crit, PSYS_CRIT_MASK);
+
+static struct attribute *cri_throttle_attrs[] = {
+ /* Common */
+ &attr_reasons.attr.attr,
+ &attr_status.attr.attr,
+ &attr_reason_pl1.attr.attr,
+ &attr_reason_pl2.attr.attr,
+ &attr_reason_pl4.attr.attr,
+ &attr_reason_prochot.attr.attr,
+ &attr_reason_ratl.attr.attr,
+ /* CRI */
+ &attr_reason_vr_thermal.attr.attr,
+ &attr_reason_soc_thermal.attr.attr,
+ &attr_reason_mem_thermal.attr.attr,
+ &attr_reason_iccmax.attr.attr,
+ &attr_reason_soc_avg_thermal.attr.attr,
+ &attr_reason_fastvmode.attr.attr,
+ &attr_reason_psys_pl1.attr.attr,
+ &attr_reason_psys_pl2.attr.attr,
+ &attr_reason_p0_freq.attr.attr,
+ &attr_reason_psys_crit.attr.attr,
NULL
};
@@ -231,19 +232,37 @@ static const struct attribute_group throttle_group_attrs = {
.attrs = throttle_attrs,
};
+static const struct attribute_group cri_throttle_group_attrs = {
+ .name = "throttle",
+ .attrs = cri_throttle_attrs,
+};
+
+static const struct attribute_group *get_platform_throttle_group(struct xe_device *xe)
+{
+ switch (xe->info.platform) {
+ case XE_CRESCENTISLAND:
+ return &cri_throttle_group_attrs;
+ default:
+ return &throttle_group_attrs;
+ }
+}
+
static void gt_throttle_sysfs_fini(void *arg)
{
struct xe_gt *gt = arg;
+ struct xe_device *xe = gt_to_xe(gt);
+ const struct attribute_group *group = get_platform_throttle_group(xe);
- sysfs_remove_group(gt->freq, &throttle_group_attrs);
+ sysfs_remove_group(gt->freq, group);
}
int xe_gt_throttle_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
+ const struct attribute_group *group = get_platform_throttle_group(xe);
int err;
- err = sysfs_create_group(gt->freq, &throttle_group_attrs);
+ err = sysfs_create_group(gt->freq, group);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
deleted file mode 100644
index 086c12ee3d9d..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ /dev/null
@@ -1,596 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "xe_gt_tlb_invalidation.h"
-
-#include "abi/guc_actions_abi.h"
-#include "xe_device.h"
-#include "xe_force_wake.h"
-#include "xe_gt.h"
-#include "xe_gt_printk.h"
-#include "xe_guc.h"
-#include "xe_guc_ct.h"
-#include "xe_gt_stats.h"
-#include "xe_mmio.h"
-#include "xe_pm.h"
-#include "xe_sriov.h"
-#include "xe_trace.h"
-#include "regs/xe_guc_regs.h"
-
-#define FENCE_STACK_BIT DMA_FENCE_FLAG_USER_BITS
-
-/*
- * TLB inval depends on pending commands in the CT queue and then the real
- * invalidation time. Double up the time to process full CT queue
- * just to be on the safe side.
- */
-static long tlb_timeout_jiffies(struct xe_gt *gt)
-{
- /* this reflects what HW/GuC needs to process TLB inv request */
- const long hw_tlb_timeout = HZ / 4;
-
- /* this estimates actual delay caused by the CTB transport */
- long delay = xe_guc_ct_queue_proc_time_jiffies(&gt->uc.guc.ct);
-
- return hw_tlb_timeout + 2 * delay;
-}
-
-static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
-{
- if (WARN_ON_ONCE(!fence->gt))
- return;
-
- xe_pm_runtime_put(gt_to_xe(fence->gt));
- fence->gt = NULL; /* fini() should be called once */
-}
-
-static void
-__invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
-
- trace_xe_gt_tlb_invalidation_fence_signal(xe, fence);
- xe_gt_tlb_invalidation_fence_fini(fence);
- dma_fence_signal(&fence->base);
- if (!stack)
- dma_fence_put(&fence->base);
-}
-
-static void
-invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
-{
- list_del(&fence->link);
- __invalidation_fence_signal(xe, fence);
-}
-
-void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence)
-{
- if (WARN_ON_ONCE(!fence->gt))
- return;
-
- __invalidation_fence_signal(gt_to_xe(fence->gt), fence);
-}
-
-static void xe_gt_tlb_fence_timeout(struct work_struct *work)
-{
- struct xe_gt *gt = container_of(work, struct xe_gt,
- tlb_invalidation.fence_tdr.work);
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_gt_tlb_invalidation_fence *fence, *next;
-
- LNL_FLUSH_WORK(&gt->uc.guc.ct.g2h_worker);
-
- spin_lock_irq(&gt->tlb_invalidation.pending_lock);
- list_for_each_entry_safe(fence, next,
- &gt->tlb_invalidation.pending_fences, link) {
- s64 since_inval_ms = ktime_ms_delta(ktime_get(),
- fence->invalidation_time);
-
- if (msecs_to_jiffies(since_inval_ms) < tlb_timeout_jiffies(gt))
- break;
-
- trace_xe_gt_tlb_invalidation_fence_timeout(xe, fence);
- xe_gt_err(gt, "TLB invalidation fence timeout, seqno=%d recv=%d",
- fence->seqno, gt->tlb_invalidation.seqno_recv);
-
- fence->base.error = -ETIME;
- invalidation_fence_signal(xe, fence);
- }
- if (!list_empty(&gt->tlb_invalidation.pending_fences))
- queue_delayed_work(system_wq,
- &gt->tlb_invalidation.fence_tdr,
- tlb_timeout_jiffies(gt));
- spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
-}
-
-/**
- * xe_gt_tlb_invalidation_init_early - Initialize GT TLB invalidation state
- * @gt: GT structure
- *
- * Initialize GT TLB invalidation state, purely software initialization, should
- * be called once during driver load.
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt)
-{
- gt->tlb_invalidation.seqno = 1;
- INIT_LIST_HEAD(&gt->tlb_invalidation.pending_fences);
- spin_lock_init(&gt->tlb_invalidation.pending_lock);
- spin_lock_init(&gt->tlb_invalidation.lock);
- INIT_DELAYED_WORK(&gt->tlb_invalidation.fence_tdr,
- xe_gt_tlb_fence_timeout);
-
- return 0;
-}
-
-/**
- * xe_gt_tlb_invalidation_reset - Initialize GT TLB invalidation reset
- * @gt: GT structure
- *
- * Signal any pending invalidation fences, should be called during a GT reset
- */
-void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
-{
- struct xe_gt_tlb_invalidation_fence *fence, *next;
- int pending_seqno;
-
- /*
- * we can get here before the CTs are even initialized if we're wedging
- * very early, in which case there are not going to be any pending
- * fences so we can bail immediately.
- */
- if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
- return;
-
- /*
- * CT channel is already disabled at this point. No new TLB requests can
- * appear.
- */
-
- mutex_lock(&gt->uc.guc.ct.lock);
- spin_lock_irq(&gt->tlb_invalidation.pending_lock);
- cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
- /*
- * We might have various kworkers waiting for TLB flushes to complete
- * which are not tracked with an explicit TLB fence, however at this
- * stage that will never happen since the CT is already disabled, so
- * make sure we signal them here under the assumption that we have
- * completed a full GT reset.
- */
- if (gt->tlb_invalidation.seqno == 1)
- pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
- else
- pending_seqno = gt->tlb_invalidation.seqno - 1;
- WRITE_ONCE(gt->tlb_invalidation.seqno_recv, pending_seqno);
-
- list_for_each_entry_safe(fence, next,
- &gt->tlb_invalidation.pending_fences, link)
- invalidation_fence_signal(gt_to_xe(gt), fence);
- spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- mutex_unlock(&gt->uc.guc.ct.lock);
-}
-
-static bool tlb_invalidation_seqno_past(struct xe_gt *gt, int seqno)
-{
- int seqno_recv = READ_ONCE(gt->tlb_invalidation.seqno_recv);
-
- if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
- return false;
-
- if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
- return true;
-
- return seqno_recv >= seqno;
-}
-
-static int send_tlb_invalidation(struct xe_guc *guc,
- struct xe_gt_tlb_invalidation_fence *fence,
- u32 *action, int len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
- int seqno;
- int ret;
-
- xe_gt_assert(gt, fence);
-
- /*
- * XXX: The seqno algorithm relies on TLB invalidation being processed
- * in order which they currently are, if that changes the algorithm will
- * need to be updated.
- */
-
- mutex_lock(&guc->ct.lock);
- seqno = gt->tlb_invalidation.seqno;
- fence->seqno = seqno;
- trace_xe_gt_tlb_invalidation_fence_send(xe, fence);
- action[1] = seqno;
- ret = xe_guc_ct_send_locked(&guc->ct, action, len,
- G2H_LEN_DW_TLB_INVALIDATE, 1);
- if (!ret) {
- spin_lock_irq(&gt->tlb_invalidation.pending_lock);
- /*
- * We haven't actually published the TLB fence as per
- * pending_fences, but in theory our seqno could have already
- * been written as we acquired the pending_lock. In such a case
- * we can just go ahead and signal the fence here.
- */
- if (tlb_invalidation_seqno_past(gt, seqno)) {
- __invalidation_fence_signal(xe, fence);
- } else {
- fence->invalidation_time = ktime_get();
- list_add_tail(&fence->link,
- &gt->tlb_invalidation.pending_fences);
-
- if (list_is_singular(&gt->tlb_invalidation.pending_fences))
- queue_delayed_work(system_wq,
- &gt->tlb_invalidation.fence_tdr,
- tlb_timeout_jiffies(gt));
- }
- spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
- } else {
- __invalidation_fence_signal(xe, fence);
- }
- if (!ret) {
- gt->tlb_invalidation.seqno = (gt->tlb_invalidation.seqno + 1) %
- TLB_INVALIDATION_SEQNO_MAX;
- if (!gt->tlb_invalidation.seqno)
- gt->tlb_invalidation.seqno = 1;
- }
- mutex_unlock(&guc->ct.lock);
- xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
-
- return ret;
-}
-
-#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
- XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
- XE_GUC_TLB_INVAL_FLUSH_CACHE)
-
-/**
- * xe_gt_tlb_invalidation_guc - Issue a TLB invalidation on this GT for the GuC
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion
- *
- * Issue a TLB invalidation for the GuC. Completion of TLB is asynchronous and
- * caller can use the invalidation fence to wait for completion.
- *
- * Return: 0 on success, negative error code on error
- */
-static int xe_gt_tlb_invalidation_guc(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence)
-{
- u32 action[] = {
- XE_GUC_ACTION_TLB_INVALIDATION,
- 0, /* seqno, replaced in send_tlb_invalidation */
- MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
- };
- int ret;
-
- ret = send_tlb_invalidation(&gt->uc.guc, fence, action,
- ARRAY_SIZE(action));
- /*
- * -ECANCELED indicates the CT is stopped for a GT reset. TLB caches
- * should be nuked on a GT reset so this error can be ignored.
- */
- if (ret == -ECANCELED)
- return 0;
-
- return ret;
-}
-
-/**
- * xe_gt_tlb_invalidation_ggtt - Issue a TLB invalidation on this GT for the GGTT
- * @gt: GT structure
- *
- * Issue a TLB invalidation for the GGTT. Completion of TLB invalidation is
- * synchronous.
- *
- * Return: 0 on success, negative error code on error
- */
-int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
-{
- struct xe_device *xe = gt_to_xe(gt);
- unsigned int fw_ref;
-
- if (xe_guc_ct_enabled(&gt->uc.guc.ct) &&
- gt->uc.guc.submission_state.enabled) {
- struct xe_gt_tlb_invalidation_fence fence;
- int ret;
-
- xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
- ret = xe_gt_tlb_invalidation_guc(gt, &fence);
- if (ret)
- return ret;
-
- xe_gt_tlb_invalidation_fence_wait(&fence);
- } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
- struct xe_mmio *mmio = &gt->mmio;
-
- if (IS_SRIOV_VF(xe))
- return 0;
-
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
- xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
- PVC_GUC_TLB_INV_DESC1_INVALIDATE);
- xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
- PVC_GUC_TLB_INV_DESC0_VALID);
- } else {
- xe_mmio_write32(mmio, GUC_TLB_INV_CR,
- GUC_TLB_INV_CR_INVALIDATE);
- }
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- }
-
- return 0;
-}
-
-static int send_tlb_invalidation_all(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence)
-{
- u32 action[] = {
- XE_GUC_ACTION_TLB_INVALIDATION_ALL,
- 0, /* seqno, replaced in send_tlb_invalidation */
- MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
- };
-
- return send_tlb_invalidation(&gt->uc.guc, fence, action, ARRAY_SIZE(action));
-}
-
-/**
- * xe_gt_tlb_invalidation_all - Invalidate all TLBs across PF and all VFs.
- * @gt: the &xe_gt structure
- * @fence: the &xe_gt_tlb_invalidation_fence to be signaled on completion
- *
- * Send a request to invalidate all TLBs across PF and all VFs.
- *
- * Return: 0 on success, negative error code on error
- */
-int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence)
-{
- int err;
-
- xe_gt_assert(gt, gt == fence->gt);
-
- err = send_tlb_invalidation_all(gt, fence);
- if (err)
- xe_gt_err(gt, "TLB invalidation request failed (%pe)", ERR_PTR(err));
-
- return err;
-}
-
-/*
- * Ensure that roundup_pow_of_two(length) doesn't overflow.
- * Note that roundup_pow_of_two() operates on unsigned long,
- * not on u64.
- */
-#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
-
-/**
- * xe_gt_tlb_invalidation_range - Issue a TLB invalidation on this GT for an
- * address range
- *
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion
- * @start: start address
- * @end: end address
- * @asid: address space id
- *
- * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can use
- * the invalidation fence to wait for completion.
- *
- * Return: Negative error code on error, 0 on success
- */
-int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- u64 start, u64 end, u32 asid)
-{
- struct xe_device *xe = gt_to_xe(gt);
-#define MAX_TLB_INVALIDATION_LEN 7
- u32 action[MAX_TLB_INVALIDATION_LEN];
- u64 length = end - start;
- int len = 0;
-
- xe_gt_assert(gt, fence);
-
- /* Execlists not supported */
- if (gt_to_xe(gt)->info.force_execlist) {
- __invalidation_fence_signal(xe, fence);
- return 0;
- }
-
- action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
- action[len++] = 0; /* seqno, replaced in send_tlb_invalidation */
- if (!xe->info.has_range_tlb_invalidation ||
- length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
- action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
- } else {
- u64 orig_start = start;
- u64 align;
-
- if (length < SZ_4K)
- length = SZ_4K;
-
- /*
- * We need to invalidate a higher granularity if start address
- * is not aligned to length. When start is not aligned with
- * length we need to find the length large enough to create an
- * address mask covering the required range.
- */
- align = roundup_pow_of_two(length);
- start = ALIGN_DOWN(start, align);
- end = ALIGN(end, align);
- length = align;
- while (start + length < end) {
- length <<= 1;
- start = ALIGN_DOWN(orig_start, length);
- }
-
- /*
- * Minimum invalidation size for a 2MB page that the hardware
- * expects is 16MB
- */
- if (length >= SZ_2M) {
- length = max_t(u64, SZ_16M, length);
- start = ALIGN_DOWN(orig_start, length);
- }
-
- xe_gt_assert(gt, length >= SZ_4K);
- xe_gt_assert(gt, is_power_of_2(length));
- xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
- ilog2(SZ_2M) + 1)));
- xe_gt_assert(gt, IS_ALIGNED(start, length));
-
- action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
- action[len++] = asid;
- action[len++] = lower_32_bits(start);
- action[len++] = upper_32_bits(start);
- action[len++] = ilog2(length) - ilog2(SZ_4K);
- }
-
- xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
-
- return send_tlb_invalidation(&gt->uc.guc, fence, action, len);
-}
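The widening loop above is easiest to follow with concrete numbers: the selective op covers exactly one power-of-two block whose start is aligned to its length, so an unaligned request grows until a single block covers it (illustrative values, not part of the patch):

/*
 * Request: start = 0x1000, end = 0x3000 (length 0x2000)
 *   align  = roundup_pow_of_two(0x2000)  = 0x2000
 *   start  = ALIGN_DOWN(0x1000, 0x2000)  = 0x0000
 *   end    = ALIGN(0x3000, 0x2000)       = 0x4000
 *   0x0000 + 0x2000 < 0x4000, so double:
 *   length = 0x4000, start = ALIGN_DOWN(0x1000, 0x4000) = 0x0000
 *   0x0000 + 0x4000 >= 0x4000, done.
 *
 * Result: invalidate [0x0000, 0x4000), with the final action dword
 *   ilog2(0x4000) - ilog2(SZ_4K) = 14 - 12 = 2
 */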
-
-/**
- * xe_gt_tlb_invalidation_vm - Issue a TLB invalidation on this GT for a VM
- * @gt: graphics tile
- * @vm: VM to invalidate
- *
- * Invalidate entire VM's address space
- */
-void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
-{
- struct xe_gt_tlb_invalidation_fence fence;
- u64 range = 1ull << vm->xe->info.va_bits;
- int ret;
-
- xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
-
- ret = xe_gt_tlb_invalidation_range(gt, &fence, 0, range, vm->usm.asid);
- if (ret < 0)
- return;
-
- xe_gt_tlb_invalidation_fence_wait(&fence);
-}
-
-/**
- * xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
- * @guc: guc
- * @msg: message indicating TLB invalidation done
- * @len: length of message
- *
- * Parse seqno of TLB invalidation, wake any waiters for seqno, and signal any
- * invalidation fences for seqno. Algorithm for this depends on seqno being
- * received in-order and asserts this assumption.
- *
- * Return: 0 on success, -EPROTO for malformed messages.
- */
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
-{
- struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
- struct xe_gt_tlb_invalidation_fence *fence, *next;
- unsigned long flags;
-
- if (unlikely(len != 1))
- return -EPROTO;
-
- /*
- * This can also be run both directly from the IRQ handler and also in
- * process_g2h_msg(). Only one may process any individual CT message,
- * however the order they are processed here could result in skipping a
- * seqno. To handle that we just process all the seqnos from the last
- * seqno_recv up to and including the one in msg[0]. The delta should be
- * very small so there shouldn't be much of pending_fences we actually
- * need to iterate over here.
- *
- * From GuC POV we expect the seqnos to always appear in-order, so if we
- * see something later in the timeline we can be sure that anything
- * appearing earlier has already signalled, just that we have yet to
- * officially process the CT message like if racing against
- * process_g2h_msg().
- */
- spin_lock_irqsave(&gt->tlb_invalidation.pending_lock, flags);
- if (tlb_invalidation_seqno_past(gt, msg[0])) {
- spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
- return 0;
- }
-
- WRITE_ONCE(gt->tlb_invalidation.seqno_recv, msg[0]);
-
- list_for_each_entry_safe(fence, next,
- &gt->tlb_invalidation.pending_fences, link) {
- trace_xe_gt_tlb_invalidation_fence_recv(xe, fence);
-
- if (!tlb_invalidation_seqno_past(gt, fence->seqno))
- break;
-
- invalidation_fence_signal(xe, fence);
- }
-
- if (!list_empty(&gt->tlb_invalidation.pending_fences))
- mod_delayed_work(system_wq,
- &gt->tlb_invalidation.fence_tdr,
- tlb_timeout_jiffies(gt));
- else
- cancel_delayed_work(&gt->tlb_invalidation.fence_tdr);
-
- spin_unlock_irqrestore(&gt->tlb_invalidation.pending_lock, flags);
-
- return 0;
-}
-
-static const char *
-invalidation_fence_get_driver_name(struct dma_fence *dma_fence)
-{
- return "xe";
-}
-
-static const char *
-invalidation_fence_get_timeline_name(struct dma_fence *dma_fence)
-{
- return "invalidation_fence";
-}
-
-static const struct dma_fence_ops invalidation_fence_ops = {
- .get_driver_name = invalidation_fence_get_driver_name,
- .get_timeline_name = invalidation_fence_get_timeline_name,
-};
-
-/**
- * xe_gt_tlb_invalidation_fence_init - Initialize TLB invalidation fence
- * @gt: GT
- * @fence: TLB invalidation fence to initialize
- * @stack: fence is stack variable
- *
- * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
- * will be automatically called when fence is signalled (all fences must signal),
- * even on error.
- */
-void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- bool stack)
-{
- xe_pm_runtime_get_noresume(gt_to_xe(gt));
-
- spin_lock_irq(&gt->tlb_invalidation.lock);
- dma_fence_init(&fence->base, &invalidation_fence_ops,
- &gt->tlb_invalidation.lock,
- dma_fence_context_alloc(1), 1);
- spin_unlock_irq(&gt->tlb_invalidation.lock);
- INIT_LIST_HEAD(&fence->link);
- if (stack)
- set_bit(FENCE_STACK_BIT, &fence->base.flags);
- else
- dma_fence_get(&fence->base);
- fence->gt = gt;
-}
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
deleted file mode 100644
index f7f0f2eaf4b5..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_GT_TLB_INVALIDATION_H_
-#define _XE_GT_TLB_INVALIDATION_H_
-
-#include <linux/types.h>
-
-#include "xe_gt_tlb_invalidation_types.h"
-
-struct xe_gt;
-struct xe_guc;
-struct xe_vm;
-struct xe_vma;
-
-int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
-
-void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
-void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
-int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence);
-int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- u64 start, u64 end, u32 asid);
-int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
-
-void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- bool stack);
-void xe_gt_tlb_invalidation_fence_signal(struct xe_gt_tlb_invalidation_fence *fence);
-
-static inline void
-xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
-{
- dma_fence_wait(&fence->base, false);
-}
-
-#endif /* _XE_GT_TLB_INVALIDATION_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
deleted file mode 100644
index de6e825e0851..000000000000
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation_types.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _XE_GT_TLB_INVALIDATION_TYPES_H_
-#define _XE_GT_TLB_INVALIDATION_TYPES_H_
-
-#include <linux/dma-fence.h>
-
-struct xe_gt;
-
-/**
- * struct xe_gt_tlb_invalidation_fence - XE GT TLB invalidation fence
- *
- * Optionally passed to xe_gt_tlb_invalidation and will be signaled upon TLB
- * invalidation completion.
- */
-struct xe_gt_tlb_invalidation_fence {
- /** @base: dma fence base */
- struct dma_fence base;
- /** @gt: GT which fence belong to */
- struct xe_gt *gt;
- /** @link: link into list of pending tlb fences */
- struct list_head link;
- /** @seqno: seqno of TLB invalidation to signal fence one */
- int seqno;
- /** @invalidation_time: time of TLB invalidation */
- ktime_t invalidation_time;
-};
-
-#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 8c63e3263643..bd5260221d8d 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -12,6 +12,7 @@
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
+#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_wa.h"
@@ -122,6 +123,21 @@ gen_l3_mask_from_pattern(struct xe_device *xe, xe_l3_bank_mask_t dst,
}
}
+bool xe_gt_topology_report_l3(struct xe_gt *gt)
+{
+ /*
+ * No known userspace needs/uses the L3 bank mask reported by
+ * the media GT, and the hardware itself is known to report bogus
+ * values on several platforms. Only report the L3 bank mask as part
+ * of the media GT's topology on pre-Xe3 platforms, since that's
+ * already part of our ABI.
+ */
+ if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) >= 30)
+ return false;
+
+ return true;
+}
+
static void
load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
{
@@ -129,19 +145,14 @@ load_l3_bank_mask(struct xe_gt *gt, xe_l3_bank_mask_t l3_bank_mask)
struct xe_mmio *mmio = &gt->mmio;
u32 fuse3 = xe_mmio_read32(mmio, MIRROR_FUSE3);
- /*
- * PTL platforms with media version 30.00 do not provide proper values
- * for the media GT's L3 bank registers. Skip the readout since we
- * don't have any way to obtain real values.
- *
- * This may get re-described as an official workaround in the future,
- * but there's no tracking number assigned yet so we use a custom
- * OOB workaround descriptor.
- */
- if (XE_WA(gt, no_media_l3))
+ if (!xe_gt_topology_report_l3(gt))
return;
- if (GRAPHICS_VER(xe) >= 30) {
+ if (GRAPHICS_VER(xe) >= 35) {
+ u32 fuse_val = xe_mmio_read32(mmio, MIRROR_L3BANK_ENABLE);
+
+ bitmap_from_arr32(l3_bank_mask, &fuse_val, 32);
+ } else if (GRAPHICS_VER(xe) >= 30) {
xe_l3_bank_mask_t per_node = {};
u32 meml3_en = REG_FIELD_GET(XE2_NODE_ENABLE_MASK, fuse3);
u32 mirror_l3bank_enable = xe_mmio_read32(mmio, MIRROR_L3BANK_ENABLE);
@@ -262,8 +273,14 @@ static const char *eu_type_to_str(enum xe_gt_eu_type eu_type)
return NULL;
}
-void
-xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_gt_topology_dump() - Dump GT topology into a drm printer.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Return: always 0.
+ */
+int xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
{
drm_printf(p, "dss mask (geometry): %*pb\n", XE_MAX_DSS_FUSE_BITS,
gt->fuse_topo.g_dss_mask);
@@ -275,8 +292,10 @@ xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p)
drm_printf(p, "EU type: %s\n",
eu_type_to_str(gt->fuse_topo.eu_type));
- drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
- gt->fuse_topo.l3_bank_mask);
+ if (xe_gt_topology_report_l3(gt))
+ drm_printf(p, "L3 bank mask: %*pb\n", XE_MAX_L3_BANK_MASK_BITS,
+ gt->fuse_topo.l3_bank_mask);
+ return 0;
}
/*
@@ -290,6 +309,13 @@ xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum)
return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize);
}
+/* Used to obtain the index of the first L3 bank. */
+unsigned int
+xe_l3_bank_mask_ffs(const xe_l3_bank_mask_t mask)
+{
+ return find_first_bit(mask, XE_MAX_L3_BANK_MASK_BITS);
+}
+
/**
* xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant
* @gt: GT to check
@@ -328,3 +354,19 @@ bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss)
{
return test_bit(dss, gt->fuse_topo.c_dss_mask);
}
+
+bool xe_gt_has_discontiguous_dss_groups(const struct xe_gt *gt)
+{
+ unsigned int xecore;
+ int last_group = -1;
+ u16 group, instance;
+
+ for_each_dss_steering(xecore, gt, group, instance) {
+ if (last_group != group) {
+ if (group - last_group > 1)
+ return true;
+ last_group = group;
+ }
+ }
+ return false;
+}
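The walk only compares consecutive distinct group numbers, so a fully fused-off group appears as a jump greater than one. A hedged standalone sketch over a plain array (names illustrative, not part of the patch):

#include <stdbool.h>
#include <stddef.h>

/* groups[] holds the group number of each enabled DSS, in walk order */
static bool has_discontiguous_groups(const int *groups, size_t n)
{
	int last_group = -1;
	size_t i;

	for (i = 0; i < n; i++) {
		if (groups[i] != last_group) {
			if (groups[i] - last_group > 1)
				return true;	/* e.g. group 1 fused off */
			last_group = groups[i];
		}
	}
	return false;
}

/* {0, 0, 1, 1} -> false;  {0, 0, 2, 2} -> true */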
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h
index c8140704ad4c..162d603c9b81 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.h
+++ b/drivers/gpu/drm/xe/xe_gt_topology.h
@@ -23,7 +23,7 @@ struct drm_printer;
void xe_gt_topology_init(struct xe_gt *gt);
-void xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_gt_topology_dump(struct xe_gt *gt, struct drm_printer *p);
/**
* xe_gt_topology_mask_last_dss() - Returns the index of the last DSS in a mask.
@@ -40,6 +40,8 @@ xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask)
unsigned int
xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum);
+unsigned int
+xe_l3_bank_mask_ffs(const xe_l3_bank_mask_t mask);
bool
xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
@@ -47,4 +49,8 @@ xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad);
bool xe_gt_has_geometry_dss(struct xe_gt *gt, unsigned int dss);
bool xe_gt_has_compute_dss(struct xe_gt *gt, unsigned int dss);
+bool xe_gt_has_discontiguous_dss_groups(const struct xe_gt *gt);
+
+bool xe_gt_topology_report_l3(struct xe_gt *gt);
+
#endif /* _XE_GT_TOPOLOGY_H_ */
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 96344c604726..0a728180b6fe 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -17,6 +17,7 @@
#include "xe_oa_types.h"
#include "xe_reg_sr_types.h"
#include "xe_sa_types.h"
+#include "xe_tlb_inval_types.h"
#include "xe_uc_types.h"
struct xe_exec_queue_ops;
@@ -65,6 +66,7 @@ struct xe_mmio_range {
*/
enum xe_steering_type {
L3BANK,
+ NODE,
MSLICE,
LNCF,
DSS,
@@ -72,6 +74,13 @@ enum xe_steering_type {
SQIDI_PSMI,
/*
+ * Although most GAM ranges must be steered to (0,0) and thus use the
+ * INSTANCE0 type farther down, some platforms have special rules
+ * for specific subtypes that require steering to (1,0) instead.
+ */
+ GAM1,
+
+ /*
* On some platforms there are multiple types of MCR registers that
* will always return a non-terminated value at instance (0, 0). We'll
* lump those all into a single category to keep things simple.
@@ -185,34 +194,8 @@ struct xe_gt {
struct work_struct worker;
} reset;
- /** @tlb_invalidation: TLB invalidation state */
- struct {
- /** @tlb_invalidation.seqno: TLB invalidation seqno, protected by CT lock */
-#define TLB_INVALIDATION_SEQNO_MAX 0x100000
- int seqno;
- /**
- * @tlb_invalidation.seqno_recv: last received TLB invalidation seqno,
- * protected by CT lock
- */
- int seqno_recv;
- /**
- * @tlb_invalidation.pending_fences: list of pending fences waiting TLB
- * invaliations, protected by CT lock
- */
- struct list_head pending_fences;
- /**
- * @tlb_invalidation.pending_lock: protects @tlb_invalidation.pending_fences
- * and updating @tlb_invalidation.seqno_recv.
- */
- spinlock_t pending_lock;
- /**
- * @tlb_invalidation.fence_tdr: schedules a delayed call to
- * xe_gt_tlb_fence_timeout after the timeut interval is over.
- */
- struct delayed_work fence_tdr;
- /** @tlb_invalidation.lock: protects TLB invalidation fences */
- spinlock_t lock;
- } tlb_invalidation;
+ /** @tlb_inval: TLB invalidation state */
+ struct xe_tlb_inval tlb_inval;
/**
* @ccs_mode: Number of compute engines enabled.
@@ -227,81 +210,16 @@ struct xe_gt {
/**
* @usm.bb_pool: Pool from which batchbuffers, for USM operations
* (e.g. migrations, fixing page tables), are allocated.
- * Dedicated pool needed so USM operations to not get blocked
+ * Dedicated pool needed so USM operations do not get blocked
* behind any user operations which may have resulted in a
* fault.
*/
struct xe_sa_manager *bb_pool;
/**
* @usm.reserved_bcs_instance: reserved BCS instance used for USM
- * operations (e.g. mmigrations, fixing page tables)
+ * operations (e.g. migrations, fixing page tables)
*/
u16 reserved_bcs_instance;
- /** @usm.pf_wq: page fault work queue, unbound, high priority */
- struct workqueue_struct *pf_wq;
- /** @usm.acc_wq: access counter work queue, unbound, high priority */
- struct workqueue_struct *acc_wq;
- /**
- * @usm.pf_queue: Page fault queue used to sync faults so faults can
- * be processed not under the GuC CT lock. The queue is sized so
- * it can sync all possible faults (1 per physical engine).
- * Multiple queues exists for page faults from different VMs are
- * be processed in parallel.
- */
- struct pf_queue {
- /** @usm.pf_queue.gt: back pointer to GT */
- struct xe_gt *gt;
- /** @usm.pf_queue.data: data in the page fault queue */
- u32 *data;
- /**
- * @usm.pf_queue.num_dw: number of DWORDS in the page
- * fault queue. Dynamically calculated based on the number
- * of compute resources available.
- */
- u32 num_dw;
- /**
- * @usm.pf_queue.tail: tail pointer in DWs for page fault queue,
- * moved by worker which processes faults (consumer).
- */
- u16 tail;
- /**
- * @usm.pf_queue.head: head pointer in DWs for page fault queue,
- * moved by G2H handler (producer).
- */
- u16 head;
- /** @usm.pf_queue.lock: protects page fault queue */
- spinlock_t lock;
- /** @usm.pf_queue.worker: to process page faults */
- struct work_struct worker;
-#define NUM_PF_QUEUE 4
- } pf_queue[NUM_PF_QUEUE];
- /**
- * @usm.acc_queue: Same as page fault queue, cannot process access
- * counters under CT lock.
- */
- struct acc_queue {
- /** @usm.acc_queue.gt: back pointer to GT */
- struct xe_gt *gt;
-#define ACC_QUEUE_NUM_DW 128
- /** @usm.acc_queue.data: data in the page fault queue */
- u32 data[ACC_QUEUE_NUM_DW];
- /**
- * @usm.acc_queue.tail: tail pointer in DWs for access counter queue,
- * moved by worker which processes counters
- * (consumer).
- */
- u16 tail;
- /**
- * @usm.acc_queue.head: head pointer in DWs for access counter queue,
- * moved by G2H handler (producer).
- */
- u16 head;
- /** @usm.acc_queue.lock: protects page fault queue */
- spinlock_t lock;
- /** @usm.acc_queue.worker: to process access counters */
- struct work_struct worker;
-#define NUM_ACC_QUEUE 4
- } acc_queue[NUM_ACC_QUEUE];
} usm;
/** @ordered_wq: used to serialize GT resets and TDRs */
@@ -411,8 +329,8 @@ struct xe_gt {
unsigned long *oob;
/**
* @wa_active.oob_initialized: mark oob as initialized to help
- * detecting misuse of XE_WA() - it can only be called on
- * initialization after OOB WAs have being processed
+ * detecting misuse of XE_GT_WA() - it can only be called on
+ * initialization after OOB WAs have been processed
*/
bool oob_initialized;
} wa_active;
diff --git a/drivers/gpu/drm/xe/xe_guard.h b/drivers/gpu/drm/xe/xe_guard.h
new file mode 100644
index 000000000000..333f8e13b5a1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guard.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUARD_H_
+#define _XE_GUARD_H_
+
+#include <linux/spinlock.h>
+
+/**
+ * struct xe_guard - Simple logic to protect a feature.
+ *
+ * Implements simple semaphore-like logic that can be used to lock down a
+ * feature unless it is already in use. Allows enabling otherwise
+ * incompatible features, where we can't follow the strict owner semantics
+ * required by the &rw_semaphore.
+ *
+ * NOTE! It shouldn't be used to protect data; use &rw_semaphore instead.
+ */
+struct xe_guard {
+ /**
+ * @counter: implements simple exclusive/lockdown logic:
+ * if == 0 then guard/feature is idle/not in use,
+ * if < 0 then feature is active and can't be locked-down,
+ * if > 0 then feature is locked-down and can't be activated.
+ */
+ int counter;
+
+ /** @name: the name of the guard (useful for debug) */
+ const char *name;
+
+ /** @owner: the info about the last owner of the guard (for debug) */
+ void *owner;
+
+ /** @lock: protects guard's data */
+ spinlock_t lock;
+};
+
+/**
+ * xe_guard_init() - Initialize the guard.
+ * @guard: the &xe_guard to init
+ * @name: name of the guard
+ */
+static inline void xe_guard_init(struct xe_guard *guard, const char *name)
+{
+ spin_lock_init(&guard->lock);
+ guard->counter = 0;
+ guard->name = name;
+}
+
+/**
+ * xe_guard_arm() - Arm the guard for the exclusive/lockdown mode.
+ * @guard: the &xe_guard to arm
+ * @lockdown: arm for lockdown (true) or exclusive (false) mode
+ * @who: optional owner info (for debug only)
+ *
+ * Multiple lockdown requests are allowed.
+ * Only a single exclusive access can be granted.
+ * Either request will fail if the guard is already in exclusive mode.
+ * On success, xe_guard_disarm() must be called to release the guard.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static inline int xe_guard_arm(struct xe_guard *guard, bool lockdown, void *who)
+{
+ guard(spinlock)(&guard->lock);
+
+ if (lockdown) {
+ if (guard->counter < 0)
+ return -EBUSY;
+ guard->counter++;
+ } else {
+ if (guard->counter > 0)
+ return -EPERM;
+ if (guard->counter < 0)
+ return -EUSERS;
+ guard->counter--;
+ }
+
+ guard->owner = who;
+ return 0;
+}
+
+/**
+ * xe_guard_disarm() - Disarm the guard from exclusive/lockdown mode.
+ * @guard: the &xe_guard to disarm
+ * @lockdown: disarm from lockdown (true) or exclusive (false) mode
+ *
+ * Return: true if successfully disarmed or false in case of mismatch.
+ */
+static inline bool xe_guard_disarm(struct xe_guard *guard, bool lockdown)
+{
+ guard(spinlock)(&guard->lock);
+
+ if (lockdown) {
+ if (guard->counter <= 0)
+ return false;
+ guard->counter--;
+ } else {
+ if (guard->counter != -1)
+ return false;
+ guard->counter++;
+ }
+ return true;
+}
+
+/**
+ * xe_guard_mode_str() - Convert guard mode into a string.
+ * @lockdown: flag used to select lockdown or exclusive mode
+ *
+ * Return: "lockdown" or "exclusive" string.
+ */
+static inline const char *xe_guard_mode_str(bool lockdown)
+{
+ return lockdown ? "lockdown" : "exclusive";
+}
+
+#endif
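A hedged usage sketch for the guard (not part of the patch; the psmi/migration names are purely illustrative): one feature holds the guard exclusively while running, while an incompatible one arms it for lockdown:

static struct xe_guard psmi_guard;	/* xe_guard_init(&psmi_guard, "psmi") at probe */

static int psmi_start(void *owner)
{
	int err = xe_guard_arm(&psmi_guard, false, owner);	/* exclusive */

	if (err)
		return err;	/* -EPERM if locked down, -EUSERS if already active */
	/* ... feature runs; release with xe_guard_disarm(&psmi_guard, false) ... */
	return 0;
}

static int migration_begin(void *owner)
{
	/* lockdown: fails with -EBUSY only while the feature is active */
	return xe_guard_arm(&psmi_guard, true, owner);
}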
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index b1d1d6da3758..a686b04879d6 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -5,6 +5,7 @@
#include "xe_guc.h"
+#include <linux/iopoll.h>
#include <drm/drm_managed.h>
#include <generated/xe_wa_oob.h>
@@ -16,12 +17,14 @@
#include "regs/xe_guc_regs.h"
#include "regs/xe_irq_regs.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_gt_throttle.h"
+#include "xe_gt_sriov_pf_migration.h"
#include "xe_guc_ads.h"
#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
@@ -38,6 +41,7 @@
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_sriov.h"
+#include "xe_sriov_pf_migration.h"
#include "xe_uc.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
@@ -73,19 +77,25 @@ static u32 guc_ctl_debug_flags(struct xe_guc *guc)
if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
flags |= GUC_LOG_DISABLED;
else
- flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
- GUC_LOG_VERBOSITY_SHIFT;
+ flags |= FIELD_PREP(GUC_LOG_VERBOSITY, GUC_LOG_LEVEL_TO_VERBOSITY(level));
return flags;
}
static u32 guc_ctl_feature_flags(struct xe_guc *guc)
{
+ struct xe_device *xe = guc_to_xe(guc);
u32 flags = GUC_CTL_ENABLE_LITE_RESTORE;
- if (!guc_to_xe(guc)->info.skip_guc_pc)
+ if (!xe->info.skip_guc_pc)
flags |= GUC_CTL_ENABLE_SLPC;
+ if (xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev)))
+ flags |= GUC_CTL_ENABLE_PSMI_LOGGING;
+
+ if (xe_guc_using_main_gamctrl_queues(guc))
+ flags |= GUC_CTL_MAIN_GAMCTRL_QUEUES;
+
return flags;
}
@@ -117,22 +127,14 @@ static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
BUILD_BUG_ON(!CAPTURE_BUFFER_SIZE);
BUILD_BUG_ON(!IS_ALIGNED(CAPTURE_BUFFER_SIZE, CAPTURE_UNIT));
- BUILD_BUG_ON((CRASH_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
- BUILD_BUG_ON((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) >
- (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
- BUILD_BUG_ON((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) >
- (GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT));
-
flags = GUC_LOG_VALID |
GUC_LOG_NOTIFY_ON_HALF_FULL |
CAPTURE_FLAG |
LOG_FLAG |
- ((CRASH_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
- ((DEBUG_BUFFER_SIZE / LOG_UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
- ((CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) <<
- GUC_LOG_CAPTURE_SHIFT) |
- (offset << GUC_LOG_BUF_ADDR_SHIFT);
+ FIELD_PREP(GUC_LOG_CRASH, CRASH_BUFFER_SIZE / LOG_UNIT - 1) |
+ FIELD_PREP(GUC_LOG_DEBUG, DEBUG_BUFFER_SIZE / LOG_UNIT - 1) |
+ FIELD_PREP(GUC_LOG_CAPTURE, CAPTURE_BUFFER_SIZE / CAPTURE_UNIT - 1) |
+ FIELD_PREP(GUC_LOG_BUF_ADDR, offset);
#undef LOG_UNIT
#undef LOG_FLAG
@@ -145,7 +147,7 @@ static u32 guc_ctl_log_params_flags(struct xe_guc *guc)
static u32 guc_ctl_ads_flags(struct xe_guc *guc)
{
u32 ads = guc_bo_ggtt_addr(guc, guc->ads.bo) >> PAGE_SHIFT;
- u32 flags = ads << GUC_ADS_ADDR_SHIFT;
+ u32 flags = FIELD_PREP(GUC_ADS_ADDR, ads);
return flags;
}
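The conversions above replace open-coded shift-and-mask packing with FIELD_PREP(). A hedged illustration with an invented example mask:

/*
 * #define EXAMPLE_FIELD GENMASK(7, 4)
 *
 * FIELD_PREP(EXAMPLE_FIELD, 3) == 3 << 4 == 0x30
 *
 * Unlike "(val << SHIFT)", FIELD_PREP() derives the shift from the
 * mask and can catch values that don't fit the field at compile time.
 */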
@@ -157,7 +159,7 @@ static bool needs_wa_dual_queue(struct xe_gt *gt)
* on RCS and CCSes with different address spaces, which on DG2 is
* required as a WA for an HW bug.
*/
- if (XE_WA(gt, 22011391025))
+ if (XE_GT_WA(gt, 22011391025))
return true;
/*
@@ -184,10 +186,10 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
u32 flags = 0;
- if (XE_WA(gt, 22012773006))
+ if (XE_GT_WA(gt, 22012773006))
flags |= GUC_WA_POLLCS;
- if (XE_WA(gt, 14014475959))
+ if (XE_GT_WA(gt, 14014475959))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
if (needs_wa_dual_queue(gt))
@@ -201,19 +203,22 @@ static u32 guc_ctl_wa_flags(struct xe_guc *guc)
if (GRAPHICS_VERx100(xe) < 1270)
flags |= GUC_WA_PRE_PARSER;
- if (XE_WA(gt, 22012727170) || XE_WA(gt, 22012727685))
+ if (XE_GT_WA(gt, 22012727170) || XE_GT_WA(gt, 22012727685))
flags |= GUC_WA_CONTEXT_ISOLATION;
- if (XE_WA(gt, 18020744125) &&
+ if (XE_GT_WA(gt, 18020744125) &&
!xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_RENDER))
flags |= GUC_WA_RCS_REGS_IN_CCS_REGS_LIST;
- if (XE_WA(gt, 1509372804))
+ if (XE_GT_WA(gt, 1509372804))
flags |= GUC_WA_RENDER_RST_RC6_EXIT;
- if (XE_WA(gt, 14018913170))
+ if (XE_GT_WA(gt, 14018913170))
flags |= GUC_WA_ENABLE_TSC_CHECK_ON_RC6;
+ if (XE_GT_WA(gt, 16023683509))
+ flags |= GUC_WA_SAVE_RESTORE_MCFG_REG_AT_MC6;
+
return flags;
}
@@ -701,10 +706,6 @@ static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
- ret = xe_managed_bo_reinit_in_vram(xe, tile, &guc->ct.bo);
- if (ret)
- return ret;
-
return 0;
}
@@ -822,6 +823,14 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
return 0;
}
+static u32 guc_additional_cache_size(struct xe_device *xe)
+{
+ if (IS_SRIOV_PF(xe) && xe_sriov_pf_migration_supported(xe))
+ return XE_GT_SRIOV_PF_MIGRATION_GUC_DATA_MAX_SIZE;
+ else
+ return 0; /* Fallback to default size */
+}
+
/**
* xe_guc_init_post_hwconfig - initialize GuC post hwconfig load
* @guc: The GuC object
@@ -839,6 +848,10 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
+ ret = xe_guc_ct_init_post_hwconfig(&guc->ct);
+ if (ret)
+ return ret;
+
guc_init_params_post_hwconfig(guc);
ret = xe_guc_submit_init(guc, ~0);
@@ -857,7 +870,8 @@ int xe_guc_init_post_hwconfig(struct xe_guc *guc)
if (ret)
return ret;
- ret = xe_guc_buf_cache_init(&guc->buf);
+ ret = xe_guc_buf_cache_init_with_size(&guc->buf,
+ guc_additional_cache_size(guc_to_xe(guc)));
if (ret)
return ret;
@@ -880,9 +894,7 @@ int xe_guc_post_load_init(struct xe_guc *guc)
return ret;
}
- guc->submission_state.enabled = true;
-
- return 0;
+ return xe_guc_submit_enable(guc);
}
int xe_guc_reset(struct xe_guc *guc)
@@ -974,33 +986,109 @@ static int guc_xfer_rsa(struct xe_guc *guc)
}
/*
- * Check a previously read GuC status register (GUC_STATUS) looking for
- * known terminal states (either completion or failure) of either the
- * microkernel status field or the boot ROM status field. Returns +1 for
- * successful completion, -1 for failure and 0 for any intermediate state.
+ * Wait for the GuC to start up.
+ *
+ * Measurements indicate this should take no more than 20ms (assuming the GT
+ * clock is at maximum frequency). However, thermal throttling and other issues
+ * can prevent the clock hitting max and thus making the load take significantly
+ * longer. Allow up to 3s as a safety margin in normal builds. For
+ * CONFIG_DRM_XE_DEBUG allow up to 20s (GUC_LOAD_TIMEOUT_SEC below) to account
+ * for slower execution, issues in PCODE, driver, fan, etc.
+ *
+ * Keep checking the GUC_STATUS every 10ms, with a debug message every 100
+ * attempts as an "I'm slow, but alive" heartbeat. Regardless, if the load
+ * takes more than 200ms, emit a warning.
*/
-static int guc_load_done(u32 status)
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define GUC_LOAD_TIMEOUT_SEC 20
+#else
+#define GUC_LOAD_TIMEOUT_SEC 3
+#endif
+#define GUC_LOAD_TIME_WARN_MSEC 200
+
+static void print_load_status_err(struct xe_gt *gt, u32 status)
{
- u32 uk_val = REG_FIELD_GET(GS_UKERNEL_MASK, status);
- u32 br_val = REG_FIELD_GET(GS_BOOTROM_MASK, status);
+ struct xe_mmio *mmio = &gt->mmio;
+ u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
+ u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);
+
+ xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
+ REG_FIELD_GET(GS_MIA_IN_RESET, status),
+ bootrom, ukernel,
+ REG_FIELD_GET(GS_MIA_MASK, status),
+ REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
- switch (uk_val) {
+ switch (bootrom) {
+ case XE_BOOTROM_STATUS_NO_KEY_FOUND:
+ xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
+ xe_mmio_read32(mmio, GUC_HEADER_INFO));
+ break;
+ case XE_BOOTROM_STATUS_RSA_FAILED:
+ xe_gt_err(gt, "firmware signature verification failed\n");
+ break;
+ case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
+ xe_gt_err(gt, "firmware production part check failure\n");
+ break;
+ }
+
+ switch (ukernel) {
+ case XE_GUC_LOAD_STATUS_HWCONFIG_START:
+ xe_gt_err(gt, "still extracting hwconfig table.\n");
+ break;
+ case XE_GUC_LOAD_STATUS_EXCEPTION:
+ xe_gt_err(gt, "firmware exception. EIP: %#x\n",
+ xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
+ break;
+ case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
+ xe_gt_err(gt, "illegal init/ADS data\n");
+ break;
+ case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ xe_gt_err(gt, "illegal register in save/restore workaround list\n");
+ break;
+ case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ xe_gt_err(gt, "illegal workaround KLV data\n");
+ break;
+ case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
+ xe_gt_err(gt, "illegal feature flag specified\n");
+ break;
+ }
+}
+
+/*
+ * Check GUC_STATUS looking for known terminal states (either completion or
+ * failure) of either the microkernel status field or the boot ROM status field.
+ *
+ * Returns 1 for successful completion, -1 for failure and 0 for any
+ * intermediate state.
+ */
+static int guc_load_done(struct xe_gt *gt, u32 *status, u32 *tries)
+{
+ u32 ukernel, bootrom;
+
+ *status = xe_mmio_read32(&gt->mmio, GUC_STATUS);
+ ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, *status);
+ bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, *status);
+
+ switch (ukernel) {
case XE_GUC_LOAD_STATUS_READY:
return 1;
-
case XE_GUC_LOAD_STATUS_ERROR_DEVID_BUILD_MISMATCH:
case XE_GUC_LOAD_STATUS_GUC_PREPROD_BUILD_MISMATCH:
case XE_GUC_LOAD_STATUS_ERROR_DEVID_INVALID_GUCTYPE:
case XE_GUC_LOAD_STATUS_HWCONFIG_ERROR:
+ case XE_GUC_LOAD_STATUS_BOOTROM_VERSION_MISMATCH:
case XE_GUC_LOAD_STATUS_DPC_ERROR:
case XE_GUC_LOAD_STATUS_EXCEPTION:
case XE_GUC_LOAD_STATUS_INIT_DATA_INVALID:
case XE_GUC_LOAD_STATUS_MPU_DATA_INVALID:
case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
+ case XE_GUC_LOAD_STATUS_KLV_WORKAROUND_INIT_ERROR:
+ case XE_GUC_LOAD_STATUS_INVALID_FTR_FLAG:
return -1;
}
- switch (br_val) {
+ switch (bootrom) {
case XE_BOOTROM_STATUS_NO_KEY_FOUND:
case XE_BOOTROM_STATUS_RSA_FAILED:
case XE_BOOTROM_STATUS_PAVPC_FAILED:
@@ -1014,155 +1102,63 @@ static int guc_load_done(u32 status)
return -1;
}
- return 0;
-}
+ if (++*tries >= 100) {
+ struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
-static s32 guc_pc_get_cur_freq(struct xe_guc_pc *guc_pc)
-{
- u32 freq;
- int ret = xe_guc_pc_get_cur_freq(guc_pc, &freq);
+ *tries = 0;
+ xe_gt_dbg(gt, "GuC load still in progress, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n",
+ xe_guc_pc_get_act_freq(guc_pc),
+ xe_guc_pc_get_cur_freq_fw(guc_pc),
+ *status, ukernel, bootrom);
+ }
- return ret ? ret : freq;
+ return 0;
}
-/*
- * Wait for the GuC to start up.
- *
- * Measurements indicate this should take no more than 20ms (assuming the GT
- * clock is at maximum frequency). However, thermal throttling and other issues
- * can prevent the clock hitting max and thus making the load take significantly
- * longer. Allow up to 200ms as a safety margin for real world worst case situations.
- *
- * However, bugs anywhere from KMD to GuC to PCODE to fan failure in a CI farm can
- * lead to even longer times. E.g. if the GT is clamped to minimum frequency then
- * the load times can be in the seconds range. So the timeout is increased for debug
- * builds to ensure that problems can be correctly analysed. For release builds, the
- * timeout is kept short so that users don't wait forever to find out that there is a
- * problem. In either case, if the load took longer than is reasonable even with some
- * 'sensible' throttling, then flag a warning because something is not right.
- *
- * Note that there is a limit on how long an individual usleep_range() can wait for,
- * hence longer waits require wrapping a shorter wait in a loop.
- *
- * Note that the only reason an end user should hit the shorter timeout is in case of
- * extreme thermal throttling. And a system that is that hot during boot is probably
- * dead anyway!
- */
-#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
-#define GUC_LOAD_RETRY_LIMIT 20
-#else
-#define GUC_LOAD_RETRY_LIMIT 3
-#endif
-#define GUC_LOAD_TIME_WARN_MS 200
-
-static void guc_wait_ucode(struct xe_guc *guc)
+static int guc_wait_ucode(struct xe_guc *guc)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_mmio *mmio = &gt->mmio;
struct xe_guc_pc *guc_pc = &gt->uc.guc.pc;
- ktime_t before, after, delta;
- int load_done;
- u32 status = 0;
- int count = 0;
+ u32 before_freq, act_freq, cur_freq;
+ u32 status = 0, tries = 0;
+ ktime_t before;
u64 delta_ms;
- u32 before_freq;
+ int ret;
before_freq = xe_guc_pc_get_act_freq(guc_pc);
before = ktime_get();
- /*
- * Note, can't use any kind of timing information from the call to xe_mmio_wait.
- * It could return a thousand intermediate stages at random times. Instead, must
- * manually track the total time taken and locally implement the timeout.
- */
- do {
- u32 last_status = status & (GS_UKERNEL_MASK | GS_BOOTROM_MASK);
- int ret;
-
- /*
- * Wait for any change (intermediate or terminal) in the status register.
- * Note, the return value is a don't care. The only failure code is timeout
- * but the timeouts need to be accumulated over all the intermediate partial
- * timeouts rather than allowing a huge timeout each time. So basically, need
- * to treat a timeout no different to a value change.
- */
- ret = xe_mmio_wait32_not(mmio, GUC_STATUS, GS_UKERNEL_MASK | GS_BOOTROM_MASK,
- last_status, 1000 * 1000, &status, false);
- if (ret < 0)
- count++;
- after = ktime_get();
- delta = ktime_sub(after, before);
- delta_ms = ktime_to_ms(delta);
-
- load_done = guc_load_done(status);
- if (load_done != 0)
- break;
- if (delta_ms >= (GUC_LOAD_RETRY_LIMIT * 1000))
- break;
-
- xe_gt_dbg(gt, "load still in progress, timeouts = %d, freq = %dMHz (req %dMHz), status = 0x%08X [0x%02X/%02X]\n",
- count, xe_guc_pc_get_act_freq(guc_pc),
- guc_pc_get_cur_freq(guc_pc), status,
- REG_FIELD_GET(GS_BOOTROM_MASK, status),
- REG_FIELD_GET(GS_UKERNEL_MASK, status));
- } while (1);
+ ret = poll_timeout_us(ret = guc_load_done(gt, &status, &tries), ret,
+ 10 * USEC_PER_MSEC,
+ GUC_LOAD_TIMEOUT_SEC * USEC_PER_SEC, false);
- if (load_done != 1) {
- u32 ukernel = REG_FIELD_GET(GS_UKERNEL_MASK, status);
- u32 bootrom = REG_FIELD_GET(GS_BOOTROM_MASK, status);
+ delta_ms = ktime_to_ms(ktime_sub(ktime_get(), before));
+ act_freq = xe_guc_pc_get_act_freq(guc_pc);
+ cur_freq = xe_guc_pc_get_cur_freq_fw(guc_pc);
- xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz), done = %d\n",
+ if (ret) {
+ xe_gt_err(gt, "load failed: status = 0x%08X, time = %lldms, freq = %dMHz (req %dMHz)\n",
status, delta_ms, xe_guc_pc_get_act_freq(guc_pc),
- guc_pc_get_cur_freq(guc_pc), load_done);
- xe_gt_err(gt, "load failed: status: Reset = %d, BootROM = 0x%02X, UKernel = 0x%02X, MIA = 0x%02X, Auth = 0x%02X\n",
- REG_FIELD_GET(GS_MIA_IN_RESET, status),
- bootrom, ukernel,
- REG_FIELD_GET(GS_MIA_MASK, status),
- REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
-
- switch (bootrom) {
- case XE_BOOTROM_STATUS_NO_KEY_FOUND:
- xe_gt_err(gt, "invalid key requested, header = 0x%08X\n",
- xe_mmio_read32(mmio, GUC_HEADER_INFO));
- break;
+ xe_guc_pc_get_cur_freq_fw(guc_pc));
+ print_load_status_err(gt, status);
- case XE_BOOTROM_STATUS_RSA_FAILED:
- xe_gt_err(gt, "firmware signature verification failed\n");
- break;
-
- case XE_BOOTROM_STATUS_PROD_KEY_CHECK_FAILURE:
- xe_gt_err(gt, "firmware production part check failure\n");
- break;
- }
-
- switch (ukernel) {
- case XE_GUC_LOAD_STATUS_EXCEPTION:
- xe_gt_err(gt, "firmware exception. EIP: %#x\n",
- xe_mmio_read32(mmio, SOFT_SCRATCH(13)));
- break;
-
- case XE_GUC_LOAD_STATUS_INIT_MMIO_SAVE_RESTORE_INVALID:
- xe_gt_err(gt, "illegal register in save/restore workaround list\n");
- break;
-
- case XE_GUC_LOAD_STATUS_HWCONFIG_START:
- xe_gt_err(gt, "still extracting hwconfig table.\n");
- break;
- }
+ return -EPROTO;
+ }
- xe_device_declare_wedged(gt_to_xe(gt));
- } else if (delta_ms > GUC_LOAD_TIME_WARN_MS) {
- xe_gt_warn(gt, "excessive init time: %lldms! [status = 0x%08X, timeouts = %d]\n",
- delta_ms, status, count);
- xe_gt_warn(gt, "excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n",
- xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc),
- before_freq, xe_gt_throttle_get_limit_reasons(gt));
+ if (delta_ms > GUC_LOAD_TIME_WARN_MSEC) {
+ xe_gt_warn(gt, "GuC load: excessive init time: %lldms! [status = 0x%08X]\n",
+ delta_ms, status);
+ xe_gt_warn(gt, "GuC load: excessive init time: [freq = %dMHz (req = %dMHz), before = %dMHz, perf_limit_reasons = 0x%08X]\n",
+ act_freq, cur_freq, before_freq,
+ xe_gt_throttle_get_limit_reasons(gt));
} else {
- xe_gt_dbg(gt, "init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X, timeouts = %d\n",
- delta_ms, xe_guc_pc_get_act_freq(guc_pc), guc_pc_get_cur_freq(guc_pc),
- before_freq, status, count);
+ xe_gt_dbg(gt, "GuC load: init took %lldms, freq = %dMHz (req = %dMHz), before = %dMHz, status = 0x%08X\n",
+ delta_ms, act_freq, cur_freq, before_freq, status);
}
+
+ return 0;
}
+ALLOW_ERROR_INJECTION(guc_wait_ucode, ERRNO);
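Worked numbers for the polling scheme above (illustrative arithmetic only):

/*
 * Poll interval 10ms -> the tries counter hits 100 about once per
 * second, so the "still in progress" debug message is roughly a 1 Hz
 * heartbeat. The 3s release-build budget allows ~300 polls; the 20s
 * debug-build budget allows ~2000.
 */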
static int __xe_guc_upload(struct xe_guc *guc)
{
@@ -1194,14 +1190,16 @@ static int __xe_guc_upload(struct xe_guc *guc)
goto out;
/* Wait for authentication */
- guc_wait_ucode(guc);
+ ret = guc_wait_ucode(guc);
+ if (ret)
+ goto out;
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_RUNNING);
return 0;
out:
xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOAD_FAIL);
- return 0 /* FIXME: ret, don't want to stop load currently */;
+ return ret;
}
static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
@@ -1271,8 +1269,13 @@ int xe_guc_min_load_for_hwconfig(struct xe_guc *guc)
int xe_guc_upload(struct xe_guc *guc)
{
+ struct xe_gt *gt = guc_to_gt(guc);
+
xe_guc_ads_populate(&guc->ads);
+ if (xe_guc_using_main_gamctrl_queues(guc))
+ xe_mmio_write32(&gt->mmio, MAIN_GAMCTRL_MODE, MAIN_GAMCTRL_QUEUE_SELECT);
+
return __xe_guc_upload(guc);
}
@@ -1455,7 +1458,7 @@ timeout:
BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1);
ret = xe_mmio_wait32(mmio, reply_reg, resp_mask, resp_mask,
- 1000000, &header, false);
+ 2000000, &header, false);
if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
GUC_HXG_ORIGIN_GUC))
@@ -1579,7 +1582,7 @@ void xe_guc_sanitize(struct xe_guc *guc)
{
xe_uc_fw_sanitize(&guc->fw);
xe_guc_ct_disable(&guc->ct);
- guc->submission_state.enabled = false;
+ xe_guc_submit_disable(guc);
}
int xe_guc_reset_prepare(struct xe_guc *guc)
@@ -1672,3 +1675,45 @@ void xe_guc_declare_wedged(struct xe_guc *guc)
xe_guc_ct_stop(&guc->ct);
xe_guc_submit_wedge(guc);
}
+
+/**
+ * xe_guc_using_main_gamctrl_queues() - Detect which reporting queues to use.
+ * @guc: The GuC object
+ *
+ * For Xe3p and beyond, we want to program the hardware to use the
+ * "Main GAMCTRL queue" rather than the legacy queue before we upload
+ * the GuC firmware. This will allow the GuC to use a new set of
+ * registers for pagefault handling and avoid some unnecessary
+ * complications with MCR register range handling.
+ *
+ * Return: true if the new Main GAMCTRL queues can be used.
+ */
+bool xe_guc_using_main_gamctrl_queues(struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ /*
+ * For the Xe3p media GT (35), the GuC and CS subunits may still be Xe3,
+ * which lacks Main GAMCTRL support. Reserved bits in the GMD_ID register
+ * indicate the IP version of the subunits.
+ */
+ if (xe_gt_is_media_type(gt) && MEDIA_VER(gt_to_xe(gt)) == 35) {
+ u32 val = xe_mmio_read32(&gt->mmio, GMD_ID);
+ u32 subip = REG_FIELD_GET(GMD_ID_SUBIP_FLAG_MASK, val);
+
+ if (!subip)
+ return true;
+
+ xe_gt_WARN(gt, subip != 1,
+ "GMD_ID has unknown value in the SUBIP_FLAG field - 0x%x\n",
+ subip);
+
+ return false;
+ }
+
+ return GT_VER(gt) >= 35;
+}
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include "tests/xe_guc_g2g_test.c"
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h
index 22cf019a11bf..e2d4c5f44ae3 100644
--- a/drivers/gpu/drm/xe/xe_guc.h
+++ b/drivers/gpu/drm/xe/xe_guc.h
@@ -52,6 +52,11 @@ void xe_guc_stop_prepare(struct xe_guc *guc);
void xe_guc_stop(struct xe_guc *guc);
int xe_guc_start(struct xe_guc *guc);
void xe_guc_declare_wedged(struct xe_guc *guc);
+bool xe_guc_using_main_gamctrl_queues(struct xe_guc *guc);
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+int xe_guc_g2g_test_notification(struct xe_guc *guc, u32 *payload, u32 len);
+#endif
static inline u16 xe_engine_class_to_guc_class(enum xe_engine_class class)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 131cfc56be00..bcb85a1bf26d 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -18,6 +18,7 @@
#include "xe_bo.h"
#include "xe_gt.h"
#include "xe_gt_ccs_mode.h"
+#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_guc_buf.h"
@@ -30,7 +31,6 @@
#include "xe_platform_types.h"
#include "xe_uc_fw.h"
#include "xe_wa.h"
-#include "xe_gt_mcr.h"
/* Slack of a few additional entries per engine */
#define ADS_REGSET_EXTRA_MAX 8
@@ -247,7 +247,7 @@ static size_t calculate_regset_size(struct xe_gt *gt)
count += ADS_REGSET_EXTRA_MAX * XE_NUM_HW_ENGINES;
- if (XE_WA(gt, 1607983814))
+ if (XE_GT_WA(gt, 1607983814))
count += LNCFCMOCS_REG_COUNT;
return count * sizeof(struct guc_mmio_reg);
@@ -284,52 +284,26 @@ static size_t calculate_golden_lrc_size(struct xe_guc_ads *ads)
return total_size;
}
-static void guc_waklv_enable_one_word(struct xe_guc_ads *ads,
- enum xe_guc_klv_ids klv_id,
- u32 value,
- u32 *offset, u32 *remain)
+static void guc_waklv_enable(struct xe_guc_ads *ads,
+ u32 data[], u32 data_len_dw,
+ u32 *offset, u32 *remain,
+ enum xe_guc_klv_ids klv_id)
{
- u32 size;
- u32 klv_entry[] = {
- /* 16:16 key/length */
- FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
- FIELD_PREP(GUC_KLV_0_LEN, 1),
- value,
- /* 1 dword data */
- };
-
- size = sizeof(klv_entry);
+ size_t size = sizeof(u32) * (1 + data_len_dw);
if (*remain < size) {
drm_warn(&ads_to_xe(ads)->drm,
- "w/a klv buffer too small to add klv id %d\n", klv_id);
- } else {
- xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
- klv_entry, size);
- *offset += size;
- *remain -= size;
+ "w/a klv buffer too small to add klv id 0x%04X\n", klv_id);
+ return;
}
-}
-static void guc_waklv_enable_simple(struct xe_guc_ads *ads,
- enum xe_guc_klv_ids klv_id, u32 *offset, u32 *remain)
-{
- u32 klv_entry[] = {
- /* 16:16 key/length */
- FIELD_PREP(GUC_KLV_0_KEY, klv_id) |
- FIELD_PREP(GUC_KLV_0_LEN, 0),
- /* 0 dwords data */
- };
- u32 size;
+ /* 16:16 key/length */
+ xe_map_wr(ads_to_xe(ads), ads_to_map(ads), *offset, u32,
+ FIELD_PREP(GUC_KLV_0_KEY, klv_id) | FIELD_PREP(GUC_KLV_0_LEN, data_len_dw));
+ /* data_len_dw dwords of data */
+ xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads),
+ *offset + sizeof(u32), data, data_len_dw * sizeof(u32));
- size = sizeof(klv_entry);
-
- if (xe_gt_WARN(ads_to_gt(ads), *remain < size,
- "w/a klv buffer too small to add klv id %d\n", klv_id))
- return;
-
- xe_map_memcpy_to(ads_to_xe(ads), ads_to_map(ads), *offset,
- klv_entry, size);
*offset += size;
*remain -= size;
}
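For reference, each KLV entry the unified helper writes is one header dword (16-bit key, 16-bit length in dwords) followed by the payload, so the one-dword Wa_13011645652 entry set up below serializes as (illustrative):

/*
 * dw0 = FIELD_PREP(GUC_KLV_0_KEY, klv_id) | FIELD_PREP(GUC_KLV_0_LEN, 1)
 * dw1 = 0xC40			/- one dword of payload -/
 *
 * A flag-only KLV (data_len_dw == 0) is just dw0 with LEN = 0.
 */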
@@ -343,44 +317,51 @@ static void guc_waklv_init(struct xe_guc_ads *ads)
offset = guc_ads_waklv_offset(ads);
remain = guc_ads_waklv_size(ads);
- if (XE_WA(gt, 14019882105) || XE_WA(gt, 16021333562))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED,
- &offset, &remain);
- if (XE_WA(gt, 18024947630))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING,
- &offset, &remain);
- if (XE_WA(gt, 16022287689))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE,
- &offset, &remain);
-
- if (XE_WA(gt, 14022866841))
- guc_waklv_enable_simple(ads,
- GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO,
- &offset, &remain);
+ if (XE_GT_WA(gt, 14019882105) || XE_GT_WA(gt, 16021333562))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_BLOCK_INTERRUPTS_WHEN_MGSR_BLOCKED);
+ if (XE_GT_WA(gt, 18024947630))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_ID_GAM_PFQ_SHADOW_TAIL_POLLING);
+ if (XE_GT_WA(gt, 16022287689))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_ID_DISABLE_MTP_DURING_ASYNC_COMPUTE);
+
+ if (XE_GT_WA(gt, 14022866841))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WA_KLV_WAKE_POWER_DOMAINS_FOR_OUTBOUND_MMIO);
/*
* On RC6 exit, GuC will write register 0xB04 with the default value provided. As of now,
* the default value for this register is determined to be 0xC40. This could change in the
* future, so GuC depends on KMD to send it the correct value.
*/
- if (XE_WA(gt, 13011645652))
- guc_waklv_enable_one_word(ads,
- GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE,
- 0xC40,
- &offset, &remain);
-
- if (XE_WA(gt, 14022293748) || XE_WA(gt, 22019794406))
- guc_waklv_enable_simple(ads,
- GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET,
- &offset, &remain);
-
- if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_WA(gt, 16026508708))
- guc_waklv_enable_simple(ads,
- GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH,
- &offset, &remain);
+ if (XE_GT_WA(gt, 13011645652)) {
+ u32 data = 0xC40;
+
+ guc_waklv_enable(ads, &data, 1, &offset, &remain,
+ GUC_WA_KLV_NP_RD_WRITE_TO_CLEAR_RCSM_AT_CGP_LATE_RESTORE);
+ }
+
+ if (XE_GT_WA(gt, 14022293748) || XE_GT_WA(gt, 22019794406))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_ID_BACK_TO_BACK_RCS_ENGINE_RESET);
+
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 44, 0) && XE_GT_WA(gt, 16026508708))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WA_KLV_RESET_BB_STACK_PTR_ON_VF_SWITCH);
+ if (GUC_FIRMWARE_VER(&gt->uc.guc) >= MAKE_GUC_VER(70, 47, 0) && XE_GT_WA(gt, 16026007364)) {
+ u32 data[] = {
+ 0x0,
+ 0xF,
+ };
+ guc_waklv_enable(ads, data, ARRAY_SIZE(data), &offset, &remain,
+ GUC_WA_KLV_RESTORE_UNSAVED_MEDIA_CONTROL_REG);
+ }
+
+ if (XE_GT_WA(gt, 14020001231))
+ guc_waklv_enable(ads, NULL, 0, &offset, &remain,
+ GUC_WORKAROUND_KLV_DISABLE_PSMI_INTERRUPTS_AT_C6_ENTRY_RESTORE_AT_EXIT);
size = guc_ads_waklv_size(ads) - remain;
if (!size)
@@ -784,7 +765,7 @@ static unsigned int guc_mmio_regset_write(struct xe_guc_ads *ads,
guc_mmio_regset_write_one(ads, regset_map, e->reg, count++);
}
- if (XE_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_GT_WA(hwe->gt, 1607983814) && hwe->class == XE_ENGINE_CLASS_RENDER) {
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) {
guc_mmio_regset_write_one(ads, regset_map,
XELP_LNCFCMOCS(i), count++);
@@ -839,16 +820,20 @@ static void guc_mmio_reg_state_init(struct xe_guc_ads *ads)
static void guc_um_init_params(struct xe_guc_ads *ads)
{
u32 um_queue_offset = guc_ads_um_queues_offset(ads);
+ struct xe_guc *guc = ads_to_guc(ads);
u64 base_dpa;
u32 base_ggtt;
+ bool with_dpa;
int i;
+ with_dpa = !xe_guc_using_main_gamctrl_queues(guc);
+
base_ggtt = xe_bo_ggtt_addr(ads->bo) + um_queue_offset;
base_dpa = xe_bo_main_addr(ads->bo, PAGE_SIZE) + um_queue_offset;
for (i = 0; i < GUC_UM_HW_QUEUE_MAX; ++i) {
ads_blob_write(ads, um_init_params.queue_params[i].base_dpa,
- base_dpa + (i * GUC_UM_QUEUE_SIZE));
+ with_dpa ? (base_dpa + (i * GUC_UM_QUEUE_SIZE)) : 0);
ads_blob_write(ads, um_init_params.queue_params[i].base_ggtt_address,
base_ggtt + (i * GUC_UM_QUEUE_SIZE));
ads_blob_write(ads, um_init_params.queue_params[i].size_in_bytes,
diff --git a/drivers/gpu/drm/xe/xe_guc_ads_types.h b/drivers/gpu/drm/xe/xe_guc_ads_types.h
index 70c132458ac3..48a8e092023f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ads_types.h
@@ -14,7 +14,7 @@ struct xe_bo;
* struct xe_guc_ads - GuC additional data structures (ADS)
*/
struct xe_guc_ads {
- /** @bo: XE BO for GuC ads blob */
+ /** @bo: Xe BO for GuC ads blob */
struct xe_bo *bo;
/** @golden_lrc_size: golden LRC size */
size_t golden_lrc_size;
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 14a07dca48e7..3ce442500130 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -13,6 +13,8 @@
#include "xe_guc_buf.h"
#include "xe_sa.h"
+#define XE_GUC_BUF_CACHE_DEFAULT_SIZE SZ_8K
+
static struct xe_guc *cache_to_guc(struct xe_guc_buf_cache *cache)
{
return container_of(cache, struct xe_guc, buf);
@@ -23,21 +25,12 @@ static struct xe_gt *cache_to_gt(struct xe_guc_buf_cache *cache)
return guc_to_gt(cache_to_guc(cache));
}
-/**
- * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
- * @cache: the &xe_guc_buf_cache to initialize
- *
- * The Buffer Cache allows to obtain a reusable buffer that can be used to pass
- * indirect H2G data to GuC without a need to create a ad-hoc allocation.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
+static int guc_buf_cache_init(struct xe_guc_buf_cache *cache, u32 size)
{
struct xe_gt *gt = cache_to_gt(cache);
struct xe_sa_manager *sam;
- sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
+ sam = __xe_sa_bo_manager_init(gt_to_tile(gt), size, 0, sizeof(u32));
if (IS_ERR(sam))
return PTR_ERR(sam);
cache->sam = sam;
@@ -49,6 +42,35 @@ int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
}
/**
+ * xe_guc_buf_cache_init() - Initialize the GuC Buffer Cache.
+ * @cache: the &xe_guc_buf_cache to initialize
+ *
+ * The Buffer Cache provides a reusable buffer that can be used to pass data
+ * to the GuC, or to read data back from the GuC, without creating an ad-hoc
+ * allocation.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
+{
+ return guc_buf_cache_init(cache, XE_GUC_BUF_CACHE_DEFAULT_SIZE);
+}
+
+/**
+ * xe_guc_buf_cache_init_with_size() - Initialize the GuC Buffer Cache.
+ * @cache: the &xe_guc_buf_cache to initialize
+ * @size: size in bytes
+ *
+ * Like xe_guc_buf_cache_init(), except it allows the caller to make the cache
+ * buffer larger to accommodate larger objects.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size)
+{
+ return guc_buf_cache_init(cache, max(XE_GUC_BUF_CACHE_DEFAULT_SIZE, size));
+}
+
+/**
* xe_guc_buf_cache_dwords() - Number of dwords the GuC Buffer Cache supports.
* @cache: the &xe_guc_buf_cache to query
*
@@ -116,6 +138,19 @@ void xe_guc_buf_release(const struct xe_guc_buf buf)
}
/**
+ * xe_guc_buf_sync_read() - Copy the data from the GPU memory to the sub-allocation.
+ * @buf: the &xe_guc_buf to sync
+ *
+ * Return: a CPU pointer of the sub-allocation.
+ */
+void *xe_guc_buf_sync_read(const struct xe_guc_buf buf)
+{
+ xe_sa_bo_sync_read(buf.sa);
+
+ return xe_sa_bo_cpu_addr(buf.sa);
+}
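A hedged sketch of how the new sync-read entry point pairs with the existing reserve/release flow; the H2G step is elided and num_dwords is illustrative:

struct xe_guc_buf buf = xe_guc_buf_reserve(&guc->buf, num_dwords);
void *data;

if (!xe_guc_buf_is_valid(buf))
	return -ENOBUFS;

/* ... send an H2G that asks GuC to fill xe_guc_buf_gpu_addr(buf),
 *     then wait for the G2H reply ... */

data = xe_guc_buf_sync_read(buf);	/* CPU copy now reflects GuC writes */
/* ... parse data ... */
xe_guc_buf_release(buf);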
+
+/**
* xe_guc_buf_flush() - Copy the data from the sub-allocation to the GPU memory.
* @buf: the &xe_guc_buf to flush
*
@@ -164,7 +199,7 @@ u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *p
if (offset < 0 || offset + size > cache->sam->base.size)
return 0;
- return cache->sam->gpu_addr + offset;
+ return xe_sa_manager_gpu_addr(cache->sam) + offset;
}
#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST)
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.h b/drivers/gpu/drm/xe/xe_guc_buf.h
index 0d67604d96bd..e3cca553fb00 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.h
+++ b/drivers/gpu/drm/xe/xe_guc_buf.h
@@ -12,6 +12,7 @@
#include "xe_guc_buf_types.h"
int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache);
+int xe_guc_buf_cache_init_with_size(struct xe_guc_buf_cache *cache, u32 size);
u32 xe_guc_buf_cache_dwords(struct xe_guc_buf_cache *cache);
struct xe_guc_buf xe_guc_buf_reserve(struct xe_guc_buf_cache *cache, u32 dwords);
struct xe_guc_buf xe_guc_buf_from_data(struct xe_guc_buf_cache *cache,
@@ -30,6 +31,7 @@ static inline bool xe_guc_buf_is_valid(const struct xe_guc_buf buf)
}
void *xe_guc_buf_cpu_ptr(const struct xe_guc_buf buf);
+void *xe_guc_buf_sync_read(const struct xe_guc_buf buf);
u64 xe_guc_buf_flush(const struct xe_guc_buf buf);
u64 xe_guc_buf_gpu_addr(const struct xe_guc_buf buf);
u64 xe_guc_cache_gpu_addr_from_ptr(struct xe_guc_buf_cache *cache, const void *ptr, u32 size);
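For orientation, here is a minimal sketch of how the buffer-cache API above fits together; the wrapper name, payload layout, and -ENOBUFS error choice are illustrative assumptions rather than part of the patch, and error handling is elided:

	/*
	 * Sketch only: pass `dwords` of indirect data to the GuC through a
	 * cache sub-allocation, then read back anything the GuC wrote.
	 */
	static int example_h2g_via_cache(struct xe_guc_buf_cache *cache,
					 const u32 *payload, u32 dwords)
	{
		struct xe_guc_buf buf = xe_guc_buf_from_data(cache, payload,
							     dwords * sizeof(u32));
		u64 gpu_addr;

		if (!xe_guc_buf_is_valid(buf))
			return -ENOBUFS;

		gpu_addr = xe_guc_buf_flush(buf);	/* CPU copy -> GPU memory */
		/* ... reference gpu_addr from an H2G action and wait for it ... */
		(void)xe_guc_buf_sync_read(buf);	/* GPU memory -> CPU copy */
		xe_guc_buf_release(buf);

		return 0;
	}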
diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c
index 243dad3e2418..0c1fbe97b8bf 100644
--- a/drivers/gpu/drm/xe/xe_guc_capture.c
+++ b/drivers/gpu/drm/xe/xe_guc_capture.c
@@ -122,6 +122,7 @@ struct __guc_capture_parsed_output {
{ RING_IPEHR(0), REG_32BIT, 0, 0, 0, "IPEHR"}, \
{ RING_INSTDONE(0), REG_32BIT, 0, 0, 0, "RING_INSTDONE"}, \
{ INDIRECT_RING_STATE(0), REG_32BIT, 0, 0, 0, "INDIRECT_RING_STATE"}, \
+ { RING_CURRENT_LRCA(0), REG_32BIT, 0, 0, 0, "CURRENT_LRCA"}, \
{ RING_ACTHD(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
{ RING_ACTHD_UDW(0), REG_64BIT_HI_DW, 0, 0, 0, "ACTHD"}, \
{ RING_BBADDR(0), REG_64BIT_LOW_DW, 0, 0, 0, NULL}, \
@@ -149,6 +150,9 @@ struct __guc_capture_parsed_output {
{ SFC_DONE(2), 0, 0, 0, 0, "SFC_DONE[2]"}, \
{ SFC_DONE(3), 0, 0, 0, 0, "SFC_DONE[3]"}
+#define XE3P_BASE_ENGINE_INSTANCE \
+ { RING_CSMQDEBUG(0), REG_32BIT, 0, 0, 0, "CSMQDEBUG"}
+
/* XE_LP Global */
static const struct __guc_mmio_reg_descr xe_lp_global_regs[] = {
COMMON_XELP_BASE_GLOBAL,
@@ -195,6 +199,12 @@ static const struct __guc_mmio_reg_descr xe_lp_gsc_inst_regs[] = {
COMMON_BASE_ENGINE_INSTANCE,
};
+/* Render / Compute Per-Engine-Instance */
+static const struct __guc_mmio_reg_descr xe3p_rc_inst_regs[] = {
+ COMMON_BASE_ENGINE_INSTANCE,
+ XE3P_BASE_ENGINE_INSTANCE,
+};
+
/*
* Empty list to prevent warnings about unknown class/instance types
* as not all class/instance types have entries on all platforms.
@@ -245,6 +255,21 @@ static const struct __guc_mmio_reg_descr_group xe_hpg_lists[] = {
{}
};
+/* List of lists for Xe3p and beyond */
+static const struct __guc_mmio_reg_descr_group xe3p_lists[] = {
+ MAKE_REGLIST(xe_lp_global_regs, PF, GLOBAL, 0),
+ MAKE_REGLIST(xe_hpg_rc_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(xe3p_rc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_RENDER_COMPUTE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vd_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEO),
+ MAKE_REGLIST(xe_vec_class_regs, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(xe_vec_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_VIDEOENHANCE),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(xe_blt_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_BLITTER),
+ MAKE_REGLIST(empty_regs_list, PF, ENGINE_CLASS, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ MAKE_REGLIST(xe_lp_gsc_inst_regs, PF, ENGINE_INSTANCE, GUC_CAPTURE_LIST_CLASS_GSC_OTHER),
+ {}
+};
static const char * const capture_list_type_names[] = {
"Global",
"Class",
@@ -292,7 +317,9 @@ guc_capture_remove_stale_matches_from_list(struct xe_guc_state_capture *gc,
static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct xe_device *xe)
{
- if (GRAPHICS_VERx100(xe) >= 1255)
+ if (GRAPHICS_VER(xe) >= 35)
+ return xe3p_lists;
+ else if (GRAPHICS_VERx100(xe) >= 1255)
return xe_hpg_lists;
else
return xe_lp_lists;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 3f4e6a46ff16..4ac434ad216f 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -21,24 +21,26 @@
#include "xe_devcoredump.h"
#include "xe_device.h"
#include "xe_gt.h"
-#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
-#include "xe_gt_sriov_printk.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
+#include "xe_guc_pagefault.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
+#include "xe_guc_tlb_inval.h"
#include "xe_map.h"
#include "xe_pm.h"
+#include "xe_sriov_vf.h"
#include "xe_trace_guc.h"
static void receive_g2h(struct xe_guc_ct *ct);
static void g2h_worker_func(struct work_struct *w);
static void safe_mode_worker_func(struct work_struct *w);
static void ct_exit_safe_mode(struct xe_guc_ct *ct);
+static void guc_ct_change_state(struct xe_guc_ct *ct,
+ enum xe_guc_ct_state state);
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
enum {
@@ -91,8 +93,6 @@ struct g2h_fence {
bool done;
};
-#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
-
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
memset(g2h_fence, 0, sizeof(*g2h_fence));
@@ -167,6 +167,7 @@ ct_to_xe(struct xe_guc_ct *ct)
*/
#define CTB_DESC_SIZE ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
+#define CTB_H2G_BUFFER_OFFSET (CTB_DESC_SIZE * 2)
#define CTB_H2G_BUFFER_SIZE (SZ_4K)
#define CTB_G2H_BUFFER_SIZE (SZ_128K)
#define G2H_ROOM_BUFFER_SIZE (CTB_G2H_BUFFER_SIZE / 2)
@@ -190,7 +191,7 @@ long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
static size_t guc_ct_size(void)
{
- return 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE +
+ return CTB_H2G_BUFFER_OFFSET + CTB_H2G_BUFFER_SIZE +
CTB_G2H_BUFFER_SIZE;
}
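Taken together with the init/register hunks below, the defines above give the CT BO this layout (a sketch derived from this patch, not text from it):

	/*
	 * 0                            H2G descriptor (guc_ct_buffer_desc)
	 * CTB_DESC_SIZE                G2H descriptor
	 * CTB_H2G_BUFFER_OFFSET        H2G command buffer, CTB_H2G_BUFFER_SIZE
	 *   (== 2 * CTB_DESC_SIZE)
	 * CTB_H2G_BUFFER_OFFSET +
	 *   CTB_H2G_BUFFER_SIZE        G2H command buffer, CTB_G2H_BUFFER_SIZE
	 */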
@@ -198,6 +199,9 @@ static void guc_ct_fini(struct drm_device *drm, void *arg)
{
struct xe_guc_ct *ct = arg;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+ cancel_work_sync(&ct->dead.worker);
+#endif
ct_exit_safe_mode(ct);
destroy_workqueue(ct->g2h_wq);
xa_destroy(&ct->fence_lookup);
@@ -221,6 +225,12 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE));
+ err = drmm_mutex_init(&xe->drm, &ct->lock);
+ if (err)
+ return err;
+
+ primelockdep(ct);
+
ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", WQ_MEM_RECLAIM);
if (!ct->g2h_wq)
return -ENOMEM;
@@ -232,16 +242,13 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
spin_lock_init(&ct->dead.lock);
INIT_WORK(&ct->dead.worker, ct_dead_worker_func);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ stack_depot_init();
+#endif
#endif
init_waitqueue_head(&ct->wq);
init_waitqueue_head(&ct->g2h_fence_wq);
- err = drmm_mutex_init(&xe->drm, &ct->lock);
- if (err)
- return err;
-
- primelockdep(ct);
-
err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
if (err)
return err;
@@ -252,6 +259,13 @@ int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct)
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */
+static void guc_action_disable_ct(void *arg)
+{
+ struct xe_guc_ct *ct = arg;
+
+ guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED);
+}
+
int xe_guc_ct_init(struct xe_guc_ct *ct)
{
struct xe_device *xe = ct_to_xe(ct);
@@ -268,10 +282,39 @@ int xe_guc_ct_init(struct xe_guc_ct *ct)
return PTR_ERR(bo);
ct->bo = bo;
- return 0;
+
+ return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
}
ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */
+/**
+ * xe_guc_ct_init_post_hwconfig - Reinitialize the GuC CTB in VRAM
+ * @ct: the &xe_guc_ct
+ *
+ * Allocate a new BO in VRAM and free the previous BO that was allocated
+ * in system memory (SMEM). Applicable only for DGFX products.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct)
+{
+ struct xe_device *xe = ct_to_xe(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_tile *tile = gt_to_tile(gt);
+ int ret;
+
+ xe_assert(xe, !xe_guc_ct_enabled(ct));
+
+ if (IS_DGFX(xe)) {
+ ret = xe_managed_bo_reinit_in_vram(xe, tile, &ct->bo);
+ if (ret)
+ return ret;
+ }
+
+ devm_remove_action(xe->drm.dev, guc_action_disable_ct, ct);
+ return devm_add_action_or_reset(xe->drm.dev, guc_action_disable_ct, ct);
+}
+
#define desc_read(xe_, guc_ctb__, field_) \
xe_map_rd_field(xe_, &guc_ctb__->desc, 0, \
struct guc_ct_buffer_desc, field_)
@@ -295,7 +338,7 @@ static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
h2g->desc = *map;
xe_map_memset(xe, &h2g->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
- h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2);
+ h2g->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET);
}
static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
@@ -313,7 +356,7 @@ static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
g2h->desc = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE);
xe_map_memset(xe, &g2h->desc, 0, 0, sizeof(struct guc_ct_buffer_desc));
- g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_DESC_SIZE * 2 +
+ g2h->cmds = IOSYS_MAP_INIT_OFFSET(map, CTB_H2G_BUFFER_OFFSET +
CTB_H2G_BUFFER_SIZE);
}
@@ -324,7 +367,7 @@ static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
int err;
desc_addr = xe_bo_ggtt_addr(ct->bo);
- ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET;
size = ct->ctbs.h2g.info.size * sizeof(u32);
err = xe_guc_self_cfg64(guc,
@@ -351,7 +394,7 @@ static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
int err;
desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
- ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
+ ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_H2G_BUFFER_OFFSET +
CTB_H2G_BUFFER_SIZE;
size = ct->ctbs.g2h.info.size * sizeof(u32);
@@ -465,7 +508,7 @@ static void ct_exit_safe_mode(struct xe_guc_ct *ct)
xe_gt_dbg(ct_to_gt(ct), "GuC CT safe-mode disabled\n");
}
-int xe_guc_ct_enable(struct xe_guc_ct *ct)
+static int __xe_guc_ct_start(struct xe_guc_ct *ct, bool needs_register)
{
struct xe_device *xe = ct_to_xe(ct);
struct xe_gt *gt = ct_to_gt(ct);
@@ -473,21 +516,29 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct)
xe_gt_assert(gt, !xe_guc_ct_enabled(ct));
- xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
- guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
- guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
+ if (needs_register) {
+ xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo));
+ guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
+ guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
- err = guc_ct_ctb_h2g_register(ct);
- if (err)
- goto err_out;
+ err = guc_ct_ctb_h2g_register(ct);
+ if (err)
+ goto err_out;
- err = guc_ct_ctb_g2h_register(ct);
- if (err)
- goto err_out;
+ err = guc_ct_ctb_g2h_register(ct);
+ if (err)
+ goto err_out;
- err = guc_ct_control_toggle(ct, true);
- if (err)
- goto err_out;
+ err = guc_ct_control_toggle(ct, true);
+ if (err)
+ goto err_out;
+ } else {
+ ct->ctbs.h2g.info.broken = false;
+ ct->ctbs.g2h.info.broken = false;
+ /* Wipe the H2G buffer so any stale commands are skipped */
+ xe_map_memset(xe, &ct->bo->vmap, CTB_H2G_BUFFER_OFFSET, 0,
+ CTB_H2G_BUFFER_SIZE);
+ }
guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED);
@@ -519,6 +570,32 @@ err_out:
return err;
}
+/**
+ * xe_guc_ct_restart() - Restart GuC CT
+ * @ct: the &xe_guc_ct
+ *
+ * Restart the GuC CT in an empty state without re-issuing the CT registration MMIO commands.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int xe_guc_ct_restart(struct xe_guc_ct *ct)
+{
+ return __xe_guc_ct_start(ct, false);
+}
+
+/**
+ * xe_guc_ct_enable() - Enable GuC CT
+ * @ct: the &xe_guc_ct
+ *
+ * Enable the GuC CT in an empty state and issue the CT registration MMIO commands.
+ *
+ * Return: 0 on success, or a negative errno on failure.
+ */
+int xe_guc_ct_enable(struct xe_guc_ct *ct)
+{
+ return __xe_guc_ct_start(ct, true);
+}
+
static void stop_g2h_handler(struct xe_guc_ct *ct)
{
cancel_work_sync(&ct->g2h_worker);
@@ -539,6 +616,16 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
}
/**
+ * xe_guc_ct_flush_and_stop - Flush and stop all processing of G2H / H2G
+ * @ct: the &xe_guc_ct
+ */
+void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct)
+{
+ receive_g2h(ct);
+ xe_guc_ct_stop(ct);
+}
+
+/**
* xe_guc_ct_stop - Set GuC to stopped state
* @ct: the &xe_guc_ct
*
@@ -701,6 +788,28 @@ static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
return seqno;
}
+#define MAKE_ACTION(type, __action) \
+({ \
+ FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) | \
+ FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION | \
+ GUC_HXG_EVENT_MSG_0_DATA0, __action); \
+})
+
+static bool vf_action_can_safely_fail(struct xe_device *xe, u32 action)
+{
+ /*
+ * When resuming a VF, we can't reliably track whether context
+ * registration has completed in the GuC state machine. It is harmless
+ * to resend the request, as it will fail silently if GUC_HXG_TYPE_EVENT
+ * is used. Additionally, if there is an H2G protocol issue on a VF,
+ * subsequent H2G messages sent as GUC_HXG_TYPE_FAST_REQUEST will likely
+ * fail.
+ */
+ return IS_SRIOV_VF(xe) && xe_sriov_vf_migration_supported(xe) &&
+ (action == XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC ||
+ action == XE_GUC_ACTION_REGISTER_CONTEXT);
+}
+
#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
@@ -772,18 +881,14 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
FIELD_PREP(GUC_CTB_MSG_0_FENCE, ct_fence_value);
if (want_response) {
- cmd[1] =
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
- FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
- GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_REQUEST, action[0]);
+ } else if (vf_action_can_safely_fail(xe, action[0])) {
+ cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_EVENT, action[0]);
} else {
fast_req_track(ct, ct_fence_value,
FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
- cmd[1] =
- FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
- FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
- GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+ cmd[1] = MAKE_ACTION(GUC_HXG_TYPE_FAST_REQUEST, action[0]);
}
/* H2G header in cmd[1] replaces action[0] so: */
@@ -816,7 +921,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
{
- struct xe_gt *gt __maybe_unused = ct_to_gt(ct);
+ struct xe_gt *gt = ct_to_gt(ct);
u16 seqno;
int ret;
@@ -837,7 +942,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
goto out;
}
- if (ct->state == XE_GUC_CT_STATE_STOPPED) {
+ if (ct->state == XE_GUC_CT_STATE_STOPPED || xe_gt_recovery_pending(gt)) {
ret = -ECANCELED;
goto out;
}
@@ -892,22 +997,15 @@ static void kick_reset(struct xe_guc_ct *ct)
static int dequeue_one_g2h(struct xe_guc_ct *ct);
-static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
- u32 g2h_len, u32 num_g2h,
- struct g2h_fence *g2h_fence)
+/*
+ * Wait before retrying an H2G message send.
+ * Return: true if ready for retry, false if the wait timed out
+ */
+static bool guc_ct_send_wait_for_retry(struct xe_guc_ct *ct, u32 len,
+ u32 g2h_len, struct g2h_fence *g2h_fence,
+ unsigned int *sleep_period_ms)
{
struct xe_device *xe = ct_to_xe(ct);
- struct xe_gt *gt = ct_to_gt(ct);
- unsigned int sleep_period_ms = 1;
- int ret;
-
- xe_gt_assert(gt, !g2h_len || !g2h_fence);
- lockdep_assert_held(&ct->lock);
- xe_device_assert_mem_access(ct_to_xe(ct));
-
-try_again:
- ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
- g2h_fence);
/*
* We wait to try to restore credits for about 1 second before bailing.
@@ -916,24 +1014,22 @@ try_again:
* the case of G2H we process any G2H in the channel, hopefully freeing
* credits as we consume the G2H messages.
*/
- if (unlikely(ret == -EBUSY &&
- !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
+ if (!h2g_has_room(ct, len + GUC_CTB_HDR_LEN)) {
struct guc_ctb *h2g = &ct->ctbs.h2g;
- if (sleep_period_ms == 1024)
- goto broken;
+ if (*sleep_period_ms == 1024)
+ return false;
trace_xe_guc_ct_h2g_flow_control(xe, h2g->info.head, h2g->info.tail,
h2g->info.size,
h2g->info.space,
len + GUC_CTB_HDR_LEN);
- msleep(sleep_period_ms);
- sleep_period_ms <<= 1;
-
- goto try_again;
- } else if (unlikely(ret == -EBUSY)) {
+ msleep(*sleep_period_ms);
+ *sleep_period_ms <<= 1;
+ } else {
struct xe_device *xe = ct_to_xe(ct);
struct guc_ctb *g2h = &ct->ctbs.g2h;
+ int ret;
trace_xe_guc_ct_g2h_flow_control(xe, g2h->info.head,
desc_read(xe, g2h, tail),
@@ -947,7 +1043,7 @@ try_again:
(desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
g2h_avail(ct), HZ))
- goto broken;
+ return false;
#undef g2h_avail
ret = dequeue_one_g2h(ct);
@@ -955,9 +1051,32 @@ try_again:
if (ret != -ECANCELED)
xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)",
ERR_PTR(ret));
- goto broken;
+ return false;
}
+ }
+ return true;
+}
+
+static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
+ u32 g2h_len, u32 num_g2h,
+ struct g2h_fence *g2h_fence)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ unsigned int sleep_period_ms = 1;
+ int ret;
+
+ xe_gt_assert(gt, !g2h_len || !g2h_fence);
+ lockdep_assert_held(&ct->lock);
+ xe_device_assert_mem_access(ct_to_xe(ct));
+
+try_again:
+ ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
+ g2h_fence);
+
+ if (unlikely(ret == -EBUSY)) {
+ if (!guc_ct_send_wait_for_retry(ct, len, g2h_len, g2h_fence,
+ &sleep_period_ms))
+ goto broken;
goto try_again;
}
@@ -1040,11 +1159,15 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret)
return true;
}
+#define GUC_SEND_RETRY_LIMIT 50
+#define GUC_SEND_RETRY_MSLEEP 5
+
static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
u32 *response_buffer, bool no_fail)
{
struct xe_gt *gt = ct_to_gt(ct);
struct g2h_fence g2h_fence;
+ unsigned int retries = 0;
int ret = 0;
/*
@@ -1109,6 +1232,12 @@ retry_same_fence:
xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
action[0], g2h_fence.reason);
mutex_unlock(&ct->lock);
+ if (++retries > GUC_SEND_RETRY_LIMIT) {
+ xe_gt_err(gt, "H2G action %#x reached retry limit=%u, aborting\n",
+ action[0], GUC_SEND_RETRY_LIMIT);
+ return -ELOOP;
+ }
+ msleep(GUC_SEND_RETRY_MSLEEP * retries);
goto retry;
}
if (g2h_fence.fail) {
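Worst case, with GUC_SEND_RETRY_LIMIT = 50 and GUC_SEND_RETRY_MSLEEP = 5, the linearly growing msleep() above waits 5 * (1 + 2 + ... + 50) ms = 6375 ms, i.e. roughly 6.4 seconds of back-off in total before the send is aborted with -ELOOP.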
@@ -1289,6 +1418,10 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
fast_req_report(ct, fence);
+ /* FIXME: W/A race in the GuC, will get in firmware soon */
+ if (xe_gt_recovery_pending(gt))
+ return 0;
+
CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
@@ -1416,12 +1549,7 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
ret = xe_guc_pagefault_handler(guc, payload, adj_len);
break;
case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
- ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
- adj_len);
- break;
- case XE_GUC_ACTION_ACCESS_COUNTER_NOTIFY:
- ret = xe_guc_access_counter_notify_handler(guc, payload,
- adj_len);
+ ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
break;
case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
@@ -1439,6 +1567,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
case XE_GUC_ACTION_NOTIFY_EXCEPTION:
ret = guc_crash_process_msg(ct, action);
break;
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+ case XE_GUC_ACTION_TEST_G2G_RECV:
+ ret = xe_guc_g2g_test_notification(guc, payload, adj_len);
+ break;
+#endif
default:
xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action);
}
@@ -1618,8 +1751,7 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
break;
case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
__g2h_release_space(ct, len);
- ret = xe_guc_tlb_invalidation_done_handler(guc, payload,
- adj_len);
+ ret = xe_guc_tlb_inval_done_handler(guc, payload, adj_len);
break;
default:
xe_gt_warn(gt, "NOT_POSSIBLE");
@@ -1742,186 +1874,6 @@ static void g2h_worker_func(struct work_struct *w)
receive_g2h(ct);
}
-static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds,
- u32 size, u32 idx, s64 shift)
-{
- u32 hi, lo;
- u64 offset;
-
- lo = xe_map_rd_ring_u32(xe, cmds, idx, size);
- hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size);
- offset = make_u64(hi, lo);
- offset += shift;
- lo = lower_32_bits(offset);
- hi = upper_32_bits(offset);
- xe_map_wr_ring_u32(xe, cmds, idx, size, lo);
- xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi);
-}
-
-/*
- * Shift any GGTT addresses within a single message left within CTB from
- * before post-migration recovery.
- * @ct: pointer to CT struct of the target GuC
- * @cmds: iomap buffer containing CT messages
- * @head: start of the target message within the buffer
- * @len: length of the target message
- * @size: size of the commands buffer
- * @shift: the address shift to be added to each GGTT reference
- * Return: true if the message was fixed or needed no fixups, false on failure
- */
-static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct,
- struct iosys_map *cmds, u32 head,
- u32 len, u32 size, s64 shift)
-{
- struct xe_gt *gt = ct_to_gt(ct);
- struct xe_device *xe = ct_to_xe(ct);
- u32 msg[GUC_HXG_MSG_MIN_LEN];
- u32 action, i, n;
-
- xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN);
-
- msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size);
- action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
-
- xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action);
-
- switch (action) {
- case XE_GUC_ACTION_REGISTER_CONTEXT:
- if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN)
- goto err_len;
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
- shift);
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
- shift);
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
- break;
- case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
- if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN)
- goto err_len;
- n = xe_map_rd_ring_u32(xe, cmds, head +
- XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size);
- if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n)
- goto err_len;
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
- shift);
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
- shift);
- for (i = 0; i < n; i++)
- xe_fixup_u64_in_cmds(xe, cmds, size, head +
- XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR
- + 2 * i, shift);
- break;
- default:
- break;
- }
- return true;
-
-err_len:
- xe_gt_err(gt, "Skipped G2G %#x message fixups, unexpected length (%u)\n", action, len);
- return false;
-}
-
-/*
- * Apply fixups to the next outgoing CT message within given CTB
- * @ct: the &xe_guc_ct struct instance representing the target GuC
- * @h2g: the &guc_ctb struct instance of the target buffer
- * @shift: shift to be added to all GGTT addresses within the CTB
- * @mhead: pointer to an integer storing message start position; the
- * position is changed to next message before this function return
- * @avail: size of the area available for parsing, that is length
- * of all remaining messages stored within the CTB
- * Return: size of the area available for parsing after one message
- * has been parsed, that is length remaining from the updated mhead
- */
-static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g,
- s64 shift, u32 *mhead, s32 avail)
-{
- struct xe_gt *gt = ct_to_gt(ct);
- struct xe_device *xe = ct_to_xe(ct);
- u32 msg[GUC_HXG_MSG_MIN_LEN];
- u32 size = h2g->info.size;
- u32 head = *mhead;
- u32 len;
-
- xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN);
-
- /* Read header */
- msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size);
- len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
-
- if (unlikely(len > (u32)avail)) {
- xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n",
- avail, len);
- return 0;
- }
-
- head = (head + GUC_CTB_MSG_MIN_LEN) % size;
- if (!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift))
- return 0;
- *mhead = (head + msg_len_to_hxg_len(len)) % size;
-
- return avail - len;
-}
-
-/**
- * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages
- * @ct: pointer to CT struct of the target GuC
- * @ggtt_shift: shift to be added to all GGTT addresses within the CTB
- *
- * Messages in GuC to Host CTB are owned by GuC and any fixups in them
- * are made by GuC. But content of the Host to GuC CTB is owned by the
- * KMD, so fixups to GGTT references in any pending messages need to be
- * applied here.
- * This function updates GGTT offsets in payloads of pending H2G CTB
- * messages (messages which were not consumed by GuC before the VF got
- * paused).
- */
-void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift)
-{
- struct guc_ctb *h2g = &ct->ctbs.h2g;
- struct xe_guc *guc = ct_to_guc(ct);
- struct xe_gt *gt = guc_to_gt(guc);
- u32 head, tail, size;
- s32 avail;
-
- if (unlikely(h2g->info.broken))
- return;
-
- h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
- head = h2g->info.head;
- tail = READ_ONCE(h2g->info.tail);
- size = h2g->info.size;
-
- if (unlikely(head > size))
- goto corrupted;
-
- if (unlikely(tail >= size))
- goto corrupted;
-
- avail = tail - head;
-
- /* beware of buffer wrap case */
- if (unlikely(avail < 0))
- avail += size;
- xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size);
- xe_gt_assert(gt, avail >= 0);
-
- while (avail > 0)
- avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail);
-
- return;
-
-corrupted:
- xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n",
- head, tail, size);
- h2g->info.broken = true;
-}
-
static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
bool want_ctb)
{
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 18d4225e6502..ca1ce2b3c354 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -13,9 +13,12 @@ struct xe_device;
int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct);
int xe_guc_ct_init(struct xe_guc_ct *ct);
+int xe_guc_ct_init_post_hwconfig(struct xe_guc_ct *ct);
int xe_guc_ct_enable(struct xe_guc_ct *ct);
+int xe_guc_ct_restart(struct xe_guc_ct *ct);
void xe_guc_ct_disable(struct xe_guc_ct *ct);
void xe_guc_ct_stop(struct xe_guc_ct *ct);
+void xe_guc_ct_flush_and_stop(struct xe_guc_ct *ct);
void xe_guc_ct_fast_path(struct xe_guc_ct *ct);
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct);
@@ -23,8 +26,6 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
-void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift);
-
static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
{
return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
@@ -73,4 +74,13 @@ xe_guc_ct_send_block_no_fail(struct xe_guc_ct *ct, const u32 *action, u32 len)
long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct);
+/**
+ * xe_guc_ct_wake_waiters() - Wake up the GuC CT waiters
+ * @ct: GuC CT object
+ */
+static inline void xe_guc_ct_wake_waiters(struct xe_guc_ct *ct)
+{
+ wake_up_all(&ct->wq);
+}
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index 8b03b50313d9..09d7ff1ef42a 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -126,7 +126,7 @@ struct xe_fast_req_fence {
* for the H2G and G2H requests sent and received through the buffers.
*/
struct xe_guc_ct {
- /** @bo: XE BO for CT */
+ /** @bo: Xe BO for CT */
struct xe_bo *bo;
/** @lock: protects everything in CT layer */
struct mutex lock;
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 92e1f9f41b8c..2b99c1ebdd58 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -94,16 +94,17 @@ static int allocate_engine_activity_buffers(struct xe_guc *guc,
struct xe_tile *tile = gt_to_tile(gt);
struct xe_bo *bo, *metadata_bo;
- metadata_bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(metadata_size),
- ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+ metadata_bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(metadata_size),
+ ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE,
+ false);
if (IS_ERR(metadata_bo))
return PTR_ERR(metadata_bo);
- bo = xe_bo_create_pin_map(gt_to_xe(gt), tile, NULL, PAGE_ALIGN(size),
- ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE);
+ bo = xe_bo_create_pin_map_novm(gt_to_xe(gt), tile, PAGE_ALIGN(size),
+ ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_GGTT | XE_BO_FLAG_GGTT_INVALIDATE, false);
if (IS_ERR(bo)) {
xe_bo_unpin_map_no_vm(metadata_bo);
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index a3f421e2adc0..a3b034e4b205 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -35,8 +35,8 @@ struct xe_guc_exec_queue {
struct xe_sched_msg static_msgs[MAX_STATIC_MSG_TYPE];
/** @lr_tdr: long running TDR worker */
struct work_struct lr_tdr;
- /** @fini_async: do final fini async from this worker */
- struct work_struct fini_async;
+ /** @destroy_async: do final destroy async from this worker */
+ struct work_struct destroy_async;
/** @resume_time: time of last resume */
u64 resume_time;
/** @state: GuC specific state for this xe_exec_queue */
@@ -51,6 +51,21 @@ struct xe_guc_exec_queue {
wait_queue_head_t suspend_wait;
/** @suspend_pending: a suspend of the exec_queue is pending */
bool suspend_pending;
+ /**
+ * @needs_cleanup: Needs a cleanup message during VF post migration
+ * recovery.
+ */
+ bool needs_cleanup;
+ /**
+ * @needs_suspend: Needs a suspend message during VF post migration
+ * recovery.
+ */
+ bool needs_suspend;
+ /**
+ * @needs_resume: Needs a resume message during VF post migration
+ * recovery.
+ */
+ bool needs_resume;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h
index 6f57578b07cb..c90dd266e9cf 100644
--- a/drivers/gpu/drm/xe/xe_guc_fwif.h
+++ b/drivers/gpu/drm/xe/xe_guc_fwif.h
@@ -15,6 +15,7 @@
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET 4
#define G2H_LEN_DW_DEREGISTER_CONTEXT 3
#define G2H_LEN_DW_TLB_INVALIDATE 3
+#define G2H_LEN_DW_G2G_NOTIFY_MIN 3
#define GUC_ID_MAX 65535
#define GUC_ID_UNKNOWN 0xffffffff
@@ -45,6 +46,11 @@
#define GUC_MAX_ENGINE_CLASSES 16
#define GUC_MAX_INSTANCES_PER_CLASS 32
+#define GUC_CONTEXT_NORMAL 0
+#define GUC_CONTEXT_COMPRESSION_SAVE 1
+#define GUC_CONTEXT_COMPRESSION_RESTORE 2
+#define GUC_CONTEXT_COUNT (GUC_CONTEXT_COMPRESSION_RESTORE + 1)
+
/* Helper for context registration H2G */
struct guc_ctxt_registration_info {
u32 flags;
@@ -60,6 +66,7 @@ struct guc_ctxt_registration_info {
u32 hwlrca_hi;
};
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
+#define CONTEXT_REGISTRATION_FLAG_TYPE GENMASK(2, 1)
/* 32-bit KLV structure as used by policy updates and others */
struct guc_klv_generic_dw_t {
@@ -84,13 +91,10 @@ struct guc_update_exec_queue_policy {
#define GUC_LOG_NOTIFY_ON_HALF_FULL BIT(1)
#define GUC_LOG_CAPTURE_ALLOC_UNITS BIT(2)
#define GUC_LOG_LOG_ALLOC_UNITS BIT(3)
-#define GUC_LOG_CRASH_SHIFT 4
-#define GUC_LOG_CRASH_MASK (0x3 << GUC_LOG_CRASH_SHIFT)
-#define GUC_LOG_DEBUG_SHIFT 6
-#define GUC_LOG_DEBUG_MASK (0xF << GUC_LOG_DEBUG_SHIFT)
-#define GUC_LOG_CAPTURE_SHIFT 10
-#define GUC_LOG_CAPTURE_MASK (0x3 << GUC_LOG_CAPTURE_SHIFT)
-#define GUC_LOG_BUF_ADDR_SHIFT 12
+#define GUC_LOG_CRASH REG_GENMASK(5, 4)
+#define GUC_LOG_DEBUG REG_GENMASK(9, 6)
+#define GUC_LOG_CAPTURE REG_GENMASK(11, 10)
+#define GUC_LOG_BUF_ADDR REG_GENMASK(31, 12)
#define GUC_CTL_WA 1
#define GUC_WA_GAM_CREDITS BIT(10)
@@ -103,28 +107,24 @@ struct guc_update_exec_queue_policy {
#define GUC_WA_RENDER_RST_RC6_EXIT BIT(19)
#define GUC_WA_RCS_REGS_IN_CCS_REGS_LIST BIT(21)
#define GUC_WA_ENABLE_TSC_CHECK_ON_RC6 BIT(22)
+#define GUC_WA_SAVE_RESTORE_MCFG_REG_AT_MC6 BIT(25)
#define GUC_CTL_FEATURE 2
#define GUC_CTL_ENABLE_SLPC BIT(2)
#define GUC_CTL_ENABLE_LITE_RESTORE BIT(4)
+#define GUC_CTL_ENABLE_PSMI_LOGGING BIT(7)
+#define GUC_CTL_MAIN_GAMCTRL_QUEUES BIT(9)
#define GUC_CTL_DISABLE_SCHEDULER BIT(14)
#define GUC_CTL_DEBUG 3
-#define GUC_LOG_VERBOSITY_SHIFT 0
-#define GUC_LOG_VERBOSITY_LOW (0 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_MED (1 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_HIGH (2 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_ULTRA (3 << GUC_LOG_VERBOSITY_SHIFT)
-#define GUC_LOG_VERBOSITY_MIN 0
+#define GUC_LOG_VERBOSITY REG_GENMASK(1, 0)
#define GUC_LOG_VERBOSITY_MAX 3
-#define GUC_LOG_VERBOSITY_MASK 0x0000000f
-#define GUC_LOG_DESTINATION_MASK (3 << 4)
-#define GUC_LOG_DISABLED (1 << 6)
-#define GUC_PROFILE_ENABLED (1 << 7)
+#define GUC_LOG_DESTINATION REG_GENMASK(5, 4)
+#define GUC_LOG_DISABLED BIT(6)
+#define GUC_PROFILE_ENABLED BIT(7)
#define GUC_CTL_ADS 4
-#define GUC_ADS_ADDR_SHIFT 1
-#define GUC_ADS_ADDR_MASK (0xFFFFF << GUC_ADS_ADDR_SHIFT)
+#define GUC_ADS_ADDR REG_GENMASK(21, 1)
#define GUC_CTL_DEVID 5
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
index f1e2b0be90a9..98a47ac42b08 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.h
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -17,7 +17,7 @@ struct xe_device;
#define DEBUG_BUFFER_SIZE SZ_8M
#define CAPTURE_BUFFER_SIZE SZ_2M
#else
-#define CRASH_BUFFER_SIZE SZ_8K
+#define CRASH_BUFFER_SIZE SZ_16K
#define DEBUG_BUFFER_SIZE SZ_64K
#define CAPTURE_BUFFER_SIZE SZ_1M
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_log_types.h b/drivers/gpu/drm/xe/xe_guc_log_types.h
index b3d5c72ac752..02851b924aa4 100644
--- a/drivers/gpu/drm/xe/xe_guc_log_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_log_types.h
@@ -44,7 +44,7 @@ struct xe_guc_log_snapshot {
struct xe_guc_log {
/** @level: GuC log level */
u32 level;
- /** @bo: XE BO for GuC log */
+ /** @bo: Xe BO for GuC log */
struct xe_bo *bo;
/** @stats: logging related stats */
struct {
diff --git a/drivers/gpu/drm/xe/xe_guc_pagefault.c b/drivers/gpu/drm/xe/xe_guc_pagefault.c
new file mode 100644
index 000000000000..719a18187a31
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_pagefault.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "abi/guc_actions_abi.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_pagefault.h"
+#include "xe_pagefault.h"
+
+static void guc_ack_fault(struct xe_pagefault *pf, int err)
+{
+ u32 vfid = FIELD_GET(PFD_VFID, pf->producer.msg[2]);
+ u32 engine_instance = FIELD_GET(PFD_ENG_INSTANCE, pf->producer.msg[0]);
+ u32 engine_class = FIELD_GET(PFD_ENG_CLASS, pf->producer.msg[0]);
+ u32 pdata = FIELD_GET(PFD_PDATA_LO, pf->producer.msg[0]) |
+ (FIELD_GET(PFD_PDATA_HI, pf->producer.msg[1]) <<
+ PFD_PDATA_HI_SHIFT);
+ u32 action[] = {
+ XE_GUC_ACTION_PAGE_FAULT_RES_DESC,
+
+ FIELD_PREP(PFR_VALID, 1) |
+ FIELD_PREP(PFR_SUCCESS, !!err) |
+ FIELD_PREP(PFR_REPLY, PFR_ACCESS) |
+ FIELD_PREP(PFR_DESC_TYPE, FAULT_RESPONSE_DESC) |
+ FIELD_PREP(PFR_ASID, pf->consumer.asid),
+
+ FIELD_PREP(PFR_VFID, vfid) |
+ FIELD_PREP(PFR_ENG_INSTANCE, engine_instance) |
+ FIELD_PREP(PFR_ENG_CLASS, engine_class) |
+ FIELD_PREP(PFR_PDATA, pdata),
+ };
+ struct xe_guc *guc = pf->producer.private;
+
+ xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
+}
+
+static const struct xe_pagefault_ops guc_pagefault_ops = {
+ .ack_fault = guc_ack_fault,
+};
+
+/**
+ * xe_guc_pagefault_handler() - G2H page fault handler
+ * @guc: GuC object
+ * @msg: G2H message
+ * @len: Length of G2H message
+ *
+ * Parse GuC to host (G2H) message into a struct xe_pagefault and forward onto
+ * the Xe page fault layer.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_pagefault pf;
+ int i;
+
+#define GUC_PF_MSG_LEN_DW \
+ (sizeof(struct xe_guc_pagefault_desc) / sizeof(u32))
+
+ BUILD_BUG_ON(GUC_PF_MSG_LEN_DW > XE_PAGEFAULT_PRODUCER_MSG_LEN_DW);
+
+ if (len != GUC_PF_MSG_LEN_DW)
+ return -EPROTO;
+
+ pf.gt = guc_to_gt(guc);
+
+ /*
+ * XXX: These values happen to match the enum in xe_pagefault_types.h.
+ * If that changes, we'll need to remap them here.
+ */
+ pf.consumer.page_addr = ((u64)FIELD_GET(PFD_VIRTUAL_ADDR_HI, msg[3])
+ << PFD_VIRTUAL_ADDR_HI_SHIFT) |
+ (FIELD_GET(PFD_VIRTUAL_ADDR_LO, msg[2]) <<
+ PFD_VIRTUAL_ADDR_LO_SHIFT);
+ pf.consumer.asid = FIELD_GET(PFD_ASID, msg[1]);
+ pf.consumer.access_type = FIELD_GET(PFD_ACCESS_TYPE, msg[2]);
+ pf.consumer.fault_type = FIELD_GET(PFD_FAULT_TYPE, msg[2]);
+ if (FIELD_GET(XE2_PFD_TRVA_FAULT, msg[0]))
+ pf.consumer.fault_level = XE_PAGEFAULT_LEVEL_NACK;
+ else
+ pf.consumer.fault_level = FIELD_GET(PFD_FAULT_LEVEL, msg[0]);
+ pf.consumer.engine_class = FIELD_GET(PFD_ENG_CLASS, msg[0]);
+ pf.consumer.engine_instance = FIELD_GET(PFD_ENG_INSTANCE, msg[0]);
+
+ pf.producer.private = guc;
+ pf.producer.ops = &guc_pagefault_ops;
+ for (i = 0; i < GUC_PF_MSG_LEN_DW; ++i)
+ pf.producer.msg[i] = msg[i];
+
+#undef GUC_PF_MSG_LEN_DW
+
+ return xe_pagefault_handler(guc_to_xe(guc), &pf);
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_pagefault.h b/drivers/gpu/drm/xe/xe_guc_pagefault.h
new file mode 100644
index 000000000000..3bd599e7207c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_pagefault.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_PAGEFAULT_H_
+#define _XE_GUC_PAGEFAULT_H_
+
+#include <linux/types.h>
+
+struct xe_guc;
+
+int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 68b192fe3b32..951a49fb1d3e 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -7,12 +7,14 @@
#include <linux/cleanup.h>
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/wait_bit.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
+#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>
#include "abi/guc_actions_slpc_abi.h"
@@ -79,6 +81,11 @@
* Xe driver enables SLPC with all of its defaults features and frequency
* selection, which varies per platform.
*
+ * Power profiles add another level of control to SLPC. When the power_saving
+ * profile is chosen, SLPC uses conservative thresholds to ramp frequency,
+ * thus saving power. The base profile is the default and ensures balanced
+ * performance for any workload.
+ *
* Render-C States:
* ================
*
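A minimal sketch of driving the new profile selection from driver code; the wrapper and the 64-byte buffer are illustrative assumptions (see xe_guc_pc_set_power_profile()/xe_guc_pc_get_power_profile() added later in this patch):

	/* Illustrative only: switch a GT to the power-saving SLPC profile. */
	static int example_enable_power_saving(struct xe_guc_pc *pc)
	{
		char cur[64];	/* assumed big enough for "base [power_saving]\n" */
		int err;

		err = xe_guc_pc_set_power_profile(pc, "power_saving");
		if (err)
			return err;

		xe_guc_pc_get_power_profile(pc, cur);	/* cur is now "base [power_saving]\n" */
		return 0;
	}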
@@ -125,26 +132,16 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc)
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count))
static int wait_for_pc_state(struct xe_guc_pc *pc,
- enum slpc_global_state state,
+ enum slpc_global_state target_state,
int timeout_ms)
{
- int timeout_us = 1000 * timeout_ms;
- int slept, wait = 10;
+ enum slpc_global_state state;
xe_device_assert_mem_access(pc_to_xe(pc));
- for (slept = 0; slept < timeout_us;) {
- if (slpc_shared_data_read(pc, header.global_state) == state)
- return 0;
-
- usleep_range(wait, wait << 1);
- slept += wait;
- wait <<= 1;
- if (slept + wait > timeout_us)
- wait = timeout_us - slept;
- }
-
- return -ETIMEDOUT;
+ return poll_timeout_us(state = slpc_shared_data_read(pc, header.global_state),
+ state == target_state,
+ 20, timeout_ms * USEC_PER_MSEC, false);
}
static int wait_for_flush_complete(struct xe_guc_pc *pc)
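As called above, poll_timeout_us(op, cond, sleep_us, timeout_us, sleep_before_read) from <linux/iopoll.h> re-evaluates op until cond holds, sleeping about 20 us between polls, and returns 0 on success or -ETIMEDOUT once timeout_ms * USEC_PER_MSEC microseconds have elapsed; the trailing false skips the sleep before the first read. It replaces the hand-rolled exponential back-off loops deleted in this hunk and the next.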
@@ -159,24 +156,15 @@ static int wait_for_flush_complete(struct xe_guc_pc *pc)
return 0;
}
-static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq)
+static int wait_for_act_freq_max_limit(struct xe_guc_pc *pc, u32 max_limit)
{
- int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC;
- int slept, wait = 10;
-
- for (slept = 0; slept < timeout_us;) {
- if (xe_guc_pc_get_act_freq(pc) <= freq)
- return 0;
-
- usleep_range(wait, wait << 1);
- slept += wait;
- wait <<= 1;
- if (slept + wait > timeout_us)
- wait = timeout_us - slept;
- }
+ u32 freq;
- return -ETIMEDOUT;
+ return poll_timeout_us(freq = xe_guc_pc_get_act_freq(pc),
+ freq <= max_limit,
+ 20, SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC, false);
}
+
static int pc_action_reset(struct xe_guc_pc *pc)
{
struct xe_guc_ct *ct = pc_to_ct(pc);
@@ -343,7 +331,7 @@ static int pc_set_min_freq(struct xe_guc_pc *pc, u32 freq)
* Our goal is to have the admin choices respected.
*/
pc_action_set_param(pc, SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
- freq < pc->rpe_freq);
+ freq < xe_guc_pc_get_rpe_freq(pc));
return pc_action_set_param(pc,
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
@@ -375,7 +363,7 @@ static int pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
freq);
}
-static void mtl_update_rpa_value(struct xe_guc_pc *pc)
+static u32 mtl_get_rpa_freq(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
u32 reg;
@@ -385,10 +373,10 @@ static void mtl_update_rpa_value(struct xe_guc_pc *pc)
else
reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPA_FREQUENCY);
- pc->rpa_freq = decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
+ return decode_freq(REG_FIELD_GET(MTL_RPA_MASK, reg));
}
-static void mtl_update_rpe_value(struct xe_guc_pc *pc)
+static u32 mtl_get_rpe_freq(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
u32 reg;
@@ -398,68 +386,56 @@ static void mtl_update_rpe_value(struct xe_guc_pc *pc)
else
reg = xe_mmio_read32(&gt->mmio, MTL_GT_RPE_FREQUENCY);
- pc->rpe_freq = decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
+ return decode_freq(REG_FIELD_GET(MTL_RPE_MASK, reg));
}
-static void tgl_update_rpa_value(struct xe_guc_pc *pc)
+static u32 pvc_get_rpa_freq(struct xe_guc_pc *pc)
{
- struct xe_gt *gt = pc_to_gt(pc);
- struct xe_device *xe = gt_to_xe(gt);
- u32 reg;
-
/*
* For PVC we still need to use fused RP0 as the approximation for RPa
* For other platforms than PVC we get the resolved RPa directly from
* PCODE at a different register
*/
- if (xe->info.platform == XE_PVC) {
- reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
- pc->rpa_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
- } else {
- reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
- pc->rpa_freq = REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
- }
+
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg;
+
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
+ return REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
-static void tgl_update_rpe_value(struct xe_guc_pc *pc)
+static u32 tgl_get_rpa_freq(struct xe_guc_pc *pc)
+{
+ struct xe_gt *gt = pc_to_gt(pc);
+ u32 reg;
+
+ reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
+ return REG_FIELD_GET(RPA_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
+}
+
+static u32 pvc_get_rpe_freq(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
- struct xe_device *xe = gt_to_xe(gt);
u32 reg;
/*
* For PVC we still need to use fused RP1 as the approximation for RPe
- * For other platforms than PVC we get the resolved RPe directly from
- * PCODE at a different register
*/
- if (xe->info.platform == XE_PVC) {
- reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
- pc->rpe_freq = REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
- } else {
- reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
- pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
- }
+ reg = xe_mmio_read32(&gt->mmio, PVC_RP_STATE_CAP);
+ return REG_FIELD_GET(RP1_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
-static void pc_update_rp_values(struct xe_guc_pc *pc)
+static u32 tgl_get_rpe_freq(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
- struct xe_device *xe = gt_to_xe(gt);
-
- if (GRAPHICS_VERx100(xe) >= 1270) {
- mtl_update_rpa_value(pc);
- mtl_update_rpe_value(pc);
- } else {
- tgl_update_rpa_value(pc);
- tgl_update_rpe_value(pc);
- }
+ u32 reg;
/*
- * RPe is decided at runtime by PCODE. In the rare case where that's
- * smaller than the fused min, we will trust the PCODE and use that
- * as our minimum one.
+ * For other platforms than PVC, we get the resolved RPe directly from
+ * PCODE at a different register
*/
- pc->rpn_freq = min(pc->rpn_freq, pc->rpe_freq);
+ reg = xe_mmio_read32(&gt->mmio, FREQ_INFO_REC);
+ return REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
}
/**
@@ -560,9 +536,15 @@ u32 xe_guc_pc_get_rp0_freq(struct xe_guc_pc *pc)
*/
u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
{
- pc_update_rp_values(pc);
+ struct xe_gt *gt = pc_to_gt(pc);
+ struct xe_device *xe = gt_to_xe(gt);
- return pc->rpa_freq;
+ if (GRAPHICS_VERx100(xe) == 1260)
+ return pvc_get_rpa_freq(pc);
+ else if (GRAPHICS_VERx100(xe) >= 1270)
+ return mtl_get_rpa_freq(pc);
+ else
+ return tgl_get_rpa_freq(pc);
}
/**
@@ -573,9 +555,17 @@ u32 xe_guc_pc_get_rpa_freq(struct xe_guc_pc *pc)
*/
u32 xe_guc_pc_get_rpe_freq(struct xe_guc_pc *pc)
{
- pc_update_rp_values(pc);
+ struct xe_device *xe = pc_to_xe(pc);
+ u32 freq;
+
+ if (GRAPHICS_VERx100(xe) == 1260)
+ freq = pvc_get_rpe_freq(pc);
+ else if (GRAPHICS_VERx100(xe) >= 1270)
+ freq = mtl_get_rpe_freq(pc);
+ else
+ freq = tgl_get_rpe_freq(pc);
- return pc->rpe_freq;
+ return freq;
}
/**
@@ -722,7 +712,7 @@ static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq)
*/
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq)
{
- if (XE_WA(pc_to_gt(pc), 22019338487)) {
+ if (XE_GT_WA(pc_to_gt(pc), 22019338487)) {
if (wait_for_flush_complete(pc) != 0)
return -EAGAIN;
}
@@ -835,7 +825,7 @@ static u32 pc_max_freq_cap(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
- if (XE_WA(gt, 22019338487)) {
+ if (XE_GT_WA(gt, 22019338487)) {
if (xe_gt_is_media_type(gt))
return min(LNL_MERT_FREQ_CAP, pc->rp0_freq);
else
@@ -899,7 +889,7 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
if (pc_get_min_freq(pc) > pc->rp0_freq)
ret = pc_set_min_freq(pc, pc->rp0_freq);
- if (XE_WA(tile->primary_gt, 14022085890))
+ if (XE_DEVICE_WA(tile_to_xe(tile), 14022085890))
ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
out:
@@ -931,7 +921,7 @@ static bool needs_flush_freq_limit(struct xe_guc_pc *pc)
{
struct xe_gt *gt = pc_to_gt(pc);
- return XE_WA(gt, 22019338487) &&
+ return XE_GT_WA(gt, 22019338487) &&
pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP;
}
@@ -978,7 +968,7 @@ void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc)
* Wait for actual freq to go below the flush cap: even if the previous
* max was below cap, the current one might still be above it
*/
- ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
+ ret = wait_for_act_freq_max_limit(pc, BMG_MERT_FLUSH_FREQ_CAP);
if (ret)
xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n",
BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret));
@@ -1017,7 +1007,7 @@ static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
{
int ret;
- if (!XE_WA(pc_to_gt(pc), 22019338487))
+ if (!XE_GT_WA(pc_to_gt(pc), 22019338487))
return 0;
guard(mutex)(&pc->freq_lock);
@@ -1034,7 +1024,7 @@ static int pc_set_mert_freq_cap(struct xe_guc_pc *pc)
/*
* Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
*/
- ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc)));
+ ret = pc_set_min_freq(pc, min(xe_guc_pc_get_rpe_freq(pc), pc_max_freq_cap(pc)));
if (!ret)
ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc)));
@@ -1076,7 +1066,6 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
{
struct xe_device *xe = pc_to_xe(pc);
struct xe_gt *gt = pc_to_gt(pc);
- unsigned int fw_ref;
int ret = 0;
if (xe->info.skip_guc_pc)
@@ -1086,17 +1075,7 @@ int xe_guc_pc_gucrc_disable(struct xe_guc_pc *pc)
if (ret)
return ret;
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
- return -ETIMEDOUT;
- }
-
- xe_gt_idle_disable_c6(gt);
-
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
- return 0;
+ return xe_gt_idle_disable_c6(gt);
}
/**
@@ -1156,8 +1135,6 @@ static int pc_init_freqs(struct xe_guc_pc *pc)
if (ret)
goto out;
- pc_update_rp_values(pc);
-
pc_init_pcode_freq(pc);
/*
@@ -1182,6 +1159,61 @@ static int pc_action_set_strategy(struct xe_guc_pc *pc, u32 val)
return ret;
}
+static const char *power_profile_to_string(struct xe_guc_pc *pc)
+{
+ switch (pc->power_profile) {
+ case SLPC_POWER_PROFILE_BASE:
+ return "base";
+ case SLPC_POWER_PROFILE_POWER_SAVING:
+ return "power_saving";
+ default:
+ return "invalid";
+ }
+}
+
+void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile)
+{
+ switch (pc->power_profile) {
+ case SLPC_POWER_PROFILE_BASE:
+ sprintf(profile, "[%s] %s\n", "base", "power_saving");
+ break;
+ case SLPC_POWER_PROFILE_POWER_SAVING:
+ sprintf(profile, "%s [%s]\n", "base", "power_saving");
+ break;
+ default:
+ sprintf(profile, "invalid");
+ }
+}
+
+int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf)
+{
+ int ret = 0;
+ u32 val;
+
+ if (strncmp("base", buf, strlen("base")) == 0)
+ val = SLPC_POWER_PROFILE_BASE;
+ else if (strncmp("power_saving", buf, strlen("power_saving")) == 0)
+ val = SLPC_POWER_PROFILE_POWER_SAVING;
+ else
+ return -EINVAL;
+
+ guard(mutex)(&pc->freq_lock);
+ xe_pm_runtime_get_noresume(pc_to_xe(pc));
+
+ ret = pc_action_set_param(pc,
+ SLPC_PARAM_POWER_PROFILE,
+ val);
+ if (ret)
+ xe_gt_err_once(pc_to_gt(pc), "Failed to set power profile to %d: %pe\n",
+ val, ERR_PTR(ret));
+ else
+ pc->power_profile = val;
+
+ xe_pm_runtime_put(pc_to_xe(pc));
+
+ return ret;
+}
+
/**
* xe_guc_pc_start - Start GuC's Power Conservation component
* @pc: Xe_GuC_PC instance
@@ -1260,6 +1292,11 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
/* Enable SLPC Optimized Strategy for compute */
ret = pc_action_set_strategy(pc, SLPC_OPTIMIZED_STRATEGY_COMPUTE);
+ /* Set cached value of power_profile */
+ ret = xe_guc_pc_set_power_profile(pc, power_profile_to_string(pc));
+ if (unlikely(ret))
+ xe_gt_err(gt, "Failed to set SLPC power profile: %pe\n", ERR_PTR(ret));
+
out:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
return ret;
@@ -1303,7 +1340,7 @@ static void xe_guc_pc_fini_hw(void *arg)
XE_WARN_ON(xe_guc_pc_stop(pc));
/* Bind requested freq to mert_freq_cap before unload */
- pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), pc->rpe_freq));
+ pc_set_cur_freq(pc, min(pc_max_freq_cap(pc), xe_guc_pc_get_rpe_freq(pc)));
xe_force_wake_put(gt_to_fw(pc_to_gt(pc)), fw_ref);
}
@@ -1338,6 +1375,8 @@ int xe_guc_pc_init(struct xe_guc_pc *pc)
pc->bo = bo;
+ pc->power_profile = SLPC_POWER_PROFILE_BASE;
+
return devm_add_action_or_reset(xe->drm.dev, xe_guc_pc_fini_hw, pc);
}
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h
index 52ecdd5ddbff..0e31396f103c 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc.h
@@ -31,6 +31,8 @@ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq);
int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq);
int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq);
int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq);
+int xe_guc_pc_set_power_profile(struct xe_guc_pc *pc, const char *buf);
+void xe_guc_pc_get_power_profile(struct xe_guc_pc *pc, char *profile);
enum xe_gt_idle_state xe_guc_pc_c_status(struct xe_guc_pc *pc);
u64 xe_guc_pc_rc6_residency(struct xe_guc_pc *pc);
diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h
index c02053948a57..711bbcdcb0d3 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h
@@ -19,10 +19,6 @@ struct xe_guc_pc {
atomic_t flush_freq_limit;
/** @rp0_freq: HW RP0 frequency - The Maximum one */
u32 rp0_freq;
- /** @rpa_freq: HW RPa frequency - The Achievable one */
- u32 rpa_freq;
- /** @rpe_freq: HW RPe frequency - The Efficient one */
- u32 rpe_freq;
/** @rpn_freq: HW RPN frequency - The Minimum one */
u32 rpn_freq;
/** @user_requested_min: Stash the minimum requested freq by user */
@@ -37,6 +33,8 @@ struct xe_guc_pc {
struct mutex freq_lock;
/** @freq_ready: Only handle freq changes, if they are really ready */
bool freq_ready;
+ /** @power_profile: Base or power_saving profile */
+ u32 power_profile;
};
#endif /* _XE_GUC_PC_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_guc_relay.c b/drivers/gpu/drm/xe/xe_guc_relay.c
index e5dc94f3e618..0c0ff24ba62a 100644
--- a/drivers/gpu/drm/xe/xe_guc_relay.c
+++ b/drivers/gpu/drm/xe/xe_guc_relay.c
@@ -56,9 +56,19 @@ static struct xe_device *relay_to_xe(struct xe_guc_relay *relay)
return gt_to_xe(relay_to_gt(relay));
}
+#define XE_RELAY_DIAG_RATELIMIT_INTERVAL (10 * HZ)
+#define XE_RELAY_DIAG_RATELIMIT_BURST 10
+
+#define relay_ratelimit_printk(relay, _level, fmt...) ({ \
+ typeof(relay) _r = (relay); \
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV) || \
+ ___ratelimit(&_r->diag_ratelimit, "xe_guc_relay")) \
+ xe_gt_sriov_##_level(relay_to_gt(_r), "relay: " fmt); \
+})
+
#define relay_assert(relay, condition) xe_gt_assert(relay_to_gt(relay), condition)
-#define relay_notice(relay, msg...) xe_gt_sriov_notice(relay_to_gt(relay), "relay: " msg)
-#define relay_debug(relay, msg...) xe_gt_sriov_dbg_verbose(relay_to_gt(relay), "relay: " msg)
+#define relay_notice(relay, msg...) relay_ratelimit_printk((relay), notice, msg)
+#define relay_debug(relay, msg...) relay_ratelimit_printk((relay), dbg_verbose, msg)
static int relay_get_totalvfs(struct xe_guc_relay *relay)
{
@@ -345,6 +355,9 @@ int xe_guc_relay_init(struct xe_guc_relay *relay)
INIT_WORK(&relay->worker, relays_worker_fn);
INIT_LIST_HEAD(&relay->pending_relays);
INIT_LIST_HEAD(&relay->incoming_actions);
+ ratelimit_state_init(&relay->diag_ratelimit,
+ XE_RELAY_DIAG_RATELIMIT_INTERVAL,
+ XE_RELAY_DIAG_RATELIMIT_BURST);
err = mempool_init_kmalloc_pool(&relay->pool, XE_RELAY_MEMPOOL_MIN_NUM +
relay_get_totalvfs(relay),
diff --git a/drivers/gpu/drm/xe/xe_guc_relay_types.h b/drivers/gpu/drm/xe/xe_guc_relay_types.h
index 5999fcb77e96..20eee10856b2 100644
--- a/drivers/gpu/drm/xe/xe_guc_relay_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_relay_types.h
@@ -7,6 +7,7 @@
#define _XE_GUC_RELAY_TYPES_H_
#include <linux/mempool.h>
+#include <linux/ratelimit_types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -31,6 +32,9 @@ struct xe_guc_relay {
/** @last_rid: last Relay-ID used while sending a message. */
u32 last_rid;
+
+ /** @diag_ratelimit: ratelimit state used to throttle diagnostics messages. */
+ struct ratelimit_state diag_ratelimit;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index cafb47711e9b..ed7be50b2f72 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -32,6 +32,7 @@
#include "xe_guc_ct.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_guc_id_mgr.h"
+#include "xe_guc_klv_helpers.h"
#include "xe_guc_submit_types.h"
#include "xe_hw_engine.h"
#include "xe_hw_fence.h"
@@ -43,6 +44,7 @@
#include "xe_ring_ops_types.h"
#include "xe_sched_job.h"
#include "xe_trace.h"
+#include "xe_uc_fw.h"
#include "xe_vm.h"
static struct xe_guc *
@@ -68,6 +70,8 @@ exec_queue_to_guc(struct xe_exec_queue *q)
#define EXEC_QUEUE_STATE_BANNED (1 << 9)
#define EXEC_QUEUE_STATE_CHECK_TIMEOUT (1 << 10)
#define EXEC_QUEUE_STATE_EXTRA_REF (1 << 11)
+#define EXEC_QUEUE_STATE_PENDING_RESUME (1 << 12)
+#define EXEC_QUEUE_STATE_PENDING_TDR_EXIT (1 << 13)
static bool exec_queue_registered(struct xe_exec_queue *q)
{
@@ -139,6 +143,11 @@ static void set_exec_queue_destroyed(struct xe_exec_queue *q)
atomic_or(EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
}
+static void clear_exec_queue_destroyed(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_DESTROYED, &q->guc->state);
+}
+
static bool exec_queue_banned(struct xe_exec_queue *q)
{
return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_BANNED;
@@ -219,6 +228,41 @@ static void set_exec_queue_extra_ref(struct xe_exec_queue *q)
atomic_or(EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
}
+static void clear_exec_queue_extra_ref(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_EXTRA_REF, &q->guc->state);
+}
+
+static bool exec_queue_pending_resume(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_RESUME;
+}
+
+static void set_exec_queue_pending_resume(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
+}
+
+static void clear_exec_queue_pending_resume(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_PENDING_RESUME, &q->guc->state);
+}
+
+static bool exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+{
+ return atomic_read(&q->guc->state) & EXEC_QUEUE_STATE_PENDING_TDR_EXIT;
+}
+
+static void set_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+{
+ atomic_or(EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
+}
+
+static void clear_exec_queue_pending_tdr_exit(struct xe_exec_queue *q)
+{
+ atomic_and(~EXEC_QUEUE_STATE_PENDING_TDR_EXIT, &q->guc->state);
+}
+
static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
{
return (atomic_read(&q->guc->state) &
@@ -316,6 +360,71 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
return drmm_add_action_or_reset(&xe->drm, guc_submit_fini, guc);
}
+/*
+ * Given that we want to guarantee enough RCS throughput to avoid missing
+ * frames, we set the yield policy to 20% of each 80ms interval.
+ */
+#define RC_YIELD_DURATION 80 /* in ms */
+#define RC_YIELD_RATIO 20 /* in percent */
+static u32 *emit_render_compute_yield_klv(u32 *emit)
+{
+ *emit++ = PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD);
+ *emit++ = RC_YIELD_DURATION;
+ *emit++ = RC_YIELD_RATIO;
+
+ return emit;
+}
+
+#define SCHEDULING_POLICY_MAX_DWORDS 16
+static int guc_init_global_schedule_policy(struct xe_guc *guc)
+{
+ u32 data[SCHEDULING_POLICY_MAX_DWORDS];
+ u32 *emit = data;
+ u32 count = 0;
+ int ret;
+
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
+ return 0;
+
+ *emit++ = XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;
+
+ if (CCS_MASK(guc_to_gt(guc)))
+ emit = emit_render_compute_yield_klv(emit);
+
+ count = emit - data;
+ if (count > 1) {
+ xe_assert(guc_to_xe(guc), count <= SCHEDULING_POLICY_MAX_DWORDS);
+
+ ret = xe_guc_ct_send_block(&guc->ct, data, count);
+ if (ret < 0) {
+ xe_gt_err(guc_to_gt(guc),
+ "failed to enable GuC scheduling policies: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
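With the constants above, the blocking H2G message assembled by guc_init_global_schedule_policy() on a system with compute engines is four dwords; a sketch of the resulting buffer (tag encoding via PREP_GUC_KLV_TAG, as in the patch):

	u32 klv_example[] = {
		XE_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV,	/* action */
		PREP_GUC_KLV_TAG(SCHEDULING_POLICIES_RENDER_COMPUTE_YIELD), /* key + len */
		RC_YIELD_DURATION,	/* 80 ms interval */
		RC_YIELD_RATIO,		/* 20 % of the interval yielded */
	};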
+
+int xe_guc_submit_enable(struct xe_guc *guc)
+{
+ int ret;
+
+ ret = guc_init_global_schedule_policy(guc);
+ if (ret)
+ return ret;
+
+ guc->submission_state.enabled = true;
+
+ return 0;
+}
+
+void xe_guc_submit_disable(struct xe_guc *guc)
+{
+ guc->submission_state.enabled = false;
+}
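The enable/disable pair brackets the window in which GuC submission (and, later in this series, CT-based TLB invalidation) is considered usable; a hedged sketch of the intended pairing, with the surrounding caller assumed rather than taken from this patch:

	/* Sketch: after GuC load completes */
	err = xe_guc_submit_enable(guc);	/* pushes scheduling-policy KLVs */
	if (err)
		return err;

	/* ... and on reset/stop paths */
	xe_guc_submit_disable(guc);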
+
static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa_count)
{
int i;
@@ -542,7 +651,7 @@ static void __register_exec_queue(struct xe_guc *guc,
xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
-static void register_exec_queue(struct xe_exec_queue *q)
+static void register_exec_queue(struct xe_exec_queue *q, int ctx_type)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
@@ -550,6 +659,7 @@ static void register_exec_queue(struct xe_exec_queue *q)
struct guc_ctxt_registration_info info;
xe_gt_assert(guc_to_gt(guc), !exec_queue_registered(q));
+ xe_gt_assert(guc_to_gt(guc), ctx_type < GUC_CONTEXT_COUNT);
memset(&info, 0, sizeof(info));
info.context_idx = q->guc->id;
@@ -557,7 +667,8 @@ static void register_exec_queue(struct xe_exec_queue *q)
info.engine_submit_mask = q->logical_mask;
info.hwlrca_lo = lower_32_bits(xe_lrc_descriptor(lrc));
info.hwlrca_hi = upper_32_bits(xe_lrc_descriptor(lrc));
- info.flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ info.flags = CONTEXT_REGISTRATION_FLAG_KMD |
+ FIELD_PREP(CONTEXT_REGISTRATION_FLAG_TYPE, ctx_type);
if (xe_exec_queue_is_parallel(q)) {
u64 ggtt_addr = xe_lrc_parallel_ggtt_addr(lrc);
@@ -601,6 +712,11 @@ static u32 wq_space_until_wrap(struct xe_exec_queue *q)
return (WQ_SIZE - q->guc->wqi_tail);
}
+static bool vf_recovery(struct xe_guc *guc)
+{
+ return xe_gt_recovery_pending(guc_to_gt(guc));
+}
+
static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
{
struct xe_guc *guc = exec_queue_to_guc(q);
@@ -610,7 +726,7 @@ static int wq_wait_for_space(struct xe_exec_queue *q, u32 wqi_size)
#define AVAILABLE_SPACE \
CIRC_SPACE(q->guc->wqi_tail, q->guc->wqi_head, WQ_SIZE)
- if (wqi_size > AVAILABLE_SPACE) {
+ if (wqi_size > AVAILABLE_SPACE && !vf_recovery(guc)) {
try_again:
q->guc->wqi_head = parallel_read(xe, map, wq_desc.head);
if (wqi_size > AVAILABLE_SPACE) {
@@ -694,7 +810,7 @@ static void wq_item_append(struct xe_exec_queue *q)
}
#define RESUME_PENDING ~0x0ull
-static void submit_exec_queue(struct xe_exec_queue *q)
+static void submit_exec_queue(struct xe_exec_queue *q, struct xe_sched_job *job)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_lrc *lrc = q->lrc[0];
@@ -706,10 +822,13 @@ static void submit_exec_queue(struct xe_exec_queue *q)
xe_gt_assert(guc_to_gt(guc), exec_queue_registered(q));
- if (xe_exec_queue_is_parallel(q))
- wq_item_append(q);
- else
- xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+ if (!job->restore_replay || job->last_replay) {
+ if (xe_exec_queue_is_parallel(q))
+ wq_item_append(q);
+ else
+ xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+ job->last_replay = false;
+ }
if (exec_queue_suspended(q) && !xe_exec_queue_is_parallel(q))
return;
@@ -751,30 +870,33 @@ guc_exec_queue_run_job(struct drm_sched_job *drm_job)
struct xe_sched_job *job = to_xe_sched_job(drm_job);
struct xe_exec_queue *q = job->q;
struct xe_guc *guc = exec_queue_to_guc(q);
- struct dma_fence *fence = NULL;
- bool lr = xe_exec_queue_is_lr(q);
+ bool lr = xe_exec_queue_is_lr(q), killed_or_banned_or_wedged =
+ exec_queue_killed_or_banned_or_wedged(q);
xe_gt_assert(guc_to_gt(guc), !(exec_queue_destroyed(q) || exec_queue_pending_disable(q)) ||
exec_queue_banned(q) || exec_queue_suspended(q));
trace_xe_sched_job_run(job);
- if (!exec_queue_killed_or_banned_or_wedged(q) && !xe_sched_job_is_error(job)) {
+ if (!killed_or_banned_or_wedged && !xe_sched_job_is_error(job)) {
if (!exec_queue_registered(q))
- register_exec_queue(q);
- if (!lr) /* LR jobs are emitted in the exec IOCTL */
+ register_exec_queue(q, GUC_CONTEXT_NORMAL);
+ if (!job->restore_replay)
q->ring_ops->emit_job(job);
- submit_exec_queue(q);
+ submit_exec_queue(q, job);
+ job->restore_replay = false;
}
- if (lr) {
- xe_sched_job_set_error(job, -EOPNOTSUPP);
- dma_fence_put(job->fence); /* Drop ref from xe_sched_job_arm */
- } else {
- fence = job->fence;
- }
+ /*
+ * We don't care about job-fence ordering in LR VMs because these fences
+ * are never exported; they are used solely to keep jobs on the pending
+ * list. Once a queue enters an error state, there's no need to track
+ * them.
+ */
+ if (killed_or_banned_or_wedged && lr)
+ xe_sched_job_set_error(job, -ECANCELED);
- return fence;
+ return job->fence;
}
static void guc_exec_queue_free_job(struct drm_sched_job *drm_job)
@@ -808,15 +930,17 @@ static void disable_scheduling_deregister(struct xe_guc *guc,
ret = wait_event_timeout(guc->ct.wq,
(!exec_queue_pending_enable(q) &&
!exec_queue_pending_disable(q)) ||
- xe_guc_read_stopped(guc),
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc),
HZ * 5);
- if (!ret) {
+ if (!ret && !vf_recovery(guc)) {
struct xe_gpu_scheduler *sched = &q->guc->sched;
xe_gt_warn(q->gt, "Pending enable/disable failed to respond\n");
xe_sched_submission_start(sched);
xe_gt_reset_async(q->gt);
- xe_sched_tdr_queue_imm(sched);
+ if (!xe_exec_queue_is_lr(q))
+ xe_sched_tdr_queue_imm(sched);
return;
}
@@ -908,9 +1032,14 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_gpu_scheduler *sched = &ge->sched;
+ struct xe_sched_job *job;
bool wedged = false;
xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q));
+
+ if (vf_recovery(guc))
+ return;
+
trace_xe_exec_queue_lr_cleanup(q);
if (!exec_queue_killed(q))
@@ -943,7 +1072,11 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
*/
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- xe_guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if (vf_recovery(guc))
+ return;
+
if (!ret) {
xe_gt_warn(q->gt, "Schedule disable failed to respond, guc_id=%d\n",
q->guc->id);
@@ -958,7 +1091,16 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w)
if (!exec_queue_killed(q) && !xe_lrc_ring_is_idle(q->lrc[0]))
xe_devcoredump(q, NULL, "LR job cleanup, guc_id=%d", q->guc->id);
+ xe_hw_fence_irq_stop(q->fence_irq);
+
xe_sched_submission_start(sched);
+
+ spin_lock(&sched->base.job_list_lock);
+ list_for_each_entry(job, &sched->base.pending_list, drm.list)
+ xe_sched_job_set_error(job, -ECANCELED);
+ spin_unlock(&sched->base.job_list_lock);
+
+ xe_hw_fence_irq_start(q->fence_irq);
}
#define ADJUST_FIVE_PERCENT(__t) mul_u64_u32_div(__t, 105, 100)
@@ -1024,12 +1166,14 @@ static void enable_scheduling(struct xe_exec_queue *q)
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_enable(q) ||
- xe_guc_read_stopped(guc), HZ * 5);
- if (!ret || xe_guc_read_stopped(guc)) {
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if ((!ret && !vf_recovery(guc)) || xe_guc_read_stopped(guc)) {
xe_gt_warn(guc_to_gt(guc), "Schedule enable failed to respond");
set_exec_queue_banned(q);
xe_gt_reset_async(q->gt);
- xe_sched_tdr_queue_imm(&q->guc->sched);
+ if (!xe_exec_queue_is_lr(q))
+ xe_sched_tdr_queue_imm(&q->guc->sched);
}
}
@@ -1087,13 +1231,16 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
int i = 0;
bool wedged = false, skip_timeout_check;
+ xe_gt_assert(guc_to_gt(guc), !xe_exec_queue_is_lr(q));
+
/*
* TDR has fired before free job worker. Common if exec queue
* immediately closed after last fence signaled. Add back to pending
* list so job can be freed and kick scheduler ensuring free job is not
* lost.
*/
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags) ||
+ vf_recovery(guc))
return DRM_GPU_SCHED_STAT_NO_HANG;
/* Kill the run_job entry point */
@@ -1145,7 +1292,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
ret = wait_event_timeout(guc->ct.wq,
(!exec_queue_pending_enable(q) &&
!exec_queue_pending_disable(q)) ||
- xe_guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if (vf_recovery(guc))
+ goto handle_vf_resume;
if (!ret || xe_guc_read_stopped(guc))
goto trigger_reset;
@@ -1170,7 +1320,10 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
smp_rmb();
ret = wait_event_timeout(guc->ct.wq,
!exec_queue_pending_disable(q) ||
- xe_guc_read_stopped(guc), HZ * 5);
+ xe_guc_read_stopped(guc) ||
+ vf_recovery(guc), HZ * 5);
+ if (vf_recovery(guc))
+ goto handle_vf_resume;
if (!ret || xe_guc_read_stopped(guc)) {
trigger_reset:
if (!ret)
@@ -1266,6 +1419,7 @@ trigger_reset:
return DRM_GPU_SCHED_STAT_RESET;
sched_enable:
+ set_exec_queue_pending_tdr_exit(q);
enable_scheduling(q);
rearm:
/*
@@ -1274,51 +1428,61 @@ rearm:
* some thought, do this in a follow up.
*/
xe_sched_submission_start(sched);
+handle_vf_resume:
return DRM_GPU_SCHED_STAT_NO_HANG;
}
-static void __guc_exec_queue_fini_async(struct work_struct *w)
+static void guc_exec_queue_fini(struct xe_exec_queue *q)
+{
+ struct xe_guc_exec_queue *ge = q->guc;
+ struct xe_guc *guc = exec_queue_to_guc(q);
+
+ release_guc_id(guc, q);
+ xe_sched_entity_fini(&ge->entity);
+ xe_sched_fini(&ge->sched);
+
+ /*
+ * RCU free due to the sched being exported via DRM scheduler fences
+ * (timeline name).
+ */
+ kfree_rcu(ge, rcu);
+}
+
+static void __guc_exec_queue_destroy_async(struct work_struct *w)
{
struct xe_guc_exec_queue *ge =
- container_of(w, struct xe_guc_exec_queue, fini_async);
+ container_of(w, struct xe_guc_exec_queue, destroy_async);
struct xe_exec_queue *q = ge->q;
struct xe_guc *guc = exec_queue_to_guc(q);
xe_pm_runtime_get(guc_to_xe(guc));
trace_xe_exec_queue_destroy(q);
- release_guc_id(guc, q);
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
/* Confirm no work left behind accessing device structures */
cancel_delayed_work_sync(&ge->sched.base.work_tdr);
- xe_sched_entity_fini(&ge->entity);
- xe_sched_fini(&ge->sched);
- /*
- * RCU free due sched being exported via DRM scheduler fences
- * (timeline name).
- */
- kfree_rcu(ge, rcu);
xe_exec_queue_fini(q);
+
xe_pm_runtime_put(guc_to_xe(guc));
}
-static void guc_exec_queue_fini_async(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy_async(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
struct xe_device *xe = guc_to_xe(guc);
- INIT_WORK(&q->guc->fini_async, __guc_exec_queue_fini_async);
+ INIT_WORK(&q->guc->destroy_async, __guc_exec_queue_destroy_async);
/* We must block on kernel engines so slabs are empty on driver unload */
if (q->flags & EXEC_QUEUE_FLAG_PERMANENT || exec_queue_wedged(q))
- __guc_exec_queue_fini_async(&q->guc->fini_async);
+ __guc_exec_queue_destroy_async(&q->guc->destroy_async);
else
- queue_work(xe->destroy_wq, &q->guc->fini_async);
+ queue_work(xe->destroy_wq, &q->guc->destroy_async);
}
-static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
+static void __guc_exec_queue_destroy(struct xe_guc *guc, struct xe_exec_queue *q)
{
/*
* Might be done from within the GPU scheduler, need to do async as we
@@ -1327,7 +1491,7 @@ static void __guc_exec_queue_fini(struct xe_guc *guc, struct xe_exec_queue *q)
* this async and don't really care when everything is fini'd, just that
* it is.
*/
- guc_exec_queue_fini_async(q);
+ guc_exec_queue_destroy_async(q);
}
static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
@@ -1338,10 +1502,20 @@ static void __guc_exec_queue_process_msg_cleanup(struct xe_sched_msg *msg)
xe_gt_assert(guc_to_gt(guc), !(q->flags & EXEC_QUEUE_FLAG_PERMANENT));
trace_xe_exec_queue_cleanup_entity(q);
- if (exec_queue_registered(q))
+ /*
+ * Expected state transitions for cleanup:
+ * - If the exec queue is registered and GuC firmware is running, we must first
+ * disable scheduling and deregister the queue to ensure proper teardown and
+ * resource release in the GuC, then destroy the exec queue on driver side.
+ * - If the GuC is already stopped (e.g., during driver unload or GPU reset),
+ * we cannot expect a response for the deregister request. In this case,
+ * it is safe to directly destroy the exec queue on driver side, as the GuC
+ * will not process further requests and all resources must be cleaned up locally.
+ */
+ if (exec_queue_registered(q) && xe_uc_fw_is_running(&guc->fw))
disable_scheduling_deregister(guc, q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
static bool guc_exec_queue_allowed_to_change_state(struct xe_exec_queue *q)
@@ -1361,11 +1535,24 @@ static void __guc_exec_queue_process_msg_set_sched_props(struct xe_sched_msg *ms
static void __suspend_fence_signal(struct xe_exec_queue *q)
{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+
if (!q->guc->suspend_pending)
return;
WRITE_ONCE(q->guc->suspend_pending, false);
- wake_up(&q->guc->suspend_wait);
+
+ /*
+ * We use a GuC shared wait queue for VFs because the VF resfix start
+ * interrupt must be able to wake all instances of suspend_wait. This
+ * prevents the VF migration worker from being starved during
+ * scheduling.
+ */
+ if (IS_SRIOV_VF(xe))
+ wake_up_all(&guc->ct.wq);
+ else
+ wake_up(&q->guc->suspend_wait);
}
static void suspend_fence_signal(struct xe_exec_queue *q)
@@ -1386,8 +1573,9 @@ static void __guc_exec_queue_process_msg_suspend(struct xe_sched_msg *msg)
if (guc_exec_queue_allowed_to_change_state(q) && !exec_queue_suspended(q) &&
exec_queue_enabled(q)) {
- wait_event(guc->ct.wq, (q->guc->resume_time != RESUME_PENDING ||
- xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q));
+ wait_event(guc->ct.wq, vf_recovery(guc) ||
+ ((q->guc->resume_time != RESUME_PENDING ||
+ xe_guc_read_stopped(guc)) && !exec_queue_pending_disable(q)));
if (!xe_guc_read_stopped(guc)) {
s64 since_resume_ms =
@@ -1416,6 +1604,7 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
clear_exec_queue_suspended(q);
if (!exec_queue_enabled(q)) {
q->guc->resume_time = RESUME_PENDING;
+ set_exec_queue_pending_resume(q);
enable_scheduling(q);
}
} else {
@@ -1429,6 +1618,7 @@ static void __guc_exec_queue_process_msg_resume(struct xe_sched_msg *msg)
#define RESUME 4
#define OPCODE_MASK 0xf
#define MSG_LOCKED BIT(8)
+#define MSG_HEAD BIT(9)
static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
{
@@ -1491,7 +1681,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
msecs_to_jiffies(q->sched_props.job_timeout_ms);
err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
- NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+ NULL, xe_lrc_ring_size() / MAX_JOB_SIZE_BYTES, 64,
timeout, guc_to_gt(guc)->ordered_wq, NULL,
q->name, gt_to_xe(q->gt)->drm.dev);
if (err)
@@ -1513,7 +1703,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
q->entity = &ge->entity;
- if (xe_guc_read_stopped(guc))
+ if (xe_guc_read_stopped(guc) || vf_recovery(guc))
xe_sched_stop(sched);
mutex_unlock(&guc->submission_state.lock);
@@ -1553,12 +1743,24 @@ static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg
msg->private_data = q;
trace_xe_sched_msg_add(msg);
- if (opcode & MSG_LOCKED)
+ if (opcode & MSG_HEAD)
+ xe_sched_add_msg_head(&q->guc->sched, msg);
+ else if (opcode & MSG_LOCKED)
xe_sched_add_msg_locked(&q->guc->sched, msg);
else
xe_sched_add_msg(&q->guc->sched, msg);
}
+static void guc_exec_queue_try_add_msg_head(struct xe_exec_queue *q,
+ struct xe_sched_msg *msg,
+ u32 opcode)
+{
+ if (!list_empty(&msg->link))
+ return;
+
+ guc_exec_queue_add_msg(q, msg, opcode | MSG_LOCKED | MSG_HEAD);
+}
+
static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
struct xe_sched_msg *msg,
u32 opcode)
@@ -1574,14 +1776,14 @@ static bool guc_exec_queue_try_add_msg(struct xe_exec_queue *q,
#define STATIC_MSG_CLEANUP 0
#define STATIC_MSG_SUSPEND 1
#define STATIC_MSG_RESUME 2
-static void guc_exec_queue_fini(struct xe_exec_queue *q)
+static void guc_exec_queue_destroy(struct xe_exec_queue *q)
{
struct xe_sched_msg *msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && !exec_queue_wedged(q))
guc_exec_queue_add_msg(q, msg, CLEANUP);
else
- __guc_exec_queue_fini(exec_queue_to_guc(q), q);
+ __guc_exec_queue_destroy(exec_queue_to_guc(q), q);
}
static int guc_exec_queue_set_priority(struct xe_exec_queue *q,
@@ -1659,6 +1861,7 @@ static int guc_exec_queue_suspend(struct xe_exec_queue *q)
static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
{
struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
int ret;
/*
@@ -1666,11 +1869,21 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
* suspend_pending upon kill, but to be paranoid about races in which
* suspend_pending is set after kill, also check kill here.
*/
- ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
- !READ_ONCE(q->guc->suspend_pending) ||
- exec_queue_killed(q) ||
- xe_guc_read_stopped(guc),
- HZ * 5);
+#define WAIT_COND \
+ (!READ_ONCE(q->guc->suspend_pending) || exec_queue_killed(q) || \
+ xe_guc_read_stopped(guc))
+
+retry:
+ if (IS_SRIOV_VF(xe))
+ ret = wait_event_interruptible_timeout(guc->ct.wq, WAIT_COND ||
+ vf_recovery(guc),
+ HZ * 5);
+ else
+ ret = wait_event_interruptible_timeout(q->guc->suspend_wait,
+ WAIT_COND, HZ * 5);
+
+ if (vf_recovery(guc) && !xe_device_wedged((guc_to_xe(guc))))
+ return -EAGAIN;
if (!ret) {
xe_gt_warn(guc_to_gt(guc),
@@ -1678,8 +1891,13 @@ static int guc_exec_queue_suspend_wait(struct xe_exec_queue *q)
q->guc->id);
/* XXX: Trigger GT reset? */
return -ETIME;
+ } else if (IS_SRIOV_VF(xe) && !WAIT_COND) {
+ /* Corner case on RESFIX DONE where vf_recovery() changes */
+ goto retry;
}
+#undef WAIT_COND
+
return ret < 0 ? ret : 0;
}
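With VF recovery in the mix, suspend_wait can now also return -EAGAIN (when recovery interrupted the wait and the device is not wedged). Whether a caller retries locally or propagates -EAGAIN to userspace is a policy choice outside this patch, but a local-retry sketch would be:

	int ret;

	do {
		ret = q->ops->suspend_wait(q);	/* -EAGAIN: VF recovery ran */
	} while (ret == -EAGAIN);

	if (ret)
		return ret;	/* -ETIME, -ERESTARTSYS or other hard failure */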
@@ -1702,7 +1920,7 @@ static bool guc_exec_queue_reset_status(struct xe_exec_queue *q)
}
/*
- * All of these functions are an abstraction layer which other parts of XE can
+ * All of these functions are an abstraction layer which other parts of Xe can
* use to trap into the GuC backend. All of these functions, aside from init,
* really shouldn't do much other than trap into the DRM scheduler which
* synchronizes these operations.
@@ -1711,6 +1929,7 @@ static const struct xe_exec_queue_ops guc_exec_queue_ops = {
.init = guc_exec_queue_init,
.kill = guc_exec_queue_kill,
.fini = guc_exec_queue_fini,
+ .destroy = guc_exec_queue_destroy,
.set_priority = guc_exec_queue_set_priority,
.set_timeslice = guc_exec_queue_set_timeslice,
.set_preempt_timeout = guc_exec_queue_set_preempt_timeout,
@@ -1732,7 +1951,7 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else if (exec_queue_destroyed(q))
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
if (q->guc->suspend_pending) {
set_exec_queue_suspended(q);
@@ -1777,6 +1996,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
+ if (xe_gt_WARN_ON(guc_to_gt(guc), vf_recovery(guc)))
+ return 0;
+
if (!guc->submission_state.initialized)
return 0;
@@ -1826,16 +2048,177 @@ void xe_guc_submit_stop(struct xe_guc *guc)
}
+static void guc_exec_queue_revert_pending_state_change(struct xe_guc *guc,
+ struct xe_exec_queue *q)
+{
+ bool pending_enable, pending_disable, pending_resume;
+
+ pending_enable = exec_queue_pending_enable(q);
+ pending_resume = exec_queue_pending_resume(q);
+
+ if (pending_enable && pending_resume) {
+ q->guc->needs_resume = true;
+ xe_gt_dbg(guc_to_gt(guc), "Replay RESUME - guc_id=%d",
+ q->guc->id);
+ }
+
+ if (pending_enable && !pending_resume &&
+ !exec_queue_pending_tdr_exit(q)) {
+ clear_exec_queue_registered(q);
+ if (xe_exec_queue_is_lr(q))
+ xe_exec_queue_put(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay REGISTER - guc_id=%d",
+ q->guc->id);
+ }
+
+ if (pending_enable) {
+ clear_exec_queue_enabled(q);
+ clear_exec_queue_pending_resume(q);
+ clear_exec_queue_pending_tdr_exit(q);
+ clear_exec_queue_pending_enable(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay ENABLE - guc_id=%d",
+ q->guc->id);
+ }
+
+ if (exec_queue_destroyed(q) && exec_queue_registered(q)) {
+ clear_exec_queue_destroyed(q);
+ if (exec_queue_extra_ref(q))
+ xe_exec_queue_put(q);
+ else
+ q->guc->needs_cleanup = true;
+ clear_exec_queue_extra_ref(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay CLEANUP - guc_id=%d",
+ q->guc->id);
+ }
+
+ pending_disable = exec_queue_pending_disable(q);
+
+ if (pending_disable && exec_queue_suspended(q)) {
+ clear_exec_queue_suspended(q);
+ q->guc->needs_suspend = true;
+ xe_gt_dbg(guc_to_gt(guc), "Replay SUSPEND - guc_id=%d",
+ q->guc->id);
+ }
+
+ if (pending_disable) {
+ if (!pending_enable)
+ set_exec_queue_enabled(q);
+ clear_exec_queue_pending_disable(q);
+ clear_exec_queue_check_timeout(q);
+ xe_gt_dbg(guc_to_gt(guc), "Replay DISABLE - guc_id=%d",
+ q->guc->id);
+ }
+
+ q->guc->resume_time = 0;
+}
+
+static void lrc_parallel_clear(struct xe_lrc *lrc)
+{
+ struct xe_device *xe = gt_to_xe(lrc->gt);
+ struct iosys_map map = xe_lrc_parallel_map(lrc);
+ int i;
+
+ for (i = 0; i < WQ_SIZE / sizeof(u32); ++i)
+ parallel_write(xe, map, wq[i],
+ FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+ FIELD_PREP(WQ_LEN_MASK, 0));
+}
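Each WQ slot is overwritten with a one-dword, zero-length NOOP item; the value the loop above stores in every slot is simply:

	/* Sketch: the pattern lrc_parallel_clear() writes into each wq[] slot */
	u32 noop = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
		   FIELD_PREP(WQ_LEN_MASK, 0);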
+
+/*
+ * This function is quite complex, but it is the only real way to ensure no
+ * state is lost during VF resume flows. The function scans the queue state,
+ * makes adjustments as needed, and queues jobs / messages which are replayed
+ * upon unpause.
+ */
+static void guc_exec_queue_pause(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_job *job;
+ int i;
+
+ lockdep_assert_held(&guc->submission_state.lock);
+
+ /* Stop scheduling + flush any DRM scheduler operations */
+ xe_sched_submission_stop(sched);
+ if (xe_exec_queue_is_lr(q))
+ cancel_work_sync(&q->guc->lr_tdr);
+ else
+ cancel_delayed_work_sync(&sched->base.work_tdr);
+
+ guc_exec_queue_revert_pending_state_change(guc, q);
+
+ if (xe_exec_queue_is_parallel(q)) {
+ /* Pairs with WRITE_ONCE in __xe_exec_queue_init */
+ struct xe_lrc *lrc = READ_ONCE(q->lrc[0]);
+
+ /*
+ * NOP existing WQ commands that may contain stale GGTT
+ * addresses. These will be replayed upon unpause. The hardware
+ * seems to get confused if the WQ head/tail pointers are
+ * adjusted.
+ */
+ if (lrc)
+ lrc_parallel_clear(lrc);
+ }
+
+ job = xe_sched_first_pending_job(sched);
+ if (job) {
+ job->restore_replay = true;
+
+ /*
+ * Adjust software tail so jobs submitted overwrite previous
+ * position in ring buffer with new GGTT addresses.
+ */
+ for (i = 0; i < q->width; ++i)
+ q->lrc[i]->ring.tail = job->ptrs[i].head;
+ }
+}
+
+/**
+ * xe_guc_submit_pause - Stop further runs of submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be disabled
+ */
+void xe_guc_submit_pause(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
+ guc_exec_queue_pause(guc, q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
+
static void guc_exec_queue_start(struct xe_exec_queue *q)
{
struct xe_gpu_scheduler *sched = &q->guc->sched;
if (!exec_queue_killed_or_banned_or_wedged(q)) {
+ struct xe_sched_job *job = xe_sched_first_pending_job(sched);
int i;
trace_xe_exec_queue_resubmit(q);
- for (i = 0; i < q->width; ++i)
- xe_lrc_set_ring_head(q->lrc[i], q->lrc[i]->ring.tail);
+ if (job) {
+ for (i = 0; i < q->width; ++i) {
+ /*
+ * The GuC context is unregistered at this
+ * point in time. Adjusting the software
+ * ring tail ensures jobs are rewritten in
+ * their original placement, while adjusting
+ * the LRC tail ensures the newly loaded
+ * GuC / contexts only ever observe the LRC
+ * tail increasing as jobs are written out.
+ */
+ q->lrc[i]->ring.tail = job->ptrs[i].head;
+ xe_lrc_set_ring_tail(q->lrc[i],
+ xe_lrc_ring_head(q->lrc[i]));
+ }
+ }
xe_sched_resubmit_jobs(sched);
}
@@ -1866,6 +2249,151 @@ int xe_guc_submit_start(struct xe_guc *guc)
return 0;
}
+static void guc_exec_queue_unpause_prepare(struct xe_guc *guc,
+ struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_job *job, *last = NULL;
+ bool restore_replay = false;
+
+ list_for_each_entry(job, &sched->base.pending_list, drm.list) {
+ restore_replay |= job->restore_replay;
+ if (restore_replay) {
+ xe_gt_dbg(guc_to_gt(guc), "Replay JOB - guc_id=%d, seqno=%d",
+ q->guc->id, xe_sched_job_seqno(job));
+
+ q->ring_ops->emit_job(job);
+ job->restore_replay = true;
+ }
+ last = job;
+ }
+
+ /* Only the final pending job should ring the doorbell on replay */
+ if (last)
+ last->last_replay = true;
+}
+
+/**
+ * xe_guc_submit_unpause_prepare - Prepare unpause submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be prepared for unpause
+ */
+void xe_guc_submit_unpause_prepare(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ xe_gt_assert(guc_to_gt(guc), vf_recovery(guc));
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
+ guc_exec_queue_unpause_prepare(guc, q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
+
+static void guc_exec_queue_replay_pending_state_change(struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ struct xe_sched_msg *msg;
+
+ if (q->guc->needs_cleanup) {
+ msg = q->guc->static_msgs + STATIC_MSG_CLEANUP;
+
+ guc_exec_queue_add_msg(q, msg, CLEANUP);
+ q->guc->needs_cleanup = false;
+ }
+
+ if (q->guc->needs_suspend) {
+ msg = q->guc->static_msgs + STATIC_MSG_SUSPEND;
+
+ xe_sched_msg_lock(sched);
+ guc_exec_queue_try_add_msg_head(q, msg, SUSPEND);
+ xe_sched_msg_unlock(sched);
+
+ q->guc->needs_suspend = false;
+ }
+
+ /*
+ * The resume must be in the message queue before the suspend as it is
+ * not possible for a resume to be issued if a suspend pending is, but
+ * the inverse is possible.
+ */
+ if (q->guc->needs_resume) {
+ msg = q->guc->static_msgs + STATIC_MSG_RESUME;
+
+ xe_sched_msg_lock(sched);
+ guc_exec_queue_try_add_msg_head(q, msg, RESUME);
+ xe_sched_msg_unlock(sched);
+
+ q->guc->needs_resume = false;
+ }
+}
+
+static void guc_exec_queue_unpause(struct xe_guc *guc, struct xe_exec_queue *q)
+{
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+ bool needs_tdr = exec_queue_killed_or_banned_or_wedged(q);
+
+ lockdep_assert_held(&guc->submission_state.lock);
+
+ xe_sched_resubmit_jobs(sched);
+ guc_exec_queue_replay_pending_state_change(q);
+ xe_sched_submission_start(sched);
+ if (needs_tdr)
+ xe_guc_exec_queue_trigger_cleanup(q);
+ xe_sched_submission_resume_tdr(sched);
+}
+
+/**
+ * xe_guc_submit_unpause - Allow further runs of submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be enabled
+ */
+void xe_guc_submit_unpause(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /*
+ * Prevent redundant attempts to stop parallel queues, or queues
+ * created after resfix done.
+ */
+ if (q->guc->id != index ||
+ !READ_ONCE(q->guc->sched.base.pause_submit))
+ continue;
+
+ guc_exec_queue_unpause(guc, q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
+
+/**
+ * xe_guc_submit_pause_abort - Abort all paused submission tasks on given GuC.
+ * @guc: the &xe_guc struct instance whose scheduler is to be aborted
+ */
+void xe_guc_submit_pause_abort(struct xe_guc *guc)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ struct xe_gpu_scheduler *sched = &q->guc->sched;
+
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
+ xe_sched_submission_start(sched);
+ if (exec_queue_killed_or_banned_or_wedged(q))
+ xe_guc_exec_queue_trigger_cleanup(q);
+ }
+ mutex_unlock(&guc->submission_state.lock);
+}
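Taken together, the new entry points are meant to bracket VF post-migration recovery; a hedged sketch of the expected call order from the recovery worker (the actual caller is not part of this file; fixups_ok and scratch are hypothetical):

	/* Sketch: VF post-migration recovery using the new hooks. */
	xe_guc_submit_pause(guc);		/* freeze schedulers, revert state */
	if (fixups_ok &&			/* hypothetical status flag */
	    !xe_guc_contexts_hwsp_rebase(guc, scratch)) {
		xe_guc_submit_unpause_prepare(guc);	/* re-emit pending jobs */
		xe_guc_submit_unpause(guc);	/* replay messages, restart scheds */
	} else {
		xe_guc_submit_pause_abort(guc);	/* restart scheds, trigger cleanup */
	}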
+
static struct xe_exec_queue *
g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
{
@@ -1879,7 +2407,7 @@ g2h_exec_queue_lookup(struct xe_guc *guc, u32 guc_id)
q = xa_load(&guc->submission_state.exec_queue_lookup, guc_id);
if (unlikely(!q)) {
- xe_gt_err(gt, "Not engine present for guc_id %u\n", guc_id);
+ xe_gt_err(gt, "No exec queue found for guc_id %u\n", guc_id);
return NULL;
}
@@ -1915,6 +2443,8 @@ static void handle_sched_done(struct xe_guc *guc, struct xe_exec_queue *q,
xe_gt_assert(guc_to_gt(guc), exec_queue_pending_enable(q));
q->guc->resume_time = ktime_get();
+ clear_exec_queue_pending_resume(q);
+ clear_exec_queue_pending_tdr_exit(q);
clear_exec_queue_pending_enable(q);
smp_wmb();
wake_up_all(&guc->ct.wq);
@@ -1989,7 +2519,7 @@ static void handle_deregister_done(struct xe_guc *guc, struct xe_exec_queue *q)
if (exec_queue_extra_ref(q) || xe_exec_queue_is_lr(q))
xe_exec_queue_put(q);
else
- __guc_exec_queue_fini(guc, q);
+ __guc_exec_queue_destroy(guc, q);
}
int xe_guc_deregister_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
@@ -2378,6 +2908,34 @@ static void guc_exec_queue_print(struct xe_exec_queue *q, struct drm_printer *p)
}
/**
+ * xe_guc_register_vf_exec_queue - Register exec queue for a given context type.
+ * @q: Execution queue
+ * @ctx_type: Type of the context
+ *
+ * This function registers the execution queue with the GuC. Special context
+ * types like GUC_CONTEXT_COMPRESSION_SAVE and GUC_CONTEXT_COMPRESSION_RESTORE
+ * are only applicable on integrated GPUs and only in a VF. The execution
+ * queue is submitted to the GuC after registration.
+ *
+ * Return: none.
+ */
+void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type)
+{
+ struct xe_guc *guc = exec_queue_to_guc(q);
+ struct xe_device *xe = guc_to_xe(guc);
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ xe_gt_assert(gt, IS_SRIOV_VF(xe));
+ xe_gt_assert(gt, !IS_DGFX(xe));
+ xe_gt_assert(gt, ctx_type == GUC_CONTEXT_COMPRESSION_SAVE ||
+ ctx_type == GUC_CONTEXT_COMPRESSION_RESTORE);
+ xe_gt_assert(gt, GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 23, 0));
+
+ register_exec_queue(q, ctx_type);
+ enable_scheduling(q);
+}
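A hedged usage sketch (queue creation elided; per the asserts above, this is only legal on an integrated-GPU VF with GuC submission interface 1.23+):

	xe_guc_register_vf_exec_queue(save_q, GUC_CONTEXT_COMPRESSION_SAVE);
	xe_guc_register_vf_exec_queue(restore_q, GUC_CONTEXT_COMPRESSION_RESTORE);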
+
+/**
* xe_guc_submit_print - GuC Submit Print.
* @guc: GuC.
* @p: drm_printer where it will be printed out.
@@ -2397,3 +2955,32 @@ void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p)
guc_exec_queue_print(q, p);
mutex_unlock(&guc->submission_state.lock);
}
+
+/**
+ * xe_guc_contexts_hwsp_rebase - Re-compute GGTT references within all
+ * exec queues registered to given GuC.
+ * @guc: the &xe_guc struct instance
+ * @scratch: scratch buffer to be used as temporary storage
+ *
+ * Returns: zero on success, negative error code on failure.
+ */
+int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch)
+{
+ struct xe_exec_queue *q;
+ unsigned long index;
+ int err = 0;
+
+ mutex_lock(&guc->submission_state.lock);
+ xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+ /* Prevent redundant attempts to stop parallel queues */
+ if (q->guc->id != index)
+ continue;
+
+ err = xe_exec_queue_contexts_hwsp_rebase(q, scratch);
+ if (err)
+ break;
+ }
+ mutex_unlock(&guc->submission_state.lock);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.h b/drivers/gpu/drm/xe/xe_guc_submit.h
index 9b71a986c6ca..b49a2748ec46 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.h
+++ b/drivers/gpu/drm/xe/xe_guc_submit.h
@@ -13,11 +13,17 @@ struct xe_exec_queue;
struct xe_guc;
int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids);
+int xe_guc_submit_enable(struct xe_guc *guc);
+void xe_guc_submit_disable(struct xe_guc *guc);
int xe_guc_submit_reset_prepare(struct xe_guc *guc);
void xe_guc_submit_reset_wait(struct xe_guc *guc);
void xe_guc_submit_stop(struct xe_guc *guc);
int xe_guc_submit_start(struct xe_guc *guc);
+void xe_guc_submit_pause(struct xe_guc *guc);
+void xe_guc_submit_unpause(struct xe_guc *guc);
+void xe_guc_submit_unpause_prepare(struct xe_guc *guc);
+void xe_guc_submit_pause_abort(struct xe_guc *guc);
void xe_guc_submit_wedge(struct xe_guc *guc);
int xe_guc_read_stopped(struct xe_guc *guc);
@@ -39,5 +45,8 @@ xe_guc_exec_queue_snapshot_print(struct xe_guc_submit_exec_queue_snapshot *snaps
void
xe_guc_exec_queue_snapshot_free(struct xe_guc_submit_exec_queue_snapshot *snapshot);
void xe_guc_submit_print(struct xe_guc *guc, struct drm_printer *p);
+void xe_guc_register_vf_exec_queue(struct xe_exec_queue *q, int ctx_type);
+
+int xe_guc_contexts_hwsp_rebase(struct xe_guc *guc, void *scratch);
#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_tlb_inval.c b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
new file mode 100644
index 000000000000..a80175c7c478
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_tlb_inval.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "abi/guc_actions_abi.h"
+
+#include "xe_device.h"
+#include "xe_gt_stats.h"
+#include "xe_gt_types.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_tlb_inval.h"
+#include "xe_force_wake.h"
+#include "xe_mmio.h"
+#include "xe_tlb_inval.h"
+
+#include "regs/xe_guc_regs.h"
+
+/*
+ * XXX: The seqno algorithm relies on TLB invalidations being processed in the
+ * order in which they are submitted, which the GuC currently guarantees; if
+ * that changes, the algorithm will need to be updated.
+ */
+
+static int send_tlb_inval(struct xe_guc *guc, const u32 *action, int len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ xe_gt_assert(gt, action[1]); /* Seqno */
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_TLB_INVAL, 1);
+ return xe_guc_ct_send(&guc->ct, action, len,
+ G2H_LEN_DW_TLB_INVALIDATE, 1);
+}
+
+#define MAKE_INVAL_OP(type) ((type << XE_GUC_TLB_INVAL_TYPE_SHIFT) | \
+ XE_GUC_TLB_INVAL_MODE_HEAVY << XE_GUC_TLB_INVAL_MODE_SHIFT | \
+ XE_GUC_TLB_INVAL_FLUSH_CACHE)
+
+static int send_tlb_inval_all(struct xe_tlb_inval *tlb_inval, u32 seqno)
+{
+ struct xe_guc *guc = tlb_inval->private;
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION_ALL,
+ seqno,
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL),
+ };
+
+ return send_tlb_inval(guc, action, ARRAY_SIZE(action));
+}
+
+static int send_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval, u32 seqno)
+{
+ struct xe_guc *guc = tlb_inval->private;
+ struct xe_gt *gt = guc_to_gt(guc);
+ struct xe_device *xe = guc_to_xe(guc);
+
+ /*
+ * A -ECANCELED return from this function is squashed by the caller,
+ * which then signals any waiters.
+ */
+
+ if (xe_guc_ct_enabled(&guc->ct) && guc->submission_state.enabled) {
+ u32 action[] = {
+ XE_GUC_ACTION_TLB_INVALIDATION,
+ seqno,
+ MAKE_INVAL_OP(XE_GUC_TLB_INVAL_GUC),
+ };
+
+ return send_tlb_inval(guc, action, ARRAY_SIZE(action));
+ } else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
+ struct xe_mmio *mmio = &gt->mmio;
+ unsigned int fw_ref;
+
+ if (IS_SRIOV_VF(xe))
+ return -ECANCELED;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (xe->info.platform == XE_PVC || GRAPHICS_VER(xe) >= 20) {
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC1,
+ PVC_GUC_TLB_INV_DESC1_INVALIDATE);
+ xe_mmio_write32(mmio, PVC_GUC_TLB_INV_DESC0,
+ PVC_GUC_TLB_INV_DESC0_VALID);
+ } else {
+ xe_mmio_write32(mmio, GUC_TLB_INV_CR,
+ GUC_TLB_INV_CR_INVALIDATE);
+ }
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ }
+
+ return -ECANCELED;
+}
+
+/*
+ * Ensure that roundup_pow_of_two(length) doesn't overflow.
+ * Note that roundup_pow_of_two() operates on unsigned long,
+ * not on u64.
+ */
+#define MAX_RANGE_TLB_INVALIDATION_LENGTH (rounddown_pow_of_two(ULONG_MAX))
+
+static int send_tlb_inval_ppgtt(struct xe_tlb_inval *tlb_inval, u32 seqno,
+ u64 start, u64 end, u32 asid)
+{
+#define MAX_TLB_INVALIDATION_LEN 7
+ struct xe_guc *guc = tlb_inval->private;
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 action[MAX_TLB_INVALIDATION_LEN];
+ u64 length = end - start;
+ int len = 0;
+
+ if (guc_to_xe(guc)->info.force_execlist)
+ return -ECANCELED;
+
+ action[len++] = XE_GUC_ACTION_TLB_INVALIDATION;
+ action[len++] = seqno;
+ if (!gt_to_xe(gt)->info.has_range_tlb_inval ||
+ length > MAX_RANGE_TLB_INVALIDATION_LENGTH) {
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL);
+ } else {
+ u64 orig_start = start;
+ u64 align;
+
+ if (length < SZ_4K)
+ length = SZ_4K;
+
+ /*
+ * We need to invalidate at a higher granularity if the start
+ * address is not aligned to the length. In that case, grow the
+ * length until the resulting address mask covers the required
+ * range.
+ */
+ align = roundup_pow_of_two(length);
+ start = ALIGN_DOWN(start, align);
+ end = ALIGN(end, align);
+ length = align;
+ while (start + length < end) {
+ length <<= 1;
+ start = ALIGN_DOWN(orig_start, length);
+ }
+
+ /*
+ * The minimum invalidation size the hardware expects for a 2MB
+ * page is 16MB
+ */
+ if (length >= SZ_2M) {
+ length = max_t(u64, SZ_16M, length);
+ start = ALIGN_DOWN(orig_start, length);
+ }
+
+ xe_gt_assert(gt, length >= SZ_4K);
+ xe_gt_assert(gt, is_power_of_2(length));
+ xe_gt_assert(gt, !(length & GENMASK(ilog2(SZ_16M) - 1,
+ ilog2(SZ_2M) + 1)));
+ xe_gt_assert(gt, IS_ALIGNED(start, length));
+
+ action[len++] = MAKE_INVAL_OP(XE_GUC_TLB_INVAL_PAGE_SELECTIVE);
+ action[len++] = asid;
+ action[len++] = lower_32_bits(start);
+ action[len++] = upper_32_bits(start);
+ action[len++] = ilog2(length) - ilog2(SZ_4K);
+ }
+
+ xe_gt_assert(gt, len <= MAX_TLB_INVALIDATION_LEN);
+
+ return send_tlb_inval(guc, action, len);
+}
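A worked example of the alignment logic, using hypothetical values:

	/*
	 * start = 0x11000, end = 0x14000 -> length = 0x3000
	 *   align  = roundup_pow_of_two(0x3000)  = 0x4000
	 *   start  = ALIGN_DOWN(0x11000, 0x4000) = 0x10000
	 *   end    = ALIGN(0x14000, 0x4000)      = 0x14000
	 *   0x10000 + 0x4000 >= 0x14000, so the loop does not grow length
	 *   encoded size dword = ilog2(0x4000) - ilog2(SZ_4K) = 2
	 */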
+
+static bool tlb_inval_initialized(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_guc *guc = tlb_inval->private;
+
+ return xe_guc_ct_initialized(&guc->ct);
+}
+
+static void tlb_inval_flush(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_guc *guc = tlb_inval->private;
+
+ LNL_FLUSH_WORK(&guc->ct.g2h_worker);
+}
+
+static long tlb_inval_timeout_delay(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_guc *guc = tlb_inval->private;
+
+ /* this reflects what the HW/GuC needs to process a TLB invalidation request */
+ const long hw_tlb_timeout = HZ / 4;
+
+ /* this estimates actual delay caused by the CTB transport */
+ long delay = xe_guc_ct_queue_proc_time_jiffies(&guc->ct);
+
+ return hw_tlb_timeout + 2 * delay;
+}
+
+static const struct xe_tlb_inval_ops guc_tlb_inval_ops = {
+ .all = send_tlb_inval_all,
+ .ggtt = send_tlb_inval_ggtt,
+ .ppgtt = send_tlb_inval_ppgtt,
+ .initialized = tlb_inval_initialized,
+ .flush = tlb_inval_flush,
+ .timeout_delay = tlb_inval_timeout_delay,
+};
+
+/**
+ * xe_guc_tlb_inval_init_early() - Init GuC TLB invalidation early
+ * @guc: GuC object
+ * @tlb_inval: TLB invalidation client
+ *
+ * Initialize GuC TLB invalidation by setting back pointer in TLB invalidation
+ * client to the GuC and setting GuC backend ops.
+ */
+void xe_guc_tlb_inval_init_early(struct xe_guc *guc,
+ struct xe_tlb_inval *tlb_inval)
+{
+ tlb_inval->private = guc;
+ tlb_inval->ops = &guc_tlb_inval_ops;
+}
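A hedged sketch of the expected wiring during GT init (the actual call site lives outside this patch):

	xe_guc_tlb_inval_init_early(&gt->uc.guc, &gt->tlb_inval);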
+
+/**
+ * xe_guc_tlb_inval_done_handler() - TLB invalidation done handler
+ * @guc: guc
+ * @msg: message indicating TLB invalidation done
+ * @len: length of message
+ *
+ * Parse the seqno of a TLB invalidation, wake any waiters for that seqno, and
+ * signal any invalidation fences for it. The algorithm depends on seqnos being
+ * received in order and asserts this assumption.
+ *
+ * Return: 0 on success, -EPROTO for malformed messages.
+ */
+int xe_guc_tlb_inval_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+
+ if (unlikely(len != 1))
+ return -EPROTO;
+
+ xe_tlb_inval_done_handler(&gt->tlb_inval, msg[0]);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_guc_tlb_inval.h b/drivers/gpu/drm/xe/xe_guc_tlb_inval.h
new file mode 100644
index 000000000000..07d668b02e3d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_guc_tlb_inval.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_GUC_TLB_INVAL_H_
+#define _XE_GUC_TLB_INVAL_H_
+
+#include <linux/types.h>
+
+struct xe_guc;
+struct xe_tlb_inval;
+
+void xe_guc_tlb_inval_init_early(struct xe_guc *guc,
+ struct xe_tlb_inval *tlb_inval);
+
+int xe_guc_tlb_inval_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 1fde7614fcc5..c7b9642b41ba 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -85,6 +85,12 @@ struct xe_guc {
struct xarray exec_queue_lookup;
/** @submission_state.stopped: submissions are stopped */
atomic_t stopped;
+ /**
+ * @submission_state.reset_blocked: reset attempts are blocked;
+ * blocking reset in order to delay it may be required if running
+ * an operation which is sensitive to resets.
+ */
+ atomic_t reset_blocked;
/** @submission_state.lock: protects submission state */
struct mutex lock;
/** @submission_state.enabled: submission is enabled */
diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c
index 6d7b62724126..2b3d49dd394c 100644
--- a/drivers/gpu/drm/xe/xe_heci_gsc.c
+++ b/drivers/gpu/drm/xe/xe_heci_gsc.c
@@ -8,6 +8,8 @@
#include <linux/pci.h>
#include <linux/sizes.h>
+#include <drm/drm_print.h>
+
#include "xe_device_types.h"
#include "xe_drv.h"
#include "xe_heci_gsc.h"
@@ -197,7 +199,7 @@ int xe_heci_gsc_init(struct xe_device *xe)
if (ret)
return ret;
- if (!def->use_polling && !xe_survivability_mode_is_enabled(xe)) {
+ if (!def->use_polling && !xe_survivability_mode_is_boot_enabled(xe)) {
ret = heci_gsc_irq_setup(xe);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
deleted file mode 100644
index 57b71956ddf4..000000000000
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ /dev/null
@@ -1,325 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2024 Intel Corporation
- */
-
-#include <linux/scatterlist.h>
-#include <linux/mmu_notifier.h>
-#include <linux/dma-mapping.h>
-#include <linux/memremap.h>
-#include <linux/swap.h>
-#include <linux/hmm.h>
-#include <linux/mm.h>
-#include "xe_hmm.h"
-#include "xe_vm.h"
-#include "xe_bo.h"
-
-static u64 xe_npages_in_range(unsigned long start, unsigned long end)
-{
- return (end - start) >> PAGE_SHIFT;
-}
-
-static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
- struct hmm_range *range, struct rw_semaphore *notifier_sem)
-{
- unsigned long i, npages, hmm_pfn;
- unsigned long num_chunks = 0;
- int ret;
-
- /* HMM docs says this is needed. */
- ret = down_read_interruptible(notifier_sem);
- if (ret)
- return ret;
-
- if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
- up_read(notifier_sem);
- return -EAGAIN;
- }
-
- npages = xe_npages_in_range(range->start, range->end);
- for (i = 0; i < npages;) {
- unsigned long len;
-
- hmm_pfn = range->hmm_pfns[i];
- xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
-
- len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
-
- /* If order > 0 the page may extend beyond range->start */
- len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
- i += len;
- num_chunks++;
- }
- up_read(notifier_sem);
-
- return sg_alloc_table(st, num_chunks, GFP_KERNEL);
-}
-
-/**
- * xe_build_sg() - build a scatter gather table for all the physical pages/pfn
- * in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
- * and will be used to program GPU page table later.
- * @xe: the xe device who will access the dma-address in sg table
- * @range: the hmm range that we build the sg table from. range->hmm_pfns[]
- * has the pfn numbers of pages that back up this hmm address range.
- * @st: pointer to the sg table.
- * @notifier_sem: The xe notifier lock.
- * @write: whether we write to this range. This decides dma map direction
- * for system pages. If write we map it bi-diretional; otherwise
- * DMA_TO_DEVICE
- *
- * All the contiguous pfns will be collapsed into one entry in
- * the scatter gather table. This is for the purpose of efficiently
- * programming GPU page table.
- *
- * The dma_address in the sg table will later be used by GPU to
- * access memory. So if the memory is system memory, we need to
- * do a dma-mapping so it can be accessed by GPU/DMA.
- *
- * FIXME: This function currently only support pages in system
- * memory. If the memory is GPU local memory (of the GPU who
- * is going to access memory), we need gpu dpa (device physical
- * address), and there is no need of dma-mapping. This is TBD.
- *
- * FIXME: dma-mapping for peer gpu device to access remote gpu's
- * memory. Add this when you support p2p
- *
- * This function allocates the storage of the sg table. It is
- * caller's responsibility to free it calling sg_free_table.
- *
- * Returns 0 if successful; -ENOMEM if fails to allocate memory
- */
-static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
- struct sg_table *st,
- struct rw_semaphore *notifier_sem,
- bool write)
-{
- unsigned long npages = xe_npages_in_range(range->start, range->end);
- struct device *dev = xe->drm.dev;
- struct scatterlist *sgl;
- struct page *page;
- unsigned long i, j;
-
- lockdep_assert_held(notifier_sem);
-
- i = 0;
- for_each_sg(st->sgl, sgl, st->nents, j) {
- unsigned long hmm_pfn, size;
-
- hmm_pfn = range->hmm_pfns[i];
- page = hmm_pfn_to_page(hmm_pfn);
- xe_assert(xe, !is_device_private_page(page));
-
- size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
- size -= page_to_pfn(page) & (size - 1);
- i += size;
-
- if (unlikely(j == st->nents - 1)) {
- xe_assert(xe, i >= npages);
- if (i > npages)
- size -= (i - npages);
-
- sg_mark_end(sgl);
- } else {
- xe_assert(xe, i < npages);
- }
-
- sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
- }
-
- return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
-}
-
-static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
-
- lockdep_assert_held_write(&vm->lock);
- lockdep_assert_held(&vm->userptr.notifier_lock);
-
- mutex_lock(&userptr->unmap_mutex);
- xe_assert(vm->xe, !userptr->mapped);
- userptr->mapped = true;
- mutex_unlock(&userptr->unmap_mutex);
-}
-
-void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- bool write = !xe_vma_read_only(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
-
- if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
- !lockdep_is_held_type(&vm->lock, 0) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
- /* Don't unmap in exec critical section. */
- xe_vm_assert_held(vm);
- /* Don't unmap while mapping the sg. */
- lockdep_assert_held(&vm->lock);
- }
-
- mutex_lock(&userptr->unmap_mutex);
- if (userptr->sg && userptr->mapped)
- dma_unmap_sgtable(xe->drm.dev, userptr->sg,
- write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
- userptr->mapped = false;
- mutex_unlock(&userptr->unmap_mutex);
-}
-
-/**
- * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
- * @uvma: the userptr vma which hold the scatter gather table
- *
- * With function xe_userptr_populate_range, we allocate storage of
- * the userptr sg table. This is a helper function to free this
- * sg table, and dma unmap the address in the table.
- */
-void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
-
- xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
- xe_hmm_userptr_unmap(uvma);
- sg_free_table(userptr->sg);
- userptr->sg = NULL;
-}
-
-/**
- * xe_hmm_userptr_populate_range() - Populate physical pages of a virtual
- * address range
- *
- * @uvma: userptr vma which has information of the range to populate.
- * @is_mm_mmap_locked: True if mmap_read_lock is already acquired by caller.
- *
- * This function populate the physical pages of a virtual
- * address range. The populated physical pages is saved in
- * userptr's sg table. It is similar to get_user_pages but call
- * hmm_range_fault.
- *
- * This function also read mmu notifier sequence # (
- * mmu_interval_read_begin), for the purpose of later
- * comparison (through mmu_interval_read_retry).
- *
- * This must be called with mmap read or write lock held.
- *
- * This function allocates the storage of the userptr sg table.
- * It is caller's responsibility to free it calling sg_free_table.
- *
- * returns: 0 for success; negative error no on failure
- */
-int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
- bool is_mm_mmap_locked)
-{
- unsigned long timeout =
- jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- unsigned long *pfns;
- struct xe_userptr *userptr;
- struct xe_vma *vma = &uvma->vma;
- u64 userptr_start = xe_vma_userptr(vma);
- u64 userptr_end = userptr_start + xe_vma_size(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct hmm_range hmm_range = {
- .pfn_flags_mask = 0, /* ignore pfns */
- .default_flags = HMM_PFN_REQ_FAULT,
- .start = userptr_start,
- .end = userptr_end,
- .notifier = &uvma->userptr.notifier,
- .dev_private_owner = vm->xe,
- };
- bool write = !xe_vma_read_only(vma);
- unsigned long notifier_seq;
- u64 npages;
- int ret;
-
- userptr = &uvma->userptr;
-
- if (is_mm_mmap_locked)
- mmap_assert_locked(userptr->notifier.mm);
-
- if (vma->gpuva.flags & XE_VMA_DESTROYED)
- return 0;
-
- notifier_seq = mmu_interval_read_begin(&userptr->notifier);
- if (notifier_seq == userptr->notifier_seq)
- return 0;
-
- if (userptr->sg)
- xe_hmm_userptr_free_sg(uvma);
-
- npages = xe_npages_in_range(userptr_start, userptr_end);
- pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
- if (unlikely(!pfns))
- return -ENOMEM;
-
- if (write)
- hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
-
- if (!mmget_not_zero(userptr->notifier.mm)) {
- ret = -EFAULT;
- goto free_pfns;
- }
-
- hmm_range.hmm_pfns = pfns;
-
- while (true) {
- hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
-
- if (!is_mm_mmap_locked)
- mmap_read_lock(userptr->notifier.mm);
-
- ret = hmm_range_fault(&hmm_range);
-
- if (!is_mm_mmap_locked)
- mmap_read_unlock(userptr->notifier.mm);
-
- if (ret == -EBUSY) {
- if (time_after(jiffies, timeout))
- break;
-
- continue;
- }
- break;
- }
-
- mmput(userptr->notifier.mm);
-
- if (ret)
- goto free_pfns;
-
- ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
- if (ret)
- goto free_pfns;
-
- ret = down_read_interruptible(&vm->userptr.notifier_lock);
- if (ret)
- goto free_st;
-
- if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
- &vm->userptr.notifier_lock, write);
- if (ret)
- goto out_unlock;
-
- userptr->sg = &userptr->sgt;
- xe_hmm_userptr_set_mapped(uvma);
- userptr->notifier_seq = hmm_range.notifier_seq;
- up_read(&vm->userptr.notifier_lock);
- kvfree(pfns);
- return 0;
-
-out_unlock:
- up_read(&vm->userptr.notifier_lock);
-free_st:
- sg_free_table(&userptr->sgt);
-free_pfns:
- kvfree(pfns);
- return ret;
-}
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
deleted file mode 100644
index 0ea98d8e7bbc..000000000000
--- a/drivers/gpu/drm/xe/xe_hmm.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: MIT
- *
- * Copyright © 2024 Intel Corporation
- */
-
-#ifndef _XE_HMM_H_
-#define _XE_HMM_H_
-
-#include <linux/types.h>
-
-struct xe_userptr_vma;
-
-int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
-
-void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
-
-void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
-#endif
diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c
index 7e43b2dd6a32..0a70c8924582 100644
--- a/drivers/gpu/drm/xe/xe_huc.c
+++ b/drivers/gpu/drm/xe/xe_huc.c
@@ -66,14 +66,18 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
int xe_huc_init(struct xe_huc *huc)
{
struct xe_gt *gt = huc_to_gt(huc);
- struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = gt_to_xe(gt);
int ret;
huc->fw.type = XE_UC_FW_TYPE_HUC;
- /* On platforms with a media GT the HuC is only available there */
- if (tile->media_gt && (gt != tile->media_gt)) {
+ /*
+ * The HuC is only available on the media GT on most platforms. The
+ * exceptions to that rule are the old Xe1 platforms where there was
+ * no separate GT for media IP, so the HuC was part of the primary
+ * GT. Such platforms have graphics versions 12.55 and earlier.
+ */
+ if (!xe_gt_is_media_type(gt) && GRAPHICS_VERx100(xe) > 1255) {
xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 796ba8c34a16..6a9e2a4272dd 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -346,17 +346,26 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}
-static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_gt *gt,
+static bool xe_hw_engine_match_fixed_cslice_mode(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
+ /*
+ * Xe3p no longer supports load balance mode, so "fixed cslice" mode
+ * is automatic and no RCU_MODE programming is required.
+ */
+ if (GRAPHICS_VER(gt_to_xe(gt)) >= 35)
+ return false;
+
return xe_gt_ccs_mode_enabled(gt) &&
- xe_rtp_match_first_render_or_compute(gt, hwe);
+ xe_rtp_match_first_render_or_compute(xe, gt, hwe);
}
-static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_gt *gt,
+static bool xe_rtp_cfeg_wmtp_disabled(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
- if (GRAPHICS_VER(gt_to_xe(gt)) < 20)
+ if (GRAPHICS_VER(xe) < 20)
return false;
if (hwe->class != XE_ENGINE_CLASS_COMPUTE &&
@@ -576,7 +585,7 @@ static void adjust_idledly(struct xe_hw_engine *hwe)
u32 maxcnt_units_ns = 640;
bool inhibit_switch = 0;
- if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) {
+ if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_GT_WA(gt, 16023105232)) {
idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base));
maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base));
@@ -709,27 +718,52 @@ static void read_media_fuses(struct xe_gt *gt)
}
}
+static u32 infer_svccopy_from_meml3(struct xe_gt *gt)
+{
+ u32 meml3 = REG_FIELD_GET(MEML3_EN_MASK,
+ xe_mmio_read32(&gt->mmio, MIRROR_FUSE3));
+ u32 svccopy_mask = 0;
+
+ /*
+ * Each of the four meml3 bits determines the fusing of two service
+ * copy engines.
+ */
+ for (int i = 0; i < 4; i++)
+ svccopy_mask |= (meml3 & BIT(i)) ? 0b11 << 2 * i : 0;
+
+ return svccopy_mask;
+}
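A worked example of the inference, with a hypothetical fuse value:

	/*
	 * meml3 = 0b0101 (banks 0 and 2 present):
	 *   i = 0: svccopy_mask |= 0b11 << 0  -> 0b00000011
	 *   i = 2: svccopy_mask |= 0b11 << 4  -> 0b00110011
	 * After the BCS1 shift in read_copy_fuses(), BCS1-2 and BCS5-6 survive.
	 */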
+
+static u32 read_svccopy_fuses(struct xe_gt *gt)
+{
+ return REG_FIELD_GET(FUSE_SERVICE_COPY_ENABLE_MASK,
+ xe_mmio_read32(&gt->mmio, SERVICE_COPY_ENABLE));
+}
+
static void read_copy_fuses(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
u32 bcs_mask;
- if (GRAPHICS_VERx100(xe) < 1260 || GRAPHICS_VERx100(xe) >= 1270)
- return;
-
xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
- bcs_mask = xe_mmio_read32(&gt->mmio, MIRROR_FUSE3);
- bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);
+ if (GRAPHICS_VER(xe) >= 35)
+ bcs_mask = read_svccopy_fuses(gt);
+ else if (GRAPHICS_VERx100(xe) == 1260)
+ bcs_mask = infer_svccopy_from_meml3(gt);
+ else
+ return;
- /* BCS0 is always present; only BCS1-BCS8 may be fused off */
- for (int i = XE_HW_ENGINE_BCS1, j = 0; i <= XE_HW_ENGINE_BCS8; ++i, ++j) {
+ /* Only BCS1-BCS8 may be fused off */
+ bcs_mask <<= XE_HW_ENGINE_BCS1;
+ for (int i = XE_HW_ENGINE_BCS1; i <= XE_HW_ENGINE_BCS8; ++i) {
if (!(gt->info.engine_mask & BIT(i)))
continue;
- if (!(BIT(j / 2) & bcs_mask)) {
+ if (!(bcs_mask & BIT(i))) {
gt->info.engine_mask &= ~BIT(i);
- xe_gt_info(gt, "bcs%u fused off\n", j);
+ xe_gt_info(gt, "bcs%u fused off\n",
+ i - XE_HW_ENGINE_BCS0);
}
}
}
@@ -870,7 +904,7 @@ void xe_hw_engine_handle_irq(struct xe_hw_engine *hwe, u16 intr_vec)
if (hwe->irq_handler)
hwe->irq_handler(hwe, intr_vec);
- if (intr_vec & GT_RENDER_USER_INTERRUPT)
+ if (intr_vec & GT_MI_USER_INTERRUPT)
xe_hw_fence_irq_run(hwe->fence_irq);
}
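
To make the fuse expansion in infer_svccopy_from_meml3() above concrete, here is the same bit-doubling as a standalone sketch: each MEML3 bit i enables the pair of service copy engines 2*i and 2*i+1.

#include <assert.h>
#include <stdint.h>

static uint32_t expand_meml3(uint32_t meml3)
{
	uint32_t mask = 0;

	for (int i = 0; i < 4; i++)
		if (meml3 & (1u << i))
			mask |= 0x3u << (2 * i); /* two engines per meml3 bit */

	return mask;
}

int main(void)
{
	/* meml3 = 0b0101 -> engine pairs {0,1} and {4,5} -> 0b00110011 */
	assert(expand_meml3(0x5) == 0x33);
	return 0;
}
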
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c
index c926f840c87b..fa4db5f23342 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_group.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c
@@ -103,8 +103,8 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt)
break;
case XE_ENGINE_CLASS_OTHER:
break;
- default:
- drm_warn(&xe->drm, "NOT POSSIBLE");
+ case XE_ENGINE_CLASS_MAX:
+ xe_gt_assert(gt, false);
}
}
@@ -213,17 +213,13 @@ static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group
err = q->ops->suspend_wait(q);
if (err)
- goto err_suspend;
+ return err;
}
if (need_resume)
xe_hw_engine_group_resume_faulting_lr_jobs(group);
return 0;
-
-err_suspend:
- up_write(&group->mode_sem);
- return err;
}
/**
diff --git a/drivers/gpu/drm/xe/xe_hw_error.c b/drivers/gpu/drm/xe/xe_hw_error.c
new file mode 100644
index 000000000000..8c65291f36fc
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_error.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/fault-inject.h>
+
+#include "regs/xe_gsc_regs.h"
+#include "regs/xe_hw_error_regs.h"
+#include "regs/xe_irq_regs.h"
+
+#include "xe_device.h"
+#include "xe_hw_error.h"
+#include "xe_mmio.h"
+#include "xe_survivability_mode.h"
+
+#define HEC_UNCORR_FW_ERR_BITS 4
+extern struct fault_attr inject_csc_hw_error;
+
+/* Error categories reported by hardware */
+enum hardware_error {
+ HARDWARE_ERROR_CORRECTABLE = 0,
+ HARDWARE_ERROR_NONFATAL = 1,
+ HARDWARE_ERROR_FATAL = 2,
+ HARDWARE_ERROR_MAX,
+};
+
+static const char * const hec_uncorrected_fw_errors[] = {
+ "Fatal",
+ "CSE Disabled",
+ "FD Corruption",
+ "Data Corruption"
+};
+
+static const char *hw_error_to_str(const enum hardware_error hw_err)
+{
+ switch (hw_err) {
+ case HARDWARE_ERROR_CORRECTABLE:
+ return "CORRECTABLE";
+ case HARDWARE_ERROR_NONFATAL:
+ return "NONFATAL";
+ case HARDWARE_ERROR_FATAL:
+ return "FATAL";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static bool fault_inject_csc_hw_error(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(&inject_csc_hw_error, 1);
+}
+
+static void csc_hw_error_work(struct work_struct *work)
+{
+ struct xe_tile *tile = container_of(work, typeof(*tile), csc_hw_error_work);
+ struct xe_device *xe = tile_to_xe(tile);
+ int ret;
+
+ ret = xe_survivability_mode_runtime_enable(xe);
+ if (ret)
+ drm_err(&xe->drm, "Failed to enable runtime survivability mode\n");
+}
+
+static void csc_hw_error_handler(struct xe_tile *tile, const enum hardware_error hw_err)
+{
+ const char *hw_err_str = hw_error_to_str(hw_err);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_mmio *mmio = &tile->mmio;
+ u32 base, err_bit, err_src;
+ unsigned long fw_err;
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return;
+
+ base = BMG_GSC_HECI1_BASE;
+ lockdep_assert_held(&xe->irq.lock);
+ err_src = xe_mmio_read32(mmio, HEC_UNCORR_ERR_STATUS(base));
+ if (!err_src) {
+ drm_err_ratelimited(&xe->drm, HW_ERR "Tile%d reported HEC_ERR_STATUS_%s blank\n",
+ tile->id, hw_err_str);
+ return;
+ }
+
+ if (err_src & UNCORR_FW_REPORTED_ERR) {
+ fw_err = xe_mmio_read32(mmio, HEC_UNCORR_FW_ERR_DW0(base));
+ for_each_set_bit(err_bit, &fw_err, HEC_UNCORR_FW_ERR_BITS) {
+ drm_err_ratelimited(&xe->drm, HW_ERR
+ "%s: HEC Uncorrected FW %s error reported, bit[%d] is set\n",
+ hw_err_str, hec_uncorrected_fw_errors[err_bit],
+ err_bit);
+
+ schedule_work(&tile->csc_hw_error_work);
+ }
+ }
+
+ xe_mmio_write32(mmio, HEC_UNCORR_ERR_STATUS(base), err_src);
+}
+
+static void hw_error_source_handler(struct xe_tile *tile, const enum hardware_error hw_err)
+{
+ const char *hw_err_str = hw_error_to_str(hw_err);
+ struct xe_device *xe = tile_to_xe(tile);
+ unsigned long flags;
+ u32 err_src;
+
+ if (xe->info.platform != XE_BATTLEMAGE)
+ return;
+
+ spin_lock_irqsave(&xe->irq.lock, flags);
+ err_src = xe_mmio_read32(&tile->mmio, DEV_ERR_STAT_REG(hw_err));
+ if (!err_src) {
+ drm_err_ratelimited(&xe->drm, HW_ERR "Tile%d reported DEV_ERR_STAT_%s blank!\n",
+ tile->id, hw_err_str);
+ goto unlock;
+ }
+
+ if (err_src & XE_CSC_ERROR)
+ csc_hw_error_handler(tile, hw_err);
+
+ xe_mmio_write32(&tile->mmio, DEV_ERR_STAT_REG(hw_err), err_src);
+
+unlock:
+ spin_unlock_irqrestore(&xe->irq.lock, flags);
+}
+
+/**
+ * xe_hw_error_irq_handler - irq handling for hw errors
+ * @tile: tile instance
+ * @master_ctl: value read from master interrupt register
+ *
+ * Xe platforms add three error bits to the master interrupt register to support error handling.
+ * These three bits convey the class of error: FATAL, NONFATAL, or CORRECTABLE.
+ * To process the interrupt, determine the source of error by reading the Device Error Source
+ * Register that corresponds to the class of error being serviced.
+ */
+void xe_hw_error_irq_handler(struct xe_tile *tile, const u32 master_ctl)
+{
+ enum hardware_error hw_err;
+
+ if (fault_inject_csc_hw_error())
+ schedule_work(&tile->csc_hw_error_work);
+
+ for (hw_err = 0; hw_err < HARDWARE_ERROR_MAX; hw_err++)
+ if (master_ctl & ERROR_IRQ(hw_err))
+ hw_error_source_handler(tile, hw_err);
+}
+
+/*
+ * Process hardware errors during boot
+ */
+static void process_hw_errors(struct xe_device *xe)
+{
+ struct xe_tile *tile;
+ u32 master_ctl;
+ u8 id;
+
+ for_each_tile(tile, xe, id) {
+ master_ctl = xe_mmio_read32(&tile->mmio, GFX_MSTR_IRQ);
+ xe_hw_error_irq_handler(tile, master_ctl);
+ xe_mmio_write32(&tile->mmio, GFX_MSTR_IRQ, master_ctl);
+ }
+}
+
+/**
+ * xe_hw_error_init - Initialize hw errors
+ * @xe: xe device instance
+ *
+ * Initialize and check for errors that occurred during boot
+ * prior to driver load
+ */
+void xe_hw_error_init(struct xe_device *xe)
+{
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
+ return;
+
+ INIT_WORK(&tile->csc_hw_error_work, csc_hw_error_work);
+
+ process_hw_errors(xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_hw_error.h b/drivers/gpu/drm/xe/xe_hw_error.h
new file mode 100644
index 000000000000..d86e28c5180c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_hw_error.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+#ifndef XE_HW_ERROR_H_
+#define XE_HW_ERROR_H_
+
+#include <linux/types.h>
+
+struct xe_tile;
+struct xe_device;
+
+void xe_hw_error_irq_handler(struct xe_tile *tile, const u32 master_ctl);
+void xe_hw_error_init(struct xe_device *xe);
+#endif
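
The HEC firmware-error decode in csc_hw_error_handler() above boils down to walking the low four bits of HEC_UNCORR_FW_ERR_DW0 and naming each one. A standalone sketch of that loop, open-coding for_each_set_bit() for userspace:

#include <stdio.h>

#define HEC_UNCORR_FW_ERR_BITS 4

static const char * const hec_uncorrected_fw_errors[] = {
	"Fatal", "CSE Disabled", "FD Corruption", "Data Corruption",
};

static void decode_fw_err(unsigned long fw_err)
{
	for (unsigned int bit = 0; bit < HEC_UNCORR_FW_ERR_BITS; bit++)
		if (fw_err & (1ul << bit))
			printf("HEC Uncorrected FW %s error, bit[%u] is set\n",
			       hec_uncorrected_fw_errors[bit], bit);
}

int main(void)
{
	decode_fw_err(0x5); /* reports "Fatal" and "FD Corruption" */
	return 0;
}
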
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index c17ed1ae8649..97879daeefc1 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -179,7 +179,7 @@ static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr
u32 clr, u32 set)
{
struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
- u32 val0, val1;
+ u32 val0 = 0, val1 = 0;
int ret = 0;
ret = xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
@@ -286,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
*/
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
- u64 reg_val = 0, min, max;
+ u32 reg_val = 0;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
@@ -294,7 +294,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
- xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, (u32 *)&reg_val);
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
} else {
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@@ -304,19 +304,21 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
/* Check if PL limits are disabled. */
if (!(reg_val & PWR_LIM_EN)) {
*value = PL_DISABLE;
- drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%016llx\n",
+ drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n",
PWR_ATTR_TO_STR(attr), channel, reg_val);
goto unlock;
}
reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
- *value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
+ *value = mul_u32_u32(reg_val, SF_POWER) >> hwmon->scl_shift_power;
/* For platforms with mailbox power limit support clamping would be done by pcode. */
if (!hwmon->xe->info.has_mbx_power_limits) {
- reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
- min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
- max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
+ u64 pkg_pwr, min, max;
+
+ pkg_pwr = xe_mmio_read64_2x32(mmio, pkg_power_sku);
+ min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr);
+ max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
if (min && max)
@@ -493,8 +495,8 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- u32 x, y, x_w = 2; /* 2 bits */
- u64 r, tau4, out;
+ u32 reg_val, x, y, x_w = 2; /* 2 bits */
+ u64 tau4, out;
int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
@@ -505,23 +507,24 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
mutex_lock(&hwmon->hwmon_lock);
if (hwmon->xe->info.has_mbx_power_limits) {
- ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
+ ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &reg_val);
if (ret) {
drm_err(&hwmon->xe->drm,
- "power interval read fail, ch %d, attr %d, r 0%llx, ret %d\n",
- channel, power_attr, r, ret);
- r = 0;
+ "power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n",
+ channel, power_attr, reg_val, ret);
+ reg_val = 0;
}
} else {
- r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
+ channel));
}
mutex_unlock(&hwmon->hwmon_lock);
xe_pm_runtime_put(hwmon->xe);
- x = REG_FIELD_GET(PWR_LIM_TIME_X, r);
- y = REG_FIELD_GET(PWR_LIM_TIME_Y, r);
+ x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val);
+ y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val);
/*
* tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
@@ -655,8 +658,6 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct xe_reg rapl_limit;
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
- xe_pm_runtime_get(hwmon->xe);
-
if (hwmon->xe->info.has_mbx_power_limits) {
xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
} else if (power_attr != PL2_HWMON_ATTR) {
@@ -666,8 +667,6 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
}
ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
- xe_pm_runtime_put(hwmon->xe);
-
return ret;
}
@@ -734,7 +733,7 @@ static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
long *value, u32 scale_factor)
{
int ret;
- u32 uval;
+ u32 uval = 0;
mutex_lock(&hwmon->hwmon_lock);
@@ -918,7 +917,7 @@ xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
static umode_t
xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
/* hwmon sysfs attribute of current available only for package */
if (channel != CHANNEL_PKG)
@@ -1020,7 +1019,7 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
if (!hwmon->xe->info.has_fan_control)
return 0;
@@ -1093,8 +1092,6 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
int ret;
- xe_pm_runtime_get(hwmon->xe);
-
switch (type) {
case hwmon_temp:
ret = xe_hwmon_temp_is_visible(hwmon, attr, channel);
@@ -1119,8 +1116,6 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
break;
}
- xe_pm_runtime_put(hwmon->xe);
-
return ret;
}
@@ -1294,13 +1289,6 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}
-static void xe_hwmon_mutex_destroy(void *arg)
-{
- struct xe_hwmon *hwmon = arg;
-
- mutex_destroy(&hwmon->hwmon_lock);
-}
-
int xe_hwmon_register(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
@@ -1319,8 +1307,7 @@ int xe_hwmon_register(struct xe_device *xe)
if (!hwmon)
return -ENOMEM;
- mutex_init(&hwmon->hwmon_lock);
- ret = devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon);
+ ret = devm_mutex_init(dev, &hwmon->hwmon_lock);
if (ret)
return ret;
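
The power-limit time window read out above follows the formula quoted in the xe_hwmon_power_max_interval_show() hunk: tau = (1 + (x / 4)) * 2^y, with x in bits 23:22 and y in bits 21:17. A worked example in plain C, with field positions taken from that comment and the final scaling to seconds omitted:

#include <stdint.h>
#include <stdio.h>

static double tau_units(uint32_t reg_val)
{
	uint32_t x = (reg_val >> 22) & 0x3;  /* PWR_LIM_TIME_X, bits 23:22 */
	uint32_t y = (reg_val >> 17) & 0x1f; /* PWR_LIM_TIME_Y, bits 21:17 */

	return (1.0 + x / 4.0) * (double)(1u << y);
}

int main(void)
{
	uint32_t reg = (2u << 22) | (3u << 17);

	/* x = 2, y = 3: tau = (1 + 2/4) * 2^3 = 12 time units */
	printf("%.1f\n", tau_units(reg));
	return 0;
}
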
diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c
index bc7dc2099470..0b5452be0c87 100644
--- a/drivers/gpu/drm/xe/xe_i2c.c
+++ b/drivers/gpu/drm/xe/xe_i2c.c
@@ -147,6 +147,25 @@ static void xe_i2c_unregister_adapter(struct xe_i2c *i2c)
}
/**
+ * xe_i2c_present - I2C controller is present and functional
+ * @xe: xe device instance
+ *
+ * Check whether the I2C controller is present and functioning with a valid
+ * endpoint cookie.
+ *
+ * Return: %true if present, %false otherwise.
+ */
+bool xe_i2c_present(struct xe_device *xe)
+{
+ return xe->i2c && xe->i2c->ep.cookie == XE_I2C_EP_COOKIE_DEVICE;
+}
+
+static bool xe_i2c_irq_present(struct xe_device *xe)
+{
+ return xe->i2c && xe->i2c->adapter_irq;
+}
+
+/**
* xe_i2c_irq_handler: Handler for I2C interrupts
* @xe: xe device instance
* @master_ctl: interrupt register
@@ -156,13 +175,33 @@ static void xe_i2c_unregister_adapter(struct xe_i2c *i2c)
*/
void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl)
{
- if (!xe->i2c || !xe->i2c->adapter_irq)
+ if (!xe_i2c_irq_present(xe))
return;
if (master_ctl & I2C_IRQ)
generic_handle_irq_safe(xe->i2c->adapter_irq);
}
+void xe_i2c_irq_reset(struct xe_device *xe)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe_i2c_irq_present(xe))
+ return;
+
+ xe_mmio_rmw32(mmio, I2C_BRIDGE_PCICFGCTL, ACPI_INTR_EN, 0);
+}
+
+void xe_i2c_irq_postinstall(struct xe_device *xe)
+{
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
+
+ if (!xe_i2c_irq_present(xe))
+ return;
+
+ xe_mmio_rmw32(mmio, I2C_BRIDGE_PCICFGCTL, 0, ACPI_INTR_EN);
+}
+
static int xe_i2c_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw_irq_num)
{
@@ -230,7 +269,7 @@ void xe_i2c_pm_suspend(struct xe_device *xe)
{
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ if (!xe_i2c_present(xe))
return;
xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D3hot);
@@ -241,11 +280,11 @@ void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold)
{
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE)
+ if (!xe_i2c_present(xe))
return;
if (d3cold)
- xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY);
+ xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D0);
drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR));
@@ -320,6 +359,7 @@ int xe_i2c_probe(struct xe_device *xe)
if (ret)
goto err_remove_irq;
+ xe_i2c_irq_postinstall(xe);
return devm_add_action_or_reset(drm_dev, xe_i2c_remove, i2c);
err_remove_irq:
diff --git a/drivers/gpu/drm/xe/xe_i2c.h b/drivers/gpu/drm/xe/xe_i2c.h
index b767ed8ce52b..425d8160835f 100644
--- a/drivers/gpu/drm/xe/xe_i2c.h
+++ b/drivers/gpu/drm/xe/xe_i2c.h
@@ -49,12 +49,18 @@ struct xe_i2c {
#if IS_ENABLED(CONFIG_I2C)
int xe_i2c_probe(struct xe_device *xe);
+bool xe_i2c_present(struct xe_device *xe);
void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl);
+void xe_i2c_irq_postinstall(struct xe_device *xe);
+void xe_i2c_irq_reset(struct xe_device *xe);
void xe_i2c_pm_suspend(struct xe_device *xe);
void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold);
#else
static inline int xe_i2c_probe(struct xe_device *xe) { return 0; }
+static inline bool xe_i2c_present(struct xe_device *xe) { return false; }
static inline void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { }
+static inline void xe_i2c_irq_postinstall(struct xe_device *xe) { }
+static inline void xe_i2c_irq_reset(struct xe_device *xe) { }
static inline void xe_i2c_pm_suspend(struct xe_device *xe) { }
static inline void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) { }
#endif
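
The new IRQ reset/postinstall hooks above rely on the driver's read-modify-write helper. Judging from the call sites, xe_mmio_rmw32(mmio, reg, clr, set) clears the clr bits and then sets the set bits; a minimal sketch of that semantic, with the signature inferred from this patch rather than checked against the helper itself:

#include <assert.h>
#include <stdint.h>

static inline uint32_t rmw32(uint32_t old, uint32_t clr, uint32_t set)
{
	return (old & ~clr) | set;
}

int main(void)
{
	uint32_t acpi_intr_en = 1u << 8; /* hypothetical bit position */

	/* xe_i2c_irq_reset():       rmw32(val, ACPI_INTR_EN, 0) clears it */
	assert(rmw32(0xffffffffu, acpi_intr_en, 0) ==
	       (0xffffffffu & ~acpi_intr_en));
	/* xe_i2c_irq_postinstall(): rmw32(val, 0, ACPI_INTR_EN) sets it */
	assert(rmw32(0, 0, acpi_intr_en) == acpi_intr_en);
	return 0;
}
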
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 5df5b8c2a3e4..024e13e606ec 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -18,6 +18,7 @@
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
+#include "xe_hw_error.h"
#include "xe_i2c.h"
#include "xe_memirq.h"
#include "xe_mmio.h"
@@ -138,68 +139,112 @@ void xe_irq_enable_hwe(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
struct xe_mmio *mmio = &gt->mmio;
- u32 ccs_mask, bcs_mask;
- u32 irqs, dmask, smask;
- u32 gsc_mask = 0;
- u32 heci_mask = 0;
+ u32 common_mask, val, gsc_mask = 0, heci_mask = 0,
+ rcs_mask = 0, bcs_mask = 0, vcs_mask = 0, vecs_mask = 0,
+ ccs_mask = 0;
if (xe_device_uses_memirq(xe))
return;
if (xe_device_uc_enabled(xe)) {
- irqs = GT_RENDER_USER_INTERRUPT |
- GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
+ common_mask = GT_MI_USER_INTERRUPT |
+ GT_FLUSH_COMPLETE_INTERRUPT;
+
+ /* Enable Compute Walker Interrupt for non-MSIX platforms */
+ if (GRAPHICS_VERx100(xe) >= 3511 && !xe_device_has_msix(xe)) {
+ rcs_mask |= GT_COMPUTE_WALKER_INTERRUPT;
+ ccs_mask |= GT_COMPUTE_WALKER_INTERRUPT;
+ }
} else {
- irqs = GT_RENDER_USER_INTERRUPT |
- GT_CS_MASTER_ERROR_INTERRUPT |
- GT_CONTEXT_SWITCH_INTERRUPT |
- GT_WAIT_SEMAPHORE_INTERRUPT;
+ common_mask = GT_MI_USER_INTERRUPT |
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT |
+ GT_WAIT_SEMAPHORE_INTERRUPT;
}
- ccs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
- bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
-
- dmask = irqs << 16 | irqs;
- smask = irqs << 16;
+ rcs_mask |= common_mask;
+ bcs_mask |= common_mask;
+ vcs_mask |= common_mask;
+ vecs_mask |= common_mask;
+ ccs_mask |= common_mask;
if (xe_gt_is_main_type(gt)) {
+ /*
+ * For enabling the interrupts, the information about fused off
+ * engines doesn't matter much, but it also lets us check whether
+ * the engine is architecturally available on the platform
+ */
+ u32 ccs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COMPUTE);
+ u32 bcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
+
/* Enable interrupts for each engine class */
- xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask);
- if (ccs_mask)
- xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE, smask);
+ xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE,
+ REG_FIELD_PREP(ENGINE1_MASK, rcs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, bcs_mask));
+ if (ccs_fuse_mask)
+ xe_mmio_write32(mmio, CCS_RSVD_INTR_ENABLE,
+ REG_FIELD_PREP(ENGINE1_MASK, ccs_mask));
/* Unmask interrupts for each engine instance */
- xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, ~smask);
- xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, ~smask);
- if (bcs_mask & (BIT(1)|BIT(2)))
- xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
- if (bcs_mask & (BIT(3)|BIT(4)))
- xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
- if (bcs_mask & (BIT(5)|BIT(6)))
- xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
- if (bcs_mask & (BIT(7)|BIT(8)))
- xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
- if (ccs_mask & (BIT(0)|BIT(1)))
- xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~dmask);
- if (ccs_mask & (BIT(2)|BIT(3)))
- xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~dmask);
+ val = ~REG_FIELD_PREP(ENGINE1_MASK, rcs_mask);
+ xe_mmio_write32(mmio, RCS0_RSVD_INTR_MASK, val);
+ val = ~REG_FIELD_PREP(ENGINE1_MASK, bcs_mask);
+ xe_mmio_write32(mmio, BCS_RSVD_INTR_MASK, val);
+
+ val = ~(REG_FIELD_PREP(ENGINE1_MASK, bcs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, bcs_mask));
+ if (bcs_fuse_mask & (BIT(1)|BIT(2)))
+ xe_mmio_write32(mmio, XEHPC_BCS1_BCS2_INTR_MASK, val);
+ if (bcs_fuse_mask & (BIT(3)|BIT(4)))
+ xe_mmio_write32(mmio, XEHPC_BCS3_BCS4_INTR_MASK, val);
+ if (bcs_fuse_mask & (BIT(5)|BIT(6)))
+ xe_mmio_write32(mmio, XEHPC_BCS5_BCS6_INTR_MASK, val);
+ if (bcs_fuse_mask & (BIT(7)|BIT(8)))
+ xe_mmio_write32(mmio, XEHPC_BCS7_BCS8_INTR_MASK, val);
+
+ val = ~(REG_FIELD_PREP(ENGINE1_MASK, ccs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, ccs_mask));
+ if (ccs_fuse_mask & (BIT(0)|BIT(1)))
+ xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, val);
+ if (ccs_fuse_mask & (BIT(2)|BIT(3)))
+ xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, val);
}
if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) {
+ u32 vcs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_DECODE);
+ u32 vecs_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_VIDEO_ENHANCE);
+ u32 other_fuse_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER);
+
/* Enable interrupts for each engine class */
- xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE, dmask);
+ xe_mmio_write32(mmio, VCS_VECS_INTR_ENABLE,
+ REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, vecs_mask));
/* Unmask interrupts for each engine instance */
- xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, ~dmask);
- xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, ~dmask);
- xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, ~dmask);
+ val = ~(REG_FIELD_PREP(ENGINE1_MASK, vcs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, vcs_mask));
+ if (vcs_fuse_mask & (BIT(0) | BIT(1)))
+ xe_mmio_write32(mmio, VCS0_VCS1_INTR_MASK, val);
+ if (vcs_fuse_mask & (BIT(2) | BIT(3)))
+ xe_mmio_write32(mmio, VCS2_VCS3_INTR_MASK, val);
+ if (vcs_fuse_mask & (BIT(4) | BIT(5)))
+ xe_mmio_write32(mmio, VCS4_VCS5_INTR_MASK, val);
+ if (vcs_fuse_mask & (BIT(6) | BIT(7)))
+ xe_mmio_write32(mmio, VCS6_VCS7_INTR_MASK, val);
+
+ val = ~(REG_FIELD_PREP(ENGINE1_MASK, vecs_mask) |
+ REG_FIELD_PREP(ENGINE0_MASK, vecs_mask));
+ if (vecs_fuse_mask & (BIT(0) | BIT(1)))
+ xe_mmio_write32(mmio, VECS0_VECS1_INTR_MASK, val);
+ if (vecs_fuse_mask & (BIT(2) | BIT(3)))
+ xe_mmio_write32(mmio, VECS2_VECS3_INTR_MASK, val);
/*
* the heci2 interrupt is enabled via the same register as the
* GSCCS interrupts, but it has its own mask register.
*/
- if (xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_OTHER)) {
- gsc_mask = irqs | GSC_ER_COMPLETE;
+ if (other_fuse_mask) {
+ gsc_mask = common_mask | GSC_ER_COMPLETE;
heci_mask = GSC_IRQ_INTF(1);
} else if (xe->info.has_heci_gscfi) {
gsc_mask = GSC_IRQ_INTF(1);
@@ -468,6 +513,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
xe_mmio_write32(mmio, GFX_MSTR_IRQ, master_ctl);
gt_irq_handler(tile, master_ctl, intr_dw, identity);
+ xe_hw_error_irq_handler(tile, master_ctl);
/*
* Display interrupts (including display backlight operations
@@ -492,11 +538,15 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
static void gt_irq_reset(struct xe_tile *tile)
{
struct xe_mmio *mmio = &tile->mmio;
-
- u32 ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
- XE_ENGINE_CLASS_COMPUTE);
- u32 bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
- XE_ENGINE_CLASS_COPY);
+ u32 ccs_mask = ~0;
+ u32 bcs_mask = ~0;
+
+ if (tile->primary_gt) {
+ ccs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
+ XE_ENGINE_CLASS_COMPUTE);
+ bcs_mask = xe_hw_engine_mask_per_class(tile->primary_gt,
+ XE_ENGINE_CLASS_COPY);
+ }
/* Disable RCS, BCS, VCS and VECS class engines. */
xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, 0);
@@ -614,6 +664,7 @@ static void xe_irq_reset(struct xe_device *xe)
tile = xe_device_get_root_tile(xe);
mask_and_disable(tile, GU_MISC_IRQ_OFFSET);
xe_display_irq_reset(xe);
+ xe_i2c_irq_reset(xe);
/*
* The tile's top-level status register should be the last one
@@ -654,7 +705,8 @@ static void xe_irq_postinstall(struct xe_device *xe)
xe_memirq_postinstall(&tile->memirq);
}
- xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe));
+ xe_display_irq_postinstall(xe);
+ xe_i2c_irq_postinstall(xe);
/*
* ASLE backlight operations are reported via GUnit GSE interrupts
@@ -756,6 +808,8 @@ int xe_irq_install(struct xe_device *xe)
int nvec = 1;
int err;
+ xe_hw_error_init(xe);
+
xe_irq_reset(xe);
if (xe_device_has_msix(xe)) {
@@ -843,22 +897,6 @@ static int xe_irq_msix_init(struct xe_device *xe)
return 0;
}
-static irqreturn_t guc2host_irq_handler(int irq, void *arg)
-{
- struct xe_device *xe = arg;
- struct xe_tile *tile;
- u8 id;
-
- if (!atomic_read(&xe->irq.enabled))
- return IRQ_NONE;
-
- for_each_tile(tile, xe, id)
- xe_guc_irq_handler(&tile->primary_gt->uc.guc,
- GUC_INTR_GUC2HOST);
-
- return IRQ_HANDLED;
-}
-
static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
{
unsigned int tile_id, gt_id;
@@ -975,7 +1013,7 @@ int xe_irq_msix_request_irqs(struct xe_device *xe)
u16 msix;
msix = GUC2HOST_MSIX;
- err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
+ err = xe_irq_msix_request_irq(xe, xe_irq_handler(xe), xe,
DRIVER_NAME "-guc2host", false, &msix);
if (err)
return err;
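
The interrupt-enable rework above replaces the hand-rolled `dmask = irqs << 16 | irqs` with REG_FIELD_PREP() pairs. Assuming ENGINE1_MASK covers bits 31:16 and ENGINE0_MASK bits 15:0 (the layout implied by the old shifts), the two forms are equivalent when the same mask goes into both fields; a standalone check:

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's FIELD_PREP()/REG_FIELD_PREP() */
#define FIELD_PREP(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define ENGINE1_MASK 0xffff0000u /* assumed: upper engine-class field */
#define ENGINE0_MASK 0x0000ffffu /* assumed: lower engine-class field */

int main(void)
{
	uint32_t irqs = 0x0101;
	uint32_t dmask = irqs << 16 | irqs; /* the old encoding */

	assert((FIELD_PREP(ENGINE1_MASK, irqs) |
		FIELD_PREP(ENGINE0_MASK, irqs)) == dmask);
	return 0;
}
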
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.c b/drivers/gpu/drm/xe/xe_late_bind_fw.c
new file mode 100644
index 000000000000..768442ca7da6
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw.c
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/component.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+
+#include <drm/drm_managed.h>
+#include <drm/intel/i915_component.h>
+#include <drm/intel/intel_lb_mei_interface.h>
+#include <drm/drm_print.h>
+
+#include "xe_device.h"
+#include "xe_late_bind_fw.h"
+#include "xe_pcode.h"
+#include "xe_pcode_api.h"
+#include "xe_pm.h"
+
+/*
+ * The component should load quite quickly in most cases, but it could take
+ * a bit, so use a generously large timeout to cover the worst-case scenario.
+ */
+#define LB_INIT_TIMEOUT_MS 20000
+
+/*
+ * Retry for up to 6 seconds, in steps of 200 ms, to allow time for
+ * other OS components to release the MEI CL handle
+ */
+#define LB_FW_LOAD_RETRY_MAXCOUNT 30
+#define LB_FW_LOAD_RETRY_PAUSE_MS 200
+
+static const u32 fw_id_to_type[] = {
+ [XE_LB_FW_FAN_CONTROL] = INTEL_LB_TYPE_FAN_CONTROL,
+ };
+
+static const char * const fw_id_to_name[] = {
+ [XE_LB_FW_FAN_CONTROL] = "fan_control",
+ };
+
+static struct xe_device *
+late_bind_to_xe(struct xe_late_bind *late_bind)
+{
+ return container_of(late_bind, struct xe_device, late_bind);
+}
+
+static struct xe_device *
+late_bind_fw_to_xe(struct xe_late_bind_fw *lb_fw)
+{
+ return container_of(lb_fw, struct xe_device, late_bind.late_bind_fw[lb_fw->id]);
+}
+
+/* Refer to the "Late Bind based Firmware Layout" documentation entry for details */
+static int parse_cpd_header(struct xe_late_bind_fw *lb_fw,
+ const void *data, size_t size, const char *manifest_entry)
+{
+ struct xe_device *xe = late_bind_fw_to_xe(lb_fw);
+ const struct gsc_cpd_header_v2 *header = data;
+ const struct gsc_manifest_header *manifest;
+ const struct gsc_cpd_entry *entry;
+ size_t min_size = sizeof(*header);
+ u32 offset = 0;
+ int i;
+
+ /* manifest_entry is mandatory */
+ xe_assert(xe, manifest_entry);
+
+ if (size < min_size || header->header_marker != GSC_CPD_HEADER_MARKER)
+ return -ENOENT;
+
+ if (header->header_length < sizeof(struct gsc_cpd_header_v2)) {
+ drm_err(&xe->drm, "%s late binding fw: Invalid CPD header length %u!\n",
+ fw_id_to_name[lb_fw->id], header->header_length);
+ return -EINVAL;
+ }
+
+ min_size = header->header_length + sizeof(struct gsc_cpd_entry) * header->num_of_entries;
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ /* Look for the manifest first */
+ entry = (void *)header + header->header_length;
+ for (i = 0; i < header->num_of_entries; i++, entry++)
+ if (strcmp(entry->name, manifest_entry) == 0)
+ offset = entry->offset & GSC_CPD_ENTRY_OFFSET_MASK;
+
+ if (!offset) {
+ drm_err(&xe->drm, "%s late binding fw: Failed to find manifest_entry\n",
+ fw_id_to_name[lb_fw->id]);
+ return -ENODATA;
+ }
+
+ min_size = offset + sizeof(struct gsc_manifest_header);
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ manifest = data + offset;
+
+ lb_fw->version = manifest->fw_version;
+
+ return 0;
+}
+
+/* Refer to the "Late Bind based Firmware Layout" documentation entry for details */
+static int parse_lb_layout(struct xe_late_bind_fw *lb_fw,
+ const void *data, size_t size, const char *fpt_entry)
+{
+ struct xe_device *xe = late_bind_fw_to_xe(lb_fw);
+ const struct csc_fpt_header *header = data;
+ const struct csc_fpt_entry *entry;
+ size_t min_size = sizeof(*header);
+ u32 offset = 0;
+ int i;
+
+ /* fpt_entry is mandatory */
+ xe_assert(xe, fpt_entry);
+
+ if (size < min_size || header->header_marker != CSC_FPT_HEADER_MARKER)
+ return -ENOENT;
+
+ if (header->header_length < sizeof(struct csc_fpt_header)) {
+ drm_err(&xe->drm, "%s late binding fw: Invalid FPT header length %u!\n",
+ fw_id_to_name[lb_fw->id], header->header_length);
+ return -EINVAL;
+ }
+
+ min_size = header->header_length + sizeof(struct csc_fpt_entry) * header->num_of_entries;
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ /* Look for the cpd header first */
+ entry = (void *)header + header->header_length;
+ for (i = 0; i < header->num_of_entries; i++, entry++)
+ if (strcmp(entry->name, fpt_entry) == 0)
+ offset = entry->offset;
+
+ if (!offset) {
+ drm_err(&xe->drm, "%s late binding fw: Failed to find fpt_entry\n",
+ fw_id_to_name[lb_fw->id]);
+ return -ENODATA;
+ }
+
+ min_size = offset + sizeof(struct gsc_cpd_header_v2);
+ if (size < min_size) {
+ drm_err(&xe->drm, "%s late binding fw: too small! %zu < %zu\n",
+ fw_id_to_name[lb_fw->id], size, min_size);
+ return -ENODATA;
+ }
+
+ return parse_cpd_header(lb_fw, data + offset, size - offset, "LTES.man");
+}
+
+static const char *xe_late_bind_parse_status(uint32_t status)
+{
+ switch (status) {
+ case INTEL_LB_STATUS_SUCCESS:
+ return "success";
+ case INTEL_LB_STATUS_4ID_MISMATCH:
+ return "4Id Mismatch";
+ case INTEL_LB_STATUS_ARB_FAILURE:
+ return "ARB Failure";
+ case INTEL_LB_STATUS_GENERAL_ERROR:
+ return "General Error";
+ case INTEL_LB_STATUS_INVALID_PARAMS:
+ return "Invalid Params";
+ case INTEL_LB_STATUS_INVALID_SIGNATURE:
+ return "Invalid Signature";
+ case INTEL_LB_STATUS_INVALID_PAYLOAD:
+ return "Invalid Payload";
+ case INTEL_LB_STATUS_TIMEOUT:
+ return "Timeout";
+ default:
+ return "Unknown error";
+ }
+}
+
+static int xe_late_bind_fw_num_fans(struct xe_late_bind *late_bind, u32 *num_fans)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct xe_tile *root_tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_read(root_tile,
+ PCODE_MBOX(FAN_SPEED_CONTROL, FSC_READ_NUM_FANS, 0), num_fans, NULL);
+}
+
+void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct xe_late_bind_fw *lbfw;
+ int fw_id;
+
+ for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) {
+ lbfw = &late_bind->late_bind_fw[fw_id];
+ if (lbfw->payload && late_bind->wq) {
+ drm_dbg(&xe->drm, "Flush work: load %s firmware\n",
+ fw_id_to_name[lbfw->id]);
+ flush_work(&lbfw->work);
+ }
+ }
+}
+
+static void xe_late_bind_work(struct work_struct *work)
+{
+ struct xe_late_bind_fw *lbfw = container_of(work, struct xe_late_bind_fw, work);
+ struct xe_late_bind *late_bind = container_of(lbfw, struct xe_late_bind,
+ late_bind_fw[lbfw->id]);
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ int retry = LB_FW_LOAD_RETRY_MAXCOUNT;
+ int ret;
+ int slept;
+
+ xe_device_assert_mem_access(xe);
+
+ /* we can queue this before the component is bound */
+ for (slept = 0; slept < LB_INIT_TIMEOUT_MS; slept += 100) {
+ if (late_bind->component.ops)
+ break;
+ msleep(100);
+ }
+
+ if (!late_bind->component.ops) {
+ drm_err(&xe->drm, "Late bind component not bound\n");
+ /* Do not re-attempt fw load */
+ drmm_kfree(&xe->drm, (void *)lbfw->payload);
+ lbfw->payload = NULL;
+ goto out;
+ }
+
+ drm_dbg(&xe->drm, "Load %s firmware\n", fw_id_to_name[lbfw->id]);
+
+ do {
+ ret = late_bind->component.ops->push_payload(late_bind->component.mei_dev,
+ lbfw->type,
+ lbfw->flags,
+ lbfw->payload,
+ lbfw->payload_size);
+ if (!ret)
+ break;
+ msleep(LB_FW_LOAD_RETRY_PAUSE_MS);
+ } while (--retry && ret == -EBUSY);
+
+ if (!ret) {
+ drm_dbg(&xe->drm, "Load %s firmware successful\n",
+ fw_id_to_name[lbfw->id]);
+ goto out;
+ }
+
+ if (ret > 0)
+ drm_err(&xe->drm, "Load %s firmware failed with err %d, %s\n",
+ fw_id_to_name[lbfw->id], ret, xe_late_bind_parse_status(ret));
+ else
+ drm_err(&xe->drm, "Load %s firmware failed with err %d",
+ fw_id_to_name[lbfw->id], ret);
+ /* Do not re-attempt fw load */
+ drmm_kfree(&xe->drm, (void *)lbfw->payload);
+ lbfw->payload = NULL;
+
+out:
+ xe_pm_runtime_put(xe);
+}
+
+int xe_late_bind_fw_load(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct xe_late_bind_fw *lbfw;
+ int fw_id;
+
+ if (!late_bind->component_added)
+ return -ENODEV;
+
+ if (late_bind->disable)
+ return 0;
+
+ for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) {
+ lbfw = &late_bind->late_bind_fw[fw_id];
+ if (lbfw->payload) {
+ xe_pm_runtime_get_noresume(xe);
+ queue_work(late_bind->wq, &lbfw->work);
+ }
+ }
+ return 0;
+}
+
+static int __xe_late_bind_fw_init(struct xe_late_bind *late_bind, u32 fw_id)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ struct xe_late_bind_fw *lb_fw;
+ const struct firmware *fw;
+ u32 num_fans;
+ int ret;
+
+ if (fw_id >= XE_LB_FW_MAX_ID)
+ return -EINVAL;
+
+ lb_fw = &late_bind->late_bind_fw[fw_id];
+
+ lb_fw->id = fw_id;
+ lb_fw->type = fw_id_to_type[lb_fw->id];
+ lb_fw->flags &= ~INTEL_LB_FLAG_IS_PERSISTENT;
+
+ if (lb_fw->type == INTEL_LB_TYPE_FAN_CONTROL) {
+ ret = xe_late_bind_fw_num_fans(late_bind, &num_fans);
+ if (ret) {
+ drm_dbg(&xe->drm, "Failed to read number of fans: %d\n", ret);
+ return 0; /* Not a fatal error, continue without fan control */
+ }
+ drm_dbg(&xe->drm, "Number of Fans: %d\n", num_fans);
+ if (!num_fans)
+ return 0;
+ }
+
+ snprintf(lb_fw->blob_path, sizeof(lb_fw->blob_path), "xe/%s_8086_%04x_%04x_%04x.bin",
+ fw_id_to_name[lb_fw->id], pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ drm_dbg(&xe->drm, "Request late binding firmware %s\n", lb_fw->blob_path);
+ ret = firmware_request_nowarn(&fw, lb_fw->blob_path, xe->drm.dev);
+ if (ret) {
+ drm_dbg(&xe->drm, "%s late binding fw not available for current device",
+ fw_id_to_name[lb_fw->id]);
+ return 0;
+ }
+
+ if (fw->size > XE_LB_MAX_PAYLOAD_SIZE) {
+ drm_err(&xe->drm, "Firmware %s size %zu is larger than max pay load size %u\n",
+ lb_fw->blob_path, fw->size, XE_LB_MAX_PAYLOAD_SIZE);
+ release_firmware(fw);
+ return -ENODATA;
+ }
+
+ ret = parse_lb_layout(lb_fw, fw->data, fw->size, "LTES");
+ if (ret)
+ return ret;
+
+ lb_fw->payload_size = fw->size;
+ lb_fw->payload = drmm_kzalloc(&xe->drm, lb_fw->payload_size, GFP_KERNEL);
+ if (!lb_fw->payload) {
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ drm_info(&xe->drm, "Using %s firmware from %s version %u.%u.%u.%u\n",
+ fw_id_to_name[lb_fw->id], lb_fw->blob_path,
+ lb_fw->version.major, lb_fw->version.minor,
+ lb_fw->version.hotfix, lb_fw->version.build);
+
+ memcpy((void *)lb_fw->payload, fw->data, lb_fw->payload_size);
+ release_firmware(fw);
+ INIT_WORK(&lb_fw->work, xe_late_bind_work);
+
+ return 0;
+}
+
+static int xe_late_bind_fw_init(struct xe_late_bind *late_bind)
+{
+ int ret;
+ int fw_id;
+
+ late_bind->wq = alloc_ordered_workqueue("late-bind-ordered-wq", 0);
+ if (!late_bind->wq)
+ return -ENOMEM;
+
+ for (fw_id = 0; fw_id < XE_LB_FW_MAX_ID; fw_id++) {
+ ret = __xe_late_bind_fw_init(late_bind, fw_id);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xe_late_bind_component_bind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
+ struct xe_late_bind *late_bind = &xe->late_bind;
+
+ late_bind->component.ops = data;
+ late_bind->component.mei_dev = mei_kdev;
+
+ return 0;
+}
+
+static void xe_late_bind_component_unbind(struct device *xe_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct xe_device *xe = kdev_to_xe_device(xe_kdev);
+ struct xe_late_bind *late_bind = &xe->late_bind;
+
+ xe_late_bind_wait_for_worker_completion(late_bind);
+
+ late_bind->component.ops = NULL;
+}
+
+static const struct component_ops xe_late_bind_component_ops = {
+ .bind = xe_late_bind_component_bind,
+ .unbind = xe_late_bind_component_unbind,
+};
+
+static void xe_late_bind_remove(void *arg)
+{
+ struct xe_late_bind *late_bind = arg;
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+
+ xe_late_bind_wait_for_worker_completion(late_bind);
+
+ late_bind->component_added = false;
+
+ component_del(xe->drm.dev, &xe_late_bind_component_ops);
+ if (late_bind->wq) {
+ destroy_workqueue(late_bind->wq);
+ late_bind->wq = NULL;
+ }
+}
+
+/**
+ * xe_late_bind_init() - add xe mei late binding component
+ * @late_bind: pointer to late bind structure.
+ *
+ * Return: 0 if the initialization was successful, a negative errno otherwise.
+ */
+int xe_late_bind_init(struct xe_late_bind *late_bind)
+{
+ struct xe_device *xe = late_bind_to_xe(late_bind);
+ int err;
+
+ if (!xe->info.has_late_bind)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_LB) || !IS_ENABLED(CONFIG_INTEL_MEI_GSC)) {
+ drm_info(&xe->drm, "Can't init xe mei late bind missing mei component\n");
+ return 0;
+ }
+
+ err = component_add_typed(xe->drm.dev, &xe_late_bind_component_ops,
+ INTEL_COMPONENT_LB);
+ if (err < 0) {
+ drm_err(&xe->drm, "Failed to add mei late bind component (%pe)\n", ERR_PTR(err));
+ return err;
+ }
+
+ late_bind->component_added = true;
+
+ err = devm_add_action_or_reset(xe->drm.dev, xe_late_bind_remove, late_bind);
+ if (err)
+ return err;
+
+ err = xe_late_bind_fw_init(late_bind);
+ if (err)
+ return err;
+
+ return xe_late_bind_fw_load(late_bind);
+}
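
The firmware push in xe_late_bind_work() above retries only on -EBUSY, bounded to roughly 6 seconds total (30 attempts x 200 ms), to give other MEI clients time to release the handle. The skeleton of that loop as a standalone sketch, with the payload push stubbed out:

#include <errno.h>
#include <unistd.h>

/* Stub: fail with -EBUSY until the countdown reaches zero */
static int push_payload_stub(int *busy_countdown)
{
	return --(*busy_countdown) > 0 ? -EBUSY : 0;
}

static int retry_push(void)
{
	int retry = 30;     /* LB_FW_LOAD_RETRY_MAXCOUNT */
	int busy = 5, ret;

	do {
		ret = push_payload_stub(&busy);
		if (!ret)
			break;
		usleep(200 * 1000); /* LB_FW_LOAD_RETRY_PAUSE_MS */
	} while (--retry && ret == -EBUSY);

	return ret; /* 0, or the last error once retries are exhausted */
}

int main(void)
{
	return retry_push() ? 1 : 0;
}
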
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw.h b/drivers/gpu/drm/xe/xe_late_bind_fw.h
new file mode 100644
index 000000000000..07e437390539
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_LATE_BIND_FW_H_
+#define _XE_LATE_BIND_FW_H_
+
+#include <linux/types.h>
+
+struct xe_late_bind;
+
+int xe_late_bind_init(struct xe_late_bind *late_bind);
+int xe_late_bind_fw_load(struct xe_late_bind *late_bind);
+void xe_late_bind_wait_for_worker_completion(struct xe_late_bind *late_bind);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_late_bind_fw_types.h b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
new file mode 100644
index 000000000000..0f5da89ce98b
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_late_bind_fw_types.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_LATE_BIND_TYPES_H_
+#define _XE_LATE_BIND_TYPES_H_
+
+#include <linux/iosys-map.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "xe_uc_fw_abi.h"
+
+#define XE_LB_MAX_PAYLOAD_SIZE SZ_4K
+
+/**
+ * xe_late_bind_fw_id - enum to determine late binding fw index
+ */
+enum xe_late_bind_fw_id {
+ XE_LB_FW_FAN_CONTROL = 0,
+ XE_LB_FW_MAX_ID
+};
+
+/**
+ * struct xe_late_bind_fw
+ */
+struct xe_late_bind_fw {
+ /** @id: firmware index */
+ u32 id;
+ /** @blob_path: firmware binary path */
+ char blob_path[PATH_MAX];
+ /** @type: firmware type */
+ u32 type;
+ /** @flags: firmware flags */
+ u32 flags;
+ /** @payload: to store the late binding blob */
+ const u8 *payload;
+ /** @payload_size: late binding blob payload_size */
+ size_t payload_size;
+ /** @work: worker to upload the late binding blob */
+ struct work_struct work;
+ /** @version: late binding blob manifest version */
+ struct gsc_version version;
+};
+
+/**
+ * struct xe_late_bind_component - Late Binding services component
+ * @mei_dev: device that provide Late Binding service.
+ * @ops: Ops implemented by Late Binding driver, used by Xe driver.
+ *
+ * Communication between Xe and MEI drivers for Late Binding services
+ */
+struct xe_late_bind_component {
+ struct device *mei_dev;
+ const struct intel_lb_component_ops *ops;
+};
+
+/**
+ * struct xe_late_bind
+ */
+struct xe_late_bind {
+ /** @component: struct for communication with mei component */
+ struct xe_late_bind_component component;
+ /** @late_bind_fw: late binding firmware array */
+ struct xe_late_bind_fw late_bind_fw[XE_LB_FW_MAX_ID];
+ /** @wq: workqueue to submit requests to download the late bind blob */
+ struct workqueue_struct *wq;
+ /** @component_added: whether the component has been added */
+ bool component_added;
+ /** @disable: to block late binding reload during pm resume flow */
+ bool disable;
+};
+
+#endif
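
For reference, the firmware lookup in __xe_late_bind_fw_init() composes a per-device blob path from the PCI IDs. With the fan_control entry and some made-up IDs (illustrative placeholders only), the snprintf() pattern from the patch expands like this:

#include <stdio.h>

int main(void)
{
	char path[256];

	/* same format string as the patch; IDs below are placeholders */
	snprintf(path, sizeof(path), "xe/%s_8086_%04x_%04x_%04x.bin",
		 "fan_control", 0xe20b, 0x8086, 0x1234);
	puts(path); /* xe/fan_control_8086_e20b_8086_1234.bin */
	return 0;
}
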
diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c
index a2000307d5bf..4dc1de482eee 100644
--- a/drivers/gpu/drm/xe/xe_lmtt.c
+++ b/drivers/gpu/drm/xe/xe_lmtt.c
@@ -11,13 +11,13 @@
#include "xe_assert.h"
#include "xe_bo.h"
-#include "xe_gt_tlb_invalidation.h"
+#include "xe_tlb_inval.h"
#include "xe_lmtt.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_res_cursor.h"
#include "xe_sriov.h"
-#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_printk.h"
/**
* DOC: Local Memory Translation Table
@@ -32,7 +32,7 @@
*/
#define lmtt_assert(lmtt, condition) xe_tile_assert(lmtt_to_tile(lmtt), condition)
-#define lmtt_debug(lmtt, msg...) xe_sriov_dbg_verbose(lmtt_to_xe(lmtt), "LMTT: " msg)
+#define lmtt_debug(lmtt, msg...) xe_tile_sriov_dbg_verbose(lmtt_to_tile(lmtt), "LMTT: " msg)
static bool xe_has_multi_level_lmtt(struct xe_device *xe)
{
@@ -67,12 +67,12 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level
goto out;
}
- bo = xe_bo_create_pin_map(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt), NULL,
- PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
- lmtt->ops->lmtt_pte_num(level)),
- ttm_bo_type_kernel,
- XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
- XE_BO_FLAG_NEEDS_64K);
+ bo = xe_bo_create_pin_map_novm(lmtt_to_xe(lmtt), lmtt_to_tile(lmtt),
+ PAGE_ALIGN(lmtt->ops->lmtt_pte_size(level) *
+ lmtt->ops->lmtt_pte_num(level)),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(lmtt_to_tile(lmtt)) |
+ XE_BO_FLAG_NEEDS_64K, false);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_free_pt;
@@ -195,14 +195,17 @@ static void lmtt_setup_dir_ptr(struct xe_lmtt *lmtt)
struct xe_tile *tile = lmtt_to_tile(lmtt);
struct xe_device *xe = tile_to_xe(tile);
dma_addr_t offset = xe_bo_main_addr(lmtt->pd->bo, XE_PAGE_SIZE);
+ struct xe_gt *gt;
+ u8 id;
lmtt_debug(lmtt, "DIR offset %pad\n", &offset);
lmtt_assert(lmtt, xe_bo_is_vram(lmtt->pd->bo));
lmtt_assert(lmtt, IS_ALIGNED(offset, SZ_64K));
- xe_mmio_write32(&tile->mmio,
- GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
- LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
+ for_each_gt_on_tile(gt, tile, id)
+ xe_mmio_write32(&gt->mmio,
+ GRAPHICS_VER(xe) >= 20 ? XE2_LMEM_CFG : LMEM_CFG,
+ LMEM_EN | REG_FIELD_PREP(LMTT_DIR_PTR, offset / SZ_64K));
}
/**
@@ -225,8 +228,8 @@ void xe_lmtt_init_hw(struct xe_lmtt *lmtt)
static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
- struct xe_gt_tlb_invalidation_fence fences[XE_MAX_GT_PER_TILE];
- struct xe_gt_tlb_invalidation_fence *fence = fences;
+ struct xe_tlb_inval_fence fences[XE_MAX_GT_PER_TILE];
+ struct xe_tlb_inval_fence *fence = fences;
struct xe_tile *tile = lmtt_to_tile(lmtt);
struct xe_gt *gt;
int result = 0;
@@ -234,8 +237,8 @@ static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
u8 id;
for_each_gt_on_tile(gt, tile, id) {
- xe_gt_tlb_invalidation_fence_init(gt, fence, true);
- err = xe_gt_tlb_invalidation_all(gt, fence);
+ xe_tlb_inval_fence_init(&gt->tlb_inval, fence, true);
+ err = xe_tlb_inval_all(&gt->tlb_inval, fence);
result = result ?: err;
fence++;
}
@@ -249,7 +252,7 @@ static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
*/
fence = fences;
for_each_gt_on_tile(gt, tile, id)
- xe_gt_tlb_invalidation_fence_wait(fence++);
+ xe_tlb_inval_fence_wait(fence++);
return result;
}
@@ -264,15 +267,14 @@ static int lmtt_invalidate_hw(struct xe_lmtt *lmtt)
*/
void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt)
{
- struct xe_device *xe = lmtt_to_xe(lmtt);
int err;
- lmtt_assert(lmtt, IS_SRIOV_PF(xe));
+ lmtt_assert(lmtt, IS_SRIOV_PF(lmtt_to_xe(lmtt)));
err = lmtt_invalidate_hw(lmtt);
if (err)
- xe_sriov_warn(xe, "LMTT%u invalidation failed (%pe)",
- lmtt_to_tile(lmtt)->id, ERR_PTR(err));
+ xe_tile_sriov_err(lmtt_to_tile(lmtt), "LMTT invalidation failed (%pe)",
+ ERR_PTR(err));
}
static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt,
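
lmtt_invalidate_hw() above keeps its loops symmetric on purpose: every fence that was initialized gets waited on, even when a later submission failed, and only the first error is reported. The shape of that pattern, reduced to a standalone sketch with stubbed fences (using the GNU C `a ?: b` idiom, as the patch itself does):

struct fence { int issued; };

static int submit(struct fence *f, int fail)
{
	f->issued = 1;
	return fail ? -5 /* -EIO */ : 0;
}

static void wait_fence(struct fence *f)
{
	(void)f; /* stub: a real fence would block until signalled */
}

static int invalidate_all(int n, int failing_idx)
{
	struct fence fences[8];
	int result = 0;

	for (int i = 0; i < n; i++)        /* issue everything first */
		result = result ?: submit(&fences[i], i == failing_idx);

	for (int i = 0; i < n; i++)        /* then wait on every fence */
		wait_fence(&fences[i]);

	return result; /* first error, if any */
}

int main(void)
{
	return invalidate_all(2, 1) ? 1 : 0;
}
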
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 6d38411bdeba..b5083c99dd50 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -8,6 +8,7 @@
#include <generated/xe_wa_oob.h>
#include <linux/ascii85.h>
+#include <linux/panic.h>
#include "instructions/xe_mi_commands.h"
#include "instructions/xe_gfxpipe_commands.h"
@@ -16,6 +17,7 @@
#include "regs/xe_lrc_layout.h"
#include "xe_bb.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue_types.h"
@@ -41,7 +43,6 @@
#define LRC_PPHWSP_SIZE SZ_4K
#define LRC_INDIRECT_CTX_BO_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
-#define LRC_WA_BB_SIZE SZ_4K
/*
* Layout of the LRC and associated data allocated as
@@ -76,6 +77,17 @@ lrc_to_xe(struct xe_lrc *lrc)
static bool
gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (XE_GT_WA(gt, 16010904313) &&
+ (class == XE_ENGINE_CLASS_RENDER ||
+ class == XE_ENGINE_CLASS_COMPUTE))
+ return true;
+
+ if (xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev),
+ class, NULL))
+ return true;
+
return false;
}
@@ -692,7 +704,13 @@ u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
return xe_lrc_pphwsp_offset(lrc) + LRC_PPHWSP_SIZE;
}
-static size_t lrc_reg_size(struct xe_device *xe)
+/**
+ * xe_lrc_reg_size() - Get size of the LRC registers area within queues
+ * @xe: the &xe_device struct instance
+ *
+ * Returns: Size of the LRC registers area for the current platform
+ */
+size_t xe_lrc_reg_size(struct xe_device *xe)
{
if (GRAPHICS_VERx100(xe) >= 1250)
return 96 * sizeof(u32);
@@ -702,7 +720,7 @@ static size_t lrc_reg_size(struct xe_device *xe)
size_t xe_lrc_skip_size(struct xe_device *xe)
{
- return LRC_PPHWSP_SIZE + lrc_reg_size(xe);
+ return LRC_PPHWSP_SIZE + xe_lrc_reg_size(xe);
}
static inline u32 __xe_lrc_seqno_offset(struct xe_lrc *lrc)
@@ -943,6 +961,47 @@ static void *empty_lrc_data(struct xe_hw_engine *hwe)
return data;
}
+/**
+ * xe_default_lrc_update_memirq_regs_with_address - Re-compute GGTT references in default LRC
+ * of the given engine.
+ * @hwe: the &xe_hw_engine struct instance
+ */
+void xe_default_lrc_update_memirq_regs_with_address(struct xe_hw_engine *hwe)
+{
+ struct xe_gt *gt = hwe->gt;
+ u32 *regs;
+
+ if (!gt->default_lrc[hwe->class])
+ return;
+
+ regs = gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE;
+ set_memory_based_intr(regs, hwe);
+}
+
+/**
+ * xe_lrc_update_memirq_regs_with_address - Re-compute GGTT references in mem interrupt data
+ * for the given LRC.
+ * @lrc: the &xe_lrc struct instance
+ * @hwe: the &xe_hw_engine struct instance
+ * @regs: scratch buffer to be used as temporary storage
+ */
+void xe_lrc_update_memirq_regs_with_address(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *regs)
+{
+ struct xe_gt *gt = hwe->gt;
+ struct iosys_map map;
+ size_t regs_len;
+
+ if (!xe_device_uses_memirq(gt_to_xe(gt)))
+ return;
+
+ map = __xe_lrc_regs_map(lrc);
+ regs_len = xe_lrc_reg_size(gt_to_xe(gt));
+ xe_map_memcpy_from(gt_to_xe(gt), regs, &map, 0, regs_len);
+ set_memory_based_intr(regs, hwe);
+ xe_map_memcpy_to(gt_to_xe(gt), &map, 0, regs, regs_len);
+}
+
static void xe_lrc_set_ppgtt(struct xe_lrc *lrc, struct xe_vm *vm)
{
u64 desc = xe_vm_pdp4_descriptor(vm, gt_to_tile(lrc->gt));
@@ -1014,6 +1073,121 @@ static ssize_t setup_utilization_wa(struct xe_lrc *lrc,
return cmd - batch;
}
+static ssize_t setup_timestamp_wa(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ const u32 ts_addr = __xe_lrc_ctx_timestamp_ggtt_addr(lrc);
+ u32 *cmd = batch;
+
+ if (!XE_GT_WA(lrc->gt, 16010904313) ||
+ !(hwe->class == XE_ENGINE_CLASS_RENDER ||
+ hwe->class == XE_ENGINE_CLASS_COMPUTE ||
+ hwe->class == XE_ENGINE_CLASS_COPY ||
+ hwe->class == XE_ENGINE_CLASS_VIDEO_DECODE ||
+ hwe->class == XE_ENGINE_CLASS_VIDEO_ENHANCE))
+ return 0;
+
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
+ return -ENOSPC;
+
+ *cmd++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT | MI_LRI_LRM_CS_MMIO |
+ MI_LRM_ASYNC;
+ *cmd++ = RING_CTX_TIMESTAMP(0).addr;
+ *cmd++ = ts_addr;
+ *cmd++ = 0;
+
+ *cmd++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT | MI_LRI_LRM_CS_MMIO |
+ MI_LRM_ASYNC;
+ *cmd++ = RING_CTX_TIMESTAMP(0).addr;
+ *cmd++ = ts_addr;
+ *cmd++ = 0;
+
+ *cmd++ = MI_LOAD_REGISTER_MEM | MI_LRM_USE_GGTT | MI_LRI_LRM_CS_MMIO;
+ *cmd++ = RING_CTX_TIMESTAMP(0).addr;
+ *cmd++ = ts_addr;
+ *cmd++ = 0;
+
+ return cmd - batch;
+}
+
+static ssize_t setup_configfs_post_ctx_restore_bb(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ struct xe_device *xe = gt_to_xe(lrc->gt);
+ const u32 *user_batch;
+ u32 *cmd = batch;
+ u32 count;
+
+ count = xe_configfs_get_ctx_restore_post_bb(to_pci_dev(xe->drm.dev),
+ hwe->class, &user_batch);
+ if (!count)
+ return 0;
+
+ if (count > max_len)
+ return -ENOSPC;
+
+ /*
+ * This should be used only for tests and validation. Taint the kernel
+ * as anything could be submitted directly in context switches
+ */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+ memcpy(cmd, user_batch, count * sizeof(u32));
+ cmd += count;
+
+ return cmd - batch;
+}
+
+static ssize_t setup_configfs_mid_ctx_restore_bb(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ struct xe_device *xe = gt_to_xe(lrc->gt);
+ const u32 *user_batch;
+ u32 *cmd = batch;
+ u32 count;
+
+ count = xe_configfs_get_ctx_restore_mid_bb(to_pci_dev(xe->drm.dev),
+ hwe->class, &user_batch);
+ if (!count)
+ return 0;
+
+ if (count > max_len)
+ return -ENOSPC;
+
+ /*
+ * This should be used only for tests and validation. Taint the kernel
+ * as anything could be submitted directly in context switches
+ */
+ add_taint(TAINT_TEST, LOCKDEP_STILL_OK);
+
+ memcpy(cmd, user_batch, count * sizeof(u32));
+ cmd += count;
+
+ return cmd - batch;
+}
+
+static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc,
+ struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
+{
+ u32 *cmd = batch;
+
+ if (!XE_GT_WA(lrc->gt, 18022495364) ||
+ hwe->class != XE_ENGINE_CLASS_RENDER)
+ return 0;
+
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 3))
+ return -ENOSPC;
+
+ *cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
+ *cmd++ = CS_DEBUG_MODE1(0).addr;
+ *cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
+
+ return cmd - batch;
+}
+
struct bo_setup {
ssize_t (*setup)(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
u32 *batch, size_t max_size);
@@ -1040,13 +1214,10 @@ static int setup_bo(struct bo_setup_state *state)
ssize_t remain;
if (state->lrc->bo->vmap.is_iomem) {
- state->buffer = kmalloc(state->max_size, GFP_KERNEL);
- if (!state->buffer)
- return -ENOMEM;
+ xe_gt_assert(state->hwe->gt, state->buffer);
state->ptr = state->buffer;
} else {
state->ptr = state->lrc->bo->vmap.vaddr + state->offset;
- state->buffer = NULL;
}
remain = state->max_size / sizeof(u32);
@@ -1071,30 +1242,39 @@ static int setup_bo(struct bo_setup_state *state)
return 0;
fail:
- kfree(state->buffer);
return -ENOSPC;
}
static void finish_bo(struct bo_setup_state *state)
{
- if (!state->buffer)
+ if (!state->lrc->bo->vmap.is_iomem)
return;
xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap,
state->offset, state->buffer,
state->written * sizeof(u32));
- kfree(state->buffer);
}
-static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+/**
+ * xe_lrc_setup_wa_bb_with_scratch - Execute all wa bb setup callbacks.
+ * @lrc: the &xe_lrc struct instance
+ * @hwe: the &xe_hw_engine struct instance
+ * @scratch: preallocated scratch buffer for temporary storage
+ * Return: 0 on success, negative error code on failure
+ */
+int xe_lrc_setup_wa_bb_with_scratch(struct xe_lrc *lrc, struct xe_hw_engine *hwe, u32 *scratch)
{
static const struct bo_setup funcs[] = {
+ { .setup = setup_timestamp_wa },
+ { .setup = setup_invalidate_state_cache_wa },
{ .setup = setup_utilization_wa },
+ { .setup = setup_configfs_post_ctx_restore_bb },
};
struct bo_setup_state state = {
.lrc = lrc,
.hwe = hwe,
.max_size = LRC_WA_BB_SIZE,
+ .buffer = scratch,
.reserve_dw = 1,
.offset = __xe_lrc_wa_bb_offset(lrc),
.funcs = funcs,
@@ -1117,15 +1297,39 @@ static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
return 0;
}
+static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ u32 *buf = NULL;
+ int ret;
+
+ if (lrc->bo->vmap.is_iomem) {
+ buf = kmalloc(LRC_WA_BB_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ ret = xe_lrc_setup_wa_bb_with_scratch(lrc, hwe, buf);
+
+ kfree(buf);
+
+ return ret;
+}
+
static int
setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
{
- static struct bo_setup rcs_funcs[] = {
+ static const struct bo_setup rcs_funcs[] = {
+ { .setup = setup_timestamp_wa },
+ { .setup = setup_configfs_mid_ctx_restore_bb },
+ };
+ static const struct bo_setup xcs_funcs[] = {
+ { .setup = setup_configfs_mid_ctx_restore_bb },
};
struct bo_setup_state state = {
.lrc = lrc,
.hwe = hwe,
.max_size = (63 * 64) /* max 63 cachelines */,
+ .buffer = NULL,
.offset = __xe_lrc_indirect_ctx_offset(lrc),
};
int ret;
@@ -1137,14 +1341,25 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
hwe->class == XE_ENGINE_CLASS_COMPUTE) {
state.funcs = rcs_funcs;
state.num_funcs = ARRAY_SIZE(rcs_funcs);
+ } else {
+ state.funcs = xcs_funcs;
+ state.num_funcs = ARRAY_SIZE(xcs_funcs);
}
if (xe_gt_WARN_ON(lrc->gt, !state.funcs))
return 0;
+ if (lrc->bo->vmap.is_iomem) {
+ state.buffer = kmalloc(state.max_size, GFP_KERNEL);
+ if (!state.buffer)
+ return -ENOMEM;
+ }
+
ret = setup_bo(&state);
- if (ret)
+ if (ret) {
+ kfree(state.buffer);
return ret;
+ }
/*
* Align to 64B cacheline so there's no garbage at the end for CS to
@@ -1156,15 +1371,17 @@ setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
}
finish_bo(&state);
+ kfree(state.buffer);
+ /*
+ * Enable INDIRECT_CTX, leaving INDIRECT_CTX_OFFSET at its default: it
+ * varies per engine class, but the default is good enough
+ */
xe_lrc_write_ctx_reg(lrc,
CTX_CS_INDIRECT_CTX,
(xe_bo_ggtt_addr(lrc->bo) + state.offset) |
/* Size in CLs. */
(state.written * sizeof(u32) / 64));
- xe_lrc_write_ctx_reg(lrc,
- CTX_CS_INDIRECT_CTX_OFFSET,
- CTX_INDIRECT_CTX_OFFSET_DEFAULT);
return 0;
}
@@ -1200,12 +1417,14 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
bo_flags = XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_GGTT |
XE_BO_FLAG_GGTT_INVALIDATE;
- if (vm && vm->xef) /* userspace */
- bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
- lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size,
- ttm_bo_type_kernel,
- bo_flags);
+ if ((vm && vm->xef) || init_flags & XE_LRC_CREATE_USER_CTX) /* userspace */
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE | XE_BO_FLAG_FORCE_USER_VRAM;
+
+ lrc->bo = xe_bo_create_pin_map_novm(xe, tile,
+ bo_size,
+ ttm_bo_type_kernel,
+ bo_flags, false);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
@@ -1374,6 +1593,23 @@ void xe_lrc_destroy(struct kref *ref)
kfree(lrc);
}
+/**
+ * xe_lrc_update_hwctx_regs_with_address - Re-compute GGTT references within given LRC.
+ * @lrc: the &xe_lrc struct instance
+ */
+void xe_lrc_update_hwctx_regs_with_address(struct xe_lrc *lrc)
+{
+ if (xe_lrc_has_indirect_ring_state(lrc)) {
+ xe_lrc_write_ctx_reg(lrc, CTX_INDIRECT_RING_STATE,
+ __xe_lrc_indirect_ring_ggtt_addr(lrc));
+
+ xe_lrc_write_indirect_ctx_reg(lrc, INDIRECT_CTX_RING_START,
+ __xe_lrc_ring_ggtt_addr(lrc));
+ } else {
+ xe_lrc_write_ctx_reg(lrc, CTX_RING_START, __xe_lrc_ring_ggtt_addr(lrc));
+ }
+}
+
void xe_lrc_set_ring_tail(struct xe_lrc *lrc, u32 tail)
{
if (xe_lrc_has_indirect_ring_state(lrc))
@@ -1939,7 +2175,7 @@ u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs)
* continue to emit all of the SVG state since it's best not to leak
* any of the state between contexts, even if that leakage is harmless.
*/
- if (XE_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
+ if (XE_GT_WA(gt, 14019789679) && q->hwe->class == XE_ENGINE_CLASS_RENDER) {
state_table = xe_hpg_svg_state;
state_table_size = ARRAY_SIZE(xe_hpg_svg_state);
}
diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
index b6c8053c581b..2fb628da5c43 100644
--- a/drivers/gpu/drm/xe/xe_lrc.h
+++ b/drivers/gpu/drm/xe/xe_lrc.h
@@ -42,8 +42,12 @@ struct xe_lrc_snapshot {
#define LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR (0x34 * 4)
#define LRC_PPHWSP_PXP_INVAL_SCRATCH_ADDR (0x40 * 4)
-#define XE_LRC_CREATE_RUNALONE 0x1
-#define XE_LRC_CREATE_PXP 0x2
+#define LRC_WA_BB_SIZE SZ_4K
+
+#define XE_LRC_CREATE_RUNALONE BIT(0)
+#define XE_LRC_CREATE_PXP BIT(1)
+#define XE_LRC_CREATE_USER_CTX BIT(2)
+
struct xe_lrc *xe_lrc_create(struct xe_hw_engine *hwe, struct xe_vm *vm,
u32 ring_size, u16 msix_vec, u32 flags);
void xe_lrc_destroy(struct kref *ref);
@@ -72,6 +76,16 @@ static inline void xe_lrc_put(struct xe_lrc *lrc)
kref_put(&lrc->refcount, xe_lrc_destroy);
}
+/**
+ * xe_lrc_ring_size() - Xe LRC ring size
+ *
+ * Return: Size of the LRC ring buffer, in bytes
+ */
+static inline size_t xe_lrc_ring_size(void)
+{
+ return SZ_16K;
+}
+
size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class);
u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc);
u32 xe_lrc_regs_offset(struct xe_lrc *lrc);
@@ -88,6 +102,10 @@ bool xe_lrc_ring_is_idle(struct xe_lrc *lrc);
u32 xe_lrc_indirect_ring_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ggtt_addr(struct xe_lrc *lrc);
u32 *xe_lrc_regs(struct xe_lrc *lrc);
+void xe_lrc_update_hwctx_regs_with_address(struct xe_lrc *lrc);
+void xe_default_lrc_update_memirq_regs_with_address(struct xe_hw_engine *hwe);
+void xe_lrc_update_memirq_regs_with_address(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *regs);
u32 xe_lrc_read_ctx_reg(struct xe_lrc *lrc, int reg_nr);
void xe_lrc_write_ctx_reg(struct xe_lrc *lrc, int reg_nr, u32 val);
@@ -106,6 +124,7 @@ s32 xe_lrc_start_seqno(struct xe_lrc *lrc);
u32 xe_lrc_parallel_ggtt_addr(struct xe_lrc *lrc);
struct iosys_map xe_lrc_parallel_map(struct xe_lrc *lrc);
+size_t xe_lrc_reg_size(struct xe_device *xe);
size_t xe_lrc_skip_size(struct xe_device *xe);
void xe_lrc_dump_default(struct drm_printer *p,
@@ -124,6 +143,8 @@ u32 xe_lrc_ctx_timestamp_udw_ggtt_addr(struct xe_lrc *lrc);
u64 xe_lrc_ctx_timestamp(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp_ggtt_addr(struct xe_lrc *lrc);
u32 xe_lrc_ctx_job_timestamp(struct xe_lrc *lrc);
+int xe_lrc_setup_wa_bb_with_scratch(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *scratch);
/**
* xe_lrc_update_timestamp - readout LRC timestamp and update cached value
diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
index 8d67f6ba2d95..c44777125691 100644
--- a/drivers/gpu/drm/xe/xe_map.h
+++ b/drivers/gpu/drm/xe/xe_map.h
@@ -14,9 +14,9 @@
* DOC: Map layer
*
* All access to any memory shared with a device (both sysmem and vram) in the
- * XE driver should go through this layer (xe_map). This layer is built on top
+ * Xe driver should go through this layer (xe_map). This layer is built on top
* of :ref:`driver-api/device-io:Generalizing Access to System and I/O Memory`
- * and with extra hooks into the XE driver that allows adding asserts to memory
+ * and with extra hooks into the Xe driver that allows adding asserts to memory
* accesses (e.g. for blocking runtime_pm D3Cold on Discrete Graphics).
*/
@@ -78,24 +78,6 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
iosys_map_wr(map__, offset__, type__, val__); \
})
-#define xe_map_rd_array(xe__, map__, index__, type__) \
- xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__)
-
-#define xe_map_wr_array(xe__, map__, index__, type__, val__) \
- xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__)
-
-#define xe_map_rd_array_u32(xe__, map__, index__) \
- xe_map_rd_array(xe__, map__, index__, u32)
-
-#define xe_map_wr_array_u32(xe__, map__, index__, val__) \
- xe_map_wr_array(xe__, map__, index__, u32, val__)
-
-#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \
- xe_map_rd_array_u32(xe__, map__, (index__) % (size__))
-
-#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \
- xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__)
-
#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \
struct xe_device *__xe = xe__; \
xe_device_assert_mem_access(__xe); \
diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c
index 49c45ec3e83c..b0c7ce0a5d1e 100644
--- a/drivers/gpu/drm/xe/xe_memirq.c
+++ b/drivers/gpu/drm/xe/xe_memirq.c
@@ -14,16 +14,15 @@
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_gt.h"
-#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_hw_engine.h"
#include "xe_map.h"
#include "xe_memirq.h"
+#include "xe_tile_printk.h"
#define memirq_assert(m, condition) xe_tile_assert(memirq_to_tile(m), condition)
#define memirq_printk(m, _level, _fmt, ...) \
- drm_##_level(&memirq_to_xe(m)->drm, "MEMIRQ%u: " _fmt, \
- memirq_to_tile(m)->id, ##__VA_ARGS__)
+ xe_tile_##_level(memirq_to_tile(m), "MEMIRQ: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_XE_DEBUG_MEMIRQ
#define memirq_debug(m, _fmt, ...) memirq_printk(m, dbg, _fmt, ##__VA_ARGS__)
@@ -398,8 +397,9 @@ void xe_memirq_postinstall(struct xe_memirq *memirq)
memirq_set_enable(memirq, true);
}
-static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
- u16 offset, const char *name)
+static bool __memirq_received(struct xe_memirq *memirq,
+ struct iosys_map *vector, u16 offset,
+ const char *name, bool clear)
{
u8 value;
@@ -409,19 +409,33 @@ static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
memirq_err_ratelimited(memirq,
"Unexpected memirq value %#x from %s at %u\n",
value, name, offset);
- iosys_map_wr(vector, offset, u8, 0x00);
+ if (clear)
+ iosys_map_wr(vector, offset, u8, 0x00);
}
return value;
}
+static bool memirq_received_noclear(struct xe_memirq *memirq,
+ struct iosys_map *vector,
+ u16 offset, const char *name)
+{
+ return __memirq_received(memirq, vector, offset, name, false);
+}
+
+static bool memirq_received(struct xe_memirq *memirq, struct iosys_map *vector,
+ u16 offset, const char *name)
+{
+ return __memirq_received(memirq, vector, offset, name, true);
+}
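memirq_received() clears the vector byte as a side effect; the _noclear variant defers that to the caller, which matters when another thread may poll the same byte (see the SW_INT_0 handling below). A sketch of the consume-then-clear pattern this enables; the handler name is hypothetical:

/* Hypothetical: act on the event first, clear the vector byte afterwards. */
static void example_handle_sw_irq(struct xe_memirq *memirq,
				  struct iosys_map *status, u16 offset)
{
	if (memirq_received_noclear(memirq, status, offset, "example")) {
		/* ...consume the event while the byte still reads as set... */
		iosys_map_wr(status, offset, u8, 0x00);
	}
}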
+
static void memirq_dispatch_engine(struct xe_memirq *memirq, struct iosys_map *status,
struct xe_hw_engine *hwe)
{
memirq_debug(memirq, "STATUS %s %*ph\n", hwe->name, 16, status->vaddr);
- if (memirq_received(memirq, status, ilog2(GT_RENDER_USER_INTERRUPT), hwe->name))
- xe_hw_engine_handle_irq(hwe, GT_RENDER_USER_INTERRUPT);
+ if (memirq_received(memirq, status, ilog2(GT_MI_USER_INTERRUPT), hwe->name))
+ xe_hw_engine_handle_irq(hwe, GT_MI_USER_INTERRUPT);
}
static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *status,
@@ -434,8 +448,16 @@ static void memirq_dispatch_guc(struct xe_memirq *memirq, struct iosys_map *stat
if (memirq_received(memirq, status, ilog2(GUC_INTR_GUC2HOST), name))
xe_guc_irq_handler(guc, GUC_INTR_GUC2HOST);
- if (memirq_received(memirq, status, ilog2(GUC_INTR_SW_INT_0), name))
+ /*
+ * This software interrupt must only be cleared after it has been
+ * consumed; clearing it before the handler runs opens a race in which
+ * xe_gt_sriov_vf_recovery_pending() wrongly returns false.
+ */
+ if (memirq_received_noclear(memirq, status, ilog2(GUC_INTR_SW_INT_0),
+ name)) {
xe_guc_irq_handler(guc, GUC_INTR_SW_INT_0);
+ iosys_map_wr(status, ilog2(GUC_INTR_SW_INT_0), u8, 0x00);
+ }
}
/**
@@ -461,6 +483,23 @@ void xe_memirq_hwe_handler(struct xe_memirq *memirq, struct xe_hw_engine *hwe)
}
/**
+ * xe_memirq_guc_sw_int_0_irq_pending() - Check if a SW_INT_0 IRQ is pending
+ * @memirq: the &xe_memirq
+ * @guc: the &xe_guc to check for IRQ
+ *
+ * Return: true if a SW_INT_0 IRQ is pending on @guc, false otherwise
+ */
+bool xe_memirq_guc_sw_int_0_irq_pending(struct xe_memirq *memirq, struct xe_guc *guc)
+{
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 offset = xe_gt_is_media_type(gt) ? ilog2(INTR_MGUC) : ilog2(INTR_GUC);
+ struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&memirq->status, offset * SZ_16);
+
+ return memirq_received_noclear(memirq, &map, ilog2(GUC_INTR_SW_INT_0),
+ guc_name(guc));
+}
+
+/**
* xe_memirq_handler - The `Memory Based Interrupts`_ Handler.
* @memirq: the &xe_memirq
*
diff --git a/drivers/gpu/drm/xe/xe_memirq.h b/drivers/gpu/drm/xe/xe_memirq.h
index 06130650e9d6..e25d2234ab87 100644
--- a/drivers/gpu/drm/xe/xe_memirq.h
+++ b/drivers/gpu/drm/xe/xe_memirq.h
@@ -25,4 +25,6 @@ void xe_memirq_handler(struct xe_memirq *memirq);
int xe_memirq_init_guc(struct xe_memirq *memirq, struct xe_guc *guc);
+bool xe_memirq_guc_sw_int_0_irq_pending(struct xe_memirq *memirq, struct xe_guc *guc);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 84f412fd3c5d..2184af413b91 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -9,6 +9,7 @@
#include <linux/sizes.h>
#include <drm/drm_managed.h>
+#include <drm/drm_pagemap.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>
@@ -28,12 +29,16 @@
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_mocs.h"
+#include "xe_printk.h"
#include "xe_pt.h"
#include "xe_res_cursor.h"
+#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_trace_bo.h"
+#include "xe_validation.h"
#include "xe_vm.h"
+#include "xe_vram.h"
/**
* struct xe_migrate - migrate context.
@@ -53,6 +58,13 @@ struct xe_migrate {
u64 usm_batch_base_ofs;
/** @cleared_mem_ofs: VM offset of @cleared_bo. */
u64 cleared_mem_ofs;
+ /** @large_page_copy_ofs: VM offset of 2M pages used for large copies */
+ u64 large_page_copy_ofs;
+ /**
+ * @large_page_copy_pdes: BO offset at which to write out the 2M PDEs
+ * used for large copies
+ */
+ u64 large_page_copy_pdes;
/**
* @fence: dma-fence representing the last migration job batch.
* Protected by @job_mutex.
@@ -84,19 +96,6 @@ struct xe_migrate {
*/
#define MAX_PTE_PER_SDI 0x1FEU
-/**
- * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue.
- * @tile: The tile.
- *
- * Returns the default migrate exec queue of this tile.
- *
- * Return: The default migrate exec queue
- */
-struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile)
-{
- return tile->migrate->q;
-}
-
static void xe_migrate_fini(void *arg)
{
struct xe_migrate *m = arg;
@@ -130,38 +129,39 @@ static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
u64 identity_offset = IDENTITY_OFFSET;
if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
- identity_offset += DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size
+ (xe->mem.vram), SZ_1G);
- addr -= xe->mem.vram.dpa_base;
+ addr -= xe_vram_region_dpa_base(xe->mem.vram);
return addr + (identity_offset << xe_pt_shift(2));
}
static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
{
+ struct xe_vram_region *vram = xe->mem.vram;
+ resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
u64 pos, ofs, flags;
u64 entry;
/* XXX: Unclear if this should be usable_size? */
- u64 vram_limit = xe->mem.vram.actual_physical_size +
- xe->mem.vram.dpa_base;
+ u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
u32 level = 2;
ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
true, 0);
- xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));
+ xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));
/*
* Use 1GB pages when possible, last chunk always use 2M
* pages as mixing reserved memory (stolen, WOCPM) with a single
* mapping is not allowed on certain platforms.
*/
- for (pos = xe->mem.vram.dpa_base; pos < vram_limit;
+ for (pos = dpa_base; pos < vram_limit;
pos += SZ_1G, ofs += 8) {
if (pos + SZ_1G >= vram_limit) {
- entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
- pat_index);
+ entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs);
xe_map_wr(xe, &bo->vmap, ofs, u64, entry);
flags = vm->pt_ops->pte_encode_addr(xe, 0,
@@ -182,7 +182,7 @@ static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm,
}
static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
- struct xe_vm *vm)
+ struct xe_vm *vm, struct drm_exec *exec)
{
struct xe_device *xe = tile_to_xe(tile);
u16 pat_index = xe->pat.idx[XE_CACHE_WB];
@@ -209,13 +209,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
num_entries * XE_PAGE_SIZE,
ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_PAGETABLE);
+ XE_BO_FLAG_PAGETABLE, exec);
if (IS_ERR(bo))
return PTR_ERR(bo);
/* PT30 & PT31 reserved for 2M identity map */
pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
- entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
+ entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs);
xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;
@@ -283,20 +283,25 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
flags = XE_PDE_64K;
entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
- XE_PAGE_SIZE, pat_index);
+ XE_PAGE_SIZE);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
entry | flags);
}
/* Write PDE's that point to our BO. */
- for (i = 0; i < map_ofs / PAGE_SIZE; i++) {
- entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
- pat_index);
+ for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) {
+ entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE);
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
(i + 1) * 8, u64, entry);
}
+ /* Reserve 2M PDEs */
+ level = 1;
+ m->large_page_copy_ofs = NUM_PT_SLOTS << xe_pt_shift(level);
+ m->large_page_copy_pdes = map_ofs + XE_PAGE_SIZE * level +
+ NUM_PT_SLOTS * 8;
+
/* Set up a 1GiB NULL mapping at 255GiB offset. */
level = 2;
xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
@@ -307,11 +312,11 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
/* Identity map the entire vram at 256GiB offset */
if (IS_DGFX(xe)) {
u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
+ resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);
xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
pat_index, pt30_ofs);
- xe_assert(xe, xe->mem.vram.actual_physical_size <=
- (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
+ xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);
/*
* Identity map the entire vram for compressed pat_index for xe2+
@@ -320,11 +325,11 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
u64 vram_offset = IDENTITY_OFFSET +
- DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G);
+ DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;
- xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -
- IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G);
+ xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
+ IDENTITY_OFFSET / 2) * SZ_1G);
xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
comp_pat_index, pt31_ofs);
}
@@ -387,38 +392,63 @@ static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
}
/**
- * xe_migrate_init() - Initialize a migrate context
- * @tile: Back-pointer to the tile we're initializing for.
+ * xe_migrate_alloc() - Allocate a migrate struct for a given &xe_tile
+ * @tile: &xe_tile
+ *
+ * Allocates a &xe_migrate for a given tile.
*
- * Return: Pointer to a migrate context on success. Error pointer on error.
+ * Return: &xe_migrate on success, or NULL when out of memory.
*/
-struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
+struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
+{
+ struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);
+
+ if (m)
+ m->tile = tile;
+ return m;
+}
+
+static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm)
{
struct xe_device *xe = tile_to_xe(tile);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ int err = 0;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ err = xe_migrate_prepare_vm(tile, m, vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ }
+
+ return err;
+}
+
+/**
+ * xe_migrate_init() - Initialize a migrate context
+ * @m: The migration context
+ *
+ * Return: 0 if successful, negative error code on failure
+ */
+int xe_migrate_init(struct xe_migrate *m)
+{
+ struct xe_tile *tile = m->tile;
struct xe_gt *primary_gt = tile->primary_gt;
- struct xe_migrate *m;
+ struct xe_device *xe = tile_to_xe(tile);
struct xe_vm *vm;
int err;
- m = devm_kzalloc(xe->drm.dev, sizeof(*m), GFP_KERNEL);
- if (!m)
- return ERR_PTR(-ENOMEM);
-
- m->tile = tile;
-
/* Special layout, prepared below. */
vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
XE_VM_FLAG_SET_TILE_ID(tile), NULL);
if (IS_ERR(vm))
- return ERR_CAST(vm);
+ return PTR_ERR(vm);
- xe_vm_lock(vm, false);
- err = xe_migrate_prepare_vm(tile, m, vm);
- xe_vm_unlock(vm);
- if (err) {
- xe_vm_close_and_put(vm);
- return ERR_PTR(err);
- }
+ err = xe_migrate_lock_prepare_vm(tile, m, vm);
+ if (err)
+ goto err_out;
if (xe->info.has_usm) {
struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
@@ -427,8 +457,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
false);
u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);
- if (!hwe || !logical_mask)
- return ERR_PTR(-EINVAL);
+ if (!hwe || !logical_mask) {
+ err = -EINVAL;
+ goto err_out;
+ }
/*
* XXX: Currently only reserving 1 (likely slow) BCS instance on
@@ -437,16 +469,18 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
EXEC_QUEUE_FLAG_KERNEL |
EXEC_QUEUE_FLAG_PERMANENT |
- EXEC_QUEUE_FLAG_HIGH_PRIORITY, 0);
+ EXEC_QUEUE_FLAG_HIGH_PRIORITY |
+ EXEC_QUEUE_FLAG_MIGRATE, 0);
} else {
m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
XE_ENGINE_CLASS_COPY,
EXEC_QUEUE_FLAG_KERNEL |
- EXEC_QUEUE_FLAG_PERMANENT, 0);
+ EXEC_QUEUE_FLAG_PERMANENT |
+ EXEC_QUEUE_FLAG_MIGRATE, 0);
}
if (IS_ERR(m->q)) {
- xe_vm_close_and_put(vm);
- return ERR_CAST(m->q);
+ err = PTR_ERR(m->q);
+ goto err_out;
}
mutex_init(&m->job_mutex);
@@ -456,7 +490,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
if (err)
- return ERR_PTR(err);
+ return err;
if (IS_DGFX(xe)) {
if (xe_migrate_needs_ccs_emit(xe))
@@ -471,7 +505,12 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
(unsigned long long)m->min_chunk_size);
}
- return m;
+ return err;
+
+err_out:
+ xe_vm_close_and_put(vm);
+ return err;
}
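Splitting allocation from initialization lets the tile publish its migrate pointer before the VM, exec queue, and identity map exist. A sketch of the intended call order; the probe helper is hypothetical, while xe_migrate_alloc()/xe_migrate_init() and the tile->migrate field come from the patch:

/* Hypothetical two-phase bring-up using the alloc/init split. */
static int example_tile_migrate_probe(struct xe_tile *tile)
{
	struct xe_migrate *m = xe_migrate_alloc(tile);

	if (!m)
		return -ENOMEM;

	tile->migrate = m;		/* visible before the heavy init */
	return xe_migrate_init(m);
}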
static u64 max_mem_transfer_per_pass(struct xe_device *xe)
@@ -661,9 +700,9 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
}
#define EMIT_COPY_DW 10
-static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
- u64 src_ofs, u64 dst_ofs, unsigned int size,
- unsigned int pitch)
+static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
+ u64 dst_ofs, unsigned int size,
+ unsigned int pitch)
{
struct xe_device *xe = gt_to_xe(gt);
u32 mocs = 0;
@@ -692,6 +731,61 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
bb->cs[bb->len++] = upper_32_bits(src_ofs);
}
+#define PAGE_COPY_MODE_PS SZ_256 /* hw uses 256 bytes as the page-size */
+static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
+ u64 dst_ofs, unsigned int size, unsigned int pitch)
+{
+ u32 mode, copy_type, width;
+
+ xe_gt_assert(gt, IS_ALIGNED(size, pitch));
+ xe_gt_assert(gt, pitch <= U16_MAX);
+ xe_gt_assert(gt, pitch);
+ xe_gt_assert(gt, size);
+
+ if (IS_ALIGNED(size, PAGE_COPY_MODE_PS) &&
+ IS_ALIGNED(lower_32_bits(src_ofs), PAGE_COPY_MODE_PS) &&
+ IS_ALIGNED(lower_32_bits(dst_ofs), PAGE_COPY_MODE_PS)) {
+ mode = MEM_COPY_PAGE_COPY_MODE;
+ copy_type = 0; /* linear copy */
+ width = size / PAGE_COPY_MODE_PS;
+ } else if (pitch > 1) {
+ xe_gt_assert(gt, size / pitch <= U16_MAX);
+ mode = 0; /* BYTE_COPY */
+ copy_type = MEM_COPY_MATRIX_COPY;
+ width = pitch;
+ } else {
+ mode = 0; /* BYTE_COPY */
+ copy_type = 0; /* linear copy */
+ width = size;
+ }
+
+ xe_gt_assert(gt, width <= U16_MAX);
+
+ bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type;
+ bb->cs[bb->len++] = width - 1;
+ bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */
+ bb->cs[bb->len++] = pitch - 1;
+ bb->cs[bb->len++] = pitch - 1;
+ bb->cs[bb->len++] = lower_32_bits(src_ofs);
+ bb->cs[bb->len++] = upper_32_bits(src_ofs);
+ bb->cs[bb->len++] = lower_32_bits(dst_ofs);
+ bb->cs[bb->len++] = upper_32_bits(dst_ofs);
+ bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) |
+ FIELD_PREP(MEM_COPY_DST_MOCS_INDEX_MASK, gt->mocs.uc_index);
+}
+
+static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
+ u64 src_ofs, u64 dst_ofs, unsigned int size,
+ unsigned int pitch)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (xe->info.has_mem_copy_instr)
+ emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
+ else
+ emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch);
+}
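Three operand shapes fall out of the dispatch above. A hedged illustration of what each case programs, derived from the code rather than from hardware documentation:

/* Examples of emit_mem_copy() operand selection (derived from the code above):
 *  - size = SZ_64K, both offsets 256B-aligned:
 *      PAGE_COPY mode, linear, width = SZ_64K / 256 = 256 "pages"
 *  - size = SZ_8K, pitch = SZ_4K, src offset not 256B-aligned:
 *      BYTE_COPY mode, MATRIX_COPY, width = 4096, 2 rows
 *  - size = 17, pitch = 1:
 *      BYTE_COPY mode, linear, width = 17 bytes
 */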
+
static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
{
return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
@@ -809,7 +903,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
&ccs_it);
while (size) {
- u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+ u32 batch_size = 1; /* MI_BATCH_BUFFER_END */
struct xe_sched_job *job;
struct xe_bb *bb;
u32 flush_flags = 0;
@@ -834,11 +928,15 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
&src_L0_ofs, &src_L0_pt, 0, 0,
avail_pts);
-
- pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
- batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
- &dst_L0_ofs, &dst_L0_pt, 0,
- avail_pts, avail_pts);
+ if (copy_only_ccs) {
+ dst_L0_ofs = src_L0_ofs;
+ } else {
+ pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
+ batch_size += pte_update_size(m, pte_flags, dst,
+ &dst_it, &src_L0,
+ &dst_L0_ofs, &dst_L0_pt,
+ 0, avail_pts, avail_pts);
+ }
if (copy_system_ccs) {
xe_assert(xe, type_device);
@@ -868,7 +966,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
xe_res_next(&dst_it, src_L0);
- else
+ else if (!copy_only_ccs)
emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
&dst_it, src_L0, dst);
@@ -896,11 +994,11 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
goto err;
}
- xe_sched_job_add_migrate_flush(job, flush_flags);
+ xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB);
if (!fence) {
err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
- if (!err && src_bo != dst_bo)
+ if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv)
err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
DMA_RESV_USAGE_BOOKKEEP);
if (err)
@@ -940,6 +1038,301 @@ err_sync:
return fence;
}
+/**
+ * xe_migrate_lrc() - Get the LRC from migrate context.
+ * @migrate: Migrate context.
+ *
+ * Return: Pointer to the LRC
+ */
+struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
+{
+ return migrate->q->lrc[0];
+}
+
+static u64 migrate_vm_ppgtt_addr_tlb_inval(void)
+{
+ /*
+ * The migrate VM is self-referential so it can modify its own PTEs (see
+ * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE
+ * entries for kernel operations (copies, clears, CCS migrate), and
+ * suballocate the rest to user operations (binds/unbinds). With
+ * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates,
+ * so assign NUM_KERNEL_PDE - 2 for TLB invalidation.
+ */
+ return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE;
+}
+
+static int emit_flush_invalidate(u32 *dw, int i, u32 flags)
+{
+ u64 addr = migrate_vm_ppgtt_addr_tlb_inval();
+
+ dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
+ MI_FLUSH_IMM_DW | flags;
+ dw[i++] = lower_32_bits(addr);
+ dw[i++] = upper_32_bits(addr);
+ dw[i++] = MI_NOOP;
+ dw[i++] = MI_NOOP;
+
+ return i;
+}
+
+/**
+ * xe_migrate_ccs_rw_copy() - Copy content of TTM resources.
+ * @tile: Tile whose migration context is to be used.
+ * @q: Exec queue to be used along with the migration context.
+ * @src_bo: The source buffer object.
+ * @read_write: Whether to build BB commands for a CCS read or a CCS write.
+ *
+ * Creates batch buffer instructions to copy CCS metadata from CCS pool to
+ * memory and vice versa.
+ *
+ * This function should only be called for IGPU.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
+int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
+ struct xe_bo *src_bo,
+ enum xe_sriov_vf_ccs_rw_ctxs read_write)
+{
+ bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
+ bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
+ struct ttm_resource *src = src_bo->ttm.resource;
+ struct xe_migrate *m = tile->migrate;
+ struct xe_gt *gt = tile->primary_gt;
+ u32 batch_size, batch_size_allocated;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_res_cursor src_it, ccs_it;
+ u64 size = xe_bo_size(src_bo);
+ struct xe_bb *bb = NULL;
+ u64 src_L0, src_L0_ofs;
+ u32 src_L0_pt;
+ int err;
+
+ xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
+
+ xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
+ PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
+ &ccs_it);
+
+ /* Calculate batch buffer size */
+ batch_size = 0;
+ while (size) {
+ u64 ccs_ofs, ccs_size;
+ u32 ccs_pt;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+ batch_size += 10; /* 2 x emit_flush_invalidate(): 5 dwords each */
+
+ src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);
+
+ batch_size += pte_update_size(m, 0, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ ccs_size = xe_device_ccs_bytes(xe, src_L0);
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
+ &ccs_pt, 0, avail_pts, avail_pts);
+ xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
+
+ /* Add copy commands size here */
+ batch_size += EMIT_COPY_CCS_DW;
+
+ size -= src_L0;
+ }
+
+ bb = xe_bb_ccs_new(gt, batch_size, read_write);
+ if (IS_ERR(bb)) {
+ drm_err(&xe->drm, "BB allocation failed.\n");
+ err = PTR_ERR(bb);
+ goto err_ret;
+ }
+
+ batch_size_allocated = batch_size;
+ size = xe_bo_size(src_bo);
+ batch_size = 0;
+
+ /*
+ * Emit PTE and copy commands here.
+ * The CCS copy command can only support limited size. If the size to be
+ * copied is more than the limit, divide copy into chunks. So, calculate
+ * sizes here again before copy command is emitted.
+ */
+ while (size) {
+ u32 flush_flags = 0;
+ u64 ccs_ofs, ccs_size;
+ u32 ccs_pt;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+ batch_size += 10; /* 2 x emit_flush_invalidate(): 5 dwords each */
+
+ src_L0 = xe_migrate_res_sizes(m, &src_it);
+
+ batch_size += pte_update_size(m, 0, src, &src_it, &src_L0,
+ &src_L0_ofs, &src_L0_pt, 0, 0,
+ avail_pts);
+
+ ccs_size = xe_device_ccs_bytes(xe, src_L0);
+ batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
+ &ccs_pt, 0, avail_pts, avail_pts);
+ xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
+ batch_size += EMIT_COPY_CCS_DW;
+
+ emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src);
+
+ emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
+
+ bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
+ flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
+ src_L0_ofs, dst_is_pltt,
+ src_L0, ccs_ofs, true);
+ bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags);
+
+ size -= src_L0;
+ }
+
+ xe_assert(xe, batch_size_allocated == bb->len);
+ src_bo->bb_ccs[read_write] = bb;
+
+ return 0;
+
+err_ret:
+ return err;
+}
+
+/**
+ * xe_migrate_exec_queue() - Get the execution queue from migrate context.
+ * @migrate: Migrate context.
+ *
+ * Return: Pointer to the execution queue
+ */
+struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
+{
+ return migrate->q;
+}
+
+/**
+ * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object.
+ * @vram_bo: The VRAM buffer object.
+ * @vram_offset: The VRAM offset.
+ * @sysmem_bo: The sysmem buffer object.
+ * @sysmem_offset: The sysmem offset.
+ * @size: The size of VRAM chunk to copy.
+ * @dir: The direction of the copy operation.
+ *
+ * Copies a portion of a buffer object between VRAM and system memory.
+ * On Xe2 platforms that support flat CCS, VRAM data is decompressed when
+ * copying to system memory.
+ *
+ * Return: Pointer to a dma_fence representing the last copy batch, or
+ * an error pointer on failure. If there is a failure, any copy operation
+ * started by the function call has been synced.
+ */
+struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
+ struct xe_bo *sysmem_bo, u64 sysmem_offset,
+ u64 size, enum xe_migrate_copy_dir dir)
+{
+ struct xe_device *xe = xe_bo_device(vram_bo);
+ struct xe_tile *tile = vram_bo->tile;
+ struct xe_gt *gt = tile->primary_gt;
+ struct xe_migrate *m = tile->migrate;
+ struct dma_fence *fence = NULL;
+ struct ttm_resource *vram = vram_bo->ttm.resource;
+ struct ttm_resource *sysmem = sysmem_bo->ttm.resource;
+ struct xe_res_cursor vram_it, sysmem_it;
+ u64 vram_L0_ofs, sysmem_L0_ofs;
+ u32 vram_L0_pt, sysmem_L0_pt;
+ u64 vram_L0, sysmem_L0;
+ bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM);
+ bool use_comp_pat = to_sysmem &&
+ GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe);
+ int pass = 0;
+ int err;
+
+ xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE));
+ xe_assert(xe, xe_bo_is_vram(vram_bo));
+ xe_assert(xe, !xe_bo_is_vram(sysmem_bo));
+ xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size));
+ xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size));
+
+ xe_res_first(vram, vram_offset, size, &vram_it);
+ xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it);
+
+ while (size) {
+ u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM;
+ u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
+ struct xe_sched_job *job;
+ struct xe_bb *bb;
+ u32 update_idx;
+ bool usm = xe->info.has_usm;
+ u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;
+
+ sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it);
+ vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0);
+
+ xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0);
+
+ pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
+ batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0,
+ &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts);
+
+ batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs,
+ &sysmem_L0_pt, 0, avail_pts, avail_pts);
+ batch_size += EMIT_COPY_DW;
+
+ bb = xe_bb_new(gt, batch_size, usm);
+ if (IS_ERR(bb)) {
+ err = PTR_ERR(bb);
+ return ERR_PTR(err);
+ }
+
+ if (xe_migrate_allow_identity(vram_L0, &vram_it))
+ xe_res_next(&vram_it, vram_L0);
+ else
+ emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram);
+
+ emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem);
+
+ bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
+ update_idx = bb->len;
+
+ if (to_sysmem)
+ emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE);
+ else
+ emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE);
+
+ job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm),
+ update_idx);
+ if (IS_ERR(job)) {
+ xe_bb_free(bb, NULL);
+ err = PTR_ERR(job);
+ return ERR_PTR(err);
+ }
+
+ xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+
+ xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP));
+ xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP));
+
+ scoped_guard(mutex, &m->job_mutex) {
+ xe_sched_job_arm(job);
+ dma_fence_put(fence);
+ fence = dma_fence_get(&job->drm.s_fence->finished);
+ xe_sched_job_push(job);
+
+ dma_fence_put(m->fence);
+ m->fence = dma_fence_get(fence);
+ }
+
+ xe_bb_free(bb, fence);
+ size -= vram_L0;
+ }
+
+ return fence;
+}
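A sketch of a caller draining a whole VRAM BO into a sysmem BO. The function requires page-aligned offsets/size and both BOs' reservations already idle (see the asserts above), so the example assumes that holds; only the helper name is hypothetical:

/* Hypothetical: copy an entire VRAM BO out to system memory and wait. */
static int example_copy_out(struct xe_bo *vram_bo, struct xe_bo *sysmem_bo)
{
	struct dma_fence *fence;

	fence = xe_migrate_vram_copy_chunk(vram_bo, 0, sysmem_bo, 0,
					   xe_bo_size(vram_bo),
					   XE_MIGRATE_COPY_TO_SRAM);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}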
+
static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
u32 size, u32 pitch)
{
@@ -1097,7 +1490,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
/* Calculate final sizes and batch size.. */
pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
- batch_size = 2 +
+ batch_size = 1 +
pte_update_size(m, pte_flags, src, &src_it,
&clear_L0, &clear_L0_ofs, &clear_L0_pt,
clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
@@ -1119,11 +1512,13 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
size -= clear_L0;
/* Preemption is enabled again by the ring ops. */
- if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
+ if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) {
xe_res_next(&src_it, clear_L0);
- else
- emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs,
- &src_it, clear_L0, dst);
+ } else {
+ emit_pte(m, bb, clear_L0_pt, clear_vram,
+ clear_only_system_ccs, &src_it, clear_L0, dst);
+ flush_flags |= MI_INVALIDATE_TLB;
+ }
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
@@ -1134,7 +1529,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
if (xe_migrate_needs_ccs_emit(xe)) {
emit_copy_ccs(gt, bb, clear_L0_ofs, true,
m->cleared_mem_ofs, false, clear_L0);
- flush_flags = MI_FLUSH_DW_CCS;
+ flush_flags |= MI_FLUSH_DW_CCS;
}
job = xe_bb_create_migration_job(m->q, bb,
@@ -1469,6 +1864,8 @@ next_cmd:
goto err_sa;
}
+ xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+
if (ops->pre_commit) {
pt_update->job = job;
err = ops->pre_commit(pt_update);
@@ -1571,16 +1968,23 @@ static u32 pte_update_cmd_size(u64 size)
static void build_pt_update_batch_sram(struct xe_migrate *m,
struct xe_bb *bb, u32 pt_offset,
- dma_addr_t *sram_addr, u32 size)
+ struct drm_pagemap_addr *sram_addr,
+ u32 size, int level)
{
u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
+ u64 gpu_page_size = 0x1ull << xe_pt_shift(level);
u32 ptes;
int i = 0;
- ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
+ xe_tile_assert(m->tile, PAGE_ALIGNED(size));
+
+ ptes = DIV_ROUND_UP(size, gpu_page_size);
while (ptes) {
u32 chunk = min(MAX_PTE_PER_SDI, ptes);
+ if (!level)
+ chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE);
+
bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
bb->cs[bb->len++] = pt_offset;
bb->cs[bb->len++] = 0;
@@ -1589,53 +1993,100 @@ static void build_pt_update_batch_sram(struct xe_migrate *m,
ptes -= chunk;
while (chunk--) {
- u64 addr = sram_addr[i++] & PAGE_MASK;
+ u64 addr = sram_addr[i].addr;
+ u64 pte;
+ xe_tile_assert(m->tile, sram_addr[i].proto ==
+ DRM_INTERCONNECT_SYSTEM);
xe_tile_assert(m->tile, addr);
- addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
- addr, pat_index,
- 0, false, 0);
- bb->cs[bb->len++] = lower_32_bits(addr);
- bb->cs[bb->len++] = upper_32_bits(addr);
+ xe_tile_assert(m->tile, PAGE_ALIGNED(addr));
+
+again:
+ pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
+ addr, pat_index,
+ level, false, 0);
+ bb->cs[bb->len++] = lower_32_bits(pte);
+ bb->cs[bb->len++] = upper_32_bits(pte);
+
+ if (gpu_page_size < PAGE_SIZE) {
+ addr += XE_PAGE_SIZE;
+ if (!PAGE_ALIGNED(addr)) {
+ chunk--;
+ goto again;
+ }
+ i++;
+ } else {
+ i += gpu_page_size / PAGE_SIZE;
+ }
}
}
}
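When CPU pages are larger than the GPU pages (level 0 on a 64K-page kernel), the "again" loop above fans a single drm_pagemap_addr entry out into several PTEs. A worked example under that assumption:

/* Worked example, assuming PAGE_SIZE = SZ_64K and XE_PAGE_SIZE = SZ_4K:
 *  - gpu_page_size = 4K at level 0, so one CPU page backs 16 PTEs;
 *  - chunk is aligned down to PAGE_SIZE / XE_PAGE_SIZE = 16, so a CPU page
 *    never straddles two MI_STORE_DATA_IMM packets;
 *  - the "again" loop advances addr by 4K and re-encodes a PTE until addr
 *    becomes page-aligned again, only then stepping to the next
 *    sram_addr[] entry.
 */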
-enum xe_migrate_copy_dir {
- XE_MIGRATE_COPY_TO_VRAM,
- XE_MIGRATE_COPY_TO_SRAM,
-};
+static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr,
+ unsigned long size)
+{
+ u32 large_size = (0x1 << xe_pt_shift(1));
+ unsigned long i, incr = large_size / PAGE_SIZE;
+
+ for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr)
+ if (PAGE_SIZE << sram_addr[i].order != large_size)
+ return false;
+
+ return true;
+}
#define XE_CACHELINE_BYTES 64ull
#define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1)
+static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len)
+{
+ u32 pitch;
+
+ if (IS_ALIGNED(len, PAGE_SIZE))
+ pitch = PAGE_SIZE;
+ else if (IS_ALIGNED(len, SZ_4K))
+ pitch = SZ_4K;
+ else if (IS_ALIGNED(len, SZ_256))
+ pitch = SZ_256;
+ else if (IS_ALIGNED(len, 4))
+ pitch = 4;
+ else
+ pitch = 1;
+
+ xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr);
+ return pitch;
+}
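Worked examples of the selection above; the last case is only reachable when the platform advertises MEM_COPY, per the assert:

/* xe_migrate_copy_pitch() examples (derived from the checks above):
 *  len = SZ_8K on a 4K-page kernel   -> pitch = PAGE_SIZE
 *  len = SZ_8K on a 64K-page kernel  -> pitch = SZ_4K
 *  len = 768                         -> pitch = SZ_256
 *  len = 100                         -> pitch = 4
 *  len = 17                          -> pitch = 1 (needs has_mem_copy_instr)
 */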
+
static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long len,
unsigned long sram_offset,
- dma_addr_t *sram_addr, u64 vram_addr,
+ struct drm_pagemap_addr *sram_addr,
+ u64 vram_addr,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
bool use_usm_batch = xe->info.has_usm;
struct dma_fence *fence = NULL;
- u32 batch_size = 2;
+ u32 batch_size = 1;
u64 src_L0_ofs, dst_L0_ofs;
struct xe_sched_job *job;
struct xe_bb *bb;
u32 update_idx, pt_slot = 0;
unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
- unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
- PAGE_SIZE : 4;
+ unsigned int pitch = xe_migrate_copy_pitch(xe, len);
int err;
+ unsigned long i, j;
+ bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset);
- if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
- (sram_offset | vram_addr) & XE_CACHELINE_MASK))
+ if (!xe->info.has_mem_copy_instr &&
+ drm_WARN_ON(&xe->drm,
+ (!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK))
return ERR_PTR(-EOPNOTSUPP);
xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);
- batch_size += pte_update_cmd_size(len);
+ batch_size += pte_update_cmd_size(npages << PAGE_SHIFT);
batch_size += EMIT_COPY_DW;
bb = xe_bb_new(gt, batch_size, use_usm_batch);
@@ -1644,16 +2095,44 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
return ERR_PTR(err);
}
- build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
- sram_addr, len + sram_offset);
+ /*
+ * If the order of a struct drm_pagemap_addr entry is greater than 0,
+ * the entry is populated by GPU pagemap but subsequent entries within
+ * the range of that order are not populated.
+ * build_pt_update_batch_sram() expects a fully populated array of
+ * struct drm_pagemap_addr. Ensure this is the case even with higher
+ * orders.
+ */
+ for (i = 0; !use_pde && i < npages;) {
+ unsigned int order = sram_addr[i].order;
+
+ for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
+ if (!sram_addr[i + j].addr)
+ sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE;
+
+ i += NR_PAGES(order);
+ }
+
+ if (use_pde)
+ build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes,
+ sram_addr, npages << PAGE_SHIFT, 1);
+ else
+ build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
+ sram_addr, npages << PAGE_SHIFT, 0);
if (dir == XE_MIGRATE_COPY_TO_VRAM) {
- src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
+ if (use_pde)
+ src_L0_ofs = m->large_page_copy_ofs + sram_offset;
+ else
+ src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
} else {
src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
- dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
+ if (use_pde)
+ dst_L0_ofs = m->large_page_copy_ofs + sram_offset;
+ else
+ dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
}
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
@@ -1669,7 +2148,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
goto err;
}
- xe_sched_job_add_migrate_flush(job, 0);
+ xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
@@ -1694,17 +2173,17 @@ err:
* xe_migrate_to_vram() - Migrate to VRAM
* @m: The migration context.
* @npages: Number of pages to migrate.
- * @src_addr: Array of dma addresses (source of migrate)
+ * @src_addr: Array of DMA information (source of migrate)
* @dst_addr: Device physical address of VRAM (destination of migrate)
*
* Copy from an array of DMA addresses to a VRAM device physical address
*
- * Return: dma fence for migrate to signal completion on succees, ERR_PTR on
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
- dma_addr_t *src_addr,
+ struct drm_pagemap_addr *src_addr,
u64 dst_addr)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
@@ -1716,71 +2195,75 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
* @m: The migration context.
* @npages: Number of pages to migrate.
* @src_addr: Device physical address of VRAM (source of migrate)
- * @dst_addr: Array of dma addresses (destination of migrate)
+ * @dst_addr: Array of DMA information (destination of migrate)
*
* Copy from a VRAM device physical address to an array of DMA addresses
*
- * Return: dma fence for migrate to signal completion on succees, ERR_PTR on
+ * Return: dma fence for migrate to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- dma_addr_t *dst_addr)
+ struct drm_pagemap_addr *dst_addr)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
XE_MIGRATE_COPY_TO_SRAM);
}
-static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
+static void xe_migrate_dma_unmap(struct xe_device *xe,
+ struct drm_pagemap_addr *pagemap_addr,
int len, int write)
{
unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
for (i = 0; i < npages; ++i) {
- if (!dma_addr[i])
+ if (!pagemap_addr[i].addr)
break;
- dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
+ dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE,
write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}
- kfree(dma_addr);
+ kfree(pagemap_addr);
}
-static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
- void *buf, int len, int write)
+static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe,
+ void *buf, int len,
+ int write)
{
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);
- dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
- if (!dma_addr)
+ pagemap_addr = kcalloc(npages, sizeof(*pagemap_addr), GFP_KERNEL);
+ if (!pagemap_addr)
return ERR_PTR(-ENOMEM);
for (i = 0; i < npages; ++i) {
dma_addr_t addr;
struct page *page;
+ enum dma_data_direction dir = write ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE;
if (is_vmalloc_addr(buf))
page = vmalloc_to_page(buf);
else
page = virt_to_page(buf);
- addr = dma_map_page(xe->drm.dev,
- page, 0, PAGE_SIZE,
- write ? DMA_TO_DEVICE :
- DMA_FROM_DEVICE);
+ addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir);
if (dma_mapping_error(xe->drm.dev, addr))
goto err_fault;
- dma_addr[i] = addr;
+ pagemap_addr[i] =
+ drm_pagemap_addr_encode(addr,
+ DRM_INTERCONNECT_SYSTEM,
+ 0, dir);
buf += PAGE_SIZE;
}
- return dma_addr;
+ return pagemap_addr;
err_fault:
- xe_migrate_dma_unmap(xe, dma_addr, len, write);
+ xe_migrate_dma_unmap(xe, pagemap_addr, len, write);
return ERR_PTR(-EFAULT);
}
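A sketch of the map/copy/unmap lifecycle, mirroring how xe_migrate_access_memory() consumes the array below. It assumes len is page-aligned and waits on the fence before unmapping so the device is done with the pages; the helper name is hypothetical:

/* Hypothetical: push a page-aligned kernel buffer to a VRAM address. */
static int example_write_vram(struct xe_migrate *m, struct xe_device *xe,
			      void *buf, int len, u64 vram_addr)
{
	struct drm_pagemap_addr *addrs;
	struct dma_fence *fence;
	int err = 0;

	addrs = xe_migrate_dma_map(xe, buf, len, true /* write */);
	if (IS_ERR(addrs))
		return PTR_ERR(addrs);

	fence = xe_migrate_to_vram(m, len / PAGE_SIZE, addrs, vram_addr);
	if (IS_ERR(fence)) {
		err = PTR_ERR(fence);
	} else {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}

	xe_migrate_dma_unmap(xe, addrs, len, true);
	return err;
}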
@@ -1809,7 +2292,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
struct xe_device *xe = tile_to_xe(tile);
struct xe_res_cursor cursor;
struct dma_fence *fence = NULL;
- dma_addr_t *dma_addr;
+ struct drm_pagemap_addr *pagemap_addr;
unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
int bytes_left = len, current_page = 0;
void *orig_buf = buf;
@@ -1817,8 +2300,10 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
xe_bo_assert_held(bo);
/* Use bounce buffer for small access and unaligned access */
- if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
- !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
+ if (!xe->info.has_mem_copy_instr &&
+ (!IS_ALIGNED(len, 4) ||
+ !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) ||
+ !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) {
int buf_offset = 0;
void *bounce;
int err;
@@ -1869,9 +2354,9 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
return err;
}
- dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
- if (IS_ERR(dma_addr))
- return PTR_ERR(dma_addr);
+ pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
+ if (IS_ERR(pagemap_addr))
+ return PTR_ERR(pagemap_addr);
xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);
@@ -1880,6 +2365,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
cursor.start;
int current_bytes;
+ u32 pitch;
if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
current_bytes = min_t(int, bytes_left,
@@ -1887,15 +2373,17 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
else
current_bytes = min_t(int, bytes_left, cursor.size);
- if (current_bytes & ~PAGE_MASK) {
- int pitch = 4;
-
- current_bytes = min_t(int, current_bytes, S16_MAX * pitch);
- }
+ pitch = xe_migrate_copy_pitch(xe, current_bytes);
+ if (xe->info.has_mem_copy_instr)
+ current_bytes = min_t(int, current_bytes, U16_MAX * pitch);
+ else
+ current_bytes = min_t(int, current_bytes,
+ round_down(S16_MAX * pitch,
+ XE_CACHELINE_BYTES));
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
- dma_addr + current_page,
+ &pagemap_addr[current_page],
vram_addr, write ?
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
@@ -1923,10 +2411,60 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
dma_fence_put(fence);
out_err:
- xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
+ xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write);
return IS_ERR(fence) ? PTR_ERR(fence) : 0;
}
+/**
+ * xe_migrate_job_lock() - Lock migrate job lock
+ * @m: The migration context.
+ * @q: Queue associated with the operation which requires a lock
+ *
+ * Lock the migrate job lock if the queue is a migration queue, otherwise
+ * assert the VM's dma-resv is held (user queues have their own locking).
+ */
+void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
+{
+ bool is_migrate = q == m->q;
+
+ if (is_migrate)
+ mutex_lock(&m->job_mutex);
+ else
+ xe_vm_assert_held(q->vm); /* User queue VMs should be locked */
+}
+
+/**
+ * xe_migrate_job_unlock() - Unlock migrate job lock
+ * @m: The migration context.
+ * @q: Queue associated with the operation which requires a lock
+ *
+ * Unlock the migrate job lock if the queue is a migration queue, otherwise
+ * assert the VM's dma-resv is held (user queues have their own locking).
+ */
+void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
+{
+ bool is_migrate = q == m->q;
+
+ if (is_migrate)
+ mutex_unlock(&m->job_mutex);
+ else
+ xe_vm_assert_held(q->vm); /* User queue VMs should be locked */
+}
+
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+/**
+ * xe_migrate_job_lock_assert() - Assert that the migrate job lock of @q is held
+ * @q: Migrate queue
+ */
+void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
+{
+ struct xe_migrate *m = gt_to_tile(q->gt)->migrate;
+
+ xe_gt_assert(q->gt, q == m->q);
+ lockdep_assert_held(&m->job_mutex);
+}
+#endif
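A sketch of how a submission path brackets arm/push with these helpers; with the migrate queue this takes m->job_mutex, with a user queue it merely asserts the VM lock is already held. The helper name is hypothetical, the calls are from this file:

/* Hypothetical: serialize job arm/push against other users of the queue. */
static void example_submit(struct xe_migrate *m, struct xe_exec_queue *q,
			   struct xe_sched_job *job)
{
	xe_migrate_job_lock(m, q);
	xe_sched_job_arm(job);
	xe_sched_job_push(job);
	xe_migrate_job_unlock(m, q);
}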
+
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index fb9839c1bae0..260e298e5dd7 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -9,11 +9,13 @@
#include <linux/types.h>
struct dma_fence;
+struct drm_pagemap_addr;
struct iosys_map;
struct ttm_resource;
struct xe_bo;
struct xe_gt;
+struct xe_tlb_inval_job;
struct xe_exec_queue;
struct xe_migrate;
struct xe_migrate_pt_update;
@@ -24,6 +26,13 @@ struct xe_vm;
struct xe_vm_pgtable_update;
struct xe_vma;
+enum xe_sriov_vf_ccs_rw_ctxs;
+
+enum xe_migrate_copy_dir {
+ XE_MIGRATE_COPY_TO_VRAM,
+ XE_MIGRATE_COPY_TO_SRAM,
+};
+
/**
* struct xe_migrate_pt_update_ops - Callbacks for the
* xe_migrate_update_pgtables() function.
@@ -89,21 +98,30 @@ struct xe_migrate_pt_update {
struct xe_vma_ops *vops;
/** @job: The job if a GPU page-table update. NULL otherwise */
struct xe_sched_job *job;
+ /**
+ * @ijob: The TLB invalidation job for the primary GT, or NULL if none
+ */
+ struct xe_tlb_inval_job *ijob;
+ /**
+ * @mjob: The TLB invalidation job for the media GT, or NULL if none
+ */
+ struct xe_tlb_inval_job *mjob;
/** @tile_id: Tile ID of the update */
u8 tile_id;
};
-struct xe_migrate *xe_migrate_init(struct xe_tile *tile);
+struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile);
+int xe_migrate_init(struct xe_migrate *m);
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
- dma_addr_t *src_addr,
+ struct drm_pagemap_addr *src_addr,
u64 dst_addr);
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- dma_addr_t *dst_addr);
+ struct drm_pagemap_addr *dst_addr);
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
@@ -112,6 +130,15 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct ttm_resource *dst,
bool copy_only_ccs);
+int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q,
+ struct xe_bo *src_bo,
+ enum xe_sriov_vf_ccs_rw_ctxs read_write);
+
+struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate);
+struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate);
+struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset,
+ struct xe_bo *sysmem_bo, u64 sysmem_offset,
+ u64 size, enum xe_migrate_copy_dir dir);
int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
unsigned long offset, void *buf, int len,
int write);
@@ -133,5 +160,15 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
void xe_migrate_wait(struct xe_migrate *m);
-struct xe_exec_queue *xe_tile_migrate_exec_queue(struct xe_tile *tile);
+#if IS_ENABLED(CONFIG_PROVE_LOCKING)
+void xe_migrate_job_lock_assert(struct xe_exec_queue *q);
+#else
+static inline void xe_migrate_job_lock_assert(struct xe_exec_queue *q)
+{
+}
+#endif
+
+void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q);
+void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_migrate_doc.h b/drivers/gpu/drm/xe/xe_migrate_doc.h
index 63c7d67b5b62..c082bc0b7068 100644
--- a/drivers/gpu/drm/xe/xe_migrate_doc.h
+++ b/drivers/gpu/drm/xe/xe_migrate_doc.h
@@ -9,7 +9,7 @@
/**
* DOC: Migrate Layer
*
- * The XE migrate layer is used generate jobs which can copy memory (eviction),
+ * The Xe migrate layer is used to generate jobs which can copy memory (eviction),
* clear memory, or program tables (binds). This layer exists in every GT, has
* a migrate engine, and uses a special VM for all generated jobs.
*
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index e4db8d58ea2d..350dca1f0925 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -58,7 +58,6 @@ static void tiles_fini(void *arg)
static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
{
struct xe_tile *tile;
- struct xe_gt *gt;
u8 id;
/*
@@ -68,38 +67,6 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size)
if (xe->info.tile_count == 1)
return;
- /* Possibly override number of tile based on configuration register */
- if (!xe->info.skip_mtcfg) {
- struct xe_mmio *mmio = xe_root_tile_mmio(xe);
- u8 tile_count, gt_count;
- u32 mtcfg;
-
- /*
- * Although the per-tile mmio regs are not yet initialized, this
- * is fine as it's going to the root tile's mmio, that's
- * guaranteed to be initialized earlier in xe_mmio_probe_early()
- */
- mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
- tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
-
- if (tile_count < xe->info.tile_count) {
- drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
- xe->info.tile_count, tile_count);
- xe->info.tile_count = tile_count;
-
- /*
- * We've already setup gt_count according to the full
- * tile count. Re-calculate it to only include the GTs
- * that belong to the remaining tile(s).
- */
- gt_count = 0;
- for_each_gt(gt, xe, id)
- if (gt->info.id < tile_count * xe->info.max_gt_per_tile)
- gt_count++;
- xe->info.gt_count = gt_count;
- }
- }
-
for_each_remote_tile(tile, xe, id)
xe_mmio_init(&tile->mmio, tile, xe->mmio.regs + id * tile_mmio_size, SZ_4M);
}
@@ -412,3 +379,32 @@ int xe_mmio_wait32_not(struct xe_mmio *mmio, struct xe_reg reg, u32 mask, u32 va
{
return __xe_mmio_wait32(mmio, reg, mask, val, timeout_us, out_val, atomic, false);
}
+
+#ifdef CONFIG_PCI_IOV
+static size_t vf_regs_stride(struct xe_device *xe)
+{
+ return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000;
+}
+
+/**
+ * xe_mmio_init_vf_view() - Initialize an MMIO instance for accesses like the VF
+ * @mmio: the target &xe_mmio to initialize as VF's view
+ * @base: the source &xe_mmio to initialize from
+ * @vfid: the VF identifier
+ */
+void xe_mmio_init_vf_view(struct xe_mmio *mmio, const struct xe_mmio *base, unsigned int vfid)
+{
+ struct xe_tile *tile = base->tile;
+ struct xe_device *xe = tile->xe;
+ size_t offset = vf_regs_stride(xe) * vfid;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, vfid);
+ xe_assert(xe, !base->sriov_vf_gt);
+ xe_assert(xe, base->regs_size > offset);
+
+ *mmio = *base;
+ mmio->regs += offset;
+ mmio->regs_size -= offset;
+}
+#endif
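A sketch of a PF-side helper reading a register through a VF's window, assuming the register block is replicated per VF at the stride computed above; the helper name is hypothetical:

/* Hypothetical: read a register exactly as VF number vfid would see it. */
static u32 example_read_as_vf(const struct xe_mmio *base, unsigned int vfid,
			      struct xe_reg reg)
{
	struct xe_mmio vf_view;

	xe_mmio_init_vf_view(&vf_view, base, vfid);
	return xe_mmio_read32(&vf_view, reg);
}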
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index c151ba569003..15362789ab99 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -42,4 +42,8 @@ static inline struct xe_mmio *xe_root_tile_mmio(struct xe_device *xe)
return &xe->tiles[0].mmio;
}
+#ifdef CONFIG_PCI_IOV
+void xe_mmio_init_vf_view(struct xe_mmio *mmio, const struct xe_mmio *base, unsigned int vfid);
+#endif
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_mmio_gem.c b/drivers/gpu/drm/xe/xe_mmio_gem.c
new file mode 100644
index 000000000000..9a97c4387e4f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mmio_gem.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_mmio_gem.h"
+
+#include <drm/drm_drv.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
+
+#include "xe_device_types.h"
+
+/**
+ * DOC: Exposing MMIO regions to userspace
+ *
+ * In certain cases, the driver may allow userspace to mmap a portion of the hardware registers.
+ *
+ * This can be done as follows:
+ * 1. Call xe_mmio_gem_create() to create a GEM object with an mmap-able fake offset.
+ * 2. Use xe_mmio_gem_mmap_offset() on the created GEM object to retrieve the fake offset.
+ * 3. Provide the fake offset to userspace.
+ * 4. Userspace can call mmap with the fake offset. The length provided to mmap
+ * must match the size of the GEM object.
+ * 5. When the region is no longer needed, call xe_mmio_gem_destroy() to release the GEM object.
+ *
+ * NOTE: The exposed MMIO region must be page-aligned with regard to its BAR offset and size.
+ *
+ * WARNING: Exposing MMIO regions to userspace can have security and stability implications.
+ * Make sure not to expose any sensitive registers.
+ */
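A condensed sketch of steps 1-3, with a hypothetical ioctl handler as the caller; step 5 (xe_mmio_gem_destroy()) runs when the region is torn down:

/* Hypothetical ioctl: hand userspace an mmap-able window onto registers. */
static int example_expose_mmio(struct xe_device *xe, struct drm_file *file,
			       phys_addr_t regs, size_t size, u64 *mmap_offset)
{
	struct xe_mmio_gem *gem = xe_mmio_gem_create(xe, file, regs, size);

	if (IS_ERR(gem))
		return PTR_ERR(gem);

	*mmap_offset = xe_mmio_gem_mmap_offset(gem); /* step 3: to userspace */
	return 0;
}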
+
+static void xe_mmio_gem_free(struct drm_gem_object *);
+static int xe_mmio_gem_mmap(struct drm_gem_object *, struct vm_area_struct *);
+static vm_fault_t xe_mmio_gem_vm_fault(struct vm_fault *);
+
+struct xe_mmio_gem {
+ struct drm_gem_object base;
+ phys_addr_t phys_addr;
+};
+
+static const struct vm_operations_struct vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+ .fault = xe_mmio_gem_vm_fault,
+};
+
+static const struct drm_gem_object_funcs xe_mmio_gem_funcs = {
+ .free = xe_mmio_gem_free,
+ .mmap = xe_mmio_gem_mmap,
+ .vm_ops = &vm_ops,
+};
+
+static inline struct xe_mmio_gem *to_xe_mmio_gem(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct xe_mmio_gem, base);
+}
+
+/**
+ * xe_mmio_gem_create - Expose an MMIO region to userspace
+ * @xe: The xe device
+ * @file: DRM file descriptor
+ * @phys_addr: Start of the exposed MMIO region
+ * @size: The size of the exposed MMIO region
+ *
+ * This function creates a GEM object that exposes an MMIO region with an mmap-able
+ * fake offset.
+ *
+ * See: "Exposing MMIO regions to userspace"
+ */
+struct xe_mmio_gem *xe_mmio_gem_create(struct xe_device *xe, struct drm_file *file,
+ phys_addr_t phys_addr, size_t size)
+{
+ struct xe_mmio_gem *obj;
+ struct drm_gem_object *base;
+ int err;
+
+ if ((phys_addr % PAGE_SIZE != 0) || (size % PAGE_SIZE != 0))
+ return ERR_PTR(-EINVAL);
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ base = &obj->base;
+ base->funcs = &xe_mmio_gem_funcs;
+ obj->phys_addr = phys_addr;
+
+ drm_gem_private_object_init(&xe->drm, base, size);
+
+ err = drm_gem_create_mmap_offset(base);
+ if (err)
+ goto free_gem;
+
+ err = drm_vma_node_allow(&base->vma_node, file);
+ if (err)
+ goto free_gem;
+
+ return obj;
+
+free_gem:
+ xe_mmio_gem_free(base);
+ return ERR_PTR(err);
+}
+
+/**
+ * xe_mmio_gem_mmap_offset - Return the mmap-able fake offset
+ * @gem: the GEM object created with xe_mmio_gem_create()
+ *
+ * This function returns the mmap-able fake offset allocated during
+ * xe_mmio_gem_create().
+ *
+ * See: "Exposing MMIO regions to userspace"
+ */
+u64 xe_mmio_gem_mmap_offset(struct xe_mmio_gem *gem)
+{
+ return drm_vma_node_offset_addr(&gem->base.vma_node);
+}
+
+static void xe_mmio_gem_free(struct drm_gem_object *base)
+{
+ struct xe_mmio_gem *obj = to_xe_mmio_gem(base);
+
+ drm_gem_object_release(base);
+ kfree(obj);
+}
+
+/**
+ * xe_mmio_gem_destroy - Destroy the GEM object that exposes an MMIO region
+ * @gem: the GEM object to destroy
+ *
+ * This function releases resources associated with the GEM object created by
+ * xe_mmio_gem_create().
+ *
+ * See: "Exposing MMIO regions to userspace"
+ */
+void xe_mmio_gem_destroy(struct xe_mmio_gem *gem)
+{
+ xe_mmio_gem_free(&gem->base);
+}
+
+static int xe_mmio_gem_mmap(struct drm_gem_object *base, struct vm_area_struct *vma)
+{
+ if (vma->vm_end - vma->vm_start != base->size)
+ return -EINVAL;
+
+ if ((vma->vm_flags & VM_SHARED) == 0)
+ return -EINVAL;
+
+ /* Set vm_pgoff (used as a fake buffer offset by DRM) to 0 */
+ vma->vm_pgoff = 0;
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
+
+ /* Defer actual mapping to the fault handler. */
+ return 0;
+}
+
+static void xe_mmio_gem_release_dummy_page(struct drm_device *dev, void *res)
+{
+ __free_page((struct page *)res);
+}
+
+static vm_fault_t xe_mmio_gem_vm_fault_dummy_page(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *base = vma->vm_private_data;
+ struct drm_device *dev = base->dev;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ struct page *page;
+ unsigned long pfn;
+ unsigned long i;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return VM_FAULT_OOM;
+
+ if (drmm_add_action_or_reset(dev, xe_mmio_gem_release_dummy_page, page))
+ return VM_FAULT_OOM;
+
+ pfn = page_to_pfn(page);
+
+ /* Map the entire VMA to the same dummy page */
+ for (i = 0; i < base->size; i += PAGE_SIZE) {
+ unsigned long addr = vma->vm_start + i;
+
+ ret = vmf_insert_pfn(vma, addr, pfn);
+ if (ret & VM_FAULT_ERROR)
+ break;
+ }
+
+ return ret;
+}
+
+static vm_fault_t xe_mmio_gem_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *base = vma->vm_private_data;
+ struct xe_mmio_gem *obj = to_xe_mmio_gem(base);
+ struct drm_device *dev = base->dev;
+ vm_fault_t ret = VM_FAULT_NOPAGE;
+ unsigned long i;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx)) {
+ /*
+ * Provide a dummy page to avoid SIGBUS for events such as hot-unplug.
+ * This gives userspace the option to recover instead of crashing.
+ * It is assumed that userspace will receive the notification via some
+ * other channel (e.g. drm uevent).
+ */
+ return xe_mmio_gem_vm_fault_dummy_page(vma);
+ }
+
+ for (i = 0; i < base->size; i += PAGE_SIZE) {
+ unsigned long addr = vma->vm_start + i;
+ unsigned long phys_addr = obj->phys_addr + i;
+
+ ret = vmf_insert_pfn(vma, addr, PHYS_PFN(phys_addr));
+ if (ret & VM_FAULT_ERROR)
+ break;
+ }
+
+ drm_dev_exit(idx);
+ return ret;
+}
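Tying the DOC steps above together, a hedged sketch of a kernel-side caller; the wrapper name and surrounding plumbing are hypothetical, only the xe_mmio_gem_*() calls come from this new file.

/* Hypothetical helper: expose one page of registers and return the fake offset */
static int expose_one_page(struct xe_device *xe, struct drm_file *file,
                           phys_addr_t regs, u64 *mmap_offset)
{
        struct xe_mmio_gem *gem;

        /* both the BAR offset and the size must be page-aligned */
        gem = xe_mmio_gem_create(xe, file, regs, PAGE_SIZE);
        if (IS_ERR(gem))
                return PTR_ERR(gem);

        *mmap_offset = xe_mmio_gem_mmap_offset(gem);
        /* userspace then maps it: mmap(NULL, PAGE_SIZE, ..., MAP_SHARED, fd, offset) */
        return 0;
}

When the region is no longer needed, the owner calls xe_mmio_gem_destroy() on the returned object, matching step 5 of the DOC comment.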
diff --git a/drivers/gpu/drm/xe/xe_mmio_gem.h b/drivers/gpu/drm/xe/xe_mmio_gem.h
new file mode 100644
index 000000000000..4b76d5586ebb
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_mmio_gem.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_MMIO_GEM_H_
+#define _XE_MMIO_GEM_H_
+
+#include <linux/types.h>
+
+struct drm_file;
+struct xe_device;
+struct xe_mmio_gem;
+
+struct xe_mmio_gem *xe_mmio_gem_create(struct xe_device *xe, struct drm_file *file,
+ phys_addr_t phys_addr, size_t size);
+u64 xe_mmio_gem_mmap_offset(struct xe_mmio_gem *gem);
+void xe_mmio_gem_destroy(struct xe_mmio_gem *gem);
+
+#endif /* _XE_MMIO_GEM_H_ */
diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c
index 0c737413fcb6..6613d3b48a84 100644
--- a/drivers/gpu/drm/xe/xe_mocs.c
+++ b/drivers/gpu/drm/xe/xe_mocs.c
@@ -568,6 +568,23 @@ static const struct xe_mocs_ops xe2_mocs_ops = {
.dump = xe2_mocs_dump,
};
+/*
+ * Note that the "L3" and "L4" register fields actually control the L2 and L3
+ * caches respectively on this platform.
+ */
+static const struct xe_mocs_entry xe3p_xpc_mocs_table[] = {
+ /* Defer to PAT */
+ MOCS_ENTRY(0, XE2_L3_0_WB | L4_3_UC, 0),
+ /* UC */
+ MOCS_ENTRY(1, IG_PAT | XE2_L3_3_UC | L4_3_UC, 0),
+ /* L2 */
+ MOCS_ENTRY(2, IG_PAT | XE2_L3_0_WB | L4_3_UC, 0),
+ /* L3 */
+ MOCS_ENTRY(3, IG_PAT | XE2_L3_3_UC | L4_0_WB, 0),
+ /* L2 + L3 */
+ MOCS_ENTRY(4, IG_PAT | XE2_L3_0_WB | L4_0_WB, 0),
+};
+
static unsigned int get_mocs_settings(struct xe_device *xe,
struct xe_mocs_info *info)
{
@@ -576,6 +593,16 @@ static unsigned int get_mocs_settings(struct xe_device *xe,
memset(info, 0, sizeof(struct xe_mocs_info));
switch (xe->info.platform) {
+ case XE_CRESCENTISLAND:
+ info->ops = &xe2_mocs_ops;
+ info->table_size = ARRAY_SIZE(xe3p_xpc_mocs_table);
+ info->table = xe3p_xpc_mocs_table;
+ info->num_mocs_regs = XE2_NUM_MOCS_ENTRIES;
+ info->uc_index = 1;
+ info->wb_index = 4;
+ info->unused_entries_index = 4;
+ break;
+ case XE_NOVALAKE_S:
case XE_PANTHERLAKE:
case XE_LUNARLAKE:
case XE_BATTLEMAGE:
@@ -772,12 +799,20 @@ void xe_mocs_init(struct xe_gt *gt)
init_l3cc_table(gt, &table);
}
-void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_mocs_dump() - Dump MOCS table.
+ * @gt: the &xe_gt with MOCS table
+ * @p: the &drm_printer to dump info to
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
enum xe_force_wake_domains domain;
struct xe_mocs_info table;
unsigned int fw_ref, flags;
+ int err = 0;
flags = get_mocs_settings(xe, &table);
@@ -785,14 +820,17 @@ void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p)
xe_pm_runtime_get_noresume(xe);
fw_ref = xe_force_wake_get(gt_to_fw(gt), domain);
- if (!xe_force_wake_ref_has_domain(fw_ref, domain))
+ if (!xe_force_wake_ref_has_domain(fw_ref, domain)) {
+ err = -ETIMEDOUT;
goto err_fw;
+ }
table.ops->dump(&table, flags, gt, p);
err_fw:
xe_force_wake_put(gt_to_fw(gt), fw_ref);
xe_pm_runtime_put(xe);
+ return err;
}
#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
diff --git a/drivers/gpu/drm/xe/xe_mocs.h b/drivers/gpu/drm/xe/xe_mocs.h
index dc972ffd4d07..f00bbb269829 100644
--- a/drivers/gpu/drm/xe/xe_mocs.h
+++ b/drivers/gpu/drm/xe/xe_mocs.h
@@ -11,12 +11,6 @@ struct xe_gt;
void xe_mocs_init_early(struct xe_gt *gt);
void xe_mocs_init(struct xe_gt *gt);
-
-/**
- * xe_mocs_dump - Dump mocs table
- * @gt: GT structure
- * @p: Printer to dump info to
- */
-void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p);
#endif
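With the dump helpers returning an error code instead of printing a silently truncated table, callers can surface forcewake timeouts; a sketch of the assumed caller-side pattern (the wrapper below is hypothetical):

/* Hypothetical debugfs-style caller of the int-returning dump */
static int mocs_show(struct xe_gt *gt, struct drm_printer *p)
{
        int ret = xe_mocs_dump(gt, p);

        if (ret)        /* e.g. -ETIMEDOUT when forcewake could not be acquired */
                drm_printf(p, "MOCS dump failed: %pe\n", ERR_PTR(ret));

        return ret;
}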
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index d9391bd08194..d08338fc3bc1 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -135,24 +135,17 @@ static const struct init_funcs init_funcs[] = {
},
};
-static int __init xe_call_init_func(unsigned int i)
+static int __init xe_call_init_func(const struct init_funcs *func)
{
- if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
- return 0;
- if (!init_funcs[i].init)
- return 0;
-
- return init_funcs[i].init();
+ if (func->init)
+ return func->init();
+ return 0;
}
-static void xe_call_exit_func(unsigned int i)
+static void xe_call_exit_func(const struct init_funcs *func)
{
- if (WARN_ON(i >= ARRAY_SIZE(init_funcs)))
- return;
- if (!init_funcs[i].exit)
- return;
-
- init_funcs[i].exit();
+ if (func->exit)
+ func->exit();
}
static int __init xe_init(void)
@@ -160,10 +153,12 @@ static int __init xe_init(void)
int err, i;
for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
- err = xe_call_init_func(i);
+ err = xe_call_init_func(init_funcs + i);
if (err) {
+ pr_info("%s: module_init aborted at %ps %pe\n",
+ DRIVER_NAME, init_funcs[i].init, ERR_PTR(err));
while (i--)
- xe_call_exit_func(i);
+ xe_call_exit_func(init_funcs + i);
return err;
}
}
@@ -176,7 +171,7 @@ static void __exit xe_exit(void)
int i;
for (i = ARRAY_SIZE(init_funcs) - 1; i >= 0; i--)
- xe_call_exit_func(i);
+ xe_call_exit_func(init_funcs + i);
}
module_init(xe_init);
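The refactor keeps the classic table-driven init with reverse unwind: when entry i fails, while (i--) tears down entries i-1..0 in order and never calls exit on the entry that failed. A minimal standalone sketch of the idiom (all names hypothetical):

struct step {
        int (*init)(void);
        void (*exit)(void);
};

static int setup(const struct step *steps, int count)
{
        int i, err;

        for (i = 0; i < count; i++) {
                err = steps[i].init ? steps[i].init() : 0;
                if (err) {
                        while (i--)             /* unwind i-1 .. 0, reverse order */
                                if (steps[i].exit)
                                        steps[i].exit();
                        return err;
                }
        }
        return 0;
}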
diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c
index 61b0a1531a53..33f4ac82fc80 100644
--- a/drivers/gpu/drm/xe/xe_nvm.c
+++ b/drivers/gpu/drm/xe/xe_nvm.c
@@ -35,21 +35,25 @@ static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = {
static void xe_nvm_release_dev(struct device *dev)
{
+ struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
+ struct intel_dg_nvm_dev *nvm = container_of(aux, struct intel_dg_nvm_dev, aux_dev);
+
+ kfree(nvm);
}
static bool xe_nvm_non_posted_erase(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
if (xe->info.platform != XE_BATTLEMAGE)
return false;
- return !(xe_mmio_read32(&gt->mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
+ return !(xe_mmio_read32(mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) &
NVM_NON_POSTED_ERASE_CHICKEN_BIT);
}
static bool xe_nvm_writable_override(struct xe_device *xe)
{
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_mmio *mmio = xe_root_tile_mmio(xe);
bool writable_override;
resource_size_t base;
@@ -72,7 +76,7 @@ static bool xe_nvm_writable_override(struct xe_device *xe)
}
writable_override =
- !(xe_mmio_read32(&gt->mmio, HECI_FWSTS2(base)) &
+ !(xe_mmio_read32(mmio, HECI_FWSTS2(base)) &
HECI_FW_STATUS_2_NVM_ACCESS_MODE);
if (writable_override)
drm_info(&xe->drm, "NVM access overridden by jumper\n");
@@ -162,6 +166,5 @@ void xe_nvm_fini(struct xe_device *xe)
auxiliary_device_delete(&nvm->aux_dev);
auxiliary_device_uninit(&nvm->aux_dev);
- kfree(nvm);
xe->nvm = NULL;
}
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 5729e7d3e335..890c363282ae 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -10,6 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
+#include <drm/drm_syncobj.h>
#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
@@ -822,7 +823,7 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
u32 sqcnt1;
/* Enable thread stall DOP gating and EU DOP gating. */
- if (XE_WA(stream->gt, 1508761755)) {
+ if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -837,7 +838,8 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
xe_oa_configure_oa_context(stream, false);
/* Make sure we disable noa to save power. */
- xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0);
+ if (GT_VER(stream->gt) < 35)
+ xe_mmio_rmw32(mmio, RPM_CONFIG1, GT_NOA_ENABLE, 0);
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
@@ -868,7 +870,7 @@ static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
xe_oa_free_oa_buffer(stream);
- xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ xe_force_wake_put(gt_to_fw(gt), stream->fw_ref);
xe_pm_runtime_put(stream->oa->xe);
/* Wa_1509372804:pvc: Unset the override of GUCRC mode to enable rc6 */
@@ -883,9 +885,9 @@ static int xe_oa_alloc_oa_buffer(struct xe_oa_stream *stream, size_t size)
{
struct xe_bo *bo;
- bo = xe_bo_create_pin_map(stream->oa->xe, stream->gt->tile, NULL,
- size, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(stream->oa->xe, stream->gt->tile,
+ size, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT, false);
if (IS_ERR(bo))
return PTR_ERR(bo);
@@ -1079,7 +1081,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
* EU NOA signals behave incorrectly if EU clock gating is enabled.
* Disable thread stall DOP gating and EU DOP gating.
*/
- if (XE_WA(stream->gt, 1508761755)) {
+ if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
_MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
@@ -1389,7 +1391,9 @@ static int xe_oa_user_extensions(struct xe_oa *oa, enum xe_oa_user_extn_from fro
return 0;
}
-static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
+static int xe_oa_parse_syncs(struct xe_oa *oa,
+ struct xe_oa_stream *stream,
+ struct xe_oa_open_param *param)
{
int ret, num_syncs, num_ufence = 0;
@@ -1409,7 +1413,9 @@ static int xe_oa_parse_syncs(struct xe_oa *oa, struct xe_oa_open_param *param)
for (num_syncs = 0; num_syncs < param->num_syncs; num_syncs++) {
ret = xe_sync_entry_parse(oa->xe, param->xef, &param->syncs[num_syncs],
- &param->syncs_user[num_syncs], 0);
+ &param->syncs_user[num_syncs],
+ stream->ufence_syncobj,
+ ++stream->ufence_timeline_value, 0);
if (ret)
goto err_syncs;
@@ -1539,7 +1545,7 @@ static long xe_oa_config_locked(struct xe_oa_stream *stream, u64 arg)
return -ENODEV;
param.xef = stream->xef;
- err = xe_oa_parse_syncs(stream->oa, &param);
+ err = xe_oa_parse_syncs(stream->oa, stream, &param);
if (err)
goto err_config_put;
@@ -1635,6 +1641,7 @@ static void xe_oa_destroy_locked(struct xe_oa_stream *stream)
if (stream->exec_q)
xe_exec_queue_put(stream->exec_q);
+ drm_syncobj_put(stream->ufence_syncobj);
kfree(stream);
}
@@ -1710,7 +1717,6 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
struct xe_oa_open_param *param)
{
struct xe_gt *gt = param->hwe->gt;
- unsigned int fw_ref;
int ret;
stream->exec_q = param->exec_q;
@@ -1754,7 +1760,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
* GuC reset of engines causes OA to lose configuration
* state. Prevent this by overriding GUCRC mode.
*/
- if (XE_WA(stream->gt, 1509372804)) {
+ if (XE_GT_WA(stream->gt, 1509372804)) {
ret = xe_guc_pc_override_gucrc_mode(&gt->uc.guc.pc,
SLPC_GUCRC_MODE_GUCRC_NO_RC6);
if (ret)
@@ -1765,8 +1771,8 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
/* Take runtime pm ref and forcewake to disable RC6 */
xe_pm_runtime_get(stream->oa->xe);
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
- if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
+ stream->fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
+ if (!xe_force_wake_ref_has_domain(stream->fw_ref, XE_FORCEWAKE_ALL)) {
ret = -ETIMEDOUT;
goto err_fw_put;
}
@@ -1811,7 +1817,7 @@ err_put_k_exec_q:
err_free_oa_buf:
xe_oa_free_oa_buffer(stream);
err_fw_put:
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ xe_force_wake_put(gt_to_fw(gt), stream->fw_ref);
xe_pm_runtime_put(stream->oa->xe);
if (stream->override_gucrc)
xe_gt_WARN_ON(gt, xe_guc_pc_unset_gucrc_mode(&gt->uc.guc.pc));
@@ -1826,6 +1832,7 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
struct xe_oa_open_param *param)
{
struct xe_oa_stream *stream;
+ struct drm_syncobj *ufence_syncobj;
int stream_fd;
int ret;
@@ -1836,17 +1843,31 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
goto exit;
}
+ ret = drm_syncobj_create(&ufence_syncobj, DRM_SYNCOBJ_CREATE_SIGNALED,
+ NULL);
+ if (ret)
+ goto exit;
+
stream = kzalloc(sizeof(*stream), GFP_KERNEL);
if (!stream) {
ret = -ENOMEM;
- goto exit;
+ goto err_syncobj;
}
-
+ stream->ufence_syncobj = ufence_syncobj;
stream->oa = oa;
- ret = xe_oa_stream_init(stream, param);
+
+ ret = xe_oa_parse_syncs(oa, stream, param);
if (ret)
goto err_free;
+ ret = xe_oa_stream_init(stream, param);
+ if (ret) {
+ while (param->num_syncs--)
+ xe_sync_entry_cleanup(&param->syncs[param->num_syncs]);
+ kfree(param->syncs);
+ goto err_free;
+ }
+
if (!param->disabled) {
ret = xe_oa_enable_locked(stream);
if (ret)
@@ -1870,6 +1891,8 @@ err_destroy:
xe_oa_stream_destroy(stream);
err_free:
kfree(stream);
+err_syncobj:
+ drm_syncobj_put(ufence_syncobj);
exit:
return ret;
}
@@ -1886,7 +1909,7 @@ u32 xe_oa_timestamp_frequency(struct xe_gt *gt)
{
u32 reg, shift;
- if (XE_WA(gt, 18013179988) || XE_WA(gt, 14015568240)) {
+ if (XE_GT_WA(gt, 18013179988) || XE_GT_WA(gt, 14015568240)) {
xe_pm_runtime_get(gt_to_xe(gt));
reg = xe_mmio_read32(&gt->mmio, RPM_CONFIG0);
xe_pm_runtime_put(gt_to_xe(gt));
@@ -2083,22 +2106,14 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
goto err_exec_q;
}
- ret = xe_oa_parse_syncs(oa, &param);
- if (ret)
- goto err_exec_q;
-
mutex_lock(&param.hwe->gt->oa.gt_lock);
ret = xe_oa_stream_open_ioctl_locked(oa, &param);
mutex_unlock(&param.hwe->gt->oa.gt_lock);
if (ret < 0)
- goto err_sync_cleanup;
+ goto err_exec_q;
return ret;
-err_sync_cleanup:
- while (param.num_syncs--)
- xe_sync_entry_cleanup(&param.syncs[param.num_syncs]);
- kfree(param.syncs);
err_exec_q:
if (param.exec_q)
xe_exec_queue_put(param.exec_q);
@@ -2388,11 +2403,13 @@ int xe_oa_add_config_ioctl(struct drm_device *dev, u64 data, struct drm_file *fi
goto sysfs_err;
}
- mutex_unlock(&oa->metrics_lock);
+ id = oa_config->id;
- drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, oa_config->id);
+ drm_dbg(&oa->xe->drm, "Added config %s id=%i\n", oa_config->uuid, id);
+
+ mutex_unlock(&oa->metrics_lock);
- return oa_config->id;
+ return id;
sysfs_err:
mutex_unlock(&oa->metrics_lock);
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 2628f78c4e8d..cf080f412189 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -15,6 +15,8 @@
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
+struct drm_syncobj;
+
#define DEFAULT_XE_OA_BUFFER_SIZE SZ_16M
enum xe_oa_report_header {
@@ -248,6 +250,12 @@ struct xe_oa_stream {
/** @xef: xe_file with which the stream was opened */
struct xe_file *xef;
+ /** @ufence_syncobj: User fence syncobj */
+ struct drm_syncobj *ufence_syncobj;
+
+ /** @ufence_timeline_value: User fence timeline value */
+ u64 ufence_timeline_value;
+
/** @last_fence: fence to use in stream destroy when needed */
struct dma_fence *last_fence;
@@ -256,5 +264,8 @@ struct xe_oa_stream {
/** @syncs: syncs to wait on and to signal */
struct xe_sync_entry *syncs;
+
+ /** @fw_ref: Forcewake reference */
+ unsigned int fw_ref;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_pagefault.c b/drivers/gpu/drm/xe/xe_pagefault.c
new file mode 100644
index 000000000000..afb06598b6e1
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pagefault.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/circ_buf.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_managed.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_types.h"
+#include "xe_gt_stats.h"
+#include "xe_hw_engine.h"
+#include "xe_pagefault.h"
+#include "xe_pagefault_types.h"
+#include "xe_svm.h"
+#include "xe_trace_bo.h"
+#include "xe_vm.h"
+
+/**
+ * DOC: Xe page faults
+ *
+ * Xe page faults are handled in two layers. The producer layer interacts with
+ * hardware or firmware to receive and parse faults into struct xe_pagefault,
+ * then forwards them to the consumer. The consumer layer services the faults
+ * (e.g., memory migration, page table updates) and acknowledges the result back
+ * to the producer, which then forwards the results to the hardware or firmware.
+ * The consumer uses a page fault queue sized to absorb all potential faults and
+ * a multi-threaded worker to process them. Multiple producers are supported,
+ * with a single shared consumer.
+ *
+ * xe_pagefault.c implements the consumer layer.
+ */
+
+static int xe_pagefault_entry_size(void)
+{
+ /*
+ * Power of two alignment is not a hardware requirement, rather a
+ * software restriction which makes the math for page fault queue
+ * management simplier.
+ */
+ return roundup_pow_of_two(sizeof(struct xe_pagefault));
+}
+
+static int xe_pagefault_begin(struct drm_exec *exec, struct xe_vma *vma,
+ struct xe_vram_region *vram, bool need_vram_move)
+{
+ struct xe_bo *bo = xe_vma_bo(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ int err;
+
+ err = xe_vm_lock_vma(exec, vma);
+ if (err)
+ return err;
+
+ if (!bo)
+ return 0;
+
+ return need_vram_move ? xe_bo_migrate(bo, vram->placement, NULL, exec) :
+ xe_bo_validate(bo, vm, true, exec);
+}
+
+static int xe_pagefault_handle_vma(struct xe_gt *gt, struct xe_vma *vma,
+ bool atomic)
+{
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
+ struct dma_fence *fence;
+ int err, needs_vram;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ needs_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
+ if (needs_vram < 0 || (needs_vram && xe_vma_is_userptr(vma)))
+ return needs_vram < 0 ? needs_vram : -EACCES;
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, 1);
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_VMA_PAGEFAULT_KB,
+ xe_vma_size(vma) / SZ_1K);
+
+ trace_xe_vma_pagefault(vma);
+
+ /* Check if VMA is valid, opportunistic check only */
+ if (xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
+ vma->tile_invalidated) && !atomic)
+ return 0;
+
+retry_userptr:
+ if (xe_vma_is_userptr(vma) &&
+ xe_vma_userptr_check_repin(to_userptr_vma(vma))) {
+ struct xe_userptr_vma *uvma = to_userptr_vma(vma);
+
+ err = xe_vma_userptr_pin_pages(uvma);
+ if (err)
+ return err;
+ }
+
+ /* Lock VM and BOs dma-resv */
+ xe_validation_ctx_init(&ctx, &vm->xe->val, &exec, (struct xe_val_flags) {});
+ drm_exec_until_all_locked(&exec) {
+ err = xe_pagefault_begin(&exec, vma, tile->mem.vram,
+ needs_vram == 1);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ if (err)
+ goto unlock_dma_resv;
+
+ /* Bind VMA only to the GT that has faulted */
+ trace_xe_vma_pf_bind(vma);
+ xe_vm_set_validation_exec(vm, &exec);
+ fence = xe_vma_rebind(vm, vma, BIT(tile->id));
+ xe_vm_set_validation_exec(vm, NULL);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ xe_validation_retry_on_oom(&ctx, &err);
+ goto unlock_dma_resv;
+ }
+ }
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+unlock_dma_resv:
+ xe_validation_ctx_fini(&ctx);
+ if (err == -EAGAIN)
+ goto retry_userptr;
+
+ return err;
+}
+
+static bool
+xe_pagefault_access_is_atomic(enum xe_pagefault_access_type access_type)
+{
+ return access_type == XE_PAGEFAULT_ACCESS_TYPE_ATOMIC;
+}
+
+static struct xe_vm *xe_pagefault_asid_to_vm(struct xe_device *xe, u32 asid)
+{
+ struct xe_vm *vm;
+
+ down_read(&xe->usm.lock);
+ vm = xa_load(&xe->usm.asid_to_vm, asid);
+ if (vm && xe_vm_in_fault_mode(vm))
+ xe_vm_get(vm);
+ else
+ vm = ERR_PTR(-EINVAL);
+ up_read(&xe->usm.lock);
+
+ return vm;
+}
+
+static int xe_pagefault_service(struct xe_pagefault *pf)
+{
+ struct xe_gt *gt = pf->gt;
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_vm *vm;
+ struct xe_vma *vma = NULL;
+ int err;
+ bool atomic;
+
+ /* Producer flagged this fault to be nacked */
+ if (pf->consumer.fault_level == XE_PAGEFAULT_LEVEL_NACK)
+ return -EFAULT;
+
+ vm = xe_pagefault_asid_to_vm(xe, pf->consumer.asid);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+
+ /*
+ * TODO: Change to read lock? Using write lock for simplicity.
+ */
+ down_write(&vm->lock);
+
+ if (xe_vm_is_closed(vm)) {
+ err = -ENOENT;
+ goto unlock_vm;
+ }
+
+ vma = xe_vm_find_vma_by_addr(vm, pf->consumer.page_addr);
+ if (!vma) {
+ err = -EINVAL;
+ goto unlock_vm;
+ }
+
+ atomic = xe_pagefault_access_is_atomic(pf->consumer.access_type);
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ err = xe_svm_handle_pagefault(vm, vma, gt,
+ pf->consumer.page_addr, atomic);
+ else
+ err = xe_pagefault_handle_vma(gt, vma, atomic);
+
+unlock_vm:
+ if (!err)
+ vm->usm.last_fault_vma = vma;
+ up_write(&vm->lock);
+ xe_vm_put(vm);
+
+ return err;
+}
+
+static bool xe_pagefault_queue_pop(struct xe_pagefault_queue *pf_queue,
+ struct xe_pagefault *pf)
+{
+ bool found_fault = false;
+
+ spin_lock_irq(&pf_queue->lock);
+ if (pf_queue->tail != pf_queue->head) {
+ memcpy(pf, pf_queue->data + pf_queue->tail, sizeof(*pf));
+ pf_queue->tail = (pf_queue->tail + xe_pagefault_entry_size()) %
+ pf_queue->size;
+ found_fault = true;
+ }
+ spin_unlock_irq(&pf_queue->lock);
+
+ return found_fault;
+}
+
+static void xe_pagefault_print(struct xe_pagefault *pf)
+{
+ xe_gt_dbg(pf->gt, "\n\tASID: %d\n"
+ "\tFaulted Address: 0x%08x%08x\n"
+ "\tFaultType: %d\n"
+ "\tAccessType: %d\n"
+ "\tFaultLevel: %d\n"
+ "\tEngineClass: %d %s\n"
+ "\tEngineInstance: %d\n",
+ pf->consumer.asid,
+ upper_32_bits(pf->consumer.page_addr),
+ lower_32_bits(pf->consumer.page_addr),
+ pf->consumer.fault_type,
+ pf->consumer.access_type,
+ pf->consumer.fault_level,
+ pf->consumer.engine_class,
+ xe_hw_engine_class_to_str(pf->consumer.engine_class),
+ pf->consumer.engine_instance);
+}
+
+static void xe_pagefault_queue_work(struct work_struct *w)
+{
+ struct xe_pagefault_queue *pf_queue =
+ container_of(w, typeof(*pf_queue), worker);
+ struct xe_pagefault pf;
+ unsigned long threshold;
+
+#define USM_QUEUE_MAX_RUNTIME_MS 20
+ threshold = jiffies + msecs_to_jiffies(USM_QUEUE_MAX_RUNTIME_MS);
+
+ while (xe_pagefault_queue_pop(pf_queue, &pf)) {
+ int err;
+
+ if (!pf.gt) /* Fault squashed during reset */
+ continue;
+
+ err = xe_pagefault_service(&pf);
+ if (err) {
+ xe_pagefault_print(&pf);
+ xe_gt_dbg(pf.gt, "Fault response: Unsuccessful %pe\n",
+ ERR_PTR(err));
+ }
+
+ pf.producer.ops->ack_fault(&pf, err);
+
+ if (time_after(jiffies, threshold)) {
+ queue_work(gt_to_xe(pf.gt)->usm.pf_wq, w);
+ break;
+ }
+ }
+#undef USM_QUEUE_MAX_RUNTIME_MS
+}
+
+static int xe_pagefault_queue_init(struct xe_device *xe,
+ struct xe_pagefault_queue *pf_queue)
+{
+ struct xe_gt *gt;
+ int total_num_eus = 0;
+ u8 id;
+
+ for_each_gt(gt, xe, id) {
+ xe_dss_mask_t all_dss;
+ int num_dss, num_eus;
+
+ bitmap_or(all_dss, gt->fuse_topo.g_dss_mask,
+ gt->fuse_topo.c_dss_mask, XE_MAX_DSS_FUSE_BITS);
+
+ num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
+ num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
+ XE_MAX_EU_FUSE_BITS) * num_dss;
+
+ total_num_eus += num_eus;
+ }
+
+ xe_assert(xe, total_num_eus);
+
+ /*
+ * Userspace can issue separate page faults per EU and per CS.
+ *
+ * XXX: Multiplier required as compute UMDs are getting PF queue errors
+ * without it. Follow up on why this multiplier is required.
+ */
+#define PF_MULTIPLIER 8
+ pf_queue->size = (total_num_eus + XE_NUM_HW_ENGINES) *
+ xe_pagefault_entry_size() * PF_MULTIPLIER;
+ pf_queue->size = roundup_pow_of_two(pf_queue->size);
+#undef PF_MULTIPLIER
+
+ drm_dbg(&xe->drm, "xe_pagefault_entry_size=%d, total_num_eus=%d, pf_queue->size=%u",
+ xe_pagefault_entry_size(), total_num_eus, pf_queue->size);
+
+ spin_lock_init(&pf_queue->lock);
+ INIT_WORK(&pf_queue->worker, xe_pagefault_queue_work);
+
+ pf_queue->data = drmm_kzalloc(&xe->drm, pf_queue->size, GFP_KERNEL);
+ if (!pf_queue->data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void xe_pagefault_fini(void *arg)
+{
+ struct xe_device *xe = arg;
+
+ destroy_workqueue(xe->usm.pf_wq);
+}
+
+/**
+ * xe_pagefault_init() - Page fault init
+ * @xe: xe device instance
+ *
+ * Initialize Xe page fault state. Must be done after reading fuses.
+ *
+ * Return: 0 on Success, errno on failure
+ */
+int xe_pagefault_init(struct xe_device *xe)
+{
+ int err, i;
+
+ if (!xe->info.has_usm)
+ return 0;
+
+ xe->usm.pf_wq = alloc_workqueue("xe_page_fault_work_queue",
+ WQ_UNBOUND | WQ_HIGHPRI,
+ XE_PAGEFAULT_QUEUE_COUNT);
+ if (!xe->usm.pf_wq)
+ return -ENOMEM;
+
+ for (i = 0; i < XE_PAGEFAULT_QUEUE_COUNT; ++i) {
+ err = xe_pagefault_queue_init(xe, xe->usm.pf_queue + i);
+ if (err)
+ goto err_out;
+ }
+
+ return devm_add_action_or_reset(xe->drm.dev, xe_pagefault_fini, xe);
+
+err_out:
+ destroy_workqueue(xe->usm.pf_wq);
+ return err;
+}
+
+static void xe_pagefault_queue_reset(struct xe_device *xe, struct xe_gt *gt,
+ struct xe_pagefault_queue *pf_queue)
+{
+ u32 i;
+
+ /* Driver load failure guard / USM not enabled guard */
+ if (!pf_queue->data)
+ return;
+
+ /* Squash all pending faults on the GT */
+
+ spin_lock_irq(&pf_queue->lock);
+ for (i = pf_queue->tail; i != pf_queue->head;
+ i = (i + xe_pagefault_entry_size()) % pf_queue->size) {
+ struct xe_pagefault *pf = pf_queue->data + i;
+
+ if (pf->gt == gt)
+ pf->gt = NULL;
+ }
+ spin_unlock_irq(&pf_queue->lock);
+}
+
+/**
+ * xe_pagefault_reset() - Page fault reset for a GT
+ * @xe: xe device instance
+ * @gt: GT being reset
+ *
+ * Reset the Xe page fault state for a GT; that is, squash any pending faults on
+ * the GT.
+ */
+void xe_pagefault_reset(struct xe_device *xe, struct xe_gt *gt)
+{
+ int i;
+
+ for (i = 0; i < XE_PAGEFAULT_QUEUE_COUNT; ++i)
+ xe_pagefault_queue_reset(xe, gt, xe->usm.pf_queue + i);
+}
+
+static bool xe_pagefault_queue_full(struct xe_pagefault_queue *pf_queue)
+{
+ lockdep_assert_held(&pf_queue->lock);
+
+ return CIRC_SPACE(pf_queue->head, pf_queue->tail, pf_queue->size) <=
+ xe_pagefault_entry_size();
+}
+
+/**
+ * xe_pagefault_handler() - Page fault handler
+ * @xe: xe device instance
+ * @pf: Page fault
+ *
+ * Sink the page fault to a queue (i.e., a memory buffer) and queue a worker to
+ * service it. Safe to be called from IRQ or process context. Reclaim safe.
+ *
+ * Return: 0 on success, errno on failure
+ */
+int xe_pagefault_handler(struct xe_device *xe, struct xe_pagefault *pf)
+{
+ struct xe_pagefault_queue *pf_queue = xe->usm.pf_queue +
+ (pf->consumer.asid % XE_PAGEFAULT_QUEUE_COUNT);
+ unsigned long flags;
+ bool full;
+
+ spin_lock_irqsave(&pf_queue->lock, flags);
+ full = xe_pagefault_queue_full(pf_queue);
+ if (!full) {
+ memcpy(pf_queue->data + pf_queue->head, pf, sizeof(*pf));
+ pf_queue->head = (pf_queue->head + xe_pagefault_entry_size()) %
+ pf_queue->size;
+ queue_work(xe->usm.pf_wq, &pf_queue->worker);
+ } else {
+ drm_warn(&xe->drm,
+ "PageFault Queue (%d) full, shouldn't be possible\n",
+ pf->consumer.asid % XE_PAGEFAULT_QUEUE_COUNT);
+ }
+ spin_unlock_irqrestore(&pf_queue->lock, flags);
+
+ return full ? -ENOSPC : 0;
+}
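Because xe_pagefault_entry_size() and pf_queue->size are both rounded to powers of two, the % pf_queue->size wrap in the push/pop paths is equivalent to a mask, which is the simplification the entry-size comment refers to. A minimal sketch of the ring arithmetic with the lock already held (the helper name is hypothetical):

/* Hypothetical push mirroring the queue math in xe_pagefault_handler() */
static bool pf_ring_push(struct xe_pagefault_queue *q,
                         const struct xe_pagefault *pf, u32 entry_size)
{
        lockdep_assert_held(&q->lock);

        /* CIRC_SPACE() keeps one entry free, so head == tail always means empty */
        if (CIRC_SPACE(q->head, q->tail, q->size) <= entry_size)
                return false;

        memcpy(q->data + q->head, pf, sizeof(*pf));
        q->head = (q->head + entry_size) & (q->size - 1);       /* same as % q->size */
        return true;
}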
diff --git a/drivers/gpu/drm/xe/xe_pagefault.h b/drivers/gpu/drm/xe/xe_pagefault.h
new file mode 100644
index 000000000000..bd0cdf9ed37f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pagefault.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PAGEFAULT_H_
+#define _XE_PAGEFAULT_H_
+
+struct xe_device;
+struct xe_gt;
+struct xe_pagefault;
+
+int xe_pagefault_init(struct xe_device *xe);
+
+void xe_pagefault_reset(struct xe_device *xe, struct xe_gt *gt);
+
+int xe_pagefault_handler(struct xe_device *xe, struct xe_pagefault *pf);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pagefault_types.h b/drivers/gpu/drm/xe/xe_pagefault_types.h
new file mode 100644
index 000000000000..d3b516407d60
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pagefault_types.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PAGEFAULT_TYPES_H_
+#define _XE_PAGEFAULT_TYPES_H_
+
+#include <linux/workqueue.h>
+
+struct xe_gt;
+struct xe_pagefault;
+
+/** enum xe_pagefault_access_type - Xe page fault access type */
+enum xe_pagefault_access_type {
+ /** @XE_PAGEFAULT_ACCESS_TYPE_READ: Read access type */
+ XE_PAGEFAULT_ACCESS_TYPE_READ = 0,
+ /** @XE_PAGEFAULT_ACCESS_TYPE_WRITE: Write access type */
+ XE_PAGEFAULT_ACCESS_TYPE_WRITE = 1,
+ /** @XE_PAGEFAULT_ACCESS_TYPE_ATOMIC: Atomic access type */
+ XE_PAGEFAULT_ACCESS_TYPE_ATOMIC = 2,
+};
+
+/** enum xe_pagefault_type - Xe page fault type */
+enum xe_pagefault_type {
+ /** @XE_PAGEFAULT_TYPE_NOT_PRESENT: Not present */
+ XE_PAGEFAULT_TYPE_NOT_PRESENT = 0,
+ /** @XE_PAGEFAULT_TYPE_WRITE_ACCESS_VIOLATION: Write access violation */
+ XE_PAGEFAULT_TYPE_WRITE_ACCESS_VIOLATION = 1,
+ /** @XE_PAGEFAULT_TYPE_ATOMIC_ACCESS_VIOLATION: Atomic access violation */
+ XE_PAGEFAULT_TYPE_ATOMIC_ACCESS_VIOLATION = 2,
+};
+
+/** struct xe_pagefault_ops - Xe pagefault ops (producer) */
+struct xe_pagefault_ops {
+ /**
+ * @ack_fault: Ack fault
+ * @pf: Page fault
+ * @err: Error state of fault
+ *
+ * Page fault producer receives acknowledgment from the consumer and
+ * sends the result to the HW/FW interface.
+ */
+ void (*ack_fault)(struct xe_pagefault *pf, int err);
+};
+
+/**
+ * struct xe_pagefault - Xe page fault
+ *
+ * Generic page fault structure for communication between producer and consumer.
+ * Carefully sized to be 64 bytes. Upon a device page fault, the producer
+ * populates this structure, and the consumer copies it into the page-fault
+ * queue for deferred handling.
+ */
+struct xe_pagefault {
+ /**
+ * @gt: GT of fault
+ */
+ struct xe_gt *gt;
+ /**
+ * @consumer: State for the software handling the fault. Populated by
+ * the producer and may be modified by the consumer to communicate
+ * information back to the producer upon fault acknowledgment.
+ */
+ struct {
+ /** @consumer.page_addr: address of page fault */
+ u64 page_addr;
+ /** @consumer.asid: address space ID */
+ u32 asid;
+ /**
+ * @consumer.access_type: access type, u8 rather than enum to
+ * keep size compact
+ */
+ u8 access_type;
+ /**
+ * @consumer.fault_type: fault type, u8 rather than enum to
+ * keep size compact
+ */
+ u8 fault_type;
+#define XE_PAGEFAULT_LEVEL_NACK 0xff /* Producer indicates nack fault */
+ /** @consumer.fault_level: fault level */
+ u8 fault_level;
+ /** @consumer.engine_class: engine class */
+ u8 engine_class;
+ /** @consumer.engine_instance: engine instance */
+ u8 engine_instance;
+ /** @consumer.reserved: reserved bits for future expansion */
+ u8 reserved[7];
+ } consumer;
+ /**
+ * @producer: State for the producer (i.e., HW/FW interface). Populated
+ * by the producer and should not be modified, or even inspected, by the
+ * consumer, except for calling operations.
+ */
+ struct {
+ /** @producer.private: private pointer */
+ void *private;
+ /** @producer.ops: operations */
+ const struct xe_pagefault_ops *ops;
+#define XE_PAGEFAULT_PRODUCER_MSG_LEN_DW 4
+ /**
+ * @producer.msg: page fault message, used by producer in fault
+ * acknowledgment to formulate response to HW/FW interface.
+ * Stored inline in struct xe_pagefault because the producer
+ * typically receives the fault in a context where memory cannot
+ * be allocated (e.g., atomic context or the reclaim path).
+ */
+ u32 msg[XE_PAGEFAULT_PRODUCER_MSG_LEN_DW];
+ } producer;
+};
+
+/**
+ * struct xe_pagefault_queue - Xe pagefault queue (consumer)
+ *
+ * Used to capture all device page faults for deferred processing. Size this
+ * queue to absorb the device's worst-case number of outstanding faults.
+ */
+struct xe_pagefault_queue {
+ /**
+ * @data: Data in queue containing struct xe_pagefault, protected by
+ * @lock
+ */
+ void *data;
+ /** @size: Size of queue in bytes */
+ u32 size;
+ /** @head: Head pointer in bytes, moved by producer, protected by @lock */
+ u32 head;
+ /** @tail: Tail pointer in bytes, moved by consumer, protected by @lock */
+ u32 tail;
+ /** @lock: protects page fault queue */
+ spinlock_t lock;
+ /** @worker: to process page faults */
+ struct work_struct worker;
+};
+
+#endif
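A hedged sketch of the producer side of this contract; the ops instance and the report helper are hypothetical, while the types, fields and xe_pagefault_handler() come from this series. Note that the handler copies the fault into the queue, so a stack-allocated struct is fine.

/* Hypothetical producer: package a HW fault and hand it to the consumer */
static void my_ack_fault(struct xe_pagefault *pf, int err)
{
        /* build the HW/FW reply from pf->producer.msg; nack when err is set */
}

static const struct xe_pagefault_ops my_pf_ops = {
        .ack_fault = my_ack_fault,
};

static int my_report_fault(struct xe_device *xe, struct xe_gt *gt,
                           u64 addr, u32 asid)
{
        struct xe_pagefault pf = {
                .gt = gt,
                .consumer = {
                        .page_addr = addr,
                        .asid = asid,
                        .access_type = XE_PAGEFAULT_ACCESS_TYPE_READ,
                        .fault_type = XE_PAGEFAULT_TYPE_NOT_PRESENT,
                },
                .producer = {
                        .ops = &my_pf_ops,
                },
        };

        BUILD_BUG_ON(sizeof(pf) != 64); /* the "carefully sized" invariant (64-bit build) */

        return xe_pagefault_handler(xe, &pf);
}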
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 2e7cb99ae87a..68171cceea18 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -57,7 +57,7 @@ struct xe_pat_ops {
int n_entries);
void (*program_media)(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries);
- void (*dump)(struct xe_gt *gt, struct drm_printer *p);
+ int (*dump)(struct xe_gt *gt, struct drm_printer *p);
};
static const struct xe_pat_table_entry xelp_pat_table[] = {
@@ -115,7 +115,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = {
REG_FIELD_PREP(XE2_L4_POLICY, l4_policy) | \
REG_FIELD_PREP(XE2_COH_MODE, __coh_mode), \
.coh_mode = (BUILD_BUG_ON_ZERO(__coh_mode && comp_en) || __coh_mode) ? \
- XE_COH_AT_LEAST_1WAY : XE_COH_NONE \
+ XE_COH_AT_LEAST_1WAY : XE_COH_NONE, \
+ .valid = 1 \
}
static const struct xe_pat_table_entry xe2_pat_table[] = {
@@ -154,6 +155,41 @@ static const struct xe_pat_table_entry xe2_pat_table[] = {
static const struct xe_pat_table_entry xe2_pat_ats = XE2_PAT( 0, 0, 0, 0, 3, 3 );
static const struct xe_pat_table_entry xe2_pat_pta = XE2_PAT( 0, 0, 0, 0, 3, 0 );
+/*
+ * Xe3p_XPC PAT table uses the same layout as Xe2/Xe3, except that there's no
+ * option for compression. Also note that the "L3" and "L4" register fields
+ * actually control L2 and L3 cache respectively on this platform.
+ */
+#define XE3P_XPC_PAT(no_promote, l3clos, l3_policy, l4_policy, __coh_mode) \
+ XE2_PAT(no_promote, 0, l3clos, l3_policy, l4_policy, __coh_mode)
+
+static const struct xe_pat_table_entry xe3p_xpc_pat_ats = XE3P_XPC_PAT( 0, 0, 0, 0, 3 );
+static const struct xe_pat_table_entry xe3p_xpc_pat_pta = XE3P_XPC_PAT( 0, 0, 0, 0, 0 );
+
+static const struct xe_pat_table_entry xe3p_xpc_pat_table[] = {
+ [ 0] = XE3P_XPC_PAT( 0, 0, 0, 0, 0 ),
+ [ 1] = XE3P_XPC_PAT( 0, 0, 0, 0, 2 ),
+ [ 2] = XE3P_XPC_PAT( 0, 0, 0, 0, 3 ),
+ [ 3] = XE3P_XPC_PAT( 0, 0, 3, 3, 0 ),
+ [ 4] = XE3P_XPC_PAT( 0, 0, 3, 3, 2 ),
+ [ 5] = XE3P_XPC_PAT( 0, 0, 3, 0, 0 ),
+ [ 6] = XE3P_XPC_PAT( 0, 0, 3, 0, 2 ),
+ [ 7] = XE3P_XPC_PAT( 0, 0, 3, 0, 3 ),
+ [ 8] = XE3P_XPC_PAT( 0, 0, 0, 3, 0 ),
+ [ 9] = XE3P_XPC_PAT( 0, 0, 0, 3, 2 ),
+ [10] = XE3P_XPC_PAT( 0, 0, 0, 3, 3 ),
+ /* 11..22 are reserved; leave set to all 0's */
+ [23] = XE3P_XPC_PAT( 0, 1, 0, 0, 0 ),
+ [24] = XE3P_XPC_PAT( 0, 1, 0, 0, 2 ),
+ [25] = XE3P_XPC_PAT( 0, 1, 0, 0, 3 ),
+ [26] = XE3P_XPC_PAT( 0, 2, 0, 0, 0 ),
+ [27] = XE3P_XPC_PAT( 0, 2, 0, 0, 2 ),
+ [28] = XE3P_XPC_PAT( 0, 2, 0, 0, 3 ),
+ [29] = XE3P_XPC_PAT( 0, 3, 0, 0, 0 ),
+ [30] = XE3P_XPC_PAT( 0, 3, 0, 0, 2 ),
+ [31] = XE3P_XPC_PAT( 0, 3, 0, 0, 3 ),
+};
+
u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
{
WARN_ON(pat_index >= xe->pat.n_entries);
@@ -194,7 +230,7 @@ static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry ta
xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value);
}
-static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xelp_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -202,7 +238,7 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
drm_printf(p, "PAT table:\n");
@@ -215,6 +251,7 @@ static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
}
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
static const struct xe_pat_ops xelp_pat_ops = {
@@ -222,7 +259,7 @@ static const struct xe_pat_ops xelp_pat_ops = {
.dump = xelp_dump,
};
-static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xehp_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -230,7 +267,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
drm_printf(p, "PAT table:\n");
@@ -245,6 +282,7 @@ static void xehp_dump(struct xe_gt *gt, struct drm_printer *p)
}
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
static const struct xe_pat_ops xehp_pat_ops = {
@@ -252,7 +290,7 @@ static const struct xe_pat_ops xehp_pat_ops = {
.dump = xehp_dump,
};
-static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -260,7 +298,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
drm_printf(p, "PAT table:\n");
@@ -273,6 +311,7 @@ static void xehpc_dump(struct xe_gt *gt, struct drm_printer *p)
}
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
static const struct xe_pat_ops xehpc_pat_ops = {
@@ -280,7 +319,7 @@ static const struct xe_pat_ops xehpc_pat_ops = {
.dump = xehpc_dump,
};
-static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -288,7 +327,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
drm_printf(p, "PAT table:\n");
@@ -306,6 +345,7 @@ static void xelpg_dump(struct xe_gt *gt, struct drm_printer *p)
}
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
/*
@@ -318,7 +358,7 @@ static const struct xe_pat_ops xelpg_pat_ops = {
.dump = xelpg_dump,
};
-static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
+static int xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
unsigned int fw_ref;
@@ -327,9 +367,9 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
if (!fw_ref)
- return;
+ return -ETIMEDOUT;
- drm_printf(p, "PAT table:\n");
+ drm_printf(p, "PAT table: (* = reserved entry)\n");
for (i = 0; i < xe->pat.n_entries; i++) {
if (xe_gt_is_media_type(gt))
@@ -337,14 +377,14 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
else
pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
- drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u, %u ] (%#8x)\n", i,
+ drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u, %u ] (%#8x)%s\n", i,
!!(pat & XE2_NO_PROMOTE),
!!(pat & XE2_COMP_EN),
REG_FIELD_GET(XE2_L3_CLOS, pat),
REG_FIELD_GET(XE2_L3_POLICY, pat),
REG_FIELD_GET(XE2_L4_POLICY, pat),
REG_FIELD_GET(XE2_COH_MODE, pat),
- pat);
+ pat, xe->pat.table[i].valid ? "" : " *");
}
/*
@@ -367,6 +407,7 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
pat);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
}
static const struct xe_pat_ops xe2_pat_ops = {
@@ -375,9 +416,68 @@ static const struct xe_pat_ops xe2_pat_ops = {
.dump = xe2_dump,
};
+static int xe3p_xpc_dump(struct xe_gt *gt, struct drm_printer *p)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref;
+ u32 pat;
+ int i;
+
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
+
+ drm_printf(p, "PAT table: (* = reserved entry)\n");
+
+ for (i = 0; i < xe->pat.n_entries; i++) {
+ pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_INDEX(i)));
+
+ drm_printf(p, "PAT[%2d] = [ %u, %u, %u, %u, %u ] (%#8x)%s\n", i,
+ !!(pat & XE2_NO_PROMOTE),
+ REG_FIELD_GET(XE2_L3_CLOS, pat),
+ REG_FIELD_GET(XE2_L3_POLICY, pat),
+ REG_FIELD_GET(XE2_L4_POLICY, pat),
+ REG_FIELD_GET(XE2_COH_MODE, pat),
+ pat, xe->pat.table[i].valid ? "" : " *");
+ }
+
+ /*
+ * Also print PTA_MODE, which describes how the hardware accesses
+ * PPGTT entries.
+ */
+ pat = xe_gt_mcr_unicast_read_any(gt, XE_REG_MCR(_PAT_PTA));
+
+ drm_printf(p, "Page Table Access:\n");
+ drm_printf(p, "PTA_MODE= [ %u, %u, %u, %u, %u ] (%#8x)\n",
+ !!(pat & XE2_NO_PROMOTE),
+ REG_FIELD_GET(XE2_L3_CLOS, pat),
+ REG_FIELD_GET(XE2_L3_POLICY, pat),
+ REG_FIELD_GET(XE2_L4_POLICY, pat),
+ REG_FIELD_GET(XE2_COH_MODE, pat),
+ pat);
+
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ return 0;
+}
+
+static const struct xe_pat_ops xe3p_xpc_pat_ops = {
+ .program_graphics = program_pat_mcr,
+ .program_media = program_pat,
+ .dump = xe3p_xpc_dump,
+};
+
void xe_pat_init_early(struct xe_device *xe)
{
- if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
+ if (GRAPHICS_VERx100(xe) == 3511) {
+ xe->pat.ops = &xe3p_xpc_pat_ops;
+ xe->pat.table = xe3p_xpc_pat_table;
+ xe->pat.pat_ats = &xe3p_xpc_pat_ats;
+ xe->pat.pat_pta = &xe3p_xpc_pat_pta;
+ xe->pat.n_entries = ARRAY_SIZE(xe3p_xpc_pat_table);
+ xe->pat.idx[XE_CACHE_NONE] = 3;
+ xe->pat.idx[XE_CACHE_WT] = 3; /* N/A (no display); use UC */
+ xe->pat.idx[XE_CACHE_WB] = 2;
+ } else if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
xe->pat.ops = &xe2_pat_ops;
xe->pat.table = xe2_pat_table;
xe->pat.pat_ats = &xe2_pat_ats;
@@ -462,12 +562,19 @@ void xe_pat_init(struct xe_gt *gt)
xe->pat.ops->program_graphics(gt, xe->pat.table, xe->pat.n_entries);
}
-void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_pat_dump() - Dump GT PAT table into a drm printer.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_pat_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
if (!xe->pat.ops)
- return;
+ return -EOPNOTSUPP;
- xe->pat.ops->dump(gt, p);
+ return xe->pat.ops->dump(gt, p);
}
diff --git a/drivers/gpu/drm/xe/xe_pat.h b/drivers/gpu/drm/xe/xe_pat.h
index fa0dfbe525cd..05dae03a5f54 100644
--- a/drivers/gpu/drm/xe/xe_pat.h
+++ b/drivers/gpu/drm/xe/xe_pat.h
@@ -29,6 +29,11 @@ struct xe_pat_table_entry {
#define XE_COH_NONE 1
#define XE_COH_AT_LEAST_1WAY 2
u16 coh_mode;
+
+ /**
+ * @valid: Set to 1 if the entry is valid, 0 if it's reserved.
+ */
+ u16 valid;
};
/**
@@ -43,12 +48,7 @@ void xe_pat_init_early(struct xe_device *xe);
*/
void xe_pat_init(struct xe_gt *gt);
-/**
- * xe_pat_dump - Dump PAT table
- * @gt: GT structure
- * @p: Printer to dump info to
- */
-void xe_pat_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_pat_dump(struct xe_gt *gt, struct drm_printer *p);
/**
* xe_pat_index_get_coh_mode - Extract the coherency mode for the given
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index 3c40ef426f0c..9c9ea10d994c 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -17,6 +17,8 @@
#include "display/xe_display.h"
#include "regs/xe_gt_regs.h"
+#include "regs/xe_regs.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_drv.h"
#include "xe_gt.h"
@@ -28,6 +30,7 @@
#include "xe_pci_sriov.h"
#include "xe_pci_types.h"
#include "xe_pm.h"
+#include "xe_printk.h"
#include "xe_sriov.h"
#include "xe_step.h"
#include "xe_survivability_mode.h"
@@ -49,15 +52,10 @@ __diag_ignore_all("-Woverride-init", "Allow field overrides in table");
static const struct xe_graphics_desc graphics_xelp = {
.hw_engine_mask = BIT(XE_HW_ENGINE_RCS0) | BIT(XE_HW_ENGINE_BCS0),
-
- .va_bits = 48,
- .vm_max_level = 3,
};
#define XE_HP_FEATURES \
- .has_range_tlb_invalidation = true, \
- .va_bits = 48, \
- .vm_max_level = 3
+ .has_range_tlb_inval = true
static const struct xe_graphics_desc graphics_xehpg = {
.hw_engine_mask =
@@ -66,9 +64,6 @@ static const struct xe_graphics_desc graphics_xehpg = {
BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
XE_HP_FEATURES,
- .vram_flags = XE_VRAM_FLAGS_NEED64K,
-
- .has_flat_ccs = 1,
};
static const struct xe_graphics_desc graphics_xehpc = {
@@ -82,9 +77,6 @@ static const struct xe_graphics_desc graphics_xehpc = {
BIT(XE_HW_ENGINE_CCS2) | BIT(XE_HW_ENGINE_CCS3),
XE_HP_FEATURES,
- .va_bits = 57,
- .vm_max_level = 4,
- .vram_flags = XE_VRAM_FLAGS_NEED64K,
.has_asid = 1,
.has_atomic_enable_pte_bit = 1,
@@ -102,12 +94,9 @@ static const struct xe_graphics_desc graphics_xelpg = {
#define XE2_GFX_FEATURES \
.has_asid = 1, \
.has_atomic_enable_pte_bit = 1, \
- .has_flat_ccs = 1, \
- .has_range_tlb_invalidation = 1, \
+ .has_range_tlb_inval = 1, \
.has_usm = 1, \
.has_64bit_timestamp = 1, \
- .va_bits = 48, \
- .vm_max_level = 4, \
.hw_engine_mask = \
BIT(XE_HW_ENGINE_RCS0) | \
BIT(XE_HW_ENGINE_BCS8) | BIT(XE_HW_ENGINE_BCS0) | \
@@ -117,6 +106,13 @@ static const struct xe_graphics_desc graphics_xe2 = {
XE2_GFX_FEATURES,
};
+static const struct xe_graphics_desc graphics_xe3p_xpc = {
+ XE2_GFX_FEATURES,
+ .hw_engine_mask =
+ GENMASK(XE_HW_ENGINE_BCS8, XE_HW_ENGINE_BCS1) |
+ GENMASK(XE_HW_ENGINE_CCS3, XE_HW_ENGINE_CCS0),
+};
+
static const struct xe_media_desc media_xem = {
.hw_engine_mask =
GENMASK(XE_HW_ENGINE_VCS7, XE_HW_ENGINE_VCS0) |
@@ -147,6 +143,9 @@ static const struct xe_ip graphics_ips[] = {
{ 3000, "Xe3_LPG", &graphics_xe2 },
{ 3001, "Xe3_LPG", &graphics_xe2 },
{ 3003, "Xe3_LPG", &graphics_xe2 },
+ { 3004, "Xe3_LPG", &graphics_xe2 },
+ { 3005, "Xe3_LPG", &graphics_xe2 },
+ { 3511, "Xe3p_XPC", &graphics_xe3p_xpc },
};
/* Pre-GMDID Media IPs */
@@ -160,6 +159,8 @@ static const struct xe_ip media_ips[] = {
{ 2000, "Xe2_LPM", &media_xelpmp },
{ 3000, "Xe3_LPM", &media_xelpmp },
{ 3002, "Xe3_LPM", &media_xelpmp },
+ { 3500, "Xe3p_LPM", &media_xelpmp },
+ { 3503, "Xe3p_HPM", &media_xelpmp },
};
static const struct xe_device_desc tgl_desc = {
@@ -169,8 +170,11 @@ static const struct xe_device_desc tgl_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const struct xe_device_desc rkl_desc = {
@@ -182,6 +186,8 @@ static const struct xe_device_desc rkl_desc = {
.has_llc = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const u16 adls_rpls_ids[] = { INTEL_RPLS_IDS(NOP), 0 };
@@ -193,12 +199,15 @@ static const struct xe_device_desc adl_s_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
{ XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
{},
},
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const u16 adlp_rplu_ids[] = { INTEL_RPLU_IDS(NOP), 0 };
@@ -210,12 +219,15 @@ static const struct xe_device_desc adl_p_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
.subplatforms = (const struct xe_subplatform_desc[]) {
{ XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
{},
},
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const struct xe_device_desc adl_n_desc = {
@@ -225,8 +237,11 @@ static const struct xe_device_desc adl_n_desc = {
.dma_mask_size = 39,
.has_display = true,
.has_llc = true,
+ .has_sriov = true,
.max_gt_per_tile = 1,
.require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 3,
};
#define DGFX_FEATURES \
@@ -243,6 +258,8 @@ static const struct xe_device_desc dg1_desc = {
.has_heci_gscfi = 1,
.max_gt_per_tile = 1,
.require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const u16 dg2_g10_ids[] = { INTEL_DG2_G10_IDS(NOP), INTEL_ATS_M150_IDS(NOP), 0 };
@@ -252,6 +269,7 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
#define DG2_FEATURES \
DGFX_FEATURES, \
PLATFORM(DG2), \
+ .has_flat_ccs = 1, \
.has_gsc_nvm = 1, \
.has_heci_gscfi = 1, \
.subplatforms = (const struct xe_subplatform_desc[]) { \
@@ -259,7 +277,10 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 };
{ XE_SUBPLATFORM_DG2_G11, "G11", dg2_g11_ids }, \
{ XE_SUBPLATFORM_DG2_G12, "G12", dg2_g12_ids }, \
{ } \
- }
+ }, \
+ .va_bits = 48, \
+ .vm_max_level = 3, \
+ .vram_flags = XE_VRAM_FLAGS_NEED64K
static const struct xe_device_desc ats_m_desc = {
.pre_gmdid_graphics_ip = &graphics_ip_xehpg,
@@ -270,6 +291,7 @@ static const struct xe_device_desc ats_m_desc = {
DG2_FEATURES,
.has_display = false,
+ .has_sriov = true,
};
static const struct xe_device_desc dg2_desc = {
@@ -296,6 +318,9 @@ static const __maybe_unused struct xe_device_desc pvc_desc = {
.max_gt_per_tile = 1,
.max_remote_tiles = 1,
.require_force_probe = true,
+ .va_bits = 57,
+ .vm_max_level = 4,
+ .vram_flags = XE_VRAM_FLAGS_NEED64K,
.has_mbx_power_limits = false,
};
@@ -307,38 +332,86 @@ static const struct xe_device_desc mtl_desc = {
.has_display = true,
.has_pxp = true,
.max_gt_per_tile = 2,
+ .va_bits = 48,
+ .vm_max_level = 3,
};
static const struct xe_device_desc lnl_desc = {
PLATFORM(LUNARLAKE),
.dma_mask_size = 46,
.has_display = true,
+ .has_flat_ccs = 1,
.has_pxp = true,
+ .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
+ .va_bits = 48,
+ .vm_max_level = 4,
};
+static const u16 bmg_g21_ids[] = { INTEL_BMG_G21_IDS(NOP), 0 };
+
static const struct xe_device_desc bmg_desc = {
DGFX_FEATURES,
PLATFORM(BATTLEMAGE),
.dma_mask_size = 46,
.has_display = true,
.has_fan_control = true,
+ .has_flat_ccs = 1,
.has_mbx_power_limits = true,
.has_gsc_nvm = 1,
.has_heci_cscfi = 1,
+ .has_late_bind = true,
.has_sriov = true,
+ .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
+ .subplatforms = (const struct xe_subplatform_desc[]) {
+ { XE_SUBPLATFORM_BATTLEMAGE_G21, "G21", bmg_g21_ids },
+ { }
+ },
+ .va_bits = 48,
+ .vm_max_level = 4,
};
static const struct xe_device_desc ptl_desc = {
PLATFORM(PANTHERLAKE),
.dma_mask_size = 46,
.has_display = true,
+ .has_flat_ccs = 1,
.has_sriov = true,
+ .has_mem_copy_instr = true,
.max_gt_per_tile = 2,
.needs_scratch = true,
+ .needs_shared_vf_gt_wq = true,
+ .va_bits = 48,
+ .vm_max_level = 4,
+};
+
+static const struct xe_device_desc nvls_desc = {
+ PLATFORM(NOVALAKE_S),
+ .dma_mask_size = 46,
+ .has_display = true,
+ .has_flat_ccs = 1,
+ .has_mem_copy_instr = true,
+ .max_gt_per_tile = 2,
+ .require_force_probe = true,
+ .va_bits = 48,
+ .vm_max_level = 4,
+};
+
+static const struct xe_device_desc cri_desc = {
+ DGFX_FEATURES,
+ PLATFORM(CRESCENTISLAND),
+ .dma_mask_size = 52,
+ .has_display = false,
+ .has_flat_ccs = false,
+ .has_mbx_power_limits = true,
+ .has_sriov = true,
+ .max_gt_per_tile = 2,
+ .require_force_probe = true,
+ .va_bits = 57,
+ .vm_max_level = 4,
};
#undef PLATFORM
@@ -367,6 +440,9 @@ static const struct pci_device_id pciidlist[] = {
INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
+ INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
+ INTEL_NVLS_IDS(INTEL_VGA_DEVICE, &nvls_desc),
+ INTEL_CRI_IDS(INTEL_PCI_DEVICE, &cri_desc),
{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -439,7 +515,7 @@ enum xe_gmdid_type {
GMDID_MEDIA
};
-static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
+static int read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid)
{
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct xe_reg gmdid_reg = GMD_ID;
@@ -448,22 +524,24 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
KUNIT_STATIC_STUB_REDIRECT(read_gmdid, xe, type, ver, revid);
if (IS_SRIOV_VF(xe)) {
- struct xe_gt *gt = xe_root_mmio_gt(xe);
-
/*
* To get the value of the GMDID register, VFs must obtain it
* from the GuC using MMIO communication.
*
- * Note that at this point the xe_gt is not fully uninitialized
- * and only basic access to MMIO registers is possible. To use
- * our existing GuC communication functions we must perform at
- * least basic xe_gt and xe_guc initialization.
- *
- * Since to obtain the value of GMDID_MEDIA we need to use the
- * media GuC, temporarily tweak the gt type.
+ * Note that at this point the GTs are not initialized and only
+ * tile-level access to MMIO registers is possible. To use our
+ * existing GuC communication functions we must create a dummy
+ * GT structure and perform at least basic xe_gt and xe_guc
+ * initialization.
*/
- xe_gt_assert(gt, gt->info.type == XE_GT_TYPE_UNINITIALIZED);
+ struct xe_gt *gt __free(kfree) = NULL;
+ int err;
+
+ gt = kzalloc(sizeof(*gt), GFP_KERNEL);
+ if (!gt)
+ return -ENOMEM;
+ gt->tile = &xe->tiles[0];
if (type == GMDID_MEDIA) {
gt->info.id = 1;
gt->info.type = XE_GT_TYPE_MEDIA;
@@ -475,15 +553,11 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
xe_gt_mmio_init(gt);
xe_guc_comm_init_early(&gt->uc.guc);
- /* Don't bother with GMDID if failed to negotiate the GuC ABI */
- val = xe_gt_sriov_vf_bootstrap(gt) ? 0 : xe_gt_sriov_vf_gmdid(gt);
+ err = xe_gt_sriov_vf_bootstrap(gt);
+ if (err)
+ return err;
- /*
- * Only undo xe_gt.info here, the remaining changes made above
- * will be overwritten as part of the regular initialization.
- */
- gt->info.id = 0;
- gt->info.type = XE_GT_TYPE_UNINITIALIZED;
+ val = xe_gt_sriov_vf_gmdid(gt);
} else {
/*
* GMD_ID is a GT register, but at this point in the driver
@@ -501,55 +575,71 @@ static void read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver,
*ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val) * 100 + REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
*revid = REG_FIELD_GET(GMD_ID_REVID, val);
+
+ return 0;
+}
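
The VF path above leans on the kernel's scope-based cleanup support: the __free(kfree) annotation (from <linux/cleanup.h>, with the kfree free-function declared in <linux/slab.h>) ties kfree() to the variable's scope, so the temporary GT is released on every return path, including the new error returns. A minimal sketch of the pattern, using a hypothetical payload type:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	/* Hypothetical example type, not part of the xe driver. */
	struct example_payload {
		int data;
	};

	static int example_scoped_alloc(void)
	{
		/* Freed automatically when 'p' goes out of scope, on any return. */
		struct example_payload *p __free(kfree) =
			kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return -ENOMEM;

		p->data = 42;
		return p->data;	/* no explicit kfree() needed */
	}
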
+
+static const struct xe_ip *find_graphics_ip(unsigned int verx100)
+{
+ KUNIT_STATIC_STUB_REDIRECT(find_graphics_ip, verx100);
+
+ for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++)
+ if (graphics_ips[i].verx100 == verx100)
+ return &graphics_ips[i];
+ return NULL;
+}
+
+static const struct xe_ip *find_media_ip(unsigned int verx100)
+{
+ KUNIT_STATIC_STUB_REDIRECT(find_media_ip, verx100);
+
+ for (int i = 0; i < ARRAY_SIZE(media_ips); i++)
+ if (media_ips[i].verx100 == verx100)
+ return &media_ips[i];
+ return NULL;
}
/*
* Read IP version from hardware and select graphics/media IP descriptors
* based on the result.
*/
-static void handle_gmdid(struct xe_device *xe,
- const struct xe_ip **graphics_ip,
- const struct xe_ip **media_ip,
- u32 *graphics_revid,
- u32 *media_revid)
+static int handle_gmdid(struct xe_device *xe,
+ const struct xe_ip **graphics_ip,
+ const struct xe_ip **media_ip,
+ u32 *graphics_revid,
+ u32 *media_revid)
{
u32 ver;
+ int ret;
*graphics_ip = NULL;
*media_ip = NULL;
- read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
-
- for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
- if (ver == graphics_ips[i].verx100) {
- *graphics_ip = &graphics_ips[i];
-
- break;
- }
- }
+ ret = read_gmdid(xe, GMDID_GRAPHICS, &ver, graphics_revid);
+ if (ret)
+ return ret;
+ *graphics_ip = find_graphics_ip(ver);
if (!*graphics_ip) {
drm_err(&xe->drm, "Hardware reports unknown graphics version %u.%02u\n",
ver / 100, ver % 100);
}
- read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
+ ret = read_gmdid(xe, GMDID_MEDIA, &ver, media_revid);
+ if (ret)
+ return ret;
+
/* Media may legitimately be fused off / not present */
if (ver == 0)
- return;
-
- for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
- if (ver == media_ips[i].verx100) {
- *media_ip = &media_ips[i];
-
- break;
- }
- }
+ return 0;
+ *media_ip = find_media_ip(ver);
if (!*media_ip) {
drm_err(&xe->drm, "Hardware reports unknown media version %u.%02u\n",
ver / 100, ver % 100);
}
+
+ return 0;
}
/*
@@ -568,19 +658,29 @@ static int xe_info_init_early(struct xe_device *xe,
subplatform_desc->subplatform : XE_SUBPLATFORM_NONE;
xe->info.dma_mask_size = desc->dma_mask_size;
+ xe->info.va_bits = desc->va_bits;
+ xe->info.vm_max_level = desc->vm_max_level;
+ xe->info.vram_flags = desc->vram_flags;
+
xe->info.is_dgfx = desc->is_dgfx;
xe->info.has_fan_control = desc->has_fan_control;
+ /* runtime fusing may force flat_ccs to be disabled later */
+ xe->info.has_flat_ccs = desc->has_flat_ccs;
xe->info.has_mbx_power_limits = desc->has_mbx_power_limits;
xe->info.has_gsc_nvm = desc->has_gsc_nvm;
xe->info.has_heci_gscfi = desc->has_heci_gscfi;
xe->info.has_heci_cscfi = desc->has_heci_cscfi;
+ xe->info.has_late_bind = desc->has_late_bind;
xe->info.has_llc = desc->has_llc;
xe->info.has_pxp = desc->has_pxp;
- xe->info.has_sriov = desc->has_sriov;
+ xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) &&
+ desc->has_sriov;
+ xe->info.has_mem_copy_instr = desc->has_mem_copy_instr;
xe->info.skip_guc_pc = desc->skip_guc_pc;
xe->info.skip_mtcfg = desc->skip_mtcfg;
xe->info.skip_pcode = desc->skip_pcode;
xe->info.needs_scratch = desc->needs_scratch;
+ xe->info.needs_shared_vf_gt_wq = desc->needs_shared_vf_gt_wq;
xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
xe_modparam.probe_display &&
@@ -599,6 +699,101 @@ static int xe_info_init_early(struct xe_device *xe,
}
/*
+ * Possibly override the number of tiles based on a configuration register.
+ */
+static void xe_info_probe_tile_count(struct xe_device *xe)
+{
+ struct xe_mmio *mmio;
+ u8 tile_count;
+ u32 mtcfg;
+
+ KUNIT_STATIC_STUB_REDIRECT(xe_info_probe_tile_count, xe);
+
+ /*
+ * Probe for tile count only for platforms that support multiple
+ * tiles.
+ */
+ if (xe->info.tile_count == 1)
+ return;
+
+ if (xe->info.skip_mtcfg)
+ return;
+
+ mmio = xe_root_tile_mmio(xe);
+
+ /*
+ * Although the per-tile mmio regs are not yet initialized, this
+ * is fine as it is going to the root tile's mmio, which is
+ * guaranteed to be initialized earlier in xe_mmio_probe_early()
+ */
+ mtcfg = xe_mmio_read32(mmio, XEHP_MTCFG_ADDR);
+ tile_count = REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
+
+ if (tile_count < xe->info.tile_count) {
+ drm_info(&xe->drm, "tile_count: %d, reduced_tile_count %d\n",
+ xe->info.tile_count, tile_count);
+ xe->info.tile_count = tile_count;
+ }
+}
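
xe_info_probe_tile_count() can only lower the tile count: MTCFG encodes the number of tiles minus one in a register bitfield, and the smaller of the fused value and the static descriptor value wins. A sketch of the decode step, assuming a hypothetical register layout:

	#include <linux/bitfield.h>
	#include <linux/bits.h>
	#include <linux/types.h>

	/* Hypothetical layout: tile count minus one in bits 2:0. */
	#define EXAMPLE_TILE_COUNT_MASK		GENMASK(2, 0)

	static unsigned int example_decode_tile_count(u32 mtcfg)
	{
		/* Hardware encodes N tiles as N - 1. */
		return FIELD_GET(EXAMPLE_TILE_COUNT_MASK, mtcfg) + 1;
	}
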
+
+static struct xe_gt *alloc_primary_gt(struct xe_tile *tile,
+ const struct xe_graphics_desc *graphics_desc,
+ const struct xe_media_desc *media_desc)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_gt *gt;
+
+ if (!xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev))) {
+ xe_info(xe, "Primary GT disabled via configfs\n");
+ return NULL;
+ }
+
+ gt = xe_gt_alloc(tile);
+ if (IS_ERR(gt))
+ return gt;
+
+ gt->info.type = XE_GT_TYPE_MAIN;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile;
+ gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
+ gt->info.engine_mask = graphics_desc->hw_engine_mask;
+
+ /*
+ * Before media version 13, the media IP was part of the primary GT
+ * so we need to add the media engines to the primary GT's engine list.
+ */
+ if (MEDIA_VER(xe) < 13 && media_desc)
+ gt->info.engine_mask |= media_desc->hw_engine_mask;
+
+ return gt;
+}
+
+static struct xe_gt *alloc_media_gt(struct xe_tile *tile,
+ const struct xe_media_desc *media_desc)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_gt *gt;
+
+ if (!xe_configfs_media_gt_allowed(to_pci_dev(xe->drm.dev))) {
+ xe_info(xe, "Media GT disabled via configfs\n");
+ return NULL;
+ }
+
+ if (MEDIA_VER(xe) < 13 || !media_desc)
+ return NULL;
+
+ gt = xe_gt_alloc(tile);
+ if (IS_ERR(gt))
+ return gt;
+
+ gt->info.type = XE_GT_TYPE_MEDIA;
+ gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
+ gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
+ gt->info.engine_mask = media_desc->hw_engine_mask;
+
+ return gt;
+}
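
The two allocators above implement a fixed-stride GT id scheme: each tile reserves max_gt_per_tile consecutive ids, with the primary GT at offset 0 and the media GT at offset 1. A toy illustration of the numbering (not driver code):

	/*
	 * With max_gt_per_tile == 2:
	 *   tile 0: primary GT id 0, media GT id 1
	 *   tile 1: primary GT id 2, media GT id 3
	 */
	static unsigned int example_gt_id(unsigned int tile_id,
					  unsigned int max_gt_per_tile,
					  bool media)
	{
		return tile_id * max_gt_per_tile + (media ? 1 : 0);
	}
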
+
+/*
* Initialize device info content that does require knowledge about
* graphics / media IP version.
* Make sure that GT / tile structures allocated by the driver match the data
@@ -614,6 +809,7 @@ static int xe_info_init(struct xe_device *xe,
const struct xe_media_desc *media_desc;
struct xe_tile *tile;
struct xe_gt *gt;
+ int ret;
u8 id;
/*
@@ -629,8 +825,11 @@ static int xe_info_init(struct xe_device *xe,
xe->info.step = xe_step_pre_gmdid_get(xe);
} else {
xe_assert(xe, !desc->pre_gmdid_media_ip);
- handle_gmdid(xe, &graphics_ip, &media_ip,
- &graphics_gmdid_revid, &media_gmdid_revid);
+ ret = handle_gmdid(xe, &graphics_ip, &media_ip,
+ &graphics_gmdid_revid, &media_gmdid_revid);
+ if (ret)
+ return ret;
+
xe->info.step = xe_step_gmdid_get(xe,
graphics_gmdid_revid,
media_gmdid_revid);
@@ -657,21 +856,17 @@ static int xe_info_init(struct xe_device *xe,
media_desc = NULL;
}
- xe->info.vram_flags = graphics_desc->vram_flags;
- xe->info.va_bits = graphics_desc->va_bits;
- xe->info.vm_max_level = graphics_desc->vm_max_level;
xe->info.has_asid = graphics_desc->has_asid;
xe->info.has_atomic_enable_pte_bit = graphics_desc->has_atomic_enable_pte_bit;
if (xe->info.platform != XE_PVC)
xe->info.has_device_atomics_on_smem = 1;
- /* Runtime detection may change this later */
- xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
-
- xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
+ xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval;
xe->info.has_usm = graphics_desc->has_usm;
xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp;
+ xe_info_probe_tile_count(xe);
+
for_each_remote_tile(tile, xe, id) {
int err;
@@ -680,41 +875,41 @@ static int xe_info_init(struct xe_device *xe,
return err;
}
- /*
- * All platforms have at least one primary GT. Any platform with media
- * version 13 or higher has an additional dedicated media GT. And
- * depending on the graphics IP there may be additional "remote tiles."
- * All of these together determine the overall GT count.
- */
+ /* Allocate any GT and VRAM structures necessary for the platform. */
for_each_tile(tile, xe, id) {
- gt = tile->primary_gt;
- gt->info.type = XE_GT_TYPE_MAIN;
- gt->info.id = tile->id * xe->info.max_gt_per_tile;
- gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state;
- gt->info.engine_mask = graphics_desc->hw_engine_mask;
- xe->info.gt_count++;
+ int err;
- if (MEDIA_VER(xe) < 13 && media_desc)
- gt->info.engine_mask |= media_desc->hw_engine_mask;
+ err = xe_tile_alloc_vram(tile);
+ if (err)
+ return err;
- if (MEDIA_VER(xe) < 13 || !media_desc)
- continue;
+ tile->primary_gt = alloc_primary_gt(tile, graphics_desc, media_desc);
+ if (IS_ERR(tile->primary_gt))
+ return PTR_ERR(tile->primary_gt);
/*
- * Allocate and setup media GT for platforms with standalone
- * media.
+ * It's not currently possible to probe a device with the
+ * primary GT disabled. With some work, this may become
+ * possible in the future for igpu platforms (although probably
+ * not for dgpus, since access to the primary GT's BCS engines
+ * is required for VRAM management).
*/
- tile->media_gt = xe_gt_alloc(tile);
+ if (!tile->primary_gt) {
+ drm_err(&xe->drm, "Cannot probe device with without a primary GT\n");
+ return -ENODEV;
+ }
+
+ tile->media_gt = alloc_media_gt(tile, media_desc);
if (IS_ERR(tile->media_gt))
return PTR_ERR(tile->media_gt);
+ }
- gt = tile->media_gt;
- gt->info.type = XE_GT_TYPE_MEDIA;
- gt->info.id = tile->id * xe->info.max_gt_per_tile + 1;
- gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state;
- gt->info.engine_mask = media_desc->hw_engine_mask;
+ /*
+ * Now that we have tiles and GTs defined, let's loop over valid GTs
+ * in order to define gt_count.
+ */
+ for_each_gt(gt, xe, id)
xe->info.gt_count++;
- }
return 0;
}
@@ -726,7 +921,7 @@ static void xe_pci_remove(struct pci_dev *pdev)
if (IS_SRIOV_PF(xe))
xe_pci_sriov_configure(pdev, 0);
- if (xe_survivability_mode_is_enabled(xe))
+ if (xe_survivability_mode_is_boot_enabled(xe))
return;
xe_device_remove(xe);
@@ -759,6 +954,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct xe_device *xe;
int err;
+ xe_configfs_check_device(pdev);
+
if (desc->require_force_probe && !id_forced(pdev->device)) {
dev_info(&pdev->dev,
"Your graphics device %04x is not officially supported\n"
@@ -799,6 +996,8 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
return err;
+ xe_vram_resize_bar(xe);
+
err = xe_device_probe_early(xe);
/*
* In Boot Survivability mode, no drm card is exposed and the driver
* is flashed through mei. Return success if survivability mode
* is enabled due to pcode failure or configfs being set
* is enabled due to pcode failure or configfs being set
*/
- if (xe_survivability_mode_is_enabled(xe))
+ if (xe_survivability_mode_is_boot_enabled(xe))
return 0;
if (err)
@@ -900,7 +1099,7 @@ static int xe_pci_suspend(struct device *dev)
struct xe_device *xe = pdev_to_xe_device(pdev);
int err;
- if (xe_survivability_mode_is_enabled(xe))
+ if (xe_survivability_mode_is_boot_enabled(xe))
return -EBUSY;
err = xe_pm_suspend(xe);
@@ -1024,6 +1223,23 @@ static struct pci_driver xe_pci_driver = {
#endif
};
+/**
+ * xe_pci_to_pf_device() - Get PF &xe_device.
+ * @pdev: the VF &pci_dev device
+ *
+ * Return: pointer to PF &xe_device, NULL otherwise.
+ */
+struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev)
+{
+ struct drm_device *drm;
+
+ drm = pci_iov_get_pf_drvdata(pdev, &xe_pci_driver);
+ if (IS_ERR(drm))
+ return NULL;
+
+ return to_xe_device(drm);
+}
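
A hedged caller sketch for the helper above: a VF-side path could use it to reach the PF's device state, treating NULL as "PF not bound to the xe driver". All names other than xe_pci_to_pf_device() are illustrative:

	/* Hypothetical VF-side caller. */
	static int example_vf_query_pf(struct pci_dev *vf_pdev)
	{
		struct xe_device *pf_xe = xe_pci_to_pf_device(vf_pdev);

		if (!pf_xe)
			return -ENODEV;	/* PF device not available */

		/* ... consult pf_xe under the caller's locking rules ... */
		return 0;
	}
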
+
int xe_register_pci_driver(void)
{
return pci_register_driver(&xe_pci_driver);
diff --git a/drivers/gpu/drm/xe/xe_pci.h b/drivers/gpu/drm/xe/xe_pci.h
index 611c1209b14c..11bcc5fe2c5b 100644
--- a/drivers/gpu/drm/xe/xe_pci.h
+++ b/drivers/gpu/drm/xe/xe_pci.h
@@ -6,7 +6,10 @@
#ifndef _XE_PCI_H_
#define _XE_PCI_H_
+struct pci_dev;
+
int xe_register_pci_driver(void);
void xe_unregister_pci_driver(void);
+struct xe_device *xe_pci_to_pf_device(struct pci_dev *pdev);
#endif
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c
index af05db07162e..9ff69c4843b0 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.c
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.c
@@ -17,68 +17,18 @@
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_control.h"
#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_provision.h"
+#include "xe_sriov_pf_sysfs.h"
#include "xe_sriov_printk.h"
-static int pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
-{
- unsigned int n;
-
- for (n = 1; n <= num_vfs; n++)
- if (!xe_gt_sriov_pf_config_is_empty(gt, n))
- return false;
-
- return true;
-}
-
-static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
-{
- struct xe_gt *gt;
- unsigned int id;
- int result = 0, err;
-
- for_each_gt(gt, xe, id) {
- if (!pf_needs_provisioning(gt, num_vfs))
- continue;
- err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
- result = result ?: err;
- }
-
- return result;
-}
-
-static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
-{
- struct xe_gt *gt;
- unsigned int id;
- unsigned int n;
-
- for_each_gt(gt, xe, id)
- for (n = 1; n <= num_vfs; n++)
- xe_gt_sriov_pf_config_release(gt, n, true);
-}
-
static void pf_reset_vfs(struct xe_device *xe, unsigned int num_vfs)
{
- struct xe_gt *gt;
- unsigned int id;
unsigned int n;
- for_each_gt(gt, xe, id)
- for (n = 1; n <= num_vfs; n++)
- xe_gt_sriov_pf_control_trigger_flr(gt, n);
-}
-
-static struct pci_dev *xe_pci_pf_get_vf_dev(struct xe_device *xe, unsigned int vf_id)
-{
- struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
-
- xe_assert(xe, IS_SRIOV_PF(xe));
-
- /* caller must use pci_dev_put() */
- return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
- pdev->bus->number,
- pci_iov_virtfn_devfn(pdev, vf_id));
+ for (n = 1; n <= num_vfs; n++)
+ xe_sriov_pf_control_reset_vf(xe, n);
}
static void pf_link_vfs(struct xe_device *xe, int num_vfs)
@@ -99,7 +49,7 @@ static void pf_link_vfs(struct xe_device *xe, int num_vfs)
* enforce correct resume order.
*/
for (n = 1; n <= num_vfs; n++) {
- pdev_vf = xe_pci_pf_get_vf_dev(xe, n - 1);
+ pdev_vf = xe_pci_sriov_get_vf_pdev(pdev_pf, n);
/* unlikely, something weird is happening, abort */
if (!pdev_vf) {
@@ -144,6 +94,20 @@ static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
}
+static int pf_prepare_vfs_enabling(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ /* make sure we are not locked-down by other components */
+ return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, false, NULL);
+}
+
+static void pf_finish_vfs_enabling(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ /* allow other components to lockdown VFs enabling */
+ xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, false, NULL);
+}
+
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -159,6 +123,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
if (err)
goto out;
+ err = pf_prepare_vfs_enabling(xe);
+ if (err)
+ goto out;
+
/*
* We must hold additional reference to the runtime PM to keep PF in D0
* during VFs lifetime, as our VFs do not implement the PM capability.
@@ -170,7 +138,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
*/
xe_pm_runtime_get_noresume(xe);
- err = pf_provision_vfs(xe, num_vfs);
+ err = xe_sriov_pf_provision_vfs(xe, num_vfs);
if (err < 0)
goto failed;
@@ -189,13 +157,16 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
xe_sriov_info(xe, "Enabled %u of %u VF%s\n",
num_vfs, total_vfs, str_plural(total_vfs));
+ xe_sriov_pf_sysfs_link_vfs(xe, num_vfs);
+
pf_engine_activity_stats(xe, num_vfs, true);
return num_vfs;
failed:
- pf_unprovision_vfs(xe, num_vfs);
+ xe_sriov_pf_unprovision_vfs(xe, num_vfs);
xe_pm_runtime_put(xe);
+ pf_finish_vfs_enabling(xe);
out:
xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n",
num_vfs, str_plural(num_vfs), ERR_PTR(err));
@@ -216,15 +187,19 @@ static int pf_disable_vfs(struct xe_device *xe)
pf_engine_activity_stats(xe, num_vfs, false);
+ xe_sriov_pf_sysfs_unlink_vfs(xe, num_vfs);
+
pci_disable_sriov(pdev);
pf_reset_vfs(xe, num_vfs);
- pf_unprovision_vfs(xe, num_vfs);
+ xe_sriov_pf_unprovision_vfs(xe, num_vfs);
/* not needed anymore - see pf_enable_vfs() */
xe_pm_runtime_put(xe);
+ pf_finish_vfs_enabling(xe);
+
xe_sriov_info(xe, "Disabled %u VF%s\n", num_vfs, str_plural(num_vfs));
return 0;
}
@@ -267,3 +242,25 @@ int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
return ret;
}
+
+/**
+ * xe_pci_sriov_get_vf_pdev() - Lookup the VF's PCI device using the VF identifier.
+ * @pdev: the PF's &pci_dev
+ * @vfid: VF identifier (1-based)
+ *
+ * The caller must decrement the reference count by calling pci_dev_put().
+ *
+ * Return: the VF's &pci_dev or NULL if the VF device was not found.
+ */
+struct pci_dev *xe_pci_sriov_get_vf_pdev(struct pci_dev *pdev, unsigned int vfid)
+{
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+
+ xe_assert(xe, dev_is_pf(&pdev->dev));
+ xe_assert(xe, vfid);
+ xe_assert(xe, vfid <= pci_sriov_get_totalvfs(pdev));
+
+ return pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
+ pdev->bus->number,
+ pci_iov_virtfn_devfn(pdev, vfid - 1));
+}
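
Because the helper returns a referenced pci_dev, every caller must balance the lookup with pci_dev_put(), as pf_link_vfs() does above. A minimal caller sketch (loop body hypothetical):

	static void example_for_each_vf(struct pci_dev *pdev_pf,
					unsigned int num_vfs)
	{
		unsigned int n;

		for (n = 1; n <= num_vfs; n++) {
			struct pci_dev *pdev_vf =
				xe_pci_sriov_get_vf_pdev(pdev_pf, n);

			if (!pdev_vf)
				continue;	/* VF not found */

			/* ... operate on pdev_vf ... */

			pci_dev_put(pdev_vf);	/* drop the lookup reference */
		}
	}
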
diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.h b/drivers/gpu/drm/xe/xe_pci_sriov.h
index c76dd0d90495..b9105d71dbb1 100644
--- a/drivers/gpu/drm/xe/xe_pci_sriov.h
+++ b/drivers/gpu/drm/xe/xe_pci_sriov.h
@@ -10,6 +10,7 @@ struct pci_dev;
#ifdef CONFIG_PCI_IOV
int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs);
+struct pci_dev *xe_pci_sriov_get_vf_pdev(struct pci_dev *pdev, unsigned int vfid);
#else
static inline int xe_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h
index 4de6f69ed975..9892c063a9c5 100644
--- a/drivers/gpu/drm/xe/xe_pci_types.h
+++ b/drivers/gpu/drm/xe/xe_pci_types.h
@@ -30,37 +30,39 @@ struct xe_device_desc {
u8 dma_mask_size;
u8 max_remote_tiles:2;
u8 max_gt_per_tile:2;
+ u8 va_bits;
+ u8 vm_max_level;
+ u8 vram_flags;
u8 require_force_probe:1;
u8 is_dgfx:1;
u8 has_display:1;
u8 has_fan_control:1;
+ u8 has_flat_ccs:1;
u8 has_gsc_nvm:1;
u8 has_heci_gscfi:1;
u8 has_heci_cscfi:1;
+ u8 has_late_bind:1;
u8 has_llc:1;
u8 has_mbx_power_limits:1;
+ u8 has_mem_copy_instr:1;
u8 has_pxp:1;
u8 has_sriov:1;
u8 needs_scratch:1;
u8 skip_guc_pc:1;
u8 skip_mtcfg:1;
u8 skip_pcode:1;
+ u8 needs_shared_vf_gt_wq:1;
};
struct xe_graphics_desc {
- u8 va_bits;
- u8 vm_max_level;
- u8 vram_flags;
-
u64 hw_engine_mask; /* hardware engines provided by graphics IP */
u8 has_asid:1;
u8 has_atomic_enable_pte_bit:1;
- u8 has_flat_ccs:1;
u8 has_indirect_ring_state:1;
- u8 has_range_tlb_invalidation:1;
+ u8 has_range_tlb_inval:1;
u8 has_usm:1;
u8 has_64bit_timestamp:1;
};
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 6a7ddb9005f9..0d33c14ea0cf 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -32,27 +32,39 @@
static int pcode_mailbox_status(struct xe_tile *tile)
{
+ const char *err_str;
+ int err_decode;
u32 err;
- static const struct pcode_err_decode err_decode[] = {
- [PCODE_ILLEGAL_CMD] = {-ENXIO, "Illegal Command"},
- [PCODE_TIMEOUT] = {-ETIMEDOUT, "Timed out"},
- [PCODE_ILLEGAL_DATA] = {-EINVAL, "Illegal Data"},
- [PCODE_ILLEGAL_SUBCOMMAND] = {-ENXIO, "Illegal Subcommand"},
- [PCODE_LOCKED] = {-EBUSY, "PCODE Locked"},
- [PCODE_GT_RATIO_OUT_OF_RANGE] = {-EOVERFLOW,
- "GT ratio out of range"},
- [PCODE_REJECTED] = {-EACCES, "PCODE Rejected"},
- [PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
- };
+
+#define CASE_ERR(_err, _err_decode, _err_str) \
+ case _err: \
+ err_decode = _err_decode; \
+ err_str = _err_str; \
+ break
err = xe_mmio_read32(&tile->mmio, PCODE_MAILBOX) & PCODE_ERROR_MASK;
+ switch (err) {
+ CASE_ERR(PCODE_ILLEGAL_CMD, -ENXIO, "Illegal Command");
+ CASE_ERR(PCODE_TIMEOUT, -ETIMEDOUT, "Timed out");
+ CASE_ERR(PCODE_ILLEGAL_DATA, -EINVAL, "Illegal Data");
+ CASE_ERR(PCODE_ILLEGAL_SUBCOMMAND, -ENXIO, "Illegal Subcommand");
+ CASE_ERR(PCODE_LOCKED, -EBUSY, "PCODE Locked");
+ CASE_ERR(PCODE_GT_RATIO_OUT_OF_RANGE, -EOVERFLOW, "GT ratio out of range");
+ CASE_ERR(PCODE_REJECTED, -EACCES, "PCODE Rejected");
+ default:
+ err_decode = -EPROTO;
+ err_str = "Unknown";
+ }
+
if (err) {
- drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
- err_decode[err].str ?: "Unknown");
- return err_decode[err].errno ?: -EPROTO;
+ drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s",
+ err_decode, err_str);
+
+ return err_decode;
}
return 0;
+#undef CASE_ERR
}
static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h
index 92bfcba51e19..70dcd6625680 100644
--- a/drivers/gpu/drm/xe/xe_pcode_api.h
+++ b/drivers/gpu/drm/xe/xe_pcode_api.h
@@ -92,9 +92,3 @@
#define BMG_PCIE_CAP XE_REG(0x138340)
#define LINK_DOWNGRADE REG_GENMASK(1, 0)
#define DOWNGRADE_CAPABLE 2
-
-struct pcode_err_decode {
- int errno;
- const char *str;
-};
-
diff --git a/drivers/gpu/drm/xe/xe_platform_types.h b/drivers/gpu/drm/xe/xe_platform_types.h
index d08574c4cdb8..f516dbddfd88 100644
--- a/drivers/gpu/drm/xe/xe_platform_types.h
+++ b/drivers/gpu/drm/xe/xe_platform_types.h
@@ -24,6 +24,8 @@ enum xe_platform {
XE_LUNARLAKE,
XE_BATTLEMAGE,
XE_PANTHERLAKE,
+ XE_NOVALAKE_S,
+ XE_CRESCENTISLAND,
};
enum xe_subplatform {
@@ -34,6 +36,7 @@ enum xe_subplatform {
XE_SUBPLATFORM_DG2_G10,
XE_SUBPLATFORM_DG2_G11,
XE_SUBPLATFORM_DG2_G12,
+ XE_SUBPLATFORM_BATTLEMAGE_G21,
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index bb9b6ecad2af..766922530265 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -18,11 +18,13 @@
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
-#include "xe_guc.h"
+#include "xe_gt_idle.h"
#include "xe_i2c.h"
#include "xe_irq.h"
+#include "xe_late_bind_fw.h"
#include "xe_pcode.h"
#include "xe_pxp.h"
+#include "xe_sriov_vf_ccs.h"
#include "xe_trace.h"
#include "xe_vm.h"
#include "xe_wa.h"
@@ -81,8 +83,58 @@ static struct lockdep_map xe_pm_runtime_d3cold_map = {
static struct lockdep_map xe_pm_runtime_nod3cold_map = {
.name = "xe_rpm_nod3cold_map"
};
+
+static struct lockdep_map xe_pm_block_lockdep_map = {
+ .name = "xe_pm_block_map",
+};
#endif
+static void xe_pm_block_begin_signalling(void)
+{
+ lock_acquire_shared_recursive(&xe_pm_block_lockdep_map, 0, 1, NULL, _RET_IP_);
+}
+
+static void xe_pm_block_end_signalling(void)
+{
+ lock_release(&xe_pm_block_lockdep_map, _RET_IP_);
+}
+
+/**
+ * xe_pm_might_block_on_suspend() - Annotate that the code might block on suspend
+ *
+ * Annotation to use where the code might block or cease to make
+ * progress pending resume completion.
+ */
+void xe_pm_might_block_on_suspend(void)
+{
+ lock_map_acquire(&xe_pm_block_lockdep_map);
+ lock_map_release(&xe_pm_block_lockdep_map);
+}
+
+/**
+ * xe_pm_block_on_suspend() - Block pending suspend.
+ * @xe: The xe device about to be suspended.
+ *
+ * Block if the pm notifier has started evicting bos, to avoid
+ * racing and validating those bos back. The function is
+ * annotated to ensure no locks are held that are also grabbed
+ * in the pm notifier or the device suspend / resume.
+ * This is intended to be used by freezable tasks only (not
+ * freezable workqueues), with the intention that the function
+ * returns %-ERESTARTSYS when tasks are frozen during suspend,
+ * allowing the task to freeze. The caller must be able to
+ * handle the %-ERESTARTSYS.
+ *
+ * Return: %0 on success, %-ERESTARTSYS on signal pending or
+ * if freezing requested.
+ */
+int xe_pm_block_on_suspend(struct xe_device *xe)
+{
+ xe_pm_might_block_on_suspend();
+
+ return wait_for_completion_interruptible(&xe->pm_block);
+}
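
A caller sketch for the blocking helper: a freezable task propagates %-ERESTARTSYS so it can be frozen and the syscall restarted. Names other than xe_pm_block_on_suspend() are hypothetical:

	/* Hypothetical ioctl-path caller. */
	static int example_validate_path(struct xe_device *xe)
	{
		int err;

		/* May return -ERESTARTSYS on a signal or freeze request. */
		err = xe_pm_block_on_suspend(xe);
		if (err)
			return err;	/* propagate so the task can freeze */

		/* ... safe to validate buffer objects from here on ... */
		return 0;
	}
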
+
/**
* xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
* @xe: The xe device.
@@ -122,12 +174,15 @@ int xe_pm_suspend(struct xe_device *xe)
int err;
drm_dbg(&xe->drm, "Suspending device\n");
+ xe_pm_block_begin_signalling();
trace_xe_pm_suspend(xe, __builtin_return_address(0));
err = xe_pxp_pm_suspend(xe->pxp);
if (err)
goto err;
+ xe_late_bind_wait_for_worker_completion(&xe->late_bind);
+
for_each_gt(gt, xe, id)
xe_gt_suspend_prepare(gt);
@@ -151,6 +206,8 @@ int xe_pm_suspend(struct xe_device *xe)
xe_i2c_pm_suspend(xe);
drm_dbg(&xe->drm, "Device suspended\n");
+ xe_pm_block_end_signalling();
+
return 0;
err_display:
@@ -158,6 +215,7 @@ err_display:
xe_pxp_pm_resume(xe->pxp);
err:
drm_dbg(&xe->drm, "Device suspend failed %d\n", err);
+ xe_pm_block_end_signalling();
return err;
}
@@ -174,9 +232,13 @@ int xe_pm_resume(struct xe_device *xe)
u8 id;
int err;
+ xe_pm_block_begin_signalling();
drm_dbg(&xe->drm, "Resuming device\n");
trace_xe_pm_resume(xe, __builtin_return_address(0));
+ for_each_gt(gt, xe, id)
+ xe_gt_idle_disable_c6(gt);
+
for_each_tile(tile, xe, id)
xe_wa_apply_tile_workarounds(tile);
@@ -194,7 +256,7 @@ int xe_pm_resume(struct xe_device *xe)
if (err)
goto err;
- xe_i2c_pm_resume(xe, xe->d3cold.allowed);
+ xe_i2c_pm_resume(xe, true);
xe_irq_resume(xe);
@@ -209,10 +271,17 @@ int xe_pm_resume(struct xe_device *xe)
xe_pxp_pm_resume(xe->pxp);
+ if (IS_VF_CCS_READY(xe))
+ xe_sriov_vf_ccs_register_context(xe);
+
+ xe_late_bind_fw_load(&xe->late_bind);
+
drm_dbg(&xe->drm, "Device resumed\n");
+ xe_pm_block_end_signalling();
return 0;
err:
drm_dbg(&xe->drm, "Device resume failed %d\n", err);
+ xe_pm_block_end_signalling();
return err;
}
@@ -244,6 +313,10 @@ static void xe_pm_runtime_init(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
+ /* Our current VFs do not support RPM, so disable it */
+ if (IS_SRIOV_VF(xe))
+ return;
+
/*
* Disable the system suspend direct complete optimization.
* We need to ensure that the regular device suspend/resume functions
@@ -313,9 +386,16 @@ static int xe_pm_notifier_callback(struct notifier_block *nb,
switch (action) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ {
+ struct xe_validation_ctx ctx;
+
reinit_completion(&xe->pm_block);
+ xe_pm_block_begin_signalling();
xe_pm_runtime_get(xe);
+ (void)xe_validation_ctx_init(&ctx, &xe->val, NULL,
+ (struct xe_val_flags) {.exclusive = true});
err = xe_bo_evict_all_user(xe);
+ xe_validation_ctx_fini(&ctx);
if (err)
drm_dbg(&xe->drm, "Notifier evict user failed (%d)\n", err);
@@ -327,7 +407,9 @@ static int xe_pm_notifier_callback(struct notifier_block *nb,
* avoid a runtime suspend interfering with evicted objects or backup
* allocations.
*/
+ xe_pm_block_end_signalling();
break;
+ }
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
complete_all(&xe->pm_block);
@@ -389,6 +471,10 @@ static void xe_pm_runtime_fini(struct xe_device *xe)
{
struct device *dev = xe->drm.dev;
+ /* Our current VFs do not support RPM, so disable it */
+ if (IS_SRIOV_VF(xe))
+ return;
+
pm_runtime_get_sync(dev);
pm_runtime_forbid(dev);
}
@@ -547,6 +633,9 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_rpm_lockmap_acquire(xe);
+ for_each_gt(gt, xe, id)
+ xe_gt_idle_disable_c6(gt);
+
if (xe->d3cold.allowed) {
err = xe_pcode_ready(xe, true);
if (err)
@@ -580,6 +669,12 @@ int xe_pm_runtime_resume(struct xe_device *xe)
xe_pxp_pm_resume(xe->pxp);
+ if (IS_VF_CCS_READY(xe))
+ xe_sriov_vf_ccs_register_context(xe);
+
+ if (xe->d3cold.allowed)
+ xe_late_bind_fw_load(&xe->late_bind);
+
out:
xe_rpm_lockmap_release(xe);
xe_pm_write_callback_task(xe, NULL);
@@ -631,6 +726,13 @@ static void xe_pm_runtime_lockdep_prime(void)
/**
* xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
* @xe: xe device instance
+ *
+ * When possible, scope-based runtime PM (through guard(xe_pm_runtime)) should
+ * be preferred over direct usage of this function. Manual get/put handling
+ * should only be used when the function contains goto-based logic which
+ * can break scope-based handling, or when the lifetime of the runtime PM
+ * reference does not match a specific scope (e.g., runtime PM obtained in one
+ * function and released in a different one).
*/
void xe_pm_runtime_get(struct xe_device *xe)
{
@@ -663,6 +765,13 @@ void xe_pm_runtime_put(struct xe_device *xe)
* xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
* @xe: xe device instance
*
+ * When possible, scope-based runtime PM (through
+ * ACQUIRE(xe_pm_runtime_ioctl, ...)) should be preferred over direct usage of this
+ * function. Manual get/put handling should only be used when the function
+ * contains goto-based logic which can break scope-based handling, or when the
+ * lifetime of the runtime PM reference does not match a specific scope (e.g.,
+ * runtime PM obtained in one function and released in a different one).
+ *
* Returns: Any number greater than or equal to 0 for success, negative error
* code otherwise.
*/
@@ -732,6 +841,13 @@ static bool xe_pm_suspending_or_resuming(struct xe_device *xe)
* It will warn if not protected.
* The reference should be put back after this function regardless, since it
* will always bump the usage counter.
+ *
+ * When possible, scope-based runtime PM (through guard(xe_pm_runtime_noresume))
+ * should be preferred over direct usage of this function. Manual get/put handling
+ * should only be used when the function contains goto-based logic which can
+ * break scope-based handling, or when the lifetime of the runtime PM reference
+ * does not match a specific scope (e.g., runtime PM obtained in one function
+ * and released in a different one).
*/
void xe_pm_runtime_get_noresume(struct xe_device *xe)
{
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 59678b310e55..6b27039e7b2d 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -6,6 +6,7 @@
#ifndef _XE_PM_H_
#define _XE_PM_H_
+#include <linux/cleanup.h>
#include <linux/pm_runtime.h>
#define DEFAULT_VRAM_THRESHOLD 300 /* in MB */
@@ -33,6 +34,24 @@ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
bool xe_rpm_reclaim_safe(const struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
+int xe_pm_block_on_suspend(struct xe_device *xe);
+void xe_pm_might_block_on_suspend(void);
int xe_pm_module_init(void);
+static inline void __xe_pm_runtime_noop(struct xe_device *xe) {}
+
+DEFINE_GUARD(xe_pm_runtime, struct xe_device *,
+ xe_pm_runtime_get(_T), xe_pm_runtime_put(_T))
+DEFINE_GUARD(xe_pm_runtime_noresume, struct xe_device *,
+ xe_pm_runtime_get_noresume(_T), xe_pm_runtime_put(_T))
+DEFINE_GUARD_COND(xe_pm_runtime, _ioctl, xe_pm_runtime_get_ioctl(_T), _RET >= 0)
+
+/*
+ * Used when a function needs to release runtime PM in all possible cases
+ * and error paths, but the wakeref was already acquired by a different
+ * function (i.e., get() has already happened so only a put() is needed).
+ */
+DEFINE_GUARD(xe_pm_runtime_release_only, struct xe_device *,
+ __xe_pm_runtime_noop(_T), xe_pm_runtime_put(_T));
+
#endif
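
With these definitions in place, callers can replace manual get/put pairs with a scope-bound guard; the put side runs automatically on every exit path. A usage sketch (the function body is hypothetical):

	/* Hypothetical caller using the guard defined above. */
	static int example_hw_op(struct xe_device *xe)
	{
		guard(xe_pm_runtime)(xe);	/* put() runs on scope exit */

		/* ... device stays awake for the rest of this scope ... */
		return 0;
	}
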
diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c
index cab51d826345..c63335eb69e5 100644
--- a/drivers/gpu/drm/xe/xe_pmu.c
+++ b/drivers/gpu/drm/xe/xe_pmu.c
@@ -497,7 +497,12 @@ static const struct attribute_group *pmu_events_attr_update[] = {
static void set_supported_events(struct xe_pmu *pmu)
{
struct xe_device *xe = container_of(pmu, typeof(*xe), pmu);
- struct xe_gt *gt = xe_device_get_gt(xe, 0);
+ struct xe_gt *gt;
+ int id;
+
+ /* If there are no GTs, don't support any GT-related events */
+ if (xe->info.gt_count == 0)
+ return;
if (!xe->info.skip_guc_pc) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_C6_RESIDENCY);
@@ -505,6 +510,10 @@ static void set_supported_events(struct xe_pmu *pmu)
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_GT_REQUESTED_FREQUENCY);
}
+ /* Find the first available GT to query engine event capabilities */
+ for_each_gt(gt, xe, id)
+ break;
+
if (xe_guc_engine_activity_supported(&gt->uc.guc)) {
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_ACTIVE_TICKS);
pmu->supported_events |= BIT_ULL(XE_PMU_EVENT_ENGINE_TOTAL_TICKS);
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 83fbeea5aa20..7f587ca3947d 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -8,6 +8,8 @@
#include <linux/slab.h>
#include "xe_exec_queue.h"
+#include "xe_gt_printk.h"
+#include "xe_guc_exec_queue_types.h"
#include "xe_vm.h"
static void preempt_fence_work_func(struct work_struct *w)
@@ -22,6 +24,15 @@ static void preempt_fence_work_func(struct work_struct *w)
} else if (!q->ops->reset_status(q)) {
int err = q->ops->suspend_wait(q);
+ if (err == -EAGAIN) {
+ xe_gt_dbg(q->gt, "PREEMPT FENCE RETRY guc_id=%d",
+ q->guc->id);
+ queue_work(q->vm->xe->preempt_fence_wq,
+ &pfence->preempt_work);
+ dma_fence_end_signalling(cookie);
+ return;
+ }
+
if (err)
dma_fence_set_error(&pfence->base, err);
} else {
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence_types.h b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
index 312c3372a49f..ac125c697a41 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence_types.h
+++ b/drivers/gpu/drm/xe/xe_preempt_fence_types.h
@@ -12,7 +12,7 @@
struct xe_exec_queue;
/**
- * struct xe_preempt_fence - XE preempt fence
+ * struct xe_preempt_fence - Xe preempt fence
*
* hardware and triggers a callback once the xe_engine is complete.
*/
diff --git a/drivers/gpu/drm/xe/xe_printk.h b/drivers/gpu/drm/xe/xe_printk.h
new file mode 100644
index 000000000000..c5be2385aa95
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_printk.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PRINTK_H_
+#define _XE_PRINTK_H_
+
+#include <drm/drm_print.h>
+
+#include "xe_device_types.h"
+
+#define __XE_PRINTK_FMT(_xe, _fmt, _args...) _fmt, ##_args
+
+#define xe_printk(_xe, _level, _fmt, ...) \
+ drm_##_level(&(_xe)->drm, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__))
+
+#define xe_err(_xe, _fmt, ...) \
+ xe_printk((_xe), err, _fmt, ##__VA_ARGS__)
+
+#define xe_err_once(_xe, _fmt, ...) \
+ xe_printk((_xe), err_once, _fmt, ##__VA_ARGS__)
+
+#define xe_err_ratelimited(_xe, _fmt, ...) \
+ xe_printk((_xe), err_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define xe_warn(_xe, _fmt, ...) \
+ xe_printk((_xe), warn, _fmt, ##__VA_ARGS__)
+
+#define xe_notice(_xe, _fmt, ...) \
+ xe_printk((_xe), notice, _fmt, ##__VA_ARGS__)
+
+#define xe_info(_xe, _fmt, ...) \
+ xe_printk((_xe), info, _fmt, ##__VA_ARGS__)
+
+#define xe_dbg(_xe, _fmt, ...) \
+ xe_printk((_xe), dbg, _fmt, ##__VA_ARGS__)
+
+#define xe_WARN_type(_xe, _type, _condition, _fmt, ...) \
+ drm_WARN##_type(&(_xe)->drm, _condition, _fmt, ## __VA_ARGS__)
+
+#define xe_WARN(_xe, _condition, _fmt, ...) \
+ xe_WARN_type((_xe),, _condition, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__))
+
+#define xe_WARN_ONCE(_xe, _condition, _fmt, ...) \
+ xe_WARN_type((_xe), _ONCE, _condition, __XE_PRINTK_FMT((_xe), _fmt, ## __VA_ARGS__))
+
+#define xe_WARN_ON(_xe, _condition) \
+ xe_WARN((_xe), _condition, "%s(%s)", "WARN_ON", __stringify(_condition))
+
+#define xe_WARN_ON_ONCE(_xe, _condition) \
+ xe_WARN_ONCE((_xe), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition))
+
+static inline void __xe_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_device *xe = p->arg;
+
+ xe_err(xe, "%pV", vaf);
+}
+
+static inline void __xe_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_device *xe = p->arg;
+
+ xe_info(xe, "%pV", vaf);
+}
+
+static inline void __xe_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_device *xe = p->arg;
+ struct drm_printer ddp;
+
+ /*
+ * The original xe_dbg() callsite annotations are useless here,
+ * redirect to the tweaked drm_dbg_printer() instead.
+ */
+ ddp = drm_dbg_printer(&xe->drm, DRM_UT_DRIVER, NULL);
+ ddp.origin = p->origin;
+
+ drm_printf(&ddp, __XE_PRINTK_FMT(xe, "%pV", vaf));
+}
+
+/**
+ * xe_err_printer - Construct a &drm_printer that outputs to xe_err()
+ * @xe: the &xe_device pointer to use in xe_err()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_err_printer(struct xe_device *xe)
+{
+ struct drm_printer p = {
+ .printfn = __xe_printfn_err,
+ .arg = xe,
+ };
+ return p;
+}
+
+/**
+ * xe_info_printer - Construct a &drm_printer that outputs to xe_info()
+ * @xe: the &xe_device pointer to use in xe_info()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_info_printer(struct xe_device *xe)
+{
+ struct drm_printer p = {
+ .printfn = __xe_printfn_info,
+ .arg = xe,
+ };
+ return p;
+}
+
+/**
+ * xe_dbg_printer - Construct a &drm_printer that outputs like xe_dbg()
+ * @xe: the &xe_device pointer to use in xe_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_dbg_printer(struct xe_device *xe)
+{
+ struct drm_printer p = {
+ .printfn = __xe_printfn_dbg,
+ .arg = xe,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
+
+#endif
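
A short usage sketch for the printer constructors above: any helper that takes a &drm_printer can be pointed at the device-prefixed xe_err()/xe_info() output. The dump function is hypothetical:

	/* Hypothetical caller routing generic output through xe_info(). */
	static void example_dump_state(struct xe_device *xe)
	{
		struct drm_printer p = xe_info_printer(xe);

		drm_printf(&p, "state dump begins\n");
		/* ... pass &p to any helper that accepts a drm_printer ... */
	}
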
diff --git a/drivers/gpu/drm/xe/xe_psmi.c b/drivers/gpu/drm/xe/xe_psmi.c
new file mode 100644
index 000000000000..6a54e38b81ba
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_psmi.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_configfs.h"
+#include "xe_psmi.h"
+
+/*
+ * PSMI capture support
+ *
+ * PSMI capture requires a physically contiguous buffer. The PSMI tool owns
+ * all necessary configuration (MMIO register writes are done from
+ * user-space). However, the KMD needs to provide the PSMI tool with the
+ * physical address of the base of the PSMI buffer in the VRAM case.
+ *
+ * VRAM backed PSMI buffer:
+ * The buffer is allocated as a GEM object with the XE_BO_CREATE_PINNED_BIT
+ * flag, which creates a contiguous allocation. The physical address is
+ * returned from psmi_debugfs_capture_addr_show(). The PSMI tool can mmap
+ * the buffer via the PCIBAR through sysfs.
+ *
+ * SYSTEM memory backed PSMI buffer:
+ * The interface here does not support allocating from the SYSTEM memory
+ * region. The PSMI tool needs to allocate memory itself using hugetlbfs. To
+ * get the physical address, user-space can query /proc/[pid]/pagemap. As an
+ * alternative, CMA debugfs could also be used to allocate reserved CMA memory.
+ */
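
A hedged userspace sketch of the intended VRAM flow: select regions, set a size (which allocates the buffers), then read back the base addresses. The debugfs paths follow from the file names registered below; the DRM minor number is an assumption:

	/* Userspace sketch; error handling trimmed for brevity. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *base = "/sys/kernel/debug/dri/0"; /* minor 0 assumed */
		char path[256];
		int fd;

		snprintf(path, sizeof(path), "%s/psmi_capture_region_mask", base);
		fd = open(path, O_WRONLY);
		write(fd, "0x2\n", 4);	/* tile 0 VRAM; bit 0 (SMEM) is rejected */
		close(fd);

		snprintf(path, sizeof(path), "%s/psmi_capture_size", base);
		fd = open(path, O_WRONLY);
		write(fd, "67108864\n", 9);	/* 64 MiB, a power of 2 */
		close(fd);

		snprintf(path, sizeof(path), "%s/psmi_capture_addr", base);
		fd = open(path, O_RDONLY);
		/* read() returns lines of "<region>: 0x<address>" */
		close(fd);
		return 0;
	}
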
+
+static bool psmi_enabled(struct xe_device *xe)
+{
+ return xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev));
+}
+
+static void psmi_free_object(struct xe_bo *bo)
+{
+ xe_bo_lock(bo, NULL);
+ xe_bo_unpin(bo);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+}
+
+/*
+ * Free PSMI capture buffer objects.
+ */
+static void psmi_cleanup(struct xe_device *xe)
+{
+ unsigned long id, region_mask = xe->psmi.region_mask;
+ struct xe_bo *bo;
+
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ bo = xe->psmi.capture_obj[id];
+ if (bo) {
+ psmi_free_object(bo);
+ xe->psmi.capture_obj[id] = NULL;
+ }
+ }
+}
+
+static struct xe_bo *psmi_alloc_object(struct xe_device *xe,
+ unsigned int id, size_t bo_size)
+{
+ struct xe_tile *tile;
+
+ xe_assert(xe, id);
+ xe_assert(xe, bo_size);
+
+ tile = &xe->tiles[id - 1];
+
+ /* VRAM: Allocate GEM object for the capture buffer */
+ return xe_bo_create_pin_range_novm(xe, tile, bo_size, 0, ~0ull,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_VRAM_IF_DGFX(tile) |
+ XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_PINNED_LATE_RESTORE |
+ XE_BO_FLAG_NEEDS_CPU_ACCESS);
+}
+
+/*
+ * Allocate PSMI capture buffer objects (via the debugfs set function), based
+ * on which regions the user has selected in region_mask. @size is the size
+ * in bytes (should be a power of 2).
+ *
+ * Always release/free the current buffer objects before attempting to allocate
+ * new ones. Size == 0 will free all current buffers.
+ *
+ * Note that we don't write any registers, as the capture tool is already configuring
+ * all PSMI registers itself via mmio space.
+ */
+static int psmi_resize_object(struct xe_device *xe, size_t size)
+{
+ unsigned long id, region_mask = xe->psmi.region_mask;
+ struct xe_bo *bo = NULL;
+ int err = 0;
+
+ /* if resizing, free currently allocated buffers first */
+ psmi_cleanup(xe);
+
+ /* can set size to 0, in which case, now done */
+ if (!size)
+ return 0;
+
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ bo = psmi_alloc_object(xe, id, size);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ break;
+ }
+ xe->psmi.capture_obj[id] = bo;
+
+ drm_info(&xe->drm,
+ "PSMI capture size requested: %zu bytes, allocated: %lu:%zu\n",
+ size, id, bo ? xe_bo_size(bo) : 0);
+ }
+
+ /* on error, reverse what was allocated */
+ if (err)
+ psmi_cleanup(xe);
+
+ return err;
+}
+
+/*
+ * Returns an address for the capture tool to use to find start of capture
+ * buffer. Capture tool requires the capability to have a buffer allocated per
+ * each tile (VRAM region), thus we return an address for each region.
+ */
+static int psmi_debugfs_capture_addr_show(struct seq_file *m, void *data)
+{
+ struct xe_device *xe = m->private;
+ unsigned long id, region_mask;
+ struct xe_bo *bo;
+ u64 val;
+
+ region_mask = xe->psmi.region_mask;
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ /* VRAM region */
+ bo = xe->psmi.capture_obj[id];
+ if (!bo)
+ continue;
+
+ /* pinned, so don't need bo_lock */
+ val = __xe_bo_addr(bo, 0, PAGE_SIZE);
+ seq_printf(m, "%ld: 0x%llx\n", id, val);
+ }
+
+ return 0;
+}
+
+/*
+ * Return capture buffer size, using the size from first allocated object that
+ * is found. This works because all objects must be of the same size.
+ */
+static int psmi_debugfs_capture_size_get(void *data, u64 *val)
+{
+ unsigned long id, region_mask;
+ struct xe_device *xe = data;
+ struct xe_bo *bo;
+
+ region_mask = xe->psmi.region_mask;
+ for_each_set_bit(id, &region_mask,
+ ARRAY_SIZE(xe->psmi.capture_obj)) {
+ /* smem should never be set */
+ xe_assert(xe, id);
+
+ bo = xe->psmi.capture_obj[id];
+ if (bo) {
+ *val = xe_bo_size(bo);
+ return 0;
+ }
+ }
+
+ /* no capture objects are allocated */
+ *val = 0;
+
+ return 0;
+}
+
+/*
+ * Set size of PSMI capture buffer. This triggers the allocation of capture
+ * buffer in each memory region as specified with prior write to
+ * psmi_capture_region_mask.
+ */
+static int psmi_debugfs_capture_size_set(void *data, u64 val)
+{
+ struct xe_device *xe = data;
+
+ /* user must have specified at least one region */
+ if (!xe->psmi.region_mask)
+ return -EINVAL;
+
+ return psmi_resize_object(xe, val);
+}
+
+static int psmi_debugfs_capture_region_mask_get(void *data, u64 *val)
+{
+ struct xe_device *xe = data;
+
+ *val = xe->psmi.region_mask;
+
+ return 0;
+}
+
+/*
+ * Select VRAM regions for multi-tile devices, only allowed when buffer is not
+ * currently allocated.
+ */
+static int psmi_debugfs_capture_region_mask_set(void *data, u64 region_mask)
+{
+ struct xe_device *xe = data;
+ u64 size = 0;
+
+ /* SMEM is not supported (see comments at top of file) */
+ if (region_mask & 0x1)
+ return -EOPNOTSUPP;
+
+ /* input bitmask should contain only valid TTM regions */
+ if (!region_mask || region_mask & ~xe->info.mem_region_mask)
+ return -EINVAL;
+
+ /* only allow setting mask if buffer is not yet allocated */
+ psmi_debugfs_capture_size_get(xe, &size);
+ if (size)
+ return -EBUSY;
+
+ xe->psmi.region_mask = region_mask;
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(psmi_debugfs_capture_addr);
+
+DEFINE_DEBUGFS_ATTRIBUTE(psmi_debugfs_capture_region_mask_fops,
+ psmi_debugfs_capture_region_mask_get,
+ psmi_debugfs_capture_region_mask_set,
+ "0x%llx\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(psmi_debugfs_capture_size_fops,
+ psmi_debugfs_capture_size_get,
+ psmi_debugfs_capture_size_set,
+ "%lld\n");
+
+void xe_psmi_debugfs_register(struct xe_device *xe)
+{
+ struct drm_minor *minor;
+
+ if (!psmi_enabled(xe))
+ return;
+
+ minor = xe->drm.primary;
+ if (!minor->debugfs_root)
+ return;
+
+ debugfs_create_file("psmi_capture_addr",
+ 0400, minor->debugfs_root, xe,
+ &psmi_debugfs_capture_addr_fops);
+
+ debugfs_create_file("psmi_capture_region_mask",
+ 0600, minor->debugfs_root, xe,
+ &psmi_debugfs_capture_region_mask_fops);
+
+ debugfs_create_file("psmi_capture_size",
+ 0600, minor->debugfs_root, xe,
+ &psmi_debugfs_capture_size_fops);
+}
+
+static void psmi_fini(void *arg)
+{
+ psmi_cleanup(arg);
+}
+
+int xe_psmi_init(struct xe_device *xe)
+{
+ if (!psmi_enabled(xe))
+ return 0;
+
+ return devm_add_action(xe->drm.dev, psmi_fini, xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_psmi.h b/drivers/gpu/drm/xe/xe_psmi.h
new file mode 100644
index 000000000000..b1dfba80d893
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_psmi.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_PSMI_H_
+#define _XE_PSMI_H_
+
+struct xe_device;
+
+int xe_psmi_init(struct xe_device *xe);
+void xe_psmi_debugfs_register(struct xe_device *xe);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index c8e63bd23300..884127b4d97d 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -3,8 +3,6 @@
* Copyright © 2022 Intel Corporation
*/
-#include <linux/dma-fence-array.h>
-
#include "xe_pt.h"
#include "regs/xe_gtt_defs.h"
@@ -13,16 +11,17 @@
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pt_types.h"
#include "xe_pt_walk.h"
#include "xe_res_cursor.h"
#include "xe_sched_job.h"
-#include "xe_sync.h"
#include "xe_svm.h"
+#include "xe_sync.h"
+#include "xe_tlb_inval_job.h"
#include "xe_trace.h"
#include "xe_ttm_stolen_mgr.h"
+#include "xe_userptr.h"
#include "xe_vm.h"
struct xe_pt_dir {
@@ -69,7 +68,7 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
if (level > MAX_HUGEPTE_LEVEL)
return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
- 0, pat_index);
+ 0);
return vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) |
XE_PTE_NULL;
@@ -88,6 +87,7 @@ static void xe_pt_free(struct xe_pt *pt)
* @vm: The vm to create for.
* @tile: The tile to create for.
* @level: The page-table level.
+ * @exec: The drm_exec object used to lock the vm.
*
* Allocate and initialize a single struct xe_pt metadata structure. Also
* create the corresponding page-table bo, but don't initialize it. If the
@@ -99,7 +99,7 @@ static void xe_pt_free(struct xe_pt *pt)
* error.
*/
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
- unsigned int level)
+ unsigned int level, struct drm_exec *exec)
{
struct xe_pt *pt;
struct xe_bo *bo;
@@ -120,12 +120,14 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE |
XE_BO_FLAG_NO_RESV_EVICT | XE_BO_FLAG_PAGETABLE;
if (vm->xef) /* userspace */
- bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE;
+ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE | XE_BO_FLAG_FORCE_USER_VRAM;
pt->level = level;
+
+ drm_WARN_ON(&vm->xe->drm, IS_ERR_OR_NULL(exec));
bo = xe_bo_create_pin_map(vm->xe, tile, vm, SZ_4K,
ttm_bo_type_kernel,
- bo_flags);
+ bo_flags, exec);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto err_kfree;
@@ -518,7 +520,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
{
struct xe_pt_stage_bind_walk *xe_walk =
container_of(walk, typeof(*xe_walk), base);
- u16 pat_index = xe_walk->vma->pat_index;
+ u16 pat_index = xe_walk->vma->attr.pat_index;
struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
struct xe_vm *vm = xe_walk->vm;
struct xe_pt *xe_child;
@@ -589,7 +591,8 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
if (covers || !*child) {
u64 flags = 0;
- xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1);
+ xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1,
+ xe_vm_validation_exec(vm));
if (IS_ERR(xe_child))
return PTR_ERR(xe_child);
@@ -616,7 +619,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
xe_child->is_compact = true;
}
- pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
+ pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0) | flags;
ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
pte);
}
@@ -640,28 +643,31 @@ static const struct xe_pt_walk_ops xe_pt_stage_bind_ops = {
* - In all other cases device atomics will be disabled with AE=0 until an application
* request differently using a ioctl like madvise.
*/
-static bool xe_atomic_for_vram(struct xe_vm *vm)
+static bool xe_atomic_for_vram(struct xe_vm *vm, struct xe_vma *vma)
{
+ if (vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
+ return false;
+
return true;
}
-static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_bo *bo)
+static bool xe_atomic_for_system(struct xe_vm *vm, struct xe_vma *vma)
{
struct xe_device *xe = vm->xe;
+ struct xe_bo *bo = xe_vma_bo(vma);
- if (!xe->info.has_device_atomics_on_smem)
+ if (!xe->info.has_device_atomics_on_smem ||
+ vma->attr.atomic_access == DRM_XE_ATOMIC_CPU)
return false;
+ if (vma->attr.atomic_access == DRM_XE_ATOMIC_DEVICE)
+ return true;
+
/*
* If a SMEM+LMEM allocation is backed by SMEM, a device
* atomics will cause a gpu page fault and which then
* gets migrated to LMEM, bind such allocations with
* device atomics enabled.
- *
- * TODO: Revisit this. Perhaps add something like a
- * fault_on_atomics_in_system UAPI flag.
- * Note that this also prohibits GPU atomics in LR mode for
- * userptr and system memory on DGFX.
*/
return (!IS_DGFX(xe) || (!xe_vm_in_lr_mode(vm) ||
(bo && xe_bo_has_single_placement(bo))));
@@ -707,7 +713,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.vm = vm,
.tile = tile,
.curs = &curs,
- .va_curs_start = range ? range->base.itree.start :
+ .va_curs_start = range ? xe_svm_range_start(range) :
xe_vma_start(vma),
.vma = vma,
.wupd.entries = entries,
@@ -725,8 +731,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
return -EAGAIN;
}
if (xe_svm_range_has_dma_mapping(range)) {
- xe_res_first_dma(range->base.dma_addr, 0,
- range->base.itree.last + 1 - range->base.itree.start,
+ xe_res_first_dma(range->base.pages.dma_addr, 0,
+ xe_svm_range_size(range),
&curs);
xe_svm_range_debug(range, "BIND PREPARE - MIXED");
} else {
@@ -744,8 +750,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
goto walk_pt;
if (vma->gpuva.flags & XE_VMA_ATOMIC_PTE_BIT) {
- xe_walk.default_vram_pte = xe_atomic_for_vram(vm) ? XE_USM_PPGTT_PTE_AE : 0;
- xe_walk.default_system_pte = xe_atomic_for_system(vm, bo) ?
+ xe_walk.default_vram_pte = xe_atomic_for_vram(vm, vma) ? XE_USM_PPGTT_PTE_AE : 0;
+ xe_walk.default_system_pte = xe_atomic_for_system(vm, vma) ?
XE_USM_PPGTT_PTE_AE : 0;
}
@@ -756,8 +762,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
if (!xe_vma_is_null(vma) && !range) {
if (xe_vma_is_userptr(vma))
- xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
- xe_vma_size(vma), &curs);
+ xe_res_first_dma(to_userptr_vma(vma)->userptr.pages.dma_addr, 0,
+ xe_vma_size(vma), &curs);
else if (xe_bo_is_vram(bo) || xe_bo_is_stolen(bo))
xe_res_first(bo->ttm.resource, xe_vma_bo_offset(vma),
xe_vma_size(vma), &curs);
@@ -770,8 +776,8 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
walk_pt:
ret = xe_pt_walk_range(&pt->base, pt->level,
- range ? range->base.itree.start : xe_vma_start(vma),
- range ? range->base.itree.last + 1 : xe_vma_end(vma),
+ range ? xe_svm_range_start(range) : xe_vma_start(vma),
+ range ? xe_svm_range_end(range) : xe_vma_end(vma),
&xe_walk.base);
*num_entries = xe_walk.wupd.num_used_entries;
@@ -910,7 +916,7 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
if (xe_vma_bo(vma))
xe_bo_assert_held(xe_vma_bo(vma));
else if (xe_vma_is_userptr(vma))
- lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock);
+ lockdep_assert_held(&xe_vma_vm(vma)->svm.gpusvm.notifier_lock);
if (!(pt_mask & BIT(tile->id)))
return false;
@@ -950,13 +956,25 @@ bool xe_pt_zap_ptes_range(struct xe_tile *tile, struct xe_vm *vm,
struct xe_pt *pt = vm->pt_root[tile->id];
u8 pt_mask = (range->tile_present & ~range->tile_invalidated);
- xe_svm_assert_in_notifier(vm);
+ /*
+ * Locking rules:
+ *
+ * - notifier_lock (write): full protection against page table changes
+ * and MMU notifier invalidations.
+ *
+ * - notifier_lock (read) + vm_lock (write): combined protection against
+ * invalidations and concurrent page table modifications. (e.g., madvise)
+ *
+ */
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
+ lockdep_is_held_type(&vm->lock, 0)));
if (!(pt_mask & BIT(tile->id)))
return false;
- (void)xe_pt_walk_shared(&pt->base, pt->level, range->base.itree.start,
- range->base.itree.last + 1, &xe_walk.base);
+ (void)xe_pt_walk_shared(&pt->base, pt->level, xe_svm_range_start(range),
+ xe_svm_range_end(range), &xe_walk.base);
return xe_walk.needs_invalidate;
}
@@ -1033,7 +1051,7 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
xe_pt_commit_prepare_locks_assert(vma);
if (xe_vma_is_userptr(vma))
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
}
static void xe_pt_commit(struct xe_vma *vma,
@@ -1261,6 +1279,8 @@ static int op_add_deps(struct xe_vm *vm, struct xe_vma_op *op,
}
static int xe_pt_vm_dependencies(struct xe_sched_job *job,
+ struct xe_tlb_inval_job *ijob,
+ struct xe_tlb_inval_job *mjob,
struct xe_vm *vm,
struct xe_vma_ops *vops,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -1318,16 +1338,23 @@ static int xe_pt_vm_dependencies(struct xe_sched_job *job,
return err;
}
- if (!(pt_update_ops->q->flags & EXEC_QUEUE_FLAG_KERNEL)) {
- if (job)
- err = xe_sched_job_last_fence_add_dep(job, vm);
- else
- err = xe_exec_queue_last_fence_test_dep(pt_update_ops->q, vm);
- }
-
for (i = 0; job && !err && i < vops->num_syncs; i++)
err = xe_sync_entry_add_deps(&vops->syncs[i], job);
+ if (job) {
+ if (ijob) {
+ err = xe_tlb_inval_job_alloc_dep(ijob);
+ if (err)
+ return err;
+ }
+
+ if (mjob) {
+ err = xe_tlb_inval_job_alloc_dep(mjob);
+ if (err)
+ return err;
+ }
+ }
+
return err;
}
@@ -1339,10 +1366,12 @@ static int xe_pt_pre_commit(struct xe_migrate_pt_update *pt_update)
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[pt_update->tile_id];
- return xe_pt_vm_dependencies(pt_update->job, vm, pt_update->vops,
+ return xe_pt_vm_dependencies(pt_update->job, pt_update->ijob,
+ pt_update->mjob, vm, pt_update->vops,
pt_update_ops, rftree);
}
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
#ifdef CONFIG_DRM_XE_USERPTR_INVAL_INJECT
static bool xe_pt_userptr_inject_eagain(struct xe_userptr_vma *uvma)
@@ -1373,7 +1402,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
struct xe_userptr_vma *uvma;
unsigned long notifier_seq;
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
if (!xe_vma_is_userptr(vma))
return 0;
@@ -1382,7 +1411,7 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
if (xe_pt_userptr_inject_eagain(uvma))
xe_vma_userptr_force_invalidate(uvma);
- notifier_seq = uvma->userptr.notifier_seq;
+ notifier_seq = uvma->userptr.pages.notifier_seq;
if (!mmu_interval_read_retry(&uvma->userptr.notifier,
notifier_seq))
@@ -1398,12 +1427,12 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
return 0;
}
-static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
- struct xe_vm_pgtable_update_ops *pt_update)
+static int op_check_svm_userptr(struct xe_vm *vm, struct xe_vma_op *op,
+ struct xe_vm_pgtable_update_ops *pt_update)
{
int err = 0;
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
@@ -1421,64 +1450,10 @@ static int op_check_userptr(struct xe_vm *vm, struct xe_vma_op *op,
case DRM_GPUVA_OP_UNMAP:
break;
case DRM_GPUVA_OP_PREFETCH:
- err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va),
- pt_update);
- break;
- default:
- drm_warn(&vm->xe->drm, "NOT POSSIBLE");
- }
-
- return err;
-}
-
-static int xe_pt_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
- struct xe_vm *vm = pt_update->vops->vm;
- struct xe_vma_ops *vops = pt_update->vops;
- struct xe_vm_pgtable_update_ops *pt_update_ops =
- &vops->pt_update_ops[pt_update->tile_id];
- struct xe_vma_op *op;
- int err;
-
- err = xe_pt_pre_commit(pt_update);
- if (err)
- return err;
-
- down_read(&vm->userptr.notifier_lock);
-
- list_for_each_entry(op, &vops->list, link) {
- err = op_check_userptr(vm, op, pt_update_ops);
- if (err) {
- up_read(&vm->userptr.notifier_lock);
- break;
- }
- }
-
- return err;
-}
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
-{
- struct xe_vm *vm = pt_update->vops->vm;
- struct xe_vma_ops *vops = pt_update->vops;
- struct xe_vma_op *op;
- unsigned long i;
- int err;
-
- err = xe_pt_pre_commit(pt_update);
- if (err)
- return err;
-
- xe_svm_notifier_lock(vm);
-
- list_for_each_entry(op, &vops->list, link) {
- struct xe_svm_range *range = NULL;
-
- if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
- continue;
+ if (xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va))) {
+ struct xe_svm_range *range = op->map_range.range;
+ unsigned long i;
- if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
xe_assert(vm->xe,
xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
xa_for_each(&op->prefetch_range.range, i, range) {
@@ -1486,97 +1461,62 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
return -ENODATA;
}
}
} else {
+ err = vma_check_userptr(vm, gpuva_to_vma(op->base.prefetch.va), pt_update);
+ }
+ break;
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+ case DRM_GPUVA_OP_DRIVER:
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
+ struct xe_svm_range *range = op->map_range.range;
+
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
- xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
- range = op->map_range.range;
xe_svm_range_debug(range, "PRE-COMMIT");
if (!xe_svm_range_pages_valid(range)) {
xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
return -EAGAIN;
}
}
- }
-
- return 0;
-}
+ break;
#endif
-
-struct invalidation_fence {
- struct xe_gt_tlb_invalidation_fence base;
- struct xe_gt *gt;
- struct dma_fence *fence;
- struct dma_fence_cb cb;
- struct work_struct work;
- u64 start;
- u64 end;
- u32 asid;
-};
-
-static void invalidation_fence_cb(struct dma_fence *fence,
- struct dma_fence_cb *cb)
-{
- struct invalidation_fence *ifence =
- container_of(cb, struct invalidation_fence, cb);
- struct xe_device *xe = gt_to_xe(ifence->gt);
-
- trace_xe_gt_tlb_invalidation_fence_cb(xe, &ifence->base);
- if (!ifence->fence->error) {
- queue_work(system_wq, &ifence->work);
- } else {
- ifence->base.base.error = ifence->fence->error;
- xe_gt_tlb_invalidation_fence_signal(&ifence->base);
+ default:
+ drm_warn(&vm->xe->drm, "NOT POSSIBLE");
}
- dma_fence_put(ifence->fence);
-}
-
-static void invalidation_fence_work_func(struct work_struct *w)
-{
- struct invalidation_fence *ifence =
- container_of(w, struct invalidation_fence, work);
- struct xe_device *xe = gt_to_xe(ifence->gt);
- trace_xe_gt_tlb_invalidation_fence_work_func(xe, &ifence->base);
- xe_gt_tlb_invalidation_range(ifence->gt, &ifence->base, ifence->start,
- ifence->end, ifence->asid);
+ return err;
}
-static void invalidation_fence_init(struct xe_gt *gt,
- struct invalidation_fence *ifence,
- struct dma_fence *fence,
- u64 start, u64 end, u32 asid)
+static int xe_pt_svm_userptr_pre_commit(struct xe_migrate_pt_update *pt_update)
{
- int ret;
-
- trace_xe_gt_tlb_invalidation_fence_create(gt_to_xe(gt), &ifence->base);
+ struct xe_vm *vm = pt_update->vops->vm;
+ struct xe_vma_ops *vops = pt_update->vops;
+ struct xe_vm_pgtable_update_ops *pt_update_ops =
+ &vops->pt_update_ops[pt_update->tile_id];
+ struct xe_vma_op *op;
+ int err;
- xe_gt_tlb_invalidation_fence_init(gt, &ifence->base, false);
+ err = xe_pt_pre_commit(pt_update);
+ if (err)
+ return err;
- ifence->fence = fence;
- ifence->gt = gt;
- ifence->start = start;
- ifence->end = end;
- ifence->asid = asid;
+ xe_svm_notifier_lock(vm);
- INIT_WORK(&ifence->work, invalidation_fence_work_func);
- ret = dma_fence_add_callback(fence, &ifence->cb, invalidation_fence_cb);
- if (ret == -ENOENT) {
- dma_fence_put(ifence->fence); /* Usually dropped in CB */
- invalidation_fence_work_func(&ifence->work);
- } else if (ret) {
- dma_fence_put(&ifence->base.base); /* Caller ref */
- dma_fence_put(&ifence->base.base); /* Creation ref */
+ list_for_each_entry(op, &vops->list, link) {
+ err = op_check_svm_userptr(vm, op, pt_update_ops);
+ if (err) {
+ xe_svm_notifier_unlock(vm);
+ break;
+ }
}
- xe_gt_assert(gt, !ret || ret == -ENOENT);
+ return err;
}
+#endif
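vma_check_userptr() above is one instance of the core mmu_interval_notifier retry protocol: sample the notifier sequence, do the work, then retry if an invalidation ran in between. A generic, hedged sketch of that protocol using the core-kernel API; make_device_mapping() is a hypothetical stand-in, and the retry check must be made under the same lock the invalidate callback takes:

	#include <linux/mmu_notifier.h>

	static int make_device_mapping(struct mmu_interval_notifier *ni)
	{
		/* hypothetical device-specific mapping work, done under
		 * the driver lock shared with the invalidate callback */
		return 0;
	}

	static int pin_and_map(struct mmu_interval_notifier *ni)
	{
		unsigned long seq;
		int err;

	again:
		seq = mmu_interval_read_begin(ni);	/* snapshot sequence */

		/* ... fault in / collect pages outside the device lock ... */

		err = make_device_mapping(ni);		/* under the device lock */

		if (mmu_interval_read_retry(ni, seq))	/* invalidation raced? */
			goto again;

		return err;
	}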
struct xe_pt_stage_unbind_walk {
/** @base: The pagewalk base-class. */
@@ -1712,8 +1652,8 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile,
struct xe_svm_range *range,
struct xe_vm_pgtable_update *entries)
{
- u64 start = range ? range->base.itree.start : xe_vma_start(vma);
- u64 end = range ? range->base.itree.last + 1 : xe_vma_end(vma);
+ u64 start = range ? xe_svm_range_start(range) : xe_vma_start(vma);
+ u64 end = range ? xe_svm_range_end(range) : xe_vma_end(vma);
struct xe_pt_stage_unbind_walk xe_walk = {
.base = {
.ops = &xe_pt_stage_unbind_ops,
@@ -1879,7 +1819,7 @@ static int bind_op_prepare(struct xe_vm *vm, struct xe_tile *tile,
xe_vma_start(vma),
xe_vma_end(vma));
++pt_update_ops->current_op;
- pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
/*
* If rebind, we have to invalidate TLB on !LR vms to invalidate
@@ -1923,7 +1863,7 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
vm_dbg(&xe_vma_vm(vma)->xe->drm,
"Preparing bind, with range [%lx...%lx)\n",
- range->base.itree.start, range->base.itree.last);
+ xe_svm_range_start(range), xe_svm_range_end(range) - 1);
pt_op->vma = NULL;
pt_op->bind = true;
@@ -1938,8 +1878,8 @@ static int bind_range_prepare(struct xe_vm *vm, struct xe_tile *tile,
pt_op->num_entries, true);
xe_pt_update_ops_rfence_interval(pt_update_ops,
- range->base.itree.start,
- range->base.itree.last + 1);
+ xe_svm_range_start(range),
+ xe_svm_range_end(range));
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
@@ -1987,7 +1927,7 @@ static int unbind_op_prepare(struct xe_tile *tile,
xe_pt_update_ops_rfence_interval(pt_update_ops, xe_vma_start(vma),
xe_vma_end(vma));
++pt_update_ops->current_op;
- pt_update_ops->needs_userptr_lock |= xe_vma_is_userptr(vma);
+ pt_update_ops->needs_svm_lock |= xe_vma_is_userptr(vma);
pt_update_ops->needs_invalidation = true;
xe_pt_commit_prepare_unbind(vma, pt_op->entries, pt_op->num_entries);
@@ -2034,7 +1974,7 @@ static int unbind_range_prepare(struct xe_vm *vm,
vm_dbg(&vm->xe->drm,
"Preparing unbind, with range [%lx...%lx)\n",
- range->base.itree.start, range->base.itree.last);
+ xe_svm_range_start(range), xe_svm_range_end(range) - 1);
pt_op->vma = XE_INVALID_VMA;
pt_op->bind = false;
@@ -2045,8 +1985,8 @@ static int unbind_range_prepare(struct xe_vm *vm,
xe_vm_dbg_print_entries(tile_to_xe(tile), pt_op->entries,
pt_op->num_entries, false);
- xe_pt_update_ops_rfence_interval(pt_update_ops, range->base.itree.start,
- range->base.itree.last + 1);
+ xe_pt_update_ops_rfence_interval(pt_update_ops, xe_svm_range_start(range),
+ xe_svm_range_end(range));
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
@@ -2073,7 +2013,7 @@ static int op_prepare(struct xe_vm *vm,
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm) &&
!op->map.invalidate_on_bind) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma,
@@ -2235,7 +2175,7 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
vma->tile_invalidated & ~BIT(tile->id));
vma->tile_staged &= ~BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
to_userptr_vma(vma)->userptr.initial_bind = true;
}
@@ -2271,7 +2211,7 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
if (!vma->tile_present) {
list_del_init(&vma->combined_links.rebind);
if (xe_vma_is_userptr(vma)) {
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
+ xe_svm_assert_held_read(vm);
spin_lock(&vm->userptr.invalidated_lock);
list_del_init(&to_userptr_vma(vma)->userptr.invalidate_link);
@@ -2303,7 +2243,7 @@ static void op_commit(struct xe_vm *vm,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) ||
- op->map.is_cpu_addr_mirror)
+ (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break;
bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
@@ -2374,22 +2314,25 @@ static const struct xe_migrate_pt_update_ops migrate_ops = {
.pre_commit = xe_pt_pre_commit,
};
-static const struct xe_migrate_pt_update_ops userptr_migrate_ops = {
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops = {
.populate = xe_vm_populate_pgtable,
.clear = xe_migrate_clear_pgtable_callback,
- .pre_commit = xe_pt_userptr_pre_commit,
-};
-
-#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
-static const struct xe_migrate_pt_update_ops svm_migrate_ops = {
- .populate = xe_vm_populate_pgtable,
- .clear = xe_migrate_clear_pgtable_callback,
- .pre_commit = xe_pt_svm_pre_commit,
+ .pre_commit = xe_pt_svm_userptr_pre_commit,
};
#else
-static const struct xe_migrate_pt_update_ops svm_migrate_ops;
+static const struct xe_migrate_pt_update_ops svm_userptr_migrate_ops;
#endif
+static struct xe_dep_scheduler *to_dep_scheduler(struct xe_exec_queue *q,
+ struct xe_gt *gt)
+{
+ if (xe_gt_is_media_type(gt))
+ return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT].dep_scheduler;
+
+ return q->tlb_inval[XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT].dep_scheduler;
+}
+
/**
* xe_pt_update_ops_run() - Run PT update operations
* @tile: Tile of PT update operations
@@ -2407,18 +2350,15 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vm *vm = vops->vm;
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[tile->id];
- struct dma_fence *fence;
- struct invalidation_fence *ifence = NULL, *mfence = NULL;
- struct dma_fence **fences = NULL;
- struct dma_fence_array *cf = NULL;
+ struct xe_exec_queue *q = pt_update_ops->q;
+ struct dma_fence *fence, *ifence = NULL, *mfence = NULL;
+ struct xe_tlb_inval_job *ijob = NULL, *mjob = NULL;
struct xe_range_fence *rfence;
struct xe_vma_op *op;
int err = 0, i;
struct xe_migrate_pt_update update = {
.ops = pt_update_ops->needs_svm_lock ?
- &svm_migrate_ops :
- pt_update_ops->needs_userptr_lock ?
- &userptr_migrate_ops :
+ &svm_userptr_migrate_ops :
&migrate_ops,
.vops = vops,
.tile_id = tile->id,
@@ -2440,34 +2380,41 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
#endif
if (pt_update_ops->needs_invalidation) {
- ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!ifence) {
- err = -ENOMEM;
+ struct xe_dep_scheduler *dep_scheduler =
+ to_dep_scheduler(q, tile->primary_gt);
+
+ ijob = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval,
+ dep_scheduler, vm,
+ pt_update_ops->start,
+ pt_update_ops->last,
+ XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+ if (IS_ERR(ijob)) {
+ err = PTR_ERR(ijob);
goto kill_vm_tile1;
}
+ update.ijob = ijob;
+
if (tile->media_gt) {
- mfence = kzalloc(sizeof(*ifence), GFP_KERNEL);
- if (!mfence) {
- err = -ENOMEM;
- goto free_ifence;
- }
- fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
- if (!fences) {
- err = -ENOMEM;
- goto free_ifence;
- }
- cf = dma_fence_array_alloc(2);
- if (!cf) {
- err = -ENOMEM;
- goto free_ifence;
+ dep_scheduler = to_dep_scheduler(q, tile->media_gt);
+
+ mjob = xe_tlb_inval_job_create(q,
+ &tile->media_gt->tlb_inval,
+ dep_scheduler, vm,
+ pt_update_ops->start,
+ pt_update_ops->last,
+ XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT);
+ if (IS_ERR(mjob)) {
+ err = PTR_ERR(mjob);
+ goto free_ijob;
}
+ update.mjob = mjob;
}
}
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
if (!rfence) {
err = -ENOMEM;
- goto free_ifence;
+ goto free_ijob;
}
fence = xe_migrate_update_pgtables(tile->migrate, &update);
@@ -2491,30 +2438,12 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
pt_update_ops->last, fence))
dma_fence_wait(fence, false);
- /* tlb invalidation must be done before signaling rebind */
- if (ifence) {
- if (mfence)
- dma_fence_get(fence);
- invalidation_fence_init(tile->primary_gt, ifence, fence,
- pt_update_ops->start,
- pt_update_ops->last, vm->usm.asid);
- if (mfence) {
- invalidation_fence_init(tile->media_gt, mfence, fence,
- pt_update_ops->start,
- pt_update_ops->last, vm->usm.asid);
- fences[0] = &ifence->base.base;
- fences[1] = &mfence->base.base;
- dma_fence_array_init(cf, 2, fences,
- vm->composite_fence_ctx,
- vm->composite_fence_seqno++,
- false);
- fence = &cf->base;
- } else {
- fence = &ifence->base.base;
- }
- }
+ if (ijob)
+ ifence = xe_tlb_inval_job_push(ijob, tile->migrate, fence);
+ if (mjob)
+ mfence = xe_tlb_inval_job_push(mjob, tile->migrate, fence);
- if (!mfence) {
+ if (!mjob && !ijob) {
dma_resv_add_fence(xe_vm_resv(vm), fence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
@@ -2522,36 +2451,52 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
list_for_each_entry(op, &vops->list, link)
op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
+ } else if (ijob && !mjob) {
+ dma_resv_add_fence(xe_vm_resv(vm), ifence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op, ifence, NULL);
} else {
- dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
+ dma_resv_add_fence(xe_vm_resv(vm), ifence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
+ dma_resv_add_fence(xe_vm_resv(vm), mfence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
list_for_each_entry(op, &vops->list, link)
- op_commit(vops->vm, tile, pt_update_ops, op,
- &ifence->base.base, &mfence->base.base);
+ op_commit(vops->vm, tile, pt_update_ops, op, ifence,
+ mfence);
}
if (pt_update_ops->needs_svm_lock)
xe_svm_notifier_unlock(vm);
- if (pt_update_ops->needs_userptr_lock)
- up_read(&vm->userptr.notifier_lock);
+
+ /*
+ * The last fence is only used for bind queue idling; migrate queues
+ * are not exposed to user space, so skip setting it for them.
+ */
+ if (!(q->flags & EXEC_QUEUE_FLAG_MIGRATE))
+ xe_exec_queue_last_fence_set(q, vm, fence);
+
+ xe_tlb_inval_job_put(mjob);
+ xe_tlb_inval_job_put(ijob);
+ dma_fence_put(ifence);
+ dma_fence_put(mfence);
return fence;
free_rfence:
kfree(rfence);
-free_ifence:
- kfree(cf);
- kfree(fences);
- kfree(mfence);
- kfree(ifence);
+free_ijob:
+ xe_tlb_inval_job_put(mjob);
+ xe_tlb_inval_job_put(ijob);
kill_vm_tile1:
if (err != -EAGAIN && err != -ENODATA && tile->id)
xe_vm_kill(vops->vm, false);
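The fence plumbing above replaces the hand-rolled invalidation fences with refcounted TLB invalidation jobs. A condensed, hedged sketch of the ownership rules as they appear in this function; the signatures are taken from this patch, and the assumption that xe_tlb_inval_job_push() keeps whatever reference it needs is inferred from the success path, where the job is put while the returned fence stays in use:

	static struct dma_fence *push_primary_inval(struct xe_tile *tile,
						    struct xe_exec_queue *q,
						    struct xe_vm *vm, u64 start,
						    u64 last,
						    struct dma_fence *bind_fence)
	{
		struct xe_dep_scheduler *ds = to_dep_scheduler(q, tile->primary_gt);
		struct xe_tlb_inval_job *job;
		struct dma_fence *fence;

		job = xe_tlb_inval_job_create(q, &tile->primary_gt->tlb_inval, ds,
					      vm, start, last,
					      XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
		if (IS_ERR(job))
			return ERR_CAST(job);

		fence = xe_tlb_inval_job_push(job, tile->migrate, bind_fence);
		xe_tlb_inval_job_put(job);	/* drop job ref; fence lives on */

		return fence;
	}

Note that the puts in the error path above run on possibly-NULL jobs, so xe_tlb_inval_job_put() is evidently NULL-safe.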
diff --git a/drivers/gpu/drm/xe/xe_pt.h b/drivers/gpu/drm/xe/xe_pt.h
index 5ecf003d513c..4daeebaab5a1 100644
--- a/drivers/gpu/drm/xe/xe_pt.h
+++ b/drivers/gpu/drm/xe/xe_pt.h
@@ -10,6 +10,7 @@
#include "xe_pt_types.h"
struct dma_fence;
+struct drm_exec;
struct xe_bo;
struct xe_device;
struct xe_exec_queue;
@@ -29,7 +30,7 @@ struct xe_vma_ops;
unsigned int xe_pt_shift(unsigned int level);
struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
- unsigned int level);
+ unsigned int level, struct drm_exec *exec);
void xe_pt_populate_empty(struct xe_tile *tile, struct xe_vm *vm,
struct xe_pt *pt);
diff --git a/drivers/gpu/drm/xe/xe_pt_types.h b/drivers/gpu/drm/xe/xe_pt_types.h
index 69eab6f37cfe..881f01e14db8 100644
--- a/drivers/gpu/drm/xe/xe_pt_types.h
+++ b/drivers/gpu/drm/xe/xe_pt_types.h
@@ -45,8 +45,7 @@ struct xe_pt_ops {
u64 (*pte_encode_addr)(struct xe_device *xe, u64 addr,
u16 pat_index,
u32 pt_level, bool devmem, u64 flags);
- u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index);
+ u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset);
};
struct xe_pt_entry {
@@ -106,8 +105,6 @@ struct xe_vm_pgtable_update_ops {
u32 current_op;
/** @needs_svm_lock: Needs SVM lock */
bool needs_svm_lock;
- /** @needs_userptr_lock: Needs userptr lock */
- bool needs_userptr_lock;
/** @needs_invalidation: Needs invalidation */
bool needs_invalidation;
/**
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index 3d62008c99f1..bdbdbbf6a678 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -688,6 +688,7 @@ start:
return ret;
}
+ALLOW_ERROR_INJECTION(xe_pxp_exec_queue_add, ERRNO);
static void __pxp_exec_queue_remove(struct xe_pxp *pxp, struct xe_exec_queue *q, bool lock)
{
diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c
index ca95f2a4d4ef..e60526e30030 100644
--- a/drivers/gpu/drm/xe/xe_pxp_submit.c
+++ b/drivers/gpu/drm/xe/xe_pxp_submit.c
@@ -54,8 +54,9 @@ static int allocate_vcs_execution_resources(struct xe_pxp *pxp)
* Each termination is 16 DWORDS (64 bytes), so a 4K buffer is enough to
* contain a termination for each session (4096 / 64 = 64 slots).
*/
- bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+ bo = xe_bo_create_pin_map_novm(xe, tile, SZ_4K, ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT,
+ false);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
goto out_queue;
@@ -87,7 +88,9 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
{
struct xe_tile *tile = gt_to_tile(gt);
struct xe_device *xe = tile_to_xe(tile);
+ struct xe_validation_ctx ctx;
struct xe_hw_engine *hwe;
+ struct drm_exec exec;
struct xe_vm *vm;
struct xe_bo *bo;
struct xe_exec_queue *q;
@@ -106,15 +109,26 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
return PTR_ERR(vm);
/* We allocate a single object for the batch and the in/out memory */
- xe_vm_lock(vm, false);
- bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
- ttm_bo_type_kernel,
- XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED | XE_BO_FLAG_NEEDS_UC);
- xe_vm_unlock(vm);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- goto vm_out;
+
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags){}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ break;
+
+ bo = xe_bo_create_pin_map(xe, tile, vm, PXP_BB_SIZE + inout_size * 2,
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED |
+ XE_BO_FLAG_NEEDS_UC, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
}
+ if (err)
+ goto vm_out;
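The conversion above wraps BO creation in xe_validation_guard() with drm_exec contention retries. Presumably the guard expands to something like the standard drm_exec loop; a minimal sketch of that core DRM pattern, under that assumption:

	#include <drm/drm_exec.h>

	static int lock_one_object(struct drm_gem_object *obj)
	{
		struct drm_exec exec;
		int err = 0;

		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
		drm_exec_until_all_locked(&exec) {
			err = drm_exec_lock_obj(&exec, obj);
			drm_exec_retry_on_contention(&exec); /* unwinds, restarts */
			if (err)
				break;
		}
		drm_exec_fini(&exec);

		return err;
	}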
fence = xe_vm_bind_kernel_bo(vm, bo, NULL, 0, XE_CACHE_WB);
if (IS_ERR(fence)) {
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index d517ec9ddcbf..1c0915e2cc16 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -21,12 +21,14 @@
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
+#include "xe_gt_topology.h"
#include "xe_guc_hwconfig.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_oa.h"
#include "xe_pxp.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
#include "xe_wa.h"
static const u16 xe_to_user_engine_class[] = {
@@ -274,8 +276,7 @@ static int query_mem_regions(struct xe_device *xe,
mem_regions->mem_regions[0].instance = 0;
mem_regions->mem_regions[0].min_page_size = PAGE_SIZE;
mem_regions->mem_regions[0].total_size = man->size << PAGE_SHIFT;
- if (perfmon_capable())
- mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
+ mem_regions->mem_regions[0].used = ttm_resource_manager_usage(man);
mem_regions->num_mem_regions = 1;
for (i = XE_PL_VRAM0; i <= XE_PL_VRAM1; ++i) {
@@ -291,13 +292,11 @@ static int query_mem_regions(struct xe_device *xe,
mem_regions->mem_regions[mem_regions->num_mem_regions].total_size =
man->size;
- if (perfmon_capable()) {
- xe_ttm_vram_get_used(man,
- &mem_regions->mem_regions
- [mem_regions->num_mem_regions].used,
- &mem_regions->mem_regions
- [mem_regions->num_mem_regions].cpu_visible_used);
- }
+ xe_ttm_vram_get_used(man,
+ &mem_regions->mem_regions
+ [mem_regions->num_mem_regions].used,
+ &mem_regions->mem_regions
+ [mem_regions->num_mem_regions].cpu_visible_used);
mem_regions->mem_regions[mem_regions->num_mem_regions].cpu_visible_size =
xe_ttm_vram_get_cpu_visible_size(man);
@@ -337,7 +336,7 @@ static int query_config(struct xe_device *xe, struct drm_xe_device_query *query)
config->num_params = num_params;
config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] =
xe->info.devid | (xe->info.revid << 16);
- if (xe_device_get_root_tile(xe)->mem.vram.usable_size)
+ if (xe->mem.vram)
config->info[DRM_XE_QUERY_CONFIG_FLAGS] |=
DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM;
if (xe->info.has_usm && IS_ENABLED(CONFIG_DRM_XE_GPUSVM))
@@ -410,7 +409,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query
gt_list->gt_list[iter].near_mem_regions = 0x1;
else
gt_list->gt_list[iter].near_mem_regions =
- BIT(gt_to_tile(gt)->id) << 1;
+ BIT(gt_to_tile(gt)->mem.vram->id) << 1;
gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^
gt_list->gt_list[iter].near_mem_regions;
@@ -437,7 +436,7 @@ static int query_hwconfig(struct xe_device *xe,
struct drm_xe_device_query *query)
{
struct xe_gt *gt = xe_root_mmio_gt(xe);
- size_t size = xe_guc_hwconfig_size(&gt->uc.guc);
+ size_t size = gt ? xe_guc_hwconfig_size(&gt->uc.guc) : 0;
void __user *query_ptr = u64_to_user_ptr(query->data);
void *hwconfig;
@@ -476,7 +475,7 @@ static size_t calc_topo_query_size(struct xe_device *xe)
sizeof_field(struct xe_gt, fuse_topo.eu_mask_per_dss);
/* L3bank mask may not be available for some GTs */
- if (!XE_WA(gt, no_media_l3))
+ if (xe_gt_topology_report_l3(gt))
query_size += sizeof(struct drm_xe_query_topology_mask) +
sizeof_field(struct xe_gt, fuse_topo.l3_bank_mask);
}
@@ -539,7 +538,7 @@ static int query_gt_topology(struct xe_device *xe,
* mask, then it's better to omit L3 from the query rather than
* reporting bogus or zeroed information to userspace.
*/
- if (!XE_WA(gt, no_media_l3)) {
+ if (xe_gt_topology_report_l3(gt)) {
topo.type = DRM_XE_TOPO_L3_BANK;
err = copy_mask(&query_ptr, &topo, gt->fuse_topo.l3_bank_mask,
sizeof(gt->fuse_topo.l3_bank_mask));
@@ -748,10 +747,8 @@ static int query_eu_stall(struct xe_device *xe,
u32 num_rates;
int ret;
- if (!xe_eu_stall_supported_on_platform(xe)) {
- drm_dbg(&xe->drm, "EU stall monitoring is not supported on this platform\n");
+ if (!xe_eu_stall_supported_on_platform(xe))
return -ENODEV;
- }
array_size = xe_eu_stall_get_sampling_rates(&num_rates, &rates);
size = sizeof(struct drm_xe_query_eu_stall) + array_size;
diff --git a/drivers/gpu/drm/xe/xe_range_fence.h b/drivers/gpu/drm/xe/xe_range_fence.h
index edd58b34f5c0..4934729dd904 100644
--- a/drivers/gpu/drm/xe/xe_range_fence.h
+++ b/drivers/gpu/drm/xe/xe_range_fence.h
@@ -13,13 +13,13 @@
struct xe_range_fence_tree;
struct xe_range_fence;
-/** struct xe_range_fence_ops - XE range fence ops */
+/** struct xe_range_fence_ops - Xe range fence ops */
struct xe_range_fence_ops {
/** @free: free range fence op */
void (*free)(struct xe_range_fence *rfence);
};
-/** struct xe_range_fence - XE range fence (address conflict tracking) */
+/** struct xe_range_fence - Xe range fence (address conflict tracking) */
struct xe_range_fence {
/** @rb: RB tree node inserted into interval tree */
struct rb_node rb;
diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c
index 23f6c81d9994..7ca360b2c20d 100644
--- a/drivers/gpu/drm/xe/xe_reg_whitelist.c
+++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c
@@ -19,7 +19,8 @@
#undef XE_REG_MCR
#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1)
-static bool match_not_render(const struct xe_gt *gt,
+static bool match_not_render(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
return hwe->class != XE_ENGINE_CLASS_RENDER;
@@ -88,6 +89,13 @@ static const struct xe_rtp_entry_sr register_whitelist[] = {
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4))
},
+ { XE_RTP_NAME("14024997852"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(WHITELIST(FF_MODE,
+ RING_FORCE_TO_NONPRIV_ACCESS_RW),
+ WHITELIST(VFLSKPD,
+ RING_FORCE_TO_NONPRIV_ACCESS_RW))
+ },
};
static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe)
diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res_cursor.h
index d1a403cfb628..4e00008b7081 100644
--- a/drivers/gpu/drm/xe/xe_res_cursor.h
+++ b/drivers/gpu/drm/xe/xe_res_cursor.h
@@ -55,8 +55,8 @@ struct xe_res_cursor {
u32 mem_type;
/** @sgl: Scatterlist for cursor */
struct scatterlist *sgl;
- /** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
- const struct drm_pagemap_device_addr *dma_addr;
+ /** @dma_addr: Current element in a struct drm_pagemap_addr array */
+ const struct drm_pagemap_addr *dma_addr;
/** @mm: Buddy allocator for VRAM cursor */
struct drm_buddy *mm;
/**
@@ -170,7 +170,7 @@ static inline void __xe_res_sg_next(struct xe_res_cursor *cur)
*/
static inline void __xe_res_dma_next(struct xe_res_cursor *cur)
{
- const struct drm_pagemap_device_addr *addr = cur->dma_addr;
+ const struct drm_pagemap_addr *addr = cur->dma_addr;
u64 start = cur->start;
while (start >= cur->dma_seg_size) {
@@ -222,14 +222,14 @@ static inline void xe_res_first_sg(const struct sg_table *sg,
/**
* xe_res_first_dma - initialize a xe_res_cursor with dma_addr array
*
- * @dma_addr: struct drm_pagemap_device_addr array to walk
+ * @dma_addr: struct drm_pagemap_addr array to walk
* @start: Start of the range
* @size: Size of the range
* @cur: cursor object to initialize
*
* Start walking over the range of allocations between @start and @size.
*/
-static inline void xe_res_first_dma(const struct drm_pagemap_device_addr *dma_addr,
+static inline void xe_res_first_dma(const struct drm_pagemap_addr *dma_addr,
u64 start, u64 size,
struct xe_res_cursor *cur)
{
diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
index 7b50c7c1ee21..ac0c6dcffe15 100644
--- a/drivers/gpu/drm/xe/xe_ring_ops.c
+++ b/drivers/gpu/drm/xe/xe_ring_ops.c
@@ -110,10 +110,10 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
return i;
}
-static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i)
+static int emit_flush_invalidate(u32 addr, u32 val, u32 flush_flags, u32 *dw, int i)
{
- dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
- MI_FLUSH_IMM_DW;
+ dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW |
+ MI_FLUSH_IMM_DW | (flush_flags & MI_INVALIDATE_TLB);
dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
dw[i++] = 0;
@@ -179,7 +179,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 flags;
- if (XE_WA(gt, 14016712196))
+ if (XE_GT_WA(gt, 14016712196))
i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_DEPTH_CACHE_FLUSH,
LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR, 0);
@@ -190,7 +190,7 @@ static int emit_render_cache_flush(struct xe_sched_job *job, u32 *dw, int i)
PIPE_CONTROL_DC_FLUSH_ENABLE |
PIPE_CONTROL_FLUSH_ENABLE);
- if (XE_WA(gt, 1409600907))
+ if (XE_GT_WA(gt, 1409600907))
flags |= PIPE_CONTROL_DEPTH_STALL;
if (lacks_render)
@@ -206,7 +206,7 @@ static int emit_pipe_control_to_ring_end(struct xe_hw_engine *hwe, u32 *dw, int
if (hwe->class != XE_ENGINE_CLASS_RENDER)
return i;
- if (XE_WA(hwe->gt, 16020292621))
+ if (XE_GT_WA(hwe->gt, 16020292621))
i = emit_pipe_control(dw, i, 0, PIPE_CONTROL_LRI_POST_SYNC,
RING_NOPID(hwe->mmio_base).addr, 0);
@@ -245,12 +245,14 @@ static int emit_copy_timestamp(struct xe_lrc *lrc, u32 *dw, int i)
/* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+ u64 batch_addr, u32 *head, u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
struct xe_gt *gt = job->q->gt;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
if (job->ring_ops_flush_tlb) {
@@ -296,7 +298,7 @@ static bool has_aux_ccs(struct xe_device *xe)
}
static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+ u64 batch_addr, u32 *head, u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
@@ -304,6 +306,8 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
struct xe_device *xe = gt_to_xe(gt);
bool decode = job->q->class == XE_ENGINE_CLASS_VIDEO_DECODE;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
dw[i++] = preparser_disable(true);
@@ -346,7 +350,8 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
struct xe_lrc *lrc,
- u64 batch_addr, u32 seqno)
+ u64 batch_addr, u32 *head,
+ u32 seqno)
{
u32 dw[MAX_JOB_SIZE_DW], i = 0;
u32 ppgtt_flag = get_ppgtt_flag(job);
@@ -355,6 +360,8 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
u32 mask_flags = 0;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
dw[i++] = preparser_disable(true);
@@ -396,11 +403,14 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
}
static void emit_migration_job_gen12(struct xe_sched_job *job,
- struct xe_lrc *lrc, u32 seqno)
+ struct xe_lrc *lrc, u32 *head,
+ u32 seqno)
{
u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc);
u32 dw[MAX_JOB_SIZE_DW], i = 0;
+ *head = lrc->ring.tail;
+
i = emit_copy_timestamp(lrc, dw, i);
i = emit_store_imm_ggtt(saddr, seqno, dw, i);
@@ -410,16 +420,14 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i);
dw[i++] = preparser_disable(true);
- i = emit_flush_invalidate(saddr, seqno, dw, i);
+ i = emit_flush_invalidate(saddr, seqno, job->migrate_flush_flags, dw, i);
dw[i++] = preparser_disable(false);
i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i);
- dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | job->migrate_flush_flags |
- MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW;
- dw[i++] = xe_lrc_seqno_ggtt_addr(lrc) | MI_FLUSH_DW_USE_GTT;
- dw[i++] = 0;
- dw[i++] = seqno; /* value */
+ i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno,
+ job->migrate_flush_flags,
+ dw, i);
i = emit_user_interrupt(dw, i);
@@ -436,6 +444,7 @@ static void emit_job_gen12_gsc(struct xe_sched_job *job)
__emit_job_gen12_simple(job, job->q->lrc[0],
job->ptrs[0].batch_addr,
+ &job->ptrs[0].head,
xe_sched_job_lrc_seqno(job));
}
@@ -445,6 +454,7 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
if (xe_sched_job_is_migration(job->q)) {
emit_migration_job_gen12(job, job->q->lrc[0],
+ &job->ptrs[0].head,
xe_sched_job_lrc_seqno(job));
return;
}
@@ -452,6 +462,7 @@ static void emit_job_gen12_copy(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_simple(job, job->q->lrc[i],
job->ptrs[i].batch_addr,
+ &job->ptrs[i].head,
xe_sched_job_lrc_seqno(job));
}
@@ -463,6 +474,7 @@ static void emit_job_gen12_video(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_video(job, job->q->lrc[i],
job->ptrs[i].batch_addr,
+ &job->ptrs[i].head,
xe_sched_job_lrc_seqno(job));
}
@@ -473,6 +485,7 @@ static void emit_job_gen12_render_compute(struct xe_sched_job *job)
for (i = 0; i < job->q->width; ++i)
__emit_job_gen12_render_compute(job, job->q->lrc[i],
job->ptrs[i].batch_addr,
+ &job->ptrs[i].head,
xe_sched_job_lrc_seqno(job));
}
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index 95571b87aa73..ed509b1c8cfc 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -9,6 +9,7 @@
#include <uapi/drm/xe_drm.h>
+#include "xe_configfs.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_macros.h"
@@ -132,10 +133,7 @@ static bool rule_matches(const struct xe_device *xe,
match = hwe->class != r->engine_class;
break;
case XE_RTP_MATCH_FUNC:
- if (drm_WARN_ON(&xe->drm, !gt))
- return false;
-
- match = r->match_func(gt, hwe);
+ match = r->match_func(xe, gt, hwe);
break;
default:
drm_warn(&xe->drm, "Invalid RTP match %u\n",
@@ -342,13 +340,15 @@ void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process);
-bool xe_rtp_match_even_instance(const struct xe_gt *gt,
+bool xe_rtp_match_even_instance(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
return hwe->instance % 2 == 0;
}
-bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
+bool xe_rtp_match_first_render_or_compute(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
u64 render_compute_mask = gt->info.engine_mask &
@@ -358,8 +358,30 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
hwe->engine_id == __ffs(render_compute_mask);
}
-bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+bool xe_rtp_match_not_sriov_vf(const struct xe_device *xe,
+ const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return !IS_SRIOV_VF(xe);
+}
+
+bool xe_rtp_match_psmi_enabled(const struct xe_device *xe,
+ const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev));
+}
+
+bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_device *xe,
+ const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe)
+{
+ return xe_gt_has_discontiguous_dss_groups(gt);
+}
+
+bool xe_rtp_match_has_flat_ccs(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe)
{
- return !IS_SRIOV_VF(gt_to_xe(gt));
+ return xe->info.has_flat_ccs;
}
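With the signature change above, every RTP match function now receives the device explicitly instead of deriving it via gt_to_xe(), which also allows matching in NULL-GT contexts. A hedged sketch of a custom match function in the new form, which a rule table could reference through the existing FUNC() rule macro; the predicate itself is hypothetical:

	static bool match_discrete_render(const struct xe_device *xe,
					  const struct xe_gt *gt,
					  const struct xe_hw_engine *hwe)
	{
		/* device-level check no longer needs gt_to_xe(gt) */
		return IS_DGFX(xe) && hwe->class == XE_ENGINE_CLASS_RENDER;
	}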
diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h
index 5ed6c14b9ae3..ba5f940c0a96 100644
--- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h
@@ -440,18 +440,21 @@ void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
/**
* xe_rtp_match_even_instance - Match if engine instance is even
+ * @xe: Device structure
* @gt: GT structure
* @hwe: Engine instance
*
* Returns: true if engine instance is even, false otherwise
*/
-bool xe_rtp_match_even_instance(const struct xe_gt *gt,
+bool xe_rtp_match_even_instance(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
/*
* xe_rtp_match_first_render_or_compute - Match if it's first render or compute
* engine in the GT
*
+ * @xe: Device structure
* @gt: GT structure
* @hwe: Engine instance
*
@@ -463,18 +466,41 @@ bool xe_rtp_match_even_instance(const struct xe_gt *gt,
* Returns: true if engine id is the first to match the render reset domain,
* false otherwise.
*/
-bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
+bool xe_rtp_match_first_render_or_compute(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
/*
* xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device
*
+ * @xe: Device structure
* @gt: GT structure
* @hwe: Engine instance
*
* Returns: true if device is not VF, false otherwise.
*/
-bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt,
+bool xe_rtp_match_not_sriov_vf(const struct xe_device *xe,
+ const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
+bool xe_rtp_match_psmi_enabled(const struct xe_device *xe,
+ const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
+bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_device *xe,
+ const struct xe_gt *gt,
+ const struct xe_hw_engine *hwe);
+
+/**
+ * xe_rtp_match_has_flat_ccs - Match when platform has FlatCCS compression
+ * @xe: Device structure
+ * @gt: GT structure
+ * @hwe: Engine instance
+ *
+ * Returns: true if platform has FlatCCS compression, false otherwise
+ */
+bool xe_rtp_match_has_flat_ccs(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
#endif
diff --git a/drivers/gpu/drm/xe/xe_rtp_types.h b/drivers/gpu/drm/xe/xe_rtp_types.h
index f4cf30e298cf..6ba7f226c227 100644
--- a/drivers/gpu/drm/xe/xe_rtp_types.h
+++ b/drivers/gpu/drm/xe/xe_rtp_types.h
@@ -10,6 +10,7 @@
#include "regs/xe_reg_defs.h"
+struct xe_device;
struct xe_hw_engine;
struct xe_gt;
@@ -86,7 +87,8 @@ struct xe_rtp_rule {
u8 engine_class;
};
/* MATCH_FUNC */
- bool (*match_func)(const struct xe_gt *gt,
+ bool (*match_func)(const struct xe_device *xe,
+ const struct xe_gt *gt,
const struct xe_hw_engine *hwe);
};
};
diff --git a/drivers/gpu/drm/xe/xe_sa.c b/drivers/gpu/drm/xe/xe_sa.c
index 1d43e183ca21..63a5263dcf1b 100644
--- a/drivers/gpu/drm/xe/xe_sa.c
+++ b/drivers/gpu/drm/xe/xe_sa.c
@@ -69,7 +69,6 @@ struct xe_sa_manager *__xe_sa_bo_manager_init(struct xe_tile *tile, u32 size, u3
}
sa_manager->bo = bo;
sa_manager->is_iomem = bo->vmap.is_iomem;
- sa_manager->gpu_addr = xe_bo_ggtt_addr(bo);
if (bo->vmap.is_iomem) {
sa_manager->cpu_ptr = kvzalloc(managed_size, GFP_KERNEL);
@@ -111,6 +110,10 @@ struct drm_suballoc *__xe_sa_bo_new(struct xe_sa_manager *sa_manager, u32 size,
return drm_suballoc_new(&sa_manager->base, size, gfp, true, 0);
}
+/**
+ * xe_sa_bo_flush_write() - Copy the data from the sub-allocation to GPU memory.
+ * @sa_bo: the &drm_suballoc to flush
+ */
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
{
struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
@@ -124,6 +127,23 @@ void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo)
drm_suballoc_size(sa_bo));
}
+/**
+ * xe_sa_bo_sync_read() - Copy the data from GPU memory to the sub-allocation.
+ * @sa_bo: the &drm_suballoc to sync
+ */
+void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo)
+{
+ struct xe_sa_manager *sa_manager = to_xe_sa_manager(sa_bo->manager);
+ struct xe_device *xe = tile_to_xe(sa_manager->bo->tile);
+
+ if (!sa_manager->bo->vmap.is_iomem)
+ return;
+
+ xe_map_memcpy_from(xe, xe_sa_bo_cpu_addr(sa_bo), &sa_manager->bo->vmap,
+ drm_suballoc_soffset(sa_bo),
+ drm_suballoc_size(sa_bo));
+}
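Taken together with xe_sa_bo_flush_write(), the new helper completes the shadow-buffer protocol: on iomem-backed managers the CPU reads and writes a kvzalloc'd shadow, so writes must be flushed to GPU memory and reads synced back from it. A hedged usage sketch; fill_request() and parse_reply() are hypothetical:

	static int sa_roundtrip(struct xe_sa_manager *mgr, u32 size)
	{
		struct drm_suballoc *sa = xe_sa_bo_new(mgr, size);

		if (IS_ERR(sa))
			return PTR_ERR(sa);

		fill_request(xe_sa_bo_cpu_addr(sa), size);	/* hypothetical */
		xe_sa_bo_flush_write(sa);		/* shadow -> GPU memory */

		/* ... submit work that writes a reply at xe_sa_bo_gpu_addr(sa) ... */

		xe_sa_bo_sync_read(sa);			/* GPU memory -> shadow */
		parse_reply(xe_sa_bo_cpu_addr(sa), size);	/* hypothetical */

		xe_sa_bo_free(sa, NULL);		/* no fence: free now */
		return 0;
	}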
+
void xe_sa_bo_free(struct drm_suballoc *sa_bo,
struct dma_fence *fence)
{
diff --git a/drivers/gpu/drm/xe/xe_sa.h b/drivers/gpu/drm/xe/xe_sa.h
index 1170ee5a81a8..1be744350836 100644
--- a/drivers/gpu/drm/xe/xe_sa.h
+++ b/drivers/gpu/drm/xe/xe_sa.h
@@ -7,6 +7,8 @@
#include <linux/sizes.h>
#include <linux/types.h>
+
+#include "xe_bo.h"
#include "xe_sa_types.h"
struct dma_fence;
@@ -35,6 +37,7 @@ static inline struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager
}
void xe_sa_bo_flush_write(struct drm_suballoc *sa_bo);
+void xe_sa_bo_sync_read(struct drm_suballoc *sa_bo);
void xe_sa_bo_free(struct drm_suballoc *sa_bo, struct dma_fence *fence);
static inline struct xe_sa_manager *
@@ -43,9 +46,20 @@ to_xe_sa_manager(struct drm_suballoc_manager *mng)
return container_of(mng, struct xe_sa_manager, base);
}
+/**
+ * xe_sa_manager_gpu_addr - Retrieve the GPU address of the suballocator's
+ * backing storage BO.
+ * @sa_manager: the &xe_sa_manager struct instance
+ * Return: GGTT address of the backing storage BO.
+ */
+static inline u64 xe_sa_manager_gpu_addr(struct xe_sa_manager *sa_manager)
+{
+ return xe_bo_ggtt_addr(sa_manager->bo);
+}
+
static inline u64 xe_sa_bo_gpu_addr(struct drm_suballoc *sa)
{
- return to_xe_sa_manager(sa->manager)->gpu_addr +
+ return xe_sa_manager_gpu_addr(to_xe_sa_manager(sa->manager)) +
drm_suballoc_soffset(sa);
}
diff --git a/drivers/gpu/drm/xe/xe_sa_types.h b/drivers/gpu/drm/xe/xe_sa_types.h
index 2b070ff1292e..cb7238799dcb 100644
--- a/drivers/gpu/drm/xe/xe_sa_types.h
+++ b/drivers/gpu/drm/xe/xe_sa_types.h
@@ -12,7 +12,6 @@ struct xe_bo;
struct xe_sa_manager {
struct drm_suballoc_manager base;
struct xe_bo *bo;
- u64 gpu_addr;
void *cpu_ptr;
bool is_iomem;
};
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index d21bf8f26964..cb674a322113 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -146,6 +146,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
for (i = 0; i < width; ++i)
job->ptrs[i].batch_addr = batch_addr[i];
+ atomic_inc(&q->job_cnt);
xe_pm_runtime_get_noresume(job_to_xe(job));
trace_xe_sched_job_create(job);
return job;
@@ -160,11 +161,11 @@ err_free:
}
/**
- * xe_sched_job_destroy - Destroy XE schedule job
- * @ref: reference to XE schedule job
+ * xe_sched_job_destroy - Destroy Xe schedule job
+ * @ref: reference to Xe schedule job
*
* Called when ref == 0, drop a reference to job's xe_engine + fence, cleanup
- * base DRM schedule job, and free memory for XE schedule job.
+ * base DRM schedule job, and free memory for Xe schedule job.
*/
void xe_sched_job_destroy(struct kref *ref)
{
@@ -177,6 +178,7 @@ void xe_sched_job_destroy(struct kref *ref)
dma_fence_put(job->fence);
drm_sched_job_cleanup(&job->drm);
job_free(job);
+ atomic_dec(&q->job_cnt);
xe_exec_queue_put(q);
xe_pm_runtime_put(xe);
}
@@ -296,23 +298,6 @@ void xe_sched_job_push(struct xe_sched_job *job)
}
/**
- * xe_sched_job_last_fence_add_dep - Add last fence dependency to job
- * @job:job to add the last fence dependency to
- * @vm: virtual memory job belongs to
- *
- * Returns:
- * 0 on success, or an error on failing to expand the array.
- */
-int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm)
-{
- struct dma_fence *fence;
-
- fence = xe_exec_queue_last_fence_get(job->q, vm);
-
- return drm_sched_job_add_dependency(&job->drm, fence);
-}
-
-/**
* xe_sched_job_init_user_fence - Initialize user_fence for the job
* @job: job whose user_fence needs an init
* @sync: sync to be use to init user_fence
diff --git a/drivers/gpu/drm/xe/xe_sched_job.h b/drivers/gpu/drm/xe/xe_sched_job.h
index 3dc72c5c1f13..1c1cb44216c3 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.h
+++ b/drivers/gpu/drm/xe/xe_sched_job.h
@@ -23,10 +23,10 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
void xe_sched_job_destroy(struct kref *ref);
/**
- * xe_sched_job_get - get reference to XE schedule job
- * @job: XE schedule job object
+ * xe_sched_job_get - get reference to Xe schedule job
+ * @job: Xe schedule job object
*
- * Increment XE schedule job's reference count
+ * Increment Xe schedule job's reference count
*/
static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
{
@@ -35,10 +35,10 @@ static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
}
/**
- * xe_sched_job_put - put reference to XE schedule job
- * @job: XE schedule job object
+ * xe_sched_job_put - put reference to Xe schedule job
+ * @job: Xe schedule job object
*
- * Decrement XE schedule job's reference count, call xe_sched_job_destroy when
+ * Decrement Xe schedule job's reference count, call xe_sched_job_destroy when
* reference count == 0.
*/
static inline void xe_sched_job_put(struct xe_sched_job *job)
@@ -58,7 +58,6 @@ bool xe_sched_job_completed(struct xe_sched_job *job);
void xe_sched_job_arm(struct xe_sched_job *job);
void xe_sched_job_push(struct xe_sched_job *job);
-int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
void xe_sched_job_init_user_fence(struct xe_sched_job *job,
struct xe_sync_entry *sync);
diff --git a/drivers/gpu/drm/xe/xe_sched_job_types.h b/drivers/gpu/drm/xe/xe_sched_job_types.h
index dbf260dded8d..7c4c54fe920a 100644
--- a/drivers/gpu/drm/xe/xe_sched_job_types.h
+++ b/drivers/gpu/drm/xe/xe_sched_job_types.h
@@ -24,10 +24,15 @@ struct xe_job_ptrs {
struct dma_fence_chain *chain_fence;
/** @batch_addr: Batch buffer address. */
u64 batch_addr;
+ /**
+ * @head: The LRC ring tail pointer at the time the job was submitted,
+ * i.e. the head pointer of this job
+ */
+ u32 head;
};
/**
- * struct xe_sched_job - XE schedule job (batch buffer tracking)
+ * struct xe_sched_job - Xe schedule job (batch buffer tracking)
*/
struct xe_sched_job {
/** @drm: base DRM scheduler job */
@@ -58,6 +63,10 @@ struct xe_sched_job {
bool ring_ops_flush_tlb;
/** @ggtt: mapped in ggtt. */
bool ggtt;
+ /** @restore_replay: job being replayed for restore */
+ bool restore_replay;
+ /** @last_replay: last job being replayed */
+ bool last_replay;
/** @ptrs: per instance pointers. */
struct xe_job_ptrs ptrs[];
};
diff --git a/drivers/gpu/drm/xe/xe_sriov.c b/drivers/gpu/drm/xe/xe_sriov.c
index a0eab44c0e76..ea411944609b 100644
--- a/drivers/gpu/drm/xe/xe_sriov.c
+++ b/drivers/gpu/drm/xe/xe_sriov.c
@@ -15,6 +15,7 @@
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
/**
* xe_sriov_mode_to_string - Convert enum value to string.
@@ -157,3 +158,19 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t size)
strscpy(buf, "PF", size);
return buf;
}
+
+/**
+ * xe_sriov_init_late() - SR-IOV late initialization functions.
+ * @xe: the &xe_device to initialize
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_init_late(struct xe_device *xe)
+{
+ if (IS_SRIOV_PF(xe))
+ return xe_sriov_pf_init_late(xe);
+ if (IS_SRIOV_VF(xe))
+ return xe_sriov_vf_init_late(xe);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov.h b/drivers/gpu/drm/xe/xe_sriov.h
index 688fbabf08f1..6db45df55615 100644
--- a/drivers/gpu/drm/xe/xe_sriov.h
+++ b/drivers/gpu/drm/xe/xe_sriov.h
@@ -18,6 +18,7 @@ const char *xe_sriov_function_name(unsigned int n, char *buf, size_t len);
void xe_sriov_probe_early(struct xe_device *xe);
void xe_sriov_print_info(struct xe_device *xe, struct drm_printer *p);
int xe_sriov_init(struct xe_device *xe);
+int xe_sriov_init_late(struct xe_device *xe);
static inline enum xe_sriov_mode xe_device_sriov_mode(const struct xe_device *xe)
{
diff --git a/drivers/gpu/drm/xe/xe_sriov_packet.c b/drivers/gpu/drm/xe/xe_sriov_packet.c
new file mode 100644
index 000000000000..bab994696896
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_packet.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_guc_klv_helpers.h"
+#include "xe_printk.h"
+#include "xe_sriov_packet.h"
+#include "xe_sriov_packet_types.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
+#include "xe_sriov_printk.h"
+
+static struct mutex *pf_migration_mutex(struct xe_device *xe, unsigned int vfid)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+
+ return &xe->sriov.pf.vfs[vfid].migration.lock;
+}
+
+static struct xe_sriov_packet **pf_pick_pending(struct xe_device *xe, unsigned int vfid)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+ lockdep_assert_held(pf_migration_mutex(xe, vfid));
+
+ return &xe->sriov.pf.vfs[vfid].migration.pending;
+}
+
+static struct xe_sriov_packet **
+pf_pick_descriptor(struct xe_device *xe, unsigned int vfid)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+ lockdep_assert_held(pf_migration_mutex(xe, vfid));
+
+ return &xe->sriov.pf.vfs[vfid].migration.descriptor;
+}
+
+static struct xe_sriov_packet **pf_pick_trailer(struct xe_device *xe, unsigned int vfid)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+ lockdep_assert_held(pf_migration_mutex(xe, vfid));
+
+ return &xe->sriov.pf.vfs[vfid].migration.trailer;
+}
+
+static struct xe_sriov_packet **pf_pick_read_packet(struct xe_device *xe,
+ unsigned int vfid)
+{
+ struct xe_sriov_packet **data;
+
+ data = pf_pick_descriptor(xe, vfid);
+ if (*data)
+ return data;
+
+ data = pf_pick_pending(xe, vfid);
+ if (!*data)
+ *data = xe_sriov_pf_migration_save_consume(xe, vfid);
+ if (*data)
+ return data;
+
+ data = pf_pick_trailer(xe, vfid);
+ if (*data)
+ return data;
+
+ return NULL;
+}
+
+static bool pkt_needs_bo(struct xe_sriov_packet *data)
+{
+ return data->hdr.type == XE_SRIOV_PACKET_TYPE_VRAM;
+}
+
+/**
+ * xe_sriov_packet_alloc() - Allocate migration data packet
+ * @xe: the &xe_device
+ *
+ * Only allocates the "outer" structure, without initializing the migration
+ * data backing storage.
+ *
+ * Return: Pointer to &xe_sriov_packet on success,
+ * NULL in case of error.
+ */
+struct xe_sriov_packet *xe_sriov_packet_alloc(struct xe_device *xe)
+{
+ struct xe_sriov_packet *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->xe = xe;
+ data->hdr_remaining = sizeof(data->hdr);
+
+ return data;
+}
+
+/**
+ * xe_sriov_packet_free() - Free migration data packet.
+ * @data: the &xe_sriov_packet
+ */
+void xe_sriov_packet_free(struct xe_sriov_packet *data)
+{
+ if (IS_ERR_OR_NULL(data))
+ return;
+
+ if (pkt_needs_bo(data))
+ xe_bo_unpin_map_no_vm(data->bo);
+ else
+ kvfree(data->buff);
+
+ kfree(data);
+}
+
+static int pkt_init(struct xe_sriov_packet *data)
+{
+ struct xe_gt *gt = xe_device_get_gt(data->xe, data->hdr.gt_id);
+
+ if (!gt)
+ return -EINVAL;
+
+ if (data->hdr.size == 0)
+ return 0;
+
+ if (pkt_needs_bo(data)) {
+ struct xe_bo *bo;
+
+ bo = xe_bo_create_pin_map_novm(data->xe, gt->tile, PAGE_ALIGN(data->hdr.size),
+ ttm_bo_type_kernel,
+ XE_BO_FLAG_SYSTEM | XE_BO_FLAG_PINNED, false);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ data->bo = bo;
+ data->vaddr = bo->vmap.vaddr;
+ } else {
+ void *buff = kvzalloc(data->hdr.size, GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+
+ data->buff = buff;
+ data->vaddr = buff;
+ }
+
+ return 0;
+}
+
+#define XE_SRIOV_PACKET_SUPPORTED_VERSION 1
+
+/**
+ * xe_sriov_packet_init() - Initialize migration packet header and backing storage.
+ * @data: the &xe_sriov_packet
+ * @tile_id: tile identifier
+ * @gt_id: GT identifier
+ * @type: &xe_sriov_packet_type
+ * @offset: offset of data packet payload (within wider resource)
+ * @size: size of data packet payload
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_packet_init(struct xe_sriov_packet *data, u8 tile_id, u8 gt_id,
+ enum xe_sriov_packet_type type, loff_t offset, size_t size)
+{
+ data->hdr.version = XE_SRIOV_PACKET_SUPPORTED_VERSION;
+ data->hdr.type = type;
+ data->hdr.tile_id = tile_id;
+ data->hdr.gt_id = gt_id;
+ data->hdr.offset = offset;
+ data->hdr.size = size;
+ data->remaining = size;
+
+ return pkt_init(data);
+}
+
+/**
+ * xe_sriov_packet_init_from_hdr() - Initialize migration packet backing storage based on header.
+ * @data: the &xe_sriov_packet
+ *
+ * Header data is expected to be filled prior to calling this function.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data)
+{
+ xe_assert(data->xe, !data->hdr_remaining);
+
+ if (data->hdr.version != XE_SRIOV_PACKET_SUPPORTED_VERSION)
+ return -EINVAL;
+
+ data->remaining = data->hdr.size;
+
+ return pkt_init(data);
+}
+
+static ssize_t pkt_hdr_read(struct xe_sriov_packet *data,
+ char __user *buf, size_t len)
+{
+ loff_t offset = sizeof(data->hdr) - data->hdr_remaining;
+
+ if (!data->hdr_remaining)
+ return -EINVAL;
+
+ if (len > data->hdr_remaining)
+ len = data->hdr_remaining;
+
+ if (copy_to_user(buf, (void *)&data->hdr + offset, len))
+ return -EFAULT;
+
+ data->hdr_remaining -= len;
+
+ return len;
+}
+
+static ssize_t pkt_data_read(struct xe_sriov_packet *data,
+ char __user *buf, size_t len)
+{
+ if (len > data->remaining)
+ len = data->remaining;
+
+ if (copy_to_user(buf, data->vaddr + (data->hdr.size - data->remaining), len))
+ return -EFAULT;
+
+ data->remaining -= len;
+
+ return len;
+}
+
+static ssize_t pkt_read_single(struct xe_sriov_packet **data,
+ unsigned int vfid, char __user *buf, size_t len)
+{
+ ssize_t copied = 0;
+
+ if ((*data)->hdr_remaining)
+ copied = pkt_hdr_read(*data, buf, len);
+ else
+ copied = pkt_data_read(*data, buf, len);
+
+ if ((*data)->remaining == 0 && (*data)->hdr_remaining == 0) {
+ xe_sriov_packet_free(*data);
+ *data = NULL;
+ }
+
+ return copied;
+}
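xe_sriov_packet_read_single() streams each packet as a fixed-size header followed by its payload, with hdr_remaining and remaining counting down independently so userspace may read in arbitrarily sized chunks. A standalone illustration of the same framing in plain userspace C (not kernel code; the 16-byte header is a stand-in for the real one):

	#include <stddef.h>
	#include <string.h>

	struct pkt {
		unsigned char hdr[16];	/* stand-in for the real header */
		unsigned char *payload;
		size_t payload_size;
		size_t hdr_remaining;	/* counts down like data->hdr_remaining */
		size_t remaining;	/* counts down like data->remaining */
	};

	static size_t pkt_read(struct pkt *p, unsigned char *buf, size_t len)
	{
		if (p->hdr_remaining) {			/* header bytes first */
			size_t off = sizeof(p->hdr) - p->hdr_remaining;

			if (len > p->hdr_remaining)
				len = p->hdr_remaining;
			memcpy(buf, p->hdr + off, len);
			p->hdr_remaining -= len;
		} else {				/* then payload bytes */
			size_t off = p->payload_size - p->remaining;

			if (len > p->remaining)
				len = p->remaining;
			memcpy(buf, p->payload + off, len);
			p->remaining -= len;
		}
		return len;	/* 0 once the packet is fully drained */
	}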
+
+/**
+ * xe_sriov_packet_read_single() - Read migration data from a single packet.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @buf: start address of userspace buffer
+ * @len: requested read size from userspace
+ *
+ * Return: number of bytes that have been successfully read,
+ * 0 if no more migration data is available,
+ * -errno on failure.
+ */
+ssize_t xe_sriov_packet_read_single(struct xe_device *xe, unsigned int vfid,
+ char __user *buf, size_t len)
+{
+ struct xe_sriov_packet **data = pf_pick_read_packet(xe, vfid);
+
+ if (!data)
+ return -ENODATA;
+ if (IS_ERR(*data))
+ return PTR_ERR(*data);
+
+ return pkt_read_single(data, vfid, buf, len);
+}
+
+static ssize_t pkt_hdr_write(struct xe_sriov_packet *data,
+ const char __user *buf, size_t len)
+{
+ loff_t offset = sizeof(data->hdr) - data->hdr_remaining;
+ int ret;
+
+ if (len > data->hdr_remaining)
+ len = data->hdr_remaining;
+
+ if (copy_from_user((void *)&data->hdr + offset, buf, len))
+ return -EFAULT;
+
+ data->hdr_remaining -= len;
+
+ if (!data->hdr_remaining) {
+ ret = xe_sriov_packet_init_from_hdr(data);
+ if (ret)
+ return ret;
+ }
+
+ return len;
+}
+
+static ssize_t pkt_data_write(struct xe_sriov_packet *data,
+ const char __user *buf, size_t len)
+{
+ if (len > data->remaining)
+ len = data->remaining;
+
+ if (copy_from_user(data->vaddr + (data->hdr.size - data->remaining), buf, len))
+ return -EFAULT;
+
+ data->remaining -= len;
+
+ return len;
+}
+
+/**
+ * xe_sriov_packet_write_single() - Write migration data to a single packet.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @buf: start address of userspace buffer
+ * @len: requested write size from userspace
+ *
+ * Return: number of bytes that have been successfully written,
+ * -errno on failure.
+ */
+ssize_t xe_sriov_packet_write_single(struct xe_device *xe, unsigned int vfid,
+ const char __user *buf, size_t len)
+{
+ struct xe_sriov_packet **data = pf_pick_pending(xe, vfid);
+ int ret;
+ ssize_t copied;
+
+ if (IS_ERR_OR_NULL(*data)) {
+ *data = xe_sriov_packet_alloc(xe);
+ if (!*data)
+ return -ENOMEM;
+ }
+
+ if ((*data)->hdr_remaining)
+ copied = pkt_hdr_write(*data, buf, len);
+ else
+ copied = pkt_data_write(*data, buf, len);
+
+ if ((*data)->hdr_remaining == 0 && (*data)->remaining == 0) {
+ ret = xe_sriov_pf_migration_restore_produce(xe, vfid, *data);
+ if (ret) {
+ xe_sriov_packet_free(*data);
+ return ret;
+ }
+
+ *data = NULL;
+ }
+
+ return copied;
+}
+
+#define MIGRATION_KLV_DEVICE_DEVID_KEY 0xf001u
+#define MIGRATION_KLV_DEVICE_DEVID_LEN 1u
+#define MIGRATION_KLV_DEVICE_REVID_KEY 0xf002u
+#define MIGRATION_KLV_DEVICE_REVID_LEN 1u
+
+#define MIGRATION_DESCRIPTOR_DWORDS (GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_DEVID_LEN + \
+ GUC_KLV_LEN_MIN + MIGRATION_KLV_DEVICE_REVID_LEN)
+static int pf_descriptor_init(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_sriov_packet **desc = pf_pick_descriptor(xe, vfid);
+ struct xe_sriov_packet *data;
+ unsigned int len = 0;
+ u32 *klvs;
+ int ret;
+
+ data = xe_sriov_packet_alloc(xe);
+ if (!data)
+ return -ENOMEM;
+
+ ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_DESCRIPTOR,
+ 0, MIGRATION_DESCRIPTOR_DWORDS * sizeof(u32));
+ if (ret) {
+ xe_sriov_packet_free(data);
+ return ret;
+ }
+
+ klvs = data->vaddr;
+ klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_DEVID_KEY,
+ MIGRATION_KLV_DEVICE_DEVID_LEN);
+ klvs[len++] = xe->info.devid;
+ klvs[len++] = PREP_GUC_KLV_CONST(MIGRATION_KLV_DEVICE_REVID_KEY,
+ MIGRATION_KLV_DEVICE_REVID_LEN);
+ klvs[len++] = xe->info.revid;
+
+ xe_assert(xe, len == MIGRATION_DESCRIPTOR_DWORDS);
+
+ *desc = data;
+
+ return 0;
+}
+
+/**
+ * xe_sriov_packet_process_descriptor() - Process migration data descriptor packet.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @data: the &xe_sriov_packet containing the descriptor
+ *
+ * The descriptor uses the same KLV format as GuC, and contains metadata used for
+ * checking migration data compatibility.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int xe_sriov_packet_process_descriptor(struct xe_device *xe, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ u32 num_dwords = data->hdr.size / sizeof(u32);
+ u32 *klvs = data->vaddr;
+
+ xe_assert(xe, data->hdr.type == XE_SRIOV_PACKET_TYPE_DESCRIPTOR);
+
+ if (data->hdr.size % sizeof(u32)) {
+ xe_sriov_warn(xe, "Aborting migration, descriptor not in KLV format (size=%llu)\n",
+ data->hdr.size);
+ return -EINVAL;
+ }
+
+ while (num_dwords >= GUC_KLV_LEN_MIN) {
+ u32 key = FIELD_GET(GUC_KLV_0_KEY, klvs[0]);
+ u32 len = FIELD_GET(GUC_KLV_0_LEN, klvs[0]);
+
+ klvs += GUC_KLV_LEN_MIN;
+ num_dwords -= GUC_KLV_LEN_MIN;
+
+ if (len > num_dwords) {
+ xe_sriov_warn(xe, "Aborting migration, truncated KLV %#x, len %u\n",
+ key, len);
+ return -EINVAL;
+ }
+
+ switch (key) {
+ case MIGRATION_KLV_DEVICE_DEVID_KEY:
+ if (*klvs != xe->info.devid) {
+ xe_sriov_warn(xe,
+ "Aborting migration, devid mismatch %#06x!=%#06x\n",
+ *klvs, xe->info.devid);
+ return -ENODEV;
+ }
+ break;
+ case MIGRATION_KLV_DEVICE_REVID_KEY:
+ if (*klvs != xe->info.revid) {
+ xe_sriov_warn(xe,
+ "Aborting migration, revid mismatch %#06x!=%#06x\n",
+ *klvs, xe->info.revid);
+ return -ENODEV;
+ }
+ break;
+ default:
+ xe_sriov_dbg(xe,
+ "Skipping unknown migration KLV %#x, len=%u\n",
+ key, len);
+ print_hex_dump_bytes("desc: ", DUMP_PREFIX_OFFSET, klvs,
+ min(SZ_64, len * sizeof(u32)));
+ break;
+ }
+
+ klvs += len;
+ num_dwords -= len;
+ }
+
+ return 0;
+}
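
The parse loop above is the mirror image of the builder: consume a header dword, validate that the advertised length still fits in the remaining buffer, then skip over the value dwords. A hypothetical standalone walker, using the same assumed bit layout and GUC_KLV_LEN_MIN == 1:

#include <stdint.h>
#include <stdio.h>

/* Walk KLVs: header dword (assumed key 31:16, len 15:0), then len dwords. */
static int walk_klvs(const uint32_t *klvs, uint32_t num_dwords)
{
	while (num_dwords >= 1) {	/* 1 == GUC_KLV_LEN_MIN */
		uint32_t key = klvs[0] >> 16;
		uint32_t len = klvs[0] & 0xffff;

		klvs++;
		num_dwords--;
		if (len > num_dwords)
			return -1;	/* truncated KLV: reject the stream */
		printf("key=%#x len=%u\n", key, len);
		klvs += len;
		num_dwords -= len;
	}
	return 0;
}

int main(void)
{
	const uint32_t buf[] = { 0xf0010001u, 0x56a0u, 0xf0020001u, 0x08u };

	return walk_klvs(buf, 4) ? 1 : 0;
}
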
+
+static void pf_pending_init(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_sriov_packet **data = pf_pick_pending(xe, vfid);
+
+ *data = NULL;
+}
+
+#define MIGRATION_TRAILER_SIZE 0
+static int pf_trailer_init(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_sriov_packet **trailer = pf_pick_trailer(xe, vfid);
+ struct xe_sriov_packet *data;
+ int ret;
+
+ data = xe_sriov_packet_alloc(xe);
+ if (!data)
+ return -ENOMEM;
+
+ ret = xe_sriov_packet_init(data, 0, 0, XE_SRIOV_PACKET_TYPE_TRAILER,
+ 0, MIGRATION_TRAILER_SIZE);
+ if (ret) {
+ xe_sriov_packet_free(data);
+ return ret;
+ }
+
+ *trailer = data;
+
+ return 0;
+}
+
+/**
+ * xe_sriov_packet_save_init() - Initialize the pending save migration packets.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+int xe_sriov_packet_save_init(struct xe_device *xe, unsigned int vfid)
+{
+ int ret;
+
+ scoped_cond_guard(mutex_intr, return -EINTR, pf_migration_mutex(xe, vfid)) {
+ ret = pf_descriptor_init(xe, vfid);
+ if (ret)
+ return ret;
+
+ ret = pf_trailer_init(xe, vfid);
+ if (ret)
+ return ret;
+
+ pf_pending_init(xe, vfid);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_packet.h b/drivers/gpu/drm/xe/xe_sriov_packet.h
new file mode 100644
index 000000000000..2731e52cf7ef
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_packet.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PACKET_H_
+#define _XE_SRIOV_PACKET_H_
+
+#include <linux/types.h>
+
+struct xe_device;
+struct xe_sriov_packet;
+enum xe_sriov_packet_type;
+
+struct xe_sriov_packet *xe_sriov_packet_alloc(struct xe_device *xe);
+void xe_sriov_packet_free(struct xe_sriov_packet *data);
+
+int xe_sriov_packet_init(struct xe_sriov_packet *data, u8 tile_id, u8 gt_id,
+ enum xe_sriov_packet_type type, loff_t offset, size_t size);
+int xe_sriov_packet_init_from_hdr(struct xe_sriov_packet *data);
+
+ssize_t xe_sriov_packet_read_single(struct xe_device *xe, unsigned int vfid,
+ char __user *buf, size_t len);
+ssize_t xe_sriov_packet_write_single(struct xe_device *xe, unsigned int vfid,
+ const char __user *buf, size_t len);
+int xe_sriov_packet_save_init(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_packet_process_descriptor(struct xe_device *xe, unsigned int vfid,
+ struct xe_sriov_packet *data);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_packet_types.h b/drivers/gpu/drm/xe/xe_sriov_packet_types.h
new file mode 100644
index 000000000000..078a1c95e786
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_packet_types.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PACKET_TYPES_H_
+#define _XE_SRIOV_PACKET_TYPES_H_
+
+#include <linux/types.h>
+
+/**
+ * enum xe_sriov_packet_type - Xe SR-IOV VF migration data packet type
+ * @XE_SRIOV_PACKET_TYPE_DESCRIPTOR: Descriptor with VF device metadata
+ * @XE_SRIOV_PACKET_TYPE_TRAILER: Trailer indicating end-of-stream
+ * @XE_SRIOV_PACKET_TYPE_GGTT: Global GTT migration data
+ * @XE_SRIOV_PACKET_TYPE_MMIO: MMIO registers migration data
+ * @XE_SRIOV_PACKET_TYPE_GUC: GuC firmware migration data
+ * @XE_SRIOV_PACKET_TYPE_VRAM: VRAM migration data
+ */
+enum xe_sriov_packet_type {
+ /* Skipping 0 to catch uninitialized data */
+ XE_SRIOV_PACKET_TYPE_DESCRIPTOR = 1,
+ XE_SRIOV_PACKET_TYPE_TRAILER,
+ XE_SRIOV_PACKET_TYPE_GGTT,
+ XE_SRIOV_PACKET_TYPE_MMIO,
+ XE_SRIOV_PACKET_TYPE_GUC,
+ XE_SRIOV_PACKET_TYPE_VRAM,
+};
+
+/**
+ * struct xe_sriov_packet_hdr - Xe SR-IOV VF migration data packet header
+ */
+struct xe_sriov_packet_hdr {
+ /** @version: migration data protocol version */
+ u8 version;
+ /** @type: migration data type */
+ u8 type;
+ /** @tile_id: migration data tile id */
+ u8 tile_id;
+ /** @gt_id: migration data gt id */
+ u8 gt_id;
+ /** @flags: migration data flags */
+ u32 flags;
+ /**
+ * @offset: offset into the resource;
+ * used when multiple packets of given type are used for migration
+ */
+ u64 offset;
+ /** @size: migration data size */
+ u64 size;
+} __packed;
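
Since this header is copied to and from userspace byte-for-byte, its size and layout are effectively ABI. A quick standalone check of the intended wire format (24 bytes, no padding; fields mirrored with stdint equivalents):

#include <assert.h>
#include <stdint.h>

struct hdr {		/* userspace mirror of struct xe_sriov_packet_hdr */
	uint8_t version, type, tile_id, gt_id;
	uint32_t flags;
	uint64_t offset;
	uint64_t size;
} __attribute__((packed));

int main(void)
{
	static_assert(sizeof(struct hdr) == 24, "header must be 24 bytes");
	return 0;
}
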
+
+/**
+ * struct xe_sriov_packet - Xe SR-IOV VF migration data packet
+ */
+struct xe_sriov_packet {
+ /** @xe: the PF &xe_device this data packet belongs to */
+ struct xe_device *xe;
+ /** @vaddr: CPU pointer to payload data */
+ void *vaddr;
+ /** @remaining: payload data remaining */
+ size_t remaining;
+ /** @hdr_remaining: header data remaining */
+ size_t hdr_remaining;
+ union {
+ /** @bo: Buffer object with migration data */
+ struct xe_bo *bo;
+ /** @buff: Buffer with migration data */
+ void *buff;
+ };
+ /** @hdr: data packet header */
+ struct xe_sriov_packet_hdr hdr;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c
index 27ddf3cc80e9..7c779d63179f 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.c
@@ -8,17 +8,22 @@
#include <drm/drm_managed.h>
#include "xe_assert.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_gt_sriov_pf.h"
#include "xe_module.h"
#include "xe_sriov.h"
#include "xe_sriov_pf.h"
#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
#include "xe_sriov_pf_service.h"
+#include "xe_sriov_pf_sysfs.h"
#include "xe_sriov_printk.h"
static unsigned int wanted_max_vfs(struct xe_device *xe)
{
+ if (IS_ENABLED(CONFIG_CONFIGFS_FS))
+ return xe_configfs_get_max_vfs(to_pci_dev(xe->drm.dev));
return xe_modparam.max_vfs;
}
@@ -98,12 +103,47 @@ int xe_sriov_pf_init_early(struct xe_device *xe)
if (err)
return err;
+ err = xe_sriov_pf_migration_init(xe);
+ if (err)
+ return err;
+
+ xe_guard_init(&xe->sriov.pf.guard_vfs_enabling, "vfs_enabling");
+
xe_sriov_pf_service_init(xe);
return 0;
}
/**
+ * xe_sriov_pf_init_late() - Late initialization of the SR-IOV PF.
+ * @xe: the &xe_device to initialize
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_init_late(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int err;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_init(gt);
+ if (err)
+ return err;
+ }
+
+ err = xe_sriov_pf_sysfs_init(xe);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
* xe_sriov_pf_wait_ready() - Wait until PF is ready to operate.
* @xe: the &xe_device to test
*
@@ -130,61 +170,114 @@ int xe_sriov_pf_wait_ready(struct xe_device *xe)
}
/**
- * xe_sriov_pf_print_vfs_summary - Print SR-IOV PF information.
- * @xe: the &xe_device to print info from
- * @p: the &drm_printer
+ * xe_sriov_pf_arm_guard() - Arm the guard for exclusive/lockdown mode.
+ * @xe: the PF &xe_device
+ * @guard: the &xe_guard to arm
+ * @lockdown: arm for lockdown (true) or exclusive (false) mode
+ * @who: the address of the new owner, or NULL if it's the caller
*
- * Print SR-IOV PF related information into provided DRM printer.
+ * This function can only be called on PF.
+ *
+ * It is a simple wrapper for xe_guard_arm() with additional debug
+ * messages.
+ *
+ * Return: 0 on success or a negative error code on failure.
*/
-void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
+int xe_sriov_pf_arm_guard(struct xe_device *xe, struct xe_guard *guard,
+ bool lockdown, void *who)
{
- struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ void *new_owner = who ?: __builtin_return_address(0);
+ int err;
- xe_assert(xe, IS_SRIOV_PF(xe));
+ err = xe_guard_arm(guard, lockdown, new_owner);
+ if (err) {
+ xe_sriov_dbg(xe, "%s/%s mode denied (%pe) last owner %ps\n",
+ guard->name, xe_guard_mode_str(lockdown),
+ ERR_PTR(err), guard->owner);
+ return err;
+ }
- drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs);
- drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
- drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
+ xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n",
+ guard->name, xe_guard_mode_str(lockdown),
+ new_owner);
+ return 0;
}
-static int simple_show(struct seq_file *m, void *data)
+/**
+ * xe_sriov_pf_disarm_guard() - Disarm the guard.
+ * @xe: the PF &xe_device
+ * @guard: the &xe_guard to disarm
+ * @lockdown: disarm from lockdown (true) or exclusive (false) mode
+ * @who: the address of the indirect owner, or NULL if it's the caller
+ *
+ * This function can only be called on PF.
+ *
+ * It is a simple wrapper for xe_guard_disarm() with additional debug
+ * messages and xe_assert() to easily catch any illegal calls.
+ */
+void xe_sriov_pf_disarm_guard(struct xe_device *xe, struct xe_guard *guard,
+ bool lockdown, void *who)
{
- struct drm_printer p = drm_seq_file_printer(m);
- struct drm_info_node *node = m->private;
- struct dentry *parent = node->dent->d_parent;
- struct xe_device *xe = parent->d_inode->i_private;
- void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data;
+ bool disarmed;
- print(xe, &p);
- return 0;
+ xe_sriov_dbg_verbose(xe, "%s/%s by %ps\n",
+ guard->name, xe_guard_mode_str(lockdown),
+ who ?: __builtin_return_address(0));
+
+ disarmed = xe_guard_disarm(guard, lockdown);
+ xe_assert_msg(xe, disarmed, "%s/%s not armed? last owner %ps",
+ guard->name, xe_guard_mode_str(lockdown), guard->owner);
}
-static const struct drm_info_list debugfs_list[] = {
- { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary },
- { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions },
-};
+/**
+ * xe_sriov_pf_lockdown() - Lockdown the PF to prevent VFs enabling.
+ * @xe: the PF &xe_device
+ *
+ * This function can only be called on PF.
+ *
+ * Once the PF is locked down, it will not enable VFs.
+ * If VFs are already enabled, -EBUSY will be returned.
+ * To allow the PF to enable VFs again, call xe_sriov_pf_end_lockdown().
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_lockdown(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ return xe_sriov_pf_arm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true,
+ __builtin_return_address(0));
+}
/**
- * xe_sriov_pf_debugfs_register - Register PF debugfs attributes.
- * @xe: the &xe_device
- * @root: the root &dentry
+ * xe_sriov_pf_end_lockdown() - Allow the PF to enable VFs again.
+ * @xe: the PF &xe_device
*
- * Prepare debugfs attributes exposed by the PF.
+ * This function can only be called on PF.
+ * See xe_sriov_pf_lockdown() for details.
*/
-void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
+void xe_sriov_pf_end_lockdown(struct xe_device *xe)
{
- struct drm_minor *minor = xe->drm.primary;
- struct dentry *parent;
-
- /*
- * /sys/kernel/debug/dri/0/
- * ├── pf
- * │   ├── ...
- */
- parent = debugfs_create_dir("pf", root);
- if (IS_ERR(parent))
- return;
- parent->d_inode->i_private = xe;
-
- drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), parent, minor);
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ xe_sriov_pf_disarm_guard(xe, &xe->sriov.pf.guard_vfs_enabling, true,
+ __builtin_return_address(0));
+}
+
+/**
+ * xe_sriov_pf_print_vfs_summary() - Print SR-IOV PF information.
+ * @xe: the &xe_device to print info from
+ * @p: the &drm_printer
+ *
+ * Print SR-IOV PF related information into provided DRM printer.
+ */
+void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p)
+{
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ drm_printf(p, "total: %u\n", xe->sriov.pf.device_total_vfs);
+ drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs);
+ drm_printf(p, "enabled: %u\n", pci_num_vf(pdev));
}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h
index e3b34f8f5e04..b4d050ad5b7c 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf.h
@@ -15,23 +15,17 @@ struct xe_device;
#ifdef CONFIG_PCI_IOV
bool xe_sriov_pf_readiness(struct xe_device *xe);
int xe_sriov_pf_init_early(struct xe_device *xe);
+int xe_sriov_pf_init_late(struct xe_device *xe);
int xe_sriov_pf_wait_ready(struct xe_device *xe);
-void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
+int xe_sriov_pf_lockdown(struct xe_device *xe);
+void xe_sriov_pf_end_lockdown(struct xe_device *xe);
void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p);
#else
-static inline bool xe_sriov_pf_readiness(struct xe_device *xe)
-{
- return false;
-}
-
-static inline int xe_sriov_pf_init_early(struct xe_device *xe)
-{
- return 0;
-}
-
-static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
-{
-}
+static inline bool xe_sriov_pf_readiness(struct xe_device *xe) { return false; }
+static inline int xe_sriov_pf_init_early(struct xe_device *xe) { return 0; }
+static inline int xe_sriov_pf_init_late(struct xe_device *xe) { return 0; }
+static inline int xe_sriov_pf_lockdown(struct xe_device *xe) { return 0; }
+static inline void xe_sriov_pf_end_lockdown(struct xe_device *xe) { }
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_sriov_pf_control.c
new file mode 100644
index 000000000000..ed4b9820b06e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_device.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_pf_migration.h"
+#include "xe_sriov_packet.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_printk.h"
+
+/**
+ * xe_sriov_pf_control_pause_vf() - Pause a VF on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier (can't be 0 == PFID)
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_pause_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ if (result)
+ return result;
+
+ xe_sriov_info(xe, "VF%u paused!\n", vfid);
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_control_resume_vf() - Resume a VF on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_resume_vf(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ if (result)
+ return result;
+
+ xe_sriov_info(xe, "VF%u resumed!\n", vfid);
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_control_stop_vf() - Stop a VF on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_stop_vf(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ if (result)
+ return result;
+
+ xe_sriov_info(xe, "VF%u stopped!\n", vfid);
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_control_reset_vf() - Perform a VF reset (FLR).
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_trigger_flr(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_wait_flr(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ return result;
+}
+
+/**
+ * xe_sriov_pf_control_wait_flr() - Wait for a VF reset (FLR) to complete.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_wait_flr(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_control_wait_flr(gt, vfid);
+ result = result ? -EUCLEAN : err;
+ }
+
+ return result;
+}
+
+/**
+ * xe_sriov_pf_control_sync_flr() - Synchronize a VF FLR between all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, false);
+ if (ret < 0)
+ return ret;
+ }
+ for_each_gt(gt, xe, id) {
+ ret = xe_gt_sriov_pf_control_sync_flr(gt, vfid, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_control_trigger_save_vf() - Start VF migration data SAVE sequence on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret;
+
+ ret = xe_sriov_packet_save_init(xe, vfid);
+ if (ret)
+ return ret;
+
+ for_each_gt(gt, xe, id) {
+ xe_gt_sriov_pf_migration_save_init(gt, vfid);
+
+ ret = xe_gt_sriov_pf_control_trigger_save_vf(gt, vfid);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_control_finish_save_vf() - Complete VF migration data SAVE sequence on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_finish_save_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_gt_sriov_pf_control_finish_save_vf(gt, vfid);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * xe_sriov_pf_control_trigger_restore_vf() - Start VF migration data RESTORE sequence on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_trigger_restore_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_gt_sriov_pf_control_trigger_restore_vf(gt, vfid);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_control_finish_restore_vf() - Complete VF migration data RESTORE sequence on all GTs.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_control_finish_restore_vf(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int ret = 0;
+
+ for_each_gt(gt, xe, id) {
+ ret = xe_gt_sriov_pf_control_finish_restore_vf(gt, vfid);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_sriov_pf_control.h
new file mode 100644
index 000000000000..ef9f219b2109
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_CONTROL_H_
+#define _XE_SRIOV_PF_CONTROL_H_
+
+struct xe_device;
+
+int xe_sriov_pf_control_pause_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_wait_flr(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_finish_save_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_trigger_restore_vf(struct xe_device *xe, unsigned int vfid);
+int xe_sriov_pf_control_finish_restore_vf(struct xe_device *xe, unsigned int vfid);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c
new file mode 100644
index 000000000000..bad751217e1e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_pm.h"
+#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_debugfs.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
+#include "xe_sriov_pf_provision.h"
+#include "xe_sriov_pf_service.h"
+#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_pf_debugfs.h"
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── pf # d_inode->i_private = (xe_device*)
+ * │ ├── vf1 # d_inode->i_private = VFID(1)
+ * : :
+ * │ ├── vfN # d_inode->i_private = VFID(N)
+ */
+
+static void *extract_priv(struct dentry *d)
+{
+ return d->d_inode->i_private;
+}
+
+static struct xe_device *extract_xe(struct dentry *d)
+{
+ return extract_priv(d->d_parent);
+}
+
+static unsigned int extract_vfid(struct dentry *d)
+{
+ void *p = extract_priv(d);
+
+ return p == extract_xe(d) ? PFID : (uintptr_t)p;
+}
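
The i_private convention is worth spelling out: the "pf" directory stores the same device pointer as its parent "sriov" directory, while each "vfN" directory stores the VF number cast to a pointer, so extract_vfid() can tell the two apart with a single comparison. A standalone round-trip of that encoding (PFID assumed to be 0, as in the xe convention VFID(n) == n):

#include <assert.h>
#include <stdint.h>

#define PFID 0u	/* assumed: PF uses function id 0, VFs start at 1 */

static unsigned int decode_vfid(void *priv, void *parent_priv)
{
	return priv == parent_priv ? PFID : (unsigned int)(uintptr_t)priv;
}

int main(void)
{
	int dev;			/* stand-in for struct xe_device */
	void *xe = &dev;

	assert(decode_vfid(xe, xe) == PFID);			/* "pf" dir */
	assert(decode_vfid((void *)(uintptr_t)2, xe) == 2);	/* "vf2" dir */
	return 0;
}
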
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── restore_auto_provisioning
+ * │ :
+ * │ ├── pf/
+ * │ ├── vf1
+ * │ │ ├── ...
+ */
+
+static ssize_t from_file_write_to_xe_call(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos,
+ int (*call)(struct xe_device *))
+{
+ struct dentry *dent = file_dentry(file);
+ struct xe_device *xe = extract_xe(dent);
+ bool yes;
+ int ret;
+
+ if (*ppos)
+ return -EINVAL;
+ ret = kstrtobool_from_user(userbuf, count, &yes);
+ if (ret < 0)
+ return ret;
+ if (yes) {
+ xe_pm_runtime_get(xe);
+ ret = call(xe);
+ xe_pm_runtime_put(xe);
+ }
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+#define DEFINE_SRIOV_ATTRIBUTE(OP) \
+static int OP##_show(struct seq_file *s, void *unused) \
+{ \
+ return 0; \
+} \
+static ssize_t OP##_write(struct file *file, const char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ return from_file_write_to_xe_call(file, userbuf, count, ppos, \
+ xe_sriov_pf_##OP); \
+} \
+DEFINE_SHOW_STORE_ATTRIBUTE(OP)
+
+static inline int xe_sriov_pf_restore_auto_provisioning(struct xe_device *xe)
+{
+ return xe_sriov_pf_provision_set_mode(xe, XE_SRIOV_PROVISIONING_MODE_AUTO);
+}
+
+DEFINE_SRIOV_ATTRIBUTE(restore_auto_provisioning);
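From userspace, an attribute defined through DEFINE_SRIOV_ATTRIBUTE() is triggered by writing any kstrtobool-truthy string at offset 0. A hypothetical trigger of restore_auto_provisioning (the debugfs BDF path is illustrative):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative BDF; the file is write-only (mode 0200). */
	int fd = open("/sys/kernel/debug/dri/0000:03:00.0/sriov/restore_auto_provisioning",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, "1", 1) != 1)	/* any truthy string works */
		return 1;
	return close(fd) ? 1 : 0;
}
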
+
+static int lockdown_vfs_enabling_open(struct inode *inode, struct file *file)
+{
+ struct dentry *dent = file_dentry(file);
+ struct xe_device *xe = extract_xe(dent);
+ ssize_t ret;
+
+ ret = xe_sriov_pf_lockdown(xe);
+ if (ret < 0)
+ return ret;
+
+ file->private_data = xe;
+ return nonseekable_open(inode, file);
+}
+
+static int lockdown_vfs_enabling_release(struct inode *inode, struct file *file)
+{
+ struct xe_device *xe = file->private_data;
+
+ xe_sriov_pf_end_lockdown(xe);
+ return 0;
+}
+
+static const struct file_operations lockdown_vfs_enabling_fops = {
+ .owner = THIS_MODULE,
+ .open = lockdown_vfs_enabling_open,
+ .release = lockdown_vfs_enabling_release,
+};
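
The lockdown attribute ties the guard to the file's lifetime: open() arms it (failing, per the kernel-doc above, with -EBUSY if VFs are already enabled) and release() disarms it, so a crashed holder cannot leave the PF locked down. A hypothetical userspace use:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0000:03:00.0/sriov/lockdown_vfs_enabling",
		      O_RDONLY);

	if (fd < 0) {
		perror("lockdown");	/* e.g. EBUSY: VFs already enabled */
		return 1;
	}
	/* ... VF enabling is blocked while this fd stays open ... */
	close(fd);			/* release() ends the lockdown */
	return 0;
}
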
+
+static void pf_populate_root(struct xe_device *xe, struct dentry *dent)
+{
+ debugfs_create_file("restore_auto_provisioning", 0200, dent, xe,
+ &restore_auto_provisioning_fops);
+ debugfs_create_file("lockdown_vfs_enabling", 0400, dent, xe,
+ &lockdown_vfs_enabling_fops);
+}
+
+static int simple_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct dentry *parent = node->dent->d_parent;
+ struct xe_device *xe = parent->d_inode->i_private;
+ void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data;
+
+ print(xe, &p);
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary },
+ { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions },
+};
+
+static void pf_populate_pf(struct xe_device *xe, struct dentry *pfdent)
+{
+ struct drm_minor *minor = xe->drm.primary;
+
+ drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), pfdent, minor);
+}
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── vf1
+ * │ │ ├── migration_data
+ * │ │ ├── pause
+ * │ │ ├── reset
+ * │ │ ├── resume
+ * │ │ ├── stop
+ * │ │ ├── save
+ * │ │ ├── restore
+ * │ │ :
+ * │ ├── vf2
+ * │ │ ├── ...
+ */
+
+static int from_file_read_to_vf_call(struct seq_file *s,
+ int (*call)(struct xe_device *, unsigned int))
+{
+ struct dentry *dent = file_dentry(s->file)->d_parent;
+ struct xe_device *xe = extract_xe(dent);
+ unsigned int vfid = extract_vfid(dent);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = call(xe, vfid);
+ xe_pm_runtime_put(xe);
+
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static ssize_t from_file_write_to_vf_call(struct file *file, const char __user *userbuf,
+ size_t count, loff_t *ppos,
+ int (*call)(struct xe_device *, unsigned int))
+{
+ struct dentry *dent = file_dentry(file)->d_parent;
+ struct xe_device *xe = extract_xe(dent);
+ unsigned int vfid = extract_vfid(dent);
+ bool yes;
+ int ret;
+
+ if (*ppos)
+ return -EINVAL;
+ ret = kstrtobool_from_user(userbuf, count, &yes);
+ if (ret < 0)
+ return ret;
+ if (yes) {
+ xe_pm_runtime_get(xe);
+ ret = call(xe, vfid);
+ xe_pm_runtime_put(xe);
+ }
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+#define DEFINE_VF_CONTROL_ATTRIBUTE(OP) \
+static int OP##_show(struct seq_file *s, void *unused) \
+{ \
+ return 0; \
+} \
+static ssize_t OP##_write(struct file *file, const char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ return from_file_write_to_vf_call(file, userbuf, count, ppos, \
+ xe_sriov_pf_control_##OP); \
+} \
+DEFINE_SHOW_STORE_ATTRIBUTE(OP)
+
+#define DEFINE_VF_CONTROL_ATTRIBUTE_RW(OP) \
+static int OP##_show(struct seq_file *s, void *unused) \
+{ \
+ return from_file_read_to_vf_call(s, \
+ xe_sriov_pf_control_finish_##OP); \
+} \
+static ssize_t OP##_write(struct file *file, const char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ return from_file_write_to_vf_call(file, userbuf, count, ppos, \
+ xe_sriov_pf_control_trigger_##OP); \
+} \
+DEFINE_SHOW_STORE_ATTRIBUTE(OP)
+
+DEFINE_VF_CONTROL_ATTRIBUTE(pause_vf);
+DEFINE_VF_CONTROL_ATTRIBUTE(resume_vf);
+DEFINE_VF_CONTROL_ATTRIBUTE(stop_vf);
+DEFINE_VF_CONTROL_ATTRIBUTE(reset_vf);
+DEFINE_VF_CONTROL_ATTRIBUTE_RW(save_vf);
+DEFINE_VF_CONTROL_ATTRIBUTE_RW(restore_vf);
+
+static ssize_t data_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
+{
+ struct dentry *dent = file_dentry(file)->d_parent;
+ struct xe_device *xe = extract_xe(dent);
+ unsigned int vfid = extract_vfid(dent);
+
+ if (*pos)
+ return -ESPIPE;
+
+ return xe_sriov_pf_migration_write(xe, vfid, buf, count);
+}
+
+static ssize_t data_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ struct dentry *dent = file_dentry(file)->d_parent;
+ struct xe_device *xe = extract_xe(dent);
+ unsigned int vfid = extract_vfid(dent);
+
+ if (*ppos)
+ return -ESPIPE;
+
+ return xe_sriov_pf_migration_read(xe, vfid, buf, count);
+}
+
+static const struct file_operations data_vf_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = data_write,
+ .read = data_read,
+ .llseek = default_llseek,
+};
+
+static ssize_t size_read(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct dentry *dent = file_dentry(file)->d_parent;
+ struct xe_device *xe = extract_xe(dent);
+ unsigned int vfid = extract_vfid(dent);
+ char buf[21];
+ ssize_t ret;
+ int len;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_sriov_pf_migration_size(xe, vfid);
+ xe_pm_runtime_put(xe);
+ if (ret < 0)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%zd\n", ret);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations size_vf_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = size_read,
+ .llseek = default_llseek,
+};
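
migration_size reports the expected total stream size as a decimal byte count, which a management agent can use to pre-allocate buffers before draining migration_data. A hypothetical query (path illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char buf[32] = "";
	int fd = open("/sys/kernel/debug/dri/0000:03:00.0/sriov/vf1/migration_size",
		      O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf) - 1) < 0)
		return 1;
	printf("expecting %ld bytes of migration data\n", strtol(buf, NULL, 10));
	return close(fd) ? 1 : 0;
}
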
+
+static void pf_populate_vf(struct xe_device *xe, struct dentry *vfdent)
+{
+ debugfs_create_file("pause", 0200, vfdent, xe, &pause_vf_fops);
+ debugfs_create_file("resume", 0200, vfdent, xe, &resume_vf_fops);
+ debugfs_create_file("stop", 0200, vfdent, xe, &stop_vf_fops);
+ debugfs_create_file("reset", 0200, vfdent, xe, &reset_vf_fops);
+ debugfs_create_file("save", 0600, vfdent, xe, &save_vf_fops);
+ debugfs_create_file("restore", 0600, vfdent, xe, &restore_vf_fops);
+ debugfs_create_file("migration_data", 0600, vfdent, xe, &data_vf_fops);
+ debugfs_create_file("migration_size", 0400, vfdent, xe, &size_vf_fops);
+}
+
+static void pf_populate_with_tiles(struct xe_device *xe, struct dentry *dent, unsigned int vfid)
+{
+ struct xe_tile *tile;
+ unsigned int id;
+
+ for_each_tile(tile, xe, id)
+ xe_tile_sriov_pf_debugfs_populate(tile, dent, vfid);
+}
+
+/**
+ * xe_sriov_pf_debugfs_register() - Register PF debugfs attributes.
+ * @xe: the &xe_device
+ * @root: the root &dentry
+ *
+ * Create separate directory that will contain all SR-IOV related files,
+ * organized per each SR-IOV function (PF, VF1, VF2, ..., VFn).
+ */
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root)
+{
+ int totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ struct dentry *pfdent;
+ struct dentry *vfdent;
+ struct dentry *dent;
+ char vfname[16]; /* should be more than enough for "vf%u\0" and VFID(UINT_MAX) */
+ unsigned int n;
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── ...
+ */
+ dent = debugfs_create_dir("sriov", root);
+ if (IS_ERR(dent))
+ return;
+ dent->d_inode->i_private = xe;
+
+ pf_populate_root(xe, dent);
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── pf # d_inode->i_private = (xe_device*)
+ * │ │ ├── ...
+ */
+ pfdent = debugfs_create_dir("pf", dent);
+ if (IS_ERR(pfdent))
+ return;
+ pfdent->d_inode->i_private = xe;
+
+ pf_populate_pf(xe, pfdent);
+ pf_populate_with_tiles(xe, pfdent, PFID);
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── vf1 # d_inode->i_private = VFID(1)
+ * │ ├── vf2 # d_inode->i_private = VFID(2)
+ * │ ├── ...
+ */
+ for (n = 1; n <= totalvfs; n++) {
+ snprintf(vfname, sizeof(vfname), "vf%u", VFID(n));
+ vfdent = debugfs_create_dir(vfname, dent);
+ if (IS_ERR(vfdent))
+ return;
+ vfdent->d_inode->i_private = (void *)(uintptr_t)VFID(n);
+
+ pf_populate_vf(xe, vfdent);
+ pf_populate_with_tiles(xe, vfdent, VFID(n));
+ }
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h
new file mode 100644
index 000000000000..93db13585b82
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_debugfs.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_DEBUGFS_H_
+#define _XE_SRIOV_PF_DEBUGFS_H_
+
+struct dentry;
+struct xe_device;
+
+#ifdef CONFIG_PCI_IOV
+void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root);
+#else
+static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root) { }
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
index dd1df950b021..9054fdc34597 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_helpers.h
@@ -37,10 +37,37 @@ static inline int xe_sriov_pf_get_totalvfs(struct xe_device *xe)
return xe->sriov.pf.driver_max_vfs;
}
+/**
+ * xe_sriov_pf_num_vfs() - Number of enabled VFs on the PF.
+ * @xe: the PF &xe_device
+ *
+ * Return: Number of enabled VFs on the PF.
+ */
+static inline unsigned int xe_sriov_pf_num_vfs(const struct xe_device *xe)
+{
+ return pci_num_vf(to_pci_dev(xe->drm.dev));
+}
+
+/**
+ * xe_sriov_pf_admin_only() - Check if the PF is mainly used for VF administration.
+ * @xe: the PF &xe_device
+ *
+ * Return: true if the PF is mainly used for VF administration.
+ */
+static inline bool xe_sriov_pf_admin_only(const struct xe_device *xe)
+{
+ return !xe->info.probe_display;
+}
+
static inline struct mutex *xe_sriov_pf_master_mutex(struct xe_device *xe)
{
xe_assert(xe, IS_SRIOV_PF(xe));
return &xe->sriov.pf.master_lock;
}
+int xe_sriov_pf_arm_guard(struct xe_device *xe, struct xe_guard *guard,
+ bool write, void *who);
+void xe_sriov_pf_disarm_guard(struct xe_device *xe, struct xe_guard *guard,
+ bool write, void *who);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.c b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
new file mode 100644
index 000000000000..6c4b16409cc9
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.c
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_pf_migration.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
+#include "xe_sriov_packet.h"
+#include "xe_sriov_packet_types.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
+#include "xe_sriov_printk.h"
+
+static struct xe_sriov_migration_state *pf_pick_migration(struct xe_device *xe, unsigned int vfid)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, vfid <= xe_sriov_pf_get_totalvfs(xe));
+
+ return &xe->sriov.pf.vfs[vfid].migration;
+}
+
+/**
+ * xe_sriov_pf_migration_waitqueue() - Get waitqueue for migration.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * Return: pointer to the migration waitqueue.
+ */
+wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid)
+{
+ return &pf_pick_migration(xe, vfid)->wq;
+}
+
+/**
+ * xe_sriov_pf_migration_supported() - Check if SR-IOV VF migration is supported by the device
+ * @xe: the &xe_device
+ *
+ * Return: true if migration is supported, false otherwise
+ */
+bool xe_sriov_pf_migration_supported(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG) || !xe->sriov.pf.migration.disabled;
+}
+
+/**
+ * xe_sriov_pf_migration_disable() - Turn off SR-IOV VF migration support on PF.
+ * @xe: the &xe_device instance.
+ * @fmt: format string for the log message, to be combined with following VAs.
+ */
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list va_args;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+ xe_sriov_notice(xe, "migration %s: %pV\n",
+ IS_ENABLED(CONFIG_DRM_XE_DEBUG) ?
+ "missing prerequisite" : "disabled",
+ &vaf);
+ va_end(va_args);
+
+ xe->sriov.pf.migration.disabled = true;
+}
+
+static void pf_migration_check_support(struct xe_device *xe)
+{
+ if (!xe_device_has_memirq(xe))
+ xe_sriov_pf_migration_disable(xe, "requires memory-based IRQ support");
+}
+
+static void pf_migration_cleanup(void *arg)
+{
+ struct xe_sriov_migration_state *migration = arg;
+
+ xe_sriov_packet_free(migration->pending);
+ xe_sriov_packet_free(migration->trailer);
+ xe_sriov_packet_free(migration->descriptor);
+}
+
+/**
+ * xe_sriov_pf_migration_init() - Initialize support for SR-IOV VF migration.
+ * @xe: the &xe_device
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_migration_init(struct xe_device *xe)
+{
+ unsigned int n, totalvfs;
+ int err;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ pf_migration_check_support(xe);
+
+ if (!xe_sriov_pf_migration_supported(xe))
+ return 0;
+
+ totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ for (n = 1; n <= totalvfs; n++) {
+ struct xe_sriov_migration_state *migration = pf_pick_migration(xe, n);
+
+ err = drmm_mutex_init(&xe->drm, &migration->lock);
+ if (err)
+ return err;
+
+ init_waitqueue_head(&migration->wq);
+
+ err = devm_add_action_or_reset(xe->drm.dev, pf_migration_cleanup, migration);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool pf_migration_data_ready(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_gt *gt;
+ u8 gt_id;
+
+ for_each_gt(gt, xe, gt_id) {
+ if (xe_gt_sriov_pf_control_check_save_failed(gt, vfid) ||
+ xe_gt_sriov_pf_control_check_save_data_done(gt, vfid) ||
+ !xe_gt_sriov_pf_migration_ring_empty(gt, vfid))
+ return true;
+ }
+
+ return false;
+}
+
+static struct xe_sriov_packet *
+pf_migration_consume(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_sriov_packet *data;
+ bool more_data = false;
+ struct xe_gt *gt;
+ u8 gt_id;
+
+ for_each_gt(gt, xe, gt_id) {
+ data = xe_gt_sriov_pf_migration_save_consume(gt, vfid);
+ if (data && PTR_ERR(data) != -EAGAIN)
+ return data;
+ if (PTR_ERR(data) == -EAGAIN)
+ more_data = true;
+ }
+
+ if (!more_data)
+ return NULL;
+
+ return ERR_PTR(-EAGAIN);
+}
+
+/**
+ * xe_sriov_pf_migration_save_consume() - Consume a VF migration data packet from the device.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ *
+ * Called by the save migration data consumer (userspace) when
+ * processing migration data.
+ * If there is no migration data to process, the call blocks until more data
+ * is available.
+ *
+ * Return: Pointer to &xe_sriov_packet on success,
+ * NULL if ring is empty and no more migration data is expected,
+ * ERR_PTR value in case of error.
+ */
+struct xe_sriov_packet *
+xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_sriov_migration_state *migration = pf_pick_migration(xe, vfid);
+ struct xe_sriov_packet *data;
+ int ret;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ for (;;) {
+ data = pf_migration_consume(xe, vfid);
+ if (PTR_ERR(data) != -EAGAIN)
+ break;
+
+ ret = wait_event_interruptible(migration->wq,
+ pf_migration_data_ready(xe, vfid));
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ return data;
+}
+
+static int pf_handle_descriptor(struct xe_device *xe, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ int ret;
+
+ if (data->hdr.tile_id != 0 || data->hdr.gt_id != 0)
+ return -EINVAL;
+
+ ret = xe_sriov_packet_process_descriptor(xe, vfid, data);
+ if (ret)
+ return ret;
+
+ xe_sriov_packet_free(data);
+
+ return 0;
+}
+
+static int pf_handle_trailer(struct xe_device *xe, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ struct xe_gt *gt;
+ u8 gt_id;
+
+ if (data->hdr.tile_id != 0 || data->hdr.gt_id != 0)
+ return -EINVAL;
+ if (data->hdr.offset != 0 || data->hdr.size != 0 || data->buff || data->bo)
+ return -EINVAL;
+
+ xe_sriov_packet_free(data);
+
+ for_each_gt(gt, xe, gt_id)
+ xe_gt_sriov_pf_control_restore_data_done(gt, vfid);
+
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_migration_restore_produce() - Produce a VF migration data packet to the device.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @data: Pointer to &xe_sriov_packet
+ *
+ * Called by the restore migration data producer (userspace) when processing
+ * migration data.
+ * If the underlying data structure is full, the call blocks until space is
+ * available.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
+ struct xe_sriov_packet *data)
+{
+ struct xe_gt *gt;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ if (data->hdr.type == XE_SRIOV_PACKET_TYPE_DESCRIPTOR)
+ return pf_handle_descriptor(xe, vfid, data);
+ if (data->hdr.type == XE_SRIOV_PACKET_TYPE_TRAILER)
+ return pf_handle_trailer(xe, vfid, data);
+
+ gt = xe_device_get_gt(xe, data->hdr.gt_id);
+ if (!gt || data->hdr.tile_id != gt->tile->id || data->hdr.type == 0) {
+ xe_sriov_err_ratelimited(xe, "Received invalid restore packet for VF%u (type:%u, tile:%u, GT:%u)\n",
+ vfid, data->hdr.type, data->hdr.tile_id, data->hdr.gt_id);
+ return -EINVAL;
+ }
+
+ return xe_gt_sriov_pf_migration_restore_produce(gt, vfid, data);
+}
+
+/**
+ * xe_sriov_pf_migration_read() - Read migration data from the device.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @buf: start address of userspace buffer
+ * @len: requested read size from userspace
+ *
+ * Return: number of bytes that have been successfully read,
+ * 0 if no more migration data is available,
+ * -errno on failure.
+ */
+ssize_t xe_sriov_pf_migration_read(struct xe_device *xe, unsigned int vfid,
+ char __user *buf, size_t len)
+{
+ struct xe_sriov_migration_state *migration = pf_pick_migration(xe, vfid);
+ ssize_t ret, consumed = 0;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ scoped_cond_guard(mutex_intr, return -EINTR, &migration->lock) {
+ while (consumed < len) {
+ ret = xe_sriov_packet_read_single(xe, vfid, buf, len - consumed);
+ if (ret == -ENODATA)
+ break;
+ if (ret < 0)
+ return ret;
+
+ consumed += ret;
+ buf += ret;
+ }
+ }
+
+ return consumed;
+}
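
On the save side, userspace simply drains migration_data until read() returns 0, which corresponds to the -ENODATA break above once the trailer has been consumed. A hypothetical consumer (path and buffer size illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0000:03:00.0/sriov/vf1/migration_data",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);	/* forward downstream */
	close(fd);
	return n < 0 ? 1 : 0;	/* 0 == end of stream (trailer consumed) */
}
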
+
+/**
+ * xe_sriov_pf_migration_write() - Write migration data to the device.
+ * @xe: the &xe_device
+ * @vfid: the VF identifier
+ * @buf: start address of userspace buffer
+ * @len: requested write size from userspace
+ *
+ * Return: number of bytes that have been successfully written,
+ * -errno on failure.
+ */
+ssize_t xe_sriov_pf_migration_write(struct xe_device *xe, unsigned int vfid,
+ const char __user *buf, size_t len)
+{
+ struct xe_sriov_migration_state *migration = pf_pick_migration(xe, vfid);
+ ssize_t ret, produced = 0;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ scoped_cond_guard(mutex_intr, return -EINTR, &migration->lock) {
+ while (produced < len) {
+ ret = xe_sriov_packet_write_single(xe, vfid, buf, len - produced);
+ if (ret < 0)
+ return ret;
+
+ produced += ret;
+ buf += ret;
+ }
+ }
+
+ return produced;
+}
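
The restore side is symmetric: feed the saved stream back through the target VF's migration_data file. Partial writes are legal (the driver assembles whole packets internally), so the sketch below retries the remainder; names and paths are illustrative:

#include <fcntl.h>
#include <unistd.h>

/* Retry until the whole chunk is accepted; short writes are legal. */
static int push(int fd, const char *buf, ssize_t n)
{
	ssize_t done = 0, ret;

	while (done < n) {
		ret = write(fd, buf + done, (size_t)(n - done));
		if (ret < 0)
			return -1;
		done += ret;
	}
	return 0;
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0000:03:00.0/sriov/vf1/migration_data",
		      O_WRONLY);

	if (fd < 0)
		return 1;
	while ((n = read(0, buf, sizeof(buf))) > 0)	/* stream on stdin */
		if (push(fd, buf, n))
			return 1;
	return (close(fd) || n < 0) ? 1 : 0;
}
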
+
+/**
+ * xe_sriov_pf_migration_size() - Total size of migration data from all components within a device
+ * @xe: the &xe_device
+ * @vfid: the VF identifier (can't be 0)
+ *
+ * This function is for PF only.
+ *
+ * Return: total migration data size in bytes or a negative error code on failure.
+ */
+ssize_t xe_sriov_pf_migration_size(struct xe_device *xe, unsigned int vfid)
+{
+ size_t size = 0;
+ struct xe_gt *gt;
+ ssize_t ret;
+ u8 gt_id;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, vfid);
+
+ for_each_gt(gt, xe, gt_id) {
+ ret = xe_gt_sriov_pf_migration_size(gt, vfid);
+ if (ret < 0)
+ return ret;
+
+ size += ret;
+ }
+
+ return size;
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
new file mode 100644
index 000000000000..f8f408df8481
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_MIGRATION_H_
+#define _XE_SRIOV_PF_MIGRATION_H_
+
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct xe_device;
+struct xe_sriov_packet;
+
+int xe_sriov_pf_migration_init(struct xe_device *xe);
+bool xe_sriov_pf_migration_supported(struct xe_device *xe);
+void xe_sriov_pf_migration_disable(struct xe_device *xe, const char *fmt, ...);
+int xe_sriov_pf_migration_restore_produce(struct xe_device *xe, unsigned int vfid,
+ struct xe_sriov_packet *data);
+struct xe_sriov_packet *
+xe_sriov_pf_migration_save_consume(struct xe_device *xe, unsigned int vfid);
+ssize_t xe_sriov_pf_migration_size(struct xe_device *xe, unsigned int vfid);
+wait_queue_head_t *xe_sriov_pf_migration_waitqueue(struct xe_device *xe, unsigned int vfid);
+
+ssize_t xe_sriov_pf_migration_read(struct xe_device *xe, unsigned int vfid,
+ char __user *buf, size_t len);
+ssize_t xe_sriov_pf_migration_write(struct xe_device *xe, unsigned int vfid,
+ const char __user *buf, size_t len);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
new file mode 100644
index 000000000000..7d9a8a278d91
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_migration_types.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_MIGRATION_TYPES_H_
+#define _XE_SRIOV_PF_MIGRATION_TYPES_H_
+
+#include <linux/types.h>
+#include <linux/mutex_types.h>
+#include <linux/wait.h>
+
+/**
+ * struct xe_sriov_pf_migration - Xe device level VF migration data
+ */
+struct xe_sriov_pf_migration {
+ /** @disabled: indicates whether VF migration feature is disabled */
+ bool disabled;
+};
+
+/**
+ * struct xe_sriov_migration_state - Per VF device-level migration related data
+ */
+struct xe_sriov_migration_state {
+ /** @wq: waitqueue used to avoid busy-waiting for snapshot production/consumption */
+ wait_queue_head_t wq;
+ /** @lock: Mutex protecting the migration data */
+ struct mutex lock;
+ /** @pending: currently processed data packet of VF resource */
+ struct xe_sriov_packet *pending;
+ /** @trailer: data packet used to indicate the end of stream */
+ struct xe_sriov_packet *trailer;
+ /** @descriptor: data packet containing the metadata describing the device */
+ struct xe_sriov_packet *descriptor;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_provision.c b/drivers/gpu/drm/xe/xe_sriov_pf_provision.c
new file mode 100644
index 000000000000..01470c42e8a7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_provision.c
@@ -0,0 +1,438 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_assert.h"
+#include "xe_device.h"
+#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_policy.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_provision.h"
+#include "xe_sriov_pf_provision_types.h"
+#include "xe_sriov_printk.h"
+
+static const char *mode_to_string(enum xe_sriov_provisioning_mode mode)
+{
+ switch (mode) {
+ case XE_SRIOV_PROVISIONING_MODE_AUTO:
+ return "auto";
+ case XE_SRIOV_PROVISIONING_MODE_CUSTOM:
+ return "custom";
+ default:
+ return "<invalid>";
+ }
+}
+
+static bool pf_auto_provisioning_mode(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ return xe->sriov.pf.provision.mode == XE_SRIOV_PROVISIONING_MODE_AUTO;
+}
+
+static bool pf_needs_provisioning(struct xe_gt *gt, unsigned int num_vfs)
+{
+ unsigned int n;
+
+ for (n = 1; n <= num_vfs; n++)
+ if (!xe_gt_sriov_pf_config_is_empty(gt, n))
+ return false;
+
+ return true;
+}
+
+static int pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ if (!pf_needs_provisioning(gt, num_vfs))
+ return -EUCLEAN;
+ err = xe_gt_sriov_pf_config_set_fair(gt, VFID(1), num_vfs);
+ result = result ?: err;
+ }
+
+ return result;
+}
+
+static void pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ unsigned int n;
+
+ for_each_gt(gt, xe, id)
+ for (n = 1; n <= num_vfs; n++)
+ xe_gt_sriov_pf_config_release(gt, n, true);
+}
+
+static void pf_unprovision_all_vfs(struct xe_device *xe)
+{
+ pf_unprovision_vfs(xe, xe_sriov_pf_get_totalvfs(xe));
+}
+
+/**
+ * xe_sriov_pf_provision_vfs() - Provision VFs in auto-mode.
+ * @xe: the PF &xe_device
+ * @num_vfs: the number of VFs to auto-provision
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ if (!pf_auto_provisioning_mode(xe))
+ return 0;
+
+ return pf_provision_vfs(xe, num_vfs);
+}
+
+/**
+ * xe_sriov_pf_unprovision_vfs() - Unprovision VFs in auto-mode.
+ * @xe: the PF &xe_device
+ * @num_vfs: the number of VFs to unprovision
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ if (!pf_auto_provisioning_mode(xe))
+ return 0;
+
+ pf_unprovision_vfs(xe, num_vfs);
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_provision_set_mode() - Change VFs provision mode.
+ * @xe: the PF &xe_device
+ * @mode: the new VFs provisioning mode
+ *
+ * When changing from AUTO to CUSTOM mode, any already allocated VF resources
+ * will remain allocated and will not be released when VFs are disabled.
+ *
+ * When changing back to AUTO mode, if VFs are not enabled, already allocated
+ * VF resources will be released immediately. If VFs are still enabled, the
+ * mode change is rejected.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_set_mode(struct xe_device *xe, enum xe_sriov_provisioning_mode mode)
+{
+ xe_assert(xe, IS_SRIOV_PF(xe));
+
+ if (mode == xe->sriov.pf.provision.mode)
+ return 0;
+
+ if (mode == XE_SRIOV_PROVISIONING_MODE_AUTO) {
+ if (xe_sriov_pf_num_vfs(xe)) {
+ xe_sriov_dbg(xe, "can't restore %s: VFs must be disabled!\n",
+ mode_to_string(mode));
+ return -EBUSY;
+ }
+ pf_unprovision_all_vfs(xe);
+ }
+
+ xe_sriov_dbg(xe, "mode %s changed to %s by %ps\n",
+ mode_to_string(xe->sriov.pf.provision.mode),
+ mode_to_string(mode), __builtin_return_address(0));
+ xe->sriov.pf.provision.mode = mode;
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_provision_bulk_apply_eq() - Change execution quantum for all VFs and PF.
+ * @xe: the PF &xe_device
+ * @eq: execution quantum in [ms] to set
+ *
+ * Change execution quantum (EQ) provisioning on all tiles/GTs.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_bulk_apply_eq(struct xe_device *xe, u32 eq)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ guard(mutex)(xe_sriov_pf_master_mutex(xe));
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_config_bulk_set_exec_quantum_locked(gt, eq);
+ result = result ?: err;
+ }
+
+ return result;
+}
+
+/**
+ * xe_sriov_pf_provision_apply_vf_eq() - Change VF's execution quantum.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier
+ * @eq: execution quantum in [ms] to set
+ *
+ * Change VF's execution quantum (EQ) provisioning on all tiles/GTs.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_apply_vf_eq(struct xe_device *xe, unsigned int vfid, u32 eq)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ guard(mutex)(xe_sriov_pf_master_mutex(xe));
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_config_set_exec_quantum_locked(gt, vfid, eq);
+ result = result ?: err;
+ }
+
+ return result;
+}
+
+static int pf_report_unclean(struct xe_gt *gt, unsigned int vfid,
+ const char *what, u32 found, u32 expected)
+{
+ char name[8];
+
+ xe_sriov_dbg(gt_to_xe(gt), "%s on GT%u has %s=%u (expected %u)\n",
+ xe_sriov_function_name(vfid, name, sizeof(name)),
+ gt->info.id, what, found, expected);
+ return -EUCLEAN;
+}
+
+/**
+ * xe_sriov_pf_provision_query_vf_eq() - Query VF's execution quantum.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier
+ * @eq: placeholder for the returned execution quantum in [ms]
+ *
+ * Query VF's execution quantum (EQ) provisioning from all tiles/GTs.
+ * If values across tiles/GTs are inconsistent, -EUCLEAN is returned.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_query_vf_eq(struct xe_device *xe, unsigned int vfid, u32 *eq)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int count = 0;
+ u32 value;
+
+ guard(mutex)(xe_sriov_pf_master_mutex(xe));
+
+ for_each_gt(gt, xe, id) {
+ value = xe_gt_sriov_pf_config_get_exec_quantum_locked(gt, vfid);
+ if (!count++)
+ *eq = value;
+ else if (value != *eq)
+ return pf_report_unclean(gt, vfid, "EQ", value, *eq);
+ }
+
+ return !count ? -ENODATA : 0;
+}
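
The query helpers all share one pattern: the first GT's value becomes the reference, any later mismatch yields -EUCLEAN, and an empty iteration yields -ENODATA. The pattern in isolation, with the Linux errno values hardcoded for illustration:

#include <stdio.h>

#define EUCLEAN 117	/* assumed Linux errno values, for illustration */
#define ENODATA 61

static int query_consistent(const unsigned int *vals, int n, unsigned int *out)
{
	int i;

	for (i = 0; i < n; i++) {
		if (i == 0)
			*out = vals[i];		/* first GT sets the reference */
		else if (vals[i] != *out)
			return -EUCLEAN;	/* tiles/GTs disagree */
	}
	return n ? 0 : -ENODATA;		/* nothing was queried */
}

int main(void)
{
	unsigned int eq, per_gt[] = { 20, 20 };

	return query_consistent(per_gt, 2, &eq) ? 1 : 0;
}
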
+
+/**
+ * xe_sriov_pf_provision_bulk_apply_pt() - Change preemption timeout for all VFs and PF.
+ * @xe: the PF &xe_device
+ * @pt: preemption timeout in [us] to set
+ *
+ * Change preemption timeout (PT) provisioning on all tiles/GTs.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_bulk_apply_pt(struct xe_device *xe, u32 pt)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ guard(mutex)(xe_sriov_pf_master_mutex(xe));
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_config_bulk_set_preempt_timeout_locked(gt, pt);
+ result = result ?: err;
+ }
+
+ return result;
+}
+
+/**
+ * xe_sriov_pf_provision_apply_vf_pt() - Change VF's preemption timeout.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier
+ * @pt: preemption timeout in [us] to set
+ *
+ * Change VF's preemption timeout (PT) provisioning on all tiles/GTs.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_apply_vf_pt(struct xe_device *xe, unsigned int vfid, u32 pt)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ guard(mutex)(xe_sriov_pf_master_mutex(xe));
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_config_set_preempt_timeout_locked(gt, vfid, pt);
+ result = result ?: err;
+ }
+
+ return result;
+}
+
+/**
+ * xe_sriov_pf_provision_query_vf_pt() - Query VF's preemption timeout.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier
+ * @pt: placeholder for the returned preemption timeout in [us]
+ *
+ * Query VF's preemption timeout (PT) provisioning from all tiles/GTs.
+ * If values across tiles/GTs are inconsistent, -EUCLEAN is returned.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_query_vf_pt(struct xe_device *xe, unsigned int vfid, u32 *pt)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int count = 0;
+ u32 value;
+
+ guard(mutex)(xe_sriov_pf_master_mutex(xe));
+
+ for_each_gt(gt, xe, id) {
+ value = xe_gt_sriov_pf_config_get_preempt_timeout_locked(gt, vfid);
+ if (!count++)
+ *pt = value;
+ else if (value != *pt)
+ return pf_report_unclean(gt, vfid, "PT", value, *pt);
+ }
+
+ return !count ? -ENODATA : 0;
+}
+
+/**
+ * xe_sriov_pf_provision_bulk_apply_priority() - Change scheduling priority of all VFs and PF.
+ * @xe: the PF &xe_device
+ * @prio: scheduling priority to set
+ *
+ * Change the scheduling priority provisioning on all tiles/GTs.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_bulk_apply_priority(struct xe_device *xe, u32 prio)
+{
+ bool sched_if_idle;
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ /*
+ * Currently, priority changes that involves VFs are only allowed using
+ * the 'sched_if_idle' policy KLV, so only LOW and NORMAL are supported.
+ */
+ xe_assert(xe, prio < GUC_SCHED_PRIORITY_HIGH);
+ sched_if_idle = prio == GUC_SCHED_PRIORITY_NORMAL;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_policy_set_sched_if_idle(gt, sched_if_idle);
+ result = result ?: err;
+ }
+
+ return result;
+}
+
+/**
+ * xe_sriov_pf_provision_apply_vf_priority() - Change VF's scheduling priority.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier
+ * @prio: scheduling priority to set
+ *
+ * Change VF's scheduling priority provisioning on all tiles/GTs.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_apply_vf_priority(struct xe_device *xe, unsigned int vfid, u32 prio)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int result = 0;
+ int err;
+
+ for_each_gt(gt, xe, id) {
+ err = xe_gt_sriov_pf_config_set_sched_priority(gt, vfid, prio);
+ result = result ?: err;
+ }
+
+ return result;
+}
+
+/**
+ * xe_sriov_pf_provision_query_vf_priority() - Query VF's scheduling priority.
+ * @xe: the PF &xe_device
+ * @vfid: the VF identifier
+ * @prio: placeholder for the returned scheduling priority
+ *
+ * Query VF's scheduling priority provisioning from all tiles/GTs.
+ * If the values across tiles/GTs are inconsistent, then the -EUCLEAN error will be returned.
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_provision_query_vf_priority(struct xe_device *xe, unsigned int vfid, u32 *prio)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+ int count = 0;
+ u32 value;
+
+ for_each_gt(gt, xe, id) {
+ value = xe_gt_sriov_pf_config_get_sched_priority(gt, vfid);
+ if (!count++)
+ *prio = value;
+ else if (value != *prio)
+ return pf_report_unclean(gt, vfid, "priority", value, *prio);
+ }
+
+ return !count ? -ENODATA : 0;
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_provision.h b/drivers/gpu/drm/xe/xe_sriov_pf_provision.h
new file mode 100644
index 000000000000..bccf23d51396
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_provision.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_PROVISION_H_
+#define _XE_SRIOV_PF_PROVISION_H_
+
+#include <linux/types.h>
+
+#include "xe_sriov_pf_provision_types.h"
+
+struct xe_device;
+
+int xe_sriov_pf_provision_bulk_apply_eq(struct xe_device *xe, u32 eq);
+int xe_sriov_pf_provision_apply_vf_eq(struct xe_device *xe, unsigned int vfid, u32 eq);
+int xe_sriov_pf_provision_query_vf_eq(struct xe_device *xe, unsigned int vfid, u32 *eq);
+
+int xe_sriov_pf_provision_bulk_apply_pt(struct xe_device *xe, u32 pt);
+int xe_sriov_pf_provision_apply_vf_pt(struct xe_device *xe, unsigned int vfid, u32 pt);
+int xe_sriov_pf_provision_query_vf_pt(struct xe_device *xe, unsigned int vfid, u32 *pt);
+
+int xe_sriov_pf_provision_bulk_apply_priority(struct xe_device *xe, u32 prio);
+int xe_sriov_pf_provision_apply_vf_priority(struct xe_device *xe, unsigned int vfid, u32 prio);
+int xe_sriov_pf_provision_query_vf_priority(struct xe_device *xe, unsigned int vfid, u32 *prio);
+
+int xe_sriov_pf_provision_vfs(struct xe_device *xe, unsigned int num_vfs);
+int xe_sriov_pf_unprovision_vfs(struct xe_device *xe, unsigned int num_vfs);
+
+int xe_sriov_pf_provision_set_mode(struct xe_device *xe, enum xe_sriov_provisioning_mode mode);
+
+/**
+ * xe_sriov_pf_provision_set_custom_mode() - Change VFs provision mode to custom.
+ * @xe: the PF &xe_device
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+static inline int xe_sriov_pf_provision_set_custom_mode(struct xe_device *xe)
+{
+ return xe_sriov_pf_provision_set_mode(xe, XE_SRIOV_PROVISIONING_MODE_CUSTOM);
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h
new file mode 100644
index 000000000000..a847b8a4c4da
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_provision_types.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_PROVISION_TYPES_H_
+#define _XE_SRIOV_PF_PROVISION_TYPES_H_
+
+#include <linux/build_bug.h>
+
+/**
+ * enum xe_sriov_provisioning_mode - SR-IOV provisioning mode.
+ *
+ * @XE_SRIOV_PROVISIONING_MODE_AUTO: VFs are provisioned while they are being
+ *                                  enabled. Any resources allocated to the VFs
+ *                                  will be automatically released when the VFs
+ *                                  are disabled. This is the default mode.
+ * @XE_SRIOV_PROVISIONING_MODE_CUSTOM: Explicit VFs provisioning using uABI
+ *                                  interfaces. VF resources remain allocated
+ *                                  regardless of whether the VFs are enabled.
+ */
+enum xe_sriov_provisioning_mode {
+ XE_SRIOV_PROVISIONING_MODE_AUTO,
+ XE_SRIOV_PROVISIONING_MODE_CUSTOM,
+};
+static_assert(XE_SRIOV_PROVISIONING_MODE_AUTO == 0);
+
+/**
+ * struct xe_sriov_pf_provision - Data used by the PF provisioning.
+ */
+struct xe_sriov_pf_provision {
+ /** @mode: selected provisioning mode. */
+ enum xe_sriov_provisioning_mode mode;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c
new file mode 100644
index 000000000000..c0b767ac735c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.c
@@ -0,0 +1,647 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include <drm/drm_managed.h>
+
+#include "xe_assert.h"
+#include "xe_pci_sriov.h"
+#include "xe_pm.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_provision.h"
+#include "xe_sriov_pf_sysfs.h"
+#include "xe_sriov_printk.h"
+
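+/*
+ * Print a list of choices with the active one bracketed, following the usual
+ * sysfs convention, e.g. "low [normal] high\n" when @choice is 1.
+ */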
+static int emit_choice(char *buf, int choice, const char * const *array, size_t size)
+{
+ int pos = 0;
+ int n;
+
+ for (n = 0; n < size; n++) {
+ pos += sysfs_emit_at(buf, pos, "%s%s%s%s",
+ n ? " " : "",
+ n == choice ? "[" : "",
+ array[n],
+ n == choice ? "]" : "");
+ }
+ pos += sysfs_emit_at(buf, pos, "\n");
+
+ return pos;
+}
+
+/*
+ * /sys/bus/pci/drivers/xe/BDF/
+ * :
+ * ├── sriov_admin/
+ * ├── ...
+ * ├── .bulk_profile
+ * │ ├── exec_quantum_ms
+ * │ ├── preempt_timeout_us
+ * │ └── sched_priority
+ * ├── pf/
+ * │ ├── ...
+ * │ ├── device -> ../../../BDF
+ * │ └── profile
+ * │ ├── exec_quantum_ms
+ * │ ├── preempt_timeout_us
+ * │ └── sched_priority
+ * ├── vf1/
+ * │ ├── ...
+ * │ ├── device -> ../../../BDF.1
+ * │ ├── stop
+ * │ └── profile
+ * │ ├── exec_quantum_ms
+ * │ ├── preempt_timeout_us
+ * │ └── sched_priority
+ * ├── vf2/
+ * :
+ * └── vfN/
+ */
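+
+/*
+ * Illustrative usage sketch only (the BDF and values below are hypothetical
+ * examples, not part of this patch): an admin could provision VF1 with plain
+ * file I/O, e.g.:
+ *
+ *	int fd = open("/sys/bus/pci/drivers/xe/0000:03:00.0/sriov_admin/vf1/profile/exec_quantum_ms", O_WRONLY);
+ *	if (fd >= 0) {
+ *		write(fd, "20", 2);	// request a 20 ms execution quantum
+ *		close(fd);
+ *	}
+ */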
+
+struct xe_sriov_kobj {
+ struct kobject base;
+ struct xe_device *xe;
+ unsigned int vfid;
+};
+#define to_xe_sriov_kobj(p) container_of_const((p), struct xe_sriov_kobj, base)
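+/* @vfid == PFID (0) denotes the "pf/" node; @vfid > 0 maps to "vf%u/". */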
+
+struct xe_sriov_dev_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct xe_device *xe, char *buf);
+ ssize_t (*store)(struct xe_device *xe, const char *buf, size_t count);
+};
+#define to_xe_sriov_dev_attr(p) container_of_const((p), struct xe_sriov_dev_attr, attr)
+
+#define XE_SRIOV_DEV_ATTR(NAME) \
+struct xe_sriov_dev_attr xe_sriov_dev_attr_##NAME = \
+ __ATTR(NAME, 0644, xe_sriov_dev_attr_##NAME##_show, xe_sriov_dev_attr_##NAME##_store)
+
+#define XE_SRIOV_DEV_ATTR_RO(NAME) \
+struct xe_sriov_dev_attr xe_sriov_dev_attr_##NAME = \
+ __ATTR(NAME, 0444, xe_sriov_dev_attr_##NAME##_show, NULL)
+
+#define XE_SRIOV_DEV_ATTR_WO(NAME) \
+struct xe_sriov_dev_attr xe_sriov_dev_attr_##NAME = \
+ __ATTR(NAME, 0200, NULL, xe_sriov_dev_attr_##NAME##_store)
+
+struct xe_sriov_vf_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct xe_device *xe, unsigned int vfid, char *buf);
+ ssize_t (*store)(struct xe_device *xe, unsigned int vfid, const char *buf, size_t count);
+};
+#define to_xe_sriov_vf_attr(p) container_of_const((p), struct xe_sriov_vf_attr, attr)
+
+#define XE_SRIOV_VF_ATTR(NAME) \
+struct xe_sriov_vf_attr xe_sriov_vf_attr_##NAME = \
+ __ATTR(NAME, 0644, xe_sriov_vf_attr_##NAME##_show, xe_sriov_vf_attr_##NAME##_store)
+
+#define XE_SRIOV_VF_ATTR_RO(NAME) \
+struct xe_sriov_vf_attr xe_sriov_vf_attr_##NAME = \
+ __ATTR(NAME, 0444, xe_sriov_vf_attr_##NAME##_show, NULL)
+
+#define XE_SRIOV_VF_ATTR_WO(NAME) \
+struct xe_sriov_vf_attr xe_sriov_vf_attr_##NAME = \
+ __ATTR(NAME, 0200, NULL, xe_sriov_vf_attr_##NAME##_store)
+
+/* device level attributes go here */
+
+#define DEFINE_SIMPLE_BULK_PROVISIONING_SRIOV_DEV_ATTR_WO(NAME, ITEM, TYPE) \
+ \
+static ssize_t xe_sriov_dev_attr_##NAME##_store(struct xe_device *xe, \
+ const char *buf, size_t count) \
+{ \
+ TYPE value; \
+ int err; \
+ \
+ err = kstrto##TYPE(buf, 0, &value); \
+ if (err) \
+ return err; \
+ \
+ err = xe_sriov_pf_provision_bulk_apply_##ITEM(xe, value); \
+ return err ?: count; \
+} \
+ \
+static XE_SRIOV_DEV_ATTR_WO(NAME)
+
+DEFINE_SIMPLE_BULK_PROVISIONING_SRIOV_DEV_ATTR_WO(exec_quantum_ms, eq, u32);
+DEFINE_SIMPLE_BULK_PROVISIONING_SRIOV_DEV_ATTR_WO(preempt_timeout_us, pt, u32);
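+
+/*
+ * Each use above expands to a write-only store callback; a sketch of the
+ * generated code for exec_quantum_ms:
+ *
+ *	static ssize_t xe_sriov_dev_attr_exec_quantum_ms_store(struct xe_device *xe,
+ *							       const char *buf, size_t count)
+ *	{
+ *		u32 value;
+ *		int err;
+ *
+ *		err = kstrtou32(buf, 0, &value);
+ *		if (err)
+ *			return err;
+ *		err = xe_sriov_pf_provision_bulk_apply_eq(xe, value);
+ *		return err ?: count;
+ *	}
+ */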
+
+static const char * const sched_priority_names[] = {
+ [GUC_SCHED_PRIORITY_LOW] = "low",
+ [GUC_SCHED_PRIORITY_NORMAL] = "normal",
+ [GUC_SCHED_PRIORITY_HIGH] = "high",
+};
+
+static bool sched_priority_change_allowed(unsigned int vfid)
+{
+ /* As of today, GuC FW only allows selectively changing the PF priority. */
+ return vfid == PFID;
+}
+
+static bool sched_priority_high_allowed(unsigned int vfid)
+{
+ /* As of today, GuC FW allows selecting 'high' priority only for the PF. */
+ return vfid == PFID;
+}
+
+static bool sched_priority_bulk_high_allowed(struct xe_device *xe)
+{
+ /* all VFs are equal - it's sufficient to check VF1 only */
+ return sched_priority_high_allowed(VFID(1));
+}
+
+static ssize_t xe_sriov_dev_attr_sched_priority_store(struct xe_device *xe,
+ const char *buf, size_t count)
+{
+ size_t num_priorities = ARRAY_SIZE(sched_priority_names);
+ int match;
+ int err;
+
+ if (!sched_priority_bulk_high_allowed(xe))
+ num_priorities--;
+
+ match = __sysfs_match_string(sched_priority_names, num_priorities, buf);
+ if (match < 0)
+ return -EINVAL;
+
+ err = xe_sriov_pf_provision_bulk_apply_priority(xe, match);
+ return err ?: count;
+}
+
+static XE_SRIOV_DEV_ATTR_WO(sched_priority);
+
+static struct attribute *bulk_profile_dev_attrs[] = {
+ &xe_sriov_dev_attr_exec_quantum_ms.attr,
+ &xe_sriov_dev_attr_preempt_timeout_us.attr,
+ &xe_sriov_dev_attr_sched_priority.attr,
+ NULL
+};
+
+static const struct attribute_group bulk_profile_dev_attr_group = {
+ .name = ".bulk_profile",
+ .attrs = bulk_profile_dev_attrs,
+};
+
+static const struct attribute_group *xe_sriov_dev_attr_groups[] = {
+ &bulk_profile_dev_attr_group,
+ NULL
+};
+
+/* and VF-level attributes go here */
+
+#define DEFINE_SIMPLE_PROVISIONING_SRIOV_VF_ATTR(NAME, ITEM, TYPE, FORMAT) \
+static ssize_t xe_sriov_vf_attr_##NAME##_show(struct xe_device *xe, unsigned int vfid, \
+ char *buf) \
+{ \
+ TYPE value = 0; \
+ int err; \
+ \
+ err = xe_sriov_pf_provision_query_vf_##ITEM(xe, vfid, &value); \
+ if (err) \
+ return err; \
+ \
+ return sysfs_emit(buf, FORMAT, value); \
+} \
+ \
+static ssize_t xe_sriov_vf_attr_##NAME##_store(struct xe_device *xe, unsigned int vfid, \
+ const char *buf, size_t count) \
+{ \
+ TYPE value; \
+ int err; \
+ \
+ err = kstrto##TYPE(buf, 0, &value); \
+ if (err) \
+ return err; \
+ \
+ err = xe_sriov_pf_provision_apply_vf_##ITEM(xe, vfid, value); \
+ return err ?: count; \
+} \
+ \
+static XE_SRIOV_VF_ATTR(NAME)
+
+DEFINE_SIMPLE_PROVISIONING_SRIOV_VF_ATTR(exec_quantum_ms, eq, u32, "%u\n");
+DEFINE_SIMPLE_PROVISIONING_SRIOV_VF_ATTR(preempt_timeout_us, pt, u32, "%u\n");
+
+static ssize_t xe_sriov_vf_attr_sched_priority_show(struct xe_device *xe, unsigned int vfid,
+ char *buf)
+{
+ size_t num_priorities = ARRAY_SIZE(sched_priority_names);
+ u32 priority;
+ int err;
+
+ err = xe_sriov_pf_provision_query_vf_priority(xe, vfid, &priority);
+ if (err)
+ return err;
+
+ if (!sched_priority_high_allowed(vfid))
+ num_priorities--;
+
+ xe_assert(xe, priority < num_priorities);
+ return emit_choice(buf, priority, sched_priority_names, num_priorities);
+}
+
+static ssize_t xe_sriov_vf_attr_sched_priority_store(struct xe_device *xe, unsigned int vfid,
+ const char *buf, size_t count)
+{
+ size_t num_priorities = ARRAY_SIZE(sched_priority_names);
+ int match;
+ int err;
+
+ if (!sched_priority_change_allowed(vfid))
+ return -EOPNOTSUPP;
+
+ if (!sched_priority_high_allowed(vfid))
+ num_priorities--;
+
+ match = __sysfs_match_string(sched_priority_names, num_priorities, buf);
+ if (match < 0)
+ return -EINVAL;
+
+ err = xe_sriov_pf_provision_apply_vf_priority(xe, vfid, match);
+ return err ?: count;
+}
+
+static XE_SRIOV_VF_ATTR(sched_priority);
+
+static struct attribute *profile_vf_attrs[] = {
+ &xe_sriov_vf_attr_exec_quantum_ms.attr,
+ &xe_sriov_vf_attr_preempt_timeout_us.attr,
+ &xe_sriov_vf_attr_sched_priority.attr,
+ NULL
+};
+
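+/*
+ * sched_priority is exposed read-only on VF nodes since the GuC currently
+ * allows selectively changing the priority of the PF only (see
+ * sched_priority_change_allowed() above).
+ */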
+static umode_t profile_vf_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj);
+
+ if (attr == &xe_sriov_vf_attr_sched_priority.attr &&
+ !sched_priority_change_allowed(vkobj->vfid))
+ return attr->mode & 0444;
+
+ return attr->mode;
+}
+
+static const struct attribute_group profile_vf_attr_group = {
+ .name = "profile",
+ .attrs = profile_vf_attrs,
+ .is_visible = profile_vf_attr_is_visible,
+};
+
+#define DEFINE_SIMPLE_CONTROL_SRIOV_VF_ATTR(NAME) \
+ \
+static ssize_t xe_sriov_vf_attr_##NAME##_store(struct xe_device *xe, unsigned int vfid, \
+ const char *buf, size_t count) \
+{ \
+ bool yes; \
+ int err; \
+ \
+ if (!vfid) \
+ return -EPERM; \
+ \
+ err = kstrtobool(buf, &yes); \
+ if (err) \
+ return err; \
+ if (!yes) \
+ return count; \
+ \
+ err = xe_sriov_pf_control_##NAME##_vf(xe, vfid); \
+ return err ?: count; \
+} \
+ \
+static XE_SRIOV_VF_ATTR_WO(NAME)
+
+DEFINE_SIMPLE_CONTROL_SRIOV_VF_ATTR(stop);
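+
+/*
+ * For example (hypothetical path), `echo 1 > .../sriov_admin/vf1/stop` ends up
+ * calling xe_sriov_pf_control_stop_vf(xe, 1), while writing "0" is a no-op.
+ */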
+
+static struct attribute *control_vf_attrs[] = {
+ &xe_sriov_vf_attr_stop.attr,
+ NULL
+};
+
+static umode_t control_vf_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj);
+
+ if (vkobj->vfid == PFID)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group control_vf_attr_group = {
+ .attrs = control_vf_attrs,
+ .is_visible = control_vf_attr_is_visible,
+};
+
+static const struct attribute_group *xe_sriov_vf_attr_groups[] = {
+ &profile_vf_attr_group,
+ &control_vf_attr_group,
+ NULL
+};
+
+/* no user serviceable parts below */
+
+static struct kobject *create_xe_sriov_kobj(struct xe_device *xe, unsigned int vfid)
+{
+ struct xe_sriov_kobj *vkobj;
+
+ xe_sriov_pf_assert_vfid(xe, vfid);
+
+ vkobj = kzalloc(sizeof(*vkobj), GFP_KERNEL);
+ if (!vkobj)
+ return NULL;
+
+ vkobj->xe = xe;
+ vkobj->vfid = vfid;
+ return &vkobj->base;
+}
+
+static void release_xe_sriov_kobj(struct kobject *kobj)
+{
+ struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj);
+
+ kfree(vkobj);
+}
+
+static ssize_t xe_sriov_dev_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct xe_sriov_dev_attr *vattr = to_xe_sriov_dev_attr(attr);
+ struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj);
+ struct xe_device *xe = vkobj->xe;
+
+ if (!vattr->show)
+ return -EPERM;
+
+ return vattr->show(xe, buf);
+}
+
+static ssize_t xe_sriov_dev_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_sriov_dev_attr *vattr = to_xe_sriov_dev_attr(attr);
+ struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj);
+ struct xe_device *xe = vkobj->xe;
+ ssize_t ret;
+
+ if (!vattr->store)
+ return -EPERM;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_sriov_pf_wait_ready(xe) ?: vattr->store(xe, buf, count);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static ssize_t xe_sriov_vf_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ struct xe_sriov_vf_attr *vattr = to_xe_sriov_vf_attr(attr);
+ struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj);
+ struct xe_device *xe = vkobj->xe;
+ unsigned int vfid = vkobj->vfid;
+
+ xe_sriov_pf_assert_vfid(xe, vfid);
+
+ if (!vattr->show)
+ return -EPERM;
+
+ return vattr->show(xe, vfid, buf);
+}
+
+static ssize_t xe_sriov_vf_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct xe_sriov_vf_attr *vattr = to_xe_sriov_vf_attr(attr);
+ struct xe_sriov_kobj *vkobj = to_xe_sriov_kobj(kobj);
+ struct xe_device *xe = vkobj->xe;
+ unsigned int vfid = vkobj->vfid;
+ ssize_t ret;
+
+ xe_sriov_pf_assert_vfid(xe, vfid);
+
+ if (!vattr->store)
+ return -EPERM;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_sriov_pf_wait_ready(xe) ?: vattr->store(xe, vfid, buf, count);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static const struct sysfs_ops xe_sriov_dev_sysfs_ops = {
+ .show = xe_sriov_dev_attr_show,
+ .store = xe_sriov_dev_attr_store,
+};
+
+static const struct sysfs_ops xe_sriov_vf_sysfs_ops = {
+ .show = xe_sriov_vf_attr_show,
+ .store = xe_sriov_vf_attr_store,
+};
+
+static const struct kobj_type xe_sriov_dev_ktype = {
+ .release = release_xe_sriov_kobj,
+ .sysfs_ops = &xe_sriov_dev_sysfs_ops,
+ .default_groups = xe_sriov_dev_attr_groups,
+};
+
+static const struct kobj_type xe_sriov_vf_ktype = {
+ .release = release_xe_sriov_kobj,
+ .sysfs_ops = &xe_sriov_vf_sysfs_ops,
+ .default_groups = xe_sriov_vf_attr_groups,
+};
+
+static int pf_sysfs_error(struct xe_device *xe, int err, const char *what)
+{
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ xe_sriov_dbg(xe, "Failed to setup sysfs %s (%pe)\n", what, ERR_PTR(err));
+ return err;
+}
+
+static void pf_sysfs_note(struct xe_device *xe, int err, const char *what)
+{
+ xe_sriov_dbg(xe, "Failed to setup sysfs %s (%pe)\n", what, ERR_PTR(err));
+}
+
+static void action_put_kobject(void *arg)
+{
+ struct kobject *kobj = arg;
+
+ kobject_put(kobj);
+}
+
+static int pf_setup_root(struct xe_device *xe)
+{
+ struct kobject *parent = &xe->drm.dev->kobj;
+ struct kobject *root;
+ int err;
+
+ root = create_xe_sriov_kobj(xe, PFID);
+ if (!root)
+ return pf_sysfs_error(xe, -ENOMEM, "root obj");
+
+ err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, root);
+ if (err)
+ return pf_sysfs_error(xe, err, "root action");
+
+ err = kobject_init_and_add(root, &xe_sriov_dev_ktype, parent, "sriov_admin");
+ if (err)
+ return pf_sysfs_error(xe, err, "root init");
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, !xe->sriov.pf.sysfs.root);
+ xe->sriov.pf.sysfs.root = root;
+ return 0;
+}
+
+static int pf_setup_tree(struct xe_device *xe)
+{
+ unsigned int totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ struct kobject *root, *kobj;
+ unsigned int n;
+ int err;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ root = xe->sriov.pf.sysfs.root;
+
+ for (n = 0; n <= totalvfs; n++) {
+ kobj = create_xe_sriov_kobj(xe, VFID(n));
+ if (!kobj)
+ return pf_sysfs_error(xe, -ENOMEM, "tree obj");
+
+ err = devm_add_action_or_reset(xe->drm.dev, action_put_kobject, kobj);
+ if (err)
+ return pf_sysfs_error(xe, err, "tree action");
+
+ if (n)
+ err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype,
+ root, "vf%u", n);
+ else
+ err = kobject_init_and_add(kobj, &xe_sriov_vf_ktype,
+ root, "pf");
+ if (err)
+ return pf_sysfs_error(xe, err, "tree init");
+
+ xe_assert(xe, !xe->sriov.pf.vfs[n].kobj);
+ xe->sriov.pf.vfs[n].kobj = kobj;
+ }
+
+ return 0;
+}
+
+static void action_rm_device_link(void *arg)
+{
+ struct kobject *kobj = arg;
+
+ sysfs_remove_link(kobj, "device");
+}
+
+static int pf_link_pf_device(struct xe_device *xe)
+{
+ struct kobject *kobj = xe->sriov.pf.vfs[PFID].kobj;
+ int err;
+
+ err = sysfs_create_link(kobj, &xe->drm.dev->kobj, "device");
+ if (err)
+ return pf_sysfs_error(xe, err, "PF device link");
+
+ err = devm_add_action_or_reset(xe->drm.dev, action_rm_device_link, kobj);
+ if (err)
+ return pf_sysfs_error(xe, err, "PF unlink action");
+
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_sysfs_init() - Setup PF's SR-IOV sysfs tree.
+ * @xe: the PF &xe_device to setup sysfs
+ *
+ * This function will create additional nodes that will represent the PF and
+ * VF devices, each populated with Xe-specific SR-IOV attributes.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_sriov_pf_sysfs_init(struct xe_device *xe)
+{
+ int err;
+
+ err = pf_setup_root(xe);
+ if (err)
+ return err;
+
+ err = pf_setup_tree(xe);
+ if (err)
+ return err;
+
+ err = pf_link_pf_device(xe);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * xe_sriov_pf_sysfs_link_vfs() - Add VF's links in SR-IOV sysfs tree.
+ * @xe: the &xe_device where to update sysfs
+ * @num_vfs: number of enabled VFs to link
+ *
+ * This function is specific to the PF driver.
+ *
+ * This function will add symbolic links between VFs represented in the SR-IOV
+ * sysfs tree maintained by the PF and enabled VF PCI devices.
+ *
+ * xe_sriov_pf_sysfs_unlink_vfs() shall be used to remove those links.
+ */
+void xe_sriov_pf_sysfs_link_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ unsigned int totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ struct pci_dev *pf_pdev = to_pci_dev(xe->drm.dev);
+ struct pci_dev *vf_pdev = NULL;
+ unsigned int n;
+ int err;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, num_vfs <= totalvfs);
+
+ for (n = 1; n <= num_vfs; n++) {
+ vf_pdev = xe_pci_sriov_get_vf_pdev(pf_pdev, VFID(n));
+ if (!vf_pdev)
+ return pf_sysfs_note(xe, -ENOENT, "VF link");
+
+ err = sysfs_create_link(xe->sriov.pf.vfs[VFID(n)].kobj,
+ &vf_pdev->dev.kobj, "device");
+
+ /* must balance xe_pci_sriov_get_vf_pdev() */
+ pci_dev_put(vf_pdev);
+
+ if (err)
+ return pf_sysfs_note(xe, err, "VF link");
+ }
+}
+
+/**
+ * xe_sriov_pf_sysfs_unlink_vfs() - Remove VF's links from SR-IOV sysfs tree.
+ * @xe: the &xe_device where to update sysfs
+ * @num_vfs: number of VFs to unlink
+ *
+ * This function shall be called only on the PF.
+ * This function will remove "device" links added by @xe_sriov_sysfs_link_vfs().
+ */
+void xe_sriov_pf_sysfs_unlink_vfs(struct xe_device *xe, unsigned int num_vfs)
+{
+ unsigned int n;
+
+ xe_assert(xe, IS_SRIOV_PF(xe));
+ xe_assert(xe, num_vfs <= xe_sriov_pf_get_totalvfs(xe));
+
+ for (n = 1; n <= num_vfs; n++)
+ sysfs_remove_link(xe->sriov.pf.vfs[VFID(n)].kobj, "device");
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h
new file mode 100644
index 000000000000..ae92ed1766e7
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_sysfs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_PF_SYSFS_H_
+#define _XE_SRIOV_PF_SYSFS_H_
+
+struct xe_device;
+
+int xe_sriov_pf_sysfs_init(struct xe_device *xe);
+
+void xe_sriov_pf_sysfs_link_vfs(struct xe_device *xe, unsigned int num_vfs);
+void xe_sriov_pf_sysfs_unlink_vfs(struct xe_device *xe, unsigned int num_vfs);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
index 956a88f9f213..b0253e1ae5da 100644
--- a/drivers/gpu/drm/xe/xe_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h
@@ -9,14 +9,24 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include "xe_guard.h"
+#include "xe_sriov_pf_migration_types.h"
+#include "xe_sriov_pf_provision_types.h"
#include "xe_sriov_pf_service_types.h"
+struct kobject;
+
/**
* struct xe_sriov_metadata - per-VF device level metadata
*/
struct xe_sriov_metadata {
+ /** @kobj: kobject representing VF in PF's SR-IOV sysfs tree. */
+ struct kobject *kobj;
+
/** @version: negotiated VF/PF ABI version */
struct xe_sriov_pf_service_version version;
+ /** @migration: migration state */
+ struct xe_sriov_migration_state migration;
};
/**
@@ -32,12 +42,27 @@ struct xe_device_pf {
/** @driver_max_vfs: Maximum number of VFs supported by the driver. */
u16 driver_max_vfs;
+ /** @guard_vfs_enabling: guards VFs enabling */
+ struct xe_guard guard_vfs_enabling;
+
/** @master_lock: protects all VFs configurations across GTs */
struct mutex master_lock;
+ /** @provision: device level provisioning data. */
+ struct xe_sriov_pf_provision provision;
+
+ /** @migration: device level migration data. */
+ struct xe_sriov_pf_migration migration;
+
/** @service: device level service data. */
struct xe_sriov_pf_service service;
+ /** @sysfs: device level sysfs data. */
+ struct {
+ /** @sysfs.root: the root kobject for all SR-IOV entries in sysfs. */
+ struct kobject *root;
+ } sysfs;
+
/** @vfs: metadata for all VFs. */
struct xe_sriov_metadata *vfs;
};
diff --git a/drivers/gpu/drm/xe/xe_sriov_printk.h b/drivers/gpu/drm/xe/xe_sriov_printk.h
index 117e1d541692..4c6b5c3d2190 100644
--- a/drivers/gpu/drm/xe/xe_sriov_printk.h
+++ b/drivers/gpu/drm/xe/xe_sriov_printk.h
@@ -1,22 +1,22 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2023 Intel Corporation
+ * Copyright © 2023-2025 Intel Corporation
*/
#ifndef _XE_SRIOV_PRINTK_H_
#define _XE_SRIOV_PRINTK_H_
-#include <drm/drm_print.h>
-
-#include "xe_device_types.h"
-#include "xe_sriov_types.h"
+#include "xe_printk.h"
#define xe_sriov_printk_prefix(xe) \
((xe)->sriov.__mode == XE_SRIOV_MODE_PF ? "PF: " : \
(xe)->sriov.__mode == XE_SRIOV_MODE_VF ? "VF: " : "")
+#define __XE_SRIOV_PRINTK_FMT(_xe, _fmt, _args...) \
+ "%s" _fmt, xe_sriov_printk_prefix(_xe), ##_args
+
#define xe_sriov_printk(xe, _level, fmt, ...) \
- drm_##_level(&(xe)->drm, "%s" fmt, xe_sriov_printk_prefix(xe), ##__VA_ARGS__)
+ xe_##_level((xe), __XE_SRIOV_PRINTK_FMT((xe), fmt, ##__VA_ARGS__))
#define xe_sriov_err(xe, fmt, ...) \
xe_sriov_printk((xe), err, fmt, ##__VA_ARGS__)
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index 26e243c28994..284ce37ca92d 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -3,19 +3,15 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <drm/drm_debugfs.h>
#include <drm/drm_managed.h>
-#include "xe_assert.h"
-#include "xe_device.h"
#include "xe_gt.h"
-#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
-#include "xe_guc_ct.h"
-#include "xe_pm.h"
-#include "xe_sriov.h"
+#include "xe_guc.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
-#include "xe_tile_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
/**
* DOC: VF restore procedure in PF KMD and VF KMD
@@ -124,185 +120,92 @@
* | | |
*/
-static bool vf_migration_supported(struct xe_device *xe)
+/**
+ * xe_sriov_vf_migration_supported - Report whether SR-IOV VF migration is
+ * supported or not.
+ * @xe: the &xe_device to check
+ *
+ * Returns: true if VF migration is supported, false otherwise.
+ */
+bool xe_sriov_vf_migration_supported(struct xe_device *xe)
{
- /*
- * TODO: Add conditions to allow specific platforms, when they're
- * supported at production quality.
- */
- return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+ xe_assert(xe, IS_SRIOV_VF(xe));
+ return !xe->sriov.vf.migration.disabled;
}
-static void migration_worker_func(struct work_struct *w);
-
/**
- * xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
- * @xe: the &xe_device to initialize
+ * xe_sriov_vf_migration_disable - Turn off VF migration with the given log message.
+ * @xe: the &xe_device instance.
+ * @fmt: format string for the log message, to be combined with the following VAs.
*/
-void xe_sriov_vf_init_early(struct xe_device *xe)
+void xe_sriov_vf_migration_disable(struct xe_device *xe, const char *fmt, ...)
{
- INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+ struct va_format vaf;
+ va_list va_args;
- if (!vf_migration_supported(xe))
- xe_sriov_info(xe, "migration not supported by this module version\n");
-}
+ xe_assert(xe, IS_SRIOV_VF(xe));
-static bool gt_vf_post_migration_needed(struct xe_gt *gt)
-{
- return test_bit(gt->info.id, &gt_to_xe(gt)->sriov.vf.migration.gt_flags);
+ va_start(va_args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &va_args;
+ xe_sriov_notice(xe, "migration disabled: %pV\n", &vaf);
+ va_end(va_args);
+
+ xe->sriov.vf.migration.disabled = true;
}
-/*
- * Notify GuCs marked in flags about resource fixups apply finished.
- * @xe: the &xe_device struct instance
- * @gt_flags: flags marking to which GTs the notification shall be sent
- */
-static int vf_post_migration_notify_resfix_done(struct xe_device *xe, unsigned long gt_flags)
+static void vf_migration_init_early(struct xe_device *xe)
{
- struct xe_gt *gt;
- unsigned int id;
- int err = 0;
-
- for_each_gt(gt, xe, id) {
- if (!test_bit(id, &gt_flags))
- continue;
- /* skip asking GuC for RESFIX exit if new recovery request arrived */
- if (gt_vf_post_migration_needed(gt))
- continue;
- err = xe_gt_sriov_vf_notify_resfix_done(gt);
- if (err)
- break;
- clear_bit(id, &gt_flags);
- }
+ if (!xe_device_has_memirq(xe))
+ return xe_sriov_vf_migration_disable(xe, "requires memory-based IRQ support");
- if (gt_flags && !err)
- drm_dbg(&xe->drm, "another recovery imminent, skipped some notifications\n");
- return err;
}
-static int vf_get_next_migrated_gt_id(struct xe_device *xe)
+/**
+ * xe_sriov_vf_init_early - Initialize SR-IOV VF specific data.
+ * @xe: the &xe_device to initialize
+ */
+void xe_sriov_vf_init_early(struct xe_device *xe)
{
- struct xe_gt *gt;
- unsigned int id;
-
- for_each_gt(gt, xe, id) {
- if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags))
- return id;
- }
- return -1;
+ vf_migration_init_early(xe);
}
/**
- * Perform post-migration fixups on a single GT.
+ * xe_sriov_vf_init_late() - SR-IOV VF late initialization.
+ * @xe: the &xe_device to initialize
*
- * After migration, GuC needs to be re-queried for VF configuration to check
- * if it matches previous provisioning. Most of VF provisioning shall be the
- * same, except GGTT range, since GGTT is not virtualized per-VF. If GGTT
- * range has changed, we have to perform fixups - shift all GGTT references
- * used anywhere within the driver. After the fixups in this function succeed,
- * it is allowed to ask the GuC bound to this GT to continue normal operation.
+ * This function initializes code for CCS migration.
*
- * Returns: 0 if the operation completed successfully, or a negative error
- * code otherwise.
+ * Return: 0 on success or a negative error code on failure.
*/
-static int gt_vf_post_migration_fixups(struct xe_gt *gt)
-{
- s64 shift;
- int err;
-
- err = xe_gt_sriov_vf_query_config(gt);
- if (err)
- return err;
-
- shift = xe_gt_sriov_vf_ggtt_shift(gt);
- if (shift) {
- xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift);
- /* FIXME: add the recovery steps */
- xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
- }
- return 0;
-}
-
-static void vf_post_migration_recovery(struct xe_device *xe)
+int xe_sriov_vf_init_late(struct xe_device *xe)
{
- unsigned long fixed_gts = 0;
- int id, err;
-
- drm_dbg(&xe->drm, "migration recovery in progress\n");
- xe_pm_runtime_get(xe);
-
- if (!vf_migration_supported(xe)) {
- xe_sriov_err(xe, "migration not supported by this module version\n");
- err = -ENOTRECOVERABLE;
- goto fail;
- }
-
- while (id = vf_get_next_migrated_gt_id(xe), id >= 0) {
- struct xe_gt *gt = xe_device_get_gt(xe, id);
-
- err = gt_vf_post_migration_fixups(gt);
- if (err)
- goto fail;
-
- set_bit(id, &fixed_gts);
- }
-
- err = vf_post_migration_notify_resfix_done(xe, fixed_gts);
- if (err)
- goto fail;
-
- xe_pm_runtime_put(xe);
- drm_notice(&xe->drm, "migration recovery ended\n");
- return;
-fail:
- xe_pm_runtime_put(xe);
- drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err));
- xe_device_declare_wedged(xe);
+ return xe_sriov_vf_ccs_init(xe);
}
-static void migration_worker_func(struct work_struct *w)
+static int sa_info_vf_ccs(struct seq_file *m, void *data)
{
- struct xe_device *xe = container_of(w, struct xe_device,
- sriov.vf.migration.worker);
+ struct drm_info_node *node = m->private;
+ struct xe_device *xe = to_xe_device(node->minor->dev);
+ struct drm_printer p = drm_seq_file_printer(m);
- vf_post_migration_recovery(xe);
+ xe_sriov_vf_ccs_print(xe, &p);
+ return 0;
}
-/*
- * Check if post-restore recovery is coming on any of GTs.
- * @xe: the &xe_device struct instance
- *
- * Return: True if migration recovery worker will soon be running. Any worker currently
- * executing does not affect the result.
- */
-static bool vf_ready_to_recovery_on_any_gts(struct xe_device *xe)
-{
- struct xe_gt *gt;
- unsigned int id;
-
- for_each_gt(gt, xe, id) {
- if (test_bit(id, &xe->sriov.vf.migration.gt_flags))
- return true;
- }
- return false;
-}
+static const struct drm_info_list debugfs_list[] = {
+ { .name = "sa_info_vf_ccs", .show = sa_info_vf_ccs },
+};
/**
- * xe_sriov_vf_start_migration_recovery - Start VF migration recovery.
- * @xe: the &xe_device to start recovery on
+ * xe_sriov_vf_debugfs_register - Register VF debugfs attributes.
+ * @xe: the &xe_device
+ * @root: the root &dentry
*
- * This function shall be called only by VF.
+ * Prepare debugfs attributes exposed by the VF.
*/
-void xe_sriov_vf_start_migration_recovery(struct xe_device *xe)
+void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root)
{
- bool started;
-
- xe_assert(xe, IS_SRIOV_VF(xe));
-
- if (!vf_ready_to_recovery_on_any_gts(xe))
- return;
-
- started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker);
- drm_info(&xe->drm, "VF migration recovery %s\n", started ?
- "scheduled" : "already in progress");
+ drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list),
+ root, xe->drm.primary);
}
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.h b/drivers/gpu/drm/xe/xe_sriov_vf.h
index 7b8622cff2b7..e967d4166a43 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.h
@@ -6,9 +6,15 @@
#ifndef _XE_SRIOV_VF_H_
#define _XE_SRIOV_VF_H_
+#include <linux/types.h>
+
+struct dentry;
struct xe_device;
void xe_sriov_vf_init_early(struct xe_device *xe);
-void xe_sriov_vf_start_migration_recovery(struct xe_device *xe);
+int xe_sriov_vf_init_late(struct xe_device *xe);
+bool xe_sriov_vf_migration_supported(struct xe_device *xe);
+void xe_sriov_vf_migration_disable(struct xe_device *xe, const char *fmt, ...);
+void xe_sriov_vf_debugfs_register(struct xe_device *xe, struct dentry *root);
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
new file mode 100644
index 000000000000..797a4b866226
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.c
@@ -0,0 +1,480 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "instructions/xe_mi_commands.h"
+#include "instructions/xe_gpu_commands.h"
+#include "xe_bb.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_exec_queue.h"
+#include "xe_exec_queue_types.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_guc.h"
+#include "xe_guc_submit.h"
+#include "xe_lrc.h"
+#include "xe_migrate.h"
+#include "xe_pm.h"
+#include "xe_sa.h"
+#include "xe_sriov_printk.h"
+#include "xe_sriov_vf.h"
+#include "xe_sriov_vf_ccs.h"
+#include "xe_sriov_vf_ccs_types.h"
+
+/**
+ * DOC: VF save/restore of compression Meta Data
+ *
+ * VF KMD registers two special contexts/LRCAs.
+ *
+ * Save Context/LRCA: contains the commands and page tables needed to trigger
+ * a save of the metadata / compression control surface (aka CCS) into regular
+ * system memory in the VM.
+ *
+ * Restore Context/LRCA: contains the commands and page tables needed to
+ * restore the metadata / compression control surface (aka CCS) from regular
+ * system memory in the VM back into the corresponding CCS pool.
+ *
+ * The diagram below explains the steps needed for VF save/restore of the
+ * compression metadata::
+ *
+ * CCS Save CCS Restore VF KMD Guc BCS
+ * LRCA LRCA
+ * | | | | |
+ * | | | | |
+ * | Create Save LRCA | | |
+ * [ ]<----------------------------- [ ] | |
+ * | | | | |
+ * | | | | |
+ * | | | Register save LRCA | |
+ * | | | with Guc | |
+ * | | [ ]--------------------------->[ ] |
+ * | | | | |
+ * | | Create restore LRCA | | |
+ * | [ ]<------------------[ ] | |
+ * | | | | |
+ * | | | Register restore LRCA | |
+ * | | | with Guc | |
+ * | | [ ]--------------------------->[ ] |
+ * | | | | |
+ * | | | | |
+ * | | [ ]------------------------- | |
+ * | | [ ] Allocate main memory. | | |
+ * | | [ ] Allocate CCS memory. | | |
+ * | | [ ] Update Main memory & | | |
+ * [ ]<------------------------------[ ] CCS pages PPGTT + BB | | |
+ * | [ ]<------------------[ ] cmds to save & restore.| | |
+ * | | [ ]<------------------------ | |
+ * | | | | |
+ * | | | | |
+ * | | | | |
+ * : : : : :
+ * ---------------------------- VF Paused -------------------------------------
+ * | | | | |
+ * | | | | |
+ * | | | |Schedule |
+ * | | | |CCS Save |
+ * | | | | LRCA |
+ * | | | [ ]------>[ ]
+ * | | | | |
+ * | | | | |
+ * | | | |CCS save |
+ * | | | |completed|
+ * | | | [ ]<------[ ]
+ * | | | | |
+ * : : : : :
+ * ---------------------------- VM Migrated -----------------------------------
+ * | | | | |
+ * | | | | |
+ * : : : : :
+ * ---------------------------- VF Resumed ------------------------------------
+ * | | | | |
+ * | | | | |
+ * | | [ ]-------------- | |
+ * | | [ ] Fix up GGTT | | |
+ * | | [ ]<------------- | |
+ * | | | | |
+ * | | | | |
+ * | | | Notify VF_RESFIX_DONE | |
+ * | | [ ]--------------------------->[ ] |
+ * | | | | |
+ * | | | |Schedule |
+ * | | | |CCS |
+ * | | | |Restore |
+ * | | | |LRCA |
+ * | | | [ ]------>[ ]
+ * | | | | |
+ * | | | | |
+ * | | | |CCS |
+ * | | | |restore |
+ * | | | |completed|
+ * | | | [ ]<------[ ]
+ * | | | | |
+ * | | | | |
+ * | | | VF_RESFIX_DONE complete | |
+ * | | | notification | |
+ * | | [ ]<---------------------------[ ] |
+ * | | | | |
+ * | | | | |
+ * : : : : :
+ * ------------------------- Continue VM restore ------------------------------
+ */
+
+static u64 get_ccs_bb_pool_size(struct xe_device *xe)
+{
+ u64 sys_mem_size, ccs_mem_size, ptes, bb_pool_size;
+ struct sysinfo si;
+
+ si_meminfo(&si);
+ sys_mem_size = si.totalram * si.mem_unit;
+ ccs_mem_size = div64_u64(sys_mem_size, NUM_BYTES_PER_CCS_BYTE(xe));
+ ptes = DIV_ROUND_UP_ULL(sys_mem_size + ccs_mem_size, XE_PAGE_SIZE);
+
+ /*
+ * The BB size below must hold the PTE mappings plus a few DWs for the
+ * copy command. In reality, space for many copy commands is needed, so
+ * allocate double the calculated size, which is enough to hold the GPU
+ * instructions for the whole region.
+ */
+ bb_pool_size = ptes * sizeof(u32);
+
+ return round_up(bb_pool_size * 2, SZ_1M);
+}
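+
+/*
+ * Sizing sketch (illustrative, assuming NUM_BYTES_PER_CCS_BYTE() == 256 and
+ * 4 KiB pages): with 16 GiB of system RAM, ccs_mem_size is 64 MiB, giving
+ * ~4.2M PTEs, so the doubled pool rounds up to ~33 MiB.
+ */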
+
+static int alloc_bb_pool(struct xe_tile *tile, struct xe_sriov_vf_ccs_ctx *ctx)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_sa_manager *sa_manager;
+ u64 bb_pool_size;
+ int offset, err;
+
+ bb_pool_size = get_ccs_bb_pool_size(xe);
+ xe_sriov_info(xe, "Allocating %s CCS BB pool size = %lldMB\n",
+ ctx->ctx_id ? "Restore" : "Save", bb_pool_size / SZ_1M);
+
+ sa_manager = xe_sa_bo_manager_init(tile, bb_pool_size, SZ_16);
+ if (IS_ERR(sa_manager)) {
+ xe_sriov_err(xe, "Suballocator init failed with error: %pe\n",
+ sa_manager);
+ err = PTR_ERR(sa_manager);
+ return err;
+ }
+
+ offset = 0;
+ xe_map_memset(xe, &sa_manager->bo->vmap, offset, MI_NOOP,
+ bb_pool_size);
+
+ offset = bb_pool_size - sizeof(u32);
+ xe_map_wr(xe, &sa_manager->bo->vmap, offset, u32, MI_BATCH_BUFFER_END);
+
+ ctx->mem.ccs_bb_pool = sa_manager;
+
+ return 0;
+}
+
+static void ccs_rw_update_ring(struct xe_sriov_vf_ccs_ctx *ctx)
+{
+ u64 addr = xe_sa_manager_gpu_addr(ctx->mem.ccs_bb_pool);
+ struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
+ u32 dw[10], i = 0;
+
+ /*
+ * XXX: Save/restore fixes — for some reason, the GuC only accepts the
+ * save/restore context if the LRC head pointer is zero. This is evident
+ * from repeated VF migrations failing when the LRC head pointer is
+ * non-zero.
+ */
+ lrc->ring.tail = 0;
+ xe_lrc_set_ring_head(lrc, 0);
+
+ dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ dw[i++] = MI_BATCH_BUFFER_START | XE_INSTR_NUM_DW(3);
+ dw[i++] = lower_32_bits(addr);
+ dw[i++] = upper_32_bits(addr);
+ dw[i++] = MI_NOOP;
+ dw[i++] = MI_NOOP;
+
+ xe_lrc_write_ring(lrc, dw, i * sizeof(u32));
+ xe_lrc_set_ring_tail(lrc, lrc->ring.tail);
+}
+
+/**
+ * xe_sriov_vf_ccs_rebase - Rebase GGTT addresses for CCS save / restore
+ * @xe: the &xe_device.
+ */
+void xe_sriov_vf_ccs_rebase(struct xe_device *xe)
+{
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+ if (!IS_VF_CCS_READY(xe))
+ return;
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ struct xe_sriov_vf_ccs_ctx *ctx =
+ &xe->sriov.vf.ccs.contexts[ctx_id];
+
+ ccs_rw_update_ring(ctx);
+ }
+}
+
+static int register_save_restore_context(struct xe_sriov_vf_ccs_ctx *ctx)
+{
+ int ctx_type;
+
+ switch (ctx->ctx_id) {
+ case XE_SRIOV_VF_CCS_READ_CTX:
+ ctx_type = GUC_CONTEXT_COMPRESSION_SAVE;
+ break;
+ case XE_SRIOV_VF_CCS_WRITE_CTX:
+ ctx_type = GUC_CONTEXT_COMPRESSION_RESTORE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ xe_guc_register_vf_exec_queue(ctx->mig_q, ctx_type);
+ return 0;
+}
+
+/**
+ * xe_sriov_vf_ccs_register_context - Register read/write contexts with GuC.
+ * @xe: the &xe_device to register contexts on.
+ *
+ * This function registers the read and write contexts with the GuC.
+ * Re-registration is needed whenever resuming from runtime suspend.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_sriov_vf_ccs_register_context(struct xe_device *xe)
+{
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_sriov_vf_ccs_ctx *ctx;
+ int err;
+
+ xe_assert(xe, IS_VF_CCS_READY(xe));
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
+ err = register_save_restore_context(ctx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/*
+ * Whether GuC requires CCS copy BBs for VF migration.
+ * @xe: the &xe_device instance.
+ *
+ * Only selected platforms require VF KMD to maintain CCS copy BBs and linked LRCAs.
+ *
+ * Return: true if VF driver must participate in the CCS migration, false otherwise.
+ */
+static bool vf_migration_ccs_bb_needed(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ return !IS_DGFX(xe) && xe_device_has_flat_ccs(xe);
+}
+
+/*
+ * Check whether migration must be disabled due to missing CCS BBs support in GuC FW.
+ * @xe: the &xe_device instance.
+ *
+ * Performs a late disable of the VF migration feature in case the GuC FW cannot support it.
+ *
+ * Returns: True if VF migration with CCS BBs is supported, false otherwise.
+ */
+static bool vf_migration_ccs_bb_support_check(struct xe_device *xe)
+{
+ struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_uc_fw_version guc_version;
+
+ xe_gt_sriov_vf_guc_versions(gt, NULL, &guc_version);
+ if (MAKE_GUC_VER_STRUCT(guc_version) < MAKE_GUC_VER(1, 23, 0)) {
+ xe_sriov_vf_migration_disable(xe,
+ "CCS migration requires GuC ABI >= 1.23 but only %u.%u found",
+ guc_version.major, guc_version.minor);
+ return false;
+ }
+
+ return true;
+}
+
+static void xe_sriov_vf_ccs_fini(void *arg)
+{
+ struct xe_sriov_vf_ccs_ctx *ctx = arg;
+ struct xe_lrc *lrc = xe_exec_queue_lrc(ctx->mig_q);
+
+ /*
+ * Make TAIL = HEAD in the ring so that no issues are seen if GuC
+ * submits this context to HW on VF pause after unbinding the device.
+ */
+ xe_lrc_set_ring_tail(lrc, xe_lrc_ring_head(lrc));
+ xe_exec_queue_put(ctx->mig_q);
+}
+
+/**
+ * xe_sriov_vf_ccs_init - Setup LRCA for save & restore.
+ * @xe: the &xe_device to start recovery on
+ *
+ * This function shall be called only by VF. It initializes
+ * LRCA and suballocator needed for CCS save & restore.
+ *
+ * Return: 0 on success. Negative error code on failure.
+ */
+int xe_sriov_vf_ccs_init(struct xe_device *xe)
+{
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_sriov_vf_ccs_ctx *ctx;
+ struct xe_exec_queue *q;
+ u32 flags;
+ int err;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ if (!xe_sriov_vf_migration_supported(xe) ||
+ !vf_migration_ccs_bb_needed(xe) ||
+ !vf_migration_ccs_bb_support_check(xe))
+ return 0;
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
+ ctx->ctx_id = ctx_id;
+
+ flags = EXEC_QUEUE_FLAG_KERNEL |
+ EXEC_QUEUE_FLAG_PERMANENT |
+ EXEC_QUEUE_FLAG_MIGRATE;
+ q = xe_exec_queue_create_bind(xe, tile, flags, 0);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
+ goto err_ret;
+ }
+ ctx->mig_q = q;
+
+ err = alloc_bb_pool(tile, ctx);
+ if (err)
+ goto err_free_queue;
+
+ ccs_rw_update_ring(ctx);
+
+ err = register_save_restore_context(ctx);
+ if (err)
+ goto err_free_queue;
+
+ err = devm_add_action_or_reset(xe->drm.dev,
+ xe_sriov_vf_ccs_fini,
+ ctx);
+ if (err)
+ goto err_ret;
+ }
+
+ xe->sriov.vf.ccs.initialized = 1;
+
+ return 0;
+
+err_free_queue:
+ xe_exec_queue_put(q);
+
+err_ret:
+ return err;
+}
+
+/**
+ * xe_sriov_vf_ccs_attach_bo - Insert CCS read/write commands in the BO.
+ * @bo: the &xe_bo to which batch buffer commands will be added.
+ *
+ * This function shall be called only by VF. It inserts the PTEs and copy
+ * command instructions in the BO by calling the xe_migrate_ccs_rw_copy()
+ * function.
+ *
+ * Returns: 0 if successful, negative error code on failure.
+ */
+int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_sriov_vf_ccs_ctx *ctx;
+ struct xe_tile *tile;
+ struct xe_bb *bb;
+ int err = 0;
+
+ xe_assert(xe, IS_VF_CCS_READY(xe));
+
+ tile = xe_device_get_root_tile(xe);
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ bb = bo->bb_ccs[ctx_id];
+ /* bb should be NULL here. Assert if not NULL */
+ xe_assert(xe, !bb);
+
+ ctx = &xe->sriov.vf.ccs.contexts[ctx_id];
+ err = xe_migrate_ccs_rw_copy(tile, ctx->mig_q, bo, ctx_id);
+ }
+ return err;
+}
+
+/**
+ * xe_sriov_vf_ccs_detach_bo - Remove CCS read/write commands from the BO.
+ * @bo: the &xe_bo from which batch buffer commands will be removed.
+ *
+ * This function shall be called only by VF. It removes the PTEs and copy
+ * command instructions from the BO. Make sure to update the BB with MI_NOOP
+ * before freeing.
+ *
+ * Returns: 0 if successful.
+ */
+int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+ struct xe_bb *bb;
+
+ xe_assert(xe, IS_VF_CCS_READY(xe));
+
+ if (!xe_bo_has_valid_ccs_bb(bo))
+ return 0;
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ bb = bo->bb_ccs[ctx_id];
+ if (!bb)
+ continue;
+
+ memset(bb->cs, MI_NOOP, bb->len * sizeof(u32));
+ xe_bb_free(bb, NULL);
+ bo->bb_ccs[ctx_id] = NULL;
+ }
+ return 0;
+}
+
+/**
+ * xe_sriov_vf_ccs_print - Print VF CCS details.
+ * @xe: the &xe_device
+ * @p: the &drm_printer
+ *
+ * This function is for VF use only.
+ */
+void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p)
+{
+ struct xe_sa_manager *bb_pool;
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+ if (!IS_VF_CCS_READY(xe))
+ return;
+
+ xe_pm_runtime_get(xe);
+
+ for_each_ccs_rw_ctx(ctx_id) {
+ bb_pool = xe->sriov.vf.ccs.contexts[ctx_id].mem.ccs_bb_pool;
+ if (!bb_pool)
+ break;
+
+ drm_printf(p, "ccs %s bb suballoc info\n", ctx_id ? "write" : "read");
+ drm_printf(p, "-------------------------\n");
+ drm_suballoc_dump_debug_info(&bb_pool->base, p, xe_sa_manager_gpu_addr(bb_pool));
+ drm_puts(p, "\n");
+ }
+
+ xe_pm_runtime_put(xe);
+}
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
new file mode 100644
index 000000000000..f8ca6efce9ee
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_CCS_H_
+#define _XE_SRIOV_VF_CCS_H_
+
+#include "xe_device_types.h"
+#include "xe_sriov.h"
+#include "xe_sriov_vf_ccs_types.h"
+
+struct drm_printer;
+struct xe_device;
+struct xe_bo;
+
+int xe_sriov_vf_ccs_init(struct xe_device *xe);
+int xe_sriov_vf_ccs_attach_bo(struct xe_bo *bo);
+int xe_sriov_vf_ccs_detach_bo(struct xe_bo *bo);
+int xe_sriov_vf_ccs_register_context(struct xe_device *xe);
+void xe_sriov_vf_ccs_rebase(struct xe_device *xe);
+void xe_sriov_vf_ccs_print(struct xe_device *xe, struct drm_printer *p);
+
+static inline bool xe_sriov_vf_ccs_ready(struct xe_device *xe)
+{
+ xe_assert(xe, IS_SRIOV_VF(xe));
+ return xe->sriov.vf.ccs.initialized;
+}
+
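+/*
+ * Evaluates @xe once and short-circuits to false on a PF, so the VF-only
+ * assert in xe_sriov_vf_ccs_ready() is never reached there.
+ */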
+#define IS_VF_CCS_READY(xe) ({ \
+ struct xe_device *xe__ = (xe); \
+ IS_SRIOV_VF(xe__) && xe_sriov_vf_ccs_ready(xe__); \
+ })
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h
new file mode 100644
index 000000000000..22c499943d2a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_ccs_types.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_SRIOV_VF_CCS_TYPES_H_
+#define _XE_SRIOV_VF_CCS_TYPES_H_
+
+#include <linux/types.h>
+
+#define for_each_ccs_rw_ctx(id__) \
+ for ((id__) = 0; (id__) < XE_SRIOV_VF_CCS_CTX_COUNT; (id__)++)
+
+enum xe_sriov_vf_ccs_rw_ctxs {
+ XE_SRIOV_VF_CCS_READ_CTX,
+ XE_SRIOV_VF_CCS_WRITE_CTX,
+ XE_SRIOV_VF_CCS_CTX_COUNT
+};
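+
+/*
+ * The READ context drives the CCS save (GUC_CONTEXT_COMPRESSION_SAVE) and the
+ * WRITE context drives the restore (GUC_CONTEXT_COMPRESSION_RESTORE); see
+ * register_save_restore_context().
+ */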
+
+struct xe_migrate;
+struct xe_sa_manager;
+
+/**
+ * struct xe_sriov_vf_ccs_ctx - VF CCS migration context data.
+ */
+struct xe_sriov_vf_ccs_ctx {
+ /** @ctx_id: id identifying this context */
+ enum xe_sriov_vf_ccs_rw_ctxs ctx_id;
+
+ /** @mig_q: exec queue used for migration */
+ struct xe_exec_queue *mig_q;
+
+ /** @mem: memory data */
+ struct {
+ /** @mem.ccs_bb_pool: Pool from which batch buffers are allocated. */
+ struct xe_sa_manager *ccs_bb_pool;
+ } mem;
+};
+
+/**
+ * struct xe_sriov_vf_ccs - The VF CCS migration support data.
+ */
+struct xe_sriov_vf_ccs {
+ /** @contexts: CCS read and write contexts for VF. */
+ struct xe_sriov_vf_ccs_ctx contexts[XE_SRIOV_VF_CCS_CTX_COUNT];
+
+ /** @initialized: set once VF CCS initialization is completed. */
+ bool initialized;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
index 8300416a6226..d5f72d667817 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
#include <linux/workqueue_types.h>
+#include "xe_sriov_vf_ccs_types.h"
+
/**
* struct xe_sriov_vf_relay_version - PF ABI version details.
*/
@@ -31,11 +33,15 @@ struct xe_device_vf {
/** @migration: VF Migration state data */
struct {
- /** @migration.worker: VF migration recovery worker */
- struct work_struct worker;
- /** @migration.gt_flags: Per-GT request flags for VF migration recovery */
- unsigned long gt_flags;
+ /**
+ * @migration.disabled: flag indicating if migration support
+ * was turned off due to missing prerequisites
+ */
+ bool disabled;
} migration;
+
+ /** @ccs: VF CCS state data */
+ struct xe_sriov_vf_ccs ccs;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_sriov_vfio.c b/drivers/gpu/drm/xe/xe_sriov_vfio.c
new file mode 100644
index 000000000000..e9a7615bb5c5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_sriov_vfio.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/intel/xe_sriov_vfio.h>
+#include <linux/cleanup.h>
+
+#include "xe_pci.h"
+#include "xe_pm.h"
+#include "xe_sriov_pf_control.h"
+#include "xe_sriov_pf_helpers.h"
+#include "xe_sriov_pf_migration.h"
+
+struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev)
+{
+ return xe_pci_to_pf_device(pdev);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_get_pf, "xe-vfio-pci");
+
+bool xe_sriov_vfio_migration_supported(struct xe_device *xe)
+{
+ if (!IS_SRIOV_PF(xe))
+ return false;
+
+ return xe_sriov_pf_migration_supported(xe);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_migration_supported, "xe-vfio-pci");
+
+#define DEFINE_XE_SRIOV_VFIO_FUNCTION(_type, _func, _impl) \
+_type xe_sriov_vfio_##_func(struct xe_device *xe, unsigned int vfid) \
+{ \
+ if (!IS_SRIOV_PF(xe)) \
+ return -EPERM; \
+ if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe)) \
+ return -EINVAL; \
+ \
+ guard(xe_pm_runtime_noresume)(xe); \
+ \
+ return xe_sriov_pf_##_impl(xe, vfid); \
+} \
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_##_func, "xe-vfio-pci")
+
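+/*
+ * Each DEFINE_XE_SRIOV_VFIO_FUNCTION() use below emits one exported wrapper;
+ * e.g. xe_sriov_vfio_suspend_device(xe, vfid) validates the PF mode and vfid,
+ * then calls xe_sriov_pf_control_pause_vf(xe, vfid) under the
+ * xe_pm_runtime_noresume guard.
+ */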
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, wait_flr_done, control_wait_flr);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, suspend_device, control_pause_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_device, control_resume_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_enter, control_trigger_save_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_exit, control_finish_save_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_enter, control_trigger_restore_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_data_exit, control_finish_restore_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(int, error, control_stop_vf);
+DEFINE_XE_SRIOV_VFIO_FUNCTION(ssize_t, stop_copy_size, migration_size);
+
+ssize_t xe_sriov_vfio_data_read(struct xe_device *xe, unsigned int vfid,
+ char __user *buf, size_t len)
+{
+ if (!IS_SRIOV_PF(xe))
+ return -EPERM;
+ if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
+ return -EINVAL;
+
+ guard(xe_pm_runtime_noresume)(xe);
+
+ return xe_sriov_pf_migration_read(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_read, "xe-vfio-pci");
+
+ssize_t xe_sriov_vfio_data_write(struct xe_device *xe, unsigned int vfid,
+ const char __user *buf, size_t len)
+{
+ if (!IS_SRIOV_PF(xe))
+ return -EPERM;
+ if (vfid == PFID || vfid > xe_sriov_pf_num_vfs(xe))
+ return -EINVAL;
+
+ guard(xe_pm_runtime_noresume)(xe);
+
+ return xe_sriov_pf_migration_write(xe, vfid, buf, len);
+}
+EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_data_write, "xe-vfio-pci");
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c
index 8f7b0add2364..1662bfddd4bc 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.c
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.c
@@ -22,15 +22,18 @@
#define MAX_SCRATCH_MMIO 8
/**
- * DOC: Xe Boot Survivability
+ * DOC: Survivability Mode
*
- * Boot Survivability is a software based workflow for recovering a system in a failed boot state
+ * Survivability Mode is a software-based workflow for recovering a system in a failed boot state.
* Here system recoverability is concerned with recovering the firmware responsible for boot.
*
- * This is implemented by loading the driver with bare minimum (no drm card) to allow the firmware
- * to be flashed through mei and collect telemetry. The driver's probe flow is modified
- * such that it enters survivability mode when pcode initialization is incomplete and boot status
- * denotes a failure.
+ * Boot Survivability
+ * ===================
+ *
+ * Boot Survivability is implemented by loading the driver with the bare minimum (no drm card) to
+ * allow the firmware to be flashed through the mei driver and to collect telemetry. The driver's
+ * probe flow is modified such that it enters survivability mode when pcode initialization is
+ * incomplete and the boot status denotes a failure.
*
* Survivability mode can also be entered manually using the survivability mode attribute available
* through configfs which is beneficial in several usecases. It can be used to address scenarios
@@ -48,7 +51,7 @@
* Survivability mode is indicated by the below admin-only readable sysfs which provides additional
* debug information::
*
- * /sys/bus/pci/devices/<device>/surivability_mode
+ * /sys/bus/pci/devices/<device>/survivability_mode
*
* Capability Information:
* Provides boot status
@@ -58,6 +61,22 @@
* Provides history of previous failures
* Auxiliary Information
* Certain failures may have information in addition to postcode information
+ *
+ * Runtime Survivability
+ * =====================
+ *
+ * Certain runtime firmware errors can cause the device to enter a wedged state
+ * (:ref:`xe-device-wedging`) requiring a firmware flash to restore normal operation.
+ * Runtime Survivability Mode signals that a firmware flash is necessary to recover the device and
+ * is indicated by the presence of the survivability mode sysfs::
+ *
+ * /sys/bus/pci/devices/<device>/survivability_mode
+ *
+ * Survivability mode sysfs provides information about the type of survivability mode.
+ *
+ * When such errors occur, userspace is notified with the drm device wedged uevent and the runtime
+ * survivability mode. The user can then initiate a firmware flash using userspace tools like fwupd
+ * to restore the device to normal operation.
*/
static u32 aux_history_offset(u32 reg_value)
@@ -123,6 +142,14 @@ static void log_survivability_info(struct pci_dev *pdev)
}
}
+static int check_boot_failure(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+
+ return survivability->boot_status == NON_CRITICAL_FAILURE ||
+ survivability->boot_status == CRITICAL_FAILURE;
+}
+
static ssize_t survivability_mode_show(struct device *dev,
struct device_attribute *attr, char *buff)
{
@@ -132,6 +159,12 @@ static ssize_t survivability_mode_show(struct device *dev,
struct xe_survivability_info *info = survivability->info;
int index = 0, count = 0;
+ count += sysfs_emit_at(buff, count, "Survivability mode type: %s\n",
+ survivability->type ? "Runtime" : "Boot");
+
+ if (!check_boot_failure(xe))
+ return count;
+
for (index = 0; index < MAX_SCRATCH_MMIO; index++) {
if (info[index].reg)
count += sysfs_emit_at(buff, count, "%s: 0x%x - 0x%x\n", info[index].name,
@@ -152,12 +185,11 @@ static void xe_survivability_mode_fini(void *arg)
sysfs_remove_file(&dev->kobj, &dev_attr_survivability_mode.attr);
}
-static int enable_survivability_mode(struct pci_dev *pdev)
+static int create_survivability_sysfs(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct xe_device *xe = pdev_to_xe_device(pdev);
- struct xe_survivability *survivability = &xe->survivability;
- int ret = 0;
+ int ret;
/* create survivability mode sysfs */
ret = sysfs_create_file(&dev->kobj, &dev_attr_survivability_mode.attr);
@@ -171,6 +203,20 @@ static int enable_survivability_mode(struct pci_dev *pdev)
if (ret)
return ret;
+ return 0;
+}
+
+static int enable_boot_survivability_mode(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct xe_device *xe = pdev_to_xe_device(pdev);
+ struct xe_survivability *survivability = &xe->survivability;
+ int ret = 0;
+
+ ret = create_survivability_sysfs(pdev);
+ if (ret)
+ return ret;
+
/* Make sure xe_heci_gsc_init() knows about survivability mode */
survivability->mode = true;
@@ -193,15 +239,36 @@ err:
return ret;
}
+static int init_survivability_mode(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct xe_survivability_info *info;
+
+ survivability->size = MAX_SCRATCH_MMIO;
+
+ info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ survivability->info = info;
+
+ populate_survivability_info(xe);
+
+ return 0;
+}
+
/**
- * xe_survivability_mode_is_enabled - check if survivability mode is enabled
+ * xe_survivability_mode_is_boot_enabled - check if boot survivability mode is enabled
* @xe: xe device instance
*
- * Returns true if in survivability mode, false otherwise
+ * Returns true if in boot survivability mode, false otherwise
*/
-bool xe_survivability_mode_is_enabled(struct xe_device *xe)
+bool xe_survivability_mode_is_boot_enabled(struct xe_device *xe)
{
- return xe->survivability.mode;
+ struct xe_survivability *survivability = &xe->survivability;
+
+ return survivability->mode && survivability->type == XE_SURVIVABILITY_TYPE_BOOT;
}
/**
@@ -222,19 +289,10 @@ bool xe_survivability_mode_is_requested(struct xe_device *xe)
u32 data;
bool survivability_mode;
- if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe) || xe->info.platform < XE_BATTLEMAGE)
return false;
survivability_mode = xe_configfs_get_survivability_mode(pdev);
-
- if (xe->info.platform < XE_BATTLEMAGE) {
- if (survivability_mode) {
- dev_err(&pdev->dev, "Survivability Mode is not supported on this card\n");
- xe_configfs_clear_survivability_mode(pdev);
- }
- return false;
- }
-
/* Enable survivability mode if set via configfs */
if (survivability_mode)
return true;
@@ -242,44 +300,78 @@ bool xe_survivability_mode_is_requested(struct xe_device *xe)
data = xe_mmio_read32(mmio, PCODE_SCRATCH(0));
survivability->boot_status = REG_FIELD_GET(BOOT_STATUS, data);
- return survivability->boot_status == NON_CRITICAL_FAILURE ||
- survivability->boot_status == CRITICAL_FAILURE;
+ return check_boot_failure(xe);
}
/**
- * xe_survivability_mode_enable - Initialize and enable the survivability mode
+ * xe_survivability_mode_runtime_enable - Initialize and enable runtime survivability mode
* @xe: xe device instance
*
- * Initialize survivability information and enable survivability mode
+ * Initialize survivability information and enable runtime survivability mode.
+ * Runtime survivability mode is enabled when certain errors cause the device to be
+ * in a non-recoverable state. The device is declared wedged with the appropriate
+ * recovery method, and the survivability mode sysfs is exposed to userspace.
*
- * Return: 0 if survivability mode is enabled or not requested; negative error
- * code otherwise.
+ * Return: 0 if runtime survivability mode is enabled, negative error code otherwise.
*/
-int xe_survivability_mode_enable(struct xe_device *xe)
+int xe_survivability_mode_runtime_enable(struct xe_device *xe)
{
struct xe_survivability *survivability = &xe->survivability;
- struct xe_survivability_info *info;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret;
- if (!xe_survivability_mode_is_requested(xe))
- return 0;
+ if (!IS_DGFX(xe) || IS_SRIOV_VF(xe) || xe->info.platform < XE_BATTLEMAGE) {
+ dev_err(&pdev->dev, "Runtime Survivability Mode not supported\n");
+ return -EINVAL;
+ }
- survivability->size = MAX_SCRATCH_MMIO;
+ ret = init_survivability_mode(xe);
+ if (ret)
+ return ret;
- info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
+ ret = create_survivability_sysfs(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to create survivability mode sysfs\n");
- survivability->info = info;
+ survivability->type = XE_SURVIVABILITY_TYPE_RUNTIME;
+ dev_err(&pdev->dev, "Runtime Survivability mode enabled\n");
- populate_survivability_info(xe);
+ xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_VENDOR);
+ xe_device_declare_wedged(xe);
+ dev_err(&pdev->dev, "Firmware flash required, Please refer to the userspace documentation for more details!\n");
+
+ return 0;
+}
+
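For context, a hedged sketch of how a fatal firmware-error path might hand the device over to runtime survivability; the handler name is hypothetical, while the enable call and its wedging behaviour are as implemented above:

/* Hypothetical error-handler sketch built on the function above. */
static void handle_fatal_fw_error(struct xe_device *xe)
{
	/*
	 * Declares the device wedged with DRM_WEDGE_RECOVERY_VENDOR and
	 * exposes the survivability_mode sysfs so tools like fwupd can
	 * take over and flash the firmware.
	 */
	if (xe_survivability_mode_runtime_enable(xe))
		drm_err(&xe->drm, "Failed to enter runtime survivability mode\n");
}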
+/**
+ * xe_survivability_mode_boot_enable - Initialize and enable boot survivability mode
+ * @xe: xe device instance
+ *
+ * Initialize survivability information and enable boot survivability mode
+ *
+ * Return: 0 if boot survivability mode is enabled or not requested, negative error
+ * code otherwise.
+ */
+int xe_survivability_mode_boot_enable(struct xe_device *xe)
+{
+ struct xe_survivability *survivability = &xe->survivability;
+ struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
+ int ret;
- /* Only log debug information and exit if it is a critical failure */
+ if (!xe_survivability_mode_is_requested(xe))
+ return 0;
+
+ ret = init_survivability_mode(xe);
+ if (ret)
+ return ret;
+
+ /* Log breadcrumbs but do not enter survivability mode for critical boot errors */
if (survivability->boot_status == CRITICAL_FAILURE) {
log_survivability_info(pdev);
return -ENXIO;
}
- return enable_survivability_mode(pdev);
+ survivability->type = XE_SURVIVABILITY_TYPE_BOOT;
+
+ return enable_boot_survivability_mode(pdev);
}
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h
index 02231c2bf008..1cc94226aa82 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode.h
@@ -10,8 +10,9 @@
struct xe_device;
-int xe_survivability_mode_enable(struct xe_device *xe);
-bool xe_survivability_mode_is_enabled(struct xe_device *xe);
+int xe_survivability_mode_boot_enable(struct xe_device *xe);
+int xe_survivability_mode_runtime_enable(struct xe_device *xe);
+bool xe_survivability_mode_is_boot_enabled(struct xe_device *xe);
bool xe_survivability_mode_is_requested(struct xe_device *xe);
#endif /* _XE_SURVIVABILITY_MODE_H_ */
diff --git a/drivers/gpu/drm/xe/xe_survivability_mode_types.h b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
index 19d433e253df..cd65a5d167c9 100644
--- a/drivers/gpu/drm/xe/xe_survivability_mode_types.h
+++ b/drivers/gpu/drm/xe/xe_survivability_mode_types.h
@@ -9,6 +9,11 @@
#include <linux/limits.h>
#include <linux/types.h>
+enum xe_survivability_type {
+ XE_SURVIVABILITY_TYPE_BOOT,
+ XE_SURVIVABILITY_TYPE_RUNTIME,
+};
+
struct xe_survivability_info {
char name[NAME_MAX];
u32 reg;
@@ -30,6 +35,9 @@ struct xe_survivability {
/** @mode: boolean to indicate survivability mode */
bool mode;
+
+ /** @type: survivability type */
+ enum xe_survivability_type type;
};
#endif /* _XE_SURVIVABILITY_MODE_TYPES_H_ */
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index a7ff5975873f..55c5a0eb82e1 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -6,8 +6,8 @@
#include <drm/drm_drv.h>
#include "xe_bo.h"
+#include "xe_exec_queue_types.h"
#include "xe_gt_stats.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
#include "xe_pm.h"
@@ -17,6 +17,7 @@
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"
+#include "xe_vram_types.h"
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
@@ -25,9 +26,9 @@ static bool xe_svm_range_in_vram(struct xe_svm_range *range)
* memory.
*/
- struct drm_gpusvm_range_flags flags = {
+ struct drm_gpusvm_pages_flags flags = {
/* Pairs with WRITE_ONCE in drm_gpusvm.c */
- .__flags = READ_ONCE(range->base.flags.__flags),
+ .__flags = READ_ONCE(range->base.pages.flags.__flags),
};
return flags.has_devmem_pages;
@@ -49,15 +50,15 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
return gpusvm_to_vm(r->gpusvm);
}
-#define range_debug(r__, operaton__) \
+#define range_debug(r__, operation__) \
vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
"%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
"start=0x%014lx, end=0x%014lx, size=%lu", \
- (operaton__), range_to_vm(&(r__)->base)->usm.asid, \
+ (operation__), range_to_vm(&(r__)->base)->usm.asid, \
(r__)->base.gpusvm, \
xe_svm_range_in_vram((r__)) ? 1 : 0, \
xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
- (r__)->base.notifier_seq, \
+ (r__)->base.pages.notifier_seq, \
xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
xe_svm_range_size((r__)))
@@ -66,11 +67,6 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
range_debug(range, operation);
}
-static void *xe_svm_devm_owner(struct xe_device *xe)
-{
- return xe;
-}
-
static struct drm_gpusvm_range *
xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
{
@@ -108,8 +104,12 @@ xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
&vm->svm.garbage_collector.range_list);
spin_unlock(&vm->svm.garbage_collector.lock);
- queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
- &vm->svm.garbage_collector.work);
+ queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work);
+}
+
+static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
+{
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
}
static u8
@@ -128,7 +128,7 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
range_debug(range, "NOTIFIER");
/* Skip if already unmapped or if no binding exist */
- if (range->base.flags.unmapped || !range->tile_present)
+ if (range->base.pages.flags.unmapped || !range->tile_present)
return 0;
range_debug(range, "NOTIFIER - EXECUTE");
@@ -144,13 +144,19 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
*/
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
- tile_mask |= BIT(id);
/*
* WRITE_ONCE pairs with READ_ONCE in
* xe_vm_has_valid_gpu_mapping()
*/
WRITE_ONCE(range->tile_invalidated,
range->tile_invalidated | BIT(id));
+
+ if (!(tile_mask & BIT(id))) {
+ xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
+ if (tile->media_gt)
+ xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
+ tile_mask |= BIT(id);
+ }
}
return tile_mask;
@@ -170,6 +176,24 @@ xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
mmu_range);
}
+static s64 xe_svm_stats_ktime_us_delta(ktime_t start)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) ?
+ ktime_us_delta(ktime_get(), start) : 0;
+}
+
+static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
+{
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
+}
+
+static ktime_t xe_svm_stats_ktime_get(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) ? ktime_get() : 0;
+}
+
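Without CONFIG_DEBUG_FS these helpers return zero, so only zero deltas get recorded. The intended usage pattern, as seen in xe_svm_invalidate() below, is simply:

	ktime_t start = xe_svm_stats_ktime_get();

	/* ... timed work, e.g. zapping PTEs and invalidating TLBs ... */

	xe_svm_tlb_inval_us_stats_incr(gt, start);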
static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_notifier *notifier,
const struct mmu_notifier_range *mmu_range)
@@ -177,8 +201,10 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
struct drm_gpusvm_range *r, *first;
+ struct xe_tile *tile;
+ ktime_t start = xe_svm_stats_ktime_get();
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
- u8 tile_mask = 0;
+ u8 tile_mask = 0, id;
long err;
xe_svm_assert_in_notifier(vm);
@@ -224,13 +250,20 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
+ err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
WARN_ON_ONCE(err);
range_notifier_event_end:
r = first;
drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
xe_svm_range_notifier_event_end(vm, r, mmu_range);
+ for_each_tile(tile, xe, id) {
+ if (tile_mask & BIT(id)) {
+ xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
+ if (tile->media_gt)
+ xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
+ }
+ }
}
static int __xe_svm_garbage_collector(struct xe_vm *vm,
@@ -252,24 +285,78 @@ static int __xe_svm_garbage_collector(struct xe_vm *vm,
return 0;
}
+static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 range_end)
+{
+ struct xe_vma *vma;
+ struct xe_vma_mem_attr default_attr = {
+ .preferred_loc = {
+ .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+ .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+ },
+ .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+ };
+ int err = 0;
+
+ vma = xe_vm_find_vma_by_addr(vm, range_start);
+ if (!vma)
+ return -EINVAL;
+
+ if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
+ drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
+ return 0;
+ }
+
+ if (xe_vma_has_default_mem_attrs(vma))
+ return 0;
+
+ vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
+ xe_vma_start(vma), xe_vma_end(vma));
+
+ if (xe_vma_start(vma) == range_start && xe_vma_end(vma) == range_end) {
+ default_attr.pat_index = vma->attr.default_pat_index;
+ default_attr.default_pat_index = vma->attr.default_pat_index;
+ vma->attr = default_attr;
+ } else {
+ vm_dbg(&vm->xe->drm, "Split VMA start=0x%016llx, vma_end=0x%016llx",
+ range_start, range_end);
+ err = xe_vm_alloc_cpu_addr_mirror_vma(vm, range_start, range_end - range_start);
+ if (err) {
+ drm_warn(&vm->xe->drm, "VMA SPLIT failed: %pe\n", ERR_PTR(err));
+ xe_vm_kill(vm, true);
+ return err;
+ }
+ }
+
+ /*
+	 * When called from xe_svm_handle_pagefault the original VMA might
+	 * have changed; signal this so the caller looks up the VMA again.
+ */
+ return -EAGAIN;
+}
+
static int xe_svm_garbage_collector(struct xe_vm *vm)
{
struct xe_svm_range *range;
- int err;
+ u64 range_start;
+ u64 range_end;
+ int err, ret = 0;
lockdep_assert_held_write(&vm->lock);
if (xe_vm_is_closed_or_banned(vm))
return -ENOENT;
- spin_lock(&vm->svm.garbage_collector.lock);
for (;;) {
+ spin_lock(&vm->svm.garbage_collector.lock);
range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
typeof(*range),
garbage_collector_link);
if (!range)
break;
+ range_start = xe_svm_range_start(range);
+ range_end = xe_svm_range_end(range);
+
list_del(&range->garbage_collector_link);
spin_unlock(&vm->svm.garbage_collector.lock);
@@ -282,11 +369,17 @@ static int xe_svm_garbage_collector(struct xe_vm *vm)
return err;
}
- spin_lock(&vm->svm.garbage_collector.lock);
+ err = xe_svm_range_set_default_attr(vm, range_start, range_end);
+ if (err) {
+ if (err == -EAGAIN)
+ ret = -EAGAIN;
+ else
+ return err;
+ }
}
spin_unlock(&vm->svm.garbage_collector.lock);
- return 0;
+ return ret;
}
static void xe_svm_garbage_collector_work_func(struct work_struct *w)
@@ -306,21 +399,15 @@ static struct xe_vram_region *page_to_vr(struct page *page)
return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
}
-static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
-{
- return container_of(vr, struct xe_tile, mem.vram);
-}
-
static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
struct page *page)
{
u64 dpa;
- struct xe_tile *tile = vr_to_tile(vr);
u64 pfn = page_to_pfn(page);
u64 offset;
- xe_tile_assert(tile, is_device_private_page(page));
- xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);
+ xe_assert(vr->xe, is_device_private_page(page));
+ xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= vr->hpa_base);
offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
dpa = vr->dpa_base + offset;
@@ -333,17 +420,74 @@ enum xe_svm_copy_dir {
XE_SVM_COPY_TO_SRAM,
};
-static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
+static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
+ const enum xe_svm_copy_dir dir,
+ int kb)
+{
+ if (dir == XE_SVM_COPY_TO_VRAM)
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
+ else
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
+}
+
+static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
+ const enum xe_svm_copy_dir dir,
+ unsigned long npages,
+ ktime_t start)
+{
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start);
+
+ if (dir == XE_SVM_COPY_TO_VRAM) {
+ switch (npages) {
+ case 1:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
+ us_delta);
+ break;
+ case 16:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
+ us_delta);
+ break;
+ case 512:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
+ us_delta);
+ break;
+ }
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
+ us_delta);
+ } else {
+ switch (npages) {
+ case 1:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
+ us_delta);
+ break;
+ case 16:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
+ us_delta);
+ break;
+ case 512:
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
+ us_delta);
+ break;
+ }
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
+ us_delta);
+ }
+}
+
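The npages buckets in the helper above map to the supported SVM range sizes assuming 4 KiB base pages; a quick sanity sketch of that mapping (illustrative only, not part of the patch):

#include <linux/build_bug.h>
#include <linux/sizes.h>

/* npages -> range-size buckets used above, assuming 4 KiB base pages */
static_assert(SZ_4K / SZ_4K == 1);	/* 4K range  ->   1 page  */
static_assert(SZ_64K / SZ_4K == 16);	/* 64K range ->  16 pages */
static_assert(SZ_2M / SZ_4K == 512);	/* 2M range  -> 512 pages */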
+static int xe_svm_copy(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages, const enum xe_svm_copy_dir dir)
{
struct xe_vram_region *vr = NULL;
- struct xe_tile *tile;
+ struct xe_gt *gt = NULL;
+ struct xe_device *xe;
struct dma_fence *fence = NULL;
unsigned long i;
#define XE_VRAM_ADDR_INVALID ~0x0ull
u64 vram_addr = XE_VRAM_ADDR_INVALID;
int err = 0, pos = 0;
bool sram = dir == XE_SVM_COPY_TO_SRAM;
+ ktime_t start = xe_svm_stats_ktime_get();
/*
* This flow is complex: it locates physically contiguous device pages,
@@ -365,12 +509,13 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
last = (i + 1) == npages;
/* No CPU page and no device pages queue'd to copy */
- if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
+ if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
continue;
if (!vr && spage) {
vr = page_to_vr(spage);
- tile = vr_to_tile(vr);
+ gt = xe_migrate_exec_queue(vr->migrate)->gt;
+ xe = vr->xe;
}
XE_WARN_ON(spage && page_to_vr(spage) != vr);
@@ -379,7 +524,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
* first device page, check if physical contiguous on subsequent
* device pages.
*/
- if (dma_addr[i] && spage) {
+ if (pagemap_addr[i].addr && spage) {
__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
if (vram_addr == XE_VRAM_ADDR_INVALID) {
vram_addr = __vram_addr;
@@ -387,6 +532,14 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
}
match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
+ /* Expected with contiguous memory */
+ xe_assert(vr->xe, match);
+
+ if (pagemap_addr[i].order) {
+ i += NR_PAGES(pagemap_addr[i].order) - 1;
+ chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
+ last = (i + 1) == npages;
+ }
}
/*
@@ -401,21 +554,26 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
int incr = (match && last) ? 1 : 0;
if (vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (i - pos + incr) *
+ (PAGE_SIZE / SZ_1K));
if (sram) {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
- vram_addr, (u64)dma_addr[pos], i - pos + incr);
- __fence = xe_migrate_from_vram(tile->migrate,
+ vram_addr,
+ (u64)pagemap_addr[pos].addr, i - pos + incr);
+ __fence = xe_migrate_from_vram(vr->migrate,
i - pos + incr,
vram_addr,
- dma_addr + pos);
+ &pagemap_addr[pos]);
} else {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
- (u64)dma_addr[pos], vram_addr, i - pos + incr);
- __fence = xe_migrate_to_vram(tile->migrate,
+ (u64)pagemap_addr[pos].addr, vram_addr,
+ i - pos + incr);
+ __fence = xe_migrate_to_vram(vr->migrate,
i - pos + incr,
- dma_addr + pos,
+ &pagemap_addr[pos],
vram_addr);
}
if (IS_ERR(__fence)) {
@@ -428,7 +586,7 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
}
/* Setup physical address of next device page */
- if (dma_addr[i] && spage) {
+ if (pagemap_addr[i].addr && spage) {
vram_addr = __vram_addr;
pos = i;
} else {
@@ -437,19 +595,21 @@ static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
/* Extra mismatched device page, copy it */
if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
+ xe_svm_copy_kb_stats_incr(gt, dir,
+ (PAGE_SIZE / SZ_1K));
if (sram) {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
- vram_addr, (u64)dma_addr[pos], 1);
- __fence = xe_migrate_from_vram(tile->migrate, 1,
+ vram_addr, (u64)pagemap_addr[pos].addr, 1);
+ __fence = xe_migrate_from_vram(vr->migrate, 1,
vram_addr,
- dma_addr + pos);
+ &pagemap_addr[pos]);
} else {
- vm_dbg(&tile->xe->drm,
+ vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
- (u64)dma_addr[pos], vram_addr, 1);
- __fence = xe_migrate_to_vram(tile->migrate, 1,
- dma_addr + pos,
+ (u64)pagemap_addr[pos].addr, vram_addr, 1);
+ __fence = xe_migrate_to_vram(vr->migrate, 1,
+ &pagemap_addr[pos],
vram_addr);
}
if (IS_ERR(__fence)) {
@@ -470,21 +630,31 @@ err_out:
dma_fence_put(fence);
}
+ /*
+	 * XXX: We can't derive the GT here (or anywhere in this function), but
+	 * compute always uses the primary GT, so accumulate stats on the likely
+ * GT of the fault.
+ */
+ if (gt)
+ xe_svm_copy_us_stats_incr(gt, dir, npages, start);
+
return err;
#undef XE_MIGRATE_CHUNK_SIZE
#undef XE_VRAM_ADDR_INVALID
}
-static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
+static int xe_svm_copy_to_devmem(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
{
- return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
}
-static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
+static int xe_svm_copy_to_ram(struct page **pages,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages)
{
- return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
}
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
@@ -506,9 +676,9 @@ static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
return PHYS_PFN(offset + vr->hpa_base);
}
-static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
+static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
{
- return &tile->mem.vram.ttm.mm;
+ return &vram->ttm.mm;
}
static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
@@ -522,8 +692,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocati
list_for_each_entry(block, blocks, link) {
struct xe_vram_region *vr = block->private;
- struct xe_tile *tile = vr_to_tile(vr);
- struct drm_buddy *buddy = tile_to_buddy(tile);
+ struct drm_buddy *buddy = vram_to_buddy(vr);
u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
int i;
@@ -567,22 +736,25 @@ int xe_svm_init(struct xe_vm *vm)
{
int err;
- spin_lock_init(&vm->svm.garbage_collector.lock);
- INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
- INIT_WORK(&vm->svm.garbage_collector.work,
- xe_svm_garbage_collector_work_func);
-
- err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
- current->mm, xe_svm_devm_owner(vm->xe), 0,
- vm->size, xe_modparam.svm_notifier_size * SZ_1M,
- &gpusvm_ops, fault_chunk_sizes,
- ARRAY_SIZE(fault_chunk_sizes));
- if (err)
- return err;
-
- drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
+ spin_lock_init(&vm->svm.garbage_collector.lock);
+ INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+ INIT_WORK(&vm->svm.garbage_collector.work,
+ xe_svm_garbage_collector_work_func);
+
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
+ current->mm, 0, vm->size,
+ xe_modparam.svm_notifier_size * SZ_1M,
+ &gpusvm_ops, fault_chunk_sizes,
+ ARRAY_SIZE(fault_chunk_sizes));
+ drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);
+ } else {
+ err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
+ &vm->xe->drm, NULL, 0, 0, 0, NULL,
+ NULL, 0);
+ }
- return 0;
+ return err;
}
/**
@@ -653,7 +825,7 @@ bool xe_svm_range_validate(struct xe_vm *vm,
xe_svm_notifier_lock(vm);
ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
- (devmem_preferred == range->base.flags.has_devmem_pages);
+ (devmem_preferred == range->base.pages.flags.has_devmem_pages);
xe_svm_notifier_unlock(vm);
@@ -683,66 +855,57 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *v
}
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
-{
- return &tile->mem.vram;
-}
-
static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms)
{
- struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
- struct xe_device *xe = tile_to_xe(tile);
+ struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
- struct xe_vram_region *vr = tile_to_vr(tile);
struct drm_buddy_block *block;
+ struct xe_validation_ctx vctx;
struct list_head *blocks;
+ struct drm_exec exec;
struct xe_bo *bo;
- ktime_t time_end = 0;
- int err, idx;
+ int err = 0, idx;
if (!drm_dev_enter(&xe->drm, &idx))
return -ENODEV;
xe_pm_runtime_get(xe);
- retry:
- bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
- ttm_bo_type_device,
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_CPU_ADDR_MIRROR);
- if (IS_ERR(bo)) {
- err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &time_end))
- goto retry;
- goto out_pm_put;
- }
-
- drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
- &dpagemap_devmem_ops,
- &tile->mem.vram.dpagemap,
- end - start);
-
- blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
- list_for_each_entry(block, blocks, link)
- block->private = vr;
+ xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
+ bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
+ ttm_bo_type_device,
+ (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
+ XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
+ drm_exec_retry_on_contention(&exec);
+ if (IS_ERR(bo)) {
+ err = PTR_ERR(bo);
+ xe_validation_retry_on_oom(&vctx, &err);
+ break;
+ }
- xe_bo_get(bo);
+ drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
+ &dpagemap_devmem_ops, dpagemap, end - start);
- /* Ensure the device has a pm ref while there are device pages active. */
- xe_pm_runtime_get_noresume(xe);
- err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
- start, end, timeslice_ms,
- xe_svm_devm_owner(xe));
- if (err)
- xe_svm_devmem_release(&bo->devmem_allocation);
+ blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
+ list_for_each_entry(block, blocks, link)
+ block->private = vr;
- xe_bo_unlock(bo);
- xe_bo_put(bo);
+ xe_bo_get(bo);
-out_pm_put:
+ /* Ensure the device has a pm ref while there are device pages active. */
+ xe_pm_runtime_get_noresume(xe);
+ err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
+ start, end, timeslice_ms,
+ xe_svm_devm_owner(xe));
+ if (err)
+ xe_svm_devmem_release(&bo->devmem_allocation);
+ xe_bo_unlock(bo);
+ xe_bo_put(bo);
+ }
xe_pm_runtime_put(xe);
drm_dev_exit(idx);
@@ -772,17 +935,17 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
- if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
+ if (!range->base.pages.flags.migrate_devmem || !preferred_region_is_vram)
return false;
xe_assert(vm->xe, IS_DGFX(vm->xe));
- if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+ if (xe_svm_range_in_vram(range)) {
drm_info(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
- if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
+ if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
return false;
}
@@ -790,40 +953,78 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
return true;
}
-/**
- * xe_svm_handle_pagefault() - SVM handle page fault
- * @vm: The VM.
- * @vma: The CPU address mirror VMA.
- * @gt: The gt upon the fault occurred.
- * @fault_addr: The GPU fault address.
- * @atomic: The fault atomic access bit.
- *
- * Create GPU bindings for a SVM page fault. Optionally migrate to device
- * memory.
- *
- * Return: 0 on success, negative error code on error.
- */
-int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
- struct xe_gt *gt, u64 fault_addr,
- bool atomic)
+#define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
+static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
+ struct xe_svm_range *range) \
+{ \
+ switch (xe_svm_range_size(range)) { \
+ case SZ_4K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
+ break; \
+ case SZ_64K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
+ break; \
+ case SZ_2M: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
+ break; \
+ } \
+} \
+
+DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
+DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
+DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
+
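For reference, the first invocation above, DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT), expands to roughly:

static void xe_svm_range_fault_count_stats_incr(struct xe_gt *gt,
						struct xe_svm_range *range)
{
	switch (xe_svm_range_size(range)) {
	case SZ_4K:
		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT, 1);
		break;
	case SZ_64K:
		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_PAGEFAULT_COUNT, 1);
		break;
	case SZ_2M:
		xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_PAGEFAULT_COUNT, 1);
		break;
	}
}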
+#define DECL_SVM_RANGE_US_STATS(elem, stat) \
+static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
+ struct xe_svm_range *range, \
+ ktime_t start) \
+{ \
+ s64 us_delta = xe_svm_stats_ktime_us_delta(start); \
+\
+ switch (xe_svm_range_size(range)) { \
+ case SZ_4K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
+ us_delta); \
+ break; \
+ case SZ_64K: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
+ us_delta); \
+ break; \
+ case SZ_2M: \
+ xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
+ us_delta); \
+ break; \
+ } \
+} \
+
+DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
+DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
+DECL_SVM_RANGE_US_STATS(bind, BIND)
+DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)
+
+static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool need_vram)
{
+ int devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
struct drm_gpusvm_ctx ctx = {
.read_only = xe_vma_read_only(vma),
- .devmem_possible = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
- .check_pages_threshold = IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
- .devmem_only = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
- .timeslice_ms = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
+ .devmem_possible = devmem_possible,
+ .check_pages_threshold = devmem_possible ? SZ_64K : 0,
+ .devmem_only = need_vram && devmem_possible,
+ .timeslice_ms = need_vram && devmem_possible ?
vm->xe->atomic_svm_timeslice_ms : 0,
+ .device_private_page_owner = xe_svm_devm_owner(vm->xe),
};
+ struct xe_validation_ctx vctx;
+ struct drm_exec exec;
struct xe_svm_range *range;
struct dma_fence *fence;
+ struct drm_pagemap *dpagemap;
struct xe_tile *tile = gt_to_tile(gt);
int migrate_try_count = ctx.devmem_only ? 3 : 1;
- ktime_t end = 0;
+ ktime_t start = xe_svm_stats_ktime_get(), bind_start, get_pages_start;
int err;
lockdep_assert_held_write(&vm->lock);
@@ -837,29 +1038,58 @@ retry:
if (err)
return err;
+ dpagemap = xe_vma_resolve_pagemap(vma, tile);
+ if (!dpagemap && !ctx.devmem_only)
+ ctx.device_private_page_owner = NULL;
range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
if (IS_ERR(range))
return PTR_ERR(range);
- if (ctx.devmem_only && !range->base.flags.migrate_devmem)
- return -EACCES;
+ xe_svm_range_fault_count_stats_incr(gt, range);
- if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
- return 0;
+ if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
+ err = -EACCES;
+ goto out;
+ }
+
+ if (xe_svm_range_is_valid(range, tile, ctx.devmem_only)) {
+ xe_svm_range_valid_fault_count_stats_incr(gt, range);
+ range_debug(range, "PAGE FAULT - VALID");
+ goto out;
+ }
range_debug(range, "PAGE FAULT");
if (--migrate_try_count >= 0 &&
- xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
+ xe_svm_range_needs_migrate_to_vram(range, vma, !!dpagemap || ctx.devmem_only)) {
+ ktime_t migrate_start = xe_svm_stats_ktime_get();
+
+		/* TODO: For multi-device, dpagemap will be used to find the
+		 * remote tile and remote device. xe_svm_alloc_vram will need
+		 * to be modified to use dpagemap for future multi-device
+		 * support.
+ */
+ xe_svm_range_migrate_count_stats_incr(gt, range);
err = xe_svm_alloc_vram(tile, range, &ctx);
+ xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
if (migrate_try_count || !ctx.devmem_only) {
drm_dbg(&vm->xe->drm,
"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
vm->usm.asid, ERR_PTR(err));
- goto retry;
+
+ /*
+ * In the devmem-only case, mixed mappings may
+ * be found. The get_pages function will fix
+ * these up to a single location, allowing the
+ * page fault handler to make forward progress.
+ */
+ if (ctx.devmem_only)
+ goto get_pages;
+ else
+ goto retry;
} else {
drm_err(&vm->xe->drm,
"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
@@ -869,6 +1099,9 @@ retry:
}
}
+get_pages:
+ get_pages_start = xe_svm_stats_ktime_get();
+
range_debug(range, "GET PAGES");
err = xe_svm_range_get_pages(vm, range, &ctx);
/* Corner where CPU mappings have changed */
@@ -888,37 +1121,89 @@ retry:
}
if (err) {
range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
- goto err_out;
+ goto out;
}
+ xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
range_debug(range, "PAGE FAULT - BIND");
-retry_bind:
- xe_vm_lock(vm, false);
- fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
- if (IS_ERR(fence)) {
- xe_vm_unlock(vm);
- err = PTR_ERR(fence);
- if (err == -EAGAIN) {
- ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
- range_debug(range, "PAGE FAULT - RETRY BIND");
- goto retry;
+ bind_start = xe_svm_stats_ktime_get();
+ xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
+
+ xe_vm_set_validation_exec(vm, &exec);
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ xe_vm_set_validation_exec(vm, NULL);
+ if (IS_ERR(fence)) {
+ drm_exec_retry_on_contention(&exec);
+ err = PTR_ERR(fence);
+ xe_validation_retry_on_oom(&vctx, &err);
+ xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
+ break;
}
- if (xe_vm_validate_should_retry(NULL, err, &end))
- goto retry_bind;
- goto err_out;
}
- xe_vm_unlock(vm);
+ if (err)
+ goto err_out;
dma_fence_wait(fence, false);
dma_fence_put(fence);
+ xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
+
+out:
+ xe_svm_range_fault_us_stats_incr(gt, range, start);
+ return 0;
err_out:
+ if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
+ }
return err;
}
/**
+ * xe_svm_handle_pagefault() - SVM handle page fault
+ * @vm: The VM.
+ * @vma: The CPU address mirror VMA.
+ * @gt: The gt upon the fault occurred.
+ * @fault_addr: The GPU fault address.
+ * @atomic: The fault atomic access bit.
+ *
+ * Create GPU bindings for a SVM page fault. Optionally migrate to device
+ * memory.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
+ struct xe_gt *gt, u64 fault_addr,
+ bool atomic)
+{
+ int need_vram, ret;
+retry:
+ need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
+ if (need_vram < 0)
+ return need_vram;
+
+ ret = __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
+ need_vram ? true : false);
+ if (ret == -EAGAIN) {
+ /*
+ * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
+ * may have been split by xe_svm_range_set_default_attr.
+ */
+ vma = xe_vm_find_vma_by_addr(vm, fault_addr);
+ if (!vma)
+ return -EINVAL;
+
+ goto retry;
+ }
+ return ret;
+}
+
+/**
* xe_svm_has_mapping() - SVM has mappings
* @vm: The VM.
* @start: Start address.
@@ -934,6 +1219,41 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
}
/**
+ * xe_svm_unmap_address_range - Unmap SVM mappings and ranges
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ *
+ * This function unmaps SVM ranges if the start or end address falls inside them.
+ */
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier, *next;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *range, *__next;
+
+ drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
+ if (start > drm_gpusvm_range_start(range) ||
+ end < drm_gpusvm_range_end(range)) {
+ if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
+ drm_gpusvm_range_get(range);
+ __xe_svm_garbage_collector(vm, to_xe_range(range));
+ if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
+ spin_lock(&vm->svm.garbage_collector.lock);
+ list_del(&to_xe_range(range)->garbage_collector_link);
+ spin_unlock(&vm->svm.garbage_collector.lock);
+ }
+ drm_gpusvm_range_put(range);
+ }
+ }
+ }
+}
+
+/**
* xe_svm_bo_evict() - SVM evict BO to system memory
* @bo: BO to evict
*
@@ -967,7 +1287,7 @@ struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
xe_vma_start(vma), xe_vma_end(vma), ctx);
if (IS_ERR(r))
- return ERR_PTR(PTR_ERR(r));
+ return ERR_CAST(r);
return to_xe_range(r);
}
@@ -997,8 +1317,94 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
return err;
}
+/**
+ * xe_svm_ranges_zap_ptes_in_range - clear ptes of svm ranges in input range
+ * @vm: Pointer to the xe_vm structure
+ * @start: Start of the input range
+ * @end: End of the input range
+ *
+ * This function removes the page table entries (PTEs) associated
+ * with the SVM ranges within the given input start and end.
+ *
+ * Return: tile_mask indicating which GTs need to be TLB invalidated.
+ */
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpusvm_notifier *notifier;
+ struct xe_svm_range *range;
+ u64 adj_start, adj_end;
+ struct xe_tile *tile;
+ u8 tile_mask = 0;
+ u8 id;
+
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
+ lockdep_is_held_type(&vm->lock, 0));
+
+ drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
+ struct drm_gpusvm_range *r = NULL;
+
+ adj_start = max(start, drm_gpusvm_notifier_start(notifier));
+ adj_end = min(end, drm_gpusvm_notifier_end(notifier));
+ drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
+ range = to_xe_range(r);
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes_range(tile, vm, range)) {
+ tile_mask |= BIT(id);
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_mapping().
+ * Must not fail after setting
+ * tile_invalidated and before
+ * TLB invalidation.
+ */
+ WRITE_ONCE(range->tile_invalidated,
+ range->tile_invalidated | BIT(id));
+ }
+ }
+ }
+ }
+
+ return tile_mask;
+}
+
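As a usage illustration, a minimal caller sketch (hypothetical; it assumes vm->lock is already held for write, as the lockdep assertion above requires) that zaps the PTEs and then invalidates the TLBs of the affected tiles:

/* Hypothetical caller sketch; vm->lock must be held for write. */
static int zap_and_inval(struct xe_vm *vm, u64 start, u64 end)
{
	u8 tile_mask;
	int err = 0;

	xe_svm_notifier_lock(vm);
	tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
	if (tile_mask) {
		/* Order the PTE zaps before the TLB invalidation */
		xe_device_wmb(vm->xe);
		err = xe_vm_range_tilemask_tlb_inval(vm, start, end,
						     tile_mask);
	}
	xe_svm_notifier_unlock(vm);

	return err;
}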
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+static struct drm_pagemap *tile_local_pagemap(struct xe_tile *tile)
+{
+ return &tile->mem.vram->dpagemap;
+}
+
+/**
+ * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
+ * @vma: Pointer to the xe_vma structure containing memory attributes
+ * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
+ *
+ * This function determines the correct DRM pagemap to use for a given VMA.
+ * It first checks if a valid devmem_fd is provided in the VMA's preferred
+ * location. If the devmem_fd is negative, it returns NULL, indicating no
+ * pagemap is available and smem to be used as preferred location.
+ * If the devmem_fd is equal to the default faulting
+ * GT identifier, it returns the VRAM pagemap associated with the tile.
+ *
+ * Future support for multi-device configurations may use drm_pagemap_from_fd()
+ * to resolve pagemaps from arbitrary file descriptors.
+ *
+ * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
+ */
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ s32 fd = (s32)vma->attr.preferred_loc.devmem_fd;
+
+ if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
+ return NULL;
+
+ if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
+ return IS_DGFX(tile_to_xe(tile)) ? tile_local_pagemap(tile) : NULL;
+
+ /* TODO: Support multi-device with drm_pagemap_from_fd(fd) */
+ return NULL;
+}
+
/**
* xe_svm_alloc_vram()- Allocate device memory pages for range,
* migrating existing data.
@@ -1013,17 +1419,17 @@ int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range,
{
struct drm_pagemap *dpagemap;
- xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);
+ xe_assert(tile_to_xe(tile), range->base.pages.flags.migrate_devmem);
range_debug(range, "ALLOCATE VRAM");
- dpagemap = xe_tile_local_pagemap(tile);
+ dpagemap = tile_local_pagemap(tile);
return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
xe_svm_range_end(range),
range->base.gpusvm->mm,
ctx->timeslice_ms);
}
-static struct drm_pagemap_device_addr
+static struct drm_pagemap_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
struct device *dev,
struct page *page,
@@ -1042,7 +1448,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
prot = 0;
}
- return drm_pagemap_device_addr_encode(addr, prot, order, dir);
+ return drm_pagemap_addr_encode(addr, prot, order, dir);
}
static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
@@ -1111,6 +1517,11 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
return 0;
}
+
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ return NULL;
+}
#endif
/**
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index da9a69ea0bb1..0955d2ac8d74 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -6,6 +6,20 @@
#ifndef _XE_SVM_H_
#define _XE_SVM_H_
+struct xe_device;
+
+/**
+ * xe_svm_devm_owner() - Return the owner of device private memory
+ * @xe: The xe device.
+ *
+ * Return: The owner of this device's device private memory to use in
+ * hmm_range_fault().
+ */
+static inline void *xe_svm_devm_owner(struct xe_device *xe)
+{
+ return xe;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
#include <drm/drm_pagemap.h>
@@ -90,6 +104,12 @@ bool xe_svm_range_validate(struct xe_vm *vm,
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end);
+
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end);
+
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -99,7 +119,7 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *v
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
lockdep_assert_held(&range->base.gpusvm->notifier_lock);
- return range->base.flags.has_dma_mapping;
+ return range->base.pages.flags.has_dma_mapping;
}
/**
@@ -149,21 +169,13 @@ static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
return drm_gpusvm_range_size(&range->base);
}
-#define xe_svm_assert_in_notifier(vm__) \
- lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
-
-#define xe_svm_notifier_lock(vm__) \
- drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
-
-#define xe_svm_notifier_unlock(vm__) \
- drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
-
void xe_svm_flush(struct xe_vm *vm);
#else
#include <linux/interval_tree.h>
+#include "xe_vm.h"
-struct drm_pagemap_device_addr;
+struct drm_pagemap_addr;
struct drm_gpusvm_ctx;
struct drm_gpusvm_range;
struct xe_bo;
@@ -178,7 +190,9 @@ struct xe_vram_region;
struct xe_svm_range {
struct {
struct interval_tree_node itree;
- const struct drm_pagemap_device_addr *dma_addr;
+ struct {
+ const struct drm_pagemap_addr *dma_addr;
+ } pages;
} base;
u32 tile_present;
u32 tile_invalidated;
@@ -198,12 +212,21 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
static inline
int xe_svm_init(struct xe_vm *vm)
{
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+ return drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)", &vm->xe->drm,
+ NULL, NULL, 0, 0, 0, NULL, NULL, 0);
+#else
return 0;
+#endif
}
static inline
void xe_svm_fini(struct xe_vm *vm)
{
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+ xe_assert(vm->xe, xe_vm_is_closed(vm));
+ drm_gpusvm_fini(&vm->svm.gpusvm);
+#endif
}
static inline
@@ -303,19 +326,64 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vm
return ULONG_MAX;
}
-#define xe_svm_assert_in_notifier(...) do {} while (0)
+static inline
+void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
+{
+}
+
+static inline
+u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ return 0;
+}
+
+static inline
+struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
+{
+ return NULL;
+}
+
+static inline void xe_svm_flush(struct xe_vm *vm)
+{
+}
#define xe_svm_range_has_dma_mapping(...) false
+#endif /* CONFIG_DRM_XE_GPUSVM */
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM) /* Need to support userptr without XE_GPUSVM */
+#define xe_svm_assert_in_notifier(vm__) \
+ lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_assert_held_read(vm__) \
+ lockdep_assert_held_read(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_lock(vm__) \
+ drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)
+
+#define xe_svm_notifier_lock_interruptible(vm__) \
+ down_read_interruptible(&(vm__)->svm.gpusvm.notifier_lock)
+
+#define xe_svm_notifier_unlock(vm__) \
+ drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+
+#else
+#define xe_svm_assert_in_notifier(...) do {} while (0)
+
+static inline void xe_svm_assert_held_read(struct xe_vm *vm)
+{
+}
static inline void xe_svm_notifier_lock(struct xe_vm *vm)
{
}
-static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
+static inline int xe_svm_notifier_lock_interruptible(struct xe_vm *vm)
{
+ return 0;
}
-static inline void xe_svm_flush(struct xe_vm *vm)
+static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
-#endif
+#endif /* CONFIG_DRM_GPUSVM */
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 82872a51f098..ff74528ca0c6 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -14,7 +14,7 @@
#include <drm/drm_syncobj.h>
#include <uapi/drm/xe_drm.h>
-#include "xe_device_types.h"
+#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_macros.h"
#include "xe_sched_job_types.h"
@@ -113,6 +113,8 @@ static void user_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
struct drm_xe_sync __user *sync_user,
+ struct drm_syncobj *ufence_syncobj,
+ u64 ufence_timeline_value,
unsigned int flags)
{
struct drm_xe_sync sync_in;
@@ -192,10 +194,15 @@ int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
if (exec) {
sync->addr = sync_in.addr;
} else {
+ sync->ufence_timeline_value = ufence_timeline_value;
sync->ufence = user_fence_create(xe, sync_in.addr,
sync_in.timeline_value);
if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
return PTR_ERR(sync->ufence);
+ sync->ufence_chain_fence = dma_fence_chain_alloc();
+ if (!sync->ufence_chain_fence)
+ return -ENOMEM;
+ sync->ufence_syncobj = ufence_syncobj;
}
break;
@@ -239,7 +246,12 @@ void xe_sync_entry_signal(struct xe_sync_entry *sync, struct dma_fence *fence)
} else if (sync->ufence) {
int err;
- dma_fence_get(fence);
+ drm_syncobj_add_point(sync->ufence_syncobj,
+ sync->ufence_chain_fence,
+ fence, sync->ufence_timeline_value);
+ sync->ufence_chain_fence = NULL;
+
+ fence = drm_syncobj_fence_get(sync->ufence_syncobj);
user_fence_get(sync->ufence);
err = dma_fence_add_callback(fence, &sync->ufence->cb,
user_fence_cb);
@@ -259,7 +271,8 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
drm_syncobj_put(sync->syncobj);
dma_fence_put(sync->fence);
dma_fence_chain_free(sync->chain_fence);
- if (sync->ufence)
+ dma_fence_chain_free(sync->ufence_chain_fence);
+ if (!IS_ERR_OR_NULL(sync->ufence))
user_fence_put(sync->ufence);
}
@@ -284,51 +297,59 @@ xe_sync_in_fence_get(struct xe_sync_entry *sync, int num_sync,
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
struct dma_fence *fence;
- int i, num_in_fence = 0, current_fence = 0;
+ int i, num_fence = 0, current_fence = 0;
lockdep_assert_held(&vm->lock);
- /* Count in-fences */
- for (i = 0; i < num_sync; ++i) {
- if (sync[i].fence) {
- ++num_in_fence;
- fence = sync[i].fence;
+ /* Reject in fences */
+ for (i = 0; i < num_sync; ++i)
+ if (sync[i].fence)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (q->flags & EXEC_QUEUE_FLAG_VM) {
+ struct xe_exec_queue *__q;
+ struct xe_tile *tile;
+ u8 id;
+
+ for_each_tile(tile, vm->xe, id)
+ num_fence += (1 + XE_MAX_GT_PER_TILE);
+
+ fences = kmalloc_array(num_fence, sizeof(*fences),
+ GFP_KERNEL);
+ if (!fences)
+ return ERR_PTR(-ENOMEM);
+
+ fences[current_fence++] =
+ xe_exec_queue_last_fence_get(q, vm);
+ for_each_tlb_inval(i)
+ fences[current_fence++] =
+ xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
+ list_for_each_entry(__q, &q->multi_gt_list,
+ multi_gt_link) {
+ fences[current_fence++] =
+ xe_exec_queue_last_fence_get(__q, vm);
+ for_each_tlb_inval(i)
+ fences[current_fence++] =
+ xe_exec_queue_tlb_inval_last_fence_get(__q, vm, i);
}
- }
- /* Easy case... */
- if (!num_in_fence) {
- fence = xe_exec_queue_last_fence_get(q, vm);
- return fence;
- }
+ xe_assert(vm->xe, current_fence == num_fence);
+ cf = dma_fence_array_create(num_fence, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!cf)
+ goto err_out;
- /* Create composite fence */
- fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL);
- if (!fences)
- return ERR_PTR(-ENOMEM);
- for (i = 0; i < num_sync; ++i) {
- if (sync[i].fence) {
- dma_fence_get(sync[i].fence);
- fences[current_fence++] = sync[i].fence;
- }
- }
- fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm);
- cf = dma_fence_array_create(num_in_fence, fences,
- vm->composite_fence_ctx,
- vm->composite_fence_seqno++,
- false);
- if (!cf) {
- --vm->composite_fence_seqno;
- goto err_out;
+ return &cf->base;
}
- return &cf->base;
+ fence = xe_exec_queue_last_fence_get(q, vm);
+ return fence;
err_out:
while (current_fence)
dma_fence_put(fences[--current_fence]);
kfree(fences);
- kfree(cf);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/gpu/drm/xe/xe_sync.h b/drivers/gpu/drm/xe/xe_sync.h
index 256ffc1e54dc..51f2d803e977 100644
--- a/drivers/gpu/drm/xe/xe_sync.h
+++ b/drivers/gpu/drm/xe/xe_sync.h
@@ -8,6 +8,7 @@
#include "xe_sync_types.h"
+struct drm_syncobj;
struct xe_device;
struct xe_exec_queue;
struct xe_file;
@@ -21,6 +22,8 @@ struct xe_vm;
int xe_sync_entry_parse(struct xe_device *xe, struct xe_file *xef,
struct xe_sync_entry *sync,
struct drm_xe_sync __user *sync_user,
+ struct drm_syncobj *ufence_syncobj,
+ u64 ufence_timeline_value,
unsigned int flags);
int xe_sync_entry_add_deps(struct xe_sync_entry *sync,
struct xe_sched_job *job);
diff --git a/drivers/gpu/drm/xe/xe_sync_types.h b/drivers/gpu/drm/xe/xe_sync_types.h
index 30ac3f51993b..b88f1833e28c 100644
--- a/drivers/gpu/drm/xe/xe_sync_types.h
+++ b/drivers/gpu/drm/xe/xe_sync_types.h
@@ -18,9 +18,12 @@ struct xe_sync_entry {
struct drm_syncobj *syncobj;
struct dma_fence *fence;
struct dma_fence_chain *chain_fence;
+ struct dma_fence_chain *ufence_chain_fence;
+ struct drm_syncobj *ufence_syncobj;
struct xe_user_fence *ufence;
u64 addr;
u64 timeline_value;
+ u64 ufence_timeline_value;
u32 type;
u32 flags;
};
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 86e9811e60ba..4f4f9a5c43af 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -7,6 +7,7 @@
#include <drm/drm_managed.h>
+#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
@@ -18,6 +19,8 @@
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram.h"
+#include "xe_vram_types.h"
#include "xe_wa.h"
/**
@@ -92,6 +95,43 @@ static int xe_tile_alloc(struct xe_tile *tile)
if (!tile->mem.ggtt)
return -ENOMEM;
+ tile->migrate = xe_migrate_alloc(tile);
+ if (!tile->migrate)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * xe_tile_alloc_vram - Perform per-tile VRAM structs allocation
+ * @tile: Tile to perform allocations for
+ *
+ * Allocates VRAM per-tile data structures using DRM-managed allocations.
+ * Does not touch the hardware.
+ *
+ * Returns -ENOMEM if allocations fail, otherwise 0.
+ */
+int xe_tile_alloc_vram(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct xe_vram_region *vram;
+
+ if (!IS_DGFX(xe))
+ return 0;
+
+ vram = xe_vram_region_alloc(xe, tile->id, XE_PL_VRAM0 + tile->id);
+ if (!vram)
+ return -ENOMEM;
+ tile->mem.vram = vram;
+
+ /*
+	 * If kernel_vram is not already allocated, it means that the
+	 * tile has a common VRAM region for kernel and user space.
+ */
+ if (!tile->mem.kernel_vram)
+ tile->mem.kernel_vram = tile->mem.vram;
+
return 0;
}
@@ -117,31 +157,12 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
if (err)
return err;
- tile->primary_gt = xe_gt_alloc(tile);
- if (IS_ERR(tile->primary_gt))
- return PTR_ERR(tile->primary_gt);
-
xe_pcode_init(tile);
return 0;
}
ALLOW_ERROR_INJECTION(xe_tile_init_early, ERRNO); /* See xe_pci_probe() */
-static int tile_ttm_mgr_init(struct xe_tile *tile)
-{
- struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- if (tile->mem.vram.usable_size) {
- err = xe_ttm_vram_mgr_init(tile, &tile->mem.vram.ttm);
- if (err)
- return err;
- xe->info.mem_region_mask |= BIT(tile->id) << 1;
- }
-
- return 0;
-}
-
/**
* xe_tile_init_noalloc - Init tile up to the point where allocations can happen.
* @tile: The tile to initialize.
@@ -159,16 +180,19 @@ static int tile_ttm_mgr_init(struct xe_tile *tile)
int xe_tile_init_noalloc(struct xe_tile *tile)
{
struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- err = tile_ttm_mgr_init(tile);
- if (err)
- return err;
xe_wa_apply_tile_workarounds(tile);
if (xe->info.has_usm && IS_DGFX(xe))
- xe_devm_add(tile, &tile->mem.vram);
+ xe_devm_add(tile, tile->mem.vram);
+
+ if (IS_DGFX(xe) && !ttm_resource_manager_used(&tile->mem.vram->ttm.manager)) {
+ int err = xe_ttm_vram_mgr_init(xe, tile->mem.vram);
+
+ if (err)
+ return err;
+ xe->info.mem_region_mask |= BIT(tile->mem.vram->id) << 1;
+ }
return xe_tile_sysfs_init(tile);
}
diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h
index cc33e8733983..dceb6297aa01 100644
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -14,19 +14,9 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id);
int xe_tile_init_noalloc(struct xe_tile *tile);
int xe_tile_init(struct xe_tile *tile);
-void xe_tile_migrate_wait(struct xe_tile *tile);
+int xe_tile_alloc_vram(struct xe_tile *tile);
-#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
-static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
-{
- return &tile->mem.vram.dpagemap;
-}
-#else
-static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
-{
- return NULL;
-}
-#endif
+void xe_tile_migrate_wait(struct xe_tile *tile);
static inline bool xe_tile_is_root(struct xe_tile *tile)
{
diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.c b/drivers/gpu/drm/xe/xe_tile_debugfs.c
new file mode 100644
index 000000000000..fff242a5ae56
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_debugfs.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+
+#include "xe_ggtt.h"
+#include "xe_pm.h"
+#include "xe_sa.h"
+#include "xe_tile_debugfs.h"
+
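+/*
+ * Tile debugfs files are created directly under the tile's debugfs
+ * directory, so the owning tile is recovered from the parent dentry's
+ * inode private data, set in xe_tile_debugfs_register().
+ */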
+static struct xe_tile *node_to_tile(struct drm_info_node *node)
+{
+ return node->dent->d_parent->d_inode->i_private;
+}
+
+/**
+ * xe_tile_debugfs_simple_show() - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * This callback can be used in struct drm_info_list to describe debugfs
+ * files that are &xe_tile specific.
+ *
+ * It is assumed that those debugfs files will be created in a directory
+ * whose struct dentry d_inode->i_private points to the &xe_tile.
+ *
+ * /sys/kernel/debug/dri/0/
+ * ├── tile0/ # tile = dentry->d_inode->i_private
+ * │   ├── id		# tile = dentry->d_parent->d_inode->i_private
+ *
+ * This function assumes that &m->private will be set to the &struct
+ * drm_info_node corresponding to the instance of the info on a given &struct
+ * drm_minor (see struct drm_info_list.show for details).
+ *
+ * This function also assumes that struct drm_info_list.data will point to the
+ * function code that will actually print a file content::
+ *
+ * int (*print)(struct xe_tile *, struct drm_printer *)
+ *
+ * Example::
+ *
+ * int tile_id(struct xe_tile *tile, struct drm_printer *p)
+ * {
+ * drm_printf(p, "%u\n", tile->id);
+ * return 0;
+ * }
+ *
+ * static const struct drm_info_list info[] = {
+ *	{ .name = "id", .show = xe_tile_debugfs_simple_show, .data = tile_id },
+ * };
+ *
+ * dir = debugfs_create_dir("tile0", parent);
+ * dir->d_inode->i_private = tile;
+ * drm_debugfs_create_files(info, ARRAY_SIZE(info), dir, minor);
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_debugfs_simple_show(struct seq_file *m, void *data)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct drm_info_node *node = m->private;
+ struct xe_tile *tile = node_to_tile(node);
+ int (*print)(struct xe_tile *, struct drm_printer *) = node->info_ent->data;
+
+ return print(tile, &p);
+}
+
+/**
+ * xe_tile_debugfs_show_with_rpm() - A show callback for struct drm_info_list
+ * @m: the &seq_file
+ * @data: data used by the drm debugfs helpers
+ *
+ * Similar to xe_tile_debugfs_simple_show(), but implicitly takes an RPM ref.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_debugfs_show_with_rpm(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct xe_tile *tile = node_to_tile(node);
+ struct xe_device *xe = tile_to_xe(tile);
+ int ret;
+
+ xe_pm_runtime_get(xe);
+ ret = xe_tile_debugfs_simple_show(m, data);
+ xe_pm_runtime_put(xe);
+
+ return ret;
+}
+
+static int ggtt(struct xe_tile *tile, struct drm_printer *p)
+{
+ return xe_ggtt_dump(tile->mem.ggtt, p);
+}
+
+static int sa_info(struct xe_tile *tile, struct drm_printer *p)
+{
+ drm_suballoc_dump_debug_info(&tile->mem.kernel_bb_pool->base, p,
+ xe_sa_manager_gpu_addr(tile->mem.kernel_bb_pool));
+
+ return 0;
+}
+
+/* only for debugfs files which can be safely used on the VF */
+static const struct drm_info_list vf_safe_debugfs_list[] = {
+ { "ggtt", .show = xe_tile_debugfs_show_with_rpm, .data = ggtt },
+ { "sa_info", .show = xe_tile_debugfs_show_with_rpm, .data = sa_info },
+};
+
+/**
+ * xe_tile_debugfs_register - Register tile's debugfs attributes
+ * @tile: the &xe_tile to register
+ *
+ * Creates a debugfs sub-directory whose name includes the tile ID and then
+ * populates it with a set of debugfs files (attributes) specific to this tile.
+ */
+void xe_tile_debugfs_register(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ struct drm_minor *minor = xe->drm.primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[8];
+
+ snprintf(name, sizeof(name), "tile%u", tile->id);
+ tile->debugfs = debugfs_create_dir(name, root);
+ if (IS_ERR(tile->debugfs))
+ return;
+
+ /*
+ * Store the xe_tile pointer as private data of the tile/ directory
+ * node so other tile specific attributes under that directory may
+ * refer to it by looking at its parent node private data.
+ */
+ tile->debugfs->d_inode->i_private = tile;
+
+ drm_debugfs_create_files(vf_safe_debugfs_list,
+ ARRAY_SIZE(vf_safe_debugfs_list),
+ tile->debugfs, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_tile_debugfs.h b/drivers/gpu/drm/xe/xe_tile_debugfs.h
new file mode 100644
index 000000000000..4429c22542f4
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_debugfs.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_DEBUGFS_H_
+#define _XE_TILE_DEBUGFS_H_
+
+struct seq_file;
+struct xe_tile;
+
+void xe_tile_debugfs_register(struct xe_tile *tile);
+int xe_tile_debugfs_simple_show(struct seq_file *m, void *data);
+int xe_tile_debugfs_show_with_rpm(struct seq_file *m, void *data);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_printk.h b/drivers/gpu/drm/xe/xe_tile_printk.h
new file mode 100644
index 000000000000..63640a42685d
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_printk.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_PRINTK_H_
+#define _XE_TILE_PRINTK_H_
+
+#include "xe_printk.h"
+
+#define __XE_TILE_PRINTK_FMT(_tile, _fmt, _args...) "Tile%u: " _fmt, (_tile)->id, ##_args
+
+#define xe_tile_printk(_tile, _level, _fmt, ...) \
+ xe_printk((_tile)->xe, _level, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_err(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), err, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_err_once(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), err_once, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_err_ratelimited(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), err_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_warn(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), warn, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_notice(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), notice, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_info(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), info, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_dbg(_tile, _fmt, ...) \
+ xe_tile_printk((_tile), dbg, _fmt, ##__VA_ARGS__)
+
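+/*
+ * Example (illustrative, assuming a valid tile pointer with id == 1):
+ *
+ *	xe_tile_info(tile, "GGTT size: %llu", (unsigned long long)size);
+ *
+ * prints "Tile1: GGTT size: ..." via the underlying xe_printk() helpers, so
+ * callers never prepend the tile identifier themselves.
+ */
+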
+#define xe_tile_WARN_type(_tile, _type, _condition, _fmt, ...) \
+	xe_WARN##_type((_tile)->xe, _condition, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_WARN(_tile, _condition, _fmt, ...) \
+ xe_tile_WARN_type((_tile),, _condition, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_WARN_ONCE(_tile, _condition, _fmt, ...) \
+ xe_tile_WARN_type((_tile), _ONCE, _condition, __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_WARN_ON(_tile, _condition) \
+ xe_tile_WARN((_tile), _condition, "%s(%s)", "WARN_ON", __stringify(_condition))
+
+#define xe_tile_WARN_ON_ONCE(_tile, _condition) \
+ xe_tile_WARN_ONCE((_tile), _condition, "%s(%s)", "WARN_ON_ONCE", __stringify(_condition))
+
+static inline void __xe_tile_printfn_err(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_tile *tile = p->arg;
+
+ xe_tile_err(tile, "%pV", vaf);
+}
+
+static inline void __xe_tile_printfn_info(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_tile *tile = p->arg;
+
+ xe_tile_info(tile, "%pV", vaf);
+}
+
+static inline void __xe_tile_printfn_dbg(struct drm_printer *p, struct va_format *vaf)
+{
+ struct xe_tile *tile = p->arg;
+ struct drm_printer dbg;
+
+ /*
+	 * The original xe_tile_dbg() callsite annotations are useless here,
+	 * so redirect to the tweaked xe_dbg_printer() instead.
+ */
+ dbg = xe_dbg_printer(tile->xe);
+ dbg.origin = p->origin;
+
+ drm_printf(&dbg, __XE_TILE_PRINTK_FMT(tile, "%pV", vaf));
+}
+
+/**
+ * xe_tile_err_printer - Construct a &drm_printer that outputs to xe_tile_err()
+ * @tile: the &xe_tile pointer to use in xe_tile_err()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_tile_err_printer(struct xe_tile *tile)
+{
+ struct drm_printer p = {
+ .printfn = __xe_tile_printfn_err,
+ .arg = tile,
+ };
+ return p;
+}
+
+/**
+ * xe_tile_info_printer - Construct a &drm_printer that outputs to xe_tile_info()
+ * @tile: the &xe_tile pointer to use in xe_tile_info()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_tile_info_printer(struct xe_tile *tile)
+{
+ struct drm_printer p = {
+ .printfn = __xe_tile_printfn_info,
+ .arg = tile,
+ };
+ return p;
+}
+
+/**
+ * xe_tile_dbg_printer - Construct a &drm_printer that outputs like xe_tile_dbg()
+ * @tile: the &xe_tile pointer to use in xe_tile_dbg()
+ *
+ * Return: The &drm_printer object.
+ */
+static inline struct drm_printer xe_tile_dbg_printer(struct xe_tile *tile)
+{
+ struct drm_printer p = {
+ .printfn = __xe_tile_printfn_dbg,
+ .arg = tile,
+ .origin = (const void *)_THIS_IP_,
+ };
+ return p;
+}
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c
new file mode 100644
index 000000000000..f3f478f14ff5
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_debugfs.h>
+
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_debugfs.h"
+#include "xe_pm.h"
+#include "xe_tile_debugfs.h"
+#include "xe_tile_sriov_pf_debugfs.h"
+#include "xe_sriov.h"
+#include "xe_sriov_pf.h"
+#include "xe_sriov_pf_provision.h"
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov # d_inode->i_private = (xe_device*)
+ * │ ├── pf # d_inode->i_private = (xe_device*)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * │ │ : :
+ * │ ├── vf1 # d_inode->i_private = VFID(1)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * │ │ : :
+ * │ ├── vfN # d_inode->i_private = VFID(N)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * : : : :
+ */
+
+static void *extract_priv(struct dentry *d)
+{
+ return d->d_inode->i_private;
+}
+
+__maybe_unused
+static struct xe_tile *extract_tile(struct dentry *d)
+{
+ return extract_priv(d);
+}
+
+static struct xe_device *extract_xe(struct dentry *d)
+{
+ return extract_priv(d->d_parent->d_parent);
+}
+
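+/*
+ * The "pf" directory stores the &xe_device pointer as its i_private data,
+ * while each "vfN" directory stores VFID(N), so comparing the parent's
+ * private data against the device pointer distinguishes the PF from a VF.
+ */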
+__maybe_unused
+static unsigned int extract_vfid(struct dentry *d)
+{
+ void *pp = extract_priv(d->d_parent);
+
+ return pp == extract_xe(d) ? PFID : (uintptr_t)pp;
+}
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * : ├── tile0
+ * : ├── ggtt_available
+ * ├── ggtt_provisioned
+ */
+
+static int pf_config_print_available_ggtt(struct xe_tile *tile, struct drm_printer *p)
+{
+ return xe_gt_sriov_pf_config_print_available_ggtt(tile->primary_gt, p);
+}
+
+static int pf_config_print_ggtt(struct xe_tile *tile, struct drm_printer *p)
+{
+ return xe_gt_sriov_pf_config_print_ggtt(tile->primary_gt, p);
+}
+
+static const struct drm_info_list pf_ggtt_info[] = {
+ {
+ "ggtt_available",
+ .show = xe_tile_debugfs_simple_show,
+ .data = pf_config_print_available_ggtt,
+ },
+ {
+ "ggtt_provisioned",
+ .show = xe_tile_debugfs_simple_show,
+ .data = pf_config_print_ggtt,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * : ├── pf
+ * : ├── tile0
+ * : ├── vram_provisioned
+ */
+
+static int pf_config_print_vram(struct xe_tile *tile, struct drm_printer *p)
+{
+ return xe_gt_sriov_pf_config_print_lmem(tile->primary_gt, p);
+}
+
+static const struct drm_info_list pf_vram_info[] = {
+ {
+ "vram_provisioned",
+ .show = xe_tile_debugfs_simple_show,
+ .data = pf_config_print_vram,
+ },
+};
+
+/*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── pf
+ * │ │ ├── tile0
+ * │ │ │ ├── ggtt_spare
+ * │ │ │ ├── vram_spare
+ * │ │ ├── tile1
+ * │ │ : :
+ * │ ├── vf1
+ * │ : ├── tile0
+ * │ │ ├── ggtt_quota
+ * │ │ ├── vram_quota
+ * │ ├── tile1
+ * │ : :
+ */
+
+#define DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(NAME, CONFIG, TYPE, FORMAT) \
+ \
+static int NAME##_set(void *data, u64 val) \
+{ \
+ struct xe_tile *tile = extract_tile(data); \
+ unsigned int vfid = extract_vfid(data); \
+ struct xe_gt *gt = tile->primary_gt; \
+ struct xe_device *xe = tile->xe; \
+ int err; \
+ \
+ if (val > (TYPE)~0ull) \
+ return -EOVERFLOW; \
+ \
+ xe_pm_runtime_get(xe); \
+ err = xe_sriov_pf_wait_ready(xe) ?: \
+ xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \
+ if (!err) \
+ xe_sriov_pf_provision_set_custom_mode(xe); \
+ xe_pm_runtime_put(xe); \
+ \
+ return err; \
+} \
+ \
+static int NAME##_get(void *data, u64 *val) \
+{ \
+ struct xe_tile *tile = extract_tile(data); \
+ unsigned int vfid = extract_vfid(data); \
+ struct xe_gt *gt = tile->primary_gt; \
+ \
+ *val = xe_gt_sriov_pf_config_get_##CONFIG(gt, vfid); \
+ return 0; \
+} \
+ \
+DEFINE_DEBUGFS_ATTRIBUTE(NAME##_fops, NAME##_get, NAME##_set, FORMAT)
+
+DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(ggtt, ggtt, u64, "%llu\n");
+DEFINE_SRIOV_TILE_CONFIG_DEBUGFS_ATTRIBUTE(vram, lmem, u64, "%llu\n");
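+
+/*
+ * The instantiations above expand into ggtt_get()/ggtt_set() and
+ * vram_get()/vram_set() pairs wrapped in ggtt_fops/vram_fops, which back
+ * the "ggtt_spare"/"ggtt_quota" and "vram_spare"/"vram_quota" files
+ * created in pf_add_config_attrs() below.
+ */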
+
+static void pf_add_config_attrs(struct xe_tile *tile, struct dentry *dent, unsigned int vfid)
+{
+ struct xe_device *xe = tile->xe;
+
+ xe_tile_assert(tile, tile == extract_tile(dent));
+ xe_tile_assert(tile, vfid == extract_vfid(dent));
+
+ debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare",
+ 0644, dent, dent, &ggtt_fops);
+ if (IS_DGFX(xe))
+ debugfs_create_file_unsafe(vfid ? "vram_quota" : "vram_spare",
+ xe_device_has_lmtt(xe) ? 0644 : 0444,
+ dent, dent, &vram_fops);
+}
+
+static void pf_populate_tile(struct xe_tile *tile, struct dentry *dent, unsigned int vfid)
+{
+ struct xe_device *xe = tile->xe;
+ struct drm_minor *minor = xe->drm.primary;
+ struct xe_gt *gt;
+ unsigned int id;
+
+ pf_add_config_attrs(tile, dent, vfid);
+
+ if (!vfid) {
+ drm_debugfs_create_files(pf_ggtt_info,
+ ARRAY_SIZE(pf_ggtt_info),
+ dent, minor);
+ if (IS_DGFX(xe))
+ drm_debugfs_create_files(pf_vram_info,
+ ARRAY_SIZE(pf_vram_info),
+ dent, minor);
+ }
+
+ for_each_gt_on_tile(gt, tile, id)
+ xe_gt_sriov_pf_debugfs_populate(gt, dent, vfid);
+}
+
+/**
+ * xe_tile_sriov_pf_debugfs_populate() - Populate SR-IOV debugfs tree with tile files.
+ * @tile: the &xe_tile to register
+ * @parent: the parent &dentry that represents the SR-IOV @vfid function
+ * @vfid: the VF identifier
+ *
+ * Add to the @parent directory new debugfs directory that will represent a @tile and
+ * populate it with files that are related to the SR-IOV @vfid function.
+ *
+ * This function can only be called on PF.
+ */
+void xe_tile_sriov_pf_debugfs_populate(struct xe_tile *tile, struct dentry *parent,
+ unsigned int vfid)
+{
+ struct xe_device *xe = tile->xe;
+ struct dentry *dent;
+	char name[10]; /* enough for "tile%u" with id up to 2^16 - 1 */
+
+ xe_tile_assert(tile, IS_SRIOV_PF(xe));
+ xe_tile_assert(tile, extract_priv(parent->d_parent) == xe);
+ xe_tile_assert(tile, extract_priv(parent) == tile->xe ||
+ (uintptr_t)extract_priv(parent) == vfid);
+
+ /*
+ * /sys/kernel/debug/dri/BDF/
+ * ├── sriov
+ * │ ├── pf # parent, d_inode->i_private = (xe_device*)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * │ │ : :
+ * │ ├── vf1 # parent, d_inode->i_private = VFID(1)
+ * │ │ ├── tile0 # d_inode->i_private = (xe_tile*)
+ * │ │ ├── tile1
+ * : : : :
+ */
+ snprintf(name, sizeof(name), "tile%u", tile->id);
+ dent = debugfs_create_dir(name, parent);
+ if (IS_ERR(dent))
+ return;
+ dent->d_inode->i_private = tile;
+
+ xe_tile_assert(tile, extract_tile(dent) == tile);
+ xe_tile_assert(tile, extract_vfid(dent) == vfid);
+ xe_tile_assert(tile, extract_xe(dent) == xe);
+
+ pf_populate_tile(tile, dent, vfid);
+}
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h
new file mode 100644
index 000000000000..55d179c44634
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_pf_debugfs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_PF_DEBUGFS_H_
+#define _XE_TILE_SRIOV_PF_DEBUGFS_H_
+
+struct dentry;
+struct xe_tile;
+
+void xe_tile_sriov_pf_debugfs_populate(struct xe_tile *tile, struct dentry *parent,
+ unsigned int vfid);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_printk.h b/drivers/gpu/drm/xe/xe_tile_sriov_printk.h
new file mode 100644
index 000000000000..68323512872c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_printk.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_PRINTK_H_
+#define _XE_TILE_SRIOV_PRINTK_H_
+
+#include "xe_tile_printk.h"
+#include "xe_sriov_printk.h"
+
+#define __XE_TILE_SRIOV_PRINTK_FMT(_tile, _fmt, ...) \
+ __XE_TILE_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_printk(_tile, _level, _fmt, ...) \
+ xe_sriov_##_level((_tile)->xe, __XE_TILE_SRIOV_PRINTK_FMT((_tile), _fmt, ##__VA_ARGS__))
+
+#define xe_tile_sriov_err(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, err, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_notice(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, notice, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_info(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, info, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_dbg(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, dbg, _fmt, ##__VA_ARGS__)
+
+#define xe_tile_sriov_dbg_verbose(_tile, _fmt, ...) \
+ xe_tile_sriov_printk(_tile, dbg_verbose, _fmt, ##__VA_ARGS__)
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
index f221dbed16f0..c9bac2cfdd04 100644
--- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
@@ -9,7 +9,6 @@
#include "xe_assert.h"
#include "xe_ggtt.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_tile_sriov_vf.h"
@@ -40,10 +39,10 @@ static int vf_init_ggtt_balloons(struct xe_tile *tile)
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
+static int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
{
- u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt);
- u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt);
+ u64 ggtt_base = tile->sriov.vf.self_config.ggtt_base;
+ u64 ggtt_size = tile->sriov.vf.self_config.ggtt_size;
struct xe_device *xe = tile_to_xe(tile);
u64 wopcm = xe_wopcm_size(xe);
u64 start, end;
@@ -232,7 +231,7 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
*/
/**
- * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
+ * xe_tile_sriov_vf_fixup_ggtt_nodes_locked - Shift GGTT allocations to match assigned range.
* @tile: the &xe_tile struct instance
* @shift: the shift value
*
@@ -240,15 +239,112 @@ int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
* within the global space. This range might have changed during migration,
* which requires all memory addresses pointing to GGTT to be shifted.
*/
-void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
+void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift)
{
struct xe_ggtt *ggtt = tile->mem.ggtt;
- mutex_lock(&ggtt->lock);
+ lockdep_assert_held(&ggtt->lock);
xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
xe_ggtt_shift_nodes_locked(ggtt, shift);
xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+}
- mutex_unlock(&ggtt->lock);
+/**
+ * xe_tile_sriov_vf_lmem - VF LMEM configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the LMEM assigned to VF.
+ */
+u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ return config->lmem_size;
+}
+
+/**
+ * xe_tile_sriov_vf_lmem_store - Store VF LMEM configuration
+ * @tile: the &xe_tile
+ * @lmem_size: VF LMEM size to store
+ *
+ * This function is for VF use only.
+ */
+void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ config->lmem_size = lmem_size;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt - VF GGTT configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the GGTT assigned to VF.
+ */
+u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ return config->ggtt_size;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt_store - Store VF GGTT configuration
+ * @tile: the &xe_tile
+ * @ggtt_size: VF GGTT size to store
+ *
+ * This function is for VF use only.
+ */
+void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ config->ggtt_size = ggtt_size;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt_base - VF GGTT base configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: base of the GGTT assigned to VF.
+ */
+u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ return config->ggtt_base;
+}
+
+/**
+ * xe_tile_sriov_vf_ggtt_base_store - Store VF GGTT base configuration
+ * @tile: the &xe_tile
+ * @ggtt_base: VF GGTT base to store
+ *
+ * This function is for VF use only.
+ */
+void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_base)
+{
+ struct xe_tile_sriov_vf_selfconfig *config = &tile->sriov.vf.self_config;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ config->ggtt_base = ggtt_base;
}
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
index 93eb043171e8..749f41504883 100644
--- a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
@@ -11,8 +11,13 @@
struct xe_tile;
int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
-int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
-void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
+void xe_tile_sriov_vf_fixup_ggtt_nodes_locked(struct xe_tile *tile, s64 shift);
+u64 xe_tile_sriov_vf_ggtt(struct xe_tile *tile);
+void xe_tile_sriov_vf_ggtt_store(struct xe_tile *tile, u64 ggtt_size);
+u64 xe_tile_sriov_vf_ggtt_base(struct xe_tile *tile);
+void xe_tile_sriov_vf_ggtt_base_store(struct xe_tile *tile, u64 ggtt_base);
+u64 xe_tile_sriov_vf_lmem(struct xe_tile *tile);
+void xe_tile_sriov_vf_lmem_store(struct xe_tile *tile, u64 lmem_size);
#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
new file mode 100644
index 000000000000..4807ca51614c
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf_types.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_VF_TYPES_H_
+#define _XE_TILE_SRIOV_VF_TYPES_H_
+
+#include <linux/types.h>
+
+/**
+ * struct xe_tile_sriov_vf_selfconfig - VF configuration data.
+ */
+struct xe_tile_sriov_vf_selfconfig {
+ /** @ggtt_base: assigned base offset of the GGTT region. */
+ u64 ggtt_base;
+ /** @ggtt_size: assigned size of the GGTT region. */
+ u64 ggtt_size;
+ /** @lmem_size: assigned size of the LMEM. */
+ u64 lmem_size;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tile_sysfs.c b/drivers/gpu/drm/xe/xe_tile_sysfs.c
index b804234a6551..9e1236a9ec67 100644
--- a/drivers/gpu/drm/xe/xe_tile_sysfs.c
+++ b/drivers/gpu/drm/xe/xe_tile_sysfs.c
@@ -44,16 +44,18 @@ int xe_tile_sysfs_init(struct xe_tile *tile)
kt->tile = tile;
err = kobject_add(&kt->base, &dev->kobj, "tile%d", tile->id);
- if (err) {
- kobject_put(&kt->base);
- return err;
- }
+ if (err)
+ goto err_object;
tile->sysfs = &kt->base;
err = xe_vram_freq_sysfs_init(tile);
if (err)
- return err;
+ goto err_object;
return devm_add_action_or_reset(xe->drm.dev, tile_sysfs_fini, tile);
+
+err_object:
+ kobject_put(&kt->base);
+ return err;
}
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.c b/drivers/gpu/drm/xe/xe_tlb_inval.c
new file mode 100644
index 000000000000..918a59e686ea
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "abi/guc_actions_abi.h"
+#include "xe_device.h"
+#include "xe_force_wake.h"
+#include "xe_gt.h"
+#include "xe_gt_printk.h"
+#include "xe_gt_stats.h"
+#include "xe_guc.h"
+#include "xe_guc_ct.h"
+#include "xe_guc_tlb_inval.h"
+#include "xe_mmio.h"
+#include "xe_pm.h"
+#include "xe_tlb_inval.h"
+#include "xe_trace.h"
+
+/**
+ * DOC: Xe TLB invalidation
+ *
+ * Xe TLB invalidation is implemented in two layers. The first is the frontend
+ * API, which provides an interface for TLB invalidations to the driver code.
+ * The frontend handles seqno assignment, synchronization (fences), and the
+ * timeout mechanism. The frontend is implemented via an embedded structure
+ * xe_tlb_inval that includes a set of ops hooking into the backend. The backend
+ * interacts with the hardware (or firmware) to perform the actual invalidation.
+ */
+
+#define FENCE_STACK_BIT DMA_FENCE_FLAG_USER_BITS
+
+static void xe_tlb_inval_fence_fini(struct xe_tlb_inval_fence *fence)
+{
+ if (WARN_ON_ONCE(!fence->tlb_inval))
+ return;
+
+ xe_pm_runtime_put(fence->tlb_inval->xe);
+ fence->tlb_inval = NULL; /* fini() should be called once */
+}
+
+static void
+xe_tlb_inval_fence_signal(struct xe_tlb_inval_fence *fence)
+{
+ bool stack = test_bit(FENCE_STACK_BIT, &fence->base.flags);
+
+ lockdep_assert_held(&fence->tlb_inval->pending_lock);
+
+ list_del(&fence->link);
+ trace_xe_tlb_inval_fence_signal(fence->tlb_inval->xe, fence);
+ xe_tlb_inval_fence_fini(fence);
+ dma_fence_signal(&fence->base);
+ if (!stack)
+ dma_fence_put(&fence->base);
+}
+
+static void
+xe_tlb_inval_fence_signal_unlocked(struct xe_tlb_inval_fence *fence)
+{
+ struct xe_tlb_inval *tlb_inval = fence->tlb_inval;
+
+ spin_lock_irq(&tlb_inval->pending_lock);
+ xe_tlb_inval_fence_signal(fence);
+ spin_unlock_irq(&tlb_inval->pending_lock);
+}
+
+static void xe_tlb_inval_fence_timeout(struct work_struct *work)
+{
+ struct xe_tlb_inval *tlb_inval = container_of(work, struct xe_tlb_inval,
+ fence_tdr.work);
+ struct xe_device *xe = tlb_inval->xe;
+ struct xe_tlb_inval_fence *fence, *next;
+ long timeout_delay = tlb_inval->ops->timeout_delay(tlb_inval);
+
+ tlb_inval->ops->flush(tlb_inval);
+
+ spin_lock_irq(&tlb_inval->pending_lock);
+ list_for_each_entry_safe(fence, next,
+ &tlb_inval->pending_fences, link) {
+ s64 since_inval_ms = ktime_ms_delta(ktime_get(),
+ fence->inval_time);
+
+ if (msecs_to_jiffies(since_inval_ms) < timeout_delay)
+ break;
+
+ trace_xe_tlb_inval_fence_timeout(xe, fence);
+ drm_err(&xe->drm,
+ "TLB invalidation fence timeout, seqno=%d recv=%d",
+ fence->seqno, tlb_inval->seqno_recv);
+
+ fence->base.error = -ETIME;
+ xe_tlb_inval_fence_signal(fence);
+ }
+ if (!list_empty(&tlb_inval->pending_fences))
+ queue_delayed_work(system_wq, &tlb_inval->fence_tdr,
+ timeout_delay);
+ spin_unlock_irq(&tlb_inval->pending_lock);
+}
+
+/**
+ * tlb_inval_fini - Clean up TLB invalidation state
+ * @drm: the &drm_device
+ * @arg: pointer to the &struct xe_tlb_inval
+ *
+ * Cancel pending fence workers and clean up any additional
+ * TLB invalidation state.
+ */
+static void tlb_inval_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_tlb_inval *tlb_inval = arg;
+
+ xe_tlb_inval_reset(tlb_inval);
+}
+
+/**
+ * xe_gt_tlb_inval_init_early - Initialize TLB invalidation state
+ * @gt: GT structure
+ *
+ * Initialize TLB invalidation state. This is purely software initialization
+ * and should be called once during driver load.
+ *
+ * Return: 0 on success, negative error code on error.
+ */
+int xe_gt_tlb_inval_init_early(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ struct xe_tlb_inval *tlb_inval = &gt->tlb_inval;
+ int err;
+
+ tlb_inval->xe = xe;
+ tlb_inval->seqno = 1;
+ INIT_LIST_HEAD(&tlb_inval->pending_fences);
+ spin_lock_init(&tlb_inval->pending_lock);
+ spin_lock_init(&tlb_inval->lock);
+ INIT_DELAYED_WORK(&tlb_inval->fence_tdr, xe_tlb_inval_fence_timeout);
+
+ err = drmm_mutex_init(&xe->drm, &tlb_inval->seqno_lock);
+ if (err)
+ return err;
+
+ tlb_inval->job_wq = drmm_alloc_ordered_workqueue(&xe->drm,
+							 "gt-tlb-inval-job-wq",
+ WQ_MEM_RECLAIM);
+ if (IS_ERR(tlb_inval->job_wq))
+ return PTR_ERR(tlb_inval->job_wq);
+
+ /* XXX: Blindly setting up backend to GuC */
+ xe_guc_tlb_inval_init_early(&gt->uc.guc, tlb_inval);
+
+ return drmm_add_action_or_reset(&xe->drm, tlb_inval_fini, tlb_inval);
+}
+
+/**
+ * xe_tlb_inval_reset() - TLB invalidation reset
+ * @tlb_inval: TLB invalidation client
+ *
+ * Signal any pending invalidation fences, should be called during a GT reset
+ */
+void xe_tlb_inval_reset(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_tlb_inval_fence *fence, *next;
+ int pending_seqno;
+
+ /*
+	 * We can get here before the backends are even initialized if we're
+	 * wedging very early, in which case there are not going to be any
+	 * pending fences, so we can bail immediately.
+ */
+ if (!tlb_inval->ops->initialized(tlb_inval))
+ return;
+
+ /*
+ * Backend is already disabled at this point. No new TLB requests can
+ * appear.
+ */
+
+ mutex_lock(&tlb_inval->seqno_lock);
+ spin_lock_irq(&tlb_inval->pending_lock);
+ cancel_delayed_work(&tlb_inval->fence_tdr);
+ /*
+ * We might have various kworkers waiting for TLB flushes to complete
+ * which are not tracked with an explicit TLB fence, however at this
+ * stage that will never happen since the backend is already disabled,
+ * so make sure we signal them here under the assumption that we have
+ * completed a full GT reset.
+ */
+ if (tlb_inval->seqno == 1)
+ pending_seqno = TLB_INVALIDATION_SEQNO_MAX - 1;
+ else
+ pending_seqno = tlb_inval->seqno - 1;
+ WRITE_ONCE(tlb_inval->seqno_recv, pending_seqno);
+
+ list_for_each_entry_safe(fence, next,
+ &tlb_inval->pending_fences, link)
+ xe_tlb_inval_fence_signal(fence);
+ spin_unlock_irq(&tlb_inval->pending_lock);
+ mutex_unlock(&tlb_inval->seqno_lock);
+}
+
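+/*
+ * Seqnos wrap at TLB_INVALIDATION_SEQNO_MAX, so ordering is decided on the
+ * shorter arc of the ring. Worked example (illustrative numbers): with
+ * TLB_INVALIDATION_SEQNO_MAX == 0x100000, seqno_recv == 5 (receiver already
+ * wrapped) and seqno == 0xFFFFF, the delta 0xFFFFA exceeds MAX/2, so the
+ * seqno is treated as already past; seqno == 10 yields a small delta and,
+ * since seqno_recv < seqno, is still pending.
+ */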
+static bool xe_tlb_inval_seqno_past(struct xe_tlb_inval *tlb_inval, int seqno)
+{
+ int seqno_recv = READ_ONCE(tlb_inval->seqno_recv);
+
+ lockdep_assert_held(&tlb_inval->pending_lock);
+
+ if (seqno - seqno_recv < -(TLB_INVALIDATION_SEQNO_MAX / 2))
+ return false;
+
+ if (seqno - seqno_recv > (TLB_INVALIDATION_SEQNO_MAX / 2))
+ return true;
+
+ return seqno_recv >= seqno;
+}
+
+static void xe_tlb_inval_fence_prep(struct xe_tlb_inval_fence *fence)
+{
+ struct xe_tlb_inval *tlb_inval = fence->tlb_inval;
+
+ fence->seqno = tlb_inval->seqno;
+ trace_xe_tlb_inval_fence_send(tlb_inval->xe, fence);
+
+ spin_lock_irq(&tlb_inval->pending_lock);
+ fence->inval_time = ktime_get();
+ list_add_tail(&fence->link, &tlb_inval->pending_fences);
+
+ if (list_is_singular(&tlb_inval->pending_fences))
+ queue_delayed_work(system_wq, &tlb_inval->fence_tdr,
+ tlb_inval->ops->timeout_delay(tlb_inval));
+ spin_unlock_irq(&tlb_inval->pending_lock);
+
+ tlb_inval->seqno = (tlb_inval->seqno + 1) %
+ TLB_INVALIDATION_SEQNO_MAX;
+ if (!tlb_inval->seqno)
+ tlb_inval->seqno = 1;
+}
+
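+/*
+ * Issue an invalidation under the seqno lock: assign a seqno and queue the
+ * fence as pending, then call the backend op. A negative return signals the
+ * fence immediately; -ECANCELED (backend mid-reset) is masked to 0 since the
+ * invalidation is then unnecessary.
+ */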
+#define xe_tlb_inval_issue(__tlb_inval, __fence, op, args...) \
+({ \
+ int __ret; \
+ \
+ xe_assert((__tlb_inval)->xe, (__tlb_inval)->ops); \
+ xe_assert((__tlb_inval)->xe, (__fence)); \
+ \
+ mutex_lock(&(__tlb_inval)->seqno_lock); \
+ xe_tlb_inval_fence_prep((__fence)); \
+ __ret = op((__tlb_inval), (__fence)->seqno, ##args); \
+ if (__ret < 0) \
+ xe_tlb_inval_fence_signal_unlocked((__fence)); \
+ mutex_unlock(&(__tlb_inval)->seqno_lock); \
+ \
+ __ret == -ECANCELED ? 0 : __ret; \
+})
+
+/**
+ * xe_tlb_inval_all() - Issue a TLB invalidation for all TLBs
+ * @tlb_inval: TLB invalidation client
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion
+ *
+ * Issue a TLB invalidation for all TLBs. Completion of the TLB invalidation
+ * is asynchronous and the caller can use the invalidation fence to wait for
+ * completion.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence)
+{
+ return xe_tlb_inval_issue(tlb_inval, fence, tlb_inval->ops->all);
+}
+
+/**
+ * xe_tlb_inval_ggtt() - Issue a TLB invalidation for the GGTT
+ * @tlb_inval: TLB invalidation client
+ *
+ * Issue a TLB invalidation for the GGTT. The invalidation is issued with an
+ * internal fence and this function waits for it to complete before returning.
+ *
+ * Return: 0 on success, negative error code on error
+ */
+int xe_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval)
+{
+ struct xe_tlb_inval_fence fence, *fence_ptr = &fence;
+ int ret;
+
+ xe_tlb_inval_fence_init(tlb_inval, fence_ptr, true);
+ ret = xe_tlb_inval_issue(tlb_inval, fence_ptr, tlb_inval->ops->ggtt);
+ xe_tlb_inval_fence_wait(fence_ptr);
+
+ return ret;
+}
+
+/**
+ * xe_tlb_inval_range() - Issue a TLB invalidation for an address range
+ * @tlb_inval: TLB invalidation client
+ * @fence: invalidation fence which will be signaled on TLB invalidation
+ * completion
+ * @start: start address
+ * @end: end address
+ * @asid: address space id
+ *
+ * Issue a range-based TLB invalidation if supported; if not, fall back to a
+ * full TLB invalidation. Completion of the TLB invalidation is asynchronous
+ * and the caller can use the invalidation fence to wait for completion.
+ *
+ * Return: Negative error code on error, 0 on success
+ */
+int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence, u64 start, u64 end,
+ u32 asid)
+{
+ return xe_tlb_inval_issue(tlb_inval, fence, tlb_inval->ops->ppgtt,
+ start, end, asid);
+}
+
+/**
+ * xe_tlb_inval_vm() - Issue a TLB invalidation for a VM
+ * @tlb_inval: TLB invalidation client
+ * @vm: VM to invalidate
+ *
+ * Invalidate entire VM's address space
+ */
+void xe_tlb_inval_vm(struct xe_tlb_inval *tlb_inval, struct xe_vm *vm)
+{
+ struct xe_tlb_inval_fence fence;
+ u64 range = 1ull << vm->xe->info.va_bits;
+
+ xe_tlb_inval_fence_init(tlb_inval, &fence, true);
+ xe_tlb_inval_range(tlb_inval, &fence, 0, range, vm->usm.asid);
+ xe_tlb_inval_fence_wait(&fence);
+}
+
+/**
+ * xe_tlb_inval_done_handler() - TLB invalidation done handler
+ * @tlb_inval: TLB invalidation client
+ * @seqno: seqno of invalidation that is done
+ *
+ * Update recv seqno, signal any TLB invalidation fences, and restart TDR
+ */
+void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno)
+{
+ struct xe_device *xe = tlb_inval->xe;
+ struct xe_tlb_inval_fence *fence, *next;
+ unsigned long flags;
+
+ /*
+	 * This can be run both directly from the IRQ handler and from
+	 * process_g2h_msg(). Only one may process any individual CT message,
+ * however the order they are processed here could result in skipping a
+ * seqno. To handle that we just process all the seqnos from the last
+	 * seqno_recv up to and including @seqno. The delta should be very
+	 * small, so there shouldn't be many pending fences we actually need
+	 * to iterate over here.
+ *
+ * From GuC POV we expect the seqnos to always appear in-order, so if we
+ * see something later in the timeline we can be sure that anything
+	 * appearing earlier has already signalled; we just have yet to
+	 * officially process the CT message, as when racing against
+	 * process_g2h_msg().
+ */
+ spin_lock_irqsave(&tlb_inval->pending_lock, flags);
+ if (xe_tlb_inval_seqno_past(tlb_inval, seqno)) {
+ spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
+ return;
+ }
+
+ WRITE_ONCE(tlb_inval->seqno_recv, seqno);
+
+ list_for_each_entry_safe(fence, next,
+ &tlb_inval->pending_fences, link) {
+ trace_xe_tlb_inval_fence_recv(xe, fence);
+
+ if (!xe_tlb_inval_seqno_past(tlb_inval, fence->seqno))
+ break;
+
+ xe_tlb_inval_fence_signal(fence);
+ }
+
+ if (!list_empty(&tlb_inval->pending_fences))
+ mod_delayed_work(system_wq,
+ &tlb_inval->fence_tdr,
+ tlb_inval->ops->timeout_delay(tlb_inval));
+ else
+ cancel_delayed_work(&tlb_inval->fence_tdr);
+
+ spin_unlock_irqrestore(&tlb_inval->pending_lock, flags);
+}
+
+static const char *
+xe_inval_fence_get_driver_name(struct dma_fence *dma_fence)
+{
+ return "xe";
+}
+
+static const char *
+xe_inval_fence_get_timeline_name(struct dma_fence *dma_fence)
+{
+ return "tlb_inval_fence";
+}
+
+static const struct dma_fence_ops inval_fence_ops = {
+ .get_driver_name = xe_inval_fence_get_driver_name,
+ .get_timeline_name = xe_inval_fence_get_timeline_name,
+};
+
+/**
+ * xe_tlb_inval_fence_init() - Initialize TLB invalidation fence
+ * @tlb_inval: TLB invalidation client
+ * @fence: TLB invalidation fence to initialize
+ * @stack: fence is stack variable
+ *
+ * Initialize TLB invalidation fence for use. xe_tlb_inval_fence_fini() will
+ * be called automatically when the fence is signalled (all fences must
+ * signal), even on error.
+ */
+void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence,
+ bool stack)
+{
+ xe_pm_runtime_get_noresume(tlb_inval->xe);
+
+ spin_lock_irq(&tlb_inval->lock);
+ dma_fence_init(&fence->base, &inval_fence_ops, &tlb_inval->lock,
+ dma_fence_context_alloc(1), 1);
+ spin_unlock_irq(&tlb_inval->lock);
+ INIT_LIST_HEAD(&fence->link);
+ if (stack)
+ set_bit(FENCE_STACK_BIT, &fence->base.flags);
+ else
+ dma_fence_get(&fence->base);
+ fence->tlb_inval = tlb_inval;
+}
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval.h b/drivers/gpu/drm/xe/xe_tlb_inval.h
new file mode 100644
index 000000000000..05614915463a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TLB_INVAL_H_
+#define _XE_TLB_INVAL_H_
+
+#include <linux/types.h>
+
+#include "xe_tlb_inval_types.h"
+
+struct xe_gt;
+struct xe_guc;
+struct xe_vm;
+
+int xe_gt_tlb_inval_init_early(struct xe_gt *gt);
+
+void xe_tlb_inval_reset(struct xe_tlb_inval *tlb_inval);
+int xe_tlb_inval_all(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence);
+int xe_tlb_inval_ggtt(struct xe_tlb_inval *tlb_inval);
+void xe_tlb_inval_vm(struct xe_tlb_inval *tlb_inval, struct xe_vm *vm);
+int xe_tlb_inval_range(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence,
+ u64 start, u64 end, u32 asid);
+
+void xe_tlb_inval_fence_init(struct xe_tlb_inval *tlb_inval,
+ struct xe_tlb_inval_fence *fence,
+ bool stack);
+
+/**
+ * xe_tlb_inval_fence_wait() - TLB invalidation fence wait
+ * @fence: TLB invalidation fence to wait on
+ *
+ * Wait on a TLB invalidation fence until it signals, non-interruptible
+ */
+static inline void
+xe_tlb_inval_fence_wait(struct xe_tlb_inval_fence *fence)
+{
+ dma_fence_wait(&fence->base, false);
+}
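+
+/*
+ * Typical synchronous usage (illustrative sketch, assuming a valid
+ * @tlb_inval client and a VM with a populated usm.asid), mirroring what
+ * xe_tlb_inval_vm() does internally:
+ *
+ *	struct xe_tlb_inval_fence fence;
+ *
+ *	xe_tlb_inval_fence_init(tlb_inval, &fence, true);
+ *	xe_tlb_inval_range(tlb_inval, &fence, start, end, vm->usm.asid);
+ *	xe_tlb_inval_fence_wait(&fence);
+ */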
+
+void xe_tlb_inval_done_handler(struct xe_tlb_inval *tlb_inval, int seqno);
+
+#endif	/* _XE_TLB_INVAL_H_ */
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.c b/drivers/gpu/drm/xe/xe_tlb_inval_job.c
new file mode 100644
index 000000000000..1ae0dec2cf31
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_assert.h"
+#include "xe_dep_job_types.h"
+#include "xe_dep_scheduler.h"
+#include "xe_exec_queue.h"
+#include "xe_gt_types.h"
+#include "xe_tlb_inval.h"
+#include "xe_tlb_inval_job.h"
+#include "xe_migrate.h"
+#include "xe_pm.h"
+#include "xe_vm.h"
+
+/** struct xe_tlb_inval_job - TLB invalidation job */
+struct xe_tlb_inval_job {
+ /** @dep: base generic dependency Xe job */
+ struct xe_dep_job dep;
+ /** @tlb_inval: TLB invalidation client */
+ struct xe_tlb_inval *tlb_inval;
+ /** @q: exec queue issuing the invalidate */
+ struct xe_exec_queue *q;
+ /** @vm: VM which TLB invalidation is being issued for */
+ struct xe_vm *vm;
+ /** @refcount: ref count of this job */
+ struct kref refcount;
+ /**
+	 * @fence: dma fence to indicate completion. One-way relationship: the
+	 * job can safely reference the fence, but the fence cannot safely
+	 * reference the job.
+ */
+ struct dma_fence *fence;
+ /** @start: Start address to invalidate */
+ u64 start;
+ /** @end: End address to invalidate */
+ u64 end;
+ /** @type: GT type */
+ int type;
+ /** @fence_armed: Fence has been armed */
+ bool fence_armed;
+};
+
+static struct dma_fence *xe_tlb_inval_job_run(struct xe_dep_job *dep_job)
+{
+ struct xe_tlb_inval_job *job =
+ container_of(dep_job, typeof(*job), dep);
+ struct xe_tlb_inval_fence *ifence =
+ container_of(job->fence, typeof(*ifence), base);
+
+ xe_tlb_inval_range(job->tlb_inval, ifence, job->start,
+ job->end, job->vm->usm.asid);
+
+ return job->fence;
+}
+
+static void xe_tlb_inval_job_free(struct xe_dep_job *dep_job)
+{
+ struct xe_tlb_inval_job *job =
+ container_of(dep_job, typeof(*job), dep);
+
+ /* Pairs with get in xe_tlb_inval_job_push */
+ xe_tlb_inval_job_put(job);
+}
+
+static const struct xe_dep_job_ops dep_job_ops = {
+ .run_job = xe_tlb_inval_job_run,
+ .free_job = xe_tlb_inval_job_free,
+};
+
+/**
+ * xe_tlb_inval_job_create() - TLB invalidation job create
+ * @q: exec queue issuing the invalidate
+ * @tlb_inval: TLB invalidation client
+ * @dep_scheduler: Dependency scheduler for job
+ * @vm: VM which TLB invalidation is being issued for
+ * @start: Start address to invalidate
+ * @end: End address to invalidate
+ * @type: GT type
+ *
+ * Create a TLB invalidation job and initialize internal fields. The caller is
+ * responsible for releasing the creation reference.
+ *
+ * Return: TLB invalidation job object on success, ERR_PTR on failure
+ */
+struct xe_tlb_inval_job *
+xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
+ struct xe_dep_scheduler *dep_scheduler,
+ struct xe_vm *vm, u64 start, u64 end, int type)
+{
+ struct xe_tlb_inval_job *job;
+ struct drm_sched_entity *entity =
+ xe_dep_scheduler_entity(dep_scheduler);
+ struct xe_tlb_inval_fence *ifence;
+ int err;
+
+ xe_assert(vm->xe, type == XE_EXEC_QUEUE_TLB_INVAL_MEDIA_GT ||
+ type == XE_EXEC_QUEUE_TLB_INVAL_PRIMARY_GT);
+
+ job = kmalloc(sizeof(*job), GFP_KERNEL);
+ if (!job)
+ return ERR_PTR(-ENOMEM);
+
+ job->q = q;
+ job->vm = vm;
+ job->tlb_inval = tlb_inval;
+ job->start = start;
+ job->end = end;
+ job->fence_armed = false;
+ job->dep.ops = &dep_job_ops;
+ job->type = type;
+ kref_init(&job->refcount);
+ xe_exec_queue_get(q); /* Pairs with put in xe_tlb_inval_job_destroy */
+ xe_vm_get(vm); /* Pairs with put in xe_tlb_inval_job_destroy */
+
+ ifence = kmalloc(sizeof(*ifence), GFP_KERNEL);
+ if (!ifence) {
+ err = -ENOMEM;
+ goto err_job;
+ }
+ job->fence = &ifence->base;
+
+ err = drm_sched_job_init(&job->dep.drm, entity, 1, NULL,
+ q->xef ? q->xef->drm->client_id : 0);
+ if (err)
+ goto err_fence;
+
+ /* Pairs with put in xe_tlb_inval_job_destroy */
+ xe_pm_runtime_get_noresume(gt_to_xe(q->gt));
+
+ return job;
+
+err_fence:
+ kfree(ifence);
+err_job:
+ xe_vm_put(vm);
+ xe_exec_queue_put(q);
+ kfree(job);
+
+ return ERR_PTR(err);
+}
+
+static void xe_tlb_inval_job_destroy(struct kref *ref)
+{
+ struct xe_tlb_inval_job *job = container_of(ref, typeof(*job),
+ refcount);
+ struct xe_tlb_inval_fence *ifence =
+ container_of(job->fence, typeof(*ifence), base);
+ struct xe_exec_queue *q = job->q;
+ struct xe_device *xe = gt_to_xe(q->gt);
+ struct xe_vm *vm = job->vm;
+
+ if (!job->fence_armed)
+ kfree(ifence);
+ else
+ /* Ref from xe_tlb_inval_fence_init */
+ dma_fence_put(job->fence);
+
+ drm_sched_job_cleanup(&job->dep.drm);
+ kfree(job);
+ xe_vm_put(vm); /* Pairs with get from xe_tlb_inval_job_create */
+ xe_exec_queue_put(q); /* Pairs with get from xe_tlb_inval_job_create */
+ xe_pm_runtime_put(xe); /* Pairs with get from xe_tlb_inval_job_create */
+}
+
+/**
+ * xe_tlb_inval_job_alloc_dep() - TLB invalidation job alloc dependency
+ * @job: TLB invalidation job to alloc dependency for
+ *
+ * Allocate storage for a dependency in the TLB invalidation job. This
+ * function should be called at most once per job and must be paired with
+ * xe_tlb_inval_job_push being called with a real fence.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job)
+{
+ xe_assert(gt_to_xe(job->q->gt), !xa_load(&job->dep.drm.dependencies, 0));
+ might_alloc(GFP_KERNEL);
+
+ return drm_sched_job_add_dependency(&job->dep.drm,
+ dma_fence_get_stub());
+}
+
+/**
+ * xe_tlb_inval_job_push() - TLB invalidation job push
+ * @job: TLB invalidation job to push
+ * @m: The migration object being used
+ * @fence: Dependency for TLB invalidation job
+ *
+ * Pushes a TLB invalidation job for execution, using @fence as a dependency.
+ * Storage for @fence must be preallocated with xe_tlb_inval_job_alloc_dep
+ * prior to this call if @fence is not signaled. Takes a reference to the job's
+ * finished fence, which the caller is responsible for releasing, and returns
+ * it to the caller. This function is safe to be called in the path of reclaim.
+ *
+ * Return: Job's finished fence on success, cannot fail
+ */
+struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
+ struct xe_migrate *m,
+ struct dma_fence *fence)
+{
+ struct xe_tlb_inval_fence *ifence =
+ container_of(job->fence, typeof(*ifence), base);
+
+ if (!dma_fence_is_signaled(fence)) {
+ void *ptr;
+
+ /*
+ * Can be in path of reclaim, hence the preallocation of fence
+ * storage in xe_tlb_inval_job_alloc_dep. Verify caller did
+ * this correctly.
+ */
+ xe_assert(gt_to_xe(job->q->gt),
+ xa_load(&job->dep.drm.dependencies, 0) ==
+ dma_fence_get_stub());
+
+ dma_fence_get(fence); /* ref released once dependency processed by scheduler */
+ ptr = xa_store(&job->dep.drm.dependencies, 0, fence,
+ GFP_ATOMIC);
+ xe_assert(gt_to_xe(job->q->gt), !xa_is_err(ptr));
+ }
+
+ xe_tlb_inval_job_get(job); /* Pairs with put in free_job */
+ job->fence_armed = true;
+
+ /*
+ * We need the migration lock to protect the job's seqno and the spsc
+	 * queue. It is only taken on the migration queue; user queues are
+	 * protected by the dma-resv VM lock.
+ */
+ xe_migrate_job_lock(m, job->q);
+
+ /* Creation ref pairs with put in xe_tlb_inval_job_destroy */
+ xe_tlb_inval_fence_init(job->tlb_inval, ifence, false);
+ dma_fence_get(job->fence); /* Pairs with put in DRM scheduler */
+
+ drm_sched_job_arm(&job->dep.drm);
+ /*
+ * caller ref, get must be done before job push as it could immediately
+ * signal and free.
+ */
+ dma_fence_get(&job->dep.drm.s_fence->finished);
+ drm_sched_entity_push_job(&job->dep.drm);
+
+ /* Let the upper layers fish this out */
+ xe_exec_queue_tlb_inval_last_fence_set(job->q, job->vm,
+ &job->dep.drm.s_fence->finished,
+ job->type);
+
+ xe_migrate_job_unlock(m, job->q);
+
+ /*
+ * Not using job->fence, as it has its own dma-fence context, which does
+	 * not allow TLB invalidation fences on the same (queue, GT) tuple to
+	 * be squashed in the dma-resv/DRM scheduler. Instead, we use the DRM
+	 * scheduler context and the job's finished fence, which enables
+	 * squashing.
+ */
+ return &job->dep.drm.s_fence->finished;
+}
+
+/**
+ * xe_tlb_inval_job_get() - Get a reference to TLB invalidation job
+ * @job: TLB invalidation job object
+ *
+ * Increment the TLB invalidation job's reference count
+ */
+void xe_tlb_inval_job_get(struct xe_tlb_inval_job *job)
+{
+ kref_get(&job->refcount);
+}
+
+/**
+ * xe_tlb_inval_job_put() - Put a reference to TLB invalidation job
+ * @job: TLB invalidation job object
+ *
+ * Decrement the TLB invalidation job's reference count and call
+ * xe_tlb_inval_job_destroy() when the reference count reaches 0. The
+ * decrement is skipped if @job is NULL or an ERR_PTR.
+ */
+void xe_tlb_inval_job_put(struct xe_tlb_inval_job *job)
+{
+ if (!IS_ERR_OR_NULL(job))
+ kref_put(&job->refcount, xe_tlb_inval_job_destroy);
+}
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_job.h b/drivers/gpu/drm/xe/xe_tlb_inval_job.h
new file mode 100644
index 000000000000..4d6df1a6c6ca
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_job.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TLB_INVAL_JOB_H_
+#define _XE_TLB_INVAL_JOB_H_
+
+#include <linux/types.h>
+
+struct dma_fence;
+struct xe_dep_scheduler;
+struct xe_exec_queue;
+struct xe_migrate;
+struct xe_tlb_inval;
+struct xe_tlb_inval_job;
+struct xe_vm;
+
+struct xe_tlb_inval_job *
+xe_tlb_inval_job_create(struct xe_exec_queue *q, struct xe_tlb_inval *tlb_inval,
+ struct xe_dep_scheduler *dep_scheduler,
+ struct xe_vm *vm, u64 start, u64 end, int type);
+
+int xe_tlb_inval_job_alloc_dep(struct xe_tlb_inval_job *job);
+
+struct dma_fence *xe_tlb_inval_job_push(struct xe_tlb_inval_job *job,
+ struct xe_migrate *m,
+ struct dma_fence *fence);
+
+void xe_tlb_inval_job_get(struct xe_tlb_inval_job *job);
+
+void xe_tlb_inval_job_put(struct xe_tlb_inval_job *job);
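+
+/*
+ * Expected call flow (illustrative sketch, error handling omitted):
+ *
+ *	job = xe_tlb_inval_job_create(q, tlb_inval, dep_scheduler, vm,
+ *				      start, end, type);
+ *	xe_tlb_inval_job_alloc_dep(job);  - needed if the dependency fence
+ *					    may be unsignaled at push time
+ *	fence = xe_tlb_inval_job_push(job, m, dep_fence);
+ *	dma_fence_put(fence);             - caller's finished-fence reference
+ *	xe_tlb_inval_job_put(job);        - drops the creation reference
+ */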
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tlb_inval_types.h b/drivers/gpu/drm/xe/xe_tlb_inval_types.h
new file mode 100644
index 000000000000..8f8b060e9005
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tlb_inval_types.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_TLB_INVAL_TYPES_H_
+#define _XE_TLB_INVAL_TYPES_H_
+
+#include <linux/workqueue.h>
+#include <linux/dma-fence.h>
+
+struct xe_tlb_inval;
+
+/** struct xe_tlb_inval_ops - TLB invalidation ops (backend) */
+struct xe_tlb_inval_ops {
+ /**
+ * @all: Invalidate all TLBs
+ * @tlb_inval: TLB invalidation client
+ * @seqno: Seqno of TLB invalidation
+ *
+ * Return 0 on success, -ECANCELED if backend is mid-reset, error on
+ * failure
+ */
+ int (*all)(struct xe_tlb_inval *tlb_inval, u32 seqno);
+
+ /**
+ * @ggtt: Invalidate global translation TLBs
+ * @tlb_inval: TLB invalidation client
+ * @seqno: Seqno of TLB invalidation
+ *
+ * Return 0 on success, -ECANCELED if backend is mid-reset, error on
+ * failure
+ */
+ int (*ggtt)(struct xe_tlb_inval *tlb_inval, u32 seqno);
+
+ /**
+ * @ppgtt: Invalidate per-process translation TLBs
+ * @tlb_inval: TLB invalidation client
+ * @seqno: Seqno of TLB invalidation
+ * @start: Start address
+ * @end: End address
+ * @asid: Address space ID
+ *
+ * Return 0 on success, -ECANCELED if backend is mid-reset, error on
+ * failure
+ */
+ int (*ppgtt)(struct xe_tlb_inval *tlb_inval, u32 seqno, u64 start,
+ u64 end, u32 asid);
+
+ /**
+ * @initialized: Backend is initialized
+ * @tlb_inval: TLB invalidation client
+ *
+	 * Return: True if the backend is initialized, False otherwise
+ */
+ bool (*initialized)(struct xe_tlb_inval *tlb_inval);
+
+ /**
+ * @flush: Flush pending TLB invalidations
+ * @tlb_inval: TLB invalidation client
+ */
+ void (*flush)(struct xe_tlb_inval *tlb_inval);
+
+ /**
+ * @timeout_delay: Timeout delay for TLB invalidation
+ * @tlb_inval: TLB invalidation client
+ *
+ * Return: Timeout delay for TLB invalidation in jiffies
+ */
+ long (*timeout_delay)(struct xe_tlb_inval *tlb_inval);
+};
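+
+/*
+ * A backend provides these ops and stashes its context in
+ * &xe_tlb_inval.private; today only the GuC backend exists, wired up via
+ * xe_guc_tlb_inval_init_early(). Hypothetical minimal wiring:
+ *
+ *	static const struct xe_tlb_inval_ops my_ops = { .all = my_all, ... };
+ *
+ *	tlb_inval->ops = &my_ops;
+ *	tlb_inval->private = my_backend_ctx;
+ */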
+
+/** struct xe_tlb_inval - TLB invalidation client (frontend) */
+struct xe_tlb_inval {
+ /** @private: Backend private pointer */
+ void *private;
+ /** @xe: Pointer to Xe device */
+ struct xe_device *xe;
+ /** @ops: TLB invalidation ops */
+ const struct xe_tlb_inval_ops *ops;
+	/** @seqno: TLB invalidation seqno, protected by @seqno_lock */
+#define TLB_INVALIDATION_SEQNO_MAX	0x100000
+	int seqno;
+	/** @seqno_lock: protects @seqno */
+ struct mutex seqno_lock;
+ /**
+	 * @seqno_recv: last received TLB invalidation seqno, protected by
+	 * @pending_lock
+ */
+ int seqno_recv;
+ /**
+	 * @pending_fences: list of pending fences waiting on TLB invalidations,
+	 * protected by @pending_lock
+ */
+ struct list_head pending_fences;
+ /**
+ * @pending_lock: protects @pending_fences and updating @seqno_recv.
+ */
+ spinlock_t pending_lock;
+ /**
+	 * @fence_tdr: schedules a delayed call to xe_tlb_inval_fence_timeout()
+	 * after the timeout interval is over.
+ */
+ struct delayed_work fence_tdr;
+ /** @job_wq: schedules TLB invalidation jobs */
+ struct workqueue_struct *job_wq;
+	/** @lock: protects TLB invalidation fences */
+ spinlock_t lock;
+};
+
+/**
+ * struct xe_tlb_inval_fence - TLB invalidation fence
+ *
+ * Optionally passed to xe_tlb_inval* functions and will be signaled upon TLB
+ * invalidation completion.
+ */
+struct xe_tlb_inval_fence {
+ /** @base: dma fence base */
+ struct dma_fence base;
+	/** @tlb_inval: TLB invalidation client which the fence belongs to */
+	struct xe_tlb_inval *tlb_inval;
+	/** @link: link into list of pending tlb fences */
+	struct list_head link;
+	/** @seqno: seqno of the TLB invalidation on which to signal the fence */
+ int seqno;
+ /** @inval_time: time of TLB invalidation */
+ ktime_t inval_time;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index b4a3577df70c..79a97b086cb2 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -14,10 +14,10 @@
#include "xe_exec_queue_types.h"
#include "xe_gpu_scheduler_types.h"
-#include "xe_gt_tlb_invalidation_types.h"
#include "xe_gt_types.h"
#include "xe_guc_exec_queue_types.h"
#include "xe_sched_job.h"
+#include "xe_tlb_inval_types.h"
#include "xe_vm.h"
#define __dev_name_xe(xe) dev_name((xe)->drm.dev)
@@ -25,13 +25,13 @@
#define __dev_name_gt(gt) __dev_name_xe(gt_to_xe((gt)))
#define __dev_name_eq(q) __dev_name_gt((q)->gt)
-DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DECLARE_EVENT_CLASS(xe_tlb_inval_fence,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence),
TP_STRUCT__entry(
__string(dev, __dev_name_xe(xe))
- __field(struct xe_gt_tlb_invalidation_fence *, fence)
+ __field(struct xe_tlb_inval_fence *, fence)
__field(int, seqno)
),
@@ -45,39 +45,23 @@ DECLARE_EVENT_CLASS(xe_gt_tlb_invalidation_fence,
__get_str(dev), __entry->fence, __entry->seqno)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_create,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_send,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence,
- xe_gt_tlb_invalidation_fence_work_func,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_recv,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_cb,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_signal,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_send,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(xe, fence)
-);
-
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_recv,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(xe, fence)
-);
-
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_signal,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
- TP_ARGS(xe, fence)
-);
-
-DEFINE_EVENT(xe_gt_tlb_invalidation_fence, xe_gt_tlb_invalidation_fence_timeout,
- TP_PROTO(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence),
+DEFINE_EVENT(xe_tlb_inval_fence, xe_tlb_inval_fence_timeout,
+ TP_PROTO(struct xe_device *xe, struct xe_tlb_inval_fence *fence),
TP_ARGS(xe, fence)
);
@@ -457,6 +441,29 @@ TRACE_EVENT(xe_eu_stall_data_read,
__entry->read_size, __entry->total_size)
);
+TRACE_EVENT(xe_exec_queue_reach_max_job_count,
+ TP_PROTO(struct xe_exec_queue *q, int max_cnt),
+ TP_ARGS(q, max_cnt),
+
+ TP_STRUCT__entry(__string(dev, __dev_name_eq(q))
+ __field(enum xe_engine_class, class)
+ __field(u32, logical_mask)
+ __field(u16, guc_id)
+ __field(int, max_cnt)
+ ),
+
+ TP_fast_assign(__assign_str(dev);
+ __entry->class = q->class;
+ __entry->logical_mask = q->logical_mask;
+ __entry->guc_id = q->guc->id;
+ __entry->max_cnt = max_cnt;
+ ),
+
+ TP_printk("dev=%s, job count exceeded the maximum limit (%d) per exec queue. engine_class=0x%x, logical_mask=0x%x, guc_id=%d",
+ __get_str(dev), __entry->max_cnt,
+ __entry->class, __entry->logical_mask, __entry->guc_id)
+);
+
#endif
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index d9c9d2547aad..1bddecfb723a 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021-2023 Intel Corporation
- * Copyright (C) 2021-2002 Red Hat
+ * Copyright (C) 2021-2022 Red Hat
*/
#include <drm/drm_managed.h>
@@ -24,6 +24,7 @@
#include "xe_sriov.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram.h"
#include "xe_wa.h"
struct xe_ttm_stolen_mgr {
@@ -80,17 +81,18 @@ static u32 get_wopcm_size(struct xe_device *xe)
return wopcm_size;
}
-static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
+static u64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
- struct xe_tile *tile = xe_device_get_root_tile(xe);
+ struct xe_vram_region *tile_vram = xe_device_get_root_tile(xe)->mem.vram;
+ resource_size_t tile_io_start = xe_vram_region_io_start(tile_vram);
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
u64 stolen_size, wopcm_size;
u64 tile_offset;
u64 tile_size;
- tile_offset = tile->mem.vram.io_start - xe->mem.vram.io_start;
- tile_size = tile->mem.vram.actual_physical_size;
+ tile_offset = tile_io_start - xe_vram_region_io_start(xe->mem.vram);
+ tile_size = xe_vram_region_actual_physical_size(tile_vram);
/* Use DSM base address instead for stolen memory */
mgr->stolen_base = (xe_mmio_read64_2x32(mmio, DSMBASE) & BDSM_MASK) - tile_offset;
@@ -103,11 +105,13 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
return 0;
stolen_size = tile_size - mgr->stolen_base;
+
+ xe_assert(xe, stolen_size >= wopcm_size);
stolen_size -= wopcm_size;
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
- mgr->io_base = tile->mem.vram.io_start + mgr->stolen_base;
+ mgr->io_base = tile_io_start + mgr->stolen_base;
/*
* There may be few KB of platform dependent reserved memory at the end
@@ -164,7 +168,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr
stolen_size -= wopcm_size;
- if (media_gt && XE_WA(media_gt, 14019821291)) {
+ if (media_gt && XE_GT_WA(media_gt, 14019821291)) {
u64 gscpsmi_base = xe_mmio_read64_2x32(&media_gt->mmio, GSCPSMI_BASE)
& ~GENMASK_ULL(5, 0);
diff --git a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
index d38b91872da3..3e404eb8d098 100644
--- a/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_sys_mgr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021-2022 Intel Corporation
- * Copyright (C) 2021-2002 Red Hat
+ * Copyright (C) 2021-2022 Red Hat
*/
#include "xe_ttm_sys_mgr.h"
@@ -85,7 +85,7 @@ static const struct ttm_resource_manager_func xe_ttm_sys_mgr_func = {
.debug = xe_ttm_sys_mgr_debug
};
-static void ttm_sys_mgr_fini(struct drm_device *drm, void *arg)
+static void xe_ttm_sys_mgr_fini(struct drm_device *drm, void *arg)
{
struct xe_device *xe = (struct xe_device *)arg;
struct ttm_resource_manager *man = &xe->mem.sys_mgr;
@@ -116,5 +116,5 @@ int xe_ttm_sys_mgr_init(struct xe_device *xe)
ttm_resource_manager_init(man, &xe->ttm, gtt_size >> PAGE_SHIFT);
ttm_set_driver_manager(&xe->ttm, XE_PL_TT, man);
ttm_resource_manager_set_used(man, true);
- return drmm_add_action_or_reset(&xe->drm, ttm_sys_mgr_fini, xe);
+ return drmm_add_action_or_reset(&xe->drm, xe_ttm_sys_mgr_fini, xe);
}
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
index 9e375a40aee9..9f70802fce92 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: MIT
/*
* Copyright © 2021-2022 Intel Corporation
- * Copyright (C) 2021-2002 Red Hat
+ * Copyright (C) 2021-2022 Red Hat
*/
#include <drm/drm_managed.h>
@@ -15,6 +15,7 @@
#include "xe_gt.h"
#include "xe_res_cursor.h"
#include "xe_ttm_vram_mgr.h"
+#include "xe_vram_types.h"
static inline struct drm_buddy_block *
xe_ttm_vram_mgr_first_block(struct list_head *list)
@@ -283,7 +284,7 @@ static const struct ttm_resource_manager_func xe_ttm_vram_mgr_func = {
.debug = xe_ttm_vram_mgr_debug
};
-static void ttm_vram_mgr_fini(struct drm_device *dev, void *arg)
+static void xe_ttm_vram_mgr_fini(struct drm_device *dev, void *arg)
{
struct xe_device *xe = to_xe_device(dev);
struct xe_ttm_vram_mgr *mgr = arg;
@@ -334,16 +335,23 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
ttm_set_driver_manager(&xe->ttm, mem_type, &mgr->manager);
ttm_resource_manager_set_used(&mgr->manager, true);
- return drmm_add_action_or_reset(&xe->drm, ttm_vram_mgr_fini, mgr);
+ return drmm_add_action_or_reset(&xe->drm, xe_ttm_vram_mgr_fini, mgr);
}
-int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr)
+/**
+ * xe_ttm_vram_mgr_init - initialize the TTM manager for a VRAM region
+ * @xe: pointer to Xe device
+ * @vram: pointer to xe_vram_region that contains the memory region attributes
+ *
+ * Initialize the Xe TTM manager for the given @vram region using its attributes.
+ *
+ * Returns 0 for success, negative error code otherwise.
+ */
+int xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_vram_region *vram)
{
- struct xe_device *xe = tile_to_xe(tile);
- struct xe_vram_region *vram = &tile->mem.vram;
-
- return __xe_ttm_vram_mgr_init(xe, mgr, XE_PL_VRAM0 + tile->id,
- vram->usable_size, vram->io_size,
+ return __xe_ttm_vram_mgr_init(xe, &vram->ttm, vram->placement,
+ xe_vram_region_usable_size(vram),
+ xe_vram_region_io_size(vram),
PAGE_SIZE);
}
@@ -392,7 +400,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
*/
xe_res_first(res, offset, length, &cursor);
for_each_sgtable_sg((*sgt), sg, i) {
- phys_addr_t phys = cursor.start + tile->mem.vram.io_start;
+ phys_addr_t phys = cursor.start + xe_vram_region_io_start(tile->mem.vram);
size_t size = min_t(u64, cursor.size, SZ_2G);
dma_addr_t addr;
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
index cc76050e376d..87b7fae5edba 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.h
@@ -11,11 +11,12 @@
enum dma_data_direction;
struct xe_device;
struct xe_tile;
+struct xe_vram_region;
int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_ttm_vram_mgr *mgr,
u32 mem_type, u64 size, u64 io_size,
u64 default_page_size);
-int xe_ttm_vram_mgr_init(struct xe_tile *tile, struct xe_ttm_vram_mgr *mgr);
+int xe_ttm_vram_mgr_init(struct xe_device *xe, struct xe_vram_region *vram);
int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe,
struct ttm_resource *res,
u64 offset, u64 length,
diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
index 1144f9232ebb..a71e14818ec2 100644
--- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
+++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h
@@ -10,7 +10,7 @@
#include <drm/ttm/ttm_device.h>
/**
- * struct xe_ttm_vram_mgr - XE TTM VRAM manager
+ * struct xe_ttm_vram_mgr - Xe TTM VRAM manager
*
* Manages placement of TTM resource in VRAM.
*/
@@ -32,7 +32,7 @@ struct xe_ttm_vram_mgr {
};
/**
- * struct xe_ttm_vram_mgr_resource - XE TTM VRAM resource
+ * struct xe_ttm_vram_mgr_resource - Xe TTM VRAM resource
*/
struct xe_ttm_vram_mgr_resource {
/** @base: Base TTM resource */
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 828b45b24c23..5766fa7742d3 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -8,6 +8,7 @@
#include <kunit/visibility.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include "regs/xe_gt_regs.h"
#include "xe_gt_types.h"
@@ -40,7 +41,8 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)))
},
{ XE_RTP_NAME("Tuning: Compression Overfetch"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_has_flat_ccs)),
XE_RTP_ACTIONS(CLR(CCCHKNREG1, ENCOMPPERFFIX),
SET(CCCHKNREG1, L3CMPCTRL))
},
@@ -58,12 +60,14 @@ static const struct xe_rtp_entry_sr gt_tunings[] = {
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG3, COMPPWOVERFETCHEN))
},
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_has_flat_ccs)),
XE_RTP_ACTIONS(SET(L3SQCREG2,
COMPMEMRD256BOVRFETCHEN))
},
{ XE_RTP_NAME("Tuning: L2 Overfetch Compressible Only - media"),
- XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED)),
+ XE_RTP_RULES(MEDIA_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_has_flat_ccs)),
XE_RTP_ACTIONS(SET(XE2LPM_L3SQCREG2,
COMPMEMRD256BOVRFETCHEN))
},
@@ -99,7 +103,7 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
{ XE_RTP_NAME("Tuning: Disable NULL query for Anyhit Shader"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, XE_RTP_END_VERSION_UNDEFINED),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2000, XE_RTP_END_VERSION_UNDEFINED),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
},
@@ -214,7 +218,14 @@ void xe_tuning_process_lrc(struct xe_hw_engine *hwe)
xe_rtp_process_to_sr(&ctx, lrc_tunings, ARRAY_SIZE(lrc_tunings), &hwe->reg_lrc);
}
-void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_tuning_dump() - Dump GT tuning info into a drm printer.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Return: always 0.
+ */
+int xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
{
size_t idx;
@@ -222,11 +233,15 @@ void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p)
for_each_set_bit(idx, gt->tuning_active.gt, ARRAY_SIZE(gt_tunings))
drm_printf_indent(p, 1, "%s\n", gt_tunings[idx].name);
- drm_printf(p, "\nEngine Tunings\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "Engine Tunings\n");
for_each_set_bit(idx, gt->tuning_active.engine, ARRAY_SIZE(engine_tunings))
drm_printf_indent(p, 1, "%s\n", engine_tunings[idx].name);
- drm_printf(p, "\nLRC Tunings\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "LRC Tunings\n");
for_each_set_bit(idx, gt->tuning_active.lrc, ARRAY_SIZE(lrc_tunings))
drm_printf_indent(p, 1, "%s\n", lrc_tunings[idx].name);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_tuning.h b/drivers/gpu/drm/xe/xe_tuning.h
index dd0d3ccc9c65..c1cc5927fda7 100644
--- a/drivers/gpu/drm/xe/xe_tuning.h
+++ b/drivers/gpu/drm/xe/xe_tuning.h
@@ -14,6 +14,6 @@ int xe_tuning_init(struct xe_gt *gt);
void xe_tuning_process_gt(struct xe_gt *gt);
void xe_tuning_process_engine(struct xe_hw_engine *hwe);
void xe_tuning_process_lrc(struct xe_hw_engine *hwe);
-void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 9bbdde604923..622b76078567 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -115,8 +115,8 @@ struct fw_blobs_by_type {
#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED
#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \
- fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 47, 0)) \
- fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \
+ fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 49, 4)) \
+ fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 49, 4)) \
fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \
fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \
fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 45, 2)) \
@@ -328,7 +328,7 @@ static void uc_fw_fini(struct drm_device *drm, void *arg)
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_SELECTED);
}
-static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
+static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_guc_info *guc_info)
{
struct xe_gt *gt = uc_fw_to_gt(uc_fw);
struct xe_uc_fw_version *release = &uc_fw->versions.found[XE_UC_FW_VER_RELEASE];
@@ -343,11 +343,12 @@ static int guc_read_css_info(struct xe_uc_fw *uc_fw, struct uc_css_header *css)
return -EINVAL;
}
- compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->submission_version);
- compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->submission_version);
- compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->submission_version);
+ compatibility->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, guc_info->submission_version);
+ compatibility->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, guc_info->submission_version);
+ compatibility->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, guc_info->submission_version);
- uc_fw->private_data_size = css->private_data_size;
+ uc_fw->build_type = FIELD_GET(CSS_UKERNEL_INFO_BUILDTYPE, guc_info->ukernel_info);
+ uc_fw->private_data_size = guc_info->private_data_size;
return 0;
}
@@ -416,8 +417,8 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
css = (struct uc_css_header *)fw_data;
/* Check integrity of size values inside CSS header */
- size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
- css->exponent_size_dw) * sizeof(u32);
+ size = (css->header_size_dw - css->rsa_info.key_size_dw - css->rsa_info.modulus_size_dw -
+ css->rsa_info.exponent_size_dw) * sizeof(u32);
if (unlikely(size != sizeof(struct uc_css_header))) {
drm_warn(&xe->drm,
"%s firmware %s: unexpected header size: %zu != %zu\n",
@@ -430,7 +431,7 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
/* now RSA */
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+ uc_fw->rsa_size = css->rsa_info.key_size_dw * sizeof(u32);
/* At least, it should have header, uCode and RSA. Size of all three. */
size = sizeof(struct uc_css_header) + uc_fw->ucode_size +
@@ -443,12 +444,12 @@ static int parse_css_header(struct xe_uc_fw *uc_fw, const void *fw_data, size_t
}
/* Get version numbers from the CSS header */
- release->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->sw_version);
- release->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->sw_version);
- release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->sw_version);
+ release->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css->guc_info.sw_version);
+ release->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css->guc_info.sw_version);
+ release->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css->guc_info.sw_version);
if (uc_fw->type == XE_UC_FW_TYPE_GUC)
- return guc_read_css_info(uc_fw, css);
+ return guc_read_css_info(uc_fw, &css->guc_info);
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_abi.h b/drivers/gpu/drm/xe/xe_uc_fw_abi.h
index 87ade41209d0..3c9a63d13032 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_abi.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_abi.h
@@ -44,6 +44,39 @@
* in fw. So driver will load a truncated firmware in this case.
*/
+struct uc_css_rsa_info {
+ u32 key_size_dw;
+ u32 modulus_size_dw;
+ u32 exponent_size_dw;
+} __packed;
+
+struct uc_css_guc_info {
+ u32 time;
+#define CSS_TIME_HOUR (0xFF << 0)
+#define CSS_TIME_MIN (0xFF << 8)
+#define CSS_TIME_SEC (0xFFFF << 16)
+ u32 reserved0[5];
+ u32 sw_version;
+#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
+#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
+#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
+ u32 submission_version;
+ u32 reserved1[11];
+ u32 header_info;
+#define CSS_HEADER_INFO_SVN (0xFF)
+#define CSS_HEADER_INFO_COPY_VALID (0x1 << 31)
+ u32 private_data_size;
+ u32 ukernel_info;
+#define CSS_UKERNEL_INFO_DEVICEID (0xFFFF << 16)
+#define CSS_UKERNEL_INFO_PRODKEY (0xFF << 8)
+#define CSS_UKERNEL_INFO_BUILDTYPE (0x3 << 2)
+#define CSS_UKERNEL_INFO_BUILDTYPE_PROD 0
+#define CSS_UKERNEL_INFO_BUILDTYPE_PREPROD 1
+#define CSS_UKERNEL_INFO_BUILDTYPE_DEBUG 2
+#define CSS_UKERNEL_INFO_ENCSTATUS (0x1 << 1)
+#define CSS_UKERNEL_INFO_COPY_VALID (0x1 << 0)
+} __packed;
+
struct uc_css_header {
u32 module_type;
/*
@@ -52,36 +85,21 @@ struct uc_css_header {
*/
u32 header_size_dw;
u32 header_version;
- u32 module_id;
+ u32 reserved0;
u32 module_vendor;
u32 date;
-#define CSS_DATE_DAY (0xFF << 0)
-#define CSS_DATE_MONTH (0xFF << 8)
-#define CSS_DATE_YEAR (0xFFFF << 16)
+#define CSS_DATE_DAY (0xFF << 0)
+#define CSS_DATE_MONTH (0xFF << 8)
+#define CSS_DATE_YEAR (0xFFFF << 16)
u32 size_dw; /* uCode plus header_size_dw */
- u32 key_size_dw;
- u32 modulus_size_dw;
- u32 exponent_size_dw;
- u32 time;
-#define CSS_TIME_HOUR (0xFF << 0)
-#define CSS_DATE_MIN (0xFF << 8)
-#define CSS_DATE_SEC (0xFFFF << 16)
- char username[8];
- char buildnumber[12];
- u32 sw_version;
-#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
-#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
-#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
union {
- u32 submission_version; /* only applies to GuC */
- u32 reserved2;
+ u32 reserved1[3];
+ struct uc_css_rsa_info rsa_info;
};
- u32 reserved0[12];
union {
- u32 private_data_size; /* only applies to GuC */
- u32 reserved1;
+ u32 reserved2[22];
+ struct uc_css_guc_info guc_info;
};
- u32 header_info;
} __packed;
static_assert(sizeof(struct uc_css_header) == 128);
@@ -318,4 +336,70 @@ struct gsc_manifest_header {
u32 exponent_size; /* in dwords */
} __packed;
+/**
+ * DOC: Late binding Firmware Layout
+ *
+ * The Late binding binary starts with an FPT header, which contains the
+ * locations of the various partitions of the binary. Here we're interested
+ * in finding the manifest version. To get it, we first need to locate the
+ * CPD header; one of the entries in the CPD header points to the manifest
+ * header, which contains the version.
+ *
+ * +================================================+
+ * | FPT Header |
+ * +================================================+
+ * | FPT entries[] |
+ * | entry1 |
+ * | ... |
+ * | entryX |
+ * | "LTES" |
+ * | ... |
+ * | offset >-----------------------------|------o
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | CPD Header |<-----o
+ * +================================================+
+ * | CPD entries[] |
+ * | entry1 |
+ * | ... |
+ * | entryX |
+ * | "LTES.man" |
+ * | ... |
+ * | offset >----------------------------|------o
+ * +================================================+ |
+ * |
+ * +================================================+ |
+ * | Manifest Header |<-----o
+ * | ... |
+ * | FW version |
+ * | ... |
+ * +================================================+
+ */
+
+/* FPT Headers */
+struct csc_fpt_header {
+ u32 header_marker;
+#define CSC_FPT_HEADER_MARKER 0x54504624
+ u32 num_of_entries;
+ u8 header_version;
+ u8 entry_version;
+ u8 header_length; /* in bytes */
+ u8 flags;
+ u16 ticks_to_add;
+ u16 tokens_to_add;
+ u32 uma_size;
+ u32 crc32;
+ struct gsc_version fitc_version;
+} __packed;
+
+struct csc_fpt_entry {
+ u8 name[4]; /* partition name */
+ u32 reserved1;
+ u32 offset; /* offset from beginning of CSE region */
+ u32 length; /* partition length in bytes */
+ u32 reserved2[3];
+ u32 partition_flags;
+} __packed;
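+
+/*
+ * Illustration only, not part of this patch: a hedged sketch of walking the
+ * FPT to find a named partition such as "LTES", per the layout DOC above.
+ * The 'fpt' pointer, the entries starting right after the header and the
+ * return convention are assumptions for the example.
+ *
+ *	const struct csc_fpt_entry *e =
+ *		(const void *)fpt + fpt->header_length;
+ *	u32 i;
+ *
+ *	if (fpt->header_marker != CSC_FPT_HEADER_MARKER)
+ *		return -EINVAL;
+ *
+ *	for (i = 0; i < fpt->num_of_entries; i++, e++)
+ *		if (!memcmp(e->name, "LTES", sizeof(e->name)))
+ *			return e->offset;
+ */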
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index 914026015019..2ebe8c9db6ce 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -62,7 +62,7 @@ enum xe_uc_fw_type {
};
/**
- * struct xe_uc_fw_version - Version for XE micro controller firmware
+ * struct xe_uc_fw_version - Version for Xe micro controller firmware
*/
struct xe_uc_fw_version {
/** @branch: branch version of the FW (not always available) */
@@ -84,7 +84,7 @@ enum xe_uc_fw_version_types {
};
/**
- * struct xe_uc_fw - XE micro controller firmware
+ * struct xe_uc_fw - Xe micro controller firmware
*/
struct xe_uc_fw {
/** @type: type uC firmware */
@@ -112,7 +112,7 @@ struct xe_uc_fw {
/** @size: size of uC firmware including css header */
size_t size;
- /** @bo: XE BO for uC firmware */
+ /** @bo: Xe BO for uC firmware */
struct xe_bo *bo;
/** @has_gsc_headers: whether the FW image starts with GSC headers */
@@ -147,6 +147,9 @@ struct xe_uc_fw {
/** @private_data_size: size of private data found in uC css header */
u32 private_data_size;
+
+ /** @build_type: Firmware build type (see CSS_UKERNEL_INFO_BUILDTYPE for definitions) */
+ u32 build_type;
};
#endif
diff --git a/drivers/gpu/drm/xe/xe_uc_types.h b/drivers/gpu/drm/xe/xe_uc_types.h
index 9924e4484866..1708379dc834 100644
--- a/drivers/gpu/drm/xe/xe_uc_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_types.h
@@ -12,7 +12,7 @@
#include "xe_wopcm_types.h"
/**
- * struct xe_uc - XE micro controllers
+ * struct xe_uc - Xe micro controllers
*/
struct xe_uc {
/** @guc: Graphics micro controller */
diff --git a/drivers/gpu/drm/xe/xe_userptr.c b/drivers/gpu/drm/xe/xe_userptr.c
new file mode 100644
index 000000000000..0d9130b1958a
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_userptr.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_svm.h"
+#include "xe_userptr.h"
+
+#include <linux/mm.h>
+
+#include "xe_trace_bo.h"
+
+/**
+ * xe_vma_userptr_check_repin() - Advisory check for repin needed
+ * @uvma: The userptr vma
+ *
+ * Check if the userptr vma has been invalidated since the last successful
+ * repin. The check is advisory only and the function can be called
+ * without the vm->svm.gpusvm.notifier_lock held. There is no guarantee that
+ * the vma userptr will remain valid after a lockless check, so typically
+ * the call needs to be followed by a proper check under the notifier_lock.
+ *
+ * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
+ */
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
+{
+ return mmu_interval_check_retry(&uvma->userptr.notifier,
+ uvma->userptr.pages.notifier_seq) ?
+ -EAGAIN : 0;
+}
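+
+/*
+ * Illustration only, not part of this patch: a minimal sketch of pairing the
+ * lockless, advisory check with an authoritative re-check under the notifier
+ * lock, as recommended above. Error handling and the calling context are
+ * assumptions for the example.
+ *
+ *	if (xe_vma_userptr_check_repin(uvma))
+ *		return -EAGAIN;
+ *
+ *	down_read(&vm->svm.gpusvm.notifier_lock);
+ *	err = xe_vma_userptr_check_repin(uvma);
+ *	up_read(&vm->svm.gpusvm.notifier_lock);
+ */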
+
+/**
+ * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function checks for whether the VM has userptrs that need repinning,
+ * and provides a release-type barrier on the svm.gpusvm.notifier_lock after
+ * checking.
+ *
+ * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
+ */
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
+{
+ lockdep_assert_held_read(&vm->svm.gpusvm.notifier_lock);
+
+ return (list_empty(&vm->userptr.repin_list) &&
+ list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
+}
+
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
+{
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
+ struct drm_gpusvm_ctx ctx = {
+ .read_only = xe_vma_read_only(vma),
+ .device_private_page_owner = xe_svm_devm_owner(xe),
+ .allow_mixed = true,
+ };
+
+ lockdep_assert_held(&vm->lock);
+ xe_assert(xe, xe_vma_is_userptr(vma));
+
+ if (vma->gpuva.flags & XE_VMA_DESTROYED)
+ return 0;
+
+ return drm_gpusvm_get_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+ uvma->userptr.notifier.mm,
+ &uvma->userptr.notifier,
+ xe_vma_userptr(vma),
+ xe_vma_userptr(vma) + xe_vma_size(vma),
+ &ctx);
+}
+
+static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ struct drm_gpusvm_ctx ctx = {
+ .in_notifier = true,
+ .read_only = xe_vma_read_only(vma),
+ };
+ long err;
+
+ /*
+ * Tell exec and rebind worker they need to repin and rebind this
+ * userptr.
+ */
+ if (!xe_vm_in_fault_mode(vm) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_move_tail(&userptr->invalidate_link,
+ &vm->userptr.invalidated);
+ spin_unlock(&vm->userptr.invalidated_lock);
+ }
+
+ /*
+ * Preempt fences turn into schedule disables, pipeline these.
+ * Note that even in fault mode, we need to wait for binds and
+ * unbinds to complete, and those are attached as BOOKMARK fences
+ * to the vm.
+ */
+ dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
+ dma_resv_iter_end(&cursor);
+
+ err = dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ XE_WARN_ON(err <= 0);
+
+ if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
+ err = xe_vm_invalidate_vma(vma);
+ XE_WARN_ON(err);
+ }
+
+ drm_gpusvm_unmap_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+ xe_vma_size(vma) >> PAGE_SHIFT, &ctx);
+}
+
+static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
+ trace_xe_vma_userptr_invalidate(vma);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
+ down_write(&vm->svm.gpusvm.notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ __vma_userptr_invalidate(vm, uvma);
+ up_write(&vm->svm.gpusvm.notifier_lock);
+ trace_xe_vma_userptr_invalidate_complete(vma);
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
+ .invalidate = vma_userptr_invalidate,
+};
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+/**
+ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
+ * @uvma: The userptr vma to invalidate
+ *
+ * Perform a forced userptr invalidation for testing purposes.
+ */
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ /* Protect against concurrent userptr pinning */
+ lockdep_assert_held(&vm->lock);
+ /* Protect against concurrent notifiers */
+ lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
+ /*
+ * Protect against concurrent instances of this function and
+ * the critical exec sections
+ */
+ xe_vm_assert_held(vm);
+
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ uvma->userptr.pages.notifier_seq))
+ uvma->userptr.pages.notifier_seq -= 2;
+ __vma_userptr_invalidate(vm, uvma);
+}
+#endif
+
+int xe_vm_userptr_pin(struct xe_vm *vm)
+{
+ struct xe_userptr_vma *uvma, *next;
+ int err = 0;
+
+ xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
+ lockdep_assert_held_write(&vm->lock);
+
+ /* Collect invalidated userptrs */
+ spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
+ list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
+ userptr.invalidate_link) {
+ list_del_init(&uvma->userptr.invalidate_link);
+ list_add_tail(&uvma->userptr.repin_link,
+ &vm->userptr.repin_list);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+
+ /* Pin and move to bind list */
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ err = xe_vma_userptr_pin_pages(uvma);
+ if (err == -EFAULT) {
+ list_del_init(&uvma->userptr.repin_link);
+ /*
+			 * We might have done the pin once already, but
+			 * then had to retry before the re-bind happened, due
+			 * to some other condition in the caller. In the
+			 * meantime the userptr got dinged by the notifier such
+			 * that we need to revalidate here, but this time we hit
+			 * the -EFAULT. In such a case make sure we remove
+ * ourselves from the rebind list to avoid going down in
+ * flames.
+ */
+ if (!list_empty(&uvma->vma.combined_links.rebind))
+ list_del_init(&uvma->vma.combined_links.rebind);
+
+ /* Wait for pending binds */
+ xe_vm_lock(vm, false);
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ down_read(&vm->svm.gpusvm.notifier_lock);
+ err = xe_vm_invalidate_vma(&uvma->vma);
+ up_read(&vm->svm.gpusvm.notifier_lock);
+ xe_vm_unlock(vm);
+ if (err)
+ break;
+ } else {
+ if (err)
+ break;
+
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind,
+ &vm->rebind_list);
+ }
+ }
+
+ if (err) {
+ down_write(&vm->svm.gpusvm.notifier_lock);
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+ up_write(&vm->svm.gpusvm.notifier_lock);
+ }
+ return err;
+}
+
+/**
+ * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
+ * that need repinning.
+ * @vm: The VM.
+ *
+ * This function does an advisory check for whether the VM has userptrs that
+ * need repinning.
+ *
+ * Return: 0 if there are no indications of userptrs needing repinning,
+ * -EAGAIN if there are.
+ */
+int xe_vm_userptr_check_repin(struct xe_vm *vm)
+{
+ return (list_empty_careful(&vm->userptr.repin_list) &&
+ list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
+}
+
+int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
+ unsigned long range)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ int err;
+
+ INIT_LIST_HEAD(&userptr->invalidate_link);
+ INIT_LIST_HEAD(&userptr->repin_link);
+
+ err = mmu_interval_notifier_insert(&userptr->notifier, current->mm,
+ start, range,
+ &vma_userptr_notifier_ops);
+ if (err)
+ return err;
+
+ userptr->pages.notifier_seq = LONG_MAX;
+
+ return 0;
+}
+
+void xe_userptr_remove(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+ struct xe_userptr *userptr = &uvma->userptr;
+
+ drm_gpusvm_free_pages(&vm->svm.gpusvm, &uvma->userptr.pages,
+ xe_vma_size(&uvma->vma) >> PAGE_SHIFT);
+
+ /*
+ * Since userptr pages are not pinned, we can't remove
+ * the notifier until we're sure the GPU is not accessing
+ * them anymore
+ */
+ mmu_interval_notifier_remove(&userptr->notifier);
+}
+
+void xe_userptr_destroy(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&uvma->userptr.repin_link));
+ list_del(&uvma->userptr.invalidate_link);
+ spin_unlock(&vm->userptr.invalidated_lock);
+}
diff --git a/drivers/gpu/drm/xe/xe_userptr.h b/drivers/gpu/drm/xe/xe_userptr.h
new file mode 100644
index 000000000000..ef801234991e
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_userptr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_USERPTR_H_
+#define _XE_USERPTR_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_gpusvm.h>
+
+struct xe_vm;
+struct xe_vma;
+struct xe_userptr_vma;
+
+/** struct xe_userptr_vm - User pointer VM level state */
+struct xe_userptr_vm {
+ /**
+ * @userptr.repin_list: list of VMAs which are user pointers,
+	 * and need repinning. Protected by @lock.
+ */
+ struct list_head repin_list;
+ /**
+ * @userptr.invalidated_lock: Protects the
+ * @userptr.invalidated list.
+ */
+ spinlock_t invalidated_lock;
+ /**
+	 * @userptr.invalidated: List of invalidated userptrs, not yet
+	 * picked up for revalidation. Protected from access with the
+ * @invalidated_lock. Removing items from the list
+ * additionally requires @lock in write mode, and adding
+ * items to the list requires either the @svm.gpusvm.notifier_lock in
+ * write mode, OR @lock in write mode.
+ */
+ struct list_head invalidated;
+};
+
+/** struct xe_userptr - User pointer */
+struct xe_userptr {
+ /** @invalidate_link: Link for the vm::userptr.invalidated list */
+ struct list_head invalidate_link;
+	/** @repin_link: link into VM repin list if userptr. */
+ struct list_head repin_link;
+ /**
+ * @pages: gpusvm pages for this user pointer.
+ */
+ struct drm_gpusvm_pages pages;
+ /**
+ * @notifier: MMU notifier for user pointer (invalidation call back)
+ */
+ struct mmu_interval_notifier notifier;
+
+ /**
+ * @initial_bind: user pointer has been bound at least once.
+ * write: vm->svm.gpusvm.notifier_lock in read mode and vm->resv held.
+ * read: vm->svm.gpusvm.notifier_lock in write mode or vm->resv held.
+ */
+ bool initial_bind;
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+ u32 divisor;
+#endif
+};
+
+#if IS_ENABLED(CONFIG_DRM_GPUSVM)
+void xe_userptr_remove(struct xe_userptr_vma *uvma);
+int xe_userptr_setup(struct xe_userptr_vma *uvma, unsigned long start,
+ unsigned long range);
+void xe_userptr_destroy(struct xe_userptr_vma *uvma);
+
+int xe_vm_userptr_pin(struct xe_vm *vm);
+int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
+int xe_vm_userptr_check_repin(struct xe_vm *vm);
+int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
+int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_userptr_remove(struct xe_userptr_vma *uvma) {}
+
+static inline int xe_userptr_setup(struct xe_userptr_vma *uvma,
+ unsigned long start, unsigned long range)
+{
+ return -ENODEV;
+}
+
+static inline void xe_userptr_destroy(struct xe_userptr_vma *uvma) {}
+
+static inline int xe_vm_userptr_pin(struct xe_vm *vm) { return 0; }
+static inline int __xe_vm_userptr_needs_repin(struct xe_vm *vm) { return 0; }
+static inline int xe_vm_userptr_check_repin(struct xe_vm *vm) { return 0; }
+static inline int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma) { return -ENODEV; }
+static inline int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma) { return -ENODEV; };
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_validation.c b/drivers/gpu/drm/xe/xe_validation.c
new file mode 100644
index 000000000000..826cd09966ef
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_validation.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#include "xe_bo.h"
+#include <drm/drm_exec.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gpuvm.h>
+
+#include "xe_assert.h"
+#include "xe_validation.h"
+
+#ifdef CONFIG_DRM_XE_DEBUG
+/**
+ * xe_validation_assert_exec() - Assert that the drm_exec pointer is suitable
+ * for validation.
+ * @xe: Pointer to the xe device.
+ * @exec: The drm_exec pointer to check.
+ * @obj: Pointer to the object subject to validation.
+ *
+ * NULL exec pointers are not allowed.
+ * For XE_VALIDATION_UNIMPLEMENTED, no checking is done.
+ * For XE_VALIDATION_OPT_OUT, check that the caller is a kunit test.
+ * For XE_VALIDATION_UNSUPPORTED, check that the object subject to
+ * validation is a dma-buf, for which support for ww locking is
+ * not in place in the dma-buf layer.
+ */
+void xe_validation_assert_exec(const struct xe_device *xe,
+ const struct drm_exec *exec,
+ const struct drm_gem_object *obj)
+{
+ xe_assert(xe, exec);
+ if (IS_ERR(exec)) {
+ switch (PTR_ERR(exec)) {
+ case __XE_VAL_UNIMPLEMENTED:
+ break;
+ case __XE_VAL_UNSUPPORTED:
+ xe_assert(xe, !!obj->dma_buf);
+ break;
+#if IS_ENABLED(CONFIG_KUNIT)
+ case __XE_VAL_OPT_OUT:
+ xe_assert(xe, current->kunit_test);
+ break;
+#endif
+ default:
+ xe_assert(xe, false);
+ }
+ }
+}
+#endif
+
+static int xe_validation_lock(struct xe_validation_ctx *ctx)
+{
+ struct xe_validation_device *val = ctx->val;
+ int ret = 0;
+
+ if (ctx->val_flags.interruptible) {
+ if (ctx->request_exclusive)
+ ret = down_write_killable(&val->lock);
+ else
+ ret = down_read_interruptible(&val->lock);
+ } else {
+ if (ctx->request_exclusive)
+ down_write(&val->lock);
+ else
+ down_read(&val->lock);
+ }
+
+ if (!ret) {
+ ctx->lock_held = true;
+ ctx->lock_held_exclusive = ctx->request_exclusive;
+ }
+
+ return ret;
+}
+
+static int xe_validation_trylock(struct xe_validation_ctx *ctx)
+{
+ struct xe_validation_device *val = ctx->val;
+ bool locked;
+
+ if (ctx->request_exclusive)
+ locked = down_write_trylock(&val->lock);
+ else
+ locked = down_read_trylock(&val->lock);
+
+ if (locked) {
+ ctx->lock_held = true;
+ ctx->lock_held_exclusive = ctx->request_exclusive;
+ }
+
+ return locked ? 0 : -EWOULDBLOCK;
+}
+
+static void xe_validation_unlock(struct xe_validation_ctx *ctx)
+{
+ if (!ctx->lock_held)
+ return;
+
+ if (ctx->lock_held_exclusive)
+ up_write(&ctx->val->lock);
+ else
+ up_read(&ctx->val->lock);
+
+ ctx->lock_held = false;
+}
+
+/**
+ * xe_validation_ctx_init() - Initialize an xe_validation_ctx
+ * @ctx: The xe_validation_ctx to initialize.
+ * @val: The xe_validation_device representing the validation domain.
+ * @exec: The struct drm_exec to use for the transaction. May be NULL.
+ * @flags: The flags to use for initialization.
+ *
+ * Initialize and lock an xe_validation transaction using the validation domain
+ * represented by @val. Also initialize the drm_exec object, forwarding parts of
+ * @flags to the drm_exec initialization. The @flags.exclusive flag should
+ * typically be set to false to avoid locking out other validators from the
+ * domain until an OOM is hit. For testing or final-attempt purposes it can,
+ * however, be set to true.
+ *
+ * Return: %0 on success, %-EINTR if interruptible initial locking failed with a
+ * signal pending. If @flags.no_block is set to true, a failed trylock
+ * returns %-EWOULDBLOCK.
+ */
+int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
+ struct drm_exec *exec, const struct xe_val_flags flags)
+{
+ int ret;
+
+ ctx->exec = exec;
+ ctx->val = val;
+ ctx->lock_held = false;
+ ctx->lock_held_exclusive = false;
+ ctx->request_exclusive = flags.exclusive;
+ ctx->val_flags = flags;
+ ctx->exec_flags = 0;
+ ctx->nr = 0;
+
+ if (flags.no_block)
+ ret = xe_validation_trylock(ctx);
+ else
+ ret = xe_validation_lock(ctx);
+ if (ret)
+ return ret;
+
+ if (exec) {
+ if (flags.interruptible)
+ ctx->exec_flags |= DRM_EXEC_INTERRUPTIBLE_WAIT;
+ if (flags.exec_ignore_duplicates)
+ ctx->exec_flags |= DRM_EXEC_IGNORE_DUPLICATES;
+ drm_exec_init(exec, ctx->exec_flags, ctx->nr);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+/*
+ * This abuses both drm_exec and ww_mutex internals and should be
+ * replaced by checking for -EDEADLK when we can make TTM
+ * stop converting -EDEADLK to -ENOMEM.
+ * An alternative is to not have exhaustive eviction with
+ * CONFIG_DEBUG_WW_MUTEX_SLOWPATH until that happens.
+ */
+static bool xe_validation_contention_injected(struct drm_exec *exec)
+{
+ return !!exec->ticket.contending_lock;
+}
+
+#else
+
+static bool xe_validation_contention_injected(struct drm_exec *exec)
+{
+ return false;
+}
+
+#endif
+
+static bool __xe_validation_should_retry(struct xe_validation_ctx *ctx, int ret)
+{
+ if (ret == -ENOMEM &&
+ ((ctx->request_exclusive &&
+ xe_validation_contention_injected(ctx->exec)) ||
+ !ctx->request_exclusive)) {
+ ctx->request_exclusive = true;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * xe_validation_exec_lock() - Perform drm_gpuvm_exec_lock within a validation
+ * transaction.
+ * @ctx: An uninitialized xe_validation_ctx.
+ * @vm_exec: An initialized struct vm_exec.
+ * @val: The validation domain.
+ *
+ * The drm_gpuvm_exec_lock() function internally initializes its drm_exec
+ * transaction and therefore doesn't lend itself very well to be using
+ * xe_validation_ctx_init(). Provide a helper that takes an uninitialized
+ * xe_validation_ctx and calls drm_gpuvm_exec_lock() with OOM retry.
+ *
+ * Return: %0 on success, negative error code on failure.
+ */
+int xe_validation_exec_lock(struct xe_validation_ctx *ctx,
+ struct drm_gpuvm_exec *vm_exec,
+ struct xe_validation_device *val)
+{
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->exec = &vm_exec->exec;
+ ctx->exec_flags = vm_exec->flags;
+ ctx->val = val;
+ if (ctx->exec_flags & DRM_EXEC_INTERRUPTIBLE_WAIT)
+ ctx->val_flags.interruptible = 1;
+ if (ctx->exec_flags & DRM_EXEC_IGNORE_DUPLICATES)
+ ctx->val_flags.exec_ignore_duplicates = 1;
+retry:
+ ret = xe_validation_lock(ctx);
+ if (ret)
+ return ret;
+
+ ret = drm_gpuvm_exec_lock(vm_exec);
+ if (ret) {
+ xe_validation_unlock(ctx);
+ if (__xe_validation_should_retry(ctx, ret))
+ goto retry;
+ }
+
+ return ret;
+}
+
+/**
+ * xe_validation_ctx_fini() - Finalize a validation transaction
+ * @ctx: The Validation transaction to finalize.
+ *
+ * Finalize a validation transaction and its related drm_exec transaction.
+ */
+void xe_validation_ctx_fini(struct xe_validation_ctx *ctx)
+{
+ if (ctx->exec)
+ drm_exec_fini(ctx->exec);
+ xe_validation_unlock(ctx);
+}
+
+/**
+ * xe_validation_should_retry() - Determine if a validation transaction should retry
+ * @ctx: The validation transaction.
+ * @ret: Pointer to a return value variable.
+ *
+ * Determines whether a validation transaction should retry based on the
+ * internal transaction state and the return value pointed to by @ret.
+ * If a validation should be retried, the transaction is prepared for that,
+ * the validation lock might be re-locked in exclusive mode, and *@ret
+ * is set to %0. If the re-locking fails, typically due to interruptible
+ * locking with a signal pending, *@ret is instead set to %-EINTR and the
+ * function returns %false.
+ *
+ * Return: %true if validation should be retried, %false otherwise.
+ */
+bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret)
+{
+ if (__xe_validation_should_retry(ctx, *ret)) {
+ drm_exec_fini(ctx->exec);
+ *ret = 0;
+ if (ctx->request_exclusive != ctx->lock_held_exclusive) {
+ xe_validation_unlock(ctx);
+ *ret = xe_validation_lock(ctx);
+ }
+ drm_exec_init(ctx->exec, ctx->exec_flags, ctx->nr);
+ return !*ret;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/xe/xe_validation.h b/drivers/gpu/drm/xe/xe_validation.h
new file mode 100644
index 000000000000..a30e732c4d51
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_validation.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+#ifndef _XE_VALIDATION_H_
+#define _XE_VALIDATION_H_
+
+#include <linux/dma-resv.h>
+#include <linux/types.h>
+#include <linux/rwsem.h>
+
+struct drm_exec;
+struct drm_gem_object;
+struct drm_gpuvm_exec;
+struct xe_device;
+
+#ifdef CONFIG_PROVE_LOCKING
+/**
+ * xe_validation_lockdep() - Assert that a drm_exec locking transaction can
+ * be initialized at this point.
+ */
+static inline void xe_validation_lockdep(void)
+{
+ struct ww_acquire_ctx ticket;
+
+ ww_acquire_init(&ticket, &reservation_ww_class);
+ ww_acquire_fini(&ticket);
+}
+#else
+static inline void xe_validation_lockdep(void)
+{
+}
+#endif
+
+/*
+ * Various values of the drm_exec pointer where we've not (yet)
+ * implemented full ww locking.
+ *
+ * XE_VALIDATION_UNIMPLEMENTED means implementation is pending.
+ * A lockdep check is made to assure that a drm_exec locking
+ * transaction can actually take place where the macro is
+ * used. If this asserts, the exec pointer needs to be assigned
+ * higher up in the callchain and passed down.
+ *
+ * XE_VALIDATION_UNSUPPORTED is for dma-buf code only where
+ * the dma-buf layer doesn't support WW locking.
+ *
+ * XE_VALIDATION_OPT_OUT is for simplification of kunit tests where
+ * exhaustive eviction isn't necessary.
+ */
+#define __XE_VAL_UNIMPLEMENTED -EINVAL
+#define XE_VALIDATION_UNIMPLEMENTED (xe_validation_lockdep(), \
+ (struct drm_exec *)ERR_PTR(__XE_VAL_UNIMPLEMENTED))
+
+#define __XE_VAL_UNSUPPORTED -EOPNOTSUPP
+#define XE_VALIDATION_UNSUPPORTED ((struct drm_exec *)ERR_PTR(__XE_VAL_UNSUPPORTED))
+
+#define __XE_VAL_OPT_OUT -ENOMEM
+#define XE_VALIDATION_OPT_OUT (xe_validation_lockdep(), \
+ (struct drm_exec *)ERR_PTR(__XE_VAL_OPT_OUT))
+#ifdef CONFIG_DRM_XE_DEBUG
+void xe_validation_assert_exec(const struct xe_device *xe, const struct drm_exec *exec,
+ const struct drm_gem_object *obj);
+#else
+#define xe_validation_assert_exec(_xe, _exec, _obj) \
+ do { \
+ (void)_xe; (void)_exec; (void)_obj; \
+ } while (0)
+#endif
+
+/**
+ * struct xe_validation_device - The domain for exhaustive eviction
+ * @lock: The lock used to exclude other processes from allocating graphics memory
+ *
+ * The struct xe_validation_device represents the domain for which we want to use
+ * exhaustive eviction. The @lock is typically grabbed in read mode for allocations
+ * but when graphics memory allocation fails, it is retried with the write mode held.
+ */
+struct xe_validation_device {
+ struct rw_semaphore lock;
+};
+
+/**
+ * struct xe_val_flags - Flags for xe_validation_ctx_init().
+ * @exclusive: Start the validation transaction by locking out all other validators.
+ * @no_block: Don't block on initialization.
+ * @interruptible: Block interruptibly if blocking. Implies initializing the drm_exec
+ * context with the DRM_EXEC_INTERRUPTIBLE_WAIT flag.
+ * @exec_ignore_duplicates: Initialize the drm_exec context with the
+ * DRM_EXEC_IGNORE_DUPLICATES flag.
+ */
+struct xe_val_flags {
+ u32 exclusive :1;
+ u32 no_block :1;
+ u32 interruptible :1;
+ u32 exec_ignore_duplicates :1;
+};
+
+/**
+ * struct xe_validation_ctx - A struct drm_exec subclass with support for
+ * exhaustive eviction
+ * @exec: The drm_exec object base class. Note that we use a pointer instead of
+ * embedding to avoid diamond inheritance.
+ * @val: The exhaustive eviction domain.
+ * @val_flags: Copy of the struct xe_val_flags passed to xe_validation_ctx_init.
+ * @lock_held: Whether the domain lock is currently held.
+ * @lock_held_exclusive: Whether the domain lock is held in exclusive mode.
+ * @request_exclusive: Whether to lock exclusively (write mode) the next time
+ * the domain lock is locked.
+ * @exec_flags: The drm_exec flags used for drm_exec (re-)initialization.
+ * @nr: The drm_exec nr parameter used for drm_exec (re-)initialization.
+ */
+struct xe_validation_ctx {
+ struct drm_exec *exec;
+ struct xe_validation_device *val;
+ struct xe_val_flags val_flags;
+ bool lock_held;
+ bool lock_held_exclusive;
+ bool request_exclusive;
+ u32 exec_flags;
+ unsigned int nr;
+};
+
+int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
+ struct drm_exec *exec, const struct xe_val_flags flags);
+
+int xe_validation_exec_lock(struct xe_validation_ctx *ctx, struct drm_gpuvm_exec *vm_exec,
+ struct xe_validation_device *val);
+
+void xe_validation_ctx_fini(struct xe_validation_ctx *ctx);
+
+bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret);
+
+/**
+ * xe_validation_retry_on_oom() - Retry on oom in an xe_validation transaction
+ * @_ctx: Pointer to the xe_validation_ctx
+ * @_ret: The current error value possibly holding -ENOMEM
+ *
+ * Use this in way similar to drm_exec_retry_on_contention().
+ * If @_ret contains -ENOMEM the transaction is restarted once in a way that
+ * blocks other transactions and allows exhaustive eviction. If the transaction
+ * was already restarted once, the -ENOMEM is simply returned. May also set
+ * *@_ret to -EINTR if not retrying and waits are interruptible.
+ * May only be used within a drm_exec_until_all_locked() loop.
+ */
+#define xe_validation_retry_on_oom(_ctx, _ret) \
+ do { \
+ if (xe_validation_should_retry(_ctx, _ret)) \
+ goto *__drm_exec_retry_ptr; \
+ } while (0)
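+
+/*
+ * Illustration only, not part of this patch: a hedged sketch of the intended
+ * call pattern, mirroring drm_exec_retry_on_contention(). The 'bo' and 'vm'
+ * objects and the xe_bo_validate() step are assumptions for the example.
+ *
+ *	err = xe_validation_ctx_init(&ctx, &xe->val, &exec,
+ *				     (struct xe_val_flags) {.interruptible = true});
+ *	if (err)
+ *		return err;
+ *
+ *	drm_exec_until_all_locked(&exec) {
+ *		err = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ *		drm_exec_retry_on_contention(&exec);
+ *		if (!err)
+ *			err = xe_bo_validate(bo, vm, false, &exec);
+ *		xe_validation_retry_on_oom(&ctx, &err);
+ *	}
+ *	xe_validation_ctx_fini(&ctx);
+ */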
+
+/**
+ * xe_validation_device_init - Initialize a struct xe_validation_device
+ * @val: The xe_validation_device to init.
+ */
+static inline void
+xe_validation_device_init(struct xe_validation_device *val)
+{
+ init_rwsem(&val->lock);
+}
+
+/*
+ * Make guard() and scoped_guard() work with xe_validation_ctx
+ * so that we can exit transactions without caring about the
+ * cleanup.
+ */
+DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,
+ if (_T) xe_validation_ctx_fini(_T);,
+ ({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
+ *_ret ? NULL : _ctx; }),
+ struct xe_validation_ctx *_ctx, struct xe_validation_device *_val,
+ struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret);
+static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
+{return *_T; }
+#define class_xe_validation_is_conditional true
+
+/**
+ * xe_validation_guard() - An auto-cleanup xe_validation_ctx transaction
+ * @_ctx: The xe_validation_ctx.
+ * @_val: The xe_validation_device.
+ * @_exec: The struct drm_exec object
+ * @_flags: Flags for the xe_validation_ctx initialization.
+ * @_ret: In/out return parameter. May be set by this macro. Typically 0 when called.
+ *
+ * This macro will initiate a drm_exec transaction with additional support for
+ * exhaustive eviction.
+ */
+#define xe_validation_guard(_ctx, _val, _exec, _flags, _ret) \
+ scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \
+ drm_exec_until_all_locked(_exec)
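+
+/*
+ * Illustration only, not part of this patch: the guard form of the same
+ * pattern; the body runs inside drm_exec_until_all_locked() and the context
+ * is finalized automatically on scope exit. 'bo' and 'vm' are assumptions
+ * for the example.
+ *
+ *	struct xe_validation_ctx ctx;
+ *	struct drm_exec exec;
+ *	int ret = 0;
+ *
+ *	xe_validation_guard(&ctx, &xe->val, &exec,
+ *			    (struct xe_val_flags) {.interruptible = true}, ret) {
+ *		ret = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ *		drm_exec_retry_on_contention(&exec);
+ *		if (!ret)
+ *			ret = xe_bo_validate(bo, vm, false, &exec);
+ *		xe_validation_retry_on_oom(&ctx, &ret);
+ *	}
+ *	return ret;
+ */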
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index dc4f61e56579..7cac646bdf1c 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -27,8 +27,6 @@
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
-#include "xe_gt_pagefault.h"
-#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pat.h"
#include "xe_pm.h"
@@ -36,11 +34,13 @@
#include "xe_pt.h"
#include "xe_pxp.h"
#include "xe_res_cursor.h"
+#include "xe_sriov_vf.h"
#include "xe_svm.h"
#include "xe_sync.h"
+#include "xe_tile.h"
+#include "xe_tlb_inval.h"
#include "xe_trace_bo.h"
#include "xe_wa.h"
-#include "xe_hmm.h"
static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
@@ -48,34 +48,17 @@ static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
}
/**
- * xe_vma_userptr_check_repin() - Advisory check for repin needed
- * @uvma: The userptr vma
+ * xe_vm_drm_exec_lock() - Lock the vm's resv with a drm_exec transaction
+ * @vm: The vm whose resv is to be locked.
+ * @exec: The drm_exec transaction.
*
- * Check if the userptr vma has been invalidated since last successful
- * repin. The check is advisory only and can the function can be called
- * without the vm->userptr.notifier_lock held. There is no guarantee that the
- * vma userptr will remain valid after a lockless check, so typically
- * the call needs to be followed by a proper check under the notifier_lock.
+ * Helper to lock the vm's resv as part of a drm_exec transaction.
*
- * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
+ * Return: %0 on success. See drm_exec_lock_obj() for error codes.
*/
-int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
+int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec)
{
- return mmu_interval_check_retry(&uvma->userptr.notifier,
- uvma->userptr.notifier_seq) ?
- -EAGAIN : 0;
-}
-
-int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
-{
- struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
-
- lockdep_assert_held(&vm->lock);
- xe_assert(xe, xe_vma_is_userptr(vma));
-
- return xe_hmm_userptr_populate_range(uvma, false);
+ return drm_exec_lock_obj(exec, xe_vm_obj(vm));
}
static bool preempt_fences_waiting(struct xe_vm *vm)
@@ -128,12 +111,22 @@ static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
static int wait_for_existing_preempt_fences(struct xe_vm *vm)
{
struct xe_exec_queue *q;
+ bool vf_migration = IS_SRIOV_VF(vm->xe) &&
+ xe_sriov_vf_migration_supported(vm->xe);
+ signed long wait_time = vf_migration ? HZ / 5 : MAX_SCHEDULE_TIMEOUT;
xe_vm_assert_held(vm);
list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
if (q->lr.pfence) {
- long timeout = dma_fence_wait(q->lr.pfence, false);
+ long timeout;
+
+ timeout = dma_fence_wait_timeout(q->lr.pfence, false,
+ wait_time);
+ if (!timeout) {
+ xe_assert(vm->xe, vf_migration);
+ return -EAGAIN;
+ }
/* Only -ETIME on fence indicates VM needs to be killed */
if (timeout < 0 || q->lr.pfence->error == -ETIME)
@@ -227,6 +220,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
.num_fences = 1,
};
struct drm_exec *exec = &vm_exec.exec;
+ struct xe_validation_ctx ctx;
struct dma_fence *pfence;
int err;
bool wait;
@@ -234,14 +228,14 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
down_write(&vm->lock);
- err = drm_gpuvm_exec_lock(&vm_exec);
+ err = xe_validation_exec_lock(&ctx, &vm_exec, &vm->xe->val);
if (err)
goto out_up_write;
pfence = xe_preempt_fence_create(q, q->lr.context,
++q->lr.seqno);
- if (!pfence) {
- err = -ENOMEM;
+ if (IS_ERR(pfence)) {
+ err = PTR_ERR(pfence);
goto out_fini;
}
@@ -249,7 +243,7 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
++vm->preempt.num_exec_queues;
q->lr.pfence = pfence;
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence,
DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
@@ -263,10 +257,10 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
if (wait)
dma_fence_enable_sw_signaling(pfence);
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
out_fini:
- drm_exec_fini(exec);
+ xe_validation_ctx_fini(&ctx);
out_up_write:
up_write(&vm->lock);
@@ -299,25 +293,6 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
up_write(&vm->lock);
}
-/**
- * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs
- * that need repinning.
- * @vm: The VM.
- *
- * This function checks for whether the VM has userptrs that need repinning,
- * and provides a release-type barrier on the userptr.notifier_lock after
- * checking.
- *
- * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are.
- */
-int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
-{
- lockdep_assert_held_read(&vm->userptr.notifier_lock);
-
- return (list_empty(&vm->userptr.repin_list) &&
- list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
-}
-
#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000
/**
@@ -349,39 +324,6 @@ void xe_vm_kill(struct xe_vm *vm, bool unlocked)
/* TODO: Inform user the VM is banned */
}
-/**
- * xe_vm_validate_should_retry() - Whether to retry after a validate error.
- * @exec: The drm_exec object used for locking before validation.
- * @err: The error returned from ttm_bo_validate().
- * @end: A ktime_t cookie that should be set to 0 before first use and
- * that should be reused on subsequent calls.
- *
- * With multiple active VMs, under memory pressure, it is possible that
- * ttm_bo_validate() run into -EDEADLK and in such case returns -ENOMEM.
- * Until ttm properly handles locking in such scenarios, best thing the
- * driver can do is retry with a timeout. Check if that is necessary, and
- * if so unlock the drm_exec's objects while keeping the ticket to prepare
- * for a rerun.
- *
- * Return: true if a retry after drm_exec_init() is recommended;
- * false otherwise.
- */
-bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
-{
- ktime_t cur;
-
- if (err != -ENOMEM)
- return false;
-
- cur = ktime_get();
- *end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
- if (!ktime_before(cur, *end))
- return false;
-
- msleep(20);
- return true;
-}
-
static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
@@ -396,7 +338,7 @@ static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
if (!try_wait_for_completion(&vm->xe->pm_block))
return -EAGAIN;
- ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
+ ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false, exec);
if (ret)
return ret;
@@ -512,10 +454,10 @@ void xe_vm_resume_rebind_worker(struct xe_vm *vm)
static void preempt_rebind_work_func(struct work_struct *w)
{
struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
unsigned int fence_count = 0;
LIST_HEAD(preempt_fences);
- ktime_t end = 0;
int err = 0;
long wait;
int __maybe_unused tries = 0;
@@ -534,6 +476,8 @@ static void preempt_rebind_work_func(struct work_struct *w)
retry:
if (!try_wait_for_completion(&vm->xe->pm_block) && vm_suspend_rebind_worker(vm)) {
up_write(&vm->lock);
+ /* We don't actually block but don't make progress. */
+ xe_pm_might_block_on_suspend();
return;
}
@@ -543,18 +487,19 @@ retry:
goto out_unlock_outer;
}
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ err = xe_validation_ctx_init(&ctx, &vm->xe->val, &exec,
+ (struct xe_val_flags) {.interruptible = true});
+ if (err)
+ goto out_unlock_outer;
drm_exec_until_all_locked(&exec) {
bool done = false;
err = xe_preempt_work_begin(&exec, vm, &done);
drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
if (err || done) {
- drm_exec_fini(&exec);
- if (err && xe_vm_validate_should_retry(&exec, err, &end))
- err = -EAGAIN;
-
+ xe_validation_ctx_fini(&ctx);
goto out_unlock_outer;
}
}
@@ -563,7 +508,9 @@ retry:
if (err)
goto out_unlock;
+ xe_vm_set_validation_exec(vm, &exec);
err = xe_vm_rebind(vm, true);
+ xe_vm_set_validation_exec(vm, NULL);
if (err)
goto out_unlock;
@@ -581,9 +528,9 @@ retry:
(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
__xe_vm_userptr_needs_repin(__vm))
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
if (retry_required(tries, vm)) {
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
err = -EAGAIN;
goto out_unlock;
}
@@ -597,13 +544,26 @@ retry:
/* Point of no return. */
arm_preempt_fences(vm, &preempt_fences);
resume_and_reinstall_preempt_fences(vm, &exec);
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
out_unlock:
- drm_exec_fini(&exec);
+ xe_validation_ctx_fini(&ctx);
out_unlock_outer:
if (err == -EAGAIN) {
trace_xe_vm_rebind_worker_retry(vm);
+
+ /*
+ * We can't block in workers on a VF that supports migration,
+ * since doing so can prevent the VF post-migration workers
+ * from getting scheduled.
+ */
+ if (IS_SRIOV_VF(vm->xe) &&
+ xe_sriov_vf_migration_supported(vm->xe)) {
+ up_write(&vm->lock);
+ xe_vm_queue_rebind_worker(vm);
+ return;
+ }
+
goto retry;
}
@@ -618,203 +578,6 @@ out_unlock_outer:
trace_xe_vm_rebind_worker_exit(vm);
}
-static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
-{
- struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- long err;
-
- /*
- * Tell exec and rebind worker they need to repin and rebind this
- * userptr.
- */
- if (!xe_vm_in_fault_mode(vm) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&userptr->invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
- }
-
- /*
- * Preempt fences turn into schedule disables, pipeline these.
- * Note that even in fault mode, we need to wait for binds and
- * unbinds to complete, and those are attached as BOOKMARK fences
- * to the vm.
- */
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
- err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
- XE_WARN_ON(err <= 0);
-
- if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
- err = xe_vm_invalidate_vma(vma);
- XE_WARN_ON(err);
- }
-
- xe_hmm_userptr_unmap(uvma);
-}
-
-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
-{
- struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
- struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
-
- xe_assert(vm->xe, xe_vma_is_userptr(vma));
- trace_xe_vma_userptr_invalidate(vma);
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "NOTIFIER: addr=0x%016llx, range=0x%016llx",
- xe_vma_start(vma), xe_vma_size(vma));
-
- down_write(&vm->userptr.notifier_lock);
- mmu_interval_set_seq(mni, cur_seq);
-
- __vma_userptr_invalidate(vm, uvma);
- up_write(&vm->userptr.notifier_lock);
- trace_xe_vma_userptr_invalidate_complete(vma);
-
- return true;
-}
-
-static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
- .invalidate = vma_userptr_invalidate,
-};
-
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
-/**
- * xe_vma_userptr_force_invalidate() - force invalidate a userptr
- * @uvma: The userptr vma to invalidate
- *
- * Perform a forced userptr invalidation for testing purposes.
- */
-void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
-{
- struct xe_vm *vm = xe_vma_vm(&uvma->vma);
-
- /* Protect against concurrent userptr pinning */
- lockdep_assert_held(&vm->lock);
- /* Protect against concurrent notifiers */
- lockdep_assert_held(&vm->userptr.notifier_lock);
- /*
- * Protect against concurrent instances of this function and
- * the critical exec sections
- */
- xe_vm_assert_held(vm);
-
- if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- uvma->userptr.notifier_seq))
- uvma->userptr.notifier_seq -= 2;
- __vma_userptr_invalidate(vm, uvma);
-}
-#endif
-
-int xe_vm_userptr_pin(struct xe_vm *vm)
-{
- struct xe_userptr_vma *uvma, *next;
- int err = 0;
-
- xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
- lockdep_assert_held_write(&vm->lock);
-
- /* Collect invalidated userptrs */
- spin_lock(&vm->userptr.invalidated_lock);
- xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
- list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
- userptr.invalidate_link) {
- list_del_init(&uvma->userptr.invalidate_link);
- list_add_tail(&uvma->userptr.repin_link,
- &vm->userptr.repin_list);
- }
- spin_unlock(&vm->userptr.invalidated_lock);
-
- /* Pin and move to bind list */
- list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
- userptr.repin_link) {
- err = xe_vma_userptr_pin_pages(uvma);
- if (err == -EFAULT) {
- list_del_init(&uvma->userptr.repin_link);
- /*
- * We might have already done the pin once already, but
- * then had to retry before the re-bind happened, due
- * some other condition in the caller, but in the
- * meantime the userptr got dinged by the notifier such
- * that we need to revalidate here, but this time we hit
- * the EFAULT. In such a case make sure we remove
- * ourselves from the rebind list to avoid going down in
- * flames.
- */
- if (!list_empty(&uvma->vma.combined_links.rebind))
- list_del_init(&uvma->vma.combined_links.rebind);
-
- /* Wait for pending binds */
- xe_vm_lock(vm, false);
- dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
-
- down_read(&vm->userptr.notifier_lock);
- err = xe_vm_invalidate_vma(&uvma->vma);
- up_read(&vm->userptr.notifier_lock);
- xe_vm_unlock(vm);
- if (err)
- break;
- } else {
- if (err)
- break;
-
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->vma.combined_links.rebind,
- &vm->rebind_list);
- }
- }
-
- if (err) {
- down_write(&vm->userptr.notifier_lock);
- spin_lock(&vm->userptr.invalidated_lock);
- list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
- userptr.repin_link) {
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- }
- spin_unlock(&vm->userptr.invalidated_lock);
- up_write(&vm->userptr.notifier_lock);
- }
- return err;
-}
-
-/**
- * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
- * that need repinning.
- * @vm: The VM.
- *
- * This function does an advisory check for whether the VM has userptrs that
- * need repinning.
- *
- * Return: 0 if there are no indications of userptrs needing repinning,
- * -EAGAIN if there are.
- */
-int xe_vm_userptr_check_repin(struct xe_vm *vm)
-{
- return (list_empty_careful(&vm->userptr.repin_list) &&
- list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
-}
-
static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
{
int i;
@@ -878,6 +641,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask,
vops->pt_update_ops[i].num_ops += inc_val;
}
+#define XE_VMA_CREATE_MASK ( \
+ XE_VMA_READ_ONLY | \
+ XE_VMA_DUMPABLE | \
+ XE_VMA_SYSTEM_ALLOCATOR | \
+ DRM_GPUVA_SPARSE | \
+ XE_VMA_MADV_AUTORESET)
+
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
u8 tile_mask)
{
@@ -890,8 +660,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
op->base.map.gem.offset = vma->gpuva.gem.offset;
op->map.vma = vma;
op->map.immediate = true;
- op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE;
- op->map.is_null = xe_vma_is_null(vma);
+ op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;
}
static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
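
The new XE_VMA_CREATE_MASK gathers every per-VMA flag that is meaningful at
creation time, so split and rebind paths can filter a live vma's gpuva flags
in a single expression instead of testing each flag individually. A condensed
view of the pattern, as used in the hunks that follow (illustrative only):

	/* Rebind: carry over only creation-time flags, dropping transient
	 * state such as XE_VMA_DESTROYED.
	 */
	op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK;

	/* Remap: the prev/next pieces of a split inherit the same
	 * filtered flags.
	 */
	flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
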
@@ -985,10 +754,11 @@ struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma, u8 tile_ma
xe_assert(vm->xe, xe_vm_in_fault_mode(vm));
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
- xe_tile_migrate_exec_queue(tile);
+ xe_migrate_exec_queue(tile->migrate);
}
err = xe_vm_ops_add_rebind(&vops, vma, tile_mask);
@@ -1054,7 +824,7 @@ xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
*
* (re)bind SVM range setting up GPU page tables for the range.
*
- * Return: dma fence for rebind to signal completion on succees, ERR_PTR on
+ * Return: dma fence for rebind to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
@@ -1075,10 +845,11 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));
xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+ vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT;
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
- xe_tile_migrate_exec_queue(tile);
+ xe_migrate_exec_queue(tile->migrate);
}
err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask);
@@ -1137,7 +908,7 @@ xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
*
* Unbind SVM range removing the GPU page tables for the range.
*
- * Return: dma fence for unbind to signal completion on succees, ERR_PTR on
+ * Return: dma fence for unbind to signal completion on success, ERR_PTR on
* failure
*/
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
@@ -1161,7 +932,7 @@ struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
for_each_tile(tile, vm->xe, id) {
vops.pt_update_ops[id].wait_vm_bookkeep = true;
vops.pt_update_ops[tile->id].q =
- xe_tile_migrate_exec_queue(tile);
+ xe_migrate_exec_queue(tile->migrate);
}
err = xe_vm_ops_add_range_unbind(&vops, range);
@@ -1194,25 +965,18 @@ static void xe_vma_free(struct xe_vma *vma)
kfree(vma);
}
-#define VMA_CREATE_FLAG_READ_ONLY BIT(0)
-#define VMA_CREATE_FLAG_IS_NULL BIT(1)
-#define VMA_CREATE_FLAG_DUMPABLE BIT(2)
-#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3)
-
static struct xe_vma *xe_vma_create(struct xe_vm *vm,
struct xe_bo *bo,
u64 bo_offset_or_userptr,
u64 start, u64 end,
- u16 pat_index, unsigned int flags)
+ struct xe_vma_mem_attr *attr,
+ unsigned int flags)
{
struct xe_vma *vma;
struct xe_tile *tile;
u8 id;
- bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY);
- bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL);
- bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE);
- bool is_cpu_addr_mirror =
- (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR);
+ bool is_null = (flags & DRM_GPUVA_SPARSE);
+ bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR);
xe_assert(vm->xe, start < end);
xe_assert(vm->xe, end < vm->size);
@@ -1233,10 +997,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (!vma)
return ERR_PTR(-ENOMEM);
- if (is_cpu_addr_mirror)
- vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR;
- if (is_null)
- vma->gpuva.flags |= DRM_GPUVA_SPARSE;
if (bo)
vma->gpuva.gem.obj = &bo->ttm.base;
}
@@ -1247,10 +1007,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
vma->gpuva.vm = &vm->gpuvm;
vma->gpuva.va.addr = start;
vma->gpuva.va.range = end - start + 1;
- if (read_only)
- vma->gpuva.flags |= XE_VMA_READ_ONLY;
- if (dumpable)
- vma->gpuva.flags |= XE_VMA_DUMPABLE;
+ vma->gpuva.flags = flags;
for_each_tile(tile, vm->xe, id)
vma->tile_mask |= 0x1 << id;
@@ -1258,7 +1015,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
if (vm->xe->info.has_atomic_enable_pte_bit)
vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT;
- vma->pat_index = pat_index;
+ vma->attr = *attr;
if (bo) {
struct drm_gpuvm_bo *vm_bo;
@@ -1278,25 +1035,17 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
drm_gpuvm_bo_put(vm_bo);
} else /* userptr or null */ {
if (!is_null && !is_cpu_addr_mirror) {
- struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;
+ struct xe_userptr_vma *uvma = to_userptr_vma(vma);
u64 size = end - start + 1;
int err;
- INIT_LIST_HEAD(&userptr->invalidate_link);
- INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
- mutex_init(&userptr->unmap_mutex);
- err = mmu_interval_notifier_insert(&userptr->notifier,
- current->mm,
- xe_vma_userptr(vma), size,
- &vma_userptr_notifier_ops);
+ err = xe_userptr_setup(uvma, xe_vma_userptr(vma), size);
if (err) {
xe_vma_free(vma);
return ERR_PTR(err);
}
-
- userptr->notifier_seq = LONG_MAX;
}
xe_vm_get(vm);
@@ -1316,18 +1065,8 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
if (xe_vma_is_userptr(vma)) {
struct xe_userptr_vma *uvma = to_userptr_vma(vma);
- struct xe_userptr *userptr = &uvma->userptr;
- if (userptr->sg)
- xe_hmm_userptr_free_sg(uvma);
-
- /*
- * Since userptr pages are not pinned, we can't remove
- * the notifier until we're sure the GPU is not accessing
- * them anymore
- */
- mmu_interval_notifier_remove(&userptr->notifier);
- mutex_destroy(&userptr->unmap_mutex);
+ xe_userptr_remove(uvma);
xe_vm_put(vm);
} else if (xe_vma_is_null(vma) || xe_vma_is_cpu_addr_mirror(vma)) {
xe_vm_put(vm);
@@ -1364,11 +1103,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
if (xe_vma_is_userptr(vma)) {
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
-
- spin_lock(&vm->userptr.invalidated_lock);
- xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
- list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
- spin_unlock(&vm->userptr.invalidated_lock);
+ xe_userptr_destroy(to_userptr_vma(vma));
} else if (!xe_vma_is_null(vma) && !xe_vma_is_cpu_addr_mirror(vma)) {
xe_bo_assert_held(xe_vma_bo(vma));
@@ -1416,20 +1151,19 @@ int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma)
static void xe_vma_destroy_unlocked(struct xe_vma *vma)
{
+ struct xe_device *xe = xe_vma_vm(vma)->xe;
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
- int err;
+ int err = 0;
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
err = xe_vm_lock_vma(&exec, vma);
drm_exec_retry_on_contention(&exec);
if (XE_WARN_ON(err))
break;
+ xe_vma_destroy(vma, NULL);
}
-
- xe_vma_destroy(vma, NULL);
-
- drm_exec_fini(&exec);
+ xe_assert(xe, !err);
}
struct xe_vma *
@@ -1547,14 +1281,39 @@ static u64 pte_encode_ps(u32 pt_level)
return 0;
}
-static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
- const u16 pat_index)
+static u16 pde_pat_index(struct xe_bo *bo)
+{
+ struct xe_device *xe = xe_bo_device(bo);
+ u16 pat_index;
+
+ /*
+ * We only have two bits to encode the PAT index in non-leaf nodes, but
+ * these only point to other paging structures so we only need a minimal
+ * selection of options. The user PAT index is only for encoding leaf
+ * nodes, where more bits are available for the encoding. The
+ * non-leaf nodes are instead under driver control, so the index chosen
+ * here should be distinct from the user PAT index. Also the
+ * corresponding coherency of the PAT index should be tied to the
+ * allocation type of the page table (or at least we should pick
+ * something which is always safe).
+ */
+ if (!xe_bo_is_vram(bo) && bo->ttm.ttm->caching == ttm_cached)
+ pat_index = xe->pat.idx[XE_CACHE_WB];
+ else
+ pat_index = xe->pat.idx[XE_CACHE_NONE];
+
+ xe_assert(xe, pat_index <= 3);
+
+ return pat_index;
+}
+
+static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset)
{
u64 pde;
pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
- pde |= pde_encode_pat_index(pat_index);
+ pde |= pde_encode_pat_index(pde_pat_index(bo));
return pde;
}
@@ -1629,6 +1388,7 @@ static void vm_destroy_work_func(struct work_struct *w);
* @xe: xe device.
* @tile: tile to set up for.
* @vm: vm to set up for.
+ * @exec: The struct drm_exec object used to lock the vm resv.
*
* Sets up a pagetable tree with one page-table per level and a single
* leaf PTE. All pagetable entries point to the single page-table or,
@@ -1638,20 +1398,19 @@ static void vm_destroy_work_func(struct work_struct *w);
* Return: 0 on success, negative error code on error.
*/
static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile,
- struct xe_vm *vm)
+ struct xe_vm *vm, struct drm_exec *exec)
{
u8 id = tile->id;
int i;
for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) {
- vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i);
+ vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i, exec);
if (IS_ERR(vm->scratch_pt[id][i])) {
int err = PTR_ERR(vm->scratch_pt[id][i]);
vm->scratch_pt[id][i] = NULL;
return err;
}
-
xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]);
}
@@ -1679,11 +1438,28 @@ static void xe_vm_free_scratch(struct xe_vm *vm)
}
}
+static void xe_vm_pt_destroy(struct xe_vm *vm)
+{
+ struct xe_tile *tile;
+ u8 id;
+
+ xe_vm_assert_held(vm);
+
+ for_each_tile(tile, vm->xe, id) {
+ if (vm->pt_root[id]) {
+ xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
+ vm->pt_root[id] = NULL;
+ }
+ }
+}
+
struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
{
struct drm_gem_object *vm_resv_obj;
+ struct xe_validation_ctx ctx;
+ struct drm_exec exec;
struct xe_vm *vm;
- int err, number_tiles = 0;
+ int err;
struct xe_tile *tile;
u8 id;
@@ -1725,7 +1501,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
INIT_LIST_HEAD(&vm->userptr.repin_list);
INIT_LIST_HEAD(&vm->userptr.invalidated);
- init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
ttm_lru_bulk_move_init(&vm->lru_bulk_move);
@@ -1752,11 +1527,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
INIT_LIST_HEAD(&vm->preempt.pm_activate_link);
}
- if (flags & XE_VM_FLAG_FAULT_MODE) {
- err = xe_svm_init(vm);
- if (err)
- goto err_no_resv;
- }
+ err = xe_svm_init(vm);
+ if (err)
+ goto err_no_resv;
vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
if (!vm_resv_obj) {
@@ -1769,49 +1542,68 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
drm_gem_object_put(vm_resv_obj);
- err = xe_vm_lock(vm, true);
- if (err)
- goto err_close;
-
- if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
- vm->flags |= XE_VM_FLAG_64K;
-
- for_each_tile(tile, xe, id) {
- if (flags & XE_VM_FLAG_MIGRATION &&
- tile->id != XE_VM_FLAG_TILE_ID(flags))
- continue;
+ err = 0;
+ xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {.interruptible = true},
+ err) {
+ err = xe_vm_drm_exec_lock(vm, &exec);
+ drm_exec_retry_on_contention(&exec);
- vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level);
- if (IS_ERR(vm->pt_root[id])) {
- err = PTR_ERR(vm->pt_root[id]);
- vm->pt_root[id] = NULL;
- goto err_unlock_close;
- }
- }
+ if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
+ vm->flags |= XE_VM_FLAG_64K;
- if (xe_vm_has_scratch(vm)) {
for_each_tile(tile, xe, id) {
- if (!vm->pt_root[id])
+ if (flags & XE_VM_FLAG_MIGRATION &&
+ tile->id != XE_VM_FLAG_TILE_ID(flags))
continue;
- err = xe_vm_create_scratch(xe, tile, vm);
+ vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level,
+ &exec);
+ if (IS_ERR(vm->pt_root[id])) {
+ err = PTR_ERR(vm->pt_root[id]);
+ vm->pt_root[id] = NULL;
+ xe_vm_pt_destroy(vm);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+ }
+ if (err)
+ break;
+
+ if (xe_vm_has_scratch(vm)) {
+ for_each_tile(tile, xe, id) {
+ if (!vm->pt_root[id])
+ continue;
+
+ err = xe_vm_create_scratch(xe, tile, vm, &exec);
+ if (err) {
+ xe_vm_free_scratch(vm);
+ xe_vm_pt_destroy(vm);
+ drm_exec_retry_on_contention(&exec);
+ xe_validation_retry_on_oom(&ctx, &err);
+ break;
+ }
+ }
if (err)
- goto err_unlock_close;
+ break;
+ vm->batch_invalidate_tlb = true;
}
- vm->batch_invalidate_tlb = true;
- }
- if (vm->flags & XE_VM_FLAG_LR_MODE)
- vm->batch_invalidate_tlb = false;
+ if (vm->flags & XE_VM_FLAG_LR_MODE) {
+ INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func);
+ vm->batch_invalidate_tlb = false;
+ }
- /* Fill pt_root after allocating scratch tables */
- for_each_tile(tile, xe, id) {
- if (!vm->pt_root[id])
- continue;
+ /* Fill pt_root after allocating scratch tables */
+ for_each_tile(tile, xe, id) {
+ if (!vm->pt_root[id])
+ continue;
- xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
+ xe_pt_populate_empty(tile, vm, vm->pt_root[id]);
+ }
}
- xe_vm_unlock(vm);
+ if (err)
+ goto err_close;
/* Kernel migration VM shouldn't have a circular loop.. */
if (!(flags & XE_VM_FLAG_MIGRATION)) {
@@ -1828,13 +1620,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
goto err_close;
}
vm->q[id] = q;
- number_tiles++;
}
}
- if (number_tiles > 1)
- vm->composite_fence_ctx = dma_fence_context_alloc(1);
-
if (xef && xe->info.has_asid) {
u32 asid;
@@ -1844,7 +1632,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
&xe->usm.next_asid, GFP_KERNEL);
up_write(&xe->usm.lock);
if (err < 0)
- goto err_unlock_close;
+ goto err_close;
vm->usm.asid = asid;
}
@@ -1853,8 +1641,6 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
return vm;
-err_unlock_close:
- xe_vm_unlock(vm);
err_close:
xe_vm_close_and_put(vm);
return ERR_PTR(err);
@@ -1907,7 +1693,7 @@ static void xe_vm_close(struct xe_vm *vm)
xe_pt_clear(xe, vm->pt_root[id]);
for_each_gt(gt, xe, id)
- xe_gt_tlb_invalidation_vm(gt, vm);
+ xe_tlb_inval_vm(&gt->tlb_inval, vm);
}
}
@@ -1942,8 +1728,13 @@ void xe_vm_close_and_put(struct xe_vm *vm)
down_write(&vm->lock);
for_each_tile(tile, xe, id) {
- if (vm->q[id])
+ if (vm->q[id]) {
+ int i;
+
xe_exec_queue_last_fence_put(vm->q[id], vm);
+ for_each_tlb_inval(i)
+ xe_exec_queue_tlb_inval_last_fence_put(vm->q[id], vm, i);
+ }
}
up_write(&vm->lock);
@@ -1961,9 +1752,9 @@ void xe_vm_close_and_put(struct xe_vm *vm)
vma = gpuva_to_vma(gpuva);
if (xe_vma_has_no_bo(vma)) {
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags |= XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
}
xe_vm_remove_vma(vm, vma);
@@ -1987,13 +1778,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
* destroy the pagetables immediately.
*/
xe_vm_free_scratch(vm);
-
- for_each_tile(tile, xe, id) {
- if (vm->pt_root[id]) {
- xe_pt_destroy(vm->pt_root[id], vm->flags, NULL);
- vm->pt_root[id] = NULL;
- }
- }
+ xe_vm_pt_destroy(vm);
xe_vm_unlock(vm);
/*
@@ -2007,8 +1792,7 @@ void xe_vm_close_and_put(struct xe_vm *vm)
xe_vma_destroy_unlocked(vma);
}
- if (xe_vm_in_fault_mode(vm))
- xe_svm_fini(vm);
+ xe_svm_fini(vm);
up_write(&vm->lock);
@@ -2085,8 +1869,7 @@ struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
{
- return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
- tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
+ return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0);
}
static struct xe_exec_queue *
@@ -2120,6 +1903,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
struct xe_device *xe = to_xe_device(dev);
struct xe_file *xef = to_xe_file(file);
struct drm_xe_vm_create *args = data;
+ struct xe_gt *wa_gt = xe_root_mmio_gt(xe);
struct xe_vm *vm;
u32 id;
int err;
@@ -2128,7 +1912,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
- if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
+ if (wa_gt && XE_GT_WA(wa_gt, 22014953428))
args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE;
if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE &&
@@ -2210,6 +1994,110 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
}
+static int xe_vm_query_vmas(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpuva *gpuva;
+ u32 num_vmas = 0;
+
+ lockdep_assert_held(&vm->lock);
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end)
+ num_vmas++;
+
+ return num_vmas;
+}
+
+static int get_mem_attrs(struct xe_vm *vm, u32 *num_vmas, u64 start,
+ u64 end, struct drm_xe_mem_range_attr *attrs)
+{
+ struct drm_gpuva *gpuva;
+ int i = 0;
+
+ lockdep_assert_held(&vm->lock);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (i == *num_vmas)
+ return -ENOSPC;
+
+ attrs[i].start = xe_vma_start(vma);
+ attrs[i].end = xe_vma_end(vma);
+ attrs[i].atomic.val = vma->attr.atomic_access;
+ attrs[i].pat_index.val = vma->attr.pat_index;
+ attrs[i].preferred_mem_loc.devmem_fd = vma->attr.preferred_loc.devmem_fd;
+ attrs[i].preferred_mem_loc.migration_policy =
+ vma->attr.preferred_loc.migration_policy;
+
+ i++;
+ }
+
+ *num_vmas = i;
+ return 0;
+}
+
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_mem_range_attr *mem_attrs;
+ struct drm_xe_vm_query_mem_range_attr *args = data;
+ u64 __user *attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
+ struct xe_vm *vm;
+ int err = 0;
+
+ if (XE_IOCTL_DBG(xe,
+ ((args->num_mem_ranges == 0 &&
+ (attrs_user || args->sizeof_mem_range_attr != 0)) ||
+ (args->num_mem_ranges > 0 &&
+ (!attrs_user ||
+ args->sizeof_mem_range_attr !=
+ sizeof(struct drm_xe_mem_range_attr))))))
+ return -EINVAL;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ err = down_read_interruptible(&vm->lock);
+ if (err)
+ goto put_vm;
+
+ attrs_user = u64_to_user_ptr(args->vector_of_mem_attr);
+
+ if (args->num_mem_ranges == 0 && !attrs_user) {
+ args->num_mem_ranges = xe_vm_query_vmas(vm, args->start, args->start + args->range);
+ args->sizeof_mem_range_attr = sizeof(struct drm_xe_mem_range_attr);
+ goto unlock_vm;
+ }
+
+ mem_attrs = kvmalloc_array(args->num_mem_ranges, args->sizeof_mem_range_attr,
+ GFP_KERNEL | __GFP_ACCOUNT |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!mem_attrs) {
+ err = args->num_mem_ranges > 1 ? -ENOBUFS : -ENOMEM;
+ goto unlock_vm;
+ }
+
+ memset(mem_attrs, 0, args->num_mem_ranges * args->sizeof_mem_range_attr);
+ err = get_mem_attrs(vm, &args->num_mem_ranges, args->start,
+ args->start + args->range, mem_attrs);
+ if (err)
+ goto free_mem_attrs;
+
+ err = copy_to_user(attrs_user, mem_attrs,
+ args->sizeof_mem_range_attr * args->num_mem_ranges);
+ if (err)
+ err = -EFAULT;
+
+free_mem_attrs:
+ kvfree(mem_attrs);
+unlock_vm:
+ up_read(&vm->lock);
+put_vm:
+ xe_vm_put(vm);
+ return err;
+}
+
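
The new query ioctl is designed for two-pass use: a first call with
num_mem_ranges == 0 and a NULL vector returns the range count and per-entry
size, and a second call fills a caller-allocated vector. A minimal userspace
sketch, assuming the request macro is named DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTR
(the macro name and surrounding variables are illustrative, not part of this
hunk):

	struct drm_xe_vm_query_mem_range_attr args = {
		.vm_id = vm_id,
		.start = start,
		.range = range,
	};

	/* Pass 1: how many ranges overlap [start, start + range)? */
	if (ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTR, &args))
		return -errno;

	struct drm_xe_mem_range_attr *attrs =
		calloc(args.num_mem_ranges, args.sizeof_mem_range_attr);
	if (!attrs)
		return -ENOMEM;
	args.vector_of_mem_attr = (uintptr_t)attrs;

	/* Pass 2: fetch the attributes themselves. */
	if (ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTR, &args))
		return -errno;
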
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
{
if (page_addr > xe_vma_end(vma) - 1 ||
@@ -2248,9 +2136,9 @@ static const u32 region_to_mem_type[] = {
static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma,
bool post_commit)
{
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags |= XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
if (post_commit)
xe_vm_remove_vma(vm, vma);
}
@@ -2357,10 +2245,17 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
switch (operation) {
case DRM_XE_VM_BIND_OP_MAP:
- case DRM_XE_VM_BIND_OP_MAP_USERPTR:
- ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
- obj, bo_offset_or_userptr);
+ case DRM_XE_VM_BIND_OP_MAP_USERPTR: {
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = addr,
+ .map.va.range = range,
+ .map.gem.obj = obj,
+ .map.gem.offset = bo_offset_or_userptr,
+ };
+
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, &map_req);
break;
+ }
case DRM_XE_VM_BIND_OP_UNMAP:
ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range);
break;
@@ -2397,20 +2292,25 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
if (__op->op == DRM_GPUVA_OP_MAP) {
op->map.immediate =
flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
- op->map.read_only =
- flags & DRM_XE_VM_BIND_FLAG_READONLY;
- op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
- op->map.is_cpu_addr_mirror = flags &
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR;
- op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_READONLY)
+ op->map.vma_flags |= XE_VMA_READ_ONLY;
+ if (flags & DRM_XE_VM_BIND_FLAG_NULL)
+ op->map.vma_flags |= DRM_GPUVA_SPARSE;
+ if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR;
+ if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE)
+ op->map.vma_flags |= XE_VMA_DUMPABLE;
+ if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
+ op->map.vma_flags |= XE_VMA_MADV_AUTORESET;
op->map.pat_index = pat_index;
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_tile *tile;
struct xe_svm_range *svm_range;
struct drm_gpusvm_ctx ctx = {};
- struct xe_tile *tile;
+ struct drm_pagemap *dpagemap;
u8 id, tile_mask = 0;
u32 i;
@@ -2427,8 +2327,24 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
tile_mask |= 0x1 << id;
xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
- op->prefetch_range.region = prefetch_region;
op->prefetch_range.ranges_count = 0;
+ tile = NULL;
+
+ if (prefetch_region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC) {
+ dpagemap = xe_vma_resolve_pagemap(vma,
+ xe_device_get_root_tile(vm->xe));
+ /*
+ * TODO: Once multigpu support is enabled, we will need
+ * a way to derive the tile from the dpagemap.
+ */
+ if (dpagemap)
+ tile = xe_device_get_root_tile(vm->xe);
+ } else if (prefetch_region) {
+ tile = &vm->xe->tiles[region_to_mem_type[prefetch_region] -
+ XE_PL_VRAM0];
+ }
+
+ op->prefetch_range.tile = tile;
alloc_next_range:
svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
@@ -2447,7 +2363,7 @@ alloc_next_range:
goto unwind_prefetch_ops;
}
- if (xe_svm_range_validate(vm, svm_range, tile_mask, !!prefetch_region)) {
+ if (xe_svm_range_validate(vm, svm_range, tile_mask, !!tile)) {
xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
goto check_next_range;
}
@@ -2484,9 +2400,10 @@ unwind_prefetch_ops:
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
- u16 pat_index, unsigned int flags)
+ struct xe_vma_mem_attr *attr, unsigned int flags)
{
struct xe_bo *bo = op->gem.obj ? gem_to_xe_bo(op->gem.obj) : NULL;
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct xe_vma *vma;
int err = 0;
@@ -2494,9 +2411,9 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
lockdep_assert_held_write(&vm->lock);
if (bo) {
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
- drm_exec_until_all_locked(&exec) {
- err = 0;
+ err = 0;
+ xe_validation_guard(&ctx, &vm->xe->val, &exec,
+ (struct xe_val_flags) {.interruptible = true}, err) {
if (!bo->vm) {
err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
drm_exec_retry_on_contention(&exec);
@@ -2505,27 +2422,35 @@ static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
err = drm_exec_lock_obj(&exec, &bo->ttm.base);
drm_exec_retry_on_contention(&exec);
}
- if (err) {
- drm_exec_fini(&exec);
+ if (err)
return ERR_PTR(err);
- }
- }
- }
- vma = xe_vma_create(vm, bo, op->gem.offset,
- op->va.addr, op->va.addr +
- op->va.range - 1, pat_index, flags);
- if (IS_ERR(vma))
- goto err_unlock;
- if (xe_vma_is_userptr(vma))
- err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
- else if (!xe_vma_has_no_bo(vma) && !bo->vm)
- err = add_preempt_fences(vm, bo);
+ vma = xe_vma_create(vm, bo, op->gem.offset,
+ op->va.addr, op->va.addr +
+ op->va.range - 1, attr, flags);
+ if (IS_ERR(vma))
+ return vma;
-err_unlock:
- if (bo)
- drm_exec_fini(&exec);
+ if (!bo->vm) {
+ err = add_preempt_fences(vm, bo);
+ if (err) {
+ prep_vma_destroy(vm, vma, false);
+ xe_vma_destroy(vma, NULL);
+ }
+ }
+ }
+ if (err)
+ return ERR_PTR(err);
+ } else {
+ vma = xe_vma_create(vm, NULL, op->gem.offset,
+ op->va.addr, op->va.addr +
+ op->va.range - 1, attr, flags);
+ if (IS_ERR(vma))
+ return vma;
+ if (xe_vma_is_userptr(vma))
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ }
if (err) {
prep_vma_destroy(vm, vma, false);
xe_vma_destroy_unlocked(vma);
@@ -2630,6 +2555,29 @@ static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
return err;
}
+/**
+ * xe_vma_has_default_mem_attrs - Check if a VMA has default memory attributes
+ * @vma: Pointer to the xe_vma structure to check
+ *
+ * This function determines whether the given VMA (Virtual Memory Area)
+ * has its memory attributes set to their default values. Specifically,
+ * it checks the following conditions:
+ *
+ * - `atomic_access` is `DRM_XE_ATOMIC_UNDEFINED`
+ * - `pat_index` is equal to `default_pat_index`
+ * - `preferred_loc.devmem_fd` is `DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE`
+ * - `preferred_loc.migration_policy` is `DRM_XE_MIGRATE_ALL_PAGES`
+ *
+ * Return: true if all attributes are at their default values, false otherwise.
+ */
+bool xe_vma_has_default_mem_attrs(struct xe_vma *vma)
+{
+ return (vma->attr.atomic_access == DRM_XE_ATOMIC_UNDEFINED &&
+ vma->attr.pat_index == vma->attr.default_pat_index &&
+ vma->attr.preferred_loc.devmem_fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE &&
+ vma->attr.preferred_loc.migration_policy == DRM_XE_MIGRATE_ALL_PAGES);
+}
+
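
The predicate serves as a sanity check when vmas are recreated: the UNMAP
path in xe_vm_alloc_vma() further down warns if a vma carrying non-default
attributes is about to be discarded, e.g.:

	if (__op->op == DRM_GPUVA_OP_UNMAP) {
		vma = gpuva_to_vma(op->base.unmap.va);
		/* Non-default attrs must not be silently dropped. */
		XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
	}
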
static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
struct xe_vma_ops *vops)
{
@@ -2656,23 +2604,26 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
{
- flags |= op->map.read_only ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->map.is_null ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->map.dumpable ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= op->map.is_cpu_addr_mirror ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
-
- vma = new_vma(vm, &op->base.map, op->map.pat_index,
+ struct xe_vma_mem_attr default_attr = {
+ .preferred_loc = {
+ .devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
+ .migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
+ },
+ .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
+ .default_pat_index = op->map.pat_index,
+ .pat_index = op->map.pat_index,
+ };
+
+ flags |= op->map.vma_flags & XE_VMA_CREATE_MASK;
+
+ vma = new_vma(vm, &op->base.map, &default_attr,
flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
op->map.vma = vma;
if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) &&
- !op->map.is_cpu_addr_mirror) ||
+ !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) ||
op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
op->tile_mask, 1);
@@ -2693,27 +2644,20 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
end = op->base.remap.next->va.addr;
if (xe_vma_is_cpu_addr_mirror(old) &&
- xe_svm_has_mapping(vm, start, end))
- return -EBUSY;
+ xe_svm_has_mapping(vm, start, end)) {
+ if (vops->flags & XE_VMA_OPS_FLAG_MADVISE)
+ xe_svm_unmap_address_range(vm, start, end);
+ else
+ return -EBUSY;
+ }
op->remap.start = xe_vma_start(old);
op->remap.range = xe_vma_size(old);
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_READ_ONLY ?
- VMA_CREATE_FLAG_READ_ONLY : 0;
- flags |= op->base.remap.unmap->va->flags &
- DRM_GPUVA_SPARSE ?
- VMA_CREATE_FLAG_IS_NULL : 0;
- flags |= op->base.remap.unmap->va->flags &
- XE_VMA_DUMPABLE ?
- VMA_CREATE_FLAG_DUMPABLE : 0;
- flags |= xe_vma_is_cpu_addr_mirror(old) ?
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0;
-
+ flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK;
if (op->base.remap.prev) {
vma = new_vma(vm, op->base.remap.prev,
- old->pat_index, flags);
+ &old->attr, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2743,7 +2687,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (op->base.remap.next) {
vma = new_vma(vm, op->base.remap.next,
- old->pat_index, flags);
+ &old->attr, flags);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -2832,9 +2776,9 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va);
if (vma) {
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags &= ~XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
if (post_commit)
xe_vm_insert_vma(vm, vma);
}
@@ -2853,9 +2797,9 @@ static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op,
xe_vma_destroy_unlocked(op->remap.next);
}
if (vma) {
- down_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_lock(vm);
vma->gpuva.flags &= ~XE_VMA_DESTROYED;
- up_read(&vm->userptr.notifier_lock);
+ xe_svm_notifier_unlock(vm);
if (post_commit)
xe_vm_insert_vma(vm, vma);
}
@@ -2894,7 +2838,7 @@ static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm,
}
static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
- bool validate)
+ bool res_evict, bool validate)
{
struct xe_bo *bo = xe_vma_bo(vma);
struct xe_vm *vm = xe_vma_vm(vma);
@@ -2905,7 +2849,8 @@ static int vma_lock_and_validate(struct drm_exec *exec, struct xe_vma *vma,
err = drm_exec_lock_obj(exec, &bo->ttm.base);
if (!err && validate)
err = xe_bo_validate(bo, vm,
- !xe_vm_in_preempt_fence_mode(vm));
+ !xe_vm_in_preempt_fence_mode(vm) &&
+ res_evict, exec);
}
return err;
@@ -2930,30 +2875,27 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
{
bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_tile *tile = op->prefetch_range.tile;
int err = 0;
struct xe_svm_range *svm_range;
struct drm_gpusvm_ctx ctx = {};
- struct xe_tile *tile;
unsigned long i;
- u32 region;
if (!xe_vma_is_cpu_addr_mirror(vma))
return 0;
- region = op->prefetch_range.region;
-
ctx.read_only = xe_vma_read_only(vma);
ctx.devmem_possible = devmem_possible;
ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
+ ctx.device_private_page_owner = xe_svm_devm_owner(vm->xe);
/* TODO: Threading the migration */
xa_for_each(&op->prefetch_range.range, i, svm_range) {
- if (!region)
+ if (!tile)
xe_svm_range_migrate_to_smem(vm, svm_range);
- if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
- tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
+ if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, !!tile)) {
err = xe_svm_alloc_vram(tile, svm_range, &ctx);
if (err) {
drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
@@ -2978,14 +2920,23 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
}
static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
- struct xe_vma_op *op)
+ struct xe_vma_ops *vops, struct xe_vma_op *op)
{
int err = 0;
+ bool res_evict;
+
+ /*
+ * We only allow evicting a BO within the VM if it is not part of an
+ * array of binds, as one bind in an array could evict a BO required
+ * by another bind in the same array.
+ */
+ res_evict = !(vops->flags & XE_VMA_OPS_ARRAY_OF_BINDS);
switch (op->base.op) {
case DRM_GPUVA_OP_MAP:
if (!op->map.invalidate_on_bind)
err = vma_lock_and_validate(exec, op->map.vma,
+ res_evict,
!xe_vm_in_fault_mode(vm) ||
op->map.immediate);
break;
@@ -2996,11 +2947,13 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.remap.unmap->va),
- false);
+ res_evict, false);
if (!err && op->remap.prev)
- err = vma_lock_and_validate(exec, op->remap.prev, true);
+ err = vma_lock_and_validate(exec, op->remap.prev,
+ res_evict, true);
if (!err && op->remap.next)
- err = vma_lock_and_validate(exec, op->remap.next, true);
+ err = vma_lock_and_validate(exec, op->remap.next,
+ res_evict, true);
break;
case DRM_GPUVA_OP_UNMAP:
err = check_ufence(gpuva_to_vma(op->base.unmap.va));
@@ -3009,26 +2962,27 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.unmap.va),
- false);
+ res_evict, false);
break;
case DRM_GPUVA_OP_PREFETCH:
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
u32 region;
- if (xe_vma_is_cpu_addr_mirror(vma))
- region = op->prefetch_range.region;
- else
+ if (!xe_vma_is_cpu_addr_mirror(vma)) {
region = op->prefetch.region;
-
- xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
+ xe_assert(vm->xe, region == DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC ||
+ region <= ARRAY_SIZE(region_to_mem_type));
+ }
err = vma_lock_and_validate(exec,
gpuva_to_vma(op->base.prefetch.va),
- false);
+ res_evict, false);
if (!err && !xe_vma_has_no_bo(vma))
err = xe_bo_migrate(xe_vma_bo(vma),
- region_to_mem_type[region]);
+ region_to_mem_type[region],
+ NULL,
+ exec);
break;
}
default:
@@ -3069,7 +3023,7 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
return err;
list_for_each_entry(op, &vops->list, link) {
- err = op_lock_and_prep(exec, vm, op);
+ err = op_lock_and_prep(exec, vm, vops, op);
if (err)
return err;
}
@@ -3150,20 +3104,31 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
struct dma_fence *fence = NULL;
struct dma_fence **fences = NULL;
struct dma_fence_array *cf = NULL;
- int number_tiles = 0, current_fence = 0, err;
+ int number_tiles = 0, current_fence = 0, n_fence = 0, err;
u8 id;
number_tiles = vm_ops_setup_tile_args(vm, vops);
if (number_tiles == 0)
return ERR_PTR(-ENODATA);
- if (number_tiles > 1) {
- fences = kmalloc_array(number_tiles, sizeof(*fences),
- GFP_KERNEL);
- if (!fences) {
- fence = ERR_PTR(-ENOMEM);
- goto err_trace;
- }
+ if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT) {
+ for_each_tile(tile, vm->xe, id)
+ ++n_fence;
+ } else {
+ for_each_tile(tile, vm->xe, id)
+ n_fence += (1 + XE_MAX_GT_PER_TILE);
+ }
+
+ fences = kmalloc_array(n_fence, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ fence = ERR_PTR(-ENOMEM);
+ goto err_trace;
+ }
+
+ cf = dma_fence_array_alloc(n_fence);
+ if (!cf) {
+ fence = ERR_PTR(-ENOMEM);
+ goto err_out;
}
for_each_tile(tile, vm->xe, id) {
@@ -3180,30 +3145,34 @@ static struct dma_fence *ops_execute(struct xe_vm *vm,
trace_xe_vm_ops_execute(vops);
for_each_tile(tile, vm->xe, id) {
+ struct xe_exec_queue *q = vops->pt_update_ops[tile->id].q;
+ int i;
+
+ fence = NULL;
if (!vops->pt_update_ops[id].num_ops)
- continue;
+ goto collect_fences;
fence = xe_pt_update_ops_run(tile, vops);
if (IS_ERR(fence))
goto err_out;
- if (fences)
- fences[current_fence++] = fence;
- }
+collect_fences:
+ fences[current_fence++] = fence ?: dma_fence_get_stub();
+ if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT)
+ continue;
- if (fences) {
- cf = dma_fence_array_create(number_tiles, fences,
- vm->composite_fence_ctx,
- vm->composite_fence_seqno++,
- false);
- if (!cf) {
- --vm->composite_fence_seqno;
- fence = ERR_PTR(-ENOMEM);
- goto err_out;
- }
- fence = &cf->base;
+ xe_migrate_job_lock(tile->migrate, q);
+ for_each_tlb_inval(i)
+ fences[current_fence++] =
+ xe_exec_queue_tlb_inval_last_fence_get(q, vm, i);
+ xe_migrate_job_unlock(tile->migrate, q);
}
+ xe_assert(vm->xe, current_fence == n_fence);
+ dma_fence_array_init(cf, n_fence, fences, dma_fence_context_alloc(1),
+ 1, false);
+ fence = &cf->base;
+
for_each_tile(tile, vm->xe, id) {
if (!vops->pt_update_ops[id].num_ops)
continue;
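
As a worked count: on a two-tile device, and assuming XE_MAX_GT_PER_TILE is 2
(one primary plus one media GT), the default path reserves
n_fence = 2 * (1 + 2) = 6 slots: one bind fence plus one TLB-invalidation
fence per GT for each tile. With XE_VMA_OPS_FLAG_SKIP_TLB_WAIT only the two
bind-fence slots are needed. Tiles with no ops still contribute
dma_fence_get_stub() so the array is fully populated before
dma_fence_array_init() consumes it.
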
@@ -3263,7 +3232,6 @@ static void op_add_ufence(struct xe_vm *vm, struct xe_vma_op *op,
static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
struct dma_fence *fence)
{
- struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, vops->q);
struct xe_user_fence *ufence;
struct xe_vma_op *op;
int i;
@@ -3284,42 +3252,43 @@ static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops,
if (fence) {
for (i = 0; i < vops->num_syncs; i++)
xe_sync_entry_signal(vops->syncs + i, fence);
- xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence);
}
}
static struct dma_fence *vm_bind_ioctl_ops_execute(struct xe_vm *vm,
struct xe_vma_ops *vops)
{
+ struct xe_validation_ctx ctx;
struct drm_exec exec;
struct dma_fence *fence;
- int err;
+ int err = 0;
lockdep_assert_held_write(&vm->lock);
- drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
- DRM_EXEC_IGNORE_DUPLICATES, 0);
- drm_exec_until_all_locked(&exec) {
+ xe_validation_guard(&ctx, &vm->xe->val, &exec,
+ ((struct xe_val_flags) {
+ .interruptible = true,
+ .exec_ignore_duplicates = true,
+ }), err) {
err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
drm_exec_retry_on_contention(&exec);
- if (err) {
- fence = ERR_PTR(err);
- goto unlock;
- }
+ xe_validation_retry_on_oom(&ctx, &err);
+ if (err)
+ return ERR_PTR(err);
+ xe_vm_set_validation_exec(vm, &exec);
fence = ops_execute(vm, vops);
+ xe_vm_set_validation_exec(vm, NULL);
if (IS_ERR(fence)) {
if (PTR_ERR(fence) == -ENODATA)
vm_bind_ioctl_ops_fini(vm, vops, NULL);
- goto unlock;
+ return fence;
}
vm_bind_ioctl_ops_fini(vm, vops, fence);
}
-unlock:
- drm_exec_fini(&exec);
- return fence;
+ return err ? ERR_PTR(err) : fence;
}
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
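
All of the call sites converted in this patch follow the same
xe_validation_guard() shape; a minimal sketch of the pattern (xe and obj
stand in for the caller's device and GEM object; the retry semantics are as
implied by the converted sites):

	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int err = 0;

	xe_validation_guard(&ctx, &xe->val, &exec,
			    (struct xe_val_flags) {.interruptible = true}, err) {
		err = drm_exec_lock_obj(&exec, obj);
		/* Re-run the body on ww-mutex contention (-EDEADLK). */
		drm_exec_retry_on_contention(&exec);
		/* Back off and re-run the body on validation OOM. */
		xe_validation_retry_on_oom(&ctx, &err);
		if (err)
			break;
		/* ... validate / bind under the held locks ... */
	}
	return err;
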
@@ -3329,7 +3298,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO);
DRM_XE_VM_BIND_FLAG_NULL | \
DRM_XE_VM_BIND_FLAG_DUMPABLE | \
DRM_XE_VM_BIND_FLAG_CHECK_PXP | \
- DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR)
+ DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \
+ DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET)
#ifdef TEST_VM_OPS_ERROR
#define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
@@ -3435,14 +3405,20 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
op == DRM_XE_VM_BIND_OP_MAP_USERPTR) ||
+ XE_IOCTL_DBG(xe, op == DRM_XE_VM_BIND_OP_MAP_USERPTR &&
+ !IS_ENABLED(CONFIG_DRM_GPUSVM)) ||
XE_IOCTL_DBG(xe, obj &&
op == DRM_XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, prefetch_region &&
op != DRM_XE_VM_BIND_OP_PREFETCH) ||
- XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
- xe->info.mem_region_mask)) ||
+ XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
+ /* Guard against undefined shift in BIT(prefetch_region) */
+ (prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) ||
+ !(BIT(prefetch_region) & xe->info.mem_region_mask)))) ||
XE_IOCTL_DBG(xe, obj &&
- op == DRM_XE_VM_BIND_OP_UNMAP)) {
+ op == DRM_XE_VM_BIND_OP_UNMAP) ||
+ XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
+ (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) {
err = -EINVAL;
goto free_bind_ops;
}
@@ -3471,19 +3447,19 @@ static int vm_bind_ioctl_signal_fences(struct xe_vm *vm,
struct xe_sync_entry *syncs,
int num_syncs)
{
- struct dma_fence *fence;
+ struct dma_fence *fence = NULL;
int i, err = 0;
- fence = xe_sync_in_fence_get(syncs, num_syncs,
- to_wait_exec_queue(vm, q), vm);
- if (IS_ERR(fence))
- return PTR_ERR(fence);
+ if (num_syncs) {
+ fence = xe_sync_in_fence_get(syncs, num_syncs,
+ to_wait_exec_queue(vm, q), vm);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], fence);
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], fence);
+ }
- xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm,
- fence);
dma_fence_put(fence);
return err;
@@ -3587,7 +3563,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
q = xe_exec_queue_lookup(xef, args->exec_queue_id);
if (XE_IOCTL_DBG(xe, !q)) {
err = -ENOENT;
- goto put_vm;
+ goto free_bind_ops;
}
if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) {
@@ -3633,7 +3609,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
__GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (!ops) {
err = -ENOMEM;
- goto release_vm_lock;
+ goto free_bos;
}
}
@@ -3674,8 +3650,12 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
syncs_user = u64_to_user_ptr(args->syncs);
for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
+ struct xe_exec_queue *__q = q ?: vm->q[0];
+
err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
&syncs_user[num_syncs],
+ __q->ufence_syncobj,
+ ++__q->ufence_timeline_value,
(xe_vm_in_lr_mode(vm) ?
SYNC_PARSE_FLAG_LR_MODE : 0) |
(!args->num_binds ?
@@ -3698,6 +3678,8 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
+ if (args->num_binds > 1)
+ vops.flags |= XE_VMA_OPS_ARRAY_OF_BINDS;
for (i = 0; i < args->num_binds; ++i) {
u64 range = bind_ops[i].range;
u64 addr = bind_ops[i].addr;
@@ -3767,17 +3749,20 @@ free_syncs:
put_obj:
for (i = 0; i < args->num_binds; ++i)
xe_bo_put(bos[i]);
+
+ kvfree(ops);
+free_bos:
+ kvfree(bos);
release_vm_lock:
up_write(&vm->lock);
put_exec_queue:
if (q)
xe_exec_queue_put(q);
-put_vm:
- xe_vm_put(vm);
- kvfree(bos);
- kvfree(ops);
+free_bind_ops:
if (args->num_binds > 1)
kvfree(bind_ops);
+put_vm:
+ xe_vm_put(vm);
return err;
}
@@ -3867,10 +3852,14 @@ release_vm_lock:
*/
int xe_vm_lock(struct xe_vm *vm, bool intr)
{
+ int ret;
+
if (intr)
- return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+ ret = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);
+ else
+ ret = dma_resv_lock(xe_vm_resv(vm), NULL);
- return dma_resv_lock(xe_vm_resv(vm), NULL);
+ return ret;
}
/**
@@ -3885,7 +3874,7 @@ void xe_vm_unlock(struct xe_vm *vm)
}
/**
- * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
+ * xe_vm_range_tilemask_tlb_inval - Issue a TLB invalidation on this tilemask for an
* address range
* @vm: The VM
* @start: start address
@@ -3896,10 +3885,11 @@ void xe_vm_unlock(struct xe_vm *vm)
*
* Returns 0 for success, negative error code otherwise.
*/
-int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
- u64 end, u8 tile_mask)
+int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask)
{
- struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ struct xe_tlb_inval_fence
+ fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
struct xe_tile *tile;
u32 fence_id = 0;
u8 id;
@@ -3909,39 +3899,36 @@ int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
return 0;
for_each_tile(tile, vm->xe, id) {
- if (tile_mask & BIT(id)) {
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->primary_gt,
- &fence[fence_id],
- start,
- end,
- vm->usm.asid);
- if (err)
- goto wait;
- ++fence_id;
+ if (!(tile_mask & BIT(id)))
+ continue;
- if (!tile->media_gt)
- continue;
+ xe_tlb_inval_fence_init(&tile->primary_gt->tlb_inval,
+ &fence[fence_id], true);
+
+ err = xe_tlb_inval_range(&tile->primary_gt->tlb_inval,
+ &fence[fence_id], start, end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id], true);
+ if (!tile->media_gt)
+ continue;
- err = xe_gt_tlb_invalidation_range(tile->media_gt,
- &fence[fence_id],
- start,
- end,
- vm->usm.asid);
- if (err)
- goto wait;
- ++fence_id;
- }
+ xe_tlb_inval_fence_init(&tile->media_gt->tlb_inval,
+ &fence[fence_id], true);
+
+ err = xe_tlb_inval_range(&tile->media_gt->tlb_inval,
+ &fence[fence_id], start, end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
}
wait:
for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ xe_tlb_inval_fence_wait(&fence[id]);
return err;
}
@@ -3979,13 +3966,13 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
*/
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
- lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) ||
- (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) &&
+ lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
lockdep_is_held(&xe_vm_resv(vm)->lock.base)));
WARN_ON_ONCE(!mmu_interval_check_retry
(&to_userptr_vma(vma)->userptr.notifier,
- to_userptr_vma(vma)->userptr.notifier_seq));
+ to_userptr_vma(vma)->userptr.pages.notifier_seq));
WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP));
@@ -4000,8 +3987,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
xe_device_wmb(xe);
- ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
- xe_vma_end(vma), tile_mask);
+ ret = xe_vm_range_tilemask_tlb_inval(xe_vma_vm(vma), xe_vma_start(vma),
+ xe_vma_end(vma), tile_mask);
/* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
@@ -4203,3 +4190,221 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap)
}
kvfree(snap);
}
+
+/**
+ * xe_vma_need_vram_for_atomic - Check if VMA needs VRAM migration for atomic operations
+ * @xe: Pointer to the Xe device structure
+ * @vma: Pointer to the virtual memory area (VMA) structure
+ * @is_atomic: True if called from the pagefault path for an atomic access
+ *
+ * This function determines whether the given VMA needs to be migrated to
+ * VRAM in order to perform an atomic GPU operation.
+ *
+ * Return:
+ * 1 - Migration to VRAM is required
+ * 0 - Migration is not required
+ * -EACCES - Invalid access for atomic memory attr
+ *
+ */
+int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic)
+{
+ u32 atomic_access = xe_vma_bo(vma) ? xe_vma_bo(vma)->attr.atomic_access :
+ vma->attr.atomic_access;
+
+ if (!IS_DGFX(xe) || !is_atomic)
+ return false;
+
+ /*
+ * NOTE: The checks implemented here are platform-specific. For
+ * instance, on a device supporting CXL atomics, these would ideally
+ * work universally without additional handling.
+ */
+ switch (atomic_access) {
+ case DRM_XE_ATOMIC_DEVICE:
+ return !xe->info.has_device_atomics_on_smem;
+
+ case DRM_XE_ATOMIC_CPU:
+ return -EACCES;
+
+ case DRM_XE_ATOMIC_UNDEFINED:
+ case DRM_XE_ATOMIC_GLOBAL:
+ default:
+ return 1;
+ }
+}
+
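
A sketch of how a pagefault handler might consume the tri-state return; the
handler shape and the access_is_atomic flag are illustrative here, not taken
from this patch:

	int ret = xe_vma_need_vram_for_atomic(xe, vma, access_is_atomic);

	if (ret < 0)
		return ret;	/* DRM_XE_ATOMIC_CPU: GPU atomics are invalid */
	if (ret) {
		/* Migrate the backing store to VRAM before resolving PTEs. */
		err = xe_bo_migrate(xe_vma_bo(vma), XE_PL_VRAM0, NULL, exec);
		if (err)
			return err;
	}
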
+static int xe_vm_alloc_vma(struct xe_vm *vm,
+ struct drm_gpuvm_map_req *map_req,
+ bool is_madvise)
+{
+ struct xe_vma_ops vops;
+ struct drm_gpuva_ops *ops = NULL;
+ struct drm_gpuva_op *__op;
+ unsigned int vma_flags = 0;
+ bool remap_op = false;
+ struct xe_vma_mem_attr tmp_attr;
+ u16 default_pat;
+ int err;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ if (is_madvise)
+ ops = drm_gpuvm_madvise_ops_create(&vm->gpuvm, map_req);
+ else
+ ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, map_req);
+
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ if (list_empty(&ops->list)) {
+ err = 0;
+ goto free_ops;
+ }
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma = NULL;
+
+ if (!is_madvise) {
+ if (__op->op == DRM_GPUVA_OP_UNMAP) {
+ vma = gpuva_to_vma(op->base.unmap.va);
+ XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma));
+ default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ default_pat = vma->attr.default_pat_index;
+ vma_flags = vma->gpuva.flags;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_MAP) {
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
+ op->map.pat_index = default_pat;
+ }
+ } else {
+ if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ xe_assert(vm->xe, !remap_op);
+ xe_assert(vm->xe, xe_vma_has_no_bo(vma));
+ remap_op = true;
+ vma_flags = vma->gpuva.flags;
+ }
+
+ if (__op->op == DRM_GPUVA_OP_MAP) {
+ xe_assert(vm->xe, remap_op);
+ remap_op = false;
+ /*
+ * For madvise ops, DRM_GPUVA_OP_MAP always
+ * follows DRM_GPUVA_OP_REMAP, so make sure to
+ * propagate the flags from the vma we're
+ * unmapping.
+ */
+ op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK;
+ }
+ }
+ print_op(vm->xe, __op);
+ }
+
+ xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
+
+ if (is_madvise)
+ vops.flags |= XE_VMA_OPS_FLAG_MADVISE;
+
+ err = vm_bind_ioctl_ops_parse(vm, ops, &vops);
+ if (err)
+ goto unwind_ops;
+
+ xe_vm_lock(vm, false);
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+ struct xe_vma *vma;
+
+ if (__op->op == DRM_GPUVA_OP_UNMAP) {
+ vma = gpuva_to_vma(op->base.unmap.va);
+ /* There should be no unmap for madvise */
+ if (is_madvise)
+ XE_WARN_ON("UNEXPECTED UNMAP");
+
+ xe_vma_destroy(vma, NULL);
+ } else if (__op->op == DRM_GPUVA_OP_REMAP) {
+ vma = gpuva_to_vma(op->base.remap.unmap->va);
+ /* For madvise ops, store the attributes of the vma unmapped
+ * by the REMAP so they can be assigned to the newly created
+ * MAP vma.
+ */
+ if (is_madvise)
+ tmp_attr = vma->attr;
+
+ xe_vma_destroy(gpuva_to_vma(op->base.remap.unmap->va), NULL);
+ } else if (__op->op == DRM_GPUVA_OP_MAP) {
+ vma = op->map.vma;
+ /* For a madvise call, MAP is always preceded by REMAP.
+ * Therefore tmp_attr always holds sane values, making it
+ * safe to copy them to the new vma.
+ */
+ if (is_madvise)
+ vma->attr = tmp_attr;
+ }
+ }
+
+ xe_vm_unlock(vm);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return 0;
+
+unwind_ops:
+ vm_bind_ioctl_ops_unwind(vm, &ops, 1);
+free_ops:
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return err;
+}
+
+/**
+ * xe_vm_alloc_madvise_vma - Allocate VMA's with madvise ops
+ * @vm: Pointer to the xe_vm structure
+ * @start: Starting input address
+ * @range: Size of the input range
+ *
+ * This function splits existing VMAs to create new VMAs covering the
+ * user-provided input range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
+{
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = start,
+ .map.va.range = range,
+ };
+
+ lockdep_assert_held_write(&vm->lock);
+
+ vm_dbg(&vm->xe->drm, "MADVISE_OPS_CREATE: addr=0x%016llx, size=0x%016llx", start, range);
+
+ return xe_vm_alloc_vma(vm, &map_req, true);
+}
+
+/**
+ * xe_vm_alloc_cpu_addr_mirror_vma - Allocate CPU addr mirror vma
+ * @vm: Pointer to the xe_vm structure
+ * @start: Starting input address
+ * @range: Size of the input range
+ *
+ * This function splits/merges existing VMAs to create a new VMA covering
+ * the user-provided input range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t start, uint64_t range)
+{
+ struct drm_gpuvm_map_req map_req = {
+ .map.va.addr = start,
+ .map.va.range = range,
+ };
+
+ lockdep_assert_held_write(&vm->lock);
+
+ vm_dbg(&vm->xe->drm, "CPU_ADDR_MIRROR_VMA_OPS_CREATE: addr=0x%016llx, size=0x%016llx",
+ start, range);
+
+ return xe_vm_alloc_vma(vm, &map_req, false);
+}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 82b112795807..ef8a5019574e 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -66,6 +66,8 @@ static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
+bool xe_vma_has_default_mem_attrs(struct xe_vma *vma);
+
/**
* xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
* @vm: The vm
@@ -171,6 +173,12 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic);
+
+int xe_vm_alloc_madvise_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
+
+int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, uint64_t addr, uint64_t size);
+
/**
* to_userptr_vma() - Return a pointer to an embedding userptr vma
* @vma: Pointer to the embedded struct xe_vma
@@ -191,7 +199,7 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-
+int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
void xe_vm_close_and_put(struct xe_vm *vm);
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
@@ -212,12 +220,6 @@ static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
-int xe_vm_userptr_pin(struct xe_vm *vm);
-
-int __xe_vm_userptr_needs_repin(struct xe_vm *vm);
-
-int xe_vm_userptr_check_repin(struct xe_vm *vm);
-
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
u8 tile_mask);
@@ -228,8 +230,8 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
struct xe_svm_range *range);
-int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
- u64 end, u8 tile_mask);
+int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask);
int xe_vm_invalidate_vma(struct xe_vma *vma);
@@ -258,12 +260,6 @@ static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
}
}
-int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
-
-int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
-
-bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
-
int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
@@ -294,6 +290,8 @@ void xe_vm_kill(struct xe_vm *vm, bool unlocked);
*/
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
+int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec);
+
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
@@ -323,7 +321,7 @@ static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
if (vm && !allow_res_evict) {
xe_vm_assert_held(vm);
/* Pairs with READ_ONCE in xe_vm_is_validating() */
- WRITE_ONCE(vm->validating, current);
+ WRITE_ONCE(vm->validation.validating, current);
}
}
@@ -341,7 +339,7 @@ static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict
{
if (vm && !allow_res_evict) {
/* Pairs with READ_ONCE in xe_vm_is_validating() */
- WRITE_ONCE(vm->validating, NULL);
+ WRITE_ONCE(vm->validation.validating, NULL);
}
}
@@ -359,7 +357,7 @@ static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
/* Pairs with WRITE_ONCE in xe_vm_is_validating() */
- if (READ_ONCE(vm->validating) == current) {
+ if (READ_ONCE(vm->validation.validating) == current) {
xe_vm_assert_held(vm);
return true;
}
@@ -367,6 +365,34 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
}
/**
+ * xe_vm_set_validation_exec() - Accessor to set the drm_exec object
+ * @vm: The vm we want to register a drm_exec object with.
+ * @exec: The exec object we want to register.
+ *
+ * Set the drm_exec object used to lock the vm's resv.
+ */
+static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *exec)
+{
+ xe_vm_assert_held(vm);
+ xe_assert(vm->xe, !!exec ^ !!vm->validation._exec);
+ vm->validation._exec = exec;
+}
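+
+/*
+ * Typical pairing (sketch): xe_vm_set_validation_exec(vm, exec) before
+ * validating BOs under the vm resv, then xe_vm_set_validation_exec(vm,
+ * NULL) once done; the XOR assert above enforces strict set/clear pairing.
+ */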
+
+/**
+ * xe_vm_validation_exec() - Accessor to read the drm_exec object
+ * @vm: The vm whose drm_exec object we want to read.
+ *
+ * Return: The drm_exec object used to lock the vm's resv. The value
+ * is a valid pointer, %NULL, or one of the special values defined in
+ * xe_validation.h.
+ */
+static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
+{
+ xe_vm_assert_held(vm);
+ return vm->validation._exec;
+}
+
+/**
* xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
* a valid GPU mapping
* @tile: The tile which the GPU mapping belongs to
@@ -385,11 +411,4 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
-void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
-#else
-static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
-{
-}
-#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_doc.h b/drivers/gpu/drm/xe/xe_vm_doc.h
index 1030ce214032..02e5288373c9 100644
--- a/drivers/gpu/drm/xe/xe_vm_doc.h
+++ b/drivers/gpu/drm/xe/xe_vm_doc.h
@@ -7,7 +7,7 @@
#define _XE_VM_DOC_H_
/**
- * DOC: XE VM (user address space)
+ * DOC: Xe VM (user address space)
*
* VM creation
* ===========
@@ -202,13 +202,13 @@
* User pointers are user allocated memory (malloc'd, mmap'd, etc..) for which the
* user wants to create a GPU mapping. Typically in other DRM drivers a dummy BO
* was created and then a binding was created. We bypass creating a dummy BO in
- * XE and simply create a binding directly from the userptr.
+ * Xe and simply create a binding directly from the userptr.
*
* Invalidation
* ------------
*
* Since this a core kernel managed memory the kernel can move this memory
- * whenever it wants. We register an invalidation MMU notifier to alert XE when
+ * whenever it wants. We register an invalidation MMU notifier to alert Xe when
* a user pointer is about to move. The invalidation notifier needs to block
* until all pending users (jobs or compute mode engines) of the userptr are
 * idle to ensure no faults. This is done by waiting on all of VM's dma-resv slots.
@@ -419,7 +419,7 @@
* =======
*
* VM locking protects all of the core data paths (bind operations, execs,
- * evictions, and compute mode rebind worker) in XE.
+ * evictions, and compute mode rebind worker) in Xe.
*
* Locks
* -----
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c
new file mode 100644
index 000000000000..cad3cf627c3f
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include "xe_vm_madvise.h"
+
+#include <linux/nospec.h>
+#include <drm/xe_drm.h>
+
+#include "xe_bo.h"
+#include "xe_pat.h"
+#include "xe_pt.h"
+#include "xe_svm.h"
+
+struct xe_vmas_in_madvise_range {
+ u64 addr;
+ u64 range;
+ struct xe_vma **vmas;
+ int num_vmas;
+ bool has_bo_vmas;
+ bool has_svm_userptr_vmas;
+};
+
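+/*
+ * Collect all VMAs overlapping [addr, addr + range) into a dynamically
+ * grown array. On success the caller owns madvise_range->vmas, except
+ * when num_vmas ends up zero, in which case the array has already been
+ * freed here.
+ */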
+static int get_vmas(struct xe_vm *vm, struct xe_vmas_in_madvise_range *madvise_range)
+{
+ u64 addr = madvise_range->addr;
+ u64 range = madvise_range->range;
+
+ struct xe_vma **__vmas;
+ struct drm_gpuva *gpuva;
+ int max_vmas = 8;
+
+ lockdep_assert_held(&vm->lock);
+
+ madvise_range->num_vmas = 0;
+ madvise_range->vmas = kmalloc_array(max_vmas, sizeof(*madvise_range->vmas), GFP_KERNEL);
+ if (!madvise_range->vmas)
+ return -ENOMEM;
+
+ vm_dbg(&vm->xe->drm, "VMA's in range: start=0x%016llx, end=0x%016llx", addr, addr + range);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, addr, addr + range) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (xe_vma_bo(vma))
+ madvise_range->has_bo_vmas = true;
+ else if (xe_vma_is_cpu_addr_mirror(vma) || xe_vma_is_userptr(vma))
+ madvise_range->has_svm_userptr_vmas = true;
+
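+ /* Grow the array geometrically when the initial guess of 8 is exceeded. */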
+ if (madvise_range->num_vmas == max_vmas) {
+ max_vmas <<= 1;
+ __vmas = krealloc(madvise_range->vmas,
+ max_vmas * sizeof(*madvise_range->vmas),
+ GFP_KERNEL);
+ if (!__vmas) {
+ kfree(madvise_range->vmas);
+ return -ENOMEM;
+ }
+ madvise_range->vmas = __vmas;
+ }
+
+ madvise_range->vmas[madvise_range->num_vmas] = vma;
+ (madvise_range->num_vmas)++;
+ }
+
+ if (!madvise_range->num_vmas)
+ kfree(madvise_range->vmas);
+
+ vm_dbg(&vm->xe->drm, "madvise_range-num_vmas = %d\n", madvise_range->num_vmas);
+
+ return 0;
+}
+
+static void madvise_preferred_mem_loc(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op)
+{
+ int i;
+
+ xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC);
+
+ for (i = 0; i < num_vmas; i++) {
+ /* TODO: Extend attributes to BO-based vmas */
+ if ((vmas[i]->attr.preferred_loc.devmem_fd == op->preferred_mem_loc.devmem_fd &&
+ vmas[i]->attr.preferred_loc.migration_policy ==
+ op->preferred_mem_loc.migration_policy) ||
+ !xe_vma_is_cpu_addr_mirror(vmas[i])) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.preferred_loc.devmem_fd = op->preferred_mem_loc.devmem_fd;
+ /*
+ * Until multi-device support is added, migration_policy
+ * is unused and can be ignored.
+ */
+ vmas[i]->attr.preferred_loc.migration_policy =
+ op->preferred_mem_loc.migration_policy;
+ }
+ }
+}
+
+static void madvise_atomic(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op)
+{
+ struct xe_bo *bo;
+ int i;
+
+ xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC);
+ xe_assert(vm->xe, op->atomic.val <= DRM_XE_ATOMIC_CPU);
+
+ for (i = 0; i < num_vmas; i++) {
+ if (xe_vma_is_userptr(vmas[i]) &&
+ !(op->atomic.val == DRM_XE_ATOMIC_DEVICE &&
+ xe->info.has_device_atomics_on_smem)) {
+ vmas[i]->skip_invalidation = true;
+ continue;
+ }
+
+ if (vmas[i]->attr.atomic_access == op->atomic.val) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.atomic_access = op->atomic.val;
+ }
+
+ bo = xe_vma_bo(vmas[i]);
+ if (!bo || bo->attr.atomic_access == op->atomic.val)
+ continue;
+
+ vmas[i]->skip_invalidation = false;
+ xe_bo_assert_held(bo);
+ bo->attr.atomic_access = op->atomic.val;
+
+ /* Invalidate the CPU page table so the BO can migrate to SMEM on the next access */
+ if (xe_bo_is_vram(bo) &&
+ (bo->attr.atomic_access == DRM_XE_ATOMIC_CPU ||
+ bo->attr.atomic_access == DRM_XE_ATOMIC_GLOBAL))
+ ttm_bo_unmap_virtual(&bo->ttm);
+ }
+}
+
+static void madvise_pat_index(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op)
+{
+ int i;
+
+ xe_assert(vm->xe, op->type == DRM_XE_MEM_RANGE_ATTR_PAT);
+
+ for (i = 0; i < num_vmas; i++) {
+ if (vmas[i]->attr.pat_index == op->pat_index.val) {
+ vmas[i]->skip_invalidation = true;
+ } else {
+ vmas[i]->skip_invalidation = false;
+ vmas[i]->attr.pat_index = op->pat_index.val;
+ }
+ }
+}
+
+typedef void (*madvise_func)(struct xe_device *xe, struct xe_vm *vm,
+ struct xe_vma **vmas, int num_vmas,
+ struct drm_xe_madvise *op);
+
+static const madvise_func madvise_funcs[] = {
+ [DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC] = madvise_preferred_mem_loc,
+ [DRM_XE_MEM_RANGE_ATTR_ATOMIC] = madvise_atomic,
+ [DRM_XE_MEM_RANGE_ATTR_PAT] = madvise_pat_index,
+};
+
+static u8 xe_zap_ptes_in_madvise_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ struct drm_gpuva *gpuva;
+ struct xe_tile *tile;
+ u8 id, tile_mask = 0;
+
+ lockdep_assert_held_write(&vm->lock);
+
+ /* Wait for pending binds */
+ if (dma_resv_wait_timeout(xe_vm_resv(vm), DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT) <= 0)
+ XE_WARN_ON(1);
+
+ drm_gpuvm_for_each_va_range(gpuva, &vm->gpuvm, start, end) {
+ struct xe_vma *vma = gpuva_to_vma(gpuva);
+
+ if (vma->skip_invalidation || xe_vma_is_null(vma))
+ continue;
+
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ tile_mask |= xe_svm_ranges_zap_ptes_in_range(vm,
+ xe_vma_start(vma),
+ xe_vma_end(vma));
+ } else {
+ for_each_tile(tile, vm->xe, id) {
+ if (xe_pt_zap_ptes(tile, vma)) {
+ tile_mask |= BIT(id);
+
+ /*
+ * WRITE_ONCE pairs with READ_ONCE
+ * in xe_vm_has_valid_gpu_mapping()
+ */
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated | BIT(id));
+ }
+ }
+ }
+ }
+
+ return tile_mask;
+}
+
+static int xe_vm_invalidate_madvise_range(struct xe_vm *vm, u64 start, u64 end)
+{
+ u8 tile_mask = xe_zap_ptes_in_madvise_range(vm, start, end);
+
+ if (!tile_mask)
+ return 0;
+
+ xe_device_wmb(vm->xe);
+
+ return xe_vm_range_tilemask_tlb_inval(vm, start, end, tile_mask);
+}
+
+static bool madvise_args_are_sane(struct xe_device *xe, const struct drm_xe_madvise *args)
+{
+ if (XE_IOCTL_DBG(xe, !args))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->start, SZ_4K)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, !IS_ALIGNED(args->range, SZ_4K)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->range < SZ_4K))
+ return false;
+
+ switch (args->type) {
+ case DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC:
+ {
+ s32 fd = (s32)args->preferred_mem_loc.devmem_fd;
+
+ if (XE_IOCTL_DBG(xe, fd < DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.migration_policy >
+ DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.pad))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->preferred_mem_loc.reserved))
+ return false;
+ break;
+ }
+ case DRM_XE_MEM_RANGE_ATTR_ATOMIC:
+ if (XE_IOCTL_DBG(xe, args->atomic.val > DRM_XE_ATOMIC_CPU))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->atomic.pad))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->atomic.reserved))
+ return false;
+
+ break;
+ case DRM_XE_MEM_RANGE_ATTR_PAT:
+ {
+ u16 coh_mode = xe_pat_index_get_coh_mode(xe, args->pat_index.val);
+
+ if (XE_IOCTL_DBG(xe, !coh_mode))
+ return false;
+
+ if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->pat_index.pad))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, args->pat_index.reserved))
+ return false;
+ break;
+ }
+ default:
+ if (XE_IOCTL_DBG(xe, 1))
+ return false;
+ }
+
+ if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
+ return false;
+
+ return true;
+}
+
+static bool check_bo_args_are_sane(struct xe_vm *vm, struct xe_vma **vmas,
+ int num_vmas, u32 atomic_val)
+{
+ struct xe_device *xe = vm->xe;
+ struct xe_bo *bo;
+ int i;
+
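+ /*
+ * Placement requirements encoded by the checks below:
+ * DRM_XE_ATOMIC_CPU requires SMEM placement, DRM_XE_ATOMIC_DEVICE
+ * requires VRAM placement (or SMEM when the device supports device
+ * atomics on SMEM), and DRM_XE_ATOMIC_GLOBAL requires both SMEM and
+ * VRAM placement.
+ */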
+ for (i = 0; i < num_vmas; i++) {
+ bo = xe_vma_bo(vmas[i]);
+ if (!bo)
+ continue;
+ /*
+ * NOTE: The following atomic checks are platform-specific. For example,
+ * if a device supports CXL atomics, these may not be necessary or
+ * may behave differently.
+ */
+ if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_CPU &&
+ !(bo->flags & XE_BO_FLAG_SYSTEM)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_DEVICE &&
+ !(bo->flags & XE_BO_FLAG_VRAM0) &&
+ !(bo->flags & XE_BO_FLAG_VRAM1) &&
+ !(bo->flags & XE_BO_FLAG_SYSTEM &&
+ xe->info.has_device_atomics_on_smem)))
+ return false;
+
+ if (XE_IOCTL_DBG(xe, atomic_val == DRM_XE_ATOMIC_GLOBAL &&
+ (!(bo->flags & XE_BO_FLAG_SYSTEM) ||
+ (!(bo->flags & XE_BO_FLAG_VRAM0) &&
+ !(bo->flags & XE_BO_FLAG_VRAM1)))))
+ return false;
+ }
+ return true;
+}
+
+/**
+ * xe_vm_madvise_ioctl - Handle MADVISE ioctl for a VM
+ * @dev: DRM device pointer
+ * @data: Pointer to ioctl data (drm_xe_madvise*)
+ * @file: DRM file pointer
+ *
+ * Handles the MADVISE ioctl to provide memory advice for VMAs within
+ * the input range.
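+ *
+ * Locking: takes @vm->lock in write mode; BO-backed VMAs additionally
+ * take their BO's reservation lock via drm_exec, and SVM/userptr VMAs
+ * take the SVM notifier lock.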
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_madvise *args = data;
+ struct xe_vmas_in_madvise_range madvise_range = {.addr = args->start,
+ .range = args->range, };
+ struct xe_vm *vm;
+ struct drm_exec exec;
+ int err, attr_type;
+
+ vm = xe_vm_lookup(xef, args->vm_id);
+ if (XE_IOCTL_DBG(xe, !vm))
+ return -EINVAL;
+
+ if (!madvise_args_are_sane(vm->xe, args)) {
+ err = -EINVAL;
+ goto put_vm;
+ }
+
+ xe_svm_flush(vm);
+
+ err = down_write_killable(&vm->lock);
+ if (err)
+ goto put_vm;
+
+ if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
+ err = -ENOENT;
+ goto unlock_vm;
+ }
+
+ err = xe_vm_alloc_madvise_vma(vm, args->start, args->range);
+ if (err)
+ goto unlock_vm;
+
+ err = get_vmas(vm, &madvise_range);
+ if (err || !madvise_range.num_vmas)
+ goto unlock_vm;
+
+ if (madvise_range.has_bo_vmas) {
+ if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
+ if (!check_bo_args_are_sane(vm, madvise_range.vmas,
+ madvise_range.num_vmas,
+ args->atomic.val)) {
+ err = -EINVAL;
+ goto unlock_vm;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ for (int i = 0; i < madvise_range.num_vmas; i++) {
+ struct xe_bo *bo = xe_vma_bo(madvise_range.vmas[i]);
+
+ if (!bo)
+ continue;
+ err = drm_exec_lock_obj(&exec, &bo->ttm.base);
+ drm_exec_retry_on_contention(&exec);
+ if (err)
+ goto err_fini;
+ }
+ }
+ }
+
+ if (madvise_range.has_svm_userptr_vmas) {
+ err = xe_svm_notifier_lock_interruptible(vm);
+ if (err)
+ goto err_fini;
+ }
+
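+ /*
+ * args->type was range-checked in madvise_args_are_sane(); clamp it
+ * with array_index_nospec() so a speculatively out-of-bounds value
+ * cannot be used to index madvise_funcs.
+ */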
+ attr_type = array_index_nospec(args->type, ARRAY_SIZE(madvise_funcs));
+ madvise_funcs[attr_type](xe, vm, madvise_range.vmas, madvise_range.num_vmas, args);
+
+ err = xe_vm_invalidate_madvise_range(vm, args->start, args->start + args->range);
+
+ if (madvise_range.has_svm_userptr_vmas)
+ xe_svm_notifier_unlock(vm);
+
+err_fini:
+ if (madvise_range.has_bo_vmas)
+ drm_exec_fini(&exec);
+ kfree(madvise_range.vmas);
+ madvise_range.vmas = NULL;
+unlock_vm:
+ up_write(&vm->lock);
+put_vm:
+ xe_vm_put(vm);
+ return err;
+}
diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.h b/drivers/gpu/drm/xe/xe_vm_madvise.h
new file mode 100644
index 000000000000..b0e1fc445f23
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vm_madvise.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_VM_MADVISE_H_
+#define _XE_VM_MADVISE_H_
+
+struct drm_device;
+struct drm_file;
+
+int xe_vm_madvise_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 6058cf739388..ccd6cc090309 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -17,6 +17,7 @@
#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"
+#include "xe_userptr.h"
struct xe_bo;
struct xe_svm_range;
@@ -45,36 +46,44 @@ struct xe_vm_pgtable_update_op;
#define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7)
#define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8)
#define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9)
+#define XE_VMA_MADV_AUTORESET (DRM_GPUVA_USERBITS << 10)
+
+/**
+ * struct xe_vma_mem_attr - memory attributes associated with vma
+ */
+struct xe_vma_mem_attr {
+ /** @preferred_loc: preferred memory_location */
+ struct {
+ /** @preferred_loc.migration_policy: Pages migration policy */
+ u32 migration_policy;
+
+ /**
+ * @preferred_loc.devmem_fd: used for determining the pagemap_fd
+ * requested by the user. DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM and
+ * DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE mean system memory and
+ * closest device memory, respectively.
+ */
+ u32 devmem_fd;
+ } preferred_loc;
-/** struct xe_userptr - User pointer */
-struct xe_userptr {
- /** @invalidate_link: Link for the vm::userptr.invalidated list */
- struct list_head invalidate_link;
- /** @userptr: link into VM repin list if userptr. */
- struct list_head repin_link;
/**
- * @notifier: MMU notifier for user pointer (invalidation call back)
+ * @atomic_access: The atomic access type for the vma
+ * See %DRM_XE_ATOMIC_UNDEFINED, %DRM_XE_ATOMIC_DEVICE,
+ * %DRM_XE_ATOMIC_GLOBAL, and %DRM_XE_ATOMIC_CPU for possible
+ * values. These are defined in uapi/drm/xe_drm.h.
*/
- struct mmu_interval_notifier notifier;
- /** @sgt: storage for a scatter gather table */
- struct sg_table sgt;
- /** @sg: allocated scatter gather table */
- struct sg_table *sg;
- /** @notifier_seq: notifier sequence number */
- unsigned long notifier_seq;
- /** @unmap_mutex: Mutex protecting dma-unmapping */
- struct mutex unmap_mutex;
+ u32 atomic_access;
+
/**
- * @initial_bind: user pointer has been bound at least once.
- * write: vm->userptr.notifier_lock in read mode and vm->resv held.
- * read: vm->userptr.notifier_lock in write mode or vm->resv held.
+ * @default_pat_index: The pat index set by the user at the VMA's first bind.
*/
- bool initial_bind;
- /** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
- bool mapped;
-#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
- u32 divisor;
-#endif
+ u16 default_pat_index;
+
+ /**
+ * @pat_index: The pat index to use when encoding the PTEs for this vma.
+ * Same as @default_pat_index unless overridden by madvise.
+ */
+ u16 pat_index;
};
struct xe_vma {
@@ -102,10 +111,10 @@ struct xe_vma {
/**
* @tile_invalidated: Tile mask of binding are invalidated for this VMA.
- * protected by BO's resv and for userptrs, vm->userptr.notifier_lock in
- * write mode for writing or vm->userptr.notifier_lock in read mode and
+ * protected by BO's resv and for userptrs, vm->svm.gpusvm.notifier_lock in
+ * write mode for writing or vm->svm.gpusvm.notifier_lock in read mode and
* the vm->resv. For stable reading, BO's resv or userptr
- * vm->userptr.notifier_lock in read mode is required. Can be
+ * vm->svm.gpusvm.notifier_lock in read mode is required. Can be
* opportunistically read with READ_ONCE outside of locks.
*/
u8 tile_invalidated;
@@ -116,7 +125,7 @@ struct xe_vma {
/**
* @tile_present: Tile mask of binding are present for this VMA.
* protected by vm->lock, vm->resv and for userptrs,
- * vm->userptr.notifier_lock for writing. Needs either for reading,
+ * vm->svm.gpusvm.notifier_lock for writing. Needs either for reading,
* but if reading is done under the vm->lock only, it needs to be held
* in write mode.
*/
@@ -126,15 +135,22 @@ struct xe_vma {
u8 tile_staged;
/**
- * @pat_index: The pat index to use when encoding the PTEs for this vma.
+ * @skip_invalidation: Used in madvise to avoid invalidation
+ * if the memory attributes don't change
*/
- u16 pat_index;
+ bool skip_invalidation;
/**
* @ufence: The user fence that was provided with MAP.
* Needs to be signalled before UNMAP can be processed.
*/
struct xe_user_fence *ufence;
+
+ /**
+ * @attr: The attributes of vma which determines the migration policy
+ * and encoding of the PTEs for this vma.
+ */
+ struct xe_vma_mem_attr attr;
};
/**
@@ -205,11 +221,6 @@ struct xe_vm {
#define XE_VM_FLAG_GSC BIT(8)
unsigned long flags;
- /** @composite_fence_ctx: context composite fence */
- u64 composite_fence_ctx;
- /** @composite_fence_seqno: seqno for composite fence */
- u32 composite_fence_seqno;
-
/**
* @lock: outer most lock, protects objects of anything attached to this
* VM
@@ -244,33 +255,7 @@ struct xe_vm {
const struct xe_pt_ops *pt_ops;
/** @userptr: user pointer state */
- struct {
- /**
- * @userptr.repin_list: list of VMAs which are user pointers,
- * and needs repinning. Protected by @lock.
- */
- struct list_head repin_list;
- /**
- * @notifier_lock: protects notifier in write mode and
- * submission in read mode.
- */
- struct rw_semaphore notifier_lock;
- /**
- * @userptr.invalidated_lock: Protects the
- * @userptr.invalidated list.
- */
- spinlock_t invalidated_lock;
- /**
- * @userptr.invalidated: List of invalidated userptrs, not yet
- * picked
- * up for revalidation. Protected from access with the
- * @invalidated_lock. Removing items from the list
- * additionally requires @lock in write mode, and adding
- * items to the list requires either the @userptr.notifier_lock in
- * write mode, OR @lock in write mode.
- */
- struct list_head invalidated;
- } userptr;
+ struct xe_userptr_vm userptr;
/** @preempt: preempt state */
struct {
@@ -318,21 +303,37 @@ struct xe_vm {
} error_capture;
/**
+ * @validation: Validation data only valid with the vm resv held.
+ * Note: This is really state of the task holding the vm resv, and
+ * moving forward we should come up with a better way of passing
+ * this down the call chain.
+ */
+ struct {
+ /**
+ * @validation.validating: The task that is currently making bos
+ * resident for this vm.
+ * Protected by the VM's resv for writing. Opportunistic reading can be done
+ * using READ_ONCE. Note: This is a workaround for the
+ * TTM eviction_valuable() callback not being passed a struct
+ * ttm_operation_context(). Future work might want to address this.
+ */
+ struct task_struct *validating;
+ /**
+ * @validation._exec: The drm_exec context used when locking the vm resv.
+ * Protected by the vm's resv.
+ */
+ struct drm_exec *_exec;
+ } validation;
+
+ /**
* @tlb_flush_seqno: Required TLB flush seqno for the next exec.
* protected by the vm resv.
*/
u64 tlb_flush_seqno;
- /**
- * @validating: The task that is currently making bos resident for this vm.
- * Protected by the VM's resv for writing. Opportunistic reading can be done
- * using READ_ONCE. Note: This is a workaround for the
- * TTM eviction_valuable() callback not being passed a struct
- * ttm_operation_context(). Future work might want to address this.
- */
- struct task_struct *validating;
/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
bool batch_invalidate_tlb;
- /** @xef: XE file handle for tracking this VM's drm client */
+ /** @xef: Xe file handle for tracking this VM's drm client */
struct xe_file *xef;
};
@@ -340,17 +341,10 @@ struct xe_vm {
struct xe_vma_op_map {
/** @vma: VMA to map */
struct xe_vma *vma;
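+ /** @vma_flags: flags to set on the VMA being mapped */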
+ unsigned int vma_flags;
/** @immediate: Immediate bind */
bool immediate;
/** @read_only: Read only */
- bool read_only;
- /** @is_null: is NULL binding */
- bool is_null;
- /** @is_cpu_addr_mirror: is CPU address mirror binding */
- bool is_cpu_addr_mirror;
- /** @dumpable: whether BO is dumped on GPU hang */
- bool dumpable;
- /** @invalidate: invalidate the VMA before bind */
bool invalidate_on_bind;
/** @pat_index: The pat index to use for this operation. */
u16 pat_index;
@@ -400,8 +394,11 @@ struct xe_vma_op_prefetch_range {
struct xarray range;
/** @ranges_count: number of svm ranges to map */
u32 ranges_count;
- /** @region: memory region to prefetch to */
- u32 region;
+ /**
+ * @tile: Pointer to the tile structure containing memory to prefetch.
+ * NULL if the requested prefetch region is smem.
+ */
+ struct xe_tile *tile;
};
/** enum xe_vma_op_flags - flags for VMA operation */
@@ -467,6 +464,9 @@ struct xe_vma_ops {
struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
/** @flag: signify the properties within xe_vma_ops*/
#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
+#define XE_VMA_OPS_FLAG_MADVISE BIT(1)
+#define XE_VMA_OPS_ARRAY_OF_BINDS BIT(2)
+#define XE_VMA_OPS_FLAG_SKIP_TLB_WAIT BIT(3)
u32 flags;
#ifdef TEST_VM_OPS_ERROR
/** @inject_error: inject error to test error handling */
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index e421a74fb87c..d50baefcd124 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -3,6 +3,7 @@
* Copyright © 2021-2024 Intel Corporation
*/
+#include <kunit/visibility.h>
#include <linux/pci.h>
#include <drm/drm_managed.h>
@@ -12,28 +13,25 @@
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_assert.h"
+#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gt_mcr.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_sriov.h"
+#include "xe_tile_sriov_vf.h"
+#include "xe_ttm_vram_mgr.h"
#include "xe_vram.h"
+#include "xe_vram_types.h"
-#define BAR_SIZE_SHIFT 20
-
-static void
-_resize_bar(struct xe_device *xe, int resno, resource_size_t size)
+static void resize_bar(struct xe_device *xe, int resno, resource_size_t size)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
int bar_size = pci_rebar_bytes_to_size(size);
int ret;
- if (pci_resource_len(pdev, resno))
- pci_release_resource(pdev, resno);
-
- ret = pci_resize_resource(pdev, resno, bar_size);
+ ret = pci_resize_resource(pdev, resno, bar_size, 0);
if (ret) {
drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support in your BIOS\n",
resno, 1 << bar_size, ERR_PTR(ret));
@@ -47,7 +45,7 @@ _resize_bar(struct xe_device *xe, int resno, resource_size_t size)
* if force_vram_bar_size is set, attempt to set to the requested size
* else set to maximum possible size
*/
-static void resize_vram_bar(struct xe_device *xe)
+void xe_vram_resize_bar(struct xe_device *xe)
{
int force_vram_bar_size = xe_modparam.force_vram_bar_size;
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -55,41 +53,37 @@ static void resize_vram_bar(struct xe_device *xe)
resource_size_t current_size;
resource_size_t rebar_size;
struct resource *root_res;
- u32 bar_size_mask;
+ int max_size, i;
u32 pci_cmd;
- int i;
/* gather some relevant info */
current_size = pci_resource_len(pdev, LMEM_BAR);
- bar_size_mask = pci_rebar_get_possible_sizes(pdev, LMEM_BAR);
-
- if (!bar_size_mask)
- return;
if (force_vram_bar_size < 0)
return;
/* set to a specific size? */
if (force_vram_bar_size) {
- u32 bar_size_bit;
-
- rebar_size = force_vram_bar_size * (resource_size_t)SZ_1M;
-
- bar_size_bit = bar_size_mask & BIT(pci_rebar_bytes_to_size(rebar_size));
+ rebar_size = pci_rebar_bytes_to_size(force_vram_bar_size *
+ (resource_size_t)SZ_1M);
- if (!bar_size_bit) {
+ if (!pci_rebar_size_supported(pdev, LMEM_BAR, rebar_size)) {
drm_info(&xe->drm,
- "Requested size: %lluMiB is not supported by rebar sizes: 0x%x. Leaving default: %lluMiB\n",
- (u64)rebar_size >> 20, bar_size_mask, (u64)current_size >> 20);
+ "Requested size: %lluMiB is not supported by rebar sizes: 0x%llx. Leaving default: %lluMiB\n",
+ (u64)pci_rebar_size_to_bytes(rebar_size) >> 20,
+ pci_rebar_get_possible_sizes(pdev, LMEM_BAR),
+ (u64)current_size >> 20);
return;
}
- rebar_size = 1ULL << (__fls(bar_size_bit) + BAR_SIZE_SHIFT);
-
+ rebar_size = pci_rebar_size_to_bytes(rebar_size);
if (rebar_size == current_size)
return;
} else {
- rebar_size = 1ULL << (__fls(bar_size_mask) + BAR_SIZE_SHIFT);
+ max_size = pci_rebar_get_max_size(pdev, LMEM_BAR);
+ if (max_size < 0)
+ return;
+ rebar_size = pci_rebar_size_to_bytes(max_size);
/* only resize if larger than current */
if (rebar_size <= current_size)
@@ -116,7 +110,7 @@ static void resize_vram_bar(struct xe_device *xe)
pci_read_config_dword(pdev, PCI_COMMAND, &pci_cmd);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd & ~PCI_COMMAND_MEMORY);
- _resize_bar(xe, LMEM_BAR, rebar_size);
+ resize_bar(xe, LMEM_BAR, rebar_size);
pci_assign_unassigned_bus_resources(pdev->bus);
pci_write_config_dword(pdev, PCI_COMMAND, pci_cmd);
@@ -136,7 +130,7 @@ static bool resource_is_valid(struct pci_dev *pdev, int bar)
return true;
}
-static int determine_lmem_bar_size(struct xe_device *xe)
+static int determine_lmem_bar_size(struct xe_device *xe, struct xe_vram_region *lmem_bar)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
@@ -145,28 +139,31 @@ static int determine_lmem_bar_size(struct xe_device *xe)
return -ENXIO;
}
- resize_vram_bar(xe);
-
- xe->mem.vram.io_start = pci_resource_start(pdev, LMEM_BAR);
- xe->mem.vram.io_size = pci_resource_len(pdev, LMEM_BAR);
- if (!xe->mem.vram.io_size)
+ lmem_bar->io_start = pci_resource_start(pdev, LMEM_BAR);
+ lmem_bar->io_size = pci_resource_len(pdev, LMEM_BAR);
+ if (!lmem_bar->io_size)
return -EIO;
/* XXX: Need to change when xe link code is ready */
- xe->mem.vram.dpa_base = 0;
+ lmem_bar->dpa_base = 0;
/* set up a map to the total memory area. */
- xe->mem.vram.mapping = ioremap_wc(xe->mem.vram.io_start, xe->mem.vram.io_size);
+ lmem_bar->mapping = devm_ioremap_wc(&pdev->dev, lmem_bar->io_start, lmem_bar->io_size);
return 0;
}
-static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
+static int get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size, u64 *poffset)
{
struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref;
u64 offset;
u32 reg;
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+ if (!fw_ref)
+ return -ETIMEDOUT;
+
if (GRAPHICS_VER(xe) >= 20) {
u64 ccs_size = tile_size / 512;
u64 offset_hi, offset_lo;
@@ -196,7 +193,10 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
offset = (u64)REG_FIELD_GET(XEHP_FLAT_CCS_PTR, reg) * SZ_64K;
}
- return offset;
+ xe_force_wake_put(gt_to_fw(gt), fw_ref);
+ *poffset = offset;
+
+ return 0;
}
/*
@@ -223,7 +223,6 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
{
struct xe_device *xe = tile_to_xe(tile);
struct xe_gt *gt = tile->primary_gt;
- unsigned int fw_ref;
u64 offset;
u32 reg;
@@ -234,32 +233,31 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
offset = 0;
for_each_tile(t, xe, id)
for_each_if(t->id < tile->id)
- offset += xe_gt_sriov_vf_lmem(t->primary_gt);
+ offset += xe_tile_sriov_vf_lmem(t);
- *tile_size = xe_gt_sriov_vf_lmem(gt);
+ *tile_size = xe_tile_sriov_vf_lmem(tile);
*vram_size = *tile_size;
*tile_offset = offset;
return 0;
}
- fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
- if (!fw_ref)
- return -ETIMEDOUT;
-
/* actual size */
if (unlikely(xe->info.platform == XE_DG1)) {
*tile_size = pci_resource_len(to_pci_dev(xe->drm.dev), LMEM_BAR);
*tile_offset = 0;
} else {
- reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id));
+ reg = xe_mmio_read32(&tile->mmio, SG_TILE_ADDR_RANGE(tile->id));
*tile_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G;
*tile_offset = (u64)REG_FIELD_GET(GENMASK(7, 1), reg) * SZ_1G;
}
/* minus device usage */
if (xe->info.has_flat_ccs) {
- offset = get_flat_ccs_offset(gt, *tile_size);
+ int ret = get_flat_ccs_offset(gt, *tile_size, &offset);
+
+ if (ret)
+ return ret;
} else {
offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE);
}
@@ -267,8 +265,6 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
/* remove the tile offset so we have just the available size */
*vram_size = offset - *tile_offset;
- xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
return 0;
}
@@ -278,13 +274,74 @@ static void vram_fini(void *arg)
struct xe_tile *tile;
int id;
- if (xe->mem.vram.mapping)
- iounmap(xe->mem.vram.mapping);
+ xe->mem.vram->mapping = NULL;
+
+ for_each_tile(tile, xe, id) {
+ tile->mem.vram->mapping = NULL;
+ if (tile->mem.kernel_vram)
+ tile->mem.kernel_vram->mapping = NULL;
+ }
+}
+
+struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement)
+{
+ struct xe_vram_region *vram;
+ struct drm_device *drm = &xe->drm;
+
+ xe_assert(xe, id < xe->info.tile_count);
+
+ vram = drmm_kzalloc(drm, sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return NULL;
+
+ vram->xe = xe;
+ vram->id = id;
+ vram->placement = placement;
+#if defined(CONFIG_DRM_XE_PAGEMAP)
+ vram->migrate = xe->tiles[id].migrate;
+#endif
+ return vram;
+}
+
+static void print_vram_region_info(struct xe_device *xe, struct xe_vram_region *vram)
+{
+ struct drm_device *drm = &xe->drm;
- xe->mem.vram.mapping = NULL;
+ if (vram->io_size < vram->usable_size)
+ drm_info(drm, "Small BAR device\n");
- for_each_tile(tile, xe, id)
- tile->mem.vram.mapping = NULL;
+ drm_info(drm,
+ "VRAM[%u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n",
+ vram->id, &vram->actual_physical_size, &vram->usable_size, &vram->io_size);
+ drm_info(drm, "VRAM[%u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n",
+ vram->id, &vram->dpa_base, vram->dpa_base + (u64)vram->actual_physical_size,
+ &vram->io_start, vram->io_start + (u64)vram->io_size);
+}
+
+static int vram_region_init(struct xe_device *xe, struct xe_vram_region *vram,
+ struct xe_vram_region *lmem_bar, u64 offset, u64 usable_size,
+ u64 region_size, resource_size_t remain_io_size)
+{
+ /* Check if VRAM region is already initialized */
+ if (vram->mapping)
+ return 0;
+
+ vram->actual_physical_size = region_size;
+ vram->io_start = lmem_bar->io_start + offset;
+ vram->io_size = min_t(u64, usable_size, remain_io_size);
+
+ if (!vram->io_size) {
+ drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
+ return -ENODEV;
+ }
+
+ vram->dpa_base = lmem_bar->dpa_base + offset;
+ vram->mapping = lmem_bar->mapping + offset;
+ vram->usable_size = usable_size;
+
+ print_vram_region_info(xe, vram);
+
+ return 0;
}
/**
@@ -298,78 +355,108 @@ static void vram_fini(void *arg)
int xe_vram_probe(struct xe_device *xe)
{
struct xe_tile *tile;
- resource_size_t io_size;
+ struct xe_vram_region lmem_bar;
+ resource_size_t remain_io_size;
u64 available_size = 0;
u64 total_size = 0;
- u64 tile_offset;
- u64 tile_size;
- u64 vram_size;
int err;
u8 id;
if (!IS_DGFX(xe))
return 0;
- /* Get the size of the root tile's vram for later accessibility comparison */
- tile = xe_device_get_root_tile(xe);
- err = tile_vram_size(tile, &vram_size, &tile_size, &tile_offset);
- if (err)
- return err;
-
- err = determine_lmem_bar_size(xe);
+ err = determine_lmem_bar_size(xe, &lmem_bar);
if (err)
return err;
+ drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &lmem_bar.io_start, &lmem_bar.io_size);
- drm_info(&xe->drm, "VISIBLE VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &xe->mem.vram.io_size);
-
- io_size = xe->mem.vram.io_size;
+ remain_io_size = lmem_bar.io_size;
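+
+ /*
+ * Tiles consume the CPU-visible BAR in tile order; on small-BAR parts
+ * a later tile may get a truncated io_size, and a tile left with no
+ * CPU-visible VRAM fails vram_region_init() with -ENODEV.
+ */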
- /* tile specific ranges */
for_each_tile(tile, xe, id) {
- err = tile_vram_size(tile, &vram_size, &tile_size, &tile_offset);
+ u64 region_size;
+ u64 usable_size;
+ u64 tile_offset;
+
+ err = tile_vram_size(tile, &usable_size, &region_size, &tile_offset);
if (err)
return err;
- tile->mem.vram.actual_physical_size = tile_size;
- tile->mem.vram.io_start = xe->mem.vram.io_start + tile_offset;
- tile->mem.vram.io_size = min_t(u64, vram_size, io_size);
+ total_size += region_size;
+ available_size += usable_size;
- if (!tile->mem.vram.io_size) {
- drm_err(&xe->drm, "Tile without any CPU visible VRAM. Aborting.\n");
- return -ENODEV;
+ err = vram_region_init(xe, tile->mem.vram, &lmem_bar, tile_offset, usable_size,
+ region_size, remain_io_size);
+ if (err)
+ return err;
+
+ if (total_size > lmem_bar.io_size) {
+ drm_info(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
+ &total_size, &lmem_bar.io_size);
}
- tile->mem.vram.dpa_base = xe->mem.vram.dpa_base + tile_offset;
- tile->mem.vram.usable_size = vram_size;
- tile->mem.vram.mapping = xe->mem.vram.mapping + tile_offset;
+ remain_io_size -= min_t(u64, tile->mem.vram->actual_physical_size, remain_io_size);
+ }
- if (tile->mem.vram.io_size < tile->mem.vram.usable_size)
- drm_info(&xe->drm, "Small BAR device\n");
- drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
- tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
- drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
- &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
- &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
+ err = vram_region_init(xe, xe->mem.vram, &lmem_bar, 0, available_size, total_size,
+ lmem_bar.io_size);
+ if (err)
+ return err;
- /* calculate total size using tile size to get the correct HW sizing */
- total_size += tile_size;
- available_size += vram_size;
+ return devm_add_action_or_reset(xe->drm.dev, vram_fini, xe);
+}
- if (total_size > xe->mem.vram.io_size) {
- drm_info(&xe->drm, "VRAM: %pa is larger than resource %pa\n",
- &total_size, &xe->mem.vram.io_size);
- }
+/**
+ * xe_vram_region_io_start - Get the IO start of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the IO start of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_io_start(const struct xe_vram_region *vram)
+{
+ return vram ? vram->io_start : 0;
+}
- io_size -= min_t(u64, tile_size, io_size);
- }
+/**
+ * xe_vram_region_io_size - Get the IO size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the IO size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_io_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->io_size : 0;
+}
- xe->mem.vram.actual_physical_size = total_size;
+/**
+ * xe_vram_region_dpa_base - Get the DPA base of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the DPA base of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_dpa_base(const struct xe_vram_region *vram)
+{
+ return vram ? vram->dpa_base : 0;
+}
- drm_info(&xe->drm, "Total VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &xe->mem.vram.actual_physical_size);
- drm_info(&xe->drm, "Available VRAM: %pa, %pa\n", &xe->mem.vram.io_start,
- &available_size);
+/**
+ * xe_vram_region_usable_size - Get the usable size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the usable size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_usable_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->usable_size : 0;
+}
- return devm_add_action_or_reset(xe->drm.dev, vram_fini, xe);
+/**
+ * xe_vram_region_actual_physical_size - Get the actual physical size of a VRAM region
+ * @vram: the VRAM region
+ *
+ * Return: the actual physical size of the VRAM region, or 0 if not valid
+ */
+resource_size_t xe_vram_region_actual_physical_size(const struct xe_vram_region *vram)
+{
+ return vram ? vram->actual_physical_size : 0;
}
+EXPORT_SYMBOL_IF_KUNIT(xe_vram_region_actual_physical_size);
diff --git a/drivers/gpu/drm/xe/xe_vram.h b/drivers/gpu/drm/xe/xe_vram.h
index e31cc04ec0db..13505cfb184d 100644
--- a/drivers/gpu/drm/xe/xe_vram.h
+++ b/drivers/gpu/drm/xe/xe_vram.h
@@ -6,8 +6,20 @@
#ifndef _XE_VRAM_H_
#define _XE_VRAM_H_
+#include <linux/types.h>
+
struct xe_device;
+struct xe_vram_region;
+void xe_vram_resize_bar(struct xe_device *xe);
int xe_vram_probe(struct xe_device *xe);
+struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement);
+
+resource_size_t xe_vram_region_io_start(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_io_size(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_dpa_base(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_usable_size(const struct xe_vram_region *vram);
+resource_size_t xe_vram_region_actual_physical_size(const struct xe_vram_region *vram);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index b26e26d73dae..17bc84da4cdc 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -34,7 +34,7 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- u32 val, mbox;
+ u32 val = 0, mbox;
int err;
mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
@@ -56,7 +56,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- u32 val, mbox;
+ u32 val = 0, mbox;
int err;
mbox = REG_FIELD_PREP(PCODE_MB_COMMAND, PCODE_FREQUENCY_CONFIG)
diff --git a/drivers/gpu/drm/xe/xe_vram_types.h b/drivers/gpu/drm/xe/xe_vram_types.h
new file mode 100644
index 000000000000..83772dcbf1af
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_vram_types.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_VRAM_TYPES_H_
+#define _XE_VRAM_TYPES_H_
+
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+#include <drm/drm_pagemap.h>
+#endif
+
+#include "xe_ttm_vram_mgr_types.h"
+
+struct xe_device;
+struct xe_migrate;
+
+/**
+ * struct xe_vram_region - memory region structure
+ *
+ * This is used to describe a memory region in an xe
+ * device, such as HBM memory or CXL extension memory.
+ */
+struct xe_vram_region {
+ /** @xe: Back pointer to xe device */
+ struct xe_device *xe;
+ /**
+ * @id: VRAM region instance id
+ *
+ * The value should be unique for each VRAM region.
+ */
+ u8 id;
+ /** @io_start: IO start address of this VRAM instance */
+ resource_size_t io_start;
+ /**
+ * @io_size: IO size of this VRAM instance
+ *
+ * This represents how much of this VRAM we can access
+ * via the CPU through the VRAM BAR. This can be smaller
+ * than @usable_size, in which case only part of VRAM is CPU
+ * accessible (typically the first 256M). This
+ * configuration is known as small-bar.
+ */
+ resource_size_t io_size;
+ /** @dpa_base: This memory region's DPA (device physical address) base */
+ resource_size_t dpa_base;
+ /**
+ * @usable_size: usable size of VRAM
+ *
+ * Usable size of VRAM excluding reserved portions
+ * (e.g. stolen memory)
+ */
+ resource_size_t usable_size;
+ /**
+ * @actual_physical_size: Actual VRAM size
+ *
+ * Actual VRAM size including reserved portions
+ * (e.g. stolen memory)
+ */
+ resource_size_t actual_physical_size;
+ /** @mapping: pointer to VRAM mappable space */
+ void __iomem *mapping;
+ /** @ttm: VRAM TTM manager */
+ struct xe_ttm_vram_mgr ttm;
+ /** @placement: TTM placement dedicated for this region */
+ u32 placement;
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+ /** @migrate: Back pointer to migrate */
+ struct xe_migrate *migrate;
+ /** @pagemap: Used to remap device memory as ZONE_DEVICE */
+ struct dev_pagemap pagemap;
+ /**
+ * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
+ * pages of this tile.
+ */
+ struct drm_pagemap dpagemap;
+ /**
+ * @hpa_base: base host physical address
+ *
+ * This is generated when remapping device memory as ZONE_DEVICE.
+ */
+ resource_size_t hpa_base;
+#endif
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 22a98600fd8f..3764abca3d4f 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -39,7 +39,8 @@
* Register Immediate commands) once when initializing the device and saved in
* the default context. That default context is then used on every context
* creation to have a "primed golden context", i.e. a context image that
- * already contains the changes needed to all the registers.
+ * already contains the changes needed to all the registers. See
+ * drivers/gpu/drm/xe/xe_lrc.c for default context handling.
*
* - Engine workarounds: the list of these WAs is applied whenever the specific
* engine is reset. It's also possible that a set of engine classes share a
@@ -48,10 +49,10 @@
 * them need to keep the workaround programming: the approach taken in the
* driver is to tie those workarounds to the first compute/render engine that
* is registered. When executing with GuC submission, engine resets are
- * outside of kernel driver control, hence the list of registers involved in
+ * outside of kernel driver control, hence the list of registers involved is
* written once, on engine initialization, and then passed to GuC, that
* saves/restores their values before/after the reset takes place. See
- * ``drivers/gpu/drm/xe/xe_guc_ads.c`` for reference.
+ * drivers/gpu/drm/xe/xe_guc_ads.c for reference.
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
@@ -66,21 +67,39 @@
* hardware on every HW context restore. These buffers are created and
* programmed in the default context so the hardware always go through those
* programming sequences when switching contexts. The support for workaround
- * batchbuffers is enabled these hardware mechanisms:
+ * batchbuffers is enabled via these hardware mechanisms:
*
- * #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
- * context, pointing the hardware to jump to that location when that offset
- * is reached in the context restore. Workaround batchbuffer in the driver
- * currently uses this mechanism for all platforms.
+ * #. INDIRECT_CTX (also known as **mid context restore bb**): A batchbuffer
+ * and an offset are provided in the default context, pointing the hardware
+ * to jump to that location when that offset is reached in the context
+ * restore. When a context is being restored, this is executed after the
+ * ring context, in the middle (or beginning) of the engine context image.
*
- * #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
- * pointing the hardware to a buffer to continue executing after the
- * engine registers are restored in a context restore sequence. This is
- * currently not used in the driver.
+ * #. BB_PER_CTX_PTR (also known as **post context restore bb**): A
+ * batchbuffer is provided in the default context, pointing the hardware to
+ * a buffer to continue executing after the engine registers are restored
+ * in a context restore sequence.
+ *
+ * Below is the timeline for a context restore sequence:
+ *
+ * .. code::
+ *
+ * INDIRECT_CTX_OFFSET
+ * |----------->|
+ * .------------.------------.-------------.------------.--------------.-----------.
+ * |Ring | Engine | Mid-context | Engine | Post-context | Ring |
+ * |Restore | Restore (1)| BB Restore | Restore (2)| BB Restore | Execution |
+ * `------------'------------'-------------'------------'--------------'-----------'
*
* - Other/OOB: There are WAs that, due to their nature, cannot be applied from
* a central place. Those are peppered around the rest of the code, as needed.
- * Workarounds related to the display IP are the main example.
+ * There's a central place to control which workarounds are enabled:
+ * drivers/gpu/drm/xe/xe_wa_oob.rules for GT workarounds and
+ * drivers/gpu/drm/xe/xe_device_wa_oob.rules for device/SoC workarounds.
+ * These files only record which workarounds are enabled: during early device
+ * initialization those rules are evaluated and recorded by the driver. Then
+ * later the driver checks with ``XE_GT_WA()`` and ``XE_DEVICE_WA()`` to
+ * implement them.
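+ *
+ * An illustrative check (sketch only; the actual register programming is
+ * specific to each workaround):
+ *
+ * .. code::
+ *
+ *	if (XE_GT_WA(gt, 1607983814))
+ *		xe_mmio_rmw32(mmio, reg, clr, set);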
*
* .. [1] Technically, some registers are powercontext saved & restored, so they
* survive a suspend/resume. In practice, writing them again is not too
@@ -538,6 +557,11 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
/* Xe2_HPG */
@@ -602,6 +626,18 @@ static const struct xe_rtp_entry_sr engine_was[] = {
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT))
},
+ { XE_RTP_NAME("13012615864"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
+ },
+ { XE_RTP_NAME("18041344222"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute),
+ FUNC(xe_rtp_match_not_sriov_vf),
+ FUNC(xe_rtp_match_gt_has_discontiguous_dss_groups)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, EUSTALL_PERF_SAMPLING_DISABLE))
+ },
/* Xe2_LPM */
@@ -643,11 +679,14 @@ static const struct xe_rtp_entry_sr engine_was[] = {
},
{ XE_RTP_NAME("14023061436"),
XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute), OR,
+ GRAPHICS_VERSION_RANGE(3003, 3005),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_CHICKEN, QID_WAIT_FOR_THREAD_NOT_RUN_DISABLE))
},
{ XE_RTP_NAME("13012615864"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001), OR,
+ GRAPHICS_VERSION_RANGE(3003, 3005),
FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS))
},
@@ -658,9 +697,16 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTION_FLAG(ENGINE_BASE)))
},
{ XE_RTP_NAME("14021402888"),
- XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3003, 3005), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
+ { XE_RTP_NAME("18041344222"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3001),
+ FUNC(xe_rtp_match_first_render_or_compute),
+ FUNC(xe_rtp_match_not_sriov_vf),
+ FUNC(xe_rtp_match_gt_has_discontiguous_dss_groups)),
+ XE_RTP_ACTIONS(SET(TDL_CHICKEN, EUSTALL_PERF_SAMPLING_DISABLE))
+ },
};
static const struct xe_rtp_entry_sr lrc_was[] = {
@@ -868,6 +914,19 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_PARTIAL_AUTOSTRIP |
DIS_AUTOSTRIP))
},
+ { XE_RTP_NAME("22021007897"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
+ },
+ { XE_RTP_NAME("14024681466"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, 3005), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(XEHP_SLICE_COMMON_ECO_CHICKEN1, FAST_CLEAR_VALIGN_FIX))
+ },
+ { XE_RTP_NAME("15016589081"),
+ XE_RTP_RULES(GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0),
+ ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+ },
};
static __maybe_unused const struct xe_rtp_entry oob_was[] = {
@@ -905,13 +964,13 @@ void xe_wa_process_device_oob(struct xe_device *xe)
}
/**
- * xe_wa_process_oob - process OOB workaround table
+ * xe_wa_process_gt_oob - process GT OOB workaround table
* @gt: GT instance to process workarounds for
*
* Process OOB workaround table for this platform, marking in @gt the
* workarounds that are active.
*/
-void xe_wa_process_oob(struct xe_gt *gt)
+void xe_wa_process_gt_oob(struct xe_gt *gt)
{
struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
@@ -995,12 +1054,12 @@ int xe_wa_device_init(struct xe_device *xe)
}
/**
- * xe_wa_init - initialize gt with workaround bookkeeping
+ * xe_wa_gt_init - initialize gt with workaround bookkeeping
* @gt: GT instance to initialize
*
* Returns 0 for success, negative error code otherwise.
*/
-int xe_wa_init(struct xe_gt *gt)
+int xe_wa_gt_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
size_t n_oob, n_lrc, n_engine, n_gt, total;
@@ -1026,7 +1085,7 @@ int xe_wa_init(struct xe_gt *gt)
return 0;
}
-ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */
+ALLOW_ERROR_INJECTION(xe_wa_gt_init, ERRNO); /* See xe_pci_probe() */
void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p)
{
@@ -1038,7 +1097,14 @@ void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p)
drm_printf_indent(p, 1, "%s\n", device_oob_was[idx].name);
}
-void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
+/**
+ * xe_wa_gt_dump() - Dump GT workarounds into a drm printer.
+ * @gt: the &xe_gt
+ * @p: the &drm_printer
+ *
+ * Return: always 0.
+ */
+int xe_wa_gt_dump(struct xe_gt *gt, struct drm_printer *p)
{
size_t idx;
@@ -1046,18 +1112,22 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p)
for_each_set_bit(idx, gt->wa_active.gt, ARRAY_SIZE(gt_was))
drm_printf_indent(p, 1, "%s\n", gt_was[idx].name);
- drm_printf(p, "\nEngine Workarounds\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "Engine Workarounds\n");
for_each_set_bit(idx, gt->wa_active.engine, ARRAY_SIZE(engine_was))
drm_printf_indent(p, 1, "%s\n", engine_was[idx].name);
- drm_printf(p, "\nLRC Workarounds\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "LRC Workarounds\n");
for_each_set_bit(idx, gt->wa_active.lrc, ARRAY_SIZE(lrc_was))
drm_printf_indent(p, 1, "%s\n", lrc_was[idx].name);
- drm_printf(p, "\nOOB Workarounds\n");
+ drm_puts(p, "\n");
+ drm_printf(p, "OOB Workarounds\n");
for_each_set_bit(idx, gt->wa_active.oob, ARRAY_SIZE(oob_was))
if (oob_was[idx].name)
drm_printf_indent(p, 1, "%s\n", oob_was[idx].name);
+ return 0;
}
/*
@@ -1079,6 +1149,6 @@ void xe_wa_apply_tile_workarounds(struct xe_tile *tile)
if (IS_SRIOV_VF(tile->xe))
return;
- if (XE_WA(tile->primary_gt, 22010954014))
+ if (XE_DEVICE_WA(tile->xe, 22010954014))
xe_mmio_rmw32(mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
}
diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h
index f3880c65cb8d..8fd6a5af0910 100644
--- a/drivers/gpu/drm/xe/xe_wa.h
+++ b/drivers/gpu/drm/xe/xe_wa.h
@@ -14,22 +14,22 @@ struct xe_hw_engine;
struct xe_tile;
int xe_wa_device_init(struct xe_device *xe);
-int xe_wa_init(struct xe_gt *gt);
+int xe_wa_gt_init(struct xe_gt *gt);
void xe_wa_process_device_oob(struct xe_device *xe);
-void xe_wa_process_oob(struct xe_gt *gt);
+void xe_wa_process_gt_oob(struct xe_gt *gt);
void xe_wa_process_gt(struct xe_gt *gt);
void xe_wa_process_engine(struct xe_hw_engine *hwe);
void xe_wa_process_lrc(struct xe_hw_engine *hwe);
void xe_wa_apply_tile_workarounds(struct xe_tile *tile);
void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p);
-void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p);
+int xe_wa_gt_dump(struct xe_gt *gt, struct drm_printer *p);
/**
- * XE_WA - Out-of-band workarounds, to be queried and called as needed.
+ * XE_GT_WA - Out-of-band GT workarounds, to be queried and called as needed.
* @gt__: gt instance
* @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h
*/
-#define XE_WA(gt__, id__) ({ \
+#define XE_GT_WA(gt__, id__) ({ \
xe_gt_assert(gt__, (gt__)->wa_active.oob_initialized); \
test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \
})
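
A minimal usage sketch of the renamed macro, assuming a workaround id that actually exists in xe_wa_oob.rules; the id and register are paired here purely for illustration, reusing the rmw call visible later in this diff:

```c
/* Sketch only: query a GT-scoped OOB workaround before touching hardware.
 * XE_WA_OOB_* bits are generated from xe_wa_oob.rules at build time.
 */
if (XE_GT_WA(gt, 16023588340))
	xe_mmio_rmw32(&gt->mmio, XEHP_CLOCK_GATE_DIS, 0, SGSI_SIDECLK_DIS);
```
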
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 710f4423726c..fb38eb3d6e9a 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -1,4 +1,6 @@
1607983814 GRAPHICS_VERSION_RANGE(1200, 1210)
+16010904313 GRAPHICS_VERSION_RANGE(1200, 1210)
+18022495364 GRAPHICS_VERSION_RANGE(1200, 1210)
22012773006 GRAPHICS_VERSION_RANGE(1200, 1250)
14014475959 GRAPHICS_VERSION_RANGE(1270, 1271), GRAPHICS_STEP(A0, B0)
PLATFORM(DG2)
@@ -9,10 +11,9 @@
18020744125 PLATFORM(PVC)
1509372804 PLATFORM(PVC), GRAPHICS_STEP(A0, C0)
1409600907 GRAPHICS_VERSION_RANGE(1200, 1250)
-14016763929 SUBPLATFORM(DG2, G10)
+22014953428 SUBPLATFORM(DG2, G10)
SUBPLATFORM(DG2, G12)
16017236439 PLATFORM(PVC)
-22010954014 PLATFORM(DG2)
14019821291 MEDIA_VERSION_RANGE(1300, 2000)
14015076503 MEDIA_VERSION(1300)
16020292621 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
@@ -32,22 +33,21 @@
13011645652 GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
GRAPHICS_VERSION(3003)
+ GRAPHICS_VERSION_RANGE(3004, 3005)
14022293748 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
- GRAPHICS_VERSION_RANGE(3000, 3001)
- GRAPHICS_VERSION(3003)
+ GRAPHICS_VERSION_RANGE(3000, 3005)
22019794406 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
GRAPHICS_VERSION(3003)
+ GRAPHICS_VERSION_RANGE(3004, 3005)
22019338487 MEDIA_VERSION(2000)
GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
-22019338487_display PLATFORM(LUNARLAKE)
16023588340 GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
14019789679 GRAPHICS_VERSION(1255)
GRAPHICS_VERSION_RANGE(1270, 2004)
-no_media_l3 MEDIA_VERSION(3000)
14022866841 GRAPHICS_VERSION(3000), GRAPHICS_STEP(A0, B0)
MEDIA_VERSION(3000), MEDIA_STEP(A0, B0)
16021333562 GRAPHICS_VERSION_RANGE(1200, 1274)
@@ -62,14 +62,17 @@ no_media_l3 MEDIA_VERSION(3000)
16023105232 GRAPHICS_VERSION_RANGE(2001, 3001)
MEDIA_VERSION_RANGE(1301, 3000)
MEDIA_VERSION(3002)
- GRAPHICS_VERSION(3003)
+ GRAPHICS_VERSION_RANGE(3003, 3005)
16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
MEDIA_VERSION_RANGE(1300, 3000)
MEDIA_VERSION(3002)
- GRAPHICS_VERSION(3003)
-
-# SoC workaround - currently applies to all platforms with the following
-# primary GT GMDID
-14022085890 GRAPHICS_VERSION(2001)
+ GRAPHICS_VERSION_RANGE(3003, 3005)
+14020001231 GRAPHICS_VERSION_RANGE(2001, 2004), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(3000), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(3002), FUNC(xe_rtp_match_psmi_enabled)
+16023683509 MEDIA_VERSION(2000), FUNC(xe_rtp_match_psmi_enabled)
+ MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_psmi_enabled)
15015404425_disable PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
+16026007364 MEDIA_VERSION(3000)
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 1bda7ef606cc..4fa45dbe1dcb 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -18,6 +18,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_print.h>
#include <xen/platform_pci.h>
#include <xen/xen.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 63112ed975c4..386ae7441093 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -15,6 +15,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <xen/balloon.h>
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 806ec66ee7f7..48772b5fe71c 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -16,6 +16,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index 2bee0a2275ed..02f3a7d78cf8 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -19,6 +19,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
+#include <drm/drm_dumb_buffers.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fbdev_dma.h>
#include <drm/drm_fourcc.h>
@@ -363,10 +364,12 @@ static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
- unsigned int pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ int ret;
/* Enforce the alignment constraints of the DMA engine. */
- args->pitch = ALIGN(pitch, dpsub->dma_align);
+ ret = drm_mode_size_dumb(drm, args, dpsub->dma_align, 0);
+ if (ret)
+ return ret;
return drm_gem_dma_dumb_create_internal(file_priv, drm, args);
}
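
The helper consolidates the pitch and size computation drivers used to open-code. Under the arguments passed above (pitch alignment of dpsub->dma_align, no size alignment), it performs roughly the equivalent of the following sketch; the real helper also range-checks the multiplications for overflow:

```c
/* Approximate expansion of drm_mode_size_dumb(drm, args, dpsub->dma_align, 0) */
u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

args->pitch = ALIGN(pitch, dpsub->dma_align);
args->size = (u64)args->pitch * args->height;
```
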
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 344cc9e741c1..723a80895cd4 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -471,6 +471,18 @@ static int host1x_device_add(struct host1x *host1x,
mutex_unlock(&clients_lock);
+ /*
+	 * Add the device even if there are no subdevs, to ensure syncpoint
+	 * functionality is available regardless of whether any engine
+	 * subdevices are present.
+ */
+ if (list_empty(&device->subdevs)) {
+ err = device_add(&device->dev);
+ if (err < 0)
+ dev_err(&device->dev, "failed to add device: %d\n", err);
+ else
+ device->registered = true;
+ }
+
return 0;
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 1f93e5e276c0..3f475f0e6545 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -71,6 +71,15 @@ u32 host1x_sync_readl(struct host1x *host1x, u32 r)
return readl(sync_regs + r);
}
+#ifdef CONFIG_64BIT
+u64 host1x_sync_readq(struct host1x *host1x, u32 r)
+{
+ void __iomem *sync_regs = host1x->regs + host1x->info->sync_offset;
+
+ return readq(sync_regs + r);
+}
+#endif
+
void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r)
{
writel(v, ch->regs + r);
@@ -585,14 +594,8 @@ static int host1x_probe(struct platform_device *pdev)
}
host->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(host->clk)) {
- err = PTR_ERR(host->clk);
-
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to get clock: %d\n", err);
-
- return err;
- }
+ if (IS_ERR(host->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(host->clk), "failed to get clock\n");
err = host1x_get_resets(host);
if (err)
@@ -821,6 +824,7 @@ u64 host1x_get_dma_mask(struct host1x *host1x)
}
EXPORT_SYMBOL(host1x_get_dma_mask);
+MODULE_SOFTDEP("post: tegra-drm");
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index d3855a1c6b47..ef44618ed88a 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -179,6 +179,9 @@ void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r);
u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r);
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r);
u32 host1x_sync_readl(struct host1x *host1x, u32 r);
+#ifdef CONFIG_64BIT
+u64 host1x_sync_readq(struct host1x *host1x, u32 r);
+#endif
void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r);
u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index d44b8de890be..2df6a16d484e 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -47,24 +47,11 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
}
}
-static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
- u32 next_class)
+static void submit_wait(struct host1x_job *job, u32 id, u32 threshold)
{
struct host1x_cdma *cdma = &job->channel->cdma;
-#if HOST1X_HW >= 6
- u32 stream_id;
-
- /*
- * If a memory context has been set, use it. Otherwise
- * (if context isolation is disabled) use the engine's
- * firmware stream ID.
- */
- if (job->memory_context)
- stream_id = job->memory_context->stream_id;
- else
- stream_id = job->engine_fallback_streamid;
-
+#if HOST1X_HW >= 2
host1x_cdma_push_wide(cdma,
host1x_opcode_setclass(
HOST1X_CLASS_HOST1X,
@@ -76,23 +63,6 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
id,
HOST1X_OPCODE_NOP
);
- host1x_cdma_push_wide(&job->channel->cdma,
- host1x_opcode_setclass(job->class, 0, 0),
- host1x_opcode_setpayload(stream_id),
- host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
- HOST1X_OPCODE_NOP);
-#elif HOST1X_HW >= 2
- host1x_cdma_push_wide(cdma,
- host1x_opcode_setclass(
- HOST1X_CLASS_HOST1X,
- HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32,
- /* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */
- BIT(0) | BIT(2)
- ),
- threshold,
- id,
- host1x_opcode_setclass(next_class, 0, 0)
- );
#else
/* TODO add waitchk or use waitbases or other mitigation */
host1x_cdma_push(cdma,
@@ -103,6 +73,32 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
),
host1x_class_host_wait_syncpt(id, threshold)
);
+#endif
+}
+
+static void submit_setclass(struct host1x_job *job, u32 next_class)
+{
+ struct host1x_cdma *cdma = &job->channel->cdma;
+
+#if HOST1X_HW >= 6
+ u32 stream_id;
+
+ /*
+ * If a memory context has been set, use it. Otherwise
+ * (if context isolation is disabled) use the engine's
+ * firmware stream ID.
+ */
+ if (job->memory_context)
+ stream_id = job->memory_context->stream_id;
+ else
+ stream_id = job->engine_fallback_streamid;
+
+ host1x_cdma_push_wide(cdma,
+ host1x_opcode_setclass(next_class, 0, 0),
+ host1x_opcode_setpayload(stream_id),
+ host1x_opcode_setstreamid(job->engine_streamid_offset / 4),
+ HOST1X_OPCODE_NOP);
+#else
host1x_cdma_push(cdma,
host1x_opcode_setclass(next_class, 0, 0),
HOST1X_OPCODE_NOP
@@ -110,7 +106,8 @@ static void submit_wait(struct host1x_job *job, u32 id, u32 threshold,
#endif
}
-static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
+static void submit_gathers(struct host1x_job *job, struct host1x_job_cmd *cmds, u32 num_cmds,
+ u32 job_syncpt_base)
{
struct host1x_cdma *cdma = &job->channel->cdma;
#if HOST1X_HW < 6
@@ -119,8 +116,8 @@ static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
unsigned int i;
u32 threshold;
- for (i = 0; i < job->num_cmds; i++) {
- struct host1x_job_cmd *cmd = &job->cmds[i];
+ for (i = 0; i < num_cmds; i++) {
+ struct host1x_job_cmd *cmd = &cmds[i];
if (cmd->is_wait) {
if (cmd->wait.relative)
@@ -128,7 +125,8 @@ static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
else
threshold = cmd->wait.threshold;
- submit_wait(job, cmd->wait.id, threshold, cmd->wait.next_class);
+ submit_wait(job, cmd->wait.id, threshold);
+ submit_setclass(job, cmd->wait.next_class);
} else {
struct host1x_job_gather *g = &cmd->gather;
@@ -216,7 +214,34 @@ static void channel_program_cdma(struct host1x_job *job)
#if HOST1X_HW >= 6
u32 fence;
+ int i = 0;
+
+ if (job->num_cmds == 0)
+ goto prefences_done;
+ if (!job->cmds[0].is_wait || job->cmds[0].wait.relative)
+ goto prefences_done;
+
+ /* Enter host1x class with invalid stream ID for prefence waits. */
+ host1x_cdma_push_wide(cdma,
+ host1x_opcode_acquire_mlock(1),
+ host1x_opcode_setclass(1, 0, 0),
+ host1x_opcode_setpayload(0),
+ host1x_opcode_setstreamid(0x1fffff));
+
+ for (i = 0; i < job->num_cmds; i++) {
+ struct host1x_job_cmd *cmd = &job->cmds[i];
+
+ if (!cmd->is_wait || cmd->wait.relative)
+ break;
+
+ submit_wait(job, cmd->wait.id, cmd->wait.threshold);
+ }
+
+ host1x_cdma_push(cdma,
+ HOST1X_OPCODE_NOP,
+ host1x_opcode_release_mlock(1));
+prefences_done:
/* Enter engine class with invalid stream ID. */
host1x_cdma_push_wide(cdma,
host1x_opcode_acquire_mlock(job->class),
@@ -230,11 +255,12 @@ static void channel_program_cdma(struct host1x_job *job)
host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1),
HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) |
HOST1X_UCLASS_INCR_SYNCPT_COND_F(4));
- submit_wait(job, job->syncpt->id, fence, job->class);
+ submit_wait(job, job->syncpt->id, fence);
+ submit_setclass(job, job->class);
/* Submit work. */
job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs);
- submit_gathers(job, job->syncpt_end - job->syncpt_incrs);
+ submit_gathers(job, job->cmds + i, job->num_cmds - i, job->syncpt_end - job->syncpt_incrs);
/* Before releasing MLOCK, ensure engine is idle again. */
fence = host1x_syncpt_incr_max(sp, 1);
@@ -242,7 +268,7 @@ static void channel_program_cdma(struct host1x_job *job)
host1x_opcode_nonincr(HOST1X_UCLASS_INCR_SYNCPT, 1),
HOST1X_UCLASS_INCR_SYNCPT_INDX_F(job->syncpt->id) |
HOST1X_UCLASS_INCR_SYNCPT_COND_F(4));
- submit_wait(job, job->syncpt->id, fence, job->class);
+ submit_wait(job, job->syncpt->id, fence);
/* Release MLOCK. */
host1x_cdma_push(cdma,
@@ -272,7 +298,7 @@ static void channel_program_cdma(struct host1x_job *job)
job->syncpt_end = host1x_syncpt_incr_max(sp, job->syncpt_incrs);
- submit_gathers(job, job->syncpt_end - job->syncpt_incrs);
+ submit_gathers(job, job->cmds, job->num_cmds, job->syncpt_end - job->syncpt_incrs);
#endif
}
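
Taken together, these hunks reorder the command stream so that all leading absolute prefence waits run under the host1x class, before the engine MLOCK is taken. A sketch of the resulting stream on the HOST1X_HW >= 6 path, using the opcodes from the code above:

```c
/*
 * acquire_mlock(1)                             host1x class
 * setclass(1), setpayload(0), setstreamid(0x1fffff)
 * submit_wait(id, threshold) ...               one per leading prefence
 * release_mlock(1)
 *
 * acquire_mlock(job->class)                    engine class, as before
 * setclass/setpayload/setstreamid              via submit_setclass()
 * submit_gathers(job, job->cmds + i, job->num_cmds - i, base)
 * ...
 * release_mlock(job->class)
 */
```
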
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 415f8d7e4202..bd5b5ef62f35 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -11,26 +11,64 @@
#include "../intr.h"
#include "../dev.h"
+static void process_32_syncpts(struct host1x *host, unsigned long val, u32 reg_offset)
+{
+ unsigned int id;
+
+ if (!val)
+ return;
+
+ host1x_sync_writel(host, val, HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(reg_offset));
+ host1x_sync_writel(host, val, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(reg_offset));
+
+ for_each_set_bit(id, &val, 32)
+ host1x_intr_handle_interrupt(host, reg_offset * 32 + id);
+}
+
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
struct host1x_intr_irq_data *irq_data = dev_id;
struct host1x *host = irq_data->host;
unsigned long reg;
- unsigned int i, id;
+ unsigned int i;
+#if !defined(CONFIG_64BIT)
for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 32);
i += host->num_syncpt_irqs) {
reg = host1x_sync_readl(host,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
- host1x_sync_writel(host, reg,
- HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
- host1x_sync_writel(host, reg,
+ process_32_syncpts(host, reg, i);
+ }
+#elif HOST1X_HW == 6 || HOST1X_HW == 7
+ /*
+	 * On Tegra186 and Tegra194, the first INT_STATUS register is not
+	 * 64-bit aligned, and there is only one interrupt line.
+ */
+ reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(0));
+ process_32_syncpts(host, reg, 0);
+
+ for (i = 1; i < (host->info->nb_pts / 32) - 1; i += 2) {
+ reg = host1x_sync_readq(host,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
- for_each_set_bit(id, &reg, 32)
- host1x_intr_handle_interrupt(host, i * 32 + id);
+ process_32_syncpts(host, lower_32_bits(reg), i);
+ process_32_syncpts(host, upper_32_bits(reg), i + 1);
+ }
+
+ reg = host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+ process_32_syncpts(host, reg, i);
+#else
+	/* All 64-bit capable SoCs have a number of syncpoints divisible by 64 */
+ for (i = irq_data->offset; i < DIV_ROUND_UP(host->info->nb_pts, 64);
+ i += host->num_syncpt_irqs) {
+ reg = host1x_sync_readq(host,
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i * 2));
+
+ process_32_syncpts(host, lower_32_bits(reg), i * 2 + 0);
+ process_32_syncpts(host, upper_32_bits(reg), i * 2 + 1);
}
+#endif
return IRQ_HANDLED;
}
@@ -68,12 +106,12 @@ host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
/*
* Program threshold interrupt destination among 8 lines per VM,
- * per syncpoint. For each group of 32 syncpoints (corresponding to one
- * interrupt status register), direct to one interrupt line, going
+ * per syncpoint. For each group of 64 syncpoints (corresponding to two
+ * interrupt status registers), direct to one interrupt line, going
* around in a round robin fashion.
*/
for (id = 0; id < host->info->nb_pts; id++) {
- u32 reg_offset = id / 32;
+ u32 reg_offset = id / 64;
u32 irq_index = reg_offset % host->num_syncpt_irqs;
host1x_sync_writel(host, irq_index, HOST1X_SYNC_SYNCPT_INTR_DEST(id));
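
With two 32-bit status registers consumed per 64-bit read, the destination programming above pairs adjacent registers onto one interrupt line. A sketch of the mapping, with hypothetical nb_pts/num_syncpt_irqs values chosen only for illustration:

```c
/* Assuming 8 syncpoint IRQ lines:
 *
 *   syncpoints   0..63   -> status regs 0,1   -> IRQ line 0
 *   syncpoints  64..127  -> status regs 2,3   -> IRQ line 1
 *   ...
 *   syncpoints 512..575  -> status regs 16,17 -> IRQ line 0 (wraps)
 */
u32 reg_offset = id / 64;
u32 irq_index = reg_offset % host->num_syncpt_irqs;
```
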
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index f63d14a57a1d..acc7d82e0585 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -345,8 +345,6 @@ static void syncpt_release(struct kref *ref)
sp->locked = false;
- mutex_lock(&sp->host->syncpt_mutex);
-
host1x_syncpt_base_free(sp->base);
kfree(sp->name);
sp->base = NULL;
@@ -369,7 +367,7 @@ void host1x_syncpt_put(struct host1x_syncpt *sp)
if (!sp)
return;
- kref_put(&sp->ref, syncpt_release);
+ kref_put_mutex(&sp->ref, syncpt_release, &sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_put);
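
kref_put_mutex() closes the window between the final kref_put() and taking syncpt_mutex inside the release callback: the mutex is acquired only when the refcount actually drops to zero, and the release function runs with it held. A simplified sketch of its behaviour:

```c
/* Simplified sketch of kref_put_mutex() (see include/linux/kref.h). */
if (refcount_dec_and_mutex_lock(&sp->ref.refcount,
				&sp->host->syncpt_mutex))
	syncpt_release(&sp->ref);	/* called with syncpt_mutex held;
					 * release must unlock it */
```
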
diff --git a/drivers/gpu/nova-core/bitfield.rs b/drivers/gpu/nova-core/bitfield.rs
new file mode 100644
index 000000000000..16e143658c51
--- /dev/null
+++ b/drivers/gpu/nova-core/bitfield.rs
@@ -0,0 +1,330 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Bitfield library for Rust structures
+//!
+//! Support for defining bitfields in Rust structures. Also used by the [`register!`] macro.
+
+/// Defines a struct with accessors to access bits within an inner unsigned integer.
+///
+/// # Syntax
+///
+/// ```rust
+/// use nova_core::bitfield;
+///
+/// #[derive(Debug, Clone, Copy, Default)]
+/// enum Mode {
+/// #[default]
+/// Low = 0,
+/// High = 1,
+/// Auto = 2,
+/// }
+///
+/// impl TryFrom<u8> for Mode {
+/// type Error = u8;
+/// fn try_from(value: u8) -> Result<Self, Self::Error> {
+/// match value {
+/// 0 => Ok(Mode::Low),
+/// 1 => Ok(Mode::High),
+/// 2 => Ok(Mode::Auto),
+/// _ => Err(value),
+/// }
+/// }
+/// }
+///
+/// impl From<Mode> for u8 {
+/// fn from(mode: Mode) -> u8 {
+/// mode as u8
+/// }
+/// }
+///
+/// #[derive(Debug, Clone, Copy, Default)]
+/// enum State {
+/// #[default]
+/// Inactive = 0,
+/// Active = 1,
+/// }
+///
+/// impl From<bool> for State {
+/// fn from(value: bool) -> Self {
+/// if value { State::Active } else { State::Inactive }
+/// }
+/// }
+///
+/// impl From<State> for bool {
+/// fn from(state: State) -> bool {
+/// match state {
+/// State::Inactive => false,
+/// State::Active => true,
+/// }
+/// }
+/// }
+///
+/// bitfield! {
+/// pub struct ControlReg(u32) {
+/// 7:7 state as bool => State;
+/// 3:0 mode as u8 ?=> Mode;
+/// }
+/// }
+/// ```
+///
+/// This generates a struct with:
+/// - Field accessors: `mode()`, `state()`, etc.
+/// - Field setters: `set_mode()`, `set_state()`, etc. (supports chaining with builder pattern).
+/// Note that the compiler will error out if the size of the setter's arg exceeds the
+/// struct's storage size.
+/// - Debug and Default implementations.
+///
+/// Note: Field accessors and setters inherit the same visibility as the struct itself.
+/// In the example above, both `mode()` and `set_mode()` methods will be `pub`.
+///
+/// Fields are defined as follows:
+///
+/// - `as <type>` simply returns the field value cast to <type>, typically `u32`, `u16`, `u8` or
+/// `bool`. Note that `bool` fields must have a range of 1 bit.
+/// - `as <type> => <into_type>` calls `<into_type>`'s `From::<<type>>` implementation and returns
+/// the result.
+/// - `as <type> ?=> <try_into_type>` calls `<try_into_type>`'s `TryFrom::<<type>>` implementation
+/// and returns the result. This is useful with fields for which not all values are valid.
+macro_rules! bitfield {
+ // Main entry point - defines the bitfield struct with fields
+ ($vis:vis struct $name:ident($storage:ty) $(, $comment:literal)? { $($fields:tt)* }) => {
+ bitfield!(@core $vis $name $storage $(, $comment)? { $($fields)* });
+ };
+
+ // All rules below are helpers.
+
+ // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`,
+ // `Default`, and conversion to the value type) and field accessor methods.
+ (@core $vis:vis $name:ident $storage:ty $(, $comment:literal)? { $($fields:tt)* }) => {
+ $(
+ #[doc=$comment]
+ )?
+ #[repr(transparent)]
+ #[derive(Clone, Copy)]
+ $vis struct $name($storage);
+
+ impl ::core::convert::From<$name> for $storage {
+ fn from(val: $name) -> $storage {
+ val.0
+ }
+ }
+
+ bitfield!(@fields_dispatcher $vis $name $storage { $($fields)* });
+ };
+
+ // Captures the fields and passes them to all the implementers that require field information.
+ //
+ // Used to simplify the matching rules for implementers, so they don't need to match the entire
+ // complex fields rule even though they only make use of part of it.
+ (@fields_dispatcher $vis:vis $name:ident $storage:ty {
+ $($hi:tt:$lo:tt $field:ident as $type:tt
+ $(?=> $try_into_type:ty)?
+ $(=> $into_type:ty)?
+ $(, $comment:literal)?
+ ;
+ )*
+ }
+ ) => {
+ bitfield!(@field_accessors $vis $name $storage {
+ $(
+ $hi:$lo $field as $type
+ $(?=> $try_into_type)?
+ $(=> $into_type)?
+ $(, $comment)?
+ ;
+ )*
+ });
+ bitfield!(@debug $name { $($field;)* });
+ bitfield!(@default $name { $($field;)* });
+ };
+
+ // Defines all the field getter/setter methods for `$name`.
+ (
+ @field_accessors $vis:vis $name:ident $storage:ty {
+ $($hi:tt:$lo:tt $field:ident as $type:tt
+ $(?=> $try_into_type:ty)?
+ $(=> $into_type:ty)?
+ $(, $comment:literal)?
+ ;
+ )*
+ }
+ ) => {
+ $(
+ bitfield!(@check_field_bounds $hi:$lo $field as $type);
+ )*
+
+ #[allow(dead_code)]
+ impl $name {
+ $(
+ bitfield!(@field_accessor $vis $name $storage, $hi:$lo $field as $type
+ $(?=> $try_into_type)?
+ $(=> $into_type)?
+ $(, $comment)?
+ ;
+ );
+ )*
+ }
+ };
+
+ // Boolean fields must have `$hi == $lo`.
+ (@check_field_bounds $hi:tt:$lo:tt $field:ident as bool) => {
+ #[allow(clippy::eq_op)]
+ const _: () = {
+ ::kernel::build_assert!(
+ $hi == $lo,
+ concat!("boolean field `", stringify!($field), "` covers more than one bit")
+ );
+ };
+ };
+
+ // Non-boolean fields must have `$hi >= $lo`.
+ (@check_field_bounds $hi:tt:$lo:tt $field:ident as $type:tt) => {
+ #[allow(clippy::eq_op)]
+ const _: () = {
+ ::kernel::build_assert!(
+ $hi >= $lo,
+ concat!("field `", stringify!($field), "`'s MSB is smaller than its LSB")
+ );
+ };
+ };
+
+ // Catches fields defined as `bool` and convert them into a boolean value.
+ (
+ @field_accessor $vis:vis $name:ident $storage:ty, $hi:tt:$lo:tt $field:ident as bool
+ => $into_type:ty $(, $comment:literal)?;
+ ) => {
+ bitfield!(
+ @leaf_accessor $vis $name $storage, $hi:$lo $field
+ { |f| <$into_type>::from(f != 0) }
+ bool $into_type => $into_type $(, $comment)?;
+ );
+ };
+
+ // Shortcut for fields defined as `bool` without the `=>` syntax.
+ (
+ @field_accessor $vis:vis $name:ident $storage:ty, $hi:tt:$lo:tt $field:ident as bool
+ $(, $comment:literal)?;
+ ) => {
+ bitfield!(
+ @field_accessor $vis $name $storage, $hi:$lo $field as bool => bool $(, $comment)?;
+ );
+ };
+
+ // Catches the `?=>` syntax for non-boolean fields.
+ (
+ @field_accessor $vis:vis $name:ident $storage:ty, $hi:tt:$lo:tt $field:ident as $type:tt
+ ?=> $try_into_type:ty $(, $comment:literal)?;
+ ) => {
+ bitfield!(@leaf_accessor $vis $name $storage, $hi:$lo $field
+ { |f| <$try_into_type>::try_from(f as $type) } $type $try_into_type =>
+ ::core::result::Result<
+ $try_into_type,
+ <$try_into_type as ::core::convert::TryFrom<$type>>::Error
+ >
+ $(, $comment)?;);
+ };
+
+ // Catches the `=>` syntax for non-boolean fields.
+ (
+ @field_accessor $vis:vis $name:ident $storage:ty, $hi:tt:$lo:tt $field:ident as $type:tt
+ => $into_type:ty $(, $comment:literal)?;
+ ) => {
+ bitfield!(@leaf_accessor $vis $name $storage, $hi:$lo $field
+ { |f| <$into_type>::from(f as $type) } $type $into_type => $into_type $(, $comment)?;);
+ };
+
+ // Shortcut for non-boolean fields defined without the `=>` or `?=>` syntax.
+ (
+ @field_accessor $vis:vis $name:ident $storage:ty, $hi:tt:$lo:tt $field:ident as $type:tt
+ $(, $comment:literal)?;
+ ) => {
+ bitfield!(
+ @field_accessor $vis $name $storage, $hi:$lo $field as $type => $type $(, $comment)?;
+ );
+ };
+
+ // Generates the accessor methods for a single field.
+ (
+ @leaf_accessor $vis:vis $name:ident $storage:ty, $hi:tt:$lo:tt $field:ident
+ { $process:expr } $prim_type:tt $to_type:ty => $res_type:ty $(, $comment:literal)?;
+ ) => {
+ ::kernel::macros::paste!(
+ const [<$field:upper _RANGE>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
+ const [<$field:upper _MASK>]: $storage = {
+ // Generate mask for shifting
+ match ::core::mem::size_of::<$storage>() {
+ 1 => ::kernel::bits::genmask_u8($lo..=$hi) as $storage,
+ 2 => ::kernel::bits::genmask_u16($lo..=$hi) as $storage,
+ 4 => ::kernel::bits::genmask_u32($lo..=$hi) as $storage,
+ 8 => ::kernel::bits::genmask_u64($lo..=$hi) as $storage,
+ _ => ::kernel::build_error!("Unsupported storage type size")
+ }
+ };
+ const [<$field:upper _SHIFT>]: u32 = $lo;
+ );
+
+ $(
+ #[doc="Returns the value of this field:"]
+ #[doc=$comment]
+ )?
+ #[inline(always)]
+ $vis fn $field(self) -> $res_type {
+ ::kernel::macros::paste!(
+ const MASK: $storage = $name::[<$field:upper _MASK>];
+ const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
+ );
+ let field = ((self.0 & MASK) >> SHIFT);
+
+ $process(field)
+ }
+
+ ::kernel::macros::paste!(
+ $(
+ #[doc="Sets the value of this field:"]
+ #[doc=$comment]
+ )?
+ #[inline(always)]
+ $vis fn [<set_ $field>](mut self, value: $to_type) -> Self {
+ const MASK: $storage = $name::[<$field:upper _MASK>];
+ const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
+ let value = ($storage::from($prim_type::from(value)) << SHIFT) & MASK;
+ self.0 = (self.0 & !MASK) | value;
+
+ self
+ }
+ );
+ };
+
+ // Generates the `Debug` implementation for `$name`.
+ (@debug $name:ident { $($field:ident;)* }) => {
+ impl ::kernel::fmt::Debug for $name {
+ fn fmt(&self, f: &mut ::kernel::fmt::Formatter<'_>) -> ::kernel::fmt::Result {
+ f.debug_struct(stringify!($name))
+ .field("<raw>", &::kernel::prelude::fmt!("{:#x}", &self.0))
+ $(
+ .field(stringify!($field), &self.$field())
+ )*
+ .finish()
+ }
+ }
+ };
+
+ // Generates the `Default` implementation for `$name`.
+ (@default $name:ident { $($field:ident;)* }) => {
+ /// Returns a value for the bitfield where all fields are set to their default value.
+ impl ::core::default::Default for $name {
+ fn default() -> Self {
+ #[allow(unused_mut)]
+ let mut value = Self(Default::default());
+
+ ::kernel::macros::paste!(
+ $(
+ value.[<set_ $field>](Default::default());
+ )*
+ );
+
+ value
+ }
+ }
+ };
+}
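
The generated accessors reduce to the same mask-and-shift arithmetic the kernel's C side expresses with GENMASK()/FIELD_GET()/FIELD_PREP(). A sketch using the hypothetical ControlReg layout from the doc-comment above (read_control_reg() is a placeholder):

```c
#include <linux/bitfield.h>
#include <linux/bits.h>

#define CTRL_MODE	GENMASK(3, 0)	/* 3:0 mode  */
#define CTRL_STATE	BIT(7)		/* 7:7 state */

u32 reg = read_control_reg();			/* hypothetical accessor  */
u8 mode = FIELD_GET(CTRL_MODE, reg);		/* ControlReg::mode()     */
reg &= ~CTRL_MODE;				/* ControlReg::set_mode() */
reg |= FIELD_PREP(CTRL_MODE, new_mode);
```
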
diff --git a/drivers/gpu/nova-core/dma.rs b/drivers/gpu/nova-core/dma.rs
index 94f44bcfd748..7215398969da 100644
--- a/drivers/gpu/nova-core/dma.rs
+++ b/drivers/gpu/nova-core/dma.rs
@@ -2,12 +2,17 @@
//! Simple DMA object wrapper.
-use core::ops::{Deref, DerefMut};
-
-use kernel::device;
-use kernel::dma::CoherentAllocation;
-use kernel::page::PAGE_SIZE;
-use kernel::prelude::*;
+use core::ops::{
+ Deref,
+ DerefMut, //
+};
+
+use kernel::{
+ device,
+ dma::CoherentAllocation,
+ page::PAGE_SIZE,
+ prelude::*, //
+};
pub(crate) struct DmaObject {
dma: CoherentAllocation<u8>,
@@ -25,20 +30,11 @@ impl DmaObject {
}
pub(crate) fn from_data(dev: &device::Device<device::Bound>, data: &[u8]) -> Result<Self> {
- Self::new(dev, data.len()).map(|mut dma_obj| {
- // TODO[COHA]: replace with `CoherentAllocation::write()` once available.
- // SAFETY:
- // - `dma_obj`'s size is at least `data.len()`.
- // - We have just created this object and there is no other user at this stage.
- unsafe {
- core::ptr::copy_nonoverlapping(
- data.as_ptr(),
- dma_obj.dma.start_ptr_mut(),
- data.len(),
- );
- }
-
- dma_obj
+ Self::new(dev, data.len()).and_then(|mut dma_obj| {
+ // SAFETY: We have just allocated the DMA memory, we are the only users,
+ // and we haven't made the device aware of the handle yet.
+ unsafe { dma_obj.write(data, 0)? }
+ Ok(dma_obj)
})
}
}
diff --git a/drivers/gpu/nova-core/driver.rs b/drivers/gpu/nova-core/driver.rs
index 274989ea1fb4..b8b0cc0f2d93 100644
--- a/drivers/gpu/nova-core/driver.rs
+++ b/drivers/gpu/nova-core/driver.rs
@@ -1,6 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{auxiliary, bindings, c_str, device::Core, pci, prelude::*, sizes::SZ_16M, sync::Arc};
+use kernel::{
+ auxiliary,
+ c_str,
+ device::Core,
+ devres::Devres,
+ dma::Device,
+ dma::DmaMask,
+ pci,
+ pci::{
+ Class,
+ ClassMask,
+ Vendor, //
+ },
+ prelude::*,
+ sizes::SZ_16M,
+ sync::Arc, //
+};
use crate::gpu::Gpu;
@@ -8,50 +24,81 @@ use crate::gpu::Gpu;
pub(crate) struct NovaCore {
#[pin]
pub(crate) gpu: Gpu,
- _reg: auxiliary::Registration,
+ #[pin]
+ _reg: Devres<auxiliary::Registration>,
}
const BAR0_SIZE: usize = SZ_16M;
+
+// For now we only support Ampere, which can use up to 47-bit DMA addresses.
+//
+// TODO: Add an abstraction for this to support newer GPUs which may support
+// larger DMA addresses. Limiting these GPUs to smaller address widths won't
+// have any adverse effects, unless installed on systems which require larger
+// DMA addresses. These systems should be quite rare.
+const GPU_DMA_BITS: u32 = 47;
+
pub(crate) type Bar0 = pci::Bar<BAR0_SIZE>;
kernel::pci_device_table!(
PCI_TABLE,
MODULE_PCI_TABLE,
<NovaCore as pci::Driver>::IdInfo,
- [(
- pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as u32),
- ()
- )]
+ [
+ // Modern NVIDIA GPUs will show up as either VGA or 3D controllers.
+ (
+ pci::DeviceId::from_class_and_vendor(
+ Class::DISPLAY_VGA,
+ ClassMask::ClassSubclass,
+ Vendor::NVIDIA
+ ),
+ ()
+ ),
+ (
+ pci::DeviceId::from_class_and_vendor(
+ Class::DISPLAY_3D,
+ ClassMask::ClassSubclass,
+ Vendor::NVIDIA
+ ),
+ ()
+ ),
+ ]
);
impl pci::Driver for NovaCore {
type IdInfo = ();
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
- fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
- dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
+ fn probe(pdev: &pci::Device<Core>, _info: &Self::IdInfo) -> impl PinInit<Self, Error> {
+ pin_init::pin_init_scope(move || {
+ dev_dbg!(pdev.as_ref(), "Probe Nova Core GPU driver.\n");
- pdev.enable_device_mem()?;
- pdev.set_master();
+ pdev.enable_device_mem()?;
+ pdev.set_master();
- let bar = Arc::pin_init(
- pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0")),
- GFP_KERNEL,
- )?;
+ // SAFETY: No concurrent DMA allocations or mappings can be made because
+ // the device is still being probed and therefore isn't being used by
+ // other threads of execution.
+ unsafe { pdev.dma_set_mask_and_coherent(DmaMask::new::<GPU_DMA_BITS>())? };
- let this = KBox::pin_init(
- try_pin_init!(Self {
- gpu <- Gpu::new(pdev, bar)?,
- _reg: auxiliary::Registration::new(
+ let bar = Arc::pin_init(
+ pdev.iomap_region_sized::<BAR0_SIZE>(0, c_str!("nova-core/bar0")),
+ GFP_KERNEL,
+ )?;
+
+ Ok(try_pin_init!(Self {
+ gpu <- Gpu::new(pdev, bar.clone(), bar.access(pdev.as_ref())?),
+ _reg <- auxiliary::Registration::new(
pdev.as_ref(),
c_str!("nova-drm"),
0, // TODO[XARR]: Once it lands, use XArray; for now we don't use the ID.
crate::MODULE_NAME
- )?,
- }),
- GFP_KERNEL,
- )?;
+ ),
+ }))
+ })
+ }
- Ok(this)
+ fn unbind(pdev: &pci::Device<Core>, this: Pin<&Self>) {
+ this.gpu.unbind(pdev.as_ref());
}
}
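
The unsafe dma_set_mask_and_coherent() call above is a thin wrapper over the C helper of the same name; the equivalent C-side probe step would be, as a sketch:

```c
/* C equivalent of the Rust DMA-mask setup above (GPU_DMA_BITS == 47). */
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
if (ret)
	return dev_err_probe(&pdev->dev, ret, "failed to set DMA mask\n");
```
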
diff --git a/drivers/gpu/nova-core/falcon.rs b/drivers/gpu/nova-core/falcon.rs
index 50437c67c14a..82c661aef594 100644
--- a/drivers/gpu/nova-core/falcon.rs
+++ b/drivers/gpu/nova-core/falcon.rs
@@ -3,29 +3,43 @@
//! Falcon microprocessor base support
use core::ops::Deref;
+
use hal::FalconHal;
-use kernel::bindings;
-use kernel::device;
-use kernel::prelude::*;
-use kernel::time::Delta;
-use kernel::types::ARef;
-
-use crate::dma::DmaObject;
-use crate::driver::Bar0;
-use crate::gpu::Chipset;
-use crate::regs;
-use crate::util;
+
+use kernel::{
+ device,
+ dma::DmaAddress,
+ io::poll::read_poll_timeout,
+ prelude::*,
+ sync::aref::ARef,
+ time::{
+ delay::fsleep,
+ Delta, //
+ },
+};
+
+use crate::{
+ dma::DmaObject,
+ driver::Bar0,
+ gpu::Chipset,
+ num::{
+ FromSafeCast,
+ IntoSafeCast, //
+ },
+ regs,
+ regs::macros::RegisterBase, //
+};
pub(crate) mod gsp;
mod hal;
pub(crate) mod sec2;
// TODO[FPRI]: Replace with `ToPrimitive`.
-macro_rules! impl_from_enum_to_u32 {
+macro_rules! impl_from_enum_to_u8 {
($enum_type:ty) => {
- impl From<$enum_type> for u32 {
+ impl From<$enum_type> for u8 {
fn from(value: $enum_type) -> Self {
- value as u32
+ value as u8
}
}
};
@@ -45,7 +59,7 @@ pub(crate) enum FalconCoreRev {
Rev6 = 6,
Rev7 = 7,
}
-impl_from_enum_to_u32!(FalconCoreRev);
+impl_from_enum_to_u8!(FalconCoreRev);
// TODO[FPRI]: replace with `FromPrimitive`.
impl TryFrom<u8> for FalconCoreRev {
@@ -80,7 +94,7 @@ pub(crate) enum FalconCoreRevSubversion {
Subversion2 = 2,
Subversion3 = 3,
}
-impl_from_enum_to_u32!(FalconCoreRevSubversion);
+impl_from_enum_to_u8!(FalconCoreRevSubversion);
// TODO[FPRI]: replace with `FromPrimitive`.
impl TryFrom<u8> for FalconCoreRevSubversion {
@@ -124,7 +138,7 @@ pub(crate) enum FalconSecurityModel {
/// Also known as High-Secure, Privilege Level 3 or PL3.
Heavy = 3,
}
-impl_from_enum_to_u32!(FalconSecurityModel);
+impl_from_enum_to_u8!(FalconSecurityModel);
// TODO[FPRI]: replace with `FromPrimitive`.
impl TryFrom<u8> for FalconSecurityModel {
@@ -156,7 +170,7 @@ pub(crate) enum FalconModSelAlgo {
#[default]
Rsa3k = 1,
}
-impl_from_enum_to_u32!(FalconModSelAlgo);
+impl_from_enum_to_u8!(FalconModSelAlgo);
// TODO[FPRI]: replace with `FromPrimitive`.
impl TryFrom<u8> for FalconModSelAlgo {
@@ -178,7 +192,7 @@ pub(crate) enum DmaTrfCmdSize {
#[default]
Size256B = 0x6,
}
-impl_from_enum_to_u32!(DmaTrfCmdSize);
+impl_from_enum_to_u8!(DmaTrfCmdSize);
// TODO[FPRI]: replace with `FromPrimitive`.
impl TryFrom<u8> for DmaTrfCmdSize {
@@ -201,7 +215,6 @@ pub(crate) enum PeregrineCoreSelect {
/// RISC-V core is active.
Riscv = 1,
}
-impl_from_enum_to_u32!(PeregrineCoreSelect);
impl From<bool> for PeregrineCoreSelect {
fn from(value: bool) -> Self {
@@ -212,6 +225,15 @@ impl From<bool> for PeregrineCoreSelect {
}
}
+impl From<PeregrineCoreSelect> for bool {
+ fn from(value: PeregrineCoreSelect) -> Self {
+ match value {
+ PeregrineCoreSelect::Falcon => false,
+ PeregrineCoreSelect::Riscv => true,
+ }
+ }
+}
+
/// Different types of memory present in a falcon core.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) enum FalconMem {
@@ -235,7 +257,7 @@ pub(crate) enum FalconFbifTarget {
/// Non-coherent system memory (System DRAM).
NoncoherentSysmem = 2,
}
-impl_from_enum_to_u32!(FalconFbifTarget);
+impl_from_enum_to_u8!(FalconFbifTarget);
// TODO[FPRI]: replace with `FromPrimitive`.
impl TryFrom<u8> for FalconFbifTarget {
@@ -262,7 +284,6 @@ pub(crate) enum FalconFbifMemType {
/// Physical memory addresses.
Physical = 1,
}
-impl_from_enum_to_u32!(FalconFbifMemType);
/// Conversion from a single-bit register field.
impl From<bool> for FalconFbifMemType {
@@ -274,14 +295,34 @@ impl From<bool> for FalconFbifMemType {
}
}
-/// Trait defining the parameters of a given Falcon instance.
-pub(crate) trait FalconEngine: Sync {
- /// Base I/O address for the falcon, relative from which its registers are accessed.
- const BASE: usize;
+impl From<FalconFbifMemType> for bool {
+ fn from(value: FalconFbifMemType) -> Self {
+ match value {
+ FalconFbifMemType::Virtual => false,
+ FalconFbifMemType::Physical => true,
+ }
+ }
+}
+
+/// Type used to represent the `PFALCON` registers address base for a given falcon engine.
+pub(crate) struct PFalconBase(());
+
+/// Type used to represent the `PFALCON2` registers address base for a given falcon engine.
+pub(crate) struct PFalcon2Base(());
+
+/// Trait defining the parameters of a given Falcon engine.
+///
+/// Each engine provides one base for `PFALCON` and `PFALCON2` registers. The `ID` constant is used
+/// to identify a given Falcon instance with register I/O methods.
+pub(crate) trait FalconEngine:
+ Send + Sync + RegisterBase<PFalconBase> + RegisterBase<PFalcon2Base> + Sized
+{
+ /// Singleton of the engine, used to identify it with register I/O methods.
+ const ID: Self;
}
/// Represents a portion of the firmware to be loaded into a particular memory (e.g. IMEM or DMEM).
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub(crate) struct FalconLoadTarget {
/// Offset from the start of the source object to copy from.
pub(crate) src_start: u32,
@@ -292,7 +333,7 @@ pub(crate) struct FalconLoadTarget {
}
/// Parameters for the falcon boot ROM.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub(crate) struct FalconBromParams {
/// Offset in `DMEM` of the firmware's signature.
pub(crate) pkc_data_offset: u32,
@@ -334,71 +375,50 @@ pub(crate) struct Falcon<E: FalconEngine> {
impl<E: FalconEngine + 'static> Falcon<E> {
/// Create a new falcon instance.
- ///
- /// `need_riscv` is set to `true` if the caller expects the falcon to be a dual falcon/riscv
- /// controller.
- pub(crate) fn new(
- dev: &device::Device,
- chipset: Chipset,
- bar: &Bar0,
- need_riscv: bool,
- ) -> Result<Self> {
- let hwcfg1 = regs::NV_PFALCON_FALCON_HWCFG1::read(bar, E::BASE);
- // Check that the revision and security model contain valid values.
- let _ = hwcfg1.core_rev()?;
- let _ = hwcfg1.security_model()?;
-
- if need_riscv {
- let hwcfg2 = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
- if !hwcfg2.riscv() {
- dev_err!(
- dev,
- "riscv support requested on a controller that does not support it\n"
- );
- return Err(EINVAL);
- }
- }
-
+ pub(crate) fn new(dev: &device::Device, chipset: Chipset) -> Result<Self> {
Ok(Self {
hal: hal::falcon_hal(chipset)?,
dev: dev.into(),
})
}
+ /// Resets DMA-related registers.
+ pub(crate) fn dma_reset(&self, bar: &Bar0) {
+ regs::NV_PFALCON_FBIF_CTL::update(bar, &E::ID, |v| v.set_allow_phys_no_ctx(true));
+ regs::NV_PFALCON_FALCON_DMACTL::default().write(bar, &E::ID);
+ }
+
/// Wait for memory scrubbing to complete.
fn reset_wait_mem_scrubbing(&self, bar: &Bar0) -> Result {
// TIMEOUT: memory scrubbing should complete in less than 20ms.
- util::wait_on(Delta::from_millis(20), || {
- if regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE).mem_scrubbing_done() {
- Some(())
- } else {
- None
- }
- })
+ read_poll_timeout(
+ || Ok(regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID)),
+ |r| r.mem_scrubbing_done(),
+ Delta::ZERO,
+ Delta::from_millis(20),
+ )
+ .map(|_| ())
}
/// Reset the falcon engine.
fn reset_eng(&self, bar: &Bar0) -> Result {
- let _ = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
+ let _ = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID);
// According to OpenRM's `kflcnPreResetWait_GA102` documentation, HW sometimes does not set
// RESET_READY so a non-failing timeout is used.
- let _ = util::wait_on(Delta::from_micros(150), || {
- let r = regs::NV_PFALCON_FALCON_HWCFG2::read(bar, E::BASE);
- if r.reset_ready() {
- Some(())
- } else {
- None
- }
- });
+ let _ = read_poll_timeout(
+ || Ok(regs::NV_PFALCON_FALCON_HWCFG2::read(bar, &E::ID)),
+ |r| r.reset_ready(),
+ Delta::ZERO,
+ Delta::from_micros(150),
+ );
- regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(true));
+ regs::NV_PFALCON_FALCON_ENGINE::update(bar, &E::ID, |v| v.set_reset(true));
- // TODO[DLAY]: replace with udelay() or equivalent once available.
// TIMEOUT: falcon engine should not take more than 10us to reset.
- let _: Result = util::wait_on(Delta::from_micros(10), || None);
+ fsleep(Delta::from_micros(10));
- regs::NV_PFALCON_FALCON_ENGINE::alter(bar, E::BASE, |v| v.set_reset(false));
+ regs::NV_PFALCON_FALCON_ENGINE::update(bar, &E::ID, |v| v.set_reset(false));
self.reset_wait_mem_scrubbing(bar)?;
@@ -413,7 +433,7 @@ impl<E: FalconEngine + 'static> Falcon<E> {
regs::NV_PFALCON_FALCON_RM::default()
.set_value(regs::NV_PMC_BOOT_0::read(bar).into())
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
Ok(())
}
@@ -440,10 +460,10 @@ impl<E: FalconEngine + 'static> Falcon<E> {
FalconMem::Imem => (load_offsets.src_start, fw.dma_handle()),
FalconMem::Dmem => (
0,
- fw.dma_handle_with_offset(load_offsets.src_start as usize)?,
+ fw.dma_handle_with_offset(load_offsets.src_start.into_safe_cast())?,
),
};
- if dma_start % bindings::dma_addr_t::from(DMA_LEN) > 0 {
+ if dma_start % DmaAddress::from(DMA_LEN) > 0 {
dev_err!(
self.dev,
"DMA transfer start addresses must be a multiple of {}",
@@ -451,50 +471,65 @@ impl<E: FalconEngine + 'static> Falcon<E> {
);
return Err(EINVAL);
}
- if load_offsets.len % DMA_LEN > 0 {
- dev_err!(
- self.dev,
- "DMA transfer length must be a multiple of {}",
- DMA_LEN
- );
- return Err(EINVAL);
- }
+
+ // DMA transfers can only be done in units of 256 bytes. Compute how many such transfers we
+ // need to perform.
+ let num_transfers = load_offsets.len.div_ceil(DMA_LEN);
+
+ // Check that the area we are about to transfer is within the bounds of the DMA object.
+ // Upper limit of transfer is `(num_transfers * DMA_LEN) + load_offsets.src_start`.
+ match num_transfers
+ .checked_mul(DMA_LEN)
+ .and_then(|size| size.checked_add(load_offsets.src_start))
+ {
+ None => {
+ dev_err!(self.dev, "DMA transfer length overflow");
+ return Err(EOVERFLOW);
+ }
+ Some(upper_bound) if usize::from_safe_cast(upper_bound) > fw.size() => {
+ dev_err!(self.dev, "DMA transfer goes beyond range of DMA object");
+ return Err(EINVAL);
+ }
+ Some(_) => (),
+ };
// Set up the base source DMA address.
regs::NV_PFALCON_FALCON_DMATRFBASE::default()
+ // CAST: `as u32` is used on purpose since we do want to strip the upper bits, which
+ // will be written to `NV_PFALCON_FALCON_DMATRFBASE1`.
.set_base((dma_start >> 8) as u32)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON_FALCON_DMATRFBASE1::default()
+ // CAST: `as u16` is used on purpose since the remaining bits are guaranteed to fit
+ // within a `u16`.
.set_base((dma_start >> 40) as u16)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
let cmd = regs::NV_PFALCON_FALCON_DMATRFCMD::default()
.set_size(DmaTrfCmdSize::Size256B)
.set_imem(target_mem == FalconMem::Imem)
.set_sec(if sec { 1 } else { 0 });
- for pos in (0..load_offsets.len).step_by(DMA_LEN as usize) {
+ for pos in (0..num_transfers).map(|i| i * DMA_LEN) {
// Perform a transfer of size `DMA_LEN`.
regs::NV_PFALCON_FALCON_DMATRFMOFFS::default()
.set_offs(load_offsets.dst_start + pos)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON_FALCON_DMATRFFBOFFS::default()
.set_offs(src_start + pos)
- .write(bar, E::BASE);
- cmd.write(bar, E::BASE);
+ .write(bar, &E::ID);
+ cmd.write(bar, &E::ID);
// Wait for the transfer to complete.
// TIMEOUT: arbitrarily large value, no DMA transfer to the falcon's small memories
// should ever take that long.
- util::wait_on(Delta::from_secs(2), || {
- let r = regs::NV_PFALCON_FALCON_DMATRFCMD::read(bar, E::BASE);
- if r.idle() {
- Some(())
- } else {
- None
- }
- })?;
+ read_poll_timeout(
+ || Ok(regs::NV_PFALCON_FALCON_DMATRFCMD::read(bar, &E::ID)),
+ |r| r.idle(),
+ Delta::ZERO,
+ Delta::from_secs(2),
+ )?;
}
Ok(())
@@ -502,9 +537,8 @@ impl<E: FalconEngine + 'static> Falcon<E> {
/// Perform a DMA load into `IMEM` and `DMEM` of `fw`, and prepare the falcon to run it.
pub(crate) fn dma_load<F: FalconFirmware<Target = E>>(&self, bar: &Bar0, fw: &F) -> Result {
- regs::NV_PFALCON_FBIF_CTL::alter(bar, E::BASE, |v| v.set_allow_phys_no_ctx(true));
- regs::NV_PFALCON_FALCON_DMACTL::default().write(bar, E::BASE);
- regs::NV_PFALCON_FBIF_TRANSCFG::alter(bar, E::BASE, |v| {
+ self.dma_reset(bar);
+ regs::NV_PFALCON_FBIF_TRANSCFG::update(bar, &E::ID, 0, |v| {
v.set_target(FalconFbifTarget::CoherentSysmem)
.set_mem_type(FalconFbifMemType::Physical)
});
@@ -517,61 +551,88 @@ impl<E: FalconEngine + 'static> Falcon<E> {
// Set `BootVec` to start of non-secure code.
regs::NV_PFALCON_FALCON_BOOTVEC::default()
.set_value(fw.boot_addr())
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
Ok(())
}
- /// Runs the loaded firmware and waits for its completion.
- ///
- /// `mbox0` and `mbox1` are optional parameters to write into the `MBOX0` and `MBOX1` registers
- /// prior to running.
- ///
- /// Wait up to two seconds for the firmware to complete, and return its exit status read from
- /// the `MBOX0` and `MBOX1` registers.
- pub(crate) fn boot(
- &self,
- bar: &Bar0,
- mbox0: Option<u32>,
- mbox1: Option<u32>,
- ) -> Result<(u32, u32)> {
+ /// Wait until the falcon CPU is halted.
+ pub(crate) fn wait_till_halted(&self, bar: &Bar0) -> Result<()> {
+ // TIMEOUT: arbitrarily large value, firmwares should complete in less than 2 seconds.
+ read_poll_timeout(
+ || Ok(regs::NV_PFALCON_FALCON_CPUCTL::read(bar, &E::ID)),
+ |r| r.halted(),
+ Delta::ZERO,
+ Delta::from_secs(2),
+ )?;
+
+ Ok(())
+ }
+
+ /// Start the falcon CPU.
+ pub(crate) fn start(&self, bar: &Bar0) -> Result<()> {
+ match regs::NV_PFALCON_FALCON_CPUCTL::read(bar, &E::ID).alias_en() {
+ true => regs::NV_PFALCON_FALCON_CPUCTL_ALIAS::default()
+ .set_startcpu(true)
+ .write(bar, &E::ID),
+ false => regs::NV_PFALCON_FALCON_CPUCTL::default()
+ .set_startcpu(true)
+ .write(bar, &E::ID),
+ }
+
+ Ok(())
+ }
+
+ /// Writes values to the mailbox registers if provided.
+ pub(crate) fn write_mailboxes(&self, bar: &Bar0, mbox0: Option<u32>, mbox1: Option<u32>) {
if let Some(mbox0) = mbox0 {
regs::NV_PFALCON_FALCON_MAILBOX0::default()
.set_value(mbox0)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
}
if let Some(mbox1) = mbox1 {
regs::NV_PFALCON_FALCON_MAILBOX1::default()
.set_value(mbox1)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
}
+ }
- match regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE).alias_en() {
- true => regs::NV_PFALCON_FALCON_CPUCTL_ALIAS::default()
- .set_startcpu(true)
- .write(bar, E::BASE),
- false => regs::NV_PFALCON_FALCON_CPUCTL::default()
- .set_startcpu(true)
- .write(bar, E::BASE),
- }
+ /// Reads the value from `mbox0` register.
+ pub(crate) fn read_mailbox0(&self, bar: &Bar0) -> u32 {
+ regs::NV_PFALCON_FALCON_MAILBOX0::read(bar, &E::ID).value()
+ }
- // TIMEOUT: arbitrarily large value, firmwares should complete in less than 2 seconds.
- util::wait_on(Delta::from_secs(2), || {
- let r = regs::NV_PFALCON_FALCON_CPUCTL::read(bar, E::BASE);
- if r.halted() {
- Some(())
- } else {
- None
- }
- })?;
+ /// Reads the value from `mbox1` register.
+ pub(crate) fn read_mailbox1(&self, bar: &Bar0) -> u32 {
+ regs::NV_PFALCON_FALCON_MAILBOX1::read(bar, &E::ID).value()
+ }
- let (mbox0, mbox1) = (
- regs::NV_PFALCON_FALCON_MAILBOX0::read(bar, E::BASE).value(),
- regs::NV_PFALCON_FALCON_MAILBOX1::read(bar, E::BASE).value(),
- );
+ /// Reads values from both mailbox registers.
+ pub(crate) fn read_mailboxes(&self, bar: &Bar0) -> (u32, u32) {
+ let mbox0 = self.read_mailbox0(bar);
+ let mbox1 = self.read_mailbox1(bar);
- Ok((mbox0, mbox1))
+ (mbox0, mbox1)
+ }
+
+ /// Start running the loaded firmware.
+ ///
+ /// `mbox0` and `mbox1` are optional parameters to write into the `MBOX0` and `MBOX1` registers
+ /// prior to running.
+ ///
+ /// Wait up to two seconds for the firmware to complete, and return its exit status read from
+ /// the `MBOX0` and `MBOX1` registers.
+ pub(crate) fn boot(
+ &self,
+ bar: &Bar0,
+ mbox0: Option<u32>,
+ mbox1: Option<u32>,
+ ) -> Result<(u32, u32)> {
+ self.write_mailboxes(bar, mbox0, mbox1);
+ self.start(bar)?;
+ self.wait_till_halted(bar)?;
+ Ok(self.read_mailboxes(bar))
}
/// Returns the fused version of the signature to use in order to run a HS firmware on this
@@ -585,4 +646,19 @@ impl<E: FalconEngine + 'static> Falcon<E> {
self.hal
.signature_reg_fuse_version(self, bar, engine_id_mask, ucode_id)
}
+
+ /// Check if the RISC-V core is active.
+ ///
+ /// Returns `true` if the RISC-V core is active, `false` otherwise.
+ pub(crate) fn is_riscv_active(&self, bar: &Bar0) -> bool {
+ let cpuctl = regs::NV_PRISCV_RISCV_CPUCTL::read(bar, &E::ID);
+ cpuctl.active_stat()
+ }
+
+ /// Write the application version to the OS register.
+ pub(crate) fn write_os_version(&self, bar: &Bar0, app_version: u32) {
+ regs::NV_PFALCON_FALCON_OS::default()
+ .set_value(app_version)
+ .write(bar, &E::ID);
+ }
}
diff --git a/drivers/gpu/nova-core/falcon/gsp.rs b/drivers/gpu/nova-core/falcon/gsp.rs
index d622e9a64470..67edef3636c1 100644
--- a/drivers/gpu/nova-core/falcon/gsp.rs
+++ b/drivers/gpu/nova-core/falcon/gsp.rs
@@ -1,24 +1,57 @@
// SPDX-License-Identifier: GPL-2.0
+use kernel::{
+ io::poll::read_poll_timeout,
+ prelude::*,
+ time::Delta, //
+};
+
use crate::{
driver::Bar0,
- falcon::{Falcon, FalconEngine},
- regs,
+ falcon::{
+ Falcon,
+ FalconEngine,
+ PFalcon2Base,
+ PFalconBase, //
+ },
+ regs::{
+ self,
+ macros::RegisterBase, //
+ },
};
/// Type specifying the `Gsp` falcon engine. Cannot be instantiated.
pub(crate) struct Gsp(());
-impl FalconEngine for Gsp {
+impl RegisterBase<PFalconBase> for Gsp {
const BASE: usize = 0x00110000;
}
+impl RegisterBase<PFalcon2Base> for Gsp {
+ const BASE: usize = 0x00111000;
+}
+
+impl FalconEngine for Gsp {
+ const ID: Self = Gsp(());
+}
+
impl Falcon<Gsp> {
/// Clears the SWGEN0 bit in the Falcon's IRQ status clear register to
/// allow GSP to signal CPU for processing new messages in message queue.
pub(crate) fn clear_swgen0_intr(&self, bar: &Bar0) {
regs::NV_PFALCON_FALCON_IRQSCLR::default()
.set_swgen0(true)
- .write(bar, Gsp::BASE);
+ .write(bar, &Gsp::ID);
+ }
+
+ /// Checks if GSP reload/resume has completed during the boot process.
+ pub(crate) fn check_reload_completed(&self, bar: &Bar0, timeout: Delta) -> Result<bool> {
+ read_poll_timeout(
+ || Ok(regs::NV_PGC6_BSI_SECURE_SCRATCH_14::read(bar)),
+ |val| val.boot_stage_3_handoff(),
+ Delta::ZERO,
+ timeout,
+ )
+ .map(|_| true)
}
}
diff --git a/drivers/gpu/nova-core/falcon/hal.rs b/drivers/gpu/nova-core/falcon/hal.rs
index b233bc365882..8dc56a28ad65 100644
--- a/drivers/gpu/nova-core/falcon/hal.rs
+++ b/drivers/gpu/nova-core/falcon/hal.rs
@@ -2,9 +2,15 @@
use kernel::prelude::*;
-use crate::driver::Bar0;
-use crate::falcon::{Falcon, FalconBromParams, FalconEngine};
-use crate::gpu::Chipset;
+use crate::{
+ driver::Bar0,
+ falcon::{
+ Falcon,
+ FalconBromParams,
+ FalconEngine, //
+ },
+ gpu::Chipset,
+};
mod ga102;
@@ -13,7 +19,7 @@ mod ga102;
/// Implements chipset-specific low-level operations. The trait is generic against [`FalconEngine`]
/// so its `BASE` parameter can be used in order to avoid runtime bound checks when accessing
/// registers.
-pub(crate) trait FalconHal<E: FalconEngine>: Sync {
+pub(crate) trait FalconHal<E: FalconEngine>: Send + Sync {
/// Activates the Falcon core if the engine is a riscv/falcon dual engine.
fn select_core(&self, _falcon: &Falcon<E>, _bar: &Bar0) -> Result {
Ok(())
@@ -44,7 +50,7 @@ pub(super) fn falcon_hal<E: FalconEngine + 'static>(
use Chipset::*;
let hal = match chipset {
- GA102 | GA103 | GA104 | GA106 | GA107 => {
+ GA102 | GA103 | GA104 | GA106 | GA107 | AD102 | AD103 | AD104 | AD106 | AD107 => {
KBox::new(ga102::Ga102::<E>::new(), GFP_KERNEL)? as KBox<dyn FalconHal<E>>
}
_ => return Err(ENOTSUPP),
diff --git a/drivers/gpu/nova-core/falcon/hal/ga102.rs b/drivers/gpu/nova-core/falcon/hal/ga102.rs
index 52c33d3f22a8..69a7a95cac16 100644
--- a/drivers/gpu/nova-core/falcon/hal/ga102.rs
+++ b/drivers/gpu/nova-core/falcon/hal/ga102.rs
@@ -2,35 +2,41 @@
use core::marker::PhantomData;
-use kernel::device;
-use kernel::prelude::*;
-use kernel::time::Delta;
+use kernel::{
+ device,
+ io::poll::read_poll_timeout,
+ prelude::*,
+ time::Delta, //
+};
-use crate::driver::Bar0;
-use crate::falcon::{
- Falcon, FalconBromParams, FalconEngine, FalconModSelAlgo, PeregrineCoreSelect,
+use crate::{
+ driver::Bar0,
+ falcon::{
+ Falcon,
+ FalconBromParams,
+ FalconEngine,
+ FalconModSelAlgo,
+ PeregrineCoreSelect, //
+ },
+ regs,
};
-use crate::regs;
-use crate::util;
use super::FalconHal;
fn select_core_ga102<E: FalconEngine>(bar: &Bar0) -> Result {
- let bcr_ctrl = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE);
+ let bcr_ctrl = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, &E::ID);
if bcr_ctrl.core_select() != PeregrineCoreSelect::Falcon {
regs::NV_PRISCV_RISCV_BCR_CTRL::default()
.set_core_select(PeregrineCoreSelect::Falcon)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
// TIMEOUT: falcon core should take less than 10ms to report being enabled.
- util::wait_on(Delta::from_millis(10), || {
- let r = regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, E::BASE);
- if r.valid() {
- Some(())
- } else {
- None
- }
- })?;
+ read_poll_timeout(
+ || Ok(regs::NV_PRISCV_RISCV_BCR_CTRL::read(bar, &E::ID)),
+ |r| r.valid(),
+ Delta::ZERO,
+ Delta::from_millis(10),
+ )?;
}
Ok(())
@@ -42,50 +48,45 @@ fn signature_reg_fuse_version_ga102(
engine_id_mask: u16,
ucode_id: u8,
) -> Result<u32> {
- // TODO[REGA]: The ucode fuse versions are contained in the
- // FUSE_OPT_FPF_<ENGINE>_UCODE<X>_VERSION registers, which are an array. Our register
- // definition macros do not allow us to manage them properly, so we need to hardcode their
- // addresses for now. Clean this up once we support register arrays.
-
// Each engine has 16 ucode version registers numbered from 1 to 16.
- if ucode_id == 0 || ucode_id > 16 {
- dev_err!(dev, "invalid ucode id {:#x}", ucode_id);
- return Err(EINVAL);
- }
+ let ucode_idx = match usize::from(ucode_id) {
+ ucode_id @ 1..=regs::NV_FUSE_OPT_FPF_SIZE => ucode_id - 1,
+ _ => {
+ dev_err!(dev, "invalid ucode id {:#x}", ucode_id);
+ return Err(EINVAL);
+ }
+ };
- // Base address of the FUSE registers array corresponding to the engine.
- let reg_fuse_base = if engine_id_mask & 0x0001 != 0 {
- regs::NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION::OFFSET
+ // `ucode_idx` is guaranteed to be in the range [0..15], making the `read` calls provably
+ // valid at build time.
+ let reg_fuse_version = if engine_id_mask & 0x0001 != 0 {
+ regs::NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION::read(bar, ucode_idx).data()
} else if engine_id_mask & 0x0004 != 0 {
- regs::NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION::OFFSET
+ regs::NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION::read(bar, ucode_idx).data()
} else if engine_id_mask & 0x0400 != 0 {
- regs::NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION::OFFSET
+ regs::NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION::read(bar, ucode_idx).data()
} else {
dev_err!(dev, "unexpected engine_id_mask {:#x}", engine_id_mask);
return Err(EINVAL);
};
- // Read `reg_fuse_base[ucode_id - 1]`.
- let reg_fuse_version =
- bar.read32(reg_fuse_base + ((ucode_id - 1) as usize * core::mem::size_of::<u32>()));
-
// TODO[NUMM]: replace with `last_set_bit` once it lands.
- Ok(u32::BITS - reg_fuse_version.leading_zeros())
+ Ok(u16::BITS - reg_fuse_version.leading_zeros())
}
fn program_brom_ga102<E: FalconEngine>(bar: &Bar0, params: &FalconBromParams) -> Result {
regs::NV_PFALCON2_FALCON_BROM_PARAADDR::default()
.set_value(params.pkc_data_offset)
- .write(bar, E::BASE);
+ .write(bar, &E::ID, 0);
regs::NV_PFALCON2_FALCON_BROM_ENGIDMASK::default()
.set_value(u32::from(params.engine_id_mask))
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID::default()
.set_ucode_id(params.ucode_id)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
regs::NV_PFALCON2_FALCON_MOD_SEL::default()
.set_algo(FalconModSelAlgo::Rsa3k)
- .write(bar, E::BASE);
+ .write(bar, &E::ID);
Ok(())
}
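The value returned by signature_reg_fuse_version_ga102() is the index of the highest set bit in the fuse register, counted from 1 (0 when no bit is set). A standalone illustration of that computation:

// Plain-Rust model of the `BITS - leading_zeros` trick used above.
fn fuse_version(reg: u16) -> u32 {
    u16::BITS - reg.leading_zeros()
}

fn main() {
    assert_eq!(fuse_version(0b0000), 0); // no bit set -> version 0
    assert_eq!(fuse_version(0b0001), 1); // bit 0 set -> version 1
    assert_eq!(fuse_version(0b0110), 3); // bit 2 is the highest set bit -> version 3
}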
diff --git a/drivers/gpu/nova-core/falcon/sec2.rs b/drivers/gpu/nova-core/falcon/sec2.rs
index 5147d9e2a7fe..b57d362e576a 100644
--- a/drivers/gpu/nova-core/falcon/sec2.rs
+++ b/drivers/gpu/nova-core/falcon/sec2.rs
@@ -1,10 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::falcon::FalconEngine;
+use crate::{
+ falcon::{
+ FalconEngine,
+ PFalcon2Base,
+ PFalconBase, //
+ },
+ regs::macros::RegisterBase,
+};
/// Type specifying the `Sec2` falcon engine. Cannot be instantiated.
pub(crate) struct Sec2(());
-impl FalconEngine for Sec2 {
+impl RegisterBase<PFalconBase> for Sec2 {
const BASE: usize = 0x00840000;
}
+
+impl RegisterBase<PFalcon2Base> for Sec2 {
+ const BASE: usize = 0x00841000;
+}
+
+impl FalconEngine for Sec2 {
+ const ID: Self = Sec2(());
+}
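The change above replaces the single `BASE` constant with one `RegisterBase` implementation per register space, so a register bound to a given space resolves its absolute address from the engine type at compile time. A standalone sketch of the pattern (all names invented):

trait RegisterBase<Space> {
    const BASE: usize;
}

struct PFalconSpace;  // marker: NV_PFALCON register space
struct PFalcon2Space; // marker: NV_PFALCON2 register space

struct MyEngine;
impl RegisterBase<PFalconSpace> for MyEngine {
    const BASE: usize = 0x0084_0000;
}
impl RegisterBase<PFalcon2Space> for MyEngine {
    const BASE: usize = 0x0084_1000;
}

// A register living in the PFALCON2 space at relative offset 0x180.
fn reg_address<E: RegisterBase<PFalcon2Space>>() -> usize {
    E::BASE + 0x180
}

fn main() {
    assert_eq!(reg_address::<MyEngine>(), 0x0084_1180);
}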
diff --git a/drivers/gpu/nova-core/fb.rs b/drivers/gpu/nova-core/fb.rs
index 4a702525fff4..3c9cf151786c 100644
--- a/drivers/gpu/nova-core/fb.rs
+++ b/drivers/gpu/nova-core/fb.rs
@@ -2,15 +2,29 @@
use core::ops::Range;
-use kernel::prelude::*;
-use kernel::sizes::*;
-use kernel::types::ARef;
-use kernel::{dev_warn, device};
-
-use crate::dma::DmaObject;
-use crate::driver::Bar0;
-use crate::gpu::Chipset;
-use crate::regs;
+use kernel::{
+ device,
+ prelude::*,
+ ptr::{
+ Alignable,
+ Alignment, //
+ },
+ sizes::*,
+ sync::aref::ARef, //
+};
+
+use crate::{
+ dma::DmaObject,
+ driver::Bar0,
+ firmware::gsp::GspFirmware,
+ gpu::Chipset,
+ gsp,
+ num::{
+ usize_as_u64,
+ FromSafeCast, //
+ },
+ regs,
+};
mod hal;
@@ -84,16 +98,28 @@ impl SysmemFlush {
///
/// Contains ranges of GPU memory reserved for a given purpose during the GSP boot process.
#[derive(Debug)]
-#[expect(dead_code)]
pub(crate) struct FbLayout {
+ /// Range of the framebuffer. Starts at `0`.
pub(crate) fb: Range<u64>,
+ /// VGA workspace, small area of reserved memory at the end of the framebuffer.
pub(crate) vga_workspace: Range<u64>,
+ /// FRTS range.
pub(crate) frts: Range<u64>,
+ /// Memory area containing the GSP bootloader image.
+ pub(crate) boot: Range<u64>,
+ /// Memory area containing the GSP firmware image.
+ pub(crate) elf: Range<u64>,
+ /// WPR2 heap.
+ pub(crate) wpr2_heap: Range<u64>,
+ /// WPR2 region range, starting with an instance of `GspFwWprMeta`.
+ pub(crate) wpr2: Range<u64>,
+ pub(crate) heap: Range<u64>,
+ pub(crate) vf_partition_count: u8,
}
impl FbLayout {
- /// Computes the FB layout.
- pub(crate) fn new(chipset: Chipset, bar: &Bar0) -> Result<Self> {
+ /// Computes the FB layout for `chipset` required to run the `gsp_fw` GSP firmware.
+ pub(crate) fn new(chipset: Chipset, bar: &Bar0, gsp_fw: &GspFirmware) -> Result<Self> {
let hal = hal::fb_hal(chipset);
let fb = {
@@ -104,14 +130,14 @@ impl FbLayout {
let vga_workspace = {
let vga_base = {
- const NV_PRAMIN_SIZE: u64 = SZ_1M as u64;
+ const NV_PRAMIN_SIZE: u64 = usize_as_u64(SZ_1M);
let base = fb.end - NV_PRAMIN_SIZE;
if hal.supports_display(bar) {
match regs::NV_PDISP_VGA_WORKSPACE_BASE::read(bar).vga_workspace_addr() {
Some(addr) => {
if addr < base {
- const VBIOS_WORKSPACE_SIZE: u64 = SZ_128K as u64;
+ const VBIOS_WORKSPACE_SIZE: u64 = usize_as_u64(SZ_128K);
// Point workspace address to end of framebuffer.
fb.end - VBIOS_WORKSPACE_SIZE
@@ -130,18 +156,62 @@ impl FbLayout {
};
let frts = {
- const FRTS_DOWN_ALIGN: u64 = SZ_128K as u64;
- const FRTS_SIZE: u64 = SZ_1M as u64;
- // TODO[NUMM]: replace with `align_down` once it lands.
- let frts_base = (vga_workspace.start & !(FRTS_DOWN_ALIGN - 1)) - FRTS_SIZE;
+ const FRTS_DOWN_ALIGN: Alignment = Alignment::new::<SZ_128K>();
+ const FRTS_SIZE: u64 = usize_as_u64(SZ_1M);
+ let frts_base = vga_workspace.start.align_down(FRTS_DOWN_ALIGN) - FRTS_SIZE;
frts_base..frts_base + FRTS_SIZE
};
+ let boot = {
+ const BOOTLOADER_DOWN_ALIGN: Alignment = Alignment::new::<SZ_4K>();
+ let bootloader_size = u64::from_safe_cast(gsp_fw.bootloader.ucode.size());
+ let bootloader_base = (frts.start - bootloader_size).align_down(BOOTLOADER_DOWN_ALIGN);
+
+ bootloader_base..bootloader_base + bootloader_size
+ };
+
+ let elf = {
+ const ELF_DOWN_ALIGN: Alignment = Alignment::new::<SZ_64K>();
+ let elf_size = u64::from_safe_cast(gsp_fw.size);
+ let elf_addr = (boot.start - elf_size).align_down(ELF_DOWN_ALIGN);
+
+ elf_addr..elf_addr + elf_size
+ };
+
+ let wpr2_heap = {
+ const WPR2_HEAP_DOWN_ALIGN: Alignment = Alignment::new::<SZ_1M>();
+ let wpr2_heap_size =
+ gsp::LibosParams::from_chipset(chipset).wpr_heap_size(chipset, fb.end);
+ let wpr2_heap_addr = (elf.start - wpr2_heap_size).align_down(WPR2_HEAP_DOWN_ALIGN);
+
+ wpr2_heap_addr..(elf.start).align_down(WPR2_HEAP_DOWN_ALIGN)
+ };
+
+ let wpr2 = {
+ const WPR2_DOWN_ALIGN: Alignment = Alignment::new::<SZ_1M>();
+ let wpr2_addr = (wpr2_heap.start - u64::from_safe_cast(size_of::<gsp::GspFwWprMeta>()))
+ .align_down(WPR2_DOWN_ALIGN);
+
+ wpr2_addr..frts.end
+ };
+
+ let heap = {
+ const HEAP_SIZE: u64 = usize_as_u64(SZ_1M);
+
+ wpr2.start - HEAP_SIZE..wpr2.start
+ };
+
Ok(Self {
fb,
vga_workspace,
frts,
+ boot,
+ elf,
+ wpr2_heap,
+ wpr2,
+ heap,
+ vf_partition_count: 0,
})
}
}
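The layout is carved from the top of the framebuffer downwards, with each region aligned down to its required boundary before the next one is placed below it. A standalone model of the align_down arithmetic for power-of-two alignments (the helper is a stand-in for the kernel's `Alignment` API, values invented):

fn align_down(x: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    x & !(align - 1)
}

fn main() {
    const SZ_128K: u64 = 128 * 1024;
    const SZ_1M: u64 = 1024 * 1024;

    // FRTS base as computed above: align the VGA workspace start down to
    // 128 KiB, then reserve 1 MiB below it.
    let vga_start: u64 = 0x7ff2_3456;
    let frts_base = align_down(vga_start, SZ_128K) - SZ_1M;
    assert_eq!(frts_base, 0x7fe2_0000);
}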
diff --git a/drivers/gpu/nova-core/fb/hal.rs b/drivers/gpu/nova-core/fb/hal.rs
index 2f914948bb9a..aba0abd8ee00 100644
--- a/drivers/gpu/nova-core/fb/hal.rs
+++ b/drivers/gpu/nova-core/fb/hal.rs
@@ -2,8 +2,10 @@
use kernel::prelude::*;
-use crate::driver::Bar0;
-use crate::gpu::Chipset;
+use crate::{
+ driver::Bar0,
+ gpu::Chipset, //
+};
mod ga100;
mod ga102;
diff --git a/drivers/gpu/nova-core/fb/hal/ga100.rs b/drivers/gpu/nova-core/fb/hal/ga100.rs
index 871c42bf033a..e0acc41aa7cd 100644
--- a/drivers/gpu/nova-core/fb/hal/ga100.rs
+++ b/drivers/gpu/nova-core/fb/hal/ga100.rs
@@ -1,15 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
-struct Ga100;
-
use kernel::prelude::*;
-use crate::driver::Bar0;
-use crate::fb::hal::FbHal;
-use crate::regs;
+use crate::{
+ driver::Bar0,
+ fb::hal::FbHal,
+ regs, //
+};
use super::tu102::FLUSH_SYSMEM_ADDR_SHIFT;
+struct Ga100;
+
pub(super) fn read_sysmem_flush_page_ga100(bar: &Bar0) -> u64 {
u64::from(regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR::read(bar).adr_39_08()) << FLUSH_SYSMEM_ADDR_SHIFT
| u64::from(regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI::read(bar).adr_63_40())
@@ -18,9 +20,13 @@ pub(super) fn read_sysmem_flush_page_ga100(bar: &Bar0) -> u64 {
pub(super) fn write_sysmem_flush_page_ga100(bar: &Bar0, addr: u64) {
regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI::default()
+ // CAST: `as u32` is used on purpose since the remaining bits are guaranteed to fit within
+ // a `u32`.
.set_adr_63_40((addr >> FLUSH_SYSMEM_ADDR_SHIFT_HI) as u32)
.write(bar);
regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR::default()
+ // CAST: `as u32` is used on purpose since we want to strip the upper bits that have been
+ // written to `NV_PFB_NISO_FLUSH_SYSMEM_ADDR_HI`.
.set_adr_39_08((addr >> FLUSH_SYSMEM_ADDR_SHIFT) as u32)
.write(bar);
}
diff --git a/drivers/gpu/nova-core/fb/hal/ga102.rs b/drivers/gpu/nova-core/fb/hal/ga102.rs
index a73b77e39715..734605905031 100644
--- a/drivers/gpu/nova-core/fb/hal/ga102.rs
+++ b/drivers/gpu/nova-core/fb/hal/ga102.rs
@@ -2,9 +2,11 @@
use kernel::prelude::*;
-use crate::driver::Bar0;
-use crate::fb::hal::FbHal;
-use crate::regs;
+use crate::{
+ driver::Bar0,
+ fb::hal::FbHal,
+ regs, //
+};
fn vidmem_size_ga102(bar: &Bar0) -> u64 {
regs::NV_USABLE_FB_SIZE_IN_MB::read(bar).usable_fb_size()
diff --git a/drivers/gpu/nova-core/fb/hal/tu102.rs b/drivers/gpu/nova-core/fb/hal/tu102.rs
index b022c781caf4..eec984f4e816 100644
--- a/drivers/gpu/nova-core/fb/hal/tu102.rs
+++ b/drivers/gpu/nova-core/fb/hal/tu102.rs
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-use crate::driver::Bar0;
-use crate::fb::hal::FbHal;
-use crate::regs;
use kernel::prelude::*;
+use crate::{
+ driver::Bar0,
+ fb::hal::FbHal,
+ regs, //
+};
+
/// Shift applied to the sysmem address before it is written into `NV_PFB_NISO_FLUSH_SYSMEM_ADDR`,
/// to be used by HALs.
pub(super) const FLUSH_SYSMEM_ADDR_SHIFT: u32 = 8;
@@ -15,15 +18,13 @@ pub(super) fn read_sysmem_flush_page_gm107(bar: &Bar0) -> u64 {
pub(super) fn write_sysmem_flush_page_gm107(bar: &Bar0, addr: u64) -> Result {
// Check that the address doesn't overflow the receiving 32-bit register.
- if addr >> (u32::BITS + FLUSH_SYSMEM_ADDR_SHIFT) == 0 {
- regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR::default()
- .set_adr_39_08((addr >> FLUSH_SYSMEM_ADDR_SHIFT) as u32)
- .write(bar);
-
- Ok(())
- } else {
- Err(EINVAL)
- }
+ u32::try_from(addr >> FLUSH_SYSMEM_ADDR_SHIFT)
+ .map_err(|_| EINVAL)
+ .map(|addr| {
+ regs::NV_PFB_NISO_FLUSH_SYSMEM_ADDR::default()
+ .set_adr_39_08(addr)
+ .write(bar)
+ })
}
pub(super) fn display_enabled_gm107(bar: &Bar0) -> bool {
diff --git a/drivers/gpu/nova-core/firmware.rs b/drivers/gpu/nova-core/firmware.rs
index 2931912ddba0..2d2008b33fb4 100644
--- a/drivers/gpu/nova-core/firmware.rs
+++ b/drivers/gpu/nova-core/firmware.rs
@@ -5,47 +5,42 @@
use core::marker::PhantomData;
-use kernel::device;
-use kernel::firmware;
-use kernel::prelude::*;
-use kernel::str::CString;
-
-use crate::dma::DmaObject;
-use crate::falcon::FalconFirmware;
-use crate::gpu;
-use crate::gpu::Chipset;
-
+use kernel::{
+ device,
+ firmware,
+ prelude::*,
+ str::CString,
+ transmute::FromBytes, //
+};
+
+use crate::{
+ dma::DmaObject,
+ falcon::FalconFirmware,
+ gpu,
+ num::{
+ FromSafeCast,
+ IntoSafeCast, //
+ },
+};
+
+pub(crate) mod booter;
pub(crate) mod fwsec;
-
-pub(crate) const FIRMWARE_VERSION: &str = "535.113.01";
-
-/// Structure encapsulating the firmware blobs required for the GPU to operate.
-#[expect(dead_code)]
-pub(crate) struct Firmware {
- booter_load: firmware::Firmware,
- booter_unload: firmware::Firmware,
- bootloader: firmware::Firmware,
- gsp: firmware::Firmware,
-}
-
-impl Firmware {
- pub(crate) fn new(dev: &device::Device, chipset: Chipset, ver: &str) -> Result<Firmware> {
- let mut chip_name = CString::try_from_fmt(fmt!("{chipset}"))?;
- chip_name.make_ascii_lowercase();
- let chip_name = &*chip_name;
-
- let request = |name_| {
- CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name_}-{ver}.bin"))
- .and_then(|path| firmware::Firmware::request(&path, dev))
- };
-
- Ok(Firmware {
- booter_load: request("booter_load")?,
- booter_unload: request("booter_unload")?,
- bootloader: request("bootloader")?,
- gsp: request("gsp")?,
- })
- }
+pub(crate) mod gsp;
+pub(crate) mod riscv;
+
+pub(crate) const FIRMWARE_VERSION: &str = "570.144";
+
+/// Requests the GPU firmware `name` suitable for `chipset`, with version `ver`.
+fn request_firmware(
+ dev: &device::Device,
+ chipset: gpu::Chipset,
+ name: &str,
+ ver: &str,
+) -> Result<firmware::Firmware> {
+ let chip_name = chipset.name();
+
+ CString::try_from_fmt(fmt!("nvidia/{chip_name}/gsp/{name}-{ver}.bin"))
+ .and_then(|path| firmware::Firmware::request(&path, dev))
}
/// Structure used to describe some firmwares, notably FWSEC-FRTS.
@@ -87,7 +82,7 @@ impl FalconUCodeDescV3 {
const HDR_SIZE_SHIFT: u32 = 16;
const HDR_SIZE_MASK: u32 = 0xffff0000;
- ((self.hdr & HDR_SIZE_MASK) >> HDR_SIZE_SHIFT) as usize
+ ((self.hdr & HDR_SIZE_MASK) >> HDR_SIZE_SHIFT).into_safe_cast()
}
}
@@ -150,6 +145,65 @@ impl<F: FalconFirmware> FirmwareDmaObject<F, Unsigned> {
}
}
+/// Header common to most firmware files.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct BinHdr {
+ /// Magic number, must be `0x10de`.
+ bin_magic: u32,
+ /// Version of the header.
+ bin_ver: u32,
+ /// Size in bytes of the binary (to be ignored).
+ bin_size: u32,
+ /// Offset of the start of the application-specific header.
+ header_offset: u32,
+ /// Offset of the start of the data payload.
+ data_offset: u32,
+ /// Size in bytes of the data payload.
+ data_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for BinHdr {}
+
+// A firmware blob starting with a `BinHdr`.
+struct BinFirmware<'a> {
+ hdr: BinHdr,
+ fw: &'a [u8],
+}
+
+impl<'a> BinFirmware<'a> {
+ /// Interprets `fw` as a firmware image starting with a [`BinHdr`], and returns the
+ /// corresponding [`BinFirmware`] that can be used to extract its payload.
+ fn new(fw: &'a firmware::Firmware) -> Result<Self> {
+ const BIN_MAGIC: u32 = 0x10de;
+ let fw = fw.data();
+
+ fw.get(0..size_of::<BinHdr>())
+ // Extract header.
+ .and_then(BinHdr::from_bytes_copy)
+ // Validate header.
+ .and_then(|hdr| {
+ if hdr.bin_magic == BIN_MAGIC {
+ Some(hdr)
+ } else {
+ None
+ }
+ })
+ .map(|hdr| Self { hdr, fw })
+ .ok_or(EINVAL)
+ }
+
+ /// Returns the data payload of the firmware, or `None` if the data range is out of bounds of
+ /// the firmware image.
+ fn data(&self) -> Option<&[u8]> {
+ let fw_start = usize::from_safe_cast(self.hdr.data_offset);
+ let fw_size = usize::from_safe_cast(self.hdr.data_size);
+
+ self.fw.get(fw_start..fw_start + fw_size)
+ }
+}
+
pub(crate) struct ModInfoBuilder<const N: usize>(firmware::ModInfoBuilder<N>);
impl<const N: usize> ModInfoBuilder<N> {
@@ -180,8 +234,8 @@ impl<const N: usize> ModInfoBuilder<N> {
let mut this = Self(firmware::ModInfoBuilder::new(module_name));
let mut i = 0;
- while i < gpu::Chipset::NAMES.len() {
- this = this.make_entry_chipset(gpu::Chipset::NAMES[i]);
+ while i < gpu::Chipset::ALL.len() {
+ this = this.make_entry_chipset(gpu::Chipset::ALL[i].name());
i += 1;
}
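BinFirmware::new() validates the `0x10de` magic before anything else, and data() bounds-checks the payload range against the image. A simplified host-side sketch of the same validation over a raw byte buffer (the helper is an illustration, not the driver's parser):

const BIN_MAGIC: u32 = 0x10de;

/// Returns the payload of a `BinHdr`-prefixed image, or `None` if the magic
/// or the data range is invalid.
fn payload(fw: &[u8]) -> Option<&[u8]> {
    // Header layout: magic, version, size, header_offset, data_offset,
    // data_size -- six little-endian u32 words.
    let word = |i: usize| -> Option<u32> {
        fw.get(i * 4..i * 4 + 4)
            .map(|b| u32::from_le_bytes([b[0], b[1], b[2], b[3]]))
    };

    if word(0)? != BIN_MAGIC {
        return None;
    }
    let start = word(4)? as usize;
    let size = word(5)? as usize;

    fw.get(start..start.checked_add(size)?)
}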
diff --git a/drivers/gpu/nova-core/firmware/booter.rs b/drivers/gpu/nova-core/firmware/booter.rs
new file mode 100644
index 000000000000..f107f753214a
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/booter.rs
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for loading and patching the `Booter` firmware. `Booter` is a Heavy-Secured firmware
+//! running on [`Sec2`] that is used on Turing/Ampere to load the GSP firmware into the GSP falcon
+//! (and optionally unload it through a separate firmware image).
+
+use core::{
+ marker::PhantomData,
+ ops::Deref, //
+};
+
+use kernel::{
+ device,
+ prelude::*,
+ transmute::FromBytes, //
+};
+
+use crate::{
+ dma::DmaObject,
+ driver::Bar0,
+ falcon::{
+ sec2::Sec2,
+ Falcon,
+ FalconBromParams,
+ FalconFirmware,
+ FalconLoadParams,
+ FalconLoadTarget, //
+ },
+ firmware::{
+ BinFirmware,
+ FirmwareDmaObject,
+ FirmwareSignature,
+ Signed,
+ Unsigned, //
+ },
+ gpu::Chipset,
+ num::{
+ FromSafeCast,
+ IntoSafeCast, //
+ },
+};
+
+/// Local convenience function to return a copy of `S` by reinterpreting the bytes starting at
+/// `offset` in `slice`.
+fn frombytes_at<S: FromBytes + Sized>(slice: &[u8], offset: usize) -> Result<S> {
+ slice
+ .get(offset..offset + size_of::<S>())
+ .and_then(S::from_bytes_copy)
+ .ok_or(EINVAL)
+}
+
+/// Heavy-Secured firmware header.
+///
+/// Such firmwares have an application-specific payload that needs to be patched with a given
+/// signature.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsHeaderV2 {
+ /// Offset to the start of the signatures.
+ sig_prod_offset: u32,
+ /// Size in bytes of the signatures.
+ sig_prod_size: u32,
+ /// Offset to a `u32` containing the location at which to patch the signature in the microcode
+ /// image.
+ patch_loc_offset: u32,
+ /// Offset to a `u32` containing the index of the signature to patch.
+ patch_sig_offset: u32,
+ /// Start offset to the signature metadata.
+ meta_data_offset: u32,
+ /// Size in bytes of the signature metadata.
+ meta_data_size: u32,
+ /// Offset to a `u32` containing the number of signatures in the signatures section.
+ num_sig_offset: u32,
+ /// Offset of the application-specific header.
+ header_offset: u32,
+ /// Size in bytes of the application-specific header.
+ header_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsHeaderV2 {}
+
+/// Heavy-Secured Firmware image container.
+///
+/// This provides convenient access to the fields of [`HsHeaderV2`] that are actually indices to
+/// read from in the firmware data.
+struct HsFirmwareV2<'a> {
+ hdr: HsHeaderV2,
+ fw: &'a [u8],
+}
+
+impl<'a> HsFirmwareV2<'a> {
+ /// Interprets the header of `bin_fw` as a [`HsHeaderV2`] and returns an instance of
+ /// `HsFirmwareV2` for further parsing.
+ ///
+ /// Fails if the header pointed at by `bin_fw` is not within the bounds of the firmware image.
+ fn new(bin_fw: &BinFirmware<'a>) -> Result<Self> {
+ frombytes_at::<HsHeaderV2>(bin_fw.fw, bin_fw.hdr.header_offset.into_safe_cast())
+ .map(|hdr| Self { hdr, fw: bin_fw.fw })
+ }
+
+ /// Returns the location at which the signature should be patched in the microcode image.
+ ///
+ /// Fails if the offset of the patch location is outside the bounds of the firmware
+ /// image.
+ fn patch_location(&self) -> Result<u32> {
+ frombytes_at::<u32>(self.fw, self.hdr.patch_loc_offset.into_safe_cast())
+ }
+
+ /// Returns an iterator over the signatures of the firmware. The iterator can be empty if the
+ /// firmware is unsigned.
+ ///
+ /// Fails if the signatures pointed to are outside the bounds of the firmware image.
+ fn signatures_iter(&'a self) -> Result<impl Iterator<Item = BooterSignature<'a>>> {
+ let num_sig = frombytes_at::<u32>(self.fw, self.hdr.num_sig_offset.into_safe_cast())?;
+ let iter = match self.hdr.sig_prod_size.checked_div(num_sig) {
+ // If there are no signatures, return an iterator that will yield zero elements.
+ None => (&[] as &[u8]).chunks_exact(1),
+ Some(sig_size) => {
+ let patch_sig =
+ frombytes_at::<u32>(self.fw, self.hdr.patch_sig_offset.into_safe_cast())?;
+ let signatures_start = usize::from_safe_cast(self.hdr.sig_prod_offset + patch_sig);
+
+ self.fw
+ // Get signatures range.
+ .get(
+ signatures_start
+ ..signatures_start + usize::from_safe_cast(self.hdr.sig_prod_size),
+ )
+ .ok_or(EINVAL)?
+ .chunks_exact(sig_size.into_safe_cast())
+ }
+ };
+
+ // Map the byte slices into signatures.
+ Ok(iter.map(BooterSignature))
+ }
+}
+
+/// Signature parameters, as defined in the firmware.
+#[repr(C)]
+struct HsSignatureParams {
+ /// Fuse version to use.
+ fuse_ver: u32,
+ /// Mask of engine IDs this firmware applies to.
+ engine_id_mask: u32,
+ /// ID of the microcode.
+ ucode_id: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsSignatureParams {}
+
+impl HsSignatureParams {
+ /// Returns the signature parameters contained in `hs_fw`.
+ ///
+ /// Fails if the meta data parameter of `hs_fw` is outside the bounds of the firmware image, or
+ /// if its size doesn't match that of [`HsSignatureParams`].
+ fn new(hs_fw: &HsFirmwareV2<'_>) -> Result<Self> {
+ let start = usize::from_safe_cast(hs_fw.hdr.meta_data_offset);
+ let end = start
+ .checked_add(hs_fw.hdr.meta_data_size.into_safe_cast())
+ .ok_or(EINVAL)?;
+
+ hs_fw
+ .fw
+ .get(start..end)
+ .and_then(Self::from_bytes_copy)
+ .ok_or(EINVAL)
+ }
+}
+
+/// Header for code and data load offsets.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsLoadHeaderV2 {
+ // Offset at which the code starts.
+ os_code_offset: u32,
+ // Total size of the code, for all apps.
+ os_code_size: u32,
+ // Offset at which the data starts.
+ os_data_offset: u32,
+ // Size of the data.
+ os_data_size: u32,
+ // Number of apps following this header. Each app is described by a [`HsLoadHeaderV2App`].
+ num_apps: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsLoadHeaderV2 {}
+
+impl HsLoadHeaderV2 {
+ /// Returns the load header contained in `hs_fw`.
+ ///
+ /// Fails if the header pointed at by `hs_fw` is not within the bounds of the firmware image.
+ fn new(hs_fw: &HsFirmwareV2<'_>) -> Result<Self> {
+ frombytes_at::<Self>(hs_fw.fw, hs_fw.hdr.header_offset.into_safe_cast())
+ }
+}
+
+/// Header for app code loader.
+#[repr(C)]
+#[derive(Debug, Clone)]
+struct HsLoadHeaderV2App {
+ /// Offset at which to load the app code.
+ offset: u32,
+ /// Length in bytes of the app code.
+ len: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for HsLoadHeaderV2App {}
+
+impl HsLoadHeaderV2App {
+ /// Returns the [`HsLoadHeaderV2App`] for app `idx` of `hs_fw`.
+ ///
+ /// Fails if `idx` is larger than or equal to the number of apps declared in `hs_fw`, or if
+ /// the header is not within the bounds of the firmware image.
+ fn new(hs_fw: &HsFirmwareV2<'_>, idx: u32) -> Result<Self> {
+ let load_hdr = HsLoadHeaderV2::new(hs_fw)?;
+ if idx >= load_hdr.num_apps {
+ Err(EINVAL)
+ } else {
+ frombytes_at::<Self>(
+ hs_fw.fw,
+ usize::from_safe_cast(hs_fw.hdr.header_offset)
+ // Skip the load header...
+ .checked_add(size_of::<HsLoadHeaderV2>())
+ // ... and jump to app header `idx`.
+ .and_then(|offset| {
+ offset
+ .checked_add(usize::from_safe_cast(idx).checked_mul(size_of::<Self>())?)
+ })
+ .ok_or(EINVAL)?,
+ )
+ }
+ }
+}
+
+/// Signature for the Booter firmware. Signature sizes are encoded in the header and not known at
+/// compile time, so we just wrap a byte slice on which we can implement [`FirmwareSignature`].
+struct BooterSignature<'a>(&'a [u8]);
+
+impl<'a> AsRef<[u8]> for BooterSignature<'a> {
+ fn as_ref(&self) -> &[u8] {
+ self.0
+ }
+}
+
+impl<'a> FirmwareSignature<BooterFirmware> for BooterSignature<'a> {}
+
+/// The `Booter` loader firmware, responsible for loading the GSP.
+pub(crate) struct BooterFirmware {
+ // Load parameters for `IMEM` falcon memory.
+ imem_load_target: FalconLoadTarget,
+ // Load parameters for `DMEM` falcon memory.
+ dmem_load_target: FalconLoadTarget,
+ // BROM falcon parameters.
+ brom_params: FalconBromParams,
+ // Device-mapped firmware image.
+ ucode: FirmwareDmaObject<Self, Signed>,
+}
+
+impl FirmwareDmaObject<BooterFirmware, Unsigned> {
+ fn new_booter(dev: &device::Device<device::Bound>, data: &[u8]) -> Result<Self> {
+ DmaObject::from_data(dev, data).map(|ucode| Self(ucode, PhantomData))
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub(crate) enum BooterKind {
+ Loader,
+ #[expect(unused)]
+ Unloader,
+}
+
+impl BooterFirmware {
+ /// Parses the Booter firmware contained in `fw`, and patches the correct signature so it is
+ /// ready to be loaded and run on `falcon`.
+ pub(crate) fn new(
+ dev: &device::Device<device::Bound>,
+ kind: BooterKind,
+ chipset: Chipset,
+ ver: &str,
+ falcon: &Falcon<<Self as FalconFirmware>::Target>,
+ bar: &Bar0,
+ ) -> Result<Self> {
+ let fw_name = match kind {
+ BooterKind::Loader => "booter_load",
+ BooterKind::Unloader => "booter_unload",
+ };
+ let fw = super::request_firmware(dev, chipset, fw_name, ver)?;
+ let bin_fw = BinFirmware::new(&fw)?;
+
+ // The binary firmware embeds a Heavy-Secured firmware.
+ let hs_fw = HsFirmwareV2::new(&bin_fw)?;
+
+ // The Heavy-Secured firmware embeds a firmware load descriptor.
+ let load_hdr = HsLoadHeaderV2::new(&hs_fw)?;
+
+ // Offset in `ucode` where to patch the signature.
+ let patch_loc = hs_fw.patch_location()?;
+
+ let sig_params = HsSignatureParams::new(&hs_fw)?;
+ let brom_params = FalconBromParams {
+ // `patch_loc` is an absolute offset into the image, while `pkc_data_offset` is
+ // relative to the start of the data section (`load_hdr.os_data_offset`).
+ pkc_data_offset: patch_loc
+ .checked_sub(load_hdr.os_data_offset)
+ .ok_or(EINVAL)?,
+ engine_id_mask: u16::try_from(sig_params.engine_id_mask).map_err(|_| EINVAL)?,
+ ucode_id: u8::try_from(sig_params.ucode_id).map_err(|_| EINVAL)?,
+ };
+ let app0 = HsLoadHeaderV2App::new(&hs_fw, 0)?;
+
+ // Object containing the firmware microcode to be signature-patched.
+ let ucode = bin_fw
+ .data()
+ .ok_or(EINVAL)
+ .and_then(|data| FirmwareDmaObject::<Self, _>::new_booter(dev, data))?;
+
+ let ucode_signed = {
+ let mut signatures = hs_fw.signatures_iter()?.peekable();
+
+ if signatures.peek().is_none() {
+ // If there are no signatures, then the firmware is unsigned.
+ ucode.no_patch_signature()
+ } else {
+ // Obtain the version from the fuse register, and extract the corresponding
+ // signature.
+ let reg_fuse_version = falcon.signature_reg_fuse_version(
+ bar,
+ brom_params.engine_id_mask,
+ brom_params.ucode_id,
+ )?;
+
+ // `0` means the last signature should be used.
+ const FUSE_VERSION_USE_LAST_SIG: u32 = 0;
+ let signature = match reg_fuse_version {
+ FUSE_VERSION_USE_LAST_SIG => signatures.last(),
+ // Otherwise the hardware fuse version is subtracted from the image's fuse version
+ // to obtain the index.
+ reg_fuse_version => {
+ let Some(idx) = sig_params.fuse_ver.checked_sub(reg_fuse_version) else {
+ dev_err!(dev, "invalid fuse version for Booter firmware\n");
+ return Err(EINVAL);
+ };
+ signatures.nth(idx.into_safe_cast())
+ }
+ }
+ .ok_or(EINVAL)?;
+
+ ucode.patch_signature(&signature, patch_loc.into_safe_cast())?
+ }
+ };
+
+ Ok(Self {
+ imem_load_target: FalconLoadTarget {
+ src_start: app0.offset,
+ dst_start: 0,
+ len: app0.len,
+ },
+ dmem_load_target: FalconLoadTarget {
+ src_start: load_hdr.os_data_offset,
+ dst_start: 0,
+ len: load_hdr.os_data_size,
+ },
+ brom_params,
+ ucode: ucode_signed,
+ })
+ }
+}
+
+impl FalconLoadParams for BooterFirmware {
+ fn imem_load_params(&self) -> FalconLoadTarget {
+ self.imem_load_target.clone()
+ }
+
+ fn dmem_load_params(&self) -> FalconLoadTarget {
+ self.dmem_load_target.clone()
+ }
+
+ fn brom_params(&self) -> FalconBromParams {
+ self.brom_params.clone()
+ }
+
+ fn boot_addr(&self) -> u32 {
+ self.imem_load_target.src_start
+ }
+}
+
+impl Deref for BooterFirmware {
+ type Target = DmaObject;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ucode.0
+ }
+}
+
+impl FalconFirmware for BooterFirmware {
+ type Target = Sec2;
+}
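The signature selection in BooterFirmware::new() deserves a worked example: the image carries one signature per supported fuse version, a fuse register reading of `0` selects the last signature, and any other reading is subtracted from the image's fuse version to obtain the index. A standalone model with invented values:

fn signature_index(fuse_ver: u32, reg_fuse_version: u32, num_sigs: usize) -> Option<usize> {
    match reg_fuse_version {
        // `0` means the last signature should be used.
        0 => num_sigs.checked_sub(1),
        v => fuse_ver.checked_sub(v).map(|idx| idx as usize),
    }
}

fn main() {
    // Image built for fuse version 3, hardware reports version 2:
    // pick signature #1 (the second one).
    assert_eq!(signature_index(3, 2, 4), Some(1));
    // Hardware fuse burned past the image's version: reject.
    assert_eq!(signature_index(3, 5, 4), None);
    // Fuse register reads 0: take the last of the four signatures.
    assert_eq!(signature_index(3, 0, 4), Some(3));
}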
diff --git a/drivers/gpu/nova-core/firmware/fwsec.rs b/drivers/gpu/nova-core/firmware/fwsec.rs
index 0dff3cfa90af..b28e34d279f4 100644
--- a/drivers/gpu/nova-core/firmware/fwsec.rs
+++ b/drivers/gpu/nova-core/firmware/fwsec.rs
@@ -10,20 +10,48 @@
//! - The command to be run, as this firmware can perform several tasks ;
//! - The ucode signature, so the GSP falcon can run FWSEC in HS mode.
-use core::marker::PhantomData;
-use core::mem::{align_of, size_of};
-use core::ops::Deref;
-
-use kernel::device::{self, Device};
-use kernel::prelude::*;
-use kernel::transmute::FromBytes;
-
-use crate::dma::DmaObject;
-use crate::driver::Bar0;
-use crate::falcon::gsp::Gsp;
-use crate::falcon::{Falcon, FalconBromParams, FalconFirmware, FalconLoadParams, FalconLoadTarget};
-use crate::firmware::{FalconUCodeDescV3, FirmwareDmaObject, FirmwareSignature, Signed, Unsigned};
-use crate::vbios::Vbios;
+use core::{
+ marker::PhantomData,
+ mem::size_of,
+ ops::Deref, //
+};
+
+use kernel::{
+ device::{
+ self,
+ Device, //
+ },
+ prelude::*,
+ transmute::{
+ AsBytes,
+ FromBytes, //
+ },
+};
+
+use crate::{
+ dma::DmaObject,
+ driver::Bar0,
+ falcon::{
+ gsp::Gsp,
+ Falcon,
+ FalconBromParams,
+ FalconFirmware,
+ FalconLoadParams,
+ FalconLoadTarget, //
+ },
+ firmware::{
+ FalconUCodeDescV3,
+ FirmwareDmaObject,
+ FirmwareSignature,
+ Signed,
+ Unsigned, //
+ },
+ num::{
+ FromSafeCast,
+ IntoSafeCast, //
+ },
+ vbios::Vbios,
+};
const NVFW_FALCON_APPIF_ID_DMEMMAPPER: u32 = 0x4;
@@ -35,7 +63,7 @@ struct FalconAppifHdrV1 {
entry_size: u8,
entry_count: u8,
}
-// SAFETY: any byte sequence is valid for this struct.
+// SAFETY: Any byte sequence is valid for this struct.
unsafe impl FromBytes for FalconAppifHdrV1 {}
#[repr(C, packed)]
@@ -44,7 +72,7 @@ struct FalconAppifV1 {
id: u32,
dmem_base: u32,
}
-// SAFETY: any byte sequence is valid for this struct.
+// SAFETY: Any byte sequence is valid for this struct.
unsafe impl FromBytes for FalconAppifV1 {}
#[derive(Debug)]
@@ -68,8 +96,10 @@ struct FalconAppifDmemmapperV3 {
ucode_cmd_mask1: u32,
multi_tgt_tbl: u32,
}
-// SAFETY: any byte sequence is valid for this struct.
+// SAFETY: Any byte sequence is valid for this struct.
unsafe impl FromBytes for FalconAppifDmemmapperV3 {}
+// SAFETY: This struct doesn't contain uninitialized bytes and doesn't have interior mutability.
+unsafe impl AsBytes for FalconAppifDmemmapperV3 {}
#[derive(Debug)]
#[repr(C, packed)]
@@ -80,8 +110,10 @@ struct ReadVbios {
size: u32,
flags: u32,
}
-// SAFETY: any byte sequence is valid for this struct.
+// SAFETY: Any byte sequence is valid for this struct.
unsafe impl FromBytes for ReadVbios {}
+// SAFETY: This struct doesn't contain uninitialized bytes and doesn't have interior mutability.
+unsafe impl AsBytes for ReadVbios {}
#[derive(Debug)]
#[repr(C, packed)]
@@ -92,8 +124,10 @@ struct FrtsRegion {
size: u32,
ftype: u32,
}
-// SAFETY: any byte sequence is valid for this struct.
+// SAFETY: Any byte sequence is valid for this struct.
unsafe impl FromBytes for FrtsRegion {}
+// SAFETY: This struct doesn't contain uninitialized bytes and doesn't have interior mutability.
+unsafe impl AsBytes for FrtsRegion {}
const NVFW_FRTS_CMD_REGION_TYPE_FB: u32 = 2;
@@ -102,8 +136,10 @@ struct FrtsCmd {
read_vbios: ReadVbios,
frts_region: FrtsRegion,
}
-// SAFETY: any byte sequence is valid for this struct.
+// SAFETY: Any byte sequence is valid for this struct.
unsafe impl FromBytes for FrtsCmd {}
+// SAFETY: This struct doesn't contain uninitialized bytes and doesn't have interior mutability.
+unsafe impl AsBytes for FrtsCmd {}
const NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS: u32 = 0x15;
const NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB: u32 = 0x19;
@@ -147,26 +183,15 @@ impl FirmwareSignature<FwsecFirmware> for Bcrt30Rsa3kSignature {}
///
/// # Safety
///
-/// Callers must ensure that the region of memory returned is not written for as long as the
-/// returned reference is alive.
-///
-/// TODO[TRSM][COHA]: Remove this and `transmute_mut` once `CoherentAllocation::as_slice` is
-/// available and we have a way to transmute objects implementing FromBytes, e.g.:
-/// https://lore.kernel.org/lkml/20250330234039.29814-1-christiansantoslima21@gmail.com/
-unsafe fn transmute<'a, 'b, T: Sized + FromBytes>(
- fw: &'a DmaObject,
- offset: usize,
-) -> Result<&'b T> {
- if offset + size_of::<T>() > fw.size() {
- return Err(EINVAL);
- }
- if (fw.start_ptr() as usize + offset) % align_of::<T>() != 0 {
- return Err(EINVAL);
- }
-
- // SAFETY: we have checked that the pointer is properly aligned that its pointed memory is
- // large enough the contains an instance of `T`, which implements `FromBytes`.
- Ok(unsafe { &*(fw.start_ptr().add(offset).cast::<T>()) })
+/// * Callers must ensure that the device does not read/write to/from memory while the returned
+/// reference is live.
+/// * Callers must ensure that this call does not race with a write to the same region while
+/// the returned reference is live.
+unsafe fn transmute<T: Sized + FromBytes>(fw: &DmaObject, offset: usize) -> Result<&T> {
+ // SAFETY: The safety requirements of the function guarantee the device won't read
+ // or write to memory while the reference is alive and that this call won't race
+ // with writes to the same memory region.
+ T::from_bytes(unsafe { fw.as_slice(offset, size_of::<T>())? }).ok_or(EINVAL)
}
/// Reinterpret the area starting from `offset` in `fw` as a mutable instance of `T` (which must
@@ -174,22 +199,18 @@ unsafe fn transmute<'a, 'b, T: Sized + FromBytes>(
///
/// # Safety
///
-/// Callers must ensure that the region of memory returned is not read or written for as long as
-/// the returned reference is alive.
-unsafe fn transmute_mut<'a, 'b, T: Sized + FromBytes>(
- fw: &'a mut DmaObject,
+/// * Callers must ensure that the device does not read/write to/from memory while the returned
+/// slice is live.
+/// * Callers must ensure that this call does not race with a read or write to the same region
+/// while the returned slice is live.
+unsafe fn transmute_mut<T: Sized + FromBytes + AsBytes>(
+ fw: &mut DmaObject,
offset: usize,
-) -> Result<&'b mut T> {
- if offset + size_of::<T>() > fw.size() {
- return Err(EINVAL);
- }
- if (fw.start_ptr_mut() as usize + offset) % align_of::<T>() != 0 {
- return Err(EINVAL);
- }
-
- // SAFETY: we have checked that the pointer is properly aligned that its pointed memory is
- // large enough the contains an instance of `T`, which implements `FromBytes`.
- Ok(unsafe { &mut *(fw.start_ptr_mut().add(offset).cast::<T>()) })
+) -> Result<&mut T> {
+ // SAFETY: The safety requirements of the function guarantee the device won't read
+ // or write to memory while the reference is alive and that this call won't race
+ // with writes or reads to the same memory region.
+ T::from_bytes_mut(unsafe { fw.as_slice_mut(offset, size_of::<T>())? }).ok_or(EINVAL)
}
/// The FWSEC microcode, extracted from the BIOS and to be run on the GSP falcon.
@@ -202,9 +223,6 @@ pub(crate) struct FwsecFirmware {
ucode: FirmwareDmaObject<Self, Signed>,
}
-// We need to load full DMEM pages.
-const DMEM_LOAD_SIZE_ALIGN: u32 = 256;
-
impl FalconLoadParams for FwsecFirmware {
fn imem_load_params(&self) -> FalconLoadTarget {
FalconLoadTarget {
@@ -218,11 +236,7 @@ impl FalconLoadParams for FwsecFirmware {
FalconLoadTarget {
src_start: self.desc.imem_load_size,
dst_start: self.desc.dmem_phys_base,
- // TODO[NUMM]: replace with `align_up` once it lands.
- len: self
- .desc
- .dmem_load_size
- .next_multiple_of(DMEM_LOAD_SIZE_ALIGN),
+ len: self.desc.dmem_load_size,
}
}
@@ -253,11 +267,11 @@ impl FalconFirmware for FwsecFirmware {
impl FirmwareDmaObject<FwsecFirmware, Unsigned> {
fn new_fwsec(dev: &Device<device::Bound>, bios: &Vbios, cmd: FwsecCommand) -> Result<Self> {
- let desc = bios.fwsec_image().header(dev)?;
- let ucode = bios.fwsec_image().ucode(dev, desc)?;
+ let desc = bios.fwsec_image().header()?;
+ let ucode = bios.fwsec_image().ucode(desc)?;
let mut dma_object = DmaObject::from_data(dev, ucode)?;
- let hdr_offset = (desc.imem_load_size + desc.interface_offset) as usize;
+ let hdr_offset = usize::from_safe_cast(desc.imem_load_size + desc.interface_offset);
// SAFETY: we have exclusive access to `dma_object`.
let hdr: &FalconAppifHdrV1 = unsafe { transmute(&dma_object, hdr_offset) }?;
@@ -266,61 +280,62 @@ impl FirmwareDmaObject<FwsecFirmware, Unsigned> {
}
// Find the DMEM mapper section in the firmware.
- for i in 0..hdr.entry_count as usize {
- let app: &FalconAppifV1 =
+ for i in 0..usize::from(hdr.entry_count) {
// SAFETY: we have exclusive access to `dma_object`.
- unsafe {
+ let app: &FalconAppifV1 = unsafe {
transmute(
&dma_object,
- hdr_offset + hdr.header_size as usize + i * hdr.entry_size as usize
+ hdr_offset + usize::from(hdr.header_size) + i * usize::from(hdr.entry_size),
)
}?;
if app.id != NVFW_FALCON_APPIF_ID_DMEMMAPPER {
continue;
}
+ let dmem_base = app.dmem_base;
// SAFETY: we have exclusive access to `dma_object`.
let dmem_mapper: &mut FalconAppifDmemmapperV3 = unsafe {
transmute_mut(
&mut dma_object,
- (desc.imem_load_size + app.dmem_base) as usize,
+ (desc.imem_load_size + dmem_base).into_safe_cast(),
)
}?;
+ dmem_mapper.init_cmd = match cmd {
+ FwsecCommand::Frts { .. } => NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS,
+ FwsecCommand::Sb => NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB,
+ };
+ let cmd_in_buffer_offset = dmem_mapper.cmd_in_buffer_offset;
+
// SAFETY: we have exclusive access to `dma_object`.
let frts_cmd: &mut FrtsCmd = unsafe {
transmute_mut(
&mut dma_object,
- (desc.imem_load_size + dmem_mapper.cmd_in_buffer_offset) as usize,
+ (desc.imem_load_size + cmd_in_buffer_offset).into_safe_cast(),
)
}?;
frts_cmd.read_vbios = ReadVbios {
ver: 1,
- hdr: size_of::<ReadVbios>() as u32,
+ hdr: u32::try_from(size_of::<ReadVbios>())?,
addr: 0,
size: 0,
flags: 2,
};
-
- dmem_mapper.init_cmd = match cmd {
- FwsecCommand::Frts {
- frts_addr,
- frts_size,
- } => {
- frts_cmd.frts_region = FrtsRegion {
- ver: 1,
- hdr: size_of::<FrtsRegion>() as u32,
- addr: (frts_addr >> 12) as u32,
- size: (frts_size >> 12) as u32,
- ftype: NVFW_FRTS_CMD_REGION_TYPE_FB,
- };
-
- NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS
- }
- FwsecCommand::Sb => NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB,
- };
+ if let FwsecCommand::Frts {
+ frts_addr,
+ frts_size,
+ } = cmd
+ {
+ frts_cmd.frts_region = FrtsRegion {
+ ver: 1,
+ hdr: u32::try_from(size_of::<FrtsRegion>())?,
+ addr: u32::try_from(frts_addr >> 12)?,
+ size: u32::try_from(frts_size >> 12)?,
+ ftype: NVFW_FRTS_CMD_REGION_TYPE_FB,
+ };
+ }
// Return early as we found and patched the DMEMMAPPER region.
return Ok(Self(dma_object, PhantomData));
@@ -343,9 +358,9 @@ impl FwsecFirmware {
let ucode_dma = FirmwareDmaObject::<Self, _>::new_fwsec(dev, bios, cmd)?;
// Patch signature if needed.
- let desc = bios.fwsec_image().header(dev)?;
+ let desc = bios.fwsec_image().header()?;
let ucode_signed = if desc.signature_count != 0 {
- let sig_base_img = (desc.imem_load_size + desc.pkc_data_offset) as usize;
+ let sig_base_img = usize::from_safe_cast(desc.imem_load_size + desc.pkc_data_offset);
let desc_sig_versions = u32::from(desc.signature_versions);
let reg_fuse_version =
falcon.signature_reg_fuse_version(bar, desc.engine_id_mask, desc.ucode_id)?;
@@ -376,13 +391,13 @@ impl FwsecFirmware {
// Mask of the bits of `desc_sig_versions` to preserve.
let reg_fuse_version_mask = reg_fuse_version_bit.wrapping_sub(1);
- (desc_sig_versions & reg_fuse_version_mask).count_ones() as usize
+ usize::from_safe_cast((desc_sig_versions & reg_fuse_version_mask).count_ones())
};
dev_dbg!(dev, "patching signature with index {}\n", signature_idx);
let signature = bios
.fwsec_image()
- .sigs(dev, desc)
+ .sigs(desc)
.and_then(|sigs| sigs.get(signature_idx).ok_or(EINVAL))?;
ucode_dma.patch_signature(signature, sig_base_img)?
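The signature-index computation above is worth spelling out: the VBIOS stores one signature per set bit of `signature_versions`, in ascending bit order, so the index of the matching signature is the number of set bits below the hardware's version bit. A standalone example with invented values:

fn main() {
    let desc_sig_versions: u32 = 0b1_0110; // signatures present for versions 1, 2 and 4
    let reg_fuse_version_bit: u32 = 1 << 4; // hardware fuse selects version 4

    // Bits 1 and 2 are set below bit 4, so version 4's signature is the
    // third one stored, i.e. index 2.
    let mask = reg_fuse_version_bit.wrapping_sub(1);
    let signature_idx = (desc_sig_versions & mask).count_ones() as usize;
    assert_eq!(signature_idx, 2);
}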
diff --git a/drivers/gpu/nova-core/firmware/gsp.rs b/drivers/gpu/nova-core/firmware/gsp.rs
new file mode 100644
index 000000000000..0549805282ab
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/gsp.rs
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::mem::size_of_val;
+
+use kernel::{
+ device,
+ dma::{
+ DataDirection,
+ DmaAddress, //
+ },
+ kvec,
+ prelude::*,
+ scatterlist::{
+ Owned,
+ SGTable, //
+ },
+};
+
+use crate::{
+ dma::DmaObject,
+ firmware::riscv::RiscvFirmware,
+ gpu::{
+ Architecture,
+ Chipset, //
+ },
+ gsp::GSP_PAGE_SIZE,
+ num::FromSafeCast,
+};
+
+/// Ad-hoc and temporary module to extract sections from ELF images.
+///
+/// Some firmware images are currently packaged as ELF files, where section names are used as keys
+/// to specific and related bits of data. Future firmware versions are scheduled to move away from
+/// that scheme before nova-core becomes stable, which means this module will eventually be
+/// removed.
+mod elf {
+ use core::mem::size_of;
+
+ use kernel::bindings;
+ use kernel::str::CStr;
+ use kernel::transmute::FromBytes;
+
+ /// Newtype to provide a [`FromBytes`] implementation.
+ #[repr(transparent)]
+ struct Elf64Hdr(bindings::elf64_hdr);
+ // SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+ unsafe impl FromBytes for Elf64Hdr {}
+
+ #[repr(transparent)]
+ struct Elf64SHdr(bindings::elf64_shdr);
+ // SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+ unsafe impl FromBytes for Elf64SHdr {}
+
+ /// Tries to extract section with name `name` from the ELF64 image `elf`, and returns it.
+ pub(super) fn elf64_section<'a, 'b>(elf: &'a [u8], name: &'b str) -> Option<&'a [u8]> {
+ let hdr = &elf
+ .get(0..size_of::<bindings::elf64_hdr>())
+ .and_then(Elf64Hdr::from_bytes)?
+ .0;
+
+ // Get all the section headers.
+ let mut shdr = {
+ let shdr_num = usize::from(hdr.e_shnum);
+ let shdr_start = usize::try_from(hdr.e_shoff).ok()?;
+ let shdr_end = shdr_num
+ .checked_mul(size_of::<Elf64SHdr>())
+ .and_then(|v| v.checked_add(shdr_start))?;
+
+ elf.get(shdr_start..shdr_end)
+ .map(|slice| slice.chunks_exact(size_of::<Elf64SHdr>()))?
+ };
+
+ // Get the strings table.
+ let strhdr = shdr
+ .clone()
+ .nth(usize::from(hdr.e_shstrndx))
+ .and_then(Elf64SHdr::from_bytes)?;
+
+ // Find the section which name matches `name` and return it.
+ shdr.find(|&sh| {
+ let Some(hdr) = Elf64SHdr::from_bytes(sh) else {
+ return false;
+ };
+
+ let Some(name_idx) = strhdr
+ .0
+ .sh_offset
+ .checked_add(u64::from(hdr.0.sh_name))
+ .and_then(|idx| usize::try_from(idx).ok())
+ else {
+ return false;
+ };
+
+ // Get the start of the name.
+ elf.get(name_idx..)
+ // Stop at the first `0`.
+ .and_then(|nstr| nstr.get(0..=nstr.iter().position(|b| *b == 0)?))
+ // Convert into CStr. This should never fail because of the line above.
+ .and_then(|nstr| CStr::from_bytes_with_nul(nstr).ok())
+ // Convert into str.
+ .and_then(|c_str| c_str.to_str().ok())
+ // Check that the name matches.
+ .map(|str| str == name)
+ .unwrap_or(false)
+ })
+ // Return the slice containing the section.
+ .and_then(|sh| {
+ let hdr = Elf64SHdr::from_bytes(sh)?;
+ let start = usize::try_from(hdr.0.sh_offset).ok()?;
+ let end = usize::try_from(hdr.0.sh_size)
+ .ok()
+ .and_then(|sh_size| start.checked_add(sh_size))?;
+
+ elf.get(start..end)
+ })
+ }
+}
+
+/// GSP firmware with 3-level radix page tables for the GSP bootloader.
+///
+/// The bootloader expects firmware to be mapped starting at address 0 in GSP's virtual address
+/// space:
+///
+/// ```text
+/// Level 0: 1 page, 1 entry -> points to first level 1 page
+/// Level 1: Multiple pages/entries -> each entry points to a level 2 page
+/// Level 2: Multiple pages/entries -> each entry points to a firmware page
+/// ```
+///
+/// Each page is 4KB, each entry is 8 bytes (64-bit DMA address).
+/// Also known as "Radix3" firmware.
+#[pin_data]
+pub(crate) struct GspFirmware {
+ /// The GSP firmware inside a [`VVec`], device-mapped via a SG table.
+ #[pin]
+ fw: SGTable<Owned<VVec<u8>>>,
+ /// Level 2 page table whose entries contain DMA addresses of firmware pages.
+ #[pin]
+ level2: SGTable<Owned<VVec<u8>>>,
+ /// Level 1 page table whose entries contain DMA addresses of level 2 pages.
+ #[pin]
+ level1: SGTable<Owned<VVec<u8>>>,
+ /// Level 0 page table (single 4KB page) with one entry: DMA address of first level 1 page.
+ level0: DmaObject,
+ /// Size in bytes of the firmware contained in [`Self::fw`].
+ pub(crate) size: usize,
+ /// Device-mapped GSP signatures matching the GPU's [`Chipset`].
+ pub(crate) signatures: DmaObject,
+ /// GSP bootloader, verifies the GSP firmware before loading and running it.
+ pub(crate) bootloader: RiscvFirmware,
+}
+
+impl GspFirmware {
+ /// Loads the GSP firmware binaries, maps them into `dev`'s address space, and creates the page
+ /// tables expected by the GSP bootloader to load it.
+ pub(crate) fn new<'a, 'b>(
+ dev: &'a device::Device<device::Bound>,
+ chipset: Chipset,
+ ver: &'b str,
+ ) -> Result<impl PinInit<Self, Error> + 'a> {
+ let fw = super::request_firmware(dev, chipset, "gsp", ver)?;
+
+ let fw_section = elf::elf64_section(fw.data(), ".fwimage").ok_or(EINVAL)?;
+
+ let sigs_section = match chipset.arch() {
+ Architecture::Ampere => ".fwsignature_ga10x",
+ Architecture::Ada => ".fwsignature_ad10x",
+ _ => return Err(ENOTSUPP),
+ };
+ let signatures = elf::elf64_section(fw.data(), sigs_section)
+ .ok_or(EINVAL)
+ .and_then(|data| DmaObject::from_data(dev, data))?;
+
+ let size = fw_section.len();
+
+ // Move the firmware into a vmalloc'd vector and map it into the device address
+ // space.
+ let fw_vvec = VVec::with_capacity(fw_section.len(), GFP_KERNEL)
+ .and_then(|mut v| {
+ v.extend_from_slice(fw_section, GFP_KERNEL)?;
+ Ok(v)
+ })
+ .map_err(|_| ENOMEM)?;
+
+ let bl = super::request_firmware(dev, chipset, "bootloader", ver)?;
+ let bootloader = RiscvFirmware::new(dev, &bl)?;
+
+ Ok(try_pin_init!(Self {
+ fw <- SGTable::new(dev, fw_vvec, DataDirection::ToDevice, GFP_KERNEL),
+ level2 <- {
+ // Allocate the level 2 page table, map the firmware onto it, and map it into the
+ // device address space.
+ VVec::<u8>::with_capacity(
+ fw.iter().count() * core::mem::size_of::<u64>(),
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)
+ .and_then(|level2| map_into_lvl(&fw, level2))
+ .map(|level2| SGTable::new(dev, level2, DataDirection::ToDevice, GFP_KERNEL))?
+ },
+ level1 <- {
+ // Allocate the level 1 page table, map the level 2 page table onto it, and map it
+ // into the device address space.
+ VVec::<u8>::with_capacity(
+ level2.iter().count() * core::mem::size_of::<u64>(),
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)
+ .and_then(|level1| map_into_lvl(&level2, level1))
+ .map(|level1| SGTable::new(dev, level1, DataDirection::ToDevice, GFP_KERNEL))?
+ },
+ level0: {
+ // Allocate the level 0 page table as a device-visible DMA object, and map the
+ // level 1 page table onto it.
+
+ // Level 0 page table data.
+ let mut level0_data = kvec![0u8; GSP_PAGE_SIZE]?;
+
+ // Fill level 1 page entry.
+ let level1_entry = level1.iter().next().ok_or(EINVAL)?;
+ let level1_entry_addr = level1_entry.dma_address();
+ let dst = &mut level0_data[..size_of_val(&level1_entry_addr)];
+ dst.copy_from_slice(&level1_entry_addr.to_le_bytes());
+
+ // Turn the level0 page table into a [`DmaObject`].
+ DmaObject::from_data(dev, &level0_data)?
+ },
+ size,
+ signatures,
+ bootloader,
+ }))
+ }
+
+ /// Returns the DMA handle of the radix3 level 0 page table.
+ pub(crate) fn radix3_dma_handle(&self) -> DmaAddress {
+ self.level0.dma_handle()
+ }
+}
+
+/// Build a page table from a scatter-gather list.
+///
+/// Takes each DMA-mapped region from `sg_table` and writes page table entries
+/// for all 4KB pages within that region. For example, a 16KB SG entry becomes
+/// 4 consecutive page table entries.
+fn map_into_lvl(sg_table: &SGTable<Owned<VVec<u8>>>, mut dst: VVec<u8>) -> Result<VVec<u8>> {
+ for sg_entry in sg_table.iter() {
+ // Number of pages we need to map.
+ let num_pages = usize::from_safe_cast(sg_entry.dma_len()).div_ceil(GSP_PAGE_SIZE);
+
+ for i in 0..num_pages {
+ let entry = sg_entry.dma_address()
+ + (u64::from_safe_cast(i) * u64::from_safe_cast(GSP_PAGE_SIZE));
+ dst.extend_from_slice(&entry.to_le_bytes(), GFP_KERNEL)?;
+ }
+ }
+
+ Ok(dst)
+}
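Some back-of-the-envelope sizing for the radix3 tables built above, assuming 4 KiB pages and 8-byte entries (the firmware size is invented for illustration):

fn main() {
    const PAGE: usize = 4096;
    const ENTRY: usize = 8;

    let fw_size: usize = 64 * 1024 * 1024; // hypothetical 64 MiB GSP image

    // Level 2 needs one entry per firmware page.
    let l2_entries = fw_size.div_ceil(PAGE);
    let l2_size = l2_entries * ENTRY;
    assert_eq!(l2_entries, 16384); // 128 KiB of level 2 tables

    // Level 1 needs one entry per level 2 page.
    let l1_entries = l2_size.div_ceil(PAGE);
    let l1_size = l1_entries * ENTRY;
    assert_eq!(l1_entries, 32);

    // The level 1 entries fit in a single page, so level 0's lone entry
    // (one 4 KiB page holding one DMA address) is enough to reach everything.
    assert!(l1_size <= PAGE);
}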
diff --git a/drivers/gpu/nova-core/firmware/riscv.rs b/drivers/gpu/nova-core/firmware/riscv.rs
new file mode 100644
index 000000000000..28dfef63657a
--- /dev/null
+++ b/drivers/gpu/nova-core/firmware/riscv.rs
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for firmware binaries designed to run on a RISC-V core. Such firmware files have a
+//! dedicated header.
+
+use core::mem::size_of;
+
+use kernel::{
+ device,
+ firmware::Firmware,
+ prelude::*,
+ transmute::FromBytes, //
+};
+
+use crate::{
+ dma::DmaObject,
+ firmware::BinFirmware,
+ num::FromSafeCast, //
+};
+
+/// Descriptor for microcode running on a RISC-V core.
+#[repr(C)]
+#[derive(Debug)]
+struct RmRiscvUCodeDesc {
+ version: u32,
+ bootloader_offset: u32,
+ bootloader_size: u32,
+ bootloader_param_offset: u32,
+ bootloader_param_size: u32,
+ riscv_elf_offset: u32,
+ riscv_elf_size: u32,
+ app_version: u32,
+ manifest_offset: u32,
+ manifest_size: u32,
+ monitor_data_offset: u32,
+ monitor_data_size: u32,
+ monitor_code_offset: u32,
+ monitor_code_size: u32,
+}
+
+// SAFETY: all bit patterns are valid for this type, and it doesn't use interior mutability.
+unsafe impl FromBytes for RmRiscvUCodeDesc {}
+
+impl RmRiscvUCodeDesc {
+ /// Interprets the header of `bin_fw` as a [`RmRiscvUCodeDesc`] and returns it.
+ ///
+ /// Fails if the header pointed at by `bin_fw` is not within the bounds of the firmware image.
+ fn new(bin_fw: &BinFirmware<'_>) -> Result<Self> {
+ let offset = usize::from_safe_cast(bin_fw.hdr.header_offset);
+
+ bin_fw
+ .fw
+ .get(offset..offset + size_of::<Self>())
+ .and_then(Self::from_bytes_copy)
+ .ok_or(EINVAL)
+ }
+}
+
+/// A parsed firmware for a RISC-V core, ready to be loaded and run.
+pub(crate) struct RiscvFirmware {
+ /// Offset at which the code starts in the firmware image.
+ pub(crate) code_offset: u32,
+ /// Offset at which the data starts in the firmware image.
+ pub(crate) data_offset: u32,
+ /// Offset at which the manifest starts in the firmware image.
+ pub(crate) manifest_offset: u32,
+ /// Application version.
+ pub(crate) app_version: u32,
+ /// Device-mapped firmware image.
+ pub(crate) ucode: DmaObject,
+}
+
+impl RiscvFirmware {
+ /// Parses the RISC-V firmware image contained in `fw`.
+ pub(crate) fn new(dev: &device::Device<device::Bound>, fw: &Firmware) -> Result<Self> {
+ let bin_fw = BinFirmware::new(fw)?;
+
+ let riscv_desc = RmRiscvUCodeDesc::new(&bin_fw)?;
+
+ let ucode = {
+ let start = usize::from_safe_cast(bin_fw.hdr.data_offset);
+ let len = usize::from_safe_cast(bin_fw.hdr.data_size);
+
+ DmaObject::from_data(dev, fw.data().get(start..start + len).ok_or(EINVAL)?)?
+ };
+
+ Ok(Self {
+ ucode,
+ code_offset: riscv_desc.monitor_code_offset,
+ data_offset: riscv_desc.monitor_data_offset,
+ manifest_offset: riscv_desc.manifest_offset,
+ app_version: riscv_desc.app_version,
+ })
+ }
+}
diff --git a/drivers/gpu/nova-core/gfw.rs b/drivers/gpu/nova-core/gfw.rs
index 8ac1ed187199..9121f400046d 100644
--- a/drivers/gpu/nova-core/gfw.rs
+++ b/drivers/gpu/nova-core/gfw.rs
@@ -18,13 +18,16 @@
//!
//! Note that the devinit sequence also needs to run during suspend/resume.
-use kernel::bindings;
-use kernel::prelude::*;
-use kernel::time::Delta;
+use kernel::{
+ io::poll::read_poll_timeout,
+ prelude::*,
+ time::Delta, //
+};
-use crate::driver::Bar0;
-use crate::regs;
-use crate::util;
+use crate::{
+ driver::Bar0,
+ regs, //
+};
/// Wait for the `GFW` (GPU firmware) boot completion signal (`GFW_BOOT`), or a 4 seconds timeout.
///
@@ -50,22 +53,19 @@ pub(crate) fn wait_gfw_boot_completion(bar: &Bar0) -> Result {
//
// TIMEOUT: arbitrarily large value. GFW starts running immediately after the GPU is put out of
// reset, and should complete in less time than that.
- util::wait_on(Delta::from_secs(4), || {
- // Check that FWSEC has lowered its protection level before reading the GFW_BOOT status.
- let gfw_booted = regs::NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK::read(bar)
- .read_protection_level0()
- && regs::NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT::read(bar).completed();
-
- if gfw_booted {
- Some(())
- } else {
- // TODO[DLAY]: replace with [1] once it merges.
- // [1] https://lore.kernel.org/rust-for-linux/20250423192857.199712-6-fujita.tomonori@gmail.com/
- //
- // SAFETY: `msleep()` is safe to call with any parameter.
- unsafe { bindings::msleep(1) };
-
- None
- }
- })
+ read_poll_timeout(
+ || {
+ Ok(
+ // Check that FWSEC has lowered its protection level before reading the GFW_BOOT
+ // status.
+ regs::NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK::read(bar)
+ .read_protection_level0()
+ && regs::NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT::read(bar).completed(),
+ )
+ },
+ |&gfw_booted| gfw_booted,
+ Delta::from_millis(1),
+ Delta::from_secs(4),
+ )
+ .map(|_| ())
}
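For reference, read_poll_timeout() repeatedly invokes the read closure, tests the result against the condition, sleeps `delta` between attempts, and gives up after `timeout`. A simplified userspace model of that contract (not the kernel implementation):

use std::time::{Duration, Instant};

fn read_poll_timeout<T, E>(
    mut op: impl FnMut() -> Result<T, E>,
    cond: impl Fn(&T) -> bool,
    delta: Duration,
    timeout: Duration,
) -> Result<T, Option<E>> {
    let deadline = Instant::now() + timeout;
    loop {
        let val = op().map_err(Some)?;
        if cond(&val) {
            return Ok(val);
        }
        if Instant::now() >= deadline {
            return Err(None); // timed out
        }
        std::thread::sleep(delta);
    }
}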
diff --git a/drivers/gpu/nova-core/gpu.rs b/drivers/gpu/nova-core/gpu.rs
index b5c9786619a9..629c9d2dc994 100644
--- a/drivers/gpu/nova-core/gpu.rs
+++ b/drivers/gpu/nova-core/gpu.rs
@@ -1,18 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::{device, devres::Devres, error::code::*, pci, prelude::*, sync::Arc};
-
-use crate::driver::Bar0;
-use crate::falcon::{gsp::Gsp, sec2::Sec2, Falcon};
-use crate::fb::FbLayout;
-use crate::fb::SysmemFlush;
-use crate::firmware::fwsec::{FwsecCommand, FwsecFirmware};
-use crate::firmware::{Firmware, FIRMWARE_VERSION};
-use crate::gfw;
-use crate::regs;
-use crate::util;
-use crate::vbios::Vbios;
-use core::fmt;
+use kernel::{
+ device,
+ devres::Devres,
+ fmt,
+ pci,
+ prelude::*,
+ sync::Arc, //
+};
+
+use crate::{
+ driver::Bar0,
+ falcon::{
+ gsp::Gsp as GspFalcon,
+ sec2::Sec2 as Sec2Falcon,
+ Falcon, //
+ },
+ fb::SysmemFlush,
+ gfw,
+ gsp::Gsp,
+ regs,
+};
macro_rules! define_chipset {
({ $($variant:ident = $value:expr),* $(,)* }) =>
@@ -28,13 +36,23 @@ macro_rules! define_chipset {
$( Chipset::$variant, )*
];
- pub(crate) const NAMES: [&'static str; Self::ALL.len()] = [
- $( util::const_bytes_to_str(
- util::to_lowercase_bytes::<{ stringify!($variant).len() }>(
- stringify!($variant)
- ).as_slice()
- ), )*
- ];
+ ::kernel::macros::paste!(
+ /// Returns the name of this chipset, in lowercase.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let chipset = Chipset::GA102;
+ /// assert_eq!(chipset.name(), "ga102");
+ /// ```
+ pub(crate) const fn name(&self) -> &'static str {
+ match *self {
+ $(
+ Chipset::$variant => stringify!([<$variant:lower>]),
+ )*
+ }
+ }
+ );
}
// TODO[FPRI]: replace with something like derive(FromPrimitive)
@@ -104,8 +122,14 @@ impl fmt::Display for Chipset {
}
/// Enum representation of the GPU generation.
-#[derive(fmt::Debug)]
+///
+/// TODO: remove the `Default` trait implementation, and the `#[default]`
+/// attribute, once the register!() macro (which creates Architecture items) no
+/// longer requires it for read-only fields.
+#[derive(fmt::Debug, Default, Copy, Clone)]
+#[repr(u8)]
pub(crate) enum Architecture {
+ #[default]
Turing = 0x16,
Ampere = 0x17,
Ada = 0x19,
@@ -124,13 +148,20 @@ impl TryFrom<u8> for Architecture {
}
}
+impl From<Architecture> for u8 {
+ fn from(value: Architecture) -> Self {
+ // CAST: `Architecture` is `repr(u8)`, so this cast is always lossless.
+ value as u8
+ }
+}
+
pub(crate) struct Revision {
major: u8,
minor: u8,
}
-impl Revision {
- fn from_boot0(boot0: regs::NV_PMC_BOOT_0) -> Self {
+impl From<regs::NV_PMC_BOOT_42> for Revision {
+ fn from(boot0: regs::NV_PMC_BOOT_42) -> Self {
Self {
major: boot0.major_revision(),
minor: boot0.minor_revision(),
@@ -144,169 +175,128 @@ impl fmt::Display for Revision {
}
}
-/// Structure holding the metadata of the GPU.
+/// Structure holding a basic description of the GPU: `Chipset` and `Revision`.
pub(crate) struct Spec {
chipset: Chipset,
- /// The revision of the chipset.
revision: Revision,
}
impl Spec {
- fn new(bar: &Bar0) -> Result<Spec> {
+ fn new(dev: &device::Device, bar: &Bar0) -> Result<Spec> {
+ // Some brief notes about boot0 and boot42, in chronological order:
+ //
+ // NV04 through NV50:
+ //
+ // Not supported by Nova. boot0 is necessary and sufficient to identify these GPUs.
+ // boot42 may not even exist on some of these GPUs.
+ //
+ // Fermi through Volta:
+ //
+ // Not supported by Nova. boot0 is still sufficient to identify these GPUs, but boot42
+ // is also guaranteed to be both present and accurate.
+ //
+ // Turing and later:
+ //
+ // Supported by Nova. Identified by first checking boot0 to ensure that the GPU is not
+ // from an earlier (pre-Fermi) era, and then using boot42 to precisely identify the GPU.
+ // Somewhere in the Rubin timeframe, boot0 will no longer have space to add new GPU IDs.
+
let boot0 = regs::NV_PMC_BOOT_0::read(bar);
+ if boot0.is_older_than_fermi() {
+ return Err(ENODEV);
+ }
+
+ let boot42 = regs::NV_PMC_BOOT_42::read(bar);
+ Spec::try_from(boot42).inspect_err(|_| {
+ dev_err!(dev, "Unsupported chipset: {}\n", boot42);
+ })
+ }
+}
+
+impl TryFrom<regs::NV_PMC_BOOT_42> for Spec {
+ type Error = Error;
+
+ fn try_from(boot42: regs::NV_PMC_BOOT_42) -> Result<Self> {
Ok(Self {
- chipset: boot0.chipset()?,
- revision: Revision::from_boot0(boot0),
+ chipset: boot42.chipset()?,
+ revision: boot42.into(),
})
}
}
+impl fmt::Display for Spec {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_fmt(fmt!(
+ "Chipset: {}, Architecture: {:?}, Revision: {}",
+ self.chipset,
+ self.chipset.arch(),
+ self.revision
+ ))
+ }
+}
+
/// Structure holding the resources required to operate the GPU.
-#[pin_data(PinnedDrop)]
+#[pin_data]
pub(crate) struct Gpu {
spec: Spec,
/// MMIO mapping of PCI BAR 0
bar: Arc<Devres<Bar0>>,
- fw: Firmware,
/// System memory page required for flushing all pending GPU-side memory writes done through
/// PCIE into system memory, via sysmembar (A GPU-initiated HW memory-barrier operation).
sysmem_flush: SysmemFlush,
-}
-
-#[pinned_drop]
-impl PinnedDrop for Gpu {
- fn drop(self: Pin<&mut Self>) {
- // Unregister the sysmem flush page before we release it.
- self.bar
- .try_access_with(|b| self.sysmem_flush.unregister(b));
- }
+ /// GSP falcon instance, used for GSP boot up and cleanup.
+ gsp_falcon: Falcon<GspFalcon>,
+ /// SEC2 falcon instance, used for GSP boot up and cleanup.
+ sec2_falcon: Falcon<Sec2Falcon>,
+ /// GSP runtime data. Temporarily an empty placeholder.
+ #[pin]
+ gsp: Gsp,
}
impl Gpu {
- /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly
- /// created the WPR2 region.
- ///
- /// TODO: this needs to be moved into a larger type responsible for booting the whole GSP
- /// (`GspBooter`?).
- fn run_fwsec_frts(
- dev: &device::Device<device::Bound>,
- falcon: &Falcon<Gsp>,
- bar: &Bar0,
- bios: &Vbios,
- fb_layout: &FbLayout,
- ) -> Result<()> {
- // Check that the WPR2 region does not already exists - if it does, we cannot run
- // FWSEC-FRTS until the GPU is reset.
- if regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound() != 0 {
- dev_err!(
- dev,
- "WPR2 region already exists - GPU needs to be reset to proceed\n"
- );
- return Err(EBUSY);
- }
-
- let fwsec_frts = FwsecFirmware::new(
- dev,
- falcon,
- bar,
- bios,
- FwsecCommand::Frts {
- frts_addr: fb_layout.frts.start,
- frts_size: fb_layout.frts.end - fb_layout.frts.start,
- },
- )?;
-
- // Run FWSEC-FRTS to create the WPR2 region.
- fwsec_frts.run(dev, falcon, bar)?;
-
- // SCRATCH_E contains the error code for FWSEC-FRTS.
- let frts_status = regs::NV_PBUS_SW_SCRATCH_0E::read(bar).frts_err_code();
- if frts_status != 0 {
- dev_err!(
- dev,
- "FWSEC-FRTS returned with error code {:#x}",
- frts_status
- );
-
- return Err(EIO);
- }
-
- // Check that the WPR2 region has been created as we requested.
- let (wpr2_lo, wpr2_hi) = (
- regs::NV_PFB_PRI_MMU_WPR2_ADDR_LO::read(bar).lower_bound(),
- regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound(),
- );
-
- match (wpr2_lo, wpr2_hi) {
- (_, 0) => {
- dev_err!(dev, "WPR2 region not created after running FWSEC-FRTS\n");
-
- Err(EIO)
- }
- (wpr2_lo, _) if wpr2_lo != fb_layout.frts.start => {
- dev_err!(
- dev,
- "WPR2 region created at unexpected address {:#x}; expected {:#x}\n",
- wpr2_lo,
- fb_layout.frts.start,
- );
-
- Err(EIO)
- }
- (wpr2_lo, wpr2_hi) => {
- dev_dbg!(dev, "WPR2: {:#x}-{:#x}\n", wpr2_lo, wpr2_hi);
- dev_dbg!(dev, "GPU instance built\n");
-
- Ok(())
- }
- }
- }
-
- pub(crate) fn new(
- pdev: &pci::Device<device::Bound>,
+ pub(crate) fn new<'a>(
+ pdev: &'a pci::Device<device::Bound>,
devres_bar: Arc<Devres<Bar0>>,
- ) -> Result<impl PinInit<Self>> {
- let bar = devres_bar.access(pdev.as_ref())?;
- let spec = Spec::new(bar)?;
- let fw = Firmware::new(pdev.as_ref(), spec.chipset, FIRMWARE_VERSION)?;
-
- dev_info!(
- pdev.as_ref(),
- "NVIDIA (Chipset: {}, Architecture: {:?}, Revision: {})\n",
- spec.chipset,
- spec.chipset.arch(),
- spec.revision
- );
-
- // We must wait for GFW_BOOT completion before doing any significant setup on the GPU.
- gfw::wait_gfw_boot_completion(bar)
- .inspect_err(|_| dev_err!(pdev.as_ref(), "GFW boot did not complete"))?;
-
- let sysmem_flush = SysmemFlush::register(pdev.as_ref(), bar, spec.chipset)?;
+ bar: &'a Bar0,
+ ) -> impl PinInit<Self, Error> + 'a {
+ try_pin_init!(Self {
+ spec: Spec::new(pdev.as_ref(), bar).inspect(|spec| {
+ dev_info!(pdev.as_ref(),"NVIDIA ({})\n", spec);
+ })?,
+
+ // We must wait for GFW_BOOT completion before doing any significant setup on the GPU.
+ _: {
+ gfw::wait_gfw_boot_completion(bar)
+ .inspect_err(|_| dev_err!(pdev.as_ref(), "GFW boot did not complete"))?;
+ },
- let gsp_falcon = Falcon::<Gsp>::new(
- pdev.as_ref(),
- spec.chipset,
- bar,
- spec.chipset > Chipset::GA100,
- )?;
- gsp_falcon.clear_swgen0_intr(bar);
+ sysmem_flush: SysmemFlush::register(pdev.as_ref(), bar, spec.chipset)?,
- let _sec2_falcon = Falcon::<Sec2>::new(pdev.as_ref(), spec.chipset, bar, true)?;
+ gsp_falcon: Falcon::new(
+ pdev.as_ref(),
+ spec.chipset,
+ )
+ .inspect(|falcon| falcon.clear_swgen0_intr(bar))?,
- let fb_layout = FbLayout::new(spec.chipset, bar)?;
- dev_dbg!(pdev.as_ref(), "{:#x?}\n", fb_layout);
+ sec2_falcon: Falcon::new(pdev.as_ref(), spec.chipset)?,
- let bios = Vbios::new(pdev, bar)?;
+ gsp <- Gsp::new(pdev)?,
- Self::run_fwsec_frts(pdev.as_ref(), &gsp_falcon, bar, &bios, &fb_layout)?;
+ _: { gsp.boot(pdev, bar, spec.chipset, gsp_falcon, sec2_falcon)? },
- Ok(pin_init!(Self {
- spec,
bar: devres_bar,
- fw,
- sysmem_flush,
- }))
+ })
+ }
+
+ /// Called when the corresponding [`Device`](device::Device) is unbound.
+ ///
+ /// Note: This method must only be called from `Driver::unbind`.
+ pub(crate) fn unbind(&self, dev: &device::Device<device::Core>) {
+ kernel::warn_on!(self
+ .bar
+ .access(dev)
+ .inspect(|bar| self.sysmem_flush.unregister(bar))
+ .is_err());
}
}
diff --git a/drivers/gpu/nova-core/gsp.rs b/drivers/gpu/nova-core/gsp.rs
new file mode 100644
index 000000000000..fb6f74797178
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp.rs
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+
+mod boot;
+
+use kernel::{
+ device,
+ dma::{
+ CoherentAllocation,
+ DmaAddress, //
+ },
+ dma_write,
+ pci,
+ prelude::*,
+ transmute::AsBytes, //
+};
+
+pub(crate) mod cmdq;
+pub(crate) mod commands;
+mod fw;
+mod sequencer;
+
+pub(crate) use fw::{
+ GspFwWprMeta,
+ LibosParams, //
+};
+
+use crate::{
+ gsp::cmdq::Cmdq,
+ gsp::fw::{
+ GspArgumentsCached,
+ LibosMemoryRegionInitArgument, //
+ },
+ num,
+};
+
+pub(crate) const GSP_PAGE_SHIFT: usize = 12;
+pub(crate) const GSP_PAGE_SIZE: usize = 1 << GSP_PAGE_SHIFT;
+
+/// Number of GSP pages to use in a RM log buffer.
+const RM_LOG_BUFFER_NUM_PAGES: usize = 0x10;
+
+/// Array of page table entries, as understood by the GSP bootloader.
+#[repr(C)]
+struct PteArray<const NUM_ENTRIES: usize>([u64; NUM_ENTRIES]);
+
+// SAFETY: arrays of `u64` implement `AsBytes` and we are but a wrapper around one.
+unsafe impl<const NUM_ENTRIES: usize> AsBytes for PteArray<NUM_ENTRIES> {}
+
+impl<const NUM_PAGES: usize> PteArray<NUM_PAGES> {
+ /// Creates a new page table array mapping `NUM_PAGES` GSP pages starting at address `start`.
+ fn new(start: DmaAddress) -> Result<Self> {
+ let mut ptes = [0u64; NUM_PAGES];
+ for (i, pte) in ptes.iter_mut().enumerate() {
+ *pte = start
+ .checked_add(num::usize_as_u64(i) << GSP_PAGE_SHIFT)
+ .ok_or(EOVERFLOW)?;
+ }
+
+ Ok(Self(ptes))
+ }
+}
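+
+// For example, with `GSP_PAGE_SHIFT == 12`, `PteArray::<4>::new(0x1000)` fills one
+// entry per GSP page:
+//
+//     let ptes = PteArray::<4>::new(0x1000)?;
+//     assert_eq!(ptes.0, [0x1000, 0x2000, 0x3000, 0x4000]);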
+
+/// The logging buffers are byte queues that contain encoded printf-like
+/// messages from GSP-RM. They need to be decoded by a special application
+/// that can parse the buffers.
+///
+/// The 'loginit' buffer contains logs from early GSP-RM init and
+/// exception dumps. The 'logrm' buffer contains the subsequent logs. Both are
+/// written to directly by GSP-RM and can be any multiple of GSP_PAGE_SIZE.
+///
+/// The physical address map for the log buffer is stored in the buffer
+/// itself, starting with offset 1. Offset 0 contains the "put" pointer (pp).
+/// Initially, pp is equal to 0. If the buffer has valid logging data in it,
+/// then pp is the index into the buffer where the next logging entry will
+/// be written. Therefore, the logging data is valid if:
+/// 1 <= pp < sizeof(buffer)/sizeof(u64)
+struct LogBuffer(CoherentAllocation<u8>);
+
+impl LogBuffer {
+ /// Creates a new `LogBuffer` mapped on `dev`.
+ fn new(dev: &device::Device<device::Bound>) -> Result<Self> {
+ const NUM_PAGES: usize = RM_LOG_BUFFER_NUM_PAGES;
+
+ let mut obj = Self(CoherentAllocation::<u8>::alloc_coherent(
+ dev,
+ NUM_PAGES * GSP_PAGE_SIZE,
+ GFP_KERNEL | __GFP_ZERO,
+ )?);
+ let ptes = PteArray::<NUM_PAGES>::new(obj.0.dma_handle())?;
+
+ // SAFETY: `obj` has just been created and we are its sole user.
+ unsafe {
+            // Copy the self-mapping PTEs to the expected location.
+ obj.0
+ .as_slice_mut(size_of::<u64>(), size_of_val(&ptes))?
+ .copy_from_slice(ptes.as_bytes())
+ };
+
+ Ok(obj)
+ }
+}
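+
+// For example, a log decoder reading one of these buffers would view it as an
+// array of little-endian `u64` words and validate the "put" pointer as follows
+// (sketch, not part of the driver):
+//
+//     let pp = words[0];
+//     let valid = pp >= 1 && pp < (words.len() as u64);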
+
+/// GSP runtime data.
+#[pin_data]
+pub(crate) struct Gsp {
+ /// Libos arguments.
+ pub(crate) libos: CoherentAllocation<LibosMemoryRegionInitArgument>,
+ /// Init log buffer.
+ loginit: LogBuffer,
+ /// Interrupts log buffer.
+ logintr: LogBuffer,
+ /// RM log buffer.
+ logrm: LogBuffer,
+ /// Command queue.
+ pub(crate) cmdq: Cmdq,
+ /// RM arguments.
+ rmargs: CoherentAllocation<GspArgumentsCached>,
+}
+
+impl Gsp {
+    /// Creates an in-place initializer for a `Gsp` manager for `pdev`.
+ pub(crate) fn new(pdev: &pci::Device<device::Bound>) -> Result<impl PinInit<Self, Error>> {
+ let dev = pdev.as_ref();
+ let libos = CoherentAllocation::<LibosMemoryRegionInitArgument>::alloc_coherent(
+ dev,
+ GSP_PAGE_SIZE / size_of::<LibosMemoryRegionInitArgument>(),
+ GFP_KERNEL | __GFP_ZERO,
+ )?;
+
+ // Initialise the logging structures. The OpenRM equivalents are in:
+ // _kgspInitLibosLoggingStructures (allocates memory for buffers)
+ // kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
+ let loginit = LogBuffer::new(dev)?;
+ dma_write!(libos[0] = LibosMemoryRegionInitArgument::new("LOGINIT", &loginit.0))?;
+
+ let logintr = LogBuffer::new(dev)?;
+ dma_write!(libos[1] = LibosMemoryRegionInitArgument::new("LOGINTR", &logintr.0))?;
+
+ let logrm = LogBuffer::new(dev)?;
+ dma_write!(libos[2] = LibosMemoryRegionInitArgument::new("LOGRM", &logrm.0))?;
+
+ let cmdq = Cmdq::new(dev)?;
+
+ let rmargs = CoherentAllocation::<GspArgumentsCached>::alloc_coherent(
+ dev,
+ 1,
+ GFP_KERNEL | __GFP_ZERO,
+ )?;
+ dma_write!(rmargs[0] = fw::GspArgumentsCached::new(&cmdq))?;
+ dma_write!(libos[3] = LibosMemoryRegionInitArgument::new("RMARGS", &rmargs))?;
+
+ Ok(try_pin_init!(Self {
+ libos,
+ loginit,
+ logintr,
+ logrm,
+ rmargs,
+ cmdq,
+ }))
+ }
+}
diff --git a/drivers/gpu/nova-core/gsp/boot.rs b/drivers/gpu/nova-core/gsp/boot.rs
new file mode 100644
index 000000000000..54937606b5b0
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/boot.rs
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{
+ device,
+ dma::CoherentAllocation,
+ dma_write,
+ io::poll::read_poll_timeout,
+ pci,
+ prelude::*,
+ time::Delta, //
+};
+
+use crate::{
+ driver::Bar0,
+ falcon::{
+ gsp::Gsp,
+ sec2::Sec2,
+ Falcon, //
+ },
+ fb::FbLayout,
+ firmware::{
+ booter::{
+ BooterFirmware,
+ BooterKind, //
+ },
+ fwsec::{
+ FwsecCommand,
+ FwsecFirmware, //
+ },
+ gsp::GspFirmware,
+ FIRMWARE_VERSION, //
+ },
+ gpu::Chipset,
+ gsp::{
+ commands,
+ sequencer::{
+ GspSequencer,
+ GspSequencerParams, //
+ },
+ GspFwWprMeta, //
+ },
+ regs,
+ vbios::Vbios,
+};
+
+impl super::Gsp {
+ /// Helper function to load and run the FWSEC-FRTS firmware and confirm that it has properly
+ /// created the WPR2 region.
+ fn run_fwsec_frts(
+ dev: &device::Device<device::Bound>,
+ falcon: &Falcon<Gsp>,
+ bar: &Bar0,
+ bios: &Vbios,
+ fb_layout: &FbLayout,
+ ) -> Result<()> {
+        // Check that the WPR2 region does not already exist - if it does, we cannot run
+ // FWSEC-FRTS until the GPU is reset.
+ if regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound() != 0 {
+ dev_err!(
+ dev,
+ "WPR2 region already exists - GPU needs to be reset to proceed\n"
+ );
+ return Err(EBUSY);
+ }
+
+ let fwsec_frts = FwsecFirmware::new(
+ dev,
+ falcon,
+ bar,
+ bios,
+ FwsecCommand::Frts {
+ frts_addr: fb_layout.frts.start,
+ frts_size: fb_layout.frts.end - fb_layout.frts.start,
+ },
+ )?;
+
+ // Run FWSEC-FRTS to create the WPR2 region.
+ fwsec_frts.run(dev, falcon, bar)?;
+
+ // SCRATCH_E contains the error code for FWSEC-FRTS.
+ let frts_status = regs::NV_PBUS_SW_SCRATCH_0E_FRTS_ERR::read(bar).frts_err_code();
+ if frts_status != 0 {
+ dev_err!(
+ dev,
+ "FWSEC-FRTS returned with error code {:#x}",
+ frts_status
+ );
+
+ return Err(EIO);
+ }
+
+ // Check that the WPR2 region has been created as we requested.
+ let (wpr2_lo, wpr2_hi) = (
+ regs::NV_PFB_PRI_MMU_WPR2_ADDR_LO::read(bar).lower_bound(),
+ regs::NV_PFB_PRI_MMU_WPR2_ADDR_HI::read(bar).higher_bound(),
+ );
+
+ match (wpr2_lo, wpr2_hi) {
+ (_, 0) => {
+ dev_err!(dev, "WPR2 region not created after running FWSEC-FRTS\n");
+
+ Err(EIO)
+ }
+ (wpr2_lo, _) if wpr2_lo != fb_layout.frts.start => {
+ dev_err!(
+ dev,
+ "WPR2 region created at unexpected address {:#x}; expected {:#x}\n",
+ wpr2_lo,
+ fb_layout.frts.start,
+ );
+
+ Err(EIO)
+ }
+ (wpr2_lo, wpr2_hi) => {
+ dev_dbg!(dev, "WPR2: {:#x}-{:#x}\n", wpr2_lo, wpr2_hi);
+ dev_dbg!(dev, "GPU instance built\n");
+
+ Ok(())
+ }
+ }
+ }
+
+ /// Attempt to boot the GSP.
+ ///
+ /// This is a GPU-dependent and complex procedure that involves loading firmware files from
+ /// user-space, patching them with signatures, and building firmware-specific intricate data
+ /// structures that the GSP will use at runtime.
+ ///
+    /// Upon successful return, the GSP is up and running.
+ pub(crate) fn boot(
+ mut self: Pin<&mut Self>,
+ pdev: &pci::Device<device::Bound>,
+ bar: &Bar0,
+ chipset: Chipset,
+ gsp_falcon: &Falcon<Gsp>,
+ sec2_falcon: &Falcon<Sec2>,
+ ) -> Result {
+ let dev = pdev.as_ref();
+
+ let bios = Vbios::new(dev, bar)?;
+
+ let gsp_fw = KBox::pin_init(
+ GspFirmware::new(dev, chipset, FIRMWARE_VERSION)?,
+ GFP_KERNEL,
+ )?;
+
+ let fb_layout = FbLayout::new(chipset, bar, &gsp_fw)?;
+ dev_dbg!(dev, "{:#x?}\n", fb_layout);
+
+ Self::run_fwsec_frts(dev, gsp_falcon, bar, &bios, &fb_layout)?;
+
+ let booter_loader = BooterFirmware::new(
+ dev,
+ BooterKind::Loader,
+ chipset,
+ FIRMWARE_VERSION,
+ sec2_falcon,
+ bar,
+ )?;
+
+ let wpr_meta =
+ CoherentAllocation::<GspFwWprMeta>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?;
+ dma_write!(wpr_meta[0] = GspFwWprMeta::new(&gsp_fw, &fb_layout))?;
+
+ self.cmdq
+ .send_command(bar, commands::SetSystemInfo::new(pdev))?;
+ self.cmdq.send_command(bar, commands::SetRegistry::new())?;
+
+ gsp_falcon.reset(bar)?;
+ let libos_handle = self.libos.dma_handle();
+ let (mbox0, mbox1) = gsp_falcon.boot(
+ bar,
+ Some(libos_handle as u32),
+ Some((libos_handle >> 32) as u32),
+ )?;
+ dev_dbg!(
+ pdev.as_ref(),
+ "GSP MBOX0: {:#x}, MBOX1: {:#x}\n",
+ mbox0,
+ mbox1
+ );
+
+ dev_dbg!(
+ pdev.as_ref(),
+ "Using SEC2 to load and run the booter_load firmware...\n"
+ );
+
+ sec2_falcon.reset(bar)?;
+ sec2_falcon.dma_load(bar, &booter_loader)?;
+ let wpr_handle = wpr_meta.dma_handle();
+ let (mbox0, mbox1) = sec2_falcon.boot(
+ bar,
+ Some(wpr_handle as u32),
+ Some((wpr_handle >> 32) as u32),
+ )?;
+ dev_dbg!(
+ pdev.as_ref(),
+ "SEC2 MBOX0: {:#x}, MBOX1{:#x}\n",
+ mbox0,
+ mbox1
+ );
+
+ if mbox0 != 0 {
+ dev_err!(
+ pdev.as_ref(),
+ "Booter-load failed with error {:#x}\n",
+ mbox0
+ );
+ return Err(ENODEV);
+ }
+
+ gsp_falcon.write_os_version(bar, gsp_fw.bootloader.app_version);
+
+        // Poll for the RISC-V core to become active before running the sequencer.
+ read_poll_timeout(
+ || Ok(gsp_falcon.is_riscv_active(bar)),
+ |val: &bool| *val,
+ Delta::from_millis(10),
+ Delta::from_secs(5),
+ )?;
+
+ dev_dbg!(
+ pdev.as_ref(),
+ "RISC-V active? {}\n",
+ gsp_falcon.is_riscv_active(bar),
+ );
+
+ // Create and run the GSP sequencer.
+ let seq_params = GspSequencerParams {
+ bootloader_app_version: gsp_fw.bootloader.app_version,
+ libos_dma_handle: libos_handle,
+ gsp_falcon,
+ sec2_falcon,
+ dev: pdev.as_ref().into(),
+ bar,
+ };
+ GspSequencer::run(&mut self.cmdq, seq_params)?;
+
+ // Wait until GSP is fully initialized.
+ commands::wait_gsp_init_done(&mut self.cmdq)?;
+
+ // Obtain and display basic GPU information.
+ let info = commands::get_gsp_info(&mut self.cmdq, bar)?;
+ dev_info!(
+ pdev.as_ref(),
+ "GPU name: {}\n",
+ info.gpu_name().unwrap_or("invalid GPU name")
+ );
+
+ Ok(())
+ }
+}
diff --git a/drivers/gpu/nova-core/gsp/cmdq.rs b/drivers/gpu/nova-core/gsp/cmdq.rs
new file mode 100644
index 000000000000..6f946d14868a
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/cmdq.rs
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::{
+ cmp,
+ mem,
+ sync::atomic::{
+ fence,
+ Ordering, //
+ }, //
+};
+
+use kernel::{
+ device,
+ dma::{
+ CoherentAllocation,
+ DmaAddress, //
+ },
+ dma_write,
+ io::poll::read_poll_timeout,
+ prelude::*,
+ sync::aref::ARef,
+ time::Delta,
+ transmute::{
+ AsBytes,
+ FromBytes, //
+ },
+};
+
+use crate::{
+ driver::Bar0,
+ gsp::{
+ fw::{
+ GspMsgElement,
+ MsgFunction,
+ MsgqRxHeader,
+ MsgqTxHeader, //
+ },
+ PteArray,
+ GSP_PAGE_SHIFT,
+ GSP_PAGE_SIZE, //
+ },
+ num,
+ regs,
+ sbuffer::SBufferIter, //
+};
+
+/// Trait implemented by types representing a command to send to the GSP.
+///
+/// The main purpose of this trait is to provide [`Cmdq::send_command`] with the information it
+/// needs to send a given command.
+///
+/// [`CommandToGsp::init`] in particular is responsible for initializing the command directly
+/// into the space reserved for it in the command queue buffer.
+///
+/// Some commands may be followed by a variable-length payload. For these, the
+/// [`CommandToGsp::variable_payload_len`] and [`CommandToGsp::init_variable_payload`] need to be
+/// defined as well.
+pub(crate) trait CommandToGsp {
+ /// Function identifying this command to the GSP.
+ const FUNCTION: MsgFunction;
+
+ /// Type generated by [`CommandToGsp::init`], to be written into the command queue buffer.
+ type Command: FromBytes + AsBytes;
+
+ /// Error type returned by [`CommandToGsp::init`].
+ type InitError;
+
+ /// In-place command initializer responsible for filling the command in the command queue
+ /// buffer.
+ fn init(&self) -> impl Init<Self::Command, Self::InitError>;
+
+ /// Size of the variable-length payload following the command structure generated by
+ /// [`CommandToGsp::init`].
+ ///
+ /// Most commands don't have a variable-length payload, so this is zero by default.
+ fn variable_payload_len(&self) -> usize {
+ 0
+ }
+
+ /// Method initializing the variable-length payload.
+ ///
+ /// The command buffer is circular, which means that we may need to jump back to its beginning
+ /// while in the middle of a command. For this reason, the variable-length payload is
+ /// initialized using a [`SBufferIter`].
+ ///
+ /// This method will receive a buffer of the length returned by
+ /// [`CommandToGsp::variable_payload_len`], and must write every single byte of it. Leaving
+ /// unwritten space will lead to an error.
+ ///
+ /// Most commands don't have a variable-length payload, so this does nothing by default.
+ fn init_variable_payload(
+ &self,
+ _dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
+ ) -> Result {
+ Ok(())
+ }
+}
+
+/// Trait representing messages received from the GSP.
+///
+/// This trait tells [`Cmdq::receive_msg`] how it can receive a given type of message.
+pub(crate) trait MessageFromGsp: Sized {
+ /// Function identifying this message from the GSP.
+ const FUNCTION: MsgFunction;
+
+ /// Error type returned by [`MessageFromGsp::read`].
+ type InitError;
+
+ /// Type containing the raw message to be read from the message queue.
+ type Message: FromBytes;
+
+ /// Method reading the message from the message queue and returning it.
+ ///
+ /// From a `Self::Message` and a [`SBufferIter`], constructs an instance of `Self` and returns
+ /// it.
+ fn read(
+ msg: &Self::Message,
+ sbuffer: &mut SBufferIter<core::array::IntoIter<&[u8], 2>>,
+ ) -> Result<Self, Self::InitError>;
+}
+
+/// Number of GSP pages making the [`Msgq`].
+pub(crate) const MSGQ_NUM_PAGES: u32 = 0x3f;
+
+/// Circular buffer of a [`Msgq`].
+///
+/// This area of memory is to be shared between the driver and the GSP to exchange commands or
+/// messages.
+#[repr(C, align(0x1000))]
+#[derive(Debug)]
+struct MsgqData {
+ data: [[u8; GSP_PAGE_SIZE]; num::u32_as_usize(MSGQ_NUM_PAGES)],
+}
+
+// Annoyingly we are forced to use a literal to specify the alignment of
+// `MsgqData`, so check that it corresponds to the actual GSP page size here.
+static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);
+
+/// Unidirectional message queue.
+///
+/// Contains the data for a message queue, that either the driver or GSP writes to.
+///
+/// Note that while the write pointer of `tx` corresponds to the `msgq` of the same instance, the
+/// read pointer of `rx` actually refers to the `Msgq` owned by the other side.
+/// This design ensures that only the driver or GSP ever writes to a given instance of this struct.
+#[repr(C)]
+// There is no struct defined for this in the open-gpu-kernel-source headers.
+// Instead it is defined by code in `GspMsgQueuesInit()`.
+struct Msgq {
+ /// Header for sending messages, including the write pointer.
+ tx: MsgqTxHeader,
+ /// Header for receiving messages, including the read pointer.
+ rx: MsgqRxHeader,
+ /// The message queue proper.
+ msgq: MsgqData,
+}
+
+/// Structure shared between the driver and the GSP and containing the command and message queues.
+#[repr(C)]
+struct GspMem {
+ /// Self-mapping page table entries.
+ ptes: PteArray<{ GSP_PAGE_SIZE / size_of::<u64>() }>,
+ /// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
+ /// write and read pointers that the CPU updates.
+ ///
+ /// This member is read-only for the GSP.
+ cpuq: Msgq,
+ /// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
+ /// write and read pointers that the GSP updates.
+ ///
+ /// This member is read-only for the driver.
+ gspq: Msgq,
+}
+
+// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
+// that is not a problem because they are not used outside the kernel.
+unsafe impl AsBytes for GspMem {}
+
+// SAFETY: These structs don't meet the no-padding requirements of FromBytes but
+// that is not a problem because they are not used outside the kernel.
+unsafe impl FromBytes for GspMem {}
+
+/// Wrapper around [`GspMem`] to share it with the GPU using a [`CoherentAllocation`].
+///
+/// This provides the low-level functionality to communicate with the GSP, including allocation of
+/// queue space to write messages to and management of read/write pointers.
+///
+/// This is shared with the GSP, with clear ownership rules regarding the command queues:
+///
+/// * The driver owns (i.e. can write to) the part of the CPU message queue between the CPU write
+/// pointer and the GSP read pointer. This region is returned by [`Self::driver_write_area`].
+/// * The driver owns (i.e. can read from) the part of the GSP message queue between the CPU read
+/// pointer and the GSP write pointer. This region is returned by [`Self::driver_read_area`].
+struct DmaGspMem(CoherentAllocation<GspMem>);
+
+impl DmaGspMem {
+ /// Allocate a new instance and map it for `dev`.
+ fn new(dev: &device::Device<device::Bound>) -> Result<Self> {
+ const MSGQ_SIZE: u32 = num::usize_into_u32::<{ size_of::<Msgq>() }>();
+ const RX_HDR_OFF: u32 = num::usize_into_u32::<{ mem::offset_of!(Msgq, rx) }>();
+
+ let gsp_mem =
+ CoherentAllocation::<GspMem>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?;
+ dma_write!(gsp_mem[0].ptes = PteArray::new(gsp_mem.dma_handle())?)?;
+ dma_write!(gsp_mem[0].cpuq.tx = MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES))?;
+ dma_write!(gsp_mem[0].cpuq.rx = MsgqRxHeader::new())?;
+
+ Ok(Self(gsp_mem))
+ }
+
+ /// Returns the region of the CPU message queue that the driver is currently allowed to write
+ /// to.
+ ///
+ /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
+ /// that case the second slice will have a non-zero length.
+ fn driver_write_area(&mut self) -> (&mut [[u8; GSP_PAGE_SIZE]], &mut [[u8; GSP_PAGE_SIZE]]) {
+ let tx = self.cpu_write_ptr() as usize;
+ let rx = self.gsp_read_ptr() as usize;
+
+ // SAFETY:
+ // - The `CoherentAllocation` contains exactly one object.
+ // - We will only access the driver-owned part of the shared memory.
+ // - Per the safety statement of the function, no concurrent access will be performed.
+ let gsp_mem = &mut unsafe { self.0.as_slice_mut(0, 1) }.unwrap()[0];
+ // PANIC: per the invariant of `cpu_write_ptr`, `tx` is `<= MSGQ_NUM_PAGES`.
+ let (before_tx, after_tx) = gsp_mem.cpuq.msgq.data.split_at_mut(tx);
+
+ if rx <= tx {
+ // The area from `tx` up to the end of the ring, and from the beginning of the ring up
+ // to `rx`, minus one unit, belongs to the driver.
+ if rx == 0 {
+ let last = after_tx.len() - 1;
+ (&mut after_tx[..last], &mut before_tx[0..0])
+ } else {
+ (after_tx, &mut before_tx[..rx])
+ }
+ } else {
+ // The area from `tx` to `rx`, minus one unit, belongs to the driver.
+ //
+ // PANIC: per the invariants of `cpu_write_ptr` and `gsp_read_ptr`, `rx` and `tx` are
+ // `<= MSGQ_NUM_PAGES`, and the test above ensured that `rx > tx`.
+            (after_tx.split_at_mut(rx - tx - 1).0, &mut before_tx[0..0])
+ }
+ }
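+
+    // For example, with `MSGQ_NUM_PAGES == 63`, `tx == 60` and `rx == 2`, the
+    // writable area is pages 60..=62 (first slice) plus page 0 (second slice):
+    // four pages in total, i.e. `(rx - tx - 1) mod 63`. One page always remains
+    // unused so that a full queue can be told apart from an empty one.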
+
+ /// Returns the region of the GSP message queue that the driver is currently allowed to read
+ /// from.
+ ///
+ /// As the message queue is a circular buffer, the region may be discontiguous in memory. In
+ /// that case the second slice will have a non-zero length.
+ fn driver_read_area(&self) -> (&[[u8; GSP_PAGE_SIZE]], &[[u8; GSP_PAGE_SIZE]]) {
+ let tx = self.gsp_write_ptr() as usize;
+ let rx = self.cpu_read_ptr() as usize;
+
+ // SAFETY:
+ // - The `CoherentAllocation` contains exactly one object.
+ // - We will only access the driver-owned part of the shared memory.
+ // - Per the safety statement of the function, no concurrent access will be performed.
+ let gsp_mem = &unsafe { self.0.as_slice(0, 1) }.unwrap()[0];
+        // PANIC: per the invariant of `cpu_read_ptr`, `rx` is `<= MSGQ_NUM_PAGES`.
+ let (before_rx, after_rx) = gsp_mem.gspq.msgq.data.split_at(rx);
+
+ match tx.cmp(&rx) {
+ cmp::Ordering::Equal => (&after_rx[0..0], &after_rx[0..0]),
+ cmp::Ordering::Greater => (&after_rx[..tx], &before_rx[0..0]),
+ cmp::Ordering::Less => (after_rx, &before_rx[..tx]),
+ }
+ }
+
+ /// Allocates a region on the command queue that is large enough to send a command of `size`
+ /// bytes.
+ ///
+ /// This returns a [`GspCommand`] ready to be written to by the caller.
+ ///
+ /// # Errors
+ ///
+ /// - `EAGAIN` if the driver area is too small to hold the requested command.
+ /// - `EIO` if the command header is not properly aligned.
+ fn allocate_command(&mut self, size: usize) -> Result<GspCommand<'_>> {
+ // Get the current writable area as an array of bytes.
+ let (slice_1, slice_2) = {
+ let (slice_1, slice_2) = self.driver_write_area();
+
+ #[allow(clippy::incompatible_msrv)]
+ (slice_1.as_flattened_mut(), slice_2.as_flattened_mut())
+ };
+
+ // If the GSP is still processing previous messages the shared region
+ // may be full in which case we will have to retry once the GSP has
+ // processed the existing commands.
+ if size_of::<GspMsgElement>() + size > slice_1.len() + slice_2.len() {
+ return Err(EAGAIN);
+ }
+
+ // Extract area for the `GspMsgElement`.
+ let (header, slice_1) = GspMsgElement::from_bytes_mut_prefix(slice_1).ok_or(EIO)?;
+
+ // Create the contents area.
+ let (slice_1, slice_2) = if slice_1.len() > size {
+ // Contents fits entirely in `slice_1`.
+ (&mut slice_1[..size], &mut slice_2[0..0])
+ } else {
+ // Need all of `slice_1` and some of `slice_2`.
+ let slice_2_len = size - slice_1.len();
+ (slice_1, &mut slice_2[..slice_2_len])
+ };
+
+ Ok(GspCommand {
+ header,
+ contents: (slice_1, slice_2),
+ })
+ }
+
+ // Returns the index of the memory page the GSP will write the next message to.
+ //
+ // # Invariants
+ //
+ // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
+ fn gsp_write_ptr(&self) -> u32 {
+ let gsp_mem = self.0.start_ptr();
+
+ // SAFETY:
+ // - The 'CoherentAllocation' contains at least one object.
+ // - By the invariants of `CoherentAllocation` the pointer is valid.
+ (unsafe { (*gsp_mem).gspq.tx.write_ptr() } % MSGQ_NUM_PAGES)
+ }
+
+ // Returns the index of the memory page the GSP will read the next command from.
+ //
+ // # Invariants
+ //
+ // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
+ fn gsp_read_ptr(&self) -> u32 {
+ let gsp_mem = self.0.start_ptr();
+
+ // SAFETY:
+ // - The 'CoherentAllocation' contains at least one object.
+ // - By the invariants of `CoherentAllocation` the pointer is valid.
+ (unsafe { (*gsp_mem).gspq.rx.read_ptr() } % MSGQ_NUM_PAGES)
+ }
+
+ // Returns the index of the memory page the CPU can read the next message from.
+ //
+ // # Invariants
+ //
+ // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
+ fn cpu_read_ptr(&self) -> u32 {
+ let gsp_mem = self.0.start_ptr();
+
+ // SAFETY:
+        // - The 'CoherentAllocation' contains at least one object.
+        // - By the invariants of `CoherentAllocation` the pointer is valid.
+ (unsafe { (*gsp_mem).cpuq.rx.read_ptr() } % MSGQ_NUM_PAGES)
+ }
+
+ // Informs the GSP that it can send `elem_count` new pages into the message queue.
+ fn advance_cpu_read_ptr(&mut self, elem_count: u32) {
+ let rptr = self.cpu_read_ptr().wrapping_add(elem_count) % MSGQ_NUM_PAGES;
+
+ // Ensure read pointer is properly ordered.
+ fence(Ordering::SeqCst);
+
+ let gsp_mem = self.0.start_ptr_mut();
+
+ // SAFETY:
+ // - The 'CoherentAllocation' contains at least one object.
+ // - By the invariants of `CoherentAllocation` the pointer is valid.
+ unsafe { (*gsp_mem).cpuq.rx.set_read_ptr(rptr) };
+ }
+
+ // Returns the index of the memory page the CPU can write the next command to.
+ //
+ // # Invariants
+ //
+ // - The returned value is between `0` and `MSGQ_NUM_PAGES`.
+ fn cpu_write_ptr(&self) -> u32 {
+ let gsp_mem = self.0.start_ptr();
+
+ // SAFETY:
+ // - The 'CoherentAllocation' contains at least one object.
+ // - By the invariants of `CoherentAllocation` the pointer is valid.
+ (unsafe { (*gsp_mem).cpuq.tx.write_ptr() } % MSGQ_NUM_PAGES)
+ }
+
+ // Informs the GSP that it can process `elem_count` new pages from the command queue.
+ fn advance_cpu_write_ptr(&mut self, elem_count: u32) {
+        let wptr = self.cpu_write_ptr().wrapping_add(elem_count) % MSGQ_NUM_PAGES;
+ let gsp_mem = self.0.start_ptr_mut();
+
+ // SAFETY:
+ // - The 'CoherentAllocation' contains at least one object.
+ // - By the invariants of `CoherentAllocation` the pointer is valid.
+ unsafe { (*gsp_mem).cpuq.tx.set_write_ptr(wptr) };
+
+ // Ensure all command data is visible before triggering the GSP read.
+ fence(Ordering::SeqCst);
+ }
+}
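+
+// The read and write pointers always advance modulo `MSGQ_NUM_PAGES`. For
+// example, a CPU write pointer of 62 advanced by 2 pages wraps to
+// `(62 + 2) % 63 == 1`.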
+
+/// A command ready to be sent on the command queue.
+///
+/// This is the type returned by [`DmaGspMem::allocate_command`].
+struct GspCommand<'a> {
+ // Writable reference to the header of the command.
+ header: &'a mut GspMsgElement,
+    // Writable slices to the contents of the command. The second slice is empty unless the
+    // command wraps around the end of the command queue.
+ contents: (&'a mut [u8], &'a mut [u8]),
+}
+
+/// A message ready to be processed from the message queue.
+///
+/// This is the type returned by [`Cmdq::wait_for_msg`].
+struct GspMessage<'a> {
+ // Reference to the header of the message.
+ header: &'a GspMsgElement,
+    // Slices to the contents of the message. The second slice is empty unless the message
+    // wraps around the end of the message queue.
+ contents: (&'a [u8], &'a [u8]),
+}
+
+/// GSP command queue.
+///
+/// Provides the ability to send commands and receive messages from the GSP using a shared memory
+/// area.
+pub(crate) struct Cmdq {
+ /// Device this command queue belongs to.
+ dev: ARef<device::Device>,
+ /// Current command sequence number.
+ seq: u32,
+ /// Memory area shared with the GSP for communicating commands and messages.
+ gsp_mem: DmaGspMem,
+}
+
+impl Cmdq {
+ /// Offset of the data after the PTEs.
+ const POST_PTE_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq);
+
+ /// Offset of command queue ring buffer.
+ pub(crate) const CMDQ_OFFSET: usize = core::mem::offset_of!(GspMem, cpuq)
+ + core::mem::offset_of!(Msgq, msgq)
+ - Self::POST_PTE_OFFSET;
+
+ /// Offset of message queue ring buffer.
+ pub(crate) const STATQ_OFFSET: usize = core::mem::offset_of!(GspMem, gspq)
+ + core::mem::offset_of!(Msgq, msgq)
+ - Self::POST_PTE_OFFSET;
+
+ /// Number of page table entries for the GSP shared region.
+ pub(crate) const NUM_PTES: usize = size_of::<GspMem>() >> GSP_PAGE_SHIFT;
+
+ /// Creates a new command queue for `dev`.
+ pub(crate) fn new(dev: &device::Device<device::Bound>) -> Result<Cmdq> {
+ let gsp_mem = DmaGspMem::new(dev)?;
+
+ Ok(Cmdq {
+ dev: dev.into(),
+ seq: 0,
+ gsp_mem,
+ })
+ }
+
+ /// Computes the checksum for the message pointed to by `it`.
+ ///
+ /// A message is made of several parts, so `it` is an iterator over byte slices representing
+ /// these parts.
+ fn calculate_checksum<T: Iterator<Item = u8>>(it: T) -> u32 {
+ let sum64 = it
+ .enumerate()
+ .map(|(idx, byte)| (((idx % 8) * 8) as u32, byte))
+ .fold(0, |acc, (rol, byte)| acc ^ u64::from(byte).rotate_left(rol));
+
+ ((sum64 >> 32) as u32) ^ (sum64 as u32)
+ }
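+
+    // For example, byte 0 is XORed into bits 0..=7 of the 64-bit accumulator,
+    // byte 1 into bits 8..=15, ..., and byte 8 into bits 0..=7 again; the two
+    // 32-bit halves are then folded together with a final XOR. Since the stored
+    // checksum takes part in the fold, verifying a message (as `wait_for_msg`
+    // does) amounts to checking that the result is zero.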
+
+ /// Notifies the GSP that we have updated the command queue pointers.
+ fn notify_gsp(bar: &Bar0) {
+ regs::NV_PGSP_QUEUE_HEAD::default()
+ .set_address(0)
+ .write(bar);
+ }
+
+ /// Sends `command` to the GSP.
+ ///
+ /// # Errors
+ ///
+ /// - `EAGAIN` if there was not enough space in the command queue to send the command.
+ /// - `EIO` if the variable payload requested by the command has not been entirely
+ /// written to by its [`CommandToGsp::init_variable_payload`] method.
+ ///
+ /// Error codes returned by the command initializers are propagated as-is.
+ pub(crate) fn send_command<M>(&mut self, bar: &Bar0, command: M) -> Result
+ where
+ M: CommandToGsp,
+ // This allows all error types, including `Infallible`, to be used for `M::InitError`.
+ Error: From<M::InitError>,
+ {
+ let command_size = size_of::<M::Command>() + command.variable_payload_len();
+ let dst = self.gsp_mem.allocate_command(command_size)?;
+
+ // Extract area for the command itself.
+ let (cmd, payload_1) = M::Command::from_bytes_mut_prefix(dst.contents.0).ok_or(EIO)?;
+
+ // Fill the header and command in-place.
+ let msg_element = GspMsgElement::init(self.seq, command_size, M::FUNCTION);
+        // SAFETY: `dst.header` and `cmd` are valid references, and are not touched if the
+        // initializer fails.
+ unsafe {
+ msg_element.__init(core::ptr::from_mut(dst.header))?;
+ command.init().__init(core::ptr::from_mut(cmd))?;
+ }
+
+ // Fill the variable-length payload.
+ if command_size > size_of::<M::Command>() {
+ let mut sbuffer =
+ SBufferIter::new_writer([&mut payload_1[..], &mut dst.contents.1[..]]);
+ command.init_variable_payload(&mut sbuffer)?;
+
+ if !sbuffer.is_empty() {
+ return Err(EIO);
+ }
+ }
+
+ // Compute checksum now that the whole message is ready.
+ dst.header
+ .set_checksum(Cmdq::calculate_checksum(SBufferIter::new_reader([
+ dst.header.as_bytes(),
+ dst.contents.0,
+ dst.contents.1,
+ ])));
+
+ dev_dbg!(
+ &self.dev,
+ "GSP RPC: send: seq# {}, function={}, length=0x{:x}\n",
+ self.seq,
+ M::FUNCTION,
+ dst.header.length(),
+ );
+
+ // All set - update the write pointer and inform the GSP of the new command.
+ let elem_count = dst.header.element_count();
+ self.seq += 1;
+ self.gsp_mem.advance_cpu_write_ptr(elem_count);
+ Cmdq::notify_gsp(bar);
+
+ Ok(())
+ }
+
+ /// Wait for a message to become available on the message queue.
+ ///
+ /// This works purely at the transport layer and does not interpret or validate the message
+ /// beyond the advertised length in its [`GspMsgElement`].
+ ///
+    /// This method returns a [`GspMessage`] containing:
+    ///
+    /// - A reference to the [`GspMsgElement`] of the message,
+    /// - Two byte slices with the contents of the message. The second slice is empty unless the
+    ///   message wraps around the end of the message queue.
+ ///
+ /// # Errors
+ ///
+ /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
+ /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
+ /// message queue.
+ ///
+ /// Error codes returned by the message constructor are propagated as-is.
+ fn wait_for_msg(&self, timeout: Delta) -> Result<GspMessage<'_>> {
+ // Wait for a message to arrive from the GSP.
+ let (slice_1, slice_2) = read_poll_timeout(
+ || Ok(self.gsp_mem.driver_read_area()),
+ |driver_area| !driver_area.0.is_empty(),
+ Delta::from_millis(1),
+ timeout,
+ )
+ .map(|(slice_1, slice_2)| {
+ #[allow(clippy::incompatible_msrv)]
+ (slice_1.as_flattened(), slice_2.as_flattened())
+ })?;
+
+ // Extract the `GspMsgElement`.
+ let (header, slice_1) = GspMsgElement::from_bytes_prefix(slice_1).ok_or(EIO)?;
+
+ dev_dbg!(
+ self.dev,
+ "GSP RPC: receive: seq# {}, function={:?}, length=0x{:x}\n",
+ header.sequence(),
+ header.function(),
+ header.length(),
+ );
+
+ // Check that the driver read area is large enough for the message.
+ if slice_1.len() + slice_2.len() < header.length() {
+ return Err(EIO);
+ }
+
+ // Cut the message slices down to the actual length of the message.
+ let (slice_1, slice_2) = if slice_1.len() > header.length() {
+            // PANIC: we checked above that `slice_1` is at least as long as `header.length()`.
+ (slice_1.split_at(header.length()).0, &slice_2[0..0])
+ } else {
+ (
+ slice_1,
+                // PANIC: we checked above that `slice_1.len() + slice_2.len()` is at least as
+                // large as `header.length()`.
+ slice_2.split_at(header.length() - slice_1.len()).0,
+ )
+ };
+
+ // Validate checksum.
+ if Cmdq::calculate_checksum(SBufferIter::new_reader([
+ header.as_bytes(),
+ slice_1,
+ slice_2,
+ ])) != 0
+ {
+ dev_err!(
+ self.dev,
+ "GSP RPC: receive: Call {} - bad checksum",
+ header.sequence()
+ );
+ return Err(EIO);
+ }
+
+ Ok(GspMessage {
+ header,
+ contents: (slice_1, slice_2),
+ })
+ }
+
+ /// Receive a message from the GSP.
+ ///
+    /// The message is decoded by [`MessageFromGsp::read`], which receives a reference to the
+    /// raw message in the message queue and a [`SBufferIter`] pointing to its variable-length
+    /// payload, if any.
+    ///
+    /// The expected message is specified using the `M` generic parameter. If the pending message
+    /// is different, `ERANGE` is returned and the unexpected message is dropped.
+ ///
+ /// This design is by no means final, but it is simple and will let us go through GSP
+ /// initialization.
+ ///
+ /// # Errors
+ ///
+ /// - `ETIMEDOUT` if `timeout` has elapsed before any message becomes available.
+ /// - `EIO` if there was some inconsistency (e.g. message shorter than advertised) on the
+ /// message queue.
+    /// - `EINVAL` if the function of the message was unrecognized.
+    /// - `ERANGE` if a valid message of a type other than `M` was received.
+ pub(crate) fn receive_msg<M: MessageFromGsp>(&mut self, timeout: Delta) -> Result<M>
+ where
+ // This allows all error types, including `Infallible`, to be used for `M::InitError`.
+ Error: From<M::InitError>,
+ {
+ let message = self.wait_for_msg(timeout)?;
+ let function = message.header.function().map_err(|_| EINVAL)?;
+
+ // Extract the message. Store the result as we want to advance the read pointer even in
+ // case of failure.
+ let result = if function == M::FUNCTION {
+ let (cmd, contents_1) = M::Message::from_bytes_prefix(message.contents.0).ok_or(EIO)?;
+ let mut sbuffer = SBufferIter::new_reader([contents_1, message.contents.1]);
+
+ M::read(cmd, &mut sbuffer).map_err(|e| e.into())
+ } else {
+ Err(ERANGE)
+ };
+
+ // Advance the read pointer past this message.
+ self.gsp_mem.advance_cpu_read_ptr(u32::try_from(
+ message.header.length().div_ceil(GSP_PAGE_SIZE),
+ )?);
+
+ result
+ }
+
+ /// Returns the DMA handle of the command queue's shared memory region.
+ pub(crate) fn dma_handle(&self) -> DmaAddress {
+ self.gsp_mem.0.dma_handle()
+ }
+}
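+
+// Typical usage (sketch; the command and reply types are defined in
+// `gsp/commands.rs`):
+//
+//     cmdq.send_command(bar, SetRegistry::new())?;
+//     let reply = cmdq.receive_msg::<GetGspStaticInfoReply>(Delta::from_secs(5))?;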
diff --git a/drivers/gpu/nova-core/gsp/commands.rs b/drivers/gpu/nova-core/gsp/commands.rs
new file mode 100644
index 000000000000..0425c65b5d6f
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/commands.rs
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::{
+ array,
+ convert::Infallible, //
+};
+
+use kernel::{
+ device,
+ pci,
+ prelude::*,
+ time::Delta,
+ transmute::{
+ AsBytes,
+ FromBytes, //
+ }, //
+};
+
+use crate::{
+ driver::Bar0,
+ gsp::{
+ cmdq::{
+ Cmdq,
+ CommandToGsp,
+ MessageFromGsp, //
+ },
+ fw::{
+ commands::*,
+ MsgFunction, //
+ },
+ },
+ sbuffer::SBufferIter,
+ util,
+};
+
+/// The `GspSetSystemInfo` command.
+pub(crate) struct SetSystemInfo<'a> {
+ pdev: &'a pci::Device<device::Bound>,
+}
+
+impl<'a> SetSystemInfo<'a> {
+ /// Creates a new `GspSetSystemInfo` command using the parameters of `pdev`.
+ pub(crate) fn new(pdev: &'a pci::Device<device::Bound>) -> Self {
+ Self { pdev }
+ }
+}
+
+impl<'a> CommandToGsp for SetSystemInfo<'a> {
+ const FUNCTION: MsgFunction = MsgFunction::GspSetSystemInfo;
+ type Command = GspSetSystemInfo;
+ type InitError = Error;
+
+ fn init(&self) -> impl Init<Self::Command, Self::InitError> {
+ GspSetSystemInfo::init(self.pdev)
+ }
+}
+
+struct RegistryEntry {
+ key: &'static str,
+ value: u32,
+}
+
+/// The `SetRegistry` command.
+pub(crate) struct SetRegistry {
+ entries: [RegistryEntry; Self::NUM_ENTRIES],
+}
+
+impl SetRegistry {
+ // For now we hard-code the registry entries. Future work will allow others to
+ // be added as module parameters.
+ const NUM_ENTRIES: usize = 3;
+
+ /// Creates a new `SetRegistry` command, using a set of hardcoded entries.
+ pub(crate) fn new() -> Self {
+ Self {
+ entries: [
+ // RMSecBusResetEnable - enables PCI secondary bus reset
+ RegistryEntry {
+ key: "RMSecBusResetEnable",
+ value: 1,
+ },
+ // RMForcePcieConfigSave - forces GSP-RM to preserve PCI configuration registers on
+ // any PCI reset.
+ RegistryEntry {
+ key: "RMForcePcieConfigSave",
+ value: 1,
+ },
+ // RMDevidCheckIgnore - allows GSP-RM to boot even if the PCI dev ID is not found
+ // in the internal product name database.
+ RegistryEntry {
+ key: "RMDevidCheckIgnore",
+ value: 1,
+ },
+ ],
+ }
+ }
+}
+
+impl CommandToGsp for SetRegistry {
+ const FUNCTION: MsgFunction = MsgFunction::SetRegistry;
+ type Command = PackedRegistryTable;
+ type InitError = Infallible;
+
+ fn init(&self) -> impl Init<Self::Command, Self::InitError> {
+ PackedRegistryTable::init(Self::NUM_ENTRIES as u32, self.variable_payload_len() as u32)
+ }
+
+    fn variable_payload_len(&self) -> usize {
+        let key_size: usize = self
+            .entries
+            .iter()
+            .map(|entry| entry.key.len() + 1) // +1 for the NUL terminator.
+            .sum();
+        Self::NUM_ENTRIES * size_of::<PackedRegistryEntry>() + key_size
+    }
+
+ fn init_variable_payload(
+ &self,
+ dst: &mut SBufferIter<core::array::IntoIter<&mut [u8], 2>>,
+ ) -> Result {
+ let string_data_start_offset =
+ size_of::<PackedRegistryTable>() + Self::NUM_ENTRIES * size_of::<PackedRegistryEntry>();
+
+ // Array for string data.
+ let mut string_data = KVec::new();
+
+        for entry in &self.entries {
+ dst.write_all(
+ PackedRegistryEntry::new(
+ (string_data_start_offset + string_data.len()) as u32,
+ entry.value,
+ )
+ .as_bytes(),
+ )?;
+
+ let key_bytes = entry.key.as_bytes();
+ string_data.extend_from_slice(key_bytes, GFP_KERNEL)?;
+ string_data.push(0, GFP_KERNEL)?;
+ }
+
+ dst.write_all(string_data.as_slice())
+ }
+}
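+
+// The resulting command payload is laid out as a `PackedRegistryTable` header,
+// followed by `NUM_ENTRIES` fixed-size `PackedRegistryEntry` records, followed
+// by the NUL-terminated key strings. Each entry stores the offset of its key,
+// measured from the start of the `PackedRegistryTable`; for example, the first
+// key lands at offset
+// `size_of::<PackedRegistryTable>() + NUM_ENTRIES * size_of::<PackedRegistryEntry>()`.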
+
+/// Message type for GSP initialization done notification.
+struct GspInitDone {}
+
+// SAFETY: `GspInitDone` is a zero-sized type with no bytes, therefore it
+// trivially has no uninitialized bytes.
+unsafe impl FromBytes for GspInitDone {}
+
+impl MessageFromGsp for GspInitDone {
+ const FUNCTION: MsgFunction = MsgFunction::GspInitDone;
+ type InitError = Infallible;
+ type Message = GspInitDone;
+
+ fn read(
+ _msg: &Self::Message,
+ _sbuffer: &mut SBufferIter<array::IntoIter<&[u8], 2>>,
+ ) -> Result<Self, Self::InitError> {
+ Ok(GspInitDone {})
+ }
+}
+
+/// Waits for GSP initialization to complete.
+pub(crate) fn wait_gsp_init_done(cmdq: &mut Cmdq) -> Result {
+ loop {
+ match cmdq.receive_msg::<GspInitDone>(Delta::from_secs(10)) {
+ Ok(_) => break Ok(()),
+ Err(ERANGE) => continue,
+ Err(e) => break Err(e),
+ }
+ }
+}
+
+/// The `GetGspStaticInfo` command.
+struct GetGspStaticInfo;
+
+impl CommandToGsp for GetGspStaticInfo {
+ const FUNCTION: MsgFunction = MsgFunction::GetGspStaticInfo;
+ type Command = GspStaticConfigInfo;
+ type InitError = Infallible;
+
+ fn init(&self) -> impl Init<Self::Command, Self::InitError> {
+ GspStaticConfigInfo::init_zeroed()
+ }
+}
+
+/// The reply from the GSP to the [`GetGspStaticInfo`] command.
+pub(crate) struct GetGspStaticInfoReply {
+ gpu_name: [u8; 64],
+}
+
+impl MessageFromGsp for GetGspStaticInfoReply {
+ const FUNCTION: MsgFunction = MsgFunction::GetGspStaticInfo;
+ type Message = GspStaticConfigInfo;
+ type InitError = Infallible;
+
+ fn read(
+ msg: &Self::Message,
+ _sbuffer: &mut SBufferIter<array::IntoIter<&[u8], 2>>,
+ ) -> Result<Self, Self::InitError> {
+ Ok(GetGspStaticInfoReply {
+ gpu_name: msg.gpu_name_str(),
+ })
+ }
+}
+
+impl GetGspStaticInfoReply {
+ /// Returns the name of the GPU as a string, or `None` if the string given by the GSP was
+ /// invalid.
+ pub(crate) fn gpu_name(&self) -> Option<&str> {
+ util::str_from_null_terminated(&self.gpu_name)
+ }
+}
+
+/// Sends the [`GetGspStaticInfo`] command and awaits its reply.
+pub(crate) fn get_gsp_info(cmdq: &mut Cmdq, bar: &Bar0) -> Result<GetGspStaticInfoReply> {
+ cmdq.send_command(bar, GetGspStaticInfo)?;
+
+ loop {
+ match cmdq.receive_msg::<GetGspStaticInfoReply>(Delta::from_secs(5)) {
+ Ok(info) => return Ok(info),
+ Err(ERANGE) => continue,
+ Err(e) => return Err(e),
+ }
+ }
+}
diff --git a/drivers/gpu/nova-core/gsp/fw.rs b/drivers/gpu/nova-core/gsp/fw.rs
new file mode 100644
index 000000000000..abffd6beec65
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw.rs
@@ -0,0 +1,928 @@
+// SPDX-License-Identifier: GPL-2.0
+
+pub(crate) mod commands;
+mod r570_144;
+
+// Alias to avoid repeating the version number with every use.
+use r570_144 as bindings;
+
+use core::ops::Range;
+
+use kernel::{
+ dma::CoherentAllocation,
+ fmt,
+ prelude::*,
+ ptr::{
+ Alignable,
+ Alignment, //
+ },
+ sizes::{
+ SZ_128K,
+ SZ_1M, //
+ },
+ transmute::{
+ AsBytes,
+ FromBytes, //
+ },
+};
+
+use crate::{
+ fb::FbLayout,
+ firmware::gsp::GspFirmware,
+ gpu::Chipset,
+ gsp::{
+ cmdq::Cmdq, //
+ GSP_PAGE_SIZE,
+ },
+ num::{
+ self,
+ FromSafeCast, //
+ },
+};
+
+/// Empty type to group methods related to heap parameters for running the GSP firmware.
+enum GspFwHeapParams {}
+
+/// Minimum required alignment for the GSP heap.
+const GSP_HEAP_ALIGNMENT: Alignment = Alignment::new::<{ 1 << 20 }>();
+
+impl GspFwHeapParams {
+ /// Returns the amount of GSP-RM heap memory used during GSP-RM boot and initialization (up to
+ /// and including the first client subdevice allocation).
+ fn base_rm_size(_chipset: Chipset) -> u64 {
+ // TODO: this needs to be updated to return the correct value for Hopper+ once support for
+ // them is added:
+ // u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_GH100)
+ u64::from(bindings::GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X)
+ }
+
+ /// Returns the amount of heap memory required to support a single channel allocation.
+ fn client_alloc_size() -> u64 {
+ u64::from(bindings::GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE)
+ .align_up(GSP_HEAP_ALIGNMENT)
+ .unwrap_or(u64::MAX)
+ }
+
+ /// Returns the amount of memory to reserve for management purposes for a framebuffer of size
+ /// `fb_size`.
+ fn management_overhead(fb_size: u64) -> u64 {
+ let fb_size_gb = fb_size.div_ceil(u64::from_safe_cast(kernel::sizes::SZ_1G));
+
+ u64::from(bindings::GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB)
+ .saturating_mul(fb_size_gb)
+ .align_up(GSP_HEAP_ALIGNMENT)
+ .unwrap_or(u64::MAX)
+ }
+}
+
+/// Heap memory requirements and constraints for a given version of the GSP LIBOS.
+pub(crate) struct LibosParams {
+ /// The base amount of heap required by the GSP operating system, in bytes.
+ carveout_size: u64,
+ /// The minimum and maximum sizes allowed for the GSP FW heap, in bytes.
+ allowed_heap_size: Range<u64>,
+}
+
+impl LibosParams {
+    /// Version 2 of the GSP LIBOS (Turing and GA100).
+ const LIBOS2: LibosParams = LibosParams {
+ carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2),
+ allowed_heap_size: num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB)
+ * num::usize_as_u64(SZ_1M)
+ ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB)
+ * num::usize_as_u64(SZ_1M),
+ };
+
+    /// Version 3 of the GSP LIBOS (GA102+).
+ const LIBOS3: LibosParams = LibosParams {
+ carveout_size: num::u32_as_u64(bindings::GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL),
+ allowed_heap_size: num::u32_as_u64(
+ bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB,
+ ) * num::usize_as_u64(SZ_1M)
+ ..num::u32_as_u64(bindings::GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB)
+ * num::usize_as_u64(SZ_1M),
+ };
+
+ /// Returns the libos parameters corresponding to `chipset`.
+ pub(crate) fn from_chipset(chipset: Chipset) -> &'static LibosParams {
+ if chipset < Chipset::GA102 {
+ &Self::LIBOS2
+ } else {
+ &Self::LIBOS3
+ }
+ }
+
+ /// Returns the amount of memory (in bytes) to allocate for the WPR heap for a framebuffer size
+ /// of `fb_size` (in bytes) for `chipset`.
+ pub(crate) fn wpr_heap_size(&self, chipset: Chipset, fb_size: u64) -> u64 {
+ // The WPR heap will contain the following:
+ // LIBOS carveout,
+ self.carveout_size
+ // RM boot working memory,
+ .saturating_add(GspFwHeapParams::base_rm_size(chipset))
+ // One RM client,
+ .saturating_add(GspFwHeapParams::client_alloc_size())
+ // Overhead for memory management.
+ .saturating_add(GspFwHeapParams::management_overhead(fb_size))
+ // Clamp to the supported heap sizes.
+ .clamp(self.allowed_heap_size.start, self.allowed_heap_size.end - 1)
+ }
+}
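+
+// As a worked example (using the symbolic constants from the `r570_144`
+// bindings, whose concrete values are firmware-defined): for a LIBOS3 chipset
+// with a 4 GiB framebuffer, `wpr_heap_size` adds the LIBOS3 carveout, the
+// Turing base RM size, one client allocation (rounded up to 1 MiB), and
+// `4 * GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB` (also rounded up to 1 MiB), then
+// clamps the sum into the LIBOS3 min/max heap range.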
+
+/// Structure passed to the GSP bootloader, containing the framebuffer layout as well as the DMA
+/// addresses of the GSP bootloader and firmware.
+#[repr(transparent)]
+pub(crate) struct GspFwWprMeta(bindings::GspFwWprMeta);
+
+// SAFETY: Padding is explicit and does not contain uninitialized data.
+unsafe impl AsBytes for GspFwWprMeta {}
+
+// SAFETY: This struct only contains integer types for which all bit patterns
+// are valid.
+unsafe impl FromBytes for GspFwWprMeta {}
+
+type GspFwWprMetaBootResumeInfo = r570_144::GspFwWprMeta__bindgen_ty_1;
+type GspFwWprMetaBootInfo = r570_144::GspFwWprMeta__bindgen_ty_1__bindgen_ty_1;
+
+impl GspFwWprMeta {
+    /// Fills in and returns a `GspFwWprMeta` suitable for booting `gsp_firmware` with the
+    /// framebuffer layout `fb_layout`.
+ pub(crate) fn new(gsp_firmware: &GspFirmware, fb_layout: &FbLayout) -> Self {
+ Self(bindings::GspFwWprMeta {
+ // CAST: we want to store the bits of `GSP_FW_WPR_META_MAGIC` unmodified.
+ magic: r570_144::GSP_FW_WPR_META_MAGIC as u64,
+ revision: u64::from(r570_144::GSP_FW_WPR_META_REVISION),
+ sysmemAddrOfRadix3Elf: gsp_firmware.radix3_dma_handle(),
+ sizeOfRadix3Elf: u64::from_safe_cast(gsp_firmware.size),
+ sysmemAddrOfBootloader: gsp_firmware.bootloader.ucode.dma_handle(),
+ sizeOfBootloader: u64::from_safe_cast(gsp_firmware.bootloader.ucode.size()),
+ bootloaderCodeOffset: u64::from(gsp_firmware.bootloader.code_offset),
+ bootloaderDataOffset: u64::from(gsp_firmware.bootloader.data_offset),
+ bootloaderManifestOffset: u64::from(gsp_firmware.bootloader.manifest_offset),
+ __bindgen_anon_1: GspFwWprMetaBootResumeInfo {
+ __bindgen_anon_1: GspFwWprMetaBootInfo {
+ sysmemAddrOfSignature: gsp_firmware.signatures.dma_handle(),
+ sizeOfSignature: u64::from_safe_cast(gsp_firmware.signatures.size()),
+ },
+ },
+ gspFwRsvdStart: fb_layout.heap.start,
+ nonWprHeapOffset: fb_layout.heap.start,
+ nonWprHeapSize: fb_layout.heap.end - fb_layout.heap.start,
+ gspFwWprStart: fb_layout.wpr2.start,
+ gspFwHeapOffset: fb_layout.wpr2_heap.start,
+ gspFwHeapSize: fb_layout.wpr2_heap.end - fb_layout.wpr2_heap.start,
+ gspFwOffset: fb_layout.elf.start,
+ bootBinOffset: fb_layout.boot.start,
+ frtsOffset: fb_layout.frts.start,
+ frtsSize: fb_layout.frts.end - fb_layout.frts.start,
+ gspFwWprEnd: fb_layout
+ .vga_workspace
+ .start
+ .align_down(Alignment::new::<SZ_128K>()),
+ gspFwHeapVfPartitionCount: fb_layout.vf_partition_count,
+ fbSize: fb_layout.fb.end - fb_layout.fb.start,
+ vgaWorkspaceOffset: fb_layout.vga_workspace.start,
+ vgaWorkspaceSize: fb_layout.vga_workspace.end - fb_layout.vga_workspace.start,
+ ..Default::default()
+ })
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[repr(u32)]
+pub(crate) enum MsgFunction {
+ // Common function codes
+ Nop = bindings::NV_VGPU_MSG_FUNCTION_NOP,
+ SetGuestSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO,
+ AllocRoot = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT,
+ AllocDevice = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE,
+ AllocMemory = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
+ AllocCtxDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA,
+ AllocChannelDma = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA,
+ MapMemory = bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY,
+ BindCtxDma = bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA,
+ AllocObject = bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT,
+ Free = bindings::NV_VGPU_MSG_FUNCTION_FREE,
+ Log = bindings::NV_VGPU_MSG_FUNCTION_LOG,
+ GetGspStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO,
+ SetRegistry = bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY,
+ GspSetSystemInfo = bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO,
+ GspInitPostObjGpu = bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU,
+ GspRmControl = bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL,
+ GetStaticInfo = bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO,
+
+ // Event codes
+ GspInitDone = bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE,
+ GspRunCpuSequencer = bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
+ PostEvent = bindings::NV_VGPU_MSG_EVENT_POST_EVENT,
+ RcTriggered = bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED,
+ MmuFaultQueued = bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
+ OsErrorLog = bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG,
+ GspPostNoCat = bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD,
+ GspLockdownNotice = bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE,
+ UcodeLibOsPrint = bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT,
+}
+
+impl fmt::Display for MsgFunction {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ // Common function codes
+ MsgFunction::Nop => write!(f, "NOP"),
+ MsgFunction::SetGuestSystemInfo => write!(f, "SET_GUEST_SYSTEM_INFO"),
+ MsgFunction::AllocRoot => write!(f, "ALLOC_ROOT"),
+ MsgFunction::AllocDevice => write!(f, "ALLOC_DEVICE"),
+ MsgFunction::AllocMemory => write!(f, "ALLOC_MEMORY"),
+ MsgFunction::AllocCtxDma => write!(f, "ALLOC_CTX_DMA"),
+ MsgFunction::AllocChannelDma => write!(f, "ALLOC_CHANNEL_DMA"),
+ MsgFunction::MapMemory => write!(f, "MAP_MEMORY"),
+ MsgFunction::BindCtxDma => write!(f, "BIND_CTX_DMA"),
+ MsgFunction::AllocObject => write!(f, "ALLOC_OBJECT"),
+ MsgFunction::Free => write!(f, "FREE"),
+ MsgFunction::Log => write!(f, "LOG"),
+ MsgFunction::GetGspStaticInfo => write!(f, "GET_GSP_STATIC_INFO"),
+ MsgFunction::SetRegistry => write!(f, "SET_REGISTRY"),
+ MsgFunction::GspSetSystemInfo => write!(f, "GSP_SET_SYSTEM_INFO"),
+ MsgFunction::GspInitPostObjGpu => write!(f, "GSP_INIT_POST_OBJGPU"),
+ MsgFunction::GspRmControl => write!(f, "GSP_RM_CONTROL"),
+ MsgFunction::GetStaticInfo => write!(f, "GET_STATIC_INFO"),
+
+ // Event codes
+ MsgFunction::GspInitDone => write!(f, "INIT_DONE"),
+ MsgFunction::GspRunCpuSequencer => write!(f, "RUN_CPU_SEQUENCER"),
+ MsgFunction::PostEvent => write!(f, "POST_EVENT"),
+ MsgFunction::RcTriggered => write!(f, "RC_TRIGGERED"),
+ MsgFunction::MmuFaultQueued => write!(f, "MMU_FAULT_QUEUED"),
+ MsgFunction::OsErrorLog => write!(f, "OS_ERROR_LOG"),
+ MsgFunction::GspPostNoCat => write!(f, "NOCAT"),
+ MsgFunction::GspLockdownNotice => write!(f, "LOCKDOWN_NOTICE"),
+ MsgFunction::UcodeLibOsPrint => write!(f, "LIBOS_PRINT"),
+ }
+ }
+}
+
+impl TryFrom<u32> for MsgFunction {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u32) -> Result<MsgFunction> {
+ match value {
+ bindings::NV_VGPU_MSG_FUNCTION_NOP => Ok(MsgFunction::Nop),
+ bindings::NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO => {
+ Ok(MsgFunction::SetGuestSystemInfo)
+ }
+ bindings::NV_VGPU_MSG_FUNCTION_ALLOC_ROOT => Ok(MsgFunction::AllocRoot),
+ bindings::NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE => Ok(MsgFunction::AllocDevice),
+ bindings::NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY => Ok(MsgFunction::AllocMemory),
+ bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA => Ok(MsgFunction::AllocCtxDma),
+ bindings::NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA => Ok(MsgFunction::AllocChannelDma),
+ bindings::NV_VGPU_MSG_FUNCTION_MAP_MEMORY => Ok(MsgFunction::MapMemory),
+ bindings::NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA => Ok(MsgFunction::BindCtxDma),
+ bindings::NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT => Ok(MsgFunction::AllocObject),
+ bindings::NV_VGPU_MSG_FUNCTION_FREE => Ok(MsgFunction::Free),
+ bindings::NV_VGPU_MSG_FUNCTION_LOG => Ok(MsgFunction::Log),
+ bindings::NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO => Ok(MsgFunction::GetGspStaticInfo),
+ bindings::NV_VGPU_MSG_FUNCTION_SET_REGISTRY => Ok(MsgFunction::SetRegistry),
+ bindings::NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO => Ok(MsgFunction::GspSetSystemInfo),
+ bindings::NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU => {
+ Ok(MsgFunction::GspInitPostObjGpu)
+ }
+ bindings::NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL => Ok(MsgFunction::GspRmControl),
+ bindings::NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO => Ok(MsgFunction::GetStaticInfo),
+ bindings::NV_VGPU_MSG_EVENT_GSP_INIT_DONE => Ok(MsgFunction::GspInitDone),
+ bindings::NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER => {
+ Ok(MsgFunction::GspRunCpuSequencer)
+ }
+ bindings::NV_VGPU_MSG_EVENT_POST_EVENT => Ok(MsgFunction::PostEvent),
+ bindings::NV_VGPU_MSG_EVENT_RC_TRIGGERED => Ok(MsgFunction::RcTriggered),
+ bindings::NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED => Ok(MsgFunction::MmuFaultQueued),
+ bindings::NV_VGPU_MSG_EVENT_OS_ERROR_LOG => Ok(MsgFunction::OsErrorLog),
+ bindings::NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD => Ok(MsgFunction::GspPostNoCat),
+ bindings::NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE => Ok(MsgFunction::GspLockdownNotice),
+ bindings::NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT => Ok(MsgFunction::UcodeLibOsPrint),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+impl From<MsgFunction> for u32 {
+ fn from(value: MsgFunction) -> Self {
+ // CAST: `MsgFunction` is `repr(u32)` and can thus be cast losslessly.
+ value as u32
+ }
+}
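+
+// Illustrative sketch (not part of the patch): the `TryFrom`/`From` pair above
+// is how the `function` field of a received RPC header would typically be
+// decoded and logged. `hdr` is an assumed `&bindings::rpc_message_header_v`:
+//
+//     match MsgFunction::try_from(hdr.function) {
+//         Ok(func) => pr_debug!("GSP RPC function: {}\n", func),
+//         // Unknown function codes are rejected with `EINVAL` by `try_from`.
+//         Err(_) => pr_warn!("unknown GSP RPC function {:#x}\n", hdr.function),
+//     }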
+
+/// Sequencer buffer opcode for GSP sequencer commands.
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[repr(u32)]
+pub(crate) enum SeqBufOpcode {
+ // Core operation opcodes
+ CoreReset = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET,
+ CoreResume = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+ CoreStart = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START,
+ CoreWaitForHalt = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+
+ // Delay opcode
+ DelayUs = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US,
+
+ // Register operation opcodes
+ RegModify = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+ RegPoll = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL,
+ RegStore = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE,
+ RegWrite = r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE,
+}
+
+impl fmt::Display for SeqBufOpcode {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ SeqBufOpcode::CoreReset => write!(f, "CORE_RESET"),
+ SeqBufOpcode::CoreResume => write!(f, "CORE_RESUME"),
+ SeqBufOpcode::CoreStart => write!(f, "CORE_START"),
+ SeqBufOpcode::CoreWaitForHalt => write!(f, "CORE_WAIT_FOR_HALT"),
+ SeqBufOpcode::DelayUs => write!(f, "DELAY_US"),
+ SeqBufOpcode::RegModify => write!(f, "REG_MODIFY"),
+ SeqBufOpcode::RegPoll => write!(f, "REG_POLL"),
+ SeqBufOpcode::RegStore => write!(f, "REG_STORE"),
+ SeqBufOpcode::RegWrite => write!(f, "REG_WRITE"),
+ }
+ }
+}
+
+impl TryFrom<u32> for SeqBufOpcode {
+ type Error = kernel::error::Error;
+
+ fn try_from(value: u32) -> Result<SeqBufOpcode> {
+ match value {
+ r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET => {
+ Ok(SeqBufOpcode::CoreReset)
+ }
+ r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME => {
+ Ok(SeqBufOpcode::CoreResume)
+ }
+ r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START => {
+ Ok(SeqBufOpcode::CoreStart)
+ }
+ r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT => {
+ Ok(SeqBufOpcode::CoreWaitForHalt)
+ }
+ r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US => Ok(SeqBufOpcode::DelayUs),
+ r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY => {
+ Ok(SeqBufOpcode::RegModify)
+ }
+ r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL => Ok(SeqBufOpcode::RegPoll),
+ r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE => Ok(SeqBufOpcode::RegStore),
+ r570_144::GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE => Ok(SeqBufOpcode::RegWrite),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
+impl From<SeqBufOpcode> for u32 {
+ fn from(value: SeqBufOpcode) -> Self {
+ // CAST: `SeqBufOpcode` is `repr(u32)` and can thus be cast losslessly.
+ value as u32
+ }
+}
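+
+// Illustrative note (not part of the patch): for valid opcode values the two
+// conversions above are inverses, which makes round-trip checks cheap when
+// parsing sequencer buffers:
+//
+//     let op = SeqBufOpcode::try_from(raw)?;
+//     assert_eq!(u32::from(op), raw);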
+
+/// Wrapper for GSP sequencer register write payload.
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub(crate) struct RegWritePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_WRITE);
+
+impl RegWritePayload {
+ /// Returns the register address.
+ pub(crate) fn addr(&self) -> u32 {
+ self.0.addr
+ }
+
+ /// Returns the value to write.
+ pub(crate) fn val(&self) -> u32 {
+ self.0.val
+ }
+}
+
+// SAFETY: This struct only contains integer types for which all bit patterns are valid.
+unsafe impl FromBytes for RegWritePayload {}
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for RegWritePayload {}
+
+/// Wrapper for GSP sequencer register modify payload.
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub(crate) struct RegModifyPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_MODIFY);
+
+impl RegModifyPayload {
+ /// Returns the register address.
+ pub(crate) fn addr(&self) -> u32 {
+ self.0.addr
+ }
+
+ /// Returns the mask to apply.
+ pub(crate) fn mask(&self) -> u32 {
+ self.0.mask
+ }
+
+ /// Returns the value to write.
+ pub(crate) fn val(&self) -> u32 {
+ self.0.val
+ }
+}
+
+// SAFETY: This struct only contains integer types for which all bit patterns are valid.
+unsafe impl FromBytes for RegModifyPayload {}
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for RegModifyPayload {}
+
+/// Wrapper for GSP sequencer register poll payload.
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub(crate) struct RegPollPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_POLL);
+
+impl RegPollPayload {
+ /// Returns the register address.
+ pub(crate) fn addr(&self) -> u32 {
+ self.0.addr
+ }
+
+ /// Returns the mask to apply.
+ pub(crate) fn mask(&self) -> u32 {
+ self.0.mask
+ }
+
+ /// Returns the expected value.
+ pub(crate) fn val(&self) -> u32 {
+ self.0.val
+ }
+
+ /// Returns the timeout in microseconds.
+ pub(crate) fn timeout(&self) -> u32 {
+ self.0.timeout
+ }
+}
+
+// SAFETY: This struct only contains integer types for which all bit patterns are valid.
+unsafe impl FromBytes for RegPollPayload {}
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for RegPollPayload {}
+
+/// Wrapper for GSP sequencer delay payload.
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub(crate) struct DelayUsPayload(r570_144::GSP_SEQ_BUF_PAYLOAD_DELAY_US);
+
+impl DelayUsPayload {
+ /// Returns the delay value in microseconds.
+ pub(crate) fn val(&self) -> u32 {
+ self.0.val
+ }
+}
+
+// SAFETY: This struct only contains integer types for which all bit patterns are valid.
+unsafe impl FromBytes for DelayUsPayload {}
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for DelayUsPayload {}
+
+/// Wrapper for GSP sequencer register store payload.
+#[repr(transparent)]
+#[derive(Copy, Clone)]
+pub(crate) struct RegStorePayload(r570_144::GSP_SEQ_BUF_PAYLOAD_REG_STORE);
+
+impl RegStorePayload {
+ /// Returns the register address.
+ pub(crate) fn addr(&self) -> u32 {
+ self.0.addr
+ }
+
+ /// Returns the storage index.
+ #[allow(unused)]
+ pub(crate) fn index(&self) -> u32 {
+ self.0.index
+ }
+}
+
+// SAFETY: This struct only contains integer types for which all bit patterns are valid.
+unsafe impl FromBytes for RegStorePayload {}
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for RegStorePayload {}
+
+/// Wrapper for GSP sequencer buffer command.
+#[repr(transparent)]
+pub(crate) struct SequencerBufferCmd(r570_144::GSP_SEQUENCER_BUFFER_CMD);
+
+impl SequencerBufferCmd {
+ /// Returns the opcode as a `SeqBufOpcode` enum, or an error if it is invalid.
+ pub(crate) fn opcode(&self) -> Result<SeqBufOpcode> {
+ self.0.opCode.try_into()
+ }
+
+ /// Returns the register write payload by value.
+ ///
+ /// Returns an error if the opcode is not `SeqBufOpcode::RegWrite`.
+ pub(crate) fn reg_write_payload(&self) -> Result<RegWritePayload> {
+ if self.opcode()? != SeqBufOpcode::RegWrite {
+ return Err(EINVAL);
+ }
+ // SAFETY: Opcode is verified to be `RegWrite`, so union contains valid `RegWritePayload`.
+ let payload_bytes = unsafe {
+ core::slice::from_raw_parts(
+ core::ptr::addr_of!(self.0.payload.regWrite).cast::<u8>(),
+ core::mem::size_of::<RegWritePayload>(),
+ )
+ };
+ Ok(*RegWritePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
+ }
+
+ /// Returns the register modify payload by value.
+ ///
+ /// Returns an error if the opcode is not `SeqBufOpcode::RegModify`.
+ pub(crate) fn reg_modify_payload(&self) -> Result<RegModifyPayload> {
+ if self.opcode()? != SeqBufOpcode::RegModify {
+ return Err(EINVAL);
+ }
+ // SAFETY: Opcode is verified to be `RegModify`, so union contains valid `RegModifyPayload`.
+ let payload_bytes = unsafe {
+ core::slice::from_raw_parts(
+ core::ptr::addr_of!(self.0.payload.regModify).cast::<u8>(),
+ core::mem::size_of::<RegModifyPayload>(),
+ )
+ };
+ Ok(*RegModifyPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
+ }
+
+ /// Returns the register poll payload by value.
+ ///
+ /// Returns an error if the opcode is not `SeqBufOpcode::RegPoll`.
+ pub(crate) fn reg_poll_payload(&self) -> Result<RegPollPayload> {
+ if self.opcode()? != SeqBufOpcode::RegPoll {
+ return Err(EINVAL);
+ }
+ // SAFETY: Opcode is verified to be `RegPoll`, so union contains valid `RegPollPayload`.
+ let payload_bytes = unsafe {
+ core::slice::from_raw_parts(
+ core::ptr::addr_of!(self.0.payload.regPoll).cast::<u8>(),
+ core::mem::size_of::<RegPollPayload>(),
+ )
+ };
+ Ok(*RegPollPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
+ }
+
+ /// Returns the delay payload by value.
+ ///
+ /// Returns an error if the opcode is not `SeqBufOpcode::DelayUs`.
+ pub(crate) fn delay_us_payload(&self) -> Result<DelayUsPayload> {
+ if self.opcode()? != SeqBufOpcode::DelayUs {
+ return Err(EINVAL);
+ }
+ // SAFETY: Opcode is verified to be `DelayUs`, so union contains valid `DelayUsPayload`.
+ let payload_bytes = unsafe {
+ core::slice::from_raw_parts(
+ core::ptr::addr_of!(self.0.payload.delayUs).cast::<u8>(),
+ core::mem::size_of::<DelayUsPayload>(),
+ )
+ };
+ Ok(*DelayUsPayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
+ }
+
+ /// Returns the register store payload by value.
+ ///
+ /// Returns an error if the opcode is not `SeqBufOpcode::RegStore`.
+ pub(crate) fn reg_store_payload(&self) -> Result<RegStorePayload> {
+ if self.opcode()? != SeqBufOpcode::RegStore {
+ return Err(EINVAL);
+ }
+ // SAFETY: Opcode is verified to be `RegStore`, so union contains valid `RegStorePayload`.
+ let payload_bytes = unsafe {
+ core::slice::from_raw_parts(
+ core::ptr::addr_of!(self.0.payload.regStore).cast::<u8>(),
+ core::mem::size_of::<RegStorePayload>(),
+ )
+ };
+ Ok(*RegStorePayload::from_bytes(payload_bytes).ok_or(EINVAL)?)
+ }
+}
+
+// SAFETY: This struct only contains integer types for which all bit patterns are valid.
+unsafe impl FromBytes for SequencerBufferCmd {}
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for SequencerBufferCmd {}
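+
+// Illustrative sketch (not part of the patch): a CPU-side sequencer
+// interpreter would dispatch on the decoded opcode and extract the matching
+// payload. `cmd` is an assumed `&SequencerBufferCmd`; the register and delay
+// helpers named here are hypothetical:
+//
+//     match cmd.opcode()? {
+//         SeqBufOpcode::RegWrite => {
+//             let p = cmd.reg_write_payload()?;
+//             write_reg(p.addr(), p.val()); // hypothetical MMIO helper
+//         }
+//         SeqBufOpcode::DelayUs => {
+//             let p = cmd.delay_us_payload()?;
+//             udelay(p.val()); // hypothetical delay helper
+//         }
+//         _ => return Err(EINVAL),
+//     }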
+
+/// Wrapper for GSP run CPU sequencer RPC.
+#[repr(transparent)]
+pub(crate) struct RunCpuSequencer(r570_144::rpc_run_cpu_sequencer_v17_00);
+
+impl RunCpuSequencer {
+ /// Returns the command index.
+ pub(crate) fn cmd_index(&self) -> u32 {
+ self.0.cmdIndex
+ }
+}
+
+// SAFETY: This struct only contains integer types for which all bit patterns are valid.
+unsafe impl FromBytes for RunCpuSequencer {}
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for RunCpuSequencer {}
+
+/// Struct containing the arguments required to pass a memory buffer to the GSP
+/// for use during initialisation.
+///
+/// The GSP only understands 4K pages (`GSP_PAGE_SIZE`), so even if the kernel
+/// is configured for a larger page size (e.g. 64K pages), we need to give the
+/// GSP an array of 4K pages. Since we only create physically contiguous
+/// buffers, the math to calculate the addresses is simple.
+///
+/// The buffer sizes must be a multiple of `GSP_PAGE_SIZE`. GSP-RM also
+/// currently ignores the `kind` field for LOGINIT, LOGINTR, and LOGRM, but
+/// expects the buffers to be physically contiguous anyway.
+///
+/// The memory allocated for the arguments must remain valid until the GSP
+/// sends the `init_done` RPC.
+#[repr(transparent)]
+pub(crate) struct LibosMemoryRegionInitArgument(bindings::LibosMemoryRegionInitArgument);
+
+// SAFETY: Padding is explicit and does not contain uninitialized data.
+unsafe impl AsBytes for LibosMemoryRegionInitArgument {}
+
+// SAFETY: This struct only contains integer types for which all bit patterns
+// are valid.
+unsafe impl FromBytes for LibosMemoryRegionInitArgument {}
+
+impl LibosMemoryRegionInitArgument {
+ pub(crate) fn new<A: AsBytes + FromBytes>(
+ name: &'static str,
+ obj: &CoherentAllocation<A>,
+ ) -> Self {
+ /// Generates the `ID8` identifier required for some GSP objects.
+ fn id8(name: &str) -> u64 {
+ let mut bytes = [0u8; core::mem::size_of::<u64>()];
+
+ for (c, b) in name.bytes().rev().zip(&mut bytes) {
+ *b = c;
+ }
+
+ u64::from_ne_bytes(bytes)
+ }
+
+ Self(bindings::LibosMemoryRegionInitArgument {
+ id8: id8(name),
+ pa: obj.dma_handle(),
+ size: num::usize_as_u64(obj.size()),
+ kind: num::u32_into_u8::<
+ { bindings::LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS },
+ >(),
+ loc: num::u32_into_u8::<
+ { bindings::LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM },
+ >(),
+ ..Default::default()
+ })
+ }
+}
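+
+// Worked example (illustrative, not part of the patch): `id8` packs up to the
+// last 8 bytes of the name, reversed, into a native-endian `u64`. On a
+// little-endian machine, for the 7-byte name "LOGINIT":
+//
+//     id8("LOGINIT")
+//         == u64::from_ne_bytes([b'T', b'I', b'N', b'I', b'G', b'O', b'L', 0])
+//         == 0x004c_4f47_494e_4954 // reads as "\0LOGINIT" in a hex dump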
+
+/// TX header for setting up a message queue with the GSP.
+#[repr(transparent)]
+pub(crate) struct MsgqTxHeader(bindings::msgqTxHeader);
+
+impl MsgqTxHeader {
+ /// Creates a new TX queue header.
+ ///
+ /// # Arguments
+ ///
+ /// * `msgq_size` - Total size of the message queue structure, in bytes.
+ /// * `rx_hdr_offset` - Offset, in bytes, of the start of the RX header in the message queue
+ /// structure.
+ /// * `msg_count` - Number of messages that can be sent, i.e. the number of memory pages
+ /// allocated for the message queue in the message queue structure.
+ pub(crate) fn new(msgq_size: u32, rx_hdr_offset: u32, msg_count: u32) -> Self {
+ Self(bindings::msgqTxHeader {
+ version: 0,
+ size: msgq_size,
+ msgSize: num::usize_into_u32::<GSP_PAGE_SIZE>(),
+ msgCount: msg_count,
+ writePtr: 0,
+ flags: 1,
+ rxHdrOff: rx_hdr_offset,
+ entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(),
+ })
+ }
+
+ /// Returns the value of the write pointer for this queue.
+ pub(crate) fn write_ptr(&self) -> u32 {
+ let ptr = core::ptr::from_ref(&self.0.writePtr);
+
+ // SAFETY: `ptr` is a valid pointer to a `u32`.
+ unsafe { ptr.read_volatile() }
+ }
+
+ /// Sets the value of the write pointer for this queue.
+ pub(crate) fn set_write_ptr(&mut self, val: u32) {
+ let ptr = core::ptr::from_mut(&mut self.0.writePtr);
+
+ // SAFETY: `ptr` is a valid pointer to a `u32`.
+ unsafe { ptr.write_volatile(val) }
+ }
+}
+
+// SAFETY: Padding is explicit and does not contain uninitialized data.
+unsafe impl AsBytes for MsgqTxHeader {}
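+
+// Illustrative sketch (not part of the patch; the driver's exact accounting
+// may differ): `writePtr` advances in message-sized units modulo `msgCount`,
+// so the occupancy of the TX ring can be derived from the two shared
+// pointers, keeping one slot empty to tell "full" apart from "empty":
+//
+//     let used = tx.write_ptr().wrapping_sub(rx.read_ptr()) % msg_count;
+//     let free = msg_count - used - 1;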
+
+/// RX header for setting up a message queue with the GSP.
+#[repr(transparent)]
+pub(crate) struct MsgqRxHeader(bindings::msgqRxHeader);
+
+impl MsgqRxHeader {
+ /// Creates a new RX queue header.
+ pub(crate) fn new() -> Self {
+ Self(Default::default())
+ }
+
+ /// Returns the value of the read pointer for this queue.
+ pub(crate) fn read_ptr(&self) -> u32 {
+ let ptr = core::ptr::from_ref(&self.0.readPtr);
+
+ // SAFETY: `ptr` is a valid pointer to a `u32`.
+ unsafe { ptr.read_volatile() }
+ }
+
+ /// Sets the value of the read pointer for this queue.
+ pub(crate) fn set_read_ptr(&mut self, val: u32) {
+ let ptr = core::ptr::from_mut(&mut self.0.readPtr);
+
+ // SAFETY: `ptr` is a valid pointer to a `u32`.
+ unsafe { ptr.write_volatile(val) }
+ }
+}
+
+// SAFETY: Padding is explicit and does not contain uninitialized data.
+unsafe impl AsBytes for MsgqRxHeader {}
+
+bitfield! {
+ struct MsgHeaderVersion(u32) {
+ 31:24 major as u8;
+ 23:16 minor as u8;
+ }
+}
+
+impl MsgHeaderVersion {
+ const MAJOR_TOT: u8 = 3;
+ const MINOR_TOT: u8 = 0;
+
+ fn new() -> Self {
+ Self::default()
+ .set_major(Self::MAJOR_TOT)
+ .set_minor(Self::MINOR_TOT)
+ }
+}
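+
+// Worked example (illustrative): with `MAJOR_TOT` = 3 in bits 31:24 and
+// `MINOR_TOT` = 0 in bits 23:16, the encoded version dword is:
+//
+//     let v: u32 = MsgHeaderVersion::new().into();
+//     assert_eq!(v, 0x0300_0000);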
+
+impl bindings::rpc_message_header_v {
+ fn init(cmd_size: usize, function: MsgFunction) -> impl Init<Self, Error> {
+ type RpcMessageHeader = bindings::rpc_message_header_v;
+
+ try_init!(RpcMessageHeader {
+ header_version: MsgHeaderVersion::new().into(),
+ signature: bindings::NV_VGPU_MSG_SIGNATURE_VALID,
+ function: function.into(),
+ length: size_of::<Self>()
+ .checked_add(cmd_size)
+ .ok_or(EOVERFLOW)
+ .and_then(|v| v.try_into().map_err(|_| EINVAL))?,
+ rpc_result: 0xffffffff,
+ rpc_result_private: 0xffffffff,
+ ..Zeroable::init_zeroed()
+ })
+ }
+}
+
+// SAFETY: We can't derive the Zeroable trait for this binding because the
+// procedural macro doesn't support the syntax used by bindgen to create the
+// __IncompleteArrayField types. So instead we implement it here, which is safe
+// because these are explicitly padded structures only containing types for
+// which any bit pattern, including all zeros, is valid.
+unsafe impl Zeroable for bindings::rpc_message_header_v {}
+
+/// GSP Message Element.
+///
+/// This is essentially a message header expected to be followed by the message data.
+#[repr(transparent)]
+pub(crate) struct GspMsgElement {
+ inner: bindings::GSP_MSG_QUEUE_ELEMENT,
+}
+
+impl GspMsgElement {
+ /// Creates a new message element.
+ ///
+ /// # Arguments
+ ///
+ /// * `sequence` - Sequence number of the message.
+ /// * `cmd_size` - Size of the command (not including the message element), in bytes.
+ /// * `function` - Function of the message.
+ #[allow(non_snake_case)]
+ pub(crate) fn init(
+ sequence: u32,
+ cmd_size: usize,
+ function: MsgFunction,
+ ) -> impl Init<Self, Error> {
+ type RpcMessageHeader = bindings::rpc_message_header_v;
+ type InnerGspMsgElement = bindings::GSP_MSG_QUEUE_ELEMENT;
+ let init_inner = try_init!(InnerGspMsgElement {
+ seqNum: sequence,
+ elemCount: size_of::<Self>()
+ .checked_add(cmd_size)
+ .ok_or(EOVERFLOW)?
+ .div_ceil(GSP_PAGE_SIZE)
+ .try_into()
+ .map_err(|_| EOVERFLOW)?,
+ rpc <- RpcMessageHeader::init(cmd_size, function),
+ ..Zeroable::init_zeroed()
+ });
+
+ try_init!(GspMsgElement {
+ inner <- init_inner,
+ })
+ }
+
+ /// Sets the checksum of this message.
+ ///
+ /// Since the header is also part of the checksum, this is usually called after the whole
+ /// message has been written to the shared memory area.
+ pub(crate) fn set_checksum(&mut self, checksum: u32) {
+ self.inner.checkSum = checksum;
+ }
+
+ /// Returns the total length of the message.
+ pub(crate) fn length(&self) -> usize {
+ // `rpc.length` covers the RPC header (`rpc_message_header_v`) and the command,
+ // but not the rest of the queue element header, so add the difference.
+ size_of::<Self>() - size_of::<bindings::rpc_message_header_v>()
+ + num::u32_as_usize(self.inner.rpc.length)
+ }
+
+ /// Returns the sequence number of the message.
+ pub(crate) fn sequence(&self) -> u32 {
+ self.inner.rpc.sequence
+ }
+
+ /// Returns the function of the message, if it is valid, or the invalid function number as an
+ /// error.
+ pub(crate) fn function(&self) -> Result<MsgFunction, u32> {
+ self.inner
+ .rpc
+ .function
+ .try_into()
+ .map_err(|_| self.inner.rpc.function)
+ }
+
+ /// Returns the number of elements (i.e. memory pages) used by this message.
+ pub(crate) fn element_count(&self) -> u32 {
+ self.inner.elemCount
+ }
+}
+
+// SAFETY: Padding is explicit and does not contain uninitialized data.
+unsafe impl AsBytes for GspMsgElement {}
+
+// SAFETY: This struct only contains integer types for which all bit patterns
+// are valid.
+unsafe impl FromBytes for GspMsgElement {}
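+
+// Worked example (illustrative): for a 128-byte command, the element count
+// stored by `init()` is
+//
+//     (size_of::<GspMsgElement>() + 128).div_ceil(GSP_PAGE_SIZE) == 1
+//
+// assuming the element header itself is well under one 4K page, i.e. header
+// and command share a single page.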
+
+/// Arguments for GSP startup.
+#[repr(transparent)]
+pub(crate) struct GspArgumentsCached(bindings::GSP_ARGUMENTS_CACHED);
+
+impl GspArgumentsCached {
+ /// Creates the arguments for starting the GSP up using `cmdq` as its command queue.
+ pub(crate) fn new(cmdq: &Cmdq) -> Self {
+ Self(bindings::GSP_ARGUMENTS_CACHED {
+ messageQueueInitArguments: MessageQueueInitArguments::new(cmdq).0,
+ bDmemStack: 1,
+ ..Default::default()
+ })
+ }
+}
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for GspArgumentsCached {}
+
+// SAFETY: This struct only contains integer types for which all bit patterns
+// are valid.
+unsafe impl FromBytes for GspArgumentsCached {}
+
+/// Init arguments for the message queue.
+#[repr(transparent)]
+struct MessageQueueInitArguments(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS);
+
+impl MessageQueueInitArguments {
+ /// Creates a new init arguments structure for `cmdq`.
+ fn new(cmdq: &Cmdq) -> Self {
+ Self(bindings::MESSAGE_QUEUE_INIT_ARGUMENTS {
+ sharedMemPhysAddr: cmdq.dma_handle(),
+ pageTableEntryCount: num::usize_into_u32::<{ Cmdq::NUM_PTES }>(),
+ cmdQueueOffset: num::usize_as_u64(Cmdq::CMDQ_OFFSET),
+ statQueueOffset: num::usize_as_u64(Cmdq::STATQ_OFFSET),
+ ..Default::default()
+ })
+ }
+}
diff --git a/drivers/gpu/nova-core/gsp/fw/commands.rs b/drivers/gpu/nova-core/gsp/fw/commands.rs
new file mode 100644
index 000000000000..21be44199693
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw/commands.rs
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::prelude::*;
+use kernel::transmute::{AsBytes, FromBytes};
+use kernel::{device, pci};
+
+use crate::gsp::GSP_PAGE_SIZE;
+
+use super::bindings;
+
+/// Payload of the `GspSetSystemInfo` command.
+#[repr(transparent)]
+pub(crate) struct GspSetSystemInfo {
+ inner: bindings::GspSystemInfo,
+}
+static_assert!(size_of::<GspSetSystemInfo>() < GSP_PAGE_SIZE);
+
+impl GspSetSystemInfo {
+ /// Returns an in-place initializer for the `GspSetSystemInfo` command.
+ #[allow(non_snake_case)]
+ pub(crate) fn init<'a>(dev: &'a pci::Device<device::Bound>) -> impl Init<Self, Error> + 'a {
+ type InnerGspSystemInfo = bindings::GspSystemInfo;
+ let init_inner = try_init!(InnerGspSystemInfo {
+ gpuPhysAddr: dev.resource_start(0)?,
+ gpuPhysFbAddr: dev.resource_start(1)?,
+ gpuPhysInstAddr: dev.resource_start(3)?,
+ nvDomainBusDeviceFunc: u64::from(dev.dev_id()),
+
+ // Using TASK_SIZE in r535_gsp_rpc_set_system_info() seems wrong because
+ // TASK_SIZE is per-task. That's probably a design issue in GSP-RM though.
+ maxUserVa: (1 << 47) - 4096,
+ pciConfigMirrorBase: 0x088000,
+ pciConfigMirrorSize: 0x001000,
+
+ PCIDeviceID: (u32::from(dev.device_id()) << 16) | u32::from(dev.vendor_id().as_raw()),
+ PCISubDeviceID: (u32::from(dev.subsystem_device_id()) << 16)
+ | u32::from(dev.subsystem_vendor_id()),
+ PCIRevisionID: u32::from(dev.revision_id()),
+ bIsPrimary: 0,
+ bPreserveVideoMemoryAllocations: 0,
+ ..Zeroable::init_zeroed()
+ });
+
+ try_init!(GspSetSystemInfo {
+ inner <- init_inner,
+ })
+ }
+}
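+
+// Worked example (illustrative): `PCIDeviceID` mirrors the layout of PCI
+// configuration space dword 0, with the device ID in the upper 16 bits and
+// the vendor ID in the lower 16 bits. E.g. for a hypothetical NVIDIA
+// (0x10de) device 0x2684:
+//
+//     (0x2684_u32 << 16) | 0x10de == 0x2684_10de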
+
+// SAFETY: This struct doesn't meet the no-padding requirements of `AsBytes`,
+// but that is not a problem because it is not used outside the kernel.
+unsafe impl AsBytes for GspSetSystemInfo {}
+
+// SAFETY: This struct doesn't meet the no-padding requirements of `FromBytes`,
+// but that is not a problem because it is not used outside the kernel.
+unsafe impl FromBytes for GspSetSystemInfo {}
+
+#[repr(transparent)]
+pub(crate) struct PackedRegistryEntry(bindings::PACKED_REGISTRY_ENTRY);
+
+impl PackedRegistryEntry {
+ pub(crate) fn new(offset: u32, value: u32) -> Self {
+ Self({
+ bindings::PACKED_REGISTRY_ENTRY {
+ nameOffset: offset,
+
+ // We only support DWORD types for now. Support for other types
+ // will come later if required.
+ type_: bindings::REGISTRY_TABLE_ENTRY_TYPE_DWORD as u8,
+ __bindgen_padding_0: Default::default(),
+ data: value,
+ length: 0,
+ }
+ })
+ }
+}
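+
+// Illustrative sketch (not part of the patch): a registry blob is laid out as
+// the `PACKED_REGISTRY_TABLE` header, then the entry array, then the
+// NUL-terminated key names; `nameOffset` is a byte offset from the start of
+// the table to the entry's name. The size values below are assumed helpers:
+//
+//     // The first name sits right after the entry array.
+//     let name_offset = table_header_size + num_entries * entry_size;
+//     let entry = PackedRegistryEntry::new(name_offset, value);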
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for PackedRegistryEntry {}
+
+/// Payload of the `SetRegistry` command.
+#[repr(transparent)]
+pub(crate) struct PackedRegistryTable {
+ inner: bindings::PACKED_REGISTRY_TABLE,
+}
+
+impl PackedRegistryTable {
+ #[allow(non_snake_case)]
+ pub(crate) fn init(num_entries: u32, size: u32) -> impl Init<Self> {
+ type InnerPackedRegistryTable = bindings::PACKED_REGISTRY_TABLE;
+ let init_inner = init!(InnerPackedRegistryTable {
+ numEntries: num_entries,
+ size,
+ entries: Default::default()
+ });
+
+ init!(PackedRegistryTable { inner <- init_inner })
+ }
+}
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for PackedRegistryTable {}
+
+// SAFETY: This struct only contains integer types for which all bit patterns
+// are valid.
+unsafe impl FromBytes for PackedRegistryTable {}
+
+/// Payload of the `GetGspStaticInfo` command and message.
+#[repr(transparent)]
+pub(crate) struct GspStaticConfigInfo(bindings::GspStaticConfigInfo_t);
+
+impl GspStaticConfigInfo {
+ /// Returns a byte array containing the (hopefully) zero-terminated name of this GPU.
+ pub(crate) fn gpu_name_str(&self) -> [u8; 64] {
+ self.0.gpuNameString
+ }
+}
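+
+// Illustrative sketch (not part of the patch): the returned array should hold
+// a NUL-terminated ASCII name, so a caller could render it with
+// `core::ffi::CStr`, tolerating a missing terminator or non-UTF-8 bytes:
+//
+//     let name = info.gpu_name_str();
+//     if let Ok(s) = core::ffi::CStr::from_bytes_until_nul(&name) {
+//         pr_info!("GPU: {}\n", s.to_str().unwrap_or("<non-UTF-8 name>"));
+//     }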
+
+// SAFETY: Padding is explicit and will not contain uninitialized data.
+unsafe impl AsBytes for GspStaticConfigInfo {}
+
+// SAFETY: This struct only contains integer types for which all bit patterns
+// are valid.
+unsafe impl FromBytes for GspStaticConfigInfo {}
+
+// SAFETY: This struct only contains integer types and fixed-size arrays for which
+// all bit patterns are valid.
+unsafe impl Zeroable for GspStaticConfigInfo {}
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144.rs b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
new file mode 100644
index 000000000000..048234d1a9d1
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144.rs
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Firmware bindings.
+//!
+//! Imports the generated bindings by `bindgen`.
+//!
+//! This module must not be used directly. Please abstract or re-export the
+//! needed symbols in the parent module instead.
+
+#![cfg_attr(test, allow(deref_nullptr))]
+#![cfg_attr(test, allow(unaligned_references))]
+#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
+#![allow(
+ dead_code,
+ clippy::all,
+ clippy::undocumented_unsafe_blocks,
+ clippy::ptr_as_ptr,
+ clippy::ref_as_ptr,
+ missing_docs,
+ non_camel_case_types,
+ non_upper_case_globals,
+ non_snake_case,
+ improper_ctypes,
+ unreachable_pub,
+ unsafe_op_in_unsafe_fn
+)]
+use kernel::{
+ ffi,
+ prelude::Zeroable, //
+};
+include!("r570_144/bindings.rs");
diff --git a/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
new file mode 100644
index 000000000000..5bcfbcd1ad22
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/fw/r570_144/bindings.rs
@@ -0,0 +1,951 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#[repr(C)]
+#[derive(Default)]
+pub struct __IncompleteArrayField<T>(::core::marker::PhantomData<T>, [T; 0]);
+impl<T> __IncompleteArrayField<T> {
+ #[inline]
+ pub const fn new() -> Self {
+ __IncompleteArrayField(::core::marker::PhantomData, [])
+ }
+ #[inline]
+ pub fn as_ptr(&self) -> *const T {
+ self as *const _ as *const T
+ }
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ self as *mut _ as *mut T
+ }
+ #[inline]
+ pub unsafe fn as_slice(&self, len: usize) -> &[T] {
+ ::core::slice::from_raw_parts(self.as_ptr(), len)
+ }
+ #[inline]
+ pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
+ ::core::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
+ }
+}
+impl<T> ::core::fmt::Debug for __IncompleteArrayField<T> {
+ fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
+ fmt.write_str("__IncompleteArrayField")
+ }
+}
+pub const NV_VGPU_MSG_SIGNATURE_VALID: u32 = 1129337430;
+pub const GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS2: u32 = 0;
+pub const GSP_FW_HEAP_PARAM_OS_SIZE_LIBOS3_BAREMETAL: u32 = 23068672;
+pub const GSP_FW_HEAP_PARAM_BASE_RM_SIZE_TU10X: u32 = 8388608;
+pub const GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB: u32 = 98304;
+pub const GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE: u32 = 100663296;
+pub const GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MIN_MB: u32 = 64;
+pub const GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS2_MAX_MB: u32 = 256;
+pub const GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MIN_MB: u32 = 88;
+pub const GSP_FW_HEAP_SIZE_OVERRIDE_LIBOS3_BAREMETAL_MAX_MB: u32 = 280;
+pub const GSP_FW_WPR_META_REVISION: u32 = 1;
+pub const GSP_FW_WPR_META_MAGIC: i64 = -2577556379034558285;
+pub const REGISTRY_TABLE_ENTRY_TYPE_DWORD: u32 = 1;
+pub type __u8 = ffi::c_uchar;
+pub type __u16 = ffi::c_ushort;
+pub type __u32 = ffi::c_uint;
+pub type __u64 = ffi::c_ulonglong;
+pub type u8_ = __u8;
+pub type u16_ = __u16;
+pub type u32_ = __u32;
+pub type u64_ = __u64;
+pub const NV_VGPU_MSG_FUNCTION_NOP: _bindgen_ty_2 = 0;
+pub const NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO: _bindgen_ty_2 = 1;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_ROOT: _bindgen_ty_2 = 2;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_DEVICE: _bindgen_ty_2 = 3;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY: _bindgen_ty_2 = 4;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_CTX_DMA: _bindgen_ty_2 = 5;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_CHANNEL_DMA: _bindgen_ty_2 = 6;
+pub const NV_VGPU_MSG_FUNCTION_MAP_MEMORY: _bindgen_ty_2 = 7;
+pub const NV_VGPU_MSG_FUNCTION_BIND_CTX_DMA: _bindgen_ty_2 = 8;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_OBJECT: _bindgen_ty_2 = 9;
+pub const NV_VGPU_MSG_FUNCTION_FREE: _bindgen_ty_2 = 10;
+pub const NV_VGPU_MSG_FUNCTION_LOG: _bindgen_ty_2 = 11;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_VIDMEM: _bindgen_ty_2 = 12;
+pub const NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY: _bindgen_ty_2 = 13;
+pub const NV_VGPU_MSG_FUNCTION_MAP_MEMORY_DMA: _bindgen_ty_2 = 14;
+pub const NV_VGPU_MSG_FUNCTION_UNMAP_MEMORY_DMA: _bindgen_ty_2 = 15;
+pub const NV_VGPU_MSG_FUNCTION_GET_EDID: _bindgen_ty_2 = 16;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_DISP_CHANNEL: _bindgen_ty_2 = 17;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_DISP_OBJECT: _bindgen_ty_2 = 18;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_SUBDEVICE: _bindgen_ty_2 = 19;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_DYNAMIC_MEMORY: _bindgen_ty_2 = 20;
+pub const NV_VGPU_MSG_FUNCTION_DUP_OBJECT: _bindgen_ty_2 = 21;
+pub const NV_VGPU_MSG_FUNCTION_IDLE_CHANNELS: _bindgen_ty_2 = 22;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_EVENT: _bindgen_ty_2 = 23;
+pub const NV_VGPU_MSG_FUNCTION_SEND_EVENT: _bindgen_ty_2 = 24;
+pub const NV_VGPU_MSG_FUNCTION_REMAPPER_CONTROL: _bindgen_ty_2 = 25;
+pub const NV_VGPU_MSG_FUNCTION_DMA_CONTROL: _bindgen_ty_2 = 26;
+pub const NV_VGPU_MSG_FUNCTION_DMA_FILL_PTE_MEM: _bindgen_ty_2 = 27;
+pub const NV_VGPU_MSG_FUNCTION_MANAGE_HW_RESOURCE: _bindgen_ty_2 = 28;
+pub const NV_VGPU_MSG_FUNCTION_BIND_ARBITRARY_CTX_DMA: _bindgen_ty_2 = 29;
+pub const NV_VGPU_MSG_FUNCTION_CREATE_FB_SEGMENT: _bindgen_ty_2 = 30;
+pub const NV_VGPU_MSG_FUNCTION_DESTROY_FB_SEGMENT: _bindgen_ty_2 = 31;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_SHARE_DEVICE: _bindgen_ty_2 = 32;
+pub const NV_VGPU_MSG_FUNCTION_DEFERRED_API_CONTROL: _bindgen_ty_2 = 33;
+pub const NV_VGPU_MSG_FUNCTION_REMOVE_DEFERRED_API: _bindgen_ty_2 = 34;
+pub const NV_VGPU_MSG_FUNCTION_SIM_ESCAPE_READ: _bindgen_ty_2 = 35;
+pub const NV_VGPU_MSG_FUNCTION_SIM_ESCAPE_WRITE: _bindgen_ty_2 = 36;
+pub const NV_VGPU_MSG_FUNCTION_SIM_MANAGE_DISPLAY_CONTEXT_DMA: _bindgen_ty_2 = 37;
+pub const NV_VGPU_MSG_FUNCTION_FREE_VIDMEM_VIRT: _bindgen_ty_2 = 38;
+pub const NV_VGPU_MSG_FUNCTION_PERF_GET_PSTATE_INFO: _bindgen_ty_2 = 39;
+pub const NV_VGPU_MSG_FUNCTION_PERF_GET_PERFMON_SAMPLE: _bindgen_ty_2 = 40;
+pub const NV_VGPU_MSG_FUNCTION_PERF_GET_VIRTUAL_PSTATE_INFO: _bindgen_ty_2 = 41;
+pub const NV_VGPU_MSG_FUNCTION_PERF_GET_LEVEL_INFO: _bindgen_ty_2 = 42;
+pub const NV_VGPU_MSG_FUNCTION_MAP_SEMA_MEMORY: _bindgen_ty_2 = 43;
+pub const NV_VGPU_MSG_FUNCTION_UNMAP_SEMA_MEMORY: _bindgen_ty_2 = 44;
+pub const NV_VGPU_MSG_FUNCTION_SET_SURFACE_PROPERTIES: _bindgen_ty_2 = 45;
+pub const NV_VGPU_MSG_FUNCTION_CLEANUP_SURFACE: _bindgen_ty_2 = 46;
+pub const NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER: _bindgen_ty_2 = 47;
+pub const NV_VGPU_MSG_FUNCTION_TDR_SET_TIMEOUT_STATE: _bindgen_ty_2 = 48;
+pub const NV_VGPU_MSG_FUNCTION_SWITCH_TO_VGA: _bindgen_ty_2 = 49;
+pub const NV_VGPU_MSG_FUNCTION_GPU_EXEC_REG_OPS: _bindgen_ty_2 = 50;
+pub const NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO: _bindgen_ty_2 = 51;
+pub const NV_VGPU_MSG_FUNCTION_ALLOC_VIRTMEM: _bindgen_ty_2 = 52;
+pub const NV_VGPU_MSG_FUNCTION_UPDATE_PDE_2: _bindgen_ty_2 = 53;
+pub const NV_VGPU_MSG_FUNCTION_SET_PAGE_DIRECTORY: _bindgen_ty_2 = 54;
+pub const NV_VGPU_MSG_FUNCTION_GET_STATIC_PSTATE_INFO: _bindgen_ty_2 = 55;
+pub const NV_VGPU_MSG_FUNCTION_TRANSLATE_GUEST_GPU_PTES: _bindgen_ty_2 = 56;
+pub const NV_VGPU_MSG_FUNCTION_RESERVED_57: _bindgen_ty_2 = 57;
+pub const NV_VGPU_MSG_FUNCTION_RESET_CURRENT_GR_CONTEXT: _bindgen_ty_2 = 58;
+pub const NV_VGPU_MSG_FUNCTION_SET_SEMA_MEM_VALIDATION_STATE: _bindgen_ty_2 = 59;
+pub const NV_VGPU_MSG_FUNCTION_GET_ENGINE_UTILIZATION: _bindgen_ty_2 = 60;
+pub const NV_VGPU_MSG_FUNCTION_UPDATE_GPU_PDES: _bindgen_ty_2 = 61;
+pub const NV_VGPU_MSG_FUNCTION_GET_ENCODER_CAPACITY: _bindgen_ty_2 = 62;
+pub const NV_VGPU_MSG_FUNCTION_VGPU_PF_REG_READ32: _bindgen_ty_2 = 63;
+pub const NV_VGPU_MSG_FUNCTION_SET_GUEST_SYSTEM_INFO_EXT: _bindgen_ty_2 = 64;
+pub const NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO: _bindgen_ty_2 = 65;
+pub const NV_VGPU_MSG_FUNCTION_RMFS_INIT: _bindgen_ty_2 = 66;
+pub const NV_VGPU_MSG_FUNCTION_RMFS_CLOSE_QUEUE: _bindgen_ty_2 = 67;
+pub const NV_VGPU_MSG_FUNCTION_RMFS_CLEANUP: _bindgen_ty_2 = 68;
+pub const NV_VGPU_MSG_FUNCTION_RMFS_TEST: _bindgen_ty_2 = 69;
+pub const NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE: _bindgen_ty_2 = 70;
+pub const NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD: _bindgen_ty_2 = 71;
+pub const NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO: _bindgen_ty_2 = 72;
+pub const NV_VGPU_MSG_FUNCTION_SET_REGISTRY: _bindgen_ty_2 = 73;
+pub const NV_VGPU_MSG_FUNCTION_GSP_INIT_POST_OBJGPU: _bindgen_ty_2 = 74;
+pub const NV_VGPU_MSG_FUNCTION_SUBDEV_EVENT_SET_NOTIFICATION: _bindgen_ty_2 = 75;
+pub const NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL: _bindgen_ty_2 = 76;
+pub const NV_VGPU_MSG_FUNCTION_GET_STATIC_INFO2: _bindgen_ty_2 = 77;
+pub const NV_VGPU_MSG_FUNCTION_DUMP_PROTOBUF_COMPONENT: _bindgen_ty_2 = 78;
+pub const NV_VGPU_MSG_FUNCTION_UNSET_PAGE_DIRECTORY: _bindgen_ty_2 = 79;
+pub const NV_VGPU_MSG_FUNCTION_GET_CONSOLIDATED_STATIC_INFO: _bindgen_ty_2 = 80;
+pub const NV_VGPU_MSG_FUNCTION_GMMU_REGISTER_FAULT_BUFFER: _bindgen_ty_2 = 81;
+pub const NV_VGPU_MSG_FUNCTION_GMMU_UNREGISTER_FAULT_BUFFER: _bindgen_ty_2 = 82;
+pub const NV_VGPU_MSG_FUNCTION_GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER: _bindgen_ty_2 = 83;
+pub const NV_VGPU_MSG_FUNCTION_GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER: _bindgen_ty_2 = 84;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_VGPU_FB_USAGE: _bindgen_ty_2 = 85;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_NVFBC_SW_SESSION_UPDATE_INFO: _bindgen_ty_2 = 86;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_NVENC_SW_SESSION_UPDATE_INFO: _bindgen_ty_2 = 87;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_RESET_CHANNEL: _bindgen_ty_2 = 88;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_RESET_ISOLATED_CHANNEL: _bindgen_ty_2 = 89;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_HANDLE_VF_PRI_FAULT: _bindgen_ty_2 = 90;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_CLK_GET_EXTENDED_INFO: _bindgen_ty_2 = 91;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_BOOST: _bindgen_ty_2 = 92;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_VPSTATES_GET_CONTROL: _bindgen_ty_2 = 93;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_ZBC_CLEAR_TABLE: _bindgen_ty_2 = 94;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_COLOR_CLEAR: _bindgen_ty_2 = 95;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_DEPTH_CLEAR: _bindgen_ty_2 = 96;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_SCHEDULE: _bindgen_ty_2 = 97;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_TIMESLICE: _bindgen_ty_2 = 98;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_PREEMPT: _bindgen_ty_2 = 99;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FIFO_DISABLE_CHANNELS: _bindgen_ty_2 = 100;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_TSG_INTERLEAVE_LEVEL: _bindgen_ty_2 = 101;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_CHANNEL_INTERLEAVE_LEVEL: _bindgen_ty_2 = 102;
+pub const NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC: _bindgen_ty_2 = 103;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS_V2: _bindgen_ty_2 = 104;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_AES_ENCRYPT: _bindgen_ty_2 = 105;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_SESSION_KEY: _bindgen_ty_2 = 106;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_CIPHER_SESSION_KEY_STATUS: _bindgen_ty_2 = 107;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES: _bindgen_ty_2 = 108;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_READ_ALL_SM_ERROR_STATES: _bindgen_ty_2 = 109;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_EXCEPTION_MASK: _bindgen_ty_2 = 110;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_PROMOTE_CTX: _bindgen_ty_2 = 111;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_CTXSW_PREEMPTION_BIND: _bindgen_ty_2 = 112;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_SET_CTXSW_PREEMPTION_MODE: _bindgen_ty_2 = 113;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_CTXSW_ZCULL_BIND: _bindgen_ty_2 = 114;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_INITIALIZE_CTX: _bindgen_ty_2 = 115;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES: _bindgen_ty_2 = 116;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FIFO_CLEAR_FAULTED_BIT: _bindgen_ty_2 = 117;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_LATEST_ECC_ADDRESSES: _bindgen_ty_2 = 118;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_MC_SERVICE_INTERRUPTS: _bindgen_ty_2 = 119;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DMA_SET_DEFAULT_VASPACE: _bindgen_ty_2 = 120;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_CE_PCE_MASK: _bindgen_ty_2 = 121;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_ZBC_CLEAR_TABLE_ENTRY: _bindgen_ty_2 = 122;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_NVLINK_PEER_ID_MASK: _bindgen_ty_2 = 123;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_NVLINK_STATUS: _bindgen_ty_2 = 124;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS: _bindgen_ty_2 = 125;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_P2P_CAPS_MATRIX: _bindgen_ty_2 = 126;
+pub const NV_VGPU_MSG_FUNCTION_RESERVED_0: _bindgen_ty_2 = 127;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_PM_AREA_SMPC: _bindgen_ty_2 = 128;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_HWPM_LEGACY: _bindgen_ty_2 = 129;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_B0CC_EXEC_REG_OPS: _bindgen_ty_2 = 130;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_BIND_PM_RESOURCES: _bindgen_ty_2 = 131;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SUSPEND_CONTEXT: _bindgen_ty_2 = 132;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_RESUME_CONTEXT: _bindgen_ty_2 = 133;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_EXEC_REG_OPS: _bindgen_ty_2 = 134;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_MMU_DEBUG: _bindgen_ty_2 = 135;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_READ_SINGLE_SM_ERROR_STATE: _bindgen_ty_2 = 136;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE: _bindgen_ty_2 = 137;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_ERRBAR_DEBUG: _bindgen_ty_2 = 138;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE: _bindgen_ty_2 = 139;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_ALLOC_PMA_STREAM: _bindgen_ty_2 = 140;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_PMA_STREAM_UPDATE_GET_PUT: _bindgen_ty_2 = 141;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FB_GET_INFO_V2: _bindgen_ty_2 = 142;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FIFO_SET_CHANNEL_PROPERTIES: _bindgen_ty_2 = 143;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_GET_CTX_BUFFER_INFO: _bindgen_ty_2 = 144;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_KGR_GET_CTX_BUFFER_PTES: _bindgen_ty_2 = 145;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_EVICT_CTX: _bindgen_ty_2 = 146;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FB_GET_FS_INFO: _bindgen_ty_2 = 147;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GRMGR_GET_GR_FS_INFO: _bindgen_ty_2 = 148;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_STOP_CHANNEL: _bindgen_ty_2 = 149;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_PC_SAMPLING_MODE: _bindgen_ty_2 = 150;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_RATED_TDP_GET_STATUS: _bindgen_ty_2 = 151;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_RATED_TDP_SET_CONTROL: _bindgen_ty_2 = 152;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FREE_PMA_STREAM: _bindgen_ty_2 = 153;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_TIMER_SET_GR_TICK_FREQ: _bindgen_ty_2 = 154;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB: _bindgen_ty_2 = 155;
+pub const NV_VGPU_MSG_FUNCTION_GET_CONSOLIDATED_GR_STATIC_INFO: _bindgen_ty_2 = 156;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP: _bindgen_ty_2 = 157;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_GET_TPC_PARTITION_MODE: _bindgen_ty_2 = 158;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GR_SET_TPC_PARTITION_MODE: _bindgen_ty_2 = 159;
+pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_ALLOCATE: _bindgen_ty_2 = 160;
+pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_DESTROY: _bindgen_ty_2 = 161;
+pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_MAP: _bindgen_ty_2 = 162;
+pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_UNMAP: _bindgen_ty_2 = 163;
+pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_PUSH_STREAM: _bindgen_ty_2 = 164;
+pub const NV_VGPU_MSG_FUNCTION_UVM_PAGING_CHANNEL_SET_HANDLES: _bindgen_ty_2 = 165;
+pub const NV_VGPU_MSG_FUNCTION_UVM_METHOD_STREAM_GUEST_PAGES_OPERATION: _bindgen_ty_2 = 166;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_QUIESCE_PMA_CHANNEL: _bindgen_ty_2 = 167;
+pub const NV_VGPU_MSG_FUNCTION_DCE_RM_INIT: _bindgen_ty_2 = 168;
+pub const NV_VGPU_MSG_FUNCTION_REGISTER_VIRTUAL_EVENT_BUFFER: _bindgen_ty_2 = 169;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_EVENT_BUFFER_UPDATE_GET: _bindgen_ty_2 = 170;
+pub const NV_VGPU_MSG_FUNCTION_GET_PLCABLE_ADDRESS_KIND: _bindgen_ty_2 = 171;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_PERF_LIMITS_SET_STATUS_V2: _bindgen_ty_2 = 172;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM: _bindgen_ty_2 = 173;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_MMU_DEBUG_MODE: _bindgen_ty_2 = 174;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS: _bindgen_ty_2 = 175;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FLCN_GET_CTX_BUFFER_SIZE: _bindgen_ty_2 = 176;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FLCN_GET_CTX_BUFFER_INFO: _bindgen_ty_2 = 177;
+pub const NV_VGPU_MSG_FUNCTION_DISABLE_CHANNELS: _bindgen_ty_2 = 178;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FABRIC_MEMORY_DESCRIBE: _bindgen_ty_2 = 179;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FABRIC_MEM_STATS: _bindgen_ty_2 = 180;
+pub const NV_VGPU_MSG_FUNCTION_SAVE_HIBERNATION_DATA: _bindgen_ty_2 = 181;
+pub const NV_VGPU_MSG_FUNCTION_RESTORE_HIBERNATION_DATA: _bindgen_ty_2 = 182;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED: _bindgen_ty_2 = 183;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_CREATE: _bindgen_ty_2 = 184;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_DELETE: _bindgen_ty_2 = 185;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN: _bindgen_ty_2 = 186;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX: _bindgen_ty_2 = 187;
+pub const NV_VGPU_MSG_FUNCTION_PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION: _bindgen_ty_2 =
+ 188;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK:
+ _bindgen_ty_2 = 189;
+pub const NV_VGPU_MSG_FUNCTION_SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER: _bindgen_ty_2 = 190;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_P2P_CAPS: _bindgen_ty_2 = 191;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_BUS_SET_P2P_MAPPING: _bindgen_ty_2 = 192;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_BUS_UNSET_P2P_MAPPING: _bindgen_ty_2 = 193;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK: _bindgen_ty_2 = 194;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_MIGRATABLE_OPS: _bindgen_ty_2 = 195;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_TOTAL_HS_CREDITS: _bindgen_ty_2 = 196;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GET_HS_CREDITS: _bindgen_ty_2 = 197;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_HS_CREDITS: _bindgen_ty_2 = 198;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_PM_AREA_PC_SAMPLER: _bindgen_ty_2 = 199;
+pub const NV_VGPU_MSG_FUNCTION_INVALIDATE_TLB: _bindgen_ty_2 = 200;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_QUERY_ECC_STATUS: _bindgen_ty_2 = 201;
+pub const NV_VGPU_MSG_FUNCTION_ECC_NOTIFIER_WRITE_ACK: _bindgen_ty_2 = 202;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_GET_MODE_MMU_DEBUG: _bindgen_ty_2 = 203;
+pub const NV_VGPU_MSG_FUNCTION_RM_API_CONTROL: _bindgen_ty_2 = 204;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_INTERNAL_GPU_START_FABRIC_PROBE: _bindgen_ty_2 = 205;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_NVLINK_GET_INBAND_RECEIVED_DATA: _bindgen_ty_2 = 206;
+pub const NV_VGPU_MSG_FUNCTION_GET_STATIC_DATA: _bindgen_ty_2 = 207;
+pub const NV_VGPU_MSG_FUNCTION_RESERVED_208: _bindgen_ty_2 = 208;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_GPU_GET_INFO_V2: _bindgen_ty_2 = 209;
+pub const NV_VGPU_MSG_FUNCTION_GET_BRAND_CAPS: _bindgen_ty_2 = 210;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_NVLINK_INBAND_SEND_DATA: _bindgen_ty_2 = 211;
+pub const NV_VGPU_MSG_FUNCTION_UPDATE_GPM_GUEST_BUFFER_INFO: _bindgen_ty_2 = 212;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_INTERNAL_CONTROL_GSP_TRACE: _bindgen_ty_2 = 213;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SET_ZBC_STENCIL_CLEAR: _bindgen_ty_2 = 214;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_VGPU_HEAP_STATS: _bindgen_ty_2 = 215;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_SUBDEVICE_GET_LIBOS_HEAP_STATS: _bindgen_ty_2 = 216;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_SET_MODE_MMU_GCC_DEBUG: _bindgen_ty_2 = 217;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_DBG_GET_MODE_MMU_GCC_DEBUG: _bindgen_ty_2 = 218;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_HES: _bindgen_ty_2 = 219;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_RELEASE_HES: _bindgen_ty_2 = 220;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_RESERVE_CCU_PROF: _bindgen_ty_2 = 221;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_RELEASE_CCU_PROF: _bindgen_ty_2 = 222;
+pub const NV_VGPU_MSG_FUNCTION_RESERVED: _bindgen_ty_2 = 223;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_GET_CHIPLET_HS_CREDIT_POOL: _bindgen_ty_2 = 224;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_CMD_GET_HS_CREDITS_MAPPING: _bindgen_ty_2 = 225;
+pub const NV_VGPU_MSG_FUNCTION_CTRL_EXEC_PARTITIONS_EXPORT: _bindgen_ty_2 = 226;
+pub const NV_VGPU_MSG_FUNCTION_NUM_FUNCTIONS: _bindgen_ty_2 = 227;
+pub type _bindgen_ty_2 = ffi::c_uint;
+pub const NV_VGPU_MSG_EVENT_FIRST_EVENT: _bindgen_ty_3 = 4096;
+pub const NV_VGPU_MSG_EVENT_GSP_INIT_DONE: _bindgen_ty_3 = 4097;
+pub const NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER: _bindgen_ty_3 = 4098;
+pub const NV_VGPU_MSG_EVENT_POST_EVENT: _bindgen_ty_3 = 4099;
+pub const NV_VGPU_MSG_EVENT_RC_TRIGGERED: _bindgen_ty_3 = 4100;
+pub const NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED: _bindgen_ty_3 = 4101;
+pub const NV_VGPU_MSG_EVENT_OS_ERROR_LOG: _bindgen_ty_3 = 4102;
+pub const NV_VGPU_MSG_EVENT_RG_LINE_INTR: _bindgen_ty_3 = 4103;
+pub const NV_VGPU_MSG_EVENT_GPUACCT_PERFMON_UTIL_SAMPLES: _bindgen_ty_3 = 4104;
+pub const NV_VGPU_MSG_EVENT_SIM_READ: _bindgen_ty_3 = 4105;
+pub const NV_VGPU_MSG_EVENT_SIM_WRITE: _bindgen_ty_3 = 4106;
+pub const NV_VGPU_MSG_EVENT_SEMAPHORE_SCHEDULE_CALLBACK: _bindgen_ty_3 = 4107;
+pub const NV_VGPU_MSG_EVENT_UCODE_LIBOS_PRINT: _bindgen_ty_3 = 4108;
+pub const NV_VGPU_MSG_EVENT_VGPU_GSP_PLUGIN_TRIGGERED: _bindgen_ty_3 = 4109;
+pub const NV_VGPU_MSG_EVENT_PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK: _bindgen_ty_3 = 4110;
+pub const NV_VGPU_MSG_EVENT_PERF_BRIDGELESS_INFO_UPDATE: _bindgen_ty_3 = 4111;
+pub const NV_VGPU_MSG_EVENT_VGPU_CONFIG: _bindgen_ty_3 = 4112;
+pub const NV_VGPU_MSG_EVENT_DISPLAY_MODESET: _bindgen_ty_3 = 4113;
+pub const NV_VGPU_MSG_EVENT_EXTDEV_INTR_SERVICE: _bindgen_ty_3 = 4114;
+pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_256: _bindgen_ty_3 = 4115;
+pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_512: _bindgen_ty_3 = 4116;
+pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_1024: _bindgen_ty_3 = 4117;
+pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_2048: _bindgen_ty_3 = 4118;
+pub const NV_VGPU_MSG_EVENT_NVLINK_INBAND_RECEIVED_DATA_4096: _bindgen_ty_3 = 4119;
+pub const NV_VGPU_MSG_EVENT_TIMED_SEMAPHORE_RELEASE: _bindgen_ty_3 = 4120;
+pub const NV_VGPU_MSG_EVENT_NVLINK_IS_GPU_DEGRADED: _bindgen_ty_3 = 4121;
+pub const NV_VGPU_MSG_EVENT_PFM_REQ_HNDLR_STATE_SYNC_CALLBACK: _bindgen_ty_3 = 4122;
+pub const NV_VGPU_MSG_EVENT_NVLINK_FAULT_UP: _bindgen_ty_3 = 4123;
+pub const NV_VGPU_MSG_EVENT_GSP_LOCKDOWN_NOTICE: _bindgen_ty_3 = 4124;
+pub const NV_VGPU_MSG_EVENT_MIG_CI_CONFIG_UPDATE: _bindgen_ty_3 = 4125;
+pub const NV_VGPU_MSG_EVENT_UPDATE_GSP_TRACE: _bindgen_ty_3 = 4126;
+pub const NV_VGPU_MSG_EVENT_NVLINK_FATAL_ERROR_RECOVERY: _bindgen_ty_3 = 4127;
+pub const NV_VGPU_MSG_EVENT_GSP_POST_NOCAT_RECORD: _bindgen_ty_3 = 4128;
+pub const NV_VGPU_MSG_EVENT_FECS_ERROR: _bindgen_ty_3 = 4129;
+pub const NV_VGPU_MSG_EVENT_RECOVERY_ACTION: _bindgen_ty_3 = 4130;
+pub const NV_VGPU_MSG_EVENT_NUM_EVENTS: _bindgen_ty_3 = 4131;
+pub type _bindgen_ty_3 = ffi::c_uint;
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+ pub totalVFs: u32_,
+ pub firstVfOffset: u32_,
+ pub vfFeatureMask: u32_,
+ pub FirstVFBar0Address: u64_,
+ pub FirstVFBar1Address: u64_,
+ pub FirstVFBar2Address: u64_,
+ pub bar0Size: u64_,
+ pub bar1Size: u64_,
+ pub bar2Size: u64_,
+ pub b64bitBar0: u8_,
+ pub b64bitBar1: u8_,
+ pub b64bitBar2: u8_,
+ pub bSriovEnabled: u8_,
+ pub bSriovHeavyEnabled: u8_,
+ pub bEmulateVFBar0TlbInvalidationRegister: u8_,
+ pub bClientRmAllocatedCtxBuffer: u8_,
+ pub bNonPowerOf2ChannelCountSupported: u8_,
+ pub bVfResizableBAR1Supported: u8_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+ pub BoardID: u32_,
+ pub chipSKU: [ffi::c_char; 9usize],
+ pub chipSKUMod: [ffi::c_char; 5usize],
+ pub skuConfigVersion: u32_,
+ pub project: [ffi::c_char; 5usize],
+ pub projectSKU: [ffi::c_char; 5usize],
+ pub CDP: [ffi::c_char; 6usize],
+ pub projectSKUMod: [ffi::c_char; 2usize],
+ pub businessCycle: u32_,
+}
+pub type NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG = [u8_; 17usize];
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+ pub base: u64_,
+ pub limit: u64_,
+ pub reserved: u64_,
+ pub performance: u32_,
+ pub supportCompressed: u8_,
+ pub supportISO: u8_,
+ pub bProtected: u8_,
+ pub blackList: NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+ pub numFBRegions: u32_,
+ pub fbRegion: [NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO; 16usize],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ pub index: u32_,
+ pub flags: u32_,
+ pub length: u32_,
+ pub data: [u8_; 256usize],
+}
+impl Default for NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct DOD_METHOD_DATA {
+ pub status: u32_,
+ pub acpiIdListLen: u32_,
+ pub acpiIdList: [u32_; 16usize],
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct JT_METHOD_DATA {
+ pub status: u32_,
+ pub jtCaps: u32_,
+ pub jtRevId: u16_,
+ pub bSBIOSCaps: u8_,
+ pub __bindgen_padding_0: u8,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct MUX_METHOD_DATA_ELEMENT {
+ pub acpiId: u32_,
+ pub mode: u32_,
+ pub status: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct MUX_METHOD_DATA {
+ pub tableLen: u32_,
+ pub acpiIdMuxModeTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
+ pub acpiIdMuxPartTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
+ pub acpiIdMuxStateTable: [MUX_METHOD_DATA_ELEMENT; 16usize],
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct CAPS_METHOD_DATA {
+ pub status: u32_,
+ pub optimusCaps: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct ACPI_METHOD_DATA {
+ pub bValid: u8_,
+ pub __bindgen_padding_0: [u8; 3usize],
+ pub dodMethodData: DOD_METHOD_DATA,
+ pub jtMethodData: JT_METHOD_DATA,
+ pub muxMethodData: MUX_METHOD_DATA,
+ pub capsMethodData: CAPS_METHOD_DATA,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS {
+ pub headIndex: u32_,
+ pub maxHResolution: u32_,
+ pub maxVResolution: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS {
+ pub numHeads: u32_,
+ pub maxNumHeads: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct BUSINFO {
+ pub deviceID: u16_,
+ pub vendorID: u16_,
+ pub subdeviceID: u16_,
+ pub subvendorID: u16_,
+ pub revisionID: u8_,
+ pub __bindgen_padding_0: u8,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GSP_VF_INFO {
+ pub totalVFs: u32_,
+ pub firstVFOffset: u32_,
+ pub FirstVFBar0Address: u64_,
+ pub FirstVFBar1Address: u64_,
+ pub FirstVFBar2Address: u64_,
+ pub b64bitBar0: u8_,
+ pub b64bitBar1: u8_,
+ pub b64bitBar2: u8_,
+ pub __bindgen_padding_0: [u8; 5usize],
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GSP_PCIE_CONFIG_REG {
+ pub linkCap: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct EcidManufacturingInfo {
+ pub ecidLow: u32_,
+ pub ecidHigh: u32_,
+ pub ecidExtended: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct FW_WPR_LAYOUT_OFFSET {
+ pub nonWprHeapOffset: u64_,
+ pub frtsOffset: u64_,
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct GspStaticConfigInfo_t {
+ pub grCapsBits: [u8_; 23usize],
+ pub gidInfo: NV2080_CTRL_GPU_GET_GID_INFO_PARAMS,
+ pub SKUInfo: NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS,
+ pub fbRegionInfoParams: NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS,
+ pub sriovCaps: NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS,
+ pub sriovMaxGfid: u32_,
+ pub engineCaps: [u32_; 3usize],
+ pub poisonFuseEnabled: u8_,
+ pub fb_length: u64_,
+ pub fbio_mask: u64_,
+ pub fb_bus_width: u32_,
+ pub fb_ram_type: u32_,
+ pub fbp_mask: u64_,
+ pub l2_cache_size: u32_,
+ pub gpuNameString: [u8_; 64usize],
+ pub gpuShortNameString: [u8_; 64usize],
+ pub gpuNameString_Unicode: [u16_; 64usize],
+ pub bGpuInternalSku: u8_,
+ pub bIsQuadroGeneric: u8_,
+ pub bIsQuadroAd: u8_,
+ pub bIsNvidiaNvs: u8_,
+ pub bIsVgx: u8_,
+ pub bGeforceSmb: u8_,
+ pub bIsTitan: u8_,
+ pub bIsTesla: u8_,
+ pub bIsMobile: u8_,
+ pub bIsGc6Rtd3Allowed: u8_,
+ pub bIsGc8Rtd3Allowed: u8_,
+ pub bIsGcOffRtd3Allowed: u8_,
+ pub bIsGcoffLegacyAllowed: u8_,
+ pub bIsMigSupported: u8_,
+ pub RTD3GC6TotalBoardPower: u16_,
+ pub RTD3GC6PerstDelay: u16_,
+ pub bar1PdeBase: u64_,
+ pub bar2PdeBase: u64_,
+ pub bVbiosValid: u8_,
+ pub vbiosSubVendor: u32_,
+ pub vbiosSubDevice: u32_,
+ pub bPageRetirementSupported: u8_,
+ pub bSplitVasBetweenServerClientRm: u8_,
+ pub bClRootportNeedsNosnoopWAR: u8_,
+ pub displaylessMaxHeads: VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS,
+ pub displaylessMaxResolution: VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS,
+ pub displaylessMaxPixels: u64_,
+ pub hInternalClient: u32_,
+ pub hInternalDevice: u32_,
+ pub hInternalSubdevice: u32_,
+ pub bSelfHostedMode: u8_,
+ pub bAtsSupported: u8_,
+ pub bIsGpuUefi: u8_,
+ pub bIsEfiInit: u8_,
+ pub ecidInfo: [EcidManufacturingInfo; 2usize],
+ pub fwWprLayoutOffset: FW_WPR_LAYOUT_OFFSET,
+}
+impl Default for GspStaticConfigInfo_t {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GspSystemInfo {
+ pub gpuPhysAddr: u64_,
+ pub gpuPhysFbAddr: u64_,
+ pub gpuPhysInstAddr: u64_,
+ pub gpuPhysIoAddr: u64_,
+ pub nvDomainBusDeviceFunc: u64_,
+ pub simAccessBufPhysAddr: u64_,
+ pub notifyOpSharedSurfacePhysAddr: u64_,
+ pub pcieAtomicsOpMask: u64_,
+ pub consoleMemSize: u64_,
+ pub maxUserVa: u64_,
+ pub pciConfigMirrorBase: u32_,
+ pub pciConfigMirrorSize: u32_,
+ pub PCIDeviceID: u32_,
+ pub PCISubDeviceID: u32_,
+ pub PCIRevisionID: u32_,
+ pub pcieAtomicsCplDeviceCapMask: u32_,
+ pub oorArch: u8_,
+ pub __bindgen_padding_0: [u8; 7usize],
+ pub clPdbProperties: u64_,
+ pub Chipset: u32_,
+ pub bGpuBehindBridge: u8_,
+ pub bFlrSupported: u8_,
+ pub b64bBar0Supported: u8_,
+ pub bMnocAvailable: u8_,
+ pub chipsetL1ssEnable: u32_,
+ pub bUpstreamL0sUnsupported: u8_,
+ pub bUpstreamL1Unsupported: u8_,
+ pub bUpstreamL1PorSupported: u8_,
+ pub bUpstreamL1PorMobileOnly: u8_,
+ pub bSystemHasMux: u8_,
+ pub upstreamAddressValid: u8_,
+ pub FHBBusInfo: BUSINFO,
+ pub chipsetIDInfo: BUSINFO,
+ pub __bindgen_padding_1: [u8; 2usize],
+ pub acpiMethodData: ACPI_METHOD_DATA,
+ pub hypervisorType: u32_,
+ pub bIsPassthru: u8_,
+ pub __bindgen_padding_2: [u8; 7usize],
+ pub sysTimerOffsetNs: u64_,
+ pub gspVFInfo: GSP_VF_INFO,
+ pub bIsPrimary: u8_,
+ pub isGridBuild: u8_,
+ pub __bindgen_padding_3: [u8; 2usize],
+ pub pcieConfigReg: GSP_PCIE_CONFIG_REG,
+ pub gridBuildCsp: u32_,
+ pub bPreserveVideoMemoryAllocations: u8_,
+ pub bTdrEventSupported: u8_,
+ pub bFeatureStretchVblankCapable: u8_,
+ pub bEnableDynamicGranularityPageArrays: u8_,
+ pub bClockBoostSupported: u8_,
+ pub bRouteDispIntrsToCPU: u8_,
+ pub __bindgen_padding_4: [u8; 6usize],
+ pub hostPageSize: u64_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct MESSAGE_QUEUE_INIT_ARGUMENTS {
+ pub sharedMemPhysAddr: u64_,
+ pub pageTableEntryCount: u32_,
+ pub __bindgen_padding_0: [u8; 4usize],
+ pub cmdQueueOffset: u64_,
+ pub statQueueOffset: u64_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GSP_SR_INIT_ARGUMENTS {
+ pub oldLevel: u32_,
+ pub flags: u32_,
+ pub bInPMTransition: u8_,
+ pub __bindgen_padding_0: [u8; 3usize],
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GSP_ARGUMENTS_CACHED {
+ pub messageQueueInitArguments: MESSAGE_QUEUE_INIT_ARGUMENTS,
+ pub srInitArguments: GSP_SR_INIT_ARGUMENTS,
+ pub gpuInstance: u32_,
+ pub bDmemStack: u8_,
+ pub __bindgen_padding_0: [u8; 7usize],
+ pub profilerArgs: GSP_ARGUMENTS_CACHED__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GSP_ARGUMENTS_CACHED__bindgen_ty_1 {
+ pub pa: u64_,
+ pub size: u64_,
+}
+#[repr(C)]
+#[derive(Copy, Clone, Zeroable)]
+pub union rpc_message_rpc_union_field_v03_00 {
+ pub spare: u32_,
+ pub cpuRmGfid: u32_,
+}
+impl Default for rpc_message_rpc_union_field_v03_00 {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+pub type rpc_message_rpc_union_field_v = rpc_message_rpc_union_field_v03_00;
+#[repr(C)]
+pub struct rpc_message_header_v03_00 {
+ pub header_version: u32_,
+ pub signature: u32_,
+ pub length: u32_,
+ pub function: u32_,
+ pub rpc_result: u32_,
+ pub rpc_result_private: u32_,
+ pub sequence: u32_,
+ pub u: rpc_message_rpc_union_field_v,
+ pub rpc_message_data: __IncompleteArrayField<u8_>,
+}
+impl Default for rpc_message_header_v03_00 {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+pub type rpc_message_header_v = rpc_message_header_v03_00;
+#[repr(C)]
+#[derive(Copy, Clone, Zeroable)]
+pub struct GspFwWprMeta {
+ pub magic: u64_,
+ pub revision: u64_,
+ pub sysmemAddrOfRadix3Elf: u64_,
+ pub sizeOfRadix3Elf: u64_,
+ pub sysmemAddrOfBootloader: u64_,
+ pub sizeOfBootloader: u64_,
+ pub bootloaderCodeOffset: u64_,
+ pub bootloaderDataOffset: u64_,
+ pub bootloaderManifestOffset: u64_,
+ pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_1,
+ pub gspFwRsvdStart: u64_,
+ pub nonWprHeapOffset: u64_,
+ pub nonWprHeapSize: u64_,
+ pub gspFwWprStart: u64_,
+ pub gspFwHeapOffset: u64_,
+ pub gspFwHeapSize: u64_,
+ pub gspFwOffset: u64_,
+ pub bootBinOffset: u64_,
+ pub frtsOffset: u64_,
+ pub frtsSize: u64_,
+ pub gspFwWprEnd: u64_,
+ pub fbSize: u64_,
+ pub vgaWorkspaceOffset: u64_,
+ pub vgaWorkspaceSize: u64_,
+ pub bootCount: u64_,
+ pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_2,
+ pub gspFwHeapVfPartitionCount: u8_,
+ pub flags: u8_,
+ pub padding: [u8_; 2usize],
+ pub pmuReservedSize: u32_,
+ pub verified: u64_,
+}
+#[repr(C)]
+#[derive(Copy, Clone, Zeroable)]
+pub union GspFwWprMeta__bindgen_ty_1 {
+ pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_1__bindgen_ty_1,
+ pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_1__bindgen_ty_2,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_1 {
+ pub sysmemAddrOfSignature: u64_,
+ pub sizeOfSignature: u64_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GspFwWprMeta__bindgen_ty_1__bindgen_ty_2 {
+ pub gspFwHeapFreeListWprOffset: u32_,
+ pub unused0: u32_,
+ pub unused1: u64_,
+}
+impl Default for GspFwWprMeta__bindgen_ty_1 {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+#[repr(C)]
+#[derive(Copy, Clone, Zeroable)]
+pub union GspFwWprMeta__bindgen_ty_2 {
+ pub __bindgen_anon_1: GspFwWprMeta__bindgen_ty_2__bindgen_ty_1,
+ pub __bindgen_anon_2: GspFwWprMeta__bindgen_ty_2__bindgen_ty_2,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_1 {
+ pub partitionRpcAddr: u64_,
+ pub partitionRpcRequestOffset: u16_,
+ pub partitionRpcReplyOffset: u16_,
+ pub elfCodeOffset: u32_,
+ pub elfDataOffset: u32_,
+ pub elfCodeSize: u32_,
+ pub elfDataSize: u32_,
+ pub lsUcodeVersion: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct GspFwWprMeta__bindgen_ty_2__bindgen_ty_2 {
+ pub partitionRpcPadding: [u32_; 4usize],
+ pub sysmemAddrOfCrashReportQueue: u64_,
+ pub sizeOfCrashReportQueue: u32_,
+ pub lsUcodeVersionPadding: [u32_; 1usize],
+}
+impl Default for GspFwWprMeta__bindgen_ty_2 {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+impl Default for GspFwWprMeta {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+pub type LibosAddress = u64_;
+pub const LibosMemoryRegionKind_LIBOS_MEMORY_REGION_NONE: LibosMemoryRegionKind = 0;
+pub const LibosMemoryRegionKind_LIBOS_MEMORY_REGION_CONTIGUOUS: LibosMemoryRegionKind = 1;
+pub const LibosMemoryRegionKind_LIBOS_MEMORY_REGION_RADIX3: LibosMemoryRegionKind = 2;
+pub type LibosMemoryRegionKind = ffi::c_uint;
+pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_NONE: LibosMemoryRegionLoc = 0;
+pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_SYSMEM: LibosMemoryRegionLoc = 1;
+pub const LibosMemoryRegionLoc_LIBOS_MEMORY_REGION_LOC_FB: LibosMemoryRegionLoc = 2;
+pub type LibosMemoryRegionLoc = ffi::c_uint;
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct LibosMemoryRegionInitArgument {
+ pub id8: LibosAddress,
+ pub pa: LibosAddress,
+ pub size: LibosAddress,
+ pub kind: u8_,
+ pub loc: u8_,
+ pub __bindgen_padding_0: [u8; 6usize],
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct PACKED_REGISTRY_ENTRY {
+ pub nameOffset: u32_,
+ pub type_: u8_,
+ pub __bindgen_padding_0: [u8; 3usize],
+ pub data: u32_,
+ pub length: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default)]
+pub struct PACKED_REGISTRY_TABLE {
+ pub size: u32_,
+ pub numEntries: u32_,
+ pub entries: __IncompleteArrayField<PACKED_REGISTRY_ENTRY>,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct msgqTxHeader {
+ pub version: u32_,
+ pub size: u32_,
+ pub msgSize: u32_,
+ pub msgCount: u32_,
+ pub writePtr: u32_,
+ pub flags: u32_,
+ pub rxHdrOff: u32_,
+ pub entryOff: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone, Zeroable)]
+pub struct msgqRxHeader {
+ pub readPtr: u32_,
+}
+#[repr(C)]
+#[repr(align(8))]
+#[derive(Zeroable)]
+pub struct GSP_MSG_QUEUE_ELEMENT {
+ pub authTagBuffer: [u8_; 16usize],
+ pub aadBuffer: [u8_; 16usize],
+ pub checkSum: u32_,
+ pub seqNum: u32_,
+ pub elemCount: u32_,
+ pub __bindgen_padding_0: [u8; 4usize],
+ pub rpc: rpc_message_header_v,
+}
+impl Default for GSP_MSG_QUEUE_ELEMENT {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+#[repr(C)]
+#[derive(Debug, Default)]
+pub struct rpc_run_cpu_sequencer_v17_00 {
+ pub bufferSizeDWord: u32_,
+ pub cmdIndex: u32_,
+ pub regSaveArea: [u32_; 8usize],
+ pub commandBuffer: __IncompleteArrayField<u32_>,
+}
+pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_WRITE: GSP_SEQ_BUF_OPCODE = 0;
+pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_MODIFY: GSP_SEQ_BUF_OPCODE = 1;
+pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_POLL: GSP_SEQ_BUF_OPCODE = 2;
+pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_DELAY_US: GSP_SEQ_BUF_OPCODE = 3;
+pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_REG_STORE: GSP_SEQ_BUF_OPCODE = 4;
+pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESET: GSP_SEQ_BUF_OPCODE = 5;
+pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_START: GSP_SEQ_BUF_OPCODE = 6;
+pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT: GSP_SEQ_BUF_OPCODE = 7;
+pub const GSP_SEQ_BUF_OPCODE_GSP_SEQ_BUF_OPCODE_CORE_RESUME: GSP_SEQ_BUF_OPCODE = 8;
+pub type GSP_SEQ_BUF_OPCODE = ffi::c_uint;
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct GSP_SEQ_BUF_PAYLOAD_REG_WRITE {
+ pub addr: u32_,
+ pub val: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct GSP_SEQ_BUF_PAYLOAD_REG_MODIFY {
+ pub addr: u32_,
+ pub mask: u32_,
+ pub val: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct GSP_SEQ_BUF_PAYLOAD_REG_POLL {
+ pub addr: u32_,
+ pub mask: u32_,
+ pub val: u32_,
+ pub timeout: u32_,
+ pub error: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct GSP_SEQ_BUF_PAYLOAD_DELAY_US {
+ pub val: u32_,
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct GSP_SEQ_BUF_PAYLOAD_REG_STORE {
+ pub addr: u32_,
+ pub index: u32_,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GSP_SEQUENCER_BUFFER_CMD {
+ pub opCode: GSP_SEQ_BUF_OPCODE,
+ pub payload: GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1 {
+ pub regWrite: GSP_SEQ_BUF_PAYLOAD_REG_WRITE,
+ pub regModify: GSP_SEQ_BUF_PAYLOAD_REG_MODIFY,
+ pub regPoll: GSP_SEQ_BUF_PAYLOAD_REG_POLL,
+ pub delayUs: GSP_SEQ_BUF_PAYLOAD_DELAY_US,
+ pub regStore: GSP_SEQ_BUF_PAYLOAD_REG_STORE,
+}
+impl Default for GSP_SEQUENCER_BUFFER_CMD__bindgen_ty_1 {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
+impl Default for GSP_SEQUENCER_BUFFER_CMD {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+}
diff --git a/drivers/gpu/nova-core/gsp/sequencer.rs b/drivers/gpu/nova-core/gsp/sequencer.rs
new file mode 100644
index 000000000000..2d0369c49092
--- /dev/null
+++ b/drivers/gpu/nova-core/gsp/sequencer.rs
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! GSP Sequencer implementation for the pre-Hopper GSP boot sequence.
+
+use core::{
+ array,
+ mem::{
+ size_of,
+ size_of_val, //
+ },
+};
+
+use kernel::{
+ device,
+ io::poll::read_poll_timeout,
+ prelude::*,
+ time::{
+ delay::fsleep,
+ Delta, //
+ },
+ transmute::FromBytes,
+ types::ARef, //
+};
+
+use crate::{
+ driver::Bar0,
+ falcon::{
+ gsp::Gsp,
+ sec2::Sec2,
+ Falcon, //
+ },
+ gsp::{
+ cmdq::{
+ Cmdq,
+ MessageFromGsp, //
+ },
+ fw,
+ },
+ num::FromSafeCast,
+ sbuffer::SBufferIter,
+};
+
+/// GSP Sequencer information containing the command sequence and data.
+struct GspSequence {
+ /// Number of commands in the buffer; also used when reporting errors.
+ cmd_index: u32,
+ /// Command data buffer containing the sequence of commands.
+ cmd_data: KVec<u8>,
+}
+
+impl MessageFromGsp for GspSequence {
+ const FUNCTION: fw::MsgFunction = fw::MsgFunction::GspRunCpuSequencer;
+ type InitError = Error;
+ type Message = fw::RunCpuSequencer;
+
+ fn read(
+ msg: &Self::Message,
+ sbuffer: &mut SBufferIter<array::IntoIter<&[u8], 2>>,
+ ) -> Result<Self, Self::InitError> {
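+ // Copy the remaining message payload (the raw command buffer) into a
+ // kernel allocation so it can be parsed later.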
+ let cmd_data = sbuffer.flush_into_kvec(GFP_KERNEL)?;
+ Ok(GspSequence {
+ cmd_index: msg.cmd_index(),
+ cmd_data,
+ })
+ }
+}
+
+const CMD_SIZE: usize = size_of::<fw::SequencerBufferCmd>();
+
+/// GSP Sequencer Command types with payload data.
+/// Commands have an opcode and an opcode-dependent struct.
+#[allow(clippy::enum_variant_names)]
+pub(crate) enum GspSeqCmd {
+ RegWrite(fw::RegWritePayload),
+ RegModify(fw::RegModifyPayload),
+ RegPoll(fw::RegPollPayload),
+ DelayUs(fw::DelayUsPayload),
+ RegStore(fw::RegStorePayload),
+ CoreReset,
+ CoreStart,
+ CoreWaitForHalt,
+ CoreResume,
+}
+
+impl GspSeqCmd {
+ /// Creates a new `GspSeqCmd` from raw data returning the command and its size in bytes.
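+ ///
+ /// A minimal usage sketch (assuming `data` starts with a complete, valid
+ /// command; `offset` is a hypothetical cursor into the command buffer):
+ ///
+ /// ```no_run
+ /// let (cmd, size) = GspSeqCmd::new(&data[offset..], dev)?;
+ /// // Advance the cursor by `size` bytes to reach the next command.
+ /// offset += size;
+ /// ```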
+ pub(crate) fn new(data: &[u8], dev: &device::Device) -> Result<(Self, usize)> {
+ let fw_cmd = fw::SequencerBufferCmd::from_bytes(data).ok_or(EINVAL)?;
+ let opcode_size = core::mem::size_of::<u32>();
+
+ let (cmd, size) = match fw_cmd.opcode()? {
+ fw::SeqBufOpcode::RegWrite => {
+ let payload = fw_cmd.reg_write_payload()?;
+ let size = opcode_size + size_of_val(&payload);
+ (GspSeqCmd::RegWrite(payload), size)
+ }
+ fw::SeqBufOpcode::RegModify => {
+ let payload = fw_cmd.reg_modify_payload()?;
+ let size = opcode_size + size_of_val(&payload);
+ (GspSeqCmd::RegModify(payload), size)
+ }
+ fw::SeqBufOpcode::RegPoll => {
+ let payload = fw_cmd.reg_poll_payload()?;
+ let size = opcode_size + size_of_val(&payload);
+ (GspSeqCmd::RegPoll(payload), size)
+ }
+ fw::SeqBufOpcode::DelayUs => {
+ let payload = fw_cmd.delay_us_payload()?;
+ let size = opcode_size + size_of_val(&payload);
+ (GspSeqCmd::DelayUs(payload), size)
+ }
+ fw::SeqBufOpcode::RegStore => {
+ let payload = fw_cmd.reg_store_payload()?;
+ let size = opcode_size + size_of_val(&payload);
+ (GspSeqCmd::RegStore(payload), size)
+ }
+ fw::SeqBufOpcode::CoreReset => (GspSeqCmd::CoreReset, opcode_size),
+ fw::SeqBufOpcode::CoreStart => (GspSeqCmd::CoreStart, opcode_size),
+ fw::SeqBufOpcode::CoreWaitForHalt => (GspSeqCmd::CoreWaitForHalt, opcode_size),
+ fw::SeqBufOpcode::CoreResume => (GspSeqCmd::CoreResume, opcode_size),
+ };
+
+ if data.len() < size {
+ dev_err!(dev, "Data is not enough for command");
+ return Err(EINVAL);
+ }
+
+ Ok((cmd, size))
+ }
+}
+
+/// GSP Sequencer for executing firmware commands during boot.
+pub(crate) struct GspSequencer<'a> {
+ /// Sequencer information with command data.
+ seq_info: GspSequence,
+ /// `Bar0` for register access.
+ bar: &'a Bar0,
+ /// SEC2 falcon for core operations.
+ sec2_falcon: &'a Falcon<Sec2>,
+ /// GSP falcon for core operations.
+ gsp_falcon: &'a Falcon<Gsp>,
+ /// LibOS DMA handle address.
+ libos_dma_handle: u64,
+ /// Bootloader application version.
+ bootloader_app_version: u32,
+ /// Device for logging.
+ dev: ARef<device::Device>,
+}
+
+/// Trait for running sequencer commands.
+pub(crate) trait GspSeqCmdRunner {
+ fn run(&self, sequencer: &GspSequencer<'_>) -> Result;
+}
+
+impl GspSeqCmdRunner for fw::RegWritePayload {
+ fn run(&self, sequencer: &GspSequencer<'_>) -> Result {
+ let addr = usize::from_safe_cast(self.addr());
+
+ sequencer.bar.try_write32(self.val(), addr)
+ }
+}
+
+impl GspSeqCmdRunner for fw::RegModifyPayload {
+ fn run(&self, sequencer: &GspSequencer<'_>) -> Result {
+ let addr = usize::from_safe_cast(self.addr());
+
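+ // Read-modify-write: clear the bits selected by `mask`, then OR in the
+ // new value.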
+ sequencer.bar.try_read32(addr).and_then(|val| {
+ sequencer
+ .bar
+ .try_write32((val & !self.mask()) | self.val(), addr)
+ })
+ }
+}
+
+impl GspSeqCmdRunner for fw::RegPollPayload {
+ fn run(&self, sequencer: &GspSequencer<'_>) -> Result {
+ let addr = usize::from_safe_cast(self.addr());
+
+ // The timeout field is in microseconds; default to 4 seconds when unset.
+ let timeout_us = if self.timeout() == 0 {
+ 4_000_000
+ } else {
+ i64::from(self.timeout())
+ };
+
+ // First read.
+ sequencer.bar.try_read32(addr)?;
+
+ // Poll the requested register with requested timeout.
+ read_poll_timeout(
+ || sequencer.bar.try_read32(addr),
+ |current| (current & self.mask()) == self.val(),
+ Delta::ZERO,
+ Delta::from_micros(timeout_us),
+ )
+ .map(|_| ())
+ }
+}
+
+impl GspSeqCmdRunner for fw::DelayUsPayload {
+ fn run(&self, _sequencer: &GspSequencer<'_>) -> Result {
+ fsleep(Delta::from_micros(i64::from(self.val())));
+ Ok(())
+ }
+}
+
+impl GspSeqCmdRunner for fw::RegStorePayload {
+ fn run(&self, sequencer: &GspSequencer<'_>) -> Result {
+ let addr = usize::from_safe_cast(self.addr());
+
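+ // Perform the read for its side effect only; the value (and the
+ // payload's save-area index) is currently discarded.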
+ sequencer.bar.try_read32(addr).map(|_| ())
+ }
+}
+
+impl GspSeqCmdRunner for GspSeqCmd {
+ fn run(&self, seq: &GspSequencer<'_>) -> Result {
+ match self {
+ GspSeqCmd::RegWrite(cmd) => cmd.run(seq),
+ GspSeqCmd::RegModify(cmd) => cmd.run(seq),
+ GspSeqCmd::RegPoll(cmd) => cmd.run(seq),
+ GspSeqCmd::DelayUs(cmd) => cmd.run(seq),
+ GspSeqCmd::RegStore(cmd) => cmd.run(seq),
+ GspSeqCmd::CoreReset => {
+ seq.gsp_falcon.reset(seq.bar)?;
+ seq.gsp_falcon.dma_reset(seq.bar);
+ Ok(())
+ }
+ GspSeqCmd::CoreStart => {
+ seq.gsp_falcon.start(seq.bar)?;
+ Ok(())
+ }
+ GspSeqCmd::CoreWaitForHalt => {
+ seq.gsp_falcon.wait_till_halted(seq.bar)?;
+ Ok(())
+ }
+ GspSeqCmd::CoreResume => {
+ // At this point, 'SEC2-RTOS' has been loaded into SEC2 by the sequencer
+ // but neither SEC2-RTOS nor GSP-RM is running yet. This part of the
+ // sequencer will start both.
+
+ // Reset the GSP to prepare it for resuming.
+ seq.gsp_falcon.reset(seq.bar)?;
+
+ // Write the libOS DMA handle to GSP mailboxes.
+ seq.gsp_falcon.write_mailboxes(
+ seq.bar,
+ Some(seq.libos_dma_handle as u32),
+ Some((seq.libos_dma_handle >> 32) as u32),
+ );
+
+ // Start the SEC2 falcon which will trigger GSP-RM to resume on the GSP.
+ seq.sec2_falcon.start(seq.bar)?;
+
+ // Poll until GSP-RM reload/resume has completed (up to 2 seconds).
+ seq.gsp_falcon
+ .check_reload_completed(seq.bar, Delta::from_secs(2))?;
+
+ // Verify SEC2 completed successfully by checking its mailbox for errors.
+ let mbox0 = seq.sec2_falcon.read_mailbox0(seq.bar);
+ if mbox0 != 0 {
+ dev_err!(seq.dev, "Sequencer: sec2 errors: {:?}\n", mbox0);
+ return Err(EIO);
+ }
+
+ // Configure GSP with the bootloader version.
+ seq.gsp_falcon
+ .write_os_version(seq.bar, seq.bootloader_app_version);
+
+ // Verify the GSP's RISC-V core is active indicating successful GSP boot.
+ if !seq.gsp_falcon.is_riscv_active(seq.bar) {
+ dev_err!(seq.dev, "Sequencer: RISC-V core is not active\n");
+ return Err(EIO);
+ }
+ Ok(())
+ }
+ }
+ }
+}
+
+/// Iterator over GSP sequencer commands.
+pub(crate) struct GspSeqIter<'a> {
+ /// Command data buffer.
+ cmd_data: &'a [u8],
+ /// Current position in the buffer.
+ current_offset: usize,
+ /// Total number of commands to process.
+ total_cmds: u32,
+ /// Number of commands processed so far.
+ cmds_processed: u32,
+ /// Device for logging.
+ dev: ARef<device::Device>,
+}
+
+impl<'a> Iterator for GspSeqIter<'a> {
+ type Item = Result<GspSeqCmd>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // Stop if we've processed all commands or reached the end of data.
+ if self.cmds_processed >= self.total_cmds || self.current_offset >= self.cmd_data.len() {
+ return None;
+ }
+
+ // Check if we have enough data for opcode.
+ if self.current_offset + core::mem::size_of::<u32>() > self.cmd_data.len() {
+ return Some(Err(EIO));
+ }
+
+ let offset = self.current_offset;
+
+ // Handle command creation based on available data,
+ // zero-pad if necessary (since the last command may not be full-sized).
+ let mut buffer = [0u8; CMD_SIZE];
+ let copy_len = if offset + CMD_SIZE <= self.cmd_data.len() {
+ CMD_SIZE
+ } else {
+ self.cmd_data.len() - offset
+ };
+ buffer[..copy_len].copy_from_slice(&self.cmd_data[offset..offset + copy_len]);
+ let cmd_result = GspSeqCmd::new(&buffer, &self.dev);
+
+ cmd_result.map_or_else(
+ |_err| {
+ dev_err!(self.dev, "Error parsing command at offset {}", offset);
+ None
+ },
+ |(cmd, size)| {
+ self.current_offset += size;
+ self.cmds_processed += 1;
+ Some(Ok(cmd))
+ },
+ )
+ }
+}
+
+impl<'a> GspSequencer<'a> {
+ fn iter(&self) -> GspSeqIter<'_> {
+ let cmd_data = &self.seq_info.cmd_data[..];
+
+ GspSeqIter {
+ cmd_data,
+ current_offset: 0,
+ total_cmds: self.seq_info.cmd_index,
+ cmds_processed: 0,
+ dev: self.dev.clone(),
+ }
+ }
+}
+
+/// Parameters for running the GSP sequencer.
+pub(crate) struct GspSequencerParams<'a> {
+ /// Bootloader application version.
+ pub(crate) bootloader_app_version: u32,
+ /// LibOS DMA handle address.
+ pub(crate) libos_dma_handle: u64,
+ /// GSP falcon for core operations.
+ pub(crate) gsp_falcon: &'a Falcon<Gsp>,
+ /// SEC2 falcon for core operations.
+ pub(crate) sec2_falcon: &'a Falcon<Sec2>,
+ /// Device for logging.
+ pub(crate) dev: ARef<device::Device>,
+ /// BAR0 for register access.
+ pub(crate) bar: &'a Bar0,
+}
+
+impl<'a> GspSequencer<'a> {
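+ /// Receives the sequencer command buffer from the GSP and runs each command in order.
+ ///
+ /// A usage sketch (assuming `cmdq` and `params` have been prepared during GSP boot):
+ ///
+ /// ```no_run
+ /// GspSequencer::run(&mut cmdq, params)?;
+ /// ```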
+ pub(crate) fn run(cmdq: &mut Cmdq, params: GspSequencerParams<'a>) -> Result {
+ let seq_info = loop {
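+ // Wait for the RunCpuSequencer message; `ERANGE` is treated as
+ // retryable, while any other error aborts the boot sequence.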
+ match cmdq.receive_msg::<GspSequence>(Delta::from_secs(10)) {
+ Ok(seq_info) => break seq_info,
+ Err(ERANGE) => continue,
+ Err(e) => return Err(e),
+ }
+ };
+
+ let sequencer = GspSequencer {
+ seq_info,
+ bar: params.bar,
+ sec2_falcon: params.sec2_falcon,
+ gsp_falcon: params.gsp_falcon,
+ libos_dma_handle: params.libos_dma_handle,
+ bootloader_app_version: params.bootloader_app_version,
+ dev: params.dev,
+ };
+
+ dev_dbg!(sequencer.dev, "Running CPU Sequencer commands");
+
+ for cmd_result in sequencer.iter() {
+ match cmd_result {
+ Ok(cmd) => cmd.run(&sequencer)?,
+ Err(e) => {
+ dev_err!(
+ sequencer.dev,
+ "Error running command at index {}",
+ sequencer.seq_info.cmd_index
+ );
+ return Err(e);
+ }
+ }
+ }
+
+ dev_dbg!(
+ sequencer.dev,
+ "CPU Sequencer commands completed successfully"
+ );
+ Ok(())
+ }
+}
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
index cb2bbb30cba1..b98a1c03f13d 100644
--- a/drivers/gpu/nova-core/nova_core.rs
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -2,6 +2,9 @@
//! Nova Core GPU Driver
+#[macro_use]
+mod bitfield;
+
mod dma;
mod driver;
mod falcon;
@@ -9,7 +12,10 @@ mod fb;
mod firmware;
mod gfw;
mod gpu;
+mod gsp;
+mod num;
mod regs;
+mod sbuffer;
mod util;
mod vbios;
diff --git a/drivers/gpu/nova-core/num.rs b/drivers/gpu/nova-core/num.rs
new file mode 100644
index 000000000000..c952a834e662
--- /dev/null
+++ b/drivers/gpu/nova-core/num.rs
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Numerical helper functions and traits.
+//!
+//! This is essentially a staging module for code to mature until it can be moved to the `kernel`
+//! crate.
+
+use kernel::{
+ macros::paste,
+ prelude::*, //
+};
+
+/// Implements safe `as` conversion functions from a given type into a series of target types.
+///
+/// These functions can be used in place of `as`, with the guarantee that they will be lossless.
+macro_rules! impl_safe_as {
+ ($from:ty as { $($into:ty),* }) => {
+ $(
+ paste! {
+ #[doc = ::core::concat!(
+ "Losslessly converts a [`",
+ ::core::stringify!($from),
+ "`] into a [`",
+ ::core::stringify!($into),
+ "`].")]
+ ///
+ /// This conversion is allowed as it is always lossless. Prefer this over the `as`
+ /// keyword to ensure no lossy casts are performed.
+ ///
+ /// This is for use from a `const` context. For non-`const` use, prefer the
+ /// [`FromSafeCast`] and [`IntoSafeCast`] traits.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crate::num;
+ ///
+ #[doc = ::core::concat!(
+ "assert_eq!(num::",
+ ::core::stringify!($from),
+ "_as_",
+ ::core::stringify!($into),
+ "(1",
+ ::core::stringify!($from),
+ "), 1",
+ ::core::stringify!($into),
+ ");")]
+ /// ```
+ #[allow(unused)]
+ #[inline(always)]
+ pub(crate) const fn [<$from _as_ $into>](value: $from) -> $into {
+ kernel::static_assert!(size_of::<$into>() >= size_of::<$from>());
+
+ value as $into
+ }
+ }
+ )*
+ };
+}
+
+impl_safe_as!(u8 as { u16, u32, u64, usize });
+impl_safe_as!(u16 as { u32, u64, usize });
+impl_safe_as!(u32 as { u64, usize } );
+// `u64` and `usize` have the same size on 64-bit platforms.
+#[cfg(CONFIG_64BIT)]
+impl_safe_as!(u64 as { usize } );
+
+// A `usize` fits into a `u64` on 32 and 64-bit platforms.
+#[cfg(any(CONFIG_32BIT, CONFIG_64BIT))]
+impl_safe_as!(usize as { u64 });
+
+// A `usize` fits into a `u32` on 32-bit platforms.
+#[cfg(CONFIG_32BIT)]
+impl_safe_as!(usize as { u32 });
+
+/// Extension trait providing guaranteed lossless cast to `Self` from `T`.
+///
+/// The standard library's `From` implementations do not cover conversions that are not portable or
+/// future-proof. For instance, even though it is safe today, `From<usize>` is not implemented for
+/// [`u64`] because of the possibility of supporting larger-than-64-bit architectures in the future.
+///
+/// The workaround is to either deal with the error handling of [`TryFrom`] for an operation that
+/// technically cannot fail, or to use the `as` keyword, which can silently strip data if the
+/// destination type is smaller than the source.
+///
+/// Both options are hardly acceptable for the kernel. The kernel is also a much more
+/// architecture-dependent environment, supporting only 32-bit and 64-bit architectures, with some
+/// modules explicitly depending on a specific bus width; such code can greatly benefit from
+/// infallible conversion operations.
+///
+/// Thus this extension trait provides, for the architecture the kernel is built for, safe
+/// conversion between types for which such a cast is lossless.
+///
+/// In other words, this trait is implemented if, for the current build target and with `t: T`, the
+/// `t as Self` operation is completely lossless.
+///
+/// Prefer this over the `as` keyword to ensure no lossy casts are performed.
+///
+/// If you need to perform a conversion in `const` context, use [`u64_as_usize`], [`u32_as_usize`],
+/// [`usize_as_u64`], etc.
+///
+/// # Examples
+///
+/// ```
+/// use crate::num::FromSafeCast;
+///
+/// assert_eq!(usize::from_safe_cast(0xf00u32), 0xf00u32 as usize);
+/// ```
+pub(crate) trait FromSafeCast<T> {
+ /// Create a `Self` from `value`. This operation is guaranteed to be lossless.
+ fn from_safe_cast(value: T) -> Self;
+}
+
+impl FromSafeCast<usize> for u64 {
+ fn from_safe_cast(value: usize) -> Self {
+ usize_as_u64(value)
+ }
+}
+
+#[cfg(CONFIG_32BIT)]
+impl FromSafeCast<usize> for u32 {
+ fn from_safe_cast(value: usize) -> Self {
+ usize_as_u32(value)
+ }
+}
+
+impl FromSafeCast<u32> for usize {
+ fn from_safe_cast(value: u32) -> Self {
+ u32_as_usize(value)
+ }
+}
+
+#[cfg(CONFIG_64BIT)]
+impl FromSafeCast<u64> for usize {
+ fn from_safe_cast(value: u64) -> Self {
+ u64_as_usize(value)
+ }
+}
+
+/// Counterpart to the [`FromSafeCast`] trait, i.e. this trait is to [`FromSafeCast`] what [`Into`]
+/// is to [`From`].
+///
+/// See the documentation of [`FromSafeCast`] for the motivation.
+///
+/// # Examples
+///
+/// ```
+/// use crate::num::IntoSafeCast;
+///
+/// assert_eq!(0xf00u32.into_safe_cast(), 0xf00u32 as usize);
+/// ```
+pub(crate) trait IntoSafeCast<T> {
+ /// Convert `self` into a `T`. This operation is guaranteed to be lossless.
+ fn into_safe_cast(self) -> T;
+}
+
+/// Reverse operation for types implementing [`FromSafeCast`].
+impl<S, T> IntoSafeCast<T> for S
+where
+ T: FromSafeCast<S>,
+{
+ fn into_safe_cast(self) -> T {
+ T::from_safe_cast(self)
+ }
+}
+
+/// Implements lossless conversion of a constant from a larger type into a smaller one.
+macro_rules! impl_const_into {
+ ($from:ty => { $($into:ty),* }) => {
+ $(
+ paste! {
+ #[doc = ::core::concat!(
+ "Performs a build-time safe conversion of a [`",
+ ::core::stringify!($from),
+ "`] constant value into a [`",
+ ::core::stringify!($into),
+ "`].")]
+ ///
+ /// This checks at compile-time that the conversion is lossless, and triggers a build
+ /// error if it isn't.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use crate::num;
+ ///
+ /// // Succeeds because the value of the source fits into the destination's type.
+ #[doc = ::core::concat!(
+ "assert_eq!(num::",
+ ::core::stringify!($from),
+ "_into_",
+ ::core::stringify!($into),
+ "::<1",
+ ::core::stringify!($from),
+ ">(), 1",
+ ::core::stringify!($into),
+ ");")]
+ /// ```
+ #[allow(unused)]
+ pub(crate) const fn [<$from _into_ $into>]<const N: $from>() -> $into {
+ // Make sure that the target type is not larger than the source one.
+ static_assert!($from::BITS >= $into::BITS);
+ // CAST: we statically enforced above that `$from` is at least as large as `$into`,
+ // so converting `$into`'s `MIN`/`MAX` bounds into `$from` is lossless.
+ build_assert!(N >= $into::MIN as $from && N <= $into::MAX as $from);
+
+ N as $into
+ }
+ }
+ )*
+ };
+}
+
+impl_const_into!(usize => { u8, u16, u32 });
+impl_const_into!(u64 => { u8, u16, u32 });
+impl_const_into!(u32 => { u8, u16 });
+impl_const_into!(u16 => { u8 });
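+
+// Example (a sketch): `u64_into_u32::<0x1000>()` performs the conversion at
+// build time and triggers a build error if the constant does not fit into a `u32`.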
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index d49fddf6a3c6..82cc6c0790e5 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -5,15 +5,30 @@
#![allow(non_camel_case_types)]
#[macro_use]
-mod macros;
+pub(crate) mod macros;
-use crate::falcon::{
- DmaTrfCmdSize, FalconCoreRev, FalconCoreRevSubversion, FalconFbifMemType, FalconFbifTarget,
- FalconModSelAlgo, FalconSecurityModel, PeregrineCoreSelect,
-};
-use crate::gpu::{Architecture, Chipset};
use kernel::prelude::*;
+use crate::{
+ falcon::{
+ DmaTrfCmdSize,
+ FalconCoreRev,
+ FalconCoreRevSubversion,
+ FalconFbifMemType,
+ FalconFbifTarget,
+ FalconModSelAlgo,
+ FalconSecurityModel,
+ PFalcon2Base,
+ PFalconBase,
+ PeregrineCoreSelect, //
+ },
+ gpu::{
+ Architecture,
+ Chipset, //
+ },
+ num::FromSafeCast,
+};
+
// PMC
register!(NV_PMC_BOOT_0 @ 0x00000000, "Basic revision information about the GPU" {
@@ -25,27 +40,59 @@ register!(NV_PMC_BOOT_0 @ 0x00000000, "Basic revision information about the GPU"
});
impl NV_PMC_BOOT_0 {
- /// Combines `architecture_0` and `architecture_1` to obtain the architecture of the chip.
- pub(crate) fn architecture(self) -> Result<Architecture> {
- Architecture::try_from(
- self.architecture_0() | (self.architecture_1() << Self::ARCHITECTURE_0.len()),
- )
+ pub(crate) fn is_older_than_fermi(self) -> bool {
+ // From https://github.com/NVIDIA/open-gpu-doc/tree/master/manuals :
+ const NV_PMC_BOOT_0_ARCHITECTURE_GF100: u8 = 0xc;
+
+ // Older chips left arch1 zeroed out. That, combined with an arch0 value that is less than
+ // GF100, means "older than Fermi".
+ self.architecture_1() == 0 && self.architecture_0() < NV_PMC_BOOT_0_ARCHITECTURE_GF100
}
+}
+register!(NV_PMC_BOOT_42 @ 0x00000a00, "Extended architecture information" {
+ 15:12 minor_revision as u8, "Minor revision of the chip";
+ 19:16 major_revision as u8, "Major revision of the chip";
+ 23:20 implementation as u8, "Implementation version of the architecture";
+ 29:24 architecture as u8 ?=> Architecture, "Architecture value";
+});
+
+impl NV_PMC_BOOT_42 {
/// Combines `architecture` and `implementation` to obtain a code unique to the chipset.
pub(crate) fn chipset(self) -> Result<Chipset> {
self.architecture()
.map(|arch| {
- ((arch as u32) << Self::IMPLEMENTATION.len()) | u32::from(self.implementation())
+ ((arch as u32) << Self::IMPLEMENTATION_RANGE.len())
+ | u32::from(self.implementation())
})
.and_then(Chipset::try_from)
}
+
+ /// Returns the raw architecture value from the register.
+ fn architecture_raw(self) -> u8 {
+ ((self.0 >> Self::ARCHITECTURE_RANGE.start()) & ((1 << Self::ARCHITECTURE_RANGE.len()) - 1))
+ as u8
+ }
+}
+
+impl kernel::fmt::Display for NV_PMC_BOOT_42 {
+ fn fmt(&self, f: &mut kernel::fmt::Formatter<'_>) -> kernel::fmt::Result {
+ write!(
+ f,
+ "boot42 = 0x{:08x} (architecture 0x{:x}, implementation 0x{:x})",
+ self.0,
+ self.architecture_raw(),
+ self.implementation()
+ )
+ }
}
// PBUS
-// TODO[REGA]: this is an array of registers.
-register!(NV_PBUS_SW_SCRATCH_0E@0x00001438 {
+register!(NV_PBUS_SW_SCRATCH @ 0x00001400[64] {});
+
+register!(NV_PBUS_SW_SCRATCH_0E_FRTS_ERR => NV_PBUS_SW_SCRATCH[0xe],
+ "scratch register 0xe used as FRTS firmware error code" {
31:16 frts_err_code as u16;
});
@@ -68,11 +115,15 @@ register!(NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE @ 0x00100ce0 {
30:30 ecc_mode_enabled as bool;
});
+register!(NV_PGSP_QUEUE_HEAD @ 0x00110c00 {
+ 31:0 address as u32;
+});
+
impl NV_PFB_PRI_MMU_LOCAL_MEMORY_RANGE {
/// Returns the usable framebuffer size, in bytes.
pub(crate) fn usable_fb_size(self) -> u64 {
let size = (u64::from(self.lower_mag()) << u64::from(self.lower_scale()))
- * kernel::sizes::SZ_1M as u64;
+ * u64::from_safe_cast(kernel::sizes::SZ_1M);
if self.ecc_mode_enabled() {
// Remove the amount of memory reserved for ECC (one per 16 units).
@@ -116,6 +167,12 @@ impl NV_PFB_PRI_MMU_WPR2_ADDR_HI {
// These scratch registers remain powered on even in a low-power state and have a designated group
// number.
+// Boot Sequence Interface (BSI) register used to determine
+// if GSP reload/resume has completed during the boot process.
+register!(NV_PGC6_BSI_SECURE_SCRATCH_14 @ 0x001180f8 {
+ 26:26 boot_stage_3_handoff as bool;
+});
+
// Privilege level mask register. It dictates whether the host CPU has privilege to access the
// `PGC6_AON_SECURE_SCRATCH_GROUP_05` register (which it needs to read GFW_BOOT).
register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK @ 0x00118128,
@@ -123,13 +180,12 @@ register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_PRIV_LEVEL_MASK @ 0x00118128,
0:0 read_protection_level0 as bool, "Set after FWSEC lowers its protection level";
});
-// TODO[REGA]: This is an array of registers.
-register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05 @ 0x00118234 {
- 31:0 value as u32;
-});
+// OpenRM defines this as a register array, but doesn't specify its size and only uses its first
+// element. Be conservative until we know the actual size or need to use more registers.
+register!(NV_PGC6_AON_SECURE_SCRATCH_GROUP_05 @ 0x00118234[1] {});
register!(
- NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT => NV_PGC6_AON_SECURE_SCRATCH_GROUP_05,
+ NV_PGC6_AON_SECURE_SCRATCH_GROUP_05_0_GFW_BOOT => NV_PGC6_AON_SECURE_SCRATCH_GROUP_05[0],
"Scratch group 05 register 0 used as GFW boot progress indicator" {
7:0 progress as u8, "Progress of GFW boot (0xff means completed)";
}
@@ -156,7 +212,7 @@ register!(
impl NV_USABLE_FB_SIZE_IN_MB {
/// Returns the usable framebuffer size, in bytes.
pub(crate) fn usable_fb_size(self) -> u64 {
- u64::from(self.value()) * kernel::sizes::SZ_1M as u64
+ u64::from(self.value()) * u64::from_safe_cast(kernel::sizes::SZ_1M)
}
}
@@ -180,38 +236,46 @@ impl NV_PDISP_VGA_WORKSPACE_BASE {
// FUSE
-register!(NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION @ 0x00824100 {
+pub(crate) const NV_FUSE_OPT_FPF_SIZE: usize = 16;
+
+register!(NV_FUSE_OPT_FPF_NVDEC_UCODE1_VERSION @ 0x00824100[NV_FUSE_OPT_FPF_SIZE] {
15:0 data as u16;
});
-register!(NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION @ 0x00824140 {
+register!(NV_FUSE_OPT_FPF_SEC2_UCODE1_VERSION @ 0x00824140[NV_FUSE_OPT_FPF_SIZE] {
15:0 data as u16;
});
-register!(NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION @ 0x008241c0 {
+register!(NV_FUSE_OPT_FPF_GSP_UCODE1_VERSION @ 0x008241c0[NV_FUSE_OPT_FPF_SIZE] {
15:0 data as u16;
});
// PFALCON
-register!(NV_PFALCON_FALCON_IRQSCLR @ +0x00000004 {
+register!(NV_PFALCON_FALCON_IRQSCLR @ PFalconBase[0x00000004] {
4:4 halt as bool;
6:6 swgen0 as bool;
});
-register!(NV_PFALCON_FALCON_MAILBOX0 @ +0x00000040 {
+register!(NV_PFALCON_FALCON_MAILBOX0 @ PFalconBase[0x00000040] {
+ 31:0 value as u32;
+});
+
+register!(NV_PFALCON_FALCON_MAILBOX1 @ PFalconBase[0x00000044] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_MAILBOX1 @ +0x00000044 {
+// Used to store version information about the firmware running
+// on the Falcon processor.
+register!(NV_PFALCON_FALCON_OS @ PFalconBase[0x00000080] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_RM @ +0x00000084 {
+register!(NV_PFALCON_FALCON_RM @ PFalconBase[0x00000084] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_HWCFG2 @ +0x000000f4 {
+register!(NV_PFALCON_FALCON_HWCFG2 @ PFalconBase[0x000000f4] {
10:10 riscv as bool;
12:12 mem_scrubbing as bool, "Set to 0 after memory scrubbing is completed";
31:31 reset_ready as bool, "Signal indicating that reset is completed (GA102+)";
@@ -224,17 +288,17 @@ impl NV_PFALCON_FALCON_HWCFG2 {
}
}
-register!(NV_PFALCON_FALCON_CPUCTL @ +0x00000100 {
+register!(NV_PFALCON_FALCON_CPUCTL @ PFalconBase[0x00000100] {
1:1 startcpu as bool;
4:4 halted as bool;
6:6 alias_en as bool;
});
-register!(NV_PFALCON_FALCON_BOOTVEC @ +0x00000104 {
+register!(NV_PFALCON_FALCON_BOOTVEC @ PFalconBase[0x00000104] {
31:0 value as u32;
});
-register!(NV_PFALCON_FALCON_DMACTL @ +0x0000010c {
+register!(NV_PFALCON_FALCON_DMACTL @ PFalconBase[0x0000010c] {
0:0 require_ctx as bool;
1:1 dmem_scrubbing as bool;
2:2 imem_scrubbing as bool;
@@ -242,15 +306,15 @@ register!(NV_PFALCON_FALCON_DMACTL @ +0x0000010c {
7:7 secure_stat as bool;
});
-register!(NV_PFALCON_FALCON_DMATRFBASE @ +0x00000110 {
+register!(NV_PFALCON_FALCON_DMATRFBASE @ PFalconBase[0x00000110] {
31:0 base as u32;
});
-register!(NV_PFALCON_FALCON_DMATRFMOFFS @ +0x00000114 {
+register!(NV_PFALCON_FALCON_DMATRFMOFFS @ PFalconBase[0x00000114] {
23:0 offs as u32;
});
-register!(NV_PFALCON_FALCON_DMATRFCMD @ +0x00000118 {
+register!(NV_PFALCON_FALCON_DMATRFCMD @ PFalconBase[0x00000118] {
0:0 full as bool;
1:1 idle as bool;
3:2 sec as u8;
@@ -261,60 +325,67 @@ register!(NV_PFALCON_FALCON_DMATRFCMD @ +0x00000118 {
16:16 set_dmtag as u8;
});
-register!(NV_PFALCON_FALCON_DMATRFFBOFFS @ +0x0000011c {
+register!(NV_PFALCON_FALCON_DMATRFFBOFFS @ PFalconBase[0x0000011c] {
31:0 offs as u32;
});
-register!(NV_PFALCON_FALCON_DMATRFBASE1 @ +0x00000128 {
+register!(NV_PFALCON_FALCON_DMATRFBASE1 @ PFalconBase[0x00000128] {
8:0 base as u16;
});
-register!(NV_PFALCON_FALCON_HWCFG1 @ +0x0000012c {
+register!(NV_PFALCON_FALCON_HWCFG1 @ PFalconBase[0x0000012c] {
3:0 core_rev as u8 ?=> FalconCoreRev, "Core revision";
5:4 security_model as u8 ?=> FalconSecurityModel, "Security model";
7:6 core_rev_subversion as u8 ?=> FalconCoreRevSubversion, "Core revision subversion";
});
-register!(NV_PFALCON_FALCON_CPUCTL_ALIAS @ +0x00000130 {
+register!(NV_PFALCON_FALCON_CPUCTL_ALIAS @ PFalconBase[0x00000130] {
1:1 startcpu as bool;
});
// Actually known as `NV_PSEC_FALCON_ENGINE` and `NV_PGSP_FALCON_ENGINE` depending on the falcon
// instance.
-register!(NV_PFALCON_FALCON_ENGINE @ +0x000003c0 {
+register!(NV_PFALCON_FALCON_ENGINE @ PFalconBase[0x000003c0] {
0:0 reset as bool;
});
-// TODO[REGA]: this is an array of registers.
-register!(NV_PFALCON_FBIF_TRANSCFG @ +0x00000600 {
+register!(NV_PFALCON_FBIF_TRANSCFG @ PFalconBase[0x00000600[8]] {
1:0 target as u8 ?=> FalconFbifTarget;
2:2 mem_type as bool => FalconFbifMemType;
});
-register!(NV_PFALCON_FBIF_CTL @ +0x00000624 {
+register!(NV_PFALCON_FBIF_CTL @ PFalconBase[0x00000624] {
7:7 allow_phys_no_ctx as bool;
});
-register!(NV_PFALCON2_FALCON_MOD_SEL @ +0x00001180 {
+/* PFALCON2 */
+
+register!(NV_PFALCON2_FALCON_MOD_SEL @ PFalcon2Base[0x00000180] {
7:0 algo as u8 ?=> FalconModSelAlgo;
});
-register!(NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID @ +0x00001198 {
+register!(NV_PFALCON2_FALCON_BROM_CURR_UCODE_ID @ PFalcon2Base[0x00000198] {
7:0 ucode_id as u8;
});
-register!(NV_PFALCON2_FALCON_BROM_ENGIDMASK @ +0x0000119c {
+register!(NV_PFALCON2_FALCON_BROM_ENGIDMASK @ PFalcon2Base[0x0000019c] {
31:0 value as u32;
});
-// TODO[REGA]: this is an array of registers.
-register!(NV_PFALCON2_FALCON_BROM_PARAADDR @ +0x00001210 {
+// OpenRM defines this as a register array, but doesn't specify its size and only uses its first
+// element. Be conservative until we know the actual size or need to use more registers.
+register!(NV_PFALCON2_FALCON_BROM_PARAADDR @ PFalcon2Base[0x00000210[1]] {
31:0 value as u32;
});
// PRISCV
-register!(NV_PRISCV_RISCV_BCR_CTRL @ +0x00001668 {
+register!(NV_PRISCV_RISCV_CPUCTL @ PFalcon2Base[0x00000388] {
+ 0:0 halted as bool;
+ 7:7 active_stat as bool;
+});
+
+register!(NV_PRISCV_RISCV_BCR_CTRL @ PFalcon2Base[0x00000668] {
0:0 valid as bool;
4:4 core_select as bool => PeregrineCoreSelect;
8:8 br_fetch as bool;
diff --git a/drivers/gpu/nova-core/regs/macros.rs b/drivers/gpu/nova-core/regs/macros.rs
index a3e6de1779d4..fd1a815fa57d 100644
--- a/drivers/gpu/nova-core/regs/macros.rs
+++ b/drivers/gpu/nova-core/regs/macros.rs
@@ -1,17 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
-//! Macro to define register layout and accessors.
+//! `register!` macro to define register layout and accessors.
//!
//! A single register typically includes several fields, which are accessed through a combination
//! of bit-shift and mask operations that introduce a class of potential mistakes, notably because
//! not all possible field values are necessarily valid.
//!
-//! The macro in this module allow to define, using an intruitive and readable syntax, a dedicated
-//! type for each register with its own field accessors that can return an error is a field's value
-//! is invalid.
+//! The `register!` macro in this module provides an intuitive and readable syntax for defining a
+//! dedicated type for each register. Each such type comes with its own field accessors that can
+//! return an error if a field's value is invalid. Please look at the [`bitfield`] macro for the
+//! complete syntax of field definitions.
-/// Defines a dedicated type for a register with an absolute offset, alongside with getter and
-/// setter methods for its fields and methods to read and write it from an `Io` region.
+/// Trait providing a base address to be added to the offset of a relative register to obtain
+/// its actual offset.
+///
+/// The `T` generic argument is used to distinguish which base to use, in case a type provides
+/// several bases. It is given to the `register!` macro to restrict the use of the register to
+/// implementors of this particular variant.
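+///
+/// A minimal sketch (illustrative names; see the `register!` examples below for full usage):
+///
+/// ```no_run
+/// struct CpuCtlBase;
+/// struct Cpu0;
+/// impl RegisterBase<CpuCtlBase> for Cpu0 {
+///     const BASE: usize = 0x100;
+/// }
+/// ```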
+pub(crate) trait RegisterBase<T> {
+ const BASE: usize;
+}
+
+/// Defines a dedicated type for a register with an absolute offset, including getter and setter
+/// methods for its fields and methods to read and write it from an `Io` region.
///
/// Example:
///
@@ -24,7 +35,7 @@
/// ```
///
/// This defines a `BOOT_0` type which can be read or written from offset `0x100` of an `Io`
-/// region. It is composed of 3 fields, for instance `minor_revision` is made of the 4 less
+/// region. It is composed of 3 fields, for instance `minor_revision` is made of the 4 least
/// significant bits of the register. Each field can be accessed and modified using accessor
/// methods:
///
@@ -33,390 +44,677 @@
/// let boot0 = BOOT_0::read(&bar);
/// pr_info!("chip revision: {}.{}", boot0.major_revision(), boot0.minor_revision());
///
-/// // `Chipset::try_from` will be called with the value of the field and returns an error if the
-/// // value is invalid.
+/// // `Chipset::try_from` is called with the value of the `chipset` field and returns an
+/// // error if it is invalid.
/// let chipset = boot0.chipset()?;
///
/// // Update some fields and write the value back.
/// boot0.set_major_revision(3).set_minor_revision(10).write(&bar);
///
-/// // Or just read and update the register in a single step:
-/// BOOT_0::alter(&bar, |r| r.set_major_revision(3).set_minor_revision(10));
+/// // Or, just read and update the register in a single step:
+/// BOOT_0::update(&bar, |r| r.set_major_revision(3).set_minor_revision(10));
/// ```
///
-/// Fields can be defined as follows:
-///
-/// - `as <type>` simply returns the field value casted as the requested integer type, typically
-/// `u32`, `u16`, `u8` or `bool`. Note that `bool` fields must have a range of 1 bit.
-/// - `as <type> => <into_type>` calls `<into_type>`'s `From::<<type>>` implementation and returns
-/// the result.
-/// - `as <type> ?=> <try_into_type>` calls `<try_into_type>`'s `TryFrom::<<type>>` implementation
-/// and returns the result. This is useful on fields for which not all values are value.
-///
/// The documentation strings are optional. If present, they will be added to the type's
/// definition, or the field getter and setter methods they are attached to.
///
-/// Putting a `+` before the address of the register makes it relative to a base: the `read` and
-/// `write` methods take a `base` argument that is added to the specified address before access,
-/// and `try_read` and `try_write` methods are also created, allowing access with offsets unknown
-/// at compile-time:
+/// It is also possible to create an alias register by using the `=> ALIAS` syntax. This is useful
+/// for cases where a register's interpretation depends on the context:
///
/// ```no_run
-/// register!(CPU_CTL @ +0x0000010, "CPU core control" {
-/// 0:0 start as bool, "Start the CPU core";
+/// register!(SCRATCH @ 0x00000200, "Scratch register" {
+/// 31:0 value as u32, "Raw value";
/// });
///
-/// // Flip the `start` switch for the CPU core which base address is at `CPU_BASE`.
-/// let cpuctl = CPU_CTL::read(&bar, CPU_BASE);
-/// pr_info!("CPU CTL: {:#x}", cpuctl);
-/// cpuctl.set_start(true).write(&bar, CPU_BASE);
+/// register!(SCRATCH_BOOT_STATUS => SCRATCH, "Boot status of the firmware" {
+/// 0:0 completed as bool, "Whether the firmware has completed booting";
+/// });
/// ```
///
-/// It is also possible to create a alias register by using the `=> ALIAS` syntax. This is useful
-/// for cases where a register's interpretation depends on the context:
+/// In this example, `SCRATCH_BOOT_STATUS` uses the same I/O address as `SCRATCH`, while also
+/// providing its own `completed` field.
+///
+/// ## Relative registers
+///
+/// A register can be defined as being accessible from a fixed offset of a provided base. For
+/// instance, imagine the following I/O space:
+///
+/// ```text
+/// +-----------------------------+
+/// | ... |
+/// | |
+/// 0x100--->+------------CPU0-------------+
+/// | |
+/// 0x110--->+-----------------------------+
+/// | CPU_CTL |
+/// +-----------------------------+
+/// | ... |
+/// | |
+/// | |
+/// 0x200--->+------------CPU1-------------+
+/// | |
+/// 0x210--->+-----------------------------+
+/// | CPU_CTL |
+/// +-----------------------------+
+/// | ... |
+/// +-----------------------------+
+/// ```
+///
+/// `CPU0` and `CPU1` both have a `CPU_CTL` register that starts at offset `0x10` of their I/O
+/// space segment. Since both instances of `CPU_CTL` share the same layout, we don't want to define
+/// them twice and would prefer a way to select which one to use from a single definition.
+///
+/// This can be done using the `Base[Offset]` syntax when specifying the register's address.
+///
+/// `Base` is an arbitrary type (typically a ZST) to be used as a generic parameter of the
+/// [`RegisterBase`] trait to provide the base as a constant, i.e. each type providing a base for
+/// this register needs to implement `RegisterBase<Base>`. Here is the above example translated
+/// into code:
///
/// ```no_run
-/// register!(SCRATCH_0 @ 0x0000100, "Scratch register 0" {
-/// 31:0 value as u32, "Raw value";
+/// // Type used to identify the base.
+/// pub(crate) struct CpuCtlBase;
///
-/// register!(SCRATCH_0_BOOT_STATUS => SCRATCH_0, "Boot status of the firmware" {
-/// 0:0 completed as bool, "Whether the firmware has completed booting";
+/// // ZST describing `CPU0`.
+/// struct Cpu0;
+/// impl RegisterBase<CpuCtlBase> for Cpu0 {
+/// const BASE: usize = 0x100;
+/// }
+/// // Singleton of `CPU0` used to identify it.
+/// const CPU0: Cpu0 = Cpu0;
+///
+/// // ZST describing `CPU1`.
+/// struct Cpu1;
+/// impl RegisterBase<CpuCtlBase> for Cpu1 {
+/// const BASE: usize = 0x200;
+/// }
+/// // Singleton of `CPU1` used to identify it.
+/// const CPU1: Cpu1 = Cpu1;
+///
+/// // This makes `CPU_CTL` accessible from all implementors of `RegisterBase<CpuCtlBase>`.
+/// register!(CPU_CTL @ CpuCtlBase[0x10], "CPU core control" {
+/// 0:0 start as bool, "Start the CPU core";
+/// });
+///
+/// // The `read`, `write` and `update` methods of relative registers take an extra `base` argument
+/// // that is used to resolve its final address by adding its `BASE` to the offset of the
+/// // register.
+///
+/// // Start `CPU0`.
+/// CPU_CTL::update(bar, &CPU0, |r| r.set_start(true));
+///
+/// // Start `CPU1`.
+/// CPU_CTL::update(bar, &CPU1, |r| r.set_start(true));
+///
+/// // Aliases can also be defined for relative registers.
+/// register!(CPU_CTL_ALIAS => CpuCtlBase[CPU_CTL], "Alias to CPU core control" {
+/// 1:1 alias_start as bool, "Start the aliased CPU core";
+/// });
+///
+/// // Start the aliased `CPU0`.
+/// CPU_CTL_ALIAS::update(bar, &CPU0, |r| r.set_alias_start(true));
/// ```
///
-/// In this example, `SCRATCH_0_BOOT_STATUS` uses the same I/O address as `SCRATCH_0`, while also
-/// providing its own `completed` method.
+/// ## Arrays of registers
+///
+/// Some I/O areas contain consecutive values that can be interpreted in the same way. These areas
+/// can be defined as an array of identical registers, allowing them to be accessed by index with
+/// compile-time or runtime bound checking. Simply define their address as `Address[Size]`; their
+/// `read`, `write` and `update` methods then take an extra `idx` parameter:
+///
+/// ```no_run
+/// # fn no_run() -> Result<(), Error> {
+/// # fn get_scratch_idx() -> usize {
+/// # 0x15
+/// # }
+/// // Array of 64 consecutive registers with the same layout starting at offset `0x80`.
+/// register!(SCRATCH @ 0x00000080[64], "Scratch registers" {
+/// 31:0 value as u32;
+/// });
+///
+/// // Read scratch register 0, i.e. I/O address `0x80`.
+/// let scratch_0 = SCRATCH::read(bar, 0).value();
+/// // Read scratch register 15, i.e. I/O address `0x80 + (15 * 4)`.
+/// let scratch_15 = SCRATCH::read(bar, 15).value();
+///
+/// // This is out of bounds and won't build.
+/// // let scratch_128 = SCRATCH::read(bar, 128).value();
+///
+/// // Runtime-obtained array index.
+/// let scratch_idx = get_scratch_idx();
+/// // Access on a runtime index returns an error if it is out-of-bounds.
+/// let some_scratch = SCRATCH::try_read(bar, scratch_idx)?.value();
+///
+/// // Alias to a particular register in an array.
+/// // Here `SCRATCH[8]` is used to convey the firmware exit code.
+/// register!(FIRMWARE_STATUS => SCRATCH[8], "Firmware exit status code" {
+/// 7:0 status as u8;
+/// });
+///
+/// let status = FIRMWARE_STATUS::read(bar).status();
+///
+/// // Non-contiguous register arrays can be defined by adding a stride parameter.
+/// // Here, the 16 registers of the array are spaced 8 bytes apart, meaning that the
+/// // registers of the two declarations below are interleaved.
+/// register!(SCRATCH_INTERLEAVED_0 @ 0x000000c0[16 ; 8], "Scratch registers bank 0" {
+/// 31:0 value as u32;
+/// });
+/// register!(SCRATCH_INTERLEAVED_1 @ 0x000000c4[16 ; 8], "Scratch registers bank 1" {
+/// 31:0 value as u32;
+/// });
+/// # Ok(())
+/// # }
+/// ```
+///
+/// ## Relative arrays of registers
+///
+/// Combining the two features described in the sections above, arrays of registers accessible from
+/// a base can also be defined:
+///
+/// ```no_run
+/// # fn no_run() -> Result<(), Error> {
+/// # fn get_scratch_idx() -> usize {
+/// # 0x15
+/// # }
+/// // Type used as parameter of `RegisterBase` to specify the base.
+/// pub(crate) struct CpuCtlBase;
+///
+/// // ZST describing `CPU0`.
+/// struct Cpu0;
+/// impl RegisterBase<CpuCtlBase> for Cpu0 {
+/// const BASE: usize = 0x100;
+/// }
+/// // Singleton of `CPU0` used to identify it.
+/// const CPU0: Cpu0 = Cpu0;
+///
+/// // ZST describing `CPU1`.
+/// struct Cpu1;
+/// impl RegisterBase<CpuCtlBase> for Cpu1 {
+/// const BASE: usize = 0x200;
+/// }
+/// // Singleton of `CPU1` used to identify it.
+/// const CPU1: Cpu1 = Cpu1;
+///
+/// // 64 per-cpu scratch registers, arranged as a contiguous array.
+/// register!(CPU_SCRATCH @ CpuCtlBase[0x00000080[64]], "Per-CPU scratch registers" {
+/// 31:0 value as u32;
+/// });
+///
+/// let cpu0_scratch_0 = CPU_SCRATCH::read(bar, &Cpu0, 0).value();
+/// let cpu1_scratch_15 = CPU_SCRATCH::read(bar, &Cpu1, 15).value();
+///
+/// // This won't build.
+/// // let cpu0_scratch_128 = CPU_SCRATCH::read(bar, &Cpu0, 128).value();
+///
+/// // Runtime-obtained array index.
+/// let scratch_idx = get_scratch_idx();
+/// // Access on a runtime value returns an error if it is out-of-bounds.
+/// let cpu0_some_scratch = CPU_SCRATCH::try_read(bar, &Cpu0, scratch_idx)?.value();
+///
+/// // `SCRATCH[8]` is used to convey the firmware exit code.
+/// register!(CPU_FIRMWARE_STATUS => CpuCtlBase[CPU_SCRATCH[8]],
+/// "Per-CPU firmware exit status code" {
+/// 7:0 status as u8;
+/// });
+///
+/// let cpu0_status = CPU_FIRMWARE_STATUS::read(bar, &Cpu0).status();
+///
+/// // Non-contiguous register arrays can be defined by adding a stride parameter.
+/// // Here, the 16 registers of the array are spaced 8 bytes apart, meaning that the
+/// // registers of the two declarations below are interleaved.
+/// register!(CPU_SCRATCH_INTERLEAVED_0 @ CpuCtlBase[0x00000d00[16 ; 8]],
+/// "Scratch registers bank 0" {
+/// 31:0 value as u32;
+/// });
+/// register!(CPU_SCRATCH_INTERLEAVED_1 @ CpuCtlBase[0x00000d04[16 ; 8]],
+/// "Scratch registers bank 1" {
+/// 31:0 value as u32;
+/// });
+/// # Ok(())
+/// # }
+/// ```
macro_rules! register {
// Creates a register at a fixed offset of the MMIO space.
+ ($name:ident @ $offset:literal $(, $comment:literal)? { $($fields:tt)* } ) => {
+ bitfield!(pub(crate) struct $name(u32) $(, $comment)? { $($fields)* } );
+ register!(@io_fixed $name @ $offset);
+ };
+
+ // Creates an alias register of fixed offset register `alias` with its own fields.
+ ($name:ident => $alias:ident $(, $comment:literal)? { $($fields:tt)* } ) => {
+ bitfield!(pub(crate) struct $name(u32) $(, $comment)? { $($fields)* } );
+ register!(@io_fixed $name @ $alias::OFFSET);
+ };
+
+ // Creates a register at a relative offset from a base address provider.
+ ($name:ident @ $base:ty [ $offset:literal ] $(, $comment:literal)? { $($fields:tt)* } ) => {
+ bitfield!(pub(crate) struct $name(u32) $(, $comment)? { $($fields)* } );
+ register!(@io_relative $name @ $base [ $offset ]);
+ };
+
+ // Creates an alias register of relative offset register `alias` with its own fields.
+ ($name:ident => $base:ty [ $alias:ident ] $(, $comment:literal)? { $($fields:tt)* }) => {
+ bitfield!(pub(crate) struct $name(u32) $(, $comment)? { $($fields)* } );
+ register!(@io_relative $name @ $base [ $alias::OFFSET ]);
+ };
+
+ // Creates an array of registers at a fixed offset of the MMIO space.
(
- $name:ident @ $offset:literal $(, $comment:literal)? {
+ $name:ident @ $offset:literal [ $size:expr ; $stride:expr ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $offset $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io $name @ $offset);
+ static_assert!(::core::mem::size_of::<u32>() <= $stride);
+ bitfield!(pub(crate) struct $name(u32) $(, $comment)? { $($fields)* } );
+ register!(@io_array $name @ $offset [ $size ; $stride ]);
};
- // Creates a alias register of fixed offset register `alias` with its own fields.
+ // Shortcut for contiguous array of registers (stride == size of element).
(
- $name:ident => $alias:ident $(, $comment:literal)? {
+ $name:ident @ $offset:literal [ $size:expr ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $alias::OFFSET $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io $name @ $alias::OFFSET);
+ register!($name @ $offset [ $size ; ::core::mem::size_of::<u32>() ] $(, $comment)? {
+ $($fields)*
+ } );
+ };
+
+ // Creates an array of registers at a relative offset from a base address provider.
+ (
+ $name:ident @ $base:ty [ $offset:literal [ $size:expr ; $stride:expr ] ]
+ $(, $comment:literal)? { $($fields:tt)* }
+ ) => {
+ static_assert!(::core::mem::size_of::<u32>() <= $stride);
+ bitfield!(pub(crate) struct $name(u32) $(, $comment)? { $($fields)* } );
+ register!(@io_relative_array $name @ $base [ $offset [ $size ; $stride ] ]);
};
- // Creates a register at a relative offset from a base address.
+ // Shortcut for contiguous array of relative registers (stride == size of element).
(
- $name:ident @ + $offset:literal $(, $comment:literal)? {
+ $name:ident @ $base:ty [ $offset:literal [ $size:expr ] ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $offset $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io$name @ + $offset);
+ register!($name @ $base [ $offset [ $size ; ::core::mem::size_of::<u32>() ] ]
+ $(, $comment)? { $($fields)* } );
};
- // Creates a alias register of relative offset register `alias` with its own fields.
+ // Creates an alias of register `idx` of relative array of registers `alias` with its own
+ // fields.
(
- $name:ident => + $alias:ident $(, $comment:literal)? {
+ $name:ident => $base:ty [ $alias:ident [ $idx:expr ] ] $(, $comment:literal)? {
$($fields:tt)*
}
) => {
- register!(@common $name @ $alias::OFFSET $(, $comment)?);
- register!(@field_accessors $name { $($fields)* });
- register!(@io $name @ + $alias::OFFSET);
+ static_assert!($idx < $alias::SIZE);
+ bitfield!(pub(crate) struct $name(u32) $(, $comment)? { $($fields)* } );
+ register!(@io_relative $name @ $base [ $alias::OFFSET + $idx * $alias::STRIDE ] );
};
- // All rules below are helpers.
-
- // Defines the wrapper `$name` type, as well as its relevant implementations (`Debug`, `BitOr`,
- // and conversion to regular `u32`).
- (@common $name:ident @ $offset:expr $(, $comment:literal)?) => {
- $(
- #[doc=$comment]
- )?
- #[repr(transparent)]
- #[derive(Clone, Copy, Default)]
- pub(crate) struct $name(u32);
+    // Creates an alias of register `idx` of the array of registers `alias`, with its own fields.
+    // This rule belongs to the (non-relative) register arrays set, but needs to be put last
+    // to avoid it being matched in place of the relative register array alias rule.
+ ($name:ident => $alias:ident [ $idx:expr ] $(, $comment:literal)? { $($fields:tt)* }) => {
+ static_assert!($idx < $alias::SIZE);
+ bitfield!(pub(crate) struct $name(u32) $(, $comment)? { $($fields)* } );
+ register!(@io_fixed $name @ $alias::OFFSET + $idx * $alias::STRIDE );
+ };
+ // Generates the IO accessors for a fixed offset register.
+ (@io_fixed $name:ident @ $offset:expr) => {
#[allow(dead_code)]
impl $name {
pub(crate) const OFFSET: usize = $offset;
- }
- // TODO[REGA]: display the raw hex value, then the value of all the fields. This requires
- // matching the fields, which will complexify the syntax considerably...
- impl ::core::fmt::Debug for $name {
- fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
- f.debug_tuple(stringify!($name))
- .field(&format_args!("0x{0:x}", &self.0))
- .finish()
+ /// Read the register from its address in `io`.
+ #[inline(always)]
+ pub(crate) fn read<const SIZE: usize, T>(io: &T) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ Self(io.read32($offset))
}
- }
- impl ::core::ops::BitOr for $name {
- type Output = Self;
-
- fn bitor(self, rhs: Self) -> Self::Output {
- Self(self.0 | rhs.0)
+ /// Write the value contained in `self` to the register address in `io`.
+ #[inline(always)]
+ pub(crate) fn write<const SIZE: usize, T>(self, io: &T) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ io.write32(self.0, $offset)
}
- }
- impl ::core::convert::From<$name> for u32 {
- fn from(reg: $name) -> u32 {
- reg.0
+ /// Read the register from its address in `io` and run `f` on its value to obtain a new
+ /// value to write back.
+ #[inline(always)]
+ pub(crate) fn update<const SIZE: usize, T, F>(
+ io: &T,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io));
+ reg.write(io);
}
}
};
- // Defines all the field getter/methods methods for `$name`.
- (
- @field_accessors $name:ident {
- $($hi:tt:$lo:tt $field:ident as $type:tt
- $(?=> $try_into_type:ty)?
- $(=> $into_type:ty)?
- $(, $comment:literal)?
- ;
- )*
- }
- ) => {
- $(
- register!(@check_field_bounds $hi:$lo $field as $type);
- )*
-
+ // Generates the IO accessors for a relative offset register.
+ (@io_relative $name:ident @ $base:ty [ $offset:expr ]) => {
#[allow(dead_code)]
impl $name {
- $(
- register!(@field_accessor $name $hi:$lo $field as $type
- $(?=> $try_into_type)?
- $(=> $into_type)?
- $(, $comment)?
- ;
- );
- )*
- }
- };
-
- // Boolean fields must have `$hi == $lo`.
- (@check_field_bounds $hi:tt:$lo:tt $field:ident as bool) => {
- #[allow(clippy::eq_op)]
- const _: () = {
- ::kernel::build_assert!(
- $hi == $lo,
- concat!("boolean field `", stringify!($field), "` covers more than one bit")
- );
- };
- };
-
- // Non-boolean fields must have `$hi >= $lo`.
- (@check_field_bounds $hi:tt:$lo:tt $field:ident as $type:tt) => {
- #[allow(clippy::eq_op)]
- const _: () = {
- ::kernel::build_assert!(
- $hi >= $lo,
- concat!("field `", stringify!($field), "`'s MSB is smaller than its LSB")
- );
- };
- };
-
- // Catches fields defined as `bool` and convert them into a boolean value.
- (
- @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as bool => $into_type:ty
- $(, $comment:literal)?;
- ) => {
- register!(
- @leaf_accessor $name $hi:$lo $field as bool
- { |f| <$into_type>::from(if f != 0 { true } else { false }) }
- $into_type => $into_type $(, $comment)?;
- );
- };
+ pub(crate) const OFFSET: usize = $offset;
- // Shortcut for fields defined as `bool` without the `=>` syntax.
- (
- @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as bool $(, $comment:literal)?;
- ) => {
- register!(@field_accessor $name $hi:$lo $field as bool => bool $(, $comment)?;);
- };
+ /// Read the register from `io`, using the base address provided by `base` and adding
+ /// the register's offset to it.
+ #[inline(always)]
+ pub(crate) fn read<const SIZE: usize, T, B>(
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ ) -> Self where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ const OFFSET: usize = $name::OFFSET;
- // Catches the `?=>` syntax for non-boolean fields.
- (
- @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt ?=> $try_into_type:ty
- $(, $comment:literal)?;
- ) => {
- register!(@leaf_accessor $name $hi:$lo $field as $type
- { |f| <$try_into_type>::try_from(f as $type) } $try_into_type =>
- ::core::result::Result<
- $try_into_type,
- <$try_into_type as ::core::convert::TryFrom<$type>>::Error
- >
- $(, $comment)?;);
- };
+ let value = io.read32(
+ <B as crate::regs::macros::RegisterBase<$base>>::BASE + OFFSET
+ );
- // Catches the `=>` syntax for non-boolean fields.
- (
- @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt => $into_type:ty
- $(, $comment:literal)?;
- ) => {
- register!(@leaf_accessor $name $hi:$lo $field as $type
- { |f| <$into_type>::from(f as $type) } $into_type => $into_type $(, $comment)?;);
- };
+ Self(value)
+ }
- // Shortcut for fields defined as non-`bool` without the `=>` or `?=>` syntax.
- (
- @field_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:tt
- $(, $comment:literal)?;
- ) => {
- register!(@field_accessor $name $hi:$lo $field as $type => $type $(, $comment)?;);
- };
+ /// Write the value contained in `self` to `io`, using the base address provided by
+ /// `base` and adding the register's offset to it.
+ #[inline(always)]
+ pub(crate) fn write<const SIZE: usize, T, B>(
+ self,
+ io: &T,
+ #[allow(unused_variables)]
+ base: &B,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ {
+ const OFFSET: usize = $name::OFFSET;
- // Generates the accessor methods for a single field.
- (
- @leaf_accessor $name:ident $hi:tt:$lo:tt $field:ident as $type:ty
- { $process:expr } $to_type:ty => $res_type:ty $(, $comment:literal)?;
- ) => {
- ::kernel::macros::paste!(
- const [<$field:upper>]: ::core::ops::RangeInclusive<u8> = $lo..=$hi;
- const [<$field:upper _MASK>]: u32 = ((((1 << $hi) - 1) << 1) + 1) - ((1 << $lo) - 1);
- const [<$field:upper _SHIFT>]: u32 = Self::[<$field:upper _MASK>].trailing_zeros();
- );
-
- $(
- #[doc="Returns the value of this field:"]
- #[doc=$comment]
- )?
- #[inline]
- pub(crate) fn $field(self) -> $res_type {
- ::kernel::macros::paste!(
- const MASK: u32 = $name::[<$field:upper _MASK>];
- const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
- );
- let field = ((self.0 & MASK) >> SHIFT);
-
- $process(field)
- }
+ io.write32(
+ self.0,
+ <B as crate::regs::macros::RegisterBase<$base>>::BASE + OFFSET
+ );
+ }
- ::kernel::macros::paste!(
- $(
- #[doc="Sets the value of this field:"]
- #[doc=$comment]
- )?
- #[inline]
- pub(crate) fn [<set_ $field>](mut self, value: $to_type) -> Self {
- const MASK: u32 = $name::[<$field:upper _MASK>];
- const SHIFT: u32 = $name::[<$field:upper _SHIFT>];
- let value = (u32::from(value) << SHIFT) & MASK;
- self.0 = (self.0 & !MASK) | value;
-
- self
+ /// Read the register from `io`, using the base address provided by `base` and adding
+ /// the register's offset to it, then run `f` on its value to obtain a new value to
+ /// write back.
+ #[inline(always)]
+ pub(crate) fn update<const SIZE: usize, T, B, F>(
+ io: &T,
+ base: &B,
+ f: F,
+ ) where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ let reg = f(Self::read(io, base));
+ reg.write(io, base);
+ }
}
- );
};
- // Creates the IO accessors for a fixed offset register.
- (@io $name:ident @ $offset:expr) => {
+ // Generates the IO accessors for an array of registers.
+ (@io_array $name:ident @ $offset:literal [ $size:expr ; $stride:expr ]) => {
#[allow(dead_code)]
impl $name {
- #[inline]
- pub(crate) fn read<const SIZE: usize, T>(io: &T) -> Self where
+ pub(crate) const OFFSET: usize = $offset;
+ pub(crate) const SIZE: usize = $size;
+ pub(crate) const STRIDE: usize = $stride;
+
+ /// Read the array register at index `idx` from its address in `io`.
+ #[inline(always)]
+ pub(crate) fn read<const SIZE: usize, T>(
+ io: &T,
+ idx: usize,
+ ) -> Self where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- Self(io.read32($offset))
+ build_assert!(idx < Self::SIZE);
+
+ let offset = Self::OFFSET + (idx * Self::STRIDE);
+ let value = io.read32(offset);
+
+ Self(value)
}
- #[inline]
- pub(crate) fn write<const SIZE: usize, T>(self, io: &T) where
+ /// Write the value contained in `self` to the array register with index `idx` in `io`.
+ #[inline(always)]
+ pub(crate) fn write<const SIZE: usize, T>(
+ self,
+ io: &T,
+ idx: usize
+ ) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
{
- io.write32(self.0, $offset)
+ build_assert!(idx < Self::SIZE);
+
+ let offset = Self::OFFSET + (idx * Self::STRIDE);
+
+ io.write32(self.0, offset);
}
- #[inline]
- pub(crate) fn alter<const SIZE: usize, T, F>(
+ /// Read the array register at index `idx` in `io` and run `f` on its value to obtain a
+ /// new value to write back.
+ #[inline(always)]
+ pub(crate) fn update<const SIZE: usize, T, F>(
io: &T,
+ idx: usize,
f: F,
) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
F: ::core::ops::FnOnce(Self) -> Self,
{
- let reg = f(Self::read(io));
- reg.write(io);
+ let reg = f(Self::read(io, idx));
+ reg.write(io, idx);
+ }
+
+ /// Read the array register at index `idx` from its address in `io`.
+ ///
+        /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+        /// access is out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_read<const SIZE: usize, T>(
+ io: &T,
+ idx: usize,
+ ) -> ::kernel::error::Result<Self> where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ if idx < Self::SIZE {
+ Ok(Self::read(io, idx))
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Write the value contained in `self` to the array register with index `idx` in `io`.
+ ///
+        /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+        /// access is out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_write<const SIZE: usize, T>(
+ self,
+ io: &T,
+ idx: usize,
+ ) -> ::kernel::error::Result where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ {
+ if idx < Self::SIZE {
+ Ok(self.write(io, idx))
+ } else {
+ Err(EINVAL)
+ }
+ }
+
+ /// Read the array register at index `idx` in `io` and run `f` on its value to obtain a
+ /// new value to write back.
+ ///
+        /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+        /// access is out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_update<const SIZE: usize, T, F>(
+ io: &T,
+ idx: usize,
+ f: F,
+ ) -> ::kernel::error::Result where
+ T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ F: ::core::ops::FnOnce(Self) -> Self,
+ {
+ if idx < Self::SIZE {
+ Ok(Self::update(io, idx, f))
+ } else {
+ Err(EINVAL)
+ }
}
}
};
- // Create the IO accessors for a relative offset register.
- (@io $name:ident @ + $offset:literal) => {
+ // Generates the IO accessors for an array of relative registers.
+ (
+ @io_relative_array $name:ident @ $base:ty
+ [ $offset:literal [ $size:expr ; $stride:expr ] ]
+ ) => {
#[allow(dead_code)]
impl $name {
- #[inline]
- pub(crate) fn read<const SIZE: usize, T>(
+ pub(crate) const OFFSET: usize = $offset;
+ pub(crate) const SIZE: usize = $size;
+ pub(crate) const STRIDE: usize = $stride;
+
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it.
+ #[inline(always)]
+ pub(crate) fn read<const SIZE: usize, T, B>(
io: &T,
- base: usize,
+ #[allow(unused_variables)]
+ base: &B,
+ idx: usize,
) -> Self where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
{
- Self(io.read32(base + $offset))
+ build_assert!(idx < Self::SIZE);
+
+ let offset = <B as crate::regs::macros::RegisterBase<$base>>::BASE +
+ Self::OFFSET + (idx * Self::STRIDE);
+ let value = io.read32(offset);
+
+ Self(value)
}
- #[inline]
- pub(crate) fn write<const SIZE: usize, T>(
+ /// Write the value contained in `self` to `io`, using the base address provided by
+ /// `base` and adding the offset of array register `idx` to it.
+ #[inline(always)]
+ pub(crate) fn write<const SIZE: usize, T, B>(
self,
io: &T,
- base: usize,
+ #[allow(unused_variables)]
+ base: &B,
+ idx: usize
) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
{
- io.write32(self.0, base + $offset)
+ build_assert!(idx < Self::SIZE);
+
+ let offset = <B as crate::regs::macros::RegisterBase<$base>>::BASE +
+ Self::OFFSET + (idx * Self::STRIDE);
+
+ io.write32(self.0, offset);
}
- #[inline]
- pub(crate) fn alter<const SIZE: usize, T, F>(
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it, then run `f` on its value to
+ /// obtain a new value to write back.
+ #[inline(always)]
+ pub(crate) fn update<const SIZE: usize, T, B, F>(
io: &T,
- base: usize,
+ base: &B,
+ idx: usize,
f: F,
) where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
F: ::core::ops::FnOnce(Self) -> Self,
{
- let reg = f(Self::read(io, base));
- reg.write(io, base);
+ let reg = f(Self::read(io, base, idx));
+ reg.write(io, base, idx);
}
- #[inline]
- pub(crate) fn try_read<const SIZE: usize, T>(
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it.
+ ///
+        /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+        /// access is out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_read<const SIZE: usize, T, B>(
io: &T,
- base: usize,
+ base: &B,
+ idx: usize,
) -> ::kernel::error::Result<Self> where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
{
- io.try_read32(base + $offset).map(Self)
+ if idx < Self::SIZE {
+ Ok(Self::read(io, base, idx))
+ } else {
+ Err(EINVAL)
+ }
}
- #[inline]
- pub(crate) fn try_write<const SIZE: usize, T>(
+ /// Write the value contained in `self` to `io`, using the base address provided by
+ /// `base` and adding the offset of array register `idx` to it.
+ ///
+        /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+        /// access is out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_write<const SIZE: usize, T, B>(
self,
io: &T,
- base: usize,
- ) -> ::kernel::error::Result<()> where
+ base: &B,
+ idx: usize,
+ ) -> ::kernel::error::Result where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
{
- io.try_write32(self.0, base + $offset)
+ if idx < Self::SIZE {
+ Ok(self.write(io, base, idx))
+ } else {
+ Err(EINVAL)
+ }
}
- #[inline]
- pub(crate) fn try_alter<const SIZE: usize, T, F>(
+ /// Read the array register at index `idx` from `io`, using the base address provided
+ /// by `base` and adding the register's offset to it, then run `f` on its value to
+ /// obtain a new value to write back.
+ ///
+        /// The validity of `idx` is checked at run-time, and `EINVAL` is returned if the
+        /// access is out-of-bounds.
+ #[inline(always)]
+ pub(crate) fn try_update<const SIZE: usize, T, B, F>(
io: &T,
- base: usize,
+ base: &B,
+ idx: usize,
f: F,
- ) -> ::kernel::error::Result<()> where
+ ) -> ::kernel::error::Result where
T: ::core::ops::Deref<Target = ::kernel::io::Io<SIZE>>,
+ B: crate::regs::macros::RegisterBase<$base>,
F: ::core::ops::FnOnce(Self) -> Self,
{
- let reg = f(Self::try_read(io, base)?);
- reg.try_write(io, base)
+ if idx < Self::SIZE {
+ Ok(Self::update(io, base, idx, f))
+ } else {
+ Err(EINVAL)
+ }
}
}
};
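For illustration, a hypothetical use of the new array rules might look as follows. The register name, offset, field layout, and the `Bar0` alias are invented for this sketch; only the macro syntax and the generated `read`/`try_read` accessors follow the rules added above:

    register!(NV_EXAMPLE_SCRATCH @ 0x00001400 [ 8 ], "Hypothetical scratch register array" {
        31:0    value as u32, "Scratch value";
    });

    // Index 3 is validated at build time via `build_assert!`.
    fn read_scratch(bar: &Bar0) -> u32 {
        NV_EXAMPLE_SCRATCH::read(bar, 3).value()
    }

    // The `try_*` variants validate the index at run time instead, and return
    // `EINVAL` on an out-of-bounds access.
    fn read_scratch_checked(bar: &Bar0, idx: usize) -> Result<u32> {
        NV_EXAMPLE_SCRATCH::try_read(bar, idx).map(|r| r.value())
    }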
diff --git a/drivers/gpu/nova-core/sbuffer.rs b/drivers/gpu/nova-core/sbuffer.rs
new file mode 100644
index 000000000000..64758b7fae56
--- /dev/null
+++ b/drivers/gpu/nova-core/sbuffer.rs
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::ops::Deref;
+
+use kernel::{
+ alloc::KVec,
+ prelude::*, //
+};
+
+/// A buffer abstraction for discontiguous byte slices.
+///
+/// This allows multiple non-contiguous `&[u8]` or `&mut [u8]` slices to be
+/// treated as a single stream-like read/write buffer.
+///
+/// # Examples
+///
+/// ```
+/// let mut buf1 = [0u8; 5];
+/// let mut buf2 = [0u8; 5];
+/// let mut sbuffer = SBufferIter::new_writer([&mut buf1[..], &mut buf2[..]]);
+///
+/// let data = b"hi world!";
+/// sbuffer.write_all(data)?;
+/// drop(sbuffer);
+///
+/// assert_eq!(buf1, *b"hi wo");
+/// assert_eq!(buf2, *b"rld!\0");
+///
+/// # Ok::<(), Error>(())
+/// ```
+pub(crate) struct SBufferIter<I: Iterator> {
+ // [`Some`] if we are not at the end of the data yet.
+ cur_slice: Option<I::Item>,
+ // All the slices remaining after `cur_slice`.
+ slices: I,
+}
+
+impl<'a, I> SBufferIter<I>
+where
+ I: Iterator,
+{
+ /// Creates a reader buffer for a discontiguous set of byte slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let buf1: [u8; 5] = [0, 1, 2, 3, 4];
+ /// let buf2: [u8; 5] = [5, 6, 7, 8, 9];
+ /// let sbuffer = SBufferIter::new_reader([&buf1[..], &buf2[..]]);
+ /// let sum: u8 = sbuffer.sum();
+ /// assert_eq!(sum, 45);
+ /// ```
+ pub(crate) fn new_reader(slices: impl IntoIterator<IntoIter = I>) -> Self
+ where
+ I: Iterator<Item = &'a [u8]>,
+ {
+ Self::new(slices)
+ }
+
+ /// Creates a writeable buffer for a discontiguous set of byte slices.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut buf1 = [0u8; 5];
+ /// let mut buf2 = [0u8; 5];
+ /// let mut sbuffer = SBufferIter::new_writer([&mut buf1[..], &mut buf2[..]]);
+ /// sbuffer.write_all(&[0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9][..])?;
+ /// drop(sbuffer);
+ /// assert_eq!(buf1, [0, 1, 2, 3, 4]);
+ /// assert_eq!(buf2, [5, 6, 7, 8, 9]);
+ ///
+ /// ```
+ pub(crate) fn new_writer(slices: impl IntoIterator<IntoIter = I>) -> Self
+ where
+ I: Iterator<Item = &'a mut [u8]>,
+ {
+ Self::new(slices)
+ }
+
+ fn new(slices: impl IntoIterator<IntoIter = I>) -> Self
+ where
+ I::Item: Deref<Target = [u8]>,
+ {
+ let mut slices = slices.into_iter();
+
+ Self {
+ // Skip empty slices.
+ cur_slice: slices.find(|s| !s.deref().is_empty()),
+ slices,
+ }
+ }
+
+ /// Returns a slice of at most `len` bytes, or [`None`] if we are at the end of the data.
+ ///
+ /// If a slice shorter than `len` bytes has been returned, the caller can call this method
+ /// again until it returns [`None`] to try and obtain the remainder of the data.
+ ///
+    /// The closure `f` should split the slice received as its first parameter
+    /// at the position given as its second parameter.
+ fn get_slice_internal(
+ &mut self,
+ len: usize,
+ mut f: impl FnMut(I::Item, usize) -> (I::Item, I::Item),
+ ) -> Option<I::Item>
+ where
+ I::Item: Deref<Target = [u8]>,
+ {
+ match self.cur_slice.take() {
+ None => None,
+ Some(cur_slice) => {
+ if len >= cur_slice.len() {
+ // Caller requested more data than is in the current slice, return it entirely
+ // and prepare the following slice for being used. Skip empty slices to avoid
+ // trouble.
+ self.cur_slice = self.slices.find(|s| !s.is_empty());
+
+ Some(cur_slice)
+ } else {
+ // The current slice can satisfy the request, split it and return a slice of
+ // the requested size.
+ let (ret, next) = f(cur_slice, len);
+ self.cur_slice = Some(next);
+
+ Some(ret)
+ }
+ }
+ }
+ }
+
+    /// Returns `true` if this buffer has no more data available.
+ pub(crate) fn is_empty(&self) -> bool {
+ self.cur_slice.is_none()
+ }
+}
+
+/// Provides a way to get non-mutable slices of data to read from.
+impl<'a, I> SBufferIter<I>
+where
+ I: Iterator<Item = &'a [u8]>,
+{
+ /// Returns a slice of at most `len` bytes, or [`None`] if we are at the end of the data.
+ ///
+ /// If a slice shorter than `len` bytes has been returned, the caller can call this method
+ /// again until it returns [`None`] to try and obtain the remainder of the data.
+ fn get_slice(&mut self, len: usize) -> Option<&'a [u8]> {
+ self.get_slice_internal(len, |s, pos| s.split_at(pos))
+ }
+
+ /// Ideally we would implement `Read`, but it is not available in `core`.
+ /// So mimic `std::io::Read::read_exact`.
+ #[expect(unused)]
+ pub(crate) fn read_exact(&mut self, mut dst: &mut [u8]) -> Result {
+ while !dst.is_empty() {
+ match self.get_slice(dst.len()) {
+ None => return Err(EINVAL),
+ Some(src) => {
+ let dst_slice;
+ (dst_slice, dst) = dst.split_at_mut(src.len());
+ dst_slice.copy_from_slice(src);
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Read all the remaining data into a [`KVec`].
+ ///
+ /// `self` will be empty after this operation.
+ pub(crate) fn flush_into_kvec(&mut self, flags: kernel::alloc::Flags) -> Result<KVec<u8>> {
+ let mut buf = KVec::<u8>::new();
+
+ if let Some(slice) = core::mem::take(&mut self.cur_slice) {
+ buf.extend_from_slice(slice, flags)?;
+ }
+ for slice in &mut self.slices {
+ buf.extend_from_slice(slice, flags)?;
+ }
+
+ Ok(buf)
+ }
+}
+
+/// Provides a way to get mutable slices of data to write into.
+impl<'a, I> SBufferIter<I>
+where
+ I: Iterator<Item = &'a mut [u8]>,
+{
+ /// Returns a mutable slice of at most `len` bytes, or [`None`] if we are at the end of the
+ /// data.
+ ///
+ /// If a slice shorter than `len` bytes has been returned, the caller can call this method
+    /// again until it returns [`None`] to try and obtain the remainder of the data.
+ fn get_slice_mut(&mut self, len: usize) -> Option<&'a mut [u8]> {
+ self.get_slice_internal(len, |s, pos| s.split_at_mut(pos))
+ }
+
+    /// Ideally we would implement `Write`, but it is not available in `core`.
+ /// So mimic `std::io::Write::write_all`.
+ pub(crate) fn write_all(&mut self, mut src: &[u8]) -> Result {
+ while !src.is_empty() {
+ match self.get_slice_mut(src.len()) {
+ None => return Err(ETOOSMALL),
+ Some(dst) => {
+ let src_slice;
+ (src_slice, src) = src.split_at(dst.len());
+ dst.copy_from_slice(src_slice);
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl<'a, I> Iterator for SBufferIter<I>
+where
+ I: Iterator<Item = &'a [u8]>,
+{
+ type Item = u8;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // Returned slices are guaranteed to not be empty so we can safely index the first entry.
+ self.get_slice(1).map(|s| s[0])
+ }
+}
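As a small usage sketch (kernel context assumed, with `SBufferIter` and `GFP_KERNEL` in scope; the `gather` helper is invented), the reader side composes naturally with `flush_into_kvec`:

    fn gather(region_a: &[u8], region_b: &[u8]) -> Result<KVec<u8>> {
        // Present both regions as one logical byte stream...
        let mut reader = SBufferIter::new_reader([region_a, region_b]);
        // ...and copy the whole stream into a single allocation.
        reader.flush_into_kvec(GFP_KERNEL)
    }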
diff --git a/drivers/gpu/nova-core/util.rs b/drivers/gpu/nova-core/util.rs
index 76cedf3710d7..4b503249a3ef 100644
--- a/drivers/gpu/nova-core/util.rs
+++ b/drivers/gpu/nova-core/util.rs
@@ -1,47 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
-use kernel::prelude::*;
-use kernel::time::{Delta, Instant, Monotonic};
-
-pub(crate) const fn to_lowercase_bytes<const N: usize>(s: &str) -> [u8; N] {
- let src = s.as_bytes();
- let mut dst = [0; N];
- let mut i = 0;
-
- while i < src.len() && i < N {
- dst[i] = (src[i] as char).to_ascii_lowercase() as u8;
- i += 1;
- }
-
- dst
-}
-
-pub(crate) const fn const_bytes_to_str(bytes: &[u8]) -> &str {
- match core::str::from_utf8(bytes) {
- Ok(string) => string,
- Err(_) => kernel::build_error!("Bytes are not valid UTF-8."),
- }
-}
-
-/// Wait until `cond` is true or `timeout` elapsed.
-///
-/// When `cond` evaluates to `Some`, its return value is returned.
-///
-/// `Err(ETIMEDOUT)` is returned if `timeout` has been reached without `cond` evaluating to
-/// `Some`.
+/// Converts a null-terminated byte slice to a string, or `None` if the slice does not
+/// contain any null byte or contains invalid characters.
///
-/// TODO[DLAY]: replace with `read_poll_timeout` once it is available.
-/// (https://lore.kernel.org/lkml/20250220070611.214262-8-fujita.tomonori@gmail.com/)
-pub(crate) fn wait_on<R, F: Fn() -> Option<R>>(timeout: Delta, cond: F) -> Result<R> {
- let start_time = Instant::<Monotonic>::now();
-
- loop {
- if let Some(ret) = cond() {
- return Ok(ret);
- }
-
- if start_time.elapsed().as_nanos() > timeout.as_nanos() {
- return Err(ETIMEDOUT);
- }
- }
+/// Contrary to [`kernel::str::CStr::from_bytes_with_nul`], the null byte can be anywhere in the
+/// slice, and not only in the last position.
+pub(crate) fn str_from_null_terminated(bytes: &[u8]) -> Option<&str> {
+ use kernel::str::CStr;
+
+ bytes
+ .iter()
+ .position(|&b| b == 0)
+ .and_then(|null_pos| CStr::from_bytes_with_nul(&bytes[..=null_pos]).ok())
+ .and_then(|cstr| cstr.to_str().ok())
}
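For example (an invented buffer, in the style of the doctests elsewhere in this patch), a NUL-padded name field parses as follows:

    let name = [b'G', b'P', b'U', 0, 0, 0];
    // The first null byte terminates the string; trailing padding is ignored.
    assert_eq!(str_from_null_terminated(&name), Some("GPU"));
    // A slice without any null byte yields `None`.
    assert_eq!(str_from_null_terminated(b"GPU"), None);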
diff --git a/drivers/gpu/nova-core/vbios.rs b/drivers/gpu/nova-core/vbios.rs
index 5b5d9f38cbb3..abf423560ff4 100644
--- a/drivers/gpu/nova-core/vbios.rs
+++ b/drivers/gpu/nova-core/vbios.rs
@@ -2,14 +2,27 @@
//! VBIOS extraction and parsing.
-use crate::driver::Bar0;
-use crate::firmware::fwsec::Bcrt30Rsa3kSignature;
-use crate::firmware::FalconUCodeDescV3;
use core::convert::TryFrom;
-use kernel::device;
-use kernel::error::Result;
-use kernel::pci;
-use kernel::prelude::*;
+
+use kernel::{
+ device,
+ prelude::*,
+ ptr::{
+ Alignable,
+ Alignment, //
+ },
+ transmute::FromBytes,
+ types::ARef,
+};
+
+use crate::{
+ driver::Bar0,
+ firmware::{
+ fwsec::Bcrt30Rsa3kSignature,
+ FalconUCodeDescV3, //
+ },
+ num::FromSafeCast,
+};
/// The offset of the VBIOS ROM in the BAR0 space.
const ROM_OFFSET: usize = 0x300000;
@@ -21,6 +34,34 @@ const BIOS_READ_AHEAD_SIZE: usize = 1024;
/// indicates the last image. Bit 0-6 are reserved, bit 7 is last image bit.
const LAST_IMAGE_BIT_MASK: u8 = 0x80;
+/// BIOS Image Type from PCI Data Structure code_type field.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[repr(u8)]
+enum BiosImageType {
+ /// PC-AT compatible BIOS image (x86 legacy)
+ PciAt = 0x00,
+ /// EFI (Extensible Firmware Interface) BIOS image
+ Efi = 0x03,
+ /// NBSI (Notebook System Information) BIOS image
+ Nbsi = 0x70,
+ /// FwSec (Firmware Security) BIOS image
+ FwSec = 0xE0,
+}
+
+impl TryFrom<u8> for BiosImageType {
+ type Error = Error;
+
+ fn try_from(code: u8) -> Result<Self> {
+ match code {
+ 0x00 => Ok(Self::PciAt),
+ 0x03 => Ok(Self::Efi),
+ 0x70 => Ok(Self::Nbsi),
+ 0xE0 => Ok(Self::FwSec),
+ _ => Err(EINVAL),
+ }
+ }
+}
+
// PMU lookup table entry types. Used to locate PMU table entries
// in the Fwsec image, corresponding to falcon ucodes.
#[expect(dead_code)]
@@ -31,7 +72,7 @@ const FALCON_UCODE_ENTRY_APPID_FWSEC_PROD: u8 = 0x85;
/// Vbios Reader for constructing the VBIOS data.
struct VbiosIterator<'a> {
- pdev: &'a pci::Device,
+ dev: &'a device::Device,
bar0: &'a Bar0,
/// VBIOS data vector: As BIOS images are scanned, they are added to this vector for reference
/// or copying into other data structures. It is the entire scanned contents of the VBIOS which
@@ -46,9 +87,9 @@ struct VbiosIterator<'a> {
}
impl<'a> VbiosIterator<'a> {
- fn new(pdev: &'a pci::Device, bar0: &'a Bar0) -> Result<Self> {
+ fn new(dev: &'a device::Device, bar0: &'a Bar0) -> Result<Self> {
Ok(Self {
- pdev,
+ dev,
bar0,
data: KVec::new(),
current_offset: 0,
@@ -64,7 +105,7 @@ impl<'a> VbiosIterator<'a> {
// Ensure length is a multiple of 4 for 32-bit reads
if len % core::mem::size_of::<u32>() != 0 {
dev_err!(
- self.pdev.as_ref(),
+ self.dev,
"VBIOS read length {} is not a multiple of 4\n",
len
);
@@ -89,7 +130,7 @@ impl<'a> VbiosIterator<'a> {
/// Read bytes at a specific offset, filling any gap.
fn read_more_at_offset(&mut self, offset: usize, len: usize) -> Result {
if offset > BIOS_MAX_SCAN_LEN {
- dev_err!(self.pdev.as_ref(), "Error: exceeded BIOS scan limit.\n");
+ dev_err!(self.dev, "Error: exceeded BIOS scan limit.\n");
return Err(EINVAL);
}
@@ -115,7 +156,7 @@ impl<'a> VbiosIterator<'a> {
if offset + len > data_len {
self.read_more_at_offset(offset, len).inspect_err(|e| {
dev_err!(
- self.pdev.as_ref(),
+ self.dev,
"Failed to read more at offset {:#x}: {:?}\n",
offset,
e
@@ -123,9 +164,9 @@ impl<'a> VbiosIterator<'a> {
})?;
}
- BiosImage::new(self.pdev, &self.data[offset..offset + len]).inspect_err(|err| {
+ BiosImage::new(self.dev, &self.data[offset..offset + len]).inspect_err(|err| {
dev_err!(
- self.pdev.as_ref(),
+ self.dev,
"Failed to {} at offset {:#x}: {:?}\n",
context,
offset,
@@ -146,10 +187,7 @@ impl<'a> Iterator for VbiosIterator<'a> {
}
if self.current_offset > BIOS_MAX_SCAN_LEN {
- dev_err!(
- self.pdev.as_ref(),
- "Error: exceeded BIOS scan limit, stopping scan\n"
- );
+ dev_err!(self.dev, "Error: exceeded BIOS scan limit, stopping scan\n");
return None;
}
@@ -177,8 +215,7 @@ impl<'a> Iterator for VbiosIterator<'a> {
// Advance to next image (aligned to 512 bytes).
self.current_offset += image_size;
- // TODO[NUMM]: replace with `align_up` once it lands.
- self.current_offset = self.current_offset.next_multiple_of(512);
+ self.current_offset = self.current_offset.align_up(Alignment::new::<512>())?;
Some(Ok(full_image))
}
@@ -192,40 +229,45 @@ impl Vbios {
/// Probe for VBIOS extraction.
///
/// Once the VBIOS object is built, `bar0` is not read for [`Vbios`] purposes anymore.
- pub(crate) fn new(pdev: &pci::Device, bar0: &Bar0) -> Result<Vbios> {
+ pub(crate) fn new(dev: &device::Device, bar0: &Bar0) -> Result<Vbios> {
// Images to extract from iteration
let mut pci_at_image: Option<PciAtBiosImage> = None;
let mut first_fwsec_image: Option<FwSecBiosBuilder> = None;
let mut second_fwsec_image: Option<FwSecBiosBuilder> = None;
// Parse all VBIOS images in the ROM
- for image_result in VbiosIterator::new(pdev, bar0)? {
- let full_image = image_result?;
+ for image_result in VbiosIterator::new(dev, bar0)? {
+ let image = image_result?;
dev_dbg!(
- pdev.as_ref(),
- "Found BIOS image: size: {:#x}, type: {}, last: {}\n",
- full_image.image_size_bytes(),
- full_image.image_type_str(),
- full_image.is_last()
+ dev,
+ "Found BIOS image: size: {:#x}, type: {:?}, last: {}\n",
+ image.image_size_bytes(),
+ image.image_type(),
+ image.is_last()
);
- // Get references to images we will need after the loop, in order to
- // setup the falcon data offset.
- match full_image {
- BiosImage::PciAt(image) => {
- pci_at_image = Some(image);
+ // Convert to a specific image type
+ match BiosImageType::try_from(image.pcir.code_type) {
+ Ok(BiosImageType::PciAt) => {
+ pci_at_image = Some(PciAtBiosImage::try_from(image)?);
}
- BiosImage::FwSec(image) => {
+ Ok(BiosImageType::FwSec) => {
+ let fwsec = FwSecBiosBuilder {
+ base: image,
+ falcon_data_offset: None,
+ pmu_lookup_table: None,
+ falcon_ucode_offset: None,
+ };
if first_fwsec_image.is_none() {
- first_fwsec_image = Some(image);
+ first_fwsec_image = Some(fwsec);
} else {
- second_fwsec_image = Some(image);
+ second_fwsec_image = Some(fwsec);
}
}
- // For now we don't need to handle these
- BiosImage::Efi(_image) => {}
- BiosImage::Nbsi(_image) => {}
+ _ => {
+                // Ignore other or unknown image types.
+ }
}
}
@@ -234,14 +276,14 @@ impl Vbios {
(second_fwsec_image, first_fwsec_image, pci_at_image)
{
second
- .setup_falcon_data(pdev, &pci_at, &first)
- .inspect_err(|e| dev_err!(pdev.as_ref(), "Falcon data setup failed: {:?}\n", e))?;
+ .setup_falcon_data(&pci_at, &first)
+ .inspect_err(|e| dev_err!(dev, "Falcon data setup failed: {:?}\n", e))?;
Ok(Vbios {
- fwsec_image: second.build(pdev)?,
+ fwsec_image: second.build()?,
})
} else {
dev_err!(
- pdev.as_ref(),
+ dev,
"Missing required images for falcon data setup, skipping\n"
);
Err(EINVAL)
@@ -283,49 +325,29 @@ struct PcirStruct {
max_runtime_image_len: u16,
}
-impl PcirStruct {
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
- if data.len() < core::mem::size_of::<PcirStruct>() {
- dev_err!(pdev.as_ref(), "Not enough data for PcirStruct\n");
- return Err(EINVAL);
- }
+// SAFETY: all bit patterns are valid for `PcirStruct`.
+unsafe impl FromBytes for PcirStruct {}
- let mut signature = [0u8; 4];
- signature.copy_from_slice(&data[0..4]);
+impl PcirStruct {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
+ let (pcir, _) = PcirStruct::from_bytes_copy_prefix(data).ok_or(EINVAL)?;
// Signature should be "PCIR" (0x52494350) or "NPDS" (0x5344504e).
- if &signature != b"PCIR" && &signature != b"NPDS" {
+ if &pcir.signature != b"PCIR" && &pcir.signature != b"NPDS" {
dev_err!(
- pdev.as_ref(),
+ dev,
"Invalid signature for PcirStruct: {:?}\n",
- signature
+ pcir.signature
);
return Err(EINVAL);
}
- let mut class_code = [0u8; 3];
- class_code.copy_from_slice(&data[13..16]);
-
- let image_len = u16::from_le_bytes([data[16], data[17]]);
- if image_len == 0 {
- dev_err!(pdev.as_ref(), "Invalid image length: 0\n");
+ if pcir.image_len == 0 {
+ dev_err!(dev, "Invalid image length: 0\n");
return Err(EINVAL);
}
- Ok(PcirStruct {
- signature,
- vendor_id: u16::from_le_bytes([data[4], data[5]]),
- device_id: u16::from_le_bytes([data[6], data[7]]),
- device_list_ptr: u16::from_le_bytes([data[8], data[9]]),
- pci_data_struct_len: u16::from_le_bytes([data[10], data[11]]),
- pci_data_struct_rev: data[12],
- class_code,
- image_len,
- vendor_rom_rev: u16::from_le_bytes([data[18], data[19]]),
- code_type: data[20],
- last_image: data[21],
- max_runtime_image_len: u16::from_le_bytes([data[22], data[23]]),
- })
+ Ok(pcir)
}
/// Check if this is the last image in the ROM.
@@ -335,7 +357,7 @@ impl PcirStruct {
/// Calculate image size in bytes from 512-byte blocks.
fn image_size_bytes(&self) -> usize {
- self.image_len as usize * 512
+ usize::from(self.image_len) * 512
}
}
@@ -345,7 +367,7 @@ impl PcirStruct {
/// its header) is in the [`PciAtBiosImage`] and the falcon data it is pointing to is in the
/// [`FwSecBiosImage`].
#[derive(Debug, Clone, Copy)]
-#[expect(dead_code)]
+#[repr(C)]
struct BitHeader {
/// 0h: BIT Header Identifier (BMP=0x7FFF/BIT=0xB8FF)
id: u16,
@@ -363,30 +385,19 @@ struct BitHeader {
checksum: u8,
}
+// SAFETY: all bit patterns are valid for `BitHeader`.
+unsafe impl FromBytes for BitHeader {}
+
impl BitHeader {
fn new(data: &[u8]) -> Result<Self> {
- if data.len() < 12 {
- return Err(EINVAL);
- }
-
- let mut signature = [0u8; 4];
- signature.copy_from_slice(&data[2..6]);
+ let (header, _) = BitHeader::from_bytes_copy_prefix(data).ok_or(EINVAL)?;
// Check header ID and signature
- let id = u16::from_le_bytes([data[0], data[1]]);
- if id != 0xB8FF || &signature != b"BIT\0" {
+ if header.id != 0xB8FF || &header.signature != b"BIT\0" {
return Err(EINVAL);
}
- Ok(BitHeader {
- id,
- signature,
- bcd_version: u16::from_le_bytes([data[6], data[7]]),
- header_size: data[8],
- token_size: data[9],
- token_entries: data[10],
- checksum: data[11],
- })
+ Ok(header)
}
}
@@ -413,13 +424,13 @@ impl BitToken {
let header = &image.bit_header;
// Offset to the first token entry
- let tokens_start = image.bit_offset + header.header_size as usize;
+ let tokens_start = image.bit_offset + usize::from(header.header_size);
- for i in 0..header.token_entries as usize {
- let entry_offset = tokens_start + (i * header.token_size as usize);
+ for i in 0..usize::from(header.token_entries) {
+ let entry_offset = tokens_start + (i * usize::from(header.token_size));
// Make sure we don't go out of bounds
- if entry_offset + header.token_size as usize > image.base.data.len() {
+ if entry_offset + usize::from(header.token_size) > image.base.data.len() {
return Err(EINVAL);
}
@@ -467,7 +478,7 @@ struct PciRomHeader {
}
impl PciRomHeader {
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
if data.len() < 26 {
// Need at least 26 bytes to read pciDataStrucPtr and sizeOfBlock.
return Err(EINVAL);
@@ -479,7 +490,7 @@ impl PciRomHeader {
match signature {
0xAA55 | 0xBB77 | 0x4E56 => {}
_ => {
- dev_err!(pdev.as_ref(), "ROM signature unknown {:#x}\n", signature);
+ dev_err!(dev, "ROM signature unknown {:#x}\n", signature);
return Err(EINVAL);
}
}
@@ -537,39 +548,29 @@ struct NpdeStruct {
last_image: u8,
}
-impl NpdeStruct {
- fn new(pdev: &pci::Device, data: &[u8]) -> Option<Self> {
- if data.len() < core::mem::size_of::<Self>() {
- dev_dbg!(pdev.as_ref(), "Not enough data for NpdeStruct\n");
- return None;
- }
+// SAFETY: all bit patterns are valid for `NpdeStruct`.
+unsafe impl FromBytes for NpdeStruct {}
- let mut signature = [0u8; 4];
- signature.copy_from_slice(&data[0..4]);
+impl NpdeStruct {
+ fn new(dev: &device::Device, data: &[u8]) -> Option<Self> {
+ let (npde, _) = NpdeStruct::from_bytes_copy_prefix(data)?;
// Signature should be "NPDE" (0x4544504E).
- if &signature != b"NPDE" {
+ if &npde.signature != b"NPDE" {
dev_dbg!(
- pdev.as_ref(),
+ dev,
"Invalid signature for NpdeStruct: {:?}\n",
- signature
+ npde.signature
);
return None;
}
- let subimage_len = u16::from_le_bytes([data[8], data[9]]);
- if subimage_len == 0 {
- dev_dbg!(pdev.as_ref(), "Invalid subimage length: 0\n");
+ if npde.subimage_len == 0 {
+ dev_dbg!(dev, "Invalid subimage length: 0\n");
return None;
}
- Some(NpdeStruct {
- signature,
- npci_data_ext_rev: u16::from_le_bytes([data[4], data[5]]),
- npci_data_ext_len: u16::from_le_bytes([data[6], data[7]]),
- subimage_len,
- last_image: data[10],
- })
+ Some(npde)
}
/// Check if this is the last image in the ROM.
@@ -579,134 +580,55 @@ impl NpdeStruct {
/// Calculate image size in bytes from 512-byte blocks.
fn image_size_bytes(&self) -> usize {
- self.subimage_len as usize * 512
+ usize::from(self.subimage_len) * 512
}
/// Try to find NPDE in the data, the NPDE is right after the PCIR.
fn find_in_data(
- pdev: &pci::Device,
+ dev: &device::Device,
data: &[u8],
rom_header: &PciRomHeader,
pcir: &PcirStruct,
) -> Option<Self> {
// Calculate the offset where NPDE might be located
// NPDE should be right after the PCIR structure, aligned to 16 bytes
- let pcir_offset = rom_header.pci_data_struct_offset as usize;
- let npde_start = (pcir_offset + pcir.pci_data_struct_len as usize + 0x0F) & !0x0F;
+ let pcir_offset = usize::from(rom_header.pci_data_struct_offset);
+ let npde_start = (pcir_offset + usize::from(pcir.pci_data_struct_len) + 0x0F) & !0x0F;
// Check if we have enough data
if npde_start + core::mem::size_of::<Self>() > data.len() {
- dev_dbg!(pdev.as_ref(), "Not enough data for NPDE\n");
+ dev_dbg!(dev, "Not enough data for NPDE\n");
return None;
}
// Try to create NPDE from the data
- NpdeStruct::new(pdev, &data[npde_start..])
- }
-}
-
-// Use a macro to implement BiosImage enum and methods. This avoids having to
-// repeat each enum type when implementing functions like base() in BiosImage.
-macro_rules! bios_image {
- (
- $($variant:ident: $class:ident),* $(,)?
- ) => {
- // BiosImage enum with variants for each image type
- enum BiosImage {
- $($variant($class)),*
- }
-
- impl BiosImage {
- /// Get a reference to the common BIOS image data regardless of type
- fn base(&self) -> &BiosImageBase {
- match self {
- $(Self::$variant(img) => &img.base),*
- }
- }
-
- /// Returns a string representing the type of BIOS image
- fn image_type_str(&self) -> &'static str {
- match self {
- $(Self::$variant(_) => stringify!($variant)),*
- }
- }
- }
+ NpdeStruct::new(dev, &data[npde_start..])
}
}
-impl BiosImage {
- /// Check if this is the last image.
- fn is_last(&self) -> bool {
- let base = self.base();
-
- // For NBSI images (type == 0x70), return true as they're
- // considered the last image
- if matches!(self, Self::Nbsi(_)) {
- return true;
- }
-
- // For other image types, check the NPDE first if available
- if let Some(ref npde) = base.npde {
- return npde.is_last();
- }
-
- // Otherwise, fall back to checking the PCIR last_image flag
- base.pcir.is_last()
- }
-
- /// Get the image size in bytes.
- fn image_size_bytes(&self) -> usize {
- let base = self.base();
-
- // Prefer NPDE image size if available
- if let Some(ref npde) = base.npde {
- return npde.image_size_bytes();
- }
-
- // Otherwise, fall back to the PCIR image size
- base.pcir.image_size_bytes()
- }
-
- /// Create a [`BiosImageBase`] from a byte slice and convert it to a [`BiosImage`] which
- /// triggers the constructor of the specific BiosImage enum variant.
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
- let base = BiosImageBase::new(pdev, data)?;
- let image = base.into_image().inspect_err(|e| {
- dev_err!(pdev.as_ref(), "Failed to create BiosImage: {:?}\n", e);
- })?;
-
- Ok(image)
- }
-}
-
-bios_image! {
- PciAt: PciAtBiosImage, // PCI-AT compatible BIOS image
- Efi: EfiBiosImage, // EFI (Extensible Firmware Interface)
- Nbsi: NbsiBiosImage, // NBSI (Nvidia Bios System Interface)
- FwSec: FwSecBiosBuilder, // FWSEC (Firmware Security)
-}
-
/// The PciAt BIOS image is typically the first BIOS image type found in the BIOS image chain.
///
/// It contains the BIT header and the BIT tokens.
struct PciAtBiosImage {
- base: BiosImageBase,
+ base: BiosImage,
bit_header: BitHeader,
bit_offset: usize,
}
+#[expect(dead_code)]
struct EfiBiosImage {
- base: BiosImageBase,
+ base: BiosImage,
// EFI-specific fields can be added here in the future.
}
+#[expect(dead_code)]
struct NbsiBiosImage {
- base: BiosImageBase,
+ base: BiosImage,
// NBSI-specific fields can be added here in the future.
}
struct FwSecBiosBuilder {
- base: BiosImageBase,
+ base: BiosImage,
/// These are temporary fields that are used during the construction of the
/// [`FwSecBiosBuilder`].
///
@@ -725,38 +647,18 @@ struct FwSecBiosBuilder {
///
/// The PMU table contains voltage/frequency tables as well as a pointer to the Falcon Ucode.
pub(crate) struct FwSecBiosImage {
- base: BiosImageBase,
+ base: BiosImage,
/// The offset of the Falcon ucode.
falcon_ucode_offset: usize,
}
-// Convert from BiosImageBase to BiosImage
-impl TryFrom<BiosImageBase> for BiosImage {
- type Error = Error;
-
- fn try_from(base: BiosImageBase) -> Result<Self> {
- match base.pcir.code_type {
- 0x00 => Ok(BiosImage::PciAt(base.try_into()?)),
- 0x03 => Ok(BiosImage::Efi(EfiBiosImage { base })),
- 0x70 => Ok(BiosImage::Nbsi(NbsiBiosImage { base })),
- 0xE0 => Ok(BiosImage::FwSec(FwSecBiosBuilder {
- base,
- falcon_data_offset: None,
- pmu_lookup_table: None,
- falcon_ucode_offset: None,
- })),
- _ => Err(EINVAL),
- }
- }
-}
-
/// BIOS Image structure containing various headers and reference fields to all BIOS images.
///
-/// Each BiosImage type has a BiosImageBase type along with other image-specific fields. Note that
-/// Rust favors composition of types over inheritance.
-#[derive(Debug)]
+/// A BiosImage struct is embedded into all image types and implements common operations.
#[expect(dead_code)]
-struct BiosImageBase {
+struct BiosImage {
+ /// Used for logging.
+ dev: ARef<device::Device>,
/// PCI ROM Expansion Header
rom_header: PciRomHeader,
/// PCI Data Structure
@@ -767,52 +669,81 @@ struct BiosImageBase {
data: KVec<u8>,
}
-impl BiosImageBase {
- fn into_image(self) -> Result<BiosImage> {
- BiosImage::try_from(self)
+impl BiosImage {
+ /// Get the image size in bytes.
+ fn image_size_bytes(&self) -> usize {
+ // Prefer NPDE image size if available
+ if let Some(ref npde) = self.npde {
+ npde.image_size_bytes()
+ } else {
+ // Otherwise, fall back to the PCIR image size
+ self.pcir.image_size_bytes()
+ }
+ }
+
+ /// Get the BIOS image type.
+ fn image_type(&self) -> Result<BiosImageType> {
+ BiosImageType::try_from(self.pcir.code_type)
}
- /// Creates a new BiosImageBase from raw byte data.
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
+ /// Check if this is the last image.
+ fn is_last(&self) -> bool {
+ // For NBSI images, return true as they're considered the last image.
+ if self.image_type() == Ok(BiosImageType::Nbsi) {
+ return true;
+ }
+
+ // For other image types, check the NPDE first if available
+ if let Some(ref npde) = self.npde {
+ return npde.is_last();
+ }
+
+ // Otherwise, fall back to checking the PCIR last_image flag
+ self.pcir.is_last()
+ }
+
+ /// Creates a new BiosImage from raw byte data.
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
// Ensure we have enough data for the ROM header.
if data.len() < 26 {
- dev_err!(pdev.as_ref(), "Not enough data for ROM header\n");
+ dev_err!(dev, "Not enough data for ROM header\n");
return Err(EINVAL);
}
// Parse the ROM header.
- let rom_header = PciRomHeader::new(pdev, &data[0..26])
- .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PciRomHeader: {:?}\n", e))?;
+ let rom_header = PciRomHeader::new(dev, &data[0..26])
+ .inspect_err(|e| dev_err!(dev, "Failed to create PciRomHeader: {:?}\n", e))?;
// Get the PCI Data Structure using the pointer from the ROM header.
- let pcir_offset = rom_header.pci_data_struct_offset as usize;
+ let pcir_offset = usize::from(rom_header.pci_data_struct_offset);
let pcir_data = data
.get(pcir_offset..pcir_offset + core::mem::size_of::<PcirStruct>())
.ok_or(EINVAL)
.inspect_err(|_| {
dev_err!(
- pdev.as_ref(),
+ dev,
"PCIR offset {:#x} out of bounds (data length: {})\n",
pcir_offset,
data.len()
);
dev_err!(
- pdev.as_ref(),
+ dev,
"Consider reading more data for construction of BiosImage\n"
);
})?;
- let pcir = PcirStruct::new(pdev, pcir_data)
- .inspect_err(|e| dev_err!(pdev.as_ref(), "Failed to create PcirStruct: {:?}\n", e))?;
+ let pcir = PcirStruct::new(dev, pcir_data)
+ .inspect_err(|e| dev_err!(dev, "Failed to create PcirStruct: {:?}\n", e))?;
// Look for NPDE structure if this is not an NBSI image (type != 0x70).
- let npde = NpdeStruct::find_in_data(pdev, data, &rom_header, &pcir);
+ let npde = NpdeStruct::find_in_data(dev, data, &rom_header, &pcir);
// Create a copy of the data.
let mut data_copy = KVec::new();
data_copy.extend_from_slice(data, GFP_KERNEL)?;
- Ok(BiosImageBase {
+ Ok(BiosImage {
+ dev: dev.into(),
rom_header,
pcir,
npde,
@@ -848,25 +779,25 @@ impl PciAtBiosImage {
///
/// This is just a 4 byte structure that contains a pointer to the Falcon data in the FWSEC
/// image.
- fn falcon_data_ptr(&self, pdev: &pci::Device) -> Result<u32> {
+ fn falcon_data_ptr(&self) -> Result<u32> {
let token = self.get_bit_token(BIT_TOKEN_ID_FALCON_DATA)?;
// Make sure we don't go out of bounds
- if token.data_offset as usize + 4 > self.base.data.len() {
+ if usize::from(token.data_offset) + 4 > self.base.data.len() {
return Err(EINVAL);
}
// read the 4 bytes at the offset specified in the token
- let offset = token.data_offset as usize;
+ let offset = usize::from(token.data_offset);
let bytes: [u8; 4] = self.base.data[offset..offset + 4].try_into().map_err(|_| {
- dev_err!(pdev.as_ref(), "Failed to convert data slice to array");
+ dev_err!(self.base.dev, "Failed to convert data slice to array");
EINVAL
})?;
let data_ptr = u32::from_le_bytes(bytes);
- if (data_ptr as usize) < self.base.data.len() {
- dev_err!(pdev.as_ref(), "Falcon data pointer out of bounds\n");
+ if (usize::from_safe_cast(data_ptr)) < self.base.data.len() {
+ dev_err!(self.base.dev, "Falcon data pointer out of bounds\n");
return Err(EINVAL);
}
@@ -874,10 +805,10 @@ impl PciAtBiosImage {
}
}
-impl TryFrom<BiosImageBase> for PciAtBiosImage {
+impl TryFrom<BiosImage> for PciAtBiosImage {
type Error = Error;
- fn try_from(base: BiosImageBase) -> Result<Self> {
+ fn try_from(base: BiosImage) -> Result<Self> {
let data_slice = &base.data;
let (bit_header, bit_offset) = PciAtBiosImage::find_bit_header(data_slice)?;
@@ -892,7 +823,7 @@ impl TryFrom<BiosImageBase> for PciAtBiosImage {
/// The [`PmuLookupTableEntry`] structure is a single entry in the [`PmuLookupTable`].
///
/// See the [`PmuLookupTable`] description for more information.
-#[expect(dead_code)]
+#[repr(C, packed)]
struct PmuLookupTableEntry {
application_id: u8,
target_id: u8,
@@ -901,7 +832,7 @@ struct PmuLookupTableEntry {
impl PmuLookupTableEntry {
fn new(data: &[u8]) -> Result<Self> {
- if data.len() < 6 {
+ if data.len() < core::mem::size_of::<Self>() {
return Err(EINVAL);
}
@@ -913,37 +844,39 @@ impl PmuLookupTableEntry {
}
}
+#[repr(C)]
+struct PmuLookupTableHeader {
+ version: u8,
+ header_len: u8,
+ entry_len: u8,
+ entry_count: u8,
+}
+
+// SAFETY: all bit patterns are valid for `PmuLookupTableHeader`.
+unsafe impl FromBytes for PmuLookupTableHeader {}
+
/// The [`PmuLookupTableEntry`] structure is used to find the [`PmuLookupTableEntry`] for a given
/// application ID.
///
/// The table of entries is pointed to by the falcon data pointer in the BIT table, and is used to
/// locate the Falcon Ucode.
-#[expect(dead_code)]
struct PmuLookupTable {
- version: u8,
- header_len: u8,
- entry_len: u8,
- entry_count: u8,
+ header: PmuLookupTableHeader,
table_data: KVec<u8>,
}
impl PmuLookupTable {
- fn new(pdev: &pci::Device, data: &[u8]) -> Result<Self> {
- if data.len() < 4 {
- return Err(EINVAL);
- }
+ fn new(dev: &device::Device, data: &[u8]) -> Result<Self> {
+ let (header, _) = PmuLookupTableHeader::from_bytes_copy_prefix(data).ok_or(EINVAL)?;
- let header_len = data[1] as usize;
- let entry_len = data[2] as usize;
- let entry_count = data[3] as usize;
+ let header_len = usize::from(header.header_len);
+ let entry_len = usize::from(header.entry_len);
+ let entry_count = usize::from(header.entry_count);
let required_bytes = header_len + (entry_count * entry_len);
if data.len() < required_bytes {
- dev_err!(
- pdev.as_ref(),
- "PmuLookupTable data length less than required\n"
- );
+ dev_err!(dev, "PmuLookupTable data length less than required\n");
return Err(EINVAL);
}
@@ -956,34 +889,24 @@ impl PmuLookupTable {
// Debug logging of entries (dumps the table data to dmesg)
for i in (header_len..required_bytes).step_by(entry_len) {
- dev_dbg!(
- pdev.as_ref(),
- "PMU entry: {:02x?}\n",
- &data[i..][..entry_len]
- );
+ dev_dbg!(dev, "PMU entry: {:02x?}\n", &data[i..][..entry_len]);
}
- Ok(PmuLookupTable {
- version: data[0],
- header_len: header_len as u8,
- entry_len: entry_len as u8,
- entry_count: entry_count as u8,
- table_data,
- })
+ Ok(PmuLookupTable { header, table_data })
}
fn lookup_index(&self, idx: u8) -> Result<PmuLookupTableEntry> {
- if idx >= self.entry_count {
+ if idx >= self.header.entry_count {
return Err(EINVAL);
}
- let index = (idx as usize) * self.entry_len as usize;
+ let index = (usize::from(idx)) * usize::from(self.header.entry_len);
PmuLookupTableEntry::new(&self.table_data[index..])
}
// find entry by type value
fn find_entry_by_type(&self, entry_type: u8) -> Result<PmuLookupTableEntry> {
- for i in 0..self.entry_count {
+ for i in 0..self.header.entry_count {
let entry = self.lookup_index(i)?;
if entry.application_id == entry_type {
return Ok(entry);
@@ -997,11 +920,10 @@ impl PmuLookupTable {
impl FwSecBiosBuilder {
fn setup_falcon_data(
&mut self,
- pdev: &pci::Device,
pci_at_image: &PciAtBiosImage,
first_fwsec: &FwSecBiosBuilder,
) -> Result {
- let mut offset = pci_at_image.falcon_data_ptr(pdev)? as usize;
+ let mut offset = usize::from_safe_cast(pci_at_image.falcon_data_ptr()?);
let mut pmu_in_first_fwsec = false;
// The falcon data pointer assumes that the PciAt and FWSEC images
@@ -1024,10 +946,15 @@ impl FwSecBiosBuilder {
self.falcon_data_offset = Some(offset);
if pmu_in_first_fwsec {
- self.pmu_lookup_table =
- Some(PmuLookupTable::new(pdev, &first_fwsec.base.data[offset..])?);
+ self.pmu_lookup_table = Some(PmuLookupTable::new(
+ &self.base.dev,
+ &first_fwsec.base.data[offset..],
+ )?);
} else {
- self.pmu_lookup_table = Some(PmuLookupTable::new(pdev, &self.base.data[offset..])?);
+ self.pmu_lookup_table = Some(PmuLookupTable::new(
+ &self.base.dev,
+ &self.base.data[offset..],
+ )?);
}
match self
@@ -1037,10 +964,10 @@ impl FwSecBiosBuilder {
.find_entry_by_type(FALCON_UCODE_ENTRY_APPID_FWSEC_PROD)
{
Ok(entry) => {
- let mut ucode_offset = entry.data as usize;
+ let mut ucode_offset = usize::from_safe_cast(entry.data);
ucode_offset -= pci_at_image.base.data.len();
if ucode_offset < first_fwsec.base.data.len() {
- dev_err!(pdev.as_ref(), "Falcon Ucode offset not in second Fwsec.\n");
+ dev_err!(self.base.dev, "Falcon Ucode offset not in second Fwsec.\n");
return Err(EINVAL);
}
ucode_offset -= first_fwsec.base.data.len();
@@ -1048,7 +975,7 @@ impl FwSecBiosBuilder {
}
Err(e) => {
dev_err!(
- pdev.as_ref(),
+ self.base.dev,
"PmuLookupTableEntry not found, error: {:?}\n",
e
);
@@ -1059,7 +986,7 @@ impl FwSecBiosBuilder {
}
/// Build the final FwSecBiosImage from this builder
- fn build(self, pdev: &pci::Device) -> Result<FwSecBiosImage> {
+ fn build(self) -> Result<FwSecBiosImage> {
let ret = FwSecBiosImage {
base: self.base,
falcon_ucode_offset: self.falcon_ucode_offset.ok_or(EINVAL)?,
@@ -1067,8 +994,8 @@ impl FwSecBiosBuilder {
if cfg!(debug_assertions) {
// Print the desc header for debugging
- let desc = ret.header(pdev.as_ref())?;
- dev_dbg!(pdev.as_ref(), "PmuLookupTableEntry desc: {:#?}\n", desc);
+ let desc = ret.header()?;
+ dev_dbg!(ret.base.dev, "PmuLookupTableEntry desc: {:#?}\n", desc);
}
Ok(ret)
@@ -1077,13 +1004,16 @@ impl FwSecBiosBuilder {
impl FwSecBiosImage {
/// Get the FwSec header ([`FalconUCodeDescV3`]).
- pub(crate) fn header(&self, dev: &device::Device) -> Result<&FalconUCodeDescV3> {
+ pub(crate) fn header(&self) -> Result<&FalconUCodeDescV3> {
// Get the falcon ucode offset that was found in setup_falcon_data.
let falcon_ucode_offset = self.falcon_ucode_offset;
// Make sure the offset is within the data bounds.
if falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>() > self.base.data.len() {
- dev_err!(dev, "fwsec-frts header not contained within BIOS bounds\n");
+ dev_err!(
+ self.base.dev,
+ "fwsec-frts header not contained within BIOS bounds\n"
+ );
return Err(ERANGE);
}
@@ -1095,7 +1025,7 @@ impl FwSecBiosImage {
let ver = (hdr & 0xff00) >> 8;
if ver != 3 {
- dev_err!(dev, "invalid fwsec firmware version: {:?}\n", ver);
+ dev_err!(self.base.dev, "invalid fwsec firmware version: {:?}\n", ver);
return Err(EINVAL);
}
@@ -1115,36 +1045,37 @@ impl FwSecBiosImage {
}
/// Get the ucode data as a byte slice
- pub(crate) fn ucode(&self, dev: &device::Device, desc: &FalconUCodeDescV3) -> Result<&[u8]> {
+ pub(crate) fn ucode(&self, desc: &FalconUCodeDescV3) -> Result<&[u8]> {
let falcon_ucode_offset = self.falcon_ucode_offset;
// The ucode data follows the descriptor.
let ucode_data_offset = falcon_ucode_offset + desc.size();
- let size = (desc.imem_load_size + desc.dmem_load_size) as usize;
+ let size = usize::from_safe_cast(desc.imem_load_size + desc.dmem_load_size);
// Get the data slice, checking bounds in a single operation.
self.base
.data
.get(ucode_data_offset..ucode_data_offset + size)
.ok_or(ERANGE)
- .inspect_err(|_| dev_err!(dev, "fwsec ucode data not contained within BIOS bounds\n"))
+ .inspect_err(|_| {
+ dev_err!(
+ self.base.dev,
+ "fwsec ucode data not contained within BIOS bounds\n"
+ )
+ })
}
/// Get the signatures as a byte slice
- pub(crate) fn sigs(
- &self,
- dev: &device::Device,
- desc: &FalconUCodeDescV3,
- ) -> Result<&[Bcrt30Rsa3kSignature]> {
+ pub(crate) fn sigs(&self, desc: &FalconUCodeDescV3) -> Result<&[Bcrt30Rsa3kSignature]> {
// The signatures data follows the descriptor.
let sigs_data_offset = self.falcon_ucode_offset + core::mem::size_of::<FalconUCodeDescV3>();
- let sigs_size =
- desc.signature_count as usize * core::mem::size_of::<Bcrt30Rsa3kSignature>();
+ let sigs_count = usize::from(desc.signature_count);
+ let sigs_size = sigs_count * core::mem::size_of::<Bcrt30Rsa3kSignature>();
// Make sure the data is within bounds.
if sigs_data_offset + sigs_size > self.base.data.len() {
dev_err!(
- dev,
+ self.base.dev,
"fwsec signatures data not contained within BIOS bounds\n"
);
return Err(ERANGE);
@@ -1159,7 +1090,7 @@ impl FwSecBiosImage {
.as_ptr()
.add(sigs_data_offset)
.cast::<Bcrt30Rsa3kSignature>(),
- desc.signature_count as usize,
+ sigs_count,
)
})
}
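The `FromBytes` pattern used above for `PcirStruct`, `BitHeader`, and `NpdeStruct` generalizes to any plain-old-data header. A sketch with an invented `ExampleHeader` (assuming `from_bytes_copy_prefix` behaves as used in this patch, returning the parsed value plus the remaining bytes):

    #[derive(Clone, Copy)]
    #[repr(C)]
    struct ExampleHeader {
        signature: [u8; 4],
        version: u16,
        len: u16,
    }

    // SAFETY: all bit patterns are valid for `ExampleHeader`.
    unsafe impl FromBytes for ExampleHeader {}

    fn parse_example(data: &[u8]) -> Result<ExampleHeader> {
        let (hdr, _rest) = ExampleHeader::from_bytes_copy_prefix(data).ok_or(EINVAL)?;
        // Validate the magic before trusting the rest of the header.
        if &hdr.signature != b"EXMP" {
            return Err(EINVAL);
        }
        Ok(hdr)
    }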
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
index 9610f878da1b..87186f891a6a 100644
--- a/drivers/greybus/gb-beagleplay.c
+++ b/drivers/greybus/gb-beagleplay.c
@@ -644,8 +644,8 @@ static int cc1352_bootloader_wait_for_ack(struct gb_beagleplay *bg)
ret = wait_for_completion_timeout(
&bg->fwl_ack_com, msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT));
- if (ret < 0)
- return dev_err_probe(&bg->sd->dev, ret,
+ if (!ret)
+ return dev_err_probe(&bg->sd->dev, -ETIMEDOUT,
"Failed to acquire ack semaphore");
switch (READ_ONCE(bg->fwl_ack)) {
@@ -683,8 +683,8 @@ static int cc1352_bootloader_get_status(struct gb_beagleplay *bg)
ret = wait_for_completion_timeout(
&bg->fwl_cmd_response_com,
msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT));
- if (ret < 0)
- return dev_err_probe(&bg->sd->dev, ret,
+ if (!ret)
+ return dev_err_probe(&bg->sd->dev, -ETIMEDOUT,
"Failed to acquire last status semaphore");
switch (READ_ONCE(bg->fwl_cmd_response)) {
@@ -768,8 +768,8 @@ static int cc1352_bootloader_crc32(struct gb_beagleplay *bg, u32 *crc32)
ret = wait_for_completion_timeout(
&bg->fwl_cmd_response_com,
msecs_to_jiffies(CC1352_BOOTLOADER_TIMEOUT));
- if (ret < 0)
- return dev_err_probe(&bg->sd->dev, ret,
+ if (!ret)
+ return dev_err_probe(&bg->sd->dev, -ETIMEDOUT,
"Failed to acquire last status semaphore");
*crc32 = READ_ONCE(bg->fwl_cmd_response);
diff --git a/drivers/greybus/operation.c b/drivers/greybus/operation.c
index 54ccc434a1f7..7e12ffb2dd60 100644
--- a/drivers/greybus/operation.c
+++ b/drivers/greybus/operation.c
@@ -1238,7 +1238,7 @@ int __init gb_operation_init(void)
goto err_destroy_message_cache;
gb_operation_completion_wq = alloc_workqueue("greybus_completion",
- 0, 0);
+ WQ_PERCPU, 0);
if (!gb_operation_completion_wq)
goto err_destroy_operation_cache;
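WQ_PERCPU makes explicit the per-CPU behaviour that a 0 flags argument has implied so far, presumably as groundwork for changing the workqueue defaults. A hedged sketch of the same call shape (workqueue and work item names illustrative):

	struct workqueue_struct *wq;

	wq = alloc_workqueue("example_completion", WQ_PERCPU, 0);
	if (!wq)
		return -ENOMEM;
	queue_work(wq, &example_work);	/* runs on the submitting CPU's pool */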
diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
index 4256467fcd35..35ea7147dca6 100644
--- a/drivers/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -10,6 +10,7 @@
#include <linux/kstrtox.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>
+#include <linux/string_choices.h>
#define SVC_INTF_EJECT_TIMEOUT 9000
#define SVC_INTF_ACTIVATE_TIMEOUT 6000
@@ -73,7 +74,7 @@ static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
struct gb_svc *svc = to_gb_svc(dev);
return sprintf(buf, "%s\n",
- gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
+ str_enabled_disabled(gb_svc_watchdog_enabled(svc)));
}
static ssize_t watchdog_store(struct device *dev,
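str_enabled_disabled() is one of the helpers in <linux/string_choices.h> that replace open-coded ternaries; a minimal usage sketch (the bool is illustrative):

	#include <linux/string_choices.h>

	bool on = true;

	/* prints "state: enabled"; siblings such as str_on_off() and
	 * str_yes_no() follow the same pattern
	 */
	pr_info("state: %s\n", str_enabled_disabled(on));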
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 79997553d8f9..920a64b66b25 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -92,6 +92,17 @@ config HID_GENERIC
If unsure, say Y.
+config HID_HAPTIC
+ bool "Haptic touchpad support"
+ default n
+ help
+ Support for touchpads that use force sensors and haptic actuators instead
+ of a traditional button.
+ Adds extra report parsing and a force feedback (FF) device to the
+ hid-multitouch driver.
+ It can be used for the Elan 2703 haptic touchpad.
+
+ If unsure, say N.
+
menu "Special HID drivers"
config HID_A4TECH
@@ -372,6 +383,7 @@ config HID_EVISION
help
Support for some EVision keyboards. Note that this is needed only when
applying customization using userspace programs.
+ Also needed for some EVision devices that require report descriptor fixups.
config HID_EZKEY
tristate "Ezkey BTC 8193 keyboard"
@@ -597,8 +609,6 @@ config HID_LED
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
- depends on ACPI
- select ACPI_PLATFORM_PROFILE
select NEW_LEDS
select LEDS_CLASS
help
@@ -1162,7 +1172,7 @@ config GREENASIA_FF
config HID_HYPERV_MOUSE
tristate "Microsoft Hyper-V mouse driver"
- depends on HYPERV
+ depends on HYPERV_VMBUS
help
Select this option to enable the Hyper-V mouse driver.
@@ -1309,6 +1319,8 @@ config HID_WINWING
help
Support for WinWing Orion2 throttle base with the following grips:
+ * TGRIP-15E
+ * TGRIP-15EX
* TGRIP-16EX
* TGRIP-18
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 10ae5dedbd84..361a7daedeb8 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -4,6 +4,7 @@
#
hid-y := hid-core.o hid-input.o hid-quirks.o
hid-$(CONFIG_DEBUG_FS) += hid-debug.o
+hid-$(CONFIG_HID_HAPTIC) += hid-haptic.o
obj-$(CONFIG_HID_BPF) += bpf/
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 0f2cbae39b2b..7017bfa59093 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -39,8 +39,12 @@ int amd_sfh_get_report(struct hid_device *hid, int report_id, int report_type)
struct amdtp_hid_data *hid_data = hid->driver_data;
struct amdtp_cl_data *cli_data = hid_data->cli_data;
struct request_list *req_list = &cli_data->req_list;
+ struct amd_input_data *in_data = cli_data->in_data;
+ struct amd_mp2_dev *mp2;
int i;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->hid_sensor_hubs[i] == hid) {
struct request_list *new = kzalloc(sizeof(*new), GFP_KERNEL);
@@ -75,6 +79,8 @@ void amd_sfh_work(struct work_struct *work)
u8 report_id, node_type;
u8 report_size = 0;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
req_node = list_last_entry(&req_list->list, struct request_list, list);
list_del(&req_node->list);
current_index = req_node->current_index;
@@ -83,7 +89,6 @@ void amd_sfh_work(struct work_struct *work)
node_type = req_node->report_type;
kfree(req_node);
- mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
mp2_ops = mp2->mp2_ops;
if (node_type == HID_FEATURE_REPORT) {
report_size = mp2_ops->get_feat_rep(sensor_index, report_id,
@@ -107,6 +112,8 @@ void amd_sfh_work(struct work_struct *work)
cli_data->cur_hid_dev = current_index;
cli_data->sensor_requested_cnt[current_index] = 0;
amdtp_hid_wakeup(cli_data->hid_sensor_hubs[current_index]);
+ if (!list_empty(&req_list->list))
+ schedule_delayed_work(&cli_data->work, 0);
}
void amd_sfh_work_buffer(struct work_struct *work)
@@ -117,9 +124,10 @@ void amd_sfh_work_buffer(struct work_struct *work)
u8 report_size;
int i;
+ mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
+ guard(mutex)(&mp2->lock);
for (i = 0; i < cli_data->num_hid_devices; i++) {
if (cli_data->sensor_sts[i] == SENSOR_ENABLED) {
- mp2 = container_of(in_data, struct amd_mp2_dev, in_data);
report_size = mp2->mp2_ops->get_in_rep(i, cli_data->sensor_idx[i],
cli_data->report_id[i], in_data);
hid_input_report(cli_data->hid_sensor_hubs[i], HID_INPUT_REPORT,
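guard(mutex)(&mp2->lock) comes from <linux/cleanup.h>: it takes the mutex and drops it automatically when the enclosing scope exits, which is why none of the early returns in the hunks above need an explicit unlock. A self-contained sketch with illustrative names:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);

	static int demo(bool ready)
	{
		guard(mutex)(&demo_lock);	/* released on every return path */
		if (!ready)
			return -EBUSY;		/* unlocks here... */
		return 0;			/* ...and here */
	}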
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_common.h b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
index f44a3bb2fbd4..78f830c133e5 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_common.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_common.h
@@ -10,6 +10,7 @@
#ifndef AMD_SFH_COMMON_H
#define AMD_SFH_COMMON_H
+#include <linux/mutex.h>
#include <linux/pci.h>
#include "amd_sfh_hid.h"
@@ -59,6 +60,8 @@ struct amd_mp2_dev {
u32 mp2_acs;
struct sfh_dev_status dev_en;
struct work_struct work;
+ /* lock to protect mp2 data */
+ struct mutex lock;
u8 init_done;
u8 rver;
};
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2983af969579..1d9f955573aa 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -466,6 +466,10 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (!privdata->cl_data)
return -ENOMEM;
+ rc = devm_mutex_init(&pdev->dev, &privdata->lock);
+ if (rc)
+ return rc;
+
privdata->sfh1_1_ops = (const struct amd_sfh1_1_ops *)id->driver_data;
if (privdata->sfh1_1_ops) {
if (boot_cpu_data.x86 >= 0x1A)
diff --git a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
index 0a9b44ce4904..b0bab2a1ddcc 100644
--- a/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
+++ b/drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
@@ -194,6 +194,8 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
if (rc)
goto cleanup;
+ mp2_ops->stop(privdata, cl_data->sensor_idx[i]);
+ amd_sfh_wait_for_response(privdata, cl_data->sensor_idx[i], DISABLE_SENSOR);
writel(0, privdata->mmio + amd_get_p2c_val(privdata, 0));
mp2_ops->start(privdata, info);
status = amd_sfh_wait_for_response
diff --git a/drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c b/drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c
new file mode 100644
index 000000000000..183d408d893a
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__Inspiroy-2-M.bpf.c
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Red Hat, Inc
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256C
+#define PID_INSPIROY_2_M 0x0067
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_HUION, PID_INSPIROY_2_M),
+);
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+/* The prefix of the firmware ID we expect for this device. The full firmware
+ * string has a date suffix, e.g. HUION_T21k_221221
+ */
+char EXPECTED_FIRMWARE_ID[] = "HUION_T21k_";
+
+/* How this BPF program works: the tablet has two modes, firmware mode and
+ * tablet mode. In firmware mode (out of the box) the tablet sends button events
+ * and the dial as keyboard combinations. In tablet mode it uses a vendor specific
+ * hid report to report everything instead.
+ * Depending on the mode some hid reports are never sent and the corresponding
+ * devices are mute.
+ *
+ * To switch the tablet use e.g. https://github.com/whot/huion-switcher
+ * or one of the tools from the digimend project
+ *
+ * This BPF works for both modes. The huion-switcher tool sets the
+ * HUION_FIRMWARE_ID udev property - if that is set, we disable the firmware
+ * pad and pen reports (by making them vendor collections that are ignored).
+ * If that property is not set, we fix all hidraw nodes so the tablet works
+ * in either mode. The drawback is that the device will show up twice if
+ * you bind it to all event nodes.
+ *
+ * Default report descriptor for the first exposed hidraw node:
+ *
+ * # HUION Huion Tablet_H641P
+ * # Report descriptor length: 18 bytes
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 0xFF00) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x58, // Report Size (88) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 13
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * R: 18 06 00 ff 09 01 a1 01 85 08 75 58 95 01 09 01 81 02 c0
+ *
+ * This rdesc does nothing until the tablet is switched to raw mode, see
+ * https://github.com/whot/huion-switcher
+ *
+ *
+ * Second hidraw node is the Pen. This one sends events until the tablet is
+ * switched to raw mode, then it's mute.
+ *
+ * # Report descriptor length: 93 bytes
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x0a, // Report ID (10) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x01, // Collection (Application) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x45, // Usage (Eraser) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18 <-- has no Invert eraser
+ * # 0x15, 0x00, // Logical Minimum (0) 20
+ * # 0x25, 0x01, // Logical Maximum (1) 22
+ * # 0x75, 0x01, // Report Size (1) 24
+ * # 0x95, 0x06, // Report Count (6) 26
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x09, 0x32, // Usage (In Range) 30
+ * # 0x75, 0x01, // Report Size (1) 32
+ * # 0x95, 0x01, // Report Count (1) 34
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 36
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 40
+ * # 0x09, 0x30, // Usage (X) 42
+ * # 0x09, 0x31, // Usage (Y) 44
+ * # 0x55, 0x0d, // Unit Exponent (-3) 46 <-- change to -2
+ * # 0x65, 0x33, // Unit (EnglishLinear: in³) 48 <-- change in³ to in
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 50
+ * # 0x35, 0x00, // Physical Minimum (0) 53
+ * # 0x46, 0x00, 0x08, // Physical Maximum (2048) 55 <-- invalid size
+ * # 0x75, 0x10, // Report Size (16) 58
+ * # 0x95, 0x02, // Report Count (2) 60
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 62
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 64
+ * # 0x09, 0x30, // Usage (Tip Pressure) 66
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 68
+ * # 0x75, 0x10, // Report Size (16) 71
+ * # 0x95, 0x01, // Report Count (1) 73
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 75
+ * # 0x09, 0x3d, // Usage (X Tilt) 77 <-- No tilt reported
+ * # 0x09, 0x3e, // Usage (Y Tilt) 79
+ * # 0x15, 0x81, // Logical Minimum (-127) 81
+ * # 0x25, 0x7f, // Logical Maximum (127) 83
+ * # 0x75, 0x08, // Report Size (8) 85
+ * # 0x95, 0x02, // Report Count (2) 87
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 89
+ * # 0xc0, // End Collection 91
+ * # 0xc0, // End Collection 92
+ * R: 93 05 0d 09 02 a1 01 85 0a 09 20 a1 01 09 42 09 44 09 45 09 3c 15 00 25 01 75 01 95 06 81 02 09 32 75 01 95 01 81 02 81 03 05 01 09 30 09 31 55 0d 65 33 26 ff 7f 35 00 46 00 08 75 10 95 02 81 02 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 09 3d 09 3e 15 81 25 7f 75 08 95 02 81 02 c0 c0
+ *
+ * Third hidraw node is the pad which sends a combination of keyboard shortcuts until
+ * the tablet is switched to raw mode, then it's mute:
+ *
+ * # Report descriptor length: 65 bytes
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x06, // Usage (Keyboard) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x03, // Report ID (3) 6
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 8
+ * # 0x19, 0xe0, // UsageMinimum (224) 10
+ * # 0x29, 0xe7, // UsageMaximum (231) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x08, // Report Count (8) 20
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 22
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 24
+ * # 0x19, 0x00, // UsageMinimum (0) 26
+ * # 0x29, 0xff, // UsageMaximum (255) 28
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 30
+ * # 0x75, 0x08, // Report Size (8) 33
+ * # 0x95, 0x06, // Report Count (6) 35
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 37
+ * # 0xc0, // End Collection 39
+ * # 0x05, 0x0c, // Usage Page (Consumer) 40
+ * # 0x09, 0x01, // Usage (Consumer Control) 42
+ * # 0xa1, 0x01, // Collection (Application) 44
+ * # 0x85, 0x04, // Report ID (4) 46
+ * # 0x19, 0x00, // UsageMinimum (0) 48
+ * # 0x2a, 0x3c, 0x02, // UsageMaximum (572) 50
+ * # 0x15, 0x00, // Logical Minimum (0) 53
+ * # 0x26, 0x3c, 0x02, // Logical Maximum (572) 55
+ * # 0x95, 0x01, // Report Count (1) 58
+ * # 0x75, 0x10, // Report Size (16) 60
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 62
+ * # 0xc0, // End Collection 64
+ * R: 65 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0 05 0c 09 01 a1 01 85 04 19 00 2a 3c 02 15 00 26 3c 02 95 01 75 10 81 00 c0
+ * N: HUION Huion Tablet_H641P
+ */
+
+#define PAD_REPORT_DESCRIPTOR_LENGTH 133
+#define PEN_REPORT_DESCRIPTOR_LENGTH 93
+#define VENDOR_REPORT_DESCRIPTOR_LENGTH 36
+#define PAD_REPORT_ID 3
+#define PEN_REPORT_ID 10
+#define VENDOR_REPORT_ID 8
+#define PAD_REPORT_LENGTH 8
+#define PEN_REPORT_LENGTH 10
+#define VENDOR_REPORT_LENGTH 12
+
+
+__u16 last_button_state;
+
+static const __u8 fixed_rdesc_pad[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(PAD_REPORT_ID)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 in report - just exists so we get to be a tablet pad
+ Usage_Dig_BarrelSwitch // BTN_STYLUS
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // padding
+ Input(Const)
+ // Bytes 2/3 in report - just exists so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Byte 4 in report is the wheel
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ // Byte 5 is the button state
+ UsagePage_Button
+ UsageMinimum_i8(0x1)
+ UsageMaximum_i8(0x8)
+ LogicalMinimum_i8(0x1)
+ LogicalMaximum_i8(0x8)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Arr|Abs)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(PAD_REPORT_LENGTH)
+ )
+};
+
+static const __u8 fixed_rdesc_pen[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ // -- Byte 0 in report
+ ReportId(PEN_REPORT_ID)
+ Usage_Dig_Pen
+ CollectionPhysical(
+ // -- Byte 1 in report
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch // maps eraser to BTN_STYLUS2
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportSize(1)
+ ReportCount(3)
+ Input(Var|Abs)
+ ReportCount(4) // Padding
+ Input(Const)
+ Usage_Dig_InRange
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-1)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(160)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32767)
+ Usage_GD_X
+ Input(Var|Abs) // Bytes 2+3
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(100)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32767)
+ Usage_GD_Y
+ Input(Var|Abs) // Bytes 4+5
+ )
+ UsagePage_Digitizers
+ Usage_Dig_TipPressure
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8191)
+ Input(Var|Abs) // Byte 6+7
+ // Two bytes padding so we don't need to change the report at all
+ ReportSize(8)
+ ReportCount(2)
+ Input(Const) // Bytes 8+9
+ )
+ )
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ // Byte 0
+ // We leave the pen on the vendor report ID
+ ReportId(VENDOR_REPORT_ID)
+ Usage_Dig_Pen
+ CollectionPhysical(
+ // Byte 1 are the buttons
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportSize(1)
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch
+ ReportCount(3)
+ Input(Var|Abs)
+ ReportCount(4) // Padding
+ Input(Const)
+ Usage_Dig_InRange
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-1)
+ // Note: reported logical range differs
+ // from the pen report ID for x and y
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(32000)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(160)
+ // Bytes 2/3 in report
+ Usage_GD_X
+ Input(Var|Abs)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(20000)
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(100)
+ // Bytes 4/5 in report
+ Usage_GD_Y
+ Input(Var|Abs)
+ )
+ // Bytes 6/7 in report
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8192)
+ Usage_Dig_TipPressure
+ Input(Var|Abs)
+ )
+ )
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0
+ ReportId(PAD_REPORT_ID)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 are the buttons
+ Usage_Dig_BarrelSwitch // BTN_STYLUS, needed so we get to be a tablet pad
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // Padding
+ Input(Const)
+ // Bytes 2/3 - x/y just exist so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Bytes 4 and 5 are the button state
+ UsagePage_Button
+ UsageMinimum_i8(0x1)
+ UsageMaximum_i8(0xa)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(10)
+ ReportSize(1)
+ Input(Var|Abs)
+ Usage_i8(0x31) // maps to BTN_SOUTH
+ ReportCount(1)
+ Input(Var|Abs)
+ ReportCount(5)
+ Input(Const)
+ // Byte 6 is the wheel
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(8)
+ Input(Var|Rel)
+ )
+ // Make sure we match our original report length
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
+static const __u8 disabled_rdesc_pen[] = {
+ FixedSizeVendorReport(PEN_REPORT_LENGTH)
+};
+
+static const __u8 disabled_rdesc_pad[] = {
+ FixedSizeVendorReport(PAD_REPORT_LENGTH)
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* If we have a firmware ID and it matches our expected prefix, we
+ * disable the default pad/pen nodes. They won't send events
+ * but cause duplicate devices.
+ */
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+ if (rdesc_size == PAD_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pad, sizeof(disabled_rdesc_pad));
+ return sizeof(disabled_rdesc_pad);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+ }
+ if (rdesc_size == PEN_REPORT_DESCRIPTOR_LENGTH) {
+ if (have_fw_id) {
+ __builtin_memcpy(data, disabled_rdesc_pen, sizeof(disabled_rdesc_pen));
+ return sizeof(disabled_rdesc_pen);
+ }
+
+ __builtin_memcpy(data, fixed_rdesc_pen, sizeof(fixed_rdesc_pen));
+ return sizeof(fixed_rdesc_pen);
+ }
+ /* Always fix the vendor mode so the tablet will work even if nothing sets
+ * the udev property (e.g. huion-switcher run manually)
+ */
+ if (rdesc_size == VENDOR_REPORT_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+ }
+ return 0;
+}
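+
+/*
+ * Note on the returns above: as we understand the HID-BPF contract, a
+ * positive return value installs a new report descriptor of that size,
+ * while returning 0 keeps the original descriptor.
+ */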
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(inspiroy_2_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Only sent if tablet is in default mode */
+ if (data[0] == PAD_REPORT_ID) {
+ /* Nicely enough, this device only supports one button down at a time so
+ * the reports are easy to match. Buttons are numbered from the top:
+ * Button released: 03 00 00 00 00 00 00 00
+ * Button 1: 03 00 05 00 00 00 00 00 -> b
+ * Button 2: 03 07 11 00 00 00 00 00 -> Ctrl Shift N
+ * Button 3: 03 00 08 00 00 00 00 00 -> e
+ * Button 4: 03 00 0c 00 00 00 00 00 -> i
+ * Button 5: 03 00 2c 00 00 00 00 00 -> space
+ * Button 6: 03 01 08 00 00 00 00 00 -> Ctrl E
+ * Button 7: 03 01 16 00 00 00 00 00 -> Ctrl S
+ * Button 8: 03 05 1d 00 00 00 00 00 -> Ctrl Alt Z
+ *
+ * Wheel down: 03 01 2d 00 00 00 00 00 -> Ctrl -
+ * Wheel up: 03 01 2e 00 00 00 00 00 -> Ctrl =
+ */
+ __u8 button = 0;
+ __u8 wheel = 0;
+
+ switch (data[1] << 8 | data[2]) {
+ case 0x0000:
+ break;
+ case 0x0005:
+ button = 1;
+ break;
+ case 0x0711:
+ button = 2;
+ break;
+ case 0x0008:
+ button = 3;
+ break;
+ case 0x000c:
+ button = 4;
+ break;
+ case 0x002c:
+ button = 5;
+ break;
+ case 0x0108:
+ button = 6;
+ break;
+ case 0x0116:
+ button = 7;
+ break;
+ case 0x051d:
+ button = 8;
+ break;
+ case 0x012d:
+ wheel = -1;
+ break;
+ case 0x012e:
+ wheel = 1;
+ break;
+ }
+
+ __u8 report[6] = {PAD_REPORT_ID, 0x0, 0x0, 0x0, wheel, button};
+
+ __builtin_memcpy(data, report, sizeof(report));
+ return sizeof(report);
+ }
+
+ /* Nothing to do for the PEN_REPORT_ID, it's already mapped */
+
+ /* Only sent if tablet is in raw mode */
+ if (data[0] == VENDOR_REPORT_ID) {
+ /* Pad reports */
+ if (data[1] & 0x20) {
+ /* See fixed_rdesc_pad */
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus;
+ __u8 x;
+ __u8 y;
+ __u16 buttons;
+ __u8 wheel;
+ } __attribute__((packed)) *pad_report;
+ __u8 wheel = 0;
+
+ /* Wheel report */
+ if (data[1] == 0xf1) {
+ if (data[5] == 2)
+ wheel = 0xff;
+ else
+ wheel = data[5];
+ } else {
+ /* data[4] and data[5] are the buttons, mapped correctly */
+ last_button_state = data[4] | (data[5] << 8);
+ wheel = 0;
+ }
+
+ pad_report = (struct pad_report *)data;
+
+ pad_report->report_id = PAD_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->wheel = wheel;
+
+ return sizeof(struct pad_report);
+ }
+
+ /* Pen reports need nothing done */
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(inspiroy_2) = {
+ .hid_device_event = (void *)inspiroy_2_fix_events,
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ case PEN_REPORT_DESCRIPTOR_LENGTH:
+ case VENDOR_REPORT_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c b/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
index 13f64fb49800..79453362bf97 100644
--- a/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
+++ b/drivers/hid/bpf/progs/Huion__Inspiroy-2-S.bpf.c
@@ -163,6 +163,9 @@ char EXPECTED_FIRMWARE_ID[] = "HUION_T21j_";
__u8 last_button_state;
+__u8 last_tip_state;
+__u8 last_sec_barrel_state;
+__u8 force_tip_down_count;
static const __u8 fixed_rdesc_pad[] = {
UsagePage_GenericDesktop
@@ -522,9 +525,31 @@ int BPF_PROG(inspiroy_2_fix_events, struct hid_bpf_ctx *hctx)
pad_report->wheel = wheel;
return sizeof(struct pad_report);
- }
+ } else if (data[1] & 0x80) { /* Pen reports with InRange 1 */
+ __u8 tip_state = data[1] & 0x1;
+ __u8 sec_barrel_state = data[1] & 0x4;
+
+ if (force_tip_down_count > 0) {
+ data[1] |= 0x1;
+ --force_tip_down_count;
+ if (tip_state)
+ force_tip_down_count = 0;
+ }
- /* Pen reports need nothing done */
+ /* Tip was down and we just pressed or released the
+ * secondary barrel switch (the physical eraser
+ * button). The device then sends up to 4 reports
+ * with Tip Switch 0 even though the tip is still
+ * down, so force the tip switch on for those.
+ */
+ if (last_tip_state &&
+ last_sec_barrel_state != sec_barrel_state) {
+ force_tip_down_count = 4;
+ data[1] |= 0x1;
+ }
+ last_tip_state = tip_state;
+ last_sec_barrel_state = sec_barrel_state;
+ }
}
return 0;
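A worked sequence for the hunk above, with hypothetical data[1] values (0x80 in-range, 0x01 tip switch, 0x04 secondary barrel switch):

	data[1] = 0x81  tip down, eraser untouched
	data[1] = 0x84  eraser pressed, tip bit spuriously dropped -> forced to 0x85
	data[1] = 0x84  still spurious                             -> forced to 0x85
	data[1] = 0x85  genuine tip + eraser; force_tip_down_count resets to 0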
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
index 489cb4fcc2cd..5f43e4071848 100644
--- a/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
+++ b/drivers/hid/bpf/progs/Huion__Kamvas-Pro-19.bpf.c
@@ -9,12 +9,15 @@
#define VID_HUION 0x256C
#define PID_KAMVAS_PRO_19 0x006B
+#define PID_KAMVAS_PRO_27 0x006C
#define NAME_KAMVAS_PRO_19 "HUION Huion Tablet_GT1902"
+#define NAME_KAMVAS_PRO_27 "HUION Huion Tablet_GT2701"
#define TEST_PREFIX "uhid test "
HID_BPF_CONFIG(
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, VID_HUION, PID_KAMVAS_PRO_19),
+ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, VID_HUION, PID_KAMVAS_PRO_27),
);
bool prev_was_out_of_range;
@@ -351,7 +354,8 @@ int probe(struct hid_bpf_probe_args *ctx)
if (!__builtin_memcmp(name, TEST_PREFIX, sizeof(TEST_PREFIX) - 1))
name += sizeof(TEST_PREFIX) - 1;
- if (__builtin_memcmp(name, NAME_KAMVAS_PRO_19, sizeof(NAME_KAMVAS_PRO_19)))
+ if (__builtin_memcmp(name, NAME_KAMVAS_PRO_19, sizeof(NAME_KAMVAS_PRO_19)) &&
+ __builtin_memcmp(name, NAME_KAMVAS_PRO_27, sizeof(NAME_KAMVAS_PRO_27)))
ctx->retval = -EINVAL;
hid_bpf_release_context(hctx);
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c
new file mode 100644
index 000000000000..b63f9a48ea45
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__Kamvas13Gen3.bpf.c
@@ -0,0 +1,1395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Nicholas LaPointe
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256c
+#define PID_KAMVAS13_GEN3 0x2008
+
+#define VENDOR_DESCRIPTOR_LENGTH 36
+#define TABLET_DESCRIPTOR_LENGTH 368
+#define WHEEL_DESCRIPTOR_LENGTH 108
+
+#define VENDOR_REPORT_ID 8
+#define VENDOR_REPORT_LENGTH 14
+
+#define VENDOR_REPORT_SUBTYPE_PEN 0x08
+#define VENDOR_REPORT_SUBTYPE_PEN_OUT 0x00
+#define VENDOR_REPORT_SUBTYPE_BUTTONS 0x0e
+#define VENDOR_REPORT_SUBTYPE_WHEELS 0x0f
+
+/* For the reports that we create ourselves */
+#define CUSTOM_PAD_REPORT_ID 9
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_ANY, VID_HUION, PID_KAMVAS13_GEN3),
+);
+
+
+/*
+ * This tablet can send reports using one of two different data formats,
+ * depending on what "mode" the tablet is in.
+ *
+ * By default, the tablet will send reports that can be decoded using its
+ * included HID descriptors (descriptors 1 and 2, shown below).
+ * This mode will be called "firmware mode" throughout this file.
+ *
+ * The HID descriptor that describes pen events in firmware mode (descriptor 1)
+ * has multiple bugs:
+ * * "Secondary Tip Switch" instead of "Secondary Barrel Switch"
+ * * "Invert" instead of (or potentially shared with) third barrel button
+ * * Specified tablet area of 2048 in³ instead of 293.8 x 165.2mm
+ * * Specified tilt range of -90 to +90 instead of -60 to +60
+ *
+ * While these can be easily patched up by editing the descriptor, a larger
+ * problem with the firmware mode exists: it is impossible to tell which of the
+ * two wheels are being rotated (or having their central button pressed).
+ *
+ *
+ * By using a tool such as huion-switcher (https://github.com/whot/huion-switcher),
+ * the tablet can be made to send reports using a proprietary format that is not
+ * adequately described by its relevant descriptor (descriptor 0, shown below).
+ * This mode will be called "vendor mode" throughout this file.
+ *
+ * The reports sent while in vendor mode allow for proper decoding of the wheels.
+ *
+ * For simplicity and maximum functionality, this BPF focuses strictly on
+ * enabling one to make use of the vendor mode.
+ */
+
+/*
+ * DESCRIPTORS
+ * DESCRIPTOR 0
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # ┅ 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x68, // Report Size (104) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 13
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 18
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 21
+ * # 0xa1, 0x01, // Collection (Application) 23
+ * # ┅ 0x85, 0x16, // Report ID (22) 25
+ * # 0x75, 0x08, // Report Size (8) 27
+ * # 0x95, 0x07, // Report Count (7) 29
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 31
+ * # ║ 0xb1, 0x02, // Feature (Data,Var,Abs) 33
+ * # 0xc0, // End Collection 35
+ * R: 36 06 00 ff 09 01 a1 01 85 08 75 68 95 01 09 01 81 02 c0 06 00 ff 09 01 a1 01 85 16 75 08 95 07 09 01 b1 02 c0
+ * N: HUION Huion Tablet_GS1333
+ * I: 3 256c 2008
+ *
+ * DESCRIPTOR 1
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # ┅ 0x85, 0x0a, // Report ID (10) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x01, // Collection (Application) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x43, // Usage (Secondary Tip Switch) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18
+ * # 0x09, 0x45, // Usage (Eraser) 20
+ * # 0x15, 0x00, // Logical Minimum (0) 22
+ * # 0x25, 0x01, // Logical Maximum (1) 24
+ * # 0x75, 0x01, // Report Size (1) 26
+ * # 0x95, 0x06, // Report Count (6) 28
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 30
+ * # 0x09, 0x32, // Usage (In Range) 32
+ * # 0x75, 0x01, // Report Size (1) 34
+ * # 0x95, 0x01, // Report Count (1) 36
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 38
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x30, // Usage (X) 44
+ * # 0x09, 0x31, // Usage (Y) 46
+ * # 0x55, 0x0d, // Unit Exponent (-3) 48
+ * # 0x65, 0x33, // Unit (EnglishLinear: in³) 50
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 52
+ * # 0x35, 0x00, // Physical Minimum (0) 55
+ * # 0x46, 0x00, 0x08, // Physical Maximum (2048) 57
+ * # 0x75, 0x10, // Report Size (16) 60
+ * # 0x95, 0x02, // Report Count (2) 62
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 64
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 66
+ * # 0x09, 0x30, // Usage (Tip Pressure) 68
+ * # 0x26, 0xff, 0x3f, // Logical Maximum (16383) 70
+ * # 0x75, 0x10, // Report Size (16) 73
+ * # 0x95, 0x01, // Report Count (1) 75
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 77
+ * # 0x09, 0x3d, // Usage (X Tilt) 79
+ * # 0x09, 0x3e, // Usage (Y Tilt) 81
+ * # 0x15, 0xa6, // Logical Minimum (-90) 83
+ * # 0x25, 0x5a, // Logical Maximum (90) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x02, // Report Count (2) 89
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 91
+ * # 0xc0, // End Collection 93
+ * # 0xc0, // End Collection 94
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 95
+ * # 0x09, 0x04, // Usage (Touch Screen) 97
+ * # 0xa1, 0x01, // Collection (Application) 99
+ * # ┅ 0x85, 0x04, // Report ID (4) 101
+ * # 0x09, 0x22, // Usage (Finger) 103
+ * # 0xa1, 0x02, // Collection (Logical) 105
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 107
+ * # 0x95, 0x01, // Report Count (1) 109
+ * # 0x75, 0x06, // Report Size (6) 111
+ * # 0x09, 0x51, // Usage (Contact Identifier) 113
+ * # 0x15, 0x00, // Logical Minimum (0) 115
+ * # 0x25, 0x3f, // Logical Maximum (63) 117
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 119
+ * # 0x09, 0x42, // Usage (Tip Switch) 121
+ * # 0x25, 0x01, // Logical Maximum (1) 123
+ * # 0x75, 0x01, // Report Size (1) 125
+ * # 0x95, 0x01, // Report Count (1) 127
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 129
+ * # 0x75, 0x01, // Report Size (1) 131
+ * # 0x95, 0x01, // Report Count (1) 133
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 135
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 137
+ * # 0x75, 0x10, // Report Size (16) 139
+ * # 0x55, 0x0e, // Unit Exponent (-2) 141
+ * # 0x65, 0x11, // Unit (SILinear: cm) 143
+ * # 0x09, 0x30, // Usage (X) 145
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 147
+ * # 0x35, 0x00, // Physical Minimum (0) 150
+ * # 0x46, 0x15, 0x0c, // Physical Maximum (3093) 152
+ * # ┇ 0x81, 0x42, // Input (Data,Var,Abs,Null) 155
+ * # 0x09, 0x31, // Usage (Y) 157
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 159
+ * # 0x46, 0xcb, 0x06, // Physical Maximum (1739) 162
+ * # ┇ 0x81, 0x42, // Input (Data,Var,Abs,Null) 165
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 167
+ * # 0x09, 0x30, // Usage (Tip Pressure) 169
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 171
+ * # 0x75, 0x10, // Report Size (16) 174
+ * # 0x95, 0x01, // Report Count (1) 176
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 178
+ * # 0xc0, // End Collection 180
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 181
+ * # 0x09, 0x22, // Usage (Finger) 183
+ * # 0xa1, 0x02, // Collection (Logical) 185
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 187
+ * # 0x95, 0x01, // Report Count (1) 189
+ * # 0x75, 0x06, // Report Size (6) 191
+ * # 0x09, 0x51, // Usage (Contact Identifier) 193
+ * # 0x15, 0x00, // Logical Minimum (0) 195
+ * # 0x25, 0x3f, // Logical Maximum (63) 197
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 199
+ * # 0x09, 0x42, // Usage (Tip Switch) 201
+ * # 0x25, 0x01, // Logical Maximum (1) 203
+ * # 0x75, 0x01, // Report Size (1) 205
+ * # 0x95, 0x01, // Report Count (1) 207
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 209
+ * # 0x75, 0x01, // Report Size (1) 211
+ * # 0x95, 0x01, // Report Count (1) 213
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 215
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 217
+ * # 0x75, 0x10, // Report Size (16) 219
+ * # 0x55, 0x0e, // Unit Exponent (-2) 221
+ * # 0x65, 0x11, // Unit (SILinear: cm) 223
+ * # 0x09, 0x30, // Usage (X) 225
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 227
+ * # 0x35, 0x00, // Physical Minimum (0) 230
+ * # 0x46, 0x15, 0x0c, // Physical Maximum (3093) 232
+ * # ┇ 0x81, 0x42, // Input (Data,Var,Abs,Null) 235
+ * # 0x09, 0x31, // Usage (Y) 237
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 239
+ * # 0x46, 0xcb, 0x06, // Physical Maximum (1739) 242
+ * # ┇ 0x81, 0x42, // Input (Data,Var,Abs,Null) 245
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 247
+ * # 0x09, 0x30, // Usage (Tip Pressure) 249
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 251
+ * # 0x75, 0x10, // Report Size (16) 254
+ * # 0x95, 0x01, // Report Count (1) 256
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 258
+ * # 0xc0, // End Collection 260
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 261
+ * # 0x09, 0x56, // Usage (Scan Time) 263
+ * # 0x55, 0x00, // Unit Exponent (0) 265
+ * # 0x65, 0x00, // Unit (None) 267
+ * # 0x27, 0xff, 0xff, 0xff, 0x7f, // Logical Maximum (2147483647) 269
+ * # 0x95, 0x01, // Report Count (1) 274
+ * # 0x75, 0x20, // Report Size (32) 276
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 278
+ * # 0x09, 0x54, // Usage (Contact Count) 280
+ * # 0x25, 0x7f, // Logical Maximum (127) 282
+ * # 0x95, 0x01, // Report Count (1) 284
+ * # 0x75, 0x08, // Report Size (8) 286
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 288
+ * # 0x75, 0x08, // Report Size (8) 290
+ * # 0x95, 0x08, // Report Count (8) 292
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 294
+ * # ┅ 0x85, 0x05, // Report ID (5) 296
+ * # 0x09, 0x55, // Usage (Contact Count Maximum) 298
+ * # 0x25, 0x0a, // Logical Maximum (10) 300
+ * # 0x75, 0x08, // Report Size (8) 302
+ * # 0x95, 0x01, // Report Count (1) 304
+ * # ║ 0xb1, 0x02, // Feature (Data,Var,Abs) 306
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page FF00) 308
+ * # 0x09, 0xc5, // Usage (Vendor Usage 0xc5) 311
+ * # ┅ 0x85, 0x06, // Report ID (6) 313
+ * # 0x15, 0x00, // Logical Minimum (0) 315
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 317
+ * # 0x75, 0x08, // Report Size (8) 320
+ * # 0x96, 0x00, 0x01, // Report Count (256) 322
+ * # ║ 0xb1, 0x02, // Feature (Data,Var,Abs) 325
+ * # 0xc0, // End Collection 327
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 328
+ * # 0x09, 0x06, // Usage (Keyboard) 330
+ * # 0xa1, 0x01, // Collection (Application) 332
+ * # ┅ 0x85, 0x03, // Report ID (3) 334
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 336
+ * # 0x19, 0xe0, // UsageMinimum (224) 338
+ * # 0x29, 0xe7, // UsageMaximum (231) 340
+ * # 0x15, 0x00, // Logical Minimum (0) 342
+ * # 0x25, 0x01, // Logical Maximum (1) 344
+ * # 0x75, 0x01, // Report Size (1) 346
+ * # 0x95, 0x08, // Report Count (8) 348
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 350
+ * # 0x05, 0x07, // Usage Page (Keyboard/Keypad) 352
+ * # 0x19, 0x00, // UsageMinimum (0) 354
+ * # 0x29, 0xff, // UsageMaximum (255) 356
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 358
+ * # 0x75, 0x08, // Report Size (8) 361
+ * # 0x95, 0x06, // Report Count (6) 363
+ * # ┇ 0x81, 0x00, // Input (Data,Arr,Abs) 365
+ * # 0xc0, // End Collection 367
+ * R: 368 05 0d 09 02 a1 01 85 0a 09 20 a1 01 09 42 09 44 09 43 09 3c 09 45 15 00 25 01 75 01 95 06 81 02 09 32 75 01 95 01 81 02 81 03 05 01 09 30 09 31 55 0d 65 33 26 ff 7f 35 00 46 00 08 75 10 95 02 81 02 05 0d 09 30 26 ff 3f 75 10 95 01 81 02 09 3d 09 3e 15 a6 25 5a 75 08 95 02 81 02 c0 c0 05 0d 09 04 a1 01 85 04 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 56 55 00 65 00 27 ff ff ff 7f 95 01 75 20 81 02 09 54 25 7f 95 01 75 08 81 02 75 08 95 08 81 03 85 05 09 55 25 0a 75 08 95 01 b1 02 06 00 ff 09 c5 85 06 15 00 26 ff 00 75 08 96 00 01 b1 02 c0 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0
+ * N: HUION Huion Tablet_GS1333
+ * I: 3 256c 2008
+ *
+ * DESCRIPTOR 2
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x0e, // Usage (System Multi-Axis Controller) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # ┅ 0x85, 0x11, // Report ID (17) 6
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 8
+ * # 0x09, 0x21, // Usage (Puck) 10
+ * # 0xa1, 0x02, // Collection (Logical) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x01, // Report Count (1) 20
+ * # 0xa1, 0x00, // Collection (Physical) 22
+ * # 0x05, 0x09, // Usage Page (Button) 24
+ * # 0x09, 0x01, // Usage (Button 1) 26
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 30
+ * # 0x09, 0x33, // Usage (Touch) 32
+ * # ┇ 0x81, 0x02, // Input (Data,Var,Abs) 34
+ * # 0x95, 0x06, // Report Count (6) 36
+ * # ┇ 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0xa1, 0x02, // Collection (Logical) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x37, // Usage (Dial) 44
+ * # 0x16, 0x00, 0x80, // Logical Minimum (-32768) 46
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 49
+ * # 0x75, 0x10, // Report Size (16) 52
+ * # 0x95, 0x01, // Report Count (1) 54
+ * # ┇ 0x81, 0x06, // Input (Data,Var,Rel) 56
+ * # 0x35, 0x00, // Physical Minimum (0) 58
+ * # 0x46, 0x10, 0x0e, // Physical Maximum (3600) 60
+ * # 0x15, 0x00, // Logical Minimum (0) 63
+ * # 0x26, 0x10, 0x0e, // Logical Maximum (3600) 65
+ * # 0x09, 0x48, // Usage (Resolution Multiplier) 68
+ * # ║ 0xb1, 0x02, // Feature (Data,Var,Abs) 70
+ * # 0x45, 0x00, // Physical Maximum (0) 72
+ * # 0xc0, // End Collection 74
+ * # 0x75, 0x08, // Report Size (8) 75
+ * # 0x95, 0x01, // Report Count (1) 77
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 79
+ * # 0x75, 0x08, // Report Size (8) 81
+ * # 0x95, 0x01, // Report Count (1) 83
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x01, // Report Count (1) 89
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 91
+ * # 0x75, 0x08, // Report Size (8) 93
+ * # 0x95, 0x01, // Report Count (1) 95
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 97
+ * # 0x75, 0x08, // Report Size (8) 99
+ * # 0x95, 0x01, // Report Count (1) 101
+ * # ┇ 0x81, 0x01, // Input (Cnst,Arr,Abs) 103
+ * # 0xc0, // End Collection 105
+ * # 0xc0, // End Collection 106
+ * # 0xc0, // End Collection 107
+ * R: 108 05 01 09 0e a1 01 85 11 05 0d 09 21 a1 02 15 00 25 01 75 01 95 01 a1 00 05 09 09 01 81 02 05 0d 09 33 81 02 95 06 81 03 a1 02 05 01 09 37 16 00 80 26 ff 7f 75 10 95 01 81 06 35 00 46 10 0e 15 00 26 10 0e 09 48 b1 02 45 00 c0 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 c0 c0 c0
+ * N: HUION Huion Tablet_GS1333
+ * I: 3 256c 2008
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * VENDOR MODE
+ * HUION_FIRMWARE_ID="HUION_M22c_240606"
+ * HUION_MAGIC_BYTES="140388e500108100ff3fd8130307008008004010"
+ *
+ * MAGIC BYTES
+ * [LogicalMaximum, X] [LogicalMaximum, Y] [LogicalMaximum, Pressure] [ LPI]
+ * 14 03 [ 88 e5] 00 [ 10 81] 00 [ ff 3f] [d8 13] 03 07 00 80 08 00 40 10
+ *
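+ * The bracketed maxima are little-endian u16s; a hedged decoding sketch
+ * (buffer name illustrative):
+ *
+ *   __u16 x_max = magic[2]  | (magic[3]  << 8);   -> 0xe588
+ *   __u16 y_max = magic[5]  | (magic[6]  << 8);   -> 0x8110
+ *   __u16 p_max = magic[8]  | (magic[9]  << 8);   -> 0x3fff
+ *   __u16 lpi   = magic[10] | (magic[11] << 8);   -> 0x13d8 (5080)
+ *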
+ *
+ * HIDRAW 0
+ * DESCRIPTIONS
+ * report_subtype = (data[1] >> 4) & 0x0f
+ *
+ * REPORT SUBTYPES
+ * 0x0e Buttons
+ * (data[4] & 0x01) button 1
+ * (data[4] & 0x02) button 2
+ * (data[4] & 0x04) button 3
+ * (data[4] & 0x08) button 4
+ * (data[4] & 0x10) button 5
+ * (data[4] & 0x20) button 6 (top wheel button)
+ * (data[4] & 0x40) button 7 (bottom wheel button)
+ *
+ * All tablet buttons release with the same report:
+ * 08 e0 01 01 00 00 00 00 00 00 00 00 00 00
+ *
+ * Despite data[4] looking like a bit field, only one button
+ * can be unambiguously tracked at a time.
+ * (See NOTES ON SIMULTANEOUS BUTTON HOLDS at the end of this
+ * comment for examples of the confusion this can create.)
+ *
+ * All buttons, with the exceptions of 6 and 7, will repeatedly
+ * report a press event approximately every 225ms while held.
+ *
+ * 0x0f Wheels
+ * data[3] == 1: top wheel
+ * data[3] == 2: bottom wheel
+ * data[5] == 1: clockwise
+ * data[5] == 2: counter-clockwise
+ *
+ * 0x08/0x00 Pen
+ * report_subtype == 0x08: in-range
+ * report_subtype == 0x00: out-of-range
+ * For clarity, this is also equivalent to:
+ * (data[1] & 0x80) in-range
+ *
+ * Switches
+ * (data[1] & 0x01) tip switch
+ * (data[1] & 0x02) barrel switch
+ * (data[1] & 0x04) secondary barrel switch
+ * (data[1] & 0x08) third barrel switch
+ *
+ * Unfortunately, I don't have a pen with an eraser, so I can't
+ * confirm where the invert and eraser bits reside.
+ * If we guess using the definitions from HID descriptor 1,
+ * then they might be...
+ * (data[1] & 0x08) invert (conflicts with third barrel switch)
+ * (data[1] & 0x10) eraser
+ *
+ * data[2], data[3] X (little-endian, maximum 0xe588)
+ *
+ * data[4], data[5] Y (little-endian, maximum 0x8110)
+ *
+ * data[6], data[7] Pressure (little-endian, maximum 0x3fff)
+ *
+ * data[10] X tilt (signed, -60 to +60)
+ * data[11] Y tilt (signed, -60 to +60, inverted)
+ *
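+ * Pulling the fields above out of one 14-byte vendor report, as a
+ * minimal sketch (variable names illustrative):
+ *
+ *   __u8  subtype  = (data[1] >> 4) & 0x0f;
+ *   __u8  in_range = data[1] & 0x80;
+ *   __u16 x        = data[2] | (data[3] << 8);
+ *   __u16 y        = data[4] | (data[5] << 8);
+ *   __u16 pressure = data[6] | (data[7] << 8);
+ *   __s8  tilt_x   = (__s8)data[10];
+ *   __s8  tilt_y   = (__s8)data[11];   (axis inverted)
+ *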
+ *
+ * EXAMPLE REPORTS
+ * Top wheel button, press, hold, then release
+ * E: 000000.000040 14 08 e0 01 01 20 00 00 00 00 00 00 00 00 00
+ * E: 000001.531559 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00
+ *
+ * Bottom wheel button, press, hold, then release
+ * E: 000002.787603 14 08 e0 01 01 40 00 00 00 00 00 00 00 00 00
+ * E: 000004.215609 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00
+ *
+ *
+ * Top wheel rotation, one detent CW
+ * E: 000194.003899 14 08 f1 01 01 00 01 00 00 00 00 00 00 00 00
+ *
+ * Top wheel rotation, one detent CCW
+ * E: 000194.997812 14 08 f1 01 01 00 02 00 00 00 00 00 00 00 00
+ *
+ * Bottom wheel rotation, one detent CW
+ * E: 000196.693840 14 08 f1 01 02 00 01 00 00 00 00 00 00 00 00
+ *
+ * Bottom wheel rotation, one detent CCW
+ * E: 000197.757895 14 08 f1 01 02 00 02 00 00 00 00 00 00 00 00
+ *
+ *
+ * Button 1, press, hold, then release
+ * E: 000000.000149 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00 < press
+ * E: 000000.447598 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000000.673586 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00
+ * E: 000000.900582 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00
+ * E: 000001.126703 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00
+ * E: 000001.347706 14 08 e0 01 01 01 00 00 00 00 00 00 00 00 00
+ * E: 000001.533721 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ * Button 2, press, hold, then release
+ * E: 000003.304735 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00 < press
+ * E: 000003.746743 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000003.973741 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.199832 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.426732 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.647738 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.874733 14 08 e0 01 01 02 00 00 00 00 00 00 00 00 00
+ * E: 000004.930713 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ * Button 3, press, hold, then release
+ * E: 000006.650346 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00 < press
+ * E: 000007.051782 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000007.273738 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000007.499794 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000007.726725 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000007.947765 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000008.174755 14 08 e0 01 01 04 00 00 00 00 00 00 00 00 00
+ * E: 000008.328786 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ * Button 4, press, hold, then release
+ * E: 000009.893820 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00 < press
+ * E: 000010.274781 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000010.500931 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000010.722777 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000010.948778 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000011.175799 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000011.401153 14 08 e0 01 01 08 00 00 00 00 00 00 00 00 00
+ * E: 000011.432114 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ * Button 5, press, hold, then release
+ * E: 000013.007778 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00 < press
+ * E: 000013.424741 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00 < starting to auto-repeat, every ~225ms
+ * E: 000013.651715 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00
+ * E: 000013.872763 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00
+ * E: 000014.099789 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00
+ * E: 000014.325734 14 08 e0 01 01 10 00 00 00 00 00 00 00 00 00
+ * E: 000014.438080 14 08 e0 01 01 00 00 00 00 00 00 00 00 00 00 < release
+ *
+ *
+ * Pen: Top-left, then out of range
+ * E: 000368.572184 14 08 80 00 00 00 00 00 00 00 00 fb ed 03 00
+ * E: 000368.573030 14 08 00 00 00 00 00 00 00 00 00 fb ed 03 00
+ *
+ * Pen: Bottom-right, then out of range
+ * E: 000544.433185 14 08 80 88 e5 10 81 00 00 00 00 00 00 03 00
+ * E: 000544.434183 14 08 00 88 e5 10 81 00 00 00 00 00 00 03 00
+ *
+ * Pen: Max Y tilt (tip of pen points down)
+ * E: 000002.231927 14 08 80 f5 5d 6c 36 00 00 00 00 09 3c 03 00
+ *
+ * Pen: Min Y Tilt (tip of pen points up)
+ * E: 000657.593338 14 08 80 5f 69 fa 2c 00 00 00 00 fe c4 03 00
+ *
+ * Pen: Max X tilt (tip of pen points left)
+ * E: 000742.246503 14 08 80 2a 4f c4 38 00 00 00 00 3c ed 03 00
+ *
+ * Pen: Min X Tilt (tip of pen points right)
+ * E: 000776.404446 14 08 00 18 85 7c 3b 00 00 00 00 c4 ed 03 00
+ *
+ * Pen: Tip switch, max pressure, then low pressure
+ * E: 001138.935675 14 08 81 d2 66 04 40 ff 3f 00 00 00 08 03 00
+ *
+ * E: 001142.403715 14 08 81 9d 69 47 3e 82 04 00 00 00 07 03 00
+ *
+ * Pen: Barrel switch
+ * E: 001210.645652 14 08 82 0d 72 ea 2b 00 00 00 00 db c4 03 00
+ *
+ * Pen: Secondary barrel switch
+ * E: 001211.519729 14 08 84 2c 71 51 2b 00 00 00 00 da c4 03 00
+ *
+ * Pen: Third switch
+ * E: 001212.443722 14 08 88 1d 72 df 2b 00 00 00 00 dc c4 03 00
+ *
+ *
+ * HIDRAW 1
+ * No reports
+ *
+ *
+ * HIDRAW 2
+ * No reports
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ *
+ * FIRMWARE MODE
+ * HIDRAW 0
+ * No reports
+ *
+ *
+ * HIDRAW 1
+ * EXAMPLE REPORTS
+ * Top wheel button, *release*
+ * E: 000067.043739 8 03 00 00 00 00 00 00 00
+ *
+ * Bottom wheel button, *release*
+ * E: 000068.219161 8 03 00 00 00 00 00 00 00
+ *
+ *
+ * Button 1, press, then release
+ * E: 000163.767870 8 03 00 05 00 00 00 00 00
+ * E: 000165.969193 8 03 00 00 00 00 00 00 00
+ *
+ * Button 2, press, then release
+ * E: 000261.728935 8 03 05 11 00 00 00 00 00
+ * E: 000262.956220 8 03 00 00 00 00 00 00 00
+ *
+ * Button 3, press, then release
+ * E: 000289.127881 8 03 01 16 00 00 00 00 00
+ * E: 000290.014594 8 03 00 00 00 00 00 00 00
+ *
+ * Button 4, press, then release
+ * E: 000303.025839 8 03 00 2c 00 00 00 00 00
+ * E: 000303.994479 8 03 00 00 00 00 00 00 00
+ *
+ * Button 5, press, then release
+ * E: 000315.500835 8 03 05 1d 00 00 00 00 00
+ * E: 000316.603274 8 03 00 00 00 00 00 00 00
+ *
+ * BUTTON SUMMARY
+ * 1 E: 000163.767870 8 03 00 05 00 00 00 00 00 Keyboard: B
+ * 2 E: 000261.728935 8 03 05 11 00 00 00 00 00 Keyboard: LCtrl+LAlt N
+ * 3 E: 000289.127881 8 03 01 16 00 00 00 00 00 Keyboard: LCtrl S
+ * 4 E: 000303.025839 8 03 00 2c 00 00 00 00 00 Keyboard: Space
+ * 5 E: 000315.500835 8 03 05 1d 00 00 00 00 00 Keyboard: LCtrl+LAlt Z
+ *
+ * All buttons (including the wheel buttons) release the same way:
+ * 03 00 00 00 00 00 00 00
+ *
+ *
+ * Pen: Top-left, then out of range
+ * E: 000063.196828 10 0a c0 00 00 00 00 00 00 00 02
+ * E: 000063.197762 10 0a 00 00 00 00 00 00 00 00 02
+ *
+ * Pen: Bottom-right, then out of range
+ * E: 000197.123138 10 0a c0 ff 7f ff 7f 00 00 00 00
+ * E: 000197.124915 10 0a 00 ff 7f ff 7f 00 00 00 00
+ *
+ * Pen: Max Y Tilt (tip of pen points up)
+ * E: 000291.399541 10 0a c0 19 32 0b 58 00 00 00 3c
+ *
+ * Pen: Min Y tilt (tip of pen points down)
+ * E: 000340.888288 10 0a c0 85 40 89 6e 00 00 17 c4
+ *
+ * Pen: Max X tilt (tip of pen points left)
+ * E: 000165.575115 10 0a c0 a7 34 99 42 00 00 3c f4
+ *
+ * Pen: Min X Tilt (tip of pen points right)
+ * E: 000129.507883 10 0a c0 ea 4b 08 40 00 00 c4 1a
+ *
+ * Pen: Tip switch, max pressure, then low pressure
+ * E: 000242.077160 10 0a c1 7e 3c 12 31 ff 3f 03 fd
+ *
+ * E: 000339.139188 10 0a c1 ee 3a 9e 32 b5 00 06 f6
+ *
+ * Pen: Barrel switch
+ * E: 000037.949777 10 0a c2 5c 28 47 2a 00 00 f6 3c
+ *
+ * Pen: Secondary barrel switch
+ * E: 000038.320840 10 0a c4 e4 27 fd 29 00 00 f3 38
+ *
+ * Pen: Third switch
+ * E: 000038.923822 10 0a c8 97 27 5f 29 00 00 f2 33
+ *
+ *
+ * HIDRAW 2
+ * EXAMPLE REPORTS
+ * Either wheel rotation, one detent CW
+ * E: 000097.276573 9 11 00 01 00 00 00 00 00 00
+ *
+ * Either wheel rotation, one detent CCW
+ * E: 000153.416538 9 11 00 ff ff 00 00 00 00 00
+ *
+ * Either wheel rotation, increasing rotation speed CW
+ * (Note that the wheels on my particular tablet may be
+ * damaged, so the false rotation direction changes
+ * that can be observed might not happen on other units.)
+ * E: 000210.514925 9 11 00 01 00 00 00 00 00 00
+ * E: 000210.725718 9 11 00 01 00 00 00 00 00 00
+ * E: 000210.924009 9 11 00 01 00 00 00 00 00 00
+ * E: 000211.205629 9 11 00 01 00 00 00 00 00 00
+ * E: 000211.280521 9 11 00 0b 00 00 00 00 00 00
+ * E: 000211.340121 9 11 00 0e 00 00 00 00 00 00
+ * E: 000211.404018 9 11 00 0d 00 00 00 00 00 00
+ * E: 000211.462060 9 11 00 0e 00 00 00 00 00 00
+ * E: 000211.544886 9 11 00 0a 00 00 00 00 00 00
+ * E: 000211.606130 9 11 00 0d 00 00 00 00 00 00
+ * E: 000211.674560 9 11 00 0c 00 00 00 00 00 00
+ * E: 000211.712039 9 11 00 16 00 00 00 00 00 00
+ * E: 000211.748076 9 11 00 17 00 00 00 00 00 00
+ * E: 000211.786016 9 11 00 17 00 00 00 00 00 00
+ * E: 000211.832960 9 11 00 11 00 00 00 00 00 00
+ * E: 000211.874081 9 11 00 14 00 00 00 00 00 00
+ * E: 000211.925094 9 11 00 10 00 00 00 00 00 00
+ * E: 000211.959048 9 11 00 18 00 00 00 00 00 00
+ * E: 000212.006937 9 11 00 11 00 00 00 00 00 00
+ * E: 000212.050055 9 11 00 13 00 00 00 00 00 00
+ * E: 000212.091947 9 11 00 14 00 00 00 00 00 00
+ * E: 000212.122989 9 11 00 1a 00 00 00 00 00 00
+ * E: 000212.160866 9 11 00 16 00 00 00 00 00 00
+ * E: 000212.194002 9 11 00 19 00 00 00 00 00 00
+ * E: 000212.242249 9 11 00 11 00 00 00 00 00 00
+ * E: 000212.278061 9 11 00 18 00 00 00 00 00 00
+ * E: 000212.328899 9 11 00 10 00 00 00 00 00 00
+ * E: 000212.354005 9 11 00 22 00 00 00 00 00 00
+ * E: 000212.398995 9 11 00 12 00 00 00 00 00 00
+ * E: 000212.432050 9 11 00 19 00 00 00 00 00 00
+ * E: 000212.471164 9 11 00 16 00 00 00 00 00 00
+ * E: 000212.507047 9 11 00 17 00 00 00 00 00 00
+ * E: 000212.540964 9 11 00 19 00 00 00 00 00 00
+ * E: 000212.567942 9 11 00 1f 00 00 00 00 00 00
+ * E: 000212.610007 9 11 00 14 00 00 00 00 00 00
+ * E: 000212.641101 9 11 00 1b 00 00 00 00 00 00
+ * E: 000212.674113 9 11 00 19 00 00 00 00 00 00
+ * E: 000212.674909 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.677062 9 11 00 00 02 00 00 00 00 00
+ * E: 000212.679048 9 11 00 55 01 00 00 00 00 00
+ * E: 000212.682166 9 11 00 55 01 00 00 00 00 00
+ * E: 000212.682788 9 11 00 ff ff 00 00 00 00 00
+ * E: 000212.683899 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.685827 9 11 00 67 fe 00 00 00 00 00
+ * E: 000212.686941 9 11 00 00 08 00 00 00 00 00
+ * E: 000212.727840 9 11 00 14 00 00 00 00 00 00
+ * E: 000212.772884 9 11 00 13 00 00 00 00 00 00
+ * E: 000212.810975 9 11 00 16 00 00 00 00 00 00
+ * E: 000212.811793 9 11 00 00 08 00 00 00 00 00
+ * E: 000212.812683 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.813905 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.814909 9 11 00 00 04 00 00 00 00 00
+ * E: 000212.816942 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.817851 9 11 00 ff ff 00 00 00 00 00
+ * E: 000212.818752 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.819910 9 11 00 56 fd 00 00 00 00 00
+ * E: 000212.820781 9 11 00 ff ff 00 00 00 00 00
+ * E: 000212.821811 9 11 00 00 04 00 00 00 00 00
+ * E: 000212.822920 9 11 00 00 08 00 00 00 00 00
+ * E: 000212.823861 9 11 00 00 02 00 00 00 00 00
+ * E: 000212.828781 9 11 00 ba 00 00 00 00 00 00
+ * E: 000212.874097 9 11 00 12 00 00 00 00 00 00
+ * E: 000212.874872 9 11 00 00 fc 00 00 00 00 00
+ * E: 000212.876136 9 11 00 00 fc 00 00 00 00 00
+ * E: 000212.877036 9 11 00 00 f8 00 00 00 00 00
+ * E: 000212.877993 9 11 00 00 f8 00 00 00 00 00
+ * E: 000212.879748 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.880728 9 11 00 01 00 00 00 00 00 00
+ * E: 000212.881956 9 11 00 00 04 00 00 00 00 00
+ * E: 000212.885065 9 11 00 ff ff 00 00 00 00 00
+ * E: 000212.917060 9 11 00 1a 00 00 00 00 00 00
+ * E: 000212.936458 9 11 00 2d 00 00 00 00 00 00
+ * E: 000212.957860 9 11 00 25 00 00 00 00 00 00
+ * E: 000212.984019 9 11 00 20 00 00 00 00 00 00
+ * E: 000213.017915 9 11 00 19 00 00 00 00 00 00
+ * E: 000213.039973 9 11 00 27 00 00 00 00 00 00
+ * E: 000213.065933 9 11 00 21 00 00 00 00 00 00
+ * E: 000213.085807 9 11 00 28 00 00 00 00 00 00
+ * E: 000213.108888 9 11 00 25 00 00 00 00 00 00
+ * E: 000213.129726 9 11 00 29 00 00 00 00 00 00
+ * E: 000213.172043 9 11 00 14 00 00 00 00 00 00
+ * E: 000213.195873 9 11 00 23 00 00 00 00 00 00
+ * E: 000213.222884 9 11 00 20 00 00 00 00 00 00
+ * E: 000213.243220 9 11 00 2a 00 00 00 00 00 00
+ * E: 000213.266778 9 11 00 24 00 00 00 00 00 00
+ * E: 000213.285951 9 11 00 2b 00 00 00 00 00 00
+ * E: 000213.306045 9 11 00 2a 00 00 00 00 00 00
+ * E: 000213.306796 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.307755 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.308820 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.309971 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.310980 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.311853 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.312861 9 11 00 aa 02 00 00 00 00 00
+ * E: 000213.313884 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.315111 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.315992 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.316955 9 11 00 00 08 00 00 00 00 00
+ * E: 000213.346065 9 11 00 1d 00 00 00 00 00 00
+ * E: 000213.346963 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.347874 9 11 00 00 08 00 00 00 00 00
+ * E: 000213.348736 9 11 00 00 08 00 00 00 00 00
+ * E: 000213.349795 9 11 00 00 04 00 00 00 00 00
+ * E: 000213.350791 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.351791 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.352729 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.353811 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.354755 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.355795 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.356813 9 11 00 01 00 00 00 00 00 00
+ * E: 000213.357817 9 11 00 00 04 00 00 00 00 00
+ * E: 000213.393838 9 11 00 17 00 00 00 00 00 00
+ * E: 000213.394719 9 11 00 00 04 00 00 00 00 00
+ * E: 000213.395682 9 11 00 00 08 00 00 00 00 00
+ * E: 000213.396679 9 11 00 00 04 00 00 00 00 00
+ * E: 000213.397651 9 11 00 00 fc 00 00 00 00 00
+ * E: 000213.398661 9 11 00 ff ff 00 00 00 00 00
+ * E: 000213.400308 9 11 00 56 fd 00 00 00 00 00
+ * E: 000213.400909 9 11 00 00 f8 00 00 00 00 00
+ * E: 000213.401837 9 11 00 01 00 00 00 00 00 00
+ *
+ * Either wheel rotation, increasing rotation speed, CCW
+ * (Note that the wheels on my particular tablet may be
+ * damaged, so the spurious direction-change reports seen
+ * here might not appear on other units.)
+ * E: 000040.527820 9 11 00 ff ff 00 00 00 00 00
+ * E: 000040.816644 9 11 00 ff ff 00 00 00 00 00
+ * E: 000040.880423 9 11 00 f3 ff 00 00 00 00 00
+ * E: 000040.882570 9 11 00 ff ff 00 00 00 00 00
+ * E: 000040.883381 9 11 00 ff ff 00 00 00 00 00
+ * E: 000040.885463 9 11 00 aa 02 00 00 00 00 00
+ * E: 000040.924106 9 11 00 ea ff 00 00 00 00 00
+ * E: 000041.006155 9 11 00 f6 ff 00 00 00 00 00
+ * E: 000041.085799 9 11 00 f6 ff 00 00 00 00 00
+ * E: 000041.168492 9 11 00 f6 ff 00 00 00 00 00
+ * E: 000041.233453 9 11 00 f3 ff 00 00 00 00 00
+ * E: 000041.296641 9 11 00 f3 ff 00 00 00 00 00
+ * E: 000041.370302 9 11 00 f5 ff 00 00 00 00 00
+ * E: 000041.437410 9 11 00 f4 ff 00 00 00 00 00
+ * E: 000041.474514 9 11 00 e9 ff 00 00 00 00 00
+ * E: 000041.522171 9 11 00 ef ff 00 00 00 00 00
+ * E: 000041.568160 9 11 00 ee ff 00 00 00 00 00
+ * E: 000041.608146 9 11 00 ec ff 00 00 00 00 00
+ * E: 000041.627132 9 11 00 d3 ff 00 00 00 00 00
+ * E: 000041.656151 9 11 00 e3 ff 00 00 00 00 00
+ * E: 000041.682264 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000041.714186 9 11 00 e6 ff 00 00 00 00 00
+ * E: 000041.740339 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000041.772087 9 11 00 e5 ff 00 00 00 00 00
+ * E: 000041.801093 9 11 00 e3 ff 00 00 00 00 00
+ * E: 000041.834051 9 11 00 e7 ff 00 00 00 00 00
+ * E: 000041.863094 9 11 00 e3 ff 00 00 00 00 00
+ * E: 000041.901016 9 11 00 ea ff 00 00 00 00 00
+ * E: 000041.901956 9 11 00 00 04 00 00 00 00 00
+ * E: 000041.902837 9 11 00 00 fe 00 00 00 00 00
+ * E: 000041.903927 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.905066 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.907214 9 11 00 00 fe 00 00 00 00 00
+ * E: 000041.909011 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.909953 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.910917 9 11 00 00 08 00 00 00 00 00
+ * E: 000041.913280 9 11 00 00 fe 00 00 00 00 00
+ * E: 000041.914121 9 11 00 56 fd 00 00 00 00 00
+ * E: 000041.915346 9 11 00 ff ff 00 00 00 00 00
+ * E: 000041.962101 9 11 00 ee ff 00 00 00 00 00
+ * E: 000041.964062 9 11 00 56 fd 00 00 00 00 00
+ * E: 000041.964978 9 11 00 00 fc 00 00 00 00 00
+ * E: 000041.968058 9 11 00 24 01 00 00 00 00 00
+ * E: 000041.968880 9 11 00 56 fd 00 00 00 00 00
+ * E: 000041.970977 9 11 00 aa 02 00 00 00 00 00
+ * E: 000041.971932 9 11 00 ff ff 00 00 00 00 00
+ * E: 000041.972943 9 11 00 01 00 00 00 00 00 00
+ * E: 000041.975291 9 11 00 ff ff 00 00 00 00 00
+ * E: 000041.978274 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.035079 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.041283 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.042057 9 11 00 00 04 00 00 00 00 00
+ * E: 000042.045169 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.051242 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.056099 9 11 00 63 ff 00 00 00 00 00
+ * E: 000042.106329 9 11 00 ef ff 00 00 00 00 00
+ * E: 000042.108601 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.116259 9 11 00 6b 00 00 00 00 00 00
+ * E: 000042.119140 9 11 00 55 01 00 00 00 00 00
+ * E: 000042.126101 9 11 00 88 ff 00 00 00 00 00
+ * E: 000042.158009 9 11 00 e6 ff 00 00 00 00 00
+ * E: 000042.172108 9 11 00 be ff 00 00 00 00 00
+ * E: 000042.207417 9 11 00 e8 ff 00 00 00 00 00
+ * E: 000042.223155 9 11 00 cc ff 00 00 00 00 00
+ * E: 000042.255185 9 11 00 e6 ff 00 00 00 00 00
+ * E: 000042.276280 9 11 00 d7 ff 00 00 00 00 00
+ * E: 000042.302128 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000042.317423 9 11 00 c8 ff 00 00 00 00 00
+ * E: 000042.345226 9 11 00 e1 ff 00 00 00 00 00
+ * E: 000042.357243 9 11 00 bc ff 00 00 00 00 00
+ * E: 000042.381308 9 11 00 dc ff 00 00 00 00 00
+ * E: 000042.383180 9 11 00 dc fe 00 00 00 00 00
+ * E: 000042.412288 9 11 00 e3 ff 00 00 00 00 00
+ * E: 000042.451216 9 11 00 eb ff 00 00 00 00 00
+ * E: 000042.478372 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000042.502116 9 11 00 dd ff 00 00 00 00 00
+ * E: 000042.520105 9 11 00 d3 ff 00 00 00 00 00
+ * E: 000042.540345 9 11 00 d6 ff 00 00 00 00 00
+ * E: 000042.541021 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.542009 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.543045 9 11 00 00 04 00 00 00 00 00
+ * E: 000042.544279 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.545097 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.546074 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.547237 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.548029 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.549304 9 11 00 00 f8 00 00 00 00 00
+ * E: 000042.553123 9 11 00 00 ff 00 00 00 00 00
+ * E: 000042.581186 9 11 00 e1 ff 00 00 00 00 00
+ * E: 000042.582238 9 11 00 00 f8 00 00 00 00 00
+ * E: 000042.583150 9 11 00 00 fc 00 00 00 00 00
+ * E: 000042.584273 9 11 00 00 f8 00 00 00 00 00
+ * E: 000042.585019 9 11 00 00 fc 00 00 00 00 00
+ * E: 000042.586059 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.589012 9 11 00 67 fe 00 00 00 00 00
+ * E: 000042.590066 9 11 00 00 fc 00 00 00 00 00
+ * E: 000042.592916 9 11 00 dc fe 00 00 00 00 00
+ * E: 000042.621124 9 11 00 e1 ff 00 00 00 00 00
+ * E: 000042.622092 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.623069 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.624030 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.625006 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.626068 9 11 00 00 04 00 00 00 00 00
+ * E: 000042.626876 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.628392 9 11 00 00 08 00 00 00 00 00
+ * E: 000042.628918 9 11 00 01 00 00 00 00 00 00
+ * E: 000042.630009 9 11 00 ff ff 00 00 00 00 00
+ * E: 000042.631934 9 11 00 00 fe 00 00 00 00 00
+ * E: 000042.656285 9 11 00 dd ff 00 00 00 00 00
+ * E: 000042.659870 9 11 00 cc 00 00 00 00 00 00
+ * E: 000042.666128 9 11 00 9d 00 00 00 00 00 00
+ * E: 000042.672458 9 11 00 80 ff 00 00 00 00 00
+ * E: 000042.696106 9 11 00 dc ff 00 00 00 00 00
+ * E: 000042.705129 9 11 00 61 00 00 00 00 00 00
+ * E: 000042.731303 9 11 00 e0 ff 00 00 00 00 00
+ * E: 000042.741278 9 11 00 ab ff 00 00 00 00 00
+ * E: 000042.788181 9 11 00 ee ff 00 00 00 00 00
+ * E: 000042.810441 9 11 00 db ff 00 00 00 00 00
+ * E: 000042.838073 9 11 00 e1 ff 00 00 00 00 00
+ * E: 000042.852235 9 11 00 c4 ff 00 00 00 00 00
+ * E: 000042.882290 9 11 00 e4 ff 00 00 00 00 00
+ *
+ * Either wheel button, press, hold, then release
+ * E: 000202.084982 9 11 02 00 00 00 00 00 00 00
+ * E: 000202.090172 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.094139 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.099172 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.105055 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.109132 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.114185 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.119212 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.124264 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.130147 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.135138 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.140072 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.145146 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.150157 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.155339 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.160064 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.165026 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.170037 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.175154 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.180044 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.186280 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.191281 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.196106 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.201083 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.206166 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.211084 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.216175 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.221036 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.226271 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.231150 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.235924 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.242046 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.247164 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.252359 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.257295 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.262167 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.267081 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.272175 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.277085 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.282596 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.287078 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.292191 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.298196 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.303004 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.308113 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.313079 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.318243 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.323309 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.328190 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.333050 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.338162 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.343022 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.348113 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.354133 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.359132 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.364053 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.369034 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.374144 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.379027 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.384238 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.389249 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.394049 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.398949 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.404203 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.410098 9 11 03 00 00 00 00 00 00 00
+ * E: 000202.415237 9 11 00 00 00 00 00 00 00 00
+ *
+ * Top wheel button press and release while holding bottom wheel button
+ * (The reverse action, a bottom wheel button press while holding the
+ * top wheel button, is invisible.)
+ * E: 000071.126966 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.133117 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.137481 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.142036 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.147027 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.151988 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.157945 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.163657 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.168240 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.173109 9 11 02 00 00 00 00 00 00 00 < top wheel button press?
+ * E: 000071.178119 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.183046 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.187983 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.192996 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.198341 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.203122 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.208998 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.214037 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.218945 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.223835 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.228987 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.234082 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.239028 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.244307 9 11 00 00 00 00 00 00 00 00 < top wheel button release?
+ * E: 000071.245867 9 11 03 00 00 00 00 00 00 00 < continued hold of bottom button
+ * E: 000071.249959 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.255032 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.259972 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.265409 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.270156 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.275530 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.279975 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.285046 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.290906 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.296146 9 11 03 00 00 00 00 00 00 00
+ * E: 000071.301288 9 11 03 00 00 00 00 00 00 00
+ *
+ * Top wheel button held while the top wheel rotates CCW
+ * (I did not test the other combinations of this)
+ * E: 000022.253144 9 11 03 00 00 00 00 00 00 00
+ * E: 000022.258157 9 11 03 00 00 00 00 00 00 00
+ * E: 000022.262011 9 11 00 ff ff 00 00 00 00 00
+ * E: 000022.264015 9 11 03 00 00 00 00 00 00 00
+ * E: 000022.268976 9 11 03 00 00 00 00 00 00 00
+ *
+ * NOTES ON SIMULTANEOUS BUTTON HOLDS
+ * (applies to vendor mode only)
+ * Value replacements for ease of reading:
+ * .7 = 0x40 (button 7, a wheel button)
+ * .1 = 0x01 (button 1, a pad button)
+ * rr = 0x00 (no buttons pressed)
+ *
+ * Press 7
+ * Press 1
+ * Release 7
+ * Release 1
+ * B: 000000.000152 42 08 e0 01 01 .7 00 00 00 00 00 00 00 00 00
+ * B: 000000.781784 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000000.869845 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.095688 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.322635 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.543643 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.770652 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000001.885659 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 7
+ * B: 000001.993620 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000002.220671 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000002.446589 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000002.672559 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000002.765183 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 1
+ *
+ * Press 7
+ * Press 1
+ * Release 1
+ * Release 7
+ * B: 000017.071517 42 08 e0 01 01 .7 00 00 00 00 00 00 00 00 00
+ * B: 000018.270461 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000018.419486 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000018.646438 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000018.872493 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000019.094422 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000019.320488 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000020.360505 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 1 is not reported until 7 is released, then both are rapidly reported
+ * B: 000020.361091 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00
+ *
+ * Press 1
+ * Press 7
+ * Release 7
+ * Release 1
+ * B: 000031.516315 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000031.922299 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000032.144165 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000032.370262 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000032.396242 42 08 e0 01 01 .7 00 00 00 00 00 00 00 00 00
+ * B: 000032.597270 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000032.818187 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.045143 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.267535 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 7
+ * B: 000033.272602 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.494246 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.721266 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000033.947237 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000034.169294 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000034.183585 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 1
+ *
+ * Press 1
+ * Press 7
+ * Release 1
+ * Release 7
+ * B: 000056.628429 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.046348 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.272044 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.494434 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.601224 42 08 e0 01 01 .7 00 00 00 00 00 00 00 00 00
+ * B: 000057.719262 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000057.946941 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000058.172346 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000058.393994 42 08 e0 01 01 .1 00 00 00 00 00 00 00 00 00
+ * B: 000059.434576 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00 release of 1 is not reported until 7 is released, then both are rapidly reported
+ * B: 000059.435857 42 08 e0 01 01 rr 00 00 00 00 00 00 00 00 00
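+ *
+ * These sequences are why the event handler below tracks
+ * last_button_state and ignores a new non-zero data[4] while another
+ * button is still held: only a release (0x00) resets the state.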
+ */
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+char EXPECTED_FIRMWARE_ID[] = "HUION_M22c_";
+
+__u8 last_button_state;
+
+static const __u8 disabled_rdesc_tablet[] = {
+ FixedSizeVendorReport(28) /* Input report 4 */
+};
+
+static const __u8 disabled_rdesc_wheel[] = {
+ FixedSizeVendorReport(9) /* Input report 17 */
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ ReportId(VENDOR_REPORT_ID)
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionPhysical(
+ /*
+ * I have only examined the tablet's behavior while using
+ * the PW600L pen, which does not have an eraser.
+ * Because of this, I don't know where the Eraser and Invert
+ * bits will go, or if they work as one would expect.
+ *
+ * For the time being, there is no expectation that a pen
+ * with an eraser will work without modifications here.
+ */
+ ReportSize(1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportCount(3)
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch
+ Input(Var|Abs)
+ PushPop(
+ ReportCount(1)
+ UsagePage_Button
+ Usage_i8(0x4a) /* (BTN_STYLUS3 + 1) & 0xff */
+ Input(Var|Abs)
+ )
+ ReportCount(3)
+ Input(Const)
+ ReportCount(1)
+ Usage_Dig_InRange
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-2)
+ LogicalMinimum_i16(0)
+ PhysicalMinimum_i16(0)
+ /*
+ * The tablet has a logical maximum of 58760 x 33040
+ * and a claimed resolution of 5080 LPI (200 L/mm)
+ * This works out to a physical maximum of
+ * 293.8 x 165.2mm, which matches Huion's advertised
+ * active area dimensions from
+ * https://www.huion.com/products/pen_display/Kamvas/kamvas-13-gen-3.html
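+ * (e.g. 58760 / 200 lines-per-mm = 293.8 mm, which is what
+ * PhysicalMaximum_i16(2938) encodes given the cm unit and
+ * UnitExponent(-2) above)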
+ */
+ LogicalMaximum_i16(58760)
+ PhysicalMaximum_i16(2938)
+ Usage_GD_X
+ Input(Var|Abs)
+ LogicalMaximum_i16(33040)
+ PhysicalMaximum_i16(1652)
+ Usage_GD_Y
+ Input(Var|Abs)
+ )
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(16383)
+ Usage_Dig_TipPressure
+ Input(Var|Abs)
+ ReportCount(1)
+ Input(Const)
+ ReportSize(8)
+ ReportCount(2)
+ PushPop(
+ Unit(deg)
+ UnitExponent(0)
+ LogicalMinimum_i8(-60)
+ PhysicalMinimum_i8(-60)
+ LogicalMaximum_i8(60)
+ PhysicalMaximum_i8(60)
+ Usage_Dig_XTilt
+ Usage_Dig_YTilt
+ Input(Var|Abs)
+ )
+ )
+ )
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ ReportId(CUSTOM_PAD_REPORT_ID)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ /*
+ * The first 3 bytes are somewhat vestigial and will
+ * always be set to zero. Their presence here is needed
+ * to ensure that this device will be detected as a
+ * tablet pad by software that otherwise wouldn't know
+ * any better.
+ */
+ /* (data[1] & 0x01) barrel switch */
+ ReportSize(1)
+ ReportCount(1)
+ Usage_Dig_BarrelSwitch
+ Input(Var|Abs)
+ ReportCount(7)
+ Input(Const)
+ /* data[2] X */
+ /* data[3] Y */
+ ReportSize(8)
+ ReportCount(2)
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ Input(Var|Abs)
+ /*
+ * (data[4] & 0x01) button 1
+ * (data[4] & 0x02) button 2
+ * (data[4] & 0x04) button 3
+ * (data[4] & 0x08) button 4
+ * (data[4] & 0x10) button 5
+ * (data[4] & 0x20) button 6 (top wheel button)
+ * (data[4] & 0x40) button 7 (bottom wheel button)
+ */
+ ReportSize(1)
+ ReportCount(7)
+ UsagePage_Button
+ UsageMinimum_i8(1)
+ UsageMaximum_i8(7)
+ Input(Var|Abs)
+ ReportCount(1)
+ Input(Const)
+ /* data[5] top wheel (signed, positive clockwise) */
+ ReportSize(8)
+ ReportCount(1)
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ Input(Var|Rel)
+ /* data[6] bottom wheel (signed, positive clockwise) */
+ UsagePage_Consumer
+ Usage_Con_ACPan
+ Input(Var|Rel)
+ )
+ /*
+ * The kernel will drop reports that are bigger than the
+ * largest report specified in the HID descriptor.
+ * Therefore, our modified descriptor needs to have at least one
+ * HID report that is as long as, or longer than, the largest
+ * report in the original descriptor.
+ *
+ * This macro expands to a no-op report that is padded to the
+ * provided length.
+ */
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc_huion_kamvas13_gen3, struct hid_bpf_ctx *hid_ctx)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hid_ctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+
+ if (have_fw_id) {
+ /*
+ * Tablet should be in vendor mode.
+ * Disable the unused devices
+ */
+ if (rdesc_size == TABLET_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, disabled_rdesc_tablet,
+ sizeof(disabled_rdesc_tablet));
+ return sizeof(disabled_rdesc_tablet);
+ }
+
+ if (rdesc_size == WHEEL_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, disabled_rdesc_wheel,
+ sizeof(disabled_rdesc_wheel));
+ return sizeof(disabled_rdesc_wheel);
+ }
+ }
+
+ /*
+ * Regardless of which mode the tablet is in, always fix the vendor
+ * descriptor in case the udev property just happened to not be set
+ */
+ if (rdesc_size == VENDOR_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+ }
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(hid_fix_event_huion_kamvas13_gen3, struct hid_bpf_ctx *hid_ctx)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, VENDOR_REPORT_LENGTH /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Handle vendor reports only */
+ if (hid_ctx->size != VENDOR_REPORT_LENGTH)
+ return 0;
+ if (data[0] != VENDOR_REPORT_ID)
+ return 0;
+
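+ /*
+ * The report subtype is the high nibble of data[1]; see the
+ * VENDOR_REPORT_SUBTYPE_* defines. For pen reports this nibble also
+ * appears to carry the in-range state (PEN vs PEN_OUT).
+ */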
+ __u8 report_subtype = (data[1] >> 4) & 0x0f;
+
+ if (report_subtype == VENDOR_REPORT_SUBTYPE_PEN ||
+ report_subtype == VENDOR_REPORT_SUBTYPE_PEN_OUT) {
+ /* Invert Y tilt */
+ data[11] = -data[11];
+
+ } else if (report_subtype == VENDOR_REPORT_SUBTYPE_BUTTONS ||
+ report_subtype == VENDOR_REPORT_SUBTYPE_WHEELS) {
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus:1;
+ __u8 padding:7;
+ __u8 x;
+ __u8 y;
+ __u8 buttons;
+ __s8 top_wheel;
+ __s8 bottom_wheel;
+ } __attribute__((packed)) *pad_report;
+
+ __s8 top_wheel = 0;
+ __s8 bottom_wheel = 0;
+
+ switch (report_subtype) {
+ case VENDOR_REPORT_SUBTYPE_WHEELS:
+ /*
+ * The wheel direction byte is 1 for clockwise rotation
+ * and 2 for counter-clockwise.
+ * Change it to 1 and -1, respectively.
+ */
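+ /* data[3] selects which wheel moved: 1 = top, 2 = bottom */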
+ switch (data[3]) {
+ case 1:
+ top_wheel = (data[5] == 1) ? 1 : -1;
+ break;
+ case 2:
+ bottom_wheel = (data[5] == 1) ? 1 : -1;
+ break;
+ }
+ break;
+
+ case VENDOR_REPORT_SUBTYPE_BUTTONS:
+ /*
+ * If a button is already being held, ignore any new
+ * button event unless it's a release.
+ *
+ * The tablet only cleanly handles one button being held
+ * at a time, and trying to hold multiple buttons
+ * (particularly wheel+pad buttons) can result in sequences
+ * of reports that look like imaginary presses and releases.
+ *
+ * This is an imperfect way to filter out some of these
+ * reports.
+ */
+ if (last_button_state != 0x00 && data[4] != 0x00)
+ break;
+
+ last_button_state = data[4];
+ break;
+ }
+
+ pad_report = (struct pad_report *)data;
+
+ pad_report->report_id = CUSTOM_PAD_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->top_wheel = top_wheel;
+ pad_report->bottom_wheel = bottom_wheel;
+
+ return sizeof(struct pad_report);
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(huion_kamvas13_gen3) = {
+ .hid_device_event = (void *)hid_fix_event_huion_kamvas13_gen3,
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc_huion_kamvas13_gen3,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case VENDOR_DESCRIPTOR_LENGTH:
+ case TABLET_DESCRIPTOR_LENGTH:
+ case WHEEL_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c b/drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c
new file mode 100644
index 000000000000..ac66c6e65eb4
--- /dev/null
+++ b/drivers/hid/bpf/progs/Huion__Kamvas16Gen3.bpf.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Nicholas LaPointe
+ * Copyright (c) 2025 Higgins Dragon
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_HUION 0x256c
+#define PID_KAMVAS16_GEN3 0x2009
+
+#define VENDOR_DESCRIPTOR_LENGTH 36
+#define TABLET_DESCRIPTOR_LENGTH 328
+#define WHEEL_DESCRIPTOR_LENGTH 200
+
+#define VENDOR_REPORT_ID 8
+#define VENDOR_REPORT_LENGTH 14
+
+#define VENDOR_REPORT_SUBTYPE_PEN 0x08
+#define VENDOR_REPORT_SUBTYPE_PEN_OUT 0x00
+#define VENDOR_REPORT_SUBTYPE_BUTTONS 0x0e
+#define VENDOR_REPORT_SUBTYPE_WHEELS 0x0f
+
+/* For the reports that we create ourselves */
+#define CUSTOM_PAD_REPORT_ID 9
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_ANY, VID_HUION, PID_KAMVAS16_GEN3),
+);
+
+/*
+ * This tablet can send reports using one of two different data formats,
+ * depending on what "mode" the tablet is in.
+ *
+ * By default, the tablet will send reports that can be decoded using its
+ * included HID descriptors (descriptors 1 and 2, shown below).
+ * This mode will be called "firmware mode" throughout this file.
+ *
+ * The HID descriptor that describes pen events in firmware mode (descriptor 1)
+ * has multiple bugs:
+ * * "Secondary Tip Switch" instead of "Secondary Barrel Switch"
+ * * "Invert" instead of (or potentially shared with) third barrel button
+ * * Specified tablet area of 2048 in³ instead of 349.6 x 196.7mm
+ * * Specified tilt range of -90 to +90 instead of -60 to +60
+ *
+ * While these can be easily patched up by editing the descriptor, a larger
+ * problem with the firmware mode exists: it is impossible to tell which of the
+ * two wheels are being rotated (or having their central button pressed).
+ *
+ *
+ * By using a tool such as huion-switcher (https://github.com/whot/huion-switcher),
+ * the tablet can be made to send reports using a proprietary format that is not
+ * adequately described by its relevant descriptor (descriptor 0, shown below).
+ * This mode will be called "vendor mode" throughout this file.
+ *
+ * The reports sent while in vendor mode allow for proper decoding of the wheels.
+ *
+ * For simplicity and maximum functionality, this BPF focuses strictly on
+ * enabling one to make use of the vendor mode.
+ */
+
+/*
+ * DESCRIPTORS
+ * DESCRIPTOR 0
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 0
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 3
+ * # 0xa1, 0x01, // Collection (Application) 5
+ * # 0x85, 0x08, // Report ID (8) 7
+ * # 0x75, 0x68, // Report Size (104) 9
+ * # 0x95, 0x01, // Report Count (1) 11
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 13
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 15
+ * # 0xc0, // End Collection 17
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 18
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 21
+ * # 0xa1, 0x01, // Collection (Application) 23
+ * # 0x85, 0x16, // Report ID (22) 25
+ * # 0x75, 0x08, // Report Size (8) 27
+ * # 0x95, 0x07, // Report Count (7) 29
+ * # 0x09, 0x01, // Usage (Vendor Usage 1) 31
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 33
+ * # 0xc0, // End Collection 35
+ * #
+ * R: 36 06 00 ff 09 01 a1 01 85 08 75 68 95 01 09 01 81 02 c0 06 00 ff 09 01 a1 01 85 16 75 08 95 07 09 01 b1 02 c0
+ * N: HUION Huion Tablet_GS1563
+ * I: 3 256c 2009
+ *
+ *
+ * DESCRIPTOR 1
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x0a, // Report ID (10) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x01, // Collection (Application) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x43, // Usage (Secondary Tip Switch) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18
+ * # 0x09, 0x45, // Usage (Eraser) 20
+ * # 0x15, 0x00, // Logical Minimum (0) 22
+ * # 0x25, 0x01, // Logical Maximum (1) 24
+ * # 0x75, 0x01, // Report Size (1) 26
+ * # 0x95, 0x06, // Report Count (6) 28
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 30
+ * # 0x09, 0x32, // Usage (In Range) 32
+ * # 0x75, 0x01, // Report Size (1) 34
+ * # 0x95, 0x01, // Report Count (1) 36
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 38
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x30, // Usage (X) 44
+ * # 0x09, 0x31, // Usage (Y) 46
+ * # 0x55, 0x0d, // Unit Exponent (-3) 48
+ * # 0x65, 0x33, // Unit (EnglishLinear: in³) 50
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 52
+ * # 0x35, 0x00, // Physical Minimum (0) 55
+ * # 0x46, 0x00, 0x08, // Physical Maximum (2048) 57
+ * # 0x75, 0x10, // Report Size (16) 60
+ * # 0x95, 0x02, // Report Count (2) 62
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 64
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 66
+ * # 0x09, 0x30, // Usage (Tip Pressure) 68
+ * # 0x26, 0xff, 0x3f, // Logical Maximum (16383) 70
+ * # 0x75, 0x10, // Report Size (16) 73
+ * # 0x95, 0x01, // Report Count (1) 75
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 77
+ * # 0x09, 0x3d, // Usage (X Tilt) 79
+ * # 0x09, 0x3e, // Usage (Y Tilt) 81
+ * # 0x15, 0xa6, // Logical Minimum (-90) 83
+ * # 0x25, 0x5a, // Logical Maximum (90) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x02, // Report Count (2) 89
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 91
+ * # 0xc0, // End Collection 93
+ * # 0xc0, // End Collection 94
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 95
+ * # 0x09, 0x04, // Usage (Touch Screen) 97
+ * # 0xa1, 0x01, // Collection (Application) 99
+ * # 0x85, 0x04, // Report ID (4) 101
+ * # 0x09, 0x22, // Usage (Finger) 103
+ * # 0xa1, 0x02, // Collection (Logical) 105
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 107
+ * # 0x95, 0x01, // Report Count (1) 109
+ * # 0x75, 0x06, // Report Size (6) 111
+ * # 0x09, 0x51, // Usage (Contact Id) 113
+ * # 0x15, 0x00, // Logical Minimum (0) 115
+ * # 0x25, 0x3f, // Logical Maximum (63) 117
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 119
+ * # 0x09, 0x42, // Usage (Tip Switch) 121
+ * # 0x25, 0x01, // Logical Maximum (1) 123
+ * # 0x75, 0x01, // Report Size (1) 125
+ * # 0x95, 0x01, // Report Count (1) 127
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 129
+ * # 0x75, 0x01, // Report Size (1) 131
+ * # 0x95, 0x01, // Report Count (1) 133
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 135
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 137
+ * # 0x75, 0x10, // Report Size (16) 139
+ * # 0x55, 0x0e, // Unit Exponent (-2) 141
+ * # 0x65, 0x11, // Unit (SILinear: cm) 143
+ * # 0x09, 0x30, // Usage (X) 145
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 147
+ * # 0x35, 0x00, // Physical Minimum (0) 150
+ * # 0x46, 0x15, 0x0c, // Physical Maximum (3093) 152
+ * # 0x81, 0x42, // Input (Data,Var,Abs,Null) 155
+ * # 0x09, 0x31, // Usage (Y) 157
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 159
+ * # 0x46, 0xcb, 0x06, // Physical Maximum (1739) 162
+ * # 0x81, 0x42, // Input (Data,Var,Abs,Null) 165
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 167
+ * # 0x09, 0x30, // Usage (Tip Pressure) 169
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 171
+ * # 0x75, 0x10, // Report Size (16) 174
+ * # 0x95, 0x01, // Report Count (1) 176
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 178
+ * # 0xc0, // End Collection 180
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 181
+ * # 0x09, 0x22, // Usage (Finger) 183
+ * # 0xa1, 0x02, // Collection (Logical) 185
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 187
+ * # 0x95, 0x01, // Report Count (1) 189
+ * # 0x75, 0x06, // Report Size (6) 191
+ * # 0x09, 0x51, // Usage (Contact Id) 193
+ * # 0x15, 0x00, // Logical Minimum (0) 195
+ * # 0x25, 0x3f, // Logical Maximum (63) 197
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 199
+ * # 0x09, 0x42, // Usage (Tip Switch) 201
+ * # 0x25, 0x01, // Logical Maximum (1) 203
+ * # 0x75, 0x01, // Report Size (1) 205
+ * # 0x95, 0x01, // Report Count (1) 207
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 209
+ * # 0x75, 0x01, // Report Size (1) 211
+ * # 0x95, 0x01, // Report Count (1) 213
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 215
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 217
+ * # 0x75, 0x10, // Report Size (16) 219
+ * # 0x55, 0x0e, // Unit Exponent (-2) 221
+ * # 0x65, 0x11, // Unit (SILinear: cm) 223
+ * # 0x09, 0x30, // Usage (X) 225
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 227
+ * # 0x35, 0x00, // Physical Minimum (0) 230
+ * # 0x46, 0x15, 0x0c, // Physical Maximum (3093) 232
+ * # 0x81, 0x42, // Input (Data,Var,Abs,Null) 235
+ * # 0x09, 0x31, // Usage (Y) 237
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 239
+ * # 0x46, 0xcb, 0x06, // Physical Maximum (1739) 242
+ * # 0x81, 0x42, // Input (Data,Var,Abs,Null) 245
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 247
+ * # 0x09, 0x30, // Usage (Tip Pressure) 249
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 251
+ * # 0x75, 0x10, // Report Size (16) 254
+ * # 0x95, 0x01, // Report Count (1) 256
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 258
+ * # 0xc0, // End Collection 260
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 261
+ * # 0x09, 0x56, // Usage (Scan Time) 263
+ * # 0x55, 0x00, // Unit Exponent (0) 265
+ * # 0x65, 0x00, // Unit (None) 267
+ * # 0x27, 0xff, 0xff, 0xff, 0x7f, // Logical Maximum (2147483647) 269
+ * # 0x95, 0x01, // Report Count (1) 274
+ * # 0x75, 0x20, // Report Size (32) 276
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 278
+ * # 0x09, 0x54, // Usage (Contact Count) 280
+ * # 0x25, 0x7f, // Logical Maximum (127) 282
+ * # 0x95, 0x01, // Report Count (1) 284
+ * # 0x75, 0x08, // Report Size (8) 286
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 288
+ * # 0x75, 0x08, // Report Size (8) 290
+ * # 0x95, 0x08, // Report Count (8) 292
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 294
+ * # 0x85, 0x05, // Report ID (5) 296
+ * # 0x09, 0x55, // Usage (Contact Max) 298
+ * # 0x25, 0x0a, // Logical Maximum (10) 300
+ * # 0x75, 0x08, // Report Size (8) 302
+ * # 0x95, 0x01, // Report Count (1) 304
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 306
+ * # 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 308
+ * # 0x09, 0xc5, // Usage (Vendor Usage 0xc5) 311
+ * # 0x85, 0x06, // Report ID (6) 313
+ * # 0x15, 0x00, // Logical Minimum (0) 315
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 317
+ * # 0x75, 0x08, // Report Size (8) 320
+ * # 0x96, 0x00, 0x01, // Report Count (256) 322
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 325
+ * # 0xc0, // End Collection 327
+ * #
+ * R: 328 05 0d 09 02 a1 01 85 0a 09 20 a1 01 09 42 09 44 09 43 09 3c 09 45 15 00 25 01 75 01 95 06 81 02 09 32 75 01 95 01 81 02 81 03 05 01 09 30 09 31 55 0d 65 33 26 ff 7f 35 00 46 00 08 75 10 95 02 81 02 05 0d 09 30 26 ff 3f 75 10 95 01 81 02 09 3d 09 3e 15 a6 25 5a 75 08 95 02 81 02 c0 c0 05 0d 09 04 a1 01 85 04 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 22 a1 02 05 0d 95 01 75 06 09 51 15 00 25 3f 81 02 09 42 25 01 75 01 95 01 81 02 75 01 95 01 81 03 05 01 75 10 55 0e 65 11 09 30 26 ff 7f 35 00 46 15 0c 81 42 09 31 26 ff 7f 46 cb 06 81 42 05 0d 09 30 26 ff 1f 75 10 95 01 81 02 c0 05 0d 09 56 55 00 65 00 27 ff ff ff 7f 95 01 75 20 81 02 09 54 25 7f 95 01 75 08 81 02 75 08 95 08 81 03 85 05 09 55 25 0a 75 08 95 01 b1 02 06 00 ff 09 c5 85 06 15 00 26 ff 00 75 08 96 00 01 b1 02 c0
+ * N: HUION Huion Tablet_GS1563
+ * I: 3 256c 2009
+ *
+ * DESCRIPTOR 2
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * # 0x09, 0x0e, // Usage (System Multi-Axis Controller) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x11, // Report ID (17) 6
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 8
+ * # 0x09, 0x21, // Usage (Puck) 10
+ * # 0xa1, 0x02, // Collection (Logical) 12
+ * # 0x15, 0x00, // Logical Minimum (0) 14
+ * # 0x25, 0x01, // Logical Maximum (1) 16
+ * # 0x75, 0x01, // Report Size (1) 18
+ * # 0x95, 0x01, // Report Count (1) 20
+ * # 0xa1, 0x00, // Collection (Physical) 22
+ * # 0x05, 0x09, // Usage Page (Button) 24
+ * # 0x09, 0x01, // Usage (Vendor Usage 0x01) 26
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 30
+ * # 0x09, 0x33, // Usage (Touch) 32
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 34
+ * # 0x95, 0x06, // Report Count (6) 36
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 38
+ * # 0xa1, 0x02, // Collection (Logical) 40
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 42
+ * # 0x09, 0x37, // Usage (Dial) 44
+ * # 0x16, 0x00, 0x80, // Logical Minimum (-32768) 46
+ * # 0x26, 0xff, 0x7f, // Logical Maximum (32767) 49
+ * # 0x75, 0x10, // Report Size (16) 52
+ * # 0x95, 0x01, // Report Count (1) 54
+ * # 0x81, 0x06, // Input (Data,Var,Rel) 56
+ * # 0x35, 0x00, // Physical Minimum (0) 58
+ * # 0x46, 0x10, 0x0e, // Physical Maximum (3600) 60
+ * # 0x15, 0x00, // Logical Minimum (0) 63
+ * # 0x26, 0x10, 0x0e, // Logical Maximum (3600) 65
+ * # 0x09, 0x48, // Usage (Resolution Multiplier) 68
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 70
+ * # 0x45, 0x00, // Physical Maximum (0) 72
+ * # 0xc0, // End Collection 74
+ * # 0x75, 0x08, // Report Size (8) 75
+ * # 0x95, 0x01, // Report Count (1) 77
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 79
+ * # 0x75, 0x08, // Report Size (8) 81
+ * # 0x95, 0x01, // Report Count (1) 83
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 85
+ * # 0x75, 0x08, // Report Size (8) 87
+ * # 0x95, 0x01, // Report Count (1) 89
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 91
+ * # 0x75, 0x08, // Report Size (8) 93
+ * # 0x95, 0x01, // Report Count (1) 95
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 97
+ * # 0x75, 0x08, // Report Size (8) 99
+ * # 0x95, 0x01, // Report Count (1) 101
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 103
+ * # 0xc0, // End Collection 105
+ * # 0xc0, // End Collection 106
+ * # 0xc0, // End Collection 107
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 108
+ * # 0x09, 0x06, // Usage (Keyboard) 110
+ * # 0xa1, 0x01, // Collection (Application) 112
+ * # 0x85, 0x03, // Report ID (3) 114
+ * # 0x05, 0x07, // Usage Page (Keyboard) 116
+ * # 0x19, 0xe0, // Usage Minimum (224) 118
+ * # 0x29, 0xe7, // Usage Maximum (231) 120
+ * # 0x15, 0x00, // Logical Minimum (0) 122
+ * # 0x25, 0x01, // Logical Maximum (1) 124
+ * # 0x75, 0x01, // Report Size (1) 126
+ * # 0x95, 0x08, // Report Count (8) 128
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 130
+ * # 0x05, 0x07, // Usage Page (Keyboard) 132
+ * # 0x19, 0x00, // Usage Minimum (0) 134
+ * # 0x29, 0xff, // Usage Maximum (255) 136
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 138
+ * # 0x75, 0x08, // Report Size (8) 141
+ * # 0x95, 0x06, // Report Count (6) 143
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 145
+ * # 0xc0, // End Collection 147
+ * # 0x05, 0x0c, // Usage Page (Consumer Devices) 148
+ * # 0x09, 0x01, // Usage (Consumer Control) 150
+ * # 0xa1, 0x01, // Collection (Application) 152
+ * # 0x85, 0x04, // Report ID (4) 154
+ * # 0x19, 0x01, // Usage Minimum (1) 156
+ * # 0x2a, 0x9c, 0x02, // Usage Maximum (668) 158
+ * # 0x15, 0x01, // Logical Minimum (1) 161
+ * # 0x26, 0x9c, 0x02, // Logical Maximum (668) 163
+ * # 0x95, 0x01, // Report Count (1) 166
+ * # 0x75, 0x10, // Report Size (16) 168
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 170
+ * # 0xc0, // End Collection 172
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 173
+ * # 0x09, 0x80, // Usage (System Control) 175
+ * # 0xa1, 0x01, // Collection (Application) 177
+ * # 0x85, 0x05, // Report ID (5) 179
+ * # 0x19, 0x81, // Usage Minimum (129) 181
+ * # 0x29, 0x83, // Usage Maximum (131) 183
+ * # 0x15, 0x00, // Logical Minimum (0) 185
+ * # 0x25, 0x01, // Logical Maximum (1) 187
+ * # 0x75, 0x01, // Report Size (1) 189
+ * # 0x95, 0x03, // Report Count (3) 191
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 193
+ * # 0x95, 0x05, // Report Count (5) 195
+ * # 0x81, 0x01, // Input (Cnst,Arr,Abs) 197
+ * # 0xc0, // End Collection 199
+ * #
+ * R: 200 05 01 09 0e a1 01 85 11 05 0d 09 21 a1 02 15 00 25 01 75 01 95 01 a1 00 05 09 09 01 81 02 05 0d 09 33 81 02 95 06 81 03 a1 02 05 01 09 37 16 00 80 26 ff 7f 75 10 95 01 81 06 35 00 46 10 0e 15 00 26 10 0e 09 48 b1 02 45 00 c0 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 75 08 95 01 81 01 c0 c0 c0 05 01 09 06 a1 01 85 03 05 07 19 e0 29 e7 15 00 25 01 75 01 95 08 81 02 05 07 19 00 29 ff 26 ff 00 75 08 95 06 81 00 c0 05 0c 09 01 a1 01 85 04 19 01 2a 9c 02 15 01 26 9c 02 95 01 75 10 81 00 c0 05 01 09 80 a1 01 85 05 19 81 29 83 15 00 25 01 75 01 95 03 81 02 95 05 81 01 c0
+ * N: HUION Huion Tablet_GS1563
+ * I: 3 256c 2009
+ *
+ *
+ *
+ * VENDOR MODE
+ * HUION_FIRMWARE_ID="HUION_M22d_241101"
+ * HUION_MAGIC_BYTES="1403201101ac9900ff3fd81305080080083c4010"
+ *
+ * MAGIC BYTES
+ * [LogicalMaximum, X ] [LogicalMaximum, Y ] [LogicalMaximum, Pressure] [ LPI]
+ * 14 03 [ 20 11 01] [ ac 99 00] [ ff 3f] [d8 13] 05 08 00 80 08 3c 40 10
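+ *
+ * A minimal sketch of how these little-endian fields unpack
+ * (illustrative arithmetic only, not code used by this driver):
+ *
+ *   x_max = 0x20 | (0x11 << 8) | (0x01 << 16)   -> 69920
+ *   y_max = 0xac | (0x99 << 8)                  -> 39340
+ *   p_max = 0xff | (0x3f << 8)                  -> 16383
+ *   lpi   = 0xd8 | (0x13 << 8)                  -> 5080 (200 lines/mm)
+ *
+ * These match the LogicalMaximum values in fixed_rdesc_vendor below.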
+ *
+ * See Huion__Kamvas13Gen3.bpf.c for details on the button/dial reports and
+ * their caveats; that tablet behaves very similarly to the Kamvas 16 Gen 3.
+ */
+
+/* Filled in by udev-hid-bpf */
+char UDEV_PROP_HUION_FIRMWARE_ID[64];
+
+char EXPECTED_FIRMWARE_ID[] = "HUION_M22d_";
+
+__u8 last_button_state;
+
+static const __u8 disabled_rdesc_tablet[] = {
+ FixedSizeVendorReport(28) /* Input report 4 */
+};
+
+static const __u8 disabled_rdesc_wheel[] = {
+ FixedSizeVendorReport(9) /* Input report 17 */
+};
+
+static const __u8 fixed_rdesc_vendor[] = {
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ ReportId(VENDOR_REPORT_ID)
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionPhysical(
+ /*
+ * I have only examined the tablet's behavior while using
+ * the PW600L pen, which does not have an eraser.
+ * Because of this, I don't know where the Eraser and Invert
+ * bits will go, or if they work as one would expect.
+ *
+ * For the time being, there is no expectation that a pen
+ * with an eraser will work without modifications here.
+ */
+ ReportSize(1)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportCount(3)
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_SecondaryBarrelSwitch
+ Input(Var|Abs)
+ PushPop(
+ ReportCount(1)
+ UsagePage_Button
+ Usage_i8(0x4a) /* (BTN_STYLUS3 + 1) & 0xff */
+ Input(Var|Abs)
+ )
+ ReportCount(3)
+ Input(Const)
+ ReportCount(1)
+ Usage_Dig_InRange
+ Input(Var|Abs)
+ ReportSize(16)
+ ReportCount(1)
+ PushPop(
+ UsagePage_GenericDesktop
+ Unit(cm)
+ UnitExponent(-2)
+ LogicalMinimum_i16(0)
+ PhysicalMinimum_i16(0)
+ /*
+ * The tablet has a logical maximum of 69920 x 39340
+ * and a claimed resolution of 5080 LPI (200 L/mm)
+ * This works out to a physical maximum of
+ * 349.6 x 196.7mm, which matches Huion's advertised
+ * (rounded) active area dimensions from
+ * https://www.huion.com/products/pen_display/Kamvas/kamvas-16-gen-3.html
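+ * (e.g. 69920 / 200 lines-per-mm = 349.6 mm, which is what
+ * PhysicalMaximum_i16(3496) encodes given the cm unit and
+ * UnitExponent(-2) above)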
+ *
+ * The Kamvas uses data[8] for the 3rd byte of the X axis; moving it
+ * to follow data[2] and data[3] yields a contiguous little-endian
+ * 24-bit value. (See hid_fix_event_huion_kamvas16_gen3 below.)
+ */
+ ReportSize(24)
+ LogicalMaximum_i32(69920)
+ PhysicalMaximum_i16(3496)
+ Usage_GD_X
+ Input(Var|Abs)
+ ReportSize(16)
+ LogicalMaximum_i16(39340)
+ PhysicalMaximum_i16(1967)
+ Usage_GD_Y
+ Input(Var|Abs)
+ )
+ ReportSize(16)
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(16383)
+ Usage_Dig_TipPressure
+ Input(Var|Abs)
+ ReportSize(8)
+ ReportCount(1)
+ Input(Const)
+ ReportCount(2)
+ PushPop(
+ Unit(deg)
+ UnitExponent(0)
+ LogicalMinimum_i8(-60)
+ PhysicalMinimum_i8(-60)
+ LogicalMaximum_i8(60)
+ PhysicalMaximum_i8(60)
+ Usage_Dig_XTilt
+ Usage_Dig_YTilt
+ Input(Var|Abs)
+ )
+ )
+ )
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ ReportId(CUSTOM_PAD_REPORT_ID)
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ /*
+ * The first 3 bytes are somewhat vestigial and will
+ * always be set to zero. Their presence here is needed
+ * to ensure that this device will be detected as a
+ * tablet pad by software that otherwise wouldn't know
+ * any better.
+ */
+ /* (data[1] & 0x01) barrel switch */
+ ReportSize(1)
+ ReportCount(1)
+ Usage_Dig_BarrelSwitch
+ Input(Var|Abs)
+ ReportCount(7)
+ Input(Const)
+ /* data[2] X */
+ /* data[3] Y */
+ ReportSize(8)
+ ReportCount(2)
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ Input(Var|Abs)
+ /*
+ * (data[4] & 0x01) button 1
+ * (data[4] & 0x02) button 2
+ * (data[4] & 0x04) button 3
+ * (data[4] & 0x08) button 4
+ * (data[4] & 0x10) button 5
+ * (data[4] & 0x20) button 6
+ * (data[4] & 0x40) button 7 (top wheel button)
+ * (data[4] & 0x80) button 8 (bottom wheel button)
+ */
+ ReportSize(1)
+ ReportCount(8)
+ UsagePage_Button
+ UsageMinimum_i8(1)
+ UsageMaximum_i8(8)
+ Input(Var|Abs)
+ /* data[5] top wheel (signed, positive clockwise) */
+ ReportSize(8)
+ ReportCount(1)
+ UsagePage_GenericDesktop
+ Usage_GD_Wheel
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ Input(Var|Rel)
+ /* data[6] bottom wheel (signed, positive clockwise) */
+ UsagePage_Consumer
+ Usage_Con_ACPan
+ Input(Var|Rel)
+ )
+ /*
+ * The kernel will drop reports that are bigger than the
+ * largest report specified in the HID descriptor.
+ * Therefore, our modified descriptor needs to have at least one
+ * HID report that is as long as, or longer than, the largest
+ * report in the original descriptor.
+ *
+ * This macro expands to a no-op report that is padded to the
+ * provided length.
+ */
+ FixedSizeVendorReport(VENDOR_REPORT_LENGTH)
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc_huion_kamvas16_gen3, struct hid_bpf_ctx *hid_ctx)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+ __s32 rdesc_size = hid_ctx->size;
+ __u8 have_fw_id;
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ have_fw_id = __builtin_memcmp(UDEV_PROP_HUION_FIRMWARE_ID,
+ EXPECTED_FIRMWARE_ID,
+ sizeof(EXPECTED_FIRMWARE_ID) - 1) == 0;
+
+ if (have_fw_id) {
+ /*
+ * Tablet should be in vendor mode.
+ * Disable the unused devices
+ */
+ if (rdesc_size == TABLET_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, disabled_rdesc_tablet,
+ sizeof(disabled_rdesc_tablet));
+ return sizeof(disabled_rdesc_tablet);
+ }
+
+ if (rdesc_size == WHEEL_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, disabled_rdesc_wheel,
+ sizeof(disabled_rdesc_wheel));
+ return sizeof(disabled_rdesc_wheel);
+ }
+ }
+
+ /*
+ * Regardless of which mode the tablet is in, always fix the vendor
+ * descriptor in case the udev property just happened to not be set
+ */
+ if (rdesc_size == VENDOR_DESCRIPTOR_LENGTH) {
+ __builtin_memcpy(data, fixed_rdesc_vendor, sizeof(fixed_rdesc_vendor));
+ return sizeof(fixed_rdesc_vendor);
+ }
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(hid_fix_event_huion_kamvas16_gen3, struct hid_bpf_ctx *hid_ctx)
+{
+ __u8 *data = hid_bpf_get_data(hid_ctx, 0 /* offset */, VENDOR_REPORT_LENGTH /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /* Handle vendor reports only */
+ if (hid_ctx->size != VENDOR_REPORT_LENGTH)
+ return 0;
+ if (data[0] != VENDOR_REPORT_ID)
+ return 0;
+
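+ /*
+ * The report subtype is the high nibble of data[1]: 0x8/0x0 for pen
+ * in/out of range, 0xe for pad buttons, 0xf for wheels (see the
+ * VENDOR_REPORT_SUBTYPE_* defines above).
+ */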
+ __u8 report_subtype = (data[1] >> 4) & 0x0f;
+
+ if (report_subtype == VENDOR_REPORT_SUBTYPE_PEN ||
+ report_subtype == VENDOR_REPORT_SUBTYPE_PEN_OUT) {
+ /* Invert Y tilt */
+ data[11] = -data[11];
+
+ /*
+ * Rearrange the bytes of the report so that
+ * [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
+ * will be arranged as
+ * [0, 1, 2, 3, 8, 4, 5, 6, 7, 9, 10, 11, 12, 13]
+ */
+ __u8 x_24 = data[8];
+
+ data[8] = data[7];
+ data[7] = data[6];
+ data[6] = data[5];
+ data[5] = data[4];
+
+ data[4] = x_24;
+
+ } else if (report_subtype == VENDOR_REPORT_SUBTYPE_BUTTONS ||
+ report_subtype == VENDOR_REPORT_SUBTYPE_WHEELS) {
+ struct pad_report {
+ __u8 report_id;
+ __u8 btn_stylus:1;
+ __u8 padding:7;
+ __u8 x;
+ __u8 y;
+ __u8 buttons;
+ __s8 top_wheel;
+ __s8 bottom_wheel;
+ } __attribute__((packed)) *pad_report;
+
+ __s8 top_wheel = 0;
+ __s8 bottom_wheel = 0;
+
+ switch (report_subtype) {
+ case VENDOR_REPORT_SUBTYPE_WHEELS:
+ /*
+ * The wheel direction byte is 1 for clockwise rotation
+ * and 2 for counter-clockwise.
+ * Change it to 1 and -1, respectively.
+ */
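+ /* data[3] selects which wheel moved: 1 = top, 2 = bottom */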
+ switch (data[3]) {
+ case 1:
+ top_wheel = (data[5] == 1) ? 1 : -1;
+ break;
+ case 2:
+ bottom_wheel = (data[5] == 1) ? 1 : -1;
+ break;
+ }
+ break;
+
+ case VENDOR_REPORT_SUBTYPE_BUTTONS:
+ /*
+ * If a button is already being held, ignore any new
+ * button event unless it's a release.
+ *
+ * The tablet only cleanly handles one button being held
+ * at a time, and trying to hold multiple buttons
+ * (particularly wheel+pad buttons) can result in sequences
+ * of reports that look like imaginary presses and releases.
+ *
+ * This is an imperfect way to filter out some of these
+ * reports.
+ */
+ if (last_button_state != 0x00 && data[4] != 0x00)
+ break;
+
+ last_button_state = data[4];
+ break;
+ }
+
+ pad_report = (struct pad_report *)data;
+
+ pad_report->report_id = CUSTOM_PAD_REPORT_ID;
+ pad_report->btn_stylus = 0;
+ pad_report->x = 0;
+ pad_report->y = 0;
+ pad_report->buttons = last_button_state;
+ pad_report->top_wheel = top_wheel;
+ pad_report->bottom_wheel = bottom_wheel;
+
+ return sizeof(struct pad_report);
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(huion_kamvas16_gen3) = {
+ .hid_device_event = (void *)hid_fix_event_huion_kamvas16_gen3,
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc_huion_kamvas16_gen3,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case VENDOR_DESCRIPTOR_LENGTH:
+ case TABLET_DESCRIPTOR_LENGTH:
+ case WHEEL_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c b/drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c
new file mode 100644
index 000000000000..b17719d6d9c7
--- /dev/null
+++ b/drivers/hid/bpf/progs/Logitech__SpaceNavigator.bpf.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Curran Muhlberger
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_LOGITECH 0x046D
+#define PID_SPACENAVIGATOR 0xC626
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_ANY, VID_LOGITECH, PID_SPACENAVIGATOR)
+);
+
+/*
+ * The 3Dconnexion SpaceNavigator 3D Mouse is a multi-axis controller with 6
+ * axes (grouped as X,Y,Z and Rx,Ry,Rz). Axis data is absolute, but the report
+ * descriptor erroneously declares it to be relative. We fix the report
+ * descriptor to mark both axis collections as absolute.
+ *
+ * The kernel attempted to fix this in commit 24985cf68612 ("HID: support
+ * Logitech/3DConnexion SpaceTraveler and SpaceNavigator"), but the descriptor
+ * data offsets are incorrect for at least some SpaceNavigator units.
+ */
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ /*
+ * Offsets of the Input items in the X,Y,Z and Rx,Ry,Rz collections
+ * for all known firmware variants:
+ * - 2009 model: X,Y,Z @ 32-33, Rx,Ry,Rz @ 49-50 (fixup originally
+ *   applied in the kernel)
+ * - 2016 model (size == 228): X,Y,Z @ 36-37, Rx,Ry,Rz @ 53-54
+ *
+ * The descriptor size of the 2009 model is not known, and there is
+ * evidence for at least two other variants (sizes 202 and 217)
+ * besides the 2016 model, so we try all known offsets regardless of
+ * descriptor size.
+ */
+ const u8 offsets[] = {32, 36, 49, 53};
+
+ for (size_t idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
+ u8 offset = offsets[idx];
+
+ /* if Input (Data,Var,Rel) , make it Input (Data,Var,Abs) */
+ if (data[offset] == 0x81 && data[offset + 1] == 0x06)
+ data[offset + 1] = 0x02;
+ }
+
+ return 0;
+}
+
+HID_BPF_OPS(logitech_spacenavigator) = {
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ /* Ensure report descriptor size matches one of the known variants. */
+ if (ctx->rdesc_size != 202 &&
+ ctx->rdesc_size != 217 &&
+ ctx->rdesc_size != 228) {
+ ctx->retval = -EINVAL;
+ return 0;
+ }
+
+ /* Check whether the kernel has already applied the fix. */
+ if ((ctx->rdesc[32] == 0x81 && ctx->rdesc[33] == 0x02 &&
+ ctx->rdesc[49] == 0x81 && ctx->rdesc[50] == 0x02) ||
+ (ctx->rdesc[36] == 0x81 && ctx->rdesc[37] == 0x02 &&
+ ctx->rdesc[53] == 0x81 && ctx->rdesc[54] == 0x02))
+ ctx->retval = -EINVAL;
+ else
+ ctx->retval = 0;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c b/drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c
new file mode 100644
index 000000000000..156d75af516d
--- /dev/null
+++ b/drivers/hid/bpf/progs/WALTOP__Batteryless-Tablet.bpf.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Red Hat
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_WALTOP 0x172F
+#define PID_BATTERYLESS_TABLET 0x0505
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_ANY, VID_WALTOP, PID_BATTERYLESS_TABLET)
+);
+
+#define EXPECTED_RDESC_SIZE 335
+#define PEN_REPORT_ID 16
+
+#define TIP_SWITCH BIT(0)
+#define BARREL_SWITCH BIT(1)
+#define SECONDARY_BARREL_SWITCH BIT(5)
+
+static __u8 last_button_state;
+
+static const __u8 fixed_rdesc[] = {
+ 0x05, 0x01, // Usage Page (Generic Desktop)
+ 0x09, 0x02, // Usage (Mouse)
+ 0xa1, 0x01, // Collection (Application)
+ 0x85, 0x01, // Report ID (1)
+ 0x09, 0x01, // Usage (Pointer)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x05, 0x09, // Usage Page (Button)
+ 0x19, 0x01, // Usage Minimum (1)
+ 0x29, 0x05, // Usage Maximum (5)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x01, // Logical Maximum (1)
+ 0x75, 0x01, // Report Size (1)
+ 0x95, 0x05, // Report Count (5)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x75, 0x03, // Report Size (3)
+ 0x95, 0x01, // Report Count (1)
+ 0x81, 0x03, // Input (Cnst,Var,Abs)
+ 0x05, 0x01, // Usage Page (Generic Desktop)
+ 0x09, 0x30, // Usage (X)
+ 0x09, 0x31, // Usage (Y)
+ 0x09, 0x38, // Usage (Wheel)
+ 0x15, 0x81, // Logical Minimum (-127)
+ 0x25, 0x7f, // Logical Maximum (127)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x03, // Report Count (3)
+ 0x81, 0x06, // Input (Data,Var,Rel)
+ 0x05, 0x0c, // Usage Page (Consumer)
+ 0x15, 0x81, // Logical Minimum (-127)
+ 0x25, 0x7f, // Logical Maximum (127)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x01, // Report Count (1)
+ 0x0a, 0x38, 0x02, // Usage (AC Pan)
+ 0x81, 0x06, // Input (Data,Var,Rel)
+ 0xc0, // End Collection
+ 0xc0, // End Collection
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x02, // Usage (Pen)
+ 0xa1, 0x01, // Collection (Application)
+ 0x85, 0x02, // Report ID (2)
+ 0x09, 0x20, // Usage (Stylus)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x09, 0x00, // Usage (0x0000)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x09, // Report Count (9)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x09, 0x3f, // Usage (Azimuth)
+ 0x09, 0x40, // Usage (Altitude)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x02, // Report Count (2)
+ 0xb1, 0x02, // Feature (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, 0x05, // Report ID (5)
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x20, // Usage (Stylus)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x09, 0x00, // Usage (0x0000)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x07, // Report Count (7)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, 0x0a, // Report ID (10)
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x20, // Usage (Stylus)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x09, 0x00, // Usage (0x0000)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x00, // Logical Maximum (255)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x07, // Report Count (7)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0x85, 0x10, // Report ID (16)
+ 0x09, 0x20, // Usage (Stylus)
+ 0xa1, 0x00, // Collection (Physical)
+ 0x09, 0x42, // Usage (Tip Switch)
+ 0x09, 0x44, // Usage (Barrel Switch)
+ 0x09, 0x3c, // Usage (Invert)
+ 0x09, 0x45, // Usage (Eraser)
+ 0x09, 0x32, // Usage (In Range)
+ 0x09, 0x5a, // Usage (Secondary Barrel Switch) <-- added
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x01, // Logical Maximum (1)
+ 0x75, 0x01, // Report Size (1)
+ 0x95, 0x06, // Report Count (6) <--- changed from 5
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x95, 0x02, // Report Count (2) <--- changed from 3
+ 0x81, 0x03, // Input (Cnst,Var,Abs)
+ 0x05, 0x01, // Usage Page (Generic Desktop)
+ 0x09, 0x30, // Usage (X)
+ 0x75, 0x10, // Report Size (16)
+ 0x95, 0x01, // Report Count (1)
+ 0xa4, // Push
+ 0x55, 0x0d, // Unit Exponent (-3)
+ 0x65, 0x33, // Unit (EnglishLinear: in³)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0x00, 0x7d, // Logical Maximum (32000)
+ 0x35, 0x00, // Physical Minimum (0)
+ 0x46, 0x00, 0x7d, // Physical Maximum (32000)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x09, 0x31, // Usage (Y)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0x20, 0x4e, // Logical Maximum (20000)
+ 0x35, 0x00, // Physical Minimum (0)
+ 0x46, 0x20, 0x4e, // Physical Maximum (20000)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x30, // Usage (Tip Pressure)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x26, 0xff, 0x07, // Logical Maximum (2047)
+ 0x35, 0x00, // Physical Minimum (0)
+ 0x46, 0xff, 0x07, // Physical Maximum (2047)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x05, 0x0d, // Usage Page (Digitizers)
+ 0x09, 0x3d, // Usage (X Tilt)
+ 0x09, 0x3e, // Usage (Y Tilt)
+ 0x15, 0xc4, // Logical Minimum (-60) <- changed from -127
+ 0x25, 0x3c, // Logical Maximum (60) <- changed from 127
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x02, // Report Count (2)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0xc0, // End Collection
+ 0xc0, // End Collection
+ 0x05, 0x01, // Usage Page (Generic Desktop)
+ 0x09, 0x06, // Usage (Keyboard)
+ 0xa1, 0x01, // Collection (Application)
+ 0x85, 0x0d, // Report ID (13)
+ 0x05, 0x07, // Usage Page (Keyboard/Keypad)
+ 0x19, 0xe0, // Usage Minimum (224)
+ 0x29, 0xe7, // Usage Maximum (231)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x01, // Logical Maximum (1)
+ 0x75, 0x01, // Report Size (1)
+ 0x95, 0x08, // Report Count (8)
+ 0x81, 0x02, // Input (Data,Var,Abs)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x01, // Report Count (1)
+ 0x81, 0x01, // Input (Cnst,Arr,Abs)
+ 0x05, 0x07, // Usage Page (Keyboard/Keypad)
+ 0x19, 0x00, // Usage Minimum (0)
+ 0x29, 0x65, // Usage Maximum (101)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x65, // Logical Maximum (101)
+ 0x75, 0x08, // Report Size (8)
+ 0x95, 0x05, // Report Count (5)
+ 0x81, 0x00, // Input (Data,Arr,Abs)
+ 0xc0, // End Collection
+ 0x05, 0x0c, // Usage Page (Consumer)
+ 0x09, 0x01, // Usage (Consumer Control)
+ 0xa1, 0x01, // Collection (Application)
+ 0x85, 0x0c, // Report ID (12)
+ 0x09, 0xe9, // Usage (Volume Increment)
+ 0x09, 0xea, // Usage (Volume Decrement)
+ 0x09, 0xe2, // Usage (Mute)
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x01, // Logical Maximum (1)
+ 0x75, 0x01, // Report Size (1)
+ 0x95, 0x03, // Report Count (3)
+ 0x81, 0x06, // Input (Data,Var,Rel)
+ 0x75, 0x05, // Report Size (5)
+ 0x95, 0x01, // Report Count (1)
+ 0x81, 0x07, // Input (Cnst,Var,Rel)
+ 0xc0, // End Collection
+};
+
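+/*
+ * Helpers for scaled_log2() below: __builtin_clzg(x, 32) counts leading
+ * zeros and returns the fallback value 32 for x == 0, so bitwidth32(0) is
+ * 0. Note that the generic clz builtin assumes a reasonably recent
+ * GCC/Clang.
+ */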
+static inline unsigned int bitwidth32(__u32 x)
+{
+ return 32 - __builtin_clzg(x, 32);
+}
+
+static inline unsigned int floor_log2_32(__u32 x)
+{
+ return bitwidth32(x) - 1;
+}
+
+/* Maps the interval [0, 2047] to itself using a scaled
+ * approximation of the function log2(x+1).
+ */
+static unsigned int scaled_log2(__u16 v)
+{
+ const unsigned int XMAX = 2047;
+ const unsigned int YMAX = 11; /* log2(2048) = 11 */
+
+ unsigned int x = v + 1;
+ unsigned int n = floor_log2_32(x);
+ unsigned int b = 1 << n;
+
+ /* Fixed-point fraction in [0, 1), linearly
+ * interpolated using delta-y = 1 and
+ * delta-x = (2b - b) = b.
+ */
+ unsigned int frac = (x - b) << YMAX;
+ unsigned int lerp = frac / b;
+ unsigned int log2 = (n << YMAX) + lerp;
+
+ return ((log2 * XMAX) / YMAX) >> YMAX;
+}
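+
+/*
+ * Worked example (illustrative only): for v = 2047, x = 2048, n = 11,
+ * b = 2048 and frac = 0, so log2 = 11 << 11 = 22528 and the result is
+ * ((22528 * 2047) / 11) >> 11 = 2047, i.e. the maximum maps to itself.
+ * For v = 102 the result is 1229, close to the 102 * 12 = 1224 produced
+ * by the linear branch in waltop_fix_events() below, which keeps the
+ * transition between the two curves smooth.
+ */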
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(hid_fix_rdesc, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 4096 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ __builtin_memcpy(data, fixed_rdesc, sizeof(fixed_rdesc));
+
+ return sizeof(fixed_rdesc);
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(waltop_fix_events, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, 10 /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ __u8 report_id = data[0];
+
+ if (report_id != PEN_REPORT_ID)
+ return 0;
+
+ /* On this tablet if the secondary barrel switch is pressed,
+ * the tablet sends tip down and barrel down. Change this to
+ * just secondary barrel down when there is no ambiguity.
+ *
+ * It's possible that there is a bug in the firmware and the
+ * device intends to set invert + eraser instead (i.e. the
+ * physical button is an eraser button) but since
+ * the pressure is always zero, said eraser button
+ * would be useless anyway.
+ *
+ * So let's just change the button to secondary barrel down.
+ */
+
+ __u8 tip_switch = data[1] & TIP_SWITCH;
+ __u8 barrel_switch = data[1] & BARREL_SWITCH;
+
+ __u8 tip_held = last_button_state & TIP_SWITCH;
+ __u8 barrel_held = last_button_state & BARREL_SWITCH;
+
+ if (tip_switch && barrel_switch && !tip_held && !barrel_held) {
+ data[1] &= ~(TIP_SWITCH | BARREL_SWITCH); /* release tip and barrel */
+ data[1] |= SECONDARY_BARREL_SWITCH; /* set secondary barrel switch */
+ }
+
+ last_button_state = data[1];
+
+ /* The pressure sensor on this tablet maps around half of the
+ * logical pressure range into the interval [0-100]. Further
+ * pressure causes the sensor value to increase exponentially
+ * up to a maximum value of 2047.
+ *
+ * The values 12 and 102 were chosen to have an integer slope
+ * with smooth transition between the two curves around the
+ * value 100.
+ */
+
+ __u16 pressure = (((__u16)data[6]) << 0) | (((__u16)data[7]) << 8);
+
+ if (pressure <= 102)
+ pressure *= 12;
+ else
+ pressure = scaled_log2(pressure);
+
+ data[6] = pressure >> 0;
+ data[7] = pressure >> 8;
+
+ return 0;
+}
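+
+/*
+ * Worked example (illustrative only): a fresh tip+barrel press while the
+ * pen is in range arrives as data[1] = 0x13; the remapping above clears
+ * bits 0 and 1 and sets bit 5, yielding 0x30 (in range + secondary
+ * barrel). A raw pressure of 50 becomes 600 via the linear branch, while
+ * a raw pressure of 1000 becomes scaled_log2(1000) = 1852.
+ */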
+
+HID_BPF_OPS(waltop_batteryless) = {
+ .hid_device_event = (void *)waltop_fix_events,
+ .hid_rdesc_fixup = (void *)hid_fix_rdesc,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ if (ctx->rdesc_size == EXPECTED_RDESC_SIZE)
+ ctx->retval = 0;
+ else
+ ctx->retval = -EINVAL;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c b/drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c
new file mode 100644
index 000000000000..2502fcc9ede6
--- /dev/null
+++ b/drivers/hid/bpf/progs/XPPen__Deco01V3.bpf.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2025 Red Hat
+ */
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_UGEE 0x28BD /* VID is shared with SinoWealth and Glorious and probably others */
+#define PID_DECO_01_V3 0x0947
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_DECO_01_V3),
+);
+
+/*
+ * Default report descriptor reports:
+ * - a report descriptor for the pad buttons, reported as key sequences
+ * - a report descriptor for the pen
+ * - a vendor-specific report descriptor
+ *
+ * The Pad report descriptor, see
+ * https://gitlab.freedesktop.org/libevdev/udev-hid-bpf/-/issues/54
+ *
+ * # Report descriptor length: 102 bytes
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 0
+ * 0x09, 0x02, // Usage (Mouse) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x85, 0x09, // Report ID (9) 6
+ * 0x09, 0x01, // Usage (Pointer) 8
+ * 0xa1, 0x00, // Collection (Physical) 10
+ * 0x05, 0x09, // Usage Page (Button) 12
+ * 0x19, 0x01, // UsageMinimum (1) 14
+ * 0x29, 0x03, // UsageMaximum (3) 16
+ * 0x15, 0x00, // Logical Minimum (0) 18
+ * 0x25, 0x01, // Logical Maximum (1) 20
+ * 0x95, 0x03, // Report Count (3) 22
+ * 0x75, 0x01, // Report Size (1) 24
+ * 0x81, 0x02, // Input (Data,Var,Abs) 26
+ * 0x95, 0x05, // Report Count (5) 28
+ * 0x81, 0x01, // Input (Cnst,Arr,Abs) 30
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 32
+ * 0x09, 0x30, // Usage (X) 34
+ * 0x09, 0x31, // Usage (Y) 36
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 38
+ * 0x95, 0x02, // Report Count (2) 41
+ * 0x75, 0x10, // Report Size (16) 43
+ * 0x81, 0x02, // Input (Data,Var,Abs) 45
+ * 0x05, 0x0d, // Usage Page (Digitizers) 47
+ * 0x09, 0x30, // Usage (Tip Pressure) 49
+ * 0x26, 0xff, 0x07, // Logical Maximum (2047) 51
+ * 0x95, 0x01, // Report Count (1) 54
+ * 0x75, 0x10, // Report Size (16) 56
+ * 0x81, 0x02, // Input (Data,Var,Abs) 58
+ * 0xc0, // End Collection 60
+ * 0xc0, // End Collection 61
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 62
+ * 0x09, 0x06, // Usage (Keyboard) 64
+ * 0xa1, 0x01, // Collection (Application) 66
+ * 0x85, 0x06, // Report ID (6) 68
+ * 0x05, 0x07, // Usage Page (Keyboard/Keypad) 70
+ * 0x19, 0xe0, // UsageMinimum (224) 72
+ * 0x29, 0xe7, // UsageMaximum (231) 74
+ * 0x15, 0x00, // Logical Minimum (0) 76
+ * 0x25, 0x01, // Logical Maximum (1) 78
+ * 0x75, 0x01, // Report Size (1) 80
+ * 0x95, 0x08, // Report Count (8) 82
+ * 0x81, 0x02, // Input (Data,Var,Abs) 84
+ * 0x05, 0x07, // Usage Page (Keyboard/Keypad) 86
+ * 0x19, 0x00, // UsageMinimum (0) 88
+ * 0x29, 0xff, // UsageMaximum (255) 90
+ * 0x26, 0xff, 0x00, // Logical Maximum (255) 92
+ * 0x75, 0x08, // Report Size (8) 95
+ * 0x95, 0x06, // Report Count (6) 97
+ * 0x81, 0x00, // Input (Data,Arr,Abs) 99
+ * 0xc0, // End Collection 101
+ *
+ * And key events for buttons top->bottom are:
+ * Buttons released: 06 00 00 00 00 00 00 00
+ * Button1: 06 00 05 00 00 00 00 00 -> b
+ * Button2: 06 00 08 00 00 00 00 00 -> e
+ * Button3: 06 04 00 00 00 00 00 00 -> LAlt
+ * Button4: 06 00 2c 00 00 00 00 00 -> Space
+ * Button5: 06 01 16 00 00 00 00 00 -> LControl + s
+ * Button6: 06 01 1d 00 00 00 00 00 -> LControl + z
+ * Button7: 06 01 57 00 00 00 00 00 -> LControl + Keypad Plus
+ * Button8: 06 01 56 00 00 00 00 00 -> LControl + Keypad Dash
+ *
+ * When multiple buttons are pressed at the same time, the values used to
+ * identify the buttons are identical, but they appear in different bytes of the
+ * record. For example, when button 2 (0x08) and button 1 (0x05) are pressed,
+ * this is the report:
+ *
+ * Buttons 2 and 1: 06 00 08 05 00 00 00 00 -> e + b
+ *
+ * Buttons 1, 2, 4, 5, 6, 7 and 8 can be matched by finding their values
+ * in the report.
+ *
+ * Button 3 is pressed when the 3rd bit is 1. For example, pressing buttons 3
+ * and 5 generates this report:
+ *
+ * Buttons 3 and 5: 06 05 16 00 00 00 00 00 -> LControl + LAlt + s
+ * -- --
+ * | |
+ * | `- Button 5 (0x16)
+ * `- 0x05 = 0101. Button 3 is pressed
+ * ^
+ *
+ * pad_buttons contains the list of button values that can be matched in
+ * HID_BPF_DEVICE_EVENT. Button 3 only has a placeholder entry there, as
+ * it is signalled by its dedicated bit instead.
+ *
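+ * For example, the report 06 05 16 00 00 00 00 00 (buttons 3 and 5, see
+ * above) decodes to button_mask = BIT(2) | BIT(4) = 0x14: BIT(2) comes
+ * from the dedicated bit in byte 1 and 0x16 matches index 4 of
+ * pad_buttons (a sketch of the decoding done in the device event handler
+ * below).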
+ *
+ * The Pen report descriptor announces a wrong tilt range:
+ *
+ * Report descriptor length: 109 bytes
+ * 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * 0x09, 0x02, // Usage (Pen) 2
+ * 0xa1, 0x01, // Collection (Application) 4
+ * 0x85, 0x07, // Report ID (7) 6
+ * 0x09, 0x20, // Usage (Stylus) 8
+ * 0xa1, 0x01, // Collection (Application) 10
+ * 0x09, 0x42, // Usage (Tip Switch) 12
+ * 0x09, 0x44, // Usage (Barrel Switch) 14
+ * 0x09, 0x45, // Usage (Eraser) 16
+ * 0x09, 0x3c, // Usage (Invert) 18
+ * 0x15, 0x00, // Logical Minimum (0) 20
+ * 0x25, 0x01, // Logical Maximum (1) 22
+ * 0x75, 0x01, // Report Size (1) 24
+ * 0x95, 0x04, // Report Count (4) 26
+ * 0x81, 0x02, // Input (Data,Var,Abs) 28
+ * 0x95, 0x01, // Report Count (1) 30
+ * 0x81, 0x03, // Input (Cnst,Var,Abs) 32
+ * 0x09, 0x32, // Usage (In Range) 34
+ * 0x95, 0x01, // Report Count (1) 36
+ * 0x81, 0x02, // Input (Data,Var,Abs) 38
+ * 0x95, 0x02, // Report Count (2) 40
+ * 0x81, 0x03, // Input (Cnst,Var,Abs) 42
+ * 0x75, 0x10, // Report Size (16) 44
+ * 0x95, 0x01, // Report Count (1) 46
+ * 0x35, 0x00, // Physical Minimum (0) 48
+ * 0xa4, // Push 50
+ * 0x05, 0x01, // Usage Page (Generic Desktop) 51
+ * 0x09, 0x30, // Usage (X) 53
+ * 0x65, 0x13, // Unit (EnglishLinear: in) 55
+ * 0x55, 0x0d, // Unit Exponent (-3) 57
+ * 0x46, 0x10, 0x27, // Physical Maximum (10000) 59
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 62
+ * 0x81, 0x02, // Input (Data,Var,Abs) 65
+ * 0x09, 0x31, // Usage (Y) 67
+ * 0x46, 0x6a, 0x18, // Physical Maximum (6250) 69
+ * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 72
+ * 0x81, 0x02, // Input (Data,Var,Abs) 75
+ * 0xb4, // Pop 77
+ * 0x09, 0x30, // Usage (Tip Pressure) 78 (Digitizers page restored by Pop)
+ * 0x45, 0x00, // Physical Maximum (0) 80
+ * 0x26, 0xff, 0x3f, // Logical Maximum (16383) 82
+ * 0x81, 0x42, // Input (Data,Var,Abs,Null) 85
+ * 0x09, 0x3d, // Usage (X Tilt) 87
+ * 0x15, 0x81, // Logical Minimum (-127) 89 <- Change from -127 to -60
+ * 0x25, 0x7f, // Logical Maximum (127) 91 <- Change from 127 to 60
+ * 0x75, 0x08, // Report Size (8) 93
+ * 0x95, 0x01, // Report Count (1) 95
+ * 0x81, 0x02, // Input (Data,Var,Abs) 97
+ * 0x09, 0x3e, // Usage (Y Tilt) 99
+ * 0x15, 0x81, // Logical Minimum (-127) 101 <- Change from -127 to -60
+ * 0x25, 0x7f, // Logical Maximum (127) 103 <- Change from 127 to 60
+ * 0x81, 0x02, // Input (Data,Var,Abs) 105
+ * 0xc0, // End Collection 107
+ * 0xc0, // End Collection 108
+ */
+
+#define PEN_REPORT_DESCRIPTOR_LENGTH 109
+#define PAD_REPORT_DESCRIPTOR_LENGTH 102
+#define PAD_REPORT_LENGTH 8
+#define PAD_REPORT_ID 6
+#define PAD_NUM_BUTTONS 8
+
+static const __u8 fixed_rdesc_pad[] = {
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ // Byte 0 in report is the report ID
+ ReportId(PAD_REPORT_ID)
+ ReportCount(1)
+ ReportSize(8)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ // Byte 1 is the button state
+ UsagePage_Button
+ UsageMinimum_i8(0x01)
+ UsageMaximum_i8(PAD_NUM_BUTTONS)
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ ReportCount(PAD_NUM_BUTTONS)
+ ReportSize(1)
+ Input(Var|Abs)
+ // Byte 2 in report - just exists so we get to be a tablet pad
+ UsagePage_Digitizers
+ Usage_Dig_BarrelSwitch // BTN_STYLUS
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) // padding
+ Input(Const)
+ // Bytes 3/4 in report - just exist so we get to be a tablet pad
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ // Bytes 5-7 are padding so we match the original report length
+ ReportCount(3)
+ ReportSize(8)
+ Input(Const)
+ )
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(xppen_deco01v3_rdesc_fixup, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, HID_MAX_DESCRIPTOR_SIZE /* size */);
+
+ const __u8 wrong_logical_range[] = {0x15, 0x81, 0x25, 0x7f};
+ const __u8 correct_logical_range[] = {0x15, 0xc4, 0x25, 0x3c};
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ switch (hctx->size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+ case PEN_REPORT_DESCRIPTOR_LENGTH:
+ if (__builtin_memcmp(&data[89], wrong_logical_range,
+ sizeof(wrong_logical_range)) == 0)
+ __builtin_memcpy(&data[89], correct_logical_range,
+ sizeof(correct_logical_range));
+ if (__builtin_memcmp(&data[101], wrong_logical_range,
+ sizeof(wrong_logical_range)) == 0)
+ __builtin_memcpy(&data[101], correct_logical_range,
+ sizeof(correct_logical_range));
+ break;
+ }
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(xppen_deco01v3_device_event, struct hid_bpf_ctx *hctx)
+{
+ static const __u8 pad_buttons[] = { 0x05, 0x08, 0x00, 0x2c, 0x16, 0x1d, 0x57, 0x56 };
+ __u8 *data = hid_bpf_get_data(hctx, 0 /* offset */, PAD_REPORT_LENGTH /* size */);
+
+ if (!data)
+ return 0; /* EPERM check */
+
+ if (data[0] == PAD_REPORT_ID) {
+ __u8 button_mask = 0;
+ size_t d, b;
+
+ /* data[1] stores the status of button 3 in the 3rd bit */
+ if (data[1] & BIT(2))
+ button_mask |= BIT(2);
+
+ /* The rest of the descriptor stores the buttons as in pad_buttons */
+ for (d = 2; d < 8; d++) {
+ for (b = 0; b < sizeof(pad_buttons); b++) {
+ if (data[d] != 0 && data[d] == pad_buttons[b])
+ button_mask |= BIT(b);
+ }
+ }
+
+ __u8 report[8] = {PAD_REPORT_ID, button_mask, 0x00};
+
+ __builtin_memcpy(data, report, sizeof(report));
+ }
+ return 0;
+}
+
+HID_BPF_OPS(xppen_deco01v3) = {
+ .hid_rdesc_fixup = (void *)xppen_deco01v3_rdesc_fixup,
+ .hid_device_event = (void *)xppen_deco01v3_device_event,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ switch (ctx->rdesc_size) {
+ case PAD_REPORT_DESCRIPTOR_LENGTH:
+ case PEN_REPORT_DESCRIPTOR_LENGTH:
+ ctx->retval = 0;
+ break;
+ default:
+ ctx->retval = -EINVAL;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/XPPen__Deco02.bpf.c b/drivers/hid/bpf/progs/XPPen__Deco02.bpf.c
new file mode 100644
index 000000000000..4b2549031e56
--- /dev/null
+++ b/drivers/hid/bpf/progs/XPPen__Deco02.bpf.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include "vmlinux.h"
+#include "hid_bpf.h"
+#include "hid_bpf_helpers.h"
+#include "hid_report_helpers.h"
+#include <bpf/bpf_tracing.h>
+
+#define VID_UGEE 0x28BD
+#define PID_DECO_02 0x0803
+
+HID_BPF_CONFIG(
+ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_UGEE, PID_DECO_02),
+);
+
+/*
+ * Devices are:
+ * - Pad input, including pen (This is the only one we are interested in)
+ * - Pen input as mouse
+ * - Vendor
+ *
+ * Descriptors on main device are:
+ * - 7: Pen
+ * - 6: Vendor settings? Unclear
+ * - 3: Keyboard (This is what we want to modify)
+ * - 5: Feature report
+ *
+ * This creates three event nodes:
+ * - XP-PEN DECO 02 Stylus
+ * - XP-PEN DECO 02
+ * - XP-PEN DECO 02 Keyboard (Again, what we want to modify)
+ *
+ * # Report descriptor length: 188 bytes
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 0
+ * # 0x09, 0x02, // Usage (Pen) 2
+ * # 0xa1, 0x01, // Collection (Application) 4
+ * # 0x85, 0x07, // Report ID (7) 6
+ * # 0x09, 0x20, // Usage (Stylus) 8
+ * # 0xa1, 0x00, // Collection (Physical) 10
+ * # 0x09, 0x42, // Usage (Tip Switch) 12
+ * # 0x09, 0x44, // Usage (Barrel Switch) 14
+ * # 0x09, 0x45, // Usage (Eraser) 16
+ * # 0x09, 0x3c, // Usage (Invert) 18
+ * # 0x09, 0x32, // Usage (In Range) 20
+ * # 0x15, 0x00, // Logical Minimum (0) 22
+ * # 0x25, 0x01, // Logical Maximum (1) 24
+ * # 0x75, 0x01, // Report Size (1) 26
+ * # 0x95, 0x05, // Report Count (5) 28
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 30
+ * # 0x95, 0x03, // Report Count (3) 32
+ * # 0x81, 0x03, // Input (Cnst,Var,Abs) 34
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 36
+ * # 0x09, 0x30, // Usage (X) 38
+ * # 0x15, 0x00, // Logical Minimum (0) 40
+ * # 0x26, 0x50, 0x57, // Logical Maximum (22352) 42
+ * # 0x55, 0x0d, // Unit Exponent (-3) 45
+ * # 0x65, 0x13, // Unit (EnglishLinear: in) 47
+ * # 0x35, 0x00, // Physical Minimum (0) 49
+ * # 0x46, 0x50, 0x57, // Physical Maximum (22352) 51
+ * # 0x75, 0x10, // Report Size (16) 54
+ * # 0x95, 0x01, // Report Count (1) 56
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 58
+ * # 0x09, 0x31, // Usage (Y) 60
+ * # 0x15, 0x00, // Logical Minimum (0) 62
+ * # 0x26, 0x92, 0x36, // Logical Maximum (13970) 64
+ * # 0x55, 0x0d, // Unit Exponent (-3) 67
+ * # 0x65, 0x13, // Unit (EnglishLinear: in) 69
+ * # 0x35, 0x00, // Physical Minimum (0) 71
+ * # 0x46, 0x92, 0x36, // Physical Maximum (13970) 73
+ * # 0x75, 0x10, // Report Size (16) 76
+ * # 0x95, 0x01, // Report Count (1) 78
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 80
+ * # 0x05, 0x0d, // Usage Page (Digitizers) 82
+ * # 0x09, 0x30, // Usage (Tip Pressure) 84
+ * # 0x15, 0x00, // Logical Minimum (0) 86
+ * # 0x26, 0xff, 0x1f, // Logical Maximum (8191) 88
+ * # 0x75, 0x10, // Report Size (16) 91
+ * # 0x95, 0x01, // Report Count (1) 93
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 95
+ * # 0xc0, // End Collection 97
+ * # 0xc0, // End Collection 98
+ * # 0x09, 0x0e, // Usage (Device Configuration) 99
+ * # 0xa1, 0x01, // Collection (Application) 101
+ * # 0x85, 0x05, // Report ID (5) 103
+ * # 0x09, 0x23, // Usage (Device Settings) 105
+ * # 0xa1, 0x02, // Collection (Logical) 107
+ * # 0x09, 0x52, // Usage (Inputmode) 109
+ * # 0x09, 0x53, // Usage (Device Index) 111
+ * # 0x25, 0x0a, // Logical Maximum (10) 113
+ * # 0x75, 0x08, // Report Size (8) 115
+ * # 0x95, 0x02, // Report Count (2) 117
+ * # 0xb1, 0x02, // Feature (Data,Var,Abs) 119
+ * # 0xc0, // End Collection 121
+ * # 0xc0, // End Collection 122
+ * # 0x05, 0x0c, // Usage Page (Consumer Devices) 123
+ * # 0x09, 0x36, // Usage (Function Buttons) 125
+ * # 0xa1, 0x00, // Collection (Physical) 127
+ * # 0x85, 0x06, // Report ID (6) 129
+ * # 0x05, 0x09, // Usage Page (Button) 131
+ * # 0x19, 0x01, // Usage Minimum (1) 133
+ * # 0x29, 0x20, // Usage Maximum (32) 135
+ * # 0x15, 0x00, // Logical Minimum (0) 137
+ * # 0x25, 0x01, // Logical Maximum (1) 139
+ * # 0x95, 0x20, // Report Count (32) 141
+ * # 0x75, 0x01, // Report Size (1) 143
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 145
+ * # 0xc0, // End Collection 147
+ * # 0x05, 0x01, // Usage Page (Generic Desktop) 148
+ * # 0x09, 0x06, // Usage (Keyboard) 150
+ * # 0xa1, 0x01, // Collection (Application) 152
+ * # 0x85, 0x03, // Report ID (3) 154
+ * # 0x05, 0x07, // Usage Page (Keyboard) 156
+ * # 0x19, 0xe0, // Usage Minimum (224) 158
+ * # 0x29, 0xe7, // Usage Maximum (231) 160
+ * # 0x15, 0x00, // Logical Minimum (0) 162
+ * # 0x25, 0x01, // Logical Maximum (1) 164
+ * # 0x75, 0x01, // Report Size (1) 166
+ * # 0x95, 0x08, // Report Count (8) 168
+ * # 0x81, 0x02, // Input (Data,Var,Abs) 170
+ * # 0x05, 0x07, // Usage Page (Keyboard) 172
+ * # 0x19, 0x00, // Usage Minimum (0) 174
+ * # 0x29, 0xff, // Usage Maximum (255) 176
+ * # 0x26, 0xff, 0x00, // Logical Maximum (255) 178
+ * # 0x75, 0x08, // Report Size (8) 181
+ * # 0x95, 0x06, // Report Count (6) 183
+ * # 0x81, 0x00, // Input (Data,Arr,Abs) 185
+ * # 0xc0, // End Collection 187
+ *
+ * Key events; top to bottom:
+ * Buttons released: 03 00 00 00 00 00 00 00
+ * Button1: 03 00 05 00 00 00 00 00 -> 'b and B'
+ * Button2: 03 00 2c 00 00 00 00 00 -> 'Spacebar'
+ * Button3: 03 00 08 00 00 00 00 00 -> 'e and E'
+ * Button4: 03 00 0c 00 00 00 00 00 -> 'i and I'
+ * Button5: 03 05 1d 00 00 00 00 00 -> LeftControl + LeftAlt + 'z and Z'
+ * Button6: 03 01 16 00 00 00 00 00 -> LeftControl + 's and S'
+ *
+ * Dial Events:
+ * Clockwise: 03 01 2e 00 00 00 00 00 -> LeftControl + '= and +'
+ * Anticlockwise: 03 01 2d 00 00 00 00 00 -> LeftControl + '- and (underscore)'
+ *
+ * NOTE: Input event descriptions begin at byte 2 and progressively build
+ * towards byte 7 as each new key is pressed, maintaining the press order.
+ * For example:
+ * BTN1 followed by BTN2 is 03 00 05 2c 00 00 00 00
+ * BTN2 followed by BTN1 is 03 00 2c 05 00 00 00 00
+ *
+ * Releasing a button causes its byte to be freed, and the next item in the list
+ * is pushed forwards. Dial events are released immediately after an event is
+ * registered (i.e. after each "click"), so will continually appear pushed
+ * backwards in the report.
+ *
+ * When a button with a modifier key is pressed, the button identifier stacks in
+ * an abnormal way, where the highest modifier byte always supersedes others.
+ * In these cases, the button with the higher modifier is always last.
+ * For example:
+ * BTN6 followed by BTN5 is 03 05 1d 16 00 00 00 00
+ * BTN5 followed by BTN6 is 03 05 1d 16 00 00 00 00
+ * BTN5 followed by BTN1 is 03 05 05 1d 00 00 00 00
+ *
+ * For three button presses in order, strictly following the above rules:
+ * BTN6, BTN1, BTN5 is 03 05 05 1d 16 00 00 00
+ * BTN5, BTN1, BTN6 is 03 05 05 1d 16 00 00 00
+ *
+ * In short, when BTN5/6 are pressed, the order of operations is lost, as they
+ * will always float to the end when pressed in combination with others.
+ *
+ * Fortunately, all states are recorded in the same way, with no overlaps.
+ * Byte 1 can be reused for the dial, since it only ever carries modifier keys.
+ */
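+
+/*
+ * Worked examples of the translation done in xppen_deco02_device_event()
+ * below (illustrative only): a clockwise dial click 03 01 2e 00 00 00 00 00
+ * is rewritten to 03 01 00 00 00 00 00 00 (dial = 1, no buttons), and the
+ * BTN5+BTN6 report 03 05 1d 16 00 00 00 00 becomes 03 00 30 00 00 00 00 00
+ * (button_mask = BIT(4) | BIT(5)).
+ */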
+
+#define RDESC_SIZE_PAD 188
+#define REPORT_SIZE_PAD 8
+#define REPORT_ID_BUTTONS 3
+#define PAD_BUTTON_COUNT 6
+#define RDESC_KEYBOARD_OFFSET 148
+
+static const __u8 fixed_rdesc_pad[] = {
+ /* Copy of pen descriptor to avoid losing functionality */
+ UsagePage_Digitizers
+ Usage_Dig_Pen
+ CollectionApplication(
+ ReportId(7)
+ Usage_Dig_Stylus
+ CollectionPhysical(
+ Usage_Dig_TipSwitch
+ Usage_Dig_BarrelSwitch
+ Usage_Dig_Eraser
+ Usage_Dig_Invert
+ Usage_Dig_InRange
+ LogicalMinimum_i8(0)
+ LogicalMaximum_i8(1)
+ ReportSize(1)
+ ReportCount(5)
+ Input(Var|Abs)
+ ReportCount(3)
+ Input(Const) /* Input (Const, Var, Abs) */
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(22352)
+ UnitExponent(-3)
+ Unit(in) /* (EnglishLinear: in) */
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(22352)
+ ReportSize(16)
+ ReportCount(1)
+ Input(Var|Abs)
+ Usage_GD_Y
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(13970)
+ UnitExponent(-3)
+ Unit(in) /* (EnglishLinear: in) */
+ PhysicalMinimum_i16(0)
+ PhysicalMaximum_i16(13970)
+ ReportSize(16)
+ ReportCount(1)
+ Input(Var|Abs)
+ UsagePage_Digitizers
+ Usage_Dig_TipPressure
+ LogicalMinimum_i16(0)
+ LogicalMaximum_i16(8191)
+ ReportSize(16)
+ ReportCount(1)
+ Input(Var|Abs)
+ )
+ )
+
+ /* FIXES BEGIN */
+ UsagePage_GenericDesktop
+ Usage_GD_Keypad
+ CollectionApplication(
+ ReportId(REPORT_ID_BUTTONS) /* Retain original ID on byte 0 */
+ ReportCount(1)
+ ReportSize(REPORT_SIZE_PAD)
+ UsagePage_Digitizers
+ Usage_Dig_TabletFunctionKeys
+ CollectionPhysical(
+ /* Byte 1: Dial state */
+ UsagePage_GenericDesktop
+ Usage_GD_Dial
+ LogicalMinimum_i8(-1)
+ LogicalMaximum_i8(1)
+ ReportCount(1)
+ ReportSize(REPORT_SIZE_PAD)
+ Input(Var|Rel)
+ /* Byte 2: Button state */
+ UsagePage_Button
+ ReportSize(1)
+ ReportCount(PAD_BUTTON_COUNT)
+ UsageMinimum_i8(0x01)
+ UsageMaximum_i8(PAD_BUTTON_COUNT) /* Number of buttons */
+ LogicalMinimum_i8(0x0)
+ LogicalMaximum_i8(0x1)
+ Input(Var|Abs)
+ ReportCount(2) /* padding to close byte 2 */
+ Input(Const)
+ /* Byte 3: Exists to be tablet pad */
+ UsagePage_Digitizers
+ Usage_Dig_BarrelSwitch
+ ReportCount(1)
+ ReportSize(1)
+ Input(Var|Abs)
+ ReportCount(7) /* Padding, to fill full report space */
+ Input(Const)
+ /* Byte 4/5: Exists to be a tablet pad */
+ UsagePage_GenericDesktop
+ Usage_GD_X
+ Usage_GD_Y
+ ReportCount(2)
+ ReportSize(8)
+ Input(Var|Abs)
+ /* Bytes 6/7: Padding, to match original length */
+ ReportCount(2)
+ ReportSize(8)
+ Input(Const)
+ )
+ FixedSizeVendorReport(RDESC_SIZE_PAD)
+ )
+};
+
+SEC(HID_BPF_RDESC_FIXUP)
+int BPF_PROG(xppen_deco02_rdesc_fixup, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE);
+
+ if (!data)
+ return 0; /* EPERM Check */
+
+ if (hctx->size == RDESC_SIZE_PAD) {
+ __builtin_memcpy(data, fixed_rdesc_pad, sizeof(fixed_rdesc_pad));
+ return sizeof(fixed_rdesc_pad);
+ }
+
+ return 0;
+}
+
+SEC(HID_BPF_DEVICE_EVENT)
+int BPF_PROG(xppen_deco02_device_event, struct hid_bpf_ctx *hctx)
+{
+ __u8 *data = hid_bpf_get_data(hctx, 0, REPORT_SIZE_PAD);
+
+ if (!data || data[0] != REPORT_ID_BUTTONS)
+ return 0; /* EPERM or wrong report */
+
+ __u8 dial_code = 0;
+ __u8 button_mask = 0;
+ size_t d;
+
+ /* Start from 2; 0 is report ID, 1 is modifier keys, replaced by dial */
+ for (d = 2; d < 8; d++) {
+ switch (data[d]) {
+ case 0x2e:
+ dial_code = 1;
+ break;
+ case 0x2d:
+ dial_code = -1;
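+ /* -1 wraps to 0xff; the signed 8-bit dial field reports it as -1 */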
+ break;
+ /* below are buttons, top to bottom */
+ case 0x05:
+ button_mask |= BIT(0);
+ break;
+ case 0x2c:
+ button_mask |= BIT(1);
+ break;
+ case 0x08:
+ button_mask |= BIT(2);
+ break;
+ case 0x0c:
+ button_mask |= BIT(3);
+ break;
+ case 0x1d:
+ button_mask |= BIT(4);
+ break;
+ case 0x16:
+ button_mask |= BIT(5);
+ break;
+ default:
+ break;
+ }
+ }
+
+ __u8 report[8] = { REPORT_ID_BUTTONS, dial_code, button_mask, 0x00 };
+
+ __builtin_memcpy(data, report, sizeof(report));
+ return 0;
+}
+
+HID_BPF_OPS(xppen_deco02) = {
+ .hid_rdesc_fixup = (void *)xppen_deco02_rdesc_fixup,
+ .hid_device_event = (void *)xppen_deco02_device_event,
+};
+
+SEC("syscall")
+int probe(struct hid_bpf_probe_args *ctx)
+{
+ ctx->retval = ctx->rdesc_size != RDESC_SIZE_PAD ? -EINVAL : 0;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/progs/hid_report_helpers.h b/drivers/hid/bpf/progs/hid_report_helpers.h
index 9b2a48e4a311..9944ff54d31d 100644
--- a/drivers/hid/bpf/progs/hid_report_helpers.h
+++ b/drivers/hid/bpf/progs/hid_report_helpers.h
@@ -143,8 +143,11 @@
* report with Report ID 0xac of the given size in bytes.
* The size is inclusive of the 1 byte Report ID prefix.
*
- * HID-BPF requires that at least one report has
- * the same size as the original report from the device.
+ * The kernel discards any HID reports that are larger
+ * than the largest report in a HID report descriptor.
+ * Thus at least one report must have (at least)
+ * the same size as the largest original report from
+ * the device.
* The easy way to ensure that is to add this
* macro as the last element of your CollectionApplication
* other reports can be of any size less than this.
@@ -295,6 +298,7 @@
#define Usage_GD_SystemSpeakerMute Usage_i8(0xa7)
#define Usage_GD_SystemHibernate Usage_i8(0xa8)
#define Usage_GD_SystemMicrophoneMute Usage_i8(0xa9)
+#define Usage_GD_SystemAccessibilityBinding Usage_i8(0xaa)
#define Usage_GD_SystemDisplayInvert Usage_i8(0xb0)
#define Usage_GD_SystemDisplayInternal Usage_i8(0xb1)
#define Usage_GD_SystemDisplayExternal Usage_i8(0xb2)
@@ -2669,7 +2673,7 @@
#define Usage_BS_iDeviceName Usage_i8(0x88)
#define Usage_BS_iDeviceChemistry Usage_i8(0x89)
#define Usage_BS_ManufacturerData Usage_i8(0x8a)
-#define Usage_BS_Rechargable Usage_i8(0x8b)
+#define Usage_BS_Rechargeable Usage_i8(0x8b)
#define Usage_BS_WarningCapacityLimit Usage_i8(0x8c)
#define Usage_BS_CapacityGranularity1 Usage_i8(0x8d)
#define Usage_BS_CapacityGranularity2 Usage_i8(0x8e)
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 61404d7a43ee..57da4f86a9fa 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -355,6 +355,7 @@ static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
static const struct apple_non_apple_keyboard non_apple_keyboards[] = {
{ "SONiX USB DEVICE" },
+ { "SONiX AK870 PRO" },
{ "Keychron" },
{ "AONE" },
{ "GANSS" },
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index d27dcfb2b9e4..472bca54642b 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -27,6 +27,7 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/platform_data/x86/asus-wmi.h>
+#include <linux/platform_data/x86/asus-wmi-leds-ids.h>
#include <linux/input/mt.h>
#include <linux/usb.h> /* For to_usb_interface for T100 touchpad intf check */
#include <linux/power_supply.h>
@@ -974,7 +975,10 @@ static int asus_input_mapping(struct hid_device *hdev,
case 0xc4: asus_map_key_clear(KEY_KBDILLUMUP); break;
case 0xc5: asus_map_key_clear(KEY_KBDILLUMDOWN); break;
case 0xc7: asus_map_key_clear(KEY_KBDILLUMTOGGLE); break;
+ case 0x4e: asus_map_key_clear(KEY_FN_ESC); break;
+ case 0x7e: asus_map_key_clear(KEY_EMOJI_PICKER); break;
+ case 0x8b: asus_map_key_clear(KEY_PROG1); break; /* ProArt Creator Hub key */
case 0x6b: asus_map_key_clear(KEY_F21); break; /* ASUS touchpad toggle */
case 0x38: asus_map_key_clear(KEY_PROG1); break; /* ROG key */
case 0xba: asus_map_key_clear(KEY_PROG2); break; /* Fn+C ASUS Splendid */
@@ -1385,9 +1389,6 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
- USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3),
- QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
- { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
@@ -1417,6 +1418,9 @@ static const struct hid_device_id asus_devices[] = {
* part, while letting hid-multitouch.c handle the touchpad.
*/
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_Z13_FOLIO),
+ QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD) },
{ }
};
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 5419a6c10907..a5b3a8ca2fcb 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -944,6 +944,15 @@ static int hid_scan_report(struct hid_device *hid)
hid->group = HID_GROUP_GENERIC;
/*
+ * In case we are re-scanning after a BPF has been loaded,
+ * we need to use the bpf report descriptor, not the original one.
+ */
+ if (hid->bpf_rdesc && hid->bpf_rsize) {
+ start = hid->bpf_rdesc;
+ end = start + hid->bpf_rsize;
+ }
+
+ /*
* The parsing is simpler than the one in hid_open_report() as we should
* be robust against hid errors. Those errors will be raised by
* hid_open_report() anyway.
@@ -2708,12 +2717,32 @@ static bool hid_check_device_match(struct hid_device *hdev,
return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
}
+static void hid_set_group(struct hid_device *hdev)
+{
+ int ret;
+
+ if (hid_ignore_special_drivers) {
+ hdev->group = HID_GROUP_GENERIC;
+ } else if (!hdev->group &&
+ !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
+ ret = hid_scan_report(hdev);
+ if (ret)
+ hid_warn(hdev, "bad device descriptor (%d)\n", ret);
+ }
+}
+
static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
{
const struct hid_device_id *id;
int ret;
if (!hdev->bpf_rsize) {
+ /* we keep a reference to the currently scanned report descriptor */
+ const __u8 *original_rdesc = hdev->bpf_rdesc;
+
+ if (!original_rdesc)
+ original_rdesc = hdev->dev_rdesc;
+
/* in case a bpf program gets detached, we need to free the old one */
hid_free_bpf_rdesc(hdev);
@@ -2723,6 +2752,12 @@ static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
/* call_hid_bpf_rdesc_fixup will always return a valid pointer */
hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
&hdev->bpf_rsize);
+
+ /* the report descriptor changed, we need to re-scan it */
+ if (original_rdesc != hdev->bpf_rdesc) {
+ hdev->group = 0;
+ hid_set_group(hdev);
+ }
}
if (!hid_check_device_match(hdev, hdrv, &id))
@@ -2903,14 +2938,7 @@ int hid_add_device(struct hid_device *hdev)
/*
* Scan generic devices for group information
*/
- if (hid_ignore_special_drivers) {
- hdev->group = HID_GROUP_GENERIC;
- } else if (!hdev->group &&
- !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
- ret = hid_scan_report(hdev);
- if (ret)
- hid_warn(hdev, "bad device descriptor (%d)\n", ret);
- }
+ hid_set_group(hdev);
hdev->id = atomic_inc_return(&id);
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index fee134a7eba3..5e9a5b8f7f16 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -553,9 +553,8 @@ static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata)
if (IS_ERR(new_supply)) {
hid_err(drvdata->hid_dev,
- "failed to register battery '%s' (reason: %ld)\n",
- drvdata->battery_desc.name,
- PTR_ERR(new_supply));
+ "failed to register battery '%s' (reason: %pe)\n",
+ drvdata->battery_desc.name, new_supply);
return;
}
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 482f62a78c41..803b883ae875 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -229,10 +229,12 @@ static int cp2112_gpio_set_unlocked(struct cp2112_device *dev,
ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
HID_REQ_SET_REPORT);
- if (ret < 0)
+ if (ret != CP2112_GPIO_SET_LENGTH) {
hid_err(hdev, "error setting GPIO values: %d\n", ret);
+ return ret < 0 ? ret : -EIO;
+ }
- return ret;
+ return 0;
}
static int cp2112_gpio_set(struct gpio_chip *chip, unsigned int offset,
@@ -309,9 +311,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
*/
- cp2112_gpio_set_unlocked(dev, offset, value);
-
- return 0;
+ return cp2112_gpio_set_unlocked(dev, offset, value);
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
@@ -689,7 +689,14 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
count = cp2112_write_read_req(buf, addr, read_length,
command, NULL, 0);
} else {
- count = cp2112_write_req(buf, addr, command,
+ /* Copy starts from data->block[1], so the length can
+ * be at most I2C_SMBUS_BLOCK_MAX + 1
+ */
+
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX + 1)
+ count = -EINVAL;
+ else
+ count = cp2112_write_req(buf, addr, command,
data->block + 1,
data->block[0]);
}
@@ -700,7 +707,14 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
I2C_SMBUS_BLOCK_MAX,
command, NULL, 0);
} else {
- count = cp2112_write_req(buf, addr, command,
+ /* data_length here is data->block[0] + 1,
+ * so make sure that data->block[0] is less
+ * than or equal to I2C_SMBUS_BLOCK_MAX + 1
+ */
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX + 1)
+ count = -EINVAL;
+ else
+ count = cp2112_write_req(buf, addr, command,
data->block,
data->block[0] + 1);
}
@@ -709,7 +723,14 @@ static int cp2112_xfer(struct i2c_adapter *adap, u16 addr,
size = I2C_SMBUS_BLOCK_DATA;
read_write = I2C_SMBUS_READ;
- count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX,
+ /* data_length is data->block[0] + 1, so
+ * data->block[0] should be less than or
+ * equal to I2C_SMBUS_BLOCK_MAX + 1
+ */
+ if (data->block[0] > I2C_SMBUS_BLOCK_MAX + 1)
+ count = -EINVAL;
+ else
+ count = cp2112_write_read_req(buf, addr, I2C_SMBUS_BLOCK_MAX,
command, data->block,
data->block[0] + 1);
break;
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 7107071c7c51..337d2dc81b4c 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -2523,7 +2523,7 @@ static const struct hid_usage_entry hid_usage_table[] = {
{ 0x85, 0x0088, "iDeviceName" },
{ 0x85, 0x0089, "iDeviceChemistry" },
{ 0x85, 0x008a, "ManufacturerData" },
- { 0x85, 0x008b, "Rechargable" },
+ { 0x85, 0x008b, "Rechargeable" },
{ 0x85, 0x008c, "WarningCapacityLimit" },
{ 0x85, 0x008d, "CapacityGranularity1" },
{ 0x85, 0x008e, "CapacityGranularity2" },
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index 69771fd35006..981d1b6e9658 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -75,7 +75,8 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
mouse_button_fixup(hdev, rdesc, *rsize, 20, 28, 22, 14, 8);
break;
- case USB_DEVICE_ID_ELECOM_M_XT3URBK:
+ case USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB:
+ case USB_DEVICE_ID_ELECOM_M_XT3URBK_018F:
case USB_DEVICE_ID_ELECOM_M_XT3DRBK:
case USB_DEVICE_ID_ELECOM_M_XT4DRBK:
/*
@@ -119,7 +120,8 @@ static const __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
static const struct hid_device_id elecom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_018F) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
diff --git a/drivers/hid/hid-evision.c b/drivers/hid/hid-evision.c
index bb5997078491..3e7f43ab80bb 100644
--- a/drivers/hid/hid-evision.c
+++ b/drivers/hid/hid-evision.c
@@ -18,6 +18,10 @@ static int evision_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
+ /* mapping only applies to USB_DEVICE_ID_EVISION_ICL01 */
+ if (hdev->product != USB_DEVICE_ID_EVISION_ICL01)
+ return 0;
+
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
return 0;
@@ -37,8 +41,24 @@ static int evision_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
+#define REP_DSC_SIZE 236
+#define USAGE_MAX_INDEX 59
+
+static const __u8 *evision_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (hdev->product == USB_DEVICE_ID_EV_TELINK_RECEIVER &&
+ *rsize == REP_DSC_SIZE && rdesc[USAGE_MAX_INDEX] == 0x29 &&
+ rdesc[USAGE_MAX_INDEX + 1] == 3) {
+ hid_info(hdev, "fixing EVision:TeLink Receiver report descriptor\n");
+ rdesc[USAGE_MAX_INDEX + 1] = 5; // change usage max from 3 to 5
+ }
+ return rdesc;
+}
+
static const struct hid_device_id evision_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_EVISION, USB_DEVICE_ID_EVISION_ICL01) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EVISION, USB_DEVICE_ID_EV_TELINK_RECEIVER) },
{ }
};
MODULE_DEVICE_TABLE(hid, evision_devices);
@@ -47,6 +67,7 @@ static struct hid_driver evision_driver = {
.name = "evision",
.id_table = evision_devices,
.input_mapping = evision_input_mapping,
+ .report_fixup = evision_report_fixup,
};
module_hid_driver(evision_driver);
diff --git a/drivers/hid/hid-generic.c b/drivers/hid/hid-generic.c
index 9e04c6d0fcc8..c2de916747de 100644
--- a/drivers/hid/hid-generic.c
+++ b/drivers/hid/hid-generic.c
@@ -70,6 +70,14 @@ static int hid_generic_probe(struct hid_device *hdev,
return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}
+static int hid_generic_reset_resume(struct hid_device *hdev)
+{
+ if (hdev->claimed & HID_CLAIMED_INPUT)
+ hidinput_reset_resume(hdev);
+
+ return 0;
+}
+
static const struct hid_device_id hid_table[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, HID_ANY_ID, HID_ANY_ID) },
{ }
@@ -81,6 +89,7 @@ static struct hid_driver hid_generic = {
.id_table = hid_table,
.match = hid_generic_match,
.probe = hid_generic_probe,
+ .reset_resume = hid_generic_reset_resume,
};
module_hid_driver(hid_generic);
diff --git a/drivers/hid/hid-haptic.c b/drivers/hid/hid-haptic.c
new file mode 100644
index 000000000000..fc8a9997f815
--- /dev/null
+++ b/drivers/hid/hid-haptic.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID Haptic support for Linux
+ *
+ * Copyright (c) 2021 Angela Czubak <acz@semihalf.com>
+ */
+
+#include <linux/input/mt.h>
+#include <linux/module.h>
+
+#include "hid-haptic.h"
+
+void hid_haptic_feature_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_field *field, struct hid_usage *usage)
+{
+ u16 usage_hid;
+
+ if (usage->hid == HID_HP_AUTOTRIGGER) {
+ if (usage->usage_index >= field->report_count) {
+ dev_err(&hdev->dev,
+ "HID_HP_AUTOTRIGGER out of range\n");
+ return;
+ }
+
+ hid_device_io_start(hdev);
+ hid_hw_request(hdev, field->report, HID_REQ_GET_REPORT);
+ hid_hw_wait(hdev);
+ hid_device_io_stop(hdev);
+ haptic->default_auto_trigger =
+ field->value[usage->usage_index];
+ haptic->auto_trigger_report = field->report;
+ } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_ORDINAL) {
+ usage_hid = usage->hid & HID_USAGE;
+ switch (field->logical) {
+ case HID_HP_WAVEFORMLIST:
+ if (usage_hid > haptic->max_waveform_id)
+ haptic->max_waveform_id = usage_hid;
+ break;
+ case HID_HP_DURATIONLIST:
+ if (usage_hid > haptic->max_duration_id)
+ haptic->max_duration_id = usage_hid;
+ break;
+ default:
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(hid_haptic_feature_mapping);
+
+bool hid_haptic_check_pressure_unit(struct hid_haptic_device *haptic,
+ struct hid_input *hi, struct hid_field *field)
+{
+ if (field->unit == HID_UNIT_GRAM || field->unit == HID_UNIT_NEWTON) {
+ haptic->force_logical_minimum = field->logical_minimum;
+ haptic->force_physical_minimum = field->physical_minimum;
+ haptic->force_resolution = input_abs_get_res(hi->input,
+ ABS_MT_PRESSURE);
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_check_pressure_unit);
+
+int hid_haptic_input_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->hid == HID_HP_MANUALTRIGGER) {
+ haptic->manual_trigger_report = field->report;
+ /* we don't really want to map these fields */
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_input_mapping);
+
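+/*
+ * Returns 1 when @hi is a haptic touchpad (both the auto-trigger and the
+ * manual-trigger reports were discovered), 0 for a touchpad without
+ * haptics, and -1 for any other application.
+ */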
+int hid_haptic_input_configured(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi)
+{
+ if (hi->application == HID_DG_TOUCHPAD) {
+ if (haptic->auto_trigger_report &&
+ haptic->manual_trigger_report) {
+ __set_bit(INPUT_PROP_PRESSUREPAD, hi->input->propbit);
+ return 1;
+ }
+ return 0;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_input_configured);
+
+static void parse_auto_trigger_field(struct hid_haptic_device *haptic,
+ struct hid_field *field)
+{
+ int count = field->report_count;
+ int n;
+ u16 usage_hid;
+
+ for (n = 0; n < count; n++) {
+ switch (field->usage[n].hid & HID_USAGE_PAGE) {
+ case HID_UP_ORDINAL:
+ usage_hid = field->usage[n].hid & HID_USAGE;
+ switch (field->logical) {
+ case HID_HP_WAVEFORMLIST:
+ haptic->hid_usage_map[usage_hid] = field->value[n];
+ if (field->value[n] ==
+ (HID_HP_WAVEFORMPRESS & HID_USAGE)) {
+ haptic->press_ordinal = usage_hid;
+ } else if (field->value[n] ==
+ (HID_HP_WAVEFORMRELEASE & HID_USAGE)) {
+ haptic->release_ordinal = usage_hid;
+ }
+ break;
+ case HID_HP_DURATIONLIST:
+ haptic->duration_map[usage_hid] =
+ field->value[n];
+ break;
+ default:
+ break;
+ }
+ break;
+ case HID_UP_HAPTIC:
+ switch (field->usage[n].hid) {
+ case HID_HP_WAVEFORMVENDORID:
+ haptic->vendor_id = field->value[n];
+ break;
+ case HID_HP_WAVEFORMVENDORPAGE:
+ haptic->vendor_page = field->value[n];
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ /* Should not really happen */
+ break;
+ }
+ }
+}
+
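+/*
+ * Serialize one manual-trigger output report for @effect into the
+ * effect's report_buf. HID_HP_INTENSITY is rescaled from a 0-100
+ * percentage into the field's logical range, e.g. intensity 50 on a
+ * logical range of [0, 7] yields 0 + 50 * (7 - 0) / 100 = 3.
+ */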
+static void fill_effect_buf(struct hid_haptic_device *haptic,
+ struct ff_haptic_effect *effect,
+ struct hid_haptic_effect *haptic_effect,
+ int waveform_ordinal)
+{
+ struct hid_report *rep = haptic->manual_trigger_report;
+ struct hid_usage *usage;
+ struct hid_field *field;
+ s32 value;
+ int i, j;
+ u8 *buf = haptic_effect->report_buf;
+
+ mutex_lock(&haptic->manual_trigger_mutex);
+ for (i = 0; i < rep->maxfield; i++) {
+ field = rep->field[i];
+ /* Ignore if report count is out of bounds. */
+ if (field->report_count < 1)
+ continue;
+
+ for (j = 0; j < field->maxusage; j++) {
+ usage = &field->usage[j];
+
+ switch (usage->hid) {
+ case HID_HP_INTENSITY:
+ if (effect->intensity > 100) {
+ value = field->logical_maximum;
+ } else {
+ value = field->logical_minimum +
+ effect->intensity *
+ (field->logical_maximum -
+ field->logical_minimum) / 100;
+ }
+ break;
+ case HID_HP_REPEATCOUNT:
+ value = effect->repeat_count;
+ break;
+ case HID_HP_RETRIGGERPERIOD:
+ value = effect->retrigger_period;
+ break;
+ case HID_HP_MANUALTRIGGER:
+ value = waveform_ordinal;
+ break;
+ default:
+ /* skip usages we do not handle so a stale or
+ * uninitialized value is never written back
+ */
+ continue;
+ }
+
+ field->value[j] = value;
+ }
+ }
+
+ hid_output_report(rep, buf);
+ mutex_unlock(&haptic->manual_trigger_mutex);
+}
+
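+/*
+ * Select who renders press/release feedback: HID_HAPTIC_MODE_HOST sets
+ * the auto-trigger waveform to WAVEFORMSTOP so the device stops
+ * triggering on its own, while HID_HAPTIC_MODE_DEVICE restores the
+ * default auto-trigger waveform captured in hid_haptic_feature_mapping().
+ */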
+static void switch_mode(struct hid_device *hdev, struct hid_haptic_device *haptic,
+ int mode)
+{
+ struct hid_report *rep = haptic->auto_trigger_report;
+ struct hid_field *field;
+ s32 value;
+ int i, j;
+
+ if (mode == HID_HAPTIC_MODE_HOST)
+ value = HID_HAPTIC_ORDINAL_WAVEFORMSTOP;
+ else
+ value = haptic->default_auto_trigger;
+
+ mutex_lock(&haptic->auto_trigger_mutex);
+ for (i = 0; i < rep->maxfield; i++) {
+ field = rep->field[i];
+ /* Ignore if report count is out of bounds. */
+ if (field->report_count < 1)
+ continue;
+
+ for (j = 0; j < field->maxusage; j++) {
+ if (field->usage[j].hid == HID_HP_AUTOTRIGGER)
+ field->value[j] = value;
+ }
+ }
+
+ /* send the report */
+ hid_hw_request(hdev, rep, HID_REQ_SET_REPORT);
+ mutex_unlock(&haptic->auto_trigger_mutex);
+ haptic->mode = mode;
+}
+
+static int hid_haptic_upload_effect(struct input_dev *dev, struct ff_effect *effect,
+ struct ff_effect *old)
+{
+ struct hid_device *hdev = input_get_drvdata(dev);
+ struct ff_device *ff = dev->ff;
+ struct hid_haptic_device *haptic = ff->private;
+ int i, ordinal = 0;
+ bool switch_modes = false;
+
+ /* If vendor range, check vendor id and page */
+ if (effect->u.haptic.hid_usage >= (HID_HP_VENDORWAVEFORMMIN & HID_USAGE) &&
+ effect->u.haptic.hid_usage <= (HID_HP_VENDORWAVEFORMMAX & HID_USAGE) &&
+ (effect->u.haptic.vendor_id != haptic->vendor_id ||
+ effect->u.haptic.vendor_waveform_page != haptic->vendor_page))
+ return -EINVAL;
+
+ /* Check hid_usage */
+ for (i = 1; i <= haptic->max_waveform_id; i++) {
+ if (haptic->hid_usage_map[i] == effect->u.haptic.hid_usage) {
+ ordinal = i;
+ break;
+ }
+ }
+ if (ordinal < 1)
+ return -EINVAL;
+
+ /* Fill the buffer for the effect id */
+ fill_effect_buf(haptic, &effect->u.haptic, &haptic->effect[effect->id],
+ ordinal);
+
+ if (effect->u.haptic.hid_usage == (HID_HP_WAVEFORMPRESS & HID_USAGE) ||
+ effect->u.haptic.hid_usage == (HID_HP_WAVEFORMRELEASE & HID_USAGE))
+ switch_modes = true;
+
+ /* If device is in autonomous mode, and the uploaded effect signals userspace
+ * wants control of the device, change modes
+ */
+ if (switch_modes && haptic->mode == HID_HAPTIC_MODE_DEVICE)
+ switch_mode(hdev, haptic, HID_HAPTIC_MODE_HOST);
+
+ return 0;
+}
+
+static int play_effect(struct hid_device *hdev, struct hid_haptic_device *haptic,
+ struct hid_haptic_effect *effect)
+{
+ int ret;
+
+ ret = hid_hw_output_report(hdev, effect->report_buf,
+ haptic->manual_trigger_report_len);
+ if (ret < 0) {
+ ret = hid_hw_raw_request(hdev,
+ haptic->manual_trigger_report->id,
+ effect->report_buf,
+ haptic->manual_trigger_report_len,
+ HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+ }
+
+ return ret;
+}
+
+static void haptic_work_handler(struct work_struct *work)
+{
+ struct hid_haptic_effect *effect = container_of(work,
+ struct hid_haptic_effect,
+ work);
+ struct input_dev *dev = effect->input_dev;
+ struct hid_device *hdev = input_get_drvdata(dev);
+ struct hid_haptic_device *haptic = dev->ff->private;
+
+ mutex_lock(&haptic->manual_trigger_mutex);
+ if (effect != &haptic->stop_effect)
+ play_effect(hdev, haptic, &haptic->stop_effect);
+
+ play_effect(hdev, haptic, effect);
+ mutex_unlock(&haptic->manual_trigger_mutex);
+}
+
+static int hid_haptic_playback(struct input_dev *dev, int effect_id, int value)
+{
+ struct hid_haptic_device *haptic = dev->ff->private;
+
+ if (value)
+ queue_work(haptic->wq, &haptic->effect[effect_id].work);
+ else
+ queue_work(haptic->wq, &haptic->stop_effect.work);
+
+ return 0;
+}
+
+static void effect_set_default(struct ff_effect *effect)
+{
+ effect->type = FF_HAPTIC;
+ effect->id = -1;
+ effect->u.haptic.hid_usage = HID_HP_WAVEFORMNONE & HID_USAGE;
+ effect->u.haptic.intensity = 100;
+ effect->u.haptic.retrigger_period = 0;
+ effect->u.haptic.repeat_count = 0;
+}
+
+static int hid_haptic_erase(struct input_dev *dev, int effect_id)
+{
+ struct hid_haptic_device *haptic = dev->ff->private;
+ struct hid_device *hdev = input_get_drvdata(dev);
+ struct ff_effect effect;
+ int ordinal;
+
+ effect_set_default(&effect);
+
+ if (effect.u.haptic.hid_usage == (HID_HP_WAVEFORMRELEASE & HID_USAGE)) {
+ ordinal = haptic->release_ordinal;
+ if (!ordinal) {
+ ordinal = HID_HAPTIC_ORDINAL_WAVEFORMNONE;
+ if (haptic->mode == HID_HAPTIC_MODE_HOST)
+ switch_mode(hdev, haptic, HID_HAPTIC_MODE_DEVICE);
+ } else
+ effect.u.haptic.hid_usage = HID_HP_WAVEFORMRELEASE & HID_USAGE;
+
+ fill_effect_buf(haptic, &effect.u.haptic, &haptic->effect[effect_id],
+ ordinal);
+ } else if (effect.u.haptic.hid_usage == (HID_HP_WAVEFORMPRESS & HID_USAGE)) {
+ ordinal = haptic->press_ordinal;
+ if (!ordinal) {
+ ordinal = HID_HAPTIC_ORDINAL_WAVEFORMNONE;
+ if (haptic->mode == HID_HAPTIC_MODE_HOST)
+ switch_mode(hdev, haptic, HID_HAPTIC_MODE_DEVICE);
+ } else
+ effect.u.haptic.hid_usage = HID_HP_WAVEFORMPRESS & HID_USAGE;
+
+ fill_effect_buf(haptic, &effect.u.haptic, &haptic->effect[effect_id],
+ ordinal);
+ }
+
+ return 0;
+}
+
+static void hid_haptic_destroy(struct ff_device *ff)
+{
+ struct hid_haptic_device *haptic = ff->private;
+ struct hid_device *hdev = haptic->hdev;
+ int r;
+
+ if (hdev)
+ put_device(&hdev->dev);
+
+ kfree(haptic->stop_effect.report_buf);
+ haptic->stop_effect.report_buf = NULL;
+
+ if (haptic->effect) {
+ for (r = 0; r < ff->max_effects; r++)
+ kfree(haptic->effect[r].report_buf);
+ kfree(haptic->effect);
+ }
+ haptic->effect = NULL;
+
+ destroy_workqueue(haptic->wq);
+ haptic->wq = NULL;
+
+ kfree(haptic->duration_map);
+ haptic->duration_map = NULL;
+
+ kfree(haptic->hid_usage_map);
+ haptic->hid_usage_map = NULL;
+
+ module_put(THIS_MODULE);
+}
+
+int hid_haptic_init(struct hid_device *hdev,
+ struct hid_haptic_device **haptic_ptr)
+{
+ struct hid_haptic_device *haptic = *haptic_ptr;
+ struct input_dev *dev = NULL;
+ struct hid_input *hidinput;
+ struct ff_device *ff;
+ int ret = 0, r;
+ struct ff_haptic_effect stop_effect = {
+ .hid_usage = HID_HP_WAVEFORMSTOP & HID_USAGE,
+ };
+ const char *prefix = "hid-haptic";
+ char *name;
+ int (*flush)(struct input_dev *dev, struct file *file);
+ int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
+
+ haptic->hdev = hdev;
+ haptic->max_waveform_id = max(2u, haptic->max_waveform_id);
+ haptic->max_duration_id = max(2u, haptic->max_duration_id);
+
+ haptic->hid_usage_map = kcalloc(haptic->max_waveform_id + 1,
+ sizeof(u16), GFP_KERNEL);
+ if (!haptic->hid_usage_map) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ haptic->duration_map = kcalloc(haptic->max_duration_id + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!haptic->duration_map) {
+ ret = -ENOMEM;
+ goto usage_map;
+ }
+
+ if (haptic->max_waveform_id != haptic->max_duration_id)
+ dev_warn(&hdev->dev,
+ "Haptic duration and waveform lists have different max id (%u and %u).\n",
+ haptic->max_duration_id, haptic->max_waveform_id);
+
+ haptic->hid_usage_map[HID_HAPTIC_ORDINAL_WAVEFORMNONE] =
+ HID_HP_WAVEFORMNONE & HID_USAGE;
+ haptic->hid_usage_map[HID_HAPTIC_ORDINAL_WAVEFORMSTOP] =
+ HID_HP_WAVEFORMSTOP & HID_USAGE;
+
+ mutex_init(&haptic->auto_trigger_mutex);
+ for (r = 0; r < haptic->auto_trigger_report->maxfield; r++)
+ parse_auto_trigger_field(haptic, haptic->auto_trigger_report->field[r]);
+
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+ if (hidinput->application == HID_DG_TOUCHPAD) {
+ dev = hidinput->input;
+ break;
+ }
+ }
+
+ if (!dev) {
+ dev_err(&hdev->dev, "Failed to find the input device\n");
+ ret = -ENODEV;
+ goto duration_map;
+ }
+
+ haptic->input_dev = dev;
+ haptic->manual_trigger_report_len =
+ hid_report_len(haptic->manual_trigger_report);
+ mutex_init(&haptic->manual_trigger_mutex);
+ name = kmalloc(strlen(prefix) + strlen(hdev->name) + 2, GFP_KERNEL);
+ if (name) {
+ sprintf(name, "%s %s", prefix, hdev->name);
+ haptic->wq = create_singlethread_workqueue(name);
+ kfree(name);
+ }
+ if (!haptic->wq) {
+ ret = -ENOMEM;
+ goto duration_map;
+ }
+ haptic->effect = kcalloc(FF_MAX_EFFECTS,
+ sizeof(struct hid_haptic_effect), GFP_KERNEL);
+ if (!haptic->effect) {
+ ret = -ENOMEM;
+ goto output_queue;
+ }
+ for (r = 0; r < FF_MAX_EFFECTS; r++) {
+ haptic->effect[r].report_buf =
+ hid_alloc_report_buf(haptic->manual_trigger_report,
+ GFP_KERNEL);
+ if (!haptic->effect[r].report_buf) {
+ dev_err(&hdev->dev,
+ "Failed to allocate a buffer for an effect.\n");
+ ret = -ENOMEM;
+ goto buffer_free;
+ }
+ haptic->effect[r].input_dev = dev;
+ INIT_WORK(&haptic->effect[r].work, haptic_work_handler);
+ }
+ haptic->stop_effect.report_buf =
+ hid_alloc_report_buf(haptic->manual_trigger_report,
+ GFP_KERNEL);
+ if (!haptic->stop_effect.report_buf) {
+ dev_err(&hdev->dev,
+ "Failed to allocate a buffer for stop effect.\n");
+ ret = -ENOMEM;
+ goto buffer_free;
+ }
+ haptic->stop_effect.input_dev = dev;
+ INIT_WORK(&haptic->stop_effect.work, haptic_work_handler);
+ fill_effect_buf(haptic, &stop_effect, &haptic->stop_effect,
+ HID_HAPTIC_ORDINAL_WAVEFORMSTOP);
+
+ input_set_capability(dev, EV_FF, FF_HAPTIC);
+
+ flush = dev->flush;
+ event = dev->event;
+ ret = input_ff_create(dev, FF_MAX_EFFECTS);
+ if (ret) {
+ dev_err(&hdev->dev, "Failed to create ff device.\n");
+ goto stop_buffer_free;
+ }
+
+ ff = dev->ff;
+ ff->private = haptic;
+ ff->upload = hid_haptic_upload_effect;
+ ff->playback = hid_haptic_playback;
+ ff->erase = hid_haptic_erase;
+ ff->destroy = hid_haptic_destroy;
+ if (!try_module_get(THIS_MODULE)) {
+ dev_err(&hdev->dev, "Failed to increase module count.\n");
+ goto input_free;
+ }
+ if (!get_device(&hdev->dev)) {
+ dev_err(&hdev->dev, "Failed to get hdev device.\n");
+ module_put(THIS_MODULE);
+ goto input_free;
+ }
+ return 0;
+
+input_free:
+ input_ff_destroy(dev);
+ /*
+ * Do not let double free happen, input_ff_destroy will call
+ * hid_haptic_destroy.
+ */
+ *haptic_ptr = NULL;
+ /* Restore dev flush and event */
+ dev->flush = flush;
+ dev->event = event;
+ return ret;
+stop_buffer_free:
+ kfree(haptic->stop_effect.report_buf);
+ haptic->stop_effect.report_buf = NULL;
+buffer_free:
+ while (--r >= 0)
+ kfree(haptic->effect[r].report_buf);
+ kfree(haptic->effect);
+ haptic->effect = NULL;
+output_queue:
+ destroy_workqueue(haptic->wq);
+ haptic->wq = NULL;
+duration_map:
+ kfree(haptic->duration_map);
+ haptic->duration_map = NULL;
+usage_map:
+ kfree(haptic->hid_usage_map);
+ haptic->hid_usage_map = NULL;
+exit:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_init);
+
+void hid_haptic_pressure_reset(struct hid_haptic_device *haptic)
+{
+ haptic->pressure_sum = 0;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_pressure_reset);
+
+void hid_haptic_pressure_increase(struct hid_haptic_device *haptic,
+ __s32 pressure)
+{
+ haptic->pressure_sum += pressure;
+}
+EXPORT_SYMBOL_GPL(hid_haptic_pressure_increase);
diff --git a/drivers/hid/hid-haptic.h b/drivers/hid/hid-haptic.h
new file mode 100644
index 000000000000..c6539ac04c1d
--- /dev/null
+++ b/drivers/hid/hid-haptic.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * HID Haptic support for Linux
+ *
+ * Copyright (c) 2021 Angela Czubak <acz@semihalf.com>
+ */
+
+#include <linux/hid.h>
+
+#define HID_HAPTIC_ORDINAL_WAVEFORMNONE 1
+#define HID_HAPTIC_ORDINAL_WAVEFORMSTOP 2
+
+#define HID_HAPTIC_MODE_DEVICE 0
+#define HID_HAPTIC_MODE_HOST 1
+
+struct hid_haptic_effect {
+ u8 *report_buf;
+ struct input_dev *input_dev;
+ struct work_struct work;
+ struct list_head control;
+ struct mutex control_mutex;
+};
+
+struct hid_haptic_effect_node {
+ struct list_head node;
+ struct file *file;
+};
+
+struct hid_haptic_device {
+ struct input_dev *input_dev;
+ struct hid_device *hdev;
+ struct hid_report *auto_trigger_report;
+ struct mutex auto_trigger_mutex;
+ struct workqueue_struct *wq;
+ struct hid_report *manual_trigger_report;
+ struct mutex manual_trigger_mutex;
+ size_t manual_trigger_report_len;
+ int pressed_state;
+ s32 pressure_sum;
+ s32 force_logical_minimum;
+ s32 force_physical_minimum;
+ s32 force_resolution;
+ u32 mode;
+ u32 default_auto_trigger;
+ u32 vendor_page;
+ u32 vendor_id;
+ u32 max_waveform_id;
+ u32 max_duration_id;
+ u16 *hid_usage_map;
+ u32 *duration_map;
+ u16 press_ordinal;
+ u16 release_ordinal;
+ struct hid_haptic_effect *effect;
+ struct hid_haptic_effect stop_effect;
+};
+
+#if IS_ENABLED(CONFIG_HID_HAPTIC)
+void hid_haptic_feature_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_field *field,
+ struct hid_usage *usage);
+bool hid_haptic_check_pressure_unit(struct hid_haptic_device *haptic,
+ struct hid_input *hi, struct hid_field *field);
+int hid_haptic_input_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max);
+int hid_haptic_input_configured(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi);
+int hid_haptic_init(struct hid_device *hdev, struct hid_haptic_device **haptic_ptr);
+void hid_haptic_handle_press_release(struct hid_haptic_device *haptic);
+void hid_haptic_pressure_reset(struct hid_haptic_device *haptic);
+void hid_haptic_pressure_increase(struct hid_haptic_device *haptic,
+ __s32 pressure);
+#else
+static inline
+void hid_haptic_feature_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_field *field,
+ struct hid_usage *usage)
+{}
+static inline
+bool hid_haptic_check_pressure_unit(struct hid_haptic_device *haptic,
+ struct hid_input *hi, struct hid_field *field)
+{
+ return false;
+}
+static inline
+int hid_haptic_input_mapping(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ return 0;
+}
+static inline
+int hid_haptic_input_configured(struct hid_device *hdev,
+ struct hid_haptic_device *haptic,
+ struct hid_input *hi)
+{
+ return 0;
+}
+static inline
+void hid_haptic_reset(struct hid_device *hdev, struct hid_haptic_device *haptic)
+{}
+static inline
+int hid_haptic_init(struct hid_device *hdev, struct hid_haptic_device **haptic_ptr)
+{
+ return 0;
+}
+static inline
+void hid_haptic_handle_press_release(struct hid_haptic_device *haptic) {}
+static inline
+bool hid_haptic_handle_input(struct hid_haptic_device *haptic)
+{
+ return false;
+}
+static inline
+void hid_haptic_pressure_reset(struct hid_haptic_device *haptic) {}
+static inline
+void hid_haptic_pressure_increase(struct hid_haptic_device *haptic,
+ __s32 pressure)
+{}
+#endif
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 149798754570..d31711f1aaec 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -223,7 +223,7 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD 0x1866
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD2 0x19b6
-#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_KEYBOARD3 0x1a30
+#define USB_DEVICE_ID_ASUSTEK_ROG_Z13_FOLIO 0x1a30
#define USB_DEVICE_ID_ASUSTEK_ROG_Z13_LIGHTBAR 0x18c6
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY 0x1abe
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X 0x1b4c
@@ -342,6 +342,9 @@
#define USB_DEVICE_ID_CODEMERCS_IOW_FIRST 0x1500
#define USB_DEVICE_ID_CODEMERCS_IOW_LAST 0x15ff
+#define USB_VENDOR_ID_COOLER_MASTER 0x2516
+#define USB_DEVICE_ID_COOLER_MASTER_MICE_DONGLE 0x01b7
+
#define USB_VENDOR_ID_CORSAIR 0x1b1c
#define USB_DEVICE_ID_CORSAIR_K90 0x1b02
#define USB_DEVICE_ID_CORSAIR_K70R 0x1b09
@@ -446,7 +449,8 @@
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
#define USB_DEVICE_ID_ELECOM_M_XGL20DLBK 0x00e6
-#define USB_DEVICE_ID_ELECOM_M_XT3URBK 0x00fb
+#define USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB 0x00fb
+#define USB_DEVICE_ID_ELECOM_M_XT3URBK_018F 0x018f
#define USB_DEVICE_ID_ELECOM_M_XT3DRBK 0x00fc
#define USB_DEVICE_ID_ELECOM_M_XT4DRBK 0x00fd
#define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
@@ -473,6 +477,7 @@
#define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118
#define USB_VENDOR_ID_EVISION 0x320f
+#define USB_DEVICE_ID_EV_TELINK_RECEIVER 0x226f
#define USB_DEVICE_ID_EVISION_ICL01 0x5041
#define USB_VENDOR_ID_FFBEAST 0x045b
@@ -715,6 +720,7 @@
#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
#define I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720 0x837a
#define USB_DEVICE_ID_ITE_LENOVO_YOGA900 0x8396
+#define I2C_DEVICE_ID_ITE_LENOVO_YOGA_SLIM_7X_KEYBOARD 0x8987
#define USB_DEVICE_ID_ITE8595 0x8595
#define USB_DEVICE_ID_ITE_MEDION_E1239T 0xce50
@@ -876,6 +882,7 @@
#define USB_DEVICE_ID_LOGITECH_DUAL_ACTION 0xc216
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2 0xc218
#define USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2 0xc219
+#define USB_DEVICE_ID_LOGITECH_G13 0xc21c
#define USB_DEVICE_ID_LOGITECH_G15_LCD 0xc222
#define USB_DEVICE_ID_LOGITECH_G11 0xc225
#define USB_DEVICE_ID_LOGITECH_G15_V2_LCD 0xc227
@@ -911,6 +918,8 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1 0xc539
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_1 0xc53f
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2 0xc543
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_3 0xc547
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_4 0xc54d
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_LOGITECH_BOLT_RECEIVER 0xc548
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
@@ -1296,6 +1305,8 @@
#define USB_VENDOR_ID_STEELSERIES 0x1038
#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410
+#define USB_DEVICE_ID_STEELSERIES_ARCTIS_1 0x12b6
+#define USB_DEVICE_ID_STEELSERIES_ARCTIS_9 0x12c2
#define USB_VENDOR_ID_SUN 0x0430
#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
@@ -1414,6 +1425,7 @@
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW 0x0933
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06 0x0078
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_22R_PRO 0x091b
+#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_24_PRO 0x092d
#define USB_DEVICE_ID_UGEE_TABLET_G5 0x0074
#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
#define USB_DEVICE_ID_UGEE_TABLET_RAINBOW_CV720 0x0055
@@ -1430,6 +1442,7 @@
#define USB_VENDOR_ID_VRS 0x0483
#define USB_DEVICE_ID_VRS_DFP 0xa355
+#define USB_DEVICE_ID_VRS_R295 0xa44c
#define USB_VENDOR_ID_VTL 0x0306
#define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f
@@ -1537,7 +1550,7 @@
#define USB_VENDOR_ID_SIGNOTEC 0x2133
#define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011 0x0018
-#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a
-#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155
+#define USB_VENDOR_ID_JIELI_SDK_DEFAULT 0x4c4a
+#define USB_DEVICE_ID_JIELI_SDK_4155 0x4155
#endif
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index f45f856a127f..2633fcd8f910 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -303,6 +303,19 @@ __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
}
break;
+ case ABS_PRESSURE:
+ case ABS_MT_PRESSURE:
+ if (field->unit == HID_UNIT_NEWTON) {
+ /* Convert to grams, 1 newton is 101.97 grams */
+ prev = physical_extents;
+ physical_extents *= 10197;
+ if (physical_extents < prev)
+ return 0;
+ unit_exponent -= 2;
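+ /* Net scale: 10197 * 10^-2 = 101.97, e.g. 2 N reads as ~204 g. */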
+ } else if (field->unit != HID_UNIT_GRAM) {
+ return 0;
+ }
+ break;
default:
return 0;
}
@@ -386,10 +399,11 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM),
HID_BATTERY_QUIRK_AVOID_QUERY },
/*
- * Elan I2C-HID touchscreens seem to all report a non present battery,
- * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C-HID devices.
+ * Elan HID touchscreens seem to all report a non-present battery;
+ * set HID_BATTERY_QUIRK_IGNORE for all Elan I2C and USB HID devices.
*/
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -622,7 +636,10 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
return;
}
- if (value == 0 || value < dev->battery_min || value > dev->battery_max)
+ if ((usage & HID_USAGE_PAGE) == HID_UP_DIGITIZER && value == 0)
+ return;
+
+ if (value < dev->battery_min || value > dev->battery_max)
return;
capacity = hidinput_scale_battery_capacity(dev, value);
@@ -683,9 +700,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
if (field->report_count < 1)
goto ignore;
- /* only LED usages are supported in output fields */
+ /* only LED and HAPTIC usages are supported in output fields */
if (field->report_type == HID_OUTPUT_REPORT &&
- (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
+ (usage->hid & HID_USAGE_PAGE) != HID_UP_LED &&
+ (usage->hid & HID_USAGE_PAGE) != HID_UP_HAPTIC) {
goto ignore;
}
@@ -860,7 +878,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
switch (usage->hid) {
/* These usage IDs map directly to the usage codes. */
- case HID_GD_X: case HID_GD_Y: case HID_GD_Z:
+ case HID_GD_X: case HID_GD_Y:
case HID_GD_RX: case HID_GD_RY: case HID_GD_RZ:
if (field->flags & HID_MAIN_ITEM_RELATIVE)
map_rel(usage->hid & 0xf);
@@ -868,6 +886,22 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
map_abs_clear(usage->hid & 0xf);
break;
+ case HID_GD_Z:
+ /* HID_GD_Z is mapped to ABS_DISTANCE for stylus/pen */
+ if (field->flags & HID_MAIN_ITEM_RELATIVE) {
+ map_rel(usage->hid & 0xf);
+ } else {
+ if (field->application == HID_DG_PEN ||
+ field->physical == HID_DG_PEN ||
+ field->logical == HID_DG_STYLUS ||
+ field->physical == HID_DG_STYLUS ||
+ field->application == HID_DG_DIGITIZER)
+ map_abs_clear(ABS_DISTANCE);
+ else
+ map_abs_clear(usage->hid & 0xf);
+ }
+ break;
+
case HID_GD_WHEEL:
if (field->flags & HID_MAIN_ITEM_RELATIVE) {
set_bit(REL_WHEEL, input->relbit);
@@ -2382,6 +2416,13 @@ void hidinput_disconnect(struct hid_device *hid)
}
EXPORT_SYMBOL_GPL(hidinput_disconnect);
+void hidinput_reset_resume(struct hid_device *hid)
+{
+ /* renegotiate host-device shared state after reset */
+ hidinput_change_resolution_multipliers(hid);
+}
+EXPORT_SYMBOL_GPL(hidinput_reset_resume);
+
#ifdef CONFIG_HID_KUNIT_TEST
#include "hid-input-test.c"
#endif
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index b3121fa7a72d..9cc3e029e9f6 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -32,8 +32,6 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
-#include <linux/platform_profile.h>
-
#include "hid-ids.h"
/* Userspace expects F20 for mic-mute; KEY_MICMUTE does not work */
@@ -150,6 +148,14 @@ static const __u8 lenovo_tpIIbtkbd_need_fixup_collection[] = {
0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
};
+static const __u8 lenovo_yoga7x_kbd_need_fixup_collection[] = {
+ 0x15, 0x00, // Logical Minimum (0)
+ 0x25, 0x65, // Logical Maximum (101)
+ 0x05, 0x07, // Usage Page (Keyboard)
+ 0x19, 0x00, // Usage Minimum (0)
+ 0x29, 0xDD, // Usage Maximum (221)
+};
+
static const __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -179,6 +185,13 @@ static const __u8 *lenovo_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[260] = 0x01; /* report count (2) = 0x01 */
}
break;
+ case I2C_DEVICE_ID_ITE_LENOVO_YOGA_SLIM_7X_KEYBOARD:
+ if (*rsize == 176 &&
+ memcmp(&rdesc[52], lenovo_yoga7x_kbd_need_fixup_collection,
+ sizeof(lenovo_yoga7x_kbd_need_fixup_collection)) == 0) {
+ rdesc[55] = rdesc[61]; // logical maximum = usage maximum
+ }
+ break;
}
return rdesc;
}
@@ -734,7 +747,7 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
report_key_event(input, KEY_RFKILL);
return 1;
}
- platform_profile_cycle();
+ report_key_event(input, KEY_PERFORMANCE);
return 1;
case TP_X12_RAW_HOTKEY_FN_F10:
/* TAB1 has PICKUP Phone and TAB2 uses Snipping tool */
@@ -1540,6 +1553,8 @@ static const struct hid_device_id lenovo_devices[] = {
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X12_TAB2) },
+ { HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_YOGA_SLIM_7X_KEYBOARD) },
{ }
};
diff --git a/drivers/hid/hid-lg-g15.c b/drivers/hid/hid-lg-g15.c
index f8605656257b..1a88bc44ada4 100644
--- a/drivers/hid/hid-lg-g15.c
+++ b/drivers/hid/hid-lg-g15.c
@@ -26,7 +26,24 @@
#define LG_G510_FEATURE_BACKLIGHT_RGB 0x05
#define LG_G510_FEATURE_POWER_ON_RGB 0x06
+#define LG_G510_INPUT_MACRO_KEYS 0x03
+#define LG_G510_INPUT_KBD_BACKLIGHT 0x04
+
+#define LG_G13_INPUT_REPORT 0x01
+#define LG_G13_FEATURE_M_KEYS_LEDS 0x05
+#define LG_G13_FEATURE_BACKLIGHT_RGB 0x07
+#define LG_G13_BACKLIGHT_HW_ON_BIT 23
+
+/**
+ * TEST_BIT() - test one bit in a byte array
+ *
+ * g13_input_report.keybits[] is not 32-bit aligned, so we can't use the
+ * bitops macros.
+ *
+ * @ary: Pointer to array of u8s
+ * @b: Bit index into ary, LSB first. Not range checked.
+ */
+#define TEST_BIT(ary, b) ((1 << ((b) & 7)) & (ary)[(b) >> 3])
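+/* e.g. TEST_BIT(rep->keybits, 23) tests bit 7 of rep->keybits[2]. */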
+
enum lg_g15_model {
+ LG_G13,
LG_G15,
LG_G15_V2,
LG_G510,
@@ -45,6 +62,12 @@ enum lg_g15_led_type {
LG_G15_LED_MAX
};
+struct g13_input_report {
+ u8 report_id; /* Report ID is always set to 1. */
+ u8 joy_x, joy_y;
+ u8 keybits[5];
+};
+
struct lg_g15_led {
union {
struct led_classdev cdev;
@@ -63,12 +86,188 @@ struct lg_g15_data {
struct mutex mutex;
struct work_struct work;
struct input_dev *input;
+ struct input_dev *input_js; /* Separate joystick device for G13. */
struct hid_device *hdev;
enum lg_g15_model model;
struct lg_g15_led leds[LG_G15_LED_MAX];
bool game_mode_enabled;
+ bool backlight_disabled; /* true == HW backlight toggled *OFF* */
};
+/********* G13 LED functions ***********/
+/*
+ * G13 retains no state across power cycles, and always powers up with the backlight on,
+ * color #5AFF6E, all macro key LEDs off.
+ */
+static int lg_g13_get_leds_state(struct lg_g15_data *g15)
+{
+ u8 * const tbuf = g15->transfer_buf;
+ int ret, high;
+
+ /* RGB backlight. */
+ ret = hid_hw_raw_request(g15->hdev, LG_G13_FEATURE_BACKLIGHT_RGB,
+ tbuf, 5,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret != 5) {
+ hid_err(g15->hdev, "Error getting backlight brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ /* Normalize RGB intensities against the highest component. */
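+ /* e.g. raw (60, 120, 240) yields color (64, 128, 255) at brightness 240. */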
+ high = max3(tbuf[1], tbuf[2], tbuf[3]);
+ if (high) {
+ g15->leds[LG_G15_KBD_BRIGHTNESS].red =
+ DIV_ROUND_CLOSEST(tbuf[1] * 255, high);
+ g15->leds[LG_G15_KBD_BRIGHTNESS].green =
+ DIV_ROUND_CLOSEST(tbuf[2] * 255, high);
+ g15->leds[LG_G15_KBD_BRIGHTNESS].blue =
+ DIV_ROUND_CLOSEST(tbuf[3] * 255, high);
+ g15->leds[LG_G15_KBD_BRIGHTNESS].brightness = high;
+ } else {
+ g15->leds[LG_G15_KBD_BRIGHTNESS].red = 255;
+ g15->leds[LG_G15_KBD_BRIGHTNESS].green = 255;
+ g15->leds[LG_G15_KBD_BRIGHTNESS].blue = 255;
+ g15->leds[LG_G15_KBD_BRIGHTNESS].brightness = 0;
+ }
+
+ /* Macro LEDs. */
+ ret = hid_hw_raw_request(g15->hdev, LG_G13_FEATURE_M_KEYS_LEDS,
+ tbuf, 5,
+ HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret != 5) {
+ hid_err(g15->hdev, "Error getting macro LED brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ for (int i = LG_G15_MACRO_PRESET1; i < LG_G15_LED_MAX; ++i)
+ g15->leds[i].brightness = !!(tbuf[1] & (1 << (i - LG_G15_MACRO_PRESET1)));
+
+ /*
+ * Bit 23 of g13_input_report.keybits[] contains the backlight's
+ * current HW toggle state. Retrieve it from the device.
+ */
+ ret = hid_hw_raw_request(g15->hdev, LG_G13_INPUT_REPORT,
+ tbuf, sizeof(struct g13_input_report),
+ HID_INPUT_REPORT, HID_REQ_GET_REPORT);
+ if (ret != sizeof(struct g13_input_report)) {
+ hid_err(g15->hdev, "Error getting backlight on/off state: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+ g15->backlight_disabled =
+ !TEST_BIT(((struct g13_input_report *) tbuf)->keybits,
+ LG_G13_BACKLIGHT_HW_ON_BIT);
+
+ return 0;
+}
+
+static int lg_g13_kbd_led_write(struct lg_g15_data *g15,
+ struct lg_g15_led *g15_led,
+ enum led_brightness brightness)
+{
+ struct mc_subled const * const subleds = g15_led->mcdev.subled_info;
+ u8 * const tbuf = g15->transfer_buf;
+ int ret;
+
+ guard(mutex)(&g15->mutex);
+
+ led_mc_calc_color_components(&g15_led->mcdev, brightness);
+
+ tbuf[0] = 5;
+ tbuf[1] = subleds[0].brightness;
+ tbuf[2] = subleds[1].brightness;
+ tbuf[3] = subleds[2].brightness;
+ tbuf[4] = 0;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G13_FEATURE_BACKLIGHT_RGB,
+ tbuf, 5,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret != 5) {
+ hid_err(g15->hdev, "Error setting backlight brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ g15_led->brightness = brightness;
+ return 0;
+}
+
+static int lg_g13_kbd_led_set(struct led_classdev *led_cdev, enum led_brightness brightness)
+{
+ struct led_classdev_mc *mc = lcdev_to_mccdev(led_cdev);
+ struct lg_g15_led *g15_led =
+ container_of(mc, struct lg_g15_led, mcdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+
+ /* Ignore LED off on unregister / keyboard unplug */
+ if (led_cdev->flags & LED_UNREGISTERING)
+ return 0;
+
+ return lg_g13_kbd_led_write(g15, g15_led, brightness);
+}
+
+static enum led_brightness lg_g13_kbd_led_get(struct led_classdev *led_cdev)
+{
+ struct led_classdev_mc const * const mc = lcdev_to_mccdev(led_cdev);
+ struct lg_g15_led const *g15_led =
+ container_of(mc, struct lg_g15_led, mcdev);
+
+ return g15_led->brightness;
+}
+
+static int lg_g13_mkey_led_set(struct led_classdev *led_cdev, enum led_brightness brightness)
+{
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+ struct lg_g15_data *g15 = dev_get_drvdata(led_cdev->dev->parent);
+ int i, ret;
+ u8 * const tbuf = g15->transfer_buf;
+ u8 val, mask = 0;
+
+ /* Ignore LED off on unregister / keyboard unplug */
+ if (led_cdev->flags & LED_UNREGISTERING)
+ return 0;
+
+ guard(mutex)(&g15->mutex);
+
+ for (i = LG_G15_MACRO_PRESET1; i < LG_G15_LED_MAX; ++i) {
+ if (i == g15_led->led)
+ val = brightness;
+ else
+ val = g15->leds[i].brightness;
+
+ if (val)
+ mask |= 1 << (i - LG_G15_MACRO_PRESET1);
+ }
+
+ tbuf[0] = 5;
+ tbuf[1] = mask;
+ tbuf[2] = 0;
+ tbuf[3] = 0;
+ tbuf[4] = 0;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G13_FEATURE_M_KEYS_LEDS,
+ tbuf, 5,
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (ret != 5) {
+ hid_err(g15->hdev, "Error setting LED brightness: %d\n", ret);
+ return (ret < 0) ? ret : -EIO;
+ }
+
+ g15_led->brightness = brightness;
+ return 0;
+}
+
+static enum led_brightness lg_g13_mkey_led_get(struct led_classdev *led_cdev)
+{
+ /*
+ * G13 doesn't change macro key LEDs behind our back, so they're
+ * whatever we last set them to.
+ */
+ struct lg_g15_led *g15_led =
+ container_of(led_cdev, struct lg_g15_led, cdev);
+
+ return g15_led->brightness;
+}
+
/******** G15 and G15 v2 LED functions ********/
static int lg_g15_update_led_brightness(struct lg_g15_data *g15)
@@ -227,6 +426,20 @@ static int lg_g510_get_initial_led_brightness(struct lg_g15_data *g15, int i)
g15->leds[i].brightness = 0;
}
+ if (i)
+ return 0;
+
+ ret = hid_hw_raw_request(g15->hdev, LG_G510_INPUT_KBD_BACKLIGHT,
+ g15->transfer_buf, 2,
+ HID_INPUT_REPORT, HID_REQ_GET_REPORT);
+ if (ret != 2) {
+ /* This can happen when a KVM switch is used, so only warn. */
+ hid_warn(g15->hdev, "Error getting backlight state: %d\n", ret);
+ return 0;
+ }
+
+ g15->backlight_disabled = g15->transfer_buf[1] & 0x04;
+
return 0;
}
@@ -390,6 +603,8 @@ static int lg_g15_get_initial_led_brightness(struct lg_g15_data *g15)
int ret;
switch (g15->model) {
+ case LG_G13:
+ return lg_g13_get_leds_state(g15);
case LG_G15:
case LG_G15_V2:
return lg_g15_update_led_brightness(g15);
@@ -417,6 +632,108 @@ static int lg_g15_get_initial_led_brightness(struct lg_g15_data *g15)
/******** Input functions ********/
+/* Table mapping keybits[] bit positions to event codes. */
+/* Note: indices are discontinuous; bits with no entry emit no key event. */
+static const u16 g13_keys_for_bits[] = {
+ /* Main keypad - keys G1 - G22 */
+ [0] = KEY_MACRO1,
+ [1] = KEY_MACRO2,
+ [2] = KEY_MACRO3,
+ [3] = KEY_MACRO4,
+ [4] = KEY_MACRO5,
+ [5] = KEY_MACRO6,
+ [6] = KEY_MACRO7,
+ [7] = KEY_MACRO8,
+ [8] = KEY_MACRO9,
+ [9] = KEY_MACRO10,
+ [10] = KEY_MACRO11,
+ [11] = KEY_MACRO12,
+ [12] = KEY_MACRO13,
+ [13] = KEY_MACRO14,
+ [14] = KEY_MACRO15,
+ [15] = KEY_MACRO16,
+ [16] = KEY_MACRO17,
+ [17] = KEY_MACRO18,
+ [18] = KEY_MACRO19,
+ [19] = KEY_MACRO20,
+ [20] = KEY_MACRO21,
+ [21] = KEY_MACRO22,
+
+ /* LCD menu buttons. */
+ [24] = KEY_KBD_LCD_MENU5, /* "Next page" button */
+ [25] = KEY_KBD_LCD_MENU1, /* Left-most */
+ [26] = KEY_KBD_LCD_MENU2,
+ [27] = KEY_KBD_LCD_MENU3,
+ [28] = KEY_KBD_LCD_MENU4, /* Right-most */
+
+ /* Macro preset and record buttons; have red LEDs under them. */
+ [29] = KEY_MACRO_PRESET1,
+ [30] = KEY_MACRO_PRESET2,
+ [31] = KEY_MACRO_PRESET3,
+ [32] = KEY_MACRO_RECORD_START,
+
+ /* 33-35 handled by joystick device. */
+
+ /* Backlight toggle. */
+ [37] = KEY_LIGHTS_TOGGLE,
+};
+
+#define G13_JS_KEYBITS_OFFSET 33
+
+static const u16 g13_keys_for_bits_js[] = {
+ /* Joystick buttons */
+ /* These keybits are at bit indices 33, 34, and 35. */
+ BTN_BASE, /* Left side */
+ BTN_BASE2, /* Bottom side */
+ BTN_THUMB, /* Stick depress */
+};
+
+static int lg_g13_event(struct lg_g15_data *g15, u8 const *data)
+{
+ struct g13_input_report const * const rep = (struct g13_input_report *) data;
+ int i, val;
+ bool backlight_disabled;
+
+ /*
+ * Main macropad and menu keys.
+ * Emit key events defined for each bit position.
+ */
+ for (i = 0; i < ARRAY_SIZE(g13_keys_for_bits); ++i) {
+ if (g13_keys_for_bits[i]) {
+ val = TEST_BIT(rep->keybits, i);
+ input_report_key(g15->input, g13_keys_for_bits[i], val);
+ }
+ }
+ input_sync(g15->input);
+
+ /*
+ * Joystick.
+ * Emit button and deflection events.
+ */
+ for (i = 0; i < ARRAY_SIZE(g13_keys_for_bits_js); ++i) {
+ val = TEST_BIT(rep->keybits, i + G13_JS_KEYBITS_OFFSET);
+ input_report_key(g15->input_js, g13_keys_for_bits_js[i], val);
+ }
+ input_report_abs(g15->input_js, ABS_X, rep->joy_x);
+ input_report_abs(g15->input_js, ABS_Y, rep->joy_y);
+ input_sync(g15->input_js);
+
+ /*
+ * Bit 23 of keybits[] reports the current backlight on/off state. If
+ * it has changed from the last cached value, apply an update.
+ */
+ backlight_disabled = !TEST_BIT(rep->keybits, LG_G13_BACKLIGHT_HW_ON_BIT);
+ if (backlight_disabled ^ g15->backlight_disabled) {
+ led_classdev_notify_brightness_hw_changed(
+ &g15->leds[LG_G15_KBD_BRIGHTNESS].mcdev.led_cdev,
+ backlight_disabled
+ ? 0 : g15->leds[LG_G15_KBD_BRIGHTNESS].brightness);
+ g15->backlight_disabled = backlight_disabled;
+ }
+
+ return 0;
+}
+
/* On the G15 Mark I Logitech has been quite creative with which bit is what */
static void lg_g15_handle_lcd_menu_keys(struct lg_g15_data *g15, u8 *data)
{
@@ -549,14 +866,24 @@ static int lg_g510_event(struct lg_g15_data *g15, u8 *data)
static int lg_g510_leds_event(struct lg_g15_data *g15, u8 *data)
{
+ struct lg_g15_led *g15_led = &g15->leds[LG_G15_KBD_BRIGHTNESS];
bool backlight_disabled;
+ backlight_disabled = data[1] & 0x04;
+ if (backlight_disabled == g15->backlight_disabled)
+ return 0;
+
+ led_classdev_notify_brightness_hw_changed(
+ &g15_led->mcdev.led_cdev,
+ backlight_disabled ? 0 : g15_led->brightness);
+
+ g15->backlight_disabled = backlight_disabled;
+
/*
* The G510 ignores backlight updates when the backlight is turned off
* through the light toggle button on the keyboard, to work around this
* we queue a workitem to sync values when the backlight is turned on.
*/
- backlight_disabled = data[1] & 0x04;
if (!backlight_disabled)
schedule_work(&g15->work);
@@ -572,6 +899,10 @@ static int lg_g15_raw_event(struct hid_device *hdev, struct hid_report *report,
return 0;
switch (g15->model) {
+ case LG_G13:
+ if (data[0] == LG_G13_INPUT_REPORT && size == sizeof(struct g13_input_report))
+ return lg_g13_event(g15, data);
+ break;
case LG_G15:
if (data[0] == 0x02 && size == 9)
return lg_g15_event(g15, data);
@@ -588,9 +919,9 @@ static int lg_g15_raw_event(struct hid_device *hdev, struct hid_report *report,
break;
case LG_G510:
case LG_G510_USB_AUDIO:
- if (data[0] == 0x03 && size == 5)
+ if (data[0] == LG_G510_INPUT_MACRO_KEYS && size == 5)
return lg_g510_event(g15, data);
- if (data[0] == 0x04 && size == 2)
+ if (data[0] == LG_G510_INPUT_KBD_BACKLIGHT && size == 2)
return lg_g510_leds_event(g15, data);
break;
}
@@ -616,13 +947,24 @@ static void lg_g15_setup_led_rgb(struct lg_g15_data *g15, int index)
{
int i;
struct mc_subled *subled_info;
-
- g15->leds[index].mcdev.led_cdev.brightness_set_blocking =
- lg_g510_kbd_led_set;
- g15->leds[index].mcdev.led_cdev.brightness_get =
- lg_g510_kbd_led_get;
- g15->leds[index].mcdev.led_cdev.max_brightness = 255;
- g15->leds[index].mcdev.num_colors = 3;
+ struct lg_g15_led * const gled = &g15->leds[index];
+
+ if (g15->model == LG_G13) {
+ gled->mcdev.led_cdev.brightness_set_blocking =
+ lg_g13_kbd_led_set;
+ gled->mcdev.led_cdev.brightness_get =
+ lg_g13_kbd_led_get;
+ gled->mcdev.led_cdev.flags = LED_BRIGHT_HW_CHANGED;
+ } else {
+ gled->mcdev.led_cdev.brightness_set_blocking =
+ lg_g510_kbd_led_set;
+ gled->mcdev.led_cdev.brightness_get =
+ lg_g510_kbd_led_get;
+ if (index == LG_G15_KBD_BRIGHTNESS)
+ g15->leds[index].mcdev.led_cdev.flags = LED_BRIGHT_HW_CHANGED;
+ }
+ gled->mcdev.led_cdev.max_brightness = 255;
+ gled->mcdev.num_colors = 3;
subled_info = devm_kcalloc(&g15->hdev->dev, 3, sizeof(*subled_info), GFP_KERNEL);
if (!subled_info)
@@ -632,20 +974,20 @@ static void lg_g15_setup_led_rgb(struct lg_g15_data *g15, int index)
switch (i + 1) {
case LED_COLOR_ID_RED:
subled_info[i].color_index = LED_COLOR_ID_RED;
- subled_info[i].intensity = g15->leds[index].red;
+ subled_info[i].intensity = gled->red;
break;
case LED_COLOR_ID_GREEN:
subled_info[i].color_index = LED_COLOR_ID_GREEN;
- subled_info[i].intensity = g15->leds[index].green;
+ subled_info[i].intensity = gled->green;
break;
case LED_COLOR_ID_BLUE:
subled_info[i].color_index = LED_COLOR_ID_BLUE;
- subled_info[i].intensity = g15->leds[index].blue;
+ subled_info[i].intensity = gled->blue;
break;
}
subled_info[i].channel = i;
}
- g15->leds[index].mcdev.subled_info = subled_info;
+ gled->mcdev.subled_info = subled_info;
}
static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
@@ -656,6 +998,23 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
g15->leds[i].cdev.name = name;
switch (g15->model) {
+ case LG_G13:
+ if (i < LG_G15_BRIGHTNESS_MAX) {
+ /* RGB backlight. */
+ lg_g15_setup_led_rgb(g15, i);
+ ret = devm_led_classdev_multicolor_register_ext(&g15->hdev->dev,
+ &g15->leds[i].mcdev,
+ NULL);
+ } else {
+ /* Macro keys */
+ g15->leds[i].cdev.brightness_set_blocking = lg_g13_mkey_led_set;
+ g15->leds[i].cdev.brightness_get = lg_g13_mkey_led_get;
+ g15->leds[i].cdev.max_brightness = 1;
+
+ ret = devm_led_classdev_register(&g15->hdev->dev,
+ &g15->leds[i].cdev);
+ }
+ break;
case LG_G15:
case LG_G15_V2:
g15->leds[i].cdev.brightness_get = lg_g15_led_get;
@@ -702,11 +1061,9 @@ static int lg_g15_register_led(struct lg_g15_data *g15, int i, const char *name)
}
/* Common input device init code shared between keyboards and Z-10 speaker handling */
-static void lg_g15_init_input_dev(struct hid_device *hdev, struct input_dev *input,
- const char *name)
+static void lg_g15_init_input_dev_core(struct hid_device *hdev, struct input_dev *input,
+ char const *name)
{
- int i;
-
input->name = name;
input->phys = hdev->phys;
input->uniq = hdev->uniq;
@@ -717,12 +1074,42 @@ static void lg_g15_init_input_dev(struct hid_device *hdev, struct input_dev *inp
input->dev.parent = &hdev->dev;
input->open = lg_g15_input_open;
input->close = lg_g15_input_close;
+}
+
+static void lg_g15_init_input_dev(struct hid_device *hdev, struct input_dev *input,
+ const char *name)
+{
+ int i;
+
+ lg_g15_init_input_dev_core(hdev, input, name);
/* Keys below the LCD, intended for controlling a menu on the LCD */
for (i = 0; i < 5; i++)
input_set_capability(input, EV_KEY, KEY_KBD_LCD_MENU1 + i);
}
+static void lg_g13_init_input_dev(struct hid_device *hdev,
+ struct input_dev *input, const char *name,
+ struct input_dev *input_js, const char *name_js)
+{
+ /* Macropad. */
+ lg_g15_init_input_dev_core(hdev, input, name);
+ for (int i = 0; i < ARRAY_SIZE(g13_keys_for_bits); ++i) {
+ if (g13_keys_for_bits[i])
+ input_set_capability(input, EV_KEY, g13_keys_for_bits[i]);
+ }
+
+ /* Oh, and by the way: we're a joystick, too... */
+ lg_g15_init_input_dev_core(hdev, input_js, name_js);
+ for (int i = 0; i < ARRAY_SIZE(g13_keys_for_bits_js); ++i)
+ input_set_capability(input_js, EV_KEY, g13_keys_for_bits_js[i]);
+
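+ /* The thumbstick reports raw 8-bit X/Y positions (see g13_input_report). */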
+ input_set_capability(input_js, EV_ABS, ABS_X);
+ input_set_abs_params(input_js, ABS_X, 0, 255, 0, 0);
+ input_set_capability(input_js, EV_ABS, ABS_Y);
+ input_set_abs_params(input_js, ABS_Y, 0, 255, 0, 0);
+}
+
static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
static const char * const led_names[] = {
@@ -739,7 +1126,7 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
unsigned int connect_mask = 0;
bool has_ff000000 = false;
struct lg_g15_data *g15;
- struct input_dev *input;
+ struct input_dev *input, *input_js;
struct hid_report *rep;
int ret, i, gkeys = 0;
@@ -778,6 +1165,25 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
hid_set_drvdata(hdev, (void *)g15);
switch (g15->model) {
+ case LG_G13:
+ /*
+ * The G13 has an analog thumbstick with nearby buttons. Some
+ * libraries and applications are known to ignore devices that
+ * don't "look like" a joystick, and a device with two ABS axes
+ * and 25+ macro keys would confuse them.
+ *
+ * Create an additional input device dedicated to appear as a
+ * simplified joystick (two ABS axes, three BTN buttons).
+ */
+ input_js = devm_input_allocate_device(&hdev->dev);
+ if (!input_js)
+ return -ENOMEM;
+ g15->input_js = input_js;
+ input_set_drvdata(input_js, hdev);
+
+ connect_mask = HID_CONNECT_HIDRAW;
+ gkeys = 25;
+ break;
case LG_G15:
INIT_WORK(&g15->work, lg_g15_leds_changed_work);
/*
@@ -859,6 +1265,38 @@ static int lg_g15_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto error_hw_stop;
return 0; /* All done */
+ } else if (g15->model == LG_G13) {
+ static char const * const g13_led_names[] = {
+ /* Backlight is shared between LCD and keys. */
+ "g13:rgb:kbd_backlight",
+ NULL, /* Keep in sync with led_type enum */
+ "g13:red:macro_preset_1",
+ "g13:red:macro_preset_2",
+ "g13:red:macro_preset_3",
+ "g13:red:macro_record",
+ };
+ lg_g13_init_input_dev(hdev,
+ input, "Logitech G13 Gaming Keypad",
+ input_js, "Logitech G13 Thumbstick");
+ ret = input_register_device(input);
+ if (ret)
+ goto error_hw_stop;
+ ret = input_register_device(input_js);
+ if (ret)
+ goto error_hw_stop;
+
+ for (i = 0; i < ARRAY_SIZE(g13_led_names); ++i) {
+ if (g13_led_names[i]) {
+ ret = lg_g15_register_led(g15, i, g13_led_names[i]);
+ if (ret)
+ goto error_hw_stop;
+ }
+ }
+ led_classdev_notify_brightness_hw_changed(
+ &g15->leds[LG_G15_KBD_BRIGHTNESS].mcdev.led_cdev,
+ g15->backlight_disabled
+ ? 0 : g15->leds[LG_G15_KBD_BRIGHTNESS].brightness);
+ return 0;
}
/* Setup and register input device */
@@ -903,6 +1341,13 @@ error_hw_stop:
}
static const struct hid_device_id lg_g15_devices[] = {
+ /*
+ * The G13 is a macropad-only device with an LCD, LED backlighting,
+ * and joystick.
+ */
+ { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_G13),
+ .driver_data = LG_G13 },
/* The G11 is a G15 without the LCD, treat it as a G15 */
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_G11),
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index cce54dd9884a..44b716697510 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -116,6 +116,7 @@ enum recvr_type {
recvr_type_dj,
recvr_type_hidpp,
recvr_type_gaming_hidpp,
+ recvr_type_gaming_hidpp_ls_1_3,
recvr_type_mouse_only,
recvr_type_27mhz,
recvr_type_bluetooth,
@@ -148,6 +149,7 @@ struct dj_receiver_dev {
struct kfifo notif_fifo;
unsigned long last_query; /* in jiffies */
bool ready;
+ bool dj_mode;
enum recvr_type type;
unsigned int unnumbered_application;
spinlock_t lock;
@@ -211,6 +213,44 @@ static const char kbd_descriptor[] = {
0xC0
};
+/* Gaming Keyboard descriptor (1) */
+static const char kbd_lightspeed_1_3_descriptor[] = {
+ 0x05, 0x01, /* Usage Page (Generic Desktop) */
+ 0x09, 0x06, /* Usage (Keyboard) */
+ 0xA1, 0x01, /* Collection (Application) */
+ 0x85, 0x01, /* Report ID (1) */
+ 0x05, 0x07, /* Usage Page (Kbrd/Keypad) */
+ 0x19, 0xE0, /* Usage Minimum (0xE0) */
+ 0x29, 0xE7, /* Usage Maximum (0xE7) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x25, 0x01, /* Logical Maximum (1) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x81, 0x02, /* Input (Data,Var) */
+ 0x95, 0x70, /* Report Count (112) */
+ 0x19, 0x04, /* Usage Minimum (0x04) */
+ 0x29, 0x73, /* Usage Maximum (0x73) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0x95, 0x05, /* Report Count (5) */
+ 0x19, 0x87, /* Usage Minimum (0x87) */
+ 0x29, 0x8B, /* Usage Maximum (0x8B) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0x95, 0x03, /* Report Count (3) */
+ 0x19, 0x90, /* Usage Minimum (0x90) */
+ 0x29, 0x92, /* Usage Maximum (0x92) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0x95, 0x05, /* Report Count (5) */
+ 0x85, 0x0E, /* Report ID (14) */
+ 0x05, 0x08, /* Usage Page (LEDs) */
+ 0x19, 0x01, /* Usage Minimum (Num Lock) */
+ 0x29, 0x05, /* Usage Maximum (Kana) */
+ 0x91, 0x02, /* Output (Data,Var,Abs) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x75, 0x03, /* Report Size (3) */
+ 0x91, 0x03, /* Output (Const,Var,Abs) */
+ 0xC0, /* End Collection */
+};
+
/* Mouse descriptor (2) */
static const char mse_descriptor[] = {
0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
@@ -415,6 +455,51 @@ static const char mse_high_res_descriptor[] = {
0xC0, /* END_COLLECTION */
};
+/* Gaming Mouse descriptor with vendor data (2) */
+static const char mse_high_res_ls_1_3_descriptor[] = {
+ 0x05, 0x01, /* Usage Page (Generic Desktop) */
+ 0x09, 0x02, /* Usage (Mouse) */
+ 0xA1, 0x01, /* Collection (Application) */
+ 0x85, 0x02, /* Report ID (2) */
+ 0x09, 0x01, /* Usage (Pointer) */
+ 0xA1, 0x00, /* Collection (Physical) */
+ 0x95, 0x10, /* Report Count (16) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x25, 0x01, /* Logical Maximum (1) */
+ 0x05, 0x09, /* Usage Page (Button) */
+ 0x19, 0x01, /* Usage Minimum (0x01) */
+ 0x29, 0x10, /* Usage Maximum (0x10) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0x95, 0x02, /* Report Count (2) */
+ 0x75, 0x10, /* Report Size (16) */
+ 0x16, 0x01, 0x80, /* Logical Minimum (-32767) */
+ 0x26, 0xFF, 0x7F, /* Logical Maximum (32767) */
+ 0x05, 0x01, /* Usage Page (Generic Desktop) */
+ 0x09, 0x30, /* Usage (X) */
+ 0x09, 0x31, /* Usage (Y) */
+ 0x81, 0x06, /* Input (Data,Var,Rel) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x15, 0x81, /* Logical Minimum (-127) */
+ 0x25, 0x7F, /* Logical Maximum (127) */
+ 0x09, 0x38, /* Usage (Wheel) */
+ 0x81, 0x06, /* Input (Data,Var,Rel) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x05, 0x0C, /* Usage Page (Consumer) */
+ 0x0A, 0x38, 0x02, /* Usage (AC Pan) */
+ 0x81, 0x06, /* Input (Data,Var,Rel) */
+ 0xC0, /* End Collection */
+ 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined 0xFF00) */
+ 0x09, 0xF1, /* Usage (0xF1) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x05, /* Report Count (5) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x81, 0x00, /* Input (Data,Array,Abs) */
+ 0xC0, /* End Collection */
+};
+
/* Consumer Control descriptor (3) */
static const char consumer_descriptor[] = {
0x05, 0x0C, /* USAGE_PAGE (Consumer Devices) */
@@ -520,9 +605,9 @@ static const char hidpp_descriptor[] = {
/* Maximum size of all defined hid reports in bytes (including report id) */
#define MAX_REPORT_SIZE 8
-/* Make sure all descriptors are present here */
+/* Make sure the largest of each descriptor type is present here */
#define MAX_RDESC_SIZE \
- (sizeof(kbd_descriptor) + \
+ (sizeof(kbd_lightspeed_1_3_descriptor) + \
sizeof(mse_bluetooth_descriptor) + \
sizeof(mse5_bluetooth_descriptor) + \
sizeof(consumer_descriptor) + \
@@ -557,6 +642,8 @@ static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
static const struct hid_ll_driver logi_dj_ll_driver;
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
+static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
+ unsigned int timeout);
static void delayedwork_callback(struct work_struct *work);
static LIST_HEAD(dj_hdev_list);
@@ -805,7 +892,6 @@ static void delayedwork_callback(struct work_struct *work)
struct dj_workitem workitem;
unsigned long flags;
int count;
- int retval;
dbg_hid("%s\n", __func__);
@@ -842,11 +928,10 @@ static void delayedwork_callback(struct work_struct *work)
logi_dj_recv_destroy_djhid_device(djrcv_dev, &workitem);
break;
case WORKITEM_TYPE_UNKNOWN:
- retval = logi_dj_recv_query_paired_devices(djrcv_dev);
- if (retval) {
- hid_err(djrcv_dev->hidpp, "%s: logi_dj_recv_query_paired_devices error: %d\n",
- __func__, retval);
- }
+ if (!djrcv_dev->dj_mode)
+ logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
+
+ logi_dj_recv_query_paired_devices(djrcv_dev);
break;
case WORKITEM_TYPE_EMPTY:
dbg_hid("%s: device list is empty\n", __func__);
@@ -1239,8 +1324,13 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
djrcv_dev->last_query = jiffies;
- if (djrcv_dev->type != recvr_type_dj)
- return logi_dj_recv_query_hidpp_devices(djrcv_dev);
+ if (!djrcv_dev->dj_mode)
+ return 0;
+
+ if (djrcv_dev->type != recvr_type_dj) {
+ retval = logi_dj_recv_query_hidpp_devices(djrcv_dev);
+ goto out;
+ }
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
@@ -1250,6 +1340,10 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
kfree(dj_report);
+out:
+ if (retval < 0)
+ hid_err(djrcv_dev->hidpp, "%s error:%d\n", __func__, retval);
+
return retval;
}
@@ -1275,6 +1369,8 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
(u8)timeout;
retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+ if (retval)
+ goto out;
/*
* Ugly sleep to work around a USB 3.0 bug when the receiver is
@@ -1283,11 +1379,6 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
* 50 msec should give the receiver enough time to be ready.
*/
msleep(50);
-
- if (retval) {
- kfree(dj_report);
- return retval;
- }
}
/*
@@ -1313,7 +1404,13 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT,
HID_REQ_SET_REPORT);
+out:
kfree(dj_report);
+
+ if (retval < 0)
+ hid_err(hdev, "%s error:%d\n", __func__, retval);
+
+ djrcv_dev->dj_mode = retval >= 0;
return retval;
}
@@ -1374,12 +1471,19 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
return -EINVAL;
if (djrcv_dev->type != recvr_type_dj && count >= 2) {
+ unsigned char led_report_id = 0;
+
if (!djrcv_dev->keyboard) {
hid_warn(hid, "Received REPORT_TYPE_LEDS request before the keyboard interface was enumerated\n");
return 0;
}
+
+ /* This Lightspeed receiver expects LED reports with report ID 1 */
+ if (djrcv_dev->type == recvr_type_gaming_hidpp_ls_1_3)
+ led_report_id = 1;
+
/* usbhid overrides the report ID and ignores the first byte */
- return hid_hw_raw_request(djrcv_dev->keyboard, 0, buf, count,
+ return hid_hw_raw_request(djrcv_dev->keyboard, led_report_id, buf, count,
report_type, reqtype);
}
@@ -1426,7 +1530,11 @@ static int logi_dj_ll_parse(struct hid_device *hid)
if (djdev->reports_supported & STD_KEYBOARD) {
dbg_hid("%s: sending a kbd descriptor, reports_supported: %llx\n",
__func__, djdev->reports_supported);
- rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
+ if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp_ls_1_3)
+ rdcat(rdesc, &rsize, kbd_lightspeed_1_3_descriptor,
+ sizeof(kbd_lightspeed_1_3_descriptor));
+ else
+ rdcat(rdesc, &rsize, kbd_descriptor, sizeof(kbd_descriptor));
}
if (djdev->reports_supported & STD_MOUSE) {
@@ -1436,6 +1544,9 @@ static int logi_dj_ll_parse(struct hid_device *hid)
djdev->dj_receiver_dev->type == recvr_type_mouse_only)
rdcat(rdesc, &rsize, mse_high_res_descriptor,
sizeof(mse_high_res_descriptor));
+ else if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp_ls_1_3)
+ rdcat(rdesc, &rsize, mse_high_res_ls_1_3_descriptor,
+ sizeof(mse_high_res_ls_1_3_descriptor));
else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
rdcat(rdesc, &rsize, mse_27mhz_descriptor,
sizeof(mse_27mhz_descriptor));
@@ -1695,11 +1806,12 @@ static int logi_dj_raw_event(struct hid_device *hdev,
}
/*
* Mouse-only receivers send unnumbered mouse data. The 27 MHz
- * receiver uses 6 byte packets, the nano receiver 8 bytes.
+ * receiver uses 6 byte packets, the nano receiver 8 bytes,
+ * the lightspeed receiver (Pro X Superlight) 13 bytes.
*/
if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
- size <= 8) {
- u8 mouse_report[9];
+ size <= 13) {
+ u8 mouse_report[14];
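+ /* Sized for the largest payload (13 bytes) plus the prepended report id. */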
/* Prepend report id */
mouse_report[0] = REPORT_TYPE_MOUSE;
@@ -1776,6 +1888,7 @@ static int logi_dj_probe(struct hid_device *hdev,
case recvr_type_dj: no_dj_interfaces = 3; break;
case recvr_type_hidpp: no_dj_interfaces = 2; break;
case recvr_type_gaming_hidpp: no_dj_interfaces = 3; break;
+ case recvr_type_gaming_hidpp_ls_1_3: no_dj_interfaces = 3; break;
case recvr_type_mouse_only: no_dj_interfaces = 2; break;
case recvr_type_27mhz: no_dj_interfaces = 2; break;
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
@@ -1834,12 +1947,11 @@ static int logi_dj_probe(struct hid_device *hdev,
}
if (has_hidpp) {
- retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
- if (retval < 0) {
- hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n",
- __func__, retval);
- goto switch_to_dj_mode_fail;
- }
+ /*
+ * This can fail with a KVM. Ignore errors to let the probe
+ * succeed, logi_dj_recv_queue_unknown_work will retry later.
+ */
+ logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
}
/* This is enabling the polling urb on the IN endpoint */
@@ -1857,21 +1969,13 @@ static int logi_dj_probe(struct hid_device *hdev,
spin_lock_irqsave(&djrcv_dev->lock, flags);
djrcv_dev->ready = true;
spin_unlock_irqrestore(&djrcv_dev->lock, flags);
- retval = logi_dj_recv_query_paired_devices(djrcv_dev);
- if (retval < 0) {
- hid_err(hdev, "%s: logi_dj_recv_query_paired_devices error:%d\n",
- __func__, retval);
- /*
- * This can happen with a KVM, let the probe succeed,
- * logi_dj_recv_queue_unknown_work will retry later.
- */
- }
+ /* This too can fail with a KVM, ignore errors. */
+ logi_dj_recv_query_paired_devices(djrcv_dev);
}
return 0;
llopen_failed:
-switch_to_dj_mode_fail:
hid_hw_stop(hdev);
hid_hw_start_fail:
@@ -1882,18 +1986,12 @@ hid_hw_start_fail:
#ifdef CONFIG_PM
static int logi_dj_reset_resume(struct hid_device *hdev)
{
- int retval;
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
if (!djrcv_dev || djrcv_dev->hidpp != hdev)
return 0;
- retval = logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
- if (retval < 0) {
- hid_err(hdev, "%s: logi_dj_recv_switch_to_dj_mode returned error:%d\n",
- __func__, retval);
- }
-
+ logi_dj_recv_switch_to_dj_mode(djrcv_dev, 0);
return 0;
}
#endif
@@ -1987,6 +2085,14 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_2),
.driver_data = recvr_type_gaming_hidpp},
+ { /* Logitech lightspeed receiver (0xc547) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_3),
+ .driver_data = recvr_type_gaming_hidpp_ls_1_3},
+ { /* Logitech lightspeed receiver (0xc54d) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED_1_4),
+ .driver_data = recvr_type_gaming_hidpp_ls_1_3},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index aaef405a717e..d5011a5d0890 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -75,6 +75,7 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(27)
#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(28)
#define HIDPP_QUIRK_WIRELESS_STATUS BIT(29)
+#define HIDPP_QUIRK_RESET_HI_RES_SCROLL BIT(30)
/* These are just aliases for now */
#define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
@@ -193,6 +194,7 @@ struct hidpp_device {
void *private_data;
struct work_struct work;
+ struct work_struct reset_hi_res_work;
struct kfifo delayed_work_fifo;
struct input_dev *delayed_input;
@@ -350,10 +352,15 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
do {
ret = __do_hidpp_send_message_sync(hidpp, message, response);
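+ /*
+ * HID++ 1.0 replies arrive in short reports, HID++ 2.0 replies
+ * in long/very long ones; each protocol defines its own BUSY
+ * error code, so select the check by report_id.
+ */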
- if (ret != HIDPP20_ERROR_BUSY)
+ if (response->report_id == REPORT_ID_HIDPP_SHORT &&
+ ret != HIDPP_ERROR_BUSY)
+ break;
+ if ((response->report_id == REPORT_ID_HIDPP_LONG ||
+ response->report_id == REPORT_ID_HIDPP_VERY_LONG) &&
+ ret != HIDPP20_ERROR_BUSY)
break;
- dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret);
+ dbg_hid("%s:got busy hidpp error %02X, retrying\n", __func__, ret);
} while (--max_retries);
mutex_unlock(&hidpp->send_mutex);
@@ -969,7 +976,8 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
}
/* the device might not be connected */
- if (ret == HIDPP_ERROR_RESOURCE_ERROR)
+ if (ret == HIDPP_ERROR_RESOURCE_ERROR ||
+ ret == HIDPP_ERROR_UNKNOWN_DEVICE)
return -EIO;
if (ret > 0) {
@@ -3836,6 +3844,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
struct hidpp_report *answer = hidpp->send_receive_buf;
struct hidpp_report *report = (struct hidpp_report *)data;
int ret;
+ int last_online;
/*
* If the mutex is locked then we have a pending answer from a
@@ -3877,6 +3886,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
"See: https://gitlab.freedesktop.org/jwrdegoede/logitech-27mhz-keyboard-encryption-setup/\n");
}
+ last_online = hidpp->battery.online;
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) {
ret = hidpp20_battery_event_1000(hidpp, data, size);
if (ret != 0)
@@ -3901,6 +3911,11 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
return ret;
}
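+ /*
+ * Devices with HIDPP_QUIRK_RESET_HI_RES_SCROLL lose their hi-res
+ * scroll setting while powered off; re-apply it once the battery
+ * reports the device back online.
+ */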
+ if (hidpp->quirks & HIDPP_QUIRK_RESET_HI_RES_SCROLL) {
+ if (last_online == 0 && hidpp->battery.online == 1)
+ schedule_work(&hidpp->reset_hi_res_work);
+ }
+
if (hidpp->quirks & HIDPP_QUIRK_HIDPP_WHEELS) {
ret = hidpp10_wheel_raw_event(hidpp, data, size);
if (ret != 0)
@@ -4274,6 +4289,13 @@ static void hidpp_connect_event(struct work_struct *work)
hidpp->delayed_input = input;
}
+static void hidpp_reset_hi_res_handler(struct work_struct *work)
+{
+ struct hidpp_device *hidpp = container_of(work, struct hidpp_device, reset_hi_res_work);
+
+ hi_res_scroll_enable(hidpp);
+}
+
static DEVICE_ATTR(builtin_power_supply, 0000, NULL, NULL);
static struct attribute *sysfs_attrs[] = {
@@ -4404,6 +4426,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
}
INIT_WORK(&hidpp->work, hidpp_connect_event);
+ INIT_WORK(&hidpp->reset_hi_res_work, hidpp_reset_hi_res_handler);
mutex_init(&hidpp->send_mutex);
init_waitqueue_head(&hidpp->wait);
@@ -4499,6 +4522,7 @@ static void hidpp_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
cancel_work_sync(&hidpp->work);
+ cancel_work_sync(&hidpp->reset_hi_res_work);
mutex_destroy(&hidpp->send_mutex);
}
@@ -4546,6 +4570,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
LDJ_DEVICE(0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
+ { /* Logitech G502 Lightspeed Wireless Gaming Mouse */
+ LDJ_DEVICE(0x407f),
+ .driver_data = HIDPP_QUIRK_RESET_HI_RES_SCROLL },
{ LDJ_DEVICE(HID_ANY_ID) },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 22c6314a8843..179dc316b4b5 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -49,6 +49,8 @@ MODULE_LICENSE("GPL");
#include "hid-ids.h"
+#include "hid-haptic.h"
+
/* quirks to control the device */
#define MT_QUIRK_NOT_SEEN_MEANS_UP BIT(0)
#define MT_QUIRK_SLOT_IS_CONTACTID BIT(1)
@@ -92,9 +94,8 @@ enum report_mode {
TOUCHPAD_REPORT_ALL = TOUCHPAD_REPORT_BUTTONS | TOUCHPAD_REPORT_CONTACTS,
};
-#define MT_IO_FLAGS_RUNNING 0
-#define MT_IO_FLAGS_ACTIVE_SLOTS 1
-#define MT_IO_FLAGS_PENDING_SLOTS 2
+#define MT_IO_SLOTS_MASK GENMASK(7, 0) /* reserve first 8 bits for slot tracking */
+#define MT_IO_FLAGS_RUNNING 32
static const bool mtrue = true; /* default for true */
static const bool mfalse; /* default for false */
@@ -168,11 +169,17 @@ struct mt_report_data {
struct mt_device {
struct mt_class mtclass; /* our mt device class */
struct timer_list release_timer; /* to release sticky fingers */
+ struct hid_haptic_device *haptic; /* haptic related configuration */
struct hid_device *hdev; /* hid_device we're attached to */
- unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_*) */
+ unsigned long mt_io_flags; /* mt flags (MT_IO_FLAGS_RUNNING);
+ * the first 8 bits are reserved for keeping the
+ * slot states. This is fine because we only
+ * support up to 250 slots (MT_MAX_MAXCONTACT).
+ */
__u8 inputmode_value; /* InputMode HID feature value */
__u8 maxcontacts;
bool is_buttonpad; /* is this device a button pad? */
+ bool is_haptic_touchpad; /* is this device a haptic touchpad? */
bool serial_maybe; /* need to check for serial protocol */
struct list_head applications;
@@ -533,6 +540,8 @@ static void mt_feature_mapping(struct hid_device *hdev,
mt_get_feature(hdev, field->report);
break;
}
+
+ hid_haptic_feature_mapping(hdev, td->haptic, field, usage);
}
static void set_abs(struct input_dev *input, unsigned int code,
@@ -888,6 +897,9 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
case HID_DG_TIPPRESSURE:
set_abs(hi->input, ABS_MT_PRESSURE, field,
cls->sn_pressure);
+ td->is_haptic_touchpad =
+ hid_haptic_check_pressure_unit(td->haptic,
+ hi, field);
MT_STORE_FIELD(p);
return 1;
case HID_DG_SCANTIME:
@@ -977,6 +989,7 @@ static void mt_release_pending_palms(struct mt_device *td,
for_each_set_bit(slotnum, app->pending_palm_slots, td->maxcontacts) {
clear_bit(slotnum, app->pending_palm_slots);
+ clear_bit(slotnum, &td->mt_io_flags);
input_mt_slot(input, slotnum);
input_mt_report_slot_inactive(input);
@@ -1008,12 +1021,8 @@ static void mt_sync_frame(struct mt_device *td, struct mt_application *app,
app->num_received = 0;
app->left_button_state = 0;
-
- if (test_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags))
- set_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
- else
- clear_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags);
- clear_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
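+ /* Start the next frame's pressure accumulation from zero. */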
+ if (td->is_haptic_touchpad)
+ hid_haptic_pressure_reset(td->haptic);
}
static int mt_compute_timestamp(struct mt_application *app, __s32 value)
@@ -1165,6 +1174,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
minor = minor >> 1;
}
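+ /* Accumulate this contact's pressure for total-force reporting. */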
+ if (td->is_haptic_touchpad)
+ hid_haptic_pressure_increase(td->haptic, *slot->p);
+
x = hdev->quirks & HID_QUIRK_X_INVERT ?
input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->x :
*slot->x;
@@ -1188,7 +1200,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major);
input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor);
- set_bit(MT_IO_FLAGS_ACTIVE_SLOTS, &td->mt_io_flags);
+ set_bit(slotnum, &td->mt_io_flags);
+ } else {
+ clear_bit(slotnum, &td->mt_io_flags);
}
return 0;
@@ -1323,7 +1337,7 @@ static void mt_touch_report(struct hid_device *hid,
* defect.
*/
if (app->quirks & MT_QUIRK_STICKY_FINGERS) {
- if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
+ if (td->mt_io_flags & MT_IO_SLOTS_MASK)
mod_timer(&td->release_timer,
jiffies + msecs_to_jiffies(100));
else
@@ -1366,6 +1380,9 @@ static int mt_touch_input_configured(struct hid_device *hdev,
if (cls->is_indirect)
app->mt_flags |= INPUT_MT_POINTER;
+ if (td->is_haptic_touchpad)
+ app->mt_flags |= INPUT_MT_TOTAL_FORCE;
+
if (app->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
app->mt_flags |= INPUT_MT_DROP_UNUSED;
@@ -1401,6 +1418,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_application *application;
struct mt_report_data *rdata;
+ int ret;
rdata = mt_find_report_data(td, field->report);
if (!rdata) {
@@ -1463,6 +1481,11 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
if (field->physical == HID_DG_STYLUS)
hi->application = HID_DG_STYLUS;
+ ret = hid_haptic_input_mapping(hdev, td->haptic, hi, field, usage, bit,
+ max);
+ if (ret != 0)
+ return ret;
+
/* let hid-core decide for the others */
return 0;
}
@@ -1685,6 +1708,14 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
struct hid_report *report;
int ret;
+ if (td->is_haptic_touchpad && (td->mtclass.name == MT_CLS_WIN_8 ||
+ td->mtclass.name == MT_CLS_WIN_8_FORCE_MULTI_INPUT)) {
+ if (hid_haptic_input_configured(hdev, td->haptic, hi) == 0)
+ td->is_haptic_touchpad = false;
+ } else {
+ td->is_haptic_touchpad = false;
+ }
+
list_for_each_entry(report, &hi->reports, hidinput_list) {
rdata = mt_find_report_data(td, report);
if (!rdata) {
@@ -1711,6 +1742,7 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
case HID_CP_CONSUMER_CONTROL:
case HID_GD_WIRELESS_RADIO_CTLS:
case HID_GD_SYSTEM_MULTIAXIS:
+ case HID_DG_PEN:
/* already handled by hid core */
break;
case HID_DG_TOUCHSCREEN:
@@ -1782,6 +1814,7 @@ static void mt_release_contacts(struct hid_device *hid)
for (i = 0; i < mt->num_slots; i++) {
input_mt_slot(input_dev, i);
input_mt_report_slot_inactive(input_dev);
+ clear_bit(i, &td->mt_io_flags);
}
input_mt_sync_frame(input_dev);
input_sync(input_dev);
@@ -1804,7 +1837,7 @@ static void mt_expired_timeout(struct timer_list *t)
*/
if (test_and_set_bit_lock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags))
return;
- if (test_bit(MT_IO_FLAGS_PENDING_SLOTS, &td->mt_io_flags))
+ if (td->mt_io_flags & MT_IO_SLOTS_MASK)
mt_release_contacts(hdev);
clear_bit_unlock(MT_IO_FLAGS_RUNNING, &td->mt_io_flags);
}
@@ -1827,6 +1860,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
return -ENOMEM;
}
+ td->haptic = devm_kzalloc(&hdev->dev, sizeof(*(td->haptic)), GFP_KERNEL);
+ if (!td->haptic)
+ return -ENOMEM;
+
+ td->haptic->hdev = hdev;
td->hdev = hdev;
td->mtclass = *mtclass;
td->inputmode_value = MT_INPUTMODE_TOUCHSCREEN;
@@ -1895,6 +1933,17 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
mt_set_modes(hdev, HID_LATENCY_NORMAL, TOUCHPAD_REPORT_ALL);
+ if (td->is_haptic_touchpad) {
+ if (hid_haptic_init(hdev, &td->haptic)) {
+ dev_warn(&hdev->dev, "Cannot allocate haptic for %s\n",
+ hdev->name);
+ td->is_haptic_touchpad = false;
+ devm_kfree(&hdev->dev, td->haptic);
+ }
+ } else {
+ devm_kfree(&hdev->dev, td->haptic);
+ }
+
return 0;
}
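The probe path allocates the haptic state eagerly with devm_kzalloc() and hands it back with devm_kfree() once the device turns out not to need it, keeping the common error paths simple. A kernel-style sketch of the pattern; my_probe, my_state and feature_present are illustrative names:

static int my_probe(struct hid_device *hdev)
{
        struct my_state *st;

        st = devm_kzalloc(&hdev->dev, sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

        if (!feature_present(hdev)) {
                /* Feature absent: return the devm allocation early. */
                devm_kfree(&hdev->dev, st);
        }
        return 0;
}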
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index fb4985988615..7ac9217d9096 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -819,7 +819,7 @@ static void joycon_wait_for_input_report(struct joycon_ctlr *ctlr)
#define JC_INPUT_REPORT_MAX_DELTA 17
#define JC_SUBCMD_TX_OFFSET_MS 4
#define JC_SUBCMD_VALID_DELTA_REQ 3
-#define JC_SUBCMD_RATE_MAX_ATTEMPTS 500
+#define JC_SUBCMD_RATE_MAX_ATTEMPTS 25
#define JC_SUBCMD_RATE_LIMITER_USB_MS 20
#define JC_SUBCMD_RATE_LIMITER_BT_MS 60
#define JC_SUBCMD_RATE_LIMITER_MS(ctlr) ((ctlr)->hdev->bus == BUS_USB ? JC_SUBCMD_RATE_LIMITER_USB_MS : JC_SUBCMD_RATE_LIMITER_BT_MS)
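Dropping JC_SUBCMD_RATE_MAX_ATTEMPTS from 500 to 25 bounds the worst-case wait implied by the limiter values above: 25 * 20 ms = 500 ms over USB and 25 * 60 ms = 1.5 s over Bluetooth, down from 10 s and 30 s. A trivial check:

#include <stdio.h>

int main(void)
{
        printf("USB worst case: %d ms\n", 25 * 20);     /* 500 ms */
        printf("BT worst case:  %d ms\n", 25 * 60);     /* 1500 ms */
        return 0;
}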
@@ -1455,10 +1455,10 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
ctlr->imu_avg_delta_ms;
ctlr->imu_timestamp_us += 1000 * ctlr->imu_avg_delta_ms;
if (dropped_pkts > JC_IMU_DROPPED_PKT_WARNING) {
- hid_warn(ctlr->hdev,
+ hid_warn_ratelimited(ctlr->hdev,
"compensating for %u dropped IMU reports\n",
dropped_pkts);
- hid_warn(ctlr->hdev,
+ hid_warn_ratelimited(ctlr->hdev,
"delta=%u avg_delta=%u\n",
delta, ctlr->imu_avg_delta_ms);
}
@@ -2420,7 +2420,7 @@ static int joycon_read_info(struct joycon_ctlr *ctlr)
struct joycon_input_report *report;
req.subcmd_id = JC_SUBCMD_REQ_DEV_INFO;
- ret = joycon_send_subcmd(ctlr, &req, 0, HZ);
+ ret = joycon_send_subcmd(ctlr, &req, 0, 2 * HZ);
if (ret) {
hid_err(ctlr->hdev, "Failed to get joycon info; ret=%d\n", ret);
return ret;
@@ -2648,7 +2648,8 @@ static int nintendo_hid_probe(struct hid_device *hdev,
init_waitqueue_head(&ctlr->wait);
spin_lock_init(&ctlr->lock);
ctlr->rumble_queue = alloc_workqueue("hid-nintendo-rumble_wq",
- WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
+ WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+ 0);
if (!ctlr->rumble_queue) {
ret = -ENOMEM;
goto err;
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 0f76e241e0af..a7f10c45f62b 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -142,13 +142,13 @@ static void ntrig_report_version(struct hid_device *hdev)
int ret;
char buf[20];
struct usb_device *usb_dev = hid_to_usb_dev(hdev);
- unsigned char *data = kmalloc(8, GFP_KERNEL);
+ unsigned char *data __free(kfree) = kmalloc(8, GFP_KERNEL);
if (!hid_is_usb(hdev))
return;
if (!data)
- goto err_free;
+ return;
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
USB_REQ_CLEAR_FEATURE,
@@ -163,9 +163,6 @@ static void ntrig_report_version(struct hid_device *hdev)
hid_info(hdev, "Firmware version: %s (%02x%02x %02x%02x)\n",
buf, data[2], data[3], data[4], data[5]);
}
-
-err_free:
- kfree(data);
}
static ssize_t show_phys_width(struct device *dev,
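The ntrig change above adopts the kernel's scope-based cleanup: __free(kfree) from linux/cleanup.h frees the buffer when it leaves scope, so the error label disappears. A runnable userspace sketch of the same idea using the compiler's cleanup attribute; autofree is an illustrative stand-in, not the kernel macro:

#include <stdio.h>
#include <stdlib.h>

static void autofree(void *p)
{
        free(*(void **)p);      /* runs automatically at end of scope */
}

int main(void)
{
        char *data __attribute__((cleanup(autofree))) = malloc(8);

        if (!data)
                return 1;       /* no goto/label needed */

        snprintf(data, 8, "ok");
        printf("%s\n", data);
        return 0;               /* data freed here as well */
}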
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
index 1468fb11e39d..128aa6abd10b 100644
--- a/drivers/hid/hid-playstation.c
+++ b/drivers/hid/hid-playstation.c
@@ -5,7 +5,9 @@
* Copyright (c) 2020-2022 Sony Interactive Entertainment
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/crc32.h>
#include <linux/device.h>
#include <linux/hid.h>
@@ -36,19 +38,19 @@ enum PS_TYPE {
struct ps_device {
struct list_head list;
struct hid_device *hdev;
- spinlock_t lock;
+ spinlock_t lock; /* Sync between event handler and workqueue */
- uint32_t player_id;
+ u32 player_id;
struct power_supply_desc battery_desc;
struct power_supply *battery;
- uint8_t battery_capacity;
+ u8 battery_capacity;
int battery_status;
const char *input_dev_name; /* Name of primary input device. */
- uint8_t mac_address[6]; /* Note: stored in little endian order. */
- uint32_t hw_version;
- uint32_t fw_version;
+ u8 mac_address[6]; /* Note: stored in little endian order. */
+ u32 hw_version;
+ u32 fw_version;
int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size);
void (*remove)(struct ps_device *dev);
@@ -110,41 +112,62 @@ struct ps_led_info {
#define DS_BUTTONS2_TOUCHPAD BIT(1)
#define DS_BUTTONS2_MIC_MUTE BIT(2)
-/* Status field of DualSense input report. */
-#define DS_STATUS_BATTERY_CAPACITY GENMASK(3, 0)
-#define DS_STATUS_CHARGING GENMASK(7, 4)
-#define DS_STATUS_CHARGING_SHIFT 4
+/* Status fields of DualSense input report. */
+#define DS_STATUS0_BATTERY_CAPACITY GENMASK(3, 0)
+#define DS_STATUS0_CHARGING GENMASK(7, 4)
+#define DS_STATUS1_HP_DETECT BIT(0)
+#define DS_STATUS1_MIC_DETECT BIT(1)
+#define DS_STATUS1_JACK_DETECT (DS_STATUS1_HP_DETECT | DS_STATUS1_MIC_DETECT)
+#define DS_STATUS1_MIC_MUTE BIT(2)
/* Feature version from DualSense Firmware Info report. */
-#define DS_FEATURE_VERSION(major, minor) ((major & 0xff) << 8 | (minor & 0xff))
-
+#define DS_FEATURE_VERSION_MINOR GENMASK(7, 0)
+#define DS_FEATURE_VERSION_MAJOR GENMASK(15, 8)
+#define DS_FEATURE_VERSION(major, minor) (FIELD_PREP(DS_FEATURE_VERSION_MAJOR, major) | \
+ FIELD_PREP(DS_FEATURE_VERSION_MINOR, minor))
/*
* Status of a DualSense touch point contact.
* Contact IDs, with highest bit set are 'inactive'
* and any associated data is then invalid.
*/
-#define DS_TOUCH_POINT_INACTIVE BIT(7)
+#define DS_TOUCH_POINT_INACTIVE BIT(7)
+#define DS_TOUCH_POINT_X_LO GENMASK(7, 0)
+#define DS_TOUCH_POINT_X_HI GENMASK(11, 8)
+#define DS_TOUCH_POINT_X(hi, lo) (FIELD_PREP(DS_TOUCH_POINT_X_HI, hi) | \
+ FIELD_PREP(DS_TOUCH_POINT_X_LO, lo))
+#define DS_TOUCH_POINT_Y_LO GENMASK(3, 0)
+#define DS_TOUCH_POINT_Y_HI GENMASK(11, 4)
+#define DS_TOUCH_POINT_Y(hi, lo) (FIELD_PREP(DS_TOUCH_POINT_Y_HI, hi) | \
+ FIELD_PREP(DS_TOUCH_POINT_Y_LO, lo))
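The new DS_TOUCH_POINT_X/Y macros assemble a 12-bit coordinate from the split hi/lo bytes of the input report. A runnable sketch with minimal userspace stand-ins for the linux/bitfield.h macros:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)           (((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)   (((val) << __builtin_ctz(mask)) & (mask))

#define X_LO    GENMASK(7, 0)
#define X_HI    GENMASK(11, 8)

int main(void)
{
        uint8_t x_lo = 0x34, x_hi = 0x2;        /* as split in the input report */
        unsigned int x = FIELD_PREP(X_HI, x_hi) | FIELD_PREP(X_LO, x_lo);

        printf("x = 0x%03x\n", x);              /* 0x234 = 564 */
        return 0;
}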
/* Magic value required in tag field of Bluetooth output report. */
-#define DS_OUTPUT_TAG 0x10
+#define DS_OUTPUT_TAG 0x10
+#define DS_OUTPUT_SEQ_TAG GENMASK(3, 0)
+#define DS_OUTPUT_SEQ_NO GENMASK(7, 4)
/* Flags for DualSense output report. */
-#define DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION BIT(0)
-#define DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT BIT(1)
-#define DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE BIT(0)
-#define DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE BIT(1)
-#define DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE BIT(2)
-#define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3)
-#define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4)
-#define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1)
-#define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2)
-#define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4)
-#define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1)
+#define DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION BIT(0)
+#define DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT BIT(1)
+#define DS_OUTPUT_VALID_FLAG0_SPEAKER_VOLUME_ENABLE BIT(5)
+#define DS_OUTPUT_VALID_FLAG0_MIC_VOLUME_ENABLE BIT(6)
+#define DS_OUTPUT_VALID_FLAG0_AUDIO_CONTROL_ENABLE BIT(7)
+#define DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE BIT(0)
+#define DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE BIT(1)
+#define DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE BIT(2)
+#define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3)
+#define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4)
+#define DS_OUTPUT_VALID_FLAG1_AUDIO_CONTROL2_ENABLE BIT(7)
+#define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1)
+#define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2)
+#define DS_OUTPUT_AUDIO_FLAGS_OUTPUT_PATH_SEL GENMASK(5, 4)
+#define DS_OUTPUT_AUDIO_FLAGS2_SP_PREAMP_GAIN GENMASK(2, 0)
+#define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4)
+#define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1)
/* DualSense hardware limits */
#define DS_ACC_RES_PER_G 8192
-#define DS_ACC_RANGE (4*DS_ACC_RES_PER_G)
+#define DS_ACC_RANGE (4 * DS_ACC_RES_PER_G)
#define DS_GYRO_RES_PER_DEG_S 1024
-#define DS_GYRO_RANGE (2048*DS_GYRO_RES_PER_DEG_S)
+#define DS_GYRO_RANGE (2048 * DS_GYRO_RES_PER_DEG_S)
#define DS_TOUCHPAD_WIDTH 1920
#define DS_TOUCHPAD_HEIGHT 1080
@@ -153,9 +176,10 @@ struct dualsense {
struct input_dev *gamepad;
struct input_dev *sensors;
struct input_dev *touchpad;
+ struct input_dev *jack;
/* Update version is used as a feature/capability version. */
- uint16_t update_version;
+ u16 update_version;
/* Calibration data for accelerometer and gyroscope. */
struct ps_calibration_data accel_calib_data[3];
@@ -163,21 +187,26 @@ struct dualsense {
/* Timestamp for sensor data */
bool sensor_timestamp_initialized;
- uint32_t prev_sensor_timestamp;
- uint32_t sensor_timestamp_us;
+ u32 prev_sensor_timestamp;
+ u32 sensor_timestamp_us;
/* Compatible rumble state */
bool use_vibration_v2;
bool update_rumble;
- uint8_t motor_left;
- uint8_t motor_right;
+ u8 motor_left;
+ u8 motor_right;
/* RGB lightbar */
struct led_classdev_mc lightbar;
bool update_lightbar;
- uint8_t lightbar_red;
- uint8_t lightbar_green;
- uint8_t lightbar_blue;
+ u8 lightbar_red;
+ u8 lightbar_green;
+ u8 lightbar_blue;
+
+ /* Audio Jack plugged state */
+ u8 plugged_state;
+ u8 prev_plugged_state;
+ bool prev_plugged_state_valid;
/* Microphone */
bool update_mic_mute;
@@ -186,90 +215,94 @@ struct dualsense {
/* Player leds */
bool update_player_leds;
- uint8_t player_leds_state;
+ u8 player_leds_state;
struct led_classdev player_leds[5];
struct work_struct output_worker;
bool output_worker_initialized;
void *output_report_dmabuf;
- uint8_t output_seq; /* Sequence number for output report. */
+ u8 output_seq; /* Sequence number for output report. */
};
struct dualsense_touch_point {
- uint8_t contact;
- uint8_t x_lo;
- uint8_t x_hi:4, y_lo:4;
- uint8_t y_hi;
+ u8 contact;
+ u8 x_lo;
+ u8 x_hi:4, y_lo:4;
+ u8 y_hi;
} __packed;
static_assert(sizeof(struct dualsense_touch_point) == 4);
/* Main DualSense input report excluding any BT/USB specific headers. */
struct dualsense_input_report {
- uint8_t x, y;
- uint8_t rx, ry;
- uint8_t z, rz;
- uint8_t seq_number;
- uint8_t buttons[4];
- uint8_t reserved[4];
+ u8 x, y;
+ u8 rx, ry;
+ u8 z, rz;
+ u8 seq_number;
+ u8 buttons[4];
+ u8 reserved[4];
/* Motion sensors */
__le16 gyro[3]; /* x, y, z */
__le16 accel[3]; /* x, y, z */
__le32 sensor_timestamp;
- uint8_t reserved2;
+ u8 reserved2;
/* Touchpad */
struct dualsense_touch_point points[2];
- uint8_t reserved3[12];
- uint8_t status;
- uint8_t reserved4[10];
+ u8 reserved3[12];
+ u8 status[3];
+ u8 reserved4[8];
} __packed;
/* The common input report size equals the size of the USB report minus 1 byte for the ReportID. */
static_assert(sizeof(struct dualsense_input_report) == DS_INPUT_REPORT_USB_SIZE - 1);
/* Common data between DualSense BT/USB main output report. */
struct dualsense_output_report_common {
- uint8_t valid_flag0;
- uint8_t valid_flag1;
+ u8 valid_flag0;
+ u8 valid_flag1;
/* For DualShock 4 compatibility mode. */
- uint8_t motor_right;
- uint8_t motor_left;
+ u8 motor_right;
+ u8 motor_left;
/* Audio controls */
- uint8_t reserved[4];
- uint8_t mute_button_led;
+ u8 headphone_volume; /* 0x0 - 0x7f */
+ u8 speaker_volume; /* 0x0 - 0xff */
+ u8 mic_volume; /* 0x0 - 0x40 */
+ u8 audio_control;
+ u8 mute_button_led;
- uint8_t power_save_control;
- uint8_t reserved2[28];
+ u8 power_save_control;
+ u8 reserved2[27];
+ u8 audio_control2;
/* LEDs and lightbar */
- uint8_t valid_flag2;
- uint8_t reserved3[2];
- uint8_t lightbar_setup;
- uint8_t led_brightness;
- uint8_t player_leds;
- uint8_t lightbar_red;
- uint8_t lightbar_green;
- uint8_t lightbar_blue;
+ u8 valid_flag2;
+ u8 reserved3[2];
+ u8 lightbar_setup;
+ u8 led_brightness;
+ u8 player_leds;
+ u8 lightbar_red;
+ u8 lightbar_green;
+ u8 lightbar_blue;
} __packed;
static_assert(sizeof(struct dualsense_output_report_common) == 47);
struct dualsense_output_report_bt {
- uint8_t report_id; /* 0x31 */
- uint8_t seq_tag;
- uint8_t tag;
+ u8 report_id; /* 0x31 */
+ u8 seq_tag;
+ u8 tag;
struct dualsense_output_report_common common;
- uint8_t reserved[24];
+ u8 reserved[24];
__le32 crc32;
} __packed;
static_assert(sizeof(struct dualsense_output_report_bt) == DS_OUTPUT_REPORT_BT_SIZE);
struct dualsense_output_report_usb {
- uint8_t report_id; /* 0x02 */
+ u8 report_id; /* 0x02 */
struct dualsense_output_report_common common;
- uint8_t reserved[15];
+ u8 reserved[15];
} __packed;
static_assert(sizeof(struct dualsense_output_report_usb) == DS_OUTPUT_REPORT_USB_SIZE);
@@ -279,8 +312,8 @@ static_assert(sizeof(struct dualsense_output_report_usb) == DS_OUTPUT_REPORT_USB
* This structure hides the differences between the two to simplify sending output reports.
*/
struct dualsense_output_report {
- uint8_t *data; /* Start of data */
- uint8_t len; /* Size of output report */
+ u8 *data; /* Start of data */
+ u8 len; /* Size of output report */
/* Points to the Bluetooth data payload for a Bluetooth report, else NULL. */
struct dualsense_output_report_bt *bt;
@@ -315,7 +348,9 @@ struct dualsense_output_report {
* Contact IDs, with highest bit set are 'inactive'
* and any associated data is then invalid.
*/
-#define DS4_TOUCH_POINT_INACTIVE BIT(7)
+#define DS4_TOUCH_POINT_INACTIVE BIT(7)
+#define DS4_TOUCH_POINT_X(hi, lo) DS_TOUCH_POINT_X(hi, lo)
+#define DS4_TOUCH_POINT_Y(hi, lo) DS_TOUCH_POINT_Y(hi, lo)
/* Status field of DualShock4 input report. */
#define DS4_STATUS0_BATTERY_CAPACITY GENMASK(3, 0)
@@ -323,7 +358,7 @@ struct dualsense_output_report {
/* Battery status within battery_status field. */
#define DS4_BATTERY_STATUS_FULL 11
/* Status1 bit2 contains dongle connection state:
- * 0 = connectd
+ * 0 = connected
* 1 = disconnected
*/
#define DS4_STATUS1_DONGLE_STATE BIT(2)
@@ -349,9 +384,9 @@ struct dualsense_output_report {
/* DualShock4 hardware limits */
#define DS4_ACC_RES_PER_G 8192
-#define DS4_ACC_RANGE (4*DS_ACC_RES_PER_G)
+#define DS4_ACC_RANGE (4 * DS_ACC_RES_PER_G)
#define DS4_GYRO_RES_PER_DEG_S 1024
-#define DS4_GYRO_RANGE (2048*DS_GYRO_RES_PER_DEG_S)
+#define DS4_GYRO_RANGE (2048 * DS_GYRO_RES_PER_DEG_S)
#define DS4_LIGHTBAR_MAX_BLINK 255 /* 255 centiseconds */
#define DS4_TOUCHPAD_WIDTH 1920
#define DS4_TOUCHPAD_HEIGHT 942
@@ -380,26 +415,26 @@ struct dualshock4 {
/* Timestamp for sensor data */
bool sensor_timestamp_initialized;
- uint32_t prev_sensor_timestamp;
- uint32_t sensor_timestamp_us;
+ u32 prev_sensor_timestamp;
+ u32 sensor_timestamp_us;
/* Bluetooth poll interval */
bool update_bt_poll_interval;
- uint8_t bt_poll_interval;
+ u8 bt_poll_interval;
bool update_rumble;
- uint8_t motor_left;
- uint8_t motor_right;
+ u8 motor_left;
+ u8 motor_right;
/* Lightbar leds */
bool update_lightbar;
bool update_lightbar_blink;
bool lightbar_enabled; /* For use by global LED control. */
- uint8_t lightbar_red;
- uint8_t lightbar_green;
- uint8_t lightbar_blue;
- uint8_t lightbar_blink_on; /* In increments of 10ms. */
- uint8_t lightbar_blink_off; /* In increments of 10ms. */
+ u8 lightbar_red;
+ u8 lightbar_green;
+ u8 lightbar_blue;
+ u8 lightbar_blink_on; /* In increments of 10ms. */
+ u8 lightbar_blink_off; /* In increments of 10ms. */
struct led_classdev lightbar_leds[4];
struct work_struct output_worker;
@@ -408,88 +443,88 @@ struct dualshock4 {
};
struct dualshock4_touch_point {
- uint8_t contact;
- uint8_t x_lo;
- uint8_t x_hi:4, y_lo:4;
- uint8_t y_hi;
+ u8 contact;
+ u8 x_lo;
+ u8 x_hi:4, y_lo:4;
+ u8 y_hi;
} __packed;
static_assert(sizeof(struct dualshock4_touch_point) == 4);
struct dualshock4_touch_report {
- uint8_t timestamp;
+ u8 timestamp;
struct dualshock4_touch_point points[2];
} __packed;
static_assert(sizeof(struct dualshock4_touch_report) == 9);
/* Main DualShock4 input report excluding any BT/USB specific headers. */
struct dualshock4_input_report_common {
- uint8_t x, y;
- uint8_t rx, ry;
- uint8_t buttons[3];
- uint8_t z, rz;
+ u8 x, y;
+ u8 rx, ry;
+ u8 buttons[3];
+ u8 z, rz;
/* Motion sensors */
__le16 sensor_timestamp;
- uint8_t sensor_temperature;
+ u8 sensor_temperature;
__le16 gyro[3]; /* x, y, z */
__le16 accel[3]; /* x, y, z */
- uint8_t reserved2[5];
+ u8 reserved2[5];
- uint8_t status[2];
- uint8_t reserved3;
+ u8 status[2];
+ u8 reserved3;
} __packed;
static_assert(sizeof(struct dualshock4_input_report_common) == 32);
struct dualshock4_input_report_usb {
- uint8_t report_id; /* 0x01 */
+ u8 report_id; /* 0x01 */
struct dualshock4_input_report_common common;
- uint8_t num_touch_reports;
+ u8 num_touch_reports;
struct dualshock4_touch_report touch_reports[3];
- uint8_t reserved[3];
+ u8 reserved[3];
} __packed;
static_assert(sizeof(struct dualshock4_input_report_usb) == DS4_INPUT_REPORT_USB_SIZE);
struct dualshock4_input_report_bt {
- uint8_t report_id; /* 0x11 */
- uint8_t reserved[2];
+ u8 report_id; /* 0x11 */
+ u8 reserved[2];
struct dualshock4_input_report_common common;
- uint8_t num_touch_reports;
+ u8 num_touch_reports;
struct dualshock4_touch_report touch_reports[4]; /* BT has 4 compared to 3 for USB */
- uint8_t reserved2[2];
+ u8 reserved2[2];
__le32 crc32;
} __packed;
static_assert(sizeof(struct dualshock4_input_report_bt) == DS4_INPUT_REPORT_BT_SIZE);
/* Common data between Bluetooth and USB DualShock4 output reports. */
struct dualshock4_output_report_common {
- uint8_t valid_flag0;
- uint8_t valid_flag1;
+ u8 valid_flag0;
+ u8 valid_flag1;
- uint8_t reserved;
+ u8 reserved;
- uint8_t motor_right;
- uint8_t motor_left;
+ u8 motor_right;
+ u8 motor_left;
- uint8_t lightbar_red;
- uint8_t lightbar_green;
- uint8_t lightbar_blue;
- uint8_t lightbar_blink_on;
- uint8_t lightbar_blink_off;
+ u8 lightbar_red;
+ u8 lightbar_green;
+ u8 lightbar_blue;
+ u8 lightbar_blink_on;
+ u8 lightbar_blink_off;
} __packed;
struct dualshock4_output_report_usb {
- uint8_t report_id; /* 0x5 */
+ u8 report_id; /* 0x5 */
struct dualshock4_output_report_common common;
- uint8_t reserved[21];
+ u8 reserved[21];
} __packed;
static_assert(sizeof(struct dualshock4_output_report_usb) == DS4_OUTPUT_REPORT_USB_SIZE);
struct dualshock4_output_report_bt {
- uint8_t report_id; /* 0x11 */
- uint8_t hw_control;
- uint8_t audio_control;
+ u8 report_id; /* 0x11 */
+ u8 hw_control;
+ u8 audio_control;
struct dualshock4_output_report_common common;
- uint8_t reserved[61];
+ u8 reserved[61];
__le32 crc32;
} __packed;
static_assert(sizeof(struct dualshock4_output_report_bt) == DS4_OUTPUT_REPORT_BT_SIZE);
@@ -500,8 +535,8 @@ static_assert(sizeof(struct dualshock4_output_report_bt) == DS4_OUTPUT_REPORT_BT
* This structure hides the differences between the two to simplify sending output reports.
*/
struct dualshock4_output_report {
- uint8_t *data; /* Start of data */
- uint8_t len; /* Size of output report */
+ u8 *data; /* Start of data */
+ u8 len; /* Size of output report */
/* Points to the Bluetooth data payload for a Bluetooth report, else NULL. */
struct dualshock4_output_report_bt *bt;
@@ -540,7 +575,7 @@ static const struct {int x; int y; } ps_gamepad_hat_mapping[] = {
static int dualshock4_get_calibration_data(struct dualshock4 *ds4);
static inline void dualsense_schedule_work(struct dualsense *ds);
static inline void dualshock4_schedule_work(struct dualshock4 *ds4);
-static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue);
+static void dualsense_set_lightbar(struct dualsense *ds, u8 red, u8 green, u8 blue);
static void dualshock4_set_default_lightbar_colors(struct dualshock4 *ds4);
/*
@@ -552,26 +587,25 @@ static int ps_devices_list_add(struct ps_device *dev)
{
struct ps_device *entry;
- mutex_lock(&ps_devices_lock);
+ guard(mutex)(&ps_devices_lock);
+
list_for_each_entry(entry, &ps_devices_list, list) {
if (!memcmp(entry->mac_address, dev->mac_address, sizeof(dev->mac_address))) {
hid_err(dev->hdev, "Duplicate device found for MAC address %pMR.\n",
- dev->mac_address);
- mutex_unlock(&ps_devices_lock);
+ dev->mac_address);
return -EEXIST;
}
}
list_add_tail(&dev->list, &ps_devices_list);
- mutex_unlock(&ps_devices_lock);
return 0;
}
static int ps_devices_list_remove(struct ps_device *dev)
{
- mutex_lock(&ps_devices_lock);
+ guard(mutex)(&ps_devices_lock);
+
list_del(&dev->list);
- mutex_unlock(&ps_devices_lock);
return 0;
}
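The guard(mutex) conversion above releases the lock on every return path automatically, which is why the early -EEXIST return no longer needs an explicit mutex_unlock(). A kernel-style sketch of the pattern; my_lock, struct entry and my_find are illustrative:

static DEFINE_MUTEX(my_lock);

static int my_add(struct list_head *list, struct entry *e)
{
        guard(mutex)(&my_lock);

        if (my_find(list, e))
                return -EEXIST; /* unlock happens here automatically */

        list_add_tail(&e->node, list);
        return 0;               /* ...and here */
}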
@@ -593,7 +627,8 @@ static void ps_device_release_player_id(struct ps_device *dev)
dev->player_id = U32_MAX;
}
-static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev, const char *name_suffix)
+static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev,
+ const char *name_suffix)
{
struct input_dev *input_dev;
@@ -608,8 +643,8 @@ static struct input_dev *ps_allocate_input_dev(struct hid_device *hdev, const ch
input_dev->uniq = hdev->uniq;
if (name_suffix) {
- input_dev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name,
- name_suffix);
+ input_dev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s",
+ hdev->name, name_suffix);
if (!input_dev->name)
return ERR_PTR(-ENOMEM);
} else {
@@ -629,19 +664,18 @@ static enum power_supply_property ps_power_supply_props[] = {
};
static int ps_battery_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+ enum power_supply_property psp,
+ union power_supply_propval *val)
{
struct ps_device *dev = power_supply_get_drvdata(psy);
- uint8_t battery_capacity;
+ u8 battery_capacity;
int battery_status;
- unsigned long flags;
int ret = 0;
- spin_lock_irqsave(&dev->lock, flags);
- battery_capacity = dev->battery_capacity;
- battery_status = dev->battery_status;
- spin_unlock_irqrestore(&dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &dev->lock) {
+ battery_capacity = dev->battery_capacity;
+ battery_status = dev->battery_status;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -675,7 +709,7 @@ static int ps_device_register_battery(struct ps_device *dev)
dev->battery_desc.num_properties = ARRAY_SIZE(ps_power_supply_props);
dev->battery_desc.get_property = ps_battery_get_property;
dev->battery_desc.name = devm_kasprintf(&dev->hdev->dev, GFP_KERNEL,
- "ps-controller-battery-%pMR", dev->mac_address);
+ "ps-controller-battery-%pMR", dev->mac_address);
if (!dev->battery_desc.name)
return -ENOMEM;
@@ -697,9 +731,9 @@ static int ps_device_register_battery(struct ps_device *dev)
}
/* Compute crc32 of HID data and compare against expected CRC. */
-static bool ps_check_crc32(uint8_t seed, uint8_t *data, size_t len, uint32_t report_crc)
+static bool ps_check_crc32(u8 seed, u8 *data, size_t len, u32 report_crc)
{
- uint32_t crc;
+ u32 crc;
crc = crc32_le(0xFFFFFFFF, &seed, 1);
crc = ~crc32_le(crc, data, len);
@@ -707,8 +741,9 @@ static bool ps_check_crc32(uint8_t seed, uint8_t *data, size_t len, uint32_t rep
return crc == report_crc;
}
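The same CRC scheme signs outgoing Bluetooth reports: fold in a one-byte transport seed, CRC the payload, invert, and store the result little-endian in the 4-byte trailer. A kernel-style sketch mirroring dualsense_send_output_report; the helper name is illustrative:

static void sign_bt_report(u8 *report, size_t len)
{
        u8 seed = PS_OUTPUT_CRC32_SEED;
        u32 crc = crc32_le(0xFFFFFFFF, &seed, 1);       /* seed byte first */

        crc = ~crc32_le(crc, report, len - 4);          /* then the payload */
        put_unaligned_le32(crc, &report[len - 4]);      /* 4-byte CRC trailer */
}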
-static struct input_dev *ps_gamepad_create(struct hid_device *hdev,
- int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
+static struct input_dev *
+ps_gamepad_create(struct hid_device *hdev,
+ int (*play_effect)(struct input_dev *, void *, struct ff_effect *))
{
struct input_dev *gamepad;
unsigned int i;
@@ -745,8 +780,8 @@ static struct input_dev *ps_gamepad_create(struct hid_device *hdev,
return gamepad;
}
-static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *buf, size_t size,
- bool check_crc)
+static int ps_get_report(struct hid_device *hdev, u8 report_id, u8 *buf,
+ size_t size, bool check_crc)
{
int ret;
@@ -769,8 +804,8 @@ static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *bu
if (hdev->bus == BUS_BLUETOOTH && check_crc) {
/* Last 4 bytes contains crc32. */
- uint8_t crc_offset = size - 4;
- uint32_t report_crc = get_unaligned_le32(&buf[crc_offset]);
+ u8 crc_offset = size - 4;
+ u32 report_crc = get_unaligned_le32(&buf[crc_offset]);
if (!ps_check_crc32(PS_FEATURE_CRC32_SEED, buf, crc_offset, report_crc)) {
hid_err(hdev, "CRC check failed for reportID=%d\n", report_id);
@@ -782,17 +817,20 @@ static int ps_get_report(struct hid_device *hdev, uint8_t report_id, uint8_t *bu
}
static int ps_led_register(struct ps_device *ps_dev, struct led_classdev *led,
- const struct ps_led_info *led_info)
+ const struct ps_led_info *led_info)
{
int ret;
if (led_info->name) {
- led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL,
- "%s:%s:%s", ps_dev->input_dev_name, led_info->color, led_info->name);
+ led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL, "%s:%s:%s",
+ ps_dev->input_dev_name, led_info->color,
+ led_info->name);
} else {
- /* Backwards compatible mode for hid-sony, but not compliant with LED class spec. */
- led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL,
- "%s:%s", ps_dev->input_dev_name, led_info->color);
+ /* Backwards compatible mode for hid-sony, but not compliant
+ * with LED class spec.
+ */
+ led->name = devm_kasprintf(&ps_dev->hdev->dev, GFP_KERNEL, "%s:%s",
+ ps_dev->input_dev_name, led_info->color);
}
if (!led->name)
@@ -816,7 +854,7 @@ static int ps_led_register(struct ps_device *ps_dev, struct led_classdev *led,
/* Register a DualSense/DualShock4 RGB lightbar represented by a multicolor LED. */
static int ps_lightbar_register(struct ps_device *ps_dev, struct led_classdev_mc *lightbar_mc_dev,
- int (*brightness_set)(struct led_classdev *, enum led_brightness))
+ int (*brightness_set)(struct led_classdev *, enum led_brightness))
{
struct hid_device *hdev = ps_dev->hdev;
struct mc_subled *mc_led_info;
@@ -837,7 +875,7 @@ static int ps_lightbar_register(struct ps_device *ps_dev, struct led_classdev_mc
led_cdev = &lightbar_mc_dev->led_cdev;
led_cdev->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s:rgb:indicator",
- ps_dev->input_dev_name);
+ ps_dev->input_dev_name);
if (!led_cdev->name)
return -ENOMEM;
led_cdev->brightness = 255;
@@ -853,8 +891,8 @@ static int ps_lightbar_register(struct ps_device *ps_dev, struct led_classdev_mc
return 0;
}
-static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_range, int accel_res,
- int gyro_range, int gyro_res)
+static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_range,
+ int accel_res, int gyro_range, int gyro_res)
{
struct input_dev *sensors;
int ret;
@@ -890,8 +928,8 @@ static struct input_dev *ps_sensors_create(struct hid_device *hdev, int accel_ra
return sensors;
}
-static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width, int height,
- unsigned int num_contacts)
+static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width,
+ int height, unsigned int num_contacts)
{
struct input_dev *touchpad;
int ret;
@@ -918,9 +956,27 @@ static struct input_dev *ps_touchpad_create(struct hid_device *hdev, int width,
return touchpad;
}
+static struct input_dev *ps_headset_jack_create(struct hid_device *hdev)
+{
+ struct input_dev *jack;
+ int ret;
+
+ jack = ps_allocate_input_dev(hdev, "Headset Jack");
+ if (IS_ERR(jack))
+ return ERR_CAST(jack);
+
+ input_set_capability(jack, EV_SW, SW_HEADPHONE_INSERT);
+ input_set_capability(jack, EV_SW, SW_MICROPHONE_INSERT);
+
+ ret = input_register_device(jack);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return jack;
+}
+
static ssize_t firmware_version_show(struct device *dev,
- struct device_attribute
- *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
struct ps_device *ps_dev = hid_get_drvdata(hdev);
@@ -931,8 +987,7 @@ static ssize_t firmware_version_show(struct device *dev,
static DEVICE_ATTR_RO(firmware_version);
static ssize_t hardware_version_show(struct device *dev,
- struct device_attribute
- *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct hid_device *hdev = to_hid_device(dev);
struct ps_device *ps_dev = hid_get_drvdata(hdev);
@@ -963,14 +1018,14 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
int range_2g;
int ret = 0;
int i;
- uint8_t *buf;
+ u8 *buf;
buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_CALIBRATION, buf,
- DS_FEATURE_REPORT_CALIBRATION_SIZE, true);
+ DS_FEATURE_REPORT_CALIBRATION_SIZE, true);
if (ret) {
hid_err(ds->base.hdev, "Failed to retrieve DualSense calibration info: %d\n", ret);
goto err_free;
@@ -1001,19 +1056,19 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
speed_2x = (gyro_speed_plus + gyro_speed_minus);
ds->gyro_calib_data[0].abs_code = ABS_RX;
ds->gyro_calib_data[0].bias = 0;
- ds->gyro_calib_data[0].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[0].sens_numer = speed_2x * DS_GYRO_RES_PER_DEG_S;
ds->gyro_calib_data[0].sens_denom = abs(gyro_pitch_plus - gyro_pitch_bias) +
abs(gyro_pitch_minus - gyro_pitch_bias);
ds->gyro_calib_data[1].abs_code = ABS_RY;
ds->gyro_calib_data[1].bias = 0;
- ds->gyro_calib_data[1].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[1].sens_numer = speed_2x * DS_GYRO_RES_PER_DEG_S;
ds->gyro_calib_data[1].sens_denom = abs(gyro_yaw_plus - gyro_yaw_bias) +
abs(gyro_yaw_minus - gyro_yaw_bias);
ds->gyro_calib_data[2].abs_code = ABS_RZ;
ds->gyro_calib_data[2].bias = 0;
- ds->gyro_calib_data[2].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
+ ds->gyro_calib_data[2].sens_numer = speed_2x * DS_GYRO_RES_PER_DEG_S;
ds->gyro_calib_data[2].sens_denom = abs(gyro_roll_plus - gyro_roll_bias) +
abs(gyro_roll_minus - gyro_roll_bias);
@@ -1024,8 +1079,9 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
*/
for (i = 0; i < ARRAY_SIZE(ds->gyro_calib_data); i++) {
if (ds->gyro_calib_data[i].sens_denom == 0) {
- hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
- ds->gyro_calib_data[i].abs_code);
+ hid_warn(hdev,
+ "Invalid gyro calibration data for axis (%d), disabling calibration.",
+ ds->gyro_calib_data[i].abs_code);
ds->gyro_calib_data[i].bias = 0;
ds->gyro_calib_data[i].sens_numer = DS_GYRO_RANGE;
ds->gyro_calib_data[i].sens_denom = S16_MAX;
@@ -1039,19 +1095,19 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
range_2g = acc_x_plus - acc_x_minus;
ds->accel_calib_data[0].abs_code = ABS_X;
ds->accel_calib_data[0].bias = acc_x_plus - range_2g / 2;
- ds->accel_calib_data[0].sens_numer = 2*DS_ACC_RES_PER_G;
+ ds->accel_calib_data[0].sens_numer = 2 * DS_ACC_RES_PER_G;
ds->accel_calib_data[0].sens_denom = range_2g;
range_2g = acc_y_plus - acc_y_minus;
ds->accel_calib_data[1].abs_code = ABS_Y;
ds->accel_calib_data[1].bias = acc_y_plus - range_2g / 2;
- ds->accel_calib_data[1].sens_numer = 2*DS_ACC_RES_PER_G;
+ ds->accel_calib_data[1].sens_numer = 2 * DS_ACC_RES_PER_G;
ds->accel_calib_data[1].sens_denom = range_2g;
range_2g = acc_z_plus - acc_z_minus;
ds->accel_calib_data[2].abs_code = ABS_Z;
ds->accel_calib_data[2].bias = acc_z_plus - range_2g / 2;
- ds->accel_calib_data[2].sens_numer = 2*DS_ACC_RES_PER_G;
+ ds->accel_calib_data[2].sens_numer = 2 * DS_ACC_RES_PER_G;
ds->accel_calib_data[2].sens_denom = range_2g;
/*
@@ -1061,8 +1117,9 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
*/
for (i = 0; i < ARRAY_SIZE(ds->accel_calib_data); i++) {
if (ds->accel_calib_data[i].sens_denom == 0) {
- hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
- ds->accel_calib_data[i].abs_code);
+ hid_warn(hdev,
+ "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
+ ds->accel_calib_data[i].abs_code);
ds->accel_calib_data[i].bias = 0;
ds->accel_calib_data[i].sens_numer = DS_ACC_RANGE;
ds->accel_calib_data[i].sens_denom = S16_MAX;
@@ -1074,10 +1131,9 @@ err_free:
return ret;
}
-
static int dualsense_get_firmware_info(struct dualsense *ds)
{
- uint8_t *buf;
+ u8 *buf;
int ret;
buf = kzalloc(DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, GFP_KERNEL);
@@ -1085,7 +1141,7 @@ static int dualsense_get_firmware_info(struct dualsense *ds)
return -ENOMEM;
ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_FIRMWARE_INFO, buf,
- DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, true);
+ DS_FEATURE_REPORT_FIRMWARE_INFO_SIZE, true);
if (ret) {
hid_err(ds->base.hdev, "Failed to retrieve DualSense firmware info: %d\n", ret);
goto err_free;
@@ -1110,7 +1166,7 @@ err_free:
static int dualsense_get_mac_address(struct dualsense *ds)
{
- uint8_t *buf;
+ u8 *buf;
int ret = 0;
buf = kzalloc(DS_FEATURE_REPORT_PAIRING_INFO_SIZE, GFP_KERNEL);
@@ -1118,7 +1174,7 @@ static int dualsense_get_mac_address(struct dualsense *ds)
return -ENOMEM;
ret = ps_get_report(ds->base.hdev, DS_FEATURE_REPORT_PAIRING_INFO, buf,
- DS_FEATURE_REPORT_PAIRING_INFO_SIZE, true);
+ DS_FEATURE_REPORT_PAIRING_INFO_SIZE, true);
if (ret) {
hid_err(ds->base.hdev, "Failed to retrieve DualSense pairing info: %d\n", ret);
goto err_free;
@@ -1132,11 +1188,11 @@ err_free:
}
static int dualsense_lightbar_set_brightness(struct led_classdev *cdev,
- enum led_brightness brightness)
+ enum led_brightness brightness)
{
struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
struct dualsense *ds = container_of(mc_cdev, struct dualsense, lightbar);
- uint8_t red, green, blue;
+ u8 red, green, blue;
led_mc_calc_color_components(mc_cdev, brightness);
red = mc_cdev->subled_info[0].brightness;
@@ -1159,27 +1215,25 @@ static int dualsense_player_led_set_brightness(struct led_classdev *led, enum le
{
struct hid_device *hdev = to_hid_device(led->dev->parent);
struct dualsense *ds = hid_get_drvdata(hdev);
- unsigned long flags;
unsigned int led_index;
- spin_lock_irqsave(&ds->base.lock, flags);
-
- led_index = led - ds->player_leds;
- if (value == LED_OFF)
- ds->player_leds_state &= ~BIT(led_index);
- else
- ds->player_leds_state |= BIT(led_index);
+ scoped_guard(spinlock_irqsave, &ds->base.lock) {
+ led_index = led - ds->player_leds;
+ if (value == LED_OFF)
+ ds->player_leds_state &= ~BIT(led_index);
+ else
+ ds->player_leds_state |= BIT(led_index);
- ds->update_player_leds = true;
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ ds->update_player_leds = true;
+ }
dualsense_schedule_work(ds);
return 0;
}
-static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_output_report *rp,
- void *buf)
+static void dualsense_init_output_report(struct dualsense *ds,
+ struct dualsense_output_report *rp, void *buf)
{
struct hid_device *hdev = ds->base.hdev;
@@ -1194,7 +1248,8 @@ static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_
* The highest 4 bits are a sequence number, which must be increased
* every report. The lowest 4 bits are a tag and can be zero for now.
*/
- bt->seq_tag = (ds->output_seq << 4) | 0x0;
+ bt->seq_tag = FIELD_PREP(DS_OUTPUT_SEQ_NO, ds->output_seq) |
+ FIELD_PREP(DS_OUTPUT_SEQ_TAG, 0x0);
if (++ds->output_seq == 16)
ds->output_seq = 0;
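The seq_tag byte now built via FIELD_PREP packs a 4-bit sequence number that wraps at 16 into the high nibble, with a zero tag in the low nibble. A runnable sketch using plain shifts, equivalent to the FIELD_PREP form:

#include <stdio.h>

int main(void)
{
        unsigned int seq = 0;

        for (int i = 0; i < 18; i++) {
                unsigned char seq_tag = (seq << 4) | 0x0;

                printf("report %2d: seq_tag=0x%02x\n", i, seq_tag);
                if (++seq == 16)
                        seq = 0;        /* 4-bit wraparound */
        }
        return 0;
}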
@@ -1219,12 +1274,10 @@ static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_
static inline void dualsense_schedule_work(struct dualsense *ds)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ds->base.lock, flags);
- if (ds->output_worker_initialized)
- schedule_work(&ds->output_worker);
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ /* Using scoped_guard() instead of guard() to make sparse happy */
+ scoped_guard(spinlock_irqsave, &ds->base.lock)
+ if (ds->output_worker_initialized)
+ schedule_work(&ds->output_worker);
}
/*
@@ -1232,14 +1285,14 @@ static inline void dualsense_schedule_work(struct dualsense *ds)
* for Bluetooth reports.
*/
static void dualsense_send_output_report(struct dualsense *ds,
- struct dualsense_output_report *report)
+ struct dualsense_output_report *report)
{
struct hid_device *hdev = ds->base.hdev;
/* Bluetooth packets need to be signed with a CRC in the last 4 bytes. */
if (report->bt) {
- uint32_t crc;
- uint8_t seed = PS_OUTPUT_CRC32_SEED;
+ u32 crc;
+ u8 seed = PS_OUTPUT_CRC32_SEED;
crc = crc32_le(0xFFFFFFFF, &seed, 1);
crc = ~crc32_le(crc, report->data, report->len - 4);
@@ -1255,74 +1308,125 @@ static void dualsense_output_worker(struct work_struct *work)
struct dualsense *ds = container_of(work, struct dualsense, output_worker);
struct dualsense_output_report report;
struct dualsense_output_report_common *common;
- unsigned long flags;
dualsense_init_output_report(ds, &report, ds->output_report_dmabuf);
common = report.common;
- spin_lock_irqsave(&ds->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds->base.lock) {
+ if (ds->update_rumble) {
+ /* Select classic rumble style haptics and enable it. */
+ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT;
+ if (ds->use_vibration_v2)
+ common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2;
+ else
+ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION;
+ common->motor_left = ds->motor_left;
+ common->motor_right = ds->motor_right;
+ ds->update_rumble = false;
+ }
- if (ds->update_rumble) {
- /* Select classic rumble style haptics and enable it. */
- common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT;
- if (ds->use_vibration_v2)
- common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2;
- else
- common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION;
- common->motor_left = ds->motor_left;
- common->motor_right = ds->motor_right;
- ds->update_rumble = false;
- }
+ if (ds->update_lightbar) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE;
+ common->lightbar_red = ds->lightbar_red;
+ common->lightbar_green = ds->lightbar_green;
+ common->lightbar_blue = ds->lightbar_blue;
- if (ds->update_lightbar) {
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_LIGHTBAR_CONTROL_ENABLE;
- common->lightbar_red = ds->lightbar_red;
- common->lightbar_green = ds->lightbar_green;
- common->lightbar_blue = ds->lightbar_blue;
+ ds->update_lightbar = false;
+ }
- ds->update_lightbar = false;
- }
+ if (ds->update_player_leds) {
+ common->valid_flag1 |=
+ DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE;
+ common->player_leds = ds->player_leds_state;
- if (ds->update_player_leds) {
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE;
- common->player_leds = ds->player_leds_state;
+ ds->update_player_leds = false;
+ }
- ds->update_player_leds = false;
- }
+ if (ds->plugged_state != ds->prev_plugged_state) {
+ u8 val = ds->plugged_state & DS_STATUS1_HP_DETECT;
+
+ if (val != (ds->prev_plugged_state & DS_STATUS1_HP_DETECT)) {
+ common->valid_flag0 = DS_OUTPUT_VALID_FLAG0_AUDIO_CONTROL_ENABLE;
+ /*
+ * _--------> Output path setup in audio_flag0
+ * / _------> Headphone (HP) Left channel sink
+ * | / _----> Headphone (HP) Right channel sink
+ * | | / _--> Internal Speaker (SP) sink
+ * | | | /
+ * | | | | L/R - Left/Right channel source
+ * 0 L-R X X - Unrouted (muted) channel source
+ * 1 L-L X
+ * 2 L-L R
+ * 3 X-X R
+ */
+ if (val) {
+ /* Mute SP and route L+R channels to HP */
+ common->audio_control = 0;
+ } else {
+ /* Mute HP and route R channel to SP */
+ common->audio_control =
+ FIELD_PREP(DS_OUTPUT_AUDIO_FLAGS_OUTPUT_PATH_SEL,
+ 0x3);
+ /*
+ * Set SP hardware volume to 100%.
+ * Note the accepted range seems to be [0x3d..0x64]
+ */
+ common->valid_flag0 |=
+ DS_OUTPUT_VALID_FLAG0_SPEAKER_VOLUME_ENABLE;
+ common->speaker_volume = 0x64;
+ /* Set SP preamp gain to +6dB */
+ common->valid_flag1 =
+ DS_OUTPUT_VALID_FLAG1_AUDIO_CONTROL2_ENABLE;
+ common->audio_control2 =
+ FIELD_PREP(DS_OUTPUT_AUDIO_FLAGS2_SP_PREAMP_GAIN,
+ 0x2);
+ }
- if (ds->update_mic_mute) {
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE;
- common->mute_button_led = ds->mic_muted;
+ input_report_switch(ds->jack, SW_HEADPHONE_INSERT, val);
+ }
- if (ds->mic_muted) {
- /* Disable microphone */
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
- common->power_save_control |= DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
- } else {
- /* Enable microphone */
- common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
- common->power_save_control &= ~DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ val = ds->plugged_state & DS_STATUS1_MIC_DETECT;
+ if (val != (ds->prev_plugged_state & DS_STATUS1_MIC_DETECT))
+ input_report_switch(ds->jack, SW_MICROPHONE_INSERT, val);
+
+ input_sync(ds->jack);
+ ds->prev_plugged_state = ds->plugged_state;
}
- ds->update_mic_mute = false;
- }
+ if (ds->update_mic_mute) {
+ common->valid_flag1 |= DS_OUTPUT_VALID_FLAG1_MIC_MUTE_LED_CONTROL_ENABLE;
+ common->mute_button_led = ds->mic_muted;
+
+ if (ds->mic_muted) {
+ /* Disable microphone */
+ common->valid_flag1 |=
+ DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
+ common->power_save_control |= DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ } else {
+ /* Enable microphone */
+ common->valid_flag1 |=
+ DS_OUTPUT_VALID_FLAG1_POWER_SAVE_CONTROL_ENABLE;
+ common->power_save_control &=
+ ~DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE;
+ }
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ ds->update_mic_mute = false;
+ }
+ }
dualsense_send_output_report(ds, &report);
}
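The routing decision the worker derives from the jack-detect bits, as a runnable sketch: headphones present selects path 0 (L/R to headphones, speaker muted), otherwise path 3 (right channel to the internal speaker). The field layout mirrors DS_OUTPUT_AUDIO_FLAGS_OUTPUT_PATH_SEL (bits 5:4); the helper name is illustrative:

#include <stdio.h>

#define OUTPUT_PATH_SHIFT       4
#define HP_DETECT               0x1     /* DS_STATUS1_HP_DETECT */

static unsigned char audio_control_for(unsigned char plugged_state)
{
        if (plugged_state & HP_DETECT)
                return 0;                       /* path 0: L-R X */
        return 0x3 << OUTPUT_PATH_SHIFT;        /* path 3: X-X R */
}

int main(void)
{
        printf("headphones: 0x%02x\n", audio_control_for(HP_DETECT));
        printf("speaker:    0x%02x\n", audio_control_for(0));
        return 0;
}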
static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *report,
- u8 *data, int size)
+ u8 *data, int size)
{
struct hid_device *hdev = ps_dev->hdev;
struct dualsense *ds = container_of(ps_dev, struct dualsense, base);
struct dualsense_input_report *ds_report;
- uint8_t battery_data, battery_capacity, charging_status, value;
+ u8 battery_data, battery_capacity, charging_status, value;
int battery_status;
- uint32_t sensor_timestamp;
+ u32 sensor_timestamp;
bool btn_mic_state;
- unsigned long flags;
int i;
/*
@@ -1331,12 +1435,12 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
* the full report using reportID 49.
*/
if (hdev->bus == BUS_USB && report->id == DS_INPUT_REPORT_USB &&
- size == DS_INPUT_REPORT_USB_SIZE) {
+ size == DS_INPUT_REPORT_USB_SIZE) {
ds_report = (struct dualsense_input_report *)&data[1];
} else if (hdev->bus == BUS_BLUETOOTH && report->id == DS_INPUT_REPORT_BT &&
- size == DS_INPUT_REPORT_BT_SIZE) {
+ size == DS_INPUT_REPORT_BT_SIZE) {
/* Last 4 bytes of input report contain crc32 */
- uint32_t report_crc = get_unaligned_le32(&data[size - 4]);
+ u32 report_crc = get_unaligned_le32(&data[size - 4]);
if (!ps_check_crc32(PS_INPUT_CRC32_SEED, data, size - 4, report_crc)) {
hid_err(hdev, "DualSense input CRC's check failed\n");
@@ -1384,16 +1488,42 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
*/
btn_mic_state = !!(ds_report->buttons[2] & DS_BUTTONS2_MIC_MUTE);
if (btn_mic_state && !ds->last_btn_mic_state) {
- spin_lock_irqsave(&ps_dev->lock, flags);
- ds->update_mic_mute = true;
- ds->mic_muted = !ds->mic_muted; /* toggle */
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ds->update_mic_mute = true;
+ ds->mic_muted = !ds->mic_muted; /* toggle */
+ }
/* Schedule updating of microphone state at hardware level. */
dualsense_schedule_work(ds);
}
ds->last_btn_mic_state = btn_mic_state;
+ /*
+ * Parse HP/MIC plugged state data for USB use case, since Bluetooth
+ * audio is currently not supported.
+ */
+ if (hdev->bus == BUS_USB) {
+ value = ds_report->status[1] & DS_STATUS1_JACK_DETECT;
+
+ if (!ds->prev_plugged_state_valid) {
+ /* Initial handling of the plugged state report */
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ds->plugged_state = (~value) & DS_STATUS1_JACK_DETECT;
+ ds->prev_plugged_state_valid = true;
+ }
+ }
+
+ if (value != ds->plugged_state) {
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ds->prev_plugged_state = ds->plugged_state;
+ ds->plugged_state = value;
+ }
+
+ /* Schedule audio routing towards active endpoint. */
+ dualsense_schedule_work(ds);
+ }
+ }
+
/* Parse and calibrate gyroscope data. */
for (i = 0; i < ARRAY_SIZE(ds_report->gyro); i++) {
int raw_data = (short)le16_to_cpu(ds_report->gyro[i]);
@@ -1419,7 +1549,7 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
ds->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp, 3);
ds->sensor_timestamp_initialized = true;
} else {
- uint32_t delta;
+ u32 delta;
if (ds->prev_sensor_timestamp > sensor_timestamp)
delta = (U32_MAX - ds->prev_sensor_timestamp + sensor_timestamp + 1);
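The explicit wraparound branch above computes the same value as plain 32-bit unsigned subtraction, since the sensor timestamp is a u32 in 1/3 us units. A quick runnable check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t prev = 0xFFFFFFF0u, cur = 0x00000010u;
        uint32_t branch = (prev > cur) ? (UINT32_MAX - prev + cur + 1)
                                       : (cur - prev);
        uint32_t wrap = cur - prev;     /* relies on u32 wraparound */

        printf("branch=%u wrap=%u\n", branch, wrap);    /* both 32 */
        return 0;
}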
@@ -1439,19 +1569,18 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
input_mt_report_slot_state(ds->touchpad, MT_TOOL_FINGER, active);
if (active) {
- int x = (point->x_hi << 8) | point->x_lo;
- int y = (point->y_hi << 4) | point->y_lo;
-
- input_report_abs(ds->touchpad, ABS_MT_POSITION_X, x);
- input_report_abs(ds->touchpad, ABS_MT_POSITION_Y, y);
+ input_report_abs(ds->touchpad, ABS_MT_POSITION_X,
+ DS_TOUCH_POINT_X(point->x_hi, point->x_lo));
+ input_report_abs(ds->touchpad, ABS_MT_POSITION_Y,
+ DS_TOUCH_POINT_Y(point->y_hi, point->y_lo));
}
}
input_mt_sync_frame(ds->touchpad);
input_report_key(ds->touchpad, BTN_LEFT, ds_report->buttons[2] & DS_BUTTONS2_TOUCHPAD);
input_sync(ds->touchpad);
- battery_data = ds_report->status & DS_STATUS_BATTERY_CAPACITY;
- charging_status = (ds_report->status & DS_STATUS_CHARGING) >> DS_STATUS_CHARGING_SHIFT;
+ battery_data = FIELD_GET(DS_STATUS0_BATTERY_CAPACITY, ds_report->status[0]);
+ charging_status = FIELD_GET(DS_STATUS0_CHARGING, ds_report->status[0]);
switch (charging_status) {
case 0x0:
@@ -1481,10 +1610,10 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
}
- spin_lock_irqsave(&ps_dev->lock, flags);
- ps_dev->battery_capacity = battery_capacity;
- ps_dev->battery_status = battery_status;
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ps_dev->battery_capacity = battery_capacity;
+ ps_dev->battery_status = battery_status;
+ }
return 0;
}
@@ -1493,16 +1622,15 @@ static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_ef
{
struct hid_device *hdev = input_get_drvdata(dev);
struct dualsense *ds = hid_get_drvdata(hdev);
- unsigned long flags;
if (effect->type != FF_RUMBLE)
return 0;
- spin_lock_irqsave(&ds->base.lock, flags);
- ds->update_rumble = true;
- ds->motor_left = effect->u.rumble.strong_magnitude / 256;
- ds->motor_right = effect->u.rumble.weak_magnitude / 256;
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds->base.lock) {
+ ds->update_rumble = true;
+ ds->motor_left = effect->u.rumble.strong_magnitude / 256;
+ ds->motor_right = effect->u.rumble.weak_magnitude / 256;
+ }
dualsense_schedule_work(ds);
return 0;
@@ -1511,11 +1639,9 @@ static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_ef
static void dualsense_remove(struct ps_device *ps_dev)
{
struct dualsense *ds = container_of(ps_dev, struct dualsense, base);
- unsigned long flags;
- spin_lock_irqsave(&ds->base.lock, flags);
- ds->output_worker_initialized = false;
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds->base.lock)
+ ds->output_worker_initialized = false;
cancel_work_sync(&ds->output_worker);
}
@@ -1523,9 +1649,9 @@ static void dualsense_remove(struct ps_device *ps_dev)
static int dualsense_reset_leds(struct dualsense *ds)
{
struct dualsense_output_report report;
- uint8_t *buf;
+ struct dualsense_output_report_bt *buf;
- buf = kzalloc(sizeof(struct dualsense_output_report_bt), GFP_KERNEL);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1545,16 +1671,14 @@ static int dualsense_reset_leds(struct dualsense *ds)
return 0;
}
-static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue)
+static void dualsense_set_lightbar(struct dualsense *ds, u8 red, u8 green, u8 blue)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ds->base.lock, flags);
- ds->update_lightbar = true;
- ds->lightbar_red = red;
- ds->lightbar_green = green;
- ds->lightbar_blue = blue;
- spin_unlock_irqrestore(&ds->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds->base.lock) {
+ ds->update_lightbar = true;
+ ds->lightbar_red = red;
+ ds->lightbar_green = green;
+ ds->lightbar_blue = blue;
+ }
dualsense_schedule_work(ds);
}
@@ -1575,7 +1699,7 @@ static void dualsense_set_player_leds(struct dualsense *ds)
BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0)
};
- uint8_t player_id = ds->base.player_id % ARRAY_SIZE(player_ids);
+ u8 player_id = ds->base.player_id % ARRAY_SIZE(player_ids);
ds->update_player_leds = true;
ds->player_leds_state = player_ids[player_id];
@@ -1586,7 +1710,7 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
{
struct dualsense *ds;
struct ps_device *ps_dev;
- uint8_t max_output_report_size;
+ u8 max_output_report_size;
int i, ret;
static const struct ps_led_info player_leds_info[] = {
@@ -1675,7 +1799,7 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
ps_dev->input_dev_name = dev_name(&ds->gamepad->dev);
ds->sensors = ps_sensors_create(hdev, DS_ACC_RANGE, DS_ACC_RES_PER_G,
- DS_GYRO_RANGE, DS_GYRO_RES_PER_DEG_S);
+ DS_GYRO_RANGE, DS_GYRO_RES_PER_DEG_S);
if (IS_ERR(ds->sensors)) {
ret = PTR_ERR(ds->sensors);
goto err;
@@ -1687,6 +1811,15 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
goto err;
}
+ /* Bluetooth audio is currently not supported. */
+ if (hdev->bus == BUS_USB) {
+ ds->jack = ps_headset_jack_create(hdev);
+ if (IS_ERR(ds->jack)) {
+ ret = PTR_ERR(ds->jack);
+ goto err;
+ }
+ }
+
ret = ps_device_register_battery(ps_dev);
if (ret)
goto err;
@@ -1729,7 +1862,7 @@ static struct ps_device *dualsense_create(struct hid_device *hdev)
* can change behavior.
*/
hid_info(hdev, "Registered DualSense controller hw_version=0x%08x fw_version=0x%08x\n",
- ds->base.hw_version, ds->base.fw_version);
+ ds->base.hw_version, ds->base.fw_version);
return &ds->base;
@@ -1741,7 +1874,6 @@ err:
static void dualshock4_dongle_calibration_work(struct work_struct *work)
{
struct dualshock4 *ds4 = container_of(work, struct dualshock4, dongle_hotplug_worker);
- unsigned long flags;
enum dualshock4_dongle_state dongle_state;
int ret;
@@ -1753,16 +1885,16 @@ static void dualshock4_dongle_calibration_work(struct work_struct *work)
* DS4 hotplug is detected from sony_raw_event as any issues
* are likely resolved then (the dongle is quite stupid).
*/
- hid_err(ds4->base.hdev, "DualShock 4 USB dongle: calibration failed, disabling device\n");
+ hid_err(ds4->base.hdev,
+ "DualShock 4 USB dongle: calibration failed, disabling device\n");
dongle_state = DONGLE_DISABLED;
} else {
hid_info(ds4->base.hdev, "DualShock 4 USB dongle: calibration completed\n");
dongle_state = DONGLE_CONNECTED;
}
- spin_lock_irqsave(&ds4->base.lock, flags);
- ds4->dongle_state = dongle_state;
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds4->base.lock)
+ ds4->dongle_state = dongle_state;
}
static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
@@ -1779,7 +1911,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
int range_2g;
int ret = 0;
int i;
- uint8_t *buf;
+ u8 *buf;
if (ds4->base.hdev->bus == BUS_USB) {
int retries;
@@ -1798,15 +1930,19 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
*/
for (retries = 0; retries < 3; retries++) {
ret = ps_get_report(hdev, DS4_FEATURE_REPORT_CALIBRATION, buf,
- DS4_FEATURE_REPORT_CALIBRATION_SIZE, true);
+ DS4_FEATURE_REPORT_CALIBRATION_SIZE, true);
if (ret) {
if (retries < 2) {
- hid_warn(hdev, "Retrying DualShock 4 get calibration report (0x02) request\n");
+ hid_warn(hdev,
+ "Retrying DualShock 4 get calibration report (0x02) request\n");
continue;
}
- hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
+ hid_warn(hdev,
+ "Failed to retrieve DualShock4 calibration info: %d\n",
+ ret);
ret = -EILSEQ;
+ kfree(buf);
goto transfer_failed;
} else {
break;
@@ -1820,10 +1956,11 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
}
ret = ps_get_report(hdev, DS4_FEATURE_REPORT_CALIBRATION_BT, buf,
- DS4_FEATURE_REPORT_CALIBRATION_BT_SIZE, true);
+ DS4_FEATURE_REPORT_CALIBRATION_BT_SIZE, true);
if (ret) {
hid_warn(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
+ kfree(buf);
goto transfer_failed;
}
}
@@ -1867,19 +2004,19 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
speed_2x = (gyro_speed_plus + gyro_speed_minus);
ds4->gyro_calib_data[0].abs_code = ABS_RX;
ds4->gyro_calib_data[0].bias = 0;
- ds4->gyro_calib_data[0].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
+ ds4->gyro_calib_data[0].sens_numer = speed_2x * DS4_GYRO_RES_PER_DEG_S;
ds4->gyro_calib_data[0].sens_denom = abs(gyro_pitch_plus - gyro_pitch_bias) +
abs(gyro_pitch_minus - gyro_pitch_bias);
ds4->gyro_calib_data[1].abs_code = ABS_RY;
ds4->gyro_calib_data[1].bias = 0;
- ds4->gyro_calib_data[1].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
+ ds4->gyro_calib_data[1].sens_numer = speed_2x * DS4_GYRO_RES_PER_DEG_S;
ds4->gyro_calib_data[1].sens_denom = abs(gyro_yaw_plus - gyro_yaw_bias) +
abs(gyro_yaw_minus - gyro_yaw_bias);
ds4->gyro_calib_data[2].abs_code = ABS_RZ;
ds4->gyro_calib_data[2].bias = 0;
- ds4->gyro_calib_data[2].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
+ ds4->gyro_calib_data[2].sens_numer = speed_2x * DS4_GYRO_RES_PER_DEG_S;
ds4->gyro_calib_data[2].sens_denom = abs(gyro_roll_plus - gyro_roll_bias) +
abs(gyro_roll_minus - gyro_roll_bias);
@@ -1890,19 +2027,19 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
range_2g = acc_x_plus - acc_x_minus;
ds4->accel_calib_data[0].abs_code = ABS_X;
ds4->accel_calib_data[0].bias = acc_x_plus - range_2g / 2;
- ds4->accel_calib_data[0].sens_numer = 2*DS4_ACC_RES_PER_G;
+ ds4->accel_calib_data[0].sens_numer = 2 * DS4_ACC_RES_PER_G;
ds4->accel_calib_data[0].sens_denom = range_2g;
range_2g = acc_y_plus - acc_y_minus;
ds4->accel_calib_data[1].abs_code = ABS_Y;
ds4->accel_calib_data[1].bias = acc_y_plus - range_2g / 2;
- ds4->accel_calib_data[1].sens_numer = 2*DS4_ACC_RES_PER_G;
+ ds4->accel_calib_data[1].sens_numer = 2 * DS4_ACC_RES_PER_G;
ds4->accel_calib_data[1].sens_denom = range_2g;
range_2g = acc_z_plus - acc_z_minus;
ds4->accel_calib_data[2].abs_code = ABS_Z;
ds4->accel_calib_data[2].bias = acc_z_plus - range_2g / 2;
- ds4->accel_calib_data[2].sens_numer = 2*DS4_ACC_RES_PER_G;
+ ds4->accel_calib_data[2].sens_numer = 2 * DS4_ACC_RES_PER_G;
ds4->accel_calib_data[2].sens_denom = range_2g;
transfer_failed:
@@ -1914,8 +2051,9 @@ transfer_failed:
for (i = 0; i < ARRAY_SIZE(ds4->gyro_calib_data); i++) {
if (ds4->gyro_calib_data[i].sens_denom == 0) {
ds4->gyro_calib_data[i].abs_code = ABS_RX + i;
- hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
- ds4->gyro_calib_data[i].abs_code);
+ hid_warn(hdev,
+ "Invalid gyro calibration data for axis (%d), disabling calibration.",
+ ds4->gyro_calib_data[i].abs_code);
ds4->gyro_calib_data[i].bias = 0;
ds4->gyro_calib_data[i].sens_numer = DS4_GYRO_RANGE;
ds4->gyro_calib_data[i].sens_denom = S16_MAX;
@@ -1930,8 +2068,9 @@ transfer_failed:
for (i = 0; i < ARRAY_SIZE(ds4->accel_calib_data); i++) {
if (ds4->accel_calib_data[i].sens_denom == 0) {
ds4->accel_calib_data[i].abs_code = ABS_X + i;
- hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
- ds4->accel_calib_data[i].abs_code);
+ hid_warn(hdev,
+ "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
+ ds4->accel_calib_data[i].abs_code);
ds4->accel_calib_data[i].bias = 0;
ds4->accel_calib_data[i].sens_numer = DS4_ACC_RANGE;
ds4->accel_calib_data[i].sens_denom = S16_MAX;
@@ -1943,7 +2082,7 @@ transfer_failed:
static int dualshock4_get_firmware_info(struct dualshock4 *ds4)
{
- uint8_t *buf;
+ u8 *buf;
int ret;
buf = kzalloc(DS4_FEATURE_REPORT_FIRMWARE_INFO_SIZE, GFP_KERNEL);
@@ -1954,7 +2093,7 @@ static int dualshock4_get_firmware_info(struct dualshock4 *ds4)
* lacks CRC support, so must be disabled in ps_get_report.
*/
ret = ps_get_report(ds4->base.hdev, DS4_FEATURE_REPORT_FIRMWARE_INFO, buf,
- DS4_FEATURE_REPORT_FIRMWARE_INFO_SIZE, false);
+ DS4_FEATURE_REPORT_FIRMWARE_INFO_SIZE, false);
if (ret) {
hid_err(ds4->base.hdev, "Failed to retrieve DualShock4 firmware info: %d\n", ret);
goto err_free;
@@ -1971,7 +2110,7 @@ err_free:
static int dualshock4_get_mac_address(struct dualshock4 *ds4)
{
struct hid_device *hdev = ds4->base.hdev;
- uint8_t *buf;
+ u8 *buf;
int ret = 0;
if (hdev->bus == BUS_USB) {
@@ -1980,7 +2119,7 @@ static int dualshock4_get_mac_address(struct dualshock4 *ds4)
return -ENOMEM;
ret = ps_get_report(hdev, DS4_FEATURE_REPORT_PAIRING_INFO, buf,
- DS4_FEATURE_REPORT_PAIRING_INFO_SIZE, false);
+ DS4_FEATURE_REPORT_PAIRING_INFO_SIZE, false);
if (ret) {
hid_err(hdev, "Failed to retrieve DualShock4 pairing info: %d\n", ret);
goto err_free;
@@ -1993,9 +2132,9 @@ static int dualshock4_get_mac_address(struct dualshock4 *ds4)
return -EINVAL;
ret = sscanf(hdev->uniq, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- &ds4->base.mac_address[5], &ds4->base.mac_address[4],
- &ds4->base.mac_address[3], &ds4->base.mac_address[2],
- &ds4->base.mac_address[1], &ds4->base.mac_address[0]);
+ &ds4->base.mac_address[5], &ds4->base.mac_address[4],
+ &ds4->base.mac_address[3], &ds4->base.mac_address[2],
+ &ds4->base.mac_address[1], &ds4->base.mac_address[0]);
if (ret != sizeof(ds4->base.mac_address))
return -EINVAL;
@@ -2030,28 +2169,27 @@ static enum led_brightness dualshock4_led_get_brightness(struct led_classdev *le
}
static int dualshock4_led_set_blink(struct led_classdev *led, unsigned long *delay_on,
- unsigned long *delay_off)
+ unsigned long *delay_off)
{
struct hid_device *hdev = to_hid_device(led->dev->parent);
struct dualshock4 *ds4 = hid_get_drvdata(hdev);
- unsigned long flags;
- spin_lock_irqsave(&ds4->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds4->base.lock) {
+ if (!*delay_on && !*delay_off) {
+ /* Default to 1 Hz (50 centiseconds on, 50 centiseconds off). */
+ ds4->lightbar_blink_on = 50;
+ ds4->lightbar_blink_off = 50;
+ } else {
+ /* Blink delays in centiseconds. */
+ ds4->lightbar_blink_on = min_t(unsigned long, *delay_on / 10,
+ DS4_LIGHTBAR_MAX_BLINK);
+ ds4->lightbar_blink_off = min_t(unsigned long, *delay_off / 10,
+ DS4_LIGHTBAR_MAX_BLINK);
+ }
- if (!*delay_on && !*delay_off) {
- /* Default to 1 Hz (50 centiseconds on, 50 centiseconds off). */
- ds4->lightbar_blink_on = 50;
- ds4->lightbar_blink_off = 50;
- } else {
- /* Blink delays in centiseconds. */
- ds4->lightbar_blink_on = min_t(unsigned long, *delay_on/10, DS4_LIGHTBAR_MAX_BLINK);
- ds4->lightbar_blink_off = min_t(unsigned long, *delay_off/10, DS4_LIGHTBAR_MAX_BLINK);
+ ds4->update_lightbar_blink = true;
}
- ds4->update_lightbar_blink = true;
-
- spin_unlock_irqrestore(&ds4->base.lock, flags);
-
dualshock4_schedule_work(ds4);
/* Report scaled values back to LED subsystem */
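
Illustrative arithmetic for the conversion above, assuming millisecond inputs from the LED core:

    /* *delay_on = 1200 ms  -> 1200 / 10 = 120 cs, stored as-is
     * *delay_on = 30000 ms -> 3000 cs  -> clamped to DS4_LIGHTBAR_MAX_BLINK
     * The comment below suggests the stored centisecond counts are then
     * scaled back to milliseconds (cs * 10), so userspace sees the
     * actually-applied, 10 ms granular delays.
     */
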
@@ -2065,36 +2203,33 @@ static int dualshock4_led_set_brightness(struct led_classdev *led, enum led_brig
{
struct hid_device *hdev = to_hid_device(led->dev->parent);
struct dualshock4 *ds4 = hid_get_drvdata(hdev);
- unsigned long flags;
unsigned int led_index;
- spin_lock_irqsave(&ds4->base.lock, flags);
-
- led_index = led - ds4->lightbar_leds;
- switch (led_index) {
- case 0:
- ds4->lightbar_red = value;
- break;
- case 1:
- ds4->lightbar_green = value;
- break;
- case 2:
- ds4->lightbar_blue = value;
- break;
- case 3:
- ds4->lightbar_enabled = !!value;
-
- /* brightness = 0 also cancels blinking in Linux. */
- if (!ds4->lightbar_enabled) {
- ds4->lightbar_blink_off = 0;
- ds4->lightbar_blink_on = 0;
- ds4->update_lightbar_blink = true;
+ scoped_guard(spinlock_irqsave, &ds4->base.lock) {
+ led_index = led - ds4->lightbar_leds;
+ switch (led_index) {
+ case 0:
+ ds4->lightbar_red = value;
+ break;
+ case 1:
+ ds4->lightbar_green = value;
+ break;
+ case 2:
+ ds4->lightbar_blue = value;
+ break;
+ case 3:
+ ds4->lightbar_enabled = !!value;
+
+ /* brightness = 0 also cancels blinking in Linux. */
+ if (!ds4->lightbar_enabled) {
+ ds4->lightbar_blink_off = 0;
+ ds4->lightbar_blink_on = 0;
+ ds4->update_lightbar_blink = true;
+ }
}
- }
- ds4->update_lightbar = true;
-
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ ds4->update_lightbar = true;
+ }
dualshock4_schedule_work(ds4);
@@ -2102,7 +2237,7 @@ static int dualshock4_led_set_brightness(struct led_classdev *led, enum led_brig
}
static void dualshock4_init_output_report(struct dualshock4 *ds4,
- struct dualshock4_output_report *rp, void *buf)
+ struct dualshock4_output_report *rp, void *buf)
{
struct hid_device *hdev = ds4->base.hdev;
@@ -2136,66 +2271,63 @@ static void dualshock4_output_worker(struct work_struct *work)
struct dualshock4 *ds4 = container_of(work, struct dualshock4, output_worker);
struct dualshock4_output_report report;
struct dualshock4_output_report_common *common;
- unsigned long flags;
dualshock4_init_output_report(ds4, &report, ds4->output_report_dmabuf);
common = report.common;
- spin_lock_irqsave(&ds4->base.lock, flags);
-
- /*
- * Some 3rd party gamepads expect updates to rumble and lightbar
- * together, and setting one may cancel the other.
- *
- * Let's maximise compatibility by always sending rumble and lightbar
- * updates together, even when only one has been scheduled, resulting
- * in:
- *
- * ds4->valid_flag0 >= 0x03
- *
- * Hopefully this will maximise compatibility with third-party pads.
- *
- * Any further update bits, such as 0x04 for lightbar blinking, will
- * be or'd on top of this like before.
- */
- if (ds4->update_rumble || ds4->update_lightbar) {
- ds4->update_rumble = true; /* 0x01 */
- ds4->update_lightbar = true; /* 0x02 */
- }
+ scoped_guard(spinlock_irqsave, &ds4->base.lock) {
+ /*
+ * Some 3rd party gamepads expect updates to rumble and lightbar
+ * together, and setting one may cancel the other.
+ *
+ * Let's maximise compatibility by always sending rumble and lightbar
+ * updates together, even when only one has been scheduled, resulting
+ * in:
+ *
+ * ds4->valid_flag0 >= 0x03
+ *
+ * Any further update bits, such as 0x04 for lightbar blinking, will
+ * be or'd on top of this like before.
+ */
+ if (ds4->update_rumble || ds4->update_lightbar) {
+ ds4->update_rumble = true; /* 0x01 */
+ ds4->update_lightbar = true; /* 0x02 */
+ }
- if (ds4->update_rumble) {
- /* Select classic rumble style haptics and enable it. */
- common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_MOTOR;
- common->motor_left = ds4->motor_left;
- common->motor_right = ds4->motor_right;
- ds4->update_rumble = false;
- }
+ if (ds4->update_rumble) {
+ /* Select classic rumble style haptics and enable it. */
+ common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_MOTOR;
+ common->motor_left = ds4->motor_left;
+ common->motor_right = ds4->motor_right;
+ ds4->update_rumble = false;
+ }
- if (ds4->update_lightbar) {
- common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED;
- /* Comptabile behavior with hid-sony, which used a dummy global LED to
- * allow enabling/disabling the lightbar. The global LED maps to
- * lightbar_enabled.
- */
- common->lightbar_red = ds4->lightbar_enabled ? ds4->lightbar_red : 0;
- common->lightbar_green = ds4->lightbar_enabled ? ds4->lightbar_green : 0;
- common->lightbar_blue = ds4->lightbar_enabled ? ds4->lightbar_blue : 0;
- ds4->update_lightbar = false;
- }
+ if (ds4->update_lightbar) {
+ common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED;
+ /* Compatible behavior with hid-sony, which used a dummy global LED to
+ * allow enabling/disabling the lightbar. The global LED maps to
+ * lightbar_enabled.
+ */
+ common->lightbar_red = ds4->lightbar_enabled ? ds4->lightbar_red : 0;
+ common->lightbar_green = ds4->lightbar_enabled ? ds4->lightbar_green : 0;
+ common->lightbar_blue = ds4->lightbar_enabled ? ds4->lightbar_blue : 0;
+ ds4->update_lightbar = false;
+ }
- if (ds4->update_lightbar_blink) {
- common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED_BLINK;
- common->lightbar_blink_on = ds4->lightbar_blink_on;
- common->lightbar_blink_off = ds4->lightbar_blink_off;
- ds4->update_lightbar_blink = false;
+ if (ds4->update_lightbar_blink) {
+ common->valid_flag0 |= DS4_OUTPUT_VALID_FLAG0_LED_BLINK;
+ common->lightbar_blink_on = ds4->lightbar_blink_on;
+ common->lightbar_blink_off = ds4->lightbar_blink_off;
+ ds4->update_lightbar_blink = false;
+ }
}
- spin_unlock_irqrestore(&ds4->base.lock, flags);
-
/* Bluetooth packets need additional flags as well as a CRC in the last 4 bytes. */
if (report.bt) {
- uint32_t crc;
- uint8_t seed = PS_OUTPUT_CRC32_SEED;
+ u32 crc;
+ u8 seed = PS_OUTPUT_CRC32_SEED;
/* Hardware control flags need to be set to let the device know
* there is HID data as well as CRC.
@@ -2217,16 +2349,15 @@ static void dualshock4_output_worker(struct work_struct *work)
}
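
The CRC mentioned just above is the usual Sony trailer: a CRC-32 over a one-byte report-type seed followed by the report body, appended little-endian in the last four bytes. A sketch of the output-path computation, assuming crc32_le() from <linux/crc32.h> and mirroring the ps_check_crc32() helper used on the input path:

    /* 'seed' is PS_OUTPUT_CRC32_SEED; 'data'/'len' cover the report
     * minus the trailing 4 CRC bytes.
     */
    u32 crc = crc32_le(0xFFFFFFFF, &seed, 1);

    crc = ~crc32_le(crc, data, len);
    put_unaligned_le32(crc, &data[len]);
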
static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *report,
- u8 *data, int size)
+ u8 *data, int size)
{
struct hid_device *hdev = ps_dev->hdev;
struct dualshock4 *ds4 = container_of(ps_dev, struct dualshock4, base);
struct dualshock4_input_report_common *ds4_report;
struct dualshock4_touch_report *touch_reports;
- uint8_t battery_capacity, num_touch_reports, value;
+ u8 battery_capacity, num_touch_reports, value;
int battery_status, i, j;
- uint16_t sensor_timestamp;
- unsigned long flags;
+ u16 sensor_timestamp;
bool is_minimal = false;
/*
@@ -2235,16 +2366,17 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
* the full report using reportID 17.
*/
if (hdev->bus == BUS_USB && report->id == DS4_INPUT_REPORT_USB &&
- size == DS4_INPUT_REPORT_USB_SIZE) {
- struct dualshock4_input_report_usb *usb = (struct dualshock4_input_report_usb *)data;
+ size == DS4_INPUT_REPORT_USB_SIZE) {
+ struct dualshock4_input_report_usb *usb =
+ (struct dualshock4_input_report_usb *)data;
ds4_report = &usb->common;
num_touch_reports = usb->num_touch_reports;
touch_reports = usb->touch_reports;
} else if (hdev->bus == BUS_BLUETOOTH && report->id == DS4_INPUT_REPORT_BT &&
- size == DS4_INPUT_REPORT_BT_SIZE) {
+ size == DS4_INPUT_REPORT_BT_SIZE) {
struct dualshock4_input_report_bt *bt = (struct dualshock4_input_report_bt *)data;
- uint32_t report_crc = get_unaligned_le32(&bt->crc32);
+ u32 report_crc = get_unaligned_le32(&bt->crc32);
/* Last 4 bytes of input report contains CRC. */
if (!ps_check_crc32(PS_INPUT_CRC32_SEED, data, size - 4, report_crc)) {
@@ -2325,16 +2457,16 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
/* Convert timestamp (in 5.33us unit) to timestamp_us */
sensor_timestamp = le16_to_cpu(ds4_report->sensor_timestamp);
if (!ds4->sensor_timestamp_initialized) {
- ds4->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp*16, 3);
+ ds4->sensor_timestamp_us = DIV_ROUND_CLOSEST(sensor_timestamp * 16, 3);
ds4->sensor_timestamp_initialized = true;
} else {
- uint16_t delta;
+ u16 delta;
if (ds4->prev_sensor_timestamp > sensor_timestamp)
delta = (U16_MAX - ds4->prev_sensor_timestamp + sensor_timestamp + 1);
else
delta = sensor_timestamp - ds4->prev_sensor_timestamp;
- ds4->sensor_timestamp_us += DIV_ROUND_CLOSEST(delta*16, 3);
+ ds4->sensor_timestamp_us += DIV_ROUND_CLOSEST(delta * 16, 3);
}
ds4->prev_sensor_timestamp = sensor_timestamp;
input_event(ds4->sensors, EV_MSC, MSC_TIMESTAMP, ds4->sensor_timestamp_us);
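
The 16/3 factor encodes the sensor's tick period: one tick is 16/3 µs, roughly 5.33 µs, so microseconds = ticks * 16 / 3, rounded. The u16 delta arithmetic also survives counter wraparound. A worked example:

    /* prev = 0xFFF0, current = 0x0010: the counter wrapped, so
     *   delta = U16_MAX - 0xFFF0 + 0x0010 + 1 = 32 ticks
     *   DIV_ROUND_CLOSEST(32 * 16, 3) = DIV_ROUND_CLOSEST(512, 3) = 171 us
     * which matches 32 ticks * 5.33 us/tick = 170.7 us.
     */
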
@@ -2351,11 +2483,10 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
input_mt_report_slot_state(ds4->touchpad, MT_TOOL_FINGER, active);
if (active) {
- int x = (point->x_hi << 8) | point->x_lo;
- int y = (point->y_hi << 4) | point->y_lo;
-
- input_report_abs(ds4->touchpad, ABS_MT_POSITION_X, x);
- input_report_abs(ds4->touchpad, ABS_MT_POSITION_Y, y);
+ input_report_abs(ds4->touchpad, ABS_MT_POSITION_X,
+ DS4_TOUCH_POINT_X(point->x_hi, point->x_lo));
+ input_report_abs(ds4->touchpad, ABS_MT_POSITION_Y,
+ DS4_TOUCH_POINT_Y(point->y_hi, point->y_lo));
}
}
input_mt_sync_frame(ds4->touchpad);
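
The DS4_TOUCH_POINT_X()/DS4_TOUCH_POINT_Y() macros are defined elsewhere in this patch; judging from the open-coded shifts they replace, their shape is presumably:

    /* Assumed definitions, matching the removed lines: each 12-bit
     * coordinate is split across a byte pair, X carrying its top four
     * bits in x_hi and Y its bottom four bits in y_lo.
     */
    #define DS4_TOUCH_POINT_X(hi, lo) (((hi) << 8) | (lo))
    #define DS4_TOUCH_POINT_Y(hi, lo) (((hi) << 4) | (lo))
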
@@ -2374,7 +2505,7 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
* - 15: charge error
*/
if (ds4_report->status[0] & DS4_STATUS0_CABLE_STATE) {
- uint8_t battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY;
+ u8 battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY;
if (battery_data < 10) {
/* Take the mid-point for each battery capacity value,
@@ -2395,7 +2526,7 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
}
} else {
- uint8_t battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY;
+ u8 battery_data = ds4_report->status[0] & DS4_STATUS0_BATTERY_CAPACITY;
if (battery_data < 10)
battery_capacity = battery_data * 10 + 5;
@@ -2405,16 +2536,16 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
}
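
Worked example of the mid-point mapping used in both branches above:

    /* 4-bit battery field -> reported capacity:
     *   raw 0 ->  5 %   raw 3 -> 35 %   raw 9 -> 95 %
     *   raw >= 10 -> 100 % (discharging case)
     * Each raw step covers a 10 % band, so the midpoint raw * 10 + 5
     * minimises the worst-case error within a band.
     */
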
- spin_lock_irqsave(&ps_dev->lock, flags);
- ps_dev->battery_capacity = battery_capacity;
- ps_dev->battery_status = battery_status;
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock) {
+ ps_dev->battery_capacity = battery_capacity;
+ ps_dev->battery_status = battery_status;
+ }
return 0;
}
static int dualshock4_dongle_parse_report(struct ps_device *ps_dev, struct hid_report *report,
- u8 *data, int size)
+ u8 *data, int size)
{
struct dualshock4 *ds4 = container_of(ps_dev, struct dualshock4, base);
bool connected = false;
@@ -2425,8 +2556,8 @@ static int dualshock4_dongle_parse_report(struct ps_device *ps_dev, struct hid_r
* parsing code.
*/
if (data[0] == DS4_INPUT_REPORT_USB && size == DS4_INPUT_REPORT_USB_SIZE) {
- struct dualshock4_input_report_common *ds4_report = (struct dualshock4_input_report_common *)&data[1];
- unsigned long flags;
+ struct dualshock4_input_report_common *ds4_report =
+ (struct dualshock4_input_report_common *)&data[1];
connected = ds4_report->status[1] & DS4_STATUS1_DONGLE_STATE ? false : true;
@@ -2435,9 +2566,8 @@ static int dualshock4_dongle_parse_report(struct ps_device *ps_dev, struct hid_r
dualshock4_set_default_lightbar_colors(ds4);
- spin_lock_irqsave(&ps_dev->lock, flags);
- ds4->dongle_state = DONGLE_CALIBRATING;
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock)
+ ds4->dongle_state = DONGLE_CALIBRATING;
schedule_work(&ds4->dongle_hotplug_worker);
@@ -2449,9 +2579,8 @@ static int dualshock4_dongle_parse_report(struct ps_device *ps_dev, struct hid_r
ds4->dongle_state == DONGLE_DISABLED) && !connected) {
hid_info(ps_dev->hdev, "DualShock 4 USB dongle: controller disconnected\n");
- spin_lock_irqsave(&ps_dev->lock, flags);
- ds4->dongle_state = DONGLE_DISCONNECTED;
- spin_unlock_irqrestore(&ps_dev->lock, flags);
+ scoped_guard(spinlock_irqsave, &ps_dev->lock)
+ ds4->dongle_state = DONGLE_DISCONNECTED;
/* Return 0, so hidraw can get the report. */
return 0;
@@ -2473,16 +2602,15 @@ static int dualshock4_play_effect(struct input_dev *dev, void *data, struct ff_e
{
struct hid_device *hdev = input_get_drvdata(dev);
struct dualshock4 *ds4 = hid_get_drvdata(hdev);
- unsigned long flags;
if (effect->type != FF_RUMBLE)
return 0;
- spin_lock_irqsave(&ds4->base.lock, flags);
- ds4->update_rumble = true;
- ds4->motor_left = effect->u.rumble.strong_magnitude / 256;
- ds4->motor_right = effect->u.rumble.weak_magnitude / 256;
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds4->base.lock) {
+ ds4->update_rumble = true;
+ ds4->motor_left = effect->u.rumble.strong_magnitude / 256;
+ ds4->motor_right = effect->u.rumble.weak_magnitude / 256;
+ }
dualshock4_schedule_work(ds4);
return 0;
@@ -2491,11 +2619,9 @@ static int dualshock4_play_effect(struct input_dev *dev, void *data, struct ff_e
static void dualshock4_remove(struct ps_device *ps_dev)
{
struct dualshock4 *ds4 = container_of(ps_dev, struct dualshock4, base);
- unsigned long flags;
- spin_lock_irqsave(&ds4->base.lock, flags);
- ds4->output_worker_initialized = false;
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ scoped_guard(spinlock_irqsave, &ds4->base.lock)
+ ds4->output_worker_initialized = false;
cancel_work_sync(&ds4->output_worker);
@@ -2505,15 +2631,13 @@ static void dualshock4_remove(struct ps_device *ps_dev)
static inline void dualshock4_schedule_work(struct dualshock4 *ds4)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ds4->base.lock, flags);
- if (ds4->output_worker_initialized)
- schedule_work(&ds4->output_worker);
- spin_unlock_irqrestore(&ds4->base.lock, flags);
+ /* Using scoped_guard() instead of guard() to make sparse happy */
+ scoped_guard(spinlock_irqsave, &ds4->base.lock)
+ if (ds4->output_worker_initialized)
+ schedule_work(&ds4->output_worker);
}
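
On the sparse remark: both helpers come from <linux/cleanup.h>. guard() holds the lock until the end of the enclosing scope via a cleanup attribute, which sparse's context checking cannot follow; scoped_guard() makes the critical section an explicit block. Roughly, as two alternative forms of the same section:

    /* (a) guard(): lock is held until the end of the enclosing scope. */
    guard(spinlock_irqsave)(&ds4->base.lock);

    /* (b) scoped_guard(): lock is held for the explicit block only,
     * which sparse can verify.
     */
    scoped_guard(spinlock_irqsave, &ds4->base.lock) {
            if (ds4->output_worker_initialized)
                    schedule_work(&ds4->output_worker);
    }
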
-static void dualshock4_set_bt_poll_interval(struct dualshock4 *ds4, uint8_t interval)
+static void dualshock4_set_bt_poll_interval(struct dualshock4 *ds4, u8 interval)
{
ds4->bt_poll_interval = interval;
ds4->update_bt_poll_interval = true;
@@ -2533,7 +2657,7 @@ static void dualshock4_set_default_lightbar_colors(struct dualshock4 *ds4)
{ 0x20, 0x00, 0x20 } /* Pink */
};
- uint8_t player_id = ds4->base.player_id % ARRAY_SIZE(player_colors);
+ u8 player_id = ds4->base.player_id % ARRAY_SIZE(player_colors);
ds4->lightbar_enabled = true;
ds4->lightbar_red = player_colors[player_id][0];
@@ -2548,7 +2672,7 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
{
struct dualshock4 *ds4;
struct ps_device *ps_dev;
- uint8_t max_output_report_size;
+ u8 max_output_report_size;
int i, ret;
/* The DualShock4 has an RGB lightbar, which the original hid-sony driver
@@ -2561,11 +2685,14 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
* existing applications (e.g. Android). Nothing matches against MAC address.
*/
static const struct ps_led_info lightbar_leds_info[] = {
- { NULL, "red", 255, dualshock4_led_get_brightness, dualshock4_led_set_brightness },
- { NULL, "green", 255, dualshock4_led_get_brightness, dualshock4_led_set_brightness },
- { NULL, "blue", 255, dualshock4_led_get_brightness, dualshock4_led_set_brightness },
- { NULL, "global", 1, dualshock4_led_get_brightness, dualshock4_led_set_brightness,
- dualshock4_led_set_blink },
+ { NULL, "red", 255, dualshock4_led_get_brightness,
+ dualshock4_led_set_brightness },
+ { NULL, "green", 255, dualshock4_led_get_brightness,
+ dualshock4_led_set_brightness },
+ { NULL, "blue", 255, dualshock4_led_get_brightness,
+ dualshock4_led_set_brightness },
+ { NULL, "global", 1, dualshock4_led_get_brightness,
+ dualshock4_led_set_brightness, dualshock4_led_set_blink },
};
ds4 = devm_kzalloc(&hdev->dev, sizeof(*ds4), GFP_KERNEL);
@@ -2635,7 +2762,7 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
ps_dev->input_dev_name = dev_name(&ds4->gamepad->dev);
ds4->sensors = ps_sensors_create(hdev, DS4_ACC_RANGE, DS4_ACC_RES_PER_G,
- DS4_GYRO_RANGE, DS4_GYRO_RES_PER_DEG_S);
+ DS4_GYRO_RANGE, DS4_GYRO_RES_PER_DEG_S);
if (IS_ERR(ds4->sensors)) {
ret = PTR_ERR(ds4->sensors);
goto err;
@@ -2674,7 +2801,7 @@ static struct ps_device *dualshock4_create(struct hid_device *hdev)
* can change behavior.
*/
hid_info(hdev, "Registered DualShock4 controller hw_version=0x%08x fw_version=0x%08x\n",
- ds4->base.hw_version, ds4->base.fw_version);
+ ds4->base.hw_version, ds4->base.fw_version);
return &ds4->base;
err:
@@ -2683,7 +2810,7 @@ err:
}
static int ps_raw_event(struct hid_device *hdev, struct hid_report *report,
- u8 *data, int size)
+ u8 *data, int size)
{
struct ps_device *dev = hid_get_drvdata(hdev);
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index f619ed10535d..c89a015686c0 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -57,6 +57,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_COOLER_MASTER, USB_DEVICE_ID_COOLER_MASTER_MICE_DONGLE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
@@ -206,6 +207,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_R295), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
@@ -408,7 +410,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_ELECOM)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XGL20DLBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_00FB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3URBK_018F) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT3DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_XT4DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
@@ -695,6 +698,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
#endif
#if IS_ENABLED(CONFIG_HID_STEELSERIES)
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_9) },
#endif
#if IS_ENABLED(CONFIG_HID_SUNPLUS)
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
@@ -911,7 +916,6 @@ static const struct hid_device_id hid_ignore_list[] = {
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
- { HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) },
{ }
};
@@ -1060,6 +1064,18 @@ bool hid_ignore(struct hid_device *hdev)
strlen(elan_acpi_id[i].id)))
return true;
break;
+ case USB_VENDOR_ID_JIELI_SDK_DEFAULT:
+ /*
+ * Multiple USB devices with identical IDs (mic & touchscreen).
+ * The touchscreen requires HID core processing, but the
+ * microphone does not. They can be distinguished by manufacturer
+ * and serial number.
+ */
+ if (hdev->product == USB_DEVICE_ID_JIELI_SDK_4155 &&
+ strncmp(hdev->name, "SmartlinkTechnology", 19) == 0 &&
+ strncmp(hdev->uniq, "20201111000001", 14) == 0)
+ return true;
+ break;
}
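
Condensed, the new case is a narrow predicate layered on top of the VID/PID match. A sketch with a hypothetical helper name, reusing the exact string prefixes from the code above:

    /* True only for the microphone function of the Jieli SDK device,
     * which shares VID/PID with the touchscreen but differs in its
     * manufacturer string and serial number.
     */
    static bool jieli_is_microphone(const struct hid_device *hdev)
    {
            return hdev->product == USB_DEVICE_ID_JIELI_SDK_4155 &&
                   !strncmp(hdev->name, "SmartlinkTechnology", 19) &&
                   !strncmp(hdev->uniq, "20201111000001", 14);
    }
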
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
index d4bd7848b8c6..f98435631aa1 100644
--- a/drivers/hid/hid-steelseries.c
+++ b/drivers/hid/hid-steelseries.c
@@ -249,11 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
{
int ret, i;
struct led_classdev *led;
+ struct steelseries_srws1_data *drv_data;
size_t name_sz;
char *name;
- struct steelseries_srws1_data *drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
-
+ drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
if (drv_data == NULL) {
hid_err(hdev, "can't alloc SRW-S1 memory\n");
return -ENOMEM;
@@ -264,18 +264,18 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err_free;
+ goto err;
}
if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) {
ret = -ENODEV;
- goto err_free;
+ goto err;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
- goto err_free;
+ goto err;
}
/* register led subsystem */
@@ -288,10 +288,10 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
name_sz = strlen(hdev->uniq) + 16;
/* 'ALL', for setting all LEDs simultaneously */
- led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "can't allocate memory for LED ALL\n");
- goto err_led;
+ goto out;
}
name = (void *)(&led[1]);
@@ -303,16 +303,18 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
led->brightness_set = steelseries_srws1_led_all_set_brightness;
drv_data->led[SRWS1_NUMBER_LEDS] = led;
- ret = led_classdev_register(&hdev->dev, led);
- if (ret)
- goto err_led;
+ ret = devm_led_classdev_register(&hdev->dev, led);
+ if (ret) {
+ hid_err(hdev, "failed to register LED %d. Aborting.\n", SRWS1_NUMBER_LEDS);
+ goto out; /* let the driver continue without LEDs */
+ }
/* Each individual LED */
for (i = 0; i < SRWS1_NUMBER_LEDS; i++) {
- led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
+ led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
if (!led) {
hid_err(hdev, "can't allocate memory for LED %d\n", i);
- goto err_led;
+ break;
}
name = (void *)(&led[1]);
@@ -324,53 +326,18 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
led->brightness_set = steelseries_srws1_led_set_brightness;
drv_data->led[i] = led;
- ret = led_classdev_register(&hdev->dev, led);
+ ret = devm_led_classdev_register(&hdev->dev, led);
if (ret) {
hid_err(hdev, "failed to register LED %d. Aborting.\n", i);
-err_led:
- /* Deregister all LEDs (if any) */
- for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
- led = drv_data->led[i];
- drv_data->led[i] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
- goto out; /* but let the driver continue without LEDs */
+ break; /* but let the driver continue without LEDs */
}
}
out:
return 0;
-err_free:
- kfree(drv_data);
+err:
return ret;
}
-
-static void steelseries_srws1_remove(struct hid_device *hdev)
-{
- int i;
- struct led_classdev *led;
-
- struct steelseries_srws1_data *drv_data = hid_get_drvdata(hdev);
-
- if (drv_data) {
- /* Deregister LEDs (if any) */
- for (i = 0; i < SRWS1_NUMBER_LEDS + 1; i++) {
- led = drv_data->led[i];
- drv_data->led[i] = NULL;
- if (!led)
- continue;
- led_classdev_unregister(led);
- kfree(led);
- }
-
- }
-
- hid_hw_stop(hdev);
- kfree(drv_data);
-}
#endif
#define STEELSERIES_HEADSET_BATTERY_TIMEOUT_MS 3000
@@ -405,13 +372,12 @@ static int steelseries_headset_request_battery(struct hid_device *hdev,
static void steelseries_headset_fetch_battery(struct hid_device *hdev)
{
- struct steelseries_device *sd = hid_get_drvdata(hdev);
int ret = 0;
- if (sd->quirks & STEELSERIES_ARCTIS_1)
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_1)
ret = steelseries_headset_request_battery(hdev,
arctis_1_battery_request, sizeof(arctis_1_battery_request));
- else if (sd->quirks & STEELSERIES_ARCTIS_9)
+ else if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_9)
ret = steelseries_headset_request_battery(hdev,
arctis_9_battery_request, sizeof(arctis_9_battery_request));
@@ -567,14 +533,7 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
struct steelseries_device *sd;
int ret;
- sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL);
- if (!sd)
- return -ENOMEM;
- hid_set_drvdata(hdev, sd);
- sd->hdev = hdev;
- sd->quirks = id->driver_data;
-
- if (sd->quirks & STEELSERIES_SRWS1) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_SRWS1) {
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
(IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
return steelseries_srws1_probe(hdev, id);
@@ -583,6 +542,13 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
#endif
}
+ sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL);
+ if (!sd)
+ return -ENOMEM;
+ hid_set_drvdata(hdev, sd);
+ sd->hdev = hdev;
+ sd->quirks = id->driver_data;
+
ret = hid_parse(hdev);
if (ret)
return ret;
@@ -610,17 +576,19 @@ static int steelseries_probe(struct hid_device *hdev, const struct hid_device_id
static void steelseries_remove(struct hid_device *hdev)
{
- struct steelseries_device *sd = hid_get_drvdata(hdev);
+ struct steelseries_device *sd;
unsigned long flags;
- if (sd->quirks & STEELSERIES_SRWS1) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_SRWS1) {
#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \
(IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES))
- steelseries_srws1_remove(hdev);
+ hid_hw_stop(hdev);
#endif
return;
}
+ sd = hid_get_drvdata(hdev);
+
spin_lock_irqsave(&sd->lock, flags);
sd->removed = true;
spin_unlock_irqrestore(&sd->lock, flags);
@@ -667,10 +635,10 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
unsigned long flags;
/* Not a headset */
- if (sd->quirks & STEELSERIES_SRWS1)
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_SRWS1)
return 0;
- if (sd->quirks & STEELSERIES_ARCTIS_1) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_1) {
hid_dbg(sd->hdev,
"Parsing raw event for Arctis 1 headset (%*ph)\n", size, read_buf);
if (size < ARCTIS_1_BATTERY_RESPONSE_LEN ||
@@ -688,7 +656,7 @@ static int steelseries_headset_raw_event(struct hid_device *hdev,
}
}
- if (sd->quirks & STEELSERIES_ARCTIS_9) {
+ if (hdev->product == USB_DEVICE_ID_STEELSERIES_ARCTIS_9) {
hid_dbg(sd->hdev,
"Parsing raw event for Arctis 9 headset (%*ph)\n", size, read_buf);
if (size < ARCTIS_9_BATTERY_RESPONSE_LEN) {
@@ -757,11 +725,11 @@ static const struct hid_device_id steelseries_devices[] = {
.driver_data = STEELSERIES_SRWS1 },
{ /* SteelSeries Arctis 1 Wireless for XBox */
- HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12b6),
- .driver_data = STEELSERIES_ARCTIS_1 },
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_1),
+ .driver_data = STEELSERIES_ARCTIS_1 },
{ /* SteelSeries Arctis 9 Wireless for XBox */
- HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, 0x12c2),
+ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_ARCTIS_9),
.driver_data = STEELSERIES_ARCTIS_9 },
{ }
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index 34fb03ae8ee2..90ebb81041ea 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -362,6 +362,23 @@ static int uclogic_raw_event_pen(struct uclogic_drvdata *drvdata,
data[8] = pressure_low_byte;
data[9] = pressure_high_byte;
}
+ if (size == 12 && pen->fragmented_hires2) {
+ // Bytes 10-11 read 00 00 on the left half of the tablet and 01 00 on the
+ // right: they carry the high half of the X coordinate. Move them to sit
+ // directly after the 16-bit X value to form a correct 32-bit X coordinate.
+ u8 lsb_low_byte = data[10];
+ u8 lsb_high_byte = data[11];
+
+ // shift everything right by 2 bytes, to make space for the moved lsb
+ data[11] = data[9];
+ data[10] = data[8];
+ data[9] = data[7];
+ data[8] = data[6];
+ data[7] = data[5];
+ data[6] = data[4];
+
+ data[4] = lsb_low_byte;
+ data[5] = lsb_high_byte;
+ }
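
The shuffle rearranges the 12-byte report in place; only the byte moves are taken from the code above, the field interpretation follows the comments:

    /* index:  0  1  2  3   4   5   6  7  8  9  10  11
     * before: .. .. .. ..  a   b   c  d  e  f  x2  x3
     * after:  .. .. .. ..  x2  x3  a  b  c  d  e   f
     *
     * data[10..11] move to data[4..5]; the six bytes previously at
     * data[4..9] shift to data[6..11].
     */
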
/* If we need to emulate in-range detection */
if (pen->inrange == UCLOGIC_PARAMS_PEN_INRANGE_NONE) {
/* Set in-range bit */
@@ -604,6 +621,8 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_22R_PRO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_24_PRO) },
{ }
};
MODULE_DEVICE_TABLE(hid, uclogic_devices);
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 4a17f7332c3f..e28176d9d9c9 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -20,6 +20,7 @@
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/unaligned.h>
+#include <linux/string_choices.h>
/**
* uclogic_params_pen_inrange_to_str() - Convert a pen in-range reporting type
@@ -59,7 +60,7 @@ static void uclogic_params_pen_hid_dbg(const struct hid_device *hdev,
size_t i;
hid_dbg(hdev, "\t.usage_invalid = %s\n",
- (pen->usage_invalid ? "true" : "false"));
+ str_true_false(pen->usage_invalid));
hid_dbg(hdev, "\t.desc_ptr = %p\n", pen->desc_ptr);
hid_dbg(hdev, "\t.desc_size = %u\n", pen->desc_size);
hid_dbg(hdev, "\t.id = %u\n", pen->id);
@@ -74,9 +75,9 @@ static void uclogic_params_pen_hid_dbg(const struct hid_device *hdev,
hid_dbg(hdev, "\t.inrange = %s\n",
uclogic_params_pen_inrange_to_str(pen->inrange));
hid_dbg(hdev, "\t.fragmented_hires = %s\n",
- (pen->fragmented_hires ? "true" : "false"));
+ str_true_false(pen->fragmented_hires));
hid_dbg(hdev, "\t.tilt_y_flipped = %s\n",
- (pen->tilt_y_flipped ? "true" : "false"));
+ str_true_false(pen->tilt_y_flipped));
}
/**
@@ -119,8 +120,7 @@ void uclogic_params_hid_dbg(const struct hid_device *hdev,
{
size_t i;
- hid_dbg(hdev, ".invalid = %s\n",
- params->invalid ? "true" : "false");
+ hid_dbg(hdev, ".invalid = %s\n", str_true_false(params->invalid));
hid_dbg(hdev, ".desc_ptr = %p\n", params->desc_ptr);
hid_dbg(hdev, ".desc_size = %u\n", params->desc_size);
hid_dbg(hdev, ".pen = {\n");
@@ -1123,6 +1123,9 @@ static int uclogic_params_parse_ugee_v2_desc(const __u8 *str_desc,
return -EINVAL;
pen_x_lm = get_unaligned_le16(str_desc + 2);
+ if (str_desc_size > 12)
+ pen_x_lm += (u8)str_desc[12] << 16;
+
pen_y_lm = get_unaligned_le16(str_desc + 4);
frame_num_buttons = str_desc[6];
*frame_type = str_desc[7];
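
With the longer string descriptor, byte 12 contributes a third, most significant byte to the X logical maximum. Hypothetical example values:

    /* str_desc[2..3] = 34 D9 -> get_unaligned_le16() = 0xD934 (55604)
     * str_desc[12]   = 01    -> pen_x_lm += 0x01 << 16 = 65536
     * => 24-bit X logical maximum 0x1D934 (121140)
     */
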
@@ -1369,8 +1372,10 @@ static int uclogic_params_ugee_v2_init_event_hooks(struct hid_device *hdev,
event_hook->hdev = hdev;
event_hook->size = ARRAY_SIZE(reconnect_event);
event_hook->event = kmemdup(reconnect_event, event_hook->size, GFP_KERNEL);
- if (!event_hook->event)
+ if (!event_hook->event) {
+ kfree(event_hook);
return -ENOMEM;
+ }
list_add_tail(&event_hook->list, &p->event_hooks->list);
@@ -1532,7 +1537,7 @@ cleanup:
}
/*
- * uclogic_params_init_ugee_xppen_pro_22r() - Initializes a UGEE XP-Pen Pro 22R tablet device.
+ * uclogic_params_init_ugee_xppen_pro() - Initializes a UGEE XP-Pen Pro tablet device.
*
* @hdev: The HID device of the tablet interface to initialize and get
* parameters from. Cannot be NULL.
@@ -1543,15 +1548,17 @@ cleanup:
* Returns:
* Zero, if successful. A negative errno code on error.
*/
-static int uclogic_params_init_ugee_xppen_pro_22r(struct uclogic_params *params,
- struct hid_device *hdev,
- const u8 rdesc_frame_arr[],
- const size_t rdesc_frame_size)
+static int uclogic_params_init_ugee_xppen_pro(struct uclogic_params *params,
+ struct hid_device *hdev,
+ const u8 rdesc_pen_arr[],
+ const size_t rdesc_pen_size,
+ const u8 rdesc_frame_arr[],
+ const size_t rdesc_frame_size,
+ size_t str_desc_len)
{
int rc = 0;
struct usb_interface *iface;
__u8 bInterfaceNumber;
- const int str_desc_len = 12;
u8 *str_desc = NULL;
__u8 *rdesc_pen = NULL;
s32 desc_params[UCLOGIC_RDESC_PH_ID_NUM];
@@ -1614,8 +1621,8 @@ static int uclogic_params_init_ugee_xppen_pro_22r(struct uclogic_params *params,
/* Initialize the pen interface */
rdesc_pen = uclogic_rdesc_template_apply(
- uclogic_rdesc_ugee_v2_pen_template_arr,
- uclogic_rdesc_ugee_v2_pen_template_size,
+ rdesc_pen_arr,
+ rdesc_pen_size,
desc_params, ARRAY_SIZE(desc_params));
if (!rdesc_pen) {
rc = -ENOMEM;
@@ -1623,7 +1630,7 @@ static int uclogic_params_init_ugee_xppen_pro_22r(struct uclogic_params *params,
}
p.pen.desc_ptr = rdesc_pen;
- p.pen.desc_size = uclogic_rdesc_ugee_v2_pen_template_size;
+ p.pen.desc_size = rdesc_pen_size;
p.pen.id = 0x02;
p.pen.subreport_list[0].value = 0xf0;
p.pen.subreport_list[0].id = UCLOGIC_RDESC_V1_FRAME_ID;
@@ -1970,10 +1977,30 @@ int uclogic_params_init(struct uclogic_params *params,
break;
case VID_PID(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_22R_PRO):
- rc = uclogic_params_init_ugee_xppen_pro_22r(&p,
+ rc = uclogic_params_init_ugee_xppen_pro(&p,
hdev,
+ uclogic_rdesc_ugee_v2_pen_template_arr,
+ uclogic_rdesc_ugee_v2_pen_template_size,
uclogic_rdesc_xppen_artist_22r_pro_frame_arr,
- uclogic_rdesc_xppen_artist_22r_pro_frame_size);
+ uclogic_rdesc_xppen_artist_22r_pro_frame_size,
+ 12);
+ if (rc != 0)
+ goto cleanup;
+
+ break;
+ case VID_PID(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_24_PRO):
+ rc = uclogic_params_init_ugee_xppen_pro(&p,
+ hdev,
+ uclogic_rdesc_xppen_artist_24_pro_pen_template_arr,
+ uclogic_rdesc_xppen_artist_24_pro_pen_template_size,
+ uclogic_rdesc_xppen_artist_24_pro_frame_arr,
+ uclogic_rdesc_xppen_artist_24_pro_frame_size,
+ 14);
+
+ // The 24 Pro reports a fragmented high-resolution X coordinate.
+ p.pen.fragmented_hires2 = true;
+
if (rc != 0)
goto cleanup;
diff --git a/drivers/hid/hid-uclogic-params.h b/drivers/hid/hid-uclogic-params.h
index 6ec8643d2ee5..c84ff17fb5d5 100644
--- a/drivers/hid/hid-uclogic-params.h
+++ b/drivers/hid/hid-uclogic-params.h
@@ -103,6 +103,11 @@ struct uclogic_params_pen {
* Only valid if "id" is not zero.
*/
bool tilt_y_flipped;
+ /*
+ * True if reports carry a fragmented high-resolution X coordinate:
+ * bytes 10-11 hold its high half and are moved to directly follow
+ * the 16-bit X value, widening it to 32 bits.
+ */
+ bool fragmented_hires2;
};
/*
diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
index 08a89c6aae3b..a1b31511b625 100644
--- a/drivers/hid/hid-uclogic-rdesc.c
+++ b/drivers/hid/hid-uclogic-rdesc.c
@@ -1237,6 +1237,131 @@ const __u8 uclogic_rdesc_xppen_artist_22r_pro_frame_arr[] = {
const size_t uclogic_rdesc_xppen_artist_22r_pro_frame_size =
sizeof(uclogic_rdesc_xppen_artist_22r_pro_frame_arr);
+/* Fixed report descriptor template for XP-Pen Artist 24 Pro pen reports.
+ * Mostly identical to uclogic_rdesc_ugee_v2_pen_template_arr, except that
+ * the X coordinate is 32 bits wide instead of 16.
+ */
+const __u8 uclogic_rdesc_xppen_artist_24_pro_pen_template_arr[] = {
+ 0x05, 0x0d, /* Usage Page (Digitizers), */
+ 0x09, 0x01, /* Usage (Digitizer), */
+ 0xa1, 0x01, /* Collection (Application), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x09, 0x20, /* Usage (Stylus), */
+ 0xa1, 0x00, /* Collection (Physical), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x09, 0x44, /* Usage (Barrel Switch), */
+ 0x09, 0x46, /* Usage (Tablet Pick), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x09, 0x32, /* Usage (In Range), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x35, 0x00, /* Physical Minimum (0), */
+ 0xa4, /* Push, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x65, 0x13, /* Unit (Inch), */
+ 0x55, 0x0d, /* Unit Exponent (-3), */
+ 0x27, UCLOGIC_RDESC_PEN_PH(X_LM),
+ /* Logical Maximum (PLACEHOLDER), */
+ 0x47, UCLOGIC_RDESC_PEN_PH(X_PM),
+ /* Physical Maximum (PLACEHOLDER), */
+ 0x75, 0x20, /* Report Size (32), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x27, UCLOGIC_RDESC_PEN_PH(Y_LM),
+ /* Logical Maximum (PLACEHOLDER), */
+ 0x47, UCLOGIC_RDESC_PEN_PH(Y_PM),
+ /* Physical Maximum (PLACEHOLDER), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xb4, /* Pop, */
+ 0x09, 0x30, /* Usage (Tip Pressure), */
+ 0x45, 0x00, /* Physical Maximum (0), */
+ 0x27, UCLOGIC_RDESC_PEN_PH(PRESSURE_LM),
+ /* Logical Maximum (PLACEHOLDER), */
+ 0x75, 0x0D, /* Report Size (13), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x09, 0x3d, /* Usage (X Tilt), */
+ 0x35, 0xC3, /* Physical Minimum (-61), */
+ 0x45, 0x3C, /* Physical Maximum (60), */
+ 0x15, 0xC3, /* Logical Minimum (-61), */
+ 0x25, 0x3C, /* Logical Maximum (60), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x3e, /* Usage (Y Tilt), */
+ 0x35, 0xC3, /* Physical Minimum (-61), */
+ 0x45, 0x3C, /* Physical Maximum (60), */
+ 0x15, 0xC3, /* Logical Minimum (-61), */
+ 0x25, 0x3C, /* Logical Maximum (60), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xc0, /* End Collection, */
+ 0xc0, /* End Collection */
+};
+const size_t uclogic_rdesc_xppen_artist_24_pro_pen_template_size =
+ sizeof(uclogic_rdesc_xppen_artist_24_pro_pen_template_arr);
+
+/* Fixed report descriptor for XP-Pen Artist 24 Pro frame */
+const __u8 uclogic_rdesc_xppen_artist_24_pro_frame_arr[] = {
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x07, /* Usage (Keypad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, UCLOGIC_RDESC_V1_FRAME_ID,
+ /* Report ID (Virtual report), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x39, /* Usage (Tablet Function Keys), */
+ 0xA0, /* Collection (Physical), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x08, /* Report Count (8), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x14, /* Usage Maximum (14h), */
+ 0x95, 0x14, /* Report Count (20), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x14, /* Report Count (20), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x38, /* Usage (Wheel), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x15, 0xFF, /* Logical Minimum (-1), */
+ 0x25, 0x08, /* Logical Maximum (8), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0x05, 0x0C, /* Usage Page (Consumer Devices), */
+ 0x0A, 0x38, 0x02, /* Usage (AC PAN), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x10, /* Report Count (16), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection */
+ 0xC0, /* End Collection */
+};
+
+const size_t uclogic_rdesc_xppen_artist_24_pro_frame_size =
+ sizeof(uclogic_rdesc_xppen_artist_24_pro_frame_arr);
+
/**
* uclogic_rdesc_template_apply() - apply report descriptor parameters to a
* report descriptor template, creating a report descriptor. Copies the
diff --git a/drivers/hid/hid-uclogic-rdesc.h b/drivers/hid/hid-uclogic-rdesc.h
index 644a35ff12f2..0619daa6849d 100644
--- a/drivers/hid/hid-uclogic-rdesc.h
+++ b/drivers/hid/hid-uclogic-rdesc.h
@@ -214,4 +214,12 @@ extern const size_t uclogic_rdesc_ugee_g5_frame_size;
extern const __u8 uclogic_rdesc_xppen_artist_22r_pro_frame_arr[];
extern const size_t uclogic_rdesc_xppen_artist_22r_pro_frame_size;
+/* Fixed report descriptor template for XP-Pen Artist 24 Pro pen */
+extern const __u8 uclogic_rdesc_xppen_artist_24_pro_pen_template_arr[];
+extern const size_t uclogic_rdesc_xppen_artist_24_pro_pen_template_size;
+
+/* Fixed report descriptor for XP-Pen Artist 24 Pro frame */
+extern const __u8 uclogic_rdesc_xppen_artist_24_pro_frame_arr[];
+extern const size_t uclogic_rdesc_xppen_artist_24_pro_frame_size;
+
#endif /* _HID_UCLOGIC_RDESC_H */
diff --git a/drivers/hid/hid-universal-pidff.c b/drivers/hid/hid-universal-pidff.c
index 554a6559aeb7..549dac555d40 100644
--- a/drivers/hid/hid-universal-pidff.c
+++ b/drivers/hid/hid-universal-pidff.c
@@ -8,12 +8,12 @@
* Copyright (c) 2024, 2025 Tomasz Pakuła
*/
+#include "hid-ids.h"
+#include "usbhid/hid-pidff.h"
#include <linux/device.h>
#include <linux/hid.h>
-#include <linux/module.h>
#include <linux/input-event-codes.h>
-#include "hid-ids.h"
-#include "usbhid/hid-pidff.h"
+#include <linux/module.h>
#define JOY_RANGE (BTN_DEAD - BTN_JOYSTICK + 1)
@@ -21,8 +21,10 @@
* Map buttons manually to extend the default joystick button limit
*/
static int universal_pidff_input_mapping(struct hid_device *hdev,
- struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
+ struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
{
if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
return 0;
@@ -126,65 +128,64 @@ static int universal_pidff_input_configured(struct hid_device *hdev,
if (!test_bit(axis, input->absbit))
continue;
- input_set_abs_params(input, axis,
- input->absinfo[axis].minimum,
- input->absinfo[axis].maximum,
- axis == ABS_X ? 0 : 8, 0);
+ input_set_abs_params(input, axis, input->absinfo[axis].minimum,
+ input->absinfo[axis].maximum,
+ axis == ABS_X ? 0 : 8, 0);
}
/* Remove fuzz and deadzone from the second joystick axis */
if (hdev->vendor == USB_VENDOR_ID_FFBEAST &&
hdev->product == USB_DEVICE_ID_FFBEAST_JOYSTICK)
input_set_abs_params(input, ABS_Y,
- input->absinfo[ABS_Y].minimum,
- input->absinfo[ABS_Y].maximum, 0, 0);
+ input->absinfo[ABS_Y].minimum,
+ input->absinfo[ABS_Y].maximum, 0, 0);
return 0;
}
static const struct hid_device_id universal_pidff_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R3_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R5_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R9_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R12_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_MOZA, USB_DEVICE_ID_MOZA_R16_R21_2),
- .driver_data = HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION },
+ .driver_data = HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION },
{ HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CAMMUS, USB_DEVICE_ID_CAMMUS_C12) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VRS, USB_DEVICE_ID_VRS_DFP),
- .driver_data = HID_PIDFF_QUIRK_PERMISSIVE_CONTROL },
+ .driver_data = HID_PIDFF_QUIRK_PERMISSIVE_CONTROL },
{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_JOYSTICK), },
{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_RUDDER), },
{ HID_USB_DEVICE(USB_VENDOR_ID_FFBEAST, USB_DEVICE_ID_FFBEAST_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V10),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_PXN_V12_LITE_2),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_LITE_STAR, USB_DEVICE_ID_LITE_STAR_GT987),
- .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
+ .driver_data = HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_INVICTA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_FORTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_LA_PRIMA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASETEK, USB_DEVICE_ID_ASETEK_TONY_KANAAN) },
- { }
+ {}
};
MODULE_DEVICE_TABLE(hid, universal_pidff_devices);
diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c
index d4afbbd27807..ab65dc12d1e0 100644
--- a/drivers/hid/hid-winwing.c
+++ b/drivers/hid/hid-winwing.c
@@ -37,6 +37,7 @@ struct winwing_drv_data {
struct hid_device *hdev;
__u8 *report_buf;
struct mutex lock;
+ int map_more_buttons;
unsigned int num_leds;
struct winwing_led leds[];
};
@@ -81,12 +82,10 @@ static int winwing_init_led(struct hid_device *hdev,
int ret;
int i;
- size_t data_size = struct_size(data, leds, 3);
-
- data = devm_kzalloc(&hdev->dev, data_size, GFP_KERNEL);
+ data = hid_get_drvdata(hdev);
if (!data)
- return -ENOMEM;
+ return -EINVAL;
data->report_buf = devm_kmalloc(&hdev->dev, MAX_REPORT, GFP_KERNEL);
@@ -106,6 +105,7 @@ static int winwing_init_led(struct hid_device *hdev,
"%s::%s",
dev_name(&input->dev),
info->led_name);
+
if (!led->cdev.name)
return -ENOMEM;
@@ -114,14 +114,98 @@ static int winwing_init_led(struct hid_device *hdev,
return ret;
}
- hid_set_drvdata(hdev, data);
-
return ret;
}
+static int winwing_map_button(int button, int map_more_buttons)
+{
+ if (button < 1)
+ return KEY_RESERVED;
+
+ if (button > 112)
+ return KEY_RESERVED;
+
+ if (button <= 16) {
+ /*
+ * Grip buttons [1 .. 16] are mapped to
+ * key codes BTN_TRIGGER .. BTN_DEAD
+ */
+ return (button - 1) + BTN_JOYSTICK;
+ }
+
+ if (button >= 65) {
+ /*
+ * Base buttons [65 .. 112] are mapped to
+ * key codes BTN_TRIGGER_HAPPY17 .. KEY_MAX
+ */
+ return (button - 65) + BTN_TRIGGER_HAPPY17;
+ }
+
+ if (!map_more_buttons) {
+ /*
+ * Not mapping numbers [33 .. 64] which
+ * are not assigned to any real buttons
+ */
+ if (button >= 33)
+ return KEY_RESERVED;
+ /*
+ * Grip buttons [17 .. 32] are mapped to
+ * BTN_TRIGGER_HAPPY1 .. BTN_TRIGGER_HAPPY16
+ */
+ return (button - 17) + BTN_TRIGGER_HAPPY1;
+ }
+
+ if (button >= 49) {
+ /*
+ * Grip buttons [49 .. 64] are mapped to
+ * BTN_TRIGGER_HAPPY1 .. BTN_TRIGGER_HAPPY16
+ */
+ return (button - 49) + BTN_TRIGGER_HAPPY1;
+ }
+
+ /*
+ * Grip buttons [17 .. 44] are mapped to
+ * key codes KEY_MACRO1 .. KEY_MACRO28;
+ * also mapping numbers [45 .. 48] which
+ * are not assigned to any real buttons.
+ */
+ return (button - 17) + KEY_MACRO1;
+}
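
Concrete mappings produced by the function above, for both settings of map_more_buttons:

    /* button  1 -> BTN_TRIGGER (== BTN_JOYSTICK)   either mode
     * button 16 -> BTN_DEAD                        either mode
     * button 17 -> BTN_TRIGGER_HAPPY1              map_more_buttons == 0
     * button 17 -> KEY_MACRO1                      map_more_buttons == 1
     * button 40 -> KEY_RESERVED                    map_more_buttons == 0
     * button 49 -> BTN_TRIGGER_HAPPY1              map_more_buttons == 1
     * button 65 -> BTN_TRIGGER_HAPPY17             either mode
     */
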
+
+static int winwing_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct winwing_drv_data *data;
+ int code = KEY_RESERVED;
+ int button = 0;
+
+ data = hid_get_drvdata(hdev);
+
+ if (!data)
+ return -EINVAL;
+
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON)
+ return 0;
+
+ if (field->application != HID_GD_JOYSTICK)
+ return 0;
+
+ /* Button numbers start with 1 */
+ button = usage->hid & HID_USAGE;
+
+ code = winwing_map_button(button, data->map_more_buttons);
+
+ hid_map_usage(hi, usage, bit, max, EV_KEY, code);
+
+ return 1;
+}
+
static int winwing_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
+ struct winwing_drv_data *data;
+ size_t data_size = struct_size(data, leds, 3);
int ret;
ret = hid_parse(hdev);
@@ -130,6 +214,15 @@ static int winwing_probe(struct hid_device *hdev,
return ret;
}
+ data = devm_kzalloc(&hdev->dev, data_size, GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ data->map_more_buttons = id->driver_data;
+
+ hid_set_drvdata(hdev, data);
+
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
@@ -152,64 +245,11 @@ static int winwing_input_configured(struct hid_device *hdev,
return ret;
}
-static const __u8 original_rdesc_buttons[] = {
- 0x05, 0x09, 0x19, 0x01, 0x29, 0x6F,
- 0x15, 0x00, 0x25, 0x01, 0x35, 0x00,
- 0x45, 0x01, 0x75, 0x01, 0x95, 0x6F,
- 0x81, 0x02, 0x75, 0x01, 0x95, 0x01,
- 0x81, 0x01
-};
-
-/*
- * HID report descriptor shows 111 buttons, which exceeds maximum
- * number of buttons (80) supported by Linux kernel HID subsystem.
- *
- * This module skips numbers 32-63, unused on some throttle grips.
- */
-
-static const __u8 *winwing_report_fixup(struct hid_device *hdev, __u8 *rdesc,
- unsigned int *rsize)
-{
- int sig_length = sizeof(original_rdesc_buttons);
- int unused_button_numbers = 32;
-
- if (*rsize < 34)
- return rdesc;
-
- if (memcmp(rdesc + 8, original_rdesc_buttons, sig_length) == 0) {
-
- /* Usage Maximum */
- rdesc[13] -= unused_button_numbers;
-
- /* Report Count for buttons */
- rdesc[25] -= unused_button_numbers;
-
- /* Report Count for padding [HID1_11, 6.2.2.9] */
- rdesc[31] += unused_button_numbers;
-
- hid_info(hdev, "winwing descriptor fixed\n");
- }
-
- return rdesc;
-}
-
-static int winwing_raw_event(struct hid_device *hdev,
- struct hid_report *report, u8 *raw_data, int size)
-{
- if (size >= 15) {
- /* Skip buttons 32 .. 63 */
- memmove(raw_data + 5, raw_data + 9, 6);
-
- /* Clear the padding */
- memset(raw_data + 11, 0, 4);
- }
-
- return 0;
-}
-
static const struct hid_device_id winwing_devices[] = {
- { HID_USB_DEVICE(0x4098, 0xbe62) }, /* TGRIP-18 */
- { HID_USB_DEVICE(0x4098, 0xbe68) }, /* TGRIP-16EX */
+ { HID_USB_DEVICE(0x4098, 0xbd65), .driver_data = 1 }, /* TGRIP-15E */
+ { HID_USB_DEVICE(0x4098, 0xbd64), .driver_data = 1 }, /* TGRIP-15EX */
+ { HID_USB_DEVICE(0x4098, 0xbe68), .driver_data = 0 }, /* TGRIP-16EX */
+ { HID_USB_DEVICE(0x4098, 0xbe62), .driver_data = 0 }, /* TGRIP-18 */
{}
};
@@ -218,10 +258,9 @@ MODULE_DEVICE_TABLE(hid, winwing_devices);
static struct hid_driver winwing_driver = {
.name = "winwing",
.id_table = winwing_devices,
- .probe = winwing_probe,
.input_configured = winwing_input_configured,
- .report_fixup = winwing_report_fixup,
- .raw_event = winwing_raw_event,
+ .input_mapping = winwing_input_mapping,
+ .probe = winwing_probe,
};
module_hid_driver(winwing_driver);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index c887f48756f4..bbd6f23bce78 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -394,27 +394,15 @@ static int hidraw_revoke(struct hidraw_list *list)
return 0;
}
-static long hidraw_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+static long hidraw_fixed_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
+ void __user *arg)
{
- struct inode *inode = file_inode(file);
- unsigned int minor = iminor(inode);
- long ret = 0;
- struct hidraw *dev;
- struct hidraw_list *list = file->private_data;
- void __user *user_arg = (void __user*) arg;
-
- down_read(&minors_rwsem);
- dev = hidraw_table[minor];
- if (!dev || !dev->exist || hidraw_is_revoked(list)) {
- ret = -ENODEV;
- goto out;
- }
+ struct hid_device *hid = dev->hid;
switch (cmd) {
case HIDIOCGRDESCSIZE:
- if (put_user(dev->hid->rsize, (int __user *)arg))
- ret = -EFAULT;
+ if (put_user(hid->rsize, (int __user *)arg))
+ return -EFAULT;
break;
case HIDIOCGRDESC:
@@ -422,113 +410,145 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
__u32 len;
if (get_user(len, (int __user *)arg))
- ret = -EFAULT;
- else if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
- ret = -EINVAL;
- else if (copy_to_user(user_arg + offsetof(
- struct hidraw_report_descriptor,
- value[0]),
- dev->hid->rdesc,
- min(dev->hid->rsize, len)))
- ret = -EFAULT;
+ return -EFAULT;
+
+ if (len > HID_MAX_DESCRIPTOR_SIZE - 1)
+ return -EINVAL;
+
+ if (copy_to_user(arg + offsetof(
+ struct hidraw_report_descriptor,
+ value[0]),
+ hid->rdesc,
+ min(hid->rsize, len)))
+ return -EFAULT;
+
break;
}
case HIDIOCGRAWINFO:
{
struct hidraw_devinfo dinfo;
- dinfo.bustype = dev->hid->bus;
- dinfo.vendor = dev->hid->vendor;
- dinfo.product = dev->hid->product;
- if (copy_to_user(user_arg, &dinfo, sizeof(dinfo)))
- ret = -EFAULT;
+ dinfo.bustype = hid->bus;
+ dinfo.vendor = hid->vendor;
+ dinfo.product = hid->product;
+ if (copy_to_user(arg, &dinfo, sizeof(dinfo)))
+ return -EFAULT;
break;
}
case HIDIOCREVOKE:
{
- if (user_arg)
- ret = -EINVAL;
- else
- ret = hidraw_revoke(list);
- break;
+ struct hidraw_list *list = file->private_data;
+
+ if (arg)
+ return -EINVAL;
+
+ return hidraw_revoke(list);
}
default:
- {
- struct hid_device *hid = dev->hid;
- if (_IOC_TYPE(cmd) != 'H') {
- ret = -EINVAL;
- break;
- }
+ /*
+ * None of the above ioctls can return -EAGAIN, so
+ * use it as a marker that we need to check the
+ * variable-length ioctls.
+ */
+ return -EAGAIN;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSFEATURE(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
- break;
- }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGFEATURE(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
- break;
- }
+ return 0;
+}
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSINPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT);
- break;
- }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGINPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT);
- break;
- }
+static long hidraw_rw_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
+ void __user *user_arg)
+{
+ int len = _IOC_SIZE(cmd);
+
+ switch (cmd & ~IOCSIZE_MASK) {
+ case HIDIOCSFEATURE(0):
+ return hidraw_send_report(file, user_arg, len, HID_FEATURE_REPORT);
+ case HIDIOCGFEATURE(0):
+ return hidraw_get_report(file, user_arg, len, HID_FEATURE_REPORT);
+ case HIDIOCSINPUT(0):
+ return hidraw_send_report(file, user_arg, len, HID_INPUT_REPORT);
+ case HIDIOCGINPUT(0):
+ return hidraw_get_report(file, user_arg, len, HID_INPUT_REPORT);
+ case HIDIOCSOUTPUT(0):
+ return hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT);
+ case HIDIOCGOUTPUT(0):
+ return hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT);
+ }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCSOUTPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_send_report(file, user_arg, len, HID_OUTPUT_REPORT);
- break;
- }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGOUTPUT(0))) {
- int len = _IOC_SIZE(cmd);
- ret = hidraw_get_report(file, user_arg, len, HID_OUTPUT_REPORT);
- break;
- }
+ return -EINVAL;
+}
- /* Begin Read-only ioctls. */
- if (_IOC_DIR(cmd) != _IOC_READ) {
- ret = -EINVAL;
- break;
- }
+static long hidraw_ro_variable_size_ioctl(struct file *file, struct hidraw *dev, unsigned int cmd,
+ void __user *user_arg)
+{
+ struct hid_device *hid = dev->hid;
+ int len = _IOC_SIZE(cmd);
+ int field_len;
+
+ switch (cmd & ~IOCSIZE_MASK) {
+ case HIDIOCGRAWNAME(0):
+ field_len = strlen(hid->name) + 1;
+ if (len > field_len)
+ len = field_len;
+ return copy_to_user(user_arg, hid->name, len) ? -EFAULT : len;
+ case HIDIOCGRAWPHYS(0):
+ field_len = strlen(hid->phys) + 1;
+ if (len > field_len)
+ len = field_len;
+ return copy_to_user(user_arg, hid->phys, len) ? -EFAULT : len;
+ case HIDIOCGRAWUNIQ(0):
+ field_len = strlen(hid->uniq) + 1;
+ if (len > field_len)
+ len = field_len;
+ return copy_to_user(user_arg, hid->uniq, len) ? -EFAULT : len;
+ }
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWNAME(0))) {
- int len = strlen(hid->name) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- ret = copy_to_user(user_arg, hid->name, len) ?
- -EFAULT : len;
- break;
- }
+ return -EINVAL;
+}
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWPHYS(0))) {
- int len = strlen(hid->phys) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- ret = copy_to_user(user_arg, hid->phys, len) ?
- -EFAULT : len;
- break;
- }
+static long hidraw_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct inode *inode = file_inode(file);
+ unsigned int minor = iminor(inode);
+ struct hidraw *dev;
+ struct hidraw_list *list = file->private_data;
+ void __user *user_arg = (void __user *)arg;
+ int ret;
- if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) {
- int len = strlen(hid->uniq) + 1;
- if (len > _IOC_SIZE(cmd))
- len = _IOC_SIZE(cmd);
- ret = copy_to_user(user_arg, hid->uniq, len) ?
- -EFAULT : len;
- break;
- }
- }
+ down_read(&minors_rwsem);
+ dev = hidraw_table[minor];
+ if (!dev || !dev->exist || hidraw_is_revoked(list)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (_IOC_TYPE(cmd) != 'H') {
+ ret = -EINVAL;
+ goto out;
+ }
+ if (_IOC_NR(cmd) > HIDIOCTL_LAST || _IOC_NR(cmd) == 0) {
ret = -ENOTTY;
+ goto out;
}
+
+ ret = hidraw_fixed_size_ioctl(file, dev, cmd, user_arg);
+ if (ret != -EAGAIN)
+ goto out;
+
+ switch (_IOC_DIR(cmd)) {
+ case (_IOC_READ | _IOC_WRITE):
+ ret = hidraw_rw_variable_size_ioctl(file, dev, cmd, user_arg);
+ break;
+ case _IOC_READ:
+ ret = hidraw_ro_variable_size_ioctl(file, dev, cmd, user_arg);
+ break;
+ default:
+ /* Any other IOC_DIR is wrong */
+ ret = -EINVAL;
+ }
+
out:
up_read(&minors_rwsem);
return ret;
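/*
 * Illustrative user-space sketch (not part of the patch): the
 * variable-size hidraw ioctls encode the caller's buffer length in the
 * size bits of the ioctl number, which is why the dispatch above can
 * switch on (cmd & ~IOCSIZE_MASK). The device path below is an
 * assumption for the example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	char name[64];
	int fd = open("/dev/hidraw0", O_RDONLY);

	if (fd < 0)
		return 1;

	/* sizeof(name) becomes _IOC_SIZE(cmd) on the kernel side */
	int len = ioctl(fd, HIDIOCGRAWNAME(sizeof(name)), name);

	if (len > 0)
		printf("device name: %s\n", name);
	return 0;
}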
diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
index 1b49243adb16..abd700a101f4 100644
--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
+++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
@@ -76,6 +76,13 @@ static int i2c_hid_acpi_get_descriptor(struct i2c_hid_acpi *ihid_acpi)
return hid_descriptor_address;
}
+static void i2c_hid_acpi_restore_sequence(struct i2chid_ops *ops)
+{
+ struct i2c_hid_acpi *ihid_acpi = container_of(ops, struct i2c_hid_acpi, ops);
+
+ i2c_hid_acpi_get_descriptor(ihid_acpi);
+}
+
static void i2c_hid_acpi_shutdown_tail(struct i2chid_ops *ops)
{
struct i2c_hid_acpi *ihid_acpi = container_of(ops, struct i2c_hid_acpi, ops);
@@ -96,6 +103,7 @@ static int i2c_hid_acpi_probe(struct i2c_client *client)
ihid_acpi->adev = ACPI_COMPANION(dev);
ihid_acpi->ops.shutdown_tail = i2c_hid_acpi_shutdown_tail;
+ ihid_acpi->ops.restore_sequence = i2c_hid_acpi_restore_sequence;
ret = i2c_hid_acpi_get_descriptor(ihid_acpi);
if (ret < 0)
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index d3912e3f2f13..63f46a2e5788 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -112,9 +112,9 @@ struct i2c_hid {
struct i2chid_ops *ops;
struct drm_panel_follower panel_follower;
- struct work_struct panel_follower_prepare_work;
+ struct work_struct panel_follower_work;
bool is_panel_follower;
- bool prepare_work_finished;
+ bool panel_follower_work_finished;
};
static const struct i2c_hid_quirks {
@@ -961,6 +961,14 @@ static void i2c_hid_core_shutdown_tail(struct i2c_hid *ihid)
ihid->ops->shutdown_tail(ihid->ops);
}
+static void i2c_hid_core_restore_sequence(struct i2c_hid *ihid)
+{
+ if (!ihid->ops->restore_sequence)
+ return;
+
+ ihid->ops->restore_sequence(ihid->ops);
+}
+
static int i2c_hid_core_suspend(struct i2c_hid *ihid, bool force_poweroff)
{
struct i2c_client *client = ihid->client;
@@ -1110,10 +1118,10 @@ err_power_down:
return ret;
}
-static void ihid_core_panel_prepare_work(struct work_struct *work)
+static void ihid_core_panel_follower_work(struct work_struct *work)
{
struct i2c_hid *ihid = container_of(work, struct i2c_hid,
- panel_follower_prepare_work);
+ panel_follower_work);
struct hid_device *hid = ihid->hid;
int ret;
@@ -1130,7 +1138,7 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
if (ret)
dev_warn(&ihid->client->dev, "Power on failed: %d\n", ret);
else
- WRITE_ONCE(ihid->prepare_work_finished, true);
+ WRITE_ONCE(ihid->panel_follower_work_finished, true);
/*
* The work APIs provide a number of memory ordering guarantees
@@ -1139,12 +1147,12 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
* guarantee that a write that happened in the work is visible after
* cancel_work_sync(). We'll add a write memory barrier here to match
* with i2c_hid_core_panel_unpreparing() to ensure that our write to
- * prepare_work_finished is visible there.
+ * panel_follower_work_finished is visible there.
*/
smp_wmb();
}
-static int i2c_hid_core_panel_prepared(struct drm_panel_follower *follower)
+static int i2c_hid_core_panel_follower_resume(struct drm_panel_follower *follower)
{
struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
@@ -1152,29 +1160,36 @@ static int i2c_hid_core_panel_prepared(struct drm_panel_follower *follower)
* Powering on a touchscreen can be a slow process. Queue the work to
* the system workqueue so we don't block the panel's power up.
*/
- WRITE_ONCE(ihid->prepare_work_finished, false);
- schedule_work(&ihid->panel_follower_prepare_work);
+ WRITE_ONCE(ihid->panel_follower_work_finished, false);
+ schedule_work(&ihid->panel_follower_work);
return 0;
}
-static int i2c_hid_core_panel_unpreparing(struct drm_panel_follower *follower)
+static int i2c_hid_core_panel_follower_suspend(struct drm_panel_follower *follower)
{
struct i2c_hid *ihid = container_of(follower, struct i2c_hid, panel_follower);
- cancel_work_sync(&ihid->panel_follower_prepare_work);
+ cancel_work_sync(&ihid->panel_follower_work);
- /* Match with ihid_core_panel_prepare_work() */
+ /* Match with ihid_core_panel_follower_work() */
smp_rmb();
- if (!READ_ONCE(ihid->prepare_work_finished))
+ if (!READ_ONCE(ihid->panel_follower_work_finished))
return 0;
return i2c_hid_core_suspend(ihid, true);
}
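/*
 * A minimal, self-contained sketch (not part of the patch) of the
 * smp_wmb()/smp_rmb() pairing used by the two functions above; all
 * names below are illustrative only.
 */
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <asm/barrier.h>

static bool demo_finished;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/* ... slow power-up happens here ... */
	WRITE_ONCE(demo_finished, true);
	smp_wmb();	/* publish demo_finished before the work completes */
}

static bool demo_cancel_and_check(void)
{
	cancel_work_sync(&demo_work);
	smp_rmb();	/* pairs with smp_wmb() in demo_work_fn() */
	return READ_ONCE(demo_finished);
}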
-static const struct drm_panel_follower_funcs i2c_hid_core_panel_follower_funcs = {
- .panel_prepared = i2c_hid_core_panel_prepared,
- .panel_unpreparing = i2c_hid_core_panel_unpreparing,
+static const struct drm_panel_follower_funcs
+ i2c_hid_core_panel_follower_prepare_funcs = {
+ .panel_prepared = i2c_hid_core_panel_follower_resume,
+ .panel_unpreparing = i2c_hid_core_panel_follower_suspend,
+};
+
+static const struct drm_panel_follower_funcs
+ i2c_hid_core_panel_follower_enable_funcs = {
+ .panel_enabled = i2c_hid_core_panel_follower_resume,
+ .panel_disabling = i2c_hid_core_panel_follower_suspend,
};
static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
@@ -1182,7 +1197,10 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
struct device *dev = &ihid->client->dev;
int ret;
- ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_funcs;
+ if (ihid->hid->initial_quirks & HID_QUIRK_POWER_ON_AFTER_BACKLIGHT)
+ ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_enable_funcs;
+ else
+ ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_prepare_funcs;
/*
* If we're not in control of our own power up/power down then we can't
@@ -1237,7 +1255,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
init_waitqueue_head(&ihid->wait);
mutex_init(&ihid->cmd_lock);
mutex_init(&ihid->reset_lock);
- INIT_WORK(&ihid->panel_follower_prepare_work, ihid_core_panel_prepare_work);
+ INIT_WORK(&ihid->panel_follower_work, ihid_core_panel_follower_work);
/* we need to allocate the command buffer without knowing the maximum
* size of the reports. Let's use HID_MIN_BUFFER_SIZE, then we do the
@@ -1360,8 +1378,26 @@ static int i2c_hid_core_pm_resume(struct device *dev)
return i2c_hid_core_resume(ihid);
}
+static int i2c_hid_core_pm_restore(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct i2c_hid *ihid = i2c_get_clientdata(client);
+
+ if (ihid->is_panel_follower)
+ return 0;
+
+ i2c_hid_core_restore_sequence(ihid);
+
+ return i2c_hid_core_resume(ihid);
+}
+
const struct dev_pm_ops i2c_hid_core_pm = {
- SYSTEM_SLEEP_PM_OPS(i2c_hid_core_pm_suspend, i2c_hid_core_pm_resume)
+ .suspend = pm_sleep_ptr(i2c_hid_core_pm_suspend),
+ .resume = pm_sleep_ptr(i2c_hid_core_pm_resume),
+ .freeze = pm_sleep_ptr(i2c_hid_core_pm_suspend),
+ .thaw = pm_sleep_ptr(i2c_hid_core_pm_resume),
+ .poweroff = pm_sleep_ptr(i2c_hid_core_pm_suspend),
+ .restore = pm_sleep_ptr(i2c_hid_core_pm_restore),
};
EXPORT_SYMBOL_GPL(i2c_hid_core_pm);
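/*
 * Background note (not part of the patch): in the hibernation path,
 * .freeze/.thaw run around creating the image, .poweroff runs after the
 * image has been written, and .restore runs in the freshly booted kernel
 * once the image is read back - which is why restore needs the extra
 * i2c_hid_core_restore_sequence() power-up step.
 */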
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-elan.c b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
index 3fcff6daa0d3..0215f217f6d8 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-elan.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-elan.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
+#include <linux/hid.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -23,6 +24,7 @@ struct elan_i2c_hid_chip_data {
unsigned int post_power_delay_ms;
u16 hid_descriptor_address;
const char *main_supply_name;
+ bool power_after_backlight;
};
struct i2c_hid_of_elan {
@@ -97,6 +99,7 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
{
struct i2c_hid_of_elan *ihid_elan;
int ret;
+ u32 quirks = 0;
ihid_elan = devm_kzalloc(&client->dev, sizeof(*ihid_elan), GFP_KERNEL);
if (!ihid_elan)
@@ -131,8 +134,12 @@ static int i2c_hid_of_elan_probe(struct i2c_client *client)
}
}
+ if (ihid_elan->chip_data->power_after_backlight)
+ quirks = HID_QUIRK_POWER_ON_AFTER_BACKLIGHT;
+
ret = i2c_hid_core_probe(client, &ihid_elan->ops,
- ihid_elan->chip_data->hid_descriptor_address, 0);
+ ihid_elan->chip_data->hid_descriptor_address,
+ quirks);
if (ret)
goto err_deassert_reset;
@@ -150,6 +157,7 @@ static const struct elan_i2c_hid_chip_data elan_ekth6915_chip_data = {
.post_gpio_reset_on_delay_ms = 300,
.hid_descriptor_address = 0x0001,
.main_supply_name = "vcc33",
+ .power_after_backlight = true,
};
static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = {
@@ -157,6 +165,7 @@ static const struct elan_i2c_hid_chip_data elan_ekth6a12nay_chip_data = {
.post_gpio_reset_on_delay_ms = 300,
.hid_descriptor_address = 0x0001,
.main_supply_name = "vcc33",
+ .power_after_backlight = true,
};
static const struct elan_i2c_hid_chip_data ilitek_ili9882t_chip_data = {
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
index 2c7b66d5caa0..1724a435c783 100644
--- a/drivers/hid/i2c-hid/i2c-hid.h
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -27,11 +27,13 @@ static inline u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product)
* @power_up: do sequencing to power up the device.
* @power_down: do sequencing to power down the device.
* @shutdown_tail: called at the end of shutdown.
+ * @restore_sequence: do sequencing to restore the device after hibernation.
*/
struct i2chid_ops {
int (*power_up)(struct i2chid_ops *ops);
void (*power_down)(struct i2chid_ops *ops);
void (*shutdown_tail)(struct i2chid_ops *ops);
+ void (*restore_sequence)(struct i2chid_ops *ops);
};
int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 4c861119e97a..abf9c9a31c39 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -481,6 +481,20 @@ out:
return ret;
}
+static void ish_send_reset_notify_ack(struct ishtp_device *dev)
+{
+ /* Read reset ID */
+ u32 reset_id = ish_reg_read(dev, IPC_REG_ISH2HOST_MSG) & 0xFFFF;
+
+ /*
+ * Set HOST2ISH.ILUP. Apparently we need this BEFORE sending
+ * RESET_NOTIFY_ACK - FW will be checking for it
+ */
+ ish_set_host_rdy(dev);
+ /* Send RESET_NOTIFY_ACK (with reset_id) */
+ ipc_send_mng_msg(dev, MNG_RESET_NOTIFY_ACK, &reset_id, sizeof(u32));
+}
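/*
 * Illustrative summary (not part of the patch) of the handshake as
 * implemented by the code above and in ish_fw_reset_handler() below:
 *
 *   FW  -> host: MNG_RESET_NOTIFY (low 16 bits carry reset_id)
 *   host: set HOST2ISH.ILUP, then send MNG_RESET_NOTIFY_ACK(reset_id)
 *   FW  -> host: raises ILUP and ISHTP_READY
 */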
+
#define TIME_SLICE_FOR_FW_RDY_MS 100
#define TIME_SLICE_FOR_INPUT_RDY_MS 100
#define TIMEOUT_FOR_FW_RDY_MS 2000
@@ -496,11 +510,8 @@ out:
*/
static int ish_fw_reset_handler(struct ishtp_device *dev)
{
- uint32_t reset_id;
unsigned long flags;
-
- /* Read reset ID */
- reset_id = ish_reg_read(dev, IPC_REG_ISH2HOST_MSG) & 0xFFFF;
+ int ret;
/* Clear IPC output queue */
spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
@@ -510,30 +521,21 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
/* ISHTP notification in IPC_RESET */
ishtp_reset_handler(dev);
- if (!ish_is_input_ready(dev))
- timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
- TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
-
+ ret = timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+ TIME_SLICE_FOR_INPUT_RDY_MS,
+ TIMEOUT_FOR_INPUT_RDY_MS);
/* ISH FW is dead */
- if (!ish_is_input_ready(dev))
+ if (ret)
return -EPIPE;
/* Send clock sync at once after reset */
ishtp_dev->prev_sync = 0;
- /*
- * Set HOST2ISH.ILUP. Apparently we need this BEFORE sending
- * RESET_NOTIFY_ACK - FW will be checking for it
- */
- ish_set_host_rdy(dev);
- /* Send RESET_NOTIFY_ACK (with reset_id) */
- ipc_send_mng_msg(dev, MNG_RESET_NOTIFY_ACK, &reset_id,
- sizeof(uint32_t));
-
/* Wait for ISH FW's ILUP and ISHTP_READY */
- timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
- TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
- if (!ishtp_fw_is_ready(dev)) {
+ ret = timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ TIME_SLICE_FOR_FW_RDY_MS,
+ TIMEOUT_FOR_FW_RDY_MS);
+ if (ret) {
/* ISH FW is dead */
uint32_t ish_status;
@@ -562,8 +564,6 @@ static void fw_reset_work_fn(struct work_struct *work)
if (!rv) {
/* ISH is ILUP & ISHTP-ready. Restart ISHTP */
msleep_interruptible(TIMEOUT_FOR_HW_RDY_MS);
- ishtp_dev->recvd_hw_ready = 1;
- wake_up_interruptible(&ishtp_dev->wait_hw_ready);
/* ISHTP notification in IPC_RESET sequence completion */
if (!work_pending(work))
@@ -624,15 +624,14 @@ static void recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
break;
case MNG_RESET_NOTIFY:
- if (!ishtp_dev) {
- ishtp_dev = dev;
- }
- schedule_work(&fw_reset_work);
- break;
+ ish_send_reset_notify_ack(ishtp_dev);
+ fallthrough;
case MNG_RESET_NOTIFY_ACK:
dev->recvd_hw_ready = 1;
wake_up_interruptible(&dev->wait_hw_ready);
+ if (!work_pending(&fw_reset_work))
+ queue_work(dev->unbound_wq, &fw_reset_work);
break;
}
}
@@ -729,22 +728,28 @@ int ish_disable_dma(struct ishtp_device *dev)
* ish_wakeup() - wakeup ishfw from waiting-for-host state
* @dev: ishtp device pointer
*
- * Set the dma enable bit and send a void message to FW,
+ * Set the dma enable bit and send an IPC RESET message to FW,
* it will wake up FW from waiting-for-host state.
+ *
+ * Return: 0 on success, else an error code.
*/
-static void ish_wakeup(struct ishtp_device *dev)
+static int ish_wakeup(struct ishtp_device *dev)
{
+ int ret;
+
/* Set dma enable bit */
ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
/*
- * Send 0 IPC message so that ISH FW wakes up if it was already
+ * Send IPC RESET message so that ISH FW wakes up if it was already
* asleep.
*/
- ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
+ ret = ish_ipc_reset(dev);
/* Flush writes to doorbell and REMAP2 */
ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ return ret;
}
/**
@@ -793,11 +798,11 @@ static int _ish_hw_reset(struct ishtp_device *dev)
pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
/* Now we can enable ISH DMA operation and wakeup ISHFW */
- ish_wakeup(dev);
-
- return 0;
+ return ish_wakeup(dev);
}
+#define RECVD_HW_READY_TIMEOUT (10 * HZ)
+
/**
* _ish_ipc_reset() - IPC reset
* @dev: ishtp device pointer
@@ -832,7 +837,8 @@ static int _ish_ipc_reset(struct ishtp_device *dev)
}
wait_event_interruptible_timeout(dev->wait_hw_ready,
- dev->recvd_hw_ready, 2 * HZ);
+ dev->recvd_hw_ready,
+ RECVD_HW_READY_TIMEOUT);
if (!dev->recvd_hw_ready) {
dev_err(dev->devc, "Timed out waiting for HW ready\n");
rv = -ENODEV;
@@ -856,21 +862,7 @@ int ish_hw_start(struct ishtp_device *dev)
set_host_ready(dev);
/* After that we can enable ISH DMA operation and wakeup ISHFW */
- ish_wakeup(dev);
-
- /* wait for FW-initiated reset flow */
- if (!dev->recvd_hw_ready)
- wait_event_interruptible_timeout(dev->wait_hw_ready,
- dev->recvd_hw_ready,
- 10 * HZ);
-
- if (!dev->recvd_hw_ready) {
- dev_err(dev->devc,
- "[ishtp-ish]: Timed out waiting for FW-initiated reset\n");
- return -ENODEV;
- }
-
- return 0;
+ return ish_wakeup(dev);
}
/**
@@ -932,6 +924,25 @@ static const struct ishtp_hw_ops ish_hw_ops = {
.dma_no_cache_snooping = _dma_no_cache_snooping
};
+static void ishtp_free_workqueue(void *wq)
+{
+ destroy_workqueue(wq);
+}
+
+static struct workqueue_struct *devm_ishtp_alloc_workqueue(struct device *dev)
+{
+ struct workqueue_struct *wq;
+
+ wq = alloc_workqueue("ishtp_unbound_%d", WQ_UNBOUND, 0, dev->id);
+ if (!wq)
+ return NULL;
+
+ if (devm_add_action_or_reset(dev, ishtp_free_workqueue, wq))
+ return NULL;
+
+ return wq;
+}
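/*
 * Background note (not part of the patch): devm_add_action_or_reset()
 * runs the cleanup action immediately if registration fails, so the
 * workqueue is destroyed both on that error path and automatically at
 * device teardown, with no explicit remove() hook needed.
 */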
+
/**
* ish_dev_init() - Initialize ISH device
* @pdev: PCI device
@@ -952,6 +963,10 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
if (!dev)
return NULL;
+ dev->unbound_wq = devm_ishtp_alloc_workqueue(&pdev->dev);
+ if (!dev->unbound_wq)
+ return NULL;
+
dev->devc = &pdev->dev;
ishtp_device_init(dev);
@@ -981,6 +996,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
list_add_tail(&tx_buf->link, &dev->wr_free_list);
}
+ ishtp_dev = dev;
ret = devm_work_autocancel(&pdev->dev, &fw_reset_work, fw_reset_work_fn);
if (ret) {
dev_err(dev->devc, "Failed to initialise FW reset work\n");
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 9d150ce234f2..1612e8cb23f0 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -147,6 +147,12 @@ static inline bool ish_should_enter_d0i3(struct pci_dev *pdev)
static inline bool ish_should_leave_d0i3(struct pci_dev *pdev)
{
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+ u32 fwsts = dev->ops->get_fw_status(dev);
+
+ if (dev->suspend_flag || !IPC_IS_ISH_ILUP(fwsts))
+ return false;
+
return !pm_resume_via_firmware() || pdev->device == PCI_DEVICE_ID_INTEL_ISH_CHV;
}
@@ -277,10 +283,8 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work)
{
struct pci_dev *pdev = to_pci_dev(ish_resume_device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
- uint32_t fwsts = dev->ops->get_fw_status(dev);
- if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag
- && IPC_IS_ISH_ILUP(fwsts)) {
+ if (ish_should_leave_d0i3(pdev)) {
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(pdev->irq);
@@ -384,12 +388,29 @@ static int __maybe_unused ish_resume(struct device *device)
ish_resume_device = device;
dev->resume_flag = 1;
- schedule_work(&resume_work);
+ /* If ISH resumes from D3, reset ishtp clients before returning */
+ if (!ish_should_leave_d0i3(pdev))
+ ishtp_reset_handler(dev);
+
+ queue_work(dev->unbound_wq, &resume_work);
return 0;
}
-static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
+static int __maybe_unused ish_freeze(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+
+ return pci_save_state(pdev);
+}
+
+static const struct dev_pm_ops __maybe_unused ish_pm_ops = {
+ .suspend = pm_sleep_ptr(ish_suspend),
+ .resume = pm_sleep_ptr(ish_resume),
+ .freeze = pm_sleep_ptr(ish_freeze),
+ .restore = pm_sleep_ptr(ish_resume),
+ .poweroff = pm_sleep_ptr(ish_suspend),
+};
static ssize_t base_version_show(struct device *cdev,
struct device_attribute *attr, char *buf)
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index d8c3c54a8c0f..f37b3bc2bb7d 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -757,8 +757,15 @@ static void hid_ishtp_cl_resume_handler(struct work_struct *work)
struct ishtp_cl *hid_ishtp_cl = client_data->hid_ishtp_cl;
if (ishtp_wait_resume(ishtp_get_ishtp_device(hid_ishtp_cl))) {
- client_data->suspended = false;
- wake_up_interruptible(&client_data->ishtp_resume_wait);
+ /*
+ * Clear the suspended flag only when the connection is established.
+ * If the connection is not established, the suspended flag will be cleared after
+ * the connection is made.
+ */
+ if (ishtp_get_connection_state(hid_ishtp_cl) == ISHTP_CL_CONNECTED) {
+ client_data->suspended = false;
+ wake_up_interruptible(&client_data->ishtp_resume_wait);
+ }
} else {
hid_ishtp_trace(client_data, "hid client: wait for resume timed out");
dev_err(cl_data_to_dev(client_data), "wait for resume timed out");
@@ -860,7 +867,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- schedule_work(&client_data->work);
+ queue_work(ishtp_get_workqueue(cl_device), &client_data->work);
return 0;
}
@@ -902,7 +909,7 @@ static int hid_ishtp_cl_resume(struct device *device)
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
hid_ishtp_cl);
- schedule_work(&client_data->resume_work);
+ queue_work(ishtp_get_workqueue(cl_device), &client_data->resume_work);
return 0;
}
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 93a0432e7058..c6ce37244e49 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -541,7 +541,7 @@ void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device)
return;
if (device->event_cb)
- schedule_work(&device->event_work);
+ queue_work(device->ishtp_dev->unbound_wq, &device->event_work);
}
/**
@@ -877,6 +877,22 @@ struct device *ishtp_get_pci_device(struct ishtp_cl_device *device)
EXPORT_SYMBOL(ishtp_get_pci_device);
/**
+ * ishtp_get_workqueue - Retrieve the workqueue associated with an ISHTP device
+ * @cl_device: Pointer to the ISHTP client device structure
+ *
+ * Returns the workqueue_struct pointer (unbound_wq) associated with the given
+ * ISHTP client device. This workqueue is typically used for scheduling work
+ * related to the device.
+ *
+ * Return: Pointer to struct workqueue_struct.
+ */
+struct workqueue_struct *ishtp_get_workqueue(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->ishtp_dev->unbound_wq;
+}
+EXPORT_SYMBOL(ishtp_get_workqueue);
+
+/**
* ishtp_trace_callback() - Return trace callback
* @cl_device: ISH-TP client device instance
*
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 21a2c0773cc2..40f510b1c072 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -1261,6 +1261,12 @@ void ishtp_set_connection_state(struct ishtp_cl *cl, int state)
}
EXPORT_SYMBOL(ishtp_set_connection_state);
+int ishtp_get_connection_state(struct ishtp_cl *cl)
+{
+ return cl->state;
+}
+EXPORT_SYMBOL(ishtp_get_connection_state);
+
void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id)
{
cl->fw_client_id = fw_client_id;
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
index 8ee5467127d8..97c4fcd9e3c6 100644
--- a/drivers/hid/intel-ish-hid/ishtp/hbm.c
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -573,7 +573,7 @@ void ishtp_hbm_dispatch(struct ishtp_device *dev,
/* Start firmware loading process if it has loader capability */
if (version_res->host_version_supported & ISHTP_SUPPORT_CAP_LOADER)
- schedule_work(&dev->work_fw_loader);
+ queue_work(dev->unbound_wq, &dev->work_fw_loader);
dev->version.major_version = HBM_MAJOR_VERSION;
dev->version.minor_version = HBM_MINOR_VERSION;
@@ -864,7 +864,7 @@ void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
(RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
- schedule_work(&dev->bh_hbm_work);
+ queue_work(dev->unbound_wq, &dev->bh_hbm_work);
eoi:
return;
}
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 23db97ecf21c..4b0596eadf1c 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -175,6 +175,9 @@ struct ishtp_device {
struct hbm_version version;
int transfer_path; /* Choice of transfer path: IPC or DMA */
+ /* Dedicated unbound workqueue for the ishtp device */
+ struct workqueue_struct *unbound_wq;
+
/* work structure for scheduling firmware loading tasks */
struct work_struct work_fw_loader;
/* waitq for waiting for command response from the firmware loader */
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
index 854926b3cfd4..cfda66ee4895 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/pci-quicki2c.c
@@ -23,6 +23,7 @@
static struct quicki2c_ddata ptl_ddata = {
.max_detect_size = MAX_RX_DETECT_SIZE_PTL,
+ .max_interrupt_delay = MAX_RX_INTERRUPT_DELAY,
};
/* THC QuickI2C ACPI method to get device properties */
@@ -200,6 +201,21 @@ static int quicki2c_get_acpi_resources(struct quicki2c_device *qcdev)
return -EOPNOTSUPP;
}
+ if (qcdev->ddata) {
+ qcdev->i2c_max_frame_size_enable = i2c_config.FSEN;
+ qcdev->i2c_int_delay_enable = i2c_config.INDE;
+
+ if (i2c_config.FSVL <= qcdev->ddata->max_detect_size)
+ qcdev->i2c_max_frame_size = i2c_config.FSVL;
+ else
+ qcdev->i2c_max_frame_size = qcdev->ddata->max_detect_size;
+
+ if (i2c_config.INDV <= qcdev->ddata->max_interrupt_delay)
+ qcdev->i2c_int_delay = i2c_config.INDV;
+ else
+ qcdev->i2c_int_delay = qcdev->ddata->max_interrupt_delay;
+ }
+
return 0;
}
@@ -328,7 +344,6 @@ exit:
if (try_recover(qcdev))
qcdev->state = QUICKI2C_DISABLED;
- pm_runtime_mark_last_busy(qcdev->dev);
pm_runtime_put_autosuspend(qcdev->dev);
return IRQ_HANDLED;
@@ -441,17 +456,24 @@ static void quicki2c_dma_adv_enable(struct quicki2c_device *qcdev)
* max input length <= THC detect capability, enable the feature with device
* max input length.
*/
- if (qcdev->ddata->max_detect_size >=
- le16_to_cpu(qcdev->dev_desc.max_input_len)) {
- thc_i2c_set_rx_max_size(qcdev->thc_hw,
- le16_to_cpu(qcdev->dev_desc.max_input_len));
+ if (qcdev->i2c_max_frame_size_enable) {
+ if (qcdev->i2c_max_frame_size >=
+ le16_to_cpu(qcdev->dev_desc.max_input_len)) {
+ thc_i2c_set_rx_max_size(qcdev->thc_hw,
+ le16_to_cpu(qcdev->dev_desc.max_input_len));
+ } else {
+ dev_warn(qcdev->dev,
+ "Max frame size is smaller than hid max input length!");
+ thc_i2c_set_rx_max_size(qcdev->thc_hw,
+ qcdev->i2c_max_frame_size);
+ }
thc_i2c_rx_max_size_enable(qcdev->thc_hw, true);
}
/* If platform supports interrupt delay feature, enable it with given delay */
- if (qcdev->ddata->interrupt_delay) {
+ if (qcdev->i2c_int_delay_enable) {
thc_i2c_set_rx_int_delay(qcdev->thc_hw,
- qcdev->ddata->interrupt_delay);
+ qcdev->i2c_int_delay * 10);
thc_i2c_rx_int_delay_enable(qcdev->thc_hw, true);
}
}
@@ -464,10 +486,10 @@ static void quicki2c_dma_adv_enable(struct quicki2c_device *qcdev)
*/
static void quicki2c_dma_adv_disable(struct quicki2c_device *qcdev)
{
- if (qcdev->ddata->max_detect_size)
+ if (qcdev->i2c_max_frame_size_enable)
thc_i2c_rx_max_size_enable(qcdev->thc_hw, false);
- if (qcdev->ddata->interrupt_delay)
+ if (qcdev->i2c_int_delay_enable)
thc_i2c_rx_int_delay_enable(qcdev->thc_hw, false);
}
@@ -712,7 +734,6 @@ static int quicki2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Enable runtime power management */
pm_runtime_use_autosuspend(qcdev->dev);
pm_runtime_set_autosuspend_delay(qcdev->dev, DEFAULT_AUTO_SUSPEND_DELAY_MS);
- pm_runtime_mark_last_busy(qcdev->dev);
pm_runtime_put_noidle(qcdev->dev);
pm_runtime_put_autosuspend(qcdev->dev);
@@ -997,6 +1018,8 @@ static const struct pci_device_id quicki2c_pci_tbl[] = {
{ PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
{ PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT1, &ptl_ddata) },
+ { PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_I2C_PORT2, &ptl_ddata) },
{ }
};
MODULE_DEVICE_TABLE(pci, quicki2c_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
index d412eafcf9ea..2cb5471a8133 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-dev.h
@@ -13,6 +13,8 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_I2C_PORT2 0xE34A
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT1 0xE448
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_I2C_PORT2 0xE44A
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT1 0x4D48
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_I2C_PORT2 0x4D4A
/* Packet size value, the unit is 16 bytes */
#define MAX_PACKET_SIZE_VALUE_LNL 256
@@ -38,6 +40,8 @@
/* PTL Max packet size detection capability is 255 Bytes */
#define MAX_RX_DETECT_SIZE_PTL 255
+/* Max interrupt delay capability is 2.56ms */
+#define MAX_RX_INTERRUPT_DELAY 256
/* Default interrupt delay is 1ms, suitable for most devices */
#define DEFAULT_INTERRUPT_DELAY_US (1 * USEC_PER_MSEC)
@@ -101,6 +105,10 @@ struct quicki2c_subip_acpi_parameter {
* @HMTD: High Speed Mode Plus (3.4Mbits/sec) Serial Data Line Transmit HOLD Period
* @HMRD: High Speed Mode Plus (3.4Mbits/sec) Serial Data Line Receive HOLD Period
* @HMSL: Maximum length (in ic_clk_cycles) of suppressed spikes in High Speed Mode
+ * @FSEN: Maximum Frame Size Feature Enable Control
+ * @FSVL: Maximum Frame Size Value (unit in Bytes)
+ * @INDE: Interrupt Delay Feature Enable Control
+ * @INDV: Interrupt Delay Value (unit in 10 us)
*
* Those properties get from QUICKI2C_ACPI_METHOD_NAME_ISUB method, used for
* I2C timing configure.
@@ -127,17 +135,22 @@ struct quicki2c_subip_acpi_config {
u64 HMTD;
u64 HMRD;
u64 HMSL;
+
+ u64 FSEN;
+ u64 FSVL;
+ u64 INDE;
+ u64 INDV;
u8 reserved;
};
/**
* struct quicki2c_ddata - Driver specific data for quicki2c device
* @max_detect_size: Identify max packet size detect for rx
- * @interrupt_delay: Identify interrupt detect delay for rx
+ * @max_interrupt_delay: Identify max interrupt detect delay for rx
*/
struct quicki2c_ddata {
u32 max_detect_size;
- u32 interrupt_delay;
+ u32 max_interrupt_delay;
};
struct device;
@@ -170,6 +183,10 @@ struct acpi_device;
* @report_len: The length of input/output report packet
* @reset_ack_wq: Workqueue for waiting reset response from device
* @reset_ack: Indicate reset response received or not
+ * @i2c_max_frame_size_enable: Indicate max frame size feature enabled or not
+ * @i2c_max_frame_size: Max RX frame size (unit in Bytes)
+ * @i2c_int_delay_enable: Indicate interrupt delay feature enabled or not
+ * @i2c_int_delay: Interrupt detection delay value (unit in 10 us)
*/
struct quicki2c_device {
struct device *dev;
@@ -200,6 +217,11 @@ struct quicki2c_device {
wait_queue_head_t reset_ack_wq;
bool reset_ack;
+
+ u32 i2c_max_frame_size_enable;
+ u32 i2c_max_frame_size;
+ u32 i2c_int_delay_enable;
+ u32 i2c_int_delay;
};
#endif /* _QUICKI2C_DEV_H_ */
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
index 5c3ec95bb3fd..834a537b6780 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
@@ -72,7 +72,6 @@ static int quicki2c_hid_raw_request(struct hid_device *hid,
break;
}
- pm_runtime_mark_last_busy(qcdev->dev);
pm_runtime_put_autosuspend(qcdev->dev);
return ret;
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
index 5e5f179dd113..ad6bd59963b2 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -33,6 +33,10 @@ struct quickspi_driver_data ptl = {
.max_packet_size_value = MAX_PACKET_SIZE_VALUE_LNL,
};
+struct quickspi_driver_data arl = {
+ .max_packet_size_value = MAX_PACKET_SIZE_VALUE_MTL,
+};
+
/* THC QuickSPI ACPI method to get device properties */
/* HIDSPI Method: {6e2ac436-0fcf-41af-a265-b32a220dcfab} */
static guid_t hidspi_guid =
@@ -335,7 +339,6 @@ end:
if (try_recover(qsdev))
qsdev->state = QUICKSPI_DISABLED;
- pm_runtime_mark_last_busy(qsdev->dev);
pm_runtime_put_autosuspend(qsdev->dev);
return IRQ_HANDLED;
@@ -670,7 +673,6 @@ static int quickspi_probe(struct pci_dev *pdev,
/* Enable runtime power management */
pm_runtime_use_autosuspend(qsdev->dev);
pm_runtime_set_autosuspend_delay(qsdev->dev, DEFAULT_AUTO_SUSPEND_DELAY_MS);
- pm_runtime_mark_last_busy(qsdev->dev);
pm_runtime_put_noidle(qsdev->dev);
pm_runtime_put_autosuspend(qsdev->dev);
@@ -976,6 +978,10 @@ static const struct pci_device_id quickspi_pci_tbl[] = {
{PCI_DEVICE_DATA(INTEL, THC_PTL_H_DEVICE_ID_SPI_PORT2, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT1, &ptl), },
{PCI_DEVICE_DATA(INTEL, THC_PTL_U_DEVICE_ID_SPI_PORT2, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT1, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_WCL_DEVICE_ID_SPI_PORT2, &ptl), },
+ {PCI_DEVICE_DATA(INTEL, THC_ARL_DEVICE_ID_SPI_PORT1, &arl), },
+ {PCI_DEVICE_DATA(INTEL, THC_ARL_DEVICE_ID_SPI_PORT2, &arl), },
{}
};
MODULE_DEVICE_TABLE(pci, quickspi_pci_tbl);
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
index 6fdf674b21c5..c30e1a42eb09 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-dev.h
@@ -19,6 +19,10 @@
#define PCI_DEVICE_ID_INTEL_THC_PTL_H_DEVICE_ID_SPI_PORT2 0xE34B
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT1 0xE449
#define PCI_DEVICE_ID_INTEL_THC_PTL_U_DEVICE_ID_SPI_PORT2 0xE44B
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT1 0x4D49
+#define PCI_DEVICE_ID_INTEL_THC_WCL_DEVICE_ID_SPI_PORT2 0x4D4B
+#define PCI_DEVICE_ID_INTEL_THC_ARL_DEVICE_ID_SPI_PORT1 0x7749
+#define PCI_DEVICE_ID_INTEL_THC_ARL_DEVICE_ID_SPI_PORT2 0x774B
/* HIDSPI special ACPI parameters DSM methods */
#define ACPI_QUICKSPI_REVISION_NUM 2
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
index ad52e402c28a..82c72bfa2795 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
@@ -71,7 +71,6 @@ static int quickspi_hid_raw_request(struct hid_device *hid,
break;
}
- pm_runtime_mark_last_busy(qsdev->dev);
pm_runtime_put_autosuspend(qsdev->dev);
return ret;
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
index e6ba2ddcc9cb..16f780bc879b 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
@@ -280,8 +280,7 @@ int reset_tic(struct quickspi_device *qsdev)
qsdev->reset_ack = false;
- /* First interrupt uses level trigger to avoid missing interrupt */
- thc_int_trigger_type_select(qsdev->thc_hw, false);
+ thc_int_trigger_type_select(qsdev->thc_hw, true);
ret = acpi_tic_reset(qsdev);
if (ret)
diff --git a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
index e1cb9b117ebc..636a68306501 100644
--- a/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
+++ b/drivers/hid/intel-thc-hid/intel-thc/intel-thc-dev.c
@@ -4,6 +4,7 @@
#include <linux/bitfield.h>
#include <linux/math.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include "intel-thc-dev.h"
#include "intel-thc-hw.h"
@@ -664,7 +665,7 @@ int thc_interrupt_quiesce(const struct thc_device *dev, bool int_quiesce)
if (ret) {
dev_err_once(dev->dev,
"Timeout while waiting THC idle, target quiesce state = %s\n",
- int_quiesce ? "true" : "false");
+ str_true_false(int_quiesce));
return ret;
}
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index 614a20b62023..95377c5f6335 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -9,12 +9,11 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "hid-pidff.h"
+#include <linux/hid.h>
#include <linux/input.h>
+#include <linux/minmax.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/hid.h>
-#include <linux/minmax.h>
-
#define PID_EFFECTS_MAX 64
#define PID_INFINITE U16_MAX
@@ -33,7 +32,7 @@
#define PID_DEVICE_CONTROL 6
#define PID_CREATE_NEW_EFFECT 7
-#define PID_REQUIRED_REPORTS 7
+#define PID_REQUIRED_REPORTS 8
#define PID_SET_ENVELOPE 8
#define PID_SET_CONDITION 9
@@ -51,6 +50,7 @@ static const u8 pidff_reports[] = {
/* PID special fields */
#define PID_EFFECT_TYPE 0x25
+#define PID_AXES_ENABLE 0x55
#define PID_DIRECTION 0x57
#define PID_EFFECT_OPERATION_ARRAY 0x78
#define PID_BLOCK_LOAD_STATUS 0x8b
@@ -141,37 +141,74 @@ static const u8 pidff_effect_types[] = {
#define PID_BLOCK_LOAD_SUCCESS 0
#define PID_BLOCK_LOAD_FULL 1
#define PID_BLOCK_LOAD_ERROR 2
-static const u8 pidff_block_load_status[] = { 0x8c, 0x8d, 0x8e};
+static const u8 pidff_block_load_status[] = { 0x8c, 0x8d, 0x8e };
#define PID_EFFECT_START 0
#define PID_EFFECT_STOP 1
static const u8 pidff_effect_operation_status[] = { 0x79, 0x7b };
-/* Polar direction 90 degrees (East) */
-#define PIDFF_FIXED_WHEEL_DIRECTION 0x4000
+#define PID_DIRECTION_NORTH 0x0000
+#define PID_DIRECTION_EAST 0x4000
+#define PID_DIRECTION_SOUTH 0x8000
+#define PID_DIRECTION_WEST 0xc000
+
+#define PIDFF_FIXED_WHEEL_DIRECTION PID_DIRECTION_EAST
+
+/* AXES_ENABLE and DIRECTION axes */
+enum pid_axes {
+ PID_AXIS_X,
+ PID_AXIS_Y,
+ PID_AXIS_Z,
+ PID_AXIS_RX,
+ PID_AXIS_RY,
+ PID_AXIS_RZ,
+ PID_AXIS_SLIDER,
+ PID_AXIS_DIAL,
+ PID_AXIS_WHEEL,
+ PID_AXES_COUNT,
+};
+static const u8 pidff_direction_axis[] = {
+ HID_USAGE & HID_GD_X,
+ HID_USAGE & HID_GD_Y,
+ HID_USAGE & HID_GD_Z,
+ HID_USAGE & HID_GD_RX,
+ HID_USAGE & HID_GD_RY,
+ HID_USAGE & HID_GD_RZ,
+ HID_USAGE & HID_GD_SLIDER,
+ HID_USAGE & HID_GD_DIAL,
+ HID_USAGE & HID_GD_WHEEL,
+};
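/*
 * Illustrative note (not part of the patch): the HID_GD_* constants
 * pack usage page and usage ID into one value, e.g.
 * HID_GD_X == 0x00010030. Masking with HID_USAGE (0x0000ffff) keeps
 * only the usage ID, so the table stores 0x30, 0x31, ... for X, Y, ...
 */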
struct pidff_usage {
struct hid_field *field;
s32 *value;
};
+struct pidff_effect {
+ int pid_id;
+ int is_infinite;
+ unsigned int loop_count;
+};
+
struct pidff_device {
struct hid_device *hid;
- struct hid_report *reports[sizeof(pidff_reports)];
+ struct hid_report *reports[ARRAY_SIZE(pidff_reports)];
- struct pidff_usage set_effect[sizeof(pidff_set_effect)];
- struct pidff_usage set_envelope[sizeof(pidff_set_envelope)];
- struct pidff_usage set_condition[sizeof(pidff_set_condition)];
- struct pidff_usage set_periodic[sizeof(pidff_set_periodic)];
- struct pidff_usage set_constant[sizeof(pidff_set_constant)];
- struct pidff_usage set_ramp[sizeof(pidff_set_ramp)];
+ struct pidff_usage set_effect[ARRAY_SIZE(pidff_set_effect)];
+ struct pidff_usage set_envelope[ARRAY_SIZE(pidff_set_envelope)];
+ struct pidff_usage set_condition[ARRAY_SIZE(pidff_set_condition)];
+ struct pidff_usage set_periodic[ARRAY_SIZE(pidff_set_periodic)];
+ struct pidff_usage set_constant[ARRAY_SIZE(pidff_set_constant)];
+ struct pidff_usage set_ramp[ARRAY_SIZE(pidff_set_ramp)];
- struct pidff_usage device_gain[sizeof(pidff_device_gain)];
- struct pidff_usage block_load[sizeof(pidff_block_load)];
- struct pidff_usage pool[sizeof(pidff_pool)];
- struct pidff_usage effect_operation[sizeof(pidff_effect_operation)];
- struct pidff_usage block_free[sizeof(pidff_block_free)];
+ struct pidff_usage device_gain[ARRAY_SIZE(pidff_device_gain)];
+ struct pidff_usage block_load[ARRAY_SIZE(pidff_block_load)];
+ struct pidff_usage pool[ARRAY_SIZE(pidff_pool)];
+ struct pidff_usage effect_operation[ARRAY_SIZE(pidff_effect_operation)];
+ struct pidff_usage block_free[ARRAY_SIZE(pidff_block_free)];
+
+ struct pidff_effect effect[PID_EFFECTS_MAX];
/*
* Special field is a field that is not composed of
@@ -184,6 +221,7 @@ struct pidff_device {
/* Special fields in set_effect */
struct hid_field *set_effect_type;
struct hid_field *effect_direction;
+ struct hid_field *axes_enable;
/* Special field in device_control */
struct hid_field *device_control;
@@ -194,17 +232,86 @@ struct pidff_device {
/* Special field in effect_operation */
struct hid_field *effect_operation_status;
- int control_id[sizeof(pidff_device_control)];
- int type_id[sizeof(pidff_effect_types)];
- int status_id[sizeof(pidff_block_load_status)];
- int operation_id[sizeof(pidff_effect_operation_status)];
-
- int pid_id[PID_EFFECTS_MAX];
+ int control_id[ARRAY_SIZE(pidff_device_control)];
+ int type_id[ARRAY_SIZE(pidff_effect_types)];
+ int status_id[ARRAY_SIZE(pidff_block_load_status)];
+ int operation_id[ARRAY_SIZE(pidff_effect_operation_status)];
+ int direction_axis_id[ARRAY_SIZE(pidff_direction_axis)];
u32 quirks;
u8 effect_count;
+ u8 axis_count;
};
+static int pidff_is_effect_conditional(struct ff_effect *effect)
+{
+ return effect->type == FF_SPRING ||
+ effect->type == FF_DAMPER ||
+ effect->type == FF_INERTIA ||
+ effect->type == FF_FRICTION;
+}
+
+static int pidff_is_duration_infinite(u16 duration)
+{
+ return duration == FF_INFINITE || duration == PID_INFINITE;
+}
+
+/*
+ * Get PID effect index from FF effect type.
+ * Return -EINVAL if invalid.
+ */
+static int pidff_effect_ff_to_pid(struct ff_effect *effect)
+{
+ switch (effect->type) {
+ case FF_CONSTANT:
+ return PID_CONSTANT;
+ case FF_RAMP:
+ return PID_RAMP;
+ case FF_SPRING:
+ return PID_SPRING;
+ case FF_DAMPER:
+ return PID_DAMPER;
+ case FF_INERTIA:
+ return PID_INERTIA;
+ case FF_FRICTION:
+ return PID_FRICTION;
+ case FF_PERIODIC:
+ switch (effect->u.periodic.waveform) {
+ case FF_SQUARE:
+ return PID_SQUARE;
+ case FF_TRIANGLE:
+ return PID_TRIANGLE;
+ case FF_SINE:
+ return PID_SINE;
+ case FF_SAW_UP:
+ return PID_SAW_UP;
+ case FF_SAW_DOWN:
+ return PID_SAW_DOWN;
+ }
+ }
+ pr_err("invalid effect type\n");
+ return -EINVAL;
+}
+
+/*
+ * Get the effect type id from the device descriptor.
+ * Return 0 if invalid.
+ */
+static int pidff_get_effect_type_id(struct pidff_device *pidff,
+ struct ff_effect *effect)
+{
+ int id = pidff_effect_ff_to_pid(effect);
+
+ if (id < 0)
+ return 0;
+
+ if (effect->type == FF_PERIODIC &&
+ pidff->quirks & HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY)
+ id = PID_SINE;
+
+ return pidff->type_id[id];
+}
+
/*
* Clamp value for a given field
*/
@@ -219,7 +326,7 @@ static s32 pidff_clamp(s32 i, struct hid_field *field)
static int pidff_rescale(int i, int max, struct hid_field *field)
{
return i * (field->logical_maximum - field->logical_minimum) / max +
- field->logical_minimum;
+ field->logical_minimum;
}
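/*
 * Worked example (not part of the patch): for a field with logical
 * range [0, 10000], rescaling i = 0x8000 out of max = 0xffff gives
 * 0x8000 * 10000 / 0xffff + 0 = 5000, the midpoint of the device's
 * range.
 */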
/*
@@ -265,28 +372,24 @@ static void pidff_set_signed(struct pidff_usage *usage, s16 value)
else {
if (value < 0)
usage->value[0] =
- pidff_rescale(-value, -S16_MIN, usage->field);
+ pidff_rescale(-value, -S16_MIN, usage->field);
else
usage->value[0] =
- pidff_rescale(value, S16_MAX, usage->field);
+ pidff_rescale(value, S16_MAX, usage->field);
}
pr_debug("calculated from %d to %d\n", value, usage->value[0]);
}
static void pidff_set_time(struct pidff_usage *usage, u16 time)
{
- usage->value[0] = pidff_clamp(
- pidff_rescale_time(time, usage->field), usage->field);
+ usage->value[0] = pidff_clamp(pidff_rescale_time(time, usage->field),
+ usage->field);
}
static void pidff_set_duration(struct pidff_usage *usage, u16 duration)
{
- /* Infinite value conversion from Linux API -> PID */
- if (duration == FF_INFINITE)
- duration = PID_INFINITE;
-
/* PID defines INFINITE as the max possible value for duration field */
- if (duration == PID_INFINITE) {
+ if (pidff_is_duration_infinite(duration)) {
usage->value[0] = (1U << usage->field->report_size) - 1;
return;
}
@@ -294,6 +397,43 @@ static void pidff_set_duration(struct pidff_usage *usage, u16 duration)
pidff_set_time(usage, duration);
}
+static void pidff_set_effect_direction(struct pidff_device *pidff,
+ struct ff_effect *effect)
+{
+ u16 direction = effect->direction;
+ int direction_enable = 1;
+
+ /* Use fixed direction if needed */
+ if (pidff->quirks & HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION &&
+ pidff_is_effect_conditional(effect))
+ direction = PIDFF_FIXED_WHEEL_DIRECTION;
+
+ pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = direction_enable;
+ pidff->effect_direction->value[0] =
+ pidff_rescale(direction, U16_MAX, pidff->effect_direction);
+
+ if (direction_enable)
+ return;
+
+ /*
+ * For use with an improved FFB API: we want to read the selected
+ * axes and their directions from the effect struct and enable
+ * only those. For now, enable all axes.
+ */
+ for (int i = 0; i < PID_AXES_COUNT; i++) {
+ /* HID index starts with 1 */
+ int index = pidff->direction_axis_id[i] - 1;
+
+ if (index < 0)
+ continue;
+
+ pidff->axes_enable->value[index] = 1;
+ pidff->effect_direction->value[index] = pidff_rescale(
+ direction, U16_MAX, pidff->effect_direction);
+ }
+}
+
/*
* Send envelope report to the device
*/
@@ -313,16 +453,12 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
pidff->set_envelope[PID_FADE_LEVEL].field);
pidff_set_time(&pidff->set_envelope[PID_ATTACK_TIME],
- envelope->attack_length);
+ envelope->attack_length);
pidff_set_time(&pidff->set_envelope[PID_FADE_TIME],
- envelope->fade_length);
-
- hid_dbg(pidff->hid, "attack %u => %d\n",
- envelope->attack_level,
- pidff->set_envelope[PID_ATTACK_LEVEL].value[0]);
+ envelope->fade_length);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_ENVELOPE],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -331,7 +467,7 @@ static void pidff_set_envelope_report(struct pidff_device *pidff,
static int pidff_needs_set_envelope(struct ff_envelope *envelope,
struct ff_envelope *old)
{
- bool needs_new_envelope;
+ int needs_new_envelope;
needs_new_envelope = envelope->attack_level != 0 ||
envelope->fade_level != 0 ||
@@ -339,8 +475,7 @@ static int pidff_needs_set_envelope(struct ff_envelope *envelope,
envelope->fade_length != 0;
if (!needs_new_envelope)
- return false;
-
+ return 0;
if (!old)
return needs_new_envelope;
@@ -353,8 +488,8 @@ static int pidff_needs_set_envelope(struct ff_envelope *envelope,
/*
* Send constant force report to the device
*/
-static void pidff_set_constant_force_report(struct pidff_device *pidff,
- struct ff_effect *effect)
+static void pidff_set_constant_report(struct pidff_device *pidff,
+ struct ff_effect *effect)
{
pidff->set_constant[PID_EFFECT_BLOCK_INDEX].value[0] =
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
@@ -362,7 +497,7 @@ static void pidff_set_constant_force_report(struct pidff_device *pidff,
effect->u.constant.level);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_CONSTANT],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -386,28 +521,23 @@ static void pidff_set_effect_report(struct pidff_device *pidff,
pidff->create_new_effect_type->value[0];
pidff_set_duration(&pidff->set_effect[PID_DURATION],
- effect->replay.length);
+ effect->replay.length);
pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button;
pidff_set_time(&pidff->set_effect[PID_TRIGGER_REPEAT_INT],
- effect->trigger.interval);
+ effect->trigger.interval);
pidff->set_effect[PID_GAIN].value[0] =
pidff->set_effect[PID_GAIN].field->logical_maximum;
- pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
- /* Use fixed direction if needed */
- pidff->effect_direction->value[0] = pidff_rescale(
- pidff->quirks & HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION ?
- PIDFF_FIXED_WHEEL_DIRECTION : effect->direction,
- U16_MAX, pidff->effect_direction);
+ pidff_set_effect_direction(pidff, effect);
/* Omit setting delay field if it's missing */
if (!(pidff->quirks & HID_PIDFF_QUIRK_MISSING_DELAY))
pidff_set_time(&pidff->set_effect[PID_START_DELAY],
- effect->replay.delay);
+ effect->replay.delay);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -437,10 +567,10 @@ static void pidff_set_periodic_report(struct pidff_device *pidff,
effect->u.periodic.offset);
pidff_set(&pidff->set_periodic[PID_PHASE], effect->u.periodic.phase);
pidff_set_time(&pidff->set_periodic[PID_PERIOD],
- effect->u.periodic.period);
+ effect->u.periodic.period);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_PERIODIC],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -487,7 +617,7 @@ static void pidff_set_condition_report(struct pidff_device *pidff,
pidff_set(&pidff->set_condition[PID_DEAD_BAND],
effect->u.condition[i].deadband);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_CONDITION],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
}
@@ -518,8 +648,8 @@ static int pidff_needs_set_condition(struct ff_effect *effect,
/*
* Send ramp force report to the device
*/
-static void pidff_set_ramp_force_report(struct pidff_device *pidff,
- struct ff_effect *effect)
+static void pidff_set_ramp_report(struct pidff_device *pidff,
+ struct ff_effect *effect)
{
pidff->set_ramp[PID_EFFECT_BLOCK_INDEX].value[0] =
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
@@ -528,7 +658,7 @@ static void pidff_set_ramp_force_report(struct pidff_device *pidff,
pidff_set_signed(&pidff->set_ramp[PID_RAMP_END],
effect->u.ramp.end_level);
hid_hw_request(pidff->hid, pidff->reports[PID_SET_RAMP],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -550,7 +680,7 @@ static void pidff_set_gain_report(struct pidff_device *pidff, u16 gain)
pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain);
hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_GAIN],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -558,8 +688,7 @@ static void pidff_set_gain_report(struct pidff_device *pidff, u16 gain)
*/
static void pidff_set_device_control(struct pidff_device *pidff, int field)
{
- int i, index;
- int field_index = pidff->control_id[field];
+ const int field_index = pidff->control_id[field];
if (field_index < 1)
return;
@@ -569,8 +698,9 @@ static void pidff_set_device_control(struct pidff_device *pidff, int field)
hid_dbg(pidff->hid, "DEVICE_CONTROL is a bitmask\n");
/* Clear current bitmask */
- for (i = 0; i < sizeof(pidff_device_control); i++) {
- index = pidff->control_id[i];
+ for (int i = 0; i < ARRAY_SIZE(pidff_device_control); i++) {
+ int index = pidff->control_id[i];
+
if (index < 1)
continue;
@@ -585,16 +715,8 @@ static void pidff_set_device_control(struct pidff_device *pidff, int field)
hid_hw_request(pidff->hid, pidff->reports[PID_DEVICE_CONTROL], HID_REQ_SET_REPORT);
hid_hw_wait(pidff->hid);
-}
-
-/*
- * Modify actuators state
- */
-static void pidff_set_actuators(struct pidff_device *pidff, bool enable)
-{
- hid_dbg(pidff->hid, "%s actuators\n", enable ? "Enable" : "Disable");
- pidff_set_device_control(pidff,
- enable ? PID_ENABLE_ACTUATORS : PID_DISABLE_ACTUATORS);
+ hid_dbg(pidff->hid, "Device control command 0x%02x sent",
+ pidff_device_control[field]);
}
/*
@@ -608,7 +730,7 @@ static void pidff_reset(struct pidff_device *pidff)
pidff->effect_count = 0;
pidff_set_device_control(pidff, PID_STOP_ALL_EFFECTS);
- pidff_set_actuators(pidff, 1);
+ pidff_set_device_control(pidff, PID_ENABLE_ACTUATORS);
}
/*
@@ -644,32 +766,25 @@ static void pidff_fetch_pool(struct pidff_device *pidff)
*/
static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
{
- int j;
-
- if (!pidff->effect_count)
- pidff_reset(pidff);
-
pidff->create_new_effect_type->value[0] = efnum;
hid_hw_request(pidff->hid, pidff->reports[PID_CREATE_NEW_EFFECT],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
hid_dbg(pidff->hid, "create_new_effect sent, type: %d\n", efnum);
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] = 0;
pidff->block_load_status->value[0] = 0;
hid_hw_wait(pidff->hid);
- for (j = 0; j < 60; j++) {
+ for (int i = 0; i < 60; i++) {
hid_dbg(pidff->hid, "pid_block_load requested\n");
hid_hw_request(pidff->hid, pidff->reports[PID_BLOCK_LOAD],
- HID_REQ_GET_REPORT);
+ HID_REQ_GET_REPORT);
hid_hw_wait(pidff->hid);
if (pidff->block_load_status->value[0] ==
pidff->status_id[PID_BLOCK_LOAD_SUCCESS]) {
hid_dbg(pidff->hid, "device reported free memory: %d bytes\n",
pidff->block_load[PID_RAM_POOL_AVAILABLE].value ?
pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1);
-
- pidff->effect_count++;
return 0;
}
if (pidff->block_load_status->value[0] ==
@@ -689,6 +804,12 @@ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum)
return -EIO;
}
+static int pidff_needs_playback(struct pidff_device *pidff, int effect_id, int n)
+{
+ return !pidff->effect[effect_id].is_infinite ||
+ pidff->effect[effect_id].loop_count != n;
+}
+
/*
* Play the effect with PID id n times
*/
@@ -696,6 +817,9 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
{
pidff->effect_operation[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id;
+ hid_dbg(pidff->hid, "%s PID effect %d", n == 0 ? "stopping" : "playing",
+ pid_id);
+
if (n == 0) {
pidff->effect_operation_status->value[0] =
pidff->operation_id[PID_EFFECT_STOP];
@@ -707,7 +831,7 @@ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n)
}
hid_hw_request(pidff->hid, pidff->reports[PID_EFFECT_OPERATION],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -717,7 +841,14 @@ static int pidff_playback(struct input_dev *dev, int effect_id, int value)
{
struct pidff_device *pidff = dev->ff->private;
- pidff_playback_pid(pidff, pidff->pid_id[effect_id], value);
+ if (!pidff_needs_playback(pidff, effect_id, value))
+ return 0;
+
+ hid_dbg(pidff->hid, "requesting %s on FF effect %d",
+ value == 0 ? "stop" : "playback", effect_id);
+
+ pidff->effect[effect_id].loop_count = value;
+ pidff_playback_pid(pidff, pidff->effect[effect_id].pid_id, value);
return 0;
}
@@ -729,10 +860,7 @@ static void pidff_erase_pid(struct pidff_device *pidff, int pid_id)
{
pidff->block_free[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id;
hid_hw_request(pidff->hid, pidff->reports[PID_BLOCK_FREE],
- HID_REQ_SET_REPORT);
-
- if (pidff->effect_count > 0)
- pidff->effect_count--;
+ HID_REQ_SET_REPORT);
}
/*
@@ -741,10 +869,9 @@ static void pidff_erase_pid(struct pidff_device *pidff, int pid_id)
static int pidff_erase_effect(struct input_dev *dev, int effect_id)
{
struct pidff_device *pidff = dev->ff->private;
- int pid_id = pidff->pid_id[effect_id];
+ int pid_id = pidff->effect[effect_id].pid_id;
- hid_dbg(pidff->hid, "starting to erase %d/%d\n",
- effect_id, pidff->pid_id[effect_id]);
+ hid_dbg(pidff->hid, "starting to erase %d/%d\n", effect_id, pid_id);
/*
* Wait for the queue to clear. We do not want
@@ -754,139 +881,83 @@ static int pidff_erase_effect(struct input_dev *dev, int effect_id)
pidff_playback_pid(pidff, pid_id, 0);
pidff_erase_pid(pidff, pid_id);
+ if (pidff->effect_count > 0)
+ pidff->effect_count--;
+
+ hid_dbg(pidff->hid, "current effect count: %d", pidff->effect_count);
return 0;
}
+#define PIDFF_SET_REPORT_IF_NEEDED(type, effect, old) \
+ ({ if (!old || pidff_needs_set_## type(effect, old)) \
+ pidff_set_ ##type## _report(pidff, effect); })
+
+#define PIDFF_SET_ENVELOPE_IF_NEEDED(type, effect, old) \
+ ({ if (pidff_needs_set_envelope(&effect->u.type.envelope, \
+ old ? &old->u.type.envelope : NULL)) \
+ pidff_set_envelope_report(pidff, &effect->u.type.envelope); })
+
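To make the statement-expression macros easier to audit, here is a rough expansion for the FF_CONSTANT case as used below; the callee names follow directly from the token pasting in the definitions above:

	PIDFF_SET_REPORT_IF_NEEDED(constant, new, old);
	/* expands to roughly: */
	({ if (!old || pidff_needs_set_constant(new, old))
		pidff_set_constant_report(pidff, new); });

	PIDFF_SET_ENVELOPE_IF_NEEDED(constant, new, old);
	/* expands to roughly: */
	({ if (pidff_needs_set_envelope(&new->u.constant.envelope,
					old ? &old->u.constant.envelope : NULL))
		pidff_set_envelope_report(pidff, &new->u.constant.envelope); });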
/*
* Effect upload handler
*/
-static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect,
+static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *new,
struct ff_effect *old)
{
struct pidff_device *pidff = dev->ff->private;
- int type_id;
- int error;
+ const int type_id = pidff_get_effect_type_id(pidff, new);
- pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] = 0;
- if (old) {
- pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] =
- pidff->pid_id[effect->id];
+ if (!type_id) {
+ hid_err(pidff->hid, "effect type not supported\n");
+ return -EINVAL;
}
- switch (effect->type) {
+ if (!pidff->effect_count)
+ pidff_reset(pidff);
+
+ if (!old) {
+ int error = pidff_request_effect_upload(pidff, type_id);
+
+ if (error)
+ return error;
+
+ pidff->effect_count++;
+ hid_dbg(pidff->hid, "current effect count: %d", pidff->effect_count);
+ pidff->effect[new->id].loop_count = 0;
+ pidff->effect[new->id].pid_id =
+ pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
+ }
+
+ pidff->effect[new->id].is_infinite =
+ pidff_is_duration_infinite(new->replay.length);
+
+ pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] =
+ pidff->effect[new->id].pid_id;
+
+ PIDFF_SET_REPORT_IF_NEEDED(effect, new, old);
+ switch (new->type) {
case FF_CONSTANT:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_CONSTANT]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_constant(effect, old))
- pidff_set_constant_force_report(pidff, effect);
- if (pidff_needs_set_envelope(&effect->u.constant.envelope,
- old ? &old->u.constant.envelope : NULL))
- pidff_set_envelope_report(pidff, &effect->u.constant.envelope);
+ PIDFF_SET_REPORT_IF_NEEDED(constant, new, old);
+ PIDFF_SET_ENVELOPE_IF_NEEDED(constant, new, old);
break;
case FF_PERIODIC:
- if (!old) {
- switch (effect->u.periodic.waveform) {
- case FF_SQUARE:
- type_id = PID_SQUARE;
- break;
- case FF_TRIANGLE:
- type_id = PID_TRIANGLE;
- break;
- case FF_SINE:
- type_id = PID_SINE;
- break;
- case FF_SAW_UP:
- type_id = PID_SAW_UP;
- break;
- case FF_SAW_DOWN:
- type_id = PID_SAW_DOWN;
- break;
- default:
- hid_err(pidff->hid, "invalid waveform\n");
- return -EINVAL;
- }
-
- if (pidff->quirks & HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY)
- type_id = PID_SINE;
-
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[type_id]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_periodic(effect, old))
- pidff_set_periodic_report(pidff, effect);
- if (pidff_needs_set_envelope(&effect->u.periodic.envelope,
- old ? &old->u.periodic.envelope : NULL))
- pidff_set_envelope_report(pidff, &effect->u.periodic.envelope);
+ PIDFF_SET_REPORT_IF_NEEDED(periodic, new, old);
+ PIDFF_SET_ENVELOPE_IF_NEEDED(periodic, new, old);
break;
case FF_RAMP:
- if (!old) {
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[PID_RAMP]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_ramp(effect, old))
- pidff_set_ramp_force_report(pidff, effect);
- if (pidff_needs_set_envelope(&effect->u.ramp.envelope,
- old ? &old->u.ramp.envelope : NULL))
- pidff_set_envelope_report(pidff, &effect->u.ramp.envelope);
+ PIDFF_SET_REPORT_IF_NEEDED(ramp, new, old);
+ PIDFF_SET_ENVELOPE_IF_NEEDED(ramp, new, old);
break;
case FF_SPRING:
case FF_DAMPER:
case FF_INERTIA:
case FF_FRICTION:
- if (!old) {
- switch (effect->type) {
- case FF_SPRING:
- type_id = PID_SPRING;
- break;
- case FF_DAMPER:
- type_id = PID_DAMPER;
- break;
- case FF_INERTIA:
- type_id = PID_INERTIA;
- break;
- case FF_FRICTION:
- type_id = PID_FRICTION;
- break;
- }
- error = pidff_request_effect_upload(pidff,
- pidff->type_id[type_id]);
- if (error)
- return error;
- }
- if (!old || pidff_needs_set_effect(effect, old))
- pidff_set_effect_report(pidff, effect);
- if (!old || pidff_needs_set_condition(effect, old))
- pidff_set_condition_report(pidff, effect);
+ PIDFF_SET_REPORT_IF_NEEDED(condition, new, old);
break;
-
- default:
- hid_err(pidff->hid, "invalid type\n");
- return -EINVAL;
}
-
- if (!old)
- pidff->pid_id[effect->id] =
- pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0];
-
hid_dbg(pidff->hid, "uploaded\n");
-
return 0;
}
@@ -924,7 +995,7 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
pidff->set_effect[PID_START_DELAY].value[0] = 0;
hid_hw_request(pidff->hid, pidff->reports[PID_SET_EFFECT],
- HID_REQ_SET_REPORT);
+ HID_REQ_SET_REPORT);
}
/*
@@ -936,56 +1007,85 @@ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude)
}
/*
+ * Find specific usage in a given hid_field
+ */
+static int pidff_find_usage(struct hid_field *fld, unsigned int usage_code)
+{
+ for (int i = 0; i < fld->maxusage; i++) {
+ if (fld->usage[i].hid == usage_code)
+ return i;
+ }
+ return -1;
+}
+
+/*
+ * Find hid_field with a specific usage. Return the usage index as well
+ */
+static int pidff_find_field_with_usage(int *usage_index,
+ struct hid_report *report,
+ unsigned int usage_code)
+{
+ for (int i = 0; i < report->maxfield; i++) {
+ struct hid_field *fld = report->field[i];
+
+ if (fld->maxusage != fld->report_count) {
+ pr_debug("maxusage and report_count do not match, skipping\n");
+ continue;
+ }
+
+ int index = pidff_find_usage(fld, usage_code);
+
+ if (index >= 0) {
+ *usage_index = index;
+ return i;
+ }
+ }
+ return -1;
+}
+
+/*
* Find fields from a report and fill a pidff_usage
*/
static int pidff_find_fields(struct pidff_usage *usage, const u8 *table,
- struct hid_report *report, int count, int strict)
+ struct hid_report *report, int count, int strict,
+ u32 *quirks)
{
+ const u8 block_offset = pidff_set_condition[PID_PARAM_BLOCK_OFFSET];
+ const u8 delay = pidff_set_effect[PID_START_DELAY];
+
if (!report) {
pr_debug("%s, null report\n", __func__);
return -1;
}
- int i, j, k, found;
- int return_value = 0;
+ for (int i = 0; i < count; i++) {
+ int index;
+ int found = pidff_find_field_with_usage(&index, report,
+ HID_UP_PID | table[i]);
- for (k = 0; k < count; k++) {
- found = 0;
- for (i = 0; i < report->maxfield; i++) {
- if (report->field[i]->maxusage !=
- report->field[i]->report_count) {
- pr_debug("maxusage and report_count do not match, skipping\n");
- continue;
- }
- for (j = 0; j < report->field[i]->maxusage; j++) {
- if (report->field[i]->usage[j].hid ==
- (HID_UP_PID | table[k])) {
- pr_debug("found %d at %d->%d\n",
- k, i, j);
- usage[k].field = report->field[i];
- usage[k].value =
- &report->field[i]->value[j];
- found = 1;
- break;
- }
- }
- if (found)
- break;
+ if (found >= 0) {
+ pr_debug("found %d at %d->%d\n", i, found, index);
+ usage[i].field = report->field[found];
+ usage[i].value = &report->field[found]->value[index];
+ continue;
}
- if (!found && table[k] == pidff_set_effect[PID_START_DELAY]) {
+
+ if (table[i] == delay) {
pr_debug("Delay field not found, but that's OK\n");
pr_debug("Setting MISSING_DELAY quirk\n");
- return_value |= HID_PIDFF_QUIRK_MISSING_DELAY;
- } else if (!found && table[k] == pidff_set_condition[PID_PARAM_BLOCK_OFFSET]) {
+ *quirks |= HID_PIDFF_QUIRK_MISSING_DELAY;
+
+ } else if (table[i] == block_offset) {
pr_debug("PBO field not found, but that's OK\n");
pr_debug("Setting MISSING_PBO quirk\n");
- return_value |= HID_PIDFF_QUIRK_MISSING_PBO;
- } else if (!found && strict) {
- pr_debug("failed to locate %d\n", k);
+ *quirks |= HID_PIDFF_QUIRK_MISSING_PBO;
+
+ } else if (strict) {
+ pr_debug("failed to locate %d\n", i);
return -1;
}
}
- return return_value;
+ return 0;
}
/*
@@ -995,7 +1095,7 @@ static int pidff_check_usage(int usage)
{
int i;
- for (i = 0; i < sizeof(pidff_reports); i++)
+ for (i = 0; i < ARRAY_SIZE(pidff_reports); i++)
if (usage == (HID_UP_PID | pidff_reports[i]))
return i;
@@ -1050,9 +1150,7 @@ static void pidff_find_reports(struct hid_device *hid, int report_type,
*/
static int pidff_reports_ok(struct pidff_device *pidff)
{
- int i;
-
- for (i = 0; i <= PID_REQUIRED_REPORTS; i++) {
+ for (int i = 0; i < PID_REQUIRED_REPORTS; i++) {
if (!pidff->reports[i]) {
hid_dbg(pidff->hid, "%d missing\n", i);
return 0;
@@ -1073,9 +1171,7 @@ static struct hid_field *pidff_find_special_field(struct hid_report *report,
return NULL;
}
- int i;
-
- for (i = 0; i < report->maxfield; i++) {
+ for (int i = 0; i < report->maxfield; i++) {
if (report->field[i]->logical == (HID_UP_PID | usage) &&
report->field[i]->report_count > 0) {
if (!enforce_min ||
@@ -1093,27 +1189,29 @@ static struct hid_field *pidff_find_special_field(struct hid_report *report,
* Fill a pidff->*_id struct table
*/
static int pidff_find_special_keys(int *keys, struct hid_field *fld,
- const u8 *usagetable, int count)
+ const u8 *usagetable, int count,
+ unsigned int usage_page)
{
-
- int i, j;
int found = 0;
- for (i = 0; i < count; i++) {
- for (j = 0; j < fld->maxusage; j++) {
- if (fld->usage[j].hid == (HID_UP_PID | usagetable[i])) {
- keys[i] = j + 1;
- found++;
- break;
- }
- }
+ if (!fld)
+ return 0;
+
+ for (int i = 0; i < count; i++) {
+ keys[i] = pidff_find_usage(fld, usage_page | usagetable[i]) + 1;
+ if (keys[i])
+ found++;
}
return found;
}
#define PIDFF_FIND_SPECIAL_KEYS(keys, field, name) \
pidff_find_special_keys(pidff->keys, pidff->field, pidff_ ## name, \
- sizeof(pidff_ ## name))
+ ARRAY_SIZE(pidff_ ## name), HID_UP_PID)
+
+#define PIDFF_FIND_GENERAL_DESKTOP(keys, field, name) \
+ pidff_find_special_keys(pidff->keys, pidff->field, pidff_ ## name, \
+ ARRAY_SIZE(pidff_ ## name), HID_UP_GENDESK)
/*
* Find and check the special fields
@@ -1128,13 +1226,24 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
pidff->set_effect_type =
pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
PID_EFFECT_TYPE, 1);
+ pidff->axes_enable =
+ pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
+ PID_AXES_ENABLE, 0);
pidff->effect_direction =
pidff_find_special_field(pidff->reports[PID_SET_EFFECT],
PID_DIRECTION, 0);
pidff->device_control =
pidff_find_special_field(pidff->reports[PID_DEVICE_CONTROL],
- PID_DEVICE_CONTROL_ARRAY,
- !(pidff->quirks & HID_PIDFF_QUIRK_PERMISSIVE_CONTROL));
+ PID_DEVICE_CONTROL_ARRAY, 1);
+
+ /* Detect and set permissive control quirk */
+ if (!pidff->device_control) {
+ pr_debug("Setting PERMISSIVE_CONTROL quirk\n");
+ pidff->quirks |= HID_PIDFF_QUIRK_PERMISSIVE_CONTROL;
+ pidff->device_control = pidff_find_special_field(
+ pidff->reports[PID_DEVICE_CONTROL],
+ PID_DEVICE_CONTROL_ARRAY, 0);
+ }
pidff->block_load_status =
pidff_find_special_field(pidff->reports[PID_BLOCK_LOAD],
@@ -1180,7 +1289,7 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
if (PIDFF_FIND_SPECIAL_KEYS(status_id, block_load_status,
block_load_status) !=
- sizeof(pidff_block_load_status)) {
+ ARRAY_SIZE(pidff_block_load_status)) {
hid_err(pidff->hid,
"block load status identifiers not found\n");
return -1;
@@ -1188,11 +1297,37 @@ static int pidff_find_special_fields(struct pidff_device *pidff)
if (PIDFF_FIND_SPECIAL_KEYS(operation_id, effect_operation_status,
effect_operation_status) !=
- sizeof(pidff_effect_operation_status)) {
+ ARRAY_SIZE(pidff_effect_operation_status)) {
hid_err(pidff->hid, "effect operation identifiers not found\n");
return -1;
}
+ if (!pidff->axes_enable) {
+ hid_info(pidff->hid, "axes enable field not found!\n");
+ return 0;
+ }
+
+ hid_dbg(pidff->hid, "axes enable report count: %u\n",
+ pidff->axes_enable->report_count);
+
+ uint found = PIDFF_FIND_GENERAL_DESKTOP(direction_axis_id, axes_enable,
+ direction_axis);
+
+ pidff->axis_count = found;
+ hid_dbg(pidff->hid, "found direction axes: %u", found);
+
+ for (int i = 0; i < ARRAY_SIZE(pidff_direction_axis); i++) {
+ if (!pidff->direction_axis_id[i])
+ continue;
+
+ hid_dbg(pidff->hid, "axis %d, usage: 0x%04x, index: %d", i + 1,
+ pidff_direction_axis[i], pidff->direction_axis_id[i]);
+ }
+
+ if (pidff->axes_enable && found != pidff->axes_enable->report_count)
+ hid_warn(pidff->hid, "axes_enable: %u != direction axes: %u",
+ pidff->axes_enable->report_count, found);
+
return 0;
}
@@ -1204,7 +1339,7 @@ static int pidff_find_effects(struct pidff_device *pidff,
{
int i;
- for (i = 0; i < sizeof(pidff_effect_types); i++) {
+ for (i = 0; i < ARRAY_SIZE(pidff_effect_types); i++) {
int pidff_type = pidff->type_id[i];
if (pidff->set_effect_type->usage[pidff_type].hid !=
@@ -1254,26 +1389,17 @@ static int pidff_find_effects(struct pidff_device *pidff,
#define PIDFF_FIND_FIELDS(name, report, strict) \
pidff_find_fields(pidff->name, pidff_ ## name, \
pidff->reports[report], \
- sizeof(pidff_ ## name), strict)
+ ARRAY_SIZE(pidff_ ## name), strict, &pidff->quirks)
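With the changed signature, a call such as the one in pidff_init_fields() below expands as follows; quirks are now accumulated through the pointer argument instead of the return value:

	PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1);
	/* expands to: */
	pidff_find_fields(pidff->set_effect, pidff_set_effect,
			  pidff->reports[PID_SET_EFFECT],
			  ARRAY_SIZE(pidff_set_effect), 1, &pidff->quirks);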
/*
* Fill and check the pidff_usages
*/
static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
{
- int status = 0;
-
- /* Save info about the device not having the DELAY ffb field. */
- status = PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1);
- if (status == -1) {
+ if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) {
hid_err(pidff->hid, "unknown set_effect report layout\n");
return -ENODEV;
}
- pidff->quirks |= status;
-
- if (status & HID_PIDFF_QUIRK_MISSING_DELAY)
- hid_dbg(pidff->hid, "Adding MISSING_DELAY quirk\n");
-
PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0);
if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) {
@@ -1307,39 +1433,25 @@ static int pidff_init_fields(struct pidff_device *pidff, struct input_dev *dev)
"has periodic effect but no envelope\n");
}
- if (test_bit(FF_CONSTANT, dev->ffbit) &&
- PIDFF_FIND_FIELDS(set_constant, PID_SET_CONSTANT, 1)) {
+ if (PIDFF_FIND_FIELDS(set_constant, PID_SET_CONSTANT, 1) &&
+ test_and_clear_bit(FF_CONSTANT, dev->ffbit))
hid_warn(pidff->hid, "unknown constant effect layout\n");
- clear_bit(FF_CONSTANT, dev->ffbit);
- }
- if (test_bit(FF_RAMP, dev->ffbit) &&
- PIDFF_FIND_FIELDS(set_ramp, PID_SET_RAMP, 1)) {
+ if (PIDFF_FIND_FIELDS(set_ramp, PID_SET_RAMP, 1) &&
+ test_and_clear_bit(FF_RAMP, dev->ffbit))
hid_warn(pidff->hid, "unknown ramp effect layout\n");
- clear_bit(FF_RAMP, dev->ffbit);
- }
-
- if (test_bit(FF_SPRING, dev->ffbit) ||
- test_bit(FF_DAMPER, dev->ffbit) ||
- test_bit(FF_FRICTION, dev->ffbit) ||
- test_bit(FF_INERTIA, dev->ffbit)) {
- status = PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1);
- if (status < 0) {
+ if (PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) {
+ if (test_and_clear_bit(FF_SPRING, dev->ffbit) ||
+ test_and_clear_bit(FF_DAMPER, dev->ffbit) ||
+ test_and_clear_bit(FF_FRICTION, dev->ffbit) ||
+ test_and_clear_bit(FF_INERTIA, dev->ffbit))
hid_warn(pidff->hid, "unknown condition effect layout\n");
- clear_bit(FF_SPRING, dev->ffbit);
- clear_bit(FF_DAMPER, dev->ffbit);
- clear_bit(FF_FRICTION, dev->ffbit);
- clear_bit(FF_INERTIA, dev->ffbit);
- }
- pidff->quirks |= status;
}
- if (test_bit(FF_PERIODIC, dev->ffbit) &&
- PIDFF_FIND_FIELDS(set_periodic, PID_SET_PERIODIC, 1)) {
+ if (PIDFF_FIND_FIELDS(set_periodic, PID_SET_PERIODIC, 1) &&
+ test_and_clear_bit(FF_PERIODIC, dev->ffbit))
hid_warn(pidff->hid, "unknown periodic effect layout\n");
- clear_bit(FF_PERIODIC, dev->ffbit);
- }
PIDFF_FIND_FIELDS(pool, PID_POOL, 0);
@@ -1392,8 +1504,8 @@ static int pidff_check_autocenter(struct pidff_device *pidff,
int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks)
{
struct pidff_device *pidff;
- struct hid_input *hidinput = list_entry(hid->inputs.next,
- struct hid_input, list);
+ struct hid_input *hidinput =
+ list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct ff_device *ff;
int max_effects;
@@ -1473,14 +1585,14 @@ int hid_pidff_init_with_quirks(struct hid_device *hid, u32 initial_quirks)
ff->set_autocenter = pidff_set_autocenter;
ff->playback = pidff_playback;
- hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n");
- hid_dbg(dev, "Active quirks mask: 0x%x\n", pidff->quirks);
+ hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula\n");
+ hid_dbg(dev, "Active quirks mask: 0x%08x\n", pidff->quirks);
hid_device_io_stop(hid);
return 0;
- fail:
+fail:
hid_device_io_stop(hid);
kfree(pidff);
diff --git a/drivers/hid/usbhid/hid-pidff.h b/drivers/hid/usbhid/hid-pidff.h
index a53a8b436baa..f321f675e131 100644
--- a/drivers/hid/usbhid/hid-pidff.h
+++ b/drivers/hid/usbhid/hid-pidff.h
@@ -16,7 +16,7 @@
#define HID_PIDFF_QUIRK_PERMISSIVE_CONTROL BIT(2)
/* Use fixed 0x4000 direction during SET_EFFECT report upload */
-#define HID_PIDFF_QUIRK_FIX_WHEEL_DIRECTION BIT(3)
+#define HID_PIDFF_QUIRK_FIX_CONDITIONAL_DIRECTION BIT(3)
/* Force all periodic effects to be uploaded as SINE */
#define HID_PIDFF_QUIRK_PERIODIC_SINE_ONLY BIT(4)
diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index aeb92b803a17..50dde968febe 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -362,7 +362,6 @@ static int ssi_async_break(struct hsi_msg *msg)
spin_unlock_bh(&omap_port->lock);
}
out:
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return err;
@@ -401,7 +400,6 @@ static int ssi_async(struct hsi_msg *msg)
msg->status = HSI_STATUS_ERROR;
}
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
msg->status, msg->ttype, msg->channel);
@@ -504,7 +502,6 @@ static int ssi_setup(struct hsi_client *cl)
omap_port->ssr.mode = cl->rx_cfg.mode;
out:
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return err;
@@ -570,7 +567,6 @@ static int ssi_flush(struct hsi_client *cl)
pinctrl_pm_select_default_state(omap_port->pdev);
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return 0;
@@ -625,7 +621,6 @@ static int ssi_stop_tx(struct hsi_client *cl)
writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
spin_unlock_bh(&omap_port->wk_lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */
@@ -653,7 +648,6 @@ static void ssi_transfer(struct omap_ssi_port *omap_port,
}
}
spin_unlock_bh(&omap_port->lock);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
@@ -683,7 +677,6 @@ static void ssi_cleanup_queues(struct hsi_client *cl)
txbufstate |= (1 << i);
status |= SSI_DATAACCEPT(i);
/* Release the clocks writes, also GDD ones */
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
ssi_flush_queue(&omap_port->txqueue[i], cl);
@@ -739,7 +732,6 @@ static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
* ssi_cleanup_queues
*/
if (msg->ttype == HSI_MSG_READ) {
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
omap_ssi->gdd_trn[i].msg = NULL;
@@ -936,7 +928,6 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
if (msg->ttype == HSI_MSG_WRITE) {
/* Release clocks for write transfer */
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
reg &= ~val;
@@ -981,7 +972,6 @@ static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
/* TODO: sleep if we retry? */
} while (status_reg);
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
return IRQ_HANDLED;
@@ -1018,7 +1008,6 @@ static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
}
hsi_event(port, HSI_EVENT_STOP_RX);
if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
- pm_runtime_mark_last_busy(omap_port->pdev);
pm_runtime_put_autosuspend(omap_port->pdev);
}
}
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 57623ca7f350..7937ac0cbd0f 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -3,20 +3,22 @@
menu "Microsoft Hyper-V guest support"
config HYPERV
- tristate "Microsoft Hyper-V client drivers"
+ bool "Microsoft Hyper-V core hypervisor support"
depends on (X86 && X86_LOCAL_APIC && HYPERVISOR_GUEST) \
|| (ARM64 && !CPU_BIG_ENDIAN)
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
select OF_EARLY_FLATTREE if OF
select SYSFB if EFI && !HYPERV_VTL_MODE
+ select IRQ_MSI_LIB if X86
help
Select this option to run Linux as a Hyper-V client operating
system.
config HYPERV_VTL_MODE
bool "Enable Linux to boot in VTL context"
- depends on (X86_64 || ARM64) && HYPERV
+ depends on (X86_64 && HAVE_STATIC_CALL) || ARM64
+ depends on HYPERV
depends on SMP
default n
help
@@ -44,18 +46,25 @@ config HYPERV_TIMER
config HYPERV_UTILS
tristate "Microsoft Hyper-V Utilities driver"
- depends on HYPERV && CONNECTOR && NLS
+ depends on HYPERV_VMBUS && CONNECTOR && NLS
depends on PTP_1588_CLOCK_OPTIONAL
help
Select this option to enable the Hyper-V Utilities.
config HYPERV_BALLOON
tristate "Microsoft Hyper-V Balloon driver"
- depends on HYPERV
+ depends on HYPERV_VMBUS
select PAGE_REPORTING
help
Select this option to enable Hyper-V Balloon driver.
+config HYPERV_VMBUS
+ tristate "Microsoft Hyper-V VMBus driver"
+ depends on HYPERV
+ default HYPERV
+ help
+ Select this option to enable the Hyper-V VMBus driver.
+
config MSHV_ROOT
tristate "Microsoft Hyper-V root partition support"
depends on HYPERV && (X86_64 || ARM64)
@@ -66,6 +75,9 @@ config MSHV_ROOT
# no particular order, making it impossible to reassemble larger pages
depends on PAGE_SIZE_4KB
select EVENTFD
+ select VIRT_XFER_TO_GUEST_WORK
+ select HMM_MIRROR
+ select MMU_NOTIFIER
default n
help
Select this option to enable support for booting and running as root
@@ -73,4 +85,28 @@ config MSHV_ROOT
If unsure, say N.
+config MSHV_VTL
+ tristate "Microsoft Hyper-V VTL driver"
+ depends on X86_64 && HYPERV_VTL_MODE
+ depends on HYPERV_VMBUS
+ # Mapping VTL0 memory to a userspace process in VTL2 is supported in OpenHCL.
+ # VTL2 for OpenHCL makes use of Huge Pages to improve performance on VMs,
+ # especially with large memory requirements.
+ depends on TRANSPARENT_HUGEPAGE
+ # MTRRs are controlled by VTL0, and are not specific to individual VTLs.
+ # Therefore, do not attempt to access or modify MTRRs here.
+ depends on !MTRR
+ select CPUMASK_OFFSTACK
+ select VIRT_XFER_TO_GUEST_WORK
+ default n
+ help
+ Select this option to enable Hyper-V VTL driver support.
+ This driver provides interfaces for Virtual Machine Manager (VMM) running in VTL2
+ userspace to create VTLs and partitions, setup and manage VTL0 memory and
+ allow userspace to make direct hypercalls. This also allows to map VTL0's address
+ space to a usermode process in VTL2 and supports getting new VMBus messages and channel
+ events in VTL2.
+
+ If unsure, say N.
+
endmenu
diff --git a/drivers/hv/Makefile b/drivers/hv/Makefile
index 976189c725dc..a49f93c2d245 100644
--- a/drivers/hv/Makefile
+++ b/drivers/hv/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_HYPERV) += hv_vmbus.o
+obj-$(CONFIG_HYPERV_VMBUS) += hv_vmbus.o
obj-$(CONFIG_HYPERV_UTILS) += hv_utils.o
obj-$(CONFIG_HYPERV_BALLOON) += hv_balloon.o
obj-$(CONFIG_MSHV_ROOT) += mshv_root.o
+obj-$(CONFIG_MSHV_VTL) += mshv_vtl.o
CFLAGS_hv_trace.o = -I$(src)
CFLAGS_hv_balloon.o = -I$(src)
@@ -13,8 +14,12 @@ hv_vmbus-y := vmbus_drv.o \
hv_vmbus-$(CONFIG_HYPERV_TESTING) += hv_debugfs.o
hv_utils-y := hv_util.o hv_kvp.o hv_snapshot.o hv_utils_transport.o
mshv_root-y := mshv_root_main.o mshv_synic.o mshv_eventfd.o mshv_irq.o \
- mshv_root_hv_call.o mshv_portid_table.o
+ mshv_root_hv_call.o mshv_portid_table.o mshv_regions.o
+mshv_vtl-y := mshv_vtl_main.o
# Code that must be built-in
-obj-$(subst m,y,$(CONFIG_HYPERV)) += hv_common.o
-obj-$(subst m,y,$(CONFIG_MSHV_ROOT)) += hv_proc.o mshv_common.o
+obj-$(CONFIG_HYPERV) += hv_common.o
+obj-$(subst m,y,$(CONFIG_MSHV_ROOT)) += hv_proc.o
+ifneq ($(CONFIG_MSHV_ROOT)$(CONFIG_MSHV_VTL),)
+ obj-y += mshv_common.o
+endif
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 7c7c66e0dc3f..6821f225248b 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -410,6 +410,21 @@ static int create_gpadl_header(enum hv_gpadl_type type, void *kbuffer,
return 0;
}
+static void vmbus_free_channel_msginfo(struct vmbus_channel_msginfo *msginfo)
+{
+ struct vmbus_channel_msginfo *submsginfo, *tmp;
+
+ if (!msginfo)
+ return;
+
+ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
+ msglistentry) {
+ kfree(submsginfo);
+ }
+
+ kfree(msginfo);
+}
+
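Like kfree(), the helper tolerates a NULL argument, which keeps the error paths below flat; illustrative calls:

	vmbus_free_channel_msginfo(NULL);	/* no-op */
	vmbus_free_channel_msginfo(msginfo);	/* frees queued sub-messages, then msginfo */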
/*
* __vmbus_establish_gpadl - Establish a GPADL for a buffer or ringbuffer
*
@@ -429,7 +444,7 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_msginfo *submsginfo, *tmp;
+ struct vmbus_channel_msginfo *submsginfo;
struct list_head *curr;
u32 next_gpadl_handle;
unsigned long flags;
@@ -444,20 +459,24 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
return ret;
}
- /*
- * Set the "decrypted" flag to true for the set_memory_decrypted()
- * success case. In the failure case, the encryption state of the
- * memory is unknown. Leave "decrypted" as true to ensure the
- * memory will be leaked instead of going back on the free list.
- */
- gpadl->decrypted = true;
- ret = set_memory_decrypted((unsigned long)kbuffer,
- PFN_UP(size));
- if (ret) {
- dev_warn(&channel->device_obj->device,
- "Failed to set host visibility for new GPADL %d.\n",
- ret);
- return ret;
+ gpadl->decrypted = !((channel->co_external_memory && type == HV_GPADL_BUFFER) ||
+ (channel->co_ring_buffer && type == HV_GPADL_RING));
+ if (gpadl->decrypted) {
+ /*
+ * The "decrypted" flag being true assumes that set_memory_decrypted() succeeds.
+ * But if it fails, the encryption state of the memory is unknown. In that case,
+ * leave "decrypted" as true to ensure the memory is leaked instead of going back
+ * on the free list.
+ */
+ ret = set_memory_decrypted((unsigned long)kbuffer,
+ PFN_UP(size));
+ if (ret) {
+ dev_warn(&channel->device_obj->device,
+ "Failed to set host visibility for new GPADL %d.\n",
+ ret);
+ vmbus_free_channel_msginfo(msginfo);
+ return ret;
+ }
}
init_completion(&msginfo->waitevent);
@@ -532,12 +551,8 @@ cleanup:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
- list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
- msglistentry) {
- kfree(submsginfo);
- }
- kfree(msginfo);
+ vmbus_free_channel_msginfo(msginfo);
if (ret) {
/*
@@ -545,8 +560,10 @@ cleanup:
* left as true so the memory is leaked instead of being
* put back on the free list.
*/
- if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
- gpadl->decrypted = false;
+ if (gpadl->decrypted) {
+ if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
+ gpadl->decrypted = false;
+ }
}
return ret;
@@ -573,7 +590,7 @@ EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
* keeps track of the next available slot in the array. Initially, each
* slot points to the next one (as in a Linked List). The last slot
* does not point to anything, so its value is U64_MAX by default.
- * @size The size of the array
+ * @size: The size of the array
*/
static u64 *request_arr_init(u32 size)
{
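The hunk stops at the opening brace, but the comment above pins down the body's shape; a minimal sketch consistent with it (the kcalloc()/GFP_KERNEL choice is an assumption, not taken from this diff):

	static u64 *request_arr_init(u32 size)
	{
		int i;
		u64 *rqst_arr;

		rqst_arr = kcalloc(size, sizeof(*rqst_arr), GFP_KERNEL);
		if (!rqst_arr)
			return NULL;

		/* Each free slot points to the next one, as in a linked list. */
		for (i = 0; i < size - 1; i++)
			rqst_arr[i] = i + 1;

		/* The last slot terminates the free list. */
		rqst_arr[size - 1] = U64_MAX;

		return rqst_arr;
	}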
@@ -677,12 +694,13 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
goto error_clean_ring;
err = hv_ringbuffer_init(&newchannel->outbound,
- page, send_pages, 0);
+ page, send_pages, 0, newchannel->co_ring_buffer);
if (err)
goto error_free_gpadl;
err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
- recv_pages, newchannel->max_pkt_size);
+ recv_pages, newchannel->max_pkt_size,
+ newchannel->co_ring_buffer);
if (err)
goto error_free_gpadl;
@@ -863,8 +881,11 @@ post_msg_err:
kfree(info);
- ret = set_memory_encrypted((unsigned long)gpadl->buffer,
- PFN_UP(gpadl->size));
+ if (gpadl->decrypted)
+ ret = set_memory_encrypted((unsigned long)gpadl->buffer,
+ PFN_UP(gpadl->size));
+ else
+ ret = 0;
if (ret)
pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
@@ -925,7 +946,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
/* Send a closing message */
- msg = &channel->close_msg.msg;
+ msg = &channel->close_msg;
msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
msg->child_relid = channel->offermsg.child_relid;
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 65dd299e2944..74fed2c073d4 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -844,14 +844,14 @@ static void vmbus_wait_for_unload(void)
= per_cpu_ptr(hv_context.cpu_context, cpu);
/*
- * In a CoCo VM the synic_message_page is not allocated
+ * In a CoCo VM the hyp_synic_message_page is not allocated
* in hv_synic_alloc(). Instead it is set/cleared in
- * hv_synic_enable_regs() and hv_synic_disable_regs()
+ * hv_hyp_synic_enable_regs() and hv_hyp_synic_disable_regs()
* such that it is set only when the CPU is online. If
* not all present CPUs are online, the message page
* might be NULL, so skip such CPUs.
*/
- page_addr = hv_cpu->synic_message_page;
+ page_addr = hv_cpu->hyp_synic_message_page;
if (!page_addr)
continue;
@@ -892,7 +892,7 @@ completed:
struct hv_per_cpu_context *hv_cpu
= per_cpu_ptr(hv_context.cpu_context, cpu);
- page_addr = hv_cpu->synic_message_page;
+ page_addr = hv_cpu->hyp_synic_message_page;
if (!page_addr)
continue;
@@ -1022,6 +1022,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
struct vmbus_channel_offer_channel *offer;
struct vmbus_channel *oldchannel, *newchannel;
size_t offer_sz;
+ bool co_ring_buffer, co_external_memory;
offer = (struct vmbus_channel_offer_channel *)hdr;
@@ -1034,6 +1035,22 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
return;
}
+ co_ring_buffer = is_co_ring_buffer(offer);
+ co_external_memory = is_co_external_memory(offer);
+ if (!co_ring_buffer && co_external_memory) {
+ pr_err("Invalid offer relid=%d: the ring buffer isn't encrypted\n",
+ offer->child_relid);
+ return;
+ }
+ if (co_ring_buffer || co_external_memory) {
+ if (vmbus_proto_version < VERSION_WIN10_V6_0 || !vmbus_is_confidential()) {
+ pr_err("Invalid offer relid=%d: no support for confidential VMBus\n",
+ offer->child_relid);
+ atomic_dec(&vmbus_connection.offer_in_progress);
+ return;
+ }
+ }
+
oldchannel = find_primary_channel_by_offer(offer);
if (oldchannel != NULL) {
@@ -1112,6 +1129,8 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
pr_err("Unable to allocate channel object\n");
return;
}
+ newchannel->co_ring_buffer = co_ring_buffer;
+ newchannel->co_external_memory = co_external_memory;
vmbus_setup_channel_state(newchannel, offer);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 1fe3573ae52a..5d9cb5bf2d62 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -51,6 +51,7 @@ EXPORT_SYMBOL_GPL(vmbus_proto_version);
* Linux guests and are not listed.
*/
static __u32 vmbus_versions[] = {
+ VERSION_WIN10_V6_0,
VERSION_WIN10_V5_3,
VERSION_WIN10_V5_2,
VERSION_WIN10_V5_1,
@@ -65,7 +66,7 @@ static __u32 vmbus_versions[] = {
* Maximal VMBus protocol version guests can negotiate. Useful to cap the
* VMBus version for testing and debugging purpose.
*/
-static uint max_version = VERSION_WIN10_V5_3;
+static uint max_version = VERSION_WIN10_V6_0;
module_param(max_version, uint, S_IRUGO);
MODULE_PARM_DESC(max_version,
@@ -105,6 +106,9 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID;
}
+ if (vmbus_is_confidential() && version >= VERSION_WIN10_V6_0)
+ msg->feature_flags = VMBUS_FEATURE_FLAG_CONFIDENTIAL_CHANNELS;
+
/*
* shared_gpa_boundary is zero in non-SNP VMs, so it's safe to always
* bitwise OR it
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index b14c5f9e0ef2..c100f04b3581 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -18,6 +18,7 @@
#include <linux/clockchips.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/export.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include <linux/set_memory.h>
@@ -25,6 +26,7 @@
/* The one and only */
struct hv_context hv_context;
+EXPORT_SYMBOL_FOR_MODULES(hv_context, "mshv_vtl");
/*
* hv_init - Main initialization routine.
@@ -74,7 +76,11 @@ int hv_post_message(union hv_connection_id connection_id,
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
- if (ms_hyperv.paravisor_present) {
+ if (ms_hyperv.paravisor_present && !vmbus_is_confidential()) {
+ /*
+ * If the VMBus isn't confidential, use the CoCo-specific
+ * mechanism to communicate with the hypervisor.
+ */
if (hv_isolation_type_tdx())
status = hv_tdx_hypercall(HVCALL_POST_MESSAGE,
virt_to_phys(aligned_msg), 0);
@@ -88,6 +94,11 @@ int hv_post_message(union hv_connection_id connection_id,
u64 control = HVCALL_POST_MESSAGE;
control |= hv_nested ? HV_HYPERCALL_NESTED : 0;
+ /*
+ * If there is no paravisor, this will go to the hypervisor.
+ * In the Confidential VMBus case, the paravisor is present
+ * and this hypercall traps to it instead.
+ */
status = hv_do_hypercall(control, aligned_msg, NULL);
}
@@ -95,11 +106,72 @@ int hv_post_message(union hv_connection_id connection_id,
return hv_result(status);
}
+EXPORT_SYMBOL_FOR_MODULES(hv_post_message, "mshv_vtl");
+
+static int hv_alloc_page(void **page, bool decrypt, const char *note)
+{
+ int ret = 0;
+
+ /*
+ * After the page changes its encryption status, its contents might
+ * appear scrambled on some hardware. Thus get_zeroed_page() would
+ * zero the page out in vain, so zero it explicitly exactly once.
+ *
+ * By default, the page is allocated encrypted in a CoCo VM.
+ */
+ *page = (void *)__get_free_page(GFP_KERNEL);
+ if (!*page)
+ return -ENOMEM;
+
+ if (decrypt)
+ ret = set_memory_decrypted((unsigned long)*page, 1);
+ if (ret)
+ goto failed;
+
+ memset(*page, 0, PAGE_SIZE);
+ return 0;
+
+failed:
+ /*
+ * Report the failure but don't put the page back on the free list as
+ * its encryption status is unknown.
+ */
+ pr_err("allocation failed for %s page, error %d, decrypted %d\n",
+ note, ret, decrypt);
+ *page = NULL;
+ return ret;
+}
+
+static int hv_free_page(void **page, bool encrypt, const char *note)
+{
+ int ret = 0;
+
+ if (!*page)
+ return 0;
+
+ if (encrypt)
+ ret = set_memory_encrypted((unsigned long)*page, 1);
+
+ /*
+ * If re-encryption fails, the page is leaked: something is wrong, so
+ * prefer to lose a page whose encryption status is unknown and stay afloat.
+ */
+ if (ret)
+ pr_err("deallocation failed for %s page, error %d, encrypt %d\n",
+ note, ret, encrypt);
+ else
+ free_page((unsigned long)*page);
+
+ *page = NULL;
+
+ return ret;
+}
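A sketch of how the two helpers pair up (the "example" label is hypothetical): the decrypt decision is made once at allocation, and the matching free must pass the same answer so the page's encryption state stays balanced:

	void *page;
	const bool decrypt = !vmbus_is_confidential();

	/* Hypothetical pairing: decrypt on alloc implies encrypt on free. */
	if (!hv_alloc_page(&page, decrypt, "example"))
		hv_free_page(&page, decrypt, "example");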
int hv_synic_alloc(void)
{
int cpu, ret = -ENOMEM;
struct hv_per_cpu_context *hv_cpu;
+ const bool decrypt = !vmbus_is_confidential();
/*
* First, zero all per-cpu memory areas so hv_synic_free() can
@@ -125,73 +197,37 @@ int hv_synic_alloc(void)
vmbus_on_msg_dpc, (unsigned long)hv_cpu);
if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
- hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
- if (!hv_cpu->post_msg_page) {
- pr_err("Unable to allocate post msg page\n");
+ ret = hv_alloc_page(&hv_cpu->post_msg_page,
+ decrypt, "post msg");
+ if (ret)
goto err;
- }
-
- ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
- if (ret) {
- pr_err("Failed to decrypt post msg page: %d\n", ret);
- /* Just leak the page, as it's unsafe to free the page. */
- hv_cpu->post_msg_page = NULL;
- goto err;
- }
-
- memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
}
/*
- * Synic message and event pages are allocated by paravisor.
- * Skip these pages allocation here.
+ * If these SynIC pages are not allocated, SIEF and SIM pages
+ * are configured using what the root partition or the paravisor
+ * provides upon reading the SIEFP and SIMP registers.
*/
if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
- hv_cpu->synic_message_page =
- (void *)get_zeroed_page(GFP_ATOMIC);
- if (!hv_cpu->synic_message_page) {
- pr_err("Unable to allocate SYNIC message page\n");
+ ret = hv_alloc_page(&hv_cpu->hyp_synic_message_page,
+ decrypt, "hypervisor SynIC msg");
+ if (ret)
goto err;
- }
-
- hv_cpu->synic_event_page =
- (void *)get_zeroed_page(GFP_ATOMIC);
- if (!hv_cpu->synic_event_page) {
- pr_err("Unable to allocate SYNIC event page\n");
-
- free_page((unsigned long)hv_cpu->synic_message_page);
- hv_cpu->synic_message_page = NULL;
+ ret = hv_alloc_page(&hv_cpu->hyp_synic_event_page,
+ decrypt, "hypervisor SynIC event");
+ if (ret)
goto err;
- }
}
- if (!ms_hyperv.paravisor_present &&
- (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
- ret = set_memory_decrypted((unsigned long)
- hv_cpu->synic_message_page, 1);
- if (ret) {
- pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
- hv_cpu->synic_message_page = NULL;
-
- /*
- * Free the event page here so that hv_synic_free()
- * won't later try to re-encrypt it.
- */
- free_page((unsigned long)hv_cpu->synic_event_page);
- hv_cpu->synic_event_page = NULL;
+ if (vmbus_is_confidential()) {
+ ret = hv_alloc_page(&hv_cpu->para_synic_message_page,
+ false, "paravisor SynIC msg");
+ if (ret)
goto err;
- }
-
- ret = set_memory_decrypted((unsigned long)
- hv_cpu->synic_event_page, 1);
- if (ret) {
- pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
- hv_cpu->synic_event_page = NULL;
+ ret = hv_alloc_page(&hv_cpu->para_synic_event_page,
+ false, "paravisor SynIC event");
+ if (ret)
goto err;
- }
-
- memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
- memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
}
}
@@ -207,70 +243,46 @@ err:
void hv_synic_free(void)
{
- int cpu, ret;
+ int cpu;
+ const bool encrypt = !vmbus_is_confidential();
for_each_present_cpu(cpu) {
struct hv_per_cpu_context *hv_cpu =
per_cpu_ptr(hv_context.cpu_context, cpu);
- /* It's better to leak the page if the encryption fails. */
- if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
- if (hv_cpu->post_msg_page) {
- ret = set_memory_encrypted((unsigned long)
- hv_cpu->post_msg_page, 1);
- if (ret) {
- pr_err("Failed to encrypt post msg page: %d\n", ret);
- hv_cpu->post_msg_page = NULL;
- }
- }
+ if (ms_hyperv.paravisor_present && hv_isolation_type_tdx())
+ hv_free_page(&hv_cpu->post_msg_page,
+ encrypt, "post msg");
+ if (!ms_hyperv.paravisor_present && !hv_root_partition()) {
+ hv_free_page(&hv_cpu->hyp_synic_event_page,
+ encrypt, "hypervisor SynIC event");
+ hv_free_page(&hv_cpu->hyp_synic_message_page,
+ encrypt, "hypervisor SynIC msg");
}
-
- if (!ms_hyperv.paravisor_present &&
- (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
- if (hv_cpu->synic_message_page) {
- ret = set_memory_encrypted((unsigned long)
- hv_cpu->synic_message_page, 1);
- if (ret) {
- pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
- hv_cpu->synic_message_page = NULL;
- }
- }
-
- if (hv_cpu->synic_event_page) {
- ret = set_memory_encrypted((unsigned long)
- hv_cpu->synic_event_page, 1);
- if (ret) {
- pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
- hv_cpu->synic_event_page = NULL;
- }
- }
+ if (vmbus_is_confidential()) {
+ hv_free_page(&hv_cpu->para_synic_event_page,
+ false, "paravisor SynIC event");
+ hv_free_page(&hv_cpu->para_synic_message_page,
+ false, "paravisor SynIC msg");
}
-
- free_page((unsigned long)hv_cpu->post_msg_page);
- free_page((unsigned long)hv_cpu->synic_event_page);
- free_page((unsigned long)hv_cpu->synic_message_page);
}
kfree(hv_context.hv_numa_map);
}
/*
- * hv_synic_init - Initialize the Synthetic Interrupt Controller.
- *
- * If it is already initialized by another entity (ie x2v shim), we need to
- * retrieve the initialized message and event pages. Otherwise, we create and
- * initialize the message and event pages.
+ * hv_hyp_synic_enable_regs - Initialize the Synthetic Interrupt Controller
+ * with the hypervisor.
*/
-void hv_synic_enable_regs(unsigned int cpu)
+void hv_hyp_synic_enable_regs(unsigned int cpu)
{
struct hv_per_cpu_context *hv_cpu =
per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_simp simp;
union hv_synic_siefp siefp;
union hv_synic_sint shared_sint;
- union hv_synic_scontrol sctrl;
- /* Setup the Synic's message page */
+ /* Setup the Synic's message page with the hypervisor. */
simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
simp.simp_enabled = 1;
@@ -278,18 +290,18 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (simp.base_simp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_message_page =
+ hv_cpu->hyp_synic_message_page =
(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
- if (!hv_cpu->synic_message_page)
+ if (!hv_cpu->hyp_synic_message_page)
pr_err("Fail to map synic message page.\n");
} else {
- simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
+ simp.base_simp_gpa = virt_to_phys(hv_cpu->hyp_synic_message_page)
>> HV_HYP_PAGE_SHIFT;
}
hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
- /* Setup the Synic's event page */
+ /* Setup the Synic's event page with the hypervisor. */
siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 1;
@@ -297,16 +309,17 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Mask out vTOM bit. ioremap_cache() maps decrypted */
u64 base = (siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT) &
~ms_hyperv.shared_gpa_boundary;
- hv_cpu->synic_event_page =
+ hv_cpu->hyp_synic_event_page =
(void *)ioremap_cache(base, HV_HYP_PAGE_SIZE);
- if (!hv_cpu->synic_event_page)
+ if (!hv_cpu->hyp_synic_event_page)
pr_err("Fail to map synic event page.\n");
} else {
- siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
+ siefp.base_siefp_gpa = virt_to_phys(hv_cpu->hyp_synic_event_page)
>> HV_HYP_PAGE_SHIFT;
}
hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
+ hv_enable_coco_interrupt(cpu, vmbus_interrupt, true);
/* Setup the shared SINT. */
if (vmbus_irq != -1)
@@ -317,6 +330,11 @@ void hv_synic_enable_regs(unsigned int cpu)
shared_sint.masked = false;
shared_sint.auto_eoi = hv_recommend_using_aeoi();
hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+}
+
+static void hv_hyp_synic_enable_interrupts(void)
+{
+ union hv_synic_scontrol sctrl;
/* Enable the global synic bit */
sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
@@ -325,23 +343,72 @@ void hv_synic_enable_regs(unsigned int cpu)
hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
}
+static void hv_para_synic_enable_regs(unsigned int cpu)
+{
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
+
+ /* Setup the Synic's message page with the paravisor. */
+ simp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIMP);
+ simp.simp_enabled = 1;
+ simp.base_simp_gpa = virt_to_phys(hv_cpu->para_synic_message_page)
+ >> HV_HYP_PAGE_SHIFT;
+ hv_para_set_synic_register(HV_MSR_SIMP, simp.as_uint64);
+
+ /* Setup the Synic's event page with the paravisor. */
+ siefp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIEFP);
+ siefp.siefp_enabled = 1;
+ siefp.base_siefp_gpa = virt_to_phys(hv_cpu->para_synic_event_page)
+ >> HV_HYP_PAGE_SHIFT;
+ hv_para_set_synic_register(HV_MSR_SIEFP, siefp.as_uint64);
+}
+
+static void hv_para_synic_enable_interrupts(void)
+{
+ union hv_synic_scontrol sctrl;
+
+ /* Enable the global synic bit */
+ sctrl.as_uint64 = hv_para_get_synic_register(HV_MSR_SCONTROL);
+ sctrl.enable = 1;
+ hv_para_set_synic_register(HV_MSR_SCONTROL, sctrl.as_uint64);
+}
+
int hv_synic_init(unsigned int cpu)
{
- hv_synic_enable_regs(cpu);
+ if (vmbus_is_confidential())
+ hv_para_synic_enable_regs(cpu);
+
+ /*
+ * The SINT is set in hv_hyp_synic_enable_regs() by calling
+ * hv_set_msr(). hv_set_msr() in turn has special case code for the
+ * SINT MSRs that writes to the hypervisor version of the MSR *and*
+ * the paravisor version of the MSR (but *without* the proxy bit when
+ * VMBus is confidential).
+ *
+ * Then enable interrupts via the paravisor if VMBus is confidential,
+ * and otherwise via the hypervisor.
+ */
+
+ hv_hyp_synic_enable_regs(cpu);
+ if (vmbus_is_confidential())
+ hv_para_synic_enable_interrupts();
+ else
+ hv_hyp_synic_enable_interrupts();
hv_stimer_legacy_init(cpu, VMBUS_MESSAGE_SINT);
return 0;
}
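Condensed, the per-CPU bring-up order that hv_synic_init() implements under the MSR special-casing described above:

	/*
	 * Confidential VMBus:     1. paravisor SIMP/SIEFP (hv_para_synic_enable_regs)
	 *                         2. hypervisor SIMP/SIEFP + SINT (hv_hyp_synic_enable_regs)
	 *                         3. paravisor SCONTROL enable
	 * Non-confidential VMBus: 1. hypervisor SIMP/SIEFP + SINT
	 *                         2. hypervisor SCONTROL enable
	 */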
-void hv_synic_disable_regs(unsigned int cpu)
+void hv_hyp_synic_disable_regs(unsigned int cpu)
{
struct hv_per_cpu_context *hv_cpu =
per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
- union hv_synic_scontrol sctrl;
shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);
@@ -350,18 +417,21 @@ void hv_synic_disable_regs(unsigned int cpu)
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+ hv_enable_coco_interrupt(cpu, vmbus_interrupt, false);
simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
/*
- * In Isolation VM, sim and sief pages are allocated by
+ * In Isolation VM, simp and sief pages are allocated by
* paravisor. These pages also will be used by kdump
* kernel. So just reset enable bit here and keep page
* addresses.
*/
simp.simp_enabled = 0;
if (ms_hyperv.paravisor_present || hv_root_partition()) {
- iounmap(hv_cpu->synic_message_page);
- hv_cpu->synic_message_page = NULL;
+ if (hv_cpu->hyp_synic_message_page) {
+ iounmap(hv_cpu->hyp_synic_message_page);
+ hv_cpu->hyp_synic_message_page = NULL;
+ }
} else {
simp.base_simp_gpa = 0;
}
@@ -372,21 +442,51 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.siefp_enabled = 0;
if (ms_hyperv.paravisor_present || hv_root_partition()) {
- iounmap(hv_cpu->synic_event_page);
- hv_cpu->synic_event_page = NULL;
+ if (hv_cpu->hyp_synic_event_page) {
+ iounmap(hv_cpu->hyp_synic_event_page);
+ hv_cpu->hyp_synic_event_page = NULL;
+ }
} else {
siefp.base_siefp_gpa = 0;
}
hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
+}
+
+static void hv_hyp_synic_disable_interrupts(void)
+{
+ union hv_synic_scontrol sctrl;
/* Disable the global synic bit */
sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
sctrl.enable = 0;
hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
+}
- if (vmbus_irq != -1)
- disable_percpu_irq(vmbus_irq);
+static void hv_para_synic_disable_regs(unsigned int cpu)
+{
+ union hv_synic_simp simp;
+ union hv_synic_siefp siefp;
+
+ /* Disable SynIC's message page in the paravisor. */
+ simp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIMP);
+ simp.simp_enabled = 0;
+ hv_para_set_synic_register(HV_MSR_SIMP, simp.as_uint64);
+
+ /* Disable SynIC's event page in the paravisor. */
+ siefp.as_uint64 = hv_para_get_synic_register(HV_MSR_SIEFP);
+ siefp.siefp_enabled = 0;
+ hv_para_set_synic_register(HV_MSR_SIEFP, siefp.as_uint64);
+}
+
+static void hv_para_synic_disable_interrupts(void)
+{
+ union hv_synic_scontrol sctrl;
+
+ /* Disable the global synic bit */
+ sctrl.as_uint64 = hv_para_get_synic_register(HV_MSR_SCONTROL);
+ sctrl.enable = 0;
+ hv_para_set_synic_register(HV_MSR_SCONTROL, sctrl.as_uint64);
}
#define HV_MAX_TRIES 3
@@ -399,16 +499,18 @@ void hv_synic_disable_regs(unsigned int cpu)
* that the normal interrupt handling mechanism will find and process the channel interrupt
* "very soon", and in the process clear the bit.
*/
-static bool hv_synic_event_pending(void)
+static bool __hv_synic_event_pending(union hv_synic_event_flags *event, int sint)
{
- struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
- union hv_synic_event_flags *event =
- (union hv_synic_event_flags *)hv_cpu->synic_event_page + VMBUS_MESSAGE_SINT;
- unsigned long *recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
+ unsigned long *recv_int_page;
bool pending;
u32 relid;
int tries = 0;
+ if (!event)
+ return false;
+
+ event += sint;
+ recv_int_page = event->flags; /* assumes VMBus version >= VERSION_WIN8 */
retry:
pending = false;
for_each_set_bit(relid, recv_int_page, HV_EVENT_FLAGS_COUNT) {
@@ -425,6 +527,17 @@ retry:
return pending;
}
+static bool hv_synic_event_pending(void)
+{
+ struct hv_per_cpu_context *hv_cpu = this_cpu_ptr(hv_context.cpu_context);
+ union hv_synic_event_flags *hyp_synic_event_page = hv_cpu->hyp_synic_event_page;
+ union hv_synic_event_flags *para_synic_event_page = hv_cpu->para_synic_event_page;
+
+ return
+ __hv_synic_event_pending(hyp_synic_event_page, VMBUS_MESSAGE_SINT) ||
+ __hv_synic_event_pending(para_synic_event_page, VMBUS_MESSAGE_SINT);
+}
+
static int hv_pick_new_cpu(struct vmbus_channel *channel)
{
int ret = -EBUSY;
@@ -517,7 +630,27 @@ int hv_synic_cleanup(unsigned int cpu)
always_cleanup:
hv_stimer_legacy_cleanup(cpu);
- hv_synic_disable_regs(cpu);
+ /*
+ * First, disable the event and message pages
+ * used for communicating with the host, and then
+ * disable the host interrupts if VMBus is not
+ * confidential.
+ */
+ hv_hyp_synic_disable_regs(cpu);
+ if (!vmbus_is_confidential())
+ hv_hyp_synic_disable_interrupts();
+
+ /*
+ * Perform the same steps for the Confidential VMBus.
+ * The sequencing provides the guarantee that no data
+ * may be posted for processing before disabling interrupts.
+ */
+ if (vmbus_is_confidential()) {
+ hv_para_synic_disable_regs(cpu);
+ hv_para_synic_disable_interrupts();
+ }
+ if (vmbus_irq != -1)
+ disable_percpu_irq(vmbus_irq);
return ret;
}
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 49898d10faff..0a3ab7efed46 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -257,7 +257,7 @@ static void hv_kmsg_dump_register(void)
static inline bool hv_output_page_exists(void)
{
- return hv_root_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
+ return hv_parent_partition() || IS_ENABLED(CONFIG_HYPERV_VTL_MODE);
}
void __init hv_get_partition_id(void)
@@ -315,9 +315,9 @@ int __init hv_common_init(void)
int i;
union hv_hypervisor_version_info version;
- /* Get information about the Hyper-V host version */
+ /* Get information about the Microsoft Hypervisor version */
if (!hv_get_hypervisor_version(&version))
- pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
+ pr_info("Hyper-V: Hypervisor Build %d.%d.%d.%d-%d-%d\n",
version.major_version, version.minor_version,
version.build_number, version.service_number,
version.service_pack, version.service_branch);
@@ -377,7 +377,7 @@ int __init hv_common_init(void)
BUG_ON(!hyperv_pcpu_output_arg);
}
- if (hv_root_partition()) {
+ if (hv_parent_partition()) {
hv_synic_eventring_tail = alloc_percpu(u8 *);
BUG_ON(!hv_synic_eventring_tail);
}
@@ -487,7 +487,7 @@ int hv_common_cpu_init(unsigned int cpu)
* online and then taken offline
*/
if (!*inputarg) {
- mem = kmalloc(pgcount * HV_HYP_PAGE_SIZE, flags);
+ mem = kmalloc_array(pgcount, HV_HYP_PAGE_SIZE, flags);
if (!mem)
return -ENOMEM;
@@ -531,7 +531,7 @@ int hv_common_cpu_init(unsigned int cpu)
if (msr_vp_index > hv_max_vp_index)
hv_max_vp_index = msr_vp_index;
- if (hv_root_partition()) {
+ if (hv_parent_partition()) {
synic_eventring_tail = (u8 **)this_cpu_ptr(hv_synic_eventring_tail);
*synic_eventring_tail = kcalloc(HV_SYNIC_SINT_COUNT,
sizeof(u8), flags);
@@ -558,7 +558,7 @@ int hv_common_cpu_die(unsigned int cpu)
* originally allocated memory is reused in hv_common_cpu_init().
*/
- if (hv_root_partition()) {
+ if (hv_parent_partition()) {
synic_eventring_tail = this_cpu_ptr(hv_synic_eventring_tail);
kfree(*synic_eventring_tail);
*synic_eventring_tail = NULL;
@@ -716,6 +716,27 @@ u64 __weak hv_tdx_hypercall(u64 control, u64 param1, u64 param2)
}
EXPORT_SYMBOL_GPL(hv_tdx_hypercall);
+void __weak hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set)
+{
+}
+EXPORT_SYMBOL_GPL(hv_enable_coco_interrupt);
+
+void __weak hv_para_set_sint_proxy(bool enable)
+{
+}
+EXPORT_SYMBOL_GPL(hv_para_set_sint_proxy);
+
+u64 __weak hv_para_get_synic_register(unsigned int reg)
+{
+ return ~0ULL;
+}
+EXPORT_SYMBOL_GPL(hv_para_get_synic_register);
+
+void __weak hv_para_set_synic_register(unsigned int reg, u64 val)
+{
+}
+EXPORT_SYMBOL_GPL(hv_para_set_synic_register);
+
void hv_identify_partition_type(void)
{
/* Assume guest role */
@@ -729,13 +750,17 @@ void hv_identify_partition_type(void)
* the root partition setting if also a Confidential VM.
*/
if ((ms_hyperv.priv_high & HV_CREATE_PARTITIONS) &&
- (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) &&
!(ms_hyperv.priv_high & HV_ISOLATION)) {
- pr_info("Hyper-V: running as root partition\n");
- if (IS_ENABLED(CONFIG_MSHV_ROOT))
- hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
- else
+
+ if (!IS_ENABLED(CONFIG_MSHV_ROOT)) {
pr_crit("Hyper-V: CONFIG_MSHV_ROOT not enabled!\n");
+ } else if (ms_hyperv.priv_high & HV_CPU_MANAGEMENT) {
+ pr_info("Hyper-V: running as root partition\n");
+ hv_curr_partition_type = HV_PARTITION_TYPE_ROOT;
+ } else {
+ pr_info("Hyper-V: running as L1VH partition\n");
+ hv_curr_partition_type = HV_PARTITION_TYPE_L1VH;
+ }
}
}
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 36ee89c0358b..7e9c8e169c66 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -586,7 +586,7 @@ static int util_probe(struct hv_device *dev,
(struct hv_util_service *)dev_id->driver_data;
int ret;
- srv->recv_buffer = kmalloc(HV_HYP_PAGE_SIZE * 4, GFP_KERNEL);
+ srv->recv_buffer = kmalloc_array(4, HV_HYP_PAGE_SIZE, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
srv->channel = dev->channel;
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index 832885198643..b3de35ff6334 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -129,8 +129,7 @@ static int hvt_op_open(struct inode *inode, struct file *file)
* device gets released.
*/
hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
- }
- else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
+ } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
/*
* We're switching from netlink communication to using char
* device. Issue the reset first.
@@ -195,7 +194,7 @@ static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
}
spin_unlock(&hvt_list_lock);
if (!hvt_found) {
- pr_warn("hvt_cn_callback: spurious message received!\n");
+ pr_warn("%s: spurious message received!\n", __func__);
return;
}
@@ -210,7 +209,7 @@ static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
if (hvt->mode == HVUTIL_TRANSPORT_NETLINK)
hvt_found->on_msg(msg->data, msg->len);
else
- pr_warn("hvt_cn_callback: unexpected netlink message!\n");
+ pr_warn("%s: unexpected netlink message!\n", __func__);
mutex_unlock(&hvt->lock);
}
@@ -260,8 +259,9 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
hvt->outmsg_len = len;
hvt->on_read = on_read_cb;
wake_up_interruptible(&hvt->outmsg_q);
- } else
+ } else {
ret = -ENOMEM;
+ }
out_unlock:
mutex_unlock(&hvt->lock);
return ret;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 0b450e53161e..b2862e0a317a 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -15,6 +15,7 @@
#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
+#include <asm/mshyperv.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
@@ -32,6 +33,7 @@
*/
#define HV_UTIL_NEGO_TIMEOUT 55
+void vmbus_isr(void);
/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
@@ -120,8 +122,26 @@ enum {
* Per cpu state for channel handling
*/
struct hv_per_cpu_context {
- void *synic_message_page;
- void *synic_event_page;
+ /*
+ * SynIC pages for communicating with the host.
+ *
+ * These pages are accessible to the host partition and the hypervisor.
+ * They may be used for exchanging data with the host partition and the
+ * hypervisor even when they aren't trusted yet the guest partition
+ * must be prepared to handle the malicious behavior.
+ */
+ void *hyp_synic_message_page;
+ void *hyp_synic_event_page;
+ /*
+ * SynIC pages for communicating with the paravisor.
+ *
+ * These pages may be accessed from within the guest partition only in
+ * CoCo VMs. Neither the host partition nor the hypervisor can access
+ * these pages in that case; they are used for exchanging data with the
+ * paravisor.
+ */
+ void *para_synic_message_page;
+ void *para_synic_event_page;
/*
* The page is only used in hv_post_message() for a TDX VM (with the
@@ -171,10 +191,10 @@ extern int hv_synic_alloc(void);
extern void hv_synic_free(void);
-extern void hv_synic_enable_regs(unsigned int cpu);
+extern void hv_hyp_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);
-extern void hv_synic_disable_regs(unsigned int cpu);
+extern void hv_hyp_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);
/* Interface */
@@ -182,7 +202,8 @@ extern int hv_synic_cleanup(unsigned int cpu);
void hv_ringbuffer_pre_init(struct vmbus_channel *channel);
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
- struct page *pages, u32 pagecnt, u32 max_pkt_size);
+ struct page *pages, u32 pagecnt, u32 max_pkt_size,
+ bool confidential);
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
@@ -333,6 +354,51 @@ extern const struct vmbus_channel_message_table_entry
/* General vmbus interface */
+bool vmbus_is_confidential(void);
+
+#if IS_ENABLED(CONFIG_HYPERV_VMBUS)
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
+{
+ /*
+ * On crash we're reading some other CPU's message page and we need
+	 * to be careful: this other CPU may have already cleared the header
+	 * and the host may have already delivered some other message there.
+	 * If we blindly write msg->header.message_type we're going
+	 * to lose it. We can still lose a message of the same type but
+ * we count on the fact that there can only be one
+ * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
+ * on crash.
+ */
+ if (cmpxchg(&msg->header.message_type, old_msg_type,
+ HVMSG_NONE) != old_msg_type)
+ return;
+
+ /*
+	 * The cmpxchg() above does an implicit memory barrier to
+	 * ensure the write to MessageType (i.e. set to
+	 * HVMSG_NONE) happens before we read the
+	 * MessagePending and EOMing. Otherwise, the EOMing
+	 * will not deliver any more messages since there is
+	 * no empty slot.
+ */
+ if (msg->header.message_flags.msg_pending) {
+ /*
+ * This will cause message queue rescan to
+ * possibly deliver another msg from the
+ * hypervisor
+ */
+ if (vmbus_is_confidential())
+ hv_para_set_synic_register(HV_MSR_EOM, 0);
+ else
+ hv_set_msr(HV_MSR_EOM, 0);
+ }
+}
+
+extern int vmbus_interrupt;
+extern int vmbus_irq;
+#endif /* CONFIG_HYPERV_VMBUS */
+
struct hv_device *vmbus_device_create(const guid_t *type,
const guid_t *instance,
struct vmbus_channel *channel);
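
A minimal usage sketch for the new vmbus_signal_eom() helper, assuming the usual VMBus message-loop shape (the real callers are the ISR and message DPC paths):

static void example_drain_message_slot(struct hv_message *msg)
{
	u32 type = msg->header.message_type;

	if (type == HVMSG_NONE)
		return;	/* nothing pending in this slot */

	/* ... dispatch the message to the channel message handlers ... */

	/* Release the slot; may trigger redelivery via the EOM register */
	vmbus_signal_eom(msg, type);
}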
diff --git a/drivers/hv/mshv.h b/drivers/hv/mshv.h
index 0340a67acd0a..d4813df92b9c 100644
--- a/drivers/hv/mshv.h
+++ b/drivers/hv/mshv.h
@@ -25,6 +25,4 @@ int hv_call_set_vp_registers(u32 vp_index, u64 partition_id, u16 count,
int hv_call_get_partition_property(u64 partition_id, u64 property_code,
u64 *property_value);
-int mshv_do_pre_guest_mode_work(ulong th_flags);
-
#endif /* _MSHV_H */
diff --git a/drivers/hv/mshv_common.c b/drivers/hv/mshv_common.c
index 6f227a8a5af7..58027b23c206 100644
--- a/drivers/hv/mshv_common.c
+++ b/drivers/hv/mshv_common.c
@@ -14,6 +14,9 @@
#include <asm/mshyperv.h>
#include <linux/resume_user_mode.h>
#include <linux/export.h>
+#include <linux/acpi.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
#include "mshv.h"
@@ -140,23 +143,97 @@ int hv_call_get_partition_property(u64 partition_id,
EXPORT_SYMBOL_GPL(hv_call_get_partition_property);
/*
- * Handle any pre-processing before going into the guest mode on this cpu, most
- * notably call schedule(). Must be invoked with both preemption and
- * interrupts enabled.
+ * Corresponding sleep states have to be initialized in order for a subsequent
+ * HVCALL_ENTER_SLEEP_STATE call to succeed. Currently only the S5 state (per
+ * ACPI 6.4 chapter 7.4.2) is relevant, though S1, S2 and S3 could be supported.
*
- * Returns: 0 on success, -errno on error.
+ * In order to pass proper PM values to mshv, ACPI must be initialized and
+ * must support the S5 sleep state when this function is invoked.
*/
-int mshv_do_pre_guest_mode_work(ulong th_flags)
+static int hv_initialize_sleep_states(void)
{
- if (th_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
- return -EINTR;
+ u64 status;
+ unsigned long flags;
+ struct hv_input_set_system_property *in;
+ acpi_status acpi_status;
+ u8 sleep_type_a, sleep_type_b;
+
+ if (!acpi_sleep_state_supported(ACPI_STATE_S5)) {
+ pr_err("%s: S5 sleep state not supported.\n", __func__);
+ return -ENODEV;
+ }
+
+ acpi_status = acpi_get_sleep_type_data(ACPI_STATE_S5, &sleep_type_a,
+ &sleep_type_b);
+ if (ACPI_FAILURE(acpi_status))
+ return -ENODEV;
- if (th_flags & _TIF_NEED_RESCHED)
- schedule();
+ local_irq_save(flags);
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ memset(in, 0, sizeof(*in));
+
+ in->property_id = HV_SYSTEM_PROPERTY_SLEEP_STATE;
+ in->set_sleep_state_info.sleep_state = HV_SLEEP_STATE_S5;
+ in->set_sleep_state_info.pm1a_slp_typ = sleep_type_a;
+ in->set_sleep_state_info.pm1b_slp_typ = sleep_type_b;
+
+ status = hv_do_hypercall(HVCALL_SET_SYSTEM_PROPERTY, in, NULL);
+ local_irq_restore(flags);
- if (th_flags & _TIF_NOTIFY_RESUME)
- resume_user_mode_work(NULL);
+ if (!hv_result_success(status)) {
+ hv_status_err(status, "\n");
+ return hv_result_to_errno(status);
+ }
return 0;
}
-EXPORT_SYMBOL_GPL(mshv_do_pre_guest_mode_work);
+
+/*
+ * This notifier initializes sleep states in the mshv hypervisor, which are
+ * used during power off.
+ */
+static int hv_reboot_notifier_handler(struct notifier_block *this,
+ unsigned long code, void *another)
+{
+ int ret = 0;
+
+ if (code == SYS_HALT || code == SYS_POWER_OFF)
+ ret = hv_initialize_sleep_states();
+
+ return ret ? NOTIFY_DONE : NOTIFY_OK;
+}
+
+static struct notifier_block hv_reboot_notifier = {
+ .notifier_call = hv_reboot_notifier_handler,
+};
+
+void hv_sleep_notifiers_register(void)
+{
+ int ret;
+
+ ret = register_reboot_notifier(&hv_reboot_notifier);
+ if (ret)
+ pr_err("%s: cannot register reboot notifier %d\n", __func__,
+ ret);
+}
+
+/*
+ * Power off the machine by entering S5 sleep state via Hyper-V hypercall.
+ * This call does not return if successful.
+ */
+void hv_machine_power_off(void)
+{
+ unsigned long flags;
+ struct hv_input_enter_sleep_state *in;
+
+ local_irq_save(flags);
+ in = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ in->sleep_state = HV_SLEEP_STATE_S5;
+
+ (void)hv_do_hypercall(HVCALL_ENTER_SLEEP_STATE, in, NULL);
+ local_irq_restore(flags);
+
+ /* should never reach here */
+ BUG();
+}
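
A sketch of how these helpers are expected to be wired together; the pm_power_off assignment is an assumption here, as the actual hook-up happens in the architecture setup code:

static void example_hv_poweroff_setup(void)
{
	/* Program the S5 sleep state into the hypervisor on halt/power-off */
	hv_sleep_notifiers_register();

	/* Route the final power-off through HVCALL_ENTER_SLEEP_STATE */
	pm_power_off = hv_machine_power_off;
}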
diff --git a/drivers/hv/mshv_eventfd.c b/drivers/hv/mshv_eventfd.c
index 806674722868..d93a18f09c76 100644
--- a/drivers/hv/mshv_eventfd.c
+++ b/drivers/hv/mshv_eventfd.c
@@ -163,8 +163,10 @@ static int mshv_try_assert_irq_fast(struct mshv_irqfd *irqfd)
if (hv_scheduler_type != HV_SCHEDULER_TYPE_ROOT)
return -EOPNOTSUPP;
+#if IS_ENABLED(CONFIG_X86)
if (irq->lapic_control.logical_dest_mode)
return -EOPNOTSUPP;
+#endif
vp = partition->pt_vp_array[irq->lapic_apic_id];
@@ -196,8 +198,10 @@ static void mshv_assert_irq_slow(struct mshv_irqfd *irqfd)
unsigned int seq;
int idx;
+#if IS_ENABLED(CONFIG_X86)
WARN_ON(irqfd->irqfd_resampler &&
!irq->lapic_control.level_triggered);
+#endif
idx = srcu_read_lock(&partition->pt_irq_srcu);
if (irqfd->irqfd_girq_ent.guest_irq_num) {
@@ -469,6 +473,7 @@ static int mshv_irqfd_assign(struct mshv_partition *pt,
init_poll_funcptr(&irqfd->irqfd_polltbl, mshv_irqfd_queue_proc);
spin_lock_irq(&pt->pt_irqfds_lock);
+#if IS_ENABLED(CONFIG_X86)
if (args->flags & BIT(MSHV_IRQFD_BIT_RESAMPLE) &&
!irqfd->irqfd_lapic_irq.lapic_control.level_triggered) {
/*
@@ -479,6 +484,7 @@ static int mshv_irqfd_assign(struct mshv_partition *pt,
ret = -EINVAL;
goto fail;
}
+#endif
ret = 0;
hlist_for_each_entry(tmp, &pt->pt_irqfds_list, irqfd_hnode) {
if (irqfd->irqfd_eventfd_ctx != tmp->irqfd_eventfd_ctx)
@@ -592,7 +598,7 @@ static void mshv_irqfd_release(struct mshv_partition *pt)
int mshv_irqfd_wq_init(void)
{
- irqfd_cleanup_wq = alloc_workqueue("mshv-irqfd-cleanup", 0, 0);
+ irqfd_cleanup_wq = alloc_workqueue("mshv-irqfd-cleanup", WQ_PERCPU, 0);
if (!irqfd_cleanup_wq)
return -ENOMEM;
diff --git a/drivers/hv/mshv_irq.c b/drivers/hv/mshv_irq.c
index d0fb9ef734f4..798e7e1ab06e 100644
--- a/drivers/hv/mshv_irq.c
+++ b/drivers/hv/mshv_irq.c
@@ -119,6 +119,10 @@ void mshv_copy_girq_info(struct mshv_guest_irq_ent *ent,
lirq->lapic_vector = ent->girq_irq_data & 0xFF;
lirq->lapic_apic_id = (ent->girq_addr_lo >> 12) & 0xFF;
lirq->lapic_control.interrupt_type = (ent->girq_irq_data & 0x700) >> 8;
+#if IS_ENABLED(CONFIG_X86)
lirq->lapic_control.level_triggered = (ent->girq_irq_data >> 15) & 0x1;
lirq->lapic_control.logical_dest_mode = (ent->girq_addr_lo >> 2) & 0x1;
+#elif IS_ENABLED(CONFIG_ARM64)
+ lirq->lapic_control.asserted = 1;
+#endif
}
diff --git a/drivers/hv/mshv_regions.c b/drivers/hv/mshv_regions.c
new file mode 100644
index 000000000000..202b9d551e39
--- /dev/null
+++ b/drivers/hv/mshv_regions.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Microsoft Corporation.
+ *
+ * Memory region management for mshv_root module.
+ *
+ * Authors: Microsoft Linux virtualization team
+ */
+
+#include <linux/hmm.h>
+#include <linux/hyperv.h>
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+
+#include <asm/mshyperv.h>
+
+#include "mshv_root.h"
+
+#define MSHV_MAP_FAULT_IN_PAGES PTRS_PER_PMD
+
+/**
+ * mshv_region_process_chunk - Processes a contiguous chunk of memory pages
+ * in a region.
+ * @region : Pointer to the memory region structure.
+ * @flags : Flags to pass to the handler.
+ * @page_offset: Offset into the region's pages array to start processing.
+ * @page_count : Number of pages to process.
+ * @handler : Callback function to handle the chunk.
+ *
+ * This function scans the region's pages starting from @page_offset,
+ * checking for contiguous present pages of the same size (normal or huge).
+ * It invokes @handler for the chunk of contiguous pages found. Returns the
+ * number of pages handled, or a negative error code if the first page is
+ * not present or the handler fails.
+ *
+ * Note: The @handler callback must be able to handle both normal and huge
+ * pages.
+ *
+ * Return: Number of pages handled, or negative error code.
+ */
+static long mshv_region_process_chunk(struct mshv_mem_region *region,
+ u32 flags,
+ u64 page_offset, u64 page_count,
+ int (*handler)(struct mshv_mem_region *region,
+ u32 flags,
+ u64 page_offset,
+ u64 page_count))
+{
+ u64 count, stride;
+ unsigned int page_order;
+ struct page *page;
+ int ret;
+
+ page = region->pages[page_offset];
+ if (!page)
+ return -EINVAL;
+
+ page_order = folio_order(page_folio(page));
+ /* The hypervisor only supports 4K and 2M page sizes */
+ if (page_order && page_order != HPAGE_PMD_ORDER)
+ return -EINVAL;
+
+ stride = 1 << page_order;
+
+ /* Start at stride since the first page is validated */
+ for (count = stride; count < page_count; count += stride) {
+ page = region->pages[page_offset + count];
+
+ /* Break if current page is not present */
+ if (!page)
+ break;
+
+ /* Break if page size changes */
+ if (page_order != folio_order(page_folio(page)))
+ break;
+ }
+
+ ret = handler(region, flags, page_offset, count);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/**
+ * mshv_region_process_range - Processes a range of memory pages in a
+ * region.
+ * @region : Pointer to the memory region structure.
+ * @flags : Flags to pass to the handler.
+ * @page_offset: Offset into the region's pages array to start processing.
+ * @page_count : Number of pages to process.
+ * @handler : Callback function to handle each chunk of contiguous
+ * pages.
+ *
+ * Iterates over the specified range of pages in @region, skipping
+ * non-present pages. For each contiguous chunk of present pages, invokes
+ * @handler via mshv_region_process_chunk.
+ *
+ * Note: The @handler callback must be able to handle both normal and huge
+ * pages.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int mshv_region_process_range(struct mshv_mem_region *region,
+ u32 flags,
+ u64 page_offset, u64 page_count,
+ int (*handler)(struct mshv_mem_region *region,
+ u32 flags,
+ u64 page_offset,
+ u64 page_count))
+{
+ long ret;
+
+ if (page_offset + page_count > region->nr_pages)
+ return -EINVAL;
+
+ while (page_count) {
+ /* Skip non-present pages */
+ if (!region->pages[page_offset]) {
+ page_offset++;
+ page_count--;
+ continue;
+ }
+
+ ret = mshv_region_process_chunk(region, flags,
+ page_offset,
+ page_count,
+ handler);
+ if (ret < 0)
+ return ret;
+
+ page_offset += ret;
+ page_count -= ret;
+ }
+
+ return 0;
+}
+
+struct mshv_mem_region *mshv_region_create(u64 guest_pfn, u64 nr_pages,
+ u64 uaddr, u32 flags)
+{
+ struct mshv_mem_region *region;
+
+ region = vzalloc(sizeof(*region) + sizeof(struct page *) * nr_pages);
+ if (!region)
+ return ERR_PTR(-ENOMEM);
+
+ region->nr_pages = nr_pages;
+ region->start_gfn = guest_pfn;
+ region->start_uaddr = uaddr;
+ region->hv_map_flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_ADJUSTABLE;
+ if (flags & BIT(MSHV_SET_MEM_BIT_WRITABLE))
+ region->hv_map_flags |= HV_MAP_GPA_WRITABLE;
+ if (flags & BIT(MSHV_SET_MEM_BIT_EXECUTABLE))
+ region->hv_map_flags |= HV_MAP_GPA_EXECUTABLE;
+
+ kref_init(&region->refcount);
+
+ return region;
+}
+
+static int mshv_region_chunk_share(struct mshv_mem_region *region,
+ u32 flags,
+ u64 page_offset, u64 page_count)
+{
+ struct page *page = region->pages[page_offset];
+
+ if (PageHuge(page) || PageTransCompound(page))
+ flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
+
+ return hv_call_modify_spa_host_access(region->partition->pt_id,
+ region->pages + page_offset,
+ page_count,
+ HV_MAP_GPA_READABLE |
+ HV_MAP_GPA_WRITABLE,
+ flags, true);
+}
+
+int mshv_region_share(struct mshv_mem_region *region)
+{
+ u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_SHARED;
+
+ return mshv_region_process_range(region, flags,
+ 0, region->nr_pages,
+ mshv_region_chunk_share);
+}
+
+static int mshv_region_chunk_unshare(struct mshv_mem_region *region,
+ u32 flags,
+ u64 page_offset, u64 page_count)
+{
+ struct page *page = region->pages[page_offset];
+
+ if (PageHuge(page) || PageTransCompound(page))
+ flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
+
+ return hv_call_modify_spa_host_access(region->partition->pt_id,
+ region->pages + page_offset,
+ page_count, 0,
+ flags, false);
+}
+
+int mshv_region_unshare(struct mshv_mem_region *region)
+{
+ u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE;
+
+ return mshv_region_process_range(region, flags,
+ 0, region->nr_pages,
+ mshv_region_chunk_unshare);
+}
+
+static int mshv_region_chunk_remap(struct mshv_mem_region *region,
+ u32 flags,
+ u64 page_offset, u64 page_count)
+{
+ struct page *page = region->pages[page_offset];
+
+ if (PageHuge(page) || PageTransCompound(page))
+ flags |= HV_MAP_GPA_LARGE_PAGE;
+
+ return hv_call_map_gpa_pages(region->partition->pt_id,
+ region->start_gfn + page_offset,
+ page_count, flags,
+ region->pages + page_offset);
+}
+
+static int mshv_region_remap_pages(struct mshv_mem_region *region,
+ u32 map_flags,
+ u64 page_offset, u64 page_count)
+{
+ return mshv_region_process_range(region, map_flags,
+ page_offset, page_count,
+ mshv_region_chunk_remap);
+}
+
+int mshv_region_map(struct mshv_mem_region *region)
+{
+ u32 map_flags = region->hv_map_flags;
+
+ return mshv_region_remap_pages(region, map_flags,
+ 0, region->nr_pages);
+}
+
+static void mshv_region_invalidate_pages(struct mshv_mem_region *region,
+ u64 page_offset, u64 page_count)
+{
+ if (region->type == MSHV_REGION_TYPE_MEM_PINNED)
+ unpin_user_pages(region->pages + page_offset, page_count);
+
+ memset(region->pages + page_offset, 0,
+ page_count * sizeof(struct page *));
+}
+
+void mshv_region_invalidate(struct mshv_mem_region *region)
+{
+ mshv_region_invalidate_pages(region, 0, region->nr_pages);
+}
+
+int mshv_region_pin(struct mshv_mem_region *region)
+{
+ u64 done_count, nr_pages;
+ struct page **pages;
+ __u64 userspace_addr;
+ int ret;
+
+ for (done_count = 0; done_count < region->nr_pages; done_count += ret) {
+ pages = region->pages + done_count;
+ userspace_addr = region->start_uaddr +
+ done_count * HV_HYP_PAGE_SIZE;
+ nr_pages = min(region->nr_pages - done_count,
+ MSHV_PIN_PAGES_BATCH_SIZE);
+
+ /*
+ * Pinning assuming 4k pages works for large pages too.
+ * All page structs within the large page are returned.
+ *
+ * Pin requests are batched because pin_user_pages_fast
+ * with the FOLL_LONGTERM flag does a large temporary
+ * allocation of contiguous memory.
+ */
+ ret = pin_user_pages_fast(userspace_addr, nr_pages,
+ FOLL_WRITE | FOLL_LONGTERM,
+ pages);
+ if (ret < 0)
+ goto release_pages;
+ }
+
+ return 0;
+
+release_pages:
+ mshv_region_invalidate_pages(region, 0, done_count);
+ return ret;
+}
+
+static int mshv_region_chunk_unmap(struct mshv_mem_region *region,
+ u32 flags,
+ u64 page_offset, u64 page_count)
+{
+ struct page *page = region->pages[page_offset];
+
+ if (PageHuge(page) || PageTransCompound(page))
+ flags |= HV_UNMAP_GPA_LARGE_PAGE;
+
+ return hv_call_unmap_gpa_pages(region->partition->pt_id,
+ region->start_gfn + page_offset,
+ page_count, flags);
+}
+
+static int mshv_region_unmap(struct mshv_mem_region *region)
+{
+ return mshv_region_process_range(region, 0,
+ 0, region->nr_pages,
+ mshv_region_chunk_unmap);
+}
+
+static void mshv_region_destroy(struct kref *ref)
+{
+ struct mshv_mem_region *region =
+ container_of(ref, struct mshv_mem_region, refcount);
+ struct mshv_partition *partition = region->partition;
+ int ret;
+
+ if (region->type == MSHV_REGION_TYPE_MEM_MOVABLE)
+ mshv_region_movable_fini(region);
+
+ if (mshv_partition_encrypted(partition)) {
+ ret = mshv_region_share(region);
+ if (ret) {
+ pt_err(partition,
+			       "Failed to regain access to memory; unpinning user pages will fail and crash the host, error: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ mshv_region_unmap(region);
+
+ mshv_region_invalidate(region);
+
+ vfree(region);
+}
+
+void mshv_region_put(struct mshv_mem_region *region)
+{
+ kref_put(&region->refcount, mshv_region_destroy);
+}
+
+int mshv_region_get(struct mshv_mem_region *region)
+{
+ return kref_get_unless_zero(&region->refcount);
+}
+
+/**
+ * mshv_region_hmm_fault_and_lock - Handle HMM faults and lock the memory region
+ * @region: Pointer to the memory region structure
+ * @range: Pointer to the HMM range structure
+ *
+ * This function performs the following steps:
+ * 1. Reads the notifier sequence for the HMM range.
+ * 2. Acquires a read lock on the memory map.
+ * 3. Handles HMM faults for the specified range.
+ * 4. Releases the read lock on the memory map.
+ * 5. If successful, locks the memory region mutex.
+ * 6. Verifies if the notifier sequence has changed during the operation.
+ *    If it has, releases the mutex and returns -EBUSY, matching the
+ *    hmm_range_fault() return code so the caller retries.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+static int mshv_region_hmm_fault_and_lock(struct mshv_mem_region *region,
+ struct hmm_range *range)
+{
+ int ret;
+
+ range->notifier_seq = mmu_interval_read_begin(range->notifier);
+ mmap_read_lock(region->mni.mm);
+ ret = hmm_range_fault(range);
+ mmap_read_unlock(region->mni.mm);
+ if (ret)
+ return ret;
+
+ mutex_lock(&region->mutex);
+
+ if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
+ mutex_unlock(&region->mutex);
+ cond_resched();
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * mshv_region_range_fault - Handle memory range faults for a given region.
+ * @region: Pointer to the memory region structure.
+ * @page_offset: Offset of the page within the region.
+ * @page_count: Number of pages to handle.
+ *
+ * This function resolves memory faults for a specified range of pages
+ * within a memory region. It uses HMM (Heterogeneous Memory Management)
+ * to fault in the required pages and updates the region's page array.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int mshv_region_range_fault(struct mshv_mem_region *region,
+ u64 page_offset, u64 page_count)
+{
+ struct hmm_range range = {
+ .notifier = &region->mni,
+ .default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
+ };
+ unsigned long *pfns;
+ int ret;
+ u64 i;
+
+ pfns = kmalloc_array(page_count, sizeof(*pfns), GFP_KERNEL);
+ if (!pfns)
+ return -ENOMEM;
+
+ range.hmm_pfns = pfns;
+ range.start = region->start_uaddr + page_offset * HV_HYP_PAGE_SIZE;
+ range.end = range.start + page_count * HV_HYP_PAGE_SIZE;
+
+ do {
+ ret = mshv_region_hmm_fault_and_lock(region, &range);
+ } while (ret == -EBUSY);
+
+ if (ret)
+ goto out;
+
+ for (i = 0; i < page_count; i++)
+ region->pages[page_offset + i] = hmm_pfn_to_page(pfns[i]);
+
+ ret = mshv_region_remap_pages(region, region->hv_map_flags,
+ page_offset, page_count);
+
+ mutex_unlock(&region->mutex);
+out:
+ kfree(pfns);
+ return ret;
+}
+
+bool mshv_region_handle_gfn_fault(struct mshv_mem_region *region, u64 gfn)
+{
+ u64 page_offset, page_count;
+ int ret;
+
+	/* Align the page offset down to a MSHV_MAP_FAULT_IN_PAGES boundary. */
+ page_offset = ALIGN_DOWN(gfn - region->start_gfn,
+ MSHV_MAP_FAULT_IN_PAGES);
+
+ /* Map more pages than requested to reduce the number of faults. */
+ page_count = min(region->nr_pages - page_offset,
+ MSHV_MAP_FAULT_IN_PAGES);
+
+ ret = mshv_region_range_fault(region, page_offset, page_count);
+
+ WARN_ONCE(ret,
+ "p%llu: GPA intercept failed: region %#llx-%#llx, gfn %#llx, page_offset %llu, page_count %llu\n",
+ region->partition->pt_id, region->start_uaddr,
+ region->start_uaddr + (region->nr_pages << HV_HYP_PAGE_SHIFT),
+ gfn, page_offset, page_count);
+
+ return !ret;
+}
+
+/**
+ * mshv_region_interval_invalidate - Invalidate a range of memory region
+ * @mni: Pointer to the mmu_interval_notifier structure
+ * @range: Pointer to the mmu_notifier_range structure
+ * @cur_seq: Current sequence number for the interval notifier
+ *
+ * This function invalidates a memory region by remapping its pages with
+ * no access permissions. It locks the region's mutex to ensure thread safety
+ * and updates the sequence number for the interval notifier. If the range
+ * is blockable, it uses a blocking lock; otherwise, it attempts a non-blocking
+ * lock and returns false if unsuccessful.
+ *
+ * NOTE: Failure to invalidate a region is a serious error, as the pages will
+ * be considered freed while they are still mapped by the hypervisor.
+ * Any attempt to access such pages will likely crash the system.
+ *
+ * Return: true if the region was successfully invalidated, false otherwise.
+ */
+static bool mshv_region_interval_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct mshv_mem_region *region = container_of(mni,
+ struct mshv_mem_region,
+ mni);
+ u64 page_offset, page_count;
+ unsigned long mstart, mend;
+ int ret = -EPERM;
+
+ if (mmu_notifier_range_blockable(range))
+ mutex_lock(&region->mutex);
+ else if (!mutex_trylock(&region->mutex))
+ goto out_fail;
+
+ mmu_interval_set_seq(mni, cur_seq);
+
+ mstart = max(range->start, region->start_uaddr);
+ mend = min(range->end, region->start_uaddr +
+ (region->nr_pages << HV_HYP_PAGE_SHIFT));
+
+ page_offset = HVPFN_DOWN(mstart - region->start_uaddr);
+ page_count = HVPFN_DOWN(mend - mstart);
+
+ ret = mshv_region_remap_pages(region, HV_MAP_GPA_NO_ACCESS,
+ page_offset, page_count);
+ if (ret)
+ goto out_fail;
+
+ mshv_region_invalidate_pages(region, page_offset, page_count);
+
+ mutex_unlock(&region->mutex);
+
+ return true;
+
+out_fail:
+ WARN_ONCE(ret,
+ "Failed to invalidate region %#llx-%#llx (range %#lx-%#lx, event: %u, pages %#llx-%#llx, mm: %#llx): %d\n",
+ region->start_uaddr,
+ region->start_uaddr + (region->nr_pages << HV_HYP_PAGE_SHIFT),
+ range->start, range->end, range->event,
+ page_offset, page_offset + page_count - 1, (u64)range->mm, ret);
+ return false;
+}
+
+static const struct mmu_interval_notifier_ops mshv_region_mni_ops = {
+ .invalidate = mshv_region_interval_invalidate,
+};
+
+void mshv_region_movable_fini(struct mshv_mem_region *region)
+{
+ mmu_interval_notifier_remove(&region->mni);
+}
+
+bool mshv_region_movable_init(struct mshv_mem_region *region)
+{
+ int ret;
+
+ ret = mmu_interval_notifier_insert(&region->mni, current->mm,
+ region->start_uaddr,
+ region->nr_pages << HV_HYP_PAGE_SHIFT,
+ &mshv_region_mni_ops);
+ if (ret)
+ return false;
+
+ mutex_init(&region->mutex);
+
+ return true;
+}
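
A sketch of the intended lifecycle for a pinned region, under the assumption that the caller links the region to its partition the way mshv_partition_create_region() does below:

static int example_pinned_region_setup(struct mshv_partition *partition,
				       u64 gfn, u64 nr_pages, u64 uaddr,
				       u32 flags)
{
	struct mshv_mem_region *region;
	int ret;

	region = mshv_region_create(gfn, nr_pages, uaddr, flags);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region->partition = partition;
	region->type = MSHV_REGION_TYPE_MEM_PINNED;

	ret = mshv_region_pin(region);
	if (!ret)
		ret = mshv_region_map(region);
	if (ret)
		mshv_region_put(region);	/* drops the kref_init() reference */

	return ret;
}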
diff --git a/drivers/hv/mshv_root.h b/drivers/hv/mshv_root.h
index e3931b0f1269..3c1d88b36741 100644
--- a/drivers/hv/mshv_root.h
+++ b/drivers/hv/mshv_root.h
@@ -15,6 +15,7 @@
#include <linux/hashtable.h>
#include <linux/dev_printk.h>
#include <linux/build_bug.h>
+#include <linux/mmu_notifier.h>
#include <uapi/linux/mshv.h>
/*
@@ -70,18 +71,23 @@ do { \
#define vp_info(v, fmt, ...) vp_devprintk(info, v, fmt, ##__VA_ARGS__)
#define vp_dbg(v, fmt, ...) vp_devprintk(dbg, v, fmt, ##__VA_ARGS__)
+enum mshv_region_type {
+ MSHV_REGION_TYPE_MEM_PINNED,
+ MSHV_REGION_TYPE_MEM_MOVABLE,
+ MSHV_REGION_TYPE_MMIO
+};
+
struct mshv_mem_region {
struct hlist_node hnode;
+ struct kref refcount;
u64 nr_pages;
u64 start_gfn;
u64 start_uaddr;
u32 hv_map_flags;
- struct {
- u64 large_pages: 1; /* 2MiB */
- u64 range_pinned: 1;
- u64 reserved: 62;
- } flags;
struct mshv_partition *partition;
+ enum mshv_region_type type;
+ struct mmu_interval_notifier mni;
+ struct mutex mutex; /* protects region pages remapping */
struct page *pages[];
};
@@ -98,6 +104,8 @@ struct mshv_partition {
u64 pt_id;
refcount_t pt_ref_count;
struct mutex pt_mutex;
+
+ spinlock_t pt_mem_regions_lock;
struct hlist_head pt_mem_regions; // not ordered
u32 pt_vp_count;
@@ -169,7 +177,7 @@ struct mshv_girq_routing_table {
};
struct hv_synic_pages {
- struct hv_message_page *synic_message_page;
+ struct hv_message_page *hyp_synic_message_page;
struct hv_synic_event_flags_page *synic_event_flags_page;
struct hv_synic_event_ring_page *synic_event_ring_page;
};
@@ -178,6 +186,7 @@ struct mshv_root {
struct hv_synic_pages __percpu *synic_pages;
spinlock_t pt_ht_lock;
DECLARE_HASHTABLE(pt_htable, MSHV_PARTITIONS_HASH_BITS);
+ struct hv_partition_property_vmm_capabilities vmm_caps;
};
/*
@@ -278,11 +287,12 @@ int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
/* Choose between pages and bytes */
struct hv_vp_state_data state_data, u64 page_count,
struct page **pages, u32 num_bytes, u8 *bytes);
-int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
- union hv_input_vtl input_vtl,
- struct page **state_page);
-int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
- union hv_input_vtl input_vtl);
+int hv_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl,
+ struct page **state_page);
+int hv_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ struct page *state_page,
+ union hv_input_vtl input_vtl);
int hv_call_create_port(u64 port_partition_id, union hv_port_id port_id,
u64 connection_partition_id, struct hv_port_info *port_info,
u8 port_vtl, u8 min_connection_vtl, int node);
@@ -295,17 +305,32 @@ int hv_call_connect_port(u64 port_partition_id, union hv_port_id port_id,
int hv_call_disconnect_port(u64 connection_partition_id,
union hv_connection_id connection_id);
int hv_call_notify_port_ring_empty(u32 sint_index);
-int hv_call_map_stat_page(enum hv_stats_object_type type,
- const union hv_stats_object_identity *identity,
- void **addr);
-int hv_call_unmap_stat_page(enum hv_stats_object_type type,
- const union hv_stats_object_identity *identity);
+int hv_map_stats_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ void **addr);
+int hv_unmap_stats_page(enum hv_stats_object_type type, void *page_addr,
+ const union hv_stats_object_identity *identity);
int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
u64 page_struct_count, u32 host_access,
u32 flags, u8 acquire);
+int hv_call_get_partition_property_ex(u64 partition_id, u64 property_code, u64 arg,
+ void *property_value, size_t property_value_sz);
extern struct mshv_root mshv_root;
extern enum hv_scheduler_type hv_scheduler_type;
extern u8 * __percpu *hv_synic_eventring_tail;
+struct mshv_mem_region *mshv_region_create(u64 guest_pfn, u64 nr_pages,
+ u64 uaddr, u32 flags);
+int mshv_region_share(struct mshv_mem_region *region);
+int mshv_region_unshare(struct mshv_mem_region *region);
+int mshv_region_map(struct mshv_mem_region *region);
+void mshv_region_invalidate(struct mshv_mem_region *region);
+int mshv_region_pin(struct mshv_mem_region *region);
+void mshv_region_put(struct mshv_mem_region *region);
+int mshv_region_get(struct mshv_mem_region *region);
+bool mshv_region_handle_gfn_fault(struct mshv_mem_region *region, u64 gfn);
+void mshv_region_movable_fini(struct mshv_mem_region *region);
+bool mshv_region_movable_init(struct mshv_mem_region *region);
+
#endif /* _MSHV_ROOT_H_ */
diff --git a/drivers/hv/mshv_root_hv_call.c b/drivers/hv/mshv_root_hv_call.c
index c9c274f29c3c..598eaff4ff29 100644
--- a/drivers/hv/mshv_root_hv_call.c
+++ b/drivers/hv/mshv_root_hv_call.c
@@ -388,7 +388,13 @@ int hv_call_assert_virtual_interrupt(u64 partition_id, u32 vector,
memset(input, 0, sizeof(*input));
input->partition_id = partition_id;
input->vector = vector;
+ /*
+	 * NOTE: dest_addr only needs to be provided when asserting an
+	 * interrupt on x86 platforms.
+ */
+#if IS_ENABLED(CONFIG_X86)
input->dest_addr = dest_addr;
+#endif
input->control = control;
status = hv_do_hypercall(HVCALL_ASSERT_VIRTUAL_INTERRUPT, input, NULL);
local_irq_restore(flags);
@@ -526,9 +532,9 @@ int hv_call_set_vp_state(u32 vp_index, u64 partition_id,
return ret;
}
-int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
- union hv_input_vtl input_vtl,
- struct page **state_page)
+static int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl,
+ struct page **state_page)
{
struct hv_input_map_vp_state_page *input;
struct hv_output_map_vp_state_page *output;
@@ -542,12 +548,20 @@ int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
input = *this_cpu_ptr(hyperv_pcpu_input_arg);
output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+ memset(input, 0, sizeof(*input));
input->partition_id = partition_id;
input->vp_index = vp_index;
input->type = type;
input->input_vtl = input_vtl;
- status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input, output);
+ if (*state_page) {
+ input->flags.map_location_provided = 1;
+ input->requested_map_location =
+ page_to_pfn(*state_page);
+ }
+
+ status = hv_do_hypercall(HVCALL_MAP_VP_STATE_PAGE, input,
+ output);
if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
if (hv_result_success(status))
@@ -565,8 +579,41 @@ int hv_call_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
return ret;
}
-int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
- union hv_input_vtl input_vtl)
+static bool mshv_use_overlay_gpfn(void)
+{
+ return hv_l1vh_partition() &&
+ mshv_root.vmm_caps.vmm_can_provide_overlay_gpfn;
+}
+
+int hv_map_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl,
+ struct page **state_page)
+{
+ int ret = 0;
+ struct page *allocated_page = NULL;
+
+ if (mshv_use_overlay_gpfn()) {
+ allocated_page = alloc_page(GFP_KERNEL);
+ if (!allocated_page)
+ return -ENOMEM;
+ *state_page = allocated_page;
+ } else {
+ *state_page = NULL;
+ }
+
+ ret = hv_call_map_vp_state_page(partition_id, vp_index, type, input_vtl,
+ state_page);
+
+ if (ret && allocated_page) {
+ __free_page(allocated_page);
+ *state_page = NULL;
+ }
+
+ return ret;
+}
+
+static int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ union hv_input_vtl input_vtl)
{
unsigned long flags;
u64 status;
@@ -590,6 +637,48 @@ int hv_call_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
return hv_result_to_errno(status);
}
+int hv_unmap_vp_state_page(u64 partition_id, u32 vp_index, u32 type,
+ struct page *state_page, union hv_input_vtl input_vtl)
+{
+ int ret = hv_call_unmap_vp_state_page(partition_id, vp_index, type, input_vtl);
+
+ if (mshv_use_overlay_gpfn() && state_page)
+ __free_page(state_page);
+
+ return ret;
+}
+
+int hv_call_get_partition_property_ex(u64 partition_id, u64 property_code,
+ u64 arg, void *property_value,
+ size_t property_value_sz)
+{
+ u64 status;
+ unsigned long flags;
+ struct hv_input_get_partition_property_ex *input;
+ struct hv_output_get_partition_property_ex *output;
+
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+ output = *this_cpu_ptr(hyperv_pcpu_output_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->partition_id = partition_id;
+ input->property_code = property_code;
+ input->arg = arg;
+ status = hv_do_hypercall(HVCALL_GET_PARTITION_PROPERTY_EX, input, output);
+
+ if (!hv_result_success(status)) {
+ local_irq_restore(flags);
+ hv_status_debug(status, "\n");
+ return hv_result_to_errno(status);
+ }
+ memcpy(property_value, &output->property_value, property_value_sz);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
int
hv_call_clear_virtual_interrupt(u64 partition_id)
{
@@ -724,9 +813,51 @@ hv_call_notify_port_ring_empty(u32 sint_index)
return hv_result_to_errno(status);
}
-int hv_call_map_stat_page(enum hv_stats_object_type type,
- const union hv_stats_object_identity *identity,
- void **addr)
+static int hv_call_map_stats_page2(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ u64 map_location)
+{
+ unsigned long flags;
+ struct hv_input_map_stats_page2 *input;
+ u64 status;
+ int ret;
+
+ if (!map_location || !mshv_use_overlay_gpfn())
+ return -EINVAL;
+
+ do {
+ local_irq_save(flags);
+ input = *this_cpu_ptr(hyperv_pcpu_input_arg);
+
+ memset(input, 0, sizeof(*input));
+ input->type = type;
+ input->identity = *identity;
+ input->map_location = map_location;
+
+ status = hv_do_hypercall(HVCALL_MAP_STATS_PAGE2, input, NULL);
+
+ local_irq_restore(flags);
+
+ ret = hv_result_to_errno(status);
+
+ if (!ret)
+ break;
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ hv_status_debug(status, "\n");
+ break;
+ }
+
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ hv_current_partition_id, 1);
+ } while (!ret);
+
+ return ret;
+}
+
+static int hv_call_map_stats_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ void **addr)
{
unsigned long flags;
struct hv_input_map_stats_page *input;
@@ -765,8 +896,38 @@ int hv_call_map_stat_page(enum hv_stats_object_type type,
return ret;
}
-int hv_call_unmap_stat_page(enum hv_stats_object_type type,
- const union hv_stats_object_identity *identity)
+int hv_map_stats_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity,
+ void **addr)
+{
+ int ret;
+ struct page *allocated_page = NULL;
+
+ if (!addr)
+ return -EINVAL;
+
+ if (mshv_use_overlay_gpfn()) {
+ allocated_page = alloc_page(GFP_KERNEL);
+ if (!allocated_page)
+ return -ENOMEM;
+
+ ret = hv_call_map_stats_page2(type, identity,
+ page_to_pfn(allocated_page));
+ *addr = page_address(allocated_page);
+ } else {
+ ret = hv_call_map_stats_page(type, identity, addr);
+ }
+
+ if (ret && allocated_page) {
+ __free_page(allocated_page);
+ *addr = NULL;
+ }
+
+ return ret;
+}
+
+static int hv_call_unmap_stats_page(enum hv_stats_object_type type,
+ const union hv_stats_object_identity *identity)
{
unsigned long flags;
struct hv_input_unmap_stats_page *input;
@@ -785,6 +946,19 @@ int hv_call_unmap_stat_page(enum hv_stats_object_type type,
return hv_result_to_errno(status);
}
+int hv_unmap_stats_page(enum hv_stats_object_type type, void *page_addr,
+ const union hv_stats_object_identity *identity)
+{
+ int ret;
+
+ ret = hv_call_unmap_stats_page(type, identity);
+
+ if (mshv_use_overlay_gpfn() && page_addr)
+ __free_page(virt_to_page(page_addr));
+
+ return ret;
+}
+
int hv_call_modify_spa_host_access(u64 partition_id, struct page **pages,
u64 page_struct_count, u32 host_access,
u32 flags, u8 acquire)
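
A sketch of how mshv_root.vmm_caps might be populated via the new extended property hypercall; HV_PARTITION_PROPERTY_VMM_CAPABILITIES is a placeholder for the real property code defined in the hyperv headers:

static int example_query_vmm_caps(void)
{
	/* Placeholder property code (assumption); see the hyperv headers */
	return hv_call_get_partition_property_ex(HV_PARTITION_ID_SELF,
						 HV_PARTITION_PROPERTY_VMM_CAPABILITIES,
						 0, &mshv_root.vmm_caps,
						 sizeof(mshv_root.vmm_caps));
}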
diff --git a/drivers/hv/mshv_root_main.c b/drivers/hv/mshv_root_main.c
index 72df774e410a..1134a82c7881 100644
--- a/drivers/hv/mshv_root_main.c
+++ b/drivers/hv/mshv_root_main.c
@@ -8,6 +8,7 @@
* Authors: Microsoft Linux virtualization team
*/
+#include <linux/entry-virt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
@@ -28,6 +29,7 @@
#include <linux/crash_dump.h>
#include <linux/panic_notifier.h>
#include <linux/vmalloc.h>
+#include <linux/rseq.h>
#include "mshv_eventfd.h"
#include "mshv.h"
@@ -37,16 +39,10 @@ MODULE_AUTHOR("Microsoft");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V root partition VMM interface /dev/mshv");
-/* TODO move this to mshyperv.h when needed outside driver */
-static inline bool hv_parent_partition(void)
-{
- return hv_root_partition();
-}
-
/* TODO move this to another file when debugfs code is added */
enum hv_stats_vp_counters { /* HV_THREAD_COUNTER */
#if defined(CONFIG_X86)
- VpRootDispatchThreadBlocked = 201,
+ VpRootDispatchThreadBlocked = 202,
#elif defined(CONFIG_ARM64)
VpRootDispatchThreadBlocked = 94,
#endif
@@ -127,6 +123,7 @@ static struct miscdevice mshv_dev = {
*/
static u16 mshv_passthru_hvcalls[] = {
HVCALL_GET_PARTITION_PROPERTY,
+ HVCALL_GET_PARTITION_PROPERTY_EX,
HVCALL_SET_PARTITION_PROPERTY,
HVCALL_INSTALL_INTERCEPT,
HVCALL_GET_VP_REGISTERS,
@@ -141,6 +138,16 @@ static u16 mshv_passthru_hvcalls[] = {
HVCALL_GET_VP_CPUID_VALUES,
};
+/*
+ * Only allow hypercalls that are safe to be called by the VMM with the host
+ * partition as target (i.e. HV_PARTITION_ID_SELF). Carefully audit that a
+ * hypercall cannot be misused by the VMM before adding it to this list.
+ */
+static u16 mshv_self_passthru_hvcalls[] = {
+ HVCALL_GET_PARTITION_PROPERTY,
+ HVCALL_GET_PARTITION_PROPERTY_EX,
+};
+
static bool mshv_hvcall_is_async(u16 code)
{
switch (code) {
@@ -152,18 +159,38 @@ static bool mshv_hvcall_is_async(u16 code)
return false;
}
+static bool mshv_passthru_hvcall_allowed(u16 code, u64 pt_id)
+{
+ int i;
+ int n = ARRAY_SIZE(mshv_passthru_hvcalls);
+ u16 *allowed_hvcalls = mshv_passthru_hvcalls;
+
+ if (pt_id == HV_PARTITION_ID_SELF) {
+ n = ARRAY_SIZE(mshv_self_passthru_hvcalls);
+ allowed_hvcalls = mshv_self_passthru_hvcalls;
+ }
+
+ for (i = 0; i < n; ++i)
+ if (allowed_hvcalls[i] == code)
+ return true;
+
+ return false;
+}
+
static int mshv_ioctl_passthru_hvcall(struct mshv_partition *partition,
bool partition_locked,
void __user *user_args)
{
u64 status;
- int ret = 0, i;
+ int ret = 0;
bool is_async;
struct mshv_root_hvcall args;
struct page *page;
unsigned int pages_order;
void *input_pg = NULL;
void *output_pg = NULL;
+ u16 reps_completed;
+ u64 pt_id = partition ? partition->pt_id : HV_PARTITION_ID_SELF;
if (copy_from_user(&args, user_args, sizeof(args)))
return -EFAULT;
@@ -175,17 +202,13 @@ static int mshv_ioctl_passthru_hvcall(struct mshv_partition *partition,
if (args.out_ptr && (!args.out_sz || args.out_sz > HV_HYP_PAGE_SIZE))
return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(mshv_passthru_hvcalls); ++i)
- if (args.code == mshv_passthru_hvcalls[i])
- break;
-
- if (i >= ARRAY_SIZE(mshv_passthru_hvcalls))
+ if (!mshv_passthru_hvcall_allowed(args.code, pt_id))
return -EINVAL;
is_async = mshv_hvcall_is_async(args.code);
if (is_async) {
/* async hypercalls can only be called from partition fd */
- if (!partition_locked)
+ if (!partition || !partition_locked)
return -EINVAL;
ret = mshv_init_async_handler(partition);
if (ret)
@@ -213,43 +236,44 @@ static int mshv_ioctl_passthru_hvcall(struct mshv_partition *partition,
* NOTE: This only works because all the allowed hypercalls' input
* structs begin with a u64 partition_id field.
*/
- *(u64 *)input_pg = partition->pt_id;
+ *(u64 *)input_pg = pt_id;
- if (args.reps)
- status = hv_do_rep_hypercall(args.code, args.reps, 0,
- input_pg, output_pg);
- else
- status = hv_do_hypercall(args.code, input_pg, output_pg);
-
- if (hv_result(status) == HV_STATUS_CALL_PENDING) {
- if (is_async) {
- mshv_async_hvcall_handler(partition, &status);
- } else { /* Paranoia check. This shouldn't happen! */
- ret = -EBADFD;
- goto free_pages_out;
+ reps_completed = 0;
+ do {
+ if (args.reps) {
+ status = hv_do_rep_hypercall_ex(args.code, args.reps,
+ 0, reps_completed,
+ input_pg, output_pg);
+ reps_completed = hv_repcomp(status);
+ } else {
+ status = hv_do_hypercall(args.code, input_pg, output_pg);
}
- }
- if (hv_result(status) == HV_STATUS_INSUFFICIENT_MEMORY) {
- ret = hv_call_deposit_pages(NUMA_NO_NODE, partition->pt_id, 1);
- if (!ret)
- ret = -EAGAIN;
- } else if (!hv_result_success(status)) {
- ret = hv_result_to_errno(status);
- }
+ if (hv_result(status) == HV_STATUS_CALL_PENDING) {
+ if (is_async) {
+ mshv_async_hvcall_handler(partition, &status);
+ } else { /* Paranoia check. This shouldn't happen! */
+ ret = -EBADFD;
+ goto free_pages_out;
+ }
+ }
+
+ if (hv_result_success(status))
+ break;
+
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY)
+ ret = hv_result_to_errno(status);
+ else
+ ret = hv_call_deposit_pages(NUMA_NO_NODE,
+ pt_id, 1);
+ } while (!ret);
- /*
- * Always return the status and output data regardless of result.
- * The VMM may need it to determine how to proceed. E.g. the status may
- * contain the number of reps completed if a rep hypercall partially
- * succeeded.
- */
args.status = hv_result(status);
- args.reps = args.reps ? hv_repcomp(status) : 0;
+ args.reps = reps_completed;
if (copy_to_user(user_args, &args, sizeof(args)))
ret = -EFAULT;
- if (output_pg &&
+ if (!ret && output_pg &&
copy_to_user((void __user *)args.out_ptr, output_pg, args.out_sz))
ret = -EFAULT;
@@ -487,28 +511,6 @@ mshv_vp_wait_for_hv_kick(struct mshv_vp *vp)
return 0;
}
-static int mshv_pre_guest_mode_work(struct mshv_vp *vp)
-{
- const ulong work_flags = _TIF_NOTIFY_SIGNAL | _TIF_SIGPENDING |
- _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME;
- ulong th_flags;
-
- th_flags = read_thread_flags();
- while (th_flags & work_flags) {
- int ret;
-
- /* nb: following will call schedule */
- ret = mshv_do_pre_guest_mode_work(th_flags);
-
- if (ret)
- return ret;
-
- th_flags = read_thread_flags();
- }
-
- return 0;
-}
-
/* Must be called with interrupts enabled */
static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
{
@@ -529,9 +531,11 @@ static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
u32 flags = 0;
struct hv_output_dispatch_vp output;
- ret = mshv_pre_guest_mode_work(vp);
- if (ret)
- break;
+ if (__xfer_to_guest_mode_work_pending()) {
+ ret = xfer_to_guest_mode_handle_work();
+ if (ret)
+ break;
+ }
if (vp->run.flags.intercept_suspend)
flags |= HV_DISPATCH_VP_FLAG_CLEAR_INTERCEPT_SUSPEND;
@@ -585,20 +589,106 @@ static long mshv_run_vp_with_root_scheduler(struct mshv_vp *vp)
}
} while (!vp->run.flags.intercept_suspend);
+ rseq_virt_userspace_exit();
+
return ret;
}
static_assert(sizeof(struct hv_message) <= MSHV_RUN_VP_BUF_SZ,
"sizeof(struct hv_message) must not exceed MSHV_RUN_VP_BUF_SZ");
+static struct mshv_mem_region *
+mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn)
+{
+ struct mshv_mem_region *region;
+
+ hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) {
+ if (gfn >= region->start_gfn &&
+ gfn < region->start_gfn + region->nr_pages)
+ return region;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_X86_64
+static struct mshv_mem_region *
+mshv_partition_region_by_gfn_get(struct mshv_partition *p, u64 gfn)
+{
+ struct mshv_mem_region *region;
+
+ spin_lock(&p->pt_mem_regions_lock);
+ region = mshv_partition_region_by_gfn(p, gfn);
+ if (!region || !mshv_region_get(region)) {
+ spin_unlock(&p->pt_mem_regions_lock);
+ return NULL;
+ }
+ spin_unlock(&p->pt_mem_regions_lock);
+
+ return region;
+}
+
+/**
+ * mshv_handle_gpa_intercept - Handle GPA (Guest Physical Address) intercepts.
+ * @vp: Pointer to the virtual processor structure.
+ *
+ * This function processes GPA intercepts by identifying the memory region
+ * corresponding to the intercepted GPA, aligning the page offset, and
+ * mapping the required pages. It ensures that the region is valid and
+ * handles faults efficiently by mapping multiple pages at once.
+ *
+ * Return: true if the intercept was handled successfully, false otherwise.
+ */
+static bool mshv_handle_gpa_intercept(struct mshv_vp *vp)
+{
+ struct mshv_partition *p = vp->vp_partition;
+ struct mshv_mem_region *region;
+ struct hv_x64_memory_intercept_message *msg;
+ bool ret;
+ u64 gfn;
+
+ msg = (struct hv_x64_memory_intercept_message *)
+ vp->vp_intercept_msg_page->u.payload;
+
+ gfn = HVPFN_DOWN(msg->guest_physical_address);
+
+ region = mshv_partition_region_by_gfn_get(p, gfn);
+ if (!region)
+ return false;
+
+ /* Only movable memory ranges are supported for GPA intercepts */
+ if (region->type == MSHV_REGION_TYPE_MEM_MOVABLE)
+ ret = mshv_region_handle_gfn_fault(region, gfn);
+ else
+ ret = false;
+
+ mshv_region_put(region);
+
+ return ret;
+}
+#else /* CONFIG_X86_64 */
+static bool mshv_handle_gpa_intercept(struct mshv_vp *vp) { return false; }
+#endif /* CONFIG_X86_64 */
+
+static bool mshv_vp_handle_intercept(struct mshv_vp *vp)
+{
+ switch (vp->vp_intercept_msg_page->header.message_type) {
+ case HVMSG_GPA_INTERCEPT:
+ return mshv_handle_gpa_intercept(vp);
+ }
+ return false;
+}
+
static long mshv_vp_ioctl_run_vp(struct mshv_vp *vp, void __user *ret_msg)
{
long rc;
- if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
- rc = mshv_run_vp_with_root_scheduler(vp);
- else
- rc = mshv_run_vp_with_hyp_scheduler(vp);
+ do {
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
+ rc = mshv_run_vp_with_root_scheduler(vp);
+ else
+ rc = mshv_run_vp_with_hyp_scheduler(vp);
+ } while (rc == 0 && mshv_vp_handle_intercept(vp));
if (rc)
return rc;
@@ -866,7 +956,8 @@ mshv_vp_release(struct inode *inode, struct file *filp)
return 0;
}
-static void mshv_vp_stats_unmap(u64 partition_id, u32 vp_index)
+static void mshv_vp_stats_unmap(u64 partition_id, u32 vp_index,
+ void *stats_pages[])
{
union hv_stats_object_identity identity = {
.vp.partition_id = partition_id,
@@ -874,10 +965,10 @@ static void mshv_vp_stats_unmap(u64 partition_id, u32 vp_index)
};
identity.vp.stats_area_type = HV_STATS_AREA_SELF;
- hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+ hv_unmap_stats_page(HV_STATS_OBJECT_VP, NULL, &identity);
identity.vp.stats_area_type = HV_STATS_AREA_PARENT;
- hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+ hv_unmap_stats_page(HV_STATS_OBJECT_VP, NULL, &identity);
}
static int mshv_vp_stats_map(u64 partition_id, u32 vp_index,
@@ -890,14 +981,14 @@ static int mshv_vp_stats_map(u64 partition_id, u32 vp_index,
int err;
identity.vp.stats_area_type = HV_STATS_AREA_SELF;
- err = hv_call_map_stat_page(HV_STATS_OBJECT_VP, &identity,
- &stats_pages[HV_STATS_AREA_SELF]);
+ err = hv_map_stats_page(HV_STATS_OBJECT_VP, &identity,
+ &stats_pages[HV_STATS_AREA_SELF]);
if (err)
return err;
identity.vp.stats_area_type = HV_STATS_AREA_PARENT;
- err = hv_call_map_stat_page(HV_STATS_OBJECT_VP, &identity,
- &stats_pages[HV_STATS_AREA_PARENT]);
+ err = hv_map_stats_page(HV_STATS_OBJECT_VP, &identity,
+ &stats_pages[HV_STATS_AREA_PARENT]);
if (err)
goto unmap_self;
@@ -905,7 +996,7 @@ static int mshv_vp_stats_map(u64 partition_id, u32 vp_index,
unmap_self:
identity.vp.stats_area_type = HV_STATS_AREA_SELF;
- hv_call_unmap_stat_page(HV_STATS_OBJECT_VP, &identity);
+ hv_unmap_stats_page(HV_STATS_OBJECT_VP, NULL, &identity);
return err;
}
@@ -915,7 +1006,7 @@ mshv_partition_ioctl_create_vp(struct mshv_partition *partition,
{
struct mshv_create_vp args;
struct mshv_vp *vp;
- struct page *intercept_message_page, *register_page, *ghcb_page;
+ struct page *intercept_msg_page, *register_page, *ghcb_page;
void *stats_pages[2];
long ret;
@@ -933,33 +1024,34 @@ mshv_partition_ioctl_create_vp(struct mshv_partition *partition,
if (ret)
return ret;
- ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
- HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
- input_vtl_zero,
- &intercept_message_page);
+ ret = hv_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ input_vtl_zero, &intercept_msg_page);
if (ret)
goto destroy_vp;
if (!mshv_partition_encrypted(partition)) {
- ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
- HV_VP_STATE_PAGE_REGISTERS,
- input_vtl_zero,
- &register_page);
+ ret = hv_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ input_vtl_zero, &register_page);
if (ret)
goto unmap_intercept_message_page;
}
if (mshv_partition_encrypted(partition) &&
is_ghcb_mapping_available()) {
- ret = hv_call_map_vp_state_page(partition->pt_id, args.vp_index,
- HV_VP_STATE_PAGE_GHCB,
- input_vtl_normal,
- &ghcb_page);
+ ret = hv_map_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ input_vtl_normal, &ghcb_page);
if (ret)
goto unmap_register_page;
}
- if (hv_parent_partition()) {
+ /*
+	 * This mapping of the stats page is for detecting if the dispatch
+	 * thread is blocked - only relevant for the root scheduler.
+ */
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT) {
ret = mshv_vp_stats_map(partition->pt_id, args.vp_index,
stats_pages);
if (ret)
@@ -981,14 +1073,14 @@ mshv_partition_ioctl_create_vp(struct mshv_partition *partition,
atomic64_set(&vp->run.vp_signaled_count, 0);
vp->vp_index = args.vp_index;
- vp->vp_intercept_msg_page = page_to_virt(intercept_message_page);
+ vp->vp_intercept_msg_page = page_to_virt(intercept_msg_page);
if (!mshv_partition_encrypted(partition))
vp->vp_register_page = page_to_virt(register_page);
if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available())
vp->vp_ghcb_page = page_to_virt(ghcb_page);
- if (hv_parent_partition())
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
memcpy(vp->vp_stats_pages, stats_pages, sizeof(stats_pages));
/*
@@ -1011,24 +1103,22 @@ put_partition:
free_vp:
kfree(vp);
unmap_stats_pages:
- if (hv_parent_partition())
- mshv_vp_stats_unmap(partition->pt_id, args.vp_index);
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
+ mshv_vp_stats_unmap(partition->pt_id, args.vp_index, stats_pages);
unmap_ghcb_page:
- if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available()) {
- hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
- HV_VP_STATE_PAGE_GHCB,
- input_vtl_normal);
- }
+ if (mshv_partition_encrypted(partition) && is_ghcb_mapping_available())
+ hv_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_GHCB, ghcb_page,
+ input_vtl_normal);
unmap_register_page:
- if (!mshv_partition_encrypted(partition)) {
- hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
- HV_VP_STATE_PAGE_REGISTERS,
- input_vtl_zero);
- }
+ if (!mshv_partition_encrypted(partition))
+ hv_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ register_page, input_vtl_zero);
unmap_intercept_message_page:
- hv_call_unmap_vp_state_page(partition->pt_id, args.vp_index,
- HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
- input_vtl_zero);
+ hv_unmap_vp_state_page(partition->pt_id, args.vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ intercept_msg_page, input_vtl_zero);
destroy_vp:
hv_call_delete_vp(partition->pt_id, args.vp_index);
return ret;
@@ -1056,162 +1146,6 @@ static void mshv_async_hvcall_handler(void *data, u64 *status)
*status = partition->async_hypercall_status;
}
-static int
-mshv_partition_region_share(struct mshv_mem_region *region)
-{
- u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_SHARED;
-
- if (region->flags.large_pages)
- flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
-
- return hv_call_modify_spa_host_access(region->partition->pt_id,
- region->pages, region->nr_pages,
- HV_MAP_GPA_READABLE | HV_MAP_GPA_WRITABLE,
- flags, true);
-}
-
-static int
-mshv_partition_region_unshare(struct mshv_mem_region *region)
-{
- u32 flags = HV_MODIFY_SPA_PAGE_HOST_ACCESS_MAKE_EXCLUSIVE;
-
- if (region->flags.large_pages)
- flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
-
- return hv_call_modify_spa_host_access(region->partition->pt_id,
- region->pages, region->nr_pages,
- 0,
- flags, false);
-}
-
-static int
-mshv_region_remap_pages(struct mshv_mem_region *region, u32 map_flags,
- u64 page_offset, u64 page_count)
-{
- if (page_offset + page_count > region->nr_pages)
- return -EINVAL;
-
- if (region->flags.large_pages)
- map_flags |= HV_MAP_GPA_LARGE_PAGE;
-
- /* ask the hypervisor to map guest ram */
- return hv_call_map_gpa_pages(region->partition->pt_id,
- region->start_gfn + page_offset,
- page_count, map_flags,
- region->pages + page_offset);
-}
-
-static int
-mshv_region_map(struct mshv_mem_region *region)
-{
- u32 map_flags = region->hv_map_flags;
-
- return mshv_region_remap_pages(region, map_flags,
- 0, region->nr_pages);
-}
-
-static void
-mshv_region_evict_pages(struct mshv_mem_region *region,
- u64 page_offset, u64 page_count)
-{
- if (region->flags.range_pinned)
- unpin_user_pages(region->pages + page_offset, page_count);
-
- memset(region->pages + page_offset, 0,
- page_count * sizeof(struct page *));
-}
-
-static void
-mshv_region_evict(struct mshv_mem_region *region)
-{
- mshv_region_evict_pages(region, 0, region->nr_pages);
-}
-
-static int
-mshv_region_populate_pages(struct mshv_mem_region *region,
- u64 page_offset, u64 page_count)
-{
- u64 done_count, nr_pages;
- struct page **pages;
- __u64 userspace_addr;
- int ret;
-
- if (page_offset + page_count > region->nr_pages)
- return -EINVAL;
-
- for (done_count = 0; done_count < page_count; done_count += ret) {
- pages = region->pages + page_offset + done_count;
- userspace_addr = region->start_uaddr +
- (page_offset + done_count) *
- HV_HYP_PAGE_SIZE;
- nr_pages = min(page_count - done_count,
- MSHV_PIN_PAGES_BATCH_SIZE);
-
- /*
- * Pinning assuming 4k pages works for large pages too.
- * All page structs within the large page are returned.
- *
- * Pin requests are batched because pin_user_pages_fast
- * with the FOLL_LONGTERM flag does a large temporary
- * allocation of contiguous memory.
- */
- if (region->flags.range_pinned)
- ret = pin_user_pages_fast(userspace_addr,
- nr_pages,
- FOLL_WRITE | FOLL_LONGTERM,
- pages);
- else
- ret = -EOPNOTSUPP;
-
- if (ret < 0)
- goto release_pages;
- }
-
- if (PageHuge(region->pages[page_offset]))
- region->flags.large_pages = true;
-
- return 0;
-
-release_pages:
- mshv_region_evict_pages(region, page_offset, done_count);
- return ret;
-}
-
-static int
-mshv_region_populate(struct mshv_mem_region *region)
-{
- return mshv_region_populate_pages(region, 0, region->nr_pages);
-}
-
-static struct mshv_mem_region *
-mshv_partition_region_by_gfn(struct mshv_partition *partition, u64 gfn)
-{
- struct mshv_mem_region *region;
-
- hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) {
- if (gfn >= region->start_gfn &&
- gfn < region->start_gfn + region->nr_pages)
- return region;
- }
-
- return NULL;
-}
-
-static struct mshv_mem_region *
-mshv_partition_region_by_uaddr(struct mshv_partition *partition, u64 uaddr)
-{
- struct mshv_mem_region *region;
-
- hlist_for_each_entry(region, &partition->pt_mem_regions, hnode) {
- if (uaddr >= region->start_uaddr &&
- uaddr < region->start_uaddr +
- (region->nr_pages << HV_HYP_PAGE_SHIFT))
- return region;
- }
-
- return NULL;
-}
-
/*
* NB: caller checks and makes sure mem->size is page aligned
* Returns: 0 with regionpp updated on success, or -errno
@@ -1221,53 +1155,61 @@ static int mshv_partition_create_region(struct mshv_partition *partition,
struct mshv_mem_region **regionpp,
bool is_mmio)
{
- struct mshv_mem_region *region;
+ struct mshv_mem_region *rg;
u64 nr_pages = HVPFN_DOWN(mem->size);
/* Reject overlapping regions */
- if (mshv_partition_region_by_gfn(partition, mem->guest_pfn) ||
- mshv_partition_region_by_gfn(partition, mem->guest_pfn + nr_pages - 1) ||
- mshv_partition_region_by_uaddr(partition, mem->userspace_addr) ||
- mshv_partition_region_by_uaddr(partition, mem->userspace_addr + mem->size - 1))
+ spin_lock(&partition->pt_mem_regions_lock);
+ hlist_for_each_entry(rg, &partition->pt_mem_regions, hnode) {
+ if (mem->guest_pfn + nr_pages <= rg->start_gfn ||
+ rg->start_gfn + rg->nr_pages <= mem->guest_pfn)
+ continue;
+ spin_unlock(&partition->pt_mem_regions_lock);
return -EEXIST;
+ }
+ spin_unlock(&partition->pt_mem_regions_lock);
- region = vzalloc(sizeof(*region) + sizeof(struct page *) * nr_pages);
- if (!region)
- return -ENOMEM;
-
- region->nr_pages = nr_pages;
- region->start_gfn = mem->guest_pfn;
- region->start_uaddr = mem->userspace_addr;
- region->hv_map_flags = HV_MAP_GPA_READABLE | HV_MAP_GPA_ADJUSTABLE;
- if (mem->flags & BIT(MSHV_SET_MEM_BIT_WRITABLE))
- region->hv_map_flags |= HV_MAP_GPA_WRITABLE;
- if (mem->flags & BIT(MSHV_SET_MEM_BIT_EXECUTABLE))
- region->hv_map_flags |= HV_MAP_GPA_EXECUTABLE;
+ rg = mshv_region_create(mem->guest_pfn, nr_pages,
+ mem->userspace_addr, mem->flags);
+ if (IS_ERR(rg))
+ return PTR_ERR(rg);
- /* Note: large_pages flag populated when we pin the pages */
- if (!is_mmio)
- region->flags.range_pinned = true;
+ if (is_mmio)
+ rg->type = MSHV_REGION_TYPE_MMIO;
+ else if (mshv_partition_encrypted(partition) ||
+ !mshv_region_movable_init(rg))
+ rg->type = MSHV_REGION_TYPE_MEM_PINNED;
+ else
+ rg->type = MSHV_REGION_TYPE_MEM_MOVABLE;
- region->partition = partition;
+ rg->partition = partition;
- *regionpp = region;
+ *regionpp = rg;
return 0;
}
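
The overlap check in mshv_partition_create_region() reduces to the standard half-open interval test: two GFN ranges are disjoint exactly when one ends at or before the other begins. A minimal standalone sketch of that predicate (hypothetical helper, not part of the patch):

#include <stdbool.h>
#include <stdint.h>

/* Two [start, start + len) GFN ranges overlap unless one ends first. */
static bool gfn_ranges_overlap(uint64_t a_start, uint64_t a_len,
                               uint64_t b_start, uint64_t b_len)
{
	return !(a_start + a_len <= b_start || b_start + b_len <= a_start);
}
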
-/*
- * Map guest ram. if snp, make sure to release that from the host first
- * Side Effects: In case of failure, pages are unpinned when feasible.
+/**
+ * mshv_prepare_pinned_region - Pin and map a pinned memory region
+ * @region: Pointer to the memory region structure
+ *
+ * Process a memory region that is explicitly marked as pinned. Pinned
+ * regions are populated and mapped up front and do not rely on fault-based
+ * population. Pin the region's pages, unshare them from the host if the
+ * partition is encrypted (SNP), and map the region into the guest. On
+ * failure, share the pages back and invalidate the region as needed.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
-static int
-mshv_partition_mem_region_map(struct mshv_mem_region *region)
+static int mshv_prepare_pinned_region(struct mshv_mem_region *region)
{
struct mshv_partition *partition = region->partition;
int ret;
- ret = mshv_region_populate(region);
+ ret = mshv_region_pin(region);
if (ret) {
- pt_err(partition, "Failed to populate memory region: %d\n",
+ pt_err(partition, "Failed to pin memory region: %d\n",
ret);
goto err_out;
}
@@ -1280,12 +1222,12 @@ mshv_partition_mem_region_map(struct mshv_mem_region *region)
* access to guest memory regions.
*/
if (mshv_partition_encrypted(partition)) {
- ret = mshv_partition_region_unshare(region);
+ ret = mshv_region_unshare(region);
if (ret) {
pt_err(partition,
"Failed to unshare memory region (guest_pfn: %llu): %d\n",
region->start_gfn, ret);
- goto evict_region;
+ goto invalidate_region;
}
}
@@ -1293,9 +1235,9 @@ mshv_partition_mem_region_map(struct mshv_mem_region *region)
if (ret && mshv_partition_encrypted(partition)) {
int shrc;
- shrc = mshv_partition_region_share(region);
+ shrc = mshv_region_share(region);
if (!shrc)
- goto evict_region;
+ goto invalidate_region;
pt_err(partition,
"Failed to share memory region (guest_pfn: %llu): %d\n",
@@ -1309,8 +1251,8 @@ mshv_partition_mem_region_map(struct mshv_mem_region *region)
return 0;
-evict_region:
- mshv_region_evict(region);
+invalidate_region:
+ mshv_region_invalidate(region);
err_out:
return ret;
}
@@ -1355,17 +1297,35 @@ mshv_map_user_memory(struct mshv_partition *partition,
if (ret)
return ret;
- if (is_mmio)
- ret = hv_call_map_mmio_pages(partition->pt_id, mem.guest_pfn,
- mmio_pfn, HVPFN_DOWN(mem.size));
- else
- ret = mshv_partition_mem_region_map(region);
+ switch (region->type) {
+ case MSHV_REGION_TYPE_MEM_PINNED:
+ ret = mshv_prepare_pinned_region(region);
+ break;
+ case MSHV_REGION_TYPE_MEM_MOVABLE:
+ /*
+ * For movable memory regions, remap with no access to let
+ * the hypervisor track dirty pages, enabling pre-copy live
+ * migration.
+ */
+ ret = hv_call_map_gpa_pages(partition->pt_id,
+ region->start_gfn,
+ region->nr_pages,
+ HV_MAP_GPA_NO_ACCESS, NULL);
+ break;
+ case MSHV_REGION_TYPE_MMIO:
+ ret = hv_call_map_mmio_pages(partition->pt_id,
+ region->start_gfn,
+ mmio_pfn,
+ region->nr_pages);
+ break;
+ }
if (ret)
goto errout;
- /* Install the new region */
+ spin_lock(&partition->pt_mem_regions_lock);
hlist_add_head(&region->hnode, &partition->pt_mem_regions);
+ spin_unlock(&partition->pt_mem_regions_lock);
return 0;
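
For reference, a hedged userspace sketch of driving this path: it assumes the MSHV_SET_GUEST_MEMORY ioctl and struct mshv_user_mem_region from <linux/mshv.h>, and backs a guest GFN range with anonymous host memory so the kernel side classifies it as a pinned or movable RAM region rather than MMIO:

#include <linux/mshv.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

/* Back a guest GFN range with anonymous host memory (sketch). */
static int map_guest_ram(int pt_fd, __u64 guest_pfn, size_t size)
{
	struct mshv_user_mem_region mem = { 0 };
	void *va = mmap(NULL, size, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (va == MAP_FAILED)
		return -1;

	mem.guest_pfn = guest_pfn;
	mem.userspace_addr = (__u64)(uintptr_t)va;
	mem.size = size;	/* caller guarantees page alignment */
	mem.flags = (1ULL << MSHV_SET_MEM_BIT_WRITABLE) |
		    (1ULL << MSHV_SET_MEM_BIT_EXECUTABLE);

	return ioctl(pt_fd, MSHV_SET_GUEST_MEMORY, &mem);
}
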
@@ -1380,33 +1340,32 @@ mshv_unmap_user_memory(struct mshv_partition *partition,
struct mshv_user_mem_region mem)
{
struct mshv_mem_region *region;
- u32 unmap_flags = 0;
if (!(mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP)))
return -EINVAL;
+ spin_lock(&partition->pt_mem_regions_lock);
+
region = mshv_partition_region_by_gfn(partition, mem.guest_pfn);
- if (!region)
- return -EINVAL;
+ if (!region) {
+ spin_unlock(&partition->pt_mem_regions_lock);
+ return -ENOENT;
+ }
/* Paranoia check */
if (region->start_uaddr != mem.userspace_addr ||
region->start_gfn != mem.guest_pfn ||
- region->nr_pages != HVPFN_DOWN(mem.size))
+ region->nr_pages != HVPFN_DOWN(mem.size)) {
+ spin_unlock(&partition->pt_mem_regions_lock);
return -EINVAL;
+ }
hlist_del(&region->hnode);
- if (region->flags.large_pages)
- unmap_flags |= HV_UNMAP_GPA_LARGE_PAGE;
-
- /* ignore unmap failures and continue as process may be exiting */
- hv_call_unmap_gpa_pages(partition->pt_id, region->start_gfn,
- region->nr_pages, unmap_flags);
+ spin_unlock(&partition->pt_mem_regions_lock);
- mshv_region_evict(region);
+ mshv_region_put(region);
- vfree(region);
return 0;
}
@@ -1742,8 +1701,8 @@ static void destroy_partition(struct mshv_partition *partition)
{
struct mshv_vp *vp;
struct mshv_mem_region *region;
- int i, ret;
struct hlist_node *n;
+ int i;
if (refcount_read(&partition->pt_ref_count)) {
pt_err(partition,
@@ -1765,28 +1724,32 @@ static void destroy_partition(struct mshv_partition *partition)
if (!vp)
continue;
- if (hv_parent_partition())
- mshv_vp_stats_unmap(partition->pt_id, vp->vp_index);
+ if (hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT)
+ mshv_vp_stats_unmap(partition->pt_id, vp->vp_index,
+ (void **)vp->vp_stats_pages);
if (vp->vp_register_page) {
- (void)hv_call_unmap_vp_state_page(partition->pt_id,
- vp->vp_index,
- HV_VP_STATE_PAGE_REGISTERS,
- input_vtl_zero);
+ (void)hv_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_REGISTERS,
+ virt_to_page(vp->vp_register_page),
+ input_vtl_zero);
vp->vp_register_page = NULL;
}
- (void)hv_call_unmap_vp_state_page(partition->pt_id,
- vp->vp_index,
- HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
- input_vtl_zero);
+ (void)hv_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_INTERCEPT_MESSAGE,
+ virt_to_page(vp->vp_intercept_msg_page),
+ input_vtl_zero);
vp->vp_intercept_msg_page = NULL;
if (vp->vp_ghcb_page) {
- (void)hv_call_unmap_vp_state_page(partition->pt_id,
- vp->vp_index,
- HV_VP_STATE_PAGE_GHCB,
- input_vtl_normal);
+ (void)hv_unmap_vp_state_page(partition->pt_id,
+ vp->vp_index,
+ HV_VP_STATE_PAGE_GHCB,
+ virt_to_page(vp->vp_ghcb_page),
+ input_vtl_normal);
vp->vp_ghcb_page = NULL;
}
@@ -1803,24 +1766,10 @@ static void destroy_partition(struct mshv_partition *partition)
remove_partition(partition);
- /* Remove regions, regain access to the memory and unpin the pages */
hlist_for_each_entry_safe(region, n, &partition->pt_mem_regions,
hnode) {
hlist_del(&region->hnode);
-
- if (mshv_partition_encrypted(partition)) {
- ret = mshv_partition_region_share(region);
- if (ret) {
- pt_err(partition,
- "Failed to regain access to memory, unpinning user pages will fail and crash the host error: %d\n",
- ret);
- return;
- }
- }
-
- mshv_region_evict(region);
-
- vfree(region);
+ mshv_region_put(region);
}
/* Withdraw and free all pages we deposited */
@@ -1887,43 +1836,117 @@ add_partition(struct mshv_partition *partition)
return 0;
}
-static long
-mshv_ioctl_create_partition(void __user *user_arg, struct device *module_dev)
+static_assert(MSHV_NUM_CPU_FEATURES_BANKS ==
+ HV_PARTITION_PROCESSOR_FEATURES_BANKS);
+
+static long mshv_ioctl_process_pt_flags(void __user *user_arg, u64 *pt_flags,
+ struct hv_partition_creation_properties *cr_props,
+ union hv_partition_isolation_properties *isol_props)
{
- struct mshv_create_partition args;
- u64 creation_flags;
- struct hv_partition_creation_properties creation_properties = {};
- union hv_partition_isolation_properties isolation_properties = {};
- struct mshv_partition *partition;
- struct file *file;
- int fd;
- long ret;
+ int i;
+ struct mshv_create_partition_v2 args;
+ union hv_partition_processor_features *disabled_procs;
+ union hv_partition_processor_xsave_features *disabled_xsave;
- if (copy_from_user(&args, user_arg, sizeof(args)))
+ /* First, copy v1 struct in case user is on previous versions */
+ if (copy_from_user(&args, user_arg,
+ sizeof(struct mshv_create_partition)))
return -EFAULT;
if ((args.pt_flags & ~MSHV_PT_FLAGS_MASK) ||
args.pt_isolation >= MSHV_PT_ISOLATION_COUNT)
return -EINVAL;
+ disabled_procs = &cr_props->disabled_processor_features;
+ disabled_xsave = &cr_props->disabled_processor_xsave_features;
+
+ /* Check if user provided newer struct with feature fields */
+ if (args.pt_flags & BIT_ULL(MSHV_PT_BIT_CPU_AND_XSAVE_FEATURES)) {
+ if (copy_from_user(&args, user_arg, sizeof(args)))
+ return -EFAULT;
+
+ /* Re-validate v1 fields after second copy_from_user() */
+ if ((args.pt_flags & ~MSHV_PT_FLAGS_MASK) ||
+ args.pt_isolation >= MSHV_PT_ISOLATION_COUNT)
+ return -EINVAL;
+
+ if (args.pt_num_cpu_fbanks != MSHV_NUM_CPU_FEATURES_BANKS ||
+ mshv_field_nonzero(args, pt_rsvd) ||
+ mshv_field_nonzero(args, pt_rsvd1))
+ return -EINVAL;
+
+ /*
+ * Note this assumes MSHV_NUM_CPU_FEATURES_BANKS will never
+ * change and equals HV_PARTITION_PROCESSOR_FEATURES_BANKS
+ * (i.e. 2).
+ *
+ * Further banks (index >= 2) will be modifiable as 'early'
+ * properties via the set partition property hypercall.
+ */
+ for (i = 0; i < HV_PARTITION_PROCESSOR_FEATURES_BANKS; i++)
+ disabled_procs->as_uint64[i] = args.pt_cpu_fbanks[i];
+
+#if IS_ENABLED(CONFIG_X86_64)
+ disabled_xsave->as_uint64 = args.pt_disabled_xsave;
+#else
+ /*
+ * In practice this field is ignored on arm64, but safer to
+ * zero it in case it is ever used.
+ */
+ disabled_xsave->as_uint64 = 0;
+
+ if (mshv_field_nonzero(args, pt_rsvd2))
+ return -EINVAL;
+#endif
+ } else {
+ /*
+ * v1 behavior: try to enable everything. The hypervisor will
+ * disable features that are not supported. The banks can be
+ * queried via the get partition property hypercall.
+ */
+ for (i = 0; i < HV_PARTITION_PROCESSOR_FEATURES_BANKS; i++)
+ disabled_procs->as_uint64[i] = 0;
+
+ disabled_xsave->as_uint64 = 0;
+ }
+
/* Only support EXO partitions */
- creation_flags = HV_PARTITION_CREATION_FLAG_EXO_PARTITION |
- HV_PARTITION_CREATION_FLAG_INTERCEPT_MESSAGE_PAGE_ENABLED;
+ *pt_flags = HV_PARTITION_CREATION_FLAG_EXO_PARTITION |
+ HV_PARTITION_CREATION_FLAG_INTERCEPT_MESSAGE_PAGE_ENABLED;
+
+ if (args.pt_flags & BIT_ULL(MSHV_PT_BIT_LAPIC))
+ *pt_flags |= HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED;
+ if (args.pt_flags & BIT_ULL(MSHV_PT_BIT_X2APIC))
+ *pt_flags |= HV_PARTITION_CREATION_FLAG_X2APIC_CAPABLE;
+ if (args.pt_flags & BIT_ULL(MSHV_PT_BIT_GPA_SUPER_PAGES))
+ *pt_flags |= HV_PARTITION_CREATION_FLAG_GPA_SUPER_PAGES_ENABLED;
- if (args.pt_flags & BIT(MSHV_PT_BIT_LAPIC))
- creation_flags |= HV_PARTITION_CREATION_FLAG_LAPIC_ENABLED;
- if (args.pt_flags & BIT(MSHV_PT_BIT_X2APIC))
- creation_flags |= HV_PARTITION_CREATION_FLAG_X2APIC_CAPABLE;
- if (args.pt_flags & BIT(MSHV_PT_BIT_GPA_SUPER_PAGES))
- creation_flags |= HV_PARTITION_CREATION_FLAG_GPA_SUPER_PAGES_ENABLED;
+ isol_props->as_uint64 = 0;
switch (args.pt_isolation) {
case MSHV_PT_ISOLATION_NONE:
- isolation_properties.isolation_type =
- HV_PARTITION_ISOLATION_TYPE_NONE;
+ isol_props->isolation_type = HV_PARTITION_ISOLATION_TYPE_NONE;
break;
}
+ return 0;
+}
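
The double copy_from_user() above is a common versioned-ABI pattern: read the v1-sized prefix first, and only re-read the full v2 layout when a flag bit says the caller provided it. A generic kernel-style sketch of the pattern (FLAG_HAS_EXTRA and the struct names are hypothetical):

#include <linux/bits.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define FLAG_HAS_EXTRA BIT_ULL(0)	/* hypothetical opt-in bit */

struct args_v1 { __u64 flags; };
struct args_v2 { __u64 flags; __u64 extra; };	/* v1 is a prefix of v2 */

static int copy_versioned_args(void __user *uptr, struct args_v2 *out)
{
	memset(out, 0, sizeof(*out));
	/* Always safe: the v1 layout is a prefix of v2. */
	if (copy_from_user(out, uptr, sizeof(struct args_v1)))
		return -EFAULT;
	/* Only read the larger layout if the caller opted in. */
	if ((out->flags & FLAG_HAS_EXTRA) &&
	    copy_from_user(out, uptr, sizeof(*out)))
		return -EFAULT;	/* re-validate v1 fields after this */
	return 0;
}
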
+
+static long
+mshv_ioctl_create_partition(void __user *user_arg, struct device *module_dev)
+{
+ u64 creation_flags;
+ struct hv_partition_creation_properties creation_properties;
+ union hv_partition_isolation_properties isolation_properties;
+ struct mshv_partition *partition;
+ long ret;
+
+ ret = mshv_ioctl_process_pt_flags(user_arg, &creation_flags,
+ &creation_properties,
+ &isolation_properties);
+ if (ret)
+ return ret;
+
partition = kzalloc(sizeof(*partition), GFP_KERNEL);
if (!partition)
return -ENOMEM;
@@ -1943,6 +1966,7 @@ mshv_ioctl_create_partition(void __user *user_arg, struct device *module_dev)
INIT_HLIST_HEAD(&partition->pt_devices);
+ spin_lock_init(&partition->pt_mem_regions_lock);
INIT_HLIST_HEAD(&partition->pt_mem_regions);
mshv_eventfd_init(partition);
@@ -1963,29 +1987,13 @@ mshv_ioctl_create_partition(void __user *user_arg, struct device *module_dev)
goto delete_partition;
ret = mshv_init_async_handler(partition);
- if (ret)
- goto remove_partition;
-
- fd = get_unused_fd_flags(O_CLOEXEC);
- if (fd < 0) {
- ret = fd;
- goto remove_partition;
- }
-
- file = anon_inode_getfile("mshv_partition", &mshv_partition_fops,
- partition, O_RDWR);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto put_fd;
+ if (!ret) {
+ ret = FD_ADD(O_CLOEXEC, anon_inode_getfile("mshv_partition",
+ &mshv_partition_fops,
+ partition, O_RDWR));
+ if (ret >= 0)
+ return ret;
}
-
- fd_install(fd, file);
-
- return fd;
-
-put_fd:
- put_unused_fd(fd);
-remove_partition:
remove_partition(partition);
delete_partition:
hv_call_delete_partition(partition->pt_id);
@@ -2006,6 +2014,9 @@ static long mshv_dev_ioctl(struct file *filp, unsigned int ioctl,
case MSHV_CREATE_PARTITION:
return mshv_ioctl_create_partition((void __user *)arg,
misc->this_device);
+ case MSHV_ROOT_HVCALL:
+ return mshv_ioctl_passthru_hvcall(NULL, false,
+ (void __user *)arg);
}
return -ENOTTY;
@@ -2074,9 +2085,13 @@ static int __init hv_retrieve_scheduler_type(enum hv_scheduler_type *out)
/* Retrieve and stash the supported scheduler type */
static int __init mshv_retrieve_scheduler_type(struct device *dev)
{
- int ret;
+ int ret = 0;
+
+ if (hv_l1vh_partition())
+ hv_scheduler_type = HV_SCHEDULER_TYPE_CORE_SMT;
+ else
+ ret = hv_retrieve_scheduler_type(&hv_scheduler_type);
- ret = hv_retrieve_scheduler_type(&hv_scheduler_type);
if (ret)
return ret;
@@ -2203,9 +2218,6 @@ static int __init mshv_root_partition_init(struct device *dev)
{
int err;
- if (mshv_retrieve_scheduler_type(dev))
- return -ENODEV;
-
err = root_scheduler_init(dev);
if (err)
return err;
@@ -2221,13 +2233,29 @@ root_sched_deinit:
return err;
}
+static void mshv_init_vmm_caps(struct device *dev)
+{
+ /*
+ * This can only fail here if HVCALL_GET_PARTITION_PROPERTY_EX or
+ * HV_PARTITION_PROPERTY_VMM_CAPABILITIES are not supported. In that
+ * case it's valid to proceed as if all vmm_caps are disabled (zero).
+ */
+ if (hv_call_get_partition_property_ex(HV_PARTITION_ID_SELF,
+ HV_PARTITION_PROPERTY_VMM_CAPABILITIES,
+ 0, &mshv_root.vmm_caps,
+ sizeof(mshv_root.vmm_caps)))
+ dev_warn(dev, "Unable to get VMM capabilities\n");
+
+ dev_dbg(dev, "vmm_caps = %#llx\n", mshv_root.vmm_caps.as_uint64[0]);
+}
+
static int __init mshv_parent_partition_init(void)
{
int ret;
struct device *dev;
union hv_hypervisor_version_info version_info;
- if (!hv_root_partition() || is_kdump_kernel())
+ if (!hv_parent_partition() || is_kdump_kernel())
return -ENODEV;
if (hv_get_hypervisor_version(&version_info))
@@ -2264,10 +2292,17 @@ static int __init mshv_parent_partition_init(void)
mshv_cpuhp_online = ret;
- ret = mshv_root_partition_init(dev);
+ ret = mshv_retrieve_scheduler_type(dev);
if (ret)
goto remove_cpu_state;
+ if (hv_root_partition())
+ ret = mshv_root_partition_init(dev);
+ if (ret)
+ goto remove_cpu_state;
+
+ mshv_init_vmm_caps(dev);
+
ret = mshv_irqfd_wq_init();
if (ret)
goto exit_partition;
diff --git a/drivers/hv/mshv_synic.c b/drivers/hv/mshv_synic.c
index e6b6381b7c36..f8b0337cdc82 100644
--- a/drivers/hv/mshv_synic.c
+++ b/drivers/hv/mshv_synic.c
@@ -394,7 +394,7 @@ unlock_out:
void mshv_isr(void)
{
struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
- struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
struct hv_message *msg;
bool handled;
@@ -456,7 +456,7 @@ int mshv_synic_init(unsigned int cpu)
#endif
union hv_synic_scontrol sctrl;
struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
- struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
struct hv_synic_event_flags_page **event_flags_page =
&spages->synic_event_flags_page;
struct hv_synic_event_ring_page **event_ring_page =
@@ -550,7 +550,7 @@ int mshv_synic_cleanup(unsigned int cpu)
union hv_synic_sirbp sirbp;
union hv_synic_scontrol sctrl;
struct hv_synic_pages *spages = this_cpu_ptr(mshv_root.synic_pages);
- struct hv_message_page **msg_page = &spages->synic_message_page;
+ struct hv_message_page **msg_page = &spages->hyp_synic_message_page;
struct hv_synic_event_flags_page **event_flags_page =
&spages->synic_event_flags_page;
struct hv_synic_event_ring_page **event_ring_page =
diff --git a/drivers/hv/mshv_vtl.h b/drivers/hv/mshv_vtl.h
new file mode 100644
index 000000000000..a6eea52f7aa2
--- /dev/null
+++ b/drivers/hv/mshv_vtl.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _MSHV_VTL_H
+#define _MSHV_VTL_H
+
+#include <linux/mshv.h>
+#include <linux/types.h>
+
+struct mshv_vtl_run {
+ u32 cancel;
+ u32 vtl_ret_action_size;
+ u32 pad[2];
+ char exit_message[MSHV_MAX_RUN_MSG_SIZE];
+ union {
+ struct mshv_vtl_cpu_context cpu_context;
+
+ /*
+ * Reserving room for the cpu context to grow and to maintain compatibility
+ * with user mode.
+ */
+ char reserved[1024];
+ };
+ char vtl_ret_actions[MSHV_MAX_RUN_MSG_SIZE];
+};
+
+#endif /* _MSHV_VTL_H */
diff --git a/drivers/hv/mshv_vtl_main.c b/drivers/hv/mshv_vtl_main.c
new file mode 100644
index 000000000000..2cebe9de5a5a
--- /dev/null
+++ b/drivers/hv/mshv_vtl_main.c
@@ -0,0 +1,1392 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023, Microsoft Corporation.
+ *
+ * Author:
+ * Roman Kisel <romank@linux.microsoft.com>
+ * Saurabh Sengar <ssengar@linux.microsoft.com>
+ * Naman Jain <namjain@linux.microsoft.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/anon_inodes.h>
+#include <linux/cpuhotplug.h>
+#include <linux/count_zeros.h>
+#include <linux/entry-virt.h>
+#include <linux/eventfd.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/vmalloc.h>
+#include <asm/debugreg.h>
+#include <asm/mshyperv.h>
+#include <trace/events/ipi.h>
+#include <uapi/asm/mtrr.h>
+#include <uapi/linux/mshv.h>
+#include <hyperv/hvhdk.h>
+
+#include "../../kernel/fpu/legacy.h"
+#include "mshv.h"
+#include "mshv_vtl.h"
+#include "hyperv_vmbus.h"
+
+MODULE_AUTHOR("Microsoft");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Microsoft Hyper-V VTL Driver");
+
+#define MSHV_ENTRY_REASON_LOWER_VTL_CALL 0x1
+#define MSHV_ENTRY_REASON_INTERRUPT 0x2
+#define MSHV_ENTRY_REASON_INTERCEPT 0x3
+
+#define MSHV_REAL_OFF_SHIFT 16
+#define MSHV_PG_OFF_CPU_MASK (BIT_ULL(MSHV_REAL_OFF_SHIFT) - 1)
+#define MSHV_RUN_PAGE_OFFSET 0
+#define MSHV_REG_PAGE_OFFSET 1
+#define VTL2_VMBUS_SINT_INDEX 7
+
+static struct device *mem_dev;
+
+static struct tasklet_struct msg_dpc;
+static wait_queue_head_t fd_wait_queue;
+static bool has_message;
+static struct eventfd_ctx *flag_eventfds[HV_EVENT_FLAGS_COUNT];
+static DEFINE_MUTEX(flag_lock);
+static bool __read_mostly mshv_has_reg_page;
+
+/* hvcall code is of type u16, allocate a bitmap of size (1 << 16) to accommodate it */
+#define MAX_BITMAP_SIZE ((U16_MAX + 1) / 8)
+
+struct mshv_vtl_hvcall_fd {
+ u8 allow_bitmap[MAX_BITMAP_SIZE];
+ bool allow_map_initialized;
+ /*
+ * Used to protect hvcall setup in IOCTLs
+ */
+ struct mutex init_mutex;
+ struct miscdevice *dev;
+};
+
+struct mshv_vtl_poll_file {
+ struct file *file;
+ wait_queue_entry_t wait;
+ wait_queue_head_t *wqh;
+ poll_table pt;
+ int cpu;
+};
+
+struct mshv_vtl {
+ struct device *module_dev;
+ u64 id;
+};
+
+struct mshv_vtl_per_cpu {
+ struct mshv_vtl_run *run;
+ struct page *reg_page;
+};
+
+/* SYNIC_OVERLAY_PAGE_MSR - internal, identical to hv_synic_simp */
+union hv_synic_overlay_page_msr {
+ u64 as_uint64;
+ struct {
+ u64 enabled: 1;
+ u64 reserved: 11;
+ u64 pfn: 52;
+ } __packed;
+};
+
+static struct mutex mshv_vtl_poll_file_lock;
+static union hv_register_vsm_page_offsets mshv_vsm_page_offsets;
+static union hv_register_vsm_capabilities mshv_vsm_capabilities;
+
+static DEFINE_PER_CPU(struct mshv_vtl_poll_file, mshv_vtl_poll_file);
+static DEFINE_PER_CPU(unsigned long long, num_vtl0_transitions);
+static DEFINE_PER_CPU(struct mshv_vtl_per_cpu, mshv_vtl_per_cpu);
+
+static const union hv_input_vtl input_vtl_zero;
+static const union hv_input_vtl input_vtl_normal = {
+ .use_target_vtl = 1,
+};
+
+static const struct file_operations mshv_vtl_fops;
+
+static long
+mshv_ioctl_create_vtl(void __user *user_arg, struct device *module_dev)
+{
+ struct mshv_vtl *vtl;
+ struct file *file;
+ int fd;
+
+ vtl = kzalloc(sizeof(*vtl), GFP_KERNEL);
+ if (!vtl)
+ return -ENOMEM;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ kfree(vtl);
+ return fd;
+ }
+ file = anon_inode_getfile("mshv_vtl", &mshv_vtl_fops,
+ vtl, O_RDWR);
+ if (IS_ERR(file)) {
+ kfree(vtl);
+ return PTR_ERR(file);
+ }
+ vtl->module_dev = module_dev;
+ fd_install(fd, file);
+
+ return fd;
+}
+
+static long
+mshv_ioctl_check_extension(void __user *user_arg)
+{
+ u32 arg;
+
+ if (copy_from_user(&arg, user_arg, sizeof(arg)))
+ return -EFAULT;
+
+ switch (arg) {
+ case MSHV_CAP_CORE_API_STABLE:
+ return 0;
+ case MSHV_CAP_REGISTER_PAGE:
+ return mshv_has_reg_page;
+ case MSHV_CAP_VTL_RETURN_ACTION:
+ return mshv_vsm_capabilities.return_action_available;
+ case MSHV_CAP_DR6_SHARED:
+ return mshv_vsm_capabilities.dr6_shared;
+ }
+
+ return -EOPNOTSUPP;
+}
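
From userspace, the capability probe looks like this (a sketch assuming the uapi constants in <linux/mshv.h>); for example, mshv_has_cap(MSHV_CAP_REGISTER_PAGE) returns 1 when the register page is available:

#include <fcntl.h>
#include <linux/mshv.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Returns the capability value (>= 0) or a negative result on error. */
static int mshv_has_cap(__u32 cap)
{
	int fd = open("/dev/mshv", O_RDWR | O_CLOEXEC);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, MSHV_CHECK_EXTENSION, &cap);
	close(fd);
	return ret;
}
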
+
+static long
+mshv_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct miscdevice *misc = filp->private_data;
+
+ switch (ioctl) {
+ case MSHV_CHECK_EXTENSION:
+ return mshv_ioctl_check_extension((void __user *)arg);
+ case MSHV_CREATE_VTL:
+ return mshv_ioctl_create_vtl((void __user *)arg, misc->this_device);
+ }
+
+ return -ENOTTY;
+}
+
+static const struct file_operations mshv_dev_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = mshv_dev_ioctl,
+ .llseek = noop_llseek,
+};
+
+static struct miscdevice mshv_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "mshv",
+ .fops = &mshv_dev_fops,
+ .mode = 0600,
+};
+
+static struct mshv_vtl_run *mshv_vtl_this_run(void)
+{
+ return *this_cpu_ptr(&mshv_vtl_per_cpu.run);
+}
+
+static struct mshv_vtl_run *mshv_vtl_cpu_run(int cpu)
+{
+ return *per_cpu_ptr(&mshv_vtl_per_cpu.run, cpu);
+}
+
+static struct page *mshv_vtl_cpu_reg_page(int cpu)
+{
+ return *per_cpu_ptr(&mshv_vtl_per_cpu.reg_page, cpu);
+}
+
+static void mshv_vtl_configure_reg_page(struct mshv_vtl_per_cpu *per_cpu)
+{
+ struct hv_register_assoc reg_assoc = {};
+ union hv_synic_overlay_page_msr overlay = {};
+ struct page *reg_page;
+
+ reg_page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL);
+ if (!reg_page) {
+ WARN(1, "failed to allocate register page\n");
+ return;
+ }
+
+ overlay.enabled = 1;
+ overlay.pfn = page_to_hvpfn(reg_page);
+ reg_assoc.name = HV_X64_REGISTER_REG_PAGE;
+ reg_assoc.value.reg64 = overlay.as_uint64;
+
+ if (hv_call_set_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
+ 1, input_vtl_zero, &reg_assoc)) {
+ WARN(1, "failed to setup register page\n");
+ __free_page(reg_page);
+ return;
+ }
+
+ per_cpu->reg_page = reg_page;
+ mshv_has_reg_page = true;
+}
+
+static void mshv_vtl_synic_enable_regs(unsigned int cpu)
+{
+ union hv_synic_sint sint;
+
+ sint.as_uint64 = 0;
+ sint.vector = HYPERVISOR_CALLBACK_VECTOR;
+ sint.masked = false;
+ sint.auto_eoi = hv_recommend_using_aeoi();
+
+ /* Enable intercepts */
+ if (!mshv_vsm_capabilities.intercept_page_available)
+ hv_set_msr(HV_MSR_SINT0 + HV_SYNIC_INTERCEPTION_SINT_INDEX,
+ sint.as_uint64);
+
+ /* The VTL2 host VSP SINT is (un)masked when user mode requests it */
+}
+
+static int mshv_vtl_get_vsm_regs(void)
+{
+ struct hv_register_assoc registers[2];
+ int ret, count = 2;
+
+ registers[0].name = HV_REGISTER_VSM_CODE_PAGE_OFFSETS;
+ registers[1].name = HV_REGISTER_VSM_CAPABILITIES;
+
+ ret = hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
+ count, input_vtl_zero, registers);
+ if (ret)
+ return ret;
+
+ mshv_vsm_page_offsets.as_uint64 = registers[0].value.reg64;
+ mshv_vsm_capabilities.as_uint64 = registers[1].value.reg64;
+
+ return ret;
+}
+
+static int mshv_vtl_configure_vsm_partition(struct device *dev)
+{
+ union hv_register_vsm_partition_config config;
+ struct hv_register_assoc reg_assoc;
+
+ config.as_uint64 = 0;
+ config.default_vtl_protection_mask = HV_MAP_GPA_PERMISSIONS_MASK;
+ config.enable_vtl_protection = 1;
+ config.zero_memory_on_reset = 1;
+ config.intercept_vp_startup = 1;
+ config.intercept_cpuid_unimplemented = 1;
+
+ if (mshv_vsm_capabilities.intercept_page_available) {
+ dev_dbg(dev, "using intercept page\n");
+ config.intercept_page = 1;
+ }
+
+ reg_assoc.name = HV_REGISTER_VSM_PARTITION_CONFIG;
+ reg_assoc.value.reg64 = config.as_uint64;
+
+ return hv_call_set_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
+ 1, input_vtl_zero, &reg_assoc);
+}
+
+static void mshv_vtl_vmbus_isr(void)
+{
+ struct hv_per_cpu_context *per_cpu;
+ struct hv_message *msg;
+ u32 message_type;
+ union hv_synic_event_flags *event_flags;
+ struct eventfd_ctx *eventfd;
+ u16 i;
+
+ per_cpu = this_cpu_ptr(hv_context.cpu_context);
+ if (smp_processor_id() == 0) {
+ msg = (struct hv_message *)per_cpu->hyp_synic_message_page + VTL2_VMBUS_SINT_INDEX;
+ message_type = READ_ONCE(msg->header.message_type);
+ if (message_type != HVMSG_NONE)
+ tasklet_schedule(&msg_dpc);
+ }
+
+ event_flags = (union hv_synic_event_flags *)per_cpu->hyp_synic_event_page +
+ VTL2_VMBUS_SINT_INDEX;
+ for_each_set_bit(i, event_flags->flags, HV_EVENT_FLAGS_COUNT) {
+ if (!sync_test_and_clear_bit(i, event_flags->flags))
+ continue;
+ rcu_read_lock();
+ eventfd = READ_ONCE(flag_eventfds[i]);
+ if (eventfd)
+ eventfd_signal(eventfd);
+ rcu_read_unlock();
+ }
+
+ vmbus_isr();
+}
+
+static int mshv_vtl_alloc_context(unsigned int cpu)
+{
+ struct mshv_vtl_per_cpu *per_cpu = this_cpu_ptr(&mshv_vtl_per_cpu);
+
+ per_cpu->run = (struct mshv_vtl_run *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+ if (!per_cpu->run)
+ return -ENOMEM;
+
+ if (mshv_vsm_capabilities.intercept_page_available)
+ mshv_vtl_configure_reg_page(per_cpu);
+
+ mshv_vtl_synic_enable_regs(cpu);
+
+ return 0;
+}
+
+static int mshv_vtl_cpuhp_online;
+
+static int hv_vtl_setup_synic(void)
+{
+ int ret;
+
+ /* Use our isr to first filter out packets destined for userspace */
+ hv_setup_vmbus_handler(mshv_vtl_vmbus_isr);
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hyperv/vtl:online",
+ mshv_vtl_alloc_context, NULL);
+ if (ret < 0) {
+ hv_setup_vmbus_handler(vmbus_isr);
+ return ret;
+ }
+
+ mshv_vtl_cpuhp_online = ret;
+
+ return 0;
+}
+
+static void hv_vtl_remove_synic(void)
+{
+ cpuhp_remove_state(mshv_vtl_cpuhp_online);
+ hv_setup_vmbus_handler(vmbus_isr);
+}
+
+static int vtl_get_vp_register(struct hv_register_assoc *reg)
+{
+ return hv_call_get_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
+ 1, input_vtl_normal, reg);
+}
+
+static int vtl_set_vp_register(struct hv_register_assoc *reg)
+{
+ return hv_call_set_vp_registers(HV_VP_INDEX_SELF, HV_PARTITION_ID_SELF,
+ 1, input_vtl_normal, reg);
+}
+
+static int mshv_vtl_ioctl_add_vtl0_mem(struct mshv_vtl *vtl, void __user *arg)
+{
+ struct mshv_vtl_ram_disposition vtl0_mem;
+ struct dev_pagemap *pgmap;
+ void *addr;
+
+ if (copy_from_user(&vtl0_mem, arg, sizeof(vtl0_mem)))
+ return -EFAULT;
+ /* vtl0_mem.last_pfn is excluded from the pagemap range for VTL0 by design */
+ if (vtl0_mem.last_pfn <= vtl0_mem.start_pfn) {
+ dev_err(vtl->module_dev, "range start pfn (%llx) >= end pfn (%llx)\n",
+ vtl0_mem.start_pfn, vtl0_mem.last_pfn);
+ return -EINVAL;
+ }
+
+ pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
+ if (!pgmap)
+ return -ENOMEM;
+
+ pgmap->ranges[0].start = PFN_PHYS(vtl0_mem.start_pfn);
+ pgmap->ranges[0].end = PFN_PHYS(vtl0_mem.last_pfn) - 1;
+ pgmap->nr_range = 1;
+ pgmap->type = MEMORY_DEVICE_GENERIC;
+
+ /*
+ * Determine the highest page order that can be used for the given memory range.
+ * This works best when the range is aligned; i.e. both the start and the length.
+ */
+ pgmap->vmemmap_shift = count_trailing_zeros(vtl0_mem.start_pfn | vtl0_mem.last_pfn);
+ dev_dbg(vtl->module_dev,
+ "Add VTL0 memory: start: 0x%llx, end_pfn: 0x%llx, page order: %lu\n",
+ vtl0_mem.start_pfn, vtl0_mem.last_pfn, pgmap->vmemmap_shift);
+
+ addr = devm_memremap_pages(mem_dev, pgmap);
+ if (IS_ERR(addr)) {
+ dev_err(vtl->module_dev, "devm_memremap_pages error: %ld\n", PTR_ERR(addr));
+ kfree(pgmap);
+ return PTR_ERR(addr);
+ }
+
+ /*
+ * Don't free pgmap, since it has to stick around until the memory
+ * is unmapped, which will never happen as there is no scenario
+ * where VTL0 can be released/shut down without bringing down VTL2.
+ */
+ return 0;
+}
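
A hedged userspace sketch of the caller's side; note that last_pfn is exclusive (the first PFN past the range), matching the check above:

#include <linux/mshv.h>
#include <sys/ioctl.h>

/* Hand a PFN range to VTL2 for use as VTL0 memory (sketch). */
static int add_vtl0_ram(int vtl_fd, __u64 start_pfn, __u64 last_pfn)
{
	struct mshv_vtl_ram_disposition ram = {
		.start_pfn = start_pfn,
		.last_pfn = last_pfn,	/* exclusive end of the range */
	};

	return ioctl(vtl_fd, MSHV_ADD_VTL0_MEMORY, &ram);
}

Aligning both start_pfn and last_pfn to a large power of two maximizes pgmap->vmemmap_shift and hence the page order the mapping can use.
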
+
+static void mshv_vtl_cancel(int cpu)
+{
+ int here = get_cpu();
+
+ if (here != cpu) {
+ if (!xchg_relaxed(&mshv_vtl_cpu_run(cpu)->cancel, 1))
+ smp_send_reschedule(cpu);
+ } else {
+ WRITE_ONCE(mshv_vtl_this_run()->cancel, 1);
+ }
+ put_cpu();
+}
+
+static int mshv_vtl_poll_file_wake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
+{
+ struct mshv_vtl_poll_file *poll_file = container_of(wait, struct mshv_vtl_poll_file, wait);
+
+ mshv_vtl_cancel(poll_file->cpu);
+
+ return 0;
+}
+
+static void mshv_vtl_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
+{
+ struct mshv_vtl_poll_file *poll_file = container_of(pt, struct mshv_vtl_poll_file, pt);
+
+ WARN_ON(poll_file->wqh);
+ poll_file->wqh = wqh;
+ add_wait_queue(wqh, &poll_file->wait);
+}
+
+static int mshv_vtl_ioctl_set_poll_file(struct mshv_vtl_set_poll_file __user *user_input)
+{
+ struct file *file, *old_file;
+ struct mshv_vtl_poll_file *poll_file;
+ struct mshv_vtl_set_poll_file input;
+
+ if (copy_from_user(&input, user_input, sizeof(input)))
+ return -EFAULT;
+
+ if (input.cpu >= num_possible_cpus() || !cpu_online(input.cpu))
+ return -EINVAL;
+ /*
+ * CPU hotplug is not supported in VTL2 in OpenHCL, where this driver runs.
+ * The CPU is expected to remain online after the above cpu_online() check.
+ */
+
+ file = fget(input.fd);
+ if (!file)
+ return -EBADFD;
+
+ poll_file = per_cpu_ptr(&mshv_vtl_poll_file, READ_ONCE(input.cpu));
+ if (!poll_file)
+ return -EINVAL;
+
+ mutex_lock(&mshv_vtl_poll_file_lock);
+
+ if (poll_file->wqh)
+ remove_wait_queue(poll_file->wqh, &poll_file->wait);
+ poll_file->wqh = NULL;
+
+ old_file = poll_file->file;
+ poll_file->file = file;
+ poll_file->cpu = input.cpu;
+
+ if (file) {
+ init_waitqueue_func_entry(&poll_file->wait, mshv_vtl_poll_file_wake);
+ init_poll_funcptr(&poll_file->pt, mshv_vtl_ptable_queue_proc);
+ vfs_poll(file, &poll_file->pt);
+ }
+
+ mutex_unlock(&mshv_vtl_poll_file_lock);
+
+ if (old_file)
+ fput(old_file);
+
+ return 0;
+}
+
+/* Static table mapping register names to their corresponding actions */
+static const struct {
+ enum hv_register_name reg_name;
+ int debug_reg_num; /* -1 if not a debug register */
+ u32 msr_addr; /* 0 if not an MSR */
+} reg_table[] = {
+ /* Debug registers */
+ {HV_X64_REGISTER_DR0, 0, 0},
+ {HV_X64_REGISTER_DR1, 1, 0},
+ {HV_X64_REGISTER_DR2, 2, 0},
+ {HV_X64_REGISTER_DR3, 3, 0},
+ {HV_X64_REGISTER_DR6, 6, 0},
+ /* MTRR MSRs */
+ {HV_X64_REGISTER_MSR_MTRR_CAP, -1, MSR_MTRRcap},
+ {HV_X64_REGISTER_MSR_MTRR_DEF_TYPE, -1, MSR_MTRRdefType},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE0, -1, MTRRphysBase_MSR(0)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE1, -1, MTRRphysBase_MSR(1)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE2, -1, MTRRphysBase_MSR(2)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE3, -1, MTRRphysBase_MSR(3)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE4, -1, MTRRphysBase_MSR(4)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE5, -1, MTRRphysBase_MSR(5)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE6, -1, MTRRphysBase_MSR(6)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE7, -1, MTRRphysBase_MSR(7)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE8, -1, MTRRphysBase_MSR(8)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASE9, -1, MTRRphysBase_MSR(9)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASEA, -1, MTRRphysBase_MSR(0xa)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASEB, -1, MTRRphysBase_MSR(0xb)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASEC, -1, MTRRphysBase_MSR(0xc)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASED, -1, MTRRphysBase_MSR(0xd)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASEE, -1, MTRRphysBase_MSR(0xe)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_BASEF, -1, MTRRphysBase_MSR(0xf)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK0, -1, MTRRphysMask_MSR(0)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK1, -1, MTRRphysMask_MSR(1)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK2, -1, MTRRphysMask_MSR(2)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK3, -1, MTRRphysMask_MSR(3)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK4, -1, MTRRphysMask_MSR(4)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK5, -1, MTRRphysMask_MSR(5)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK6, -1, MTRRphysMask_MSR(6)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK7, -1, MTRRphysMask_MSR(7)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK8, -1, MTRRphysMask_MSR(8)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASK9, -1, MTRRphysMask_MSR(9)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASKA, -1, MTRRphysMask_MSR(0xa)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASKB, -1, MTRRphysMask_MSR(0xb)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASKC, -1, MTRRphysMask_MSR(0xc)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASKD, -1, MTRRphysMask_MSR(0xd)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASKE, -1, MTRRphysMask_MSR(0xe)},
+ {HV_X64_REGISTER_MSR_MTRR_PHYS_MASKF, -1, MTRRphysMask_MSR(0xf)},
+ {HV_X64_REGISTER_MSR_MTRR_FIX64K00000, -1, MSR_MTRRfix64K_00000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX16K80000, -1, MSR_MTRRfix16K_80000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX16KA0000, -1, MSR_MTRRfix16K_A0000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX4KC0000, -1, MSR_MTRRfix4K_C0000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX4KC8000, -1, MSR_MTRRfix4K_C8000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX4KD0000, -1, MSR_MTRRfix4K_D0000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX4KD8000, -1, MSR_MTRRfix4K_D8000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX4KE0000, -1, MSR_MTRRfix4K_E0000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX4KE8000, -1, MSR_MTRRfix4K_E8000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX4KF0000, -1, MSR_MTRRfix4K_F0000},
+ {HV_X64_REGISTER_MSR_MTRR_FIX4KF8000, -1, MSR_MTRRfix4K_F8000},
+};
+
+static int mshv_vtl_get_set_reg(struct hv_register_assoc *regs, bool set)
+{
+ u64 *reg64;
+ enum hv_register_name gpr_name;
+ int i;
+
+ gpr_name = regs->name;
+ reg64 = &regs->value.reg64;
+
+ /* Search for the register in the table */
+ for (i = 0; i < ARRAY_SIZE(reg_table); i++) {
+ if (reg_table[i].reg_name != gpr_name)
+ continue;
+ if (reg_table[i].debug_reg_num != -1) {
+ /* Handle debug registers */
+ if (gpr_name == HV_X64_REGISTER_DR6 &&
+ !mshv_vsm_capabilities.dr6_shared)
+ goto hypercall;
+ if (set)
+ native_set_debugreg(reg_table[i].debug_reg_num, *reg64);
+ else
+ *reg64 = native_get_debugreg(reg_table[i].debug_reg_num);
+ } else {
+ /* Handle MSRs */
+ if (set)
+ wrmsrl(reg_table[i].msr_addr, *reg64);
+ else
+ rdmsrl(reg_table[i].msr_addr, *reg64);
+ }
+ return 0;
+ }
+
+hypercall:
+ return 1;
+}
+
+static void mshv_vtl_return(struct mshv_vtl_cpu_context *vtl0)
+{
+ struct hv_vp_assist_page *hvp;
+
+ hvp = hv_vp_assist_page[smp_processor_id()];
+
+ /*
+ * Process any VTL-return actions (e.g. signal events) set directly
+ * in the run page.
+ */
+ if (mshv_vsm_capabilities.return_action_available) {
+ u32 offset = READ_ONCE(mshv_vtl_this_run()->vtl_ret_action_size);
+
+ WRITE_ONCE(mshv_vtl_this_run()->vtl_ret_action_size, 0);
+
+ /*
+ * Hypervisor will take care of clearing out the actions
+ * set in the assist page.
+ */
+ memcpy(hvp->vtl_ret_actions,
+ mshv_vtl_this_run()->vtl_ret_actions,
+ min_t(u32, offset, sizeof(hvp->vtl_ret_actions)));
+ }
+
+ mshv_vtl_return_call(vtl0);
+}
+
+static bool mshv_vtl_process_intercept(void)
+{
+ struct hv_per_cpu_context *mshv_cpu;
+ void *synic_message_page;
+ struct hv_message *msg;
+ u32 message_type;
+
+ mshv_cpu = this_cpu_ptr(hv_context.cpu_context);
+ synic_message_page = mshv_cpu->hyp_synic_message_page;
+ if (unlikely(!synic_message_page))
+ return true;
+
+ msg = (struct hv_message *)synic_message_page + HV_SYNIC_INTERCEPTION_SINT_INDEX;
+ message_type = READ_ONCE(msg->header.message_type);
+ if (message_type == HVMSG_NONE)
+ return true;
+
+ memcpy(mshv_vtl_this_run()->exit_message, msg, sizeof(*msg));
+ vmbus_signal_eom(msg, message_type);
+
+ return false;
+}
+
+static int mshv_vtl_ioctl_return_to_lower_vtl(void)
+{
+ preempt_disable();
+ for (;;) {
+ unsigned long irq_flags;
+ struct hv_vp_assist_page *hvp;
+ int ret;
+
+ if (__xfer_to_guest_mode_work_pending()) {
+ preempt_enable();
+ ret = xfer_to_guest_mode_handle_work();
+ if (ret)
+ return ret;
+ preempt_disable();
+ }
+
+ local_irq_save(irq_flags);
+ if (READ_ONCE(mshv_vtl_this_run()->cancel)) {
+ local_irq_restore(irq_flags);
+ preempt_enable();
+ return -EINTR;
+ }
+
+ mshv_vtl_return(&mshv_vtl_this_run()->cpu_context);
+ local_irq_restore(irq_flags);
+
+ hvp = hv_vp_assist_page[smp_processor_id()];
+ this_cpu_inc(num_vtl0_transitions);
+ switch (hvp->vtl_entry_reason) {
+ case MSHV_ENTRY_REASON_INTERRUPT:
+ if (!mshv_vsm_capabilities.intercept_page_available &&
+ likely(!mshv_vtl_process_intercept()))
+ goto done;
+ break;
+
+ case MSHV_ENTRY_REASON_INTERCEPT:
+ WARN_ON(!mshv_vsm_capabilities.intercept_page_available);
+ memcpy(mshv_vtl_this_run()->exit_message, hvp->intercept_message,
+ sizeof(hvp->intercept_message));
+ goto done;
+
+ default:
+ panic("unknown entry reason: %d", hvp->vtl_entry_reason);
+ }
+ }
+
+done:
+ preempt_enable();
+
+ return 0;
+}
+
+static long
+mshv_vtl_ioctl_get_regs(void __user *user_args)
+{
+ struct mshv_vp_registers args;
+ struct hv_register_assoc reg;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ /* This IOCTL supports processing only one register at a time. */
+ if (args.count != 1)
+ return -EINVAL;
+
+ if (copy_from_user(&reg, (void __user *)args.regs_ptr,
+ sizeof(reg)))
+ return -EFAULT;
+
+ ret = mshv_vtl_get_set_reg(&reg, false);
+ if (!ret)
+ goto copy_args; /* No hypercall needed */
+ ret = vtl_get_vp_register(&reg);
+ if (ret)
+ return ret;
+
+copy_args:
+ if (copy_to_user((void __user *)args.regs_ptr, &reg, sizeof(reg)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static long
+mshv_vtl_ioctl_set_regs(void __user *user_args)
+{
+ struct mshv_vp_registers args;
+ struct hv_register_assoc reg;
+ long ret;
+
+ if (copy_from_user(&args, user_args, sizeof(args)))
+ return -EFAULT;
+
+ /* This IOCTL supports processing only one register at a time. */
+ if (args.count != 1)
+ return -EINVAL;
+
+ if (copy_from_user(&reg, (void __user *)args.regs_ptr, sizeof(reg)))
+ return -EFAULT;
+
+ ret = mshv_vtl_get_set_reg(&reg, true);
+ if (!ret)
+ return ret; /* No hypercall needed */
+ ret = vtl_set_vp_register(&reg);
+
+ return ret;
+}
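
A userspace sketch of the single-register contract (count must be 1); it assumes the hv_register_assoc layout is reachable from the uapi headers:

#include <linux/mshv.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int vtl_get_reg(int vtl_fd, __u32 name, __u64 *val)
{
	struct hv_register_assoc reg = { .name = name };
	struct mshv_vp_registers args = {
		.count = 1,	/* the ioctl rejects anything else */
		.regs_ptr = (__u64)(uintptr_t)&reg,
	};
	int ret = ioctl(vtl_fd, MSHV_GET_VP_REGISTERS, &args);

	if (!ret)
		*val = reg.value.reg64;
	return ret;
}

Registers in reg_table (debug registers, MTRR MSRs) are serviced locally; anything else falls through to the hypercall path.
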
+
+static long
+mshv_vtl_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ long ret;
+ struct mshv_vtl *vtl = filp->private_data;
+
+ switch (ioctl) {
+ case MSHV_SET_POLL_FILE:
+ ret = mshv_vtl_ioctl_set_poll_file((struct mshv_vtl_set_poll_file __user *)arg);
+ break;
+ case MSHV_GET_VP_REGISTERS:
+ ret = mshv_vtl_ioctl_get_regs((void __user *)arg);
+ break;
+ case MSHV_SET_VP_REGISTERS:
+ ret = mshv_vtl_ioctl_set_regs((void __user *)arg);
+ break;
+ case MSHV_RETURN_TO_LOWER_VTL:
+ ret = mshv_vtl_ioctl_return_to_lower_vtl();
+ break;
+ case MSHV_ADD_VTL0_MEMORY:
+ ret = mshv_vtl_ioctl_add_vtl0_mem(vtl, (void __user *)arg);
+ break;
+ default:
+ dev_err(vtl->module_dev, "invalid vtl ioctl: %#x\n", ioctl);
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+static vm_fault_t mshv_vtl_fault(struct vm_fault *vmf)
+{
+ struct page *page;
+ int cpu = vmf->pgoff & MSHV_PG_OFF_CPU_MASK;
+ int real_off = vmf->pgoff >> MSHV_REAL_OFF_SHIFT;
+
+ if (!cpu_online(cpu))
+ return VM_FAULT_SIGBUS;
+ /*
+ * CPU hotplug is not supported in VTL2 in OpenHCL, where this driver runs.
+ * The CPU is expected to remain online after the above cpu_online() check.
+ */
+
+ if (real_off == MSHV_RUN_PAGE_OFFSET) {
+ page = virt_to_page(mshv_vtl_cpu_run(cpu));
+ } else if (real_off == MSHV_REG_PAGE_OFFSET) {
+ if (!mshv_has_reg_page)
+ return VM_FAULT_SIGBUS;
+ page = mshv_vtl_cpu_reg_page(cpu);
+ } else {
+ return VM_FAULT_NOPAGE;
+ }
+
+ get_page(page);
+ vmf->page = page;
+
+ return 0;
+}
+
+static const struct vm_operations_struct mshv_vtl_vm_ops = {
+ .fault = mshv_vtl_fault,
+};
+
+static int mshv_vtl_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &mshv_vtl_vm_ops;
+
+ return 0;
+}
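
Userspace mirrors this encoding when mmap()ing the VTL fd: the page offset carries the CPU index in its low 16 bits (MSHV_PG_OFF_CPU_MASK) and the page kind above MSHV_REAL_OFF_SHIFT. A sketch:

#include <sys/mman.h>
#include <unistd.h>

/* page_kind: 0 = run page (MSHV_RUN_PAGE_OFFSET), 1 = register page. */
static void *map_vtl_page(int vtl_fd, unsigned int cpu, unsigned int page_kind)
{
	long psz = sysconf(_SC_PAGESIZE);
	off_t off = (((off_t)page_kind << 16) | cpu) * psz;

	return mmap(NULL, psz, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vtl_fd, off);
}
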
+
+static int mshv_vtl_release(struct inode *inode, struct file *filp)
+{
+ struct mshv_vtl *vtl = filp->private_data;
+
+ kfree(vtl);
+
+ return 0;
+}
+
+static const struct file_operations mshv_vtl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = mshv_vtl_ioctl,
+ .release = mshv_vtl_release,
+ .mmap = mshv_vtl_mmap,
+};
+
+static void mshv_vtl_synic_mask_vmbus_sint(const u8 *mask)
+{
+ union hv_synic_sint sint;
+
+ sint.as_uint64 = 0;
+ sint.vector = HYPERVISOR_CALLBACK_VECTOR;
+ sint.masked = (*mask != 0);
+ sint.auto_eoi = hv_recommend_using_aeoi();
+
+ hv_set_msr(HV_MSR_SINT0 + VTL2_VMBUS_SINT_INDEX,
+ sint.as_uint64);
+
+ if (!sint.masked)
+ pr_debug("%s: Unmasking VTL2 VMBUS SINT on VP %d\n", __func__, smp_processor_id());
+ else
+ pr_debug("%s: Masking VTL2 VMBUS SINT on VP %d\n", __func__, smp_processor_id());
+}
+
+static void mshv_vtl_read_remote(void *buffer)
+{
+ struct hv_per_cpu_context *mshv_cpu = this_cpu_ptr(hv_context.cpu_context);
+ struct hv_message *msg = (struct hv_message *)mshv_cpu->hyp_synic_message_page +
+ VTL2_VMBUS_SINT_INDEX;
+ u32 message_type = READ_ONCE(msg->header.message_type);
+
+ WRITE_ONCE(has_message, false);
+ if (message_type == HVMSG_NONE)
+ return;
+
+ memcpy(buffer, msg, sizeof(*msg));
+ vmbus_signal_eom(msg, message_type);
+}
+
+static bool vtl_synic_mask_vmbus_sint_masked = true;
+
+static ssize_t mshv_vtl_sint_read(struct file *filp, char __user *arg, size_t size, loff_t *offset)
+{
+ struct hv_message msg = {};
+ int ret;
+
+ if (size < sizeof(msg))
+ return -EINVAL;
+
+ for (;;) {
+ smp_call_function_single(VMBUS_CONNECT_CPU, mshv_vtl_read_remote, &msg, true);
+ if (msg.header.message_type != HVMSG_NONE)
+ break;
+
+ if (READ_ONCE(vtl_synic_mask_vmbus_sint_masked))
+ return 0; /* EOF */
+
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(fd_wait_queue,
+ READ_ONCE(has_message) ||
+ READ_ONCE(vtl_synic_mask_vmbus_sint_masked));
+ if (ret)
+ return ret;
+ }
+
+ if (copy_to_user(arg, &msg, sizeof(msg)))
+ return -EFAULT;
+
+ return sizeof(msg);
+}
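
Each successful read() returns exactly one hv_message (256 bytes); 0 means the stream is paused (EOF), and -EAGAIN means no message is pending on a non-blocking fd. A userspace drain-loop sketch:

#include <errno.h>
#include <unistd.h>

#define HV_MESSAGE_SIZE 256	/* sizeof(struct hv_message) */

static void drain_sint(int sint_fd, void (*handle)(const void *msg))
{
	char buf[HV_MESSAGE_SIZE];
	ssize_t n;

	/* One whole hv_message per successful read(). */
	while ((n = read(sint_fd, buf, sizeof(buf))) > 0)
		handle(buf);
	/* n == 0: stream paused (EOF); EAGAIN: empty on an O_NONBLOCK fd. */
}
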
+
+static __poll_t mshv_vtl_sint_poll(struct file *filp, poll_table *wait)
+{
+ __poll_t mask = 0;
+
+ poll_wait(filp, &fd_wait_queue, wait);
+ if (READ_ONCE(has_message) || READ_ONCE(vtl_synic_mask_vmbus_sint_masked))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ return mask;
+}
+
+static void mshv_vtl_sint_on_msg_dpc(unsigned long data)
+{
+ WRITE_ONCE(has_message, true);
+ wake_up_interruptible_poll(&fd_wait_queue, EPOLLIN);
+}
+
+static int mshv_vtl_sint_ioctl_post_msg(struct mshv_vtl_sint_post_msg __user *arg)
+{
+ struct mshv_vtl_sint_post_msg message;
+ u8 payload[HV_MESSAGE_PAYLOAD_BYTE_COUNT];
+
+ if (copy_from_user(&message, arg, sizeof(message)))
+ return -EFAULT;
+ if (message.payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
+ return -EINVAL;
+ if (copy_from_user(payload, (void __user *)message.payload_ptr,
+ message.payload_size))
+ return -EFAULT;
+
+ return hv_post_message((union hv_connection_id)message.connection_id,
+ message.message_type, (void *)payload,
+ message.payload_size);
+}
+
+static int mshv_vtl_sint_ioctl_signal_event(struct mshv_vtl_signal_event __user *arg)
+{
+ u64 input, status;
+ struct mshv_vtl_signal_event signal_event;
+
+ if (copy_from_user(&signal_event, arg, sizeof(signal_event)))
+ return -EFAULT;
+
+ input = signal_event.connection_id | ((u64)signal_event.flag << 32);
+
+ status = hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, input);
+
+ return hv_result_to_errno(status);
+}
+
+static int mshv_vtl_sint_ioctl_set_eventfd(struct mshv_vtl_set_eventfd __user *arg)
+{
+ struct mshv_vtl_set_eventfd set_eventfd;
+ struct eventfd_ctx *eventfd, *old_eventfd;
+
+ if (copy_from_user(&set_eventfd, arg, sizeof(set_eventfd)))
+ return -EFAULT;
+ if (set_eventfd.flag >= HV_EVENT_FLAGS_COUNT)
+ return -EINVAL;
+
+ eventfd = NULL;
+ if (set_eventfd.fd >= 0) {
+ eventfd = eventfd_ctx_fdget(set_eventfd.fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+ }
+
+ guard(mutex)(&flag_lock);
+ old_eventfd = READ_ONCE(flag_eventfds[set_eventfd.flag]);
+ WRITE_ONCE(flag_eventfds[set_eventfd.flag], eventfd);
+
+ if (old_eventfd) {
+ synchronize_rcu();
+ eventfd_ctx_put(old_eventfd);
+ }
+
+ return 0;
+}
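
A userspace sketch wiring an eventfd to one event flag (flag indices must be below HV_EVENT_FLAGS_COUNT), assuming the uapi struct above:

#include <linux/mshv.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

/* Returns an eventfd signalled for the given flag, or -1 on error. */
static int wire_event_flag(int sint_fd, __u32 flag)
{
	struct mshv_vtl_set_eventfd se = {
		.fd = eventfd(0, EFD_CLOEXEC),
		.flag = flag,
	};

	if (se.fd < 0 || ioctl(sint_fd, MSHV_SINT_SET_EVENTFD, &se))
		return -1;
	return se.fd;	/* read() it to consume event counts */
}
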
+
+static int mshv_vtl_sint_ioctl_pause_msg_stream(struct mshv_sint_mask __user *arg)
+{
+ static DEFINE_MUTEX(vtl2_vmbus_sint_mask_mutex);
+ struct mshv_sint_mask mask;
+
+ if (copy_from_user(&mask, arg, sizeof(mask)))
+ return -EFAULT;
+ guard(mutex)(&vtl2_vmbus_sint_mask_mutex);
+ on_each_cpu((smp_call_func_t)mshv_vtl_synic_mask_vmbus_sint, &mask.mask, 1);
+ WRITE_ONCE(vtl_synic_mask_vmbus_sint_masked, mask.mask != 0);
+ if (mask.mask)
+ wake_up_interruptible_poll(&fd_wait_queue, EPOLLIN);
+
+ return 0;
+}
+
+static long mshv_vtl_sint_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case MSHV_SINT_POST_MESSAGE:
+ return mshv_vtl_sint_ioctl_post_msg((struct mshv_vtl_sint_post_msg __user *)arg);
+ case MSHV_SINT_SIGNAL_EVENT:
+ return mshv_vtl_sint_ioctl_signal_event((struct mshv_vtl_signal_event __user *)arg);
+ case MSHV_SINT_SET_EVENTFD:
+ return mshv_vtl_sint_ioctl_set_eventfd((struct mshv_vtl_set_eventfd __user *)arg);
+ case MSHV_SINT_PAUSE_MESSAGE_STREAM:
+ return mshv_vtl_sint_ioctl_pause_msg_stream((struct mshv_sint_mask __user *)arg);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct file_operations mshv_vtl_sint_ops = {
+ .owner = THIS_MODULE,
+ .read = mshv_vtl_sint_read,
+ .poll = mshv_vtl_sint_poll,
+ .unlocked_ioctl = mshv_vtl_sint_ioctl,
+};
+
+static struct miscdevice mshv_vtl_sint_dev = {
+ .name = "mshv_sint",
+ .fops = &mshv_vtl_sint_ops,
+ .mode = 0600,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static int mshv_vtl_hvcall_dev_open(struct inode *node, struct file *f)
+{
+ struct miscdevice *dev = f->private_data;
+ struct mshv_vtl_hvcall_fd *fd;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ fd = vzalloc(sizeof(*fd));
+ if (!fd)
+ return -ENOMEM;
+ fd->dev = dev;
+ f->private_data = fd;
+ mutex_init(&fd->init_mutex);
+
+ return 0;
+}
+
+static int mshv_vtl_hvcall_dev_release(struct inode *node, struct file *f)
+{
+ struct mshv_vtl_hvcall_fd *fd;
+
+ fd = f->private_data;
+ if (fd) {
+ vfree(fd);
+ f->private_data = NULL;
+ }
+
+ return 0;
+}
+
+static int mshv_vtl_hvcall_do_setup(struct mshv_vtl_hvcall_fd *fd,
+ struct mshv_vtl_hvcall_setup __user *hvcall_setup_user)
+{
+ struct mshv_vtl_hvcall_setup hvcall_setup;
+
+ guard(mutex)(&fd->init_mutex);
+
+ if (fd->allow_map_initialized) {
+ dev_err(fd->dev->this_device,
+ "Hypercall allow map has already been set, pid %d\n",
+ current->pid);
+ return -EINVAL;
+ }
+
+ if (copy_from_user(&hvcall_setup, hvcall_setup_user,
+ sizeof(struct mshv_vtl_hvcall_setup))) {
+ return -EFAULT;
+ }
+ if (hvcall_setup.bitmap_array_size > ARRAY_SIZE(fd->allow_bitmap))
+ return -EINVAL;
+
+ if (copy_from_user(&fd->allow_bitmap,
+ (void __user *)hvcall_setup.allow_bitmap_ptr,
+ hvcall_setup.bitmap_array_size)) {
+ return -EFAULT;
+ }
+
+ dev_info(fd->dev->this_device, "Hypercall allow map has been set, pid %d\n",
+ current->pid);
+ fd->allow_map_initialized = true;
+ return 0;
+}
+
+static bool mshv_vtl_hvcall_is_allowed(struct mshv_vtl_hvcall_fd *fd, u16 call_code)
+{
+ return test_bit(call_code, (unsigned long *)fd->allow_bitmap);
+}
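
The allow bitmap is indexed by the 16-bit hypercall code, one bit per code; on a little-endian host the byte/bit layout below matches the kernel's test_bit(). A hedged userspace sketch enabling a single call code:

#include <linux/mshv.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int setup_allow_one(int hvcall_fd, __u16 call_code)
{
	__u8 bitmap[(1u << 16) / 8];	/* matches MAX_BITMAP_SIZE */
	struct mshv_vtl_hvcall_setup setup = {
		.bitmap_array_size = sizeof(bitmap),
		.allow_bitmap_ptr = (__u64)(uintptr_t)bitmap,
	};

	memset(bitmap, 0, sizeof(bitmap));
	bitmap[call_code / 8] |= 1u << (call_code % 8);

	return ioctl(hvcall_fd, MSHV_HVCALL_SETUP, &setup);
}

MSHV_HVCALL then carries the control word, input/output sizes and pointers, and returns the hypervisor status in the struct's status field.
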
+
+static int mshv_vtl_hvcall_call(struct mshv_vtl_hvcall_fd *fd,
+ struct mshv_vtl_hvcall __user *hvcall_user)
+{
+ struct mshv_vtl_hvcall hvcall;
+ void *in, *out;
+ int ret;
+
+ if (copy_from_user(&hvcall, hvcall_user, sizeof(struct mshv_vtl_hvcall)))
+ return -EFAULT;
+ if (hvcall.input_size > HV_HYP_PAGE_SIZE)
+ return -EINVAL;
+ if (hvcall.output_size > HV_HYP_PAGE_SIZE)
+ return -EINVAL;
+
+ /*
+ * By default, no hypercall is allowed. User mode has to set up the
+ * allow bitmap once.
+ */
+
+ if (!mshv_vtl_hvcall_is_allowed(fd, hvcall.control & 0xFFFF)) {
+ dev_err(fd->dev->this_device,
+ "Hypercall with control data %#llx isn't allowed\n",
+ hvcall.control);
+ return -EPERM;
+ }
+
+ /*
+ * This may create a problem for the Confidential VM (CVM) use case,
+ * where hypercalls must use the Hyper-V driver's per-cpu input and
+ * output pages (hyperv_pcpu_input_arg and hyperv_pcpu_output_arg).
+ *
+ * TODO: Take care of this when CVM support is added.
+ */
+ in = (void *)__get_free_page(GFP_KERNEL);
+ out = (void *)__get_free_page(GFP_KERNEL);
+ if (!in || !out) {
+ /* free_page() ignores a zero address, so partial failure is fine */
+ ret = -ENOMEM;
+ goto free_pages;
+ }
+
+ if (copy_from_user(in, (void __user *)hvcall.input_ptr, hvcall.input_size)) {
+ ret = -EFAULT;
+ goto free_pages;
+ }
+
+ hvcall.status = hv_do_hypercall(hvcall.control, in, out);
+
+ if (copy_to_user((void __user *)hvcall.output_ptr, out, hvcall.output_size)) {
+ ret = -EFAULT;
+ goto free_pages;
+ }
+ ret = put_user(hvcall.status, &hvcall_user->status);
+free_pages:
+ free_page((unsigned long)in);
+ free_page((unsigned long)out);
+
+ return ret;
+}
+
+static long mshv_vtl_hvcall_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ struct mshv_vtl_hvcall_fd *fd = f->private_data;
+
+ switch (cmd) {
+ case MSHV_HVCALL_SETUP:
+ return mshv_vtl_hvcall_do_setup(fd, (struct mshv_vtl_hvcall_setup __user *)arg);
+ case MSHV_HVCALL:
+ return mshv_vtl_hvcall_call(fd, (struct mshv_vtl_hvcall __user *)arg);
+ default:
+ break;
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static const struct file_operations mshv_vtl_hvcall_dev_file_ops = {
+ .owner = THIS_MODULE,
+ .open = mshv_vtl_hvcall_dev_open,
+ .release = mshv_vtl_hvcall_dev_release,
+ .unlocked_ioctl = mshv_vtl_hvcall_dev_ioctl,
+};
+
+static struct miscdevice mshv_vtl_hvcall_dev = {
+ .name = "mshv_hvcall",
+ .nodename = "mshv_hvcall",
+ .fops = &mshv_vtl_hvcall_dev_file_ops,
+ .mode = 0600,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static int mshv_vtl_low_open(struct inode *inodep, struct file *filp)
+{
+ pid_t pid = task_pid_vnr(current);
+ uid_t uid = current_uid().val;
+ int ret = 0;
+
+ pr_debug("%s: Opening VTL low, task group %d, uid %d\n", __func__, pid, uid);
+
+ if (capable(CAP_SYS_ADMIN)) {
+ filp->private_data = inodep;
+ } else {
+ pr_err("%s: VTL low open failed: CAP_SYS_ADMIN required. task group %d, uid %d\n",
+ __func__, pid, uid);
+ ret = -EPERM;
+ }
+
+ return ret;
+}
+
+static bool can_fault(struct vm_fault *vmf, unsigned long size, unsigned long *pfn)
+{
+ unsigned long mask = size - 1;
+ unsigned long start = vmf->address & ~mask;
+ unsigned long end = start + size;
+ bool is_valid;
+
+ is_valid = (vmf->address & mask) == ((vmf->pgoff << PAGE_SHIFT) & mask) &&
+ start >= vmf->vma->vm_start &&
+ end <= vmf->vma->vm_end;
+
+ if (is_valid)
+ *pfn = vmf->pgoff & ~(mask >> PAGE_SHIFT);
+
+ return is_valid;
+}
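
The helper accepts a huge mapping only when the faulting virtual address and the file offset are co-aligned at the block size and the whole block fits inside the VMA. A standalone sketch of that rule for a 2 MiB block (hypothetical helper, byte-based for clarity):

#include <stdbool.h>
#include <stdint.h>

#define SZ_2M (2UL * 1024 * 1024)	/* typical PMD_SIZE on x86-64 */

static bool huge_fault_aligned(uint64_t addr, uint64_t pgoff_bytes,
                               uint64_t vm_start, uint64_t vm_end)
{
	uint64_t mask = SZ_2M - 1;
	uint64_t start = addr & ~mask;

	/* VA and file offset must be co-aligned and the block in-bounds. */
	return (addr & mask) == (pgoff_bytes & mask) &&
	       start >= vm_start && start + SZ_2M <= vm_end;
}
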
+
+static vm_fault_t mshv_vtl_low_huge_fault(struct vm_fault *vmf, unsigned int order)
+{
+ unsigned long pfn = vmf->pgoff;
+ vm_fault_t ret = VM_FAULT_FALLBACK;
+
+ switch (order) {
+ case 0:
+ return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+
+ case PMD_ORDER:
+ if (can_fault(vmf, PMD_SIZE, &pfn))
+ ret = vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+ return ret;
+
+ case PUD_ORDER:
+ if (can_fault(vmf, PUD_SIZE, &pfn))
+ ret = vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
+ return ret;
+
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static vm_fault_t mshv_vtl_low_fault(struct vm_fault *vmf)
+{
+ return mshv_vtl_low_huge_fault(vmf, 0);
+}
+
+static const struct vm_operations_struct mshv_vtl_low_vm_ops = {
+ .fault = mshv_vtl_low_fault,
+ .huge_fault = mshv_vtl_low_huge_fault,
+};
+
+static int mshv_vtl_low_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &mshv_vtl_low_vm_ops;
+ vm_flags_set(vma, VM_HUGEPAGE | VM_MIXEDMAP);
+
+ return 0;
+}
+
+static const struct file_operations mshv_vtl_low_file_ops = {
+ .owner = THIS_MODULE,
+ .open = mshv_vtl_low_open,
+ .mmap = mshv_vtl_low_mmap,
+};
+
+static struct miscdevice mshv_vtl_low = {
+ .name = "mshv_vtl_low",
+ .nodename = "mshv_vtl_low",
+ .fops = &mshv_vtl_low_file_ops,
+ .mode = 0600,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static int __init mshv_vtl_init(void)
+{
+ int ret;
+ struct device *dev;
+
+ /*
+ * This creates /dev/mshv which provides functionality to create VTLs and partitions.
+ */
+ ret = misc_register(&mshv_dev);
+ if (ret) {
+ pr_err("mshv device register failed: %d\n", ret);
+ return ret;
+ }
+ dev = mshv_dev.this_device; /* valid only after misc_register() */
+
+ tasklet_init(&msg_dpc, mshv_vtl_sint_on_msg_dpc, 0);
+ init_waitqueue_head(&fd_wait_queue);
+
+ if (mshv_vtl_get_vsm_regs()) {
+ dev_emerg(dev, "Unable to get VSM capabilities !!\n");
+ ret = -ENODEV;
+ goto free_dev;
+ }
+ if (mshv_vtl_configure_vsm_partition(dev)) {
+ dev_emerg(dev, "VSM configuration failed !!\n");
+ ret = -ENODEV;
+ goto free_dev;
+ }
+
+ mshv_vtl_return_call_init(mshv_vsm_page_offsets.vtl_return_offset);
+ ret = hv_vtl_setup_synic();
+ if (ret)
+ goto free_dev;
+
+ /*
+ * mshv_sint device adds VMBus relay ioctl support.
+ * This provides a channel for VTL0 to communicate with VTL2.
+ */
+ ret = misc_register(&mshv_vtl_sint_dev);
+ if (ret)
+ goto free_synic;
+
+ /*
+ * The mshv_hvcall device provides an interface for userspace to make
+ * direct hypercalls.
+ */
+ ret = misc_register(&mshv_vtl_hvcall_dev);
+ if (ret)
+ goto free_sint;
+
+ /*
+ * The mshv_vtl_low device exposes the VTL0 address space to user mode in
+ * VTL2: its mmap() lets a VTL2 user-mode process map VTL0 memory directly.
+ */
+ ret = misc_register(&mshv_vtl_low);
+ if (ret)
+ goto free_hvcall;
+
+ /*
+ * "mshv vtl mem dev" device is later used to setup VTL0 memory.
+ */
+ mem_dev = kzalloc(sizeof(*mem_dev), GFP_KERNEL);
+ if (!mem_dev) {
+ ret = -ENOMEM;
+ goto free_low;
+ }
+
+ mutex_init(&mshv_vtl_poll_file_lock);
+
+ device_initialize(mem_dev);
+ dev_set_name(mem_dev, "mshv vtl mem dev");
+ ret = device_add(mem_dev);
+ if (ret) {
+ dev_err(dev, "mshv vtl mem dev add: %d\n", ret);
+ goto free_mem;
+ }
+
+ return 0;
+
+free_mem:
+ kfree(mem_dev);
+free_low:
+ misc_deregister(&mshv_vtl_low);
+free_hvcall:
+ misc_deregister(&mshv_vtl_hvcall_dev);
+free_sint:
+ misc_deregister(&mshv_vtl_sint_dev);
+free_synic:
+ hv_vtl_remove_synic();
+free_dev:
+ misc_deregister(&mshv_dev);
+
+ return ret;
+}
+
+static void __exit mshv_vtl_exit(void)
+{
+ device_del(mem_dev);
+ kfree(mem_dev);
+ misc_deregister(&mshv_vtl_low);
+ misc_deregister(&mshv_vtl_hvcall_dev);
+ misc_deregister(&mshv_vtl_sint_dev);
+ hv_vtl_remove_synic();
+ misc_deregister(&mshv_dev);
+}
+
+module_init(mshv_vtl_init);
+module_exit(mshv_vtl_exit);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 23ce1fb70de1..3c421a7f78c0 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -184,7 +184,8 @@ void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
- struct page *pages, u32 page_cnt, u32 max_pkt_size)
+ struct page *pages, u32 page_cnt, u32 max_pkt_size,
+ bool confidential)
{
struct page **pages_wraparound;
int i;
@@ -208,7 +209,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
ring_info->ring_buffer = (struct hv_ring_buffer *)
vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
- pgprot_decrypted(PAGE_KERNEL));
+ confidential ? PAGE_KERNEL : pgprot_decrypted(PAGE_KERNEL));
kfree(pages_wraparound);
if (!ring_info->ring_buffer)
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 2ed5a1e89d69..a53af6fe81a6 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -36,6 +36,7 @@
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <clocksource/hyperv_timer.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -57,6 +58,18 @@ int vmbus_irq;
int vmbus_interrupt;
/*
+ * If the Confidential VMBus is used, the data on the "wire" is not
+ * visible to either the host or the hypervisor.
+ */
+static bool is_confidential;
+
+bool vmbus_is_confidential(void)
+{
+ return is_confidential;
+}
+EXPORT_SYMBOL_GPL(vmbus_is_confidential);
+
+/*
* The panic notifier below is responsible solely for unloading the
* vmbus connection, which is necessary in a panic event.
*
@@ -322,7 +335,7 @@ static ssize_t out_read_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sysfs_emit(buf, "%d\n", outbound.current_read_index);
+ return sysfs_emit(buf, "%u\n", outbound.current_read_index);
}
static DEVICE_ATTR_RO(out_read_index);
@@ -341,7 +354,7 @@ static ssize_t out_write_index_show(struct device *dev,
&outbound);
if (ret < 0)
return ret;
- return sysfs_emit(buf, "%d\n", outbound.current_write_index);
+ return sysfs_emit(buf, "%u\n", outbound.current_write_index);
}
static DEVICE_ATTR_RO(out_write_index);
@@ -1045,12 +1058,9 @@ static void vmbus_onmessage_work(struct work_struct *work)
kfree(ctx);
}
-void vmbus_on_msg_dpc(unsigned long data)
+static void __vmbus_on_msg_dpc(void *message_page_addr)
{
- struct hv_per_cpu_context *hv_cpu = (void *)data;
- void *page_addr = hv_cpu->synic_message_page;
- struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
- VMBUS_MESSAGE_SINT;
+ struct hv_message msg_copy, *msg;
struct vmbus_channel_message_header *hdr;
enum vmbus_channel_message_type msgtype;
const struct vmbus_channel_message_table_entry *entry;
@@ -1058,6 +1068,10 @@ void vmbus_on_msg_dpc(unsigned long data)
__u8 payload_size;
u32 message_type;
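+ /* Either the hypervisor or the paravisor SynIC message page may be absent. */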
+ if (!message_page_addr)
+ return;
+ msg = (struct hv_message *)message_page_addr + VMBUS_MESSAGE_SINT;
+
/*
* 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
* it is being used in 'struct vmbus_channel_message_header' definition
@@ -1183,6 +1197,14 @@ msg_handled:
vmbus_signal_eom(msg, message_type);
}
+void vmbus_on_msg_dpc(unsigned long data)
+{
+ struct hv_per_cpu_context *hv_cpu = (void *)data;
+
+ __vmbus_on_msg_dpc(hv_cpu->hyp_synic_message_page);
+ __vmbus_on_msg_dpc(hv_cpu->para_synic_message_page);
+}
+
#ifdef CONFIG_PM_SLEEP
/*
* Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
@@ -1221,21 +1243,19 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
#endif /* CONFIG_PM_SLEEP */
/*
- * Schedule all channels with events pending
+ * Schedule all channels with events pending.
+ * The event page can be directly checked to get the id of
+ * the channel that has the interrupt pending.
*/
-static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
+static void vmbus_chan_sched(void *event_page_addr)
{
unsigned long *recv_int_page;
u32 maxbits, relid;
+ union hv_synic_event_flags *event;
- /*
- * The event page can be directly checked to get the id of
- * the channel that has the interrupt pending.
- */
- void *page_addr = hv_cpu->synic_event_page;
- union hv_synic_event_flags *event
- = (union hv_synic_event_flags *)page_addr +
- VMBUS_MESSAGE_SINT;
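+ /* Either the hypervisor or the paravisor SynIC event page may be absent on this CPU. */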
+ if (!event_page_addr)
+ return;
+ event = (union hv_synic_event_flags *)event_page_addr + VMBUS_MESSAGE_SINT;
maxbits = HV_EVENT_FLAGS_COUNT;
recv_int_page = event->flags;
@@ -1243,6 +1263,11 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
if (unlikely(!recv_int_page))
return;
+ /*
+ * One possible optimization (suggested by Michael Kelley
+ * <mhklinux@outlook.com>) would be to keep track of the largest relID
+ * that's in use and only scan up to that relID.
+ */
for_each_set_bit(relid, recv_int_page, maxbits) {
void (*callback_fn)(void *context);
struct vmbus_channel *channel;
@@ -1306,29 +1331,39 @@ sched_unlock_rcu:
}
}
-static void vmbus_isr(void)
+static void vmbus_message_sched(struct hv_per_cpu_context *hv_cpu, void *message_page_addr)
{
- struct hv_per_cpu_context *hv_cpu
- = this_cpu_ptr(hv_context.cpu_context);
- void *page_addr;
struct hv_message *msg;
- vmbus_chan_sched(hv_cpu);
-
- page_addr = hv_cpu->synic_message_page;
- msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
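+ /* Nothing to do if this SynIC does not supply a message page. */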
+ if (!message_page_addr)
+ return;
+ msg = (struct hv_message *)message_page_addr + VMBUS_MESSAGE_SINT;
/* Check if there are actual msgs to be processed */
if (msg->header.message_type != HVMSG_NONE) {
if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
hv_stimer0_isr();
vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
- } else
+ } else {
tasklet_schedule(&hv_cpu->msg_dpc);
+ }
}
+}
+
+void vmbus_isr(void)
+{
+ struct hv_per_cpu_context *hv_cpu
+ = this_cpu_ptr(hv_context.cpu_context);
+
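+ /* Check both the hypervisor and the paravisor SynIC; either page may be NULL. */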
+ vmbus_chan_sched(hv_cpu->hyp_synic_event_page);
+ vmbus_chan_sched(hv_cpu->para_synic_event_page);
+
+ vmbus_message_sched(hv_cpu, hv_cpu->hyp_synic_message_page);
+ vmbus_message_sched(hv_cpu, hv_cpu->para_synic_message_page);
add_interrupt_randomness(vmbus_interrupt);
}
+EXPORT_SYMBOL_FOR_MODULES(vmbus_isr, "mshv_vtl");
static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
{
@@ -1343,6 +1378,59 @@ static void vmbus_percpu_work(struct work_struct *work)
hv_synic_init(cpu);
}
+static int vmbus_alloc_synic_and_connect(void)
+{
+ int ret, cpu;
+ struct work_struct __percpu *works;
+ int hyperv_cpuhp_online;
+
+ ret = hv_synic_alloc();
+ if (ret < 0)
+ goto err_alloc;
+
+ works = alloc_percpu(struct work_struct);
+ if (!works) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /*
+ * Initialize the per-cpu interrupt state and stimer state.
+ * Then connect to the host.
+ */
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ struct work_struct *work = per_cpu_ptr(works, cpu);
+
+ INIT_WORK(work, vmbus_percpu_work);
+ schedule_work_on(cpu, work);
+ }
+
+ for_each_online_cpu(cpu)
+ flush_work(per_cpu_ptr(works, cpu));
+
+ /* Register the callbacks for possible CPU online/offline'ing */
+ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
+ hv_synic_init, hv_synic_cleanup);
+ cpus_read_unlock();
+ free_percpu(works);
+ if (ret < 0)
+ goto err_alloc;
+ hyperv_cpuhp_online = ret;
+
+ ret = vmbus_connect();
+ if (ret)
+ goto err_connect;
+ return 0;
+
+err_connect:
+ cpuhp_remove_state(hyperv_cpuhp_online);
+ return ret;
+err_alloc:
+ hv_synic_free();
+ return ret;
+}
+
/*
* vmbus_bus_init -Main vmbus driver initialization routine.
*
@@ -1353,8 +1441,7 @@ static void vmbus_percpu_work(struct work_struct *work)
*/
static int vmbus_bus_init(void)
{
- int ret, cpu;
- struct work_struct __percpu *works;
+ int ret;
ret = hv_init();
if (ret != 0) {
@@ -1389,41 +1476,15 @@ static int vmbus_bus_init(void)
}
}
- ret = hv_synic_alloc();
- if (ret)
- goto err_alloc;
-
- works = alloc_percpu(struct work_struct);
- if (!works) {
- ret = -ENOMEM;
- goto err_alloc;
- }
-
/*
- * Initialize the per-cpu interrupt state and stimer state.
- * Then connect to the host.
+ * Cache the value as getting it involves a VM exit on x86(_64), and
+ * doing that on each VP while initializing SynICs wastes time.
*/
- cpus_read_lock();
- for_each_online_cpu(cpu) {
- struct work_struct *work = per_cpu_ptr(works, cpu);
-
- INIT_WORK(work, vmbus_percpu_work);
- schedule_work_on(cpu, work);
- }
-
- for_each_online_cpu(cpu)
- flush_work(per_cpu_ptr(works, cpu));
-
- /* Register the callbacks for possible CPU online/offline'ing */
- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
- hv_synic_init, hv_synic_cleanup);
- cpus_read_unlock();
- free_percpu(works);
- if (ret < 0)
- goto err_alloc;
- hyperv_cpuhp_online = ret;
-
- ret = vmbus_connect();
+ is_confidential = ms_hyperv.confidential_vmbus_available;
+ if (is_confidential)
+ pr_info("Establishing connection to the confidential VMBus\n");
+ hv_para_set_sint_proxy(!is_confidential);
+ ret = vmbus_alloc_synic_and_connect();
if (ret)
goto err_connect;
@@ -1439,9 +1500,6 @@ static int vmbus_bus_init(void)
return 0;
err_connect:
- cpuhp_remove_state(hyperv_cpuhp_online);
-err_alloc:
- hv_synic_free();
if (vmbus_irq == -1) {
hv_remove_vmbus_handler();
} else {
@@ -1742,7 +1800,7 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
u32 target_cpu;
ssize_t ret;
- if (sscanf(buf, "%uu", &target_cpu) != 1)
+ if (sscanf(buf, "%u", &target_cpu) != 1)
return -EIO;
cpus_read_lock();
@@ -1947,7 +2005,7 @@ static const struct kobj_type vmbus_chan_ktype = {
* is running.
* For example, HV_NIC device is used either by uio_hv_generic or hv_netvsc at any given point of
* time, and "ring" sysfs is needed only when uio_hv_generic is bound to that device. To avoid
- * exposing the ring buffer by default, this function is reponsible to enable visibility of
+ * exposing the ring buffer by default, this function is responsible for enabling visibility of
* ring for userspace to use.
* Note: Race conditions can happen with userspace and it is not encouraged to create new
* use-cases for this. This was added to maintain backward compatibility, while solving
@@ -2110,7 +2168,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
ret = vmbus_add_channel_kobj(child_device_obj,
child_device_obj->channel);
if (ret) {
- pr_err("Unable to register primary channeln");
+ pr_err("Unable to register primary channel\n");
goto err_kset_unregister;
}
hv_debug_add_dev_dir(child_device_obj);
@@ -2798,10 +2856,10 @@ static void hv_crash_handler(struct pt_regs *regs)
*/
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
- hv_synic_disable_regs(cpu);
+ hv_hyp_synic_disable_regs(cpu);
};
-static int hv_synic_suspend(void)
+static int hv_synic_suspend(void *data)
{
/*
* When we reach here, all the non-boot CPUs have been offlined.
@@ -2823,14 +2881,14 @@ static int hv_synic_suspend(void)
* interrupts-disabled context.
*/
- hv_synic_disable_regs(0);
+ hv_hyp_synic_disable_regs(0);
return 0;
}
-static void hv_synic_resume(void)
+static void hv_synic_resume(void *data)
{
- hv_synic_enable_regs(0);
+ hv_hyp_synic_enable_regs(0);
/*
* Note: we don't need to call hv_stimer_init(0), because the timer
@@ -2840,11 +2898,15 @@ static void hv_synic_resume(void)
}
/* The callbacks run only on CPU0, with irqs_disabled. */
-static struct syscore_ops hv_synic_syscore_ops = {
+static const struct syscore_ops hv_synic_syscore_ops = {
.suspend = hv_synic_suspend,
.resume = hv_synic_resume,
};
+static struct syscore hv_synic_syscore = {
+ .ops = &hv_synic_syscore_ops,
+};
+
static int __init hv_acpi_init(void)
{
int ret;
@@ -2887,7 +2949,7 @@ static int __init hv_acpi_init(void)
hv_setup_kexec_handler(hv_kexec_handler);
hv_setup_crash_handler(hv_crash_handler);
- register_syscore_ops(&hv_synic_syscore_ops);
+ register_syscore(&hv_synic_syscore);
return 0;
@@ -2901,7 +2963,7 @@ static void __exit vmbus_exit(void)
{
int cpu;
- unregister_syscore_ops(&hv_synic_syscore_ops);
+ unregister_syscore(&hv_synic_syscore);
hv_remove_kexec_handler();
hv_remove_crash_handler();
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 9d28fcf7cd2a..157678b821fc 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -175,7 +175,7 @@ config SENSORS_ADT7X10
select REGMAP
help
This module contains common code shared by the ADT7310/ADT7320 and
- ADT7410/ADT7420 temperature monitoring chip drivers.
+ ADT7410/ADT7420/ADT7422 temperature monitoring chip drivers.
If built as a module, the module will be called adt7x10.
@@ -191,12 +191,12 @@ config SENSORS_ADT7310
will be called adt7310.
config SENSORS_ADT7410
- tristate "Analog Devices ADT7410/ADT7420"
+ tristate "Analog Devices ADT7410/ADT7420/ADT7422"
depends on I2C
select SENSORS_ADT7X10
help
If you say yes here you get support for the Analog Devices
- ADT7410 and ADT7420 temperature monitoring chips.
+ ADT7410, ADT7420 and ADT7422 temperature monitoring chips.
This driver can also be built as a module. If so, the module
will be called adt7410.
@@ -245,12 +245,12 @@ config SENSORS_ADT7475
will be called adt7475.
config SENSORS_AHT10
- tristate "Aosong AHT10, AHT20"
+ tristate "Aosong AHT10, AHT20, DHT20"
depends on I2C
select CRC8
help
- If you say yes here, you get support for the Aosong AHT10 and AHT20
- temperature and humidity sensors
+ If you say yes here, you get support for the Aosong AHT10, AHT20 and
+ DHT20 temperature and humidity sensors.
This driver can also be built as a module. If so, the module
will be called aht10.
@@ -769,6 +769,16 @@ config SENSORS_GL520SM
This driver can also be built as a module. If so, the module
will be called gl520sm.
+config SENSORS_GPD
+ tristate "GPD handhelds"
+ depends on X86 && DMI && HAS_IOPORT
+ help
+ If you say yes here you get support for fan speed readings and
+ fan control on GPD handheld devices.
+
+ Can also be built as a module. In that case it will be
+ called gpd-fan.
+
config SENSORS_G760A
tristate "GMT G760A"
depends on I2C
@@ -1164,6 +1174,18 @@ config SENSORS_LTQ_CPUTEMP
If you say yes here you get support for the temperature
sensor inside your CPU.
+config SENSORS_MACSMC_HWMON
+ tristate "Apple SMC (Apple Silicon)"
+ depends on MFD_MACSMC && OF
+ help
+ This driver enables hwmon support for current, power, temperature,
+ and voltage sensors, as well as fan speed reporting and control
+ on Apple Silicon devices. Say Y here if you have an Apple Silicon
+ device.
+
+ This driver can also be built as a module. If so, the module will
+ be called macsmc-hwmon.
+
config SENSORS_MAX1111
tristate "Maxim MAX1111 Serial 8-bit ADC chip and compatibles"
depends on SPI_MASTER
@@ -1698,6 +1720,16 @@ config SENSORS_NCT6683
This driver can also be built as a module. If so, the module
will be called nct6683.
+config SENSORS_NCT6694
+ tristate "Nuvoton NCT6694 Hardware Monitor support"
+ depends on MFD_NCT6694
+ help
+ Say Y here to support Nuvoton NCT6694 hardware monitoring
+ functionality.
+
+ This driver can also be built as a module. If so, the module
+ will be called nct6694-hwmon.
+
config SENSORS_NCT6775_CORE
tristate
select REGMAP
@@ -1895,6 +1927,16 @@ config SENSORS_RASPBERRYPI_HWMON
This driver can also be built as a module. If so, the module
will be called raspberrypi-hwmon.
+config SENSORS_SA67MCU
+ tristate "Kontron sa67mcu hardware monitoring driver"
+ depends on MFD_SL28CPLD || COMPILE_TEST
+ help
+ If you say yes here you get support for the voltage and temperature
+ monitor of the sa67 board management controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called sa67mcu-hwmon.
+
config SENSORS_SL28CPLD
tristate "Kontron sl28cpld hardware monitoring driver"
depends on MFD_SL28CPLD || COMPILE_TEST
@@ -1930,8 +1972,8 @@ config SENSORS_SHT21
tristate "Sensiron humidity and temperature sensors. SHT21 and compat."
depends on I2C
help
- If you say yes here you get support for the Sensiron SHT21, SHT25
- humidity and temperature sensors.
+ If you say yes here you get support for the Sensirion SHT20, SHT21,
+ SHT25 humidity and temperature sensors.
This driver can also be built as a module. If so, the module
will be called sht21.
@@ -2252,13 +2294,14 @@ config SENSORS_INA2XX
will be called ina2xx.
config SENSORS_INA238
- tristate "Texas Instruments INA238"
+ tristate "Texas Instruments INA238 and compatibles"
depends on I2C
select REGMAP_I2C
help
- If you say yes here you get support for the INA238 power monitor
- chip. This driver supports voltage, current, power and temperature
- measurements as well as alarm configuration.
+ If you say yes here you get support for INA228, INA237, INA238,
+ INA700, INA780, and SQ52206 power monitor chips. This driver supports
+ voltage, current, power, energy, and temperature measurements as well
+ as alarm configuration.
This driver can also be built as a module. If so, the module
will be called ina238.
@@ -2403,6 +2446,18 @@ config SENSORS_TMP513
This driver can also be built as a module. If so, the module
will be called tmp513.
+config SENSORS_TSC1641
+ tristate "ST Microelectronics TSC1641 Power Monitor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the TSC1641 power monitor chip.
+ The driver uses the chip's default configuration, except that
+ temperature measurement is enabled.
+
+ This driver can also be built as a module. If so, the module
+ will be called tsc1641.
+
config SENSORS_VEXPRESS
tristate "Versatile Express"
depends on VEXPRESS_CONFIG
@@ -2673,9 +2728,10 @@ config SENSORS_ASUS_EC
depends on ACPI_EC
help
If you say yes here you get support for the ACPI embedded controller
- hardware monitoring interface found in ASUS motherboards. The driver
- currently supports B550/X570 boards, although other ASUS boards might
- provide this monitoring interface as well.
+ hardware monitoring interface found in some ASUS motherboards. It
+ exposes sensors such as water flow and water temperature, optional
+ fans, and additional temperature sensors (T_Sensor, chipset
+ temperatures).
This driver can also be built as a module. If so, the module
will be called asus_ec_sensors.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index cd8bc4752b4d..eade8e3b1bde 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_SENSORS_GIGABYTE_WATERFORCE) += gigabyte_waterforce.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
obj-$(CONFIG_SENSORS_GSC) += gsc-hwmon.o
+obj-$(CONFIG_SENSORS_GPD) += gpd-fan.o
obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_GXP_FAN_CTRL) += gxp-fan-ctrl.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
@@ -147,6 +148,7 @@ obj-$(CONFIG_SENSORS_LTC4260) += ltc4260.o
obj-$(CONFIG_SENSORS_LTC4261) += ltc4261.o
obj-$(CONFIG_SENSORS_LTC4282) += ltc4282.o
obj-$(CONFIG_SENSORS_LTQ_CPUTEMP) += ltq-cputemp.o
+obj-$(CONFIG_SENSORS_MACSMC_HWMON) += macsmc-hwmon.o
obj-$(CONFIG_SENSORS_MAX1111) += max1111.o
obj-$(CONFIG_SENSORS_MAX127) += max127.o
obj-$(CONFIG_SENSORS_MAX16065) += max16065.o
@@ -174,6 +176,7 @@ obj-$(CONFIG_SENSORS_MLXREG_FAN) += mlxreg-fan.o
obj-$(CONFIG_SENSORS_MENF21BMC_HWMON) += menf21bmc_hwmon.o
obj-$(CONFIG_SENSORS_MR75203) += mr75203.o
obj-$(CONFIG_SENSORS_NCT6683) += nct6683.o
+obj-$(CONFIG_SENSORS_NCT6694) += nct6694-hwmon.o
obj-$(CONFIG_SENSORS_NCT6775_CORE) += nct6775-core.o
nct6775-objs := nct6775-platform.o
obj-$(CONFIG_SENSORS_NCT6775) += nct6775.o
@@ -196,6 +199,7 @@ obj-$(CONFIG_SENSORS_PT5161L) += pt5161l.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
obj-$(CONFIG_SENSORS_QNAP_MCU_HWMON) += qnap-mcu-hwmon.o
obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
+obj-$(CONFIG_SENSORS_SA67MCU) += sa67mcu-hwmon.o
obj-$(CONFIG_SENSORS_SBTSI) += sbtsi_temp.o
obj-$(CONFIG_SENSORS_SBRMI) += sbrmi.o
obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o
@@ -230,6 +234,7 @@ obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
obj-$(CONFIG_SENSORS_TMP464) += tmp464.o
obj-$(CONFIG_SENSORS_TMP513) += tmp513.o
+obj-$(CONFIG_SENSORS_TSC1641) += tsc1641.o
obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
obj-$(CONFIG_SENSORS_VIA686A) += via686a.o
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 80d09b017d3b..c38c932e5d2a 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -197,8 +197,16 @@ static int adm1026_scaling[] = { /* .001 Volts */
#define FAN_TO_REG(val, div) ((val) <= 0 ? 0xff : \
clamp_val(1350000 / ((val) * (div)), \
1, 254))
-#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : (val) == 0xff ? 0 : \
- 1350000 / ((val) * (div)))
+
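+/* Register value 0 is an invalid reading (-1), 0xff means the fan is stopped (0 RPM). */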
+static int fan_from_reg(int val, int div)
+{
+ if (val == 0)
+ return -1;
+ if (val == 0xff)
+ return 0;
+ return 1350000 / (val * div);
+}
+
#define DIV_FROM_REG(val) (1 << (val))
#define DIV_TO_REG(val) ((val) >= 8 ? 3 : (val) >= 4 ? 2 : (val) >= 2 ? 1 : 0)
@@ -656,7 +664,7 @@ static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
+ return sprintf(buf, "%d\n", fan_from_reg(data->fan[nr],
data->fan_div[nr]));
}
static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
@@ -665,7 +673,7 @@ static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct adm1026_data *data = adm1026_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
+ return sprintf(buf, "%d\n", fan_from_reg(data->fan_min[nr],
data->fan_div[nr]));
}
static ssize_t fan_min_store(struct device *dev,
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 761c13092488..71eea8ae51b9 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -171,14 +171,17 @@ fan_show(struct device *dev, struct device_attribute *devattr, char *buf)
struct adm1029_data *data = adm1029_update_device(dev);
u16 val;
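+ /* Serialize with adm1029_update_device() so fan[] and fan_div[] are read consistently. */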
+ mutex_lock(&data->update_lock);
if (data->fan[attr->index] == 0 ||
(data->fan_div[attr->index] & 0xC0) == 0 ||
data->fan[attr->index] == 255) {
+ mutex_unlock(&data->update_lock);
return sprintf(buf, "0\n");
}
val = 1880 * 120 / DIV_FROM_REG(data->fan_div[attr->index])
/ data->fan[attr->index];
+ mutex_unlock(&data->update_lock);
return sprintf(buf, "%d\n", val);
}
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 6dfbeb6acf00..86f6044b5bd0 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -37,7 +37,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
/* Addresses to scan */
@@ -125,7 +124,6 @@ static inline unsigned int AOUT_FROM_REG(u8 reg)
struct adm9240_data {
struct device *dev;
struct regmap *regmap;
- struct mutex update_lock;
u8 fan_div[2]; /* rw fan1_div, read-only accessor */
u8 vrm; /* -- vrm set on startup, no accessor */
@@ -170,8 +168,6 @@ static int adm9240_fan_min_write(struct adm9240_data *data, int channel, long va
u8 fan_min;
int err;
- mutex_lock(&data->update_lock);
-
if (!val) {
fan_min = 255;
new_div = data->fan_div[channel];
@@ -206,8 +202,6 @@ static int adm9240_fan_min_write(struct adm9240_data *data, int channel, long va
}
err = regmap_write(data->regmap, ADM9240_REG_FAN_MIN(channel), fan_min);
- mutex_unlock(&data->update_lock);
-
return err;
}
@@ -501,23 +495,17 @@ static int adm9240_fan_read(struct device *dev, u32 attr, int channel, long *val
switch (attr) {
case hwmon_fan_input:
- mutex_lock(&data->update_lock);
err = regmap_read(data->regmap, ADM9240_REG_FAN(channel), &regval);
- if (err < 0) {
- mutex_unlock(&data->update_lock);
+ if (err < 0)
return err;
- }
if (regval == 255 && data->fan_div[channel] < 3) {
/* adjust fan clock divider on overflow */
err = adm9240_write_fan_div(data, channel,
++data->fan_div[channel]);
- if (err) {
- mutex_unlock(&data->update_lock);
+ if (err)
return err;
- }
}
*val = FAN_FROM_REG(regval, BIT(data->fan_div[channel]));
- mutex_unlock(&data->update_lock);
break;
case hwmon_fan_div:
*val = BIT(data->fan_div[channel]);
@@ -791,7 +779,6 @@ static int adm9240_probe(struct i2c_client *client)
return -ENOMEM;
data->dev = dev;
- mutex_init(&data->update_lock);
data->regmap = devm_regmap_init_i2c(client, &adm9240_regmap_config);
if (IS_ERR(data->regmap))
return PTR_ERR(data->regmap);
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 3bf0e0a0882c..73b196a78f3a 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -7,6 +7,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
@@ -90,14 +91,24 @@ static int adt7410_i2c_probe(struct i2c_client *client)
static const struct i2c_device_id adt7410_ids[] = {
{ "adt7410" },
{ "adt7420" },
+ { "adt7422" },
{}
};
MODULE_DEVICE_TABLE(i2c, adt7410_ids);
+static const struct of_device_id adt7410_of_match[] = {
+ { .compatible = "adi,adt7410" },
+ { .compatible = "adi,adt7420" },
+ { .compatible = "adi,adt7422" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adt7410_of_match);
+
static struct i2c_driver adt7410_driver = {
.driver = {
.name = "adt7410",
.pm = pm_sleep_ptr(&adt7x10_dev_pm_ops),
+ .of_match_table = adt7410_of_match,
},
.probe = adt7410_i2c_probe,
.id_table = adt7410_ids,
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 08d0effd97f7..b9991a69e6c6 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -11,7 +11,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
@@ -99,8 +98,6 @@ static const u8 adt7411_in_alarm_bits[] = {
};
struct adt7411_data {
- struct mutex device_lock; /* for "atomic" device accesses */
- struct mutex update_lock;
unsigned long next_update;
long vref_cached;
struct i2c_client *client;
@@ -110,55 +107,41 @@ struct adt7411_data {
/*
* When reading a register containing (up to 4) lsb, all associated
* msb-registers get locked by the hardware. After _one_ of those msb is read,
- * _all_ are unlocked. In order to use this locking correctly, reading lsb/msb
- * is protected here with a mutex, too.
+ * _all_ are unlocked.
*/
static int adt7411_read_10_bit(struct i2c_client *client, u8 lsb_reg,
- u8 msb_reg, u8 lsb_shift)
+ u8 msb_reg, u8 lsb_shift)
{
- struct adt7411_data *data = i2c_get_clientdata(client);
int val, tmp;
- mutex_lock(&data->device_lock);
-
val = i2c_smbus_read_byte_data(client, lsb_reg);
if (val < 0)
- goto exit_unlock;
+ return val;
tmp = (val >> lsb_shift) & 3;
val = i2c_smbus_read_byte_data(client, msb_reg);
+ if (val < 0)
+ return val;
- if (val >= 0)
- val = (val << 2) | tmp;
-
- exit_unlock:
- mutex_unlock(&data->device_lock);
-
+ val = (val << 2) | tmp;
return val;
}
static int adt7411_modify_bit(struct i2c_client *client, u8 reg, u8 bit,
- bool flag)
+ bool flag)
{
- struct adt7411_data *data = i2c_get_clientdata(client);
int ret, val;
- mutex_lock(&data->device_lock);
-
ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0)
- goto exit_unlock;
+ return ret;
if (flag)
val = ret | bit;
else
val = ret & ~bit;
- ret = i2c_smbus_write_byte_data(client, reg, val);
-
- exit_unlock:
- mutex_unlock(&data->device_lock);
- return ret;
+ return i2c_smbus_write_byte_data(client, reg, val);
}
static ssize_t adt7411_show_bit(struct device *dev,
@@ -186,12 +169,11 @@ static ssize_t adt7411_set_bit(struct device *dev,
if (ret || flag > 1)
return -EINVAL;
+ hwmon_lock(dev);
ret = adt7411_modify_bit(client, s_attr2->index, s_attr2->nr, flag);
-
/* force update */
- mutex_lock(&data->update_lock);
data->next_update = jiffies;
- mutex_unlock(&data->update_lock);
+ hwmon_unlock(dev);
return ret < 0 ? ret : count;
}
@@ -294,10 +276,9 @@ static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
int reg, lsb_reg, lsb_shift;
int nr = channel - 1;
- mutex_lock(&data->update_lock);
ret = adt7411_update_vref(dev);
if (ret < 0)
- goto exit_unlock;
+ return ret;
switch (attr) {
case hwmon_in_input:
@@ -307,7 +288,7 @@ static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
ADT7411_REG_EXT_TEMP_AIN1_MSB + nr,
lsb_shift);
if (ret < 0)
- goto exit_unlock;
+ return ret;
*val = ret * data->vref_cached / 1024;
ret = 0;
break;
@@ -318,7 +299,7 @@ static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
: ADT7411_REG_IN_HIGH(channel);
ret = i2c_smbus_read_byte_data(client, reg);
if (ret < 0)
- goto exit_unlock;
+ return ret;
*val = ret * data->vref_cached / 256;
ret = 0;
break;
@@ -329,8 +310,6 @@ static int adt7411_read_in_chan(struct device *dev, u32 attr, int channel,
ret = -EOPNOTSUPP;
break;
}
- exit_unlock:
- mutex_unlock(&data->update_lock);
return ret;
}
@@ -457,10 +436,9 @@ static int adt7411_write_in_chan(struct device *dev, u32 attr, int channel,
struct i2c_client *client = data->client;
int ret, reg;
- mutex_lock(&data->update_lock);
ret = adt7411_update_vref(dev);
if (ret < 0)
- goto exit_unlock;
+ return ret;
val = clamp_val(val, 0, 255 * data->vref_cached / 256);
val = DIV_ROUND_CLOSEST(val * 256, data->vref_cached);
@@ -472,13 +450,10 @@ static int adt7411_write_in_chan(struct device *dev, u32 attr, int channel,
reg = ADT7411_REG_IN_HIGH(channel);
break;
default:
- ret = -EOPNOTSUPP;
- goto exit_unlock;
+ return -EOPNOTSUPP;
}
ret = i2c_smbus_write_byte_data(client, reg, val);
- exit_unlock:
- mutex_unlock(&data->update_lock);
return ret;
}
@@ -679,8 +654,6 @@ static int adt7411_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
data->client = client;
- mutex_init(&data->device_lock);
- mutex_init(&data->update_lock);
ret = adt7411_init_device(data);
if (ret < 0)
diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c
index 2d329391ed3f..d003ee3ebf06 100644
--- a/drivers/hwmon/adt7x10.c
+++ b/drivers/hwmon/adt7x10.c
@@ -15,7 +15,6 @@
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
@@ -55,7 +54,6 @@
/* Each client has this additional data */
struct adt7x10_data {
struct regmap *regmap;
- struct mutex update_lock;
u8 config;
u8 oldconfig;
bool valid; /* true if temperature valid */
@@ -137,17 +135,13 @@ static int adt7x10_temp_read(struct adt7x10_data *data, int index, long *val)
unsigned int regval;
int ret;
- mutex_lock(&data->update_lock);
if (index == adt7x10_temperature && !data->valid) {
/* wait for valid temperature */
ret = adt7x10_temp_ready(data->regmap);
- if (ret) {
- mutex_unlock(&data->update_lock);
+ if (ret)
return ret;
- }
data->valid = true;
}
- mutex_unlock(&data->update_lock);
ret = regmap_read(data->regmap, ADT7X10_REG_TEMP[index], &regval);
if (ret)
@@ -159,13 +153,8 @@ static int adt7x10_temp_read(struct adt7x10_data *data, int index, long *val)
static int adt7x10_temp_write(struct adt7x10_data *data, int index, long temp)
{
- int ret;
-
- mutex_lock(&data->update_lock);
- ret = regmap_write(data->regmap, ADT7X10_REG_TEMP[index],
- ADT7X10_TEMP_TO_REG(temp));
- mutex_unlock(&data->update_lock);
- return ret;
+ return regmap_write(data->regmap, ADT7X10_REG_TEMP[index],
+ ADT7X10_TEMP_TO_REG(temp));
}
static int adt7x10_hyst_read(struct adt7x10_data *data, int index, long *val)
@@ -197,22 +186,17 @@ static int adt7x10_hyst_write(struct adt7x10_data *data, long hyst)
unsigned int regval;
int limit, ret;
- mutex_lock(&data->update_lock);
-
/* convert absolute hysteresis value to a 4 bit delta value */
ret = regmap_read(data->regmap, ADT7X10_T_ALARM_HIGH, &regval);
if (ret < 0)
- goto abort;
+ return ret;
limit = ADT7X10_REG_TO_TEMP(data, regval);
hyst = clamp_val(hyst, ADT7X10_TEMP_MIN, ADT7X10_TEMP_MAX);
regval = clamp_val(DIV_ROUND_CLOSEST(limit - hyst, 1000), 0,
ADT7X10_T_HYST_MASK);
- ret = regmap_write(data->regmap, ADT7X10_T_HYST, regval);
-abort:
- mutex_unlock(&data->update_lock);
- return ret;
+ return regmap_write(data->regmap, ADT7X10_T_HYST, regval);
}
static int adt7x10_alarm_read(struct adt7x10_data *data, int index, long *val)
@@ -344,7 +328,6 @@ int adt7x10_probe(struct device *dev, const char *name, int irq,
data->regmap = regmap;
dev_set_drvdata(dev, data);
- mutex_init(&data->update_lock);
/* configure as specified */
ret = regmap_read(regmap, ADT7X10_CONFIG, &config);
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
index d1c55e2eb479..007befdba977 100644
--- a/drivers/hwmon/aht10.c
+++ b/drivers/hwmon/aht10.c
@@ -37,6 +37,8 @@
#define AHT10_CMD_MEAS 0b10101100
#define AHT10_CMD_RST 0b10111010
+#define DHT20_CMD_INIT 0x71
+
/*
* Flags in the answer byte/command
*/
@@ -48,11 +50,12 @@
#define AHT10_MAX_POLL_INTERVAL_LEN 30
-enum aht10_variant { aht10, aht20 };
+enum aht10_variant { aht10, aht20, dht20 };
static const struct i2c_device_id aht10_id[] = {
{ "aht10", aht10 },
{ "aht20", aht20 },
+ { "dht20", dht20 },
{ },
};
MODULE_DEVICE_TABLE(i2c, aht10_id);
@@ -60,8 +63,6 @@ MODULE_DEVICE_TABLE(i2c, aht10_id);
/**
* struct aht10_data - All the data required to operate an AHT10/AHT20 chip
* @client: the i2c client associated with the AHT10/AHT20
- * @lock: a mutex that is used to prevent parallel access to the
- * i2c client
* @min_poll_interval: the minimum poll interval
* While the poll rate limit is not 100% necessary,
* the datasheet recommends that a measurement
@@ -77,21 +78,18 @@ MODULE_DEVICE_TABLE(i2c, aht10_id);
* AHT10/AHT20
* @crc8: crc8 support flag
* @meas_size: measurements data size
+ * @init_cmd: Initialization command
*/
struct aht10_data {
struct i2c_client *client;
- /*
- * Prevent simultaneous access to the i2c
- * client and previous_poll_time
- */
- struct mutex lock;
ktime_t min_poll_interval;
ktime_t previous_poll_time;
int temperature;
int humidity;
bool crc8;
unsigned int meas_size;
+ u8 init_cmd;
};
/*
@@ -101,13 +99,13 @@ struct aht10_data {
*/
static int aht10_init(struct aht10_data *data)
{
- const u8 cmd_init[] = {AHT10_CMD_INIT, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
+ const u8 cmd_init[] = {data->init_cmd, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
0x00};
int res;
u8 status;
struct i2c_client *client = data->client;
- res = i2c_master_send(client, cmd_init, 3);
+ res = i2c_master_send(client, cmd_init, sizeof(cmd_init));
if (res < 0)
return res;
@@ -168,32 +166,24 @@ static int aht10_read_values(struct aht10_data *data)
u8 raw_data[AHT20_MEAS_SIZE];
struct i2c_client *client = data->client;
- mutex_lock(&data->lock);
- if (!aht10_polltime_expired(data)) {
- mutex_unlock(&data->lock);
+ if (!aht10_polltime_expired(data))
return 0;
- }
res = i2c_master_send(client, cmd_meas, sizeof(cmd_meas));
- if (res < 0) {
- mutex_unlock(&data->lock);
+ if (res < 0)
return res;
- }
usleep_range(AHT10_MEAS_DELAY, AHT10_MEAS_DELAY + AHT10_DELAY_EXTRA);
res = i2c_master_recv(client, raw_data, data->meas_size);
if (res != data->meas_size) {
- mutex_unlock(&data->lock);
if (res >= 0)
return -ENODATA;
return res;
}
- if (data->crc8 && crc8_check(raw_data, data->meas_size)) {
- mutex_unlock(&data->lock);
+ if (data->crc8 && crc8_check(raw_data, data->meas_size))
return -EIO;
- }
hum = ((u32)raw_data[1] << 12u) |
((u32)raw_data[2] << 4u) |
@@ -210,7 +200,6 @@ static int aht10_read_values(struct aht10_data *data)
data->humidity = hum;
data->previous_poll_time = ktime_get_boottime();
- mutex_unlock(&data->lock);
return 0;
}
@@ -352,14 +341,20 @@ static int aht10_probe(struct i2c_client *client)
data->meas_size = AHT20_MEAS_SIZE;
data->crc8 = true;
crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
+ data->init_cmd = AHT10_CMD_INIT;
+ break;
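+ /* DHT20 is AHT20-compatible except for its initialization command. */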
+ case dht20:
+ data->meas_size = AHT20_MEAS_SIZE;
+ data->crc8 = true;
+ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
+ data->init_cmd = DHT20_CMD_INIT;
break;
default:
data->meas_size = AHT10_MEAS_SIZE;
+ data->init_cmd = AHT10_CMD_INIT;
break;
}
- mutex_init(&data->lock);
-
res = aht10_init(data);
if (res < 0)
return res;
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index 0dcb8a3a691d..1ca70e726298 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -20,7 +20,6 @@
#include <linux/jiffies.h>
#include <linux/ktime.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/unaligned.h>
@@ -551,7 +550,6 @@ struct aqc_data {
struct hid_device *hdev;
struct device *hwmon_dev;
struct dentry *debugfs;
- struct mutex mutex; /* Used for locking access when reading and writing PWM values */
enum kinds kind;
const char *name;
@@ -662,7 +660,6 @@ static void aqc_delay_ctrl_report(struct aqc_data *priv)
}
}
-/* Expects the mutex to be locked */
static int aqc_get_ctrl_data(struct aqc_data *priv)
{
int ret;
@@ -680,7 +677,6 @@ static int aqc_get_ctrl_data(struct aqc_data *priv)
return ret;
}
-/* Expects the mutex to be locked */
static int aqc_send_ctrl_data(struct aqc_data *priv)
{
int ret;
@@ -721,11 +717,9 @@ static int aqc_get_ctrl_val(struct aqc_data *priv, int offset, long *val, int ty
{
int ret;
- mutex_lock(&priv->mutex);
-
ret = aqc_get_ctrl_data(priv);
if (ret < 0)
- goto unlock_and_return;
+ return ret;
switch (type) {
case AQC_BE16:
@@ -737,9 +731,6 @@ static int aqc_get_ctrl_val(struct aqc_data *priv, int offset, long *val, int ty
default:
ret = -EINVAL;
}
-
-unlock_and_return:
- mutex_unlock(&priv->mutex);
return ret;
}
@@ -747,11 +738,9 @@ static int aqc_set_ctrl_vals(struct aqc_data *priv, int *offsets, long *vals, in
{
int ret, i;
- mutex_lock(&priv->mutex);
-
ret = aqc_get_ctrl_data(priv);
if (ret < 0)
- goto unlock_and_return;
+ return ret;
for (i = 0; i < len; i++) {
switch (types[i]) {
@@ -762,18 +751,11 @@ static int aqc_set_ctrl_vals(struct aqc_data *priv, int *offsets, long *vals, in
priv->buffer[offsets[i]] = (u8)vals[i];
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
}
- if (ret < 0)
- goto unlock_and_return;
-
- ret = aqc_send_ctrl_data(priv);
-
-unlock_and_return:
- mutex_unlock(&priv->mutex);
- return ret;
+ return aqc_send_ctrl_data(priv);
}
static int aqc_set_ctrl_val(struct aqc_data *priv, int offset, long val, int type)
@@ -953,13 +935,11 @@ static int aqc_legacy_read(struct aqc_data *priv)
{
int ret, i, sensor_value;
- mutex_lock(&priv->mutex);
-
memset(priv->buffer, 0x00, priv->buffer_size);
ret = hid_hw_raw_request(priv->hdev, priv->status_report_id, priv->buffer,
priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
if (ret < 0)
- goto unlock_and_return;
+ return ret;
/* Temperature sensor readings */
for (i = 0; i < priv->num_temp_sensors; i++) {
@@ -1020,10 +1000,7 @@ static int aqc_legacy_read(struct aqc_data *priv)
}
priv->updated = jiffies;
-
-unlock_and_return:
- mutex_unlock(&priv->mutex);
- return ret;
+ return 0;
}
static int aqc_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
@@ -1870,8 +1847,6 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto fail_and_close;
}
- mutex_init(&priv->mutex);
-
priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, priv->name, priv,
&aqc_chip_info, NULL);
diff --git a/drivers/hwmon/aspeed-g6-pwm-tach.c b/drivers/hwmon/aspeed-g6-pwm-tach.c
index 4174b129d1fc..44e1ecba205d 100644
--- a/drivers/hwmon/aspeed-g6-pwm-tach.c
+++ b/drivers/hwmon/aspeed-g6-pwm-tach.c
@@ -528,6 +528,9 @@ static const struct of_device_id aspeed_pwm_tach_match[] = {
{
.compatible = "aspeed,ast2600-pwm-tach",
},
+ {
+ .compatible = "aspeed,ast2700-pwm-tach",
+ },
{},
};
MODULE_DEVICE_TABLE(of, aspeed_pwm_tach_match);
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 4ac554731e98..61b18b88ee8f 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -49,15 +49,19 @@ static char *mutex_path_override;
*/
#define ASUS_EC_MAX_BANK 3
-#define ACPI_LOCK_DELAY_MS 500
+#define ACPI_LOCK_DELAY_MS 800
/* ACPI mutex for locking access to the EC for the firmware */
#define ASUS_HW_ACCESS_MUTEX_ASMX "\\AMW0.ASMX"
#define ASUS_HW_ACCESS_MUTEX_RMTW_ASMX "\\RMTW.ASMX"
+#define ASUS_HW_ACCESS_MUTEX_SB_PC00_LPCB_SIO1_MUT0 "\\_SB.PC00.LPCB.SIO1.MUT0"
+
#define ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0 "\\_SB_.PCI0.SBRG.SIO1.MUT0"
+#define ASUS_HW_ACCESS_MUTEX_SB_PCI0_LPCB_SIO1_MUT0 "\\_SB_.PCI0.LPCB.SIO1.MUT0"
+
#define MAX_IDENTICAL_BOARD_VARIATIONS 3
/* Moniker for the ACPI global lock (':' is not allowed in ASL identifiers) */
@@ -109,16 +113,28 @@ enum ec_sensors {
ec_sensor_temp_t_sensor,
/* VRM temperature [℃] */
ec_sensor_temp_vrm,
+ /* VRM east (right) temperature [℃] */
+ ec_sensor_temp_vrme,
+ /* VRM west (left) temperature [℃] */
+ ec_sensor_temp_vrmw,
/* CPU Core voltage [mV] */
ec_sensor_in_cpu_core,
/* CPU_Opt fan [RPM] */
ec_sensor_fan_cpu_opt,
/* VRM heat sink fan [RPM] */
ec_sensor_fan_vrm_hs,
+ /* VRM east (right) heat sink fan [RPM] */
+ ec_sensor_fan_vrme_hs,
+ /* VRM west (left) heat sink fan [RPM] */
+ ec_sensor_fan_vrmw_hs,
/* Chipset fan [RPM] */
ec_sensor_fan_chipset,
/* Water flow sensor reading [RPM] */
ec_sensor_fan_water_flow,
+ /* USB4 fan [RPM] */
+ ec_sensor_fan_usb4,
+ /* M.2 fan [RPM] */
+ ec_sensor_fan_m2,
/* CPU current [A] */
ec_sensor_curr_cpu,
/* "Water_In" temperature sensor reading [℃] */
@@ -145,11 +161,17 @@ enum ec_sensors {
#define SENSOR_TEMP_MB BIT(ec_sensor_temp_mb)
#define SENSOR_TEMP_T_SENSOR BIT(ec_sensor_temp_t_sensor)
#define SENSOR_TEMP_VRM BIT(ec_sensor_temp_vrm)
+#define SENSOR_TEMP_VRME BIT(ec_sensor_temp_vrme)
+#define SENSOR_TEMP_VRMW BIT(ec_sensor_temp_vrmw)
#define SENSOR_IN_CPU_CORE BIT(ec_sensor_in_cpu_core)
#define SENSOR_FAN_CPU_OPT BIT(ec_sensor_fan_cpu_opt)
#define SENSOR_FAN_VRM_HS BIT(ec_sensor_fan_vrm_hs)
+#define SENSOR_FAN_VRME_HS BIT(ec_sensor_fan_vrme_hs)
+#define SENSOR_FAN_VRMW_HS BIT(ec_sensor_fan_vrmw_hs)
#define SENSOR_FAN_CHIPSET BIT(ec_sensor_fan_chipset)
#define SENSOR_FAN_WATER_FLOW BIT(ec_sensor_fan_water_flow)
+#define SENSOR_FAN_USB4 BIT(ec_sensor_fan_usb4)
+#define SENSOR_FAN_M2 BIT(ec_sensor_fan_m2)
#define SENSOR_CURR_CPU BIT(ec_sensor_curr_cpu)
#define SENSOR_TEMP_WATER_IN BIT(ec_sensor_temp_water_in)
#define SENSOR_TEMP_WATER_OUT BIT(ec_sensor_temp_water_out)
@@ -166,9 +188,13 @@ enum board_family {
family_amd_500_series,
family_amd_600_series,
family_amd_800_series,
+ family_amd_trx_50,
+ family_amd_wrx_90,
+ family_intel_200_series,
family_intel_300_series,
family_intel_400_series,
- family_intel_600_series
+ family_intel_600_series,
+ family_intel_700_series
};
/*
@@ -275,6 +301,46 @@ static const struct ec_sensor_info sensors_family_amd_800[] = {
EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
};
+static const struct ec_sensor_info sensors_family_amd_trx_50[] = {
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x30),
+ [ec_sensor_temp_cpu_package] =
+ EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
+ [ec_sensor_temp_vrme] = EC_SENSOR("VRM_E", hwmon_temp, 1, 0x00, 0x33),
+ [ec_sensor_temp_vrmw] = EC_SENSOR("VRM_W", hwmon_temp, 1, 0x00, 0x34),
+ [ec_sensor_fan_cpu_opt] = EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+ [ec_sensor_fan_vrmw_hs] = EC_SENSOR("VRM_W HS", hwmon_fan, 2, 0x00, 0xb4),
+ [ec_sensor_fan_vrme_hs] = EC_SENSOR("VRM_E HS", hwmon_fan, 2, 0x00, 0xbc),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x01, 0x04),
+};
+
+static const struct ec_sensor_info sensors_family_amd_wrx_90[] = {
+ [ec_sensor_temp_cpu_package] =
+ EC_SENSOR("CPU Package", hwmon_temp, 1, 0x00, 0x31),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+ [ec_sensor_fan_vrmw_hs] =
+ EC_SENSOR("VRMW HS", hwmon_fan, 2, 0x00, 0xb4),
+ [ec_sensor_fan_usb4] = EC_SENSOR("USB4", hwmon_fan, 2, 0x00, 0xb6),
+ [ec_sensor_fan_vrme_hs] =
+ EC_SENSOR("VRME HS", hwmon_fan, 2, 0x00, 0xbc),
+ [ec_sensor_fan_m2] = EC_SENSOR("M.2", hwmon_fan, 2, 0x00, 0xbe),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x01, 0x04),
+};
+
+static const struct ec_sensor_info sensors_family_intel_200[] = {
+ [ec_sensor_temp_chipset] =
+ EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
+ [ec_sensor_temp_cpu] = EC_SENSOR("CPU", hwmon_temp, 1, 0x00, 0x3b),
+ [ec_sensor_temp_mb] =
+ EC_SENSOR("Motherboard", hwmon_temp, 1, 0x00, 0x3c),
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x00, 0x3d),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xbc),
+};
+
static const struct ec_sensor_info sensors_family_intel_300[] = {
[ec_sensor_temp_chipset] =
EC_SENSOR("Chipset", hwmon_temp, 1, 0x00, 0x3a),
@@ -323,6 +389,16 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
EC_SENSOR("Water_Block_In", hwmon_temp, 1, 0x01, 0x02),
};
+static const struct ec_sensor_info sensors_family_intel_700[] = {
+ [ec_sensor_temp_t_sensor] =
+ EC_SENSOR("T_Sensor", hwmon_temp, 1, 0x01, 0x09),
+ [ec_sensor_temp_t_sensor_2] =
+ EC_SENSOR("T_Sensor 2", hwmon_temp, 1, 0x01, 0x05),
+ [ec_sensor_temp_vrm] = EC_SENSOR("VRM", hwmon_temp, 1, 0x00, 0x33),
+ [ec_sensor_fan_cpu_opt] =
+ EC_SENSOR("CPU_Opt", hwmon_fan, 2, 0x00, 0xb0),
+};
+
/* Shortcuts for common combinations */
#define SENSOR_SET_TEMP_CHIPSET_CPU_MB \
(SENSOR_TEMP_CHIPSET | SENSOR_TEMP_CPU | SENSOR_TEMP_MB)
@@ -343,6 +419,52 @@ struct ec_board_info {
enum board_family family;
};
+static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_impact = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_x670e_gene = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
+static const struct ec_board_info board_info_crosshair_x670e_hero = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_SET_TEMP_WATER,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
static const struct ec_board_info board_info_maximus_vi_hero = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR |
@@ -352,6 +474,22 @@ static const struct ec_board_info board_info_maximus_vi_hero = {
.family = family_intel_300_series,
};
+static const struct ec_board_info board_info_maximus_xi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_intel_300_series,
+};
+
+static const struct ec_board_info board_info_maximus_z690_formula = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_SET_TEMP_WATER | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
static const struct ec_board_info board_info_prime_x470_pro = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
@@ -376,6 +514,21 @@ static const struct ec_board_info board_info_prime_x670e_pro_wifi = {
.family = family_amd_600_series,
};
+static const struct ec_board_info board_info_prime_z270_a = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_LPCB_SIO1_MUT0,
+ .family = family_intel_200_series,
+};
+
+static const struct ec_board_info board_info_pro_art_b550_creator = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
@@ -396,16 +549,26 @@ static const struct ec_board_info board_info_pro_art_x870E_creator_wifi = {
.sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
.family = family_amd_800_series,
};
-static const struct ec_board_info board_info_pro_art_b550_creator = {
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_FAN_CPU_OPT,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
+static const struct ec_board_info board_info_pro_ws_trx50_sage_wifi = {
+ /* Board also has a nct6798 */
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE | SENSOR_TEMP_VRME |
+ SENSOR_TEMP_VRMW | SENSOR_FAN_CPU_OPT | SENSOR_FAN_VRME_HS |
+ SENSOR_FAN_VRMW_HS | SENSOR_TEMP_T_SENSOR,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_amd_trx_50,
+};
+
+static const struct ec_board_info board_info_pro_ws_wrx90e_sage_se = {
+ /* Board also has a nct6798 with 7 more fans and temperatures */
+ .sensors = SENSOR_TEMP_CPU_PACKAGE | SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_USB4 | SENSOR_FAN_M2 |
+ SENSOR_FAN_VRME_HS | SENSOR_FAN_VRMW_HS,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_amd_wrx_90,
};
static const struct ec_board_info board_info_pro_ws_x570_ace = {
@@ -416,83 +579,43 @@ static const struct ec_board_info board_info_pro_ws_x570_ace = {
.family = family_amd_500_series,
};
-static const struct ec_board_info board_info_crosshair_x670e_hero = {
- .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
- SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
- SENSOR_SET_TEMP_WATER,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_600_series,
-};
-
-static const struct ec_board_info board_info_crosshair_x670e_gene = {
- .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_600_series,
-};
-
-static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
+static const struct ec_board_info board_info_strix_b550_e_gaming = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT,
.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
.family = family_amd_500_series,
};
-static const struct ec_board_info board_info_crosshair_viii_hero = {
+static const struct ec_board_info board_info_strix_b550_i_gaming = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
SENSOR_IN_CPU_CORE,
.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
.family = family_amd_500_series,
};
-static const struct ec_board_info board_info_maximus_xi_hero = {
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_intel_300_series,
-};
-
-static const struct ec_board_info board_info_maximus_z690_formula = {
- .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_SET_TEMP_WATER | SENSOR_FAN_WATER_FLOW,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
- .family = family_intel_600_series,
-};
-
-static const struct ec_board_info board_info_crosshair_viii_impact = {
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
+static const struct ec_board_info board_info_strix_b650e_i_gaming = {
+ .sensors = SENSOR_TEMP_VRM | SENSOR_TEMP_T_SENSOR |
+ SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_IN_CPU_CORE,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
};
-static const struct ec_board_info board_info_strix_b550_e_gaming = {
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CPU_OPT,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
+static const struct ec_board_info board_info_strix_b850_i_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_800_series,
};
-static const struct ec_board_info board_info_strix_b550_i_gaming = {
+static const struct ec_board_info board_info_strix_x470_i_gaming = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
.mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
+ .family = family_amd_400_series,
};
static const struct ec_board_info board_info_strix_x570_e_gaming = {
@@ -528,6 +651,50 @@ static const struct ec_board_info board_info_strix_x570_i_gaming = {
.family = family_amd_500_series,
};
+static const struct ec_board_info board_info_strix_x670e_e_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_600_series,
+};
+
+static const struct ec_board_info board_info_strix_x670e_i_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
+static const struct ec_board_info board_info_strix_x870_f_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM | SENSOR_TEMP_T_SENSOR,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_800_series,
+};
+
+static const struct ec_board_info board_info_strix_x870_i_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_800_series,
+};
+
+static const struct ec_board_info board_info_strix_x870e_e_gaming_wifi = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_800_series,
+};
+
+static const struct ec_board_info board_info_strix_x870e_h_gaming_wifi7 = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM | SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_800_series,
+};
+
static const struct ec_board_info board_info_strix_z390_f_gaming = {
.sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
SENSOR_TEMP_T_SENSOR |
@@ -554,6 +721,35 @@ static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = {
.family = family_intel_600_series,
};
+static const struct ec_board_info board_info_strix_z690_e_gaming_wifi = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
+static const struct ec_board_info board_info_strix_z790_e_gaming_wifi_ii = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PC00_LPCB_SIO1_MUT0,
+ .family = family_intel_700_series,
+};
+
+static const struct ec_board_info board_info_strix_z790_i_gaming_wifi = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_T_SENSOR_2 |
+ SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PC00_LPCB_SIO1_MUT0,
+ .family = family_intel_700_series,
+};
+
+static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
+ .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
+ SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_600_series,
+};
+
static const struct ec_board_info board_info_zenith_ii_extreme = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
@@ -566,15 +762,6 @@ static const struct ec_board_info board_info_zenith_ii_extreme = {
.family = family_amd_500_series,
};
-static const struct ec_board_info board_info_tuf_gaming_x670e_plus = {
- .sensors = SENSOR_TEMP_CPU | SENSOR_TEMP_CPU_PACKAGE |
- SENSOR_TEMP_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_WATER_IN | SENSOR_TEMP_WATER_OUT |
- SENSOR_FAN_CPU_OPT,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_600_series,
-};
-
#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
{ \
.matches = { \
@@ -594,14 +781,20 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_prime_x570_pro),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X670E-PRO WIFI",
&board_info_prime_x670e_pro_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME Z270-A",
+ &board_info_prime_z270_a),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt B550-CREATOR",
+ &board_info_pro_art_b550_creator),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
&board_info_pro_art_x570_creator_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X670E-CREATOR WIFI",
&board_info_pro_art_x670E_creator_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X870E-CREATOR WIFI",
&board_info_pro_art_x870E_creator_wifi),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt B550-CREATOR",
- &board_info_pro_art_b550_creator),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS TRX50-SAGE WIFI",
+ &board_info_pro_ws_trx50_sage_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS WRX90E-SAGE SE",
+ &board_info_pro_ws_wrx90e_sage_se),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
&board_info_pro_ws_x570_ace),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO",
@@ -612,22 +805,28 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_crosshair_viii_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)",
&board_info_crosshair_viii_hero),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR X670E HERO",
- &board_info_crosshair_x670e_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
+ &board_info_crosshair_viii_impact),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR X670E GENE",
&board_info_crosshair_x670e_gene),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR X670E HERO",
+ &board_info_crosshair_x670e_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO",
&board_info_maximus_xi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
&board_info_maximus_xi_hero),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS Z690 FORMULA",
&board_info_maximus_z690_formula),
- DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
- &board_info_crosshair_viii_impact),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
&board_info_strix_b550_e_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING",
&board_info_strix_b550_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B650E-I GAMING WIFI",
+ &board_info_strix_b650e_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B850-I GAMING WIFI",
+ &board_info_strix_b850_i_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X470-I GAMING",
+ &board_info_strix_x470_i_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING",
&board_info_strix_x570_e_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING WIFI II",
@@ -636,18 +835,38 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_strix_x570_f_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-I GAMING",
&board_info_strix_x570_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X670E-E GAMING WIFI",
+ &board_info_strix_x670e_e_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X670E-I GAMING WIFI",
+ &board_info_strix_x670e_i_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X870-F GAMING WIFI",
+ &board_info_strix_x870_f_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X870-I GAMING WIFI",
+ &board_info_strix_x870_i_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X870E-E GAMING WIFI",
+ &board_info_strix_x870e_e_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X870E-H GAMING WIFI7",
+ &board_info_strix_x870e_h_gaming_wifi7),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z390-F GAMING",
&board_info_strix_z390_f_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z490-F GAMING",
&board_info_strix_z490_f_gaming),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4",
&board_info_strix_z690_a_gaming_wifi_d4),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-E GAMING WIFI",
+ &board_info_strix_z690_e_gaming_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z790-E GAMING WIFI II",
+ &board_info_strix_z790_e_gaming_wifi_ii),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z790-I GAMING WIFI",
+ &board_info_strix_z790_i_gaming_wifi),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
&board_info_zenith_ii_extreme),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME ALPHA",
&board_info_zenith_ii_extreme),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("TUF GAMING X670E-PLUS",
&board_info_tuf_gaming_x670e_plus),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("TUF GAMING X670E-PLUS WIFI",
+ &board_info_tuf_gaming_x670e_plus),
{},
};
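Each DMI_EXACT_MATCH_ASUS_BOARD_NAME() entry above pairs an exact DMI board name with its board_info as driver_data. A minimal sketch of how such a table is typically consumed at probe time, assuming the stock dmi_first_match() lookup (the helper name asus_ec_find_board() is hypothetical):

static const struct ec_board_info *asus_ec_find_board(void)
{
	const struct dmi_system_id *dmi_entry;

	dmi_entry = dmi_first_match(dmi_table);
	if (!dmi_entry)
		return NULL;		/* board is not whitelisted */

	return dmi_entry->driver_data;	/* &board_info_... from the entry */
}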
@@ -1115,6 +1334,15 @@ static int asus_ec_probe(struct platform_device *pdev)
case family_amd_800_series:
ec_data->sensors_info = sensors_family_amd_800;
break;
+ case family_amd_trx_50:
+ ec_data->sensors_info = sensors_family_amd_trx_50;
+ break;
+ case family_amd_wrx_90:
+ ec_data->sensors_info = sensors_family_amd_wrx_90;
+ break;
+ case family_intel_200_series:
+ ec_data->sensors_info = sensors_family_intel_200;
+ break;
case family_intel_300_series:
ec_data->sensors_info = sensors_family_intel_300;
break;
@@ -1124,6 +1352,9 @@ static int asus_ec_probe(struct platform_device *pdev)
case family_intel_600_series:
ec_data->sensors_info = sensors_family_intel_600;
break;
+ case family_intel_700_series:
+ ec_data->sensors_info = sensors_family_intel_700;
+ break;
default:
dev_err(dev, "Unknown board family: %d",
ec_data->board_info->family);
diff --git a/drivers/hwmon/asus_rog_ryujin.c b/drivers/hwmon/asus_rog_ryujin.c
index e5e93a20723c..10a1f5aca988 100644
--- a/drivers/hwmon/asus_rog_ryujin.c
+++ b/drivers/hwmon/asus_rog_ryujin.c
@@ -81,10 +81,6 @@ static const char *const rog_ryujin_speed_label[] = {
struct rog_ryujin_data {
struct hid_device *hdev;
struct device *hwmon_dev;
- /* For locking access to buffer */
- struct mutex buffer_lock;
- /* For queueing multiple readers */
- struct mutex status_report_request_mutex;
/* For reinitializing the completions below */
spinlock_t status_report_request_lock;
struct completion cooler_status_received;
@@ -153,18 +149,10 @@ static umode_t rog_ryujin_is_visible(const void *data,
/* Writes the command to the device with the rest of the report filled with zeroes */
static int rog_ryujin_write_expanded(struct rog_ryujin_data *priv, const u8 *cmd, int cmd_length)
{
- int ret;
-
- mutex_lock(&priv->buffer_lock);
-
memcpy_and_pad(priv->buffer, MAX_REPORT_LENGTH, cmd, cmd_length, 0x00);
- ret = hid_hw_output_report(priv->hdev, priv->buffer, MAX_REPORT_LENGTH);
-
- mutex_unlock(&priv->buffer_lock);
- return ret;
+ return hid_hw_output_report(priv->hdev, priv->buffer, MAX_REPORT_LENGTH);
}
-/* Assumes priv->status_report_request_mutex is locked */
static int rog_ryujin_execute_cmd(struct rog_ryujin_data *priv, const u8 *cmd, int cmd_length,
struct completion *status_completion)
{
@@ -196,14 +184,11 @@ static int rog_ryujin_execute_cmd(struct rog_ryujin_data *priv, const u8 *cmd, i
static int rog_ryujin_get_status(struct rog_ryujin_data *priv)
{
- int ret = mutex_lock_interruptible(&priv->status_report_request_mutex);
-
- if (ret < 0)
- return ret;
+ int ret;
if (!time_after(jiffies, priv->updated + msecs_to_jiffies(STATUS_VALIDITY))) {
/* Data is up to date */
- goto unlock_and_return;
+ return 0;
}
/* Retrieve cooler status */
@@ -211,36 +196,30 @@ static int rog_ryujin_get_status(struct rog_ryujin_data *priv)
rog_ryujin_execute_cmd(priv, get_cooler_status_cmd, GET_CMD_LENGTH,
&priv->cooler_status_received);
if (ret < 0)
- goto unlock_and_return;
+ return ret;
/* Retrieve controller status (speeds) */
ret =
rog_ryujin_execute_cmd(priv, get_controller_speed_cmd, GET_CMD_LENGTH,
&priv->controller_status_received);
if (ret < 0)
- goto unlock_and_return;
+ return ret;
/* Retrieve cooler duty */
ret =
rog_ryujin_execute_cmd(priv, get_cooler_duty_cmd, GET_CMD_LENGTH,
&priv->cooler_duty_received);
if (ret < 0)
- goto unlock_and_return;
+ return ret;
/* Retrieve controller duty */
ret =
rog_ryujin_execute_cmd(priv, get_controller_duty_cmd, GET_CMD_LENGTH,
&priv->controller_duty_received);
if (ret < 0)
- goto unlock_and_return;
-
- priv->updated = jiffies;
-
-unlock_and_return:
- mutex_unlock(&priv->status_report_request_mutex);
- if (ret < 0)
return ret;
+ priv->updated = jiffies;
return 0;
}
@@ -303,14 +282,11 @@ static int rog_ryujin_write_fixed_duty(struct rog_ryujin_data *priv, int channel
* Retrieve cooler duty since both pump and internal fan are set
* together, then write back with one of them modified.
*/
- ret = mutex_lock_interruptible(&priv->status_report_request_mutex);
- if (ret < 0)
- return ret;
ret =
rog_ryujin_execute_cmd(priv, get_cooler_duty_cmd, GET_CMD_LENGTH,
&priv->cooler_duty_received);
if (ret < 0)
- goto unlock_and_return;
+ return ret;
memcpy(set_cmd, set_cooler_duty_cmd, SET_CMD_LENGTH);
@@ -329,11 +305,7 @@ static int rog_ryujin_write_fixed_duty(struct rog_ryujin_data *priv, int channel
set_cmd[RYUJIN_SET_COOLER_FAN_DUTY_OFFSET] = val;
}
- ret = rog_ryujin_execute_cmd(priv, set_cmd, SET_CMD_LENGTH, &priv->cooler_duty_set);
-unlock_and_return:
- mutex_unlock(&priv->status_report_request_mutex);
- if (ret < 0)
- return ret;
+ return rog_ryujin_execute_cmd(priv, set_cmd, SET_CMD_LENGTH, &priv->cooler_duty_set);
} else {
/*
* Controller fan duty (channel == 2). No need to retrieve current
@@ -538,8 +510,6 @@ static int rog_ryujin_probe(struct hid_device *hdev, const struct hid_device_id
goto fail_and_close;
}
- mutex_init(&priv->status_report_request_mutex);
- mutex_init(&priv->buffer_lock);
spin_lock_init(&priv->status_report_request_lock);
init_completion(&priv->cooler_status_received);
init_completion(&priv->controller_status_received);
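The get_status() path above keeps its jiffies-based cache: readings newer than STATUS_VALIDITY milliseconds are served without touching the device. The rule in isolation, reusing the driver's own names (the helper itself is hypothetical):

static bool rog_ryujin_status_is_fresh(const struct rog_ryujin_data *priv)
{
	/* time_after() is wraparound-safe for jiffies arithmetic */
	return !time_after(jiffies,
			   priv->updated + msecs_to_jiffies(STATUS_VALIDITY));
}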
diff --git a/drivers/hwmon/cgbc-hwmon.c b/drivers/hwmon/cgbc-hwmon.c
index 772f44d56ccf..3aff4e092132 100644
--- a/drivers/hwmon/cgbc-hwmon.c
+++ b/drivers/hwmon/cgbc-hwmon.c
@@ -107,6 +107,9 @@ static int cgbc_hwmon_probe_sensors(struct device *dev, struct cgbc_hwmon_data *
nb_sensors = data[0];
hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL);
+ if (!hwmon->sensors)
+ return -ENOMEM;
+
sensor = hwmon->sensors;
for (i = 0; i < nb_sensors; i++) {
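The fix above adds the missing NULL check after devm_kzalloc(). For an element-count allocation like this, devm_kcalloc() is the overflow-checking equivalent; a sketch of the same allocation written with it (the patch itself keeps devm_kzalloc with an explicit multiply):

	hwmon->sensors = devm_kcalloc(dev, nb_sensors, sizeof(*hwmon->sensors),
				      GFP_KERNEL);
	if (!hwmon->sensors)
		return -ENOMEM;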
diff --git a/drivers/hwmon/chipcap2.c b/drivers/hwmon/chipcap2.c
index 9d071f7ca9d2..645b8c2e704e 100644
--- a/drivers/hwmon/chipcap2.c
+++ b/drivers/hwmon/chipcap2.c
@@ -81,7 +81,6 @@ struct cc2_data {
struct completion complete;
struct device *hwmon;
struct i2c_client *client;
- struct mutex dev_access_lock; /* device access lock */
struct regulator *regulator;
const char *name;
int irq_ready;
@@ -558,8 +557,6 @@ static int cc2_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
{
struct cc2_data *data = dev_get_drvdata(dev);
- guard(mutex)(&data->dev_access_lock);
-
switch (type) {
case hwmon_temp:
return cc2_measurement(data, type, val);
@@ -600,8 +597,6 @@ static int cc2_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (val < 0 || val > CC2_RH_MAX)
return -EINVAL;
- guard(mutex)(&data->dev_access_lock);
-
switch (attr) {
case hwmon_humidity_min:
cmd = CC2_W_ALARM_L_ON;
@@ -708,8 +703,6 @@ static int cc2_probe(struct i2c_client *client)
i2c_set_clientdata(client, data);
- mutex_init(&data->dev_access_lock);
-
data->client = client;
data->regulator = devm_regulator_get_exclusive(dev, "vdd");
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 1b9203b20d70..ad79db5a183e 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -122,29 +122,29 @@ static const struct tjmax tjmax_table[] = {
};
struct tjmax_model {
- u8 model;
- u8 mask;
+ u32 vfm;
+ u8 stepping_mask;
int tjmax;
};
#define ANY 0xff
static const struct tjmax_model tjmax_model_table[] = {
- { 0x1c, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
- { 0x1c, ANY, 90000 }, /* Z5xx, N2xx, possibly others
- * Note: Also matches 230 and 330,
- * which are covered by tjmax_table
- */
- { 0x26, ANY, 90000 }, /* Atom Tunnel Creek (Exx), Lincroft (Z6xx)
- * Note: TjMax for E6xxT is 110C, but CPU type
- * is undetectable by software
- */
- { 0x27, ANY, 90000 }, /* Atom Medfield (Z2460) */
- { 0x35, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z27x0) */
- { 0x36, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
- * Also matches S12x0 (stepping 9), covered by
- * PCI table
- */
+ { INTEL_ATOM_BONNELL, 10, 100000 }, /* D4xx, K4xx, N4xx, D5xx, K5xx, N5xx */
+ { INTEL_ATOM_BONNELL, ANY, 90000 }, /* Z5xx, N2xx, possibly others
+ * Note: Also matches 230 and 330,
+ * which are covered by tjmax_table
+ */
+ { INTEL_ATOM_BONNELL_MID, ANY, 90000 }, /* Atom Tunnel Creek (Exx), Lincroft (Z6xx)
+ * Note: TjMax for E6xxT is 110C, but CPU type
+ * is undetectable by software
+ */
+ { INTEL_ATOM_SALTWELL_MID, ANY, 90000 }, /* Atom Medfield (Z2460) */
+ { INTEL_ATOM_SALTWELL_TABLET, ANY, 90000 }, /* Atom Clover Trail/Cloverview (Z27x0) */
+ { INTEL_ATOM_SALTWELL, ANY, 100000 }, /* Atom Cedar Trail/Cedarview (N2xxx, D2xxx)
+ * Also matches S12x0 (stepping 9), covered by
+ * PCI table
+ */
};
static bool is_pkg_temp_data(struct temp_data *tdata)
@@ -180,6 +180,11 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
}
pci_dev_put(host_bridge);
+	/*
+	 * This literally looks for "CPU XXX" in the model string; it is
+	 * purely a string search, with no cross-check against the model
+	 * number.
+	 */
for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
if (strstr(c->x86_model_id, tjmax_table[i].id))
return tjmax_table[i].tjmax;
@@ -187,17 +192,18 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
const struct tjmax_model *tm = &tjmax_model_table[i];
- if (c->x86_model == tm->model &&
- (tm->mask == ANY || c->x86_stepping == tm->mask))
+ if (c->x86_vfm == tm->vfm &&
+ (tm->stepping_mask == ANY ||
+ tm->stepping_mask == c->x86_stepping))
return tm->tjmax;
}
/* Early chips have no MSR for TjMax */
- if (c->x86_model == 0xf && c->x86_stepping < 4)
+ if (c->x86_vfm == INTEL_CORE2_MEROM && c->x86_stepping < 4)
usemsr_ee = 0;
- if (c->x86_model > 0xe && usemsr_ee) {
+ if (c->x86_vfm > INTEL_CORE_YONAH && usemsr_ee) {
u8 platform_id;
/*
@@ -211,7 +217,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
"Unable to access MSR 0x17, assuming desktop"
" CPU\n");
usemsr_ee = 0;
- } else if (c->x86_model < 0x17 && !(eax & 0x10000000)) {
+ } else if (c->x86_vfm < INTEL_CORE2_PENRYN &&
+ !(eax & 0x10000000)) {
/*
* Trust bit 28 up to Penryn, I could not find any
* documentation on that; if you happen to know
@@ -226,7 +233,7 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
* Mobile Penryn CPU seems to be platform ID 7 or 5
* (guesswork)
*/
- if (c->x86_model == 0x17 &&
+ if (c->x86_vfm == INTEL_CORE2_PENRYN &&
(platform_id == 5 || platform_id == 7)) {
/*
* If MSR EE bit is set, set it to 90 degrees C,
@@ -258,18 +265,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return tjmax;
}
-static bool cpu_has_tjmax(struct cpuinfo_x86 *c)
-{
- u8 model = c->x86_model;
-
- return model > 0xe &&
- model != 0x1c &&
- model != 0x26 &&
- model != 0x27 &&
- model != 0x35 &&
- model != 0x36;
-}
-
static int get_tjmax(struct temp_data *tdata, struct device *dev)
{
struct cpuinfo_x86 *c = &cpu_data(tdata->cpu);
@@ -287,8 +282,7 @@ static int get_tjmax(struct temp_data *tdata, struct device *dev)
*/
err = rdmsr_safe_on_cpu(tdata->cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (err) {
- if (cpu_has_tjmax(c))
- dev_warn(dev, "Unable to read TjMax from CPU %u\n", tdata->cpu);
+ dev_warn_once(dev, "Unable to read TjMax from CPU %u\n", tdata->cpu);
} else {
val = (eax >> 16) & 0xff;
if (val)
@@ -460,7 +454,7 @@ static int chk_ucode_version(unsigned int cpu)
* Readings might stop update when processor visited too deep sleep,
* fixed for stepping D0 (6EC).
*/
- if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
+ if (c->x86_vfm == INTEL_CORE_YONAH && c->x86_stepping < 0xc && c->microcode < 0x39) {
pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
return -ENODEV;
}
@@ -580,7 +574,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
* MSR_IA32_TEMPERATURE_TARGET register. Atoms don't have the register
* at all.
*/
- if (c->x86_model > 0xe && c->x86_model != 0x1c)
+ if (c->x86_vfm > INTEL_CORE_YONAH && c->x86_vfm != INTEL_ATOM_BONNELL)
if (get_ttarget(tdata, &pdev->dev) >= 0)
tdata->attr_size++;
@@ -793,7 +787,9 @@ static int __init coretemp_init(void)
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
* sensors. We check this bit only, all the early CPUs
- * without thermal sensors will be filtered out.
+ * without thermal sensors will be filtered out. This
+ * includes all the Family 5 and Family 15 (Pentium 4)
+ * models, since they never set the CPUID bit.
*/
if (!x86_match_cpu(coretemp_ids))
return -ENODEV;
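The coretemp conversion above replaces raw x86_model compares with x86_vfm, which encodes vendor, family and model in a single integer. An illustrative sketch of the idea; the bit layout here is an assumption for illustration, not the kernel's exact macro:

/* One compare replaces separate vendor/family/model checks, and ordered
 * compares such as c->x86_vfm < INTEL_CORE2_PENRYN stay meaningful
 * within a single vendor and family.
 */
#define SKETCH_VFM_MAKE(vendor, family, model) \
	(((vendor) << 16) | ((family) << 8) | (model))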
diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c
index b7b911f8359c..b6e508e43fa1 100644
--- a/drivers/hwmon/corsair-cpro.c
+++ b/drivers/hwmon/corsair-cpro.c
@@ -40,7 +40,7 @@
#define CTL_GET_TMP 0x11 /*
* send: byte 1 is channel, rest zero
* rcv: returns temp for channel in centi-degree celsius
- * in bytes 1 and 2
+ * in bytes 1 and 2 as a two's complement value
* returns 0x11 in byte 0 if no sensor is connected
*/
#define CTL_GET_VOLT 0x12 /*
@@ -90,10 +90,10 @@ struct ccp_device {
u8 *cmd_buffer;
u8 *buffer;
int buffer_recv_size; /* number of received bytes in buffer */
- int target[6];
+ int target[NUM_FANS];
DECLARE_BITMAP(temp_cnct, NUM_TEMP_SENSORS);
DECLARE_BITMAP(fan_cnct, NUM_FANS);
- char fan_label[6][LABEL_LENGTH];
+ char fan_label[NUM_FANS][LABEL_LENGTH];
u8 firmware_ver[3];
u8 bootloader_ver[2];
};
@@ -258,7 +258,7 @@ static int ccp_read(struct device *dev, enum hwmon_sensor_types type,
ret = get_data(ccp, CTL_GET_TMP, channel, true);
if (ret < 0)
return ret;
- *val = ret * 10;
+ *val = (s16)ret * 10;
return 0;
default:
break;
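The (s16) cast above is what makes sub-zero readings work: the EC reports centi-degrees Celsius as a two's complement 16-bit value, which must be sign-extended before scaling to the millidegrees hwmon expects. A sketch (the helper name is hypothetical):

static long ccp_centi_to_milli(int raw)
{
	/* e.g. raw 0xFF38 sign-extends to -200, i.e. -2000 mC (-2.0 C) */
	return (long)(s16)raw * 10;
}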
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 6b5c8f200780..dddbd2463f8d 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -9,11 +9,9 @@
#include <linux/errno.h>
#include <linux/hid.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -124,7 +122,6 @@ struct corsairpsu_data {
struct device *hwmon_dev;
struct dentry *debugfs;
struct completion wait_completion;
- struct mutex lock; /* for locking access to cmd_buffer */
u8 *cmd_buffer;
char vendor[REPLY_SIZE];
char product[REPLY_SIZE];
@@ -220,7 +217,6 @@ static int corsairpsu_request(struct corsairpsu_data *priv, u8 cmd, u8 rail, voi
{
int ret;
- mutex_lock(&priv->lock);
switch (cmd) {
case PSU_CMD_RAIL_VOLTS_HCRIT:
case PSU_CMD_RAIL_VOLTS_LCRIT:
@@ -230,17 +226,13 @@ static int corsairpsu_request(struct corsairpsu_data *priv, u8 cmd, u8 rail, voi
case PSU_CMD_RAIL_WATTS:
ret = corsairpsu_usb_cmd(priv, 2, PSU_CMD_SELECT_RAIL, rail, NULL);
if (ret < 0)
- goto cmd_fail;
+ return ret;
break;
default:
break;
}
- ret = corsairpsu_usb_cmd(priv, 3, cmd, 0, data);
-
-cmd_fail:
- mutex_unlock(&priv->lock);
- return ret;
+ return corsairpsu_usb_cmd(priv, 3, cmd, 0, data);
}
static int corsairpsu_get_value(struct corsairpsu_data *priv, u8 cmd, u8 rail, long *val)
@@ -797,7 +789,6 @@ static int corsairpsu_probe(struct hid_device *hdev, const struct hid_device_id
priv->hdev = hdev;
hid_set_drvdata(hdev, priv);
- mutex_init(&priv->lock);
init_completion(&priv->wait_completion);
hid_device_io_start(hdev);
diff --git a/drivers/hwmon/cros_ec_hwmon.c b/drivers/hwmon/cros_ec_hwmon.c
index 9991c3fa020a..48331703f2f5 100644
--- a/drivers/hwmon/cros_ec_hwmon.c
+++ b/drivers/hwmon/cros_ec_hwmon.c
@@ -7,20 +7,34 @@
#include <linux/device.h>
#include <linux/hwmon.h>
+#include <linux/math.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/thermal.h>
#include <linux/types.h>
#include <linux/units.h>
#define DRV_NAME "cros-ec-hwmon"
+#define CROS_EC_HWMON_PWM_GET_FAN_DUTY_CMD_VERSION 0
+#define CROS_EC_HWMON_PWM_SET_FAN_DUTY_CMD_VERSION 1
+#define CROS_EC_HWMON_THERMAL_AUTO_FAN_CTRL_CMD_VERSION 2
+
struct cros_ec_hwmon_priv {
struct cros_ec_device *cros_ec;
const char *temp_sensor_names[EC_TEMP_SENSOR_ENTRIES + EC_TEMP_SENSOR_B_ENTRIES];
u8 usable_fans;
+ bool fan_control_supported;
+	u8 manual_fans; /* bitmap of fans in manual control mode */
+ u8 manual_fan_pwm[EC_FAN_SPEED_ENTRIES];
+};
+
+struct cros_ec_hwmon_cooling_priv {
+ struct cros_ec_hwmon_priv *hwmon_priv;
+ u8 index;
};
static int cros_ec_hwmon_read_fan_speed(struct cros_ec_device *cros_ec, u8 index, u16 *speed)
@@ -36,6 +50,42 @@ static int cros_ec_hwmon_read_fan_speed(struct cros_ec_device *cros_ec, u8 index
return 0;
}
+static int cros_ec_hwmon_read_pwm_value(struct cros_ec_device *cros_ec, u8 index, u8 *pwm_value)
+{
+ struct ec_params_pwm_get_fan_duty req = {
+ .fan_idx = index,
+ };
+ struct ec_response_pwm_get_fan_duty resp;
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec, CROS_EC_HWMON_PWM_GET_FAN_DUTY_CMD_VERSION,
+ EC_CMD_PWM_GET_FAN_DUTY, &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ *pwm_value = (u8)DIV_ROUND_CLOSEST(le32_to_cpu(resp.percent) * 255, 100);
+ return 0;
+}
+
+static int cros_ec_hwmon_read_pwm_enable(struct cros_ec_device *cros_ec, u8 index,
+ u8 *control_method)
+{
+ struct ec_params_auto_fan_ctrl_v2 req = {
+ .cmd = EC_AUTO_FAN_CONTROL_CMD_GET,
+ .fan_idx = index,
+ };
+ struct ec_response_auto_fan_control resp;
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec, CROS_EC_HWMON_THERMAL_AUTO_FAN_CTRL_CMD_VERSION,
+ EC_CMD_THERMAL_AUTO_FAN_CTRL, &req, sizeof(req), &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ *control_method = resp.is_auto ? 2 : 1;
+ return 0;
+}
+
static int cros_ec_hwmon_read_temp(struct cros_ec_device *cros_ec, u8 index, u8 *temp)
{
unsigned int offset;
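The helpers above convert between the EC's percent duty cycle and hwmon's 0-255 pwm scale with DIV_ROUND_CLOSEST(), which keeps the mapping symmetric (50% maps to 128 and back). A round-trip sketch with hypothetical helper names:

static inline u8 percent_to_pwm(u32 percent)
{
	return DIV_ROUND_CLOSEST(percent * 255, 100);	/* 50 -> 128 */
}

static inline u32 pwm_to_percent(u8 pwm)
{
	return DIV_ROUND_CLOSEST((u32)pwm * 100, 255);	/* 128 -> 50 */
}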
@@ -75,6 +125,8 @@ static int cros_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
{
struct cros_ec_hwmon_priv *priv = dev_get_drvdata(dev);
int ret = -EOPNOTSUPP;
+ u8 control_method;
+ u8 pwm_value;
u16 speed;
u8 temp;
@@ -92,6 +144,17 @@ static int cros_ec_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
if (ret == 0)
*val = cros_ec_hwmon_is_error_fan(speed);
}
+ } else if (type == hwmon_pwm) {
+ if (attr == hwmon_pwm_enable) {
+ ret = cros_ec_hwmon_read_pwm_enable(priv->cros_ec, channel,
+ &control_method);
+ if (ret == 0)
+ *val = control_method;
+ } else if (attr == hwmon_pwm_input) {
+ ret = cros_ec_hwmon_read_pwm_value(priv->cros_ec, channel, &pwm_value);
+ if (ret == 0)
+ *val = pwm_value;
+ }
} else if (type == hwmon_temp) {
if (attr == hwmon_temp_input) {
ret = cros_ec_hwmon_read_temp(priv->cros_ec, channel, &temp);
@@ -124,6 +187,74 @@ static int cros_ec_hwmon_read_string(struct device *dev, enum hwmon_sensor_types
return -EOPNOTSUPP;
}
+static int cros_ec_hwmon_set_fan_pwm_val(struct cros_ec_device *cros_ec, u8 index, u8 val)
+{
+ struct ec_params_pwm_set_fan_duty_v1 req = {
+ .fan_idx = index,
+ .percent = DIV_ROUND_CLOSEST((uint32_t)val * 100, 255),
+ };
+ int ret;
+
+ ret = cros_ec_cmd(cros_ec, CROS_EC_HWMON_PWM_SET_FAN_DUTY_CMD_VERSION,
+ EC_CMD_PWM_SET_FAN_DUTY, &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int cros_ec_hwmon_write_pwm_input(struct cros_ec_device *cros_ec, u8 index, u8 val)
+{
+ u8 control_method;
+ int ret;
+
+ ret = cros_ec_hwmon_read_pwm_enable(cros_ec, index, &control_method);
+ if (ret)
+ return ret;
+ if (control_method != 1)
+ return -EOPNOTSUPP;
+
+ return cros_ec_hwmon_set_fan_pwm_val(cros_ec, index, val);
+}
+
+static int cros_ec_hwmon_write_pwm_enable(struct cros_ec_device *cros_ec, u8 index, u8 val)
+{
+ struct ec_params_auto_fan_ctrl_v2 req = {
+ .fan_idx = index,
+ .cmd = EC_AUTO_FAN_CONTROL_CMD_SET,
+ };
+ int ret;
+
+	/* No CrOS EC supports mode 0 (full speed, no fan speed control) */
+ if (val == 0)
+ return -EOPNOTSUPP;
+
+	req.set_auto = val != 1;
+ ret = cros_ec_cmd(cros_ec, CROS_EC_HWMON_THERMAL_AUTO_FAN_CTRL_CMD_VERSION,
+ EC_CMD_THERMAL_AUTO_FAN_CTRL, &req, sizeof(req), NULL, 0);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int cros_ec_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct cros_ec_hwmon_priv *priv = dev_get_drvdata(dev);
+
+ if (type == hwmon_pwm) {
+ switch (attr) {
+ case hwmon_pwm_input:
+ return cros_ec_hwmon_write_pwm_input(priv->cros_ec, channel, val);
+ case hwmon_pwm_enable:
+ return cros_ec_hwmon_write_pwm_enable(priv->cros_ec, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
+
static umode_t cros_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -132,6 +263,9 @@ static umode_t cros_ec_hwmon_is_visible(const void *data, enum hwmon_sensor_type
if (type == hwmon_fan) {
if (priv->usable_fans & BIT(channel))
return 0444;
+ } else if (type == hwmon_pwm) {
+ if (priv->fan_control_supported && priv->usable_fans & BIT(channel))
+ return 0644;
} else if (type == hwmon_temp) {
if (priv->temp_sensor_names[channel])
return 0444;
@@ -147,6 +281,11 @@ static const struct hwmon_channel_info * const cros_ec_hwmon_info[] = {
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
HWMON_CHANNEL_INFO(temp,
HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_LABEL,
@@ -175,9 +314,46 @@ static const struct hwmon_channel_info * const cros_ec_hwmon_info[] = {
NULL
};
+static int cros_ec_hwmon_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *val)
+{
+ *val = 255;
+ return 0;
+}
+
+static int cros_ec_hwmon_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *val)
+{
+ const struct cros_ec_hwmon_cooling_priv *priv = cdev->devdata;
+ u8 read_val;
+ int ret;
+
+ ret = cros_ec_hwmon_read_pwm_value(priv->hwmon_priv->cros_ec, priv->index, &read_val);
+ if (ret)
+ return ret;
+
+ *val = read_val;
+ return 0;
+}
+
+static int cros_ec_hwmon_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long val)
+{
+ const struct cros_ec_hwmon_cooling_priv *priv = cdev->devdata;
+
+ return cros_ec_hwmon_write_pwm_input(priv->hwmon_priv->cros_ec, priv->index, val);
+}
+
+static const struct thermal_cooling_device_ops cros_ec_thermal_cooling_ops = {
+ .get_max_state = cros_ec_hwmon_cooling_get_max_state,
+ .get_cur_state = cros_ec_hwmon_cooling_get_cur_state,
+ .set_cur_state = cros_ec_hwmon_cooling_set_cur_state,
+};
+
static const struct hwmon_ops cros_ec_hwmon_ops = {
.read = cros_ec_hwmon_read,
.read_string = cros_ec_hwmon_read_string,
+ .write = cros_ec_hwmon_write,
.is_visible = cros_ec_hwmon_is_visible,
};
@@ -233,6 +409,65 @@ static void cros_ec_hwmon_probe_fans(struct cros_ec_hwmon_priv *priv)
}
}
+static inline bool is_cros_ec_cmd_available(struct cros_ec_device *cros_ec,
+ u16 cmd, u8 version)
+{
+ int ret;
+
+ ret = cros_ec_get_cmd_versions(cros_ec, cmd);
+ return ret >= 0 && (ret & EC_VER_MASK(version));
+}
+
+static bool cros_ec_hwmon_probe_fan_control_supported(struct cros_ec_device *cros_ec)
+{
+ return is_cros_ec_cmd_available(cros_ec, EC_CMD_PWM_GET_FAN_DUTY,
+ CROS_EC_HWMON_PWM_GET_FAN_DUTY_CMD_VERSION) &&
+ is_cros_ec_cmd_available(cros_ec, EC_CMD_PWM_SET_FAN_DUTY,
+ CROS_EC_HWMON_PWM_SET_FAN_DUTY_CMD_VERSION) &&
+ is_cros_ec_cmd_available(cros_ec, EC_CMD_THERMAL_AUTO_FAN_CTRL,
+ CROS_EC_HWMON_THERMAL_AUTO_FAN_CTRL_CMD_VERSION);
+}
+
+static void cros_ec_hwmon_register_fan_cooling_devices(struct device *dev,
+ struct cros_ec_hwmon_priv *priv)
+{
+ struct cros_ec_hwmon_cooling_priv *cpriv;
+ struct thermal_cooling_device *cdev;
+ const char *type;
+ size_t i;
+
+ if (!IS_ENABLED(CONFIG_THERMAL))
+ return;
+
+ if (!priv->fan_control_supported)
+ return;
+
+ for (i = 0; i < EC_FAN_SPEED_ENTRIES; i++) {
+ if (!(priv->usable_fans & BIT(i)))
+ continue;
+
+ cpriv = devm_kzalloc(dev, sizeof(*cpriv), GFP_KERNEL);
+ if (!cpriv)
+ continue;
+
+ type = devm_kasprintf(dev, GFP_KERNEL, "%s-fan%zu", dev_name(dev), i);
+ if (!type) {
+ dev_warn(dev, "no memory to compose cooling device type for fan %zu\n", i);
+ continue;
+ }
+
+ cpriv->hwmon_priv = priv;
+ cpriv->index = i;
+ cdev = devm_thermal_of_cooling_device_register(dev, NULL, type, cpriv,
+ &cros_ec_thermal_cooling_ops);
+ if (IS_ERR(cdev)) {
+ dev_warn(dev, "failed to register fan %zu as a cooling device: %pe\n", i,
+ cdev);
+ continue;
+ }
+ }
+}
+
static int cros_ec_hwmon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -259,13 +494,89 @@ static int cros_ec_hwmon_probe(struct platform_device *pdev)
cros_ec_hwmon_probe_temp_sensors(dev, priv, thermal_version);
cros_ec_hwmon_probe_fans(priv);
+ priv->fan_control_supported = cros_ec_hwmon_probe_fan_control_supported(priv->cros_ec);
+ cros_ec_hwmon_register_fan_cooling_devices(dev, priv);
hwmon_dev = devm_hwmon_device_register_with_info(dev, "cros_ec", priv,
&cros_ec_hwmon_chip_info, NULL);
+ platform_set_drvdata(pdev, priv);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
+static int cros_ec_hwmon_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct cros_ec_hwmon_priv *priv = platform_get_drvdata(pdev);
+ u8 control_method;
+ size_t i;
+ int ret;
+
+ if (!priv->fan_control_supported)
+ return 0;
+
+	/* The EC reverts fan control to auto across suspend, so store the settings beforehand. */
+ for (i = 0; i < EC_FAN_SPEED_ENTRIES; i++) {
+ if (!(priv->usable_fans & BIT(i)))
+ continue;
+
+ ret = cros_ec_hwmon_read_pwm_enable(priv->cros_ec, i, &control_method);
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get mode setting for fan %zu: %d\n", i,
+ ret);
+ continue;
+ }
+
+ if (control_method != 1) {
+ priv->manual_fans &= ~BIT(i);
+ continue;
+ } else {
+ priv->manual_fans |= BIT(i);
+ }
+
+ ret = cros_ec_hwmon_read_pwm_value(priv->cros_ec, i, &priv->manual_fan_pwm[i]);
+	/*
+	 * If reading the value failed, invalidate the stored mode by
+	 * marking the fan as auto-controlled. The EC switches that fan
+	 * to auto mode after suspend anyway.
+	 */
+ if (ret) {
+ dev_warn(&pdev->dev, "failed to get PWM setting for fan %zu: %pe\n", i,
+ ERR_PTR(ret));
+ priv->manual_fans &= ~BIT(i);
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+static int cros_ec_hwmon_resume(struct platform_device *pdev)
+{
+ const struct cros_ec_hwmon_priv *priv = platform_get_drvdata(pdev);
+ size_t i;
+ int ret;
+
+ if (!priv->fan_control_supported)
+ return 0;
+
+	/* The EC reverts fan control to auto across suspend; restore the pre-suspend settings. */
+ for (i = 0; i < EC_FAN_SPEED_ENTRIES; i++) {
+ if (!(priv->manual_fans & BIT(i)))
+ continue;
+
+		/*
+		 * Writing a fan PWM value also switches that fan to manual
+		 * mode in the EC, so no separate mode-change command is needed.
+		 */
+ ret = cros_ec_hwmon_set_fan_pwm_val(priv->cros_ec, i, priv->manual_fan_pwm[i]);
+ if (ret)
+ dev_warn(&pdev->dev, "failed to restore settings for fan %zu: %pe\n", i,
+ ERR_PTR(ret));
+ }
+
+ return 0;
+}
+
static const struct platform_device_id cros_ec_hwmon_id[] = {
{ DRV_NAME, 0 },
{}
@@ -274,6 +585,8 @@ static const struct platform_device_id cros_ec_hwmon_id[] = {
static struct platform_driver cros_ec_hwmon_driver = {
.driver.name = DRV_NAME,
.probe = cros_ec_hwmon_probe,
+ .suspend = pm_ptr(cros_ec_hwmon_suspend),
+ .resume = pm_ptr(cros_ec_hwmon_resume),
.id_table = cros_ec_hwmon_id,
};
module_platform_driver(cros_ec_hwmon_driver);
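The suspend/resume pair above relies on one invariant: a set bit in manual_fans means that fan was in manual mode and manual_fan_pwm[i] holds a valid duty value, while any read failure clears the bit so resume leaves that fan in the EC's post-suspend auto mode. The invariant as a sketch (the helper is hypothetical):

static bool cros_ec_hwmon_fan_needs_restore(const struct cros_ec_hwmon_priv *priv,
					    size_t i)
{
	/* set bit: manual mode, manual_fan_pwm[i] is trustworthy */
	return priv->manual_fans & BIT(i);
}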
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 1e2c8e284001..a34753fc2973 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -24,6 +24,7 @@
#include <linux/init.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
@@ -446,7 +447,6 @@ static int i8k_set_fan(const struct dell_smm_data *data, u8 fan, int speed)
if (disallow_fan_support)
return -EINVAL;
- speed = (speed < 0) ? 0 : ((speed > data->i8k_fan_max) ? data->i8k_fan_max : speed);
regs.ebx = fan | (speed << 8);
return dell_smm_call(data->ops, &regs);
@@ -637,6 +637,8 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
if (copy_from_user(&speed, argp + 1, sizeof(int)))
return -EFAULT;
+ speed = clamp_val(speed, 0, data->i8k_fan_max);
+
mutex_lock(&data->i8k_mutex);
err = i8k_set_fan(data, val, speed);
if (err < 0)
@@ -762,6 +764,13 @@ static int dell_smm_get_cur_state(struct thermal_cooling_device *dev, unsigned l
if (ret < 0)
return ret;
+ /*
+	 * A fan state greater than i8k_fan_max might indicate that
+	 * the fan is currently in automatic mode.
+ */
+ if (ret > cdata->data->i8k_fan_max)
+ return -ENODATA;
+
*state = ret;
return 0;
@@ -849,7 +858,14 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
break;
case hwmon_pwm_enable:
- if (auto_fan)
+ if (auto_fan) {
+ /*
+ * The setting affects all fans, so only create a
+ * single attribute for the first fan channel.
+ */
+ if (channel != 0)
+ return 0;
+
/*
	 * There is no command to retrieve the current status
* from BIOS, and userspace/firmware itself can change
@@ -857,6 +873,10 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
* Thus we can only provide write-only access for now.
*/
return 0200;
+ }
+
+ if (data->fan[channel] && data->i8k_fan_max < I8K_FAN_AUTO)
+ return 0644;
break;
default:
@@ -926,15 +946,29 @@ static int dell_smm_read(struct device *dev, enum hwmon_sensor_types type, u32 a
}
break;
case hwmon_pwm:
+ ret = i8k_get_fan_status(data, channel);
+ if (ret < 0)
+ return ret;
+
switch (attr) {
case hwmon_pwm_input:
- ret = i8k_get_fan_status(data, channel);
- if (ret < 0)
- return ret;
+ /*
+			 * A fan state greater than i8k_fan_max might indicate that
+			 * the fan is currently in automatic mode.
+ */
+ if (ret > data->i8k_fan_max)
+ return -ENODATA;
*val = clamp_val(ret * data->i8k_pwm_mult, 0, 255);
return 0;
+ case hwmon_pwm_enable:
+ if (ret == I8K_FAN_AUTO)
+ *val = 2;
+ else
+ *val = 1;
+
+ return 0;
default:
break;
}
@@ -1020,16 +1054,32 @@ static int dell_smm_write(struct device *dev, enum hwmon_sensor_types type, u32
return 0;
case hwmon_pwm_enable:
- if (!val)
- return -EINVAL;
-
- if (val == 1)
+ switch (val) {
+ case 1:
enable = false;
- else
+ break;
+ case 2:
enable = true;
+ break;
+ default:
+ return -EINVAL;
+ }
mutex_lock(&data->i8k_mutex);
- err = i8k_enable_fan_auto_mode(data, enable);
+ if (auto_fan) {
+ err = i8k_enable_fan_auto_mode(data, enable);
+ } else {
+ /*
+ * When putting the fan into manual control mode we have to ensure
+ * that the device does not overheat until the userspace fan control
+ * software takes over. Because of this we set the fan speed to
+ * i8k_fan_max when disabling automatic fan control.
+ */
+ if (enable)
+ err = i8k_set_fan(data, channel, I8K_FAN_AUTO);
+ else
+ err = i8k_set_fan(data, channel, data->i8k_fan_max);
+ }
mutex_unlock(&data->i8k_mutex);
if (err < 0)
@@ -1080,9 +1130,9 @@ static const struct hwmon_channel_info * const dell_smm_info[] = {
),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT,
- HWMON_PWM_INPUT
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE
),
NULL
};
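The dell-smm pwm1_enable rework above accepts only the standard sysfs values 1 (manual) and 2 (automatic); 0 (full speed, no control) stays -EINVAL. The mapping in isolation, as a sketch with a hypothetical helper:

static int pwm_enable_to_auto(long val, bool *enable)
{
	switch (val) {
	case 1: *enable = false; return 0;	/* manual control */
	case 2: *enable = true;  return 0;	/* automatic control */
	default: return -EINVAL;		/* 0 is unsupported */
	}
}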
@@ -1281,6 +1331,13 @@ static const struct dmi_system_id i8k_dmi_table[] __initconst = {
},
},
{
+ .ident = "Dell OptiPlex 7040",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7040"),
+ },
+ },
+ {
.ident = "Dell Precision",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1331,7 +1388,6 @@ struct i8k_config_data {
enum i8k_configs {
DELL_LATITUDE_D520,
- DELL_PRECISION_490,
DELL_STUDIO,
DELL_XPS,
};
@@ -1341,10 +1397,6 @@ static const struct i8k_config_data i8k_config_data[] __initconst = {
.fan_mult = 1,
.fan_max = I8K_FAN_TURBO,
},
- [DELL_PRECISION_490] = {
- .fan_mult = 1,
- .fan_max = I8K_FAN_TURBO,
- },
[DELL_STUDIO] = {
.fan_mult = 1,
.fan_max = I8K_FAN_HIGH,
@@ -1365,15 +1417,6 @@ static const struct dmi_system_id i8k_config_dmi_table[] __initconst = {
.driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
},
{
- .ident = "Dell Precision 490",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME,
- "Precision WorkStation 490"),
- },
- .driver_data = (void *)&i8k_config_data[DELL_PRECISION_490],
- },
- {
.ident = "Dell Studio",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -1491,6 +1534,15 @@ static const struct i8k_fan_control_data i8k_fan_control_data[] __initconst = {
static const struct dmi_system_id i8k_whitelist_fan_control[] __initconst = {
{
+ .ident = "Dell G5 5505",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G5 5505"),
+ },
+ .driver_data = (void *)&i8k_fan_control_data[I8K_FAN_34A3_35A3],
+ },
+ {
.ident = "Dell Latitude 5480",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 291d91f68646..9c5b021aab86 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -102,7 +102,6 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
@@ -110,7 +109,6 @@
struct drivetemp_data {
struct list_head list; /* list of instantiated devices */
- struct mutex lock; /* protect data buffer accesses */
struct scsi_device *sdev; /* SCSI device */
struct device *dev; /* instantiating device */
struct device *hwdev; /* hardware monitoring device */
@@ -462,9 +460,7 @@ static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp_input:
case hwmon_temp_lowest:
case hwmon_temp_highest:
- mutex_lock(&st->lock);
err = st->get_temp(st, attr, val);
- mutex_unlock(&st->lock);
break;
case hwmon_temp_lcrit:
*val = st->temp_lcrit;
@@ -566,7 +562,6 @@ static int drivetemp_add(struct device *dev)
st->sdev = sdev;
st->dev = dev;
- mutex_init(&st->lock);
if (drivetemp_identify(st)) {
err = -ENODEV;
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index eca33220d34a..ccce948a4306 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -17,7 +17,6 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/sysfs.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/util_macros.h>
@@ -30,7 +29,6 @@ enum emc1403_chip { emc1402, emc1403, emc1404, emc1428 };
struct thermal_data {
enum emc1403_chip chip;
struct regmap *regmap;
- struct mutex mutex;
};
static ssize_t power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -268,8 +266,8 @@ static s8 emc1403_temp_regs_low[][4] = {
},
};
-static int __emc1403_get_temp(struct thermal_data *data, int channel,
- enum emc1403_reg_map map, long *val)
+static int emc1403_get_temp(struct thermal_data *data, int channel,
+ enum emc1403_reg_map map, long *val)
{
unsigned int regvalh;
unsigned int regvall = 0;
@@ -295,38 +293,23 @@ static int __emc1403_get_temp(struct thermal_data *data, int channel,
return 0;
}
-static int emc1403_get_temp(struct thermal_data *data, int channel,
- enum emc1403_reg_map map, long *val)
-{
- int ret;
-
- mutex_lock(&data->mutex);
- ret = __emc1403_get_temp(data, channel, map, val);
- mutex_unlock(&data->mutex);
-
- return ret;
-}
-
static int emc1403_get_hyst(struct thermal_data *data, int channel,
enum emc1403_reg_map map, long *val)
{
int hyst, ret;
long limit;
- mutex_lock(&data->mutex);
- ret = __emc1403_get_temp(data, channel, map, &limit);
+ ret = emc1403_get_temp(data, channel, map, &limit);
if (ret < 0)
- goto unlock;
+ return ret;
ret = regmap_read(data->regmap, 0x21, &hyst);
if (ret < 0)
- goto unlock;
+ return ret;
if (map == temp_min)
*val = limit + hyst * 1000;
else
*val = limit - hyst * 1000;
-unlock:
- mutex_unlock(&data->mutex);
- return ret;
+ return 0;
}
static int emc1403_temp_read(struct thermal_data *data, u32 attr, int channel, long *val)
@@ -451,20 +434,16 @@ static int emc1403_set_hyst(struct thermal_data *data, long val)
else
val = clamp_val(val, 0, 255000);
- mutex_lock(&data->mutex);
- ret = __emc1403_get_temp(data, 0, temp_crit, &limit);
+ ret = emc1403_get_temp(data, 0, temp_crit, &limit);
if (ret < 0)
- goto unlock;
+ return ret;
hyst = limit - val;
if (data->chip == emc1428)
hyst = clamp_val(DIV_ROUND_CLOSEST(hyst, 1000), 0, 127);
else
hyst = clamp_val(DIV_ROUND_CLOSEST(hyst, 1000), 0, 255);
- ret = regmap_write(data->regmap, 0x21, hyst);
-unlock:
- mutex_unlock(&data->mutex);
- return ret;
+ return regmap_write(data->regmap, 0x21, hyst);
}
static int emc1403_set_temp(struct thermal_data *data, int channel,
@@ -478,7 +457,6 @@ static int emc1403_set_temp(struct thermal_data *data, int channel,
regh = emc1403_temp_regs[channel][map];
regl = emc1403_temp_regs_low[channel][map];
- mutex_lock(&data->mutex);
if (regl >= 0) {
if (data->chip == emc1428)
val = clamp_val(val, -128000, 127875);
@@ -487,7 +465,7 @@ static int emc1403_set_temp(struct thermal_data *data, int channel,
regval = DIV_ROUND_CLOSEST(val, 125);
ret = regmap_write(data->regmap, regh, (regval >> 3) & 0xff);
if (ret < 0)
- goto unlock;
+ return ret;
ret = regmap_write(data->regmap, regl, (regval & 0x07) << 5);
} else {
if (data->chip == emc1428)
@@ -497,8 +475,6 @@ static int emc1403_set_temp(struct thermal_data *data, int channel,
regval = DIV_ROUND_CLOSEST(val, 1000);
ret = regmap_write(data->regmap, regh, regval);
}
-unlock:
- mutex_unlock(&data->mutex);
return ret;
}
@@ -695,8 +671,6 @@ static int emc1403_probe(struct i2c_client *client)
if (IS_ERR(data->regmap))
return PTR_ERR(data->regmap);
- mutex_init(&data->mutex);
-
hwmon_dev = devm_hwmon_device_register_with_info(&client->dev,
client->name, data,
&emc1403_chip_info,
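The hysteresis register 0x21 used above holds whole degrees relative to a limit; worked numbers, assuming a max limit of 85000 millidegrees and a register value of 10:

	long limit = 85000;		/* the relevant limit, millidegrees */
	unsigned int hyst = 10;		/* register 0x21, whole degrees */

	long max_hyst = limit - hyst * 1000;	/* 75000: below max/crit limits */
	long min_hyst = limit + hyst * 1000;	/* min limits add instead */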
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 60eddc7b0270..9b8e925af030 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -277,8 +277,10 @@ fan1_input_show(struct device *dev, struct device_attribute *da, char *buf)
{
struct emc2103_data *data = emc2103_update_device(dev);
int rpm = 0;
+ mutex_lock(&data->update_lock);
if (data->fan_tach != 0)
rpm = (FAN_RPM_FACTOR * data->fan_multiplier) / data->fan_tach;
+ mutex_unlock(&data->update_lock);
return sprintf(buf, "%d\n", rpm);
}
@@ -363,10 +365,12 @@ fan1_target_show(struct device *dev, struct device_attribute *da, char *buf)
struct emc2103_data *data = emc2103_update_device(dev);
int rpm = 0;
+ mutex_lock(&data->update_lock);
/* high byte of 0xff indicates disabled so return 0 */
if ((data->fan_target != 0) && ((data->fan_target & 0x1fe0) != 0x1fe0))
rpm = (FAN_RPM_FACTOR * data->fan_multiplier)
/ data->fan_target;
+ mutex_unlock(&data->update_lock);
return sprintf(buf, "%d\n", rpm);
}
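The emc2103 fix above takes update_lock so fan_tach/fan_target and fan_multiplier are read from the same refresh cycle. The same read written with the scope-based guard from <linux/cleanup.h>, as an alternative sketch (not what the patch does):

static int emc2103_rpm_from_tach(struct emc2103_data *data)
{
	guard(mutex)(&data->update_lock);	/* unlocks on return */

	if (data->fan_tach == 0)
		return 0;

	return (FAN_RPM_FACTOR * data->fan_multiplier) / data->fan_tach;
}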
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
index 60809289f816..ceae96c07ac4 100644
--- a/drivers/hwmon/emc2305.c
+++ b/drivers/hwmon/emc2305.c
@@ -593,10 +593,8 @@ static int emc2305_probe_childs_from_dt(struct device *dev)
for_each_child_of_node(dev->of_node, child) {
if (of_property_present(child, "reg")) {
ret = emc2305_of_parse_pwm_child(dev, child, data);
- if (ret) {
- of_node_put(child);
+ if (ret)
continue;
- }
count++;
}
}
@@ -685,8 +683,10 @@ static int emc2305_probe(struct i2c_client *client)
i = 0;
for_each_child_of_node(dev->of_node, child) {
ret = emc2305_set_single_tz(dev, child, i);
- if (ret != 0)
+ if (ret != 0) {
+ of_node_put(child);
return ret;
+ }
i++;
}
} else {
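Both emc2305 hunks above follow the same rule: for_each_child_of_node() holds a reference on the current child and drops it when advancing, so a continue needs no cleanup but any early return must call of_node_put() itself. The pattern in brief (do_something() is hypothetical):

	for_each_child_of_node(dev->of_node, child) {
		ret = do_something(child);
		if (ret) {
			of_node_put(child);	/* early exit: drop the held ref */
			return ret;
		}
		/* continue: the iterator drops the ref itself */
	}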
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index 8aeec16a7a90..08dcc6a7fb62 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -12,7 +12,6 @@
#include <linux/jiffies.h>
#include <linux/math.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
@@ -62,10 +61,6 @@ enum WATCHDOG_RESOLUTION {
struct fts_data {
struct i2c_client *client;
- /* update sensor data lock */
- struct mutex update_lock;
- /* read/write register lock */
- struct mutex access_lock;
unsigned long last_updated; /* in jiffies */
struct watchdog_device wdd;
enum WATCHDOG_RESOLUTION resolution;
@@ -98,21 +93,15 @@ static int fts_read_byte(struct i2c_client *client, unsigned short reg)
{
int ret;
unsigned char page = reg >> 8;
- struct fts_data *data = dev_get_drvdata(&client->dev);
-
- mutex_lock(&data->access_lock);
dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
if (ret < 0)
- goto error;
+ return ret;
reg &= 0xFF;
ret = i2c_smbus_read_byte_data(client, reg);
dev_dbg(&client->dev, "read - reg: 0x%.02x: val: 0x%.02x\n", reg, ret);
-
-error:
- mutex_unlock(&data->access_lock);
return ret;
}
@@ -121,22 +110,16 @@ static int fts_write_byte(struct i2c_client *client, unsigned short reg,
{
int ret;
unsigned char page = reg >> 8;
- struct fts_data *data = dev_get_drvdata(&client->dev);
-
- mutex_lock(&data->access_lock);
dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
if (ret < 0)
- goto error;
+ return ret;
reg &= 0xFF;
dev_dbg(&client->dev,
"write - reg: 0x%.02x: val: 0x%.02x\n", reg, value);
ret = i2c_smbus_write_byte_data(client, reg, value);
-
-error:
- mutex_unlock(&data->access_lock);
return ret;
}
@@ -145,44 +128,40 @@ error:
/*****************************************************************************/
static int fts_update_device(struct fts_data *data)
{
- int i;
- int err = 0;
+ int i, err;
- mutex_lock(&data->update_lock);
if (!time_after(jiffies, data->last_updated + 2 * HZ) && data->valid)
- goto exit;
+ return 0;
err = fts_read_byte(data->client, FTS_DEVICE_STATUS_REG);
if (err < 0)
- goto exit;
+ return err;
data->valid = !!(err & 0x02); /* Data not ready yet */
- if (unlikely(!data->valid)) {
- err = -EAGAIN;
- goto exit;
- }
+ if (unlikely(!data->valid))
+ return -EAGAIN;
err = fts_read_byte(data->client, FTS_FAN_PRESENT_REG);
if (err < 0)
- goto exit;
+ return err;
data->fan_present = err;
err = fts_read_byte(data->client, FTS_FAN_EVENT_REG);
if (err < 0)
- goto exit;
+ return err;
data->fan_alarm = err;
for (i = 0; i < FTS_NO_FAN_SENSORS; i++) {
if (data->fan_present & BIT(i)) {
err = fts_read_byte(data->client, FTS_REG_FAN_INPUT(i));
if (err < 0)
- goto exit;
+ return err;
data->fan_input[i] = err;
err = fts_read_byte(data->client,
FTS_REG_FAN_SOURCE(i));
if (err < 0)
- goto exit;
+ return err;
data->fan_source[i] = err;
} else {
data->fan_input[i] = 0;
@@ -192,27 +171,24 @@ static int fts_update_device(struct fts_data *data)
err = fts_read_byte(data->client, FTS_SENSOR_EVENT_REG);
if (err < 0)
- goto exit;
+ return err;
data->temp_alarm = err;
for (i = 0; i < FTS_NO_TEMP_SENSORS; i++) {
err = fts_read_byte(data->client, FTS_REG_TEMP_INPUT(i));
if (err < 0)
- goto exit;
+ return err;
data->temp_input[i] = err;
}
for (i = 0; i < FTS_NO_VOLT_SENSORS; i++) {
err = fts_read_byte(data->client, FTS_REG_VOLT(i));
if (err < 0)
- goto exit;
+ return err;
data->volt[i] = err;
}
data->last_updated = jiffies;
- err = 0;
-exit:
- mutex_unlock(&data->update_lock);
- return err;
+ return 0;
}
/*****************************************************************************/
@@ -470,18 +446,14 @@ static int fts_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (val)
return -EINVAL;
- mutex_lock(&data->update_lock);
ret = fts_read_byte(data->client, FTS_REG_TEMP_CONTROL(channel));
- if (ret >= 0)
- ret = fts_write_byte(data->client, FTS_REG_TEMP_CONTROL(channel),
- ret | 0x1);
- if (ret >= 0)
- data->valid = false;
-
- mutex_unlock(&data->update_lock);
if (ret < 0)
return ret;
-
+ ret = fts_write_byte(data->client, FTS_REG_TEMP_CONTROL(channel),
+ ret | 0x1);
+ if (ret < 0)
+ return ret;
+ data->valid = false;
return 0;
default:
break;
@@ -493,18 +465,14 @@ static int fts_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (val)
return -EINVAL;
- mutex_lock(&data->update_lock);
ret = fts_read_byte(data->client, FTS_REG_FAN_CONTROL(channel));
- if (ret >= 0)
- ret = fts_write_byte(data->client, FTS_REG_FAN_CONTROL(channel),
- ret | 0x1);
- if (ret >= 0)
- data->valid = false;
-
- mutex_unlock(&data->update_lock);
if (ret < 0)
return ret;
-
+ ret = fts_write_byte(data->client, FTS_REG_FAN_CONTROL(channel),
+ ret | 0x1);
+ if (ret < 0)
+ return ret;
+ data->valid = false;
return 0;
default:
break;
@@ -648,8 +616,6 @@ static int fts_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- mutex_init(&data->update_lock);
- mutex_init(&data->access_lock);
data->client = client;
dev_set_drvdata(&client->dev, data);
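fts_read_byte()/fts_write_byte() above implement paged register access: the high byte of the 16-bit register number selects a page via FTS_PAGE_SELECT_REG, and the low byte addresses within that page. The read side as a stand-alone sketch (hypothetical helper name):

static int fts_paged_read(struct i2c_client *client, unsigned short reg)
{
	int ret;

	/* select the page first, then read within it */
	ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, reg >> 8);
	if (ret < 0)
		return ret;

	return i2c_smbus_read_byte_data(client, reg & 0xFF);
}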
diff --git a/drivers/hwmon/gpd-fan.c b/drivers/hwmon/gpd-fan.c
new file mode 100644
index 000000000000..237f496c4862
--- /dev/null
+++ b/drivers/hwmon/gpd-fan.c
@@ -0,0 +1,683 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/* Platform driver for GPD devices that expose fan control via hwmon sysfs.
+ *
+ * Fan control is provided via a pwm interface in the range [0-255].
+ * Each model has a different range in the EC; the written value is scaled
+ * to account for that.
+ *
+ * Based on this repo:
+ * https://github.com/Cryolitia/gpd-fan-driver
+ *
+ * Copyright (c) 2024 Cryolitia PukNgae
+ */
+
+#include <linux/dmi.h>
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_NAME "gpdfan"
+#define GPD_PWM_CTR_OFFSET 0x1841
+
+static char *gpd_fan_board = "";
+module_param(gpd_fan_board, charp, 0444);
+
+enum gpd_board {
+ win_mini,
+ win4_6800u,
+ win_max_2,
+ duo,
+};
+
+enum FAN_PWM_ENABLE {
+ DISABLE = 0,
+ MANUAL = 1,
+ AUTOMATIC = 2,
+};
+
+static struct {
+ enum FAN_PWM_ENABLE pwm_enable;
+ u8 pwm_value;
+
+ const struct gpd_fan_drvdata *drvdata;
+} gpd_driver_priv;
+
+struct gpd_fan_drvdata {
+ const char *board_name; // Board name for module param comparison
+ const enum gpd_board board;
+
+ const u8 addr_port;
+ const u8 data_port;
+ const u16 manual_control_enable;
+ const u16 rpm_read;
+ const u16 pwm_write;
+ const u16 pwm_max;
+};
+
+static struct gpd_fan_drvdata gpd_win_mini_drvdata = {
+ .board_name = "win_mini",
+ .board = win_mini,
+
+ .addr_port = 0x4E,
+ .data_port = 0x4F,
+ .manual_control_enable = 0x047A,
+ .rpm_read = 0x0478,
+ .pwm_write = 0x047A,
+ .pwm_max = 244,
+};
+
+static struct gpd_fan_drvdata gpd_duo_drvdata = {
+ .board_name = "duo",
+ .board = duo,
+
+ .addr_port = 0x4E,
+ .data_port = 0x4F,
+ .manual_control_enable = 0x047A,
+ .rpm_read = 0x0478,
+ .pwm_write = 0x047A,
+ .pwm_max = 244,
+};
+
+static struct gpd_fan_drvdata gpd_win4_drvdata = {
+ .board_name = "win4",
+ .board = win4_6800u,
+
+ .addr_port = 0x2E,
+ .data_port = 0x2F,
+ .manual_control_enable = 0xC311,
+ .rpm_read = 0xC880,
+ .pwm_write = 0xC311,
+ .pwm_max = 127,
+};
+
+static struct gpd_fan_drvdata gpd_wm2_drvdata = {
+ .board_name = "wm2",
+ .board = win_max_2,
+
+ .addr_port = 0x4E,
+ .data_port = 0x4F,
+ .manual_control_enable = 0x0275,
+ .rpm_read = 0x0218,
+ .pwm_write = 0x1809,
+ .pwm_max = 184,
+};
+
+static const struct dmi_system_id dmi_table[] = {
+ {
+ // GPD Win Mini
+ // GPD Win Mini with AMD Ryzen 8840U
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1617-01")
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {
+ // GPD Win Mini
+ // GPD Win Mini with AMD Ryzen HX370
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1617-02")
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {
+ // GPD Win Mini
+ // GPD Win Mini with AMD Ryzen HX370
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1617-02-L")
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {
+ // GPD Win 4 with AMD Ryzen 6800U
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1618-04"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Default string"),
+ },
+ .driver_data = &gpd_win4_drvdata,
+ },
+ {
+ // GPD Win 4 with Ryzen 7840U
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1618-04"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Ver. 1.0"),
+ },
+ // Since 7840U, win4 uses the same drvdata as wm2
+ .driver_data = &gpd_wm2_drvdata,
+ },
+ {
+ // GPD Win 4 with Ryzen 7840U (another)
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1618-04"),
+ DMI_MATCH(DMI_BOARD_VERSION, "Ver.1.0"),
+ },
+ .driver_data = &gpd_wm2_drvdata,
+ },
+ {
+ // GPD Win Max 2 with Ryzen 6800U
+ // GPD Win Max 2 2023 with Ryzen 7840U
+ // GPD Win Max 2 2024 with Ryzen 8840U
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+ },
+ .driver_data = &gpd_wm2_drvdata,
+ },
+ {
+ // GPD Win Max 2 with AMD Ryzen HX370
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1619-05"),
+ },
+ .driver_data = &gpd_wm2_drvdata,
+ },
+ {
+ // GPD Duo
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1622-01"),
+ },
+ .driver_data = &gpd_duo_drvdata,
+ },
+ {
+ // GPD Duo (another)
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1622-01-L"),
+ },
+ .driver_data = &gpd_duo_drvdata,
+ },
+ {
+ // GPD Pocket 4
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1628-04"),
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {
+ // GPD Pocket 4 (another)
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "G1628-04-L"),
+ },
+ .driver_data = &gpd_win_mini_drvdata,
+ },
+ {}
+};
+
+static const struct gpd_fan_drvdata *gpd_module_drvdata[] = {
+ &gpd_win_mini_drvdata, &gpd_win4_drvdata, &gpd_wm2_drvdata, NULL
+};
+
+// Helper functions to handle EC read/write
+static void gpd_ecram_read(u16 offset, u8 *val)
+{
+ u16 addr_port = gpd_driver_priv.drvdata->addr_port;
+ u16 data_port = gpd_driver_priv.drvdata->data_port;
+
+ outb(0x2E, addr_port);
+ outb(0x11, data_port);
+ outb(0x2F, addr_port);
+ outb((u8)((offset >> 8) & 0xFF), data_port);
+
+ outb(0x2E, addr_port);
+ outb(0x10, data_port);
+ outb(0x2F, addr_port);
+ outb((u8)(offset & 0xFF), data_port);
+
+ outb(0x2E, addr_port);
+ outb(0x12, data_port);
+ outb(0x2F, addr_port);
+ *val = inb(data_port);
+}
+
+static void gpd_ecram_write(u16 offset, u8 value)
+{
+ u16 addr_port = gpd_driver_priv.drvdata->addr_port;
+ u16 data_port = gpd_driver_priv.drvdata->data_port;
+
+ outb(0x2E, addr_port);
+ outb(0x11, data_port);
+ outb(0x2F, addr_port);
+ outb((u8)((offset >> 8) & 0xFF), data_port);
+
+ outb(0x2E, addr_port);
+ outb(0x10, data_port);
+ outb(0x2F, addr_port);
+ outb((u8)(offset & 0xFF), data_port);
+
+ outb(0x2E, addr_port);
+ outb(0x12, data_port);
+ outb(0x2F, addr_port);
+ outb(value, data_port);
+}
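The two helpers above drive a Super I/O style index/data pair: writing 0x2E to addr_port selects an internal index register and 0x2F selects the data register behind it. Reading the code (not a datasheet), internal registers 0x11 and 0x10 latch the high and low bytes of the ECRAM offset and 0x12 is the data window the final inb()/outb() transfers through. The shared select step as a sketch with a hypothetical helper:

static void gpd_ec_reg_write(u16 addr_port, u16 data_port, u8 index, u8 val)
{
	outb(0x2E, addr_port);	/* point at the internal index register */
	outb(index, data_port);	/* 0x11: offset high, 0x10: offset low */
	outb(0x2F, addr_port);	/* point at the internal data register */
	outb(val, data_port);
}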
+
+static int gpd_generic_read_rpm(void)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+ u8 high, low;
+
+ gpd_ecram_read(drvdata->rpm_read, &high);
+ gpd_ecram_read(drvdata->rpm_read + 1, &low);
+
+ return (u16)high << 8 | low;
+}
+
+static int gpd_wm2_read_rpm(void)
+{
+ for (u16 pwm_ctr_offset = GPD_PWM_CTR_OFFSET;
+ pwm_ctr_offset <= GPD_PWM_CTR_OFFSET + 2; pwm_ctr_offset++) {
+ u8 PWMCTR;
+
+ gpd_ecram_read(pwm_ctr_offset, &PWMCTR);
+
+ if (PWMCTR != 0xB8)
+ gpd_ecram_write(pwm_ctr_offset, 0xB8);
+ }
+
+ return gpd_generic_read_rpm();
+}
+
+// Read value for fan1_input
+static int gpd_read_rpm(void)
+{
+ switch (gpd_driver_priv.drvdata->board) {
+ case win4_6800u:
+ case win_mini:
+ case duo:
+ return gpd_generic_read_rpm();
+ case win_max_2:
+ return gpd_wm2_read_rpm();
+ }
+
+ return 0;
+}
+
+static int gpd_wm2_read_pwm(void)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+ u8 var;
+
+ gpd_ecram_read(drvdata->pwm_write, &var);
+
+ // Match gpd_generic_write_pwm(u8) below
+ return DIV_ROUND_CLOSEST((var - 1) * 255, (drvdata->pwm_max - 1));
+}
+
+// Read value for pwm1
+static int gpd_read_pwm(void)
+{
+ switch (gpd_driver_priv.drvdata->board) {
+ case win_mini:
+ case duo:
+ case win4_6800u:
+ switch (gpd_driver_priv.pwm_enable) {
+ case DISABLE:
+ return 255;
+ case MANUAL:
+ return gpd_driver_priv.pwm_value;
+ case AUTOMATIC:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case win_max_2:
+ return gpd_wm2_read_pwm();
+ }
+ return 0;
+}
+
+// The EC accepts PWM values in the range 1..pwm_max; map the 0..255 input onto it.
+static inline u8 gpd_cast_pwm_range(u8 val)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+
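+ // e.g. with a hypothetical pwm_max of 184: 0 maps to 1, 255 maps to 184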
+ return DIV_ROUND_CLOSEST(val * (drvdata->pwm_max - 1), 255) + 1;
+}
+
+static void gpd_generic_write_pwm(u8 val)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+ u8 pwm_reg;
+
+ pwm_reg = gpd_cast_pwm_range(val);
+ gpd_ecram_write(drvdata->pwm_write, pwm_reg);
+}
+
+static void gpd_duo_write_pwm(u8 val)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+ u8 pwm_reg;
+
+ pwm_reg = gpd_cast_pwm_range(val);
+ gpd_ecram_write(drvdata->pwm_write, pwm_reg);
+ gpd_ecram_write(drvdata->pwm_write + 1, pwm_reg);
+}
+
+// Write value for pwm1
+static int gpd_write_pwm(u8 val)
+{
+ if (gpd_driver_priv.pwm_enable != MANUAL)
+ return -EPERM;
+
+ switch (gpd_driver_priv.drvdata->board) {
+ case duo:
+ gpd_duo_write_pwm(val);
+ break;
+ case win_mini:
+ case win4_6800u:
+ case win_max_2:
+ gpd_generic_write_pwm(val);
+ break;
+ }
+
+ return 0;
+}
+
+static void gpd_win_mini_set_pwm_enable(enum FAN_PWM_ENABLE pwm_enable)
+{
+ switch (pwm_enable) {
+ case DISABLE:
+ gpd_generic_write_pwm(255);
+ break;
+ case MANUAL:
+ gpd_generic_write_pwm(gpd_driver_priv.pwm_value);
+ break;
+ case AUTOMATIC:
+ gpd_ecram_write(gpd_driver_priv.drvdata->pwm_write, 0);
+ break;
+ }
+}
+
+static void gpd_duo_set_pwm_enable(enum FAN_PWM_ENABLE pwm_enable)
+{
+ switch (pwm_enable) {
+ case DISABLE:
+ gpd_duo_write_pwm(255);
+ break;
+ case MANUAL:
+ gpd_duo_write_pwm(gpd_driver_priv.pwm_value);
+ break;
+ case AUTOMATIC:
+ gpd_ecram_write(gpd_driver_priv.drvdata->pwm_write, 0);
+ break;
+ }
+}
+
+static void gpd_wm2_set_pwm_enable(enum FAN_PWM_ENABLE enable)
+{
+ const struct gpd_fan_drvdata *const drvdata = gpd_driver_priv.drvdata;
+
+ switch (enable) {
+ case DISABLE:
+ gpd_generic_write_pwm(255);
+ gpd_ecram_write(drvdata->manual_control_enable, 1);
+ break;
+ case MANUAL:
+ gpd_generic_write_pwm(gpd_driver_priv.pwm_value);
+ gpd_ecram_write(drvdata->manual_control_enable, 1);
+ break;
+ case AUTOMATIC:
+ gpd_ecram_write(drvdata->manual_control_enable, 0);
+ break;
+ }
+}
+
+// Write value for pwm1_enable
+static void gpd_set_pwm_enable(enum FAN_PWM_ENABLE enable)
+{
+ if (enable == MANUAL)
+ // For device safety, start at maximum PWM when switching to
+ // manual mode.
+ gpd_driver_priv.pwm_value = 255;
+
+ switch (gpd_driver_priv.drvdata->board) {
+ case win_mini:
+ case win4_6800u:
+ gpd_win_mini_set_pwm_enable(enable);
+ break;
+ case duo:
+ gpd_duo_set_pwm_enable(enable);
+ break;
+ case win_max_2:
+ gpd_wm2_set_pwm_enable(enable);
+ break;
+ }
+}
+
+static umode_t gpd_fan_hwmon_is_visible(__always_unused const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr,
+ __always_unused int channel)
+{
+ if (type == hwmon_fan && attr == hwmon_fan_input) {
+ return 0444;
+ } else if (type == hwmon_pwm) {
+ switch (attr) {
+ case hwmon_pwm_enable:
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static int gpd_fan_hwmon_read(__always_unused struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ __always_unused int channel, long *val)
+{
+ int ret;
+
+ if (type == hwmon_fan) {
+ if (attr == hwmon_fan_input) {
+ ret = gpd_read_rpm();
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ }
+ } else if (type == hwmon_pwm) {
+ switch (attr) {
+ case hwmon_pwm_enable:
+ *val = gpd_driver_priv.pwm_enable;
+ return 0;
+ case hwmon_pwm_input:
+ ret = gpd_read_pwm();
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
+ return 0;
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int gpd_fan_hwmon_write(__always_unused struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ __always_unused int channel, long val)
+{
+ if (type == hwmon_pwm) {
+ switch (attr) {
+ case hwmon_pwm_enable:
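+ // Standard pwm1_enable values: 0 = full speed, 1 = manual, 2 = automatic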
+ if (!in_range(val, 0, 3))
+ return -EINVAL;
+
+ gpd_driver_priv.pwm_enable = val;
+
+ gpd_set_pwm_enable(gpd_driver_priv.pwm_enable);
+ return 0;
+ case hwmon_pwm_input:
+ if (!in_range(val, 0, 256))
+ return -EINVAL;
+
+ gpd_driver_priv.pwm_value = val;
+
+ return gpd_write_pwm(val);
+ }
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops gpd_fan_ops = {
+ .is_visible = gpd_fan_hwmon_is_visible,
+ .read = gpd_fan_hwmon_read,
+ .write = gpd_fan_hwmon_write,
+};
+
+static const struct hwmon_channel_info *gpd_fan_hwmon_channel_info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ NULL
+};
+
+static const struct hwmon_chip_info gpd_fan_chip_info = {
+ .ops = &gpd_fan_ops,
+ .info = gpd_fan_hwmon_channel_info
+};
+
+static void gpd_win4_init_ec(void)
+{
+ u8 chip_id, chip_ver;
+
+ gpd_ecram_read(0x2000, &chip_id);
+
+ if (chip_id == 0x55) {
+ gpd_ecram_read(0x1060, &chip_ver);
+ gpd_ecram_write(0x1060, chip_ver | 0x80);
+ }
+}
+
+static void gpd_init_ec(void)
+{
+ // Buggy firmware may leave the EC uninitialized at boot. Until it
+ // is initialized, reading RPM always returns 0 and writing PWM has
+ // no effect, so initialize it manually on driver load.
+ if (gpd_driver_priv.drvdata->board == win4_6800u)
+ gpd_win4_init_ec();
+}
+
+static int gpd_fan_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct resource *region;
+ const struct resource *res;
+ const struct device *hwdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get platform resource\n");
+
+ region = devm_request_region(dev, res->start,
+ resource_size(res), DRIVER_NAME);
+ if (!region)
+ return dev_err_probe(dev, -EBUSY,
+ "Failed to request region\n");
+
+ hwdev = devm_hwmon_device_register_with_info(dev,
+ DRIVER_NAME,
+ NULL,
+ &gpd_fan_chip_info,
+ NULL);
+ if (IS_ERR(hwdev))
+ return dev_err_probe(dev, PTR_ERR(hwdev),
+ "Failed to register hwmon device\n");
+
+ gpd_init_ec();
+
+ return 0;
+}
+
+static void gpd_fan_remove(__always_unused struct platform_device *pdev)
+{
+ gpd_driver_priv.pwm_enable = AUTOMATIC;
+ gpd_set_pwm_enable(AUTOMATIC);
+}
+
+static struct platform_driver gpd_fan_driver = {
+ .probe = gpd_fan_probe,
+ .remove = gpd_fan_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ },
+};
+
+static struct platform_device *gpd_fan_platform_device;
+
+static int __init gpd_fan_init(void)
+{
+ const struct gpd_fan_drvdata *match = NULL;
+
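+ // A board explicitly requested via gpd_fan_board takes precedence
+ // over DMI matching.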
+ for (const struct gpd_fan_drvdata **p = gpd_module_drvdata; *p; p++) {
+ if (strcmp(gpd_fan_board, (*p)->board_name) == 0) {
+ match = *p;
+ break;
+ }
+ }
+
+ if (!match) {
+ const struct dmi_system_id *dmi_match =
+ dmi_first_match(dmi_table);
+ if (dmi_match)
+ match = dmi_match->driver_data;
+ }
+
+ if (!match)
+ return -ENODEV;
+
+ gpd_driver_priv.pwm_enable = AUTOMATIC;
+ gpd_driver_priv.pwm_value = 255;
+ gpd_driver_priv.drvdata = match;
+
+ struct resource gpd_fan_resources[] = {
+ {
+ .start = match->addr_port,
+ .end = match->data_port,
+ .flags = IORESOURCE_IO,
+ },
+ };
+
+ gpd_fan_platform_device = platform_create_bundle(&gpd_fan_driver,
+ gpd_fan_probe,
+ gpd_fan_resources,
+ 1, NULL, 0);
+
+ if (IS_ERR(gpd_fan_platform_device)) {
+ pr_warn("Failed to create platform device\n");
+ return PTR_ERR(gpd_fan_platform_device);
+ }
+
+ return 0;
+}
+
+static void __exit gpd_fan_exit(void)
+{
+ platform_device_unregister(gpd_fan_platform_device);
+ platform_driver_unregister(&gpd_fan_driver);
+}
+
+MODULE_DEVICE_TABLE(dmi, dmi_table);
+
+module_init(gpd_fan_init);
+module_exit(gpd_fan_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cryolitia PukNgae <cryolitia@uniontech.com>");
+MODULE_DESCRIPTION("GPD Devices fan control driver");
diff --git a/drivers/hwmon/hs3001.c b/drivers/hwmon/hs3001.c
index 24ed3fb9a43a..50c6c15f8b18 100644
--- a/drivers/hwmon/hs3001.c
+++ b/drivers/hwmon/hs3001.c
@@ -42,7 +42,6 @@
struct hs3001_data {
struct i2c_client *client;
- struct mutex i2c_lock; /* lock for sending i2c commands */
u32 wait_time; /* in us */
int temperature; /* in milli degree */
u32 humidity; /* in milli % */
@@ -112,12 +111,9 @@ static int hs3001_read(struct device *dev, enum hwmon_sensor_types type,
struct i2c_client *client = data->client;
int ret;
- mutex_lock(&data->i2c_lock);
ret = i2c_master_send(client, NULL, 0);
- if (ret < 0) {
- mutex_unlock(&data->i2c_lock);
+ if (ret < 0)
return ret;
- }
/*
* Sensor needs some time to process measurement depending on
@@ -126,8 +122,6 @@ static int hs3001_read(struct device *dev, enum hwmon_sensor_types type,
fsleep(data->wait_time);
ret = hs3001_data_fetch_command(client, data);
- mutex_unlock(&data->i2c_lock);
-
if (ret < 0)
return ret;
@@ -211,8 +205,6 @@ static int hs3001_probe(struct i2c_client *client)
data->wait_time = (HS3001_WAKEUP_TIME + HS3001_14BIT_RESOLUTION +
HS3001_14BIT_RESOLUTION);
- mutex_init(&data->i2c_lock);
-
hwmon_dev = devm_hwmon_device_register_with_info(dev,
client->name,
data,
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 1688c210888a..0b4bdcd33c7b 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -19,6 +19,7 @@
#include <linux/kstrtox.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/property.h>
#include <linux/slab.h>
@@ -36,6 +37,7 @@ struct hwmon_device {
const char *label;
struct device dev;
const struct hwmon_chip_info *chip;
+ struct mutex lock;
struct list_head tzdata;
struct attribute_group group;
const struct attribute_group **groups;
@@ -165,6 +167,8 @@ static int hwmon_thermal_get_temp(struct thermal_zone_device *tz, int *temp)
int ret;
long t;
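+ /* scope-based guard: the lock is dropped automatically on return */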
+ guard(mutex)(&hwdev->lock);
+
ret = hwdev->chip->ops->read(tdata->dev, hwmon_temp, hwmon_temp_input,
tdata->index, &t);
if (ret < 0)
@@ -193,6 +197,8 @@ static int hwmon_thermal_set_trips(struct thermal_zone_device *tz, int low, int
if (!info[i])
return 0;
+ guard(mutex)(&hwdev->lock);
+
if (info[i]->config[tdata->index] & HWMON_T_MIN) {
err = chip->ops->write(tdata->dev, hwmon_temp,
hwmon_temp_min, tdata->index, low);
@@ -330,8 +336,6 @@ static int hwmon_attr_base(enum hwmon_sensor_types type)
* attached to an i2c client device.
*/
-static DEFINE_MUTEX(hwmon_pec_mutex);
-
static int hwmon_match_device(struct device *dev, const void *data)
{
return dev->class == &hwmon_class;
@@ -362,17 +366,16 @@ static ssize_t pec_store(struct device *dev, struct device_attribute *devattr,
if (!hdev)
return -ENODEV;
- mutex_lock(&hwmon_pec_mutex);
-
/*
* If there is no write function, we assume that chip specific
* handling is not required.
*/
hwdev = to_hwmon_device(hdev);
+ guard(mutex)(&hwdev->lock);
if (hwdev->chip->ops->write) {
err = hwdev->chip->ops->write(hdev, hwmon_chip, hwmon_chip_pec, 0, val);
if (err && err != -EOPNOTSUPP)
- goto unlock;
+ goto put;
}
if (!val)
@@ -381,8 +384,7 @@ static ssize_t pec_store(struct device *dev, struct device_attribute *devattr,
client->flags |= I2C_CLIENT_PEC;
err = count;
-unlock:
- mutex_unlock(&hwmon_pec_mutex);
+put:
put_device(hdev);
return err;
@@ -426,18 +428,25 @@ static ssize_t hwmon_attr_show(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+ s64 val64;
long val;
int ret;
+ guard(mutex)(&hwdev->lock);
+
ret = hattr->ops->read(dev, hattr->type, hattr->attr, hattr->index,
- &val);
+ (hattr->type == hwmon_energy64) ? (long *)&val64 : &val);
if (ret < 0)
return ret;
+ if (hattr->type != hwmon_energy64)
+ val64 = val;
+
trace_hwmon_attr_show(hattr->index + hwmon_attr_base(hattr->type),
- hattr->name, val);
+ hattr->name, val64);
- return sprintf(buf, "%ld\n", val);
+ return sprintf(buf, "%lld\n", val64);
}
static ssize_t hwmon_attr_show_string(struct device *dev,
@@ -445,10 +454,13 @@ static ssize_t hwmon_attr_show_string(struct device *dev,
char *buf)
{
struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
enum hwmon_sensor_types type = hattr->type;
const char *s;
int ret;
+ guard(mutex)(&hwdev->lock);
+
ret = hattr->ops->read_string(dev, hattr->type, hattr->attr,
hattr->index, &s);
if (ret < 0)
@@ -465,6 +477,7 @@ static ssize_t hwmon_attr_store(struct device *dev,
const char *buf, size_t count)
{
struct hwmon_device_attribute *hattr = to_hwmon_attr(devattr);
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
long val;
int ret;
@@ -472,13 +485,15 @@ static ssize_t hwmon_attr_store(struct device *dev,
if (ret < 0)
return ret;
+ guard(mutex)(&hwdev->lock);
+
ret = hattr->ops->write(dev, hattr->type, hattr->attr, hattr->index,
val);
if (ret < 0)
return ret;
trace_hwmon_attr_store(hattr->index + hwmon_attr_base(hattr->type),
- hattr->name, val);
+ hattr->name, (s64)val);
return count;
}
@@ -734,6 +749,7 @@ static const char * const *__templates[] = {
[hwmon_curr] = hwmon_curr_attr_templates,
[hwmon_power] = hwmon_power_attr_templates,
[hwmon_energy] = hwmon_energy_attr_templates,
+ [hwmon_energy64] = hwmon_energy_attr_templates,
[hwmon_humidity] = hwmon_humidity_attr_templates,
[hwmon_fan] = hwmon_fan_attr_templates,
[hwmon_pwm] = hwmon_pwm_attr_templates,
@@ -747,6 +763,7 @@ static const int __templates_size[] = {
[hwmon_curr] = ARRAY_SIZE(hwmon_curr_attr_templates),
[hwmon_power] = ARRAY_SIZE(hwmon_power_attr_templates),
[hwmon_energy] = ARRAY_SIZE(hwmon_energy_attr_templates),
+ [hwmon_energy64] = ARRAY_SIZE(hwmon_energy_attr_templates),
[hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates),
[hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates),
[hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates),
@@ -785,6 +802,22 @@ int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type,
}
EXPORT_SYMBOL_GPL(hwmon_notify_event);
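+/*
+ * hwmon_lock/hwmon_unlock let drivers take the same per-device lock
+ * that the core holds around its ops callbacks (see hwmon_attr_show()
+ * and hwmon_attr_store()), so driver-private attributes can serialize
+ * against them.
+ */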
+void hwmon_lock(struct device *dev)
+{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ mutex_lock(&hwdev->lock);
+}
+EXPORT_SYMBOL_GPL(hwmon_lock);
+
+void hwmon_unlock(struct device *dev)
+{
+ struct hwmon_device *hwdev = to_hwmon_device(dev);
+
+ mutex_unlock(&hwdev->lock);
+}
+EXPORT_SYMBOL_GPL(hwmon_unlock);
+
static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info)
{
int i, n;
@@ -945,6 +978,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
tdev = tdev->parent;
hdev->of_node = tdev ? tdev->of_node : NULL;
hwdev->chip = chip;
+ mutex_init(&hwdev->lock);
dev_set_drvdata(hdev, drvdata);
dev_set_name(hdev, HWMON_ID_FORMAT, id);
err = device_register(hdev);
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
index 2a530da21949..bf006cb272b1 100644
--- a/drivers/hwmon/i5500_temp.c
+++ b/drivers/hwmon/i5500_temp.c
@@ -8,13 +8,10 @@
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/jiffies.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/err.h>
-#include <linux/mutex.h>
/* Register definitions from datasheet */
#define REG_TSTHRCATA 0xE2
diff --git a/drivers/hwmon/ina238.c b/drivers/hwmon/ina238.c
index 59a2c8889fa2..ff67b03189f7 100644
--- a/drivers/hwmon/ina238.c
+++ b/drivers/hwmon/ina238.c
@@ -16,8 +16,6 @@
#include <linux/of.h>
#include <linux/regmap.h>
-#include <linux/platform_data/ina2xx.h>
-
/* INA238 register definitions */
#define INA238_CONFIG 0x0
#define INA238_ADC_CONFIG 0x1
@@ -53,7 +51,7 @@
#define INA238_REGISTERS 0x20
-#define INA238_RSHUNT_DEFAULT 10000 /* uOhm */
+#define INA238_RSHUNT_DEFAULT 2500 /* uOhm */
/* Default configuration of device on reset. */
#define INA238_CONFIG_DEFAULT 0
@@ -62,6 +60,7 @@
#define INA238_ADC_CONFIG_DEFAULT 0xfb6a
/* Configure alerts to be based on averaged value (SLOWALERT) */
#define INA238_DIAG_ALERT_DEFAULT 0x2000
+#define INA238_DIAG_ALERT_APOL BIT(12)
/*
* This driver uses a fixed calibration value in order to scale current/power
* based on a fixed shunt resistor value. This allows for conversion within the
@@ -69,46 +68,32 @@
* relative to the shunt resistor value within the driver. This is similar to
* how the ina2xx driver handles current/power scaling.
*
- * The end result of this is that increasing shunt values (from a fixed 20 mOhm
- * shunt) increase the effective current/power accuracy whilst limiting the
- * range and decreasing shunt values decrease the effective accuracy but
- * increase the range.
- *
- * The value of the Current register is calculated given the following:
- * Current (A) = (shunt voltage register * 5) * calibration / 81920
- *
- * The maximum shunt voltage is 163.835 mV (0x7fff, ADC_RANGE = 0, gain = 4).
- * With the maximum current value of 0x7fff and a fixed shunt value results in
- * a calibration value of 16384 (0x4000).
- *
- * 0x7fff = (0x7fff * 5) * calibration / 81920
- * calibration = 0x4000
- *
- * Equivalent calibration is applied for the Power register (maximum value for
- * bus voltage is 102396.875 mV, 0x7fff), where the maximum power that can
- * occur is ~16776192 uW (register value 0x147a8):
- *
- * This scaling means the resulting values for Current and Power registers need
- * to be scaled by the difference between the fixed shunt resistor and the
- * actual shunt resistor:
- *
- * shunt = 0x4000 / (819.2 * 10^6) / 0.001 = 20000 uOhms (with 1mA/lsb)
+ * To achieve the best possible dynamic range, the value of the shunt voltage
+ * register should match the value of the current register. With that, the
+ * maximum shunt voltage register value of 0x7fff = 32,767 (163,835 uV at
+ * 5 uV/lsb) matches the maximum current, and no accuracy is lost.
+ * Experiments with a real chip show that this is
+ * achieved by setting the SHUNT_CAL register to a value of 0x1000 = 4,096.
+ * Per datasheet,
+ * SHUNT_CAL = 819.2 x 10^6 x CURRENT_LSB x Rshunt
+ * = 819,200,000 x CURRENT_LSB x Rshunt
+ * With SHUNT_CAL set to 4,096, we get
+ * CURRENT_LSB = 4,096 / (819,200,000 x Rshunt)
+ * Assuming an Rshunt value of 5 mOhm, we get
+ * CURRENT_LSB = 4,096 / (819,200,000 x 0.005) = 1mA
+ * and thus a dynamic range of 1mA ... 32,767mA, which is sufficient for most
+ * applications. The actual dynamic range is of course determined by the actual
+ * shunt resistor value.
*
- * Current (mA) = register value * 20000 / rshunt / 4 * gain
- * Power (mW) = 0.2 * register value * 20000 / rshunt / 4 * gain
- * (Specific for SQ52206)
- * Power (mW) = 0.24 * register value * 20000 / rshunt / 4 * gain
- * Energy (uJ) = 16 * 0.24 * register value * 20000 / rshunt / 4 * gain * 1000
+ * Power and energy values are scaled accordingly.
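+ *
+ * For example, with CURRENT_LSB = 1 mA and a power calculation factor
+ * of 20 (Power = 0.2 x CURRENT_LSB per the INA238 datasheet), the
+ * resulting power LSB is 0.2 mW.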
*/
-#define INA238_CALIBRATION_VALUE 16384
-#define INA238_FIXED_SHUNT 20000
+#define INA238_CALIBRATION_VALUE 4096
+#define INA238_FIXED_SHUNT 5000
-#define INA238_SHUNT_VOLTAGE_LSB 5 /* 5 uV/lsb */
-#define INA238_BUS_VOLTAGE_LSB 3125 /* 3.125 mV/lsb */
-#define INA238_DIE_TEMP_LSB 1250000 /* 125.0000 mC/lsb */
-#define SQ52206_BUS_VOLTAGE_LSB 3750 /* 3.75 mV/lsb */
-#define SQ52206_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
-#define INA228_DIE_TEMP_LSB 78125 /* 7.8125 mC/lsb */
+#define INA238_SHUNT_VOLTAGE_LSB 5000 /* 5 uV/lsb, in nV */
+#define INA238_BUS_VOLTAGE_LSB 3125000 /* 3.125 mV/lsb, in nV */
+#define SQ52206_BUS_VOLTAGE_LSB 3750000 /* 3.75 mV/lsb, in nV */
+
+#define NUNIT_PER_MUNIT 1000000 /* n[AV] -> m[AV] */
static const struct regmap_config ina238_regmap_config = {
.max_register = INA238_REGISTERS,
@@ -116,68 +101,87 @@ static const struct regmap_config ina238_regmap_config = {
.val_bits = 16,
};
-enum ina238_ids { ina238, ina237, sq52206, ina228 };
+enum ina238_ids { ina228, ina237, ina238, ina700, ina780, sq52206 };
struct ina238_config {
bool has_20bit_voltage_current; /* vshunt, vbus and current are 20-bit fields */
bool has_power_highest; /* chip detection power peak */
bool has_energy; /* chip detection energy */
- u8 temp_shift; /* fixed parameters for temp calculate */
- u32 power_calculate_factor; /* fixed parameters for power calculate */
+ u8 temp_resolution; /* temperature register resolution in bit */
u16 config_default; /* Power-on default state */
- int bus_voltage_lsb; /* use for temperature calculate, uV/lsb */
- int temp_lsb; /* use for temperature calculate */
+ u32 power_calculate_factor; /* fixed parameter for power calculation, from datasheet */
+ u32 bus_voltage_lsb; /* bus voltage LSB, in nV */
+ int current_lsb; /* current LSB, in uA */
};
struct ina238_data {
const struct ina238_config *config;
struct i2c_client *client;
- struct mutex config_lock;
struct regmap *regmap;
u32 rshunt;
int gain;
+ u32 voltage_lsb[2]; /* shunt, bus voltage LSB, in nV */
+ int current_lsb; /* current LSB, in uA */
+ int power_lsb; /* power LSB, in uW */
+ int energy_lsb; /* energy LSB, in uJ */
};
static const struct ina238_config ina238_config[] = {
- [ina238] = {
+ [ina228] = {
+ .has_20bit_voltage_current = true,
+ .has_energy = true,
+ .has_power_highest = false,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_resolution = 16,
+ },
+ [ina237] = {
.has_20bit_voltage_current = false,
.has_energy = false,
.has_power_highest = false,
- .temp_shift = 4,
.power_calculate_factor = 20,
.config_default = INA238_CONFIG_DEFAULT,
.bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
- .temp_lsb = INA238_DIE_TEMP_LSB,
+ .temp_resolution = 12,
},
- [ina237] = {
+ [ina238] = {
.has_20bit_voltage_current = false,
.has_energy = false,
.has_power_highest = false,
- .temp_shift = 4,
.power_calculate_factor = 20,
.config_default = INA238_CONFIG_DEFAULT,
.bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
- .temp_lsb = INA238_DIE_TEMP_LSB,
+ .temp_resolution = 12,
},
- [sq52206] = {
+ [ina700] = {
.has_20bit_voltage_current = false,
.has_energy = true,
- .has_power_highest = true,
- .temp_shift = 0,
- .power_calculate_factor = 24,
- .config_default = SQ52206_CONFIG_DEFAULT,
- .bus_voltage_lsb = SQ52206_BUS_VOLTAGE_LSB,
- .temp_lsb = SQ52206_DIE_TEMP_LSB,
+ .has_power_highest = false,
+ .power_calculate_factor = 20,
+ .config_default = INA238_CONFIG_DEFAULT,
+ .bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
+ .temp_resolution = 12,
+ .current_lsb = 480,
},
- [ina228] = {
- .has_20bit_voltage_current = true,
+ [ina780] = {
+ .has_20bit_voltage_current = false,
.has_energy = true,
.has_power_highest = false,
- .temp_shift = 0,
.power_calculate_factor = 20,
.config_default = INA238_CONFIG_DEFAULT,
.bus_voltage_lsb = INA238_BUS_VOLTAGE_LSB,
- .temp_lsb = INA228_DIE_TEMP_LSB,
+ .temp_resolution = 12,
+ .current_lsb = 2400,
+ },
+ [sq52206] = {
+ .has_20bit_voltage_current = false,
+ .has_energy = true,
+ .has_power_highest = true,
+ .power_calculate_factor = 24,
+ .config_default = SQ52206_CONFIG_DEFAULT,
+ .bus_voltage_lsb = SQ52206_BUS_VOLTAGE_LSB,
+ .temp_resolution = 16,
},
};
@@ -232,45 +236,28 @@ static int ina238_read_field_s20(const struct i2c_client *client, u8 reg, s32 *v
return 0;
}
-static int ina228_read_shunt_voltage(struct device *dev, u32 attr, int channel,
- long *val)
-{
- struct ina238_data *data = dev_get_drvdata(dev);
- int regval;
- int err;
-
- err = ina238_read_field_s20(data->client, INA238_SHUNT_VOLTAGE, &regval);
- if (err)
- return err;
-
- /*
- * gain of 1 -> LSB / 4
- * This field has 16 bit on ina238. ina228 adds another 4 bits of
- * precision. ina238 conversion factors can still be applied when
- * dividing by 16.
- */
- *val = (regval * INA238_SHUNT_VOLTAGE_LSB) * data->gain / (1000 * 4) / 16;
- return 0;
-}
-
-static int ina228_read_bus_voltage(struct device *dev, u32 attr, int channel,
- long *val)
+static int ina228_read_voltage(struct ina238_data *data, int channel, long *val)
{
- struct ina238_data *data = dev_get_drvdata(dev);
- int regval;
- int err;
+ int reg = channel ? INA238_BUS_VOLTAGE : INA238_SHUNT_VOLTAGE;
+ u32 lsb = data->voltage_lsb[channel];
+ u32 factor = NUNIT_PER_MUNIT;
+ int err, regval;
- err = ina238_read_field_s20(data->client, INA238_BUS_VOLTAGE, &regval);
- if (err)
- return err;
+ if (data->config->has_20bit_voltage_current) {
+ err = ina238_read_field_s20(data->client, reg, &regval);
+ if (err)
+ return err;
+ /*
+ * 20-bit register: the effective LSB is 1/16th of the 16-bit one.
+ * Divide by 8 and double the divider so the intermediate LSB
+ * stays integral (units of 500 pV).
+ */
+ lsb /= 8;
+ factor *= 2;
+ } else {
+ err = regmap_read(data->regmap, reg, &regval);
+ if (err)
+ return err;
+ regval = (s16)regval;
+ }
- /*
- * gain of 1 -> LSB / 4
- * This field has 16 bit on ina238. ina228 adds another 4 bits of
- * precision. ina238 conversion factors can still be applied when
- * dividing by 16.
- */
- *val = (regval * data->config->bus_voltage_lsb) / 1000 / 16;
+ *val = DIV_S64_ROUND_CLOSEST((s64)regval * lsb, factor);
return 0;
}
@@ -278,18 +265,16 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
long *val)
{
struct ina238_data *data = dev_get_drvdata(dev);
- int reg, mask;
+ int reg, mask = 0;
int regval;
int err;
+ if (attr == hwmon_in_input)
+ return ina228_read_voltage(data, channel, val);
+
switch (channel) {
case 0:
switch (attr) {
- case hwmon_in_input:
- if (data->config->has_20bit_voltage_current)
- return ina228_read_shunt_voltage(dev, attr, channel, val);
- reg = INA238_SHUNT_VOLTAGE;
- break;
case hwmon_in_max:
reg = INA238_SHUNT_OVER_VOLTAGE;
break;
@@ -310,11 +295,6 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
break;
case 1:
switch (attr) {
- case hwmon_in_input:
- if (data->config->has_20bit_voltage_current)
- return ina228_read_bus_voltage(dev, attr, channel, val);
- reg = INA238_BUS_VOLTAGE;
- break;
case hwmon_in_max:
reg = INA238_BUS_OVER_VOLTAGE;
break;
@@ -341,112 +321,126 @@ static int ina238_read_in(struct device *dev, u32 attr, int channel,
if (err < 0)
return err;
- switch (attr) {
- case hwmon_in_input:
- case hwmon_in_max:
- case hwmon_in_min:
- /* signed register, value in mV */
- regval = (s16)regval;
- if (channel == 0)
- /* gain of 1 -> LSB / 4 */
- *val = (regval * INA238_SHUNT_VOLTAGE_LSB) *
- data->gain / (1000 * 4);
- else
- *val = (regval * data->config->bus_voltage_lsb) / 1000;
- break;
- case hwmon_in_max_alarm:
- case hwmon_in_min_alarm:
+ if (mask)
*val = !!(regval & mask);
- break;
- }
+ else
+ *val = DIV_S64_ROUND_CLOSEST((s64)(s16)regval * data->voltage_lsb[channel],
+ NUNIT_PER_MUNIT);
return 0;
}
-static int ina238_write_in(struct device *dev, u32 attr, int channel,
- long val)
+static int ina238_write_in(struct device *dev, u32 attr, int channel, long val)
{
struct ina238_data *data = dev_get_drvdata(dev);
+ static const int low_limits[2] = {-164, 0};
+ static const int high_limits[2] = {164, 150000};
+ static const u8 low_regs[2] = {INA238_SHUNT_UNDER_VOLTAGE, INA238_BUS_UNDER_VOLTAGE};
+ static const u8 high_regs[2] = {INA238_SHUNT_OVER_VOLTAGE, INA238_BUS_OVER_VOLTAGE};
int regval;
- if (attr != hwmon_in_max && attr != hwmon_in_min)
- return -EOPNOTSUPP;
-
- /* convert decimal to register value */
- switch (channel) {
- case 0:
- /* signed value, clamp to max range +/-163 mV */
- regval = clamp_val(val, -163, 163);
- regval = (regval * 1000 * 4) /
- (INA238_SHUNT_VOLTAGE_LSB * data->gain);
- regval = clamp_val(regval, S16_MIN, S16_MAX) & 0xffff;
-
- switch (attr) {
- case hwmon_in_max:
- return regmap_write(data->regmap,
- INA238_SHUNT_OVER_VOLTAGE, regval);
- case hwmon_in_min:
- return regmap_write(data->regmap,
- INA238_SHUNT_UNDER_VOLTAGE, regval);
- default:
- return -EOPNOTSUPP;
- }
- case 1:
- /* signed value, positive values only. Clamp to max 102.396 V */
- regval = clamp_val(val, 0, 102396);
- regval = (regval * 1000) / data->config->bus_voltage_lsb;
- regval = clamp_val(regval, 0, S16_MAX);
+ /* Initial clamp to avoid overflows */
+ val = clamp_val(val, low_limits[channel], high_limits[channel]);
+ val = DIV_S64_ROUND_CLOSEST((s64)val * NUNIT_PER_MUNIT, data->voltage_lsb[channel]);
+ /* Final clamp to register limits */
+ regval = clamp_val(val, S16_MIN, S16_MAX) & 0xffff;
- switch (attr) {
- case hwmon_in_max:
- return regmap_write(data->regmap,
- INA238_BUS_OVER_VOLTAGE, regval);
- case hwmon_in_min:
- return regmap_write(data->regmap,
- INA238_BUS_UNDER_VOLTAGE, regval);
- default:
- return -EOPNOTSUPP;
- }
+ switch (attr) {
+ case hwmon_in_min:
+ return regmap_write(data->regmap, low_regs[channel], regval);
+ case hwmon_in_max:
+ return regmap_write(data->regmap, high_regs[channel], regval);
default:
return -EOPNOTSUPP;
}
}
-static int ina238_read_current(struct device *dev, u32 attr, long *val)
+static int __ina238_read_curr(struct ina238_data *data, long *val)
+{
+ u32 lsb = data->current_lsb;
+ int err, regval;
+
+ if (data->config->has_20bit_voltage_current) {
+ err = ina238_read_field_s20(data->client, INA238_CURRENT, &regval);
+ if (err)
+ return err;
+ lsb /= 16; /* 20-bit register: 4 extra fraction bits */
+ } else {
+ err = regmap_read(data->regmap, INA238_CURRENT, &regval);
+ if (err)
+ return err;
+ regval = (s16)regval;
+ }
+
+ *val = DIV_S64_ROUND_CLOSEST((s64)regval * lsb, 1000);
+ return 0;
+}
+
+static int ina238_read_curr(struct device *dev, u32 attr, long *val)
{
struct ina238_data *data = dev_get_drvdata(dev);
+ int reg, mask = 0;
int regval;
int err;
- switch (attr) {
- case hwmon_curr_input:
- if (data->config->has_20bit_voltage_current) {
- err = ina238_read_field_s20(data->client, INA238_CURRENT, &regval);
- if (err)
- return err;
- } else {
- err = regmap_read(data->regmap, INA238_CURRENT, &regval);
- if (err < 0)
- return err;
- /* sign-extend */
- regval = (s16)regval;
- }
-
- /* Signed register, fixed 1mA current lsb. result in mA */
- *val = div_s64((s64)regval * INA238_FIXED_SHUNT * data->gain,
- data->rshunt * 4);
+ if (attr == hwmon_curr_input)
+ return __ina238_read_curr(data, val);
- /* Account for 4 bit offset */
- if (data->config->has_20bit_voltage_current)
- *val /= 16;
+ switch (attr) {
+ case hwmon_curr_min:
+ reg = INA238_SHUNT_UNDER_VOLTAGE;
+ break;
+ case hwmon_curr_min_alarm:
+ reg = INA238_DIAG_ALERT;
+ mask = INA238_DIAG_ALERT_SHNTUL;
+ break;
+ case hwmon_curr_max:
+ reg = INA238_SHUNT_OVER_VOLTAGE;
+ break;
+ case hwmon_curr_max_alarm:
+ reg = INA238_DIAG_ALERT;
+ mask = INA238_DIAG_ALERT_SHNTOL;
break;
default:
return -EOPNOTSUPP;
}
+ err = regmap_read(data->regmap, reg, &regval);
+ if (err < 0)
+ return err;
+
+ if (mask)
+ *val = !!(regval & mask);
+ else
+ *val = DIV_S64_ROUND_CLOSEST((s64)(s16)regval * data->current_lsb, 1000);
+
return 0;
}
+static int ina238_write_curr(struct device *dev, u32 attr, long val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+
+ /* Set baseline range to avoid over/underflows */
+ val = clamp_val(val, -1000000, 1000000);
+ /* Scale */
+ val = DIV_ROUND_CLOSEST(val * 1000, data->current_lsb);
+ /* Clamp to register size */
+ regval = clamp_val(val, S16_MIN, S16_MAX) & 0xffff;
+
+ switch (attr) {
+ case hwmon_curr_min:
+ return regmap_write(data->regmap, INA238_SHUNT_UNDER_VOLTAGE,
+ regval);
+ case hwmon_curr_max:
+ return regmap_write(data->regmap, INA238_SHUNT_OVER_VOLTAGE,
+ regval);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int ina238_read_power(struct device *dev, u32 attr, long *val)
{
struct ina238_data *data = dev_get_drvdata(dev);
@@ -460,9 +454,7 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
if (err)
return err;
- /* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
- data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ power = (long long)regval * data->power_lsb;
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -471,9 +463,7 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
if (err)
return err;
- /* Fixed 1mA lsb, scaled by 1000000 to have result in uW */
- power = div_u64(regval * 1000ULL * INA238_FIXED_SHUNT * data->gain *
- data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ power = (long long)regval * data->power_lsb;
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -486,8 +476,7 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
* Truncated 24-bit compare register, lower 8-bits are
* truncated. Same conversion to/from uW as POWER register.
*/
- power = div_u64((regval << 8) * 1000ULL * INA238_FIXED_SHUNT * data->gain *
- data->config->power_calculate_factor, 4 * 100 * data->rshunt);
+ power = ((long long)regval << 8) * data->power_lsb;
/* Clamp value to maximum value of long */
*val = clamp_val(power, 0, LONG_MAX);
break;
@@ -505,13 +494,9 @@ static int ina238_read_power(struct device *dev, u32 attr, long *val)
return 0;
}
-static int ina238_write_power(struct device *dev, u32 attr, long val)
+static int ina238_write_power_max(struct device *dev, long val)
{
struct ina238_data *data = dev_get_drvdata(dev);
- long regval;
-
- if (attr != hwmon_power_max)
- return -EOPNOTSUPP;
/*
* Unsigned positive values. Compared against the 24-bit power register,
@@ -519,12 +504,16 @@ static int ina238_write_power(struct device *dev, u32 attr, long val)
* register.
* The first clamp_val() is to establish a baseline to avoid overflows.
*/
- regval = clamp_val(val, 0, LONG_MAX / 2);
- regval = div_u64(regval * 4 * 100 * data->rshunt, data->config->power_calculate_factor *
- 1000ULL * INA238_FIXED_SHUNT * data->gain);
- regval = clamp_val(regval >> 8, 0, U16_MAX);
+ val = clamp_val(val, 0, LONG_MAX / 2);
+ val = DIV_ROUND_CLOSEST(val, data->power_lsb);
+ val = clamp_val(val >> 8, 0, U16_MAX);
+
+ return regmap_write(data->regmap, INA238_POWER_LIMIT, val);
+}
- return regmap_write(data->regmap, INA238_POWER_LIMIT, regval);
+static int ina238_temp_from_reg(s16 regval, u8 resolution)
+{
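+ /*
+ * e.g. at 12-bit resolution: (regval >> 4) * 1000 / 8, i.e. 125 mC/lsb;
+ * at 16-bit resolution: regval * 1000 / 128, i.e. 7.8125 mC/lsb
+ */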
+ return ((regval >> (16 - resolution)) * 1000) >> (resolution - 9);
}
static int ina238_read_temp(struct device *dev, u32 attr, long *val)
@@ -538,17 +527,14 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
err = regmap_read(data->regmap, INA238_DIE_TEMP, &regval);
if (err)
return err;
- /* Signed, result in mC */
- *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
- (s64)data->config->temp_lsb, 10000);
+ *val = ina238_temp_from_reg(regval, data->config->temp_resolution);
break;
case hwmon_temp_max:
err = regmap_read(data->regmap, INA238_TEMP_LIMIT, &regval);
if (err)
return err;
/* Signed, result in mC */
- *val = div_s64(((s64)((s16)regval) >> data->config->temp_shift) *
- (s64)data->config->temp_lsb, 10000);
+ *val = ina238_temp_from_reg(regval, data->config->temp_resolution);
break;
case hwmon_temp_max_alarm:
err = regmap_read(data->regmap, INA238_DIAG_ALERT, &regval);
@@ -564,39 +550,37 @@ static int ina238_read_temp(struct device *dev, u32 attr, long *val)
return 0;
}
-static int ina238_write_temp(struct device *dev, u32 attr, long val)
+static u16 ina238_temp_to_reg(long val, u8 resolution)
{
- struct ina238_data *data = dev_get_drvdata(dev);
- int regval;
+ int fraction = 1000 - DIV_ROUND_CLOSEST(1000, BIT(resolution - 9));
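+ /* largest sub-degree fraction representable, e.g. 875 mC at 12-bit resolution */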
- if (attr != hwmon_temp_max)
- return -EOPNOTSUPP;
+ val = clamp_val(val, -255000 - fraction, 255000 + fraction);
- /* Signed */
- val = clamp_val(val, -40000, 125000);
- regval = div_s64(val * 10000, data->config->temp_lsb) << data->config->temp_shift;
- regval = clamp_val(regval, S16_MIN, S16_MAX) & (0xffff << data->config->temp_shift);
+ return (DIV_ROUND_CLOSEST(val << (resolution - 9), 1000) << (16 - resolution)) & 0xffff;
+}
+static int ina238_write_temp_max(struct device *dev, long val)
+{
+ struct ina238_data *data = dev_get_drvdata(dev);
+ int regval;
+
+ regval = ina238_temp_to_reg(val, data->config->temp_resolution);
return regmap_write(data->regmap, INA238_TEMP_LIMIT, regval);
}
-static ssize_t energy1_input_show(struct device *dev,
- struct device_attribute *da, char *buf)
+static int ina238_read_energy(struct device *dev, s64 *energy)
{
struct ina238_data *data = dev_get_drvdata(dev);
- int ret;
u64 regval;
- u64 energy;
+ int ret;
ret = ina238_read_reg40(data->client, SQ52206_ENERGY, &regval);
if (ret)
return ret;
/* result in uJ */
- energy = div_u64(regval * INA238_FIXED_SHUNT * data->gain * 16 * 10 *
- data->config->power_calculate_factor, 4 * data->rshunt);
-
- return sysfs_emit(buf, "%llu\n", energy);
+ *energy = regval * data->energy_lsb;
+ return 0;
}
static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
@@ -606,9 +590,11 @@ static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_in:
return ina238_read_in(dev, attr, channel, val);
case hwmon_curr:
- return ina238_read_current(dev, attr, val);
+ return ina238_read_curr(dev, attr, val);
case hwmon_power:
return ina238_read_power(dev, attr, val);
+ case hwmon_energy64:
+ return ina238_read_energy(dev, (s64 *)val);
case hwmon_temp:
return ina238_read_temp(dev, attr, val);
default:
@@ -620,28 +606,18 @@ static int ina238_read(struct device *dev, enum hwmon_sensor_types type,
static int ina238_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
- struct ina238_data *data = dev_get_drvdata(dev);
- int err;
-
- mutex_lock(&data->config_lock);
-
switch (type) {
case hwmon_in:
- err = ina238_write_in(dev, attr, channel, val);
- break;
+ return ina238_write_in(dev, attr, channel, val);
+ case hwmon_curr:
+ return ina238_write_curr(dev, attr, val);
case hwmon_power:
- err = ina238_write_power(dev, attr, val);
- break;
+ return ina238_write_power_max(dev, val);
case hwmon_temp:
- err = ina238_write_temp(dev, attr, val);
- break;
+ return ina238_write_temp_max(dev, val);
default:
- err = -EOPNOTSUPP;
- break;
+ return -EOPNOTSUPP;
}
-
- mutex_unlock(&data->config_lock);
- return err;
}
static umode_t ina238_is_visible(const void *drvdata,
@@ -650,6 +626,7 @@ static umode_t ina238_is_visible(const void *drvdata,
{
const struct ina238_data *data = drvdata;
bool has_power_highest = data->config->has_power_highest;
+ bool has_energy = data->config->has_energy;
switch (type) {
case hwmon_in:
@@ -667,7 +644,12 @@ static umode_t ina238_is_visible(const void *drvdata,
case hwmon_curr:
switch (attr) {
case hwmon_curr_input:
+ case hwmon_curr_max_alarm:
+ case hwmon_curr_min_alarm:
return 0444;
+ case hwmon_curr_max:
+ case hwmon_curr_min:
+ return 0644;
default:
return 0;
}
@@ -685,6 +667,11 @@ static umode_t ina238_is_visible(const void *drvdata,
default:
return 0;
}
+ case hwmon_energy64:
+ /* hwmon_energy_input */
+ if (has_energy)
+ return 0444;
+ return 0;
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
@@ -712,11 +699,14 @@ static const struct hwmon_channel_info * const ina238_info[] = {
INA238_HWMON_IN_CONFIG),
HWMON_CHANNEL_INFO(curr,
/* 0: current through shunt */
- HWMON_C_INPUT),
+ HWMON_C_INPUT | HWMON_C_MIN | HWMON_C_MIN_ALARM |
+ HWMON_C_MAX | HWMON_C_MAX_ALARM),
HWMON_CHANNEL_INFO(power,
/* 0: power */
HWMON_P_INPUT | HWMON_P_MAX |
HWMON_P_MAX_ALARM | HWMON_P_INPUT_HIGHEST),
+ HWMON_CHANNEL_INFO(energy64,
+ HWMON_E_INPUT),
HWMON_CHANNEL_INFO(temp,
/* 0: die temperature */
HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
@@ -734,18 +724,8 @@ static const struct hwmon_chip_info ina238_chip_info = {
.info = ina238_info,
};
-/* energy attributes are 5 bytes wide so we need u64 */
-static DEVICE_ATTR_RO(energy1_input);
-
-static struct attribute *ina238_attrs[] = {
- &dev_attr_energy1_input.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(ina238);
-
static int ina238_probe(struct i2c_client *client)
{
- struct ina2xx_platform_data *pdata = dev_get_platdata(&client->dev);
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct ina238_data *data;
@@ -763,41 +743,54 @@ static int ina238_probe(struct i2c_client *client)
/* set the device type */
data->config = &ina238_config[chip];
- mutex_init(&data->config_lock);
-
data->regmap = devm_regmap_init_i2c(client, &ina238_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(dev, "failed to allocate register map\n");
return PTR_ERR(data->regmap);
}
- /* load shunt value */
- data->rshunt = INA238_RSHUNT_DEFAULT;
- if (device_property_read_u32(dev, "shunt-resistor", &data->rshunt) < 0 && pdata)
- data->rshunt = pdata->shunt_uohms;
- if (data->rshunt == 0) {
- dev_err(dev, "invalid shunt resister value %u\n", data->rshunt);
- return -EINVAL;
- }
-
- /* load shunt gain value */
- if (device_property_read_u32(dev, "ti,shunt-gain", &data->gain) < 0)
- data->gain = 4; /* Default of ADCRANGE = 0 */
- if (data->gain != 1 && data->gain != 2 && data->gain != 4) {
- dev_err(dev, "invalid shunt gain value %u\n", data->gain);
- return -EINVAL;
- }
-
/* Setup CONFIG register */
config = data->config->config_default;
- if (chip == sq52206) {
- if (data->gain == 1)
- config |= SQ52206_CONFIG_ADCRANGE_HIGH; /* ADCRANGE = 10/11 is /1 */
- else if (data->gain == 2)
- config |= SQ52206_CONFIG_ADCRANGE_LOW; /* ADCRANGE = 01 is /2 */
- } else if (data->gain == 1) {
- config |= INA238_CONFIG_ADCRANGE; /* ADCRANGE = 1 is /1 */
+ if (data->config->current_lsb) {
+ data->voltage_lsb[0] = INA238_SHUNT_VOLTAGE_LSB;
+ data->current_lsb = data->config->current_lsb;
+ } else {
+ /* load shunt value */
+ if (device_property_read_u32(dev, "shunt-resistor", &data->rshunt) < 0)
+ data->rshunt = INA238_RSHUNT_DEFAULT;
+ if (data->rshunt == 0) {
+ dev_err(dev, "invalid shunt resister value %u\n", data->rshunt);
+ return -EINVAL;
+ }
+
+ /* load shunt gain value */
+ if (device_property_read_u32(dev, "ti,shunt-gain", &data->gain) < 0)
+ data->gain = 4; /* Default of ADCRANGE = 0 */
+ if (data->gain != 1 && data->gain != 2 && data->gain != 4) {
+ dev_err(dev, "invalid shunt gain value %u\n", data->gain);
+ return -EINVAL;
+ }
+
+ /* Setup SHUNT_CALIBRATION register with fixed value */
+ ret = regmap_write(data->regmap, INA238_SHUNT_CALIBRATION,
+ INA238_CALIBRATION_VALUE);
+ if (ret < 0) {
+ dev_err(dev, "error configuring the device: %d\n", ret);
+ return -ENODEV;
+ }
+ if (chip == sq52206) {
+ if (data->gain == 1) /* ADCRANGE = 10/11 is /1 */
+ config |= SQ52206_CONFIG_ADCRANGE_HIGH;
+ else if (data->gain == 2) /* ADCRANGE = 01 is /2 */
+ config |= SQ52206_CONFIG_ADCRANGE_LOW;
+ } else if (data->gain == 1) { /* ADCRANGE = 1 is /1 */
+ config |= INA238_CONFIG_ADCRANGE;
+ }
+ data->voltage_lsb[0] = INA238_SHUNT_VOLTAGE_LSB * data->gain / 4;
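+ /*
+ * CURRENT_LSB in uA: with the default gain of 4 and Rshunt = 5 mOhm
+ * this evaluates to 250 * 5000 * 4 / 5000 = 1000 uA, matching the
+ * derivation at the top of this file.
+ */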
+ data->current_lsb = DIV_U64_ROUND_CLOSEST(250ULL * INA238_FIXED_SHUNT * data->gain,
+ data->rshunt);
}
+
ret = regmap_write(data->regmap, INA238_CONFIG, config);
if (ret < 0) {
dev_err(dev, "error configuring the device: %d\n", ret);
@@ -812,31 +805,33 @@ static int ina238_probe(struct i2c_client *client)
return -ENODEV;
}
- /* Setup SHUNT_CALIBRATION register with fixed value */
- ret = regmap_write(data->regmap, INA238_SHUNT_CALIBRATION,
- INA238_CALIBRATION_VALUE);
- if (ret < 0) {
- dev_err(dev, "error configuring the device: %d\n", ret);
- return -ENODEV;
- }
-
/* Setup alert/alarm configuration */
- ret = regmap_write(data->regmap, INA238_DIAG_ALERT,
- INA238_DIAG_ALERT_DEFAULT);
+ config = INA238_DIAG_ALERT_DEFAULT;
+ if (device_property_read_bool(dev, "ti,alert-polarity-active-high"))
+ config |= INA238_DIAG_ALERT_APOL;
+
+ ret = regmap_write(data->regmap, INA238_DIAG_ALERT, config);
if (ret < 0) {
dev_err(dev, "error configuring the device: %d\n", ret);
return -ENODEV;
}
+ data->voltage_lsb[1] = data->config->bus_voltage_lsb;
+
+ data->power_lsb = DIV_ROUND_CLOSEST(data->current_lsb *
+ data->config->power_calculate_factor,
+ 100);
+
+ data->energy_lsb = data->power_lsb * 16;
+
hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
- &ina238_chip_info,
- data->config->has_energy ?
- ina238_groups : NULL);
+ &ina238_chip_info, NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
- dev_info(dev, "power monitor %s (Rshunt = %u uOhm, gain = %u)\n",
- client->name, data->rshunt, data->gain);
+ if (data->rshunt)
+ dev_info(dev, "power monitor %s (Rshunt = %u uOhm, gain = %u)\n",
+ client->name, data->rshunt, data->gain);
return 0;
}
@@ -845,6 +840,8 @@ static const struct i2c_device_id ina238_id[] = {
{ "ina228", ina228 },
{ "ina237", ina237 },
{ "ina238", ina238 },
+ { "ina700", ina700 },
+ { "ina780", ina780 },
{ "sq52206", sq52206 },
{ }
};
@@ -864,6 +861,14 @@ static const struct of_device_id __maybe_unused ina238_of_match[] = {
.data = (void *)ina238
},
{
+ .compatible = "ti,ina700",
+ .data = (void *)ina700
+ },
+ {
+ .compatible = "ti,ina780",
+ .data = (void *)ina780
+ },
+ {
.compatible = "silergy,sq52206",
.data = (void *)sq52206
},
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index bc3c1f7314b3..69ac0468dee4 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -156,7 +156,6 @@ struct ina2xx_data {
long rshunt;
long current_lsb_uA;
long power_lsb_uW;
- struct mutex config_lock;
struct regmap *regmap;
struct i2c_client *client;
};
@@ -390,22 +389,19 @@ static int ina226_alert_limit_read(struct ina2xx_data *data, u32 mask, int reg,
int regval;
int ret;
- mutex_lock(&data->config_lock);
ret = regmap_read(regmap, INA226_MASK_ENABLE, &regval);
if (ret)
- goto abort;
+ return ret;
if (regval & mask) {
ret = regmap_read(regmap, INA226_ALERT_LIMIT, &regval);
if (ret)
- goto abort;
+ return ret;
*val = ina2xx_get_value(data, reg, regval);
} else {
*val = 0;
}
-abort:
- mutex_unlock(&data->config_lock);
- return ret;
+ return 0;
}
static int ina226_alert_limit_write(struct ina2xx_data *data, u32 mask, int reg, long val)
@@ -421,23 +417,20 @@ static int ina226_alert_limit_write(struct ina2xx_data *data, u32 mask, int reg,
* due to register write sequence. Then, only enable the alert
* if the value is non-zero.
*/
- mutex_lock(&data->config_lock);
ret = regmap_update_bits(regmap, INA226_MASK_ENABLE,
INA226_ALERT_CONFIG_MASK, 0);
if (ret < 0)
- goto abort;
+ return ret;
ret = regmap_write(regmap, INA226_ALERT_LIMIT,
ina226_alert_to_reg(data, reg, val));
if (ret < 0)
- goto abort;
+ return ret;
if (val)
- ret = regmap_update_bits(regmap, INA226_MASK_ENABLE,
- INA226_ALERT_CONFIG_MASK, mask);
-abort:
- mutex_unlock(&data->config_lock);
- return ret;
+ return regmap_update_bits(regmap, INA226_MASK_ENABLE,
+ INA226_ALERT_CONFIG_MASK, mask);
+ return 0;
}
static int ina2xx_chip_read(struct device *dev, u32 attr, long *val)
@@ -859,9 +852,9 @@ static ssize_t shunt_resistor_store(struct device *dev,
if (status < 0)
return status;
- mutex_lock(&data->config_lock);
+ hwmon_lock(dev);
status = ina2xx_set_shunt(data, val);
- mutex_unlock(&data->config_lock);
+ hwmon_unlock(dev);
if (status < 0)
return status;
return count;
@@ -951,7 +944,6 @@ static int ina2xx_probe(struct i2c_client *client)
data->client = client;
data->config = &ina2xx_config[chip];
data->chip = chip;
- mutex_init(&data->config_lock);
data->regmap = devm_regmap_init_i2c(client, &ina2xx_regmap_config);
if (IS_ERR(data->regmap)) {
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
index ce0e3f214f5b..5ecc68dcf169 100644
--- a/drivers/hwmon/ina3221.c
+++ b/drivers/hwmon/ina3221.c
@@ -11,7 +11,6 @@
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -115,7 +114,6 @@ struct ina3221_input {
* @regmap: Register map of the device
* @fields: Register fields of the device
* @inputs: Array of channel input source specific structures
- * @lock: mutex lock to serialize sysfs attribute accesses
* @reg_config: Register value of INA3221_CONFIG
* @summation_shunt_resistor: equivalent shunt resistor value for summation
* @summation_channel_control: Value written to SCC field in INA3221_MASK_ENABLE
@@ -126,7 +124,6 @@ struct ina3221_data {
struct regmap *regmap;
struct regmap_field *fields[F_MAX_FIELDS];
struct ina3221_input inputs[INA3221_NUM_CHANNELS];
- struct mutex lock;
u32 reg_config;
int summation_shunt_resistor;
u32 summation_channel_control;
@@ -530,11 +527,8 @@ fail:
static int ina3221_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
- struct ina3221_data *ina = dev_get_drvdata(dev);
int ret;
- mutex_lock(&ina->lock);
-
switch (type) {
case hwmon_chip:
ret = ina3221_read_chip(dev, attr, val);
@@ -550,20 +544,14 @@ static int ina3221_read(struct device *dev, enum hwmon_sensor_types type,
ret = -EOPNOTSUPP;
break;
}
-
- mutex_unlock(&ina->lock);
-
return ret;
}
static int ina3221_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
- struct ina3221_data *ina = dev_get_drvdata(dev);
int ret;
- mutex_lock(&ina->lock);
-
switch (type) {
case hwmon_chip:
ret = ina3221_write_chip(dev, attr, val);
@@ -579,9 +567,6 @@ static int ina3221_write(struct device *dev, enum hwmon_sensor_types type,
ret = -EOPNOTSUPP;
break;
}
-
- mutex_unlock(&ina->lock);
-
return ret;
}
@@ -886,7 +871,6 @@ static int ina3221_probe(struct i2c_client *client)
}
ina->pm_dev = dev;
- mutex_init(&ina->lock);
dev_set_drvdata(dev, ina);
/* Enable PM runtime -- status is suspended by default */
@@ -925,7 +909,6 @@ fail:
/* pm_runtime_put_noidle() will decrease the PM refcount until 0 */
for (i = 0; i < INA3221_NUM_CHANNELS; i++)
pm_runtime_put_noidle(ina->pm_dev);
- mutex_destroy(&ina->lock);
return ret;
}
@@ -941,8 +924,6 @@ static void ina3221_remove(struct i2c_client *client)
/* pm_runtime_put_noidle() will decrease the PM refcount until 0 */
for (i = 0; i < INA3221_NUM_CHANNELS; i++)
pm_runtime_put_noidle(ina->pm_dev);
-
- mutex_destroy(&ina->lock);
}
static int ina3221_suspend(struct device *dev)
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 06f0ab2f52fa..6549dc543781 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -19,7 +19,6 @@
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
/* Addresses to scan */
@@ -179,7 +178,6 @@ static struct jc42_chips jc42_chips[] = {
/* Each client has this additional data */
struct jc42_data {
- struct mutex update_lock; /* protect register access */
struct regmap *regmap;
bool extended; /* true if extended range supported */
bool valid;
@@ -216,8 +214,6 @@ static int jc42_read(struct device *dev, enum hwmon_sensor_types type,
unsigned int regval;
int ret, temp, hyst;
- mutex_lock(&data->update_lock);
-
switch (attr) {
case hwmon_temp_input:
ret = regmap_read(data->regmap, JC42_REG_TEMP, &regval);
@@ -295,8 +291,6 @@ static int jc42_read(struct device *dev, enum hwmon_sensor_types type,
break;
}
- mutex_unlock(&data->update_lock);
-
return ret;
}
@@ -308,8 +302,6 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
int diff, hyst;
int ret;
- mutex_lock(&data->update_lock);
-
switch (attr) {
case hwmon_temp_min:
ret = regmap_write(data->regmap, JC42_REG_TEMP_LOWER,
@@ -356,8 +348,6 @@ static int jc42_write(struct device *dev, enum hwmon_sensor_types type,
break;
}
- mutex_unlock(&data->update_lock);
-
return ret;
}
@@ -498,7 +488,6 @@ static int jc42_probe(struct i2c_client *client)
return PTR_ERR(data->regmap);
i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
ret = regmap_read(data->regmap, JC42_REG_CAP, &cap);
if (ret)
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index babf2413d666..a5d8f45b7881 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -31,9 +31,6 @@ static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
-/* Provide lock for writing to NB_SMU_IND_ADDR */
-static DEFINE_MUTEX(nb_smu_ind_mutex);
-
#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3 0x15b3
#endif
@@ -84,6 +81,19 @@ static DEFINE_MUTEX(nb_smu_ind_mutex);
*/
#define AMD_I3255_STR "3255"
+/*
+ * PCI Device IDs for AMD's Family 17h-based SOCs.
+ * Defining locally as IDs are not shared.
+ */
+#define PCI_DEVICE_ID_AMD_17H_M90H_DF_F3 0x1663
+
+/*
+ * PCI Device IDs for AMD's Family 1Ah-based SOCs.
+ * Defining locally as IDs are not shared.
+ */
+#define PCI_DEVICE_ID_AMD_1AH_M50H_DF_F3 0x12cb
+#define PCI_DEVICE_ID_AMD_1AH_M90H_DF_F3 0x127b
+
struct k10temp_data {
struct pci_dev *pdev;
void (*read_htcreg)(struct pci_dev *pdev, u32 *regval);
@@ -130,12 +140,10 @@ static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval)
static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn,
unsigned int base, int offset, u32 *val)
{
- mutex_lock(&nb_smu_ind_mutex);
pci_bus_write_config_dword(pdev->bus, devfn,
base, offset);
pci_bus_read_config_dword(pdev->bus, devfn,
base + 4, val);
- mutex_unlock(&nb_smu_ind_mutex);
}
static void read_htcreg_nb_f15(struct pci_dev *pdev, u32 *regval)
@@ -546,6 +554,7 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M40H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_M90H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
@@ -556,7 +565,10 @@ static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_19H_M78H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M50H_DF_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M90H_DF_F3) },
{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
{}
};
diff --git a/drivers/hwmon/lenovo-ec-sensors.c b/drivers/hwmon/lenovo-ec-sensors.c
index 143fb79713f7..8681bbf6665b 100644
--- a/drivers/hwmon/lenovo-ec-sensors.c
+++ b/drivers/hwmon/lenovo-ec-sensors.c
@@ -66,7 +66,7 @@ enum systems {
LENOVO_P8,
};
-static int px_temp_map[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+static int px_temp_map[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 31, 32};
static const char * const lenovo_px_ec_temp_label[] = {
"CPU1",
@@ -84,9 +84,29 @@ static const char * const lenovo_px_ec_temp_label[] = {
"PCI_Z3",
"PCI_Z4",
"AMB",
+ "PSU1",
+ "PSU2",
};
-static int gen_temp_map[] = {0, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+static int p8_temp_map[] = {0, 1, 2, 8, 9, 13, 14, 15, 16, 17, 19, 20, 33};
+
+static const char * const lenovo_p8_ec_temp_label[] = {
+ "CPU1",
+ "CPU_DIMM_BANK1",
+ "CPU_DIMM_BANK2",
+ "M2_Z2R",
+ "M2_Z3R",
+ "DIMM_RIGHT",
+ "DIMM_LEFT",
+ "PCI_Z1",
+ "PCI_Z2",
+ "PCI_Z3",
+ "AMB",
+ "REAR_VR",
+ "PSU",
+};
+
+static int gen_temp_map[] = {0, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 31};
static const char * const lenovo_gen_ec_temp_label[] = {
"CPU1",
@@ -101,6 +121,7 @@ static const char * const lenovo_gen_ec_temp_label[] = {
"PCI_Z3",
"PCI_Z4",
"AMB",
+ "PSU",
};
static int px_fan_map[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
@@ -293,6 +314,8 @@ static const struct hwmon_channel_info *lenovo_ec_hwmon_info_px[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
@@ -327,6 +350,7 @@ static const struct hwmon_channel_info *lenovo_ec_hwmon_info_p8[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
@@ -359,6 +383,7 @@ static const struct hwmon_channel_info *lenovo_ec_hwmon_info_p7[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
@@ -388,6 +413,7 @@ static const struct hwmon_channel_info *lenovo_ec_hwmon_info_p5[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MAX,
@@ -545,9 +571,9 @@ static int lenovo_ec_probe(struct platform_device *pdev)
break;
case 3:
ec_data->fan_labels = p8_ec_fan_label;
- ec_data->temp_labels = lenovo_gen_ec_temp_label;
+ ec_data->temp_labels = lenovo_p8_ec_temp_label;
ec_data->fan_map = p8_fan_map;
- ec_data->temp_map = gen_temp_map;
+ ec_data->temp_map = p8_temp_map;
lenovo_ec_chip_info.info = lenovo_ec_hwmon_info_p8;
break;
default:
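
Editorial note on the lenovo-ec hunks above: each hwmon temperature channel is an index pair, where *_temp_map[n] selects the EC register backing channel n and *_ec_temp_label[n] names it, so the map, the label array, and the HWMON_CHANNEL_INFO entries must all stay the same length. That is why the PSU additions touch all three in lockstep. A minimal sketch of the indirection, with a hypothetical EC accessor name:

	/* Channel n reports the EC register selected by temp_map[n] */
	static int example_read_temp(const int *temp_map, int n, long *val)
	{
		/* ec_read_temp_reg() is a stand-in for the driver's EC accessor */
		return ec_read_temp_reg(temp_map[n], val);
	}
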
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 9b4875e2fd8d..eda93a8c23c9 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -39,6 +39,7 @@ enum lm75_type { /* keep sorted in alphabetical order */
max6626,
max31725,
mcp980x,
+ p3t1750,
p3t1755,
pct2075,
stds75,
@@ -222,6 +223,13 @@ static const struct lm75_params device_params[] = {
.default_resolution = 9,
.default_sample_time = MSEC_PER_SEC / 18,
},
+ [p3t1750] = {
+ .clr_mask = 1 << 1 | 1 << 7, /* disable SMBAlert and one-shot */
+ .default_resolution = 12,
+ .default_sample_time = 55,
+ .num_sample_times = 4,
+ .sample_times = (unsigned int []){ 28, 55, 110, 220 },
+ },
[p3t1755] = {
.clr_mask = 1 << 1 | 1 << 7, /* disable SMBAlert and one-shot */
.default_resolution = 12,
@@ -613,7 +621,7 @@ static int lm75_i3c_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct i3c_device *i3cdev = context;
struct lm75_data *data = i3cdev_get_drvdata(i3cdev);
- struct i3c_priv_xfer xfers[] = {
+ struct i3c_xfer xfers[] = {
{
.rnw = false,
.len = 1,
@@ -632,7 +640,7 @@ static int lm75_i3c_reg_read(void *context, unsigned int reg, unsigned int *val)
if (reg == LM75_REG_CONF && !data->params->config_reg_16bits)
xfers[1].len--;
- ret = i3c_device_do_priv_xfers(i3cdev, xfers, 2);
+ ret = i3c_device_do_xfers(i3cdev, xfers, 2, I3C_SDR);
if (ret < 0)
return ret;
@@ -650,7 +658,7 @@ static int lm75_i3c_reg_write(void *context, unsigned int reg, unsigned int val)
{
struct i3c_device *i3cdev = context;
struct lm75_data *data = i3cdev_get_drvdata(i3cdev);
- struct i3c_priv_xfer xfers[] = {
+ struct i3c_xfer xfers[] = {
{
.rnw = false,
.len = 3,
@@ -672,7 +680,7 @@ static int lm75_i3c_reg_write(void *context, unsigned int reg, unsigned int val)
data->val_buf[2] = val & 0xff;
}
- return i3c_device_do_priv_xfers(i3cdev, xfers, 1);
+ return i3c_device_do_xfers(i3cdev, xfers, 1, I3C_SDR);
}
static const struct regmap_bus lm75_i3c_regmap_bus = {
@@ -805,6 +813,7 @@ static const struct i2c_device_id lm75_i2c_ids[] = {
{ "max31725", max31725, },
{ "max31726", max31725, },
{ "mcp980x", mcp980x, },
+ { "p3t1750", p3t1750, },
{ "p3t1755", p3t1755, },
{ "pct2075", pct2075, },
{ "stds75", stds75, },
@@ -917,6 +926,10 @@ static const struct of_device_id __maybe_unused lm75_of_match[] = {
.data = (void *)mcp980x
},
{
+ .compatible = "nxp,p3t1750",
+ .data = (void *)p3t1750
+ },
+ {
.compatible = "nxp,p3t1755",
.data = (void *)p3t1755
},
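
The lm75 regmap-bus hunks above track the I3C core rename from struct i3c_priv_xfer / i3c_device_do_priv_xfers() to struct i3c_xfer / i3c_device_do_xfers(), which now takes the transfer mode explicitly. A minimal sketch of a one-byte register read in the new form; the field names and semantics are assumed to carry over from i3c_priv_xfer, as these hunks suggest:

	/* Write the register pointer, then read one byte back, in SDR mode */
	static int example_i3c_read_byte(struct i3c_device *i3cdev, u8 reg, u8 *val)
	{
		struct i3c_xfer xfers[] = {
			{ .rnw = false, .len = 1, .data.out = &reg },
			{ .rnw = true,  .len = 1, .data.in  = val },
		};

		return i3c_device_do_xfers(i3cdev, xfers, ARRAY_SIZE(xfers), I3C_SDR);
	}
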
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 8b53bb312069..9378a47bf5af 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -843,17 +843,18 @@ static int __init lm78_isa_found(unsigned short address)
}
}
-#define REALLY_SLOW_IO
/*
* We need the timeouts for at least some LM78-like
* chips. But only if we read 'undefined' registers.
+ * There used to be a "#define REALLY_SLOW_IO" to enforce that, but
+ * it has had no effect for more than a decade, so it has been
+ * dropped.
*/
val = inb_p(address + 1);
if (inb_p(address + 2) != val
|| inb_p(address + 3) != val
|| inb_p(address + 7) != val)
goto release;
-#undef REALLY_SLOW_IO
/*
* We should be able to change the 7 LSB of the address port. The
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index d2d970e73c61..37bf2d1d3d09 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -116,8 +116,14 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
(((val) < 0 ? (val) - 500 : \
(val) + 500) / 1000))
-#define FAN_FROM_REG(reg, div) ((reg) == 255 || (reg) == 0 ? 0 : \
- (1350000 + (reg)*(div) / 2) / ((reg) * (div)))
+static int fan_from_reg(int reg, int div)
+{
+ if (reg == 255 || reg == 0)
+ return 0;
+
+ return (1350000 + reg * div / 2) / (reg * div);
+}
+
#define FAN_TO_REG(val, div) ((val) * (div) * 255 <= 1350000 ? 255 : \
(1350000 + (val)*(div) / 2) / ((val) * (div)))
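
The macro-to-function conversion above keeps the rounding trick: adding half of reg * div before dividing rounds 1350000 / (reg * div) to the nearest RPM rather than truncating. A standalone check of the arithmetic, with values chosen purely for illustration:

	#include <stdio.h>

	/* Same arithmetic as fan_from_reg(): RPM = 1350000 / (reg * div), rounded */
	static int fan_from_reg(int reg, int div)
	{
		if (reg == 255 || reg == 0)
			return 0;
		return (1350000 + reg * div / 2) / (reg * div);
	}

	int main(void)
	{
		/* 1350000 / 506 = 2667.98...; rounding yields 2668, truncation 2667 */
		printf("%d\n", fan_from_reg(253, 2));
		return 0;
	}
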
@@ -465,7 +471,7 @@ static ssize_t fan_input_show(struct device *dev,
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
+ return sprintf(buf, "%d\n", fan_from_reg(data->fan[nr],
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
@@ -475,7 +481,7 @@ static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
struct lm87_data *data = lm87_update_device(dev);
int nr = to_sensor_dev_attr(attr)->index;
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
+ return sprintf(buf, "%d\n", fan_from_reg(data->fan_min[nr],
FAN_DIV_FROM_REG(data->fan_div[nr])));
}
@@ -534,7 +540,7 @@ static ssize_t fan_div_store(struct device *dev,
return err;
mutex_lock(&data->update_lock);
- min = FAN_FROM_REG(data->fan_min[nr],
+ min = fan_from_reg(data->fan_min[nr],
FAN_DIV_FROM_REG(data->fan_div[nr]));
switch (val) {
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index c1f528e292f3..3c10a5066b53 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -108,7 +108,6 @@
#include <linux/hwmon.h>
#include <linux/kstrtox.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
@@ -735,7 +734,6 @@ struct lm90_data {
struct hwmon_channel_info temp_info;
const struct hwmon_channel_info *info[3];
struct hwmon_chip_info chip;
- struct mutex update_lock;
struct delayed_work alert_work;
struct work_struct report_work;
bool valid; /* true if register values are valid */
@@ -1226,9 +1224,9 @@ static int lm90_update_alarms(struct lm90_data *data, bool force)
{
int err;
- mutex_lock(&data->update_lock);
+ hwmon_lock(data->hwmon_dev);
err = lm90_update_alarms_locked(data, force);
- mutex_unlock(&data->update_lock);
+ hwmon_unlock(data->hwmon_dev);
return err;
}
@@ -1519,9 +1517,7 @@ static int lm90_temp_read(struct device *dev, u32 attr, int channel, long *val)
int err;
u16 bit;
- mutex_lock(&data->update_lock);
err = lm90_update_device(dev);
- mutex_unlock(&data->update_lock);
if (err)
return err;
@@ -1590,11 +1586,9 @@ static int lm90_temp_write(struct device *dev, u32 attr, int channel, long val)
struct lm90_data *data = dev_get_drvdata(dev);
int err;
- mutex_lock(&data->update_lock);
-
err = lm90_update_device(dev);
if (err)
- goto error;
+ return err;
switch (attr) {
case hwmon_temp_min:
@@ -1624,9 +1618,6 @@ static int lm90_temp_write(struct device *dev, u32 attr, int channel, long val)
err = -EOPNOTSUPP;
break;
}
-error:
- mutex_unlock(&data->update_lock);
-
return err;
}
@@ -1662,9 +1653,7 @@ static int lm90_chip_read(struct device *dev, u32 attr, int channel, long *val)
struct lm90_data *data = dev_get_drvdata(dev);
int err;
- mutex_lock(&data->update_lock);
err = lm90_update_device(dev);
- mutex_unlock(&data->update_lock);
if (err)
return err;
@@ -1710,11 +1699,9 @@ static int lm90_chip_write(struct device *dev, u32 attr, int channel, long val)
struct i2c_client *client = data->client;
int err;
- mutex_lock(&data->update_lock);
-
err = lm90_update_device(dev);
if (err)
- goto error;
+ return err;
switch (attr) {
case hwmon_chip_update_interval:
@@ -1728,9 +1715,6 @@ static int lm90_chip_write(struct device *dev, u32 attr, int channel, long val)
err = -EOPNOTSUPP;
break;
}
-error:
- mutex_unlock(&data->update_lock);
-
return err;
}
@@ -2793,7 +2777,6 @@ static int lm90_probe(struct i2c_client *client)
data->client = client;
i2c_set_clientdata(client, data);
- mutex_init(&data->update_lock);
INIT_DELAYED_WORK(&data->alert_work, lm90_alert_work);
INIT_WORK(&data->report_work, lm90_report_alarms);
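
This lm90 change is the template for the lock-removal hunks that follow (lm92, lm95234, lm95241, lm95245, lochnagar, ltc2947, ltc4245, ltc4282, max127, max31790, max31827): the hwmon core is assumed to serialize ->read()/->write() callbacks itself, so the driver-private update_lock goes away, and only out-of-band paths such as the lm90 alert worker still lock, now through the core's hwmon_lock()/hwmon_unlock() as shown above. A condensed before/after of the pattern:

	/* Before: every accessor bounced through a driver-private mutex */
	mutex_lock(&data->update_lock);
	err = lm90_update_alarms_locked(data, force);
	mutex_unlock(&data->update_lock);

	/* After: hwmon-core callbacks are already serialized; only work/IRQ
	 * context takes the device lock explicitly.
	 */
	hwmon_lock(data->hwmon_dev);
	err = lm90_update_alarms_locked(data, force);
	hwmon_unlock(data->hwmon_dev);
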
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 0be439b38ee1..91a6b7525bb6 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -78,7 +77,6 @@ static inline u8 ALARMS_FROM_REG(s16 reg)
/* Client data (each client gets its own) */
struct lm92_data {
struct regmap *regmap;
- struct mutex update_lock;
int resolution;
};
@@ -199,15 +197,11 @@ static int lm92_temp_write(struct lm92_data *data, u32 attr, long val)
break;
case hwmon_temp_crit_hyst:
val = clamp_val(val, -120000, 220000);
- mutex_lock(&data->update_lock);
err = regmap_read(regmap, LM92_REG_TEMP_CRIT, &temp);
if (err)
- goto unlock;
+ return err;
val = TEMP_TO_REG(TEMP_FROM_REG(temp) - val, data->resolution);
- err = regmap_write(regmap, LM92_REG_TEMP_HYST, val);
-unlock:
- mutex_unlock(&data->update_lock);
- return err;
+ return regmap_write(regmap, LM92_REG_TEMP_HYST, val);
default:
return -EOPNOTSUPP;
}
@@ -396,7 +390,6 @@ static int lm92_probe(struct i2c_client *client)
data->regmap = regmap;
data->resolution = (unsigned long)i2c_get_match_data(client);
- mutex_init(&data->update_lock);
/* Initialize the chipset */
err = lm92_init_client(regmap);
diff --git a/drivers/hwmon/lm95234.c b/drivers/hwmon/lm95234.c
index 7da6c8f07332..387b3ba81dbf 100644
--- a/drivers/hwmon/lm95234.c
+++ b/drivers/hwmon/lm95234.c
@@ -14,7 +14,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/util_macros.h>
@@ -54,7 +53,6 @@ static const unsigned short normal_i2c[] = {
/* Client data (each client gets its own) */
struct lm95234_data {
struct regmap *regmap;
- struct mutex update_lock;
enum chips type;
};
@@ -107,19 +105,14 @@ static ssize_t lm95234_hyst_set(struct lm95234_data *data, long val)
u32 tcrit;
int ret;
- mutex_lock(&data->update_lock);
-
ret = regmap_read(data->regmap, LM95234_REG_TCRIT1(0), &tcrit);
if (ret)
- goto unlock;
+ return ret;
val = DIV_ROUND_CLOSEST(clamp_val(val, -255000, 255000), 1000);
val = clamp_val((int)tcrit - val, 0, 31);
- ret = regmap_write(data->regmap, LM95234_REG_TCRIT_HYST, val);
-unlock:
- mutex_unlock(&data->update_lock);
- return ret;
+ return regmap_write(data->regmap, LM95234_REG_TCRIT_HYST, val);
}
static int lm95234_crit_reg(int channel)
@@ -526,7 +519,6 @@ static int lm95234_probe(struct i2c_client *client)
return PTR_ERR(regmap);
data->regmap = regmap;
- mutex_init(&data->update_lock);
/* Initialize the LM95234 chip */
err = lm95234_init_client(dev, regmap);
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index cad0a0ff8416..456381b0938e 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -15,7 +15,6 @@
#include <linux/jiffies.h>
#include <linux/hwmon.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
#define DEVNAME "lm95241"
@@ -75,7 +74,6 @@ static const u8 lm95241_reg_address[] = {
/* Client data (each client gets its own) */
struct lm95241_data {
struct i2c_client *client;
- struct mutex update_lock;
unsigned long last_updated; /* in jiffies */
unsigned long interval; /* in milli-seconds */
bool valid; /* false until following fields are valid */
@@ -102,8 +100,6 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
struct lm95241_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- mutex_lock(&data->update_lock);
-
if (time_after(jiffies, data->last_updated
+ msecs_to_jiffies(data->interval)) ||
!data->valid) {
@@ -120,9 +116,6 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
data->last_updated = jiffies;
data->valid = true;
}
-
- mutex_unlock(&data->update_lock);
-
return data;
}
@@ -204,8 +197,6 @@ static int lm95241_write_chip(struct device *dev, u32 attr, int channel,
u8 config;
int ret;
- mutex_lock(&data->update_lock);
-
switch (attr) {
case hwmon_chip_update_interval:
config = data->config & ~CFG_CRMASK;
@@ -231,7 +222,6 @@ static int lm95241_write_chip(struct device *dev, u32 attr, int channel,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&data->update_lock);
return ret;
}
@@ -242,8 +232,6 @@ static int lm95241_write_temp(struct device *dev, u32 attr, int channel,
struct i2c_client *client = data->client;
int ret;
- mutex_lock(&data->update_lock);
-
switch (attr) {
case hwmon_temp_min:
if (channel == 1) {
@@ -313,9 +301,6 @@ static int lm95241_write_temp(struct device *dev, u32 attr, int channel,
ret = -EOPNOTSUPP;
break;
}
-
- mutex_unlock(&data->update_lock);
-
return ret;
}
@@ -443,7 +428,6 @@ static int lm95241_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- mutex_init(&data->update_lock);
/* Initialize the LM95241 chip */
lm95241_init_client(client, data);
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index 3bdc30530847..9ed300c6b5f7 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -13,7 +13,6 @@
#include <linux/hwmon.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -86,7 +85,6 @@ static const unsigned short normal_i2c[] = {
/* Client data (each client gets its own) */
struct lm95245_data {
struct regmap *regmap;
- struct mutex update_lock;
int interval; /* in msecs */
};
@@ -279,20 +277,16 @@ static int lm95245_write_temp(struct device *dev, u32 attr, int channel,
ret = regmap_write(regmap, reg, val);
return ret;
case hwmon_temp_crit_hyst:
- mutex_lock(&data->update_lock);
ret = regmap_read(regmap, LM95245_REG_RW_LOCAL_OS_TCRIT_LIMIT,
&regval);
- if (ret < 0) {
- mutex_unlock(&data->update_lock);
+ if (ret < 0)
return ret;
- }
/* Clamp to reasonable range to prevent overflow */
val = clamp_val(val, -1000000, 1000000);
val = regval - val / 1000;
val = clamp_val(val, 0, 31);
ret = regmap_write(regmap, LM95245_REG_RW_COMMON_HYSTERESIS,
val);
- mutex_unlock(&data->update_lock);
return ret;
case hwmon_temp_offset:
val = clamp_val(val, -128000, 127875);
@@ -332,14 +326,10 @@ static int lm95245_write_chip(struct device *dev, u32 attr, int channel,
long val)
{
struct lm95245_data *data = dev_get_drvdata(dev);
- int ret;
switch (attr) {
case hwmon_chip_update_interval:
- mutex_lock(&data->update_lock);
- ret = lm95245_set_conversion_rate(data, val);
- mutex_unlock(&data->update_lock);
- return ret;
+ return lm95245_set_conversion_rate(data, val);
default:
return -EOPNOTSUPP;
}
@@ -542,8 +532,6 @@ static int lm95245_probe(struct i2c_client *client)
if (IS_ERR(data->regmap))
return PTR_ERR(data->regmap);
- mutex_init(&data->update_lock);
-
/* Initialize the LM95245 chip */
ret = lm95245_init_client(data);
if (ret < 0)
diff --git a/drivers/hwmon/lochnagar-hwmon.c b/drivers/hwmon/lochnagar-hwmon.c
index 5202dddfd61e..c1ba72f6132e 100644
--- a/drivers/hwmon/lochnagar-hwmon.c
+++ b/drivers/hwmon/lochnagar-hwmon.c
@@ -10,7 +10,6 @@
#include <linux/delay.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/math64.h>
#include <linux/mfd/lochnagar.h>
#include <linux/mfd/lochnagar2_regs.h>
@@ -42,9 +41,6 @@ struct lochnagar_hwmon {
struct regmap *regmap;
long power_nsamples[ARRAY_SIZE(lochnagar_chan_names)];
-
- /* Lock to ensure only a single sensor is read at a time */
- struct mutex sensor_lock;
};
enum lochnagar_measure_mode {
@@ -178,26 +174,20 @@ static int read_sensor(struct device *dev, int chan,
u32 data;
int ret;
- mutex_lock(&priv->sensor_lock);
-
ret = do_measurement(regmap, chan, mode, nsamples);
if (ret < 0) {
dev_err(dev, "Failed to perform measurement: %d\n", ret);
- goto error;
+ return ret;
}
ret = request_data(regmap, chan, &data);
if (ret < 0) {
dev_err(dev, "Failed to read measurement: %d\n", ret);
- goto error;
+ return ret;
}
*val = float_to_long(data, precision);
-
-error:
- mutex_unlock(&priv->sensor_lock);
-
- return ret;
+ return 0;
}
static int read_power(struct device *dev, int chan, long *val)
@@ -378,8 +368,6 @@ static int lochnagar_hwmon_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- mutex_init(&priv->sensor_lock);
-
priv->regmap = dev_get_regmap(dev->parent, NULL);
if (!priv->regmap) {
dev_err(dev, "No register map found\n");
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
index 244839167e51..ad7120d1e469 100644
--- a/drivers/hwmon/ltc2947-core.c
+++ b/drivers/hwmon/ltc2947-core.c
@@ -9,8 +9,8 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/module.h>
+#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -120,12 +120,6 @@
struct ltc2947_data {
struct regmap *map;
struct device *dev;
- /*
- * The mutex is needed because the device has 2 memory pages. When
- * reading/writing the correct page needs to be set so that, the
- * complete sequence select_page->read/write needs to be protected.
- */
- struct mutex lock;
u32 lsb_energy;
bool gpio_out;
};
@@ -181,13 +175,9 @@ static int ltc2947_val_read(struct ltc2947_data *st, const u8 reg,
int ret;
u64 __val = 0;
- mutex_lock(&st->lock);
-
ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
- if (ret) {
- mutex_unlock(&st->lock);
+ if (ret)
return ret;
- }
dev_dbg(st->dev, "Read val, reg:%02X, p:%d sz:%zu\n", reg, page,
size);
@@ -207,8 +197,6 @@ static int ltc2947_val_read(struct ltc2947_data *st, const u8 reg,
break;
}
- mutex_unlock(&st->lock);
-
if (ret)
return ret;
@@ -242,13 +230,10 @@ static int ltc2947_val_write(struct ltc2947_data *st, const u8 reg,
{
int ret;
- mutex_lock(&st->lock);
/* set device on correct page */
ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, page);
- if (ret) {
- mutex_unlock(&st->lock);
+ if (ret)
return ret;
- }
dev_dbg(st->dev, "Write val, r:%02X, p:%d, sz:%zu, val:%016llX\n",
reg, page, size, val);
@@ -265,8 +250,6 @@ static int ltc2947_val_write(struct ltc2947_data *st, const u8 reg,
break;
}
- mutex_unlock(&st->lock);
-
return ret;
}
@@ -295,11 +278,9 @@ static int ltc2947_alarm_read(struct ltc2947_data *st, const u8 reg,
memset(alarms, 0, sizeof(alarms));
- mutex_lock(&st->lock);
-
ret = regmap_write(st->map, LTC2947_REG_PAGE_CTRL, LTC2947_PAGE0);
if (ret)
- goto unlock;
+ return ret;
dev_dbg(st->dev, "Read alarm, reg:%02X, mask:%02X\n", reg, mask);
/*
@@ -310,31 +291,11 @@ static int ltc2947_alarm_read(struct ltc2947_data *st, const u8 reg,
ret = regmap_bulk_read(st->map, LTC2947_REG_STATUS, alarms,
sizeof(alarms));
if (ret)
- goto unlock;
+ return ret;
/* get the alarm */
*val = !!(alarms[offset] & mask);
-unlock:
- mutex_unlock(&st->lock);
- return ret;
-}
-
-static ssize_t ltc2947_show_value(struct device *dev,
- struct device_attribute *da, char *buf)
-{
- struct ltc2947_data *st = dev_get_drvdata(dev);
- struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- int ret;
- s64 val = 0;
-
- ret = ltc2947_val_read(st, attr->index, LTC2947_PAGE0, 6, &val);
- if (ret)
- return ret;
-
- /* value in microJoule. st->lsb_energy was multiplied by 10E9 */
- val = div_s64(val * st->lsb_energy, 1000);
-
- return sprintf(buf, "%lld\n", val);
+ return 0;
}
static int ltc2947_read_temp(struct device *dev, const u32 attr, long *val,
@@ -588,6 +549,23 @@ static int ltc2947_read_in(struct device *dev, const u32 attr, long *val,
return 0;
}
+static int ltc2947_read_energy(struct device *dev, s64 *val, const int channel)
+{
+ int reg = channel ? LTC2947_REG_ENERGY2 : LTC2947_REG_ENERGY1;
+ struct ltc2947_data *st = dev_get_drvdata(dev);
+ s64 __val = 0;
+ int ret;
+
+ ret = ltc2947_val_read(st, reg, LTC2947_PAGE0, 6, &__val);
+ if (ret)
+ return ret;
+
+ /* value in microJoule. st->lsb_energy was multiplied by 10E9 */
+ *val = DIV_S64_ROUND_CLOSEST(__val * st->lsb_energy, 1000);
+
+ return 0;
+}
+
static int ltc2947_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -600,6 +578,8 @@ static int ltc2947_read(struct device *dev, enum hwmon_sensor_types type,
return ltc2947_read_power(dev, attr, val);
case hwmon_temp:
return ltc2947_read_temp(dev, attr, val, channel);
+ case hwmon_energy64:
+ return ltc2947_read_energy(dev, (s64 *)val, channel);
default:
return -ENOTSUPP;
}
@@ -897,6 +877,8 @@ static umode_t ltc2947_is_visible(const void *data,
return ltc2947_power_is_visible(attr);
case hwmon_temp:
return ltc2947_temp_is_visible(attr);
+ case hwmon_energy64:
+ return 0444;
default:
return 0;
}
@@ -929,6 +911,9 @@ static const struct hwmon_channel_info * const ltc2947_info[] = {
HWMON_T_LABEL,
HWMON_T_MAX_ALARM | HWMON_T_MIN_ALARM | HWMON_T_MAX |
HWMON_T_MIN | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(energy64,
+ HWMON_E_INPUT,
+ HWMON_E_INPUT),
NULL
};
@@ -944,19 +929,6 @@ static const struct hwmon_chip_info ltc2947_chip_info = {
.info = ltc2947_info,
};
-/* energy attributes are 6bytes wide so we need u64 */
-static SENSOR_DEVICE_ATTR(energy1_input, 0444, ltc2947_show_value, NULL,
- LTC2947_REG_ENERGY1);
-static SENSOR_DEVICE_ATTR(energy2_input, 0444, ltc2947_show_value, NULL,
- LTC2947_REG_ENERGY2);
-
-static struct attribute *ltc2947_attrs[] = {
- &sensor_dev_attr_energy1_input.dev_attr.attr,
- &sensor_dev_attr_energy2_input.dev_attr.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(ltc2947);
-
static int ltc2947_setup(struct ltc2947_data *st)
{
int ret;
@@ -1107,15 +1079,13 @@ int ltc2947_core_probe(struct regmap *map, const char *name)
st->map = map;
st->dev = dev;
dev_set_drvdata(dev, st);
- mutex_init(&st->lock);
ret = ltc2947_setup(st);
if (ret)
return ret;
hwmon = devm_hwmon_device_register_with_info(dev, name, st,
- &ltc2947_chip_info,
- ltc2947_groups);
+ &ltc2947_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon);
}
EXPORT_SYMBOL_GPL(ltc2947_core_probe);
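
With the ltc2947 conversion above, the energy readings move from hand-rolled sysfs attributes to the hwmon core's energy64 channel type: they are still published as energyN_input, but are now produced through the standard ->read() path with a 64-bit value. A small userspace sketch reading such a channel; the hwmon index in the path is an assumption and varies per system:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* Path is hypothetical; locate the right hwmonN via its name attribute */
		FILE *f = fopen("/sys/class/hwmon/hwmon3/energy1_input", "r");
		long long uj;

		if (!f || fscanf(f, "%lld", &uj) != 1) {
			perror("energy1_input");
			return EXIT_FAILURE;
		}
		fclose(f);
		printf("energy: %lld uJ\n", uj); /* hwmon reports energy in microjoules */
		return 0;
	}
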
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index 14593bc81e85..e8131a48bda7 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -18,7 +18,6 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/jiffies.h>
#include <linux/platform_data/ltc4245.h>
@@ -51,7 +50,6 @@ enum ltc4245_cmd {
struct ltc4245_data {
struct i2c_client *client;
- struct mutex update_lock;
bool valid;
unsigned long last_updated; /* in jiffies */
@@ -132,10 +130,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
s32 val;
int i;
- mutex_lock(&data->update_lock);
-
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
-
/* Read control registers -- 0x00 to 0x07 */
for (i = 0; i < ARRAY_SIZE(data->cregs); i++) {
val = i2c_smbus_read_byte_data(client, i);
@@ -161,8 +156,6 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev)
data->valid = true;
}
- mutex_unlock(&data->update_lock);
-
return data;
}
@@ -454,7 +447,6 @@ static int ltc4245_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- mutex_init(&data->update_lock);
data->use_extra_gpios = ltc4245_use_extra_gpios(client);
/* Initialize the LTC4245 chip */
diff --git a/drivers/hwmon/ltc4282.c b/drivers/hwmon/ltc4282.c
index dbb30abcd343..b9cad89f2cd9 100644
--- a/drivers/hwmon/ltc4282.c
+++ b/drivers/hwmon/ltc4282.c
@@ -12,13 +12,11 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/property.h>
#include <linux/string.h>
@@ -132,8 +130,6 @@ struct ltc4282_cache {
struct ltc4282_state {
struct regmap *map;
- /* Protect against multiple accesses to the device registers */
- struct mutex lock;
struct clk_hw clk_hw;
/*
* Used to cache values for VDD/VSOURCE depending which will be used
@@ -282,14 +278,12 @@ static int __ltc4282_read_alarm(struct ltc4282_state *st, u32 reg, u32 mask,
static int ltc4282_read_alarm(struct ltc4282_state *st, u32 reg, u32 mask,
long *val)
{
- guard(mutex)(&st->lock);
return __ltc4282_read_alarm(st, reg, mask, val);
}
static int ltc4282_vdd_source_read_in(struct ltc4282_state *st, u32 channel,
long *val)
{
- guard(mutex)(&st->lock);
if (!st->in0_1_cache[channel].en)
return -ENODATA;
@@ -301,7 +295,6 @@ static int ltc4282_vdd_source_read_hist(struct ltc4282_state *st, u32 reg,
{
int ret;
- guard(mutex)(&st->lock);
if (!st->in0_1_cache[channel].en) {
*val = *cached;
return 0;
@@ -318,7 +311,6 @@ static int ltc4282_vdd_source_read_hist(struct ltc4282_state *st, u32 reg,
static int ltc4282_vdd_source_read_lim(struct ltc4282_state *st, u32 reg,
u32 channel, u32 *cached, long *val)
{
- guard(mutex)(&st->lock);
if (!st->in0_1_cache[channel].en)
return ltc4282_read_voltage_byte_cached(st, reg, st->vfs_out,
val, cached);
@@ -329,7 +321,6 @@ static int ltc4282_vdd_source_read_lim(struct ltc4282_state *st, u32 reg,
static int ltc4282_vdd_source_read_alm(struct ltc4282_state *st, u32 mask,
u32 channel, long *val)
{
- guard(mutex)(&st->lock);
if (!st->in0_1_cache[channel].en) {
/*
* Do this otherwise alarms can get confused because we clear
@@ -413,9 +404,7 @@ static int ltc4282_read_in(struct ltc4282_state *st, u32 attr, long *val,
channel,
&st->in0_1_cache[channel].in_min_raw, val);
case hwmon_in_enable:
- scoped_guard(mutex, &st->lock) {
- *val = st->in0_1_cache[channel].en;
- }
+ *val = st->in0_1_cache[channel].en;
return 0;
case hwmon_in_fault:
/*
@@ -541,7 +530,7 @@ static int ltc4282_read_power_byte(const struct ltc4282_state *st, u32 reg,
return 0;
}
-static int ltc4282_read_energy(const struct ltc4282_state *st, u64 *val)
+static int ltc4282_read_energy(const struct ltc4282_state *st, s64 *val)
{
u64 temp, energy;
__be64 raw;
@@ -613,10 +602,12 @@ static int ltc4282_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_power:
return ltc4282_read_power(st, attr, val);
case hwmon_energy:
- scoped_guard(mutex, &st->lock) {
- *val = st->energy_en;
- }
+ *val = st->energy_en;
return 0;
+ case hwmon_energy64:
+ if (st->energy_en)
+ return ltc4282_read_energy(st, (s64 *)val);
+ return -ENODATA;
default:
return -EOPNOTSUPP;
}
@@ -683,7 +674,6 @@ static int __ltc4282_in_write_history(const struct ltc4282_state *st, u32 reg,
static int ltc4282_in_write_history(struct ltc4282_state *st, u32 reg,
long lowest, long highest, u32 fs)
{
- guard(mutex)(&st->lock);
return __ltc4282_in_write_history(st, reg, lowest, highest, fs);
}
@@ -691,8 +681,6 @@ static int ltc4282_power_reset_hist(struct ltc4282_state *st)
{
int ret;
- guard(mutex)(&st->lock);
-
ret = ltc4282_write_power_word(st, LTC4282_POWER_LOWEST,
st->power_max);
if (ret)
@@ -798,7 +786,6 @@ static int ltc4282_vdd_source_write_lim(struct ltc4282_state *st, u32 reg,
{
int ret;
- guard(mutex)(&st->lock);
if (st->in0_1_cache[channel].en)
ret = ltc4282_write_voltage_byte(st, reg, st->vfs_out, val);
else
@@ -816,7 +803,6 @@ static int ltc4282_vdd_source_reset_hist(struct ltc4282_state *st, int channel)
if (channel == LTC4282_CHAN_VDD)
lowest = st->vdd;
- guard(mutex)(&st->lock);
if (st->in0_1_cache[channel].en) {
ret = __ltc4282_in_write_history(st, LTC4282_VSOURCE_LOWEST,
lowest, 0, st->vfs_out);
@@ -856,7 +842,6 @@ static int ltc4282_vdd_source_enable(struct ltc4282_state *st, int channel,
int ret, other_chan = ~channel & 0x1;
u8 __val = val;
- guard(mutex)(&st->lock);
if (st->in0_1_cache[channel].en == !!val)
return 0;
@@ -933,8 +918,6 @@ static int ltc4282_curr_reset_hist(struct ltc4282_state *st)
{
int ret;
- guard(mutex)(&st->lock);
-
ret = __ltc4282_in_write_history(st, LTC4282_VSENSE_LOWEST,
st->vsense_max, 0, 40 * MILLI);
if (ret)
@@ -969,7 +952,6 @@ static int ltc4282_energy_enable_set(struct ltc4282_state *st, long val)
{
int ret;
- guard(mutex)(&st->lock);
/* setting the bit halts the meter */
ret = regmap_update_bits(st->map, LTC4282_ADC_CTRL,
LTC4282_METER_HALT_MASK,
@@ -1078,6 +1060,9 @@ static umode_t ltc4282_is_visible(const void *data,
case hwmon_energy:
/* hwmon_energy_enable */
return 0644;
+ case hwmon_energy64:
+ /* hwmon_energy_input */
+ return 0444;
default:
return 0;
}
@@ -1106,24 +1091,6 @@ static int ltc4282_read_labels(struct device *dev,
}
}
-static ssize_t ltc4282_energy_show(struct device *dev,
- struct device_attribute *da, char *buf)
-{
- struct ltc4282_state *st = dev_get_drvdata(dev);
- u64 energy;
- int ret;
-
- guard(mutex)(&st->lock);
- if (!st->energy_en)
- return -ENODATA;
-
- ret = ltc4282_read_energy(st, &energy);
- if (ret < 0)
- return ret;
-
- return sysfs_emit(buf, "%llu\n", energy);
-}
-
static const struct clk_ops ltc4282_ops = {
.recalc_rate = ltc4282_recalc_rate,
.determine_rate = ltc4282_determine_rate,
@@ -1588,6 +1555,8 @@ static const struct hwmon_channel_info * const ltc4282_info[] = {
HWMON_P_RESET_HISTORY | HWMON_P_LABEL),
HWMON_CHANNEL_INFO(energy,
HWMON_E_ENABLE),
+ HWMON_CHANNEL_INFO(energy64,
+ HWMON_E_INPUT),
NULL
};
@@ -1603,15 +1572,6 @@ static const struct hwmon_chip_info ltc4282_chip_info = {
.info = ltc4282_info,
};
-/* energy attributes are 6bytes wide so we need u64 */
-static SENSOR_DEVICE_ATTR_RO(energy1_input, ltc4282_energy, 0);
-
-static struct attribute *ltc4282_attrs[] = {
- &sensor_dev_attr_energy1_input.dev_attr.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(ltc4282);
-
static int ltc4282_show_fault_log(void *arg, u64 *val, u32 mask)
{
struct ltc4282_state *st = arg;
@@ -1693,8 +1653,7 @@ static int ltc4282_probe(struct i2c_client *i2c)
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
if (!st)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate memory\n");
+ return -ENOMEM;
st->map = devm_regmap_init_i2c(i2c, &ltc4282_regmap_config);
if (IS_ERR(st->map))
@@ -1717,10 +1676,8 @@ static int ltc4282_probe(struct i2c_client *i2c)
if (ret)
return ret;
- mutex_init(&st->lock);
hwmon = devm_hwmon_device_register_with_info(dev, "ltc4282", st,
- &ltc4282_chip_info,
- ltc4282_groups);
+ &ltc4282_chip_info, NULL);
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
diff --git a/drivers/hwmon/macsmc-hwmon.c b/drivers/hwmon/macsmc-hwmon.c
new file mode 100644
index 000000000000..1c0bbec7e8eb
--- /dev/null
+++ b/drivers/hwmon/macsmc-hwmon.c
@@ -0,0 +1,851 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SMC hwmon driver for Apple Silicon platforms
+ *
+ * The System Management Controller on Apple Silicon devices is responsible for
+ * measuring data from sensors across the SoC and machine. These include power,
+ * temperature, voltage and current sensors. Some "sensors" actually expose
+ * derived values. An example of this is the key PHPC, which is an estimate
+ * of the heat energy being dissipated by the SoC.
+ *
+ * While each SoC only has one SMC variant, each platform exposes a different
+ * set of sensors. For example, M1 MacBooks expose battery telemetry sensors
+ * which are not present on the M1 Mac mini. For this reason, the available
+ * sensors for a given platform are described in the device tree in a child
+ * node of the SMC device. We must walk this list of available sensors and
+ * populate the required hwmon data structures at runtime.
+ *
+ * Originally based on a concept by Jean-Francois Bortolotti <jeff@borto.fr>
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <linux/bitfield.h>
+#include <linux/hwmon.h>
+#include <linux/mfd/macsmc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define MAX_LABEL_LENGTH 32
+
+/* Temperature, voltage, current, power, fan(s) */
+#define NUM_SENSOR_TYPES 5
+
+#define FLT_EXP_BIAS 127
+#define FLT_EXP_MASK GENMASK(30, 23)
+#define FLT_MANT_BIAS 23
+#define FLT_MANT_MASK GENMASK(22, 0)
+#define FLT_SIGN_MASK BIT(31)
+
+static bool fan_control;
+module_param_unsafe(fan_control, bool, 0644);
+MODULE_PARM_DESC(fan_control,
+ "Override the SMC to set your own fan speeds on supported machines");
+
+struct macsmc_hwmon_sensor {
+ struct apple_smc_key_info info;
+ smc_key macsmc_key;
+ char label[MAX_LABEL_LENGTH];
+ u32 attrs;
+};
+
+struct macsmc_hwmon_fan {
+ struct macsmc_hwmon_sensor now;
+ struct macsmc_hwmon_sensor min;
+ struct macsmc_hwmon_sensor max;
+ struct macsmc_hwmon_sensor set;
+ struct macsmc_hwmon_sensor mode;
+ char label[MAX_LABEL_LENGTH];
+ u32 attrs;
+ bool manual;
+};
+
+struct macsmc_hwmon_sensors {
+ struct hwmon_channel_info channel_info;
+ struct macsmc_hwmon_sensor *sensors;
+ u32 count;
+};
+
+struct macsmc_hwmon_fans {
+ struct hwmon_channel_info channel_info;
+ struct macsmc_hwmon_fan *fans;
+ u32 count;
+};
+
+struct macsmc_hwmon {
+ struct device *dev;
+ struct apple_smc *smc;
+ struct device *hwmon_dev;
+ struct hwmon_chip_info chip_info;
+ /* Chip + sensor types + NULL */
+ const struct hwmon_channel_info *channel_infos[1 + NUM_SENSOR_TYPES + 1];
+ struct macsmc_hwmon_sensors temp;
+ struct macsmc_hwmon_sensors volt;
+ struct macsmc_hwmon_sensors curr;
+ struct macsmc_hwmon_sensors power;
+ struct macsmc_hwmon_fans fan;
+};
+
+static int macsmc_hwmon_read_label(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct macsmc_hwmon *hwmon = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_temp:
+ *str = hwmon->temp.sensors[channel].label;
+ break;
+ case hwmon_in:
+ *str = hwmon->volt.sensors[channel].label;
+ break;
+ case hwmon_curr:
+ *str = hwmon->curr.sensors[channel].label;
+ break;
+ case hwmon_power:
+ *str = hwmon->power.sensors[channel].label;
+ break;
+ case hwmon_fan:
+ *str = hwmon->fan.fans[channel].label;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/*
+ * A number of sensors report data in a 48.16 fixed-point decimal format that is
+ * not used by any other function of the SMC.
+ */
+static int macsmc_hwmon_read_ioft_scaled(struct apple_smc *smc, smc_key key,
+ u64 *p, int scale)
+{
+ u64 val;
+ int ret;
+
+ ret = apple_smc_read_u64(smc, key, &val);
+ if (ret < 0)
+ return ret;
+
+ *p = mult_frac(val, scale, 65536);
+
+ return 0;
+}
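
As a concrete check of the 48.16 conversion above: a raw reading of 0x18000 is 1.5 in 48.16 fixed point (0x8000 / 65536 = 0.5), so with scale = 1000 the helper returns 1500. A minimal standalone rendition of the same arithmetic (mult_frac() additionally splits the multiply to avoid intermediate overflow, which this toy version skips):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t raw = 0x18000;	/* 1.5 in 48.16 fixed point */
		int scale = 1000;

		printf("%llu\n", (unsigned long long)(raw * scale / 65536)); /* 1500 */
		return 0;
	}
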
+
+/*
+ * Many sensors report their data as IEEE-754 floats. No other SMC function uses
+ * them.
+ */
+static int macsmc_hwmon_read_f32_scaled(struct apple_smc *smc, smc_key key,
+ int *p, int scale)
+{
+ u32 fval;
+ u64 val;
+ int ret, exp;
+
+ ret = apple_smc_read_u32(smc, key, &fval);
+ if (ret < 0)
+ return ret;
+
+ val = ((u64)((fval & FLT_MANT_MASK) | BIT(23)));
+ exp = ((fval >> 23) & 0xff) - FLT_EXP_BIAS - FLT_MANT_BIAS;
+
+ /* We never have negatively scaled SMC floats */
+ val *= scale;
+
+ if (exp > 63)
+ val = U64_MAX;
+ else if (exp < -63)
+ val = 0;
+ else if (exp < 0)
+ val >>= -exp;
+ else if (exp != 0 && (val & ~((1UL << (64 - exp)) - 1))) /* overflow */
+ val = U64_MAX;
+ else
+ val <<= exp;
+
+ if (fval & FLT_SIGN_MASK) {
+ if (val > (-(s64)INT_MIN))
+ *p = INT_MIN;
+ else
+ *p = -val;
+ } else {
+ if (val > INT_MAX)
+ *p = INT_MAX;
+ else
+ *p = val;
+ }
+
+ return 0;
+}
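
To see the fixed-point decode above in action: 123.0f is 0x42F60000, i.e. mantissa 0x760000 with biased exponent 133, so the routine computes (0xF60000 * 1000) >> 17 = 123000 for scale = 1000. A standalone rendition of the same steps, with the driver's constants inlined:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t fval = 0x42F60000;	/* IEEE-754 encoding of 123.0f */
		uint64_t val = (fval & 0x7FFFFF) | (1u << 23); /* implicit leading 1 */
		int exp = ((fval >> 23) & 0xff) - 127 - 23;

		val *= 1000;			/* scale to milli-units */
		printf("%llu\n",
		       (unsigned long long)(exp < 0 ? val >> -exp : val << exp)); /* 123000 */
		return 0;
	}
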
+
+/*
+ * The SMC has keys of multiple types, denoted by a FourCC of the same format
+ * as the key ID. We don't know what data type a key encodes until we poke at it.
+ */
+static int macsmc_hwmon_read_key(struct apple_smc *smc,
+ struct macsmc_hwmon_sensor *sensor, int scale,
+ long *val)
+{
+ int ret;
+
+ switch (sensor->info.type_code) {
+ /* 32-bit IEEE 754 float */
+ case __SMC_KEY('f', 'l', 't', ' '): {
+		int flt_ = 0;
+
+ ret = macsmc_hwmon_read_f32_scaled(smc, sensor->macsmc_key,
+ &flt_, scale);
+ if (ret)
+ return ret;
+
+ *val = flt_;
+ break;
+ }
+ /* 48.16 fixed point decimal */
+ case __SMC_KEY('i', 'o', 'f', 't'): {
+ u64 ioft = 0;
+
+ ret = macsmc_hwmon_read_ioft_scaled(smc, sensor->macsmc_key,
+ &ioft, scale);
+ if (ret)
+ return ret;
+
+ *val = (long)ioft;
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int macsmc_hwmon_write_f32(struct apple_smc *smc, smc_key key, int value)
+{
+ u64 val;
+ u32 fval = 0;
+ int exp = 0, neg;
+
+ val = abs(value);
+ neg = val != value;
+
+ if (val) {
+		int msb = __fls(val);
+
+		if (msb > FLT_MANT_BIAS)
+			val >>= msb - FLT_MANT_BIAS;
+		else if (msb < FLT_MANT_BIAS)
+			val <<= FLT_MANT_BIAS - msb;
+		exp = msb;
+
+ fval = FIELD_PREP(FLT_SIGN_MASK, neg) |
+ FIELD_PREP(FLT_EXP_MASK, exp + FLT_EXP_BIAS) |
+ FIELD_PREP(FLT_MANT_MASK, val);
+ }
+
+ return apple_smc_write_u32(smc, key, fval);
+}
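
A quick round-trip check of the normalization above: the integer 123 has its most significant bit at position 6, so the mantissa is shifted left by 17 (23 - 6) and the stored exponent field becomes 6 + 127 = 133, giving 0x42F60000, which the read path decodes back to 123. A standalone version of the encode step:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t val = 123;	/* MSB at bit 6 */
		int msb = 6, exp = msb;

		val <<= 23 - msb;	/* normalize mantissa so bit 23 is the implicit 1 */
		printf("0x%08X\n",
		       (uint32_t)(((exp + 127) << 23) | (val & 0x7FFFFF))); /* 0x42F60000 */
		return 0;
	}
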
+
+static int macsmc_hwmon_write_key(struct apple_smc *smc,
+ struct macsmc_hwmon_sensor *sensor, long val)
+{
+ switch (sensor->info.type_code) {
+ /* 32-bit IEEE 754 float */
+ case __SMC_KEY('f', 'l', 't', ' '):
+ return macsmc_hwmon_write_f32(smc, sensor->macsmc_key, val);
+ /* unsigned 8-bit integer */
+ case __SMC_KEY('u', 'i', '8', ' '):
+ return apple_smc_write_u8(smc, sensor->macsmc_key, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int macsmc_hwmon_read_fan(struct macsmc_hwmon *hwmon, u32 attr, int chan,
+ long *val)
+{
+ switch (attr) {
+ case hwmon_fan_input:
+ return macsmc_hwmon_read_key(hwmon->smc,
+ &hwmon->fan.fans[chan].now, 1, val);
+ case hwmon_fan_min:
+ return macsmc_hwmon_read_key(hwmon->smc,
+ &hwmon->fan.fans[chan].min, 1, val);
+ case hwmon_fan_max:
+ return macsmc_hwmon_read_key(hwmon->smc,
+ &hwmon->fan.fans[chan].max, 1, val);
+ case hwmon_fan_target:
+ return macsmc_hwmon_read_key(hwmon->smc,
+ &hwmon->fan.fans[chan].set, 1, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int macsmc_hwmon_write_fan(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct macsmc_hwmon *hwmon = dev_get_drvdata(dev);
+ long min, max;
+ int ret;
+
+ if (!fan_control || hwmon->fan.fans[channel].mode.macsmc_key == 0)
+ return -EOPNOTSUPP;
+
+ /*
+	 * The SMC does not sanity-check requested fan speeds, so we must.
+ */
+ ret = macsmc_hwmon_read_key(hwmon->smc, &hwmon->fan.fans[channel].min,
+ 1, &min);
+ if (ret)
+ return ret;
+
+ ret = macsmc_hwmon_read_key(hwmon->smc, &hwmon->fan.fans[channel].max,
+ 1, &max);
+ if (ret)
+ return ret;
+
+ if (val >= min && val <= max) {
+ if (!hwmon->fan.fans[channel].manual) {
+ /* Write 1 to mode key for manual control */
+ ret = macsmc_hwmon_write_key(hwmon->smc,
+ &hwmon->fan.fans[channel].mode, 1);
+ if (ret < 0)
+ return ret;
+
+ hwmon->fan.fans[channel].manual = true;
+ }
+ return macsmc_hwmon_write_key(hwmon->smc,
+ &hwmon->fan.fans[channel].set, val);
+ } else if (!val) {
+ if (hwmon->fan.fans[channel].manual) {
+ ret = macsmc_hwmon_write_key(hwmon->smc,
+ &hwmon->fan.fans[channel].mode, 0);
+ if (ret < 0)
+ return ret;
+
+ hwmon->fan.fans[channel].manual = false;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int macsmc_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct macsmc_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret = 0;
+
+ switch (type) {
+ case hwmon_temp:
+ ret = macsmc_hwmon_read_key(hwmon->smc,
+ &hwmon->temp.sensors[channel], 1000, val);
+ break;
+ case hwmon_in:
+ ret = macsmc_hwmon_read_key(hwmon->smc,
+ &hwmon->volt.sensors[channel], 1000, val);
+ break;
+ case hwmon_curr:
+ ret = macsmc_hwmon_read_key(hwmon->smc,
+ &hwmon->curr.sensors[channel], 1000, val);
+ break;
+ case hwmon_power:
+ /* SMC returns power in Watts with acceptable precision to scale to uW */
+ ret = macsmc_hwmon_read_key(hwmon->smc,
+ &hwmon->power.sensors[channel],
+ 1000000, val);
+ break;
+ case hwmon_fan:
+ ret = macsmc_hwmon_read_fan(hwmon, attr, channel, val);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int macsmc_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_fan:
+ return macsmc_hwmon_write_fan(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t macsmc_hwmon_fan_is_visible(const struct macsmc_hwmon_fan *fan,
+ u32 attr)
+{
+ if (fan->attrs & BIT(attr)) {
+ if (attr == hwmon_fan_target && fan_control && fan->mode.macsmc_key)
+ return 0644;
+
+ return 0444;
+ }
+
+ return 0;
+}
+
+static umode_t macsmc_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ const struct macsmc_hwmon *hwmon = data;
+ struct macsmc_hwmon_sensor *sensor;
+
+ switch (type) {
+ case hwmon_in:
+ sensor = &hwmon->volt.sensors[channel];
+ break;
+ case hwmon_curr:
+ sensor = &hwmon->curr.sensors[channel];
+ break;
+ case hwmon_power:
+ sensor = &hwmon->power.sensors[channel];
+ break;
+ case hwmon_temp:
+ sensor = &hwmon->temp.sensors[channel];
+ break;
+ case hwmon_fan:
+ return macsmc_hwmon_fan_is_visible(&hwmon->fan.fans[channel], attr);
+ default:
+ return 0;
+ }
+
+ /* Sensors only register ro attributes */
+ if (sensor->attrs & BIT(attr))
+ return 0444;
+
+ return 0;
+}
+
+static const struct hwmon_ops macsmc_hwmon_ops = {
+ .is_visible = macsmc_hwmon_is_visible,
+ .read = macsmc_hwmon_read,
+ .read_string = macsmc_hwmon_read_label,
+ .write = macsmc_hwmon_write,
+};
+
+/*
+ * Get the key metadata, including key data type, from the SMC.
+ */
+static int macsmc_hwmon_parse_key(struct device *dev, struct apple_smc *smc,
+ struct macsmc_hwmon_sensor *sensor,
+ const char *key)
+{
+ int ret;
+
+ ret = apple_smc_get_key_info(smc, _SMC_KEY(key), &sensor->info);
+ if (ret) {
+ dev_dbg(dev, "Failed to retrieve key info for %s\n", key);
+ return ret;
+ }
+
+ sensor->macsmc_key = _SMC_KEY(key);
+
+ return 0;
+}
+
+/*
+ * A sensor is a single key-value pair as made available by the SMC.
+ * The devicetree gives us the SMC key ID and a friendly name where the
+ * purpose of the sensor is known.
+ */
+static int macsmc_hwmon_create_sensor(struct device *dev, struct apple_smc *smc,
+ struct device_node *sensor_node,
+ struct macsmc_hwmon_sensor *sensor)
+{
+ const char *key, *label;
+ int ret;
+
+ ret = of_property_read_string(sensor_node, "apple,key-id", &key);
+ if (ret) {
+ dev_dbg(dev, "Could not find apple,key-id in sensor node\n");
+ return ret;
+ }
+
+ ret = macsmc_hwmon_parse_key(dev, smc, sensor, key);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_string(sensor_node, "label", &label);
+ if (ret)
+ dev_dbg(dev, "No label found for sensor %s\n", key);
+ else
+ strscpy_pad(sensor->label, label, sizeof(sensor->label));
+
+ return 0;
+}
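
For reference, the shape of sensor node this parser expects: apple,key-id and label are the two properties read above, and the node-name prefix selects the hwmon type in macsmc_hwmon_populate_sensors() further down. The concrete key and label below are illustrative, not taken from a real devicetree:

	/*
	 * temperature-cpu {
	 *         apple,key-id = "Tp01";	// hypothetical SMC key
	 *         label = "CPU P-core 1";
	 * };
	 */
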
+
+/*
+ * Fan data is exposed by the SMC as multiple sensors.
+ *
+ * The devicetree schema reuses apple,key-id for the actual fan speed sensor.
+ * Min, max and target keys do not need labels, so we can reuse label
+ * for naming the entire fan.
+ */
+static int macsmc_hwmon_create_fan(struct device *dev, struct apple_smc *smc,
+ struct device_node *fan_node,
+ struct macsmc_hwmon_fan *fan)
+{
+ const char *label, *now, *min, *max, *set, *mode;
+ int ret;
+
+ ret = of_property_read_string(fan_node, "apple,key-id", &now);
+ if (ret) {
+ dev_err(dev, "apple,key-id not found in fan node!\n");
+ return ret;
+ }
+
+ ret = macsmc_hwmon_parse_key(dev, smc, &fan->now, now);
+ if (ret)
+ return ret;
+
+ fan->attrs = HWMON_F_INPUT;
+
+ ret = of_property_read_string(fan_node, "label", &label);
+ if (ret) {
+ dev_dbg(dev, "No label found for fan %s\n", now);
+ } else {
+ strscpy_pad(fan->label, label, sizeof(fan->label));
+ fan->attrs |= HWMON_F_LABEL;
+ }
+
+ /* The following keys are not required to simply monitor fan speed */
+ if (!of_property_read_string(fan_node, "apple,fan-minimum", &min)) {
+ ret = macsmc_hwmon_parse_key(dev, smc, &fan->min, min);
+ if (ret)
+ return ret;
+
+ fan->attrs |= HWMON_F_MIN;
+ }
+
+ if (!of_property_read_string(fan_node, "apple,fan-maximum", &max)) {
+ ret = macsmc_hwmon_parse_key(dev, smc, &fan->max, max);
+ if (ret)
+ return ret;
+
+ fan->attrs |= HWMON_F_MAX;
+ }
+
+ if (!of_property_read_string(fan_node, "apple,fan-target", &set)) {
+ ret = macsmc_hwmon_parse_key(dev, smc, &fan->set, set);
+ if (ret)
+ return ret;
+
+ fan->attrs |= HWMON_F_TARGET;
+ }
+
+ if (!of_property_read_string(fan_node, "apple,fan-mode", &mode)) {
+ ret = macsmc_hwmon_parse_key(dev, smc, &fan->mode, mode);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialise fan control mode to automatic */
+ fan->manual = false;
+
+ return 0;
+}
+
+static int macsmc_hwmon_populate_sensors(struct macsmc_hwmon *hwmon,
+ struct device_node *hwmon_node)
+{
+ struct device_node *key_node __maybe_unused;
+ struct macsmc_hwmon_sensor *sensor;
+ u32 n_current = 0, n_fan = 0, n_power = 0, n_temperature = 0, n_voltage = 0;
+
+ for_each_child_of_node_with_prefix(hwmon_node, key_node, "current-") {
+ n_current++;
+ }
+
+ if (n_current) {
+ hwmon->curr.sensors = devm_kcalloc(hwmon->dev, n_current,
+ sizeof(struct macsmc_hwmon_sensor), GFP_KERNEL);
+ if (!hwmon->curr.sensors)
+ return -ENOMEM;
+
+ for_each_child_of_node_with_prefix(hwmon_node, key_node, "current-") {
+ sensor = &hwmon->curr.sensors[hwmon->curr.count];
+ if (!macsmc_hwmon_create_sensor(hwmon->dev, hwmon->smc, key_node, sensor)) {
+ sensor->attrs = HWMON_C_INPUT;
+
+ if (*sensor->label)
+ sensor->attrs |= HWMON_C_LABEL;
+
+ hwmon->curr.count++;
+ }
+ }
+ }
+
+ for_each_child_of_node_with_prefix(hwmon_node, key_node, "fan-") {
+ n_fan++;
+ }
+
+ if (n_fan) {
+ hwmon->fan.fans = devm_kcalloc(hwmon->dev, n_fan,
+ sizeof(struct macsmc_hwmon_fan), GFP_KERNEL);
+ if (!hwmon->fan.fans)
+ return -ENOMEM;
+
+ for_each_child_of_node_with_prefix(hwmon_node, key_node, "fan-") {
+ if (!macsmc_hwmon_create_fan(hwmon->dev, hwmon->smc, key_node,
+ &hwmon->fan.fans[hwmon->fan.count]))
+ hwmon->fan.count++;
+ }
+ }
+
+ for_each_child_of_node_with_prefix(hwmon_node, key_node, "power-") {
+ n_power++;
+ }
+
+ if (n_power) {
+ hwmon->power.sensors = devm_kcalloc(hwmon->dev, n_power,
+ sizeof(struct macsmc_hwmon_sensor), GFP_KERNEL);
+ if (!hwmon->power.sensors)
+ return -ENOMEM;
+
+ for_each_child_of_node_with_prefix(hwmon_node, key_node, "power-") {
+ sensor = &hwmon->power.sensors[hwmon->power.count];
+ if (!macsmc_hwmon_create_sensor(hwmon->dev, hwmon->smc, key_node, sensor)) {
+ sensor->attrs = HWMON_P_INPUT;
+
+ if (*sensor->label)
+ sensor->attrs |= HWMON_P_LABEL;
+
+ hwmon->power.count++;
+ }
+ }
+ }
+
+ for_each_child_of_node_with_prefix(hwmon_node, key_node, "temperature-") {
+ n_temperature++;
+ }
+
+ if (n_temperature) {
+ hwmon->temp.sensors = devm_kcalloc(hwmon->dev, n_temperature,
+ sizeof(struct macsmc_hwmon_sensor), GFP_KERNEL);
+ if (!hwmon->temp.sensors)
+ return -ENOMEM;
+
+ for_each_child_of_node_with_prefix(hwmon_node, key_node, "temperature-") {
+ sensor = &hwmon->temp.sensors[hwmon->temp.count];
+ if (!macsmc_hwmon_create_sensor(hwmon->dev, hwmon->smc, key_node, sensor)) {
+ sensor->attrs = HWMON_T_INPUT;
+
+ if (*sensor->label)
+ sensor->attrs |= HWMON_T_LABEL;
+
+ hwmon->temp.count++;
+ }
+ }
+ }
+
+ for_each_child_of_node_with_prefix(hwmon_node, key_node, "voltage-") {
+ n_voltage++;
+ }
+
+ if (n_voltage) {
+ hwmon->volt.sensors = devm_kcalloc(hwmon->dev, n_voltage,
+ sizeof(struct macsmc_hwmon_sensor), GFP_KERNEL);
+ if (!hwmon->volt.sensors)
+ return -ENOMEM;
+
+		for_each_child_of_node_with_prefix(hwmon_node, key_node, "voltage-") {
+			sensor = &hwmon->volt.sensors[hwmon->volt.count];
+ if (!macsmc_hwmon_create_sensor(hwmon->dev, hwmon->smc, key_node, sensor)) {
+ sensor->attrs = HWMON_I_INPUT;
+
+ if (*sensor->label)
+ sensor->attrs |= HWMON_I_LABEL;
+
+ hwmon->volt.count++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Fill config arrays; callers allocate one extra zeroed element as terminator */
+static void macsmc_hwmon_populate_configs(u32 *configs, const struct macsmc_hwmon_sensors *sensors)
+{
+ int idx;
+
+ for (idx = 0; idx < sensors->count; idx++)
+ configs[idx] = sensors->sensors[idx].attrs;
+}
+
+static void macsmc_hwmon_populate_fan_configs(u32 *configs, const struct macsmc_hwmon_fans *fans)
+{
+ int idx;
+
+ for (idx = 0; idx < fans->count; idx++)
+ configs[idx] = fans->fans[idx].attrs;
+}
+
+static const struct hwmon_channel_info *const macsmc_chip_channel_info =
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ);
+
+static int macsmc_hwmon_create_infos(struct macsmc_hwmon *hwmon)
+{
+ struct hwmon_channel_info *channel_info;
+ int i = 0;
+
+ /* chip */
+ hwmon->channel_infos[i++] = macsmc_chip_channel_info;
+
+ if (hwmon->curr.count) {
+ channel_info = &hwmon->curr.channel_info;
+ channel_info->type = hwmon_curr;
+ channel_info->config = devm_kcalloc(hwmon->dev, hwmon->curr.count + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!channel_info->config)
+ return -ENOMEM;
+
+ macsmc_hwmon_populate_configs((u32 *)channel_info->config, &hwmon->curr);
+ hwmon->channel_infos[i++] = channel_info;
+ }
+
+ if (hwmon->fan.count) {
+ channel_info = &hwmon->fan.channel_info;
+ channel_info->type = hwmon_fan;
+ channel_info->config = devm_kcalloc(hwmon->dev, hwmon->fan.count + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!channel_info->config)
+ return -ENOMEM;
+
+ macsmc_hwmon_populate_fan_configs((u32 *)channel_info->config, &hwmon->fan);
+ hwmon->channel_infos[i++] = channel_info;
+ }
+
+ if (hwmon->power.count) {
+ channel_info = &hwmon->power.channel_info;
+ channel_info->type = hwmon_power;
+ channel_info->config = devm_kcalloc(hwmon->dev, hwmon->power.count + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!channel_info->config)
+ return -ENOMEM;
+
+ macsmc_hwmon_populate_configs((u32 *)channel_info->config, &hwmon->power);
+ hwmon->channel_infos[i++] = channel_info;
+ }
+
+ if (hwmon->temp.count) {
+ channel_info = &hwmon->temp.channel_info;
+ channel_info->type = hwmon_temp;
+ channel_info->config = devm_kcalloc(hwmon->dev, hwmon->temp.count + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!channel_info->config)
+ return -ENOMEM;
+
+ macsmc_hwmon_populate_configs((u32 *)channel_info->config, &hwmon->temp);
+ hwmon->channel_infos[i++] = channel_info;
+ }
+
+ if (hwmon->volt.count) {
+ channel_info = &hwmon->volt.channel_info;
+ channel_info->type = hwmon_in;
+ channel_info->config = devm_kcalloc(hwmon->dev, hwmon->volt.count + 1,
+ sizeof(u32), GFP_KERNEL);
+ if (!channel_info->config)
+ return -ENOMEM;
+
+ macsmc_hwmon_populate_configs((u32 *)channel_info->config, &hwmon->volt);
+ hwmon->channel_infos[i++] = channel_info;
+ }
+
+ return 0;
+}
+
+static int macsmc_hwmon_probe(struct platform_device *pdev)
+{
+ struct apple_smc *smc = dev_get_drvdata(pdev->dev.parent);
+ struct macsmc_hwmon *hwmon;
+ int ret;
+
+ /*
+ * The MFD driver will try to probe us unconditionally. Some devices
+ * with the SMC do not have hwmon capabilities. Only probe if we have
+ * a hwmon node.
+ */
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon),
+ GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ hwmon->dev = &pdev->dev;
+ hwmon->smc = smc;
+
+ ret = macsmc_hwmon_populate_sensors(hwmon, hwmon->dev->of_node);
+ if (ret) {
+ dev_err(hwmon->dev, "Could not parse sensors\n");
+ return ret;
+ }
+
+ if (!hwmon->curr.count && !hwmon->fan.count &&
+ !hwmon->power.count && !hwmon->temp.count &&
+ !hwmon->volt.count) {
+ dev_err(hwmon->dev,
+ "No valid sensors found of any supported type\n");
+ return -ENODEV;
+ }
+
+ ret = macsmc_hwmon_create_infos(hwmon);
+ if (ret)
+ return ret;
+
+ hwmon->chip_info.ops = &macsmc_hwmon_ops;
+ hwmon->chip_info.info =
+ (const struct hwmon_channel_info *const *)&hwmon->channel_infos;
+
+ hwmon->hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "macsmc_hwmon", hwmon,
+ &hwmon->chip_info, NULL);
+ if (IS_ERR(hwmon->hwmon_dev))
+ return dev_err_probe(hwmon->dev, PTR_ERR(hwmon->hwmon_dev),
+ "Probing SMC hwmon device failed\n");
+
+ dev_dbg(hwmon->dev, "Registered SMC hwmon device. Sensors:\n");
+ dev_dbg(hwmon->dev,
+ "Current: %d, Fans: %d, Power: %d, Temperature: %d, Voltage: %d",
+ hwmon->curr.count, hwmon->fan.count,
+ hwmon->power.count, hwmon->temp.count,
+ hwmon->volt.count);
+
+ return 0;
+}
+
+static const struct of_device_id macsmc_hwmon_of_table[] = {
+ { .compatible = "apple,smc-hwmon" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, macsmc_hwmon_of_table);
+
+static struct platform_driver macsmc_hwmon_driver = {
+ .probe = macsmc_hwmon_probe,
+ .driver = {
+ .name = "macsmc-hwmon",
+ .of_match_table = macsmc_hwmon_of_table,
+ },
+};
+module_platform_driver(macsmc_hwmon_driver);
+
+MODULE_DESCRIPTION("Apple Silicon SMC hwmon driver");
+MODULE_AUTHOR("James Calligeros <jcalligeros99@gmail.com>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/hwmon/max127.c b/drivers/hwmon/max127.c
index a9aab8862f5e..5102d86d2619 100644
--- a/drivers/hwmon/max127.c
+++ b/drivers/hwmon/max127.c
@@ -45,7 +45,6 @@
#define MAX127_SIGN_BIT BIT(11)
struct max127_data {
- struct mutex lock;
struct i2c_client *client;
u8 ctrl_byte[MAX127_NUM_CHANNELS];
};
@@ -121,21 +120,16 @@ static int max127_read_input(struct max127_data *data, int channel, long *val)
struct i2c_client *client = data->client;
u8 ctrl_byte = data->ctrl_byte[channel];
- mutex_lock(&data->lock);
-
status = max127_select_channel(client, ctrl_byte);
if (status)
- goto exit;
+ return status;
status = max127_read_channel(client, &raw);
if (status)
- goto exit;
+ return status;
*val = max127_process_raw(ctrl_byte, raw);
-
-exit:
- mutex_unlock(&data->lock);
- return status;
+ return 0;
}
static int max127_read_min(struct max127_data *data, int channel, long *val)
@@ -170,8 +164,6 @@ static int max127_write_min(struct max127_data *data, int channel, long val)
{
u8 ctrl;
- mutex_lock(&data->lock);
-
ctrl = data->ctrl_byte[channel];
if (val <= -MAX127_FULL_RANGE) {
ctrl |= (MAX127_CTRL_RNG | MAX127_CTRL_BIP);
@@ -182,23 +174,15 @@ static int max127_write_min(struct max127_data *data, int channel, long val)
ctrl &= ~MAX127_CTRL_BIP;
}
data->ctrl_byte[channel] = ctrl;
-
- mutex_unlock(&data->lock);
-
return 0;
}
static int max127_write_max(struct max127_data *data, int channel, long val)
{
- mutex_lock(&data->lock);
-
if (val >= MAX127_FULL_RANGE)
data->ctrl_byte[channel] |= MAX127_CTRL_RNG;
else
data->ctrl_byte[channel] &= ~MAX127_CTRL_RNG;
-
- mutex_unlock(&data->lock);
-
return 0;
}
@@ -315,7 +299,6 @@ static int max127_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- mutex_init(&data->lock);
for (i = 0; i < ARRAY_SIZE(data->ctrl_byte); i++)
data->ctrl_byte[i] = (MAX127_CTRL_START |
MAX127_SET_CHANNEL(i));
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 0ccb5eb596fc..4c9e7892a73c 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -216,12 +216,13 @@ static ssize_t max16065_current_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct max16065_data *data = max16065_update_device(dev);
+ int curr_sense = data->curr_sense;
- if (unlikely(data->curr_sense < 0))
- return data->curr_sense;
+ if (unlikely(curr_sense < 0))
+ return curr_sense;
return sysfs_emit(buf, "%d\n",
- ADC_TO_CURR(data->curr_sense, data->curr_gain));
+ ADC_TO_CURR(curr_sense, data->curr_gain));
}
static ssize_t max16065_limit_store(struct device *dev,
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c
index f56913327004..4f6171a17d9f 100644
--- a/drivers/hwmon/max31790.c
+++ b/drivers/hwmon/max31790.c
@@ -57,7 +57,6 @@
*/
struct max31790_data {
struct i2c_client *client;
- struct mutex update_lock;
bool valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
@@ -74,30 +73,27 @@ static struct max31790_data *max31790_update_device(struct device *dev)
{
struct max31790_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- struct max31790_data *ret = data;
- int i;
- int rv;
-
- mutex_lock(&data->update_lock);
+ int i, rv;
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ data->valid = false;
rv = i2c_smbus_read_byte_data(client,
MAX31790_REG_FAN_FAULT_STATUS1);
if (rv < 0)
- goto abort;
+ return ERR_PTR(rv);
data->fault_status |= rv & 0x3F;
rv = i2c_smbus_read_byte_data(client,
MAX31790_REG_FAN_FAULT_STATUS2);
if (rv < 0)
- goto abort;
+ return ERR_PTR(rv);
data->fault_status |= (rv & 0x3F) << 6;
for (i = 0; i < NR_CHANNEL; i++) {
rv = i2c_smbus_read_word_swapped(client,
MAX31790_REG_TACH_COUNT(i));
if (rv < 0)
- goto abort;
+ return ERR_PTR(rv);
data->tach[i] = rv;
if (data->fan_config[i]
@@ -106,19 +102,19 @@ static struct max31790_data *max31790_update_device(struct device *dev)
MAX31790_REG_TACH_COUNT(NR_CHANNEL
+ i));
if (rv < 0)
- goto abort;
+ return ERR_PTR(rv);
data->tach[NR_CHANNEL + i] = rv;
} else {
rv = i2c_smbus_read_word_swapped(client,
MAX31790_REG_PWM_DUTY_CYCLE(i));
if (rv < 0)
- goto abort;
+ return ERR_PTR(rv);
data->pwm[i] = rv;
rv = i2c_smbus_read_word_swapped(client,
MAX31790_REG_TARGET_COUNT(i));
if (rv < 0)
- goto abort;
+ return ERR_PTR(rv);
data->target_count[i] = rv;
}
}
@@ -126,16 +122,7 @@ static struct max31790_data *max31790_update_device(struct device *dev)
data->last_updated = jiffies;
data->valid = true;
}
- goto done;
-
-abort:
- data->valid = false;
- ret = ERR_PTR(rv);
-
-done:
- mutex_unlock(&data->update_lock);
-
- return ret;
+ return data;
}
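
With the goto bookkeeping gone, update failures now propagate as ERR_PTR-encoded pointers, so callers check the return with IS_ERR() before touching the cached registers, the same pattern the accessors already use:

	struct max31790_data *data = max31790_update_device(dev);

	if (IS_ERR(data))
		return PTR_ERR(data);
	/* data->tach[]/pwm[]/target_count[] are now known to be fresh */
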
static const u8 tach_period[8] = { 1, 2, 4, 8, 16, 32, 32, 32 };
@@ -189,7 +176,6 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
*val = rpm;
return 0;
case hwmon_fan_fault:
- mutex_lock(&data->update_lock);
*val = !!(data->fault_status & (1 << channel));
data->fault_status &= ~(1 << channel);
/*
@@ -200,10 +186,9 @@ static int max31790_read_fan(struct device *dev, u32 attr, int channel,
if (*val) {
int reg = MAX31790_REG_TARGET_COUNT(channel % NR_CHANNEL);
- i2c_smbus_write_byte_data(data->client, reg,
- data->target_count[channel % NR_CHANNEL] >> 8);
+ return i2c_smbus_write_byte_data(data->client, reg,
+ data->target_count[channel % NR_CHANNEL] >> 8);
}
- mutex_unlock(&data->update_lock);
return 0;
case hwmon_fan_enable:
*val = !!(data->fan_config[channel] & MAX31790_FAN_CFG_TACH_INPUT_EN);
@@ -223,8 +208,6 @@ static int max31790_write_fan(struct device *dev, u32 attr, int channel,
u8 bits, fan_config;
int sr;
- mutex_lock(&data->update_lock);
-
switch (attr) {
case hwmon_fan_target:
val = clamp_val(val, FAN_RPM_MIN, FAN_RPM_MAX);
@@ -270,9 +253,6 @@ static int max31790_write_fan(struct device *dev, u32 attr, int channel,
err = -EOPNOTSUPP;
break;
}
-
- mutex_unlock(&data->update_lock);
-
return err;
}
@@ -338,8 +318,6 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
u8 fan_config;
int err = 0;
- mutex_lock(&data->update_lock);
-
switch (attr) {
case hwmon_pwm_input:
if (val < 0 || val > 255) {
@@ -389,9 +367,6 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel,
err = -EOPNOTSUPP;
break;
}
-
- mutex_unlock(&data->update_lock);
-
return err;
}
@@ -525,7 +500,6 @@ static int max31790_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- mutex_init(&data->update_lock);
/*
* Initialize the max31790 chip
diff --git a/drivers/hwmon/max31827.c b/drivers/hwmon/max31827.c
index a31c7b655da1..9b2e56c040df 100644
--- a/drivers/hwmon/max31827.c
+++ b/drivers/hwmon/max31827.c
@@ -10,7 +10,6 @@
#include <linux/delay.h>
#include <linux/hwmon.h>
#include <linux/i2c.h>
-#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -99,7 +98,6 @@ struct max31827_state {
- /*
- * Prevent simultaneous access to the i2c client.
- */
- struct mutex lock;
struct regmap *regmap;
bool enable;
unsigned int resolution;
@@ -123,30 +121,23 @@ static int shutdown_write(struct max31827_state *st, unsigned int reg,
* Before the Temperature Threshold Alarm, Alarm Hysteresis Threshold
* and Resolution bits from Configuration register are changed over I2C,
* the part must be in shutdown mode.
- *
- * Mutex is used to ensure, that some other process doesn't change the
- * configuration register.
*/
- mutex_lock(&st->lock);
-
if (!st->enable) {
if (!mask)
- ret = regmap_write(st->regmap, reg, val);
- else
- ret = regmap_update_bits(st->regmap, reg, mask, val);
- goto unlock;
+ return regmap_write(st->regmap, reg, val);
+ return regmap_update_bits(st->regmap, reg, mask, val);
}
ret = regmap_read(st->regmap, MAX31827_CONFIGURATION_REG, &cfg);
if (ret)
- goto unlock;
+ return ret;
cnv_rate = MAX31827_CONFIGURATION_CNV_RATE_MASK & cfg;
cfg = cfg & ~(MAX31827_CONFIGURATION_1SHOT_MASK |
MAX31827_CONFIGURATION_CNV_RATE_MASK);
ret = regmap_write(st->regmap, MAX31827_CONFIGURATION_REG, cfg);
if (ret)
- goto unlock;
+ return ret;
if (!mask)
ret = regmap_write(st->regmap, reg, val);
@@ -154,15 +145,11 @@ static int shutdown_write(struct max31827_state *st, unsigned int reg,
ret = regmap_update_bits(st->regmap, reg, mask, val);
if (ret)
- goto unlock;
-
- ret = regmap_update_bits(st->regmap, MAX31827_CONFIGURATION_REG,
- MAX31827_CONFIGURATION_CNV_RATE_MASK,
- cnv_rate);
+ return ret;
-unlock:
- mutex_unlock(&st->lock);
- return ret;
+ return regmap_update_bits(st->regmap, MAX31827_CONFIGURATION_REG,
+ MAX31827_CONFIGURATION_CNV_RATE_MASK,
+ cnv_rate);
}
static int write_alarm_val(struct max31827_state *st, unsigned int reg,
@@ -223,23 +210,13 @@ static int max31827_read(struct device *dev, enum hwmon_sensor_types type,
break;
case hwmon_temp_input:
- mutex_lock(&st->lock);
-
if (!st->enable) {
- /*
- * This operation requires mutex protection,
- * because the chip configuration should not
- * be changed during the conversion process.
- */
-
ret = regmap_update_bits(st->regmap,
MAX31827_CONFIGURATION_REG,
MAX31827_CONFIGURATION_1SHOT_MASK,
1);
- if (ret) {
- mutex_unlock(&st->lock);
+ if (ret)
return ret;
- }
msleep(max31827_conv_times[st->resolution]);
}
@@ -254,8 +231,6 @@ static int max31827_read(struct device *dev, enum hwmon_sensor_types type,
ret = regmap_read(st->regmap, MAX31827_T_REG, &uval);
- mutex_unlock(&st->lock);
-
if (ret)
break;
@@ -352,7 +327,6 @@ static int max31827_write(struct device *dev, enum hwmon_sensor_types type,
if (val >> 1)
return -EINVAL;
- mutex_lock(&st->lock);
/**
* The chip should not be enabled while a conversion is
* performed. Neither should the chip be enabled when
@@ -361,15 +335,11 @@ static int max31827_write(struct device *dev, enum hwmon_sensor_types type,
st->enable = val;
- ret = regmap_update_bits(st->regmap,
- MAX31827_CONFIGURATION_REG,
- MAX31827_CONFIGURATION_1SHOT_MASK |
- MAX31827_CONFIGURATION_CNV_RATE_MASK,
- MAX31827_DEVICE_ENABLE(val));
-
- mutex_unlock(&st->lock);
-
- return ret;
+ return regmap_update_bits(st->regmap,
+ MAX31827_CONFIGURATION_REG,
+ MAX31827_CONFIGURATION_1SHOT_MASK |
+ MAX31827_CONFIGURATION_CNV_RATE_MASK,
+ MAX31827_DEVICE_ENABLE(val));
case hwmon_temp_max:
return write_alarm_val(st, MAX31827_TH_REG, val);
@@ -623,8 +593,6 @@ static int max31827_probe(struct i2c_client *client)
if (!st)
return -ENOMEM;
- mutex_init(&st->lock);
-
st->regmap = devm_regmap_init_i2c(client, &max31827_regmap);
if (IS_ERR(st->regmap))
return dev_err_probe(dev, PTR_ERR(st->regmap),
diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c
index 13201fb755c9..4316dcdd03fc 100644
--- a/drivers/hwmon/max6620.c
+++ b/drivers/hwmon/max6620.c
@@ -130,7 +130,6 @@ static const u8 target_reg[] = {
struct max6620_data {
struct i2c_client *client;
- struct mutex update_lock;
bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
@@ -161,39 +160,36 @@ static int max6620_update_device(struct device *dev)
{
struct max6620_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
- int i;
- int ret = 0;
-
- mutex_lock(&data->update_lock);
+ int i, ret;
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
for (i = 0; i < 4; i++) {
ret = i2c_smbus_read_byte_data(client, config_reg[i]);
if (ret < 0)
- goto error;
+ return ret;
data->fancfg[i] = ret;
ret = i2c_smbus_read_byte_data(client, dyn_reg[i]);
if (ret < 0)
- goto error;
+ return ret;
data->fandyn[i] = ret;
ret = i2c_smbus_read_byte_data(client, tach_reg[i]);
if (ret < 0)
- goto error;
+ return ret;
data->tach[i] = (ret << 3) & 0x7f8;
ret = i2c_smbus_read_byte_data(client, tach_reg[i] + 1);
if (ret < 0)
- goto error;
+ return ret;
data->tach[i] |= (ret >> 5) & 0x7;
ret = i2c_smbus_read_byte_data(client, target_reg[i]);
if (ret < 0)
- goto error;
+ return ret;
data->target[i] = (ret << 3) & 0x7f8;
ret = i2c_smbus_read_byte_data(client, target_reg[i] + 1);
if (ret < 0)
- goto error;
+ return ret;
data->target[i] |= (ret >> 5) & 0x7;
}
@@ -204,16 +200,13 @@ static int max6620_update_device(struct device *dev)
*/
ret = i2c_smbus_read_byte_data(client, MAX6620_REG_FAULT);
if (ret < 0)
- goto error;
+ return ret;
data->fault |= (ret >> 4) & (ret & 0x0F);
data->last_updated = jiffies;
data->valid = true;
}
-
-error:
- mutex_unlock(&data->update_lock);
- return ret;
+ return 0;
}
static umode_t
@@ -261,7 +254,6 @@ max6620_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case hwmon_fan:
switch (attr) {
case hwmon_fan_alarm:
- mutex_lock(&data->update_lock);
*val = !!(data->fault & BIT(channel));
/* Setting TACH count to re-enable fan fault detection */
@@ -270,21 +262,15 @@ max6620_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
val2 = (data->target[channel] << 5) & 0xe0;
ret = i2c_smbus_write_byte_data(client,
target_reg[channel], val1);
- if (ret < 0) {
- mutex_unlock(&data->update_lock);
+ if (ret < 0)
return ret;
- }
ret = i2c_smbus_write_byte_data(client,
target_reg[channel] + 1, val2);
- if (ret < 0) {
- mutex_unlock(&data->update_lock);
+ if (ret < 0)
return ret;
- }
data->fault &= ~BIT(channel);
}
- mutex_unlock(&data->update_lock);
-
break;
case hwmon_fan_div:
*val = max6620_fan_div_from_reg(data->fandyn[channel]);
@@ -334,7 +320,6 @@ max6620_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
return ret;
data = dev_get_drvdata(dev);
client = data->client;
- mutex_lock(&data->update_lock);
switch (type) {
case hwmon_fan:
@@ -360,8 +345,7 @@ max6620_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
div = 5;
break;
default:
- ret = -EINVAL;
- goto error;
+ return -EINVAL;
}
data->fandyn[channel] &= 0x1F;
data->fandyn[channel] |= div << 5;
@@ -396,8 +380,6 @@ max6620_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
-error:
- mutex_unlock(&data->update_lock);
return ret;
}
@@ -478,7 +460,6 @@ static int max6620_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- mutex_init(&data->update_lock);
err = max6620_init_client(data);
if (err)
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index a06346496e1d..99140a2ca995 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -16,9 +16,7 @@
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/util_macros.h>
@@ -75,7 +73,6 @@ static const unsigned int freq_table[] = { 20, 33, 50, 100, 5000, 8333, 12500,
*/
struct max6639_data {
struct regmap *regmap;
- struct mutex update_lock;
/* Register values initialized only once */
u8 ppr[MAX6639_NUM_CHANNELS]; /* Pulses per rotation 0..3 for 1..4 ppr */
@@ -249,16 +246,11 @@ static int max6639_write_fan(struct device *dev, u32 attr, int channel,
if (val <= 0 || val > 4)
return -EINVAL;
- mutex_lock(&data->update_lock);
/* Set Fan pulse per revolution */
err = max6639_set_ppr(data, channel, val);
- if (err < 0) {
- mutex_unlock(&data->update_lock);
+ if (err < 0)
return err;
- }
data->ppr[channel] = val;
-
- mutex_unlock(&data->update_lock);
return 0;
default:
return -EOPNOTSUPP;
@@ -320,21 +312,17 @@ static int max6639_write_pwm(struct device *dev, u32 attr, int channel,
case hwmon_pwm_input:
if (val < 0 || val > 255)
return -EINVAL;
- err = regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(channel),
- val * 120 / 255);
- return err;
+ return regmap_write(data->regmap, MAX6639_REG_TARGTDUTY(channel),
+ val * 120 / 255);
case hwmon_pwm_freq:
val = clamp_val(val, 0, 25000);
i = find_closest(val, freq_table, ARRAY_SIZE(freq_table));
- mutex_lock(&data->update_lock);
err = regmap_update_bits(data->regmap, MAX6639_REG_FAN_CONFIG3(channel),
MAX6639_FAN_CONFIG3_FREQ_MASK, i);
- if (err < 0) {
- mutex_unlock(&data->update_lock);
+ if (err < 0)
return err;
- }
if (i >> 2)
err = regmap_set_bits(data->regmap, MAX6639_REG_GCONFIG,
@@ -343,7 +331,6 @@ static int max6639_write_pwm(struct device *dev, u32 attr, int channel,
err = regmap_clear_bits(data->regmap, MAX6639_REG_GCONFIG,
MAX6639_GCONFIG_PWM_FREQ_HI);
- mutex_unlock(&data->update_lock);
return err;
default:
return -EOPNOTSUPP;
@@ -753,8 +740,6 @@ static int max6639_probe(struct i2c_client *client)
}
}
- mutex_init(&data->update_lock);
-
/* Initialize the max6639 chip */
err = max6639_init_client(client, data);
if (err < 0)
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 0735a1d2c20f..dd906cf491ca 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -13,7 +13,6 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -91,8 +90,6 @@ struct max6697_data {
int temp_offset; /* in degrees C */
- struct mutex update_lock;
-
#define MAX6697_TEMP_INPUT 0
#define MAX6697_TEMP_EXT 1
#define MAX6697_TEMP_MAX 2
@@ -302,7 +299,6 @@ static int max6697_write(struct device *dev, enum hwmon_sensor_types type,
val = clamp_val(val, 0, 255);
return regmap_write(regmap, MAX6697_REG_MIN, val);
case hwmon_temp_offset:
- mutex_lock(&data->update_lock);
val = clamp_val(val, MAX6581_OFFSET_MIN, MAX6581_OFFSET_MAX);
val = DIV_ROUND_CLOSEST(val, 250);
if (!val) { /* disable this (and only this) channel */
@@ -313,11 +309,9 @@ static int max6697_write(struct device *dev, enum hwmon_sensor_types type,
ret = regmap_set_bits(regmap, MAX6581_REG_OFFSET_SELECT,
BIT(channel - 1));
if (ret)
- goto unlock;
+ return ret;
ret = regmap_write(regmap, MAX6581_REG_OFFSET, val);
}
-unlock:
- mutex_unlock(&data->update_lock);
return ret;
default:
return -EOPNOTSUPP;
@@ -548,7 +542,7 @@ static int max6697_probe(struct i2c_client *client)
struct regmap *regmap;
int err;
- regmap = regmap_init_i2c(client, &max6697_regmap_config);
+ regmap = devm_regmap_init_i2c(client, &max6697_regmap_config);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
@@ -559,7 +553,6 @@ static int max6697_probe(struct i2c_client *client)
data->regmap = regmap;
data->type = (uintptr_t)i2c_get_match_data(client);
data->chip = &max6697_chip_data[data->type];
- mutex_init(&data->update_lock);
err = max6697_init_chip(client->dev.of_node, data);
if (err)
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index c25a54d5b39a..137a90dd2075 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -63,12 +63,14 @@ struct mlxreg_fan;
* @reg: register offset;
* @mask: fault mask;
* @prsnt: present register offset;
+ * @shift: tacho presence bit shift;
*/
struct mlxreg_fan_tacho {
bool connected;
u32 reg;
u32 mask;
u32 prsnt;
+ u32 shift;
};
/*
@@ -113,8 +115,8 @@ struct mlxreg_fan {
int divider;
};
-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
- unsigned long state);
+static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state, bool thermal);
static int
mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
@@ -143,8 +145,10 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
/*
* Map channel to presence bit - drawer can be equipped with
* one or a few FANs, while presence is indicated per drawer.
+ * Shift channel value if necessary to align with register value.
*/
- if (BIT(channel / fan->tachos_per_drwr) & regval) {
+ if (BIT(rol32(channel, tacho->shift) / fan->tachos_per_drwr) &
+ regval) {
/* FAN is not connected - return zero for FAN speed. */
*val = 0;
return 0;
@@ -224,8 +228,9 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
* last thermal state.
*/
if (pwm->last_hwmon_state >= pwm->last_thermal_state)
- return mlxreg_fan_set_cur_state(pwm->cdev,
- pwm->last_hwmon_state);
+ return _mlxreg_fan_set_cur_state(pwm->cdev,
+ pwm->last_hwmon_state,
+ false);
return 0;
}
return regmap_write(fan->regmap, pwm->reg, val);
@@ -357,9 +362,8 @@ static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
-static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
- unsigned long state)
-
+static int _mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state, bool thermal)
{
struct mlxreg_fan_pwm *pwm = cdev->devdata;
struct mlxreg_fan *fan = pwm->fan;
@@ -369,7 +373,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
return -EINVAL;
/* Save thermal state. */
- pwm->last_thermal_state = state;
+ if (thermal)
+ pwm->last_thermal_state = state;
state = max_t(unsigned long, state, pwm->last_hwmon_state);
err = regmap_write(fan->regmap, pwm->reg,
@@ -381,6 +386,13 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
return 0;
}
+static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ return _mlxreg_fan_set_cur_state(cdev, state, true);
+}
+
static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
.get_max_state = mlxreg_fan_get_max_state,
.get_cur_state = mlxreg_fan_get_cur_state,
@@ -400,7 +412,7 @@ static int mlxreg_fan_connect_verify(struct mlxreg_fan *fan,
return err;
}
- return !!(regval & data->bit);
+ return data->slot ? (data->slot <= regval ? 1 : 0) : !!(regval & data->bit);
}
static int mlxreg_pwm_connect_verify(struct mlxreg_fan *fan,
@@ -537,7 +549,15 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
return err;
}
- drwr_avail = hweight32(regval);
+ /*
+ * The number of drawers can be specified in registers either by a
+ * counter (newer systems) or by a bitmask (older systems). When the data
+ * is provided by a counter, this is indicated through the 'version' field.
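+ * For example, a raw value of 0x0f means 15 drawers when provided by a
+ * counter, but hweight32(0x0f) = 4 drawers when provided as a bitmask.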
+ */
+ if (pdata->version)
+ drwr_avail = regval;
+ else
+ drwr_avail = hweight32(regval);
if (!tacho_avail || !drwr_avail || tacho_avail < drwr_avail) {
dev_err(fan->dev, "Configuration is invalid: drawers num %d tachos num %d\n",
drwr_avail, tacho_avail);
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 7848198f8996..32c1e42e1278 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -14,7 +14,6 @@
#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
diff --git a/drivers/hwmon/nct6694-hwmon.c b/drivers/hwmon/nct6694-hwmon.c
new file mode 100644
index 000000000000..6dcf22ca5018
--- /dev/null
+++ b/drivers/hwmon/nct6694-hwmon.c
@@ -0,0 +1,949 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NCT6694 HWMON driver based on USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * Module type used in USB commands addressed to the NCT6694 report
+ * channel.
+ */
+#define NCT6694_RPT_MOD 0xFF
+
+/*
+ * Report channel
+ *
+ * The report channel returns the current state of the monitored inputs:
+ * voltage, temperature, fan speed, and PWM.
+ */
+#define NCT6694_VIN_IDX(x) (0x00 + (x))
+#define NCT6694_TIN_IDX(x) \
+ ({ typeof(x) _x = (x); \
+ ((_x) < 10) ? (0x10 + ((_x) * 2)) : \
+ (0x30 + (((_x) - 10) * 2)); })
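+/*
+ * TIN registers are two bytes wide and split across two ranges: channels
+ * 0..9 map to 0x10, 0x12, ..., 0x22, and channels 10 and up continue at
+ * 0x30, 0x32, ...; e.g. NCT6694_TIN_IDX(3) is 0x16 and NCT6694_TIN_IDX(12)
+ * is 0x34.
+ */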
+#define NCT6694_FIN_IDX(x) (0x50 + ((x) * 2))
+#define NCT6694_PWM_IDX(x) (0x70 + (x))
+#define NCT6694_VIN_STS(x) (0x68 + (x))
+#define NCT6694_TIN_STS(x) (0x6A + (x))
+#define NCT6694_FIN_STS(x) (0x6E + (x))
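+/*
+ * Each *_STS byte carries one alarm bit per channel; readers index the
+ * byte with (channel / 8) and test BIT(channel % 8).
+ */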
+
+/*
+ * Module type used in USB commands addressed to the NCT6694 HWMON
+ * controller.
+ */
+#define NCT6694_HWMON_MOD 0x00
+
+/* Command 00h - Hardware Monitor Control */
+#define NCT6694_HWMON_CONTROL 0x00
+#define NCT6694_HWMON_CONTROL_SEL 0x00
+
+/* Command 02h - Alarm Control */
+#define NCT6694_HWMON_ALARM 0x02
+#define NCT6694_HWMON_ALARM_SEL 0x00
+
+/*
+ * Module type used in USB commands addressed to the NCT6694 PWM
+ * controller.
+ */
+#define NCT6694_PWM_MOD 0x01
+
+/* PWM Command - Manual Control */
+#define NCT6694_PWM_CONTROL 0x01
+#define NCT6694_PWM_CONTROL_SEL 0x00
+
+#define NCT6694_FREQ_FROM_REG(reg) ((reg) * 25000 / 255)
+#define NCT6694_FREQ_TO_REG(val) \
+ (DIV_ROUND_CLOSEST(clamp_val((val), 100, 25000) * 255, 25000))
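+/*
+ * PWM frequency registers scale linearly up to 25 kHz in 255 steps,
+ * e.g. a register value of 128 reads back as 128 * 25000 / 255 = 12549 Hz,
+ * and writing 25000 Hz stores 255.
+ */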
+
+#define NCT6694_LSB_REG_MASK GENMASK(7, 5)
+#define NCT6694_TIN_HYST_MASK GENMASK(7, 5)
+
+enum nct6694_hwmon_temp_mode {
+ NCT6694_HWMON_TWOTIME_IRQ = 0,
+ NCT6694_HWMON_ONETIME_IRQ,
+ NCT6694_HWMON_REALTIME_IRQ,
+ NCT6694_HWMON_COMPARE_IRQ,
+};
+
+struct __packed nct6694_hwmon_control {
+ u8 vin_en[2];
+ u8 tin_en[2];
+ u8 fin_en[2];
+ u8 pwm_en[2];
+ u8 reserved1[40];
+ u8 pwm_freq[10];
+ u8 reserved2[6];
+};
+
+struct __packed nct6694_hwmon_alarm {
+ u8 smi_ctrl;
+ u8 reserved1[15];
+ struct {
+ u8 hl;
+ u8 ll;
+ } vin_limit[16];
+ struct {
+ u8 hyst;
+ s8 hl;
+ } tin_cfg[32];
+ __be16 fin_ll[10];
+ u8 reserved2[4];
+};
+
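+/*
+ * mal_en/mal_val hold the per-channel manual-control enable bits and
+ * duty-cycle values (0..255) written by nct6694_pwm_write().
+ */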
+struct __packed nct6694_pwm_control {
+ u8 mal_en[2];
+ u8 mal_val[10];
+ u8 reserved[12];
+};
+
+union __packed nct6694_hwmon_rpt {
+ u8 vin;
+ struct {
+ u8 msb;
+ u8 lsb;
+ } tin;
+ __be16 fin;
+ u8 pwm;
+ u8 status;
+};
+
+union __packed nct6694_hwmon_msg {
+ struct nct6694_hwmon_alarm hwmon_alarm;
+ struct nct6694_pwm_control pwm_ctrl;
+};
+
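+/*
+ * The driver keeps single shared 'rpt' and 'msg' transfer buffers;
+ * 'lock' serializes all accesses to them and to 'hwmon_en'.
+ */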
+struct nct6694_hwmon_data {
+ struct nct6694 *nct6694;
+ struct mutex lock;
+ struct nct6694_hwmon_control hwmon_en;
+ union nct6694_hwmon_rpt *rpt;
+ union nct6694_hwmon_msg *msg;
+};
+
+static inline long in_from_reg(u8 reg)
+{
+ return reg * 16;
+}
+
+static inline u8 in_to_reg(long val)
+{
+ return DIV_ROUND_CLOSEST(val, 16);
+}
+
+static inline long temp_from_reg(s8 reg)
+{
+ return reg * 1000;
+}
+
+static inline s8 temp_to_reg(long val)
+{
+ return DIV_ROUND_CLOSEST(val, 1000);
+}
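+
+/*
+ * VIN values use 16 mV per LSB; limit writes below are clamped to
+ * 0..2032 mV (127 * 16). Temperature limits are stored in whole
+ * degrees Celsius.
+ */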
+
+#define NCT6694_HWMON_IN_CONFIG (HWMON_I_INPUT | HWMON_I_ENABLE | \
+ HWMON_I_MAX | HWMON_I_MIN | \
+ HWMON_I_ALARM)
+#define NCT6694_HWMON_TEMP_CONFIG (HWMON_T_INPUT | HWMON_T_ENABLE | \
+ HWMON_T_MAX | HWMON_T_MAX_HYST | \
+ HWMON_T_MAX_ALARM)
+#define NCT6694_HWMON_FAN_CONFIG (HWMON_F_INPUT | HWMON_F_ENABLE | \
+ HWMON_F_MIN | HWMON_F_MIN_ALARM)
+#define NCT6694_HWMON_PWM_CONFIG (HWMON_PWM_INPUT | HWMON_PWM_ENABLE | \
+ HWMON_PWM_FREQ)
+static const struct hwmon_channel_info *nct6694_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ NCT6694_HWMON_IN_CONFIG, /* VIN0 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN1 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN2 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN3 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN5 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN6 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN7 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN14 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN15 */
+ NCT6694_HWMON_IN_CONFIG, /* VIN16 */
+ NCT6694_HWMON_IN_CONFIG, /* VBAT */
+ NCT6694_HWMON_IN_CONFIG, /* VSB */
+ NCT6694_HWMON_IN_CONFIG, /* AVSB */
+ NCT6694_HWMON_IN_CONFIG, /* VCC */
+ NCT6694_HWMON_IN_CONFIG, /* VHIF */
+ NCT6694_HWMON_IN_CONFIG), /* VTT */
+
+ HWMON_CHANNEL_INFO(temp,
+ NCT6694_HWMON_TEMP_CONFIG, /* THR1 */
+ NCT6694_HWMON_TEMP_CONFIG, /* THR2 */
+ NCT6694_HWMON_TEMP_CONFIG, /* THR14 */
+ NCT6694_HWMON_TEMP_CONFIG, /* THR15 */
+ NCT6694_HWMON_TEMP_CONFIG, /* THR16 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP0 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP1 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP2 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP3 */
+ NCT6694_HWMON_TEMP_CONFIG, /* TDP4 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN0 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN1 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN2 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN3 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN4 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN5 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN6 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN7 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN8 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN9 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN10 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN11 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN12 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN13 */
+ NCT6694_HWMON_TEMP_CONFIG, /* DTIN14 */
+ NCT6694_HWMON_TEMP_CONFIG), /* DTIN15 */
+
+ HWMON_CHANNEL_INFO(fan,
+ NCT6694_HWMON_FAN_CONFIG, /* FIN0 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN1 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN2 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN3 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN4 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN5 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN6 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN7 */
+ NCT6694_HWMON_FAN_CONFIG, /* FIN8 */
+ NCT6694_HWMON_FAN_CONFIG), /* FIN9 */
+
+ HWMON_CHANNEL_INFO(pwm,
+ NCT6694_HWMON_PWM_CONFIG, /* PWM0 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM1 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM2 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM3 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM4 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM5 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM6 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM7 */
+ NCT6694_HWMON_PWM_CONFIG, /* PWM8 */
+ NCT6694_HWMON_PWM_CONFIG), /* PWM9 */
+ NULL
+};
+
+static int nct6694_in_read(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char vin_en;
+ int ret;
+
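+ /*
+ * Scope-based guard: the mutex is released automatically on every
+ * return path below.
+ */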
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_in_enable:
+ vin_en = data->hwmon_en.vin_en[(channel / 8)];
+ *val = !!(vin_en & BIT(channel % 8));
+
+ return 0;
+ case hwmon_in_input:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_VIN_IDX(channel)),
+ .len = cpu_to_le16(sizeof(data->rpt->vin))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->vin);
+ if (ret)
+ return ret;
+
+ *val = in_from_reg(data->rpt->vin);
+
+ return 0;
+ case hwmon_in_max:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ *val = in_from_reg(data->msg->hwmon_alarm.vin_limit[channel].hl);
+
+ return 0;
+ case hwmon_in_min:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ *val = in_from_reg(data->msg->hwmon_alarm.vin_limit[channel].ll);
+
+ return 0;
+ case hwmon_in_alarm:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_VIN_STS(channel / 8)),
+ .len = cpu_to_le16(sizeof(data->rpt->status))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->status);
+ if (ret)
+ return ret;
+
+ *val = !!(data->rpt->status & BIT(channel % 8));
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_temp_read(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char temp_en, temp_hyst;
+ signed char temp_max;
+ int ret, temp_raw;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ temp_en = data->hwmon_en.tin_en[channel / 8];
+ *val = !!(temp_en & BIT(channel % 8));
+
+ return 0;
+ case hwmon_temp_input:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_TIN_IDX(channel)),
+ .len = cpu_to_le16(sizeof(data->rpt->tin))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->tin);
+ if (ret)
+ return ret;
+
+ temp_raw = data->rpt->tin.msb << 3;
+ temp_raw |= FIELD_GET(NCT6694_LSB_REG_MASK, data->rpt->tin.lsb);
+
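+ /*
+ * temp_raw is an 11-bit two's-complement value (bit 10 is the sign
+ * bit, hence sign_extend32(temp_raw, 10)) with 0.125 degC per LSB.
+ */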
+ /* Real temperature (millidegrees Celsius) = temp_raw * 1000 * 0.125 */
+ *val = sign_extend32(temp_raw, 10) * 125;
+
+ return 0;
+ case hwmon_temp_max:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ *val = temp_from_reg(data->msg->hwmon_alarm.tin_cfg[channel].hl);
+
+ return 0;
+ case hwmon_temp_max_hyst:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ temp_max = data->msg->hwmon_alarm.tin_cfg[channel].hl;
+ temp_hyst = FIELD_GET(NCT6694_TIN_HYST_MASK,
+ data->msg->hwmon_alarm.tin_cfg[channel].hyst);
+ *val = temp_from_reg(temp_max - temp_hyst);
+
+ return 0;
+ case hwmon_temp_max_alarm:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_TIN_STS(channel / 8)),
+ .len = cpu_to_le16(sizeof(data->rpt->status))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->status);
+ if (ret)
+ return ret;
+
+ *val = !!(data->rpt->status & BIT(channel % 8));
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_fan_read(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char fanin_en;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_fan_enable:
+ fanin_en = data->hwmon_en.fin_en[channel / 8];
+ *val = !!(fanin_en & BIT(channel % 8));
+
+ return 0;
+ case hwmon_fan_input:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_FIN_IDX(channel)),
+ .len = cpu_to_le16(sizeof(data->rpt->fin))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->fin);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(data->rpt->fin);
+
+ return 0;
+ case hwmon_fan_min:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(data->msg->hwmon_alarm.fin_ll[channel]);
+
+ return 0;
+ case hwmon_fan_min_alarm:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_FIN_STS(channel / 8)),
+ .len = cpu_to_le16(sizeof(data->rpt->status))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->status);
+ if (ret)
+ return ret;
+
+ *val = !!(data->rpt->status & BIT(channel % 8));
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_pwm_read(struct device *dev, u32 attr, int channel,
+ long *val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char pwm_en;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_pwm_enable:
+ pwm_en = data->hwmon_en.pwm_en[channel / 8];
+ *val = !!(pwm_en & BIT(channel % 8));
+
+ return 0;
+ case hwmon_pwm_input:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_RPT_MOD,
+ .offset = cpu_to_le16(NCT6694_PWM_IDX(channel)),
+ .len = cpu_to_le16(sizeof(data->rpt->pwm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->rpt->pwm);
+ if (ret)
+ return ret;
+
+ *val = data->rpt->pwm;
+
+ return 0;
+ case hwmon_pwm_freq:
+ *val = NCT6694_FREQ_FROM_REG(data->hwmon_en.pwm_freq[channel]);
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_in_write(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_in_enable:
+ if (val == 0)
+ data->hwmon_en.vin_en[channel / 8] &= ~BIT(channel % 8);
+ else if (val == 1)
+ data->hwmon_en.vin_en[channel / 8] |= BIT(channel % 8);
+ else
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ case hwmon_in_max:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, 0, 2032);
+ data->msg->hwmon_alarm.vin_limit[channel].hl = in_to_reg(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ case hwmon_in_min:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, 0, 2032);
+ data->msg->hwmon_alarm.vin_limit[channel].ll = in_to_reg(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_temp_write(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ unsigned char temp_hyst;
+ signed char temp_max;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ if (val == 0)
+ data->hwmon_en.tin_en[channel / 8] &= ~BIT(channel % 8);
+ else if (val == 1)
+ data->hwmon_en.tin_en[channel / 8] |= BIT(channel % 8);
+ else
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ case hwmon_temp_max:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, -127000, 127000);
+ data->msg->hwmon_alarm.tin_cfg[channel].hl = temp_to_reg(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ case hwmon_temp_max_hyst:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
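+ /*
+ * The hysteresis is programmed as a 3-bit offset (0..7 degC) below
+ * the temperature limit, in bits 7:5 of the hyst register.
+ */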
+ val = clamp_val(val, -127000, 127000);
+ temp_max = data->msg->hwmon_alarm.tin_cfg[channel].hl;
+ temp_hyst = temp_max - temp_to_reg(val);
+ temp_hyst = clamp_val(temp_hyst, 0, 7);
+ data->msg->hwmon_alarm.tin_cfg[channel].hyst =
+ (data->msg->hwmon_alarm.tin_cfg[channel].hyst & ~NCT6694_TIN_HYST_MASK) |
+ FIELD_PREP(NCT6694_TIN_HYST_MASK, temp_hyst);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_fan_write(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_fan_enable:
+ if (val == 0)
+ data->hwmon_en.fin_en[channel / 8] &= ~BIT(channel % 8);
+ else if (val == 1)
+ data->hwmon_en.fin_en[channel / 8] |= BIT(channel % 8);
+ else
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ case hwmon_fan_min:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ val = clamp_val(val, 1, 65535);
+ data->msg->hwmon_alarm.fin_ll[channel] = cpu_to_be16(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_pwm_write(struct device *dev, u32 attr, int channel,
+ long val)
+{
+ struct nct6694_hwmon_data *data = dev_get_drvdata(dev);
+ struct nct6694_cmd_header cmd_hd;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ switch (attr) {
+ case hwmon_pwm_enable:
+ if (val == 0)
+ data->hwmon_en.pwm_en[channel / 8] &= ~BIT(channel % 8);
+ else if (val == 1)
+ data->hwmon_en.pwm_en[channel / 8] |= BIT(channel % 8);
+ else
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_PWM_MOD,
+ .cmd = NCT6694_PWM_CONTROL,
+ .sel = NCT6694_PWM_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->pwm_ctrl))
+ };
+
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->pwm_ctrl);
+ if (ret)
+ return ret;
+
+ data->msg->pwm_ctrl.mal_val[channel] = val;
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->pwm_ctrl);
+ case hwmon_pwm_freq:
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+
+ data->hwmon_en.pwm_freq[channel] = NCT6694_FREQ_TO_REG(val);
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_in:
+ /* in mV */
+ return nct6694_in_read(dev, attr, channel, val);
+ case hwmon_temp:
+ /* in millidegrees Celsius */
+ return nct6694_temp_read(dev, attr, channel, val);
+ case hwmon_fan:
+ /* in RPM */
+ return nct6694_fan_read(dev, attr, channel, val);
+ case hwmon_pwm:
+ /* raw value, 0..255 */
+ return nct6694_pwm_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int nct6694_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_in:
+ return nct6694_in_write(dev, attr, channel, val);
+ case hwmon_temp:
+ return nct6694_temp_write(dev, attr, channel, val);
+ case hwmon_fan:
+ return nct6694_fan_write(dev, attr, channel, val);
+ case hwmon_pwm:
+ return nct6694_pwm_write(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t nct6694_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_enable:
+ case hwmon_in_max:
+ case hwmon_in_min:
+ return 0644;
+ case hwmon_in_alarm:
+ case hwmon_in_input:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_enable:
+ case hwmon_temp_max:
+ case hwmon_temp_max_hyst:
+ return 0644;
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_enable:
+ case hwmon_fan_min:
+ return 0644;
+ case hwmon_fan_input:
+ case hwmon_fan_min_alarm:
+ return 0444;
+ default:
+ return 0;
+ }
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ case hwmon_pwm_freq:
+ case hwmon_pwm_input:
+ return 0644;
+ default:
+ return 0;
+ }
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_ops nct6694_hwmon_ops = {
+ .is_visible = nct6694_is_visible,
+ .read = nct6694_read,
+ .write = nct6694_write,
+};
+
+static const struct hwmon_chip_info nct6694_chip_info = {
+ .ops = &nct6694_hwmon_ops,
+ .info = nct6694_info,
+};
+
+static int nct6694_hwmon_init(struct nct6694_hwmon_data *data)
+{
+ struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_CONTROL,
+ .sel = NCT6694_HWMON_CONTROL_SEL,
+ .len = cpu_to_le16(sizeof(data->hwmon_en))
+ };
+ int ret;
+
+ /*
+ * Record the enable status of each hardware monitor channel and the
+ * PWM frequency registers
+ */
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->hwmon_en);
+ if (ret)
+ return ret;
+
+ cmd_hd = (struct nct6694_cmd_header) {
+ .mod = NCT6694_HWMON_MOD,
+ .cmd = NCT6694_HWMON_ALARM,
+ .sel = NCT6694_HWMON_ALARM_SEL,
+ .len = cpu_to_le16(sizeof(data->msg->hwmon_alarm))
+ };
+
+ /* Select hwmon device alarm mode */
+ ret = nct6694_read_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+ if (ret)
+ return ret;
+
+ data->msg->hwmon_alarm.smi_ctrl = NCT6694_HWMON_REALTIME_IRQ;
+
+ return nct6694_write_msg(data->nct6694, &cmd_hd,
+ &data->msg->hwmon_alarm);
+}
+
+static int nct6694_hwmon_probe(struct platform_device *pdev)
+{
+ struct nct6694_hwmon_data *data;
+ struct nct6694 *nct6694 = dev_get_drvdata(pdev->dev.parent);
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->rpt = devm_kzalloc(&pdev->dev, sizeof(union nct6694_hwmon_rpt),
+ GFP_KERNEL);
+ if (!data->rpt)
+ return -ENOMEM;
+
+ data->msg = devm_kzalloc(&pdev->dev, sizeof(union nct6694_hwmon_msg),
+ GFP_KERNEL);
+ if (!data->msg)
+ return -ENOMEM;
+
+ data->nct6694 = nct6694;
+ ret = devm_mutex_init(&pdev->dev, &data->lock);
+ if (ret)
+ return ret;
+
+ ret = nct6694_hwmon_init(data);
+ if (ret)
+ return ret;
+
+ /* Register hwmon device to HWMON framework */
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "nct6694", data,
+ &nct6694_chip_info,
+ NULL);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver nct6694_hwmon_driver = {
+ .driver = {
+ .name = "nct6694-hwmon",
+ },
+ .probe = nct6694_hwmon_probe,
+};
+
+module_platform_driver(nct6694_hwmon_driver);
+
+MODULE_DESCRIPTION("USB-HWMON driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nct6694-hwmon");
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index 0a040364b512..c3a719aef1ac 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -167,7 +167,8 @@ static inline int nct6775_asuswmi_write(u8 bank, u8 reg, u8 val)
static inline int nct6775_asuswmi_read(u8 bank, u8 reg, u8 *val)
{
- u32 ret, tmp = 0;
+ u32 tmp = 0;
+ int ret;
ret = nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_RHWM, bank,
reg, 0, &tmp);
@@ -1402,6 +1403,7 @@ static const char * const asus_msi_boards[] = {
"ROG STRIX X670E-E GAMING WIFI",
"ROG STRIX X670E-F GAMING WIFI",
"ROG STRIX X670E-I GAMING WIFI",
+ "ROG STRIX X870E-H GAMING WIFI7",
"ROG STRIX Z590-A GAMING WIFI",
"ROG STRIX Z590-A GAMING WIFI II",
"ROG STRIX Z590-E GAMING WIFI",
diff --git a/drivers/hwmon/nct7363.c b/drivers/hwmon/nct7363.c
index e13ab918b1ab..71cef794835d 100644
--- a/drivers/hwmon/nct7363.c
+++ b/drivers/hwmon/nct7363.c
@@ -7,10 +7,8 @@
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/slab.h>
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index f1e6eda949ba..2fa091720c79 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -21,7 +21,6 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/i2c.h>
-#include <linux/mutex.h>
#include <linux/hwmon.h>
#include <linux/watchdog.h>
@@ -128,7 +127,6 @@ static const unsigned short normal_i2c[] = {
struct nct7904_data {
struct i2c_client *client;
struct watchdog_device wdt;
- struct mutex bank_lock;
int bank_sel;
u32 fanin_mask;
u32 vsen_mask;
@@ -142,24 +140,19 @@ struct nct7904_data {
};
/* Access functions */
-static int nct7904_bank_lock(struct nct7904_data *data, unsigned int bank)
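+/*
+ * Select the register bank, caching the current selection in 'bank_sel'
+ * so repeated accesses to the same bank skip the extra SMBus write; the
+ * cached value is invalidated (-1) when selection fails.
+ */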
+static int nct7904_bank_select(struct nct7904_data *data, unsigned int bank)
{
int ret;
- mutex_lock(&data->bank_lock);
if (data->bank_sel == bank)
return 0;
ret = i2c_smbus_write_byte_data(data->client, BANK_SEL_REG, bank);
- if (ret == 0)
- data->bank_sel = bank;
- else
+ if (ret < 0) {
data->bank_sel = -1;
- return ret;
-}
-
-static inline void nct7904_bank_release(struct nct7904_data *data)
-{
- mutex_unlock(&data->bank_lock);
+ return ret;
+ }
+ data->bank_sel = bank;
+ return 0;
}
/* Read 1-byte register. Returns unsigned reg or -ERRNO on error. */
@@ -169,12 +162,10 @@ static int nct7904_read_reg(struct nct7904_data *data,
struct i2c_client *client = data->client;
int ret;
- ret = nct7904_bank_lock(data, bank);
- if (ret == 0)
- ret = i2c_smbus_read_byte_data(client, reg);
-
- nct7904_bank_release(data);
- return ret;
+ ret = nct7904_bank_select(data, bank);
+ if (ret < 0)
+ return ret;
+ return i2c_smbus_read_byte_data(client, reg);
}
/*
@@ -187,19 +178,16 @@ static int nct7904_read_reg16(struct nct7904_data *data,
struct i2c_client *client = data->client;
int ret, hi;
- ret = nct7904_bank_lock(data, bank);
- if (ret == 0) {
- ret = i2c_smbus_read_byte_data(client, reg);
- if (ret >= 0) {
- hi = ret;
- ret = i2c_smbus_read_byte_data(client, reg + 1);
- if (ret >= 0)
- ret |= hi << 8;
- }
- }
-
- nct7904_bank_release(data);
- return ret;
+ ret = nct7904_bank_select(data, bank);
+ if (ret < 0)
+ return ret;
+ hi = i2c_smbus_read_byte_data(client, reg);
+ if (hi < 0)
+ return hi;
+ ret = i2c_smbus_read_byte_data(client, reg + 1);
+ if (ret < 0)
+ return ret;
+ return ret | (hi << 8);
}
/* Write 1-byte register. Returns 0 or -ERRNO on error. */
@@ -209,12 +197,10 @@ static int nct7904_write_reg(struct nct7904_data *data,
struct i2c_client *client = data->client;
int ret;
- ret = nct7904_bank_lock(data, bank);
- if (ret == 0)
- ret = i2c_smbus_write_byte_data(client, reg, val);
-
- nct7904_bank_release(data);
- return ret;
+ ret = nct7904_bank_select(data, bank);
+ if (ret < 0)
+ return ret;
+ return i2c_smbus_write_byte_data(client, reg, val);
}
static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
@@ -1023,7 +1009,6 @@ static int nct7904_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- mutex_init(&data->bank_lock);
data->bank_sel = -1;
/* Setup sensor groups. */
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index 802c73def428..c8f5e695fb6d 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -4,7 +4,6 @@
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -198,7 +197,6 @@ struct npcm7xx_pwm_fan_data {
int pwm_modules;
struct clk *pwm_clk;
struct clk *fan_clk;
- struct mutex pwm_lock[NPCM7XX_PWM_MAX_MODULES];
spinlock_t fan_lock[NPCM7XX_FAN_MAX_MODULE];
int fan_irq[NPCM7XX_FAN_MAX_MODULE];
bool pwm_present[NPCM7XX_PWM_MAX_CHN_NUM];
@@ -221,7 +219,6 @@ static int npcm7xx_pwm_config_set(struct npcm7xx_pwm_fan_data *data,
/*
* Config PWM Comparator register for setting duty cycle
*/
- mutex_lock(&data->pwm_lock[module]);
/* write new CMR value */
iowrite32(val, NPCM7XX_PWM_REG_CMRx(data->pwm_base, module, pwm_ch));
@@ -245,7 +242,6 @@ static int npcm7xx_pwm_config_set(struct npcm7xx_pwm_fan_data *data,
env_bit = NPCM7XX_PWM_CTRL_CH3_INV_BIT;
break;
default:
- mutex_unlock(&data->pwm_lock[module]);
return -ENODEV;
}
@@ -260,8 +256,6 @@ static int npcm7xx_pwm_config_set(struct npcm7xx_pwm_fan_data *data,
}
iowrite32(tmp_buf, NPCM7XX_PWM_REG_CR(data->pwm_base, module));
- mutex_unlock(&data->pwm_lock[module]);
-
return 0;
}
@@ -932,8 +926,8 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
struct resource *res;
struct device *hwmon;
char name[20];
- int ret, cnt;
u32 output_freq;
+ int ret;
u32 i;
np = dev->of_node;
@@ -985,9 +979,6 @@ static int npcm7xx_pwm_fan_probe(struct platform_device *pdev)
output_freq = npcm7xx_pwm_init(data);
npcm7xx_fan_init(data);
- for (cnt = 0; cnt < data->pwm_modules; cnt++)
- mutex_init(&data->pwm_lock[cnt]);
-
for (i = 0; i < NPCM7XX_FAN_MAX_MODULE; i++) {
spin_lock_init(&data->fan_lock[i]);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index d21f7266c411..d6b48178343d 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -24,6 +24,7 @@ enum ntc_thermistor_type {
TYPE_NCPXXWF104,
TYPE_NCPXXWL333,
TYPE_NCPXXXH103,
+ TYPE_NCPXXWM474,
};
struct ntc_compensation {
@@ -46,6 +47,7 @@ enum {
NTC_NCP18WB473,
NTC_NCP21WB473,
NTC_SSG1404001221,
+ NTC_NCP18WM474,
NTC_LAST,
};
@@ -60,6 +62,7 @@ static const struct platform_device_id ntc_thermistor_id[] = {
[NTC_NCP18WB473] = { "ncp18wb473", TYPE_NCPXXWB473 },
[NTC_NCP21WB473] = { "ncp21wb473", TYPE_NCPXXWB473 },
[NTC_SSG1404001221] = { "ssg1404_001221", TYPE_NCPXXWB473 },
+ [NTC_NCP18WM474] = { "ncp18wm474", TYPE_NCPXXWM474 },
[NTC_LAST] = { },
};
MODULE_DEVICE_TABLE(platform, ntc_thermistor_id);
@@ -217,6 +220,43 @@ static const struct ntc_compensation ncpXXxh103[] = {
{ .temp_c = 125, .ohm = 531 },
};
+static const struct ntc_compensation ncpXXwm474[] = {
+ { .temp_c = -40, .ohm = 10900000 },
+ { .temp_c = -35, .ohm = 9600000 },
+ { .temp_c = -30, .ohm = 8300000 },
+ { .temp_c = -25, .ohm = 7000000 },
+ { .temp_c = -20, .ohm = 5980000 },
+ { .temp_c = -15, .ohm = 4960000 },
+ { .temp_c = -10, .ohm = 3940000 },
+ { .temp_c = -5, .ohm = 2920000 },
+ { .temp_c = 0, .ohm = 1900000 },
+ { .temp_c = 5, .ohm = 1614000 },
+ { .temp_c = 10, .ohm = 1328000 },
+ { .temp_c = 15, .ohm = 1042000 },
+ { .temp_c = 20, .ohm = 756000 },
+ { .temp_c = 25, .ohm = 470000 },
+ { .temp_c = 30, .ohm = 404000 },
+ { .temp_c = 35, .ohm = 338000 },
+ { .temp_c = 40, .ohm = 272000 },
+ { .temp_c = 45, .ohm = 206000 },
+ { .temp_c = 50, .ohm = 140000 },
+ { .temp_c = 55, .ohm = 122000 },
+ { .temp_c = 60, .ohm = 104000 },
+ { .temp_c = 65, .ohm = 86000 },
+ { .temp_c = 70, .ohm = 68000 },
+ { .temp_c = 75, .ohm = 50000 },
+ { .temp_c = 80, .ohm = 44200 },
+ { .temp_c = 85, .ohm = 38400 },
+ { .temp_c = 90, .ohm = 32600 },
+ { .temp_c = 95, .ohm = 26800 },
+ { .temp_c = 100, .ohm = 21000 },
+ { .temp_c = 105, .ohm = 18600 },
+ { .temp_c = 110, .ohm = 16200 },
+ { .temp_c = 115, .ohm = 13800 },
+ { .temp_c = 120, .ohm = 11400 },
+ { .temp_c = 125, .ohm = 9000 },
+};
+
/*
* The following compensation tables are from the specifications in EPCOS NTC
* Thermistors Datasheets
@@ -319,6 +359,7 @@ static const struct ntc_type ntc_type[] = {
NTC_TYPE(TYPE_NCPXXWF104, ncpXXwf104),
NTC_TYPE(TYPE_NCPXXWL333, ncpXXwl333),
NTC_TYPE(TYPE_NCPXXXH103, ncpXXxh103),
+ NTC_TYPE(TYPE_NCPXXWM474, ncpXXwm474),
};
/*
@@ -675,6 +716,8 @@ static const struct of_device_id ntc_match[] = {
.data = &ntc_thermistor_id[NTC_NCP21WB473] },
{ .compatible = "samsung,1404-001221",
.data = &ntc_thermistor_id[NTC_SSG1404001221] },
+ { .compatible = "murata,ncp18wm474",
+ .data = &ntc_thermistor_id[NTC_NCP18WM474] },
/* Usage of vendor name "ntc" is deprecated */
{ .compatible = "ntc,ncp03wb473",
diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c
index c2d1173f42fe..58ef9fa0184b 100644
--- a/drivers/hwmon/nzxt-smart2.c
+++ b/drivers/hwmon/nzxt-smart2.c
@@ -721,11 +721,6 @@ static int __maybe_unused nzxt_smart2_hid_reset_resume(struct hid_device *hdev)
return init_device(drvdata, drvdata->update_interval);
}
-static void mutex_fini(void *lock)
-{
- mutex_destroy(lock);
-}
-
static int nzxt_smart2_hid_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -741,8 +736,7 @@ static int nzxt_smart2_hid_probe(struct hid_device *hdev,
init_waitqueue_head(&drvdata->wq);
- mutex_init(&drvdata->mutex);
- ret = devm_add_action_or_reset(&hdev->dev, mutex_fini, &drvdata->mutex);
+ ret = devm_mutex_init(&hdev->dev, &drvdata->mutex);
if (ret)
return ret;
diff --git a/drivers/hwmon/peci/common.h b/drivers/hwmon/peci/common.h
index 734506b0eca2..92a7ee1925bc 100644
--- a/drivers/hwmon/peci/common.h
+++ b/drivers/hwmon/peci/common.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021 Intel Corporation */
-#include <linux/mutex.h>
#include <linux/types.h>
#ifndef __PECI_HWMON_COMMON_H
@@ -13,12 +12,10 @@
* struct peci_sensor_state - PECI state information
* @valid: flag to indicate the sensor value is valid
* @last_updated: time of the last update in jiffies
- * @lock: mutex to protect sensor access
*/
struct peci_sensor_state {
bool valid;
unsigned long last_updated;
- struct mutex lock; /* protect sensor access */
};
/**
diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
index c7112dbf008b..b2fc936851e1 100644
--- a/drivers/hwmon/peci/cputemp.c
+++ b/drivers/hwmon/peci/cputemp.c
@@ -116,11 +116,9 @@ static int get_temp_target(struct peci_cputemp *priv, enum peci_temp_target_type
{
int ret;
- mutex_lock(&priv->temp.target.state.lock);
-
ret = update_temp_target(priv);
if (ret)
- goto unlock;
+ return ret;
switch (type) {
case tcontrol_type:
@@ -139,9 +137,6 @@ static int get_temp_target(struct peci_cputemp *priv, enum peci_temp_target_type
ret = -EOPNOTSUPP;
break;
}
-unlock:
- mutex_unlock(&priv->temp.target.state.lock);
-
return ret;
}
@@ -177,26 +172,23 @@ static s32 dts_eight_dot_eight_to_millidegree(u16 val)
static int get_die_temp(struct peci_cputemp *priv, long *val)
{
- int ret = 0;
long tjmax;
u16 temp;
+ int ret;
- mutex_lock(&priv->temp.die.state.lock);
if (!peci_sensor_need_update(&priv->temp.die.state))
goto skip_update;
ret = peci_temp_read(priv->peci_dev, &temp);
if (ret)
- goto err_unlock;
+ return ret;
- if (!dts_valid(temp)) {
- ret = -EIO;
- goto err_unlock;
- }
+ if (!dts_valid(temp))
+ return -EIO;
ret = get_temp_target(priv, tjmax_type, &tjmax);
if (ret)
- goto err_unlock;
+ return ret;
priv->temp.die.value = (s32)tjmax + dts_ten_dot_six_to_millidegree(temp);
@@ -204,35 +196,30 @@ static int get_die_temp(struct peci_cputemp *priv, long *val)
skip_update:
*val = priv->temp.die.value;
-err_unlock:
- mutex_unlock(&priv->temp.die.state.lock);
- return ret;
+ return 0;
}
static int get_dts(struct peci_cputemp *priv, long *val)
{
- int ret = 0;
u16 thermal_margin;
long tcontrol;
u32 pcs;
+ int ret;
- mutex_lock(&priv->temp.dts.state.lock);
if (!peci_sensor_need_update(&priv->temp.dts.state))
goto skip_update;
ret = peci_pcs_read(priv->peci_dev, PECI_PCS_THERMAL_MARGIN, 0, &pcs);
if (ret)
- goto err_unlock;
+ return ret;
thermal_margin = FIELD_GET(DTS_MARGIN_MASK, pcs);
- if (!dts_valid(thermal_margin)) {
- ret = -EIO;
- goto err_unlock;
- }
+ if (!dts_valid(thermal_margin))
+ return -EIO;
ret = get_temp_target(priv, tcontrol_type, &tcontrol);
if (ret)
- goto err_unlock;
+ return ret;
/* Note that the tcontrol should be available before calling it */
priv->temp.dts.value =
@@ -242,35 +229,30 @@ static int get_dts(struct peci_cputemp *priv, long *val)
skip_update:
*val = priv->temp.dts.value;
-err_unlock:
- mutex_unlock(&priv->temp.dts.state.lock);
- return ret;
+ return 0;
}
static int get_core_temp(struct peci_cputemp *priv, int core_index, long *val)
{
- int ret = 0;
u16 core_dts_margin;
long tjmax;
u32 pcs;
+ int ret;
- mutex_lock(&priv->temp.core[core_index].state.lock);
if (!peci_sensor_need_update(&priv->temp.core[core_index].state))
goto skip_update;
ret = peci_pcs_read(priv->peci_dev, PECI_PCS_MODULE_TEMP, core_index, &pcs);
if (ret)
- goto err_unlock;
+ return ret;
core_dts_margin = FIELD_GET(PCS_MODULE_TEMP_MASK, pcs);
- if (!dts_valid(core_dts_margin)) {
- ret = -EIO;
- goto err_unlock;
- }
+ if (!dts_valid(core_dts_margin))
+ return -EIO;
ret = get_temp_target(priv, tjmax_type, &tjmax);
if (ret)
- goto err_unlock;
+ return ret;
/* Note that the tjmax should be available before calling it */
priv->temp.core[core_index].value =
@@ -280,9 +262,7 @@ static int get_core_temp(struct peci_cputemp *priv, int core_index, long *val)
skip_update:
*val = priv->temp.core[core_index].value;
-err_unlock:
- mutex_unlock(&priv->temp.core[core_index].state.lock);
- return ret;
+ return 0;
}
static int cputemp_read_string(struct device *dev, enum hwmon_sensor_types type,
@@ -364,6 +344,7 @@ static int init_core_mask(struct peci_cputemp *priv)
case INTEL_ICELAKE_X:
case INTEL_ICELAKE_D:
case INTEL_SAPPHIRERAPIDS_X:
+ case INTEL_EMERALDRAPIDS_X:
ret = peci_ep_pci_local_read(peci_dev, 0, reg->bus, reg->dev,
reg->func, reg->offset + 4, &data);
if (ret)
@@ -430,18 +411,6 @@ static void check_resolved_cores(struct peci_cputemp *priv)
bitmap_zero(priv->core_mask, CORE_NUMS_MAX);
}
-static void sensor_init(struct peci_cputemp *priv)
-{
- int i;
-
- mutex_init(&priv->temp.target.state.lock);
- mutex_init(&priv->temp.die.state.lock);
- mutex_init(&priv->temp.dts.state.lock);
-
- for_each_set_bit(i, priv->core_mask, CORE_NUMS_MAX)
- mutex_init(&priv->temp.core[i].state.lock);
-}
-
static const struct hwmon_ops peci_cputemp_ops = {
.is_visible = cputemp_is_visible,
.read_string = cputemp_read_string,
@@ -506,8 +475,6 @@ static int peci_cputemp_probe(struct auxiliary_device *adev,
check_resolved_cores(priv);
- sensor_init(priv);
-
hwmon_dev = devm_hwmon_device_register_with_info(priv->dev, priv->name,
priv, &peci_cputemp_chip_info, NULL);
@@ -539,6 +506,13 @@ static struct resolved_cores_reg resolved_cores_reg_spr = {
.offset = 0x80,
};
+static struct resolved_cores_reg resolved_cores_reg_emr = {
+ .bus = 31,
+ .dev = 30,
+ .func = 6,
+ .offset = 0x80,
+};
+
static const struct cpu_info cpu_hsx = {
.reg = &resolved_cores_reg_hsx,
.min_peci_revision = 0x33,
@@ -563,6 +537,12 @@ static const struct cpu_info cpu_spr = {
.thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
};
+static const struct cpu_info cpu_emr = {
+ .reg = &resolved_cores_reg_emr,
+ .min_peci_revision = 0x40,
+ .thermal_margin_to_millidegree = &dts_ten_dot_six_to_millidegree,
+};
+
static const struct auxiliary_device_id peci_cputemp_ids[] = {
{
.name = "peci_cpu.cputemp.hsx",
@@ -592,6 +572,10 @@ static const struct auxiliary_device_id peci_cputemp_ids[] = {
.name = "peci_cpu.cputemp.spr",
.driver_data = (kernel_ulong_t)&cpu_spr,
},
+ {
+ .name = "peci_cpu.cputemp.emr",
+ .driver_data = (kernel_ulong_t)&cpu_emr,
+ },
{ }
};
MODULE_DEVICE_TABLE(auxiliary, peci_cputemp_ids);
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
index fbe82d9852e0..bd3e8715dfec 100644
--- a/drivers/hwmon/peci/dimmtemp.c
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -32,6 +32,8 @@
#define DIMM_IDX_MAX_ON_ICXD 2
#define CHAN_RANK_MAX_ON_SPR 8
#define DIMM_IDX_MAX_ON_SPR 2
+#define CHAN_RANK_MAX_ON_EMR 8
+#define DIMM_IDX_MAX_ON_EMR 2
#define CHAN_RANK_MAX CHAN_RANK_MAX_ON_HSX
#define DIMM_IDX_MAX DIMM_IDX_MAX_ON_HSX
@@ -94,16 +96,15 @@ static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no, long *val)
{
int dimm_order = dimm_no % priv->gen_info->dimm_idx_max;
int chan_rank = dimm_no / priv->gen_info->dimm_idx_max;
- int ret = 0;
u32 data;
+ int ret;
- mutex_lock(&priv->dimm[dimm_no].temp.state.lock);
if (!peci_sensor_need_update(&priv->dimm[dimm_no].temp.state))
goto skip_update;
ret = peci_pcs_read(priv->peci_dev, PECI_PCS_DDR_DIMM_TEMP, chan_rank, &data);
if (ret)
- goto unlock;
+ return ret;
priv->dimm[dimm_no].temp.value = __dimm_temp(data, dimm_order) * MILLIDEGREE_PER_DEGREE;
@@ -111,9 +112,7 @@ static int get_dimm_temp(struct peci_dimmtemp *priv, int dimm_no, long *val)
skip_update:
*val = priv->dimm[dimm_no].temp.value;
-unlock:
- mutex_unlock(&priv->dimm[dimm_no].temp.state.lock);
- return ret;
+ return 0;
}
static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
@@ -143,10 +142,9 @@ static int get_dimm_thresholds(struct peci_dimmtemp *priv, enum peci_dimm_thresh
{
int ret;
- mutex_lock(&priv->dimm[dimm_no].thresholds.state.lock);
ret = update_thresholds(priv, dimm_no);
if (ret)
- goto unlock;
+ return ret;
switch (type) {
case temp_max_type:
@@ -159,9 +157,6 @@ static int get_dimm_thresholds(struct peci_dimmtemp *priv, enum peci_dimm_thresh
ret = -EOPNOTSUPP;
break;
}
-unlock:
- mutex_unlock(&priv->dimm[dimm_no].thresholds.state.lock);
-
return ret;
}
@@ -347,8 +342,6 @@ static int create_dimm_temp_info(struct peci_dimmtemp *priv)
ret = create_dimm_temp_label(priv, i);
if (ret)
return ret;
- mutex_init(&priv->dimm[i].thresholds.state.lock);
- mutex_init(&priv->dimm[i].temp.state.lock);
}
dev = devm_hwmon_device_register_with_info(priv->dev, priv->name, priv,
@@ -571,6 +564,12 @@ read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
return 0;
}
+static int read_thresholds_emr(struct peci_dimmtemp *priv, int dimm_order,
+ int chan_rank, u32 *data)
+{
+ return read_thresholds_spr(priv, dimm_order, chan_rank, data);
+}
+
static const struct dimm_info dimm_hsx = {
.chan_rank_max = CHAN_RANK_MAX_ON_HSX,
.dimm_idx_max = DIMM_IDX_MAX_ON_HSX,
@@ -620,6 +619,13 @@ static const struct dimm_info dimm_spr = {
.read_thresholds = &read_thresholds_spr,
};
+static const struct dimm_info dimm_emr = {
+ .chan_rank_max = CHAN_RANK_MAX_ON_EMR,
+ .dimm_idx_max = DIMM_IDX_MAX_ON_EMR,
+ .min_peci_revision = 0x40,
+ .read_thresholds = &read_thresholds_emr,
+};
+
static const struct auxiliary_device_id peci_dimmtemp_ids[] = {
{
.name = "peci_cpu.dimmtemp.hsx",
@@ -649,6 +655,10 @@ static const struct auxiliary_device_id peci_dimmtemp_ids[] = {
.name = "peci_cpu.dimmtemp.spr",
.driver_data = (kernel_ulong_t)&dimm_spr,
},
+ {
+ .name = "peci_cpu.dimmtemp.emr",
+ .driver_data = (kernel_ulong_t)&dimm_emr,
+ },
{ }
};
MODULE_DEVICE_TABLE(auxiliary, peci_dimmtemp_ids);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 55e492452ce8..f3fb94cebf1a 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -52,7 +52,8 @@ config SENSORS_ADM1275
help
If you say yes here you get hardware monitoring support for Analog
Devices ADM1075, ADM1272, ADM1273, ADM1275, ADM1276, ADM1278, ADM1281,
- ADM1293, and ADM1294 Hot-Swap Controller and Digital Power Monitors.
+ ADM1293, ADM1294, and SQ24905C Hot-Swap Controller and
+ Digital Power Monitors.
This driver can also be built as a module. If so, the module will
be called adm1275.
@@ -319,6 +320,15 @@ config SENSORS_MAX16601
This driver can also be built as a module. If so, the module will
be called max16601.
+config SENSORS_MAX17616
+ tristate "Analog Devices MAX17616/MAX17616A"
+ help
+ If you say yes here you get hardware monitoring support for Analog
+ Devices MAX17616/MAX17616A.
+
+ This driver can also be built as a module. If so, the module will
+ be called max17616.
+
config SENSORS_MAX20730
tristate "Maxim MAX20710, MAX20730, MAX20734, MAX20743"
help
@@ -351,6 +361,7 @@ config SENSORS_MAX34440
help
If you say yes here you get hardware monitoring support for Maxim
MAX34440, MAX34441, MAX34446, MAX34451, MAX34460, and MAX34461.
+ Other compatible devices are ADPM12160 and ADPM12200.
This driver can also be built as a module. If so, the module will
be called max34440.
@@ -373,6 +384,15 @@ config SENSORS_MP2856
This driver can also be built as a module. If so, the module will
be called mp2856.
+config SENSORS_MP2869
+ tristate "MPS MP2869"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP2869 Dual Loop Digital Multi-Phase Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp2869.
+
config SENSORS_MP2888
tristate "MPS MP2888"
help
@@ -391,6 +411,24 @@ config SENSORS_MP2891
This driver can also be built as a module. If so, the module will
be called mp2891.
+config SENSORS_MP2925
+ tristate "MPS MP2925"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP2925 Dual Loop Digital Multi-Phase Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp2925.
+
+config SENSORS_MP29502
+ tristate "MPS MP29502"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP29502 Dual Loop Digital Multi-Phase Controller.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp29502.
+
config SENSORS_MP2975
tristate "MPS MP2975"
help
@@ -452,6 +490,15 @@ config SENSORS_MP9941
This driver can also be built as a module. If so, the module will
be called mp9941.
+config SENSORS_MP9945
+ tristate "MPS MP9945"
+ help
+ If you say yes here you get hardware monitoring support for MPS
+ MP9945.
+
+ This driver can also be built as a module. If so, the module will
+ be called mp9945.
+
config SENSORS_MPQ7932_REGULATOR
bool "Regulator support for MPQ7932"
depends on SENSORS_MPQ7932 && REGULATOR
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 29cd8a3317d2..349a89b6d92e 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -31,20 +31,25 @@ obj-$(CONFIG_SENSORS_LTC4286) += ltc4286.o
obj-$(CONFIG_SENSORS_MAX15301) += max15301.o
obj-$(CONFIG_SENSORS_MAX16064) += max16064.o
obj-$(CONFIG_SENSORS_MAX16601) += max16601.o
+obj-$(CONFIG_SENSORS_MAX17616) += max17616.o
obj-$(CONFIG_SENSORS_MAX20730) += max20730.o
obj-$(CONFIG_SENSORS_MAX20751) += max20751.o
obj-$(CONFIG_SENSORS_MAX31785) += max31785.o
obj-$(CONFIG_SENSORS_MAX34440) += max34440.o
obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_MP2856) += mp2856.o
+obj-$(CONFIG_SENSORS_MP2869) += mp2869.o
obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
obj-$(CONFIG_SENSORS_MP2891) += mp2891.o
+obj-$(CONFIG_SENSORS_MP2925) += mp2925.o
+obj-$(CONFIG_SENSORS_MP29502) += mp29502.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_MP2993) += mp2993.o
obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
obj-$(CONFIG_SENSORS_MP5920) += mp5920.o
obj-$(CONFIG_SENSORS_MP5990) += mp5990.o
obj-$(CONFIG_SENSORS_MP9941) += mp9941.o
+obj-$(CONFIG_SENSORS_MP9945) += mp9945.o
obj-$(CONFIG_SENSORS_MPQ7932) += mpq7932.o
obj-$(CONFIG_SENSORS_MPQ8785) += mpq8785.o
obj-$(CONFIG_SENSORS_PLI1209BC) += pli1209bc.o
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 7d175baa5de2..bc2a6a07dc3e 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -18,7 +18,8 @@
#include <linux/log2.h>
#include "pmbus.h"
-enum chips { adm1075, adm1272, adm1273, adm1275, adm1276, adm1278, adm1281, adm1293, adm1294 };
+enum chips { adm1075, adm1272, adm1273, adm1275, adm1276, adm1278, adm1281,
+ adm1293, adm1294, sq24905c };
#define ADM1275_MFR_STATUS_IOUT_WARN2 BIT(0)
#define ADM1293_MFR_STATUS_VAUX_UV_WARN BIT(5)
@@ -486,6 +487,7 @@ static const struct i2c_device_id adm1275_id[] = {
{ "adm1281", adm1281 },
{ "adm1293", adm1293 },
{ "adm1294", adm1294 },
+ { "mc09c", sq24905c },
{ }
};
MODULE_DEVICE_TABLE(i2c, adm1275_id);
@@ -532,7 +534,8 @@ static int adm1275_probe(struct i2c_client *client)
dev_err(&client->dev, "Failed to read Manufacturer ID\n");
return ret;
}
- if (ret != 3 || strncmp(block_buffer, "ADI", 3)) {
+ if ((ret != 3 || strncmp(block_buffer, "ADI", 3)) &&
+ (ret != 2 || strncmp(block_buffer, "SY", 2))) {
dev_err(&client->dev, "Unsupported Manufacturer ID\n");
return -ENODEV;
}
@@ -558,7 +561,8 @@ static int adm1275_probe(struct i2c_client *client)
if (mid->driver_data == adm1272 || mid->driver_data == adm1273 ||
mid->driver_data == adm1278 || mid->driver_data == adm1281 ||
- mid->driver_data == adm1293 || mid->driver_data == adm1294)
+ mid->driver_data == adm1293 || mid->driver_data == adm1294 ||
+ mid->driver_data == sq24905c)
config_read_fn = i2c_smbus_read_word_data;
else
config_read_fn = i2c_smbus_read_byte_data;
@@ -708,6 +712,7 @@ static int adm1275_probe(struct i2c_client *client)
break;
case adm1278:
case adm1281:
+ case sq24905c:
data->have_vout = true;
data->have_pin_max = true;
data->have_temp_max = true;
diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
index c52c55d2e7f4..97b61836f53a 100644
--- a/drivers/hwmon/pmbus/isl68137.c
+++ b/drivers/hwmon/pmbus/isl68137.c
@@ -61,8 +61,11 @@ enum chips {
raa228004,
raa228006,
raa228228,
+ raa228244,
+ raa228246,
raa229001,
raa229004,
+ raa229141,
raa229621,
};
@@ -71,6 +74,7 @@ enum variants {
raa_dmpvr2_1rail,
raa_dmpvr2_2rail,
raa_dmpvr2_2rail_nontc,
+ raa_dmpvr2_2rail_pmbus,
raa_dmpvr2_3rail,
raa_dmpvr2_hv,
};
@@ -334,10 +338,9 @@ static int isl68137_probe_from_dt(struct device *dev,
struct isl68137_data *data)
{
const struct device_node *np = dev->of_node;
- struct device_node *child;
int err;
- for_each_child_of_node(np, child) {
+ for_each_child_of_node_scoped(np, child) {
if (strcmp(child->name, "channel"))
continue;
@@ -398,6 +401,17 @@ static int isl68137_probe(struct i2c_client *client)
info->read_word_data = raa_dmpvr2_read_word_data;
info->write_word_data = raa_dmpvr2_write_word_data;
break;
+ case raa_dmpvr2_2rail_pmbus:
+ info->format[PSC_VOLTAGE_IN] = linear;
+ info->format[PSC_VOLTAGE_OUT] = linear;
+ info->format[PSC_CURRENT_IN] = linear;
+ info->format[PSC_CURRENT_OUT] = linear;
+ info->format[PSC_POWER] = linear;
+ info->format[PSC_TEMPERATURE] = linear;
+ info->pages = 2;
+ info->read_word_data = raa_dmpvr2_read_word_data;
+ info->write_word_data = raa_dmpvr2_write_word_data;
+ break;
case raa_dmpvr2_3rail:
info->read_word_data = raa_dmpvr2_read_word_data;
info->write_word_data = raa_dmpvr2_write_word_data;
@@ -464,8 +478,11 @@ static const struct i2c_device_id raa_dmpvr_id[] = {
{"raa228004", raa_dmpvr2_hv},
{"raa228006", raa_dmpvr2_hv},
{"raa228228", raa_dmpvr2_2rail_nontc},
+ {"raa228244", raa_dmpvr2_2rail_nontc},
+ {"raa228246", raa_dmpvr2_2rail_nontc},
{"raa229001", raa_dmpvr2_2rail},
{"raa229004", raa_dmpvr2_2rail},
+ {"raa229141", raa_dmpvr2_2rail_pmbus},
{"raa229621", raa_dmpvr2_2rail},
{}
};
@@ -512,6 +529,8 @@ static const struct of_device_id isl68137_of_match[] = {
{ .compatible = "renesas,raa228004", .data = (void *)raa_dmpvr2_hv },
{ .compatible = "renesas,raa228006", .data = (void *)raa_dmpvr2_hv },
{ .compatible = "renesas,raa228228", .data = (void *)raa_dmpvr2_2rail_nontc },
+ { .compatible = "renesas,raa228244", .data = (void *)raa_dmpvr2_2rail_nontc },
+ { .compatible = "renesas,raa228246", .data = (void *)raa_dmpvr2_2rail_nontc },
{ .compatible = "renesas,raa229001", .data = (void *)raa_dmpvr2_2rail },
{ .compatible = "renesas,raa229004", .data = (void *)raa_dmpvr2_2rail },
{ .compatible = "renesas,raa229621", .data = (void *)raa_dmpvr2_2rail },
diff --git a/drivers/hwmon/pmbus/max17616.c b/drivers/hwmon/pmbus/max17616.c
new file mode 100644
index 000000000000..1d4a0ddb95bb
--- /dev/null
+++ b/drivers/hwmon/pmbus/max17616.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hardware monitoring driver for Analog Devices MAX17616/MAX17616A
+ *
+ * Copyright (C) 2025 Analog Devices, Inc.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+#include "pmbus.h"
+
+static struct pmbus_driver_info max17616_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .m[PSC_VOLTAGE_IN] = 512,
+ .b[PSC_VOLTAGE_IN] = -18,
+ .R[PSC_VOLTAGE_IN] = -1,
+
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .m[PSC_VOLTAGE_OUT] = 512,
+ .b[PSC_VOLTAGE_OUT] = -18,
+ .R[PSC_VOLTAGE_OUT] = -1,
+
+ .format[PSC_CURRENT_OUT] = direct,
+ .m[PSC_CURRENT_OUT] = 5845,
+ .b[PSC_CURRENT_OUT] = 80,
+ .R[PSC_CURRENT_OUT] = -1,
+
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_TEMPERATURE] = 71,
+ .b[PSC_TEMPERATURE] = 19653,
+ .R[PSC_TEMPERATURE] = -1,
+
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_VOUT |
+ PMBUS_HAVE_STATUS_IOUT | PMBUS_HAVE_STATUS_INPUT |
+ PMBUS_HAVE_STATUS_TEMP,
+};
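For reference, PMBus "direct" format values decode as X = (Y * 10^-R - b) / m. A minimal
hosted-C sketch of the temperature decode using the coefficients above (the sample
register value 2500 is made up for illustration; this is not part of the driver):

#include <stdio.h>

/* PMBus direct decode: X = (Y * 10^-R - b) / m; with R = -1, 10^-R = 10. */
static double pmbus_direct_decode(int y, int m, int b, int pow10_neg_r)
{
	return ((double)y * pow10_neg_r - b) / (double)m;
}

int main(void)
{
	/* Made-up READ_TEMPERATURE_1 word 2500 with m=71, b=19653, R=-1 -> ~75.3 degC */
	printf("%.1f\n", pmbus_direct_decode(2500, 71, 19653, 10));
	return 0;
}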
+
+static int max17616_probe(struct i2c_client *client)
+{
+ return pmbus_do_probe(client, &max17616_info);
+}
+
+static const struct i2c_device_id max17616_id[] = {
+ { "max17616" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max17616_id);
+
+static const struct of_device_id max17616_of_match[] = {
+ { .compatible = "adi,max17616" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max17616_of_match);
+
+static struct i2c_driver max17616_driver = {
+ .driver = {
+ .name = "max17616",
+ .of_match_table = max17616_of_match,
+ },
+ .probe = max17616_probe,
+ .id_table = max17616_id,
+};
+module_i2c_driver(max17616_driver);
+
+MODULE_AUTHOR("Kim Seer Paller <kimseer.paller@analog.com>");
+MODULE_DESCRIPTION("PMBus driver for Analog Devices MAX17616/MAX17616A");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index 56834d26f8ef..8ea4e68d4e9d 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -17,6 +17,7 @@
enum chips {
adpm12160,
+ adpm12200,
max34440,
max34441,
max34446,
@@ -98,7 +99,7 @@ static int max34440_read_word_data(struct i2c_client *client, int page,
break;
case PMBUS_VIRT_READ_IOUT_AVG:
if (data->id != max34446 && data->id != max34451 &&
- data->id != adpm12160)
+ data->id != adpm12160 && data->id != adpm12200)
return -ENXIO;
ret = pmbus_read_word_data(client, page, phase,
MAX34446_MFR_IOUT_AVG);
@@ -183,7 +184,7 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_IOUT_PEAK, 0);
if (!ret && (data->id == max34446 || data->id == max34451 ||
- data->id == adpm12160))
+ data->id == adpm12160 || data->id == adpm12200))
ret = pmbus_write_word_data(client, page,
MAX34446_MFR_IOUT_AVG, 0);
@@ -336,18 +337,18 @@ static struct pmbus_driver_info max34440_info[] = {
.format[PSC_CURRENT_IN] = direct,
.format[PSC_CURRENT_OUT] = direct,
.format[PSC_TEMPERATURE] = direct,
- .m[PSC_VOLTAGE_IN] = 1,
+ .m[PSC_VOLTAGE_IN] = 125,
.b[PSC_VOLTAGE_IN] = 0,
.R[PSC_VOLTAGE_IN] = 0,
- .m[PSC_VOLTAGE_OUT] = 1,
+ .m[PSC_VOLTAGE_OUT] = 125,
.b[PSC_VOLTAGE_OUT] = 0,
.R[PSC_VOLTAGE_OUT] = 0,
- .m[PSC_CURRENT_IN] = 1,
+ .m[PSC_CURRENT_IN] = 250,
.b[PSC_CURRENT_IN] = 0,
- .R[PSC_CURRENT_IN] = 2,
- .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_IN] = -1,
+ .m[PSC_CURRENT_OUT] = 250,
.b[PSC_CURRENT_OUT] = 0,
- .R[PSC_CURRENT_OUT] = 2,
+ .R[PSC_CURRENT_OUT] = -1,
.m[PSC_TEMPERATURE] = 1,
.b[PSC_TEMPERATURE] = 0,
.R[PSC_TEMPERATURE] = 2,
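Under the PMBus direct format (X = (Y * 10^-R - b) / m), the updated ADPM12160
coefficients work out as follows: m = 125 with R = 0 gives 1/125 V = 8 mV per LSB for
voltages, and m = 250 with R = -1 gives 10/250 A = 40 mA per LSB for currents.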
@@ -364,6 +365,42 @@ static struct pmbus_driver_info max34440_info[] = {
.read_word_data = max34440_read_word_data,
.write_word_data = max34440_write_word_data,
},
+ [adpm12200] = {
+ .pages = 19,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 125,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .m[PSC_VOLTAGE_OUT] = 125,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .m[PSC_CURRENT_IN] = 250,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = -1,
+ .m[PSC_CURRENT_OUT] = 250,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = -1,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 2,
+ /* pages with no func entry below [18] are not used for monitoring */
+ .func[2] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT,
+ .func[4] = PMBUS_HAVE_STATUS_IOUT,
+ .func[5] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[6] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[7] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[8] = PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT,
+ .func[9] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[10] = PMBUS_HAVE_IIN | PMBUS_HAVE_STATUS_INPUT,
+ .func[14] = PMBUS_HAVE_IOUT,
+ .func[18] = PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP,
+ .read_word_data = max34440_read_word_data,
+ .write_word_data = max34440_write_word_data,
+ },
[max34440] = {
.pages = 14,
.format[PSC_VOLTAGE_IN] = direct,
@@ -600,7 +637,7 @@ static int max34440_probe(struct i2c_client *client)
rv = max34451_set_supported_funcs(client, data);
if (rv)
return rv;
- } else if (data->id == adpm12160) {
+ } else if (data->id == adpm12160 || data->id == adpm12200) {
data->iout_oc_fault_limit = PMBUS_IOUT_OC_FAULT_LIMIT;
data->iout_oc_warn_limit = PMBUS_IOUT_OC_WARN_LIMIT;
}
@@ -610,6 +647,7 @@ static int max34440_probe(struct i2c_client *client)
static const struct i2c_device_id max34440_id[] = {
{"adpm12160", adpm12160},
+ {"adpm12200", adpm12200},
{"max34440", max34440},
{"max34441", max34441},
{"max34446", max34446},
diff --git a/drivers/hwmon/pmbus/mp2869.c b/drivers/hwmon/pmbus/mp2869.c
new file mode 100644
index 000000000000..cc69a1e91dfe
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2869.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP2869)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+/*
+ * Vendor specific registers: MFR_SVI3_IOUT_PRT(0x67),
+ * READ_PIN_EST(0x94) and READ_IIN_EST(0x95) redefine standard
+ * PMBus registers. The MFR_VOUT_LOOP_CTRL(0x29) is used to identify
+ * the vout scale and the MFR_SVI3_IOUT_PRT(0x67) is used to identify
+ * the iout scale. The READ_PIN_EST(0x94) is used to read input power
+ * per rail. The MP2869 does not have the standard READ_IIN register(0x89);
+ * the iin telemetry can be obtained through the vendor redefined
+ * register READ_IIN_EST(0x95).
+ */
+#define MFR_SVI3_IOUT_PRT 0x67
+#define MFR_READ_PIN_EST 0x94
+#define MFR_READ_IIN_EST 0x95
+#define MFR_TSNS_FLT_SET 0xBB
+
+#define MP2869_VIN_OV_FAULT_GAIN 4
+#define MP2869_READ_VOUT_DIV 1024
+#define MP2869_READ_IOUT_DIV 32
+#define MP2869_OVUV_LIMIT_SCALE 10
+#define MP2869_OVUV_DELTA_SCALE 50
+#define MP2869_TEMP_LIMIT_OFFSET 40
+#define MP2869_IOUT_LIMIT_UINT 8
+#define MP2869_POUT_OP_GAIN 2
+
+#define MP2869_PAGE_NUM 2
+
+#define MP2869_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_PIN | \
+ PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+#define MP2869_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | \
+ PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP | \
+ PMBUS_HAVE_PIN | PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+struct mp2869_data {
+ struct pmbus_driver_info info;
+ bool mfr_thwn_flt_en;
+ int vout_scale[MP2869_PAGE_NUM];
+ int iout_scale[MP2869_PAGE_NUM];
+};
+
+static const int mp2869_vout_scale[8] = {6400, 5120, 2560, 2048, 1024,
+ 4, 2, 1};
+static const int mp2869_iout_scale[8] = {32, 1, 2, 4, 8, 16, 32, 64};
+
+#define to_mp2869_data(x) container_of(x, struct mp2869_data, info)
+
+static u16 mp2869_reg2data_linear11(u16 word)
+{
+ s16 exponent;
+ s32 mantissa;
+ s64 val;
+
+ exponent = ((s16)word) >> 11;
+ mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;
+ val = mantissa;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return val;
+}
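The helper above is the standard PMBus linear11 decode: a 5-bit signed exponent in bits
15:11 and an 11-bit signed mantissa in bits 10:0. A self-contained sketch with one worked
value (hosted C for illustration; the sample word 0xE808 is made up):

#include <stdint.h>
#include <stdio.h>

/* linear11: value = mantissa * 2^exponent, both fields signed. */
static int64_t linear11_decode(uint16_t word)
{
	int16_t exponent = (int16_t)word >> 11;                   /* sign-extend bits 15:11 */
	int32_t mantissa = ((int16_t)((word & 0x7ff) << 5)) >> 5; /* sign-extend bits 10:0 */

	return exponent >= 0 ? (int64_t)mantissa << exponent :
			       (int64_t)mantissa >> -exponent;
}

int main(void)
{
	/* 0xE808: exponent = -3, mantissa = 8, so the value is 8 / 2^3 = 1 */
	printf("%lld\n", (long long)linear11_decode(0xE808));
	return 0;
}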
+
+static int
+mp2869_identify_thwn_flt(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_TSNS_FLT_SET);
+ if (ret < 0)
+ return ret;
+
+ data->mfr_thwn_flt_en = FIELD_GET(GENMASK(13, 13), ret);
+
+ return 0;
+}
+
+static int
+mp2869_identify_vout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, PMBUS_VOUT_SCALE_LOOP);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The output voltage is equal to the READ_VOUT(0x8B) register value
+ * multiplied by vout_scale.
+ * Obtain the vout scale from the register PMBUS_VOUT_SCALE_LOOP, bits 12-10
+ * PMBUS_VOUT_SCALE_LOOP[12:10]:
+ * 000b - 6.25mV/LSB, 001b - 5mV/LSB, 010b - 2.5mV/LSB, 011b - 2mV/LSB
+ * 100b - 1mV/LSB, 101b - (1/256)mV/LSB, 110b - (1/512)mV/LSB,
+ * 111b - (1/1024)mV/LSB
+ */
+ data->vout_scale[page] = mp2869_vout_scale[FIELD_GET(GENMASK(12, 10), ret)];
+
+ return 0;
+}
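Each entry in the scale table is the step size in units of (1/1024) mV, which is why
READ_VOUT is later computed as raw * vout_scale / MP2869_READ_VOUT_DIV: the 000b setting
stores 6400, and 6400 / 1024 = 6.25 mV/LSB, so a (made-up) raw reading of 800 decodes to
800 * 6400 / 1024 = 5000 mV.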
+
+static int
+mp2869_identify_iout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_SVI3_IOUT_PRT);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The output current is equal to the READ_IOUT(0x8C) register value
+ * multiplied by iout_scale.
+ * Obtain iout_scale from the register MFR_SVI3_IOUT_PRT[2:0].
+ * The value is selected as below:
+ * 000b - 1A/LSB, 001b - (1/32)A/LSB, 010b - (1/16)A/LSB,
+ * 011b - (1/8)A/LSB, 100b - (1/4)A/LSB, 101b - (1/2)A/LSB
+ * 110b - 1A/LSB, 111b - 2A/LSB
+ */
+ data->iout_scale[page] = mp2869_iout_scale[FIELD_GET(GENMASK(2, 0), ret)];
+
+ return 0;
+}
+
+static int mp2869_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /*
+ * The calculation of vout in this driver is based on direct format.
+ * As a result, the format of vout is enforced to direct.
+ */
+ ret = PB_VOUT_MODE_DIRECT;
+ break;
+ case PMBUS_STATUS_BYTE:
+ /*
+ * If the tsns digital fault is enabled, the TEMPERATURE flag
+ * of PMBUS_STATUS_BYTE should come from STATUS_MFR_SPECIFIC
+ * register bit1.
+ */
+ if (!data->mfr_thwn_flt_en)
+ return -ENODATA;
+
+ ret = pmbus_read_byte_data(client, page, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & ~GENMASK(2, 2)) |
+ FIELD_PREP(GENMASK(2, 2),
+ FIELD_GET(GENMASK(1, 1),
+ pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC)));
+ break;
+ case PMBUS_STATUS_TEMPERATURE:
+ /*
+ * If the tsns digital fault is enabled, the OT Fault and OT Warning
+ * flag of PMBUS_STATUS_TEMPERATURE should come from STATUS_MFR_SPECIFIC
+ * register bit1.
+ */
+ if (!data->mfr_thwn_flt_en)
+ return -ENODATA;
+
+ ret = pmbus_read_byte_data(client, page, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & ~GENMASK(7, 6)) |
+ FIELD_PREP(GENMASK(6, 6),
+ FIELD_GET(GENMASK(1, 1),
+ pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC))) |
+ FIELD_PREP(GENMASK(7, 7),
+ FIELD_GET(GENMASK(1, 1),
+ pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC)));
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2869_read_word_data(struct i2c_client *client, int page, int phase,
+ int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_STATUS_WORD:
+ /*
+ * If the tsns digital fault is enabled, the OT Fault flag
+ * of PMBUS_STATUS_WORD should come from STATUS_MFR_SPECIFIC
+ * register bit1.
+ */
+ if (!data->mfr_thwn_flt_en)
+ return -ENODATA;
+
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & ~GENMASK(2, 2)) |
+ FIELD_PREP(GENMASK(2, 2),
+ FIELD_GET(GENMASK(1, 1),
+ pmbus_read_byte_data(client, page,
+ PMBUS_STATUS_MFR_SPECIFIC)));
+ break;
+ case PMBUS_READ_VIN:
+ /*
+ * The MP2869 PMBUS_READ_VIN[10:0] is the vin value with a scale of
+ * 31.25mV/LSB. The vin scale is also set to 31.25mV/LSB (using the
+ * r/m/b scale) in the MP2869 pmbus_driver_info struct, so the word
+ * data bit0-bit10 can be returned to the pmbus core directly.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(10, 0), ret);
+ break;
+ case PMBUS_READ_IIN:
+ /*
+ * The MP2869 redefines the standard 0x95 register as iin telemetry
+ * per rail.
+ */
+ ret = pmbus_read_word_data(client, page, phase, MFR_READ_IIN_EST);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case PMBUS_READ_PIN:
+ /*
+ * The MP2869 redefines the standard 0x94 register as pin telemetry
+ * per rail. The MP2869 MFR_READ_PIN_EST register is in linear11 format,
+ * but the pin scale is set to 1W/LSB (using the r/m/b scale). As a
+ * result, the pin read from the MP2869 should be converted to W before
+ * returning the result to the pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, MFR_READ_PIN_EST);
+ if (ret < 0)
+ return ret;
+
+ ret = mp2869_reg2data_linear11(ret);
+ break;
+ case PMBUS_READ_VOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(11, 0)) * data->vout_scale[page],
+ MP2869_READ_VOUT_DIV);
+ break;
+ case PMBUS_READ_IOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(10, 0)) * data->iout_scale[page],
+ MP2869_READ_IOUT_DIV);
+ break;
+ case PMBUS_READ_POUT:
+ /*
+ * The MP2869 PMBUS_READ_POUT register is in linear11 format, but the
+ * pout scale is set to 1W/LSB (using the r/m/b scale). As a result,
+ * the pout read from the MP2869 should be converted to W before
+ * returning the result to the pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = mp2869_reg2data_linear11(ret);
+ break;
+ case PMBUS_READ_TEMPERATURE_1:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(10, 0), ret);
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(12, 9), ret))
+ ret = FIELD_GET(GENMASK(8, 0), ret) * MP2869_OVUV_LIMIT_SCALE +
+ (FIELD_GET(GENMASK(12, 9), ret) + 1) * MP2869_OVUV_DELTA_SCALE;
+ else
+ ret = FIELD_GET(GENMASK(8, 0), ret) * MP2869_OVUV_LIMIT_SCALE;
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(12, 9), ret))
+ ret = FIELD_GET(GENMASK(8, 0), ret) * MP2869_OVUV_LIMIT_SCALE -
+ (FIELD_GET(GENMASK(12, 9), ret) + 1) * MP2869_OVUV_DELTA_SCALE;
+ else
+ ret = FIELD_GET(GENMASK(8, 0), ret) * MP2869_OVUV_LIMIT_SCALE;
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * The scale of MP2869 PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT
+ * is 1°C/LSB and they have 40°C offset.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) - MP2869_TEMP_LIMIT_OFFSET;
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) * MP2869_VIN_OV_FAULT_GAIN;
+ break;
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(9, 0), ret);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) * data->iout_scale[page] *
+ MP2869_IOUT_LIMIT_UINT, MP2869_READ_IOUT_DIV);
+ break;
+ case PMBUS_POUT_OP_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) * MP2869_POUT_OP_GAIN;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
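To make the VOUT_OV/UV fault limit decoding above concrete, take a (made-up)
PMBUS_VOUT_OV_FAULT_LIMIT word of 0x0264: bits 12:9 are 1 and bits 8:0 are 100, so the
reported limit is 100 * 10 + (1 + 1) * 50 = 1100 mV; with bits 12:9 zero it would simply
be 100 * 10 = 1000 mV.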
+
+static int mp2869_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2869_data *data = to_mp2869_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ /*
+ * The MP2869 PMBUS_VOUT_UV_FAULT_LIMIT[8:0] is the limit value,
+ * and bit9-bit15 should not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(12, 9), ret))
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word +
+ (FIELD_GET(GENMASK(12, 9),
+ ret) + 1) *
+ MP2869_OVUV_DELTA_SCALE,
+ MP2869_OVUV_LIMIT_SCALE)));
+ else
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2869_OVUV_LIMIT_SCALE)));
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ /*
+ * The MP2869 PMBUS_VOUT_OV_FAULT_LIMIT[8:0] is the limit value,
+ * and bit9-bit15 should not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(12, 9), ret))
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word -
+ (FIELD_GET(GENMASK(12, 9),
+ ret) + 1) *
+ MP2869_OVUV_DELTA_SCALE,
+ MP2869_OVUV_LIMIT_SCALE)));
+ else
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2869_OVUV_LIMIT_SCALE)));
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * If the tsns digital fault is enabled, the PMBUS_OT_FAULT_LIMIT and
+ * PMBUS_OT_WARN_LIMIT cannot be written.
+ */
+ if (data->mfr_thwn_flt_en)
+ return -EINVAL;
+
+ /*
+ * The MP2869 PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT have a
+ * 40°C offset. Bit0-bit7 is the limit value, and bit8-bit15
+ * should not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ word + MP2869_TEMP_LIMIT_OFFSET));
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /*
+ * The MP2869 PMBUS_VIN_OV_FAULT_LIMIT[7:0] is the limit value, and bit8-bit15
+ * should not be changed. The scale of PMBUS_VIN_OV_FAULT_LIMIT is 125mV/LSB,
+ * but the vin scale is set to 31.25mV/LSB (using the r/m/b scale), so the
+ * word data should be divided by MP2869_VIN_OV_FAULT_GAIN(4).
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2869_VIN_OV_FAULT_GAIN)));
+ break;
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /*
+ * The PMBUS_VIN_UV_LIMIT[9:0] is the limit value, and bit10-bit15 should
+ * not be changed. The scale of PMBUS_VIN_UV_LIMIT is 31.25mV/LSB, and the
+ * vin scale is set to 31.25mV/LSB (using the r/m/b scale), so the word data can
+ * be written directly.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(9, 0)) |
+ FIELD_PREP(GENMASK(9, 0),
+ word));
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word * MP2869_READ_IOUT_DIV,
+ MP2869_IOUT_LIMIT_UINT *
+ data->iout_scale[page]));
+ break;
+ case PMBUS_POUT_OP_WARN_LIMIT:
+ /*
+ * The POUT_OP_WARN_LIMIT[11:0] is the limit value, and bit12-bit15 should
+ * not be changed. The scale of POUT_OP_WARN_LIMIT is 2W/Lsb.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(11, 0)) |
+ FIELD_PREP(GENMASK(11, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP2869_POUT_OP_GAIN)));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2869_identify(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ int ret;
+
+ /* Identify whether the tsns digital fault is enabled. */
+ ret = mp2869_identify_thwn_flt(client, info, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify vout scale for rail1. */
+ ret = mp2869_identify_vout_scale(client, info, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify vout scale for rail2. */
+ ret = mp2869_identify_vout_scale(client, info, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify iout scale for rail 1. */
+ ret = mp2869_identify_iout_scale(client, info, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify iout scale for rail 2. */
+ return mp2869_identify_iout_scale(client, info, 1);
+}
+
+static const struct pmbus_driver_info mp2869_info = {
+ .pages = MP2869_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+
+ .m[PSC_VOLTAGE_IN] = 32,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .b[PSC_VOLTAGE_IN] = 0,
+
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+
+ .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_OUT] = 0,
+ .b[PSC_CURRENT_OUT] = 0,
+
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 0,
+ .b[PSC_TEMPERATURE] = 0,
+
+ .m[PSC_POWER] = 1,
+ .R[PSC_POWER] = 0,
+ .b[PSC_POWER] = 0,
+
+ .func[0] = MP2869_RAIL1_FUNC,
+ .func[1] = MP2869_RAIL2_FUNC,
+ .read_word_data = mp2869_read_word_data,
+ .write_word_data = mp2869_write_word_data,
+ .read_byte_data = mp2869_read_byte_data,
+ .identify = mp2869_identify,
+};
+
+static int mp2869_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ struct mp2869_data *data;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp2869_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp2869_info, sizeof(mp2869_info));
+ info = &data->info;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id mp2869_id[] = {
+ {"mp2869", 0},
+ {"mp29608", 1},
+ {"mp29612", 2},
+ {"mp29816", 3},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp2869_id);
+
+static const struct of_device_id __maybe_unused mp2869_of_match[] = {
+ {.compatible = "mps,mp2869", .data = (void *)0},
+ {.compatible = "mps,mp29608", .data = (void *)1},
+ {.compatible = "mps,mp29612", .data = (void *)2},
+ {.compatible = "mps,mp29816", .data = (void *)3},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2869_of_match);
+
+static struct i2c_driver mp2869_driver = {
+ .driver = {
+ .name = "mp2869",
+ .of_match_table = mp2869_of_match,
+ },
+ .probe = mp2869_probe,
+ .id_table = mp2869_id,
+};
+
+module_i2c_driver(mp2869_driver);
+
+MODULE_AUTHOR("Wensheng Wang <wenswang@yeah.net>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2869");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp2925.c b/drivers/hwmon/pmbus/mp2925.c
new file mode 100644
index 000000000000..6bebd6023021
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp2925.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP2925)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+/*
+ * Vendor specific register MFR_VR_MULTI_CONFIG(0x08).
+ * This register is used to obtain vid scale.
+ */
+#define MFR_VR_MULTI_CONFIG 0x08
+
+#define MP2925_VOUT_DIV 512
+#define MP2925_VOUT_OVUV_UINT 195
+#define MP2925_VOUT_OVUV_DIV 100
+
+#define MP2925_PAGE_NUM 2
+
+#define MP2925_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_PIN | \
+ PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | \
+ PMBUS_HAVE_POUT | PMBUS_HAVE_TEMP | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+#define MP2925_RAIL2_FUNC (PMBUS_HAVE_PIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+struct mp2925_data {
+ struct pmbus_driver_info info;
+ int vout_scale[MP2925_PAGE_NUM];
+};
+
+#define to_mp2925_data(x) container_of(x, struct mp2925_data, info)
+
+static u16 mp2925_linear_exp_transfer(u16 word, u16 expect_exponent)
+{
+ s16 exponent, mantissa, target_exponent;
+
+ exponent = ((s16)word) >> 11;
+ mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;
+ target_exponent = (s16)((expect_exponent & 0x1f) << 11) >> 11;
+
+ if (exponent > target_exponent)
+ mantissa = mantissa << (exponent - target_exponent);
+ else
+ mantissa = mantissa >> (target_exponent - exponent);
+
+ return (mantissa & 0x7ff) | ((expect_exponent << 11) & 0xf800);
+}
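In other words, mp2925_linear_exp_transfer() re-expresses a linear11 word against a fixed
exponent expected by the device. As a worked (made-up) value: word = 100 (exponent 0,
mantissa 100) with expect_exponent = 0x1C (-4) shifts the mantissa left by 4 to 1600 and
returns 0xE640, which still encodes 1600 * 2^-4 = 100.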
+
+static int mp2925_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /*
+ * The MP2925 does not follow the standard PMBus protocol completely,
+ * and the calculation of vout in this driver is based on direct
+ * format. As a result, the format of vout is enforced to direct.
+ */
+ ret = PB_VOUT_MODE_DIRECT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2925_read_word_data(struct i2c_client *client, int page, int phase,
+ int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp2925_data *data = to_mp2925_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_READ_VOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(11, 0)) * data->vout_scale[page],
+ MP2925_VOUT_DIV);
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(11, 0)) * MP2925_VOUT_OVUV_UINT,
+ MP2925_VOUT_OVUV_DIV);
+ break;
+ case PMBUS_STATUS_WORD:
+ case PMBUS_READ_VIN:
+ case PMBUS_READ_IOUT:
+ case PMBUS_READ_POUT:
+ case PMBUS_READ_PIN:
+ case PMBUS_READ_IIN:
+ case PMBUS_READ_TEMPERATURE_1:
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ ret = -ENODATA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp2925_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ case PMBUS_VIN_OV_WARN_LIMIT:
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /*
+ * The PMBUS_VIN_OV_FAULT_LIMIT, PMBUS_VIN_OV_WARN_LIMIT,
+ * PMBUS_VIN_UV_WARN_LIMIT and PMBUS_VIN_UV_FAULT_LIMIT
+ * of the MP2925 are in linear11 format, and the exponent is a
+ * constant value (5'b11100), so the exponent of the word
+ * parameter should be converted to 5'b11100 (0x1C).
+ */
+ ret = pmbus_write_word_data(client, page, reg,
+ mp2925_linear_exp_transfer(word, 0x1C));
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ /*
+ * The bit0-bit11 is the limit value, and bit12-bit15
+ * should not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(11, 0)) |
+ FIELD_PREP(GENMASK(11, 0),
+ DIV_ROUND_CLOSEST(word * MP2925_VOUT_OVUV_DIV,
+ MP2925_VOUT_OVUV_UINT)));
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * The PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT of
+ * MP2925 are in linear11 format, and the exponent is a
+ * constant value (5'b00000), so the exponent of the word
+ * parameter should be converted to 5'b00000.
+ */
+ ret = pmbus_write_word_data(client, page, reg,
+ mp2925_linear_exp_transfer(word, 0x00));
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ /*
+ * The PMBUS_IOUT_OC_FAULT_LIMIT and PMBUS_IOUT_OC_WARN_LIMIT
+ * of the MP2925 are in linear11 format, and the exponent cannot
+ * be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ mp2925_linear_exp_transfer(word,
+ FIELD_GET(GENMASK(15, 11),
+ ret)));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+mp2925_identify_vout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp2925_data *data = to_mp2925_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(5, 5), ret)) {
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE,
+ page == 0 ? 3 : 4);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_VR_MULTI_CONFIG);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(GENMASK(5, 5), ret))
+ data->vout_scale[page] = 2560;
+ else
+ data->vout_scale[page] = 5120;
+ } else if (FIELD_GET(GENMASK(4, 4), ret)) {
+ data->vout_scale[page] = 1;
+ } else {
+ data->vout_scale[page] = 512;
+ }
+
+ return 0;
+}
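With MP2925_VOUT_DIV = 512, each stored scale value corresponds to a step of
vout_scale / 512 mV per LSB: 5120 is 10 mV/LSB and 2560 is 5 mV/LSB (the two VID cases
selected through MFR_VR_MULTI_CONFIG), 512 is 1 mV/LSB, and 1 is 1/512 mV/LSB. For a
(made-up) raw READ_VOUT of 360 with scale 2560, the result is 360 * 2560 / 512 = 1800 mV.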
+
+static int mp2925_identify(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ int ret;
+
+ ret = mp2925_identify_vout_scale(client, info, 0);
+ if (ret < 0)
+ return ret;
+
+ return mp2925_identify_vout_scale(client, info, 1);
+}
+
+static const struct pmbus_driver_info mp2925_info = {
+ .pages = MP2925_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+
+ .func[0] = MP2925_RAIL1_FUNC,
+ .func[1] = MP2925_RAIL2_FUNC,
+ .read_word_data = mp2925_read_word_data,
+ .read_byte_data = mp2925_read_byte_data,
+ .write_word_data = mp2925_write_word_data,
+ .identify = mp2925_identify,
+};
+
+static int mp2925_probe(struct i2c_client *client)
+{
+ struct mp2925_data *data;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp2925_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp2925_info, sizeof(mp2925_info));
+
+ return pmbus_do_probe(client, &data->info);
+}
+
+static const struct i2c_device_id mp2925_id[] = {
+ {"mp2925"},
+ {"mp2929"},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp2925_id);
+
+static const struct of_device_id __maybe_unused mp2925_of_match[] = {
+ {.compatible = "mps,mp2925"},
+ {.compatible = "mps,mp2929"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp2925_of_match);
+
+static struct i2c_driver mp2925_driver = {
+ .driver = {
+ .name = "mp2925",
+ .of_match_table = mp2925_of_match,
+ },
+ .probe = mp2925_probe,
+ .id_table = mp2925_id,
+};
+
+module_i2c_driver(mp2925_driver);
+
+MODULE_AUTHOR("Wensheng Wang <wenswang@yeah.net>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP2925");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp29502.c b/drivers/hwmon/pmbus/mp29502.c
new file mode 100644
index 000000000000..7241373f1557
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp29502.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Multi-phase Digital VR Controllers (MP29502)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+#define MFR_VOUT_SCALE_LOOP 0x29
+#define MFR_SVI3_IOUT_PRT 0x67
+#define MFR_READ_PIN_EST 0x94
+#define MFR_READ_IIN_EST 0x95
+#define MFR_VOUT_PROT1 0x3D
+#define MFR_VOUT_PROT2 0x51
+#define MFR_SLOPE_CNT_SET 0xA8
+#define MFR_TSNS_FLT_SET 0xBB
+
+#define MP29502_VIN_OV_GAIN 4
+#define MP29502_TEMP_LIMIT_OFFSET 40
+#define MP29502_READ_VOUT_DIV 1024
+#define MP29502_READ_IOUT_DIV 32
+#define MP29502_IOUT_LIMIT_UINT 8
+#define MP29502_OVUV_LIMIT_SCALE 10
+#define MP29502_VOUT_OV_GAIN 512
+#define MP29502_VOUT_OV_SCALE 40
+#define MP29502_VOUT_UV_OFFSET 36
+#define MP29502_PIN_GAIN 2
+#define MP29502_IIN_DIV 2
+
+#define MP29502_PAGE_NUM 1
+
+#define MP29502_RAIL_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IOUT | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_PIN | \
+ PMBUS_HAVE_IIN | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+struct mp29502_data {
+ struct pmbus_driver_info info;
+ int vout_scale;
+ int vout_bottom_div;
+ int vout_top_div;
+ int ovp_div;
+ int iout_scale;
+};
+
+#define to_mp29502_data(x) container_of(x, struct mp29502_data, info)
+
+static u16 mp29502_reg2data_linear11(u16 word)
+{
+ s16 exponent;
+ s32 mantissa;
+ s64 val;
+
+ exponent = ((s16)word) >> 11;
+ mantissa = ((s16)((word & 0x7ff) << 5)) >> 5;
+ val = mantissa;
+
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ return val;
+}
+
+static int
+mp29502_identify_vout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_VOUT_SCALE_LOOP);
+ if (ret < 0)
+ return ret;
+
+ switch (FIELD_GET(GENMASK(12, 10), ret)) {
+ case 0:
+ data->vout_scale = 6400;
+ break;
+ case 1:
+ data->vout_scale = 5120;
+ break;
+ case 2:
+ data->vout_scale = 2560;
+ break;
+ case 3:
+ data->vout_scale = 2048;
+ break;
+ case 4:
+ data->vout_scale = 1024;
+ break;
+ case 5:
+ data->vout_scale = 4;
+ break;
+ case 6:
+ data->vout_scale = 2;
+ break;
+ case 7:
+ default:
+ data->vout_scale = 1;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+mp29502_identify_vout_divider(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_VOUT_PROT1);
+ if (ret < 0)
+ return ret;
+
+ data->vout_bottom_div = FIELD_GET(GENMASK(11, 0), ret);
+
+ ret = i2c_smbus_read_word_data(client, MFR_VOUT_PROT2);
+ if (ret < 0)
+ return ret;
+
+ data->vout_top_div = FIELD_GET(GENMASK(14, 0), ret);
+
+ return 0;
+}
+
+static int
+mp29502_identify_ovp_divider(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_SLOPE_CNT_SET);
+ if (ret < 0)
+ return ret;
+
+ data->ovp_div = FIELD_GET(GENMASK(9, 0), ret);
+
+ return 0;
+}
+
+static int
+mp29502_identify_iout_scale(struct i2c_client *client, struct pmbus_driver_info *info,
+ int page)
+{
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_SVI3_IOUT_PRT);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & GENMASK(2, 0)) {
+ case 0:
+ case 6:
+ data->iout_scale = 32;
+ break;
+ case 1:
+ data->iout_scale = 1;
+ break;
+ case 2:
+ data->iout_scale = 2;
+ break;
+ case 3:
+ data->iout_scale = 4;
+ break;
+ case 4:
+ data->iout_scale = 8;
+ break;
+ case 5:
+ data->iout_scale = 16;
+ break;
+ default:
+ data->iout_scale = 64;
+ break;
+ }
+
+ return 0;
+}
+
+static int mp29502_read_vout_ov_limit(struct i2c_client *client, struct mp29502_data *data)
+{
+ int ret;
+ int ov_value;
+
+ /*
+ * The vout ov fault limit value comes from the page1
+ * MFR_TSNS_FLT_SET register, while the other telemetry and limit
+ * values come from page0 registers. So the page should be set
+ * back to 0 after the reading of the vout ov limit.
+ */
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_TSNS_FLT_SET);
+ if (ret < 0)
+ return ret;
+
+ ov_value = DIV_ROUND_CLOSEST(FIELD_GET(GENMASK(12, 7), ret) *
+ MP29502_VOUT_OV_GAIN * MP29502_VOUT_OV_SCALE,
+ data->ovp_div);
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ return ov_value;
+}
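As a concrete (made-up) example of the conversion above: with MFR_TSNS_FLT_SET bits 12:7
equal to 20 and ovp_div = 512, the reported limit is 20 * 512 * 40 / 512 = 800 mV.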
+
+static int mp29502_write_vout_ov_limit(struct i2c_client *client, u16 word,
+ struct mp29502_data *data)
+{
+ int ret;
+
+ /*
+ * The vout ov fault limit value comes from the page1
+ * MFR_TSNS_FLT_SET register, while the other telemetry and limit
+ * values come from page0 registers. So the page should be set
+ * back to 0 after the writing of the vout ov limit.
+ */
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_TSNS_FLT_SET);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_word_data(client, MFR_TSNS_FLT_SET,
+ (ret & ~GENMASK(12, 7)) |
+ FIELD_PREP(GENMASK(12, 7),
+ DIV_ROUND_CLOSEST(word * data->ovp_div,
+ MP29502_VOUT_OV_GAIN * MP29502_VOUT_OV_SCALE)));
+
+ return i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+}
+
+static int mp29502_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ ret = PB_VOUT_MODE_DIRECT;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp29502_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ switch (reg) {
+ case PMBUS_STATUS_WORD:
+ ret = -ENODATA;
+ break;
+ case PMBUS_READ_VIN:
+ /*
+ * The MP29502 PMBUS_READ_VIN[10:0] is the vin value with a scale of
+ * 125mV/LSB. The vin scale is also set to 125mV/LSB (using the r/m/b
+ * scale) in the MP29502 pmbus_driver_info struct, so the word data
+ * bit0-bit10 can be returned to the pmbus core directly.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(10, 0), ret);
+ break;
+ case PMBUS_READ_VOUT:
+ /*
+ * The MP29502 PMBUS_READ_VOUT[11:0] is the vout value, which is
+ * calculated based on the vout scale and the vout divider.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(11, 0)) *
+ data->vout_scale *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div),
+ MP29502_READ_VOUT_DIV *
+ data->vout_bottom_div);
+ break;
+ case PMBUS_READ_IIN:
+ /*
+ * The MP29502 MFR_READ_IIN_EST register is in linear11 format, and the
+ * exponent is not a constant value, but the iin scale is set to
+ * 1A/LSB (using the r/m/b scale). As a result, the iin read from the
+ * MP29502 should be converted to A before returning the result to the
+ * pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, MFR_READ_IIN_EST);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(mp29502_reg2data_linear11(ret),
+ MP29502_IIN_DIV);
+ break;
+ case PMBUS_READ_PIN:
+ /*
+ * The MP29502 MFR_READ_PIN_EST register is in linear11 format, and the
+ * exponent is not a constant value, but the pin scale is set to
+ * 1W/LSB (using the r/m/b scale). As a result, the pin read from the
+ * MP29502 should be converted to W before returning the result to the
+ * pmbus core.
+ */
+ ret = pmbus_read_word_data(client, page, phase, MFR_READ_PIN_EST);
+ if (ret < 0)
+ return ret;
+
+ ret = mp29502_reg2data_linear11(ret) * MP29502_PIN_GAIN;
+ break;
+ case PMBUS_READ_POUT:
+ /*
+ * The MP29502 PMBUS_READ_POUT register is in linear11 format, and the
+ * exponent is not a constant value, but the pout scale is set to
+ * 1W/LSB (using the r/m/b scale). As a result, the pout read from the
+ * MP29502 should be converted to W before returning the result to the
+ * pmbus core. The pout is also scaled by the vout divider.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST(mp29502_reg2data_linear11(ret) *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div),
+ data->vout_bottom_div);
+ break;
+ case PMBUS_READ_IOUT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(10, 0)) * data->iout_scale,
+ MP29502_READ_IOUT_DIV);
+ break;
+ case PMBUS_READ_TEMPERATURE_1:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(10, 0), ret);
+ break;
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /*
+ * The MP29502 PMBUS_VIN_OV_FAULT_LIMIT scale is 500mV/LSB, but
+ * the vin scale is set to 125mV/LSB (using the r/m/b scale),
+ * so the word data should be multiplied by 4.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(7, 0), ret) * MP29502_VIN_OV_GAIN;
+ break;
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /*
+ * The MP29502 PMBUS_VIN_UV_WARN_LIMIT and PMBUS_VIN_UV_FAULT_LIMIT
+ * scale is 125mV/LSB, and the vin scale is also set to 125mV/LSB
+ * (using the r/m/b scale), so the word data bit0-bit9 can be
+ * returned to the pmbus core directly.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(GENMASK(9, 0), ret);
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ /*
+ * The MP29502 vout ov fault limit value comes from
+ * page1 MFR_TSNS_FLT_SET[12:7].
+ */
+ ret = mp29502_read_vout_ov_limit(client, data);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((FIELD_GET(GENMASK(8, 0), ret) *
+ MP29502_OVUV_LIMIT_SCALE -
+ MP29502_VOUT_UV_OFFSET) *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div),
+ data->vout_bottom_div);
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(7, 0)) *
+ data->iout_scale *
+ MP29502_IOUT_LIMIT_UINT,
+ MP29502_READ_IOUT_DIV);
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * The scale of MP29502 PMBUS_OT_FAULT_LIMIT and PMBUS_OT_WARN_LIMIT
+ * is 1°C/LSB and they have 40°C offset.
+ */
+ ret = pmbus_read_word_data(client, page, phase, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = (ret & GENMASK(7, 0)) - MP29502_TEMP_LIMIT_OFFSET;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
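To see how the external divider enters the READ_VOUT math above, take (made-up) values
raw = 1024, vout_scale = 1024, vout_top_div = 1000 and vout_bottom_div = 1000:
vout = 1024 * 1024 * (1000 + 4 * 1000) / (1024 * 1000) = 5120 mV, i.e. the sensed
bottom-tap voltage of 1024 mV scaled up by (bottom + 4 * top) / bottom = 5.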
+
+static int mp29502_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp29502_data *data = to_mp29502_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_VIN_OV_FAULT_LIMIT:
+ /*
+ * PMBUS_VIN_OV_FAULT_LIMIT[7:0] holds the limit value, and
+ * bits 8-15 must not be changed. The scale of
+ * PMBUS_VIN_OV_FAULT_LIMIT is 500mV/Lsb, but the vin
+ * scale is set to 125mV/Lsb (using the r/m/b scale), so
+ * the word data must be divided by 4.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ DIV_ROUND_CLOSEST(word,
+ MP29502_VIN_OV_GAIN)));
+ break;
+ case PMBUS_VIN_UV_WARN_LIMIT:
+ case PMBUS_VIN_UV_FAULT_LIMIT:
+ /*
+ * PMBUS_VIN_UV_WARN_LIMIT[9:0] and PMBUS_VIN_UV_FAULT_LIMIT[9:0]
+ * hold the limit value, and bits 10-15 must not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(9, 0)) |
+ FIELD_PREP(GENMASK(9, 0),
+ word));
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ ret = mp29502_write_vout_ov_limit(client, word, data);
+ if (ret < 0)
+ return ret;
+
+ break;
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(8, 0)) |
+ FIELD_PREP(GENMASK(8, 0),
+ DIV_ROUND_CLOSEST(word *
+ data->vout_bottom_div +
+ MP29502_VOUT_UV_OFFSET *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div),
+ MP29502_OVUV_LIMIT_SCALE *
+ (data->vout_bottom_div +
+ 4 * data->vout_top_div))));
+ break;
+ case PMBUS_IOUT_OC_FAULT_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ ret = pmbus_write_word_data(client, page, reg,
+ DIV_ROUND_CLOSEST(word *
+ MP29502_READ_IOUT_DIV,
+ MP29502_IOUT_LIMIT_UINT *
+ data->iout_scale));
+ break;
+ case PMBUS_OT_FAULT_LIMIT:
+ case PMBUS_OT_WARN_LIMIT:
+ /*
+ * PMBUS_OT_FAULT_LIMIT[7:0] and PMBUS_OT_WARN_LIMIT[7:0]
+ * hold the limit value, and bits 8-15 must not be changed.
+ */
+ ret = pmbus_read_word_data(client, page, 0xff, reg);
+ if (ret < 0)
+ return ret;
+
+ ret = pmbus_write_word_data(client, page, reg,
+ (ret & ~GENMASK(7, 0)) |
+ FIELD_PREP(GENMASK(7, 0),
+ word + MP29502_TEMP_LIMIT_OFFSET));
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
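
The PMBUS_VOUT_UV_FAULT_LIMIT write above is the algebraic inverse of the corresponding read: the read path computes value = (reg * SCALE - OFFSET) * (Rbot + 4 * Rtop) / Rbot, and solving that for reg gives exactly the expression passed to FIELD_PREP(). A self-contained restatement with the two conversions side by side; the parameter names are stand-ins for illustration, not the driver's:

/* Illustration only: the UV limit read/write conversions are inverses. */
static int uv_limit_reg2val(int reg, int scale, int off, int rbot, int rtop)
{
	return DIV_ROUND_CLOSEST((reg * scale - off) * (rbot + 4 * rtop), rbot);
}

static int uv_limit_val2reg(int val, int scale, int off, int rbot, int rtop)
{
	return DIV_ROUND_CLOSEST(val * rbot + off * (rbot + 4 * rtop),
				 scale * (rbot + 4 * rtop));
}
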
+
+static int mp29502_identify(struct i2c_client *client, struct pmbus_driver_info *info)
+{
+ int ret;
+
+ /* Identify vout scale */
+ ret = mp29502_identify_vout_scale(client, info, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Identify vout divider. */
+ ret = mp29502_identify_vout_divider(client, info, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify ovp divider. */
+ ret = mp29502_identify_ovp_divider(client, info, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Identify iout scale */
+ return mp29502_identify_iout_scale(client, info, 0);
+}
+
+static const struct pmbus_driver_info mp29502_info = {
+ .pages = MP29502_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_POWER] = direct,
+
+ .m[PSC_VOLTAGE_IN] = 8,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .b[PSC_VOLTAGE_IN] = 0,
+
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+
+ .m[PSC_TEMPERATURE] = 1,
+ .R[PSC_TEMPERATURE] = 0,
+ .b[PSC_TEMPERATURE] = 0,
+
+ .m[PSC_CURRENT_IN] = 1,
+ .R[PSC_CURRENT_IN] = 0,
+ .b[PSC_CURRENT_IN] = 0,
+
+ .m[PSC_CURRENT_OUT] = 1,
+ .R[PSC_CURRENT_OUT] = 0,
+ .b[PSC_CURRENT_OUT] = 0,
+
+ .m[PSC_POWER] = 1,
+ .R[PSC_POWER] = 0,
+ .b[PSC_POWER] = 0,
+
+ .func[0] = MP29502_RAIL_FUNC,
+ .read_word_data = mp29502_read_word_data,
+ .read_byte_data = mp29502_read_byte_data,
+ .write_word_data = mp29502_write_word_data,
+ .identify = mp29502_identify,
+};
+
+static int mp29502_probe(struct i2c_client *client)
+{
+ struct pmbus_driver_info *info;
+ struct mp29502_data *data;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct mp29502_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp29502_info, sizeof(*info));
+ info = &data->info;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct i2c_device_id mp29502_id[] = {
+ {"mp29502", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp29502_id);
+
+static const struct of_device_id __maybe_unused mp29502_of_match[] = {
+ {.compatible = "mps,mp29502"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp29502_of_match);
+
+static struct i2c_driver mp29502_driver = {
+ .driver = {
+ .name = "mp29502",
+ .of_match_table = mp29502_of_match,
+ },
+ .probe = mp29502_probe,
+ .id_table = mp29502_id,
+};
+
+module_i2c_driver(mp29502_driver);
+
+MODULE_AUTHOR("Wensheng Wang <wenswang@yeah.net");
+MODULE_DESCRIPTION("PMBus driver for MPS MP29502");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/pmbus/mp5990.c b/drivers/hwmon/pmbus/mp5990.c
index 4ce381a39480..9a4ee79712cf 100644
--- a/drivers/hwmon/pmbus/mp5990.c
+++ b/drivers/hwmon/pmbus/mp5990.c
@@ -8,6 +8,8 @@
#include <linux/of_device.h>
#include "pmbus.h"
+enum chips { mp5990, mp5998 };
+
#define MP5990_EFUSE_CFG (0xC4)
#define MP5990_VOUT_FORMAT BIT(9)
@@ -110,10 +112,53 @@ static struct pmbus_driver_info mp5990_info = {
.read_word_data = mp5990_read_word_data,
};
+static struct pmbus_driver_info mp5998_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = direct,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = direct,
+ .format[PSC_CURRENT_OUT] = direct,
+ .format[PSC_POWER] = direct,
+ .format[PSC_TEMPERATURE] = direct,
+ .m[PSC_VOLTAGE_IN] = 64,
+ .b[PSC_VOLTAGE_IN] = 0,
+ .R[PSC_VOLTAGE_IN] = 0,
+ .m[PSC_VOLTAGE_OUT] = 64,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .R[PSC_VOLTAGE_OUT] = 0,
+ .m[PSC_CURRENT_IN] = 16,
+ .b[PSC_CURRENT_IN] = 0,
+ .R[PSC_CURRENT_IN] = 0,
+ .m[PSC_CURRENT_OUT] = 16,
+ .b[PSC_CURRENT_OUT] = 0,
+ .R[PSC_CURRENT_OUT] = 0,
+ .m[PSC_POWER] = 2,
+ .b[PSC_POWER] = 0,
+ .R[PSC_POWER] = 0,
+ .m[PSC_TEMPERATURE] = 1,
+ .b[PSC_TEMPERATURE] = 0,
+ .R[PSC_TEMPERATURE] = 0,
+ .func[0] =
+ PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT |
+ PMBUS_HAVE_IIN | PMBUS_HAVE_PIN | PMBUS_HAVE_POUT |
+ PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_IOUT |
+ PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP,
+ .read_byte_data = mp5990_read_byte_data,
+ .read_word_data = mp5990_read_word_data,
+};
+
+static const struct i2c_device_id mp5990_id[] = {
+ {"mp5990", mp5990},
+ {"mp5998", mp5998},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, mp5990_id);
+
static int mp5990_probe(struct i2c_client *client)
{
struct pmbus_driver_info *info;
struct mp5990_data *data;
+ enum chips chip;
int ret;
data = devm_kzalloc(&client->dev, sizeof(struct mp5990_data),
@@ -121,7 +166,15 @@ static int mp5990_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- memcpy(&data->info, &mp5990_info, sizeof(*info));
+ if (client->dev.of_node)
+ chip = (uintptr_t)of_device_get_match_data(&client->dev);
+ else
+ chip = i2c_match_id(mp5990_id, client)->driver_data;
+
+ if (chip == mp5990)
+ memcpy(&data->info, &mp5990_info, sizeof(*info));
+ else
+ memcpy(&data->info, &mp5998_info, sizeof(*info));
info = &data->info;
/* Read Vout Config */
@@ -140,6 +193,9 @@ static int mp5990_probe(struct i2c_client *client)
data->info.format[PSC_VOLTAGE_OUT] = linear;
data->info.format[PSC_CURRENT_OUT] = linear;
data->info.format[PSC_POWER] = linear;
+ if (chip == mp5998)
+ data->info.format[PSC_CURRENT_IN] = linear;
+
ret = i2c_smbus_read_word_data(client, PMBUS_READ_VOUT);
if (ret < 0) {
dev_err(&client->dev, "Can't get vout exponent.");
@@ -153,16 +209,11 @@ static int mp5990_probe(struct i2c_client *client)
}
static const struct of_device_id mp5990_of_match[] = {
- { .compatible = "mps,mp5990" },
+ { .compatible = "mps,mp5990", .data = (void *)mp5990 },
+ { .compatible = "mps,mp5998", .data = (void *)mp5998 },
{}
};
-static const struct i2c_device_id mp5990_id[] = {
- {"mp5990"},
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mp5990_id);
-
static struct i2c_driver mp5990_driver = {
.driver = {
.name = "mp5990",
diff --git a/drivers/hwmon/pmbus/mp9945.c b/drivers/hwmon/pmbus/mp9945.c
new file mode 100644
index 000000000000..34822e0de812
--- /dev/null
+++ b/drivers/hwmon/pmbus/mp9945.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for MPS Single-phase Digital VR Controllers (MP9945)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include "pmbus.h"
+
+#define MFR_VR_MULTI_CONFIG_R1 0x08
+#define MFR_SVID_CFG_R1 0xBD
+
+/* VOUT_MODE register values */
+#define VOUT_MODE_LINEAR16 0x17
+#define VOUT_MODE_VID 0x21
+#define VOUT_MODE_DIRECT 0x40
+
+#define MP9945_PAGE_NUM 1
+
+#define MP9945_RAIL1_FUNC (PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | \
+ PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | \
+ PMBUS_HAVE_PIN | PMBUS_HAVE_POUT | \
+ PMBUS_HAVE_TEMP | \
+ PMBUS_HAVE_STATUS_VOUT | \
+ PMBUS_HAVE_STATUS_IOUT | \
+ PMBUS_HAVE_STATUS_TEMP | \
+ PMBUS_HAVE_STATUS_INPUT)
+
+enum mp9945_vout_mode {
+ MP9945_VOUT_MODE_VID,
+ MP9945_VOUT_MODE_DIRECT,
+ MP9945_VOUT_MODE_LINEAR16,
+};
+
+struct mp9945_data {
+ struct pmbus_driver_info info;
+ enum mp9945_vout_mode vout_mode;
+ int vid_resolution;
+ int vid_offset;
+};
+
+#define to_mp9945_data(x) container_of(x, struct mp9945_data, info)
+
+static int mp9945_read_vout(struct i2c_client *client, struct mp9945_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, PMBUS_READ_VOUT);
+ if (ret < 0)
+ return ret;
+
+ ret &= GENMASK(11, 0);
+
+ switch (data->vout_mode) {
+ case MP9945_VOUT_MODE_VID:
+ if (ret > 0)
+ ret = (ret + data->vid_offset) * data->vid_resolution;
+ break;
+ case MP9945_VOUT_MODE_DIRECT:
+ break;
+ case MP9945_VOUT_MODE_LINEAR16:
+ /* LSB: 1000 * 2^-9 (mV) */
+ ret = DIV_ROUND_CLOSEST(ret * 125, 64);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return ret;
+}
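
In the LINEAR16 branch above, the VOUT exponent is -9, so one LSB is 2^-9 V; expressed in millivolts that is 1000/512 = 125/64, which is exactly the factor in the DIV_ROUND_CLOSEST() call. A standalone restatement of that branch, illustrative only:

/* Illustration: LINEAR16 ticks with exponent -9 converted to millivolts. */
static long mp9945_l16_ticks_to_mv(u16 ticks)
{
	/* 1 LSB = 2^-9 V = 1000/512 mV = 125/64 mV */
	return DIV_ROUND_CLOSEST((long)ticks * 125, 64);
}
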
+
+static int mp9945_read_byte_data(struct i2c_client *client, int page, int reg)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_MODE:
+ /*
+ * Override VOUT_MODE to DIRECT as the driver handles custom
+ * VOUT format conversions internally.
+ */
+ return PB_VOUT_MODE_DIRECT;
+ default:
+ return -ENODATA;
+ }
+}
+
+static int mp9945_read_word_data(struct i2c_client *client, int page, int phase,
+ int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct mp9945_data *data = to_mp9945_data(info);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ switch (reg) {
+ case PMBUS_READ_VOUT:
+ ret = mp9945_read_vout(client, data);
+ break;
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ /* LSB: 1.95 (mV) */
+ ret = DIV_ROUND_CLOSEST((ret & GENMASK(11, 0)) * 39, 20);
+ break;
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ ret = i2c_smbus_read_word_data(client, reg);
+ if (ret < 0)
+ return ret;
+
+ ret &= GENMASK(9, 0);
+ if (ret > 0)
+ ret = (ret + data->vid_offset) * data->vid_resolution;
+ break;
+ default:
+ ret = -ENODATA;
+ break;
+ }
+
+ return ret;
+}
+
+static int mp9945_identify(struct i2c_client *client,
+ struct pmbus_driver_info *info)
+{
+ struct mp9945_data *data = to_mp9945_data(info);
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, PMBUS_VOUT_MODE);
+ if (ret < 0)
+ return ret;
+
+ switch (ret) {
+ case VOUT_MODE_LINEAR16:
+ data->vout_mode = MP9945_VOUT_MODE_LINEAR16;
+ break;
+ case VOUT_MODE_VID:
+ data->vout_mode = MP9945_VOUT_MODE_VID;
+ break;
+ case VOUT_MODE_DIRECT:
+ data->vout_mode = MP9945_VOUT_MODE_DIRECT;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 3);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_word_data(client, MFR_VR_MULTI_CONFIG_R1);
+ if (ret < 0)
+ return ret;
+
+ data->vid_resolution = (FIELD_GET(BIT(2), ret)) ? 5 : 10;
+
+ ret = i2c_smbus_read_word_data(client, MFR_SVID_CFG_R1);
+ if (ret < 0)
+ return ret;
+
+ data->vid_offset = (FIELD_GET(BIT(15), ret)) ? 19 : 49;
+
+ return i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+}
+
+static struct pmbus_driver_info mp9945_info = {
+ .pages = MP9945_PAGE_NUM,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = direct,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+ .m[PSC_VOLTAGE_OUT] = 1,
+ .R[PSC_VOLTAGE_OUT] = 3,
+ .b[PSC_VOLTAGE_OUT] = 0,
+ .func[0] = MP9945_RAIL1_FUNC,
+ .read_word_data = mp9945_read_word_data,
+ .read_byte_data = mp9945_read_byte_data,
+ .identify = mp9945_identify,
+};
+
+static int mp9945_probe(struct i2c_client *client)
+{
+ struct mp9945_data *data;
+ int ret;
+
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memcpy(&data->info, &mp9945_info, sizeof(mp9945_info));
+
+ /*
+ * Set page 0 before probe. The core reads paged registers which are
+ * only on page 0 for this device.
+ */
+ ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0);
+ if (ret < 0)
+ return ret;
+
+ return pmbus_do_probe(client, &data->info);
+}
+
+static const struct i2c_device_id mp9945_id[] = {
+ {"mp9945"},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mp9945_id);
+
+static const struct of_device_id __maybe_unused mp9945_of_match[] = {
+ {.compatible = "mps,mp9945"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, mp9945_of_match);
+
+static struct i2c_driver mp9945_driver = {
+ .driver = {
+ .name = "mp9945",
+ .of_match_table = of_match_ptr(mp9945_of_match),
+ },
+ .probe = mp9945_probe,
+ .id_table = mp9945_id,
+};
+
+module_i2c_driver(mp9945_driver);
+
+MODULE_AUTHOR("Cosmo Chou <chou.cosmo@gmail.com>");
+MODULE_DESCRIPTION("PMBus driver for MPS MP9945");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("PMBUS");
diff --git a/drivers/hwmon/powr1220.c b/drivers/hwmon/powr1220.c
index 5f9ca6543530..06a2c56016d1 100644
--- a/drivers/hwmon/powr1220.c
+++ b/drivers/hwmon/powr1220.c
@@ -16,7 +16,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/delay.h>
#define ADC_STEP_MV 2
@@ -75,7 +74,6 @@ enum powr1220_adc_values {
struct powr1220_data {
struct i2c_client *client;
- struct mutex update_lock;
u8 max_channels;
bool adc_valid[MAX_POWR1220_ADC_VALUES];
/* the next value is in jiffies */
@@ -111,8 +109,6 @@ static int powr1220_read_adc(struct device *dev, int ch_num)
int result;
int adc_range = 0;
- mutex_lock(&data->update_lock);
-
if (time_after(jiffies, data->adc_last_updated[ch_num] + HZ) ||
!data->adc_valid[ch_num]) {
/*
@@ -128,8 +124,8 @@ static int powr1220_read_adc(struct device *dev, int ch_num)
/* set the attenuator and mux */
result = i2c_smbus_write_byte_data(data->client, ADC_MUX,
adc_range | ch_num);
- if (result)
- goto exit;
+ if (result < 0)
+ return result;
/*
* wait at least Tconvert time (200 us) for the
@@ -140,14 +136,14 @@ static int powr1220_read_adc(struct device *dev, int ch_num)
/* get the ADC reading */
result = i2c_smbus_read_byte_data(data->client, ADC_VALUE_LOW);
if (result < 0)
- goto exit;
+ return result;
reading = result >> 4;
/* get the upper half of the reading */
result = i2c_smbus_read_byte_data(data->client, ADC_VALUE_HIGH);
if (result < 0)
- goto exit;
+ return result;
reading |= result << 4;
@@ -163,10 +159,6 @@ static int powr1220_read_adc(struct device *dev, int ch_num)
} else {
result = data->adc_values[ch_num];
}
-
-exit:
- mutex_unlock(&data->update_lock);
-
return result;
}
@@ -302,7 +294,6 @@ static int powr1220_probe(struct i2c_client *client)
break;
}
- mutex_init(&data->update_lock);
data->client = client;
hwmon_dev = devm_hwmon_device_register_with_info(&client->dev,
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index d0fe53451bdf..37269db2de84 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -64,6 +64,7 @@ struct pwm_fan_ctx {
u64 pwm_duty_cycle_from_stopped;
u32 pwm_usec_from_stopped;
+ u8 pwm_shutdown;
};
/* This handler assumes self resetting edge triggered interrupt. */
@@ -484,9 +485,14 @@ static void pwm_fan_cleanup(void *__ctx)
struct pwm_fan_ctx *ctx = __ctx;
timer_delete_sync(&ctx->rpm_timer);
- /* Switch off everything */
- ctx->enable_mode = pwm_disable_reg_disable;
- pwm_fan_power_off(ctx, true);
+ if (ctx->pwm_shutdown) {
+ ctx->enable_mode = pwm_enable_reg_enable;
+ __set_pwm(ctx, ctx->pwm_shutdown);
+ } else {
+ /* Switch off everything */
+ ctx->enable_mode = pwm_disable_reg_disable;
+ pwm_fan_power_off(ctx, true);
+ }
}
static int pwm_fan_probe(struct platform_device *pdev)
@@ -498,6 +504,7 @@ static int pwm_fan_probe(struct platform_device *pdev)
int ret;
const struct hwmon_channel_info **channels;
u32 initial_pwm, pwm_min_from_stopped = 0;
+ u32 pwm_shutdown_percent = 0;
u32 *fan_channel_config;
int channel_count = 1; /* We always have a PWM channel. */
int i;
@@ -648,6 +655,11 @@ static int pwm_fan_probe(struct platform_device *pdev)
channels[1] = &ctx->fan_channel;
}
+ ret = device_property_read_u32(dev, "fan-shutdown-percent",
+ &pwm_shutdown_percent);
+ if (!ret && pwm_shutdown_percent)
+ ctx->pwm_shutdown = (clamp(pwm_shutdown_percent, 0, 100) * 255) / 100;
+
ret = device_property_read_u32(dev, "fan-stop-to-start-percent",
&pwm_min_from_stopped);
if (!ret && pwm_min_from_stopped) {
diff --git a/drivers/hwmon/sa67mcu-hwmon.c b/drivers/hwmon/sa67mcu-hwmon.c
new file mode 100644
index 000000000000..22f703b7b256
--- /dev/null
+++ b/drivers/hwmon/sa67mcu-hwmon.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sa67mcu hardware monitoring driver
+ *
+ * Copyright 2025 Kontron Europe GmbH
+ */
+
+#include <linux/bitfield.h>
+#include <linux/hwmon.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+#define SA67MCU_VOLTAGE(n) (0x00 + ((n) * 2))
+#define SA67MCU_TEMP(n) (0x04 + ((n) * 2))
+
+struct sa67mcu_hwmon {
+ struct regmap *regmap;
+ u32 offset;
+};
+
+static int sa67mcu_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, long *input)
+{
+ struct sa67mcu_hwmon *hwmon = dev_get_drvdata(dev);
+ unsigned int offset;
+ u8 reg[2];
+ int ret;
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ offset = hwmon->offset + SA67MCU_VOLTAGE(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ offset = hwmon->offset + SA67MCU_TEMP(channel);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Reading the low byte will capture the value */
+ ret = regmap_bulk_read(hwmon->regmap, offset, reg, ARRAY_SIZE(reg));
+ if (ret)
+ return ret;
+
+ *input = reg[1] << 8 | reg[0];
+
+ /* Temperatures are s16 and in 0.1degC steps. */
+ if (type == hwmon_temp)
+ *input = sign_extend32(*input, 15) * 100;
+
+ return 0;
+}
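
The bulk read above starts at the low byte, which latches the pair so the two bytes form one coherent sample; temperatures are then a signed 16-bit value in 0.1 °C steps. A standalone restatement of the decode, illustrative only:

/* Illustration of the sa67mcu temperature decode used above. */
static long sa67mcu_temp_mc_sketch(u8 lo, u8 hi)
{
	/* s16 in 0.1 degC steps, scaled to millidegrees Celsius */
	return sign_extend32(hi << 8 | lo, 15) * 100;
}
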
+
+static const struct hwmon_channel_info * const sa67mcu_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT),
+ NULL
+};
+
+static const char *const sa67mcu_hwmon_in_labels[] = {
+ "VDDIN",
+ "VDD_RTC",
+};
+
+static int sa67mcu_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_label:
+ *str = sa67mcu_hwmon_in_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops sa67mcu_hwmon_ops = {
+ .visible = 0444,
+ .read = sa67mcu_hwmon_read,
+ .read_string = sa67mcu_hwmon_read_string,
+};
+
+static const struct hwmon_chip_info sa67mcu_hwmon_chip_info = {
+ .ops = &sa67mcu_hwmon_ops,
+ .info = sa67mcu_hwmon_info,
+};
+
+static int sa67mcu_hwmon_probe(struct platform_device *pdev)
+{
+ struct sa67mcu_hwmon *hwmon;
+ struct device *hwmon_dev;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -ENODEV;
+
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ hwmon->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!hwmon->regmap)
+ return -ENODEV;
+
+ ret = device_property_read_u32(&pdev->dev, "reg", &hwmon->offset);
+ if (ret)
+ return -EINVAL;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "sa67mcu_hwmon", hwmon,
+ &sa67mcu_hwmon_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ dev_err(&pdev->dev, "failed to register as hwmon device");
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id sa67mcu_hwmon_of_match[] = {
+ { .compatible = "kontron,sa67mcu-hwmon", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sa67mcu_hwmon_of_match);
+
+static struct platform_driver sa67mcu_hwmon_driver = {
+ .probe = sa67mcu_hwmon_probe,
+ .driver = {
+ .name = "sa67mcu-hwmon",
+ .of_match_table = sa67mcu_hwmon_of_match,
+ },
+};
+module_platform_driver(sa67mcu_hwmon_driver);
+
+MODULE_DESCRIPTION("sa67mcu Hardware Monitoring Driver");
+MODULE_AUTHOR("Michael Walle <mwalle@kernel.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/sbtsi_temp.c b/drivers/hwmon/sbtsi_temp.c
index 3c839f56c460..c5b2488c4c7f 100644
--- a/drivers/hwmon/sbtsi_temp.c
+++ b/drivers/hwmon/sbtsi_temp.c
@@ -12,8 +12,8 @@
#include <linux/init.h>
#include <linux/hwmon.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/bitfield.h>
/*
* SB-TSI registers only support SMBus byte data access. "_INT" registers are
@@ -29,15 +29,30 @@
#define SBTSI_REG_TEMP_HIGH_DEC 0x13 /* RW */
#define SBTSI_REG_TEMP_LOW_DEC 0x14 /* RW */
+/*
+ * Bit selecting the temperature measurement range used for reported values.
+ * bit == 0: Use the default temperature range (0C to 255.875C).
+ * bit == 1: Use the extended temperature range (-49C to +206.875C).
+ */
+#define SBTSI_CONFIG_EXT_RANGE_SHIFT 2
+/*
+ * ReadOrder bit specifies the reading order of integer and decimal part of
+ * CPU temperature for atomic reads. If bit == 0, reading integer part triggers
+ * latching of the decimal part, so integer part should be read first.
+ * If bit == 1, read order should be reversed.
+ */
#define SBTSI_CONFIG_READ_ORDER_SHIFT 5
+#define SBTSI_TEMP_EXT_RANGE_ADJ 49000
+
#define SBTSI_TEMP_MIN 0
#define SBTSI_TEMP_MAX 255875
/* Each client has this additional data */
struct sbtsi_data {
struct i2c_client *client;
- struct mutex lock;
+ bool ext_range_mode;
+ bool read_order;
};
/*
@@ -74,42 +89,24 @@ static int sbtsi_read(struct device *dev, enum hwmon_sensor_types type,
{
struct sbtsi_data *data = dev_get_drvdata(dev);
s32 temp_int, temp_dec;
- int err;
switch (attr) {
case hwmon_temp_input:
- /*
- * ReadOrder bit specifies the reading order of integer and
- * decimal part of CPU temp for atomic reads. If bit == 0,
- * reading integer part triggers latching of the decimal part,
- * so integer part should be read first. If bit == 1, read
- * order should be reversed.
- */
- err = i2c_smbus_read_byte_data(data->client, SBTSI_REG_CONFIG);
- if (err < 0)
- return err;
-
- mutex_lock(&data->lock);
- if (err & BIT(SBTSI_CONFIG_READ_ORDER_SHIFT)) {
+ if (data->read_order) {
temp_dec = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_DEC);
temp_int = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_INT);
} else {
temp_int = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_INT);
temp_dec = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_DEC);
}
- mutex_unlock(&data->lock);
break;
case hwmon_temp_max:
- mutex_lock(&data->lock);
temp_int = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_HIGH_INT);
temp_dec = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_HIGH_DEC);
- mutex_unlock(&data->lock);
break;
case hwmon_temp_min:
- mutex_lock(&data->lock);
temp_int = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_LOW_INT);
temp_dec = i2c_smbus_read_byte_data(data->client, SBTSI_REG_TEMP_LOW_DEC);
- mutex_unlock(&data->lock);
break;
default:
return -EINVAL;
@@ -122,6 +119,8 @@ static int sbtsi_read(struct device *dev, enum hwmon_sensor_types type,
return temp_dec;
*val = sbtsi_reg_to_mc(temp_int, temp_dec);
+ if (data->ext_range_mode)
+ *val -= SBTSI_TEMP_EXT_RANGE_ADJ;
return 0;
}
@@ -146,18 +145,16 @@ static int sbtsi_write(struct device *dev, enum hwmon_sensor_types type,
return -EINVAL;
}
+ if (data->ext_range_mode)
+ val += SBTSI_TEMP_EXT_RANGE_ADJ;
val = clamp_val(val, SBTSI_TEMP_MIN, SBTSI_TEMP_MAX);
sbtsi_mc_to_reg(val, &temp_int, &temp_dec);
- mutex_lock(&data->lock);
err = i2c_smbus_write_byte_data(data->client, reg_int, temp_int);
if (err)
- goto exit;
+ return err;
- err = i2c_smbus_write_byte_data(data->client, reg_dec, temp_dec);
-exit:
- mutex_unlock(&data->lock);
- return err;
+ return i2c_smbus_write_byte_data(data->client, reg_dec, temp_dec);
}
static umode_t sbtsi_is_visible(const void *data,
@@ -203,16 +200,22 @@ static int sbtsi_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct sbtsi_data *data;
+ int err;
data = devm_kzalloc(dev, sizeof(struct sbtsi_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
- mutex_init(&data->lock);
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, &sbtsi_chip_info,
- NULL);
+ err = i2c_smbus_read_byte_data(data->client, SBTSI_REG_CONFIG);
+ if (err < 0)
+ return err;
+ data->ext_range_mode = FIELD_GET(BIT(SBTSI_CONFIG_EXT_RANGE_SHIFT), err);
+ data->read_order = FIELD_GET(BIT(SBTSI_CONFIG_READ_ORDER_SHIFT), err);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data,
+ &sbtsi_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
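
sbtsi_reg_to_mc(), called above, lies outside this hunk; it combines the integer and decimal temperature registers into millidegrees Celsius. A sketch consistent with the hunk, assuming SB-TSI's usual layout where the decimal register carries eighths of a degree in bits [7:5]; the driver applies the extended-range offset after conversion, as the hunk shows, but the sketch folds both steps together for brevity:

static long sbtsi_reg_to_mc_sketch(s32 temp_int, s32 temp_dec, bool ext_range)
{
	/* Assumed layout: whole degrees plus 0.125 degC steps in bits [7:5] */
	long mc = temp_int * 1000 + (temp_dec >> 5) * 125;

	/* Extended-range readings carry the -49 degC offset from the hunk */
	if (ext_range)
		mc -= SBTSI_TEMP_EXT_RANGE_ADJ;

	return mc;
}
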
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index 71941b1bb573..98e075e54e9d 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -544,10 +544,8 @@ void sch56xx_watchdog_register(struct device *parent, u16 addr, u32 revision,
watchdog_set_drvdata(&data->wddev, data);
err = devm_watchdog_register_device(parent, &data->wddev);
- if (err) {
- pr_err("Registering watchdog chardev: %d\n", err);
+ if (err)
devm_kfree(parent, data);
- }
}
EXPORT_SYMBOL(sch56xx_watchdog_register);
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 364199b332c0..eec223d174c0 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -240,6 +240,8 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
const struct hwmon_channel_info **ptr_scmi_ci;
const struct scmi_handle *handle = sdev->handle;
struct scmi_protocol_handle *ph;
+ u32 sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
+ SCMI_SENS_CFG_SENSOR_ENABLE);
if (!handle)
return -ENODEV;
@@ -339,6 +341,13 @@ static int scmi_hwmon_probe(struct scmi_device *sdev)
if (!sensor)
continue;
+ ret = sensor_ops->config_set(ph, i, sensor_config);
+ if (ret) {
+ dev_err(dev, "Error enabling sensor %s. err=%d\n",
+ sensor->name, ret);
+ continue;
+ }
+
/*
* Warn on any misconfiguration related to thermal zones but
* bail out of probing only on memory errors.
diff --git a/drivers/hwmon/sfctemp.c b/drivers/hwmon/sfctemp.c
index fb1da93383d7..b78b2c099a12 100644
--- a/drivers/hwmon/sfctemp.c
+++ b/drivers/hwmon/sfctemp.c
@@ -10,7 +10,6 @@
#include <linux/hwmon.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -49,8 +48,6 @@
#define SFCTEMP_K1000 81100L
struct sfctemp {
- /* serialize access to hardware register and enabled below */
- struct mutex lock;
void __iomem *regs;
struct clk *clk_sense;
struct clk *clk_bus;
@@ -92,15 +89,14 @@ static void sfctemp_stop(struct sfctemp *sfctemp)
static int sfctemp_enable(struct sfctemp *sfctemp)
{
- int ret = 0;
+ int ret;
- mutex_lock(&sfctemp->lock);
if (sfctemp->enabled)
- goto done;
+ return 0;
ret = clk_prepare_enable(sfctemp->clk_bus);
if (ret)
- goto err;
+ return ret;
ret = reset_control_deassert(sfctemp->rst_bus);
if (ret)
goto err_disable_bus;
@@ -115,9 +111,7 @@ static int sfctemp_enable(struct sfctemp *sfctemp)
sfctemp_power_up(sfctemp);
sfctemp_run(sfctemp);
sfctemp->enabled = true;
-done:
- mutex_unlock(&sfctemp->lock);
- return ret;
+ return 0;
err_disable_sense:
clk_disable_unprepare(sfctemp->clk_sense);
@@ -125,16 +119,13 @@ err_assert_bus:
reset_control_assert(sfctemp->rst_bus);
err_disable_bus:
clk_disable_unprepare(sfctemp->clk_bus);
-err:
- mutex_unlock(&sfctemp->lock);
return ret;
}
static int sfctemp_disable(struct sfctemp *sfctemp)
{
- mutex_lock(&sfctemp->lock);
if (!sfctemp->enabled)
- goto done;
+ return 0;
sfctemp_stop(sfctemp);
sfctemp_power_down(sfctemp);
@@ -143,8 +134,6 @@ static int sfctemp_disable(struct sfctemp *sfctemp)
reset_control_assert(sfctemp->rst_bus);
clk_disable_unprepare(sfctemp->clk_bus);
sfctemp->enabled = false;
-done:
- mutex_unlock(&sfctemp->lock);
return 0;
}
@@ -155,22 +144,14 @@ static void sfctemp_disable_action(void *data)
static int sfctemp_convert(struct sfctemp *sfctemp, long *val)
{
- int ret;
-
- mutex_lock(&sfctemp->lock);
- if (!sfctemp->enabled) {
- ret = -ENODATA;
- goto out;
- }
+ if (!sfctemp->enabled)
+ return -ENODATA;
/* calculate temperature in milli Celcius */
*val = (long)((readl(sfctemp->regs) & SFCTEMP_DOUT_MSK) >> SFCTEMP_DOUT_POS)
* SFCTEMP_Y1000 / SFCTEMP_Z - SFCTEMP_K1000;
- ret = 0;
-out:
- mutex_unlock(&sfctemp->lock);
- return ret;
+ return 0;
}
static umode_t sfctemp_is_visible(const void *data, enum hwmon_sensor_types type,
@@ -263,7 +244,6 @@ static int sfctemp_probe(struct platform_device *pdev)
return -ENOMEM;
dev_set_drvdata(dev, sfctemp);
- mutex_init(&sfctemp->lock);
sfctemp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sfctemp->regs))
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 97327313529b..627d35070a42 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -275,13 +275,26 @@ static int sht21_probe(struct i2c_client *client)
/* Device ID table */
static const struct i2c_device_id sht21_id[] = {
+ { "sht20" },
{ "sht21" },
+ { "sht25" },
{ }
};
MODULE_DEVICE_TABLE(i2c, sht21_id);
+static const struct of_device_id sht21_of_match[] = {
+ { .compatible = "sensirion,sht20" },
+ { .compatible = "sensirion,sht21" },
+ { .compatible = "sensirion,sht25" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sht21_of_match);
+
static struct i2c_driver sht21_driver = {
- .driver.name = "sht21",
+ .driver = {
+ .name = "sht21",
+ .of_match_table = sht21_of_match,
+ },
.probe = sht21_probe,
.id_table = sht21_id,
};
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
index 557ad3e7752a..f36c0229328f 100644
--- a/drivers/hwmon/sht3x.c
+++ b/drivers/hwmon/sht3x.c
@@ -291,24 +291,26 @@ out:
return data;
}
-static int temp1_input_read(struct device *dev)
+static int temp1_input_read(struct device *dev, long *temp)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return data->temperature;
+ *temp = data->temperature;
+ return 0;
}
-static int humidity1_input_read(struct device *dev)
+static int humidity1_input_read(struct device *dev, long *humidity)
{
struct sht3x_data *data = sht3x_update_client(dev);
if (IS_ERR(data))
return PTR_ERR(data);
- return data->humidity;
+ *humidity = data->humidity;
+ return 0;
}
/*
@@ -706,6 +708,7 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
enum sht3x_limits index;
+ int ret;
switch (type) {
case hwmon_chip:
@@ -720,10 +723,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
- *val = temp1_input_read(dev);
- break;
+ return temp1_input_read(dev, val);
case hwmon_temp_alarm:
- *val = temp1_alarm_read(dev);
+ ret = temp1_alarm_read(dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
break;
case hwmon_temp_max:
index = limit_max;
@@ -748,10 +753,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_humidity:
switch (attr) {
case hwmon_humidity_input:
- *val = humidity1_input_read(dev);
- break;
+ return humidity1_input_read(dev, val);
case hwmon_humidity_alarm:
- *val = humidity1_alarm_read(dev);
+ ret = humidity1_alarm_read(dev);
+ if (ret < 0)
+ return ret;
+ *val = ret;
break;
case hwmon_humidity_max:
index = limit_max;
diff --git a/drivers/hwmon/sht4x.c b/drivers/hwmon/sht4x.c
index 6c9b776237c2..5abe1227e109 100644
--- a/drivers/hwmon/sht4x.c
+++ b/drivers/hwmon/sht4x.c
@@ -55,7 +55,6 @@ DECLARE_CRC8_TABLE(sht4x_crc8_table);
/**
* struct sht4x_data - All the data required to operate an SHT4X chip
* @client: the i2c client associated with the SHT4X
- * @lock: a mutex that is used to prevent parallel access to the i2c client
* @heating_complete: the time that the last heating finished
* @data_pending: true if and only if there are measurements to retrieve after heating
* @heater_power: the power at which the heater will be started
@@ -68,7 +67,6 @@ DECLARE_CRC8_TABLE(sht4x_crc8_table);
*/
struct sht4x_data {
struct i2c_client *client;
- struct mutex lock; /* atomic read data updates */
unsigned long heating_complete; /* in jiffies */
bool data_pending;
u32 heater_power; /* in milli-watts */
@@ -87,7 +85,7 @@ struct sht4x_data {
*/
static int sht4x_read_values(struct sht4x_data *data)
{
- int ret = 0;
+ int ret;
u16 t_ticks, rh_ticks;
unsigned long next_update;
struct i2c_client *client = data->client;
@@ -96,8 +94,6 @@ static int sht4x_read_values(struct sht4x_data *data)
u8 raw_data[SHT4X_RESPONSE_LENGTH];
unsigned long curr_jiffies;
- mutex_lock(&data->lock);
-
curr_jiffies = jiffies;
if (time_before(curr_jiffies, data->heating_complete))
msleep(jiffies_to_msecs(data->heating_complete - curr_jiffies));
@@ -110,11 +106,11 @@ static int sht4x_read_values(struct sht4x_data *data)
msecs_to_jiffies(data->update_interval);
if (data->valid && time_before_eq(jiffies, next_update))
- goto unlock;
+ return 0;
ret = i2c_master_send(client, cmd, SHT4X_CMD_LEN);
if (ret < 0)
- goto unlock;
+ return ret;
usleep_range(SHT4X_MEAS_DELAY_HPM, SHT4X_MEAS_DELAY_HPM + SHT4X_DELAY_EXTRA);
}
@@ -123,7 +119,7 @@ static int sht4x_read_values(struct sht4x_data *data)
if (ret != SHT4X_RESPONSE_LENGTH) {
if (ret >= 0)
ret = -ENODATA;
- goto unlock;
+ return ret;
}
t_ticks = raw_data[0] << 8 | raw_data[1];
@@ -132,26 +128,20 @@ static int sht4x_read_values(struct sht4x_data *data)
crc = crc8(sht4x_crc8_table, &raw_data[0], SHT4X_WORD_LEN, CRC8_INIT_VALUE);
if (crc != raw_data[2]) {
dev_err(&client->dev, "data integrity check failed\n");
- ret = -EIO;
- goto unlock;
+ return -EIO;
}
crc = crc8(sht4x_crc8_table, &raw_data[3], SHT4X_WORD_LEN, CRC8_INIT_VALUE);
if (crc != raw_data[5]) {
dev_err(&client->dev, "data integrity check failed\n");
- ret = -EIO;
- goto unlock;
+ return -EIO;
}
data->temperature = ((21875 * (int32_t)t_ticks) >> 13) - 45000;
data->humidity = ((15625 * (int32_t)rh_ticks) >> 13) - 6000;
data->last_updated = jiffies;
data->valid = true;
- ret = 0;
-
-unlock:
- mutex_unlock(&data->lock);
- return ret;
+ return 0;
}
static ssize_t sht4x_interval_write(struct sht4x_data *data, long val)
@@ -287,22 +277,16 @@ static ssize_t heater_enable_store(struct device *dev,
heating_time_bound = 1100;
}
- mutex_lock(&data->lock);
-
- if (time_before(jiffies, data->heating_complete)) {
- ret = -EBUSY;
- goto unlock;
- }
+ if (time_before(jiffies, data->heating_complete))
+ return -EBUSY;
ret = i2c_master_send(data->client, &cmd, SHT4X_CMD_LEN);
if (ret < 0)
- goto unlock;
+ return ret;
data->heating_complete = jiffies + msecs_to_jiffies(heating_time_bound);
data->data_pending = true;
-unlock:
- mutex_unlock(&data->lock);
- return ret;
+ return 0;
}
static ssize_t heater_power_show(struct device *dev,
@@ -422,8 +406,6 @@ static int sht4x_probe(struct i2c_client *client)
data->heater_time = 1000;
data->heating_complete = jiffies;
- mutex_init(&data->lock);
-
crc8_populate_msb(sht4x_crc8_table, SHT4X_CRC8_POLYNOMIAL);
ret = i2c_master_send(client, cmd, SHT4X_CMD_LEN);
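
The two conversion lines retained in this hunk implement Sensirion's datasheet formulas T = -45 + 175 * t_ticks / 65535 (in °C) and RH = -6 + 125 * rh_ticks / 65535 (in %RH), scaled to millidegrees and milli-percent, with the 65535 divisor approximated by 65536 so the division reduces to a shift: 175000 / 65536 = 21875 / 8192 (multiply by 21875, shift right 13), and likewise 125000 / 65536 = 15625 / 8192. A standalone restatement, for illustration:

/* Illustration of the fixed-point conversions in sht4x_read_values(). */
static void sht4x_convert(u16 t_ticks, u16 rh_ticks, s32 *temp_mc, s32 *hum_mpct)
{
	*temp_mc = ((21875 * (s32)t_ticks) >> 13) - 45000;
	*hum_mpct = ((15625 * (s32)rh_ticks) >> 13) - 6000;
}
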
diff --git a/drivers/hwmon/sy7636a-hwmon.c b/drivers/hwmon/sy7636a-hwmon.c
index ed110884786b..d51daaf63d63 100644
--- a/drivers/hwmon/sy7636a-hwmon.c
+++ b/drivers/hwmon/sy7636a-hwmon.c
@@ -66,18 +66,13 @@ static const struct hwmon_chip_info sy7636a_chip_info = {
static int sy7636a_sensor_probe(struct platform_device *pdev)
{
struct regmap *regmap = dev_get_regmap(pdev->dev.parent, NULL);
- struct regulator *regulator;
struct device *hwmon_dev;
int err;
if (!regmap)
return -EPROBE_DEFER;
- regulator = devm_regulator_get(&pdev->dev, "vcom");
- if (IS_ERR(regulator))
- return PTR_ERR(regulator);
-
- err = regulator_enable(regulator);
+ err = devm_regulator_get_enable(&pdev->dev, "vcom");
if (err)
return err;
@@ -104,3 +99,4 @@ module_platform_driver(sy7636a_sensor_driver);
MODULE_DESCRIPTION("SY7636A sensor driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sy7636a-temperature");
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index a02daa496c9c..5b10c395a84d 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -10,9 +10,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/regmap.h>
@@ -53,6 +51,7 @@
#define CONVERSION_TIME_MS 35 /* in milli-seconds */
struct tmp102 {
+ const char *label;
struct regmap *regmap;
u16 config_orig;
unsigned long ready_time;
@@ -70,6 +69,16 @@ static inline u16 tmp102_mC_to_reg(int val)
return (val * 128) / 1000;
}
+static int tmp102_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct tmp102 *tmp102 = dev_get_drvdata(dev);
+
+ *str = tmp102->label;
+
+ return 0;
+}
+
static int tmp102_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *temp)
{
@@ -128,12 +137,18 @@ static int tmp102_write(struct device *dev, enum hwmon_sensor_types type,
static umode_t tmp102_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
+ const struct tmp102 *tmp102 = data;
+
if (type != hwmon_temp)
return 0;
switch (attr) {
case hwmon_temp_input:
return 0444;
+ case hwmon_temp_label:
+ if (tmp102->label)
+ return 0444;
+ return 0;
case hwmon_temp_max_hyst:
case hwmon_temp_max:
return 0644;
@@ -146,12 +161,13 @@ static const struct hwmon_channel_info * const tmp102_info[] = {
HWMON_CHANNEL_INFO(chip,
HWMON_C_REGISTER_TZ),
HWMON_CHANNEL_INFO(temp,
- HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST),
+ HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | HWMON_T_MAX_HYST),
NULL
};
static const struct hwmon_ops tmp102_hwmon_ops = {
.is_visible = tmp102_is_visible,
+ .read_string = tmp102_read_string,
.read = tmp102_read,
.write = tmp102_write,
};
@@ -213,6 +229,8 @@ static int tmp102_probe(struct i2c_client *client)
if (!tmp102)
return -ENOMEM;
+ of_property_read_string(dev->of_node, "label", &tmp102->label);
+
i2c_set_clientdata(client, tmp102);
tmp102->regmap = devm_regmap_init_i2c(client, &tmp102_regmap_config);
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index f271a03e05ae..221bba8a215d 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -14,11 +14,8 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/device.h>
-#include <linux/jiffies.h>
#include <linux/regmap.h>
#define TMP103_TEMP_REG 0x00
diff --git a/drivers/hwmon/tmp108.c b/drivers/hwmon/tmp108.c
index a971ff628435..60a237cbedbc 100644
--- a/drivers/hwmon/tmp108.c
+++ b/drivers/hwmon/tmp108.c
@@ -10,7 +10,6 @@
#include <linux/hwmon.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/i2c.h>
#include <linux/i3c/device.h>
#include <linux/init.h>
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index 02c5a3bb1071..fbaa34973694 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -24,7 +24,6 @@
#include <linux/hwmon.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -107,7 +106,6 @@ MODULE_DEVICE_TABLE(i2c, tmp401_id);
struct tmp401_data {
struct i2c_client *client;
struct regmap *regmap;
- struct mutex update_lock;
enum chips kind;
bool extended_range;
@@ -357,7 +355,6 @@ static int tmp401_temp_write(struct device *dev, u32 attr, int channel,
unsigned int regval;
int reg, ret, temp;
- mutex_lock(&data->update_lock);
switch (attr) {
case hwmon_temp_min:
case hwmon_temp_max:
@@ -386,7 +383,6 @@ static int tmp401_temp_write(struct device *dev, u32 attr, int channel,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&data->update_lock);
return ret;
}
@@ -436,7 +432,6 @@ static int tmp401_chip_write(struct device *dev, u32 attr, int channel, long val
struct regmap *regmap = data->regmap;
int err;
- mutex_lock(&data->update_lock);
switch (attr) {
case hwmon_chip_update_interval:
err = tmp401_set_convrate(regmap, val);
@@ -456,8 +451,6 @@ static int tmp401_chip_write(struct device *dev, u32 attr, int channel, long val
err = -EOPNOTSUPP;
break;
}
- mutex_unlock(&data->update_lock);
-
return err;
}
@@ -685,7 +678,6 @@ static int tmp401_probe(struct i2c_client *client)
return -ENOMEM;
data->client = client;
- mutex_init(&data->update_lock);
data->kind = (uintptr_t)i2c_get_match_data(client);
data->regmap = devm_regmap_init(dev, NULL, data, &tmp401_regmap_config);
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 9537727aad9a..2ea9d3e9553d 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -19,7 +19,6 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/sysfs.h>
@@ -99,7 +98,6 @@ struct tmp421_channel {
struct tmp421_data {
struct i2c_client *client;
- struct mutex update_lock;
u32 temp_config[MAX_CHANNELS + 1];
struct hwmon_channel_info temp_info;
const struct hwmon_channel_info *info[2];
@@ -130,38 +128,28 @@ static int tmp421_update_device(struct tmp421_data *data)
int ret = 0;
int i;
- mutex_lock(&data->update_lock);
-
if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
!data->valid) {
+ data->valid = false;
ret = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
if (ret < 0)
- goto exit;
+ return ret;
data->config = ret;
for (i = 0; i < data->channels; i++) {
ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
if (ret < 0)
- goto exit;
+ return ret;
data->channel[i].temp = ret << 8;
ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
if (ret < 0)
- goto exit;
+ return ret;
data->channel[i].temp |= ret;
}
data->last_updated = jiffies;
data->valid = true;
}
-
-exit:
- mutex_unlock(&data->update_lock);
-
- if (ret < 0) {
- data->valid = false;
- return ret;
- }
-
return 0;
}
@@ -262,7 +250,6 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_temp_fault:
case hwmon_temp_input:
- return 0444;
case hwmon_temp_label:
return 0444;
case hwmon_temp_enable:
@@ -381,7 +368,11 @@ static int tmp421_probe_child_from_dt(struct i2c_client *client,
return -EINVAL;
}
- of_property_read_string(child, "label", &data->channel[i].label);
+ err = of_property_read_string(child, "label", &data->channel[i].label);
+ if (err == -ENODATA || err == -EILSEQ) {
+ dev_err(dev, "invalid label property in %pOFn\n", child);
+ return err;
+ }
if (data->channel[i].label)
data->temp_config[i] |= HWMON_T_LABEL;
@@ -442,7 +433,6 @@ static int tmp421_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- mutex_init(&data->update_lock);
data->channels = (unsigned long)i2c_get_match_data(client);
data->client = client;
diff --git a/drivers/hwmon/tmp464.c b/drivers/hwmon/tmp464.c
index 0f629c6d7695..98f2576d94c6 100644
--- a/drivers/hwmon/tmp464.c
+++ b/drivers/hwmon/tmp464.c
@@ -13,7 +13,6 @@
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -92,7 +91,6 @@ struct tmp464_channel {
struct tmp464_data {
struct regmap *regmap;
- struct mutex update_lock;
int channels;
s16 config_orig;
u16 open_reg;
@@ -172,19 +170,16 @@ static int tmp464_temp_read(struct device *dev, u32 attr, int channel, long *val
* complete. That means we have to cache the value internally
* for one measurement cycle and report the cached value.
*/
- mutex_lock(&data->update_lock);
if (!data->valid || time_after(jiffies, data->last_updated +
msecs_to_jiffies(data->update_interval))) {
err = regmap_read(regmap, TMP464_REMOTE_OPEN_REG, &regval);
if (err < 0)
- goto unlock;
+ break;
data->open_reg = regval;
data->last_updated = jiffies;
data->valid = true;
}
*val = !!(data->open_reg & BIT(channel + 7));
-unlock:
- mutex_unlock(&data->update_lock);
break;
case hwmon_temp_max_hyst:
regs[0] = TMP464_THERM_LIMIT[channel];
@@ -345,8 +340,6 @@ static int tmp464_write(struct device *dev, enum hwmon_sensor_types type,
struct tmp464_data *data = dev_get_drvdata(dev);
int err;
- mutex_lock(&data->update_lock);
-
switch (type) {
case hwmon_chip:
err = tmp464_chip_write(data, attr, channel, val);
@@ -359,8 +352,6 @@ static int tmp464_write(struct device *dev, enum hwmon_sensor_types type,
break;
}
- mutex_unlock(&data->update_lock);
-
return err;
}
@@ -658,8 +649,6 @@ static int tmp464_probe(struct i2c_client *client)
if (!data)
return -ENOMEM;
- mutex_init(&data->update_lock);
-
data->channels = (int)(unsigned long)i2c_get_match_data(client);
data->regmap = devm_regmap_init_i2c(client, &tmp464_regmap_config);
diff --git a/drivers/hwmon/tsc1641.c b/drivers/hwmon/tsc1641.c
new file mode 100644
index 000000000000..2b5d34bab146
--- /dev/null
+++ b/drivers/hwmon/tsc1641.c
@@ -0,0 +1,748 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for ST Microelectronics TSC1641 I2C power monitor
+ *
+ * 60 V, 16-bit high-precision power monitor with I2C and MIPI I3C interface
+ * Datasheet: https://www.st.com/resource/en/datasheet/tsc1641.pdf
+ *
+ * Copyright (C) 2025 Igor Reznichenko <igor@reznichenko.net>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/sysfs.h>
+#include <linux/util_macros.h>
+
+/* I2C registers */
+#define TSC1641_CONFIG 0x00
+#define TSC1641_SHUNT_VOLTAGE 0x01
+#define TSC1641_LOAD_VOLTAGE 0x02
+#define TSC1641_POWER 0x03
+#define TSC1641_CURRENT 0x04
+#define TSC1641_TEMP 0x05
+#define TSC1641_MASK 0x06
+#define TSC1641_FLAG 0x07
+#define TSC1641_RSHUNT 0x08 /* Shunt resistance */
+#define TSC1641_SOL 0x09
+#define TSC1641_SUL 0x0A
+#define TSC1641_LOL 0x0B
+#define TSC1641_LUL 0x0C
+#define TSC1641_POL 0x0D
+#define TSC1641_TOL 0x0E
+#define TSC1641_MANUF_ID 0xFE /* 0x0006 */
+#define TSC1641_DIE_ID 0xFF /* 0x1000 */
+#define TSC1641_MAX_REG 0xFF
+
+#define TSC1641_RSHUNT_DEFAULT 1000 /* 1mOhm */
+#define TSC1641_CONFIG_DEFAULT 0x003F /* Default mode and temperature sensor */
+#define TSC1641_MASK_DEFAULT 0xFC00 /* Unmask all alerts */
+
+/* Bit mask for conversion time in the configuration register */
+#define TSC1641_CONV_TIME_MASK GENMASK(7, 4)
+
+#define TSC1641_CONV_TIME_DEFAULT 1024
+#define TSC1641_MIN_UPDATE_INTERVAL 1024
+
+/* LSB value of different registers */
+#define TSC1641_VLOAD_LSB_MVOLT 2
+#define TSC1641_POWER_LSB_UWATT 25000
+#define TSC1641_VSHUNT_LSB_NVOLT 2500 /* Use nanovolts to make it integer */
+#define TSC1641_RSHUNT_LSB_UOHM 10
+#define TSC1641_TEMP_LSB_MDEGC 500
+
+/* Limits based on datasheet */
+#define TSC1641_RSHUNT_MIN_UOHM 100
+#define TSC1641_RSHUNT_MAX_UOHM 655350
+#define TSC1641_CURR_ABS_MAX_MAMP 819200 /* Max current at 100uOhm */
+
+#define TSC1641_ALERT_POL_MASK BIT(1)
+#define TSC1641_ALERT_LATCH_EN_MASK BIT(0)
+
+/* Flags indicating alerts in the TSC1641_FLAG register */
+#define TSC1641_SAT_FLAG BIT(13)
+#define TSC1641_SHUNT_OV_FLAG BIT(6)
+#define TSC1641_SHUNT_UV_FLAG BIT(5)
+#define TSC1641_LOAD_OV_FLAG BIT(4)
+#define TSC1641_LOAD_UV_FLAG BIT(3)
+#define TSC1641_POWER_OVER_FLAG BIT(2)
+#define TSC1641_TEMP_OVER_FLAG BIT(1)
+
+static bool tsc1641_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TSC1641_CONFIG:
+ case TSC1641_MASK:
+ case TSC1641_RSHUNT:
+ case TSC1641_SOL:
+ case TSC1641_SUL:
+ case TSC1641_LOL:
+ case TSC1641_LUL:
+ case TSC1641_POL:
+ case TSC1641_TOL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tsc1641_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TSC1641_SHUNT_VOLTAGE:
+ case TSC1641_LOAD_VOLTAGE:
+ case TSC1641_POWER:
+ case TSC1641_CURRENT:
+ case TSC1641_TEMP:
+ case TSC1641_FLAG:
+ case TSC1641_MANUF_ID:
+ case TSC1641_DIE_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tsc1641_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .use_single_write = true,
+ .use_single_read = true,
+ .max_register = TSC1641_MAX_REG,
+ .cache_type = REGCACHE_MAPLE,
+ .volatile_reg = tsc1641_volatile_reg,
+ .writeable_reg = tsc1641_writeable_reg,
+};
+
+struct tsc1641_data {
+ long rshunt_uohm;
+ long current_lsb_ua;
+ struct regmap *regmap;
+};
+
+/*
+ * The upper limit comes from the chip's 16-bit shunt register; the lower
+ * limit prevents overflow of the current and power registers.
+ */
+static inline int tsc1641_validate_shunt(u32 val)
+{
+ if (val < TSC1641_RSHUNT_MIN_UOHM || val > TSC1641_RSHUNT_MAX_UOHM)
+ return -EINVAL;
+ return 0;
+}
+
+static int tsc1641_set_shunt(struct tsc1641_data *data, u32 val)
+{
+ struct regmap *regmap = data->regmap;
+ long rshunt_reg;
+
+ /* The RSHUNT register LSB is 10uOhm, so scale the value down accordingly */
+ rshunt_reg = DIV_ROUND_CLOSEST(val, TSC1641_RSHUNT_LSB_UOHM);
+ /*
+ * Round the value to the nearest multiple of TSC1641_RSHUNT_LSB_UOHM
+ * in case the shunt value provided was not a multiple.
+ */
+ data->rshunt_uohm = rshunt_reg * TSC1641_RSHUNT_LSB_UOHM;
+ data->current_lsb_ua = DIV_ROUND_CLOSEST(TSC1641_VSHUNT_LSB_NVOLT * 1000,
+ data->rshunt_uohm);
+
+ return regmap_write(regmap, TSC1641_RSHUNT, rshunt_reg);
+}
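
tsc1641_set_shunt() derives the current LSB from Ohm's law: one shunt-voltage count is 2500 nV, so dividing by the shunt resistance in micro-ohms yields microamps per count (the extra * 1000 aligns the units). For the 1 mOhm default shunt that is 2500 * 1000 / 1000 = 2500 uA, i.e. 2.5 mA per count. A restatement of the same arithmetic, illustrative only:

/* Illustration: microamps per current-register count for a given shunt. */
static long tsc1641_current_lsb_ua_sketch(long rshunt_uohm)
{
	/* 2500 nV/count divided by R[uOhm] = (2500 * 1000 / R[uOhm]) uA/count */
	return DIV_ROUND_CLOSEST(TSC1641_VSHUNT_LSB_NVOLT * 1000L, rshunt_uohm);
}
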
+
+/*
+ * Conversion times in us; the value in CONFIG[CT3:CT0] is an index into this array.
+ * See "Table 14. CT3 to CT0: conversion time" in:
+ * https://www.st.com/resource/en/datasheet/tsc1641.pdf
+ */
+static const int tsc1641_conv_times[] = { 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768 };
+
+static int tsc1641_reg_to_upd_interval(u16 config)
+{
+ int idx = FIELD_GET(TSC1641_CONV_TIME_MASK, config);
+
+ idx = clamp_val(idx, 0, ARRAY_SIZE(tsc1641_conv_times) - 1);
+ int conv_time = tsc1641_conv_times[idx];
+
+ /* hwmon expresses update intervals in milliseconds, so enforce a ~1 ms floor */
+ conv_time = max(conv_time, TSC1641_MIN_UPDATE_INTERVAL);
+ /* Return nearest value in milliseconds */
+ return DIV_ROUND_CLOSEST(conv_time, 1000);
+}
+
+static u16 tsc1641_upd_interval_to_reg(long interval)
+{
+ /* Supported interval is 1ms - 33ms */
+ interval = clamp_val(interval, 1, 33);
+
+ int conv = interval * 1000;
+ int conv_bits = find_closest(conv, tsc1641_conv_times,
+ ARRAY_SIZE(tsc1641_conv_times));
+
+ return FIELD_PREP(TSC1641_CONV_TIME_MASK, conv_bits);
+}
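
The two helpers above form a quantized round trip: tsc1641_upd_interval_to_reg() clamps the requested interval to 1-33 ms and uses find_closest() to pick the nearest CT3:CT0 conversion time, while tsc1641_reg_to_upd_interval() reports back the nearest millisecond of whatever was stored. Worked example: a 3 ms request becomes 3000 us, whose closest table entry is 2048 us (952 us away, versus 1096 us for 4096 us), so the stored setting subsequently reads back as 2 ms.
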
+
+static int tsc1641_chip_write(struct device *dev, u32 attr, long val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return regmap_update_bits(data->regmap, TSC1641_CONFIG,
+ TSC1641_CONV_TIME_MASK,
+ tsc1641_upd_interval_to_reg(val));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsc1641_chip_read(struct device *dev, u32 attr, long *val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ u32 regval;
+ int ret;
+
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ ret = regmap_read(data->regmap, TSC1641_CONFIG, &regval);
+ if (ret)
+ return ret;
+
+ *val = tsc1641_reg_to_upd_interval(regval);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsc1641_flag_read(struct regmap *regmap, u32 flag, long *val)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read_bypassed(regmap, TSC1641_FLAG, &regval);
+ if (ret)
+ return ret;
+
+ *val = !!(regval & flag);
+ return 0;
+}
+
+static int tsc1641_in_read(struct device *dev, u32 attr, long *val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int ret, reg;
+ long sat_flag;
+
+ switch (attr) {
+ case hwmon_in_input:
+ reg = TSC1641_LOAD_VOLTAGE;
+ break;
+ case hwmon_in_min:
+ reg = TSC1641_LUL;
+ break;
+ case hwmon_in_max:
+ reg = TSC1641_LOL;
+ break;
+ case hwmon_in_min_alarm:
+ return tsc1641_flag_read(regmap, TSC1641_LOAD_UV_FLAG, val);
+ case hwmon_in_max_alarm:
+ return tsc1641_flag_read(regmap, TSC1641_LOAD_OV_FLAG, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ /* Check if load voltage is out of range */
+ if (reg == TSC1641_LOAD_VOLTAGE) {
+ /* Register is 15-bit max */
+ if (regval & 0x8000)
+ return -ENODATA;
+
+ ret = tsc1641_flag_read(regmap, TSC1641_SAT_FLAG, &sat_flag);
+ if (ret)
+ return ret;
+ /* Out of range conditions per datasheet */
+ if (sat_flag && (regval == 0x7FFF || !regval))
+ return -ENODATA;
+ }
+
+ *val = regval * TSC1641_VLOAD_LSB_MVOLT;
+ return 0;
+}
+
+/* Chip supports bidirectional (positive or negative) current */
+static int tsc1641_curr_read(struct device *dev, u32 attr, long *val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ int regval;
+ int ret, reg;
+ long sat_flag;
+
+ /* Current limits are the shunt under/over voltage limits */
+ switch (attr) {
+ case hwmon_curr_input:
+ reg = TSC1641_CURRENT;
+ break;
+ case hwmon_curr_min:
+ reg = TSC1641_SUL;
+ break;
+ case hwmon_curr_max:
+ reg = TSC1641_SOL;
+ break;
+ case hwmon_curr_min_alarm:
+ return tsc1641_flag_read(regmap, TSC1641_SHUNT_UV_FLAG, val);
+ case hwmon_curr_max_alarm:
+ return tsc1641_flag_read(regmap, TSC1641_SHUNT_OV_FLAG, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ /*
+ * Current is derived from the shunt voltage, so check whether that is
+ * out of range. We report the current register in sysfs to stay
+ * consistent with the internal power calculations, which use current
+ * register values.
+ */
+ if (reg == TSC1641_CURRENT) {
+ ret = regmap_read(regmap, TSC1641_SHUNT_VOLTAGE, &regval);
+ if (ret)
+ return ret;
+
+ ret = tsc1641_flag_read(regmap, TSC1641_SAT_FLAG, &sat_flag);
+ if (ret)
+ return ret;
+
+ if (sat_flag && (regval == 0x7FFF || regval == 0x8000))
+ return -ENODATA;
+ }
+
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ /* Current in milliamps, signed */
+ *val = DIV_ROUND_CLOSEST((s16)regval * data->current_lsb_ua, 1000);
+ return 0;
+}
+
+static int tsc1641_power_read(struct device *dev, u32 attr, long *val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int ret, reg;
+
+ switch (attr) {
+ case hwmon_power_input:
+ reg = TSC1641_POWER;
+ break;
+ case hwmon_power_max:
+ reg = TSC1641_POL;
+ break;
+ case hwmon_power_max_alarm:
+ return tsc1641_flag_read(regmap, TSC1641_POWER_OVER_FLAG, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ *val = regval * TSC1641_POWER_LSB_UWATT;
+ return 0;
+}
+
+static int tsc1641_temp_read(struct device *dev, u32 attr, long *val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int ret, reg;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = TSC1641_TEMP;
+ break;
+ case hwmon_temp_max:
+ reg = TSC1641_TOL;
+ break;
+ case hwmon_temp_max_alarm:
+ return tsc1641_flag_read(regmap, TSC1641_TEMP_OVER_FLAG, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = regmap_read(regmap, reg, &regval);
+ if (ret)
+ return ret;
+
+ /* 0x8000 means that the TEMP measurement is not enabled */
+ if (reg == TSC1641_TEMP && regval == 0x8000)
+ return -ENODATA;
+
+ /* Both temperature and limit registers are signed */
+ *val = (s16)regval * TSC1641_TEMP_LSB_MDEGC;
+ return 0;
+}
+
+static int tsc1641_in_write(struct device *dev, u32 attr, long val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+ int reg;
+
+ switch (attr) {
+ case hwmon_in_min:
+ reg = TSC1641_LUL;
+ break;
+ case hwmon_in_max:
+ reg = TSC1641_LOL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ /* Clamp to full register range */
+ val = clamp_val(val, 0, TSC1641_VLOAD_LSB_MVOLT * USHRT_MAX);
+ regval = DIV_ROUND_CLOSEST(val, TSC1641_VLOAD_LSB_MVOLT);
+
+ return regmap_write(regmap, reg, regval);
+}
+
+static int tsc1641_curr_write(struct device *dev, u32 attr, long val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ int reg, regval;
+
+ switch (attr) {
+ case hwmon_curr_min:
+ reg = TSC1641_SUL;
+ break;
+ case hwmon_curr_max:
+ reg = TSC1641_SOL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ /* Clamp to prevent over/underflow below */
+ val = clamp_val(val, -TSC1641_CURR_ABS_MAX_MAMP, TSC1641_CURR_ABS_MAX_MAMP);
+ /* Convert val in milliamps to register */
+ regval = DIV_ROUND_CLOSEST(val * 1000, data->current_lsb_ua);
+ /*
+ * Prevent signed 16-bit overflow.
+ * Integer arithmetic and shunt scaling can quantize values near 0x7FFF/0x8000,
+ * so reading and writing back may not preserve the exact original register value.
+ */
+ regval = clamp_val(regval, SHRT_MIN, SHRT_MAX);
+ /* SUL and SOL registers are signed */
+ return regmap_write(regmap, reg, regval & 0xFFFF);
+}
+
+static int tsc1641_power_write(struct device *dev, u32 attr, long val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ unsigned int regval;
+
+ switch (attr) {
+ case hwmon_power_max:
+ /* Clamp to full register range */
+ val = clamp_val(val, 0, TSC1641_POWER_LSB_UWATT * USHRT_MAX);
+ regval = DIV_ROUND_CLOSEST(val, TSC1641_POWER_LSB_UWATT);
+ return regmap_write(regmap, TSC1641_POL, regval);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsc1641_temp_write(struct device *dev, u32 attr, long val)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ int regval;
+
+ switch (attr) {
+ case hwmon_temp_max:
+ /* Clamp to full register range */
+ val = clamp_val(val, TSC1641_TEMP_LSB_MDEGC * SHRT_MIN,
+ TSC1641_TEMP_LSB_MDEGC * SHRT_MAX);
+ regval = DIV_ROUND_CLOSEST(val, TSC1641_TEMP_LSB_MDEGC);
+ /* TOL register is signed */
+ return regmap_write(regmap, TSC1641_TOL, regval & 0xFFFF);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t tsc1641_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
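+ /* 0644: user-writable limit or setting; 0444: read-only value or alarm */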
+ switch (type) {
+ case hwmon_chip:
+ switch (attr) {
+ case hwmon_chip_update_interval:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ return 0444;
+ case hwmon_in_min:
+ case hwmon_in_max:
+ return 0644;
+ case hwmon_in_min_alarm:
+ case hwmon_in_max_alarm:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_curr:
+ switch (attr) {
+ case hwmon_curr_input:
+ return 0444;
+ case hwmon_curr_min:
+ case hwmon_curr_max:
+ return 0644;
+ case hwmon_curr_min_alarm:
+ case hwmon_curr_max_alarm:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ return 0444;
+ case hwmon_power_max:
+ return 0644;
+ case hwmon_power_max_alarm:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_max:
+ return 0644;
+ case hwmon_temp_max_alarm:
+ return 0444;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int tsc1641_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return tsc1641_chip_read(dev, attr, val);
+ case hwmon_in:
+ return tsc1641_in_read(dev, attr, val);
+ case hwmon_curr:
+ return tsc1641_curr_read(dev, attr, val);
+ case hwmon_power:
+ return tsc1641_power_read(dev, attr, val);
+ case hwmon_temp:
+ return tsc1641_temp_read(dev, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsc1641_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_chip:
+ return tsc1641_chip_write(dev, attr, val);
+ case hwmon_in:
+ return tsc1641_in_write(dev, attr, val);
+ case hwmon_curr:
+ return tsc1641_curr_write(dev, attr, val);
+ case hwmon_power:
+ return tsc1641_power_write(dev, attr, val);
+ case hwmon_temp:
+ return tsc1641_temp_write(dev, attr, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info * const tsc1641_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_UPDATE_INTERVAL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MAX_ALARM |
+ HWMON_I_MIN | HWMON_I_MIN_ALARM),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_MAX_ALARM |
+ HWMON_C_MIN | HWMON_C_MIN_ALARM),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_MAX | HWMON_P_MAX_ALARM),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_ALARM),
+ NULL
+};
+
+static ssize_t shunt_resistor_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%li\n", data->rshunt_uohm);
+}
+
+static ssize_t shunt_resistor_store(struct device *dev,
+ struct device_attribute *da,
+ const char *buf, size_t count)
+{
+ struct tsc1641_data *data = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = tsc1641_validate_shunt(val);
+ if (ret < 0)
+ return ret;
+
+ ret = tsc1641_set_shunt(data, val);
+ if (ret < 0)
+ return ret;
+ return count;
+}
+
+static const struct hwmon_ops tsc1641_hwmon_ops = {
+ .is_visible = tsc1641_is_visible,
+ .read = tsc1641_read,
+ .write = tsc1641_write,
+};
+
+static const struct hwmon_chip_info tsc1641_chip_info = {
+ .ops = &tsc1641_hwmon_ops,
+ .info = tsc1641_info,
+};
+
+static DEVICE_ATTR_RW(shunt_resistor);
+
+/* Shunt resistor value is exposed via sysfs attribute */
+static struct attribute *tsc1641_attrs[] = {
+ &dev_attr_shunt_resistor.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(tsc1641);
+
+static int tsc1641_init(struct device *dev, struct tsc1641_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ bool active_high;
+ u32 shunt;
+ int ret;
+
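+ /* Fall back to the default shunt resistance when the property is absent */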
+ if (device_property_read_u32(dev, "shunt-resistor-micro-ohms", &shunt) < 0)
+ shunt = TSC1641_RSHUNT_DEFAULT;
+
+ if (tsc1641_validate_shunt(shunt) < 0) {
+ dev_err(dev, "invalid shunt resistor value %u\n", shunt);
+ return -EINVAL;
+ }
+
+ ret = tsc1641_set_shunt(data, shunt);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(regmap, TSC1641_CONFIG, TSC1641_CONFIG_DEFAULT);
+ if (ret < 0)
+ return ret;
+
+ active_high = device_property_read_bool(dev, "st,alert-polarity-active-high");
+
+ return regmap_write(regmap, TSC1641_MASK, TSC1641_MASK_DEFAULT |
+ FIELD_PREP(TSC1641_ALERT_POL_MASK, active_high));
+}
+
+static int tsc1641_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct tsc1641_data *data;
+ struct device *hwmon_dev;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = devm_regmap_init_i2c(client, &tsc1641_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "failed to allocate register map\n");
+
+ ret = tsc1641_init(dev, data);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to configure device\n");
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data, &tsc1641_chip_info, tsc1641_groups);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
+ client->name, data->rshunt_uohm);
+
+ return 0;
+}
+
+static const struct i2c_device_id tsc1641_id[] = {
+ { "tsc1641", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tsc1641_id);
+
+static const struct of_device_id __maybe_unused tsc1641_of_match[] = {
+ { .compatible = "st,tsc1641" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tsc1641_of_match);
+
+static struct i2c_driver tsc1641_driver = {
+ .driver = {
+ .name = "tsc1641",
+ .of_match_table = of_match_ptr(tsc1641_of_match),
+ },
+ .probe = tsc1641_probe,
+ .id_table = tsc1641_id,
+};
+
+module_i2c_driver(tsc1641_driver);
+
+MODULE_AUTHOR("Igor Reznichenko <igor@reznichenko.net>");
+MODULE_DESCRIPTION("TSC1641 power monitor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 386edea6b69e..1e52cabd6e24 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -142,9 +142,15 @@ struct vt1211_data {
* in5 (ix = 5) is special. It's the internal 3.3V so it's scaled in the
* driver according to the VT1211 BIOS porting guide
*/
-#define IN_FROM_REG(ix, reg) ((reg) < 3 ? 0 : (ix) == 5 ? \
- (((reg) - 3) * 15882 + 479) / 958 : \
- (((reg) - 3) * 10000 + 479) / 958)
+static int in_from_reg(int ix, int reg)
+{
+ if (reg < 3)
+ return 0;
+ if (ix == 5)
+ return ((reg - 3) * 15882 + 479) / 958;
+ return ((reg - 3) * 10000 + 479) / 958;
+}
+
#define IN_TO_REG(ix, val) (clamp_val((ix) == 5 ? \
((val) * 958 + 7941) / 15882 + 3 : \
((val) * 958 + 5000) / 10000 + 3, 0, 255))
@@ -156,10 +162,15 @@ struct vt1211_data {
* temp3-7 are thermistor based so the driver returns the voltage measured at
* the pin (range 0V - 2.2V).
*/
-#define TEMP_FROM_REG(ix, reg) ((ix) == 0 ? (reg) * 1000 : \
- (ix) == 1 ? (reg) < 51 ? 0 : \
- ((reg) - 51) * 1000 : \
- ((253 - (reg)) * 2200 + 105) / 210)
+static int temp_from_reg(int ix, int reg)
+{
+ if (ix == 0)
+ return reg * 1000;
+ if (ix == 1)
+ return reg < 51 ? 0 : (reg - 51) * 1000;
+ return ((253 - reg) * 2200 + 105) / 210;
+}
+
#define TEMP_TO_REG(ix, val) clamp_val( \
((ix) == 0 ? ((val) + 500) / 1000 : \
(ix) == 1 ? ((val) + 500) / 1000 + 51 : \
@@ -167,8 +178,14 @@ struct vt1211_data {
#define DIV_FROM_REG(reg) (1 << (reg))
-#define RPM_FROM_REG(reg, div) (((reg) == 0) || ((reg) == 255) ? 0 : \
- 1310720 / (reg) / DIV_FROM_REG(div))
+static int rpm_from_reg(int reg, int div)
+{
+ if (reg == 0 || reg == 255)
+ return 0;
+
+ return 1310720 / reg / DIV_FROM_REG(div);
+}
+
#define RPM_TO_REG(val, div) ((val) == 0 ? 255 : \
clamp_val((1310720 / (val) / \
DIV_FROM_REG(div)), 1, 254))
@@ -343,13 +360,13 @@ static ssize_t show_in(struct device *dev, struct device_attribute *attr,
switch (fn) {
case SHOW_IN_INPUT:
- res = IN_FROM_REG(ix, data->in[ix]);
+ res = in_from_reg(ix, data->in[ix]);
break;
case SHOW_SET_IN_MIN:
- res = IN_FROM_REG(ix, data->in_min[ix]);
+ res = in_from_reg(ix, data->in_min[ix]);
break;
case SHOW_SET_IN_MAX:
- res = IN_FROM_REG(ix, data->in_max[ix]);
+ res = in_from_reg(ix, data->in_max[ix]);
break;
case SHOW_IN_ALARM:
res = (data->alarms >> bitalarmin[ix]) & 1;
@@ -417,13 +434,13 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
switch (fn) {
case SHOW_TEMP_INPUT:
- res = TEMP_FROM_REG(ix, data->temp[ix]);
+ res = temp_from_reg(ix, data->temp[ix]);
break;
case SHOW_SET_TEMP_MAX:
- res = TEMP_FROM_REG(ix, data->temp_max[ix]);
+ res = temp_from_reg(ix, data->temp_max[ix]);
break;
case SHOW_SET_TEMP_MAX_HYST:
- res = TEMP_FROM_REG(ix, data->temp_hyst[ix]);
+ res = temp_from_reg(ix, data->temp_hyst[ix]);
break;
case SHOW_TEMP_ALARM:
res = (data->alarms >> bitalarmtemp[ix]) & 1;
@@ -493,10 +510,10 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
switch (fn) {
case SHOW_FAN_INPUT:
- res = RPM_FROM_REG(data->fan[ix], data->fan_div[ix]);
+ res = rpm_from_reg(data->fan[ix], data->fan_div[ix]);
break;
case SHOW_SET_FAN_MIN:
- res = RPM_FROM_REG(data->fan_min[ix], data->fan_div[ix]);
+ res = rpm_from_reg(data->fan_min[ix], data->fan_div[ix]);
break;
case SHOW_SET_FAN_DIV:
res = DIV_FROM_REG(data->fan_div[ix]);
@@ -751,7 +768,7 @@ static ssize_t show_pwm_auto_point_temp(struct device *dev,
int ix = sensor_attr_2->index;
int ap = sensor_attr_2->nr;
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->pwm_ctl[ix] & 7,
+ return sprintf(buf, "%d\n", temp_from_reg(data->pwm_ctl[ix] & 7,
data->pwm_auto_temp[ap]));
}
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 3bf27c21845b..5757a0979f3f 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -138,7 +138,12 @@ static inline u8 FAN_TO_REG(long rpm, int div)
return clamp_val(1310720 / (rpm * div), 1, 255);
}
-#define FAN_FROM_REG(val, div) ((val) == 0 ? 0 : 1310720 / ((val) * (div)))
+static int fan_from_reg(int val, int div)
+{
+ if (val == 0)
+ return 0;
+ return 1310720 / (val * div);
+}
struct vt8231_data {
unsigned short addr;
@@ -561,7 +566,7 @@ static ssize_t fan_show(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct vt8231_data *data = vt8231_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
+ return sprintf(buf, "%d\n", fan_from_reg(data->fan[nr],
DIV_FROM_REG(data->fan_div[nr])));
}
@@ -571,7 +576,7 @@ static ssize_t fan_min_show(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct vt8231_data *data = vt8231_update_device(dev);
- return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
+ return sprintf(buf, "%d\n", fan_from_reg(data->fan_min[nr],
DIV_FROM_REG(data->fan_div[nr])));
}
@@ -613,9 +618,8 @@ static ssize_t fan_div_store(struct device *dev,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
unsigned long val;
int nr = sensor_attr->index;
- int old = vt8231_read_value(data, VT8231_REG_FANDIV);
- long min = FAN_FROM_REG(data->fan_min[nr],
- DIV_FROM_REG(data->fan_div[nr]));
+ int old;
+ long min;
int err;
err = kstrtoul(buf, 10, &val);
@@ -623,6 +627,8 @@ static ssize_t fan_div_store(struct device *dev,
return err;
mutex_lock(&data->update_lock);
+ old = vt8231_read_value(data, VT8231_REG_FANDIV);
+ min = fan_from_reg(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
switch (val) {
case 1:
data->fan_div[nr] = 0;
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 076200ed2ec9..f664c2152a6d 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -1850,10 +1850,12 @@ w83781d_isa_found(unsigned short address)
}
}
-#define REALLY_SLOW_IO
/*
* We need the timeouts for at least some W83781D-like
* chips. But only if we read 'undefined' registers.
+ * There used to be a "#define REALLY_SLOW_IO" to enforce that, but
+ * it has had no effect for more than a decade, so it has been
+ * dropped.
*/
val = inb_p(address + 1);
if (inb_p(address + 2) != val
@@ -1862,7 +1864,6 @@ w83781d_isa_found(unsigned short address)
pr_debug("Detection failed at step %d\n", 1);
goto release;
}
-#undef REALLY_SLOW_IO
/*
* We should be able to change the 7 LSB of the address port. The
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index ace854b370a0..996e36951f9d 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -218,9 +218,14 @@ static u8 fan_to_reg(long rpm, int div)
return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
-#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
- ((val) == 255 ? 0 : \
- 1350000 / ((val) * (div))))
+static int fan_from_reg(int val, int div)
+{
+ if (val == 0)
+ return -1;
+ if (val == 255)
+ return 0;
+ return 1350000 / (val * div);
+}
/* for temp1 which is 8-bit resolution, LSB = 1 degree Celsius */
#define TEMP1_FROM_REG(val) ((val) * 1000)
@@ -521,7 +526,7 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
struct w83791d_data *data = w83791d_update_device(dev); \
int nr = sensor_attr->index; \
return sprintf(buf, "%d\n", \
- FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
+ fan_from_reg(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
}
show_fan_reg(fan);
@@ -585,10 +590,10 @@ static ssize_t store_fan_div(struct device *dev, struct device_attribute *attr,
if (err)
return err;
+ mutex_lock(&data->update_lock);
/* Save fan_min */
- min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
+ min = fan_from_reg(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
- mutex_lock(&data->update_lock);
data->fan_div[nr] = div_to_reg(nr, val);
switch (nr) {
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 9b81bd406e05..1d9109ca1585 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -76,15 +76,25 @@ FAN_TO_REG(long rpm, int div)
return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
-#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
- ((val) == 255 ? 0 : \
- 1350000 / ((val) * (div))))
+static int fan_from_reg(int val, int div)
+{
+ if (val == 0)
+ return -1;
+ if (val == 255)
+ return 0;
+ return 1350000 / (val * div);
+}
/* for temp */
#define TEMP_TO_REG(val) (clamp_val(((val) < 0 ? (val) + 0x100 * 1000 \
: (val)) / 1000, 0, 0xff))
-#define TEMP_FROM_REG(val) (((val) & 0x80 ? \
- (val) - 0x100 : (val)) * 1000)
+
+static int temp_from_reg(int val)
+{
+ if (val & 0x80)
+ return (val - 0x100) * 1000;
+ return val * 1000;
+}
/*
* The analog voltage inputs have 8mV LSB. Since the sysfs output is
@@ -280,7 +290,7 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
int nr = to_sensor_dev_attr(attr)->index; \
struct w83l786ng_data *data = w83l786ng_update_device(dev); \
return sprintf(buf, "%d\n", \
- FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
+ fan_from_reg(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
}
show_fan_reg(fan);
@@ -347,7 +357,7 @@ store_fan_div(struct device *dev, struct device_attribute *attr,
/* Save fan_min */
mutex_lock(&data->update_lock);
- min = FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
+ min = fan_from_reg(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr]));
data->fan_div[nr] = DIV_TO_REG(val);
@@ -409,7 +419,7 @@ show_temp(struct device *dev, struct device_attribute *attr, char *buf)
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83l786ng_data *data = w83l786ng_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr][index]));
+ return sprintf(buf, "%d\n", temp_from_reg(data->temp[nr][index]));
}
static ssize_t
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index f064e3d172b3..6a4239ebb582 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -268,4 +268,16 @@ config CORESIGHT_KUNIT_TESTS
Enable Coresight unit tests. Only useful for development and not
intended for production.
+config CORESIGHT_TNOC
+ tristate "Coresight Trace Network On Chip driver"
+ help
+ This driver provides support for the Trace Network On Chip (TNOC)
+ component. TNOC is an interconnect used to collect traces from various
+ subsystems and transport them to a CoreSight trace sink. It sits in
+ the different tiles of a SoC, aggregates the trace local to each tile,
+ and transports it to another tile or, eventually, to a CoreSight trace sink.
+
+ To compile this driver as a module, choose M here: the module will be
+ called coresight-tnoc.
+
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 4e7cc3c5bf99..ab16d06783a5 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
coresight-replicator.o
+obj-$(CONFIG_CORESIGHT_TNOC) += coresight-tnoc.o
obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o
coresight-etm3x-y := coresight-etm3x-core.o coresight-etm-cp14.o \
coresight-etm3x-sysfs.o
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index 5058432233da..69b36bae97ab 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -397,7 +397,7 @@ static int catu_wait_for_ready(struct catu_drvdata *drvdata)
}
static int catu_enable_hw(struct catu_drvdata *drvdata, enum cs_mode cs_mode,
- void *data)
+ struct coresight_path *path)
{
int rc;
u32 control, mode;
@@ -425,7 +425,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, enum cs_mode cs_mode,
etrdev = coresight_find_input_type(
csdev->pdata, CORESIGHT_DEV_TYPE_SINK, etr_subtype);
if (etrdev) {
- etr_buf = tmc_etr_get_buffer(etrdev, cs_mode, data);
+ etr_buf = tmc_etr_get_buffer(etrdev, cs_mode, path);
if (IS_ERR(etr_buf))
return PTR_ERR(etr_buf);
}
@@ -455,7 +455,7 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, enum cs_mode cs_mode,
}
static int catu_enable(struct coresight_device *csdev, enum cs_mode mode,
- void *data)
+ struct coresight_path *path)
{
int rc = 0;
struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
@@ -463,7 +463,7 @@ static int catu_enable(struct coresight_device *csdev, enum cs_mode mode,
guard(raw_spinlock_irqsave)(&catu_drvdata->spinlock);
if (csdev->refcnt == 0) {
CS_UNLOCK(catu_drvdata->base);
- rc = catu_enable_hw(catu_drvdata, mode, data);
+ rc = catu_enable_hw(catu_drvdata, mode, path);
CS_LOCK(catu_drvdata->base);
}
if (!rc)
@@ -488,7 +488,7 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
return rc;
}
-static int catu_disable(struct coresight_device *csdev, void *__unused)
+static int catu_disable(struct coresight_device *csdev, struct coresight_path *path)
{
int rc = 0;
struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
@@ -515,11 +515,21 @@ static int __catu_probe(struct device *dev, struct resource *res)
{
int ret = 0;
u32 dma_mask;
- struct catu_drvdata *drvdata = dev_get_drvdata(dev);
+ struct catu_drvdata *drvdata;
struct coresight_desc catu_desc;
struct coresight_platform_data *pdata = NULL;
void __iomem *base;
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, drvdata);
+
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
+
catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
if (!catu_desc.name)
return -ENOMEM;
@@ -576,14 +586,8 @@ out:
static int catu_probe(struct amba_device *adev, const struct amba_id *id)
{
- struct catu_drvdata *drvdata;
int ret;
- drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- amba_set_drvdata(adev, drvdata);
ret = __catu_probe(&adev->dev, &adev->res);
if (!ret)
pm_runtime_put(&adev->dev);
@@ -623,29 +627,16 @@ static struct amba_driver catu_driver = {
static int catu_platform_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct catu_drvdata *drvdata;
int ret = 0;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
-
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- dev_set_drvdata(&pdev->dev, drvdata);
ret = __catu_probe(&pdev->dev, res);
pm_runtime_put(&pdev->dev);
- if (ret) {
+ if (ret)
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
- }
return ret;
}
@@ -659,8 +650,6 @@ static void catu_platform_remove(struct platform_device *pdev)
__catu_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_PM
@@ -668,18 +657,26 @@ static int catu_runtime_suspend(struct device *dev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
+
return 0;
}
static int catu_runtime_resume(struct device *dev)
{
struct catu_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
index 755776cd19c5..6e6b7aac206d 100644
--- a/drivers/hwtracing/coresight/coresight-catu.h
+++ b/drivers/hwtracing/coresight/coresight-catu.h
@@ -62,6 +62,7 @@
struct catu_drvdata {
struct clk *pclk;
+ struct clk *atclk;
void __iomem *base;
struct coresight_device *csdev;
int irq;
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index fa758cc21827..c660cf8adb1c 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -3,6 +3,8 @@
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*/
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/build_bug.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -298,9 +300,10 @@ unlock:
EXPORT_SYMBOL_GPL(coresight_add_helper);
static int coresight_enable_sink(struct coresight_device *csdev,
- enum cs_mode mode, void *data)
+ enum cs_mode mode,
+ struct coresight_path *path)
{
- return sink_ops(csdev)->enable(csdev, mode, data);
+ return sink_ops(csdev)->enable(csdev, mode, path);
}
static void coresight_disable_sink(struct coresight_device *csdev)
@@ -353,17 +356,20 @@ static bool coresight_is_helper(struct coresight_device *csdev)
}
static int coresight_enable_helper(struct coresight_device *csdev,
- enum cs_mode mode, void *data)
+ enum cs_mode mode,
+ struct coresight_path *path)
{
- return helper_ops(csdev)->enable(csdev, mode, data);
+ return helper_ops(csdev)->enable(csdev, mode, path);
}
-static void coresight_disable_helper(struct coresight_device *csdev, void *data)
+static void coresight_disable_helper(struct coresight_device *csdev,
+ struct coresight_path *path)
{
- helper_ops(csdev)->disable(csdev, data);
+ helper_ops(csdev)->disable(csdev, path);
}
-static void coresight_disable_helpers(struct coresight_device *csdev, void *data)
+static void coresight_disable_helpers(struct coresight_device *csdev,
+ struct coresight_path *path)
{
int i;
struct coresight_device *helper;
@@ -371,7 +377,7 @@ static void coresight_disable_helpers(struct coresight_device *csdev, void *data
for (i = 0; i < csdev->pdata->nr_outconns; ++i) {
helper = csdev->pdata->out_conns[i]->dest_dev;
if (helper && coresight_is_helper(helper))
- coresight_disable_helper(helper, data);
+ coresight_disable_helper(helper, path);
}
}
@@ -477,7 +483,8 @@ void coresight_disable_path(struct coresight_path *path)
EXPORT_SYMBOL_GPL(coresight_disable_path);
static int coresight_enable_helpers(struct coresight_device *csdev,
- enum cs_mode mode, void *data)
+ enum cs_mode mode,
+ struct coresight_path *path)
{
int i, ret = 0;
struct coresight_device *helper;
@@ -487,7 +494,7 @@ static int coresight_enable_helpers(struct coresight_device *csdev,
if (!helper || !coresight_is_helper(helper))
continue;
- ret = coresight_enable_helper(helper, mode, data);
+ ret = coresight_enable_helper(helper, mode, path);
if (ret)
return ret;
}
@@ -495,8 +502,7 @@ static int coresight_enable_helpers(struct coresight_device *csdev,
return 0;
}
-int coresight_enable_path(struct coresight_path *path, enum cs_mode mode,
- void *sink_data)
+int coresight_enable_path(struct coresight_path *path, enum cs_mode mode)
{
int ret = 0;
u32 type;
@@ -526,7 +532,7 @@ int coresight_enable_path(struct coresight_path *path, enum cs_mode mode,
switch (type) {
case CORESIGHT_DEV_TYPE_SINK:
- ret = coresight_enable_sink(csdev, mode, sink_data);
+ ret = coresight_enable_sink(csdev, mode, path);
/*
* Sink is the first component turned on. If we
* failed to enable the sink, there are no components
@@ -1374,8 +1380,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
goto out_unlock;
}
- if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+ sink_ops(csdev)->alloc_buffer) {
ret = etm_perf_add_symlink_sink(csdev);
if (ret) {
@@ -1698,6 +1705,53 @@ int coresight_etm_get_trace_id(struct coresight_device *csdev, enum cs_mode mode
}
EXPORT_SYMBOL_GPL(coresight_etm_get_trace_id);
+/*
+ * Attempt to find and enable programming clock (pclk) and trace clock (atclk)
+ * for the given device.
+ *
+ * For ACPI devices, clocks are controlled by firmware, so bail out early in
+ * this case. Also, skip enabling pclk if the clock is managed by the AMBA
+ * bus driver instead.
+ *
+ * atclk is an optional clock; it is only enabled when it exists.
+ * Otherwise, a NULL pointer is returned to the caller.
+ *
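+ * A typical caller pattern (an illustrative sketch; the drvdata field
+ * names mirror the drivers converted in this series):
+ *
+ *	ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ *	if (ret)
+ *		return ret;
+ *
+ * Both clocks are devm-managed, so callers need no explicit clk_put().
+ *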
+ * Returns: '0' on Success; Error code otherwise.
+ */
+int coresight_get_enable_clocks(struct device *dev, struct clk **pclk,
+ struct clk **atclk)
+{
+ WARN_ON(!pclk);
+
+ if (has_acpi_companion(dev))
+ return 0;
+
+ if (!dev_is_amba(dev)) {
+ /*
+ * "apb_pclk" is the default clock name for an Arm PrimeCell
+ * peripheral, while "apb" is used only by the CTCU driver.
+ *
+ * For easier maintenance, CoreSight drivers should use
+ * "apb_pclk" as the programming clock name.
+ */
+ *pclk = devm_clk_get_optional_enabled(dev, "apb_pclk");
+ if (!*pclk)
+ *pclk = devm_clk_get_optional_enabled(dev, "apb");
+ if (IS_ERR(*pclk))
+ return PTR_ERR(*pclk);
+ }
+
+ /* Skip atclk handling when the caller passes a NULL pointer. */
+ if (atclk) {
+ *atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(*atclk))
+ return PTR_ERR(*atclk);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(coresight_get_enable_clocks);
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_AUTHOR("Mathieu Poirier <mathieu.poirier@linaro.org>");
diff --git a/drivers/hwtracing/coresight/coresight-cpu-debug.c b/drivers/hwtracing/coresight/coresight-cpu-debug.c
index a871d997330b..5f21366406aa 100644
--- a/drivers/hwtracing/coresight/coresight-cpu-debug.c
+++ b/drivers/hwtracing/coresight/coresight-cpu-debug.c
@@ -562,10 +562,20 @@ static void debug_func_exit(void)
static int __debug_probe(struct device *dev, struct resource *res)
{
- struct debug_drvdata *drvdata = dev_get_drvdata(dev);
+ struct debug_drvdata *drvdata;
void __iomem *base;
int ret;
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, drvdata);
+
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, NULL);
+ if (ret)
+ return ret;
+
drvdata->cpu = coresight_get_cpu(dev);
if (drvdata->cpu < 0)
return drvdata->cpu;
@@ -625,13 +635,6 @@ err:
static int debug_probe(struct amba_device *adev, const struct amba_id *id)
{
- struct debug_drvdata *drvdata;
-
- drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- amba_set_drvdata(adev, drvdata);
return __debug_probe(&adev->dev, &adev->res);
}
@@ -690,18 +693,8 @@ static struct amba_driver debug_driver = {
static int debug_platform_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct debug_drvdata *drvdata;
int ret = 0;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
-
- dev_set_drvdata(&pdev->dev, drvdata);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -710,8 +703,6 @@ static int debug_platform_probe(struct platform_device *pdev)
if (ret) {
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
return ret;
}
@@ -725,8 +716,6 @@ static void debug_platform_remove(struct platform_device *pdev)
__debug_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_ACPI
@@ -742,8 +731,8 @@ static int debug_runtime_suspend(struct device *dev)
{
struct debug_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->pclk);
+
return 0;
}
@@ -751,9 +740,7 @@ static int debug_runtime_resume(struct device *dev)
{
struct debug_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ return clk_prepare_enable(drvdata->pclk);
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-ctcu-core.c b/drivers/hwtracing/coresight/coresight-ctcu-core.c
index c6bafc96db96..abed15eb72b4 100644
--- a/drivers/hwtracing/coresight/coresight-ctcu-core.c
+++ b/drivers/hwtracing/coresight/coresight-ctcu-core.c
@@ -156,17 +156,14 @@ static int ctcu_set_etr_traceid(struct coresight_device *csdev, struct coresight
return __ctcu_set_etr_traceid(csdev, traceid, port_num, enable);
}
-static int ctcu_enable(struct coresight_device *csdev, enum cs_mode mode, void *data)
+static int ctcu_enable(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_path *path)
{
- struct coresight_path *path = (struct coresight_path *)data;
-
return ctcu_set_etr_traceid(csdev, path, true);
}
-static int ctcu_disable(struct coresight_device *csdev, void *data)
+static int ctcu_disable(struct coresight_device *csdev, struct coresight_path *path)
{
- struct coresight_path *path = (struct coresight_path *)data;
-
return ctcu_set_etr_traceid(csdev, path, false);
}
@@ -188,7 +185,7 @@ static int ctcu_probe(struct platform_device *pdev)
const struct ctcu_config *cfgs;
struct ctcu_drvdata *drvdata;
void __iomem *base;
- int i;
+ int i, ret;
desc.name = coresight_alloc_device_name(&ctcu_devs, dev);
if (!desc.name)
@@ -207,9 +204,9 @@ static int ctcu_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- drvdata->apb_clk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->apb_clk))
- return -ENODEV;
+ ret = coresight_get_enable_clocks(dev, &drvdata->apb_clk, NULL);
+ if (ret)
+ return ret;
cfgs = of_device_get_match_data(dev);
if (cfgs) {
@@ -233,12 +230,8 @@ static int ctcu_probe(struct platform_device *pdev)
desc.access = CSDEV_ACCESS_IOMEM(base);
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- if (!IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_put(drvdata->apb_clk);
-
+ if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
- }
return 0;
}
@@ -275,8 +268,6 @@ static void ctcu_platform_remove(struct platform_device *pdev)
ctcu_remove(pdev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_put(drvdata->apb_clk);
}
#ifdef CONFIG_PM
@@ -284,8 +275,7 @@ static int ctcu_runtime_suspend(struct device *dev)
{
struct ctcu_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_disable_unprepare(drvdata->apb_clk);
+ clk_disable_unprepare(drvdata->apb_clk);
return 0;
}
@@ -294,10 +284,7 @@ static int ctcu_runtime_resume(struct device *dev)
{
struct ctcu_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->apb_clk))
- clk_prepare_enable(drvdata->apb_clk);
-
- return 0;
+ return clk_prepare_enable(drvdata->apb_clk);
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 8fb30dd73fd2..bfbc365bb2ef 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -799,14 +799,15 @@ static void cti_pm_release(struct cti_drvdata *drvdata)
}
/** cti ect operations **/
-int cti_enable(struct coresight_device *csdev, enum cs_mode mode, void *data)
+int cti_enable(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_path *path)
{
struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
return cti_enable_hw(drvdata);
}
-int cti_disable(struct coresight_device *csdev, void *data)
+int cti_disable(struct coresight_device *csdev, struct coresight_path *path)
{
struct cti_drvdata *drvdata = csdev_to_cti_drvdata(csdev);
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
index 8362a47c939c..4f89091ee93f 100644
--- a/drivers/hwtracing/coresight/coresight-cti.h
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -216,8 +216,9 @@ int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata,
const char *assoc_dev_name);
struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs,
int out_sigs);
-int cti_enable(struct coresight_device *csdev, enum cs_mode mode, void *data);
-int cti_disable(struct coresight_device *csdev, void *data);
+int cti_enable(struct coresight_device *csdev, enum cs_mode mode,
+ struct coresight_path *path);
+int cti_disable(struct coresight_device *csdev, struct coresight_path *path);
void cti_write_all_hw_regs(struct cti_drvdata *drvdata);
void cti_write_intack(struct device *dev, u32 ackval);
void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value);
diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c
index aaa92b5081e3..14322c99e29d 100644
--- a/drivers/hwtracing/coresight/coresight-dummy.c
+++ b/drivers/hwtracing/coresight/coresight-dummy.c
@@ -52,7 +52,7 @@ static int dummy_source_trace_id(struct coresight_device *csdev, __maybe_unused
}
static int dummy_sink_enable(struct coresight_device *csdev, enum cs_mode mode,
- void *data)
+ struct coresight_path *path)
{
dev_dbg(csdev->dev.parent, "Dummy sink enabled\n");
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index d5efb085b30d..6657602d8f2e 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -167,13 +167,13 @@ out:
return ret;
}
-static int etb_enable_perf(struct coresight_device *csdev, void *data)
+static int etb_enable_perf(struct coresight_device *csdev, struct coresight_path *path)
{
int ret = 0;
pid_t pid;
unsigned long flags;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct perf_output_handle *handle = data;
+ struct perf_output_handle *handle = path->handle;
struct cs_buffers *buf = etm_perf_sink_config(handle);
raw_spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -224,7 +224,7 @@ out:
}
static int etb_enable(struct coresight_device *csdev, enum cs_mode mode,
- void *data)
+ struct coresight_path *path)
{
int ret;
@@ -233,7 +233,7 @@ static int etb_enable(struct coresight_device *csdev, enum cs_mode mode,
ret = etb_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = etb_enable_perf(csdev, data);
+ ret = etb_enable_perf(csdev, path);
break;
default:
ret = -EINVAL;
@@ -730,12 +730,10 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
+
dev_set_drvdata(dev, drvdata);
/* validity for the resource is already checked by the AMBA core */
@@ -811,8 +809,7 @@ static int etb_runtime_suspend(struct device *dev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
return 0;
}
@@ -821,10 +818,7 @@ static int etb_runtime_resume(struct device *dev)
{
struct etb_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
-
- return 0;
+ return clk_prepare_enable(drvdata->atclk);
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index f1551c08ecb2..17afa0f4cdee 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -520,13 +520,14 @@ static void etm_event_start(struct perf_event *event, int flags)
goto out;
path = etm_event_cpu_path(event_data, cpu);
+ path->handle = handle;
/* We need a sink, no need to continue without one */
sink = coresight_get_sink(path);
if (WARN_ON_ONCE(!sink))
goto fail_end_stop;
/* Nothing will happen without a path */
- if (coresight_enable_path(path, CS_MODE_PERF, handle))
+ if (coresight_enable_path(path, CS_MODE_PERF))
goto fail_end_stop;
/* Finally enable the tracer */
@@ -851,7 +852,7 @@ static ssize_t etm_perf_sink_name_show(struct device *dev,
struct dev_ext_attribute *ea;
ea = container_of(dattr, struct dev_ext_attribute, attr);
- return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
+ return scnprintf(buf, PAGE_SIZE, "0x%px\n", ea->var);
}
static struct dev_ext_attribute *
@@ -943,7 +944,7 @@ static ssize_t etm_perf_cscfg_event_show(struct device *dev,
struct dev_ext_attribute *ea;
ea = container_of(dattr, struct dev_ext_attribute, attr);
- return scnprintf(buf, PAGE_SIZE, "configid=0x%lx\n", (unsigned long)(ea->var));
+ return scnprintf(buf, PAGE_SIZE, "configid=0x%px\n", ea->var);
}
int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc)
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 1c6204e14422..a5e809589d3e 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -439,13 +439,26 @@ struct etm_enable_arg {
int rc;
};
-static void etm_enable_hw_smp_call(void *info)
+static void etm_enable_sysfs_smp_call(void *info)
{
struct etm_enable_arg *arg = info;
+ struct coresight_device *csdev;
if (WARN_ON(!arg))
return;
+
+ csdev = arg->drvdata->csdev;
+ if (!coresight_take_mode(csdev, CS_MODE_SYSFS)) {
+ /* Someone is already using the tracer */
+ arg->rc = -EBUSY;
+ return;
+ }
+
arg->rc = etm_enable_hw(arg->drvdata);
+
+ /* The tracer didn't start */
+ if (arg->rc)
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
}
static int etm_cpu_id(struct coresight_device *csdev)
@@ -465,16 +478,26 @@ static int etm_enable_perf(struct coresight_device *csdev,
struct coresight_path *path)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return -EINVAL;
+ if (!coresight_take_mode(csdev, CS_MODE_PERF))
+ return -EBUSY;
+
/* Configure the tracer based on the session's specifics */
etm_parse_event_config(drvdata, event);
drvdata->traceid = path->trace_id;
/* And enable it */
- return etm_enable_hw(drvdata);
+ ret = etm_enable_hw(drvdata);
+
+ /* Failed to start tracer; roll back to DISABLED mode */
+ if (ret)
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
+
+ return ret;
}
static int etm_enable_sysfs(struct coresight_device *csdev, struct coresight_path *path)
@@ -494,7 +517,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev, struct coresight_pat
if (cpu_online(drvdata->cpu)) {
arg.drvdata = drvdata;
ret = smp_call_function_single(drvdata->cpu,
- etm_enable_hw_smp_call, &arg, 1);
+ etm_enable_sysfs_smp_call, &arg, 1);
if (!ret)
ret = arg.rc;
if (!ret)
@@ -517,12 +540,6 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode, struct coresight_path *path)
{
int ret;
- struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- if (!coresight_take_mode(csdev, mode)) {
- /* Someone is already using the tracer */
- return -EBUSY;
- }
switch (mode) {
case CS_MODE_SYSFS:
@@ -535,17 +552,12 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
ret = -EINVAL;
}
- /* The tracer didn't start */
- if (ret)
- coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);
-
return ret;
}
-static void etm_disable_hw(void *info)
+static void etm_disable_hw(struct etm_drvdata *drvdata)
{
int i;
- struct etm_drvdata *drvdata = info;
struct etm_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
@@ -567,6 +579,15 @@ static void etm_disable_hw(void *info)
"cpu: %d disable smp call done\n", drvdata->cpu);
}
+static void etm_disable_sysfs_smp_call(void *info)
+{
+ struct etm_drvdata *drvdata = info;
+
+ etm_disable_hw(drvdata);
+
+ coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);
+}
+
static void etm_disable_perf(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -588,6 +609,8 @@ static void etm_disable_perf(struct coresight_device *csdev)
CS_LOCK(drvdata->csa.base);
+ coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);
+
/*
* perf will release trace ids when _free_aux()
* is called at the end of the session
@@ -612,7 +635,8 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
* Executing etm_disable_hw on the cpu whose ETM is being disabled
* ensures that register writes occur when cpu is powered.
*/
- smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
+ smp_call_function_single(drvdata->cpu, etm_disable_sysfs_smp_call,
+ drvdata, 1);
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
@@ -652,9 +676,6 @@ static void etm_disable(struct coresight_device *csdev,
WARN_ON_ONCE(mode);
return;
}
-
- if (mode)
- coresight_set_mode(csdev, CS_MODE_DISABLED);
}
static const struct coresight_ops_source etm_source_ops = {
@@ -832,12 +853,9 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
spin_lock_init(&drvdata->spinlock);
- drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
+ if (IS_ERR(drvdata->atclk))
+ return PTR_ERR(drvdata->atclk);
drvdata->cpu = coresight_get_cpu(dev);
if (drvdata->cpu < 0)
@@ -928,8 +946,7 @@ static int etm_runtime_suspend(struct device *dev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
return 0;
}
@@ -938,10 +955,7 @@ static int etm_runtime_resume(struct device *dev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
-
- return 0;
+ return clk_prepare_enable(drvdata->atclk);
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 42e5d37403ad..560975b70474 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -4,6 +4,7 @@
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
@@ -445,10 +446,24 @@ static int etm4_enable_trace_unit(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR);
etm4x_allow_trace(drvdata);
+
+ /*
+ * According to software usage PKLXF in Arm ARM (ARM DDI 0487 L.a),
+ * execute a Context synchronization event to guarantee the trace unit
+ * will observe the new values of the System registers.
+ */
+ if (!csa->io_mem)
+ isb();
+
/* Enable the trace unit */
etm4x_relaxed_write32(csa, 1, TRCPRGCTLR);
- /* Synchronize the register updates for sysreg access */
+ /*
+ * As recommended by section 4.3.7 ("Synchronization when using system
+ * instructions to program the trace unit") of ARM IHI 0064H.b, the
+ * self-hosted trace analyzer must perform a Context synchronization
+ * event between writing to the TRCPRGCTLR and reading the TRCSTATR.
+ */
if (!csa->io_mem)
isb();
@@ -460,10 +475,16 @@ static int etm4_enable_trace_unit(struct etmv4_drvdata *drvdata)
}
/*
- * As recommended by section 4.3.7 ("Synchronization when using the
- * memory-mapped interface") of ARM IHI 0064D
+ * As recommended in section 4.3.7 (Synchronization of register updates)
+ * of ARM IHI 0064H.b, the self-hosted trace analyzer always executes an
+ * ISB instruction after programming the trace unit registers.
+ *
+ * For the memory-mapped interface, the registers are mapped as Device
+ * type (Device-nGnRE). Reading back the value of any register in the
+ * trace unit ensures that all writes have completed. Therefore, polling
+ * on TRCSTATR guarantees that the write to TRCPRGCTLR is complete, and
+ * no explicit dsb() is required here.
*/
- dsb(sy);
isb();
return 0;
@@ -528,7 +549,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
}
- etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
+ if (drvdata->numextinsel)
+ etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
@@ -587,13 +609,26 @@ done:
return rc;
}
-static void etm4_enable_hw_smp_call(void *info)
+static void etm4_enable_sysfs_smp_call(void *info)
{
struct etm4_enable_arg *arg = info;
+ struct coresight_device *csdev;
if (WARN_ON(!arg))
return;
+
+ csdev = arg->drvdata->csdev;
+ if (!coresight_take_mode(csdev, CS_MODE_SYSFS)) {
+ /* Someone is already using the tracer */
+ arg->rc = -EBUSY;
+ return;
+ }
+
arg->rc = etm4_enable_hw(arg->drvdata);
+
+ /* The tracer didn't start */
+ if (arg->rc)
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
}
/*
@@ -806,13 +841,14 @@ static int etm4_enable_perf(struct coresight_device *csdev,
struct perf_event *event,
struct coresight_path *path)
{
- int ret = 0;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret;
- if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
- ret = -EINVAL;
- goto out;
- }
+ if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
+ return -EINVAL;
+
+ if (!coresight_take_mode(csdev, CS_MODE_PERF))
+ return -EBUSY;
/* Configure the tracer based on the session's specifics */
ret = etm4_parse_event_config(csdev, event);
@@ -828,6 +864,9 @@ static int etm4_enable_perf(struct coresight_device *csdev,
ret = etm4_enable_hw(drvdata);
out:
+ /* Failed to start tracer; roll back to DISABLED mode */
+ if (ret)
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
return ret;
}
@@ -859,7 +898,7 @@ static int etm4_enable_sysfs(struct coresight_device *csdev, struct coresight_pa
*/
arg.drvdata = drvdata;
ret = smp_call_function_single(drvdata->cpu,
- etm4_enable_hw_smp_call, &arg, 1);
+ etm4_enable_sysfs_smp_call, &arg, 1);
if (!ret)
ret = arg.rc;
if (!ret)
@@ -880,11 +919,6 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
{
int ret;
- if (!coresight_take_mode(csdev, mode)) {
- /* Someone is already using the tracer */
- return -EBUSY;
- }
-
switch (mode) {
case CS_MODE_SYSFS:
ret = etm4_enable_sysfs(csdev, path);
@@ -896,10 +930,6 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
ret = -EINVAL;
}
- /* The tracer didn't start */
- if (ret)
- coresight_set_mode(csdev, CS_MODE_DISABLED);
-
return ret;
}
@@ -921,11 +951,16 @@ static void etm4_disable_trace_unit(struct etmv4_drvdata *drvdata)
*/
etm4x_prohibit_trace(drvdata);
/*
- * Make sure everything completes before disabling, as recommended
- * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register,
- * SSTATUS") of ARM IHI 0064D
+ * Prevent being speculative at the point of disabling the trace unit,
+ * as recommended by section 7.3.77 ("TRCVICTLR, ViewInst Main Control
+ * Register, SSTATUS") of ARM IHI 0064D
*/
dsb(sy);
+ /*
+ * According to software usage VKHHY in Arm ARM (ARM DDI 0487 L.a),
+ * execute a Context synchronization event to guarantee no new
+ * program-flow trace is generated.
+ */
isb();
/* Trace synchronization barrier, is a nop if not supported */
tsb_csync();
@@ -945,16 +980,22 @@ static void etm4_disable_trace_unit(struct etmv4_drvdata *drvdata)
dev_err(etm_dev,
"timeout while waiting for PM stable Trace Status\n");
/*
- * As recommended by section 4.3.7 (Synchronization of register updates)
- * of ARM IHI 0064H.b.
+ * As recommended in section 4.3.7 (Synchronization of register updates)
+ * of ARM IHI 0064H.b, the self-hosted trace analyzer always executes an
+ * ISB instruction after programming the trace unit registers.
+ *
+ * For the memory-mapped interface, the registers are mapped as Device
+ * type (Device-nGnRE). Reading back the value of any register in the
+ * trace unit ensures that all writes have completed. Therefore, polling
+ * on TRCSTATR guarantees that the write to TRCPRGCTLR is complete, and
+ * no explicit dsb() is required here.
*/
isb();
}
-static void etm4_disable_hw(void *info)
+static void etm4_disable_hw(struct etmv4_drvdata *drvdata)
{
u32 control;
- struct etmv4_drvdata *drvdata = info;
struct etmv4_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
struct csdev_access *csa = &csdev->access;
@@ -991,6 +1032,15 @@ static void etm4_disable_hw(void *info)
"cpu: %d disable smp call done\n", drvdata->cpu);
}
+static void etm4_disable_sysfs_smp_call(void *info)
+{
+ struct etmv4_drvdata *drvdata = info;
+
+ etm4_disable_hw(drvdata);
+
+ coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);
+}
+
static int etm4_disable_perf(struct coresight_device *csdev,
struct perf_event *event)
{
@@ -1020,6 +1070,8 @@ static int etm4_disable_perf(struct coresight_device *csdev,
/* TRCVICTLR::SSSTATUS, bit[9] */
filters->ssstatus = (control & BIT(9));
+ coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);
+
/*
* perf will release trace ids when _free_aux() is
* called at the end of the session.
@@ -1045,7 +1097,8 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
* Executing etm4_disable_hw on the cpu whose ETM is being disabled
* ensures that register writes occur when cpu is powered.
*/
- smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
+ smp_call_function_single(drvdata->cpu, etm4_disable_sysfs_smp_call,
+ drvdata, 1);
raw_spin_unlock(&drvdata->spinlock);
@@ -1085,9 +1138,6 @@ static void etm4_disable(struct coresight_device *csdev,
etm4_disable_perf(csdev, event);
break;
}
-
- if (mode)
- coresight_set_mode(csdev, CS_MODE_DISABLED);
}
static int etm4_resume_perf(struct coresight_device *csdev)
@@ -1423,6 +1473,7 @@ static void etm4_init_arch_data(void *info)
etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
+ drvdata->numextinsel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
/* ATBTRIG, bit[22] implementation can support ATB triggers? */
@@ -1820,9 +1871,11 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
goto out;
}
+ if (!drvdata->paused)
+ etm4_disable_trace_unit(drvdata);
+
state = drvdata->save_state;
- state->trcprgctlr = etm4x_read32(csa, TRCPRGCTLR);
if (drvdata->nr_pe)
state->trcprocselr = etm4x_read32(csa, TRCPROCSELR);
state->trcconfigr = etm4x_read32(csa, TRCCONFIGR);
@@ -1852,7 +1905,9 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
}
- state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
+
+ if (drvdata->numextinsel)
+ state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
@@ -1903,7 +1958,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcpdcr = etm4x_read32(csa, TRCPDCR);
/* wait for TRCSTATR.IDLE to go up */
- if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) {
+ if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) {
dev_err(etm_dev,
"timeout while waiting for Idle Trace Status\n");
etm4_os_unlock(drvdata);
@@ -1911,8 +1966,6 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
goto out;
}
- drvdata->state_needs_restore = true;
-
/*
* Power can be removed from the trace unit now. We do this to
* potentially save power on systems that respect the TRCPDCR_PU
@@ -1930,14 +1983,14 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int ret = 0;
- /* Save the TRFCR irrespective of whether the ETM is ON */
- if (drvdata->trfcr)
- drvdata->save_trfcr = read_trfcr();
+ if (pm_save_enable != PARAM_PM_SAVE_SELF_HOSTED)
+ return 0;
+
/*
* Save and restore the ETM Trace registers only if
* the ETM is active.
*/
- if (coresight_get_mode(drvdata->csdev) && drvdata->save_state)
+ if (coresight_get_mode(drvdata->csdev))
ret = __etm4_cpu_save(drvdata);
return ret;
}
@@ -1954,7 +2007,6 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4_cs_unlock(drvdata, csa);
etm4x_relaxed_write32(csa, state->trcclaimset, TRCCLAIMSET);
- etm4x_relaxed_write32(csa, state->trcprgctlr, TRCPRGCTLR);
if (drvdata->nr_pe)
etm4x_relaxed_write32(csa, state->trcprocselr, TRCPROCSELR);
etm4x_relaxed_write32(csa, state->trcconfigr, TRCCONFIGR);
@@ -1984,7 +2036,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
}
- etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
+ if (drvdata->numextinsel)
+ etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
@@ -2027,8 +2080,6 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
if (!drvdata->skip_power_up)
etm4x_relaxed_write32(csa, state->trcpdcr, TRCPDCR);
- drvdata->state_needs_restore = false;
-
/*
* As recommended by section 4.3.7 ("Synchronization when using the
* memory-mapped interface") of ARM IHI 0064D
@@ -2038,14 +2089,19 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
/* Unlock the OS lock to re-enable trace and external debug access */
etm4_os_unlock(drvdata);
+
+ if (!drvdata->paused)
+ etm4_enable_trace_unit(drvdata);
+
etm4_cs_lock(drvdata, csa);
}
static void etm4_cpu_restore(struct etmv4_drvdata *drvdata)
{
- if (drvdata->trfcr)
- write_trfcr(drvdata->save_trfcr);
- if (drvdata->state_needs_restore)
+ if (pm_save_enable != PARAM_PM_SAVE_SELF_HOSTED)
+ return;
+
+ if (coresight_get_mode(drvdata->csdev))
__etm4_cpu_restore(drvdata);
}
@@ -2211,10 +2267,15 @@ static int etm4_probe(struct device *dev)
struct csdev_access access = { 0 };
struct etm4_init_arg init_arg = { 0 };
struct etm4_init_arg *delayed;
+ int ret;
if (WARN_ON(!drvdata))
return -ENOMEM;
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
+
if (pm_save_enable == PARAM_PM_SAVE_FIRMWARE)
pm_save_enable = coresight_loses_context_with_cpu(dev) ?
PARAM_PM_SAVE_SELF_HOSTED : PARAM_PM_SAVE_NEVER;
@@ -2297,16 +2358,10 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
if (!drvdata)
return -ENOMEM;
- drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
-
if (res) {
drvdata->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drvdata->base)) {
- clk_put(drvdata->pclk);
+ if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- }
}
dev_set_drvdata(&pdev->dev, drvdata);
@@ -2413,9 +2468,6 @@ static void etm4_remove_platform_dev(struct platform_device *pdev)
if (drvdata)
etm4_remove_dev(drvdata);
pm_runtime_disable(&pdev->dev);
-
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
static const struct amba_id etm4_ids[] = {
@@ -2463,8 +2515,8 @@ static int etm4_runtime_suspend(struct device *dev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata->pclk && !IS_ERR(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
return 0;
}
@@ -2472,11 +2524,17 @@ static int etm4_runtime_suspend(struct device *dev)
static int etm4_runtime_resume(struct device *dev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- if (drvdata->pclk && !IS_ERR(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
- return 0;
+ return ret;
}
#endif
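
The hunks above convert the driver to a shared clock helper. As a rough sketch only (the real coresight_get_enable_clocks() lives in the CoreSight core and is not shown in this diff), the intended contract is: both clocks are optional, stay NULL when the firmware omits them, and come back enabled, so clk_disable_unprepare(NULL) is a safe no-op in the runtime-PM paths:

/*
 * Illustrative sketch, assuming devm-managed optional clocks; the actual
 * helper in the CoreSight core may differ in names and details.
 */
static int coresight_get_enable_clocks_sketch(struct device *dev,
					      struct clk **pclk,
					      struct clk **atclk)
{
	/* APB interface clock: NULL (not an error) when absent */
	*pclk = devm_clk_get_optional_enabled(dev, "apb_pclk");
	if (IS_ERR(*pclk))
		return PTR_ERR(*pclk);

	/* ATB trace clock: likewise optional */
	*atclk = devm_clk_get_optional_enabled(dev, "atclk");
	if (IS_ERR(*atclk))
		return PTR_ERR(*atclk);

	return 0;
}

This is also why the remove paths can drop their clk_put() calls: devm-managed clocks are released automatically when the device goes away.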
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index ab251865b893..e9eeea6240d5 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/bitfield.h>
#include <linux/coresight.h>
#include <linux/pid_namespace.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index ac649515054d..012c52fd1933 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -162,6 +162,7 @@
#define TRCIDR4_NUMVMIDC_MASK GENMASK(31, 28)
#define TRCIDR5_NUMEXTIN_MASK GENMASK(8, 0)
+#define TRCIDR5_NUMEXTINSEL_MASK GENMASK(11, 9)
#define TRCIDR5_TRACEIDSIZE_MASK GENMASK(21, 16)
#define TRCIDR5_ATBTRIG BIT(22)
#define TRCIDR5_LPOVERRIDE BIT(23)
@@ -865,7 +866,6 @@ struct etmv4_config {
* struct etm4_save_state - state to be preserved when ETM is without power
*/
struct etmv4_save_state {
- u32 trcprgctlr;
u32 trcprocselr;
u32 trcconfigr;
u32 trcauxctlr;
@@ -919,7 +919,8 @@ struct etmv4_save_state {
/**
* struct etm4_drvdata - specifics associated to an ETM component
- * @pclk APB clock if present, otherwise NULL
+ * @pclk: APB clock if present, otherwise NULL
+ * @atclk: Optional clock for the core parts of the ETMv4.
* @base: Memory mapped base address for this component.
* @csdev: Component vitals needed by the framework.
* @spinlock: Only one at a time pls.
@@ -978,9 +979,7 @@ struct etmv4_save_state {
* at runtime, due to the additional setting of TRFCR_CX when
* in EL2. Otherwise, 0.
* @config: structure holding configuration parameters.
- * @save_trfcr: Saved TRFCR_EL1 register during a CPU PM event.
* @save_state: State to be preserved across power loss
- * @state_needs_restore: True when there is context to restore after PM exit
* @skip_power_up: Indicates if an implementation can skip powering up
* the trace unit.
* @paused: Indicates if the trace unit is paused.
@@ -988,6 +987,7 @@ struct etmv4_save_state {
*/
struct etmv4_drvdata {
struct clk *pclk;
+ struct clk *atclk;
void __iomem *base;
struct coresight_device *csdev;
raw_spinlock_t spinlock;
@@ -999,6 +999,7 @@ struct etmv4_drvdata {
u8 nr_cntr;
u8 nr_ext_inp;
u8 numcidc;
+ u8 numextinsel;
u8 numvmidc;
u8 nrseqstate;
u8 nr_event;
@@ -1033,9 +1034,7 @@ struct etmv4_drvdata {
bool lpoverride;
u64 trfcr;
struct etmv4_config config;
- u64 save_trfcr;
struct etmv4_save_state *save_state;
- bool state_needs_restore;
bool skip_power_up;
bool paused;
DECLARE_BITMAP(arch_features, ETM4_IMPDEF_FEATURE_MAX);
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index b1922dbe9292..3b248e54471a 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -213,11 +213,11 @@ ATTRIBUTE_GROUPS(coresight_funnel);
static int funnel_probe(struct device *dev, struct resource *res)
{
- int ret;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
struct coresight_desc desc = { 0 };
+ int ret;
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-funnel"))
@@ -231,16 +231,9 @@ static int funnel_probe(struct device *dev, struct resource *res)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
-
- drvdata->pclk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
/*
* Map the device base for dynamic-funnel, which has been
@@ -248,10 +241,8 @@ static int funnel_probe(struct device *dev, struct resource *res)
*/
if (res) {
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto out_disable_clk;
- }
+ if (IS_ERR(base))
+ return PTR_ERR(base);
drvdata->base = base;
desc.groups = coresight_funnel_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
@@ -261,10 +252,9 @@ static int funnel_probe(struct device *dev, struct resource *res)
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out_disable_clk;
- }
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
dev->platform_data = pdata;
raw_spin_lock_init(&drvdata->spinlock);
@@ -274,19 +264,10 @@ static int funnel_probe(struct device *dev, struct resource *res)
desc.pdata = pdata;
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto out_disable_clk;
- }
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
- ret = 0;
-
-out_disable_clk:
- if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
- if (ret && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
- return ret;
+ return 0;
}
static int funnel_remove(struct device *dev)
@@ -303,11 +284,8 @@ static int funnel_runtime_suspend(struct device *dev)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
-
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
return 0;
}
@@ -315,13 +293,17 @@ static int funnel_runtime_suspend(struct device *dev)
static int funnel_runtime_resume(struct device *dev)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
@@ -355,8 +337,6 @@ static void funnel_platform_remove(struct platform_device *pdev)
funnel_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
static const struct of_device_id funnel_match[] = {
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 33e22b1ba043..fd896ac07942 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -135,8 +135,7 @@ static inline void CS_UNLOCK(void __iomem *addr)
}
void coresight_disable_path(struct coresight_path *path);
-int coresight_enable_path(struct coresight_path *path, enum cs_mode mode,
- void *sink_data);
+int coresight_enable_path(struct coresight_path *path, enum cs_mode mode);
struct coresight_device *coresight_get_sink(struct coresight_path *path);
struct coresight_device *coresight_get_sink_by_id(u32 id);
struct coresight_device *
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 06efd2b01a0f..e6472658235d 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -219,11 +219,11 @@ static const struct attribute_group *replicator_groups[] = {
static int replicator_probe(struct device *dev, struct resource *res)
{
- int ret = 0;
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
struct coresight_desc desc = { 0 };
void __iomem *base;
+ int ret;
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-replicator"))
@@ -238,16 +238,9 @@ static int replicator_probe(struct device *dev, struct resource *res)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
-
- drvdata->pclk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
/*
* Map the device base for dynamic-replicator, which has been
@@ -255,10 +248,8 @@ static int replicator_probe(struct device *dev, struct resource *res)
*/
if (res) {
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- goto out_disable_clk;
- }
+ if (IS_ERR(base))
+ return PTR_ERR(base);
drvdata->base = base;
desc.groups = replicator_groups;
desc.access = CSDEV_ACCESS_IOMEM(base);
@@ -272,10 +263,8 @@ static int replicator_probe(struct device *dev, struct resource *res)
dev_set_drvdata(dev, drvdata);
pdata = coresight_get_platform_data(dev);
- if (IS_ERR(pdata)) {
- ret = PTR_ERR(pdata);
- goto out_disable_clk;
- }
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
dev->platform_data = pdata;
raw_spin_lock_init(&drvdata->spinlock);
@@ -286,19 +275,11 @@ static int replicator_probe(struct device *dev, struct resource *res)
desc.dev = dev;
drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev)) {
- ret = PTR_ERR(drvdata->csdev);
- goto out_disable_clk;
- }
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
replicator_reset(drvdata);
-
-out_disable_clk:
- if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
- if (ret && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
- return ret;
+ return 0;
}
static int replicator_remove(struct device *dev)
@@ -335,8 +316,6 @@ static void replicator_platform_remove(struct platform_device *pdev)
replicator_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_PM
@@ -344,24 +323,26 @@ static int replicator_runtime_suspend(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
return 0;
}
static int replicator_runtime_resume(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index e45c6c7204b4..e68529bf89c9 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -342,7 +342,7 @@ static int stm_generic_link(struct stm_data *stm_data,
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
- if (!drvdata || !drvdata->csdev)
+ if (!drvdata->csdev)
return -EINVAL;
return coresight_enable_sysfs(drvdata->csdev);
@@ -353,7 +353,7 @@ static void stm_generic_unlink(struct stm_data *stm_data,
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
- if (!drvdata || !drvdata->csdev)
+ if (!drvdata->csdev)
return;
coresight_disable_sysfs(drvdata->csdev);
@@ -384,7 +384,7 @@ static long stm_generic_set_options(struct stm_data *stm_data,
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
- if (!(drvdata && coresight_get_mode(drvdata->csdev)))
+ if (!coresight_get_mode(drvdata->csdev))
return -EINVAL;
if (channel >= drvdata->numsp)
@@ -419,7 +419,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
struct stm_drvdata, stm);
unsigned int stm_flags;
- if (!(drvdata && coresight_get_mode(drvdata->csdev)))
+ if (!coresight_get_mode(drvdata->csdev))
return -EACCES;
if (channel >= drvdata->numsp)
@@ -842,16 +842,10 @@ static int __stm_probe(struct device *dev, struct resource *res)
if (!drvdata)
return -ENOMEM;
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
- drvdata->pclk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
dev_set_drvdata(dev, drvdata);
base = devm_ioremap_resource(dev, res);
@@ -963,24 +957,26 @@ static int stm_runtime_suspend(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
return 0;
}
static int stm_runtime_resume(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ return ret;
}
#endif
@@ -1033,8 +1029,6 @@ static void stm_platform_remove(struct platform_device *pdev)
__stm_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index 83dad24e0116..6836b05986e8 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -395,7 +395,7 @@ static void cscfg_remove_owned_csdev_configs(struct coresight_device *csdev, voi
if (list_empty(&csdev->config_csdev_list))
return;
- guard(raw_spinlock_irqsave)(&csdev->cscfg_csdev_lock);
+ guard(raw_spinlock_irqsave)(&csdev->cscfg_csdev_lock);
list_for_each_entry_safe(config_csdev, tmp, &csdev->config_csdev_list, node) {
if (config_csdev->config_desc->load_owner == load_owner)
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index feadaf065b53..d2a6ed8bcc74 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/kernel.h>
+#include <linux/property.h>
#include "coresight-priv.h"
#include "coresight-trace-id.h"
@@ -214,7 +215,7 @@ int coresight_enable_sysfs(struct coresight_device *csdev)
if (!IS_VALID_CS_TRACE_ID(path->trace_id))
goto err_path;
- ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
+ ret = coresight_enable_path(path, CS_MODE_SYSFS);
if (ret)
goto err_path;
@@ -371,17 +372,81 @@ static ssize_t enable_source_store(struct device *dev,
}
static DEVICE_ATTR_RW(enable_source);
+static ssize_t label_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *str;
+ int ret;
+
+ ret = fwnode_property_read_string(dev_fwnode(dev), "label", &str);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", str);
+}
+static DEVICE_ATTR_RO(label);
+
+static umode_t label_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (attr == &dev_attr_label.attr) {
+ if (fwnode_property_present(dev_fwnode(dev), "label"))
+ return attr->mode;
+ else
+ return 0;
+ }
+
+ return attr->mode;
+}
+
static struct attribute *coresight_sink_attrs[] = {
&dev_attr_enable_sink.attr,
+ &dev_attr_label.attr,
NULL,
};
-ATTRIBUTE_GROUPS(coresight_sink);
+
+static struct attribute_group coresight_sink_group = {
+ .attrs = coresight_sink_attrs,
+ .is_visible = label_is_visible,
+};
+__ATTRIBUTE_GROUPS(coresight_sink);
static struct attribute *coresight_source_attrs[] = {
&dev_attr_enable_source.attr,
+ &dev_attr_label.attr,
NULL,
};
-ATTRIBUTE_GROUPS(coresight_source);
+
+static struct attribute_group coresight_source_group = {
+ .attrs = coresight_source_attrs,
+ .is_visible = label_is_visible,
+};
+__ATTRIBUTE_GROUPS(coresight_source);
+
+static struct attribute *coresight_link_attrs[] = {
+ &dev_attr_label.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_link_group = {
+ .attrs = coresight_link_attrs,
+ .is_visible = label_is_visible,
+};
+__ATTRIBUTE_GROUPS(coresight_link);
+
+static struct attribute *coresight_helper_attrs[] = {
+ &dev_attr_label.attr,
+ NULL,
+};
+
+static struct attribute_group coresight_helper_group = {
+ .attrs = coresight_helper_attrs,
+ .is_visible = label_is_visible,
+};
+__ATTRIBUTE_GROUPS(coresight_helper);
const struct device_type coresight_dev_type[] = {
[CORESIGHT_DEV_TYPE_SINK] = {
@@ -390,6 +455,7 @@ const struct device_type coresight_dev_type[] = {
},
[CORESIGHT_DEV_TYPE_LINK] = {
.name = "link",
+ .groups = coresight_link_groups,
},
[CORESIGHT_DEV_TYPE_LINKSINK] = {
.name = "linksink",
@@ -401,6 +467,7 @@ const struct device_type coresight_dev_type[] = {
},
[CORESIGHT_DEV_TYPE_HELPER] = {
.name = "helper",
+ .groups = coresight_helper_groups,
}
};
/* Ensure the enum matches the names and groups */
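
A note on the ATTRIBUTE_GROUPS() to __ATTRIBUTE_GROUPS() switch above: ATTRIBUTE_GROUPS(foo) generates both the struct attribute_group and the NULL-terminated foo_groups[] array from foo_attrs[]. Once a group needs a custom .is_visible hook, the group itself must be written by hand, and __ATTRIBUTE_GROUPS(foo) emits only the trailing array. A minimal sketch of the mechanics (names hypothetical):

static struct attribute *demo_attrs[] = {
	&dev_attr_label.attr,
	NULL,
};

static struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = label_is_visible,	/* returning 0 hides a file */
};

/* __ATTRIBUTE_GROUPS(demo) expands to roughly this: */
static const struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL,
};

The net effect is that every CoreSight device type registers the label attribute, but sysfs only creates the file when the firmware node actually carries a "label" property.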
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 88afb16bb6be..36599c431be6 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -24,6 +24,7 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
@@ -634,25 +635,14 @@ static int of_tmc_get_reserved_resource_by_name(struct device *dev,
const char *name,
struct resource *res)
{
- int index, rc = -ENODEV;
- struct device_node *node;
+ int rc = -ENODEV;
- if (!is_of_node(dev->fwnode))
- return -ENODEV;
-
- index = of_property_match_string(dev->of_node, "memory-region-names",
- name);
- if (index < 0)
- return rc;
-
- node = of_parse_phandle(dev->of_node, "memory-region", index);
- if (!node)
+ rc = of_reserved_mem_region_to_resource_byname(dev->of_node, name, res);
+ if (rc < 0)
return rc;
- if (!of_address_to_resource(node, 0, res) &&
- res->start != 0 && resource_size(res) != 0)
- rc = 0;
- of_node_put(node);
+ if (res->start == 0 || resource_size(res) == 0)
+ rc = -ENODEV;
return rc;
}
@@ -785,10 +775,20 @@ static int __tmc_probe(struct device *dev, struct resource *res)
u32 devid;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
- struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
+ struct tmc_drvdata *drvdata;
struct coresight_desc desc = { 0 };
struct coresight_dev_list *dev_list = NULL;
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, drvdata);
+
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
+
ret = -ENOMEM;
/* Validity for the resource is already checked by the AMBA core */
@@ -894,14 +894,8 @@ out:
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
- struct tmc_drvdata *drvdata;
int ret;
- drvdata = devm_kzalloc(&adev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- amba_set_drvdata(adev, drvdata);
ret = __tmc_probe(&adev->dev, &adev->res);
if (!ret)
pm_runtime_put(&adev->dev);
@@ -978,18 +972,8 @@ static struct amba_driver tmc_driver = {
static int tmc_platform_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct tmc_drvdata *drvdata;
int ret = 0;
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
-
- drvdata->pclk = coresight_get_enable_apb_pclk(&pdev->dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
-
- dev_set_drvdata(&pdev->dev, drvdata);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -1011,8 +995,6 @@ static void tmc_platform_remove(struct platform_device *pdev)
__tmc_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_PM
@@ -1020,18 +1002,26 @@ static int tmc_runtime_suspend(struct device *dev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
+
return 0;
}
static int tmc_runtime_resume(struct device *dev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 0f45ab5e5249..8882b1c4cdc0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -246,13 +246,14 @@ out:
return ret;
}
-static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
+static int tmc_enable_etf_sink_perf(struct coresight_device *csdev,
+ struct coresight_path *path)
{
int ret = 0;
pid_t pid;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct perf_output_handle *handle = data;
+ struct perf_output_handle *handle = path->handle;
struct cs_buffers *buf = etm_perf_sink_config(handle);
raw_spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -304,7 +305,8 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
}
static int tmc_enable_etf_sink(struct coresight_device *csdev,
- enum cs_mode mode, void *data)
+ enum cs_mode mode,
+ struct coresight_path *path)
{
int ret;
@@ -313,7 +315,7 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev,
ret = tmc_enable_etf_sink_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = tmc_enable_etf_sink_perf(csdev, data);
+ ret = tmc_enable_etf_sink_perf(csdev, path);
break;
/* We shouldn't be here */
default:
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index b07fcdb3fe1a..e0d83ee01b77 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1250,6 +1250,13 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
* with the lock released.
*/
raw_spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /*
+ * If the ETR is already enabled, continue with the existing buffer.
+ */
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS)
+ goto out;
+
sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1325,9 +1332,10 @@ out:
}
struct etr_buf *tmc_etr_get_buffer(struct coresight_device *csdev,
- enum cs_mode mode, void *data)
+ enum cs_mode mode,
+ struct coresight_path *path)
{
- struct perf_output_handle *handle = data;
+ struct perf_output_handle *handle = path->handle;
struct etr_perf_buffer *etr_perf;
switch (mode) {
@@ -1725,13 +1733,14 @@ out:
return size;
}
-static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev,
+ struct coresight_path *path)
{
int rc = 0;
pid_t pid;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct perf_output_handle *handle = data;
+ struct perf_output_handle *handle = path->handle;
struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
raw_spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -1779,13 +1788,14 @@ unlock_out:
}
static int tmc_enable_etr_sink(struct coresight_device *csdev,
- enum cs_mode mode, void *data)
+ enum cs_mode mode,
+ struct coresight_path *path)
{
switch (mode) {
case CS_MODE_SYSFS:
return tmc_enable_etr_sink_sysfs(csdev);
case CS_MODE_PERF:
- return tmc_enable_etr_sink_perf(csdev, data);
+ return tmc_enable_etr_sink_perf(csdev, path);
default:
return -EINVAL;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 6541a27a018e..95473d131032 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -210,6 +210,7 @@ struct tmc_resrv_buf {
/**
* struct tmc_drvdata - specifics associated to an TMC component
+ * @atclk: optional clock for the core parts of the TMC.
* @pclk: APB clock if present, otherwise NULL
* @base: memory mapped base address for this component.
* @csdev: component vitals needed by the framework.
@@ -244,6 +245,7 @@ struct tmc_resrv_buf {
* Used by ETR/ETF.
*/
struct tmc_drvdata {
+ struct clk *atclk;
struct clk *pclk;
void __iomem *base;
struct coresight_device *csdev;
@@ -440,7 +442,8 @@ struct coresight_device *tmc_etr_get_catu_device(struct tmc_drvdata *drvdata);
void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu);
void tmc_etr_remove_catu_ops(void);
struct etr_buf *tmc_etr_get_buffer(struct coresight_device *csdev,
- enum cs_mode mode, void *data);
+ enum cs_mode mode,
+ struct coresight_path *path);
extern const struct attribute_group coresight_etr_group;
#endif
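
The enable callbacks above trade the opaque void *data for a typed struct coresight_path, from which perf sinks now pull their output handle. Only the members this series actually dereferences are sketched below; the full definition in coresight-priv.h also describes the devices making up the path (a sketch, not the real declaration):

struct coresight_path {
	u8 trace_id;				/* checked with IS_VALID_CS_TRACE_ID() */
	struct perf_output_handle *handle;	/* perf AUX handle; unused in sysfs mode */
	/* ... the ordered list of devices from source to sink ... */
};

With the typed parameter, a sink such as the ETR no longer needs to know whether the caller passed a perf handle or nothing at all; it reads path->handle only on the CS_MODE_PERF branch.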
diff --git a/drivers/hwtracing/coresight/coresight-tnoc.c b/drivers/hwtracing/coresight/coresight-tnoc.c
new file mode 100644
index 000000000000..ff9a0a9cfe96
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tnoc.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "coresight-priv.h"
+#include "coresight-trace-id.h"
+
+#define TRACE_NOC_CTRL 0x008
+#define TRACE_NOC_XLD 0x010
+#define TRACE_NOC_FREQVAL 0x018
+#define TRACE_NOC_SYNCR 0x020
+
+/* Enable generation of output ATB traffic. */
+#define TRACE_NOC_CTRL_PORTEN BIT(0)
+/* Sets the type of issued ATB FLAG packets. */
+#define TRACE_NOC_CTRL_FLAGTYPE BIT(7)
+/* Sets the type of issued ATB FREQ packets. */
+#define TRACE_NOC_CTRL_FREQTYPE BIT(8)
+
+#define TRACE_NOC_SYNC_INTERVAL 0xFFFF
+
+/*
+ * struct trace_noc_drvdata - specifics associated with a Trace NOC component
+ * @base: memory mapped base address for this component.
+ * @dev: device node for trace_noc_drvdata.
+ * @csdev: component vitals needed by the framework.
+ * @spinlock: serialize enable/disable operation.
+ * @atid: id for the trace packet.
+ */
+struct trace_noc_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ spinlock_t spinlock;
+ u32 atid;
+};
+
+DEFINE_CORESIGHT_DEVLIST(trace_noc_devs, "traceNoc");
+
+static void trace_noc_enable_hw(struct trace_noc_drvdata *drvdata)
+{
+ u32 val;
+
+ /* Set ATID */
+ writel_relaxed(drvdata->atid, drvdata->base + TRACE_NOC_XLD);
+
+ /* Set the data word count between 'SYNC' packets */
+ writel_relaxed(TRACE_NOC_SYNC_INTERVAL, drvdata->base + TRACE_NOC_SYNCR);
+
+ /*
+ * Set the Control register:
+ * - Set the FLAG packets to 'FLAG' packets
+ * - Set the FREQ packets to 'FREQ_TS' packets
+ * - Enable generation of output ATB traffic
+ */
+ val = readl_relaxed(drvdata->base + TRACE_NOC_CTRL);
+
+ val &= ~TRACE_NOC_CTRL_FLAGTYPE;
+ val |= TRACE_NOC_CTRL_FREQTYPE;
+ val |= TRACE_NOC_CTRL_PORTEN;
+
+ writel(val, drvdata->base + TRACE_NOC_CTRL);
+}
+
+static int trace_noc_enable(struct coresight_device *csdev, struct coresight_connection *inport,
+ struct coresight_connection *outport)
+{
+ struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ scoped_guard(spinlock, &drvdata->spinlock) {
+ if (csdev->refcnt == 0)
+ trace_noc_enable_hw(drvdata);
+
+ csdev->refcnt++;
+ }
+
+ dev_dbg(drvdata->dev, "Trace NOC is enabled\n");
+ return 0;
+}
+
+static void trace_noc_disable(struct coresight_device *csdev, struct coresight_connection *inport,
+ struct coresight_connection *outport)
+{
+ struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ scoped_guard(spinlock, &drvdata->spinlock) {
+ if (--csdev->refcnt == 0)
+ writel(0x0, drvdata->base + TRACE_NOC_CTRL);
+ }
+ dev_dbg(drvdata->dev, "Trace NOC is disabled\n");
+}
+
+static int trace_noc_id(struct coresight_device *csdev, __maybe_unused enum cs_mode mode,
+ __maybe_unused struct coresight_device *sink)
+{
+ struct trace_noc_drvdata *drvdata;
+
+ drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ return drvdata->atid;
+}
+
+static const struct coresight_ops_link trace_noc_link_ops = {
+ .enable = trace_noc_enable,
+ .disable = trace_noc_disable,
+};
+
+static const struct coresight_ops trace_noc_cs_ops = {
+ .trace_id = trace_noc_id,
+ .link_ops = &trace_noc_link_ops,
+};
+
+static int trace_noc_init_default_data(struct trace_noc_drvdata *drvdata)
+{
+ int atid;
+
+ atid = coresight_trace_id_get_system_id();
+ if (atid < 0)
+ return atid;
+
+ drvdata->atid = atid;
+
+ return 0;
+}
+
+static ssize_t traceid_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long val;
+ struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ val = drvdata->atid;
+ return sysfs_emit(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(traceid);
+
+static struct attribute *coresight_tnoc_attrs[] = {
+ &dev_attr_traceid.attr,
+ NULL,
+};
+
+static const struct attribute_group coresight_tnoc_group = {
+ .attrs = coresight_tnoc_attrs,
+};
+
+static const struct attribute_group *coresight_tnoc_groups[] = {
+ &coresight_tnoc_group,
+ NULL,
+};
+
+static int trace_noc_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata;
+ struct trace_noc_drvdata *drvdata;
+ struct coresight_desc desc = { 0 };
+ int ret;
+
+ desc.name = coresight_alloc_device_name(&trace_noc_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ drvdata->base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ spin_lock_init(&drvdata->spinlock);
+
+ ret = trace_noc_init_default_data(drvdata);
+ if (ret)
+ return ret;
+
+ desc.ops = &trace_noc_cs_ops;
+ desc.type = CORESIGHT_DEV_TYPE_LINK;
+ desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
+ desc.pdata = adev->dev.platform_data;
+ desc.dev = &adev->dev;
+ desc.access = CSDEV_ACCESS_IOMEM(drvdata->base);
+ desc.groups = coresight_tnoc_groups;
+ drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev)) {
+ coresight_trace_id_put_system_id(drvdata->atid);
+ return PTR_ERR(drvdata->csdev);
+ }
+ pm_runtime_put(&adev->dev);
+
+ return 0;
+}
+
+static void trace_noc_remove(struct amba_device *adev)
+{
+ struct trace_noc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ coresight_unregister(drvdata->csdev);
+ coresight_trace_id_put_system_id(drvdata->atid);
+}
+
+static const struct amba_id trace_noc_ids[] = {
+ {
+ .id = 0x000f0c00,
+ .mask = 0x00ffff00,
+ },
+ {
+ .id = 0x001f0c00,
+ .mask = 0x00ffff00,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(amba, trace_noc_ids);
+
+static struct amba_driver trace_noc_driver = {
+ .drv = {
+ .name = "coresight-trace-noc",
+ .suppress_bind_attrs = true,
+ },
+ .probe = trace_noc_probe,
+ .remove = trace_noc_remove,
+ .id_table = trace_noc_ids,
+};
+
+module_amba_driver(trace_noc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Trace NOC driver");
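
The new driver leans on <linux/cleanup.h>: scoped_guard(spinlock, ...) takes the lock on entry to the block and releases it automatically at every exit, which is why trace_noc_enable()/trace_noc_disable() contain no unlock calls. The open-coded equivalent, shown for reference only:

/* Equivalent of the scoped_guard() block in trace_noc_enable() above */
static int trace_noc_enable_open_coded(struct coresight_device *csdev,
				       struct trace_noc_drvdata *drvdata)
{
	spin_lock(&drvdata->spinlock);
	if (csdev->refcnt == 0)		/* first user programs the hardware */
		trace_noc_enable_hw(drvdata);
	csdev->refcnt++;
	spin_unlock(&drvdata->spinlock);

	return 0;
}

The refcount keeps the link programmed for as long as at least one path through it is active; the last disable writes 0 to TRACE_NOC_CTRL, clearing PORTEN.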
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 0633f04beb24..3a3825d27f86 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -22,13 +22,6 @@
DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
-static bool coresight_device_is_tpdm(struct coresight_device *csdev)
-{
- return (coresight_is_device_source(csdev)) &&
- (csdev->subtype.source_subtype ==
- CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM);
-}
-
static void tpda_clear_element_size(struct coresight_device *csdev)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -71,6 +64,8 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
if (tpdm_data->dsb) {
rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
"qcom,dsb-element-bits", &drvdata->dsb_esize);
+ if (rc)
+ goto out;
}
if (tpdm_data->cmb) {
@@ -78,6 +73,7 @@ static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
"qcom,cmb-element-bits", &drvdata->cmb_esize);
}
+out:
if (rc)
dev_warn_once(&csdev->dev,
"Failed to read TPDM Element size: %d\n", rc);
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 7214e65097ec..06e0a905a67d 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -470,6 +470,9 @@ static void tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
*/
static void __tpdm_enable(struct tpdm_drvdata *drvdata)
{
+ if (coresight_is_static_tpdm(drvdata->csdev))
+ return;
+
CS_UNLOCK(drvdata->base);
tpdm_enable_dsb(drvdata);
@@ -532,6 +535,9 @@ static void tpdm_disable_cmb(struct tpdm_drvdata *drvdata)
/* TPDM disable operations */
static void __tpdm_disable(struct tpdm_drvdata *drvdata)
{
+ if (coresight_is_static_tpdm(drvdata->csdev))
+ return;
+
CS_UNLOCK(drvdata->base);
tpdm_disable_dsb(drvdata);
@@ -595,6 +601,30 @@ static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata)
return 0;
}
+static int static_tpdm_datasets_setup(struct tpdm_drvdata *drvdata, struct device *dev)
+{
+ /* setup datasets for static TPDM */
+ if (fwnode_property_present(dev->fwnode, "qcom,dsb-element-bits") &&
+ (!drvdata->dsb)) {
+ drvdata->dsb = devm_kzalloc(drvdata->dev,
+ sizeof(*drvdata->dsb), GFP_KERNEL);
+
+ if (!drvdata->dsb)
+ return -ENOMEM;
+ }
+
+ if (fwnode_property_present(dev->fwnode, "qcom,cmb-element-bits") &&
+ (!drvdata->cmb)) {
+ drvdata->cmb = devm_kzalloc(drvdata->dev,
+ sizeof(*drvdata->cmb), GFP_KERNEL);
+
+ if (!drvdata->cmb)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static ssize_t reset_dataset_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -1342,10 +1372,9 @@ static const struct attribute_group *tpdm_attr_grps[] = {
NULL,
};
-static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
+static int tpdm_probe(struct device *dev, struct resource *res)
{
void __iomem *base;
- struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct tpdm_drvdata *drvdata;
struct coresight_desc desc = { 0 };
@@ -1354,32 +1383,37 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
- adev->dev.platform_data = pdata;
+ dev->platform_data = pdata;
/* driver data*/
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- drvdata->dev = &adev->dev;
+ drvdata->dev = dev;
dev_set_drvdata(dev, drvdata);
- base = devm_ioremap_resource(dev, &adev->res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (res) {
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- drvdata->base = base;
+ drvdata->base = base;
+ ret = tpdm_datasets_setup(drvdata);
+ if (ret)
+ return ret;
- ret = tpdm_datasets_setup(drvdata);
- if (ret)
- return ret;
-
- if (drvdata && tpdm_has_dsb_dataset(drvdata))
- of_property_read_u32(drvdata->dev->of_node,
- "qcom,dsb-msrs-num", &drvdata->dsb_msr_num);
+ if (tpdm_has_dsb_dataset(drvdata))
+ of_property_read_u32(drvdata->dev->of_node,
+ "qcom,dsb-msrs-num", &drvdata->dsb_msr_num);
- if (drvdata && tpdm_has_cmb_dataset(drvdata))
- of_property_read_u32(drvdata->dev->of_node,
- "qcom,cmb-msrs-num", &drvdata->cmb_msr_num);
+ if (tpdm_has_cmb_dataset(drvdata))
+ of_property_read_u32(drvdata->dev->of_node,
+ "qcom,cmb-msrs-num", &drvdata->cmb_msr_num);
+ } else {
+ ret = static_tpdm_datasets_setup(drvdata, dev);
+ if (ret)
+ return ret;
+ }
/* Set up coresight component description */
desc.name = coresight_alloc_device_name(&tpdm_devs, dev);
@@ -1388,34 +1422,51 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM;
desc.ops = &tpdm_cs_ops;
- desc.pdata = adev->dev.platform_data;
- desc.dev = &adev->dev;
+ desc.pdata = dev->platform_data;
+ desc.dev = dev;
desc.access = CSDEV_ACCESS_IOMEM(base);
- desc.groups = tpdm_attr_grps;
+ if (res)
+ desc.groups = tpdm_attr_grps;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
spin_lock_init(&drvdata->spinlock);
- /* Decrease pm refcount when probe is done.*/
- pm_runtime_put(&adev->dev);
-
return 0;
}
-static void tpdm_remove(struct amba_device *adev)
+static int tpdm_remove(struct device *dev)
{
- struct tpdm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev);
coresight_unregister(drvdata->csdev);
+
+ return 0;
+}
+
+static int dynamic_tpdm_probe(struct amba_device *adev,
+ const struct amba_id *id)
+{
+ int ret;
+
+ ret = tpdm_probe(&adev->dev, &adev->res);
+ if (!ret)
+ pm_runtime_put(&adev->dev);
+
+ return ret;
+}
+
+static void dynamic_tpdm_remove(struct amba_device *adev)
+{
+ tpdm_remove(&adev->dev);
}
/*
* Different TPDM has different periph id.
* The difference is 0-7 bits' value. So ignore 0-7 bits.
*/
-static const struct amba_id tpdm_ids[] = {
+static const struct amba_id dynamic_tpdm_ids[] = {
{
.id = 0x001f0e00,
.mask = 0x00ffff00,
@@ -1423,17 +1474,76 @@ static const struct amba_id tpdm_ids[] = {
{ 0, 0, NULL },
};
-static struct amba_driver tpdm_driver = {
+MODULE_DEVICE_TABLE(amba, dynamic_tpdm_ids);
+
+static struct amba_driver dynamic_tpdm_driver = {
.drv = {
.name = "coresight-tpdm",
.suppress_bind_attrs = true,
},
- .probe = tpdm_probe,
- .id_table = tpdm_ids,
- .remove = tpdm_remove,
+ .probe = dynamic_tpdm_probe,
+ .id_table = dynamic_tpdm_ids,
+ .remove = dynamic_tpdm_remove,
};
-module_amba_driver(tpdm_driver);
+static int tpdm_platform_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ int ret;
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = tpdm_probe(&pdev->dev, res);
+ pm_runtime_put(&pdev->dev);
+ if (ret)
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static void tpdm_platform_remove(struct platform_device *pdev)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
+
+ if (WARN_ON(!drvdata))
+ return;
+
+ tpdm_remove(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct of_device_id static_tpdm_match[] = {
+ {.compatible = "qcom,coresight-static-tpdm"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, static_tpdm_match);
+
+static struct platform_driver static_tpdm_driver = {
+ .probe = tpdm_platform_probe,
+ .remove = tpdm_platform_remove,
+ .driver = {
+ .name = "coresight-static-tpdm",
+ .of_match_table = static_tpdm_match,
+ .suppress_bind_attrs = true,
+ },
+};
+
+static int __init tpdm_init(void)
+{
+ return coresight_init_driver("tpdm", &dynamic_tpdm_driver, &static_tpdm_driver,
+ THIS_MODULE);
+}
+
+static void __exit tpdm_exit(void)
+{
+ coresight_remove_driver(&dynamic_tpdm_driver, &static_tpdm_driver);
+}
+
+module_init(tpdm_init);
+module_exit(tpdm_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Monitor driver");
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.h b/drivers/hwtracing/coresight/coresight-tpdm.h
index b11754389734..2867f3ab8186 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.h
+++ b/drivers/hwtracing/coresight/coresight-tpdm.h
@@ -343,4 +343,16 @@ struct tpdm_dataset_attribute {
enum dataset_mem mem;
u32 idx;
};
+
+static inline bool coresight_device_is_tpdm(struct coresight_device *csdev)
+{
+ return (coresight_is_device_source(csdev)) &&
+ (csdev->subtype.source_subtype ==
+ CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM);
+}
+
+static inline bool coresight_is_static_tpdm(struct coresight_device *csdev)
+{
+ return (coresight_device_is_tpdm(csdev) && !csdev->access.base);
+}
#endif /* _CORESIGHT_CORESIGHT_TPDM_H */
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 3e0159288428..aaa44bc521c3 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -75,7 +75,7 @@ static void tpiu_enable_hw(struct csdev_access *csa)
}
static int tpiu_enable(struct coresight_device *csdev, enum cs_mode mode,
- void *__unused)
+ struct coresight_path *path)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -128,11 +128,11 @@ static const struct coresight_ops tpiu_cs_ops = {
static int __tpiu_probe(struct device *dev, struct resource *res)
{
- int ret;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
struct tpiu_drvdata *drvdata;
struct coresight_desc desc = { 0 };
+ int ret;
desc.name = coresight_alloc_device_name(&tpiu_devs, dev);
if (!desc.name)
@@ -144,16 +144,10 @@ static int __tpiu_probe(struct device *dev, struct resource *res)
spin_lock_init(&drvdata->spinlock);
- drvdata->atclk = devm_clk_get(dev, "atclk"); /* optional */
- if (!IS_ERR(drvdata->atclk)) {
- ret = clk_prepare_enable(drvdata->atclk);
- if (ret)
- return ret;
- }
+ ret = coresight_get_enable_clocks(dev, &drvdata->pclk, &drvdata->atclk);
+ if (ret)
+ return ret;
- drvdata->pclk = coresight_get_enable_apb_pclk(dev);
- if (IS_ERR(drvdata->pclk))
- return -ENODEV;
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
@@ -212,24 +206,26 @@ static int tpiu_runtime_suspend(struct device *dev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->atclk);
+ clk_disable_unprepare(drvdata->pclk);
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_disable_unprepare(drvdata->pclk);
return 0;
}
static int tpiu_runtime_resume(struct device *dev)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(dev);
+ int ret;
- if (drvdata && !IS_ERR(drvdata->atclk))
- clk_prepare_enable(drvdata->atclk);
+ ret = clk_prepare_enable(drvdata->pclk);
+ if (ret)
+ return ret;
- if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk))
- clk_prepare_enable(drvdata->pclk);
- return 0;
+ ret = clk_prepare_enable(drvdata->atclk);
+ if (ret)
+ clk_disable_unprepare(drvdata->pclk);
+
+ return ret;
}
#endif
@@ -293,8 +289,6 @@ static void tpiu_platform_remove(struct platform_device *pdev)
__tpiu_remove(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (!IS_ERR_OR_NULL(drvdata->pclk))
- clk_put(drvdata->pclk);
}
#ifdef CONFIG_ACPI
diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c
index 8267dd1a2130..474861903f6c 100644
--- a/drivers/hwtracing/coresight/coresight-trbe.c
+++ b/drivers/hwtracing/coresight/coresight-trbe.c
@@ -23,7 +23,8 @@
#include "coresight-self-hosted-trace.h"
#include "coresight-trbe.h"
-#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+#define PERF_IDX2OFF(idx, buf) \
+ ((idx) % ((unsigned long)(buf)->nr_pages << PAGE_SHIFT))
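
The widening cast matters because nr_pages is a 32-bit quantity: computed in 32 bits, nr_pages << PAGE_SHIFT wraps once the AUX buffer reaches 4 GiB, so PERF_IDX2OFF() would fold indices with a bogus modulus. A standalone illustration (plain C, assuming 64-bit unsigned long as on arm64, where TRBE exists):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned int nr_pages = (1u << 20) + 1;	/* buffer just over 4 GiB */
	unsigned long idx = 8192;		/* AUX ring-buffer index */

	/* 32-bit shift wraps: ((2^20 + 1) << 12) mod 2^32 == 4096 */
	unsigned long bad  = idx % (nr_pages << PAGE_SHIFT);
	/* widened before shifting, as the fixed macro does */
	unsigned long good = idx % ((unsigned long)nr_pages << PAGE_SHIFT);

	printf("bad=%lu good=%lu\n", bad, good);	/* bad=0 good=8192 */
	return 0;
}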
/*
* A padding packet that will help the user space tools
@@ -257,6 +258,7 @@ static void trbe_drain_and_disable_local(struct trbe_cpudata *cpudata)
static void trbe_reset_local(struct trbe_cpudata *cpudata)
{
write_sysreg_s(0, SYS_TRBLIMITR_EL1);
+ isb();
trbe_drain_buffer();
write_sysreg_s(0, SYS_TRBPTR_EL1);
write_sysreg_s(0, SYS_TRBBASER_EL1);
@@ -747,12 +749,12 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
buf = kzalloc_node(sizeof(*buf), GFP_KERNEL, trbe_alloc_node(event));
if (!buf)
- return ERR_PTR(-ENOMEM);
+ return NULL;
pglist = kcalloc(nr_pages, sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
kfree(buf);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
for (i = 0; i < nr_pages; i++)
@@ -762,7 +764,7 @@ static void *arm_trbe_alloc_buffer(struct coresight_device *csdev,
if (!buf->trbe_base) {
kfree(pglist);
kfree(buf);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
buf->trbe_limit = buf->trbe_base + nr_pages * PAGE_SIZE;
buf->trbe_write = buf->trbe_base;
@@ -1011,11 +1013,11 @@ err:
}
static int arm_trbe_enable(struct coresight_device *csdev, enum cs_mode mode,
- void *data)
+ struct coresight_path *path)
{
struct trbe_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct trbe_cpudata *cpudata = dev_get_drvdata(&csdev->dev);
- struct perf_output_handle *handle = data;
+ struct perf_output_handle *handle = path->handle;
struct trbe_buf *buf = etm_perf_sink_config(handle);
WARN_ON(cpudata->cpu != smp_processor_id());
@@ -1279,7 +1281,7 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp
* into the device for that purpose.
*/
desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL);
- if (IS_ERR(desc.pdata))
+ if (!desc.pdata)
goto cpu_clear;
desc.type = CORESIGHT_DEV_TYPE_SINK;
@@ -1472,9 +1474,10 @@ static void arm_trbe_remove_cpuhp(struct trbe_drvdata *drvdata)
static int arm_trbe_probe_irq(struct platform_device *pdev,
struct trbe_drvdata *drvdata)
{
+ const struct cpumask *affinity;
int ret;
- drvdata->irq = platform_get_irq(pdev, 0);
+ drvdata->irq = platform_get_irq_affinity(pdev, 0, &affinity);
if (drvdata->irq < 0) {
pr_err("IRQ not found for the platform device\n");
return drvdata->irq;
@@ -1485,14 +1488,14 @@ static int arm_trbe_probe_irq(struct platform_device *pdev,
return -EINVAL;
}
- if (irq_get_percpu_devid_partition(drvdata->irq, &drvdata->supported_cpus))
- return -EINVAL;
+ cpumask_copy(&drvdata->supported_cpus, affinity);
drvdata->handle = alloc_percpu(struct perf_output_handle *);
if (!drvdata->handle)
return -ENOMEM;
- ret = request_percpu_irq(drvdata->irq, arm_trbe_irq_handler, DRVNAME, drvdata->handle);
+ ret = request_percpu_irq_affinity(drvdata->irq, arm_trbe_irq_handler, DRVNAME,
+ affinity, drvdata->handle);
if (ret) {
free_percpu(drvdata->handle);
return ret;
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index 26cfc939e5bd..8f7922a5e534 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -213,10 +213,11 @@ static void smb_enable_sysfs(struct coresight_device *csdev)
coresight_set_mode(csdev, CS_MODE_SYSFS);
}
-static int smb_enable_perf(struct coresight_device *csdev, void *data)
+static int smb_enable_perf(struct coresight_device *csdev,
+ struct coresight_path *path)
{
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct perf_output_handle *handle = data;
+ struct perf_output_handle *handle = path->handle;
struct cs_buffers *buf = etm_perf_sink_config(handle);
pid_t pid;
@@ -240,7 +241,7 @@ static int smb_enable_perf(struct coresight_device *csdev, void *data)
}
static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
- void *data)
+ struct coresight_path *path)
{
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret = 0;
@@ -261,7 +262,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
smb_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = smb_enable_perf(csdev, data);
+ ret = smb_enable_perf(csdev, path);
break;
default:
ret = -EINVAL;
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
index c4c111275627..323f0ccb6878 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.h
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
@@ -7,6 +7,7 @@
#ifndef _ULTRASOC_SMB_H
#define _ULTRASOC_SMB_H
+#include <linux/bitfield.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 47d9e6c3bac0..591b7c12aae5 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -166,7 +166,7 @@ static void intel_th_remove(struct device *dev)
pm_runtime_enable(dev);
}
-static struct bus_type intel_th_bus = {
+static const struct bus_type intel_th_bus = {
.name = "intel_th",
.match = intel_th_match,
.probe = intel_th_probe,
@@ -810,13 +810,17 @@ static int intel_th_output_open(struct inode *inode, struct file *file)
int err;
dev = bus_find_device_by_devt(&intel_th_bus, inode->i_rdev);
- if (!dev || !dev->driver)
- return -ENODEV;
+ if (!dev || !dev->driver) {
+ err = -ENODEV;
+ goto out_no_device;
+ }
thdrv = to_intel_th_driver(dev->driver);
fops = fops_get(thdrv->fops);
- if (!fops)
- return -ENODEV;
+ if (!fops) {
+ err = -ENODEV;
+ goto out_put_device;
+ }
replace_fops(file, fops);
@@ -824,10 +828,16 @@ static int intel_th_output_open(struct inode *inode, struct file *file)
if (file->f_op->open) {
err = file->f_op->open(inode, file);
- return err;
+ if (err)
+ goto out_put_device;
}
return 0;
+
+out_put_device:
+ put_device(dev);
+out_no_device:
+ return err;
}
static const struct file_operations intel_th_output_fops = {
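
The intel_th change above fixes a device reference leak: bus_find_device_by_devt() returns its match with a reference held, and the old open() simply returned on later failures without dropping it. The general shape of the fix, reduced to a sketch (not the driver's exact code):

static int output_open_sketch(struct inode *inode, struct file *file)
{
	struct device *dev;
	int err = -ENODEV;

	/* the lookup takes a reference on success */
	dev = bus_find_device_by_devt(&intel_th_bus, inode->i_rdev);
	if (!dev)
		return -ENODEV;

	if (!dev->driver)
		goto out_put;	/* still holding the lookup reference */

	/* ... fops_get(), replace_fops(), chain to the real open() ... */
	return 0;

out_put:
	put_device(dev);	/* balance bus_find_device_by_devt() */
	return err;
}

The principle is that every failure path reached after a successful lookup must drop the reference the lookup took.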
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 74b66aec33d4..ee86df4cff4b 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -30,7 +30,7 @@ static int i2c_debug;
#define pca_clock(adap) adap->i2c_clock
#define pca_set_con(adap, val) pca_outw(adap, I2C_PCA_CON, val)
#define pca_get_con(adap) pca_inw(adap, I2C_PCA_CON)
-#define pca_wait(adap) adap->wait_for_completion(adap->data)
+#define pca_wait(adap) adap->wait_for_completion_cb(adap->data)
static void pca_reset(struct i2c_algo_pca_data *adap)
{
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index fd563e845d4b..a87ecea7f510 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -23,17 +23,8 @@
#include "i2c-algo-pcf.h"
-#define DEB2(x) if (i2c_debug >= 2) x
-#define DEB3(x) if (i2c_debug >= 3) x /* print several statistical values */
-#define DEBPROTO(x) if (i2c_debug >= 9) x;
- /* debug the protocol by showing transferred bits */
#define DEF_TIMEOUT 16
-/*
- * module parameters:
- */
-static int i2c_debug;
-
/* setting states on the bus with the right timing: */
#define set_pcf(adap, ctl, val) adap->setpcf(adap->data, ctl, val)
@@ -47,27 +38,21 @@ static int i2c_debug;
static void i2c_start(struct i2c_algo_pcf_data *adap)
{
- DEBPROTO(printk(KERN_DEBUG "S "));
set_pcf(adap, 1, I2C_PCF_START);
}
static void i2c_repstart(struct i2c_algo_pcf_data *adap)
{
- DEBPROTO(printk(" Sr "));
set_pcf(adap, 1, I2C_PCF_REPSTART);
}
static void i2c_stop(struct i2c_algo_pcf_data *adap)
{
- DEBPROTO(printk("P\n"));
set_pcf(adap, 1, I2C_PCF_STOP);
}
static void handle_lab(struct i2c_algo_pcf_data *adap, const int *status)
{
- DEB2(printk(KERN_INFO
- "i2c-algo-pcf.o: lost arbitration (CSR 0x%02x)\n",
- *status));
/*
* Cleanup from LAB -- reset and enable ESO.
* This resets the PCF8584; since we've lost the bus, no
@@ -88,9 +73,6 @@ static void handle_lab(struct i2c_algo_pcf_data *adap, const int *status)
if (adap->lab_mdelay)
mdelay(adap->lab_mdelay);
- DEB2(printk(KERN_INFO
- "i2c-algo-pcf.o: reset LAB condition (CSR 0x%02x)\n",
- get_pcf(adap, 1)));
}
static int wait_for_bb(struct i2c_algo_pcf_data *adap)
@@ -147,56 +129,48 @@ static int wait_for_pin(struct i2c_algo_pcf_data *adap, int *status)
*
* vdovikin: added detect code for PCF8584
*/
-static int pcf_init_8584 (struct i2c_algo_pcf_data *adap)
+static int pcf_init_8584(struct i2c_algo_pcf_data *adap)
{
unsigned char temp;
- DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: PCF state 0x%02x\n",
- get_pcf(adap, 1)));
-
/* S1=0x80: S0 selected, serial interface off */
set_pcf(adap, 1, I2C_PCF_PIN);
/*
* check that S1 is now used as R/W control -
* the PCF8584 does that when ESO is zero
*/
- if (((temp = get_pcf(adap, 1)) & 0x7f) != (0)) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S0 (0x%02x).\n", temp));
+ temp = get_pcf(adap, 1);
+ if ((temp & 0x7f) != 0)
return -ENXIO; /* definitely not PCF8584 */
- }
/* load own address in S0, effective address is (own << 1) */
i2c_outb(adap, get_own(adap));
/* check it's really written */
- if ((temp = i2c_inb(adap)) != get_own(adap)) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S0 (0x%02x).\n", temp));
+ temp = i2c_inb(adap);
+ if (temp != get_own(adap))
return -ENXIO;
- }
/* S1=0xA0, next byte in S2 */
set_pcf(adap, 1, I2C_PCF_PIN | I2C_PCF_ES1);
/* check to see S2 now selected */
- if (((temp = get_pcf(adap, 1)) & 0x7f) != I2C_PCF_ES1) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S2 (0x%02x).\n", temp));
+ temp = get_pcf(adap, 1);
+ if ((temp & 0x7f) != I2C_PCF_ES1)
return -ENXIO;
- }
/* load clock register S2 */
i2c_outb(adap, get_clock(adap));
/* check it's really written; only the 5 lowest bits matter */
- if (((temp = i2c_inb(adap)) & 0x1f) != get_clock(adap)) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S2 (0x%02x).\n", temp));
+ temp = i2c_inb(adap);
+ if ((temp & 0x1f) != get_clock(adap))
return -ENXIO;
- }
/* Enable serial interface, idle, S0 selected */
set_pcf(adap, 1, I2C_PCF_IDLE);
/* check to see PCF is really idled and we can access status register */
- if ((temp = get_pcf(adap, 1)) != (I2C_PCF_PIN | I2C_PCF_BB)) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S1` (0x%02x).\n", temp));
+ temp = get_pcf(adap, 1);
+ if (temp != (I2C_PCF_PIN | I2C_PCF_BB))
return -ENXIO;
- }
printk(KERN_DEBUG "i2c-algo-pcf.o: detected and initialized PCF8584.\n");
@@ -209,9 +183,7 @@ static int pcf_sendbytes(struct i2c_adapter *i2c_adap, const char *buf,
struct i2c_algo_pcf_data *adap = i2c_adap->algo_data;
int wrcount, status, timeout;
- for (wrcount=0; wrcount<count; ++wrcount) {
- DEB2(dev_dbg(&i2c_adap->dev, "i2c_write: writing %2.2X\n",
- buf[wrcount] & 0xff));
+ for (wrcount = 0; wrcount < count; ++wrcount) {
i2c_outb(adap, buf[wrcount]);
timeout = wait_for_pin(adap, &status);
if (timeout) {
@@ -246,7 +218,8 @@ static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf,
/* increment number of bytes to read by one -- read dummy byte */
for (i = 0; i <= count; i++) {
- if ((wfp = wait_for_pin(adap, &status))) {
+ wfp = wait_for_pin(adap, &status);
+ if (wfp) {
if (wfp == -EINTR)
return -EINTR; /* arbitration lost */
@@ -280,7 +253,7 @@ static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf,
}
-static int pcf_doAddress(struct i2c_algo_pcf_data *adap,
+static void pcf_send_address(struct i2c_algo_pcf_data *adap,
struct i2c_msg *msg)
{
unsigned char addr = i2c_8bit_addr_from_msg(msg);
@@ -288,8 +261,6 @@ static int pcf_doAddress(struct i2c_algo_pcf_data *adap,
if (msg->flags & I2C_M_REV_DIR_ADDR)
addr ^= 1;
i2c_outb(adap, addr);
-
- return 0;
}
static int pcf_xfer(struct i2c_adapter *i2c_adap,
@@ -299,7 +270,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
struct i2c_algo_pcf_data *adap = i2c_adap->algo_data;
struct i2c_msg *pmsg;
int i;
- int ret=0, timeout, status;
+ int timeout, status;
if (adap->xfer_begin)
adap->xfer_begin(adap->data);
@@ -307,20 +278,15 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
/* Check for bus busy */
timeout = wait_for_bb(adap);
if (timeout) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: "
- "Timeout waiting for BB in pcf_xfer\n");)
i = -EIO;
goto out;
}
- for (i = 0;ret >= 0 && i < num; i++) {
- pmsg = &msgs[i];
-
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n",
- str_read_write(pmsg->flags & I2C_M_RD),
- pmsg->len, pmsg->addr, i + 1, num);)
+ for (i = 0; i < num; i++) {
+ int ret;
- ret = pcf_doAddress(adap, pmsg);
+ pmsg = &msgs[i];
+ pcf_send_address(adap, pmsg);
/* Send START */
if (i == 0)
@@ -335,8 +301,6 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
goto out;
}
i2c_stop(adap);
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting "
- "for PIN(1) in pcf_xfer\n");)
i = -EREMOTEIO;
goto out;
}
@@ -344,35 +308,21 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
/* Check LRB (last rcvd bit - slave ack) */
if (status & I2C_PCF_LRB) {
i2c_stop(adap);
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");)
i = -EREMOTEIO;
goto out;
}
- DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: Msg %d, addr=0x%x, flags=0x%x, len=%d\n",
- i, msgs[i].addr, msgs[i].flags, msgs[i].len);)
if (pmsg->flags & I2C_M_RD) {
ret = pcf_readbytes(i2c_adap, pmsg->buf, pmsg->len,
(i + 1 == num));
-
- if (ret != pmsg->len) {
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: "
- "only read %d bytes.\n",ret));
- } else {
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: read %d bytes.\n",ret));
- }
} else {
ret = pcf_sendbytes(i2c_adap, pmsg->buf, pmsg->len,
(i + 1 == num));
-
- if (ret != pmsg->len) {
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: "
- "only wrote %d bytes.\n",ret));
- } else {
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: wrote %d bytes.\n",ret));
- }
}
+
+ if (ret < 0)
+ goto out;
}
out:
@@ -401,12 +351,11 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap)
struct i2c_algo_pcf_data *pcf_adap = adap->algo_data;
int rval;
- DEB2(dev_dbg(&adap->dev, "hw routines registered.\n"));
-
/* register new adapter to i2c module... */
adap->algo = &pcf_algo;
- if ((rval = pcf_init_8584(pcf_adap)))
+ rval = pcf_init_8584(pcf_adap);
+ if (rval)
return rval;
rval = i2c_add_adapter(adap);
@@ -418,7 +367,3 @@ EXPORT_SYMBOL(i2c_pcf_add_bus);
MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>");
MODULE_DESCRIPTION("I2C-Bus PCF8584 algorithm");
MODULE_LICENSE("GPL");
-
-module_param(i2c_debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(i2c_debug,
- "debug level - 0 off; 1 normal; 2,3 more verbose; 9 pcf-protocol");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 070d014fdc5d..cea87fcb4a1a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -165,6 +165,8 @@ config I2C_I801
Birch Stream (SOC)
Arrow Lake (SOC)
Panther Lake (SOC)
+ Wildcat Lake (SOC)
+ Diamond Rapids (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -414,7 +416,7 @@ config I2C_ASPEED
config I2C_AT91
tristate "Atmel AT91 I2C Two-Wire interface (TWI)"
- depends on ARCH_AT91 || COMPILE_TEST
+ depends on ARCH_MICROCHIP || COMPILE_TEST
help
This supports the use of the I2C interface on Atmel AT91
processors.
@@ -1357,6 +1359,27 @@ config I2C_LJCA
This driver can also be built as a module. If so, the module
will be called i2c-ljca.
+config I2C_NCT6694
+ tristate "Nuvoton NCT6694 I2C adapter support"
+ depends on MFD_NCT6694
+ help
+ If you say yes to this option, support will be included for Nuvoton
+ NCT6694, a USB to I2C interface.
+
+ This driver can also be built as a module. If so, the module will
+ be called i2c-nct6694.
+
+config I2C_USBIO
+ tristate "Intel USBIO I2C Adapter support"
+ depends on USB_USBIO
+ default USB_USBIO
+ help
+ Select this option to enable the I2C driver for the Intel
+ USBIO driver stack.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c_usbio.
+
config I2C_CP2615
tristate "Silicon Labs CP2615 USB sound card and I2C adapter"
depends on USB
@@ -1452,7 +1475,7 @@ config I2C_ACORN
config I2C_ELEKTOR
tristate "Elektor ISA card"
- depends on ISA && HAS_IOPORT_MAP && BROKEN_ON_SMP
+ depends on ISA && HAS_IOPORT_MAP
select I2C_ALGOPCF
help
This supports the PCF8584 ISA bus I2C adapter. Say Y if you own
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 04db855fdfd6..fb985769f5ff 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -135,6 +135,8 @@ obj-$(CONFIG_I2C_GXP) += i2c-gxp.o
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
obj-$(CONFIG_I2C_DLN2) += i2c-dln2.o
obj-$(CONFIG_I2C_LJCA) += i2c-ljca.o
+obj-$(CONFIG_I2C_NCT6694) += i2c-nct6694.o
+obj-$(CONFIG_I2C_USBIO) += i2c-usbio.o
obj-$(CONFIG_I2C_CP2615) += i2c-cp2615.o
obj-$(CONFIG_I2C_PARPORT) += i2c-parport.o
obj-$(CONFIG_I2C_PCI1XXXX) += i2c-mchp-pci1xxxx.o
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index ef7370d3dbea..60edbabc2986 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -458,13 +458,16 @@ struct amd_mp2_dev *amd_mp2_find_device(void)
{
struct device *dev;
struct pci_dev *pci_dev;
+ struct amd_mp2_dev *mp2_dev;
dev = driver_find_next_device(&amd_mp2_pci_driver.driver, NULL);
if (!dev)
return NULL;
pci_dev = to_pci_dev(dev);
- return (struct amd_mp2_dev *)pci_get_drvdata(pci_dev);
+ mp2_dev = (struct amd_mp2_dev *)pci_get_drvdata(pci_dev);
+ put_device(dev);
+ return mp2_dev;
}
EXPORT_SYMBOL_GPL(amd_mp2_find_device);
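The put_device() added above balances driver_find_next_device(), which, like the other bus/driver iterators, returns the device with a reference held; the caller only needs the drvdata pointer, so the reference can be dropped right away. A sketch of the pattern with hypothetical names (safe here on the assumption that the bound driver module pins the private data):

static struct my_priv *find_first_bound(struct device_driver *drv)
{
	struct device *dev;
	struct my_priv *priv;

	dev = driver_find_next_device(drv, NULL);	/* takes a reference */
	if (!dev)
		return NULL;

	priv = dev_get_drvdata(dev);
	put_device(dev);			/* drop it once drvdata is read */
	return priv;
}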
diff --git a/drivers/i2c/busses/i2c-amd-mp2.h b/drivers/i2c/busses/i2c-amd-mp2.h
index 018a42de8b1e..9b7e9494dd12 100644
--- a/drivers/i2c/busses/i2c-amd-mp2.h
+++ b/drivers/i2c/busses/i2c-amd-mp2.h
@@ -207,7 +207,6 @@ static inline void amd_mp2_pm_runtime_get(struct amd_mp2_dev *mp2_dev)
static inline void amd_mp2_pm_runtime_put(struct amd_mp2_dev *mp2_dev)
{
- pm_runtime_mark_last_busy(&mp2_dev->pci_dev->dev);
pm_runtime_put_autosuspend(&mp2_dev->pci_dev->dev);
}
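This pm_runtime_mark_last_busy() removal repeats throughout the series; it appears safe because current kernels update the last-busy timestamp inside pm_runtime_put_autosuspend() and pm_request_autosuspend() themselves, making the explicit call redundant. The resulting idiom, as a sketch:

static void example_xfer_done(struct device *dev)
{
	/* no pm_runtime_mark_last_busy(dev) needed first */
	pm_runtime_put_autosuspend(dev);	/* marks last busy internally */
}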
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index edc047e3e535..b64adef778d4 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -313,7 +313,6 @@ static int __maybe_unused at91_twi_resume_noirq(struct device *dev)
return ret;
}
- pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
at91_init_twi_bus(twi_dev);
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index 59795c1c24ff..894cedbca99f 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -717,7 +717,6 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
ret = (ret < 0) ? ret : num;
out:
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 8554e790f8e3..0d7e2654a534 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -137,12 +137,14 @@ static int clk_bcm2835_i2c_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_bcm2835_i2c_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_bcm2835_i2c_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u32 divider = clk_bcm2835_i2c_calc_divider(rate, *parent_rate);
+ u32 divider = clk_bcm2835_i2c_calc_divider(req->rate, req->best_parent_rate);
- return DIV_ROUND_UP(*parent_rate, divider);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, divider);
+
+ return 0;
}
static unsigned long clk_bcm2835_i2c_recalc_rate(struct clk_hw *hw,
@@ -156,7 +158,7 @@ static unsigned long clk_bcm2835_i2c_recalc_rate(struct clk_hw *hw,
static const struct clk_ops clk_bcm2835_i2c_ops = {
.set_rate = clk_bcm2835_i2c_set_rate,
- .round_rate = clk_bcm2835_i2c_round_rate,
+ .determine_rate = clk_bcm2835_i2c_determine_rate,
.recalc_rate = clk_bcm2835_i2c_recalc_rate,
};
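The round_rate to determine_rate conversion follows the clk framework's ongoing migration: .determine_rate edits a struct clk_rate_request in place and can return an errno, where .round_rate could only return a long. A minimal sketch of the pattern for a divider clock (calc_div() is an assumed helper):

#include <linux/clk-provider.h>

static int example_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	u32 div = calc_div(req->rate, req->best_parent_rate);	/* assumed */

	req->rate = DIV_ROUND_UP(req->best_parent_rate, div);
	return 0;	/* or a negative errno if no rate fits */
}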
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 697d095afbe4..0fb728ade92e 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -1128,7 +1128,6 @@ out:
cdns_i2c_set_mode(CDNS_I2C_MODE_SLAVE, id);
#endif
- pm_runtime_mark_last_busy(id->dev);
pm_runtime_put_autosuspend(id->dev);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 6a3d4e9e07f4..a773ba082321 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -543,7 +543,6 @@ i2c_davinci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
ret = num;
out:
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
@@ -821,7 +820,6 @@ static int davinci_i2c_probe(struct platform_device *pdev)
if (r)
goto err_unuse_clocks;
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 347843b4f5dd..bb5ce0a382f9 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -78,6 +78,7 @@
#define DW_IC_TX_ABRT_SOURCE 0x80
#define DW_IC_ENABLE_STATUS 0x9c
#define DW_IC_CLR_RESTART_DET 0xa8
+#define DW_IC_SMBUS_INTR_MASK 0xcc
#define DW_IC_COMP_PARAM_1 0xf4
#define DW_IC_COMP_VERSION 0xf8
#define DW_IC_SDA_HOLD_MIN_VERS 0x3131312A /* "111*" == v1.11* */
@@ -330,7 +331,6 @@ struct dw_i2c_dev {
struct i2c_dw_semaphore_callbacks {
int (*probe)(struct dw_i2c_dev *dev);
- void (*remove)(struct dw_i2c_dev *dev);
};
int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index cbd88ffa5610..45bfca05bb30 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -220,6 +220,13 @@ static int i2c_dw_init_master(struct dw_i2c_dev *dev)
/* Disable the adapter */
__i2c_dw_disable(dev);
+ /*
+ * Mask SMBus interrupts to block storms from broken
+ * firmware that leaves IC_SMBUS=1; the handler never
+ * services them.
+ */
+ regmap_write(dev->map, DW_IC_SMBUS_INTR_MASK, 0);
+
/* Write standard speed timing parameters */
regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);
@@ -901,7 +908,6 @@ done:
i2c_dw_release_lock(dev);
done_nolock:
- pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
@@ -1068,11 +1074,10 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
if (!(dev->flags & ACCESS_POLLING)) {
ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
irq_flags, dev_name(dev->dev), dev);
- if (ret) {
- dev_err(dev->dev, "failure requesting irq %i: %d\n",
- dev->irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev->dev, ret,
+ "failure requesting irq %i: %d\n",
+ dev->irq, ret);
}
ret = i2c_dw_init_recovery_info(dev);
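The dev_err_probe() conversions in this file collapse the log-then-return pattern into one call; dev_err_probe() returns the error it is given and also records -EPROBE_DEFER as a deferred-probe reason instead of logging it as an error. The idiom, as a sketch (handler and ctx are placeholders):

ret = devm_request_irq(dev, irq, handler, 0, dev_name(dev), ctx);
if (ret)
	return dev_err_probe(dev, ret, "failure requesting irq %i\n", irq);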
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index a35e4c64a1d4..7be99656a67d 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -197,15 +197,6 @@ static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
return 0;
}
-static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev)
-{
- if (dev->semaphore_idx < 0)
- return;
-
- if (i2c_dw_semaphore_cb_table[dev->semaphore_idx].remove)
- i2c_dw_semaphore_cb_table[dev->semaphore_idx].remove(dev);
-}
-
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
u32 flags = (uintptr_t)device_get_match_data(&pdev->dev);
@@ -238,7 +229,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->rst = devm_reset_control_get_optional_exclusive(device, NULL);
if (IS_ERR(dev->rst))
- return PTR_ERR(dev->rst);
+ return dev_err_probe(device, PTR_ERR(dev->rst), "failed to acquire reset\n");
reset_control_deassert(dev->rst);
@@ -247,21 +238,23 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
goto exit_reset;
ret = i2c_dw_probe_lock_support(dev);
- if (ret)
+ if (ret) {
+ dev_err_probe(device, ret, "failed to probe lock support\n");
goto exit_reset;
+ }
i2c_dw_configure(dev);
/* Optional interface clock */
dev->pclk = devm_clk_get_optional(device, "pclk");
if (IS_ERR(dev->pclk)) {
- ret = PTR_ERR(dev->pclk);
+ ret = dev_err_probe(device, PTR_ERR(dev->pclk), "failed to acquire pclk\n");
goto exit_reset;
}
dev->clk = devm_clk_get_optional(device, NULL);
if (IS_ERR(dev->clk)) {
- ret = PTR_ERR(dev->clk);
+ ret = dev_err_probe(device, PTR_ERR(dev->clk), "failed to acquire clock\n");
goto exit_reset;
}
@@ -314,6 +307,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
exit_probe:
dw_i2c_plat_pm_cleanup(dev);
+ i2c_dw_prepare_clk(dev, false);
exit_reset:
reset_control_assert(dev->rst);
return ret;
@@ -331,10 +325,10 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
i2c_dw_disable(dev);
pm_runtime_dont_use_autosuspend(device);
- pm_runtime_put_sync(device);
+ pm_runtime_put_noidle(device);
dw_i2c_plat_pm_cleanup(dev);
- i2c_dw_remove_lock_support(dev);
+ i2c_dw_prepare_clk(dev, false);
reset_control_assert(dev->rst);
}
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index b936a240db0a..6eb16b7d75a6 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -266,11 +266,10 @@ int i2c_dw_probe_slave(struct dw_i2c_dev *dev)
ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr_slave,
IRQF_SHARED, dev_name(dev->dev), dev);
- if (ret) {
- dev_err(dev->dev, "failure requesting IRQ %i: %d\n",
- dev->irq, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev->dev, ret,
+ "failure requesting IRQ %i: %d\n",
+ dev->irq, ret);
ret = i2c_add_numbered_adapter(adap);
if (ret)
diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
index 370f32974763..95ab910b80c0 100644
--- a/drivers/i2c/busses/i2c-hix5hd2.c
+++ b/drivers/i2c/busses/i2c-hix5hd2.c
@@ -339,7 +339,7 @@ static int hix5hd2_i2c_xfer_msg(struct hix5hd2_i2c_priv *priv,
ret = priv->state;
/*
- * If this is the last message to be transfered (stop == 1)
+ * If this is the last message to be transferred (stop == 1)
* Then check if the bus can be brought back to idle.
*/
if (priv->state == HIX5I2C_STAT_RW_SUCCESS && stop)
@@ -373,7 +373,6 @@ static int hix5hd2_i2c_xfer(struct i2c_adapter *adap,
ret = num;
out:
- pm_runtime_mark_last_busy(priv->dev);
pm_runtime_put_autosuspend(priv->dev);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e94ac746a741..81e6e2d7ad3d 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -83,6 +83,8 @@
* Arrow Lake-H (SOC) 0x7722 32 hard yes yes yes
* Panther Lake-H (SOC) 0xe322 32 hard yes yes yes
* Panther Lake-P (SOC) 0xe422 32 hard yes yes yes
+ * Wildcat Lake-U (SOC) 0x4d22 32 hard yes yes yes
+ * Diamond Rapids (SOC) 0x5827 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -236,10 +238,12 @@
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30
#define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS 0x43a3
#define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23
+#define PCI_DEVICE_ID_INTEL_WILDCAT_LAKE_U_SMBUS 0x4d22
#define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
+#define PCI_DEVICE_ID_INTEL_DIAMOND_RAPIDS_SMBUS 0x5827
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_ARROW_LAKE_H_SMBUS 0x7722
#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
@@ -928,7 +932,6 @@ out:
*/
iowrite8(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv));
- pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
return ret;
}
@@ -1053,9 +1056,11 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, DIAMOND_RAPIDS_SMBUS, FEATURES_ICH5) },
{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, WILDCAT_LAKE_U_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index a454f9f25146..88192c25c44c 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1131,7 +1131,6 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
break;
}
- pm_runtime_mark_last_busy(adap->dev.parent);
pm_runtime_put_autosuspend(adap->dev.parent);
return i2c->msg_status ? i2c->msg_status : num;
@@ -1165,7 +1164,6 @@ static int img_i2c_init(struct img_i2c *i2c)
"Unknown hardware revision (%d.%d.%d.%d)\n",
(rev >> 24) & 0xff, (rev >> 16) & 0xff,
(rev >> 8) & 0xff, rev & 0xff);
- pm_runtime_mark_last_busy(i2c->adap.dev.parent);
pm_runtime_put_autosuspend(i2c->adap.dev.parent);
return -EINVAL;
}
@@ -1317,7 +1315,6 @@ static int img_i2c_init(struct img_i2c *i2c)
/* Perform a synchronous sequence to reset the bus */
ret = img_i2c_reset_bus(i2c);
- pm_runtime_mark_last_busy(i2c->adap.dev.parent);
pm_runtime_put_autosuspend(i2c->adap.dev.parent);
return ret;
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 03b5a7e8c361..2a0962a0b441 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -363,7 +363,6 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
return 0;
rpm_put:
- pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent);
pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);
return ret;
@@ -377,7 +376,6 @@ static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
temp &= ~MCR_MEN;
writel(temp, lpi2c_imx->base + LPI2C_MCR);
- pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent);
pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);
return 0;
@@ -1462,7 +1460,6 @@ static int lpi2c_imx_probe(struct platform_device *pdev)
if (ret)
goto rpm_disable;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n");
@@ -1564,7 +1561,6 @@ static int lpi2c_suspend(struct device *dev)
static int lpi2c_resume(struct device *dev)
{
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 60f5c790ad7c..dcce882f3eba 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1637,7 +1637,6 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
result = i2c_imx_xfer_common(adapter, msgs, num, false);
- pm_runtime_mark_last_busy(i2c_imx->adapter.dev.parent);
pm_runtime_put_autosuspend(i2c_imx->adapter.dev.parent);
return result;
@@ -1822,7 +1821,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (ret < 0)
goto clk_notifier_unregister;
- pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq);
@@ -1928,7 +1926,6 @@ static int i2c_imx_suspend(struct device *dev)
static int i2c_imx_resume(struct device *dev)
{
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-k1.c b/drivers/i2c/busses/i2c-k1.c
index b68a21fff0b5..d42c03ef5db5 100644
--- a/drivers/i2c/busses/i2c-k1.c
+++ b/drivers/i2c/busses/i2c-k1.c
@@ -3,6 +3,7 @@
* Copyright (C) 2024-2025 Troy Mitchell <troymitchell988@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
@@ -14,6 +15,7 @@
#define SPACEMIT_ICR 0x0 /* Control register */
#define SPACEMIT_ISR 0x4 /* Status register */
#define SPACEMIT_IDBR 0xc /* Data buffer register */
+#define SPACEMIT_IRCR 0x18 /* Reset cycle counter */
#define SPACEMIT_IBMR 0x1c /* Bus monitor register */
/* SPACEMIT_ICR register fields */
@@ -25,7 +27,8 @@
#define SPACEMIT_CR_MODE_FAST BIT(8) /* bus mode (master operation) */
/* Bit 9 is reserved */
#define SPACEMIT_CR_UR BIT(10) /* unit reset */
-/* Bits 11-12 are reserved */
+#define SPACEMIT_CR_RSTREQ BIT(11) /* i2c bus reset request */
+/* Bit 12 is reserved */
#define SPACEMIT_CR_SCLE BIT(13) /* master clock enable */
#define SPACEMIT_CR_IUE BIT(14) /* unit enable */
/* Bits 15-17 are reserved */
@@ -76,6 +79,10 @@
SPACEMIT_SR_GCAD | SPACEMIT_SR_IRF | SPACEMIT_SR_ITE | \
SPACEMIT_SR_ALD)
+#define SPACEMIT_RCR_SDA_GLITCH_NOFIX BIT(7) /* bypass the SDA glitch fix */
+/* the cycles of SCL during bus reset */
+#define SPACEMIT_RCR_FIELD_RST_CYC GENMASK(3, 0)
+
/* SPACEMIT_IBMR register fields */
#define SPACEMIT_BMR_SDA BIT(0) /* SDA line level */
#define SPACEMIT_BMR_SCL BIT(1) /* SCL line level */
@@ -88,6 +95,8 @@
#define SPACEMIT_SR_ERR (SPACEMIT_SR_BED | SPACEMIT_SR_RXOV | SPACEMIT_SR_ALD)
+#define SPACEMIT_BUS_RESET_CLK_CNT_MAX 9
+
enum spacemit_i2c_state {
SPACEMIT_STATE_IDLE,
SPACEMIT_STATE_START,
@@ -149,17 +158,23 @@ static int spacemit_i2c_handle_err(struct spacemit_i2c_dev *i2c)
{
dev_dbg(i2c->dev, "i2c error status: 0x%08x\n", i2c->status);
- if (i2c->status & (SPACEMIT_SR_BED | SPACEMIT_SR_ALD)) {
+ /* Arbitration Loss Detected */
+ if (i2c->status & SPACEMIT_SR_ALD) {
spacemit_i2c_reset(i2c);
return -EAGAIN;
}
+ /* Bus Error No ACK/NAK */
+ if (i2c->status & SPACEMIT_SR_BED)
+ spacemit_i2c_reset(i2c);
+
return i2c->status & SPACEMIT_SR_ACKNAK ? -ENXIO : -EIO;
}
static void spacemit_i2c_conditionally_reset_bus(struct spacemit_i2c_dev *i2c)
{
u32 status;
+ u8 clk_cnt;
/* if bus is locked, reset unit. 0: locked */
status = readl(i2c->base + SPACEMIT_IBMR);
@@ -169,9 +184,21 @@ static void spacemit_i2c_conditionally_reset_bus(struct spacemit_i2c_dev *i2c)
spacemit_i2c_reset(i2c);
usleep_range(10, 20);
- /* check scl status again */
+ for (clk_cnt = 0; clk_cnt < SPACEMIT_BUS_RESET_CLK_CNT_MAX; clk_cnt++) {
+ status = readl(i2c->base + SPACEMIT_IBMR);
+ if (status & SPACEMIT_BMR_SDA)
+ return;
+
+ /* SDA still held low: request one more SCL reset cycle */
+ writel(FIELD_PREP(SPACEMIT_RCR_FIELD_RST_CYC, 1),
+ i2c->base + SPACEMIT_IRCR);
+ writel(SPACEMIT_CR_RSTREQ, i2c->base + SPACEMIT_ICR);
+ usleep_range(20, 30);
+ }
+
+ /* check sda again here */
status = readl(i2c->base + SPACEMIT_IBMR);
- if (!(status & SPACEMIT_BMR_SCL))
+ if (!(status & SPACEMIT_BMR_SDA))
dev_warn_ratelimited(i2c->dev, "unit reset failed\n");
}
@@ -202,6 +229,12 @@ static void spacemit_i2c_check_bus_release(struct spacemit_i2c_dev *i2c)
}
}
+static inline void
+spacemit_i2c_clear_int_status(struct spacemit_i2c_dev *i2c, u32 mask)
+{
+ writel(mask & SPACEMIT_I2C_INT_STATUS_MASK, i2c->base + SPACEMIT_ISR);
+}
+
static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c)
{
u32 val;
@@ -237,12 +270,16 @@ static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c)
val |= SPACEMIT_CR_MSDE | SPACEMIT_CR_MSDIE;
writel(val, i2c->base + SPACEMIT_ICR);
-}
-static inline void
-spacemit_i2c_clear_int_status(struct spacemit_i2c_dev *i2c, u32 mask)
-{
- writel(mask & SPACEMIT_I2C_INT_STATUS_MASK, i2c->base + SPACEMIT_ISR);
+ /*
+ * The glitch fix in the K1 I2C controller introduces a delay
+ * on restart signals, so we disable the fix here.
+ */
+ val = readl(i2c->base + SPACEMIT_IRCR);
+ val |= SPACEMIT_RCR_SDA_GLITCH_NOFIX;
+ writel(val, i2c->base + SPACEMIT_IRCR);
+
+ spacemit_i2c_clear_int_status(i2c, SPACEMIT_I2C_INT_STATUS_MASK);
}
static void spacemit_i2c_start(struct spacemit_i2c_dev *i2c)
@@ -267,19 +304,6 @@ static void spacemit_i2c_start(struct spacemit_i2c_dev *i2c)
writel(val, i2c->base + SPACEMIT_ICR);
}
-static void spacemit_i2c_stop(struct spacemit_i2c_dev *i2c)
-{
- u32 val;
-
- val = readl(i2c->base + SPACEMIT_ICR);
- val |= SPACEMIT_CR_STOP | SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
-
- if (i2c->read)
- val |= SPACEMIT_CR_ACKNAK;
-
- writel(val, i2c->base + SPACEMIT_ICR);
-}
-
static int spacemit_i2c_xfer_msg(struct spacemit_i2c_dev *i2c)
{
unsigned long time_left;
@@ -412,7 +436,6 @@ static irqreturn_t spacemit_i2c_irq_handler(int irq, void *devid)
val = readl(i2c->base + SPACEMIT_ICR);
val &= ~(SPACEMIT_CR_TB | SPACEMIT_CR_ACKNAK | SPACEMIT_CR_STOP | SPACEMIT_CR_START);
- writel(val, i2c->base + SPACEMIT_ICR);
switch (i2c->state) {
case SPACEMIT_STATE_START:
@@ -429,14 +452,16 @@ static irqreturn_t spacemit_i2c_irq_handler(int irq, void *devid)
}
if (i2c->state != SPACEMIT_STATE_IDLE) {
+ val |= SPACEMIT_CR_TB | SPACEMIT_CR_ALDIE;
+
if (spacemit_i2c_is_last_msg(i2c)) {
/* trigger next byte with stop */
- spacemit_i2c_stop(i2c);
- } else {
- /* trigger next byte */
- val |= SPACEMIT_CR_ALDIE | SPACEMIT_CR_TB;
- writel(val, i2c->base + SPACEMIT_ICR);
+ val |= SPACEMIT_CR_STOP;
+
+ if (i2c->read)
+ val |= SPACEMIT_CR_ACKNAK;
}
+ writel(val, i2c->base + SPACEMIT_ICR);
}
err_out:
@@ -476,12 +501,13 @@ static int spacemit_i2c_xfer(struct i2c_adapter *adapt, struct i2c_msg *msgs, in
spacemit_i2c_enable(i2c);
ret = spacemit_i2c_wait_bus_idle(i2c);
- if (!ret)
+ if (!ret) {
ret = spacemit_i2c_xfer_msg(i2c);
- else if (ret < 0)
- dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
- else
+ if (ret < 0)
+ dev_dbg(i2c->dev, "i2c transfer error: %d\n", ret);
+ } else {
spacemit_i2c_check_bus_release(i2c);
+ }
spacemit_i2c_disable(i2c);
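The recovery loop added above implements the standard I2C unstick sequence: when a slave holds SDA low mid-byte, the master issues up to nine SCL cycles (hence SPACEMIT_BUS_RESET_CLK_CNT_MAX) so the slave can shift out its remaining bits and release the line. A generic sketch of the same procedure on a bit-banged bus, with hypothetical scl_set()/sda_get() line helpers:

#include <linux/delay.h>

static bool i2c_unstick_bus(void)
{
	int i;

	for (i = 0; i < 9; i++) {	/* one byte plus an ACK bit at most */
		if (sda_get())		/* hypothetical: read SDA level */
			return true;	/* slave released the bus */
		scl_set(0);		/* hypothetical: drive SCL */
		udelay(5);
		scl_set(1);
		udelay(5);
	}
	return sda_get();
}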
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index ab456c3717db..aefdbee1f03c 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -868,7 +868,7 @@ static int mtk_i2c_calculate_speed(struct mtk_i2c *i2c, unsigned int clk_src,
return 0;
}
-static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
+static void mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
{
unsigned int clk_src;
unsigned int step_cnt;
@@ -938,9 +938,6 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk)
break;
}
-
-
- return 0;
}
static void i2c_dump_register(struct mtk_i2c *i2c)
@@ -1243,6 +1240,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
{
int ret;
int left_num = num;
+ bool write_then_read_en = false;
struct mtk_i2c *i2c = i2c_get_adapdata(adap);
ret = clk_bulk_enable(I2C_MT65XX_CLK_MAX, i2c->clocks);
@@ -1256,6 +1254,7 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
if (!(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD) &&
msgs[0].addr == msgs[1].addr) {
i2c->auto_restart = 0;
+ write_then_read_en = true;
}
}
@@ -1280,12 +1279,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
else
i2c->op = I2C_MASTER_WR;
- if (!i2c->auto_restart) {
- if (num > 1) {
- /* combined two messages into one transaction */
- i2c->op = I2C_MASTER_WRRD;
- left_num--;
- }
+ if (write_then_read_en) {
+ /* combined two messages into one transaction */
+ i2c->op = I2C_MASTER_WRRD;
+ left_num--;
}
/* always use DMA mode. */
@@ -1293,7 +1290,10 @@ static int mtk_i2c_transfer(struct i2c_adapter *adap,
if (ret < 0)
goto err_exit;
- msgs++;
+ if (i2c->op == I2C_MASTER_WRRD)
+ msgs += 2;
+ else
+ msgs++;
}
/* the return value is number of executed messages */
ret = num;
@@ -1457,11 +1457,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
strscpy(i2c->adap.name, I2C_DRV_NAME, sizeof(i2c->adap.name));
- ret = mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
- if (ret) {
- dev_err(&pdev->dev, "Failed to set the speed.\n");
- return -EINVAL;
- }
+ mtk_i2c_set_speed(i2c, clk_get_rate(i2c->clocks[speed_clk].clk));
if (i2c->dev_comp->max_dma_support > 32) {
ret = dma_set_mask(&pdev->dev,
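The WRRD fix above matters for the common register-read shape: a write message immediately followed by a read to the same address is fused into one repeated-start hardware transaction, so the loop must consume both messages at once. A sketch of a message pair that takes this path (address and register values illustrative):

#include <linux/i2c.h>

u8 reg = 0x10;			/* hypothetical register offset */
u8 val;
struct i2c_msg msgs[2] = {
	{ .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
	{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
};
/* i2c_transfer(adap, msgs, 2) makes the driver pick I2C_MASTER_WRRD
 * and advance past both messages in a single loop iteration. */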
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 8fc26a511320..1acba628e16c 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -766,7 +766,6 @@ mv64xxx_i2c_xfer_core(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
drv_data->num_msgs = 0;
drv_data->msgs = NULL;
- pm_runtime_mark_last_busy(&adap->dev);
pm_runtime_put_autosuspend(&adap->dev);
return ret;
diff --git a/drivers/i2c/busses/i2c-nct6694.c b/drivers/i2c/busses/i2c-nct6694.c
new file mode 100644
index 000000000000..1413ab6f9462
--- /dev/null
+++ b/drivers/i2c/busses/i2c-nct6694.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nuvoton NCT6694 I2C adapter driver based on USB interface.
+ *
+ * Copyright (C) 2025 Nuvoton Technology Corp.
+ */
+
+#include <linux/i2c.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/mfd/nct6694.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+/*
+ * USB command module type for NCT6694 I2C controller.
+ * This defines the module type used for communication with the NCT6694
+ * I2C controller over the USB interface.
+ */
+#define NCT6694_I2C_MOD 0x03
+
+/* Command 00h - I2C Deliver */
+#define NCT6694_I2C_DELIVER 0x00
+#define NCT6694_I2C_DELIVER_SEL 0x00
+
+#define NCT6694_I2C_MAX_XFER_SIZE 64
+#define NCT6694_I2C_MAX_DEVS 6
+
+static unsigned char br_reg[NCT6694_I2C_MAX_DEVS] = {[0 ... (NCT6694_I2C_MAX_DEVS - 1)] = 0xFF};
+
+module_param_array(br_reg, byte, NULL, 0644);
+MODULE_PARM_DESC(br_reg,
+ "I2C Baudrate register per adapter: (0=25K, 1=50K, 2=100K, 3=200K, 4=400K, 5=800K, 6=1M), default=2");
+
+enum nct6694_i2c_baudrate {
+ NCT6694_I2C_BR_25K = 0,
+ NCT6694_I2C_BR_50K,
+ NCT6694_I2C_BR_100K,
+ NCT6694_I2C_BR_200K,
+ NCT6694_I2C_BR_400K,
+ NCT6694_I2C_BR_800K,
+ NCT6694_I2C_BR_1M
+};
+
+struct __packed nct6694_i2c_deliver {
+ u8 port;
+ u8 br;
+ u8 addr;
+ u8 w_cnt;
+ u8 r_cnt;
+ u8 rsv[11];
+ u8 write_data[NCT6694_I2C_MAX_XFER_SIZE];
+ u8 read_data[NCT6694_I2C_MAX_XFER_SIZE];
+};
+
+struct nct6694_i2c_data {
+ struct device *dev;
+ struct nct6694 *nct6694;
+ struct i2c_adapter adapter;
+ struct nct6694_i2c_deliver deliver;
+ unsigned char port;
+ unsigned char br;
+};
+
+static int nct6694_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct nct6694_i2c_data *data = adap->algo_data;
+ struct nct6694_i2c_deliver *deliver = &data->deliver;
+ static const struct nct6694_cmd_header cmd_hd = {
+ .mod = NCT6694_I2C_MOD,
+ .cmd = NCT6694_I2C_DELIVER,
+ .sel = NCT6694_I2C_DELIVER_SEL,
+ .len = cpu_to_le16(sizeof(*deliver))
+ };
+ int ret, i;
+
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *msg_temp = &msgs[i];
+
+ memset(deliver, 0, sizeof(*deliver));
+
+ deliver->port = data->port;
+ deliver->br = data->br;
+ deliver->addr = i2c_8bit_addr_from_msg(msg_temp);
+ if (msg_temp->flags & I2C_M_RD) {
+ deliver->r_cnt = msg_temp->len;
+ ret = nct6694_write_msg(data->nct6694, &cmd_hd, deliver);
+ if (ret < 0)
+ return ret;
+
+ memcpy(msg_temp->buf, deliver->read_data, msg_temp->len);
+ } else {
+ deliver->w_cnt = msg_temp->len;
+ memcpy(deliver->write_data, msg_temp->buf, msg_temp->len);
+ ret = nct6694_write_msg(data->nct6694, &cmd_hd, deliver);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return num;
+}
+
+static u32 nct6694_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_adapter_quirks nct6694_i2c_quirks = {
+ .max_read_len = NCT6694_I2C_MAX_XFER_SIZE,
+ .max_write_len = NCT6694_I2C_MAX_XFER_SIZE,
+};
+
+static const struct i2c_algorithm nct6694_i2c_algo = {
+ .xfer = nct6694_i2c_xfer,
+ .functionality = nct6694_i2c_func,
+};
+
+static int nct6694_i2c_set_baudrate(struct nct6694_i2c_data *data)
+{
+ if (data->port >= NCT6694_I2C_MAX_DEVS) {
+ dev_err(data->dev, "Invalid I2C port index %d\n", data->port);
+ return -EINVAL;
+ }
+
+ if (br_reg[data->port] > NCT6694_I2C_BR_1M) {
+ dev_warn(data->dev, "Invalid baudrate %d for I2C%d, using 100K\n",
+ br_reg[data->port], data->port);
+ br_reg[data->port] = NCT6694_I2C_BR_100K;
+ }
+
+ data->br = br_reg[data->port];
+
+ return 0;
+}
+
+static void nct6694_i2c_ida_free(void *d)
+{
+ struct nct6694_i2c_data *data = d;
+ struct nct6694 *nct6694 = data->nct6694;
+
+ ida_free(&nct6694->i2c_ida, data->port);
+}
+
+static int nct6694_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct nct6694 *nct6694 = dev_get_drvdata(dev->parent);
+ struct nct6694_i2c_data *data;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = dev;
+ data->nct6694 = nct6694;
+
+ ret = ida_alloc(&nct6694->i2c_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ data->port = ret;
+
+ ret = devm_add_action_or_reset(dev, nct6694_i2c_ida_free, data);
+ if (ret)
+ return ret;
+
+ ret = nct6694_i2c_set_baudrate(data);
+ if (ret)
+ return ret;
+
+ sprintf(data->adapter.name, "NCT6694 I2C Adapter %d", data->port);
+ data->adapter.owner = THIS_MODULE;
+ data->adapter.algo = &nct6694_i2c_algo;
+ data->adapter.quirks = &nct6694_i2c_quirks;
+ data->adapter.dev.parent = dev;
+ data->adapter.algo_data = data;
+
+ platform_set_drvdata(pdev, data);
+
+ return devm_i2c_add_adapter(dev, &data->adapter);
+}
+
+static struct platform_driver nct6694_i2c_driver = {
+ .driver = {
+ .name = "nct6694-i2c",
+ },
+ .probe = nct6694_i2c_probe,
+};
+
+module_platform_driver(nct6694_i2c_driver);
+
+MODULE_DESCRIPTION("USB-I2C adapter driver for NCT6694");
+MODULE_AUTHOR("Ming Yu <tmyu0@nuvoton.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nct6694-i2c");
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 541d808d62d0..14c059b03945 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -216,7 +216,6 @@ exit:
if (status2 < 0)
dev_err(i2cd->dev, "i2c stop failed %d\n", status2);
}
- pm_runtime_mark_last_busy(i2cd->dev);
pm_runtime_put_autosuspend(i2cd->dev);
return status;
}
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 5fcc9f6c33e5..d9f590f0c384 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -828,7 +828,6 @@ omap_i2c_xfer_common(struct i2c_adapter *adap, struct i2c_msg msgs[], int num,
omap->set_mpu_wkup_lat(omap->dev, -1);
out:
- pm_runtime_mark_last_busy(omap->dev);
pm_runtime_put_autosuspend(omap->dev);
return r;
}
@@ -1510,7 +1509,6 @@ omap_i2c_probe(struct platform_device *pdev)
dev_info(omap->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
major, minor, omap->speed);
- pm_runtime_mark_last_busy(omap->dev);
pm_runtime_put_autosuspend(omap->dev);
return 0;
@@ -1605,7 +1603,6 @@ static int omap_i2c_suspend(struct device *dev)
static int omap_i2c_resume(struct device *dev)
{
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 85e8cf58e8bf..0cbf2f509527 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -95,7 +95,7 @@ static struct i2c_algo_pca_data pca_isa_data = {
/* .data intentionally left NULL, not needed with ISA */
.write_byte = pca_isa_writebyte,
.read_byte = pca_isa_readbyte,
- .wait_for_completion = pca_isa_waitforcompletion,
+ .wait_for_completion_cb = pca_isa_waitforcompletion,
.reset_chip = pca_isa_resetchip,
};
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 87da8241b927..c0f35ebbe37d 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -180,7 +180,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
}
i2c->algo_data.data = i2c;
- i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion;
+ i2c->algo_data.wait_for_completion_cb = i2c_pca_pf_waitforcompletion;
if (i2c->gpio)
i2c->algo_data.reset_chip = i2c_pca_pf_resetchip;
else
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index a3afa11a71a1..884055df1560 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -450,7 +450,6 @@ static int cci_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
ret = num;
err:
- pm_runtime_mark_last_busy(cci->dev);
pm_runtime_put_autosuspend(cci->dev);
return ret;
@@ -508,7 +507,6 @@ static int __maybe_unused cci_suspend(struct device *dev)
static int __maybe_unused cci_resume(struct device *dev)
{
cci_resume_runtime(dev);
- pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
return 0;
@@ -785,8 +783,54 @@ static const struct cci_data cci_v2_data = {
},
};
+static const struct cci_data cci_msm8953_data = {
+ .num_masters = 2,
+ .queue_size = { 64, 16 },
+ .quirks = {
+ .max_write_len = 11,
+ .max_read_len = 12,
+ },
+ .params[I2C_MODE_STANDARD] = {
+ .thigh = 78,
+ .tlow = 114,
+ .tsu_sto = 28,
+ .tsu_sta = 28,
+ .thd_dat = 10,
+ .thd_sta = 77,
+ .tbuf = 118,
+ .scl_stretch_en = 0,
+ .trdhld = 6,
+ .tsp = 1
+ },
+ .params[I2C_MODE_FAST] = {
+ .thigh = 20,
+ .tlow = 28,
+ .tsu_sto = 21,
+ .tsu_sta = 21,
+ .thd_dat = 13,
+ .thd_sta = 18,
+ .tbuf = 32,
+ .scl_stretch_en = 0,
+ .trdhld = 6,
+ .tsp = 3
+ },
+ .params[I2C_MODE_FAST_PLUS] = {
+ .thigh = 16,
+ .tlow = 22,
+ .tsu_sto = 17,
+ .tsu_sta = 18,
+ .thd_dat = 16,
+ .thd_sta = 15,
+ .tbuf = 19,
+ .scl_stretch_en = 1,
+ .trdhld = 3,
+ .tsp = 3
+ },
+};
+
static const struct of_device_id cci_dt_match[] = {
{ .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
+ { .compatible = "qcom,msm8953-cci", .data = &cci_msm8953_data},
{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index ff2289b52c84..3a04016db2c3 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -77,6 +77,25 @@ enum geni_i2c_err_code {
#define XFER_TIMEOUT HZ
#define RST_TIMEOUT HZ
+#define QCOM_I2C_MIN_NUM_OF_MSGS_MULTI_DESC 2
+
+/**
+ * struct geni_i2c_gpi_multi_desc_xfer - Structure for multi transfer support
+ *
+ * @msg_idx_cnt: Current message index being processed in the transfer
+ * @unmap_msg_cnt: Number of messages that have been unmapped
+ * @irq_cnt: Number of transfer completion interrupts received
+ * @dma_buf: Array of virtual addresses for DMA-safe buffers
+ * @dma_addr: Array of DMA addresses corresponding to the buffers
+ */
+struct geni_i2c_gpi_multi_desc_xfer {
+ u32 msg_idx_cnt;
+ u32 unmap_msg_cnt;
+ u32 irq_cnt;
+ void **dma_buf;
+ dma_addr_t *dma_addr;
+};
+
struct geni_i2c_dev {
struct geni_se se;
u32 tx_wm;
@@ -99,6 +118,9 @@ struct geni_i2c_dev {
struct dma_chan *rx_c;
bool gpi_mode;
bool abort_done;
+ bool is_tx_multi_desc_xfer;
+ u32 num_msgs;
+ struct geni_i2c_gpi_multi_desc_xfer i2c_multi_desc_config;
};
struct geni_i2c_desc {
@@ -499,6 +521,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
static void i2c_gpi_cb_result(void *cb, const struct dmaengine_result *result)
{
struct geni_i2c_dev *gi2c = cb;
+ struct geni_i2c_gpi_multi_desc_xfer *tx_multi_xfer;
if (result->result != DMA_TRANS_NOERROR) {
dev_err(gi2c->se.dev, "DMA txn failed:%d\n", result->result);
@@ -507,6 +530,11 @@ static void i2c_gpi_cb_result(void *cb, const struct dmaengine_result *result)
dev_dbg(gi2c->se.dev, "DMA xfer has pending: %d\n", result->residue);
}
+ if (gi2c->is_tx_multi_desc_xfer) {
+ tx_multi_xfer = &gi2c->i2c_multi_desc_config;
+ tx_multi_xfer->irq_cnt++;
+ }
+
complete(&gi2c->done);
}
@@ -525,7 +553,72 @@ static void geni_i2c_gpi_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
}
}
-static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+/**
+ * geni_i2c_gpi_multi_desc_unmap() - Unmap DMA buffers after multi-message TX transfers
+ * @gi2c: I2C dev handle
+ * @msgs: Array of I2C messages
+ * @peripheral: Pointer to gpi_i2c_config
+ */
+static void geni_i2c_gpi_multi_desc_unmap(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
+ struct gpi_i2c_config *peripheral)
+{
+ u32 msg_xfer_cnt, wr_idx = 0;
+ struct geni_i2c_gpi_multi_desc_xfer *tx_multi_xfer = &gi2c->i2c_multi_desc_config;
+
+ msg_xfer_cnt = gi2c->err ? tx_multi_xfer->msg_idx_cnt : tx_multi_xfer->irq_cnt;
+
+ /* Unmap the processed DMA buffers based on the received interrupt count */
+ for (; tx_multi_xfer->unmap_msg_cnt < msg_xfer_cnt; tx_multi_xfer->unmap_msg_cnt++) {
+ wr_idx = tx_multi_xfer->unmap_msg_cnt;
+ geni_i2c_gpi_unmap(gi2c, &msgs[wr_idx],
+ tx_multi_xfer->dma_buf[wr_idx],
+ tx_multi_xfer->dma_addr[wr_idx],
+ NULL, 0);
+
+ if (tx_multi_xfer->unmap_msg_cnt == gi2c->num_msgs - 1) {
+ kfree(tx_multi_xfer->dma_buf);
+ kfree(tx_multi_xfer->dma_addr);
+ break;
+ }
+ }
+}
+
+/**
+ * geni_i2c_gpi_multi_xfer_timeout_handler() - Handles multi message transfer timeout
+ * @dev: Pointer to the corresponding dev node
+ * @multi_xfer: Pointer to the geni_i2c_gpi_multi_desc_xfer
+ * @transfer_timeout_msecs: Timeout value in milliseconds
+ * @transfer_comp: Completion object of the transfer
+ *
+ * This function waits for the completion of each processed transfer message
+ * based on the interrupts generated upon transfer completion.
+ *
+ * Return: On success returns 0, -ETIMEDOUT on timeout.
+ */
+static int geni_i2c_gpi_multi_xfer_timeout_handler(struct device *dev,
+ struct geni_i2c_gpi_multi_desc_xfer *multi_xfer,
+ u32 transfer_timeout_msecs,
+ struct completion *transfer_comp)
+{
+ int i;
+ u32 time_left;
+
+ for (i = 0; i < multi_xfer->msg_idx_cnt - 1; i++) {
+ reinit_completion(transfer_comp);
+
+ if (multi_xfer->msg_idx_cnt != multi_xfer->irq_cnt) {
+ time_left = wait_for_completion_timeout(transfer_comp,
+ transfer_timeout_msecs);
+ if (!time_left) {
+ dev_err(dev, "%s: Transfer timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+ }
+ return 0;
+}
+
+static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[],
struct dma_slave_config *config, dma_addr_t *dma_addr_p,
void **buf, unsigned int op, struct dma_chan *dma_chan)
{
@@ -537,26 +630,45 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
enum dma_transfer_direction dma_dirn;
struct dma_async_tx_descriptor *desc;
int ret;
+ struct geni_i2c_gpi_multi_desc_xfer *gi2c_gpi_xfer;
+ dma_cookie_t cookie;
+ u32 msg_idx;
peripheral = config->peripheral_config;
+ gi2c_gpi_xfer = &gi2c->i2c_multi_desc_config;
+ msg_idx = gi2c_gpi_xfer->msg_idx_cnt;
- dma_buf = i2c_get_dma_safe_msg_buf(msg, 1);
- if (!dma_buf)
- return -ENOMEM;
+ dma_buf = i2c_get_dma_safe_msg_buf(&msgs[msg_idx], 1);
+ if (!dma_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (op == I2C_WRITE)
map_dirn = DMA_TO_DEVICE;
else
map_dirn = DMA_FROM_DEVICE;
- addr = dma_map_single(gi2c->se.dev->parent, dma_buf, msg->len, map_dirn);
+ addr = dma_map_single(gi2c->se.dev->parent, dma_buf,
+ msgs[msg_idx].len, map_dirn);
if (dma_mapping_error(gi2c->se.dev->parent, addr)) {
- i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
- return -ENOMEM;
+ i2c_put_dma_safe_msg_buf(dma_buf, &msgs[msg_idx], false);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (gi2c->is_tx_multi_desc_xfer) {
+ flags = DMA_CTRL_ACK;
+
+ /* BEI bit to be cleared for last TRE */
+ if (msg_idx == gi2c->num_msgs - 1)
+ flags |= DMA_PREP_INTERRUPT;
+ } else {
+ flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
}
/* set the length as message for rx txn */
- peripheral->rx_len = msg->len;
+ peripheral->rx_len = msgs[msg_idx].len;
peripheral->op = op;
ret = dmaengine_slave_config(dma_chan, config);
@@ -567,14 +679,21 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
peripheral->set_config = 0;
peripheral->multi_msg = true;
- flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
if (op == I2C_WRITE)
dma_dirn = DMA_MEM_TO_DEV;
else
dma_dirn = DMA_DEV_TO_MEM;
- desc = dmaengine_prep_slave_single(dma_chan, addr, msg->len, dma_dirn, flags);
+ desc = dmaengine_prep_slave_single(dma_chan, addr, msgs[msg_idx].len,
+ dma_dirn, flags);
+ if (!desc && !(flags & DMA_PREP_INTERRUPT)) {
+ /* Retry with interrupt if not enough TREs */
+ flags |= DMA_PREP_INTERRUPT;
+ desc = dmaengine_prep_slave_single(dma_chan, addr, msgs[msg_idx].len,
+ dma_dirn, flags);
+ }
+
if (!desc) {
dev_err(gi2c->se.dev, "prep_slave_sg failed\n");
ret = -EIO;
@@ -584,15 +703,48 @@ static int geni_i2c_gpi(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
desc->callback_result = i2c_gpi_cb_result;
desc->callback_param = gi2c;
- dmaengine_submit(desc);
- *buf = dma_buf;
- *dma_addr_p = addr;
+ if (!((msgs[msg_idx].flags & I2C_M_RD) && op == I2C_WRITE))
+ gi2c_gpi_xfer->msg_idx_cnt++;
+ cookie = dmaengine_submit(desc);
+ if (dma_submit_error(cookie)) {
+ dev_err(gi2c->se.dev,
+ "%s: dmaengine_submit failed (%d)\n", __func__, cookie);
+ ret = -EINVAL;
+ goto err_config;
+ }
+
+ if (gi2c->is_tx_multi_desc_xfer) {
+ gi2c_gpi_xfer->dma_buf[msg_idx] = dma_buf;
+ gi2c_gpi_xfer->dma_addr[msg_idx] = addr;
+
+ dma_async_issue_pending(gi2c->tx_c);
+
+ if ((msg_idx == (gi2c->num_msgs - 1)) || flags & DMA_PREP_INTERRUPT) {
+ ret = geni_i2c_gpi_multi_xfer_timeout_handler(gi2c->se.dev, gi2c_gpi_xfer,
+ XFER_TIMEOUT, &gi2c->done);
+ if (ret) {
+ dev_err(gi2c->se.dev,
+ "I2C multi write msg transfer timeout: %d\n",
+ ret);
+ gi2c->err = ret;
+ return ret;
+ }
+ }
+ } else {
+ /* Non multi descriptor message transfer */
+ *buf = dma_buf;
+ *dma_addr_p = addr;
+ }
return 0;
err_config:
- dma_unmap_single(gi2c->se.dev->parent, addr, msg->len, map_dirn);
- i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+ dma_unmap_single(gi2c->se.dev->parent, addr,
+ msgs[msg_idx].len, map_dirn);
+ i2c_put_dma_safe_msg_buf(dma_buf, &msgs[msg_idx], false);
+
+out:
+ gi2c->err = ret;
return ret;
}
@@ -604,6 +756,7 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
unsigned long time_left;
dma_addr_t tx_addr, rx_addr;
void *tx_buf = NULL, *rx_buf = NULL;
+ struct geni_i2c_gpi_multi_desc_xfer *tx_multi_xfer;
const struct geni_i2c_clk_fld *itr = gi2c->clk_fld;
config.peripheral_config = &peripheral;
@@ -617,6 +770,41 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
peripheral.set_config = 1;
peripheral.multi_msg = false;
+ gi2c->num_msgs = num;
+ gi2c->is_tx_multi_desc_xfer = false;
+
+ tx_multi_xfer = &gi2c->i2c_multi_desc_config;
+ memset(tx_multi_xfer, 0, sizeof(struct geni_i2c_gpi_multi_desc_xfer));
+
+ /*
+ * If the number of write messages is two or more, configure the
+ * hardware for multi-descriptor transfers with BEI.
+ */
+ if (num >= QCOM_I2C_MIN_NUM_OF_MSGS_MULTI_DESC) {
+ gi2c->is_tx_multi_desc_xfer = true;
+ for (i = 0; i < num; i++) {
+ if (msgs[i].flags & I2C_M_RD) {
+ /*
+ * Multi descriptor transfer with BEI
+ * support is enabled for write transfers.
+ * TODO: Add BEI optimization support for
+ * read transfers later.
+ */
+ gi2c->is_tx_multi_desc_xfer = false;
+ break;
+ }
+ }
+ }
+
+ if (gi2c->is_tx_multi_desc_xfer) {
+ tx_multi_xfer->dma_buf = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ tx_multi_xfer->dma_addr = kcalloc(num, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!tx_multi_xfer->dma_buf || !tx_multi_xfer->dma_addr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
for (i = 0; i < num; i++) {
gi2c->cur = &msgs[i];
gi2c->err = 0;
@@ -627,14 +815,16 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
peripheral.stretch = 1;
peripheral.addr = msgs[i].addr;
+ if (i > 0 && (!(msgs[i].flags & I2C_M_RD)))
+ peripheral.multi_msg = false;
- ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
+ ret = geni_i2c_gpi(gi2c, msgs, &config,
&tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c);
if (ret)
goto err;
if (msgs[i].flags & I2C_M_RD) {
- ret = geni_i2c_gpi(gi2c, &msgs[i], &config,
+ ret = geni_i2c_gpi(gi2c, msgs, &config,
&rx_addr, &rx_buf, I2C_READ, gi2c->rx_c);
if (ret)
goto err;
@@ -642,18 +832,24 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i
dma_async_issue_pending(gi2c->rx_c);
}
- dma_async_issue_pending(gi2c->tx_c);
-
- time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
- if (!time_left)
- gi2c->err = -ETIMEDOUT;
+ if (!gi2c->is_tx_multi_desc_xfer) {
+ dma_async_issue_pending(gi2c->tx_c);
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left) {
+ dev_err(gi2c->se.dev, "%s:I2C timeout\n", __func__);
+ gi2c->err = -ETIMEDOUT;
+ }
+ }
if (gi2c->err) {
ret = gi2c->err;
goto err;
}
- geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
+ if (!gi2c->is_tx_multi_desc_xfer)
+ geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
+ else if (tx_multi_xfer->unmap_msg_cnt != tx_multi_xfer->irq_cnt)
+ geni_i2c_gpi_multi_desc_unmap(gi2c, msgs, &peripheral);
}
return num;
@@ -662,7 +858,11 @@ err:
dev_err(gi2c->se.dev, "GPI transfer failed: %d\n", ret);
dmaengine_terminate_sync(gi2c->rx_c);
dmaengine_terminate_sync(gi2c->tx_c);
- geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
+ if (gi2c->is_tx_multi_desc_xfer)
+ geni_i2c_gpi_multi_desc_unmap(gi2c, msgs, &peripheral);
+ else
+ geni_i2c_gpi_unmap(gi2c, &msgs[i], tx_buf, tx_addr, rx_buf, rx_addr);
+
return ret;
}
@@ -714,7 +914,6 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
else
ret = geni_i2c_fifo_xfer(gi2c, msgs, num);
- pm_runtime_mark_last_busy(gi2c->se.dev);
pm_runtime_put_autosuspend(gi2c->se.dev);
gi2c->cur = NULL;
gi2c->err = 0;
@@ -870,7 +1069,13 @@ static int geni_i2c_probe(struct platform_device *pdev)
goto err_clk;
}
proto = geni_se_read_proto(&gi2c->se);
- if (proto != GENI_SE_I2C) {
+ if (proto == GENI_SE_INVALID_PROTO) {
+ ret = geni_load_se_firmware(&gi2c->se, GENI_SE_I2C);
+ if (ret) {
+ dev_err_probe(dev, ret, "i2c firmware load failed ret: %d\n", ret);
+ goto err_resources;
+ }
+ } else if (proto != GENI_SE_I2C) {
ret = dev_err_probe(dev, -ENXIO, "Invalid proto %d\n", proto);
goto err_resources;
}
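The multi-descriptor path above batches every write message onto the GPI channel and suppresses per-descriptor completion interrupts (BEI) except on the final TRE, then reconciles progress by comparing irq_cnt against the submitted index. A condensed sketch of the batching shape (submit_one() is a hypothetical wrapper around dmaengine_prep_slave_single()/dmaengine_submit(); done and tx_chan stand in for the driver's completion and channel):

#include <linux/dmaengine.h>

for (i = 0; i < num; i++) {
	unsigned long flags = DMA_CTRL_ACK;

	if (i == num - 1)
		flags |= DMA_PREP_INTERRUPT;	/* IRQ only on the last TRE */

	submit_one(&msgs[i], flags);		/* hypothetical helper */
}
dma_async_issue_pending(tx_chan);
wait_for_completion_timeout(&done, XFER_TIMEOUT);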
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index fc348924d522..a0e076fc5f36 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -1139,7 +1139,6 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
ret = num;
out:
- pm_runtime_mark_last_busy(qup->dev);
pm_runtime_put_autosuspend(qup->dev);
return ret;
@@ -1624,7 +1623,6 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
if (ret == 0)
ret = num;
out:
- pm_runtime_mark_last_busy(qup->dev);
pm_runtime_put_autosuspend(qup->dev);
return ret;
@@ -1991,7 +1989,6 @@ static int qup_i2c_suspend(struct device *device)
static int qup_i2c_resume(struct device *device)
{
qup_i2c_pm_resume_runtime(device);
- pm_runtime_mark_last_busy(device);
pm_request_autosuspend(device);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 9c164a4b9bb9..3e8f126cb7f7 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -206,7 +206,6 @@ static int riic_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
}
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return riic->err ?: num;
@@ -386,7 +385,7 @@ static int riic_init_hw(struct riic_dev *riic)
*/
total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
- for (cks = 0; cks < 7; cks++) {
+ for (cks = 0; cks <= 7; cks++) {
/*
* 60% low time must be less than BRL + 2 + 1
* BRL max register value is 0x1F.
@@ -452,7 +451,6 @@ static int riic_init_hw(struct riic_dev *riic)
riic_clear_set_bit(riic, ICCR1_IICRST, 0, RIIC_ICCR1);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
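The cks loop bound fix above lets the divider search cover all eight settings: the RIIC reference clock is the peripheral clock divided by 2^cks with cks in 0..7, so stopping at cks < 7 silently skipped the largest divider. A sketch of the search under that assumption (max_ticks stands in for the register-derived limit):

for (cks = 0; cks <= 7; cks++) {
	if (total_ticks <= max_ticks)	/* budget fits this divider */
		break;
	total_ticks = DIV_ROUND_UP(total_ticks, 2);	/* next /2 stage */
	rate /= 2;
}
if (cks > 7)
	return -EINVAL;	/* requested bus rate not reachable */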
diff --git a/drivers/i2c/busses/i2c-rtl9300.c b/drivers/i2c/busses/i2c-rtl9300.c
index 9e1f71fed0fe..4723e48cfe18 100644
--- a/drivers/i2c/busses/i2c-rtl9300.c
+++ b/drivers/i2c/busses/i2c-rtl9300.c
@@ -8,6 +8,7 @@
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/unaligned.h>
enum rtl9300_bus_freq {
RTL9300_I2C_STD_FREQ,
@@ -20,103 +21,143 @@ struct rtl9300_i2c_chan {
struct i2c_adapter adap;
struct rtl9300_i2c *i2c;
enum rtl9300_bus_freq bus_freq;
- u8 sda_pin;
+ u8 sda_num;
+};
+
+enum rtl9300_i2c_reg_scope {
+ REG_SCOPE_GLOBAL,
+ REG_SCOPE_MASTER,
+};
+
+struct rtl9300_i2c_reg_field {
+ struct reg_field field;
+ enum rtl9300_i2c_reg_scope scope;
+};
+
+enum rtl9300_i2c_reg_fields {
+ F_DATA_WIDTH = 0,
+ F_DEV_ADDR,
+ F_I2C_FAIL,
+ F_I2C_TRIG,
+ F_MEM_ADDR,
+ F_MEM_ADDR_WIDTH,
+ F_RD_MODE,
+ F_RWOP,
+ F_SCL_FREQ,
+ F_SCL_SEL,
+ F_SDA_OUT_SEL,
+ F_SDA_SEL,
+
+ /* keep last */
+ F_NUM_FIELDS
+};
+
+struct rtl9300_i2c_drv_data {
+ struct rtl9300_i2c_reg_field field_desc[F_NUM_FIELDS];
+ int (*select_scl)(struct rtl9300_i2c *i2c, u8 scl);
+ u32 data_reg;
+ u8 max_nchan;
};
#define RTL9300_I2C_MUX_NCHAN 8
+#define RTL9310_I2C_MUX_NCHAN 12
struct rtl9300_i2c {
struct regmap *regmap;
struct device *dev;
- struct rtl9300_i2c_chan chans[RTL9300_I2C_MUX_NCHAN];
+ struct rtl9300_i2c_chan chans[RTL9310_I2C_MUX_NCHAN];
+ struct regmap_field *fields[F_NUM_FIELDS];
u32 reg_base;
- u8 sda_pin;
+ u32 data_reg;
+ u8 scl_num;
+ u8 sda_num;
struct mutex lock;
};
+DEFINE_GUARD(rtl9300_i2c, struct rtl9300_i2c *, mutex_lock(&_T->lock), mutex_unlock(&_T->lock))
+
+enum rtl9300_i2c_xfer_type {
+ RTL9300_I2C_XFER_BYTE,
+ RTL9300_I2C_XFER_WORD,
+ RTL9300_I2C_XFER_BLOCK,
+};
+
+struct rtl9300_i2c_xfer {
+ enum rtl9300_i2c_xfer_type type;
+ u16 dev_addr;
+ u8 reg_addr;
+ u8 reg_addr_len;
+ u8 *data;
+ u8 data_len;
+ bool write;
+};
+
#define RTL9300_I2C_MST_CTRL1 0x0
-#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS 8
-#define RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK GENMASK(31, 8)
-#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS 4
-#define RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK GENMASK(6, 4)
-#define RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL BIT(3)
-#define RTL9300_I2C_MST_CTRL1_RWOP BIT(2)
-#define RTL9300_I2C_MST_CTRL1_I2C_FAIL BIT(1)
-#define RTL9300_I2C_MST_CTRL1_I2C_TRIG BIT(0)
#define RTL9300_I2C_MST_CTRL2 0x4
-#define RTL9300_I2C_MST_CTRL2_RD_MODE BIT(15)
-#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS 8
-#define RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK GENMASK(14, 8)
-#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS 4
-#define RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK GENMASK(7, 4)
-#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS 2
-#define RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK GENMASK(3, 2)
-#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS 0
-#define RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK GENMASK(1, 0)
#define RTL9300_I2C_MST_DATA_WORD0 0x8
#define RTL9300_I2C_MST_DATA_WORD1 0xc
#define RTL9300_I2C_MST_DATA_WORD2 0x10
#define RTL9300_I2C_MST_DATA_WORD3 0x14
-
#define RTL9300_I2C_MST_GLB_CTRL 0x384
+#define RTL9310_I2C_MST_IF_CTRL 0x1004
+#define RTL9310_I2C_MST_IF_SEL 0x1008
+#define RTL9310_I2C_MST_CTRL 0x0
+#define RTL9310_I2C_MST_MEMADDR_CTRL 0x4
+#define RTL9310_I2C_MST_DATA_CTRL 0x8
+
static int rtl9300_i2c_reg_addr_set(struct rtl9300_i2c *i2c, u32 reg, u16 len)
{
- u32 val, mask;
int ret;
- val = len << RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_OFS;
- mask = RTL9300_I2C_MST_CTRL2_MEM_ADDR_WIDTH_MASK;
-
- ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val);
+ ret = regmap_field_write(i2c->fields[F_MEM_ADDR_WIDTH], len);
if (ret)
return ret;
- val = reg << RTL9300_I2C_MST_CTRL1_MEM_ADDR_OFS;
- mask = RTL9300_I2C_MST_CTRL1_MEM_ADDR_MASK;
-
- return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val);
+ return regmap_field_write(i2c->fields[F_MEM_ADDR], reg);
}
-static int rtl9300_i2c_config_io(struct rtl9300_i2c *i2c, u8 sda_pin)
+static int rtl9300_i2c_select_scl(struct rtl9300_i2c *i2c, u8 scl)
{
- int ret;
- u32 val, mask;
-
- ret = regmap_update_bits(i2c->regmap, RTL9300_I2C_MST_GLB_CTRL, BIT(sda_pin), BIT(sda_pin));
- if (ret)
- return ret;
-
- val = (sda_pin << RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_OFS) |
- RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL;
- mask = RTL9300_I2C_MST_CTRL1_SDA_OUT_SEL_MASK | RTL9300_I2C_MST_CTRL1_GPIO_SCL_SEL;
+ return regmap_field_write(i2c->fields[F_SCL_SEL], 1);
+}
- return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val);
+static int rtl9310_i2c_select_scl(struct rtl9300_i2c *i2c, u8 scl)
+{
+ return regmap_field_update_bits(i2c->fields[F_SCL_SEL], BIT(scl), BIT(scl));
}
-static int rtl9300_i2c_config_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan,
- u16 addr, u16 len)
+static int rtl9300_i2c_config_chan(struct rtl9300_i2c *i2c, struct rtl9300_i2c_chan *chan)
{
- u32 val, mask;
+ struct rtl9300_i2c_drv_data *drv_data;
+ int ret;
- if (len < 1 || len > 16)
- return -EINVAL;
+ if (i2c->sda_num == chan->sda_num)
+ return 0;
- val = chan->bus_freq << RTL9300_I2C_MST_CTRL2_SCL_FREQ_OFS;
- mask = RTL9300_I2C_MST_CTRL2_SCL_FREQ_MASK;
+ ret = regmap_field_write(i2c->fields[F_SCL_FREQ], chan->bus_freq);
+ if (ret)
+ return ret;
- val |= addr << RTL9300_I2C_MST_CTRL2_DEV_ADDR_OFS;
- mask |= RTL9300_I2C_MST_CTRL2_DEV_ADDR_MASK;
+ drv_data = (struct rtl9300_i2c_drv_data *)device_get_match_data(i2c->dev);
+ ret = drv_data->select_scl(i2c, i2c->scl_num);
+ if (ret)
+ return ret;
- val |= ((len - 1) & 0xf) << RTL9300_I2C_MST_CTRL2_DATA_WIDTH_OFS;
- mask |= RTL9300_I2C_MST_CTRL2_DATA_WIDTH_MASK;
+ ret = regmap_field_update_bits(i2c->fields[F_SDA_SEL], BIT(chan->sda_num),
+ BIT(chan->sda_num));
+ if (ret)
+ return ret;
- mask |= RTL9300_I2C_MST_CTRL2_RD_MODE;
+ ret = regmap_field_write(i2c->fields[F_SDA_OUT_SEL], chan->sda_num);
+ if (ret)
+ return ret;
- return regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL2, mask, val);
+ i2c->sda_num = chan->sda_num;
+ return 0;
}
-static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len)
+static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, u8 len)
{
u32 vals[4] = {};
int i, ret;
@@ -124,8 +165,7 @@ static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len)
if (len > 16)
return -EIO;
- ret = regmap_bulk_read(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0,
- vals, ARRAY_SIZE(vals));
+ ret = regmap_bulk_read(i2c->regmap, i2c->data_reg, vals, ARRAY_SIZE(vals));
if (ret)
return ret;
@@ -137,7 +177,7 @@ static int rtl9300_i2c_read(struct rtl9300_i2c *i2c, u8 *buf, int len)
return 0;
}
-static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len)
+static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, u8 len)
{
u32 vals[4] = {};
int i;
@@ -152,56 +192,94 @@ static int rtl9300_i2c_write(struct rtl9300_i2c *i2c, u8 *buf, int len)
vals[reg] |= buf[i] << shift;
}
- return regmap_bulk_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0,
- vals, ARRAY_SIZE(vals));
+ return regmap_bulk_write(i2c->regmap, i2c->data_reg, vals, ARRAY_SIZE(vals));
}
static int rtl9300_i2c_writel(struct rtl9300_i2c *i2c, u32 data)
{
- return regmap_write(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, data);
+ return regmap_write(i2c->regmap, i2c->data_reg, data);
}
-static int rtl9300_i2c_execute_xfer(struct rtl9300_i2c *i2c, char read_write,
- int size, union i2c_smbus_data *data, int len)
+static int rtl9300_i2c_prepare_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_xfer *xfer)
{
- u32 val, mask;
int ret;
- val = read_write == I2C_SMBUS_WRITE ? RTL9300_I2C_MST_CTRL1_RWOP : 0;
- mask = RTL9300_I2C_MST_CTRL1_RWOP;
+ if (xfer->data_len < 1 || xfer->data_len > 16)
+ return -EINVAL;
+
+ ret = regmap_field_write(i2c->fields[F_DEV_ADDR], xfer->dev_addr);
+ if (ret)
+ return ret;
+
+ ret = rtl9300_i2c_reg_addr_set(i2c, xfer->reg_addr, xfer->reg_addr_len);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(i2c->fields[F_RWOP], xfer->write);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(i2c->fields[F_DATA_WIDTH], (xfer->data_len - 1) & 0xf);
+ if (ret)
+ return ret;
- val |= RTL9300_I2C_MST_CTRL1_I2C_TRIG;
- mask |= RTL9300_I2C_MST_CTRL1_I2C_TRIG;
+ if (xfer->write) {
+ switch (xfer->type) {
+ case RTL9300_I2C_XFER_BYTE:
+ ret = rtl9300_i2c_writel(i2c, *xfer->data);
+ break;
+ case RTL9300_I2C_XFER_WORD:
+ ret = rtl9300_i2c_writel(i2c, get_unaligned((const u16 *)xfer->data));
+ break;
+ default:
+ ret = rtl9300_i2c_write(i2c, xfer->data, xfer->data_len);
+ break;
+ }
+ }
- ret = regmap_update_bits(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1, mask, val);
+ return ret;
+}
+
+static int rtl9300_i2c_do_xfer(struct rtl9300_i2c *i2c, struct rtl9300_i2c_xfer *xfer)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_field_write(i2c->fields[F_I2C_TRIG], 1);
if (ret)
return ret;
- ret = regmap_read_poll_timeout(i2c->regmap, i2c->reg_base + RTL9300_I2C_MST_CTRL1,
- val, !(val & RTL9300_I2C_MST_CTRL1_I2C_TRIG), 100, 100000);
+ ret = regmap_field_read_poll_timeout(i2c->fields[F_I2C_TRIG], val, !val, 100, 100000);
if (ret)
return ret;
- if (val & RTL9300_I2C_MST_CTRL1_I2C_FAIL)
+ ret = regmap_field_read(i2c->fields[F_I2C_FAIL], &val);
+ if (ret)
+ return ret;
+ if (val)
return -EIO;
- if (read_write == I2C_SMBUS_READ) {
- if (size == I2C_SMBUS_BYTE || size == I2C_SMBUS_BYTE_DATA) {
- ret = regmap_read(i2c->regmap,
- i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val);
+ if (!xfer->write) {
+ switch (xfer->type) {
+ case RTL9300_I2C_XFER_BYTE:
+ ret = regmap_read(i2c->regmap, i2c->data_reg, &val);
if (ret)
return ret;
- data->byte = val & 0xff;
- } else if (size == I2C_SMBUS_WORD_DATA) {
- ret = regmap_read(i2c->regmap,
- i2c->reg_base + RTL9300_I2C_MST_DATA_WORD0, &val);
+
+ *xfer->data = val & 0xff;
+ break;
+ case RTL9300_I2C_XFER_WORD:
+ ret = regmap_read(i2c->regmap, i2c->data_reg, &val);
if (ret)
return ret;
- data->word = val & 0xffff;
- } else {
- ret = rtl9300_i2c_read(i2c, &data->block[0], len);
+
+ put_unaligned(val & 0xffff, (u16 *)xfer->data);
+ break;
+ default:
+ ret = rtl9300_i2c_read(i2c, xfer->data, xfer->data_len);
if (ret)
return ret;
+ break;
}
}
@@ -214,94 +292,61 @@ static int rtl9300_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr, unsigned s
{
struct rtl9300_i2c_chan *chan = i2c_get_adapdata(adap);
struct rtl9300_i2c *i2c = chan->i2c;
- int len = 0, ret;
+ struct rtl9300_i2c_xfer xfer = {0};
+ int ret;
- mutex_lock(&i2c->lock);
- if (chan->sda_pin != i2c->sda_pin) {
- ret = rtl9300_i2c_config_io(i2c, chan->sda_pin);
- if (ret)
- goto out_unlock;
- i2c->sda_pin = chan->sda_pin;
- }
+ if (addr > 0x7f)
+ return -EINVAL;
+
+ guard(rtl9300_i2c)(i2c);
+
+ ret = rtl9300_i2c_config_chan(i2c, chan);
+ if (ret)
+ return ret;
+
+ xfer.dev_addr = addr & 0x7f;
+ xfer.write = (read_write == I2C_SMBUS_WRITE);
+ xfer.reg_addr = command;
+ xfer.reg_addr_len = 1;
switch (size) {
case I2C_SMBUS_BYTE:
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 0);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- } else {
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_reg_addr_set(i2c, 0, 0);
- if (ret)
- goto out_unlock;
- }
+ xfer.data = (read_write == I2C_SMBUS_READ) ? &data->byte : &command;
+ xfer.data_len = 1;
+ xfer.reg_addr = 0;
+ xfer.reg_addr_len = 0;
+ xfer.type = RTL9300_I2C_XFER_BYTE;
break;
-
case I2C_SMBUS_BYTE_DATA:
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 1);
- if (ret)
- goto out_unlock;
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_writel(i2c, data->byte);
- if (ret)
- goto out_unlock;
- }
+ xfer.data = &data->byte;
+ xfer.data_len = 1;
+ xfer.type = RTL9300_I2C_XFER_BYTE;
break;
-
case I2C_SMBUS_WORD_DATA:
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, 2);
- if (ret)
- goto out_unlock;
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_writel(i2c, data->word);
- if (ret)
- goto out_unlock;
- }
+ xfer.data = (u8 *)&data->word;
+ xfer.data_len = 2;
+ xfer.type = RTL9300_I2C_XFER_WORD;
break;
-
case I2C_SMBUS_BLOCK_DATA:
- ret = rtl9300_i2c_reg_addr_set(i2c, command, 1);
- if (ret)
- goto out_unlock;
- if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX) {
- ret = -EINVAL;
- goto out_unlock;
- }
- ret = rtl9300_i2c_config_xfer(i2c, chan, addr, data->block[0] + 1);
- if (ret)
- goto out_unlock;
- if (read_write == I2C_SMBUS_WRITE) {
- ret = rtl9300_i2c_write(i2c, &data->block[0], data->block[0] + 1);
- if (ret)
- goto out_unlock;
- }
- len = data->block[0] + 1;
+ xfer.data = &data->block[0];
+ xfer.data_len = data->block[0] + 1;
+ xfer.type = RTL9300_I2C_XFER_BLOCK;
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ xfer.data = &data->block[1];
+ xfer.data_len = data->block[0];
+ xfer.type = RTL9300_I2C_XFER_BLOCK;
break;
-
default:
dev_err(&adap->dev, "Unsupported transaction %d\n", size);
- ret = -EOPNOTSUPP;
- goto out_unlock;
+ return -EOPNOTSUPP;
}
- ret = rtl9300_i2c_execute_xfer(i2c, read_write, size, data, len);
-
-out_unlock:
- mutex_unlock(&i2c->lock);
+ ret = rtl9300_i2c_prepare_xfer(i2c, &xfer);
+ if (ret)
+ return ret;
- return ret;
+ return rtl9300_i2c_do_xfer(i2c, &xfer);
}
static u32 rtl9300_i2c_func(struct i2c_adapter *a)
@@ -326,9 +371,11 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rtl9300_i2c *i2c;
- u32 clock_freq, sda_pin;
- int ret, i = 0;
struct fwnode_handle *child;
+ struct rtl9300_i2c_drv_data *drv_data;
+ struct reg_field fields[F_NUM_FIELDS];
+ u32 clock_freq, scl_num, sda_num;
+ int ret, i = 0;
i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -345,16 +392,34 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = device_property_read_u32(dev, "realtek,scl", &scl_num);
+ if (ret || scl_num != 1)
+ scl_num = 0;
+ i2c->scl_num = (u8)scl_num;
+
platform_set_drvdata(pdev, i2c);
- if (device_get_child_node_count(dev) > RTL9300_I2C_MUX_NCHAN)
+ drv_data = (struct rtl9300_i2c_drv_data *)device_get_match_data(i2c->dev);
+ if (device_get_child_node_count(dev) > drv_data->max_nchan)
return dev_err_probe(dev, -EINVAL, "Too many channels\n");
+ i2c->data_reg = i2c->reg_base + drv_data->data_reg;
+ for (i = 0; i < F_NUM_FIELDS; i++) {
+ fields[i] = drv_data->field_desc[i].field;
+ if (drv_data->field_desc[i].scope == REG_SCOPE_MASTER)
+ fields[i].reg += i2c->reg_base;
+ }
+ ret = devm_regmap_field_bulk_alloc(dev, i2c->regmap, i2c->fields,
+ fields, F_NUM_FIELDS);
+ if (ret)
+ return ret;
+
+ i = 0;
device_for_each_child_node(dev, child) {
struct rtl9300_i2c_chan *chan = &i2c->chans[i];
struct i2c_adapter *adap = &chan->adap;
- ret = fwnode_property_read_u32(child, "reg", &sda_pin);
+ ret = fwnode_property_read_u32(child, "reg", &sda_num);
if (ret)
return ret;
@@ -366,17 +431,16 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
case I2C_MAX_STANDARD_MODE_FREQ:
chan->bus_freq = RTL9300_I2C_STD_FREQ;
break;
-
case I2C_MAX_FAST_MODE_FREQ:
chan->bus_freq = RTL9300_I2C_FAST_FREQ;
break;
default:
dev_warn(i2c->dev, "SDA%d clock-frequency %d not supported using default\n",
- sda_pin, clock_freq);
+ sda_num, clock_freq);
break;
}
- chan->sda_pin = sda_pin;
+ chan->sda_num = sda_num;
chan->i2c = i2c;
adap = &i2c->chans[i].adap;
adap->owner = THIS_MODULE;
@@ -386,23 +450,77 @@ static int rtl9300_i2c_probe(struct platform_device *pdev)
adap->dev.parent = dev;
i2c_set_adapdata(adap, chan);
adap->dev.of_node = to_of_node(child);
- snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_pin);
+ snprintf(adap->name, sizeof(adap->name), "%s SDA%d\n", dev_name(dev), sda_num);
i++;
ret = devm_i2c_add_adapter(dev, adap);
if (ret)
return ret;
}
- i2c->sda_pin = 0xff;
+ i2c->sda_num = 0xff;
+
+ /* only use standard read format */
+ ret = regmap_field_write(i2c->fields[F_RD_MODE], 0);
+ if (ret)
+ return ret;
return 0;
}
+#define GLB_REG_FIELD(reg, lsb, msb) \
+ { .field = REG_FIELD(reg, lsb, msb), .scope = REG_SCOPE_GLOBAL }
+#define MST_REG_FIELD(reg, lsb, msb) \
+ { .field = REG_FIELD(reg, lsb, msb), .scope = REG_SCOPE_MASTER }
+
+static const struct rtl9300_i2c_drv_data rtl9300_i2c_drv_data = {
+ .field_desc = {
+ [F_MEM_ADDR] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 8, 31),
+ [F_SDA_OUT_SEL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 4, 6),
+ [F_SCL_SEL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 3, 3),
+ [F_RWOP] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 2, 2),
+ [F_I2C_FAIL] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 1, 1),
+ [F_I2C_TRIG] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL1, 0, 0),
+ [F_RD_MODE] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 15, 15),
+ [F_DEV_ADDR] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 8, 14),
+ [F_DATA_WIDTH] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 4, 7),
+ [F_MEM_ADDR_WIDTH] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 2, 3),
+ [F_SCL_FREQ] = MST_REG_FIELD(RTL9300_I2C_MST_CTRL2, 0, 1),
+ [F_SDA_SEL] = GLB_REG_FIELD(RTL9300_I2C_MST_GLB_CTRL, 0, 7),
+ },
+ .select_scl = rtl9300_i2c_select_scl,
+ .data_reg = RTL9300_I2C_MST_DATA_WORD0,
+ .max_nchan = RTL9300_I2C_MUX_NCHAN,
+};
+
+static const struct rtl9300_i2c_drv_data rtl9310_i2c_drv_data = {
+ .field_desc = {
+ [F_SCL_SEL] = GLB_REG_FIELD(RTL9310_I2C_MST_IF_SEL, 12, 13),
+ [F_SDA_SEL] = GLB_REG_FIELD(RTL9310_I2C_MST_IF_SEL, 0, 11),
+ [F_SCL_FREQ] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 30, 31),
+ [F_DEV_ADDR] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 11, 17),
+ [F_SDA_OUT_SEL] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 18, 21),
+ [F_MEM_ADDR_WIDTH] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 9, 10),
+ [F_DATA_WIDTH] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 5, 8),
+ [F_RD_MODE] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 4, 4),
+ [F_RWOP] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 2, 2),
+ [F_I2C_FAIL] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 1, 1),
+ [F_I2C_TRIG] = MST_REG_FIELD(RTL9310_I2C_MST_CTRL, 0, 0),
+ [F_MEM_ADDR] = MST_REG_FIELD(RTL9310_I2C_MST_MEMADDR_CTRL, 0, 23),
+ },
+ .select_scl = rtl9310_i2c_select_scl,
+ .data_reg = RTL9310_I2C_MST_DATA_CTRL,
+ .max_nchan = RTL9310_I2C_MUX_NCHAN,
+};
+
static const struct of_device_id i2c_rtl9300_dt_ids[] = {
- { .compatible = "realtek,rtl9301-i2c" },
- { .compatible = "realtek,rtl9302b-i2c" },
- { .compatible = "realtek,rtl9302c-i2c" },
- { .compatible = "realtek,rtl9303-i2c" },
+ { .compatible = "realtek,rtl9301-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9302b-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9302c-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9303-i2c", .data = (void *) &rtl9300_i2c_drv_data },
+ { .compatible = "realtek,rtl9310-i2c", .data = (void *) &rtl9310_i2c_drv_data },
+ { .compatible = "realtek,rtl9311-i2c", .data = (void *) &rtl9310_i2c_drv_data },
+ { .compatible = "realtek,rtl9312-i2c", .data = (void *) &rtl9310_i2c_drv_data },
+ { .compatible = "realtek,rtl9313-i2c", .data = (void *) &rtl9310_i2c_drv_data },
{}
};
MODULE_DEVICE_TABLE(of, i2c_rtl9300_dt_ids);
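The rewrite above is what makes dual-layout support tractable: every open-coded mask-and-shift access becomes a named regmap_field, so RTL9300 and RTL9310 differ only in their field_desc tables, a select_scl callback, and the data register offset, while the transfer logic is shared. It also swaps the manual mutex_lock()/mutex_unlock() pairs for a DEFINE_GUARD()-based scope lock that releases on every return path. A minimal sketch of the regmap_field idiom, with a hypothetical one-bit trigger field:

	#include <linux/regmap.h>

	/* hypothetical: bit 0 of register 0x0 is a self-clearing trigger */
	static const struct reg_field demo_trig_field = REG_FIELD(0x0, 0, 0);

	static int demo_trigger(struct regmap *map)
	{
		struct regmap_field *trig;
		u32 busy;
		int ret;

		trig = regmap_field_alloc(map, demo_trig_field);
		if (IS_ERR(trig))
			return PTR_ERR(trig);

		ret = regmap_field_write(trig, 1);
		if (!ret)
			/* poll every 100 us, up to 100 ms, until hardware clears the bit */
			ret = regmap_field_read_poll_timeout(trig, busy, !busy, 100, 100000);

		regmap_field_free(trig);
		return ret;
	}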
diff --git a/drivers/i2c/busses/i2c-rzv2m.c b/drivers/i2c/busses/i2c-rzv2m.c
index b0e9c0b62429..238714850673 100644
--- a/drivers/i2c/busses/i2c-rzv2m.c
+++ b/drivers/i2c/busses/i2c-rzv2m.c
@@ -372,7 +372,6 @@ static int rzv2m_i2c_xfer(struct i2c_adapter *adap,
ret = num;
out:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index f4fa4703acbd..8138f5ef40f0 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -138,7 +138,6 @@ static void i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_i2c_match[] = {
- { .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
{ .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
{ .compatible = "samsung,s3c2440-hdmiphy-i2c",
.data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 56b2e5c5fb49..1b490525d8dd 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -302,7 +302,6 @@ static int sprd_i2c_xfer(struct i2c_adapter *i2c_adap,
ret = sprd_i2c_handle_msg(i2c_adap, &msgs[im++], 1);
err_msg:
- pm_runtime_mark_last_busy(i2c_dev->dev);
pm_runtime_put_autosuspend(i2c_dev->dev);
return ret < 0 ? ret : im;
@@ -425,7 +424,7 @@ static irqreturn_t sprd_i2c_isr(int irq, void *dev_id)
* If we did not get one ACK from target when writing data, then we
* should finish this transmission since we got some errors.
*
- * When writing data, if i2c_tran == 0 which means we have writen
+ * When writing data, if i2c_tran == 0 which means we have written
* done all data, then we can finish this transmission.
*
* When reading data, if count < rx fifo full threshold, which
@@ -559,7 +558,6 @@ static int sprd_i2c_probe(struct platform_device *pdev)
goto err_rpm_put;
}
- pm_runtime_mark_last_busy(i2c_dev->dev);
pm_runtime_put_autosuspend(i2c_dev->dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index bf28f8e3ee6b..97d70e667227 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -152,7 +152,7 @@ struct st_i2c_timings {
/**
* struct st_i2c_client - client specific data
* @addr: 8-bit target addr, including r/w bit
- * @count: number of bytes to be transfered
+ * @count: number of bytes to be transferred
* @xfered: number of bytes already transferred
* @buf: data buffer
* @result: result of the transfer
diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
index f84ec056e36d..becf8977979f 100644
--- a/drivers/i2c/busses/i2c-stm32.c
+++ b/drivers/i2c/busses/i2c-stm32.c
@@ -27,8 +27,8 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
if (IS_ERR(dma->chan_tx)) {
ret = PTR_ERR(dma->chan_tx);
if (ret != -ENODEV)
- ret = dev_err_probe(dev, ret,
- "can't request DMA tx channel\n");
+ dev_err_probe(dev, ret, "can't request DMA tx channel\n");
+
goto fail_al;
}
@@ -48,8 +48,7 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
if (IS_ERR(dma->chan_rx)) {
ret = PTR_ERR(dma->chan_rx);
if (ret != -ENODEV)
- ret = dev_err_probe(dev, ret,
- "can't request DMA rx channel\n");
+ dev_err_probe(dev, ret, "can't request DMA rx channel\n");
goto fail_tx;
}
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index e6815f6cae78..dc69ed934ec8 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -1761,7 +1761,6 @@ static int stm32f7_i2c_xfer_core(struct i2c_adapter *i2c_adap,
}
pm_free:
- pm_runtime_mark_last_busy(i2c_dev->dev);
pm_runtime_put_autosuspend(i2c_dev->dev);
return (ret < 0) ? ret : num;
@@ -1870,7 +1869,6 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
}
pm_free:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -1977,7 +1975,6 @@ pm_free:
if (!stm32f7_i2c_is_slave_registered(i2c_dev))
stm32f7_i2c_enable_wakeup(i2c_dev, false);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -2015,7 +2012,6 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
stm32f7_i2c_enable_wakeup(i2c_dev, false);
}
- pm_runtime_mark_last_busy(i2c_dev->dev);
pm_runtime_put_autosuspend(i2c_dev->dev);
return 0;
@@ -2328,7 +2324,6 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
dev_info(i2c_dev->dev, "STM32F7 I2C-%d bus adapter\n", adap->nr);
- pm_runtime_mark_last_busy(i2c_dev->dev);
pm_runtime_put_autosuspend(i2c_dev->dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4eb31b913c1a..e533460bccc3 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -1649,7 +1649,33 @@ static const struct tegra_i2c_hw_feature tegra194_i2c_hw = {
.has_interface_timing_reg = true,
};
+static const struct tegra_i2c_hw_feature tegra256_i2c_hw = {
+ .has_continue_xfer_support = true,
+ .has_per_pkt_xfer_complete_irq = true,
+ .clk_divisor_hs_mode = 7,
+ .clk_divisor_std_mode = 0x7a,
+ .clk_divisor_fast_mode = 0x40,
+ .clk_divisor_fast_plus_mode = 0x19,
+ .has_config_load_reg = true,
+ .has_multi_master_mode = true,
+ .has_slcg_override_reg = true,
+ .has_mst_fifo = true,
+ .has_mst_reset = true,
+ .quirks = &tegra194_i2c_quirks,
+ .supports_bus_clear = true,
+ .has_apb_dma = false,
+ .tlow_std_mode = 0x8,
+ .thigh_std_mode = 0x7,
+ .tlow_fast_fastplus_mode = 0x3,
+ .thigh_fast_fastplus_mode = 0x3,
+ .setup_hold_time_std_mode = 0x08080808,
+ .setup_hold_time_fast_fast_plus_mode = 0x02020202,
+ .setup_hold_time_hs_mode = 0x090909,
+ .has_interface_timing_reg = true,
+};
+
static const struct of_device_id tegra_i2c_of_match[] = {
+ { .compatible = "nvidia,tegra256-i2c", .data = &tegra256_i2c_hw, },
{ .compatible = "nvidia,tegra194-i2c", .data = &tegra194_i2c_hw, },
{ .compatible = "nvidia,tegra186-i2c", .data = &tegra186_i2c_hw, },
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
diff --git a/drivers/i2c/busses/i2c-usbio.c b/drivers/i2c/busses/i2c-usbio.c
new file mode 100644
index 000000000000..e7799abf6787
--- /dev/null
+++ b/drivers/i2c/busses/i2c-usbio.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Intel Corporation.
+ * Copyright (c) 2025 Red Hat, Inc.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/usb/usbio.h>
+
+#define I2C_RW_OVERHEAD (sizeof(struct usbio_bulk_packet) + sizeof(struct usbio_i2c_rw))
+
+struct usbio_i2c {
+ struct i2c_adapter adap;
+ struct auxiliary_device *adev;
+ struct usbio_i2c_rw *rwbuf;
+ unsigned long quirks;
+ u32 speed;
+ u16 txbuf_len;
+ u16 rxbuf_len;
+};
+
+static const struct acpi_device_id usbio_i2c_acpi_hids[] = {
+ { "INTC1008" }, /* MTL */
+ { "INTC10B3" }, /* ARL */
+ { "INTC10B6" }, /* LNL */
+ { "INTC10D2" }, /* MTL-CVF */
+ { "INTC10E3" }, /* PTL */
+ { }
+};
+
+static const u32 usbio_i2c_speeds[] = {
+ I2C_MAX_STANDARD_MODE_FREQ,
+ I2C_MAX_FAST_MODE_FREQ,
+ I2C_MAX_FAST_MODE_PLUS_FREQ,
+ I2C_MAX_HIGH_SPEED_MODE_FREQ
+};
+
+static void usbio_i2c_uninit(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ struct usbio_i2c_uninit ubuf;
+
+ ubuf.busid = i2c->adev->id;
+ ubuf.config = cpu_to_le16(msg->addr);
+
+ usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C, USBIO_I2CCMD_UNINIT, true,
+ &ubuf, sizeof(ubuf), NULL, 0);
+}
+
+static int usbio_i2c_init(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ struct usbio_i2c_init ibuf;
+ void *reply_buf;
+ u16 reply_len;
+ int ret;
+
+ ibuf.busid = i2c->adev->id;
+ ibuf.config = cpu_to_le16(msg->addr);
+ ibuf.speed = cpu_to_le32(i2c->speed);
+
+ if (i2c->quirks & USBIO_QUIRK_I2C_NO_INIT_ACK) {
+ reply_buf = NULL;
+ reply_len = 0;
+ } else {
+ reply_buf = &ibuf;
+ reply_len = sizeof(ibuf);
+ }
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C, USBIO_I2CCMD_INIT, true,
+ &ibuf, sizeof(ibuf), reply_buf, reply_len);
+ if (ret != sizeof(ibuf))
+ return (ret < 0) ? ret : -EIO;
+
+ return 0;
+}
+
+static int usbio_i2c_read(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ u16 rxchunk = i2c->rxbuf_len - I2C_RW_OVERHEAD;
+ struct usbio_i2c_rw *rbuf = i2c->rwbuf;
+ int ret;
+
+ rbuf->busid = i2c->adev->id;
+ rbuf->config = cpu_to_le16(msg->addr);
+ rbuf->size = cpu_to_le16(msg->len);
+
+ if (msg->len > rxchunk) {
+ /* Need to split the input buffer */
+ u16 len = 0;
+
+ do {
+ if (msg->len - len < rxchunk)
+ rxchunk = msg->len - len;
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C,
+ USBIO_I2CCMD_READ, true,
+ rbuf, len == 0 ? sizeof(*rbuf) : 0,
+ rbuf, sizeof(*rbuf) + rxchunk);
+ if (ret < 0)
+ return ret;
+
+ memcpy(&msg->buf[len], rbuf->data, rxchunk);
+ len += rxchunk;
+ } while (msg->len > len);
+
+ return 0;
+ }
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C, USBIO_I2CCMD_READ, true,
+ rbuf, sizeof(*rbuf), rbuf, sizeof(*rbuf) + msg->len);
+ if (ret != sizeof(*rbuf) + msg->len)
+ return (ret < 0) ? ret : -EIO;
+
+ memcpy(msg->buf, rbuf->data, msg->len);
+
+ return 0;
+}
+
+static int usbio_i2c_write(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ u16 txchunk = i2c->txbuf_len - I2C_RW_OVERHEAD;
+ struct usbio_i2c_rw *wbuf = i2c->rwbuf;
+ int ret;
+
+ if (msg->len > txchunk) {
+ /* Need to split the output buffer */
+ u16 len = 0;
+
+ do {
+ wbuf->busid = i2c->adev->id;
+ wbuf->config = cpu_to_le16(msg->addr);
+
+ if (i2c->quirks & USBIO_QUIRK_I2C_USE_CHUNK_LEN)
+ wbuf->size = cpu_to_le16(txchunk);
+ else
+ wbuf->size = cpu_to_le16(msg->len);
+
+ memcpy(wbuf->data, &msg->buf[len], txchunk);
+ len += txchunk;
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C,
+ USBIO_I2CCMD_WRITE, msg->len == len,
+ wbuf, sizeof(*wbuf) + txchunk,
+ wbuf, sizeof(*wbuf));
+ if (ret < 0)
+ return ret;
+
+ if (msg->len - len < txchunk)
+ txchunk = msg->len - len;
+ } while (msg->len > len);
+
+ return 0;
+ }
+
+ wbuf->busid = i2c->adev->id;
+ wbuf->config = cpu_to_le16(msg->addr);
+ wbuf->size = cpu_to_le16(msg->len);
+ memcpy(wbuf->data, msg->buf, msg->len);
+
+ ret = usbio_bulk_msg(i2c->adev, USBIO_PKTTYPE_I2C, USBIO_I2CCMD_WRITE, true,
+ wbuf, sizeof(*wbuf) + msg->len, wbuf, sizeof(*wbuf));
+ if (ret != sizeof(*wbuf) || le16_to_cpu(wbuf->size) != msg->len)
+ return (ret < 0) ? ret : -EIO;
+
+ return 0;
+}
+
+static int usbio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+ struct usbio_i2c *i2c = i2c_get_adapdata(adap);
+ int ret;
+
+ usbio_acquire(i2c->adev);
+
+ ret = usbio_i2c_init(adap, msgs);
+ if (ret)
+ goto out_release;
+
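+ /* the update expression keeps ret equal to the number of messages completed, so a full pass returns num */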
+ for (int i = 0; i < num; ret = ++i) {
+ if (msgs[i].flags & I2C_M_RD)
+ ret = usbio_i2c_read(adap, &msgs[i]);
+ else
+ ret = usbio_i2c_write(adap, &msgs[i]);
+
+ if (ret)
+ break;
+ }
+
+ usbio_i2c_uninit(adap, msgs);
+
+out_release:
+ usbio_release(i2c->adev);
+
+ return ret;
+}
+
+static u32 usbio_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_adapter_quirks usbio_i2c_quirks = {
+ .flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_NO_REP_START,
+ .max_read_len = SZ_4K,
+ .max_write_len = SZ_4K,
+};
+
+static const struct i2c_adapter_quirks usbio_i2c_quirks_max_rw_len52 = {
+ .flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_NO_REP_START,
+ .max_read_len = 52,
+ .max_write_len = 52,
+};
+
+static const struct i2c_algorithm usbio_i2c_algo = {
+ .master_xfer = usbio_i2c_xfer,
+ .functionality = usbio_i2c_func,
+};
+
+static int usbio_i2c_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *adev_id)
+{
+ struct usbio_i2c_bus_desc *i2c_desc;
+ struct device *dev = &adev->dev;
+ struct usbio_i2c *i2c;
+ u32 max_speed;
+ int ret;
+
+ i2c_desc = dev_get_platdata(dev);
+ if (!i2c_desc)
+ return -EINVAL;
+
+ i2c = devm_kzalloc(dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return -ENOMEM;
+
+ i2c->adev = adev;
+
+ usbio_acpi_bind(i2c->adev, usbio_i2c_acpi_hids);
+ usbio_get_txrxbuf_len(i2c->adev, &i2c->txbuf_len, &i2c->rxbuf_len);
+
+ i2c->rwbuf = devm_kzalloc(dev, max(i2c->txbuf_len, i2c->rxbuf_len), GFP_KERNEL);
+ if (!i2c->rwbuf)
+ return -ENOMEM;
+
+ i2c->quirks = usbio_get_quirks(i2c->adev);
+
+ max_speed = usbio_i2c_speeds[i2c_desc->caps & USBIO_I2C_BUS_MODE_CAP_MASK];
+ if (max_speed < I2C_MAX_FAST_MODE_FREQ &&
+ (i2c->quirks & USBIO_QUIRK_I2C_ALLOW_400KHZ))
+ max_speed = I2C_MAX_FAST_MODE_FREQ;
+
+ i2c->speed = i2c_acpi_find_bus_speed(dev);
+ if (!i2c->speed)
+ i2c->speed = I2C_MAX_STANDARD_MODE_FREQ;
+ else if (i2c->speed > max_speed) {
+ dev_warn(dev, "Invalid speed %u, adjusting to bus max %u\n",
+ i2c->speed, max_speed);
+ i2c->speed = max_speed;
+ }
+
+ i2c->adap.owner = THIS_MODULE;
+ i2c->adap.class = I2C_CLASS_HWMON;
+ i2c->adap.dev.parent = dev;
+ i2c->adap.algo = &usbio_i2c_algo;
+
+ if (i2c->quirks & USBIO_QUIRK_I2C_MAX_RW_LEN_52)
+ i2c->adap.quirks = &usbio_i2c_quirks_max_rw_len52;
+ else
+ i2c->adap.quirks = &usbio_i2c_quirks;
+
+ snprintf(i2c->adap.name, sizeof(i2c->adap.name), "%s.%d",
+ USBIO_I2C_CLIENT, i2c->adev->id);
+
+ device_set_node(&i2c->adap.dev, dev_fwnode(&adev->dev));
+
+ auxiliary_set_drvdata(adev, i2c);
+ i2c_set_adapdata(&i2c->adap, i2c);
+
+ ret = i2c_add_adapter(&i2c->adap);
+ if (ret)
+ return ret;
+
+ if (has_acpi_companion(&i2c->adap.dev))
+ acpi_dev_clear_dependencies(ACPI_COMPANION(&i2c->adap.dev));
+
+ return 0;
+}
+
+static void usbio_i2c_remove(struct auxiliary_device *adev)
+{
+ struct usbio_i2c *i2c = auxiliary_get_drvdata(adev);
+
+ i2c_del_adapter(&i2c->adap);
+}
+
+static const struct auxiliary_device_id usbio_i2c_id_table[] = {
+ { "usbio.usbio-i2c" },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, usbio_i2c_id_table);
+
+static struct auxiliary_driver usbio_i2c_driver = {
+ .name = USBIO_I2C_CLIENT,
+ .probe = usbio_i2c_probe,
+ .remove = usbio_i2c_remove,
+ .id_table = usbio_i2c_id_table
+};
+module_auxiliary_driver(usbio_i2c_driver);
+
+MODULE_DESCRIPTION("Intel USBIO I2C driver");
+MODULE_AUTHOR("Israel Cepeda <israel.a.cepeda.lopez@intel.com>");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("USBIO");
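Because every bulk command shares the URB buffer with a packet header and the usbio_i2c_rw header, the read and write paths above cap each chunk at the buffer length minus I2C_RW_OVERHEAD and loop until the message is drained. A small sketch of the split arithmetic, with hypothetical sizes (rxbuf_len = 256, overhead = 16):

	u16 rxchunk = rxbuf_len - I2C_RW_OVERHEAD;	/* 256 - 16 = 240 */
	u16 len = 0;

	do {
		if (msg_len - len < rxchunk)
			rxchunk = msg_len - len;	/* final partial chunk */
		/* issue one READ of rxchunk bytes, memcpy into &buf[len] */
		len += rxchunk;
	} while (msg_len > len);	/* a 600-byte read goes out as 240 + 240 + 120 */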
diff --git a/drivers/i2c/busses/i2c-viperboard.c b/drivers/i2c/busses/i2c-viperboard.c
index 1bd602852e35..f596efcc291c 100644
--- a/drivers/i2c/busses/i2c-viperboard.c
+++ b/drivers/i2c/busses/i2c-viperboard.c
@@ -204,7 +204,7 @@ static int vprbrd_i2c_read(struct vprbrd *vb, struct i2c_msg *msg)
/* copy the received data */
memcpy(msg->buf + start, rmsg, len1);
- /* second read transfer if neccessary */
+ /* second read transfer if necessary */
if (len2 > 0) {
ret = vprbrd_i2c_receive(vb->usb_dev, rmsg, len2);
if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 607026c921d6..28015d77599d 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -1349,7 +1349,6 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
mutex_unlock(&i2c->lock);
out:
- pm_runtime_mark_last_busy(i2c->dev);
pm_runtime_put_autosuspend(i2c->dev);
return err;
}
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index ecca8c006b02..ae7e9c8b65a6 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -573,7 +573,8 @@ static int i2c_device_probe(struct device *dev)
goto err_clear_wakeup_irq;
do_power_on = !i2c_acpi_waive_d0_probe(dev);
- status = dev_pm_domain_attach(&client->dev, do_power_on ? PD_FLAG_ATTACH_POWER_ON : 0);
+ status = dev_pm_domain_attach(&client->dev, PD_FLAG_DETACH_POWER_OFF |
+ (do_power_on ? PD_FLAG_ATTACH_POWER_ON : 0));
if (status)
goto err_clear_wakeup_irq;
@@ -581,7 +582,7 @@ static int i2c_device_probe(struct device *dev)
GFP_KERNEL);
if (!client->devres_group_id) {
status = -ENOMEM;
- goto err_detach_pm_domain;
+ goto err_clear_wakeup_irq;
}
client->debugfs = debugfs_create_dir(dev_name(&client->dev),
@@ -608,8 +609,6 @@ static int i2c_device_probe(struct device *dev)
err_release_driver_resources:
debugfs_remove_recursive(client->debugfs);
devres_release_group(&client->dev, client->devres_group_id);
-err_detach_pm_domain:
- dev_pm_domain_detach(&client->dev, do_power_on);
err_clear_wakeup_irq:
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
@@ -636,8 +635,6 @@ static void i2c_device_remove(struct device *dev)
devres_release_group(&client->dev, client->devres_group_id);
- dev_pm_domain_detach(&client->dev, true);
-
dev_pm_clear_wake_irq(&client->dev);
device_init_wakeup(&client->dev, false);
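The core change above replaces manual PM-domain teardown with flag-driven teardown: passing PD_FLAG_DETACH_POWER_OFF at attach time tells the driver core to detach the PM domain (powering it off) automatically when the device unbinds, which is why both the err_detach_pm_domain label and the dev_pm_domain_detach() call in i2c_device_remove() disappear. In sketch form:

	/* before: attach and detach were explicitly paired */
	dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
	/* ...and on the probe error path or in remove()... */
	dev_pm_domain_detach(dev, true);

	/* after: detach-with-power-off is delegated to the core at unbind */
	dev_pm_domain_attach(dev, PD_FLAG_DETACH_POWER_OFF | PD_FLAG_ATTACH_POWER_ON);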
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
index 7ee6b992b835..02ca55c2246b 100644
--- a/drivers/i2c/i2c-core-slave.c
+++ b/drivers/i2c/i2c-core-slave.c
@@ -112,10 +112,9 @@ bool i2c_detect_slave_mode(struct device *dev)
struct fwnode_handle *fwnode = dev_fwnode(dev);
if (is_of_node(fwnode)) {
- struct fwnode_handle *child __free(fwnode_handle) = NULL;
u32 reg;
- fwnode_for_each_child_node(fwnode, child) {
+ fwnode_for_each_child_node_scoped(fwnode, child) {
fwnode_property_read_u32(child, "reg", &reg);
if (reg & I2C_OWN_SLAVE_ADDRESS)
return true;
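fwnode_for_each_child_node_scoped() ties the child node reference to the loop scope, so it is dropped automatically even on the early `return true` above; the previous pattern needed the `__free(fwnode_handle)` pre-declaration to get the same guarantee. A tiny sketch, with a hypothetical want() predicate:

	fwnode_for_each_child_node_scoped(fwnode, child) {
		if (want(child))
			return true;	/* child reference is put automatically */
	}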
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 4d8690981a55..d59644e50f14 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -241,12 +241,9 @@ struct i2c_mux_core *i2c_mux_alloc(struct i2c_adapter *parent,
muxc->parent = parent;
muxc->dev = dev;
- if (flags & I2C_MUX_LOCKED)
- muxc->mux_locked = true;
- if (flags & I2C_MUX_ARBITRATOR)
- muxc->arbitrator = true;
- if (flags & I2C_MUX_GATE)
- muxc->gate = true;
+ muxc->mux_locked = !!(flags & I2C_MUX_LOCKED);
+ muxc->arbitrator = !!(flags & I2C_MUX_ARBITRATOR);
+ muxc->gate = !!(flags & I2C_MUX_GATE);
muxc->select = select;
muxc->deselect = deselect;
muxc->max_adapters = max_adapters;
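The `!!` double negation above normalizes the masked flag test to 0 or 1 before it lands in the bool members, turning three conditionals into three assignments. For example:

	unsigned int flags = I2C_MUX_LOCKED | I2C_MUX_GATE;
	bool mux_locked, arbitrator;

	mux_locked = !!(flags & I2C_MUX_LOCKED);	/* 1: bit set   */
	arbitrator = !!(flags & I2C_MUX_ARBITRATOR);	/* 0: bit clear */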
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 8663c8a7c269..3d8002caf703 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -63,10 +63,6 @@
#define mybus(x) (!((x) & MYBUS) || ((x) & MYBUS) == MYBUS)
#define busoff(x) (!((x) & BUSON) || ((x) & BUSON) == BUSON)
-/* arbitration timeouts, in jiffies */
-#define ARB_TIMEOUT (HZ / 8) /* 125 ms until forcing bus ownership */
-#define ARB2_TIMEOUT (HZ / 4) /* 250 ms until acquisition failure */
-
/* arbitration retry delays, in us */
#define SELECT_DELAY_SHORT 50
#define SELECT_DELAY_LONG 1000
@@ -229,6 +225,9 @@ static int pca9541_arbitrate(struct i2c_client *client)
*/
data->select_timeout = SELECT_DELAY_LONG;
if (time_is_before_eq_jiffies(data->arb_timeout)) {
+ dev_warn(&client->dev,
+ "Arbitration timeout on I2C bus, forcing bus ownership\n");
+
/* Time is up, take the bus and reset it. */
pca9541_reg_write(client,
PCA9541_CONTROL,
@@ -251,10 +250,10 @@ static int pca9541_select_chan(struct i2c_mux_core *muxc, u32 chan)
struct pca9541 *data = i2c_mux_priv(muxc);
struct i2c_client *client = data->client;
int ret;
- unsigned long timeout = jiffies + ARB2_TIMEOUT;
+ unsigned long timeout = jiffies + (2 * client->adapter->timeout);
/* give up after this time */
- data->arb_timeout = jiffies + ARB_TIMEOUT;
+ data->arb_timeout = jiffies + client->adapter->timeout;
/* force bus ownership after this time */
do {
@@ -267,6 +266,7 @@ static int pca9541_select_chan(struct i2c_mux_core *muxc, u32 chan)
else
msleep(data->select_timeout / 1000);
} while (time_is_after_eq_jiffies(timeout));
+ dev_warn(&client->dev, "Failed to acquire I2C bus, timed out\n");
return -ETIMEDOUT;
}
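Deriving the arbitration windows from client->adapter->timeout, instead of the removed fixed ARB_TIMEOUT/ARB2_TIMEOUT defines, lets them scale with the standard adapter timeout, and the new dev_warn() calls make both the forced takeover and the final give-up visible in the log. With the i2c core's default adapter timeout of HZ, the windows work out as:

	data->arb_timeout = jiffies + client->adapter->timeout;	/* force bus ownership after ~1 s */
	timeout = jiffies + (2 * client->adapter->timeout);	/* give up entirely after ~2 s    */

versus the old fixed 125 ms and 250 ms.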
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index 2396545763ff..8a156f5ad692 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -15,12 +15,12 @@
#include "internals.h"
/**
- * i3c_device_do_priv_xfers() - do I3C SDR private transfers directed to a
- * specific device
+ * i3c_device_do_xfers() - do I3C transfers directed to a specific device
*
* @dev: device with which the transfers should be done
* @xfers: array of transfers
* @nxfers: number of transfers
+ * @mode: transfer mode
*
* Initiate one or several private SDR transfers with @dev.
*
@@ -33,9 +33,8 @@
* 'xfers' some time later. See I3C spec ver 1.1.1 09-Jun-2021. Section:
* 5.1.2.2.3.
*/
-int i3c_device_do_priv_xfers(struct i3c_device *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers)
+int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode)
{
int ret, i;
@@ -48,12 +47,12 @@ int i3c_device_do_priv_xfers(struct i3c_device *dev,
}
i3c_bus_normaluse_lock(dev->bus);
- ret = i3c_dev_do_priv_xfers_locked(dev->desc, xfers, nxfers);
+ ret = i3c_dev_do_xfers_locked(dev->desc, xfers, nxfers, mode);
i3c_bus_normaluse_unlock(dev->bus);
return ret;
}
-EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers);
+EXPORT_SYMBOL_GPL(i3c_device_do_xfers);
/**
* i3c_device_do_setdasa() - do I3C dynamic address assignment with
@@ -261,6 +260,20 @@ i3c_device_match_id(struct i3c_device *i3cdev,
EXPORT_SYMBOL_GPL(i3c_device_match_id);
/**
+ * i3c_device_get_supported_xfer_mode() - Return the transfer modes supported
+ * by the connected master controller.
+ * @dev: I3C device
+ *
+ * Return: a bit mask of the supported transfer modes; bit positions are
+ * defined in enum i3c_hdr_mode
+ */
+u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev)
+{
+ return i3c_dev_get_master(dev->desc)->this->info.hdr_cap | BIT(I3C_SDR);
+}
+EXPORT_SYMBOL_GPL(i3c_device_get_supported_xfer_mode);
+
+/**
* i3c_driver_register_with_owner() - register an I3C device driver
*
* @drv: driver to register
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
index 0d857cc68cc5..f609e5098137 100644
--- a/drivers/i3c/internals.h
+++ b/drivers/i3c/internals.h
@@ -15,9 +15,9 @@ void i3c_bus_normaluse_lock(struct i3c_bus *bus);
void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev);
-int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers);
+int i3c_dev_do_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev);
int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
@@ -38,7 +38,11 @@ static inline void i3c_writel_fifo(void __iomem *addr, const void *buf,
u32 tmp = 0;
memcpy(&tmp, buf + (nbytes & ~3), nbytes & 3);
- writel(tmp, addr);
+ /*
+ * writesl() instead of writel() to keep FIFO
+ * byteorder on big-endian targets
+ */
+ writesl(addr, &tmp, 1);
}
}
@@ -55,7 +59,11 @@ static inline void i3c_readl_fifo(const void __iomem *addr, void *buf,
if (nbytes & 3) {
u32 tmp;
- tmp = readl(addr);
+ /*
+ * readsl() instead of readl() to keep FIFO
+ * byteorder on big-endian targets
+ */
+ readsl(addr, &tmp, 1);
memcpy(buf + (nbytes & ~3), &tmp, nbytes & 3);
}
}
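The writesl()/readsl() substitution above only matters on big-endian kernels: writel() converts its argument to little-endian before the store, which would swap the bytes inside each FIFO word, while writesl()/readsl() move the memory image through the register untouched, keeping the FIFO byte stream in buffer order. An illustrative trace for a 3-byte tail on a big-endian CPU:

	u8 buf[3] = { 0xaa, 0xbb, 0xcc };
	u32 tmp = 0;

	memcpy(&tmp, buf, 3);		/* tmp's memory image: aa bb cc 00 */
	writel(tmp, addr);		/* byte-swapped store: FIFO sees 00 cc bb aa */
	writesl(addr, &tmp, 1);		/* raw store: FIFO sees aa bb cc 00 */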
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 2ef898a8fd80..f88f7e19203a 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -8,6 +8,7 @@
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/device.h>
+#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
@@ -333,8 +334,6 @@ static void i3c_device_remove(struct device *dev)
if (driver->remove)
driver->remove(i3cdev);
-
- i3c_device_free_ibi(i3cdev);
}
const struct bus_type i3c_bus_type = {
@@ -1728,6 +1727,79 @@ int i3c_master_do_daa(struct i3c_master_controller *master)
EXPORT_SYMBOL_GPL(i3c_master_do_daa);
/**
+ * i3c_master_dma_map_single() - Map buffer for single DMA transfer
+ * @dev: device object of a device doing DMA
+ * @buf: destination/source buffer for DMA
+ * @len: length of transfer
+ * @force_bounce: true to force use of a bounce buffer,
+ * false to let the function check whether a bounce buffer is required
+ * @dir: DMA direction
+ *
+ * Map buffer for a DMA transfer and allocate a bounce buffer if required.
+ *
+ * Return: I3C DMA transfer descriptor or NULL in case of error.
+ */
+struct i3c_dma *i3c_master_dma_map_single(struct device *dev, void *buf,
+ size_t len, bool force_bounce, enum dma_data_direction dir)
+{
+ struct i3c_dma *dma_xfer __free(kfree) = NULL;
+ void *bounce __free(kfree) = NULL;
+ void *dma_buf = buf;
+
+ dma_xfer = kzalloc(sizeof(*dma_xfer), GFP_KERNEL);
+ if (!dma_xfer)
+ return NULL;
+
+ dma_xfer->dev = dev;
+ dma_xfer->buf = buf;
+ dma_xfer->dir = dir;
+ dma_xfer->len = len;
+ dma_xfer->map_len = len;
+
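+ /* vmalloc memory need not be physically contiguous, so it cannot be DMA-mapped directly */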
+ if (is_vmalloc_addr(buf))
+ force_bounce = true;
+
+ if (force_bounce) {
+ dma_xfer->map_len = ALIGN(len, cache_line_size());
+ if (dir == DMA_FROM_DEVICE)
+ bounce = kzalloc(dma_xfer->map_len, GFP_KERNEL);
+ else
+ bounce = kmemdup(buf, dma_xfer->map_len, GFP_KERNEL);
+ if (!bounce)
+ return NULL;
+ dma_buf = bounce;
+ }
+
+ dma_xfer->addr = dma_map_single(dev, dma_buf, dma_xfer->map_len, dir);
+ if (dma_mapping_error(dev, dma_xfer->addr))
+ return NULL;
+
+ dma_xfer->bounce_buf = no_free_ptr(bounce);
+ return no_free_ptr(dma_xfer);
+}
+EXPORT_SYMBOL_GPL(i3c_master_dma_map_single);
+
+/**
+ * i3c_master_dma_unmap_single() - Unmap buffer after DMA
+ * @dma_xfer: DMA transfer and mapping descriptor
+ *
+ * Unmap buffer and cleanup DMA transfer descriptor.
+ */
+void i3c_master_dma_unmap_single(struct i3c_dma *dma_xfer)
+{
+ dma_unmap_single(dma_xfer->dev, dma_xfer->addr,
+ dma_xfer->map_len, dma_xfer->dir);
+ if (dma_xfer->bounce_buf) {
+ if (dma_xfer->dir == DMA_FROM_DEVICE)
+ memcpy(dma_xfer->buf, dma_xfer->bounce_buf,
+ dma_xfer->len);
+ kfree(dma_xfer->bounce_buf);
+ }
+ kfree(dma_xfer);
+}
+EXPORT_SYMBOL_GPL(i3c_master_dma_unmap_single);
+
+/**
* i3c_master_set_info() - set master device information
* @master: master used to send frames on the bus
* @info: I3C device information
@@ -2490,9 +2562,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
adap->owner = master->dev.parent->driver->owner;
adap->algo = &i3c_master_i2c_algo;
strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
-
- /* FIXME: Should we allow i3c masters to override these values? */
- adap->timeout = 1000;
+ adap->timeout = HZ;
adap->retries = 3;
id = of_alias_get_id(master->dev.of_node, "i2c");
@@ -2749,10 +2819,14 @@ EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot);
static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
{
- if (!ops || !ops->bus_init || !ops->priv_xfers ||
+ if (!ops || !ops->bus_init ||
!ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers)
return -EINVAL;
+ /* Must provide one of priv_xfers (SDR only) or i3c_xfers (all modes) */
+ if (!ops->priv_xfers && !ops->i3c_xfers)
+ return -EINVAL;
+
if (ops->request_ibi &&
(!ops->enable_ibi || !ops->disable_ibi || !ops->free_ibi ||
!ops->recycle_ibi_slot))
@@ -2811,10 +2885,6 @@ int i3c_master_register(struct i3c_master_controller *master,
INIT_LIST_HEAD(&master->boardinfo.i2c);
INIT_LIST_HEAD(&master->boardinfo.i3c);
- ret = i3c_bus_init(i3cbus, master->dev.of_node);
- if (ret)
- return ret;
-
device_initialize(&master->dev);
dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
@@ -2822,6 +2892,10 @@ int i3c_master_register(struct i3c_master_controller *master,
master->dev.coherent_dma_mask = parent->coherent_dma_mask;
master->dev.dma_parms = parent->dma_parms;
+ ret = i3c_bus_init(i3cbus, master->dev.of_node);
+ if (ret)
+ goto err_put_dev;
+
ret = of_populate_i3c_bus(master);
if (ret)
goto err_put_dev;
@@ -2853,7 +2927,7 @@ int i3c_master_register(struct i3c_master_controller *master,
if (ret)
goto err_put_dev;
- master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent));
+ master->wq = alloc_workqueue("%s", WQ_PERCPU, 0, dev_name(parent));
if (!master->wq) {
ret = -ENOMEM;
goto err_put_dev;
@@ -2942,9 +3016,8 @@ int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev)
dev->boardinfo->init_dyn_addr);
}
-int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers)
+int i3c_dev_do_xfers_locked(struct i3c_dev_desc *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode)
{
struct i3c_master_controller *master;
@@ -2955,9 +3028,15 @@ int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
if (!master || !xfers)
return -EINVAL;
- if (!master->ops->priv_xfers)
+ if (mode != I3C_SDR && !(master->this->info.hdr_cap & BIT(mode)))
return -EOPNOTSUPP;
+ if (master->ops->i3c_xfers)
+ return master->ops->i3c_xfers(dev, xfers, nxfers, mode);
+
+ if (mode != I3C_SDR)
+ return -EINVAL;
+
return master->ops->priv_xfers(dev, xfers, nxfers);
}
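The dispatch above prefers the mode-aware i3c_xfers callback and only falls back to the legacy SDR-only priv_xfers hook, after checking the requested mode against the controller's advertised hdr_cap. A hypothetical consumer sketch, assuming enum i3c_xfer_mode uses the same bit positions the hdr_cap check implies (I3C_SDR plus the enum i3c_hdr_mode values):

	/* prefer HDR-DDR when the connected controller supports it */
	u32 modes = i3c_device_get_supported_xfer_mode(i3cdev);
	enum i3c_xfer_mode mode = (modes & BIT(I3C_HDR_DDR)) ? I3C_HDR_DDR : I3C_SDR;
	int ret = i3c_device_do_xfers(i3cdev, xfers, nxfers, mode);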
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 13df2944f2ec..82cf330778d5 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -1,4 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
+config ADI_I3C_MASTER
+ tristate "Analog Devices I3C master driver"
+ depends on HAS_IOMEM
+ help
+ Support for Analog Devices I3C Controller IP, an AXI-interfaced IP
+ core that supports I3C and I2C devices, multiple speed-grades and I3C
+ IBIs.
+
+ This driver can also be built as a module. If so, the module will be
+ called adi-i3c-master.
+
config CDNS_I3C_MASTER
tristate "Cadence I3C master driver"
depends on HAS_IOMEM
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index aac74f3e3851..816a227b6f7a 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ADI_I3C_MASTER) += adi-i3c-master.o
obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
obj-$(CONFIG_AST2600_I3C_MASTER) += ast2600-i3c-master.o
diff --git a/drivers/i3c/master/adi-i3c-master.c b/drivers/i3c/master/adi-i3c-master.c
new file mode 100644
index 000000000000..82ac0b3d057a
--- /dev/null
+++ b/drivers/i3c/master/adi-i3c-master.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * I3C Controller driver
+ * Copyright 2025 Analog Devices Inc.
+ * Author: Jorge Marques <jorge.marques@analog.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/adi-axi-common.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "../internals.h"
+
+#define ADI_MAX_DEVS 16
+#define ADI_HAS_MDB_FROM_BCR(x) (FIELD_GET(BIT(2), (x)))
+
+#define REG_ENABLE 0x040
+
+#define REG_PID_L 0x054
+#define REG_PID_H 0x058
+#define REG_DCR_BCR_DA 0x05c
+#define REG_DCR_BCR_DA_GET_DA(x) FIELD_GET(GENMASK(22, 16), (x))
+#define REG_DCR_BCR_DA_GET_BCR(x) FIELD_GET(GENMASK(15, 8), (x))
+#define REG_DCR_BCR_DA_GET_DCR(x) FIELD_GET(GENMASK(7, 0), (x))
+
+#define REG_IRQ_MASK 0x080
+#define REG_IRQ_PENDING 0x084
+#define REG_IRQ_PENDING_DAA BIT(7)
+#define REG_IRQ_PENDING_IBI BIT(6)
+#define REG_IRQ_PENDING_CMDR BIT(5)
+
+#define REG_CMD_FIFO 0x0d4
+#define REG_CMD_FIFO_0_IS_CCC BIT(22)
+#define REG_CMD_FIFO_0_BCAST BIT(21)
+#define REG_CMD_FIFO_0_SR BIT(20)
+#define REG_CMD_FIFO_0_LEN(l) FIELD_PREP(GENMASK(19, 8), (l))
+#define REG_CMD_FIFO_0_DEV_ADDR(a) FIELD_PREP(GENMASK(7, 1), (a))
+#define REG_CMD_FIFO_0_RNW BIT(0)
+#define REG_CMD_FIFO_1_CCC(id) FIELD_PREP(GENMASK(7, 0), (id))
+
+#define REG_CMD_FIFO_ROOM 0x0c0
+#define REG_CMDR_FIFO 0x0d8
+#define REG_CMDR_FIFO_UDA_ERROR 8
+#define REG_CMDR_FIFO_NACK_RESP 6
+#define REG_CMDR_FIFO_CE2_ERROR 4
+#define REG_CMDR_FIFO_CE0_ERROR 1
+#define REG_CMDR_FIFO_NO_ERROR 0
+#define REG_CMDR_FIFO_ERROR(x) FIELD_GET(GENMASK(23, 20), (x))
+#define REG_CMDR_FIFO_XFER_BYTES(x) FIELD_GET(GENMASK(19, 8), (x))
+
+#define REG_SDO_FIFO 0x0dc
+#define REG_SDO_FIFO_ROOM 0x0c8
+#define REG_SDI_FIFO 0x0e0
+#define REG_IBI_FIFO 0x0e4
+#define REG_FIFO_STATUS 0x0e8
+#define REG_FIFO_STATUS_CMDR_EMPTY BIT(0)
+#define REG_FIFO_STATUS_IBI_EMPTY BIT(1)
+
+#define REG_OPS 0x100
+#define REG_OPS_PP_SG_MASK GENMASK(6, 5)
+#define REG_OPS_SET_SG(x) FIELD_PREP(REG_OPS_PP_SG_MASK, (x))
+
+#define REG_IBI_CONFIG 0x140
+#define REG_IBI_CONFIG_ENABLE BIT(0)
+#define REG_IBI_CONFIG_LISTEN BIT(1)
+
+#define REG_DEV_CHAR 0x180
+#define REG_DEV_CHAR_IS_I2C BIT(0)
+#define REG_DEV_CHAR_IS_ATTACHED BIT(1)
+#define REG_DEV_CHAR_BCR_IBI(x) FIELD_PREP(GENMASK(3, 2), (x))
+#define REG_DEV_CHAR_WEN BIT(8)
+#define REG_DEV_CHAR_ADDR(x) FIELD_PREP(GENMASK(15, 9), (x))
+
+enum speed_grade {PP_SG_UNSET, PP_SG_1MHZ, PP_SG_3MHZ, PP_SG_6MHZ, PP_SG_12MHZ};
+
+struct adi_i3c_cmd {
+ u32 cmd0;
+ u32 cmd1;
+ u32 tx_len;
+ const void *tx_buf;
+ u32 rx_len;
+ void *rx_buf;
+ u32 error;
+};
+
+struct adi_i3c_xfer {
+ struct list_head node;
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+ unsigned int ncmds_comp;
+ struct adi_i3c_cmd cmds[] __counted_by(ncmds);
+};
+
+struct adi_i3c_master {
+ struct i3c_master_controller base;
+ u32 free_rr_slots;
+ struct {
+ unsigned int num_slots;
+ struct i3c_dev_desc **slots;
+ spinlock_t lock; /* Protect IBI slot access */
+ } ibi;
+ struct {
+ struct list_head list;
+ struct adi_i3c_xfer *cur;
+ spinlock_t lock; /* Protect transfer */
+ } xferqueue;
+ void __iomem *regs;
+ struct clk *clk;
+ unsigned long i3c_scl_lim;
+ struct {
+ u8 addrs[ADI_MAX_DEVS];
+ u8 index;
+ } daa;
+};
+
+static inline struct adi_i3c_master *to_adi_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct adi_i3c_master, base);
+}
+
+static void adi_i3c_master_wr_to_tx_fifo(struct adi_i3c_master *master,
+ const u8 *buf, unsigned int nbytes)
+{
+ unsigned int n, m;
+
+ n = readl(master->regs + REG_SDO_FIFO_ROOM);
+ m = min(n, nbytes);
+ i3c_writel_fifo(master->regs + REG_SDO_FIFO, buf, m);
+}
+
+static void adi_i3c_master_rd_from_rx_fifo(struct adi_i3c_master *master,
+ u8 *buf, unsigned int nbytes)
+{
+ i3c_readl_fifo(master->regs + REG_SDI_FIFO, buf, nbytes);
+}
+
+static bool adi_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd)
+{
+ if (cmd->ndests > 1)
+ return false;
+
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ case I3C_CCC_ENEC(false):
+ case I3C_CCC_DISEC(true):
+ case I3C_CCC_DISEC(false):
+ case I3C_CCC_RSTDAA(true):
+ case I3C_CCC_RSTDAA(false):
+ case I3C_CCC_ENTDAA:
+ case I3C_CCC_SETDASA:
+ case I3C_CCC_SETNEWDA:
+ case I3C_CCC_GETMWL:
+ case I3C_CCC_GETMRL:
+ case I3C_CCC_GETPID:
+ case I3C_CCC_GETBCR:
+ case I3C_CCC_GETDCR:
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int adi_i3c_master_disable(struct adi_i3c_master *master)
+{
+ writel(0, master->regs + REG_IBI_CONFIG);
+
+ return 0;
+}
+
+static struct adi_i3c_xfer *adi_i3c_master_alloc_xfer(struct adi_i3c_master *master,
+ unsigned int ncmds)
+{
+ struct adi_i3c_xfer *xfer;
+
+ xfer = kzalloc(struct_size(xfer, cmds, ncmds), GFP_KERNEL);
+ if (!xfer)
+ return NULL;
+
+ INIT_LIST_HEAD(&xfer->node);
+ xfer->ncmds = ncmds;
+ xfer->ret = -ETIMEDOUT;
+
+ return xfer;
+}
+
+static void adi_i3c_master_start_xfer_locked(struct adi_i3c_master *master)
+{
+ struct adi_i3c_xfer *xfer = master->xferqueue.cur;
+ unsigned int i, n, m;
+
+ if (!xfer)
+ return;
+
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct adi_i3c_cmd *cmd = &xfer->cmds[i];
+
+ if (!(cmd->cmd0 & REG_CMD_FIFO_0_RNW))
+ adi_i3c_master_wr_to_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
+ }
+
+ n = readl(master->regs + REG_CMD_FIFO_ROOM);
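+ /* a CCC occupies two command-FIFO words; stop once the next command no longer fits */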
+ for (i = 0; i < xfer->ncmds; i++) {
+ struct adi_i3c_cmd *cmd = &xfer->cmds[i];
+
+ m = cmd->cmd0 & REG_CMD_FIFO_0_IS_CCC ? 2 : 1;
+ if (m > n)
+ break;
+ writel(cmd->cmd0, master->regs + REG_CMD_FIFO);
+ if (cmd->cmd0 & REG_CMD_FIFO_0_IS_CCC)
+ writel(cmd->cmd1, master->regs + REG_CMD_FIFO);
+ n -= m;
+ }
+}
+
+static void adi_i3c_master_end_xfer_locked(struct adi_i3c_master *master,
+ u32 pending)
+{
+ struct adi_i3c_xfer *xfer = master->xferqueue.cur;
+ int i, ret = 0;
+
+ if (!xfer)
+ return;
+
+ while (!(readl(master->regs + REG_FIFO_STATUS) & REG_FIFO_STATUS_CMDR_EMPTY)) {
+ struct adi_i3c_cmd *cmd;
+ u32 cmdr, rx_len;
+
+ cmdr = readl(master->regs + REG_CMDR_FIFO);
+
+ cmd = &xfer->cmds[xfer->ncmds_comp++];
+ if (cmd->cmd0 & REG_CMD_FIFO_0_RNW) {
+ rx_len = min_t(u32, REG_CMDR_FIFO_XFER_BYTES(cmdr), cmd->rx_len);
+ adi_i3c_master_rd_from_rx_fifo(master, cmd->rx_buf, rx_len);
+ }
+ cmd->error = REG_CMDR_FIFO_ERROR(cmdr);
+ }
+
+ for (i = 0; i < xfer->ncmds_comp; i++) {
+ switch (xfer->cmds[i].error) {
+ case REG_CMDR_FIFO_NO_ERROR:
+ break;
+
+ case REG_CMDR_FIFO_CE0_ERROR:
+ case REG_CMDR_FIFO_CE2_ERROR:
+ case REG_CMDR_FIFO_NACK_RESP:
+ case REG_CMDR_FIFO_UDA_ERROR:
+ ret = -EIO;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ xfer->ret = ret;
+
+ if (xfer->ncmds_comp != xfer->ncmds)
+ return;
+
+ complete(&xfer->comp);
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+ struct adi_i3c_xfer, node);
+ if (xfer)
+ list_del_init(&xfer->node);
+
+ master->xferqueue.cur = xfer;
+ adi_i3c_master_start_xfer_locked(master);
+}
+
+static void adi_i3c_master_queue_xfer(struct adi_i3c_master *master,
+ struct adi_i3c_xfer *xfer)
+{
+ init_completion(&xfer->comp);
+ guard(spinlock_irqsave)(&master->xferqueue.lock);
+ if (master->xferqueue.cur) {
+ list_add_tail(&xfer->node, &master->xferqueue.list);
+ } else {
+ master->xferqueue.cur = xfer;
+ adi_i3c_master_start_xfer_locked(master);
+ }
+}
+
+static void adi_i3c_master_unqueue_xfer(struct adi_i3c_master *master,
+ struct adi_i3c_xfer *xfer)
+{
+ guard(spinlock_irqsave)(&master->xferqueue.lock);
+ if (master->xferqueue.cur == xfer)
+ master->xferqueue.cur = NULL;
+ else
+ list_del_init(&xfer->node);
+
+ writel(0x01, master->regs + REG_ENABLE);
+ writel(0x00, master->regs + REG_ENABLE);
+ writel(REG_IRQ_PENDING_CMDR, master->regs + REG_IRQ_MASK);
+}
+
+static enum i3c_error_code adi_i3c_cmd_get_err(struct adi_i3c_cmd *cmd)
+{
+ switch (cmd->error) {
+ case REG_CMDR_FIFO_CE0_ERROR:
+ return I3C_ERROR_M0;
+
+ case REG_CMDR_FIFO_CE2_ERROR:
+ case REG_CMDR_FIFO_NACK_RESP:
+ return I3C_ERROR_M2;
+
+ default:
+ break;
+ }
+
+ return I3C_ERROR_UNKNOWN;
+}
+
+static int adi_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ struct adi_i3c_cmd *ccmd;
+
+ xfer = adi_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+
+ ccmd = xfer->cmds;
+ ccmd->cmd1 = REG_CMD_FIFO_1_CCC(cmd->id);
+ ccmd->cmd0 = REG_CMD_FIFO_0_IS_CCC |
+ REG_CMD_FIFO_0_LEN(cmd->dests[0].payload.len);
+
+ if (cmd->id & I3C_CCC_DIRECT)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_DEV_ADDR(cmd->dests[0].addr);
+
+ if (cmd->rnw) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = cmd->dests[0].payload.data;
+ ccmd->rx_len = cmd->dests[0].payload.len;
+ } else {
+ ccmd->tx_buf = cmd->dests[0].payload.data;
+ ccmd->tx_len = cmd->dests[0].payload.len;
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ cmd->err = adi_i3c_cmd_get_err(&xfer->cmds[0]);
+
+ return 0;
+}
+
+static int adi_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ int i, ret;
+
+ if (!nxfers)
+ return 0;
+
+ xfer = adi_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct adi_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = REG_CMD_FIFO_0_DEV_ADDR(dev->info.dyn_addr);
+
+ if (xfers[i].rnw) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = xfers[i].data.in;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].data.out;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= REG_CMD_FIFO_0_LEN(xfers[i].len);
+
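+ /* Use a repeated start between all but the last transfer */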
+ if (i < nxfers - 1)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_SR;
+
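+ /* Address the first transfer through the I3C broadcast address */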
+ if (!i)
+ ccmd->cmd0 |= REG_CMD_FIFO_0_BCAST;
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp,
+ msecs_to_jiffies(1000)))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ ret = xfer->ret;
+
+ for (i = 0; i < nxfers; i++)
+ xfers[i].err = adi_i3c_cmd_get_err(&xfer->cmds[i]);
+
+ return ret;
+}
+
+struct adi_i3c_i2c_dev_data {
+ struct i3c_generic_ibi_pool *ibi_pool;
+ u16 id;
+ s16 ibi;
+};
+
+static int adi_i3c_master_get_rr_slot(struct adi_i3c_master *master,
+ u8 dyn_addr)
+{
+ if (!master->free_rr_slots)
+ return -ENOSPC;
+
+ return ffs(master->free_rr_slots) - 1;
+}
+
+static int adi_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ u8 addr;
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(dyn_addr), master->regs + REG_DEV_CHAR);
+ writel((readl(master->regs + REG_DEV_CHAR) &
+ ~REG_DEV_CHAR_IS_ATTACHED) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static int adi_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ int slot;
+ u8 addr;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ slot = adi_i3c_master_get_rr_slot(master, dev->info.dyn_addr);
+ if (slot < 0) {
+ kfree(data);
+ return slot;
+ }
+
+ data->id = slot;
+ i3c_dev_set_master_data(dev, data);
+ master->free_rr_slots &= ~BIT(slot);
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static void adi_i3c_master_sync_dev_char(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_dev_desc *i3cdev;
+ u32 bcr_ibi;
+ u8 addr;
+
+ i3c_bus_for_each_i3cdev(&m->bus, i3cdev) {
+ addr = i3cdev->info.dyn_addr ?
+ i3cdev->info.dyn_addr : i3cdev->info.static_addr;
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ bcr_ibi = FIELD_GET(I3C_BCR_IBI_PAYLOAD | I3C_BCR_IBI_REQ_CAP, i3cdev->info.bcr);
+ writel(readl(master->regs + REG_DEV_CHAR) |
+ REG_DEV_CHAR_BCR_IBI(bcr_ibi) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+ }
+}
+
+static void adi_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ u8 addr;
+
+ addr = dev->info.dyn_addr ? dev->info.dyn_addr : dev->info.static_addr;
+
+ writel(REG_DEV_CHAR_ADDR(addr), master->regs + REG_DEV_CHAR);
+ writel((readl(master->regs + REG_DEV_CHAR) &
+ ~REG_DEV_CHAR_IS_ATTACHED) | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ i3c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static int adi_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ int slot;
+
+ slot = adi_i3c_master_get_rr_slot(master, 0);
+ if (slot < 0)
+ return slot;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = slot;
+ master->free_rr_slots &= ~BIT(slot);
+ i2c_dev_set_master_data(dev, data);
+
+ writel(REG_DEV_CHAR_ADDR(dev->addr) |
+ REG_DEV_CHAR_IS_I2C | REG_DEV_CHAR_IS_ATTACHED | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ return 0;
+}
+
+static void adi_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+
+ writel(REG_DEV_CHAR_ADDR(dev->addr) |
+ REG_DEV_CHAR_IS_I2C | REG_DEV_CHAR_WEN,
+ master->regs + REG_DEV_CHAR);
+
+ i2c_dev_set_master_data(dev, NULL);
+ master->free_rr_slots |= BIT(data->id);
+ kfree(data);
+}
+
+static void adi_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+
+ adi_i3c_master_disable(master);
+}
+
+static void adi_i3c_master_upd_i3c_scl_lim(struct adi_i3c_master *master)
+{
+ struct i3c_master_controller *m = &master->base;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ u8 i3c_scl_lim = 0;
+ struct i3c_dev_desc *dev;
+ u8 pp_sg;
+
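+ /* Clamp the push-pull clock to the slowest max SDR rate on the bus */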
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ u8 max_fscl;
+
+ max_fscl = max(I3C_CCC_MAX_SDR_FSCL(dev->info.max_read_ds),
+ I3C_CCC_MAX_SDR_FSCL(dev->info.max_write_ds));
+
+ switch (max_fscl) {
+ case I3C_SDR1_FSCL_8MHZ:
+ max_fscl = PP_SG_6MHZ;
+ break;
+ case I3C_SDR2_FSCL_6MHZ:
+ max_fscl = PP_SG_3MHZ;
+ break;
+ case I3C_SDR3_FSCL_4MHZ:
+ max_fscl = PP_SG_3MHZ;
+ break;
+ case I3C_SDR4_FSCL_2MHZ:
+ max_fscl = PP_SG_1MHZ;
+ break;
+ case I3C_SDR0_FSCL_MAX:
+ default:
+ max_fscl = PP_SG_12MHZ;
+ break;
+ }
+
+ if (max_fscl &&
+ (i3c_scl_lim > max_fscl || !i3c_scl_lim))
+ i3c_scl_lim = max_fscl;
+ }
+
+ if (!i3c_scl_lim)
+ return;
+
+ master->i3c_scl_lim = i3c_scl_lim - 1;
+
+ pp_sg = readl(master->regs + REG_OPS) & ~REG_OPS_PP_SG_MASK;
+ pp_sg |= REG_OPS_SET_SG(master->i3c_scl_lim);
+
+ writel(pp_sg, master->regs + REG_OPS);
+}
+
+static void adi_i3c_master_get_features(struct adi_i3c_master *master,
+ unsigned int slot,
+ struct i3c_device_info *info)
+{
+ u32 buf;
+
+ /* Dynamic address and PID are for identification only */
+ memset(info, 0, sizeof(*info));
+ buf = readl(master->regs + REG_DCR_BCR_DA);
+ info->dyn_addr = REG_DCR_BCR_DA_GET_DA(buf);
+ info->dcr = REG_DCR_BCR_DA_GET_DCR(buf);
+ info->bcr = REG_DCR_BCR_DA_GET_BCR(buf);
+ info->pid = readl(master->regs + REG_PID_L);
+ info->pid |= (u64)readl(master->regs + REG_PID_H) << 32;
+}
+
+static int adi_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ int ret, addr = 0;
+ u32 irq_mask;
+
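+ /* Reserve a free dynamic address for every possible device up front */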
+ for (u8 i = 0; i < ADI_MAX_DEVS; i++) {
+ addr = i3c_master_get_free_addr(m, addr);
+ if (addr < 0)
+ return addr;
+ master->daa.addrs[i] = addr;
+ }
+
+ irq_mask = readl(master->regs + REG_IRQ_MASK);
+ writel(irq_mask | REG_IRQ_PENDING_DAA,
+ master->regs + REG_IRQ_MASK);
+
+ master->daa.index = 0;
+ ret = i3c_master_entdaa_locked(&master->base);
+
+ writel(irq_mask, master->regs + REG_IRQ_MASK);
+
+ /* DAA always finishes with CE2_ERROR or NACK_RESP */
+ if (ret && ret != I3C_ERROR_M2)
+ return ret;
+
+ /* Add the discovered I3C devices */
+ for (u8 i = 0; i < master->daa.index; i++)
+ i3c_master_add_i3c_dev_locked(m, master->daa.addrs[i]);
+ /* Sync the retrieved device info with the IP */
+ adi_i3c_master_sync_dev_char(m);
+
+ i3c_master_defslvs_locked(&master->base);
+
+ adi_i3c_master_upd_i3c_scl_lim(master);
+
+ return 0;
+}
+
+static int adi_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_device_info info = { };
+ int ret;
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+
+ adi_i3c_master_get_features(master, 0, &info);
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+
+ writel(REG_IBI_CONFIG_LISTEN,
+ master->regs + REG_IBI_CONFIG);
+
+ return 0;
+}
+
+static void adi_i3c_master_handle_ibi(struct adi_i3c_master *master,
+ u32 raw)
+{
+ struct adi_i3c_i2c_dev_data *data;
+ struct i3c_ibi_slot *slot;
+ struct i3c_dev_desc *dev;
+ u8 da, id, mdb, len;
+ u8 *buf;
+
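+ /* The IBI status word carries the dynamic address in bits 23:17 and the MDB in bits 15:8 */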
+ da = FIELD_GET(GENMASK(23, 17), raw);
+ mdb = FIELD_GET(GENMASK(15, 8), raw);
+ for (id = 0; id < master->ibi.num_slots; id++) {
+ if (master->ibi.slots[id] &&
+ master->ibi.slots[id]->info.dyn_addr == da)
+ break;
+ }
+
+ if (id == master->ibi.num_slots)
+ return;
+
+ dev = master->ibi.slots[id];
+ len = ADI_HAS_MDB_FROM_BCR(dev->info.bcr);
+ data = i3c_dev_get_master_data(dev);
+
+ guard(spinlock)(&master->ibi.lock);
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot)
+ return;
+
+ slot->len = len;
+ buf = slot->data;
+ buf[0] = mdb;
+ i3c_master_queue_ibi(dev, slot);
+}
+
+static void adi_i3c_master_demux_ibis(struct adi_i3c_master *master)
+{
+ while (!(readl(master->regs + REG_FIFO_STATUS) & REG_FIFO_STATUS_IBI_EMPTY)) {
+ u32 raw = readl(master->regs + REG_IBI_FIFO);
+
+ adi_i3c_master_handle_ibi(master, raw);
+ }
+}
+
+static void adi_i3c_master_handle_da_req(struct adi_i3c_master *master)
+{
+ u8 payload0[8];
+ u32 addr;
+
+ adi_i3c_master_rd_from_rx_fifo(master, payload0, 6);
+ addr = master->daa.addrs[master->daa.index++];
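+ /* The new address goes out in bits 7:1 with an odd-parity bit in bit 0 */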
+ addr = (addr << 1) | (parity8(addr) ? 0 : 1);
+
+ writel(addr, master->regs + REG_SDO_FIFO);
+}
+
+static irqreturn_t adi_i3c_master_irq(int irq, void *data)
+{
+ struct adi_i3c_master *master = data;
+ u32 pending;
+
+ pending = readl(master->regs + REG_IRQ_PENDING);
+ writel(pending, master->regs + REG_IRQ_PENDING);
+ if (pending & REG_IRQ_PENDING_CMDR) {
+ scoped_guard(spinlock_irqsave, &master->xferqueue.lock) {
+ adi_i3c_master_end_xfer_locked(master, pending);
+ }
+ }
+ if (pending & REG_IRQ_PENDING_IBI)
+ adi_i3c_master_demux_ibis(master);
+ if (pending & REG_IRQ_PENDING_DAA)
+ adi_i3c_master_handle_da_req(master);
+
+ return IRQ_HANDLED;
+}
+
+static int adi_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ struct i2c_msg *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_xfer *xfer __free(kfree) = NULL;
+ int i;
+
+ if (!nxfers)
+ return 0;
+ for (i = 0; i < nxfers; i++) {
+ if (xfers[i].flags & I2C_M_TEN)
+ return -EOPNOTSUPP;
+ }
+ xfer = adi_i3c_master_alloc_xfer(master, nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ struct adi_i3c_cmd *ccmd = &xfer->cmds[i];
+
+ ccmd->cmd0 = REG_CMD_FIFO_0_DEV_ADDR(xfers[i].addr);
+
+ if (xfers[i].flags & I2C_M_RD) {
+ ccmd->cmd0 |= REG_CMD_FIFO_0_RNW;
+ ccmd->rx_buf = xfers[i].buf;
+ ccmd->rx_len = xfers[i].len;
+ } else {
+ ccmd->tx_buf = xfers[i].buf;
+ ccmd->tx_len = xfers[i].len;
+ }
+
+ ccmd->cmd0 |= REG_CMD_FIFO_0_LEN(xfers[i].len);
+ }
+
+ adi_i3c_master_queue_xfer(master, xfer);
+ if (!wait_for_completion_timeout(&xfer->comp,
+ m->i2c.timeout))
+ adi_i3c_master_unqueue_xfer(master, xfer);
+
+ return xfer->ret;
+}
+
+static int adi_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct i3c_dev_desc *i3cdev;
+ u32 enabled = 0;
+ int ret;
+
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+
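+ /* Fall back to listen-only mode and mask the IBI interrupt once no device has IBIs enabled */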
+ i3c_bus_for_each_i3cdev(&m->bus, i3cdev) {
+ if (dev != i3cdev && i3cdev->ibi)
+ enabled |= i3cdev->ibi->enabled;
+ }
+ if (!enabled) {
+ writel(REG_IBI_CONFIG_LISTEN,
+ master->regs + REG_IBI_CONFIG);
+ writel(readl(master->regs + REG_IRQ_MASK) & ~REG_IRQ_PENDING_IBI,
+ master->regs + REG_IRQ_MASK);
+ }
+
+ return ret;
+}
+
+static int adi_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+
+ writel(REG_IBI_CONFIG_LISTEN | REG_IBI_CONFIG_ENABLE,
+ master->regs + REG_IBI_CONFIG);
+
+ writel(readl(master->regs + REG_IRQ_MASK) | REG_IRQ_PENDING_IBI,
+ master->regs + REG_IRQ_MASK);
+
+ return i3c_master_enec_locked(m, dev->info.dyn_addr,
+ I3C_CCC_EVENT_SIR);
+}
+
+static int adi_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data;
+ unsigned int i;
+
+ data = i3c_dev_get_master_data(dev);
+ data->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(data->ibi_pool))
+ return PTR_ERR(data->ibi_pool);
+
+ scoped_guard(spinlock_irqsave, &master->ibi.lock) {
+ for (i = 0; i < master->ibi.num_slots; i++) {
+ if (!master->ibi.slots[i]) {
+ data->ibi = i;
+ master->ibi.slots[i] = dev;
+ break;
+ }
+ }
+ }
+
+ if (i < master->ibi.num_slots)
+ return 0;
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+
+ return -ENOSPC;
+}
+
+static void adi_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct adi_i3c_master *master = to_adi_i3c_master(m);
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ scoped_guard(spinlock_irqsave, &master->ibi.lock) {
+ master->ibi.slots[data->ibi] = NULL;
+ }
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+}
+
+static void adi_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct adi_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_generic_ibi_recycle_slot(data->ibi_pool, slot);
+}
+
+static const struct i3c_master_controller_ops adi_i3c_master_ops = {
+ .bus_init = adi_i3c_master_bus_init,
+ .bus_cleanup = adi_i3c_master_bus_cleanup,
+ .attach_i3c_dev = adi_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = adi_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = adi_i3c_master_detach_i3c_dev,
+ .attach_i2c_dev = adi_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = adi_i3c_master_detach_i2c_dev,
+ .do_daa = adi_i3c_master_do_daa,
+ .supports_ccc_cmd = adi_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = adi_i3c_master_send_ccc_cmd,
+ .priv_xfers = adi_i3c_master_priv_xfers,
+ .i2c_xfers = adi_i3c_master_i2c_xfers,
+ .request_ibi = adi_i3c_master_request_ibi,
+ .enable_ibi = adi_i3c_master_enable_ibi,
+ .disable_ibi = adi_i3c_master_disable_ibi,
+ .free_ibi = adi_i3c_master_free_ibi,
+ .recycle_ibi_slot = adi_i3c_master_recycle_ibi_slot,
+};
+
+static const struct of_device_id adi_i3c_master_of_match[] = {
+ { .compatible = "adi,i3c-master-v1" },
+ {}
+};
+
+static int adi_i3c_master_probe(struct platform_device *pdev)
+{
+ struct adi_i3c_master *master;
+ struct clk_bulk_data *clk;
+ unsigned int version;
+ int ret, irq;
+
+ master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(master->regs))
+ return PTR_ERR(master->regs);
+
+ ret = devm_clk_bulk_get_all_enabled(&pdev->dev, &clk);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret,
+ "Failed to get clocks\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ version = readl(master->regs + ADI_AXI_REG_VERSION);
+ if (ADI_AXI_PCORE_VER_MAJOR(version) != 1)
+ return dev_err_probe(&pdev->dev, -ENODEV, "Unsupported peripheral version %u.%u.%u\n",
+ ADI_AXI_PCORE_VER_MAJOR(version),
+ ADI_AXI_PCORE_VER_MINOR(version),
+ ADI_AXI_PCORE_VER_PATCH(version));
+
+ writel(0x00, master->regs + REG_ENABLE);
+ writel(0x00, master->regs + REG_IRQ_MASK);
+
+ ret = devm_request_irq(&pdev->dev, irq, adi_i3c_master_irq, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, master);
+
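+ /* Slots 1..ADI_MAX_DEVS start out free; slot 0 is never handed out */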
+ master->free_rr_slots = GENMASK(ADI_MAX_DEVS, 1);
+
+ writel(REG_IRQ_PENDING_CMDR, master->regs + REG_IRQ_MASK);
+
+ spin_lock_init(&master->ibi.lock);
+ master->ibi.num_slots = 15;
+ master->ibi.slots = devm_kcalloc(&pdev->dev, master->ibi.num_slots,
+ sizeof(*master->ibi.slots),
+ GFP_KERNEL);
+ if (!master->ibi.slots)
+ return -ENOMEM;
+
+ spin_lock_init(&master->xferqueue.lock);
+ INIT_LIST_HEAD(&master->xferqueue.list);
+
+ return i3c_master_register(&master->base, &pdev->dev,
+ &adi_i3c_master_ops, false);
+}
+
+static void adi_i3c_master_remove(struct platform_device *pdev)
+{
+ struct adi_i3c_master *master = platform_get_drvdata(pdev);
+
+ writel(0xff, master->regs + REG_IRQ_PENDING);
+ writel(0x00, master->regs + REG_IRQ_MASK);
+ writel(0x01, master->regs + REG_ENABLE);
+
+ i3c_master_unregister(&master->base);
+}
+
+static struct platform_driver adi_i3c_master = {
+ .probe = adi_i3c_master_probe,
+ .remove = adi_i3c_master_remove,
+ .driver = {
+ .name = "adi-i3c-master",
+ .of_match_table = adi_i3c_master_of_match,
+ },
+};
+module_platform_driver(adi_i3c_master);
+
+MODULE_AUTHOR("Jorge Marques <jorge.marques@analog.com>");
+MODULE_DESCRIPTION("Analog Devices I3C master driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 974122b2d20e..276592a8222e 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -228,6 +228,7 @@
/* List of quirks */
#define AMD_I3C_OD_PP_TIMING BIT(1)
+#define DW_I3C_DISABLE_RUNTIME_PM_QUIRK BIT(2)
struct dw_i3c_cmd {
u32 cmd_lo;
@@ -252,6 +253,10 @@ struct dw_i3c_i2c_dev_data {
struct i3c_generic_ibi_pool *ibi_pool;
};
+struct dw_i3c_drvdata {
+ u32 flags;
+};
+
static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
const struct i3c_ccc_cmd *cmd)
{
@@ -1535,6 +1540,8 @@ int dw_i3c_common_probe(struct dw_i3c_master *master,
struct platform_device *pdev)
{
int ret, irq;
+ const struct dw_i3c_drvdata *drvdata;
+ unsigned long quirks = 0;
if (!master->platform_ops)
master->platform_ops = &dw_i3c_platform_ops_default;
@@ -1590,7 +1597,18 @@ int dw_i3c_common_probe(struct dw_i3c_master *master,
master->maxdevs = ret >> 16;
master->free_pos = GENMASK(master->maxdevs - 1, 0);
- master->quirks = (unsigned long)device_get_match_data(&pdev->dev);
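+ /*
+ * ACPI match data carries the quirk flags directly, while OF match
+ * data points to a struct dw_i3c_drvdata holding them.
+ */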
+ if (has_acpi_companion(&pdev->dev)) {
+ quirks = (unsigned long)device_get_match_data(&pdev->dev);
+ } else if (pdev->dev.of_node) {
+ drvdata = device_get_match_data(&pdev->dev);
+ if (drvdata)
+ quirks = drvdata->flags;
+ }
+ master->quirks = quirks;
+
+ /* Keep controller enabled by preventing runtime suspend */
+ if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK)
+ pm_runtime_get_noresume(&pdev->dev);
INIT_WORK(&master->hj_work, dw_i3c_hj_work);
ret = i3c_master_register(&master->base, &pdev->dev,
@@ -1617,6 +1635,10 @@ void dw_i3c_common_remove(struct dw_i3c_master *master)
cancel_work_sync(&master->hj_work);
i3c_master_unregister(&master->base);
+ /* Balance pm_runtime_get_noresume() from probe() */
+ if (master->quirks & DW_I3C_DISABLE_RUNTIME_PM_QUIRK)
+ pm_runtime_put_noidle(master->dev);
+
pm_runtime_disable(master->dev);
pm_runtime_set_suspended(master->dev);
pm_runtime_dont_use_autosuspend(master->dev);
@@ -1737,8 +1759,37 @@ static const struct dev_pm_ops dw_i3c_pm_ops = {
SET_RUNTIME_PM_OPS(dw_i3c_master_runtime_suspend, dw_i3c_master_runtime_resume, NULL)
};
+static void dw_i3c_shutdown(struct platform_device *pdev)
+{
+ struct dw_i3c_master *master = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return;
+ }
+
+ cancel_work_sync(&master->hj_work);
+
+ /* Disable interrupts */
+ writel((u32)~INTR_ALL, master->regs + INTR_STATUS_EN);
+ writel((u32)~INTR_ALL, master->regs + INTR_SIGNAL_EN);
+
+ pm_runtime_put_autosuspend(master->dev);
+}
+
+static const struct dw_i3c_drvdata altr_agilex5_drvdata = {
+ .flags = DW_I3C_DISABLE_RUNTIME_PM_QUIRK,
+};
+
static const struct of_device_id dw_i3c_master_of_match[] = {
{ .compatible = "snps,dw-i3c-master-1.00a", },
+ { .compatible = "altr,agilex5-dw-i3c-master",
+ .data = &altr_agilex5_drvdata,
+ },
{},
};
MODULE_DEVICE_TABLE(of, dw_i3c_master_of_match);
@@ -1752,6 +1803,7 @@ MODULE_DEVICE_TABLE(acpi, amd_i3c_device_match);
static struct platform_driver dw_i3c_driver = {
.probe = dw_i3c_probe,
.remove = dw_i3c_remove,
+ .shutdown = dw_i3c_shutdown,
.driver = {
.name = "dw-i3c-master",
.of_match_table = dw_i3c_master_of_match,
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index dd636094b07f..eb8a3ae2990d 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -317,7 +317,9 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
break;
next_addr = ret;
- DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
+ dev_dbg(&hci->master.dev,
+ "next_addr = 0x%02x, DAA using DAT %d",
+ next_addr, dat_idx);
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
mipi_i3c_hci_dct_index_reset(hci);
@@ -349,8 +351,9 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
}
i3c_hci_dct_get_val(hci, 0, &pid, &dcr, &bcr);
- DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
- next_addr, pid, dcr, bcr);
+ dev_dbg(&hci->master.dev,
+ "assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
dat_idx = -1;
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
index 4493b2b067cb..efb4326a25b7 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
@@ -261,7 +261,7 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
if (ret < 0)
break;
next_addr = ret;
- DBG("next_addr = 0x%02x", next_addr);
+ dev_dbg(&hci->master.dev, "next_addr = 0x%02x", next_addr);
xfer[0].cmd_tid = hci_get_tid();
xfer[0].cmd_desc[0] =
CMD_0_ATTR_A |
@@ -293,8 +293,9 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
pid = (pid << 32) | device_id[0];
bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
- DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
- next_addr, pid, dcr, bcr);
+ dev_dbg(&hci->master.dev,
+ "assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
/*
* TODO: Extend the subsystem layer to allow for registering
* new device and provide BCR/DCR/PID at the same time.
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 60f1175f1f37..47e42cb4dbe7 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -121,8 +121,6 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
struct i3c_device_info info;
int ret;
- DBG("");
-
if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
ret = mipi_i3c_hci_dat_v1.init(hci);
if (ret)
@@ -149,7 +147,7 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
amd_set_resp_buf_thld(hci);
reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
- DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
+ dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));
return 0;
}
@@ -159,8 +157,6 @@ static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
struct i3c_hci *hci = to_i3c_hci(m);
struct platform_device *pdev = to_platform_device(m->dev.parent);
- DBG("");
-
reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
synchronize_irq(platform_get_irq(pdev, 0));
hci->io->cleanup(hci);
@@ -196,8 +192,8 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
- DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
- ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
+ dev_dbg(&hci->master.dev, "cmd=%#x rnw=%d ndests=%d data[0].len=%d",
+ ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
@@ -255,8 +251,8 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
}
if (ccc->rnw)
- DBG("got: %*ph",
- ccc->dests[0].payload.len, ccc->dests[0].payload.data);
+ dev_dbg(&hci->master.dev, "got: %*ph",
+ ccc->dests[0].payload.len, ccc->dests[0].payload.data);
out:
hci_free_xfer(xfer, nxfers);
@@ -267,39 +263,9 @@ static int i3c_hci_daa(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
- DBG("");
-
return hci->cmd->perform_daa(hci);
}
-static int i3c_hci_alloc_safe_xfer_buf(struct i3c_hci *hci,
- struct hci_xfer *xfer)
-{
- if (hci->io != &mipi_i3c_hci_dma ||
- xfer->data == NULL || !is_vmalloc_addr(xfer->data))
- return 0;
-
- if (xfer->rnw)
- xfer->bounce_buf = kzalloc(xfer->data_len, GFP_KERNEL);
- else
- xfer->bounce_buf = kmemdup(xfer->data,
- xfer->data_len, GFP_KERNEL);
-
- return xfer->bounce_buf == NULL ? -ENOMEM : 0;
-}
-
-static void i3c_hci_free_safe_xfer_buf(struct i3c_hci *hci,
- struct hci_xfer *xfer)
-{
- if (hci->io != &mipi_i3c_hci_dma || xfer->bounce_buf == NULL)
- return;
-
- if (xfer->rnw)
- memcpy(xfer->data, xfer->bounce_buf, xfer->data_len);
-
- kfree(xfer->bounce_buf);
-}
-
static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *i3c_xfers,
int nxfers)
@@ -311,7 +277,7 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
unsigned int size_limit;
int i, last, ret = 0;
- DBG("nxfers = %d", nxfers);
+ dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
@@ -333,9 +299,6 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
}
hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
xfer[i].cmd_desc[0] |= CMD_0_ROC;
- ret = i3c_hci_alloc_safe_xfer_buf(hci, &xfer[i]);
- if (ret)
- goto out;
}
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
@@ -359,9 +322,6 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
}
out:
- for (i = 0; i < nxfers; i++)
- i3c_hci_free_safe_xfer_buf(hci, &xfer[i]);
-
hci_free_xfer(xfer, nxfers);
return ret;
}
@@ -375,14 +335,14 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
DECLARE_COMPLETION_ONSTACK(done);
int i, last, ret = 0;
- DBG("nxfers = %d", nxfers);
+ dev_dbg(&hci->master.dev, "nxfers = %d", nxfers);
xfer = hci_alloc_xfer(nxfers);
if (!xfer)
return -ENOMEM;
for (i = 0; i < nxfers; i++) {
- xfer[i].data = i2c_get_dma_safe_msg_buf(&i2c_xfers[i], 1);
+ xfer[i].data = i2c_xfers[i].buf;
xfer[i].data_len = i2c_xfers[i].len;
xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
@@ -408,10 +368,6 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
}
out:
- for (i = 0; i < nxfers; i++)
- i2c_put_dma_safe_msg_buf(xfer[i].data, &i2c_xfers[i],
- ret ? false : true);
-
hci_free_xfer(xfer, nxfers);
return ret;
}
@@ -423,8 +379,6 @@ static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
struct i3c_hci_dev_data *dev_data;
int ret;
- DBG("");
-
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
if (!dev_data)
return -ENOMEM;
@@ -448,8 +402,6 @@ static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
- DBG("");
-
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
dev->info.dyn_addr);
@@ -462,8 +414,6 @@ static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
- DBG("");
-
i3c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
@@ -477,8 +427,6 @@ static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
struct i3c_hci_dev_data *dev_data;
int ret;
- DBG("");
-
if (hci->cmd != &mipi_i3c_hci_cmd_v1)
return 0;
dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
@@ -502,8 +450,6 @@ static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
struct i3c_hci *hci = to_i3c_hci(m);
struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
- DBG("");
-
if (dev_data) {
i2c_dev_set_master_data(dev, NULL);
if (hci->cmd == &mipi_i3c_hci_cmd_v1)
@@ -591,7 +537,7 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
val = reg_read(INTR_STATUS);
reg_write(INTR_STATUS, val);
- DBG("INTR_STATUS = %#x", val);
+ dev_dbg(&hci->master.dev, "INTR_STATUS %#x", val);
if (val)
result = IRQ_HANDLED;
@@ -641,7 +587,7 @@ static int i3c_hci_init(struct i3c_hci *hci)
}
hci->caps = reg_read(HC_CAPABILITIES);
- DBG("caps = %#x", hci->caps);
+ dev_dbg(&hci->master.dev, "caps = %#x", hci->caps);
size_in_dwords = hci->version_major < 1 ||
(hci->version_major == 1 && hci->version_minor < 1);
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index 491dfe70b660..c401a9425cdc 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/i3c/master.h>
#include <linux/io.h>
+#include <linux/pci.h>
#include "hci.h"
#include "cmd.h"
@@ -76,7 +77,6 @@
#define INTR_TRANSFER_COMPLETION BIT(11)
#define INTR_RING_OP BIT(10)
#define INTR_TRANSFER_ERR BIT(9)
-#define INTR_WARN_INS_STOP_MODE BIT(7)
#define INTR_IBI_RING_FULL BIT(6)
#define INTR_TRANSFER_ABORT BIT(5)
@@ -138,6 +138,7 @@ struct hci_rh_data {
};
struct hci_rings_data {
+ struct device *sysdev;
unsigned int total;
struct hci_rh_data headers[] __counted_by(total);
};
@@ -165,20 +166,20 @@ static void hci_dma_cleanup(struct i3c_hci *hci)
rh_reg_write(IBI_SETUP, 0);
if (rh->xfer)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->xfer_struct_sz * rh->xfer_entries,
rh->xfer, rh->xfer_dma);
if (rh->resp)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->resp_struct_sz * rh->xfer_entries,
rh->resp, rh->resp_dma);
kfree(rh->src_xfers);
if (rh->ibi_status)
- dma_free_coherent(&hci->master.dev,
+ dma_free_coherent(rings->sysdev,
rh->ibi_status_sz * rh->ibi_status_entries,
rh->ibi_status, rh->ibi_status_dma);
if (rh->ibi_data_dma)
- dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
+ dma_unmap_single(rings->sysdev, rh->ibi_data_dma,
rh->ibi_chunk_sz * rh->ibi_chunks_total,
DMA_FROM_DEVICE);
kfree(rh->ibi_data);
@@ -194,11 +195,23 @@ static int hci_dma_init(struct i3c_hci *hci)
{
struct hci_rings_data *rings;
struct hci_rh_data *rh;
+ struct device *sysdev;
u32 regval;
unsigned int i, nr_rings, xfers_sz, resps_sz;
unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
int ret;
+ /*
+ * Point to the physical device that performs DMA and, when an IOMMU
+ * is enabled, has the IOMMU setup done for it, and use that device
+ * with the DMA API. Here such a device is either the parent of the
+ * "mipi-i3c-hci" platform device (OF/ACPI enumeration) or its
+ * grandparent (PCI enumeration).
+ */
+ sysdev = hci->master.dev.parent;
+ if (sysdev->parent && dev_is_pci(sysdev->parent))
+ sysdev = sysdev->parent;
+
regval = rhs_reg_read(CONTROL);
nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
@@ -213,6 +226,7 @@ static int hci_dma_init(struct i3c_hci *hci)
return -ENOMEM;
hci->io_data = rings;
rings->total = nr_rings;
+ rings->sysdev = sysdev;
regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
rhs_reg_write(CONTROL, regval);
@@ -234,14 +248,15 @@ static int hci_dma_init(struct i3c_hci *hci)
regval = rh_reg_read(CR_SETUP);
rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
- DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
- rh->xfer_struct_sz, rh->resp_struct_sz);
+ dev_dbg(&hci->master.dev,
+ "xfer_struct_sz = %d, resp_struct_sz = %d",
+ rh->xfer_struct_sz, rh->resp_struct_sz);
xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
resps_sz = rh->resp_struct_sz * rh->xfer_entries;
- rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
+ rh->xfer = dma_alloc_coherent(rings->sysdev, xfers_sz,
&rh->xfer_dma, GFP_KERNEL);
- rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
+ rh->resp = dma_alloc_coherent(rings->sysdev, resps_sz,
&rh->resp_dma, GFP_KERNEL);
rh->src_xfers =
kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
@@ -263,7 +278,6 @@ static int hci_dma_init(struct i3c_hci *hci)
INTR_TRANSFER_COMPLETION |
INTR_RING_OP |
INTR_TRANSFER_ERR |
- INTR_WARN_INS_STOP_MODE |
INTR_IBI_RING_FULL |
INTR_TRANSFER_ABORT);
@@ -295,16 +309,16 @@ static int hci_dma_init(struct i3c_hci *hci)
ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
rh->ibi_status =
- dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
+ dma_alloc_coherent(rings->sysdev, ibi_status_ring_sz,
&rh->ibi_status_dma, GFP_KERNEL);
rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
ret = -ENOMEM;
if (!rh->ibi_status || !rh->ibi_data)
goto err_out;
rh->ibi_data_dma =
- dma_map_single(&hci->master.dev, rh->ibi_data,
+ dma_map_single(rings->sysdev, rh->ibi_data,
ibi_data_ring_sz, DMA_FROM_DEVICE);
- if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
+ if (dma_mapping_error(rings->sysdev, rh->ibi_data_dma)) {
rh->ibi_data_dma = 0;
ret = -ENOMEM;
goto err_out;
@@ -349,9 +363,7 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
xfer = xfer_list + i;
if (!xfer->data)
continue;
- dma_unmap_single(&hci->master.dev,
- xfer->data_dma, xfer->data_len,
- xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ i3c_master_dma_unmap_single(xfer->dma);
}
}
@@ -362,7 +374,6 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
struct hci_rh_data *rh;
unsigned int i, ring, enqueue_ptr;
u32 op1_val, op2_val;
- void *buf;
/* For now we only use ring 0 */
ring = 0;
@@ -373,6 +384,9 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
+ enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ bool need_bounce;
/* store cmd descriptor */
*ring_data++ = xfer->cmd_desc[0];
@@ -391,21 +405,20 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
if (xfer->data) {
- buf = xfer->bounce_buf ? xfer->bounce_buf : xfer->data;
- xfer->data_dma =
- dma_map_single(&hci->master.dev,
- buf,
- xfer->data_len,
- xfer->rnw ?
- DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
- if (dma_mapping_error(&hci->master.dev,
- xfer->data_dma)) {
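+ /*
+ * Behind an IOMMU, bounce RX buffers whose length is not a
+ * multiple of 4 bytes.
+ */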
+ need_bounce = device_iommu_mapped(rings->sysdev) &&
+ xfer->rnw &&
+ xfer->data_len != ALIGN(xfer->data_len, 4);
+ xfer->dma = i3c_master_dma_map_single(rings->sysdev,
+ xfer->data,
+ xfer->data_len,
+ need_bounce,
+ dir);
+ if (!xfer->dma) {
hci_dma_unmap_xfer(hci, xfer_list, i);
return -ENOMEM;
}
- *ring_data++ = lower_32_bits(xfer->data_dma);
- *ring_data++ = upper_32_bits(xfer->data_dma);
+ *ring_data++ = lower_32_bits(xfer->dma->addr);
+ *ring_data++ = upper_32_bits(xfer->dma->addr);
} else {
*ring_data++ = 0;
*ring_data++ = 0;
@@ -511,11 +524,11 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
resp = *ring_resp;
tid = RESP_TID(resp);
- DBG("resp = 0x%08x", resp);
+ dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);
xfer = rh->src_xfers[done_ptr];
if (!xfer) {
- DBG("orphaned ring entry");
+ dev_dbg(&hci->master.dev, "orphaned ring entry");
} else {
hci_dma_unmap_xfer(hci, xfer, 1);
xfer->ring_entry = -1;
@@ -586,6 +599,7 @@ static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
{
+ struct hci_rings_data *rings = hci->io_data;
struct i3c_dev_desc *dev;
struct i3c_hci_dev_data *dev_data;
struct hci_dma_dev_ibi_data *dev_ibi;
@@ -617,7 +631,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
ibi_status = *ring_ibi_status;
- DBG("status = %#x", ibi_status);
+ dev_dbg(&hci->master.dev, "status = %#x", ibi_status);
if (ibi_status_error) {
/* we no longer care */
@@ -645,7 +659,9 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
if (last_ptr == -1) {
/* this IBI sequence is not yet complete */
- DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
+ dev_dbg(&hci->master.dev,
+ "no LAST_STATUS available (e=%d d=%d)",
+ enq_ptr, deq_ptr);
return;
}
deq_ptr = last_ptr + 1;
@@ -696,7 +712,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
* rh->ibi_chunk_sz;
if (first_part > ibi_size)
first_part = ibi_size;
- dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
first_part, DMA_FROM_DEVICE);
memcpy(slot->data, ring_ibi_data, first_part);
@@ -705,7 +721,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
/* we wrap back to the start and copy remaining data */
ring_ibi_data = rh->ibi_data;
ring_ibi_data_dma = rh->ibi_data_dma;
- dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ dma_sync_single_for_cpu(rings->sysdev, ring_ibi_data_dma,
ibi_size - first_part, DMA_FROM_DEVICE);
memcpy(slot->data + first_part, ring_ibi_data,
ibi_size - first_part);
@@ -745,7 +761,8 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
rh = &rings->headers[i];
status = rh_reg_read(INTR_STATUS);
- DBG("rh%d status: %#x", i, status);
+ dev_dbg(&hci->master.dev, "Ring %d: RH_INTR_STATUS %#x",
+ i, status);
if (!status)
continue;
rh_reg_write(INTR_STATUS, status);
@@ -761,7 +778,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
u32 ring_status;
dev_notice_ratelimited(&hci->master.dev,
- "ring %d: Transfer Aborted\n", i);
+ "Ring %d: Transfer Aborted\n", i);
mipi_i3c_hci_resume(hci);
ring_status = rh_reg_read(RING_STATUS);
if (!(ring_status & RING_STATUS_RUNNING) &&
@@ -779,12 +796,9 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
RING_CTRL_RUN_STOP);
}
}
- if (status & INTR_WARN_INS_STOP_MODE)
- dev_warn_ratelimited(&hci->master.dev,
- "ring %d: Inserted Stop on Mode Change\n", i);
if (status & INTR_IBI_RING_FULL)
dev_err_ratelimited(&hci->master.dev,
- "ring %d: IBI Ring Full Condition\n", i);
+ "Ring %d: IBI Ring Full Condition\n", i);
handled = true;
}
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
index 2e9b23efdc45..7714f00ea9cc 100644
--- a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
@@ -35,7 +35,7 @@ static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
switch (hci->vendor_mipi_id) {
case MIPI_VENDOR_NXP:
hci->quirks |= HCI_QUIRK_RAW_CCC;
- DBG("raw CCC quirks set");
+ dev_dbg(&hci->master.dev, "raw CCC quirks set");
break;
}
@@ -77,7 +77,8 @@ static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
for (index = 0; index < entries; index++) {
u32 mode_entry = readl(base);
- DBG("mode %d: 0x%08x", index, mode_entry);
+ dev_dbg(&hci->master.dev, "mode %d: 0x%08x",
+ index, mode_entry);
/* TODO: will be needed when I3C core does more than SDR */
base += 4;
}
@@ -97,7 +98,8 @@ static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
dev_info(&hci->master.dev, "available data rates:\n");
for (index = 0; index < entries; index++) {
rate_entry = readl(base);
- DBG("entry %d: 0x%08x", index, rate_entry);
+ dev_dbg(&hci->master.dev, "entry %d: 0x%08x",
+ index, rate_entry);
rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
@@ -268,7 +270,8 @@ int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
cap_header = readl(curr_cap);
cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
- DBG("id=0x%02x length=%d", cap_id, cap_length);
+ dev_dbg(&hci->master.dev, "id=0x%02x length=%d",
+ cap_id, cap_length);
if (!cap_length)
break;
if (curr_cap + cap_length * 4 >= end) {
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index 69ea1d10414b..249ccb13c909 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -12,9 +12,6 @@
#include <linux/io.h>
-/* Handy logging macro to save on line length */
-#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
-
/* 32-bit word aware bit and mask macros */
#define W0_MASK(h, l) GENMASK((h) - 0, (l) - 0)
#define W1_MASK(h, l) GENMASK((h) - 32, (l) - 32)
@@ -94,8 +91,7 @@ struct hci_xfer {
};
struct {
/* DMA specific */
- dma_addr_t data_dma;
- void *bounce_buf;
+ struct i3c_dma *dma;
int ring_number;
int ring_entry;
};
diff --git a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
index c6c3a3ec11ea..dc8ede0f8ad8 100644
--- a/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
+++ b/drivers/i3c/master/mipi-i3c-hci/mipi-i3c-hci-pci.c
@@ -7,61 +7,196 @@
* Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
*/
#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/debugfs.h>
#include <linux/idr.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_qos.h>
+
+struct mipi_i3c_hci_pci {
+ struct pci_dev *pci;
+ struct platform_device *pdev;
+ const struct mipi_i3c_hci_pci_info *info;
+ void *private;
+};
struct mipi_i3c_hci_pci_info {
- int (*init)(struct pci_dev *pci);
+ int (*init)(struct mipi_i3c_hci_pci *hci);
+ void (*exit)(struct mipi_i3c_hci_pci *hci);
};
+static DEFINE_IDA(mipi_i3c_hci_pci_ida);
+
#define INTEL_PRIV_OFFSET 0x2b0
#define INTEL_PRIV_SIZE 0x28
-#define INTEL_PRIV_RESETS 0x04
-#define INTEL_PRIV_RESETS_RESET BIT(0)
-#define INTEL_PRIV_RESETS_RESET_DONE BIT(1)
+#define INTEL_RESETS 0x04
+#define INTEL_RESETS_RESET BIT(0)
+#define INTEL_RESETS_RESET_DONE BIT(1)
+#define INTEL_RESETS_TIMEOUT_US (10 * USEC_PER_MSEC)
-static DEFINE_IDA(mipi_i3c_hci_pci_ida);
+#define INTEL_ACTIVELTR 0x0c
+#define INTEL_IDLELTR 0x10
+
+#define INTEL_LTR_REQ BIT(15)
+#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
+#define INTEL_LTR_SCALE_1US FIELD_PREP(INTEL_LTR_SCALE_MASK, 2)
+#define INTEL_LTR_SCALE_32US FIELD_PREP(INTEL_LTR_SCALE_MASK, 3)
+#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
+
+struct intel_host {
+ void __iomem *priv;
+ u32 active_ltr;
+ u32 idle_ltr;
+ struct dentry *debugfs_root;
+};
-static int mipi_i3c_hci_pci_intel_init(struct pci_dev *pci)
+static void intel_cache_ltr(struct intel_host *host)
{
- unsigned long timeout;
- void __iomem *priv;
+ host->active_ltr = readl(host->priv + INTEL_ACTIVELTR);
+ host->idle_ltr = readl(host->priv + INTEL_IDLELTR);
+}
- priv = devm_ioremap(&pci->dev,
- pci_resource_start(pci, 0) + INTEL_PRIV_OFFSET,
- INTEL_PRIV_SIZE);
- if (!priv)
- return -ENOMEM;
+static void intel_ltr_set(struct device *dev, s32 val)
+{
+ struct mipi_i3c_hci_pci *hci = dev_get_drvdata(dev);
+ struct intel_host *host = hci->private;
+ u32 ltr;
- /* Assert reset, wait for completion and release reset */
- writel(0, priv + INTEL_PRIV_RESETS);
- timeout = jiffies + msecs_to_jiffies(10);
- while (!(readl(priv + INTEL_PRIV_RESETS) &
- INTEL_PRIV_RESETS_RESET_DONE)) {
- if (time_after(jiffies, timeout))
- break;
- cpu_relax();
+ /*
+ * Program the latency tolerance (LTR) according to what the PM QoS
+ * layer has requested, or disable it in case we were passed a
+ * negative value or PM_QOS_LATENCY_ANY.
+ */
+ ltr = readl(host->priv + INTEL_ACTIVELTR);
+
+ if (val == PM_QOS_LATENCY_ANY || val < 0) {
+ ltr &= ~INTEL_LTR_REQ;
+ } else {
+ ltr |= INTEL_LTR_REQ;
+ ltr &= ~INTEL_LTR_SCALE_MASK;
+ ltr &= ~INTEL_LTR_VALUE_MASK;
+
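+ /* Use the 32 us scale for values that do not fit the 10-bit field */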
+ if (val > INTEL_LTR_VALUE_MASK) {
+ val >>= 5;
+ if (val > INTEL_LTR_VALUE_MASK)
+ val = INTEL_LTR_VALUE_MASK;
+ ltr |= INTEL_LTR_SCALE_32US | val;
+ } else {
+ ltr |= INTEL_LTR_SCALE_1US | val;
+ }
}
- writel(INTEL_PRIV_RESETS_RESET, priv + INTEL_PRIV_RESETS);
+
+ if (ltr == host->active_ltr)
+ return;
+
+ writel(ltr, host->priv + INTEL_ACTIVELTR);
+ writel(ltr, host->priv + INTEL_IDLELTR);
+
+ /* Cache the values into the intel_host structure */
+ intel_cache_ltr(host);
+}
+
+static void intel_ltr_expose(struct device *dev)
+{
+ dev->power.set_latency_tolerance = intel_ltr_set;
+ dev_pm_qos_expose_latency_tolerance(dev);
+}
+
+static void intel_ltr_hide(struct device *dev)
+{
+ dev_pm_qos_hide_latency_tolerance(dev);
+ dev->power.set_latency_tolerance = NULL;
+}
+
+static void intel_add_debugfs(struct mipi_i3c_hci_pci *hci)
+{
+ struct dentry *dir = debugfs_create_dir(dev_name(&hci->pci->dev), NULL);
+ struct intel_host *host = hci->private;
+
+ intel_cache_ltr(host);
+
+ host->debugfs_root = dir;
+ debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
+ debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
+}
+
+static void intel_remove_debugfs(struct mipi_i3c_hci_pci *hci)
+{
+ struct intel_host *host = hci->private;
+
+ debugfs_remove_recursive(host->debugfs_root);
+}
+
+static void intel_reset(void __iomem *priv)
+{
+ u32 reg;
+
+ /* Assert reset, wait for completion and release reset */
+ writel(0, priv + INTEL_RESETS);
+ readl_poll_timeout(priv + INTEL_RESETS, reg,
+ reg & INTEL_RESETS_RESET_DONE, 0,
+ INTEL_RESETS_TIMEOUT_US);
+ writel(INTEL_RESETS_RESET, priv + INTEL_RESETS);
+}
+
+static void __iomem *intel_priv(struct pci_dev *pci)
+{
+ resource_size_t base = pci_resource_start(pci, 0);
+
+ return devm_ioremap(&pci->dev, base + INTEL_PRIV_OFFSET, INTEL_PRIV_SIZE);
+}
+
+static int intel_i3c_init(struct mipi_i3c_hci_pci *hci)
+{
+ struct intel_host *host = devm_kzalloc(&hci->pci->dev, sizeof(*host), GFP_KERNEL);
+ void __iomem *priv = intel_priv(hci->pci);
+
+ if (!host || !priv)
+ return -ENOMEM;
+
+ dma_set_mask_and_coherent(&hci->pci->dev, DMA_BIT_MASK(64));
+
+ hci->pci->d3cold_delay = 0;
+
+ hci->private = host;
+ host->priv = priv;
+
+ intel_reset(priv);
+
+ intel_ltr_expose(&hci->pci->dev);
+ intel_add_debugfs(hci);
return 0;
}
-static struct mipi_i3c_hci_pci_info intel_info = {
- .init = mipi_i3c_hci_pci_intel_init,
+static void intel_i3c_exit(struct mipi_i3c_hci_pci *hci)
+{
+ intel_remove_debugfs(hci);
+ intel_ltr_hide(&hci->pci->dev);
+}
+
+static const struct mipi_i3c_hci_pci_info intel_info = {
+ .init = intel_i3c_init,
+ .exit = intel_i3c_exit,
};
static int mipi_i3c_hci_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
- struct mipi_i3c_hci_pci_info *info;
- struct platform_device *pdev;
+ struct mipi_i3c_hci_pci *hci;
struct resource res[2];
int dev_id, ret;
+ hci = devm_kzalloc(&pci->dev, sizeof(*hci), GFP_KERNEL);
+ if (!hci)
+ return -ENOMEM;
+
+ hci->pci = pci;
+
ret = pcim_enable_device(pci);
if (ret)
return ret;
@@ -82,54 +217,67 @@ static int mipi_i3c_hci_pci_probe(struct pci_dev *pci,
if (dev_id < 0)
return dev_id;
- pdev = platform_device_alloc("mipi-i3c-hci", dev_id);
- if (!pdev)
+ hci->pdev = platform_device_alloc("mipi-i3c-hci", dev_id);
+ if (!hci->pdev)
return -ENOMEM;
- pdev->dev.parent = &pci->dev;
- device_set_node(&pdev->dev, dev_fwnode(&pci->dev));
+ hci->pdev->dev.parent = &pci->dev;
+ device_set_node(&hci->pdev->dev, dev_fwnode(&pci->dev));
- ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
+ ret = platform_device_add_resources(hci->pdev, res, ARRAY_SIZE(res));
if (ret)
goto err;
- info = (struct mipi_i3c_hci_pci_info *)id->driver_data;
- if (info && info->init) {
- ret = info->init(pci);
+ hci->info = (const struct mipi_i3c_hci_pci_info *)id->driver_data;
+ if (hci->info && hci->info->init) {
+ ret = hci->info->init(hci);
if (ret)
goto err;
}
- ret = platform_device_add(pdev);
+ ret = platform_device_add(hci->pdev);
if (ret)
- goto err;
+ goto err_exit;
- pci_set_drvdata(pci, pdev);
+ pci_set_drvdata(pci, hci);
return 0;
+err_exit:
+ if (hci->info && hci->info->exit)
+ hci->info->exit(hci);
err:
- platform_device_put(pdev);
+ platform_device_put(hci->pdev);
ida_free(&mipi_i3c_hci_pci_ida, dev_id);
return ret;
}
static void mipi_i3c_hci_pci_remove(struct pci_dev *pci)
{
- struct platform_device *pdev = pci_get_drvdata(pci);
+ struct mipi_i3c_hci_pci *hci = pci_get_drvdata(pci);
+ struct platform_device *pdev = hci->pdev;
int dev_id = pdev->id;
+ if (hci->info && hci->info->exit)
+ hci->info->exit(hci);
+
platform_device_unregister(pdev);
ida_free(&mipi_i3c_hci_pci_ida, dev_id);
}
static const struct pci_device_id mipi_i3c_hci_pci_devices[] = {
+ /* Wildcat Lake-U */
+ { PCI_VDEVICE(INTEL, 0x4d7c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0x4d6f), (kernel_ulong_t)&intel_info},
/* Panther Lake-H */
{ PCI_VDEVICE(INTEL, 0xe37c), (kernel_ulong_t)&intel_info},
{ PCI_VDEVICE(INTEL, 0xe36f), (kernel_ulong_t)&intel_info},
/* Panther Lake-P */
{ PCI_VDEVICE(INTEL, 0xe47c), (kernel_ulong_t)&intel_info},
{ PCI_VDEVICE(INTEL, 0xe46f), (kernel_ulong_t)&intel_info},
+ /* Nova Lake-S */
+ { PCI_VDEVICE(INTEL, 0x6e2c), (kernel_ulong_t)&intel_info},
+ { PCI_VDEVICE(INTEL, 0x6e2d), (kernel_ulong_t)&intel_info},
{ },
};
MODULE_DEVICE_TABLE(pci, mipi_i3c_hci_pci_devices);
diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c
index 2fc71e696911..710faa46a00f 100644
--- a/drivers/i3c/master/mipi-i3c-hci/pio.c
+++ b/drivers/i3c/master/mipi-i3c-hci/pio.c
@@ -213,8 +213,8 @@ static void hci_pio_cleanup(struct i3c_hci *hci)
pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
if (pio) {
- DBG("status = %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
BUG_ON(pio->curr_xfer);
BUG_ON(pio->curr_rx);
BUG_ON(pio->curr_tx);
@@ -226,13 +226,17 @@ static void hci_pio_cleanup(struct i3c_hci *hci)
static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
{
- DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
- DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 0, xfer->cmd_desc[0]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 1, xfer->cmd_desc[1]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
- DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
- DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 2, xfer->cmd_desc[2]);
+ dev_dbg(&hci->master.dev, "cmd_desc[%d] = 0x%08x",
+ 3, xfer->cmd_desc[3]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
}
@@ -254,7 +258,8 @@ static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
@@ -269,7 +274,7 @@ static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
struct hci_xfer *xfer = pio->curr_rx;
u32 *p;
- DBG("%d remaining", count);
+ dev_dbg(&hci->master.dev, "%d remaining", count);
p = xfer->data;
p += (xfer->data_len - xfer->data_left) / 4;
@@ -278,7 +283,8 @@ static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
unsigned int nr_words = count / 4;
/* extract data from FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
*p++ = pio_reg_read(XFER_DATA_PORT);
}
@@ -321,7 +327,8 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
/* push data into the FIFO */
xfer->data_left -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, xfer->data_left);
while (nr_words--)
pio_reg_write(XFER_DATA_PORT, *p++);
}
@@ -336,7 +343,7 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
*/
if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
return false;
- DBG("trailing %d", xfer->data_left);
+ dev_dbg(&hci->master.dev, "trailing %d", xfer->data_left);
pio_reg_write(XFER_DATA_PORT, *p);
xfer->data_left = 0;
}
@@ -481,7 +488,7 @@ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
unsigned int tid = RESP_TID(resp);
- DBG("resp = 0x%08x", resp);
+ dev_dbg(&hci->master.dev, "resp = 0x%08x", resp);
if (tid != xfer->cmd_tid) {
dev_err(&hci->master.dev,
"response tid=%d when expecting %d\n",
@@ -522,14 +529,15 @@ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
* still exists.
*/
if (pio->curr_rx == xfer) {
- DBG("short RX ?");
+ dev_dbg(&hci->master.dev, "short RX ?");
pio->curr_rx = pio->curr_rx->next_data;
} else if (pio->curr_tx == xfer) {
- DBG("short TX ?");
+ dev_dbg(&hci->master.dev, "short TX ?");
pio->curr_tx = pio->curr_tx->next_data;
} else if (xfer->data_left) {
- DBG("PIO xfer count = %d after response",
- xfer->data_left);
+ dev_dbg(&hci->master.dev,
+ "PIO xfer count = %d after response",
+ xfer->data_left);
}
pio->curr_resp = xfer->next_resp;
@@ -591,7 +599,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
struct hci_xfer *prev_queue_tail;
int i;
- DBG("n = %d", n);
+ dev_dbg(&hci->master.dev, "n = %d", n);
/* link xfer instances together and initialize data count */
for (i = 0; i < n; i++) {
@@ -611,8 +619,9 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
if (!hci_pio_process_cmd(hci, pio))
pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
- DBG("status = %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status = %#x/%#x",
+ pio_reg_read(INTR_STATUS),
+ pio_reg_read(INTR_SIGNAL_ENABLE));
}
spin_unlock_irq(&pio->lock);
return 0;
@@ -686,10 +695,10 @@ static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int
int ret;
spin_lock_irq(&pio->lock);
- DBG("n=%d status=%#x/%#x", n,
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
- DBG("main_status = %#x/%#x",
- readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
+ dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
+ readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
spin_unlock_irq(&pio->lock);
@@ -733,8 +742,8 @@ static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
mipi_i3c_hci_pio_reset(hci);
mipi_i3c_hci_resume(hci);
- DBG("status=%#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "status=%#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
}
static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
@@ -749,7 +758,7 @@ static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
if (regval != pio->reg_queue_thresh) {
pio_reg_write(QUEUE_THLD_CTRL, regval);
pio->reg_queue_thresh = regval;
- DBG("%d", thresh_val);
+ dev_dbg(&hci->master.dev, "%d", thresh_val);
}
}
@@ -773,7 +782,8 @@ static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
/* extract the data from the IBI port */
nr_words = thresh_val;
ibi->seg_cnt -= nr_words * 4;
- DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
+ dev_dbg(&hci->master.dev, "now %d left %d",
+ nr_words * 4, ibi->seg_cnt);
while (nr_words--)
*p++ = pio_reg_read(IBI_PORT);
}
@@ -791,7 +801,7 @@ static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
hci_pio_set_ibi_thresh(hci, pio, 1);
if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
return false;
- DBG("trailing %d", ibi->seg_cnt);
+ dev_dbg(&hci->master.dev, "trailing %d", ibi->seg_cnt);
data = pio_reg_read(IBI_PORT);
data = (__force u32) cpu_to_le32(data);
while (ibi->seg_cnt--) {
@@ -820,7 +830,7 @@ static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
*/
ibi_status = pio_reg_read(IBI_PORT);
- DBG("status = %#x", ibi_status);
+ dev_dbg(&hci->master.dev, "status = %#x", ibi_status);
ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
if (ibi_status & IBI_ERROR) {
dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
@@ -986,7 +996,8 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
spin_lock(&pio->lock);
status = pio_reg_read(INTR_STATUS);
- DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
+ dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
+ status, pio->enabled_irqs);
status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
if (!status) {
spin_unlock(&pio->lock);
@@ -1023,8 +1034,8 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
- DBG("(out) status: %#x/%#x",
- pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
spin_unlock(&pio->lock);
return true;
}
diff --git a/drivers/i3c/master/renesas-i3c.c b/drivers/i3c/master/renesas-i3c.c
index 174d3dc5d276..275f7b924288 100644
--- a/drivers/i3c/master/renesas-i3c.c
+++ b/drivers/i3c/master/renesas-i3c.c
@@ -679,7 +679,7 @@ static int renesas_i3c_daa(struct i3c_master_controller *m)
i3c_master_add_i3c_dev_locked(m, i3c->addrs[pos]);
}
- return ret < 0 ? ret : 0;
+ return 0;
}
static bool renesas_i3c_supports_ccc_cmd(struct i3c_master_controller *m,
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 701ae165b25b..a62f22ff8b57 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -40,11 +40,13 @@
#define SVC_I3C_MCTRL_REQUEST_NONE 0
#define SVC_I3C_MCTRL_REQUEST_START_ADDR 1
#define SVC_I3C_MCTRL_REQUEST_STOP 2
+#define SVC_I3C_MCTRL_REQUEST_FORCE_EXIT 6
#define SVC_I3C_MCTRL_REQUEST_IBI_ACKNACK 3
#define SVC_I3C_MCTRL_REQUEST_PROC_DAA 4
#define SVC_I3C_MCTRL_REQUEST_AUTO_IBI 7
#define SVC_I3C_MCTRL_TYPE_I3C 0
#define SVC_I3C_MCTRL_TYPE_I2C BIT(4)
+#define SVC_I3C_MCTRL_TYPE_DDR BIT(5)
#define SVC_I3C_MCTRL_IBIRESP_AUTO 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITHOUT_BYTE 0
#define SVC_I3C_MCTRL_IBIRESP_ACK_WITH_BYTE BIT(7)
@@ -95,6 +97,7 @@
#define SVC_I3C_MINTMASKED 0x098
#define SVC_I3C_MERRWARN 0x09C
#define SVC_I3C_MERRWARN_NACK BIT(2)
+#define SVC_I3C_MERRWARN_CRC BIT(10)
#define SVC_I3C_MERRWARN_TIMEOUT BIT(20)
#define SVC_I3C_MDMACTRL 0x0A0
#define SVC_I3C_MDATACTRL 0x0AC
@@ -165,12 +168,16 @@
struct svc_i3c_cmd {
u8 addr;
- bool rnw;
+ union {
+ bool rnw;
+ u8 cmd;
+ u32 rnw_cmd;
+ };
u8 *in;
const void *out;
unsigned int len;
unsigned int actual_len;
- struct i3c_priv_xfer *xfer;
+ struct i3c_xfer *xfer;
bool continued;
};
@@ -383,6 +390,36 @@ svc_i3c_master_dev_from_addr(struct svc_i3c_master *master,
return master->descs[i];
}
+static bool svc_cmd_is_read(u32 rnw_cmd, u32 type)
+{
+ return (type == SVC_I3C_MCTRL_TYPE_DDR) ? (rnw_cmd & 0x80) : rnw_cmd;
+}
+
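A note on svc_cmd_is_read() above: in HDR-DDR the transfer direction is encoded in bit 7 of the command code (command codes with bit 7 set are reads), whereas SDR transfers carry a plain rnw flag. A minimal standalone sketch of the same decoding rule, using hypothetical stand-in macros rather than the driver's register definitions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define TYPE_I3C 0          /* stand-in for SVC_I3C_MCTRL_TYPE_I3C */
#define TYPE_DDR (1u << 5)  /* stand-in for SVC_I3C_MCTRL_TYPE_DDR */

static bool cmd_is_read(uint32_t rnw_cmd, uint32_t type)
{
	/* DDR: bit 7 of the command code; SDR: the rnw flag itself */
	return (type == TYPE_DDR) ? (rnw_cmd & 0x80) : rnw_cmd;
}

int main(void)
{
	assert(cmd_is_read(0x80, TYPE_DDR));  /* DDR read command */
	assert(!cmd_is_read(0x20, TYPE_DDR)); /* DDR write command */
	assert(cmd_is_read(1, TYPE_I3C));     /* SDR read (rnw = 1) */
	return 0;
}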
+static void svc_i3c_master_emit_force_exit(struct svc_i3c_master *master)
+{
+ u32 reg;
+
+ writel(SVC_I3C_MCTRL_REQUEST_FORCE_EXIT, master->regs + SVC_I3C_MCTRL);
+
+ /*
+ * No need to check for errors here because they never happen at the
+ * hardware level: the IP just waits a few fclk cycles to complete the
+ * DDR exit pattern. Even if fclk stops and a timeout occurs here, the
+ * data has actually already finished transferring. The next command
+ * will then time out because of the wrong hardware state.
+ */
+ readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg), 0, 1000);
+
+ /*
+ * This delay is necessary after the emission of a stop, otherwise e.g.
+ * repeating IBIs do not get detected. There is a note in the manual
+ * about it, stating that the stop condition might not be settled
+ * correctly if a start condition follows too rapidly.
+ */
+ udelay(1);
+}
+
static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
{
writel(SVC_I3C_MCTRL_REQUEST_STOP, master->regs + SVC_I3C_MCTRL);
@@ -406,13 +443,10 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
int ret, val;
u8 *buf;
- slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
- if (!slot)
- return -ENOSPC;
-
- slot->len = 0;
- buf = slot->data;
-
+ /*
+ * Wait for the transfer to complete before proceeding. Otherwise, the EmitStop
+ * request might be sent when the transfer is not complete.
+ */
ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
if (ret) {
@@ -420,6 +454,16 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
return ret;
}
+ slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
+ if (!slot) {
+ dev_dbg(master->dev, "No free IBI slot, dropping the data\n");
+ writel(SVC_I3C_MDATACTRL_FLUSHRB, master->regs + SVC_I3C_MDATACTRL);
+ return -ENOSPC;
+ }
+
+ slot->len = 0;
+ buf = slot->data;
+
while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS)) &&
slot->len < SVC_I3C_FIFO_SIZE) {
mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
@@ -511,15 +555,30 @@ static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
* cycle, leading to missed client IBI handlers.
*
* A typical scenario is when IBIWON occurs and bus arbitration is lost
- * at svc_i3c_master_priv_xfers().
+ * at svc_i3c_master_i3c_xfers().
*
* Clear SVC_I3C_MINT_IBIWON before sending SVC_I3C_MCTRL_REQUEST_AUTO_IBI.
*/
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
- /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
- writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
- SVC_I3C_MCTRL_IBIRESP_AUTO,
+ /*
+ * Write a REQUEST_START_ADDR request to emit the broadcast address for
+ * arbitration, instead of using AUTO_IBI.
+ *
+ * Using an AutoIBI request may cause the controller to remain in the
+ * AutoIBI state when there is a glitch on the SDA line (high->low->high):
+ * 1. SDA high->low, raising an interrupt to execute the IBI isr.
+ * 2. SDA low->high.
+ * 3. The IBI isr writes an AutoIBI request.
+ * 4. The controller does not start the AutoIBI process because SDA is not low.
+ * 5. IBIWON polling times out.
+ * 6. The controller remains in the AutoIBI state and doesn't accept the EmitStop request.
+ */
+ writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
+ SVC_I3C_MCTRL_TYPE_I3C |
+ SVC_I3C_MCTRL_IBIRESP_MANUAL |
+ SVC_I3C_MCTRL_DIR(SVC_I3C_MCTRL_DIR_WRITE) |
+ SVC_I3C_MCTRL_ADDR(I3C_BROADCAST_ADDR),
master->regs + SVC_I3C_MCTRL);
/* Wait for IBIWON, should take approximately 100us */
@@ -539,10 +598,15 @@ static void svc_i3c_master_ibi_isr(struct svc_i3c_master *master)
switch (ibitype) {
case SVC_I3C_MSTATUS_IBITYPE_IBI:
dev = svc_i3c_master_dev_from_addr(master, ibiaddr);
- if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI))
+ if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) {
svc_i3c_master_nack_ibi(master);
- else
+ } else {
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ svc_i3c_master_ack_ibi(master, true);
+ else
+ svc_i3c_master_ack_ibi(master, false);
svc_i3c_master_handle_ibi(master, dev);
+ }
break;
case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN:
if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN))
@@ -771,6 +835,8 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
info.dyn_addr = ret;
+ info.hdr_cap = I3C_CCC_HDR_MODE(I3C_HDR_DDR);
+
writel(SVC_MDYNADDR_VALID | SVC_MDYNADDR_ADDR(info.dyn_addr),
master->regs + SVC_I3C_MDYNADDR);
@@ -1272,10 +1338,11 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
}
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
- bool rnw, unsigned int xfer_type, u8 addr,
+ u32 rnw_cmd, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
unsigned int *actual_len, bool continued, bool repeat_start)
{
+ bool rnw = svc_cmd_is_read(rnw_cmd, xfer_type);
int retry = repeat_start ? 1 : 2;
u32 reg;
int ret;
@@ -1283,6 +1350,16 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
/* clean SVC_I3C_MINT_IBIWON w1c bits */
writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+ if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR) {
+ /* The DDR command needs to be prefilled into the FIFO */
+ writel(rnw_cmd, master->regs + SVC_I3C_MWDATAB);
+ if (!rnw) {
+ /* Write data also needs to be prefilled into the FIFO */
+ ret = svc_i3c_master_write(master, out, xfer_len);
+ if (ret)
+ goto emit_stop;
+ }
+ }
while (retry--) {
writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
@@ -1376,7 +1453,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
if (rnw)
ret = svc_i3c_master_read(master, in, xfer_len);
- else
+ else if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR)
ret = svc_i3c_master_write(master, out, xfer_len);
if (ret < 0)
goto emit_stop;
@@ -1389,10 +1466,19 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
if (ret)
goto emit_stop;
+ if (xfer_type == SVC_I3C_MCTRL_TYPE_DDR &&
+ (readl(master->regs + SVC_I3C_MERRWARN) & SVC_I3C_MERRWARN_CRC)) {
+ ret = -ENXIO;
+ goto emit_stop;
+ }
+
writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
if (!continued) {
- svc_i3c_master_emit_stop(master);
+ if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR)
+ svc_i3c_master_emit_stop(master);
+ else
+ svc_i3c_master_emit_force_exit(master);
/* Wait idle if stop is sent. */
readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
@@ -1402,7 +1488,11 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
return 0;
emit_stop:
- svc_i3c_master_emit_stop(master);
+ if (xfer_type != SVC_I3C_MCTRL_TYPE_DDR)
+ svc_i3c_master_emit_stop(master);
+ else
+ svc_i3c_master_emit_force_exit(master);
+
svc_i3c_master_clear_merrwarn(master);
svc_i3c_master_flush_fifo(master);
@@ -1449,6 +1539,11 @@ static void svc_i3c_master_dequeue_xfer(struct svc_i3c_master *master,
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
}
+static int i3c_mode_to_svc_type(enum i3c_xfer_mode mode)
+{
+ return (mode == I3C_SDR) ? SVC_I3C_MCTRL_TYPE_I3C : SVC_I3C_MCTRL_TYPE_DDR;
+}
+
static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
{
struct svc_i3c_xfer *xfer = master->xferqueue.cur;
@@ -1463,7 +1558,7 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
for (i = 0; i < xfer->ncmds; i++) {
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
- ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
+ ret = svc_i3c_master_xfer(master, cmd->rnw_cmd, xfer->type,
cmd->addr, cmd->in, cmd->out,
cmd->len, &cmd->actual_len,
cmd->continued, i > 0);
@@ -1638,9 +1733,8 @@ static int svc_i3c_master_send_ccc_cmd(struct i3c_master_controller *m,
return ret;
}
-static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers)
+static int svc_i3c_master_i3c_xfers(struct i3c_dev_desc *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
struct svc_i3c_master *master = to_svc_i3c_master(m);
@@ -1648,22 +1742,36 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
struct svc_i3c_xfer *xfer;
int ret, i;
+ if (mode != I3C_SDR) {
+ /*
+ * Only data sizes smaller than the FIFO size are supported in
+ * DDR mode. The first FIFO entry holds the command, so the
+ * FIFO space actually available for data is SVC_I3C_FIFO_SIZE - 2,
+ * since DDR only supports even lengths.
+ */
+ for (i = 0; i < nxfers; i++)
+ if (xfers[i].len > SVC_I3C_FIFO_SIZE - 2)
+ return -EINVAL;
+ }
+
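As a worked example of the length check above: if SVC_I3C_FIFO_SIZE were 16 bytes (an assumed value for illustration, not taken from this patch), the largest DDR payload per transfer would be 16 - 2 = 14 bytes, and a 15- or 16-byte request would be rejected with -EINVAL before anything is queued.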
xfer = svc_i3c_master_alloc_xfer(master, nxfers);
if (!xfer)
return -ENOMEM;
- xfer->type = SVC_I3C_MCTRL_TYPE_I3C;
+ xfer->type = i3c_mode_to_svc_type(mode);
for (i = 0; i < nxfers; i++) {
+ u32 rnw_cmd = (mode == I3C_SDR) ? xfers[i].rnw : xfers[i].cmd;
+ bool rnw = svc_cmd_is_read(rnw_cmd, xfer->type);
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
cmd->xfer = &xfers[i];
cmd->addr = master->addrs[data->index];
- cmd->rnw = xfers[i].rnw;
- cmd->in = xfers[i].rnw ? xfers[i].data.in : NULL;
- cmd->out = xfers[i].rnw ? NULL : xfers[i].data.out;
+ cmd->rnw_cmd = rnw_cmd;
+ cmd->in = rnw ? xfers[i].data.in : NULL;
+ cmd->out = rnw ? NULL : xfers[i].data.out;
cmd->len = xfers[i].len;
- cmd->actual_len = xfers[i].rnw ? xfers[i].len : 0;
+ cmd->actual_len = rnw ? xfers[i].len : 0;
cmd->continued = (i + 1) < nxfers;
}
@@ -1858,7 +1966,7 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.do_daa = svc_i3c_master_do_daa,
.supports_ccc_cmd = svc_i3c_master_supports_ccc_cmd,
.send_ccc_cmd = svc_i3c_master_send_ccc_cmd,
- .priv_xfers = svc_i3c_master_priv_xfers,
+ .i3c_xfers = svc_i3c_master_i3c_xfers,
.i2c_xfers = svc_i3c_master_i2c_xfers,
.request_ibi = svc_i3c_master_request_ibi,
.free_ibi = svc_i3c_master_free_ibi,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 91a7b7e7c0c8..9ba83954c255 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -259,7 +259,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 3,
.target_residency = 6,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -267,7 +267,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -275,7 +275,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -283,7 +283,7 @@ static struct cpuidle_state nehalem_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
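For context on why these hunks are purely cosmetic: in C a function designator decays to a pointer to the function, so intel_idle and &intel_idle evaluate to the same value and the generated code is identical. A minimal illustration:

#include <assert.h>

static int f(void) { return 0; }

int main(void)
{
	int (*a)(void) = f;   /* implicit decay */
	int (*b)(void) = &f;  /* explicit address-of */
	assert(a == b);       /* same pointer either way */
	return a();
}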
@@ -296,7 +296,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -304,7 +304,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -312,7 +312,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 211,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -320,7 +320,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
.target_residency = 345,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -328,7 +328,7 @@ static struct cpuidle_state snb_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
.target_residency = 345,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -341,7 +341,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
@@ -349,7 +349,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 275,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -357,7 +357,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 500,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -365,7 +365,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
@@ -373,7 +373,7 @@ static struct cpuidle_state byt_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -386,7 +386,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6N",
@@ -394,7 +394,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 275,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -402,7 +402,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -410,7 +410,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7S",
@@ -418,7 +418,7 @@ static struct cpuidle_state cht_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -431,7 +431,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -439,7 +439,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -447,7 +447,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -455,7 +455,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -463,7 +463,7 @@ static struct cpuidle_state ivb_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 87,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -476,7 +476,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -484,7 +484,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -492,7 +492,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 156,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -500,7 +500,7 @@ static struct cpuidle_state ivt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 82,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -513,7 +513,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -521,7 +521,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 250,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -529,7 +529,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 300,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -537,7 +537,7 @@ static struct cpuidle_state ivt_cstates_4s[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 84,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -550,7 +550,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -558,7 +558,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -566,7 +566,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 59,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -574,7 +574,7 @@ static struct cpuidle_state ivt_cstates_8s[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 88,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -587,7 +587,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -595,7 +595,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -603,7 +603,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 33,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -611,7 +611,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -619,7 +619,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 166,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -627,7 +627,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -635,7 +635,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 600,
.target_residency = 1800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -643,7 +643,7 @@ static struct cpuidle_state hsw_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2600,
.target_residency = 7700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -655,7 +655,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -663,7 +663,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -671,7 +671,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 40,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -679,7 +679,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -687,7 +687,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 166,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -695,7 +695,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 300,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -703,7 +703,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 600,
.target_residency = 1800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -711,7 +711,7 @@ static struct cpuidle_state bdw_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2600,
.target_residency = 7700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -724,7 +724,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -732,7 +732,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C3",
@@ -740,7 +740,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 70,
.target_residency = 100,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -748,7 +748,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 85,
.target_residency = 200,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -756,7 +756,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x33) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 124,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -764,7 +764,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 200,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -772,7 +772,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 480,
.target_residency = 5000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -780,7 +780,7 @@ static struct cpuidle_state skl_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 890,
.target_residency = 5000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -793,7 +793,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -801,7 +801,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -809,7 +809,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED | CPUIDLE_FLAG_IBRS,
.exit_latency = 133,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -822,7 +822,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -830,7 +830,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -838,7 +838,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 170,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -861,7 +861,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -869,7 +869,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -877,7 +877,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 220,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -885,7 +885,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 280,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -893,7 +893,7 @@ static struct cpuidle_state adl_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 680,
.target_residency = 2000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -906,7 +906,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -914,7 +914,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -922,7 +922,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 170,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -930,7 +930,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 600,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -938,7 +938,7 @@ static struct cpuidle_state adl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 230,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -951,7 +951,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -959,7 +959,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 420,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -967,7 +967,7 @@ static struct cpuidle_state mtl_l_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 310,
.target_residency = 930,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -980,7 +980,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_UNUSABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -988,7 +988,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -996,7 +996,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 195,
.target_residency = 585,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -1004,7 +1004,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 260,
.target_residency = 1040,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -1012,7 +1012,7 @@ static struct cpuidle_state gmt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 660,
.target_residency = 1980,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1025,7 +1025,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1033,7 +1033,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1042,7 +1042,7 @@ static struct cpuidle_state spr_cstates[] __initdata = {
CPUIDLE_FLAG_INIT_XSTATE,
.exit_latency = 290,
.target_residency = 800,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1055,7 +1055,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1063,7 +1063,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1073,7 +1073,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 170,
.target_residency = 650,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6P",
@@ -1083,7 +1083,7 @@ static struct cpuidle_state gnr_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 210,
.target_residency = 1000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1096,7 +1096,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1104,7 +1104,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 4,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1114,7 +1114,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 220,
.target_residency = 650,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6P",
@@ -1124,7 +1124,7 @@ static struct cpuidle_state gnrd_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 240,
.target_residency = 750,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1137,7 +1137,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C2",
@@ -1145,7 +1145,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x10),
.exit_latency = 20,
.target_residency = 80,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
@@ -1153,7 +1153,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1161,7 +1161,7 @@ static struct cpuidle_state atom_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1173,7 +1173,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 4,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C4",
@@ -1181,7 +1181,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1189,7 +1189,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7",
@@ -1197,7 +1197,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
.target_residency = 4000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -1205,7 +1205,7 @@ static struct cpuidle_state tangier_cstates[] __initdata = {
.flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 20000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1217,7 +1217,7 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1225,7 +1225,7 @@ static struct cpuidle_state avn_cstates[] __initdata = {
.flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 15,
.target_residency = 45,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1237,7 +1237,7 @@ static struct cpuidle_state knl_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 1,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle },
{
.name = "C6",
@@ -1245,7 +1245,7 @@ static struct cpuidle_state knl_cstates[] __initdata = {
.flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 120,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle },
{
.enter = NULL }
@@ -1258,7 +1258,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1266,7 +1266,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1274,7 +1274,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 133,
.target_residency = 133,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C7s",
@@ -1282,7 +1282,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x31) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 155,
.target_residency = 155,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C8",
@@ -1290,7 +1290,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1000,
.target_residency = 1000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C9",
@@ -1298,7 +1298,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 2000,
.target_residency = 2000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C10",
@@ -1306,7 +1306,7 @@ static struct cpuidle_state bxt_cstates[] __initdata = {
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 10000,
.target_residency = 10000,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1319,7 +1319,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1327,7 +1327,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 10,
.target_residency = 20,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1335,7 +1335,7 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 50,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1352,7 +1352,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00),
.exit_latency = 2,
.target_residency = 2,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1360,7 +1360,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 15,
.target_residency = 25,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6",
@@ -1368,7 +1368,7 @@ static struct cpuidle_state snr_cstates[] __initdata = {
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 130,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1381,7 +1381,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1389,7 +1389,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 10,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -1397,7 +1397,7 @@ static struct cpuidle_state grr_cstates[] __initdata = {
.flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 500,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
@@ -1410,7 +1410,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
.flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 1,
.target_residency = 1,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C1E",
@@ -1418,7 +1418,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
.flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
.exit_latency = 2,
.target_residency = 10,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6S",
@@ -1427,7 +1427,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 270,
.target_residency = 700,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.name = "C6SP",
@@ -1436,7 +1436,7 @@ static struct cpuidle_state srf_cstates[] __initdata = {
CPUIDLE_FLAG_PARTIAL_HINT_MATCH,
.exit_latency = 310,
.target_residency = 900,
- .enter = &intel_idle,
+ .enter = intel_idle,
.enter_s2idle = intel_idle_s2idle, },
{
.enter = NULL }
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 8c3f7cf55d5f..76911278fb21 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -218,15 +218,30 @@ config BMA180
config BMA220
tristate "Bosch BMA220 3-Axis Accelerometer Driver"
- depends on SPI
+ depends on I2C || SPI
+ select REGMAP
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+ select BMA220_I2C if I2C
+ select BMA220_SPI if SPI
help
Say yes here to add support for the Bosch BMA220 triaxial
acceleration sensor.
To compile this driver as a module, choose M here: the
- module will be called bma220_spi.
+ module will be called bma220_core; bma220_i2c is also built
+ if I2C is enabled, and bma220_spi if SPI is enabled.
+
+config BMA220_I2C
+ tristate
+ select REGMAP_I2C
+ depends on BMA220
+
+config BMA220_SPI
+ tristate
+ select REGMAP_SPI
+ depends on BMA220
config BMA400
tristate "Bosch BMA400 3-Axis Accelerometer Driver"
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index ca8569e25aba..fa440a859283 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -25,7 +25,9 @@ obj-$(CONFIG_ADXL380) += adxl380.o
obj-$(CONFIG_ADXL380_I2C) += adxl380_i2c.o
obj-$(CONFIG_ADXL380_SPI) += adxl380_spi.o
obj-$(CONFIG_BMA180) += bma180.o
-obj-$(CONFIG_BMA220) += bma220_spi.o
+obj-$(CONFIG_BMA220) += bma220_core.o
+obj-$(CONFIG_BMA220_I2C) += bma220_i2c.o
+obj-$(CONFIG_BMA220_SPI) += bma220_spi.o
obj-$(CONFIG_BMA400) += bma400_core.o
obj-$(CONFIG_BMA400_I2C) += bma400_i2c.o
obj-$(CONFIG_BMA400_SPI) += bma400_spi.o
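The new glue modules themselves are not part of this hunk; as a hedged sketch of what the I2C front end of such a core/bus split usually looks like (bma220_core_probe() and the regmap layout are assumptions for illustration, not taken from this patch):

// SPDX-License-Identifier: GPL-2.0
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>

/* Hypothetical entry point exported by bma220_core. */
int bma220_core_probe(struct device *dev, struct regmap *regmap);

static const struct regmap_config bma220_regmap_config = {
	.reg_bits = 8, /* assumed 8-bit register addresses */
	.val_bits = 8,
};

static int bma220_i2c_probe(struct i2c_client *client)
{
	struct regmap *regmap;

	regmap = devm_regmap_init_i2c(client, &bma220_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* All bus-agnostic setup lives in the core module. */
	return bma220_core_probe(&client->dev, regmap);
}

static const struct i2c_device_id bma220_i2c_id[] = {
	{ "bma220" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, bma220_i2c_id);

static struct i2c_driver bma220_i2c_driver = {
	.driver = { .name = "bma220_i2c" },
	.probe = bma220_i2c_probe,
	.id_table = bma220_i2c_id,
};
module_i2c_driver(bma220_i2c_driver);

MODULE_DESCRIPTION("BMA220 I2C glue (illustrative sketch)");
MODULE_LICENSE("GPL");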
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index b7dfd0007aa0..78e3f799ecc1 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -36,10 +36,29 @@
#define ADXL345_REG_TAP_AXIS_MSK GENMASK(2, 0)
#define ADXL345_REG_TAP_SUPPRESS_MSK BIT(3)
#define ADXL345_REG_TAP_SUPPRESS BIT(3)
+#define ADXL345_POWER_CTL_INACT_MSK (ADXL345_POWER_CTL_AUTO_SLEEP | ADXL345_POWER_CTL_LINK)
#define ADXL345_TAP_Z_EN BIT(0)
#define ADXL345_TAP_Y_EN BIT(1)
#define ADXL345_TAP_X_EN BIT(2)
+
+#define ADXL345_INACT_Z_EN BIT(0)
+#define ADXL345_INACT_Y_EN BIT(1)
+#define ADXL345_INACT_X_EN BIT(2)
+#define ADXL345_REG_INACT_ACDC BIT(3)
+#define ADXL345_ACT_INACT_NO_AXIS_EN 0x00
+#define ADXL345_INACT_XYZ_EN (ADXL345_INACT_Z_EN | ADXL345_INACT_Y_EN | ADXL345_INACT_X_EN)
+
+#define ADXL345_ACT_Z_EN BIT(4)
+#define ADXL345_ACT_Y_EN BIT(5)
+#define ADXL345_ACT_X_EN BIT(6)
+#define ADXL345_REG_ACT_ACDC BIT(7)
+#define ADXL345_ACT_XYZ_EN (ADXL345_ACT_Z_EN | ADXL345_ACT_Y_EN | ADXL345_ACT_X_EN)
+
+#define ADXL345_COUPLING_DC 0
+#define ADXL345_COUPLING_AC 1
+#define ADXL345_REG_NO_ACDC 0x00
/* single/double tap */
enum adxl345_tap_type {
@@ -64,6 +83,39 @@ static const unsigned int adxl345_tap_time_reg[] = {
[ADXL345_TAP_TIME_DUR] = ADXL345_REG_DUR,
};
+/* activity/inactivity */
+enum adxl345_activity_type {
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC,
+ ADXL345_INACTIVITY_FF,
+};
+
+static const unsigned int adxl345_act_int_reg[] = {
+ [ADXL345_ACTIVITY] = ADXL345_INT_ACTIVITY,
+ [ADXL345_INACTIVITY] = ADXL345_INT_INACTIVITY,
+ [ADXL345_ACTIVITY_AC] = ADXL345_INT_ACTIVITY,
+ [ADXL345_INACTIVITY_AC] = ADXL345_INT_INACTIVITY,
+ [ADXL345_INACTIVITY_FF] = ADXL345_INT_FREE_FALL,
+};
+
+static const unsigned int adxl345_act_thresh_reg[] = {
+ [ADXL345_ACTIVITY] = ADXL345_REG_THRESH_ACT,
+ [ADXL345_INACTIVITY] = ADXL345_REG_THRESH_INACT,
+ [ADXL345_ACTIVITY_AC] = ADXL345_REG_THRESH_ACT,
+ [ADXL345_INACTIVITY_AC] = ADXL345_REG_THRESH_INACT,
+ [ADXL345_INACTIVITY_FF] = ADXL345_REG_THRESH_FF,
+};
+
+static const unsigned int adxl345_act_acdc_msk[] = {
+ [ADXL345_ACTIVITY] = ADXL345_REG_ACT_ACDC,
+ [ADXL345_INACTIVITY] = ADXL345_REG_INACT_ACDC,
+ [ADXL345_ACTIVITY_AC] = ADXL345_REG_ACT_ACDC,
+ [ADXL345_INACTIVITY_AC] = ADXL345_REG_INACT_ACDC,
+ [ADXL345_INACTIVITY_FF] = ADXL345_REG_NO_ACDC,
+};
+
enum adxl345_odr {
ADXL345_ODR_0P10HZ = 0,
ADXL345_ODR_0P20HZ,
@@ -129,6 +181,14 @@ static const int adxl345_fullres_range_tbl[][2] = {
[ADXL345_16G_RANGE] = { 0, 38312 },
};
+/* scaling */
+static const int adxl345_range_factor_tbl[] = {
+ [ADXL345_2G_RANGE] = 1,
+ [ADXL345_4G_RANGE] = 2,
+ [ADXL345_8G_RANGE] = 4,
+ [ADXL345_16G_RANGE] = 8,
+};
+
struct adxl345_state {
const struct adxl345_chip_info *info;
struct regmap *regmap;
@@ -136,6 +196,9 @@ struct adxl345_state {
u8 watermark;
u8 fifo_mode;
+ u8 inact_threshold;
+ u32 inact_time_ms;
+
u32 tap_duration_us;
u32 tap_latent_us;
u32 tap_window_us;
@@ -145,6 +208,22 @@ struct adxl345_state {
static const struct iio_event_spec adxl345_events[] = {
{
+ /* activity */
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type =
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ /* activity, ac bit set */
+ .type = IIO_EV_TYPE_MAG_ADAPTIVE,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_type =
+ BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+ {
/* single tap */
.type = IIO_EV_TYPE_GESTURE,
.dir = IIO_EV_DIR_SINGLETAP,
@@ -188,10 +267,39 @@ enum adxl345_chans {
chan_x, chan_y, chan_z,
};
+static const struct iio_event_spec adxl345_fake_chan_events[] = {
+ {
+ /* inactivity */
+ .type = IIO_EV_TYPE_MAG,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type =
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+ {
+ /* inactivity, AC bit set */
+ .type = IIO_EV_TYPE_MAG_ADAPTIVE,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type =
+ BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_PERIOD),
+ },
+};
+
static const struct iio_chan_spec adxl345_channels[] = {
ADXL345_CHANNEL(0, chan_x, X),
ADXL345_CHANNEL(1, chan_y, Y),
ADXL345_CHANNEL(2, chan_z, Z),
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X_AND_Y_AND_Z,
+ .scan_index = -1, /* Fake channel */
+ .event_spec = adxl345_fake_chan_events,
+ .num_event_specs = ARRAY_SIZE(adxl345_fake_chan_events),
+ },
};
static const unsigned long adxl345_scan_masks[] = {
@@ -237,6 +345,394 @@ static int adxl345_set_measure_en(struct adxl345_state *st, bool en)
ADXL345_POWER_CTL_MEASURE, en);
}
+/* activity / inactivity */
+
+static int adxl345_set_inact_threshold(struct adxl345_state *st,
+ unsigned int threshold)
+{
+ int ret;
+
+ st->inact_threshold = min(U8_MAX, threshold);
+
+ ret = regmap_write(st->regmap,
+ adxl345_act_thresh_reg[ADXL345_INACTIVITY],
+ st->inact_threshold);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap,
+ adxl345_act_thresh_reg[ADXL345_INACTIVITY_FF],
+ st->inact_threshold);
+}
+
+static int adxl345_set_default_time(struct adxl345_state *st)
+{
+ int max_boundary = U8_MAX;
+ int min_boundary = 10;
+ enum adxl345_odr odr;
+ unsigned int regval;
+ unsigned int val;
+ int ret;
+
+ /* Generate the inactivity time based on the ODR */
+ ret = regmap_read(st->regmap, ADXL345_REG_BW_RATE, &regval);
+ if (ret)
+ return ret;
+
+ odr = FIELD_GET(ADXL345_BW_RATE_MSK, regval);
+ val = clamp(max_boundary - adxl345_odr_tbl[odr][0],
+ min_boundary, max_boundary);
+ st->inact_time_ms = MILLI * val;
+
+ /* Inactivity time in s */
+ return regmap_write(st->regmap, ADXL345_REG_TIME_INACT, val);
+}
+
+static int adxl345_set_inactivity_time(struct adxl345_state *st, u32 val_int)
+{
+ st->inact_time_ms = MILLI * val_int;
+
+ return regmap_write(st->regmap, ADXL345_REG_TIME_INACT, val_int);
+}
+
+static int adxl345_set_freefall_time(struct adxl345_state *st, u32 val_fract)
+{
+ /*
+ * The datasheet maximum is 255 * 5000 us = 1.275 seconds.
+ *
+ * Recommended values are between 100 ms and 350 ms (0x14 to 0x46).
+ */
+ st->inact_time_ms = DIV_ROUND_UP(val_fract, MILLI);
+
+ return regmap_write(st->regmap, ADXL345_REG_TIME_FF,
+ DIV_ROUND_CLOSEST(val_fract, 5));
+}
+
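The 5 ms register LSB makes the recommended window quoted above easy to verify: 100 ms / 5 ms = 20 = 0x14 and 350 ms / 5 ms = 70 = 0x46, with the datasheet ceiling at 255 * 5 ms = 1.275 s.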
+/**
+ * adxl345_set_inact_time - Configure inactivity time explicitly or by ODR.
+ * @st: The sensor state instance.
+ * @val_int: The inactivity time, integer part.
+ * @val_fract: The inactivity time, fractional part when val_int is 0.
+ *
+ * Inactivity time can be configured between 1 and 255 seconds. If a user sets
+ * both val_int and val_fract to 0, a default inactivity time is calculated
+ * automatically (since 0 is also invalid and undefined by the sensor).
+ *
+ * In such cases, power consumption should be considered: the inactivity period
+ * should be shorter at higher sampling frequencies and longer at lower ones.
+ * Specifically, for frequencies above 255 Hz, the default is set to 10 seconds;
+ * for frequencies below 10 Hz, it defaults to 255 seconds.
+ *
+ * The calculation method subtracts the integer part of the configured sample
+ * frequency from 255 to estimate the inactivity time in seconds. Sub-Hertz
+ * values are ignored in this approximation. Since the recommended output data
+ * rates (ODRs) for features like activity/inactivity detection, sleep modes,
+ * and free fall range between 12.5 Hz and 400 Hz, frequencies outside this
+ * range will either use the defined boundary defaults or require explicit
+ * configuration via val_int.
+ *
+ * Return: 0 or error value.
+ */
+static int adxl345_set_inact_time(struct adxl345_state *st, u32 val_int,
+ u32 val_fract)
+{
+ if (val_int > 0) {
+ /* Time >= 1s, inactivity */
+ return adxl345_set_inactivity_time(st, val_int);
+ } else if (val_int == 0) {
+ if (val_fract > 0) {
+ /* Time < 1s, free-fall */
+ return adxl345_set_freefall_time(st, val_fract);
+ } else if (val_fract == 0) {
+ /* Time == 0.0s */
+ return adxl345_set_default_time(st);
+ }
+ }
+
+ /* Reject any other (unsupported) input. */
+ return -EINVAL;
+}
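+
+/*
+ * Worked example (illustrative only): assuming adxl345_odr_tbl[odr][0]
+ * holds the integer Hz part of the configured ODR, the default path above
+ * computes clamp(255 - 200, 10, 255) = 55 at 200 Hz, i.e. a 55 s default
+ * inactivity time, and clamp(255 - 12, 10, 255) = 243 s at 12.5 Hz, the
+ * sub-Hz part being ignored as described in the kernel-doc.
+ */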
+
+/**
+ * adxl345_is_act_inact_ac() - Verify if AC or DC coupling is currently enabled.
+ *
+ * @st: The device data.
+ * @type: The activity or inactivity type.
+ *
+ * Given an activity or inactivity type, combined with either AC coupling or
+ * the DC default, this function checks whether that combination is currently
+ * configured and therefore enabled.
+ *
+ * Return: true if the configured coupling matches the provided type, false
+ * otherwise, or a negative error value on read failure.
+ */
+static int adxl345_is_act_inact_ac(struct adxl345_state *st,
+ enum adxl345_activity_type type)
+{
+ unsigned int regval;
+ bool coupling;
+ int ret;
+
+ if (type == ADXL345_INACTIVITY_FF)
+ return true;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_ACT_INACT_CTRL, &regval);
+ if (ret)
+ return ret;
+
+ coupling = adxl345_act_acdc_msk[type] & regval;
+
+ switch (type) {
+ case ADXL345_ACTIVITY:
+ case ADXL345_INACTIVITY:
+ return coupling == ADXL345_COUPLING_DC;
+ case ADXL345_ACTIVITY_AC:
+ case ADXL345_INACTIVITY_AC:
+ return coupling == ADXL345_COUPLING_AC;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * adxl345_set_act_inact_ac() - Configure AC coupling or DC coupling.
+ *
+ * @st: The device data.
+ * @type: Provide a type of activity or inactivity.
+ * @cmd_en: Enable or disable AC coupling.
+ *
+ * Enables AC coupling or DC coupling depending on the provided type argument.
+ * Note: Activity and inactivity can be either AC coupled or DC coupled, but
+ * not both at the same time.
+ *
+ * Return: 0 if successful, else error value.
+ */
+static int adxl345_set_act_inact_ac(struct adxl345_state *st,
+ enum adxl345_activity_type type,
+ bool cmd_en)
+{
+ unsigned int act_inact_ac;
+
+ if (type == ADXL345_ACTIVITY_AC || type == ADXL345_INACTIVITY_AC)
+ act_inact_ac = ADXL345_COUPLING_AC && cmd_en;
+ else
+ act_inact_ac = ADXL345_COUPLING_DC && cmd_en;
+
+ /*
+ * A setting of false selects dc-coupled operation, and a setting of
+ * true enables ac-coupled operation. In dc-coupled operation, the
+ * current acceleration magnitude is compared directly with
+ * ADXL345_REG_THRESH_ACT and ADXL345_REG_THRESH_INACT to determine
+ * whether activity or inactivity is detected.
+ *
+ * In ac-coupled operation for activity detection, the acceleration
+ * value at the start of activity detection is taken as a reference
+ * value. New samples of acceleration are then compared to this
+ * reference value, and if the magnitude of the difference exceeds the
+ * ADXL345_REG_THRESH_ACT value, the device triggers an activity
+ * interrupt.
+ *
+ * Similarly, in ac-coupled operation for inactivity detection, a
+ * reference value is used for comparison and is updated whenever the
+ * device exceeds the inactivity threshold. After the reference value
+ * is selected, the device compares the magnitude of the difference
+ * between the reference value and the current acceleration with
+ * ADXL345_REG_THRESH_INACT. If the difference is less than the value in
+ * ADXL345_REG_THRESH_INACT for the time in ADXL345_REG_TIME_INACT, the
+ * device is considered inactive and the inactivity interrupt is
+ * triggered. [quoted from p. 24, ADXL345 datasheet Rev. G]
+ *
+ * In conclusion, the first acceleration sample that hits the threshold
+ * in a particular direction is always taken as the acceleration
+ * reference value for that direction. Since activity and inactivity
+ * detection in hardware depend on the x/y/z axes, so do AC and DC
+ * coupling. Note that this driver always enables or disables all three
+ * x/y/z axes for detection via act_axis_ctrl and inact_axis_ctrl,
+ * respectively. Whereas in DC coupling samples are compared directly
+ * against the thresholds, in AC coupling the difference between a
+ * measurement and the first acceleration reference value is compared
+ * against the threshold. AC coupling thus allows somewhat more dynamic
+ * compensation depending on the initial sample.
+ */
+ return regmap_assign_bits(st->regmap, ADXL345_REG_ACT_INACT_CTRL,
+ adxl345_act_acdc_msk[type], act_inact_ac);
+}
+
+static int adxl345_is_act_inact_en(struct adxl345_state *st,
+ enum adxl345_activity_type type)
+{
+ unsigned int axis_ctrl;
+ unsigned int regval;
+ bool int_en, en;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_ACT_INACT_CTRL, &axis_ctrl);
+ if (ret)
+ return ret;
+
+ /* Check if axis for activity are enabled */
+ switch (type) {
+ case ADXL345_ACTIVITY:
+ case ADXL345_ACTIVITY_AC:
+ en = FIELD_GET(ADXL345_ACT_XYZ_EN, axis_ctrl);
+ if (!en)
+ return false;
+ break;
+ case ADXL345_INACTIVITY:
+ case ADXL345_INACTIVITY_AC:
+ en = FIELD_GET(ADXL345_INACT_XYZ_EN, axis_ctrl);
+ if (!en)
+ return false;
+ break;
+ case ADXL345_INACTIVITY_FF:
+ en = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Check if specific interrupt is enabled */
+ ret = regmap_read(st->regmap, ADXL345_REG_INT_ENABLE, &regval);
+ if (ret)
+ return ret;
+
+ int_en = adxl345_act_int_reg[type] & regval;
+ if (!int_en)
+ return false;
+
+ /* Check if configured coupling matches provided type */
+ return adxl345_is_act_inact_ac(st, type);
+}
+
+static int adxl345_set_act_inact_linkbit(struct adxl345_state *st,
+ enum adxl345_activity_type type,
+ bool en)
+{
+ int act_ac_en, inact_ac_en;
+ int act_en, inact_en;
+
+ act_en = adxl345_is_act_inact_en(st, ADXL345_ACTIVITY);
+ if (act_en < 0)
+ return act_en;
+
+ act_ac_en = adxl345_is_act_inact_en(st, ADXL345_ACTIVITY_AC);
+ if (act_ac_en < 0)
+ return act_ac_en;
+
+ if (type == ADXL345_INACTIVITY_FF) {
+ inact_en = false;
+ } else {
+ inact_en = adxl345_is_act_inact_en(st, ADXL345_INACTIVITY);
+ if (inact_en < 0)
+ return inact_en;
+
+ inact_ac_en = adxl345_is_act_inact_en(st, ADXL345_INACTIVITY_AC);
+ if (inact_ac_en < 0)
+ return inact_ac_en;
+
+ inact_en = inact_en || inact_ac_en;
+ }
+
+ act_en = act_en || act_ac_en;
+
+ return regmap_assign_bits(st->regmap, ADXL345_REG_POWER_CTL,
+ ADXL345_POWER_CTL_INACT_MSK,
+ en && act_en && inact_en);
+}
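+
+/*
+ * Example (illustrative): with DC-coupled activity and AC-coupled
+ * inactivity both enabled, act_en and inact_en above evaluate true, so
+ * enabling either event also sets the link/auto-sleep bit; a disable
+ * call (en == false) clears it again.
+ */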
+
+static int adxl345_set_act_inact_en(struct adxl345_state *st,
+ enum adxl345_activity_type type,
+ bool cmd_en)
+{
+ unsigned int axis_ctrl;
+ unsigned int threshold;
+ unsigned int period;
+ int ret;
+
+ if (cmd_en) {
+ /* When turning on, check if threshold is valid */
+ if (type == ADXL345_ACTIVITY || type == ADXL345_ACTIVITY_AC) {
+ ret = regmap_read(st->regmap,
+ adxl345_act_thresh_reg[type],
+ &threshold);
+ if (ret)
+ return ret;
+ } else {
+ threshold = st->inact_threshold;
+ }
+
+ if (!threshold) /* Just ignore the command if threshold is 0 */
+ return 0;
+
+ /* When turning on inactivity, check if inact time is valid */
+ if (type == ADXL345_INACTIVITY || type == ADXL345_INACTIVITY_AC) {
+ ret = regmap_read(st->regmap,
+ ADXL345_REG_TIME_INACT,
+ &period);
+ if (ret)
+ return ret;
+
+ if (!period)
+ return 0;
+ }
+ } else {
+ /*
+ * When turning off an activity, ensure that the correct
+ * coupling event is specified. This step helps prevent misuse -
+ * for example, if an AC-coupled activity is active and the
+ * current call attempts to turn off a DC-coupled activity, this
+ * inconsistency should be detected here.
+ */
+ if (adxl345_is_act_inact_ac(st, type) <= 0)
+ return 0;
+ }
+
+ /* Start modifying configuration registers */
+ ret = adxl345_set_measure_en(st, false);
+ if (ret)
+ return ret;
+
+ /* Enable axis according to the command */
+ switch (type) {
+ case ADXL345_ACTIVITY:
+ case ADXL345_ACTIVITY_AC:
+ axis_ctrl = ADXL345_ACT_XYZ_EN;
+ break;
+ case ADXL345_INACTIVITY:
+ case ADXL345_INACTIVITY_AC:
+ axis_ctrl = ADXL345_INACT_XYZ_EN;
+ break;
+ case ADXL345_INACTIVITY_FF:
+ axis_ctrl = ADXL345_ACT_INACT_NO_AXIS_EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_assign_bits(st->regmap, ADXL345_REG_ACT_INACT_CTRL,
+ axis_ctrl, cmd_en);
+ if (ret)
+ return ret;
+
+ /* Update AC/DC-coupling according to the command */
+ ret = adxl345_set_act_inact_ac(st, type, cmd_en);
+ if (ret)
+ return ret;
+
+ /* Enable the interrupt line, according to the command */
+ ret = regmap_assign_bits(st->regmap, ADXL345_REG_INT_ENABLE,
+ adxl345_act_int_reg[type], cmd_en);
+ if (ret)
+ return ret;
+
+ /* Set link-bit and auto-sleep only when ACT and INACT are enabled */
+ ret = adxl345_set_act_inact_linkbit(st, type, cmd_en);
+ if (ret)
+ return ret;
+
+ return adxl345_set_measure_en(st, true);
+}
+
/* tap */
static int _adxl345_set_tap_int(struct adxl345_state *st,
@@ -368,9 +864,8 @@ static int adxl345_set_doubletap_en(struct adxl345_state *st, bool en)
* Generally suppress detection of spikes during the latency period as
* double taps here, this is fully optional for double tap detection
*/
- ret = regmap_update_bits(st->regmap, ADXL345_REG_TAP_AXIS,
- ADXL345_REG_TAP_SUPPRESS_MSK,
- en ? ADXL345_REG_TAP_SUPPRESS : 0x00);
+ ret = regmap_assign_bits(st->regmap, ADXL345_REG_TAP_AXIS,
+ ADXL345_REG_TAP_SUPPRESS, en);
if (ret)
return ret;
@@ -466,9 +961,16 @@ static int adxl345_find_odr(struct adxl345_state *st, int val,
static int adxl345_set_odr(struct adxl345_state *st, enum adxl345_odr odr)
{
- return regmap_update_bits(st->regmap, ADXL345_REG_BW_RATE,
+ int ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL345_REG_BW_RATE,
ADXL345_BW_RATE_MSK,
FIELD_PREP(ADXL345_BW_RATE_MSK, odr));
+ if (ret)
+ return ret;
+
+ /* Update the default inactivity time for the new ODR */
+ return adxl345_set_inact_time(st, 0, 0);
}
static int adxl345_find_range(struct adxl345_state *st, int val, int val2,
@@ -489,9 +991,43 @@ static int adxl345_find_range(struct adxl345_state *st, int val, int val2,
static int adxl345_set_range(struct adxl345_state *st, enum adxl345_range range)
{
- return regmap_update_bits(st->regmap, ADXL345_REG_DATA_FORMAT,
+ unsigned int act_threshold, inact_threshold;
+ unsigned int range_old;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADXL345_REG_DATA_FORMAT, &regval);
+ if (ret)
+ return ret;
+ range_old = FIELD_GET(ADXL345_DATA_FORMAT_RANGE, regval);
+
+ ret = regmap_read(st->regmap,
+ adxl345_act_thresh_reg[ADXL345_ACTIVITY],
+ &act_threshold);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(st->regmap, ADXL345_REG_DATA_FORMAT,
ADXL345_DATA_FORMAT_RANGE,
FIELD_PREP(ADXL345_DATA_FORMAT_RANGE, range));
+ if (ret)
+ return ret;
+
+ act_threshold = act_threshold * adxl345_range_factor_tbl[range_old]
+ / adxl345_range_factor_tbl[range];
+ act_threshold = min(U8_MAX, max(1, act_threshold));
+
+ inact_threshold = st->inact_threshold;
+ inact_threshold = inact_threshold * adxl345_range_factor_tbl[range_old]
+ / adxl345_range_factor_tbl[range];
+ inact_threshold = min(U8_MAX, max(1, inact_threshold));
+
+ ret = regmap_write(st->regmap, adxl345_act_thresh_reg[ADXL345_ACTIVITY],
+ act_threshold);
+ if (ret)
+ return ret;
+
+ return adxl345_set_inact_threshold(st, inact_threshold);
}
static int adxl345_read_avail(struct iio_dev *indio_dev,
@@ -624,6 +1160,37 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
return adxl345_set_measure_en(st, true);
}
+static int adxl345_read_mag_config(struct adxl345_state *st,
+ enum iio_event_direction dir,
+ enum adxl345_activity_type type_act,
+ enum adxl345_activity_type type_inact)
+{
+ int ret;
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = adxl345_is_act_inact_en(st, type_act);
+ break;
+ case IIO_EV_DIR_FALLING:
+ ret = adxl345_is_act_inact_en(st, type_inact);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Propagate read errors instead of folding them into "enabled" */
+ return ret < 0 ? ret : !!ret;
+}
+
+static int adxl345_write_mag_config(struct adxl345_state *st,
+ enum iio_event_direction dir,
+ enum adxl345_activity_type type_act,
+ enum adxl345_activity_type type_inact,
+ bool state)
+{
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return adxl345_set_act_inact_en(st, type_act, state);
+ case IIO_EV_DIR_FALLING:
+ return adxl345_set_act_inact_en(st, type_inact, state);
+ default:
+ return -EINVAL;
+ }
+}
+
static int adxl345_read_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -634,6 +1201,14 @@ static int adxl345_read_event_config(struct iio_dev *indio_dev,
int ret;
switch (type) {
+ case IIO_EV_TYPE_MAG:
+ return adxl345_read_mag_config(st, dir,
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY);
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ return adxl345_read_mag_config(st, dir,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC);
case IIO_EV_TYPE_GESTURE:
switch (dir) {
case IIO_EV_DIR_SINGLETAP:
@@ -665,6 +1240,16 @@ static int adxl345_write_event_config(struct iio_dev *indio_dev,
struct adxl345_state *st = iio_priv(indio_dev);
switch (type) {
+ case IIO_EV_TYPE_MAG:
+ return adxl345_write_mag_config(st, dir,
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY,
+ state);
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ return adxl345_write_mag_config(st, dir,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC,
+ state);
case IIO_EV_TYPE_GESTURE:
switch (dir) {
case IIO_EV_DIR_SINGLETAP:
@@ -679,6 +1264,72 @@ static int adxl345_write_event_config(struct iio_dev *indio_dev,
}
}
+static int adxl345_read_mag_value(struct adxl345_state *st,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ enum adxl345_activity_type type_act,
+ enum adxl345_activity_type type_inact,
+ int *val, int *val2)
+{
+ unsigned int threshold;
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = regmap_read(st->regmap,
+ adxl345_act_thresh_reg[type_act],
+ &threshold);
+ if (ret)
+ return ret;
+ *val = 62500 * threshold;
+ *val2 = MICRO;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_EV_DIR_FALLING:
+ *val = 62500 * st->inact_threshold;
+ *val2 = MICRO;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ *val = st->inact_time_ms;
+ *val2 = MILLI;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adxl345_write_mag_value(struct adxl345_state *st,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ enum adxl345_activity_type type_act,
+ enum adxl345_activity_type type_inact,
+ int val, int val2)
+{
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ /* Scaling factor 62.5mg/LSB, i.e. ~16g corresponds to 0xff */
+ val = DIV_ROUND_CLOSEST(val * MICRO + val2, 62500);
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return regmap_write(st->regmap,
+ adxl345_act_thresh_reg[type_act],
+ val);
+ case IIO_EV_DIR_FALLING:
+ return adxl345_set_inact_threshold(st, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_PERIOD:
+ return adxl345_set_inact_time(st, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
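+
+/*
+ * Worked example (illustrative only): requesting a 1 g activity threshold
+ * arrives here as val = 1, val2 = 0 and is converted with
+ * DIV_ROUND_CLOSEST(1 * MICRO + 0, 62500) = 16 LSB for the THRESH
+ * register; reading it back yields 16 * 62500 / MICRO = 1.000000 g.
+ */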
+
static int adxl345_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -691,6 +1342,16 @@ static int adxl345_read_event_value(struct iio_dev *indio_dev,
int ret;
switch (type) {
+ case IIO_EV_TYPE_MAG:
+ return adxl345_read_mag_value(st, dir, info,
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY,
+ val, val2);
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ return adxl345_read_mag_value(st, dir, info,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC,
+ val, val2);
case IIO_EV_TYPE_GESTURE:
switch (info) {
case IIO_EV_INFO_VALUE:
@@ -741,6 +1402,22 @@ static int adxl345_write_event_value(struct iio_dev *indio_dev,
return ret;
switch (type) {
+ case IIO_EV_TYPE_MAG:
+ ret = adxl345_write_mag_value(st, dir, info,
+ ADXL345_ACTIVITY,
+ ADXL345_INACTIVITY,
+ val, val2);
+ if (ret)
+ return ret;
+ break;
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ ret = adxl345_write_mag_value(st, dir, info,
+ ADXL345_ACTIVITY_AC,
+ ADXL345_INACTIVITY_AC,
+ val, val2);
+ if (ret)
+ return ret;
+ break;
case IIO_EV_TYPE_GESTURE:
switch (info) {
case IIO_EV_INFO_VALUE:
@@ -980,10 +1657,12 @@ static int adxl345_fifo_push(struct iio_dev *indio_dev,
}
static int adxl345_push_event(struct iio_dev *indio_dev, int int_stat,
+ enum iio_modifier act_dir,
enum iio_modifier tap_dir)
{
s64 ts = iio_get_time_ns(indio_dev);
struct adxl345_state *st = iio_priv(indio_dev);
+ unsigned int regval;
int samples;
int ret = -ENOENT;
@@ -1007,6 +1686,68 @@ static int adxl345_push_event(struct iio_dev *indio_dev, int int_stat,
return ret;
}
+ if (FIELD_GET(ADXL345_INT_ACTIVITY, int_stat)) {
+ ret = regmap_read(st->regmap, ADXL345_REG_ACT_INACT_CTRL, &regval);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(ADXL345_REG_ACT_ACDC, regval)) {
+ /* AC coupled */
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, act_dir,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+ IIO_EV_DIR_RISING),
+ ts);
+
+ } else {
+ /* DC coupled, relying on THRESH */
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, act_dir,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ ts);
+ }
+ if (ret)
+ return ret;
+ }
+
+ if (FIELD_GET(ADXL345_INT_INACTIVITY, int_stat)) {
+ ret = regmap_read(st->regmap, ADXL345_REG_ACT_INACT_CTRL, &regval);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(ADXL345_REG_INACT_ACDC, regval)) {
+ /* AC coupled */
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+ IIO_EV_DIR_FALLING),
+ ts);
+ } else {
+ /* DC coupled, relying on THRESH */
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ ts);
+ }
+ if (ret)
+ return ret;
+ }
+
+ if (FIELD_GET(ADXL345_INT_FREE_FALL, int_stat)) {
+ ret = iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_FALLING),
+ ts);
+ if (ret)
+ return ret;
+ }
+
if (FIELD_GET(ADXL345_INT_WATERMARK, int_stat)) {
samples = adxl345_get_samples(st);
if (samples < 0)
@@ -1034,6 +1775,7 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
struct adxl345_state *st = iio_priv(indio_dev);
unsigned int regval;
enum iio_modifier tap_dir = IIO_NO_MOD;
+ enum iio_modifier act_dir = IIO_NO_MOD;
u32 axis_ctrl;
int int_stat;
int ret;
@@ -1042,7 +1784,8 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
if (ret)
return IRQ_NONE;
- if (FIELD_GET(ADXL345_REG_TAP_AXIS_MSK, axis_ctrl)) {
+ if (FIELD_GET(ADXL345_REG_TAP_AXIS_MSK, axis_ctrl) ||
+ FIELD_GET(ADXL345_ACT_XYZ_EN, axis_ctrl)) {
ret = regmap_read(st->regmap, ADXL345_REG_ACT_TAP_STATUS, &regval);
if (ret)
return IRQ_NONE;
@@ -1053,12 +1796,19 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
tap_dir = IIO_MOD_Y;
else if (FIELD_GET(ADXL345_TAP_X_EN, regval))
tap_dir = IIO_MOD_X;
+
+ if (FIELD_GET(ADXL345_ACT_Z_EN, regval))
+ act_dir = IIO_MOD_Z;
+ else if (FIELD_GET(ADXL345_ACT_Y_EN, regval))
+ act_dir = IIO_MOD_Y;
+ else if (FIELD_GET(ADXL345_ACT_X_EN, regval))
+ act_dir = IIO_MOD_X;
}
if (regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &int_stat))
return IRQ_NONE;
- if (adxl345_push_event(indio_dev, int_stat, tap_dir))
+ if (adxl345_push_event(indio_dev, int_stat, act_dir, tap_dir))
goto err;
if (FIELD_GET(ADXL345_INT_OVERRUN, int_stat))
@@ -1226,6 +1976,24 @@ int adxl345_core_probe(struct device *dev, struct regmap *regmap,
if (ret)
return ret;
+ /*
+ * Initialize with sensible default values to streamline sensor
+ * operation. These defaults are partly derived from the previous
+ * input driver for the ADXL345 and partly based on the
+ * recommendations in the datasheet.
+ */
+ ret = regmap_write(st->regmap, ADXL345_REG_ACT_INACT_CTRL, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADXL345_REG_THRESH_ACT, 6);
+ if (ret)
+ return ret;
+
+ ret = adxl345_set_inact_threshold(st, 4);
+ if (ret)
+ return ret;
+
ret = regmap_write(st->regmap, ADXL345_REG_THRESH_TAP, tap_threshold);
if (ret)
return ret;
diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c
index 2e00fd51b4d5..5fc7f814b907 100644
--- a/drivers/iio/accel/adxl355_core.c
+++ b/drivers/iio/accel/adxl355_core.c
@@ -56,6 +56,8 @@
#define ADXL355_POWER_CTL_DRDY_MSK BIT(2)
#define ADXL355_SELF_TEST_REG 0x2E
#define ADXL355_RESET_REG 0x2F
+#define ADXL355_BASE_ADDR_SHADOW_REG 0x50
+#define ADXL355_SHADOW_REG_COUNT 5
#define ADXL355_DEVID_AD_VAL 0xAD
#define ADXL355_DEVID_MST_VAL 0x1D
@@ -294,7 +296,12 @@ static void adxl355_fill_3db_frequency_table(struct adxl355_data *data)
static int adxl355_setup(struct adxl355_data *data)
{
unsigned int regval;
+ int retries = 5; /* retry count chosen empirically */
int ret;
+ u8 *shadow_regs __free(kfree) = kzalloc(ADXL355_SHADOW_REG_COUNT, GFP_KERNEL);
+
+ if (!shadow_regs)
+ return -ENOMEM;
ret = regmap_read(data->regmap, ADXL355_DEVID_AD_REG, &regval);
if (ret)
@@ -321,14 +328,41 @@ static int adxl355_setup(struct adxl355_data *data)
if (regval != ADXL355_PARTID_VAL)
dev_warn(data->dev, "Invalid DEV ID 0x%02x\n", regval);
- /*
- * Perform a software reset to make sure the device is in a consistent
- * state after start-up.
- */
- ret = regmap_write(data->regmap, ADXL355_RESET_REG, ADXL355_RESET_CODE);
+ /* Read shadow registers to be compared after reset */
+ ret = regmap_bulk_read(data->regmap,
+ ADXL355_BASE_ADDR_SHADOW_REG,
+ shadow_regs, ADXL355_SHADOW_REG_COUNT);
if (ret)
return ret;
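+
+ /*
+ * Note (rationale is an assumption, not stated in this patch): the
+ * shadow registers at 0x50..0x54 should read back unchanged after a
+ * soft reset; a mismatch suggests the reset completed before their
+ * contents were reloaded, so the reset is retried before giving up
+ * with -EIO.
+ */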
+ do {
+ if (--retries == 0) {
+ dev_err(data->dev, "Shadow registers mismatch\n");
+ return -EIO;
+ }
+
+ /*
+ * Perform a software reset to make sure the device is in a consistent
+ * state after start-up.
+ */
+ ret = regmap_write(data->regmap, ADXL355_RESET_REG,
+ ADXL355_RESET_CODE);
+ if (ret)
+ return ret;
+
+ /* Wait at least 5ms after software reset */
+ usleep_range(5000, 10000);
+
+ /* Read shadow registers for comparison */
+ ret = regmap_bulk_read(data->regmap,
+ ADXL355_BASE_ADDR_SHADOW_REG,
+ data->buffer.buf,
+ ADXL355_SHADOW_REG_COUNT);
+ if (ret)
+ return ret;
+ } while (memcmp(shadow_regs, data->buffer.buf,
+ ADXL355_SHADOW_REG_COUNT));
+
ret = regmap_update_bits(data->regmap, ADXL355_POWER_CTL_REG,
ADXL355_POWER_CTL_DRDY_MSK,
FIELD_PREP(ADXL355_POWER_CTL_DRDY_MSK, 1));
diff --git a/drivers/iio/accel/adxl380.c b/drivers/iio/accel/adxl380.c
index 0cf3c6815829..6d5f1a0d51e9 100644
--- a/drivers/iio/accel/adxl380.c
+++ b/drivers/iio/accel/adxl380.c
@@ -26,7 +26,9 @@
#include "adxl380.h"
#define ADXL380_ID_VAL 380
+#define ADXL318_ID_VAL 380
#define ADXL382_ID_VAL 382
+#define ADXL319_ID_VAL 382
#define ADXL380_DEVID_AD_REG 0x00
#define ADLX380_PART_ID_REG 0x02
@@ -178,41 +180,6 @@ enum adxl380_tap_time_type {
static const int adxl380_range_scale_factor_tbl[] = { 1, 2, 4 };
-const struct adxl380_chip_info adxl380_chip_info = {
- .name = "adxl380",
- .chip_id = ADXL380_ID_VAL,
- .scale_tbl = {
- [ADXL380_OP_MODE_4G_RANGE] = { 0, 1307226 },
- [ADXL380_OP_MODE_8G_RANGE] = { 0, 2615434 },
- [ADXL380_OP_MODE_16G_RANGE] = { 0, 5229886 },
- },
- .samp_freq_tbl = { 8000, 16000, 32000 },
- /*
- * The datasheet defines an intercept of 470 LSB at 25 degC
- * and a sensitivity of 10.2 LSB/C.
- */
- .temp_offset = 25 * 102 / 10 - 470,
-
-};
-EXPORT_SYMBOL_NS_GPL(adxl380_chip_info, "IIO_ADXL380");
-
-const struct adxl380_chip_info adxl382_chip_info = {
- .name = "adxl382",
- .chip_id = ADXL382_ID_VAL,
- .scale_tbl = {
- [ADXL382_OP_MODE_15G_RANGE] = { 0, 4903325 },
- [ADXL382_OP_MODE_30G_RANGE] = { 0, 9806650 },
- [ADXL382_OP_MODE_60G_RANGE] = { 0, 19613300 },
- },
- .samp_freq_tbl = { 16000, 32000, 64000 },
- /*
- * The datasheet defines an intercept of 570 LSB at 25 degC
- * and a sensitivity of 10.2 LSB/C.
- */
- .temp_offset = 25 * 102 / 10 - 570,
-};
-EXPORT_SYMBOL_NS_GPL(adxl382_chip_info, "IIO_ADXL380");
-
static const unsigned int adxl380_th_reg_high_addr[2] = {
[ADXL380_ACTIVITY] = ADXL380_THRESH_ACT_H_REG,
[ADXL380_INACTIVITY] = ADXL380_THRESH_INACT_H_REG,
@@ -276,9 +243,14 @@ static int adxl380_set_measure_en(struct adxl380_state *st, bool en)
if (ret)
return ret;
- /* Activity/ Inactivity detection available only in VLP/ULP mode */
- if (FIELD_GET(ADXL380_ACT_EN_MSK, act_inact_ctl) ||
- FIELD_GET(ADXL380_INACT_EN_MSK, act_inact_ctl))
+ /*
+ * Activity/Inactivity detection available only in VLP/ULP
+ * mode and for devices that support low power modes. Otherwise
+ * go straight to measure mode (same bits as ADXL380_OP_MODE_HP).
+ */
+ if (st->chip_info->has_low_power &&
+ (FIELD_GET(ADXL380_ACT_EN_MSK, act_inact_ctl) ||
+ FIELD_GET(ADXL380_INACT_EN_MSK, act_inact_ctl)))
op_mode = ADXL380_OP_MODE_VLP;
else
op_mode = ADXL380_OP_MODE_HP;
@@ -1618,6 +1590,15 @@ static int adxl380_set_watermark(struct iio_dev *indio_dev, unsigned int val)
return 0;
}
+static const struct iio_info adxl318_info = {
+ .read_raw = adxl380_read_raw,
+ .read_avail = &adxl380_read_avail,
+ .write_raw = adxl380_write_raw,
+ .write_raw_get_fmt = adxl380_write_raw_get_fmt,
+ .debugfs_reg_access = &adxl380_reg_access,
+ .hwfifo_set_watermark = adxl380_set_watermark,
+};
+
static const struct iio_info adxl380_info = {
.read_raw = adxl380_read_raw,
.read_avail = &adxl380_read_avail,
@@ -1632,6 +1613,81 @@ static const struct iio_info adxl380_info = {
.hwfifo_set_watermark = adxl380_set_watermark,
};
+const struct adxl380_chip_info adxl318_chip_info = {
+ .name = "adxl318",
+ .chip_id = ADXL318_ID_VAL,
+ .scale_tbl = {
+ [ADXL380_OP_MODE_4G_RANGE] = { 0, 1307226 },
+ [ADXL380_OP_MODE_8G_RANGE] = { 0, 2615434 },
+ [ADXL380_OP_MODE_16G_RANGE] = { 0, 5229886 },
+ },
+ .samp_freq_tbl = { 8000, 16000, 32000 },
+ /*
+ * The datasheet defines an intercept of 550 LSB at 25 degC
+ * and a sensitivity of 10.2 LSB/C.
+ */
+ .temp_offset = 25 * 102 / 10 - 550,
+ .info = &adxl318_info,
+};
+EXPORT_SYMBOL_NS_GPL(adxl318_chip_info, "IIO_ADXL380");
+
+const struct adxl380_chip_info adxl319_chip_info = {
+ .name = "adxl319",
+ .chip_id = ADXL319_ID_VAL,
+ .scale_tbl = {
+ [ADXL382_OP_MODE_15G_RANGE] = { 0, 4903325 },
+ [ADXL382_OP_MODE_30G_RANGE] = { 0, 9806650 },
+ [ADXL382_OP_MODE_60G_RANGE] = { 0, 19613300 },
+ },
+ .samp_freq_tbl = { 16000, 32000, 64000 },
+ /*
+ * The datasheet defines an intercept of 550 LSB at 25 degC
+ * and a sensitivity of 10.2 LSB/C.
+ */
+ .temp_offset = 25 * 102 / 10 - 550,
+ .info = &adxl318_info,
+};
+EXPORT_SYMBOL_NS_GPL(adxl319_chip_info, "IIO_ADXL380");
+
+const struct adxl380_chip_info adxl380_chip_info = {
+ .name = "adxl380",
+ .chip_id = ADXL380_ID_VAL,
+ .scale_tbl = {
+ [ADXL380_OP_MODE_4G_RANGE] = { 0, 1307226 },
+ [ADXL380_OP_MODE_8G_RANGE] = { 0, 2615434 },
+ [ADXL380_OP_MODE_16G_RANGE] = { 0, 5229886 },
+ },
+ .samp_freq_tbl = { 8000, 16000, 32000 },
+ /*
+ * The datasheet defines an intercept of 470 LSB at 25 degC
+ * and a sensitivity of 10.2 LSB/C.
+ */
+ .temp_offset = 25 * 102 / 10 - 470,
+ .has_low_power = true,
+ .info = &adxl380_info,
+
+};
+EXPORT_SYMBOL_NS_GPL(adxl380_chip_info, "IIO_ADXL380");
+
+const struct adxl380_chip_info adxl382_chip_info = {
+ .name = "adxl382",
+ .chip_id = ADXL382_ID_VAL,
+ .scale_tbl = {
+ [ADXL382_OP_MODE_15G_RANGE] = { 0, 4903325 },
+ [ADXL382_OP_MODE_30G_RANGE] = { 0, 9806650 },
+ [ADXL382_OP_MODE_60G_RANGE] = { 0, 19613300 },
+ },
+ .samp_freq_tbl = { 16000, 32000, 64000 },
+ /*
+ * The datasheet defines an intercept of 570 LSB at 25 degC
+ * and a sensitivity of 10.2 LSB/C.
+ */
+ .temp_offset = 25 * 102 / 10 - 570,
+ .has_low_power = true,
+ .info = &adxl380_info,
+};
+EXPORT_SYMBOL_NS_GPL(adxl382_chip_info, "IIO_ADXL380");
+
static const struct iio_event_spec adxl380_events[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -1866,7 +1922,7 @@ int adxl380_probe(struct device *dev, struct regmap *regmap,
indio_dev->channels = adxl380_channels;
indio_dev->num_channels = ARRAY_SIZE(adxl380_channels);
indio_dev->name = chip_info->name;
- indio_dev->info = &adxl380_info;
+ indio_dev->info = chip_info->info;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_regulator_get_enable(dev, "vddio");
diff --git a/drivers/iio/accel/adxl380.h b/drivers/iio/accel/adxl380.h
index a683625d897a..e67c5aab8efc 100644
--- a/drivers/iio/accel/adxl380.h
+++ b/drivers/iio/accel/adxl380.h
@@ -12,10 +12,14 @@ struct adxl380_chip_info {
const char *name;
const int scale_tbl[3][2];
const int samp_freq_tbl[3];
+ const struct iio_info *info;
const int temp_offset;
const u16 chip_id;
+ const bool has_low_power;
};
+extern const struct adxl380_chip_info adxl318_chip_info;
+extern const struct adxl380_chip_info adxl319_chip_info;
extern const struct adxl380_chip_info adxl380_chip_info;
extern const struct adxl380_chip_info adxl382_chip_info;
diff --git a/drivers/iio/accel/adxl380_i2c.c b/drivers/iio/accel/adxl380_i2c.c
index b4f86f972361..bd8782d08c7d 100644
--- a/drivers/iio/accel/adxl380_i2c.c
+++ b/drivers/iio/accel/adxl380_i2c.c
@@ -33,6 +33,8 @@ static int adxl380_i2c_probe(struct i2c_client *client)
}
static const struct i2c_device_id adxl380_i2c_id[] = {
+ { "adxl318", (kernel_ulong_t)&adxl318_chip_info },
+ { "adxl319", (kernel_ulong_t)&adxl319_chip_info },
{ "adxl380", (kernel_ulong_t)&adxl380_chip_info },
{ "adxl382", (kernel_ulong_t)&adxl382_chip_info },
{ }
@@ -40,6 +42,8 @@ static const struct i2c_device_id adxl380_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, adxl380_i2c_id);
static const struct of_device_id adxl380_of_match[] = {
+ { .compatible = "adi,adxl318", .data = &adxl318_chip_info },
+ { .compatible = "adi,adxl319", .data = &adxl319_chip_info },
{ .compatible = "adi,adxl380", .data = &adxl380_chip_info },
{ .compatible = "adi,adxl382", .data = &adxl382_chip_info },
{ }
diff --git a/drivers/iio/accel/adxl380_spi.c b/drivers/iio/accel/adxl380_spi.c
index 6edd0d211ffa..4ead949b24f1 100644
--- a/drivers/iio/accel/adxl380_spi.c
+++ b/drivers/iio/accel/adxl380_spi.c
@@ -35,6 +35,8 @@ static int adxl380_spi_probe(struct spi_device *spi)
}
static const struct spi_device_id adxl380_spi_id[] = {
+ { "adxl318", (kernel_ulong_t)&adxl318_chip_info },
+ { "adxl319", (kernel_ulong_t)&adxl319_chip_info },
{ "adxl380", (kernel_ulong_t)&adxl380_chip_info },
{ "adxl382", (kernel_ulong_t)&adxl382_chip_info },
{ }
@@ -42,6 +44,8 @@ static const struct spi_device_id adxl380_spi_id[] = {
MODULE_DEVICE_TABLE(spi, adxl380_spi_id);
static const struct of_device_id adxl380_of_match[] = {
+ { .compatible = "adi,adxl318", .data = &adxl318_chip_info },
+ { .compatible = "adi,adxl319", .data = &adxl319_chip_info },
{ .compatible = "adi,adxl380", .data = &adxl380_chip_info },
{ .compatible = "adi,adxl382", .data = &adxl382_chip_info },
{ }
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 4fccbcb76e04..8925f5279e62 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -139,11 +139,6 @@ struct bma180_data {
int scale;
int bw;
bool pmode;
- /* Ensure timestamp is naturally aligned */
- struct {
- s16 chan[4];
- aligned_s64 timestamp;
- } scan;
};
enum bma180_chan {
@@ -870,6 +865,10 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
struct bma180_data *data = iio_priv(indio_dev);
s64 time_ns = iio_get_time_ns(indio_dev);
int bit, ret, i = 0;
+ struct {
+ s16 chan[4];
+ aligned_s64 timestamp;
+ } scan = { };
mutex_lock(&data->mutex);
@@ -879,12 +878,12 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
mutex_unlock(&data->mutex);
goto err;
}
- data->scan.chan[i++] = ret;
+ scan.chan[i++] = ret;
}
mutex_unlock(&data->mutex);
- iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan), time_ns);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), time_ns);
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/bma220.h b/drivers/iio/accel/bma220.h
new file mode 100644
index 000000000000..00dfe275256b
--- /dev/null
+++ b/drivers/iio/accel/bma220.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Forward declarations needed by the bma220 sources.
+ *
+ * Copyright 2025 Petre Rodan <petre.rodan@subdimension.ro>
+ */
+
+#ifndef _BMA220_H
+#define _BMA220_H
+
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+#define BMA220_REG_WDT 0x17
+#define BMA220_WDT_MASK GENMASK(2, 1)
+#define BMA220_WDT_OFF 0x0
+#define BMA220_WDT_1MS 0x2
+#define BMA220_WDT_10MS 0x3
+
+struct device;
+
+extern const struct regmap_config bma220_i2c_regmap_config;
+extern const struct regmap_config bma220_spi_regmap_config;
+extern const struct dev_pm_ops bma220_pm_ops;
+
+int bma220_common_probe(struct device *dev, struct regmap *regmap, int irq);
+
+#endif
diff --git a/drivers/iio/accel/bma220_core.c b/drivers/iio/accel/bma220_core.c
new file mode 100644
index 000000000000..f32d875b994e
--- /dev/null
+++ b/drivers/iio/accel/bma220_core.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * BMA220 Digital triaxial acceleration sensor driver
+ *
+ * Copyright (c) 2016,2020 Intel Corporation.
+ * Copyright (c) 2025 Petre Rodan <petre.rodan@subdimension.ro>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "bma220.h"
+
+#define BMA220_REG_ID 0x00
+#define BMA220_REG_REVISION_ID 0x01
+#define BMA220_REG_ACCEL_X 0x02
+#define BMA220_REG_ACCEL_Y 0x03
+#define BMA220_REG_ACCEL_Z 0x04
+#define BMA220_REG_CONF0 0x05
+#define BMA220_HIGH_DUR_MSK GENMASK(5, 0)
+#define BMA220_HIGH_HY_MSK GENMASK(7, 6)
+#define BMA220_REG_CONF1 0x06
+#define BMA220_HIGH_TH_MSK GENMASK(3, 0)
+#define BMA220_LOW_TH_MSK GENMASK(7, 4)
+#define BMA220_REG_CONF2 0x07
+#define BMA220_LOW_DUR_MSK GENMASK(5, 0)
+#define BMA220_LOW_HY_MSK GENMASK(7, 6)
+#define BMA220_REG_CONF3 0x08
+#define BMA220_TT_DUR_MSK GENMASK(2, 0)
+#define BMA220_TT_TH_MSK GENMASK(6, 3)
+#define BMA220_REG_CONF4 0x09
+#define BMA220_SLOPE_DUR_MSK GENMASK(1, 0)
+#define BMA220_SLOPE_TH_MSK GENMASK(5, 2)
+#define BMA220_REG_CONF5 0x0a
+#define BMA220_TIP_EN_MSK BIT(4)
+#define BMA220_REG_IF0 0x0b
+#define BMA220_REG_IF1 0x0c
+#define BMA220_IF_SLOPE BIT(0)
+#define BMA220_IF_DRDY BIT(1)
+#define BMA220_IF_HIGH BIT(2)
+#define BMA220_IF_LOW BIT(3)
+#define BMA220_IF_TT BIT(4)
+#define BMA220_REG_IE0 0x0d
+#define BMA220_INT_EN_TAP_Z_MSK BIT(0)
+#define BMA220_INT_EN_TAP_Y_MSK BIT(1)
+#define BMA220_INT_EN_TAP_X_MSK BIT(2)
+#define BMA220_INT_EN_SLOPE_Z_MSK BIT(3)
+#define BMA220_INT_EN_SLOPE_Y_MSK BIT(4)
+#define BMA220_INT_EN_SLOPE_X_MSK BIT(5)
+#define BMA220_INT_EN_DRDY_MSK BIT(7)
+#define BMA220_REG_IE1 0x0e
+#define BMA220_INT_EN_HIGH_Z_MSK BIT(0)
+#define BMA220_INT_EN_HIGH_Y_MSK BIT(1)
+#define BMA220_INT_EN_HIGH_X_MSK BIT(2)
+#define BMA220_INT_EN_LOW_MSK BIT(3)
+#define BMA220_INT_LATCH_MSK GENMASK(6, 4)
+#define BMA220_INT_RST_MSK BIT(7)
+#define BMA220_REG_IE2 0x0f
+#define BMA220_REG_FILTER 0x10
+#define BMA220_FILTER_MASK GENMASK(3, 0)
+#define BMA220_REG_RANGE 0x11
+#define BMA220_RANGE_MASK GENMASK(1, 0)
+#define BMA220_REG_SUSPEND 0x18
+#define BMA220_REG_SOFTRESET 0x19
+
+#define BMA220_CHIP_ID 0xDD
+#define BMA220_SUSPEND_SLEEP 0xFF
+#define BMA220_SUSPEND_WAKE 0x00
+#define BMA220_RESET_MODE 0xFF
+#define BMA220_NONRESET_MODE 0x00
+
+#define BMA220_DEVICE_NAME "bma220"
+
+#define BMA220_COF_1000Hz 0x0
+#define BMA220_COF_500Hz 0x1
+#define BMA220_COF_250Hz 0x2
+#define BMA220_COF_125Hz 0x3
+#define BMA220_COF_64Hz 0x4
+#define BMA220_COF_32Hz 0x5
+
+#define BMA220_ACCEL_CHANNEL(index, reg, axis) { \
+ .type = IIO_ACCEL, \
+ .address = reg, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) |\
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 6, \
+ .storagebits = 8, \
+ .shift = 2, \
+ .endianness = IIO_CPU, \
+ }, \
+}
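+
+/*
+ * Illustrative note: each axis register holds a signed 6-bit sample in
+ * its upper bits, hence .shift = 2 above. A raw register value of 0xFC,
+ * for example, decodes as sign_extend32(0xFC >> 2, 5) = -1 LSB.
+ */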
+
+enum bma220_axis {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z,
+};
+
+static const int bma220_scale_table[][2] = {
+ { 0, 623000 }, { 1, 248000 }, { 2, 491000 }, { 4, 983000 },
+};
+
+struct bma220_data {
+ struct regmap *regmap;
+ struct mutex lock;
+ u8 lpf_3dB_freq_idx;
+ u8 range_idx;
+ struct iio_trigger *trig;
+ struct {
+ s8 chans[3];
+ /* Ensure timestamp is naturally aligned. */
+ aligned_s64 timestamp;
+ } scan __aligned(IIO_DMA_MINALIGN);
+};
+
+static const struct iio_chan_spec bma220_channels[] = {
+ BMA220_ACCEL_CHANNEL(0, BMA220_REG_ACCEL_X, X),
+ BMA220_ACCEL_CHANNEL(1, BMA220_REG_ACCEL_Y, Y),
+ BMA220_ACCEL_CHANNEL(2, BMA220_REG_ACCEL_Z, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+/* Available cut-off frequencies of the low pass filter in Hz. */
+static const int bma220_lpf_3dB_freq_Hz_table[] = {
+ [BMA220_COF_1000Hz] = 1000,
+ [BMA220_COF_500Hz] = 500,
+ [BMA220_COF_250Hz] = 250,
+ [BMA220_COF_125Hz] = 125,
+ [BMA220_COF_64Hz] = 64,
+ [BMA220_COF_32Hz] = 32,
+};
+
+static const unsigned long bma220_accel_scan_masks[] = {
+ BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+ 0
+};
+
+static bool bma220_is_writable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMA220_REG_CONF0:
+ case BMA220_REG_CONF1:
+ case BMA220_REG_CONF2:
+ case BMA220_REG_CONF3:
+ case BMA220_REG_CONF4:
+ case BMA220_REG_CONF5:
+ case BMA220_REG_IE0:
+ case BMA220_REG_IE1:
+ case BMA220_REG_IE2:
+ case BMA220_REG_FILTER:
+ case BMA220_REG_RANGE:
+ case BMA220_REG_WDT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+const struct regmap_config bma220_spi_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = BIT(7),
+ .max_register = BMA220_REG_SOFTRESET,
+ .cache_type = REGCACHE_NONE,
+ .writeable_reg = bma220_is_writable_reg,
+};
+EXPORT_SYMBOL_NS_GPL(bma220_spi_regmap_config, "IIO_BOSCH_BMA220");
+
+/*
+ * Based on the datasheet, the memory map differs between the SPI and the I2C
+ * implementations: I2C register addresses are simply shifted to the left
+ * by 1 bit, while the register size remains unchanged.
+ * This driver uses the SPI memory map to correlate register names with
+ * addresses regardless of the bus type.
+ */
+
+const struct regmap_config bma220_i2c_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_shift = -1,
+ .max_register = BMA220_REG_SOFTRESET,
+ .cache_type = REGCACHE_NONE,
+ .writeable_reg = bma220_is_writable_reg,
+};
+EXPORT_SYMBOL_NS_GPL(bma220_i2c_regmap_config, "IIO_BOSCH_BMA220");
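+
+/*
+ * Worked example (illustrative only): with .reg_shift = -1 the regmap core
+ * upshifts register addresses by one bit, so an access to the SPI-map
+ * address BMA220_REG_RANGE (0x11) goes out on the I2C bus as address 0x22,
+ * matching the datasheet's I2C memory map.
+ */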
+
+static int bma220_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ return regmap_update_bits(data->regmap, BMA220_REG_IE0,
+ BMA220_INT_EN_DRDY_MSK,
+ FIELD_PREP(BMA220_INT_EN_DRDY_MSK, state));
+}
+
+static const struct iio_trigger_ops bma220_trigger_ops = {
+ .set_trigger_state = &bma220_data_rdy_trigger_set_state,
+ .validate_device = &iio_trigger_validate_own_device,
+};
+
+static irqreturn_t bma220_trigger_handler(int irq, void *p)
+{
+ int ret;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ ret = regmap_bulk_read(data->regmap, BMA220_REG_ACCEL_X,
+ &data->scan.chans,
+ sizeof(data->scan.chans));
+ if (ret < 0)
+ return IRQ_NONE;
+
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_get_time_ns(indio_dev));
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bma220_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ u8 index;
+ unsigned int reg;
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(data->regmap, chan->address, &reg);
+ if (ret < 0)
+ return -EINVAL;
+ *val = sign_extend32(reg >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ index = data->range_idx;
+ *val = bma220_scale_table[index][0];
+ *val2 = bma220_scale_table[index][1];
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ index = data->lpf_3dB_freq_idx;
+ *val = bma220_lpf_3dB_freq_Hz_table[index];
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int bma220_find_match_2dt(const int (*tbl)[2], const int n,
+ const int val, const int val2)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (tbl[i][0] == val && tbl[i][1] == val2)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int bma220_find_match(const int *arr, const int n, const int val)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (arr[i] == val)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int bma220_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+ int index = -1;
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ index = bma220_find_match_2dt(bma220_scale_table,
+ ARRAY_SIZE(bma220_scale_table),
+ val, val2);
+ if (index < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, BMA220_REG_RANGE,
+ BMA220_RANGE_MASK,
+ FIELD_PREP(BMA220_RANGE_MASK, index));
+ if (ret < 0)
+ return ret;
+ data->range_idx = index;
+
+ return 0;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ index = bma220_find_match(bma220_lpf_3dB_freq_Hz_table,
+ ARRAY_SIZE(bma220_lpf_3dB_freq_Hz_table),
+ val);
+ if (index < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(data->regmap, BMA220_REG_FILTER,
+ BMA220_FILTER_MASK,
+ FIELD_PREP(BMA220_FILTER_MASK, index));
+ if (ret < 0)
+ return ret;
+ data->lpf_3dB_freq_idx = index;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int bma220_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)bma220_scale_table;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = ARRAY_SIZE(bma220_scale_table) * 2;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = (const int *)bma220_lpf_3dB_freq_Hz_table;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(bma220_lpf_3dB_freq_Hz_table);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
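+
+/*
+ * Illustrative note: the IIO core renders the IIO_VAL_INT_PLUS_MICRO pairs
+ * above in sysfs as, e.g.,
+ * in_accel_scale_available = "0.623000 1.248000 2.491000 4.983000".
+ */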
+
+static int bma220_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(data->regmap, reg, readval);
+ return regmap_write(data->regmap, reg, writeval);
+}
+
+static const struct iio_info bma220_info = {
+ .read_raw = bma220_read_raw,
+ .write_raw = bma220_write_raw,
+ .read_avail = bma220_read_avail,
+ .debugfs_reg_access = &bma220_reg_access,
+};
+
+static int bma220_reset(struct bma220_data *data, bool up)
+{
+ int ret;
+ unsigned int i, val;
+
+ /*
+ * The chip can be reset by a simple register read.
+ * We need up to 2 register reads of the softreset register
+ * to make sure that the device is in the desired state.
+ */
+ for (i = 0; i < 2; i++) {
+ ret = regmap_read(data->regmap, BMA220_REG_SOFTRESET, &val);
+ if (ret < 0)
+ return ret;
+
+ if (up && val == BMA220_RESET_MODE)
+ return 0;
+
+ if (!up && val == BMA220_NONRESET_MODE)
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int bma220_power(struct bma220_data *data, bool up)
+{
+ int ret;
+ unsigned int i, val;
+
+ /*
+ * The chip can be suspended/woken up by a simple register read.
+ * So, we need up to 2 register reads of the suspend register
+ * to make sure that the device is in the desired state.
+ */
+ for (i = 0; i < 2; i++) {
+ ret = regmap_read(data->regmap, BMA220_REG_SUSPEND, &val);
+ if (ret < 0)
+ return ret;
+
+ if (up && val == BMA220_SUSPEND_SLEEP)
+ return 0;
+
+ if (!up && val == BMA220_SUSPEND_WAKE)
+ return 0;
+ }
+
+ return -EBUSY;
+}
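+
+/*
+ * Illustrative walk-through (return-value semantics are an assumption
+ * based on the comment above): waking a suspended chip, the first read of
+ * the suspend register returns BMA220_SUSPEND_SLEEP (0xFF) and the loop
+ * succeeds immediately; if the chip was already awake, the first read
+ * returns 0x00 and suspends it, and the second read wakes it again.
+ */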
+
+static int bma220_init(struct device *dev, struct bma220_data *data)
+{
+ int ret;
+ unsigned int val;
+ static const char * const regulator_names[] = { "vddd", "vddio", "vdda" };
+
+ ret = devm_regulator_bulk_get_enable(dev,
+ ARRAY_SIZE(regulator_names),
+ regulator_names);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ret = regmap_read(data->regmap, BMA220_REG_ID, &val);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to read chip id register\n");
+
+ if (val != BMA220_CHIP_ID)
+ dev_info(dev, "Unknown chip found: 0x%02x\n", val);
+
+ ret = bma220_power(data, true);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to power-on chip\n");
+
+ ret = bma220_reset(data, true);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to soft reset chip\n");
+
+ return 0;
+}
+
+static void bma220_deinit(void *data_ptr)
+{
+ struct bma220_data *data = data_ptr;
+ int ret;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = bma220_power(data, false);
+ if (ret)
+ dev_warn(dev,
+ "Failed to put device into suspend mode (%pe)\n",
+ ERR_PTR(ret));
+}
+
+static irqreturn_t bma220_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct bma220_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int bma220_reg_if1;
+
+ ret = regmap_read(data->regmap, BMA220_REG_IF1, &bma220_reg_if1);
+ if (ret)
+ return IRQ_NONE;
+
+ if (FIELD_GET(BMA220_IF_DRDY, bma220_reg_if1))
+ iio_trigger_poll_nested(data->trig);
+
+ return IRQ_HANDLED;
+}
+
+int bma220_common_probe(struct device *dev, struct regmap *regmap, int irq)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct bma220_data *data;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->regmap = regmap;
+
+ ret = bma220_init(dev, data);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ indio_dev->info = &bma220_info;
+ indio_dev->name = BMA220_DEVICE_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = bma220_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bma220_channels);
+ indio_dev->available_scan_masks = bma220_accel_scan_masks;
+
+ if (irq > 0) {
+ data->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->ops = &bma220_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = devm_iio_trigger_register(dev, data->trig);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "iio trigger register fail\n");
+ indio_dev->trig = iio_trigger_get(data->trig);
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ &bma220_irq_handler, IRQF_ONESHOT,
+ indio_dev->name, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "request irq %d failed\n", irq);
+ }
+
+ ret = devm_add_action_or_reset(dev, bma220_deinit, data);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ bma220_trigger_handler, NULL);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "iio triggered buffer setup failed\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+EXPORT_SYMBOL_NS_GPL(bma220_common_probe, "IIO_BOSCH_BMA220");
+
+static int bma220_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ return bma220_power(data, false);
+}
+
+static int bma220_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bma220_data *data = iio_priv(indio_dev);
+
+ return bma220_power(data, true);
+}
+EXPORT_NS_SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume,
+ "IIO_BOSCH_BMA220");
+
+MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
+MODULE_DESCRIPTION("BMA220 acceleration sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/accel/bma220_i2c.c b/drivers/iio/accel/bma220_i2c.c
new file mode 100644
index 000000000000..8b6f8e305c8c
--- /dev/null
+++ b/drivers/iio/accel/bma220_i2c.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Bosch triaxial acceleration sensor
+ *
+ * Copyright (c) 2025 Petre Rodan <petre.rodan@subdimension.ro>
+ *
+ * Datasheet: https://media.digikey.com/pdf/Data%20Sheets/Bosch/BMA220.pdf
+ * I2C address is either 0x0b or 0x0a depending on CSB (pin 10)
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include "bma220.h"
+
+static int bma220_set_wdt(struct regmap *regmap, const u8 val)
+{
+ return regmap_update_bits(regmap, BMA220_REG_WDT, BMA220_WDT_MASK,
+ FIELD_PREP(BMA220_WDT_MASK, val));
+}
+
+static int bma220_i2c_probe(struct i2c_client *client)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(client, &bma220_i2c_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(regmap),
+ "failed to create regmap\n");
+
+ ret = bma220_common_probe(&client->dev, regmap, client->irq);
+ if (ret)
+ return ret;
+
+ return bma220_set_wdt(regmap, BMA220_WDT_1MS);
+}
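+
+/*
+ * Note (assumption, not stated in this patch): the watchdog configured
+ * above guards the I2C interface, resetting it when SCL is held low for
+ * longer than the selected timeout (1 ms here), so a stuck bus cannot
+ * wedge the sensor.
+ */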
+
+static const struct of_device_id bma220_i2c_match[] = {
+ { .compatible = "bosch,bma220" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bma220_i2c_match);
+
+static const struct i2c_device_id bma220_i2c_id[] = {
+ { "bma220" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, bma220_i2c_id);
+
+static struct i2c_driver bma220_i2c_driver = {
+ .driver = {
+ .name = "bma220_i2c",
+ .pm = pm_sleep_ptr(&bma220_pm_ops),
+ .of_match_table = bma220_i2c_match,
+ },
+ .probe = bma220_i2c_probe,
+ .id_table = bma220_i2c_id,
+};
+module_i2c_driver(bma220_i2c_driver);
+
+MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>");
+MODULE_DESCRIPTION("Bosch triaxial acceleration sensor i2c driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_BOSCH_BMA220");
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index 38f7498431ee..383ee8a135ee 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -5,328 +5,56 @@
* Copyright (c) 2016,2020 Intel Corporation.
*/
-#include <linux/bits.h>
-#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/regmap.h>
#include <linux/types.h>
#include <linux/spi/spi.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/triggered_buffer.h>
+#include "bma220.h"
-#define BMA220_REG_ID 0x00
-#define BMA220_REG_ACCEL_X 0x02
-#define BMA220_REG_ACCEL_Y 0x03
-#define BMA220_REG_ACCEL_Z 0x04
-#define BMA220_REG_RANGE 0x11
-#define BMA220_REG_SUSPEND 0x18
-
-#define BMA220_CHIP_ID 0xDD
-#define BMA220_READ_MASK BIT(7)
-#define BMA220_RANGE_MASK GENMASK(1, 0)
-#define BMA220_SUSPEND_SLEEP 0xFF
-#define BMA220_SUSPEND_WAKE 0x00
-
-#define BMA220_DEVICE_NAME "bma220"
-
-#define BMA220_ACCEL_CHANNEL(index, reg, axis) { \
- .type = IIO_ACCEL, \
- .address = reg, \
- .modified = 1, \
- .channel2 = IIO_MOD_##axis, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_index = index, \
- .scan_type = { \
- .sign = 's', \
- .realbits = 6, \
- .storagebits = 8, \
- .shift = 2, \
- .endianness = IIO_CPU, \
- }, \
-}
-
-enum bma220_axis {
- AXIS_X,
- AXIS_Y,
- AXIS_Z,
-};
-
-static const int bma220_scale_table[][2] = {
- {0, 623000}, {1, 248000}, {2, 491000}, {4, 983000},
-};
-
-struct bma220_data {
- struct spi_device *spi_device;
- struct mutex lock;
- struct {
- s8 chans[3];
- /* Ensure timestamp is naturally aligned. */
- aligned_s64 timestamp;
- } scan;
- u8 tx_buf[2] __aligned(IIO_DMA_MINALIGN);
-};
-
-static const struct iio_chan_spec bma220_channels[] = {
- BMA220_ACCEL_CHANNEL(0, BMA220_REG_ACCEL_X, X),
- BMA220_ACCEL_CHANNEL(1, BMA220_REG_ACCEL_Y, Y),
- BMA220_ACCEL_CHANNEL(2, BMA220_REG_ACCEL_Z, Z),
- IIO_CHAN_SOFT_TIMESTAMP(3),
-};
-
-static inline int bma220_read_reg(struct spi_device *spi, u8 reg)
+static int bma220_spi_probe(struct spi_device *spi)
{
- return spi_w8r8(spi, reg | BMA220_READ_MASK);
-}
-
-static const unsigned long bma220_accel_scan_masks[] = {
- BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
- 0
-};
-
-static irqreturn_t bma220_trigger_handler(int irq, void *p)
-{
- int ret;
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct bma220_data *data = iio_priv(indio_dev);
- struct spi_device *spi = data->spi_device;
-
- mutex_lock(&data->lock);
- data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK;
- ret = spi_write_then_read(spi, data->tx_buf, 1, &data->scan.chans,
- ARRAY_SIZE(bma220_channels) - 1);
- if (ret < 0)
- goto err;
+ struct regmap *regmap;
- iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
- pf->timestamp);
-err:
- mutex_unlock(&data->lock);
- iio_trigger_notify_done(indio_dev->trig);
+ regmap = devm_regmap_init_spi(spi, &bma220_spi_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&spi->dev, PTR_ERR(regmap),
+ "failed to create regmap\n");
- return IRQ_HANDLED;
+ return bma220_common_probe(&spi->dev, regmap, spi->irq);
}
-static int bma220_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
-{
- int ret;
- u8 range_idx;
- struct bma220_data *data = iio_priv(indio_dev);
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- ret = bma220_read_reg(data->spi_device, chan->address);
- if (ret < 0)
- return -EINVAL;
- *val = sign_extend32(ret >> chan->scan_type.shift,
- chan->scan_type.realbits - 1);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SCALE:
- ret = bma220_read_reg(data->spi_device, BMA220_REG_RANGE);
- if (ret < 0)
- return ret;
- range_idx = ret & BMA220_RANGE_MASK;
- *val = bma220_scale_table[range_idx][0];
- *val2 = bma220_scale_table[range_idx][1];
- return IIO_VAL_INT_PLUS_MICRO;
- }
-
- return -EINVAL;
-}
-
-static int bma220_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- int i;
- int ret;
- int index = -1;
- struct bma220_data *data = iio_priv(indio_dev);
-
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- for (i = 0; i < ARRAY_SIZE(bma220_scale_table); i++)
- if (val == bma220_scale_table[i][0] &&
- val2 == bma220_scale_table[i][1]) {
- index = i;
- break;
- }
- if (index < 0)
- return -EINVAL;
-
- mutex_lock(&data->lock);
- data->tx_buf[0] = BMA220_REG_RANGE;
- data->tx_buf[1] = index;
- ret = spi_write(data->spi_device, data->tx_buf,
- sizeof(data->tx_buf));
- if (ret < 0)
- dev_err(&data->spi_device->dev,
- "failed to set measurement range\n");
- mutex_unlock(&data->lock);
-
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int bma220_read_avail(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- const int **vals, int *type, int *length,
- long mask)
-{
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- *vals = (int *)bma220_scale_table;
- *type = IIO_VAL_INT_PLUS_MICRO;
- *length = ARRAY_SIZE(bma220_scale_table) * 2;
- return IIO_AVAIL_LIST;
- default:
- return -EINVAL;
- }
-}
-
-static const struct iio_info bma220_info = {
- .read_raw = bma220_read_raw,
- .write_raw = bma220_write_raw,
- .read_avail = bma220_read_avail,
-};
-
-static int bma220_init(struct spi_device *spi)
-{
- int ret;
-
- ret = bma220_read_reg(spi, BMA220_REG_ID);
- if (ret != BMA220_CHIP_ID)
- return -ENODEV;
-
- /* Make sure the chip is powered on */
- ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
- if (ret == BMA220_SUSPEND_WAKE)
- ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
- if (ret < 0)
- return ret;
- if (ret == BMA220_SUSPEND_WAKE)
- return -EBUSY;
-
- return 0;
-}
-
-static int bma220_power(struct spi_device *spi, bool up)
-{
- int i, ret;
-
- /**
- * The chip can be suspended/woken up by a simple register read.
- * So, we need up to 2 register reads of the suspend register
- * to make sure that the device is in the desired state.
- */
- for (i = 0; i < 2; i++) {
- ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
- if (ret < 0)
- return ret;
-
- if (up && ret == BMA220_SUSPEND_SLEEP)
- return 0;
-
- if (!up && ret == BMA220_SUSPEND_WAKE)
- return 0;
- }
-
- return -EBUSY;
-}
-
-static void bma220_deinit(void *spi)
-{
- bma220_power(spi, false);
-}
-
-static int bma220_probe(struct spi_device *spi)
-{
- int ret;
- struct iio_dev *indio_dev;
- struct bma220_data *data;
-
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&spi->dev, "iio allocation failed!\n");
- return -ENOMEM;
- }
-
- data = iio_priv(indio_dev);
- data->spi_device = spi;
- mutex_init(&data->lock);
-
- indio_dev->info = &bma220_info;
- indio_dev->name = BMA220_DEVICE_NAME;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = bma220_channels;
- indio_dev->num_channels = ARRAY_SIZE(bma220_channels);
- indio_dev->available_scan_masks = bma220_accel_scan_masks;
-
- ret = bma220_init(data->spi_device);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, bma220_deinit, spi);
- if (ret)
- return ret;
-
- ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev,
- iio_pollfunc_store_time,
- bma220_trigger_handler, NULL);
- if (ret < 0) {
- dev_err(&spi->dev, "iio triggered buffer setup failed\n");
- return ret;
- }
-
- return devm_iio_device_register(&spi->dev, indio_dev);
-}
-
-static int bma220_suspend(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- return bma220_power(spi, false);
-}
-
-static int bma220_resume(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- return bma220_power(spi, true);
-}
-static DEFINE_SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume);
-
static const struct spi_device_id bma220_spi_id[] = {
- {"bma220", 0},
+ { "bma220", 0 },
{ }
};
static const struct acpi_device_id bma220_acpi_id[] = {
- {"BMA0220", 0},
+ { "BMA0220", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, bma220_spi_id);
-static struct spi_driver bma220_driver = {
+static const struct of_device_id bma220_of_spi_match[] = {
+ { .compatible = "bosch,bma220" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bma220_of_spi_match);
+
+static struct spi_driver bma220_spi_driver = {
.driver = {
.name = "bma220_spi",
.pm = pm_sleep_ptr(&bma220_pm_ops),
+ .of_match_table = bma220_of_spi_match,
.acpi_match_table = bma220_acpi_id,
},
- .probe = bma220_probe,
+ .probe = bma220_spi_probe,
.id_table = bma220_spi_id,
};
-module_spi_driver(bma220_driver);
+module_spi_driver(bma220_spi_driver);
MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
-MODULE_DESCRIPTION("BMA220 acceleration sensor driver");
-MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("BMA220 triaxial acceleration sensor SPI driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_BOSCH_BMA220");
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
index 932358b45f17..b5f3cac51610 100644
--- a/drivers/iio/accel/bma400.h
+++ b/drivers/iio/accel/bma400.h
@@ -16,31 +16,44 @@
* Read-Only Registers
*/
+/* Chip ID of BMA 400 devices found in the chip ID register. */
+#define BMA400_ID_REG_VAL 0x90
+
/* Status and ID registers */
#define BMA400_CHIP_ID_REG 0x00
#define BMA400_ERR_REG 0x02
#define BMA400_STATUS_REG 0x03
/* Acceleration registers */
-#define BMA400_X_AXIS_LSB_REG 0x04
-#define BMA400_X_AXIS_MSB_REG 0x05
-#define BMA400_Y_AXIS_LSB_REG 0x06
-#define BMA400_Y_AXIS_MSB_REG 0x07
-#define BMA400_Z_AXIS_LSB_REG 0x08
-#define BMA400_Z_AXIS_MSB_REG 0x09
+#define BMA400_ACC_X_LSB_REG 0x04
+#define BMA400_ACC_X_MSB_REG 0x05
+#define BMA400_ACC_Y_LSB_REG 0x06
+#define BMA400_ACC_Y_MSB_REG 0x07
+#define BMA400_ACC_Z_LSB_REG 0x08
+#define BMA400_ACC_Z_MSB_REG 0x09
/* Sensor time registers */
-#define BMA400_SENSOR_TIME0 0x0a
-#define BMA400_SENSOR_TIME1 0x0b
-#define BMA400_SENSOR_TIME2 0x0c
+#define BMA400_SENSOR_TIME0_REG 0x0a
+#define BMA400_SENSOR_TIME1_REG 0x0b
+#define BMA400_SENSOR_TIME2_REG 0x0c
/* Event and interrupt registers */
#define BMA400_EVENT_REG 0x0d
+
#define BMA400_INT_STAT0_REG 0x0e
+#define BMA400_INT_STAT0_GEN1_MASK BIT(2)
+#define BMA400_INT_STAT0_GEN2_MASK BIT(3)
+#define BMA400_INT_STAT0_DRDY_MASK BIT(7)
+
#define BMA400_INT_STAT1_REG 0x0f
+#define BMA400_INT_STAT1_STEP_INT_MASK GENMASK(9, 8)
+#define BMA400_INT_STAT1_S_TAP_MASK BIT(10)
+#define BMA400_INT_STAT1_D_TAP_MASK BIT(11)
+
#define BMA400_INT_STAT2_REG 0x10
-#define BMA400_INT12_MAP_REG 0x23
-#define BMA400_INT_ENG_OVRUN_MSK BIT(4)
+
+/* Bit present in all INT_STAT registers */
+#define BMA400_INT_STAT_ENG_OVRRUN_MASK BIT(4)
/* Temperature register */
#define BMA400_TEMP_DATA_REG 0x11
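The INT_STAT1 masks above deliberately use bit positions 8-11: the driver bulk-reads INT_STAT0 and INT_STAT1 as one little-endian 16-bit status word and decodes both registers with a single le16_to_cpu() (see bma400_interrupt() in bma400_core.c below). A minimal sketch of that decode, assuming a two-byte bulk read into a __le16 (handle_single_tap() is a hypothetical handler):

	__le16 status;

	/* STAT0 lands in the low byte, STAT1 in the high byte. */
	ret = regmap_bulk_read(data->regmap, BMA400_INT_STAT0_REG,
			       &status, sizeof(status));
	if (FIELD_GET(BMA400_INT_STAT1_S_TAP_MASK, le16_to_cpu(status)))
		handle_single_tap(); /* hypothetical */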
@@ -55,70 +68,100 @@
#define BMA400_STEP_CNT1_REG 0x16
#define BMA400_STEP_CNT3_REG 0x17
#define BMA400_STEP_STAT_REG 0x18
-#define BMA400_STEP_INT_MSK BIT(0)
#define BMA400_STEP_RAW_LEN 0x03
-#define BMA400_STEP_STAT_MASK GENMASK(9, 8)
/*
* Read-write configuration registers
*/
-#define BMA400_ACC_CONFIG0_REG 0x19
-#define BMA400_ACC_CONFIG1_REG 0x1a
+#define BMA400_ACC_CONFIG0_REG 0x19
+#define BMA400_ACC_CONFIG0_LP_OSR_MASK GENMASK(6, 5)
+
+#define BMA400_ACC_CONFIG1_REG 0x1a
+#define BMA400_ACC_CONFIG1_ODR_MASK GENMASK(3, 0)
+#define BMA400_ACC_CONFIG1_ODR_MIN_RAW 0x05
+#define BMA400_ACC_CONFIG1_ODR_LP_RAW 0x06
+#define BMA400_ACC_CONFIG1_ODR_MAX_RAW 0x0b
+#define BMA400_ACC_CONFIG1_ODR_MAX_HZ 800
+#define BMA400_ACC_CONFIG1_ODR_MIN_WHOLE_HZ 25
+#define BMA400_ACC_CONFIG1_ODR_MIN_HZ 12
+#define BMA400_ACC_CONFIG1_NP_OSR_MASK GENMASK(5, 4)
+#define BMA400_ACC_CONFIG1_ACC_RANGE_MASK GENMASK(7, 6)
+
#define BMA400_ACC_CONFIG2_REG 0x1b
-#define BMA400_CMD_REG 0x7e
/* Interrupt registers */
#define BMA400_INT_CONFIG0_REG 0x1f
+#define BMA400_INT_CONFIG0_GEN1_MASK BIT(2)
+#define BMA400_INT_CONFIG0_GEN2_MASK BIT(3)
+#define BMA400_INT_CONFIG0_DRDY_MASK BIT(7)
+
+enum bma400_generic_intr {
+ BMA400_GEN1_INTR = 0x1,
+ BMA400_GEN2_INTR = 0x2,
+};
+
#define BMA400_INT_CONFIG1_REG 0x20
+#define BMA400_INT_CONFIG1_STEP_INT_MASK BIT(0)
+#define BMA400_INT_CONFIG1_S_TAP_MASK BIT(2)
+#define BMA400_INT_CONFIG1_D_TAP_MASK BIT(3)
+
#define BMA400_INT1_MAP_REG 0x21
+#define BMA400_INT12_MAP_REG 0x23
#define BMA400_INT_IO_CTRL_REG 0x24
-#define BMA400_INT_DRDY_MSK BIT(7)
-
-/* Chip ID of BMA 400 devices found in the chip ID register. */
-#define BMA400_ID_REG_VAL 0x90
-
-#define BMA400_LP_OSR_SHIFT 5
-#define BMA400_NP_OSR_SHIFT 4
-#define BMA400_SCALE_SHIFT 6
#define BMA400_TWO_BITS_MASK GENMASK(1, 0)
-#define BMA400_LP_OSR_MASK GENMASK(6, 5)
-#define BMA400_NP_OSR_MASK GENMASK(5, 4)
-#define BMA400_ACC_ODR_MASK GENMASK(3, 0)
-#define BMA400_ACC_SCALE_MASK GENMASK(7, 6)
-
-#define BMA400_ACC_ODR_MIN_RAW 0x05
-#define BMA400_ACC_ODR_LP_RAW 0x06
-#define BMA400_ACC_ODR_MAX_RAW 0x0b
-
-#define BMA400_ACC_ODR_MAX_HZ 800
-#define BMA400_ACC_ODR_MIN_WHOLE_HZ 25
-#define BMA400_ACC_ODR_MIN_HZ 12
/* Generic interrupts register */
-#define BMA400_GEN1INT_CONFIG0 0x3f
-#define BMA400_GEN2INT_CONFIG0 0x4A
+#define BMA400_GENINT_CONFIG_REG_BASE 0x3f
+#define BMA400_NUM_GENINT_CONFIG_REGS 11
+#define BMA400_GENINT_CONFIG_REG(gen_intr, config_idx) \
+ (BMA400_GENINT_CONFIG_REG_BASE + \
+ (gen_intr - 1) * BMA400_NUM_GENINT_CONFIG_REGS + \
+ (config_idx))
+#define BMA400_GENINT_CONFIG0_HYST_MASK GENMASK(1, 0)
+#define BMA400_GENINT_CONFIG0_REF_UPD_MODE_MASK GENMASK(3, 2)
+#define BMA400_GENINT_CONFIG0_DATA_SRC_MASK BIT(4)
+#define BMA400_GENINT_CONFIG0_X_EN_MASK BIT(5)
+#define BMA400_GENINT_CONFIG0_Y_EN_MASK BIT(6)
+#define BMA400_GENINT_CONFIG0_Z_EN_MASK BIT(7)
+
+enum bma400_accel_data_src {
+ ACCEL_FILT1 = 0x0,
+ ACCEL_FILT2 = 0x1,
+};
+
+enum bma400_ref_updt_mode {
+ BMA400_REF_MANUAL_UPDT_MODE = 0x0,
+ BMA400_REF_ONETIME_UPDT_MODE = 0x1,
+ BMA400_REF_EVERYTIME_UPDT_MODE = 0x2,
+ BMA400_REF_EVERYTIME_LP_UPDT_MODE = 0x3,
+};
+
#define BMA400_GEN_CONFIG1_OFF 0x01
-#define BMA400_GEN_CONFIG2_OFF 0x02
-#define BMA400_GEN_CONFIG3_OFF 0x03
-#define BMA400_GEN_CONFIG31_OFF 0x04
-#define BMA400_INT_GEN1_MSK BIT(2)
-#define BMA400_INT_GEN2_MSK BIT(3)
-#define BMA400_GEN_HYST_MSK GENMASK(1, 0)
+#define BMA400_GENINT_CONFIG1_AXES_COMB_MASK BIT(0)
+#define BMA400_GENINT_CONFIG1_DETCT_CRIT_MASK BIT(1)
+
+enum bma400_genintr_acceleval_axescomb {
+ BMA400_EVAL_X_OR_Y_OR_Z = 0x0,
+ BMA400_EVAL_X_AND_Y_AND_Z = 0x1,
+};
+
+enum bma400_detect_criterion {
+ BMA400_DETECT_INACTIVITY = 0x0,
+ BMA400_DETECT_ACTIVITY = 0x1,
+};
/* TAP config registers */
-#define BMA400_TAP_CONFIG 0x57
-#define BMA400_TAP_CONFIG1 0x58
-#define BMA400_S_TAP_MSK BIT(2)
-#define BMA400_D_TAP_MSK BIT(3)
-#define BMA400_INT_S_TAP_MSK BIT(10)
-#define BMA400_INT_D_TAP_MSK BIT(11)
-#define BMA400_TAP_SEN_MSK GENMASK(2, 0)
-#define BMA400_TAP_TICSTH_MSK GENMASK(1, 0)
-#define BMA400_TAP_QUIET_MSK GENMASK(3, 2)
-#define BMA400_TAP_QUIETDT_MSK GENMASK(5, 4)
+#define BMA400_TAP_CONFIG_REG 0x57
+#define BMA400_TAP_CONFIG_SEN_MASK GENMASK(2, 0)
+
+#define BMA400_TAP_CONFIG1_REG 0x58
+#define BMA400_TAP_CONFIG1_TICSTH_MASK GENMASK(1, 0)
+#define BMA400_TAP_CONFIG1_QUIET_MASK GENMASK(3, 2)
+#define BMA400_TAP_CONFIG1_QUIETDT_MASK GENMASK(5, 4)
#define BMA400_TAP_TIM_LIST_LEN 4
+#define BMA400_CMD_REG 0x7e
/*
* BMA400_SCALE_MIN macro value represents m/s^2 for 1 LSB before
* converting to micro values for +-2g range.
@@ -138,8 +181,8 @@
* To select +-8g = 9577 << 2 = raw value to write is 2.
* To select +-16g = 9577 << 3 = raw value to write is 3.
*/
-#define BMA400_SCALE_MIN 9577
-#define BMA400_SCALE_MAX 76617
+#define BMA400_ACC_SCALE_MIN 9577
+#define BMA400_ACC_SCALE_MAX 76617
extern const struct regmap_config bma400_regmap_config;
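For reference, BMA400_GENINT_CONFIG_REG() reproduces the fixed addresses it replaces; a worked check (illustrative only):

	/*
	 * BMA400_GENINT_CONFIG_REG(BMA400_GEN1_INTR, 0)
	 *   = 0x3f + (1 - 1) * 11 + 0 = 0x3f  (old BMA400_GEN1INT_CONFIG0)
	 * BMA400_GENINT_CONFIG_REG(BMA400_GEN2_INTR, 0)
	 *   = 0x3f + (2 - 1) * 11 + 0 = 0x4a  (old BMA400_GEN2INT_CONFIG0)
	 * BMA400_GENINT_CONFIG_REG(BMA400_GEN1_INTR, 3)
	 *   = 0x3f + 0 + 3 = 0x42             (old base + BMA400_GEN_CONFIG3_OFF)
	 */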
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index 85e23badf733..05f72707f830 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -121,21 +121,56 @@ struct bma400_data {
__be16 duration;
};
+struct bma400_genintr_info {
+ enum bma400_generic_intr genintr;
+ unsigned int intrmask;
+ enum iio_event_direction dir;
+ enum bma400_detect_criterion detect_mode;
+};
+
+/* Lookup struct for determining GEN1/GEN2 based on dir */
+static const struct bma400_genintr_info bma400_genintrs[] = {
+ [IIO_EV_DIR_RISING] = {
+ .genintr = BMA400_GEN1_INTR,
+ .intrmask = BMA400_INT_CONFIG0_GEN1_MASK,
+ .dir = IIO_EV_DIR_RISING,
+ .detect_mode = BMA400_DETECT_ACTIVITY,
+ },
+ [IIO_EV_DIR_FALLING] = {
+ .genintr = BMA400_GEN2_INTR,
+ .intrmask = BMA400_INT_CONFIG0_GEN2_MASK,
+ .dir = IIO_EV_DIR_FALLING,
+ .detect_mode = BMA400_DETECT_INACTIVITY,
+ }
+};
+
+static inline const struct bma400_genintr_info *
+get_bma400_genintr_info(enum iio_event_direction dir)
+{
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ case IIO_EV_DIR_FALLING:
+ return &bma400_genintrs[dir];
+ default:
+ return NULL;
+ }
+}
+
static bool bma400_is_writable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case BMA400_CHIP_ID_REG:
case BMA400_ERR_REG:
case BMA400_STATUS_REG:
- case BMA400_X_AXIS_LSB_REG:
- case BMA400_X_AXIS_MSB_REG:
- case BMA400_Y_AXIS_LSB_REG:
- case BMA400_Y_AXIS_MSB_REG:
- case BMA400_Z_AXIS_LSB_REG:
- case BMA400_Z_AXIS_MSB_REG:
- case BMA400_SENSOR_TIME0:
- case BMA400_SENSOR_TIME1:
- case BMA400_SENSOR_TIME2:
+ case BMA400_ACC_X_LSB_REG:
+ case BMA400_ACC_X_MSB_REG:
+ case BMA400_ACC_Y_LSB_REG:
+ case BMA400_ACC_Y_MSB_REG:
+ case BMA400_ACC_Z_LSB_REG:
+ case BMA400_ACC_Z_MSB_REG:
+ case BMA400_SENSOR_TIME0_REG:
+ case BMA400_SENSOR_TIME1_REG:
+ case BMA400_SENSOR_TIME2_REG:
case BMA400_EVENT_REG:
case BMA400_INT_STAT0_REG:
case BMA400_INT_STAT1_REG:
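The direction-indexed bma400_genintrs[] table above lets callers replace per-function switch statements with a single lookup; the call pattern used later in this patch looks like:

	const struct bma400_genintr_info *info;

	info = get_bma400_genintr_info(dir);
	if (!info)
		return -EINVAL;
	/* info->genintr picks GEN1/GEN2, info->intrmask the INT_CONFIG0 bit */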
@@ -159,15 +194,15 @@ static bool bma400_is_volatile_reg(struct device *dev, unsigned int reg)
switch (reg) {
case BMA400_ERR_REG:
case BMA400_STATUS_REG:
- case BMA400_X_AXIS_LSB_REG:
- case BMA400_X_AXIS_MSB_REG:
- case BMA400_Y_AXIS_LSB_REG:
- case BMA400_Y_AXIS_MSB_REG:
- case BMA400_Z_AXIS_LSB_REG:
- case BMA400_Z_AXIS_MSB_REG:
- case BMA400_SENSOR_TIME0:
- case BMA400_SENSOR_TIME1:
- case BMA400_SENSOR_TIME2:
+ case BMA400_ACC_X_LSB_REG:
+ case BMA400_ACC_X_MSB_REG:
+ case BMA400_ACC_Y_LSB_REG:
+ case BMA400_ACC_Y_MSB_REG:
+ case BMA400_ACC_Z_LSB_REG:
+ case BMA400_ACC_Z_MSB_REG:
+ case BMA400_SENSOR_TIME0_REG:
+ case BMA400_SENSOR_TIME1_REG:
+ case BMA400_SENSOR_TIME2_REG:
case BMA400_EVENT_REG:
case BMA400_INT_STAT0_REG:
case BMA400_INT_STAT1_REG:
@@ -275,11 +310,11 @@ static ssize_t in_accel_gesture_tap_maxtomin_time_show(struct device *dev,
struct bma400_data *data = iio_priv(indio_dev);
int ret, reg_val, raw, vals[2];
- ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1, &reg_val);
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1_REG, &reg_val);
if (ret)
return ret;
- raw = FIELD_GET(BMA400_TAP_TICSTH_MSK, reg_val);
+ raw = FIELD_GET(BMA400_TAP_CONFIG1_TICSTH_MASK, reg_val);
vals[0] = 0;
vals[1] = tap_max2min_time[raw];
@@ -302,9 +337,9 @@ static ssize_t in_accel_gesture_tap_maxtomin_time_store(struct device *dev,
if (raw < 0)
return -EINVAL;
- ret = regmap_update_bits(data->regmap, BMA400_TAP_CONFIG1,
- BMA400_TAP_TICSTH_MSK,
- FIELD_PREP(BMA400_TAP_TICSTH_MSK, raw));
+ ret = regmap_update_bits(data->regmap, BMA400_TAP_CONFIG1_REG,
+ BMA400_TAP_CONFIG1_TICSTH_MASK,
+ FIELD_PREP(BMA400_TAP_CONFIG1_TICSTH_MASK, raw));
if (ret)
return ret;
@@ -449,13 +484,13 @@ static int bma400_get_accel_reg(struct bma400_data *data,
switch (chan->channel2) {
case IIO_MOD_X:
- lsb_reg = BMA400_X_AXIS_LSB_REG;
+ lsb_reg = BMA400_ACC_X_LSB_REG;
break;
case IIO_MOD_Y:
- lsb_reg = BMA400_Y_AXIS_LSB_REG;
+ lsb_reg = BMA400_ACC_Y_LSB_REG;
break;
case IIO_MOD_Z:
- lsb_reg = BMA400_Z_AXIS_LSB_REG;
+ lsb_reg = BMA400_ACC_Z_LSB_REG;
break;
default:
dev_err(data->dev, "invalid axis channel modifier\n");
@@ -475,8 +510,8 @@ static int bma400_get_accel_reg(struct bma400_data *data,
static void bma400_output_data_rate_from_raw(int raw, unsigned int *val,
unsigned int *val2)
{
- *val = BMA400_ACC_ODR_MAX_HZ >> (BMA400_ACC_ODR_MAX_RAW - raw);
- if (raw > BMA400_ACC_ODR_MIN_RAW)
+ *val = BMA400_ACC_CONFIG1_ODR_MAX_HZ >> (BMA400_ACC_CONFIG1_ODR_MAX_RAW - raw);
+ if (raw > BMA400_ACC_CONFIG1_ODR_MIN_RAW)
*val2 = 0;
else
*val2 = 500000;
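bma400_output_data_rate_from_raw() halves the 800 Hz maximum once per raw step below the top code; worked examples using the defines above:

	/*
	 * raw = 0x0b: 800 >> 0 = 800 Hz
	 * raw = 0x08: 800 >> 3 = 100 Hz
	 * raw = 0x05: 800 >> 6 = 12 Hz, val2 = 500000 -> 12.5 Hz
	 */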
@@ -494,7 +529,7 @@ static int bma400_get_accel_output_data_rate(struct bma400_data *data)
* Runs at a fixed rate in low-power mode. See section 4.3
* in the datasheet.
*/
- bma400_output_data_rate_from_raw(BMA400_ACC_ODR_LP_RAW,
+ bma400_output_data_rate_from_raw(BMA400_ACC_CONFIG1_ODR_LP_RAW,
&data->sample_freq.hz,
&data->sample_freq.uhz);
return 0;
@@ -507,9 +542,9 @@ static int bma400_get_accel_output_data_rate(struct bma400_data *data)
if (ret)
goto error;
- odr = val & BMA400_ACC_ODR_MASK;
- if (odr < BMA400_ACC_ODR_MIN_RAW ||
- odr > BMA400_ACC_ODR_MAX_RAW) {
+ odr = val & BMA400_ACC_CONFIG1_ODR_MASK;
+ if (odr < BMA400_ACC_CONFIG1_ODR_MIN_RAW ||
+ odr > BMA400_ACC_CONFIG1_ODR_MAX_RAW) {
ret = -EINVAL;
goto error;
}
@@ -539,19 +574,19 @@ static int bma400_set_accel_output_data_rate(struct bma400_data *data,
unsigned int val;
int ret;
- if (hz >= BMA400_ACC_ODR_MIN_WHOLE_HZ) {
- if (uhz || hz > BMA400_ACC_ODR_MAX_HZ)
+ if (hz >= BMA400_ACC_CONFIG1_ODR_MIN_WHOLE_HZ) {
+ if (uhz || hz > BMA400_ACC_CONFIG1_ODR_MAX_HZ)
return -EINVAL;
/* Note this works because MIN_WHOLE_HZ is odd */
idx = __ffs(hz);
- if (hz >> idx != BMA400_ACC_ODR_MIN_WHOLE_HZ)
+ if (hz >> idx != BMA400_ACC_CONFIG1_ODR_MIN_WHOLE_HZ)
return -EINVAL;
- idx += BMA400_ACC_ODR_MIN_RAW + 1;
- } else if (hz == BMA400_ACC_ODR_MIN_HZ && uhz == 500000) {
- idx = BMA400_ACC_ODR_MIN_RAW;
+ idx += BMA400_ACC_CONFIG1_ODR_MIN_RAW + 1;
+ } else if (hz == BMA400_ACC_CONFIG1_ODR_MIN_HZ && uhz == 500000) {
+ idx = BMA400_ACC_CONFIG1_ODR_MIN_RAW;
} else {
return -EINVAL;
}
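The __ffs() test works because every supported whole-Hz rate is 25 Hz scaled by a power of two, and 25 is odd; for example:

	/*
	 * hz = 100: __ffs(100) = 2, 100 >> 2 = 25 -> valid,
	 *	idx = BMA400_ACC_CONFIG1_ODR_MIN_RAW + 1 + 2 = 0x08
	 * hz = 30:  __ffs(30) = 1, 30 >> 1 = 15 != 25 -> -EINVAL
	 */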
@@ -561,7 +596,7 @@ static int bma400_set_accel_output_data_rate(struct bma400_data *data,
return ret;
/* preserve the range and normal mode osr */
- odr = (~BMA400_ACC_ODR_MASK & val) | idx;
+ odr = (~BMA400_ACC_CONFIG1_ODR_MASK & val) | idx;
ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG, odr);
if (ret)
@@ -592,7 +627,7 @@ static int bma400_get_accel_oversampling_ratio(struct bma400_data *data)
return ret;
}
- osr = (val & BMA400_LP_OSR_MASK) >> BMA400_LP_OSR_SHIFT;
+ osr = FIELD_GET(BMA400_ACC_CONFIG0_LP_OSR_MASK, val);
data->oversampling_ratio = osr;
return 0;
@@ -603,7 +638,7 @@ static int bma400_get_accel_oversampling_ratio(struct bma400_data *data)
return ret;
}
- osr = (val & BMA400_NP_OSR_MASK) >> BMA400_NP_OSR_SHIFT;
+ osr = FIELD_GET(BMA400_ACC_CONFIG1_NP_OSR_MASK, val);
data->oversampling_ratio = osr;
return 0;
@@ -637,8 +672,8 @@ static int bma400_set_accel_oversampling_ratio(struct bma400_data *data,
return ret;
ret = regmap_write(data->regmap, BMA400_ACC_CONFIG0_REG,
- (acc_config & ~BMA400_LP_OSR_MASK) |
- (val << BMA400_LP_OSR_SHIFT));
+ (acc_config & ~BMA400_ACC_CONFIG0_LP_OSR_MASK) |
+ FIELD_PREP(BMA400_ACC_CONFIG0_LP_OSR_MASK, val));
if (ret) {
dev_err(data->dev, "Failed to write out OSR\n");
return ret;
@@ -653,8 +688,8 @@ static int bma400_set_accel_oversampling_ratio(struct bma400_data *data,
return ret;
ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG,
- (acc_config & ~BMA400_NP_OSR_MASK) |
- (val << BMA400_NP_OSR_SHIFT));
+ (acc_config & ~BMA400_ACC_CONFIG1_NP_OSR_MASK) |
+ FIELD_PREP(BMA400_ACC_CONFIG1_NP_OSR_MASK, val));
if (ret) {
dev_err(data->dev, "Failed to write out OSR\n");
return ret;
@@ -679,7 +714,7 @@ static int bma400_accel_scale_to_raw(struct bma400_data *data,
/* Note this works because BMA400_SCALE_MIN is odd */
raw = __ffs(val);
- if (val >> raw != BMA400_SCALE_MIN)
+ if (val >> raw != BMA400_ACC_SCALE_MIN)
return -EINVAL;
return raw;
@@ -695,11 +730,11 @@ static int bma400_get_accel_scale(struct bma400_data *data)
if (ret)
return ret;
- raw_scale = (val & BMA400_ACC_SCALE_MASK) >> BMA400_SCALE_SHIFT;
+ raw_scale = FIELD_GET(BMA400_ACC_CONFIG1_ACC_RANGE_MASK, val);
if (raw_scale > BMA400_TWO_BITS_MASK)
return -EINVAL;
- data->scale = BMA400_SCALE_MIN << raw_scale;
+ data->scale = BMA400_ACC_SCALE_MIN << raw_scale;
return 0;
}
@@ -719,8 +754,8 @@ static int bma400_set_accel_scale(struct bma400_data *data, unsigned int val)
return raw;
ret = regmap_write(data->regmap, BMA400_ACC_CONFIG1_REG,
- (acc_config & ~BMA400_ACC_SCALE_MASK) |
- (raw << BMA400_SCALE_SHIFT));
+ (acc_config & ~BMA400_ACC_CONFIG1_ACC_RANGE_MASK) |
+ FIELD_PREP(BMA400_ACC_CONFIG1_ACC_RANGE_MASK, raw));
if (ret)
return ret;
@@ -786,8 +821,8 @@ static int bma400_enable_steps(struct bma400_data *data, int val)
return 0;
ret = regmap_update_bits(data->regmap, BMA400_INT_CONFIG1_REG,
- BMA400_STEP_INT_MSK,
- FIELD_PREP(BMA400_STEP_INT_MSK, val ? 1 : 0));
+ BMA400_INT_CONFIG1_STEP_INT_MASK,
+ FIELD_PREP(BMA400_INT_CONFIG1_STEP_INT_MASK, val ? 1 : 0));
if (ret)
return ret;
data->steps_enabled = val;
@@ -826,7 +861,7 @@ static void bma400_init_tables(void)
for (i = 0; i + 1 < ARRAY_SIZE(bma400_scales); i += 2) {
raw = i / 2;
bma400_scales[i] = 0;
- bma400_scales[i + 1] = BMA400_SCALE_MIN << raw;
+ bma400_scales[i + 1] = BMA400_ACC_SCALE_MIN << raw;
}
}
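Each range step doubles the per-LSB scale, so the table built above expands to (in micro m/s^2 per LSB, matching the header comment):

	/*
	 * raw 0 (+/-2g):  9577
	 * raw 1 (+/-4g):  19154
	 * raw 2 (+/-8g):  38308
	 * raw 3 (+/-16g): 76616
	 */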
@@ -1063,7 +1098,7 @@ static int bma400_write_raw(struct iio_dev *indio_dev,
return ret;
case IIO_CHAN_INFO_SCALE:
if (val != 0 ||
- val2 < BMA400_SCALE_MIN || val2 > BMA400_SCALE_MAX)
+ val2 < BMA400_ACC_SCALE_MIN || val2 > BMA400_ACC_SCALE_MAX)
return -EINVAL;
mutex_lock(&data->mutex);
@@ -1114,16 +1149,16 @@ static int bma400_read_event_config(struct iio_dev *indio_dev,
case IIO_ACCEL:
switch (dir) {
case IIO_EV_DIR_RISING:
- return FIELD_GET(BMA400_INT_GEN1_MSK,
+ return FIELD_GET(BMA400_INT_CONFIG0_GEN1_MASK,
data->generic_event_en);
case IIO_EV_DIR_FALLING:
- return FIELD_GET(BMA400_INT_GEN2_MSK,
+ return FIELD_GET(BMA400_INT_CONFIG0_GEN2_MASK,
data->generic_event_en);
case IIO_EV_DIR_SINGLETAP:
- return FIELD_GET(BMA400_S_TAP_MSK,
+ return FIELD_GET(BMA400_INT_CONFIG1_S_TAP_MASK,
data->tap_event_en_bitmask);
case IIO_EV_DIR_DOUBLETAP:
- return FIELD_GET(BMA400_D_TAP_MSK,
+ return FIELD_GET(BMA400_INT_CONFIG1_D_TAP_MASK,
data->tap_event_en_bitmask);
default:
return -EINVAL;
@@ -1146,8 +1181,8 @@ static int bma400_steps_event_enable(struct bma400_data *data, int state)
return ret;
ret = regmap_update_bits(data->regmap, BMA400_INT12_MAP_REG,
- BMA400_STEP_INT_MSK,
- FIELD_PREP(BMA400_STEP_INT_MSK,
+ BMA400_INT_CONFIG1_STEP_INT_MASK,
+ FIELD_PREP(BMA400_INT_CONFIG1_STEP_INT_MASK,
state));
if (ret)
return ret;
@@ -1155,63 +1190,68 @@ static int bma400_steps_event_enable(struct bma400_data *data, int state)
return 0;
}
-static int bma400_activity_event_en(struct bma400_data *data,
- enum iio_event_direction dir,
- int state)
+static int bma400_generic_event_en(struct bma400_data *data,
+ enum iio_event_direction dir,
+ int state)
{
- int ret, reg, msk, value;
- int field_value = 0;
+ int ret;
+ unsigned int intrmask, regval;
+ enum bma400_generic_intr genintr;
+ enum bma400_detect_criterion detect_criterion;
+ const struct bma400_genintr_info *bma400_genintr;
- switch (dir) {
- case IIO_EV_DIR_RISING:
- reg = BMA400_GEN1INT_CONFIG0;
- msk = BMA400_INT_GEN1_MSK;
- value = 2;
- set_mask_bits(&field_value, BMA400_INT_GEN1_MSK,
- FIELD_PREP(BMA400_INT_GEN1_MSK, state));
- break;
- case IIO_EV_DIR_FALLING:
- reg = BMA400_GEN2INT_CONFIG0;
- msk = BMA400_INT_GEN2_MSK;
- value = 0;
- set_mask_bits(&field_value, BMA400_INT_GEN2_MSK,
- FIELD_PREP(BMA400_INT_GEN2_MSK, state));
- break;
- default:
+ bma400_genintr = get_bma400_genintr_info(dir);
+ if (!bma400_genintr)
return -EINVAL;
- }
- /* Enabling all axis for interrupt evaluation */
- ret = regmap_write(data->regmap, reg, 0xF8);
+ genintr = bma400_genintr->genintr;
+ detect_criterion = bma400_genintr->detect_mode;
+ intrmask = bma400_genintr->intrmask;
+
+ /*
+ * Enable all axes for interrupt evaluation.
+ * Acc_filt2 is the data source recommended by the datasheet (Section 4.7).
+ */
+ ret = regmap_write(data->regmap, BMA400_GENINT_CONFIG_REG(genintr, 0),
+ BMA400_GENINT_CONFIG0_X_EN_MASK |
+ BMA400_GENINT_CONFIG0_Y_EN_MASK |
+ BMA400_GENINT_CONFIG0_Z_EN_MASK |
+ FIELD_PREP(BMA400_GENINT_CONFIG0_DATA_SRC_MASK, ACCEL_FILT2) |
+ FIELD_PREP(BMA400_GENINT_CONFIG0_REF_UPD_MODE_MASK,
+ BMA400_REF_EVERYTIME_UPDT_MODE));
if (ret)
return ret;
/* OR combination of all axis for interrupt evaluation */
- ret = regmap_write(data->regmap, reg + BMA400_GEN_CONFIG1_OFF, value);
+ regval = FIELD_PREP(BMA400_GENINT_CONFIG1_AXES_COMB_MASK, BMA400_EVAL_X_OR_Y_OR_Z) |
+ FIELD_PREP(BMA400_GENINT_CONFIG1_DETCT_CRIT_MASK, detect_criterion);
+ ret = regmap_write(data->regmap, BMA400_GENINT_CONFIG_REG(genintr, 1), regval);
if (ret)
return ret;
- /* Initial value to avoid interrupts while enabling*/
- ret = regmap_write(data->regmap, reg + BMA400_GEN_CONFIG2_OFF, 0x0A);
+ /*
+ * Initial threshold value to avoid spurious interrupts while enabling.
+ * The register is in units of 8 mg/LSB, i.e. the effective threshold is val * 8 mg.
+ */
+ ret = regmap_write(data->regmap, BMA400_GENINT_CONFIG_REG(genintr, 2), 0x0A);
if (ret)
return ret;
/* Initial duration value to avoid interrupts while enabling*/
- ret = regmap_write(data->regmap, reg + BMA400_GEN_CONFIG31_OFF, 0x0F);
+ ret = regmap_write(data->regmap, BMA400_GENINT_CONFIG_REG(genintr, 4), 0x0F);
if (ret)
return ret;
- ret = regmap_update_bits(data->regmap, BMA400_INT1_MAP_REG, msk,
- field_value);
+ regval = state ? intrmask : 0;
+ ret = regmap_update_bits(data->regmap, BMA400_INT1_MAP_REG, intrmask, regval);
if (ret)
return ret;
- ret = regmap_update_bits(data->regmap, BMA400_INT_CONFIG0_REG, msk,
- field_value);
+ ret = regmap_update_bits(data->regmap, BMA400_INT_CONFIG0_REG, intrmask, regval);
if (ret)
return ret;
- set_mask_bits(&data->generic_event_en, msk, field_value);
+ set_mask_bits(&data->generic_event_en, intrmask, regval);
return 0;
}
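With the 8 mg/LSB encoding noted in the comment above, the initial values written while enabling work out as (illustrative):

	/*
	 * threshold: 0x0a * 8 mg = 80 mg
	 * duration:  0x0f ODR ticks before the interrupt asserts
	 */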
@@ -1240,21 +1280,21 @@ static int bma400_tap_event_en(struct bma400_data *data,
}
ret = regmap_update_bits(data->regmap, BMA400_INT12_MAP_REG,
- BMA400_S_TAP_MSK,
- FIELD_PREP(BMA400_S_TAP_MSK, state));
+ BMA400_INT_CONFIG1_S_TAP_MASK,
+ FIELD_PREP(BMA400_INT_CONFIG1_S_TAP_MASK, state));
if (ret)
return ret;
switch (dir) {
case IIO_EV_DIR_SINGLETAP:
- mask = BMA400_S_TAP_MSK;
- set_mask_bits(&field_value, BMA400_S_TAP_MSK,
- FIELD_PREP(BMA400_S_TAP_MSK, state));
+ mask = BMA400_INT_CONFIG1_S_TAP_MASK;
+ set_mask_bits(&field_value, BMA400_INT_CONFIG1_S_TAP_MASK,
+ FIELD_PREP(BMA400_INT_CONFIG1_S_TAP_MASK, state));
break;
case IIO_EV_DIR_DOUBLETAP:
- mask = BMA400_D_TAP_MSK;
- set_mask_bits(&field_value, BMA400_D_TAP_MSK,
- FIELD_PREP(BMA400_D_TAP_MSK, state));
+ mask = BMA400_INT_CONFIG1_D_TAP_MASK;
+ set_mask_bits(&field_value, BMA400_INT_CONFIG1_D_TAP_MASK,
+ FIELD_PREP(BMA400_INT_CONFIG1_D_TAP_MASK, state));
break;
default:
return -EINVAL;
@@ -1303,7 +1343,7 @@ static int bma400_write_event_config(struct iio_dev *indio_dev,
switch (type) {
case IIO_EV_TYPE_MAG:
mutex_lock(&data->mutex);
- ret = bma400_activity_event_en(data, dir, state);
+ ret = bma400_generic_event_en(data, dir, state);
mutex_unlock(&data->mutex);
return ret;
case IIO_EV_TYPE_GESTURE:
@@ -1336,18 +1376,6 @@ static int bma400_write_event_config(struct iio_dev *indio_dev,
}
}
-static int get_gen_config_reg(enum iio_event_direction dir)
-{
- switch (dir) {
- case IIO_EV_DIR_FALLING:
- return BMA400_GEN2INT_CONFIG0;
- case IIO_EV_DIR_RISING:
- return BMA400_GEN1INT_CONFIG0;
- default:
- return -EINVAL;
- }
-}
-
static int bma400_read_event_value(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
@@ -1356,22 +1384,25 @@ static int bma400_read_event_value(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct bma400_data *data = iio_priv(indio_dev);
- int ret, reg, reg_val, raw;
+ int ret, reg_val, raw;
+ enum bma400_generic_intr genintr;
+ const struct bma400_genintr_info *bma400_genintr;
if (chan->type != IIO_ACCEL)
return -EINVAL;
switch (type) {
case IIO_EV_TYPE_MAG:
- reg = get_gen_config_reg(dir);
- if (reg < 0)
+ bma400_genintr = get_bma400_genintr_info(dir);
+ if (!bma400_genintr)
return -EINVAL;
+ genintr = bma400_genintr->genintr;
*val2 = 0;
switch (info) {
case IIO_EV_INFO_VALUE:
ret = regmap_read(data->regmap,
- reg + BMA400_GEN_CONFIG2_OFF,
+ BMA400_GENINT_CONFIG_REG(genintr, 2),
val);
if (ret)
return ret;
@@ -1379,7 +1410,7 @@ static int bma400_read_event_value(struct iio_dev *indio_dev,
case IIO_EV_INFO_PERIOD:
mutex_lock(&data->mutex);
ret = regmap_bulk_read(data->regmap,
- reg + BMA400_GEN_CONFIG3_OFF,
+ BMA400_GENINT_CONFIG_REG(genintr, 3),
&data->duration,
sizeof(data->duration));
if (ret) {
@@ -1390,10 +1421,12 @@ static int bma400_read_event_value(struct iio_dev *indio_dev,
mutex_unlock(&data->mutex);
return IIO_VAL_INT;
case IIO_EV_INFO_HYSTERESIS:
- ret = regmap_read(data->regmap, reg, val);
+ ret = regmap_read(data->regmap,
+ BMA400_GENINT_CONFIG_REG(genintr, 0),
+ val);
if (ret)
return ret;
- *val = FIELD_GET(BMA400_GEN_HYST_MSK, *val);
+ *val = FIELD_GET(BMA400_GENINT_CONFIG0_HYST_MASK, *val);
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -1401,30 +1434,30 @@ static int bma400_read_event_value(struct iio_dev *indio_dev,
case IIO_EV_TYPE_GESTURE:
switch (info) {
case IIO_EV_INFO_VALUE:
- ret = regmap_read(data->regmap, BMA400_TAP_CONFIG,
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG_REG,
&reg_val);
if (ret)
return ret;
- *val = FIELD_GET(BMA400_TAP_SEN_MSK, reg_val);
+ *val = FIELD_GET(BMA400_TAP_CONFIG_SEN_MASK, reg_val);
return IIO_VAL_INT;
case IIO_EV_INFO_RESET_TIMEOUT:
- ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1,
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1_REG,
&reg_val);
if (ret)
return ret;
- raw = FIELD_GET(BMA400_TAP_QUIET_MSK, reg_val);
+ raw = FIELD_GET(BMA400_TAP_CONFIG1_QUIET_MASK, reg_val);
*val = 0;
*val2 = tap_reset_timeout[raw];
return IIO_VAL_INT_PLUS_MICRO;
case IIO_EV_INFO_TAP2_MIN_DELAY:
- ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1,
+ ret = regmap_read(data->regmap, BMA400_TAP_CONFIG1_REG,
&reg_val);
if (ret)
return ret;
- raw = FIELD_GET(BMA400_TAP_QUIETDT_MSK, reg_val);
+ raw = FIELD_GET(BMA400_TAP_CONFIG1_QUIETDT_MASK, reg_val);
*val = 0;
*val2 = double_tap2_min_delay[raw];
return IIO_VAL_INT_PLUS_MICRO;
@@ -1444,16 +1477,19 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
int val, int val2)
{
struct bma400_data *data = iio_priv(indio_dev);
- int reg, ret, raw;
+ int ret, raw;
+ enum bma400_generic_intr genintr;
+ const struct bma400_genintr_info *bma400_genintr;
if (chan->type != IIO_ACCEL)
return -EINVAL;
switch (type) {
case IIO_EV_TYPE_MAG:
- reg = get_gen_config_reg(dir);
- if (reg < 0)
+ bma400_genintr = get_bma400_genintr_info(dir);
+ if (!bma400_genintr)
return -EINVAL;
+ genintr = bma400_genintr->genintr;
switch (info) {
case IIO_EV_INFO_VALUE:
@@ -1461,7 +1497,7 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
return -EINVAL;
return regmap_write(data->regmap,
- reg + BMA400_GEN_CONFIG2_OFF,
+ BMA400_GENINT_CONFIG_REG(genintr, 2),
val);
case IIO_EV_INFO_PERIOD:
if (val < 1 || val > 65535)
@@ -1470,7 +1506,7 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
mutex_lock(&data->mutex);
put_unaligned_be16(val, &data->duration);
ret = regmap_bulk_write(data->regmap,
- reg + BMA400_GEN_CONFIG3_OFF,
+ BMA400_GENINT_CONFIG_REG(genintr, 3),
&data->duration,
sizeof(data->duration));
mutex_unlock(&data->mutex);
@@ -1479,9 +1515,10 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
if (val < 0 || val > 3)
return -EINVAL;
- return regmap_update_bits(data->regmap, reg,
- BMA400_GEN_HYST_MSK,
- FIELD_PREP(BMA400_GEN_HYST_MSK,
+ return regmap_update_bits(data->regmap,
+ BMA400_GENINT_CONFIG_REG(genintr, 0),
+ BMA400_GENINT_CONFIG0_HYST_MASK,
+ FIELD_PREP(BMA400_GENINT_CONFIG0_HYST_MASK,
val));
default:
return -EINVAL;
@@ -1493,9 +1530,9 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
return -EINVAL;
return regmap_update_bits(data->regmap,
- BMA400_TAP_CONFIG,
- BMA400_TAP_SEN_MSK,
- FIELD_PREP(BMA400_TAP_SEN_MSK,
+ BMA400_TAP_CONFIG_REG,
+ BMA400_TAP_CONFIG_SEN_MASK,
+ FIELD_PREP(BMA400_TAP_CONFIG_SEN_MASK,
val));
case IIO_EV_INFO_RESET_TIMEOUT:
raw = usec_to_tapreg_raw(val2, tap_reset_timeout);
@@ -1503,9 +1540,9 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
return -EINVAL;
return regmap_update_bits(data->regmap,
- BMA400_TAP_CONFIG1,
- BMA400_TAP_QUIET_MSK,
- FIELD_PREP(BMA400_TAP_QUIET_MSK,
+ BMA400_TAP_CONFIG1_REG,
+ BMA400_TAP_CONFIG1_QUIET_MASK,
+ FIELD_PREP(BMA400_TAP_CONFIG1_QUIET_MASK,
raw));
case IIO_EV_INFO_TAP2_MIN_DELAY:
raw = usec_to_tapreg_raw(val2, double_tap2_min_delay);
@@ -1513,9 +1550,9 @@ static int bma400_write_event_value(struct iio_dev *indio_dev,
return -EINVAL;
return regmap_update_bits(data->regmap,
- BMA400_TAP_CONFIG1,
- BMA400_TAP_QUIETDT_MSK,
- FIELD_PREP(BMA400_TAP_QUIETDT_MSK,
+ BMA400_TAP_CONFIG1_REG,
+ BMA400_TAP_CONFIG1_QUIETDT_MASK,
+ FIELD_PREP(BMA400_TAP_CONFIG1_QUIETDT_MASK,
raw));
default:
return -EINVAL;
@@ -1533,14 +1570,14 @@ static int bma400_data_rdy_trigger_set_state(struct iio_trigger *trig,
int ret;
ret = regmap_update_bits(data->regmap, BMA400_INT_CONFIG0_REG,
- BMA400_INT_DRDY_MSK,
- FIELD_PREP(BMA400_INT_DRDY_MSK, state));
+ BMA400_INT_CONFIG0_DRDY_MASK,
+ FIELD_PREP(BMA400_INT_CONFIG0_DRDY_MASK, state));
if (ret)
return ret;
return regmap_update_bits(data->regmap, BMA400_INT1_MAP_REG,
- BMA400_INT_DRDY_MSK,
- FIELD_PREP(BMA400_INT_DRDY_MSK, state));
+ BMA400_INT_CONFIG0_DRDY_MASK,
+ FIELD_PREP(BMA400_INT_CONFIG0_DRDY_MASK, state));
}
static const unsigned long bma400_avail_scan_masks[] = {
@@ -1578,7 +1615,7 @@ static irqreturn_t bma400_trigger_handler(int irq, void *p)
mutex_lock(&data->mutex);
/* bulk read six registers, with the base being the LSB register */
- ret = regmap_bulk_read(data->regmap, BMA400_X_AXIS_LSB_REG,
+ ret = regmap_bulk_read(data->regmap, BMA400_ACC_X_LSB_REG,
&data->buffer.buff, sizeof(data->buffer.buff));
if (ret)
goto unlock_err;
@@ -1628,13 +1665,13 @@ static irqreturn_t bma400_interrupt(int irq, void *private)
* Disable all advance interrupts if interrupt engine overrun occurs.
* See section 4.7 "Interrupt engine overrun" in datasheet v1.2.
*/
- if (FIELD_GET(BMA400_INT_ENG_OVRUN_MSK, le16_to_cpu(data->status))) {
+ if (FIELD_GET(BMA400_INT_STAT_ENG_OVRRUN_MASK, le16_to_cpu(data->status))) {
bma400_disable_adv_interrupt(data);
dev_err(data->dev, "Interrupt engine overrun\n");
goto unlock_err;
}
- if (FIELD_GET(BMA400_INT_S_TAP_MSK, le16_to_cpu(data->status)))
+ if (FIELD_GET(BMA400_INT_STAT1_S_TAP_MASK, le16_to_cpu(data->status)))
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
IIO_MOD_X_OR_Y_OR_Z,
@@ -1642,7 +1679,7 @@ static irqreturn_t bma400_interrupt(int irq, void *private)
IIO_EV_DIR_SINGLETAP),
timestamp);
- if (FIELD_GET(BMA400_INT_D_TAP_MSK, le16_to_cpu(data->status)))
+ if (FIELD_GET(BMA400_INT_STAT1_D_TAP_MASK, le16_to_cpu(data->status)))
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
IIO_MOD_X_OR_Y_OR_Z,
@@ -1650,10 +1687,10 @@ static irqreturn_t bma400_interrupt(int irq, void *private)
IIO_EV_DIR_DOUBLETAP),
timestamp);
- if (FIELD_GET(BMA400_INT_GEN1_MSK, le16_to_cpu(data->status)))
+ if (FIELD_GET(BMA400_INT_STAT0_GEN1_MASK, le16_to_cpu(data->status)))
ev_dir = IIO_EV_DIR_RISING;
- if (FIELD_GET(BMA400_INT_GEN2_MSK, le16_to_cpu(data->status)))
+ if (FIELD_GET(BMA400_INT_STAT0_GEN2_MASK, le16_to_cpu(data->status)))
ev_dir = IIO_EV_DIR_FALLING;
if (ev_dir != IIO_EV_DIR_NONE) {
@@ -1664,7 +1701,7 @@ static irqreturn_t bma400_interrupt(int irq, void *private)
timestamp);
}
- if (FIELD_GET(BMA400_STEP_STAT_MASK, le16_to_cpu(data->status))) {
+ if (FIELD_GET(BMA400_INT_STAT1_STEP_INT_MASK, le16_to_cpu(data->status))) {
iio_push_event(indio_dev,
IIO_MOD_EVENT_CODE(IIO_STEPS, 0, IIO_NO_MOD,
IIO_EV_TYPE_CHANGE,
@@ -1686,7 +1723,7 @@ static irqreturn_t bma400_interrupt(int irq, void *private)
}
}
- if (FIELD_GET(BMA400_INT_DRDY_MSK, le16_to_cpu(data->status))) {
+ if (FIELD_GET(BMA400_INT_STAT0_DRDY_MASK, le16_to_cpu(data->status))) {
mutex_unlock(&data->mutex);
iio_trigger_poll_nested(data->trig);
return IRQ_HANDLED;
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index be5fbb0c5d29..42ccf0316ce5 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -332,13 +332,10 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
struct device *dev = regmap_get_device(data->regmap);
int ret;
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
+ else
ret = pm_runtime_put_autosuspend(dev);
- }
-
if (ret < 0) {
dev_err(dev,
"Failed: %s for %d\n", __func__, on);
@@ -526,6 +523,10 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
const struct bmc150_accel_interrupt_info *info = intr->info;
int ret;
+ /* We do not always have an IRQ */
+ if (data->irq <= 0)
+ return 0;
+
if (state) {
if (atomic_inc_return(&intr->users) > 1)
return 0;
@@ -1699,6 +1700,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
}
if (irq > 0) {
+ data->irq = irq;
ret = devm_request_threaded_irq(dev, irq,
bmc150_accel_irq_handler,
bmc150_accel_irq_thread_handler,
diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
index 7a7baf52e595..e8f26198359f 100644
--- a/drivers/iio/accel/bmc150-accel.h
+++ b/drivers/iio/accel/bmc150-accel.h
@@ -58,6 +58,7 @@ enum bmc150_accel_trigger_id {
struct bmc150_accel_data {
struct regmap *regmap;
+ int irq;
struct regulator_bulk_data regulators[2];
struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index dea126f993c1..c7da90af0d2d 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -375,7 +375,6 @@ static int bmi088_accel_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
out_read_raw_pm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -419,7 +418,6 @@ static int bmi088_accel_write_raw(struct iio_dev *indio_dev,
return ret;
ret = bmi088_accel_set_scale(data, val, val2);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
@@ -428,7 +426,6 @@ static int bmi088_accel_write_raw(struct iio_dev *indio_dev,
return ret;
ret = bmi088_accel_set_sample_freq(data, val);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
default:
diff --git a/drivers/iio/accel/dmard06.c b/drivers/iio/accel/dmard06.c
index fb14894c66f9..33f225d73e7b 100644
--- a/drivers/iio/accel/dmard06.c
+++ b/drivers/iio/accel/dmard06.c
@@ -137,10 +137,8 @@ static int dmard06_probe(struct i2c_client *client)
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*dmard06));
- if (!indio_dev) {
- dev_err(&client->dev, "Failed to allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
dmard06 = iio_priv(indio_dev);
dmard06->client = client;
diff --git a/drivers/iio/accel/dmard09.c b/drivers/iio/accel/dmard09.c
index 4ec70ca6910d..d9290e3b9c46 100644
--- a/drivers/iio/accel/dmard09.c
+++ b/drivers/iio/accel/dmard09.c
@@ -95,10 +95,8 @@ static int dmard09_probe(struct i2c_client *client)
struct dmard09_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/accel/dmard10.c b/drivers/iio/accel/dmard10.c
index 71cd1928baa6..575e8510e1bd 100644
--- a/drivers/iio/accel/dmard10.c
+++ b/drivers/iio/accel/dmard10.c
@@ -191,10 +191,8 @@ static int dmard10_probe(struct i2c_client *client)
return (ret < 0) ? ret : -ENODEV;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index b10a30960e1e..8763e91c63d2 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -222,7 +222,6 @@ static int fxls8962af_power_off(struct fxls8962af_data *data)
struct device *dev = regmap_get_device(data->regmap);
int ret;
- pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
if (ret)
dev_err(dev, "failed to power off\n");
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 6aefe8221296..2823ddde4bf2 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -636,10 +636,8 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
if (on)
ret = pm_runtime_resume_and_get(&data->client->dev);
- else {
- pm_runtime_mark_last_busy(&data->client->dev);
+ else
ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
if (ret < 0) {
dev_err(&data->client->dev,
"Failed: %s for %d\n", __func__, on);
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index cfc31265cdd0..4717d80fc24a 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -151,7 +151,6 @@ static int kxsd9_write_raw(struct iio_dev *indio_dev,
ret = kxsd9_write_scale(indio_dev, val2);
}
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return ret;
@@ -199,7 +198,6 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
}
error_ret:
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return ret;
@@ -250,7 +248,6 @@ static int kxsd9_buffer_postdisable(struct iio_dev *indio_dev)
{
struct kxsd9_state *st = iio_priv(indio_dev);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index e2853090fa6e..3e494f9ddc56 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -169,10 +169,8 @@ static int mc3230_probe(struct i2c_client *client)
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->chip_info = chip_info;
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index d0a16f227903..be3213600cf4 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -192,10 +192,8 @@ static int mma7660_probe(struct i2c_client *client)
struct mma7660_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index aba444a980d9..15172ba2972c 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -224,13 +224,10 @@ static int mma8452_set_runtime_pm_state(struct i2c_client *client, bool on)
#ifdef CONFIG_PM
int ret;
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(&client->dev);
- } else {
- pm_runtime_mark_last_busy(&client->dev);
+ else
ret = pm_runtime_put_autosuspend(&client->dev);
- }
-
if (ret < 0) {
dev_err(&client->dev,
"failed to change power state to %d\n", on);
diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
index 3e7d9b79ed0e..2ccb1fb19b96 100644
--- a/drivers/iio/accel/mma9551_core.c
+++ b/drivers/iio/accel/mma9551_core.c
@@ -671,11 +671,8 @@ int mma9551_set_power_state(struct i2c_client *client, bool on)
if (on)
ret = pm_runtime_resume_and_get(&client->dev);
- else {
- pm_runtime_mark_last_busy(&client->dev);
+ else
ret = pm_runtime_put_autosuspend(&client->dev);
- }
-
if (ret < 0) {
dev_err(&client->dev,
"failed to change power state to %d\n", on);
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index 3e10225410e8..5eace0de3750 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -607,7 +607,6 @@ static int msa311_read_raw_data(struct iio_dev *indio_dev,
err = msa311_get_axis(msa311, chan, &axis);
mutex_unlock(&msa311->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
iio_device_release_direct(indio_dev);
@@ -741,7 +740,6 @@ static int msa311_write_scale(struct iio_dev *indio_dev, int val, int val2)
break;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (err)
@@ -781,7 +779,6 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
break;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
iio_device_release_direct(indio_dev);
@@ -832,7 +829,6 @@ static int msa311_debugfs_reg_access(struct iio_dev *indio_dev,
mutex_unlock(&msa311->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (err)
@@ -855,7 +851,6 @@ static int msa311_buffer_postdisable(struct iio_dev *indio_dev)
struct msa311_priv *msa311 = iio_priv(indio_dev);
struct device *dev = msa311->dev;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -990,7 +985,7 @@ static int msa311_check_partid(struct msa311_priv *msa311)
msa311->chip_name = devm_kasprintf(dev, GFP_KERNEL,
"msa311-%02x", partid);
if (!msa311->chip_name)
- return dev_err_probe(dev, -ENOMEM, "can't alloc chip name\n");
+ return -ENOMEM;
return 0;
}
@@ -1069,8 +1064,7 @@ static int msa311_setup_interrupts(struct msa311_priv *msa311)
trig = devm_iio_trigger_alloc(dev, "%s-new-data", msa311->chip_name);
if (!trig)
- return dev_err_probe(dev, -ENOMEM,
- "can't allocate newdata trigger\n");
+ return -ENOMEM;
msa311->new_data_trig = trig;
msa311->new_data_trig->ops = &msa311_new_data_trig_ops;
@@ -1153,8 +1147,7 @@ static int msa311_probe(struct i2c_client *i2c)
indio_dev = devm_iio_device_alloc(dev, sizeof(*msa311));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "IIO device allocation failed\n");
+ return -ENOMEM;
msa311 = iio_priv(indio_dev);
msa311->dev = dev;
@@ -1195,7 +1188,7 @@ static int msa311_probe(struct i2c_client *i2c)
*/
err = devm_add_action_or_reset(dev, msa311_powerdown, msa311);
if (err)
- return dev_err_probe(dev, err, "can't add powerdown action\n");
+ return err;
err = pm_runtime_set_active(dev);
if (err)
@@ -1231,7 +1224,6 @@ static int msa311_probe(struct i2c_client *i2c)
if (err)
return err;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
err = devm_iio_device_register(dev, indio_dev);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 89569ce221d7..f31c6ab3392d 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -504,10 +504,8 @@ static int stk8312_probe(struct i2c_client *client)
struct stk8312_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index c1d7e7dcb09b..384f1fbcbcb3 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -385,10 +385,8 @@ static int stk8ba50_probe(struct i2c_client *client)
struct stk8ba50_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/adc/88pm886-gpadc.c b/drivers/iio/adc/88pm886-gpadc.c
new file mode 100644
index 000000000000..cffe35136685
--- /dev/null
+++ b/drivers/iio/adc/88pm886-gpadc.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025, Duje Mihanović <duje@dujemihanovic.xyz>
+ */
+
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <asm/byteorder.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+#include <linux/mfd/88pm886.h>
+
+struct pm886_gpadc {
+ struct regmap *map;
+};
+
+enum pm886_gpadc_channel {
+ VSC_CHAN,
+ VCHG_PWR_CHAN,
+ VCF_OUT_CHAN,
+ VBAT_CHAN,
+ VBAT_SLP_CHAN,
+ VBUS_CHAN,
+
+ GPADC0_CHAN,
+ GPADC1_CHAN,
+ GPADC2_CHAN,
+ GPADC3_CHAN,
+
+ GND_DET1_CHAN,
+ GND_DET2_CHAN,
+ MIC_DET_CHAN,
+
+ TINT_CHAN,
+};
+
+static const int pm886_gpadc_regs[] = {
+ [VSC_CHAN] = PM886_REG_GPADC_VSC,
+ [VCHG_PWR_CHAN] = PM886_REG_GPADC_VCHG_PWR,
+ [VCF_OUT_CHAN] = PM886_REG_GPADC_VCF_OUT,
+ [VBAT_CHAN] = PM886_REG_GPADC_VBAT,
+ [VBAT_SLP_CHAN] = PM886_REG_GPADC_VBAT_SLP,
+ [VBUS_CHAN] = PM886_REG_GPADC_VBUS,
+
+ [GPADC0_CHAN] = PM886_REG_GPADC_GPADC0,
+ [GPADC1_CHAN] = PM886_REG_GPADC_GPADC1,
+ [GPADC2_CHAN] = PM886_REG_GPADC_GPADC2,
+ [GPADC3_CHAN] = PM886_REG_GPADC_GPADC3,
+
+ [GND_DET1_CHAN] = PM886_REG_GPADC_GND_DET1,
+ [GND_DET2_CHAN] = PM886_REG_GPADC_GND_DET2,
+ [MIC_DET_CHAN] = PM886_REG_GPADC_MIC_DET,
+
+ [TINT_CHAN] = PM886_REG_GPADC_TINT,
+};
+
+#define ADC_CHANNEL_VOLTAGE(index, lsb, name) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .address = lsb, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = name, \
+}
+
+#define ADC_CHANNEL_RESISTANCE(index, lsb, name) \
+{ \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .channel = index, \
+ .address = lsb, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .datasheet_name = name, \
+}
+
+#define ADC_CHANNEL_TEMPERATURE(index, lsb, name) \
+{ \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = index, \
+ .address = lsb, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .datasheet_name = name, \
+}
+
+static const struct iio_chan_spec pm886_gpadc_channels[] = {
+ ADC_CHANNEL_VOLTAGE(VSC_CHAN, 1367, "vsc"),
+ ADC_CHANNEL_VOLTAGE(VCHG_PWR_CHAN, 1709, "vchg_pwr"),
+ ADC_CHANNEL_VOLTAGE(VCF_OUT_CHAN, 1367, "vcf_out"),
+ ADC_CHANNEL_VOLTAGE(VBAT_CHAN, 1367, "vbat"),
+ ADC_CHANNEL_VOLTAGE(VBAT_SLP_CHAN, 1367, "vbat_slp"),
+ ADC_CHANNEL_VOLTAGE(VBUS_CHAN, 1709, "vbus"),
+
+ ADC_CHANNEL_RESISTANCE(GPADC0_CHAN, 342, "gpadc0"),
+ ADC_CHANNEL_RESISTANCE(GPADC1_CHAN, 342, "gpadc1"),
+ ADC_CHANNEL_RESISTANCE(GPADC2_CHAN, 342, "gpadc2"),
+ ADC_CHANNEL_RESISTANCE(GPADC3_CHAN, 342, "gpadc3"),
+
+ ADC_CHANNEL_VOLTAGE(GND_DET1_CHAN, 342, "gnddet1"),
+ ADC_CHANNEL_VOLTAGE(GND_DET2_CHAN, 342, "gnddet2"),
+ ADC_CHANNEL_VOLTAGE(MIC_DET_CHAN, 1367, "mic_det"),
+
+ ADC_CHANNEL_TEMPERATURE(TINT_CHAN, 104, "tint"),
+};
+
+static const struct regmap_config pm886_gpadc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = PM886_GPADC_MAX_REGISTER,
+};
+
+static int gpadc_get_raw(struct iio_dev *iio, enum pm886_gpadc_channel chan)
+{
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+ __be16 buf;
+ int ret;
+
+ ret = regmap_bulk_read(gpadc->map, pm886_gpadc_regs[chan], &buf, sizeof(buf));
+ if (ret)
+ return ret;
+
+ return be16_to_cpu(buf) >> 4;
+}
+
+static int
+gpadc_set_bias(struct pm886_gpadc *gpadc, enum pm886_gpadc_channel chan, bool on)
+{
+ unsigned int gpadc_num = chan - GPADC0_CHAN;
+ unsigned int bits = BIT(gpadc_num + 4) | BIT(gpadc_num);
+
+ return regmap_assign_bits(gpadc->map, PM886_REG_GPADC_CONFIG(0x14), bits, on);
+}
+
+static int
+gpadc_find_bias_current(struct iio_dev *iio, struct iio_chan_spec const *chan,
+ unsigned int *raw_uV, unsigned int *raw_uA)
+{
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+ unsigned int gpadc_num = chan->channel - GPADC0_CHAN;
+ unsigned int reg = PM886_REG_GPADC_CONFIG(0xb + gpadc_num);
+ unsigned long lsb = chan->address;
+ int ret;
+
+ for (unsigned int i = 0; i < PM886_GPADC_BIAS_LEVELS; i++) {
+ ret = regmap_update_bits(gpadc->map, reg, GENMASK(3, 0), i);
+ if (ret)
+ return ret;
+
+ /* Wait for the new bias level to apply. */
+ fsleep(5 * USEC_PER_MSEC);
+
+ *raw_uA = PM886_GPADC_INDEX_TO_BIAS_uA(i);
+ *raw_uV = gpadc_get_raw(iio, chan->channel) * lsb;
+
+ /*
+ * Vendor kernel errors out above 1.25 V, but testing shows
+ * that the resistance of the battery detection channel (GPADC2
+ * on coreprimevelte) reaches about 1.4 MΩ when the battery is
+ * removed, which can't be measured with such a low upper
+ * limit. Therefore, to be able to detect the battery without
+ * ugly externs as used in the vendor fuel gauge driver,
+ * increase this limit a bit.
+ */
+ if (WARN_ON(*raw_uV > 1500 * (MICRO / MILLI)))
+ return -EIO;
+
+ /*
+ * Vendor kernel errors out under 300 mV, but for the same
+ * reason as above (except the channel hovers around 3.5 kΩ
+ * with battery present) reduce this limit.
+ */
+ if (*raw_uV < 200 * (MICRO / MILLI)) {
+ dev_dbg(&iio->dev, "bad bias for chan %d: %d uA @ %d uV\n",
+ chan->channel, *raw_uA, *raw_uV);
+ continue;
+ }
+
+ dev_dbg(&iio->dev, "good bias for chan %d: %d uA @ %d uV\n",
+ chan->channel, *raw_uA, *raw_uV);
+ return 0;
+ }
+
+ dev_err(&iio->dev, "failed to find good bias for chan %d\n", chan->channel);
+ return -EINVAL;
+}
+
+static int
+gpadc_get_resistance_ohm(struct iio_dev *iio, struct iio_chan_spec const *chan)
+{
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+ unsigned int raw_uV, raw_uA;
+ int ret;
+
+ ret = gpadc_set_bias(gpadc, chan->channel, true);
+ if (ret)
+ goto out;
+
+ ret = gpadc_find_bias_current(iio, chan, &raw_uV, &raw_uA);
+ if (ret)
+ goto out;
+
+ ret = DIV_ROUND_CLOSEST(raw_uV, raw_uA);
+out:
+ gpadc_set_bias(gpadc, chan->channel, false);
+ return ret;
+}
+
+static int
+__pm886_gpadc_read_raw(struct iio_dev *iio, struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ unsigned long lsb = chan->address;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ *val = gpadc_get_raw(iio, chan->channel);
+ if (*val < 0)
+ return *val;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = lsb;
+
+ if (chan->type == IIO_VOLTAGE) {
+ *val2 = MILLI;
+ return IIO_VAL_FRACTIONAL;
+ } else {
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Raw value is 104 millikelvin/LSB; offset it so the scaled result is in millicelsius. */
+ *val = ABSOLUTE_ZERO_MILLICELSIUS;
+ *val2 = lsb;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_PROCESSED:
+ *val = gpadc_get_resistance_ohm(iio, chan);
+ if (*val < 0)
+ return *val;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pm886_gpadc_read_raw(struct iio_dev *iio, struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct device *dev = iio->dev.parent;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = __pm886_gpadc_read_raw(iio, chan, val, val2, mask);
+
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+static int pm886_gpadc_hw_enable(struct regmap *map)
+{
+ const u8 config[] = {
+ PM886_GPADC_CONFIG1_EN_ALL,
+ PM886_GPADC_CONFIG2_EN_ALL,
+ PM886_GPADC_GND_DET2_EN,
+ };
+ int ret;
+
+ /* Enable the ADC block. */
+ ret = regmap_set_bits(map, PM886_REG_GPADC_CONFIG(0x6), BIT(0));
+ if (ret)
+ return ret;
+
+ /* Enable all channels. */
+ return regmap_bulk_write(map, PM886_REG_GPADC_CONFIG(0x1), config, ARRAY_SIZE(config));
+}
+
+static int pm886_gpadc_hw_disable(struct regmap *map)
+{
+ return regmap_clear_bits(map, PM886_REG_GPADC_CONFIG(0x6), BIT(0));
+}
+
+static const struct iio_info pm886_gpadc_iio_info = {
+ .read_raw = pm886_gpadc_read_raw,
+};
+
+static int pm886_gpadc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pm886_chip *chip = dev_get_drvdata(dev->parent);
+ struct i2c_client *client = chip->client;
+ struct pm886_gpadc *gpadc;
+ struct i2c_client *page;
+ struct iio_dev *iio;
+ int ret;
+
+ iio = devm_iio_device_alloc(dev, sizeof(*gpadc));
+ if (!iio)
+ return -ENOMEM;
+
+ gpadc = iio_priv(iio);
+ dev_set_drvdata(dev, iio);
+
+ page = devm_i2c_new_dummy_device(dev, client->adapter,
+ client->addr + PM886_PAGE_OFFSET_GPADC);
+ if (IS_ERR(page))
+ return dev_err_probe(dev, PTR_ERR(page), "Failed to initialize GPADC page\n");
+
+ gpadc->map = devm_regmap_init_i2c(page, &pm886_gpadc_regmap_config);
+ if (IS_ERR(gpadc->map))
+ return dev_err_probe(dev, PTR_ERR(gpadc->map),
+ "Failed to initialize GPADC regmap\n");
+
+ iio->name = "88pm886-gpadc";
+ iio->modes = INDIO_DIRECT_MODE;
+ iio->info = &pm886_gpadc_iio_info;
+ iio->channels = pm886_gpadc_channels;
+ iio->num_channels = ARRAY_SIZE(pm886_gpadc_channels);
+ device_set_node(&iio->dev, dev_fwnode(dev->parent));
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_iio_device_register(dev, iio);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register ADC\n");
+
+ return 0;
+}
+
+static int pm886_gpadc_runtime_resume(struct device *dev)
+{
+ struct iio_dev *iio = dev_get_drvdata(dev);
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+
+ return pm886_gpadc_hw_enable(gpadc->map);
+}
+
+static int pm886_gpadc_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *iio = dev_get_drvdata(dev);
+ struct pm886_gpadc *gpadc = iio_priv(iio);
+
+ return pm886_gpadc_hw_disable(gpadc->map);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(pm886_gpadc_pm_ops,
+ pm886_gpadc_runtime_suspend,
+ pm886_gpadc_runtime_resume, NULL);
+
+static const struct platform_device_id pm886_gpadc_id[] = {
+ { "88pm886-gpadc" },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, pm886_gpadc_id);
+
+static struct platform_driver pm886_gpadc_driver = {
+ .driver = {
+ .name = "88pm886-gpadc",
+ .pm = pm_ptr(&pm886_gpadc_pm_ops),
+ },
+ .probe = pm886_gpadc_probe,
+ .id_table = pm886_gpadc_id,
+};
+module_platform_driver(pm886_gpadc_driver);
+
+MODULE_AUTHOR("Duje Mihanović <duje@dujemihanovic.xyz>");
+MODULE_DESCRIPTION("Marvell 88PM886 GPADC driver");
+MODULE_LICENSE("GPL");
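A short worked pass through the conversions in this new driver (channel data from the table above; illustrative only): the ADC returns a big-endian 16-bit word with the 12-bit sample left-justified, and a voltage channel scales by lsb/1000 mV per count.

	/*
	 * gpadc_get_raw(): be16 0x8ca0 >> 4 = 0x08ca = 2250
	 * vbat (lsb = 1367): 2250 * 1.367 mV ~= 3076 mV
	 * gpadc0 resistance: raw_uV / raw_uA, e.g. 700000 uV at 200 uA
	 *	-> 3500 ohm, the ~3.5 kohm battery-present level mentioned
	 *	in gpadc_find_bias_current()
	 */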
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 24f2572c487e..58da8255525e 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -9,6 +9,19 @@ menu "Analog to digital converters"
config IIO_ADC_HELPER
tristate
+config 88PM886_GPADC
+ tristate "Marvell 88PM886 GPADC driver"
+ depends on MFD_88PM886_PMIC
+ default MFD_88PM886_PMIC
+ help
+ Say Y here to enable support for the GPADC (General Purpose ADC)
+ found on the Marvell 88PM886 PMIC. The GPADC measures various
+ internal voltages and temperatures, including (but not limited to)
+ system, battery and USB Vbus.
+
+ To compile this driver as a module, choose M here: the module will be
+ called 88pm886-gpadc.
+
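Once the driver binds, the GPADC appears as a regular IIO device, so a reading can be sanity-checked from sysfs by combining _raw and _scale (device index and values are illustrative):

	in_voltage3_raw   (vbat)  3000
	in_voltage3_scale         1.367
	=> 3000 * 1.367 ~= 4101 mV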
config AB8500_GPADC
bool "ST-Ericsson AB8500 GPADC driver"
depends on AB8500_CORE && REGULATOR_AB8500
@@ -389,6 +402,7 @@ config AD7779
depends on SPI
select CRC8
select IIO_BUFFER
+ select IIO_BACKEND
help
Say yes here to build support for Analog Devices AD777X family
(AD7770, AD7771, AD7779) analog to digital converter (ADC).
@@ -507,6 +521,25 @@ config AD9467
To compile this driver as a module, choose M here: the module will be
called ad9467.
+config ADE9000
+ tristate "Analog Devices ADE9000 Multiphase Energy, and Power Quality Monitoring IC Driver"
+ depends on SPI
+ select REGMAP_SPI
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ help
+ Say yes here to build support for the Analog Devices ADE9000,
+ a highly accurate, multiphase energy and power quality monitoring
+ integrated circuit.
+
+ The device features high-precision analog-to-digital converters
+ and digital signal processing to compute RMS values, power factor,
+ frequency, and harmonic analysis. It supports SPI communication
+ and provides buffered data output through the IIO framework.
+
+ To compile this driver as a module, choose M here: the module will
+ be called ade9000.
+
config ADI_AXI_ADC
tristate "Analog Devices Generic AXI ADC IP core driver"
depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_INTEL_SOCFPGA || COMPILE_TEST
@@ -766,6 +799,17 @@ config INGENIC_ADC
This driver can also be built as a module. If so, the module will be
called ingenic_adc.
+config INTEL_DC_TI_ADC
+ tristate "Intel Bay Trail / Cherry Trail Dollar Cove TI ADC driver"
+ depends on INTEL_SOC_PMIC_CHTDC_TI
+ help
+ Say yes here to have support for the Dollar Cove TI PMIC ADC device.
+ Depending on platform configuration, this general-purpose ADC can be
+ used to measure inputs such as battery voltage and thermal resistors.
+
+ To compile this driver as a module, choose M here: the module will be
+ called intel_dc_ti_adc.
+
config INTEL_MRFLD_ADC
tristate "Intel Merrifield Basin Cove ADC driver"
depends on INTEL_SOC_PMIC_MRFLD
@@ -976,6 +1020,16 @@ config MAX1363
To compile this driver as a module, choose M here: the module will be
called max1363.
+config MAX14001
+ tristate "Analog Devices MAX14001/MAX14002 ADC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices MAX14001/MAX14002
+ Configurable, Isolated 10-bit ADCs for Multi-Range Binary Inputs.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max14001.
+
config MAX34408
tristate "Maxim max34408/max344089 ADC driver"
depends on I2C
@@ -1298,6 +1352,16 @@ config RN5T618_ADC
This driver can also be built as a module. If so, the module
will be called rn5t618-adc.
+config ROHM_BD79112
+ tristate "Rohm BD79112 ADC driver"
+ depends on SPI && GPIOLIB
+ select REGMAP_SPI
+ select IIO_ADC_HELPER
+ help
+ Say yes here to build support for the ROHM BD79112 ADC. The
+ ROHM BD79112 is a 12-bit, 32-channel SAR ADC. Its analog inputs
+ can also be used as GPIOs.
+
config ROHM_BD79124
tristate "Rohm BD79124 ADC driver"
depends on I2C && GPIOLIB
@@ -1349,6 +1413,27 @@ config RZG2L_ADC
To compile this driver as a module, choose M here: the
module will be called rzg2l_adc.
+config RZN1_ADC
+ tristate "Renesas RZ/N1 ADC driver"
+ depends on ARCH_RZN1 || COMPILE_TEST
+ help
+ Say yes here to build support for the ADC found in the Renesas
+ RZ/N1 family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rzn1-adc.
+
+config RZT2H_ADC
+ tristate "Renesas RZ/T2H / RZ/N2H ADC driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select IIO_ADC_HELPER
+ help
+ Say yes here to build support for the ADC found in the Renesas
+ RZ/T2H / RZ/N2H SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rzt2h_adc.
+
config SC27XX_ADC
tristate "Spreadtrum SC27xx series PMICs ADC"
depends on MFD_SC27XX_PMIC || COMPILE_TEST
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 1c6ca5fd4b6d..7cc8f9a12f76 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -6,6 +6,7 @@
obj-$(CONFIG_IIO_ADC_HELPER) += industrialio-adc.o
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_88PM886_GPADC) += 88pm886-gpadc.o
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD4000) += ad4000.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_AD7944) += ad7944.o
obj-$(CONFIG_AD7949) += ad7949.o
obj-$(CONFIG_AD799X) += ad799x.o
obj-$(CONFIG_AD9467) += ad9467.o
+obj-$(CONFIG_ADE9000) += ade9000.o
obj-$(CONFIG_ADI_AXI_ADC) += adi-axi-adc.o
obj-$(CONFIG_ASPEED_ADC) += aspeed_adc.o
obj-$(CONFIG_AT91_ADC) += at91_adc.o
@@ -70,6 +72,7 @@ obj-$(CONFIG_IMX8QXP_ADC) += imx8qxp-adc.o
obj-$(CONFIG_IMX93_ADC) += imx93_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_INGENIC_ADC) += ingenic-adc.o
+obj-$(CONFIG_INTEL_DC_TI_ADC) += intel_dc_ti_adc.o
obj-$(CONFIG_INTEL_MRFLD_ADC) += intel_mrfld_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
@@ -86,6 +89,7 @@ obj-$(CONFIG_MAX11205) += max11205.o
obj-$(CONFIG_MAX11410) += max11410.o
obj-$(CONFIG_MAX1241) += max1241.o
obj-$(CONFIG_MAX1363) += max1363.o
+obj-$(CONFIG_MAX14001) += max14001.o
obj-$(CONFIG_MAX34408) += max34408.o
obj-$(CONFIG_MAX77541_ADC) += max77541-adc.o
obj-$(CONFIG_MAX9611) += max9611.o
@@ -116,9 +120,12 @@ obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
obj-$(CONFIG_RICHTEK_RTQ6056) += rtq6056.o
obj-$(CONFIG_RN5T618_ADC) += rn5t618-adc.o
+obj-$(CONFIG_ROHM_BD79112) += rohm-bd79112.o
obj-$(CONFIG_ROHM_BD79124) += rohm-bd79124.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o
+obj-$(CONFIG_RZN1_ADC) += rzn1-adc.o
+obj-$(CONFIG_RZT2H_ADC) += rzt2h_adc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
obj-$(CONFIG_SOPHGO_CV1800B_ADC) += sophgo-cv1800b-adc.o
diff --git a/drivers/iio/adc/ab8500-gpadc.c b/drivers/iio/adc/ab8500-gpadc.c
index f3b057f92310..8eaa1dd6a89b 100644
--- a/drivers/iio/adc/ab8500-gpadc.c
+++ b/drivers/iio/adc/ab8500-gpadc.c
@@ -607,7 +607,6 @@ static int ab8500_gpadc_read(struct ab8500_gpadc *gpadc,
}
/* This eventually drops the regulator */
- pm_runtime_mark_last_busy(gpadc->dev);
pm_runtime_put_autosuspend(gpadc->dev);
return (high_data << 8) | low_data;
diff --git a/drivers/iio/adc/ad4030.c b/drivers/iio/adc/ad4030.c
index 1bc2f9a22470..68446db9bef1 100644
--- a/drivers/iio/adc/ad4030.c
+++ b/drivers/iio/adc/ad4030.c
@@ -385,7 +385,7 @@ static int ad4030_get_chan_scale(struct iio_dev *indio_dev,
struct ad4030_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
- scan_type = iio_get_current_scan_type(indio_dev, st->chip->channels);
+ scan_type = iio_get_current_scan_type(indio_dev, chan);
if (IS_ERR(scan_type))
return PTR_ERR(scan_type);
@@ -852,8 +852,8 @@ static int ad4030_read_label(struct iio_dev *indio_dev,
char *label)
{
if (chan->differential)
- return sprintf(label, "differential%lu\n", chan->address);
- return sprintf(label, "common-mode%lu\n", chan->address);
+ return sysfs_emit(label, "differential%lu\n", chan->address);
+ return sysfs_emit(label, "common-mode%lu\n", chan->address);
}
static int ad4030_get_current_scan_type(const struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ad4080.c b/drivers/iio/adc/ad4080.c
index 6e61787ed321..7cf3b6ed7940 100644
--- a/drivers/iio/adc/ad4080.c
+++ b/drivers/iio/adc/ad4080.c
@@ -125,7 +125,12 @@
/* Miscellaneous Definitions */
#define AD4080_SPI_READ BIT(7)
-#define AD4080_CHIP_ID GENMASK(2, 0)
+#define AD4080_CHIP_ID 0x0050
+#define AD4081_CHIP_ID 0x0051
+#define AD4083_CHIP_ID 0x0053
+#define AD4084_CHIP_ID 0x0054
+#define AD4086_CHIP_ID 0x0056
+#define AD4087_CHIP_ID 0x0057
#define AD4080_LVDS_CNV_CLK_CNT_MAX 7
@@ -167,6 +172,7 @@ struct ad4080_chip_info {
const unsigned int (*scale_table)[2];
const struct iio_chan_spec *channels;
unsigned int num_channels;
+ unsigned int lvds_cnv_clk_cnt_max;
};
struct ad4080_state {
@@ -414,23 +420,35 @@ static struct iio_chan_spec_ext_info ad4080_ext_info[] = {
{ }
};
-static const struct iio_chan_spec ad4080_channel = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
- .info_mask_shared_by_all_available =
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
- .ext_info = ad4080_ext_info,
- .scan_index = 0,
- .scan_type = {
- .sign = 's',
- .realbits = 20,
- .storagebits = 32,
- },
-};
+#define AD4080_CHANNEL_DEFINE(bits, storage) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .ext_info = ad4080_ext_info, \
+ .scan_index = 0, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = (bits), \
+ .storagebits = (storage), \
+ }, \
+}
+
+static const struct iio_chan_spec ad4080_channel = AD4080_CHANNEL_DEFINE(20, 32);
+
+static const struct iio_chan_spec ad4081_channel = AD4080_CHANNEL_DEFINE(20, 32);
+
+static const struct iio_chan_spec ad4083_channel = AD4080_CHANNEL_DEFINE(16, 16);
+
+static const struct iio_chan_spec ad4084_channel = AD4080_CHANNEL_DEFINE(16, 16);
+
+static const struct iio_chan_spec ad4086_channel = AD4080_CHANNEL_DEFINE(14, 16);
+
+static const struct iio_chan_spec ad4087_channel = AD4080_CHANNEL_DEFINE(14, 16);
static const struct ad4080_chip_info ad4080_chip_info = {
.name = "ad4080",
@@ -439,13 +457,65 @@ static const struct ad4080_chip_info ad4080_chip_info = {
.num_scales = ARRAY_SIZE(ad4080_scale_table),
.num_channels = 1,
.channels = &ad4080_channel,
+ .lvds_cnv_clk_cnt_max = AD4080_LVDS_CNV_CLK_CNT_MAX,
+};
+
+static const struct ad4080_chip_info ad4081_chip_info = {
+ .name = "ad4081",
+ .product_id = AD4081_CHIP_ID,
+ .scale_table = ad4080_scale_table,
+ .num_scales = ARRAY_SIZE(ad4080_scale_table),
+ .num_channels = 1,
+ .channels = &ad4081_channel,
+ .lvds_cnv_clk_cnt_max = 2,
+};
+
+static const struct ad4080_chip_info ad4083_chip_info = {
+ .name = "ad4083",
+ .product_id = AD4083_CHIP_ID,
+ .scale_table = ad4080_scale_table,
+ .num_scales = ARRAY_SIZE(ad4080_scale_table),
+ .num_channels = 1,
+ .channels = &ad4083_channel,
+ .lvds_cnv_clk_cnt_max = 5,
+};
+
+static const struct ad4080_chip_info ad4084_chip_info = {
+ .name = "ad4084",
+ .product_id = AD4084_CHIP_ID,
+ .scale_table = ad4080_scale_table,
+ .num_scales = ARRAY_SIZE(ad4080_scale_table),
+ .num_channels = 1,
+ .channels = &ad4084_channel,
+ .lvds_cnv_clk_cnt_max = 2,
+};
+
+static const struct ad4080_chip_info ad4086_chip_info = {
+ .name = "ad4086",
+ .product_id = AD4086_CHIP_ID,
+ .scale_table = ad4080_scale_table,
+ .num_scales = ARRAY_SIZE(ad4080_scale_table),
+ .num_channels = 1,
+ .channels = &ad4086_channel,
+ .lvds_cnv_clk_cnt_max = 4,
+};
+
+static const struct ad4080_chip_info ad4087_chip_info = {
+ .name = "ad4087",
+ .product_id = AD4087_CHIP_ID,
+ .scale_table = ad4080_scale_table,
+ .num_scales = ARRAY_SIZE(ad4080_scale_table),
+ .num_channels = 1,
+ .channels = &ad4087_channel,
+ .lvds_cnv_clk_cnt_max = 1,
};
static int ad4080_setup(struct iio_dev *indio_dev)
{
struct ad4080_state *st = iio_priv(indio_dev);
struct device *dev = regmap_get_device(st->regmap);
- unsigned int id;
+ __le16 id_le;
+ u16 id;
int ret;
ret = regmap_write(st->regmap, AD4080_REG_INTERFACE_CONFIG_A,
@@ -458,11 +528,13 @@ static int ad4080_setup(struct iio_dev *indio_dev)
if (ret)
return ret;
- ret = regmap_read(st->regmap, AD4080_REG_CHIP_TYPE, &id);
+ ret = regmap_bulk_read(st->regmap, AD4080_REG_PRODUCT_ID_L, &id_le,
+ sizeof(id_le));
if (ret)
return ret;
- if (id != AD4080_CHIP_ID)
+ id = le16_to_cpu(id_le);
+ if (id != st->info->product_id)
dev_info(dev, "Unrecognized CHIP_ID 0x%X\n", id);
ret = regmap_set_bits(st->regmap, AD4080_REG_GPIO_CONFIG_A,
@@ -488,7 +560,7 @@ static int ad4080_setup(struct iio_dev *indio_dev)
AD4080_REG_ADC_DATA_INTF_CONFIG_B,
AD4080_ADC_DATA_INTF_CONFIG_B_LVDS_CNV_CLK_CNT_MSK,
FIELD_PREP(AD4080_ADC_DATA_INTF_CONFIG_B_LVDS_CNV_CLK_CNT_MSK,
- AD4080_LVDS_CNV_CLK_CNT_MAX));
+ st->info->lvds_cnv_clk_cnt_max));
if (ret)
return ret;
@@ -593,12 +665,22 @@ static int ad4080_probe(struct spi_device *spi)
static const struct spi_device_id ad4080_id[] = {
{ "ad4080", (kernel_ulong_t)&ad4080_chip_info },
+ { "ad4081", (kernel_ulong_t)&ad4081_chip_info },
+ { "ad4083", (kernel_ulong_t)&ad4083_chip_info },
+ { "ad4084", (kernel_ulong_t)&ad4084_chip_info },
+ { "ad4086", (kernel_ulong_t)&ad4086_chip_info },
+ { "ad4087", (kernel_ulong_t)&ad4087_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad4080_id);
static const struct of_device_id ad4080_of_match[] = {
{ .compatible = "adi,ad4080", &ad4080_chip_info },
+ { .compatible = "adi,ad4081", &ad4081_chip_info },
+ { .compatible = "adi,ad4083", &ad4083_chip_info },
+ { .compatible = "adi,ad4084", &ad4084_chip_info },
+ { .compatible = "adi,ad4086", &ad4086_chip_info },
+ { .compatible = "adi,ad4087", &ad4087_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, ad4080_of_match);
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index dcdb5778f7d6..5567ae5dee88 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -2035,8 +2035,7 @@ static int ad4130_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(dev, ad4130_disable_regulators, st);
if (ret)
- return dev_err_probe(dev, ret,
- "Failed to add regulators disable action\n");
+ return ret;
ret = ad4130_soft_reset(st);
if (ret)
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 4d8c6bafd1c3..5c1a8f886bcc 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -3,21 +3,28 @@
* AD7124 SPI ADC driver
*
* Copyright 2018 Analog Devices Inc.
+ * Copyright 2025 BayLibre, SAS
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/sprintf.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/adc/ad_sigma_delta.h>
@@ -44,6 +51,11 @@
#define AD7124_STATUS_POR_FLAG BIT(4)
/* AD7124_ADC_CONTROL */
+#define AD7124_ADC_CONTROL_CLK_SEL GENMASK(1, 0)
+#define AD7124_ADC_CONTROL_CLK_SEL_INT 0
+#define AD7124_ADC_CONTROL_CLK_SEL_INT_OUT 1
+#define AD7124_ADC_CONTROL_CLK_SEL_EXT 2
+#define AD7124_ADC_CONTROL_CLK_SEL_EXT_DIV4 3
#define AD7124_ADC_CONTROL_MODE GENMASK(5, 2)
#define AD7124_ADC_CONTROL_MODE_CONTINUOUS 0
#define AD7124_ADC_CONTROL_MODE_SINGLE 1
@@ -84,14 +96,28 @@
#define AD7124_CONFIG_PGA GENMASK(2, 0)
/* AD7124_FILTER_X */
-#define AD7124_FILTER_FS GENMASK(10, 0)
#define AD7124_FILTER_FILTER GENMASK(23, 21)
#define AD7124_FILTER_FILTER_SINC4 0
#define AD7124_FILTER_FILTER_SINC3 2
+#define AD7124_FILTER_FILTER_SINC4_SINC1 4
+#define AD7124_FILTER_FILTER_SINC3_SINC1 5
+#define AD7124_FILTER_FILTER_SINC3_PF 7
+#define AD7124_FILTER_REJ60 BIT(20)
+#define AD7124_FILTER_POST_FILTER GENMASK(19, 17)
+#define AD7124_FILTER_POST_FILTER_47dB 2
+#define AD7124_FILTER_POST_FILTER_62dB 3
+#define AD7124_FILTER_POST_FILTER_86dB 5
+#define AD7124_FILTER_POST_FILTER_92dB 6
+#define AD7124_FILTER_SINGLE_CYCLE BIT(16)
+#define AD7124_FILTER_FS GENMASK(10, 0)
+
+#define AD7124_CFG_SLOT_UNASSIGNED ~0U
#define AD7124_MAX_CONFIGS 8
#define AD7124_MAX_CHANNELS 16
+#define AD7124_INT_CLK_HZ 614400
+
/* AD7124 input sources */
enum ad7124_ref_sel {
@@ -120,9 +146,9 @@ static const unsigned int ad7124_reg_size[] = {
};
static const int ad7124_master_clk_freq_hz[3] = {
- [AD7124_LOW_POWER] = 76800,
- [AD7124_MID_POWER] = 153600,
- [AD7124_FULL_POWER] = 614400,
+ [AD7124_LOW_POWER] = AD7124_INT_CLK_HZ / 8,
+ [AD7124_MID_POWER] = AD7124_INT_CLK_HZ / 4,
+ [AD7124_FULL_POWER] = AD7124_INT_CLK_HZ,
};
static const char * const ad7124_ref_names[] = {
@@ -138,13 +164,27 @@ struct ad7124_chip_info {
unsigned int num_inputs;
};
+enum ad7124_filter_type {
+ AD7124_FILTER_TYPE_SINC3,
+ AD7124_FILTER_TYPE_SINC3_PF1,
+ AD7124_FILTER_TYPE_SINC3_PF2,
+ AD7124_FILTER_TYPE_SINC3_PF3,
+ AD7124_FILTER_TYPE_SINC3_PF4,
+ AD7124_FILTER_TYPE_SINC3_REJ60,
+ AD7124_FILTER_TYPE_SINC3_SINC1,
+ AD7124_FILTER_TYPE_SINC4,
+ AD7124_FILTER_TYPE_SINC4_REJ60,
+ AD7124_FILTER_TYPE_SINC4_SINC1,
+};
+
struct ad7124_channel_config {
- bool live;
unsigned int cfg_slot;
+ unsigned int requested_odr;
+ unsigned int requested_odr_micro;
/*
* Following fields are used to compare for equality. If you
* make adaptations in it, you most likely also have to adapt
- * ad7124_find_similar_live_cfg(), too.
+ * ad7124_config_equal(), too.
*/
struct_group(config_props,
enum ad7124_ref_sel refsel;
@@ -153,16 +193,14 @@ struct ad7124_channel_config {
bool buf_negative;
unsigned int vref_mv;
unsigned int pga_bits;
- unsigned int odr;
unsigned int odr_sel_bits;
- unsigned int filter_type;
+ enum ad7124_filter_type filter_type;
unsigned int calibration_offset;
unsigned int calibration_gain;
);
};
struct ad7124_channel {
- unsigned int nr;
struct ad7124_channel_config cfg;
unsigned int ain;
unsigned int slot;
@@ -174,18 +212,18 @@ struct ad7124_state {
struct ad_sigma_delta sd;
struct ad7124_channel *channels;
struct regulator *vref[4];
- struct clk *mclk;
+ u32 clk_hz;
unsigned int adc_control;
unsigned int num_channels;
struct mutex cfgs_lock; /* lock for configs access */
- unsigned long cfg_slots_status; /* bitmap with slot status (1 means it is used) */
+ u8 cfg_slot_use_count[AD7124_MAX_CONFIGS];
/*
* Stores the power-on reset value for the GAIN(x) registers which are
* needed for measurements at gain 1 (i.e. CONFIG(x).PGA == 0)
*/
unsigned int gain_default;
- DECLARE_KFIFO(live_cfgs_fifo, struct ad7124_channel_config *, AD7124_MAX_CONFIGS);
+ bool enable_single_cycle;
};
static const struct ad7124_chip_info ad7124_4_chip_info = {
@@ -250,104 +288,117 @@ static int ad7124_set_mode(struct ad_sigma_delta *sd,
return ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
}
-static void ad7124_set_channel_odr(struct ad7124_state *st, unsigned int channel, unsigned int odr)
+static u32 ad7124_get_fclk_hz(struct ad7124_state *st)
{
- unsigned int fclk, odr_sel_bits;
+ enum ad7124_power_mode power_mode;
+ u32 fclk_hz;
- fclk = clk_get_rate(st->mclk);
- /*
- * FS[10:0] = fCLK / (fADC x 32) where:
- * fADC is the output data rate
- * fCLK is the master clock frequency
- * FS[10:0] are the bits in the filter register
- * FS[10:0] can have a value from 1 to 2047
- */
- odr_sel_bits = DIV_ROUND_CLOSEST(fclk, odr * 32);
- if (odr_sel_bits < 1)
- odr_sel_bits = 1;
- else if (odr_sel_bits > 2047)
- odr_sel_bits = 2047;
+ power_mode = FIELD_GET(AD7124_ADC_CONTROL_POWER_MODE, st->adc_control);
+ fclk_hz = st->clk_hz;
- if (odr_sel_bits != st->channels[channel].cfg.odr_sel_bits)
- st->channels[channel].cfg.live = false;
+ switch (power_mode) {
+ case AD7124_LOW_POWER:
+ fclk_hz /= 8;
+ break;
+ case AD7124_MID_POWER:
+ fclk_hz /= 4;
+ break;
+ default:
+ break;
+ }
- /* fADC = fCLK / (FS[10:0] x 32) */
- st->channels[channel].cfg.odr = DIV_ROUND_CLOSEST(fclk, odr_sel_bits * 32);
- st->channels[channel].cfg.odr_sel_bits = odr_sel_bits;
+ return fclk_hz;
}
-static int ad7124_get_3db_filter_freq(struct ad7124_state *st,
- unsigned int channel)
+static u32 ad7124_get_fs_factor(struct ad7124_state *st, unsigned int channel)
{
- unsigned int fadc;
-
- fadc = st->channels[channel].cfg.odr;
+ enum ad7124_power_mode power_mode =
+ FIELD_GET(AD7124_ADC_CONTROL_POWER_MODE, st->adc_control);
+ u32 avg = power_mode == AD7124_LOW_POWER ? 8 : 16;
+ /*
+ * These are the "zero-latency" factors from the data sheet. For the
+ * sinc1 filters, these aren't documented, but derived by taking the
+ * single-channel formula from the sinc1 section of the data sheet and
+ * multiplying that by the sinc3/4 factor from the corresponding zero-
+ * latency sections.
+ */
switch (st->channels[channel].cfg.filter_type) {
- case AD7124_FILTER_FILTER_SINC3:
- return DIV_ROUND_CLOSEST(fadc * 272, 1000);
- case AD7124_FILTER_FILTER_SINC4:
- return DIV_ROUND_CLOSEST(fadc * 230, 1000);
+ case AD7124_FILTER_TYPE_SINC4:
+ case AD7124_FILTER_TYPE_SINC4_REJ60:
+ return 4 * 32;
+ case AD7124_FILTER_TYPE_SINC4_SINC1:
+ return 4 * avg * 32;
+ case AD7124_FILTER_TYPE_SINC3_SINC1:
+ return 3 * avg * 32;
default:
- return -EINVAL;
+ return 3 * 32;
}
}
-static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_state *st,
- struct ad7124_channel_config *cfg)
+static u32 ad7124_get_fadc_divisor(struct ad7124_state *st, unsigned int channel)
{
- struct ad7124_channel_config *cfg_aux;
- int i;
+ u32 factor = ad7124_get_fs_factor(st, channel);
/*
- * This is just to make sure that the comparison is adapted after
- * struct ad7124_channel_config was changed.
+ * The output data rate (f_ADC) is f_CLK / divisor. We are returning
+ * the divisor.
*/
- static_assert(sizeof_field(struct ad7124_channel_config, config_props) ==
- sizeof(struct {
- enum ad7124_ref_sel refsel;
- bool bipolar;
- bool buf_positive;
- bool buf_negative;
- unsigned int vref_mv;
- unsigned int pga_bits;
- unsigned int odr;
- unsigned int odr_sel_bits;
- unsigned int filter_type;
- unsigned int calibration_offset;
- unsigned int calibration_gain;
- }));
+ return st->channels[channel].cfg.odr_sel_bits * factor;
+}
- for (i = 0; i < st->num_channels; i++) {
- cfg_aux = &st->channels[i].cfg;
-
- if (cfg_aux->live &&
- cfg->refsel == cfg_aux->refsel &&
- cfg->bipolar == cfg_aux->bipolar &&
- cfg->buf_positive == cfg_aux->buf_positive &&
- cfg->buf_negative == cfg_aux->buf_negative &&
- cfg->vref_mv == cfg_aux->vref_mv &&
- cfg->pga_bits == cfg_aux->pga_bits &&
- cfg->odr == cfg_aux->odr &&
- cfg->odr_sel_bits == cfg_aux->odr_sel_bits &&
- cfg->filter_type == cfg_aux->filter_type &&
- cfg->calibration_offset == cfg_aux->calibration_offset &&
- cfg->calibration_gain == cfg_aux->calibration_gain)
- return cfg_aux;
- }
+static void ad7124_set_channel_odr(struct ad7124_state *st, unsigned int channel)
+{
+ struct ad7124_channel_config *cfg = &st->channels[channel].cfg;
+ unsigned int fclk, factor, divisor, odr_sel_bits;
+
+ fclk = ad7124_get_fclk_hz(st);
+ factor = ad7124_get_fs_factor(st, channel);
- return NULL;
+ /*
+ * FS[10:0] = fCLK / (fADC x 32 * N) where:
+ * fADC is the output data rate
+ * fCLK is the master clock frequency
+ * N is number of conversions per sample (depends on filter type)
+ * FS[10:0] are the bits in the filter register
+ * FS[10:0] can have a value from 1 to 2047
+ */
+ divisor = cfg->requested_odr * factor +
+ cfg->requested_odr_micro * factor / MICRO;
+ odr_sel_bits = clamp(DIV_ROUND_CLOSEST(fclk, divisor), 1, 2047);
+
+ st->channels[channel].cfg.odr_sel_bits = odr_sel_bits;
}
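As a quick sanity check of the FS[10:0] arithmetic above, a standalone sketch assuming full-power mode (f_CLK = 614400 Hz), the default sinc4 filter (zero-latency factor 4 * 32 = 128), and a requested rate of 10 Hz:

#include <stdio.h>

/* Mirrors the kernel's DIV_ROUND_CLOSEST() for unsigned operands. */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned int fclk = 614400;	/* full-power f_CLK in Hz */
	unsigned int factor = 4 * 32;	/* sinc4 zero-latency factor */
	unsigned int odr = 10;		/* requested output data rate in Hz */
	unsigned int fs = div_round_closest(fclk, odr * factor);

	/* Prints FS[10:0] = 480, f_ADC = 10 Hz */
	printf("FS[10:0] = %u, f_ADC = %u Hz\n", fs, fclk / (fs * factor));
	return 0;
}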
-static int ad7124_find_free_config_slot(struct ad7124_state *st)
+static int ad7124_get_3db_filter_factor(struct ad7124_state *st,
+ unsigned int channel)
{
- unsigned int free_cfg_slot;
-
- free_cfg_slot = find_first_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS);
- if (free_cfg_slot == AD7124_MAX_CONFIGS)
- return -1;
+ struct ad7124_channel_config *cfg = &st->channels[channel].cfg;
- return free_cfg_slot;
+ /*
+ * The 3dB point is the output data rate (f_ADC) times some factor.
+ * This function returns that factor times 1000.
+ */
+ switch (cfg->filter_type) {
+ case AD7124_FILTER_TYPE_SINC3:
+ case AD7124_FILTER_TYPE_SINC3_REJ60:
+ case AD7124_FILTER_TYPE_SINC3_SINC1:
+ return 272;
+ case AD7124_FILTER_TYPE_SINC4:
+ case AD7124_FILTER_TYPE_SINC4_REJ60:
+ case AD7124_FILTER_TYPE_SINC4_SINC1:
+ return 230;
+ case AD7124_FILTER_TYPE_SINC3_PF1:
+ return 633;
+ case AD7124_FILTER_TYPE_SINC3_PF2:
+ return 605;
+ case AD7124_FILTER_TYPE_SINC3_PF3:
+ return 669;
+ case AD7124_FILTER_TYPE_SINC3_PF4:
+ return 759;
+ default:
+ return -EINVAL;
+ }
}
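Continuing that example, the factor returned here combines with f_ADC as a fraction. For the hypothetical sinc4 channel above (f_ADC = 10 Hz, divisor = 480 * 128 = 61440), the 3dB point works out as follows:

#include <stdio.h>

int main(void)
{
	unsigned int factor_milli = 230;		/* sinc4 factor times 1000 */
	unsigned int val = factor_milli * 614400;	/* factor * f_CLK */
	unsigned int val2 = 1000 * 61440;		/* MILLI * f_ADC divisor */

	/* IIO_VAL_FRACTIONAL pair val/val2: 0.230 * 10 Hz = 2.3 Hz */
	printf("f_3dB = %.3f Hz\n", (double)val / val2);
	return 0;
}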
/* Only called during probe, so dev_err_probe() can be used */
@@ -378,20 +429,36 @@ static int ad7124_init_config_vref(struct ad7124_state *st, struct ad7124_channe
}
}
+static bool ad7124_config_equal(struct ad7124_channel_config *a,
+ struct ad7124_channel_config *b)
+{
+ return a->refsel == b->refsel &&
+ a->bipolar == b->bipolar &&
+ a->buf_positive == b->buf_positive &&
+ a->buf_negative == b->buf_negative &&
+ a->vref_mv == b->vref_mv &&
+ a->pga_bits == b->pga_bits &&
+ a->odr_sel_bits == b->odr_sel_bits &&
+ a->filter_type == b->filter_type &&
+ a->calibration_offset == b->calibration_offset &&
+ a->calibration_gain == b->calibration_gain;
+}
+
static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_config *cfg,
unsigned int cfg_slot)
{
- unsigned int tmp;
- unsigned int val;
+ unsigned int val, filter;
+ unsigned int rej60 = 0;
+ unsigned int post = 0;
int ret;
- cfg->cfg_slot = cfg_slot;
-
- ret = ad_sd_write_reg(&st->sd, AD7124_OFFSET(cfg->cfg_slot), 3, cfg->calibration_offset);
+ ret = ad_sd_write_reg(&st->sd, AD7124_OFFSET(cfg_slot), 3,
+ cfg->calibration_offset);
if (ret)
return ret;
- ret = ad_sd_write_reg(&st->sd, AD7124_GAIN(cfg->cfg_slot), 3, cfg->calibration_gain);
+ ret = ad_sd_write_reg(&st->sd, AD7124_GAIN(cfg_slot), 3,
+ cfg->calibration_gain);
if (ret)
return ret;
@@ -401,109 +468,157 @@ static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_co
(cfg->buf_negative ? AD7124_CONFIG_AIN_BUFM : 0) |
FIELD_PREP(AD7124_CONFIG_PGA, cfg->pga_bits);
- ret = ad_sd_write_reg(&st->sd, AD7124_CONFIG(cfg->cfg_slot), 2, val);
+ ret = ad_sd_write_reg(&st->sd, AD7124_CONFIG(cfg_slot), 2, val);
if (ret < 0)
return ret;
- tmp = FIELD_PREP(AD7124_FILTER_FILTER, cfg->filter_type) |
- FIELD_PREP(AD7124_FILTER_FS, cfg->odr_sel_bits);
- return ad7124_spi_write_mask(st, AD7124_FILTER(cfg->cfg_slot),
- AD7124_FILTER_FILTER | AD7124_FILTER_FS,
- tmp, 3);
-}
-
-static struct ad7124_channel_config *ad7124_pop_config(struct ad7124_state *st)
-{
- struct ad7124_channel_config *lru_cfg;
- struct ad7124_channel_config *cfg;
- int ret;
- int i;
+ switch (cfg->filter_type) {
+ case AD7124_FILTER_TYPE_SINC3:
+ filter = AD7124_FILTER_FILTER_SINC3;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_PF1:
+ filter = AD7124_FILTER_FILTER_SINC3_PF;
+ post = AD7124_FILTER_POST_FILTER_47dB;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_PF2:
+ filter = AD7124_FILTER_FILTER_SINC3_PF;
+ post = AD7124_FILTER_POST_FILTER_62dB;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_PF3:
+ filter = AD7124_FILTER_FILTER_SINC3_PF;
+ post = AD7124_FILTER_POST_FILTER_86dB;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_PF4:
+ filter = AD7124_FILTER_FILTER_SINC3_PF;
+ post = AD7124_FILTER_POST_FILTER_92dB;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_REJ60:
+ filter = AD7124_FILTER_FILTER_SINC3;
+ rej60 = 1;
+ break;
+ case AD7124_FILTER_TYPE_SINC3_SINC1:
+ filter = AD7124_FILTER_FILTER_SINC3_SINC1;
+ break;
+ case AD7124_FILTER_TYPE_SINC4:
+ filter = AD7124_FILTER_FILTER_SINC4;
+ break;
+ case AD7124_FILTER_TYPE_SINC4_REJ60:
+ filter = AD7124_FILTER_FILTER_SINC4;
+ rej60 = 1;
+ break;
+ case AD7124_FILTER_TYPE_SINC4_SINC1:
+ filter = AD7124_FILTER_FILTER_SINC4_SINC1;
+ break;
+ default:
+ return -EINVAL;
+ }
/*
- * Pop least recently used config from the fifo
- * in order to make room for the new one
+ * NB: AD7124_FILTER_SINGLE_CYCLE is always set so that we get the same
+ * sampling frequency even when only one channel is enabled in a
+ * buffered read. If it was not set, the N in ad7124_set_channel_odr()
+ * would be 1 and we would get a faster sampling frequency than what
+ * was requested. It may only be disabled through debugfs for testing
+ * purposes.
*/
- ret = kfifo_get(&st->live_cfgs_fifo, &lru_cfg);
- if (ret <= 0)
- return NULL;
-
- lru_cfg->live = false;
+ return ad_sd_write_reg(&st->sd, AD7124_FILTER(cfg_slot), 3,
+ FIELD_PREP(AD7124_FILTER_FILTER, filter) |
+ FIELD_PREP(AD7124_FILTER_REJ60, rej60) |
+ FIELD_PREP(AD7124_FILTER_POST_FILTER, post) |
+ FIELD_PREP(AD7124_FILTER_SINGLE_CYCLE,
+ st->enable_single_cycle) |
+ FIELD_PREP(AD7124_FILTER_FS, cfg->odr_sel_bits));
+}
- /* mark slot as free */
- assign_bit(lru_cfg->cfg_slot, &st->cfg_slots_status, 0);
+/**
+ * ad7124_request_config_slot() - Request a config slot for a given config
+ * @st: Driver instance
+ * @channel: Channel to request a slot for
+ *
+ * Tries to find a matching config already in use, otherwise finds a free
+ * slot. If this function returns successfully, the use count for the slot is
+ * increased and the slot number is stored in cfg->cfg_slot.
+ *
+ * The slot must be released again with ad7124_release_config_slot() when no
+ * longer needed.
+ *
+ * Returns: 0 if a slot was successfully assigned, -EUSERS if no slot is
+ * available, or another negative error code if SPI communication fails.
+ */
+static int ad7124_request_config_slot(struct ad7124_state *st, u8 channel)
+{
+ unsigned int other, slot;
+ int last_used_slot = -1;
- /* invalidate all other configs that pointed to this one */
- for (i = 0; i < st->num_channels; i++) {
- cfg = &st->channels[i].cfg;
+ /* Find another channel with a matching config, if any. */
+ for (other = 0; other < st->num_channels; other++) {
+ if (other == channel)
+ continue;
- if (cfg->cfg_slot == lru_cfg->cfg_slot)
- cfg->live = false;
- }
+ if (st->channels[other].cfg.cfg_slot == AD7124_CFG_SLOT_UNASSIGNED)
+ continue;
- return lru_cfg;
-}
+ last_used_slot = max_t(int, last_used_slot,
+ st->channels[other].cfg.cfg_slot);
-static int ad7124_push_config(struct ad7124_state *st, struct ad7124_channel_config *cfg)
-{
- struct ad7124_channel_config *lru_cfg;
- int free_cfg_slot;
+ if (!ad7124_config_equal(&st->channels[other].cfg,
+ &st->channels[channel].cfg))
+ continue;
- free_cfg_slot = ad7124_find_free_config_slot(st);
- if (free_cfg_slot >= 0) {
- /* push the new config in configs queue */
- kfifo_put(&st->live_cfgs_fifo, cfg);
- } else {
- /* pop one config to make room for the new one */
- lru_cfg = ad7124_pop_config(st);
- if (!lru_cfg)
- return -EINVAL;
+ /* Found a match, re-use that slot. */
+ slot = st->channels[other].cfg.cfg_slot;
+ st->cfg_slot_use_count[slot]++;
+ st->channels[channel].cfg.cfg_slot = slot;
- /* push the new config in configs queue */
- free_cfg_slot = lru_cfg->cfg_slot;
- kfifo_put(&st->live_cfgs_fifo, cfg);
+ return 0;
}
- /* mark slot as used */
- assign_bit(free_cfg_slot, &st->cfg_slots_status, 1);
+ /* No match, use next free slot. */
+ slot = last_used_slot + 1;
+ if (slot >= AD7124_MAX_CONFIGS)
+ return -EUSERS;
- return ad7124_write_config(st, cfg, free_cfg_slot);
-}
+ st->cfg_slot_use_count[slot]++;
+ st->channels[channel].cfg.cfg_slot = slot;
-static int ad7124_enable_channel(struct ad7124_state *st, struct ad7124_channel *ch)
-{
- ch->cfg.live = true;
- return ad_sd_write_reg(&st->sd, AD7124_CHANNEL(ch->nr), 2, ch->ain |
- FIELD_PREP(AD7124_CHANNEL_SETUP, ch->cfg.cfg_slot) |
- AD7124_CHANNEL_ENABLE);
+ return ad7124_write_config(st, &st->channels[channel].cfg, slot);
}
-static int ad7124_prepare_read(struct ad7124_state *st, int address)
+static void ad7124_release_config_slot(struct ad7124_state *st, u8 channel)
{
- struct ad7124_channel_config *cfg = &st->channels[address].cfg;
- struct ad7124_channel_config *live_cfg;
+ unsigned int slot;
/*
- * Before doing any reads assign the channel a configuration.
- * Check if channel's config is on the device
+ * All of these early return conditions can happen at probe when all
+ * channels are disabled; they are not expected to happen otherwise.
*/
- if (!cfg->live) {
- /* check if config matches another one */
- live_cfg = ad7124_find_similar_live_cfg(st, cfg);
- if (!live_cfg)
- ad7124_push_config(st, cfg);
- else
- cfg->cfg_slot = live_cfg->cfg_slot;
- }
+ if (channel >= st->num_channels)
+ return;
- /* point channel to the config slot and enable */
- return ad7124_enable_channel(st, &st->channels[address]);
+ slot = st->channels[channel].cfg.cfg_slot;
+
+ if (slot == AD7124_CFG_SLOT_UNASSIGNED ||
+ st->cfg_slot_use_count[slot] == 0)
+ return;
+
+ st->cfg_slot_use_count[slot]--;
+ st->channels[channel].cfg.cfg_slot = AD7124_CFG_SLOT_UNASSIGNED;
}
-static int __ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
+static int ad7124_prepare_read(struct ad7124_state *st, int address)
{
- struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+ struct ad7124_channel_config *cfg = &st->channels[address].cfg;
+ int ret;
- return ad7124_prepare_read(st, channel);
+ ret = ad7124_request_config_slot(st, address);
+ if (ret)
+ return ret;
+
+ /* point channel to the config slot and enable */
+ return ad_sd_write_reg(&st->sd, AD7124_CHANNEL(address), 2,
+ st->channels[address].ain |
+ FIELD_PREP(AD7124_CHANNEL_SETUP, cfg->cfg_slot) |
+ AD7124_CHANNEL_ENABLE);
}
static int ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
@@ -512,7 +627,7 @@ static int ad7124_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
int ret;
mutex_lock(&st->cfgs_lock);
- ret = __ad7124_set_channel(sd, channel);
+ ret = ad7124_prepare_read(st, channel);
mutex_unlock(&st->cfgs_lock);
return ret;
@@ -542,6 +657,8 @@ static int ad7124_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
{
struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+ ad7124_release_config_slot(st, chan);
+
/* The relevant thing here is that AD7124_CHANNEL_ENABLE is cleared. */
return ad_sd_write_reg(&st->sd, AD7124_CHANNEL(chan), 2, 0);
}
@@ -551,7 +668,7 @@ static int ad7124_disable_all(struct ad_sigma_delta *sd)
int ret;
int i;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < AD7124_MAX_CHANNELS; i++) {
ret = ad7124_disable_one(sd, i);
if (ret < 0)
return ret;
@@ -576,6 +693,33 @@ static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.num_resetclks = 64,
};
+static const int ad7124_voltage_scales[][2] = {
+ { 0, 1164 },
+ { 0, 2328 },
+ { 0, 4656 },
+ { 0, 9313 },
+ { 0, 18626 },
+ { 0, 37252 },
+ { 0, 74505 },
+ { 0, 149011 },
+ { 0, 298023 },
+};
+
+static int ad7124_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length, long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (const int *)ad7124_voltage_scales;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ *length = ARRAY_SIZE(ad7124_voltage_scales) * 2;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
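The list above reproduces, in IIO_VAL_INT_PLUS_NANO form, the values from the in_voltage_scale_available string attribute removed further down in this patch. A minimal sketch of how such a pair renders, assuming the usual integer-dot-nine-digit formatting the IIO core uses for nano values:

#include <stdio.h>

int main(void)
{
	/* First ad7124_voltage_scales entry: integer part 0, nano part 1164 */
	int val = 0, val2 = 1164;

	printf("%d.%09u\n", val, (unsigned int)val2);	/* 0.000001164 */
	return 0;
}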
+
static int ad7124_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long info)
@@ -644,18 +788,59 @@ static int ad7124_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
- case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->cfgs_lock);
- *val = st->channels[chan->address].cfg.odr;
- mutex_unlock(&st->cfgs_lock);
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ struct ad7124_channel_config *cfg = &st->channels[chan->address].cfg;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- mutex_lock(&st->cfgs_lock);
- *val = ad7124_get_3db_filter_freq(st, chan->scan_index);
- mutex_unlock(&st->cfgs_lock);
+ guard(mutex)(&st->cfgs_lock);
- return IIO_VAL_INT;
+ switch (cfg->filter_type) {
+ case AD7124_FILTER_TYPE_SINC3:
+ case AD7124_FILTER_TYPE_SINC3_REJ60:
+ case AD7124_FILTER_TYPE_SINC3_SINC1:
+ case AD7124_FILTER_TYPE_SINC4:
+ case AD7124_FILTER_TYPE_SINC4_REJ60:
+ case AD7124_FILTER_TYPE_SINC4_SINC1:
+ *val = ad7124_get_fclk_hz(st);
+ *val2 = ad7124_get_fadc_divisor(st, chan->address);
+ return IIO_VAL_FRACTIONAL;
+ /*
+ * Post filters force the chip to a fixed rate. These are the
+ * single-channel rates from the data sheet divided by 3 for
+ * the multi-channel case (data sheet doesn't explicitly state
+ * this but confirmed through testing).
+ */
+ case AD7124_FILTER_TYPE_SINC3_PF1:
+ *val = 300;
+ *val2 = 33;
+ return IIO_VAL_FRACTIONAL;
+ case AD7124_FILTER_TYPE_SINC3_PF2:
+ *val = 25;
+ *val2 = 3;
+ return IIO_VAL_FRACTIONAL;
+ case AD7124_FILTER_TYPE_SINC3_PF3:
+ *val = 20;
+ *val2 = 3;
+ return IIO_VAL_FRACTIONAL;
+ case AD7124_FILTER_TYPE_SINC3_PF4:
+ *val = 50;
+ *val2 = 9;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ }
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY: {
+ guard(mutex)(&st->cfgs_lock);
+
+ ret = ad7124_get_3db_filter_factor(st, chan->address);
+ if (ret < 0)
+ return ret;
+
+ /* 3dB point is f_ADC (f_CLK / divisor) times the factor / 1000 */
+ *val = ret * ad7124_get_fclk_hz(st);
+ *val2 = MILLI * ad7124_get_fadc_divisor(st, chan->address);
+ return IIO_VAL_FRACTIONAL;
+ }
default:
return -EINVAL;
}
@@ -666,25 +851,24 @@ static int ad7124_write_raw(struct iio_dev *indio_dev,
int val, int val2, long info)
{
struct ad7124_state *st = iio_priv(indio_dev);
+ struct ad7124_channel_config *cfg = &st->channels[chan->address].cfg;
unsigned int res, gain, full_scale, vref;
- int ret = 0;
- mutex_lock(&st->cfgs_lock);
+ guard(mutex)(&st->cfgs_lock);
switch (info) {
case IIO_CHAN_INFO_SAMP_FREQ:
- if (val2 != 0 || val == 0) {
- ret = -EINVAL;
- break;
- }
+ if (val2 < 0 || val < 0 || (val2 == 0 && val == 0))
+ return -EINVAL;
- ad7124_set_channel_odr(st, chan->address, val);
- break;
+ cfg->requested_odr = val;
+ cfg->requested_odr_micro = val2;
+ ad7124_set_channel_odr(st, chan->address);
+
+ return 0;
case IIO_CHAN_INFO_SCALE:
- if (val != 0) {
- ret = -EINVAL;
- break;
- }
+ if (val != 0)
+ return -EINVAL;
if (st->channels[chan->address].cfg.bipolar)
full_scale = 1 << (chan->scan_type.realbits - 1);
@@ -696,17 +880,11 @@ static int ad7124_write_raw(struct iio_dev *indio_dev,
gain = DIV_ROUND_CLOSEST(res, val2);
res = ad7124_find_closest_match(ad7124_gain, ARRAY_SIZE(ad7124_gain), gain);
- if (st->channels[chan->address].cfg.pga_bits != res)
- st->channels[chan->address].cfg.live = false;
-
st->channels[chan->address].cfg.pga_bits = res;
- break;
+ return 0;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-
- mutex_unlock(&st->cfgs_lock);
- return ret;
}
static int ad7124_reg_access(struct iio_dev *indio_dev,
@@ -730,18 +908,6 @@ static int ad7124_reg_access(struct iio_dev *indio_dev,
return ret;
}
-static IIO_CONST_ATTR(in_voltage_scale_available,
- "0.000001164 0.000002328 0.000004656 0.000009313 0.000018626 0.000037252 0.000074505 0.000149011 0.000298023");
-
-static struct attribute *ad7124_attributes[] = {
- &iio_const_attr_in_voltage_scale_available.dev_attr.attr,
- NULL,
-};
-
-static const struct attribute_group ad7124_attrs_group = {
- .attrs = ad7124_attributes,
-};
-
static int ad7124_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
@@ -750,33 +916,29 @@ static int ad7124_update_scan_mode(struct iio_dev *indio_dev,
int ret;
int i;
- mutex_lock(&st->cfgs_lock);
+ guard(mutex)(&st->cfgs_lock);
+
for (i = 0; i < st->num_channels; i++) {
bit_set = test_bit(i, scan_mask);
if (bit_set)
- ret = __ad7124_set_channel(&st->sd, i);
+ ret = ad7124_prepare_read(st, i);
else
ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_ENABLE,
0, 2);
- if (ret < 0) {
- mutex_unlock(&st->cfgs_lock);
-
+ if (ret < 0)
return ret;
- }
}
- mutex_unlock(&st->cfgs_lock);
-
return 0;
}
static const struct iio_info ad7124_info = {
+ .read_avail = ad7124_read_avail,
.read_raw = ad7124_read_raw,
.write_raw = ad7124_write_raw,
.debugfs_reg_access = &ad7124_reg_access,
.validate_trigger = ad_sd_validate_trigger,
.update_scan_mode = ad7124_update_scan_mode,
- .attrs = &ad7124_attrs_group,
};
/* Only called during probe, so dev_err_probe() can be used */
@@ -860,7 +1022,11 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan
if (ret < 0)
return ret;
- ret = ad_sd_read_reg(&st->sd, AD7124_OFFSET(ch->cfg.cfg_slot), 3,
+ /*
+ * Making the assumption that a single conversion will always
+ * use configuration slot 0 for the OFFSET/GAIN registers.
+ */
+ ret = ad_sd_read_reg(&st->sd, AD7124_OFFSET(0), 3,
&ch->cfg.calibration_offset);
if (ret < 0)
return ret;
@@ -875,7 +1041,7 @@ static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan
if (ret < 0)
return ret;
- ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(ch->cfg.cfg_slot), 3,
+ ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(0), 3,
&ch->cfg.calibration_gain);
if (ret < 0)
return ret;
@@ -944,6 +1110,51 @@ static const struct iio_enum ad7124_syscalib_mode_enum = {
.get = ad7124_get_syscalib_mode
};
+static const char * const ad7124_filter_types[] = {
+ [AD7124_FILTER_TYPE_SINC3] = "sinc3",
+ [AD7124_FILTER_TYPE_SINC3_PF1] = "sinc3+pf1",
+ [AD7124_FILTER_TYPE_SINC3_PF2] = "sinc3+pf2",
+ [AD7124_FILTER_TYPE_SINC3_PF3] = "sinc3+pf3",
+ [AD7124_FILTER_TYPE_SINC3_PF4] = "sinc3+pf4",
+ [AD7124_FILTER_TYPE_SINC3_REJ60] = "sinc3+rej60",
+ [AD7124_FILTER_TYPE_SINC3_SINC1] = "sinc3+sinc1",
+ [AD7124_FILTER_TYPE_SINC4] = "sinc4",
+ [AD7124_FILTER_TYPE_SINC4_REJ60] = "sinc4+rej60",
+ [AD7124_FILTER_TYPE_SINC4_SINC1] = "sinc4+sinc1",
+};
+
+static int ad7124_set_filter_type_attr(struct iio_dev *dev,
+ const struct iio_chan_spec *chan,
+ unsigned int value)
+{
+ struct ad7124_state *st = iio_priv(dev);
+ struct ad7124_channel_config *cfg = &st->channels[chan->address].cfg;
+
+ guard(mutex)(&st->cfgs_lock);
+
+ cfg->filter_type = value;
+ ad7124_set_channel_odr(st, chan->address);
+
+ return 0;
+}
+
+static int ad7124_get_filter_type_attr(struct iio_dev *dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7124_state *st = iio_priv(dev);
+
+ guard(mutex)(&st->cfgs_lock);
+
+ return st->channels[chan->address].cfg.filter_type;
+}
+
+static const struct iio_enum ad7124_filter_type_enum = {
+ .items = ad7124_filter_types,
+ .num_items = ARRAY_SIZE(ad7124_filter_types),
+ .set = ad7124_set_filter_type_attr,
+ .get = ad7124_get_filter_type_attr,
+};
+
static const struct iio_chan_spec_ext_info ad7124_calibsys_ext_info[] = {
{
.name = "sys_calibration",
@@ -954,6 +1165,9 @@ static const struct iio_chan_spec_ext_info ad7124_calibsys_ext_info[] = {
&ad7124_syscalib_mode_enum),
IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
&ad7124_syscalib_mode_enum),
+ IIO_ENUM("filter_type", IIO_SEPARATE, &ad7124_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_TYPE,
+ &ad7124_filter_type_enum),
{ }
};
@@ -966,6 +1180,7 @@ static const struct iio_chan_spec ad7124_channel_template = {
BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
.scan_type = {
.sign = 'u',
.realbits = 24,
@@ -1049,7 +1264,6 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
return dev_err_probe(dev, -EINVAL,
"diff-channels property of %pfwP contains invalid data\n", child);
- st->channels[channel].nr = channel;
st->channels[channel].ain = FIELD_PREP(AD7124_CHANNEL_AINP, ain[0]) |
FIELD_PREP(AD7124_CHANNEL_AINM, ain[1]);
@@ -1076,7 +1290,6 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
if (num_channels < AD7124_MAX_CHANNELS) {
st->channels[num_channels] = (struct ad7124_channel) {
- .nr = num_channels,
.ain = FIELD_PREP(AD7124_CHANNEL_AINP, AD7124_CHANNEL_AINx_TEMPSENSOR) |
FIELD_PREP(AD7124_CHANNEL_AINM, AD7124_CHANNEL_AINx_AVSS),
.cfg = {
@@ -1102,6 +1315,7 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
},
.address = num_channels,
.scan_index = num_channels,
+ .ext_info = ad7124_calibsys_ext_info,
};
}
@@ -1111,44 +1325,151 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
static int ad7124_setup(struct ad7124_state *st)
{
struct device *dev = &st->sd.spi->dev;
- unsigned int fclk, power_mode;
+ unsigned int power_mode, clk_sel;
+ struct clk *mclk;
int i, ret;
- fclk = clk_get_rate(st->mclk);
- if (!fclk)
- return dev_err_probe(dev, -EINVAL, "Failed to get mclk rate\n");
+ /*
+ * Always use full power mode for max performance. If needed, the driver
+ * could be adapted to use a dynamic power mode based on the requested
+ * output data rate.
+ */
+ power_mode = AD7124_ADC_CONTROL_POWER_MODE_FULL;
+
+ /*
+ * This "mclk" business is needed for backwards compatibility with old
+ * devicetrees that specified a fake clock named "mclk" to select the
+ * power mode.
+ */
+ mclk = devm_clk_get_optional_enabled(dev, "mclk");
+ if (IS_ERR(mclk))
+ return dev_err_probe(dev, PTR_ERR(mclk), "Failed to get mclk\n");
+
+ if (mclk) {
+ unsigned long mclk_hz;
+
+ mclk_hz = clk_get_rate(mclk);
+ if (!mclk_hz)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get mclk rate\n");
- /* The power mode changes the master clock frequency */
- power_mode = ad7124_find_closest_match(ad7124_master_clk_freq_hz,
- ARRAY_SIZE(ad7124_master_clk_freq_hz),
- fclk);
- if (fclk != ad7124_master_clk_freq_hz[power_mode]) {
- ret = clk_set_rate(st->mclk, fclk);
+ /*
+ * This logic is a bit backwards, which is why it is only here
+ * for backwards compatibility. The driver should be able to set
+ * the power mode as it sees fit and the f_clk/mclk rate should
+ * be dynamic accordingly. But here, we are selecting a fixed
+ * power mode based on the given "mclk" rate.
+ */
+ power_mode = ad7124_find_closest_match(ad7124_master_clk_freq_hz,
+ ARRAY_SIZE(ad7124_master_clk_freq_hz), mclk_hz);
+
+ if (mclk_hz != ad7124_master_clk_freq_hz[power_mode]) {
+ ret = clk_set_rate(mclk, mclk_hz);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set mclk rate\n");
+ }
+
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_INT;
+ st->clk_hz = AD7124_INT_CLK_HZ;
+ } else if (!device_property_present(dev, "clocks") &&
+ device_property_present(dev, "#clock-cells")) {
+#ifdef CONFIG_COMMON_CLK
+ struct clk_hw *clk_hw;
+
+ const char *name __free(kfree) = kasprintf(GFP_KERNEL, "%pfwP-clk",
+ dev_fwnode(dev));
+ if (!name)
+ return -ENOMEM;
+
+ clk_hw = devm_clk_hw_register_fixed_rate(dev, name, NULL, 0,
+ AD7124_INT_CLK_HZ);
+ if (IS_ERR(clk_hw))
+ return dev_err_probe(dev, PTR_ERR(clk_hw),
+ "Failed to register clock provider\n");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get,
+ clk_hw);
if (ret)
- return dev_err_probe(dev, ret, "Failed to set mclk rate\n");
+ return dev_err_probe(dev, ret,
+ "Failed to add clock provider\n");
+#endif
+
+ /*
+ * Treat the clock as always on. This way we don't have to deal
+ * with someone trying to enable/disable the clock while we are
+ * reading samples.
+ */
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_INT_OUT;
+ st->clk_hz = AD7124_INT_CLK_HZ;
+ } else {
+ struct clk *clk;
+
+ clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "Failed to get external clock\n");
+
+ if (clk) {
+ unsigned long clk_hz;
+
+ clk_hz = clk_get_rate(clk);
+ if (!clk_hz)
+ return dev_err_probe(dev, -EINVAL,
+ "Failed to get external clock rate\n");
+
+ /*
+ * The external clock may be 4x the nominal clock rate,
+ * in which case the ADC needs to be configured to
+ * divide it by 4. The 1 MHz threshold is a bit arbitrary, but
+ * the expected clock rates are either 614.4 kHz or
+ * 2.4576 MHz, so this should work.
+ */
+ if (clk_hz > (1 * HZ_PER_MHZ)) {
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_EXT_DIV4;
+ st->clk_hz = clk_hz / 4;
+ } else {
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_EXT;
+ st->clk_hz = clk_hz;
+ }
+ } else {
+ clk_sel = AD7124_ADC_CONTROL_CLK_SEL_INT;
+ st->clk_hz = AD7124_INT_CLK_HZ;
+ }
}
- /* Set the power mode */
+ st->adc_control &= ~AD7124_ADC_CONTROL_CLK_SEL;
+ st->adc_control |= FIELD_PREP(AD7124_ADC_CONTROL_CLK_SEL, clk_sel);
+
st->adc_control &= ~AD7124_ADC_CONTROL_POWER_MODE;
st->adc_control |= FIELD_PREP(AD7124_ADC_CONTROL_POWER_MODE, power_mode);
st->adc_control &= ~AD7124_ADC_CONTROL_MODE;
st->adc_control |= FIELD_PREP(AD7124_ADC_CONTROL_MODE, AD_SD_MODE_IDLE);
- mutex_init(&st->cfgs_lock);
- INIT_KFIFO(st->live_cfgs_fifo);
+ ret = devm_mutex_init(dev, &st->cfgs_lock);
+ if (ret)
+ return ret;
+
for (i = 0; i < st->num_channels; i++) {
+ struct ad7124_channel_config *cfg = &st->channels[i].cfg;
- ret = ad7124_init_config_vref(st, &st->channels[i].cfg);
+ ret = ad7124_init_config_vref(st, cfg);
if (ret < 0)
return ret;
+ cfg->cfg_slot = AD7124_CFG_SLOT_UNASSIGNED;
+
+ /* Default filter type on the ADC after reset. */
+ cfg->filter_type = AD7124_FILTER_TYPE_SINC4;
+
/*
* 9.38 SPS is the minimum output data rate supported
* regardless of the selected power mode. Round it up to 10 and
* set all channels to this default value.
*/
- ad7124_set_channel_odr(st, i, 10);
+ cfg->requested_odr = 10;
+ ad7124_set_channel_odr(st, i);
}
ad7124_disable_all(&st->sd);
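The external-clock branch above can be sanity-checked with the two rates its comment names; a standalone sketch using the same 1 MHz threshold as HZ_PER_MHZ:

#include <stdio.h>

int main(void)
{
	unsigned long rates[] = { 614400, 2457600 };	/* expected rates in Hz */

	for (int i = 0; i < 2; i++) {
		if (rates[i] > 1000000UL)	/* CLK_SEL_EXT_DIV4 case */
			printf("%lu Hz -> divide by 4 -> f_CLK = %lu Hz\n",
			       rates[i], rates[i] / 4);
		else				/* CLK_SEL_EXT case */
			printf("%lu Hz -> f_CLK = %lu Hz\n",
			       rates[i], rates[i]);
	}
	return 0;
}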
@@ -1166,10 +1487,6 @@ static int __ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio
int ret, i;
for (i = 0; i < st->num_channels; i++) {
-
- if (indio_dev->channels[i].type != IIO_VOLTAGE)
- continue;
-
/*
* For calibration the OFFSET register should hold its reset default
* value. For the GAIN register there is no such requirement but
@@ -1180,6 +1497,14 @@ static int __ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio
st->channels[i].cfg.calibration_gain = st->gain_default;
/*
+ * Only the main voltage input channels are important enough
+ * to be automatically calibrated here. For everything else,
+ * just use the default values set above.
+ */
+ if (indio_dev->channels[i].type != IIO_VOLTAGE)
+ continue;
+
+ /*
* Full-scale calibration isn't supported at gain 1, so skip in
* that case. Note that, atypically, full-scale calibration has
* to happen before zero-scale calibration. This only applies to
@@ -1196,9 +1521,9 @@ static int __ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio
* after full-scale calibration because the next
* ad_sd_calibrate() call overwrites this via
* ad_sigma_delta_set_channel() -> ad7124_set_channel()
- * ... -> ad7124_enable_channel().
+ * -> ad7124_prepare_read().
*/
- ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(st->channels[i].cfg.cfg_slot), 3,
+ ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(0), 3,
&st->channels[i].cfg.calibration_gain);
if (ret < 0)
return ret;
@@ -1208,7 +1533,11 @@ static int __ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio
if (ret < 0)
return ret;
- ret = ad_sd_read_reg(&st->sd, AD7124_OFFSET(st->channels[i].cfg.cfg_slot), 3,
+ /*
+ * Making the assumption that a single conversion will always
+ * use configuration slot 0 for the OFFSET/GAIN registers.
+ */
+ ret = ad_sd_read_reg(&st->sd, AD7124_OFFSET(0), 3,
&st->channels[i].cfg.calibration_offset);
if (ret < 0)
return ret;
@@ -1250,6 +1579,18 @@ static void ad7124_reg_disable(void *r)
regulator_disable(r);
}
+static void ad7124_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct dentry *dentry = iio_get_debugfs_dentry(indio_dev);
+ struct ad7124_state *st = iio_priv(indio_dev);
+
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return;
+
+ debugfs_create_bool("enable_single_cycle", 0644, dentry,
+ &st->enable_single_cycle);
+}
+
static int ad7124_probe(struct spi_device *spi)
{
const struct ad7124_chip_info *info;
@@ -1270,6 +1611,9 @@ static int ad7124_probe(struct spi_device *spi)
st->chip_info = info;
+ /* Only disabled for debug/testing purposes. */
+ st->enable_single_cycle = true;
+
indio_dev->name = st->chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad7124_info;
@@ -1300,13 +1644,9 @@ static int ad7124_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(&spi->dev, ad7124_reg_disable,
st->vref[i]);
if (ret)
- return dev_err_probe(dev, ret, "Failed to register disable handler for regulator #%d\n", i);
+ return ret;
}
- st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
- if (IS_ERR(st->mclk))
- return dev_err_probe(dev, PTR_ERR(st->mclk), "Failed to get mclk\n");
-
ret = ad7124_soft_reset(st);
if (ret < 0)
return ret;
@@ -1331,6 +1671,8 @@ static int ad7124_probe(struct spi_device *spi)
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to register iio device\n");
+ ad7124_debugfs_init(indio_dev);
+
return 0;
}
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 683146e83ab2..d36612352b44 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -8,6 +8,7 @@
* AD7175-8/AD7176-2/AD7177-2
*
* Copyright (C) 2015, 2024 Analog Devices, Inc.
+ * Copyright (C) 2025 BayLibre, SAS
*/
#include <linux/array_size.h>
@@ -149,7 +150,12 @@
(pin2) < st->info->num_voltage_in && \
(pin2) >= st->info->num_voltage_in_div)
-#define AD7173_FILTER_ODR0_MASK GENMASK(5, 0)
+#define AD7173_FILTER_SINC3_MAP BIT(15)
+#define AD7173_FILTER_SINC3_MAP_DIV GENMASK(14, 0)
+#define AD7173_FILTER_ENHFILTEN BIT(11)
+#define AD7173_FILTER_ENHFILT_MASK GENMASK(10, 8)
+#define AD7173_FILTER_ORDER BIT(6)
+#define AD7173_FILTER_ODR_MASK GENMASK(5, 0)
#define AD7173_MAX_CONFIGS 8
#define AD4111_OW_DET_THRSH_MV 300
@@ -190,6 +196,15 @@ struct ad7173_device_info {
u8 num_gpios;
};
+enum ad7173_filter_type {
+ AD7173_FILTER_SINC3,
+ AD7173_FILTER_SINC5_SINC1,
+ AD7173_FILTER_SINC5_SINC1_PF1,
+ AD7173_FILTER_SINC5_SINC1_PF2,
+ AD7173_FILTER_SINC5_SINC1_PF3,
+ AD7173_FILTER_SINC5_SINC1_PF4,
+};
+
struct ad7173_channel_config {
/* Openwire detection threshold */
unsigned int openwire_thrsh_raw;
@@ -205,8 +220,10 @@ struct ad7173_channel_config {
struct_group(config_props,
bool bipolar;
bool input_buf;
- u8 odr;
+ u16 sinc3_odr_div;
+ u8 sinc5_odr_index;
u8 ref_sel;
+ enum ad7173_filter_type filter_type;
);
};
@@ -266,6 +283,24 @@ static const unsigned int ad7175_sinc5_data_rates[] = {
5000, /* 20 */
};
+/**
+ * ad7173_sinc3_odr_div_from_odr() - Convert ODR to divider value
+ * @odr_millihz: ODR (sampling_frequency) in milliHz
+ * Returns: Divider value for SINC3 filter to pass.
+ */
+static u16 ad7173_sinc3_odr_div_from_odr(u32 odr_millihz)
+{
+ /*
+ * Divider is f_MOD (1 MHz) / 32 / ODR. ODR freq is in milliHz, so
+ * we need to convert f_MOD to the same units. When SING_CYC=1 or
+ * multiple channels are enabled (currently always the case), there
+ * is an additional factor of 3.
+ */
+ u32 div = DIV_ROUND_CLOSEST(MEGA * MILLI, odr_millihz * 32 * 3);
+ /* Avoid divide by 0 and limit to register field size. */
+ return clamp(div, 1U, AD7173_FILTER_SINC3_MAP_DIV);
+}
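A worked example of this conversion, assuming a requested rate of 10 Hz (10000 mHz): div = round(10^9 / (10000 * 32 * 3)) = 1042, which read_raw() inverts to roughly 9.997 Hz:

#include <stdio.h>

static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned int odr_millihz = 10000;	/* 10 Hz request */
	unsigned int div = div_round_closest(1000000000U,
					     odr_millihz * 32 * 3);

	/* Prints div = 1042, f_ADC = 9.9968 Hz */
	printf("div = %u, f_ADC = %.4f Hz\n", div,
	       1000000.0 / (3 * 32 * div));
	return 0;
}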
+
static unsigned int ad4111_current_channel_config[] = {
/* Ain sel: pos neg */
0x1E8, /* 15:IIN0+ 8:IIN0− */
@@ -369,7 +404,48 @@ static const struct iio_enum ad7173_syscalib_mode_enum = {
.get = ad7173_get_syscalib_mode
};
-static const struct iio_chan_spec_ext_info ad7173_calibsys_ext_info[] = {
+static const char * const ad7173_filter_types_str[] = {
+ [AD7173_FILTER_SINC3] = "sinc3",
+ [AD7173_FILTER_SINC5_SINC1] = "sinc5+sinc1",
+ [AD7173_FILTER_SINC5_SINC1_PF1] = "sinc5+sinc1+pf1",
+ [AD7173_FILTER_SINC5_SINC1_PF2] = "sinc5+sinc1+pf2",
+ [AD7173_FILTER_SINC5_SINC1_PF3] = "sinc5+sinc1+pf3",
+ [AD7173_FILTER_SINC5_SINC1_PF4] = "sinc5+sinc1+pf4",
+};
+
+static int ad7173_set_filter_type(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int val)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ st->channels[chan->address].cfg.filter_type = val;
+ st->channels[chan->address].cfg.live = false;
+
+ iio_device_release_direct(indio_dev);
+
+ return 0;
+}
+
+static int ad7173_get_filter_type(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+
+ return st->channels[chan->address].cfg.filter_type;
+}
+
+static const struct iio_enum ad7173_filter_type_enum = {
+ .items = ad7173_filter_types_str,
+ .num_items = ARRAY_SIZE(ad7173_filter_types_str),
+ .set = ad7173_set_filter_type,
+ .get = ad7173_get_filter_type,
+};
+
+static const struct iio_chan_spec_ext_info ad7173_chan_spec_ext_info[] = {
{
.name = "sys_calibration",
.write = ad7173_write_syscalib,
@@ -379,6 +455,16 @@ static const struct iio_chan_spec_ext_info ad7173_calibsys_ext_info[] = {
&ad7173_syscalib_mode_enum),
IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
&ad7173_syscalib_mode_enum),
+ IIO_ENUM("filter_type", IIO_SEPARATE, &ad7173_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_TYPE,
+ &ad7173_filter_type_enum),
+ { }
+};
+
+static const struct iio_chan_spec_ext_info ad7173_temp_chan_spec_ext_info[] = {
+ IIO_ENUM("filter_type", IIO_SEPARATE, &ad7173_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_TYPE,
+ &ad7173_filter_type_enum),
{ }
};
@@ -582,14 +668,18 @@ static bool ad7173_is_setup_equal(const struct ad7173_channel_config *cfg1,
sizeof(struct {
bool bipolar;
bool input_buf;
- u8 odr;
+ u16 sinc3_odr_div;
+ u8 sinc5_odr_index;
u8 ref_sel;
+ enum ad7173_filter_type filter_type;
}));
return cfg1->bipolar == cfg2->bipolar &&
cfg1->input_buf == cfg2->input_buf &&
- cfg1->odr == cfg2->odr &&
- cfg1->ref_sel == cfg2->ref_sel;
+ cfg1->sinc3_odr_div == cfg2->sinc3_odr_div &&
+ cfg1->sinc5_odr_index == cfg2->sinc5_odr_index &&
+ cfg1->ref_sel == cfg2->ref_sel &&
+ cfg1->filter_type == cfg2->filter_type;
}
static struct ad7173_channel_config *
@@ -630,6 +720,7 @@ static int ad7173_load_config(struct ad7173_state *st,
{
unsigned int config;
int free_cfg_slot, ret;
+ u8 post_filter_enable, post_filter_select;
free_cfg_slot = ida_alloc_range(&st->cfg_slots_status, 0,
st->info->num_configs - 1, GFP_KERNEL);
@@ -649,8 +740,49 @@ static int ad7173_load_config(struct ad7173_state *st,
if (ret)
return ret;
+ /*
+ * When the SINC3_MAP flag is enabled, the rest of the register has a
+ * different meaning. We are using this option to allow the widest
+ * possible range of sampling frequencies with the SINC3 filter.
+ */
+ if (cfg->filter_type == AD7173_FILTER_SINC3)
+ return ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(free_cfg_slot), 2,
+ FIELD_PREP(AD7173_FILTER_SINC3_MAP, 1) |
+ FIELD_PREP(AD7173_FILTER_SINC3_MAP_DIV,
+ cfg->sinc3_odr_div));
+
+ switch (cfg->filter_type) {
+ case AD7173_FILTER_SINC5_SINC1_PF1:
+ post_filter_enable = 1;
+ post_filter_select = 2;
+ break;
+ case AD7173_FILTER_SINC5_SINC1_PF2:
+ post_filter_enable = 1;
+ post_filter_select = 3;
+ break;
+ case AD7173_FILTER_SINC5_SINC1_PF3:
+ post_filter_enable = 1;
+ post_filter_select = 5;
+ break;
+ case AD7173_FILTER_SINC5_SINC1_PF4:
+ post_filter_enable = 1;
+ post_filter_select = 6;
+ break;
+ default:
+ post_filter_enable = 0;
+ post_filter_select = 0;
+ break;
+ }
+
return ad_sd_write_reg(&st->sd, AD7173_REG_FILTER(free_cfg_slot), 2,
- AD7173_FILTER_ODR0_MASK & cfg->odr);
+ FIELD_PREP(AD7173_FILTER_SINC3_MAP, 0) |
+ FIELD_PREP(AD7173_FILTER_ENHFILTEN,
+ post_filter_enable) |
+ FIELD_PREP(AD7173_FILTER_ENHFILT_MASK,
+ post_filter_select) |
+ FIELD_PREP(AD7173_FILTER_ORDER, 0) |
+ FIELD_PREP(AD7173_FILTER_ODR_MASK,
+ cfg->sinc5_odr_index));
}
static int ad7173_config_channel(struct ad7173_state *st, int addr)
@@ -761,6 +893,7 @@ static const struct ad_sigma_delta_info ad7173_sigma_delta_info_4_slots = {
.set_mode = ad7173_set_mode,
.has_registers = true,
.has_named_irqs = true,
+ .supports_spi_offload = true,
.addr_shift = 0,
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
@@ -777,6 +910,7 @@ static const struct ad_sigma_delta_info ad7173_sigma_delta_info_8_slots = {
.set_mode = ad7173_set_mode,
.has_registers = true,
.has_named_irqs = true,
+ .supports_spi_offload = true,
.addr_shift = 0,
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
@@ -793,6 +927,7 @@ static const struct ad_sigma_delta_info ad7173_sigma_delta_info_16_slots = {
.set_mode = ad7173_set_mode,
.has_registers = true,
.has_named_irqs = true,
+ .supports_spi_offload = true,
.addr_shift = 0,
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
@@ -1180,7 +1315,14 @@ static int ad7173_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_SAMP_FREQ:
- reg = st->channels[chan->address].cfg.odr;
+ if (st->channels[chan->address].cfg.filter_type == AD7173_FILTER_SINC3) {
+ /* Inverse operation of ad7173_sinc3_odr_div_from_odr() */
+ *val = MEGA;
+ *val2 = 3 * 32 * st->channels[chan->address].cfg.sinc3_odr_div;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ reg = st->channels[chan->address].cfg.sinc5_odr_index;
*val = st->info->sinc5_data_rates[reg] / MILLI;
*val2 = (st->info->sinc5_data_rates[reg] % MILLI) * (MICRO / MILLI);
@@ -1218,6 +1360,10 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
*
* This will cause the reading of CH1 to be actually done once every
* 200.16ms, an effective rate of 4.99sps.
+ *
+ * Both the sinc5 and sinc3 rates are set here so that if the filter
+ * type is changed, the requested rate will still be set (aside from
+ * rounding differences).
*/
case IIO_CHAN_INFO_SAMP_FREQ:
freq = val * MILLI + val2 / MILLI;
@@ -1226,7 +1372,8 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
break;
cfg = &st->channels[chan->address].cfg;
- cfg->odr = i;
+ cfg->sinc5_odr_index = i;
+ cfg->sinc3_odr_div = ad7173_sinc3_odr_div_from_odr(freq);
cfg->live = false;
break;
@@ -1243,17 +1390,40 @@ static int ad7173_update_scan_mode(struct iio_dev *indio_dev,
const unsigned long *scan_mask)
{
struct ad7173_state *st = iio_priv(indio_dev);
+ u16 sinc3_count = 0;
+ u16 sinc3_div = 0;
int i, j, k, ret;
for (i = 0; i < indio_dev->num_channels; i++) {
- if (test_bit(i, scan_mask))
+ const struct ad7173_channel_config *cfg = &st->channels[i].cfg;
+
+ if (test_bit(i, scan_mask)) {
+ if (cfg->filter_type == AD7173_FILTER_SINC3) {
+ sinc3_count++;
+
+ if (sinc3_div == 0) {
+ sinc3_div = cfg->sinc3_odr_div;
+ } else if (sinc3_div != cfg->sinc3_odr_div) {
+ dev_err(&st->sd.spi->dev,
+ "All enabled channels must have the same sampling_frequency for sinc3 filter_type\n");
+ return -EINVAL;
+ }
+ }
+
ret = ad7173_set_channel(&st->sd, i);
- else
+ } else {
ret = ad_sd_write_reg(&st->sd, AD7173_REG_CH(i), 2, 0);
+ }
if (ret < 0)
return ret;
}
+ if (sinc3_count && sinc3_count < bitmap_weight(scan_mask, indio_dev->num_channels)) {
+ dev_err(&st->sd.spi->dev,
+ "All enabled channels must have sinc3 filter_type\n");
+ return -EINVAL;
+ }
+
/*
* On some chips, there are more channels than setups, so if there were
* more unique setups requested than the number of available slots,
@@ -1396,7 +1566,7 @@ static const struct iio_chan_spec ad7173_channel_template = {
.storagebits = 32,
.endianness = IIO_BE,
},
- .ext_info = ad7173_calibsys_ext_info,
+ .ext_info = ad7173_chan_spec_ext_info,
};
static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
@@ -1412,6 +1582,7 @@ static const struct iio_chan_spec ad7173_temp_iio_channel_template = {
.storagebits = 32,
.endianness = IIO_BE,
},
+ .ext_info = ad7173_temp_chan_spec_ext_info,
};
static void ad7173_disable_regulators(void *data)
@@ -1652,12 +1823,21 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan_st_priv->cfg.bipolar = false;
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF;
- chan_st_priv->cfg.odr = st->info->odr_start_value;
+ chan_st_priv->cfg.sinc3_odr_div = ad7173_sinc3_odr_div_from_odr(
+ st->info->sinc5_data_rates[st->info->odr_start_value]
+ );
+ chan_st_priv->cfg.sinc5_odr_index = st->info->odr_start_value;
+ chan_st_priv->cfg.filter_type = AD7173_FILTER_SINC5_SINC1;
chan_st_priv->cfg.openwire_comp_chan = -1;
st->adc_mode |= AD7173_ADC_MODE_REF_EN;
if (st->info->data_reg_only_16bit)
chan_arr[chan_index].scan_type = ad4113_scan_type;
+ if (ad_sigma_delta_has_spi_offload(&st->sd)) {
+ chan_arr[chan_index].scan_type.storagebits = 32;
+ chan_arr[chan_index].scan_type.endianness = IIO_CPU;
+ }
+
chan_index++;
}
@@ -1719,7 +1899,11 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan->scan_index = chan_index;
chan->channel = ain[0];
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
- chan_st_priv->cfg.odr = st->info->odr_start_value;
+ chan_st_priv->cfg.sinc3_odr_div = ad7173_sinc3_odr_div_from_odr(
+ st->info->sinc5_data_rates[st->info->odr_start_value]
+ );
+ chan_st_priv->cfg.sinc5_odr_index = st->info->odr_start_value;
+ chan_st_priv->cfg.filter_type = AD7173_FILTER_SINC5_SINC1;
chan_st_priv->cfg.openwire_comp_chan = -1;
chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar");
@@ -1748,6 +1932,12 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
if (st->info->data_reg_only_16bit)
chan_arr[chan_index].scan_type = ad4113_scan_type;
+ /* Assume the SPI offload device is the ad411x_ad717x HDL project. */
+ if (ad_sigma_delta_has_spi_offload(&st->sd)) {
+ chan_arr[chan_index].scan_type.storagebits = 32;
+ chan_arr[chan_index].scan_type.endianness = IIO_CPU;
+ }
+
chan_index++;
}
return 0;
@@ -1780,8 +1970,7 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
ret = devm_add_action_or_reset(dev, ad7173_disable_regulators, st);
if (ret)
- return dev_err_probe(dev, ret,
- "Failed to add regulators disable action\n");
+ return ret;
ret = device_property_match_property_string(dev, "clock-names",
ad7173_clk_sel,
diff --git a/drivers/iio/adc/ad7280a.c b/drivers/iio/adc/ad7280a.c
index dda2986ccda0..50a6ff7c8b1c 100644
--- a/drivers/iio/adc/ad7280a.c
+++ b/drivers/iio/adc/ad7280a.c
@@ -541,7 +541,7 @@ static ssize_t ad7280_store_balance_timer(struct iio_dev *indio_dev,
int val, val2;
int ret;
- ret = iio_str_to_fixpoint(buf, 1000, &val, &val2);
+ ret = iio_str_to_fixpoint(buf, 100, &val, &val2);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index fa251dc1aae6..bfd908deefc0 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -1227,6 +1227,14 @@ static int ad7380_offload_buffer_postenable(struct iio_dev *indio_dev)
if (ret)
return ret;
+ /*
+ * When the sequencer is required to read all channels, we need to
+ * trigger twice per sample period in order to read one complete set
+ * of samples.
+ */
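+ /*
+ * E.g. if the sequencer alternates between two channel sets, a
+ * requested 100 kHz sampling frequency needs a 200 kHz trigger.
+ */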
+ if (st->seq)
+ config.periodic.frequency_hz *= 2;
+
ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
if (ret)
spi_unoptimize_message(&st->offload_msg);
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index aea734aa06bd..1bec6657394c 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -6,6 +6,7 @@
* Copyright 2010 Analog Devices Inc.
*/
+#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -27,22 +28,24 @@
struct ad7476_state;
struct ad7476_chip_info {
- unsigned int int_vref_uv;
+ unsigned int int_vref_mv;
struct iio_chan_spec channel[2];
- /* channels used when convst gpio is defined */
- struct iio_chan_spec convst_channel[2];
void (*reset)(struct ad7476_state *);
+ void (*conversion_pre_op)(struct ad7476_state *st);
+ void (*conversion_post_op)(struct ad7476_state *st);
bool has_vref;
bool has_vdrive;
+ bool convstart_required;
};
struct ad7476_state {
struct spi_device *spi;
const struct ad7476_chip_info *chip_info;
- struct regulator *ref_reg;
struct gpio_desc *convst_gpio;
struct spi_transfer xfer;
struct spi_message msg;
+ struct iio_chan_spec channel[2];
+ int scale_mv;
/*
* DMA (thus cache coherency maintenance) may require the
* transfer buffers to live in their own cache lines.
@@ -52,40 +55,29 @@ struct ad7476_state {
unsigned char data[ALIGN(2, sizeof(s64)) + sizeof(s64)] __aligned(IIO_DMA_MINALIGN);
};
-enum ad7476_supported_device_ids {
- ID_AD7091,
- ID_AD7091R,
- ID_AD7273,
- ID_AD7274,
- ID_AD7276,
- ID_AD7277,
- ID_AD7278,
- ID_AD7466,
- ID_AD7467,
- ID_AD7468,
- ID_AD7475,
- ID_AD7495,
- ID_AD7940,
- ID_ADC081S,
- ID_ADC101S,
- ID_ADC121S,
- ID_ADS7866,
- ID_ADS7867,
- ID_ADS7868,
- ID_LTC2314_14,
-};
-
static void ad7091_convst(struct ad7476_state *st)
{
if (!st->convst_gpio)
return;
- gpiod_set_value(st->convst_gpio, 0);
+ gpiod_set_value_cansleep(st->convst_gpio, 0);
udelay(1); /* CONVST pulse width: 10 ns min */
- gpiod_set_value(st->convst_gpio, 1);
+ gpiod_set_value_cansleep(st->convst_gpio, 1);
udelay(1); /* Conversion time: 650 ns max */
}
+static void bd79105_convst_disable(struct ad7476_state *st)
+{
+ gpiod_set_value_cansleep(st->convst_gpio, 0);
+}
+
+static void bd79105_convst_enable(struct ad7476_state *st)
+{
+ gpiod_set_value_cansleep(st->convst_gpio, 1);
+ /* Worst case, 2790 ns required for conversion */
+ ndelay(2790);
+}
+
static irqreturn_t ad7476_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -93,7 +85,8 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
struct ad7476_state *st = iio_priv(indio_dev);
int b_sent;
- ad7091_convst(st);
+ if (st->chip_info->conversion_pre_op)
+ st->chip_info->conversion_pre_op(st);
b_sent = spi_sync(st->spi, &st->msg);
if (b_sent < 0)
@@ -102,6 +95,8 @@ static irqreturn_t ad7476_trigger_handler(int irq, void *p)
iio_push_to_buffers_with_ts(indio_dev, st->data, sizeof(st->data),
iio_get_time_ns(indio_dev));
done:
+ if (st->chip_info->conversion_post_op)
+ st->chip_info->conversion_post_op(st);
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
@@ -117,12 +112,16 @@ static int ad7476_scan_direct(struct ad7476_state *st)
{
int ret;
- ad7091_convst(st);
+ if (st->chip_info->conversion_pre_op)
+ st->chip_info->conversion_pre_op(st);
ret = spi_sync(st->spi, &st->msg);
if (ret)
return ret;
+ if (st->chip_info->conversion_post_op)
+ st->chip_info->conversion_post_op(st);
+
return be16_to_cpup((__be16 *)st->data);
}
@@ -134,7 +133,6 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
{
int ret;
struct ad7476_state *st = iio_priv(indio_dev);
- int scale_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
@@ -145,18 +143,11 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- *val = (ret >> st->chip_info->channel[0].scan_type.shift) &
- GENMASK(st->chip_info->channel[0].scan_type.realbits - 1, 0);
+ *val = (ret >> chan->scan_type.shift) &
+ GENMASK(chan->scan_type.realbits - 1, 0);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- if (st->ref_reg) {
- scale_uv = regulator_get_voltage(st->ref_reg);
- if (scale_uv < 0)
- return scale_uv;
- } else {
- scale_uv = st->chip_info->int_vref_uv;
- }
- *val = scale_uv / 1000;
+ *val = st->scale_mv;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
}
@@ -185,125 +176,147 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
#define AD7940_CHAN(bits) _AD7476_CHAN((bits), 15 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
#define AD7091R_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), 0)
-#define AD7091R_CONVST_CHAN(bits) _AD7476_CHAN((bits), 16 - (bits), \
- BIT(IIO_CHAN_INFO_RAW))
#define ADS786X_CHAN(bits) _AD7476_CHAN((bits), 12 - (bits), \
BIT(IIO_CHAN_INFO_RAW))
-static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
- [ID_AD7091] = {
- .channel[0] = AD7091R_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .convst_channel[0] = AD7091R_CONVST_CHAN(12),
- .convst_channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .reset = ad7091_reset,
- },
- [ID_AD7091R] = {
- .channel[0] = AD7091R_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .convst_channel[0] = AD7091R_CONVST_CHAN(12),
- .convst_channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .int_vref_uv = 2500000,
- .has_vref = true,
- .reset = ad7091_reset,
- },
- [ID_AD7273] = {
- .channel[0] = AD7940_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .has_vref = true,
- },
- [ID_AD7274] = {
- .channel[0] = AD7940_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .has_vref = true,
- },
- [ID_AD7276] = {
- .channel[0] = AD7940_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7277] = {
- .channel[0] = AD7940_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7278] = {
- .channel[0] = AD7940_CHAN(8),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7466] = {
- .channel[0] = AD7476_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7467] = {
- .channel[0] = AD7476_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7468] = {
- .channel[0] = AD7476_CHAN(8),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_AD7475] = {
- .channel[0] = AD7476_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .has_vref = true,
- .has_vdrive = true,
- },
- [ID_AD7495] = {
- .channel[0] = AD7476_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .int_vref_uv = 2500000,
- .has_vdrive = true,
- },
- [ID_AD7940] = {
- .channel[0] = AD7940_CHAN(14),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADC081S] = {
- .channel[0] = ADC081S_CHAN(8),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADC101S] = {
- .channel[0] = ADC081S_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADC121S] = {
- .channel[0] = ADC081S_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADS7866] = {
- .channel[0] = ADS786X_CHAN(12),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADS7867] = {
- .channel[0] = ADS786X_CHAN(10),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_ADS7868] = {
- .channel[0] = ADS786X_CHAN(8),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- },
- [ID_LTC2314_14] = {
- .channel[0] = AD7940_CHAN(14),
- .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
- .has_vref = true,
- },
+static const struct ad7476_chip_info ad7091_chip_info = {
+ .channel[0] = AD7091R_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .conversion_pre_op = ad7091_convst,
+ .reset = ad7091_reset,
};
-static const struct iio_info ad7476_info = {
- .read_raw = &ad7476_read_raw,
+static const struct ad7476_chip_info ad7091r_chip_info = {
+ .channel[0] = AD7091R_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .conversion_pre_op = ad7091_convst,
+ .int_vref_mv = 2500,
+ .has_vref = true,
+ .reset = ad7091_reset,
};
-static void ad7476_reg_disable(void *data)
-{
- struct regulator *reg = data;
+static const struct ad7476_chip_info ad7273_chip_info = {
+ .channel[0] = AD7940_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+};
- regulator_disable(reg);
-}
+static const struct ad7476_chip_info ad7274_chip_info = {
+ .channel[0] = AD7940_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+};
+
+static const struct ad7476_chip_info ad7276_chip_info = {
+ .channel[0] = AD7940_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7277_chip_info = {
+ .channel[0] = AD7940_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7278_chip_info = {
+ .channel[0] = AD7940_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7466_chip_info = {
+ .channel[0] = AD7476_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7467_chip_info = {
+ .channel[0] = AD7476_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7468_chip_info = {
+ .channel[0] = AD7476_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ad7475_chip_info = {
+ .channel[0] = AD7476_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+ .has_vdrive = true,
+};
+
+static const struct ad7476_chip_info ad7495_chip_info = {
+ .channel[0] = AD7476_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .int_vref_mv = 2500,
+ .has_vdrive = true,
+};
+
+static const struct ad7476_chip_info ad7940_chip_info = {
+ .channel[0] = AD7940_CHAN(14),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info adc081s_chip_info = {
+ .channel[0] = ADC081S_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info adc101s_chip_info = {
+ .channel[0] = ADC081S_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info adc121s_chip_info = {
+ .channel[0] = ADC081S_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ads7866_chip_info = {
+ .channel[0] = ADS786X_CHAN(12),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ads7867_chip_info = {
+ .channel[0] = ADS786X_CHAN(10),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ads7868_chip_info = {
+ .channel[0] = ADS786X_CHAN(8),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+static const struct ad7476_chip_info ltc2314_14_chip_info = {
+ .channel[0] = AD7940_CHAN(14),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ .has_vref = true,
+};
+
+static const struct ad7476_chip_info bd79105_chip_info = {
+ .channel[0] = AD7091R_CHAN(16),
+ .channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
+ /*
+ * The BD79105 starts ADC data conversion when the CONVSTART line is
+ * set HIGH. The CONVSTART must be kept HIGH until the data has been
+ * read from the ADC.
+ */
+ .conversion_pre_op = bd79105_convst_enable,
+ .conversion_post_op = bd79105_convst_disable,
+ /* BD79105 won't do conversion without convstart */
+ .convstart_required = true,
+ .has_vref = true,
+ .has_vdrive = true,
+};
+
+static const struct iio_info ad7476_info = {
+ .read_raw = &ad7476_read_raw,
+};
static int ad7476_probe(struct spi_device *spi)
{
struct ad7476_state *st;
struct iio_dev *indio_dev;
- struct regulator *reg;
+ unsigned int i;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -311,61 +324,37 @@ static int ad7476_probe(struct spi_device *spi)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->chip_info =
- &ad7476_chip_info_tbl[spi_get_device_id(spi)->driver_data];
- reg = devm_regulator_get(&spi->dev, "vcc");
- if (IS_ERR(reg))
- return PTR_ERR(reg);
+ st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+ return -ENODEV;
- ret = regulator_enable(reg);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev, ad7476_reg_disable, reg);
- if (ret)
- return ret;
-
- /* Either vcc or vref (below) as appropriate */
- if (!st->chip_info->int_vref_uv)
- st->ref_reg = reg;
+ /* Use VCC for reference voltage if vref / internal vref aren't used */
+ if (!st->chip_info->int_vref_mv && !st->chip_info->has_vref) {
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vcc");
+ if (ret < 0)
+ return ret;
+ st->scale_mv = ret / 1000;
+ } else {
+ ret = devm_regulator_get_enable(&spi->dev, "vcc");
+ if (ret < 0)
+ return ret;
+ }
if (st->chip_info->has_vref) {
-
- /* If a device has an internal reference vref is optional */
- if (st->chip_info->int_vref_uv) {
- reg = devm_regulator_get_optional(&spi->dev, "vref");
- if (IS_ERR(reg) && (PTR_ERR(reg) != -ENODEV))
- return PTR_ERR(reg);
- } else {
- reg = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(reg))
- return PTR_ERR(reg);
- }
-
- if (!IS_ERR(reg)) {
- ret = regulator_enable(reg);
- if (ret)
- return ret;
-
- ret = devm_add_action_or_reset(&spi->dev,
- ad7476_reg_disable,
- reg);
- if (ret)
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0) {
+ /* Vref is optional if a device has an internal reference */
+ if (!st->chip_info->int_vref_mv || ret != -ENODEV)
return ret;
- st->ref_reg = reg;
} else {
- /*
- * Can only get here if device supports both internal
- * and external reference, but the regulator connected
- * to the external reference is not connected.
- * Set the reference regulator pointer to NULL to
- * indicate this.
- */
- st->ref_reg = NULL;
+ st->scale_mv = ret / 1000;
}
}
+ if (!st->scale_mv)
+ st->scale_mv = st->chip_info->int_vref_mv;
+
if (st->chip_info->has_vdrive) {
ret = devm_regulator_get_enable(&spi->dev, "vdrive");
if (ret)
@@ -378,20 +367,35 @@ static int ad7476_probe(struct spi_device *spi)
if (IS_ERR(st->convst_gpio))
return PTR_ERR(st->convst_gpio);
+ if (st->chip_info->convstart_required && !st->convst_gpio)
+ return dev_err_probe(&spi->dev, -EINVAL, "No convstart GPIO\n");
+
+ /*
+ * This will never happen unless someone changes the channel specs
+ * in this driver. And if someone does so without changing the loop
+ * below, then we'd better immediately produce a big fat error, before
+ * the change proceeds any further from that developer's desk.
+ */
+ static_assert(ARRAY_SIZE(st->channel) == ARRAY_SIZE(st->chip_info->channel));
+ for (i = 0; i < ARRAY_SIZE(st->channel); i++) {
+ st->channel[i] = st->chip_info->channel[i];
+ if (st->convst_gpio)
+ __set_bit(IIO_CHAN_INFO_RAW,
+ &st->channel[i].info_mask_separate);
+ }
+
st->spi = spi;
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channel;
- indio_dev->num_channels = 2;
+ indio_dev->channels = st->channel;
+ indio_dev->num_channels = ARRAY_SIZE(st->channel);
indio_dev->info = &ad7476_info;
- if (st->convst_gpio)
- indio_dev->channels = st->chip_info->convst_channel;
/* Setup default message */
st->xfer.rx_buf = &st->data;
- st->xfer.len = st->chip_info->channel[0].scan_type.storagebits / 8;
+ st->xfer.len = indio_dev->channels[0].scan_type.storagebits / 8;
spi_message_init(&st->msg);
spi_message_add_tail(&st->xfer, &st->msg);
@@ -408,41 +412,42 @@ static int ad7476_probe(struct spi_device *spi)
}
static const struct spi_device_id ad7476_id[] = {
- { "ad7091", ID_AD7091 },
- { "ad7091r", ID_AD7091R },
- { "ad7273", ID_AD7273 },
- { "ad7274", ID_AD7274 },
- { "ad7276", ID_AD7276},
- { "ad7277", ID_AD7277 },
- { "ad7278", ID_AD7278 },
- { "ad7466", ID_AD7466 },
- { "ad7467", ID_AD7467 },
- { "ad7468", ID_AD7468 },
- { "ad7475", ID_AD7475 },
- { "ad7476", ID_AD7466 },
- { "ad7476a", ID_AD7466 },
- { "ad7477", ID_AD7467 },
- { "ad7477a", ID_AD7467 },
- { "ad7478", ID_AD7468 },
- { "ad7478a", ID_AD7468 },
- { "ad7495", ID_AD7495 },
- { "ad7910", ID_AD7467 },
- { "ad7920", ID_AD7466 },
- { "ad7940", ID_AD7940 },
- { "adc081s", ID_ADC081S },
- { "adc101s", ID_ADC101S },
- { "adc121s", ID_ADC121S },
- { "ads7866", ID_ADS7866 },
- { "ads7867", ID_ADS7867 },
- { "ads7868", ID_ADS7868 },
+ { "ad7091", (kernel_ulong_t)&ad7091_chip_info },
+ { "ad7091r", (kernel_ulong_t)&ad7091r_chip_info },
+ { "ad7273", (kernel_ulong_t)&ad7273_chip_info },
+ { "ad7274", (kernel_ulong_t)&ad7274_chip_info },
+ { "ad7276", (kernel_ulong_t)&ad7276_chip_info },
+ { "ad7277", (kernel_ulong_t)&ad7277_chip_info },
+ { "ad7278", (kernel_ulong_t)&ad7278_chip_info },
+ { "ad7466", (kernel_ulong_t)&ad7466_chip_info },
+ { "ad7467", (kernel_ulong_t)&ad7467_chip_info },
+ { "ad7468", (kernel_ulong_t)&ad7468_chip_info },
+ { "ad7475", (kernel_ulong_t)&ad7475_chip_info },
+ { "ad7476", (kernel_ulong_t)&ad7466_chip_info },
+ { "ad7476a", (kernel_ulong_t)&ad7466_chip_info },
+ { "ad7477", (kernel_ulong_t)&ad7467_chip_info },
+ { "ad7477a", (kernel_ulong_t)&ad7467_chip_info },
+ { "ad7478", (kernel_ulong_t)&ad7468_chip_info },
+ { "ad7478a", (kernel_ulong_t)&ad7468_chip_info },
+ { "ad7495", (kernel_ulong_t)&ad7495_chip_info },
+ { "ad7910", (kernel_ulong_t)&ad7467_chip_info },
+ { "ad7920", (kernel_ulong_t)&ad7466_chip_info },
+ { "ad7940", (kernel_ulong_t)&ad7940_chip_info },
+ { "adc081s", (kernel_ulong_t)&adc081s_chip_info },
+ { "adc101s", (kernel_ulong_t)&adc101s_chip_info },
+ { "adc121s", (kernel_ulong_t)&adc121s_chip_info },
+ { "ads7866", (kernel_ulong_t)&ads7866_chip_info },
+ { "ads7867", (kernel_ulong_t)&ads7867_chip_info },
+ { "ads7868", (kernel_ulong_t)&ads7868_chip_info },
+ { "bd79105", (kernel_ulong_t)&bd79105_chip_info },
/*
* The ROHM BU79100G is identical to TI's ADS7866 from a software
* point of view. The binding document mandates that the ADS7866 be
* marked as a fallback for the BU79100G, but we still need the SPI ID
* here to make the module loading work.
*/
- { "bu79100g", ID_ADS7866 },
- { "ltc2314-14", ID_LTC2314_14 },
+ { "bu79100g", (kernel_ulong_t)&ads7866_chip_info },
+ { "ltc2314-14", (kernel_ulong_t)&ltc2314_14_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad7476_id);
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index ca8fa91796ca..d96802b7847a 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -217,7 +217,7 @@ struct ad7768_state {
struct spi_device *spi;
struct regmap *regmap;
struct regmap *regmap24;
- struct regulator *vref;
+ int vref_uv;
struct regulator_dev *vcm_rdev;
unsigned int vcm_output_sel;
struct clk *mclk;
@@ -687,8 +687,6 @@ static int ad7768_set_freq(struct ad7768_state *st,
int ret;
freq = clamp(freq, 50, 1024000);
- if (freq == 0)
- return -EINVAL;
mclk_div = DIV_ROUND_CLOSEST(st->mclk_freq, freq * st->oversampling_ratio);
/* Find the closest match for the desired sampling frequency */
@@ -776,7 +774,7 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
{
struct ad7768_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
- int scale_uv, ret, temp;
+ int ret, temp;
scan_type = iio_get_current_scan_type(indio_dev, chan);
if (IS_ERR(scan_type))
@@ -797,11 +795,7 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- scale_uv = regulator_get_voltage(st->vref);
- if (scale_uv < 0)
- return scale_uv;
-
- *val = (scale_uv * 2) / 1000;
+ *val = (st->vref_uv * 2) / 1000;
*val2 = scan_type->realbits;
return IIO_VAL_FRACTIONAL_LOG2;
@@ -905,7 +899,7 @@ static int ad7768_read_label(struct iio_dev *indio_dev,
{
struct ad7768_state *st = iio_priv(indio_dev);
- return sprintf(label, "%s\n", st->labels[chan->channel]);
+ return sysfs_emit(label, "%s\n", st->labels[chan->channel]);
}
static int ad7768_get_current_scan_type(const struct iio_dev *indio_dev,
@@ -1134,13 +1128,6 @@ static const struct iio_trigger_ops ad7768_trigger_ops = {
.validate_device = iio_trigger_validate_own_device,
};
-static void ad7768_regulator_disable(void *data)
-{
- struct ad7768_state *st = data;
-
- regulator_disable(st->vref);
-}
-
static int ad7768_set_channel_label(struct iio_dev *indio_dev,
int num_channels)
{
@@ -1372,19 +1359,11 @@ static int ad7768_probe(struct spi_device *spi)
return dev_err_probe(&spi->dev, PTR_ERR(st->regmap24),
"Failed to initialize regmap24");
- st->vref = devm_regulator_get(&spi->dev, "vref");
- if (IS_ERR(st->vref))
- return PTR_ERR(st->vref);
-
- ret = regulator_enable(st->vref);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified vref supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad7768_regulator_disable, st);
- if (ret)
- return ret;
+ ret = devm_regulator_get_enable_read_voltage(&spi->dev, "vref");
+ if (ret < 0)
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to get VREF voltage\n");
+ st->vref_uv = ret;
st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
diff --git a/drivers/iio/adc/ad7779.c b/drivers/iio/adc/ad7779.c
index 845adc510239..aac5049c9a07 100644
--- a/drivers/iio/adc/ad7779.c
+++ b/drivers/iio/adc/ad7779.c
@@ -25,6 +25,7 @@
#include <linux/units.h>
#include <linux/iio/iio.h>
+#include <linux/iio/backend.h>
#include <linux/iio/buffer.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
@@ -145,6 +146,7 @@ struct ad7779_state {
struct completion completion;
unsigned int sampling_freq;
enum ad7779_filter filter_enabled;
+ struct iio_backend *back;
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
@@ -630,12 +632,38 @@ static int ad7779_reset(struct iio_dev *indio_dev, struct gpio_desc *reset_gpio)
return ret;
}
+static int ad7779_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad7779_state *st = iio_priv(indio_dev);
+ unsigned int c;
+ int ret;
+
+ for (c = 0; c < AD7779_NUM_CHANNELS; c++) {
+ if (test_bit(c, scan_mask))
+ ret = iio_backend_chan_enable(st->back, c);
+ else
+ ret = iio_backend_chan_disable(st->back, c);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct iio_info ad7779_info = {
.read_raw = ad7779_read_raw,
.write_raw = ad7779_write_raw,
.debugfs_reg_access = &ad7779_reg_access,
};
+static const struct iio_info ad7779_info_data = {
+ .read_raw = ad7779_read_raw,
+ .write_raw = ad7779_write_raw,
+ .debugfs_reg_access = &ad7779_reg_access,
+ .update_scan_mode = &ad7779_update_scan_mode,
+};
+
static const struct iio_enum ad7779_filter_enum = {
.items = ad7779_filter_type,
.num_items = ARRAY_SIZE(ad7779_filter_type),
@@ -752,6 +780,125 @@ static int ad7779_conf(struct ad7779_state *st, struct gpio_desc *start_gpio)
return 0;
}
+static int ad7779_set_data_lines(struct iio_dev *indio_dev, u32 num_lanes)
+{
+ struct ad7779_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (num_lanes != 1 && num_lanes != 2 && num_lanes != 4)
+ return -EINVAL;
+
+ ret = ad7779_set_sampling_frequency(st, num_lanes * AD7779_DEFAULT_SAMPLING_1LINE);
+ if (ret)
+ return ret;
+
+ ret = iio_backend_num_lanes_set(st->back, num_lanes);
+ if (ret)
+ return ret;
+
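+ /*
+ * DOUT_FORMAT encodes the lane count as 2 - ilog2(num_lanes):
+ * 4 lanes -> 0, 2 lanes -> 1, 1 lane -> 2.
+ */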
+ return ad7779_spi_write_mask(st, AD7779_REG_DOUT_FORMAT,
+ AD7779_DOUT_FORMAT_MSK,
+ FIELD_PREP(AD7779_DOUT_FORMAT_MSK, 2 - ilog2(num_lanes)));
+}
+
+static int ad7779_setup_channels(struct iio_dev *indio_dev, const struct ad7779_state *st)
+{
+ struct iio_chan_spec *channels;
+ struct device *dev = &st->spi->dev;
+
+ channels = devm_kmemdup_array(dev, st->chip_info->channels,
+ ARRAY_SIZE(ad7779_channels),
+ sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(ad7779_channels); i++)
+ channels[i].scan_type.endianness = IIO_CPU;
+
+ indio_dev->channels = channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7779_channels);
+
+ return 0;
+}
+
+static int ad7779_setup_without_backend(struct ad7779_state *st, struct iio_dev *indio_dev)
+{
+ int ret;
+ struct device *dev = &st->spi->dev;
+
+ indio_dev->info = &ad7779_info;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7779_channels);
+
+ st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!st->trig)
+ return -ENOMEM;
+
+ st->trig->ops = &ad7779_trigger_ops;
+
+ iio_trigger_set_drvdata(st->trig, st);
+
+ ret = devm_request_irq(dev, st->spi->irq, iio_trigger_generic_data_rdy_poll,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN, indio_dev->name,
+ st->trig);
+ if (ret)
+ return dev_err_probe(dev, ret, "request IRQ %d failed\n",
+ st->spi->irq);
+
+ ret = devm_iio_trigger_register(dev, st->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(st->trig);
+
+ init_completion(&st->completion);
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad7779_trigger_handler,
+ &ad7779_buffer_setup_ops);
+ if (ret)
+ return ret;
+
+ return ad7779_spi_write_mask(st, AD7779_REG_DOUT_FORMAT,
+ AD7779_DCLK_CLK_DIV_MSK,
+ FIELD_PREP(AD7779_DCLK_CLK_DIV_MSK, 7));
+}
+
+static int ad7779_setup_backend(struct ad7779_state *st, struct iio_dev *indio_dev)
+{
+ struct device *dev = &st->spi->dev;
+ int ret;
+ u32 num_lanes;
+
+ indio_dev->info = &ad7779_info_data;
+
+ ret = ad7779_setup_channels(indio_dev, st);
+ if (ret)
+ return ret;
+
+ st->back = devm_iio_backend_get(dev, NULL);
+ if (IS_ERR(st->back))
+ return dev_err_probe(dev, PTR_ERR(st->back),
+ "failed to get iio backend");
+
+ ret = devm_iio_backend_request_buffer(dev, st->back, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_backend_enable(dev, st->back);
+ if (ret)
+ return ret;
+
+ num_lanes = 4;
+ ret = device_property_read_u32(dev, "adi,num-lanes", &num_lanes);
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ return ad7779_set_data_lines(indio_dev, num_lanes);
+}
+
static int ad7779_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -760,9 +907,6 @@ static int ad7779_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
int ret = -EINVAL;
- if (!spi->irq)
- return dev_err_probe(dev, ret, "DRDY irq not present\n");
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
@@ -804,45 +948,12 @@ static int ad7779_probe(struct spi_device *spi)
return ret;
indio_dev->name = st->chip_info->name;
- indio_dev->info = &ad7779_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = st->chip_info->channels;
- indio_dev->num_channels = ARRAY_SIZE(ad7779_channels);
- st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
- iio_device_id(indio_dev));
- if (!st->trig)
- return -ENOMEM;
-
- st->trig->ops = &ad7779_trigger_ops;
-
- iio_trigger_set_drvdata(st->trig, st);
-
- ret = devm_request_irq(dev, spi->irq, iio_trigger_generic_data_rdy_poll,
- IRQF_ONESHOT | IRQF_NO_AUTOEN, indio_dev->name,
- st->trig);
- if (ret)
- return dev_err_probe(dev, ret, "request IRQ %d failed\n",
- st->spi->irq);
-
- ret = devm_iio_trigger_register(dev, st->trig);
- if (ret)
- return ret;
-
- indio_dev->trig = iio_trigger_get(st->trig);
-
- init_completion(&st->completion);
-
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
- &iio_pollfunc_store_time,
- &ad7779_trigger_handler,
- &ad7779_buffer_setup_ops);
- if (ret)
- return ret;
-
- ret = ad7779_spi_write_mask(st, AD7779_REG_DOUT_FORMAT,
- AD7779_DCLK_CLK_DIV_MSK,
- FIELD_PREP(AD7779_DCLK_CLK_DIV_MSK, 7));
+ if (device_property_present(dev, "io-backends"))
+ ret = ad7779_setup_backend(st, indio_dev);
+ else
+ ret = ad7779_setup_without_backend(st, indio_dev);
if (ret)
return ret;
@@ -936,3 +1047,4 @@ module_spi_driver(ad7779_driver);
MODULE_AUTHOR("Ramona Alexandra Nechita <ramona.nechita@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7779 ADC");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_BACKEND");
diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c
index 202561cad401..b35d299a3977 100644
--- a/drivers/iio/adc/ad7949.c
+++ b/drivers/iio/adc/ad7949.c
@@ -316,10 +316,8 @@ static int ad7949_spi_probe(struct spi_device *spi)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*ad7949_adc));
- if (!indio_dev) {
- dev_err(dev, "can not allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
indio_dev->info = &ad7949_spi_info;
indio_dev->name = spi_get_device_id(spi)->name;
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 9c02f9199139..108bb22162ef 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -114,11 +114,13 @@ struct ad799x_chip_config {
* @num_channels: number of channels
* @noirq_config: device configuration w/o IRQ
* @irq_config: device configuration w/IRQ
+ * @has_vref: device supports external reference voltage
*/
struct ad799x_chip_info {
int num_channels;
const struct ad799x_chip_config noirq_config;
const struct ad799x_chip_config irq_config;
+ bool has_vref;
};
struct ad799x_state {
@@ -604,6 +606,7 @@ static const struct iio_event_spec ad799x_events[] = {
static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
[ad7991] = {
.num_channels = 5,
+ .has_vref = true,
.noirq_config = {
.channel = {
AD799X_CHANNEL(0, 12),
@@ -617,6 +620,7 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7995] = {
.num_channels = 5,
+ .has_vref = true,
.noirq_config = {
.channel = {
AD799X_CHANNEL(0, 10),
@@ -630,6 +634,7 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7999] = {
.num_channels = 5,
+ .has_vref = true,
.noirq_config = {
.channel = {
AD799X_CHANNEL(0, 8),
@@ -687,6 +692,7 @@ static const struct ad799x_chip_info ad799x_chip_info_tbl[] = {
},
[ad7994] = {
.num_channels = 5,
+ .has_vref = true,
.noirq_config = {
.channel = {
AD799X_CHANNEL(0, 12),
@@ -809,32 +815,22 @@ static int ad799x_probe(struct i2c_client *client)
return ret;
/* check if an external reference is supplied */
- st->vref = devm_regulator_get_optional(&client->dev, "vref");
-
- if (IS_ERR(st->vref)) {
- if (PTR_ERR(st->vref) == -ENODEV) {
+ if (chip_info->has_vref) {
+ st->vref = devm_regulator_get_optional(&client->dev, "vref");
+ ret = PTR_ERR_OR_ZERO(st->vref);
+ if (ret) {
+ if (ret != -ENODEV)
+ goto error_disable_reg;
st->vref = NULL;
dev_info(&client->dev, "Using VCC reference voltage\n");
- } else {
- ret = PTR_ERR(st->vref);
- goto error_disable_reg;
}
- }
- if (st->vref) {
- /*
- * Use external reference voltage if supported by hardware.
- * This is optional if voltage / regulator present, use VCC otherwise.
- */
- if ((st->id == ad7991) || (st->id == ad7995) || (st->id == ad7999)) {
+ if (st->vref) {
dev_info(&client->dev, "Using external reference voltage\n");
extra_config |= AD7991_REF_SEL;
ret = regulator_enable(st->vref);
if (ret)
goto error_disable_reg;
- } else {
- st->vref = NULL;
- dev_warn(&client->dev, "Supplied reference not supported\n");
}
}
diff --git a/drivers/iio/adc/ade9000.c b/drivers/iio/adc/ade9000.c
new file mode 100644
index 000000000000..2de8a718d62a
--- /dev/null
+++ b/drivers/iio/adc/ade9000.c
@@ -0,0 +1,1799 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADE9000 driver
+ *
+ * Copyright 2025 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/events.h>
+#include <linux/interrupt.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/unaligned.h>
+
+/* Address of ADE9000 registers */
+#define ADE9000_REG_AIGAIN 0x000
+#define ADE9000_REG_AVGAIN 0x00B
+#define ADE9000_REG_AIRMSOS 0x00C
+#define ADE9000_REG_AVRMSOS 0x00D
+#define ADE9000_REG_APGAIN 0x00E
+#define ADE9000_REG_AWATTOS 0x00F
+#define ADE9000_REG_AVAROS 0x010
+#define ADE9000_REG_AFVAROS 0x012
+#define ADE9000_REG_CONFIG0 0x060
+#define ADE9000_REG_DICOEFF 0x072
+#define ADE9000_REG_AI_PCF 0x20A
+#define ADE9000_REG_AV_PCF 0x20B
+#define ADE9000_REG_AIRMS 0x20C
+#define ADE9000_REG_AVRMS 0x20D
+#define ADE9000_REG_AWATT 0x210
+#define ADE9000_REG_AVAR 0x211
+#define ADE9000_REG_AVA 0x212
+#define ADE9000_REG_AFVAR 0x214
+#define ADE9000_REG_APF 0x216
+#define ADE9000_REG_BI_PCF 0x22A
+#define ADE9000_REG_BV_PCF 0x22B
+#define ADE9000_REG_BIRMS 0x22C
+#define ADE9000_REG_BVRMS 0x22D
+#define ADE9000_REG_CI_PCF 0x24A
+#define ADE9000_REG_CV_PCF 0x24B
+#define ADE9000_REG_CIRMS 0x24C
+#define ADE9000_REG_CVRMS 0x24D
+#define ADE9000_REG_AWATT_ACC 0x2E5
+#define ADE9000_REG_AWATTHR_LO 0x2E6
+#define ADE9000_REG_AVAHR_LO 0x2FA
+#define ADE9000_REG_AFVARHR_LO 0x30E
+#define ADE9000_REG_BWATTHR_LO 0x322
+#define ADE9000_REG_BVAHR_LO 0x336
+#define ADE9000_REG_BFVARHR_LO 0x34A
+#define ADE9000_REG_CWATTHR_LO 0x35E
+#define ADE9000_REG_CVAHR_LO 0x372
+#define ADE9000_REG_CFVARHR_LO 0x386
+#define ADE9000_REG_STATUS0 0x402
+#define ADE9000_REG_STATUS1 0x403
+#define ADE9000_REG_MASK0 0x405
+#define ADE9000_REG_MASK1 0x406
+#define ADE9000_REG_EVENT_MASK 0x407
+#define ADE9000_REG_VLEVEL 0x40F
+#define ADE9000_REG_DIP_LVL 0x410
+#define ADE9000_REG_DIPA 0x411
+#define ADE9000_REG_DIPB 0x412
+#define ADE9000_REG_DIPC 0x413
+#define ADE9000_REG_SWELL_LVL 0x414
+#define ADE9000_REG_SWELLA 0x415
+#define ADE9000_REG_SWELLB 0x416
+#define ADE9000_REG_SWELLC 0x417
+#define ADE9000_REG_APERIOD 0x418
+#define ADE9000_REG_BPERIOD 0x419
+#define ADE9000_REG_CPERIOD 0x41A
+#define ADE9000_REG_RUN 0x480
+#define ADE9000_REG_CONFIG1 0x481
+#define ADE9000_REG_ACCMODE 0x492
+#define ADE9000_REG_CONFIG3 0x493
+#define ADE9000_REG_ZXTOUT 0x498
+#define ADE9000_REG_ZX_LP_SEL 0x49A
+#define ADE9000_REG_WFB_CFG 0x4A0
+#define ADE9000_REG_WFB_PG_IRQEN 0x4A1
+#define ADE9000_REG_WFB_TRG_CFG 0x4A2
+#define ADE9000_REG_WFB_TRG_STAT 0x4A3
+#define ADE9000_REG_CONFIG2 0x4AF
+#define ADE9000_REG_EP_CFG 0x4B0
+#define ADE9000_REG_EGY_TIME 0x4B2
+#define ADE9000_REG_PGA_GAIN 0x4B9
+#define ADE9000_REG_VERSION 0x4FE
+#define ADE9000_REG_WF_BUFF 0x800
+#define ADE9000_REG_WF_HALF_BUFF 0xC00
+
+#define ADE9000_REG_ADDR_MASK GENMASK(15, 4)
+#define ADE9000_REG_READ_BIT_MASK BIT(3)
+
+#define ADE9000_WF_CAP_EN_MASK BIT(4)
+#define ADE9000_WF_CAP_SEL_MASK BIT(5)
+#define ADE9000_WF_MODE_MASK GENMASK(7, 6)
+#define ADE9000_WF_SRC_MASK GENMASK(9, 8)
+#define ADE9000_WF_IN_EN_MASK BIT(12)
+
+/* External reference selection bit in CONFIG1 */
+#define ADE9000_EXT_REF_MASK BIT(15)
+
+/*
+ * Configuration registers
+ */
+#define ADE9000_PGA_GAIN 0x0000
+
+/* Default configuration */
+
+#define ADE9000_CONFIG0 0x00000000
+
+/* CF3/ZX pin outputs Zero crossing, CF4 = DREADY */
+#define ADE9000_CONFIG1 0x000E
+
+/* Default High pass corner frequency of 1.25Hz */
+#define ADE9000_CONFIG2 0x0A00
+
+/* Peak and overcurrent detection disabled */
+#define ADE9000_CONFIG3 0x0000
+
+/*
+ * 50Hz operation, 3P4W Wye configuration, signed accumulation
+ * 3P4W Wye = 3-Phase 4-Wire star configuration (3 phases + neutral wire)
+ * Clear bit 8 i.e. ACCMODE=0x00xx for 50Hz operation
+ * ACCMODE=0x0x9x for 3Wire delta when phase B is used as reference
+ * 3Wire delta = 3-Phase 3-Wire triangle configuration (3 phases, no neutral)
+ */
+#define ADE9000_ACCMODE 0x0000
+#define ADE9000_ACCMODE_60HZ 0x0100
+
+/* Line period and zero crossing obtained from VA */
+#define ADE9000_ZX_LP_SEL 0x0000
+
+/* Interrupt mask values for initialization */
+#define ADE9000_MASK0_ALL_INT_DIS 0
+#define ADE9000_MASK1_ALL_INT_DIS 0x00000000
+
+/* Events disabled */
+#define ADE9000_EVENT_DISABLE 0x00000000
+
+/*
+ * Assuming Vnom=1/2 of full scale.
+ * Refer to Technical reference manual for detailed calculations.
+ */
+#define ADE9000_VLEVEL 0x0022EA28
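+/*
+ * A sketch of the arithmetic, assuming the datasheet relation
+ * VLEVEL = X * 1144084 with X = full scale / Vnom: for Vnom = 1/2 of
+ * full scale, X = 2 and VLEVEL = 2 * 1144084 = 2288168 = 0x0022EA28.
+ */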
+
+/* Set DICOEFF= 0xFFFFE000 when integrator is enabled */
+#define ADE9000_DICOEFF 0x00000000
+
+/* DSP ON */
+#define ADE9000_RUN_ON 0xFFFFFFFF
+
+/*
+ * Energy Accumulation Settings
+ * Enable energy accumulation, accumulate samples at 8ksps
+ * latch energy accumulation after EGYRDY
+ * If accumulation is changed to half line cycle mode, change EGY_TIME
+ */
+#define ADE9000_EP_CFG 0x0011
+
+/* Accumulate 8000 samples (EGY_TIME + 1) */
+#define ADE9000_EGY_TIME 7999
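+/*
+ * A quick check, assuming EGYRDY fires every EGY_TIME + 1 samples: at
+ * the 8 ksps accumulation rate above, 7999 + 1 = 8000 samples per
+ * EGYRDY, i.e. one energy update per second.
+ */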
+
+/*
+ * Constant Definitions
+ * ADE9000 FDSP: 8000 sps or 4000 sps, depending on the configured output rate
+ */
+#define ADE9000_FDSP 4000
+#define ADE9000_DEFAULT_CLK_FREQ_HZ 24576000
+#define ADE9000_WFB_CFG 0x03E9
+#define ADE9000_WFB_PAGE_SIZE 128
+#define ADE9000_WFB_NR_OF_PAGES 16
+#define ADE9000_WFB_MAX_CHANNELS 8
+#define ADE9000_WFB_BYTES_IN_SAMPLE 4
+#define ADE9000_WFB_SAMPLES_IN_PAGE \
+ (ADE9000_WFB_PAGE_SIZE / ADE9000_WFB_MAX_CHANNELS)
+#define ADE9000_WFB_MAX_SAMPLES_CHAN \
+ (ADE9000_WFB_SAMPLES_IN_PAGE * ADE9000_WFB_NR_OF_PAGES)
+#define ADE9000_WFB_FULL_BUFF_NR_SAMPLES \
+ (ADE9000_WFB_PAGE_SIZE * ADE9000_WFB_NR_OF_PAGES)
+#define ADE9000_WFB_FULL_BUFF_SIZE \
+ (ADE9000_WFB_FULL_BUFF_NR_SAMPLES * ADE9000_WFB_BYTES_IN_SAMPLE)
+
+#define ADE9000_SWRST_BIT BIT(0)
+
+/* Status and Mask register bits */
+#define ADE9000_ST0_WFB_TRIG_BIT BIT(16)
+#define ADE9000_ST0_PAGE_FULL_BIT BIT(17)
+#define ADE9000_ST0_EGYRDY BIT(0)
+
+#define ADE9000_ST1_ZXTOVA_BIT BIT(6)
+#define ADE9000_ST1_ZXTOVB_BIT BIT(7)
+#define ADE9000_ST1_ZXTOVC_BIT BIT(8)
+#define ADE9000_ST1_ZXVA_BIT BIT(9)
+#define ADE9000_ST1_ZXVB_BIT BIT(10)
+#define ADE9000_ST1_ZXVC_BIT BIT(11)
+#define ADE9000_ST1_ZXIA_BIT BIT(13)
+#define ADE9000_ST1_ZXIB_BIT BIT(14)
+#define ADE9000_ST1_ZXIC_BIT BIT(15)
+#define ADE9000_ST1_RSTDONE_BIT BIT(16)
+#define ADE9000_ST1_SEQERR_BIT BIT(18)
+#define ADE9000_ST1_SWELLA_BIT BIT(20)
+#define ADE9000_ST1_SWELLB_BIT BIT(21)
+#define ADE9000_ST1_SWELLC_BIT BIT(22)
+#define ADE9000_ST1_DIPA_BIT BIT(23)
+#define ADE9000_ST1_DIPB_BIT BIT(24)
+#define ADE9000_ST1_DIPC_BIT BIT(25)
+#define ADE9000_ST1_ERROR0_BIT BIT(28)
+#define ADE9000_ST1_ERROR1_BIT BIT(29)
+#define ADE9000_ST1_ERROR2_BIT BIT(30)
+#define ADE9000_ST1_ERROR3_BIT BIT(31)
+#define ADE9000_ST_ERROR \
+ (ADE9000_ST1_ERROR0_BIT | ADE9000_ST1_ERROR1_BIT | \
+ ADE9000_ST1_ERROR2_BIT | ADE9000_ST1_ERROR3_BIT)
+#define ADE9000_ST1_CROSSING_FIRST 6
+#define ADE9000_ST1_CROSSING_DEPTH 25
+
+#define ADE9000_WFB_TRG_DIP_BIT BIT(0)
+#define ADE9000_WFB_TRG_SWELL_BIT BIT(1)
+#define ADE9000_WFB_TRG_ZXIA_BIT BIT(3)
+#define ADE9000_WFB_TRG_ZXIB_BIT BIT(4)
+#define ADE9000_WFB_TRG_ZXIC_BIT BIT(5)
+#define ADE9000_WFB_TRG_ZXVA_BIT BIT(6)
+#define ADE9000_WFB_TRG_ZXVB_BIT BIT(7)
+#define ADE9000_WFB_TRG_ZXVC_BIT BIT(8)
+
+/* Stop when waveform buffer is full */
+#define ADE9000_WFB_FULL_MODE 0x0
+/* Continuous fill; stop only on enabled trigger events */
+#define ADE9000_WFB_EN_TRIG_MODE 0x1
+/* Continuous fill; center capture around enabled trigger events */
+#define ADE9000_WFB_C_EN_TRIG_MODE 0x2
+/* Continuous fill; used as streaming mode for continuous data output */
+#define ADE9000_WFB_STREAMING_MODE 0x3
+
+#define ADE9000_LAST_PAGE_BIT BIT(15)
+#define ADE9000_MIDDLE_PAGE_BIT BIT(7)
+
+/*
+ * Full-scale codes, as given in the datasheet. The respective digital
+ * codes are produced when the ADC inputs are at full scale.
+ */
+#define ADE9000_RMS_FULL_SCALE_CODES 52866837
+#define ADE9000_WATT_FULL_SCALE_CODES 20694066
+#define ADE9000_PCF_FULL_SCALE_CODES 74770000
+
+/* Phase and channel definitions */
+#define ADE9000_PHASE_A_NR 0
+#define ADE9000_PHASE_B_NR 1
+#define ADE9000_PHASE_C_NR 2
+
+#define ADE9000_SCAN_POS_IA BIT(0)
+#define ADE9000_SCAN_POS_VA BIT(1)
+#define ADE9000_SCAN_POS_IB BIT(2)
+#define ADE9000_SCAN_POS_VB BIT(3)
+#define ADE9000_SCAN_POS_IC BIT(4)
+#define ADE9000_SCAN_POS_VC BIT(5)
+
+/* Waveform buffer configuration values */
+enum ade9000_wfb_cfg {
+ ADE9000_WFB_CFG_ALL_CHAN = 0x0,
+ ADE9000_WFB_CFG_IA_VA = 0x1,
+ ADE9000_WFB_CFG_IB_VB = 0x2,
+ ADE9000_WFB_CFG_IC_VC = 0x3,
+ ADE9000_WFB_CFG_IA = 0x8,
+ ADE9000_WFB_CFG_VA = 0x9,
+ ADE9000_WFB_CFG_IB = 0xA,
+ ADE9000_WFB_CFG_VB = 0xB,
+ ADE9000_WFB_CFG_IC = 0xC,
+ ADE9000_WFB_CFG_VC = 0xD,
+};
+
+#define ADE9000_PHASE_B_POS_BIT BIT(5)
+#define ADE9000_PHASE_C_POS_BIT BIT(6)
+
+#define ADE9000_MAX_PHASE_NR 3
+#define AD9000_CHANNELS_PER_PHASE 10
+
+/*
+ * Calculate register address for multi-phase device.
+ * Phase A (chan 0): base address + 0x00
+ * Phase B (chan 1): base address + 0x20
+ * Phase C (chan 2): base address + 0x40
+ */
+#define ADE9000_ADDR_ADJUST(addr, chan) \
+ (((chan) == 0 ? 0 : (chan) == 1 ? 2 : 4) << 4 | (addr))
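+/*
+ * For example, ADE9000_ADDR_ADJUST(ADE9000_REG_AI_PCF, 1) evaluates to
+ * 0x20A | (2 << 4) = 0x22A, which is ADE9000_REG_BI_PCF above.
+ */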
+
+struct ade9000_state {
+ struct completion reset_completion;
+ struct mutex lock; /* Protects SPI transactions */
+ u8 wf_src;
+ u32 wfb_trg;
+ u8 wfb_nr_activ_chan;
+ u32 wfb_nr_samples;
+ struct spi_device *spi;
+ struct clk *clkin;
+ struct spi_transfer xfer[2];
+ struct spi_message spi_msg;
+ struct regmap *regmap;
+ union {
+ u8 byte[ADE9000_WFB_FULL_BUFF_SIZE];
+ __be32 word[ADE9000_WFB_FULL_BUFF_NR_SAMPLES];
+ } rx_buff __aligned(IIO_DMA_MINALIGN);
+ u8 tx_buff[2] __aligned(IIO_DMA_MINALIGN);
+ unsigned int bulk_read_buf[2];
+};
+
+struct ade9000_irq1_event {
+ u32 bit_mask;
+ enum iio_chan_type chan_type;
+ u32 channel;
+ enum iio_event_type event_type;
+ enum iio_event_direction event_dir;
+};
+
+static const struct ade9000_irq1_event ade9000_irq1_events[] = {
+ { ADE9000_ST1_ZXVA_BIT, IIO_VOLTAGE, ADE9000_PHASE_A_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXIA_BIT, IIO_CURRENT, ADE9000_PHASE_A_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXVB_BIT, IIO_VOLTAGE, ADE9000_PHASE_B_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXIB_BIT, IIO_CURRENT, ADE9000_PHASE_B_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXVC_BIT, IIO_VOLTAGE, ADE9000_PHASE_C_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_ZXIC_BIT, IIO_CURRENT, ADE9000_PHASE_C_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER },
+ { ADE9000_ST1_SWELLA_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_A_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING },
+ { ADE9000_ST1_SWELLB_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_B_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING },
+ { ADE9000_ST1_SWELLC_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_C_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING },
+ { ADE9000_ST1_DIPA_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_A_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING },
+ { ADE9000_ST1_DIPB_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_B_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING },
+ { ADE9000_ST1_DIPC_BIT, IIO_ALTVOLTAGE, ADE9000_PHASE_C_NR, IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING },
+};
+
+/* Voltage events (zero crossing on instantaneous voltage) */
+static const struct iio_event_spec ade9000_voltage_events[] = {
+ {
+ /* Zero crossing detection - datasheet: ZXV interrupts */
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+/* Current events (zero crossing on instantaneous current) */
+static const struct iio_event_spec ade9000_current_events[] = {
+ {
+ /* Zero crossing detection - datasheet: ZXI interrupts */
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+/* RMS voltage events (swell/sag detection on RMS values) */
+static const struct iio_event_spec ade9000_rms_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING, /* RMS swell detection */
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING, /* RMS sag/dip detection */
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const char * const ade9000_filter_type_items[] = {
+ "sinc4", "sinc4+lp",
+};
+
+static const int ade9000_filter_type_values[] = {
+ 0, 2,
+};
+
+static int ade9000_filter_type_get(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 val;
+ int ret;
+ unsigned int i;
+
+ ret = regmap_read(st->regmap, ADE9000_REG_WFB_CFG, &val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(ADE9000_WF_SRC_MASK, val);
+
+ for (i = 0; i < ARRAY_SIZE(ade9000_filter_type_values); i++) {
+ if (ade9000_filter_type_values[i] == val)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int ade9000_filter_type_set(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int index)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ int ret, val;
+
+ if (index >= ARRAY_SIZE(ade9000_filter_type_values))
+ return -EINVAL;
+
+ val = ade9000_filter_type_values[index];
+
+ /* Update the WFB_CFG register with the new filter type */
+ ret = regmap_update_bits(st->regmap, ADE9000_REG_WFB_CFG,
+ ADE9000_WF_SRC_MASK,
+ FIELD_PREP(ADE9000_WF_SRC_MASK, val));
+ if (ret)
+ return ret;
+
+ /* Update cached value */
+ st->wf_src = val;
+
+ return 0;
+}
+
+static const struct iio_enum ade9000_filter_type_enum = {
+ .items = ade9000_filter_type_items,
+ .num_items = ARRAY_SIZE(ade9000_filter_type_items),
+ .get = ade9000_filter_type_get,
+ .set = ade9000_filter_type_set,
+};
+
+static const struct iio_chan_spec_ext_info ade9000_ext_info[] = {
+ IIO_ENUM("filter_type", IIO_SHARED_BY_ALL, &ade9000_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_ALL, &ade9000_filter_type_enum),
+ { }
+};
+
+#define ADE9000_CURRENT_CHANNEL(num) { \
+ .type = IIO_CURRENT, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AI_PCF, num), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .event_spec = ade9000_current_events, \
+ .num_event_specs = ARRAY_SIZE(ade9000_current_events), \
+ .scan_index = 2 * (num), \
+ .indexed = 1, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+#define ADE9000_VOLTAGE_CHANNEL(num) { \
+ .type = IIO_VOLTAGE, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AV_PCF, num), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_FREQUENCY), \
+ .event_spec = ade9000_voltage_events, \
+ .num_event_specs = ARRAY_SIZE(ade9000_voltage_events), \
+ .scan_index = 2 * (num) + 1, /* interleave with current channels */ \
+ .indexed = 1, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ .ext_info = ade9000_ext_info, \
+}
+
+#define ADE9000_ALTCURRENT_RMS_CHANNEL(num) { \
+ .type = IIO_ALTCURRENT, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AIRMS, num), \
+ .channel2 = IIO_MOD_RMS, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_ALTVOLTAGE_RMS_CHANNEL(num) { \
+ .type = IIO_ALTVOLTAGE, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AVRMS, num), \
+ .channel2 = IIO_MOD_RMS, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .event_spec = ade9000_rms_voltage_events, \
+ .num_event_specs = ARRAY_SIZE(ade9000_rms_voltage_events), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_POWER_ACTIVE_CHANNEL(num) { \
+ .type = IIO_POWER, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AWATT, num), \
+ .channel2 = IIO_MOD_ACTIVE, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_POWER_REACTIVE_CHANNEL(num) { \
+ .type = IIO_POWER, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AVAR, num), \
+ .channel2 = IIO_MOD_REACTIVE, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_POWER_APPARENT_CHANNEL(num) { \
+ .type = IIO_POWER, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_AVA, num), \
+ .channel2 = IIO_MOD_APPARENT, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_ENERGY_ACTIVE_CHANNEL(num, addr) { \
+ .type = IIO_ENERGY, \
+ .channel = num, \
+ .address = addr, \
+ .channel2 = IIO_MOD_ACTIVE, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_ENERGY_APPARENT_CHANNEL(num, addr) { \
+ .type = IIO_ENERGY, \
+ .channel = num, \
+ .address = addr, \
+ .channel2 = IIO_MOD_APPARENT, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_ENERGY_REACTIVE_CHANNEL(num, addr) { \
+ .type = IIO_ENERGY, \
+ .channel = num, \
+ .address = addr, \
+ .channel2 = IIO_MOD_REACTIVE, \
+ .modified = 1, \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_index = -1 \
+}
+
+#define ADE9000_POWER_FACTOR_CHANNEL(num) { \
+ .type = IIO_POWER, \
+ .channel = num, \
+ .address = ADE9000_ADDR_ADJUST(ADE9000_REG_APF, num), \
+ .indexed = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_POWERFACTOR), \
+ .scan_index = -1 \
+}
+
+static const struct iio_chan_spec ade9000_channels[] = {
+ /* Phase A channels */
+ ADE9000_CURRENT_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_VOLTAGE_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_ALTCURRENT_RMS_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_ALTVOLTAGE_RMS_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_POWER_ACTIVE_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_POWER_REACTIVE_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_POWER_APPARENT_CHANNEL(ADE9000_PHASE_A_NR),
+ ADE9000_ENERGY_ACTIVE_CHANNEL(ADE9000_PHASE_A_NR, ADE9000_REG_AWATTHR_LO),
+ ADE9000_ENERGY_APPARENT_CHANNEL(ADE9000_PHASE_A_NR, ADE9000_REG_AVAHR_LO),
+ ADE9000_ENERGY_REACTIVE_CHANNEL(ADE9000_PHASE_A_NR, ADE9000_REG_AFVARHR_LO),
+ ADE9000_POWER_FACTOR_CHANNEL(ADE9000_PHASE_A_NR),
+ /* Phase B channels */
+ ADE9000_CURRENT_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_VOLTAGE_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_ALTCURRENT_RMS_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_ALTVOLTAGE_RMS_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_POWER_ACTIVE_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_POWER_REACTIVE_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_POWER_APPARENT_CHANNEL(ADE9000_PHASE_B_NR),
+ ADE9000_ENERGY_ACTIVE_CHANNEL(ADE9000_PHASE_B_NR, ADE9000_REG_BWATTHR_LO),
+ ADE9000_ENERGY_APPARENT_CHANNEL(ADE9000_PHASE_B_NR, ADE9000_REG_BVAHR_LO),
+ ADE9000_ENERGY_REACTIVE_CHANNEL(ADE9000_PHASE_B_NR, ADE9000_REG_BFVARHR_LO),
+ ADE9000_POWER_FACTOR_CHANNEL(ADE9000_PHASE_B_NR),
+ /* Phase C channels */
+ ADE9000_CURRENT_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_VOLTAGE_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_ALTCURRENT_RMS_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_ALTVOLTAGE_RMS_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_POWER_ACTIVE_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_POWER_REACTIVE_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_POWER_APPARENT_CHANNEL(ADE9000_PHASE_C_NR),
+ ADE9000_ENERGY_ACTIVE_CHANNEL(ADE9000_PHASE_C_NR, ADE9000_REG_CWATTHR_LO),
+ ADE9000_ENERGY_APPARENT_CHANNEL(ADE9000_PHASE_C_NR, ADE9000_REG_CVAHR_LO),
+ ADE9000_ENERGY_REACTIVE_CHANNEL(ADE9000_PHASE_C_NR, ADE9000_REG_CFVARHR_LO),
+ ADE9000_POWER_FACTOR_CHANNEL(ADE9000_PHASE_C_NR),
+};
+
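+/*
+ * One-shot init sequence: gain, configuration and accumulation-mode
+ * registers, interrupt masks and waveform buffer defaults, finishing by
+ * clearing stale status bits and starting the measurement DSP (RUN).
+ */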
+static const struct reg_sequence ade9000_initialization_sequence[] = {
+ { ADE9000_REG_PGA_GAIN, ADE9000_PGA_GAIN },
+ { ADE9000_REG_CONFIG0, ADE9000_CONFIG0 },
+ { ADE9000_REG_CONFIG1, ADE9000_CONFIG1 },
+ { ADE9000_REG_CONFIG2, ADE9000_CONFIG2 },
+ { ADE9000_REG_CONFIG3, ADE9000_CONFIG3 },
+ { ADE9000_REG_ACCMODE, ADE9000_ACCMODE },
+ { ADE9000_REG_ZX_LP_SEL, ADE9000_ZX_LP_SEL },
+ { ADE9000_REG_MASK0, ADE9000_MASK0_ALL_INT_DIS },
+ { ADE9000_REG_MASK1, ADE9000_MASK1_ALL_INT_DIS },
+ { ADE9000_REG_EVENT_MASK, ADE9000_EVENT_DISABLE },
+ { ADE9000_REG_WFB_CFG, ADE9000_WFB_CFG },
+ { ADE9000_REG_VLEVEL, ADE9000_VLEVEL },
+ { ADE9000_REG_DICOEFF, ADE9000_DICOEFF },
+ { ADE9000_REG_EGY_TIME, ADE9000_EGY_TIME },
+ { ADE9000_REG_EP_CFG, ADE9000_EP_CFG },
+ /* Clear all pending status bits by writing 1s */
+ { ADE9000_REG_STATUS0, GENMASK(31, 0) },
+ { ADE9000_REG_STATUS1, GENMASK(31, 0) },
+ { ADE9000_REG_RUN, ADE9000_RUN_ON }
+};
+
+static int ade9000_spi_write_reg(void *context, unsigned int reg,
+ unsigned int val)
+{
+ struct ade9000_state *st = context;
+ u8 tx_buf[6];
+ u16 addr;
+ int ret, len;
+
+ guard(mutex)(&st->lock);
+
+ addr = FIELD_PREP(ADE9000_REG_ADDR_MASK, reg);
+ put_unaligned_be16(addr, tx_buf);
+
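+ /*
+ * Registers between RUN and VERSION are 16 bits wide; all other
+ * registers are 32 bits, so size the payload accordingly.
+ */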
+ if (reg > ADE9000_REG_RUN && reg < ADE9000_REG_VERSION) {
+ put_unaligned_be16(val, &tx_buf[2]);
+ len = 4;
+ } else {
+ put_unaligned_be32(val, &tx_buf[2]);
+ len = 6;
+ }
+
+ ret = spi_write_then_read(st->spi, tx_buf, len, NULL, 0);
+ if (ret)
+ dev_err(&st->spi->dev, "problem when writing register 0x%x\n", reg);
+
+ return ret;
+}
+
+static int ade9000_spi_read_reg(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ struct ade9000_state *st = context;
+ u8 tx_buf[2];
+ u8 rx_buf[4];
+ u16 addr;
+ int ret, rx_len;
+
+ guard(mutex)(&st->lock);
+
+ addr = FIELD_PREP(ADE9000_REG_ADDR_MASK, reg) |
+ ADE9000_REG_READ_BIT_MASK;
+
+ put_unaligned_be16(addr, tx_buf);
+
+ /* Skip CRC bytes - only read actual data */
+ if (reg > ADE9000_REG_RUN && reg < ADE9000_REG_VERSION)
+ rx_len = 2;
+ else
+ rx_len = 4;
+
+ ret = spi_write_then_read(st->spi, tx_buf, 2, rx_buf, rx_len);
+ if (ret) {
+ dev_err(&st->spi->dev, "error reading register 0x%x\n", reg);
+ return ret;
+ }
+
+ if (reg > ADE9000_REG_RUN && reg < ADE9000_REG_VERSION)
+ *val = get_unaligned_be16(rx_buf);
+ else
+ *val = get_unaligned_be32(rx_buf);
+
+ return 0;
+}
+
+static bool ade9000_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ /* Interrupt/error status registers - volatile */
+ case ADE9000_REG_STATUS0:
+ case ADE9000_REG_STATUS1:
+ return true;
+ default:
+ /* All other registers are non-volatile */
+ return false;
+ }
+}
+
+static void ade9000_configure_scan(struct iio_dev *indio_dev, u32 wfb_addr)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u16 addr;
+
+ addr = FIELD_PREP(ADE9000_REG_ADDR_MASK, wfb_addr) |
+ ADE9000_REG_READ_BIT_MASK;
+
+ put_unaligned_be16(addr, st->tx_buff);
+
+ st->xfer[0].tx_buf = &st->tx_buff[0];
+ st->xfer[0].len = 2;
+
+ st->xfer[1].rx_buf = st->rx_buff.byte;
+
+ /*
+ * Always use streaming mode: each sample is one 32-bit word and only
+ * half of the waveform buffer is transferred per page interrupt.
+ */
+ st->xfer[1].len = (st->wfb_nr_samples / 2) * 4;
+
+ spi_message_init_with_transfers(&st->spi_msg, st->xfer, ARRAY_SIZE(st->xfer));
+}
+
+static int ade9000_iio_push_streaming(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ u32 current_page, i;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = spi_sync(st->spi, &st->spi_msg);
+ if (ret) {
+ dev_err_ratelimited(dev, "SPI fail in trigger handler\n");
+ return ret;
+ }
+
+ /* In streaming mode, only half the buffer is filled per interrupt */
+ for (i = 0; i < st->wfb_nr_samples / 2; i += st->wfb_nr_activ_chan)
+ iio_push_to_buffers(indio_dev, &st->rx_buff.word[i]);
+
+ ret = regmap_read(st->regmap, ADE9000_REG_WFB_PG_IRQEN, &current_page);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 WFB read fail\n");
+ return ret;
+ }
+
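+ /*
+ * Ping-pong between buffer halves: when the middle page has just
+ * filled, arm the last-page interrupt and point the next transfer at
+ * the second half; otherwise re-arm the middle page and read from the
+ * start of the buffer.
+ */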
+ if (current_page & ADE9000_MIDDLE_PAGE_BIT) {
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_PG_IRQEN,
+ ADE9000_LAST_PAGE_BIT);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 WFB write fail\n");
+ return ret;
+ }
+
+ ade9000_configure_scan(indio_dev,
+ ADE9000_REG_WF_HALF_BUFF);
+ } else {
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_PG_IRQEN,
+ ADE9000_MIDDLE_PAGE_BIT);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 WFB write fail");
+ return IRQ_HANDLED;
+ }
+
+ ade9000_configure_scan(indio_dev, ADE9000_REG_WF_BUFF);
+ }
+
+ return 0;
+}
+
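+/* Drain the whole waveform buffer in one transfer (trigger/data-ready paths) */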
+static int ade9000_iio_push_buffer(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ int ret;
+ u32 i;
+
+ guard(mutex)(&st->lock);
+
+ ret = spi_sync(st->spi, &st->spi_msg);
+ if (ret) {
+ dev_err_ratelimited(&st->spi->dev,
+ "SPI fail in trigger handler\n");
+ return ret;
+ }
+
+ for (i = 0; i < st->wfb_nr_samples; i += st->wfb_nr_activ_chan)
+ iio_push_to_buffers(indio_dev, &st->rx_buff.word[i]);
+
+ return 0;
+}
+
+static irqreturn_t ade9000_irq0_thread(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ade9000_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ u32 handled_irq = 0;
+ u32 interrupts, status;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADE9000_REG_STATUS0, &status);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 read status fail\n");
+ return IRQ_HANDLED;
+ }
+
+ ret = regmap_read(st->regmap, ADE9000_REG_MASK0, &interrupts);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 read mask fail\n");
+ return IRQ_HANDLED;
+ }
+
+ if ((status & ADE9000_ST0_PAGE_FULL_BIT) &&
+ (interrupts & ADE9000_ST0_PAGE_FULL_BIT)) {
+ /* Always use streaming mode */
+ ret = ade9000_iio_push_streaming(indio_dev);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 IIO push fail\n");
+ return IRQ_HANDLED;
+ }
+
+ handled_irq |= ADE9000_ST0_PAGE_FULL_BIT;
+ }
+
+ if ((status & ADE9000_ST0_WFB_TRIG_BIT) &&
+ (interrupts & ADE9000_ST0_WFB_TRIG_BIT)) {
+ ret = regmap_update_bits(st->regmap, ADE9000_REG_WFB_CFG,
+ ADE9000_WF_CAP_EN_MASK, 0);
+ if (ret) {
+ dev_err_ratelimited(dev, "IRQ0 WFB fail\n");
+ return IRQ_HANDLED;
+ }
+
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = ade9000_iio_push_buffer(indio_dev);
+ if (ret) {
+ dev_err_ratelimited(dev,
+ "IRQ0 IIO push fail @ WFB TRIG\n");
+ return IRQ_HANDLED;
+ }
+ }
+
+ handled_irq |= ADE9000_ST0_WFB_TRIG_BIT;
+ }
+
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS0, handled_irq);
+ if (ret)
+ dev_err_ratelimited(dev, "IRQ0 write status fail\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ade9000_irq1_thread(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ade9000_state *st = iio_priv(indio_dev);
+ unsigned int bit = ADE9000_ST1_CROSSING_FIRST;
+ s64 timestamp = iio_get_time_ns(indio_dev);
+ u32 handled_irq = 0;
+ u32 interrupts, result, status, tmp;
+ DECLARE_BITMAP(interrupt_bits, ADE9000_ST1_CROSSING_DEPTH);
+ const struct ade9000_irq1_event *event;
+ int ret, i;
+
+ if (!completion_done(&st->reset_completion)) {
+ ret = regmap_read(st->regmap, ADE9000_REG_STATUS1, &result);
+ if (ret) {
+ dev_err_ratelimited(&st->spi->dev, "IRQ1 read status fail\n");
+ return IRQ_HANDLED;
+ }
+
+ if (result & ADE9000_ST1_RSTDONE_BIT) {
+ complete(&st->reset_completion);
+ /* Clear the reset done status bit */
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS1, ADE9000_ST1_RSTDONE_BIT);
+ if (ret)
+ dev_err_ratelimited(&st->spi->dev,
+ "IRQ1 clear reset status fail\n");
+ } else {
+ dev_err_ratelimited(&st->spi->dev,
+ "Error testing reset done\n");
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ ret = regmap_read(st->regmap, ADE9000_REG_STATUS1, &status);
+ if (ret) {
+ dev_err_ratelimited(&st->spi->dev, "IRQ1 read status fail\n");
+ return IRQ_HANDLED;
+ }
+
+ ret = regmap_read(st->regmap, ADE9000_REG_MASK1, &interrupts);
+ if (ret) {
+ dev_err_ratelimited(&st->spi->dev, "IRQ1 read mask fail\n");
+ return IRQ_HANDLED;
+ }
+
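+ /* Scan only the event bits that are enabled, then check each for a pending status */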
+ bitmap_from_arr32(interrupt_bits, &interrupts, ADE9000_ST1_CROSSING_DEPTH);
+ for_each_set_bit_from(bit, interrupt_bits,
+ ADE9000_ST1_CROSSING_DEPTH) {
+ tmp = status & BIT(bit);
+ if (!tmp)
+ continue;
+
+ event = NULL;
+
+ /* Find corresponding event in lookup table */
+ for (i = 0; i < ARRAY_SIZE(ade9000_irq1_events); i++) {
+ if (ade9000_irq1_events[i].bit_mask == tmp) {
+ event = &ade9000_irq1_events[i];
+ break;
+ }
+ }
+
+ if (event) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(event->chan_type,
+ event->channel,
+ event->event_type,
+ event->event_dir),
+ timestamp);
+ }
+ handled_irq |= tmp;
+ }
+
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS1, handled_irq);
+ if (ret)
+ dev_err_ratelimited(&st->spi->dev, "IRQ1 write status fail\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ade9000_dready_thread(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+
+ /* Handle data ready interrupt from C4/EVENT/DREADY pin */
+ if (!iio_device_claim_buffer_mode(indio_dev)) {
+ ade9000_iio_push_buffer(indio_dev);
+ iio_device_release_buffer_mode(indio_dev);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int ade9000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ unsigned int measured;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_FREQUENCY:
+ if (chan->type == IIO_VOLTAGE) {
+ int period_reg;
+ int period;
+
+ switch (chan->channel) {
+ case ADE9000_PHASE_A_NR:
+ period_reg = ADE9000_REG_APERIOD;
+ break;
+ case ADE9000_PHASE_B_NR:
+ period_reg = ADE9000_REG_BPERIOD;
+ break;
+ case ADE9000_PHASE_C_NR:
+ period_reg = ADE9000_REG_CPERIOD;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = regmap_read(st->regmap, period_reg, &period);
+ if (ret)
+ return ret;
+ /*
+ * Frequency = (4MHz * 65536) / (PERIOD + 1)
+ * 4MHz = ADC sample rate, 65536 = 2^16 period register scaling
+ * See ADE9000 datasheet section on period measurement
+ */
+ *val = 4000 * 65536;
+ *val2 = period + 1;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type == IIO_ENERGY) {
+ u16 lo_reg = chan->address;
+
+ ret = regmap_bulk_read(st->regmap, lo_reg,
+ st->bulk_read_buf, 2);
+ if (ret)
+ return ret;
+
+ *val = st->bulk_read_buf[0]; /* Lower 32 bits */
+ *val2 = st->bulk_read_buf[1]; /* Upper 32 bits */
+ return IIO_VAL_INT_64;
+ }
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = regmap_read(st->regmap, chan->address, &measured);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+
+ *val = measured;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_POWERFACTOR:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = regmap_read(st->regmap, chan->address, &measured);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+
+ *val = measured;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_CURRENT:
+ case IIO_VOLTAGE:
+ case IIO_ALTVOLTAGE:
+ case IIO_ALTCURRENT:
+ switch (chan->address) {
+ case ADE9000_REG_AI_PCF:
+ case ADE9000_REG_AV_PCF:
+ case ADE9000_REG_BI_PCF:
+ case ADE9000_REG_BV_PCF:
+ case ADE9000_REG_CI_PCF:
+ case ADE9000_REG_CV_PCF:
+ *val = 1;
+ *val2 = ADE9000_PCF_FULL_SCALE_CODES;
+ return IIO_VAL_FRACTIONAL;
+ case ADE9000_REG_AIRMS:
+ case ADE9000_REG_AVRMS:
+ case ADE9000_REG_BIRMS:
+ case ADE9000_REG_BVRMS:
+ case ADE9000_REG_CIRMS:
+ case ADE9000_REG_CVRMS:
+ *val = 1;
+ *val2 = ADE9000_RMS_FULL_SCALE_CODES;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_POWER:
+ *val = 1;
+ *val2 = ADE9000_WATT_FULL_SCALE_CODES;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 tmp;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_CURRENT:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AIRMSOS,
+ chan->channel), val);
+ case IIO_VOLTAGE:
+ case IIO_ALTVOLTAGE:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AVRMSOS,
+ chan->channel), val);
+ case IIO_POWER:
+ tmp = chan->address;
+ tmp &= ~ADE9000_PHASE_B_POS_BIT;
+ tmp &= ~ADE9000_PHASE_C_POS_BIT;
+
+ switch (tmp) {
+ case ADE9000_REG_AWATT:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AWATTOS,
+ chan->channel), val);
+ case ADE9000_REG_AVAR:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AVAROS,
+ chan->channel), val);
+ case ADE9000_REG_AFVAR:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AFVAROS,
+ chan->channel), val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBSCALE:
+ /*
+ * Calibration gain registers for fine-tuning measurements.
+ * These are separate from PGA gain and applied in the digital domain.
+ */
+ switch (chan->type) {
+ case IIO_CURRENT:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AIGAIN,
+ chan->channel), val);
+ case IIO_VOLTAGE:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_AVGAIN,
+ chan->channel), val);
+ case IIO_POWER:
+ return regmap_write(st->regmap,
+ ADE9000_ADDR_ADJUST(ADE9000_REG_APGAIN,
+ chan->channel), val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ /* Per-channel scales are read-only */
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int tx_val,
+ unsigned int *rx_val)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+
+ if (rx_val)
+ return regmap_read(st->regmap, reg, rx_val);
+
+ return regmap_write(st->regmap, reg, tx_val);
+}
+
+static int ade9000_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 interrupts1;
+ int ret;
+
+ /* All events use MASK1 register */
+ ret = regmap_read(st->regmap, ADE9000_REG_MASK1, &interrupts1);
+ if (ret)
+ return ret;
+
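+ /*
+ * Zero-crossing events are reported on the voltage/current channels
+ * (direction "either"); swell maps to rising and dip to falling on
+ * the RMS (altvoltage) channels.
+ */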
+ switch (chan->channel) {
+ case ADE9000_PHASE_A_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXVA_BIT);
+ else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXIA_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING)
+ return !!(interrupts1 & ADE9000_ST1_SWELLA_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING)
+ return !!(interrupts1 & ADE9000_ST1_DIPA_BIT);
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase A\n", chan->type, dir);
+ return -EINVAL;
+ case ADE9000_PHASE_B_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXVB_BIT);
+ else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXIB_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING)
+ return !!(interrupts1 & ADE9000_ST1_SWELLB_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING)
+ return !!(interrupts1 & ADE9000_ST1_DIPB_BIT);
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase B\n", chan->type, dir);
+ return -EINVAL;
+ case ADE9000_PHASE_C_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXVC_BIT);
+ else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER)
+ return !!(interrupts1 & ADE9000_ST1_ZXIC_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING)
+ return !!(interrupts1 & ADE9000_ST1_SWELLC_BIT);
+ else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING)
+ return !!(interrupts1 & ADE9000_ST1_DIPC_BIT);
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase C\n", chan->type, dir);
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 bit_mask;
+ int ret;
+
+ /* Clear all pending events in STATUS1 register (write 1 to clear) */
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS1, GENMASK(31, 0));
+ if (ret)
+ return ret;
+
+ /* Determine which interrupt bit to enable/disable */
+ switch (chan->channel) {
+ case ADE9000_PHASE_A_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXVA_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXVA_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXVA_BIT;
+ } else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXIA_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXIA_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXIA_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING) {
+ bit_mask = ADE9000_ST1_SWELLA_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_SWELL_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_SWELL_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING) {
+ bit_mask = ADE9000_ST1_DIPA_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_DIP_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_DIP_BIT;
+ } else {
+ dev_err_ratelimited(&indio_dev->dev, "Invalid channel type %d or direction %d for phase A\n",
+ chan->type, dir);
+ return -EINVAL;
+ }
+ break;
+ case ADE9000_PHASE_B_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXVB_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXVB_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXVB_BIT;
+ } else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXIB_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXIB_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXIB_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING) {
+ bit_mask = ADE9000_ST1_SWELLB_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_SWELL_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_SWELL_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING) {
+ bit_mask = ADE9000_ST1_DIPB_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_DIP_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_DIP_BIT;
+ } else {
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase B\n",
+ chan->type, dir);
+ return -EINVAL;
+ }
+ break;
+ case ADE9000_PHASE_C_NR:
+ if (chan->type == IIO_VOLTAGE && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXVC_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXVC_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXVC_BIT;
+ } else if (chan->type == IIO_CURRENT && dir == IIO_EV_DIR_EITHER) {
+ bit_mask = ADE9000_ST1_ZXIC_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_ZXIC_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_ZXIC_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_RISING) {
+ bit_mask = ADE9000_ST1_SWELLC_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_SWELL_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_SWELL_BIT;
+ } else if (chan->type == IIO_ALTVOLTAGE && dir == IIO_EV_DIR_FALLING) {
+ bit_mask = ADE9000_ST1_DIPC_BIT;
+ if (state)
+ st->wfb_trg |= ADE9000_WFB_TRG_DIP_BIT;
+ else
+ st->wfb_trg &= ~ADE9000_WFB_TRG_DIP_BIT;
+ } else {
+ dev_err_ratelimited(&indio_dev->dev,
+ "Invalid channel type %d or direction %d for phase C\n",
+ chan->type, dir);
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set the mask bit when enabling the event, clear it when disabling */
+ return regmap_assign_bits(st->regmap, ADE9000_REG_MASK1, bit_mask, state);
+}
+
+static int ade9000_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ return regmap_write(st->regmap, ADE9000_REG_DIP_LVL, val);
+ case IIO_EV_DIR_RISING:
+ return regmap_write(st->regmap, ADE9000_REG_SWELL_LVL, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ unsigned int data;
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ switch (dir) {
+ case IIO_EV_DIR_FALLING:
+ ret = regmap_read(st->regmap, ADE9000_REG_DIP_LVL, &data);
+ if (ret)
+ return ret;
+ *val = data;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_RISING:
+ ret = regmap_read(st->regmap, ADE9000_REG_SWELL_LVL, &data);
+ if (ret)
+ return ret;
+ *val = data;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ade9000_waveform_buffer_config(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ u32 wfb_cfg_val;
+ u32 active_scans;
+
+ bitmap_to_arr32(&active_scans, indio_dev->active_scan_mask,
+ iio_get_masklength(indio_dev));
+
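+ /*
+ * The waveform buffer hardware only captures fixed channel sets: a
+ * single current or voltage channel, the I/V pair of one phase, or
+ * all six channels at once.
+ */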
+ switch (active_scans) {
+ case ADE9000_SCAN_POS_IA | ADE9000_SCAN_POS_VA:
+ wfb_cfg_val = ADE9000_WFB_CFG_IA_VA;
+ st->wfb_nr_activ_chan = 2;
+ break;
+ case ADE9000_SCAN_POS_IB | ADE9000_SCAN_POS_VB:
+ wfb_cfg_val = ADE9000_WFB_CFG_IB_VB;
+ st->wfb_nr_activ_chan = 2;
+ break;
+ case ADE9000_SCAN_POS_IC | ADE9000_SCAN_POS_VC:
+ wfb_cfg_val = ADE9000_WFB_CFG_IC_VC;
+ st->wfb_nr_activ_chan = 2;
+ break;
+ case ADE9000_SCAN_POS_IA:
+ wfb_cfg_val = ADE9000_WFB_CFG_IA;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_VA:
+ wfb_cfg_val = ADE9000_WFB_CFG_VA;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_IB:
+ wfb_cfg_val = ADE9000_WFB_CFG_IB;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_VB:
+ wfb_cfg_val = ADE9000_WFB_CFG_VB;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_IC:
+ wfb_cfg_val = ADE9000_WFB_CFG_IC;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case ADE9000_SCAN_POS_VC:
+ wfb_cfg_val = ADE9000_WFB_CFG_VC;
+ st->wfb_nr_activ_chan = 1;
+ break;
+ case (ADE9000_SCAN_POS_IA | ADE9000_SCAN_POS_VA | ADE9000_SCAN_POS_IB |
+ ADE9000_SCAN_POS_VB | ADE9000_SCAN_POS_IC | ADE9000_SCAN_POS_VC):
+ wfb_cfg_val = ADE9000_WFB_CFG_ALL_CHAN;
+ st->wfb_nr_activ_chan = 6;
+ break;
+ default:
+ dev_err(&st->spi->dev, "Unsupported combination of scans\n");
+ return -EINVAL;
+ }
+
+ wfb_cfg_val |= FIELD_PREP(ADE9000_WF_SRC_MASK, st->wf_src);
+
+ return regmap_write(st->regmap, ADE9000_REG_WFB_CFG, wfb_cfg_val);
+}
+
+static int ade9000_waveform_buffer_interrupt_setup(struct ade9000_state *st)
+{
+ int ret;
+
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_TRG_CFG, 0x0);
+ if (ret)
+ return ret;
+
+ /* Always use streaming mode setup */
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_PG_IRQEN,
+ ADE9000_MIDDLE_PAGE_BIT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, ADE9000_REG_STATUS0, GENMASK(31, 0));
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(st->regmap, ADE9000_REG_MASK0,
+ ADE9000_ST0_PAGE_FULL_BIT);
+}
+
+static int ade9000_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ade9000_waveform_buffer_config(indio_dev);
+ if (ret)
+ return ret;
+
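+ /* Total capture size: fixed per-channel sample depth times active channels */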
+ st->wfb_nr_samples = ADE9000_WFB_MAX_SAMPLES_CHAN * st->wfb_nr_activ_chan;
+
+ ade9000_configure_scan(indio_dev, ADE9000_REG_WF_BUFF);
+
+ ret = ade9000_waveform_buffer_interrupt_setup(st);
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(st->regmap, ADE9000_REG_WFB_CFG,
+ ADE9000_WF_CAP_EN_MASK);
+ if (ret) {
+ dev_err(&st->spi->dev, "Post-enable waveform buffer enable fail\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ade9000_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct ade9000_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ u32 interrupts;
+ int ret;
+
+ ret = regmap_clear_bits(st->regmap, ADE9000_REG_WFB_CFG,
+ ADE9000_WF_CAP_EN_MASK);
+ if (ret) {
+ dev_err(dev, "Post-disable waveform buffer disable fail\n");
+ return ret;
+ }
+
+ ret = regmap_write(st->regmap, ADE9000_REG_WFB_TRG_CFG, 0x0);
+ if (ret)
+ return ret;
+
+ interrupts = ADE9000_ST0_WFB_TRIG_BIT | ADE9000_ST0_PAGE_FULL_BIT;
+
+ ret = regmap_clear_bits(st->regmap, ADE9000_REG_MASK0, interrupts);
+ if (ret) {
+ dev_err(dev, "Post-disable update maks0 fail\n");
+ return ret;
+ }
+
+ return regmap_write(st->regmap, ADE9000_REG_STATUS0, GENMASK(31, 0));
+}
+
+static const struct iio_buffer_setup_ops ade9000_buffer_ops = {
+ .preenable = &ade9000_buffer_preenable,
+ .postdisable = &ade9000_buffer_postdisable,
+};
+
+static int ade9000_reset(struct ade9000_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct gpio_desc *gpio_reset;
+ int ret;
+
+ gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio_reset))
+ return PTR_ERR(gpio_reset);
+
+ /* Software reset via register if no GPIO available */
+ if (!gpio_reset) {
+ ret = regmap_set_bits(st->regmap, ADE9000_REG_CONFIG1,
+ ADE9000_SWRST_BIT);
+ if (ret)
+ return ret;
+ fsleep(90);
+ return 0;
+ }
+
+ /* Hardware reset via GPIO */
+ fsleep(10);
+ gpiod_set_value_cansleep(gpio_reset, 0);
+ fsleep(50000);
+
+ /* Only wait for completion if IRQ1 is available to signal reset done */
+ if (fwnode_irq_get_byname(dev_fwnode(dev), "irq1") >= 0) {
+ if (!wait_for_completion_timeout(&st->reset_completion,
+ msecs_to_jiffies(1000))) {
+ dev_err(dev, "Reset timeout after 1s\n");
+ return -ETIMEDOUT;
+ }
+ }
+ /* If no IRQ available, reset is already complete after the 50ms delay above */
+
+ return 0;
+}
+
+static int ade9000_setup(struct ade9000_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ int ret;
+
+ ret = regmap_multi_reg_write(st->regmap, ade9000_initialization_sequence,
+ ARRAY_SIZE(ade9000_initialization_sequence));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to write register sequence");
+
+ fsleep(2000);
+
+ return 0;
+}
+
+static const struct iio_info ade9000_info = {
+ .read_raw = ade9000_read_raw,
+ .write_raw = ade9000_write_raw,
+ .debugfs_reg_access = ade9000_reg_access,
+ .write_event_config = ade9000_write_event_config,
+ .read_event_config = ade9000_read_event_config,
+ .write_event_value = ade9000_write_event_value,
+ .read_event_value = ade9000_read_event_value,
+};
+
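+/*
+ * 16-bit register addresses with 32-bit values; I/O goes through the
+ * custom SPI helpers above so that CRC bytes are skipped and the few
+ * 16-bit registers are sized correctly.
+ */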
+static const struct regmap_config ade9000_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .max_register = 0x6bc,
+ .zero_flag_mask = true,
+ .cache_type = REGCACHE_MAPLE,
+ .reg_read = ade9000_spi_read_reg,
+ .reg_write = ade9000_spi_write_reg,
+ .volatile_reg = ade9000_is_volatile_reg,
+};
+
+static int ade9000_setup_clkout(struct device *dev, struct ade9000_state *st)
+{
+ struct clk_hw *clkout_hw;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_COMMON_CLK))
+ return 0;
+
+ /*
+ * Only provide clock output when using external CMOS clock.
+ * When using crystal, CLKOUT is connected to crystal and shouldn't
+ * be used as clock provider for other devices.
+ */
+ if (!device_property_present(dev, "#clock-cells") || !st->clkin)
+ return 0;
+
+ /* CLKOUT passes through CLKIN with divider of 1 */
+ clkout_hw = devm_clk_hw_register_divider(dev, "clkout", __clk_get_name(st->clkin),
+ CLK_SET_RATE_PARENT, NULL, 0, 1, 0, NULL);
+ if (IS_ERR(clkout_hw))
+ return dev_err_probe(dev, PTR_ERR(clkout_hw), "Failed to register clkout");
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, clkout_hw);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add clock provider");
+
+ return 0;
+}
+
+static int ade9000_request_irq(struct device *dev, const char *name,
+ irq_handler_t handler, void *dev_id)
+{
+ int irq, ret;
+
+ irq = fwnode_irq_get_byname(dev_fwnode(dev), name);
+ if (irq == -EINVAL)
+ return 0; /* interrupts are optional */
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get %s irq", name);
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, handler,
+ IRQF_ONESHOT, KBUILD_MODNAME, dev_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request %s irq", name);
+
+ return 0;
+}
+
+static int ade9000_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct ade9000_state *st;
+ struct regmap *regmap;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ regmap = devm_regmap_init(dev, NULL, st, &ade9000_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Unable to allocate ADE9000 regmap");
+
+ st->regmap = regmap;
+ st->spi = spi;
+
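+ /* IRQ1 completes this on RSTDONE, so initialize it before requesting IRQs */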
+ init_completion(&st->reset_completion);
+
+ ret = ade9000_request_irq(dev, "irq0", ade9000_irq0_thread, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ade9000_request_irq(dev, "irq1", ade9000_irq1_thread, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ade9000_request_irq(dev, "dready", ade9000_dready_thread, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
+ /* External CMOS clock input (optional - crystal can be used instead) */
+ st->clkin = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(st->clkin))
+ return dev_err_probe(dev, PTR_ERR(st->clkin), "Failed to get and enable clkin");
+
+ ret = ade9000_setup_clkout(dev, st);
+ if (ret)
+ return ret;
+
+ indio_dev->name = "ade9000";
+ indio_dev->info = &ade9000_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->setup_ops = &ade9000_buffer_ops;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get and enable vdd regulator\n");
+
+ indio_dev->channels = ade9000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ade9000_channels);
+
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
+ &ade9000_buffer_ops);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to setup IIO buffer");
+
+ ret = ade9000_reset(st);
+ if (ret)
+ return ret;
+
+ /* Configure reference selection if vref regulator is available */
+ ret = devm_regulator_get_enable_optional(dev, "vref");
+ if (ret >= 0) {
+ ret = regmap_set_bits(st->regmap, ADE9000_REG_CONFIG1,
+ ADE9000_EXT_REF_MASK);
+ if (ret)
+ return ret;
+ } else if (ret != -ENODEV) {
+ return dev_err_probe(dev, ret,
+ "Failed to get and enable vref regulator\n");
+ }
+
+ ret = ade9000_setup(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ade9000_id[] = {
+ { "ade9000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ade9000_id);
+
+static const struct of_device_id ade9000_of_match[] = {
+ { .compatible = "adi,ade9000" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ade9000_of_match);
+
+static struct spi_driver ade9000_driver = {
+ .driver = {
+ .name = "ade9000",
+ .of_match_table = ade9000_of_match,
+ },
+ .probe = ade9000_probe,
+ .id_table = ade9000_id,
+};
+module_spi_driver(ade9000_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADE9000");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index eb42e29960e4..14fa4238c2b9 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -618,6 +618,7 @@ static const struct iio_backend_ops adi_axi_adc_ops = {
.chan_status = axi_adc_chan_status,
.interface_type_get = axi_adc_interface_type_get,
.oversampling_ratio_set = axi_adc_oversampling_ratio_set,
+ .num_lanes_set = axi_adc_num_lanes_set,
.debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access),
.debugfs_print_chan_status = iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status),
};
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 1d5fd5f534b8..bf2bfd6bdc41 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -645,6 +645,16 @@ static const struct aspeed_adc_trim_locate ast2600_adc1_trim = {
.field = GENMASK(7, 4),
};
+static const struct aspeed_adc_trim_locate ast2700_adc0_trim = {
+ .offset = 0x820,
+ .field = GENMASK(3, 0),
+};
+
+static const struct aspeed_adc_trim_locate ast2700_adc1_trim = {
+ .offset = 0x820,
+ .field = GENMASK(7, 4),
+};
+
static const struct aspeed_adc_model_data ast2400_model_data = {
.model_name = "ast2400-adc",
.vref_fixed_mv = 2500,
@@ -689,11 +699,35 @@ static const struct aspeed_adc_model_data ast2600_adc1_model_data = {
.trim_locate = &ast2600_adc1_trim,
};
+static const struct aspeed_adc_model_data ast2700_adc0_model_data = {
+ .model_name = "ast2700-adc0",
+ .min_sampling_rate = 10000,
+ .max_sampling_rate = 500000,
+ .wait_init_sequence = true,
+ .bat_sense_sup = true,
+ .scaler_bit_width = 16,
+ .num_channels = 8,
+ .trim_locate = &ast2700_adc0_trim,
+};
+
+static const struct aspeed_adc_model_data ast2700_adc1_model_data = {
+ .model_name = "ast2700-adc1",
+ .min_sampling_rate = 10000,
+ .max_sampling_rate = 500000,
+ .wait_init_sequence = true,
+ .bat_sense_sup = true,
+ .scaler_bit_width = 16,
+ .num_channels = 8,
+ .trim_locate = &ast2700_adc1_trim,
+};
+
static const struct of_device_id aspeed_adc_matches[] = {
{ .compatible = "aspeed,ast2400-adc", .data = &ast2400_model_data },
{ .compatible = "aspeed,ast2500-adc", .data = &ast2500_model_data },
{ .compatible = "aspeed,ast2600-adc0", .data = &ast2600_adc0_model_data },
{ .compatible = "aspeed,ast2600-adc1", .data = &ast2600_adc1_model_data },
+ { .compatible = "aspeed,ast2700-adc0", .data = &ast2700_adc0_model_data },
+ { .compatible = "aspeed,ast2700-adc1", .data = &ast2700_adc1_model_data },
{ }
};
MODULE_DEVICE_TABLE(of, aspeed_adc_matches);
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index c3450246730e..b4c36e6a7490 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -896,7 +896,6 @@ static int at91_adc_config_emr(struct at91_adc_state *st,
emr |= osr | AT91_SAMA5D2_TRACKX(trackx);
at91_adc_writel(st, EMR, emr);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
st->oversampling_ratio = oversampling_ratio;
@@ -971,7 +970,6 @@ static int at91_adc_configure_touch(struct at91_adc_state *st, bool state)
AT91_SAMA5D2_IER_PEN | AT91_SAMA5D2_IER_NOPEN);
at91_adc_writel(st, TSMR, 0);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
}
@@ -1142,10 +1140,8 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
at91_adc_configure_trigger_registers(st, state);
- if (!state) {
- pm_runtime_mark_last_busy(st->dev);
+ if (!state)
pm_runtime_put_autosuspend(st->dev);
- }
return 0;
}
@@ -1336,7 +1332,6 @@ static int at91_adc_buffer_prepare(struct iio_dev *indio_dev)
at91_adc_writel(st, IER, AT91_SAMA5D2_IER_DRDY);
pm_runtime_put:
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return ret;
}
@@ -1394,7 +1389,6 @@ static int at91_adc_buffer_postdisable(struct iio_dev *indio_dev)
if (st->dma_st.dma_chan)
dmaengine_terminate_sync(st->dma_st.dma_chan);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
@@ -1603,7 +1597,6 @@ static void at91_adc_setup_samp_freq(struct iio_dev *indio_dev, unsigned freq,
mr |= AT91_SAMA5D2_MR_TRACKTIM(tracktim);
at91_adc_writel(st, MR, mr);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u, tracktim=%u\n",
@@ -1809,7 +1802,6 @@ static int at91_adc_read_info_raw(struct iio_dev *indio_dev,
at91_adc_readl(st, LCDR);
pm_runtime_put:
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return ret;
}
@@ -1890,7 +1882,6 @@ static int at91_adc_read_temp(struct iio_dev *indio_dev,
restore_config:
/* Revert previous settings. */
at91_adc_temp_sensor_configure(st, false);
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
if (ret < 0)
return ret;
@@ -2465,7 +2456,6 @@ static int at91_adc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "version: %x\n",
readl_relaxed(st->base + st->soc_info.platform->layout->VERSION));
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
@@ -2567,7 +2557,6 @@ static int at91_adc_resume(struct device *dev)
at91_adc_configure_trigger_registers(st, true);
}
- pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
return 0;
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
index f258668b0dc7..6426c9e6ccc9 100644
--- a/drivers/iio/adc/bcm_iproc_adc.c
+++ b/drivers/iio/adc/bcm_iproc_adc.c
@@ -511,10 +511,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(&pdev->dev,
sizeof(*adc_priv));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed to allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc_priv = iio_priv(indio_dev);
platform_set_drvdata(pdev, indio_dev);
diff --git a/drivers/iio/adc/cpcap-adc.c b/drivers/iio/adc/cpcap-adc.c
index ba7cbd3b4822..d9ee2ea116a7 100644
--- a/drivers/iio/adc/cpcap-adc.c
+++ b/drivers/iio/adc/cpcap-adc.c
@@ -953,11 +953,9 @@ static int cpcap_adc_probe(struct platform_device *pdev)
int error;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ddata));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed to allocate iio device\n");
-
+ if (!indio_dev)
return -ENOMEM;
- }
+
ddata = iio_priv(indio_dev);
ddata->ato = device_get_match_data(&pdev->dev);
if (!ddata->ato)
diff --git a/drivers/iio/adc/da9150-gpadc.c b/drivers/iio/adc/da9150-gpadc.c
index b99291ce2a45..625e3a8e4d03 100644
--- a/drivers/iio/adc/da9150-gpadc.c
+++ b/drivers/iio/adc/da9150-gpadc.c
@@ -308,10 +308,9 @@ static int da9150_gpadc_probe(struct platform_device *pdev)
int irq, ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*gpadc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "Failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
+
gpadc = iio_priv(indio_dev);
gpadc->da9150 = da9150;
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 5aea7644780f..eb902a946efe 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -584,10 +584,8 @@ static int dln2_adc_probe(struct platform_device *pdev)
int i, ret, chans;
indio_dev = devm_iio_device_alloc(dev, sizeof(*dln2));
- if (!indio_dev) {
- dev_err(dev, "failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
dln2 = iio_priv(indio_dev);
dln2->pdev = pdev;
@@ -628,10 +626,9 @@ static int dln2_adc_probe(struct platform_device *pdev)
dln2->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
indio_dev->name,
iio_device_id(indio_dev));
- if (!dln2->trig) {
- dev_err(dev, "failed to allocate trigger\n");
+ if (!dln2->trig)
return -ENOMEM;
- }
+
iio_trigger_set_drvdata(dln2->trig, dln2);
ret = devm_iio_trigger_register(dev, dln2->trig);
if (ret) {
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 4614cf848535..1484adff00df 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -19,11 +19,9 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/regulator/consumer.h>
#include <linux/of_platform.h>
#include <linux/err.h>
-#include <linux/input.h>
#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
@@ -31,21 +29,14 @@
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
-#include <linux/platform_data/touchscreen-s3c2410.h>
-
/* S3C/EXYNOS4412/5250 ADC_V1 registers definitions */
#define ADC_V1_CON(x) ((x) + 0x00)
-#define ADC_V1_TSC(x) ((x) + 0x04)
#define ADC_V1_DLY(x) ((x) + 0x08)
#define ADC_V1_DATX(x) ((x) + 0x0C)
#define ADC_V1_DATY(x) ((x) + 0x10)
#define ADC_V1_UPDN(x) ((x) + 0x14)
#define ADC_V1_INTCLR(x) ((x) + 0x18)
#define ADC_V1_MUX(x) ((x) + 0x1c)
-#define ADC_V1_CLRINTPNDNUP(x) ((x) + 0x20)
-
-/* S3C2410 ADC registers definitions */
-#define ADC_S3C2410_MUX(x) ((x) + 0x18)
/* Future ADC_V2 registers definitions */
#define ADC_V2_CON1(x) ((x) + 0x00)
@@ -61,13 +52,8 @@
#define ADC_V1_CON_PRSCLV(x) (((x) & 0xFF) << 6)
#define ADC_V1_CON_STANDBY (1u << 2)
-/* Bit definitions for S3C2410 ADC */
+/* Bit definitions for S3C2410 / S3C6410 ADC */
#define ADC_S3C2410_CON_SELMUX(x) (((x) & 7) << 3)
-#define ADC_S3C2410_DATX_MASK 0x3FF
-#define ADC_S3C2416_CON_RES_SEL (1u << 3)
-
-/* touch screen always uses channel 0 */
-#define ADC_S3C2410_MUX_TS 0
/* ADCTSC Register Bits */
#define ADC_S3C2443_TSC_UD_SEN (1u << 8)
@@ -75,8 +61,6 @@
#define ADC_S3C2410_TSC_YP_SEN (1u << 6)
#define ADC_S3C2410_TSC_XM_SEN (1u << 5)
#define ADC_S3C2410_TSC_XP_SEN (1u << 4)
-#define ADC_S3C2410_TSC_PULL_UP_DISABLE (1u << 3)
-#define ADC_S3C2410_TSC_AUTO_PST (1u << 2)
#define ADC_S3C2410_TSC_XY_PST(x) (((x) & 0x3) << 0)
#define ADC_TSC_WAIT4INT (ADC_S3C2410_TSC_YM_SEN | \
@@ -84,12 +68,6 @@
ADC_S3C2410_TSC_XP_SEN | \
ADC_S3C2410_TSC_XY_PST(3))
-#define ADC_TSC_AUTOPST (ADC_S3C2410_TSC_YM_SEN | \
- ADC_S3C2410_TSC_YP_SEN | \
- ADC_S3C2410_TSC_XP_SEN | \
- ADC_S3C2410_TSC_AUTO_PST | \
- ADC_S3C2410_TSC_XY_PST(0))
-
/* Bit definitions for ADC_V2 */
#define ADC_V2_CON1_SOFT_RESET (1u << 2)
@@ -121,14 +99,11 @@
struct exynos_adc {
struct exynos_adc_data *data;
struct device *dev;
- struct input_dev *input;
void __iomem *regs;
struct regmap *pmu_map;
struct clk *clk;
struct clk *sclk;
unsigned int irq;
- unsigned int tsirq;
- unsigned int delay;
struct regulator *vdd;
struct completion completion;
@@ -136,12 +111,6 @@ struct exynos_adc {
u32 value;
unsigned int version;
- bool ts_enabled;
-
- bool read_ts;
- u32 ts_x;
- u32 ts_y;
-
/*
* Lock to protect from potential concurrent access to the
* completion callback during a manual conversion. For this driver
@@ -241,7 +210,7 @@ static void exynos_adc_v1_init_hw(struct exynos_adc *info)
writel(con1, ADC_V1_CON(info->regs));
/* set touchscreen delay */
- writel(info->delay, ADC_V1_DLY(info->regs));
+ writel(10000, ADC_V1_DLY(info->regs));
}
static void exynos_adc_v1_exit_hw(struct exynos_adc *info)
@@ -307,53 +276,6 @@ static const struct exynos_adc_data exynos_adc_s5pv210_data = {
.start_conv = exynos_adc_v1_start_conv,
};
-static void exynos_adc_s3c2416_start_conv(struct exynos_adc *info,
- unsigned long addr)
-{
- u32 con1;
-
- /* Enable 12 bit ADC resolution */
- con1 = readl(ADC_V1_CON(info->regs));
- con1 |= ADC_S3C2416_CON_RES_SEL;
- writel(con1, ADC_V1_CON(info->regs));
-
- /* Select channel for S3C2416 */
- writel(addr, ADC_S3C2410_MUX(info->regs));
-
- con1 = readl(ADC_V1_CON(info->regs));
- writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
-}
-
-static struct exynos_adc_data const exynos_adc_s3c2416_data = {
- .num_channels = MAX_ADC_V1_CHANNELS,
- .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
-
- .init_hw = exynos_adc_v1_init_hw,
- .exit_hw = exynos_adc_v1_exit_hw,
- .start_conv = exynos_adc_s3c2416_start_conv,
-};
-
-static void exynos_adc_s3c2443_start_conv(struct exynos_adc *info,
- unsigned long addr)
-{
- u32 con1;
-
- /* Select channel for S3C2433 */
- writel(addr, ADC_S3C2410_MUX(info->regs));
-
- con1 = readl(ADC_V1_CON(info->regs));
- writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
-}
-
-static struct exynos_adc_data const exynos_adc_s3c2443_data = {
- .num_channels = MAX_ADC_V1_CHANNELS,
- .mask = ADC_S3C2410_DATX_MASK, /* 10 bit ADC resolution */
-
- .init_hw = exynos_adc_v1_init_hw,
- .exit_hw = exynos_adc_v1_exit_hw,
- .start_conv = exynos_adc_s3c2443_start_conv,
-};
-
static void exynos_adc_s3c64xx_start_conv(struct exynos_adc *info,
unsigned long addr)
{
@@ -365,15 +287,6 @@ static void exynos_adc_s3c64xx_start_conv(struct exynos_adc *info,
writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
}
-static struct exynos_adc_data const exynos_adc_s3c24xx_data = {
- .num_channels = MAX_ADC_V1_CHANNELS,
- .mask = ADC_S3C2410_DATX_MASK, /* 10 bit ADC resolution */
-
- .init_hw = exynos_adc_v1_init_hw,
- .exit_hw = exynos_adc_v1_exit_hw,
- .start_conv = exynos_adc_s3c64xx_start_conv,
-};
-
static struct exynos_adc_data const exynos_adc_s3c64xx_data = {
.num_channels = MAX_ADC_V1_CHANNELS,
.mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
@@ -486,18 +399,6 @@ static const struct exynos_adc_data exynos7_adc_data = {
static const struct of_device_id exynos_adc_match[] = {
{
- .compatible = "samsung,s3c2410-adc",
- .data = &exynos_adc_s3c24xx_data,
- }, {
- .compatible = "samsung,s3c2416-adc",
- .data = &exynos_adc_s3c2416_data,
- }, {
- .compatible = "samsung,s3c2440-adc",
- .data = &exynos_adc_s3c24xx_data,
- }, {
- .compatible = "samsung,s3c2443-adc",
- .data = &exynos_adc_s3c2443_data,
- }, {
.compatible = "samsung,s3c6410-adc",
.data = &exynos_adc_s3c64xx_data,
}, {
@@ -580,55 +481,13 @@ static int exynos_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static int exynos_read_s3c64xx_ts(struct iio_dev *indio_dev, int *x, int *y)
-{
- struct exynos_adc *info = iio_priv(indio_dev);
- unsigned long time_left;
- int ret;
-
- mutex_lock(&info->lock);
- info->read_ts = true;
-
- reinit_completion(&info->completion);
-
- writel(ADC_S3C2410_TSC_PULL_UP_DISABLE | ADC_TSC_AUTOPST,
- ADC_V1_TSC(info->regs));
-
- /* Select the ts channel to be used and Trigger conversion */
- info->data->start_conv(info, ADC_S3C2410_MUX_TS);
-
- time_left = wait_for_completion_timeout(&info->completion,
- EXYNOS_ADC_TIMEOUT);
- if (time_left == 0) {
- dev_warn(&indio_dev->dev, "Conversion timed out! Resetting\n");
- if (info->data->init_hw)
- info->data->init_hw(info);
- ret = -ETIMEDOUT;
- } else {
- *x = info->ts_x;
- *y = info->ts_y;
- ret = 0;
- }
-
- info->read_ts = false;
- mutex_unlock(&info->lock);
-
- return ret;
-}
-
static irqreturn_t exynos_adc_isr(int irq, void *dev_id)
{
struct exynos_adc *info = dev_id;
u32 mask = info->data->mask;
/* Read value */
- if (info->read_ts) {
- info->ts_x = readl(ADC_V1_DATX(info->regs));
- info->ts_y = readl(ADC_V1_DATY(info->regs));
- writel(ADC_TSC_WAIT4INT | ADC_S3C2443_TSC_UD_SEN, ADC_V1_TSC(info->regs));
- } else {
- info->value = readl(ADC_V1_DATX(info->regs)) & mask;
- }
+ info->value = readl(ADC_V1_DATX(info->regs)) & mask;
/* clear irq */
if (info->data->clear_irq)
@@ -639,46 +498,6 @@ static irqreturn_t exynos_adc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/*
- * Here we (ab)use a threaded interrupt handler to stay running
- * for as long as the touchscreen remains pressed, we report
- * a new event with the latest data and then sleep until the
- * next timer tick. This mirrors the behavior of the old
- * driver, with much less code.
- */
-static irqreturn_t exynos_ts_isr(int irq, void *dev_id)
-{
- struct exynos_adc *info = dev_id;
- struct iio_dev *dev = dev_get_drvdata(info->dev);
- u32 x, y;
- bool pressed;
- int ret;
-
- while (READ_ONCE(info->ts_enabled)) {
- ret = exynos_read_s3c64xx_ts(dev, &x, &y);
- if (ret == -ETIMEDOUT)
- break;
-
- pressed = x & y & ADC_DATX_PRESSED;
- if (!pressed) {
- input_report_key(info->input, BTN_TOUCH, 0);
- input_sync(info->input);
- break;
- }
-
- input_report_abs(info->input, ABS_X, x & ADC_DATX_MASK);
- input_report_abs(info->input, ABS_Y, y & ADC_DATY_MASK);
- input_report_key(info->input, BTN_TOUCH, 1);
- input_sync(info->input);
-
- usleep_range(1000, 1100);
- }
-
- writel(0, ADC_V1_CLRINTPNDNUP(info->regs));
-
- return IRQ_HANDLED;
-}
-
static int exynos_adc_reg_access(struct iio_dev *indio_dev,
unsigned reg, unsigned writeval,
unsigned *readval)
@@ -730,78 +549,17 @@ static int exynos_adc_remove_devices(struct device *dev, void *c)
return 0;
}
-static int exynos_adc_ts_open(struct input_dev *dev)
-{
- struct exynos_adc *info = input_get_drvdata(dev);
-
- WRITE_ONCE(info->ts_enabled, true);
- enable_irq(info->tsirq);
-
- return 0;
-}
-
-static void exynos_adc_ts_close(struct input_dev *dev)
-{
- struct exynos_adc *info = input_get_drvdata(dev);
-
- WRITE_ONCE(info->ts_enabled, false);
- disable_irq(info->tsirq);
-}
-
-static int exynos_adc_ts_init(struct exynos_adc *info)
-{
- int ret;
-
- if (info->tsirq <= 0)
- return -ENODEV;
-
- info->input = input_allocate_device();
- if (!info->input)
- return -ENOMEM;
-
- info->input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- info->input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-
- input_set_abs_params(info->input, ABS_X, 0, 0x3FF, 0, 0);
- input_set_abs_params(info->input, ABS_Y, 0, 0x3FF, 0, 0);
-
- info->input->name = "S3C24xx TouchScreen";
- info->input->id.bustype = BUS_HOST;
- info->input->open = exynos_adc_ts_open;
- info->input->close = exynos_adc_ts_close;
-
- input_set_drvdata(info->input, info);
-
- ret = input_register_device(info->input);
- if (ret) {
- input_free_device(info->input);
- return ret;
- }
-
- ret = request_threaded_irq(info->tsirq, NULL, exynos_ts_isr,
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- "touchscreen", info);
- if (ret)
- input_unregister_device(info->input);
-
- return ret;
-}
-
static int exynos_adc_probe(struct platform_device *pdev)
{
struct exynos_adc *info = NULL;
struct device_node *np = pdev->dev.of_node;
- struct s3c2410_ts_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct iio_dev *indio_dev = NULL;
- bool has_ts = false;
int ret;
int irq;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct exynos_adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
info = iio_priv(indio_dev);
@@ -826,27 +584,10 @@ static int exynos_adc_probe(struct platform_device *pdev)
}
}
- /* leave out any TS related code if unreachable */
- if (IS_REACHABLE(CONFIG_INPUT)) {
- has_ts = of_property_read_bool(pdev->dev.of_node,
- "has-touchscreen") || pdata;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
info->irq = irq;
-
- if (has_ts) {
- irq = platform_get_irq(pdev, 1);
- if (irq == -EPROBE_DEFER)
- return irq;
-
- info->tsirq = irq;
- } else {
- info->tsirq = -1;
- }
-
info->dev = &pdev->dev;
init_completion(&info->completion);
@@ -910,16 +651,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (info->data->init_hw)
info->data->init_hw(info);
- if (pdata)
- info->delay = pdata->delay;
- else
- info->delay = 10000;
-
- if (has_ts)
- ret = exynos_adc_ts_init(info);
- if (ret)
- goto err_iio;
-
ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed adding child nodes\n");
@@ -931,11 +662,6 @@ static int exynos_adc_probe(struct platform_device *pdev)
err_of_populate:
device_for_each_child(&indio_dev->dev, NULL,
exynos_adc_remove_devices);
- if (has_ts) {
- input_unregister_device(info->input);
- free_irq(info->tsirq, info);
- }
-err_iio:
iio_device_unregister(indio_dev);
err_irq:
free_irq(info->irq, info);
@@ -955,10 +681,6 @@ static void exynos_adc_remove(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct exynos_adc *info = iio_priv(indio_dev);
- if (IS_REACHABLE(CONFIG_INPUT) && info->input) {
- free_irq(info->tsirq, info);
- input_unregister_device(info->input);
- }
device_for_each_child(&indio_dev->dev, NULL,
exynos_adc_remove_devices);
iio_device_unregister(indio_dev);
diff --git a/drivers/iio/adc/hx711.c b/drivers/iio/adc/hx711.c
index 7235fa9e13d5..1db8b68a8f64 100644
--- a/drivers/iio/adc/hx711.c
+++ b/drivers/iio/adc/hx711.c
@@ -465,7 +465,7 @@ static int hx711_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(struct hx711_data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM, "failed to allocate IIO device\n");
+ return -ENOMEM;
hx711_data = iio_priv(indio_dev);
hx711_data->dev = dev;
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 09ce71f6e941..039c0387da23 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -482,10 +482,8 @@ static int imx7d_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
- if (!indio_dev) {
- dev_err(&pdev->dev, "Failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
info = iio_priv(indio_dev);
info->dev = dev;
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index be13a6ed7e00..6fc50394ad90 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -229,7 +229,6 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
ret = wait_for_completion_interruptible_timeout(&adc->completion,
IMX8QXP_ADC_TIMEOUT);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
if (ret == 0) {
@@ -295,7 +294,6 @@ static int imx8qxp_adc_reg_access(struct iio_dev *indio_dev, unsigned int reg,
*readval = readl(adc->regs + reg);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
return 0;
@@ -315,10 +313,8 @@ static int imx8qxp_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
- if (!indio_dev) {
- dev_err(dev, "Failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc = iio_priv(indio_dev);
adc->dev = dev;
diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c
index 7feaafd2316f..787e80db5de3 100644
--- a/drivers/iio/adc/imx93_adc.c
+++ b/drivers/iio/adc/imx93_adc.c
@@ -32,12 +32,13 @@
#define IMX93_ADC_PCDR0 0x100
#define IMX93_ADC_PCDR1 0x104
#define IMX93_ADC_PCDR2 0x108
-#define IMX93_ADC_PCDR3 0x10c
+#define IMX93_ADC_PCDR3 0x10C
#define IMX93_ADC_PCDR4 0x110
#define IMX93_ADC_PCDR5 0x114
#define IMX93_ADC_PCDR6 0x118
-#define IMX93_ADC_PCDR7 0x11c
+#define IMX93_ADC_PCDR7 0x11C
#define IMX93_ADC_CALSTAT 0x39C
+#define IMX93_ADC_CALCFG0 0x3A0
/* ADC bit shift */
#define IMX93_ADC_MCR_MODE_MASK BIT(29)
@@ -58,6 +59,8 @@
#define IMX93_ADC_IMR_ECH_MASK BIT(0)
#define IMX93_ADC_PCDR_CDATA_MASK GENMASK(11, 0)
+#define IMX93_ADC_CALCFG0_LDFAIL_MASK BIT(4)
+
/* ADC status */
#define IMX93_ADC_MSR_ADCSTATUS_IDLE 0
#define IMX93_ADC_MSR_ADCSTATUS_POWER_DOWN 1
@@ -145,7 +148,7 @@ static void imx93_adc_config_ad_clk(struct imx93_adc *adc)
static int imx93_adc_calibration(struct imx93_adc *adc)
{
- u32 mcr, msr;
+ u32 mcr, msr, calcfg;
int ret;
/* make sure ADC in power down mode */
@@ -158,6 +161,11 @@ static int imx93_adc_calibration(struct imx93_adc *adc)
imx93_adc_power_up(adc);
+ /* Enable loading of calibrated values even in fail condition */
+ calcfg = readl(adc->regs + IMX93_ADC_CALCFG0);
+ calcfg |= IMX93_ADC_CALCFG0_LDFAIL_MASK;
+ writel(calcfg, adc->regs + IMX93_ADC_CALCFG0);
+
/*
* TODO: we use the default TSAMP/NRSMPL/AVGEN in MCR,
* can add the setting of these bit if need in future.
@@ -180,9 +188,13 @@ static int imx93_adc_calibration(struct imx93_adc *adc)
/* check whether calibration succeeded or not */
msr = readl(adc->regs + IMX93_ADC_MSR);
if (msr & IMX93_ADC_MSR_CALFAIL_MASK) {
+ /*
+ * Only give a warning here; this means the noise of the
+ * reference voltage does not meet the requirement:
+ * ADC reference voltage noise < 1.8V * 1/2^ENOB
+ * and the ADC result is therefore less accurate.
+ */
dev_warn(adc->dev, "ADC calibration failed!\n");
- imx93_adc_power_down(adc);
- return -EAGAIN;
}
return 0;
@@ -248,7 +260,6 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&adc->lock);
ret = imx93_adc_read_channel_conversion(adc, chan->channel, val);
mutex_unlock(&adc->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_sync_autosuspend(dev);
if (ret < 0)
return ret;
@@ -308,8 +319,7 @@ static int imx93_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "Failed allocating iio device\n");
+ return -ENOMEM;
adc = iio_priv(indio_dev);
adc->dev = dev;
diff --git a/drivers/iio/adc/intel_dc_ti_adc.c b/drivers/iio/adc/intel_dc_ti_adc.c
new file mode 100644
index 000000000000..0fe34f1c338e
--- /dev/null
+++ b/drivers/iio/adc/intel_dc_ti_adc.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Dollar Cove TI PMIC GPADC Driver
+ *
+ * Copyright (C) 2014 Intel Corporation (Ramakrishna Pallala <ramakrishna.pallala@intel.com>)
+ * Copyright (C) 2024 - 2025 Hans de Goede <hansg@kernel.org>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/intel_soc_pmic.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/wait.h>
+
+#include <linux/iio/driver.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/machine.h>
+
+#define DC_TI_ADC_CNTL_REG 0x50
+#define DC_TI_ADC_START BIT(0)
+#define DC_TI_ADC_CH_SEL GENMASK(2, 1)
+#define DC_TI_ADC_EN BIT(5)
+#define DC_TI_ADC_EN_EXT_BPTH_BIAS BIT(6)
+
+#define DC_TI_VBAT_ZSE_GE_REG 0x53
+#define DC_TI_VBAT_GE GENMASK(3, 0)
+#define DC_TI_VBAT_ZSE GENMASK(7, 4)
+
+/* VBAT GE gain correction is in 0.0015 increments, ZSE is in 1.0 increments */
+#define DC_TI_VBAT_GE_STEP 15
+#define DC_TI_VBAT_GE_DIV 10000
+
+#define DC_TI_ADC_DATA_REG_CH(x) (0x54 + 2 * (x))
+
+enum dc_ti_adc_id {
+ DC_TI_ADC_VBAT,
+ DC_TI_ADC_PMICTEMP,
+ DC_TI_ADC_BATTEMP,
+ DC_TI_ADC_SYSTEMP0,
+};
+
+struct dc_ti_adc_info {
+ struct mutex lock; /* Protects against concurrent accesses to the ADC */
+ wait_queue_head_t wait;
+ struct device *dev;
+ struct regmap *regmap;
+ int vbat_zse;
+ int vbat_ge;
+ bool conversion_done;
+};
+
+static const struct iio_chan_spec dc_ti_adc_channels[] = {
+ {
+ .indexed = 1,
+ .type = IIO_VOLTAGE,
+ .channel = DC_TI_ADC_VBAT,
+ .address = DC_TI_ADC_DATA_REG_CH(0),
+ .datasheet_name = "CH0",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = DC_TI_ADC_PMICTEMP,
+ .address = DC_TI_ADC_DATA_REG_CH(1),
+ .datasheet_name = "CH1",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = DC_TI_ADC_BATTEMP,
+ .address = DC_TI_ADC_DATA_REG_CH(2),
+ .datasheet_name = "CH2",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }, {
+ .indexed = 1,
+ .type = IIO_TEMP,
+ .channel = DC_TI_ADC_SYSTEMP0,
+ .address = DC_TI_ADC_DATA_REG_CH(3),
+ .datasheet_name = "CH3",
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ }
+};
+
+static struct iio_map dc_ti_adc_default_maps[] = {
+ IIO_MAP("CH0", "chtdc_ti_battery", "VBAT"),
+ IIO_MAP("CH1", "chtdc_ti_battery", "PMICTEMP"),
+ IIO_MAP("CH2", "chtdc_ti_battery", "BATTEMP"),
+ IIO_MAP("CH3", "chtdc_ti_battery", "SYSTEMP0"),
+ { }
+};
+
+static irqreturn_t dc_ti_adc_isr(int irq, void *data)
+{
+ struct dc_ti_adc_info *info = data;
+
+ info->conversion_done = true;
+ wake_up(&info->wait);
+ return IRQ_HANDLED;
+}
+
+static int dc_ti_adc_scale(struct dc_ti_adc_info *info,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ if (chan->channel != DC_TI_ADC_VBAT)
+ return -EINVAL;
+
+ /* Vbat ADC scale is 4.6875 mV / unit */
+ *val = 4;
+ *val2 = 687500;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int dc_ti_adc_raw_to_processed(struct dc_ti_adc_info *info,
+ struct iio_chan_spec const *chan,
+ int raw, int *val, int *val2)
+{
+ if (chan->channel != DC_TI_ADC_VBAT)
+ return -EINVAL;
+
+ /* Apply calibration */
+ raw -= info->vbat_zse;
+ raw = raw * (DC_TI_VBAT_GE_DIV - info->vbat_ge * DC_TI_VBAT_GE_STEP) /
+ DC_TI_VBAT_GE_DIV;
+ /* Vbat ADC scale is 4.6875 mV / unit */
+ raw *= 46875;
+
+ /* raw is now in units of 1/10000 mV; convert to mV plus micro-mV */
+ *val = raw / 10000;
+ *val2 = (raw % 10000) * 100;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
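A minimal standalone sketch of the fixed-point VBAT correction performed by
dc_ti_adc_raw_to_processed() above, assuming hypothetical calibration values
(vbat_zse = 2, vbat_ge = 3) and a hypothetical raw sample of 800; the constants
mirror DC_TI_VBAT_GE_STEP, DC_TI_VBAT_GE_DIV and the 4.6875 mV/LSB scale, but
this is an illustration, not driver code.

#include <stdio.h>

int main(void)
{
	int raw = 800, vbat_zse = 2, vbat_ge = 3;	/* hypothetical values */
	int val, val2;

	raw -= vbat_zse;				/* zero-scale error */
	raw = raw * (10000 - vbat_ge * 15) / 10000;	/* gain error, 0.0015/step */
	raw *= 46875;					/* 4.6875 mV/LSB, x10000 */

	val = raw / 10000;				/* integer mV */
	val2 = (raw % 10000) * 100;			/* micro-mV remainder */

	printf("%d.%06d mV\n", val, val2);		/* prints 3721.875000 mV */
	return 0;
}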
+static int dc_ti_adc_sample(struct dc_ti_adc_info *info,
+ struct iio_chan_spec const *chan, int *val)
+{
+ int ret, ch = chan->channel;
+ __be16 buf;
+
+ info->conversion_done = false;
+
+ /*
+ * As per TI (PMIC Vendor), the ADC enable and ADC start commands should
+ * not be sent together. Hence send the commands separately.
+ */
+ ret = regmap_set_bits(info->regmap, DC_TI_ADC_CNTL_REG, DC_TI_ADC_EN);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_CH_SEL,
+ FIELD_PREP(DC_TI_ADC_CH_SEL, ch));
+ if (ret)
+ return ret;
+
+ /*
+ * As per the PMIC vendor, a minimum delay of 50 µs is required between
+ * the ADC Enable and ADC START commands. This is also recommended by
+ * the Intel hardware team after the timing analysis of the GPADC
+ * signals. Since the I2C write transaction to set the channel number
+ * already imparts a 25 µs delay, we only need to wait for another
+ * 25 µs before issuing ADC START.
+ */
+ fsleep(25);
+
+ ret = regmap_set_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_START);
+ if (ret)
+ return ret;
+
+ /* TI (PMIC Vendor) recommends a 5 s timeout for the conversion */
+ ret = wait_event_timeout(info->wait, info->conversion_done, 5 * HZ);
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto disable_adc;
+ }
+
+ ret = regmap_bulk_read(info->regmap, chan->address, &buf, sizeof(buf));
+ if (ret)
+ goto disable_adc;
+
+ /* The ADC values are 10 bits wide */
+ *val = be16_to_cpu(buf) & GENMASK(9, 0);
+
+disable_adc:
+ regmap_clear_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_START | DC_TI_ADC_EN);
+ return ret;
+}
+
+static int dc_ti_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct dc_ti_adc_info *info = iio_priv(indio_dev);
+ int ret;
+
+ if (mask == IIO_CHAN_INFO_SCALE)
+ return dc_ti_adc_scale(info, chan, val, val2);
+
+ guard(mutex)(&info->lock);
+
+ /*
+ * If the BPTHERM channel has been selected, first enable the BPTHERM
+ * BIAS, which provides the VREF voltage reference used to convert the
+ * BPTHERM input voltage to a temperature.
+ */
+ if (chan->channel == DC_TI_ADC_BATTEMP) {
+ ret = regmap_set_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_EN_EXT_BPTH_BIAS);
+ if (ret)
+ return ret;
+ /*
+ * As per PMIC Vendor specifications, BPTHERM BIAS should be
+ * enabled 35 ms before the ADC_EN command.
+ */
+ msleep(35);
+ }
+
+ ret = dc_ti_adc_sample(info, chan, val);
+
+ if (chan->channel == DC_TI_ADC_BATTEMP)
+ regmap_clear_bits(info->regmap, DC_TI_ADC_CNTL_REG,
+ DC_TI_ADC_EN_EXT_BPTH_BIAS);
+
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ return dc_ti_adc_raw_to_processed(info, chan, *val, val, val2);
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info dc_ti_adc_iio_info = {
+ .read_raw = dc_ti_adc_read_raw,
+};
+
+static int dc_ti_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct intel_soc_pmic *pmic = dev_get_drvdata(dev->parent);
+ struct dc_ti_adc_info *info;
+ struct iio_dev *indio_dev;
+ unsigned int val;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ info = iio_priv(indio_dev);
+
+ ret = devm_mutex_init(dev, &info->lock);
+ if (ret)
+ return ret;
+
+ init_waitqueue_head(&info->wait);
+
+ info->dev = dev;
+ info->regmap = pmic->regmap;
+
+ indio_dev->name = "dc_ti_adc";
+ indio_dev->channels = dc_ti_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(dc_ti_adc_channels);
+ indio_dev->info = &dc_ti_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = regmap_read(info->regmap, DC_TI_VBAT_ZSE_GE_REG, &val);
+ if (ret)
+ return ret;
+
+ info->vbat_zse = sign_extend32(FIELD_GET(DC_TI_VBAT_ZSE, val), 3);
+ info->vbat_ge = sign_extend32(FIELD_GET(DC_TI_VBAT_GE, val), 3);
+
+ dev_dbg(dev, "vbat-zse %d vbat-ge %d\n", info->vbat_zse, info->vbat_ge);
+
+ ret = devm_iio_map_array_register(dev, indio_dev, dc_ti_adc_default_maps);
+ if (ret)
+ return ret;
+
+ ret = devm_request_threaded_irq(dev, irq, NULL, dc_ti_adc_isr,
+ IRQF_ONESHOT, indio_dev->name, info);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
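A small sketch of the 4-bit signed calibration decode done at the end of probe
above, assuming a hypothetical DC_TI_VBAT_ZSE_GE_REG readout of 0xa7; the local
sign_extend32() stands in for the kernel helper so the example builds on its
own.

#include <stdint.h>
#include <stdio.h>

static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	uint32_t val = 0xa7;	/* hypothetical register value */

	/* ZSE lives in bits 7:4, GE in bits 3:0, both 4-bit signed */
	printf("vbat-zse %d vbat-ge %d\n",
	       sign_extend32(val >> 4, 3),	/* 0xa -> -6 */
	       sign_extend32(val & 0xf, 3));	/* 0x7 -> 7 */
	return 0;
}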
+static const struct platform_device_id dc_ti_adc_ids[] = {
+ { .name = "chtdc_ti_adc" },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, dc_ti_adc_ids);
+
+static struct platform_driver dc_ti_adc_driver = {
+ .driver = {
+ .name = "dc_ti_adc",
+ },
+ .probe = dc_ti_adc_probe,
+ .id_table = dc_ti_adc_ids,
+};
+module_platform_driver(dc_ti_adc_driver);
+
+MODULE_AUTHOR("Ramakrishna Pallala (Intel)");
+MODULE_AUTHOR("Hans de Goede <hansg@kernel.org>");
+MODULE_DESCRIPTION("Intel Dollar Cove (TI) GPADC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/max14001.c b/drivers/iio/adc/max14001.c
new file mode 100644
index 000000000000..90ad4cb5868d
--- /dev/null
+++ b/drivers/iio/adc/max14001.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Analog Devices MAX14001/MAX14002 ADC driver
+ *
+ * Copyright (C) 2023-2025 Analog Devices Inc.
+ * Copyright (C) 2023 Kim Seer Paller <kimseer.paller@analog.com>
+ * Copyright (c) 2025 Marilene Andrade Garcia <marilene.agarcia@gmail.com>
+ *
+ * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/MAX14001-MAX14002.pdf
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitrev.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/units.h>
+#include <asm/byteorder.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+/* MAX14001 Registers Address */
+#define MAX14001_REG_ADC 0x00
+#define MAX14001_REG_FADC 0x01
+#define MAX14001_REG_FLAGS 0x02
+#define MAX14001_REG_FLTEN 0x03
+#define MAX14001_REG_THL 0x04
+#define MAX14001_REG_THU 0x05
+#define MAX14001_REG_INRR 0x06
+#define MAX14001_REG_INRT 0x07
+#define MAX14001_REG_INRP 0x08
+#define MAX14001_REG_CFG 0x09
+#define MAX14001_REG_ENBL 0x0A
+#define MAX14001_REG_ACT 0x0B
+#define MAX14001_REG_WEN 0x0C
+
+#define MAX14001_REG_VERIFICATION(x) ((x) + 0x10)
+
+#define MAX14001_REG_CFG_BIT_EXRF BIT(5)
+
+#define MAX14001_REG_WEN_VALUE_WRITE 0x294
+
+#define MAX14001_MASK_ADDR GENMASK(15, 11)
+#define MAX14001_MASK_WR BIT(10)
+#define MAX14001_MASK_DATA GENMASK(9, 0)
+
+struct max14001_state {
+ const struct max14001_chip_info *chip_info;
+ struct spi_device *spi;
+ struct regmap *regmap;
+ int vref_mV;
+ bool spi_hw_has_lsb_first;
+
+ /*
+ * The following buffers will be bit-reversed during device
+ * communication, because the device transmits and receives data
+ * LSB-first.
+ * DMA (thus cache coherency maintenance) requires the transfer
+ * buffers to live in their own cache lines.
+ */
+ union {
+ __be16 be;
+ __le16 le;
+ } spi_tx_buffer __aligned(IIO_DMA_MINALIGN);
+
+ union {
+ __be16 be;
+ __le16 le;
+ } spi_rx_buffer;
+};
+
+struct max14001_chip_info {
+ const char *name;
+};
+
+static int max14001_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct max14001_state *st = context;
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->spi_tx_buffer,
+ .len = sizeof(st->spi_tx_buffer),
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->spi_rx_buffer,
+ .len = sizeof(st->spi_rx_buffer),
+ },
+ };
+ int ret;
+ unsigned int addr, data;
+
+ /*
+ * Prepare the 16-bit SPI transmit value and reverse its bit order to
+ * match the LSB-first input on the SDI port, as the device
+ * communication requires. If the controller supports
+ * SPI_LSB_FIRST, this step will be handled by the SPI controller.
+ */
+ addr = FIELD_PREP(MAX14001_MASK_ADDR, reg);
+
+ if (st->spi_hw_has_lsb_first)
+ st->spi_tx_buffer.le = cpu_to_le16(addr);
+ else
+ st->spi_tx_buffer.be = cpu_to_be16(bitrev16(addr));
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret)
+ return ret;
+
+ /*
+ * Convert received 16-bit value to cpu-endian format and reverse
+ * bit order. If the controller supports SPI_LSB_FIRST, this step
+ * will be handled by the SPI controller.
+ */
+ if (st->spi_hw_has_lsb_first)
+ data = le16_to_cpu(st->spi_rx_buffer.le);
+ else
+ data = bitrev16(be16_to_cpu(st->spi_rx_buffer.be));
+
+ *val = FIELD_GET(MAX14001_MASK_DATA, data);
+
+ return 0;
+}
+
+static int max14001_write(struct max14001_state *st, unsigned int reg, unsigned int val)
+{
+ unsigned int addr;
+
+ /*
+ * Prepare the 16-bit SPI transmit value and reverse its bit order to
+ * match the LSB-first input on the SDI port, as the device
+ * communication requires. If the controller supports
+ * SPI_LSB_FIRST, this step will be handled by the SPI controller.
+ */
+ addr = FIELD_PREP(MAX14001_MASK_ADDR, reg) |
+ FIELD_PREP(MAX14001_MASK_WR, 1) |
+ FIELD_PREP(MAX14001_MASK_DATA, val);
+
+ if (st->spi_hw_has_lsb_first)
+ st->spi_tx_buffer.le = cpu_to_le16(addr);
+ else
+ st->spi_tx_buffer.be = cpu_to_be16(bitrev16(addr));
+
+ return spi_write(st->spi, &st->spi_tx_buffer, sizeof(st->spi_tx_buffer));
+}
+
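A standalone sketch of how a write word ends up framed by max14001_write()
above on a controller without SPI_LSB_FIRST, assuming a hypothetical write of
0x155 to register 0x04 (THL); the local bitrev16() stands in for the helper
from linux/bitrev.h.

#include <stdint.h>
#include <stdio.h>

static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;

	for (int i = 0; i < 16; i++)
		r |= ((x >> i) & 1u) << (15 - i);
	return r;
}

int main(void)
{
	/* ADDR in bits 15:11, WR in bit 10, DATA in bits 9:0 */
	uint16_t word = (0x04u << 11) | (1u << 10) | (0x155u & 0x3ffu);

	/* The device samples SDI LSB-first, so reverse before an
	 * MSB-first (big-endian) transfer. */
	printf("frame 0x%04x -> wire 0x%04x\n", word, bitrev16(word));
	return 0;
}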
+static int max14001_write_single_reg(void *context, unsigned int reg, unsigned int val)
+{
+ struct max14001_state *st = context;
+ int ret;
+
+ /* Enable writing to the SPI register. */
+ ret = max14001_write(st, MAX14001_REG_WEN, MAX14001_REG_WEN_VALUE_WRITE);
+ if (ret)
+ return ret;
+
+ /* Writing data into SPI register. */
+ ret = max14001_write(st, reg, val);
+ if (ret)
+ return ret;
+
+ /* Disable writing to the SPI register. */
+ return max14001_write(st, MAX14001_REG_WEN, 0);
+}
+
+static int max14001_write_verification_reg(struct max14001_state *st, unsigned int reg)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(st->regmap, reg, &val);
+ if (ret)
+ return ret;
+
+ return max14001_write(st, MAX14001_REG_VERIFICATION(reg), val);
+}
+
+static int max14001_disable_mv_fault(struct max14001_state *st)
+{
+ unsigned int reg;
+ int ret;
+
+ /* Enable writing to the SPI registers. */
+ ret = max14001_write(st, MAX14001_REG_WEN, MAX14001_REG_WEN_VALUE_WRITE);
+ if (ret)
+ return ret;
+
+ /*
+ * Read all registers and write the values to their appropriate
+ * verification registers to clear the Memory Validation fault.
+ */
+ for (reg = MAX14001_REG_FLTEN; reg <= MAX14001_REG_ENBL; reg++) {
+ ret = max14001_write_verification_reg(st, reg);
+ if (ret)
+ return ret;
+ }
+
+ /* Disable writing to the SPI registers. */
+ return max14001_write(st, MAX14001_REG_WEN, 0);
+}
+
+static int max14001_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct max14001_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static int max14001_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max14001_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(st->regmap, MAX14001_REG_ADC, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = st->vref_mV;
+ *val2 = 10;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
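For the scale path above, a short sketch of what the IIO_VAL_FRACTIONAL_LOG2
pair works out to, assuming the default 1250 mV reference when no REFIN supply
is described: user space sees vref_mV / 2^10 as in_voltage_scale.

#include <stdio.h>

int main(void)
{
	int val = 1250, val2 = 10;	/* as returned for IIO_CHAN_INFO_SCALE */
	double scale = (double)val / (1 << val2);

	printf("scale = %.9f mV/LSB\n", scale);	/* 1.220703125 */
	return 0;
}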
+static const struct regmap_range max14001_regmap_rd_range[] = {
+ regmap_reg_range(MAX14001_REG_ADC, MAX14001_REG_ENBL),
+ regmap_reg_range(MAX14001_REG_WEN, MAX14001_REG_WEN),
+ regmap_reg_range(MAX14001_REG_VERIFICATION(MAX14001_REG_FLTEN),
+ MAX14001_REG_VERIFICATION(MAX14001_REG_ENBL)),
+};
+
+static const struct regmap_access_table max14001_regmap_rd_table = {
+ .yes_ranges = max14001_regmap_rd_range,
+ .n_yes_ranges = ARRAY_SIZE(max14001_regmap_rd_range),
+};
+
+static const struct regmap_range max14001_regmap_wr_range[] = {
+ regmap_reg_range(MAX14001_REG_FLTEN, MAX14001_REG_WEN),
+ regmap_reg_range(MAX14001_REG_VERIFICATION(MAX14001_REG_FLTEN),
+ MAX14001_REG_VERIFICATION(MAX14001_REG_ENBL)),
+};
+
+static const struct regmap_access_table max14001_regmap_wr_table = {
+ .yes_ranges = max14001_regmap_wr_range,
+ .n_yes_ranges = ARRAY_SIZE(max14001_regmap_wr_range),
+};
+
+static const struct regmap_config max14001_regmap_config = {
+ .reg_read = max14001_read,
+ .reg_write = max14001_write_single_reg,
+ .max_register = MAX14001_REG_VERIFICATION(MAX14001_REG_ENBL),
+ .rd_table = &max14001_regmap_rd_table,
+ .wr_table = &max14001_regmap_wr_table,
+};
+
+static const struct iio_info max14001_info = {
+ .read_raw = max14001_read_raw,
+ .debugfs_reg_access = max14001_debugfs_reg_access,
+};
+
+static const struct iio_chan_spec max14001_channel[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int max14001_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct max14001_state *st;
+ int ret;
+ bool use_ext_vrefin = false;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+ st->spi_hw_has_lsb_first = spi->mode & SPI_LSB_FIRST;
+ st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+ return -EINVAL;
+
+ indio_dev->name = st->chip_info->name;
+ indio_dev->info = &max14001_info;
+ indio_dev->channels = max14001_channel;
+ indio_dev->num_channels = ARRAY_SIZE(max14001_channel);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ st->regmap = devm_regmap_init(dev, NULL, st, &max14001_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(dev, PTR_ERR(st->regmap), "Failed to initialize regmap\n");
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable Vdd supply\n");
+
+ ret = devm_regulator_get_enable(dev, "vddl");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable Vddl supply\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "refin");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "Failed to get REFIN voltage\n");
+
+ if (ret == -ENODEV)
+ ret = 1250000;
+ else
+ use_ext_vrefin = true;
+ st->vref_mV = ret / (MICRO / MILLI);
+
+ if (use_ext_vrefin) {
+ /*
+ * Configure the MAX14001/MAX14002 to use an external voltage
+ * reference source by setting bit 5 of the configuration register.
+ */
+ ret = regmap_set_bits(st->regmap, MAX14001_REG_CFG,
+ MAX14001_REG_CFG_BIT_EXRF);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to set External REFIN in Configuration Register\n");
+ }
+
+ ret = max14001_disable_mv_fault(st);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to disable MV Fault\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct max14001_chip_info max14001_chip_info = {
+ .name = "max14001",
+};
+
+static const struct max14001_chip_info max14002_chip_info = {
+ .name = "max14002",
+};
+
+static const struct spi_device_id max14001_id_table[] = {
+ { "max14001", (kernel_ulong_t)&max14001_chip_info },
+ { "max14002", (kernel_ulong_t)&max14002_chip_info },
+ { }
+};
+
+static const struct of_device_id max14001_of_match[] = {
+ { .compatible = "adi,max14001", .data = &max14001_chip_info },
+ { .compatible = "adi,max14002", .data = &max14002_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max14001_of_match);
+
+static struct spi_driver max14001_driver = {
+ .driver = {
+ .name = "max14001",
+ .of_match_table = max14001_of_match,
+ },
+ .probe = max14001_probe,
+ .id_table = max14001_id_table,
+};
+module_spi_driver(max14001_driver);
+
+MODULE_AUTHOR("Kim Seer Paller <kimseer.paller@analog.com>");
+MODULE_AUTHOR("Marilene Andrade Garcia <marilene.agarcia@gmail.com>");
+MODULE_DESCRIPTION("Analog Devices MAX14001/MAX14002 ADCs driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/mcp3564.c b/drivers/iio/adc/mcp3564.c
index a68f1cd6883e..fcdf13f49c48 100644
--- a/drivers/iio/adc/mcp3564.c
+++ b/drivers/iio/adc/mcp3564.c
@@ -987,7 +987,7 @@ static int mcp3564_read_label(struct iio_dev *indio_dev,
{
struct mcp3564_state *adc = iio_priv(indio_dev);
- return sprintf(label, "%s\n", adc->labels[chan->scan_index]);
+ return sysfs_emit(label, "%s\n", adc->labels[chan->scan_index]);
}
static int mcp3564_parse_fw_children(struct iio_dev *indio_dev)
@@ -1019,7 +1019,7 @@ static int mcp3564_parse_fw_children(struct iio_dev *indio_dev)
channels = devm_kcalloc(dev, num_ch, sizeof(*channels), GFP_KERNEL);
if (!channels)
- return dev_err_probe(dev, -ENOMEM, "Can't allocate memory\n");
+ return -ENOMEM;
device_for_each_child_node_scoped(dev, child) {
node_name = fwnode_get_name(child);
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 4ff88603e4fc..47cd350498a0 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -1181,12 +1181,12 @@ static int read_label(struct iio_dev *indio_dev,
char *label)
{
if (chan->type == IIO_TEMP)
- return sprintf(label, "temp-sensor\n");
+ return sysfs_emit(label, "temp-sensor\n");
if (chan->type == IIO_VOLTAGE && chan->channel >= NUM_MUX_0_VSS)
- return sprintf(label, "%s\n",
+ return sysfs_emit(label, "%s\n",
chan7_mux_names[chan->channel - NUM_MUX_0_VSS]);
if (chan->type == IIO_VOLTAGE)
- return sprintf(label, "channel-%d\n", chan->channel);
+ return sysfs_emit(label, "channel-%d\n", chan->channel);
return 0;
}
@@ -1357,7 +1357,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM, "failed allocating iio device\n");
+ return -ENOMEM;
priv = iio_priv(indio_dev);
init_completion(&priv->done);
diff --git a/drivers/iio/adc/mt6360-adc.c b/drivers/iio/adc/mt6360-adc.c
index 69b3569c90e5..e0e4df418612 100644
--- a/drivers/iio/adc/mt6360-adc.c
+++ b/drivers/iio/adc/mt6360-adc.c
@@ -216,7 +216,7 @@ static const char *mt6360_channel_labels[MT6360_CHAN_MAX] = {
static int mt6360_adc_read_label(struct iio_dev *iio_dev, const struct iio_chan_spec *chan,
char *label)
{
- return snprintf(label, PAGE_SIZE, "%s\n", mt6360_channel_labels[chan->channel]);
+ return sysfs_emit(label, "%s\n", mt6360_channel_labels[chan->channel]);
}
static const struct iio_info mt6360_adc_iio_info = {
diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
index 3343b54e8e44..fe9e3ece3fda 100644
--- a/drivers/iio/adc/mt6577_auxadc.c
+++ b/drivers/iio/adc/mt6577_auxadc.c
@@ -297,8 +297,7 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
ret = devm_add_action_or_reset(&pdev->dev, mt6577_power_off, adc_dev);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "Failed to add action to managed power off\n");
+ return ret;
ret = devm_iio_device_register(&pdev->dev, indio_dev);
if (ret < 0)
diff --git a/drivers/iio/adc/mxs-lradc-adc.c b/drivers/iio/adc/mxs-lradc-adc.c
index 92baf3f5f560..dda5182a5076 100644
--- a/drivers/iio/adc/mxs-lradc-adc.c
+++ b/drivers/iio/adc/mxs-lradc-adc.c
@@ -697,10 +697,8 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
/* Allocate the IIO device. */
iio = devm_iio_device_alloc(dev, sizeof(*adc));
- if (!iio) {
- dev_err(dev, "Failed to allocate IIO device\n");
+ if (!iio)
return -ENOMEM;
- }
adc = iio_priv(iio);
adc->lradc = lradc;
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
index 72aa4ca2e5a4..a0227b57f238 100644
--- a/drivers/iio/adc/pac1921.c
+++ b/drivers/iio/adc/pac1921.c
@@ -672,13 +672,13 @@ static int pac1921_read_label(struct iio_dev *indio_dev,
{
switch (chan->channel) {
case PAC1921_CHAN_VBUS:
- return sprintf(label, "vbus\n");
+ return sysfs_emit(label, "vbus\n");
case PAC1921_CHAN_VSENSE:
- return sprintf(label, "vsense\n");
+ return sysfs_emit(label, "vsense\n");
case PAC1921_CHAN_CURRENT:
- return sprintf(label, "current\n");
+ return sysfs_emit(label, "current\n");
case PAC1921_CHAN_POWER:
- return sprintf(label, "power\n");
+ return sysfs_emit(label, "power\n");
default:
return -EINVAL;
}
@@ -1279,8 +1279,7 @@ static int pac1921_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, pac1921_regulator_disable,
priv->vdd);
if (ret)
- return dev_err_probe(dev, ret,
- "Cannot add action for vdd regulator disposal\n");
+ return ret;
msleep(PAC1921_POWERUP_TIME_MS);
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
index 09fe88eb3fb0..ec96bb0f2ed6 100644
--- a/drivers/iio/adc/pac1934.c
+++ b/drivers/iio/adc/pac1934.c
@@ -88,6 +88,7 @@
#define PAC1934_VPOWER_3_ADDR 0x19
#define PAC1934_VPOWER_4_ADDR 0x1A
#define PAC1934_REFRESH_V_REG_ADDR 0x1F
+#define PAC1934_SLOW_REG_ADDR 0x20
#define PAC1934_CTRL_STAT_REGS_ADDR 0x1C
#define PAC1934_PID_REG_ADDR 0xFD
#define PAC1934_MID_REG_ADDR 0xFE
@@ -767,7 +768,7 @@ static int pac1934_retrieve_data(struct pac1934_chip_info *info,
* Re-schedule the work for the read registers on timeout
* (to prevent chip registers saturation)
*/
- mod_delayed_work(system_wq, &info->work_chip_rfsh,
+ mod_delayed_work(system_percpu_wq, &info->work_chip_rfsh,
msecs_to_jiffies(PAC1934_MAX_RFSH_LIMIT_MS));
}
@@ -1265,8 +1266,23 @@ static int pac1934_chip_configure(struct pac1934_chip_info *info)
/* no SLOW triggered REFRESH, clear POR */
regs[PAC1934_SLOW_REG_OFF] = 0;
- ret = i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
- ARRAY_SIZE(regs), (u8 *)regs);
+ /*
+ * Write the three bytes sequentially, as the device does not support
+ * block write.
+ */
+ ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+ regs[PAC1934_CHANNEL_DIS_REG_OFF]);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client,
+ PAC1934_CTRL_STAT_REGS_ADDR + PAC1934_NEG_PWR_REG_OFF,
+ regs[PAC1934_NEG_PWR_REG_OFF]);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, PAC1934_SLOW_REG_ADDR,
+ regs[PAC1934_SLOW_REG_OFF]);
if (ret)
return ret;
@@ -1455,13 +1471,6 @@ static int pac1934_prep_custom_attributes(struct pac1934_chip_info *info,
return 0;
}
-static void pac1934_mutex_destroy(void *data)
-{
- struct mutex *lock = data;
-
- mutex_destroy(lock);
-}
-
static const struct iio_info pac1934_info = {
.read_raw = pac1934_read_raw,
.write_raw = pac1934_write_raw,
@@ -1520,9 +1529,7 @@ static int pac1934_probe(struct i2c_client *client)
return dev_err_probe(dev, ret,
"parameter parsing returned an error\n");
- mutex_init(&info->lock);
- ret = devm_add_action_or_reset(dev, pac1934_mutex_destroy,
- &info->lock);
+ ret = devm_mutex_init(dev, &info->lock);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 7c01e33be04c..3f433064618e 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -885,10 +885,8 @@ static int palmas_gpadc_probe(struct platform_device *pdev)
return -EINVAL;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "iio_device_alloc failed\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc = iio_priv(indio_dev);
adc->dev = &pdev->dev;
diff --git a/drivers/iio/adc/qcom-spmi-rradc.c b/drivers/iio/adc/qcom-spmi-rradc.c
index f61ad0510f04..b245416bae12 100644
--- a/drivers/iio/adc/qcom-spmi-rradc.c
+++ b/drivers/iio/adc/qcom-spmi-rradc.c
@@ -769,7 +769,7 @@ static int rradc_read_raw(struct iio_dev *indio_dev,
static int rradc_read_label(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, char *label)
{
- return snprintf(label, PAGE_SIZE, "%s\n",
+ return sysfs_emit(label, "%s\n",
rradc_chans[chan->address].label);
}
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index cc326f21d398..3a17b3898bf6 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -163,12 +163,10 @@ static int rcar_gyroadc_set_power(struct rcar_gyroadc *priv, bool on)
{
struct device *dev = priv->dev;
- if (on) {
+ if (on)
return pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
- return pm_runtime_put_autosuspend(dev);
- }
+
+ return pm_runtime_put_autosuspend(dev);
}
static int rcar_gyroadc_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/rn5t618-adc.c b/drivers/iio/adc/rn5t618-adc.c
index d6f6b351f2af..f78fc795b69a 100644
--- a/drivers/iio/adc/rn5t618-adc.c
+++ b/drivers/iio/adc/rn5t618-adc.c
@@ -199,10 +199,8 @@ static int rn5t618_adc_probe(struct platform_device *pdev)
struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
iio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
- if (!iio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!iio_dev)
return -ENOMEM;
- }
adc = iio_priv(iio_dev);
adc->dev = &pdev->dev;
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index bd62daea0a3e..6721da0ed7bb 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -466,8 +466,7 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*info));
if (!indio_dev)
- return dev_err_probe(&pdev->dev, -ENOMEM,
- "failed allocating iio device\n");
+ return -ENOMEM;
info = iio_priv(indio_dev);
@@ -527,8 +526,7 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
ret = devm_add_action_or_reset(&pdev->dev,
rockchip_saradc_regulator_disable, info);
if (ret)
- return dev_err_probe(&pdev->dev, ret,
- "failed to register devm action\n");
+ return ret;
ret = regulator_get_voltage(info->vref);
if (ret < 0)
diff --git a/drivers/iio/adc/rohm-bd79112.c b/drivers/iio/adc/rohm-bd79112.c
new file mode 100644
index 000000000000..7420aa6627d5
--- /dev/null
+++ b/drivers/iio/adc/rohm-bd79112.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ROHM ADC driver for BD79112 signal monitoring hub.
+ * Copyright (C) 2025, ROHM Semiconductor.
+ *
+ * SPI communication derived from ad7923.c and ti-ads7950.c
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+#include <linux/iio/adc-helpers.h>
+#include <linux/iio/iio.h>
+
+#define BD79112_MAX_NUM_CHANNELS 32
+
+struct bd79112_data {
+ struct spi_device *spi;
+ struct regmap *map;
+ struct device *dev;
+ struct gpio_chip gc;
+ unsigned long gpio_valid_mask;
+ unsigned int vref_mv;
+ struct spi_transfer read_xfer[2];
+ struct spi_transfer write_xfer;
+ struct spi_message read_msg;
+ struct spi_message write_msg;
+ /* 16-bit TX, valid data in high byte */
+ u8 read_tx[2] __aligned(IIO_DMA_MINALIGN);
+ /* 8-bit address followed by 8-bit data */
+ u8 reg_write_tx[2];
+ /* 12-bit of ADC data or 8 bit of reg data */
+ __be16 read_rx;
+};
+
+/*
+ * The ADC data is read by issuing an SPI command matching the channel number.
+ * We treat this as a register address.
+ */
+#define BD79112_REG_AGIO0A 0x00
+#define BD79112_REG_AGIO15B 0x1f
+
+/*
+ * The ADC STATUS_FLAG appended to the ADC data is set if the ADC result is
+ * read for a channel whose input pin is muxed to be a GPIO.
+ */
+#define BD79112_ADC_STATUS_FLAG BIT(14)
+
+/*
+ * The BD79112 requires the "R/W bit" to be set for SPI register (not ADC data)
+ * reads and an "IOSET bit" to be set for read/write operations (which aren't
+ * reading the ADC data).
+ */
+#define BD79112_BIT_RW BIT(4)
+#define BD79112_BIT_IO BIT(5)
+
+#define BD79112_REG_GPI_VALUE_B8_15 (BD79112_BIT_IO | 0x0)
+#define BD79112_REG_GPI_VALUE_B0_B7 (BD79112_BIT_IO | 0x1)
+#define BD79112_REG_GPI_VALUE_A8_15 (BD79112_BIT_IO | 0x2)
+#define BD79112_REG_GPI_VALUE_A0_A7 (BD79112_BIT_IO | 0x3)
+
+#define BD79112_REG_GPI_EN_B7_B15 (BD79112_BIT_IO | 0x4)
+#define BD79112_REG_GPI_EN_B0_B7 (BD79112_BIT_IO | 0x5)
+#define BD79112_REG_GPI_EN_A8_A15 (BD79112_BIT_IO | 0x6)
+#define BD79112_REG_GPI_EN_A0_A7 (BD79112_BIT_IO | 0x7)
+
+#define BD79112_REG_GPO_EN_B7_B15 (BD79112_BIT_IO | 0x8)
+#define BD79112_REG_GPO_EN_B0_B7 (BD79112_BIT_IO | 0x9)
+#define BD79112_REG_GPO_EN_A8_A15 (BD79112_BIT_IO | 0xa)
+#define BD79112_REG_GPO_EN_A0_A7 (BD79112_BIT_IO | 0xb)
+
+#define BD79112_NUM_GPIO_EN_REGS 8
+#define BD79112_FIRST_GPIO_EN_REG BD79112_REG_GPI_EN_B7_B15
+
+#define BD79112_REG_GPO_VALUE_B8_15 (BD79112_BIT_IO | 0xc)
+#define BD79112_REG_GPO_VALUE_B0_B7 (BD79112_BIT_IO | 0xd)
+#define BD79112_REG_GPO_VALUE_A8_15 (BD79112_BIT_IO | 0xe)
+#define BD79112_REG_GPO_VALUE_A0_A7 (BD79112_BIT_IO | 0xf)
+
+#define BD79112_REG_MAX BD79112_REG_GPO_VALUE_A0_A7
+
+/*
+ * A read transaction consists of two 16-bit sequences separated by CSB.
+ * For a register read, the 'IOSET' bit must be set. For an ADC read, IOSET is cleared
+ * and ADDR equals the channel number (0 ... 31).
+ *
+ * First 16-bit sequence, MOSI as below, MISO data ignored:
+ * - SCK: | 1 | 2 | 3 | 4 | 5 .. 8 | 9 .. 16 |
+ * - MOSI:| 0 | 0 | IOSET | RW (1) | ADDR | 8'b0 |
+ *
+ * CSB released and re-acquired between these sequences
+ *
+ * Second 16-bit sequence, MISO as below, MOSI data ignored:
+ * For a register read, the data is 8 bits:
+ * - SCK: | 1 .. 8 | 9 .. 16 |
+ * - MISO:| 8'b0 | 8-bit data |
+ *
+ * For an ADC read, the data is 12 bits:
+ * - SCK: | 1 | 2 | 3 .. 4 | 5 .. 16 |
+ * - MISO:| 0 | STATUS_FLAG | 2'b0 | 12-bit data |
+ * The 'STATUS_FLAG' is set if the read input pin was configured as a GPIO.
+ */
+static int bd79112_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct bd79112_data *data = context;
+ int ret;
+
+ if (reg & BD79112_BIT_IO)
+ reg |= BD79112_BIT_RW;
+
+ data->read_tx[0] = reg;
+
+ ret = spi_sync(data->spi, &data->read_msg);
+ if (!ret)
+ *val = be16_to_cpu(data->read_rx);
+
+ return ret;
+}
+
+/*
+ * Write, single 16-bit sequence (broken down below):
+ *
+ * First 8 SCK cycles (1 .. 8), MOSI as below, MISO data ignored:
+ * - SCK: | 1 | 2 | 3 | 4 | 5 .. 8 |
+ * - MOSI:| 0 | 0 |IOSET| RW(0) | ADDR |
+ *
+ * Last 8 SCK cycles (9 .. 16), MOSI contains the register data, MISO ignored.
+ * - SCK: | 9 .. 16 |
+ * - MOSI:| data |
+ */
+static int bd79112_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct bd79112_data *data = context;
+
+ data->reg_write_tx[0] = reg;
+ data->reg_write_tx[1] = val;
+
+ return spi_sync(data->spi, &data->write_msg);
+}
+
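A minimal sketch of the command-byte composition described in the two comments
above, assuming a hypothetical ADC read of channel 12 and an access to the
GPI_EN_A0_A7 register; only the IOSET/RW layout defined in this file is used.

#include <stdio.h>

#define BIT_RW	(1u << 4)	/* set for register (non-ADC) reads */
#define BIT_IO	(1u << 5)	/* set for all register accesses */

int main(void)
{
	unsigned int adc_cmd = 12;			/* ADC data read: IOSET clear, addr = channel */
	unsigned int reg = BIT_IO | 0x7;		/* BD79112_REG_GPI_EN_A0_A7 */
	unsigned int reg_read_cmd = reg | BIT_RW;	/* register reads also need RW */

	printf("adc 0x%02x, reg read 0x%02x, reg write 0x%02x\n",
	       adc_cmd, reg_read_cmd, reg);		/* 0x0c, 0x37, 0x27 */
	return 0;
}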
+static int _get_gpio_reg(unsigned int offset, unsigned int base)
+{
+ int regoffset = offset / 8;
+
+ if (offset > 31)
+ return -EINVAL;
+
+ return base - regoffset;
+}
+
+#define GET_GPIO_BIT(offset) BIT((offset) % 8)
+#define GET_GPO_EN_REG(offset) _get_gpio_reg((offset), BD79112_REG_GPO_EN_A0_A7)
+#define GET_GPI_EN_REG(offset) _get_gpio_reg((offset), BD79112_REG_GPI_EN_A0_A7)
+#define GET_GPO_VAL_REG(offset) _get_gpio_reg((offset), BD79112_REG_GPO_VALUE_A0_A7)
+#define GET_GPI_VAL_REG(offset) _get_gpio_reg((offset), BD79112_REG_GPI_VALUE_A0_A7)
+
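A worked example of the register/bit mapping performed by the helpers above,
assuming a hypothetical GPIO offset of 13: offset / 8 selects how many
registers below the A0_A7 base to go, and offset % 8 selects the bit within
that register.

#include <stdio.h>

int main(void)
{
	unsigned int offset = 13;			/* hypothetical pin */
	unsigned int base = 0x20u | 0xb;		/* BD79112_REG_GPO_EN_A0_A7 */
	unsigned int reg = base - offset / 8;		/* 0x2a = GPO_EN_A8_A15 */
	unsigned int bit = 1u << (offset % 8);		/* BIT(5) */

	printf("reg 0x%02x, bit 0x%02x\n", reg, bit);
	return 0;
}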
+static const struct regmap_range bd79112_volatile_ro_ranges[] = {
+ /* Read ADC data */
+ regmap_reg_range(BD79112_REG_AGIO0A, BD79112_REG_AGIO15B),
+ /* GPI state */
+ regmap_reg_range(BD79112_REG_GPI_VALUE_B8_15, BD79112_REG_GPI_VALUE_A0_A7),
+};
+
+static const struct regmap_access_table bd79112_volatile_regs = {
+ .yes_ranges = &bd79112_volatile_ro_ranges[0],
+ .n_yes_ranges = ARRAY_SIZE(bd71815_volatile_ro_ranges),
+};
+
+static const struct regmap_access_table bd79112_ro_regs = {
+ .no_ranges = &bd79112_volatile_ro_ranges[0],
+ .n_no_ranges = ARRAY_SIZE(bd71815_volatile_ro_ranges),
+};
+
+static const struct regmap_config bd79112_regmap = {
+ .reg_read = bd79112_reg_read,
+ .reg_write = bd79112_reg_write,
+ .volatile_table = &bd79112_volatile_regs,
+ .wr_table = &bd79112_ro_regs,
+ .cache_type = REGCACHE_MAPLE,
+ .max_register = BD79112_REG_MAX,
+};
+
+static int bd79112_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long m)
+{
+ struct bd79112_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_read(data->map, chan->channel, val);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = data->vref_mv;
+ *val2 = 12;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bd79112_info = {
+ .read_raw = bd79112_read_raw,
+};
+
+static const struct iio_chan_spec bd79112_chan_template = {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .indexed = 1,
+};
+
+static int bd79112_gpio_init_valid_mask(struct gpio_chip *gc,
+ unsigned long *valid_mask,
+ unsigned int ngpios)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+
+ *valid_mask = data->gpio_valid_mask;
+
+ return 0;
+}
+
+static int bd79112_gpio_dir_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ unsigned int reg, bit, val;
+ int ret;
+
+ bit = GET_GPIO_BIT(offset);
+ reg = GET_GPO_EN_REG(offset);
+
+ ret = regmap_read(data->map, reg, &val);
+ if (ret)
+ return ret;
+
+ if (bit & val)
+ return GPIO_LINE_DIRECTION_OUT;
+
+ reg = GET_GPI_EN_REG(offset);
+ ret = regmap_read(data->map, reg, &val);
+ if (ret)
+ return ret;
+
+ if (bit & val)
+ return GPIO_LINE_DIRECTION_IN;
+
+ /*
+ * Ouch. Seems the pin is an ADC input - this shouldn't happen, as changing
+ * the mux at runtime is not supported and non-GPIO pins should be invalidated
+ * by the valid_mask at probe. Maybe someone wrote a register bypassing
+ * the driver?
+ */
+ dev_err(data->dev, "Pin not a GPIO\n");
+
+ return -EINVAL;
+}
+
+static int bd79112_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ unsigned int reg, bit, val;
+ int ret;
+
+ bit = GET_GPIO_BIT(offset);
+ reg = GET_GPI_VAL_REG(offset);
+
+ ret = regmap_read(data->map, reg, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & bit);
+}
+
+static int bd79112_gpio_set(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ unsigned int reg, bit;
+
+ bit = GET_GPIO_BIT(offset);
+ reg = GET_GPO_VAL_REG(offset);
+
+ return regmap_assign_bits(data->map, reg, bit, value);
+}
+
+static int bd79112_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ unsigned long i, bank_mask;
+
+ for_each_set_clump8(i, bank_mask, mask, gc->ngpio) {
+ unsigned long bank_bits;
+ unsigned int reg;
+ int ret;
+
+ bank_bits = bitmap_get_value8(bits, i);
+ reg = BD79112_REG_GPO_VALUE_A0_A7 - i / 8;
+ ret = regmap_update_bits(data->map, reg, bank_mask, bank_bits);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bd79112_gpio_dir_set(struct bd79112_data *data, unsigned int offset,
+ int dir)
+{
+ unsigned int gpi_reg, gpo_reg, bit;
+ int ret;
+
+ bit = GET_GPIO_BIT(offset);
+ gpi_reg = GET_GPI_EN_REG(offset);
+ gpo_reg = GET_GPO_EN_REG(offset);
+
+ if (dir == GPIO_LINE_DIRECTION_OUT) {
+ ret = regmap_clear_bits(data->map, gpi_reg, bit);
+ if (ret)
+ return ret;
+
+ return regmap_set_bits(data->map, gpo_reg, bit);
+ }
+
+ ret = regmap_set_bits(data->map, gpi_reg, bit);
+ if (ret)
+ return ret;
+
+ return regmap_clear_bits(data->map, gpo_reg, bit);
+}
+
+static int bd79112_gpio_input(struct gpio_chip *gc, unsigned int offset)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+
+ return bd79112_gpio_dir_set(data, offset, GPIO_LINE_DIRECTION_IN);
+}
+
+static int bd79112_gpio_output(struct gpio_chip *gc, unsigned int offset,
+ int value)
+{
+ struct bd79112_data *data = gpiochip_get_data(gc);
+ int ret;
+
+ ret = bd79112_gpio_set(gc, offset, value);
+ if (ret)
+ return ret;
+
+ return bd79112_gpio_dir_set(data, offset, GPIO_LINE_DIRECTION_OUT);
+}
+
+static const struct gpio_chip bd79112_gpio_chip = {
+ .label = "bd79112-gpio",
+ .get_direction = bd79112_gpio_dir_get,
+ .direction_input = bd79112_gpio_input,
+ .direction_output = bd79112_gpio_output,
+ .get = bd79112_gpio_get,
+ .set = bd79112_gpio_set,
+ .set_multiple = bd79112_gpio_set_multiple,
+ .init_valid_mask = bd79112_gpio_init_valid_mask,
+ .can_sleep = true,
+ .ngpio = 32,
+ .base = -1,
+};
+
+static unsigned int bd79112_get_gpio_pins(const struct iio_chan_spec *cs, int num_channels)
+{
+ unsigned int i, gpio_channels;
+
+ /*
+ * Let's initialize the mux config to say that all 32 channels are
+ * GPIOs. Then we can just loop through the iio_chan_spec and clear the
+ * bits for the ADC channels that were found.
+ */
+ gpio_channels = GENMASK(31, 0);
+ for (i = 0; i < num_channels; i++)
+ gpio_channels &= ~BIT(cs[i].channel);
+
+ return gpio_channels;
+}
+
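A standalone sketch of the mask computation in bd79112_get_gpio_pins() above,
assuming a hypothetical firmware description that declares only ADC channels 0
and 3; every other pin stays flagged as a GPIO.

#include <stdio.h>

int main(void)
{
	int channels[] = { 0, 3 };		/* hypothetical ADC channels */
	unsigned int gpio_pins = 0xffffffffu;	/* GENMASK(31, 0) */

	for (unsigned int i = 0; i < sizeof(channels) / sizeof(channels[0]); i++)
		gpio_pins &= ~(1u << channels[i]);

	printf("gpio_pins = 0x%08x\n", gpio_pins);	/* 0xfffffff6 */
	return 0;
}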
+/* ADC channels as named in the data-sheet */
+static const char * const bd79112_chan_names[] = {
+ "AGIO0A", "AGIO1A", "AGIO2A", "AGIO3A", /* 0 - 3 */
+ "AGIO4A", "AGIO5A", "AGIO6A", "AGIO7A", /* 4 - 7 */
+ "AGIO8A", "AGIO9A", "AGIO10A", "AGIO11A", /* 8 - 11 */
+ "AGIO12A", "AGIO13A", "AGIO14A", "AGIO15A", /* 12 - 15 */
+ "AGIO0B", "AGIO1B", "AGIO2B", "AGIO3B", /* 16 - 19 */
+ "AGIO4B", "AGIO5B", "AGIO6B", "AGIO7B", /* 20 - 23 */
+ "AGIO8B", "AGIO9B", "AGIO10B", "AGIO11B", /* 24 - 27 */
+ "AGIO12B", "AGIO13B", "AGIO14B", "AGIO15B", /* 28 - 31 */
+};
+
+static int bd79112_probe(struct spi_device *spi)
+{
+ struct bd79112_data *data;
+ struct iio_dev *iio_dev;
+ struct iio_chan_spec *cs;
+ struct device *dev = &spi->dev;
+ unsigned long gpio_pins, pin;
+ unsigned int i;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(iio_dev);
+ data->spi = spi;
+ data->dev = dev;
+ data->map = devm_regmap_init(dev, NULL, data, &bd79112_regmap);
+ if (IS_ERR(data->map))
+ return dev_err_probe(dev, PTR_ERR(data->map),
+ "Failed to initialize Regmap\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "vdd");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get the Vdd\n");
+
+ data->vref_mv = ret / 1000;
+
+ ret = devm_regulator_get_enable(dev, "iovdd");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to enable I/O voltage\n");
+
+ data->read_xfer[0].tx_buf = &data->read_tx[0];
+ data->read_xfer[0].len = sizeof(data->read_tx);
+ data->read_xfer[0].cs_change = 1;
+ data->read_xfer[1].rx_buf = &data->read_rx;
+ data->read_xfer[1].len = sizeof(data->read_rx);
+ spi_message_init_with_transfers(&data->read_msg, data->read_xfer, 2);
+ ret = devm_spi_optimize_message(dev, spi, &data->read_msg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to optimize SPI read message\n");
+
+ data->write_xfer.tx_buf = &data->reg_write_tx[0];
+ data->write_xfer.len = sizeof(data->reg_write_tx);
+ spi_message_init_with_transfers(&data->write_msg, &data->write_xfer, 1);
+ ret = devm_spi_optimize_message(dev, spi, &data->write_msg);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to optimize SPI write message\n");
+
+ ret = devm_iio_adc_device_alloc_chaninfo_se(dev, &bd79112_chan_template,
+ BD79112_MAX_NUM_CHANNELS - 1,
+ &cs);
+
+ /* Register all pins as GPIOs if there are no ADC channels */
+ if (ret == -ENOENT)
+ goto register_gpios;
+
+ if (ret < 0)
+ return ret;
+
+ iio_dev->num_channels = ret;
+ iio_dev->channels = cs;
+
+ for (i = 0; i < iio_dev->num_channels; i++)
+ cs[i].datasheet_name = bd79112_chan_names[cs[i].channel];
+
+ iio_dev->info = &bd79112_info;
+ iio_dev->name = "bd79112";
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ /*
+ * Ensure all channels are ADCs. This allows us to register the IIO
+ * device early (before checking which pins are to be used for GPIO)
+ * without having to worry about some pins being initially used for
+ * GPIO.
+ */
+ for (i = 0; i < BD79112_NUM_GPIO_EN_REGS; i++) {
+ ret = regmap_write(data->map, BD79112_FIRST_GPIO_EN_REG + i, 0);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to initialize channels\n");
+ }
+
+ ret = devm_iio_device_register(data->dev, iio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Failed to register ADC\n");
+
+register_gpios:
+ gpio_pins = bd79112_get_gpio_pins(iio_dev->channels,
+ iio_dev->num_channels);
+
+ /* If all channels are reserved for ADC, then we're done. */
+ if (!gpio_pins)
+ return 0;
+
+ /* Default all the GPIO pins to GPI */
+ for_each_set_bit(pin, &gpio_pins, BD79112_MAX_NUM_CHANNELS) {
+ ret = bd79112_gpio_dir_set(data, pin, GPIO_LINE_DIRECTION_IN);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to mark pin as GPI\n");
+ }
+
+ data->gpio_valid_mask = gpio_pins;
+ data->gc = bd79112_gpio_chip;
+ data->gc.parent = dev;
+
+ return devm_gpiochip_add_data(dev, &data->gc, data);
+}
+
+static const struct of_device_id bd79112_of_match[] = {
+ { .compatible = "rohm,bd79112" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, bd79112_of_match);
+
+static const struct spi_device_id bd79112_id[] = {
+ { "bd79112" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, bd79112_id);
+
+static struct spi_driver bd79112_driver = {
+ .driver = {
+ .name = "bd79112",
+ .of_match_table = bd79112_of_match,
+ },
+ .probe = bd79112_probe,
+ .id_table = bd79112_id,
+};
+module_spi_driver(bd79112_driver);
+
+MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
+MODULE_DESCRIPTION("Driver for ROHM BD79112 ADC/GPIO");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DRIVER");
diff --git a/drivers/iio/adc/rohm-bd79124.c b/drivers/iio/adc/rohm-bd79124.c
index 06c55c8da93f..fc0452749b79 100644
--- a/drivers/iio/adc/rohm-bd79124.c
+++ b/drivers/iio/adc/rohm-bd79124.c
@@ -126,13 +126,8 @@ struct bd79124_data {
};
static const struct regmap_range bd79124_ro_ranges[] = {
- {
- .range_min = BD79124_REG_EVENT_FLAG,
- .range_max = BD79124_REG_EVENT_FLAG,
- }, {
- .range_min = BD79124_REG_RECENT_CH0_LSB,
- .range_max = BD79124_REG_RECENT_CH7_MSB,
- },
+ regmap_reg_range(BD79124_REG_EVENT_FLAG, BD79124_REG_EVENT_FLAG),
+ regmap_reg_range(BD79124_REG_RECENT_CH0_LSB, BD79124_REG_RECENT_CH7_MSB),
};
static const struct regmap_access_table bd79124_ro_regs = {
@@ -141,22 +136,11 @@ static const struct regmap_access_table bd79124_ro_regs = {
};
static const struct regmap_range bd79124_volatile_ranges[] = {
- {
- .range_min = BD79124_REG_RECENT_CH0_LSB,
- .range_max = BD79124_REG_RECENT_CH7_MSB,
- }, {
- .range_min = BD79124_REG_EVENT_FLAG,
- .range_max = BD79124_REG_EVENT_FLAG,
- }, {
- .range_min = BD79124_REG_EVENT_FLAG_HI,
- .range_max = BD79124_REG_EVENT_FLAG_HI,
- }, {
- .range_min = BD79124_REG_EVENT_FLAG_LO,
- .range_max = BD79124_REG_EVENT_FLAG_LO,
- }, {
- .range_min = BD79124_REG_SYSTEM_STATUS,
- .range_max = BD79124_REG_SYSTEM_STATUS,
- },
+ regmap_reg_range(BD79124_REG_RECENT_CH0_LSB, BD79124_REG_RECENT_CH7_MSB),
+ regmap_reg_range(BD79124_REG_EVENT_FLAG, BD79124_REG_EVENT_FLAG),
+ regmap_reg_range(BD79124_REG_EVENT_FLAG_HI, BD79124_REG_EVENT_FLAG_HI),
+ regmap_reg_range(BD79124_REG_EVENT_FLAG_LO, BD79124_REG_EVENT_FLAG_LO),
+ regmap_reg_range(BD79124_REG_SYSTEM_STATUS, BD79124_REG_SYSTEM_STATUS),
};
static const struct regmap_access_table bd79124_volatile_regs = {
@@ -165,13 +149,8 @@ static const struct regmap_access_table bd79124_volatile_regs = {
};
static const struct regmap_range bd79124_precious_ranges[] = {
- {
- .range_min = BD79124_REG_EVENT_FLAG_HI,
- .range_max = BD79124_REG_EVENT_FLAG_HI,
- }, {
- .range_min = BD79124_REG_EVENT_FLAG_LO,
- .range_max = BD79124_REG_EVENT_FLAG_LO,
- },
+ regmap_reg_range(BD79124_REG_EVENT_FLAG_HI, BD79124_REG_EVENT_FLAG_HI),
+ regmap_reg_range(BD79124_REG_EVENT_FLAG_LO, BD79124_REG_EVENT_FLAG_LO),
};
static const struct regmap_access_table bd79124_precious_regs = {
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index ad9738228b7f..2bf3a09ac6b0 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -300,7 +300,7 @@ static int rtq6056_adc_read_channel(struct rtq6056_priv *priv,
return IIO_VAL_INT;
case RTQ6056_REG_SHUNTVOLT:
case RTQ6056_REG_CURRENT:
- *val = sign_extend32(regval, 16);
+ *val = sign_extend32(regval, 15);
return IIO_VAL_INT;
default:
return -EINVAL;
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index cadb0446bc29..1010e0511b3e 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -248,7 +248,6 @@ static int rzg2l_adc_conversion(struct iio_dev *indio_dev, struct rzg2l_adc *adc
rzg2l_adc_start_stop(adc, false);
rpm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -410,7 +409,6 @@ static int rzg2l_adc_hw_init(struct device *dev, struct rzg2l_adc *adc)
rzg2l_adc_writel(adc, RZG2L_ADM(3), reg);
exit_hw_init:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
diff --git a/drivers/iio/adc/rzn1-adc.c b/drivers/iio/adc/rzn1-adc.c
new file mode 100644
index 000000000000..93b0feef8ea0
--- /dev/null
+++ b/drivers/iio/adc/rzn1-adc.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/N1 ADC driver
+ *
+ * Copyright (C) 2025 Schneider-Electric
+ *
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ *
+ * The RZ/N1 ADC controller can handle channels from its internal ADC1 and/or
+ * ADC2 cores. The driver uses the ADC1 and/or ADC2 core depending on whether
+ * the related power supplies (AVDD and VREF) are described in the device tree.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#define RZN1_ADC_CONTROL_REG 0x02c
+#define RZN1_ADC_CONTROL_ADC_BUSY BIT(6)
+
+#define RZN1_ADC_FORCE_REG 0x030
+#define RZN1_ADC_SET_FORCE_REG 0x034
+#define RZN1_ADC_CLEAR_FORCE_REG 0x038
+#define RZN1_ADC_FORCE_VC(_n) BIT(_n)
+
+#define RZN1_ADC_CONFIG_REG 0x040
+#define RZN1_ADC_CONFIG_ADC_POWER_DOWN BIT(3)
+
+#define RZN1_ADC_VC_REG(_n) (0x0c0 + 4 * (_n))
+#define RZN1_ADC_VC_ADC2_ENABLE BIT(16)
+#define RZN1_ADC_VC_ADC1_ENABLE BIT(15)
+#define RZN1_ADC_VC_ADC2_CHANNEL_SEL_MASK GENMASK(5, 3)
+#define RZN1_ADC_VC_ADC1_CHANNEL_SEL_MASK GENMASK(2, 0)
+
+#define RZN1_ADC_ADC1_DATA_REG(_n) (0x100 + 4 * (_n))
+#define RZN1_ADC_ADC2_DATA_REG(_n) (0x140 + 4 * (_n))
+#define RZN1_ADC_ADCX_DATA_DATA_MASK GENMASK(11, 0)
+
+#define RZN1_ADC_NO_CHANNEL -1
+
+#define RZN1_ADC_CHANNEL_SHARED_SCALE(_ch, _ds_name) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = (_ds_name), \
+}
+
+#define RZN1_ADC_CHANNEL_SEPARATED_SCALE(_ch, _ds_name) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = (_ds_name), \
+}
+
+/*
+ * 8 ADC1_IN signals exist, numbered 0..4 and 6..8;
+ * ADCx_IN5 does not exist in the RZ/N1 datasheet.
+ */
+static struct iio_chan_spec rzn1_adc1_channels[] = {
+ RZN1_ADC_CHANNEL_SHARED_SCALE(0, "ADC1_IN0"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(1, "ADC1_IN1"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(2, "ADC1_IN2"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(3, "ADC1_IN3"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(4, "ADC1_IN4"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(5, "ADC1_IN6"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(6, "ADC1_IN7"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(7, "ADC1_IN8"),
+};
+
+static struct iio_chan_spec rzn1_adc2_channels[] = {
+ RZN1_ADC_CHANNEL_SHARED_SCALE(8, "ADC2_IN0"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(9, "ADC2_IN1"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(10, "ADC2_IN2"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(11, "ADC2_IN3"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(12, "ADC2_IN4"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(13, "ADC2_IN6"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(14, "ADC2_IN7"),
+ RZN1_ADC_CHANNEL_SHARED_SCALE(15, "ADC2_IN8"),
+};
+
+/*
+ * If both ADC cores are used, the scale cannot be shared: it is
+ * based on the Vref connected to each ADC core.
+ */
+static struct iio_chan_spec rzn1_adc1_adc2_channels[] = {
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(0, "ADC1_IN0"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(1, "ADC1_IN1"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(2, "ADC1_IN2"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(3, "ADC1_IN3"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(4, "ADC1_IN4"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(5, "ADC1_IN6"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(6, "ADC1_IN7"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(7, "ADC1_IN8"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(8, "ADC2_IN0"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(9, "ADC2_IN1"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(10, "ADC2_IN2"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(11, "ADC2_IN3"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(12, "ADC2_IN4"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(13, "ADC2_IN6"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(14, "ADC2_IN7"),
+ RZN1_ADC_CHANNEL_SEPARATED_SCALE(15, "ADC2_IN8"),
+};
+
+struct rzn1_adc {
+ struct device *dev;
+ void __iomem *regs;
+ struct mutex lock; /* ADC lock */
+ int adc1_vref_mV; /* ADC1 Vref in mV. Negative if ADC1 is not used */
+ int adc2_vref_mV; /* ADC2 Vref in mV. Negative if ADC2 is not used */
+};
+
+static int rzn1_adc_power(struct rzn1_adc *rzn1_adc, bool power)
+{
+ u32 v;
+
+ writel(power ? 0 : RZN1_ADC_CONFIG_ADC_POWER_DOWN,
+ rzn1_adc->regs + RZN1_ADC_CONFIG_REG);
+
+ /* Wait for the ADC_BUSY to clear */
+ return readl_poll_timeout_atomic(rzn1_adc->regs + RZN1_ADC_CONTROL_REG,
+ v, !(v & RZN1_ADC_CONTROL_ADC_BUSY),
+ 0, 500);
+}
+
+static void rzn1_adc_vc_setup_conversion(struct rzn1_adc *rzn1_adc, u32 ch,
+ int adc1_ch, int adc2_ch)
+{
+ u32 vc = 0;
+
+ if (adc1_ch != RZN1_ADC_NO_CHANNEL)
+ vc |= RZN1_ADC_VC_ADC1_ENABLE |
+ FIELD_PREP(RZN1_ADC_VC_ADC1_CHANNEL_SEL_MASK, adc1_ch);
+
+ if (adc2_ch != RZN1_ADC_NO_CHANNEL)
+ vc |= RZN1_ADC_VC_ADC2_ENABLE |
+ FIELD_PREP(RZN1_ADC_VC_ADC2_CHANNEL_SEL_MASK, adc2_ch);
+
+ writel(vc, rzn1_adc->regs + RZN1_ADC_VC_REG(ch));
+}
+
+static int rzn1_adc_vc_start_conversion(struct rzn1_adc *rzn1_adc, u32 ch)
+{
+ u32 val;
+
+ val = readl(rzn1_adc->regs + RZN1_ADC_FORCE_REG);
+ if (val & RZN1_ADC_FORCE_VC(ch))
+ return -EBUSY;
+
+ writel(RZN1_ADC_FORCE_VC(ch), rzn1_adc->regs + RZN1_ADC_SET_FORCE_REG);
+
+ return 0;
+}
+
+static void rzn1_adc_vc_stop_conversion(struct rzn1_adc *rzn1_adc, u32 ch)
+{
+ writel(RZN1_ADC_FORCE_VC(ch), rzn1_adc->regs + RZN1_ADC_CLEAR_FORCE_REG);
+}
+
+static int rzn1_adc_vc_wait_conversion(struct rzn1_adc *rzn1_adc, u32 ch,
+ u32 *adc1_data, u32 *adc2_data)
+{
+ u32 data_reg;
+ int ret;
+ u32 v;
+
+ /*
+ * When a VC is selected, it needs 20 ADC clocks to perform the
+ * conversion.
+ *
+ * The worst case is when all 16 VCs need to perform a conversion and
+ * our VC has the lowest priority.
+ *
+ * In that case, the conversion is performed in 16 * 20 ADC clocks.
+ *
+ * The ADC clock can be set from 4 MHz to 20 MHz. This leads to a worst
+ * case of 16 * 20 / 4 MHz = 80 µs.
+ *
+ * Round this up to 100 µs.
+ */
+
+ /* Wait for the ADC_FORCE_VC(n) to clear */
+ ret = readl_poll_timeout_atomic(rzn1_adc->regs + RZN1_ADC_FORCE_REG,
+ v, !(v & RZN1_ADC_FORCE_VC(ch)),
+ 0, 100);
+ if (ret)
+ return ret;
+
+ if (adc1_data) {
+ data_reg = readl(rzn1_adc->regs + RZN1_ADC_ADC1_DATA_REG(ch));
+ *adc1_data = FIELD_GET(RZN1_ADC_ADCX_DATA_DATA_MASK, data_reg);
+ }
+
+ if (adc2_data) {
+ data_reg = readl(rzn1_adc->regs + RZN1_ADC_ADC2_DATA_REG(ch));
+ *adc2_data = FIELD_GET(RZN1_ADC_ADCX_DATA_DATA_MASK, data_reg);
+ }
+
+ return 0;
+}
+
+static int rzn1_adc_read_raw_ch(struct rzn1_adc *rzn1_adc, unsigned int chan, int *val)
+{
+ u32 *adc1_data, *adc2_data;
+ int adc1_ch, adc2_ch;
+ u32 adc_data;
+ int ret;
+
+ /*
+ * IIO chans are decoupled from the chans used in the rzn1_adc_vc_*()
+ * functions. The RZ/N1 ADC VC controller can handle, on a single VC
+ * chan, one channel from the ADC1 core and one channel from the ADC2
+ * core.
+ *
+ * Even though IIO chans are currently mapped 1:1 to ADC core chans and
+ * so use only a chan from ADC1 or a chan from ADC2, future improvements
+ * could define an IIO chan that uses one chan from each core.
+ */
+
+ if (chan < 8) {
+ /* chan 0..7 used to get ADC1 ch 0..7 */
+ adc1_ch = chan;
+ adc1_data = &adc_data;
+ adc2_ch = RZN1_ADC_NO_CHANNEL;
+ adc2_data = NULL;
+ } else if (chan < 16) {
+ /* chan 8..15 used to get ADC2 ch 0..7 */
+ adc1_ch = RZN1_ADC_NO_CHANNEL;
+ adc1_data = NULL;
+ adc2_ch = chan - 8;
+ adc2_data = &adc_data;
+ } else {
+ return -EINVAL;
+ }
+
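+ /*
+ * Scope-based runtime-PM guard: the PM usage count taken here is
+ * released automatically (via autosuspend) when the function returns.
+ */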
+ ACQUIRE(pm_runtime_active_auto_try_enabled, pm)(rzn1_adc->dev);
+ ret = ACQUIRE_ERR(pm_runtime_active_auto_try_enabled, &pm);
+ if (ret < 0)
+ return ret;
+
+ scoped_guard(mutex, &rzn1_adc->lock) {
+ rzn1_adc_vc_setup_conversion(rzn1_adc, chan, adc1_ch, adc2_ch);
+
+ ret = rzn1_adc_vc_start_conversion(rzn1_adc, chan);
+ if (ret)
+ return ret;
+
+ ret = rzn1_adc_vc_wait_conversion(rzn1_adc, chan, adc1_data, adc2_data);
+ if (ret) {
+ rzn1_adc_vc_stop_conversion(rzn1_adc, chan);
+ return ret;
+ }
+ }
+
+ *val = adc_data;
+
+ return 0;
+}
+
+static int rzn1_adc_get_vref_mV(struct rzn1_adc *rzn1_adc, unsigned int chan)
+{
+ /* chan 0..7 use ADC1 ch 0..7. Vref related to ADC1 core */
+ if (chan < 8)
+ return rzn1_adc->adc1_vref_mV;
+
+ /* chan 8..15 use ADC2 ch 0..7. Vref related to ADC2 core */
+ if (chan < 16)
+ return rzn1_adc->adc2_vref_mV;
+
+ return -EINVAL;
+}
+
+static int rzn1_adc_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct rzn1_adc *rzn1_adc = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = rzn1_adc_read_raw_ch(rzn1_adc, chan->channel, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = rzn1_adc_get_vref_mV(rzn1_adc, chan->channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+}
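+
+/*
+ * Worked example for the scale reported above (Vref value hypothetical):
+ * IIO_VAL_FRACTIONAL_LOG2 means scale = val / 2^val2, so with a 1800 mV
+ * reference each LSB of the 12-bit result is 1800 / 4096 ~= 0.4395 mV,
+ * and user space computes voltage_mV = raw * 1800 / 4096 (raw = 2048
+ * gives ~900 mV).
+ */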
+
+static const struct iio_info rzn1_adc_info = {
+ .read_raw = &rzn1_adc_read_raw,
+};
+
+static int rzn1_adc_set_iio_dev_channels(struct rzn1_adc *rzn1_adc,
+ struct iio_dev *indio_dev)
+{
+ /*
+ * When an ADC core is not used, its related vref_mV is set to a
+ * negative error code. Use the correct IIO channels table based on
+ * those vref_mV values.
+ */
+ if (rzn1_adc->adc1_vref_mV >= 0) {
+ if (rzn1_adc->adc2_vref_mV >= 0) {
+ indio_dev->channels = rzn1_adc1_adc2_channels;
+ indio_dev->num_channels = ARRAY_SIZE(rzn1_adc1_adc2_channels);
+ } else {
+ indio_dev->channels = rzn1_adc1_channels;
+ indio_dev->num_channels = ARRAY_SIZE(rzn1_adc1_channels);
+ }
+ return 0;
+ }
+
+ if (rzn1_adc->adc2_vref_mV >= 0) {
+ indio_dev->channels = rzn1_adc2_channels;
+ indio_dev->num_channels = ARRAY_SIZE(rzn1_adc2_channels);
+ return 0;
+ }
+
+ return dev_err_probe(rzn1_adc->dev, -ENODEV,
+ "Failed to set IIO channels, no ADC core used\n");
+}
+
+static int rzn1_adc_core_get_regulators(struct rzn1_adc *rzn1_adc,
+ int *adc_vref_mV,
+ const char *avdd_name, const char *vref_name)
+{
+ struct device *dev = rzn1_adc->dev;
+ int ret;
+
+ /*
+ * For a given ADC core (ADC1 or ADC2), both regulators (AVDD and VREF)
+ * must be available for the ADC core to be used.
+ *
+ * We use the regulators' presence to determine whether the related ADC
+ * core is used: if both regulators are available, the ADC core is used,
+ * otherwise it is not.
+ *
+ * The adc_vref_mV value is set to a negative error code (-ENODEV) when
+ * the ADC core is not used. Otherwise it is set to the Vref mV value.
+ */
+
+ *adc_vref_mV = -ENODEV;
+
+ ret = devm_regulator_get_enable_optional(dev, avdd_name);
+ if (ret == -ENODEV)
+ return 0;
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get '%s' regulator\n",
+ avdd_name);
+
+ ret = devm_regulator_get_enable_read_voltage(dev, vref_name);
+ if (ret == -ENODEV)
+ return 0;
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get '%s' regulator\n",
+ vref_name);
+
+ /*
+ * Both regulators are available.
+ * Set adc_vref_mV to the Vref value in mV. As this value is positive,
+ * it also signals that the ADC core is used.
+ */
+ *adc_vref_mV = ret / 1000;
+
+ return 0;
+}
+
+static int rzn1_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct rzn1_adc *rzn1_adc;
+ struct clk *clk;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*rzn1_adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ rzn1_adc = iio_priv(indio_dev);
+ rzn1_adc->dev = dev;
+
+ ret = devm_mutex_init(dev, &rzn1_adc->lock);
+ if (ret)
+ return ret;
+
+ rzn1_adc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(rzn1_adc->regs))
+ return PTR_ERR(rzn1_adc->regs);
+
+ clk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Failed to get pclk\n");
+
+ clk = devm_clk_get_enabled(dev, "adc");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "Failed to get adc clk\n");
+
+ ret = rzn1_adc_core_get_regulators(rzn1_adc, &rzn1_adc->adc1_vref_mV,
+ "adc1-avdd", "adc1-vref");
+ if (ret)
+ return ret;
+
+ ret = rzn1_adc_core_get_regulators(rzn1_adc, &rzn1_adc->adc2_vref_mV,
+ "adc2-avdd", "adc2-vref");
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, rzn1_adc);
+
+ indio_dev->name = "rzn1-adc";
+ indio_dev->info = &rzn1_adc_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ ret = rzn1_adc_set_iio_dev_channels(rzn1_adc, indio_dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_autosuspend_delay(dev, 500);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable runtime PM\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static int rzn1_adc_pm_runtime_suspend(struct device *dev)
+{
+ struct rzn1_adc *rzn1_adc = dev_get_drvdata(dev);
+
+ return rzn1_adc_power(rzn1_adc, false);
+}
+
+static int rzn1_adc_pm_runtime_resume(struct device *dev)
+{
+ struct rzn1_adc *rzn1_adc = dev_get_drvdata(dev);
+
+ return rzn1_adc_power(rzn1_adc, true);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(rzn1_adc_pm_ops,
+ rzn1_adc_pm_runtime_suspend,
+ rzn1_adc_pm_runtime_resume,
+ NULL);
+
+static const struct of_device_id rzn1_adc_of_match[] = {
+ { .compatible = "renesas,rzn1-adc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rzn1_adc_of_match);
+
+static struct platform_driver rzn1_adc_driver = {
+ .probe = rzn1_adc_probe,
+ .driver = {
+ .name = "rzn1-adc",
+ .of_match_table = rzn1_adc_of_match,
+ .pm = pm_ptr(&rzn1_adc_pm_ops),
+ },
+};
+module_platform_driver(rzn1_adc_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("Renesas RZ/N1 ADC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/rzt2h_adc.c b/drivers/iio/adc/rzt2h_adc.c
new file mode 100644
index 000000000000..33ce5cc44ff4
--- /dev/null
+++ b/drivers/iio/adc/rzt2h_adc.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/iio/adc-helpers.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+
+#define RZT2H_ADCSR_REG 0x00
+#define RZT2H_ADCSR_ADIE_MASK BIT(12)
+#define RZT2H_ADCSR_ADCS_MASK GENMASK(14, 13)
+#define RZT2H_ADCSR_ADCS_SINGLE 0b00
+#define RZT2H_ADCSR_ADST_MASK BIT(15)
+
+#define RZT2H_ADANSA0_REG 0x04
+#define RZT2H_ADANSA0_CH_MASK(x) BIT(x)
+
+#define RZT2H_ADDR_REG(x) (0x20 + 0x2 * (x))
+
+#define RZT2H_ADCALCTL_REG 0x1f0
+#define RZT2H_ADCALCTL_CAL_MASK BIT(0)
+#define RZT2H_ADCALCTL_CAL_RDY_MASK BIT(1)
+#define RZT2H_ADCALCTL_CAL_ERR_MASK BIT(2)
+
+#define RZT2H_ADC_MAX_CHANNELS 16
+
+struct rzt2h_adc {
+ void __iomem *base;
+ struct device *dev;
+
+ struct completion completion;
+ /* lock to protect against concurrent access to the device */
+ struct mutex lock;
+
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ unsigned int max_channels;
+};
+
+static void rzt2h_adc_start(struct rzt2h_adc *adc, unsigned int conversion_type)
+{
+ u16 reg;
+
+ reg = readw(adc->base + RZT2H_ADCSR_REG);
+
+ /* Set conversion type */
+ FIELD_MODIFY(RZT2H_ADCSR_ADCS_MASK, &reg, conversion_type);
+
+ /* Set end of conversion interrupt and start bit. */
+ reg |= RZT2H_ADCSR_ADIE_MASK | RZT2H_ADCSR_ADST_MASK;
+
+ writew(reg, adc->base + RZT2H_ADCSR_REG);
+}
+
+static void rzt2h_adc_stop(struct rzt2h_adc *adc)
+{
+ u16 reg;
+
+ reg = readw(adc->base + RZT2H_ADCSR_REG);
+
+ /* Clear end of conversion interrupt and start bit. */
+ reg &= ~(RZT2H_ADCSR_ADIE_MASK | RZT2H_ADCSR_ADST_MASK);
+
+ writew(reg, adc->base + RZT2H_ADCSR_REG);
+}
+
+static int rzt2h_adc_read_single(struct rzt2h_adc *adc, unsigned int ch, int *val)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(adc->dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&adc->lock);
+
+ reinit_completion(&adc->completion);
+
+ /* Enable a single channel */
+ writew(RZT2H_ADANSA0_CH_MASK(ch), adc->base + RZT2H_ADANSA0_REG);
+
+ rzt2h_adc_start(adc, RZT2H_ADCSR_ADCS_SINGLE);
+
+ /*
+ * Datasheet Page 2770, Table 41.1:
+ * 0.32us per channel when sample-and-hold circuits are not in use.
+ */
+ ret = wait_for_completion_timeout(&adc->completion, usecs_to_jiffies(1));
+ if (!ret) {
+ ret = -ETIMEDOUT;
+ goto disable;
+ }
+
+ *val = readw(adc->base + RZT2H_ADDR_REG(ch));
+ ret = IIO_VAL_INT;
+
+disable:
+ rzt2h_adc_stop(adc);
+
+ mutex_unlock(&adc->lock);
+
+ pm_runtime_put_autosuspend(adc->dev);
+
+ return ret;
+}
+
+static void rzt2h_adc_set_cal(struct rzt2h_adc *adc, bool cal)
+{
+ u16 val;
+
+ val = readw(adc->base + RZT2H_ADCALCTL_REG);
+ if (cal)
+ val |= RZT2H_ADCALCTL_CAL_MASK;
+ else
+ val &= ~RZT2H_ADCALCTL_CAL_MASK;
+
+ writew(val, adc->base + RZT2H_ADCALCTL_REG);
+}
+
+static int rzt2h_adc_calibrate(struct rzt2h_adc *adc)
+{
+ u16 val;
+ int ret;
+
+ rzt2h_adc_set_cal(adc, true);
+
+ ret = read_poll_timeout(readw, val, val & RZT2H_ADCALCTL_CAL_RDY_MASK,
+ 200, 1000, true, adc->base + RZT2H_ADCALCTL_REG);
+ if (ret) {
+ dev_err(adc->dev, "Calibration timed out: %d\n", ret);
+ return ret;
+ }
+
+ rzt2h_adc_set_cal(adc, false);
+
+ if (val & RZT2H_ADCALCTL_CAL_ERR_MASK) {
+ dev_err(adc->dev, "Calibration failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rzt2h_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct rzt2h_adc *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ return rzt2h_adc_read_single(adc, chan->channel, val);
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1800;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info rzt2h_adc_iio_info = {
+ .read_raw = rzt2h_adc_read_raw,
+};
+
+static irqreturn_t rzt2h_adc_isr(int irq, void *private)
+{
+ struct rzt2h_adc *adc = private;
+
+ complete(&adc->completion);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_chan_spec rzt2h_adc_chan_template = {
+ .indexed = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .type = IIO_VOLTAGE,
+};
+
+static int rzt2h_adc_parse_properties(struct rzt2h_adc *adc)
+{
+ struct iio_chan_spec *chan_array;
+ unsigned int i;
+ int ret;
+
+ ret = devm_iio_adc_device_alloc_chaninfo_se(adc->dev,
+ &rzt2h_adc_chan_template,
+ RZT2H_ADC_MAX_CHANNELS - 1,
+ &chan_array);
+ if (ret < 0)
+ return dev_err_probe(adc->dev, ret, "Failed to read channel info\n");
+
+ adc->num_channels = ret;
+ adc->channels = chan_array;
+
+ for (i = 0; i < adc->num_channels; i++)
+ if (chan_array[i].channel + 1 > adc->max_channels)
+ adc->max_channels = chan_array[i].channel + 1;
+
+ return 0;
+}
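+
+/*
+ * The helper above builds the chan specs from the generic channel@N child
+ * nodes; an illustrative devicetree fragment (values hypothetical):
+ *
+ *	adc {
+ *		#address-cells = <1>;
+ *		#size-cells = <0>;
+ *
+ *		channel@0 {
+ *			reg = <0>;
+ *		};
+ *		channel@5 {
+ *			reg = <5>;
+ *		};
+ *	};
+ */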
+
+static int rzt2h_adc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct iio_dev *indio_dev;
+ struct rzt2h_adc *adc;
+ int ret, irq;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ adc = iio_priv(indio_dev);
+ adc->dev = dev;
+ init_completion(&adc->completion);
+
+ ret = devm_mutex_init(dev, &adc->lock);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, adc);
+
+ ret = rzt2h_adc_parse_properties(adc);
+ if (ret)
+ return ret;
+
+ adc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adc->base))
+ return PTR_ERR(adc->base);
+
+ pm_runtime_set_autosuspend_delay(dev, 300);
+ pm_runtime_use_autosuspend(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq_byname(pdev, "adi");
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, rzt2h_adc_isr, 0, dev_name(dev), adc);
+ if (ret)
+ return ret;
+
+ indio_dev->name = "rzt2h-adc";
+ indio_dev->info = &rzt2h_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = adc->channels;
+ indio_dev->num_channels = adc->num_channels;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id rzt2h_adc_match[] = {
+ { .compatible = "renesas,r9a09g077-adc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rzt2h_adc_match);
+
+static int rzt2h_adc_pm_runtime_resume(struct device *dev)
+{
+ struct rzt2h_adc *adc = dev_get_drvdata(dev);
+
+ /*
+ * Datasheet Page 2810, Section 41.5.6:
+ * After release from the module-stop state, wait for at least
+ * 0.5 µs before starting A/D conversion.
+ */
+ fsleep(1);
+
+ return rzt2h_adc_calibrate(adc);
+}
+
+static const struct dev_pm_ops rzt2h_adc_pm_ops = {
+ RUNTIME_PM_OPS(NULL, rzt2h_adc_pm_runtime_resume, NULL)
+};
+
+static struct platform_driver rzt2h_adc_driver = {
+ .probe = rzt2h_adc_probe,
+ .driver = {
+ .name = "rzt2h-adc",
+ .of_match_table = rzt2h_adc_match,
+ .pm = pm_ptr(&rzt2h_adc_pm_ops),
+ },
+};
+
+module_platform_driver(rzt2h_adc_driver);
+
+MODULE_AUTHOR("Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/T2H / RZ/N2H ADC driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DRIVER");
diff --git a/drivers/iio/adc/spear_adc.c b/drivers/iio/adc/spear_adc.c
index e3a865c79686..50b0a607baeb 100644
--- a/drivers/iio/adc/spear_adc.c
+++ b/drivers/iio/adc/spear_adc.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/completion.h>
@@ -29,9 +30,9 @@
/* Bit definitions for SPEAR_ADC_STATUS */
#define SPEAR_ADC_STATUS_START_CONVERSION BIT(0)
-#define SPEAR_ADC_STATUS_CHANNEL_NUM(x) ((x) << 1)
+#define SPEAR_ADC_STATUS_CHANNEL_NUM_MASK GENMASK(3, 1)
#define SPEAR_ADC_STATUS_ADC_ENABLE BIT(4)
-#define SPEAR_ADC_STATUS_AVG_SAMPLE(x) ((x) << 5)
+#define SPEAR_ADC_STATUS_AVG_SAMPLE_MASK GENMASK(8, 5)
#define SPEAR_ADC_STATUS_VREF_INTERNAL BIT(9)
#define SPEAR_ADC_DATA_MASK 0x03ff
@@ -157,8 +158,8 @@ static int spear_adc_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
mutex_lock(&st->lock);
- status = SPEAR_ADC_STATUS_CHANNEL_NUM(chan->channel) |
- SPEAR_ADC_STATUS_AVG_SAMPLE(st->avg_samples) |
+ status = FIELD_PREP(SPEAR_ADC_STATUS_CHANNEL_NUM_MASK, chan->channel) |
+ FIELD_PREP(SPEAR_ADC_STATUS_AVG_SAMPLE_MASK, st->avg_samples) |
SPEAR_ADC_STATUS_START_CONVERSION |
SPEAR_ADC_STATUS_ADC_ENABLE;
if (st->vref_external == 0)
@@ -274,8 +275,7 @@ static int spear_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(struct spear_adc_state));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "failed allocating iio device\n");
+ return -ENOMEM;
st = iio_priv(indio_dev);
st->dev = dev;
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 3d800762c5fc..e39a4c0db25e 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -794,7 +794,6 @@ static int stm32_adc_probe(struct platform_device *pdev)
goto err_irq_remove;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index b9f93116e114..2d7f88459c7c 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1528,7 +1528,6 @@ static int stm32_adc_single_conv(struct iio_dev *indio_dev,
stm32_adc_conv_irq_disable(adc);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1564,7 +1563,6 @@ static int stm32_adc_write_raw(struct iio_dev *indio_dev,
adc->cfg->set_ovs(indio_dev, idx);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
adc->ovs_idx = idx;
@@ -1759,7 +1757,6 @@ static int stm32_adc_update_scan_mode(struct iio_dev *indio_dev,
adc->num_conv = bitmap_weight(scan_mask, iio_get_masklength(indio_dev));
ret = stm32_adc_conf_scan_seq(indio_dev, scan_mask);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1808,7 +1805,6 @@ static int stm32_adc_debugfs_reg_access(struct iio_dev *indio_dev,
else
*readval = stm32_adc_readl(adc, reg);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -1954,7 +1950,6 @@ static int stm32_adc_buffer_postenable(struct iio_dev *indio_dev)
err_clr_trig:
stm32_adc_set_trig(indio_dev, NULL);
err_pm_put:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1977,7 +1972,6 @@ static int stm32_adc_buffer_predisable(struct iio_dev *indio_dev)
if (stm32_adc_set_trig(indio_dev, NULL))
dev_err(&indio_dev->dev, "Can't clear trigger\n");
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -2614,7 +2608,6 @@ static int stm32_adc_probe(struct platform_device *pdev)
goto err_hw_stop;
}
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (IS_ENABLED(CONFIG_DEBUG_FS))
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index c2d21eecafe7..9664b9bd75d4 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -725,9 +725,8 @@ static int stm32_dfsdm_generic_channel_parse_of(struct stm32_dfsdm *dfsdm,
}
df_ch->src = val;
- ret = fwnode_property_read_u32(node, "st,adc-alt-channel", &df_ch->alt_si);
- if (ret != -EINVAL)
- df_ch->alt_si = 0;
+ if (fwnode_property_present(node, "st,adc-alt-channel"))
+ df_ch->alt_si = 1;
if (adc->dev_data->type == DFSDM_IIO) {
backend = devm_iio_backend_fwnode_get(&indio_dev->dev, NULL, node);
@@ -1764,10 +1763,8 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
dev_data = of_device_get_match_data(dev);
iio = devm_iio_device_alloc(dev, sizeof(*adc));
- if (!iio) {
- dev_err(dev, "%s: Failed to allocate IIO\n", __func__);
+ if (!iio)
return -ENOMEM;
- }
adc = iio_priv(iio);
adc->dfsdm = dev_get_drvdata(dev->parent);
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index b0add5a2eab5..8e26c47edc08 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -267,10 +267,8 @@ static int stmpe_adc_probe(struct platform_device *pdev)
return irq_adc;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct stmpe_adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
info = iio_priv(indio_dev);
mutex_init(&info->lock);
diff --git a/drivers/iio/adc/sun4i-gpadc-iio.c b/drivers/iio/adc/sun4i-gpadc-iio.c
index 6b8d6bee1873..479115ea50bf 100644
--- a/drivers/iio/adc/sun4i-gpadc-iio.c
+++ b/drivers/iio/adc/sun4i-gpadc-iio.c
@@ -154,7 +154,6 @@ static const struct regmap_config sun4i_gpadc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
- .fast_io = true,
};
static int sun4i_prepare_for_irq(struct iio_dev *indio_dev, int channel,
@@ -245,7 +244,6 @@ static int sun4i_gpadc_read(struct iio_dev *indio_dev, int channel, int *val,
*val = info->temp_data;
ret = 0;
- pm_runtime_mark_last_busy(indio_dev->dev.parent);
err:
pm_runtime_put_autosuspend(indio_dev->dev.parent);
@@ -272,7 +270,6 @@ static int sun4i_gpadc_temp_read(struct iio_dev *indio_dev, int *val)
regmap_read(info->regmap, SUN4I_GPADC_TEMP_DATA, val);
- pm_runtime_mark_last_busy(indio_dev->dev.parent);
pm_runtime_put_autosuspend(indio_dev->dev.parent);
return 0;
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 4f514db5c26e..8ef51c57912d 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -102,27 +102,23 @@ struct adcxx1c_model {
int bits;
};
-#define ADCxx1C_MODEL(_name, _bits) \
- { \
- .channels = _name ## _channels, \
- .bits = (_bits), \
- }
-
DEFINE_ADCxx1C_CHANNELS(adc081c, 8);
DEFINE_ADCxx1C_CHANNELS(adc101c, 10);
DEFINE_ADCxx1C_CHANNELS(adc121c, 12);
-/* Model ids are indexes in _models array */
-enum adcxx1c_model_id {
- ADC081C = 0,
- ADC101C = 1,
- ADC121C = 2,
+static const struct adcxx1c_model adc081c_model = {
+ .channels = adc081c_channels,
+ .bits = 8,
+};
+
+static const struct adcxx1c_model adc101c_model = {
+ .channels = adc101c_channels,
+ .bits = 10,
};
-static struct adcxx1c_model adcxx1c_models[] = {
- ADCxx1C_MODEL(adc081c, 8),
- ADCxx1C_MODEL(adc101c, 10),
- ADCxx1C_MODEL(adc121c, 12),
+static const struct adcxx1c_model adc121c_model = {
+ .channels = adc121c_channels,
+ .bits = 12,
};
static const struct iio_info adc081c_info = {
@@ -203,24 +199,24 @@ static int adc081c_probe(struct i2c_client *client)
}
static const struct i2c_device_id adc081c_id[] = {
- { "adc081c", (kernel_ulong_t)&adcxx1c_models[ADC081C] },
- { "adc101c", (kernel_ulong_t)&adcxx1c_models[ADC101C] },
- { "adc121c", (kernel_ulong_t)&adcxx1c_models[ADC121C] },
+ { "adc081c", (kernel_ulong_t)&adc081c_model },
+ { "adc101c", (kernel_ulong_t)&adc101c_model },
+ { "adc121c", (kernel_ulong_t)&adc121c_model },
{ }
};
MODULE_DEVICE_TABLE(i2c, adc081c_id);
static const struct acpi_device_id adc081c_acpi_match[] = {
/* Used on some AAEON boards */
- { "ADC081C", (kernel_ulong_t)&adcxx1c_models[ADC081C] },
+ { "ADC081C", (kernel_ulong_t)&adc081c_model },
{ }
};
MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match);
static const struct of_device_id adc081c_of_match[] = {
- { .compatible = "ti,adc081c", .data = &adcxx1c_models[ADC081C] },
- { .compatible = "ti,adc101c", .data = &adcxx1c_models[ADC101C] },
- { .compatible = "ti,adc121c", .data = &adcxx1c_models[ADC121C] },
+ { .compatible = "ti,adc081c", .data = &adc081c_model },
+ { .compatible = "ti,adc101c", .data = &adc101c_model },
+ { .compatible = "ti,adc121c", .data = &adc121c_model },
{ }
};
MODULE_DEVICE_TABLE(of, adc081c_of_match);
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index 50a474f4d9f5..a100f770fa1c 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -200,10 +200,8 @@ static int adc084s021_probe(struct spi_device *spi)
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
- if (!indio_dev) {
- dev_err(&spi->dev, "Failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc = iio_priv(indio_dev);
adc->spi = spi;
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index 9dc465a10ffc..e5ec4b073daa 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -38,15 +38,13 @@ enum {
struct adc12138 {
struct spi_device *spi;
unsigned int id;
- /* conversion clock */
- struct clk *cclk;
/* positive analog voltage reference */
struct regulator *vref_p;
/* negative analog voltage reference */
struct regulator *vref_n;
struct mutex lock;
struct completion complete;
- /* The number of cclk periods for the S/H's acquisition time */
+ /* The number of conversion clock periods for the S/H's acquisition time */
unsigned int acquisition_time;
/*
* Maximum size needed: 16x 2 bytes ADC data + 8 bytes timestamp.
@@ -400,6 +398,7 @@ static int adc12138_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct adc12138 *adc;
+ struct clk *cclk;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
@@ -435,9 +434,14 @@ static int adc12138_probe(struct spi_device *spi)
if (ret)
adc->acquisition_time = 10;
- adc->cclk = devm_clk_get(&spi->dev, NULL);
- if (IS_ERR(adc->cclk))
- return PTR_ERR(adc->cclk);
+ ret = devm_request_irq(&spi->dev, spi->irq, adc12138_eoc_handler,
+ IRQF_TRIGGER_RISING, indio_dev->name, indio_dev);
+ if (ret)
+ return ret;
+
+ cclk = devm_clk_get_enabled(&spi->dev, NULL);
+ if (IS_ERR(cclk))
+ return PTR_ERR(cclk);
adc->vref_p = devm_regulator_get(&spi->dev, "vref-p");
if (IS_ERR(adc->vref_p))
@@ -454,18 +458,9 @@ static int adc12138_probe(struct spi_device *spi)
return ret;
}
- ret = devm_request_irq(&spi->dev, spi->irq, adc12138_eoc_handler,
- IRQF_TRIGGER_RISING, indio_dev->name, indio_dev);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(adc->cclk);
- if (ret)
- return ret;
-
ret = regulator_enable(adc->vref_p);
if (ret)
- goto err_clk_disable;
+ return ret;
if (!IS_ERR(adc->vref_n)) {
ret = regulator_enable(adc->vref_n);
@@ -496,8 +491,6 @@ err_vref_n_disable:
regulator_disable(adc->vref_n);
err_vref_p_disable:
regulator_disable(adc->vref_p);
-err_clk_disable:
- clk_disable_unprepare(adc->cclk);
return ret;
}
@@ -512,7 +505,6 @@ static void adc12138_remove(struct spi_device *spi)
if (!IS_ERR(adc->vref_n))
regulator_disable(adc->vref_n);
regulator_disable(adc->vref_p);
- clk_disable_unprepare(adc->cclk);
}
static const struct of_device_id adc12138_dt_ids[] = {
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index 1b46a8155803..4ae65793ad9b 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -99,51 +99,83 @@ static int adc128_read_raw(struct iio_dev *indio_dev,
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
}
-static const struct iio_chan_spec adc128s052_channels[] = {
+static const struct iio_chan_spec simple_1chan_adc_channels[] = {
+ ADC128_VOLTAGE_CHANNEL(0),
+};
+
+static const struct iio_chan_spec simple_2chan_adc_channels[] = {
ADC128_VOLTAGE_CHANNEL(0),
ADC128_VOLTAGE_CHANNEL(1),
- ADC128_VOLTAGE_CHANNEL(2),
- ADC128_VOLTAGE_CHANNEL(3),
- ADC128_VOLTAGE_CHANNEL(4),
- ADC128_VOLTAGE_CHANNEL(5),
- ADC128_VOLTAGE_CHANNEL(6),
- ADC128_VOLTAGE_CHANNEL(7),
};
-static const struct iio_chan_spec adc122s021_channels[] = {
+static const struct iio_chan_spec simple_4chan_adc_channels[] = {
ADC128_VOLTAGE_CHANNEL(0),
ADC128_VOLTAGE_CHANNEL(1),
+ ADC128_VOLTAGE_CHANNEL(2),
+ ADC128_VOLTAGE_CHANNEL(3),
};
-static const struct iio_chan_spec adc124s021_channels[] = {
+static const struct iio_chan_spec simple_8chan_adc_channels[] = {
ADC128_VOLTAGE_CHANNEL(0),
ADC128_VOLTAGE_CHANNEL(1),
ADC128_VOLTAGE_CHANNEL(2),
ADC128_VOLTAGE_CHANNEL(3),
+ ADC128_VOLTAGE_CHANNEL(4),
+ ADC128_VOLTAGE_CHANNEL(5),
+ ADC128_VOLTAGE_CHANNEL(6),
+ ADC128_VOLTAGE_CHANNEL(7),
};
static const char * const bd79104_regulators[] = { "iovdd" };
-static const struct adc128_configuration adc128_config[] = {
- {
- .channels = adc128s052_channels,
- .num_channels = ARRAY_SIZE(adc128s052_channels),
- .refname = "vref",
- }, {
- .channels = adc122s021_channels,
- .num_channels = ARRAY_SIZE(adc122s021_channels),
- .refname = "vref",
- }, {
- .channels = adc124s021_channels,
- .num_channels = ARRAY_SIZE(adc124s021_channels),
- .refname = "vref",
- }, {
- .channels = adc128s052_channels,
- .num_channels = ARRAY_SIZE(adc128s052_channels),
- .refname = "vdd",
- .other_regulators = &bd79104_regulators,
- .num_other_regulators = 1,
- },
+static const struct adc128_configuration adc122s_config = {
+ .channels = simple_2chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_2chan_adc_channels),
+ .refname = "vref",
+};
+
+static const struct adc128_configuration adc124s_config = {
+ .channels = simple_4chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_4chan_adc_channels),
+ .refname = "vref",
+};
+
+static const struct adc128_configuration adc128s_config = {
+ .channels = simple_8chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_8chan_adc_channels),
+ .refname = "vref",
+};
+
+static const struct adc128_configuration bd79100_config = {
+ .channels = simple_1chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_1chan_adc_channels),
+ .refname = "vdd",
+ .other_regulators = &bd79104_regulators,
+ .num_other_regulators = 1,
+};
+
+static const struct adc128_configuration bd79101_config = {
+ .channels = simple_2chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_2chan_adc_channels),
+ .refname = "vdd",
+ .other_regulators = &bd79104_regulators,
+ .num_other_regulators = 1,
+};
+
+static const struct adc128_configuration bd79102_config = {
+ .channels = simple_4chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_4chan_adc_channels),
+ .refname = "vdd",
+ .other_regulators = &bd79104_regulators,
+ .num_other_regulators = 1,
+};
+
+static const struct adc128_configuration bd79104_config = {
+ .channels = simple_8chan_adc_channels,
+ .num_channels = ARRAY_SIZE(simple_8chan_adc_channels),
+ .refname = "vdd",
+ .other_regulators = &bd79104_regulators,
+ .num_other_regulators = 1,
};
static const struct iio_info adc128_info = {
@@ -199,33 +231,41 @@ static int adc128_probe(struct spi_device *spi)
}
static const struct of_device_id adc128_of_match[] = {
- { .compatible = "ti,adc128s052", .data = &adc128_config[0] },
- { .compatible = "ti,adc122s021", .data = &adc128_config[1] },
- { .compatible = "ti,adc122s051", .data = &adc128_config[1] },
- { .compatible = "ti,adc122s101", .data = &adc128_config[1] },
- { .compatible = "ti,adc124s021", .data = &adc128_config[2] },
- { .compatible = "ti,adc124s051", .data = &adc128_config[2] },
- { .compatible = "ti,adc124s101", .data = &adc128_config[2] },
- { .compatible = "rohm,bd79104", .data = &adc128_config[3] },
+ { .compatible = "ti,adc128s052", .data = &adc128s_config },
+ { .compatible = "ti,adc122s021", .data = &adc122s_config },
+ { .compatible = "ti,adc122s051", .data = &adc122s_config },
+ { .compatible = "ti,adc122s101", .data = &adc122s_config },
+ { .compatible = "ti,adc124s021", .data = &adc124s_config },
+ { .compatible = "ti,adc124s051", .data = &adc124s_config },
+ { .compatible = "ti,adc124s101", .data = &adc124s_config },
+ { .compatible = "rohm,bd79100", .data = &bd79100_config },
+ { .compatible = "rohm,bd79101", .data = &bd79101_config },
+ { .compatible = "rohm,bd79102", .data = &bd79102_config },
+ { .compatible = "rohm,bd79103", .data = &bd79104_config },
+ { .compatible = "rohm,bd79104", .data = &bd79104_config },
{ }
};
MODULE_DEVICE_TABLE(of, adc128_of_match);
static const struct spi_device_id adc128_id[] = {
- { "adc128s052", (kernel_ulong_t)&adc128_config[0] },
- { "adc122s021", (kernel_ulong_t)&adc128_config[1] },
- { "adc122s051", (kernel_ulong_t)&adc128_config[1] },
- { "adc122s101", (kernel_ulong_t)&adc128_config[1] },
- { "adc124s021", (kernel_ulong_t)&adc128_config[2] },
- { "adc124s051", (kernel_ulong_t)&adc128_config[2] },
- { "adc124s101", (kernel_ulong_t)&adc128_config[2] },
- { "bd79104", (kernel_ulong_t)&adc128_config[3] },
+ { "adc128s052", (kernel_ulong_t)&adc128s_config },
+ { "adc122s021", (kernel_ulong_t)&adc122s_config },
+ { "adc122s051", (kernel_ulong_t)&adc122s_config },
+ { "adc122s101", (kernel_ulong_t)&adc122s_config },
+ { "adc124s021", (kernel_ulong_t)&adc124s_config },
+ { "adc124s051", (kernel_ulong_t)&adc124s_config },
+ { "adc124s101", (kernel_ulong_t)&adc124s_config },
+ { "bd79100", (kernel_ulong_t)&bd79100_config },
+ { "bd79101", (kernel_ulong_t)&bd79101_config },
+ { "bd79102", (kernel_ulong_t)&bd79102_config },
+ { "bd79103", (kernel_ulong_t)&bd79104_config },
+ { "bd79104", (kernel_ulong_t)&bd79104_config },
{ }
};
MODULE_DEVICE_TABLE(spi, adc128_id);
static const struct acpi_device_id adc128_acpi_match[] = {
- { "AANT1280", (kernel_ulong_t)&adc128_config[2] },
+ { "AANT1280", (kernel_ulong_t)&adc124s_config },
{ }
};
MODULE_DEVICE_TABLE(acpi, adc128_acpi_match);
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 48549d617e5f..f2a93c63ca14 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -374,12 +374,10 @@ static int ads1015_set_power_state(struct ads1015_data *data, bool on)
int ret;
struct device *dev = regmap_get_device(data->regmap);
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
+ else
ret = pm_runtime_put_autosuspend(dev);
- }
return ret < 0 ? ret : 0;
}
diff --git a/drivers/iio/adc/ti-ads1100.c b/drivers/iio/adc/ti-ads1100.c
index b0790e300b18..aa8946063c7d 100644
--- a/drivers/iio/adc/ti-ads1100.c
+++ b/drivers/iio/adc/ti-ads1100.c
@@ -105,7 +105,6 @@ static int ads1100_get_adc_result(struct ads1100_data *data, int chan, int *val)
ret = i2c_master_recv(data->client, (char *)&buffer, sizeof(buffer));
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
if (ret < 0) {
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index d2f86e1ec656..c9cedc59cdcd 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -291,7 +291,6 @@ static int ads1119_single_conversion(struct ads1119_state *st,
*val = sign_extend32(sample, chan->scan_type.realbits - 1);
ret = IIO_VAL_INT;
pdown:
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -470,7 +469,6 @@ static int ads1119_triggered_buffer_postdisable(struct iio_dev *indio_dev)
if (ret)
return ret;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
@@ -693,8 +691,7 @@ static int ads1119_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate IIO device\n");
+ return -ENOMEM;
st = iio_priv(indio_dev);
st->client = client;
@@ -750,8 +747,7 @@ static int ads1119_probe(struct i2c_client *client)
indio_dev->name,
iio_device_id(indio_dev));
if (!st->trig)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate IIO trigger\n");
+ return -ENOMEM;
st->trig->ops = &ads1119_trigger_ops;
iio_trigger_set_drvdata(st->trig, indio_dev);
@@ -778,8 +774,7 @@ static int ads1119_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, ads1119_powerdown, st);
if (ret)
- return dev_err_probe(dev, ret,
- "Failed to add powerdown action\n");
+ return ret;
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index b18f30d3fdbe..c9a20024d6b1 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -807,10 +807,8 @@ static int ads131e08_probe(struct spi_device *spi)
}
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev) {
- dev_err(&spi->dev, "failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
st = iio_priv(indio_dev);
st->info = info;
@@ -841,10 +839,8 @@ static int ads131e08_probe(struct spi_device *spi)
st->trig = devm_iio_trigger_alloc(&spi->dev, "%s-dev%d",
indio_dev->name, iio_device_id(indio_dev));
- if (!st->trig) {
- dev_err(&spi->dev, "failed to allocate IIO trigger\n");
+ if (!st->trig)
return -ENOMEM;
- }
st->trig->ops = &ads131e08_trigger_ops;
st->trig->dev.parent = &spi->dev;
@@ -852,7 +848,7 @@ static int ads131e08_probe(struct spi_device *spi)
ret = devm_iio_trigger_register(&spi->dev, st->trig);
if (ret) {
dev_err(&spi->dev, "failed to register IIO trigger\n");
- return -ENOMEM;
+ return ret;
}
indio_dev->trig = iio_trigger_get(st->trig);
diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c
index b1f745f75dbe..bbcc4fc22b6e 100644
--- a/drivers/iio/adc/ti-ads7924.c
+++ b/drivers/iio/adc/ti-ads7924.c
@@ -355,8 +355,7 @@ static int ads7924_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "failed to allocate iio device\n");
+ return -ENOMEM;
data = iio_priv(indio_dev);
@@ -399,8 +398,7 @@ static int ads7924_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, ads7924_reg_disable, data->vref_reg);
if (ret)
- return dev_err_probe(dev, ret,
- "failed to add regulator disable action\n");
+ return ret;
ret = ads7924_reset(indio_dev);
if (ret < 0)
@@ -414,8 +412,7 @@ static int ads7924_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, ads7924_set_idle_mode, data);
if (ret)
- return dev_err_probe(dev, ret,
- "failed to add idle mode action\n");
+ return ret;
/* Use minimum signal acquire time. */
ret = regmap_update_bits(data->regmap, ADS7924_ACQCONFIG_REG,
diff --git a/drivers/iio/adc/ti-tsc2046.c b/drivers/iio/adc/ti-tsc2046.c
index 74471f08662e..8eb717b11cff 100644
--- a/drivers/iio/adc/ti-tsc2046.c
+++ b/drivers/iio/adc/ti-tsc2046.c
@@ -535,8 +535,7 @@ static enum hrtimer_restart tsc2046_adc_timer(struct hrtimer *hrtimer)
if (priv->poll_cnt < TI_TSC2046_POLL_CNT) {
priv->poll_cnt++;
hrtimer_start(&priv->trig_timer,
- ns_to_ktime(priv->scan_interval_us *
- NSEC_PER_USEC),
+ us_to_ktime(priv->scan_interval_us),
HRTIMER_MODE_REL_SOFT);
if (priv->poll_cnt >= TI_TSC2046_MIN_POLL_CNT) {
@@ -605,8 +604,7 @@ static void tsc2046_adc_reenable_trigger(struct iio_trigger *trig)
* many samples. Reduce the sample rate for default (touchscreen) use
* case.
*/
- tim = ns_to_ktime((priv->scan_interval_us - priv->time_per_scan_us) *
- NSEC_PER_USEC);
+ tim = us_to_ktime(priv->scan_interval_us - priv->time_per_scan_us);
hrtimer_start(&priv->trig_timer, tim, HRTIMER_MODE_REL_SOFT);
}
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index fe1509d3b1e7..a1a28584de93 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -123,7 +123,7 @@ static void tiadc_step_config(struct iio_dev *indio_dev)
chan = adc_dev->channel_line[i];
- if (adc_dev->step_avg[i])
+ if (adc_dev->step_avg[i] && adc_dev->step_avg[i] <= STEPCONFIG_AVG_16)
stepconfig = STEPCONFIG_AVG(ffs(adc_dev->step_avg[i]) - 1) |
STEPCONFIG_FIFO1;
else
@@ -631,10 +631,9 @@ static int tiadc_probe(struct platform_device *pdev)
}
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed to allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
+
adc_dev = iio_priv(indio_dev);
adc_dev->mfd_tscadc = ti_tscadc_dev_get(pdev);
diff --git a/drivers/iio/adc/twl4030-madc.c b/drivers/iio/adc/twl4030-madc.c
index 0ea51ddeaa0a..fe3b31ec976e 100644
--- a/drivers/iio/adc/twl4030-madc.c
+++ b/drivers/iio/adc/twl4030-madc.c
@@ -758,10 +758,8 @@ static int twl4030_madc_probe(struct platform_device *pdev)
}
iio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*madc));
- if (!iio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!iio_dev)
return -ENOMEM;
- }
madc = iio_priv(iio_dev);
madc->dev = &pdev->dev;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 1b3b1843a801..d7182ed0d2a7 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -832,7 +832,7 @@ static int vf610_adc_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct vf610_adc));
if (!indio_dev)
- return dev_err_probe(&pdev->dev, -ENOMEM, "Failed allocating iio device\n");
+ return -ENOMEM;
info = iio_priv(indio_dev);
info->dev = &pdev->dev;
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
index 1028b101cf56..9bb0b83c8f67 100644
--- a/drivers/iio/adc/viperboard_adc.c
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -113,10 +113,8 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
/* registering iio */
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
- if (!indio_dev) {
- dev_err(&pdev->dev, "failed allocating iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
adc = iio_priv(indio_dev);
adc->vb = vb;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index 76dd0343f5f7..124470c92529 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -118,7 +118,7 @@
#define AMS_ALARM_THRESHOLD_OFF_10 0x10
#define AMS_ALARM_THRESHOLD_OFF_20 0x20
-#define AMS_ALARM_THR_DIRECT_MASK BIT(1)
+#define AMS_ALARM_THR_DIRECT_MASK BIT(0)
#define AMS_ALARM_THR_MIN 0x0000
#define AMS_ALARM_THR_MAX (BIT(16) - 1)
@@ -389,6 +389,29 @@ static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask)
ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
}
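+/* Caller must hold ams->intr_lock. */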
+static void ams_unmask(struct ams *ams)
+{
+ unsigned int status, unmask;
+
+ status = readl(ams->base + AMS_ISR_0);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
+
+ /* Clear status of disabled alarm */
+ unmask |= ams->intr_mask;
+
+ ams->current_masked_alarm &= status;
+
+ /* Also clear those which are masked out anyway */
+ ams->current_masked_alarm &= ~ams->intr_mask;
+
+ /* Clear the interrupts before we unmask them */
+ writel(unmask, ams->base + AMS_ISR_0);
+
+ ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+}
+
static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
{
unsigned long flags;
@@ -401,6 +424,7 @@ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
spin_lock_irqsave(&ams->intr_lock, flags);
ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
+ ams_unmask(ams);
spin_unlock_irqrestore(&ams->intr_lock, flags);
}
@@ -1035,28 +1059,9 @@ static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
static void ams_unmask_worker(struct work_struct *work)
{
struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
- unsigned int status, unmask;
spin_lock_irq(&ams->intr_lock);
-
- status = readl(ams->base + AMS_ISR_0);
-
- /* Clear those bits which are not active anymore */
- unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
-
- /* Clear status of disabled alarm */
- unmask |= ams->intr_mask;
-
- ams->current_masked_alarm &= status;
-
- /* Also clear those which are masked out anyway */
- ams->current_masked_alarm &= ~ams->intr_mask;
-
- /* Clear the interrupts before we unmask them */
- writel(unmask, ams->base + AMS_ISR_0);
-
- ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
-
+ ams_unmask(ams);
spin_unlock_irq(&ams->intr_lock);
/* If still pending some alarm re-trigger the timer */
diff --git a/drivers/iio/buffer/industrialio-buffer-cb.c b/drivers/iio/buffer/industrialio-buffer-cb.c
index 4befc9f55201..f4ebff968493 100644
--- a/drivers/iio/buffer/industrialio-buffer-cb.c
+++ b/drivers/iio/buffer/industrialio-buffer-cb.c
@@ -13,6 +13,7 @@
struct iio_cb_buffer {
struct iio_buffer buffer;
+ /* Must be safe to call from any context (e.g. must not sleep). */
int (*cb)(const void *data, void *private);
void *private;
struct iio_channel *channels;
@@ -68,7 +69,6 @@ struct iio_cb_buffer *iio_channel_get_all_cb(struct device *dev,
cb_buff->private = private;
cb_buff->cb = cb;
cb_buff->buffer.access = &iio_cb_access;
- INIT_LIST_HEAD(&cb_buff->buffer.demux_list);
cb_buff->channels = iio_channel_get_all(dev);
if (IS_ERR(cb_buff->channels)) {
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index ee294a775e8a..7a7a9d37339b 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -786,6 +786,12 @@ out_end_signalling:
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enqueue_dmabuf, "IIO_DMA_BUFFER");
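+/* Return the device used for this buffer's DMA allocations and mappings. */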
+struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer)
+{
+ return iio_buffer_to_queue(buffer)->dev;
+}
+EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_get_dma_dev, "IIO_DMA_BUFFER");
+
void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
{
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index e9d9a7d39fe1..27dd56334345 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -177,6 +177,8 @@ static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
.lock_queue = iio_dma_buffer_lock_queue,
.unlock_queue = iio_dma_buffer_unlock_queue,
+ .get_dma_dev = iio_dma_buffer_get_dma_dev,
+
.modes = INDIO_BUFFER_HARDWARE,
.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 1daaa36f87a9..8bbba85af699 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -425,7 +425,6 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev)
if (ret)
return ret;
- pm_runtime_mark_last_busy(&data->client->dev);
ret = pm_runtime_put_autosuspend(&data->client->dev);
if (ret)
return ret;
@@ -491,7 +490,6 @@ static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
ret = regmap_bulk_read(data->regmap, reg, val, sizeof(*val));
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 61d446fd456c..70f81c4a96ba 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -950,7 +950,6 @@ static int bme680_read_raw(struct iio_dev *indio_dev,
return ret;
ret = __bme680_read_raw(indio_dev, chan, val, val2, mask);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1021,7 +1020,6 @@ static int bme680_write_raw(struct iio_dev *indio_dev,
return ret;
ret = __bme680_write_raw(indio_dev, chan, val, val2, mask);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -1140,7 +1138,6 @@ static int bme680_buffer_postdisable(struct iio_dev *indio_dev)
struct bme680_data *data = iio_priv(indio_dev);
struct device *dev = regmap_get_device(data->regmap);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
}
diff --git a/drivers/iio/chemical/ens160_core.c b/drivers/iio/chemical/ens160_core.c
index 6cec60074827..86bde4a91bf7 100644
--- a/drivers/iio/chemical/ens160_core.c
+++ b/drivers/iio/chemical/ens160_core.c
@@ -305,8 +305,7 @@ static int ens160_setup_trigger(struct iio_dev *indio_dev, int irq)
trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
iio_device_id(indio_dev));
if (!trig)
- return dev_err_probe(dev, -ENOMEM,
- "failed to allocate trigger\n");
+ return -ENOMEM;
trig->ops = &ens160_trigger_ops;
iio_trigger_set_drvdata(trig, indio_dev);
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index 5df1926cd5d9..a665fcb78806 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -635,7 +635,7 @@ static int scd30_setup_trigger(struct iio_dev *indio_dev)
trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
iio_device_id(indio_dev));
if (!trig)
- return dev_err_probe(dev, -ENOMEM, "failed to allocate trigger\n");
+ return -ENOMEM;
trig->ops = &scd30_trigger_ops;
iio_trigger_set_drvdata(trig, indio_dev);
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 48193937275b..5540e2d28f4a 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -163,7 +163,6 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
ret = pm_runtime_resume_and_get(&st->pdev->dev);
} else {
atomic_dec(&st->user_requested_state);
- pm_runtime_mark_last_busy(&st->pdev->dev);
pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
}
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index da516c46e057..5136ad9ada04 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -66,10 +66,9 @@ static int scmi_iio_sensor_update_cb(struct notifier_block *nb,
/*
* Timestamp returned by SCMI is in seconds and is equal to
* time * power-of-10 multiplier(tstamp_scale) seconds.
- * Converting the timestamp to nanoseconds below.
+ * Converting the timestamp to nanoseconds (10⁹) below.
*/
- tstamp_scale = sensor->sensor_info->tstamp_scale +
- const_ilog2(NSEC_PER_SEC) / const_ilog2(10);
+ tstamp_scale = sensor->sensor_info->tstamp_scale + 9;
if (tstamp_scale < 0) {
do_div(time, int_pow(10, abs(tstamp_scale)));
time_ns = time;
@@ -521,9 +520,9 @@ static int scmi_iio_set_sampling_freq_avail(struct iio_dev *iio_dev)
int i;
sensor->freq_avail =
- devm_kzalloc(&iio_dev->dev,
- sizeof(*sensor->freq_avail) *
- (sensor->sensor_info->intervals.count * 2),
+ devm_kcalloc(&iio_dev->dev,
+ array_size(sensor->sensor_info->intervals.count, 2),
+ sizeof(*sensor->freq_avail),
GFP_KERNEL);
if (!sensor->freq_avail)
return -ENOMEM;
@@ -597,8 +596,8 @@ scmi_alloc_iiodev(struct scmi_device *sdev,
iiodev->info = &scmi_iio_info;
iio_channels =
- devm_kzalloc(dev,
- sizeof(*iio_channels) * (iiodev->num_channels),
+ devm_kcalloc(dev, iiodev->num_channels,
+ sizeof(*iio_channels),
GFP_KERNEL);
if (!iio_channels)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/iio/common/ssp_sensors/ssp_dev.c b/drivers/iio/common/ssp_sensors/ssp_dev.c
index 1e167dc673ca..da09c9f3ceb6 100644
--- a/drivers/iio/common/ssp_sensors/ssp_dev.c
+++ b/drivers/iio/common/ssp_sensors/ssp_dev.c
@@ -503,7 +503,7 @@ static int ssp_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "Failed to setup spi\n");
- return ret;
+ goto err_setup_spi;
}
data->fw_dl_state = SSP_FW_DL_STATE_NONE;
@@ -568,6 +568,8 @@ err_read_reg:
err_setup_irq:
mutex_destroy(&data->pending_lock);
mutex_destroy(&data->comm_lock);
+err_setup_spi:
+ mfd_remove_devices(&spi->dev);
dev_err(&spi->dev, "Probe failed!\n");
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index e0996dc014a3..7cd3caec1262 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -97,17 +97,32 @@ config AD5421
ad5421.
config AD5446
- tristate "Analog Devices AD5446 and similar single channel DACs driver"
- depends on (SPI_MASTER && I2C!=m) || I2C
+ tristate
+
+config AD5446_SPI
+ tristate "Analog Devices AD5446 and similar single channel DACs driver (SPI)"
+ depends on SPI
+ select AD5446
+ help
+ Say yes here to build support for Analog Devices AD5300, AD5310,
+ AD5320, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453, AD5512A,
+ AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5611, AD5620,
+ AD5621, AD5640, AD5641, AD5660, AD5662 DACs as well as
+ Texas Instruments DAC081S101, DAC101S101, DAC121S101.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5446-spi.
+
+config AD5446_I2C
+ tristate "Analog Devices AD5446 and similar single channel DACs driver (I2C)"
+ depends on I2C
+ select AD5446
help
- Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
- AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
- AD5512A, AD5541A, AD5542A, AD5543, AD5553, AD5600, AD5601, AD5602, AD5611,
- AD5612, AD5620, AD5621, AD5622, AD5640, AD5641, AD5660, AD5662 DACs
- as well as Texas Instruments DAC081S101, DAC101S101, DAC121S101.
+ Say yes here to build support for Analog Devices AD5301, AD5311, AD5321,
+ AD5602, AD5612, AD5622 DACs.
To compile this driver as a module, choose M here: the
- module will be called ad5446.
+ module will be called ad5446-i2c.
config AD5449
tristate "Analog Devices AD5449 and similar DACs driver"
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 3684cd52b7fa..e6ac4c67e337 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -15,6 +15,8 @@ obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
obj-$(CONFIG_AD5064) += ad5064.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
+obj-$(CONFIG_AD5446_SPI) += ad5446-spi.o
+obj-$(CONFIG_AD5446_I2C) += ad5446-i2c.o
obj-$(CONFIG_AD5449) += ad5449.o
obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
obj-$(CONFIG_AD5592R) += ad5592r.o
diff --git a/drivers/iio/dac/ad3530r.c b/drivers/iio/dac/ad3530r.c
index 6134613777b8..b97b46090d80 100644
--- a/drivers/iio/dac/ad3530r.c
+++ b/drivers/iio/dac/ad3530r.c
@@ -53,9 +53,6 @@
#define AD3530R_MAX_CHANNELS 8
#define AD3531R_MAX_CHANNELS 4
-/* Non-constant mask variant of FIELD_PREP() */
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
enum ad3530r_mode {
AD3530R_NORMAL_OP,
AD3530R_POWERDOWN_1K,
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index a57b0a093112..8271849b1c83 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
unsigned int clr)
{
struct ad5360_state *st = iio_priv(indio_dev);
- unsigned int ret;
+ int ret;
mutex_lock(&st->lock);
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 0ddce7b218e3..8b813cee7625 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -371,10 +371,8 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
- if (indio_dev == NULL) {
- dev_err(dev, "Failed to allocate iio device\n");
+ if (indio_dev == NULL)
return -ENOMEM;
- }
st = iio_priv(indio_dev);
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 1462ee640b16..d9d7031c4432 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -186,7 +186,7 @@ static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
unsigned int clr)
{
struct ad5421_state *st = iio_priv(indio_dev);
- unsigned int ret;
+ int ret;
mutex_lock(&st->lock);
diff --git a/drivers/iio/dac/ad5446-i2c.c b/drivers/iio/dac/ad5446-i2c.c
new file mode 100644
index 000000000000..40fe7e17fce4
--- /dev/null
+++ b/drivers/iio/dac/ad5446-i2c.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AD5446 I2C DAC driver
+ *
+ * Copyright 2025 Analog Devices Inc.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/i2c.h>
+
+#include <asm/byteorder.h>
+
+#include "ad5446.h"
+
+static int ad5622_write(struct ad5446_state *st, unsigned int val)
+{
+ struct i2c_client *client = to_i2c_client(st->dev);
+ int ret;
+
+ st->d16 = cpu_to_be16(val);
+
+ ret = i2c_master_send_dmasafe(client, (char *)&st->d16, sizeof(st->d16));
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(st->d16))
+ return -EIO;
+
+ return 0;
+}
+
+static int ad5446_i2c_probe(struct i2c_client *i2c)
+{
+ const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
+ const struct ad5446_chip_info *chip_info;
+
+ chip_info = i2c_get_match_data(i2c);
+ if (!chip_info)
+ return -ENODEV;
+
+ return ad5446_probe(&i2c->dev, id->name, chip_info);
+}
+
+/*
+ * ad5446_supported_i2c_device_ids:
+ * The AD5620/40/60 parts are available in different fixed internal reference
+ * voltage options. The actual part numbers may look different
+ * (and a bit cryptic); however, this style is used to make clear which
+ * parts are supported here.
+ */
+
+static const struct ad5446_chip_info ad5602_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 4),
+ .write = ad5622_write,
+};
+
+static const struct ad5446_chip_info ad5612_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(10, 16, 2),
+ .write = ad5622_write,
+};
+
+static const struct ad5446_chip_info ad5622_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 0),
+ .write = ad5622_write,
+};
+
+static const struct i2c_device_id ad5446_i2c_ids[] = {
+ {"ad5301", (kernel_ulong_t)&ad5602_chip_info},
+ {"ad5311", (kernel_ulong_t)&ad5612_chip_info},
+ {"ad5321", (kernel_ulong_t)&ad5622_chip_info},
+ {"ad5602", (kernel_ulong_t)&ad5602_chip_info},
+ {"ad5612", (kernel_ulong_t)&ad5612_chip_info},
+ {"ad5622", (kernel_ulong_t)&ad5622_chip_info},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad5446_i2c_ids);
+
+static const struct of_device_id ad5446_i2c_of_ids[] = {
+ { .compatible = "adi,ad5301", .data = &ad5602_chip_info },
+ { .compatible = "adi,ad5311", .data = &ad5612_chip_info },
+ { .compatible = "adi,ad5321", .data = &ad5622_chip_info },
+ { .compatible = "adi,ad5602", .data = &ad5602_chip_info },
+ { .compatible = "adi,ad5612", .data = &ad5612_chip_info },
+ { .compatible = "adi,ad5622", .data = &ad5622_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5446_i2c_of_ids);
+
+static struct i2c_driver ad5446_i2c_driver = {
+ .driver = {
+ .name = "ad5446",
+ .of_match_table = ad5446_i2c_of_ids,
+ },
+ .probe = ad5446_i2c_probe,
+ .id_table = ad5446_i2c_ids,
+};
+module_i2c_driver(ad5446_i2c_driver);
+
+MODULE_AUTHOR("Nuno Sá <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5622 and similar I2C DACs");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_AD5446");
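Note on the write path above: i2c_master_send_dmasafe() may hand its buffer
to DMA, so st->d16 lives inside the driver state (see the ad5446.h hunk later
in this patch) rather than on the stack, aligned so it does not share a cache
line with CPU-updated fields. A minimal sketch of the pattern, with a
hypothetical state structure:

	#include <linux/iio/iio.h>	/* IIO_DMA_MINALIGN */
	#include <linux/types.h>

	struct demo_state {
		int cpu_only_field;
		/*
		 * DMA-safe TX buffer: placed last and aligned so DMA and
		 * CPU accesses never touch the same cache line.
		 */
		__be16 txbuf __aligned(IIO_DMA_MINALIGN);
	};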
diff --git a/drivers/iio/dac/ad5446-spi.c b/drivers/iio/dac/ad5446-spi.c
new file mode 100644
index 000000000000..e29d77f21482
--- /dev/null
+++ b/drivers/iio/dac/ad5446-spi.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AD5446 SPI DAC driver
+ *
+ * Copyright 2025 Analog Devices Inc.
+ */
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/spi.h>
+#include <linux/unaligned.h>
+
+#include <asm/byteorder.h>
+
+#include "ad5446.h"
+
+static int ad5446_write(struct ad5446_state *st, unsigned int val)
+{
+ struct spi_device *spi = to_spi_device(st->dev);
+
+ st->d16 = cpu_to_be16(val);
+
+ return spi_write(spi, &st->d16, sizeof(st->d16));
+}
+
+static int ad5660_write(struct ad5446_state *st, unsigned int val)
+{
+ struct spi_device *spi = to_spi_device(st->dev);
+
+ put_unaligned_be24(val, st->d24);
+
+ return spi_write(spi, st->d24, sizeof(st->d24));
+}
+
+static int ad5446_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct ad5446_chip_info *chip_info;
+
+ chip_info = spi_get_device_match_data(spi);
+ if (!chip_info)
+ return -ENODEV;
+
+ return ad5446_probe(&spi->dev, id->name, chip_info);
+}
+
+/*
+ * Supported SPI device IDs:
+ * The AD5620/40/60 parts are available with different fixed internal
+ * reference voltage options. The actual part numbers may look different
+ * (and a bit cryptic), however this style is used to make clear which
+ * parts are supported here.
+ */
+
+static const struct ad5446_chip_info ad5300_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 4),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5310_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(10, 16, 2),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5320_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 0),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5444_chip_info = {
+ .channel = AD5446_CHANNEL(12, 16, 2),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5446_chip_info = {
+ .channel = AD5446_CHANNEL(14, 16, 0),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5450_chip_info = {
+ .channel = AD5446_CHANNEL(8, 16, 6),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5451_chip_info = {
+ .channel = AD5446_CHANNEL(10, 16, 4),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5541a_chip_info = {
+ .channel = AD5446_CHANNEL(16, 16, 0),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5512a_chip_info = {
+ .channel = AD5446_CHANNEL(12, 16, 4),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5553_chip_info = {
+ .channel = AD5446_CHANNEL(14, 16, 0),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5601_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5611_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(10, 16, 4),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5621_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5641_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5620_2500_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
+ .int_vref_mv = 2500,
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5620_1250_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
+ .int_vref_mv = 1250,
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5640_2500_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
+ .int_vref_mv = 2500,
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5640_1250_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
+ .int_vref_mv = 1250,
+ .write = ad5446_write,
+};
+
+static const struct ad5446_chip_info ad5660_2500_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(16, 16, 0),
+ .int_vref_mv = 2500,
+ .write = ad5660_write,
+};
+
+static const struct ad5446_chip_info ad5660_1250_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(16, 16, 0),
+ .int_vref_mv = 1250,
+ .write = ad5660_write,
+};
+
+static const struct ad5446_chip_info ad5662_chip_info = {
+ .channel = AD5446_CHANNEL_POWERDOWN(16, 16, 0),
+ .write = ad5660_write,
+};
+
+static const struct spi_device_id ad5446_spi_ids[] = {
+ {"ad5300", (kernel_ulong_t)&ad5300_chip_info},
+ {"ad5310", (kernel_ulong_t)&ad5310_chip_info},
+ {"ad5320", (kernel_ulong_t)&ad5320_chip_info},
+ {"ad5444", (kernel_ulong_t)&ad5444_chip_info},
+ {"ad5446", (kernel_ulong_t)&ad5446_chip_info},
+ {"ad5450", (kernel_ulong_t)&ad5450_chip_info},
+ {"ad5451", (kernel_ulong_t)&ad5451_chip_info},
+ {"ad5452", (kernel_ulong_t)&ad5444_chip_info}, /* ad5452 is compatible to the ad5444 */
+ {"ad5453", (kernel_ulong_t)&ad5446_chip_info}, /* ad5453 is compatible to the ad5446 */
+ {"ad5512a", (kernel_ulong_t)&ad5512a_chip_info},
+ {"ad5541a", (kernel_ulong_t)&ad5541a_chip_info},
+ {"ad5542", (kernel_ulong_t)&ad5541a_chip_info}, /* ad5541a and ad5542 are compatible */
+ {"ad5542a", (kernel_ulong_t)&ad5541a_chip_info}, /* ad5541a and ad5542a are compatible */
+ {"ad5543", (kernel_ulong_t)&ad5541a_chip_info}, /* ad5541a and ad5543 are compatible */
+ {"ad5553", (kernel_ulong_t)&ad5553_chip_info},
+ {"ad5600", (kernel_ulong_t)&ad5541a_chip_info}, /* ad5541a and ad5600 are compatible */
+ {"ad5601", (kernel_ulong_t)&ad5601_chip_info},
+ {"ad5611", (kernel_ulong_t)&ad5611_chip_info},
+ {"ad5621", (kernel_ulong_t)&ad5621_chip_info},
+ {"ad5641", (kernel_ulong_t)&ad5641_chip_info},
+ {"ad5620-2500", (kernel_ulong_t)&ad5620_2500_chip_info}, /* AD5620/40/60: */
+ /* part numbers may look differently */
+ {"ad5620-1250", (kernel_ulong_t)&ad5620_1250_chip_info},
+ {"ad5640-2500", (kernel_ulong_t)&ad5640_2500_chip_info},
+ {"ad5640-1250", (kernel_ulong_t)&ad5640_1250_chip_info},
+ {"ad5660-2500", (kernel_ulong_t)&ad5660_2500_chip_info},
+ {"ad5660-1250", (kernel_ulong_t)&ad5660_1250_chip_info},
+ {"ad5662", (kernel_ulong_t)&ad5662_chip_info},
+ {"dac081s101", (kernel_ulong_t)&ad5300_chip_info}, /* compatible Texas Instruments chips */
+ {"dac101s101", (kernel_ulong_t)&ad5310_chip_info},
+ {"dac121s101", (kernel_ulong_t)&ad5320_chip_info},
+ {"dac7512", (kernel_ulong_t)&ad5320_chip_info},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad5446_spi_ids);
+
+static const struct of_device_id ad5446_of_ids[] = {
+ { .compatible = "adi,ad5300", .data = &ad5300_chip_info },
+ { .compatible = "adi,ad5310", .data = &ad5310_chip_info },
+ { .compatible = "adi,ad5320", .data = &ad5320_chip_info },
+ { .compatible = "adi,ad5444", .data = &ad5444_chip_info },
+ { .compatible = "adi,ad5446", .data = &ad5446_chip_info },
+ { .compatible = "adi,ad5450", .data = &ad5450_chip_info },
+ { .compatible = "adi,ad5451", .data = &ad5451_chip_info },
+ { .compatible = "adi,ad5452", .data = &ad5444_chip_info },
+ { .compatible = "adi,ad5453", .data = &ad5446_chip_info },
+ { .compatible = "adi,ad5512a", .data = &ad5512a_chip_info },
+ { .compatible = "adi,ad5541a", .data = &ad5541a_chip_info },
+ { .compatible = "adi,ad5542", .data = &ad5541a_chip_info },
+ { .compatible = "adi,ad5542a", .data = &ad5541a_chip_info },
+ { .compatible = "adi,ad5543", .data = &ad5541a_chip_info },
+ { .compatible = "adi,ad5553", .data = &ad5553_chip_info },
+ { .compatible = "adi,ad5600", .data = &ad5541a_chip_info },
+ { .compatible = "adi,ad5601", .data = &ad5601_chip_info },
+ { .compatible = "adi,ad5611", .data = &ad5611_chip_info },
+ { .compatible = "adi,ad5621", .data = &ad5621_chip_info },
+ { .compatible = "adi,ad5641", .data = &ad5641_chip_info },
+ { .compatible = "adi,ad5620-2500", .data = &ad5620_2500_chip_info },
+ { .compatible = "adi,ad5620-1250", .data = &ad5620_1250_chip_info },
+ { .compatible = "adi,ad5640-2500", .data = &ad5640_2500_chip_info },
+ { .compatible = "adi,ad5640-1250", .data = &ad5640_1250_chip_info },
+ { .compatible = "adi,ad5660-2500", .data = &ad5660_2500_chip_info },
+ { .compatible = "adi,ad5660-1250", .data = &ad5660_1250_chip_info },
+ { .compatible = "adi,ad5662", .data = &ad5662_chip_info },
+ { .compatible = "ti,dac081s101", .data = &ad5300_chip_info },
+ { .compatible = "ti,dac101s101", .data = &ad5310_chip_info },
+ { .compatible = "ti,dac121s101", .data = &ad5320_chip_info },
+ { .compatible = "ti,dac7512", .data = &ad5320_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad5446_of_ids);
+
+static struct spi_driver ad5446_spi_driver = {
+ .driver = {
+ .name = "ad5446",
+ .of_match_table = ad5446_of_ids,
+ },
+ .probe = ad5446_spi_probe,
+ .id_table = ad5446_spi_ids,
+};
+module_spi_driver(ad5446_spi_driver);
+
+MODULE_AUTHOR("Nuno Sá <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5446 and similar SPI DACs");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_AD5446");
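ad5660_write() above packs the 24-bit shift-register word big-endian into the
3-byte st->d24 buffer. put_unaligned_be24() stores the low 24 bits MSB-first;
a byte-level sketch of the equivalent layout (helper name invented):

	#include <linux/types.h>

	static void demo_be24(u32 val, u8 buf[3])
	{
		/* Equivalent to put_unaligned_be24(val, buf): */
		buf[0] = (val >> 16) & 0xff;	/* MSB first */
		buf[1] = (val >> 8) & 0xff;
		buf[2] = val & 0xff;
	}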
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index ad304b0fec08..46a2eadb1d9b 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -1,73 +1,35 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * AD5446 SPI DAC driver
+ * AD5446 core DAC driver
*
* Copyright 2010 Analog Devices Inc.
*/
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
+#include <linux/array_size.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/spi/spi.h>
-#include <linux/i2c.h>
-#include <linux/regulator/consumer.h>
#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-
+#include <linux/export.h>
#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
+#include <linux/kstrtox.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sysfs.h>
-#include <linux/unaligned.h>
+#include "ad5446.h"
#define MODE_PWRDWN_1k 0x1
#define MODE_PWRDWN_100k 0x2
#define MODE_PWRDWN_TRISTATE 0x3
-/**
- * struct ad5446_state - driver instance specific data
- * @dev: this device
- * @chip_info: chip model specific constants, available modes etc
- * @vref_mv: actual reference voltage used
- * @cached_val: store/retrieve values during power down
- * @pwr_down_mode: power down mode (1k, 100k or tristate)
- * @pwr_down: true if the device is in power down
- * @lock: lock to protect the data buffer during write ops
- */
-
-struct ad5446_state {
- struct device *dev;
- const struct ad5446_chip_info *chip_info;
- unsigned short vref_mv;
- unsigned cached_val;
- unsigned pwr_down_mode;
- unsigned pwr_down;
- struct mutex lock;
-};
-
-/**
- * struct ad5446_chip_info - chip specific information
- * @channel: channel spec for the DAC
- * @int_vref_mv: AD5620/40/60: the internal reference voltage
- * @write: chip specific helper function to write to the register
- */
-
-struct ad5446_chip_info {
- struct iio_chan_spec channel;
- u16 int_vref_mv;
- int (*write)(struct ad5446_state *st, unsigned val);
-};
-
static const char * const ad5446_powerdown_modes[] = {
"1kohm_to_gnd", "100kohm_to_gnd", "three_state"
};
static int ad5446_set_powerdown_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, unsigned int mode)
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
{
struct ad5446_state *st = iio_priv(indio_dev);
@@ -77,7 +39,7 @@ static int ad5446_set_powerdown_mode(struct iio_dev *indio_dev,
}
static int ad5446_get_powerdown_mode(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan)
+ const struct iio_chan_spec *chan)
{
struct ad5446_state *st = iio_priv(indio_dev);
@@ -92,9 +54,9 @@ static const struct iio_enum ad5446_powerdown_mode_enum = {
};
static ssize_t ad5446_read_dac_powerdown(struct iio_dev *indio_dev,
- uintptr_t private,
- const struct iio_chan_spec *chan,
- char *buf)
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
{
struct ad5446_state *st = iio_priv(indio_dev);
@@ -102,9 +64,9 @@ static ssize_t ad5446_read_dac_powerdown(struct iio_dev *indio_dev,
}
static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
- uintptr_t private,
- const struct iio_chan_spec *chan,
- const char *buf, size_t len)
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
{
struct ad5446_state *st = iio_priv(indio_dev);
unsigned int shift;
@@ -116,7 +78,7 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
st->pwr_down = powerdown;
if (st->pwr_down) {
@@ -127,12 +89,13 @@ static ssize_t ad5446_write_dac_powerdown(struct iio_dev *indio_dev,
}
ret = st->chip_info->write(st, val);
- mutex_unlock(&st->lock);
+ if (ret)
+ return ret;
- return ret ? ret : len;
+ return len;
}
-static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
+const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
{
.name = "powerdown",
.read = ad5446_read_dac_powerdown,
@@ -143,28 +106,7 @@ static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5446_powerdown_mode_enum),
{ }
};
-
-#define _AD5446_CHANNEL(bits, storage, _shift, ext) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .output = 1, \
- .channel = 0, \
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_type = { \
- .sign = 'u', \
- .realbits = (bits), \
- .storagebits = (storage), \
- .shift = (_shift), \
- }, \
- .ext_info = (ext), \
-}
-
-#define AD5446_CHANNEL(bits, storage, shift) \
- _AD5446_CHANNEL(bits, storage, shift, NULL)
-
-#define AD5446_CHANNEL_POWERDOWN(bits, storage, shift) \
- _AD5446_CHANNEL(bits, storage, shift, ad5446_ext_info_powerdown)
+EXPORT_SYMBOL_NS_GPL(ad5446_ext_info_powerdown, "IIO_AD5446");
static int ad5446_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
@@ -186,32 +128,35 @@ static int ad5446_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ad5446_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
+static int ad5446_write_dac_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val)
{
struct ad5446_state *st = iio_priv(indio_dev);
- int ret = 0;
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ val <<= chan->scan_type.shift;
+ guard(mutex)(&st->lock);
+
+ st->cached_val = val;
+ if (st->pwr_down)
+ return 0;
+
+ return st->chip_info->write(st, val);
+}
+
+static int ad5446_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val >= (1 << chan->scan_type.realbits) || val < 0)
- return -EINVAL;
-
- val <<= chan->scan_type.shift;
- mutex_lock(&st->lock);
- st->cached_val = val;
- if (!st->pwr_down)
- ret = st->chip_info->write(st, val);
- mutex_unlock(&st->lock);
- break;
+ return ad5446_write_dac_raw(indio_dev, chan, val);
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-
- return ret;
}
static const struct iio_info ad5446_info = {
@@ -219,8 +164,8 @@ static const struct iio_info ad5446_info = {
.write_raw = ad5446_write_raw,
};
-static int ad5446_probe(struct device *dev, const char *name,
- const struct ad5446_chip_info *chip_info)
+int ad5446_probe(struct device *dev, const char *name,
+ const struct ad5446_chip_info *chip_info)
{
struct ad5446_state *st;
struct iio_dev *indio_dev;
@@ -241,7 +186,9 @@ static int ad5446_probe(struct device *dev, const char *name,
indio_dev->channels = &st->chip_info->channel;
indio_dev->num_channels = 1;
- mutex_init(&st->lock);
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
st->pwr_down_mode = MODE_PWRDWN_1k;
@@ -249,354 +196,19 @@ static int ad5446_probe(struct device *dev, const char *name,
if (ret < 0 && ret != -ENODEV)
return ret;
if (ret == -ENODEV) {
- if (chip_info->int_vref_mv)
- st->vref_mv = chip_info->int_vref_mv;
- else
- dev_warn(dev, "reference voltage unspecified\n");
+ if (!chip_info->int_vref_mv)
+ return dev_err_probe(dev, ret,
+ "reference voltage unspecified\n");
+
+ st->vref_mv = chip_info->int_vref_mv;
} else {
st->vref_mv = ret / 1000;
}
return devm_iio_device_register(dev, indio_dev);
}
-
-#if IS_ENABLED(CONFIG_SPI_MASTER)
-
-static int ad5446_write(struct ad5446_state *st, unsigned val)
-{
- struct spi_device *spi = to_spi_device(st->dev);
- __be16 data = cpu_to_be16(val);
-
- return spi_write(spi, &data, sizeof(data));
-}
-
-static int ad5660_write(struct ad5446_state *st, unsigned val)
-{
- struct spi_device *spi = to_spi_device(st->dev);
- uint8_t data[3];
-
- put_unaligned_be24(val, &data[0]);
-
- return spi_write(spi, data, sizeof(data));
-}
-
-/*
- * ad5446_supported_spi_device_ids:
- * The AD5620/40/60 parts are available in different fixed internal reference
- * voltage options. The actual part numbers may look differently
- * (and a bit cryptic), however this style is used to make clear which
- * parts are supported here.
- */
-enum ad5446_supported_spi_device_ids {
- ID_AD5300,
- ID_AD5310,
- ID_AD5320,
- ID_AD5444,
- ID_AD5446,
- ID_AD5450,
- ID_AD5451,
- ID_AD5541A,
- ID_AD5512A,
- ID_AD5553,
- ID_AD5600,
- ID_AD5601,
- ID_AD5611,
- ID_AD5621,
- ID_AD5641,
- ID_AD5620_2500,
- ID_AD5620_1250,
- ID_AD5640_2500,
- ID_AD5640_1250,
- ID_AD5660_2500,
- ID_AD5660_1250,
- ID_AD5662,
-};
-
-static const struct ad5446_chip_info ad5446_spi_chip_info[] = {
- [ID_AD5300] = {
- .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 4),
- .write = ad5446_write,
- },
- [ID_AD5310] = {
- .channel = AD5446_CHANNEL_POWERDOWN(10, 16, 2),
- .write = ad5446_write,
- },
- [ID_AD5320] = {
- .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 0),
- .write = ad5446_write,
- },
- [ID_AD5444] = {
- .channel = AD5446_CHANNEL(12, 16, 2),
- .write = ad5446_write,
- },
- [ID_AD5446] = {
- .channel = AD5446_CHANNEL(14, 16, 0),
- .write = ad5446_write,
- },
- [ID_AD5450] = {
- .channel = AD5446_CHANNEL(8, 16, 6),
- .write = ad5446_write,
- },
- [ID_AD5451] = {
- .channel = AD5446_CHANNEL(10, 16, 4),
- .write = ad5446_write,
- },
- [ID_AD5541A] = {
- .channel = AD5446_CHANNEL(16, 16, 0),
- .write = ad5446_write,
- },
- [ID_AD5512A] = {
- .channel = AD5446_CHANNEL(12, 16, 4),
- .write = ad5446_write,
- },
- [ID_AD5553] = {
- .channel = AD5446_CHANNEL(14, 16, 0),
- .write = ad5446_write,
- },
- [ID_AD5600] = {
- .channel = AD5446_CHANNEL(16, 16, 0),
- .write = ad5446_write,
- },
- [ID_AD5601] = {
- .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 6),
- .write = ad5446_write,
- },
- [ID_AD5611] = {
- .channel = AD5446_CHANNEL_POWERDOWN(10, 16, 4),
- .write = ad5446_write,
- },
- [ID_AD5621] = {
- .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
- .write = ad5446_write,
- },
- [ID_AD5641] = {
- .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
- .write = ad5446_write,
- },
- [ID_AD5620_2500] = {
- .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
- .int_vref_mv = 2500,
- .write = ad5446_write,
- },
- [ID_AD5620_1250] = {
- .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 2),
- .int_vref_mv = 1250,
- .write = ad5446_write,
- },
- [ID_AD5640_2500] = {
- .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
- .int_vref_mv = 2500,
- .write = ad5446_write,
- },
- [ID_AD5640_1250] = {
- .channel = AD5446_CHANNEL_POWERDOWN(14, 16, 0),
- .int_vref_mv = 1250,
- .write = ad5446_write,
- },
- [ID_AD5660_2500] = {
- .channel = AD5446_CHANNEL_POWERDOWN(16, 16, 0),
- .int_vref_mv = 2500,
- .write = ad5660_write,
- },
- [ID_AD5660_1250] = {
- .channel = AD5446_CHANNEL_POWERDOWN(16, 16, 0),
- .int_vref_mv = 1250,
- .write = ad5660_write,
- },
- [ID_AD5662] = {
- .channel = AD5446_CHANNEL_POWERDOWN(16, 16, 0),
- .write = ad5660_write,
- },
-};
-
-static const struct spi_device_id ad5446_spi_ids[] = {
- {"ad5300", ID_AD5300},
- {"ad5310", ID_AD5310},
- {"ad5320", ID_AD5320},
- {"ad5444", ID_AD5444},
- {"ad5446", ID_AD5446},
- {"ad5450", ID_AD5450},
- {"ad5451", ID_AD5451},
- {"ad5452", ID_AD5444}, /* ad5452 is compatible to the ad5444 */
- {"ad5453", ID_AD5446}, /* ad5453 is compatible to the ad5446 */
- {"ad5512a", ID_AD5512A},
- {"ad5541a", ID_AD5541A},
- {"ad5542a", ID_AD5541A}, /* ad5541a and ad5542a are compatible */
- {"ad5543", ID_AD5541A}, /* ad5541a and ad5543 are compatible */
- {"ad5553", ID_AD5553},
- {"ad5600", ID_AD5600},
- {"ad5601", ID_AD5601},
- {"ad5611", ID_AD5611},
- {"ad5621", ID_AD5621},
- {"ad5641", ID_AD5641},
- {"ad5620-2500", ID_AD5620_2500}, /* AD5620/40/60: */
- {"ad5620-1250", ID_AD5620_1250}, /* part numbers may look differently */
- {"ad5640-2500", ID_AD5640_2500},
- {"ad5640-1250", ID_AD5640_1250},
- {"ad5660-2500", ID_AD5660_2500},
- {"ad5660-1250", ID_AD5660_1250},
- {"ad5662", ID_AD5662},
- {"dac081s101", ID_AD5300}, /* compatible Texas Instruments chips */
- {"dac101s101", ID_AD5310},
- {"dac121s101", ID_AD5320},
- {"dac7512", ID_AD5320},
- { }
-};
-MODULE_DEVICE_TABLE(spi, ad5446_spi_ids);
-
-static const struct of_device_id ad5446_of_ids[] = {
- { .compatible = "ti,dac7512" },
- { }
-};
-MODULE_DEVICE_TABLE(of, ad5446_of_ids);
-
-static int ad5446_spi_probe(struct spi_device *spi)
-{
- const struct spi_device_id *id = spi_get_device_id(spi);
-
- return ad5446_probe(&spi->dev, id->name,
- &ad5446_spi_chip_info[id->driver_data]);
-}
-
-static struct spi_driver ad5446_spi_driver = {
- .driver = {
- .name = "ad5446",
- .of_match_table = ad5446_of_ids,
- },
- .probe = ad5446_spi_probe,
- .id_table = ad5446_spi_ids,
-};
-
-static int __init ad5446_spi_register_driver(void)
-{
- return spi_register_driver(&ad5446_spi_driver);
-}
-
-static void ad5446_spi_unregister_driver(void)
-{
- spi_unregister_driver(&ad5446_spi_driver);
-}
-
-#else
-
-static inline int ad5446_spi_register_driver(void) { return 0; }
-static inline void ad5446_spi_unregister_driver(void) { }
-
-#endif
-
-#if IS_ENABLED(CONFIG_I2C)
-
-static int ad5622_write(struct ad5446_state *st, unsigned val)
-{
- struct i2c_client *client = to_i2c_client(st->dev);
- __be16 data = cpu_to_be16(val);
- int ret;
-
- ret = i2c_master_send(client, (char *)&data, sizeof(data));
- if (ret < 0)
- return ret;
- if (ret != sizeof(data))
- return -EIO;
-
- return 0;
-}
-
-/*
- * ad5446_supported_i2c_device_ids:
- * The AD5620/40/60 parts are available in different fixed internal reference
- * voltage options. The actual part numbers may look differently
- * (and a bit cryptic), however this style is used to make clear which
- * parts are supported here.
- */
-enum ad5446_supported_i2c_device_ids {
- ID_AD5602,
- ID_AD5612,
- ID_AD5622,
-};
-
-static const struct ad5446_chip_info ad5446_i2c_chip_info[] = {
- [ID_AD5602] = {
- .channel = AD5446_CHANNEL_POWERDOWN(8, 16, 4),
- .write = ad5622_write,
- },
- [ID_AD5612] = {
- .channel = AD5446_CHANNEL_POWERDOWN(10, 16, 2),
- .write = ad5622_write,
- },
- [ID_AD5622] = {
- .channel = AD5446_CHANNEL_POWERDOWN(12, 16, 0),
- .write = ad5622_write,
- },
-};
-
-static int ad5446_i2c_probe(struct i2c_client *i2c)
-{
- const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
- return ad5446_probe(&i2c->dev, id->name,
- &ad5446_i2c_chip_info[id->driver_data]);
-}
-
-static const struct i2c_device_id ad5446_i2c_ids[] = {
- {"ad5301", ID_AD5602},
- {"ad5311", ID_AD5612},
- {"ad5321", ID_AD5622},
- {"ad5602", ID_AD5602},
- {"ad5612", ID_AD5612},
- {"ad5622", ID_AD5622},
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ad5446_i2c_ids);
-
-static struct i2c_driver ad5446_i2c_driver = {
- .driver = {
- .name = "ad5446",
- },
- .probe = ad5446_i2c_probe,
- .id_table = ad5446_i2c_ids,
-};
-
-static int __init ad5446_i2c_register_driver(void)
-{
- return i2c_add_driver(&ad5446_i2c_driver);
-}
-
-static void __exit ad5446_i2c_unregister_driver(void)
-{
- i2c_del_driver(&ad5446_i2c_driver);
-}
-
-#else
-
-static inline int ad5446_i2c_register_driver(void) { return 0; }
-static inline void ad5446_i2c_unregister_driver(void) { }
-
-#endif
-
-static int __init ad5446_init(void)
-{
- int ret;
-
- ret = ad5446_spi_register_driver();
- if (ret)
- return ret;
-
- ret = ad5446_i2c_register_driver();
- if (ret) {
- ad5446_spi_unregister_driver();
- return ret;
- }
-
- return 0;
-}
-module_init(ad5446_init);
-
-static void __exit ad5446_exit(void)
-{
- ad5446_i2c_unregister_driver();
- ad5446_spi_unregister_driver();
-}
-module_exit(ad5446_exit);
+EXPORT_SYMBOL_NS_GPL(ad5446_probe, "IIO_AD5446");
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
-MODULE_DESCRIPTION("Analog Devices AD5444/AD5446 DAC");
+MODULE_DESCRIPTION("Analog Devices CORE AD5446 DAC and similar devices");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5446.h b/drivers/iio/dac/ad5446.h
new file mode 100644
index 000000000000..6ba31d98f415
--- /dev/null
+++ b/drivers/iio/dac/ad5446.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_AD5446_H
+#define _LINUX_AD5446_H
+
+#include <linux/bits.h>
+#include <linux/compiler.h>
+#include <linux/iio/iio.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+struct device;
+
+extern const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[];
+
+#define _AD5446_CHANNEL(bits, storage, _shift, ext) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = (storage), \
+ .shift = (_shift), \
+ }, \
+ .ext_info = (ext), \
+}
+
+#define AD5446_CHANNEL(bits, storage, shift) \
+ _AD5446_CHANNEL(bits, storage, shift, NULL)
+
+#define AD5446_CHANNEL_POWERDOWN(bits, storage, shift) \
+ _AD5446_CHANNEL(bits, storage, shift, ad5446_ext_info_powerdown)
+
+/**
+ * struct ad5446_state - driver instance specific data
+ * @dev: this device
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_mv: actual reference voltage used
+ * @cached_val: store/retrieve values during power down
+ * @pwr_down_mode: power down mode (1k, 100k or tristate)
+ * @pwr_down: true if the device is in power down
+ * @lock: lock to protect the data buffer during write ops
+ */
+struct ad5446_state {
+ struct device *dev;
+ const struct ad5446_chip_info *chip_info;
+ unsigned short vref_mv;
+ unsigned int cached_val;
+ unsigned int pwr_down_mode;
+ unsigned int pwr_down;
+ /* mutex to protect device shared data */
+ struct mutex lock;
+ union {
+ __be16 d16;
+ u8 d24[3];
+ } __aligned(IIO_DMA_MINALIGN);
+};
+
+/**
+ * struct ad5446_chip_info - chip specific information
+ * @channel: channel spec for the DAC
+ * @int_vref_mv: AD5620/40/60: the internal reference voltage
+ * @write: chip specific helper function to write to the register
+ */
+struct ad5446_chip_info {
+ struct iio_chan_spec channel;
+ u16 int_vref_mv;
+ int (*write)(struct ad5446_state *st, unsigned int val);
+};
+
+int ad5446_probe(struct device *dev, const char *name,
+ const struct ad5446_chip_info *chip_info);
+
+#endif
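With the channel macros and ad5446_probe() exported through this header, a
bus front-end only supplies a chip_info with a bus-specific write() callback.
A sketch of a hypothetical glue driver (all names invented, bus setup
omitted):

	#include "ad5446.h"

	static int demo_bus_write(struct ad5446_state *st, unsigned int val)
	{
		/* bus-specific transfer of st->d16 or st->d24 goes here */
		return 0;
	}

	static const struct ad5446_chip_info demo_chip_info = {
		.channel = AD5446_CHANNEL_POWERDOWN(12, 16, 0),
		.write = demo_bus_write,
	};

	/* probe would call: ad5446_probe(dev, "demo", &demo_chip_info); */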
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index 26c049d5b73a..fbbd7105a80c 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -278,10 +278,8 @@ static int ad5764_probe(struct spi_device *spi)
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (indio_dev == NULL) {
- dev_err(&spi->dev, "Failed to allocate iio device\n");
+ if (indio_dev == NULL)
return -ENOMEM;
- }
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 41582f2b90fb..ae7297f08398 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -80,8 +80,6 @@ struct ad5791_chip_info {
/**
* struct ad5791_state - driver instance specific data
* @spi: spi_device
- * @reg_vdd: positive supply regulator
- * @reg_vss: negative supply regulator
* @gpio_reset: reset gpio
* @gpio_clear: clear gpio
* @gpio_ldac: load dac gpio
@@ -100,8 +98,6 @@ struct ad5791_chip_info {
*/
struct ad5791_state {
struct spi_device *spi;
- struct regulator *reg_vdd;
- struct regulator *reg_vss;
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_clear;
struct gpio_desc *gpio_ldac;
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index a26a99753418..a8198ba4f98a 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -221,10 +221,8 @@ static int ds4424_probe(struct i2c_client *client)
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio dev alloc failed.\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/iio/dac/ltc2688.c b/drivers/iio/dac/ltc2688.c
index 7a2ee26a7d68..02f408229681 100644
--- a/drivers/iio/dac/ltc2688.c
+++ b/drivers/iio/dac/ltc2688.c
@@ -6,6 +6,7 @@
*/
#include <linux/bitfield.h>
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
@@ -208,12 +209,12 @@ static int ltc2688_dac_code_write(struct ltc2688_state *st, u32 chan, u32 input,
code = FIELD_PREP(LTC2688_DITHER_RAW_MASK, code);
}
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
/* select the correct input register to read from */
ret = regmap_update_bits(st->regmap, LTC2688_CMD_A_B_SELECT, BIT(chan),
input << chan);
if (ret)
- goto out_unlock;
+ return ret;
/*
* If in dither/toggle mode the dac should be updated by an
@@ -224,10 +225,7 @@ static int ltc2688_dac_code_write(struct ltc2688_state *st, u32 chan, u32 input,
else
reg = LTC2688_CMD_CH_CODE(chan);
- ret = regmap_write(st->regmap, reg, code);
-out_unlock:
- mutex_unlock(&st->lock);
- return ret;
+ return regmap_write(st->regmap, reg, code);
}
static int ltc2688_dac_code_read(struct ltc2688_state *st, u32 chan, u32 input,
@@ -236,20 +234,20 @@ static int ltc2688_dac_code_read(struct ltc2688_state *st, u32 chan, u32 input,
struct ltc2688_chan *c = &st->channels[chan];
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = regmap_update_bits(st->regmap, LTC2688_CMD_A_B_SELECT, BIT(chan),
input << chan);
if (ret)
- goto out_unlock;
+ return ret;
ret = regmap_read(st->regmap, LTC2688_CMD_CH_CODE(chan), code);
-out_unlock:
- mutex_unlock(&st->lock);
+ if (ret)
+ return ret;
if (!c->toggle_chan && input == LTC2688_INPUT_B)
*code = FIELD_GET(LTC2688_DITHER_RAW_MASK, *code);
- return ret;
+ return 0;
}
static const int ltc2688_raw_range[] = {0, 1, U16_MAX};
@@ -359,17 +357,15 @@ static ssize_t ltc2688_dither_toggle_set(struct iio_dev *indio_dev,
if (ret)
return ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = regmap_update_bits(st->regmap, LTC2688_CMD_TOGGLE_DITHER_EN,
BIT(chan->channel), en << chan->channel);
if (ret)
- goto out_unlock;
+ return ret;
c->mode = en ? LTC2688_MODE_DITHER_TOGGLE : LTC2688_MODE_DEFAULT;
-out_unlock:
- mutex_unlock(&st->lock);
- return ret ?: len;
+ return len;
}
static ssize_t ltc2688_reg_bool_get(struct iio_dev *indio_dev,
@@ -953,7 +949,9 @@ static int ltc2688_probe(struct spi_device *spi)
/* Just write this once. No need to do it in every regmap read. */
st->tx_data[3] = LTC2688_CMD_NOOP;
- mutex_init(&st->lock);
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
st->regmap = devm_regmap_init(dev, &ltc2688_regmap_bus, st,
&ltc2688_regmap_config);
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index 344388338d9b..b860e18d52a1 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -82,9 +82,11 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
ret = regmap_update_bits(dac->common->regmap, STM32_DAC_CR, msk, en);
mutex_unlock(&dac->lock);
- if (ret < 0) {
+ if (ret) {
dev_err(&indio_dev->dev, "%s failed\n", str_enable_disable(en));
- goto err_put_pm;
+ if (enable)
+ pm_runtime_put_autosuspend(dev);
+ return ret;
}
/*
@@ -95,20 +97,10 @@ static int stm32_dac_set_enable_state(struct iio_dev *indio_dev, int ch,
if (en && dac->common->hfsel)
udelay(1);
- if (!enable) {
- pm_runtime_mark_last_busy(dev);
+ if (!enable)
pm_runtime_put_autosuspend(dev);
- }
return 0;
-
-err_put_pm:
- if (enable) {
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- }
-
- return ret;
}
static int stm32_dac_get_value(struct stm32_dac *dac, int channel, int *val)
@@ -349,7 +341,6 @@ static int stm32_dac_probe(struct platform_device *pdev)
if (ret)
goto err_pm_put;
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
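The pm_runtime_mark_last_busy() removals in this and the following hunks rely
on the runtime-PM core now recording the last-busy timestamp inside
pm_runtime_put_autosuspend() itself (an assumption inferred from this series).
The resulting idiom, sketched:

	#include <linux/pm_runtime.h>

	static void demo_pm_put(struct device *dev)
	{
		/*
		 * Previously:
		 *	pm_runtime_mark_last_busy(dev);
		 *	pm_runtime_put_autosuspend(dev);
		 * The put helper is now assumed to mark last busy itself.
		 */
		pm_runtime_put_autosuspend(dev);
	}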
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 3d2ce61f0db6..5c1c5213962f 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -242,10 +242,8 @@ static int ti_dac_probe(struct spi_device *spi)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*ti_dac));
- if (!indio_dev) {
- dev_err(dev, "can not allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
spi->mode = SPI_MODE_1;
spi->bits_per_word = 16;
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 47f1c7e9efa9..ed1741165f55 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -149,6 +149,19 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
return -EINVAL;
+ st->r4_rf_div_sel = 0;
+
+ /*
+ * TODO: The computation below ensures we get a power-of-2 shift
+ * (st->r4_rf_div_sel) so that freq becomes greater than or equal to
+ * ADF4350_MIN_VCO_FREQ. This might be simplified with fls()/fls_long()
+ * and friends.
+ */
+ while (freq < ADF4350_MIN_VCO_FREQ) {
+ freq <<= 1;
+ st->r4_rf_div_sel++;
+ }
+
if (freq > ADF4350_MAX_FREQ_45_PRESC) {
prescaler = ADF4350_REG1_PRESCALER;
mdiv = 75;
@@ -157,13 +170,6 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
mdiv = 23;
}
- st->r4_rf_div_sel = 0;
-
- while (freq < ADF4350_MIN_VCO_FREQ) {
- freq <<= 1;
- st->r4_rf_div_sel++;
- }
-
/*
* Allow a predefined reference division factor
* if not set, compute our own
@@ -673,8 +679,7 @@ static int adf4350_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(&spi->dev, adf4350_power_down, indio_dev);
if (ret)
- return dev_err_probe(&spi->dev, ret,
- "Failed to add action to managed power down\n");
+ return ret;
return devm_iio_device_register(&spi->dev, indio_dev);
}
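The TODO added in adf4350_set_freq() suggests deriving the power-of-two shift
without a loop. One fls64()-based equivalent, sketched as a hypothetical
helper (assumes ADF4350_MIN_VCO_FREQ is in scope and freq > 0, which the
earlier range check guarantees):

	#include <linux/bitops.h>
	#include <linux/math64.h>

	/* Smallest k such that (freq << k) >= ADF4350_MIN_VCO_FREQ. */
	static unsigned int demo_rf_div_sel(unsigned long long freq)
	{
		/*
		 * (freq << k) > MIN - 1  <=>  2^k > (MIN - 1) / freq,
		 * and fls64(q) is the smallest k with 2^k > q
		 * (fls64(0) == 0 covers freq >= MIN).
		 */
		return fls64(div64_u64(ADF4350_MIN_VCO_FREQ - 1, freq));
	}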
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 781d3e96645f..38394b5f3275 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -309,10 +309,8 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
if (on)
ret = pm_runtime_get_sync(dev);
- else {
- pm_runtime_mark_last_busy(dev);
+ else
ret = pm_runtime_put_autosuspend(dev);
- }
if (ret < 0) {
dev_err(dev, "Failed: bmg160_set_power_state for %d\n", on);
diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c
index 754c8a564ba4..a88670207cec 100644
--- a/drivers/iio/gyro/fxas21002c_core.c
+++ b/drivers/iio/gyro/fxas21002c_core.c
@@ -373,8 +373,6 @@ static int fxas21002c_pm_put(struct fxas21002c_data *data)
{
struct device *dev = regmap_get_device(data->regmap);
- pm_runtime_mark_last_busy(dev);
-
return pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index 16553948c5c3..67ae7d1012bc 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -370,7 +370,6 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
out_read_raw_unlock:
mutex_unlock(&mpu3050->lock);
- pm_runtime_mark_last_busy(mpu3050->dev);
pm_runtime_put_autosuspend(mpu3050->dev);
return ret;
@@ -662,7 +661,6 @@ static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
{
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
- pm_runtime_mark_last_busy(mpu3050->dev);
pm_runtime_put_autosuspend(mpu3050->dev);
return 0;
@@ -976,7 +974,6 @@ static int mpu3050_drdy_trigger_set_state(struct iio_trigger *trig,
if (ret)
dev_err(mpu3050->dev, "error resetting FIFO\n");
- pm_runtime_mark_last_busy(mpu3050->dev);
pm_runtime_put_autosuspend(mpu3050->dev);
mpu3050->hw_irq_trigger = false;
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 8e284f47242c..092878f2c886 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -27,7 +27,6 @@ static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
{
struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
- pm_runtime_mark_last_busy(mpu3050->dev);
pm_runtime_put_autosuspend(mpu3050->dev);
return 0;
}
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 30d3f984b032..0e5a512e3bb8 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -58,7 +58,6 @@ static const struct reg_field afe4403_reg_fields[] = {
/**
* struct afe4403_data - AFE4403 device instance data
- * @dev: Device structure
* @spi: SPI device handle
* @regmap: Register map of the device
* @fields: Register fields of the device
@@ -68,7 +67,6 @@ static const struct reg_field afe4403_reg_fields[] = {
* @buffer: Used to construct data layout to push into IIO buffer.
*/
struct afe4403_data {
- struct device *dev;
struct spi_device *spi;
struct regmap *regmap;
struct regmap_field *fields[F_MAX_FIELDS];
@@ -460,63 +458,63 @@ static DEFINE_SIMPLE_DEV_PM_OPS(afe4403_pm_ops, afe4403_suspend,
static int afe4403_probe(struct spi_device *spi)
{
+ struct device *dev = &spi->dev;
struct iio_dev *indio_dev;
struct afe4403_data *afe;
int i, ret;
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*afe));
if (!indio_dev)
return -ENOMEM;
afe = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
- afe->dev = &spi->dev;
afe->spi = spi;
afe->irq = spi->irq;
afe->regmap = devm_regmap_init_spi(spi, &afe4403_regmap_config);
if (IS_ERR(afe->regmap)) {
- dev_err(afe->dev, "Unable to allocate register map\n");
+ dev_err(dev, "Unable to allocate register map\n");
return PTR_ERR(afe->regmap);
}
for (i = 0; i < F_MAX_FIELDS; i++) {
- afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+ afe->fields[i] = devm_regmap_field_alloc(dev, afe->regmap,
afe4403_reg_fields[i]);
if (IS_ERR(afe->fields[i])) {
- dev_err(afe->dev, "Unable to allocate regmap fields\n");
+ dev_err(dev, "Unable to allocate regmap fields\n");
return PTR_ERR(afe->fields[i]);
}
}
- afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+ afe->regulator = devm_regulator_get(dev, "tx_sup");
if (IS_ERR(afe->regulator))
- return dev_err_probe(afe->dev, PTR_ERR(afe->regulator),
+ return dev_err_probe(dev, PTR_ERR(afe->regulator),
"Unable to get regulator\n");
ret = regulator_enable(afe->regulator);
if (ret) {
- dev_err(afe->dev, "Unable to enable regulator\n");
+ dev_err(dev, "Unable to enable regulator\n");
return ret;
}
- ret = devm_add_action_or_reset(afe->dev, afe4403_regulator_disable, afe->regulator);
+ ret = devm_add_action_or_reset(dev, afe4403_regulator_disable, afe->regulator);
if (ret) {
- dev_err(afe->dev, "Unable to add regulator disable action\n");
+ dev_err(dev, "Unable to add regulator disable action\n");
return ret;
}
ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
AFE440X_CONTROL0_SW_RESET);
if (ret) {
- dev_err(afe->dev, "Unable to reset device\n");
+ dev_err(dev, "Unable to reset device\n");
return ret;
}
ret = regmap_multi_reg_write(afe->regmap, afe4403_reg_sequences,
ARRAY_SIZE(afe4403_reg_sequences));
if (ret) {
- dev_err(afe->dev, "Unable to set register defaults\n");
+ dev_err(dev, "Unable to set register defaults\n");
return ret;
}
@@ -527,45 +525,43 @@ static int afe4403_probe(struct spi_device *spi)
indio_dev->info = &afe4403_iio_info;
if (afe->irq > 0) {
- afe->trig = devm_iio_trigger_alloc(afe->dev,
+ afe->trig = devm_iio_trigger_alloc(dev,
"%s-dev%d",
indio_dev->name,
iio_device_id(indio_dev));
- if (!afe->trig) {
- dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+ if (!afe->trig)
return -ENOMEM;
- }
iio_trigger_set_drvdata(afe->trig, indio_dev);
- ret = devm_iio_trigger_register(afe->dev, afe->trig);
+ ret = devm_iio_trigger_register(dev, afe->trig);
if (ret) {
- dev_err(afe->dev, "Unable to register IIO trigger\n");
+ dev_err(dev, "Unable to register IIO trigger\n");
return ret;
}
- ret = devm_request_threaded_irq(afe->dev, afe->irq,
+ ret = devm_request_threaded_irq(dev, afe->irq,
iio_trigger_generic_data_rdy_poll,
NULL, IRQF_ONESHOT,
AFE4403_DRIVER_NAME,
afe->trig);
if (ret) {
- dev_err(afe->dev, "Unable to request IRQ\n");
+ dev_err(dev, "Unable to request IRQ\n");
return ret;
}
}
- ret = devm_iio_triggered_buffer_setup(afe->dev, indio_dev,
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
&iio_pollfunc_store_time,
afe4403_trigger_handler, NULL);
if (ret) {
- dev_err(afe->dev, "Unable to setup buffer\n");
+ dev_err(dev, "Unable to setup buffer\n");
return ret;
}
- ret = devm_iio_device_register(afe->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(afe->dev, "Unable to register IIO device\n");
+ dev_err(dev, "Unable to register IIO device\n");
return ret;
}
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index b2727effecaa..768d794e574b 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -77,7 +77,6 @@ static const struct reg_field afe4404_reg_fields[] = {
/**
* struct afe4404_data - AFE4404 device instance data
- * @dev: Device structure
* @regmap: Register map of the device
* @fields: Register fields of the device
* @regulator: Pointer to the regulator for the IC
@@ -86,7 +85,6 @@ static const struct reg_field afe4404_reg_fields[] = {
* @buffer: Used to construct a scan to push to the iio buffer.
*/
struct afe4404_data {
- struct device *dev;
struct regmap *regmap;
struct regmap_field *fields[F_MAX_FIELDS];
struct regulator *regulator;
@@ -468,62 +466,62 @@ static DEFINE_SIMPLE_DEV_PM_OPS(afe4404_pm_ops, afe4404_suspend,
static int afe4404_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct afe4404_data *afe;
int i, ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*afe));
if (!indio_dev)
return -ENOMEM;
afe = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
- afe->dev = &client->dev;
afe->irq = client->irq;
afe->regmap = devm_regmap_init_i2c(client, &afe4404_regmap_config);
if (IS_ERR(afe->regmap)) {
- dev_err(afe->dev, "Unable to allocate register map\n");
+ dev_err(dev, "Unable to allocate register map\n");
return PTR_ERR(afe->regmap);
}
for (i = 0; i < F_MAX_FIELDS; i++) {
- afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+ afe->fields[i] = devm_regmap_field_alloc(dev, afe->regmap,
afe4404_reg_fields[i]);
if (IS_ERR(afe->fields[i])) {
- dev_err(afe->dev, "Unable to allocate regmap fields\n");
+ dev_err(dev, "Unable to allocate regmap fields\n");
return PTR_ERR(afe->fields[i]);
}
}
- afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
+ afe->regulator = devm_regulator_get(dev, "tx_sup");
if (IS_ERR(afe->regulator))
- return dev_err_probe(afe->dev, PTR_ERR(afe->regulator),
+ return dev_err_probe(dev, PTR_ERR(afe->regulator),
"Unable to get regulator\n");
ret = regulator_enable(afe->regulator);
if (ret) {
- dev_err(afe->dev, "Unable to enable regulator\n");
+ dev_err(dev, "Unable to enable regulator\n");
return ret;
}
- ret = devm_add_action_or_reset(afe->dev, afe4404_regulator_disable, afe->regulator);
+ ret = devm_add_action_or_reset(dev, afe4404_regulator_disable, afe->regulator);
if (ret) {
- dev_err(afe->dev, "Unable to enable regulator\n");
+ dev_err(dev, "Unable to enable regulator\n");
return ret;
}
ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
AFE440X_CONTROL0_SW_RESET);
if (ret) {
- dev_err(afe->dev, "Unable to reset device\n");
+ dev_err(dev, "Unable to reset device\n");
return ret;
}
ret = regmap_multi_reg_write(afe->regmap, afe4404_reg_sequences,
ARRAY_SIZE(afe4404_reg_sequences));
if (ret) {
- dev_err(afe->dev, "Unable to set register defaults\n");
+ dev_err(dev, "Unable to set register defaults\n");
return ret;
}
@@ -534,45 +532,43 @@ static int afe4404_probe(struct i2c_client *client)
indio_dev->info = &afe4404_iio_info;
if (afe->irq > 0) {
- afe->trig = devm_iio_trigger_alloc(afe->dev,
+ afe->trig = devm_iio_trigger_alloc(dev,
"%s-dev%d",
indio_dev->name,
iio_device_id(indio_dev));
- if (!afe->trig) {
- dev_err(afe->dev, "Unable to allocate IIO trigger\n");
+ if (!afe->trig)
return -ENOMEM;
- }
iio_trigger_set_drvdata(afe->trig, indio_dev);
- ret = devm_iio_trigger_register(afe->dev, afe->trig);
+ ret = devm_iio_trigger_register(dev, afe->trig);
if (ret) {
- dev_err(afe->dev, "Unable to register IIO trigger\n");
+ dev_err(dev, "Unable to register IIO trigger\n");
return ret;
}
- ret = devm_request_threaded_irq(afe->dev, afe->irq,
+ ret = devm_request_threaded_irq(dev, afe->irq,
iio_trigger_generic_data_rdy_poll,
NULL, IRQF_ONESHOT,
AFE4404_DRIVER_NAME,
afe->trig);
if (ret) {
- dev_err(afe->dev, "Unable to request IRQ\n");
+ dev_err(dev, "Unable to request IRQ\n");
return ret;
}
}
- ret = devm_iio_triggered_buffer_setup(afe->dev, indio_dev,
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
&iio_pollfunc_store_time,
afe4404_trigger_handler, NULL);
if (ret) {
- dev_err(afe->dev, "Unable to setup buffer\n");
+ dev_err(dev, "Unable to setup buffer\n");
return ret;
}
- ret = devm_iio_device_register(afe->dev, indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(afe->dev, "Unable to register IIO device\n");
+ dev_err(dev, "Unable to register IIO device\n");
return ret;
}
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 814f521e47ae..3d441013893c 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -5,7 +5,6 @@
* Copyright (C) 2015, 2018
* Author: Matt Ranostay <matt.ranostay@konsulko.com>
*
- * TODO: enable pulse length controls via device tree properties
*/
#include <linux/module.h>
@@ -18,6 +17,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/bitfield.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
@@ -52,9 +52,13 @@
#define MAX30100_REG_MODE_CONFIG_PWR BIT(7)
#define MAX30100_REG_SPO2_CONFIG 0x07
+#define MAX30100_REG_SPO2_CONFIG_PW_MASK GENMASK(1, 0)
+#define MAX30100_REG_SPO2_CONFIG_200US 0x0
+#define MAX30100_REG_SPO2_CONFIG_400US 0x1
+#define MAX30100_REG_SPO2_CONFIG_800US 0x2
+#define MAX30100_REG_SPO2_CONFIG_1600US 0x3
#define MAX30100_REG_SPO2_CONFIG_100HZ BIT(2)
#define MAX30100_REG_SPO2_CONFIG_HI_RES_EN BIT(6)
-#define MAX30100_REG_SPO2_CONFIG_1600US 0x3
#define MAX30100_REG_LED_CONFIG 0x09
#define MAX30100_REG_LED_CONFIG_LED_MASK 0x0f
@@ -306,19 +310,47 @@ static int max30100_led_init(struct max30100_data *data)
MAX30100_REG_LED_CONFIG_LED_MASK, reg);
}
+static int max30100_get_pulse_width(unsigned int pwidth_us)
+{
+ switch (pwidth_us) {
+ case 200:
+ return MAX30100_REG_SPO2_CONFIG_200US;
+ case 400:
+ return MAX30100_REG_SPO2_CONFIG_400US;
+ case 800:
+ return MAX30100_REG_SPO2_CONFIG_800US;
+ case 1600:
+ return MAX30100_REG_SPO2_CONFIG_1600US;
+ default:
+ return -EINVAL;
+ }
+}
+
static int max30100_chip_init(struct max30100_data *data)
{
int ret;
+ int pulse_width;
+ /* set default LED pulse-width to 1600 us */
+ unsigned int pulse_us = 1600;
+ struct device *dev = &data->client->dev;
/* setup LED current settings */
ret = max30100_led_init(data);
if (ret)
return ret;
+ /* Read LED pulse-width-us from DT */
+ device_property_read_u32(dev, "maxim,pulse-width-us", &pulse_us);
+
+ pulse_width = max30100_get_pulse_width(pulse_us);
+ if (pulse_width < 0)
+ return dev_err_probe(dev, pulse_width, "invalid LED pulse-width %uus\n", pulse_us);
+
/* enable hi-res SPO2 readings at 100Hz */
ret = regmap_write(data->regmap, MAX30100_REG_SPO2_CONFIG,
MAX30100_REG_SPO2_CONFIG_HI_RES_EN |
- MAX30100_REG_SPO2_CONFIG_100HZ);
+ MAX30100_REG_SPO2_CONFIG_100HZ |
+ FIELD_PREP(MAX30100_REG_SPO2_CONFIG_PW_MASK, pulse_width));
if (ret)
return ret;
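The pulse-width plumbing above relies on FIELD_PREP(), which shifts a field
value into the bit positions named by a mask. A worked example with the masks
defined earlier in this file (helper name invented):

	#include <linux/bitfield.h>
	#include <linux/types.h>

	/*
	 * MAX30100_REG_SPO2_CONFIG_PW_MASK is GENMASK(1, 0), so
	 * FIELD_PREP(GENMASK(1, 0), MAX30100_REG_SPO2_CONFIG_1600US)
	 * places the 2-bit value 0x3 in bits 1:0 of the register.
	 */
	static inline u8 demo_spo2_cfg(unsigned int pw)
	{
		return MAX30100_REG_SPO2_CONFIG_HI_RES_EN |
		       MAX30100_REG_SPO2_CONFIG_100HZ |
		       FIELD_PREP(MAX30100_REG_SPO2_CONFIG_PW_MASK, pw);
	}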
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
index f021c3e6d886..02ca23eb8991 100644
--- a/drivers/iio/humidity/am2315.c
+++ b/drivers/iio/humidity/am2315.c
@@ -224,10 +224,8 @@ static int am2315_probe(struct i2c_client *client)
struct am2315_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 73d2033954e7..980cb946bbf7 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -294,10 +294,8 @@ static int dht11_probe(struct platform_device *pdev)
struct iio_dev *iio;
iio = devm_iio_device_alloc(dev, sizeof(*dht11));
- if (!iio) {
- dev_err(dev, "Failed to allocate IIO device\n");
+ if (!iio)
return -ENOMEM;
- }
dht11 = iio_priv(iio);
dht11->dev = dev;
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
index ffb25596d3a8..78b2c171c8da 100644
--- a/drivers/iio/humidity/hdc3020.c
+++ b/drivers/iio/humidity/hdc3020.c
@@ -72,6 +72,9 @@
#define HDC3020_MAX_TEMP_HYST_MICRO 164748607
#define HDC3020_MAX_HUM_MICRO 99220264
+/* Divide the datasheet's 65535 scale factor by 5 to avoid overflows */
+#define HDC3020_THRESH_FRACTION (65535 / 5)
+
struct hdc3020_data {
struct i2c_client *client;
struct gpio_desc *reset_gpio;
@@ -301,9 +304,9 @@ static int hdc3020_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
*val2 = 65536;
if (chan->type == IIO_TEMP)
- *val = 175;
+ *val = 175 * MILLI;
else
- *val = 100;
+ *val = 100 * MILLI;
return IIO_VAL_FRACTIONAL;
case IIO_CHAN_INFO_OFFSET:
@@ -376,15 +379,18 @@ static int hdc3020_thresh_get_temp(u16 thresh)
int temp;
/*
- * Get the temperature threshold from 9 LSBs, shift them to get
- * the truncated temperature threshold representation and
- * calculate the threshold according to the formula in the
- * datasheet. Result is degree celsius scaled by 65535.
+ * Get the temperature threshold from 9 LSBs, shift them to get the
+ * truncated temperature threshold representation and calculate the
+ * threshold according to the explicit formula in the datasheet:
+ * T(C) = -45 + (175 * temp) / 65535.
+ * Additionally scale by HDC3020_THRESH_FRACTION to avoid precision loss
+ * when calculating threshold and hysteresis values. Result is degree
+ * celsius scaled by HDC3020_THRESH_FRACTION.
*/
temp = FIELD_GET(HDC3020_THRESH_TEMP_MASK, thresh) <<
HDC3020_THRESH_TEMP_TRUNC_SHIFT;
- return -2949075 + (175 * temp);
+ return -2949075 / 5 + (175 / 5 * temp);
}
static int hdc3020_thresh_get_hum(u16 thresh)
@@ -394,13 +400,16 @@ static int hdc3020_thresh_get_hum(u16 thresh)
/*
* Get the humidity threshold from 7 MSBs, shift them to get the
* truncated humidity threshold representation and calculate the
- * threshold according to the formula in the datasheet. Result is
- * percent scaled by 65535.
+ * threshold according to the explicit formula in the datasheet:
+ * RH(%) = 100 * hum / 65535.
+ * Additionally scale by HDC3020_THRESH_FRACTION to avoid precision loss
+ * when calculating threshold and hysteresis values. Result is percent
+ * scaled by HDC3020_THRESH_FRACTION.
*/
hum = FIELD_GET(HDC3020_THRESH_HUM_MASK, thresh) <<
HDC3020_THRESH_HUM_TRUNC_SHIFT;
- return hum * 100;
+ return hum * 100 / 5;
}
static u16 hdc3020_thresh_set_temp(int s_temp, u16 curr_thresh)
@@ -455,8 +464,8 @@ int hdc3020_thresh_clr(s64 s_thresh, s64 s_hyst, enum iio_event_direction dir)
else
s_clr = s_thresh + s_hyst;
- /* Divide by 65535 to get units of micro */
- return div_s64(s_clr, 65535);
+ /* Divide by HDC3020_THRESH_FRACTION to get units of micro */
+ return div_s64(s_clr, HDC3020_THRESH_FRACTION);
}
static int _hdc3020_write_thresh(struct hdc3020_data *data, u16 reg, u16 val)
@@ -507,7 +516,7 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
clr = ret;
/* Scale value to include decimal part into calculations */
- s_val = (val < 0) ? (val * 1000000 - val2) : (val * 1000000 + val2);
+ s_val = (val < 0) ? (val * 1000 - val2) : (val * 1000 + val2);
switch (chan->type) {
case IIO_TEMP:
switch (info) {
@@ -523,7 +532,8 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
/* Calculate old hysteresis */
s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
s_clr = (s64)hdc3020_thresh_get_temp(clr) * 1000000;
- s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
+ s_hyst = div_s64(abs(s_thresh - s_clr),
+ HDC3020_THRESH_FRACTION);
/* Set new threshold */
thresh = reg_val;
/* Set old hysteresis */
@@ -532,16 +542,17 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
case IIO_EV_INFO_HYSTERESIS:
/*
* Function hdc3020_thresh_get_temp returns temperature
- * in degree celsius scaled by 65535. Scale by 1000000
- * to be able to subtract scaled hysteresis value.
+ * in degree celsius scaled by HDC3020_THRESH_FRACTION.
+ * Scale by 1000000 to be able to subtract scaled
+ * hysteresis value.
*/
s_thresh = (s64)hdc3020_thresh_get_temp(thresh) * 1000000;
/*
* Units of s_val are in micro degree celsius, scale by
- * 65535 to get same units as s_thresh.
+ * HDC3020_THRESH_FRACTION to get same units as s_thresh.
*/
s_val = min(abs(s_val), HDC3020_MAX_TEMP_HYST_MICRO);
- s_hyst = (s64)s_val * 65535;
+ s_hyst = (s64)s_val * HDC3020_THRESH_FRACTION;
s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
s_clr = max(s_clr, HDC3020_MIN_TEMP_MICRO);
s_clr = min(s_clr, HDC3020_MAX_TEMP_MICRO);
@@ -565,7 +576,8 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
/* Calculate old hysteresis */
s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
s_clr = (s64)hdc3020_thresh_get_hum(clr) * 1000000;
- s_hyst = div_s64(abs(s_thresh - s_clr), 65535);
+ s_hyst = div_s64(abs(s_thresh - s_clr),
+ HDC3020_THRESH_FRACTION);
/* Set new threshold */
thresh = reg_val;
/* Try to set old hysteresis */
@@ -574,15 +586,16 @@ static int hdc3020_write_thresh(struct iio_dev *indio_dev,
case IIO_EV_INFO_HYSTERESIS:
/*
* Function hdc3020_thresh_get_hum returns relative
- * humidity in percent scaled by 65535. Scale by 1000000
- * to be able to subtract scaled hysteresis value.
+ * humidity in percent scaled by HDC3020_THRESH_FRACTION.
+ * Scale by 1000000 to be able to subtract scaled
+ * hysteresis value.
*/
s_thresh = (s64)hdc3020_thresh_get_hum(thresh) * 1000000;
/*
- * Units of s_val are in micro percent, scale by 65535
- * to get same units as s_thresh.
+ * Units of s_val are in micro percent, scale by
+ * HDC3020_THRESH_FRACTION to get same units as s_thresh.
*/
- s_hyst = (s64)s_val * 65535;
+ s_hyst = (s64)s_val * HDC3020_THRESH_FRACTION;
s_clr = hdc3020_thresh_clr(s_thresh, s_hyst, dir);
s_clr = max(s_clr, 0);
s_clr = min(s_clr, HDC3020_MAX_HUM_MICRO);
@@ -630,7 +643,7 @@ static int hdc3020_read_thresh(struct iio_dev *indio_dev,
thresh = hdc3020_thresh_get_temp(ret);
switch (info) {
case IIO_EV_INFO_VALUE:
- *val = thresh;
+ *val = thresh * MILLI;
break;
case IIO_EV_INFO_HYSTERESIS:
ret = hdc3020_read_be16(data, reg_clr);
@@ -638,18 +651,18 @@ static int hdc3020_read_thresh(struct iio_dev *indio_dev,
return ret;
clr = hdc3020_thresh_get_temp(ret);
- *val = abs(thresh - clr);
+ *val = abs(thresh - clr) * MILLI;
break;
default:
return -EOPNOTSUPP;
}
- *val2 = 65535;
+ *val2 = HDC3020_THRESH_FRACTION;
return IIO_VAL_FRACTIONAL;
case IIO_HUMIDITYRELATIVE:
thresh = hdc3020_thresh_get_hum(ret);
switch (info) {
case IIO_EV_INFO_VALUE:
- *val = thresh;
+ *val = thresh * MILLI;
break;
case IIO_EV_INFO_HYSTERESIS:
ret = hdc3020_read_be16(data, reg_clr);
@@ -657,12 +670,12 @@ static int hdc3020_read_thresh(struct iio_dev *indio_dev,
return ret;
clr = hdc3020_thresh_get_hum(ret);
- *val = abs(thresh - clr);
+ *val = abs(thresh - clr) * MILLI;
break;
default:
return -EOPNOTSUPP;
}
- *val2 = 65535;
+ *val2 = HDC3020_THRESH_FRACTION;
return IIO_VAL_FRACTIONAL;
default:
return -EOPNOTSUPP;
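The HDC3020_THRESH_FRACTION rework trades the raw 65535 scale for
65535 / 5 = 13107 because the new "* MILLI" reporting would otherwise
overflow a 32-bit int. A worked bound using the driver's stated maxima:

	/*
	 * Temperature: up to ~130 degC. Scaled by 65535:
	 *	130 * 65535 ~= 8.5e6;  * 1000 (MILLI) ~= 8.5e9 > INT_MAX.
	 * Scaled by 13107 instead:
	 *	130 * 13107 ~= 1.7e6;  * 1000 ~= 1.7e9 < INT_MAX (~2.15e9).
	 * The matching /5 in hdc3020_thresh_get_temp()/_get_hum() keeps
	 * the IIO_VAL_FRACTIONAL pair (*val, *val2 = 13107) exact.
	 */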
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 15612f0f189b..7e0181c27bb6 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -109,6 +109,7 @@ config KMX61
be called kmx61.
source "drivers/iio/imu/inv_icm42600/Kconfig"
+source "drivers/iio/imu/inv_icm45600/Kconfig"
source "drivers/iio/imu/inv_mpu6050/Kconfig"
config SMI240
@@ -124,6 +125,7 @@ config SMI240
This driver can also be built as a module. If so, the module will be
called smi240.
+source "drivers/iio/imu/smi330/Kconfig"
source "drivers/iio/imu/st_lsm6dsx/Kconfig"
source "drivers/iio/imu/st_lsm9ds0/Kconfig"
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index e901aea498d3..13fb7846e9c9 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -25,11 +25,13 @@ obj-$(CONFIG_FXOS8700_I2C) += fxos8700_i2c.o
obj-$(CONFIG_FXOS8700_SPI) += fxos8700_spi.o
obj-y += inv_icm42600/
+obj-y += inv_icm45600/
obj-y += inv_mpu6050/
obj-$(CONFIG_KMX61) += kmx61.o
obj-$(CONFIG_SMI240) += smi240.o
+obj-y += smi330/
obj-y += st_lsm6dsx/
obj-y += st_lsm9ds0/
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 924395b7e3b4..ab39bea1e729 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -1930,7 +1930,6 @@ static int adis16475_config_irq_pin(struct adis16475 *st)
return 0;
}
-
static int adis16475_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
diff --git a/drivers/iio/imu/bmi270/bmi270_core.c b/drivers/iio/imu/bmi270/bmi270_core.c
index 519f1c9d466d..2ad230788532 100644
--- a/drivers/iio/imu/bmi270/bmi270_core.c
+++ b/drivers/iio/imu/bmi270/bmi270_core.c
@@ -31,6 +31,8 @@
#define BMI270_INT_STATUS_0_REG 0x1c
#define BMI270_INT_STATUS_0_STEP_CNT_MSK BIT(1)
+#define BMI270_INT_STATUS_0_NOMOTION_MSK BIT(5)
+#define BMI270_INT_STATUS_0_MOTION_MSK BIT(6)
#define BMI270_INT_STATUS_1_REG 0x1d
#define BMI270_INT_STATUS_1_ACC_GYR_DRDY_MSK GENMASK(7, 6)
@@ -81,6 +83,8 @@
#define BMI270_INT1_MAP_FEAT_REG 0x56
#define BMI270_INT2_MAP_FEAT_REG 0x57
#define BMI270_INT_MAP_FEAT_STEP_CNT_WTRMRK_MSK BIT(1)
+#define BMI270_INT_MAP_FEAT_NOMOTION_MSK BIT(5)
+#define BMI270_INT_MAP_FEAT_ANYMOTION_MSK BIT(6)
#define BMI270_INT_MAP_DATA_REG 0x58
#define BMI270_INT_MAP_DATA_DRDY_INT1_MSK BIT(2)
@@ -106,6 +110,25 @@
#define BMI270_STEP_SC26_RST_CNT_MSK BIT(10)
#define BMI270_STEP_SC26_EN_CNT_MSK BIT(12)
+#define BMI270_FEAT_MOTION_DURATION_MSK GENMASK(12, 0)
+#define BMI270_FEAT_MOTION_X_EN_MSK BIT(13)
+#define BMI270_FEAT_MOTION_Y_EN_MSK BIT(14)
+#define BMI270_FEAT_MOTION_Z_EN_MSK BIT(15)
+#define BMI270_FEAT_MOTION_XYZ_EN_MSK GENMASK(15, 13)
+#define BMI270_FEAT_MOTION_THRESHOLD_MSK GENMASK(10, 0)
+#define BMI270_FEAT_MOTION_OUT_CONF_MSK GENMASK(14, 11)
+#define BMI270_FEAT_MOTION_ENABLE_MSK BIT(15)
+
+#define BMI270_MOTION_XYZ_MSK GENMASK(2, 0)
+
+/* See pages 92 and 93 of the datasheet */
+#define BMI270_MOTION_THRES_FULL_SCALE GENMASK(10, 0)
+#define BMI270_MOTION_DURAT_SCALE 50
+#define BMI270_MOTION_DURAT_MAX 162
+
+/* 9.81 m/s^2 expressed in micro m/s^2 */
+#define BMI270_G_MICRO_M_S_2 9810000
+
/* See datasheet section 4.6.14, Temperature Sensor */
#define BMI270_TEMP_OFFSET 11776
#define BMI270_TEMP_SCALE 1953125
@@ -114,6 +137,11 @@
#define BMI270_STEP_COUNTER_FACTOR 20
#define BMI270_STEP_COUNTER_MAX 20460
+#define BMI270_INT_MICRO_TO_RAW(val, val2, scale) \
+ ((val) * (scale) + ((val2) * (scale)) / MEGA)
+#define BMI270_RAW_TO_MICRO(raw, scale) \
+ ((((raw) % (scale)) * MEGA) / (scale))
+
#define BMI260_INIT_DATA_FILE "bmi260-init-data.fw"
#define BMI270_INIT_DATA_FILE "bmi270-init-data.fw"
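The two conversion macros above translate between an (integer seconds, microseconds) pair and duration register ticks; with BMI270_MOTION_DURAT_SCALE = 50 the resolution is 20 ms, which matches the [0.0 0.02 162.0] period range advertised further down. A minimal userspace round-trip check, macros copied from the patch:

#include <stdio.h>

#define MEGA 1000000
#define INT_MICRO_TO_RAW(val, val2, scale) \
	((val) * (scale) + ((val2) * (scale)) / MEGA)
#define RAW_TO_MICRO(raw, scale) \
	((((raw) % (scale)) * MEGA) / (scale))

int main(void)
{
	int raw = INT_MICRO_TO_RAW(1, 500000, 50);	/* 1.5 s -> 75 ticks */
	int sec = raw / 50;				/* 1 */
	int micro = RAW_TO_MICRO(raw, 50);		/* 500000 */

	printf("raw=%d -> %d.%06d s\n", raw, sec, micro);
	return 0;
}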
@@ -309,6 +337,13 @@ static const struct bmi270_odr_item bmi270_odr_table[] = {
};
enum bmi270_feature_reg_id {
+ /* Page 1 registers */
+ BMI270_ANYMO1_REG,
+ BMI270_ANYMO2_REG,
+ /* Page 2 registers */
+ BMI270_NOMO1_REG,
+ BMI270_NOMO2_REG,
+ /* Page 6 registers */
BMI270_SC_26_REG,
};
@@ -318,6 +353,22 @@ struct bmi270_feature_reg {
};
static const struct bmi270_feature_reg bmi270_feature_regs[] = {
+ [BMI270_ANYMO1_REG] = {
+ .page = 1,
+ .addr = 0x3c,
+ },
+ [BMI270_ANYMO2_REG] = {
+ .page = 1,
+ .addr = 0x3e,
+ },
+ [BMI270_NOMO1_REG] = {
+ .page = 2,
+ .addr = 0x30,
+ },
+ [BMI270_NOMO2_REG] = {
+ .page = 2,
+ .addr = 0x32,
+ },
[BMI270_SC_26_REG] = {
.page = 6,
.addr = 0x32,
@@ -439,6 +490,121 @@ static int bmi270_step_wtrmrk_en(struct bmi270_data *data, bool state)
state));
}
+static int bmi270_motion_reg(enum iio_event_type type, enum iio_event_info info)
+{
+ switch (info) {
+ case IIO_EV_INFO_PERIOD:
+ switch (type) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ return BMI270_ANYMO1_REG;
+ case IIO_EV_TYPE_ROC:
+ return BMI270_NOMO1_REG;
+ default:
+ return -EINVAL;
+ }
+ case IIO_EV_INFO_VALUE:
+ switch (type) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ return BMI270_ANYMO2_REG;
+ case IIO_EV_TYPE_ROC:
+ return BMI270_NOMO2_REG;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bmi270_anymotion_event_en(struct bmi270_data *data,
+ struct iio_chan_spec const *chan,
+ bool state)
+{
+ u16 axis_msk, axis_field_val, regval;
+ int ret, irq_reg;
+ bool axis_en;
+
+ irq_reg = bmi270_int_map_reg(data->irq_pin);
+ if (irq_reg < 0)
+ return irq_reg;
+
+ guard(mutex)(&data->mutex);
+
+ ret = bmi270_read_feature_reg(data, BMI270_ANYMO1_REG, &regval);
+ if (ret)
+ return ret;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ axis_msk = BMI270_FEAT_MOTION_X_EN_MSK;
+ axis_field_val = FIELD_PREP(BMI270_FEAT_MOTION_X_EN_MSK, state);
+ axis_en = FIELD_GET(BMI270_FEAT_MOTION_Y_EN_MSK, regval) |
+ FIELD_GET(BMI270_FEAT_MOTION_Z_EN_MSK, regval);
+ break;
+ case IIO_MOD_Y:
+ axis_msk = BMI270_FEAT_MOTION_Y_EN_MSK;
+ axis_field_val = FIELD_PREP(BMI270_FEAT_MOTION_Y_EN_MSK, state);
+ axis_en = FIELD_GET(BMI270_FEAT_MOTION_X_EN_MSK, regval) |
+ FIELD_GET(BMI270_FEAT_MOTION_Z_EN_MSK, regval);
+ break;
+ case IIO_MOD_Z:
+ axis_msk = BMI270_FEAT_MOTION_Z_EN_MSK;
+ axis_field_val = FIELD_PREP(BMI270_FEAT_MOTION_Z_EN_MSK, state);
+ axis_en = FIELD_GET(BMI270_FEAT_MOTION_X_EN_MSK, regval) |
+ FIELD_GET(BMI270_FEAT_MOTION_Y_EN_MSK, regval);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = bmi270_update_feature_reg(data, BMI270_ANYMO1_REG, axis_msk,
+ axis_field_val);
+ if (ret)
+ return ret;
+
+ ret = bmi270_update_feature_reg(data, BMI270_ANYMO2_REG,
+ BMI270_FEAT_MOTION_ENABLE_MSK,
+ FIELD_PREP(BMI270_FEAT_MOTION_ENABLE_MSK,
+ state || axis_en));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(data->regmap, irq_reg,
+ BMI270_INT_MAP_FEAT_ANYMOTION_MSK,
+ FIELD_PREP(BMI270_INT_MAP_FEAT_ANYMOTION_MSK,
+ state || axis_en));
+}
+
+static int bmi270_nomotion_event_en(struct bmi270_data *data, bool state)
+{
+ int ret, irq_reg;
+
+ irq_reg = bmi270_int_map_reg(data->irq_pin);
+ if (irq_reg < 0)
+ return irq_reg;
+
+ guard(mutex)(&data->mutex);
+
+ ret = bmi270_update_feature_reg(data, BMI270_NOMO1_REG,
+ BMI270_FEAT_MOTION_XYZ_EN_MSK,
+ FIELD_PREP(BMI270_FEAT_MOTION_XYZ_EN_MSK,
+ state ? BMI270_MOTION_XYZ_MSK : 0));
+ if (ret)
+ return ret;
+
+ ret = bmi270_update_feature_reg(data, BMI270_NOMO2_REG,
+ BMI270_FEAT_MOTION_ENABLE_MSK,
+ FIELD_PREP(BMI270_FEAT_MOTION_ENABLE_MSK,
+ state));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(data->regmap, irq_reg,
+ BMI270_INT_MAP_FEAT_NOMOTION_MSK,
+ FIELD_PREP(BMI270_INT_MAP_FEAT_NOMOTION_MSK,
+ state));
+}
+
static int bmi270_set_scale(struct bmi270_data *data, int chan_type, int uscale)
{
int i;
@@ -479,8 +645,6 @@ static int bmi270_get_scale(struct bmi270_data *data, int chan_type, int *scale,
unsigned int val;
struct bmi270_scale_item bmi270_scale_item;
- guard(mutex)(&data->mutex);
-
switch (chan_type) {
case IIO_ACCEL:
ret = regmap_read(data->regmap, BMI270_ACC_CONF_RANGE_REG, &val);
@@ -614,6 +778,20 @@ static irqreturn_t bmi270_irq_thread_handler(int irq, void *private)
if (FIELD_GET(BMI270_INT_STATUS_1_ACC_GYR_DRDY_MSK, status1))
iio_trigger_poll_nested(data->trig);
+ if (FIELD_GET(BMI270_INT_STATUS_0_MOTION_MSK, status0))
+ iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
+ if (FIELD_GET(BMI270_INT_STATUS_0_NOMOTION_MSK, status0))
+ iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_DIR_RISING),
+ timestamp);
+
if (FIELD_GET(BMI270_INT_STATUS_0_STEP_CNT_MSK, status0))
iio_push_event(indio_dev, IIO_UNMOD_EVENT_CODE(IIO_STEPS, 0,
IIO_EV_TYPE_CHANGE,
@@ -827,6 +1005,39 @@ static int bmi270_read_avail(struct iio_dev *indio_dev,
}
}
+static ssize_t in_accel_value_available_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bmi270_data *data = iio_priv(indio_dev);
+ int ret, scale, uscale;
+ unsigned int step, max;
+
+ ret = bmi270_get_scale(data, IIO_ACCEL, &scale, &uscale);
+ if (ret)
+ return ret;
+
+ max = BMI270_G_MICRO_M_S_2 / uscale;
+ step = max / BMI270_MOTION_THRES_FULL_SCALE;
+
+ return sysfs_emit(buf, "[0 %u %u]\n", step, max);
+}
+
+static IIO_DEVICE_ATTR_RO(in_accel_value_available, 0);
+
+static IIO_CONST_ATTR(in_accel_period_available, "[0.0 0.02 162.0]");
+
+static struct attribute *bmi270_event_attributes[] = {
+ &iio_dev_attr_in_accel_value_available.dev_attr.attr,
+ &iio_const_attr_in_accel_period_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group bmi270_event_attribute_group = {
+ .attrs = bmi270_event_attributes,
+};
+
static int bmi270_write_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
enum iio_event_type type,
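The two event attributes above expose the tunable ranges in the IIO "[min step max]" form: the period range follows from the 20 ms duration resolution and the 162 s maximum, while the threshold range depends on the current accelerometer scale. A minimal sketch of the value computation, assuming a hypothetical ~2g scale of 598 micro m/s^2 per LSB:

#include <stdio.h>

#define G_MICRO_M_S_2    9810000U	/* BMI270_G_MICRO_M_S_2 */
#define THRES_FULL_SCALE 2047U		/* GENMASK(10, 0) */

int main(void)
{
	unsigned int uscale = 598;	/* hypothetical ~2g range */
	unsigned int max = G_MICRO_M_S_2 / uscale;	/* 16404 raw LSB = 1 g */
	unsigned int step = max / THRES_FULL_SCALE;	/* 8 */

	printf("[0 %u %u]\n", step, max);
	return 0;
}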
@@ -835,6 +1046,10 @@ static int bmi270_write_event_config(struct iio_dev *indio_dev,
struct bmi270_data *data = iio_priv(indio_dev);
switch (type) {
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ return bmi270_anymotion_event_en(data, chan, state);
+ case IIO_EV_TYPE_ROC:
+ return bmi270_nomotion_event_en(data, state);
case IIO_EV_TYPE_CHANGE:
return bmi270_step_wtrmrk_en(data, state);
default:
@@ -848,21 +1063,55 @@ static int bmi270_read_event_config(struct iio_dev *indio_dev,
enum iio_event_direction dir)
{
struct bmi270_data *data = iio_priv(indio_dev);
+ bool feat_en, axis_en;
int ret, reg, regval;
+ u16 motion_reg;
guard(mutex)(&data->mutex);
+ reg = bmi270_int_map_reg(data->irq_pin);
+ if (reg < 0)
+ return reg;
+
+ ret = regmap_read(data->regmap, reg, &regval);
+ if (ret)
+ return ret;
+
switch (chan->type) {
case IIO_STEPS:
- reg = bmi270_int_map_reg(data->irq_pin);
- if (reg)
- return reg;
-
- ret = regmap_read(data->regmap, reg, &regval);
- if (ret)
- return ret;
- return FIELD_GET(BMI270_INT_MAP_FEAT_STEP_CNT_WTRMRK_MSK,
- regval) ? 1 : 0;
+ return !!FIELD_GET(BMI270_INT_MAP_FEAT_STEP_CNT_WTRMRK_MSK, regval);
+ case IIO_ACCEL:
+ switch (type) {
+ case IIO_EV_TYPE_ROC:
+ return !!FIELD_GET(BMI270_INT_MAP_FEAT_NOMOTION_MSK, regval);
+ case IIO_EV_TYPE_MAG_ADAPTIVE:
+ ret = bmi270_read_feature_reg(data, BMI270_ANYMO1_REG,
+ &motion_reg);
+ if (ret)
+ return ret;
+
+ feat_en = FIELD_GET(BMI270_INT_MAP_FEAT_ANYMOTION_MSK,
+ regval);
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ axis_en = FIELD_GET(BMI270_FEAT_MOTION_X_EN_MSK,
+ motion_reg);
+ break;
+ case IIO_MOD_Y:
+ axis_en = FIELD_GET(BMI270_FEAT_MOTION_Y_EN_MSK,
+ motion_reg);
+ break;
+ case IIO_MOD_Z:
+ axis_en = FIELD_GET(BMI270_FEAT_MOTION_Z_EN_MSK,
+ motion_reg);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return axis_en && feat_en;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -876,20 +1125,50 @@ static int bmi270_write_event_value(struct iio_dev *indio_dev,
int val, int val2)
{
struct bmi270_data *data = iio_priv(indio_dev);
- unsigned int raw;
+ unsigned int raw, mask, regval;
+ int ret, reg, scale, uscale;
+ u64 tmp;
guard(mutex)(&data->mutex);
- switch (type) {
- case IIO_EV_TYPE_CHANGE:
+ if (type == IIO_EV_TYPE_CHANGE) {
if (!in_range(val, 0, BMI270_STEP_COUNTER_MAX + 1))
return -EINVAL;
raw = val / BMI270_STEP_COUNTER_FACTOR;
- return bmi270_update_feature_reg(data, BMI270_SC_26_REG,
- BMI270_STEP_SC26_WTRMRK_MSK,
- FIELD_PREP(BMI270_STEP_SC26_WTRMRK_MSK,
- raw));
+ mask = BMI270_STEP_SC26_WTRMRK_MSK;
+ regval = FIELD_PREP(BMI270_STEP_SC26_WTRMRK_MSK, raw);
+ return bmi270_update_feature_reg(data, BMI270_SC_26_REG, mask,
+ regval);
+ }
+
+ reg = bmi270_motion_reg(type, info);
+ if (reg < 0)
+ return reg;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = bmi270_get_scale(data, IIO_ACCEL, &scale, &uscale);
+ if (ret)
+ return ret;
+
+ if (!in_range(val, 0, (BMI270_G_MICRO_M_S_2 / uscale) + 1))
+ return -EINVAL;
+
+ tmp = (u64)val * BMI270_MOTION_THRES_FULL_SCALE * uscale;
+ raw = DIV_ROUND_CLOSEST_ULL(tmp, BMI270_G_MICRO_M_S_2);
+ mask = BMI270_FEAT_MOTION_THRESHOLD_MSK;
+ regval = FIELD_PREP(BMI270_FEAT_MOTION_THRESHOLD_MSK, raw);
+ return bmi270_update_feature_reg(data, reg, mask, regval);
+ case IIO_EV_INFO_PERIOD:
+ if (!in_range(val, 0, BMI270_MOTION_DURAT_MAX + 1))
+ return -EINVAL;
+
+ raw = BMI270_INT_MICRO_TO_RAW(val, val2,
+ BMI270_MOTION_DURAT_SCALE);
+ mask = BMI270_FEAT_MOTION_DURATION_MSK;
+ regval = FIELD_PREP(BMI270_FEAT_MOTION_DURATION_MSK, raw);
+ return bmi270_update_feature_reg(data, reg, mask, regval);
default:
return -EINVAL;
}
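The IIO_EV_INFO_VALUE branch above maps a threshold given in raw accelerometer LSB onto an 11-bit fraction of 1 g. A worked sketch using the same hypothetical ~2g scale as before; a request of roughly 0.5 g lands near half of the register's full scale:

#include <stdint.h>
#include <stdio.h>

#define G_MICRO_M_S_2    9810000ULL
#define THRES_FULL_SCALE 2047ULL

int main(void)
{
	uint64_t uscale = 598;	/* hypothetical ~2g range */
	uint64_t val = 8202;	/* raw LSB, about 0.5 g */
	uint64_t tmp = val * THRES_FULL_SCALE * uscale;
	uint64_t raw = (tmp + G_MICRO_M_S_2 / 2) / G_MICRO_M_S_2;

	printf("raw=%llu of 2047\n", (unsigned long long)raw);	/* ~1023 */
	return 0;
}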
@@ -903,14 +1182,14 @@ static int bmi270_read_event_value(struct iio_dev *indio_dev,
int *val, int *val2)
{
struct bmi270_data *data = iio_priv(indio_dev);
+ int ret, reg, scale, uscale;
unsigned int raw;
u16 regval;
- int ret;
+ u64 tmp;
guard(mutex)(&data->mutex);
- switch (type) {
- case IIO_EV_TYPE_CHANGE:
+ if (type == IIO_EV_TYPE_CHANGE) {
ret = bmi270_read_feature_reg(data, BMI270_SC_26_REG, &regval);
if (ret)
return ret;
@@ -918,6 +1197,36 @@ static int bmi270_read_event_value(struct iio_dev *indio_dev,
raw = FIELD_GET(BMI270_STEP_SC26_WTRMRK_MSK, regval);
*val = raw * BMI270_STEP_COUNTER_FACTOR;
return IIO_VAL_INT;
+ }
+
+ reg = bmi270_motion_reg(type, info);
+ if (reg < 0)
+ return reg;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ ret = bmi270_read_feature_reg(data, reg, &regval);
+ if (ret)
+ return ret;
+
+ ret = bmi270_get_scale(data, IIO_ACCEL, &scale, &uscale);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMI270_FEAT_MOTION_THRESHOLD_MSK, regval);
+ tmp = (u64)raw * BMI270_G_MICRO_M_S_2;
+ *val = DIV_ROUND_CLOSEST_ULL(tmp,
+ BMI270_MOTION_THRES_FULL_SCALE * uscale);
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_PERIOD:
+ ret = bmi270_read_feature_reg(data, reg, &regval);
+ if (ret)
+ return ret;
+
+ raw = FIELD_GET(BMI270_FEAT_MOTION_DURATION_MSK, regval);
+ *val = raw / BMI270_MOTION_DURAT_SCALE;
+ *val2 = BMI270_RAW_TO_MICRO(raw, BMI270_MOTION_DURAT_SCALE);
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -929,6 +1238,20 @@ static const struct iio_event_spec bmi270_step_wtrmrk_event = {
.mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
};
+static const struct iio_event_spec bmi270_anymotion_event = {
+ .type = IIO_EV_TYPE_MAG_ADAPTIVE,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_PERIOD),
+};
+
+static const struct iio_event_spec bmi270_nomotion_event = {
+ .type = IIO_EV_TYPE_ROC,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_PERIOD),
+};
+
static const struct iio_info bmi270_info = {
.read_raw = bmi270_read_raw,
.write_raw = bmi270_write_raw,
@@ -937,6 +1260,7 @@ static const struct iio_info bmi270_info = {
.read_event_config = bmi270_read_event_config,
.write_event_value = bmi270_write_event_value,
.read_event_value = bmi270_read_event_value,
+ .event_attrs = &bmi270_event_attribute_group,
};
#define BMI270_ACCEL_CHANNEL(_axis) { \
@@ -956,6 +1280,8 @@ static const struct iio_info bmi270_info = {
.storagebits = 16, \
.endianness = IIO_LE, \
}, \
+ .event_spec = &bmi270_anymotion_event, \
+ .num_event_specs = 1, \
}
#define BMI270_ANG_VEL_CHANNEL(_axis) { \
@@ -1000,6 +1326,14 @@ static const struct iio_chan_spec bmi270_channels[] = {
.num_event_specs = 1,
},
IIO_CHAN_SOFT_TIMESTAMP(BMI270_SCAN_TIMESTAMP),
+ {
+ .type = IIO_ACCEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_X_AND_Y_AND_Z,
+ .scan_index = -1, /* Fake channel */
+ .event_spec = &bmi270_nomotion_event,
+ .num_event_specs = 1,
+ },
};
static int bmi270_int_pin_config(struct bmi270_data *data,
@@ -1107,6 +1441,13 @@ static int bmi270_trigger_probe(struct bmi270_data *data,
return dev_err_probe(data->dev, ret,
"Trigger registration failed\n");
+ /* Disable axes for motion events */
+ ret = bmi270_update_feature_reg(data, BMI270_ANYMO1_REG,
+ BMI270_FEAT_MOTION_XYZ_EN_MSK,
+ FIELD_PREP(BMI270_FEAT_MOTION_XYZ_EN_MSK, 0));
+ if (ret)
+ return ret;
+
data->irq_pin = irq_pin;
return 0;
diff --git a/drivers/iio/imu/bmi270/bmi270_i2c.c b/drivers/iio/imu/bmi270/bmi270_i2c.c
index c77839b03a96..b909a421ad01 100644
--- a/drivers/iio/imu/bmi270/bmi270_i2c.c
+++ b/drivers/iio/imu/bmi270/bmi270_i2c.c
@@ -41,6 +41,8 @@ static const struct i2c_device_id bmi270_i2c_id[] = {
static const struct acpi_device_id bmi270_acpi_match[] = {
/* GPD Win Mini, Aya Neo AIR Pro, OXP Mini Pro, etc. */
{ "BMI0160", (kernel_ulong_t)&bmi260_chip_info },
+ /* GPD Win Max 2 2023 (since BIOS v0.40), etc. */
+ { "BMI0260", (kernel_ulong_t)&bmi260_chip_info },
{ }
};
diff --git a/drivers/iio/imu/bmi270/bmi270_spi.c b/drivers/iio/imu/bmi270/bmi270_spi.c
index 19dd7734f9d0..80c9fa1d685a 100644
--- a/drivers/iio/imu/bmi270/bmi270_spi.c
+++ b/drivers/iio/imu/bmi270/bmi270_spi.c
@@ -60,7 +60,7 @@ static int bmi270_spi_probe(struct spi_device *spi)
&bmi270_spi_regmap_config);
if (IS_ERR(regmap))
return dev_err_probe(dev, PTR_ERR(regmap),
- "Failed to init i2c regmap");
+ "Failed to init spi regmap\n");
return bmi270_core_probe(dev, regmap, chip_info);
}
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index fc54d464a3ae..6bcb9a436581 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -2112,8 +2112,7 @@ int bmi323_core_probe(struct device *dev)
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "Failed to allocate device\n");
+ return -ENOMEM;
ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
regulator_names);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
index 1430ab4f1dea..c8b48a5c5ed0 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h
@@ -167,7 +167,6 @@ struct inv_icm42600_state {
enum inv_icm42600_chip chip;
const char *name;
struct regmap *map;
- struct regulator *vdd_supply;
struct regulator *vddio_supply;
int irq;
struct iio_mount_matrix orientation;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 7a28051330b7..54760d8f92a2 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -315,7 +315,6 @@ static int inv_icm42600_accel_read_sensor(struct iio_dev *indio_dev,
ret = -EINVAL;
exit:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -562,12 +561,10 @@ static int inv_icm42600_accel_write_scale(struct iio_dev *indio_dev,
conf.fs = idx / 2;
pm_runtime_get_sync(dev);
- mutex_lock(&st->lock);
- ret = inv_icm42600_set_accel_conf(st, &conf, NULL);
+ scoped_guard(mutex, &st->lock)
+ ret = inv_icm42600_set_accel_conf(st, &conf, NULL);
- mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -675,7 +672,6 @@ static int inv_icm42600_accel_write_odr(struct iio_dev *indio_dev,
out_unlock:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -727,7 +723,6 @@ static int inv_icm42600_accel_read_offset(struct inv_icm42600_state *st,
memcpy(data, st->buffer, sizeof(data));
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (ret)
return ret;
@@ -865,7 +860,6 @@ static int inv_icm42600_accel_write_offset(struct inv_icm42600_state *st,
out_unlock:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -991,16 +985,11 @@ static int inv_icm42600_accel_hwfifo_set_watermark(struct iio_dev *indio_dev,
unsigned int val)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
st->fifo.watermark.accel = val;
- ret = inv_icm42600_buffer_update_watermark(st);
-
- mutex_unlock(&st->lock);
-
- return ret;
+ return inv_icm42600_buffer_update_watermark(st);
}
static int inv_icm42600_accel_hwfifo_flush(struct iio_dev *indio_dev,
@@ -1012,15 +1001,13 @@ static int inv_icm42600_accel_hwfifo_flush(struct iio_dev *indio_dev,
if (count == 0)
return 0;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = inv_icm42600_buffer_hwfifo_flush(st, count);
- if (!ret)
- ret = st->fifo.nb.accel;
-
- mutex_unlock(&st->lock);
+ if (ret)
+ return ret;
- return ret;
+ return st->fifo.nb.accel;
}
static int inv_icm42600_accel_read_event_config(struct iio_dev *indio_dev,
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index 7c4ed981db04..ada968be954d 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -5,6 +5,7 @@
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
@@ -100,7 +101,7 @@ ssize_t inv_icm42600_fifo_decode_packet(const void *packet, const void **accel,
void inv_icm42600_buffer_update_fifo_period(struct inv_icm42600_state *st)
{
- u32 period_gyro, period_accel, period;
+ u32 period_gyro, period_accel;
if (st->fifo.en & INV_ICM42600_SENSOR_GYRO)
period_gyro = inv_icm42600_odr_to_period(st->conf.gyro.odr);
@@ -112,12 +113,7 @@ void inv_icm42600_buffer_update_fifo_period(struct inv_icm42600_state *st)
else
period_accel = U32_MAX;
- if (period_gyro <= period_accel)
- period = period_gyro;
- else
- period = period_accel;
-
- st->fifo.period = period;
+ st->fifo.period = min(period_gyro, period_accel);
}
int inv_icm42600_buffer_set_fifo_en(struct inv_icm42600_state *st,
@@ -204,7 +200,7 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
{
size_t packet_size, wm_size;
unsigned int wm_gyro, wm_accel, watermark;
- u32 period_gyro, period_accel, period;
+ u32 period_gyro, period_accel;
u32 latency_gyro, latency_accel, latency;
bool restore;
__le16 raw_wm;
@@ -237,13 +233,8 @@ int inv_icm42600_buffer_update_watermark(struct inv_icm42600_state *st)
latency = latency_gyro - (latency_accel % latency_gyro);
else
latency = latency_accel - (latency_gyro % latency_accel);
- /* use the shortest period */
- if (period_gyro <= period_accel)
- period = period_gyro;
- else
- period = period_accel;
/* all this works because the periods are multiples of each other */
- watermark = latency / period;
+ watermark = latency / min(period_gyro, period_accel);
if (watermark < 1)
watermark = 1;
/* update effective watermark */
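A worked example of the watermark arithmetic above, assuming each sensor's latency is watermark times period and that the condition elided just above the hunk selects the smaller latency as the base. Gyro at 100 Hz (10 ms period) with a watermark of 3 and accel at 50 Hz (20 ms) with a watermark of 2 give latencies of 30 ms and 40 ms:

#include <stdio.h>

static unsigned int combined_latency(unsigned int lg, unsigned int la)
{
	/* assumption: the elided condition picks the smaller latency */
	if (lg <= la)
		return lg - (la % lg);
	return la - (lg % la);
}

int main(void)
{
	unsigned int period_gyro = 10, period_accel = 20;	/* ms */
	unsigned int latency = combined_latency(30, 40);	/* 20 ms */
	unsigned int shortest = period_gyro < period_accel ?
				period_gyro : period_accel;

	printf("watermark=%u\n", latency / shortest);		/* 2 */
	return 0;
}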
@@ -292,9 +283,8 @@ static int inv_icm42600_buffer_preenable(struct iio_dev *indio_dev)
pm_runtime_get_sync(dev);
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
inv_sensors_timestamp_reset(ts);
- mutex_unlock(&st->lock);
return 0;
}
@@ -308,43 +298,39 @@ static int inv_icm42600_buffer_postenable(struct iio_dev *indio_dev)
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
- /* exit if FIFO is already on */
if (st->fifo.on) {
- ret = 0;
- goto out_on;
+ st->fifo.on++;
+ return 0;
}
/* set FIFO threshold interrupt */
ret = regmap_set_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN);
if (ret)
- goto out_unlock;
+ return ret;
/* flush FIFO data */
ret = regmap_write(st->map, INV_ICM42600_REG_SIGNAL_PATH_RESET,
INV_ICM42600_SIGNAL_PATH_RESET_FIFO_FLUSH);
if (ret)
- goto out_unlock;
+ return ret;
/* set FIFO in streaming mode */
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_STREAM);
if (ret)
- goto out_unlock;
+ return ret;
/* workaround: first read of FIFO count after reset is always 0 */
ret = regmap_bulk_read(st->map, INV_ICM42600_REG_FIFO_COUNT, st->buffer, 2);
if (ret)
- goto out_unlock;
+ return ret;
-out_on:
- /* increase FIFO on counter */
st->fifo.on++;
-out_unlock:
- mutex_unlock(&st->lock);
- return ret;
+
+ return 0;
}
static int inv_icm42600_buffer_predisable(struct iio_dev *indio_dev)
@@ -352,38 +338,34 @@ static int inv_icm42600_buffer_predisable(struct iio_dev *indio_dev)
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
- /* exit if there are several sensors using the FIFO */
if (st->fifo.on > 1) {
- ret = 0;
- goto out_off;
+ st->fifo.on--;
+ return 0;
}
/* set FIFO in bypass mode */
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_BYPASS);
if (ret)
- goto out_unlock;
+ return ret;
/* flush FIFO data */
ret = regmap_write(st->map, INV_ICM42600_REG_SIGNAL_PATH_RESET,
INV_ICM42600_SIGNAL_PATH_RESET_FIFO_FLUSH);
if (ret)
- goto out_unlock;
+ return ret;
/* disable FIFO threshold interrupt */
ret = regmap_clear_bits(st->map, INV_ICM42600_REG_INT_SOURCE0,
INV_ICM42600_INT_SOURCE0_FIFO_THS_INT1_EN);
if (ret)
- goto out_unlock;
+ return ret;
-out_off:
- /* decrease FIFO on counter */
st->fifo.on--;
-out_unlock:
- mutex_unlock(&st->lock);
- return ret;
+
+ return 0;
}
static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
@@ -439,7 +421,6 @@ out_unlock:
if (sleep)
msleep(sleep);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
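The pm_runtime_mark_last_busy() deletions in this and the surrounding files assume a kernel where pm_runtime_put_autosuspend() updates the last-busy timestamp itself, making the explicit call redundant. A minimal sketch of the resulting idiom:

#include <linux/pm_runtime.h>

static void demo_put(struct device *dev)
{
	/* records the last-busy timestamp and drops the usage count */
	pm_runtime_put_autosuspend(dev);
}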
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
index a4d42e7e2180..76eb22488e5f 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c
@@ -439,18 +439,13 @@ int inv_icm42600_debugfs_reg(struct iio_dev *indio_dev, unsigned int reg,
unsigned int writeval, unsigned int *readval)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
if (readval)
- ret = regmap_read(st->map, reg, readval);
- else
- ret = regmap_write(st->map, reg, writeval);
+ return regmap_read(st->map, reg, readval);
- mutex_unlock(&st->lock);
-
- return ret;
+ return regmap_write(st->map, reg, writeval);
}
static int inv_icm42600_set_conf(struct inv_icm42600_state *st,
@@ -697,34 +692,15 @@ static int inv_icm42600_enable_regulator_vddio(struct inv_icm42600_state *st)
return 0;
}
-static void inv_icm42600_disable_vdd_reg(void *_data)
-{
- struct inv_icm42600_state *st = _data;
- const struct device *dev = regmap_get_device(st->map);
- int ret;
-
- ret = regulator_disable(st->vdd_supply);
- if (ret)
- dev_err(dev, "failed to disable vdd error %d\n", ret);
-}
-
static void inv_icm42600_disable_vddio_reg(void *_data)
{
struct inv_icm42600_state *st = _data;
- const struct device *dev = regmap_get_device(st->map);
- int ret;
-
- ret = regulator_disable(st->vddio_supply);
- if (ret)
- dev_err(dev, "failed to disable vddio error %d\n", ret);
-}
+ struct device *dev = regmap_get_device(st->map);
-static void inv_icm42600_disable_pm(void *_data)
-{
- struct device *dev = _data;
+ if (pm_runtime_status_suspended(dev))
+ return;
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
+ regulator_disable(st->vddio_supply);
}
int inv_icm42600_core_probe(struct regmap *regmap, int chip,
@@ -773,23 +749,17 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip,
return ret;
}
- st->vdd_supply = devm_regulator_get(dev, "vdd");
- if (IS_ERR(st->vdd_supply))
- return PTR_ERR(st->vdd_supply);
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to get vdd regulator\n");
+
+ msleep(INV_ICM42600_POWER_UP_TIME_MS);
st->vddio_supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(st->vddio_supply))
return PTR_ERR(st->vddio_supply);
- ret = regulator_enable(st->vdd_supply);
- if (ret)
- return ret;
- msleep(INV_ICM42600_POWER_UP_TIME_MS);
-
- ret = devm_add_action_or_reset(dev, inv_icm42600_disable_vdd_reg, st);
- if (ret)
- return ret;
-
ret = inv_icm42600_enable_regulator_vddio(st);
if (ret)
return ret;
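devm_regulator_get_enable() bundles get, enable, and automatic disable on driver detach, which is why the separate vdd_supply pointer and the inv_icm42600_disable_vdd_reg() action disappear above. A minimal probe-time sketch; the supply name matches the patch, everything else is illustrative:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int demo_power_up(struct device *dev)
{
	int ret;

	/* get + enable + auto-disable on detach, in one call */
	ret = devm_regulator_get_enable(dev, "vdd");
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get vdd regulator\n");

	return 0;
}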
@@ -824,16 +794,14 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip,
return ret;
/* setup runtime power management */
- ret = pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_set_active_enabled(dev);
if (ret)
return ret;
- pm_runtime_get_noresume(dev);
- pm_runtime_enable(dev);
+
pm_runtime_set_autosuspend_delay(dev, INV_ICM42600_SUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(dev);
- pm_runtime_put(dev);
- return devm_add_action_or_reset(dev, inv_icm42600_disable_pm, dev);
+ return ret;
}
EXPORT_SYMBOL_NS_GPL(inv_icm42600_core_probe, "IIO_ICM42600");
@@ -849,22 +817,20 @@ static int inv_icm42600_suspend(struct device *dev)
int accel_conf;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
st->suspended.gyro = st->conf.gyro.mode;
st->suspended.accel = st->conf.accel.mode;
st->suspended.temp = st->conf.temp_en;
- if (pm_runtime_suspended(dev)) {
- ret = 0;
- goto out_unlock;
- }
+ if (pm_runtime_suspended(dev))
+ return 0;
/* disable FIFO data streaming */
if (st->fifo.on) {
ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG,
INV_ICM42600_FIFO_CONFIG_BYPASS);
if (ret)
- goto out_unlock;
+ return ret;
}
/* keep chip on and wake-up capable if APEX and wakeup on */
@@ -880,7 +846,7 @@ static int inv_icm42600_suspend(struct device *dev)
if (st->apex.wom.enable) {
ret = inv_icm42600_disable_wom(st);
if (ret)
- goto out_unlock;
+ return ret;
}
accel_conf = INV_ICM42600_SENSOR_MODE_OFF;
}
@@ -888,15 +854,13 @@ static int inv_icm42600_suspend(struct device *dev)
ret = inv_icm42600_set_pwr_mgmt0(st, INV_ICM42600_SENSOR_MODE_OFF,
accel_conf, false, NULL);
if (ret)
- goto out_unlock;
+ return ret;
/* disable vddio regulator if chip is sleeping */
if (!wakeup)
regulator_disable(st->vddio_supply);
-out_unlock:
- mutex_unlock(&st->lock);
- return ret;
+ return 0;
}
/*
@@ -912,7 +876,10 @@ static int inv_icm42600_resume(struct device *dev)
bool wakeup;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
/* check wakeup capability */
accel_dev = &st->indio_accel->dev;
@@ -924,25 +891,21 @@ static int inv_icm42600_resume(struct device *dev)
} else {
ret = inv_icm42600_enable_regulator_vddio(st);
if (ret)
- goto out_unlock;
+ return ret;
}
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
/* restore sensors state */
ret = inv_icm42600_set_pwr_mgmt0(st, st->suspended.gyro,
st->suspended.accel,
st->suspended.temp, NULL);
if (ret)
- goto out_unlock;
+ return ret;
/* restore APEX features if disabled */
if (!wakeup && st->apex.wom.enable) {
ret = inv_icm42600_enable_wom(st);
if (ret)
- goto out_unlock;
+ return ret;
}
/* restore FIFO data streaming */
@@ -953,9 +916,7 @@ static int inv_icm42600_resume(struct device *dev)
INV_ICM42600_FIFO_CONFIG_STREAM);
}
-out_unlock:
- mutex_unlock(&st->lock);
- return ret;
+ return 0;
}
/* Runtime suspend will turn off sensors that are enabled by iio devices. */
@@ -964,34 +925,28 @@ static int inv_icm42600_runtime_suspend(struct device *dev)
struct inv_icm42600_state *st = dev_get_drvdata(dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
/* disable all sensors */
ret = inv_icm42600_set_pwr_mgmt0(st, INV_ICM42600_SENSOR_MODE_OFF,
INV_ICM42600_SENSOR_MODE_OFF, false,
NULL);
if (ret)
- goto error_unlock;
+ return ret;
regulator_disable(st->vddio_supply);
-error_unlock:
- mutex_unlock(&st->lock);
- return ret;
+ return 0;
}
/* Sensors are enabled by iio devices, no need to turn them back on here. */
static int inv_icm42600_runtime_resume(struct device *dev)
{
struct inv_icm42600_state *st = dev_get_drvdata(dev);
- int ret;
-
- mutex_lock(&st->lock);
- ret = inv_icm42600_enable_regulator_vddio(st);
+ guard(mutex)(&st->lock);
- mutex_unlock(&st->lock);
- return ret;
+ return inv_icm42600_enable_regulator_vddio(st);
}
EXPORT_NS_GPL_DEV_PM_OPS(inv_icm42600_pm_ops, IIO_ICM42600) = {
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index 9ba6f13628e6..7ef0a25ec74f 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -184,7 +184,6 @@ static int inv_icm42600_gyro_read_sensor(struct inv_icm42600_state *st,
ret = -EINVAL;
exit:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -278,12 +277,10 @@ static int inv_icm42600_gyro_write_scale(struct iio_dev *indio_dev,
conf.fs = idx / 2;
pm_runtime_get_sync(dev);
- mutex_lock(&st->lock);
- ret = inv_icm42600_set_gyro_conf(st, &conf, NULL);
+ scoped_guard(mutex, &st->lock)
+ ret = inv_icm42600_set_gyro_conf(st, &conf, NULL);
- mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -378,7 +375,6 @@ static int inv_icm42600_gyro_write_odr(struct iio_dev *indio_dev,
out_unlock:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
@@ -430,7 +426,6 @@ static int inv_icm42600_gyro_read_offset(struct inv_icm42600_state *st,
memcpy(data, st->buffer, sizeof(data));
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
if (ret)
return ret;
@@ -567,7 +562,6 @@ static int inv_icm42600_gyro_write_offset(struct inv_icm42600_state *st,
out_unlock:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
}
@@ -693,16 +687,11 @@ static int inv_icm42600_gyro_hwfifo_set_watermark(struct iio_dev *indio_dev,
unsigned int val)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
- int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
st->fifo.watermark.gyro = val;
- ret = inv_icm42600_buffer_update_watermark(st);
-
- mutex_unlock(&st->lock);
-
- return ret;
+ return inv_icm42600_buffer_update_watermark(st);
}
static int inv_icm42600_gyro_hwfifo_flush(struct iio_dev *indio_dev,
@@ -714,15 +703,13 @@ static int inv_icm42600_gyro_hwfifo_flush(struct iio_dev *indio_dev,
if (count == 0)
return 0;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = inv_icm42600_buffer_hwfifo_flush(st, count);
- if (!ret)
- ret = st->fifo.nb.gyro;
-
- mutex_unlock(&st->lock);
+ if (ret)
+ return ret;
- return ret;
+ return st->fifo.nb.gyro;
}
static const struct iio_info inv_icm42600_gyro_info = {
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
index 271a4788604a..30f6a9595eea 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_temp.c
@@ -41,7 +41,6 @@ static int inv_icm42600_temp_read(struct inv_icm42600_state *st, s16 *temp)
exit:
mutex_unlock(&st->lock);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return ret;
diff --git a/drivers/iio/imu/inv_icm45600/Kconfig b/drivers/iio/imu/inv_icm45600/Kconfig
new file mode 100644
index 000000000000..dc133402f6d7
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/Kconfig
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+config INV_ICM45600
+ tristate
+ select IIO_BUFFER
+ select IIO_KFIFO_BUF
+ select IIO_INV_SENSORS_TIMESTAMP
+
+config INV_ICM45600_I2C
+ tristate "InvenSense ICM-456xx I2C driver"
+ depends on I2C
+ select INV_ICM45600
+ select REGMAP_I2C
+ help
+ This driver supports the InvenSense ICM-456xx motion tracking
+ devices over I2C.
+ Supported devices:
+ - ICM-45605
+ - ICM-45606
+ - ICM-45608
+ - ICM-45634
+ - ICM-45686
+ - ICM-45687
+ - ICM-45688-P
+ - ICM-45689
+
+ This driver can be built as a module. The module will be called
+ inv-icm45600-i2c.
+
+config INV_ICM45600_SPI
+ tristate "InvenSense ICM-456xx SPI driver"
+ depends on SPI_MASTER
+ select INV_ICM45600
+ select REGMAP_SPI
+ help
+ This driver supports the InvenSense ICM-456xx motion tracking
+ devices over SPI.
+ Supported devices:
+ - ICM-45605
+ - ICM-45606
+ - ICM-45608
+ - ICM-45634
+ - ICM-45686
+ - ICM-45687
+ - ICM-45688-P
+ - ICM-45689
+
+ This driver can be built as a module. The module will be called
+ inv-icm45600-spi.
+
+config INV_ICM45600_I3C
+ tristate "InvenSense ICM-456xx I3C driver"
+ depends on I3C
+ select INV_ICM45600
+ select REGMAP_I3C
+ help
+ This driver supports the InvenSense ICM-456xx motion tracking
+ devices over I3C.
+ Supported devices:
+ - ICM-45605
+ - ICM-45606
+ - ICM-45608
+ - ICM-45634
+ - ICM-45686
+ - ICM-45687
+ - ICM-45688-P
+ - ICM-45689
+
+ This driver can be built as a module. The module will be called
+ inv-icm45600-i3c.
diff --git a/drivers/iio/imu/inv_icm45600/Makefile b/drivers/iio/imu/inv_icm45600/Makefile
new file mode 100644
index 000000000000..c98b8365b467
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+obj-$(CONFIG_INV_ICM45600) += inv-icm45600.o
+inv-icm45600-y += inv_icm45600_core.o
+inv-icm45600-y += inv_icm45600_buffer.o
+inv-icm45600-y += inv_icm45600_gyro.o
+inv-icm45600-y += inv_icm45600_accel.o
+
+obj-$(CONFIG_INV_ICM45600_I2C) += inv-icm45600-i2c.o
+inv-icm45600-i2c-y += inv_icm45600_i2c.o
+
+obj-$(CONFIG_INV_ICM45600_SPI) += inv-icm45600-spi.o
+inv-icm45600-spi-y += inv_icm45600_spi.o
+
+obj-$(CONFIG_INV_ICM45600_I3C) += inv-icm45600-i3c.o
+inv-icm45600-i3c-y += inv_icm45600_i3c.o
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600.h b/drivers/iio/imu/inv_icm45600/inv_icm45600.h
new file mode 100644
index 000000000000..c5b5446f6c3b
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2025 Invensense, Inc. */
+
+#ifndef INV_ICM45600_H_
+#define INV_ICM45600_H_
+
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#include <linux/iio/common/inv_sensors_timestamp.h>
+#include <linux/iio/iio.h>
+
+#include "inv_icm45600_buffer.h"
+
+#define INV_ICM45600_REG_BANK_MASK GENMASK(15, 8)
+#define INV_ICM45600_REG_ADDR_MASK GENMASK(7, 0)
+
+enum inv_icm45600_sensor_mode {
+ INV_ICM45600_SENSOR_MODE_OFF,
+ INV_ICM45600_SENSOR_MODE_STANDBY,
+ INV_ICM45600_SENSOR_MODE_LOW_POWER,
+ INV_ICM45600_SENSOR_MODE_LOW_NOISE,
+ INV_ICM45600_SENSOR_MODE_MAX
+};
+
+/* gyroscope fullscale values */
+enum inv_icm45600_gyro_fs {
+ INV_ICM45600_GYRO_FS_2000DPS,
+ INV_ICM45600_GYRO_FS_1000DPS,
+ INV_ICM45600_GYRO_FS_500DPS,
+ INV_ICM45600_GYRO_FS_250DPS,
+ INV_ICM45600_GYRO_FS_125DPS,
+ INV_ICM45600_GYRO_FS_62_5DPS,
+ INV_ICM45600_GYRO_FS_31_25DPS,
+ INV_ICM45600_GYRO_FS_15_625DPS,
+ INV_ICM45600_GYRO_FS_MAX
+};
+
+enum inv_icm45686_gyro_fs {
+ INV_ICM45686_GYRO_FS_4000DPS,
+ INV_ICM45686_GYRO_FS_2000DPS,
+ INV_ICM45686_GYRO_FS_1000DPS,
+ INV_ICM45686_GYRO_FS_500DPS,
+ INV_ICM45686_GYRO_FS_250DPS,
+ INV_ICM45686_GYRO_FS_125DPS,
+ INV_ICM45686_GYRO_FS_62_5DPS,
+ INV_ICM45686_GYRO_FS_31_25DPS,
+ INV_ICM45686_GYRO_FS_15_625DPS,
+ INV_ICM45686_GYRO_FS_MAX
+};
+
+/* accelerometer fullscale values */
+enum inv_icm45600_accel_fs {
+ INV_ICM45600_ACCEL_FS_16G,
+ INV_ICM45600_ACCEL_FS_8G,
+ INV_ICM45600_ACCEL_FS_4G,
+ INV_ICM45600_ACCEL_FS_2G,
+ INV_ICM45600_ACCEL_FS_MAX
+};
+
+enum inv_icm45686_accel_fs {
+ INV_ICM45686_ACCEL_FS_32G,
+ INV_ICM45686_ACCEL_FS_16G,
+ INV_ICM45686_ACCEL_FS_8G,
+ INV_ICM45686_ACCEL_FS_4G,
+ INV_ICM45686_ACCEL_FS_2G,
+ INV_ICM45686_ACCEL_FS_MAX
+};
+
+/* ODRs suffixed with LN or LP are Low-Noise or Low-Power mode only */
+enum inv_icm45600_odr {
+ INV_ICM45600_ODR_6400HZ_LN = 0x03,
+ INV_ICM45600_ODR_3200HZ_LN,
+ INV_ICM45600_ODR_1600HZ_LN,
+ INV_ICM45600_ODR_800HZ_LN,
+ INV_ICM45600_ODR_400HZ,
+ INV_ICM45600_ODR_200HZ,
+ INV_ICM45600_ODR_100HZ,
+ INV_ICM45600_ODR_50HZ,
+ INV_ICM45600_ODR_25HZ,
+ INV_ICM45600_ODR_12_5HZ,
+ INV_ICM45600_ODR_6_25HZ_LP,
+ INV_ICM45600_ODR_3_125HZ_LP,
+ INV_ICM45600_ODR_1_5625HZ_LP,
+ INV_ICM45600_ODR_MAX
+};
+
+struct inv_icm45600_sensor_conf {
+ u8 mode;
+ u8 fs;
+ u8 odr;
+ u8 filter;
+};
+
+#define INV_ICM45600_SENSOR_CONF_KEEP_VALUES { U8_MAX, U8_MAX, U8_MAX, U8_MAX }
+
+struct inv_icm45600_conf {
+ struct inv_icm45600_sensor_conf gyro;
+ struct inv_icm45600_sensor_conf accel;
+};
+
+struct inv_icm45600_suspended {
+ enum inv_icm45600_sensor_mode gyro;
+ enum inv_icm45600_sensor_mode accel;
+};
+
+struct inv_icm45600_chip_info {
+ u8 whoami;
+ const char *name;
+ const struct inv_icm45600_conf *conf;
+ const int *accel_scales;
+ const int accel_scales_len;
+ const int *gyro_scales;
+ const int gyro_scales_len;
+};
+
+extern const struct inv_icm45600_chip_info inv_icm45605_chip_info;
+extern const struct inv_icm45600_chip_info inv_icm45606_chip_info;
+extern const struct inv_icm45600_chip_info inv_icm45608_chip_info;
+extern const struct inv_icm45600_chip_info inv_icm45634_chip_info;
+extern const struct inv_icm45600_chip_info inv_icm45686_chip_info;
+extern const struct inv_icm45600_chip_info inv_icm45687_chip_info;
+extern const struct inv_icm45600_chip_info inv_icm45688p_chip_info;
+extern const struct inv_icm45600_chip_info inv_icm45689_chip_info;
+
+extern const int inv_icm45600_accel_scale[][2];
+extern const int inv_icm45686_accel_scale[][2];
+extern const int inv_icm45600_gyro_scale[][2];
+extern const int inv_icm45686_gyro_scale[][2];
+
+/**
+ * struct inv_icm45600_state - driver state variables
+ * @lock: lock for serializing multiple register accesses.
+ * @map: regmap pointer.
+ * @vddio_supply: I/O voltage regulator for the chip.
+ * @orientation: sensor chip orientation relative to main hardware.
+ * @conf: chip sensors configurations.
+ * @suspended: suspended sensors configuration.
+ * @indio_gyro: gyroscope IIO device.
+ * @indio_accel: accelerometer IIO device.
+ * @chip_info: chip driver data.
+ * @timestamp: interrupt timestamps.
+ * @fifo: FIFO management structure.
+ * @buffer: data transfer buffer aligned for DMA.
+ */
+struct inv_icm45600_state {
+ struct mutex lock;
+ struct regmap *map;
+ struct regulator *vddio_supply;
+ struct iio_mount_matrix orientation;
+ struct inv_icm45600_conf conf;
+ struct inv_icm45600_suspended suspended;
+ struct iio_dev *indio_gyro;
+ struct iio_dev *indio_accel;
+ const struct inv_icm45600_chip_info *chip_info;
+ struct {
+ s64 gyro;
+ s64 accel;
+ } timestamp;
+ struct inv_icm45600_fifo fifo;
+ union {
+ u8 buff[2];
+ __le16 u16;
+ u8 ireg[3];
+ } buffer __aligned(IIO_DMA_MINALIGN);
+};
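/*
 * Note: the buffer union above is __aligned(IIO_DMA_MINALIGN) so that it
 * starts on its own cache line; DMA-based bus transfers therefore cannot
 * corrupt the neighbouring state fields through cache-line sharing.
 */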
+
+/**
+ * struct inv_icm45600_sensor_state - sensor state variables
+ * @scales: table of scales.
+ * @scales_len: length (number of items) of the scales table.
+ * @power_mode: sensor requested power mode (for common frequencies)
+ * @ts: timestamp module states.
+ */
+struct inv_icm45600_sensor_state {
+ const int *scales;
+ size_t scales_len;
+ enum inv_icm45600_sensor_mode power_mode;
+ struct inv_sensors_timestamp ts;
+};
+
+/* Virtual register addresses are 16 bits: bank in the MSB, register address in the LSB */
+
+/* Indirect register access */
+#define INV_ICM45600_REG_IREG_ADDR 0x7C
+#define INV_ICM45600_REG_IREG_DATA 0x7E
+
+/* Direct access registers */
+#define INV_ICM45600_REG_MISC2 0x007F
+#define INV_ICM45600_MISC2_SOFT_RESET BIT(1)
+
+#define INV_ICM45600_REG_DRIVE_CONFIG0 0x0032
+#define INV_ICM45600_DRIVE_CONFIG0_SPI_MASK GENMASK(3, 1)
+#define INV_ICM45600_SPI_SLEW_RATE_0_5NS 6
+#define INV_ICM45600_SPI_SLEW_RATE_4NS 5
+#define INV_ICM45600_SPI_SLEW_RATE_5NS 4
+#define INV_ICM45600_SPI_SLEW_RATE_7NS 3
+#define INV_ICM45600_SPI_SLEW_RATE_10NS 2
+#define INV_ICM45600_SPI_SLEW_RATE_14NS 1
+#define INV_ICM45600_SPI_SLEW_RATE_38NS 0
+
+#define INV_ICM45600_REG_INT1_CONFIG2 0x0018
+#define INV_ICM45600_INT1_CONFIG2_PUSH_PULL BIT(2)
+#define INV_ICM45600_INT1_CONFIG2_LATCHED BIT(1)
+#define INV_ICM45600_INT1_CONFIG2_ACTIVE_HIGH BIT(0)
+#define INV_ICM45600_INT1_CONFIG2_ACTIVE_LOW 0x00
+
+#define INV_ICM45600_REG_FIFO_CONFIG0 0x001D
+#define INV_ICM45600_FIFO_CONFIG0_MODE_MASK GENMASK(7, 6)
+#define INV_ICM45600_FIFO_CONFIG0_MODE_BYPASS 0
+#define INV_ICM45600_FIFO_CONFIG0_MODE_STREAM 1
+#define INV_ICM45600_FIFO_CONFIG0_MODE_STOP_ON_FULL 2
+#define INV_ICM45600_FIFO_CONFIG0_FIFO_DEPTH_MASK GENMASK(5, 0)
+#define INV_ICM45600_FIFO_CONFIG0_FIFO_DEPTH_MAX 0x1F
+
+#define INV_ICM45600_REG_FIFO_CONFIG2 0x0020
+#define INV_ICM45600_REG_FIFO_CONFIG2_FIFO_FLUSH BIT(7)
+#define INV_ICM45600_REG_FIFO_CONFIG2_WM_GT_TH BIT(3)
+
+#define INV_ICM45600_REG_FIFO_CONFIG3 0x0021
+#define INV_ICM45600_FIFO_CONFIG3_ES1_EN BIT(5)
+#define INV_ICM45600_FIFO_CONFIG3_ES0_EN BIT(4)
+#define INV_ICM45600_FIFO_CONFIG3_HIRES_EN BIT(3)
+#define INV_ICM45600_FIFO_CONFIG3_GYRO_EN BIT(2)
+#define INV_ICM45600_FIFO_CONFIG3_ACCEL_EN BIT(1)
+#define INV_ICM45600_FIFO_CONFIG3_IF_EN BIT(0)
+
+#define INV_ICM45600_REG_FIFO_CONFIG4 0x0022
+#define INV_ICM45600_FIFO_CONFIG4_COMP_EN BIT(2)
+#define INV_ICM45600_FIFO_CONFIG4_TMST_FSYNC_EN BIT(1)
+#define INV_ICM45600_FIFO_CONFIG4_ES0_9B BIT(0)
+
+/* all sensor data are 16 bits (2 registers wide) in little-endian */
+#define INV_ICM45600_REG_TEMP_DATA 0x000C
+#define INV_ICM45600_REG_ACCEL_DATA_X 0x0000
+#define INV_ICM45600_REG_ACCEL_DATA_Y 0x0002
+#define INV_ICM45600_REG_ACCEL_DATA_Z 0x0004
+#define INV_ICM45600_REG_GYRO_DATA_X 0x0006
+#define INV_ICM45600_REG_GYRO_DATA_Y 0x0008
+#define INV_ICM45600_REG_GYRO_DATA_Z 0x000A
+
+#define INV_ICM45600_REG_INT_STATUS 0x0019
+#define INV_ICM45600_INT_STATUS_RESET_DONE BIT(7)
+#define INV_ICM45600_INT_STATUS_AUX1_AGC_RDY BIT(6)
+#define INV_ICM45600_INT_STATUS_AP_AGC_RDY BIT(5)
+#define INV_ICM45600_INT_STATUS_AP_FSYNC BIT(4)
+#define INV_ICM45600_INT_STATUS_AUX1_DRDY BIT(3)
+#define INV_ICM45600_INT_STATUS_DATA_RDY BIT(2)
+#define INV_ICM45600_INT_STATUS_FIFO_THS BIT(1)
+#define INV_ICM45600_INT_STATUS_FIFO_FULL BIT(0)
+
+/*
+ * FIFO access registers
+ * FIFO count is 16 bits (2 registers)
+ * FIFO data is a single register read repeatedly to drain the FIFO content
+ */
+#define INV_ICM45600_REG_FIFO_COUNT 0x0012
+#define INV_ICM45600_REG_FIFO_DATA 0x0014
+
+#define INV_ICM45600_REG_PWR_MGMT0 0x0010
+#define INV_ICM45600_PWR_MGMT0_GYRO_MODE_MASK GENMASK(3, 2)
+#define INV_ICM45600_PWR_MGMT0_ACCEL_MODE_MASK GENMASK(1, 0)
+
+#define INV_ICM45600_REG_ACCEL_CONFIG0 0x001B
+#define INV_ICM45600_ACCEL_CONFIG0_FS_MASK GENMASK(6, 4)
+#define INV_ICM45600_ACCEL_CONFIG0_ODR_MASK GENMASK(3, 0)
+#define INV_ICM45600_REG_GYRO_CONFIG0 0x001C
+#define INV_ICM45600_GYRO_CONFIG0_FS_MASK GENMASK(7, 4)
+#define INV_ICM45600_GYRO_CONFIG0_ODR_MASK GENMASK(3, 0)
+
+#define INV_ICM45600_REG_SMC_CONTROL_0 0xA258
+#define INV_ICM45600_SMC_CONTROL_0_ACCEL_LP_CLK_SEL BIT(4)
+#define INV_ICM45600_SMC_CONTROL_0_TMST_EN BIT(0)
+
+/* FIFO watermark is 16 bits (2 registers wide) in little-endian */
+#define INV_ICM45600_REG_FIFO_WATERMARK 0x001E
+
+/* FIFO is configured for 8 KiB */
+#define INV_ICM45600_FIFO_SIZE_MAX SZ_8K
+
+#define INV_ICM45600_REG_INT1_CONFIG0 0x0016
+#define INV_ICM45600_INT1_CONFIG0_RESET_DONE_EN BIT(7)
+#define INV_ICM45600_INT1_CONFIG0_AUX1_AGC_RDY_EN BIT(6)
+#define INV_ICM45600_INT1_CONFIG0_AP_AGC_RDY_EN BIT(5)
+#define INV_ICM45600_INT1_CONFIG0_AP_FSYNC_EN BIT(4)
+#define INV_ICM45600_INT1_CONFIG0_AUX1_DRDY_EN BIT(3)
+#define INV_ICM45600_INT1_CONFIG0_DRDY_EN BIT(2)
+#define INV_ICM45600_INT1_CONFIG0_FIFO_THS_EN BIT(1)
+#define INV_ICM45600_INT1_CONFIG0_FIFO_FULL_EN BIT(0)
+
+#define INV_ICM45600_REG_WHOAMI 0x0072
+#define INV_ICM45600_WHOAMI_ICM45605 0xE5
+#define INV_ICM45600_WHOAMI_ICM45686 0xE9
+#define INV_ICM45600_WHOAMI_ICM45688P 0xE7
+#define INV_ICM45600_WHOAMI_ICM45608 0x81
+#define INV_ICM45600_WHOAMI_ICM45634 0x82
+#define INV_ICM45600_WHOAMI_ICM45689 0x83
+#define INV_ICM45600_WHOAMI_ICM45606 0x84
+#define INV_ICM45600_WHOAMI_ICM45687 0x85
+
+/* Gyro USER offset */
+#define INV_ICM45600_IPREG_SYS1_REG_42 0xA42A
+#define INV_ICM45600_IPREG_SYS1_REG_56 0xA438
+#define INV_ICM45600_IPREG_SYS1_REG_70 0xA446
+#define INV_ICM45600_GYRO_OFFUSER_MASK GENMASK(13, 0)
+/* Gyro Averaging filter */
+#define INV_ICM45600_IPREG_SYS1_REG_170 0xA4AA
+#define INV_ICM45600_IPREG_SYS1_170_GYRO_LP_AVG_MASK GENMASK(4, 1)
+#define INV_ICM45600_GYRO_LP_AVG_SEL_8X 5
+#define INV_ICM45600_GYRO_LP_AVG_SEL_2X 1
+/* Accel USER offset */
+#define INV_ICM45600_IPREG_SYS2_REG_24 0xA518
+#define INV_ICM45600_IPREG_SYS2_REG_32 0xA520
+#define INV_ICM45600_IPREG_SYS2_REG_40 0xA528
+#define INV_ICM45600_ACCEL_OFFUSER_MASK GENMASK(13, 0)
+/* Accel averaging filter */
+#define INV_ICM45600_IPREG_SYS2_REG_129 0xA581
+#define INV_ICM45600_ACCEL_LP_AVG_SEL_1X 0x0000
+#define INV_ICM45600_ACCEL_LP_AVG_SEL_4X 0x0002
+
+/* Sleep times required by the driver */
+#define INV_ICM45600_ACCEL_STARTUP_TIME_MS 60
+#define INV_ICM45600_GYRO_STARTUP_TIME_MS 60
+#define INV_ICM45600_GYRO_STOP_TIME_MS 150
+#define INV_ICM45600_IREG_DELAY_US 4
+
+typedef int (*inv_icm45600_bus_setup)(struct inv_icm45600_state *);
+
+extern const struct dev_pm_ops inv_icm45600_pm_ops;
+
+const struct iio_mount_matrix *
+inv_icm45600_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan);
+
+#define INV_ICM45600_TEMP_CHAN(_index) \
+ { \
+ .type = IIO_TEMP, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+ }
+
+int inv_icm45600_temp_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask);
+
+u32 inv_icm45600_odr_to_period(enum inv_icm45600_odr odr);
+
+int inv_icm45600_set_accel_conf(struct inv_icm45600_state *st,
+ struct inv_icm45600_sensor_conf *conf,
+ unsigned int *sleep_ms);
+
+int inv_icm45600_set_gyro_conf(struct inv_icm45600_state *st,
+ struct inv_icm45600_sensor_conf *conf,
+ unsigned int *sleep_ms);
+
+int inv_icm45600_debugfs_reg(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval);
+
+int inv_icm45600_core_probe(struct regmap *regmap,
+ const struct inv_icm45600_chip_info *chip_info,
+ bool reset, inv_icm45600_bus_setup bus_setup);
+
+struct iio_dev *inv_icm45600_gyro_init(struct inv_icm45600_state *st);
+
+int inv_icm45600_gyro_parse_fifo(struct iio_dev *indio_dev);
+
+struct iio_dev *inv_icm45600_accel_init(struct inv_icm45600_state *st);
+
+int inv_icm45600_accel_parse_fifo(struct iio_dev *indio_dev);
+
+#endif
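A minimal sketch of how the 16-bit virtual addresses in this header split into bank and register through the masks defined at its top; the helper is illustrative, not part of the patch:

#include <linux/bitfield.h>
#include <linux/types.h>

/* e.g. INV_ICM45600_IPREG_SYS1_REG_42 = 0xA42A -> bank 0xA4, reg 0x2A */
static inline void demo_split_vaddr(u16 vaddr, u8 *bank, u8 *addr)
{
	*bank = FIELD_GET(GENMASK(15, 8), vaddr);	/* REG_BANK_MASK */
	*addr = FIELD_GET(GENMASK(7, 0), vaddr);	/* REG_ADDR_MASK */
}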
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c b/drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
new file mode 100644
index 000000000000..efa22e02657f
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600_accel.c
@@ -0,0 +1,782 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2025 Invensense, Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/common/inv_sensors_timestamp.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+
+#include "inv_icm45600_buffer.h"
+#include "inv_icm45600.h"
+
+enum inv_icm45600_accel_scan {
+ INV_ICM45600_ACCEL_SCAN_X,
+ INV_ICM45600_ACCEL_SCAN_Y,
+ INV_ICM45600_ACCEL_SCAN_Z,
+ INV_ICM45600_ACCEL_SCAN_TEMP,
+ INV_ICM45600_ACCEL_SCAN_TIMESTAMP,
+};
+
+static const struct iio_chan_spec_ext_info inv_icm45600_accel_ext_infos[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, inv_icm45600_get_mount_matrix),
+ { }
+};
+
+#define INV_ICM45600_ACCEL_CHAN(_modifier, _index, _ext_info) \
+ { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = _modifier, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+ .ext_info = _ext_info, \
+ }
+
+static const struct iio_chan_spec inv_icm45600_accel_channels[] = {
+ INV_ICM45600_ACCEL_CHAN(IIO_MOD_X, INV_ICM45600_ACCEL_SCAN_X,
+ inv_icm45600_accel_ext_infos),
+ INV_ICM45600_ACCEL_CHAN(IIO_MOD_Y, INV_ICM45600_ACCEL_SCAN_Y,
+ inv_icm45600_accel_ext_infos),
+ INV_ICM45600_ACCEL_CHAN(IIO_MOD_Z, INV_ICM45600_ACCEL_SCAN_Z,
+ inv_icm45600_accel_ext_infos),
+ INV_ICM45600_TEMP_CHAN(INV_ICM45600_ACCEL_SCAN_TEMP),
+ IIO_CHAN_SOFT_TIMESTAMP(INV_ICM45600_ACCEL_SCAN_TIMESTAMP),
+};
+
+/*
+ * IIO buffer data: size must be a power of 2 and timestamp aligned
+ * 16 bytes: 6 bytes acceleration, 2 bytes temperature, 8 bytes timestamp
+ */
+struct inv_icm45600_accel_buffer {
+ struct inv_icm45600_fifo_sensor_data accel;
+ s16 temp;
+ aligned_s64 timestamp;
+};
+
+static const unsigned long inv_icm45600_accel_scan_masks[] = {
+ /* 3-axis accel + temperature */
+ BIT(INV_ICM45600_ACCEL_SCAN_X) |
+ BIT(INV_ICM45600_ACCEL_SCAN_Y) |
+ BIT(INV_ICM45600_ACCEL_SCAN_Z) |
+ BIT(INV_ICM45600_ACCEL_SCAN_TEMP),
+ 0
+};
+
+/* enable accelerometer sensor and FIFO write */
+static int inv_icm45600_accel_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(indio_dev);
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ unsigned int fifo_en = 0;
+ unsigned int sleep = 0;
+ int ret;
+
+ scoped_guard(mutex, &st->lock) {
+ if (*scan_mask & BIT(INV_ICM45600_ACCEL_SCAN_TEMP))
+ fifo_en |= INV_ICM45600_SENSOR_TEMP;
+
+ if (*scan_mask & (BIT(INV_ICM45600_ACCEL_SCAN_X) |
+ BIT(INV_ICM45600_ACCEL_SCAN_Y) |
+ BIT(INV_ICM45600_ACCEL_SCAN_Z))) {
+ /* enable accel sensor */
+ conf.mode = accel_st->power_mode;
+ ret = inv_icm45600_set_accel_conf(st, &conf, &sleep);
+ if (ret)
+ return ret;
+ fifo_en |= INV_ICM45600_SENSOR_ACCEL;
+ }
+
+ /* Update data FIFO write. */
+ ret = inv_icm45600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+ }
+
+ /* Sleep for the required time. */
+ if (sleep)
+ msleep(sleep);
+
+ return ret;
+}
+
+static int _inv_icm45600_accel_read_sensor(struct inv_icm45600_state *st,
+ struct inv_icm45600_sensor_state *accel_st,
+ unsigned int reg, int *val)
+{
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ int ret;
+
+ /* enable accel sensor */
+ conf.mode = accel_st->power_mode;
+ ret = inv_icm45600_set_accel_conf(st, &conf, NULL);
+ if (ret)
+ return ret;
+
+ /* read accel register data */
+ ret = regmap_bulk_read(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(le16_to_cpup(&st->buffer.u16), 15);
+ if (*val == INV_ICM45600_DATA_INVALID)
+ return -ENODATA;
+
+ return 0;
+}
+
+static int inv_icm45600_accel_read_sensor(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int reg;
+ int ret;
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ reg = INV_ICM45600_REG_ACCEL_DATA_X;
+ break;
+ case IIO_MOD_Y:
+ reg = INV_ICM45600_REG_ACCEL_DATA_Y;
+ break;
+ case IIO_MOD_Z:
+ reg = INV_ICM45600_REG_ACCEL_DATA_Z;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = _inv_icm45600_accel_read_sensor(st, accel_st, reg, val);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+/* IIO format int + nano */
+const int inv_icm45600_accel_scale[][2] = {
+ /* +/- 16G => 0.004788403 m/s-2 */
+ [INV_ICM45600_ACCEL_FS_16G] = { 0, 4788403 },
+ /* +/- 8G => 0.002394202 m/s-2 */
+ [INV_ICM45600_ACCEL_FS_8G] = { 0, 2394202 },
+ /* +/- 4G => 0.001197101 m/s-2 */
+ [INV_ICM45600_ACCEL_FS_4G] = { 0, 1197101 },
+ /* +/- 2G => 0.000598550 m/s-2 */
+ [INV_ICM45600_ACCEL_FS_2G] = { 0, 598550 },
+};
+
+const int inv_icm45686_accel_scale[][2] = {
+ /* +/- 32G => 0.009576806 m/s² */
+ [INV_ICM45686_ACCEL_FS_32G] = { 0, 9576806 },
+ /* +/- 16G => 0.004788403 m/s² */
+ [INV_ICM45686_ACCEL_FS_16G] = { 0, 4788403 },
+ /* +/- 8G => 0.002394202 m/s² */
+ [INV_ICM45686_ACCEL_FS_8G] = { 0, 2394202 },
+ /* +/- 4G => 0.001197101 m/s² */
+ [INV_ICM45686_ACCEL_FS_4G] = { 0, 1197101 },
+ /* +/- 2G => 0.000598550 m/s² */
+ [INV_ICM45686_ACCEL_FS_2G] = { 0, 598550 },
+};
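+
+/*
+ * Derivation of the values above, for reference: with 16-bit samples,
+ * scale = 9.80665 * FSR / 32768 (m/s² per LSB), where FSR is the half-range
+ * in g. E.g. +/-16G: 9.80665 * 16 / 32768 = 0.004788403 m/s² per LSB.
+ */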
+
+static int inv_icm45600_accel_read_scale(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(indio_dev);
+ unsigned int idx;
+
+ idx = st->conf.accel.fs;
+
+ /* Full-scale register value starts at 1 for non High-FSR parts. */
+ if (accel_st->scales == (const int *)&inv_icm45600_accel_scale)
+ idx--;
+
+ *val = accel_st->scales[2 * idx];
+ *val2 = accel_st->scales[2 * idx + 1];
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int inv_icm45600_accel_write_scale(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int idx;
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ int ret;
+
+ for (idx = 0; idx < accel_st->scales_len; idx += 2) {
+ if (val == accel_st->scales[idx] &&
+ val2 == accel_st->scales[idx + 1])
+ break;
+ }
+ if (idx == accel_st->scales_len)
+ return -EINVAL;
+
+ conf.fs = idx / 2;
+
+ /* Full-scale register value starts at 1 for non High-FSR parts. */
+ if (accel_st->scales == (const int *)&inv_icm45600_accel_scale)
+ conf.fs++;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = inv_icm45600_set_accel_conf(st, &conf, NULL);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+/* IIO format int + micro */
+static const int inv_icm45600_accel_odr[] = {
+ 1, 562500, /* 1.5625Hz */
+ 3, 125000, /* 3.125Hz */
+ 6, 250000, /* 6.25Hz */
+ 12, 500000, /* 12.5Hz */
+ 25, 0, /* 25Hz */
+ 50, 0, /* 50Hz */
+ 100, 0, /* 100Hz */
+ 200, 0, /* 200Hz */
+ 400, 0, /* 400Hz */
+ 800, 0, /* 800Hz */
+ 1600, 0, /* 1.6kHz */
+ 3200, 0, /* 3.2kHz */
+ 6400, 0, /* 6.4kHz */
+};
+
+static const int inv_icm45600_accel_odr_conv[] = {
+ INV_ICM45600_ODR_1_5625HZ_LP,
+ INV_ICM45600_ODR_3_125HZ_LP,
+ INV_ICM45600_ODR_6_25HZ_LP,
+ INV_ICM45600_ODR_12_5HZ,
+ INV_ICM45600_ODR_25HZ,
+ INV_ICM45600_ODR_50HZ,
+ INV_ICM45600_ODR_100HZ,
+ INV_ICM45600_ODR_200HZ,
+ INV_ICM45600_ODR_400HZ,
+ INV_ICM45600_ODR_800HZ_LN,
+ INV_ICM45600_ODR_1600HZ_LN,
+ INV_ICM45600_ODR_3200HZ_LN,
+ INV_ICM45600_ODR_6400HZ_LN,
+};
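+
+/*
+ * The two tables above are kept in sync: pair i in inv_icm45600_accel_odr
+ * maps to entry i in inv_icm45600_accel_odr_conv, hence the idx / 2 indexing
+ * below. E.g. writing "12.5" matches {12, 500000} at idx 6, so the register
+ * value is inv_icm45600_accel_odr_conv[3] = INV_ICM45600_ODR_12_5HZ.
+ */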
+
+static int inv_icm45600_accel_read_odr(struct inv_icm45600_state *st,
+ int *val, int *val2)
+{
+ unsigned int odr;
+ unsigned int i;
+
+ odr = st->conf.accel.odr;
+
+ for (i = 0; i < ARRAY_SIZE(inv_icm45600_accel_odr_conv); ++i) {
+ if (inv_icm45600_accel_odr_conv[i] == odr)
+ break;
+ }
+ if (i >= ARRAY_SIZE(inv_icm45600_accel_odr_conv))
+ return -EINVAL;
+
+ *val = inv_icm45600_accel_odr[2 * i];
+ *val2 = inv_icm45600_accel_odr[2 * i + 1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int _inv_icm45600_accel_write_odr(struct iio_dev *indio_dev, int odr)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = &accel_st->ts;
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ int ret;
+
+ conf.odr = odr;
+ ret = inv_sensors_timestamp_update_odr(ts, inv_icm45600_odr_to_period(conf.odr),
+ iio_buffer_enabled(indio_dev));
+ if (ret)
+ return ret;
+
+ if (st->conf.accel.mode != INV_ICM45600_SENSOR_MODE_OFF)
+ conf.mode = accel_st->power_mode;
+
+ ret = inv_icm45600_set_accel_conf(st, &conf, NULL);
+ if (ret)
+ return ret;
+
+ inv_icm45600_buffer_update_fifo_period(st);
+ inv_icm45600_buffer_update_watermark(st);
+
+ return 0;
+}
+
+static int inv_icm45600_accel_write_odr(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int idx;
+ int odr;
+ int ret;
+
+ for (idx = 0; idx < ARRAY_SIZE(inv_icm45600_accel_odr); idx += 2) {
+ if (val == inv_icm45600_accel_odr[idx] &&
+ val2 == inv_icm45600_accel_odr[idx + 1])
+ break;
+ }
+ if (idx >= ARRAY_SIZE(inv_icm45600_accel_odr))
+ return -EINVAL;
+
+ odr = inv_icm45600_accel_odr_conv[idx / 2];
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = _inv_icm45600_accel_write_odr(indio_dev, odr);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+/*
+ * Calibration bias values, IIO range format int + micro.
+ * Value is limited to +/-1g coded on 14 bits signed. Step is 1/8192g
+ * (about 0.122mg), matching the table below.
+ */
+static int inv_icm45600_accel_calibbias[] = {
+ -9, 806650, /* min: -9.806650 m/s² */
+ 0, 1197, /* step: 0.001197 m/s² */
+ 9, 805453, /* max: 9.805453 m/s² */
+};
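+
+/*
+ * For reference: step = 9.80665 / 8192 ≈ 0.001197 m/s² (1/8192 g per LSB),
+ * min = -8192 steps = -9.806650 m/s², max = 8191 steps ≈ 9.805453 m/s².
+ */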
+
+static int inv_icm45600_accel_read_offset(struct inv_icm45600_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ struct device *dev = regmap_get_device(st->map);
+ s64 val64;
+ s32 bias;
+ unsigned int reg;
+ s16 offset;
+ int ret;
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ reg = INV_ICM45600_IPREG_SYS2_REG_24;
+ break;
+ case IIO_MOD_Y:
+ reg = INV_ICM45600_IPREG_SYS2_REG_32;
+ break;
+ case IIO_MOD_Z:
+ reg = INV_ICM45600_IPREG_SYS2_REG_40;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = regmap_bulk_read(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
+
+ pm_runtime_put_autosuspend(dev);
+ if (ret)
+ return ret;
+
+ offset = le16_to_cpup(&st->buffer.u16) & INV_ICM45600_ACCEL_OFFUSER_MASK;
+ /* 14 bits signed value */
+ offset = sign_extend32(offset, 13);
+
+ /*
+ * convert raw offset to g then to m/s²
+ * 14 bits signed raw step 1/8192g
+ * g to m/s²: 9.806650
+ * result in micro (* 1000000)
+ * (offset * 9806650) / 8192
+ */
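+ /* e.g. offset = 8 LSB: (8 * 9806650 + 4096) / 8192 = 9577 -> 0.009577 m/s² */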
+ val64 = (s64)offset * 9806650LL;
+ /* for rounding, add + or - divisor (8192) divided by 2 */
+ if (val64 >= 0)
+ val64 += 8192LL / 2LL;
+ else
+ val64 -= 8192LL / 2LL;
+ bias = div_s64(val64, 8192L);
+ *val = bias / 1000000L;
+ *val2 = bias % 1000000L;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int inv_icm45600_accel_write_offset(struct inv_icm45600_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2)
+{
+ struct device *dev = regmap_get_device(st->map);
+ s64 val64;
+ s32 min, max;
+ unsigned int reg;
+ s16 offset;
+ int ret;
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ reg = INV_ICM45600_IPREG_SYS2_REG_24;
+ break;
+ case IIO_MOD_Y:
+ reg = INV_ICM45600_IPREG_SYS2_REG_32;
+ break;
+ case IIO_MOD_Z:
+ reg = INV_ICM45600_IPREG_SYS2_REG_40;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* inv_icm45600_accel_calibbias: min - step - max in micro */
+ min = inv_icm45600_accel_calibbias[0] * 1000000L -
+ inv_icm45600_accel_calibbias[1];
+ max = inv_icm45600_accel_calibbias[4] * 1000000L +
+ inv_icm45600_accel_calibbias[5];
+ val64 = (s64)val * 1000000LL;
+ if (val >= 0)
+ val64 += (s64)val2;
+ else
+ val64 -= (s64)val2;
+ if (val64 < min || val64 > max)
+ return -EINVAL;
+
+ /*
+ * convert m/s² to g then to raw value
+ * m/s² to g: 1 / 9.806650
+ * g to raw 14 bits signed, step 1/8192g: * 8192
+ * val in micro (1000000)
+ * val * 8192 / (9.806650 * 1000000)
+ */
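+ /* e.g. +0.5g = 4.903325 m/s²: (4903325 * 8192 + 4903325) / 9806650 = 4096 LSB */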
+ val64 = val64 * 8192LL;
+ /* for rounding, add + or - divisor (9806650) divided by 2 */
+ if (val64 >= 0)
+ val64 += 9806650 / 2;
+ else
+ val64 -= 9806650 / 2;
+ offset = div_s64(val64, 9806650);
+
+ /* clamp value limited to 14 bits signed */
+ offset = clamp(offset, -8192, 8191);
+
+ st->buffer.u16 = cpu_to_le16(offset & INV_ICM45600_ACCEL_OFFUSER_MASK);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = regmap_bulk_write(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
+
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+static int inv_icm45600_accel_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ break;
+ case IIO_TEMP:
+ return inv_icm45600_temp_read_raw(indio_dev, chan, val, val2, mask);
+ default:
+ return -EINVAL;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = inv_icm45600_accel_read_sensor(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ return inv_icm45600_accel_read_scale(indio_dev, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return inv_icm45600_accel_read_odr(st, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return inv_icm45600_accel_read_offset(st, chan, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_icm45600_accel_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type, int *length, long mask)
+{
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(indio_dev);
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = accel_st->scales;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ *length = accel_st->scales_len;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = inv_icm45600_accel_odr;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = ARRAY_SIZE(inv_icm45600_accel_odr);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ *vals = inv_icm45600_accel_calibbias;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_icm45600_accel_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = inv_icm45600_accel_write_scale(indio_dev, val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return inv_icm45600_accel_write_odr(indio_dev, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = inv_icm45600_accel_write_offset(st, chan, val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_icm45600_accel_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ if (chan->type != IIO_ACCEL)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_icm45600_accel_hwfifo_set_watermark(struct iio_dev *indio_dev,
+ unsigned int val)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ st->fifo.watermark.accel = val;
+ return inv_icm45600_buffer_update_watermark(st);
+}
+
+static int inv_icm45600_accel_hwfifo_flush(struct iio_dev *indio_dev,
+ unsigned int count)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ int ret;
+
+ if (count == 0)
+ return 0;
+
+ guard(mutex)(&st->lock);
+
+ ret = inv_icm45600_buffer_hwfifo_flush(st, count);
+ if (ret)
+ return ret;
+
+ return st->fifo.nb.accel;
+}
+
+static const struct iio_info inv_icm45600_accel_info = {
+ .read_raw = inv_icm45600_accel_read_raw,
+ .read_avail = inv_icm45600_accel_read_avail,
+ .write_raw = inv_icm45600_accel_write_raw,
+ .write_raw_get_fmt = inv_icm45600_accel_write_raw_get_fmt,
+ .debugfs_reg_access = inv_icm45600_debugfs_reg,
+ .update_scan_mode = inv_icm45600_accel_update_scan_mode,
+ .hwfifo_set_watermark = inv_icm45600_accel_hwfifo_set_watermark,
+ .hwfifo_flush_to_buffer = inv_icm45600_accel_hwfifo_flush,
+};
+
+struct iio_dev *inv_icm45600_accel_init(struct inv_icm45600_state *st)
+{
+ struct device *dev = regmap_get_device(st->map);
+ struct inv_icm45600_sensor_state *accel_st;
+ struct inv_sensors_timestamp_chip ts_chip;
+ struct iio_dev *indio_dev;
+ const char *name;
+ int ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s-accel", st->chip_info->name);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*accel_st));
+ if (!indio_dev)
+ return ERR_PTR(-ENOMEM);
+ accel_st = iio_priv(indio_dev);
+
+ accel_st->scales = st->chip_info->accel_scales;
+ accel_st->scales_len = st->chip_info->accel_scales_len * 2;
+
+ /* low-power (LP) mode by default at init, no ULP mode */
+ accel_st->power_mode = INV_ICM45600_SENSOR_MODE_LOW_POWER;
+ ret = regmap_set_bits(st->map, INV_ICM45600_REG_SMC_CONTROL_0,
+ INV_ICM45600_SMC_CONTROL_0_ACCEL_LP_CLK_SEL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * Clock is 32kHz, so its period is 31250ns.
+ * Jitter is +/- 2% (20 per mille).
+ */
+ ts_chip.clock_period = 31250;
+ ts_chip.jitter = 20;
+ ts_chip.init_period = inv_icm45600_odr_to_period(st->conf.accel.odr);
+ inv_sensors_timestamp_init(&accel_st->ts, &ts_chip);
+
+ iio_device_set_drvdata(indio_dev, st);
+ indio_dev->name = name;
+ indio_dev->info = &inv_icm45600_accel_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = inv_icm45600_accel_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_icm45600_accel_channels);
+ indio_dev->available_scan_masks = inv_icm45600_accel_scan_masks;
+
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
+ &inv_icm45600_buffer_ops);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return indio_dev;
+}
+
+int inv_icm45600_accel_parse_fifo(struct iio_dev *indio_dev)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = &accel_st->ts;
+ ssize_t i, size;
+ unsigned int no;
+
+ /* parse all fifo packets */
+ for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) {
+ struct inv_icm45600_accel_buffer buffer = { };
+ const struct inv_icm45600_fifo_sensor_data *accel, *gyro;
+ const __le16 *timestamp;
+ const s8 *temp;
+ unsigned int odr;
+ s64 ts_val;
+
+ size = inv_icm45600_fifo_decode_packet(&st->fifo.data[i],
+ &accel, &gyro, &temp, &timestamp, &odr);
+ /* quit if error or FIFO is empty */
+ if (size <= 0)
+ return size;
+
+ /* skip packet if no accel data or data is invalid */
+ if (accel == NULL || !inv_icm45600_fifo_is_data_valid(accel))
+ continue;
+
+ /* update odr */
+ if (odr & INV_ICM45600_SENSOR_ACCEL)
+ inv_sensors_timestamp_apply_odr(ts, st->fifo.period,
+ st->fifo.nb.total, no);
+
+ memcpy(&buffer.accel, accel, sizeof(buffer.accel));
+ /* Convert the 8-bit FIFO temperature to the high-resolution format. */
+ buffer.temp = temp ? (*temp * 64) : 0;
+ ts_val = inv_sensors_timestamp_pop(ts);
+ iio_push_to_buffers_with_ts(indio_dev, &buffer, sizeof(buffer), ts_val);
+ }
+
+ return 0;
+}
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c b/drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
new file mode 100644
index 000000000000..2b9ea317385c
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.c
@@ -0,0 +1,558 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2025 Invensense, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/minmax.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/common/inv_sensors_timestamp.h>
+#include <linux/iio/iio.h>
+
+#include "inv_icm45600_buffer.h"
+#include "inv_icm45600.h"
+
+/* FIFO header: 1 byte */
+#define INV_ICM45600_FIFO_EXT_HEADER BIT(7)
+#define INV_ICM45600_FIFO_HEADER_ACCEL BIT(6)
+#define INV_ICM45600_FIFO_HEADER_GYRO BIT(5)
+#define INV_ICM45600_FIFO_HEADER_HIGH_RES BIT(4)
+#define INV_ICM45600_FIFO_HEADER_TMST_FSYNC GENMASK(3, 2)
+#define INV_ICM45600_FIFO_HEADER_ODR_ACCEL BIT(1)
+#define INV_ICM45600_FIFO_HEADER_ODR_GYRO BIT(0)
+
+struct inv_icm45600_fifo_1sensor_packet {
+ u8 header;
+ struct inv_icm45600_fifo_sensor_data data;
+ s8 temp;
+} __packed;
+
+struct inv_icm45600_fifo_2sensors_packet {
+ u8 header;
+ struct inv_icm45600_fifo_sensor_data accel;
+ struct inv_icm45600_fifo_sensor_data gyro;
+ s8 temp;
+ __le16 timestamp;
+} __packed;
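+
+/*
+ * With __packed, the 1-sensor packet is 8 bytes (1 header + 6 data + 1 temp)
+ * and the 2-sensors packet is 16 bytes (1 header + 2 * 6 data + 1 temp +
+ * 2 timestamp).
+ */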
+
+ssize_t inv_icm45600_fifo_decode_packet(const void *packet,
+ const struct inv_icm45600_fifo_sensor_data **accel,
+ const struct inv_icm45600_fifo_sensor_data **gyro,
+ const s8 **temp,
+ const __le16 **timestamp, unsigned int *odr)
+{
+ const struct inv_icm45600_fifo_1sensor_packet *pack1 = packet;
+ const struct inv_icm45600_fifo_2sensors_packet *pack2 = packet;
+ u8 header = *((const u8 *)packet);
+
+ /* FIFO extended header */
+ if (header & INV_ICM45600_FIFO_EXT_HEADER) {
+ /* Not yet supported */
+ return 0;
+ }
+
+ /* Handle ODR flags. */
+ *odr = 0;
+ if (header & INV_ICM45600_FIFO_HEADER_ODR_GYRO)
+ *odr |= INV_ICM45600_SENSOR_GYRO;
+ if (header & INV_ICM45600_FIFO_HEADER_ODR_ACCEL)
+ *odr |= INV_ICM45600_SENSOR_ACCEL;
+
+ /* Accel + Gyro data are present. */
+ if ((header & INV_ICM45600_FIFO_HEADER_ACCEL) &&
+ (header & INV_ICM45600_FIFO_HEADER_GYRO)) {
+ *accel = &pack2->accel;
+ *gyro = &pack2->gyro;
+ *temp = &pack2->temp;
+ *timestamp = &pack2->timestamp;
+ return sizeof(*pack2);
+ }
+
+ /* Accel data only. */
+ if (header & INV_ICM45600_FIFO_HEADER_ACCEL) {
+ *accel = &pack1->data;
+ *gyro = NULL;
+ *temp = &pack1->temp;
+ *timestamp = NULL;
+ return sizeof(*pack1);
+ }
+
+ /* Gyro data only. */
+ if (header & INV_ICM45600_FIFO_HEADER_GYRO) {
+ *accel = NULL;
+ *gyro = &pack1->data;
+ *temp = &pack1->temp;
+ *timestamp = NULL;
+ return sizeof(*pack1);
+ }
+
+ /* Invalid packet if here. */
+ return -EINVAL;
+}
+
+void inv_icm45600_buffer_update_fifo_period(struct inv_icm45600_state *st)
+{
+ u32 period_gyro, period_accel;
+
+ if (st->fifo.en & INV_ICM45600_SENSOR_GYRO)
+ period_gyro = inv_icm45600_odr_to_period(st->conf.gyro.odr);
+ else
+ period_gyro = U32_MAX;
+
+ if (st->fifo.en & INV_ICM45600_SENSOR_ACCEL)
+ period_accel = inv_icm45600_odr_to_period(st->conf.accel.odr);
+ else
+ period_accel = U32_MAX;
+
+ st->fifo.period = min(period_gyro, period_accel);
+}
+
+int inv_icm45600_buffer_set_fifo_en(struct inv_icm45600_state *st,
+ unsigned int fifo_en)
+{
+ unsigned int mask;
+ int ret;
+
+ mask = INV_ICM45600_FIFO_CONFIG3_GYRO_EN |
+ INV_ICM45600_FIFO_CONFIG3_ACCEL_EN;
+
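+ /*
+ * Note: both EN bits are set or cleared together here; any invalid
+ * samples in the FIFO are dropped at parsing time via
+ * inv_icm45600_fifo_is_data_valid().
+ */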
+ ret = regmap_assign_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3, mask,
+ (fifo_en & INV_ICM45600_SENSOR_GYRO) ||
+ (fifo_en & INV_ICM45600_SENSOR_ACCEL));
+ if (ret)
+ return ret;
+
+ st->fifo.en = fifo_en;
+ inv_icm45600_buffer_update_fifo_period(st);
+
+ return 0;
+}
+
+static unsigned int inv_icm45600_wm_truncate(unsigned int watermark, size_t packet_size,
+ unsigned int fifo_period)
+{
+ size_t watermark_max, grace_samples;
+
+ /* Keep 20ms of margin for processing the FIFO. */
+ grace_samples = (20U * NSEC_PER_MSEC) / fifo_period;
+ if (grace_samples < 1)
+ grace_samples = 1;
+
+ watermark_max = INV_ICM45600_FIFO_SIZE_MAX / packet_size;
+ watermark_max -= grace_samples;
+
+ return min(watermark, watermark_max);
+}
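+
+/*
+ * Illustration: with a 10ms FIFO period (100Hz), grace_samples = 2; assuming
+ * an 8kB FIFO and 16-byte packets, watermark_max would be 512 - 2 = 510.
+ */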
+
+/**
+ * inv_icm45600_buffer_update_watermark - update FIFO watermark threshold
+ * @st: driver internal state
+ *
+ * The FIFO watermark threshold is computed from the watermark values requested
+ * for the gyro and accel sensors. Since the watermark is all about acceptable
+ * data latency, use the smallest setting of the two. This means choosing the
+ * smallest latency, which is not as simple as choosing the smallest watermark
+ * value, because latency depends on both watermark and ODR. It requires
+ * several steps:
+ * 1) compute gyro and accel latencies and choose the smallest value.
+ * 2) adapt the chosen latency so that it is a multiple of both the gyro and
+ * accel ones, otherwise one requirement may be missed. For example, with
+ * gyro @100Hz wm 4 and accel @100Hz wm 6, choosing the value 4 does not
+ * meet the accel latency requirement because 6 is not a multiple of 4;
+ * the value to use is 2.
+ * 3) since all periods are multiples of each other, compute the watermark by
+ * dividing this latency by the smallest period, which corresponds to the
+ * FIFO frequency.
+ *
+ * Returns: 0 on success, a negative error code otherwise.
+ */
+int inv_icm45600_buffer_update_watermark(struct inv_icm45600_state *st)
+{
+ const size_t packet_size = sizeof(struct inv_icm45600_fifo_2sensors_packet);
+ unsigned int wm_gyro, wm_accel, watermark;
+ u32 period_gyro, period_accel, period;
+ u32 latency_gyro, latency_accel, latency;
+
+ /* Compute sensors latency, depending on sensor watermark and odr. */
+ wm_gyro = inv_icm45600_wm_truncate(st->fifo.watermark.gyro, packet_size,
+ st->fifo.period);
+ wm_accel = inv_icm45600_wm_truncate(st->fifo.watermark.accel, packet_size,
+ st->fifo.period);
+ /* Use us for odr to avoid overflow using 32 bits values. */
+ period_gyro = inv_icm45600_odr_to_period(st->conf.gyro.odr) / NSEC_PER_USEC;
+ period_accel = inv_icm45600_odr_to_period(st->conf.accel.odr) / NSEC_PER_USEC;
+ latency_gyro = period_gyro * wm_gyro;
+ latency_accel = period_accel * wm_accel;
+
+ /* 0 value for watermark means that the sensor is turned off. */
+ if (wm_gyro == 0 && wm_accel == 0)
+ return 0;
+
+ if (latency_gyro == 0) {
+ watermark = wm_accel;
+ st->fifo.watermark.eff_accel = wm_accel;
+ } else if (latency_accel == 0) {
+ watermark = wm_gyro;
+ st->fifo.watermark.eff_gyro = wm_gyro;
+ } else {
+ /* Compute the smallest latency that is a multiple of both. */
+ if (latency_gyro <= latency_accel)
+ latency = latency_gyro - (latency_accel % latency_gyro);
+ else
+ latency = latency_accel - (latency_gyro % latency_accel);
+ /* Use the shortest period. */
+ period = min(period_gyro, period_accel);
+ /* All this works because periods are multiples of each other. */
+ watermark = max(latency / period, 1);
+ /* Update effective watermark. */
+ st->fifo.watermark.eff_gyro = max(latency / period_gyro, 1);
+ st->fifo.watermark.eff_accel = max(latency / period_accel, 1);
+ }
+
+ st->buffer.u16 = cpu_to_le16(watermark);
+ return regmap_bulk_write(st->map, INV_ICM45600_REG_FIFO_WATERMARK,
+ &st->buffer.u16, sizeof(st->buffer.u16));
+}
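+
+/*
+ * Worked example of the computation above: gyro @100Hz wm 4 (40ms) and
+ * accel @100Hz wm 6 (60ms) give latency = 40 - (60 % 40) = 20ms, so the
+ * watermark is 20ms / 10ms = 2 and both effective watermarks are 2.
+ */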
+
+static int inv_icm45600_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct device *dev = regmap_get_device(st->map);
+ struct inv_icm45600_sensor_state *sensor_st = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = &sensor_st->ts;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&st->lock);
+ inv_sensors_timestamp_reset(ts);
+
+ return 0;
+}
+
+/*
+ * The update_scan_mode callback turns the sensors on and sets the data FIFO
+ * enable bits.
+ */
+static int inv_icm45600_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ unsigned int val;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ /* Exit if FIFO is already on. */
+ if (st->fifo.on) {
+ st->fifo.on++;
+ return 0;
+ }
+
+ ret = regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG2,
+ INV_ICM45600_REG_FIFO_CONFIG2_FIFO_FLUSH);
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(st->map, INV_ICM45600_REG_INT1_CONFIG0,
+ INV_ICM45600_INT1_CONFIG0_FIFO_THS_EN |
+ INV_ICM45600_INT1_CONFIG0_FIFO_FULL_EN);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(INV_ICM45600_FIFO_CONFIG0_MODE_MASK,
+ INV_ICM45600_FIFO_CONFIG0_MODE_STREAM);
+ ret = regmap_update_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG0,
+ INV_ICM45600_FIFO_CONFIG0_MODE_MASK, val);
+ if (ret)
+ return ret;
+
+ /* Enable writing sensor data to FIFO. */
+ ret = regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3,
+ INV_ICM45600_FIFO_CONFIG3_IF_EN);
+ if (ret)
+ return ret;
+
+ st->fifo.on++;
+ return 0;
+}
+
+static int inv_icm45600_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ unsigned int val;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ /* Exit if there are several sensors using the FIFO. */
+ if (st->fifo.on > 1) {
+ st->fifo.on--;
+ return 0;
+ }
+
+ /* Disable writing sensor data to FIFO. */
+ ret = regmap_clear_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3,
+ INV_ICM45600_FIFO_CONFIG3_IF_EN);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(INV_ICM45600_FIFO_CONFIG0_MODE_MASK,
+ INV_ICM45600_FIFO_CONFIG0_MODE_BYPASS);
+ ret = regmap_update_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG0,
+ INV_ICM45600_FIFO_CONFIG0_MODE_MASK, val);
+ if (ret)
+ return ret;
+
+ ret = regmap_clear_bits(st->map, INV_ICM45600_REG_INT1_CONFIG0,
+ INV_ICM45600_INT1_CONFIG0_FIFO_THS_EN |
+ INV_ICM45600_INT1_CONFIG0_FIFO_FULL_EN);
+ if (ret)
+ return ret;
+
+ ret = regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG2,
+ INV_ICM45600_REG_FIFO_CONFIG2_FIFO_FLUSH);
+ if (ret)
+ return ret;
+
+ st->fifo.on--;
+ return 0;
+}
+
+static int _inv_icm45600_buffer_postdisable(struct inv_icm45600_state *st,
+ unsigned int sensor, unsigned int *watermark,
+ unsigned int *sleep)
+{
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ int ret;
+
+ ret = inv_icm45600_buffer_set_fifo_en(st, st->fifo.en & ~sensor);
+ if (ret)
+ return ret;
+
+ *watermark = 0;
+ ret = inv_icm45600_buffer_update_watermark(st);
+ if (ret)
+ return ret;
+
+ conf.mode = INV_ICM45600_SENSOR_MODE_OFF;
+ if (sensor == INV_ICM45600_SENSOR_GYRO)
+ return inv_icm45600_set_gyro_conf(st, &conf, sleep);
+ else
+ return inv_icm45600_set_accel_conf(st, &conf, sleep);
+}
+
+static int inv_icm45600_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int sensor;
+ unsigned int *watermark;
+ unsigned int sleep;
+ int ret;
+
+ if (indio_dev == st->indio_gyro) {
+ sensor = INV_ICM45600_SENSOR_GYRO;
+ watermark = &st->fifo.watermark.gyro;
+ } else if (indio_dev == st->indio_accel) {
+ sensor = INV_ICM45600_SENSOR_ACCEL;
+ watermark = &st->fifo.watermark.accel;
+ } else {
+ return -EINVAL;
+ }
+
+ sleep = 0;
+ scoped_guard(mutex, &st->lock)
+ ret = _inv_icm45600_buffer_postdisable(st, sensor, watermark, &sleep);
+
+ /* Sleep required time. */
+ if (sleep)
+ msleep(sleep);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+const struct iio_buffer_setup_ops inv_icm45600_buffer_ops = {
+ .preenable = inv_icm45600_buffer_preenable,
+ .postenable = inv_icm45600_buffer_postenable,
+ .predisable = inv_icm45600_buffer_predisable,
+ .postdisable = inv_icm45600_buffer_postdisable,
+};
+
+int inv_icm45600_buffer_fifo_read(struct inv_icm45600_state *st,
+ unsigned int max)
+{
+ const ssize_t packet_size = sizeof(struct inv_icm45600_fifo_2sensors_packet);
+ __le16 *raw_fifo_count;
+ size_t fifo_nb, i;
+ ssize_t size;
+ const struct inv_icm45600_fifo_sensor_data *accel, *gyro;
+ const __le16 *timestamp;
+ const s8 *temp;
+ unsigned int odr;
+ int ret;
+
+ /* Reset all samples counters. */
+ st->fifo.count = 0;
+ st->fifo.nb.gyro = 0;
+ st->fifo.nb.accel = 0;
+ st->fifo.nb.total = 0;
+
+ raw_fifo_count = &st->buffer.u16;
+ ret = regmap_bulk_read(st->map, INV_ICM45600_REG_FIFO_COUNT,
+ raw_fifo_count, sizeof(*raw_fifo_count));
+ if (ret)
+ return ret;
+
+ /* Check and limit number of samples if requested. */
+ fifo_nb = le16_to_cpup(raw_fifo_count);
+ if (fifo_nb == 0)
+ return 0;
+ if (max > 0 && fifo_nb > max)
+ fifo_nb = max;
+
+ /* Try to read all FIFO data in internal buffer. */
+ st->fifo.count = fifo_nb * packet_size;
+ ret = regmap_noinc_read(st->map, INV_ICM45600_REG_FIFO_DATA,
+ st->fifo.data, st->fifo.count);
+ if (ret == -ENOTSUPP || ret == -EFBIG) {
+ /* Reading the full FIFO is not supported, read samples one by one. */
+ ret = 0;
+ for (i = 0; i < st->fifo.count && ret == 0; i += packet_size)
+ ret = regmap_noinc_read(st->map, INV_ICM45600_REG_FIFO_DATA,
+ &st->fifo.data[i], packet_size);
+ }
+ if (ret)
+ return ret;
+
+ for (i = 0; i < st->fifo.count; i += size) {
+ size = inv_icm45600_fifo_decode_packet(&st->fifo.data[i], &accel, &gyro,
+ &temp, &timestamp, &odr);
+ if (size <= 0)
+ /* No more samples in the buffer. */
+ break;
+ if (gyro && inv_icm45600_fifo_is_data_valid(gyro))
+ st->fifo.nb.gyro++;
+ if (accel && inv_icm45600_fifo_is_data_valid(accel))
+ st->fifo.nb.accel++;
+ st->fifo.nb.total++;
+ }
+
+ return 0;
+}
+
+int inv_icm45600_buffer_fifo_parse(struct inv_icm45600_state *st)
+{
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(st->indio_accel);
+ struct inv_sensors_timestamp *ts;
+ int ret;
+
+ if (st->fifo.nb.total == 0)
+ return 0;
+
+ /* Handle gyroscope timestamp and FIFO data parsing. */
+ if (st->fifo.nb.gyro > 0) {
+ ts = &gyro_st->ts;
+ inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_gyro,
+ st->timestamp.gyro);
+ ret = inv_icm45600_gyro_parse_fifo(st->indio_gyro);
+ if (ret)
+ return ret;
+ }
+
+ /* Handle accelerometer timestamp and FIFO data parsing. */
+ if (st->fifo.nb.accel > 0) {
+ ts = &accel_st->ts;
+ inv_sensors_timestamp_interrupt(ts, st->fifo.watermark.eff_accel,
+ st->timestamp.accel);
+ ret = inv_icm45600_accel_parse_fifo(st->indio_accel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int inv_icm45600_buffer_hwfifo_flush(struct inv_icm45600_state *st,
+ unsigned int count)
+{
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(st->indio_accel);
+ struct inv_sensors_timestamp *ts;
+ s64 gyro_ts, accel_ts;
+ int ret;
+
+ gyro_ts = iio_get_time_ns(st->indio_gyro);
+ accel_ts = iio_get_time_ns(st->indio_accel);
+
+ ret = inv_icm45600_buffer_fifo_read(st, count);
+ if (ret)
+ return ret;
+
+ if (st->fifo.nb.total == 0)
+ return 0;
+
+ if (st->fifo.nb.gyro > 0) {
+ ts = &gyro_st->ts;
+ inv_sensors_timestamp_interrupt(ts, st->fifo.nb.gyro, gyro_ts);
+ ret = inv_icm45600_gyro_parse_fifo(st->indio_gyro);
+ if (ret)
+ return ret;
+ }
+
+ if (st->fifo.nb.accel > 0) {
+ ts = &accel_st->ts;
+ inv_sensors_timestamp_interrupt(ts, st->fifo.nb.accel, accel_ts);
+ ret = inv_icm45600_accel_parse_fifo(st->indio_accel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int inv_icm45600_buffer_init(struct inv_icm45600_state *st)
+{
+ int ret;
+ unsigned int val;
+
+ st->fifo.watermark.eff_gyro = 1;
+ st->fifo.watermark.eff_accel = 1;
+
+ /* Disable all FIFO EN bits. */
+ ret = regmap_write(st->map, INV_ICM45600_REG_FIFO_CONFIG3, 0);
+ if (ret)
+ return ret;
+
+ /* Disable FIFO and set depth. */
+ val = FIELD_PREP(INV_ICM45600_FIFO_CONFIG0_MODE_MASK,
+ INV_ICM45600_FIFO_CONFIG0_MODE_BYPASS) |
+ FIELD_PREP(INV_ICM45600_FIFO_CONFIG0_FIFO_DEPTH_MASK,
+ INV_ICM45600_FIFO_CONFIG0_FIFO_DEPTH_MAX);
+
+ ret = regmap_write(st->map, INV_ICM45600_REG_FIFO_CONFIG0, val);
+ if (ret)
+ return ret;
+
+ /* Enable only timestamp in fifo, disable compression. */
+ ret = regmap_write(st->map, INV_ICM45600_REG_FIFO_CONFIG4,
+ INV_ICM45600_FIFO_CONFIG4_TMST_FSYNC_EN);
+ if (ret)
+ return ret;
+
+ /* Enable FIFO continuous watermark interrupt. */
+ return regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG2,
+ INV_ICM45600_REG_FIFO_CONFIG2_WM_GT_TH);
+}
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.h b/drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.h
new file mode 100644
index 000000000000..e047871cdbe2
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600_buffer.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2025 Invensense, Inc. */
+
+#ifndef INV_ICM45600_BUFFER_H_
+#define INV_ICM45600_BUFFER_H_
+
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#include <linux/iio/iio.h>
+
+struct inv_icm45600_state;
+
+#define INV_ICM45600_SENSOR_GYRO BIT(0)
+#define INV_ICM45600_SENSOR_ACCEL BIT(1)
+#define INV_ICM45600_SENSOR_TEMP BIT(2)
+
+/**
+ * struct inv_icm45600_fifo - FIFO state variables
+ * @on: reference counter for FIFO on.
+ * @en: bits field of INV_ICM45600_SENSOR_* for FIFO EN bits.
+ * @period: FIFO internal period.
+ * @watermark: watermark configuration values for accel and gyro.
+ * @watermark.gyro: requested watermark for gyro.
+ * @watermark.accel: requested watermark for accel.
+ * @watermark.eff_gyro: effective watermark for gyro.
+ * @watermark.eff_accel: effective watermark for accel.
+ * @count: number of bytes in the FIFO data buffer.
+ * @nb: gyro, accel and total samples in the FIFO data buffer.
+ * @data: FIFO data buffer aligned for DMA (8kB)
+ */
+struct inv_icm45600_fifo {
+ unsigned int on;
+ unsigned int en;
+ u32 period;
+ struct {
+ unsigned int gyro;
+ unsigned int accel;
+ unsigned int eff_gyro;
+ unsigned int eff_accel;
+ } watermark;
+ size_t count;
+ struct {
+ size_t gyro;
+ size_t accel;
+ size_t total;
+ } nb;
+ u8 *data;
+};
+
+/* FIFO data packet */
+struct inv_icm45600_fifo_sensor_data {
+ __le16 x;
+ __le16 y;
+ __le16 z;
+} __packed;
+#define INV_ICM45600_DATA_INVALID S16_MIN
+
+static inline bool
+inv_icm45600_fifo_is_data_valid(const struct inv_icm45600_fifo_sensor_data *s)
+{
+ s16 x, y, z;
+
+ x = le16_to_cpu(s->x);
+ y = le16_to_cpu(s->y);
+ z = le16_to_cpu(s->z);
+
+ return (x != INV_ICM45600_DATA_INVALID ||
+ y != INV_ICM45600_DATA_INVALID ||
+ z != INV_ICM45600_DATA_INVALID);
+}
+
+ssize_t inv_icm45600_fifo_decode_packet(const void *packet,
+ const struct inv_icm45600_fifo_sensor_data **accel,
+ const struct inv_icm45600_fifo_sensor_data **gyro,
+ const s8 **temp,
+ const __le16 **timestamp, unsigned int *odr);
+
+extern const struct iio_buffer_setup_ops inv_icm45600_buffer_ops;
+
+int inv_icm45600_buffer_init(struct inv_icm45600_state *st);
+
+void inv_icm45600_buffer_update_fifo_period(struct inv_icm45600_state *st);
+
+int inv_icm45600_buffer_set_fifo_en(struct inv_icm45600_state *st,
+ unsigned int fifo_en);
+
+int inv_icm45600_buffer_update_watermark(struct inv_icm45600_state *st);
+
+int inv_icm45600_buffer_fifo_read(struct inv_icm45600_state *st,
+ unsigned int max);
+
+int inv_icm45600_buffer_fifo_parse(struct inv_icm45600_state *st);
+
+int inv_icm45600_buffer_hwfifo_flush(struct inv_icm45600_state *st,
+ unsigned int count);
+
+#endif
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600_core.c b/drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
new file mode 100644
index 000000000000..ab1cb7b9dba4
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
@@ -0,0 +1,988 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2025 Invensense, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/limits.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/time.h>
+#include <linux/types.h>
+
+#include <asm/byteorder.h>
+
+#include <linux/iio/iio.h>
+
+#include "inv_icm45600_buffer.h"
+#include "inv_icm45600.h"
+
+static int inv_icm45600_ireg_read(struct regmap *map, unsigned int reg,
+ u8 *data, size_t count)
+{
+ const struct device *dev = regmap_get_device(map);
+ struct inv_icm45600_state *st = dev_get_drvdata(dev);
+ unsigned int d;
+ size_t i;
+ int ret;
+
+ st->buffer.ireg[0] = FIELD_GET(INV_ICM45600_REG_BANK_MASK, reg);
+ st->buffer.ireg[1] = FIELD_GET(INV_ICM45600_REG_ADDR_MASK, reg);
+
+ /* Burst write address. */
+ ret = regmap_bulk_write(map, INV_ICM45600_REG_IREG_ADDR, st->buffer.ireg, 2);
+ /*
+ * Wait while the device is busy processing the address.
+ * Datasheet: 13.3 MINIMUM WAIT TIME-GAP
+ */
+ fsleep(INV_ICM45600_IREG_DELAY_US);
+ if (ret)
+ return ret;
+
+ /* Read the data. */
+ for (i = 0; i < count; i++) {
+ ret = regmap_read(map, INV_ICM45600_REG_IREG_DATA, &d);
+ /*
+ * Wait while the device is busy processing the address.
+ * Datasheet: 13.3 MINIMUM WAIT TIME-GAP
+ */
+ fsleep(INV_ICM45600_IREG_DELAY_US);
+ if (ret)
+ return ret;
+ data[i] = d;
+ }
+
+ return 0;
+}
+
+static int inv_icm45600_ireg_write(struct regmap *map, unsigned int reg,
+ const u8 *data, size_t count)
+{
+ const struct device *dev = regmap_get_device(map);
+ struct inv_icm45600_state *st = dev_get_drvdata(dev);
+ size_t i;
+ int ret;
+
+ st->buffer.ireg[0] = FIELD_GET(INV_ICM45600_REG_BANK_MASK, reg);
+ st->buffer.ireg[1] = FIELD_GET(INV_ICM45600_REG_ADDR_MASK, reg);
+ st->buffer.ireg[2] = data[0];
+
+ /* Burst write address and first byte. */
+ ret = regmap_bulk_write(map, INV_ICM45600_REG_IREG_ADDR, st->buffer.ireg, 3);
+ /*
+ * Wait while the device is busy processing the address.
+ * Datasheet: 13.3 MINIMUM WAIT TIME-GAP
+ */
+ fsleep(INV_ICM45600_IREG_DELAY_US);
+ if (ret)
+ return ret;
+
+ /* Write the remaining bytes. */
+ for (i = 1; i < count; i++) {
+ ret = regmap_write(map, INV_ICM45600_REG_IREG_DATA, data[i]);
+ /*
+ * Wait while the device is busy processing the address.
+ * Datasheet: 13.3 MINIMUM WAIT TIME-GAP
+ */
+ fsleep(INV_ICM45600_IREG_DELAY_US);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int inv_icm45600_read(void *context, const void *reg_buf, size_t reg_size,
+ void *val_buf, size_t val_size)
+{
+ unsigned int reg = be16_to_cpup(reg_buf);
+ struct regmap *map = context;
+
+ if (FIELD_GET(INV_ICM45600_REG_BANK_MASK, reg))
+ return inv_icm45600_ireg_read(map, reg, val_buf, val_size);
+
+ return regmap_bulk_read(map, FIELD_GET(INV_ICM45600_REG_ADDR_MASK, reg),
+ val_buf, val_size);
+}
+
+static int inv_icm45600_write(void *context, const void *data, size_t count)
+{
+ const u8 *d = data;
+ unsigned int reg = be16_to_cpup(data);
+ struct regmap *map = context;
+
+ if (FIELD_GET(INV_ICM45600_REG_BANK_MASK, reg))
+ return inv_icm45600_ireg_write(map, reg, d + 2, count - 2);
+
+ return regmap_bulk_write(map, FIELD_GET(INV_ICM45600_REG_ADDR_MASK, reg),
+ d + 2, count - 2);
+}
+
+static const struct regmap_bus inv_icm45600_regmap_bus = {
+ .read = inv_icm45600_read,
+ .write = inv_icm45600_write,
+};
+
+static const struct regmap_config inv_icm45600_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+};
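+
+/*
+ * Registers are addressed on 16 bits: INV_ICM45600_REG_BANK_MASK selects the
+ * bank and INV_ICM45600_REG_ADDR_MASK the address within it. Bank 0 is
+ * accessed directly on the bus, other banks go through the IREG indirect
+ * access helpers above.
+ */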
+
+/* Chip initial default configurations (default FS values are based on the icm45686). */
+static const struct inv_icm45600_conf inv_icm45600_default_conf = {
+ .gyro = {
+ .mode = INV_ICM45600_SENSOR_MODE_OFF,
+ .fs = INV_ICM45686_GYRO_FS_2000DPS,
+ .odr = INV_ICM45600_ODR_800HZ_LN,
+ .filter = INV_ICM45600_GYRO_LP_AVG_SEL_8X,
+ },
+ .accel = {
+ .mode = INV_ICM45600_SENSOR_MODE_OFF,
+ .fs = INV_ICM45686_ACCEL_FS_16G,
+ .odr = INV_ICM45600_ODR_800HZ_LN,
+ .filter = INV_ICM45600_ACCEL_LP_AVG_SEL_4X,
+ },
+};
+
+static const struct inv_icm45600_conf inv_icm45686_default_conf = {
+ .gyro = {
+ .mode = INV_ICM45600_SENSOR_MODE_OFF,
+ .fs = INV_ICM45686_GYRO_FS_4000DPS,
+ .odr = INV_ICM45600_ODR_800HZ_LN,
+ .filter = INV_ICM45600_GYRO_LP_AVG_SEL_8X,
+ },
+ .accel = {
+ .mode = INV_ICM45600_SENSOR_MODE_OFF,
+ .fs = INV_ICM45686_ACCEL_FS_32G,
+ .odr = INV_ICM45600_ODR_800HZ_LN,
+ .filter = INV_ICM45600_ACCEL_LP_AVG_SEL_4X,
+ },
+};
+
+const struct inv_icm45600_chip_info inv_icm45605_chip_info = {
+ .whoami = INV_ICM45600_WHOAMI_ICM45605,
+ .name = "icm45605",
+ .conf = &inv_icm45600_default_conf,
+ .accel_scales = (const int *)inv_icm45600_accel_scale,
+ .accel_scales_len = INV_ICM45600_ACCEL_FS_MAX,
+ .gyro_scales = (const int *)inv_icm45600_gyro_scale,
+ .gyro_scales_len = INV_ICM45600_GYRO_FS_MAX,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm45605_chip_info, "IIO_ICM45600");
+
+const struct inv_icm45600_chip_info inv_icm45606_chip_info = {
+ .whoami = INV_ICM45600_WHOAMI_ICM45606,
+ .name = "icm45606",
+ .conf = &inv_icm45600_default_conf,
+ .accel_scales = (const int *)inv_icm45600_accel_scale,
+ .accel_scales_len = INV_ICM45600_ACCEL_FS_MAX,
+ .gyro_scales = (const int *)inv_icm45600_gyro_scale,
+ .gyro_scales_len = INV_ICM45600_GYRO_FS_MAX,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm45606_chip_info, "IIO_ICM45600");
+
+const struct inv_icm45600_chip_info inv_icm45608_chip_info = {
+ .whoami = INV_ICM45600_WHOAMI_ICM45608,
+ .name = "icm45608",
+ .conf = &inv_icm45600_default_conf,
+ .accel_scales = (const int *)inv_icm45600_accel_scale,
+ .accel_scales_len = INV_ICM45600_ACCEL_FS_MAX,
+ .gyro_scales = (const int *)inv_icm45600_gyro_scale,
+ .gyro_scales_len = INV_ICM45600_GYRO_FS_MAX,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm45608_chip_info, "IIO_ICM45600");
+
+const struct inv_icm45600_chip_info inv_icm45634_chip_info = {
+ .whoami = INV_ICM45600_WHOAMI_ICM45634,
+ .name = "icm45634",
+ .conf = &inv_icm45600_default_conf,
+ .accel_scales = (const int *)inv_icm45600_accel_scale,
+ .accel_scales_len = INV_ICM45600_ACCEL_FS_MAX,
+ .gyro_scales = (const int *)inv_icm45600_gyro_scale,
+ .gyro_scales_len = INV_ICM45600_GYRO_FS_MAX,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm45634_chip_info, "IIO_ICM45600");
+
+const struct inv_icm45600_chip_info inv_icm45686_chip_info = {
+ .whoami = INV_ICM45600_WHOAMI_ICM45686,
+ .name = "icm45686",
+ .conf = &inv_icm45686_default_conf,
+ .accel_scales = (const int *)inv_icm45686_accel_scale,
+ .accel_scales_len = INV_ICM45686_ACCEL_FS_MAX,
+ .gyro_scales = (const int *)inv_icm45686_gyro_scale,
+ .gyro_scales_len = INV_ICM45686_GYRO_FS_MAX,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm45686_chip_info, "IIO_ICM45600");
+
+const struct inv_icm45600_chip_info inv_icm45687_chip_info = {
+ .whoami = INV_ICM45600_WHOAMI_ICM45687,
+ .name = "icm45687",
+ .conf = &inv_icm45686_default_conf,
+ .accel_scales = (const int *)inv_icm45686_accel_scale,
+ .accel_scales_len = INV_ICM45686_ACCEL_FS_MAX,
+ .gyro_scales = (const int *)inv_icm45686_gyro_scale,
+ .gyro_scales_len = INV_ICM45686_GYRO_FS_MAX,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm45687_chip_info, "IIO_ICM45600");
+
+const struct inv_icm45600_chip_info inv_icm45688p_chip_info = {
+ .whoami = INV_ICM45600_WHOAMI_ICM45688P,
+ .name = "icm45688p",
+ .conf = &inv_icm45686_default_conf,
+ .accel_scales = (const int *)inv_icm45686_accel_scale,
+ .accel_scales_len = INV_ICM45686_ACCEL_FS_MAX,
+ .gyro_scales = (const int *)inv_icm45686_gyro_scale,
+ .gyro_scales_len = INV_ICM45686_GYRO_FS_MAX,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm45688p_chip_info, "IIO_ICM45600");
+
+const struct inv_icm45600_chip_info inv_icm45689_chip_info = {
+ .whoami = INV_ICM45600_WHOAMI_ICM45689,
+ .name = "icm45689",
+ .conf = &inv_icm45686_default_conf,
+ .accel_scales = (const int *)inv_icm45686_accel_scale,
+ .accel_scales_len = INV_ICM45686_ACCEL_FS_MAX,
+ .gyro_scales = (const int *)inv_icm45686_gyro_scale,
+ .gyro_scales_len = INV_ICM45686_GYRO_FS_MAX,
+};
+EXPORT_SYMBOL_NS_GPL(inv_icm45689_chip_info, "IIO_ICM45600");
+
+const struct iio_mount_matrix *
+inv_icm45600_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ const struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+
+ return &st->orientation;
+}
+
+u32 inv_icm45600_odr_to_period(enum inv_icm45600_odr odr)
+{
+ static const u32 odr_periods[INV_ICM45600_ODR_MAX] = {
+ /* The first 3 values are reserved and left at 0. */
+ [INV_ICM45600_ODR_6400HZ_LN] = 156250,
+ [INV_ICM45600_ODR_3200HZ_LN] = 312500,
+ [INV_ICM45600_ODR_1600HZ_LN] = 625000,
+ [INV_ICM45600_ODR_800HZ_LN] = 1250000,
+ [INV_ICM45600_ODR_400HZ] = 2500000,
+ [INV_ICM45600_ODR_200HZ] = 5000000,
+ [INV_ICM45600_ODR_100HZ] = 10000000,
+ [INV_ICM45600_ODR_50HZ] = 20000000,
+ [INV_ICM45600_ODR_25HZ] = 40000000,
+ [INV_ICM45600_ODR_12_5HZ] = 80000000,
+ [INV_ICM45600_ODR_6_25HZ_LP] = 160000000,
+ [INV_ICM45600_ODR_3_125HZ_LP] = 320000000,
+ [INV_ICM45600_ODR_1_5625HZ_LP] = 640000000,
+ };
+
+ return odr_periods[odr];
+}
+
+static int inv_icm45600_set_pwr_mgmt0(struct inv_icm45600_state *st,
+ enum inv_icm45600_sensor_mode gyro,
+ enum inv_icm45600_sensor_mode accel,
+ unsigned int *sleep_ms)
+{
+ enum inv_icm45600_sensor_mode oldgyro = st->conf.gyro.mode;
+ enum inv_icm45600_sensor_mode oldaccel = st->conf.accel.mode;
+ unsigned int sleepval;
+ unsigned int val;
+ int ret;
+
+ /* if nothing changed, exit */
+ if (gyro == oldgyro && accel == oldaccel)
+ return 0;
+
+ val = FIELD_PREP(INV_ICM45600_PWR_MGMT0_GYRO_MODE_MASK, gyro) |
+ FIELD_PREP(INV_ICM45600_PWR_MGMT0_ACCEL_MODE_MASK, accel);
+ ret = regmap_write(st->map, INV_ICM45600_REG_PWR_MGMT0, val);
+ if (ret)
+ return ret;
+
+ st->conf.gyro.mode = gyro;
+ st->conf.accel.mode = accel;
+
+ /* Compute the required wait time for sensors to stabilize. */
+ sleepval = 0;
+ if (accel != oldaccel && oldaccel == INV_ICM45600_SENSOR_MODE_OFF)
+ sleepval = max(sleepval, INV_ICM45600_ACCEL_STARTUP_TIME_MS);
+
+ if (gyro != oldgyro) {
+ if (oldgyro == INV_ICM45600_SENSOR_MODE_OFF)
+ sleepval = max(sleepval, INV_ICM45600_GYRO_STARTUP_TIME_MS);
+ else if (gyro == INV_ICM45600_SENSOR_MODE_OFF)
+ sleepval = max(sleepval, INV_ICM45600_GYRO_STOP_TIME_MS);
+ }
+
+ /* Defer the sleep if a sleep pointer is provided, otherwise sleep directly. */
+ if (sleep_ms)
+ *sleep_ms = sleepval;
+ else if (sleepval)
+ msleep(sleepval);
+
+ return 0;
+}
+
+static void inv_icm45600_set_default_conf(struct inv_icm45600_sensor_conf *conf,
+ struct inv_icm45600_sensor_conf *oldconf)
+{
+ /* Sanitize missing values with current values. */
+ if (conf->mode == U8_MAX)
+ conf->mode = oldconf->mode;
+ if (conf->fs == U8_MAX)
+ conf->fs = oldconf->fs;
+ if (conf->odr == U8_MAX)
+ conf->odr = oldconf->odr;
+ if (conf->filter == U8_MAX)
+ conf->filter = oldconf->filter;
+}
+
+int inv_icm45600_set_accel_conf(struct inv_icm45600_state *st,
+ struct inv_icm45600_sensor_conf *conf,
+ unsigned int *sleep_ms)
+{
+ struct inv_icm45600_sensor_conf *oldconf = &st->conf.accel;
+ unsigned int val;
+ int ret;
+
+ inv_icm45600_set_default_conf(conf, oldconf);
+
+ /* Force the power mode based on the ODR when the sensor is on. */
+ if (conf->mode > INV_ICM45600_SENSOR_MODE_STANDBY) {
+ if (conf->odr <= INV_ICM45600_ODR_800HZ_LN) {
+ conf->mode = INV_ICM45600_SENSOR_MODE_LOW_NOISE;
+ } else {
+ conf->mode = INV_ICM45600_SENSOR_MODE_LOW_POWER;
+ /*
+ * Sanitize the averaging value depending on ODR for
+ * low-power mode: maximum 1x @400Hz.
+ */
+ if (conf->odr == INV_ICM45600_ODR_400HZ)
+ conf->filter = INV_ICM45600_ACCEL_LP_AVG_SEL_1X;
+ else
+ conf->filter = INV_ICM45600_ACCEL_LP_AVG_SEL_4X;
+ }
+ }
+
+ /* Set accel fullscale & odr. */
+ if (conf->fs != oldconf->fs || conf->odr != oldconf->odr) {
+ val = FIELD_PREP(INV_ICM45600_ACCEL_CONFIG0_FS_MASK, conf->fs) |
+ FIELD_PREP(INV_ICM45600_ACCEL_CONFIG0_ODR_MASK, conf->odr);
+ ret = regmap_write(st->map, INV_ICM45600_REG_ACCEL_CONFIG0, val);
+ if (ret)
+ return ret;
+ oldconf->fs = conf->fs;
+ oldconf->odr = conf->odr;
+ }
+
+ /* Set accel low-power average filter. */
+ if (conf->filter != oldconf->filter) {
+ ret = regmap_write(st->map, INV_ICM45600_IPREG_SYS2_REG_129,
+ conf->filter);
+ if (ret)
+ return ret;
+ oldconf->filter = conf->filter;
+ }
+
+ /* Update the sensor accel mode. */
+ return inv_icm45600_set_pwr_mgmt0(st, st->conf.gyro.mode, conf->mode,
+ sleep_ms);
+}
+
+int inv_icm45600_set_gyro_conf(struct inv_icm45600_state *st,
+ struct inv_icm45600_sensor_conf *conf,
+ unsigned int *sleep_ms)
+{
+ struct inv_icm45600_sensor_conf *oldconf = &st->conf.gyro;
+ unsigned int val;
+ int ret;
+
+ inv_icm45600_set_default_conf(conf, oldconf);
+
+ /* Force the power mode based on the ODR when the sensor is on. */
+ if (conf->mode > INV_ICM45600_SENSOR_MODE_STANDBY) {
+ if (conf->odr >= INV_ICM45600_ODR_6_25HZ_LP) {
+ conf->mode = INV_ICM45600_SENSOR_MODE_LOW_POWER;
+ conf->filter = INV_ICM45600_GYRO_LP_AVG_SEL_8X;
+ } else {
+ conf->mode = INV_ICM45600_SENSOR_MODE_LOW_NOISE;
+ }
+ }
+
+ /* Set gyro fullscale & odr. */
+ if (conf->fs != oldconf->fs || conf->odr != oldconf->odr) {
+ val = FIELD_PREP(INV_ICM45600_GYRO_CONFIG0_FS_MASK, conf->fs) |
+ FIELD_PREP(INV_ICM45600_GYRO_CONFIG0_ODR_MASK, conf->odr);
+ ret = regmap_write(st->map, INV_ICM45600_REG_GYRO_CONFIG0, val);
+ if (ret)
+ return ret;
+ oldconf->fs = conf->fs;
+ oldconf->odr = conf->odr;
+ }
+
+ /* Set gyro low-power average filter. */
+ if (conf->filter != oldconf->filter) {
+ val = FIELD_PREP(INV_ICM45600_IPREG_SYS1_170_GYRO_LP_AVG_MASK, conf->filter);
+ ret = regmap_update_bits(st->map, INV_ICM45600_IPREG_SYS1_REG_170,
+ INV_ICM45600_IPREG_SYS1_170_GYRO_LP_AVG_MASK, val);
+ if (ret)
+ return ret;
+ oldconf->filter = conf->filter;
+ }
+
+ /* Update the sensor gyro mode. */
+ return inv_icm45600_set_pwr_mgmt0(st, conf->mode, st->conf.accel.mode,
+ sleep_ms);
+}
+
+int inv_icm45600_debugfs_reg(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ if (readval)
+ return regmap_read(st->map, reg, readval);
+ else
+ return regmap_write(st->map, reg, writeval);
+}
+
+static int inv_icm45600_set_conf(struct inv_icm45600_state *st,
+ const struct inv_icm45600_conf *conf)
+{
+ unsigned int val;
+ int ret;
+
+ val = FIELD_PREP(INV_ICM45600_PWR_MGMT0_GYRO_MODE_MASK, conf->gyro.mode) |
+ FIELD_PREP(INV_ICM45600_PWR_MGMT0_ACCEL_MODE_MASK, conf->accel.mode);
+ ret = regmap_write(st->map, INV_ICM45600_REG_PWR_MGMT0, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(INV_ICM45600_GYRO_CONFIG0_FS_MASK, conf->gyro.fs) |
+ FIELD_PREP(INV_ICM45600_GYRO_CONFIG0_ODR_MASK, conf->gyro.odr);
+ ret = regmap_write(st->map, INV_ICM45600_REG_GYRO_CONFIG0, val);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(INV_ICM45600_ACCEL_CONFIG0_FS_MASK, conf->accel.fs) |
+ FIELD_PREP(INV_ICM45600_ACCEL_CONFIG0_ODR_MASK, conf->accel.odr);
+ ret = regmap_write(st->map, INV_ICM45600_REG_ACCEL_CONFIG0, val);
+ if (ret)
+ return ret;
+
+ /* Save configuration. */
+ st->conf = *conf;
+
+ return 0;
+}
+
+/**
+ * inv_icm45600_setup() - check and setup chip
+ * @st: driver internal state
+ * @chip_info: detected chip description
+ * @reset: define whether a reset is required or not
+ * @bus_setup: callback for setting up bus specific registers
+ *
+ * Returns: 0 on success, a negative error code otherwise.
+ */
+static int inv_icm45600_setup(struct inv_icm45600_state *st,
+ const struct inv_icm45600_chip_info *chip_info,
+ bool reset, inv_icm45600_bus_setup bus_setup)
+{
+ const struct device *dev = regmap_get_device(st->map);
+ unsigned int val;
+ int ret;
+
+ /* Set chip bus configuration if specified. */
+ if (bus_setup) {
+ ret = bus_setup(st);
+ if (ret)
+ return ret;
+ }
+
+ /* Check chip self-identification value. */
+ ret = regmap_read(st->map, INV_ICM45600_REG_WHOAMI, &val);
+ if (ret)
+ return ret;
+ if (val != chip_info->whoami) {
+ /*
+ * SPI interface has no ack mechanism.
+ * 0xFF or 0x00 whoami means no response from the device.
+ */
+ if (val == U8_MAX || val == 0)
+ return dev_err_probe(dev, -ENODEV,
+ "Invalid whoami %#02x expected %#02x (%s)\n",
+ val, chip_info->whoami, chip_info->name);
+
+ dev_warn(dev, "Unexpected whoami %#02x expected %#02x (%s)\n",
+ val, chip_info->whoami, chip_info->name);
+ }
+
+ st->chip_info = chip_info;
+
+ if (reset) {
+ /* Reset previous state. */
+ ret = regmap_write(st->map, INV_ICM45600_REG_MISC2,
+ INV_ICM45600_MISC2_SOFT_RESET);
+ if (ret)
+ return ret;
+ /*
+ * IMU reset time.
+ * Datasheet: 16.84 REG_MISC2
+ */
+ fsleep(USEC_PER_MSEC);
+
+ if (bus_setup) {
+ ret = bus_setup(st);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(st->map, INV_ICM45600_REG_INT_STATUS, &val);
+ if (ret)
+ return ret;
+ if (!(val & INV_ICM45600_INT_STATUS_RESET_DONE)) {
+ dev_err(dev, "reset error, reset done bit not set\n");
+ return -ENODEV;
+ }
+ }
+
+ return inv_icm45600_set_conf(st, chip_info->conf);
+}
+
+static irqreturn_t inv_icm45600_irq_timestamp(int irq, void *_data)
+{
+ struct inv_icm45600_state *st = _data;
+
+ st->timestamp.gyro = iio_get_time_ns(st->indio_gyro);
+ st->timestamp.accel = iio_get_time_ns(st->indio_accel);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t inv_icm45600_irq_handler(int irq, void *_data)
+{
+ struct inv_icm45600_state *st = _data;
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int mask, status;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->map, INV_ICM45600_REG_INT_STATUS, &status);
+ if (ret)
+ return IRQ_HANDLED;
+
+ /* Read the FIFO data. */
+ mask = INV_ICM45600_INT_STATUS_FIFO_THS | INV_ICM45600_INT_STATUS_FIFO_FULL;
+ if (status & mask) {
+ ret = inv_icm45600_buffer_fifo_read(st, 0);
+ if (ret) {
+ dev_err(dev, "FIFO read error %d\n", ret);
+ return IRQ_HANDLED;
+ }
+ ret = inv_icm45600_buffer_fifo_parse(st);
+ if (ret)
+ dev_err(dev, "FIFO parsing error %d\n", ret);
+ }
+
+ /* FIFO full warning. */
+ if (status & INV_ICM45600_INT_STATUS_FIFO_FULL)
+ dev_warn(dev, "FIFO full possible data lost!\n");
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * inv_icm45600_irq_init() - initialize int pin and interrupt handler
+ * @st: driver internal state
+ * @irq: irq number
+ * @irq_type: irq trigger type
+ * @open_drain: true if irq is open drain, false for push-pull
+ *
+ * Returns: 0 on success, a negative error code otherwise.
+ */
+static int inv_icm45600_irq_init(struct inv_icm45600_state *st, int irq,
+ int irq_type, bool open_drain)
+{
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int val;
+ int ret;
+
+ /* Configure INT1 interrupt: default is active low on edge. */
+ switch (irq_type) {
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ val = INV_ICM45600_INT1_CONFIG2_ACTIVE_HIGH;
+ break;
+ default:
+ val = INV_ICM45600_INT1_CONFIG2_ACTIVE_LOW;
+ break;
+ }
+
+ switch (irq_type) {
+ case IRQF_TRIGGER_LOW:
+ case IRQF_TRIGGER_HIGH:
+ val |= INV_ICM45600_INT1_CONFIG2_LATCHED;
+ break;
+ default:
+ break;
+ }
+
+ if (!open_drain)
+ val |= INV_ICM45600_INT1_CONFIG2_PUSH_PULL;
+
+ ret = regmap_write(st->map, INV_ICM45600_REG_INT1_CONFIG2, val);
+ if (ret)
+ return ret;
+
+ return devm_request_threaded_irq(dev, irq, inv_icm45600_irq_timestamp,
+ inv_icm45600_irq_handler, irq_type | IRQF_ONESHOT,
+ "inv_icm45600", st);
+}
+
+static int inv_icm45600_timestamp_setup(struct inv_icm45600_state *st)
+{
+ /* Enable timestamps. */
+ return regmap_set_bits(st->map, INV_ICM45600_REG_SMC_CONTROL_0,
+ INV_ICM45600_SMC_CONTROL_0_TMST_EN);
+}
+
+static int inv_icm45600_enable_regulator_vddio(struct inv_icm45600_state *st)
+{
+ int ret;
+
+ ret = regulator_enable(st->vddio_supply);
+ if (ret)
+ return ret;
+
+ /*
+ * Wait a little for supply ramp.
+ * Duration is empirically defined.
+ */
+ fsleep(3 * USEC_PER_MSEC);
+
+ return 0;
+}
+
+static void inv_icm45600_disable_vddio_reg(void *_data)
+{
+ struct inv_icm45600_state *st = _data;
+ struct device *dev = regmap_get_device(st->map);
+
+ if (pm_runtime_status_suspended(dev))
+ return;
+
+ regulator_disable(st->vddio_supply);
+}
+
+int inv_icm45600_core_probe(struct regmap *regmap, const struct inv_icm45600_chip_info *chip_info,
+ bool reset, inv_icm45600_bus_setup bus_setup)
+{
+ struct device *dev = regmap_get_device(regmap);
+ struct inv_icm45600_state *st;
+ struct regmap *regmap_custom;
+ struct fwnode_handle *fwnode;
+ int irq, irq_type;
+ bool open_drain;
+ int ret;
+
+ /* Get INT1, the only supported interrupt. */
+ fwnode = dev_fwnode(dev);
+ irq = fwnode_irq_get_byname(fwnode, "int1");
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Missing int1 interrupt\n");
+
+ irq_type = irq_get_trigger_type(irq);
+
+ open_drain = device_property_read_bool(dev, "drive-open-drain");
+
+ regmap_custom = devm_regmap_init(dev, &inv_icm45600_regmap_bus, regmap,
+ &inv_icm45600_regmap_config);
+ if (IS_ERR(regmap_custom))
+ return dev_err_probe(dev, PTR_ERR(regmap_custom), "Failed to register regmap\n");
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, st);
+
+ st->fifo.data = devm_kzalloc(dev, 8192, GFP_KERNEL);
+ if (!st->fifo.data)
+ return -ENOMEM;
+
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
+ st->map = regmap_custom;
+
+ ret = iio_read_mount_matrix(dev, &st->orientation);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to retrieve mounting matrix\n");
+
+ st->vddio_supply = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(st->vddio_supply))
+ return PTR_ERR(st->vddio_supply);
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get vdd regulator\n");
+
+ /*
+ * Supply ramp time + Start-up time.
+ * Datasheet: 3.3.2 A.C. Electrical Characteristics
+ */
+ fsleep(5 * USEC_PER_MSEC);
+
+ ret = inv_icm45600_enable_regulator_vddio(st);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, inv_icm45600_disable_vddio_reg, st);
+ if (ret)
+ return ret;
+
+ ret = inv_icm45600_setup(st, chip_info, reset, bus_setup);
+ if (ret)
+ return ret;
+
+ ret = inv_icm45600_timestamp_setup(st);
+ if (ret)
+ return ret;
+
+ ret = inv_icm45600_buffer_init(st);
+ if (ret)
+ return ret;
+
+ st->indio_gyro = inv_icm45600_gyro_init(st);
+ if (IS_ERR(st->indio_gyro))
+ return PTR_ERR(st->indio_gyro);
+
+ st->indio_accel = inv_icm45600_accel_init(st);
+ if (IS_ERR(st->indio_accel))
+ return PTR_ERR(st->indio_accel);
+
+ ret = inv_icm45600_irq_init(st, irq, irq_type, open_drain);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_set_active_enabled(dev);
+ if (ret)
+ return ret;
+
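+ /*
+ * Hold a usage reference while probe finishes (the device is already
+ * active), then let autosuspend take over after the final put.
+ */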
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_autosuspend_delay(dev, 2 * MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(inv_icm45600_core_probe, "IIO_ICM45600");
+
+/*
+ * System suspend saves the sensors' state and turns everything off.
+ */
+static int inv_icm45600_suspend(struct device *dev)
+{
+ struct inv_icm45600_state *st = dev_get_drvdata(dev);
+ int ret;
+
+ scoped_guard(mutex, &st->lock) {
+ /* Disable FIFO data streaming. */
+ if (st->fifo.on) {
+ unsigned int val;
+
+ /* Clear FIFO_CONFIG3_IF_EN before changing the FIFO configuration */
+ ret = regmap_clear_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3,
+ INV_ICM45600_FIFO_CONFIG3_IF_EN);
+ if (ret)
+ return ret;
+ val = FIELD_PREP(INV_ICM45600_FIFO_CONFIG0_MODE_MASK,
+ INV_ICM45600_FIFO_CONFIG0_MODE_BYPASS);
+ ret = regmap_update_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG0,
+ INV_ICM45600_FIFO_CONFIG0_MODE_MASK, val);
+ if (ret)
+ return ret;
+ }
+
+ /* Save sensors states */
+ st->suspended.gyro = st->conf.gyro.mode;
+ st->suspended.accel = st->conf.accel.mode;
+ }
+
+ return pm_runtime_force_suspend(dev);
+}
+
+/*
+ * System resume powers the chip back on and restores the sensors' state.
+ * Runtime power management is manually put back into the active state.
+ */
+static int inv_icm45600_resume(struct device *dev)
+{
+ struct inv_icm45600_state *st = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock) {
+ /* Restore sensors state. */
+ ret = inv_icm45600_set_pwr_mgmt0(st, st->suspended.gyro,
+ st->suspended.accel, NULL);
+ if (ret)
+ return ret;
+
+ /* Restore FIFO data streaming. */
+ if (st->fifo.on) {
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(st->indio_gyro);
+ struct inv_icm45600_sensor_state *accel_st = iio_priv(st->indio_accel);
+ unsigned int val;
+
+ inv_sensors_timestamp_reset(&gyro_st->ts);
+ inv_sensors_timestamp_reset(&accel_st->ts);
+ val = FIELD_PREP(INV_ICM45600_FIFO_CONFIG0_MODE_MASK,
+ INV_ICM45600_FIFO_CONFIG0_MODE_STREAM);
+ ret = regmap_update_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG0,
+ INV_ICM45600_FIFO_CONFIG0_MODE_MASK, val);
+ if (ret)
+ return ret;
+ /* FIFO_CONFIG3_IF_EN must only be set at the end of the FIFO configuration */
+ ret = regmap_set_bits(st->map, INV_ICM45600_REG_FIFO_CONFIG3,
+ INV_ICM45600_FIFO_CONFIG3_IF_EN);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+/* Runtime suspend will turn off sensors that are enabled by iio devices. */
+static int inv_icm45600_runtime_suspend(struct device *dev)
+{
+ struct inv_icm45600_state *st = dev_get_drvdata(dev);
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ /* disable all sensors */
+ ret = inv_icm45600_set_pwr_mgmt0(st, INV_ICM45600_SENSOR_MODE_OFF,
+ INV_ICM45600_SENSOR_MODE_OFF, NULL);
+ if (ret)
+ return ret;
+
+ regulator_disable(st->vddio_supply);
+
+ return 0;
+}
+
+/* Sensors are enabled by the IIO devices; no need to turn them back on here. */
+static int inv_icm45600_runtime_resume(struct device *dev)
+{
+ struct inv_icm45600_state *st = dev_get_drvdata(dev);
+
+ guard(mutex)(&st->lock);
+
+ return inv_icm45600_enable_regulator_vddio(st);
+}
+
+static int _inv_icm45600_temp_read(struct inv_icm45600_state *st, s16 *temp)
+{
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ int ret;
+
+ /* Make sure a sensor is on. */
+ if (st->conf.gyro.mode == INV_ICM45600_SENSOR_MODE_OFF &&
+ st->conf.accel.mode == INV_ICM45600_SENSOR_MODE_OFF) {
+ conf.mode = INV_ICM45600_SENSOR_MODE_LOW_POWER;
+ ret = inv_icm45600_set_accel_conf(st, &conf, NULL);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_bulk_read(st->map, INV_ICM45600_REG_TEMP_DATA,
+ &st->buffer.u16, sizeof(st->buffer.u16));
+ if (ret)
+ return ret;
+
+ *temp = (s16)le16_to_cpup(&st->buffer.u16);
+ if (*temp == INV_ICM45600_DATA_INVALID)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int inv_icm45600_temp_read(struct inv_icm45600_state *st, s16 *temp)
+{
+ struct device *dev = regmap_get_device(st->map);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = _inv_icm45600_temp_read(st, temp);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+int inv_icm45600_temp_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ s16 temp;
+ int ret;
+
+ if (chan->type != IIO_TEMP)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = inv_icm45600_temp_read(st, &temp);
+ if (ret)
+ return ret;
+ *val = temp;
+ return IIO_VAL_INT;
+ /*
+ * T°C = (temp / 128) + 25
+ * Tm°C = (temp * 1000 / 128) + 25000
+ * scale: 1000 / 128 = 7.8125
+ * offset: 25000
+ */
+ case IIO_CHAN_INFO_SCALE:
+ *val = 7;
+ *val2 = 812500;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = 25000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+EXPORT_NS_GPL_DEV_PM_OPS(inv_icm45600_pm_ops, "IIO_ICM45600") = {
+ SYSTEM_SLEEP_PM_OPS(inv_icm45600_suspend, inv_icm45600_resume)
+ RUNTIME_PM_OPS(inv_icm45600_runtime_suspend,
+ inv_icm45600_runtime_resume, NULL)
+};
+
+MODULE_AUTHOR("InvenSense, Inc.");
+MODULE_DESCRIPTION("InvenSense ICM-456xx device driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_INV_SENSORS_TIMESTAMP");
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c b/drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
new file mode 100644
index 000000000000..1e85fd0e4ea9
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600_gyro.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2025 Invensense, Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/math64.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/common/inv_sensors_timestamp.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/kfifo_buf.h>
+
+#include "inv_icm45600_buffer.h"
+#include "inv_icm45600.h"
+
+enum inv_icm45600_gyro_scan {
+ INV_ICM45600_GYRO_SCAN_X,
+ INV_ICM45600_GYRO_SCAN_Y,
+ INV_ICM45600_GYRO_SCAN_Z,
+ INV_ICM45600_GYRO_SCAN_TEMP,
+ INV_ICM45600_GYRO_SCAN_TIMESTAMP,
+};
+
+static const struct iio_chan_spec_ext_info inv_icm45600_gyro_ext_infos[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, inv_icm45600_get_mount_matrix),
+ { }
+};
+
+#define INV_ICM45600_GYRO_CHAN(_modifier, _index, _ext_info) \
+ { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = _modifier, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_type = \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+ .ext_info = _ext_info, \
+ }
+
+static const struct iio_chan_spec inv_icm45600_gyro_channels[] = {
+ INV_ICM45600_GYRO_CHAN(IIO_MOD_X, INV_ICM45600_GYRO_SCAN_X,
+ inv_icm45600_gyro_ext_infos),
+ INV_ICM45600_GYRO_CHAN(IIO_MOD_Y, INV_ICM45600_GYRO_SCAN_Y,
+ inv_icm45600_gyro_ext_infos),
+ INV_ICM45600_GYRO_CHAN(IIO_MOD_Z, INV_ICM45600_GYRO_SCAN_Z,
+ inv_icm45600_gyro_ext_infos),
+ INV_ICM45600_TEMP_CHAN(INV_ICM45600_GYRO_SCAN_TEMP),
+ IIO_CHAN_SOFT_TIMESTAMP(INV_ICM45600_GYRO_SCAN_TIMESTAMP),
+};
+
+/*
+ * IIO buffer data: size must be a power of 2 and timestamp aligned
+ * 16 bytes: 6 bytes angular velocity, 2 bytes temperature, 8 bytes timestamp
+ */
+struct inv_icm45600_gyro_buffer {
+ struct inv_icm45600_fifo_sensor_data gyro;
+ s16 temp;
+ aligned_s64 timestamp;
+};
+
+static const unsigned long inv_icm45600_gyro_scan_masks[] = {
+ /* 3-axis gyro + temperature */
+ BIT(INV_ICM45600_GYRO_SCAN_X) |
+ BIT(INV_ICM45600_GYRO_SCAN_Y) |
+ BIT(INV_ICM45600_GYRO_SCAN_Z) |
+ BIT(INV_ICM45600_GYRO_SCAN_TEMP),
+ 0
+};
+
+/* enable gyroscope sensor and FIFO write */
+static int inv_icm45600_gyro_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(indio_dev);
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ unsigned int fifo_en = 0;
+ unsigned int sleep = 0;
+ int ret;
+
+ scoped_guard(mutex, &st->lock) {
+ if (*scan_mask & BIT(INV_ICM45600_GYRO_SCAN_TEMP))
+ fifo_en |= INV_ICM45600_SENSOR_TEMP;
+
+ if (*scan_mask & (BIT(INV_ICM45600_GYRO_SCAN_X) |
+ BIT(INV_ICM45600_GYRO_SCAN_Y) |
+ BIT(INV_ICM45600_GYRO_SCAN_Z))) {
+ /* enable gyro sensor */
+ conf.mode = gyro_st->power_mode;
+ ret = inv_icm45600_set_gyro_conf(st, &conf, &sleep);
+ if (ret)
+ return ret;
+ fifo_en |= INV_ICM45600_SENSOR_GYRO;
+ }
+ ret = inv_icm45600_buffer_set_fifo_en(st, fifo_en | st->fifo.en);
+ }
+ if (sleep)
+ msleep(sleep);
+
+ return ret;
+}
+
+static int _inv_icm45600_gyro_read_sensor(struct inv_icm45600_state *st,
+ struct inv_icm45600_sensor_state *gyro_st,
+ unsigned int reg, int *val)
+{
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ int ret;
+
+ /* enable gyro sensor */
+ conf.mode = gyro_st->power_mode;
+ ret = inv_icm45600_set_gyro_conf(st, &conf, NULL);
+ if (ret)
+ return ret;
+
+ /* read gyro register data */
+ ret = regmap_bulk_read(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(le16_to_cpup(&st->buffer.u16), 15);
+ if (*val == INV_ICM45600_DATA_INVALID)
+ return -ENODATA;
+
+ return 0;
+}
+
+static int inv_icm45600_gyro_read_sensor(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int reg;
+ int ret;
+
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ reg = INV_ICM45600_REG_GYRO_DATA_X;
+ break;
+ case IIO_MOD_Y:
+ reg = INV_ICM45600_REG_GYRO_DATA_Y;
+ break;
+ case IIO_MOD_Z:
+ reg = INV_ICM45600_REG_GYRO_DATA_Z;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = _inv_icm45600_gyro_read_sensor(st, gyro_st, reg, val);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+/* IIO format int + nano */
+const int inv_icm45600_gyro_scale[][2] = {
+ /* +/- 2000dps => 0.001065264 rad/s */
+ [INV_ICM45600_GYRO_FS_2000DPS] = { 0, 1065264 },
+ /* +/- 1000dps => 0.000532632 rad/s */
+ [INV_ICM45600_GYRO_FS_1000DPS] = { 0, 532632 },
+ /* +/- 500dps => 0.000266316 rad/s */
+ [INV_ICM45600_GYRO_FS_500DPS] = { 0, 266316 },
+ /* +/- 250dps => 0.000133158 rad/s */
+ [INV_ICM45600_GYRO_FS_250DPS] = { 0, 133158 },
+ /* +/- 125dps => 0.000066579 rad/s */
+ [INV_ICM45600_GYRO_FS_125DPS] = { 0, 66579 },
+ /* +/- 62.5dps => 0.000033290 rad/s */
+ [INV_ICM45600_GYRO_FS_62_5DPS] = { 0, 33290 },
+ /* +/- 31.25dps => 0.000016645 rad/s */
+ [INV_ICM45600_GYRO_FS_31_25DPS] = { 0, 16645 },
+ /* +/- 15.625dps => 0.000008322 rad/s */
+ [INV_ICM45600_GYRO_FS_15_625DPS] = { 0, 8322 },
+};
+
+/* IIO format int + nano */
+const int inv_icm45686_gyro_scale[][2] = {
+ /* +/- 4000dps => 0.002130529 rad/s */
+ [INV_ICM45686_GYRO_FS_4000DPS] = { 0, 2130529 },
+ /* +/- 2000dps => 0.001065264 rad/s */
+ [INV_ICM45686_GYRO_FS_2000DPS] = { 0, 1065264 },
+ /* +/- 1000dps => 0.000532632 rad/s */
+ [INV_ICM45686_GYRO_FS_1000DPS] = { 0, 532632 },
+ /* +/- 500dps => 0.000266316 rad/s */
+ [INV_ICM45686_GYRO_FS_500DPS] = { 0, 266316 },
+ /* +/- 250dps => 0.000133158 rad/s */
+ [INV_ICM45686_GYRO_FS_250DPS] = { 0, 133158 },
+ /* +/- 125dps => 0.000066579 rad/s */
+ [INV_ICM45686_GYRO_FS_125DPS] = { 0, 66579 },
+ /* +/- 62.5dps => 0.000033290 rad/s */
+ [INV_ICM45686_GYRO_FS_62_5DPS] = { 0, 33290 },
+ /* +/- 31.25dps => 0.000016645 rad/s */
+ [INV_ICM45686_GYRO_FS_31_25DPS] = { 0, 16645 },
+ /* +/- 15.625dps => 0.000008322 rad/s */
+ [INV_ICM45686_GYRO_FS_15_625DPS] = { 0, 8322 },
+};
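+
+/*
+ * The scale values in the two tables above follow from
+ * (fs_dps / 32768) * pi / 180; e.g. +/-2000 dps gives
+ * (2000 / 32768) * pi / 180 = 0.001065264 rad/s.
+ */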
+
+static int inv_icm45600_gyro_read_scale(struct iio_dev *indio_dev,
+ int *val, int *val2)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(indio_dev);
+ unsigned int idx;
+
+ idx = st->conf.gyro.fs;
+
+ /* Full-scale register starts at 1 for non-High-FSR parts */
+ if (gyro_st->scales == (const int *)&inv_icm45600_gyro_scale)
+ idx--;
+
+ *val = gyro_st->scales[2 * idx];
+ *val2 = gyro_st->scales[2 * idx + 1];
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int inv_icm45600_gyro_write_scale(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int idx;
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ int ret;
+
+ for (idx = 0; idx < gyro_st->scales_len; idx += 2) {
+ if (val == gyro_st->scales[idx] &&
+ val2 == gyro_st->scales[idx + 1])
+ break;
+ }
+ if (idx == gyro_st->scales_len)
+ return -EINVAL;
+
+ conf.fs = idx / 2;
+
+ /* Full-scale register starts at 1 for non-High-FSR parts */
+ if (gyro_st->scales == (const int *)&inv_icm45600_gyro_scale)
+ conf.fs++;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = inv_icm45600_set_gyro_conf(st, &conf, NULL);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+/* IIO format int + micro */
+static const int inv_icm45600_gyro_odr[] = {
+ 1, 562500, /* 1.5625Hz */
+ 3, 125000, /* 3.125Hz */
+ 6, 250000, /* 6.25Hz */
+ 12, 500000, /* 12.5Hz */
+ 25, 0, /* 25Hz */
+ 50, 0, /* 50Hz */
+ 100, 0, /* 100Hz */
+ 200, 0, /* 200Hz */
+ 400, 0, /* 400Hz */
+ 800, 0, /* 800Hz */
+ 1600, 0, /* 1.6kHz */
+ 3200, 0, /* 3.2kHz */
+ 6400, 0, /* 6.4kHz */
+};
+
+static const int inv_icm45600_gyro_odr_conv[] = {
+ INV_ICM45600_ODR_1_5625HZ_LP,
+ INV_ICM45600_ODR_3_125HZ_LP,
+ INV_ICM45600_ODR_6_25HZ_LP,
+ INV_ICM45600_ODR_12_5HZ,
+ INV_ICM45600_ODR_25HZ,
+ INV_ICM45600_ODR_50HZ,
+ INV_ICM45600_ODR_100HZ,
+ INV_ICM45600_ODR_200HZ,
+ INV_ICM45600_ODR_400HZ,
+ INV_ICM45600_ODR_800HZ_LN,
+ INV_ICM45600_ODR_1600HZ_LN,
+ INV_ICM45600_ODR_3200HZ_LN,
+ INV_ICM45600_ODR_6400HZ_LN,
+};
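+
+/*
+ * Each frequency pair in inv_icm45600_gyro_odr[] maps by index to the
+ * register value at the same position in inv_icm45600_gyro_odr_conv[].
+ */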
+
+static int inv_icm45600_gyro_read_odr(struct inv_icm45600_state *st,
+ int *val, int *val2)
+{
+ unsigned int odr;
+ unsigned int i;
+
+ odr = st->conf.gyro.odr;
+
+ for (i = 0; i < ARRAY_SIZE(inv_icm45600_gyro_odr_conv); ++i) {
+ if (inv_icm45600_gyro_odr_conv[i] == odr)
+ break;
+ }
+ if (i >= ARRAY_SIZE(inv_icm45600_gyro_odr_conv))
+ return -EINVAL;
+
+ *val = inv_icm45600_gyro_odr[2 * i];
+ *val2 = inv_icm45600_gyro_odr[2 * i + 1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int _inv_icm45600_gyro_write_odr(struct iio_dev *indio_dev, int odr)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = &gyro_st->ts;
+ struct inv_icm45600_sensor_conf conf = INV_ICM45600_SENSOR_CONF_KEEP_VALUES;
+ int ret;
+
+ conf.odr = odr;
+ ret = inv_sensors_timestamp_update_odr(ts, inv_icm45600_odr_to_period(conf.odr),
+ iio_buffer_enabled(indio_dev));
+ if (ret)
+ return ret;
+
+ if (st->conf.gyro.mode != INV_ICM45600_SENSOR_MODE_OFF)
+ conf.mode = gyro_st->power_mode;
+
+ ret = inv_icm45600_set_gyro_conf(st, &conf, NULL);
+ if (ret)
+ return ret;
+
+ inv_icm45600_buffer_update_fifo_period(st);
+ inv_icm45600_buffer_update_watermark(st);
+
+ return 0;
+}
+
+static int inv_icm45600_gyro_write_odr(struct iio_dev *indio_dev,
+ int val, int val2)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct device *dev = regmap_get_device(st->map);
+ unsigned int idx;
+ int odr;
+ int ret;
+
+ for (idx = 0; idx < ARRAY_SIZE(inv_icm45600_gyro_odr); idx += 2) {
+ if (val == inv_icm45600_gyro_odr[idx] &&
+ val2 == inv_icm45600_gyro_odr[idx + 1])
+ break;
+ }
+ if (idx >= ARRAY_SIZE(inv_icm45600_gyro_odr))
+ return -EINVAL;
+
+ odr = inv_icm45600_gyro_odr_conv[idx / 2];
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = _inv_icm45600_gyro_write_odr(indio_dev, odr);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+/*
+ * Calibration bias values, IIO range format int + nano.
+ * Value is limited to +/-62.5 dps coded as 14-bit signed. Step is ~7.6 mdps.
+ */
+static int inv_icm45600_gyro_calibbias[] = {
+ -1, 90830336, /* min: -1.090830336 rad/s */
+ 0, 133158, /* step: 0.000133158 rad/s */
+ 1, 90697178, /* max: 1.090697178 rad/s */
+};
+
+static int inv_icm45600_gyro_read_offset(struct inv_icm45600_state *st,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2)
+{
+ struct device *dev = regmap_get_device(st->map);
+ s64 val64;
+ s32 bias;
+ unsigned int reg;
+ s16 offset;
+ int ret;
+
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ reg = INV_ICM45600_IPREG_SYS1_REG_42;
+ break;
+ case IIO_MOD_Y:
+ reg = INV_ICM45600_IPREG_SYS1_REG_56;
+ break;
+ case IIO_MOD_Z:
+ reg = INV_ICM45600_IPREG_SYS1_REG_70;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = regmap_bulk_read(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
+
+ pm_runtime_put_autosuspend(dev);
+ if (ret)
+ return ret;
+
+ offset = le16_to_cpup(&st->buffer.u16) & INV_ICM45600_GYRO_OFFUSER_MASK;
+ /* 14 bits signed value */
+ offset = sign_extend32(offset, 13);
+
+ /*
+ * convert raw offset to dps then to rad/s
+ * 14 bits signed raw max 62.5 to dps: 625 / 81920
+ * dps to rad: Pi / 180
+ * result in nano (1000000000)
+ * (offset * 625 * Pi * 1000000000) / (81920 * 180)
+ */
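+ /*
+ * Worked example: the maximum raw offset 8191 gives
+ * 8191 * 625 / 81920 = 62.492 dps = 1.090697178 rad/s,
+ * i.e. the inv_icm45600_gyro_calibbias maximum above.
+ */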
+ val64 = (s64)offset * 625LL * 3141592653LL;
+ /* for rounding, add + or - divisor (81920 * 180) divided by 2 */
+ if (val64 >= 0)
+ val64 += 81920 * 180 / 2;
+ else
+ val64 -= 81920 * 180 / 2;
+ bias = div_s64(val64, 81920 * 180);
+ *val = bias / 1000000000L;
+ *val2 = bias % 1000000000L;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int inv_icm45600_gyro_write_offset(struct inv_icm45600_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2)
+{
+ struct device *dev = regmap_get_device(st->map);
+ s64 val64, min, max;
+ unsigned int reg;
+ s16 offset;
+ int ret;
+
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+
+ switch (chan->channel2) {
+ case IIO_MOD_X:
+ reg = INV_ICM45600_IPREG_SYS1_REG_42;
+ break;
+ case IIO_MOD_Y:
+ reg = INV_ICM45600_IPREG_SYS1_REG_56;
+ break;
+ case IIO_MOD_Z:
+ reg = INV_ICM45600_IPREG_SYS1_REG_70;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* inv_icm45600_gyro_calibbias: min - step - max in nano */
+ min = (s64)inv_icm45600_gyro_calibbias[0] * 1000000000LL -
+ (s64)inv_icm45600_gyro_calibbias[1];
+ max = (s64)inv_icm45600_gyro_calibbias[4] * 1000000000LL +
+ (s64)inv_icm45600_gyro_calibbias[5];
+ val64 = (s64)val * 1000000000LL;
+ if (val >= 0)
+ val64 += (s64)val2;
+ else
+ val64 -= (s64)val2;
+ if (val64 < min || val64 > max)
+ return -EINVAL;
+
+ /*
+ * convert rad/s to dps then to raw value
+ * rad to dps: 180 / Pi
+ * dps to raw 14 bits signed, max 62.5: 8192 / 62.5
+ * val in nano (1000000000)
+ * val * 180 * 8192 / (Pi * 1000000000 * 62.5)
+ */
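+ /*
+ * Worked example: 1.090697178 rad/s scales to
+ * 1090697178 * 180 * 8192 / (314159265 * 625) ~= 8191 after rounding.
+ */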
+ val64 = val64 * 180LL * 8192;
+ /* for rounding, add + or - divisor (314159265 * 625) divided by 2 */
+ if (val64 >= 0)
+ val64 += 314159265LL * 625LL / 2LL;
+ else
+ val64 -= 314159265LL * 625LL / 2LL;
+ offset = div64_s64(val64, 314159265LL * 625LL);
+
+ /* clamp value limited to 14 bits signed */
+ offset = clamp(offset, -8192, 8191);
+
+ st->buffer.u16 = cpu_to_le16(offset & INV_ICM45600_GYRO_OFFUSER_MASK);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ scoped_guard(mutex, &st->lock)
+ ret = regmap_bulk_write(st->map, reg, &st->buffer.u16, sizeof(st->buffer.u16));
+
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+static int inv_icm45600_gyro_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ break;
+ case IIO_TEMP:
+ return inv_icm45600_temp_read_raw(indio_dev, chan, val, val2, mask);
+ default:
+ return -EINVAL;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = inv_icm45600_gyro_read_sensor(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ return inv_icm45600_gyro_read_scale(indio_dev, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return inv_icm45600_gyro_read_odr(st, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return inv_icm45600_gyro_read_offset(st, chan, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_icm45600_gyro_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals,
+ int *type, int *length, long mask)
+{
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(indio_dev);
+
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = gyro_st->scales;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ *length = gyro_st->scales_len;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = inv_icm45600_gyro_odr;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = ARRAY_SIZE(inv_icm45600_gyro_odr);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ *vals = inv_icm45600_gyro_calibbias;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_icm45600_gyro_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = inv_icm45600_gyro_write_scale(indio_dev, val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return inv_icm45600_gyro_write_odr(indio_dev, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = inv_icm45600_gyro_write_offset(st, chan, val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_icm45600_gyro_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ if (chan->type != IIO_ANGL_VEL)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int inv_icm45600_gyro_hwfifo_set_watermark(struct iio_dev *indio_dev,
+ unsigned int val)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+
+ guard(mutex)(&st->lock);
+
+ st->fifo.watermark.gyro = val;
+ return inv_icm45600_buffer_update_watermark(st);
+}
+
+static int inv_icm45600_gyro_hwfifo_flush(struct iio_dev *indio_dev,
+ unsigned int count)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ int ret;
+
+ if (count == 0)
+ return 0;
+
+ guard(mutex)(&st->lock);
+
+ ret = inv_icm45600_buffer_hwfifo_flush(st, count);
+ if (ret)
+ return ret;
+
+ return st->fifo.nb.gyro;
+}
+
+static const struct iio_info inv_icm45600_gyro_info = {
+ .read_raw = inv_icm45600_gyro_read_raw,
+ .read_avail = inv_icm45600_gyro_read_avail,
+ .write_raw = inv_icm45600_gyro_write_raw,
+ .write_raw_get_fmt = inv_icm45600_gyro_write_raw_get_fmt,
+ .debugfs_reg_access = inv_icm45600_debugfs_reg,
+ .update_scan_mode = inv_icm45600_gyro_update_scan_mode,
+ .hwfifo_set_watermark = inv_icm45600_gyro_hwfifo_set_watermark,
+ .hwfifo_flush_to_buffer = inv_icm45600_gyro_hwfifo_flush,
+};
+
+struct iio_dev *inv_icm45600_gyro_init(struct inv_icm45600_state *st)
+{
+ struct device *dev = regmap_get_device(st->map);
+ struct inv_icm45600_sensor_state *gyro_st;
+ struct inv_sensors_timestamp_chip ts_chip;
+ struct iio_dev *indio_dev;
+ const char *name;
+ int ret;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s-gyro", st->chip_info->name);
+ if (!name)
+ return ERR_PTR(-ENOMEM);
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*gyro_st));
+ if (!indio_dev)
+ return ERR_PTR(-ENOMEM);
+ gyro_st = iio_priv(indio_dev);
+
+ gyro_st->scales = st->chip_info->gyro_scales;
+ gyro_st->scales_len = st->chip_info->gyro_scales_len * 2;
+
+ /* low-noise by default at init */
+ gyro_st->power_mode = INV_ICM45600_SENSOR_MODE_LOW_NOISE;
+
+ /*
+ * clock period is 32kHz (31250ns)
+ * jitter is +/- 2% (20 per mille)
+ */
+ ts_chip.clock_period = 31250;
+ ts_chip.jitter = 20;
+ ts_chip.init_period = inv_icm45600_odr_to_period(st->conf.gyro.odr);
+ inv_sensors_timestamp_init(&gyro_st->ts, &ts_chip);
+
+ iio_device_set_drvdata(indio_dev, st);
+ indio_dev->name = name;
+ indio_dev->info = &inv_icm45600_gyro_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = inv_icm45600_gyro_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_icm45600_gyro_channels);
+ indio_dev->available_scan_masks = inv_icm45600_gyro_scan_masks;
+ indio_dev->setup_ops = &inv_icm45600_buffer_ops;
+
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev,
+ &inv_icm45600_buffer_ops);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return indio_dev;
+}
+
+int inv_icm45600_gyro_parse_fifo(struct iio_dev *indio_dev)
+{
+ struct inv_icm45600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm45600_sensor_state *gyro_st = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = &gyro_st->ts;
+ ssize_t i, size;
+ unsigned int no;
+
+ /* parse all fifo packets */
+ for (i = 0, no = 0; i < st->fifo.count; i += size, ++no) {
+ struct inv_icm45600_gyro_buffer buffer = { };
+ const struct inv_icm45600_fifo_sensor_data *accel, *gyro;
+ const __le16 *timestamp;
+ const s8 *temp;
+ unsigned int odr;
+ s64 ts_val;
+
+ size = inv_icm45600_fifo_decode_packet(&st->fifo.data[i],
+ &accel, &gyro, &temp, &timestamp, &odr);
+ /* quit if error or FIFO is empty */
+ if (size <= 0)
+ return size;
+
+ /* skip packet if no gyro data or data is invalid */
+ if (gyro == NULL || !inv_icm45600_fifo_is_data_valid(gyro))
+ continue;
+
+ /* update odr */
+ if (odr & INV_ICM45600_SENSOR_GYRO)
+ inv_sensors_timestamp_apply_odr(ts, st->fifo.period,
+ st->fifo.nb.total, no);
+
+ memcpy(&buffer.gyro, gyro, sizeof(buffer.gyro));
+ /* convert the 8-bit FIFO temperature to the high-resolution format */
+ buffer.temp = temp ? (*temp * 64) : 0;
+ ts_val = inv_sensors_timestamp_pop(ts);
+ iio_push_to_buffers_with_ts(indio_dev, &buffer, sizeof(buffer), ts_val);
+ }
+
+ return 0;
+}
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600_i2c.c b/drivers/iio/imu/inv_icm45600/inv_icm45600_i2c.c
new file mode 100644
index 000000000000..5ebc18121a11
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600_i2c.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2025 InvenSense, Inc. */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+
+#include "inv_icm45600.h"
+
+static const struct regmap_config inv_icm45600_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int inv_icm45600_probe(struct i2c_client *client)
+{
+ const struct inv_icm45600_chip_info *chip_info;
+ struct regmap *regmap;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -ENODEV;
+
+ chip_info = device_get_match_data(&client->dev);
+ if (!chip_info)
+ return -ENODEV;
+
+ regmap = devm_regmap_init_i2c(client, &inv_icm45600_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return inv_icm45600_core_probe(regmap, chip_info, true, NULL);
+}
+
+/*
+ * The device ID table identifies the devices supported by this
+ * driver.
+ */
+static const struct i2c_device_id inv_icm45600_id[] = {
+ { "icm45605", (kernel_ulong_t)&inv_icm45605_chip_info },
+ { "icm45606", (kernel_ulong_t)&inv_icm45606_chip_info },
+ { "icm45608", (kernel_ulong_t)&inv_icm45608_chip_info },
+ { "icm45634", (kernel_ulong_t)&inv_icm45634_chip_info },
+ { "icm45686", (kernel_ulong_t)&inv_icm45686_chip_info },
+ { "icm45687", (kernel_ulong_t)&inv_icm45687_chip_info },
+ { "icm45688p", (kernel_ulong_t)&inv_icm45688p_chip_info },
+ { "icm45689", (kernel_ulong_t)&inv_icm45689_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, inv_icm45600_id);
+
+static const struct of_device_id inv_icm45600_of_matches[] = {
+ {
+ .compatible = "invensense,icm45605",
+ .data = &inv_icm45605_chip_info,
+ }, {
+ .compatible = "invensense,icm45606",
+ .data = &inv_icm45606_chip_info,
+ }, {
+ .compatible = "invensense,icm45608",
+ .data = &inv_icm45608_chip_info,
+ }, {
+ .compatible = "invensense,icm45634",
+ .data = &inv_icm45634_chip_info,
+ }, {
+ .compatible = "invensense,icm45686",
+ .data = &inv_icm45686_chip_info,
+ }, {
+ .compatible = "invensense,icm45687",
+ .data = &inv_icm45687_chip_info,
+ }, {
+ .compatible = "invensense,icm45688p",
+ .data = &inv_icm45688p_chip_info,
+ }, {
+ .compatible = "invensense,icm45689",
+ .data = &inv_icm45689_chip_info,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, inv_icm45600_of_matches);
+
+static struct i2c_driver inv_icm45600_driver = {
+ .driver = {
+ .name = "inv-icm45600-i2c",
+ .of_match_table = inv_icm45600_of_matches,
+ .pm = pm_ptr(&inv_icm45600_pm_ops),
+ },
+ .id_table = inv_icm45600_id,
+ .probe = inv_icm45600_probe,
+};
+module_i2c_driver(inv_icm45600_driver);
+
+MODULE_AUTHOR("InvenSense, Inc.");
+MODULE_DESCRIPTION("InvenSense ICM-456xx I2C driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_ICM45600");
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600_i3c.c b/drivers/iio/imu/inv_icm45600/inv_icm45600_i3c.c
new file mode 100644
index 000000000000..9247eae9b3e2
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600_i3c.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2025 InvenSense, Inc. */
+
+#include <linux/err.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <linux/i3c/device.h>
+#include <linux/i3c/master.h>
+
+#include "inv_icm45600.h"
+
+static const struct regmap_config inv_icm45600_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct i3c_device_id inv_icm45600_i3c_ids[] = {
+ I3C_DEVICE_EXTRA_INFO(0x0235, 0x0000, 0x0011, (void *)NULL),
+ I3C_DEVICE_EXTRA_INFO(0x0235, 0x0000, 0x0084, (void *)NULL),
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i3c, inv_icm45600_i3c_ids);
+
+static const struct inv_icm45600_chip_info *i3c_chip_info[] = {
+ &inv_icm45605_chip_info,
+ &inv_icm45606_chip_info,
+ &inv_icm45608_chip_info,
+ &inv_icm45634_chip_info,
+ &inv_icm45686_chip_info,
+ &inv_icm45687_chip_info,
+ &inv_icm45688p_chip_info,
+ &inv_icm45689_chip_info,
+};
+
+static int inv_icm45600_i3c_probe(struct i3c_device *i3cdev)
+{
+ int ret;
+ unsigned int whoami;
+ struct regmap *regmap;
+ const int nb_chip = ARRAY_SIZE(i3c_chip_info);
+ int chip;
+
+ regmap = devm_regmap_init_i3c(i3cdev, &inv_icm45600_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&i3cdev->dev, PTR_ERR(regmap),
+ "Failed to register i3c regmap\n");
+
+ ret = regmap_read(regmap, INV_ICM45600_REG_WHOAMI, &whoami);
+ if (ret)
+ return dev_err_probe(&i3cdev->dev, ret, "Failed to read part id\n");
+
+ for (chip = 0; chip < nb_chip; chip++) {
+ if (whoami == i3c_chip_info[chip]->whoami)
+ break;
+ }
+
+ if (chip == nb_chip)
+ return dev_err_probe(&i3cdev->dev, -ENODEV,
+ "Failed to match part id %d\n", whoami);
+
+ return inv_icm45600_core_probe(regmap, i3c_chip_info[chip], false, NULL);
+}
+
+static struct i3c_driver inv_icm45600_driver = {
+ .driver = {
+ .name = "inv_icm45600_i3c",
+ .pm = pm_ptr(&inv_icm45600_pm_ops),
+ },
+ .probe = inv_icm45600_i3c_probe,
+ .id_table = inv_icm45600_i3c_ids,
+};
+module_i3c_driver(inv_icm45600_driver);
+
+MODULE_AUTHOR("Remi Buisson <remi.buisson@tdk.com>");
+MODULE_DESCRIPTION("InvenSense ICM-456xx i3c driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_ICM45600");
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600_spi.c b/drivers/iio/imu/inv_icm45600/inv_icm45600_spi.c
new file mode 100644
index 000000000000..6288113a6d7c
--- /dev/null
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600_spi.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2025 InvenSense, Inc. */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+
+#include <linux/spi/spi.h>
+
+#include "inv_icm45600.h"
+
+static const struct regmap_config inv_icm45600_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int inv_icm45600_spi_bus_setup(struct inv_icm45600_state *st)
+{
+ /* Set slew rates for SPI. */
+ return regmap_update_bits(st->map, INV_ICM45600_REG_DRIVE_CONFIG0,
+ INV_ICM45600_DRIVE_CONFIG0_SPI_MASK,
+ FIELD_PREP(INV_ICM45600_DRIVE_CONFIG0_SPI_MASK,
+ INV_ICM45600_SPI_SLEW_RATE_5NS));
+}
+
+static int inv_icm45600_probe(struct spi_device *spi)
+{
+ const struct inv_icm45600_chip_info *chip_info;
+ struct regmap *regmap;
+
+ chip_info = spi_get_device_match_data(spi);
+ if (!chip_info)
+ return -ENODEV;
+
+ /* Use SPI specific regmap. */
+ regmap = devm_regmap_init_spi(spi, &inv_icm45600_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return inv_icm45600_core_probe(regmap, chip_info, true,
+ inv_icm45600_spi_bus_setup);
+}
+
+/*
+ * The device ID table identifies the devices supported by this
+ * driver.
+ */
+static const struct spi_device_id inv_icm45600_id[] = {
+ { "icm45605", (kernel_ulong_t)&inv_icm45605_chip_info },
+ { "icm45606", (kernel_ulong_t)&inv_icm45606_chip_info },
+ { "icm45608", (kernel_ulong_t)&inv_icm45608_chip_info },
+ { "icm45634", (kernel_ulong_t)&inv_icm45634_chip_info },
+ { "icm45686", (kernel_ulong_t)&inv_icm45686_chip_info },
+ { "icm45687", (kernel_ulong_t)&inv_icm45687_chip_info },
+ { "icm45688p", (kernel_ulong_t)&inv_icm45688p_chip_info },
+ { "icm45689", (kernel_ulong_t)&inv_icm45689_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, inv_icm45600_id);
+
+static const struct of_device_id inv_icm45600_of_matches[] = {
+ {
+ .compatible = "invensense,icm45605",
+ .data = &inv_icm45605_chip_info,
+ }, {
+ .compatible = "invensense,icm45606",
+ .data = &inv_icm45606_chip_info,
+ }, {
+ .compatible = "invensense,icm45608",
+ .data = &inv_icm45608_chip_info,
+ }, {
+ .compatible = "invensense,icm45634",
+ .data = &inv_icm45634_chip_info,
+ }, {
+ .compatible = "invensense,icm45686",
+ .data = &inv_icm45686_chip_info,
+ }, {
+ .compatible = "invensense,icm45687",
+ .data = &inv_icm45687_chip_info,
+ }, {
+ .compatible = "invensense,icm45688p",
+ .data = &inv_icm45688p_chip_info,
+ }, {
+ .compatible = "invensense,icm45689",
+ .data = &inv_icm45689_chip_info,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, inv_icm45600_of_matches);
+
+static struct spi_driver inv_icm45600_driver = {
+ .driver = {
+ .name = "inv-icm45600-spi",
+ .of_match_table = inv_icm45600_of_matches,
+ .pm = pm_ptr(&inv_icm45600_pm_ops),
+ },
+ .id_table = inv_icm45600_id,
+ .probe = inv_icm45600_probe,
+};
+module_spi_driver(inv_icm45600_driver);
+
+MODULE_AUTHOR("InvenSense, Inc.");
+MODULE_DESCRIPTION("InvenSense ICM-456xx SPI driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_ICM45600");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 39eb516acc73..b2fa1f4957a5 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -735,7 +735,6 @@ static int inv_mpu6050_read_channel_data(struct iio_dev *indio_dev,
break;
}
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
return ret;
@@ -938,7 +937,6 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
break;
}
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
error_write_raw_unlock:
mutex_unlock(&st->lock);
@@ -1146,14 +1144,12 @@ static int inv_mpu6050_enable_wom(struct inv_mpu6050_state *st, bool en)
st->chip_config.wom_en = false;
}
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
}
return result;
error_suspend:
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
return result;
}
@@ -1249,7 +1245,6 @@ static int inv_mpu6050_write_event_value(struct iio_dev *indio_dev,
value = (u64)val * 1000000ULL + (u64)val2;
result = inv_mpu6050_set_wom_threshold(st, value, INV_MPU6050_FREQ_DIVIDER(st));
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
return result;
@@ -1357,7 +1352,6 @@ inv_mpu6050_fifo_rate_store(struct device *dev, struct device_attribute *attr,
if (result)
goto fifo_rate_fail_power_off;
- pm_runtime_mark_last_busy(pdev);
fifo_rate_fail_power_off:
pm_runtime_put_autosuspend(pdev);
fifo_rate_fail_unlock:
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 5b1088cc3704..10a473342075 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -194,7 +194,6 @@ static int inv_mpu6050_set_enable(struct iio_dev *indio_dev, bool enable)
result = inv_mpu6050_prepare_fifo(st, false);
if (result)
goto error_power_off;
- pm_runtime_mark_last_busy(pdev);
pm_runtime_put_autosuspend(pdev);
}
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 55c82891e08c..3cd91d8a89ee 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -747,12 +747,10 @@ static int kmx61_set_power_state(struct kmx61_data *data, bool on, u8 device)
data->mag_ps = on;
}
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(&data->client->dev);
- } else {
- pm_runtime_mark_last_busy(&data->client->dev);
+ else
ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
if (ret < 0) {
dev_err(&data->client->dev,
"Failed: kmx61_set_power_state for %d, ret %d\n",
diff --git a/drivers/iio/imu/smi330/Kconfig b/drivers/iio/imu/smi330/Kconfig
new file mode 100644
index 000000000000..856a315e15aa
--- /dev/null
+++ b/drivers/iio/imu/smi330/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# SMI330 IMU driver
+#
+
+config SMI330
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config SMI330_I2C
+ tristate "Bosch SMI330 I2C driver"
+ depends on I2C
+ select SMI330
+ select REGMAP_I2C
+ help
+ Enable support for the Bosch SMI330 6-Axis IMU connected to I2C
+ interface.
+
+ This driver can also be built as a module. If so, the module will be
+ called smi330_i2c.
+
+config SMI330_SPI
+ tristate "Bosch SMI330 SPI driver"
+ depends on SPI
+ select SMI330
+ select REGMAP_SPI
+ help
+ Enable support for the Bosch SMI330 6-Axis IMU connected to SPI
+ interface.
+
+ This driver can also be built as a module. If so, the module will be
+ called smi330_spi.
diff --git a/drivers/iio/imu/smi330/Makefile b/drivers/iio/imu/smi330/Makefile
new file mode 100644
index 000000000000..c663dcb5a9f2
--- /dev/null
+++ b/drivers/iio/imu/smi330/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Bosch SMI330 IMU
+#
+obj-$(CONFIG_SMI330) += smi330_core.o
+obj-$(CONFIG_SMI330_I2C) += smi330_i2c.o
+obj-$(CONFIG_SMI330_SPI) += smi330_spi.o
diff --git a/drivers/iio/imu/smi330/smi330.h b/drivers/iio/imu/smi330/smi330.h
new file mode 100644
index 000000000000..a5c765645aaa
--- /dev/null
+++ b/drivers/iio/imu/smi330/smi330.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/*
+ * Copyright (c) 2025 Robert Bosch GmbH.
+ */
+#ifndef _SMI330_H
+#define _SMI330_H
+
+#include <linux/iio/iio.h>
+
+enum {
+ SMI330_SCAN_ACCEL_X,
+ SMI330_SCAN_ACCEL_Y,
+ SMI330_SCAN_ACCEL_Z,
+ SMI330_SCAN_GYRO_X,
+ SMI330_SCAN_GYRO_Y,
+ SMI330_SCAN_GYRO_Z,
+ SMI330_SCAN_TIMESTAMP,
+ SMI330_SCAN_LEN = SMI330_SCAN_TIMESTAMP,
+};
+
+extern const struct regmap_config smi330_regmap_config;
+
+int smi330_core_probe(struct device *dev, struct regmap *regmap);
+
+#endif /* _SMI330_H */
diff --git a/drivers/iio/imu/smi330/smi330_core.c b/drivers/iio/imu/smi330/smi330_core.c
new file mode 100644
index 000000000000..7564f12543e0
--- /dev/null
+++ b/drivers/iio/imu/smi330/smi330_core.c
@@ -0,0 +1,918 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright (c) 2025 Robert Bosch GmbH.
+ */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/units.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include "smi330.h"
+
+/* Register map */
+#define SMI330_CHIP_ID_REG 0x00
+#define SMI330_ERR_REG 0x01
+#define SMI330_STATUS_REG 0x02
+#define SMI330_ACCEL_X_REG 0x03
+#define SMI330_GYRO_X_REG 0x06
+#define SMI330_TEMP_REG 0x09
+#define SMI330_INT1_STATUS_REG 0x0D
+#define SMI330_ACCEL_CFG_REG 0x20
+#define SMI330_GYRO_CFG_REG 0x21
+#define SMI330_IO_INT_CTRL_REG 0x38
+#define SMI330_INT_CONF_REG 0x39
+#define SMI330_INT_MAP1_REG 0x3A
+#define SMI330_INT_MAP2_REG 0x3B
+#define SMI330_CMD_REG 0x7E
+
+/* Register mask */
+#define SMI330_CHIP_ID_MASK GENMASK(7, 0)
+#define SMI330_ERR_FATAL_MASK BIT(0)
+#define SMI330_ERR_ACC_CONF_MASK BIT(5)
+#define SMI330_ERR_GYR_CONF_MASK BIT(6)
+#define SMI330_STATUS_POR_MASK BIT(0)
+#define SMI330_INT_STATUS_ACC_GYR_DRDY_MASK GENMASK(13, 12)
+#define SMI330_CFG_ODR_MASK GENMASK(3, 0)
+#define SMI330_CFG_RANGE_MASK GENMASK(6, 4)
+#define SMI330_CFG_BW_MASK BIT(7)
+#define SMI330_CFG_AVG_NUM_MASK GENMASK(10, 8)
+#define SMI330_CFG_MODE_MASK GENMASK(14, 12)
+#define SMI330_IO_INT_CTRL_INT1_MASK GENMASK(2, 0)
+#define SMI330_IO_INT_CTRL_INT2_MASK GENMASK(10, 8)
+#define SMI330_INT_CONF_LATCH_MASK BIT(0)
+#define SMI330_INT_MAP2_ACC_DRDY_MASK GENMASK(11, 10)
+#define SMI330_INT_MAP2_GYR_DRDY_MASK GENMASK(9, 8)
+
+/* Register values */
+#define SMI330_IO_INT_CTRL_LVL BIT(0)
+#define SMI330_IO_INT_CTRL_OD BIT(1)
+#define SMI330_IO_INT_CTRL_EN BIT(2)
+#define SMI330_CMD_SOFT_RESET 0xDEAF
+
+/* T°C = (temp / 512) + 23 */
+#define SMI330_TEMP_OFFSET 11776 /* 23 * 512 */
+#define SMI330_TEMP_SCALE 1953125 /* (1 / 512) * 1e9 */
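+/* Example: raw 512 -> (512 + 11776) / 512 = 512 / 512 + 23 = 24 °C */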
+
+#define SMI330_CHIP_ID 0x42
+#define SMI330_SOFT_RESET_DELAY 2000
+
+/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
+#define smi330_field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
+#define smi330_field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
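+/*
+ * Example: smi330_field_get(SMI330_CFG_ODR_MASK, 0x0128) yields 0x8, and
+ * smi330_field_prep(SMI330_CFG_AVG_NUM_MASK, 0x2) yields 0x0200.
+ */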
+
+#define SMI330_ACCEL_CHANNEL(_axis) { \
+ .type = IIO_ACCEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_dir_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = SMI330_SCAN_ACCEL_##_axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+#define SMI330_GYRO_CHANNEL(_axis) { \
+ .type = IIO_ANGL_VEL, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_dir_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = SMI330_SCAN_GYRO_##_axis, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+#define SMI330_TEMP_CHANNEL(_index) { \
+ .type = IIO_TEMP, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+enum smi330_accel_range {
+ SMI330_ACCEL_RANGE_2G = 0x00,
+ SMI330_ACCEL_RANGE_4G = 0x01,
+ SMI330_ACCEL_RANGE_8G = 0x02,
+ SMI330_ACCEL_RANGE_16G = 0x03
+};
+
+enum smi330_gyro_range {
+ SMI330_GYRO_RANGE_125 = 0x0,
+ SMI330_GYRO_RANGE_250 = 0x01,
+ SMI330_GYRO_RANGE_500 = 0x02
+};
+
+enum smi330_odr {
+ SMI330_ODR_12_5_HZ = 0x05,
+ SMI330_ODR_25_HZ = 0x06,
+ SMI330_ODR_50_HZ = 0x07,
+ SMI330_ODR_100_HZ = 0x08,
+ SMI330_ODR_200_HZ = 0x09,
+ SMI330_ODR_400_HZ = 0x0A,
+ SMI330_ODR_800_HZ = 0x0B,
+ SMI330_ODR_1600_HZ = 0x0C,
+ SMI330_ODR_3200_HZ = 0x0D,
+ SMI330_ODR_6400_HZ = 0x0E
+};
+
+enum smi330_avg_num {
+ SMI330_AVG_NUM_1 = 0x00,
+ SMI330_AVG_NUM_2 = 0x01,
+ SMI330_AVG_NUM_4 = 0x02,
+ SMI330_AVG_NUM_8 = 0x03,
+ SMI330_AVG_NUM_16 = 0x04,
+ SMI330_AVG_NUM_32 = 0x05,
+ SMI330_AVG_NUM_64 = 0x06
+};
+
+enum smi330_mode {
+ SMI330_MODE_SUSPEND = 0x00,
+ SMI330_MODE_GYRO_DRIVE = 0x01,
+ SMI330_MODE_LOW_POWER = 0x03,
+ SMI330_MODE_NORMAL = 0x04,
+ SMI330_MODE_HIGH_PERF = 0x07
+};
+
+enum smi330_bw {
+ SMI330_BW_2 = 0x00, /* ODR/2 */
+ SMI330_BW_4 = 0x01 /* ODR/4 */
+};
+
+enum smi330_operation_mode {
+ SMI330_POLLING,
+ SMI330_DATA_READY,
+};
+
+enum smi330_sensor {
+ SMI330_ACCEL,
+ SMI330_GYRO,
+};
+
+enum smi330_sensor_conf_select {
+ SMI330_ODR,
+ SMI330_RANGE,
+ SMI330_BW,
+ SMI330_AVG_NUM,
+};
+
+enum smi330_int_out {
+ SMI330_INT_DISABLED,
+ SMI330_INT_1,
+ SMI330_INT_2,
+};
+
+struct smi330_attributes {
+ int *reg_vals;
+ int *vals;
+ int len;
+ int type;
+ int mask;
+};
+
+struct smi330_cfg {
+ enum smi330_operation_mode op_mode;
+ enum smi330_int_out data_irq;
+};
+
+struct smi330_data {
+ struct regmap *regmap;
+ struct smi330_cfg cfg;
+ struct iio_trigger *trig;
+ IIO_DECLARE_BUFFER_WITH_TS(__le16, buf, SMI330_SCAN_LEN);
+};
+
+const struct regmap_config smi330_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+EXPORT_SYMBOL_NS_GPL(smi330_regmap_config, "IIO_SMI330");
+
+static const struct iio_chan_spec smi330_channels[] = {
+ SMI330_ACCEL_CHANNEL(X),
+ SMI330_ACCEL_CHANNEL(Y),
+ SMI330_ACCEL_CHANNEL(Z),
+ SMI330_GYRO_CHANNEL(X),
+ SMI330_GYRO_CHANNEL(Y),
+ SMI330_GYRO_CHANNEL(Z),
+ SMI330_TEMP_CHANNEL(-1), /* No buffer support */
+ IIO_CHAN_SOFT_TIMESTAMP(SMI330_SCAN_TIMESTAMP),
+};
+
+static const unsigned long smi330_avail_scan_masks[] = {
+ (BIT(SMI330_SCAN_ACCEL_X) | BIT(SMI330_SCAN_ACCEL_Y) |
+ BIT(SMI330_SCAN_ACCEL_Z) | BIT(SMI330_SCAN_GYRO_X) |
+ BIT(SMI330_SCAN_GYRO_Y) | BIT(SMI330_SCAN_GYRO_Z)),
+ 0
+};
+
+static const struct smi330_attributes smi330_accel_scale_attr = {
+ .reg_vals = (int[]){ SMI330_ACCEL_RANGE_2G, SMI330_ACCEL_RANGE_4G,
+ SMI330_ACCEL_RANGE_8G, SMI330_ACCEL_RANGE_16G },
+ .vals = (int[]){ 0, 61035, 0, 122070, 0, 244140, 0, 488281 },
+ .len = 8,
+ .type = IIO_VAL_INT_PLUS_NANO,
+ .mask = SMI330_CFG_RANGE_MASK
+};
+
+static const struct smi330_attributes smi330_gyro_scale_attr = {
+ .reg_vals = (int[]){ SMI330_GYRO_RANGE_125, SMI330_GYRO_RANGE_250,
+ SMI330_GYRO_RANGE_500 },
+ .vals = (int[]){ 0, 3814697, 0, 7629395, 0, 15258789 },
+ .len = 6,
+ .type = IIO_VAL_INT_PLUS_NANO,
+ .mask = SMI330_CFG_RANGE_MASK
+};
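+
+/*
+ * The scale values above are range / 32768 per LSB, e.g.
+ * 2 g: 2 / 32768 = 0.000061035 and 125 dps: 125 / 32768 = 0.003814697.
+ */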
+
+static const struct smi330_attributes smi330_average_attr = {
+ .reg_vals = (int[]){ SMI330_AVG_NUM_1, SMI330_AVG_NUM_2,
+ SMI330_AVG_NUM_4, SMI330_AVG_NUM_8,
+ SMI330_AVG_NUM_16, SMI330_AVG_NUM_32,
+ SMI330_AVG_NUM_64 },
+ .vals = (int[]){ 1, 2, 4, 8, 16, 32, 64 },
+ .len = 7,
+ .type = IIO_VAL_INT,
+ .mask = SMI330_CFG_AVG_NUM_MASK
+};
+
+static const struct smi330_attributes smi330_bandwidth_attr = {
+ .reg_vals = (int[]){ SMI330_BW_2, SMI330_BW_4 },
+ .vals = (int[]){ 2, 4 },
+ .len = 2,
+ .type = IIO_VAL_INT,
+ .mask = SMI330_CFG_BW_MASK
+};
+
+static const struct smi330_attributes smi330_odr_attr = {
+ .reg_vals = (int[]){ SMI330_ODR_12_5_HZ, SMI330_ODR_25_HZ,
+ SMI330_ODR_50_HZ, SMI330_ODR_100_HZ,
+ SMI330_ODR_200_HZ, SMI330_ODR_400_HZ,
+ SMI330_ODR_800_HZ, SMI330_ODR_1600_HZ,
+ SMI330_ODR_3200_HZ, SMI330_ODR_6400_HZ },
+ .vals = (int[]){ 12, 25, 50, 100, 200, 400, 800, 1600, 3200, 6400 },
+ .len = 10,
+ .type = IIO_VAL_INT,
+ .mask = SMI330_CFG_ODR_MASK
+};
+
+static int smi330_get_attributes(enum smi330_sensor_conf_select config,
+ enum smi330_sensor sensor,
+ const struct smi330_attributes **attr)
+{
+ switch (config) {
+ case SMI330_ODR:
+ *attr = &smi330_odr_attr;
+ return 0;
+ case SMI330_RANGE:
+ if (sensor == SMI330_ACCEL)
+ *attr = &smi330_accel_scale_attr;
+ else
+ *attr = &smi330_gyro_scale_attr;
+ return 0;
+ case SMI330_BW:
+ *attr = &smi330_bandwidth_attr;
+ return 0;
+ case SMI330_AVG_NUM:
+ *attr = &smi330_average_attr;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int smi330_get_config_reg(enum smi330_sensor sensor, int *reg)
+{
+ switch (sensor) {
+ case SMI330_ACCEL:
+ *reg = SMI330_ACCEL_CFG_REG;
+ return 0;
+ case SMI330_GYRO:
+ *reg = SMI330_GYRO_CFG_REG;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int smi330_get_sensor_config(struct smi330_data *data,
+ enum smi330_sensor sensor,
+ enum smi330_sensor_conf_select config,
+ int *value)
+{
+ int ret, reg, i;
+ unsigned int reg_val;
+ const struct smi330_attributes *attr;
+
+ ret = smi330_get_config_reg(sensor, &reg);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(data->regmap, reg, &reg_val);
+ if (ret)
+ return ret;
+
+ ret = smi330_get_attributes(config, sensor, &attr);
+ if (ret)
+ return ret;
+
+ reg_val = smi330_field_get(attr->mask, reg_val);
+
+ if (attr->type == IIO_VAL_INT) {
+ for (i = 0; i < attr->len; i++) {
+ if (attr->reg_vals[i] == reg_val) {
+ *value = attr->vals[i];
+ return 0;
+ }
+ }
+ } else {
+ for (i = 0; i < attr->len / 2; i++) {
+ if (attr->reg_vals[i] == reg_val) {
+ *value = attr->vals[2 * i + 1];
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int smi330_set_sensor_config(struct smi330_data *data,
+ enum smi330_sensor sensor,
+ enum smi330_sensor_conf_select config,
+ int value)
+{
+ int ret, i, reg, reg_val;
+ unsigned int error;
+ const struct smi330_attributes *attr;
+
+ ret = smi330_get_attributes(config, sensor, &attr);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < attr->len; i++) {
+ if (attr->vals[i] == value) {
+ if (attr->type == IIO_VAL_INT)
+ reg_val = attr->reg_vals[i];
+ else
+ reg_val = attr->reg_vals[i / 2];
+ break;
+ }
+ }
+ if (i == attr->len)
+ return -EINVAL;
+
+ ret = smi330_get_config_reg(sensor, &reg);
+ if (ret)
+ return ret;
+
+ reg_val = smi330_field_prep(attr->mask, reg_val);
+ ret = regmap_update_bits(data->regmap, reg, attr->mask, reg_val);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(data->regmap, SMI330_ERR_REG, &error);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(SMI330_ERR_ACC_CONF_MASK, error) ||
+ FIELD_GET(SMI330_ERR_GYR_CONF_MASK, error))
+ return -EIO;
+
+ return 0;
+}
+
+static int smi330_get_data(struct smi330_data *data, int chan_type, int axis,
+ int *val)
+{
+ u8 reg;
+ int ret;
+ unsigned int sample;
+
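+ /* X, Y and Z data registers are consecutive, so offset by the axis modifier. */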
+ switch (chan_type) {
+ case IIO_ACCEL:
+ reg = SMI330_ACCEL_X_REG + (axis - IIO_MOD_X);
+ break;
+ case IIO_ANGL_VEL:
+ reg = SMI330_GYRO_X_REG + (axis - IIO_MOD_X);
+ break;
+ case IIO_TEMP:
+ reg = SMI330_TEMP_REG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_read(data->regmap, reg, &sample);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(sample, 15);
+
+ return 0;
+}
+
+static int smi330_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, const int **vals,
+ int *type, int *length, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_ACCEL) {
+ *vals = smi330_accel_scale_attr.vals;
+ *length = smi330_accel_scale_attr.len;
+ *type = smi330_accel_scale_attr.type;
+ } else {
+ *vals = smi330_gyro_scale_attr.vals;
+ *length = smi330_gyro_scale_attr.len;
+ *type = smi330_gyro_scale_attr.type;
+ }
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = smi330_average_attr.vals;
+ *length = smi330_average_attr.len;
+ *type = smi330_average_attr.type;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *vals = smi330_bandwidth_attr.vals;
+ *length = smi330_bandwidth_attr.len;
+ *type = smi330_bandwidth_attr.type;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = smi330_odr_attr.vals;
+ *length = smi330_odr_attr.len;
+ *type = smi330_odr_attr.type;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int smi330_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret;
+ struct smi330_data *data = iio_priv(indio_dev);
+ enum smi330_sensor sensor;
+
+ /* valid for all channel types */
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = smi330_get_data(data, chan->type, chan->channel2, val);
+ iio_device_release_direct(indio_dev);
+ return ret ? ret : IIO_VAL_INT;
+ default:
+ break;
+ }
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ sensor = SMI330_ACCEL;
+ break;
+ case IIO_ANGL_VEL:
+ sensor = SMI330_GYRO;
+ break;
+ case IIO_TEMP:
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *val = SMI330_TEMP_SCALE / GIGA;
+ *val2 = SMI330_TEMP_SCALE % GIGA;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = SMI330_TEMP_OFFSET;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ /* valid for acc and gyro channels */
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = smi330_get_sensor_config(data, sensor, SMI330_AVG_NUM,
+ val);
+ return ret ? ret : IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = smi330_get_sensor_config(data, sensor, SMI330_BW, val);
+ return ret ? ret : IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = smi330_get_sensor_config(data, sensor, SMI330_ODR, val);
+ return ret ? ret : IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ ret = smi330_get_sensor_config(data, sensor, SMI330_RANGE,
+ val2);
+ return ret ? ret : IIO_VAL_INT_PLUS_NANO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int smi330_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct smi330_data *data = iio_priv(indio_dev);
+ enum smi330_sensor sensor;
+
+ switch (chan->type) {
+ case IIO_ACCEL:
+ sensor = SMI330_ACCEL;
+ break;
+ case IIO_ANGL_VEL:
+ sensor = SMI330_GYRO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return smi330_set_sensor_config(data, sensor, SMI330_RANGE,
+ val2);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return smi330_set_sensor_config(data, sensor, SMI330_AVG_NUM,
+ val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return smi330_set_sensor_config(data, sensor, SMI330_BW, val);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return smi330_set_sensor_config(data, sensor, SMI330_ODR, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int smi330_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static int smi330_soft_reset(struct smi330_data *data)
+{
+ int ret;
+ unsigned int dummy_byte;
+
+ ret = regmap_write(data->regmap, SMI330_CMD_REG, SMI330_CMD_SOFT_RESET);
+ if (ret)
+ return ret;
+ fsleep(SMI330_SOFT_RESET_DELAY);
+
+ /* Perform a dummy read after soft reset; the result is intentionally ignored */
+ regmap_read(data->regmap, SMI330_CHIP_ID_REG, &dummy_byte);
+
+ return 0;
+}
+
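+/*
+ * One bulk read starting at the accel X register captures a full scan of
+ * SMI330_SCAN_LEN 16-bit words (the data registers are laid out back to
+ * back), which is then pushed together with the timestamp recorded when
+ * the trigger fired.
+ */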
+static irqreturn_t smi330_trigger_handler(int irq, void *p)
+{
+ int ret;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct smi330_data *data = iio_priv(indio_dev);
+
+ ret = regmap_bulk_read(data->regmap, SMI330_ACCEL_X_REG, data->buf,
+ SMI330_SCAN_LEN);
+ if (ret)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buf, pf->timestamp);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
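+/*
+ * Both INT status registers are fetched in one bulk read and OR-ed, so the
+ * data-ready event is handled no matter which interrupt pin it was routed
+ * to; the thread then kicks the device trigger with a fresh timestamp.
+ */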
+static irqreturn_t smi330_irq_thread_handler(int irq, void *indio_dev_)
+{
+ int ret, int_stat;
+ s16 int_status[2] = { 0 };
+ struct iio_dev *indio_dev = indio_dev_;
+ struct smi330_data *data = iio_priv(indio_dev);
+
+ ret = regmap_bulk_read(data->regmap, SMI330_INT1_STATUS_REG, int_status, 2);
+ if (ret)
+ return IRQ_NONE;
+
+ int_stat = int_status[0] | int_status[1];
+
+ if (FIELD_GET(SMI330_INT_STATUS_ACC_GYR_DRDY_MASK, int_stat)) {
+ indio_dev->pollfunc->timestamp = iio_get_time_ns(indio_dev);
+ iio_trigger_poll_nested(data->trig);
+ }
+
+ return IRQ_HANDLED;
+}
+
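+/*
+ * INT1 and INT2 share the same level/open-drain/enable field layout inside
+ * SMI330_IO_INT_CTRL_REG, so the pin configuration is assembled once and
+ * shifted into place via FIELD_PREP() with the pin-specific mask.
+ */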
+static int smi330_set_int_pin_config(struct smi330_data *data,
+ enum smi330_int_out irq_num,
+ bool active_high, bool open_drain,
+ bool latch)
+{
+ int ret, val;
+
+ val = active_high ? SMI330_IO_INT_CTRL_LVL : 0;
+ val |= open_drain ? SMI330_IO_INT_CTRL_OD : 0;
+ val |= SMI330_IO_INT_CTRL_EN;
+
+ switch (irq_num) {
+ case SMI330_INT_1:
+ val = FIELD_PREP(SMI330_IO_INT_CTRL_INT1_MASK, val);
+ ret = regmap_update_bits(data->regmap, SMI330_IO_INT_CTRL_REG,
+ SMI330_IO_INT_CTRL_INT1_MASK, val);
+ if (ret)
+ return ret;
+ break;
+ case SMI330_INT_2:
+ val = FIELD_PREP(SMI330_IO_INT_CTRL_INT2_MASK, val);
+ ret = regmap_update_bits(data->regmap, SMI330_IO_INT_CTRL_REG,
+ SMI330_IO_INT_CTRL_INT2_MASK, val);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(data->regmap, SMI330_INT_CONF_REG,
+ SMI330_INT_CONF_LATCH_MASK,
+ FIELD_PREP(SMI330_INT_CONF_LATCH_MASK,
+ latch));
+}
+
+static int smi330_setup_irq(struct device *dev, struct iio_dev *indio_dev,
+ int irq, enum smi330_int_out irq_num)
+{
+ int ret, irq_type;
+ bool open_drain, active_high, latch;
+ struct smi330_data *data = iio_priv(indio_dev);
+ struct irq_data *desc;
+
+ desc = irq_get_irq_data(irq);
+ if (!desc)
+ return -EINVAL;
+
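+ /*
+ * Derive the pin configuration from the requested trigger type:
+ * edge-triggered interrupts use the pulsed (non-latched) mode,
+ * level-triggered interrupts the latched mode, and the active level
+ * sets the pin polarity.
+ */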
+ irq_type = irqd_get_trigger_type(desc);
+ switch (irq_type) {
+ case IRQF_TRIGGER_RISING:
+ latch = false;
+ active_high = true;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ latch = true;
+ active_high = true;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ latch = false;
+ active_high = false;
+ break;
+ case IRQF_TRIGGER_LOW:
+ latch = true;
+ active_high = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ open_drain = device_property_read_bool(dev, "drive-open-drain");
+
+ ret = smi330_set_int_pin_config(data, irq_num, active_high, open_drain,
+ latch);
+ if (ret)
+ return ret;
+
+ return devm_request_threaded_irq(dev, irq, NULL,
+ smi330_irq_thread_handler,
+ irq_type | IRQF_ONESHOT,
+ indio_dev->name, indio_dev);
+}
+
+static int smi330_register_irq(struct device *dev, struct iio_dev *indio_dev)
+{
+ int ret, irq;
+ struct smi330_data *data = iio_priv(indio_dev);
+ struct fwnode_handle *fwnode;
+
+ fwnode = dev_fwnode(dev);
+ if (!fwnode)
+ return -ENODEV;
+
+ data->cfg.data_irq = SMI330_INT_DISABLED;
+
+ irq = fwnode_irq_get_byname(fwnode, "INT1");
+ if (irq > 0) {
+ ret = smi330_setup_irq(dev, indio_dev, irq, SMI330_INT_1);
+ if (ret)
+ return ret;
+ data->cfg.data_irq = SMI330_INT_1;
+ } else {
+ irq = fwnode_irq_get_byname(fwnode, "INT2");
+ if (irq > 0) {
+ ret = smi330_setup_irq(dev, indio_dev, irq,
+ SMI330_INT_2);
+ if (ret)
+ return ret;
+ data->cfg.data_irq = SMI330_INT_2;
+ }
+ }
+
+ return 0;
+}
+
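+/*
+ * Enabling the trigger routes both accel and gyro data-ready interrupts to
+ * the pin discovered at probe time and switches the driver to data-ready
+ * mode; disabling maps them back to 0 (no pin) and falls back to polling.
+ */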
+static int smi330_set_drdy_trigger_state(struct iio_trigger *trig, bool enable)
+{
+ int val;
+ struct smi330_data *data = iio_trigger_get_drvdata(trig);
+
+ if (enable)
+ data->cfg.op_mode = SMI330_DATA_READY;
+ else
+ data->cfg.op_mode = SMI330_POLLING;
+
+ val = FIELD_PREP(SMI330_INT_MAP2_ACC_DRDY_MASK,
+ enable ? data->cfg.data_irq : 0);
+ val |= FIELD_PREP(SMI330_INT_MAP2_GYR_DRDY_MASK,
+ enable ? data->cfg.data_irq : 0);
+ return regmap_update_bits(data->regmap, SMI330_INT_MAP2_REG,
+ SMI330_INT_MAP2_ACC_DRDY_MASK |
+ SMI330_INT_MAP2_GYR_DRDY_MASK,
+ val);
+}
+
+static const struct iio_trigger_ops smi330_trigger_ops = {
+ .set_trigger_state = &smi330_set_drdy_trigger_state,
+};
+
+static const struct iio_info smi330_info = {
+ .read_avail = smi330_read_avail,
+ .read_raw = smi330_read_raw,
+ .write_raw = smi330_write_raw,
+ .write_raw_get_fmt = smi330_write_raw_get_fmt,
+};
+
+static int smi330_dev_init(struct smi330_data *data)
+{
+ int ret, mode;
+ unsigned int chip_id, val;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_read(data->regmap, SMI330_CHIP_ID_REG, &chip_id);
+ if (ret)
+ return ret;
+
+ chip_id = FIELD_GET(SMI330_CHIP_ID_MASK, chip_id);
+ if (chip_id != SMI330_CHIP_ID)
+ dev_info(dev, "Unknown chip id: 0x%04x\n", chip_id);
+
+ ret = regmap_read(data->regmap, SMI330_ERR_REG, &val);
+ if (ret)
+ return ret;
+ if (FIELD_GET(SMI330_ERR_FATAL_MASK, val))
+ return -ENODEV;
+
+ ret = regmap_read(data->regmap, SMI330_STATUS_REG, &val);
+ if (ret)
+ return ret;
+ if (FIELD_GET(SMI330_STATUS_POR_MASK, val) == 0)
+ return -ENODEV;
+
+ mode = FIELD_PREP(SMI330_CFG_MODE_MASK, SMI330_MODE_NORMAL);
+
+ ret = regmap_update_bits(data->regmap, SMI330_ACCEL_CFG_REG,
+ SMI330_CFG_MODE_MASK, mode);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(data->regmap, SMI330_GYRO_CFG_REG,
+ SMI330_CFG_MODE_MASK, mode);
+}
+
+int smi330_core_probe(struct device *dev, struct regmap *regmap)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct smi330_data *data;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->regmap = regmap;
+
+ ret = smi330_soft_reset(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Soft reset failed\n");
+
+ indio_dev->channels = smi330_channels;
+ indio_dev->num_channels = ARRAY_SIZE(smi330_channels);
+ indio_dev->available_scan_masks = smi330_avail_scan_masks;
+ indio_dev->name = "smi330";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &smi330_info;
+
+ data->cfg.op_mode = SMI330_POLLING;
+
+ ret = smi330_dev_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Init failed\n");
+
+ ret = smi330_register_irq(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Register IRQ failed\n");
+
+ if (data->cfg.data_irq != SMI330_INT_DISABLED) {
+ data->trig = devm_iio_trigger_alloc(dev, "%s-drdy-trigger",
+ indio_dev->name);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->ops = &smi330_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, data);
+
+ ret = devm_iio_trigger_register(dev, data->trig);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "IIO register trigger failed\n");
+
+ /* Set the data-ready trigger as the device default. */
+ indio_dev->trig = iio_trigger_get(data->trig);
+ }
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ smi330_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "IIO buffer setup failed\n");
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Register IIO device failed\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(smi330_core_probe, "IIO_SMI330");
+
+MODULE_AUTHOR("Stefan Gutmann <stefan.gutmann@de.bosch.com>");
+MODULE_AUTHOR("Roman Huber <roman.huber@de.bosch.com>");
+MODULE_AUTHOR("Filip Andrei <Andrei.Filip@ro.bosch.com>");
+MODULE_AUTHOR("Drimbarean Avram Andrei <Avram-Andrei.Drimbarean@ro.bosch.com>");
+MODULE_DESCRIPTION("Bosch SMI330 IMU driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/iio/imu/smi330/smi330_i2c.c b/drivers/iio/imu/smi330/smi330_i2c.c
new file mode 100644
index 000000000000..e5f1825beb71
--- /dev/null
+++ b/drivers/iio/imu/smi330/smi330_i2c.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright (c) 2025 Robert Bosch GmbH.
+ */
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "smi330.h"
+
+#define SMI330_NUM_DUMMY_BYTES 2
+#define SMI330_I2C_MAX_RX_BUFFER_SIZE \
+ (SMI330_NUM_DUMMY_BYTES + SMI330_SCAN_LEN * sizeof(s16))
+
+struct smi330_i2c_priv {
+ struct i2c_client *i2c;
+ u8 rx_buffer[SMI330_I2C_MAX_RX_BUFFER_SIZE];
+};
+
+static int smi330_regmap_i2c_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct smi330_i2c_priv *priv = context;
+ int ret;
+
+ if (SMI330_NUM_DUMMY_BYTES + val_size > SMI330_I2C_MAX_RX_BUFFER_SIZE)
+ return -EINVAL;
+
+ /*
+ * SMI330 I2C read frame:
+ * <Slave address[6:0], RnW> <x, Register address[6:0]>
+ * <Slave address[6:0], RnW> <Dummy[7:0]> <Dummy[7:0]> <Data_0[7:0]> <Data_0[15:8]>...
+ * <Data_N[7:0]> <Data_N[15:8]>
+ * Note: the slave address is not counted as part of the frame in these definitions.
+ */
+ struct i2c_msg msgs[] = {
+ {
+ .addr = priv->i2c->addr,
+ .flags = priv->i2c->flags,
+ .len = reg_size,
+ .buf = (u8 *)reg_buf,
+ },
+ {
+ .addr = priv->i2c->addr,
+ .flags = priv->i2c->flags | I2C_M_RD,
+ .len = SMI330_NUM_DUMMY_BYTES + val_size,
+ .buf = priv->rx_buffer,
+ },
+ };
+
+ ret = i2c_transfer(priv->i2c->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ memcpy(val_buf, priv->rx_buffer + SMI330_NUM_DUMMY_BYTES, val_size);
+
+ return 0;
+}
+
+static int smi330_regmap_i2c_write(void *context, const void *data,
+ size_t count)
+{
+ struct smi330_i2c_priv *priv = context;
+ u8 reg;
+
+ /*
+ * SMI330 I2C write frame:
+ * <Slave address[6:0], RnW> <x, Register address[6:0]> <Data_0[7:0]> <Data_0[15:8]>...
+ * <Data_N[7:0]> <Data_N[15:8]>
+ * Note: the slave address is not counted as part of the frame in these definitions.
+ */
+ reg = *(u8 *)data;
+ return i2c_smbus_write_i2c_block_data(priv->i2c, reg,
+ count - sizeof(u8),
+ data + sizeof(u8));
+}
+
+static const struct regmap_bus smi330_regmap_bus = {
+ .read = smi330_regmap_i2c_read,
+ .write = smi330_regmap_i2c_write,
+};
+
+static int smi330_i2c_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct smi330_i2c_priv *priv;
+ struct regmap *regmap;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->i2c = i2c;
+ regmap = devm_regmap_init(dev, &smi330_regmap_bus, priv,
+ &smi330_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to initialize I2C Regmap\n");
+
+ return smi330_core_probe(dev, regmap);
+}
+
+static const struct i2c_device_id smi330_i2c_device_id[] = {
+ { .name = "smi330" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, smi330_i2c_device_id);
+
+static const struct of_device_id smi330_of_match[] = {
+ { .compatible = "bosch,smi330" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, smi330_of_match);
+
+static struct i2c_driver smi330_i2c_driver = {
+ .probe = smi330_i2c_probe,
+ .id_table = smi330_i2c_device_id,
+ .driver = {
+ .of_match_table = smi330_of_match,
+ .name = "smi330_i2c",
+ },
+};
+module_i2c_driver(smi330_i2c_driver);
+
+MODULE_AUTHOR("Stefan Gutmann <stefan.gutmann@de.bosch.com>");
+MODULE_AUTHOR("Roman Huber <roman.huber@de.bosch.com>");
+MODULE_AUTHOR("Filip Andrei <Andrei.Filip@ro.bosch.com>");
+MODULE_AUTHOR("Drimbarean Avram Andrei <Avram-Andrei.Drimbarean@ro.bosch.com>");
+MODULE_DESCRIPTION("Bosch SMI330 I2C driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS("IIO_SMI330");
diff --git a/drivers/iio/imu/smi330/smi330_spi.c b/drivers/iio/imu/smi330/smi330_spi.c
new file mode 100644
index 000000000000..a6044e02b451
--- /dev/null
+++ b/drivers/iio/imu/smi330/smi330_spi.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright (c) 2025 Robert Bosch GmbH.
+ */
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#include "smi330.h"
+
+static int smi330_regmap_spi_read(void *context, const void *reg_buf,
+ size_t reg_size, void *val_buf,
+ size_t val_size)
+{
+ struct spi_device *spi = context;
+
+ /* Insert pad byte for reading */
+ u8 reg[] = { *(u8 *)reg_buf, 0 };
+
+ if (reg_size + 1 != ARRAY_SIZE(reg)) {
+ dev_err(&spi->dev, "Invalid register size %zu\n", reg_size);
+ return -EINVAL;
+ }
+
+ return spi_write_then_read(spi, reg, ARRAY_SIZE(reg), val_buf,
+ val_size);
+}
+
+static int smi330_regmap_spi_write(void *context, const void *data,
+ size_t count)
+{
+ struct spi_device *spi = context;
+
+ return spi_write(spi, data, count);
+}
+
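+/*
+ * read_flag_mask makes regmap set bit 7 of the register address for read
+ * transfers, which marks the access as a read on the SMI330 SPI bus;
+ * writes go out with the address byte unmodified.
+ */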
+static const struct regmap_bus smi330_regmap_bus = {
+ .read = smi330_regmap_spi_read,
+ .write = smi330_regmap_spi_write,
+ .read_flag_mask = 0x80,
+};
+
+static int smi330_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init(&spi->dev, &smi330_regmap_bus, &spi->dev,
+ &smi330_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(&spi->dev, PTR_ERR(regmap),
+ "Failed to initialize SPI Regmap\n");
+
+ return smi330_core_probe(&spi->dev, regmap);
+}
+
+static const struct spi_device_id smi330_spi_device_id[] = {
+ { .name = "smi330" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, smi330_spi_device_id);
+
+static const struct of_device_id smi330_of_match[] = {
+ { .compatible = "bosch,smi330" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, smi330_of_match);
+
+static struct spi_driver smi330_spi_driver = {
+ .probe = smi330_spi_probe,
+ .id_table = smi330_spi_device_id,
+ .driver = {
+ .of_match_table = smi330_of_match,
+ .name = "smi330_spi",
+ },
+};
+module_spi_driver(smi330_spi_driver);
+
+MODULE_AUTHOR("Stefan Gutmann <stefan.gutmann@de.bosch.com>");
+MODULE_AUTHOR("Roman Huber <roman.huber@de.bosch.com>");
+MODULE_AUTHOR("Filip Andrei <Andrei.Filip@ro.bosch.com>");
+MODULE_AUTHOR("Drimbarean Avram Andrei <Avram-Andrei.Drimbarean@ro.bosch.com>");
+MODULE_DESCRIPTION("Bosch SMI330 SPI driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_IMPORT_NS("IIO_SMI330");
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index c225b246c8a5..6405a5367d76 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -192,6 +192,22 @@ struct st_lsm6dsx_fifo_ops {
* @fifo_en: Hw timer FIFO enable register info (addr + mask).
* @decimator: Hw timer FIFO decimator register info (addr + mask).
* @freq_fine: Difference in % of ODR with respect to the typical.
+ * @ts_sensitivity: Nominal timestamp sensitivity.
+ * @ts_trim_coeff: Coefficient for calculating the calibrated timestamp gain.
+ * This coefficient comes into play when linearizing the formula
+ * used to calculate the calibrated timestamp (please see the
+ * relevant formula in the AN for the specific IMU).
+ * For example, in the case of LSM6DSO we have:
+ *
+ * 1 / (1 + x) ~= 1 - x (Taylor’s Series)
+ * ttrim[s] = 1 / (40000 * (1 + 0.0015 * val)) (from AN5192)
+ * ttrim[ns] ~= 25000 - 37.5 * val
+ * ttrim[ns] ~= 25000 - (37500 * val) / 1000
+ *
+ * so, replacing ts_sensitivity = 25000 and
+ * ts_trim_coeff = 37500
+ *
+ * ttrim[ns] ~= ts_sensitivity - (ts_trim_coeff * val) / 1000
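+ *
+ * e.g. with these values a freq_fine reading of val = 8 gives
+ * ttrim[ns] ~= 25000 - (37500 * 8) / 1000 = 24700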
*/
struct st_lsm6dsx_hw_ts_settings {
struct st_lsm6dsx_reg timer_en;
@@ -199,6 +215,8 @@ struct st_lsm6dsx_hw_ts_settings {
struct st_lsm6dsx_reg fifo_en;
struct st_lsm6dsx_reg decimator;
u8 freq_fine;
+ u16 ts_sensitivity;
+ u16 ts_trim_coeff;
};
/**
@@ -252,6 +270,15 @@ struct st_lsm6dsx_event_settings {
u8 wakeup_src_x_mask;
};
+enum st_lsm6dsx_sensor_id {
+ ST_LSM6DSX_ID_GYRO,
+ ST_LSM6DSX_ID_ACC,
+ ST_LSM6DSX_ID_EXT0,
+ ST_LSM6DSX_ID_EXT1,
+ ST_LSM6DSX_ID_EXT2,
+ ST_LSM6DSX_ID_MAX
+};
+
enum st_lsm6dsx_ext_sensor_id {
ST_LSM6DSX_ID_MAGN,
};
@@ -337,23 +364,14 @@ struct st_lsm6dsx_settings {
struct st_lsm6dsx_odr_table_entry odr_table[2];
struct st_lsm6dsx_samples_to_discard samples_to_discard[2];
struct st_lsm6dsx_fs_table_entry fs_table[2];
- struct st_lsm6dsx_reg decimator[ST_LSM6DSX_MAX_ID];
- struct st_lsm6dsx_reg batch[ST_LSM6DSX_MAX_ID];
+ struct st_lsm6dsx_reg decimator[ST_LSM6DSX_ID_MAX];
+ struct st_lsm6dsx_reg batch[ST_LSM6DSX_ID_MAX];
struct st_lsm6dsx_fifo_ops fifo_ops;
struct st_lsm6dsx_hw_ts_settings ts_settings;
struct st_lsm6dsx_shub_settings shub_settings;
struct st_lsm6dsx_event_settings event_settings;
};
-enum st_lsm6dsx_sensor_id {
- ST_LSM6DSX_ID_GYRO,
- ST_LSM6DSX_ID_ACC,
- ST_LSM6DSX_ID_EXT0,
- ST_LSM6DSX_ID_EXT1,
- ST_LSM6DSX_ID_EXT2,
- ST_LSM6DSX_ID_MAX,
-};
-
enum st_lsm6dsx_fifo_mode {
ST_LSM6DSX_FIFO_BYPASS = 0x0,
ST_LSM6DSX_FIFO_CONT = 0x6,
@@ -365,7 +383,8 @@ enum st_lsm6dsx_fifo_mode {
* @id: Sensor identifier.
* @hw: Pointer to instance of struct st_lsm6dsx_hw.
* @gain: Configured sensor sensitivity.
- * @odr: Output data rate of the sensor [Hz].
+ * @odr: Output data rate of the sensor [mHz].
+ * @hwfifo_odr_mHz: Batch data rate for the hardware FIFO [mHz].
* @samples_to_discard: Number of samples to discard for filters settling time.
* @watermark: Sensor watermark level.
* @decimator: Sensor decimation factor.
@@ -380,6 +399,7 @@ struct st_lsm6dsx_sensor {
u32 gain;
u32 odr;
+ u32 hwfifo_odr_mHz;
u16 samples_to_discard;
u16 watermark;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 8a9d2593576a..55d877745575 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -56,6 +56,7 @@
#include <linux/iio/kfifo_buf.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
#include <linux/regmap.h>
#include <linux/bitfield.h>
@@ -105,7 +106,7 @@ static int
st_lsm6dsx_get_decimator_val(struct st_lsm6dsx_sensor *sensor, u32 max_odr)
{
const int max_size = ARRAY_SIZE(st_lsm6dsx_decimator_table);
- u32 decimator = max_odr / sensor->odr;
+ u32 decimator = max_odr / sensor->hwfifo_odr_mHz;
int i;
if (decimator > 1)
@@ -136,14 +137,14 @@ static void st_lsm6dsx_get_max_min_odr(struct st_lsm6dsx_hw *hw,
if (!(hw->enable_mask & BIT(sensor->id)))
continue;
- *max_odr = max_t(u32, *max_odr, sensor->odr);
- *min_odr = min_t(u32, *min_odr, sensor->odr);
+ *max_odr = max(*max_odr, sensor->hwfifo_odr_mHz);
+ *min_odr = min(*min_odr, sensor->hwfifo_odr_mHz);
}
}
static u8 st_lsm6dsx_get_sip(struct st_lsm6dsx_sensor *sensor, u32 min_odr)
{
- u8 sip = sensor->odr / min_odr;
+ u8 sip = sensor->hwfifo_odr_mHz / min_odr;
return sip > 1 ? round_down(sip, 2) : sip;
}
@@ -231,7 +232,7 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
if (enable) {
int err;
- err = st_lsm6dsx_check_odr(sensor, sensor->odr,
+ err = st_lsm6dsx_check_odr(sensor, sensor->hwfifo_odr_mHz,
&data);
if (err < 0)
return err;
@@ -713,7 +714,7 @@ st_lsm6dsx_update_samples_to_discard(struct st_lsm6dsx_sensor *sensor)
data = &hw->settings->samples_to_discard[sensor->id];
for (i = 0; i < ST_LSM6DSX_ODR_LIST_SIZE; i++) {
- if (data->val[i].milli_hz == sensor->odr) {
+ if (data->val[i].milli_hz == sensor->hwfifo_odr_mHz) {
sensor->samples_to_discard = data->val[i].samples;
return;
}
@@ -799,6 +800,59 @@ static const struct iio_buffer_setup_ops st_lsm6dsx_buffer_ops = {
.postdisable = st_lsm6dsx_buffer_postdisable,
};
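+/*
+ * The FIFO batch rate is exposed as a buffer-scoped sampling_frequency
+ * attribute with milli-Hz precision: e.g. with the gyro batching at 104 Hz
+ * and the accel set to 26 Hz, the accel keeps every 4th sample in the FIFO
+ * (decimation factor 104000 / 26000 = 4).
+ */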
+static ssize_t st_lsm6dsx_hwfifo_odr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct st_lsm6dsx_sensor *sensor = iio_priv(dev_to_iio_dev(dev));
+
+ return sysfs_emit(buf, "%d.%03d\n", sensor->hwfifo_odr_mHz / 1000,
+ sensor->hwfifo_odr_mHz % 1000);
+}
+
+static ssize_t st_lsm6dsx_hwfifo_odr_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *iio_dev = dev_to_iio_dev(dev);
+ struct st_lsm6dsx_sensor *sensor = iio_priv(iio_dev);
+ int integer, milli;
+ int ret;
+ u32 hwfifo_odr;
+ u8 data;
+
+ if (!iio_device_claim_direct(iio_dev))
+ return -EBUSY;
+
+ ret = iio_str_to_fixpoint(buf, 100, &integer, &milli);
+ if (ret)
+ goto out;
+
+ hwfifo_odr = integer * 1000 + milli;
+ ret = st_lsm6dsx_check_odr(sensor, hwfifo_odr, &data);
+ if (ret < 0)
+ goto out;
+
+ hwfifo_odr = ret;
+
+ /* the batch data rate must not exceed the sensor output data rate */
+ if (hwfifo_odr <= sensor->odr)
+ sensor->hwfifo_odr_mHz = hwfifo_odr;
+ else
+ ret = -EINVAL;
+
+out:
+ iio_device_release_direct(iio_dev);
+
+ return ret < 0 ? ret : len;
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(0664, st_lsm6dsx_hwfifo_odr_show, st_lsm6dsx_hwfifo_odr_store);
+
+static const struct iio_dev_attr *st_lsm6dsx_buffer_attrs[] = {
+ &iio_dev_attr_sampling_frequency,
+ NULL
+};
+
int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
{
int i, ret;
@@ -807,8 +861,9 @@ int st_lsm6dsx_fifo_setup(struct st_lsm6dsx_hw *hw)
if (!hw->iio_devs[i])
continue;
- ret = devm_iio_kfifo_buffer_setup(hw->dev, hw->iio_devs[i],
- &st_lsm6dsx_buffer_ops);
+ ret = devm_iio_kfifo_buffer_setup_ext(hw->dev, hw->iio_devs[i],
+ &st_lsm6dsx_buffer_ops,
+ st_lsm6dsx_buffer_attrs);
if (ret)
return ret;
}
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index c65ad49829e7..49ac17806e72 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -94,8 +94,6 @@
#define ST_LSM6DSX_REG_WHOAMI_ADDR 0x0f
-#define ST_LSM6DSX_TS_SENSITIVITY 25000UL /* 25us */
-
static const struct iio_chan_spec st_lsm6dsx_acc_channels[] = {
ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x28, IIO_MOD_X, 0),
ST_LSM6DSX_CHANNEL_ACC(IIO_ACCEL, 0x2a, IIO_MOD_Y, 1),
@@ -983,6 +981,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(7, 6),
},
.freq_fine = 0x63,
+ .ts_sensitivity = 25000,
+ .ts_trim_coeff = 37500,
},
.shub_settings = {
.page_mux = {
@@ -1196,6 +1196,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(7, 6),
},
.freq_fine = 0x63,
+ .ts_sensitivity = 25000,
+ .ts_trim_coeff = 37500,
},
.event_settings = {
.enable_reg = {
@@ -1371,6 +1373,8 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.mask = GENMASK(7, 6),
},
.freq_fine = 0x4f,
+ .ts_sensitivity = 21701,
+ .ts_trim_coeff = 28212,
},
.shub_settings = {
.page_mux = {
@@ -1847,10 +1851,12 @@ static int st_lsm6dsx_write_raw(struct iio_dev *iio_dev,
val = val * 1000 + val2 / 1000;
val = st_lsm6dsx_check_odr(sensor, val, &data);
- if (val < 0)
+ if (val < 0) {
err = val;
- else
+ } else {
sensor->odr = val;
+ sensor->hwfifo_odr_mHz = val;
+ }
break;
}
default:
@@ -2035,10 +2041,10 @@ st_lsm6dsx_sysfs_sampling_frequency_avail(struct device *dev,
odr_table = &sensor->hw->settings->odr_table[sensor->id];
for (i = 0; i < odr_table->odr_len; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "%d.%03d ",
- odr_table->odr_avl[i].milli_hz / 1000,
- odr_table->odr_avl[i].milli_hz % 1000);
- buf[len - 1] = '\n';
+ len += sysfs_emit_at(buf, len, "%d.%03d%c",
+ odr_table->odr_avl[i].milli_hz / 1000,
+ odr_table->odr_avl[i].milli_hz % 1000,
+ (i == odr_table->odr_len - 1) ? '\n' : ' ');
return len;
}
@@ -2054,9 +2060,9 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev,
fs_table = &hw->settings->fs_table[sensor->id];
for (i = 0; i < fs_table->fs_len; i++)
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%09u ",
- fs_table->fs_avl[i].gain);
- buf[len - 1] = '\n';
+ len += sysfs_emit_at(buf, len, "0.%09u%c",
+ fs_table->fs_avl[i].gain,
+ (i == fs_table->fs_len - 1) ? '\n' : ' ');
return len;
}
@@ -2248,20 +2254,13 @@ static int st_lsm6dsx_init_hw_timer(struct st_lsm6dsx_hw *hw)
}
/* calibrate timestamp sensitivity */
- hw->ts_gain = ST_LSM6DSX_TS_SENSITIVITY;
+ hw->ts_gain = ts_settings->ts_sensitivity;
if (ts_settings->freq_fine) {
err = regmap_read(hw->regmap, ts_settings->freq_fine, &val);
if (err < 0)
return err;
- /*
- * linearize the AN5192 formula:
- * 1 / (1 + x) ~= 1 - x (Taylor’s Series)
- * ttrim[s] = 1 / (40000 * (1 + 0.0015 * val))
- * ttrim[ns] ~= 25000 - 37.5 * val
- * ttrim[ns] ~= 25000 - (37500 * val) / 1000
- */
- hw->ts_gain -= ((s8)val * 37500) / 1000;
+ hw->ts_gain -= ((s8)val * ts_settings->ts_trim_coeff) / 1000;
}
return 0;
@@ -2384,6 +2383,7 @@ static struct iio_dev *st_lsm6dsx_alloc_iiodev(struct st_lsm6dsx_hw *hw,
sensor->id = id;
sensor->hw = hw;
sensor->odr = hw->settings->odr_table[id].odr_avl[0].milli_hz;
+ sensor->hwfifo_odr_mHz = sensor->odr;
sensor->gain = hw->settings->fs_table[id].fs_avl[0].gain;
sensor->watermark = 1;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index 3c5e65dc0f97..d6a1eeb151ca 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -640,6 +640,7 @@ __st_lsm6dsx_shub_write_raw(struct iio_dev *iio_dev,
sensor->ext_info.slv_odr = val;
sensor->odr = odr;
+ sensor->hwfifo_odr_mHz = odr;
return 0;
}
case IIO_CHAN_INFO_SCALE:
@@ -746,6 +747,7 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw,
sensor->id = id;
sensor->hw = hw;
sensor->odr = hw->settings->odr_table[ref_id].odr_avl[0].milli_hz;
+ sensor->hwfifo_odr_mHz = sensor->odr;
sensor->ext_info.slv_odr = info->odr_table.odr_avl[0].milli_hz;
sensor->gain = info->fs_table.fs_avl[0].gain;
sensor->ext_info.settings = info;
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
index 23760652a046..447b694d6d5f 100644
--- a/drivers/iio/industrialio-backend.c
+++ b/drivers/iio/industrialio-backend.c
@@ -702,7 +702,7 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_interface_type_get, "IIO_BACKEND");
* interface/data bus. Hence, the backend device needs to be aware of it so
* data can be correctly transferred.
*
- * Return:
+ * RETURNS:
* 0 on success, negative error number on failure.
*/
int iio_backend_data_size_set(struct iio_backend *back, unsigned int size)
@@ -717,9 +717,10 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_data_size_set, "IIO_BACKEND");
/**
* iio_backend_oversampling_ratio_set - set the oversampling ratio
* @back: Backend device
+ * @chan: Channel number
* @ratio: The oversampling ratio - value 1 corresponds to no oversampling.
*
- * Return:
+ * RETURNS:
* 0 on success, negative error number on failure.
*/
int iio_backend_oversampling_ratio_set(struct iio_backend *back,
@@ -1064,6 +1065,9 @@ EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, "IIO_BACKEND");
/**
* iio_backend_get_priv - Get driver private data
* @back: Backend device
+ *
+ * RETURNS:
+ * Pointer to the driver private data associated with the backend.
*/
void *iio_backend_get_priv(const struct iio_backend *back)
{
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index a80f7cc25a27..c6259213e150 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1563,9 +1563,7 @@ static void iio_buffer_dmabuf_release(struct kref *ref)
struct iio_buffer *buffer = priv->buffer;
struct dma_buf *dmabuf = attach->dmabuf;
- dma_resv_lock(dmabuf->resv, NULL);
- dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
- dma_resv_unlock(dmabuf->resv);
+ dma_buf_unmap_attachment_unlocked(attach, priv->sgt, priv->dir);
buffer->access->detach_dmabuf(buffer, priv->block);
@@ -1623,19 +1621,28 @@ static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
return 0;
}
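+/*
+ * DMABUF attachments must be made against the device that actually performs
+ * the DMA. Buffers can provide it via the optional get_dma_dev() callback;
+ * otherwise the IIO parent device is used, as before.
+ */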
+static struct device *iio_buffer_get_dma_dev(const struct iio_dev *indio_dev,
+ struct iio_buffer *buffer)
+{
+ if (buffer->access->get_dma_dev)
+ return buffer->access->get_dma_dev(buffer);
+
+ return indio_dev->dev.parent;
+}
+
static struct dma_buf_attachment *
iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib,
struct dma_buf *dmabuf, bool nonblock)
{
- struct device *dev = ib->indio_dev->dev.parent;
struct iio_buffer *buffer = ib->buffer;
+ struct device *dma_dev = iio_buffer_get_dma_dev(ib->indio_dev, buffer);
struct dma_buf_attachment *attach = NULL;
struct iio_dmabuf_priv *priv;
guard(mutex)(&buffer->dmabufs_mutex);
list_for_each_entry(priv, &buffer->dmabufs, entry) {
- if (priv->attach->dev == dev
+ if (priv->attach->dev == dma_dev
&& priv->attach->dmabuf == dmabuf) {
attach = priv->attach;
break;
@@ -1653,6 +1660,7 @@ static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
{
struct iio_dev *indio_dev = ib->indio_dev;
struct iio_buffer *buffer = ib->buffer;
+ struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
struct dma_buf_attachment *attach;
struct iio_dmabuf_priv *priv, *each;
struct dma_buf *dmabuf;
@@ -1679,7 +1687,7 @@ static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
goto err_free_priv;
}
- attach = dma_buf_attach(dmabuf, indio_dev->dev.parent);
+ attach = dma_buf_attach(dmabuf, dma_dev);
if (IS_ERR(attach)) {
err = PTR_ERR(attach);
goto err_dmabuf_put;
@@ -1719,7 +1727,7 @@ static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
* combo. If we do, refuse to attach.
*/
list_for_each_entry(each, &buffer->dmabufs, entry) {
- if (each->attach->dev == indio_dev->dev.parent
+ if (each->attach->dev == dma_dev
&& each->attach->dmabuf == dmabuf) {
/*
* We unlocked the reservation object, so going through
@@ -1758,6 +1766,7 @@ static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib,
{
struct iio_buffer *buffer = ib->buffer;
struct iio_dev *indio_dev = ib->indio_dev;
+ struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
struct iio_dmabuf_priv *priv;
struct dma_buf *dmabuf;
int dmabuf_fd, ret = -EPERM;
@@ -1772,7 +1781,7 @@ static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib,
guard(mutex)(&buffer->dmabufs_mutex);
list_for_each_entry(priv, &buffer->dmabufs, entry) {
- if (priv->attach->dev == indio_dev->dev.parent
+ if (priv->attach->dev == dma_dev
&& priv->attach->dmabuf == dmabuf) {
list_del(&priv->entry);
@@ -2372,6 +2381,9 @@ static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
* iio_push_to_buffers() - push to a registered buffer.
* @indio_dev: iio_dev structure for device.
* @data: Full scan.
+ *
+ * Context: Any context.
+ * Return: 0 on success, negative error code on failure.
*/
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
@@ -2401,6 +2413,9 @@ EXPORT_SYMBOL_GPL(iio_push_to_buffers);
* not require space for the timestamp, or 8 byte alignment of data.
* It does however require an allocation on first call and additional
* copies on all calls, so should be avoided if possible.
+ *
+ * Context: May sleep.
+ * Return: 0 on success, negative error code on failure.
*/
int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
const void *data,
@@ -2409,6 +2424,8 @@ int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
+ might_sleep();
+
/*
* Conservative estimate - we can always safely copy the minimum
* of either the data provided or the length of the destination buffer.
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 159d6c5ca3ce..f69deefcfb6f 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -97,6 +97,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_COLORTEMP] = "colortemp",
[IIO_CHROMATICITY] = "chromaticity",
[IIO_ATTENTION] = "attention",
+ [IIO_ALTCURRENT] = "altcurrent",
};
static const char * const iio_modifier_names[] = {
@@ -152,6 +153,10 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_PITCH] = "pitch",
[IIO_MOD_YAW] = "yaw",
[IIO_MOD_ROLL] = "roll",
+ [IIO_MOD_RMS] = "rms",
+ [IIO_MOD_ACTIVE] = "active",
+ [IIO_MOD_REACTIVE] = "reactive",
+ [IIO_MOD_APPARENT] = "apparent",
};
/* relies on pairs of these shared then separate */
@@ -189,6 +194,7 @@ static const char * const iio_chan_info_postfix[] = {
[IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
[IIO_CHAN_INFO_TROUGH] = "trough_raw",
[IIO_CHAN_INFO_CONVDELAY] = "convdelay",
+ [IIO_CHAN_INFO_POWERFACTOR] = "powerfactor",
};
/**
* iio_device_id() - query the unique ID for the device
@@ -790,6 +796,7 @@ static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
switch (type) {
case IIO_VAL_INT:
+ case IIO_VAL_CHAR:
stride = 1;
break;
default:
@@ -1243,7 +1250,7 @@ static int iio_device_add_channel_label(struct iio_dev *indio_dev,
static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
enum iio_shared_by shared_by,
- const long *infomask)
+ const unsigned long *infomask)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int i, ret, attrcount = 0;
@@ -1273,7 +1280,7 @@ static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
enum iio_shared_by shared_by,
- const long *infomask)
+ const unsigned long *infomask)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
int i, ret, attrcount = 0;
@@ -1647,6 +1654,9 @@ static void iio_dev_release(struct device *device)
iio_device_detach_buffers(indio_dev);
+ mutex_destroy(&iio_dev_opaque->info_exist_lock);
+ mutex_destroy(&iio_dev_opaque->mlock);
+
lockdep_unregister_key(&iio_dev_opaque->mlock_key);
ida_free(&iio_ida, iio_dev_opaque->id);
@@ -1687,12 +1697,6 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
ACCESS_PRIVATE(indio_dev, priv) = (char *)iio_dev_opaque +
ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN);
- indio_dev->dev.parent = parent;
- indio_dev->dev.type = &iio_device_type;
- indio_dev->dev.bus = &iio_bus_type;
- device_initialize(&indio_dev->dev);
- mutex_init(&iio_dev_opaque->mlock);
- mutex_init(&iio_dev_opaque->info_exist_lock);
INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);
iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL);
@@ -1713,7 +1717,14 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);
lockdep_register_key(&iio_dev_opaque->mlock_key);
- lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);
+
+ mutex_init_with_key(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);
+ mutex_init(&iio_dev_opaque->info_exist_lock);
+
+ indio_dev->dev.parent = parent;
+ indio_dev->dev.type = &iio_device_type;
+ indio_dev->dev.bus = &iio_bus_type;
+ device_initialize(&indio_dev->dev);
return indio_dev;
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index c174ebb7d5e6..1e5eb5a41271 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -11,6 +11,7 @@
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
@@ -598,6 +599,42 @@ int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
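+/*
+ * Multiply a fixed-point IIO value by an integer factor and collapse the
+ * result to IIO_VAL_INT: e.g. multiplier = 1000 applied to an
+ * IIO_VAL_INT_PLUS_MICRO value of 1.5 (val = 1, val2 = 500000) gives 1500.
+ */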
+int iio_multiply_value(int *result, s64 multiplier,
+ unsigned int type, int val, int val2)
+{
+ s64 denominator;
+
+ switch (type) {
+ case IIO_VAL_INT:
+ *result = multiplier * val;
+ return IIO_VAL_INT;
+ case IIO_VAL_INT_PLUS_MICRO:
+ case IIO_VAL_INT_PLUS_NANO:
+ switch (type) {
+ case IIO_VAL_INT_PLUS_MICRO:
+ denominator = MICRO;
+ break;
+ case IIO_VAL_INT_PLUS_NANO:
+ denominator = NANO;
+ break;
+ }
+ *result = multiplier * abs(val);
+ *result += div_s64(multiplier * abs(val2), denominator);
+ if (val < 0 || val2 < 0)
+ *result *= -1;
+ return IIO_VAL_INT;
+ case IIO_VAL_FRACTIONAL:
+ *result = div_s64(multiplier * val, val2);
+ return IIO_VAL_INT;
+ case IIO_VAL_FRACTIONAL_LOG2:
+ *result = (multiplier * val) >> val2;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_NS_GPL(iio_multiply_value, "IIO_UNIT_TEST");
+
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
int raw, int *processed,
unsigned int scale)
@@ -605,6 +642,7 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
int scale_type, scale_val, scale_val2;
int offset_type, offset_val, offset_val2;
s64 raw64 = raw;
+ int ret;
offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
IIO_CHAN_INFO_OFFSET);
@@ -639,40 +677,14 @@ static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
* If no channel scaling is available apply consumer scale to
* raw value and return.
*/
- *processed = raw * scale;
+ *processed = raw64 * scale;
return 0;
}
- switch (scale_type) {
- case IIO_VAL_INT:
- *processed = raw64 * scale_val * scale;
- break;
- case IIO_VAL_INT_PLUS_MICRO:
- if (scale_val2 < 0)
- *processed = -raw64 * scale_val * scale;
- else
- *processed = raw64 * scale_val * scale;
- *processed += div_s64(raw64 * (s64)scale_val2 * scale,
- 1000000LL);
- break;
- case IIO_VAL_INT_PLUS_NANO:
- if (scale_val2 < 0)
- *processed = -raw64 * scale_val * scale;
- else
- *processed = raw64 * scale_val * scale;
- *processed += div_s64(raw64 * (s64)scale_val2 * scale,
- 1000000000LL);
- break;
- case IIO_VAL_FRACTIONAL:
- *processed = div_s64(raw64 * (s64)scale_val * scale,
- scale_val2);
- break;
- case IIO_VAL_FRACTIONAL_LOG2:
- *processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
- break;
- default:
- return -EINVAL;
- }
+ ret = iio_multiply_value(processed, raw64 * scale,
+ scale_type, scale_val, scale_val2);
+ if (ret < 0)
+ return ret;
return 0;
}
@@ -714,20 +726,19 @@ int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
unsigned int scale)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
- int ret;
+ int ret, pval, pval2;
guard(mutex)(&iio_dev_opaque->info_exist_lock);
if (!chan->indio_dev->info)
return -ENODEV;
if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
- ret = iio_channel_read(chan, val, NULL,
+ ret = iio_channel_read(chan, &pval, &pval2,
IIO_CHAN_INFO_PROCESSED);
if (ret < 0)
return ret;
- *val *= scale;
- return ret;
+ return iio_multiply_value(val, scale, ret, pval, pval2);
} else {
ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
if (ret < 0)
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 4a7d983c9cd4..ac1408d374c9 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -724,6 +724,19 @@ config VEML6040
To compile this driver as a module, choose M here: the
module will be called veml6040.
+config VEML6046X00
+ tristate "VEML6046X00 RGBIR color sensor"
+ select REGMAP_I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VEML6046X00
+ high accuracy RGBIR color sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called veml6046x00.
+
config VEML6070
tristate "VEML6070 UV A light sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 8229ebe6edc4..c0048e0d5ca8 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_VCNL4035) += vcnl4035.o
obj-$(CONFIG_VEML3235) += veml3235.o
obj-$(CONFIG_VEML6030) += veml6030.o
obj-$(CONFIG_VEML6040) += veml6040.o
+obj-$(CONFIG_VEML6046X00) += veml6046x00.o
obj-$(CONFIG_VEML6070) += veml6070.o
obj-$(CONFIG_VEML6075) += veml6075.o
obj-$(CONFIG_VL6180) += vl6180.o
diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c
index 032e6cae8b80..d5d1a8b9c035 100644
--- a/drivers/iio/light/acpi-als.c
+++ b/drivers/iio/light/acpi-als.c
@@ -49,20 +49,10 @@ static const struct iio_chan_spec acpi_als_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(1),
};
-/*
- * The event buffer contains timestamp and all the data from
- * the ACPI0008 block. There are multiple, but so far we only
- * support _ALI (illuminance): One channel, padding and timestamp.
- */
-#define ACPI_ALS_EVT_BUFFER_SIZE \
- (sizeof(s32) + sizeof(s32) + sizeof(s64))
-
struct acpi_als {
struct acpi_device *device;
struct mutex lock;
struct iio_trigger *trig;
-
- s32 evt_buffer[ACPI_ALS_EVT_BUFFER_SIZE / sizeof(s32)] __aligned(8);
};
/*
@@ -152,7 +142,10 @@ static irqreturn_t acpi_als_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct acpi_als *als = iio_priv(indio_dev);
- s32 *buffer = als->evt_buffer;
+ struct {
+ s32 light;
+ aligned_s64 ts;
+ } scan = { };
s32 val;
int ret;
@@ -161,7 +154,7 @@ static irqreturn_t acpi_als_trigger_handler(int irq, void *p)
ret = acpi_als_read_value(als, ACPI_ALS_ILLUMINANCE, &val);
if (ret < 0)
goto out;
- *buffer = val;
+ scan.light = val;
/*
* When coming from own trigger via polls, set polling function
@@ -174,7 +167,7 @@ static irqreturn_t acpi_als_trigger_handler(int irq, void *p)
if (!pf->timestamp)
pf->timestamp = iio_get_time_ns(indio_dev);
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), pf->timestamp);
out:
mutex_unlock(&als->lock);
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index cf96e3dd8bc6..edb3d9dc8bed 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -54,10 +54,6 @@
struct adjd_s311_data {
struct i2c_client *client;
- struct {
- s16 chans[4];
- aligned_s64 ts;
- } scan;
};
enum adjd_s311_channel_idx {
@@ -120,6 +116,10 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
struct adjd_s311_data *data = iio_priv(indio_dev);
s64 time_ns = iio_get_time_ns(indio_dev);
int i, j = 0;
+ struct {
+ s16 chans[4];
+ aligned_s64 ts;
+ } scan = { };
int ret = adjd_s311_req_data(indio_dev);
if (ret < 0)
@@ -131,10 +131,10 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->scan.chans[j++] = ret & ADJD_S311_DATA_MASK;
+ scan.chans[j++] = ret & ADJD_S311_DATA_MASK;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), time_ns);
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/al3000a.c b/drivers/iio/light/al3000a.c
index 6f301c067045..9871096cbab3 100644
--- a/drivers/iio/light/al3000a.c
+++ b/drivers/iio/light/al3000a.c
@@ -94,7 +94,7 @@ static int al3000a_init(struct al3000a_data *data)
ret = devm_add_action_or_reset(dev, al3000a_set_pwr_off, data);
if (ret)
- return dev_err_probe(dev, ret, "failed to add action\n");
+ return ret;
ret = regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_RESET);
if (ret)
diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c
index f676da245aa7..7e68cca0edfa 100644
--- a/drivers/iio/light/apds9306.c
+++ b/drivers/iio/light/apds9306.c
@@ -350,7 +350,7 @@ static const struct regmap_config apds9306_regmap = {
.volatile_table = &apds9306_volatile_table,
.precious_table = &apds9306_precious_table,
.max_register = APDS9306_ALS_THRES_VAR_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct reg_field apds9306_rf_sw_reset =
@@ -537,7 +537,6 @@ static int apds9306_read_data(struct apds9306_data *data, int *val, int reg)
*val = get_unaligned_le24(&buff);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return 0;
@@ -1121,7 +1120,6 @@ static int apds9306_write_event_config(struct iio_dev *indio_dev,
if (ret)
return ret;
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return 0;
@@ -1309,7 +1307,7 @@ static int apds9306_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, apds9306_powerdown, data);
if (ret)
- return dev_err_probe(dev, ret, "failed to add action or reset\n");
+ return ret;
ret = devm_iio_device_register(dev, indio_dev);
if (ret)
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index b92d0fce5aec..785c5dbe2d08 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -234,7 +234,7 @@ static const struct regmap_config apds9960_regmap_config = {
.reg_defaults = apds9960_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(apds9960_reg_defaults),
.max_register = APDS9960_REG_GFIFO_DIR(RIGHT),
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static const struct iio_event_spec apds9960_pxs_event_spec[] = {
@@ -495,7 +495,6 @@ static int apds9960_set_power_state(struct apds9960_data *data, bool on)
usleep_range(data->als_adc_int_us,
APDS9960_MAX_INT_TIME_IN_US);
} else {
- pm_runtime_mark_last_busy(dev);
ret = pm_runtime_put_autosuspend(dev);
}
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
index 4e9bd8f831f7..10b00344bbed 100644
--- a/drivers/iio/light/bh1745.c
+++ b/drivers/iio/light/bh1745.c
@@ -755,8 +755,8 @@ static irqreturn_t bh1745_trigger_handler(int interrupt, void *p)
scan.chans[j++] = value;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
err:
iio_trigger_notify_done(indio_dev->trig);
@@ -814,8 +814,7 @@ static int bh1745_init(struct bh1745_data *data)
ret = devm_add_action_or_reset(dev, bh1745_power_off, data);
if (ret)
- return dev_err_probe(dev, ret,
- "Failed to add action or reset\n");
+ return ret;
return 0;
}
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index c7c877d2fe67..5d3c6d5276ba 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -111,7 +111,6 @@ static int bh1780_read_raw(struct iio_dev *indio_dev,
value = bh1780_read_word(bh1780, BH1780_REG_DLOW);
if (value < 0)
return value;
- pm_runtime_mark_last_busy(&bh1780->client->dev);
pm_runtime_put_autosuspend(&bh1780->client->dev);
*val = value;
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index 42859e5b1089..a0d8a58f2704 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -271,7 +271,6 @@ static int gp2ap002_read_raw(struct iio_dev *indio_dev,
}
out:
- pm_runtime_mark_last_busy(gp2ap002->dev);
pm_runtime_put_autosuspend(gp2ap002->dev);
return ret;
@@ -353,7 +352,6 @@ static int gp2ap002_write_event_config(struct iio_dev *indio_dev,
pm_runtime_get_sync(gp2ap002->dev);
gp2ap002->enabled = true;
} else {
- pm_runtime_mark_last_busy(gp2ap002->dev);
pm_runtime_put_autosuspend(gp2ap002->dev);
gp2ap002->enabled = false;
}
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 830e5ae7f34a..384572844162 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -262,8 +262,9 @@ static int als_proc_event(struct hid_sensor_hub_device *hsdev,
if (!als_state->timestamp)
als_state->timestamp = iio_get_time_ns(indio_dev);
- iio_push_to_buffers_with_timestamp(indio_dev, &als_state->scan,
- als_state->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &als_state->scan,
+ sizeof(als_state->scan),
+ als_state->timestamp);
als_state->timestamp = 0;
}
diff --git a/drivers/iio/light/isl29028.c b/drivers/iio/light/isl29028.c
index 0e4284823d44..374bccad9119 100644
--- a/drivers/iio/light/isl29028.c
+++ b/drivers/iio/light/isl29028.c
@@ -336,16 +336,11 @@ static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
static int isl29028_set_pm_runtime_busy(struct isl29028_chip *chip, bool on)
{
struct device *dev = regmap_get_device(chip->regmap);
- int ret;
- if (on) {
- ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
- ret = pm_runtime_put_autosuspend(dev);
- }
+ if (on)
+ return pm_runtime_resume_and_get(dev);
- return ret;
+ return pm_runtime_put_autosuspend(dev);
}
/* Channel IO */
diff --git a/drivers/iio/light/isl29125.c b/drivers/iio/light/isl29125.c
index 6bc23b164cc5..3acb8a4f1d12 100644
--- a/drivers/iio/light/isl29125.c
+++ b/drivers/iio/light/isl29125.c
@@ -51,11 +51,6 @@
struct isl29125_data {
struct i2c_client *client;
u8 conf1;
- /* Ensure timestamp is naturally aligned */
- struct {
- u16 chans[3];
- aligned_s64 timestamp;
- } scan;
};
#define ISL29125_CHANNEL(_color, _si) { \
@@ -179,6 +174,11 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct isl29125_data *data = iio_priv(indio_dev);
int i, j = 0;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chans[3];
+ aligned_s64 timestamp;
+ } scan = { };
iio_for_each_active_channel(indio_dev, i) {
int ret = i2c_smbus_read_word_data(data->client,
@@ -186,10 +186,10 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->scan.chans[j++] = ret;
+ scan.chans[j++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/ltr390.c b/drivers/iio/light/ltr390.c
index ee59bbb8aa09..fc387426fa87 100644
--- a/drivers/iio/light/ltr390.c
+++ b/drivers/iio/light/ltr390.c
@@ -26,6 +26,7 @@
#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
@@ -38,12 +39,21 @@
#define LTR390_ALS_UVS_GAIN 0x05
#define LTR390_PART_ID 0x06
#define LTR390_MAIN_STATUS 0x07
+
#define LTR390_ALS_DATA 0x0D
+#define LTR390_ALS_DATA_BYTE(n) (LTR390_ALS_DATA + (n))
+
#define LTR390_UVS_DATA 0x10
+#define LTR390_UVS_DATA_BYTE(n) (LTR390_UVS_DATA + (n))
+
#define LTR390_INT_CFG 0x19
#define LTR390_INT_PST 0x1A
+
#define LTR390_THRESH_UP 0x21
+#define LTR390_THRESH_UP_BYTE(n) (LTR390_THRESH_UP + (n))
+
#define LTR390_THRESH_LOW 0x24
+#define LTR390_THRESH_LOW_BYTE(n) (LTR390_THRESH_LOW + (n))
#define LTR390_PART_NUMBER_ID 0xb
#define LTR390_ALS_UVS_GAIN_MASK GENMASK(2, 0)
@@ -96,6 +106,32 @@ struct ltr390_data {
enum ltr390_mode mode;
int gain;
int int_time_us;
+ bool irq_enabled;
+};
+
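+/*
+ * Limit register access to the documented windows; regmap (and thus also
+ * debugfs register access) rejects reads and writes outside these ranges.
+ */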
+static const struct regmap_range ltr390_readable_reg_ranges[] = {
+ regmap_reg_range(LTR390_MAIN_CTRL, LTR390_MAIN_CTRL),
+ regmap_reg_range(LTR390_ALS_UVS_MEAS_RATE, LTR390_MAIN_STATUS),
+ regmap_reg_range(LTR390_ALS_DATA_BYTE(0), LTR390_UVS_DATA_BYTE(2)),
+ regmap_reg_range(LTR390_INT_CFG, LTR390_INT_PST),
+ regmap_reg_range(LTR390_THRESH_UP_BYTE(0), LTR390_THRESH_LOW_BYTE(2)),
+};
+
+static const struct regmap_access_table ltr390_readable_reg_table = {
+ .yes_ranges = ltr390_readable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ltr390_readable_reg_ranges),
+};
+
+static const struct regmap_range ltr390_writeable_reg_ranges[] = {
+ regmap_reg_range(LTR390_MAIN_CTRL, LTR390_MAIN_CTRL),
+ regmap_reg_range(LTR390_ALS_UVS_MEAS_RATE, LTR390_ALS_UVS_GAIN),
+ regmap_reg_range(LTR390_INT_CFG, LTR390_INT_PST),
+ regmap_reg_range(LTR390_THRESH_UP_BYTE(0), LTR390_THRESH_LOW_BYTE(2)),
+};
+
+static const struct regmap_access_table ltr390_writeable_reg_table = {
+ .yes_ranges = ltr390_writeable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ltr390_writeable_reg_ranges),
};
static const struct regmap_config ltr390_regmap_config = {
@@ -103,6 +139,9 @@ static const struct regmap_config ltr390_regmap_config = {
.reg_bits = 8,
.reg_stride = 1,
.val_bits = 8,
+ .max_register = LTR390_THRESH_LOW_BYTE(2),
+ .rd_table = &ltr390_readable_reg_table,
+ .wr_table = &ltr390_writeable_reg_table,
};
/* Sampling frequency is in mili Hz and mili Seconds */
@@ -121,16 +160,16 @@ static int ltr390_register_read(struct ltr390_data *data, u8 register_address)
{
struct device *dev = &data->client->dev;
int ret;
- u8 recieve_buffer[3];
+ u8 receive_buffer[3];
- ret = regmap_bulk_read(data->regmap, register_address, recieve_buffer,
- sizeof(recieve_buffer));
+ ret = regmap_bulk_read(data->regmap, register_address, receive_buffer,
+ sizeof(receive_buffer));
if (ret) {
dev_err(dev, "failed to read measurement data");
return ret;
}
- return get_unaligned_le24(recieve_buffer);
+ return get_unaligned_le24(receive_buffer);
}
static int ltr390_set_mode(struct ltr390_data *data, enum ltr390_mode mode)
@@ -178,9 +217,10 @@ static int ltr390_get_samp_freq_or_period(struct ltr390_data *data,
return ltr390_samp_freq_table[value][option];
}
-static int ltr390_read_raw(struct iio_dev *iio_device,
- struct iio_chan_spec const *chan, int *val,
- int *val2, long mask)
+
+static int ltr390_do_read_raw(struct iio_dev *iio_device,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
{
int ret;
struct ltr390_data *data = iio_priv(iio_device);
@@ -243,6 +283,27 @@ static int ltr390_read_raw(struct iio_dev *iio_device,
}
}
+static int ltr390_read_raw(struct iio_dev *iio_device,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct ltr390_data *data = iio_priv(iio_device);
+ struct device *dev = &data->client->dev;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "runtime PM failed to resume: %d\n", ret);
+ return ret;
+ }
+
+ ret = ltr390_do_read_raw(iio_device, chan, val, val2, mask);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
/* integration time in us */
static const int ltr390_int_time_map_us[] = { 400000, 200000, 100000, 50000, 25000, 12500 };
static const int ltr390_gain_map[] = { 1, 3, 6, 9, 18 };
@@ -549,11 +610,11 @@ static int ltr390_read_event_config(struct iio_dev *indio_dev,
return FIELD_GET(LTR390_LS_INT_EN, status);
}
-static int ltr390_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- bool state)
+static int ltr390_do_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
{
struct ltr390_data *data = iio_priv(indio_dev);
int ret;
@@ -561,7 +622,6 @@ static int ltr390_write_event_config(struct iio_dev *indio_dev,
if (!state)
return regmap_clear_bits(data->regmap, LTR390_INT_CFG, LTR390_LS_INT_EN);
- guard(mutex)(&data->lock);
ret = regmap_set_bits(data->regmap, LTR390_INT_CFG, LTR390_LS_INT_EN);
if (ret < 0)
return ret;
@@ -586,6 +646,51 @@ static int ltr390_write_event_config(struct iio_dev *indio_dev,
}
}
+static int ltr390_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ int ret;
+ struct ltr390_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+
+ guard(mutex)(&data->lock);
+
+ if (state && !data->irq_enabled) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "runtime PM failed to resume: %d\n", ret);
+ return ret;
+ }
+ data->irq_enabled = true;
+ }
+
+ ret = ltr390_do_event_config(indio_dev, chan, type, dir, state);
+
+ if (!state && data->irq_enabled) {
+ data->irq_enabled = false;
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ return ret;
+}
+
+static int ltr390_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ltr390_data *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->lock);
+
+ if (readval)
+ return regmap_read(data->regmap, reg, readval);
+
+ return regmap_write(data->regmap, reg, writeval);
+}
+
static const struct iio_info ltr390_info = {
.read_raw = ltr390_read_raw,
.write_raw = ltr390_write_raw,
@@ -594,6 +699,7 @@ static const struct iio_info ltr390_info = {
.read_event_config = ltr390_read_event_config,
.write_event_value = ltr390_write_event_value,
.write_event_config = ltr390_write_event_config,
+ .debugfs_reg_access = ltr390_debugfs_reg_access,
};
static irqreturn_t ltr390_interrupt_handler(int irq, void *private)
@@ -628,6 +734,43 @@ static irqreturn_t ltr390_interrupt_handler(int irq, void *private)
return IRQ_HANDLED;
}
+static void ltr390_powerdown(void *priv)
+{
+ struct ltr390_data *data = priv;
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ guard(mutex)(&data->lock);
+
+ /* Ensure interrupts are disabled and the sensor is powered off */
+ if (data->irq_enabled) {
+ ret = regmap_clear_bits(data->regmap, LTR390_INT_CFG, LTR390_LS_INT_EN);
+ if (ret < 0)
+ dev_err(dev, "failed to disable interrupts\n");
+
+ data->irq_enabled = false;
+ pm_runtime_put_autosuspend(dev);
+ }
+
+ ret = regmap_clear_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SENSOR_ENABLE);
+ if (ret < 0)
+ dev_err(dev, "failed to disable sensor\n");
+}
+
+static int ltr390_pm_init(struct ltr390_data *data)
+{
+ int ret;
+ struct device *dev = &data->client->dev;
+
+ ret = devm_pm_runtime_set_active_enabled(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable runtime PM\n");
+
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+ return 0;
+}
+
static int ltr390_probe(struct i2c_client *client)
{
struct ltr390_data *data;
@@ -640,8 +783,9 @@ static int ltr390_probe(struct i2c_client *client)
if (!indio_dev)
return -ENOMEM;
- data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data = iio_priv(indio_dev);
data->regmap = devm_regmap_init_i2c(client, &ltr390_regmap_config);
if (IS_ERR(data->regmap))
return dev_err_probe(dev, PTR_ERR(data->regmap),
@@ -654,6 +798,8 @@ static int ltr390_probe(struct i2c_client *client)
data->gain = 3;
/* default mode for ltr390 is ALS mode */
data->mode = LTR390_SET_ALS_MODE;
+ /* default value of irq_enabled is false */
+ data->irq_enabled = false;
mutex_init(&data->lock);
@@ -681,6 +827,10 @@ static int ltr390_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "failed to enable the sensor\n");
+ ret = devm_add_action_or_reset(dev, ltr390_powerdown, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add action or reset\n");
+
if (client->irq) {
ret = devm_request_threaded_irq(dev, client->irq,
NULL, ltr390_interrupt_handler,
@@ -692,6 +842,10 @@ static int ltr390_probe(struct i2c_client *client)
"request irq (%d) failed\n", client->irq);
}
+ ret = ltr390_pm_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize runtime PM\n");
+
return devm_iio_device_register(dev, indio_dev);
}
@@ -713,7 +867,26 @@ static int ltr390_resume(struct device *dev)
LTR390_SENSOR_ENABLE);
}
-static DEFINE_SIMPLE_DEV_PM_OPS(ltr390_pm_ops, ltr390_suspend, ltr390_resume);
+static int ltr390_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ltr390_data *data = iio_priv(indio_dev);
+
+ return regmap_clear_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SENSOR_ENABLE);
+}
+
+static int ltr390_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ltr390_data *data = iio_priv(indio_dev);
+
+ return regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SENSOR_ENABLE);
+}
+
+static const struct dev_pm_ops ltr390_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(ltr390_suspend, ltr390_resume)
+ RUNTIME_PM_OPS(ltr390_runtime_suspend, ltr390_runtime_resume, NULL)
+};
static const struct i2c_device_id ltr390_id[] = {
{ "ltr390" },
@@ -731,7 +904,7 @@ static struct i2c_driver ltr390_driver = {
.driver = {
.name = "ltr390",
.of_match_table = ltr390_of_table,
- .pm = pm_sleep_ptr(&ltr390_pm_ops),
+ .pm = pm_ptr(&ltr390_pm_ops),
},
.probe = ltr390_probe,
.id_table = ltr390_id,
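
The ltr390 changes above set a pattern that several drivers in this series repeat: the original callback body is split into a *_do_*() helper, and the exported callback becomes a thin wrapper that holds a runtime-PM reference across the hardware access; the event-config wrapper additionally tracks irq_enabled so a reference is held (keeping the device powered) for as long as interrupts are armed. A minimal sketch of the wrapper shape, with generic foo_*() names standing in for the driver specifics:

#include <linux/pm_runtime.h>

/* Sketch only: foo_do_read() stands in for the driver-specific
 * body that actually touches the hardware. */
static int foo_read(struct device *dev)
{
	int ret;

	/* Take a runtime-PM reference, resuming the device if needed. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	ret = foo_do_read(dev);

	/*
	 * Drop the reference; the device suspends once the autosuspend
	 * delay (1 s in ltr390_pm_init() above) expires.
	 */
	pm_runtime_put_autosuspend(dev);

	return ret;
}

The new .debugfs_reg_access callback sits outside this scheme: it is reached through IIO's direct_reg_access file in debugfs and only serializes raw register reads and writes with the driver mutex.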
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index debf57a52d1c..022e0693983b 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1315,8 +1315,8 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
scan.channels[j++] = psdata & LTR501_PS_DATA_MASK;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c
index 61f57a82b872..5f27f754fe1c 100644
--- a/drivers/iio/light/ltrf216a.c
+++ b/drivers/iio/light/ltrf216a.c
@@ -208,7 +208,6 @@ static int ltrf216a_set_power_state(struct ltrf216a_data *data, bool on)
return ret;
}
} else {
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
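
This ltrf216a hunk is the first of many instances of a tree-wide runtime-PM cleanup repeated below: pm_runtime_put_autosuspend() now updates the device's last-busy timestamp itself, so the preceding pm_runtime_mark_last_busy() call is redundant. The idle path collapses to a single call:

/* Previously: pm_runtime_mark_last_busy(dev) followed by the put.
 * The put helper now restarts the autosuspend timer on its own. */
pm_runtime_put_autosuspend(dev);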
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index e8b767680133..039d45af3a7f 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -75,11 +75,6 @@
struct max44000_data {
struct mutex lock;
struct regmap *regmap;
- /* Ensure naturally aligned timestamp */
- struct {
- u16 channels[2];
- aligned_s64 ts;
- } scan;
};
/* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */
@@ -496,24 +491,29 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
int index = 0;
unsigned int regval;
int ret;
+ struct {
+ u16 channels[2];
+ aligned_s64 ts;
+ } scan = { };
+
mutex_lock(&data->lock);
if (test_bit(MAX44000_SCAN_INDEX_ALS, indio_dev->active_scan_mask)) {
ret = max44000_read_alsval(data);
if (ret < 0)
goto out_unlock;
- data->scan.channels[index++] = ret;
+ scan.channels[index++] = ret;
}
if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) {
ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
if (ret < 0)
goto out_unlock;
- data->scan.channels[index] = regval;
+ scan.channels[index] = regval;
}
mutex_unlock(&data->lock);
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
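
The max44000 conversion combines the two buffer-path changes that recur through the rest of this series: the scan buffer moves out of the driver's private data into a zero-initialized local on the handler stack, so no stale state or uninitialized padding leaks to userspace, and iio_push_to_buffers_with_timestamp() gives way to iio_push_to_buffers_with_ts(), which takes the buffer size explicitly so the core can check it. A generic sketch of the resulting handler, with hypothetical foo_read_*() channel reads:

static irqreturn_t foo_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	/* On-stack and zero-initialized; aligned_s64 keeps the
	 * timestamp naturally aligned. */
	struct {
		u16 channels[2];
		aligned_s64 ts;
	} scan = { };

	scan.channels[0] = foo_read_als();
	scan.channels[1] = foo_read_prox();

	iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
				    iio_get_time_ns(indio_dev));
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}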
diff --git a/drivers/iio/light/opt4001.c b/drivers/iio/light/opt4001.c
index ba4eb82d9bc2..95167273bb90 100644
--- a/drivers/iio/light/opt4001.c
+++ b/drivers/iio/light/opt4001.c
@@ -428,8 +428,7 @@ static int opt4001_probe(struct i2c_client *client)
opt4001_chip_off_action,
chip);
if (ret < 0)
- return dev_err_probe(&client->dev, ret,
- "Failed to setup power off action\n");
+ return ret;
return devm_iio_device_register(&client->dev, indio_dev);
}
diff --git a/drivers/iio/light/opt4060.c b/drivers/iio/light/opt4060.c
index 566f1bb8fe2a..981c704e7df5 100644
--- a/drivers/iio/light/opt4060.c
+++ b/drivers/iio/light/opt4060.c
@@ -1104,7 +1104,7 @@ static irqreturn_t opt4060_trigger_handler(int irq, void *p)
}
}
- iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp);
+ iio_push_to_buffers_with_ts(idev, &raw, sizeof(raw), pf->timestamp);
err_read:
iio_trigger_notify_done(idev->trig);
return IRQ_HANDLED;
@@ -1212,7 +1212,7 @@ static int opt4060_setup_trigger(struct opt4060_chip *chip, struct iio_dev *idev
name = devm_kasprintf(chip->dev, GFP_KERNEL, "%s-opt4060",
dev_name(chip->dev));
if (!name)
- return dev_err_probe(chip->dev, -ENOMEM, "Failed to alloc chip name\n");
+ return -ENOMEM;
ret = devm_request_threaded_irq(chip->dev, chip->irq, NULL, opt4060_irq_thread,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
@@ -1299,8 +1299,7 @@ static int opt4060_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(dev, opt4060_chip_off_action, chip);
if (ret < 0)
- return dev_err_probe(dev, ret,
- "Failed to setup power off action\n");
+ return ret;
ret = opt4060_setup_buffer(chip, indio_dev);
if (ret)
diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c
index 8885852bef22..98a1f1624c75 100644
--- a/drivers/iio/light/pa12203001.c
+++ b/drivers/iio/light/pa12203001.c
@@ -185,15 +185,10 @@ static int pa12203001_set_power_state(struct pa12203001_data *data, bool on,
mutex_unlock(&data->lock);
}
- if (on) {
- ret = pm_runtime_resume_and_get(&data->client->dev);
+ if (on)
+ return pm_runtime_resume_and_get(&data->client->dev);
- } else {
- pm_runtime_mark_last_busy(&data->client->dev);
- ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
-
- return ret;
+ return pm_runtime_put_autosuspend(&data->client->dev);
err:
mutex_unlock(&data->lock);
diff --git a/drivers/iio/light/rohm-bu27034.c b/drivers/iio/light/rohm-bu27034.c
index 7cec5e943373..28d111ac8c0a 100644
--- a/drivers/iio/light/rohm-bu27034.c
+++ b/drivers/iio/light/rohm-bu27034.c
@@ -1193,7 +1193,8 @@ static int bu27034_buffer_thread(void *arg)
*/
data->scan.mlux = (u32)mlux;
}
- iio_push_to_buffers_with_timestamp(idev, &data->scan, tstamp);
+ iio_push_to_buffers_with_ts(idev, &data->scan,
+ sizeof(data->scan), tstamp);
}
return 0;
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index c50183f07240..9341c1d58cbe 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -358,12 +358,10 @@ static int rpr0521_set_power_state(struct rpr0521_data *data, bool on,
* Note: If either measurement is re-enabled before _suspend(),
* both stay enabled until _suspend().
*/
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(&data->client->dev);
- } else {
- pm_runtime_mark_last_busy(&data->client->dev);
+ else
ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
if (ret < 0) {
dev_err(&data->client->dev,
"Failed: rpr0521_set_power_state for %d, ret %d\n",
@@ -457,8 +455,8 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
data->scan.channels,
(3 * 2) + 1); /* 3 * 16-bit + (discarded) int clear reg. */
if (!err)
- iio_push_to_buffers_with_timestamp(indio_dev,
- &data->scan, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &data->scan,
+ sizeof(data->scan), pf->timestamp);
else
dev_err(&data->client->dev,
"Trigger consumer can't read from sensor.\n");
diff --git a/drivers/iio/light/si1145.c b/drivers/iio/light/si1145.c
index 4aa02afd853e..f8eb251eca8d 100644
--- a/drivers/iio/light/si1145.c
+++ b/drivers/iio/light/si1145.c
@@ -494,8 +494,9 @@ static irqreturn_t si1145_trigger_handler(int irq, void *private)
goto done;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, data->buffer,
+ sizeof(data->buffer),
+ iio_get_time_ns(indio_dev));
done:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h
index 1f93e3dc45c2..78bc56aad129 100644
--- a/drivers/iio/light/st_uvis25.h
+++ b/drivers/iio/light/st_uvis25.h
@@ -27,11 +27,6 @@ struct st_uvis25_hw {
struct iio_trigger *trig;
bool enabled;
int irq;
- /* Ensure timestamp is naturally aligned */
- struct {
- u8 chan;
- aligned_s64 ts;
- } scan;
};
extern const struct dev_pm_ops st_uvis25_pm_ops;
diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
index 124a8f9204a9..bcd729a9924e 100644
--- a/drivers/iio/light/st_uvis25_core.c
+++ b/drivers/iio/light/st_uvis25_core.c
@@ -234,15 +234,21 @@ static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p)
struct st_uvis25_hw *hw = iio_priv(iio_dev);
unsigned int val;
int err;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u8 chan;
+ aligned_s64 ts;
+ } scan = { };
+
err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, &val);
if (err < 0)
goto out;
- hw->scan.chan = val;
+ scan.chan = val;
- iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan,
- iio_get_time_ns(iio_dev));
+ iio_push_to_buffers_with_ts(iio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(iio_dev));
out:
iio_trigger_notify_done(hw->trig);
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 81dd2bfc22c0..a75a83594a7e 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -607,10 +607,8 @@ static int stk3310_probe(struct i2c_client *client)
struct stk3310_data *data;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
- if (!indio_dev) {
- dev_err(&client->dev, "iio allocation failed!\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->client = client;
diff --git a/drivers/iio/light/tcs3414.c b/drivers/iio/light/tcs3414.c
index 39268f855c77..5be461e6dbdb 100644
--- a/drivers/iio/light/tcs3414.c
+++ b/drivers/iio/light/tcs3414.c
@@ -53,11 +53,6 @@ struct tcs3414_data {
u8 control;
u8 gain;
u8 timing;
- /* Ensure timestamp is naturally aligned */
- struct {
- u16 chans[4];
- aligned_s64 timestamp;
- } scan;
};
#define TCS3414_CHANNEL(_color, _si, _addr) { \
@@ -204,6 +199,12 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct tcs3414_data *data = iio_priv(indio_dev);
int i, j = 0;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chans[4];
+ aligned_s64 timestamp;
+ } scan = { };
+
iio_for_each_active_channel(indio_dev, i) {
int ret = i2c_smbus_read_word_data(data->client,
@@ -211,10 +212,10 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->scan.chans[j++] = ret;
+ scan.chans[j++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index 0f8bf8503edd..12429a3261b3 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -64,11 +64,6 @@ struct tcs3472_data {
u8 control;
u8 atime;
u8 apers;
- /* Ensure timestamp is naturally aligned */
- struct {
- u16 chans[4];
- aligned_s64 timestamp;
- } scan;
};
static const struct iio_event_spec tcs3472_events[] = {
@@ -377,6 +372,11 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct tcs3472_data *data = iio_priv(indio_dev);
int i, j = 0;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ u16 chans[4];
+ aligned_s64 timestamp;
+ } scan = { };
int ret = tcs3472_req_data(data);
if (ret < 0)
@@ -388,10 +388,10 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
if (ret < 0)
goto done;
- data->scan.chans[j++] = ret;
+ scan.chans[j++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
done:
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index fc3b0c4226be..8801a491de77 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -641,16 +641,10 @@ static const struct iio_chan_spec tsl2583_channels[] = {
static int tsl2583_set_pm_runtime_busy(struct tsl2583_chip *chip, bool on)
{
- int ret;
+ if (on)
+ return pm_runtime_resume_and_get(&chip->client->dev);
- if (on) {
- ret = pm_runtime_resume_and_get(&chip->client->dev);
- } else {
- pm_runtime_mark_last_busy(&chip->client->dev);
- ret = pm_runtime_put_autosuspend(&chip->client->dev);
- }
-
- return ret;
+ return pm_runtime_put_autosuspend(&chip->client->dev);
}
static int tsl2583_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/light/tsl2591.c b/drivers/iio/light/tsl2591.c
index 08476f193a44..c5557867ea43 100644
--- a/drivers/iio/light/tsl2591.c
+++ b/drivers/iio/light/tsl2591.c
@@ -772,7 +772,6 @@ static int tsl2591_read_raw(struct iio_dev *indio_dev,
err_unlock:
mutex_unlock(&chip->als_mutex);
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
@@ -995,7 +994,6 @@ static int tsl2591_write_event_config(struct iio_dev *indio_dev,
pm_runtime_get_sync(&client->dev);
} else if (!state && chip->events_enabled) {
chip->events_enabled = false;
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
}
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 61a0957317a1..d2f5a44892a8 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -361,19 +361,13 @@ static int us5182d_shutdown_en(struct us5182d_data *data, u8 state)
static int us5182d_set_power_state(struct us5182d_data *data, bool on)
{
- int ret;
-
if (data->power_mode == US5182D_ONESHOT)
return 0;
- if (on) {
- ret = pm_runtime_resume_and_get(&data->client->dev);
- } else {
- pm_runtime_mark_last_busy(&data->client->dev);
- ret = pm_runtime_put_autosuspend(&data->client->dev);
- }
+ if (on)
+ return pm_runtime_resume_and_get(&data->client->dev);
- return ret;
+ return pm_runtime_put_autosuspend(&data->client->dev);
}
static int us5182d_read_value(struct us5182d_data *data,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 90e7d4421abf..4dbb2294a843 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -576,16 +576,11 @@ static bool vcnl4010_is_in_periodic_mode(struct vcnl4000_data *data)
static int vcnl4000_set_pm_runtime_state(struct vcnl4000_data *data, bool on)
{
struct device *dev = &data->client->dev;
- int ret;
- if (on) {
- ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
- ret = pm_runtime_put_autosuspend(dev);
- }
+ if (on)
+ return pm_runtime_resume_and_get(dev);
- return ret;
+ return pm_runtime_put_autosuspend(dev);
}
static int vcnl4040_read_als_it(struct vcnl4000_data *data, int *val, int *val2)
@@ -1662,7 +1657,10 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct vcnl4000_data *data = iio_priv(indio_dev);
const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
- u16 buffer[8] __aligned(8) = {0}; /* 1x16-bit + naturally aligned ts */
+ struct {
+ u16 chan;
+ aligned_s64 ts;
+ } scan = { };
bool data_read = false;
unsigned long isr;
int val = 0;
@@ -1682,7 +1680,7 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
if (ret < 0)
goto end;
- buffer[0] = val;
+ scan.chan = val;
data_read = true;
}
}
@@ -1695,8 +1693,8 @@ static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
if (!data_read)
goto end;
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns(indio_dev));
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
+ iio_get_time_ns(indio_dev));
end:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c
index 01bc99564f98..963747927425 100644
--- a/drivers/iio/light/vcnl4035.c
+++ b/drivers/iio/light/vcnl4035.c
@@ -141,17 +141,12 @@ static const struct iio_trigger_ops vcnl4035_trigger_ops = {
static int vcnl4035_set_pm_runtime_state(struct vcnl4035_data *data, bool on)
{
- int ret;
struct device *dev = &data->client->dev;
- if (on) {
- ret = pm_runtime_resume_and_get(dev);
- } else {
- pm_runtime_mark_last_busy(dev);
- ret = pm_runtime_put_autosuspend(dev);
- }
+ if (on)
+ return pm_runtime_resume_and_get(dev);
- return ret;
+ return pm_runtime_put_autosuspend(dev);
}
static int vcnl4035_read_info_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/light/veml3235.c b/drivers/iio/light/veml3235.c
index 77c9ae17ed47..9309ad83ca9e 100644
--- a/drivers/iio/light/veml3235.c
+++ b/drivers/iio/light/veml3235.c
@@ -154,7 +154,7 @@ static const struct regmap_config veml3235_regmap_config = {
.rd_table = &veml3235_readable_table,
.wr_table = &veml3235_writable_table,
.volatile_table = &veml3235_volatile_table,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
static int veml3235_get_it(struct veml3235_data *data, int *val, int *val2)
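
The veml3235 one-liner is another recurring modernization: the rbtree register cache is being retired in favour of the maple-tree cache, the preferred choice for new regmap users. Only the cache_type changes; a minimal sketch with illustrative register widths:

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,		/* illustrative widths, not veml3235's */
	.val_bits = 16,
	/* Maple tree is the preferred register cache for new code. */
	.cache_type = REGCACHE_MAPLE,
};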
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index 0945f146bedb..6bcacae3863c 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -903,7 +903,7 @@ static irqreturn_t veml6030_trigger_handler(int irq, void *p)
scan.chans[i++] = reg;
}
- iio_push_to_buffers_with_timestamp(iio, &scan, pf->timestamp);
+ iio_push_to_buffers_with_ts(iio, &scan, sizeof(scan), pf->timestamp);
done:
iio_trigger_notify_done(iio->trig);
diff --git a/drivers/iio/light/veml6040.c b/drivers/iio/light/veml6040.c
index 71a594b2ec85..f563f9f0ee67 100644
--- a/drivers/iio/light/veml6040.c
+++ b/drivers/iio/light/veml6040.c
@@ -219,8 +219,7 @@ static int veml6040_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "IIO device allocation failed\n");
+ return -ENOMEM;
regmap = devm_regmap_init_i2c(client, &veml6040_regmap_config);
if (IS_ERR(regmap))
diff --git a/drivers/iio/light/veml6046x00.c b/drivers/iio/light/veml6046x00.c
new file mode 100644
index 000000000000..e60f24d46e7b
--- /dev/null
+++ b/drivers/iio/light/veml6046x00.c
@@ -0,0 +1,1030 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VEML6046X00 High Accuracy RGBIR Color Sensor
+ *
+ * Copyright (c) 2025 Andreas Klinger <ak@it-klinger.de>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <asm/byteorder.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/*
+ * Device registers
+ * Those which are accessed as bulk io are omitted
+ */
+#define VEML6046X00_REG_CONF0 0x00
+#define VEML6046X00_REG_CONF1 0x01
+#define VEML6046X00_REG_THDH 0x04
+#define VEML6046X00_REG_THDL 0x06
+#define VEML6046X00_REG_R 0x10
+#define VEML6046X00_REG_G 0x12
+#define VEML6046X00_REG_B 0x14
+#define VEML6046X00_REG_IR 0x16
+#define VEML6046X00_REG_ID 0x18
+#define VEML6046X00_REG_INT 0x1A
+#define VEML6046X00_REG_INT_H 0x1B
+
+/* Bit masks for specific functionality */
+#define VEML6046X00_CONF0_ON_0 BIT(0)
+#define VEML6046X00_CONF0_INT BIT(1)
+#define VEML6046X00_CONF0_AF_TRIG BIT(2)
+#define VEML6046X00_CONF0_AF BIT(3)
+#define VEML6046X00_CONF0_IT GENMASK(6, 4)
+#define VEML6046X00_CONF1_CAL BIT(0)
+#define VEML6046X00_CONF1_PERS GENMASK(2, 1)
+#define VEML6046X00_CONF1_GAIN GENMASK(4, 3)
+#define VEML6046X00_CONF1_PD_D2 BIT(6)
+#define VEML6046X00_CONF1_ON_1 BIT(7)
+#define VEML6046X00_INT_TH_H BIT(1)
+#define VEML6046X00_INT_TH_L BIT(2)
+#define VEML6046X00_INT_DRDY BIT(3)
+#define VEML6046X00_INT_MASK \
+ (VEML6046X00_INT_TH_H | VEML6046X00_INT_TH_L | VEML6046X00_INT_DRDY)
+
+#define VEML6046X00_GAIN_1 0x0
+#define VEML6046X00_GAIN_2 0x1
+#define VEML6046X00_GAIN_0_66 0x2
+#define VEML6046X00_GAIN_0_5 0x3
+
+#define VEML6046X00_PD_2_2 0x0
+#define VEML6046X00_PD_1_2 BIT(6)
+
+/* Autosuspend delay */
+#define VEML6046X00_AUTOSUSPEND_MS (3 * MSEC_PER_SEC)
+
+enum veml6046x00_scan {
+ VEML6046X00_SCAN_R,
+ VEML6046X00_SCAN_G,
+ VEML6046X00_SCAN_B,
+ VEML6046X00_SCAN_IR,
+ VEML6046X00_SCAN_TIMESTAMP,
+};
+
+/**
+ * struct veml6046x00_rf - Regmap field of configuration registers.
+ * @int_en: Interrupt enable of green channel.
+ * @mode: Mode of operation.
+ * The driver always uses active force mode.
+ * @trig: Trigger to be set in active force mode to start a
+ * measurement.
+ * @it: Integration time.
+ * @pers: Persistence - number of threshold crossings needed to
+ * trigger an interrupt.
+ */
+struct veml6046x00_rf {
+ struct regmap_field *int_en;
+ struct regmap_field *mode;
+ struct regmap_field *trig;
+ struct regmap_field *it;
+ struct regmap_field *pers;
+};
+
+/**
+ * struct veml6046x00_data - Private data of driver.
+ * @regmap: Regmap definition of sensor.
+ * @trig: Industrial-IO trigger.
+ * @rf: Regmap field of configuration.
+ */
+struct veml6046x00_data {
+ struct regmap *regmap;
+ struct iio_trigger *trig;
+ struct veml6046x00_rf rf;
+};
+
+/**
+ * DOC: Valid integration times (IT)
+ *
+ * static const int veml6046x00_it contains the array of valid ITs.
+ *
+ * The register value read from or written to the regmap_field "it" is
+ * identical to the array index, so no separate translation table between
+ * valid integration times and register values is needed.
+ *
+ * The array is in the form as expected by the callback of the sysfs attribute
+ * integration_time_available (IIO_CHAN_INFO_INT_TIME). So there is no
+ * additional conversion needed.
+ */
+static const int veml6046x00_it[][2] = {
+ { 0, 3125 },
+ { 0, 6250 },
+ { 0, 12500 },
+ { 0, 25000 },
+ { 0, 50000 },
+ { 0, 100000 },
+ { 0, 200000 },
+ { 0, 400000 },
+};
+
+/**
+ * DOC: Handling of gain and photodiode size (PD)
+ *
+ * Gains here in the driver are not exactly the same as in the datasheet of the
+ * sensor. The gain in the driver is a combination of the gain of the sensor
+ * with the photodiode size (PD).
+ * The following combinations are possible:
+ * gain(driver) = gain(sensor) * PD
+ * 0.25 = x0.5 * 1/2
+ * 0.33 = x0.66 * 1/2
+ * 0.5 = x0.5 * 2/2
+ * 0.66 = x0.66 * 2/2
+ * 1 = x1 * 2/2
+ * 2 = x2 * 2/2
+ */
+
+/**
+ * struct veml6046x00_gain_pd - Translation of gain and photodiode size (PD).
+ * @gain_sen: Gain used in the sensor as described in the datasheet of the
+ * sensor
+ * @pd: Photodiode size in the sensor
+ *
+ * This is the translation table from the gain used in the driver (and also used
+ * by the userspace interface in sysfs) to the gain and PD used in the sensor
+ * hardware.
+ *
+ * There are six gain values visible to the user (0.25 .. 2) which translate to
+ * four different gains in the sensor hardware (x0.5 .. x2) and two PDs (1/2
+ * and 2/2). In theory there are eight combinations, but the gain values 0.5
+ * and 1 each occur twice, so the combination with the larger PD (2/2) is used,
+ * as more photodiode cells are expected to deliver a more precise result.
+ */
+struct veml6046x00_gain_pd {
+ unsigned int gain_sen;
+ unsigned int pd;
+};
+
+static const struct veml6046x00_gain_pd veml6046x00_gain_pd[] = {
+ { .gain_sen = VEML6046X00_GAIN_0_5, .pd = VEML6046X00_PD_1_2 },
+ { .gain_sen = VEML6046X00_GAIN_0_66, .pd = VEML6046X00_PD_1_2 },
+ { .gain_sen = VEML6046X00_GAIN_0_5, .pd = VEML6046X00_PD_2_2 },
+ { .gain_sen = VEML6046X00_GAIN_0_66, .pd = VEML6046X00_PD_2_2 },
+ { .gain_sen = VEML6046X00_GAIN_1, .pd = VEML6046X00_PD_2_2 },
+ { .gain_sen = VEML6046X00_GAIN_2, .pd = VEML6046X00_PD_2_2 },
+};
+
+/**
+ * DOC: Factors for calculation of lux
+ *
+ * static const int veml6046x00_it_gains contains the factors for
+ * calculating lux.
+ *
+ * For constant light, the measured raw values differ depending on the
+ * configured integration time (IT), gain and photodiode size (PD). As the
+ * gain and PD are already coupled in the driver (see &struct
+ * veml6046x00_gain_pd), two dimensions remain: IT and gain(driver).
+ *
+ * The array of available factors for a certain IT are grouped together in the
+ * same form as expected by the callback of scale_available
+ * (IIO_CHAN_INFO_SCALE).
+ *
+ * Factors for lux / raw count are taken directly from the datasheet.
+ */
+static const int veml6046x00_it_gains[][6][2] = {
+ /* integration time: 3.125 ms */
+ {
+ { 5, 376000 }, /* gain: x0.25 */
+ { 4, 72700 }, /* gain: x0.33 */
+ { 2, 688000 }, /* gain: x0.5 */
+ { 2, 36400 }, /* gain: x0.66 */
+ { 1, 344000 }, /* gain: x1 */
+ { 0, 672000 }, /* gain: x2 */
+ },
+ /* integration time: 6.25 ms */
+ {
+ { 2, 688000 }, /* gain: x0.25 */
+ { 2, 36350 }, /* gain: x0.33 */
+ { 1, 344000 }, /* gain: x0.5 */
+ { 1, 18200 }, /* gain: x0.66 */
+ { 0, 672000 }, /* gain: x1 */
+ { 0, 336000 }, /* gain: x2 */
+ },
+ /* integration time: 12.5 ms */
+ {
+ { 1, 344000 }, /* gain: x0.25 */
+ { 1, 18175 }, /* gain: x0.33 */
+ { 0, 672000 }, /* gain: x0.5 */
+ { 0, 509100 }, /* gain: x0.66 */
+ { 0, 336000 }, /* gain: x1 */
+ { 0, 168000 }, /* gain: x2 */
+ },
+ /* integration time: 25 ms */
+ {
+ { 0, 672000 }, /* gain: x0.25 */
+ { 0, 509087 }, /* gain: x0.33 */
+ { 0, 336000 }, /* gain: x0.5 */
+ { 0, 254550 }, /* gain: x0.66 */
+ { 0, 168000 }, /* gain: x1 */
+ { 0, 84000 }, /* gain: x2 */
+ },
+ /* integration time: 50 ms */
+ {
+ { 0, 336000 }, /* gain: x0.25 */
+ { 0, 254543 }, /* gain: x0.33 */
+ { 0, 168000 }, /* gain: x0.5 */
+ { 0, 127275 }, /* gain: x0.66 */
+ { 0, 84000 }, /* gain: x1 */
+ { 0, 42000 }, /* gain: x2 */
+ },
+ /* integration time: 100 ms */
+ {
+ { 0, 168000 }, /* gain: x0.25 */
+ { 0, 127271 }, /* gain: x0.33 */
+ { 0, 84000 }, /* gain: x0.5 */
+ { 0, 63637 }, /* gain: x0.66 */
+ { 0, 42000 }, /* gain: x1 */
+ { 0, 21000 }, /* gain: x2 */
+ },
+ /* integration time: 200 ms */
+ {
+ { 0, 84000 }, /* gain: x0.25 */
+ { 0, 63635 }, /* gain: x0.33 */
+ { 0, 42000 }, /* gain: x0.5 */
+ { 0, 31818 }, /* gain: x0.66 */
+ { 0, 21000 }, /* gain: x1 */
+ { 0, 10500 }, /* gain: x2 */
+ },
+ /* integration time: 400 ms */
+ {
+ { 0, 42000 }, /* gain: x0.25 */
+ { 0, 31817 }, /* gain: x0.33 */
+ { 0, 21000 }, /* gain: x0.5 */
+ { 0, 15909 }, /* gain: x0.66 */
+ { 0, 10500 }, /* gain: x1 */
+ { 0, 5250 }, /* gain: x2 */
+ },
+};
+
+/*
+ * Two bits (RGB_ON_0 and RGB_ON_1) must be cleared to power on the device.
+ */
+static int veml6046x00_power_on(struct veml6046x00_data *data)
+{
+ int ret;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_clear_bits(data->regmap, VEML6046X00_REG_CONF0,
+ VEML6046X00_CONF0_ON_0);
+ if (ret) {
+ dev_err(dev, "Failed to set bit for power on %d\n", ret);
+ return ret;
+ }
+
+ return regmap_clear_bits(data->regmap, VEML6046X00_REG_CONF1,
+ VEML6046X00_CONF1_ON_1);
+}
+
+/*
+ * Two bits (RGB_ON_0 and RGB_ON_1) must be set to power off the device.
+ */
+static int veml6046x00_shutdown(struct veml6046x00_data *data)
+{
+ int ret;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_set_bits(data->regmap, VEML6046X00_REG_CONF0,
+ VEML6046X00_CONF0_ON_0);
+ if (ret) {
+ dev_err(dev, "Failed to set bit for shutdown %d\n", ret);
+ return ret;
+ }
+
+ return regmap_set_bits(data->regmap, VEML6046X00_REG_CONF1,
+ VEML6046X00_CONF1_ON_1);
+}
+
+static void veml6046x00_shutdown_action(void *data)
+{
+ veml6046x00_shutdown(data);
+}
+
+static const struct iio_chan_spec veml6046x00_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6046X00_REG_R,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_RED,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6046X00_SCAN_R,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6046X00_REG_G,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_GREEN,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6046X00_SCAN_G,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6046X00_REG_B,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_BLUE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6046X00_SCAN_B,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ {
+ .type = IIO_INTENSITY,
+ .address = VEML6046X00_REG_IR,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = VEML6046X00_SCAN_IR,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(VEML6046X00_SCAN_TIMESTAMP),
+};
+
+static const struct regmap_config veml6046x00_regmap_config = {
+ .name = "veml6046x00_regm",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = VEML6046X00_REG_INT_H,
+};
+
+static const struct reg_field veml6046x00_rf_int_en =
+ REG_FIELD(VEML6046X00_REG_CONF0, 1, 1);
+
+static const struct reg_field veml6046x00_rf_trig =
+ REG_FIELD(VEML6046X00_REG_CONF0, 2, 2);
+
+static const struct reg_field veml6046x00_rf_mode =
+ REG_FIELD(VEML6046X00_REG_CONF0, 3, 3);
+
+static const struct reg_field veml6046x00_rf_it =
+ REG_FIELD(VEML6046X00_REG_CONF0, 4, 6);
+
+static const struct reg_field veml6046x00_rf_pers =
+ REG_FIELD(VEML6046X00_REG_CONF1, 1, 2);
+
+static int veml6046x00_regfield_init(struct veml6046x00_data *data)
+{
+ struct regmap *regmap = data->regmap;
+ struct device *dev = regmap_get_device(data->regmap);
+ struct regmap_field *rm_field;
+ struct veml6046x00_rf *rf = &data->rf;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_int_en);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->int_en = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_mode);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->mode = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_trig);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->trig = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_it);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->it = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, veml6046x00_rf_pers);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->pers = rm_field;
+
+ return 0;
+}
+
+static int veml6046x00_get_it_index(struct veml6046x00_data *data)
+{
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret)
+ return ret;
+
+ /* the register value is identical to the array index */
+ if (reg >= ARRAY_SIZE(veml6046x00_it))
+ return -EINVAL;
+
+ return reg;
+}
+
+static int veml6046x00_get_it_usec(struct veml6046x00_data *data, unsigned int *it_usec)
+{
+ int ret;
+ unsigned int reg;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret)
+ return ret;
+
+ if (reg >= ARRAY_SIZE(veml6046x00_it))
+ return -EINVAL;
+
+ *it_usec = veml6046x00_it[reg][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int veml6046x00_set_it(struct iio_dev *iio, int val, int val2)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(veml6046x00_it); i++) {
+ if ((veml6046x00_it[i][0] == val) &&
+ (veml6046x00_it[i][1] == val2))
+ return regmap_field_write(data->rf.it, i);
+ }
+
+ return -EINVAL;
+}
+
+static int veml6046x00_get_val_gain_idx(struct veml6046x00_data *data, int val,
+ int val2)
+{
+ unsigned int i;
+ int it_idx;
+
+ it_idx = veml6046x00_get_it_index(data);
+ if (it_idx < 0)
+ return it_idx;
+
+ for (i = 0; i < ARRAY_SIZE(veml6046x00_it_gains[it_idx]); i++) {
+ if ((veml6046x00_it_gains[it_idx][i][0] == val) &&
+ (veml6046x00_it_gains[it_idx][i][1] == val2))
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int veml6046x00_get_gain_idx(struct veml6046x00_data *data)
+{
+ int ret;
+ unsigned int i, reg, reg_gain, reg_pd;
+
+ ret = regmap_read(data->regmap, VEML6046X00_REG_CONF1, &reg);
+ if (ret)
+ return ret;
+
+ reg_gain = FIELD_GET(VEML6046X00_CONF1_GAIN, reg);
+ reg_pd = reg & VEML6046X00_CONF1_PD_D2;
+
+ for (i = 0; i < ARRAY_SIZE(veml6046x00_gain_pd); i++) {
+ if ((veml6046x00_gain_pd[i].gain_sen == reg_gain) &&
+ (veml6046x00_gain_pd[i].pd == reg_pd))
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static int veml6046x00_set_scale(struct iio_dev *iio, int val, int val2)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ unsigned int new_scale;
+ int gain_idx;
+
+ gain_idx = veml6046x00_get_val_gain_idx(data, val, val2);
+ if (gain_idx < 0)
+ return gain_idx;
+
+ new_scale = FIELD_PREP(VEML6046X00_CONF1_GAIN,
+ veml6046x00_gain_pd[gain_idx].gain_sen) |
+ veml6046x00_gain_pd[gain_idx].pd;
+
+ return regmap_update_bits(data->regmap, VEML6046X00_REG_CONF1,
+ VEML6046X00_CONF1_GAIN |
+ VEML6046X00_CONF1_PD_D2,
+ new_scale);
+}
+
+static int veml6046x00_get_scale(struct veml6046x00_data *data,
+ int *val, int *val2)
+{
+ int gain_idx, it_idx;
+
+ gain_idx = veml6046x00_get_gain_idx(data);
+ if (gain_idx < 0)
+ return gain_idx;
+
+ it_idx = veml6046x00_get_it_index(data);
+ if (it_idx < 0)
+ return it_idx;
+
+ *val = veml6046x00_it_gains[it_idx][gain_idx][0];
+ *val2 = veml6046x00_it_gains[it_idx][gain_idx][1];
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+/**
+ * veml6046x00_read_data_ready() - Read data ready bit
+ * @data: Private data.
+ *
+ * Helper function for reading the data-ready bit from the interrupt register.
+ *
+ * Return:
+ * * %1 - Data is available (AF_DATA_READY is set)
+ * * %0 - No data available
+ * * %-EIO - Error during bulk read
+ */
+static int veml6046x00_read_data_ready(struct veml6046x00_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ u8 reg[2];
+
+ /*
+ * Note from the vendor, but not explicitly in the datasheet: we
+ * should always read both registers together.
+ */
+ ret = regmap_bulk_read(data->regmap, VEML6046X00_REG_INT,
+ &reg, sizeof(reg));
+ if (ret) {
+ dev_err(dev, "Failed to read interrupt register %d\n", ret);
+ return -EIO;
+ }
+
+ if (reg[1] & VEML6046X00_INT_DRDY)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * veml6046x00_wait_data_available() - Wait until data is available
+ * @iio: Industrial IO.
+ * @usecs: Microseconds to wait for data.
+ *
+ * This function polls the data-ready bit in the interrupt register, which
+ * signals that there is data available to be read.
+ *
+ * It checks twice, sleeping for usecs microseconds between the two attempts.
+ *
+ * Return:
+ * * %1 - Data is available (AF_DATA_READY is set)
+ * * %0 - Timeout, no data available after usecs timeout
+ * * %-EIO - Error during bulk read
+ */
+static int veml6046x00_wait_data_available(struct iio_dev *iio, unsigned int usecs)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ int ret;
+
+ ret = veml6046x00_read_data_ready(data);
+ if (ret)
+ return ret;
+
+ fsleep(usecs);
+ return veml6046x00_read_data_ready(data);
+}
+
+static int veml6046x00_single_read(struct iio_dev *iio,
+ enum iio_modifier modifier, int *val)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int addr, it_usec;
+ int ret;
+ __le16 reg;
+
+ switch (modifier) {
+ case IIO_MOD_LIGHT_RED:
+ addr = VEML6046X00_REG_R;
+ break;
+ case IIO_MOD_LIGHT_GREEN:
+ addr = VEML6046X00_REG_G;
+ break;
+ case IIO_MOD_LIGHT_BLUE:
+ addr = VEML6046X00_REG_B;
+ break;
+ case IIO_MOD_LIGHT_IR:
+ addr = VEML6046X00_REG_IR;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret)
+ return ret;
+
+ ret = veml6046x00_get_it_usec(data, &it_usec);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get integration time ret: %d", ret);
+ goto out;
+ }
+
+ ret = regmap_field_write(data->rf.mode, 1);
+ if (ret) {
+ dev_err(dev, "Failed to write mode ret: %d", ret);
+ goto out;
+ }
+
+ ret = regmap_field_write(data->rf.trig, 1);
+ if (ret) {
+ dev_err(dev, "Failed to write trigger ret: %d", ret);
+ goto out;
+ }
+
+ /* integration time + 12.5 % to ensure completion */
+ fsleep(it_usec + it_usec / 8);
+
+ ret = veml6046x00_wait_data_available(iio, it_usec * 4);
+ if (ret < 0)
+ goto out;
+ if (ret == 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (!iio_device_claim_direct(iio)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = regmap_bulk_read(data->regmap, addr, &reg, sizeof(reg));
+ iio_device_release_direct(iio);
+ if (ret)
+ goto out;
+
+ *val = le16_to_cpu(reg);
+
+ ret = IIO_VAL_INT;
+
+out:
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int veml6046x00_read_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (chan->type != IIO_INTENSITY)
+ return -EINVAL;
+ return veml6046x00_single_read(iio, chan->channel2, val);
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ return veml6046x00_get_it_usec(data, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return veml6046x00_get_scale(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6046x00_read_avail(struct iio_dev *iio,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ int it_idx;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ *vals = (int *)&veml6046x00_it;
+ *length = 2 * ARRAY_SIZE(veml6046x00_it);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SCALE:
+ it_idx = veml6046x00_get_it_index(data);
+ if (it_idx < 0)
+ return it_idx;
+ *vals = (int *)&veml6046x00_it_gains[it_idx];
+ *length = 2 * ARRAY_SIZE(veml6046x00_it_gains[it_idx]);
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6046x00_write_raw(struct iio_dev *iio,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return veml6046x00_set_it(iio, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return veml6046x00_set_scale(iio, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info veml6046x00_info_no_irq = {
+ .read_raw = veml6046x00_read_raw,
+ .read_avail = veml6046x00_read_avail,
+ .write_raw = veml6046x00_write_raw,
+};
+
+static int veml6046x00_buffer_preenable(struct iio_dev *iio)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = regmap_field_write(data->rf.mode, 0);
+ if (ret) {
+ dev_err(dev, "Failed to set mode %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_field_write(data->rf.trig, 0);
+ if (ret) {
+ /*
+ * no unrolling of mode as it is set appropriately with next
+ * single read.
+ */
+ dev_err(dev, "Failed to set trigger %d\n", ret);
+ return ret;
+ }
+
+ return pm_runtime_resume_and_get(dev);
+}
+
+static int veml6046x00_buffer_postdisable(struct iio_dev *iio)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = regmap_field_write(data->rf.mode, 1);
+ if (ret) {
+ dev_err(dev, "Failed to set mode %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_put_autosuspend(dev);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops veml6046x00_buffer_setup_ops = {
+ .preenable = veml6046x00_buffer_preenable,
+ .postdisable = veml6046x00_buffer_postdisable,
+};
+
+static irqreturn_t veml6046x00_trig_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *iio = pf->indio_dev;
+ struct veml6046x00_data *data = iio_priv(iio);
+ int ret;
+ struct {
+ __le16 chans[4];
+ aligned_s64 timestamp;
+ } scan;
+
+ ret = regmap_bulk_read(data->regmap, VEML6046X00_REG_R,
+ &scan.chans, sizeof(scan.chans));
+ if (ret)
+ goto done;
+
+ iio_push_to_buffers_with_ts(iio, &scan, sizeof(scan),
+ iio_get_time_ns(iio));
+
+done:
+ iio_trigger_notify_done(iio->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int veml6046x00_validate_part_id(struct veml6046x00_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ unsigned int part_id;
+ int ret;
+ __le16 reg;
+
+ ret = regmap_bulk_read(data->regmap, VEML6046X00_REG_ID,
+ &reg, sizeof(reg));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read ID\n");
+
+ part_id = le16_to_cpu(reg);
+ if (part_id != 0x01)
+ dev_info(dev, "Unknown ID %#04x\n", part_id);
+
+ return 0;
+}
+
+static int veml6046x00_setup_device(struct iio_dev *iio)
+{
+ struct veml6046x00_data *data = iio_priv(iio);
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+ __le16 reg16;
+
+ reg16 = cpu_to_le16(VEML6046X00_CONF0_AF);
+ ret = regmap_bulk_write(data->regmap, VEML6046X00_REG_CONF0,
+ &reg16, sizeof(reg16));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set configuration\n");
+
+ reg16 = cpu_to_le16(0);
+ ret = regmap_bulk_write(data->regmap, VEML6046X00_REG_THDL,
+ &reg16, sizeof(reg16));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set low threshold\n");
+
+ reg16 = cpu_to_le16(U16_MAX);
+ ret = regmap_bulk_write(data->regmap, VEML6046X00_REG_THDH,
+ &reg16, sizeof(reg16));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set high threshold\n");
+
+ ret = regmap_bulk_read(data->regmap, VEML6046X00_REG_INT,
+ &reg16, sizeof(reg16));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to clear interrupts\n");
+
+ return 0;
+}
+
+static int veml6046x00_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct veml6046x00_data *data;
+ struct iio_dev *iio;
+ struct regmap *regmap;
+ int ret;
+
+ regmap = devm_regmap_init_i2c(i2c, &veml6046x00_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Failed to set regmap\n");
+
+ iio = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!iio)
+ return -ENOMEM;
+
+ data = iio_priv(iio);
+ /* struct iio_dev is retrieved via dev_get_drvdata(). */
+ i2c_set_clientdata(i2c, iio);
+ data->regmap = regmap;
+
+ ret = veml6046x00_regfield_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init regfield\n");
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable regulator\n");
+
+ /* bring device in a known state and switch device on */
+ ret = veml6046x00_setup_device(iio);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, veml6046x00_shutdown_action, data);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add shut down action\n");
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to activate PM runtime\n");
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable PM runtime\n");
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_autosuspend_delay(dev, VEML6046X00_AUTOSUSPEND_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ ret = veml6046x00_validate_part_id(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to validate device ID\n");
+
+ iio->name = "veml6046x00";
+ iio->channels = veml6046x00_channels;
+ iio->num_channels = ARRAY_SIZE(veml6046x00_channels);
+ iio->modes = INDIO_DIRECT_MODE;
+
+ iio->info = &veml6046x00_info_no_irq;
+
+ ret = devm_iio_triggered_buffer_setup(dev, iio, NULL,
+ veml6046x00_trig_handler,
+ &veml6046x00_buffer_setup_ops);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register triggered buffer");
+
+ pm_runtime_put_autosuspend(dev);
+
+ ret = devm_iio_device_register(dev, iio);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register iio device");
+
+ return 0;
+}
+
+static int veml6046x00_runtime_suspend(struct device *dev)
+{
+ struct veml6046x00_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return veml6046x00_shutdown(data);
+}
+
+static int veml6046x00_runtime_resume(struct device *dev)
+{
+ struct veml6046x00_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return veml6046x00_power_on(data);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(veml6046x00_pm_ops,
+ veml6046x00_runtime_suspend,
+ veml6046x00_runtime_resume, NULL);
+
+static const struct of_device_id veml6046x00_of_match[] = {
+ { .compatible = "vishay,veml6046x00" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, veml6046x00_of_match);
+
+static const struct i2c_device_id veml6046x00_id[] = {
+ { "veml6046x00" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, veml6046x00_id);
+
+static struct i2c_driver veml6046x00_driver = {
+ .driver = {
+ .name = "veml6046x00",
+ .of_match_table = veml6046x00_of_match,
+ .pm = pm_ptr(&veml6046x00_pm_ops),
+ },
+ .probe = veml6046x00_probe,
+ .id_table = veml6046x00_id,
+};
+module_i2c_driver(veml6046x00_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("VEML6046X00 RGBIR Color Sensor");
+MODULE_LICENSE("GPL");
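
To make the new driver's scale handling concrete: with, for example, IT = 100 ms and gain x1, veml6046x00_it_gains yields { 0, 42000 }, i.e. 0.042000 lux per raw count, reported as IIO_VAL_INT_PLUS_MICRO. A hedged arithmetic sketch (the raw value is arbitrary):

/* Assumed configuration: IT = 100 ms, gain x1 -> scale 0.042000. */
unsigned int raw = 1000;		/* hypothetical raw green count */
unsigned int scale_micro = 42000;	/* micro-lux per raw count */
unsigned int mlux = raw * scale_micro / 1000;	/* 42000 mlux = 42 lux */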
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index cc4f2e5404aa..c1314b144367 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -96,11 +96,6 @@ struct vl6180_data {
unsigned int als_it_ms;
unsigned int als_meas_rate;
unsigned int range_meas_rate;
-
- struct {
- u16 chan[2];
- aligned_s64 timestamp;
- } scan;
};
enum { VL6180_ALS, VL6180_RANGE, VL6180_PROX };
@@ -545,6 +540,11 @@ static irqreturn_t vl6180_trigger_handler(int irq, void *priv)
struct vl6180_data *data = iio_priv(indio_dev);
s64 time_ns = iio_get_time_ns(indio_dev);
int ret, bit, i = 0;
+ struct {
+ u16 chan[2];
+ aligned_s64 timestamp;
+ } scan = { };
+
iio_for_each_active_channel(indio_dev, bit) {
if (vl6180_chan_regs_table[bit].word)
@@ -560,10 +560,10 @@ static irqreturn_t vl6180_trigger_handler(int irq, void *priv)
return IRQ_HANDLED;
}
- data->scan.chan[i++] = ret;
+ scan.chan[i++] = ret;
}
- iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), time_ns);
iio_trigger_notify_done(indio_dev->trig);
/* Clear the interrupt flag after data read */
@@ -722,7 +722,7 @@ static int vl6180_probe(struct i2c_client *client)
IRQF_ONESHOT,
indio_dev->name, indio_dev);
if (ret)
- return dev_err_probe(&client->dev, ret, "devm_request_irq error \n");
+ return dev_err_probe(&client->dev, ret, "devm_request_irq error\n");
init_completion(&data->completion);
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 3debf1320ad1..81b812a29044 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -123,7 +123,7 @@ config HID_SENSOR_MAGNETOMETER_3D
select IIO_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
- tristate "HID Magenetometer 3D"
+ tristate "HID Magnetometer 3D"
help
Say yes here to build support for the HID SENSOR
Magnetometer 3D.
@@ -173,6 +173,19 @@ config IIO_ST_MAGN_SPI_3AXIS
To compile this driver as a module, choose M here. The module
will be called st_magn_spi.
+config INFINEON_TLV493D
+ tristate "Infineon TLV493D Low-Power 3D Magnetic Sensor"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here to add support for the Infineon TLV493D-A1B6
+ Low-Power 3D Magnetic Sensor.
+
+ This driver can also be compiled as a module.
+ To compile this driver as a module, choose M here: the module
+ will be called tlv493d.
+
config SENSORS_HMC5843
tristate
select IIO_BUFFER
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 9297723a97d8..dfe970fcacb8 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -23,6 +23,8 @@ st_magn-$(CONFIG_IIO_BUFFER) += st_magn_buffer.o
obj-$(CONFIG_IIO_ST_MAGN_I2C_3AXIS) += st_magn_i2c.o
obj-$(CONFIG_IIO_ST_MAGN_SPI_3AXIS) += st_magn_spi.o
+obj-$(CONFIG_INFINEON_TLV493D) += tlv493d.o
+
obj-$(CONFIG_SENSORS_HMC5843) += hmc5843_core.o
obj-$(CONFIG_SENSORS_HMC5843_I2C) += hmc5843_i2c.o
obj-$(CONFIG_SENSORS_HMC5843_SPI) += hmc5843_spi.o
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 947fe8a475f2..68ece700c7ce 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -583,7 +583,6 @@ static int ak8974_measure_channel(struct ak8974 *ak8974, unsigned long address,
*val = (s16)le16_to_cpu(hw_values[address]);
out_unlock:
mutex_unlock(&ak8974->lock);
- pm_runtime_mark_last_busy(&ak8974->i2c->dev);
pm_runtime_put_autosuspend(&ak8974->i2c->dev);
return ret;
@@ -678,7 +677,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
out_unlock:
mutex_unlock(&ak8974->lock);
- pm_runtime_mark_last_busy(&ak8974->i2c->dev);
pm_runtime_put_autosuspend(&ak8974->i2c->dev);
}
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index a1e92b2abffd..3fd0171e5d69 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -775,7 +775,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
mutex_unlock(&data->lock);
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
/* Swap bytes and convert to valid range. */
diff --git a/drivers/iio/magnetometer/als31300.c b/drivers/iio/magnetometer/als31300.c
index f72af829715f..2a2677428ed5 100644
--- a/drivers/iio/magnetometer/als31300.c
+++ b/drivers/iio/magnetometer/als31300.c
@@ -140,7 +140,6 @@ static int als31300_get_measure(struct als31300_data *data,
*z = ALS31300_DATA_Z_GET(buf);
out:
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -156,7 +155,6 @@ static int als31300_read_raw(struct iio_dev *indio_dev,
int ret;
switch (mask) {
- case IIO_CHAN_INFO_PROCESSED:
case IIO_CHAN_INFO_RAW:
ret = als31300_get_measure(data, &t, &x, &y, &z);
if (ret)
@@ -373,7 +371,7 @@ static int als31300_probe(struct i2c_client *i2c)
ret = devm_add_action_or_reset(dev, als31300_power_down, data);
if (ret)
- return dev_err_probe(dev, ret, "failed to add powerdown action\n");
+ return ret;
indio_dev->info = &als31300_info;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -401,7 +399,6 @@ static int als31300_probe(struct i2c_client *i2c)
pm_runtime_set_autosuspend_delay(dev, 200);
pm_runtime_use_autosuspend(dev);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
ret = devm_iio_device_register(dev, indio_dev);
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index 761daead5ada..6a73f6e2f1f0 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -257,22 +257,17 @@ static int bmc150_magn_set_power_mode(struct bmc150_magn_data *data,
static int bmc150_magn_set_power_state(struct bmc150_magn_data *data, bool on)
{
-#ifdef CONFIG_PM
- int ret;
+ int ret = 0;
- if (on) {
+ if (on)
ret = pm_runtime_resume_and_get(data->dev);
- } else {
- pm_runtime_mark_last_busy(data->dev);
- ret = pm_runtime_put_autosuspend(data->dev);
- }
-
+ else
+ pm_runtime_put_autosuspend(data->dev);
if (ret < 0) {
dev_err(data->dev,
"failed to change power state to %d\n", on);
return ret;
}
-#endif
return 0;
}
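
The bmc150_magn hunk drops an #ifdef CONFIG_PM guard that was never needed: the runtime-PM helpers used here have static-inline stubs when CONFIG_PM is disabled. The rewritten helper also settles on a deliberate asymmetry, sketched here: only the resume path can fail the operation, while the put is best effort and leaves ret at 0.

int ret = 0;

if (on)
	ret = pm_runtime_resume_and_get(dev);	/* may fail: propagated */
else
	pm_runtime_put_autosuspend(dev);	/* best effort: ret stays 0 */

if (ret < 0)
	return ret;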
diff --git a/drivers/iio/magnetometer/tlv493d.c b/drivers/iio/magnetometer/tlv493d.c
new file mode 100644
index 000000000000..ec53fd40277b
--- /dev/null
+++ b/drivers/iio/magnetometer/tlv493d.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Infineon TLV493D Low-Power 3D Magnetic Sensor
+ *
+ * Copyright (C) 2025 Dixit Parmar <dixitparmar19@gmail.com>
+ */
+
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+/*
+ * TLV493D sensor I2C communication note:
+ *
+ * The sensor only supports direct byte-stream writes starting at register
+ * address 0x0, so modifying any write register means writing the whole
+ * set of write registers from address 0x0.
+ * The I2C write frame must not contain the register address, only the
+ * raw byte stream for the write registers.
+ * I2C Frame: |S|SlaveAddr Wr|Ack|Byte[0]|Ack|Byte[1]|Ack|.....|Sp|
+ *
+ * Reads likewise start at register address 0x0 and cover as many bytes
+ * as need to be read.
+ * The I2C read frame must not contain the register address either.
+ * I2C Frame: |S|SlaveAddr Rd|Ack|Byte[0]|Ack|Byte[1]|Ack|.....|Sp|
+ */
+
+#define TLV493D_RD_REG_BX 0x00
+#define TLV493D_RD_REG_BY 0x01
+#define TLV493D_RD_REG_BZ 0x02
+#define TLV493D_RD_REG_TEMP 0x03
+#define TLV493D_RD_REG_BX2 0x04
+#define TLV493D_RD_REG_BZ2 0x05
+#define TLV493D_RD_REG_TEMP2 0x06
+#define TLV493D_RD_REG_RES1 0x07
+#define TLV493D_RD_REG_RES2 0x08
+#define TLV493D_RD_REG_RES3 0x09
+#define TLV493D_RD_REG_MAX 0x0a
+
+#define TLV493D_WR_REG_MODE1 0x01
+#define TLV493D_WR_REG_MODE2 0x03
+#define TLV493D_WR_REG_MAX 0x04
+
+#define TLV493D_BX_MAG_X_AXIS_MSB GENMASK(7, 0)
+#define TLV493D_BX2_MAG_X_AXIS_LSB GENMASK(7, 4)
+#define TLV493D_BY_MAG_Y_AXIS_MSB GENMASK(7, 0)
+#define TLV493D_BX2_MAG_Y_AXIS_LSB GENMASK(3, 0)
+#define TLV493D_BZ_MAG_Z_AXIS_MSB GENMASK(7, 0)
+#define TLV493D_BZ2_MAG_Z_AXIS_LSB GENMASK(3, 0)
+#define TLV493D_TEMP_TEMP_MSB GENMASK(7, 4)
+#define TLV493D_TEMP2_TEMP_LSB GENMASK(7, 0)
+#define TLV493D_TEMP_CHANNEL GENMASK(1, 0)
+#define TLV493D_MODE1_MOD_LOWFAST GENMASK(1, 0)
+#define TLV493D_MODE2_LP_PERIOD BIT(6)
+#define TLV493D_RD_REG_RES1_WR_MASK GENMASK(4, 3)
+#define TLV493D_RD_REG_RES2_WR_MASK GENMASK(7, 0)
+#define TLV493D_RD_REG_RES3_WR_MASK GENMASK(4, 0)
+
+enum tlv493d_channels {
+ TLV493D_AXIS_X,
+ TLV493D_AXIS_Y,
+ TLV493D_AXIS_Z,
+ TLV493D_TEMPERATURE,
+};
+
+enum tlv493d_op_mode {
+ TLV493D_OP_MODE_POWERDOWN,
+ TLV493D_OP_MODE_FAST,
+ TLV493D_OP_MODE_LOWPOWER,
+ TLV493D_OP_MODE_ULTRA_LOWPOWER,
+ TLV493D_OP_MODE_MASTERCONTROLLED,
+};
+
+struct tlv493d_data {
+ struct i2c_client *client;
+ /* Protects against simultaneous sensor access and register reads */
+ struct mutex lock;
+ enum tlv493d_op_mode mode;
+ u8 wr_regs[TLV493D_WR_REG_MAX];
+};
+
+/*
+ * Each mode has a different measurement sampling time; this time is
+ * used to derive the sleep and timeout used when polling data from
+ * the sensor.
+ * Power-down mode: No measurement.
+ * Fast mode: Freq: 3.3 kHz. Measurement time: 305 usec.
+ * Low-power mode: Freq: 100 Hz. Measurement time: 10 msec.
+ * Ultra low-power mode: Freq: 10 Hz. Measurement time: 100 msec.
+ * Master controlled mode: Freq: 3.3 kHz. Measurement time: 305 usec.
+ */
+static const u32 tlv493d_sample_rate_us[] = {
+ [TLV493D_OP_MODE_POWERDOWN] = 0,
+ [TLV493D_OP_MODE_FAST] = 305,
+ [TLV493D_OP_MODE_LOWPOWER] = 10 * USEC_PER_MSEC,
+ [TLV493D_OP_MODE_ULTRA_LOWPOWER] = 100 * USEC_PER_MSEC,
+ [TLV493D_OP_MODE_MASTERCONTROLLED] = 305,
+};
+
+static int tlv493d_write_all_regs(struct tlv493d_data *data)
+{
+ int ret;
+ struct device *dev = &data->client->dev;
+
+ ret = i2c_master_send(data->client, data->wr_regs, ARRAY_SIZE(data->wr_regs));
+ if (ret < 0) {
+ dev_err(dev, "i2c write registers failed, error: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tlv493d_set_operating_mode(struct tlv493d_data *data, enum tlv493d_op_mode mode)
+{
+ u8 *mode1_cfg = &data->wr_regs[TLV493D_WR_REG_MODE1];
+ u8 *mode2_cfg = &data->wr_regs[TLV493D_WR_REG_MODE2];
+
+ switch (mode) {
+ case TLV493D_OP_MODE_POWERDOWN:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 0);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 0);
+ break;
+
+ case TLV493D_OP_MODE_FAST:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 1);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 0);
+ break;
+
+ case TLV493D_OP_MODE_LOWPOWER:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 2);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 1);
+ break;
+
+ case TLV493D_OP_MODE_ULTRA_LOWPOWER:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 2);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 0);
+ break;
+
+ case TLV493D_OP_MODE_MASTERCONTROLLED:
+ FIELD_MODIFY(TLV493D_MODE1_MOD_LOWFAST, mode1_cfg, 3);
+ FIELD_MODIFY(TLV493D_MODE2_LP_PERIOD, mode2_cfg, 0);
+ break;
+ }
+
+ return tlv493d_write_all_regs(data);
+}
+
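+/*
+ * Assemble a signed 12-bit measurement from its MSB/LSB registers.
+ * Worked example with hypothetical bytes: for Bx = 0x12 and Bx2 = 0xa0,
+ * X = 0x12 << 4 | 0xa = 0x12a, which sign_extend32(val, 11) leaves as
+ * 298; for Bx = 0xff and Bx2 = 0xf0, X = 0xfff, which sign-extends
+ * to -1.
+ */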
+static s16 tlv493d_get_channel_data(u8 *b, enum tlv493d_channels ch)
+{
+ u16 val;
+
+ switch (ch) {
+ case TLV493D_AXIS_X:
+ val = FIELD_GET(TLV493D_BX_MAG_X_AXIS_MSB, b[TLV493D_RD_REG_BX]) << 4 |
+ FIELD_GET(TLV493D_BX2_MAG_X_AXIS_LSB, b[TLV493D_RD_REG_BX2]);
+ break;
+ case TLV493D_AXIS_Y:
+ val = FIELD_GET(TLV493D_BY_MAG_Y_AXIS_MSB, b[TLV493D_RD_REG_BY]) << 4 |
+ FIELD_GET(TLV493D_BX2_MAG_Y_AXIS_LSB, b[TLV493D_RD_REG_BX2]);
+ break;
+ case TLV493D_AXIS_Z:
+ val = FIELD_GET(TLV493D_BZ_MAG_Z_AXIS_MSB, b[TLV493D_RD_REG_BZ]) << 4 |
+ FIELD_GET(TLV493D_BZ2_MAG_Z_AXIS_LSB, b[TLV493D_RD_REG_BZ2]);
+ break;
+ case TLV493D_TEMPERATURE:
+ val = FIELD_GET(TLV493D_TEMP_TEMP_MSB, b[TLV493D_RD_REG_TEMP]) << 8 |
+ FIELD_GET(TLV493D_TEMP2_TEMP_LSB, b[TLV493D_RD_REG_TEMP2]);
+ break;
+ }
+
+ return sign_extend32(val, 11);
+}
+
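+/*
+ * Read one measurement frame (registers 0x0-0x6) and split it into the
+ * individual channels. Note that read_poll_timeout() below re-issues
+ * the full i2c_master_recv() on every iteration, which matches the
+ * sensor's constraint of always reading from register address 0x0.
+ */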
+static int tlv493d_get_measurements(struct tlv493d_data *data, s16 *x, s16 *y,
+ s16 *z, s16 *t)
+{
+ u8 buff[7] = {};
+ int err, ret;
+ struct device *dev = &data->client->dev;
+ u32 sleep_us = tlv493d_sample_rate_us[data->mode];
+
+ guard(mutex)(&data->lock);
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Poll until the data is valid: data is valid once the
+ * TLV493D_TEMP_CHANNEL bits of TLV493D_RD_REG_TEMP read 0. The
+ * sampling time depends on the sensor mode; poll for up to 3x the
+ * sampling time (e.g. every 305 us for at most 915 us in
+ * master-controlled mode).
+ */
+ ret = read_poll_timeout(i2c_master_recv, err,
+ err || !FIELD_GET(TLV493D_TEMP_CHANNEL, buff[TLV493D_RD_REG_TEMP]),
+ sleep_us, 3 * sleep_us, false, data->client, buff,
+ ARRAY_SIZE(buff));
+ if (ret) {
+ dev_err(dev, "i2c read poll timeout, error:%d\n", ret);
+ goto out_put_autosuspend;
+ }
+ if (err < 0) {
+ dev_err(dev, "i2c read data failed, error:%d\n", err);
+ ret = err;
+ goto out_put_autosuspend;
+ }
+
+ *x = tlv493d_get_channel_data(buff, TLV493D_AXIS_X);
+ *y = tlv493d_get_channel_data(buff, TLV493D_AXIS_Y);
+ *z = tlv493d_get_channel_data(buff, TLV493D_AXIS_Z);
+ *t = tlv493d_get_channel_data(buff, TLV493D_TEMPERATURE);
+
+out_put_autosuspend:
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+static int tlv493d_init(struct tlv493d_data *data)
+{
+ int ret;
+ u8 buff[TLV493D_RD_REG_MAX];
+ struct device *dev = &data->client->dev;
+
+ /*
+ * Sensor initialization requires the following steps:
+ * 1. Power up the sensor.
+ * 2. Read and store the read-register map (0x0-0x9).
+ * 3. Copy the values of the read reserved registers into the
+ * corresponding write reserved fields (0x0-0x3).
+ * 4. Set the operating mode.
+ * 5. Write all registers.
+ */
+ ret = i2c_master_recv(data->client, buff, ARRAY_SIZE(buff));
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "i2c read failed\n");
+
+ /* Write register 0x0 is reserved and does not need to be updated. */
+ data->wr_regs[0] = 0;
+ data->wr_regs[1] = buff[TLV493D_RD_REG_RES1] & TLV493D_RD_REG_RES1_WR_MASK;
+ data->wr_regs[2] = buff[TLV493D_RD_REG_RES2] & TLV493D_RD_REG_RES2_WR_MASK;
+ data->wr_regs[3] = buff[TLV493D_RD_REG_RES3] & TLV493D_RD_REG_RES3_WR_MASK;
+
+ ret = tlv493d_set_operating_mode(data, data->mode);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to set operating mode\n");
+
+ return 0;
+}
+
+static int tlv493d_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val,
+ int *val2, long mask)
+{
+ struct tlv493d_data *data = iio_priv(indio_dev);
+ s16 x, y, z, t;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = tlv493d_get_measurements(data, &x, &y, &z, &t);
+ if (ret)
+ return ret;
+
+ switch (chan->address) {
+ case TLV493D_AXIS_X:
+ *val = x;
+ return IIO_VAL_INT;
+ case TLV493D_AXIS_Y:
+ *val = y;
+ return IIO_VAL_INT;
+ case TLV493D_AXIS_Z:
+ *val = z;
+ return IIO_VAL_INT;
+ case TLV493D_TEMPERATURE:
+ *val = t;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_MAGN:
+ /*
+ * Magnetic field scale: 0.0098 mT (i.e. 9.8 µT) per LSB.
+ * Expressed in Gauss: 0.0098 mT * 10 = 0.098 G.
+ */
+ *val = 98;
+ *val2 = 1000;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ /*
+ * Temperature scale: 1.1 °C per LSB, expressed as 1100 m°C.
+ * Returned as an integer for the IIO core to apply:
+ * temp = (raw + offset) * scale
+ */
+ *val = 1100;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /*
+ * The temperature offset includes the sensor-specific raw
+ * offset plus compensation for the +25 °C bias in the formula:
+ * offset = -raw_offset + (25000 / 1100)
+ * = -340 + 22.72 = -317.28
+ */
+ *val = -31728;
+ *val2 = 100;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
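+
+/*
+ * Userspace example with hypothetical readings: in_magn_x_raw = 100 at
+ * the 0.098 G scale above gives 100 * 0.098 = 9.8 Gauss; in_temp_raw =
+ * 340 gives (340 + -317.28) * 1100 ~= 24992 m°C, i.e. about 25 °C.
+ */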
+
+static irqreturn_t tlv493d_trigger_handler(int irq, void *ptr)
+{
+ int ret;
+ s16 x, y, z, t;
+ struct iio_poll_func *pf = ptr;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct tlv493d_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ struct {
+ s16 channels[3];
+ s16 temperature;
+ aligned_s64 timestamp;
+ } scan;
+
+ ret = tlv493d_get_measurements(data, &x, &y, &z, &t);
+ if (ret) {
+ dev_err(dev, "failed to read sensor data\n");
+ goto out_trigger_notify;
+ }
+
+ scan.channels[0] = x;
+ scan.channels[1] = y;
+ scan.channels[2] = z;
+ scan.temperature = t;
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), pf->timestamp);
+
+out_trigger_notify:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+#define TLV493D_AXIS_CHANNEL(axis, index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ }
+
+static const struct iio_chan_spec tlv493d_channels[] = {
+ TLV493D_AXIS_CHANNEL(X, TLV493D_AXIS_X),
+ TLV493D_AXIS_CHANNEL(Y, TLV493D_AXIS_Y),
+ TLV493D_AXIS_CHANNEL(Z, TLV493D_AXIS_Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = TLV493D_TEMPERATURE,
+ .scan_index = TLV493D_TEMPERATURE,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 12,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static const struct iio_info tlv493d_info = {
+ .read_raw = tlv493d_read_raw,
+};
+
+static const unsigned long tlv493d_scan_masks[] = { GENMASK(3, 0), 0 };
+
+static int tlv493d_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct tlv493d_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable regulator\n");
+
+ /*
+ * Set the sensor's default operating mode to master-controlled
+ * mode, since it performs a measurement cycle only on request and
+ * stays in the power-down state until the next cycle is initiated.
+ */
+ data->mode = TLV493D_OP_MODE_MASTERCONTROLLED;
+ ret = tlv493d_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize\n");
+
+ indio_dev->info = &tlv493d_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = client->name;
+ indio_dev->channels = tlv493d_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tlv493d_channels);
+ indio_dev->available_scan_masks = tlv493d_scan_masks;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ tlv493d_trigger_handler,
+ NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "iio triggered buffer setup failed\n");
+
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_autosuspend_delay(dev, 500);
+ pm_runtime_use_autosuspend(dev);
+
+ pm_runtime_put_autosuspend(dev);
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "iio device register failed\n");
+
+ return 0;
+}
+
+static int tlv493d_runtime_suspend(struct device *dev)
+{
+ struct tlv493d_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return tlv493d_set_operating_mode(data, TLV493D_OP_MODE_POWERDOWN);
+}
+
+static int tlv493d_runtime_resume(struct device *dev)
+{
+ struct tlv493d_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return tlv493d_set_operating_mode(data, data->mode);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(tlv493d_pm_ops, tlv493d_runtime_suspend,
+ tlv493d_runtime_resume, NULL);
+
+static const struct i2c_device_id tlv493d_id[] = {
+ { "tlv493d" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tlv493d_id);
+
+static const struct of_device_id tlv493d_of_match[] = {
+ { .compatible = "infineon,tlv493d-a1b6" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tlv493d_of_match);
+
+static struct i2c_driver tlv493d_driver = {
+ .driver = {
+ .name = "tlv493d",
+ .of_match_table = tlv493d_of_match,
+ .pm = pm_ptr(&tlv493d_pm_ops),
+ },
+ .probe = tlv493d_probe,
+ .id_table = tlv493d_id,
+};
+module_i2c_driver(tlv493d_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Infineon TLV493D Low-Power 3D Magnetic Sensor");
+MODULE_AUTHOR("Dixit Parmar <dixitparmar19@gmail.com>");
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
index 2ca5c26f0091..2adc3c036ab4 100644
--- a/drivers/iio/magnetometer/tmag5273.c
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -287,7 +287,6 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
int ret;
switch (mask) {
- case IIO_CHAN_INFO_PROCESSED:
case IIO_CHAN_INFO_RAW:
ret = pm_runtime_resume_and_get(data->dev);
if (ret < 0)
@@ -295,7 +294,6 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
if (ret)
@@ -642,7 +640,7 @@ static int tmag5273_probe(struct i2c_client *i2c)
*/
ret = devm_add_action_or_reset(dev, tmag5273_power_down, data);
if (ret)
- return dev_err_probe(dev, ret, "failed to add powerdown action\n");
+ return ret;
ret = pm_runtime_set_active(dev);
if (ret < 0)
@@ -668,7 +666,6 @@ static int tmag5273_probe(struct i2c_client *i2c)
indio_dev->channels = tmag5273_channels;
indio_dev->num_channels = ARRAY_SIZE(tmag5273_channels);
- pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
ret = devm_iio_device_register(dev, indio_dev);
diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c
index 340607111d9a..d49e37edcbed 100644
--- a/drivers/iio/magnetometer/yamaha-yas530.c
+++ b/drivers/iio/magnetometer/yamaha-yas530.c
@@ -623,7 +623,6 @@ static int yas5xx_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
pm_runtime_get_sync(yas5xx->dev);
ret = ci->get_measure(yas5xx, &t, &x, &y, &z);
- pm_runtime_mark_last_busy(yas5xx->dev);
pm_runtime_put_autosuspend(yas5xx->dev);
if (ret)
return ret;
@@ -664,7 +663,6 @@ static void yas5xx_fill_buffer(struct iio_dev *indio_dev)
pm_runtime_get_sync(yas5xx->dev);
ret = ci->get_measure(yas5xx, &t, &x, &y, &z);
- pm_runtime_mark_last_busy(yas5xx->dev);
pm_runtime_put_autosuspend(yas5xx->dev);
if (ret) {
dev_err(yas5xx->dev, "error refilling buffer\n");
diff --git a/drivers/iio/position/hid-sensor-custom-intel-hinge.c b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
index bff7039690ac..a26d391661fd 100644
--- a/drivers/iio/position/hid-sensor-custom-intel-hinge.c
+++ b/drivers/iio/position/hid-sensor-custom-intel-hinge.c
@@ -176,7 +176,7 @@ static int hinge_read_label(struct iio_dev *indio_dev,
{
struct hinge_state *st = iio_priv(indio_dev);
- return sprintf(label, "%s\n", st->labels[chan->channel]);
+ return sysfs_emit(label, "%s\n", st->labels[chan->channel]);
}
static const struct iio_info hinge_info = {
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index 030498d0b763..eccc2a34358f 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -321,10 +321,8 @@ static int lmp91000_probe(struct i2c_client *client)
data->trig = devm_iio_trigger_alloc(dev, "%s-mux%d",
indio_dev->name,
iio_device_id(indio_dev));
- if (!data->trig) {
- dev_err(dev, "cannot allocate iio trigger.\n");
+ if (!data->trig)
return -ENOMEM;
- }
init_completion(&data->completion);
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index d2cb8c871f6a..2fe9dc90cceb 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -339,4 +339,16 @@ config ZPA2326_SPI
tristate
select REGMAP_SPI
+config ADP810
+ tristate "Aosong adp810 differential pressure and temperature sensor"
+ depends on I2C
+ select CRC8
+ help
+ Say yes here to build support for the Aosong ADP810 differential
+ pressure and temperature sensor. The ADP810 can measure differential
+ pressure up to 500 Pa and supports an I2C interface for data
+ communication.
+
+ To compile this driver as a module, choose M here: the module will
+ be called adp810.
+
endmenu
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index 6482288e07ee..a21443e992b9 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ABP060MG) += abp060mg.o
+obj-$(CONFIG_ADP810) += adp810.o
obj-$(CONFIG_ROHM_BM1390) += rohm-bm1390.o
obj-$(CONFIG_BMP280) += bmp280.o
bmp280-objs := bmp280-core.o bmp280-regmap.o
@@ -15,6 +16,7 @@ obj-$(CONFIG_DPS310) += dps310.o
obj-$(CONFIG_IIO_CROS_EC_BARO) += cros_ec_baro.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
obj-$(CONFIG_HP03) += hp03.o
+obj-$(CONFIG_HP206C) += hp206c.o
obj-$(CONFIG_HSC030PA) += hsc030pa.o
obj-$(CONFIG_HSC030PA_I2C) += hsc030pa_i2c.o
obj-$(CONFIG_HSC030PA_SPI) += hsc030pa_spi.o
@@ -34,11 +36,9 @@ obj-$(CONFIG_SDP500) += sdp500.o
obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
+obj-$(CONFIG_IIO_ST_PRESS_I2C) += st_pressure_i2c.o
+obj-$(CONFIG_IIO_ST_PRESS_SPI) += st_pressure_spi.o
obj-$(CONFIG_T5403) += t5403.o
-obj-$(CONFIG_HP206C) += hp206c.o
obj-$(CONFIG_ZPA2326) += zpa2326.o
obj-$(CONFIG_ZPA2326_I2C) += zpa2326_i2c.o
obj-$(CONFIG_ZPA2326_SPI) += zpa2326_spi.o
-
-obj-$(CONFIG_IIO_ST_PRESS_I2C) += st_pressure_i2c.o
-obj-$(CONFIG_IIO_ST_PRESS_SPI) += st_pressure_spi.o
diff --git a/drivers/iio/pressure/adp810.c b/drivers/iio/pressure/adp810.c
new file mode 100644
index 000000000000..5282612d1309
--- /dev/null
+++ b/drivers/iio/pressure/adp810.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Akhilesh Patil <akhilesh@ee.iitb.ac.in>
+ *
+ * Driver for adp810 pressure and temperature sensor
+ * Datasheet:
+ * https://aosong.com/userfiles/files/media/Datasheet%20ADP810-Digital.pdf
+ */
+
+#include <linux/array_size.h>
+#include <linux/cleanup.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+/*
+ * Refer section 5.4 checksum calculation from datasheet.
+ * This sensor uses CRC polynomial x^8 + x^5 + x^4 + 1 (0x31)
+ */
+#define ADP810_CRC8_POLYNOMIAL 0x31
+
+DECLARE_CRC8_TABLE(crc_table);
+
+/*
+ * Buffer holding the 9 bytes of measurement data read from the sensor.
+ * Use __packed to avoid any padding, as the data sent by the sensor is
+ * a strictly contiguous 9-byte stream.
+ */
+struct adp810_read_buf {
+ __be16 dp;
+ u8 dp_crc;
+ __be16 tmp;
+ u8 tmp_crc;
+ __be16 sf;
+ u8 sf_crc;
+} __packed;
+
+struct adp810_data {
+ struct i2c_client *client;
+ /* Use lock to synchronize access to device during read sequence */
+ struct mutex lock;
+};
+
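+/*
+ * Each 16-bit word in the frame is followed by an 8-bit CRC computed
+ * with polynomial 0x31 and init value 0xff, MSB first. Worked example
+ * with hypothetical data bytes: crc8() over { 0xbe, 0xef } yields 0x92,
+ * so the triplet 0xbe 0xef 0x92 passes the checks below.
+ */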
+static int adp810_measure(struct adp810_data *data, struct adp810_read_buf *buf)
+{
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
+ int ret;
+ u8 trig_cmd[2] = {0x37, 0x2d};
+
+ /* Send trigger command to the sensor for measurement */
+ ret = i2c_master_send(client, trig_cmd, sizeof(trig_cmd));
+ if (ret < 0) {
+ dev_err(dev, "Error sending trigger command\n");
+ return ret;
+ }
+ if (ret != sizeof(trig_cmd))
+ return -EIO;
+
+ /*
+ * Wait for the sensor to acquire data. As per datasheet section 5.3.1,
+ * a delay of at least 10 ms before reading from the sensor is
+ * recommended. Wait 20 ms here for a safe margin on top of the
+ * recommendation and to compensate for any possible variations.
+ */
+ msleep(20);
+
+ /* Read sensor values */
+ ret = i2c_master_recv(client, (char *)buf, sizeof(*buf));
+ if (ret < 0) {
+ dev_err(dev, "Error reading from sensor\n");
+ return ret;
+ }
+ if (ret != sizeof(*buf))
+ return -EIO;
+
+ /* CRC checks */
+ crc8_populate_msb(crc_table, ADP810_CRC8_POLYNOMIAL);
+ if (buf->dp_crc != crc8(crc_table, (u8 *)&buf->dp, 0x2, CRC8_INIT_VALUE)) {
+ dev_err(dev, "CRC error for pressure\n");
+ return -EIO;
+ }
+
+ if (buf->tmp_crc != crc8(crc_table, (u8 *)&buf->tmp, 0x2, CRC8_INIT_VALUE)) {
+ dev_err(dev, "CRC error for temperature\n");
+ return -EIO;
+ }
+
+ if (buf->sf_crc != crc8(crc_table, (u8 *)&buf->sf, 0x2, CRC8_INIT_VALUE)) {
+ dev_err(dev, "CRC error for scale\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int adp810_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct adp810_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ struct adp810_read_buf buf = { };
+ int ret;
+
+ scoped_guard(mutex, &data->lock) {
+ ret = adp810_measure(data, &buf);
+ if (ret) {
+ dev_err(dev, "Failed to read from device\n");
+ return ret;
+ }
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = get_unaligned_be16(&buf.dp);
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ *val = get_unaligned_be16(&buf.tmp);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = get_unaligned_be16(&buf.sf);
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ *val = 200;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info adp810_info = {
+ .read_raw = adp810_read_raw,
+};
+
+static const struct iio_chan_spec adp810_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int adp810_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct adp810_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ indio_dev->name = "adp810";
+ indio_dev->channels = adp810_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adp810_channels);
+ indio_dev->info = &adp810_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register IIO device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id adp810_id_table[] = {
+ { "adp810" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adp810_id_table);
+
+static const struct of_device_id adp810_of_table[] = {
+ { .compatible = "aosong,adp810" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adp810_of_table);
+
+static struct i2c_driver adp810_driver = {
+ .driver = {
+ .name = "adp810",
+ .of_match_table = adp810_of_table,
+ },
+ .probe = adp810_probe,
+ .id_table = adp810_id_table,
+};
+module_i2c_driver(adp810_driver);
+
+MODULE_AUTHOR("Akhilesh Patil <akhilesh@ee.iitb.ac.in>");
+MODULE_DESCRIPTION("Driver for Aosong ADP810 sensor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 6cdc8ed53520..d983ce9c0b99 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -752,7 +752,6 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
pm_runtime_get_sync(data->dev);
ret = bmp280_read_raw_impl(indio_dev, chan, val, val2, mask);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -927,7 +926,6 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
pm_runtime_get_sync(data->dev);
ret = bmp280_write_raw_impl(indio_dev, chan, val, val2, mask);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -1042,13 +1040,16 @@ static int bmp280_wait_conv(struct bmp280_data *data)
unsigned int reg, meas_time_us;
int ret;
- /* Check if we are using a BME280 device */
- if (data->oversampling_humid)
- meas_time_us = BMP280_PRESS_HUMID_MEAS_OFFSET +
- BIT(data->oversampling_humid) * BMP280_MEAS_DUR;
+ /* Constant part of the measurement time */
+ meas_time_us = BMP280_MEAS_OFFSET;
- else
- meas_time_us = 0;
+ /*
+ * If this is a BME280 device, add the humidity
+ * measurement time.
+ */
+ if (data->chip_info->oversampling_humid_avail)
+ meas_time_us += BMP280_PRESS_HUMID_MEAS_OFFSET +
+ BIT(data->oversampling_humid) * BMP280_MEAS_DUR;
/* Pressure measurement time */
meas_time_us += BMP280_PRESS_HUMID_MEAS_OFFSET +
@@ -2255,7 +2256,6 @@ static int bmp580_nvmem_read(void *priv, unsigned int offset, void *val,
pm_runtime_get_sync(data->dev);
ret = bmp580_nvmem_read_impl(priv, offset, val, bytes);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -2330,7 +2330,6 @@ static int bmp580_nvmem_write(void *priv, unsigned int offset, void *val,
pm_runtime_get_sync(data->dev);
ret = bmp580_nvmem_write_impl(priv, offset, val, bytes);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return ret;
@@ -3120,7 +3119,6 @@ static int bmp280_buffer_postdisable(struct iio_dev *indio_dev)
{
struct bmp280_data *data = iio_priv(indio_dev);
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return 0;
@@ -3217,8 +3215,7 @@ int bmp280_common_probe(struct device *dev,
return dev_err_probe(dev, PTR_ERR(gpiod), "failed to get reset GPIO\n");
/* Deassert the signal */
- dev_info(dev, "release reset\n");
- gpiod_set_value(gpiod, 0);
+ gpiod_set_value_cansleep(gpiod, 0);
data->regmap = regmap;
diff --git a/drivers/iio/pressure/dlhl60d.c b/drivers/iio/pressure/dlhl60d.c
index 6a13cf2eaf50..8bad7162fec6 100644
--- a/drivers/iio/pressure/dlhl60d.c
+++ b/drivers/iio/pressure/dlhl60d.c
@@ -289,10 +289,8 @@ static int dlh_probe(struct i2c_client *client)
}
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
- if (!indio_dev) {
- dev_err(&client->dev, "failed to allocate iio device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/iio/pressure/icp10100.c b/drivers/iio/pressure/icp10100.c
index 1951c1cc84cf..3d83d0098a57 100644
--- a/drivers/iio/pressure/icp10100.c
+++ b/drivers/iio/pressure/icp10100.c
@@ -265,7 +265,6 @@ static int icp10100_get_measures(struct icp10100_state *st,
(be16_to_cpu(measures[1]) >> 8);
*temperature = be16_to_cpu(measures[2]);
- pm_runtime_mark_last_busy(&st->client->dev);
error_measure:
pm_runtime_put_autosuspend(&st->client->dev);
return ret;
diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c
index 71beb28b7f2c..830a5065c008 100644
--- a/drivers/iio/pressure/mpl115.c
+++ b/drivers/iio/pressure/mpl115.c
@@ -108,7 +108,6 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
ret = mpl115_comp_pressure(data, val, val2);
if (ret < 0)
return ret;
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
return IIO_VAL_INT_PLUS_MICRO;
@@ -118,7 +117,6 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
ret = mpl115_read_temp(data);
if (ret < 0)
return ret;
- pm_runtime_mark_last_busy(data->dev);
pm_runtime_put_autosuspend(data->dev);
*val = ret >> 6;
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 579da60ef441..aeac1586f12e 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -7,38 +7,97 @@
* (7-bit I2C slave address 0x60)
*
* TODO: FIFO buffer, altimeter mode, oversampling, continuous mode,
- * interrupts, user offset correction, raw mode
+ * user offset correction, raw mode
*/
-#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/unaligned.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/iio/trigger_consumer.h>
-#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
-#include <linux/delay.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/trigger.h>
#define MPL3115_STATUS 0x00
#define MPL3115_OUT_PRESS 0x01 /* MSB first, 20 bit */
#define MPL3115_OUT_TEMP 0x04 /* MSB first, 12 bit */
#define MPL3115_WHO_AM_I 0x0c
+#define MPL3115_INT_SOURCE 0x12
+#define MPL3115_PT_DATA_CFG 0x13
+#define MPL3115_PRESS_TGT 0x16 /* MSB first, 16 bit */
+#define MPL3115_TEMP_TGT 0x18
#define MPL3115_CTRL_REG1 0x26
+#define MPL3115_CTRL_REG2 0x27
+#define MPL3115_CTRL_REG3 0x28
+#define MPL3115_CTRL_REG4 0x29
+#define MPL3115_CTRL_REG5 0x2a
#define MPL3115_DEVICE_ID 0xc4
#define MPL3115_STATUS_PRESS_RDY BIT(2)
#define MPL3115_STATUS_TEMP_RDY BIT(1)
-#define MPL3115_CTRL_RESET BIT(2) /* software reset */
-#define MPL3115_CTRL_OST BIT(1) /* initiate measurement */
-#define MPL3115_CTRL_ACTIVE BIT(0) /* continuous measurement */
-#define MPL3115_CTRL_OS_258MS (BIT(5) | BIT(4)) /* 64x oversampling */
+#define MPL3115_INT_SRC_DRDY BIT(7)
+#define MPL3115_INT_SRC_PTH BIT(3)
+#define MPL3115_INT_SRC_TTH BIT(2)
+
+#define MPL3115_PT_DATA_EVENT_ALL GENMASK(2, 0)
+
+#define MPL3115_CTRL1_RESET BIT(2) /* software reset */
+#define MPL3115_CTRL1_OST BIT(1) /* initiate measurement */
+#define MPL3115_CTRL1_ACTIVE BIT(0) /* continuous measurement */
+#define MPL3115_CTRL1_OS_258MS GENMASK(5, 4) /* 64x oversampling */
+
+#define MPL3115_CTRL2_ST GENMASK(3, 0)
+
+#define MPL3115_CTRL3_IPOL1 BIT(5)
+#define MPL3115_CTRL3_IPOL2 BIT(1)
+
+#define MPL3115_CTRL4_INT_EN_DRDY BIT(7)
+#define MPL3115_CTRL4_INT_EN_PTH BIT(3)
+#define MPL3115_CTRL4_INT_EN_TTH BIT(2)
+
+#define MPL3115_CTRL5_INT_CFG_DRDY BIT(7)
+
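+/*
+ * CTRL_REG2's ST field selects an auto-acquisition time step of 2^ST
+ * seconds, so entry N of this table is 1 / 2^N Hz, from 1 Hz (ST = 0)
+ * down to ~30.5 uHz (ST = 15), truncated to integer microhertz. For
+ * example, writing 0.500000 to the sampling_frequency attribute
+ * selects entry 1 (ST = 1).
+ */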
+static const unsigned int mpl3115_samp_freq_table[][2] = {
+ { 1, 0 },
+ { 0, 500000 },
+ { 0, 250000 },
+ { 0, 125000 },
+ { 0, 62500 },
+ { 0, 31250 },
+ { 0, 15625 },
+ { 0, 7812 },
+ { 0, 3906 },
+ { 0, 1953 },
+ { 0, 976 },
+ { 0, 488 },
+ { 0, 244 },
+ { 0, 122 },
+ { 0, 61 },
+ { 0, 30 },
+};
struct mpl3115_data {
struct i2c_client *client;
+ struct iio_trigger *drdy_trig;
struct mutex lock;
u8 ctrl_reg1;
+ u8 ctrl_reg4;
+};
+
+enum mpl3115_irq_pin {
+ MPL3115_IRQ_INT1,
+ MPL3115_IRQ_INT2,
};
static int mpl3115_request(struct mpl3115_data *data)
@@ -47,7 +106,7 @@ static int mpl3115_request(struct mpl3115_data *data)
/* trigger measurement */
ret = i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
- data->ctrl_reg1 | MPL3115_CTRL_OST);
+ data->ctrl_reg1 | MPL3115_CTRL1_OST);
if (ret < 0)
return ret;
@@ -56,7 +115,7 @@ static int mpl3115_request(struct mpl3115_data *data)
if (ret < 0)
return ret;
/* wait for data ready, i.e. OST cleared */
- if (!(ret & MPL3115_CTRL_OST))
+ if (!(ret & MPL3115_CTRL1_OST))
break;
msleep(20);
}
@@ -76,7 +135,7 @@ static int mpl3115_read_info_raw(struct mpl3115_data *data,
switch (chan->type) {
case IIO_PRESSURE: { /* in 0.25 pascal / LSB */
- __be32 tmp = 0;
+ u8 press_be24[3];
guard(mutex)(&data->lock);
ret = mpl3115_request(data);
@@ -85,11 +144,17 @@ static int mpl3115_read_info_raw(struct mpl3115_data *data,
ret = i2c_smbus_read_i2c_block_data(data->client,
MPL3115_OUT_PRESS,
- 3, (u8 *) &tmp);
+ sizeof(press_be24),
+ press_be24);
if (ret < 0)
return ret;
- *val = be32_to_cpu(tmp) >> chan->scan_type.shift;
+ /*
+ * The pressure channel's scan_type.shift of 12 applies when the
+ * 24-bit big-endian data is read into a 32-bit buffer. Here the
+ * data is stored in a 24-bit buffer, so the shift is 4.
+ */
+ *val = get_unaligned_be24(press_be24) >> 4;
return IIO_VAL_INT;
}
case IIO_TEMP: { /* in 0.0625 celsius / LSB */
@@ -144,51 +209,110 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = i2c_smbus_read_byte_data(data->client, MPL3115_CTRL_REG2);
+ if (ret < 0)
+ return ret;
+
+ ret = FIELD_GET(MPL3115_CTRL2_ST, ret);
+
+ *val = mpl3115_samp_freq_table[ret][0];
+ *val2 = mpl3115_samp_freq_table[ret][1];
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
-static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
+static int mpl3115_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *length = ARRAY_SIZE(mpl3115_samp_freq_table) * 2;
+ *vals = (int *)mpl3115_samp_freq_table;
+ return IIO_AVAIL_LIST;
+}
+
+static int mpl3115_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ int i, ret;
+
+ if (mask != IIO_CHAN_INFO_SAMP_FREQ)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mpl3115_samp_freq_table); i++)
+ if (val == mpl3115_samp_freq_table[i][0] &&
+ val2 == mpl3115_samp_freq_table[i][1])
+ break;
+
+ if (i == ARRAY_SIZE(mpl3115_samp_freq_table))
+ return -EINVAL;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG2,
+ FIELD_PREP(MPL3115_CTRL2_ST, i));
+ iio_device_release_direct(indio_dev);
+ return ret;
+}
+
+static int mpl3115_fill_trig_buffer(struct iio_dev *indio_dev, u8 *buffer)
{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
struct mpl3115_data *data = iio_priv(indio_dev);
- /*
- * 32-bit channel + 16-bit channel + padding + ts
- * Note that it is possible for only one of the first 2
- * channels to be enabled. If that happens, the first element
- * of the buffer may be either 16 or 32-bits. As such we cannot
- * use a simple structure definition to express this data layout.
- */
- u8 buffer[16] __aligned(8) = { };
int ret, pos = 0;
- mutex_lock(&data->lock);
- ret = mpl3115_request(data);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- goto done;
+ if (!(data->ctrl_reg1 & MPL3115_CTRL1_ACTIVE)) {
+ ret = mpl3115_request(data);
+ if (ret < 0)
+ return ret;
}
if (test_bit(0, indio_dev->active_scan_mask)) {
ret = i2c_smbus_read_i2c_block_data(data->client,
MPL3115_OUT_PRESS, 3, &buffer[pos]);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- goto done;
- }
+ if (ret < 0)
+ return ret;
pos += 4;
}
if (test_bit(1, indio_dev->active_scan_mask)) {
ret = i2c_smbus_read_i2c_block_data(data->client,
MPL3115_OUT_TEMP, 2, &buffer[pos]);
- if (ret < 0) {
- mutex_unlock(&data->lock);
- goto done;
- }
+ if (ret < 0)
+ return ret;
}
+
+ return 0;
+}
+
+static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ /*
+ * 32-bit channel + 16-bit channel + padding + ts
+ * Note that it is possible for only one of the first 2
+ * channels to be enabled. If that happens, the first element
+ * of the buffer may be either 16 or 32-bits. As such we cannot
+ * use a simple structure definition to express this data layout.
+ */
+ u8 buffer[16] __aligned(8) = { };
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = mpl3115_fill_trig_buffer(indio_dev, buffer);
mutex_unlock(&data->lock);
+ if (ret)
+ goto done;
iio_push_to_buffers_with_ts(indio_dev, buffer, sizeof(buffer),
iio_get_time_ns(indio_dev));
@@ -198,11 +322,23 @@ done:
return IRQ_HANDLED;
}
+static const struct iio_event_spec mpl3115_temp_press_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
static const struct iio_chan_spec mpl3115_channels[] = {
{
.type = IIO_PRESSURE,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_index = 0,
.scan_type = {
.sign = 'u',
@@ -210,12 +346,17 @@ static const struct iio_chan_spec mpl3115_channels[] = {
.storagebits = 32,
.shift = 12,
.endianness = IIO_BE,
- }
+ },
+ .event_spec = mpl3115_temp_press_event,
+ .num_event_specs = ARRAY_SIZE(mpl3115_temp_press_event),
},
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
.scan_index = 1,
.scan_type = {
.sign = 's',
@@ -223,15 +364,333 @@ static const struct iio_chan_spec mpl3115_channels[] = {
.storagebits = 16,
.shift = 4,
.endianness = IIO_BE,
- }
+ },
+ .event_spec = mpl3115_temp_press_event,
+ .num_event_specs = ARRAY_SIZE(mpl3115_temp_press_event),
},
IIO_CHAN_SOFT_TIMESTAMP(2),
};
+static irqreturn_t mpl3115_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ int ret;
+ u8 val_press[3];
+ __be16 val_temp;
+
+ ret = i2c_smbus_read_byte_data(data->client, MPL3115_INT_SOURCE);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (!(ret & (MPL3115_INT_SRC_TTH | MPL3115_INT_SRC_PTH |
+ MPL3115_INT_SRC_DRDY)))
+ return IRQ_NONE;
+
+ if (ret & MPL3115_INT_SRC_DRDY)
+ iio_trigger_poll_nested(data->drdy_trig);
+
+ if (ret & MPL3115_INT_SRC_PTH) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PRESSURE, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+
+ /* Reset the SRC_PTH bit in INT_SOURCE */
+ i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_PRESS,
+ sizeof(val_press), val_press);
+ }
+
+ if (ret & MPL3115_INT_SRC_TTH) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+
+ /* Reset the SRC_TTH bit in INT_SOURCE */
+ i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_OUT_TEMP,
+ sizeof(val_temp),
+ (u8 *)&val_temp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mpl3115_config_interrupt(struct mpl3115_data *data,
+ u8 ctrl_reg1, u8 ctrl_reg4)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ ctrl_reg1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG4,
+ ctrl_reg4);
+ if (ret < 0)
+ goto reg1_cleanup;
+
+ data->ctrl_reg1 = ctrl_reg1;
+ data->ctrl_reg4 = ctrl_reg4;
+
+ return 0;
+
+reg1_cleanup:
+ i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
+ data->ctrl_reg1);
+ return ret;
+}
+
+static int mpl3115_set_trigger_state(struct iio_trigger *trig, bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ u8 ctrl_reg1, ctrl_reg4;
+
+ guard(mutex)(&data->lock);
+
+ ctrl_reg1 = data->ctrl_reg1;
+ ctrl_reg4 = data->ctrl_reg4;
+
+ if (state) {
+ ctrl_reg1 |= MPL3115_CTRL1_ACTIVE;
+ ctrl_reg4 |= MPL3115_CTRL4_INT_EN_DRDY;
+ } else {
+ ctrl_reg4 &= ~MPL3115_CTRL4_INT_EN_DRDY;
+
+ if (!ctrl_reg4)
+ ctrl_reg1 &= ~MPL3115_CTRL1_ACTIVE;
+ }
+
+ return mpl3115_config_interrupt(data, ctrl_reg1, ctrl_reg4);
+}
+
+static const struct iio_trigger_ops mpl3115_trigger_ops = {
+ .set_trigger_state = mpl3115_set_trigger_state,
+};
+
+static int mpl3115_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct mpl3115_data *data = iio_priv(indio_dev);
+
+ if (chan->type == IIO_PRESSURE)
+ return !!(data->ctrl_reg4 & MPL3115_CTRL4_INT_EN_PTH);
+
+ if (chan->type == IIO_TEMP)
+ return !!(data->ctrl_reg4 & MPL3115_CTRL4_INT_EN_TTH);
+
+ return -EINVAL;
+}
+
+static int mpl3115_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ u8 int_en_mask;
+ u8 ctrl_reg1, ctrl_reg4;
+
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ int_en_mask = MPL3115_CTRL4_INT_EN_PTH;
+ break;
+ case IIO_TEMP:
+ int_en_mask = MPL3115_CTRL4_INT_EN_TTH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ guard(mutex)(&data->lock);
+
+ ctrl_reg1 = data->ctrl_reg1;
+ ctrl_reg4 = data->ctrl_reg4;
+
+ if (state) {
+ ctrl_reg1 |= MPL3115_CTRL1_ACTIVE;
+ ctrl_reg4 |= int_en_mask;
+ } else {
+ ctrl_reg4 &= ~int_en_mask;
+
+ if (!ctrl_reg4)
+ ctrl_reg1 &= ~MPL3115_CTRL1_ACTIVE;
+ }
+
+ return mpl3115_config_interrupt(data, ctrl_reg1, ctrl_reg4);
+}
+
+static int mpl3115_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ int ret;
+ __be16 press_tgt;
+
+ if (info != IIO_EV_INFO_VALUE)
+ return -EINVAL;
+
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ ret = i2c_smbus_read_i2c_block_data(data->client,
+ MPL3115_PRESS_TGT,
+ sizeof(press_tgt),
+ (u8 *)&press_tgt);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The pressure target is a 16-bit unsigned value expressed in
+ * units of 2 Pa, e.g. a register value of 50650 corresponds to
+ * 101300 Pa.
+ */
+ *val = be16_to_cpu(press_tgt) << 1;
+
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ ret = i2c_smbus_read_byte_data(data->client, MPL3115_TEMP_TGT);
+ if (ret < 0)
+ return ret;
+
+ /* Target value for the temperature is 8-bit 2's complement */
+ *val = sign_extend32(ret, 7);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int mpl3115_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct mpl3115_data *data = iio_priv(indio_dev);
+ __be16 press_tgt;
+
+ if (info != IIO_EV_INFO_VALUE)
+ return -EINVAL;
+
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ val >>= 1;
+
+ if (val < 0 || val > U16_MAX)
+ return -EINVAL;
+
+ press_tgt = cpu_to_be16(val);
+
+ return i2c_smbus_write_i2c_block_data(data->client,
+ MPL3115_PRESS_TGT,
+ sizeof(press_tgt),
+ (u8 *)&press_tgt);
+ case IIO_TEMP:
+ if (val < S8_MIN || val > S8_MAX)
+ return -EINVAL;
+
+ return i2c_smbus_write_byte_data(data->client,
+ MPL3115_TEMP_TGT, val);
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct iio_info mpl3115_info = {
.read_raw = &mpl3115_read_raw,
+ .read_avail = &mpl3115_read_avail,
+ .write_raw = &mpl3115_write_raw,
+ .read_event_config = mpl3115_read_event_config,
+ .write_event_config = mpl3115_write_event_config,
+ .read_event_value = mpl3115_read_thresh,
+ .write_event_value = mpl3115_write_thresh,
};
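+/*
+ * Hypothetical devicetree sketch (controller and pin number are made
+ * up) for the interrupt wiring consumed below; the "INT1"/"INT2" names
+ * match what fwnode_irq_get_byname() looks up:
+ *
+ *	pressure@60 {
+ *		compatible = "fsl,mpl3115";
+ *		reg = <0x60>;
+ *		interrupt-parent = <&gpio1>;
+ *		interrupts = <5 IRQ_TYPE_EDGE_RISING>;
+ *		interrupt-names = "INT1";
+ *	};
+ */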
+static int mpl3115_trigger_probe(struct mpl3115_data *data,
+ struct iio_dev *indio_dev)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(&data->client->dev);
+ int ret, irq, irq_type, irq_pin = MPL3115_IRQ_INT1;
+
+ irq = fwnode_irq_get_byname(fwnode, "INT1");
+ if (irq < 0) {
+ irq = fwnode_irq_get_byname(fwnode, "INT2");
+ if (irq < 0)
+ return 0;
+
+ irq_pin = MPL3115_IRQ_INT2;
+ }
+
+ irq_type = irq_get_trigger_type(irq);
+ if (irq_type != IRQF_TRIGGER_RISING && irq_type != IRQF_TRIGGER_FALLING)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_byte_data(data->client, MPL3115_PT_DATA_CFG,
+ MPL3115_PT_DATA_EVENT_ALL);
+ if (ret < 0)
+ return ret;
+
+ if (irq_pin == MPL3115_IRQ_INT1) {
+ ret = i2c_smbus_write_byte_data(data->client,
+ MPL3115_CTRL_REG5,
+ MPL3115_CTRL5_INT_CFG_DRDY);
+ if (ret)
+ return ret;
+
+ if (irq_type == IRQF_TRIGGER_RISING) {
+ ret = i2c_smbus_write_byte_data(data->client,
+ MPL3115_CTRL_REG3,
+ MPL3115_CTRL3_IPOL1);
+ if (ret)
+ return ret;
+ }
+ } else if (irq_type == IRQF_TRIGGER_RISING) {
+ ret = i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG3,
+ MPL3115_CTRL3_IPOL2);
+ if (ret)
+ return ret;
+ }
+
+ data->drdy_trig = devm_iio_trigger_alloc(&data->client->dev,
+ "%s-dev%d",
+ indio_dev->name,
+ iio_device_id(indio_dev));
+ if (!data->drdy_trig)
+ return -ENOMEM;
+
+ data->drdy_trig->ops = &mpl3115_trigger_ops;
+ iio_trigger_set_drvdata(data->drdy_trig, indio_dev);
+
+ ret = devm_request_threaded_irq(&data->client->dev, irq, NULL,
+ mpl3115_interrupt_handler,
+ IRQF_ONESHOT,
+ "mpl3115_irq", indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_trigger_register(&data->client->dev, data->drdy_trig);
+ if (ret)
+ return ret;
+
+ indio_dev->trig = iio_trigger_get(data->drdy_trig);
+
+ return 0;
+}
+
static int mpl3115_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
@@ -262,15 +721,19 @@ static int mpl3115_probe(struct i2c_client *client)
/* software reset, I2C transfer is aborted (fails) */
i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1,
- MPL3115_CTRL_RESET);
+ MPL3115_CTRL1_RESET);
msleep(50);
- data->ctrl_reg1 = MPL3115_CTRL_OS_258MS;
+ data->ctrl_reg1 = MPL3115_CTRL1_OS_258MS;
ret = i2c_smbus_write_byte_data(client, MPL3115_CTRL_REG1,
data->ctrl_reg1);
if (ret < 0)
return ret;
+ ret = mpl3115_trigger_probe(data, indio_dev);
+ if (ret)
+ return ret;
+
ret = iio_triggered_buffer_setup(indio_dev, NULL,
mpl3115_trigger_handler, NULL);
if (ret < 0)
@@ -289,7 +752,7 @@ buffer_cleanup:
static int mpl3115_standby(struct mpl3115_data *data)
{
return i2c_smbus_write_byte_data(data->client, MPL3115_CTRL_REG1,
- data->ctrl_reg1 & ~MPL3115_CTRL_ACTIVE);
+ data->ctrl_reg1 & ~MPL3115_CTRL1_ACTIVE);
}
static void mpl3115_remove(struct i2c_client *client)
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 6eef37c0952d..4923a558a26a 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -697,7 +697,6 @@ static void zpa2326_suspend(struct iio_dev *indio_dev)
zpa2326_sleep(indio_dev);
- pm_runtime_mark_last_busy(parent);
pm_runtime_put_autosuspend(parent);
}
@@ -708,7 +707,6 @@ static void zpa2326_init_runtime(struct device *parent)
pm_runtime_enable(parent);
pm_runtime_set_autosuspend_delay(parent, 1000);
pm_runtime_use_autosuspend(parent);
- pm_runtime_mark_last_busy(parent);
pm_runtime_put_autosuspend(parent);
}
diff --git a/drivers/iio/proximity/d3323aa.c b/drivers/iio/proximity/d3323aa.c
index d4c3dbea9bb0..30821f583454 100644
--- a/drivers/iio/proximity/d3323aa.c
+++ b/drivers/iio/proximity/d3323aa.c
@@ -722,8 +722,7 @@ static int d3323aa_probe(struct platform_device *pdev)
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM,
- "Could not allocate iio device\n");
+ return -ENOMEM;
data = iio_priv(indio_dev);
data->dev = dev;
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index 33781c314728..2918dfc0df54 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -1141,8 +1141,7 @@ static int hx9023s_probe(struct i2c_client *client)
indio_dev->name,
iio_device_id(indio_dev));
if (!data->trig)
- return dev_err_probe(dev, -ENOMEM,
- "iio trigger alloc failed\n");
+ return -ENOMEM;
data->trig->ops = &hx9023s_trigger_ops;
iio_trigger_set_drvdata(data->trig, indio_dev);
diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c
index 253e4aef22fb..65af31d43453 100644
--- a/drivers/iio/proximity/irsd200.c
+++ b/drivers/iio/proximity/irsd200.c
@@ -862,8 +862,7 @@ static int irsd200_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(&client->dev, -ENOMEM,
- "Could not allocate iio device\n");
+ return -ENOMEM;
data = iio_priv(indio_dev);
data->dev = &client->dev;
@@ -916,8 +915,7 @@ static int irsd200_probe(struct i2c_client *client)
trigger = devm_iio_trigger_alloc(data->dev, "%s-dev%d", indio_dev->name,
iio_device_id(indio_dev));
if (!trigger)
- return dev_err_probe(data->dev, -ENOMEM,
- "Could not allocate iio trigger\n");
+ return -ENOMEM;
trigger->ops = &irsd200_trigger_ops;
iio_trigger_set_drvdata(trigger, data);
diff --git a/drivers/iio/proximity/mb1232.c b/drivers/iio/proximity/mb1232.c
index 01783486bc7d..34b49c54e68b 100644
--- a/drivers/iio/proximity/mb1232.c
+++ b/drivers/iio/proximity/mb1232.c
@@ -42,11 +42,6 @@ struct mb1232_data {
*/
struct completion ranging;
int irqnr;
- /* Ensure correct alignment of data to push to IIO buffer */
- struct {
- s16 distance;
- aligned_s64 ts;
- } scan;
};
static irqreturn_t mb1232_handle_irq(int irq, void *dev_id)
@@ -120,12 +115,16 @@ static irqreturn_t mb1232_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct mb1232_data *data = iio_priv(indio_dev);
+ struct {
+ s16 distance;
+ aligned_s64 ts;
+ } scan = { };
- data->scan.distance = mb1232_read_distance(data);
- if (data->scan.distance < 0)
+ scan.distance = mb1232_read_distance(data);
+ if (scan.distance < 0)
goto err;
- iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
pf->timestamp);
err:
diff --git a/drivers/iio/proximity/ping.c b/drivers/iio/proximity/ping.c
index c5b4e1378b7d..e3487094d7be 100644
--- a/drivers/iio/proximity/ping.c
+++ b/drivers/iio/proximity/ping.c
@@ -280,10 +280,8 @@ static int ping_probe(struct platform_device *pdev)
struct iio_dev *indio_dev;
indio_dev = devm_iio_device_alloc(dev, sizeof(struct ping_data));
- if (!indio_dev) {
- dev_err(dev, "failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->dev = dev;
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index 1deaf70e92ce..21336b8f122a 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -43,12 +43,6 @@ struct lidar_data {
int (*xfer)(struct lidar_data *data, u8 reg, u8 *val, int len);
int i2c_enabled;
-
- /* Ensure timestamp is naturally aligned */
- struct {
- u16 chan;
- aligned_s64 timestamp;
- } scan;
};
static const struct iio_chan_spec lidar_channels[] = {
@@ -191,7 +185,6 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
}
ret = -EIO;
}
- pm_runtime_mark_last_busy(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
return ret;
@@ -235,11 +228,14 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct lidar_data *data = iio_priv(indio_dev);
int ret;
+ struct {
+ u16 chan;
+ aligned_s64 timestamp;
+ } scan = { };
- ret = lidar_get_measurement(data, &data->scan.chan);
+ ret = lidar_get_measurement(data, &scan.chan);
if (!ret) {
- iio_push_to_buffers_with_ts(indio_dev, &data->scan,
- sizeof(data->scan),
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
} else if (ret != -EINVAL) {
dev_err(&data->client->dev, "cannot read LIDAR measurement");
diff --git a/drivers/iio/proximity/srf04.c b/drivers/iio/proximity/srf04.c
index b059bac1078b..e97f9a20ac7a 100644
--- a/drivers/iio/proximity/srf04.c
+++ b/drivers/iio/proximity/srf04.c
@@ -117,10 +117,8 @@ static int srf04_read(struct srf04_data *data)
udelay(data->cfg->trigger_pulse_us);
gpiod_set_value(data->gpiod_trig, 0);
- if (data->gpiod_power) {
- pm_runtime_mark_last_busy(data->dev);
+ if (data->gpiod_power)
pm_runtime_put_autosuspend(data->dev);
- }
/* it should not take more than 20 ms until echo is rising */
ret = wait_for_completion_killable_timeout(&data->rising, HZ/50);
@@ -253,10 +251,8 @@ static int srf04_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(struct srf04_data));
- if (!indio_dev) {
- dev_err(dev, "failed to allocate IIO device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
data = iio_priv(indio_dev);
data->dev = dev;
diff --git a/drivers/iio/proximity/srf08.c b/drivers/iio/proximity/srf08.c
index 6e32fdfd161b..d7e4cc48cfbf 100644
--- a/drivers/iio/proximity/srf08.c
+++ b/drivers/iio/proximity/srf08.c
@@ -63,12 +63,6 @@ struct srf08_data {
int range_mm;
struct mutex lock;
- /* Ensure timestamp is naturally aligned */
- struct {
- s16 chan;
- aligned_s64 timestamp;
- } scan;
-
/* Sensor-Type */
enum srf08_sensor_type sensor_type;
@@ -182,16 +176,18 @@ static irqreturn_t srf08_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct srf08_data *data = iio_priv(indio_dev);
- s16 sensor_data;
+ struct {
+ s16 chan;
+ aligned_s64 timestamp;
+ } scan = { };
- sensor_data = srf08_read_ranging(data);
- if (sensor_data < 0)
+ scan.chan = srf08_read_ranging(data);
+ if (scan.chan < 0)
goto err;
mutex_lock(&data->lock);
- data->scan.chan = sensor_data;
- iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
pf->timestamp);
mutex_unlock(&data->lock);
diff --git a/drivers/iio/proximity/sx9500.c b/drivers/iio/proximity/sx9500.c
index 05844f17a15f..6c67bae7488c 100644
--- a/drivers/iio/proximity/sx9500.c
+++ b/drivers/iio/proximity/sx9500.c
@@ -88,7 +88,6 @@ struct sx9500_data {
bool prox_stat[SX9500_NUM_CHANNELS];
bool event_enabled[SX9500_NUM_CHANNELS];
bool trigger_enabled;
- u16 *buffer;
/* Remember enabled channels and sample rate during suspend. */
unsigned int suspend_ctrl0;
struct completion completion;
@@ -578,22 +577,6 @@ out_unlock:
return ret;
}
-static int sx9500_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask)
-{
- struct sx9500_data *data = iio_priv(indio_dev);
-
- mutex_lock(&data->mutex);
- kfree(data->buffer);
- data->buffer = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
- mutex_unlock(&data->mutex);
-
- if (data->buffer == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
"2.500000 3.333333 5 6.666666 8.333333 11.111111 16.666666 33.333333");
@@ -612,7 +595,6 @@ static const struct iio_info sx9500_info = {
.write_raw = &sx9500_write_raw,
.read_event_config = &sx9500_read_event_config,
.write_event_config = &sx9500_write_event_config,
- .update_scan_mode = &sx9500_update_scan_mode,
};
static int sx9500_set_trigger_state(struct iio_trigger *trig,
@@ -649,6 +631,10 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct sx9500_data *data = iio_priv(indio_dev);
int val, bit, ret, i = 0;
+ struct {
+ u16 chan[SX9500_NUM_CHANNELS];
+ aligned_s64 timestamp;
+ } scan = { };
mutex_lock(&data->mutex);
@@ -658,10 +644,10 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
if (ret < 0)
goto out;
- data->buffer[i++] = val;
+ scan.chan[i++] = val;
}
- iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
out:
@@ -984,7 +970,6 @@ static void sx9500_remove(struct i2c_client *client)
iio_triggered_buffer_cleanup(indio_dev);
if (client->irq > 0)
iio_trigger_unregister(data->trig);
- kfree(data->buffer);
}
static int sx9500_suspend(struct device *dev)
diff --git a/drivers/iio/proximity/vl53l0x-i2c.c b/drivers/iio/proximity/vl53l0x-i2c.c
index ef4aa7b2835e..ad3e46d47fa8 100644
--- a/drivers/iio/proximity/vl53l0x-i2c.c
+++ b/drivers/iio/proximity/vl53l0x-i2c.c
@@ -57,11 +57,6 @@ struct vl53l0x_data {
struct regulator *vdd_supply;
struct gpio_desc *reset_gpio;
struct iio_trigger *trig;
-
- struct {
- u16 chan;
- aligned_s64 timestamp;
- } scan;
};
static int vl53l0x_clear_irq(struct vl53l0x_data *data)
@@ -84,6 +79,10 @@ static irqreturn_t vl53l0x_trigger_handler(int irq, void *priv)
struct vl53l0x_data *data = iio_priv(indio_dev);
u8 buffer[12];
int ret;
+ struct {
+ u16 chan;
+ aligned_s64 timestamp;
+ } scan = { };
ret = i2c_smbus_read_i2c_block_data(data->client,
VL_REG_RESULT_RANGE_STATUS,
@@ -93,8 +92,8 @@ static irqreturn_t vl53l0x_trigger_handler(int irq, void *priv)
else if (ret != 12)
return -EREMOTEIO;
- data->scan.chan = get_unaligned_be16(&buffer[10]);
- iio_push_to_buffers_with_ts(indio_dev, &data->scan, sizeof(data->scan),
+ scan.chan = get_unaligned_be16(&buffer[10]);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
iio_get_time_ns(indio_dev));
iio_trigger_notify_done(indio_dev->trig);
@@ -312,7 +311,6 @@ static int vl53l0x_probe(struct i2c_client *client)
{
struct vl53l0x_data *data;
struct iio_dev *indio_dev;
- int error;
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
@@ -345,15 +343,14 @@ static int vl53l0x_probe(struct i2c_client *client)
return dev_err_probe(&client->dev, PTR_ERR(data->reset_gpio),
"Cannot get reset GPIO\n");
- error = vl53l0x_power_on(data);
- if (error)
- return dev_err_probe(&client->dev, error,
+ ret = vl53l0x_power_on(data);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
"Failed to power on the chip\n");
- error = devm_add_action_or_reset(&client->dev, vl53l0x_power_off, data);
- if (error)
- return dev_err_probe(&client->dev, error,
- "Failed to install poweroff action\n");
+ ret = devm_add_action_or_reset(&client->dev, vl53l0x_power_off, data);
+ if (ret)
+ return ret;
indio_dev->name = "vl53l0x";
indio_dev->info = &vl53l0x_info;
diff --git a/drivers/iio/resolver/ad2s1210.c b/drivers/iio/resolver/ad2s1210.c
index 9b028c8bb1db..06d9c784f93e 100644
--- a/drivers/iio/resolver/ad2s1210.c
+++ b/drivers/iio/resolver/ad2s1210.c
@@ -1132,23 +1132,23 @@ static int ad2s1210_read_label(struct iio_dev *indio_dev,
{
if (chan->type == IIO_ANGL) {
if (chan->channel == 0)
- return sprintf(label, "position\n");
+ return sysfs_emit(label, "position\n");
if (chan->channel == 1)
- return sprintf(label, "tracking error\n");
+ return sysfs_emit(label, "tracking error\n");
}
if (chan->type == IIO_ANGL_VEL)
- return sprintf(label, "velocity\n");
+ return sysfs_emit(label, "velocity\n");
if (chan->type == IIO_PHASE)
- return sprintf(label, "synthetic reference\n");
+ return sysfs_emit(label, "synthetic reference\n");
if (chan->type == IIO_ALTVOLTAGE) {
if (chan->output)
- return sprintf(label, "excitation\n");
+ return sysfs_emit(label, "excitation\n");
if (chan->channel == 0)
- return sprintf(label, "monitor signal\n");
+ return sysfs_emit(label, "monitor signal\n");
if (chan->channel == 1)
- return sprintf(label, "cosine\n");
+ return sysfs_emit(label, "cosine\n");
if (chan->channel == 2)
- return sprintf(label, "sine\n");
+ return sysfs_emit(label, "sine\n");
}
return -EINVAL;
@@ -1239,24 +1239,24 @@ static int ad2s1210_read_event_label(struct iio_dev *indio_dev,
char *label)
{
if (chan->type == IIO_ANGL)
- return sprintf(label, "LOT\n");
+ return sysfs_emit(label, "LOT\n");
if (chan->type == IIO_ANGL_VEL)
- return sprintf(label, "max tracking rate\n");
+ return sysfs_emit(label, "max tracking rate\n");
if (chan->type == IIO_PHASE)
- return sprintf(label, "phase lock\n");
+ return sysfs_emit(label, "phase lock\n");
if (chan->type == IIO_ALTVOLTAGE) {
if (chan->channel == 0) {
if (type == IIO_EV_TYPE_THRESH &&
dir == IIO_EV_DIR_FALLING)
- return sprintf(label, "LOS\n");
+ return sysfs_emit(label, "LOS\n");
if (type == IIO_EV_TYPE_THRESH &&
dir == IIO_EV_DIR_RISING)
- return sprintf(label, "DOS overrange\n");
+ return sysfs_emit(label, "DOS overrange\n");
if (type == IIO_EV_TYPE_MAG)
- return sprintf(label, "DOS mismatch\n");
+ return sysfs_emit(label, "DOS mismatch\n");
}
if (chan->channel == 1 || chan->channel == 2)
- return sprintf(label, "clipped\n");
+ return sysfs_emit(label, "clipped\n");
}
return -EINVAL;
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index 1244d8e17d50..9328b2250ace 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -173,11 +173,13 @@ config MAX31865
will be called max31865.
config MCP9600
- tristate "MCP9600 thermocouple EMF converter"
+ tristate "MCP9600 and similar thermocouple EMF converters"
depends on I2C
help
- If you say yes here you get support for MCP9600
- thermocouple EMF converter connected via I2C.
+	  If you say yes here you get support for the MCP9600 and
+	  MCP9601 thermocouple EMF converters connected via I2C.
This driver can also be built as a module. If so, the module
will be called mcp9600.
diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c
index 6e9108d5cf75..aa42c2b1a369 100644
--- a/drivers/iio/temperature/mcp9600.c
+++ b/drivers/iio/temperature/mcp9600.c
@@ -22,26 +22,31 @@
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
+#include <dt-bindings/iio/temperature/thermocouple.h>
+
/* MCP9600 registers */
-#define MCP9600_HOT_JUNCTION 0x0
-#define MCP9600_COLD_JUNCTION 0x2
-#define MCP9600_STATUS 0x4
+#define MCP9600_HOT_JUNCTION 0x00
+#define MCP9600_COLD_JUNCTION 0x02
+#define MCP9600_STATUS 0x04
#define MCP9600_STATUS_ALERT(x) BIT(x)
-#define MCP9600_ALERT_CFG1 0x8
+#define MCP9600_SENSOR_CFG 0x05
+#define MCP9600_SENSOR_TYPE_MASK GENMASK(6, 4)
+#define MCP9600_ALERT_CFG1 0x08
#define MCP9600_ALERT_CFG(x) (MCP9600_ALERT_CFG1 + (x - 1))
#define MCP9600_ALERT_CFG_ENABLE BIT(0)
#define MCP9600_ALERT_CFG_ACTIVE_HIGH BIT(2)
#define MCP9600_ALERT_CFG_FALLING BIT(3)
#define MCP9600_ALERT_CFG_COLD_JUNCTION BIT(4)
-#define MCP9600_ALERT_HYSTERESIS1 0xc
+#define MCP9600_ALERT_HYSTERESIS1 0x0c
#define MCP9600_ALERT_HYSTERESIS(x) (MCP9600_ALERT_HYSTERESIS1 + (x - 1))
#define MCP9600_ALERT_LIMIT1 0x10
#define MCP9600_ALERT_LIMIT(x) (MCP9600_ALERT_LIMIT1 + (x - 1))
#define MCP9600_ALERT_LIMIT_MASK GENMASK(15, 2)
-#define MCP9600_DEVICE_ID 0x20
+#define MCP9600_DEVICE_ID 0x20
/* MCP9600 device id value */
-#define MCP9600_DEVICE_ID_MCP9600 0x40
+#define MCP9600_DEVICE_ID_MCP9600 0x40
+#define MCP9600_DEVICE_ID_MCP9601 0x41
#define MCP9600_ALERT_COUNT 4
@@ -65,6 +70,30 @@ static const char * const mcp9600_alert_name[MCP9600_ALERT_COUNT] = {
[MCP9600_ALERT4] = "alert4",
};
+/* Map between dt-bindings enum and the chip's type value */
+static const unsigned int mcp9600_type_map[] = {
+ [THERMOCOUPLE_TYPE_K] = 0,
+ [THERMOCOUPLE_TYPE_J] = 1,
+ [THERMOCOUPLE_TYPE_T] = 2,
+ [THERMOCOUPLE_TYPE_N] = 3,
+ [THERMOCOUPLE_TYPE_S] = 4,
+ [THERMOCOUPLE_TYPE_E] = 5,
+ [THERMOCOUPLE_TYPE_B] = 6,
+ [THERMOCOUPLE_TYPE_R] = 7,
+};
+
+/* Map thermocouple type to a char for iio info in sysfs */
+static const int mcp9600_tc_types[] = {
+ [THERMOCOUPLE_TYPE_K] = 'K',
+ [THERMOCOUPLE_TYPE_J] = 'J',
+ [THERMOCOUPLE_TYPE_T] = 'T',
+ [THERMOCOUPLE_TYPE_N] = 'N',
+ [THERMOCOUPLE_TYPE_S] = 'S',
+ [THERMOCOUPLE_TYPE_E] = 'E',
+ [THERMOCOUPLE_TYPE_B] = 'B',
+ [THERMOCOUPLE_TYPE_R] = 'R',
+};
+
static const struct iio_event_spec mcp9600_events[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -82,12 +111,41 @@ static const struct iio_event_spec mcp9600_events[] = {
},
};
+struct mcp_chip_info {
+ u8 chip_id;
+ const char *chip_name;
+};
+
+struct mcp9600_data {
+ struct i2c_client *client;
+ u32 thermocouple_type;
+};
+
+static int mcp9600_config(struct mcp9600_data *data)
+{
+ struct i2c_client *client = data->client;
+ int ret;
+ u8 cfg;
+
+ cfg = FIELD_PREP(MCP9600_SENSOR_TYPE_MASK,
+ mcp9600_type_map[data->thermocouple_type]);
+
+ ret = i2c_smbus_write_byte_data(client, MCP9600_SENSOR_CFG, cfg);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to set sensor configuration\n");
+ return ret;
+ }
+
+ return 0;
+}
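For readers unfamiliar with the bitfield helpers, a worked example (not driver code) of what mcp9600_config() writes, using an illustrative Type-S part:

/*
 * mcp9600_type_map[THERMOCOUPLE_TYPE_S] == 4 and
 * MCP9600_SENSOR_TYPE_MASK == GENMASK(6, 4) == 0x70, so:
 *
 *   cfg = FIELD_PREP(GENMASK(6, 4), 4) == 4 << 4 == 0x40
 *
 * which lands the type value in bits 6:4 of MCP9600_SENSOR_CFG.
 */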
+
#define MCP9600_CHANNELS(hj_num_ev, hj_ev_spec_off, cj_num_ev, cj_ev_spec_off) \
{ \
{ \
.type = IIO_TEMP, \
.address = MCP9600_HOT_JUNCTION, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_THERMOCOUPLE_TYPE) | \
BIT(IIO_CHAN_INFO_SCALE), \
.event_spec = &mcp9600_events[hj_ev_spec_off], \
.num_event_specs = hj_num_ev, \
@@ -123,10 +181,6 @@ static const struct iio_chan_spec mcp9600_channels[][2] = {
MCP9600_CHANNELS(2, 0, 2, 0), /* Alerts: 1 2 3 4 */
};
-struct mcp9600_data {
- struct i2c_client *client;
-};
-
static int mcp9600_read(struct mcp9600_data *data,
struct iio_chan_spec const *chan, int *val)
{
@@ -159,6 +213,9 @@ static int mcp9600_read_raw(struct iio_dev *indio_dev,
*val = 62;
*val2 = 500000;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
+ *val = mcp9600_tc_types[data->thermocouple_type];
+ return IIO_VAL_CHAR;
default:
return -EINVAL;
}
@@ -416,45 +473,93 @@ static int mcp9600_probe_alerts(struct iio_dev *indio_dev)
static int mcp9600_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
+ const struct mcp_chip_info *chip_info;
struct iio_dev *indio_dev;
struct mcp9600_data *data;
- int ret, ch_sel;
+ int ch_sel, dev_id, ret;
+
+ chip_info = i2c_get_match_data(client);
+ if (!chip_info)
+ return dev_err_probe(dev, -ENODEV,
+ "No chip-info found for device\n");
+
+ dev_id = i2c_smbus_read_byte_data(client, MCP9600_DEVICE_ID);
+ if (dev_id < 0)
+ return dev_err_probe(dev, dev_id, "Failed to read device ID\n");
+
+ switch (dev_id) {
+ case MCP9600_DEVICE_ID_MCP9600:
+ case MCP9600_DEVICE_ID_MCP9601:
+ if (dev_id != chip_info->chip_id)
+ dev_warn(dev,
+ "Expected id %02x, but device responded with %02x\n",
+ chip_info->chip_id, dev_id);
+ break;
- ret = i2c_smbus_read_byte_data(client, MCP9600_DEVICE_ID);
- if (ret < 0)
- return dev_err_probe(&client->dev, ret, "Failed to read device ID\n");
- if (ret != MCP9600_DEVICE_ID_MCP9600)
- dev_warn(&client->dev, "Expected ID %x, got %x\n",
- MCP9600_DEVICE_ID_MCP9600, ret);
+ default:
+ dev_warn(dev, "Unknown id %x, using %x\n", dev_id,
+ chip_info->chip_id);
+ }
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
data->client = client;
+ /* Accept type from dt with default of Type-K. */
+ data->thermocouple_type = THERMOCOUPLE_TYPE_K;
+ ret = device_property_read_u32(dev, "thermocouple-type",
+ &data->thermocouple_type);
+ if (ret && ret != -EINVAL)
+ return dev_err_probe(dev, ret,
+ "Error reading thermocouple-type property\n");
+
+ if (data->thermocouple_type >= ARRAY_SIZE(mcp9600_type_map))
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid thermocouple-type property %u.\n",
+ data->thermocouple_type);
+
+ /* Set initial config. */
+ ret = mcp9600_config(data);
+ if (ret)
+ return ret;
+
ch_sel = mcp9600_probe_alerts(indio_dev);
if (ch_sel < 0)
return ch_sel;
indio_dev->info = &mcp9600_info;
- indio_dev->name = "mcp9600";
+ indio_dev->name = chip_info->chip_name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mcp9600_channels[ch_sel];
indio_dev->num_channels = ARRAY_SIZE(mcp9600_channels[ch_sel]);
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
+static const struct mcp_chip_info mcp9600_chip_info = {
+ .chip_id = MCP9600_DEVICE_ID_MCP9600,
+ .chip_name = "mcp9600",
+};
+
+static const struct mcp_chip_info mcp9601_chip_info = {
+ .chip_id = MCP9600_DEVICE_ID_MCP9601,
+ .chip_name = "mcp9601",
+};
+
static const struct i2c_device_id mcp9600_id[] = {
- { "mcp9600" },
+ { "mcp9600", .driver_data = (kernel_ulong_t)&mcp9600_chip_info },
+ { "mcp9601", .driver_data = (kernel_ulong_t)&mcp9601_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, mcp9600_id);
static const struct of_device_id mcp9600_of_match[] = {
- { .compatible = "microchip,mcp9600" },
+ { .compatible = "microchip,mcp9600", .data = &mcp9600_chip_info },
+ { .compatible = "microchip,mcp9601", .data = &mcp9601_chip_info },
{ }
};
MODULE_DEVICE_TABLE(of, mcp9600_of_match);
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index 740018d4b3df..1ad21b73e1b4 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -22,6 +22,7 @@
* the "wakeup" GPIO is not given, power management will be disabled.
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
@@ -68,10 +69,6 @@
#define MLX90614_CONST_SCALE 20 /* Scale in milliKelvin (0.02 * 1000) */
#define MLX90614_CONST_FIR 0x7 /* Fixed value for FIR part of low pass filter */
-/* Non-constant mask variant of FIELD_GET() and FIELD_PREP() */
-#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
-#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
-
struct mlx_chip_info {
/* EEPROM offsets with 16-bit data, MSB first */
/* emissivity correction coefficient */
@@ -225,7 +222,6 @@ static void mlx90614_power_put(struct mlx90614_data *data)
if (!data->wakeup_gpio)
return;
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
}
#else
diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c
index ae4ea587e7f9..b44f7036c2cc 100644
--- a/drivers/iio/temperature/mlx90632.c
+++ b/drivers/iio/temperature/mlx90632.c
@@ -1043,7 +1043,6 @@ static int mlx90632_read_raw(struct iio_dev *indio_dev,
}
mlx90632_read_raw_pm:
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
return ret;
}
@@ -1178,10 +1177,8 @@ static int mlx90632_probe(struct i2c_client *client)
int ret;
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90632));
- if (!indio_dev) {
- dev_err(&client->dev, "Failed to allocate device\n");
+ if (!indio_dev)
return -ENOMEM;
- }
regmap = devm_regmap_init_i2c(client, &mlx90632_regmap);
if (IS_ERR(regmap)) {
diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c
index f7f88498ba0e..1c8948ca54df 100644
--- a/drivers/iio/temperature/mlx90635.c
+++ b/drivers/iio/temperature/mlx90635.c
@@ -749,7 +749,6 @@ static int mlx90635_read_raw(struct iio_dev *indio_dev,
}
mlx90635_read_raw_pm:
- pm_runtime_mark_last_busy(&data->client->dev);
pm_runtime_put_autosuspend(&data->client->dev);
return ret;
}
@@ -939,7 +938,7 @@ static int mlx90635_probe(struct i2c_client *client)
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90635));
if (!indio_dev)
- return dev_err_probe(&client->dev, -ENOMEM, "failed to allocate device\n");
+ return -ENOMEM;
regmap = devm_regmap_init_i2c(client, &mlx90635_regmap);
if (IS_ERR(regmap))
@@ -977,8 +976,7 @@ static int mlx90635_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(&client->dev, mlx90635_disable_regulator,
mlx90635);
if (ret < 0)
- return dev_err_probe(&client->dev, ret,
- "failed to setup regulator cleanup action\n");
+ return ret;
ret = mlx90635_wakeup(mlx90635);
if (ret < 0)
@@ -986,8 +984,7 @@ static int mlx90635_probe(struct i2c_client *client)
ret = devm_add_action_or_reset(&client->dev, mlx90635_sleep, mlx90635);
if (ret < 0)
- return dev_err_probe(&client->dev, ret,
- "failed to setup low power cleanup\n");
+ return ret;
ret = regmap_read(mlx90635->regmap_ee, MLX90635_EE_VERSION, &dsp_version);
if (ret < 0)
diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig
index 7a181cac3cc9..6e65e929791c 100644
--- a/drivers/iio/test/Kconfig
+++ b/drivers/iio/test/Kconfig
@@ -41,3 +41,15 @@ config IIO_FORMAT_KUNIT_TEST
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
+
+config IIO_MULTIPLY_KUNIT_TEST
+ tristate "Test IIO multiply functions" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+	  Build unit tests for the IIO multiply functions.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile
index e9a4cf1ff57f..0c846bc21acd 100644
--- a/drivers/iio/test/Makefile
+++ b/drivers/iio/test/Makefile
@@ -7,4 +7,5 @@
obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o
obj-$(CONFIG_IIO_FORMAT_KUNIT_TEST) += iio-test-format.o
obj-$(CONFIG_IIO_GTS_KUNIT_TEST) += iio-test-gts.o
+obj-$(CONFIG_IIO_MULTIPLY_KUNIT_TEST) += iio-test-multiply.o
CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/iio/test/iio-test-multiply.c b/drivers/iio/test/iio-test-multiply.c
new file mode 100644
index 000000000000..432e279ffe5b
--- /dev/null
+++ b/drivers/iio/test/iio-test-multiply.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Unit tests for IIO multiply functions
+ *
+ * Copyright (c) 2025 Hans de Goede <hans@hansg.org>
+ * Based on iio-test-format.c which is:
+ * Copyright (c) 2020 Lars-Peter Clausen <lars@metafoo.de>
+ */
+
+#include <kunit/test.h>
+#include <linux/iio/consumer.h>
+#include <linux/math64.h>
+#include <linux/types.h>
+
+static void __iio_test_iio_multiply_value_integer(struct kunit *test, s64 multiplier)
+{
+ int ret, result, val;
+
+ val = 42;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT, val, 0);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, multiplier * val);
+
+ val = -23;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT, val, 0);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, multiplier * val);
+
+ val = 0;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT, val, 0);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, multiplier * val);
+}
+
+static void iio_test_iio_multiply_value_integer(struct kunit *test)
+{
+ __iio_test_iio_multiply_value_integer(test, 20);
+ __iio_test_iio_multiply_value_integer(test, -20);
+}
+
+static void __iio_test_iio_multiply_value_fixedpoint(struct kunit *test, s64 multiplier)
+{
+ int ret, result, val, val2;
+
+ /* positive >= 1 (1.5) */
+ val = 1;
+ val2 = 500000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_MICRO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * 15, 10));
+
+ val = 1;
+ val2 = 500000000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_NANO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * 15, 10));
+
+ /* positive < 1 (0.5) */
+ val = 0;
+ val2 = 500000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_MICRO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * 5, 10));
+
+ val = 0;
+ val2 = 500000000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_NANO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * 5, 10));
+
+ /* negative <= -1 (-1.5) */
+ val = -1;
+ val2 = 500000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_MICRO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * -15, 10));
+
+ val = -1;
+ val2 = 500000000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_NANO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * -15, 10));
+
+ /* negative > -1 (-0.5) */
+ val = 0;
+ val2 = -500000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_MICRO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * -5, 10));
+
+ val = 0;
+ val2 = -500000000;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_INT_PLUS_NANO, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * -5, 10));
+}
+
+static void iio_test_iio_multiply_value_fixedpoint(struct kunit *test)
+{
+ __iio_test_iio_multiply_value_fixedpoint(test, 20);
+ __iio_test_iio_multiply_value_fixedpoint(test, -20);
+}
+
+static void __iio_test_iio_multiply_value_fractional(struct kunit *test, s64 multiplier)
+{
+ int ret, result, val, val2;
+
+	/* positive < 1 (1/10) */
+ val = 1;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+
+	/* positive >= 1 (100/3) */
+ val = 100;
+ val2 = 3;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+
+ /* negative > -1 (-1/10) */
+ val = -1;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+
+	/* negative <= -1 (-200/3) */
+ val = -200;
+ val2 = 3;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+
+ /* Zero (0/-10) */
+ val = 0;
+ val2 = -10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, div_s64(multiplier * val, val2));
+}
+
+static void iio_test_iio_multiply_value_fractional(struct kunit *test)
+{
+ __iio_test_iio_multiply_value_fractional(test, 20);
+ __iio_test_iio_multiply_value_fractional(test, -20);
+}
+
+static void __iio_test_iio_multiply_value_fractional_log2(struct kunit *test, s64 multiplier)
+{
+ int ret, result, val, val2;
+
+ /* positive < 1 (123/1024) */
+ val = 123;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+
+ /* positive >= 1 (1234567/1024) */
+ val = 1234567;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+
+ /* negative > -1 (-123/1024) */
+ val = -123;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+
+ /* negative <= -1 (-1234567/1024) */
+ val = -1234567;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+
+ /* Zero (0/1024) */
+ val = 0;
+ val2 = 10;
+ ret = iio_multiply_value(&result, multiplier, IIO_VAL_FRACTIONAL_LOG2, val, val2);
+ KUNIT_EXPECT_EQ(test, ret, IIO_VAL_INT);
+ KUNIT_EXPECT_EQ(test, result, (multiplier * val) >> val2);
+}
+
+static void iio_test_iio_multiply_value_fractional_log2(struct kunit *test)
+{
+ __iio_test_iio_multiply_value_fractional_log2(test, 20);
+ __iio_test_iio_multiply_value_fractional_log2(test, -20);
+}
+
+static struct kunit_case iio_multiply_test_cases[] = {
+ KUNIT_CASE(iio_test_iio_multiply_value_integer),
+ KUNIT_CASE(iio_test_iio_multiply_value_fixedpoint),
+ KUNIT_CASE(iio_test_iio_multiply_value_fractional),
+ KUNIT_CASE(iio_test_iio_multiply_value_fractional_log2),
+ { }
+};
+
+static struct kunit_suite iio_multiply_test_suite = {
+ .name = "iio-multiply",
+ .test_cases = iio_multiply_test_cases,
+};
+kunit_test_suite(iio_multiply_test_suite);
+
+MODULE_AUTHOR("Hans de Goede <hans@hansg.org>");
+MODULE_DESCRIPTION("Test IIO multiply functions");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_UNIT_TEST");
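A worked example of the arithmetic the fractional cases assert, assuming iio_multiply_value() behaves exactly as these expectations encode:

/*
 * With multiplier = 20 and an IIO_VAL_FRACTIONAL value of 100/3:
 *
 *   result = div_s64(20 * 100, 3) = div_s64(2000, 3) = 666
 *
 * The product is taken at full precision first and only then divided
 * (truncating toward zero), so this is neither 20 * 33 = 660 nor a
 * rounded 667.
 */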
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 3a394cd772f6..794b9778816b 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -80,11 +80,13 @@ config INFINIBAND_VIRT_DMA
if INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
if !UML
source "drivers/infiniband/hw/bnxt_re/Kconfig"
+source "drivers/infiniband/hw/bng_re/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/efa/Kconfig"
source "drivers/infiniband/hw/erdma/Kconfig"
source "drivers/infiniband/hw/hfi1/Kconfig"
source "drivers/infiniband/hw/hns/Kconfig"
+source "drivers/infiniband/hw/ionic/Kconfig"
source "drivers/infiniband/hw/irdma/Kconfig"
source "drivers/infiniband/hw/mana/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index be0743dac3ff..61596cda2b65 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -446,63 +446,41 @@ static int addr6_resolve(struct sockaddr *src_sock,
}
#endif
+static bool is_dst_local(const struct dst_entry *dst)
+{
+ if (dst->ops->family == AF_INET)
+ return !!(dst_rtable(dst)->rt_type & RTN_LOCAL);
+ else if (dst->ops->family == AF_INET6)
+ return !!(dst_rt6_info(dst)->rt6i_flags & RTF_LOCAL);
+ else
+ return false;
+}
+
static int addr_resolve_neigh(const struct dst_entry *dst,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
- unsigned int ndev_flags,
u32 seq)
{
- int ret = 0;
-
- if (ndev_flags & IFF_LOOPBACK) {
+ if (is_dst_local(dst)) {
+ /* When the destination is a local entry, the source and
+ * destination are the same. Skip the neighbour lookup.
+ */
memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
- } else {
- if (!(ndev_flags & IFF_NOARP)) {
- /* If the device doesn't do ARP internally */
- ret = fetch_ha(dst, addr, dst_in, seq);
- }
+ return 0;
}
- return ret;
-}
-
-static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
- const struct sockaddr *dst_in,
- const struct dst_entry *dst,
- const struct net_device *ndev)
-{
- int ret = 0;
-
- if (dst->dev->flags & IFF_LOOPBACK)
- ret = rdma_translate_ip(dst_in, dev_addr);
- else
- rdma_copy_src_l2_addr(dev_addr, dst->dev);
-
- /*
- * If there's a gateway and type of device not ARPHRD_INFINIBAND,
- * we're definitely in RoCE v2 (as RoCE v1 isn't routable) set the
- * network type accordingly.
- */
- if (has_gateway(dst, dst_in->sa_family) &&
- ndev->type != ARPHRD_INFINIBAND)
- dev_addr->network = dst_in->sa_family == AF_INET ?
- RDMA_NETWORK_IPV4 :
- RDMA_NETWORK_IPV6;
- else
- dev_addr->network = RDMA_NETWORK_IB;
- return ret;
+ return fetch_ha(dst, addr, dst_in, seq);
}
static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
- unsigned int *ndev_flags,
const struct sockaddr *dst_in,
const struct dst_entry *dst)
{
struct net_device *ndev = READ_ONCE(dst->dev);
- *ndev_flags = ndev->flags;
/* A physical device must be the RDMA device to use */
- if (ndev->flags & IFF_LOOPBACK) {
+ if (is_dst_local(dst)) {
+ int ret;
/*
* RDMA (IB/RoCE, iWarp) doesn't run on lo interface or
* loopback IP address. So if route is resolved to loopback
@@ -512,9 +490,27 @@ static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
if (IS_ERR(ndev))
return -ENODEV;
+ ret = rdma_translate_ip(dst_in, dev_addr);
+ if (ret)
+ return ret;
+ } else {
+ rdma_copy_src_l2_addr(dev_addr, dst->dev);
}
- return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
+ /*
+ * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set the
+ * network type accordingly.
+ */
+ if (has_gateway(dst, dst_in->sa_family) &&
+ ndev->type != ARPHRD_INFINIBAND)
+ dev_addr->network = dst_in->sa_family == AF_INET ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ else
+ dev_addr->network = RDMA_NETWORK_IB;
+
+ return 0;
}
static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
@@ -551,7 +547,6 @@ static int addr_resolve(struct sockaddr *src_in,
u32 seq)
{
struct dst_entry *dst = NULL;
- unsigned int ndev_flags = 0;
struct rtable *rt = NULL;
int ret;
@@ -588,7 +583,7 @@ static int addr_resolve(struct sockaddr *src_in,
rcu_read_unlock();
goto done;
}
- ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
+ ret = rdma_set_src_addr_rcu(addr, dst_in, dst);
rcu_read_unlock();
/*
@@ -596,7 +591,7 @@ static int addr_resolve(struct sockaddr *src_in,
* only if src addr translation didn't fail.
*/
if (!ret && resolve_neigh)
- ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);
+ ret = addr_resolve_neigh(dst, dst_in, addr, seq);
if (src_in->sa_family == AF_INET)
ip_rt_put(rt);
diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c
index 3bb46696731e..25a060a28301 100644
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -110,8 +110,7 @@ void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *
agent = port_priv->agent[qpn];
ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num);
if (IS_ERR(ah)) {
- dev_err(&device->dev, "ib_create_ah_from_wc error %ld\n",
- PTR_ERR(ah));
+ dev_err(&device->dev, "ib_create_ah_from_wc error %pe\n", ah);
return;
}
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 92678e438ff4..024df6ee239d 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,7 +34,6 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
-#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
#define CM_DIRECT_RETRY_CTX ((void *) 1UL)
#define CM_MRA_SETTING 24 /* 4.096us * 2^24 = ~68.7 seconds */
@@ -1049,14 +1048,15 @@ static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id,
struct cm_id_private *cm_id_priv;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
- pr_err("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
- cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
+ pr_err_ratelimited("%s: cm_id=%p timed out. state %d -> %d, refcnt=%d\n", __func__,
+ cm_id, old_state, cm_id->state, refcount_read(&cm_id_priv->refcount));
}
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
enum ib_cm_state old_state;
+ unsigned long timeout;
struct cm_work *work;
int ret;
@@ -1167,10 +1167,9 @@ retest:
xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
+ timeout = msecs_to_jiffies((cm_id_priv->max_cm_retries * cm_id_priv->timeout_ms * 5) / 4);
do {
- ret = wait_for_completion_timeout(&cm_id_priv->comp,
- msecs_to_jiffies(
- CM_DESTROY_ID_WAIT_TIMEOUT));
+ ret = wait_for_completion_timeout(&cm_id_priv->comp, timeout);
if (!ret) /* timeout happened */
cm_destroy_id_wait_timeout(cm_id, old_state);
} while (!ret);
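Illustrative arithmetic for the computed timeout, using example values rather than anything mandated by the code:

/*
 * With max_cm_retries = 15 and timeout_ms = 4000:
 *
 *   timeout = msecs_to_jiffies((15 * 4000 * 5) / 4)
 *           = msecs_to_jiffies(75000)
 *
 * i.e. the id's full retry budget plus a 25% margin, replacing the
 * old fixed 10-second wait per loop iteration.
 */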
@@ -4518,7 +4517,7 @@ static int __init ib_cm_init(void)
get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
INIT_LIST_HEAD(&cm.timewait_list);
- cm.wq = alloc_workqueue("ib_cm", 0, 1);
+ cm.wq = alloc_workqueue("ib_cm", WQ_PERCPU, 1);
if (!cm.wq) {
ret = -ENOMEM;
goto error2;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 9b471548e7ae..95e89f5c147c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2076,6 +2076,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
kfree(id_priv->id.route.path_rec);
kfree(id_priv->id.route.path_rec_inbound);
kfree(id_priv->id.route.path_rec_outbound);
+ kfree(id_priv->id.route.service_recs);
put_net(id_priv->id.route.addr.dev_addr.net);
kfree(id_priv);
@@ -3382,13 +3383,18 @@ err1:
int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
+ enum rdma_cm_state state;
int ret;
if (!timeout_ms)
return -EINVAL;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
+ state = id_priv->state;
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+ RDMA_CM_ROUTE_QUERY) &&
+ !cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_RESOLVED,
+ RDMA_CM_ROUTE_QUERY))
return -EINVAL;
cma_id_get(id_priv);
@@ -3409,7 +3415,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
return 0;
err:
- cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
+ cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, state);
cma_id_put(id_priv);
return ret;
}
@@ -4469,6 +4475,8 @@ int rdma_connect_locked(struct rdma_cm_id *id,
container_of(id, struct rdma_id_private, id);
int ret;
+ lockdep_assert_held(&id_priv->handler_mutex);
+
if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
return -EINVAL;
@@ -5506,3 +5514,129 @@ static void __exit cma_cleanup(void)
module_init(cma_init);
module_exit(cma_cleanup);
+
+static void cma_query_ib_service_handler(int status,
+ struct sa_service_rec *recs,
+ unsigned int num_recs, void *context)
+{
+ struct cma_work *work = context;
+ struct rdma_id_private *id_priv = work->id;
+ struct sockaddr_ib *addr;
+
+ if (status)
+ goto fail;
+
+ if (!num_recs) {
+ status = -ENOENT;
+ goto fail;
+ }
+
+ if (id_priv->id.route.service_recs) {
+ status = -EALREADY;
+ goto fail;
+ }
+
+ id_priv->id.route.service_recs =
+ kmalloc_array(num_recs, sizeof(*recs), GFP_KERNEL);
+ if (!id_priv->id.route.service_recs) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
+ id_priv->id.route.num_service_recs = num_recs;
+ memcpy(id_priv->id.route.service_recs, recs, sizeof(*recs) * num_recs);
+
+ addr = (struct sockaddr_ib *)&id_priv->id.route.addr.dst_addr;
+ addr->sib_family = AF_IB;
+ addr->sib_addr = *(struct ib_addr *)&recs->gid;
+ addr->sib_pkey = recs->pkey;
+ addr->sib_sid = recs->id;
+ rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr,
+ (union ib_gid *)&addr->sib_addr);
+ ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr,
+ ntohs(addr->sib_pkey));
+
+ queue_work(cma_wq, &work->work);
+ return;
+
+fail:
+ work->old_state = RDMA_CM_ADDRINFO_QUERY;
+ work->new_state = RDMA_CM_ADDR_BOUND;
+ work->event.event = RDMA_CM_EVENT_ADDRINFO_ERROR;
+ work->event.status = status;
+ pr_debug_ratelimited(
+ "RDMA CM: SERVICE_ERROR: failed to query service record. status %d\n",
+ status);
+ queue_work(cma_wq, &work->work);
+}
+
+static int cma_resolve_ib_service(struct rdma_id_private *id_priv,
+ struct rdma_ucm_ib_service *ibs)
+{
+ struct sa_service_rec sr = {};
+ ib_sa_comp_mask mask = 0;
+ struct cma_work *work;
+
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
+ if (!work)
+ return -ENOMEM;
+
+ cma_id_get(id_priv);
+
+ work->id = id_priv;
+ INIT_WORK(&work->work, cma_work_handler);
+ work->old_state = RDMA_CM_ADDRINFO_QUERY;
+ work->new_state = RDMA_CM_ADDRINFO_RESOLVED;
+ work->event.event = RDMA_CM_EVENT_ADDRINFO_RESOLVED;
+
+ if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_ID) {
+ sr.id = cpu_to_be64(ibs->service_id);
+ mask |= IB_SA_SERVICE_REC_SERVICE_ID;
+ }
+ if (ibs->flags & RDMA_USER_CM_IB_SERVICE_FLAG_NAME) {
+ strscpy(sr.name, ibs->service_name, sizeof(sr.name));
+ mask |= IB_SA_SERVICE_REC_SERVICE_NAME;
+ }
+
+ id_priv->query_id = ib_sa_service_rec_get(&sa_client,
+ id_priv->id.device,
+ id_priv->id.port_num,
+ &sr, mask,
+ 2000, GFP_KERNEL,
+ cma_query_ib_service_handler,
+ work, &id_priv->query);
+
+ if (id_priv->query_id < 0) {
+ cma_id_put(id_priv);
+ kfree(work);
+ return id_priv->query_id;
+ }
+
+ return 0;
+}
+
+int rdma_resolve_ib_service(struct rdma_cm_id *id,
+ struct rdma_ucm_ib_service *ibs)
+{
+ struct rdma_id_private *id_priv;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ if (!id_priv->cma_dev ||
+ !cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDRINFO_QUERY))
+ return -EINVAL;
+
+ if (rdma_cap_ib_sa(id->device, id->port_num))
+ ret = cma_resolve_ib_service(id_priv, ibs);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ cma_comp_exch(id_priv, RDMA_CM_ADDRINFO_QUERY, RDMA_CM_ADDR_BOUND);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_resolve_ib_service);
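A hypothetical consumer sketch of the new export; example_lookup() and the service name are invented for illustration, while rdma_resolve_ib_service() and the rdma_ucm_ib_service fields come from the hunks above. The cm_id must already be bound to a device (see the RDMA_CM_ADDR_BOUND check), and completion is reported asynchronously via the RDMA_CM_EVENT_ADDRINFO_RESOLVED event.

static int example_lookup(struct rdma_cm_id *id)
{
	struct rdma_ucm_ib_service ibs = {
		.flags = RDMA_USER_CM_IB_SERVICE_FLAG_NAME,
	};

	strscpy(ibs.service_name, "example-service",
		sizeof(ibs.service_name));

	/* Returns 0 on success; the SA query then runs in the background. */
	return rdma_resolve_ib_service(id, &ibs);
}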
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index b7354c94cf1b..c604b601f4d9 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -47,7 +47,9 @@ enum rdma_cm_state {
RDMA_CM_ADDR_BOUND,
RDMA_CM_LISTEN,
RDMA_CM_DEVICE_REMOVAL,
- RDMA_CM_DESTROYING
+ RDMA_CM_DESTROYING,
+ RDMA_CM_ADDRINFO_QUERY,
+ RDMA_CM_ADDRINFO_RESOLVED
};
struct rdma_id_private {
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 3145cb34a1d2..13e8a1714bbd 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1543,7 +1543,7 @@ static void __ib_unregister_device(struct ib_device *ib_dev)
/*
* We have a registration lock so that all the calls to unregister are
- * fully fenced, once any unregister returns the device is truely
+ * fully fenced, once any unregister returns the device is truly
* unregistered even if multiple callers are unregistering it at the
* same time. This also interacts with the registration flow and
* provides sane semantics if register and unregister are racing.
@@ -3021,7 +3021,7 @@ static int __init ib_core_init(void)
{
int ret = -ENOMEM;
- ib_wq = alloc_workqueue("infiniband", 0, 0);
+ ib_wq = alloc_workqueue("infiniband", WQ_PERCPU, 0);
if (!ib_wq)
return -ENOMEM;
@@ -3031,7 +3031,7 @@ static int __init ib_core_init(void)
goto err;
ib_comp_wq = alloc_workqueue("ib-comp-wq",
- WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS | WQ_PERCPU, 0);
if (!ib_comp_wq)
goto err_unbound;
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index a7de6f403fca..b097cfcade1c 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -175,7 +175,7 @@ void rdma_restrack_new(struct rdma_restrack_entry *res,
EXPORT_SYMBOL(rdma_restrack_new);
/**
- * rdma_restrack_add() - add object to the reource tracking database
+ * rdma_restrack_add() - add object to the resource tracking database
* @res: resource entry
*/
void rdma_restrack_add(struct rdma_restrack_entry *res)
@@ -277,7 +277,7 @@ int rdma_restrack_put(struct rdma_restrack_entry *res)
EXPORT_SYMBOL(rdma_restrack_put);
/**
- * rdma_restrack_del() - delete object from the reource tracking database
+ * rdma_restrack_del() - delete object from the resource tracking database
* @res: resource entry
*/
void rdma_restrack_del(struct rdma_restrack_entry *res)
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 53571e6b3162..c23e9c847314 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -107,6 +107,8 @@ struct ib_sa_device {
struct ib_sa_query {
void (*callback)(struct ib_sa_query *sa_query, int status,
struct ib_sa_mad *mad);
+ void (*rmpp_callback)(struct ib_sa_query *sa_query, int status,
+ struct ib_mad_recv_wc *mad);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
@@ -150,6 +152,13 @@ struct ib_sa_mcmember_query {
struct ib_sa_query sa_query;
};
+struct ib_sa_service_query {
+ void (*callback)(int status, struct sa_service_rec *rec,
+ unsigned int num_services, void *context);
+ void *context;
+ struct ib_sa_query sa_query;
+};
+
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
@@ -684,6 +693,58 @@ static const struct ib_field guidinfo_rec_table[] = {
.size_bits = 512 },
};
+#define SERVICE_REC_FIELD(field) \
+ .struct_offset_bytes = offsetof(struct sa_service_rec, field), \
+ .struct_size_bytes = sizeof_field(struct sa_service_rec, field), \
+ .field_name = "sa_service_rec:" #field
+
+static const struct ib_field service_rec_table[] = {
+ { SERVICE_REC_FIELD(id),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 64 },
+ { SERVICE_REC_FIELD(gid),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(pkey),
+ .offset_words = 6,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { RESERVED,
+ .offset_words = 6,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { SERVICE_REC_FIELD(lease),
+ .offset_words = 7,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { SERVICE_REC_FIELD(key),
+ .offset_words = 8,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(name),
+ .offset_words = 12,
+ .offset_bits = 0,
+ .size_bits = 512 },
+ { SERVICE_REC_FIELD(data_8),
+ .offset_words = 28,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_16),
+ .offset_words = 32,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_32),
+ .offset_words = 36,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { SERVICE_REC_FIELD(data_64),
+ .offset_words = 40,
+ .offset_bits = 0,
+ .size_bits = 128 },
+};
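A quick sanity check on the layout above: the last field (data_64) starts at word 40 and spans 128 bits (4 words), so the record ends at word 44, i.e. 44 * 4 = 176 bytes on the wire, matching the IB_SERVICE_REC_SZ that the RMPP unpack path below relies on.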
+
#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
@@ -1013,6 +1074,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+
delta = timeout - sa_local_svc_timeout_ms;
if (delta < 0)
abs_delta = -delta;
@@ -1020,7 +1083,6 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
abs_delta = delta;
if (delta != 0) {
- spin_lock_irqsave(&ib_nl_request_lock, flags);
sa_local_svc_timeout_ms = timeout;
list_for_each_entry(query, &ib_nl_request_list, list) {
if (delta < 0 && abs_delta > query->timeout)
@@ -1038,9 +1100,10 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
if (delay)
mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
(unsigned long)delay);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
+
settimeout_out:
return 0;
}
@@ -1390,6 +1453,20 @@ void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute)
}
EXPORT_SYMBOL(ib_sa_pack_path);
+void ib_sa_pack_service(struct sa_service_rec *rec, void *attribute)
+{
+ ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), rec,
+ attribute);
+}
+EXPORT_SYMBOL(ib_sa_pack_service);
+
+void ib_sa_unpack_service(void *attribute, struct sa_service_rec *rec)
+{
+ ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table), attribute,
+ rec);
+}
+EXPORT_SYMBOL(ib_sa_unpack_service);
+
static bool ib_sa_opa_pathrecord_support(struct ib_sa_client *client,
struct ib_sa_device *sa_dev,
u32 port_num)
@@ -1479,6 +1556,68 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
}
}
+#define IB_SA_DATA_OFFS 56
+#define IB_SERVICE_REC_SZ 176
+
+static void ib_unpack_service_rmpp(struct sa_service_rec *rec,
+ struct ib_mad_recv_wc *mad_wc,
+ int num_services)
+{
+ unsigned int cp_sz, data_i, data_size, rec_i = 0, buf_i = 0;
+ struct ib_mad_recv_buf *mad_buf;
+ u8 buf[IB_SERVICE_REC_SZ];
+ u8 *data;
+
+	data_size = sizeof_field(struct ib_sa_mad, data);
+
+ list_for_each_entry(mad_buf, &mad_wc->rmpp_list, list) {
+ data = ((struct ib_sa_mad *) mad_buf->mad)->data;
+ data_i = 0;
+ while (data_i < data_size && rec_i < num_services) {
+ cp_sz = min(IB_SERVICE_REC_SZ - buf_i,
+ data_size - data_i);
+ memcpy(buf + buf_i, data + data_i, cp_sz);
+ data_i += cp_sz;
+ buf_i += cp_sz;
+ if (buf_i == IB_SERVICE_REC_SZ) {
+ ib_sa_unpack_service(buf, rec + rec_i);
+ buf_i = 0;
+ rec_i++;
+ }
+ }
+ }
+}
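A worked example of the reassembly, assuming the usual 200-byte SA data payload per RMPP segment (sizeof_field(struct ib_sa_mad, data)):

/*
 * With 176-byte records in 200-byte segments, record 0 occupies bytes
 * 0..175 of segment 0; record 1 takes the trailing 24 bytes of
 * segment 0 plus bytes 0..151 of segment 1. The bounce buffer 'buf'
 * exists precisely so a record straddling two segments is contiguous
 * before ib_sa_unpack_service() runs.
 */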
+
+static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query, int status,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct ib_sa_service_query *query =
+ container_of(sa_query, struct ib_sa_service_query, sa_query);
+ struct sa_service_rec *rec;
+ int num_services;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad) {
+ query->callback(status, NULL, 0, query->context);
+ return;
+ }
+
+ num_services = (mad_wc->mad_len - IB_SA_DATA_OFFS) / IB_SERVICE_REC_SZ;
+ if (!num_services) {
+ query->callback(-ENODATA, NULL, 0, query->context);
+ return;
+ }
+
+ rec = kmalloc_array(num_services, sizeof(*rec), GFP_KERNEL);
+ if (!rec) {
+ query->callback(-ENOMEM, NULL, 0, query->context);
+ return;
+ }
+
+ ib_unpack_service_rmpp(rec, mad_wc, num_services);
+ query->callback(status, rec, num_services, query->context);
+ kfree(rec);
+}
+
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
struct ib_sa_path_query *query =
@@ -1488,6 +1627,14 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
kfree(query);
}
+static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
+{
+ struct ib_sa_service_query *query =
+ container_of(sa_query, struct ib_sa_service_query, sa_query);
+
+ kfree(query);
+}
+
/**
* ib_sa_path_rec_get - Start a Path get query
* @client:SA client
@@ -1618,6 +1765,101 @@ err1:
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
+/**
+ * ib_sa_service_rec_get - Start a Service get query
+ * @client: SA client
+ * @device: device to send query on
+ * @port_num: port number to send query on
+ * @rec: Service Record to send in query
+ * @comp_mask: component mask to send in query
+ * @timeout_ms: time to wait for response
+ * @gfp_mask: GFP mask to use for internal allocations
+ * @callback: function called when query completes, times out or is
+ * canceled
+ * @context: opaque user context passed to callback
+ * @sa_query: query context, used to cancel query
+ *
+ * Send a Service Record Get query to the SA to look up service records. The
+ * callback function will be called when the query completes (or
+ * fails); status is 0 for a successful response, -EINTR if the query
+ * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
+ * occurred sending the query. The resp parameter of the callback is
+ * only valid if status is 0.
+ *
+ * If the return value of ib_sa_service_rec_get() is negative, it is an
+ * error code. Otherwise it is a query ID that can be used to cancel
+ * the query.
+ */
+int ib_sa_service_rec_get(struct ib_sa_client *client,
+ struct ib_device *device, u32 port_num,
+ struct sa_service_rec *rec,
+ ib_sa_comp_mask comp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct sa_service_rec *resp,
+ unsigned int num_services,
+ void *context),
+ void *context, struct ib_sa_query **sa_query)
+{
+ struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+ struct ib_sa_service_query *query;
+ struct ib_mad_agent *agent;
+ struct ib_sa_port *port;
+ struct ib_sa_mad *mad;
+ int ret;
+
+ if (!sa_dev)
+ return -ENODEV;
+
+ port = &sa_dev->port[port_num - sa_dev->start_port];
+ agent = port->agent;
+
+ query = kzalloc(sizeof(*query), gfp_mask);
+ if (!query)
+ return -ENOMEM;
+
+ query->sa_query.port = port;
+
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
+ goto err1;
+
+ ib_sa_client_get(client);
+ query->sa_query.client = client;
+ query->callback = callback;
+ query->context = context;
+
+ mad = query->sa_query.mad_buf->mad;
+ init_mad(&query->sa_query, agent);
+
+ query->sa_query.rmpp_callback = callback ? ib_sa_service_rec_callback :
+ NULL;
+ query->sa_query.release = ib_sa_service_rec_release;
+ mad->mad_hdr.method = IB_MGMT_METHOD_GET_TABLE;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+ mad->sa_hdr.comp_mask = comp_mask;
+
+ ib_sa_pack_service(rec, mad->data);
+
+ *sa_query = &query->sa_query;
+ query->sa_query.mad_buf->context[1] = rec;
+
+ ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+ if (ret < 0)
+ goto err2;
+
+ return ret;
+
+err2:
+ *sa_query = NULL;
+ ib_sa_client_put(query->sa_query.client);
+ free_mad(&query->sa_query);
+err1:
+ kfree(query);
+ return ret;
+}
+EXPORT_SYMBOL(ib_sa_service_rec_get);
+
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
int status, struct ib_sa_mad *mad)
{
@@ -1987,23 +2229,29 @@ static void send_handler(struct ib_mad_agent *agent,
{
struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
unsigned long flags;
+ int status = 0;
- if (query->callback)
+ if (query->callback || query->rmpp_callback) {
switch (mad_send_wc->status) {
case IB_WC_SUCCESS:
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, NULL);
+ status = -ETIMEDOUT;
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, NULL);
+ status = -EINTR;
break;
default:
- query->callback(query, -EIO, NULL);
+ status = -EIO;
break;
}
+	if (status) {
+		if (query->callback)
+			query->callback(query, status, NULL);
+		else
+			query->rmpp_callback(query, status, NULL);
+	}
+ }
+
xa_lock_irqsave(&queries, flags);
__xa_erase(&queries, query->id);
xa_unlock_irqrestore(&queries, flags);
@@ -2019,17 +2267,25 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_sa_query *query;
+ struct ib_mad *mad;
+
if (!send_buf)
return;
query = send_buf->context[0];
- if (query->callback) {
+ mad = mad_recv_wc->recv_buf.mad;
+
+ if (query->rmpp_callback) {
+ if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
+ query->rmpp_callback(query, mad->mad_hdr.status ?
+ -EINVAL : 0, mad_recv_wc);
+ else
+ query->rmpp_callback(query, -EIO, NULL);
+ } else if (query->callback) {
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
- query->callback(query,
- mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0,
- (struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
+ query->callback(query, mad->mad_hdr.status ?
+ -EINVAL : 0, (struct ib_sa_mad *)mad);
else
query->callback(query, -EIO, NULL);
}
@@ -2181,8 +2437,9 @@ static int ib_sa_add_one(struct ib_device *device)
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
- NULL, 0, send_handler,
- recv_handler, sa_dev, 0);
+ NULL, IB_MGMT_RMPP_VERSION,
+ send_handler, recv_handler,
+ sa_dev, 0);
if (IS_ERR(sa_dev->port[i].agent)) {
ret = PTR_ERR(sa_dev->port[i].agent);
goto err;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 6e700b974033..ec3be65a2b88 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -282,6 +282,10 @@ static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
}
uevent->resp.event = event->event;
uevent->resp.status = event->status;
+
+ if (event->event == RDMA_CM_EVENT_ADDRINFO_RESOLVED)
+ goto out;
+
if (ctx->cm_id->qp_type == IB_QPT_UD)
ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,
&event->param.ud);
@@ -289,6 +293,7 @@ static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
ucma_copy_conn_event(&uevent->resp.param.conn,
&event->param.conn);
+out:
uevent->resp.ece.vendor_id = event->ece.vendor_id;
uevent->resp.ece.attr_mod = event->ece.attr_mod;
return uevent;
@@ -361,7 +366,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id,
if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
xa_lock(&ctx_table);
if (xa_load(&ctx_table, ctx->id) == ctx)
- queue_work(system_unbound_wq, &ctx->close_work);
+ queue_work(system_dfl_wq, &ctx->close_work);
xa_unlock(&ctx_table);
}
return 0;
@@ -728,6 +733,28 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
return ret;
}
+static ssize_t ucma_resolve_ib_service(struct ucma_file *file,
+ const char __user *inbuf, int in_len,
+ int out_len)
+{
+ struct rdma_ucm_resolve_ib_service cmd;
+ struct ucma_context *ctx;
+ int ret;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_ib_service(ctx->cm_id, &cmd.ibs);
+ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
static ssize_t ucma_resolve_route(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -994,6 +1021,43 @@ static ssize_t ucma_query_gid(struct ucma_context *ctx,
return ret;
}
+static ssize_t ucma_query_ib_service(struct ucma_context *ctx,
+ void __user *response, int out_len)
+{
+ struct rdma_ucm_query_ib_service_resp *resp;
+ int n, ret = 0;
+
+ if (out_len < sizeof(struct rdma_ucm_query_ib_service_resp))
+ return -ENOSPC;
+
+ if (!ctx->cm_id->route.service_recs)
+ return -ENODATA;
+
+ resp = kzalloc(out_len, GFP_KERNEL);
+ if (!resp)
+ return -ENOMEM;
+
+ resp->num_service_recs = ctx->cm_id->route.num_service_recs;
+
+ n = (out_len - sizeof(struct rdma_ucm_query_ib_service_resp)) /
+ sizeof(struct ib_user_service_rec);
+
+ if (!n)
+ goto out;
+
+ if (n > ctx->cm_id->route.num_service_recs)
+ n = ctx->cm_id->route.num_service_recs;
+
+ memcpy(resp->recs, ctx->cm_id->route.service_recs,
+ sizeof(*resp->recs) * n);
+ if (copy_to_user(response, resp, struct_size(resp, recs, n)))
+ ret = -EFAULT;
+
+out:
+ kfree(resp);
+ return ret;
+}
+
static ssize_t ucma_query(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len)
@@ -1022,6 +1086,9 @@ static ssize_t ucma_query(struct ucma_file *file,
case RDMA_USER_CM_QUERY_GID:
ret = ucma_query_gid(ctx, response, out_len);
break;
+ case RDMA_USER_CM_QUERY_IB_SERVICE:
+ ret = ucma_query_ib_service(ctx, response, out_len);
+ break;
default:
ret = -ENOSYS;
break;
@@ -1678,6 +1745,55 @@ err_unlock:
return ret;
}
+static ssize_t ucma_write_cm_event(struct ucma_file *file,
+ const char __user *inbuf, int in_len,
+ int out_len)
+{
+ struct rdma_ucm_write_cm_event cmd;
+ struct rdma_cm_event event = {};
+ struct ucma_event *uevent;
+ struct ucma_context *ctx;
+ int ret = 0;
+
+ if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
+ return -EFAULT;
+
+ if ((cmd.event != RDMA_CM_EVENT_USER) &&
+ (cmd.event != RDMA_CM_EVENT_INTERNAL))
+ return -EINVAL;
+
+ ctx = ucma_get_ctx(file, cmd.id);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ event.event = cmd.event;
+ event.status = cmd.status;
+ event.param.arg = cmd.param.arg;
+
+ uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
+ if (!uevent) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ uevent->ctx = ctx;
+ uevent->resp.uid = ctx->uid;
+ uevent->resp.id = ctx->id;
+ uevent->resp.event = event.event;
+ uevent->resp.status = event.status;
+ memcpy(uevent->resp.param.arg32, &event.param.arg,
+ sizeof(event.param.arg));
+
+ mutex_lock(&ctx->file->mut);
+ list_add_tail(&uevent->list, &ctx->file->event_list);
+ mutex_unlock(&ctx->file->mut);
+ wake_up_interruptible(&ctx->file->poll_wait);
+
+out:
+ ucma_put_ctx(ctx);
+ return ret;
+}
+
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
const char __user *inbuf,
int in_len, int out_len) = {
@@ -1703,7 +1819,9 @@ static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
[RDMA_USER_CM_CMD_QUERY] = ucma_query,
[RDMA_USER_CM_CMD_BIND] = ucma_bind,
[RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
- [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast
+ [RDMA_USER_CM_CMD_JOIN_MCAST] = ucma_join_multicast,
+ [RDMA_USER_CM_CMD_RESOLVE_IB_SERVICE] = ucma_resolve_ib_service,
+ [RDMA_USER_CM_CMD_WRITE_CM_EVENT] = ucma_write_cm_event,
};
static ssize_t ucma_write(struct file *filp, const char __user *buf,
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index c5b686394760..8137031c2a65 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -45,6 +45,8 @@
#include "uverbs.h"
+#define RESCHED_LOOP_CNT_THRESHOLD 0x1000
+
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
bool make_dirty = umem->writable && dirty;
@@ -55,10 +57,14 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
DMA_BIDIRECTIONAL, 0);
- for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i)
+ for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) {
unpin_user_page_range_dirty_lock(sg_page(sg),
DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);
+ if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
+ cond_resched();
+ }
+
sg_free_append_table(&umem->sgt_append);
}
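With RESCHED_LOOP_CNT_THRESHOLD = 0x1000, the release loop above yields at most once every 4096 scatterlist entries (whenever i % 0x1000 == 0), bounding scheduling latency when tearing down very large umems at the cost of a single modulus test per entry.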
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index 37cd37556510..fab5d914029d 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -206,6 +206,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
return ret;
err_free:
+ ib_umem_release(umem);
rdma_restrack_put(&cq->res);
kfree(cq);
err_event_file:
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3a5f81402d2f..11b1a194de44 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -148,6 +148,7 @@ __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
case IB_RATE_400_GBPS: return 160;
case IB_RATE_600_GBPS: return 240;
case IB_RATE_800_GBPS: return 320;
+ case IB_RATE_1600_GBPS: return 640;
default: return -1;
}
}
@@ -178,6 +179,7 @@ __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
case 160: return IB_RATE_400_GBPS;
case 240: return IB_RATE_600_GBPS;
case 320: return IB_RATE_800_GBPS;
+ case 640: return IB_RATE_1600_GBPS;
default: return IB_RATE_PORT_CURRENT;
}
}
@@ -208,6 +210,7 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
case IB_RATE_400_GBPS: return 425000;
case IB_RATE_600_GBPS: return 637500;
case IB_RATE_800_GBPS: return 850000;
+ case IB_RATE_1600_GBPS: return 1700000;
default: return -1;
}
}
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index df61b2299ec0..c42b22ac3303 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -13,4 +13,6 @@ obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns/
obj-$(CONFIG_INFINIBAND_QEDR) += qedr/
obj-$(CONFIG_INFINIBAND_BNXT_RE) += bnxt_re/
+obj-$(CONFIG_INFINIBAND_BNG_RE) += bng_re/
obj-$(CONFIG_INFINIBAND_ERDMA) += erdma/
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic/
diff --git a/drivers/infiniband/hw/bng_re/Kconfig b/drivers/infiniband/hw/bng_re/Kconfig
new file mode 100644
index 000000000000..85845f72c64d
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config INFINIBAND_BNG_RE
+ tristate "Broadcom Next generation RoCE HCA support"
+ depends on 64BIT
+ depends on INET && DCB && BNGE
+ help
+ This driver supports Broadcom Next generation
+ 50/100/200/400/800 gigabit RoCE HCAs. The module
+ will be called bng_re. To compile this driver
+ as a module, choose M here.
diff --git a/drivers/infiniband/hw/bng_re/Makefile b/drivers/infiniband/hw/bng_re/Makefile
new file mode 100644
index 000000000000..c6aaaf853c77
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y := -I $(srctree)/drivers/net/ethernet/broadcom/bnge -I $(srctree)/drivers/infiniband/hw/bnxt_re
+
+obj-$(CONFIG_INFINIBAND_BNG_RE) += bng_re.o
+
+bng_re-y := bng_dev.o bng_fw.o \
+ bng_res.o bng_sp.o \
+ bng_debugfs.o
diff --git a/drivers/infiniband/hw/bng_re/bng_debugfs.c b/drivers/infiniband/hw/bng_re/bng_debugfs.c
new file mode 100644
index 000000000000..9ec5a8785250
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_debugfs.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+#include <linux/debugfs.h>
+#include <linux/pci.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "bng_res.h"
+#include "bng_fw.h"
+#include "bnge.h"
+#include "bnge_auxr.h"
+#include "bng_re.h"
+#include "bng_debugfs.h"
+
+static struct dentry *bng_re_debugfs_root;
+
+void bng_re_debugfs_add_pdev(struct bng_re_dev *rdev)
+{
+ struct pci_dev *pdev = rdev->aux_dev->pdev;
+
+ rdev->dbg_root =
+ debugfs_create_dir(dev_name(&pdev->dev), bng_re_debugfs_root);
+}
+
+void bng_re_debugfs_rem_pdev(struct bng_re_dev *rdev)
+{
+ debugfs_remove_recursive(rdev->dbg_root);
+ rdev->dbg_root = NULL;
+}
+
+void bng_re_register_debugfs(void)
+{
+ bng_re_debugfs_root = debugfs_create_dir("bng_re", NULL);
+}
+
+void bng_re_unregister_debugfs(void)
+{
+ debugfs_remove(bng_re_debugfs_root);
+}
diff --git a/drivers/infiniband/hw/bng_re/bng_debugfs.h b/drivers/infiniband/hw/bng_re/bng_debugfs.h
new file mode 100644
index 000000000000..baef71df4242
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_debugfs.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2025 Broadcom.
+
+#ifndef __BNG_RE_DEBUGFS__
+#define __BNG_RE_DEBUGFS__
+
+void bng_re_debugfs_add_pdev(struct bng_re_dev *rdev);
+void bng_re_debugfs_rem_pdev(struct bng_re_dev *rdev);
+
+void bng_re_register_debugfs(void);
+void bng_re_unregister_debugfs(void);
+#endif
diff --git a/drivers/infiniband/hw/bng_re/bng_dev.c b/drivers/infiniband/hw/bng_re/bng_dev.c
new file mode 100644
index 000000000000..d8f8d7f7075f
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_dev.c
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/auxiliary_bus.h>
+
+#include <rdma/ib_verbs.h>
+
+#include "bng_res.h"
+#include "bng_sp.h"
+#include "bng_fw.h"
+#include "bnge.h"
+#include "bnge_auxr.h"
+#include "bng_re.h"
+#include "bnge_hwrm.h"
+#include "bng_debugfs.h"
+
+MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@broadcom.com>");
+MODULE_DESCRIPTION(BNG_RE_DESC);
+MODULE_LICENSE("Dual BSD/GPL");
+
+static struct bng_re_dev *bng_re_dev_add(struct auxiliary_device *adev,
+ struct bnge_auxr_dev *aux_dev)
+{
+ struct bng_re_dev *rdev;
+
+ /* Allocate bng_re_dev instance */
+ rdev = ib_alloc_device(bng_re_dev, ibdev);
+ if (!rdev) {
+ pr_err("%s: bng_re_dev allocation failure!\n", KBUILD_MODNAME);
+ return NULL;
+ }
+
+ /* Assign auxiliary device specific data */
+ rdev->netdev = aux_dev->net;
+ rdev->aux_dev = aux_dev;
+ rdev->adev = adev;
+ rdev->fn_id = rdev->aux_dev->pdev->devfn;
+
+ return rdev;
+}
+
+static int bng_re_register_netdev(struct bng_re_dev *rdev)
+{
+ struct bnge_auxr_dev *aux_dev;
+
+ aux_dev = rdev->aux_dev;
+ return bnge_register_dev(aux_dev, rdev->adev);
+}
+
+static void bng_re_destroy_chip_ctx(struct bng_re_dev *rdev)
+{
+ struct bng_re_chip_ctx *chip_ctx;
+
+ if (!rdev->chip_ctx)
+ return;
+
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+
+ chip_ctx = rdev->chip_ctx;
+ rdev->chip_ctx = NULL;
+ rdev->rcfw.res = NULL;
+ rdev->bng_res.cctx = NULL;
+ rdev->bng_res.pdev = NULL;
+ kfree(chip_ctx);
+}
+
+static int bng_re_setup_chip_ctx(struct bng_re_dev *rdev)
+{
+ struct bng_re_chip_ctx *chip_ctx;
+ struct bnge_auxr_dev *aux_dev;
+ int rc = -ENOMEM;
+
+ aux_dev = rdev->aux_dev;
+ rdev->bng_res.pdev = aux_dev->pdev;
+ rdev->rcfw.res = &rdev->bng_res;
+ chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
+ if (!chip_ctx)
+ return -ENOMEM;
+ chip_ctx->chip_num = aux_dev->chip_num;
+ chip_ctx->hw_stats_size = aux_dev->hw_ring_stats_size;
+
+ rdev->chip_ctx = chip_ctx;
+ rdev->bng_res.cctx = rdev->chip_ctx;
+ rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
+ if (!rdev->dev_attr)
+ goto free_chip_ctx;
+ rdev->bng_res.dattr = rdev->dev_attr;
+
+ return 0;
+free_chip_ctx:
+ kfree(rdev->chip_ctx);
+ rdev->chip_ctx = NULL;
+ return rc;
+}
+
+static void bng_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
+{
+ hdr->req_type = cpu_to_le16(opcd);
+ hdr->cmpl_ring = cpu_to_le16(-1);
+ hdr->target_id = cpu_to_le16(-1);
+}
+
+static void bng_re_fill_fw_msg(struct bnge_fw_msg *fw_msg, void *msg,
+ int msg_len, void *resp, int resp_max_len,
+ int timeout)
+{
+ fw_msg->msg = msg;
+ fw_msg->msg_len = msg_len;
+ fw_msg->resp = resp;
+ fw_msg->resp_max_len = resp_max_len;
+ fw_msg->timeout = timeout;
+}
+
+static int bng_re_net_ring_free(struct bng_re_dev *rdev,
+ u16 fw_ring_id, int type)
+{
+ struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
+ struct hwrm_ring_free_input req = {};
+ struct hwrm_ring_free_output resp;
+ struct bnge_fw_msg fw_msg = {};
+ int rc = -EINVAL;
+
+ if (!aux_dev)
+ return rc;
+
+ bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE);
+ req.ring_type = type;
+ req.ring_id = cpu_to_le16(fw_ring_id);
+ bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
+ rc = bnge_send_msg(aux_dev, &fw_msg);
+ if (rc)
+ ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
+ req.ring_id, rc);
+ return rc;
+}
+
+static int bng_re_net_ring_alloc(struct bng_re_dev *rdev,
+ struct bng_re_ring_attr *ring_attr,
+ u16 *fw_ring_id)
+{
+ struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
+ struct hwrm_ring_alloc_input req = {};
+ struct hwrm_ring_alloc_output resp;
+ struct bnge_fw_msg fw_msg = {};
+ int rc = -EINVAL;
+
+ if (!aux_dev)
+ return rc;
+
+ bng_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
+ req.enables = 0;
+ req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
+ if (ring_attr->pages > 1) {
+ /* Page size is in log2 units */
+ req.page_size = BNGE_PAGE_SHIFT;
+ req.page_tbl_depth = 1;
+ }
+ req.fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req.logical_id = cpu_to_le16(ring_attr->lrid);
+ req.length = cpu_to_le32(ring_attr->depth + 1);
+ req.ring_type = ring_attr->type;
+ req.int_mode = ring_attr->mode;
+ bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
+ rc = bnge_send_msg(aux_dev, &fw_msg);
+ if (!rc)
+ *fw_ring_id = le16_to_cpu(resp.ring_id);
+
+ return rc;
+}
+
+static int bng_re_stats_ctx_free(struct bng_re_dev *rdev)
+{
+ struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
+ struct hwrm_stat_ctx_free_input req = {};
+ struct hwrm_stat_ctx_free_output resp = {};
+ struct bnge_fw_msg fw_msg = {};
+ int rc = -EINVAL;
+
+ if (!aux_dev)
+ return rc;
+
+ bng_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
+ req.stat_ctx_id = cpu_to_le32(rdev->stats_ctx.fw_id);
+ bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
+ rc = bnge_send_msg(aux_dev, &fw_msg);
+ if (rc)
+ ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
+ rc);
+
+ return rc;
+}
+
+static int bng_re_stats_ctx_alloc(struct bng_re_dev *rdev)
+{
+ struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
+ struct bng_re_stats *stats = &rdev->stats_ctx;
+ struct hwrm_stat_ctx_alloc_output resp = {};
+ struct hwrm_stat_ctx_alloc_input req = {};
+ struct bnge_fw_msg fw_msg = {};
+ int rc = -EINVAL;
+
+ stats->fw_id = BNGE_INVALID_STATS_CTX_ID;
+
+ if (!aux_dev)
+ return rc;
+
+ bng_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
+ req.update_period_ms = cpu_to_le32(1000);
+ req.stats_dma_addr = cpu_to_le64(stats->dma_map);
+ req.stats_dma_length = cpu_to_le16(rdev->chip_ctx->hw_stats_size);
+ req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
+ bng_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNGE_DFLT_HWRM_CMD_TIMEOUT);
+ rc = bnge_send_msg(aux_dev, &fw_msg);
+ if (!rc)
+ stats->fw_id = le32_to_cpu(resp.stat_ctx_id);
+ return rc;
+}
+
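+/*
+ * Illustrative note (not from firmware documentation): the query below
+ * packs the interface version into a single u64 so that versions compare
+ * as plain integers. For example, interface version 1.10.2.95 packs as
+ * (1ULL << 48) | (10ULL << 32) | (2ULL << 16) | 95 = 0x0001000a0002005f.
+ */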
+static void bng_re_query_hwrm_version(struct bng_re_dev *rdev)
+{
+ struct bnge_auxr_dev *aux_dev = rdev->aux_dev;
+ struct hwrm_ver_get_output ver_get_resp = {};
+ struct hwrm_ver_get_input ver_get_req = {};
+ struct bng_re_chip_ctx *cctx;
+ struct bnge_fw_msg fw_msg = {};
+ int rc;
+
+ bng_re_init_hwrm_hdr((void *)&ver_get_req, HWRM_VER_GET);
+ ver_get_req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ ver_get_req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ ver_get_req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ bng_re_fill_fw_msg(&fw_msg, (void *)&ver_get_req, sizeof(ver_get_req),
+ (void *)&ver_get_resp, sizeof(ver_get_resp),
+ BNGE_DFLT_HWRM_CMD_TIMEOUT);
+ rc = bnge_send_msg(aux_dev, &fw_msg);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
+ rc);
+ return;
+ }
+
+ cctx = rdev->chip_ctx;
+ cctx->hwrm_intf_ver =
+ (u64)le16_to_cpu(ver_get_resp.hwrm_intf_major) << 48 |
+ (u64)le16_to_cpu(ver_get_resp.hwrm_intf_minor) << 32 |
+ (u64)le16_to_cpu(ver_get_resp.hwrm_intf_build) << 16 |
+ le16_to_cpu(ver_get_resp.hwrm_intf_patch);
+
+ cctx->hwrm_cmd_max_timeout = le16_to_cpu(ver_get_resp.max_req_timeout);
+
+ if (!cctx->hwrm_cmd_max_timeout)
+ cctx->hwrm_cmd_max_timeout = BNG_ROCE_FW_MAX_TIMEOUT;
+}
+
+static void bng_re_dev_uninit(struct bng_re_dev *rdev)
+{
+ int rc;
+
+ bng_re_debugfs_rem_pdev(rdev);
+
+ if (test_and_clear_bit(BNG_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
+ rc = bng_re_deinit_rcfw(&rdev->rcfw);
+ if (rc)
+ ibdev_warn(&rdev->ibdev,
+ "Failed to deinitialize RCFW: %#x", rc);
+ bng_re_stats_ctx_free(rdev);
+ bng_re_free_stats_ctx_mem(rdev->bng_res.pdev, &rdev->stats_ctx);
+ bng_re_disable_rcfw_channel(&rdev->rcfw);
+ bng_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id,
+ RING_ALLOC_REQ_RING_TYPE_NQ);
+ bng_re_free_rcfw_channel(&rdev->rcfw);
+ }
+
+ kfree(rdev->nqr);
+ rdev->nqr = NULL;
+ bng_re_destroy_chip_ctx(rdev);
+ if (test_and_clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
+ bnge_unregister_dev(rdev->aux_dev);
+}
+
+static int bng_re_dev_init(struct bng_re_dev *rdev)
+{
+ struct bng_re_ring_attr rattr = {};
+ struct bng_re_creq_ctx *creq;
+ u32 db_offt;
+ int vid;
+ u8 type;
+ int rc;
+
+ /* Register the new RoCE device instance with the netdev */
+ rc = bng_re_register_netdev(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to register with netedev: %#x\n", rc);
+ return -EINVAL;
+ }
+
+ set_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+
+ if (rdev->aux_dev->auxr_info->msix_requested < BNG_RE_MIN_MSIX) {
+ ibdev_err(&rdev->ibdev,
+ "RoCE requires minimum 2 MSI-X vectors, but only %d reserved\n",
+ rdev->aux_dev->auxr_info->msix_requested);
+ bnge_unregister_dev(rdev->aux_dev);
+ clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ return -EINVAL;
+ }
+ ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+ rdev->aux_dev->auxr_info->msix_requested);
+
+ rc = bng_re_setup_chip_ctx(rdev);
+ if (rc) {
+ bnge_unregister_dev(rdev->aux_dev);
+ clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ ibdev_err(&rdev->ibdev, "Failed to get chip context\n");
+ return -EINVAL;
+ }
+
+ bng_re_query_hwrm_version(rdev);
+
+ rc = bng_re_alloc_fw_channel(&rdev->bng_res, &rdev->rcfw);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate RCFW Channel: %#x\n", rc);
+ goto fail;
+ }
+
+ /* Allocate nq record memory */
+ rdev->nqr = kzalloc(sizeof(*rdev->nqr), GFP_KERNEL);
+ if (!rdev->nqr) {
+ bng_re_destroy_chip_ctx(rdev);
+ bnge_unregister_dev(rdev->aux_dev);
+ clear_bit(BNG_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ return -ENOMEM;
+ }
+
+ rdev->nqr->num_msix = rdev->aux_dev->auxr_info->msix_requested;
+ memcpy(rdev->nqr->msix_entries, rdev->aux_dev->msix_info,
+ sizeof(struct bnge_msix_info) * rdev->nqr->num_msix);
+
+ type = RING_ALLOC_REQ_RING_TYPE_NQ;
+ creq = &rdev->rcfw.creq;
+ rattr.dma_arr = creq->hwq.pbl[BNG_PBL_LVL_0].pg_map_arr;
+ rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
+ rattr.type = type;
+ rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
+ rattr.depth = BNG_FW_CREQE_MAX_CNT - 1;
+ rattr.lrid = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].ring_idx;
+ rc = bng_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
+ goto free_rcfw;
+ }
+ db_offt = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].db_offset;
+ vid = rdev->nqr->msix_entries[BNG_RE_CREQ_NQ_IDX].vector;
+
+ rc = bng_re_enable_fw_channel(&rdev->rcfw,
+ vid, db_offt);
+ if (rc) {
+ ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
+ rc);
+ goto free_ring;
+ }
+
+ rc = bng_re_get_dev_attr(&rdev->rcfw);
+ if (rc)
+ goto disable_rcfw;
+
+ bng_re_debugfs_add_pdev(rdev);
+ rc = bng_re_alloc_stats_ctx_mem(rdev->bng_res.pdev, rdev->chip_ctx,
+ &rdev->stats_ctx);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate stats context: %#x\n", rc);
+ goto disable_rcfw;
+ }
+
+ rc = bng_re_stats_ctx_alloc(rdev);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate QPLIB context: %#x\n", rc);
+ goto free_stats_ctx;
+ }
+
+ rc = bng_re_init_rcfw(&rdev->rcfw, &rdev->stats_ctx);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to initialize RCFW: %#x\n", rc);
+ goto free_sctx;
+ }
+ set_bit(BNG_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);
+
+ return 0;
+free_sctx:
+ bng_re_stats_ctx_free(rdev);
+free_stats_ctx:
+ bng_re_free_stats_ctx_mem(rdev->bng_res.pdev, &rdev->stats_ctx);
+disable_rcfw:
+ bng_re_disable_rcfw_channel(&rdev->rcfw);
+free_ring:
+ bng_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
+free_rcfw:
+ bng_re_free_rcfw_channel(&rdev->rcfw);
+fail:
+ bng_re_dev_uninit(rdev);
+ return rc;
+}
+
+static int bng_re_add_device(struct auxiliary_device *adev)
+{
+ struct bnge_auxr_priv *auxr_priv =
+ container_of(adev, struct bnge_auxr_priv, aux_dev);
+ struct bng_re_en_dev_info *dev_info;
+ struct bng_re_dev *rdev;
+ int rc;
+
+ dev_info = auxiliary_get_drvdata(adev);
+
+ rdev = bng_re_dev_add(adev, auxr_priv->auxr_dev);
+ if (!rdev) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ dev_info->rdev = rdev;
+
+ rc = bng_re_dev_init(rdev);
+ if (rc)
+ goto re_dev_dealloc;
+
+ return 0;
+
+re_dev_dealloc:
+ ib_dealloc_device(&rdev->ibdev);
+exit:
+ return rc;
+}
+
+static void bng_re_remove_device(struct bng_re_dev *rdev,
+ struct auxiliary_device *aux_dev)
+{
+ bng_re_dev_uninit(rdev);
+ ib_dealloc_device(&rdev->ibdev);
+}
+
+static int bng_re_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct bnge_auxr_priv *aux_priv =
+ container_of(adev, struct bnge_auxr_priv, aux_dev);
+ struct bng_re_en_dev_info *en_info;
+ int rc;
+
+ en_info = kzalloc(sizeof(*en_info), GFP_KERNEL);
+ if (!en_info)
+ return -ENOMEM;
+
+ en_info->auxr_dev = aux_priv->auxr_dev;
+
+ auxiliary_set_drvdata(adev, en_info);
+
+ rc = bng_re_add_device(adev);
+ if (rc)
+ kfree(en_info);
+
+ return rc;
+}
+
+static void bng_re_remove(struct auxiliary_device *adev)
+{
+ struct bng_re_en_dev_info *dev_info = auxiliary_get_drvdata(adev);
+ struct bng_re_dev *rdev;
+
+ rdev = dev_info->rdev;
+
+ if (rdev)
+ bng_re_remove_device(rdev, adev);
+ kfree(dev_info);
+}
+
+static const struct auxiliary_device_id bng_re_id_table[] = {
+ { .name = BNG_RE_ADEV_NAME ".rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, bng_re_id_table);
+
+static struct auxiliary_driver bng_re_driver = {
+ .name = "rdma",
+ .probe = bng_re_probe,
+ .remove = bng_re_remove,
+ .id_table = bng_re_id_table,
+};
+
+static int __init bng_re_mod_init(void)
+{
+ int rc;
+
+ bng_re_register_debugfs();
+
+ rc = auxiliary_driver_register(&bng_re_driver);
+ if (rc) {
+ pr_err("%s: Failed to register auxiliary driver\n",
+ KBUILD_MODNAME);
+ goto unreg_debugfs;
+ }
+ return 0;
+unreg_debugfs:
+ bng_re_unregister_debugfs();
+ return rc;
+}
+
+static void __exit bng_re_mod_exit(void)
+{
+ auxiliary_driver_unregister(&bng_re_driver);
+ bng_re_unregister_debugfs();
+}
+
+module_init(bng_re_mod_init);
+module_exit(bng_re_mod_exit);
diff --git a/drivers/infiniband/hw/bng_re/bng_fw.c b/drivers/infiniband/hw/bng_re/bng_fw.c
new file mode 100644
index 000000000000..7d9539113cf5
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_fw.c
@@ -0,0 +1,767 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+#include <linux/pci.h>
+
+#include "roce_hsi.h"
+#include "bng_res.h"
+#include "bng_fw.h"
+#include "bng_sp.h"
+
+/**
+ * bng_re_map_rc - map return type based on opcode
+ * @opcode: roce slow path opcode
+ *
+ * case #1
+ * Firmware-initiated error recovery is a safe state machine, and the
+ * driver can consider all the underlying rdma resources free.
+ * In this state, it is safe to return success for opcodes related to
+ * destroying rdma resources (like destroy qp, destroy cq etc.).
+ *
+ * case #2
+ * If the driver detects a potential firmware stall, the state machine
+ * is not safe, and the driver cannot assume that the underlying rdma
+ * resources have been freed.
+ * In this state, it is not safe to return success for opcodes related to
+ * destroying rdma resources (like destroy qp, destroy cq etc.).
+ *
+ * Scope of this helper function is only for case #1.
+ *
+ * Returns:
+ * 0 to communicate success to caller.
+ * Non zero error code to communicate failure to caller.
+ */
+static int bng_re_map_rc(u8 opcode)
+{
+ switch (opcode) {
+ case CMDQ_BASE_OPCODE_DESTROY_QP:
+ case CMDQ_BASE_OPCODE_DESTROY_SRQ:
+ case CMDQ_BASE_OPCODE_DESTROY_CQ:
+ case CMDQ_BASE_OPCODE_DEALLOCATE_KEY:
+ case CMDQ_BASE_OPCODE_DEREGISTER_MR:
+ case CMDQ_BASE_OPCODE_DELETE_GID:
+ case CMDQ_BASE_OPCODE_DESTROY_QP1:
+ case CMDQ_BASE_OPCODE_DESTROY_AH:
+ case CMDQ_BASE_OPCODE_DEINITIALIZE_FW:
+ case CMDQ_BASE_OPCODE_MODIFY_ROCE_CC:
+ case CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE:
+ return 0;
+ default:
+ return -ETIMEDOUT;
+ }
+}
+
+void bng_re_free_rcfw_channel(struct bng_re_rcfw *rcfw)
+{
+ kfree(rcfw->crsqe_tbl);
+ bng_re_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
+ bng_re_free_hwq(rcfw->res, &rcfw->creq.hwq);
+ rcfw->pdev = NULL;
+}
+
+int bng_re_alloc_fw_channel(struct bng_re_res *res,
+ struct bng_re_rcfw *rcfw)
+{
+ struct bng_re_hwq_attr hwq_attr = {};
+ struct bng_re_sg_info sginfo = {};
+ struct bng_re_cmdq_ctx *cmdq;
+ struct bng_re_creq_ctx *creq;
+
+ rcfw->pdev = res->pdev;
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+ rcfw->res = res;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.res = rcfw->res;
+ hwq_attr.depth = BNG_FW_CREQE_MAX_CNT;
+ hwq_attr.stride = BNG_FW_CREQE_UNITS;
+ hwq_attr.type = BNG_HWQ_TYPE_QUEUE;
+
+ if (bng_re_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
+ dev_err(&rcfw->pdev->dev,
+ "HW channel CREQ allocation failed\n");
+ goto fail;
+ }
+
+ rcfw->cmdq_depth = BNG_FW_CMDQE_MAX_CNT;
+
+ sginfo.pgsize = bng_fw_cmdqe_page_size(rcfw->cmdq_depth);
+ hwq_attr.depth = rcfw->cmdq_depth & 0x7FFFFFFF;
+ hwq_attr.stride = BNG_FW_CMDQE_UNITS;
+ hwq_attr.type = BNG_HWQ_TYPE_CTX;
+ if (bng_re_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
+ dev_err(&rcfw->pdev->dev,
+ "HW channel CMDQ allocation failed\n");
+ goto fail;
+ }
+
+ rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
+ sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
+ if (!rcfw->crsqe_tbl)
+ goto fail;
+
+ spin_lock_init(&rcfw->tbl_lock);
+
+ rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
+ return 0;
+
+fail:
+ bng_re_free_rcfw_channel(rcfw);
+ return -ENOMEM;
+}
+
+static int bng_re_process_qp_event(struct bng_re_rcfw *rcfw,
+ struct creq_qp_event *qp_event,
+ u32 *num_wait)
+{
+ struct bng_re_hwq *hwq = &rcfw->cmdq.hwq;
+ struct bng_re_crsqe *crsqe;
+ u32 req_size;
+ u16 cookie;
+ bool is_waiter_alive;
+ struct pci_dev *pdev;
+ u32 wait_cmds = 0;
+ int rc = 0;
+
+ pdev = rcfw->pdev;
+ switch (qp_event->event) {
+ case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
+ dev_err(&pdev->dev, "Received QP error notification\n");
+ break;
+ default:
+ /*
+ * Command response: cmdq->lock must be acquired to synchronize
+ * command submission with completion reaping. This function is
+ * always called with creq->lock held, so use the nested variant
+ * of spin_lock.
+ */
+ spin_lock_nested(&hwq->lock, SINGLE_DEPTH_NESTING);
+ cookie = le16_to_cpu(qp_event->cookie);
+ cookie &= BNG_FW_MAX_COOKIE_VALUE;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
+ &rcfw->cmdq.flags),
+ "Unreponsive rcfw channel detected.!!")) {
+ dev_info(&pdev->dev,
+ "rcfw timedout: cookie = %#x, free_slots = %d",
+ cookie, crsqe->free_slots);
+ spin_unlock(&hwq->lock);
+ return rc;
+ }
+
+ if (crsqe->is_waiter_alive) {
+ if (crsqe->resp) {
+ memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
+ /* Insert write memory barrier to ensure that
+ * response data is copied before clearing the
+ * flags
+ */
+ smp_wmb();
+ }
+ }
+
+ wait_cmds++;
+
+ req_size = crsqe->req_size;
+ is_waiter_alive = crsqe->is_waiter_alive;
+
+ crsqe->req_size = 0;
+ if (!is_waiter_alive)
+ crsqe->resp = NULL;
+
+ crsqe->is_in_used = false;
+
+ hwq->cons += req_size;
+
+ spin_unlock(&hwq->lock);
+ }
+ *num_wait += wait_cmds;
+ return rc;
+}
+
+/* function events */
+static int bng_re_process_func_event(struct bng_re_rcfw *rcfw,
+ struct creq_func_event *func_event)
+{
+ switch (func_event->event) {
+ case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
+ case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* CREQ Completion handlers */
+static void bng_re_service_creq(struct tasklet_struct *t)
+{
+ struct bng_re_rcfw *rcfw = from_tasklet(rcfw, t, creq.creq_tasklet);
+ struct bng_re_creq_ctx *creq = &rcfw->creq;
+ u32 type, budget = BNG_FW_CREQ_ENTRY_POLL_BUDGET;
+ struct bng_re_hwq *hwq = &creq->hwq;
+ struct creq_base *creqe;
+ u32 num_wakeup = 0;
+ u32 hw_polled = 0;
+
+ /* Service the CREQ until budget is over */
+ spin_lock_bh(&hwq->lock);
+ while (budget > 0) {
+ creqe = bng_re_get_qe(hwq, hwq->cons, NULL);
+ if (!BNG_FW_CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
+ break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+
+ type = creqe->type & CREQ_BASE_TYPE_MASK;
+ switch (type) {
+ case CREQ_BASE_TYPE_QP_EVENT:
+ bng_re_process_qp_event
+ (rcfw, (struct creq_qp_event *)creqe,
+ &num_wakeup);
+ creq->stats.creq_qp_event_processed++;
+ break;
+ case CREQ_BASE_TYPE_FUNC_EVENT:
+ if (!bng_re_process_func_event
+ (rcfw, (struct creq_func_event *)creqe))
+ creq->stats.creq_func_event_processed++;
+ else
+ dev_warn(&rcfw->pdev->dev,
+ "aeqe:%#x Not handled\n", type);
+ break;
+ default:
+ if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
+ dev_warn(&rcfw->pdev->dev,
+ "creqe with event 0x%x not handled\n",
+ type);
+ break;
+ }
+ budget--;
+ hw_polled++;
+ bng_re_hwq_incr_cons(hwq->max_elements, &hwq->cons,
+ 1, &creq->creq_db.dbinfo.flags);
+ }
+
+ if (hw_polled)
+ bng_re_ring_nq_db(&creq->creq_db.dbinfo,
+ rcfw->res->cctx, true);
+ spin_unlock_bh(&hwq->lock);
+ if (num_wakeup)
+ wake_up_nr(&rcfw->cmdq.waitq, num_wakeup);
+}
+
+static int __send_message_basic_sanity(struct bng_re_rcfw *rcfw,
+ struct bng_re_cmdqmsg *msg,
+ u8 opcode)
+{
+ struct bng_re_cmdq_ctx *cmdq;
+
+ cmdq = &rcfw->cmdq;
+
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+ if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
+ opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
+ dev_err(&rcfw->pdev->dev, "RCFW already initialized!");
+ return -EINVAL;
+ }
+
+ if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
+ (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
+ opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
+ opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
+ dev_err(&rcfw->pdev->dev,
+ "RCFW not initialized, reject opcode 0x%x",
+ opcode);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int __send_message(struct bng_re_rcfw *rcfw,
+ struct bng_re_cmdqmsg *msg, u8 opcode)
+{
+ u32 bsize, free_slots, required_slots;
+ struct bng_re_cmdq_ctx *cmdq;
+ struct bng_re_crsqe *crsqe;
+ struct bng_fw_cmdqe *cmdqe;
+ struct bng_re_hwq *hwq;
+ u32 sw_prod, cmdq_prod;
+ struct pci_dev *pdev;
+ u16 cookie;
+ u8 *preq;
+
+ cmdq = &rcfw->cmdq;
+ hwq = &cmdq->hwq;
+ pdev = rcfw->pdev;
+
+ /* CMDQ entries are 16-byte units; a request can consume one or
+ * more cmdqe.
+ */
+ spin_lock_bh(&hwq->lock);
+ required_slots = bng_re_get_cmd_slots(msg->req);
+ free_slots = HWQ_FREE_SLOTS(hwq);
+ cookie = cmdq->seq_num & BNG_FW_MAX_COOKIE_VALUE;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ if (required_slots >= free_slots) {
+ dev_info_ratelimited(&pdev->dev,
+ "CMDQ is full req/free %d/%d!",
+ required_slots, free_slots);
+ spin_unlock_bh(&hwq->lock);
+ return -EAGAIN;
+ }
+ __set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
+
+ bsize = bng_re_set_cmd_slots(msg->req);
+ crsqe->free_slots = free_slots;
+ crsqe->resp = (struct creq_qp_event *)msg->resp;
+ crsqe->is_waiter_alive = true;
+ crsqe->is_in_used = true;
+ crsqe->opcode = opcode;
+
+ crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
+ if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
+ struct bng_re_rcfw_sbuf *sbuf = msg->sb;
+
+ __set_cmdq_base_resp_addr(msg->req, msg->req_sz,
+ cpu_to_le64(sbuf->dma_addr));
+ __set_cmdq_base_resp_size(msg->req, msg->req_sz,
+ ALIGN(sbuf->size,
+ BNG_FW_CMDQE_UNITS) /
+ BNG_FW_CMDQE_UNITS);
+ }
+
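+ /* Example (illustrative): a 40-byte request is copied below as three
+ * 16-byte cmdqe segments (16 + 16 + 8), advancing hwq->prod once per
+ * slot consumed.
+ */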
+ preq = (u8 *)msg->req;
+ do {
+ /* Locate the next cmdq slot */
+ sw_prod = HWQ_CMP(hwq->prod, hwq);
+ cmdqe = bng_re_get_qe(hwq, sw_prod, NULL);
+ /* Copy a segment of the req cmd to the cmdq */
+ memset(cmdqe, 0, sizeof(*cmdqe));
+ memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
+ preq += min_t(u32, bsize, sizeof(*cmdqe));
+ bsize -= min_t(u32, bsize, sizeof(*cmdqe));
+ hwq->prod++;
+ } while (bsize > 0);
+ cmdq->seq_num++;
+
+ cmdq_prod = hwq->prod & 0xFFFF;
+ if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
+ /* The very first doorbell write
+ * is required to set this flag
+ * which prompts the FW to reset
+ * its internal pointers
+ */
+ cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
+ clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
+ }
+ /* ring CMDQ DB */
+ wmb();
+ writel(cmdq_prod, cmdq->cmdq_mbox.prod);
+ writel(BNG_FW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
+ spin_unlock_bh(&hwq->lock);
+ /* Completion is reported asynchronously via the CREQ */
+ return 0;
+}
+
+/**
+ * __wait_for_resp - Sleep until the firmware response arrives
+ * @rcfw: rcfw channel instance of rdev
+ * @cookie: cookie to track the command
+ *
+ * Wait for command completion in sleepable context.
+ *
+ * Returns:
+ * 0 if command is completed by firmware.
+ * Non zero error code for rest of the case.
+ */
+static int __wait_for_resp(struct bng_re_rcfw *rcfw, u16 cookie)
+{
+ struct bng_re_cmdq_ctx *cmdq;
+ struct bng_re_crsqe *crsqe;
+
+ cmdq = &rcfw->cmdq;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ do {
+ wait_event_timeout(cmdq->waitq,
+ !crsqe->is_in_used,
+ secs_to_jiffies(rcfw->max_timeout));
+
+ if (!crsqe->is_in_used)
+ return 0;
+
+ bng_re_service_creq(&rcfw->creq.creq_tasklet);
+
+ if (!crsqe->is_in_used)
+ return 0;
+ } while (true);
+}
+
+/**
+ * bng_re_rcfw_send_message - interface to send
+ * and complete rcfw command.
+ * @rcfw: rcfw channel instance of rdev
+ * @msg: message to send
+ *
+ * This function does not account for the shadow queue depth. It sends
+ * commands unconditionally as long as the send queue is not full.
+ *
+ * Returns:
+ * 0 if command completed by firmware.
+ * Non zero if the command is not completed by firmware.
+ */
+int bng_re_rcfw_send_message(struct bng_re_rcfw *rcfw,
+ struct bng_re_cmdqmsg *msg)
+{
+ struct creq_qp_event *evnt = (struct creq_qp_event *)msg->resp;
+ struct bng_re_crsqe *crsqe;
+ u16 cookie;
+ int rc;
+ u8 opcode;
+
+ opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
+
+ rc = __send_message_basic_sanity(rcfw, msg, opcode);
+ if (rc)
+ return rc == -ENXIO ? bng_re_map_rc(opcode) : rc;
+
+ rc = __send_message(rcfw, msg, opcode);
+ if (rc)
+ return rc;
+
+ cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req, msg->req_sz))
+ & BNG_FW_MAX_COOKIE_VALUE;
+
+ rc = __wait_for_resp(rcfw, cookie);
+
+ if (rc) {
+ spin_lock_bh(&rcfw->cmdq.hwq.lock);
+ crsqe = &rcfw->crsqe_tbl[cookie];
+ crsqe->is_waiter_alive = false;
+ if (rc == -ENODEV)
+ set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
+ spin_unlock_bh(&rcfw->cmdq.hwq.lock);
+ return -ETIMEDOUT;
+ }
+
+ if (evnt->status) {
+ /* failed with status */
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
+ cookie, opcode, evnt->status);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+static int bng_re_map_cmdq_mbox(struct bng_re_rcfw *rcfw)
+{
+ struct bng_re_cmdq_mbox *mbox;
+ resource_size_t bar_reg;
+ struct pci_dev *pdev;
+
+ pdev = rcfw->pdev;
+ mbox = &rcfw->cmdq.cmdq_mbox;
+
+ mbox->reg.bar_id = BNG_FW_COMM_PCI_BAR_REGION;
+ mbox->reg.len = BNG_FW_COMM_SIZE;
+ mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
+ if (!mbox->reg.bar_base) {
+ dev_err(&pdev->dev,
+ "CMDQ BAR region %d resc start is 0!\n",
+ mbox->reg.bar_id);
+ return -ENOMEM;
+ }
+
+ bar_reg = mbox->reg.bar_base + BNG_FW_COMM_BASE_OFFSET;
+ mbox->reg.len = BNG_FW_COMM_SIZE;
+ mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
+ if (!mbox->reg.bar_reg) {
+ dev_err(&pdev->dev,
+ "CMDQ BAR region %d mapping failed\n",
+ mbox->reg.bar_id);
+ return -ENOMEM;
+ }
+
+ mbox->prod = (void __iomem *)(mbox->reg.bar_reg +
+ BNG_FW_PF_VF_COMM_PROD_OFFSET);
+ mbox->db = (void __iomem *)(mbox->reg.bar_reg + BNG_FW_COMM_TRIG_OFFSET);
+ return 0;
+}
+
+static irqreturn_t bng_re_creq_irq(int irq, void *dev_instance)
+{
+ struct bng_re_rcfw *rcfw = dev_instance;
+ struct bng_re_creq_ctx *creq;
+ struct bng_re_hwq *hwq;
+ u32 sw_cons;
+
+ creq = &rcfw->creq;
+ hwq = &creq->hwq;
+ /* Prefetch the CREQ element */
+ sw_cons = HWQ_CMP(hwq->cons, hwq);
+ bng_re_get_qe(hwq, sw_cons, NULL);
+
+ tasklet_schedule(&creq->creq_tasklet);
+ return IRQ_HANDLED;
+}
+
+int bng_re_rcfw_start_irq(struct bng_re_rcfw *rcfw, int msix_vector,
+ bool need_init)
+{
+ struct bng_re_creq_ctx *creq;
+ struct bng_re_res *res;
+ int rc;
+
+ creq = &rcfw->creq;
+ res = rcfw->res;
+
+ if (creq->irq_handler_avail)
+ return -EFAULT;
+
+ creq->msix_vec = msix_vector;
+ if (need_init)
+ tasklet_setup(&creq->creq_tasklet, bng_re_service_creq);
+ else
+ tasklet_enable(&creq->creq_tasklet);
+
+ creq->irq_name = kasprintf(GFP_KERNEL, "bng_re-creq@pci:%s",
+ pci_name(res->pdev));
+ if (!creq->irq_name)
+ return -ENOMEM;
+ rc = request_irq(creq->msix_vec, bng_re_creq_irq, 0,
+ creq->irq_name, rcfw);
+ if (rc) {
+ kfree(creq->irq_name);
+ creq->irq_name = NULL;
+ tasklet_disable(&creq->creq_tasklet);
+ return rc;
+ }
+ creq->irq_handler_avail = true;
+
+ bng_re_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
+ atomic_inc(&rcfw->rcfw_intr_enabled);
+
+ return 0;
+}
+
+static int bng_re_map_creq_db(struct bng_re_rcfw *rcfw, u32 reg_offt)
+{
+ struct bng_re_creq_db *creq_db;
+ resource_size_t bar_reg;
+ struct pci_dev *pdev;
+
+ pdev = rcfw->pdev;
+ creq_db = &rcfw->creq.creq_db;
+
+ creq_db->dbinfo.flags = 0;
+ creq_db->reg.bar_id = BNG_FW_COMM_CONS_PCI_BAR_REGION;
+ creq_db->reg.bar_base = pci_resource_start(pdev, creq_db->reg.bar_id);
+ if (!creq_db->reg.bar_base)
+ dev_err(&pdev->dev,
+ "CREQ BAR region %d resc start is 0!",
+ creq_db->reg.bar_id);
+
+ bar_reg = creq_db->reg.bar_base + reg_offt;
+
+ creq_db->reg.len = BNG_FW_CREQ_DB_LEN;
+ creq_db->reg.bar_reg = ioremap(bar_reg, creq_db->reg.len);
+ if (!creq_db->reg.bar_reg) {
+ dev_err(&pdev->dev,
+ "CREQ BAR region %d mapping failed",
+ creq_db->reg.bar_id);
+ return -ENOMEM;
+ }
+ creq_db->dbinfo.db = creq_db->reg.bar_reg;
+ creq_db->dbinfo.hwq = &rcfw->creq.hwq;
+ creq_db->dbinfo.xid = rcfw->creq.ring_id;
+ return 0;
+}
+
+void bng_re_rcfw_stop_irq(struct bng_re_rcfw *rcfw, bool kill)
+{
+ struct bng_re_creq_ctx *creq;
+
+ creq = &rcfw->creq;
+
+ if (!creq->irq_handler_avail)
+ return;
+
+ creq->irq_handler_avail = false;
+ /* Mask h/w interrupts */
+ bng_re_ring_nq_db(&creq->creq_db.dbinfo, rcfw->res->cctx, false);
+ /* Sync with last running IRQ-handler */
+ synchronize_irq(creq->msix_vec);
+ free_irq(creq->msix_vec, rcfw);
+ kfree(creq->irq_name);
+ creq->irq_name = NULL;
+ atomic_set(&rcfw->rcfw_intr_enabled, 0);
+ if (kill)
+ tasklet_kill(&creq->creq_tasklet);
+ tasklet_disable(&creq->creq_tasklet);
+}
+
+void bng_re_disable_rcfw_channel(struct bng_re_rcfw *rcfw)
+{
+ struct bng_re_creq_ctx *creq;
+ struct bng_re_cmdq_ctx *cmdq;
+
+ creq = &rcfw->creq;
+ cmdq = &rcfw->cmdq;
+ /* Make sure the HW channel is stopped! */
+ bng_re_rcfw_stop_irq(rcfw, true);
+
+ iounmap(cmdq->cmdq_mbox.reg.bar_reg);
+ iounmap(creq->creq_db.reg.bar_reg);
+
+ cmdq->cmdq_mbox.reg.bar_reg = NULL;
+ creq->creq_db.reg.bar_reg = NULL;
+ creq->msix_vec = 0;
+}
+
+static void bng_re_start_rcfw(struct bng_re_rcfw *rcfw)
+{
+ struct bng_re_cmdq_ctx *cmdq;
+ struct bng_re_creq_ctx *creq;
+ struct bng_re_cmdq_mbox *mbox;
+ struct cmdq_init init = {0};
+
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+ mbox = &cmdq->cmdq_mbox;
+
+ init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[BNG_PBL_LVL_0].pg_map_arr[0]);
+ init.cmdq_size_cmdq_lvl =
+ cpu_to_le16(((rcfw->cmdq_depth <<
+ CMDQ_INIT_CMDQ_SIZE_SFT) &
+ CMDQ_INIT_CMDQ_SIZE_MASK) |
+ ((cmdq->hwq.level <<
+ CMDQ_INIT_CMDQ_LVL_SFT) &
+ CMDQ_INIT_CMDQ_LVL_MASK));
+ init.creq_ring_id = cpu_to_le16(creq->ring_id);
+ /* Write to the mailbox register */
+ __iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
+}
+
+int bng_re_enable_fw_channel(struct bng_re_rcfw *rcfw,
+ int msix_vector,
+ int cp_bar_reg_off)
+{
+ struct bng_re_cmdq_ctx *cmdq;
+ int rc;
+
+ cmdq = &rcfw->cmdq;
+
+ /* Assign defaults */
+ cmdq->seq_num = 0;
+ set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
+ init_waitqueue_head(&cmdq->waitq);
+
+ rc = bng_re_map_cmdq_mbox(rcfw);
+ if (rc)
+ return rc;
+
+ rc = bng_re_map_creq_db(rcfw, cp_bar_reg_off);
+ if (rc)
+ return rc;
+
+ rc = bng_re_rcfw_start_irq(rcfw, msix_vector, true);
+ if (rc) {
+ dev_err(&rcfw->pdev->dev,
+ "Failed to request IRQ for CREQ rc = 0x%x\n", rc);
+ bng_re_disable_rcfw_channel(rcfw);
+ return rc;
+ }
+
+ bng_re_start_rcfw(rcfw);
+ return 0;
+}
+
+int bng_re_deinit_rcfw(struct bng_re_rcfw *rcfw)
+{
+ struct creq_deinitialize_fw_resp resp = {};
+ struct cmdq_deinitialize_fw req = {};
+ struct bng_re_cmdqmsg msg = {};
+ int rc;
+
+ bng_re_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
+ sizeof(req));
+ bng_re_fill_cmdqmsg(&msg, &req, &resp, NULL,
+ sizeof(req), sizeof(resp), 0);
+ rc = bng_re_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
+ return 0;
+}
+
+static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
+{
+ return dev_cap_flags &
+ (CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
+ CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
+}
+
+#define BNG_RE_HW_RETX(a) _is_hw_retx_supported((a))
+
+static inline bool _is_optimize_modify_qp_supported(u16 dev_cap_ext_flags2)
+{
+ return dev_cap_ext_flags2 &
+ CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED;
+}
+
+int bng_re_init_rcfw(struct bng_re_rcfw *rcfw,
+ struct bng_re_stats *stats_ctx)
+{
+ struct creq_initialize_fw_resp resp = {};
+ struct cmdq_initialize_fw req = {};
+ struct bng_re_cmdqmsg msg = {};
+ int rc;
+ u16 flags = 0;
+
+ bng_re_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_INITIALIZE_FW,
+ sizeof(req));
+ /* Supply (log-base-2-of-host-page-size - base-page-shift)
+ * to bono to adjust the doorbell page sizes.
+ */
+ req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+ BNG_FW_DBR_BASE_PAGE_SHIFT);
+ if (BNG_RE_HW_RETX(rcfw->res->dattr->dev_cap_flags))
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED;
+ if (_is_optimize_modify_qp_supported(rcfw->res->dattr->dev_cap_flags2))
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
+ req.flags |= cpu_to_le16(flags);
+ req.stat_ctx_id = cpu_to_le32(stats_ctx->fw_id);
+ bng_re_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bng_re_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
+ return 0;
+}
diff --git a/drivers/infiniband/hw/bng_re/bng_fw.h b/drivers/infiniband/hw/bng_re/bng_fw.h
new file mode 100644
index 000000000000..c89c926ec2fc
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_fw.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2025 Broadcom.
+
+#ifndef __BNG_FW_H__
+#define __BNG_FW_H__
+
+#include "bng_tlv.h"
+
+/* FW DB related */
+#define BNG_FW_CMDQ_TRIG_VAL 1
+#define BNG_FW_COMM_PCI_BAR_REGION 0
+#define BNG_FW_COMM_CONS_PCI_BAR_REGION 2
+#define BNG_FW_DBR_BASE_PAGE_SHIFT 12
+#define BNG_FW_COMM_SIZE 0x104
+#define BNG_FW_COMM_BASE_OFFSET 0x600
+#define BNG_FW_COMM_TRIG_OFFSET 0x100
+#define BNG_FW_PF_VF_COMM_PROD_OFFSET 0xc
+#define BNG_FW_CREQ_DB_LEN 8
+
+/* CREQ */
+#define BNG_FW_CREQE_MAX_CNT (64 * 1024)
+#define BNG_FW_CREQE_UNITS 16
+#define BNG_FW_CREQ_ENTRY_POLL_BUDGET 0x100
+#define BNG_FW_CREQ_CMP_VALID(hdr, pass) \
+ (!!((hdr)->v & CREQ_BASE_V) == \
+ !((pass) & BNG_RE_FLAG_EPOCH_CONS_MASK))
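+
+/*
+ * Epoch scheme sketch: the consumer's epoch flag starts at 0, so entries
+ * are valid while their V bit is 1; after each wrap of the ring the flag
+ * flips and the expected V bit inverts, which lets stale entries from the
+ * previous pass be rejected without clearing the ring.
+ */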
+
+/* CMDQ */
+struct bng_fw_cmdqe {
+ u8 data[16];
+};
+
+#define BNG_FW_CMDQE_MAX_CNT 8192
+#define BNG_FW_CMDQE_UNITS sizeof(struct bng_fw_cmdqe)
+#define BNG_FW_CMDQE_BYTES(depth) ((depth) * BNG_FW_CMDQE_UNITS)
+
+#define BNG_FW_MAX_COOKIE_VALUE (BNG_FW_CMDQE_MAX_CNT - 1)
+#define BNG_FW_CMD_IS_BLOCKING 0x8000
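+
+/*
+ * Cookies are the low bits of cmdq->seq_num masked by
+ * BNG_FW_MAX_COOKIE_VALUE, so they wrap at the CMDQ depth: e.g.
+ * seq_num 8193 maps to cookie 1 and indexes crsqe_tbl[1].
+ */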
+
+/* Crsq buf is 1024-Byte */
+struct bng_re_crsbe {
+ u8 data[1024];
+};
+
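+/*
+ * Worked example (assuming 4 KiB pages): BNG_FW_CMDQE_MAX_CNT (8192)
+ * entries of 16 bytes span 128 KiB, so bng_fw_cmdqe_npages() returns 32
+ * and bng_fw_cmdqe_page_size() returns 32 * PAGE_SIZE.
+ */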
+static inline u32 bng_fw_cmdqe_npages(u32 depth)
+{
+ u32 npages;
+
+ npages = BNG_FW_CMDQE_BYTES(depth) / PAGE_SIZE;
+ if (BNG_FW_CMDQE_BYTES(depth) % PAGE_SIZE)
+ npages++;
+ return npages;
+}
+
+static inline u32 bng_fw_cmdqe_page_size(u32 depth)
+{
+ return (bng_fw_cmdqe_npages(depth) * PAGE_SIZE);
+}
+
+struct bng_re_cmdq_mbox {
+ struct bng_re_reg_desc reg;
+ void __iomem *prod;
+ void __iomem *db;
+};
+
+/* HWQ */
+struct bng_re_cmdq_ctx {
+ struct bng_re_hwq hwq;
+ struct bng_re_cmdq_mbox cmdq_mbox;
+ unsigned long flags;
+#define FIRMWARE_INITIALIZED_FLAG (0)
+#define FIRMWARE_STALL_DETECTED (3)
+#define FIRMWARE_FIRST_FLAG (31)
+ wait_queue_head_t waitq;
+ u32 seq_num;
+};
+
+struct bng_re_creq_db {
+ struct bng_re_reg_desc reg;
+ struct bng_re_db_info dbinfo;
+};
+
+struct bng_re_creq_stat {
+ u64 creq_qp_event_processed;
+ u64 creq_func_event_processed;
+};
+
+struct bng_re_creq_ctx {
+ struct bng_re_hwq hwq;
+ struct bng_re_creq_db creq_db;
+ struct bng_re_creq_stat stats;
+ struct tasklet_struct creq_tasklet;
+ u16 ring_id;
+ int msix_vec;
+ bool irq_handler_avail;
+ char *irq_name;
+};
+
+struct bng_re_crsqe {
+ struct creq_qp_event *resp;
+ u32 req_size;
+ /* Free slots at the time of submission */
+ u32 free_slots;
+ u8 opcode;
+ bool is_waiter_alive;
+ bool is_in_used;
+};
+
+struct bng_re_rcfw_sbuf {
+ void *sb;
+ dma_addr_t dma_addr;
+ u32 size;
+};
+
+/* RoCE FW Communication Channels */
+struct bng_re_rcfw {
+ struct pci_dev *pdev;
+ struct bng_re_res *res;
+ struct bng_re_cmdq_ctx cmdq;
+ struct bng_re_creq_ctx creq;
+ struct bng_re_crsqe *crsqe_tbl;
+ /* To synchronize the qp-handle hash table */
+ spinlock_t tbl_lock;
+ u32 cmdq_depth;
+ /* cached from chip cctx for quick reference in slow path */
+ u16 max_timeout;
+ atomic_t rcfw_intr_enabled;
+};
+
+struct bng_re_cmdqmsg {
+ struct cmdq_base *req;
+ struct creq_base *resp;
+ void *sb;
+ u32 req_sz;
+ u32 res_sz;
+ u8 block;
+};
+
+static inline void bng_re_rcfw_cmd_prep(struct cmdq_base *req,
+ u8 opcode, u8 cmd_size)
+{
+ req->opcode = opcode;
+ req->cmd_size = cmd_size;
+}
+
+static inline void bng_re_fill_cmdqmsg(struct bng_re_cmdqmsg *msg,
+ void *req, void *resp, void *sb,
+ u32 req_sz, u32 res_sz, u8 block)
+{
+ msg->req = req;
+ msg->resp = resp;
+ msg->sb = sb;
+ msg->req_sz = req_sz;
+ msg->res_sz = res_sz;
+ msg->block = block;
+}
+
+/* Get the number of command units required for the req. The
+ * function returns the correct value only if it is called before
+ * bng_re_set_cmd_slots() has rewritten the request size.
+ */
+static inline u32 bng_re_get_cmd_slots(struct cmdq_base *req)
+{
+ u32 cmd_units = 0;
+
+ if (HAS_TLV_HEADER(req)) {
+ struct roce_tlv *tlv_req = (struct roce_tlv *)req;
+
+ cmd_units = tlv_req->total_size;
+ } else {
+ cmd_units = (req->cmd_size + BNG_FW_CMDQE_UNITS - 1) /
+ BNG_FW_CMDQE_UNITS;
+ }
+
+ return cmd_units;
+}
+
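+/*
+ * Worked example (illustrative): for a 40-byte command without a TLV
+ * header, bng_re_get_cmd_slots() returns DIV_ROUND_UP(40, 16) = 3 units,
+ * while bng_re_set_cmd_slots() returns the original 40 bytes and rewrites
+ * req->cmd_size to 3 units for the firmware.
+ */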
+static inline u32 bng_re_set_cmd_slots(struct cmdq_base *req)
+{
+ u32 cmd_byte = 0;
+
+ if (HAS_TLV_HEADER(req)) {
+ struct roce_tlv *tlv_req = (struct roce_tlv *)req;
+
+ cmd_byte = tlv_req->total_size * BNG_FW_CMDQE_UNITS;
+ } else {
+ cmd_byte = req->cmd_size;
+ req->cmd_size = (req->cmd_size + BNG_FW_CMDQE_UNITS - 1) /
+ BNG_FW_CMDQE_UNITS;
+ }
+
+ return cmd_byte;
+}
+
+void bng_re_free_rcfw_channel(struct bng_re_rcfw *rcfw);
+int bng_re_alloc_fw_channel(struct bng_re_res *res,
+ struct bng_re_rcfw *rcfw);
+int bng_re_enable_fw_channel(struct bng_re_rcfw *rcfw,
+ int msix_vector,
+ int cp_bar_reg_off);
+void bng_re_disable_rcfw_channel(struct bng_re_rcfw *rcfw);
+int bng_re_rcfw_start_irq(struct bng_re_rcfw *rcfw, int msix_vector,
+ bool need_init);
+void bng_re_rcfw_stop_irq(struct bng_re_rcfw *rcfw, bool kill);
+int bng_re_rcfw_send_message(struct bng_re_rcfw *rcfw,
+ struct bng_re_cmdqmsg *msg);
+int bng_re_init_rcfw(struct bng_re_rcfw *rcfw,
+ struct bng_re_stats *stats_ctx);
+int bng_re_deinit_rcfw(struct bng_re_rcfw *rcfw);
+#endif
diff --git a/drivers/infiniband/hw/bng_re/bng_re.h b/drivers/infiniband/hw/bng_re/bng_re.h
new file mode 100644
index 000000000000..dae4862621a7
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_re.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2025 Broadcom.
+
+#ifndef __BNG_RE_H__
+#define __BNG_RE_H__
+
+#include "bng_res.h"
+
+#define BNG_RE_ADEV_NAME "bng_en"
+
+#define BNG_RE_DESC "Broadcom 800G RoCE Driver"
+
+#define rdev_to_dev(rdev) ((rdev) ? (&(rdev)->ibdev.dev) : NULL)
+
+#define BNG_RE_MIN_MSIX 2
+#define BNG_RE_MAX_MSIX BNGE_MAX_ROCE_MSIX
+
+#define BNG_RE_CREQ_NQ_IDX 0
+
+#define BNGE_INVALID_STATS_CTX_ID (-1)
+
+/* NQ specific structures */
+struct bng_re_nq_db {
+ struct bng_re_reg_desc reg;
+ struct bng_re_db_info dbinfo;
+};
+
+struct bng_re_nq {
+ struct pci_dev *pdev;
+ struct bng_re_res *res;
+ char *name;
+ struct bng_re_hwq hwq;
+ struct bng_re_nq_db nq_db;
+ u16 ring_id;
+ int msix_vec;
+ cpumask_t mask;
+ struct tasklet_struct nq_tasklet;
+ bool requested;
+ int budget;
+ u32 load;
+
+ struct workqueue_struct *cqn_wq;
+};
+
+struct bng_re_nq_record {
+ struct bnge_msix_info msix_entries[BNG_RE_MAX_MSIX];
+ struct bng_re_nq nq[BNG_RE_MAX_MSIX];
+ int num_msix;
+ /* serialize NQ access */
+ struct mutex load_lock;
+};
+
+struct bng_re_en_dev_info {
+ struct bng_re_dev *rdev;
+ struct bnge_auxr_dev *auxr_dev;
+};
+
+struct bng_re_ring_attr {
+ dma_addr_t *dma_arr;
+ int pages;
+ int type;
+ u32 depth;
+ u32 lrid; /* Logical ring id */
+ u8 mode;
+};
+
+struct bng_re_dev {
+ struct ib_device ibdev;
+ unsigned long flags;
+#define BNG_RE_FLAG_NETDEV_REGISTERED 0
+#define BNG_RE_FLAG_RCFW_CHANNEL_EN 1
+ struct net_device *netdev;
+ struct auxiliary_device *adev;
+ struct bnge_auxr_dev *aux_dev;
+ struct bng_re_chip_ctx *chip_ctx;
+ int fn_id;
+ struct bng_re_res bng_res;
+ struct bng_re_rcfw rcfw;
+ struct bng_re_nq_record *nqr;
+ /* Device Resources */
+ struct bng_re_dev_attr *dev_attr;
+ struct dentry *dbg_root;
+ struct bng_re_stats stats_ctx;
+};
+
+#endif
diff --git a/drivers/infiniband/hw/bng_re/bng_res.c b/drivers/infiniband/hw/bng_re/bng_res.c
new file mode 100644
index 000000000000..c50823758b53
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_res.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <rdma/ib_umem.h>
+
+#include <linux/bnxt/hsi.h>
+#include "bng_res.h"
+#include "roce_hsi.h"
+
+/* Stats */
+void bng_re_free_stats_ctx_mem(struct pci_dev *pdev,
+ struct bng_re_stats *stats)
+{
+ if (stats->dma) {
+ dma_free_coherent(&pdev->dev, stats->size,
+ stats->dma, stats->dma_map);
+ }
+ memset(stats, 0, sizeof(*stats));
+ stats->fw_id = -1;
+}
+
+int bng_re_alloc_stats_ctx_mem(struct pci_dev *pdev,
+ struct bng_re_chip_ctx *cctx,
+ struct bng_re_stats *stats)
+{
+ memset(stats, 0, sizeof(*stats));
+ stats->fw_id = -1;
+ stats->size = cctx->hw_stats_size;
+ stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
+ &stats->dma_map, GFP_KERNEL);
+ if (!stats->dma)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bng_free_pbl(struct bng_re_res *res, struct bng_re_pbl *pbl)
+{
+ struct pci_dev *pdev = res->pdev;
+ int i;
+
+ for (i = 0; i < pbl->pg_count; i++) {
+ if (pbl->pg_arr[i])
+ dma_free_coherent(&pdev->dev, pbl->pg_size,
+ (void *)((unsigned long)
+ pbl->pg_arr[i] &
+ PAGE_MASK),
+ pbl->pg_map_arr[i]);
+ else
+ dev_warn(&pdev->dev,
+ "PBL free pg_arr[%d] empty?!\n", i);
+ pbl->pg_arr[i] = NULL;
+ }
+
+ vfree(pbl->pg_arr);
+ pbl->pg_arr = NULL;
+ vfree(pbl->pg_map_arr);
+ pbl->pg_map_arr = NULL;
+ pbl->pg_count = 0;
+ pbl->pg_size = 0;
+}
+
+static int bng_alloc_pbl(struct bng_re_res *res,
+ struct bng_re_pbl *pbl,
+ struct bng_re_sg_info *sginfo)
+{
+ struct pci_dev *pdev = res->pdev;
+ u32 pages;
+ int i;
+
+ if (sginfo->nopte)
+ return 0;
+ pages = sginfo->npages;
+
+ /* page ptr arrays */
+ pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
+ if (!pbl->pg_arr)
+ return -ENOMEM;
+
+ pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
+ if (!pbl->pg_map_arr) {
+ vfree(pbl->pg_arr);
+ pbl->pg_arr = NULL;
+ return -ENOMEM;
+ }
+ pbl->pg_count = 0;
+ pbl->pg_size = sginfo->pgsize;
+
+ for (i = 0; i < pages; i++) {
+ pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
+ if (!pbl->pg_arr[i])
+ goto fail;
+ pbl->pg_count++;
+ }
+
+ return 0;
+fail:
+ bng_free_pbl(res, pbl);
+ return -ENOMEM;
+}
+
+void bng_re_free_hwq(struct bng_re_res *res,
+ struct bng_re_hwq *hwq)
+{
+ int i;
+
+ if (!hwq->max_elements)
+ return;
+ if (hwq->level >= BNG_PBL_LVL_MAX)
+ return;
+
+ for (i = 0; i < hwq->level + 1; i++)
+ bng_free_pbl(res, &hwq->pbl[i]);
+
+ hwq->level = BNG_PBL_LVL_MAX;
+ hwq->max_elements = 0;
+ hwq->element_size = 0;
+ hwq->prod = 0;
+ hwq->cons = 0;
+}
+
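+/*
+ * PBL level selection, by example (assuming 4 KiB pages): a single-page
+ * context maps directly at BNG_PBL_LVL_0; a 64K-entry CREQ of 16-byte
+ * elements needs 256 pages and takes the one-level BNG_PBL_LVL_1 path;
+ * anything above 512 pages uses two levels of indirection (BNG_PBL_LVL_2).
+ */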
+/* All HWQs are power of 2 in size */
+int bng_re_alloc_init_hwq(struct bng_re_hwq *hwq,
+ struct bng_re_hwq_attr *hwq_attr)
+{
+ u32 npages, pg_size;
+ struct bng_re_sg_info sginfo = {};
+ u32 depth, stride, npbl, npde;
+ dma_addr_t *src_phys_ptr, **dst_virt_ptr;
+ struct bng_re_res *res;
+ struct pci_dev *pdev;
+ int i, rc, lvl;
+
+ res = hwq_attr->res;
+ pdev = res->pdev;
+ pg_size = hwq_attr->sginfo->pgsize;
+ hwq->level = BNG_PBL_LVL_MAX;
+
+ depth = roundup_pow_of_two(hwq_attr->depth);
+ stride = roundup_pow_of_two(hwq_attr->stride);
+
+ npages = (depth * stride) / pg_size;
+ if ((depth * stride) % pg_size)
+ npages++;
+ if (!npages)
+ return -EINVAL;
+ hwq_attr->sginfo->npages = npages;
+
+ if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
+ /* This request is Level 0, map PTE */
+ rc = bng_alloc_pbl(res, &hwq->pbl[BNG_PBL_LVL_0], hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = BNG_PBL_LVL_0;
+ goto done;
+ }
+
+ if (npages >= MAX_PBL_LVL_0_PGS) {
+ if (npages > MAX_PBL_LVL_1_PGS) {
+ u32 flag = PTU_PTE_VALID;
+ /* 2 levels of indirection */
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ npde = npbl >> MAX_PDL_LVL_SHIFT;
+ if (npbl % BIT(MAX_PDL_LVL_SHIFT))
+ npde++;
+ /* Alloc PDE pages */
+ sginfo.pgsize = npde * pg_size;
+ sginfo.npages = 1;
+ rc = bng_alloc_pbl(res, &hwq->pbl[BNG_PBL_LVL_0], &sginfo);
+ if (rc)
+ goto fail;
+
+ /* Alloc PBL pages */
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ rc = bng_alloc_pbl(res, &hwq->pbl[BNG_PBL_LVL_1], &sginfo);
+ if (rc)
+ goto fail;
+ /* Fill PDL with PBL page pointers */
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[BNG_PBL_LVL_0].pg_arr;
+ src_phys_ptr = hwq->pbl[BNG_PBL_LVL_1].pg_map_arr;
+ for (i = 0; i < hwq->pbl[BNG_PBL_LVL_1].pg_count; i++)
+ dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;
+
+ /* Alloc or init PTEs */
+ rc = bng_alloc_pbl(res, &hwq->pbl[BNG_PBL_LVL_2],
+ hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = BNG_PBL_LVL_2;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBLs with PTE pointers */
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[BNG_PBL_LVL_1].pg_arr;
+ src_phys_ptr = hwq->pbl[BNG_PBL_LVL_2].pg_map_arr;
+ for (i = 0; i < hwq->pbl[BNG_PBL_LVL_2].pg_count; i++) {
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+ src_phys_ptr[i] | PTU_PTE_VALID;
+ }
+ if (hwq_attr->type == BNG_HWQ_TYPE_QUEUE) {
+ /* Find the last pg of the size */
+ i = hwq->pbl[BNG_PBL_LVL_2].pg_count;
+ dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
+ PTU_PTE_LAST;
+ if (i > 1)
+ dst_virt_ptr[PTR_PG(i - 2)]
+ [PTR_IDX(i - 2)] |=
+ PTU_PTE_NEXT_TO_LAST;
+ }
+ } else { /* npages < 512: npbl = 1, npde = 0 */
+ u32 flag = PTU_PTE_VALID;
+
+ /* 1 level of indirection */
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ /* Alloc PBL page */
+ rc = bng_alloc_pbl(res, &hwq->pbl[BNG_PBL_LVL_0], &sginfo);
+ if (rc)
+ goto fail;
+ /* Alloc or init PTEs */
+ rc = bng_alloc_pbl(res, &hwq->pbl[BNG_PBL_LVL_1],
+ hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = BNG_PBL_LVL_1;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBL with PTE pointers */
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[BNG_PBL_LVL_0].pg_arr;
+ src_phys_ptr = hwq->pbl[BNG_PBL_LVL_1].pg_map_arr;
+ for (i = 0; i < hwq->pbl[BNG_PBL_LVL_1].pg_count; i++)
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+ src_phys_ptr[i] | flag;
+ if (hwq_attr->type == BNG_HWQ_TYPE_QUEUE) {
+ /* Find the last pg of the size */
+ i = hwq->pbl[BNG_PBL_LVL_1].pg_count;
+ dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
+ PTU_PTE_LAST;
+ if (i > 1)
+ dst_virt_ptr[PTR_PG(i - 2)]
+ [PTR_IDX(i - 2)] |=
+ PTU_PTE_NEXT_TO_LAST;
+ }
+ }
+ }
+done:
+ hwq->prod = 0;
+ hwq->cons = 0;
+ hwq->pdev = pdev;
+ hwq->depth = hwq_attr->depth;
+ hwq->max_elements = hwq->depth;
+ hwq->element_size = stride;
+ hwq->qe_ppg = pg_size / stride;
+ /* For direct access to the elements */
+ lvl = hwq->level;
+ if (hwq_attr->sginfo->nopte && hwq->level)
+ lvl = hwq->level - 1;
+ hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
+ hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
+ spin_lock_init(&hwq->lock);
+
+ return 0;
+fail:
+ bng_re_free_hwq(res, hwq);
+ return -ENOMEM;
+}
diff --git a/drivers/infiniband/hw/bng_re/bng_res.h b/drivers/infiniband/hw/bng_re/bng_res.h
new file mode 100644
index 000000000000..9997f86d6a0e
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_res.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2025 Broadcom.
+
+#ifndef __BNG_RES_H__
+#define __BNG_RES_H__
+
+#include "roce_hsi.h"
+
+#define BNG_ROCE_FW_MAX_TIMEOUT 60
+
+#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
+#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
+#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
+#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG)
+
+#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
+#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \
+ ((HWQ_CMP(hwq->prod, hwq)\
+ - HWQ_CMP(hwq->cons, hwq))\
+ & (hwq->max_elements - 1)))
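+
+/*
+ * Worked example (illustrative): with max_elements = 8, prod = 10 and
+ * cons = 5, HWQ_CMP() yields 2 and 5, (2 - 5) & 7 = 5 slots in use, so
+ * HWQ_FREE_SLOTS() returns 8 - 5 = 3.
+ */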
+
+#define MAX_PBL_LVL_0_PGS 1
+#define MAX_PBL_LVL_1_PGS 512
+#define MAX_PBL_LVL_1_PGS_SHIFT 9
+#define MAX_PBL_LVL_1_PGS_FOR_LVL_2 256
+#define MAX_PBL_LVL_2_PGS (256 * 512)
+#define MAX_PDL_LVL_SHIFT 9
+
+#define BNG_RE_DBR_VALID (0x1UL << 26)
+#define BNG_RE_DBR_EPOCH_SHIFT 24
+#define BNG_RE_DBR_TOGGLE_SHIFT 25
+
+#define BNG_MAX_TQM_ALLOC_REQ 48
+
+struct bng_re_reg_desc {
+ u8 bar_id;
+ resource_size_t bar_base;
+ unsigned long offset;
+ void __iomem *bar_reg;
+ size_t len;
+};
+
+struct bng_re_db_info {
+ void __iomem *db;
+ void __iomem *priv_db;
+ struct bng_re_hwq *hwq;
+ u32 xid;
+ u32 max_slot;
+ u32 flags;
+ u8 toggle;
+};
+
+enum bng_re_db_info_flags_mask {
+ BNG_RE_FLAG_EPOCH_CONS_SHIFT = 0x0UL,
+ BNG_RE_FLAG_EPOCH_PROD_SHIFT = 0x1UL,
+ BNG_RE_FLAG_EPOCH_CONS_MASK = 0x1UL,
+ BNG_RE_FLAG_EPOCH_PROD_MASK = 0x2UL,
+};
+
+enum bng_re_db_epoch_flag_shift {
+ BNG_RE_DB_EPOCH_CONS_SHIFT = BNG_RE_DBR_EPOCH_SHIFT,
+ BNG_RE_DB_EPOCH_PROD_SHIFT = (BNG_RE_DBR_EPOCH_SHIFT - 1),
+};
+
+struct bng_re_chip_ctx {
+ u16 chip_num;
+ u16 hw_stats_size;
+ u64 hwrm_intf_ver;
+ u16 hwrm_cmd_max_timeout;
+};
+
+struct bng_re_pbl {
+ u32 pg_count;
+ u32 pg_size;
+ void **pg_arr;
+ dma_addr_t *pg_map_arr;
+};
+
+enum bng_re_pbl_lvl {
+ BNG_PBL_LVL_0,
+ BNG_PBL_LVL_1,
+ BNG_PBL_LVL_2,
+ BNG_PBL_LVL_MAX
+};
+
+enum bng_re_hwq_type {
+ BNG_HWQ_TYPE_CTX,
+ BNG_HWQ_TYPE_QUEUE
+};
+
+struct bng_re_sg_info {
+ u32 npages;
+ u32 pgshft;
+ u32 pgsize;
+ bool nopte;
+};
+
+struct bng_re_hwq_attr {
+ struct bng_re_res *res;
+ struct bng_re_sg_info *sginfo;
+ enum bng_re_hwq_type type;
+ u32 depth;
+ u32 stride;
+ u32 aux_stride;
+ u32 aux_depth;
+};
+
+struct bng_re_hwq {
+ struct pci_dev *pdev;
+ /* lock to protect hwq */
+ spinlock_t lock;
+ struct bng_re_pbl pbl[BNG_PBL_LVL_MAX + 1];
+ /* Valid values: 0, 1, 2 */
+ enum bng_re_pbl_lvl level;
+ /* PBL entries */
+ void **pbl_ptr;
+ /* PBL dma_addr */
+ dma_addr_t *pbl_dma_ptr;
+ u32 max_elements;
+ u32 depth;
+ u16 element_size;
+ u32 prod;
+ u32 cons;
+ /* queue entry per page */
+ u16 qe_ppg;
+};
+
+struct bng_re_stats {
+ dma_addr_t dma_map;
+ void *dma;
+ u32 size;
+ u32 fw_id;
+};
+
+struct bng_re_res {
+ struct pci_dev *pdev;
+ struct bng_re_chip_ctx *cctx;
+ struct bng_re_dev_attr *dattr;
+};
+
+static inline void *bng_re_get_qe(struct bng_re_hwq *hwq,
+ u32 indx, u64 *pg)
+{
+ u32 pg_num, pg_idx;
+
+ pg_num = (indx / hwq->qe_ppg);
+ pg_idx = (indx % hwq->qe_ppg);
+ if (pg)
+ *pg = (u64)&hwq->pbl_ptr[pg_num];
+ return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
+}
+
+#define BNG_RE_INIT_DBHDR(xid, type, indx, toggle) \
+ (((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \
+ (type) | BNG_RE_DBR_VALID) << 32) | (indx) | \
+ (((u32)(toggle)) << (BNG_RE_DBR_TOGGLE_SHIFT)))
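+
+/*
+ * Doorbell layout sketch (as composed above): the high 32 bits carry the
+ * xid, the RoCE path, the doorbell type and the valid bit; the low 32
+ * bits carry the ring index (with the epoch bit folded in by the caller)
+ * and the toggle at BNG_RE_DBR_TOGGLE_SHIFT.
+ */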
+
+static inline void bng_re_ring_db(struct bng_re_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+ u32 indx;
+ u8 toggle = 0;
+
+ if (type == DBC_DBC_TYPE_CQ_ARMALL ||
+ type == DBC_DBC_TYPE_CQ_ARMSE)
+ toggle = info->toggle;
+
+ indx = (info->hwq->cons & DBC_DBC_INDEX_MASK) |
+ ((info->flags & BNG_RE_FLAG_EPOCH_CONS_MASK) <<
+ BNG_RE_DB_EPOCH_CONS_SHIFT);
+
+ key = BNG_RE_INIT_DBHDR(info->xid, type, indx, toggle);
+ writeq(key, info->db);
+}
+
+static inline void bng_re_ring_nq_db(struct bng_re_db_info *info,
+ struct bng_re_chip_ctx *cctx,
+ bool arm)
+{
+ u32 type;
+
+ type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
+ bng_re_ring_db(info, type);
+}
+
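+/*
+ * Example (illustrative): with max_elements = 1024, cons = 1023 and
+ * cnt = 2, the consumer wraps to 1 and the epoch bit in the dbinfo
+ * flags is flipped so stale entries are not treated as valid.
+ */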
+static inline void bng_re_hwq_incr_cons(u32 max_elements, u32 *cons, u32 cnt,
+ u32 *dbinfo_flags)
+{
+ /* move cons and update toggle/epoch if wrap around */
+ *cons += cnt;
+ if (*cons >= max_elements) {
+ *cons %= max_elements;
+ *dbinfo_flags ^= 1UL << BNG_RE_FLAG_EPOCH_CONS_SHIFT;
+ }
+}
+
+static inline bool _is_max_srq_ext_supported(u16 dev_cap_ext_flags_2)
+{
+ return !!(dev_cap_ext_flags_2 & CREQ_QUERY_FUNC_RESP_SB_MAX_SRQ_EXTENDED);
+}
+
+void bng_re_free_hwq(struct bng_re_res *res,
+ struct bng_re_hwq *hwq);
+
+int bng_re_alloc_init_hwq(struct bng_re_hwq *hwq,
+ struct bng_re_hwq_attr *hwq_attr);
+
+void bng_re_free_stats_ctx_mem(struct pci_dev *pdev,
+ struct bng_re_stats *stats);
+
+int bng_re_alloc_stats_ctx_mem(struct pci_dev *pdev,
+ struct bng_re_chip_ctx *cctx,
+ struct bng_re_stats *stats);
+#endif
diff --git a/drivers/infiniband/hw/bng_re/bng_sp.c b/drivers/infiniband/hw/bng_re/bng_sp.c
new file mode 100644
index 000000000000..83099e05328d
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_sp.c
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2025 Broadcom.
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+
+#include "bng_res.h"
+#include "bng_fw.h"
+#include "bng_sp.h"
+#include "bng_tlv.h"
+
+static bool bng_re_is_atomic_cap(struct bng_re_rcfw *rcfw)
+{
+ u16 pcie_ctl2 = 0;
+
+ pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
+ return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
+static void bng_re_query_version(struct bng_re_rcfw *rcfw,
+ char *fw_ver)
+{
+ struct creq_query_version_resp resp = {};
+ struct bng_re_cmdqmsg msg = {};
+ struct cmdq_query_version req = {};
+ int rc;
+
+ bng_re_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_VERSION,
+ sizeof(req));
+
+ bng_re_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
+ rc = bng_re_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return;
+ fw_ver[0] = resp.fw_maj;
+ fw_ver[1] = resp.fw_minor;
+ fw_ver[2] = resp.fw_bld;
+ fw_ver[3] = resp.fw_rsvd;
+}
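
The four response bytes land in caller-provided storage in major/minor/build/reserved order; a consumer could render them as a dotted string, e.g. (sample values, not real firmware output):

	#include <stdio.h>

	int main(void)
	{
		unsigned char fw_ver[4] = { 1, 2, 3, 0 };	/* maj, minor, bld, rsvd */
		char buf[32];

		snprintf(buf, sizeof(buf), "%u.%u.%u.%u",
			 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3]);
		printf("fw version: %s\n", buf);
		return 0;
	}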
+
+int bng_re_get_dev_attr(struct bng_re_rcfw *rcfw)
+{
+ struct bng_re_dev_attr *attr = rcfw->res->dattr;
+ struct creq_query_func_resp resp = {};
+ struct bng_re_cmdqmsg msg = {};
+ struct creq_query_func_resp_sb *sb;
+ struct bng_re_rcfw_sbuf sbuf;
+ struct cmdq_query_func req = {};
+ u8 *tqm_alloc;
+ int i, rc;
+ u32 temp;
+
+ bng_re_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_FUNC,
+ sizeof(req));
+
+ sbuf.size = ALIGN(sizeof(*sb), BNG_FW_CMDQE_UNITS);
+ sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ sb = sbuf.sb;
+ req.resp_size = sbuf.size / BNG_FW_CMDQE_UNITS;
+ bng_re_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bng_re_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto bail;
+ /* Extract the device attributes from the side buffer */
+ attr->max_qp = le32_to_cpu(sb->max_qp);
+ /* max_qp value reported by FW doesn't include QP1 */
+ attr->max_qp += 1;
+ attr->max_qp_rd_atom =
+ sb->max_qp_rd_atom > BNG_RE_MAX_OUT_RD_ATOM ?
+ BNG_RE_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
+ attr->max_qp_init_rd_atom =
+ sb->max_qp_init_rd_atom > BNG_RE_MAX_OUT_RD_ATOM ?
+ BNG_RE_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
+ attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
+
+ /* Adjust for max_qp_wqes for variable wqe */
+ attr->max_qp_wqes = min_t(u32, attr->max_qp_wqes, BNG_VAR_MAX_WQE - 1);
+
+ attr->max_qp_sges = min_t(u32, sb->max_sge_var_wqe, BNG_VAR_MAX_SGE);
+ attr->max_cq = le32_to_cpu(sb->max_cq);
+ attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
+ attr->max_cq_sges = attr->max_qp_sges;
+ attr->max_mr = le32_to_cpu(sb->max_mr);
+ attr->max_mw = le32_to_cpu(sb->max_mw);
+
+ attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
+ attr->max_pd = 64 * 1024;
+ attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
+ attr->max_ah = le32_to_cpu(sb->max_ah);
+
+ attr->max_srq = le16_to_cpu(sb->max_srq);
+ attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
+ attr->max_srq_sges = sb->max_srq_sge;
+ attr->max_pkey = 1;
+ attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
+ /*
+ * Read the max GID count supported by HW.
+ * Each GID entry in the HW table consumes two GID entries in
+ * the kernel GID table, so the max_gid reported to the stack
+ * can be up to twice the value reported by HW, capped at 256 GIDs.
+ */
+ attr->max_sgid = le32_to_cpu(sb->max_gid);
+ attr->max_sgid = min_t(u32, BNG_RE_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
+ attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
+ attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
+
+ if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
+ attr->max_srq += le16_to_cpu(sb->max_srq_ext);
+
+ bng_re_query_version(rcfw, attr->fw_ver);
+ for (i = 0; i < BNG_MAX_TQM_ALLOC_REQ / 4; i++) {
+ temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
+ tqm_alloc = (u8 *)&temp;
+ attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
+ attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
+ attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
+ attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
+ }
+
+ attr->max_dpi = le32_to_cpu(sb->max_dpi);
+ attr->is_atomic = bng_re_is_atomic_cap(rcfw);
+bail:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
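
The tqm_alloc_reqs loop above unpacks four one-byte allocation requests from each little-endian 32-bit response word; a standalone model (assuming a little-endian host, which is what the driver's byte-walk effectively relies on):

	#include <stdio.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint32_t words[2] = { 0x04030201, 0x08070605 };	/* sample payload */
		uint8_t reqs[8];
		unsigned int i;

		/* memcpy stands in for the pointer-increment walk in the driver */
		for (i = 0; i < 2; i++)
			memcpy(&reqs[i * 4], &words[i], 4);

		for (i = 0; i < 8; i++)
			printf("req[%u] = %u\n", i, reqs[i]);
		return 0;
	}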
diff --git a/drivers/infiniband/hw/bng_re/bng_sp.h b/drivers/infiniband/hw/bng_re/bng_sp.h
new file mode 100644
index 000000000000..e15190515ed1
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_sp.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2025 Broadcom.
+
+#ifndef __BNG_SP_H__
+#define __BNG_SP_H__
+
+#include "bng_fw.h"
+
+#define BNG_VAR_MAX_WQE 4352
+#define BNG_VAR_MAX_SGE 13
+
+struct bng_re_dev_attr {
+#define FW_VER_ARR_LEN 4
+ u8 fw_ver[FW_VER_ARR_LEN];
+#define BNG_RE_NUM_GIDS_SUPPORTED 256
+ u16 max_sgid;
+ u16 max_mrw;
+ u32 max_qp;
+#define BNG_RE_MAX_OUT_RD_ATOM 126
+ u32 max_qp_rd_atom;
+ u32 max_qp_init_rd_atom;
+ u32 max_qp_wqes;
+ u32 max_qp_sges;
+ u32 max_cq;
+ u32 max_cq_wqes;
+ u32 max_cq_sges;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_pd;
+ u32 max_mw;
+ u32 max_raw_ethy_qp;
+ u32 max_ah;
+ u32 max_srq;
+ u32 max_srq_wqes;
+ u32 max_srq_sges;
+ u32 max_pkey;
+ u32 max_inline_data;
+ u32 l2_db_size;
+ u8 tqm_alloc_reqs[BNG_MAX_TQM_ALLOC_REQ];
+ bool is_atomic;
+ u16 dev_cap_flags;
+ u16 dev_cap_flags2;
+ u32 max_dpi;
+};
+
+int bng_re_get_dev_attr(struct bng_re_rcfw *rcfw);
+#endif
diff --git a/drivers/infiniband/hw/bng_re/bng_tlv.h b/drivers/infiniband/hw/bng_re/bng_tlv.h
new file mode 100644
index 000000000000..278f4922962d
--- /dev/null
+++ b/drivers/infiniband/hw/bng_re/bng_tlv.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+
+#ifndef __BNG_TLV_H__
+#define __BNG_TLV_H__
+
+#include "roce_hsi.h"
+
+struct roce_tlv {
+ struct tlv tlv;
+ u8 total_size; // in units of 16-byte chunks
+ u8 unused[7]; // for 16-byte alignment
+};
+
+/*
+ * TLV size in units of 16 byte chunks
+ */
+#define TLV_SIZE ((sizeof(struct roce_tlv) + 15) / 16)
+/*
+ * TLV length in bytes
+ */
+#define TLV_BYTES (TLV_SIZE * 16)
+
+#define HAS_TLV_HEADER(msg) (le16_to_cpu(((struct tlv *)(msg))->cmd_discr) == CMD_DISCR_TLV_ENCAP)
+#define GET_TLV_DATA(tlv) ((void *)&((uint8_t *)(tlv))[TLV_BYTES])
+
+static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode;
+ else
+ return req->opcode;
+}
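
All of these accessors share one dispatch rule: if the message opens with the TLV discriminator, the real command header sits TLV_BYTES past the start; otherwise the message itself is the header. A standalone model (discriminator value and header layout are illustrative, not the wire format):

	#include <stdio.h>
	#include <stdint.h>

	#define CMD_DISCR_TLV	0x8000u	/* placeholder discriminator */
	#define TLV_BYTES	16	/* one 16-byte TLV header, per the macros above */

	struct cmd_hdr {
		uint16_t cmd_discr;
		uint8_t opcode;
	};

	static uint8_t get_opcode(void *msg, uint32_t size)
	{
		struct cmd_hdr *hdr = msg;

		if (hdr->cmd_discr == CMD_DISCR_TLV && size > TLV_BYTES)
			hdr = (struct cmd_hdr *)((uint8_t *)msg + TLV_BYTES);
		return hdr->opcode;
	}

	int main(void)
	{
		uint8_t buf[32] = { 0 };

		((struct cmd_hdr *)buf)->cmd_discr = CMD_DISCR_TLV;
		buf[TLV_BYTES + 2] = 0x42;	/* opcode of the wrapped command */
		printf("opcode: 0x%x\n", get_opcode(buf, sizeof(buf)));
		return 0;
	}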
+
+static inline void __set_cmdq_base_opcode(struct cmdq_base *req,
+ u32 size, u8 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val;
+ else
+ req->opcode = val;
+}
+
+static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->cookie;
+ else
+ return req->cookie;
+}
+
+static inline void __set_cmdq_base_cookie(struct cmdq_base *req,
+ u32 size, __le16 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->cookie = val;
+ else
+ req->cookie = val;
+}
+
+static inline __le64 __get_cmdq_base_resp_addr(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr;
+ else
+ return req->resp_addr;
+}
+
+static inline void __set_cmdq_base_resp_addr(struct cmdq_base *req,
+ u32 size, __le64 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr = val;
+ else
+ req->resp_addr = val;
+}
+
+static inline u8 __get_cmdq_base_resp_size(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size;
+ else
+ return req->resp_size;
+}
+
+static inline void __set_cmdq_base_resp_size(struct cmdq_base *req,
+ u32 size, u8 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size = val;
+ else
+ req->resp_size = val;
+}
+
+static inline u8 __get_cmdq_base_cmd_size(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct roce_tlv *)(req))->total_size;
+ else
+ return req->cmd_size;
+}
+
+static inline void __set_cmdq_base_cmd_size(struct cmdq_base *req,
+ u32 size, u8 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->cmd_size = val;
+ else
+ req->cmd_size = val;
+}
+
+static inline __le16 __get_cmdq_base_flags(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->flags;
+ else
+ return req->flags;
+}
+
+static inline void __set_cmdq_base_flags(struct cmdq_base *req,
+ u32 size, __le16 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->flags = val;
+ else
+ req->flags = val;
+}
+
+#endif /* __BNG_TLV_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 6df5a2738c95..3a7ce4729fcf 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -172,9 +172,9 @@ struct bnxt_re_dev {
struct list_head list;
unsigned long flags;
#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
+#define BNXT_RE_FLAG_STATS_CTX3_ALLOC 1
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
-#define BNXT_RE_FLAG_QOS_WORK_REG 5
#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
@@ -187,9 +187,6 @@ struct bnxt_re_dev {
int id;
- struct delayed_work worker;
- u8 cur_prio_map;
-
/* RCFW Channel */
struct bnxt_qplib_rcfw rcfw;
@@ -227,6 +224,15 @@ struct bnxt_re_dev {
struct workqueue_struct *dcb_wq;
struct dentry *cc_config;
struct bnxt_re_dbg_cc_config_params *cc_config_params;
+ struct dentry *cq_coal_cfg;
+ struct bnxt_re_dbg_cq_coal_params *cq_coal_cfg_params;
+#define BNXT_VPD_FLD_LEN 32
+ char board_partno[BNXT_VPD_FLD_LEN];
+ /* RoCE mirror */
+ u16 mirror_vnic_id;
+ union ib_gid ugid;
+ u32 ugid_index;
+ u8 sniffer_flow_created : 1;
};
#define to_bnxt_re_dev(ptr, member) \
@@ -243,6 +249,10 @@ int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *ou
int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
struct ib_mad *out_mad);
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id);
+
static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
{
if (rdev)
@@ -276,4 +286,7 @@ static inline int bnxt_re_read_context_allowed(struct bnxt_re_dev *rdev)
#define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7 192
#define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7 192
+#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \
+ ((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)
+
#endif
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.c b/drivers/infiniband/hw/bnxt_re/debugfs.c
index e632f1661b92..88817c86ae24 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.c
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.c
@@ -8,6 +8,7 @@
#include <linux/debugfs.h>
#include <linux/pci.h>
+#include <linux/seq_file.h>
#include <rdma/ib_addr.h>
#include "bnxt_ulp.h"
@@ -22,6 +23,14 @@
static struct dentry *bnxt_re_debugfs_root;
+static const char * const bnxt_re_cq_coal_str[] = {
+ "buf_maxtime",
+ "normal_maxbuf",
+ "during_maxbuf",
+ "en_ring_idle_mode",
+ "enable",
+};
+
static const char * const bnxt_re_cc_gen0_name[] = {
"enable_cc",
"run_avg_weight_g",
@@ -314,6 +323,157 @@ static const struct file_operations bnxt_re_cc_config_ops = {
.write = bnxt_re_cc_config_set,
};
+static int info_show(struct seq_file *m, void *unused)
+{
+ struct bnxt_re_dev *rdev = m->private;
+ struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
+
+ seq_puts(m, "Info:\n");
+ seq_printf(m, "Device Name\t\t: %s\n", dev_name(&rdev->ibdev.dev));
+ seq_printf(m, "PD Watermark\t\t: %llu\n", res_s->pd_watermark);
+ seq_printf(m, "AH Watermark\t\t: %llu\n", res_s->ah_watermark);
+ seq_printf(m, "QP Watermark\t\t: %llu\n", res_s->qp_watermark);
+ seq_printf(m, "RC QP Watermark\t\t: %llu\n", res_s->rc_qp_watermark);
+ seq_printf(m, "UD QP Watermark\t\t: %llu\n", res_s->ud_qp_watermark);
+ seq_printf(m, "SRQ Watermark\t\t: %llu\n", res_s->srq_watermark);
+ seq_printf(m, "CQ Watermark\t\t: %llu\n", res_s->cq_watermark);
+ seq_printf(m, "MR Watermark\t\t: %llu\n", res_s->mr_watermark);
+ seq_printf(m, "MW Watermark\t\t: %llu\n", res_s->mw_watermark);
+ seq_printf(m, "CQ Resize Count\t\t: %d\n", atomic_read(&res_s->resize_count));
+ if (rdev->pacing.dbr_pacing) {
+ seq_printf(m, "DB Pacing Reschedule\t: %llu\n", rdev->stats.pacing.resched);
+ seq_printf(m, "DB Pacing Complete\t: %llu\n", rdev->stats.pacing.complete);
+ seq_printf(m, "DB Pacing Alerts\t: %llu\n", rdev->stats.pacing.alerts);
+ seq_printf(m, "DB FIFO Register\t: 0x%x\n",
+ readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(info);
+
+static void bnxt_re_debugfs_add_info(struct bnxt_re_dev *rdev)
+{
+ debugfs_create_file("info", 0400, rdev->dbg_root, rdev, &info_fops);
+}
+
+static ssize_t cq_coal_cfg_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct seq_file *s = file->private_data;
+ struct bnxt_re_cq_coal_param *param = s->private;
+ struct bnxt_re_dev *rdev = param->rdev;
+ int offset = param->offset;
+ char lbuf[16] = { };
+ u32 val;
+
+ if (count >= sizeof(lbuf))
+ return -EINVAL;
+
+ if (copy_from_user(lbuf, buf, count))
+ return -EFAULT;
+
+ lbuf[sizeof(lbuf) - 1] = '\0';
+
+ if (kstrtou32(lbuf, 0, &val))
+ return -EINVAL;
+
+ switch (offset) {
+ case BNXT_RE_COAL_CQ_BUF_MAXTIME:
+ if (val < 1 || val > BNXT_QPLIB_CQ_COAL_MAX_BUF_MAXTIME)
+ return -EINVAL;
+ rdev->cq_coalescing.buf_maxtime = val;
+ break;
+ case BNXT_RE_COAL_CQ_NORMAL_MAXBUF:
+ if (val < 1 || val > BNXT_QPLIB_CQ_COAL_MAX_NORMAL_MAXBUF)
+ return -EINVAL;
+ rdev->cq_coalescing.normal_maxbuf = val;
+ break;
+ case BNXT_RE_COAL_CQ_DURING_MAXBUF:
+ if (val < 1 || val > BNXT_QPLIB_CQ_COAL_MAX_DURING_MAXBUF)
+ return -EINVAL;
+ rdev->cq_coalescing.during_maxbuf = val;
+ break;
+ case BNXT_RE_COAL_CQ_EN_RING_IDLE_MODE:
+ if (val > BNXT_QPLIB_CQ_COAL_MAX_EN_RING_IDLE_MODE)
+ return -EINVAL;
+ rdev->cq_coalescing.en_ring_idle_mode = val;
+ break;
+ case BNXT_RE_COAL_CQ_ENABLE:
+ if (val > 1)
+ return -EINVAL;
+ rdev->cq_coalescing.enable = val;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return count;
+}
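
From user space each knob is a plain debugfs file; a minimal C writer (the path below is a guess at the layout created here — the per-device directory name varies by system):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* assumed path: debugfs root + driver dir + device dir */
		const char *path =
			"/sys/kernel/debug/bnxt_re/bnxt_re0/cq_coal_cfg/buf_maxtime";
		const char *val = "8\n";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, val, strlen(val)) < 0)
			perror("write");
		close(fd);
		return 0;
	}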
+
+static int cq_coal_cfg_show(struct seq_file *s, void *unused)
+{
+ struct bnxt_re_cq_coal_param *param = s->private;
+ struct bnxt_re_dev *rdev = param->rdev;
+ int offset = param->offset;
+ u32 val = 0;
+
+ switch (offset) {
+ case BNXT_RE_COAL_CQ_BUF_MAXTIME:
+ val = rdev->cq_coalescing.buf_maxtime;
+ break;
+ case BNXT_RE_COAL_CQ_NORMAL_MAXBUF:
+ val = rdev->cq_coalescing.normal_maxbuf;
+ break;
+ case BNXT_RE_COAL_CQ_DURING_MAXBUF:
+ val = rdev->cq_coalescing.during_maxbuf;
+ break;
+ case BNXT_RE_COAL_CQ_EN_RING_IDLE_MODE:
+ val = rdev->cq_coalescing.en_ring_idle_mode;
+ break;
+ case BNXT_RE_COAL_CQ_ENABLE:
+ val = rdev->cq_coalescing.enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ seq_printf(s, "%u\n", val);
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(cq_coal_cfg);
+
+static void bnxt_re_cleanup_cq_coal_debugfs(struct bnxt_re_dev *rdev)
+{
+ debugfs_remove_recursive(rdev->cq_coal_cfg);
+ kfree(rdev->cq_coal_cfg_params);
+}
+
+static void bnxt_re_init_cq_coal_debugfs(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_dbg_cq_coal_params *dbg_cq_coal_params;
+ int i;
+
+ if (!_is_cq_coalescing_supported(rdev->dev_attr->dev_cap_flags2))
+ return;
+
+ dbg_cq_coal_params = kzalloc(sizeof(*dbg_cq_coal_params), GFP_KERNEL);
+ if (!dbg_cq_coal_params)
+ return;
+
+ rdev->cq_coal_cfg = debugfs_create_dir("cq_coal_cfg", rdev->dbg_root);
+ rdev->cq_coal_cfg_params = dbg_cq_coal_params;
+
+ for (i = 0; i < BNXT_RE_COAL_CQ_MAX; i++) {
+ dbg_cq_coal_params->params[i].offset = i;
+ dbg_cq_coal_params->params[i].rdev = rdev;
+ debugfs_create_file(bnxt_re_cq_coal_str[i],
+ 0600, rdev->cq_coal_cfg,
+ &dbg_cq_coal_params->params[i],
+ &cq_coal_cfg_fops);
+ }
+}
+
void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
{
struct pci_dev *pdev = rdev->en_dev->pdev;
@@ -325,6 +485,8 @@ void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
rdev->qp_debugfs = debugfs_create_dir("QPs", rdev->dbg_root);
rdev->cc_config = debugfs_create_dir("cc_config", rdev->dbg_root);
+ bnxt_re_debugfs_add_info(rdev);
+
rdev->cc_config_params = kzalloc(sizeof(*cc_params), GFP_KERNEL);
for (i = 0; i < BNXT_RE_CC_PARAM_GEN0; i++) {
@@ -337,10 +499,13 @@ void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev)
rdev->cc_config, tmp_params,
&bnxt_re_cc_config_ops);
}
+
+ bnxt_re_init_cq_coal_debugfs(rdev);
}
void bnxt_re_debugfs_rem_pdev(struct bnxt_re_dev *rdev)
{
+ bnxt_re_cleanup_cq_coal_debugfs(rdev);
debugfs_remove_recursive(rdev->qp_debugfs);
debugfs_remove_recursive(rdev->cc_config);
kfree(rdev->cc_config_params);
diff --git a/drivers/infiniband/hw/bnxt_re/debugfs.h b/drivers/infiniband/hw/bnxt_re/debugfs.h
index 8f101df4e838..98f4620ef245 100644
--- a/drivers/infiniband/hw/bnxt_re/debugfs.h
+++ b/drivers/infiniband/hw/bnxt_re/debugfs.h
@@ -33,4 +33,23 @@ struct bnxt_re_cc_param {
struct bnxt_re_dbg_cc_config_params {
struct bnxt_re_cc_param gen0_parms[BNXT_RE_CC_PARAM_GEN0];
};
+
+struct bnxt_re_cq_coal_param {
+ struct bnxt_re_dev *rdev;
+ u32 offset;
+};
+
+enum bnxt_re_cq_coal_types {
+ BNXT_RE_COAL_CQ_BUF_MAXTIME,
+ BNXT_RE_COAL_CQ_NORMAL_MAXBUF,
+ BNXT_RE_COAL_CQ_DURING_MAXBUF,
+ BNXT_RE_COAL_CQ_EN_RING_IDLE_MODE,
+ BNXT_RE_COAL_CQ_ENABLE,
+ BNXT_RE_COAL_CQ_MAX
+};
+
+struct bnxt_re_dbg_cq_coal_params {
+ struct bnxt_re_cq_coal_param params[BNXT_RE_COAL_CQ_MAX];
+};
#endif
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 44bb082e0a60..651cf9d0e0c7 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -51,25 +51,6 @@
#include "hw_counters.h"
static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
- [BNXT_RE_ACTIVE_PD].name = "active_pds",
- [BNXT_RE_ACTIVE_AH].name = "active_ahs",
- [BNXT_RE_ACTIVE_QP].name = "active_qps",
- [BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps",
- [BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps",
- [BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
- [BNXT_RE_ACTIVE_CQ].name = "active_cqs",
- [BNXT_RE_ACTIVE_MR].name = "active_mrs",
- [BNXT_RE_ACTIVE_MW].name = "active_mws",
- [BNXT_RE_WATERMARK_PD].name = "watermark_pds",
- [BNXT_RE_WATERMARK_AH].name = "watermark_ahs",
- [BNXT_RE_WATERMARK_QP].name = "watermark_qps",
- [BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps",
- [BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps",
- [BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs",
- [BNXT_RE_WATERMARK_CQ].name = "watermark_cqs",
- [BNXT_RE_WATERMARK_MR].name = "watermark_mrs",
- [BNXT_RE_WATERMARK_MW].name = "watermark_mws",
- [BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt",
[BNXT_RE_RX_PKTS].name = "rx_pkts",
[BNXT_RE_RX_BYTES].name = "rx_bytes",
[BNXT_RE_TX_PKTS].name = "tx_pkts",
@@ -79,22 +60,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
[BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
[BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
- [BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
- [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd",
- [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
- [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd",
- [BNXT_RE_MISSING_RESP].name = "missing_resp",
+ [BNXT_RE_TO_RETRANSMITS].name = "local_ack_timeout_err",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "packet_seq_err",
+ [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_nak_retry_err",
+ [BNXT_RE_MISSING_RESP].name = "implied_nak_seq_err",
[BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
[BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
[BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
[BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
[BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
- [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err",
- [BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "req_remote_invalid_request",
+ [BNXT_RE_REMOTE_ACCESS_ERR].name = "req_remote_access_errors",
[BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
- [BNXT_RE_DUP_REQ].name = "dup_req",
+ [BNXT_RE_DUP_REQ].name = "duplicate_request",
[BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
- [BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch",
+ [BNXT_RE_RES_LENGTH_MISMATCH].name = "resp_local_length_error",
[BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
[BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
[BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
@@ -118,7 +99,7 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
[BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
[BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
- [BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count",
+ [BNXT_RE_OUT_OF_SEQ_ERR].name = "out_of_sequence",
[BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
[BNXT_RE_TX_READ_REQ].name = "tx_read_req",
[BNXT_RE_TX_READ_RES].name = "tx_read_resp",
@@ -126,23 +107,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
[BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
[BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
[BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
- [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
- [BNXT_RE_RX_READ_REQ].name = "rx_read_req",
+ [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_requests",
+ [BNXT_RE_RX_READ_REQ].name = "rx_read_requests",
[BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
- [BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
+ [BNXT_RE_RX_WRITE_REQ].name = "rx_write_requests",
[BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
[BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
[BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
[BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
[BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
- [BNXT_RE_OOB].name = "rx_out_of_buffer",
- [BNXT_RE_TX_CNP].name = "tx_cnp_pkts",
- [BNXT_RE_RX_CNP].name = "rx_cnp_pkts",
- [BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts",
- [BNXT_RE_PACING_RESCHED].name = "pacing_reschedule",
- [BNXT_RE_PACING_CMPL].name = "pacing_complete",
- [BNXT_RE_PACING_ALERT].name = "pacing_alerts",
- [BNXT_RE_DB_FIFO_REG].name = "db_fifo_register",
+ [BNXT_RE_OOB].name = "out_of_buffer",
+ [BNXT_RE_TX_CNP].name = "np_cnp_pkts",
+ [BNXT_RE_RX_CNP].name = "rp_cnp_handled",
+ [BNXT_RE_RX_ECN].name = "np_ecn_marked_roce_packets",
+ [BNXT_RE_REQ_CQE_ERROR].name = "req_cqe_error",
+ [BNXT_RE_RESP_CQE_ERROR].name = "resp_cqe_error",
+ [BNXT_RE_RESP_REMOTE_ACCESS_ERRS].name = "resp_remote_access_errors",
};
static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
@@ -273,18 +253,20 @@ static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
err_s->res_rx_pci_err;
stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
err_s->res_oos_drop_count;
-}
-
-static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
- struct rdma_hw_stats *stats)
-{
- struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;
-
- stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
- stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
- stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
- stats->value[BNXT_RE_DB_FIFO_REG] =
- readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+ stats->value[BNXT_RE_REQ_CQE_ERROR] =
+ err_s->bad_resp_err +
+ err_s->local_qp_op_err +
+ err_s->local_protection_err +
+ err_s->mem_mgmt_op_err +
+ err_s->remote_invalid_req_err +
+ err_s->remote_access_err +
+ err_s->remote_op_err;
+ stats->value[BNXT_RE_RESP_CQE_ERROR] =
+ err_s->res_cmp_err +
+ err_s->res_cq_load_err;
+ stats->value[BNXT_RE_RESP_REMOTE_ACCESS_ERRS] =
+ err_s->res_rx_no_perm +
+ err_s->res_tx_no_perm;
}
int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev, struct ib_mad *out_mad)
@@ -382,7 +364,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
u32 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
struct bnxt_qplib_roce_stats *err_s = NULL;
struct ctx_hw_stats *hw_stats = NULL;
int rc = 0;
@@ -391,26 +372,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
if (!port || !stats)
return -EINVAL;
- stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
- stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
- stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
- stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
- stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
- stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
- stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
- stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
- stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
- stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
- stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
- stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
- stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
- stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
- stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
- stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
- stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
- stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
- stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);
-
if (hw_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(hw_stats->tx_bcast_pkts);
@@ -449,8 +410,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
}
- if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
- bnxt_re_copy_db_pacing_stats(rdev, stats);
}
done:
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index e541b6f8ca9f..09d371d442aa 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -41,25 +41,6 @@
#define __BNXT_RE_HW_STATS_H__
enum bnxt_re_hw_stats {
- BNXT_RE_ACTIVE_PD,
- BNXT_RE_ACTIVE_AH,
- BNXT_RE_ACTIVE_QP,
- BNXT_RE_ACTIVE_RC_QP,
- BNXT_RE_ACTIVE_UD_QP,
- BNXT_RE_ACTIVE_SRQ,
- BNXT_RE_ACTIVE_CQ,
- BNXT_RE_ACTIVE_MR,
- BNXT_RE_ACTIVE_MW,
- BNXT_RE_WATERMARK_PD,
- BNXT_RE_WATERMARK_AH,
- BNXT_RE_WATERMARK_QP,
- BNXT_RE_WATERMARK_RC_QP,
- BNXT_RE_WATERMARK_UD_QP,
- BNXT_RE_WATERMARK_SRQ,
- BNXT_RE_WATERMARK_CQ,
- BNXT_RE_WATERMARK_MR,
- BNXT_RE_WATERMARK_MW,
- BNXT_RE_RESIZE_CQ_CNT,
BNXT_RE_RX_PKTS,
BNXT_RE_RX_BYTES,
BNXT_RE_TX_PKTS,
@@ -129,10 +110,9 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_CNP,
BNXT_RE_RX_CNP,
BNXT_RE_RX_ECN,
- BNXT_RE_PACING_RESCHED,
- BNXT_RE_PACING_CMPL,
- BNXT_RE_PACING_ALERT,
- BNXT_RE_DB_FIFO_REG,
+ BNXT_RE_REQ_CQE_ERROR,
+ BNXT_RE_RESP_CQE_ERROR,
+ BNXT_RE_RESP_REMOTE_ACCESS_ERRS,
BNXT_RE_NUM_EXT_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 260dc67b8b87..f19b55c13d58 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -288,7 +288,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
}
port_attr->max_mtu = IB_MTU_4096;
port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
- port_attr->gid_tbl_len = dev_attr->max_sgid;
+ /* One GID is reserved for RawEth QP. Report one less */
+ port_attr->gid_tbl_len = (rdev->rcfw.roce_mirror ? (dev_attr->max_sgid - 1) :
+ dev_attr->max_sgid);
port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_DEVICE_MGMT_SUP |
IB_PORT_VENDOR_CLASS_SUP;
@@ -375,7 +377,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
if (!ctx)
return -EINVAL;
- if (sgid_tbl && sgid_tbl->active) {
+ if (sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max)
return -EINVAL;
gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
@@ -429,7 +431,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
rdev->qplib_res.netdev->dev_addr,
- vlan_id, true, &tbl_idx);
+ vlan_id, true, &tbl_idx, false, 0);
if (rc == -EALREADY) {
ctx_tbl = sgid_tbl->ctx;
ctx_tbl[tbl_idx]->refcnt++;
@@ -599,7 +601,8 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
mr->qplib_mr.va = (u64)(unsigned long)fence->va;
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL,
- BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE);
+ BNXT_RE_FENCE_PBL_SIZE, PAGE_SIZE,
+ _is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to register fence-MR\n");
goto fail;
@@ -911,7 +914,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
-static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+static void bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
struct bnxt_re_qp *gsi_sqp;
struct bnxt_re_ah *gsi_sah;
@@ -931,10 +934,9 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
- if (rc) {
+ if (rc)
ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
- goto fail;
- }
+
bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
/* remove from active qp list */
@@ -949,10 +951,20 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
rdev->gsi_ctx.gsi_sqp = NULL;
rdev->gsi_ctx.gsi_sah = NULL;
rdev->gsi_ctx.sqp_tbl = NULL;
+}
- return 0;
-fail:
- return rc;
+static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
+{
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return;
+
+ rc = bnxt_qplib_del_sgid(&rdev->qplib_res.sgid_tbl,
+ (struct bnxt_qplib_gid *)&rdev->ugid,
+ 0xFFFF, true);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Failed to delete unique GID, rc: %d\n", rc);
}
/* Queue Pairs */
@@ -994,6 +1006,9 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
atomic_dec(&rdev->stats.res.ud_qp_count);
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+ bnxt_re_del_unique_gid(rdev);
+
ib_umem_release(qp->rumem);
ib_umem_release(qp->sumem);
@@ -1018,6 +1033,8 @@ static u8 __from_ib_qp_type(enum ib_qp_type type)
return CMDQ_CREATE_QP_TYPE_RC;
case IB_QPT_UD:
return CMDQ_CREATE_QP_TYPE_UD;
+ case IB_QPT_RAW_PACKET:
+ return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
default:
return IB_QPT_MAX;
}
@@ -1595,6 +1612,29 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
return rc;
}
+static int bnxt_re_add_unique_gid(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return 0;
+
+ rdev->ugid.global.subnet_prefix = cpu_to_be64(0xfe8000000000abcdLL);
+ addrconf_ifid_eui48(&rdev->ugid.raw[8], rdev->netdev);
+
+ rc = bnxt_qplib_add_sgid(&res->sgid_tbl,
+ (struct bnxt_qplib_gid *)&rdev->ugid,
+ rdev->qplib_res.netdev->dev_addr,
+ 0xFFFF, true, &rdev->ugid_index, true,
+ hctx->stats3.fw_id);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Failed to add unique GID. rc = %d\n", rc);
+
+ return rc;
+}
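
The interface id filled in by addrconf_ifid_eui48() follows the usual MAC-to-EUI-64 mapping; a standalone sketch of that expansion (the subnet prefix used above, 0xfe8000000000abcd, occupies the first eight GID bytes):

	#include <stdio.h>
	#include <stdint.h>

	/* EUI-48 to EUI-64 interface id: insert 0xff/0xfe in the middle
	 * and flip the universal/local bit of the first octet.
	 */
	static void eui48_to_ifid(const uint8_t mac[6], uint8_t ifid[8])
	{
		ifid[0] = mac[0] ^ 0x02;
		ifid[1] = mac[1];
		ifid[2] = mac[2];
		ifid[3] = 0xff;
		ifid[4] = 0xfe;
		ifid[5] = mac[3];
		ifid[6] = mac[4];
		ifid[7] = mac[5];
	}

	int main(void)
	{
		uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
		uint8_t ifid[8];
		int i;

		eui48_to_ifid(mac, ifid);
		for (i = 0; i < 8; i++)
			printf("%02x%c", ifid[i], i < 7 ? ':' : '\n');
		return 0;
	}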
+
int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata)
{
@@ -1656,6 +1696,17 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
}
}
+ /* Support for RawEth QP is added to capture TCP packet dumps,
+ * so a unique SGID is used to avoid accounting its traffic
+ * against the per-function stats_ctx.
+ */
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE) {
+ rc = bnxt_re_add_unique_gid(rdev);
+ if (rc)
+ goto qp_destroy;
+ qp->qplib_qp.ugid_index = rdev->ugid_index;
+ }
+
qp->ib_qp.qp_num = qp->qplib_qp.id;
if (qp_init_attr->qp_type == IB_QPT_GSI)
rdev->gsi_ctx.gsi_qp = qp;
@@ -2301,7 +2352,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->pkey_index = qplib_qp->pkey_index;
qp_attr->qkey = qplib_qp->qkey;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->udp_sport,
qplib_qp->ah.host_sgid_index,
qplib_qp->ah.hop_limit,
qplib_qp->ah.traffic_class);
@@ -3248,9 +3299,9 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(cq->resize_umem)) {
rc = PTR_ERR(cq->resize_umem);
+ ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %pe\n",
+ __func__, cq->resize_umem);
cq->resize_umem = NULL;
- ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
- __func__, rc);
goto fail;
}
cq->resize_cqe = entries;
@@ -3977,7 +4028,7 @@ struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
mr->qplib_mr.hwq.level = PBL_LVL_MAX;
mr->qplib_mr.total_size = -1; /* Infinte length */
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, NULL, 0,
- PAGE_SIZE);
+ PAGE_SIZE, false);
if (rc)
goto fail_mr;
@@ -4207,7 +4258,8 @@ static struct ib_mr *__bnxt_re_user_reg_mr(struct ib_pd *ib_pd, u64 length, u64
umem_pgs = ib_umem_num_dma_blocks(umem, page_size);
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, umem,
- umem_pgs, page_size);
+ umem_pgs, page_size,
+ _is_alloc_mr_unified(rdev->dev_attr->dev_cap_flags));
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to register user MR - rc = %d\n", rc);
rc = -EIO;
@@ -4392,6 +4444,93 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
}
}
+static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+ int rc;
+
+ rc = bnxt_re_hwrm_alloc_vnic(rdev);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
+ if (rc)
+ goto out_free_vnic;
+
+ return 0;
+out_free_vnic:
+ bnxt_re_hwrm_free_vnic(rdev);
+ return rc;
+}
+
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+ struct ib_flow_attr *attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_re_flow *flow;
+ int rc;
+
+ if (attr->type != IB_FLOW_ATTR_SNIFFER ||
+ !rdev->rcfw.roce_mirror)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&rdev->qp_lock);
+ if (rdev->sniffer_flow_created) {
+ ibdev_err(&rdev->ibdev, "RoCE mirroring is already configured\n");
+ mutex_unlock(&rdev->qp_lock);
+ return ERR_PTR(-EBUSY);
+ }
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow) {
+ mutex_unlock(&rdev->qp_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ flow->rdev = rdev;
+
+ rc = bnxt_re_setup_vnic(rdev, qp);
+ if (rc)
+ goto out_free_flow;
+
+ rc = bnxt_qplib_create_flow(&rdev->qplib_res);
+ if (rc)
+ goto out_free_vnic;
+
+ rdev->sniffer_flow_created = 1;
+ mutex_unlock(&rdev->qp_lock);
+
+ return &flow->ib_flow;
+
+out_free_vnic:
+ bnxt_re_hwrm_free_vnic(rdev);
+out_free_flow:
+ mutex_unlock(&rdev->qp_lock);
+ kfree(flow);
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_destroy_flow(struct ib_flow *flow_id)
+{
+ struct bnxt_re_flow *flow =
+ container_of(flow_id, struct bnxt_re_flow, ib_flow);
+ struct bnxt_re_dev *rdev = flow->rdev;
+ int rc;
+
+ mutex_lock(&rdev->qp_lock);
+ rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
+ rdev->sniffer_flow_created = 0;
+
+ bnxt_re_hwrm_free_vnic(rdev);
+ mutex_unlock(&rdev->qp_lock);
+ kfree(flow);
+
+ return rc;
+}
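
On the user side this create/destroy pair is reached through the standard verbs flow API; a sketch of attaching a sniffer flow with libibverbs (assumes an already-created QP on the device; error handling trimmed):

	#include <infiniband/verbs.h>

	static struct ibv_flow *attach_sniffer(struct ibv_qp *qp)
	{
		struct ibv_flow_attr attr = {
			.type = IBV_FLOW_ATTR_SNIFFER,
			.size = sizeof(attr),
			.port = 1,
		};

		/* returns NULL on failure; release with ibv_destroy_flow() */
		return ibv_create_flow(qp, &attr);
	}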
+
static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
{
struct bnxt_re_cq *cq = NULL, *tmp_cq;
@@ -4604,7 +4743,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
return err;
err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
- &dpi, sizeof(length));
+ &dpi, sizeof(dpi));
if (err)
return err;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index fe00ab691a51..76ba9ab04d5c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -164,6 +164,11 @@ struct bnxt_re_user_mmap_entry {
u8 mmap_flag;
};
+struct bnxt_re_flow {
+ struct ib_flow ib_flow;
+ struct bnxt_re_dev *rdev;
+};
+
static inline u16 bnxt_re_get_swqe_size(int nsge)
{
return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
@@ -267,6 +272,11 @@ struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
struct uverbs_attr_bundle *attrs);
int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+ struct ib_flow_attr *attr,
+ struct ib_udata *udata);
+int bnxt_re_destroy_flow(struct ib_flow *flow_id);
+
int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index df7cf8d68e27..73003ad25ee8 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -80,6 +80,7 @@ MODULE_LICENSE("Dual BSD/GPL");
static DEFINE_MUTEX(bnxt_re_mutex);
static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev);
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset);
@@ -188,6 +189,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
rdev->qplib_res.en_dev = en_dev;
+ rc = bnxt_re_query_hwrm_intf_version(rdev);
+ if (rc)
+ goto free_dev_attr;
+
bnxt_re_set_drv_mode(rdev);
bnxt_re_set_db_offset(rdev);
@@ -540,6 +545,72 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
fw_msg->timeout = timeout;
}
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_free_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_FREE);
+
+ req.vnic_id = cpu_to_le32(rdev->mirror_vnic_id);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+ 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to free vnic, rc = %d\n", rc);
+}
+
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_alloc_output resp = {};
+ struct hwrm_vnic_alloc_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_ALLOC);
+
+ req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+ req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to alloc vnic, rc = %d\n", rc);
+
+ return rc;
+}
+
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_vnic_cfg_input req = {};
+ struct bnxt_fw_msg fw_msg = {};
+ int rc;
+
+ bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_CFG);
+
+ req.flags = cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE);
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_RAW_QP_ID |
+ VNIC_CFG_REQ_ENABLES_MRU);
+ req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+ req.raw_qp_id = cpu_to_le32(qp_id);
+ req.mru = cpu_to_le16(rdev->netdev->mtu + VLAN_ETH_HLEN);
+
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+ 0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = bnxt_send_msg(en_dev, &fw_msg);
+ if (rc)
+ ibdev_dbg(&rdev->ibdev,
+ "Failed to cfg vnic, rc = %d\n", rc);
+
+ return rc;
+}
+
/* Query device config using common hwrm */
static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
u32 *offset)
@@ -553,11 +624,12 @@ static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc) {
*db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
*offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
+ rdev->mirror_vnic_id = le16_to_cpu(resp.mirror_vnic_id);
}
return rc;
}
@@ -577,7 +649,7 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
req.fid = cpu_to_le16(0xffff);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
@@ -587,6 +659,8 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
flags_ext2 = le32_to_cpu(resp.flags_ext2);
cctx->modes.dbr_pacing = flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED;
+ cctx->modes.roce_mirror = !!(le32_to_cpu(resp.flags_ext3) &
+ FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED);
return 0;
}
@@ -603,7 +677,7 @@ static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
cctx = rdev->chip_ctx;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
return rc;
@@ -842,20 +916,12 @@ static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
u16 fw_ring_id, int type)
{
- struct bnxt_en_dev *en_dev;
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ring_free_input req = {};
struct hwrm_ring_free_output resp;
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- if (!rdev)
- return rc;
-
- en_dev = rdev->en_dev;
-
- if (!en_dev)
- return rc;
-
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
return 0;
@@ -863,7 +929,7 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
req.ring_type = type;
req.ring_id = cpu_to_le16(fw_ring_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
@@ -881,9 +947,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- if (!en_dev)
- return rc;
-
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
req.enables = 0;
req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
@@ -899,7 +962,7 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
req.ring_type = ring_attr->type;
req.int_mode = ring_attr->mode;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
*fw_ring_id = le16_to_cpu(resp.ring_id);
@@ -916,16 +979,13 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- if (!en_dev)
- return rc;
-
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
return 0;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
@@ -935,8 +995,7 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
}
static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
- dma_addr_t dma_map,
- u32 *fw_stats_ctx_id)
+ struct bnxt_qplib_stats *stats)
{
struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
struct hwrm_stat_ctx_alloc_output resp = {};
@@ -945,21 +1004,18 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
struct bnxt_fw_msg fw_msg = {};
int rc = -EINVAL;
- *fw_stats_ctx_id = INVALID_STATS_CTX_ID;
-
- if (!en_dev)
- return rc;
+ stats->fw_id = INVALID_STATS_CTX_ID;
bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
req.update_period_ms = cpu_to_le32(1000);
- req.stats_dma_addr = cpu_to_le64(dma_map);
+ req.stats_dma_addr = cpu_to_le64(stats->dma_map);
req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
- sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
- *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
+ stats->fw_id = le32_to_cpu(resp.stat_ctx_id);
return rc;
}
@@ -975,7 +1031,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct bnxt_re_dev *rdev =
rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
- return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
+ return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->revision);
}
static DEVICE_ATTR_RO(hw_rev);
@@ -985,13 +1041,31 @@ static ssize_t hca_type_show(struct device *device,
struct bnxt_re_dev *rdev =
rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
- return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
+ return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);
+static ssize_t board_id_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = rdma_device_to_drv_device(device,
+ struct bnxt_re_dev, ibdev);
+ char buffer[BNXT_VPD_FLD_LEN] = {};
+
+ if (!rdev->is_virtfn)
+ memcpy(buffer, rdev->board_partno, BNXT_VPD_FLD_LEN - 1);
+ else
+ scnprintf(buffer, BNXT_VPD_FLD_LEN, "0x%x-VF",
+ rdev->en_dev->pdev->device);
+
+ return sysfs_emit(buf, "%s\n", buffer);
+}
+static DEVICE_ATTR_RO(board_id);
+
static struct attribute *bnxt_re_attributes[] = {
&dev_attr_hw_rev.attr,
&dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
NULL
};
@@ -1207,6 +1281,8 @@ static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq
goto err;
if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge))
goto err;
+ if (rdma_nl_put_driver_u32_hex(msg, "srq_limit", srq->qplib_srq.threshold))
+ goto err;
nla_nest_end(msg, table_attr);
return 0;
@@ -1297,6 +1373,8 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
.req_notify_cq = bnxt_re_req_notify_cq,
.resize_cq = bnxt_re_resize_cq,
+ .create_flow = bnxt_re_create_flow,
+ .destroy_flow = bnxt_re_destroy_flow,
INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
@@ -1323,8 +1401,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
/* ib device init */
ibdev->node_type = RDMA_NODE_IB_CA;
- strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
- strlen(BNXT_RE_DESC) + 5);
+ strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA");
ibdev->phys_port_cnt = 1;
addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
@@ -1376,6 +1453,7 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev,
atomic_set(&rdev->stats.res.pd_count, 0);
rdev->cosq[0] = 0xFFFF;
rdev->cosq[1] = 0xFFFF;
+ rdev->cq_coalescing.enable = 1;
rdev->cq_coalescing.buf_maxtime = BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME;
if (bnxt_re_chip_gen_p7(en_dev->chip_num)) {
rdev->cq_coalescing.normal_maxbuf = BNXT_QPLIB_CQ_COAL_DEF_NORMAL_MAXBUF_P7;
@@ -1850,81 +1928,6 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
mutex_unlock(&rdev->qp_lock);
}
-static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
-{
- struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
- struct bnxt_qplib_gid gid;
- u16 gid_idx, index;
- int rc = 0;
-
- if (!ib_device_try_get(&rdev->ibdev))
- return 0;
-
- for (index = 0; index < sgid_tbl->active; index++) {
- gid_idx = sgid_tbl->hw_id[index];
-
- if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
- sizeof(bnxt_qplib_gid_zero)))
- continue;
- /* need to modify the VLAN enable setting of non VLAN GID only
- * as setting is done for VLAN GID while adding GID
- */
- if (sgid_tbl->vlan[index])
- continue;
-
- memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
-
- rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
- rdev->qplib_res.netdev->dev_addr);
- }
-
- ib_device_put(&rdev->ibdev);
- return rc;
-}
-
-static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
-{
- u32 prio_map = 0, tmp_map = 0;
- struct net_device *netdev;
- struct dcb_app app = {};
-
- netdev = rdev->netdev;
-
- app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
- app.protocol = ETH_P_IBOE;
- tmp_map = dcb_ieee_getapp_mask(netdev, &app);
- prio_map = tmp_map;
-
- app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
- app.protocol = ROCE_V2_UDP_DPORT;
- tmp_map = dcb_ieee_getapp_mask(netdev, &app);
- prio_map |= tmp_map;
-
- return prio_map;
-}
-
-static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
-{
- u8 prio_map = 0;
-
- /* Get priority for roce */
- prio_map = bnxt_re_get_priority_mask(rdev);
-
- if (prio_map == rdev->cur_prio_map)
- return 0;
- rdev->cur_prio_map = prio_map;
- /* Actual priorities are not programmed as they are already
- * done by L2 driver; just enable or disable priority vlan tagging
- */
- if ((prio_map == 0 && rdev->qplib_res.prio) ||
- (prio_map != 0 && !rdev->qplib_res.prio)) {
- rdev->qplib_res.prio = prio_map;
- bnxt_re_update_gid(rdev);
- }
-
- return 0;
-}
-
static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
{
if (rdev->is_virtfn)
@@ -1945,7 +1948,31 @@ static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
}
-static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
+static void bnxt_re_read_vpd_info(struct bnxt_re_dev *rdev)
+{
+ struct pci_dev *pdev = rdev->en_dev->pdev;
+ unsigned int vpd_size, kw_len;
+ int pos, size;
+ u8 *vpd_data;
+
+ vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+ if (IS_ERR(vpd_data)) {
+ pci_warn(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
+ return;
+ }
+
+ pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+ PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+ if (pos < 0)
+ goto free;
+
+ size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
+ memcpy(rdev->board_partno, &vpd_data[pos], size);
+free:
+ kfree(vpd_data);
+}
+
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ver_get_output resp = {};
@@ -1964,7 +1991,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
rc);
- return;
+ return rc;
}
cctx = rdev->chip_ctx;
@@ -1978,6 +2005,8 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
if (!cctx->hwrm_cmd_max_timeout)
cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
+
+ return 0;
}
static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
@@ -2039,6 +2068,72 @@ static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
}
}
+static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats);
+ if (rc)
+ goto free_stat_mem;
+
+ return 0;
+free_stat_mem:
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+
+ return rc;
+}
+
+static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+ int rc;
+
+ if (!rdev->rcfw.roce_mirror)
+ return 0;
+
+ rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3);
+ if (rc)
+ return rc;
+
+ rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3);
+ if (rc)
+ goto free_stat_mem;
+
+ return 0;
+free_stat_mem:
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+
+ return rc;
+}
+
+static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+ if (!rdev->rcfw.roce_mirror)
+ return;
+
+ bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id);
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+}
+
+static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+ struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+ bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id);
+ bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+}
+
static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
{
u8 type;
@@ -2049,8 +2144,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_net_unregister_async_event(rdev);
bnxt_re_uninit_dcb_wq(rdev);
- if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
- cancel_delayed_work_sync(&rdev->worker);
+ bnxt_re_put_stats3_ctx(rdev);
bnxt_re_free_gid_ctx(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
@@ -2064,8 +2158,8 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
if (rc)
ibdev_warn(&rdev->ibdev,
"Failed to deinitialize RCFW: %#x", rc);
- bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
- bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ bnxt_re_put_stats_ctx(rdev);
+ bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
@@ -2085,16 +2179,6 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
}
}
-/* worker thread for polling periodic events. Now used for QoS programming*/
-static void bnxt_re_worker(struct work_struct *work)
-{
- struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
- worker.work);
-
- bnxt_re_setup_qos(rdev);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-}
-
static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
{
struct bnxt_re_ring_attr rattr = {};
@@ -2109,8 +2193,9 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
rc = bnxt_re_register_netdev(rdev);
if (rc) {
ibdev_err(&rdev->ibdev,
- "Failed to register with netedev: %#x\n", rc);
- return -EINVAL;
+ "Failed to register with Ethernet driver, rc %d\n",
+ rc);
+ return rc;
}
}
set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
@@ -2148,8 +2233,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
/* Check whether VF or PF */
bnxt_re_get_sriov_func_type(rdev);
- bnxt_re_query_hwrm_intf_version(rdev);
-
/* Establish RCFW Communication Channel to initialize the context
* memory for the function and all child VFs
*/
@@ -2199,18 +2282,20 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
if (rc)
goto disable_rcfw;
+ bnxt_qplib_query_version(&rdev->rcfw);
bnxt_re_set_resource_limits(rdev);
- rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
- bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
- if (rc) {
- ibdev_err(&rdev->ibdev,
- "Failed to allocate QPLIB context: %#x\n", rc);
- goto disable_rcfw;
+ if (!rdev->is_virtfn &&
+ !bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate hw context: %#x\n", rc);
+ goto disable_rcfw;
+ }
}
- rc = bnxt_re_net_stats_ctx_alloc(rdev,
- rdev->qplib_ctx.stats.dma_map,
- &rdev->qplib_ctx.stats.fw_id);
+
+ rc = bnxt_re_get_stats_ctx(rdev);
if (rc) {
ibdev_err(&rdev->ibdev,
"Failed to allocate stats context: %#x\n", rc);
@@ -2249,15 +2334,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
if (rc)
ibdev_warn(&rdev->ibdev, "Failed to query CC defaults\n");
- rc = bnxt_re_setup_qos(rdev);
- if (rc)
- ibdev_info(&rdev->ibdev,
- "RoCE priority not yet configured\n");
-
- INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
- set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
- schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-
if (!(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT))
bnxt_re_vf_res_config(rdev);
}
@@ -2270,11 +2346,18 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
bnxt_re_init_dcb_wq(rdev);
bnxt_re_net_register_async_event(rdev);
+ if (!rdev->is_virtfn)
+ bnxt_re_read_vpd_info(rdev);
+
+ rc = bnxt_re_get_stats3_ctx(rdev);
+ if (rc)
+ goto fail;
+
return 0;
free_sctx:
bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
- bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+ bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
disable_rcfw:
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
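The reworked init path acquires the hw context and the stats context as separate steps, each undone by its own label on failure. A generic sketch of the reverse-order goto unwind this relies on; alloc_a/alloc_b/free_a are placeholders, not driver functions:

struct my_dev;
int alloc_a(struct my_dev *mdev);
int alloc_b(struct my_dev *mdev);
void free_a(struct my_dev *mdev);

static int init_example(struct my_dev *mdev)
{
	int rc;

	rc = alloc_a(mdev);		/* e.g. hw context */
	if (rc)
		return rc;

	rc = alloc_b(mdev);		/* e.g. stats context */
	if (rc)
		goto err_free_a;

	return 0;

err_free_a:
	free_a(mdev);			/* undo in reverse acquisition order */
	return rc;
}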
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index ee36b3d82cc0..c88f049136fc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1307,6 +1307,7 @@ static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &res->sgid_tbl;
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct creq_modify_qp_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
@@ -1358,9 +1359,14 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
req.flow_label = cpu_to_le32(qp->ah.flow_label);
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
- req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
- [qp->ah.sgid_index]);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ugid_index]);
+ else
+ req.sgid_index =
+ cpu_to_le16(sgid_tbl->hw_id[qp->ah.sgid_index]);
+ }
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
req.hop_limit = qp->ah.hop_limit;
@@ -1464,6 +1470,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->access = sb->access;
qp->pkey_index = le16_to_cpu(sb->pkey);
qp->qkey = le32_to_cpu(sb->qkey);
+ qp->udp_sport = le16_to_cpu(sb->udp_src_port);
temp32[0] = le32_to_cpu(sb->dgid[0]);
temp32[1] = le32_to_cpu(sb->dgid[1]);
@@ -2219,7 +2226,8 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
req.cq_handle = cpu_to_le64(cq->cq_handle);
req.cq_size = cpu_to_le32(cq->max_wqe);
- if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2)) {
+ if (_is_cq_coalescing_supported(res->dattr->dev_cap_flags2) &&
+ cq->coalescing->enable) {
req.flags |= cpu_to_le16(CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID);
coalescing |= ((cq->coalescing->buf_maxtime <<
CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 4921a214c34c..1b414a73b46d 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -299,6 +299,7 @@ struct bnxt_qplib_qp {
u8 smac[6];
u16 vlan_id;
u16 port_id;
+ u16 udp_sport;
u8 nw_type;
struct bnxt_qplib_ah ah;
@@ -344,6 +345,7 @@ struct bnxt_qplib_qp {
u32 msn_tbl_sz;
bool is_host_msn_tbl;
u8 tos_dscp;
+ u32 ugid_index;
};
#define BNXT_RE_MAX_MSG_SIZE 0x80000000
@@ -393,6 +395,7 @@ struct bnxt_qplib_cq_coal_param {
u8 normal_maxbuf;
u8 during_maxbuf;
u8 en_ring_idle_mode;
+ u8 enable;
};
#define BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME 0x1
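With the new enable field, CQ coalescing is requested only when the caller opts in as well as when the device supports it. A sketch of opting in before bnxt_qplib_create_cq(), assuming the caller owns the coal_param struct:

/* Sketch: request CQ coalescing; without coal->enable set,
 * bnxt_qplib_create_cq() above no longer sends COALESCING_VALID.
 */
static void cq_opt_in_coalescing(struct bnxt_qplib_cq_coal_param *coal)
{
	coal->buf_maxtime = BNXT_QPLIB_CQ_COAL_DEF_BUF_MAXTIME;
	coal->enable = 1;
}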
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 804bc773b4ef..295a9610f3e6 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -186,7 +186,7 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
* wait for command completion. The maximum holding interval is 8 seconds.

*
* Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if the command is not completed within the specified time interval.
* 0 if command is completed by firmware.
*/
static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -366,6 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
wmb();
writel(cmdq_prod, cmdq->cmdq_mbox.prod);
writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
+ print_hex_dump_bytes("req: ", DUMP_PREFIX_OFFSET, msg->req, msg->req_sz);
spin_unlock_bh(&hwq->lock);
/* Return the CREQ response pointer */
return 0;
@@ -381,7 +382,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
* This function cannot be called from a non-sleepable context.
*
* Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if the command is not completed within the specified time interval.
* 0 if command is completed by firmware.
*/
static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -631,6 +632,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
int rc = 0;
pdev = rcfw->pdev;
+ print_hex_dump_bytes("event: ", DUMP_PREFIX_OFFSET, qp_event, sizeof(*qp_event));
switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
err_event = (struct creq_qp_error_notification *)qp_event;
@@ -903,6 +905,10 @@ skip_ctx_setup:
flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
if (rcfw->res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)
flags |= CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT;
+ if (bnxt_qplib_roce_mirror_supported(rcfw->res->cctx)) {
+ flags |= CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED;
+ rcfw->roce_mirror = true;
+ }
req.flags |= cpu_to_le16(flags);
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
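The INITIALIZE_FW hunk follows the usual negotiate-and-cache shape: advertise the capability to firmware only when the chip context reports it, and cache the result (rcfw->roce_mirror) for later checks. A generic sketch with placeholder names (my_fw_ctx, chip_has_mirror, MY_MIRROR_FLAG):

#include <linux/types.h>

#define MY_MIRROR_FLAG 0x80	/* placeholder for the real HSI flag */

struct my_fw_ctx {
	bool mirror_enabled;
};

bool chip_has_mirror(struct my_fw_ctx *ctx);

/* Sketch: advertise a capability only when supported, cache the result. */
static u16 negotiate_mirror(struct my_fw_ctx *ctx, u16 flags)
{
	if (chip_has_mirror(ctx)) {
		flags |= MY_MIRROR_FLAG;
		ctx->mirror_enabled = true;
	}
	return flags;
}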
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index ff873c5f1b25..988c89b4232e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -236,6 +236,7 @@ struct bnxt_qplib_rcfw {
atomic_t timeout_send;
/* cached from chip cctx for quick reference in slow path */
u16 max_timeout;
+ bool roce_mirror;
};
struct bnxt_qplib_cmdqmsg {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index cc5c82d96839..875d7b52c06a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -53,12 +53,6 @@
#include "qplib_sp.h"
#include "qplib_rcfw.h"
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_stats *stats);
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_chip_ctx *cctx,
- struct bnxt_qplib_stats *stats);
-
/* PBL */
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
bool is_umem)
@@ -352,8 +346,8 @@ fail:
}
/* Context Tables */
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx)
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
{
int i;
@@ -367,7 +361,6 @@ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
/* restore original pde level before destroy */
ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
- bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}
static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
@@ -466,7 +459,7 @@ fail:
}
/*
- * Routine: bnxt_qplib_alloc_ctx
+ * Routine: bnxt_qplib_alloc_hwctx
* Description:
* Context tables are memories which are used by the chip fw.
* The 6 tables defined are:
@@ -486,17 +479,13 @@ fail:
* Returns:
* 0 if success, else -ERRORS
*/
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx,
- bool virt_fn, bool is_p5)
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx)
{
struct bnxt_qplib_hwq_attr hwq_attr = {};
struct bnxt_qplib_sg_info sginfo = {};
int rc;
- if (virt_fn || is_p5)
- goto stats_alloc;
-
/* QPC Tables */
sginfo.pgsize = PAGE_SIZE;
sginfo.pgshft = PAGE_SHIFT;
@@ -542,16 +531,11 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
if (rc)
goto fail;
-stats_alloc:
- /* Stats */
- rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
- if (rc)
- goto fail;
return 0;
fail:
- bnxt_qplib_free_ctx(res, ctx);
+ bnxt_qplib_free_hwctx(res, ctx);
return rc;
}
@@ -832,8 +816,8 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
}
/* Stats */
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_stats *stats)
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_stats *stats)
{
if (stats->dma) {
dma_free_coherent(&pdev->dev, stats->size,
@@ -843,9 +827,9 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
stats->fw_id = -1;
}
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
- struct bnxt_qplib_chip_ctx *cctx,
- struct bnxt_qplib_stats *stats)
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats)
{
memset(stats, 0, sizeof(*stats));
stats->fw_id = -1;
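bnxt_qplib_alloc_stats_ctx() and bnxt_qplib_free_stats_ctx() are now exported so stats memory is managed independently of the hw context. At its core the allocation is a dma_alloc_coherent(); a minimal sketch of that pattern (size handling simplified):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch: back a stats context with DMA-coherent memory; fw_id of -1
 * marks it as not yet registered with firmware.
 */
static int stats_buf_alloc(struct pci_dev *pdev,
			   struct bnxt_qplib_stats *stats, size_t size)
{
	stats->size = size;
	stats->fw_id = -1;
	stats->dma = dma_alloc_coherent(&pdev->dev, size, &stats->dma_map,
					GFP_KERNEL);
	return stats->dma ? 0 : -ENOMEM;
}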
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 6a13927674b4..2ea3b7f232a3 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -65,6 +65,7 @@ struct bnxt_qplib_drv_modes {
bool db_push;
bool dbr_pacing;
u32 toggle_bits;
+ u8 roce_mirror;
};
enum bnxt_re_toggle_modes {
@@ -303,6 +304,7 @@ struct bnxt_qplib_ctx {
struct bnxt_qplib_hwq tim_tbl;
struct bnxt_qplib_tqm_ctx tqm_ctx;
struct bnxt_qplib_stats stats;
+ struct bnxt_qplib_stats stats3;
struct bnxt_qplib_vf_res vf_res;
};
@@ -432,15 +434,19 @@ void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx);
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
- struct bnxt_qplib_ctx *ctx,
- bool virt_fn, bool is_p5);
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx);
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx);
int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
int bnxt_qplib_determine_atomics(struct pci_dev *dev);
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats);
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+ struct bnxt_qplib_stats *stats);
static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
struct bnxt_qplib_hwq *hwq, u32 cnt)
@@ -582,6 +588,11 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
return cctx->modes.dbr_pacing;
}
+static inline u8 bnxt_qplib_roce_mirror_supported(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.roce_mirror;
+}
+
static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
{
return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 68981399598d..408a34df2667 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -66,14 +66,15 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
-static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
- char *fw_ver)
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw)
{
struct creq_query_version_resp resp = {};
struct bnxt_qplib_cmdqmsg msg = {};
struct cmdq_query_version req = {};
+ struct bnxt_qplib_dev_attr *attr;
int rc;
+ attr = rcfw->res->dattr;
bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
CMDQ_BASE_OPCODE_QUERY_VERSION,
sizeof(req));
@@ -82,10 +83,10 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
if (rc)
return;
- fw_ver[0] = resp.fw_maj;
- fw_ver[1] = resp.fw_minor;
- fw_ver[2] = resp.fw_bld;
- fw_ver[3] = resp.fw_rsvd;
+ attr->fw_ver[0] = resp.fw_maj;
+ attr->fw_ver[1] = resp.fw_minor;
+ attr->fw_ver[2] = resp.fw_bld;
+ attr->fw_ver[3] = resp.fw_rsvd;
}
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
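Since the queried version is now cached in the device attributes, consumers can format it without another firmware round-trip. A sketch, assuming fw_ver remains the 4-byte array used above:

#include <linux/kernel.h>

/* Sketch: render the cached firmware version, e.g. for sysfs. */
static void format_fw_ver(struct bnxt_qplib_dev_attr *attr,
			  char *buf, size_t len)
{
	snprintf(buf, len, "%d.%d.%d.%d",
		 attr->fw_ver[0], attr->fw_ver[1],
		 attr->fw_ver[2], attr->fw_ver[3]);
}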
@@ -161,7 +162,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
attr->max_pkey = 1;
- attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
+ attr->max_inline_data = attr->max_qp_sges * sizeof(struct sq_sge);
if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
@@ -179,8 +180,6 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
attr->max_srq += le16_to_cpu(sb->max_srq_ext);
- bnxt_qplib_query_version(rcfw, attr->fw_ver);
-
for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
tqm_alloc = (u8 *)&temp;
@@ -309,7 +308,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, const u8 *smac,
- u16 vlan_id, bool update, u32 *index)
+ u16 vlan_id, bool update, u32 *index,
+ bool is_ugid, u32 stats_ctx_id)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -374,6 +374,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
+ req.stats_ctx = cpu_to_le16(CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID |
+ (u16)stats_ctx_id);
+
bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
sizeof(resp), 0);
rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
@@ -397,46 +400,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return 0;
}
-int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, u16 gid_idx,
- const u8 *smac)
-{
- struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
- struct bnxt_qplib_res,
- sgid_tbl);
- struct bnxt_qplib_rcfw *rcfw = res->rcfw;
- struct creq_modify_gid_resp resp = {};
- struct bnxt_qplib_cmdqmsg msg = {};
- struct cmdq_modify_gid req = {};
- int rc;
-
- bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
- CMDQ_BASE_OPCODE_MODIFY_GID,
- sizeof(req));
-
- req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
- req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
- req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
- req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
- if (res->prio) {
- req.vlan |= cpu_to_le16
- (CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
- CMDQ_ADD_GID_VLAN_VLAN_EN);
- }
-
- /* MAC in network format */
- req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
- req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
- req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
-
- req.gid_index = cpu_to_le16(gid_idx);
-
- bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
- sizeof(resp), 0);
- rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
- return rc;
-}
-
/* AH */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block)
@@ -615,7 +578,7 @@ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
+ struct ib_umem *umem, int num_pbls, u32 buf_pg_size, bool unified_mr)
{
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct bnxt_qplib_hwq_attr hwq_attr = {};
@@ -677,7 +640,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
req.access = (mr->access_flags & BNXT_QPLIB_MR_ACCESS_MASK);
req.va = cpu_to_le64(mr->va);
req.key = cpu_to_le32(mr->lkey);
- if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
+ if (unified_mr)
req.key = cpu_to_le32(mr->pd->id);
req.flags = cpu_to_le16(mr->flags);
req.mr_size = cpu_to_le64(mr->total_size);
@@ -688,7 +651,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
if (rc)
goto fail;
- if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) {
+ if (unified_mr) {
mr->lkey = le32_to_cpu(resp.xid);
mr->rkey = mr->lkey;
}
@@ -1143,3 +1106,40 @@ out:
dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
return rc;
}
+
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res)
+{
+ struct creq_roce_mirror_cfg_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_roce_mirror_cfg req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+ sizeof(req));
+
+ req.mirror_flags = (u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE;
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res)
+{
+ struct creq_roce_mirror_cfg_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_roce_mirror_cfg req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+ sizeof(req));
+
+ req.mirror_flags &= ~((u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
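A plausible call pattern for the new mirror-configuration pair, assuming the caller first checks bnxt_qplib_roce_mirror_supported(); a sketch, not driver code:

/* Sketch: enable RoCE mirroring for the lifetime of a flow object. */
static int mirror_flow_example(struct bnxt_qplib_res *res)
{
	int rc;

	if (!bnxt_qplib_roce_mirror_supported(res->cctx))
		return -EOPNOTSUPP;

	rc = bnxt_qplib_create_flow(res);	/* sends MIRROR_ENABLE */
	if (rc)
		return rc;

	/* ... traffic is mirrored while the flow is active ... */

	return bnxt_qplib_destroy_flow(res);	/* enable bit cleared */
}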
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 09faf4a1e849..5a45c55c6464 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -323,7 +323,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id,
- bool update, u32 *index);
+ bool update, u32 *index,
+ bool is_ugid, u32 stats_ctx_id);
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
@@ -340,7 +341,7 @@ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
bool block);
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
- struct ib_umem *umem, int num_pbls, u32 buf_pg_size);
+ struct ib_umem *umem, int num_pbls, u32 buf_pg_size, bool unified_mr);
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
struct bnxt_qplib_mrw *mr, int max);
@@ -358,6 +359,9 @@ int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
u32 resp_size, void *resp_va);
int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
struct bnxt_qplib_cc_param *cc_param);
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res);
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res);
#define BNXT_VAR_MAX_WQE 4352
#define BNXT_VAR_MAX_SLOT_ALIGN 256
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 024845f945ff..99ecd72e72e2 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -144,7 +144,8 @@ struct cmdq_base {
#define CMDQ_BASE_OPCODE_MODIFY_CQ 0x90UL
#define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND 0x91UL
#define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
- #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT
+ #define CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG 0x99UL
+ #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG
u8 cmd_size;
__le16 flags;
__le16 cookie;
@@ -218,6 +219,7 @@ struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED 0x2UL
#define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED 0x8UL
#define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT 0x10UL
+ #define CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED 0x80UL
__le16 cookie;
u8 resp_size;
u8 reserved8;
@@ -788,7 +790,8 @@ struct creq_query_qp_resp_sb {
#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC 0x8UL
__le16 pkey;
__le32 qkey;
- __le32 reserved32;
+ __le16 udp_src_port;
+ __le16 reserved16;
__le32 dgid[4];
__le32 flow_label;
__le16 sgid_index;
@@ -2108,6 +2111,43 @@ struct creq_query_roce_stats_ext_resp_sb {
__le64 dup_req;
};
+/* cmdq_roce_mirror_cfg (size:192b/24B) */
+struct cmdq_roce_mirror_cfg {
+ u8 opcode;
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG 0x99UL
+ #define CMDQ_ROCE_MIRROR_CFG_OPCODE_LAST \
+ CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG
+ u8 cmd_size;
+ __le16 flags;
+ __le16 cookie;
+ u8 resp_size;
+ u8 reserved8;
+ __le64 resp_addr;
+ u8 mirror_flags;
+ #define CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE 0x1UL
+ u8 rsvd[7];
+};
+
+/* creq_roce_mirror_cfg_resp (size:128b/16B) */
+struct creq_roce_mirror_cfg_resp {
+ u8 type;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_SFT 0
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_LAST \
+ CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 reserved32;
+ u8 v;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG 0x99UL
+ #define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_LAST \
+ CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG
+ u8 reserved48[6];
+};
+
/* cmdq_query_func (size:128b/16B) */
struct cmdq_query_func {
u8 opcode;
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index b67747ae6a68..d892f55febe2 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1228,9 +1228,8 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
if (!ctx->dev) {
ctx->dev = c4iw_alloc(&ctx->lldi);
if (IS_ERR(ctx->dev)) {
- pr_err("%s: initialization failed: %ld\n",
- pci_name(ctx->lldi.pdev),
- PTR_ERR(ctx->dev));
+ pr_err("%s: initialization failed: %pe\n",
+ pci_name(ctx->lldi.pdev), ctx->dev);
ctx->dev = NULL;
break;
}
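Several hunks in this series switch error logging from PTR_ERR() with %ld to the %pe specifier, which prints an ERR_PTR symbolically (e.g. "-ENOMEM") instead of a raw number. A minimal sketch:

#include <linux/err.h>
#include <linux/printk.h>

/* Sketch: %pe renders an ERR_PTR symbolically, e.g. "-ENOMEM". */
static void report_err_ptr(void *obj)
{
	if (IS_ERR(obj))
		pr_err("operation failed: %pe\n", obj);
}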
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index dcdfe250bdbe..adeed7447e7b 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -348,7 +348,7 @@ static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
{
int err;
- pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+ pr_debug("*pbl_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
pbl_addr, rdev->lldi.vr->pbl.start,
pbl_size);
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index bafd210dd43e..0e979ca10d24 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
@@ -30,6 +30,7 @@ struct efa_comp_ctx {
struct efa_admin_acq_entry *user_cqe;
u32 comp_size;
enum efa_cmd_status status;
+ u16 cmd_id;
u8 cmd_opcode;
u8 occupied;
};
@@ -333,6 +334,7 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
comp_ctx->comp_size = comp_size_in_bytes;
comp_ctx->user_cqe = comp;
comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
+ comp_ctx->cmd_id = cmd_id;
reinit_completion(&comp_ctx->wait_event);
@@ -557,17 +559,19 @@ static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *com
if (comp_ctx->status == EFA_CMD_COMPLETED)
ibdev_err_ratelimited(
aq->efa_dev,
- "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx->cmd_opcode, comp_ctx->status,
- comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+ aq->cq.cc);
else
ibdev_err_ratelimited(
aq->efa_dev,
- "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
+ "The device didn't send any completion for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx->cmd_opcode, comp_ctx->status,
- comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
+ comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
+ aq->cq.cc);
clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
err = -ETIME;
@@ -631,9 +635,9 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
if (IS_ERR(comp_ctx)) {
ibdev_err_ratelimited(
aq->efa_dev,
- "Failed to submit command %s (opcode %u) err %ld\n",
+ "Failed to submit command %s (opcode %u) err %pe\n",
efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
- cmd->aq_common_descriptor.opcode, PTR_ERR(comp_ctx));
+ cmd->aq_common_descriptor.opcode, comp_ctx);
up(&aq->avail_cmds);
atomic64_inc(&aq->stats.cmd_err);
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 886923d5fe50..22d3e25c3b9d 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1216,13 +1216,13 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (umem->length < cq->size) {
ibdev_dbg(&dev->ibdev, "External memory too small\n");
err = -EINVAL;
- goto err_free_mem;
+ goto err_out;
}
if (!ib_umem_is_contiguous(umem)) {
ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
err = -EINVAL;
- goto err_free_mem;
+ goto err_out;
}
cq->cpu_addr = NULL;
@@ -1251,7 +1251,7 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err = efa_com_create_cq(&dev->edev, &params, &result);
if (err)
- goto err_free_mem;
+ goto err_free_mapped;
resp.db_off = result.db_off;
resp.cq_idx = result.cq_idx;
@@ -1299,12 +1299,10 @@ err_remove_mmap:
efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
-err_free_mem:
- if (umem)
- ib_umem_release(umem);
- else
- efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
-
+err_free_mapped:
+ if (!umem)
+ efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
+ DMA_FROM_DEVICE);
err_out:
atomic64_inc(&dev->stats.create_cq_err);
return err;
@@ -1788,7 +1786,8 @@ struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
access_flags);
if (IS_ERR(umem_dmabuf)) {
err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+ ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+ umem_dmabuf);
goto err_free;
}
@@ -1832,7 +1831,8 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(&dev->ibdev,
- "Failed to pin and map user space memory[%d]\n", err);
+ "Failed to pin and map user space memory[%pe]\n",
+ mr->umem);
goto err_free;
}
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index e0acc185e719..ed21ba0037a4 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -993,10 +993,10 @@ static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
int ret;
sock_set_reuseaddr(s->sk);
- ret = s->ops->bind(s, laddr, laddrlen);
+ ret = s->ops->bind(s, (struct sockaddr_unsized *)laddr, laddrlen);
if (ret)
return ret;
- ret = s->ops->connect(s, raddr, raddrlen, flags);
+ ret = s->ops->connect(s, (struct sockaddr_unsized *)raddr, raddrlen, flags);
return ret < 0 ? ret : 0;
}
@@ -1315,7 +1315,7 @@ int erdma_create_listen(struct iw_cm_id *id, int backlog)
if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
s->sk->sk_bound_dev_if = dev->netdev->ifindex;
- ret = s->ops->bind(s, (struct sockaddr *)laddr,
+ ret = s->ops->bind(s, (struct sockaddr_unsized *)laddr,
sizeof(struct sockaddr_in));
if (ret)
goto error;
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index fdeec33c71da..109a3f3de911 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -149,7 +149,7 @@ static int regmr_cmd(struct erdma_dev *dev, struct erdma_mr *mr)
req.phy_addr[0] = mr->mem.mtt->buf_dma;
mtt_level = ERDMA_MR_MTT_1LEVEL;
} else {
- req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
+ req.phy_addr[0] = mr->mem.mtt->dma_addrs[0];
mtt_level = mr->mem.mtt->level;
}
} else if (mr->type != ERDMA_MR_TYPE_DMA) {
@@ -626,18 +626,27 @@ err_free_mtt:
return ERR_PTR(-ENOMEM);
}
-static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
- struct erdma_mtt *mtt)
+static void erdma_unmap_page_list(struct erdma_dev *dev, dma_addr_t *pg_dma,
+ u32 npages)
{
- dma_unmap_sg(&dev->pdev->dev, mtt->sglist,
- DIV_ROUND_UP(mtt->size, PAGE_SIZE), DMA_TO_DEVICE);
- vfree(mtt->sglist);
+ u32 i;
+
+ for (i = 0; i < npages; i++)
+ dma_unmap_page(&dev->pdev->dev, pg_dma[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+}
+
+static void erdma_destroy_mtt_buf_dma_addrs(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ erdma_unmap_page_list(dev, mtt->dma_addrs, mtt->npages);
+ vfree(mtt->dma_addrs);
}
static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
struct erdma_mtt *mtt)
{
- erdma_destroy_mtt_buf_sg(dev, mtt);
+ erdma_destroy_mtt_buf_dma_addrs(dev, mtt);
vfree(mtt->buf);
kfree(mtt);
}
@@ -645,50 +654,69 @@ static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
struct erdma_mtt *low_mtt)
{
- struct scatterlist *sg;
- u32 idx = 0, i;
+ dma_addr_t *pg_addr = mtt->buf;
+ u32 i;
- for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i)
- mtt->buf[idx++] = sg_dma_address(sg);
+ for (i = 0; i < low_mtt->npages; i++)
+ pg_addr[i] = low_mtt->dma_addrs[i];
}
-static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
+static u32 vmalloc_to_dma_addrs(struct erdma_dev *dev, dma_addr_t **dma_addrs,
+ void *buf, u64 len)
{
- struct scatterlist *sglist;
- void *buf = mtt->buf;
- u32 npages, i, nsg;
+ dma_addr_t *pg_dma;
struct page *pg;
+ u32 npages, i;
+ void *addr;
- /* Failed if buf is not page aligned */
- if ((uintptr_t)buf & ~PAGE_MASK)
- return -EINVAL;
-
- npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
- sglist = vzalloc(npages * sizeof(*sglist));
- if (!sglist)
- return -ENOMEM;
+ npages = (PAGE_ALIGN((u64)buf + len) - PAGE_ALIGN_DOWN((u64)buf)) >>
+ PAGE_SHIFT;
+ pg_dma = vcalloc(npages, sizeof(*pg_dma));
+ if (!pg_dma)
+ return 0;
- sg_init_table(sglist, npages);
+ addr = buf;
for (i = 0; i < npages; i++) {
- pg = vmalloc_to_page(buf);
+ pg = vmalloc_to_page(addr);
if (!pg)
goto err;
- sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
- buf += PAGE_SIZE;
+
+ pg_dma[i] = dma_map_page(&dev->pdev->dev, pg, 0, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dev->pdev->dev, pg_dma[i]))
+ goto err;
+
+ addr += PAGE_SIZE;
}
- nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE);
- if (!nsg)
- goto err;
+ *dma_addrs = pg_dma;
- mtt->sglist = sglist;
- mtt->nsg = nsg;
+ return npages;
+err:
+ erdma_unmap_page_list(dev, pg_dma, i);
+ vfree(pg_dma);
return 0;
-err:
- vfree(sglist);
+}
- return -ENOMEM;
+static int erdma_create_mtt_buf_dma_addrs(struct erdma_dev *dev,
+ struct erdma_mtt *mtt)
+{
+ dma_addr_t *addrs;
+ u32 npages;
+
+ /* Fail if the buffer is not page-aligned */
+ if ((uintptr_t)mtt->buf & ~PAGE_MASK)
+ return -EINVAL;
+
+ npages = vmalloc_to_dma_addrs(dev, &addrs, mtt->buf, mtt->size);
+ if (!npages)
+ return -ENOMEM;
+
+ mtt->dma_addrs = addrs;
+ mtt->npages = npages;
+
+ return 0;
}
static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
@@ -707,12 +735,12 @@ static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
if (!mtt->buf)
goto err_free_mtt;
- ret = erdma_create_mtt_buf_sg(dev, mtt);
+ ret = erdma_create_mtt_buf_dma_addrs(dev, mtt);
if (ret)
goto err_free_mtt_buf;
- ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
- mtt->size, mtt->nsg);
+ ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, npages:%u\n",
+ mtt->size, mtt->npages);
return mtt;
@@ -746,8 +774,8 @@ static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
level = 1;
/* converge the mtt table. */
- while (mtt->nsg != 1 && level <= 3) {
- tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
+ while (mtt->npages != 1 && level <= 3) {
+ tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->npages));
if (IS_ERR(tmp_mtt)) {
ret = PTR_ERR(tmp_mtt);
goto err_free_mtt;
@@ -765,7 +793,7 @@ static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
mtt->level = level;
ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
- mtt->level, mtt->sglist[0].dma_address);
+ mtt->level, mtt->dma_addrs[0]);
return mtt;
err_free_mtt:
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.h b/drivers/infiniband/hw/erdma/erdma_verbs.h
index ef411b81fbd7..7d8d3fe501d5 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.h
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.h
@@ -99,8 +99,8 @@ struct erdma_mtt {
union {
dma_addr_t buf_dma;
struct {
- struct scatterlist *sglist;
- u32 nsg;
+ dma_addr_t *dma_addrs;
+ u32 npages;
u32 level;
};
};
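The erdma MTT rework replaces the scatterlist with a flat array of per-page DMA addresses for the page-aligned vmalloc buffer. A condensed sketch of the map-and-unwind loop it now uses; names are illustrative:

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

/* Sketch: DMA-map each page backing a page-aligned vmalloc buffer,
 * mirroring the dma_addrs scheme above.
 */
static int map_vmalloc_pages(struct device *dev, void *buf, u32 npages,
			     dma_addr_t *dma)
{
	u32 i;

	for (i = 0; i < npages; i++) {
		struct page *pg = vmalloc_to_page(buf + i * PAGE_SIZE);

		if (!pg)
			goto err_unmap;
		dma[i] = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma[i]))
			goto err_unmap;
	}
	return 0;

err_unmap:
	while (i--)
		dma_unmap_page(dev, dma[i], PAGE_SIZE, DMA_TO_DEVICE);
	return -ENOMEM;
}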
diff --git a/drivers/infiniband/hw/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index 4250d077b06f..a98a4175e53b 100644
--- a/drivers/infiniband/hw/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -64,9 +64,9 @@ int hfi1_cdev_init(int minor, const char *name,
if (IS_ERR(device)) {
ret = PTR_ERR(device);
+ pr_err("Could not create device for minor %d, %s (err %pe)\n",
+ minor, name, device);
device = NULL;
- pr_err("Could not create device for minor %d, %s (err %d)\n",
- minor, name, -ret);
cdev_del(cdev);
}
done:
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index b35f92e7d865..e4aef102dac0 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -745,8 +745,8 @@ static int create_workqueues(struct hfi1_devdata *dd)
ppd->hfi1_wq =
alloc_workqueue(
"hfi%d_%d",
- WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
- WQ_MEM_RECLAIM,
+ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM |
+ WQ_PERCPU,
HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
dd->unit, pidx);
if (!ppd->hfi1_wq)
diff --git a/drivers/infiniband/hw/hfi1/opfn.c b/drivers/infiniband/hw/hfi1/opfn.c
index 370a5a8eaa71..6e0e3458d202 100644
--- a/drivers/infiniband/hw/hfi1/opfn.c
+++ b/drivers/infiniband/hw/hfi1/opfn.c
@@ -305,8 +305,8 @@ void opfn_trigger_conn_request(struct rvt_qp *qp, u32 bth1)
int opfn_init(void)
{
opfn_wq = alloc_workqueue("hfi_opfn",
- WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
- WQ_MEM_RECLAIM,
+ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM |
+ WQ_PERCPU,
HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES);
if (!opfn_wq)
return -ENOMEM;
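The WQ_PERCPU additions make the historically per-CPU behaviour explicit ahead of workqueues becoming unbound by default. A sketch of requesting it at allocation time:

#include <linux/workqueue.h>

/* Sketch: explicitly request per-CPU execution; WQ_PERCPU preserves the
 * current default once unbound workqueues become the default.
 */
static struct workqueue_struct *make_percpu_wq(void)
{
	return alloc_workqueue("example_wq",
			       WQ_MEM_RECLAIM | WQ_PERCPU, 0);
}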
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 719b7c34e238..5cfa4f8fbf3d 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -990,7 +990,7 @@ ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
}
/* Clean up old mappings */
- for_each_cpu(cpu, cpu_online_mask) {
+ for_each_online_cpu(cpu) {
struct sdma_rht_node *rht_node;
/* Don't cleanup sdes that are set in the new mask */
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index b72625283fcf..9b1aece1b080 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -498,8 +498,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
ntids, sizeof(*req->tids));
if (IS_ERR(tmp)) {
ret = PTR_ERR(tmp);
- SDMA_DBG(req, "Failed to copy %d TIDs (%d)",
- ntids, ret);
+ SDMA_DBG(req, "Failed to copy %d TIDs (%pe)", ntids,
+ tmp);
goto free_req;
}
req->tids = tmp;
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index baf592e6f21b..d07ef02c5231 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -4,11 +4,13 @@
#
ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf
+ccflags-y += -I $(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common
ccflags-y += -I $(src)
hns-roce-hw-v2-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o \
- hns_roce_debugfs.o hns_roce_hw_v2.o
+ hns_roce_debugfs.o hns_roce_hw_v2.o hns_roce_bond.o
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 307c35888b30..0c1c32d23c88 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include "hns_roce_device.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_bond.c b/drivers/infiniband/hw/hns/hns_roce_bond.c
new file mode 100644
index 000000000000..cc85f3ce1f3e
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_bond.c
@@ -0,0 +1,1012 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2025 Hisilicon Limited.
+ */
+
+#include <net/lag.h>
+#include <net/bonding.h>
+#include "hns_roce_device.h"
+#include "hns_roce_hw_v2.h"
+#include "hns_roce_bond.h"
+
+static DEFINE_XARRAY(roce_bond_xa);
+
+static struct hns_roce_dev *hns_roce_get_hrdev_by_netdev(struct net_device *net_dev)
+{
+ struct ib_device *ibdev =
+ ib_device_get_by_netdev(net_dev, RDMA_DRIVER_HNS);
+
+ if (!ibdev)
+ return NULL;
+
+ return container_of(ibdev, struct hns_roce_dev, ib_dev);
+}
+
+static struct net_device *get_upper_dev_from_ndev(struct net_device *net_dev)
+{
+ struct net_device *upper_dev;
+
+ rcu_read_lock();
+ upper_dev = netdev_master_upper_dev_get_rcu(net_dev);
+ dev_hold(upper_dev);
+ rcu_read_unlock();
+
+ return upper_dev;
+}
+
+static int get_netdev_bond_slave_id(struct net_device *net_dev,
+ struct hns_roce_bond_group *bond_grp)
+{
+ int i;
+
+ for (i = 0; i < ROCE_BOND_FUNC_MAX; i++)
+ if (net_dev == bond_grp->bond_func_info[i].net_dev)
+ return i;
+
+ return -ENOENT;
+}
+
+struct hns_roce_bond_group *hns_roce_get_bond_grp(struct net_device *net_dev,
+ u8 bus_num)
+{
+ struct hns_roce_die_info *die_info = xa_load(&roce_bond_xa, bus_num);
+ struct hns_roce_bond_group *bond_grp;
+ struct net_device *upper_dev = NULL;
+ int i;
+
+ if (!die_info)
+ return NULL;
+
+ for (i = 0; i < ROCE_BOND_NUM_MAX; i++) {
+ bond_grp = die_info->bgrps[i];
+ if (!bond_grp)
+ continue;
+ if (get_netdev_bond_slave_id(net_dev, bond_grp) >= 0)
+ return bond_grp;
+ if (bond_grp->upper_dev) {
+ upper_dev = get_upper_dev_from_ndev(net_dev);
+ if (bond_grp->upper_dev == upper_dev) {
+ dev_put(upper_dev);
+ return bond_grp;
+ }
+ dev_put(upper_dev);
+ }
+ }
+
+ return NULL;
+}
+
+static int hns_roce_set_bond_netdev(struct hns_roce_bond_group *bond_grp,
+ struct hns_roce_dev *hr_dev)
+{
+ struct net_device *active_dev;
+ struct net_device *old_dev;
+ int i = 0, ret = 0;
+
+ if (bond_grp->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ rcu_read_lock();
+ active_dev =
+ bond_option_active_slave_get_rcu(netdev_priv(bond_grp->upper_dev));
+ rcu_read_unlock();
+ } else {
+ for (i = 0; i < ROCE_BOND_FUNC_MAX; i++) {
+ active_dev = bond_grp->bond_func_info[i].net_dev;
+ if (active_dev &&
+ ib_get_curr_port_state(active_dev) == IB_PORT_ACTIVE)
+ break;
+ }
+ }
+
+ if (!active_dev || i == ROCE_BOND_FUNC_MAX)
+ active_dev = get_hr_netdev(hr_dev, 0);
+
+ old_dev = ib_device_get_netdev(&hr_dev->ib_dev, 1);
+ if (old_dev == active_dev)
+ goto out;
+
+ ret = ib_device_set_netdev(&hr_dev->ib_dev, active_dev, 1);
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to set netdev for bond.\n");
+ goto out;
+ }
+
+ if (bond_grp->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ if (old_dev)
+ roce_del_all_netdev_gids(&hr_dev->ib_dev, 1, old_dev);
+ rdma_roce_rescan_port(&hr_dev->ib_dev, 1);
+ }
+out:
+ dev_put(old_dev);
+ return ret;
+}
+
+bool hns_roce_bond_is_active(struct hns_roce_dev *hr_dev)
+{
+ struct net_device *net_dev = get_hr_netdev(hr_dev, 0);
+ struct hns_roce_bond_group *bond_grp;
+ u8 bus_num = get_hr_bus_num(hr_dev);
+
+ bond_grp = hns_roce_get_bond_grp(net_dev, bus_num);
+ if (bond_grp && bond_grp->bond_state != HNS_ROCE_BOND_NOT_BONDED &&
+ bond_grp->bond_state != HNS_ROCE_BOND_NOT_ATTACHED)
+ return true;
+
+ return false;
+}
+
+static void hns_roce_bond_get_active_slave(struct hns_roce_bond_group *bond_grp)
+{
+ struct net_device *net_dev;
+ u32 active_slave_map = 0;
+ u8 active_slave_num = 0;
+ bool active;
+ u8 i;
+
+ for (i = 0; i < ROCE_BOND_FUNC_MAX; i++) {
+ net_dev = bond_grp->bond_func_info[i].net_dev;
+ if (!net_dev || !(bond_grp->slave_map & (1U << i)))
+ continue;
+
+ active = (bond_grp->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ?
+ net_lag_port_dev_txable(net_dev) :
+ (ib_get_curr_port_state(net_dev) == IB_PORT_ACTIVE);
+ if (active) {
+ active_slave_num++;
+ active_slave_map |= (1U << i);
+ }
+ }
+
+ bond_grp->active_slave_num = active_slave_num;
+ bond_grp->active_slave_map = active_slave_map;
+}
+
+static int hns_roce_recover_bond(struct hns_roce_bond_group *bond_grp,
+ struct hns_roce_dev *hr_dev)
+{
+ bond_grp->main_hr_dev = hr_dev;
+ hns_roce_bond_get_active_slave(bond_grp);
+
+ return hns_roce_cmd_bond(bond_grp, HNS_ROCE_SET_BOND);
+}
+
+static void hns_roce_slave_uninit(struct hns_roce_bond_group *bond_grp,
+ u8 func_idx)
+{
+ struct hnae3_handle *handle;
+
+ handle = bond_grp->bond_func_info[func_idx].handle;
+ if (handle->priv)
+ hns_roce_bond_uninit_client(bond_grp, func_idx);
+}
+
+static struct hns_roce_dev
+ *hns_roce_slave_init(struct hns_roce_bond_group *bond_grp,
+ u8 func_idx, bool need_switch);
+
+static int switch_main_dev(struct hns_roce_bond_group *bond_grp,
+ u8 main_func_idx)
+{
+ struct hns_roce_dev *hr_dev;
+ struct net_device *net_dev;
+ u8 i;
+
+ bond_grp->main_hr_dev = NULL;
+ hns_roce_bond_uninit_client(bond_grp, main_func_idx);
+
+ for (i = 0; i < ROCE_BOND_FUNC_MAX; i++) {
+ net_dev = bond_grp->bond_func_info[i].net_dev;
+ if ((bond_grp->slave_map & (1U << i)) && net_dev) {
+ /* In case this slave is still being registered as
+ * a non-bonded PF, uninit it first and then re-init
+ * it as the main device.
+ */
+ hns_roce_slave_uninit(bond_grp, i);
+ hr_dev = hns_roce_slave_init(bond_grp, i, false);
+ if (hr_dev) {
+ bond_grp->main_hr_dev = hr_dev;
+ break;
+ }
+ }
+ }
+
+ if (!bond_grp->main_hr_dev)
+ return -ENODEV;
+
+ return 0;
+}
+
+static struct hns_roce_dev
+ *hns_roce_slave_init(struct hns_roce_bond_group *bond_grp,
+ u8 func_idx, bool need_switch)
+{
+ struct hns_roce_dev *hr_dev = NULL;
+ struct hnae3_handle *handle;
+ u8 main_func_idx;
+ int ret;
+
+ if (need_switch) {
+ main_func_idx = PCI_FUNC(bond_grp->main_hr_dev->pci_dev->devfn);
+ if (func_idx == main_func_idx) {
+ ret = switch_main_dev(bond_grp, main_func_idx);
+ if (ret == -ENODEV)
+ return NULL;
+ }
+ }
+
+ handle = bond_grp->bond_func_info[func_idx].handle;
+ if (handle) {
+ if (handle->priv)
+ return handle->priv;
+ /* Prevent this device from being initialized as a bond device */
+ if (need_switch)
+ bond_grp->bond_func_info[func_idx].net_dev = NULL;
+ hr_dev = hns_roce_bond_init_client(bond_grp, func_idx);
+ if (!hr_dev)
+ BOND_ERR_LOG("failed to init slave %u.\n", func_idx);
+ }
+
+ return hr_dev;
+}
+
+static struct hns_roce_die_info *alloc_die_info(int bus_num)
+{
+ struct hns_roce_die_info *die_info;
+ int ret;
+
+ die_info = kzalloc(sizeof(*die_info), GFP_KERNEL);
+ if (!die_info)
+ return NULL;
+
+ ret = xa_err(xa_store(&roce_bond_xa, bus_num, die_info, GFP_KERNEL));
+ if (ret) {
+ kfree(die_info);
+ return NULL;
+ }
+
+ mutex_init(&die_info->die_mutex);
+
+ return die_info;
+}
+
+static void dealloc_die_info(struct hns_roce_die_info *die_info, u8 bus_num)
+{
+ mutex_destroy(&die_info->die_mutex);
+ xa_erase(&roce_bond_xa, bus_num);
+ kfree(die_info);
+}
+
+static int alloc_bond_id(struct hns_roce_bond_group *bond_grp)
+{
+ u8 bus_num = bond_grp->bus_num;
+ struct hns_roce_die_info *die_info = xa_load(&roce_bond_xa, bus_num);
+ int i;
+
+ if (!die_info) {
+ die_info = alloc_die_info(bus_num);
+ if (!die_info)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ROCE_BOND_NUM_MAX; i++) {
+ if (die_info->bond_id_mask & BOND_ID(i))
+ continue;
+
+ die_info->bond_id_mask |= BOND_ID(i);
+ die_info->bgrps[i] = bond_grp;
+ bond_grp->bond_id = i;
+
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
+static int remove_bond_id(int bus_num, u8 bond_id)
+{
+ struct hns_roce_die_info *die_info = xa_load(&roce_bond_xa, bus_num);
+
+ if (bond_id >= ROCE_BOND_NUM_MAX)
+ return -EINVAL;
+
+ if (!die_info)
+ return -ENODEV;
+
+ die_info->bond_id_mask &= ~BOND_ID(bond_id);
+ die_info->bgrps[bond_id] = NULL;
+ if (!die_info->bond_id_mask)
+ dealloc_die_info(die_info, bus_num);
+
+ return 0;
+}
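alloc_die_info()/dealloc_die_info() keep one record per PCI bus in a global xarray. A compact sketch of the lookup-or-create idiom, with a placeholder record type (outer locking, as in the driver, is assumed):

#include <linux/xarray.h>
#include <linux/slab.h>

static DEFINE_XARRAY(example_xa);

struct rec {
	int data;
};

/* Sketch: lookup-or-create keyed by a small integer
 * (the PCI bus number in the driver above).
 */
static struct rec *rec_get(unsigned long id)
{
	struct rec *r = xa_load(&example_xa, id);

	if (r)
		return r;
	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (r && xa_err(xa_store(&example_xa, id, r, GFP_KERNEL))) {
		kfree(r);
		r = NULL;
	}
	return r;
}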
+
+static void hns_roce_set_bond(struct hns_roce_bond_group *bond_grp)
+{
+ struct hns_roce_dev *hr_dev;
+ int ret;
+ int i;
+
+ for (i = ROCE_BOND_FUNC_MAX - 1; i >= 0; i--) {
+ if (bond_grp->slave_map & (1 << i))
+ hns_roce_slave_uninit(bond_grp, i);
+ }
+
+ mutex_lock(&bond_grp->bond_mutex);
+ bond_grp->bond_state = HNS_ROCE_BOND_IS_BONDED;
+ mutex_unlock(&bond_grp->bond_mutex);
+ bond_grp->main_hr_dev = NULL;
+
+ for (i = 0; i < ROCE_BOND_FUNC_MAX; i++) {
+ if (bond_grp->slave_map & (1 << i)) {
+ hr_dev = hns_roce_slave_init(bond_grp, i, false);
+ if (hr_dev) {
+ bond_grp->main_hr_dev = hr_dev;
+ break;
+ }
+ }
+ }
+
+ if (!bond_grp->main_hr_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ hns_roce_bond_get_active_slave(bond_grp);
+
+ ret = hns_roce_cmd_bond(bond_grp, HNS_ROCE_SET_BOND);
+
+out:
+ if (ret) {
+ BOND_ERR_LOG("failed to set RoCE bond, ret = %d.\n", ret);
+ hns_roce_cleanup_bond(bond_grp);
+ } else {
+ ibdev_info(&bond_grp->main_hr_dev->ib_dev,
+ "RoCE set bond finished!\n");
+ }
+}
+
+static void hns_roce_clear_bond(struct hns_roce_bond_group *bond_grp)
+{
+ u8 main_func_idx = PCI_FUNC(bond_grp->main_hr_dev->pci_dev->devfn);
+ struct hns_roce_dev *hr_dev;
+ u8 i;
+
+ if (bond_grp->bond_state == HNS_ROCE_BOND_NOT_BONDED)
+ goto out;
+
+ bond_grp->bond_state = HNS_ROCE_BOND_NOT_BONDED;
+ bond_grp->main_hr_dev = NULL;
+
+ hns_roce_slave_uninit(bond_grp, main_func_idx);
+
+ for (i = 0; i < ROCE_BOND_FUNC_MAX; i++) {
+ hr_dev = hns_roce_slave_init(bond_grp, i, false);
+ if (hr_dev)
+ bond_grp->main_hr_dev = hr_dev;
+ }
+
+out:
+ hns_roce_cleanup_bond(bond_grp);
+}
+
+static void hns_roce_slave_changestate(struct hns_roce_bond_group *bond_grp)
+{
+ int ret;
+
+ hns_roce_bond_get_active_slave(bond_grp);
+
+ ret = hns_roce_cmd_bond(bond_grp, HNS_ROCE_CHANGE_BOND);
+
+ mutex_lock(&bond_grp->bond_mutex);
+ if (bond_grp->bond_state == HNS_ROCE_BOND_SLAVE_CHANGESTATE)
+ bond_grp->bond_state = HNS_ROCE_BOND_IS_BONDED;
+ mutex_unlock(&bond_grp->bond_mutex);
+
+ if (ret)
+ ibdev_err(&bond_grp->main_hr_dev->ib_dev,
+ "failed to change RoCE bond slave state, ret = %d.\n",
+ ret);
+ else
+ ibdev_info(&bond_grp->main_hr_dev->ib_dev,
+ "RoCE slave changestate finished!\n");
+}
+
+static void hns_roce_slave_change_num(struct hns_roce_bond_group *bond_grp)
+{
+ int ret;
+ u8 i;
+
+ for (i = 0; i < ROCE_BOND_FUNC_MAX; i++) {
+ if (bond_grp->slave_map & (1U << i)) {
+ if (i == PCI_FUNC(bond_grp->main_hr_dev->pci_dev->devfn))
+ continue;
+ hns_roce_slave_uninit(bond_grp, i);
+ } else {
+ hns_roce_slave_init(bond_grp, i, true);
+ if (!bond_grp->main_hr_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+ bond_grp->bond_func_info[i].net_dev = NULL;
+ bond_grp->bond_func_info[i].handle = NULL;
+ }
+ }
+
+ hns_roce_bond_get_active_slave(bond_grp);
+
+ ret = hns_roce_cmd_bond(bond_grp, HNS_ROCE_CHANGE_BOND);
+
+out:
+ if (ret) {
+ BOND_ERR_LOG("failed to change RoCE bond slave num, ret = %d.\n", ret);
+ hns_roce_cleanup_bond(bond_grp);
+ } else {
+ mutex_lock(&bond_grp->bond_mutex);
+ if (bond_grp->bond_state == HNS_ROCE_BOND_SLAVE_CHANGE_NUM)
+ bond_grp->bond_state = HNS_ROCE_BOND_IS_BONDED;
+ mutex_unlock(&bond_grp->bond_mutex);
+ ibdev_info(&bond_grp->main_hr_dev->ib_dev,
+ "RoCE slave change num finished!\n");
+ }
+}
+
+static void hns_roce_bond_info_update_nolock(struct hns_roce_bond_group *bond_grp,
+ struct net_device *upper_dev)
+{
+ struct hns_roce_v2_priv *priv;
+ struct hns_roce_dev *hr_dev;
+ struct net_device *net_dev;
+ int func_idx;
+
+ bond_grp->slave_map = 0;
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper_dev, net_dev) {
+ func_idx = get_netdev_bond_slave_id(net_dev, bond_grp);
+ if (func_idx < 0) {
+ hr_dev = hns_roce_get_hrdev_by_netdev(net_dev);
+ if (!hr_dev)
+ continue;
+ func_idx = PCI_FUNC(hr_dev->pci_dev->devfn);
+ if (!bond_grp->bond_func_info[func_idx].net_dev) {
+ priv = hr_dev->priv;
+ bond_grp->bond_func_info[func_idx].net_dev =
+ net_dev;
+ bond_grp->bond_func_info[func_idx].handle =
+ priv->handle;
+ }
+ ib_device_put(&hr_dev->ib_dev);
+ }
+
+ bond_grp->slave_map |= (1 << func_idx);
+ }
+ rcu_read_unlock();
+}
+
+static bool is_dev_bond_supported(struct hns_roce_bond_group *bond_grp,
+ struct net_device *net_dev)
+{
+ struct hns_roce_dev *hr_dev = hns_roce_get_hrdev_by_netdev(net_dev);
+ bool ret = true;
+
+ if (!hr_dev) {
+ if (bond_grp &&
+ get_netdev_bond_slave_id(net_dev, bond_grp) >= 0)
+ return true;
+ else
+ return false;
+ }
+
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND)) {
+ ret = false;
+ goto out;
+ }
+
+ if (hr_dev->is_vf || pci_num_vf(hr_dev->pci_dev) > 0) {
+ ret = false;
+ goto out;
+ }
+
+ if (bond_grp->bus_num != get_hr_bus_num(hr_dev))
+ ret = false;
+
+out:
+ ib_device_put(&hr_dev->ib_dev);
+ return ret;
+}
+
+static bool check_slave_support(struct hns_roce_bond_group *bond_grp,
+ struct net_device *upper_dev)
+{
+ struct net_device *net_dev;
+ u8 slave_num = 0;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper_dev, net_dev) {
+ if (is_dev_bond_supported(bond_grp, net_dev)) {
+ slave_num++;
+ continue;
+ }
+ rcu_read_unlock();
+ return false;
+ }
+ rcu_read_unlock();
+
+ return (slave_num > 1 && slave_num <= ROCE_BOND_FUNC_MAX);
+}
+
+static void hns_roce_bond_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct hns_roce_bond_group *bond_grp =
+ container_of(delayed_work, struct hns_roce_bond_group,
+ bond_work);
+ enum hns_roce_bond_state bond_state;
+ bool bond_ready;
+
+ mutex_lock(&bond_grp->bond_mutex);
+ bond_ready = check_slave_support(bond_grp, bond_grp->upper_dev);
+ hns_roce_bond_info_update_nolock(bond_grp, bond_grp->upper_dev);
+ bond_state = bond_grp->bond_state;
+ bond_grp->bond_ready = bond_ready;
+ mutex_unlock(&bond_grp->bond_mutex);
+
+ ibdev_info(&bond_grp->main_hr_dev->ib_dev,
+ "bond work: bond_ready - %d, bond_state - %d.\n",
+ bond_ready, bond_state);
+
+ if (!bond_ready) {
+ hns_roce_clear_bond(bond_grp);
+ return;
+ }
+
+ switch (bond_state) {
+ case HNS_ROCE_BOND_NOT_BONDED:
+ hns_roce_set_bond(bond_grp);
+ /* In set_bond flow, we don't need to set bond netdev here as
+ * it has been done when bond_grp->main_hr_dev is registered.
+ */
+ return;
+ case HNS_ROCE_BOND_SLAVE_CHANGESTATE:
+ hns_roce_slave_changestate(bond_grp);
+ break;
+ case HNS_ROCE_BOND_SLAVE_CHANGE_NUM:
+ hns_roce_slave_change_num(bond_grp);
+ break;
+ default:
+ return;
+ }
+ hns_roce_set_bond_netdev(bond_grp, bond_grp->main_hr_dev);
+}
+
+static void hns_roce_attach_bond_grp(struct hns_roce_bond_group *bond_grp,
+ struct hns_roce_dev *hr_dev,
+ struct net_device *upper_dev)
+{
+ bond_grp->upper_dev = upper_dev;
+ bond_grp->main_hr_dev = hr_dev;
+ bond_grp->bond_state = HNS_ROCE_BOND_NOT_BONDED;
+ bond_grp->bond_ready = false;
+}
+
+static void hns_roce_detach_bond_grp(struct hns_roce_bond_group *bond_grp)
+{
+ mutex_lock(&bond_grp->bond_mutex);
+
+ cancel_delayed_work(&bond_grp->bond_work);
+ bond_grp->upper_dev = NULL;
+ bond_grp->main_hr_dev = NULL;
+ bond_grp->bond_ready = false;
+ bond_grp->bond_state = HNS_ROCE_BOND_NOT_ATTACHED;
+ bond_grp->slave_map = 0;
+ memset(bond_grp->bond_func_info, 0, sizeof(bond_grp->bond_func_info));
+
+ mutex_unlock(&bond_grp->bond_mutex);
+}
+
+void hns_roce_cleanup_bond(struct hns_roce_bond_group *bond_grp)
+{
+ int ret;
+
+ ret = bond_grp->main_hr_dev ?
+ hns_roce_cmd_bond(bond_grp, HNS_ROCE_CLEAR_BOND) : -EIO;
+ if (ret)
+ BOND_ERR_LOG("failed to clear RoCE bond, ret = %d.\n", ret);
+ else
+ ibdev_info(&bond_grp->main_hr_dev->ib_dev,
+ "RoCE clear bond finished!\n");
+
+ hns_roce_detach_bond_grp(bond_grp);
+}
+
+static bool lowerstate_event_filter(struct hns_roce_bond_group *bond_grp,
+ struct net_device *net_dev)
+{
+ struct hns_roce_bond_group *bond_grp_tmp;
+
+ bond_grp_tmp = hns_roce_get_bond_grp(net_dev, bond_grp->bus_num);
+ return bond_grp_tmp == bond_grp;
+}
+
+static void lowerstate_event_setting(struct hns_roce_bond_group *bond_grp,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ mutex_lock(&bond_grp->bond_mutex);
+
+ if (bond_grp->bond_ready &&
+ bond_grp->bond_state == HNS_ROCE_BOND_IS_BONDED)
+ bond_grp->bond_state = HNS_ROCE_BOND_SLAVE_CHANGESTATE;
+
+ mutex_unlock(&bond_grp->bond_mutex);
+}
+
+static bool hns_roce_bond_lowerstate_event(struct hns_roce_bond_group *bond_grp,
+ struct netdev_notifier_changelowerstate_info *info)
+{
+ struct net_device *net_dev =
+ netdev_notifier_info_to_dev((struct netdev_notifier_info *)info);
+
+ if (!netif_is_lag_port(net_dev))
+ return false;
+
+ if (!lowerstate_event_filter(bond_grp, net_dev))
+ return false;
+
+ lowerstate_event_setting(bond_grp, info);
+
+ return true;
+}
+
+static bool is_bond_setting_supported(struct netdev_lag_upper_info *bond_info)
+{
+ if (!bond_info)
+ return false;
+
+ if (bond_info->tx_type != NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
+ bond_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return false;
+
+ if (bond_info->tx_type == NETDEV_LAG_TX_TYPE_HASH &&
+ bond_info->hash_type > NETDEV_LAG_HASH_L23)
+ return false;
+
+ return true;
+}
+
+static void upper_event_setting(struct hns_roce_bond_group *bond_grp,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct netdev_lag_upper_info *bond_upper_info = NULL;
+ bool slave_inc = info->linking;
+
+ if (slave_inc)
+ bond_upper_info = info->upper_info;
+
+ if (bond_upper_info) {
+ bond_grp->tx_type = bond_upper_info->tx_type;
+ bond_grp->hash_type = bond_upper_info->hash_type;
+ }
+}
+
+static bool check_unlinking_bond_support(struct hns_roce_bond_group *bond_grp)
+{
+ struct net_device *net_dev;
+ u8 slave_num = 0;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(bond_grp->upper_dev, net_dev) {
+ if (get_netdev_bond_slave_id(net_dev, bond_grp) >= 0)
+ slave_num++;
+ }
+ rcu_read_unlock();
+
+ return (slave_num > 1);
+}
+
+static bool check_linking_bond_support(struct netdev_lag_upper_info *bond_info,
+ struct hns_roce_bond_group *bond_grp,
+ struct net_device *upper_dev)
+{
+ if (!is_bond_setting_supported(bond_info))
+ return false;
+
+ return check_slave_support(bond_grp, upper_dev);
+}
+
+static enum bond_support_type
+ check_bond_support(struct hns_roce_bond_group *bond_grp,
+ struct net_device *upper_dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ bool bond_grp_exist = false;
+ bool support;
+
+ if (upper_dev == bond_grp->upper_dev)
+ bond_grp_exist = true;
+
+ if (!info->linking && !bond_grp_exist)
+ return BOND_NOT_SUPPORT;
+
+ if (info->linking)
+ support = check_linking_bond_support(info->upper_info, bond_grp,
+ upper_dev);
+ else
+ support = check_unlinking_bond_support(bond_grp);
+
+ if (support)
+ return BOND_SUPPORT;
+
+ return bond_grp_exist ? BOND_EXISTING_NOT_SUPPORT : BOND_NOT_SUPPORT;
+}
+
+static bool upper_event_filter(struct netdev_notifier_changeupper_info *info,
+ struct hns_roce_bond_group *bond_grp,
+ struct net_device *net_dev)
+{
+ struct net_device *upper_dev = info->upper_dev;
+ struct hns_roce_bond_group *bond_grp_tmp;
+ struct hns_roce_dev *hr_dev;
+ bool ret = true;
+ u8 bus_num;
+
+ if (!info->linking ||
+ bond_grp->bond_state != HNS_ROCE_BOND_NOT_ATTACHED)
+ return bond_grp->upper_dev == upper_dev;
+
+ hr_dev = hns_roce_get_hrdev_by_netdev(net_dev);
+ if (!hr_dev)
+ return false;
+
+ bus_num = get_hr_bus_num(hr_dev);
+ if (bond_grp->bus_num != bus_num) {
+ ret = false;
+ goto out;
+ }
+
+ bond_grp_tmp = hns_roce_get_bond_grp(net_dev, bus_num);
+ if (bond_grp_tmp && bond_grp_tmp != bond_grp)
+ ret = false;
+out:
+ ib_device_put(&hr_dev->ib_dev);
+ return ret;
+}
+
+static bool hns_roce_bond_upper_event(struct hns_roce_bond_group *bond_grp,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *net_dev =
+ netdev_notifier_info_to_dev((struct netdev_notifier_info *)info);
+ struct net_device *upper_dev = info->upper_dev;
+ enum bond_support_type support = BOND_SUPPORT;
+ struct hns_roce_dev *hr_dev;
+ int slave_id;
+
+ if (!upper_dev || !netif_is_lag_master(upper_dev))
+ return false;
+
+ if (!upper_event_filter(info, bond_grp, net_dev))
+ return false;
+
+ mutex_lock(&bond_grp->bond_mutex);
+ support = check_bond_support(bond_grp, upper_dev, info);
+ if (support == BOND_NOT_SUPPORT) {
+ mutex_unlock(&bond_grp->bond_mutex);
+ return false;
+ }
+
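+ /* Attach this bond group to the upper device on its first qualifying event */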
+ if (bond_grp->bond_state == HNS_ROCE_BOND_NOT_ATTACHED) {
+ hr_dev = hns_roce_get_hrdev_by_netdev(net_dev);
+ if (!hr_dev) {
+ mutex_unlock(&bond_grp->bond_mutex);
+ return false;
+ }
+ hns_roce_attach_bond_grp(bond_grp, hr_dev, upper_dev);
+ ib_device_put(&hr_dev->ib_dev);
+ }
+
+ /* If the netdev is being unregistered, the RoCE
+ * instance should not be initialized.
+ */
+ if (net_dev->reg_state >= NETREG_UNREGISTERING) {
+ slave_id = get_netdev_bond_slave_id(net_dev, bond_grp);
+ if (slave_id >= 0) {
+ bond_grp->bond_func_info[slave_id].net_dev = NULL;
+ bond_grp->bond_func_info[slave_id].handle = NULL;
+ }
+ }
+
+ if (support == BOND_SUPPORT) {
+ bond_grp->bond_ready = true;
+ if (bond_grp->bond_state != HNS_ROCE_BOND_NOT_BONDED)
+ bond_grp->bond_state = HNS_ROCE_BOND_SLAVE_CHANGE_NUM;
+ }
+ mutex_unlock(&bond_grp->bond_mutex);
+ if (support == BOND_SUPPORT)
+ upper_event_setting(bond_grp, info);
+
+ return true;
+}
+
+static int hns_roce_bond_event(struct notifier_block *self,
+ unsigned long event, void *ptr)
+{
+ struct hns_roce_bond_group *bond_grp =
+ container_of(self, struct hns_roce_bond_group, bond_nb);
+ bool changed = false;
+
+ if (event == NETDEV_CHANGEUPPER)
+ changed = hns_roce_bond_upper_event(bond_grp, ptr);
+ if (event == NETDEV_CHANGELOWERSTATE)
+ changed = hns_roce_bond_lowerstate_event(bond_grp, ptr);
+
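+ /* Defer the actual bond reconfiguration to the delayed work handler */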
+ if (changed)
+ schedule_delayed_work(&bond_grp->bond_work, HZ);
+
+ return NOTIFY_DONE;
+}
+
+int hns_roce_alloc_bond_grp(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_bond_group *bgrps[ROCE_BOND_NUM_MAX];
+ struct hns_roce_bond_group *bond_grp;
+ u8 bus_num = get_hr_bus_num(hr_dev);
+ int ret;
+ int i;
+
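+ /* The bond groups for this die were already allocated */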
+ if (xa_load(&roce_bond_xa, bus_num))
+ return 0;
+
+ for (i = 0; i < ROCE_BOND_NUM_MAX; i++) {
+ bond_grp = kvzalloc(sizeof(*bond_grp), GFP_KERNEL);
+ if (!bond_grp) {
+ ret = -ENOMEM;
+ goto mem_err;
+ }
+
+ mutex_init(&bond_grp->bond_mutex);
+ INIT_DELAYED_WORK(&bond_grp->bond_work, hns_roce_bond_work);
+
+ bond_grp->bond_ready = false;
+ bond_grp->bond_state = HNS_ROCE_BOND_NOT_ATTACHED;
+ bond_grp->bus_num = bus_num;
+
+ ret = alloc_bond_id(bond_grp);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to alloc bond ID, ret = %d.\n", ret);
+ goto alloc_id_err;
+ }
+
+ bond_grp->bond_nb.notifier_call = hns_roce_bond_event;
+ ret = register_netdevice_notifier(&bond_grp->bond_nb);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev,
+ "failed to register bond nb, ret = %d.\n", ret);
+ goto register_nb_err;
+ }
+ bgrps[i] = bond_grp;
+ }
+
+ return 0;
+
+register_nb_err:
+ remove_bond_id(bond_grp->bus_num, bond_grp->bond_id);
+alloc_id_err:
+ mutex_destroy(&bond_grp->bond_mutex);
+ kvfree(bond_grp);
+mem_err:
+ for (i--; i >= 0; i--) {
+ unregister_netdevice_notifier(&bgrps[i]->bond_nb);
+ cancel_delayed_work_sync(&bgrps[i]->bond_work);
+ remove_bond_id(bgrps[i]->bus_num, bgrps[i]->bond_id);
+ mutex_destroy(&bgrps[i]->bond_mutex);
+ kvfree(bgrps[i]);
+ }
+ return ret;
+}
+
+void hns_roce_dealloc_bond_grp(void)
+{
+ struct hns_roce_bond_group *bond_grp;
+ struct hns_roce_die_info *die_info;
+ unsigned long id;
+ int i;
+
+ xa_for_each(&roce_bond_xa, id, die_info) {
+ for (i = 0; i < ROCE_BOND_NUM_MAX; i++) {
+ bond_grp = die_info->bgrps[i];
+ if (!bond_grp)
+ continue;
+ unregister_netdevice_notifier(&bond_grp->bond_nb);
+ cancel_delayed_work_sync(&bond_grp->bond_work);
+ remove_bond_id(bond_grp->bus_num, bond_grp->bond_id);
+ mutex_destroy(&bond_grp->bond_mutex);
+ kvfree(bond_grp);
+ }
+ }
+}
+
+int hns_roce_bond_init(struct hns_roce_dev *hr_dev)
+{
+ struct net_device *net_dev = get_hr_netdev(hr_dev, 0);
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_bond_group *bond_grp;
+ u8 bus_num = get_hr_bus_num(hr_dev);
+ int ret;
+
+ bond_grp = hns_roce_get_bond_grp(net_dev, bus_num);
+
+ if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT) {
+ ret = hns_roce_recover_bond(bond_grp, hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev,
+ "failed to recover RoCE bond, ret = %d.\n", ret);
+ return ret;
+ }
+ }
+
+ return hns_roce_set_bond_netdev(bond_grp, hr_dev);
+}
+
+void hns_roce_bond_suspend(struct hnae3_handle *handle)
+{
+ u8 bus_num = handle->pdev->bus->number;
+ struct hns_roce_bond_group *bond_grp;
+ struct hns_roce_die_info *die_info;
+ int i;
+
+ die_info = xa_load(&roce_bond_xa, bus_num);
+ if (!die_info)
+ return;
+
+ mutex_lock(&die_info->die_mutex);
+
+ /*
+ * Avoid duplicate processing when this function is
+ * called multiple times.
+ */
+ if (die_info->suspend_cnt)
+ goto out;
+
+ for (i = 0; i < ROCE_BOND_NUM_MAX; i++) {
+ bond_grp = die_info->bgrps[i];
+ if (!bond_grp)
+ continue;
+ unregister_netdevice_notifier(&bond_grp->bond_nb);
+ cancel_delayed_work_sync(&bond_grp->bond_work);
+ }
+
+out:
+ die_info->suspend_cnt++;
+ mutex_unlock(&die_info->die_mutex);
+}
+
+void hns_roce_bond_resume(struct hnae3_handle *handle)
+{
+ u8 bus_num = handle->pdev->bus->number;
+ struct hns_roce_bond_group *bond_grp;
+ struct hns_roce_die_info *die_info;
+ int i, ret;
+
+ die_info = xa_load(&roce_bond_xa, bus_num);
+ if (!die_info)
+ return;
+
+ mutex_lock(&die_info->die_mutex);
+
+ die_info->suspend_cnt--;
+ if (die_info->suspend_cnt)
+ goto out;
+
+ for (i = 0; i < ROCE_BOND_NUM_MAX; i++) {
+ bond_grp = die_info->bgrps[i];
+ if (!bond_grp)
+ continue;
+ ret = register_netdevice_notifier(&bond_grp->bond_nb);
+ if (ret)
+ dev_err(&handle->pdev->dev,
+ "failed to resume bond notifier(bus_num = %u, id = %u), ret = %d.\n",
+ bus_num, bond_grp->bond_id, ret);
+ }
+
+out:
+ mutex_unlock(&die_info->die_mutex);
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_bond.h b/drivers/infiniband/hw/hns/hns_roce_bond.h
new file mode 100644
index 000000000000..98c295d78ca1
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_bond.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (c) 2025 Hisilicon Limited.
+ */
+
+#ifndef _HNS_ROCE_BOND_H
+#define _HNS_ROCE_BOND_H
+
+#include <linux/netdevice.h>
+#include <net/bonding.h>
+
+#define ROCE_BOND_FUNC_MAX 4
+#define ROCE_BOND_NUM_MAX 2
+
+#define BOND_ID(id) BIT(id)
+
+#define BOND_ERR_LOG(fmt, ...) \
+ pr_err("HNS RoCE Bonding: " fmt, ##__VA_ARGS__)
+
+enum {
+ BOND_MODE_1,
+ BOND_MODE_2_4,
+};
+
+enum hns_roce_bond_hashtype {
+ BOND_HASH_L2,
+ BOND_HASH_L34,
+ BOND_HASH_L23,
+};
+
+enum bond_support_type {
+ BOND_NOT_SUPPORT,
+ /*
+ * The bond_grp already exists, but under the current
+ * conditions it is no longer supported.
+ */
+ BOND_EXISTING_NOT_SUPPORT,
+ BOND_SUPPORT,
+};
+
+enum hns_roce_bond_state {
+ HNS_ROCE_BOND_NOT_ATTACHED,
+ HNS_ROCE_BOND_NOT_BONDED,
+ HNS_ROCE_BOND_IS_BONDED,
+ HNS_ROCE_BOND_SLAVE_CHANGE_NUM,
+ HNS_ROCE_BOND_SLAVE_CHANGESTATE,
+};
+
+enum hns_roce_bond_cmd_type {
+ HNS_ROCE_SET_BOND,
+ HNS_ROCE_CHANGE_BOND,
+ HNS_ROCE_CLEAR_BOND,
+};
+
+struct hns_roce_func_info {
+ struct net_device *net_dev;
+ struct hnae3_handle *handle;
+};
+
+struct hns_roce_bond_group {
+ struct net_device *upper_dev;
+ struct hns_roce_dev *main_hr_dev;
+ u8 active_slave_num;
+ u32 slave_map;
+ u32 active_slave_map;
+ u8 bond_id;
+ u8 bus_num;
+ struct hns_roce_func_info bond_func_info[ROCE_BOND_FUNC_MAX];
+ bool bond_ready;
+ enum hns_roce_bond_state bond_state;
+ enum netdev_lag_tx_type tx_type;
+ enum netdev_lag_hash hash_type;
+ struct mutex bond_mutex;
+ struct notifier_block bond_nb;
+ struct delayed_work bond_work;
+};
+
+struct hns_roce_die_info {
+ u8 bond_id_mask;
+ struct hns_roce_bond_group *bgrps[ROCE_BOND_NUM_MAX];
+ struct mutex die_mutex;
+ u8 suspend_cnt;
+};
+
+struct hns_roce_bond_group *hns_roce_get_bond_grp(struct net_device *net_dev,
+ u8 bus_num);
+int hns_roce_alloc_bond_grp(struct hns_roce_dev *hr_dev);
+void hns_roce_dealloc_bond_grp(void);
+void hns_roce_cleanup_bond(struct hns_roce_bond_group *bond_grp);
+bool hns_roce_bond_is_active(struct hns_roce_dev *hr_dev);
+int hns_roce_bond_init(struct hns_roce_dev *hr_dev);
+void hns_roce_bond_suspend(struct hnae3_handle *handle);
+void hns_roce_bond_resume(struct hnae3_handle *handle);
+
+#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 3a5c93c9fb3e..6aa82fe9dd3d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
@@ -37,6 +38,43 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
+void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+
+ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+ return;
+
+ mutex_lock(&cq_table->bank_mutex);
+ cq_table->ctx_num[uctx->cq_bank_id]--;
+ mutex_unlock(&cq_table->bank_mutex);
+}
+
+void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ u32 least_load = cq_table->ctx_num[0];
+ u8 bankid = 0;
+ u8 i;
+
+ if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+ return;
+
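+ /* Bind this context to the CQ bank with the fewest user contexts */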
+ mutex_lock(&cq_table->bank_mutex);
+ for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
+ if (cq_table->ctx_num[i] < least_load) {
+ least_load = cq_table->ctx_num[i];
+ bankid = i;
+ }
+ }
+ cq_table->ctx_num[bankid]++;
+ mutex_unlock(&cq_table->bank_mutex);
+
+ uctx->cq_bank_id = bankid;
+}
+
static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
u32 least_load = bank[0].inuse;
@@ -55,7 +93,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
return bankid;
}
-static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static u8 select_cq_bankid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_bank *bank, struct ib_udata *udata)
+{
+ struct hns_roce_ucontext *uctx = udata ?
+ rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
+ ibucontext) : NULL;
+
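+ /* HIP09 and later bind each user context to a fixed CQ bank */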
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ return uctx ? uctx->cq_bank_id : 0;
+
+ return get_least_load_bankid_for_cq(bank);
+}
+
+static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+ struct ib_udata *udata)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct hns_roce_bank *bank;
@@ -63,7 +115,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
int id;
mutex_lock(&cq_table->bank_mutex);
- bankid = get_least_load_bankid_for_cq(cq_table->bank);
+ bankid = select_cq_bankid(hr_dev, cq_table->bank, udata);
bank = &cq_table->bank[bankid];
id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
@@ -396,7 +448,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cq_buf;
}
- ret = alloc_cqn(hr_dev, hr_cq);
+ ret = alloc_cqn(hr_dev, hr_cq, udata);
if (ret) {
ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
goto err_cq_db;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 78ee04a48a74..318f18cf37aa 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -33,6 +33,7 @@
#ifndef _HNS_ROCE_DEVICE_H
#define _HNS_ROCE_DEVICE_H
+#include <linux/pci.h>
#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
#include "hns_roce_debugfs.h"
@@ -153,6 +154,7 @@ enum {
HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
HNS_ROCE_CAP_FLAG_STASH = BIT(17),
HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19),
+ HNS_ROCE_CAP_FLAG_BOND = BIT(21),
HNS_ROCE_CAP_FLAG_SRQ_RECORD_DB = BIT(22),
};
@@ -177,6 +179,7 @@ enum hns_roce_instance_state {
HNS_ROCE_STATE_INIT,
HNS_ROCE_STATE_INITED,
HNS_ROCE_STATE_UNINIT,
+ HNS_ROCE_STATE_BOND_UNINIT,
};
enum {
@@ -217,6 +220,7 @@ struct hns_roce_ucontext {
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
u32 config;
+ u8 cq_bank_id;
};
struct hns_roce_pd {
@@ -495,6 +499,7 @@ struct hns_roce_cq_table {
struct hns_roce_hem_table table;
struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
struct mutex bank_mutex;
+ u32 ctx_num[HNS_ROCE_CQ_BANK_NUM];
};
struct hns_roce_srq_table {
@@ -1165,6 +1170,17 @@ static inline u8 get_tclass(const struct ib_global_route *grh)
grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
}
+static inline struct net_device *get_hr_netdev(struct hns_roce_dev *hr_dev,
+ u8 port)
+{
+ return hr_dev->iboe.netdevs[port];
+}
+
+static inline u8 get_hr_bus_num(struct hns_roce_dev *hr_dev)
+{
+ return hr_dev->pci_dev->bus->number;
+}
+
void hns_roce_init_uar_table(struct hns_roce_dev *dev);
int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
@@ -1291,7 +1307,7 @@ void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn);
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
int hns_roce_init(struct hns_roce_dev *hr_dev);
-void hns_roce_exit(struct hns_roce_dev *hr_dev);
+void hns_roce_exit(struct hns_roce_dev *hr_dev, bool bond_cleanup);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp);
@@ -1305,5 +1321,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,
enum hns_roce_mmap_type mmap_type);
bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
+void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
+void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index f82bdd46a917..2d6ae89e525b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -43,11 +43,13 @@
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
+#include "hclge_main.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
+#include "hns_roce_bond.h"
#define CREATE_TRACE_POINTS
#include "hns_roce_trace.h"
@@ -165,6 +167,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
hr_reg_clear(fseg, FRMR_BLK_MODE);
+ hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
+ hr_reg_clear(fseg, FRMR_ZBVA);
}
static void set_atomic_seg(const struct ib_send_wr *wr,
@@ -339,9 +343,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int j = 0;
int i;
- hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
- (*sge_ind) & (qp->sge.sge_cnt - 1));
-
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
!!(wr->send_flags & IB_SEND_INLINE));
if (wr->send_flags & IB_SEND_INLINE)
@@ -586,6 +587,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
(wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
+ curr_idx & (qp->sge.sge_cnt - 1));
+
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
if (msg_len != ATOMIC_WR_LEN)
@@ -734,6 +738,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
+ /* RC and UD share the same DirectWQE field layout */
+ ((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
+
/* Corresponding to the QP type, wqe process separately */
if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
@@ -1429,6 +1436,79 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
return ret;
}
+static enum hns_roce_opcode_type
+ get_bond_opcode(enum hns_roce_bond_cmd_type bond_type)
+{
+ switch (bond_type) {
+ case HNS_ROCE_SET_BOND:
+ return HNS_ROCE_OPC_SET_BOND_INFO;
+ case HNS_ROCE_CHANGE_BOND:
+ return HNS_ROCE_OPC_CHANGE_ACTIVE_PORT;
+ case HNS_ROCE_CLEAR_BOND:
+ return HNS_ROCE_OPC_CLEAR_BOND_INFO;
+ default:
+ WARN(true, "Invalid bond type %d!\n", bond_type);
+ return HNS_ROCE_OPC_SET_BOND_INFO;
+ }
+}
+
+static enum hns_roce_bond_hashtype
+ get_bond_hashtype(enum netdev_lag_hash netdev_hashtype)
+{
+ switch (netdev_hashtype) {
+ case NETDEV_LAG_HASH_L2:
+ return BOND_HASH_L2;
+ case NETDEV_LAG_HASH_L34:
+ return BOND_HASH_L34;
+ case NETDEV_LAG_HASH_L23:
+ return BOND_HASH_L23;
+ default:
+ WARN(true, "Invalid hash type %d!\n", netdev_hashtype);
+ return BOND_HASH_L2;
+ }
+}
+
+int hns_roce_cmd_bond(struct hns_roce_bond_group *bond_grp,
+ enum hns_roce_bond_cmd_type bond_type)
+{
+ enum hns_roce_opcode_type opcode = get_bond_opcode(bond_type);
+ struct hns_roce_bond_info *slave_info;
+ struct hns_roce_cmq_desc desc = {};
+ int ret;
+
+ slave_info = (struct hns_roce_bond_info *)desc.data;
+ hns_roce_cmq_setup_basic_desc(&desc, opcode, false);
+
+ slave_info->bond_id = cpu_to_le32(bond_grp->bond_id);
+ if (bond_type == HNS_ROCE_CLEAR_BOND)
+ goto out;
+
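+ /* Active-backup corresponds to mode 1; hash-based tx types to mode 2/4 */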
+ if (bond_grp->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+ slave_info->bond_mode = cpu_to_le32(BOND_MODE_1);
+ if (bond_grp->active_slave_num != 1)
+ ibdev_warn(&bond_grp->main_hr_dev->ib_dev,
+ "active slave cnt(%u) in Mode 1 is invalid.\n",
+ bond_grp->active_slave_num);
+ } else {
+ slave_info->bond_mode = cpu_to_le32(BOND_MODE_2_4);
+ slave_info->hash_policy =
+ cpu_to_le32(get_bond_hashtype(bond_grp->hash_type));
+ }
+
+ slave_info->active_slave_cnt = cpu_to_le32(bond_grp->active_slave_num);
+ slave_info->active_slave_mask = cpu_to_le32(bond_grp->active_slave_map);
+ slave_info->slave_mask = cpu_to_le32(bond_grp->slave_map);
+
+out:
+ ret = hns_roce_cmq_send(bond_grp->main_hr_dev, &desc, 1);
+ if (ret)
+ ibdev_err(&bond_grp->main_hr_dev->ib_dev,
+ "cmq bond type(%d) failed, ret = %d.\n",
+ bond_type, ret);
+
+ return ret;
+}
+
static int config_hem_ba_to_hw(struct hns_roce_dev *hr_dev,
dma_addr_t base_addr, u8 cmd, unsigned long tag)
{
@@ -2270,6 +2350,9 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) <<
HNS_ROCE_CAP_FLAGS_EX_SHIFT;
+ if (hr_dev->is_vf)
+ caps->flags &= ~HNS_ROCE_CAP_FLAG_BOND;
+
caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS);
caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID);
caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH);
@@ -7048,7 +7131,6 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_roce_init;
}
-
handle->priv = hr_dev;
return 0;
@@ -7063,7 +7145,7 @@ error_failed_kzalloc:
}
static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
- bool reset)
+ bool reset, bool bond_cleanup)
{
struct hns_roce_dev *hr_dev = handle->priv;
@@ -7075,7 +7157,7 @@ static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
hns_roce_handle_device_err(hr_dev);
- hns_roce_exit(hr_dev);
+ hns_roce_exit(hr_dev, bond_cleanup);
kfree(hr_dev->priv);
ib_dealloc_device(&hr_dev->ib_dev);
}
@@ -7126,12 +7208,51 @@ reset_chk_err:
static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
bool reset)
{
+ /* Suspend bond to avoid concurrency */
+ hns_roce_bond_suspend(handle);
+
if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
- return;
+ goto out;
handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
- __hns_roce_hw_v2_uninit_instance(handle, reset);
+ __hns_roce_hw_v2_uninit_instance(handle, reset, true);
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
+
+out:
+ hns_roce_bond_resume(handle);
+}
+
+struct hns_roce_dev
+ *hns_roce_bond_init_client(struct hns_roce_bond_group *bond_grp,
+ int func_idx)
+{
+ struct hnae3_handle *handle;
+ int ret;
+
+ handle = bond_grp->bond_func_info[func_idx].handle;
+ if (!handle || !handle->client)
+ return NULL;
+
+ ret = hns_roce_hw_v2_init_instance(handle);
+ if (ret)
+ return NULL;
+
+ return handle->priv;
+}
+
+void hns_roce_bond_uninit_client(struct hns_roce_bond_group *bond_grp,
+ int func_idx)
+{
+ struct hnae3_handle *handle = bond_grp->bond_func_info[func_idx].handle;
+
+ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
+ return;
+
+ handle->rinfo.instance_state = HNS_ROCE_STATE_BOND_UNINIT;
+
+ __hns_roce_hw_v2_uninit_instance(handle, false, false);
handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}
@@ -7140,6 +7261,9 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
struct hns_roce_dev *hr_dev;
+ /* Suspend bond to avoid concurrency */
+ hns_roce_bond_suspend(handle);
+
if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
return 0;
@@ -7170,6 +7294,7 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
&handle->rinfo.state)) {
handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
+ hns_roce_bond_resume(handle);
return 0;
}
@@ -7189,6 +7314,7 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
dev_info(dev, "reset done, RoCE client reinit finished.\n");
}
+ hns_roce_bond_resume(handle);
return ret;
}
@@ -7200,7 +7326,7 @@ static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
- __hns_roce_hw_v2_uninit_instance(handle, false);
+ __hns_roce_hw_v2_uninit_instance(handle, false, false);
return 0;
}
@@ -7236,6 +7362,14 @@ static void hns_roce_hw_v2_link_status_change(struct hnae3_handle *handle,
if (linkup || !hr_dev)
return;
+ /* For a bond device, the link status depends on the upper netdev,
+ * whose link status in turn depends on all of the slave netdevs
+ * rather than on a single one. So a bond device cannot derive a
+ * correct link status from this path.
+ */
+ if (hns_roce_get_bond_grp(netdev, get_hr_bus_num(hr_dev)))
+ return;
+
ib_dispatch_port_state_event(&hr_dev->ib_dev, netdev);
}
@@ -7260,6 +7394,7 @@ static int __init hns_roce_hw_v2_init(void)
static void __exit hns_roce_hw_v2_exit(void)
{
+ hns_roce_dealloc_bond_grp();
hnae3_unregister_client(&hns_roce_hw_v2_client);
hns_roce_cleanup_debugfs();
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index e64a04d6f85b..285fe0875fac 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -35,6 +35,7 @@
#include <linux/bitops.h>
#include "hnae3.h"
+#include "hns_roce_bond.h"
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_MTT_ENTRY_SZ 64
@@ -228,6 +229,9 @@ enum hns_roce_opcode_type {
HNS_ROCE_OPC_CFG_GMV_BT = 0x8510,
HNS_ROCE_QUERY_RAM_ECC = 0x8513,
HNS_SWITCH_PARAMETER_CFG = 0x1033,
+ HNS_ROCE_OPC_SET_BOND_INFO = 0x8601,
+ HNS_ROCE_OPC_CLEAR_BOND_INFO = 0x8602,
+ HNS_ROCE_OPC_CHANGE_ACTIVE_PORT = 0x8603,
};
#define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000
@@ -1465,7 +1469,23 @@ struct hns_roce_sccc_clr_done {
__le32 rsv[5];
};
+struct hns_roce_bond_info {
+ __le32 bond_id;
+ __le32 bond_mode;
+ __le32 active_slave_cnt;
+ __le32 active_slave_mask;
+ __le32 slave_mask;
+ __le32 hash_policy;
+};
+
+struct hns_roce_dev
+ *hns_roce_bond_init_client(struct hns_roce_bond_group *bond_grp,
+ int func_idx);
+void hns_roce_bond_uninit_client(struct hns_roce_bond_group *bond_grp,
+ int func_idx);
int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+int hns_roce_cmd_bond(struct hns_roce_bond_group *bond_grp,
+ enum hns_roce_bond_cmd_type bond_type);
static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
void __iomem *dest)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index d50f36f8a110..2f4864ab7d4e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -32,7 +32,6 @@
*/
#include <linux/acpi.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
@@ -41,6 +40,7 @@
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"
+#include "hns_roce_bond.h"
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
const u8 *addr)
@@ -89,30 +89,75 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
return ret;
}
-static int handle_en_event(struct hns_roce_dev *hr_dev, u32 port,
- unsigned long event)
+static int hns_roce_get_port_state(struct hns_roce_dev *hr_dev, u32 port_num,
+ enum ib_port_state *state)
{
+ struct hns_roce_bond_group *bond_grp;
+ u8 bus_num = get_hr_bus_num(hr_dev);
+ struct net_device *net_dev;
+
+ net_dev = ib_device_get_netdev(&hr_dev->ib_dev, port_num);
+ if (!net_dev)
+ return -ENODEV;
+
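+ /* A bond device reports the link state of its upper device */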
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND) {
+ bond_grp = hns_roce_get_bond_grp(net_dev, bus_num);
+ if (bond_grp) {
+ *state = ib_get_curr_port_state(bond_grp->upper_dev);
+ goto out;
+ }
+ }
+
+ *state = ib_get_curr_port_state(net_dev);
+out:
+ dev_put(net_dev);
+ return 0;
+}
+
+static int handle_en_event(struct net_device *netdev,
+ struct hns_roce_dev *hr_dev,
+ u32 port, unsigned long event)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
struct device *dev = hr_dev->dev;
- struct net_device *netdev;
+ enum ib_port_state curr_state;
+ struct ib_event ibevent;
int ret = 0;
- netdev = hr_dev->iboe.netdevs[port];
if (!netdev) {
dev_err(dev, "can't find netdev on port(%u)!\n", port);
return -ENODEV;
}
switch (event) {
- case NETDEV_UP:
- case NETDEV_CHANGE:
case NETDEV_REGISTER:
case NETDEV_CHANGEADDR:
ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+ ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
+ if (ret)
+ return ret;
+ fallthrough;
case NETDEV_DOWN:
- /*
- * In v1 engine, only support all ports closed together.
- */
+ if (!netif_is_lag_master(netdev))
+ break;
+ curr_state = ib_get_curr_port_state(netdev);
+
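+ /* Dispatch a port event only when the cached port state changes */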
+ write_lock_irq(&ibdev->cache_lock);
+ if (ibdev->port_data[port].cache.last_port_state == curr_state) {
+ write_unlock_irq(&ibdev->cache_lock);
+ return 0;
+ }
+ ibdev->port_data[port].cache.last_port_state = curr_state;
+ write_unlock_irq(&ibdev->cache_lock);
+
+ ibevent.event = (curr_state == IB_PORT_DOWN) ?
+ IB_EVENT_PORT_ERR : IB_EVENT_PORT_ACTIVE;
+ ibevent.device = ibdev;
+ ibevent.element.port_num = port + 1;
+ ib_dispatch_event(&ibevent);
break;
default:
dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
@@ -126,17 +171,25 @@ static int hns_roce_netdev_event(struct notifier_block *self,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct hns_roce_bond_group *bond_grp;
struct hns_roce_ib_iboe *iboe = NULL;
struct hns_roce_dev *hr_dev = NULL;
+ struct net_device *upper = NULL;
int ret;
u32 port;
hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
iboe = &hr_dev->iboe;
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND) {
+ bond_grp = hns_roce_get_bond_grp(get_hr_netdev(hr_dev, 0),
+ get_hr_bus_num(hr_dev));
+ upper = bond_grp ? bond_grp->upper_dev : NULL;
+ }
for (port = 0; port < hr_dev->caps.num_ports; port++) {
- if (dev == iboe->netdevs[port]) {
- ret = handle_en_event(hr_dev, port, event);
+ if ((!upper && dev == iboe->netdevs[port]) ||
+ (upper && dev == upper)) {
+ ret = handle_en_event(dev, hr_dev, port, event);
if (ret)
return NOTIFY_DONE;
break;
@@ -148,12 +201,13 @@ static int hns_roce_netdev_event(struct notifier_block *self,
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
+ struct net_device *net_dev;
int ret;
u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
- ret = hns_roce_set_mac(hr_dev, i,
- hr_dev->iboe.netdevs[i]->dev_addr);
+ net_dev = get_hr_netdev(hr_dev, i);
+ ret = hns_roce_set_mac(hr_dev, i, net_dev->dev_addr);
if (ret)
return ret;
}
@@ -221,9 +275,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
struct ib_port_attr *props)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
- struct device *dev = hr_dev->dev;
struct net_device *net_dev;
- unsigned long flags;
enum ib_mtu mtu;
u32 port;
int ret;
@@ -244,26 +296,26 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u32 port_num,
if (ret)
ibdev_warn(ib_dev, "failed to get speed, ret = %d.\n", ret);
- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
- net_dev = hr_dev->iboe.netdevs[port];
+ net_dev = ib_device_get_netdev(ib_dev, port_num);
if (!net_dev) {
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
- dev_err(dev, "find netdev %u failed!\n", port);
+ ibdev_err(ib_dev, "find netdev %u failed!\n", port);
return -EINVAL;
}
mtu = iboe_get_mtu(net_dev->mtu);
props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
- props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
- IB_PORT_ACTIVE :
- IB_PORT_DOWN;
+
+ dev_put(net_dev);
+
+ ret = hns_roce_get_port_state(hr_dev, port_num, &props->state);
+ if (ret) {
+ ibdev_err(ib_dev, "failed to get port state.\n");
+ return ret;
+ }
+
props->phys_state = props->state == IB_PORT_ACTIVE ?
IB_PORT_PHYS_STATE_LINK_UP :
IB_PORT_PHYS_STATE_DISABLED;
-
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-
return 0;
}
@@ -425,6 +477,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
if (ret)
goto error_fail_copy_to_udata;
+ hns_roce_get_cq_bankid_for_uctx(context);
+
return 0;
error_fail_copy_to_udata:
@@ -447,6 +501,8 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
+ hns_roce_put_cq_bankid_for_uctx(context);
+
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
mutex_destroy(&context->page_mutex);
@@ -613,9 +669,40 @@ static int hns_roce_get_hw_stats(struct ib_device *device,
return num_counters;
}
-static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
+static void
+ hns_roce_unregister_bond_cleanup(struct hns_roce_dev *hr_dev,
+ struct hns_roce_bond_group *bond_grp)
+{
+ struct net_device *net_dev;
+ int i;
+
+ /* To avoid losing the other slave devices when main_hr_dev
+ * is unregistered, re-initialize the remaining slaves before
+ * cleaning up the bond resources.
+ */
+ bond_grp->bond_state = HNS_ROCE_BOND_NOT_BONDED;
+ for (i = 0; i < ROCE_BOND_FUNC_MAX; i++) {
+ net_dev = bond_grp->bond_func_info[i].net_dev;
+ if (net_dev && net_dev != get_hr_netdev(hr_dev, 0))
+ hns_roce_bond_init_client(bond_grp, i);
+ }
+
+ hns_roce_cleanup_bond(bond_grp);
+}
+
+static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev,
+ bool bond_cleanup)
{
+ struct net_device *net_dev = get_hr_netdev(hr_dev, 0);
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
+ struct hns_roce_bond_group *bond_grp;
+ u8 bus_num = get_hr_bus_num(hr_dev);
+
+ if (bond_cleanup && hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND) {
+ bond_grp = hns_roce_get_bond_grp(net_dev, bus_num);
+ if (bond_grp)
+ hns_roce_unregister_bond_cleanup(hr_dev, bond_grp);
+ }
hr_dev->active = false;
unregister_netdevice_notifier(&iboe->nb);
@@ -704,11 +791,12 @@ static const struct ib_device_ops hns_roce_dev_restrack_ops = {
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
- int ret;
struct hns_roce_ib_iboe *iboe = NULL;
- struct ib_device *ib_dev = NULL;
struct device *dev = hr_dev->dev;
+ struct ib_device *ib_dev = NULL;
+ struct net_device *net_dev;
unsigned int i;
+ int ret;
iboe = &hr_dev->iboe;
spin_lock_init(&iboe->lock);
@@ -743,17 +831,38 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_restrack_ops);
- for (i = 0; i < hr_dev->caps.num_ports; i++) {
- if (!hr_dev->iboe.netdevs[i])
- continue;
- ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
- i + 1);
- if (ret)
+ dma_set_max_seg_size(dev, SZ_2G);
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND) {
+ ret = hns_roce_alloc_bond_grp(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to alloc bond_grp for bus %u, ret = %d\n",
+ get_hr_bus_num(hr_dev), ret);
+ return ret;
+ }
+ }
+
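+ /* When a bond is already active, register a single "hns_bond" device */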
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_BOND &&
+ hns_roce_bond_is_active(hr_dev)) {
+ ret = hns_roce_bond_init(hr_dev);
+ if (ret) {
+ dev_err(dev, "failed to init bond!\n");
return ret;
+ }
+ ret = ib_register_device(ib_dev, "hns_bond_%d", dev);
+ } else {
+ for (i = 0; i < hr_dev->caps.num_ports; i++) {
+ net_dev = get_hr_netdev(hr_dev, i);
+ if (!net_dev)
+ continue;
+
+ ret = ib_device_set_netdev(ib_dev, net_dev, i + 1);
+ if (ret)
+ return ret;
+ }
+ ret = ib_register_device(ib_dev, "hns_%d", dev);
}
- dma_set_max_seg_size(dev, SZ_2G);
- ret = ib_register_device(ib_dev, "hns_%d", dev);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
return ret;
@@ -1153,10 +1262,10 @@ error_failed_alloc_dfx_cnt:
return ret;
}
-void hns_roce_exit(struct hns_roce_dev *hr_dev)
+void hns_roce_exit(struct hns_roce_dev *hr_dev, bool bond_cleanup)
{
hns_roce_unregister_debugfs(hr_dev);
- hns_roce_unregister_device(hr_dev);
+ hns_roce_unregister_device(hr_dev, bond_cleanup);
if (hr_dev->hw->hw_exit)
hr_dev->hw->hw_exit(hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 0f037e545520..31cb8699e198 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -594,8 +594,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
buf_attr->user_access);
if (IS_ERR(mtr->umem)) {
- ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
- PTR_ERR(mtr->umem));
+ ibdev_err(ibdev, "failed to get umem, ret = %pe.\n",
+ mtr->umem);
return -ENOMEM;
}
} else {
@@ -605,8 +605,8 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
!mtr_has_mtt(buf_attr) ?
HNS_ROCE_BUF_DIRECT : 0);
if (IS_ERR(mtr->kmem)) {
- ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
- PTR_ERR(mtr->kmem));
+ ibdev_err(ibdev, "failed to alloc kmem, ret = %pe.\n",
+ mtr->kmem);
return PTR_ERR(mtr->kmem);
}
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index d35cf59d0f43..225c3e328e0e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/pci.h>
#include "hns_roce_device.h"
void hns_roce_init_pd_table(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 6ff1b8ce580c..d1640c5fbaab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
@@ -662,7 +661,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
hr_qp->sq.wqe_cnt = cnt;
- cap->max_send_sge = hr_qp->sq.max_gs;
return 0;
}
@@ -744,7 +742,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
/* sync the parameters of kernel QP to user's configuration */
cap->max_send_wr = cnt;
- cap->max_send_sge = hr_qp->sq.max_gs;
return 0;
}
@@ -1350,11 +1347,13 @@ static int check_mtu_validate(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_qp_attr *attr, int attr_mask)
{
+ struct net_device *net_dev;
enum ib_mtu active_mtu;
int p;
p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
- active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+ net_dev = get_hr_netdev(hr_dev, p);
+ active_mtu = iboe_get_mtu(net_dev->mtu);
if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
attr->path_mtu > hr_dev->caps.max_mtu) ||
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 1090051f493b..8a6efb6b9c9e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -3,7 +3,6 @@
* Copyright (c) 2018 Hisilicon Limited.
*/
-#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
diff --git a/drivers/infiniband/hw/ionic/Kconfig b/drivers/infiniband/hw/ionic/Kconfig
new file mode 100644
index 000000000000..de6f10e9b6e9
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2018-2025, Advanced Micro Devices, Inc.
+
+config INFINIBAND_IONIC
+ tristate "AMD Pensando DSC RDMA/RoCE Support"
+ depends on NETDEVICES && ETHERNET && PCI && INET && IONIC
+ help
+ This enables RDMA/RoCE support for the AMD Pensando family of
+ Distributed Services Cards (DSCs).
+
+ To learn more, visit our website at
+ <https://www.amd.com/en/products/accelerators/pensando.html>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ionic_rdma.
diff --git a/drivers/infiniband/hw/ionic/Makefile b/drivers/infiniband/hw/ionic/Makefile
new file mode 100644
index 000000000000..957973742820
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y := -I $(srctree)/drivers/net/ethernet/pensando/ionic
+
+obj-$(CONFIG_INFINIBAND_IONIC) += ionic_rdma.o
+
+ionic_rdma-y := \
+ ionic_ibdev.o ionic_lif_cfg.o ionic_queue.o ionic_pgtbl.o ionic_admin.o \
+ ionic_controlpath.o ionic_datapath.o ionic_hw_stats.o
diff --git a/drivers/infiniband/hw/ionic/ionic_admin.c b/drivers/infiniband/hw/ionic/ionic_admin.c
new file mode 100644
index 000000000000..2537aa55d12d
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_admin.c
@@ -0,0 +1,1229 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+#define IONIC_EQ_COUNT_MIN 4
+#define IONIC_AQ_COUNT_MIN 1
+
+/* neither a valid queue position nor a negative error status */
+#define IONIC_ADMIN_POSTED 0x10000
+
+/* The CPU can be held with IRQs disabled for COUNT * MS (for create/destroy_ah) */
+#define IONIC_ADMIN_BUSY_RETRY_COUNT 2000
+#define IONIC_ADMIN_BUSY_RETRY_MS 1
+
+/* the admin queue is considered failed if a command takes longer than this */
+#define IONIC_ADMIN_TIMEOUT (HZ * 2)
+#define IONIC_ADMIN_WARN (HZ / 8)
+
+/* poll the admin cq to tolerate and report a missed event */
+#define IONIC_ADMIN_DELAY (HZ / 8)
+
+/* work queue for polling the event queue and admin cq */
+struct workqueue_struct *ionic_evt_workq;
+
+static void ionic_admin_timedout(struct ionic_aq *aq)
+{
+ struct ionic_ibdev *dev = aq->dev;
+ unsigned long irqflags;
+ u16 pos;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ if (ionic_queue_empty(&aq->q))
+ goto out;
+
+ /* Reset ALL adminq if any one times out */
+ if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
+ queue_work(ionic_evt_workq, &dev->reset_work);
+
+ ibdev_err(&dev->ibdev, "admin command timed out, aq %d after %ums\n",
+ aq->aqid, (u32)jiffies_to_msecs(jiffies - aq->stamp));
+
+ pos = (aq->q.prod - 1) & aq->q.mask;
+ if (pos == aq->q.cons)
+ goto out;
+
+ ibdev_warn(&dev->ibdev, "admin pos %u (last posted)\n", pos);
+ print_hex_dump(KERN_WARNING, "cmd ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at(&aq->q, pos),
+ BIT(aq->q.stride_log2), true);
+
+out:
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_admin_reset_dwork(struct ionic_ibdev *dev)
+{
+ if (atomic_read(&dev->admin_state) == IONIC_ADMIN_KILLED)
+ return;
+
+ queue_delayed_work(ionic_evt_workq, &dev->admin_dwork,
+ IONIC_ADMIN_DELAY);
+}
+
+static void ionic_admin_reset_wdog(struct ionic_aq *aq)
+{
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED)
+ return;
+
+ aq->stamp = jiffies;
+ ionic_admin_reset_dwork(aq->dev);
+}
+
+static bool ionic_admin_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_v1_cqe **cqe)
+{
+ struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
+
+ if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
+ return false;
+
+ /* Prevent out-of-order reads of the CQE */
+ dma_rmb();
+ *cqe = qcqe;
+
+ return true;
+}
+
+static void ionic_admin_poll_locked(struct ionic_aq *aq)
+{
+ struct ionic_cq *cq = &aq->vcq->cq[0];
+ struct ionic_admin_wr *wr, *wr_next;
+ struct ionic_ibdev *dev = aq->dev;
+ u32 wr_strides, avlbl_strides;
+ struct ionic_v1_cqe *cqe;
+ u32 qtf, qid;
+ u16 old_prod;
+ u8 type;
+
+ lockdep_assert_held(&aq->lock);
+
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_KILLED) {
+ list_for_each_entry_safe(wr, wr_next, &aq->wr_prod, aq_ent) {
+ INIT_LIST_HEAD(&wr->aq_ent);
+ aq->q_wr[wr->status].wr = NULL;
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+ INIT_LIST_HEAD(&aq->wr_prod);
+
+ list_for_each_entry_safe(wr, wr_next, &aq->wr_post, aq_ent) {
+ INIT_LIST_HEAD(&wr->aq_ent);
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+ INIT_LIST_HEAD(&aq->wr_post);
+
+ return;
+ }
+
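+ /* Consume new completions from the admin CQ */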
+ old_prod = cq->q.prod;
+
+ while (ionic_admin_next_cqe(dev, cq, &cqe)) {
+ qtf = ionic_v1_cqe_qtf(cqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ if (unlikely(type != IONIC_V1_CQE_TYPE_ADMIN)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe type %u\n", type);
+ goto cq_next;
+ }
+
+ if (unlikely(qid != aq->aqid)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe qid %u\n", qid);
+ goto cq_next;
+ }
+
+ if (unlikely(be16_to_cpu(cqe->admin.cmd_idx) != aq->q.cons)) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad idx %u cons %u qid %u\n",
+ be16_to_cpu(cqe->admin.cmd_idx),
+ aq->q.cons, qid);
+ goto cq_next;
+ }
+
+ if (unlikely(ionic_queue_empty(&aq->q))) {
+ ibdev_warn_ratelimited(&dev->ibdev,
+ "bad cqe for empty adminq\n");
+ goto cq_next;
+ }
+
+ wr = aq->q_wr[aq->q.cons].wr;
+ if (wr) {
+ aq->q_wr[aq->q.cons].wr = NULL;
+ list_del_init(&wr->aq_ent);
+
+ wr->cqe = *cqe;
+ wr->status = atomic_read(&aq->admin_state);
+ complete_all(&wr->work);
+ }
+
+ ionic_queue_consume_entries(&aq->q,
+ aq->q_wr[aq->q.cons].wqe_strides);
+
+cq_next:
+ ionic_queue_produce(&cq->q);
+ cq->color = ionic_color_wrap(cq->q.prod, cq->color);
+ }
+
+ if (old_prod != cq->q.prod) {
+ ionic_admin_reset_wdog(aq);
+ cq->q.cons = cq->q.prod;
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ ionic_queue_dbell_val(&cq->q));
+ queue_work(ionic_evt_workq, &aq->work);
+ } else if (!aq->armed) {
+ aq->armed = true;
+ cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ cq->q.dbell | IONIC_CQ_RING_ARM |
+ cq->arm_any_prod);
+ queue_work(ionic_evt_workq, &aq->work);
+ }
+
+ if (atomic_read(&aq->admin_state) != IONIC_ADMIN_ACTIVE)
+ return;
+
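+ /* Post as many queued work requests as the admin queue can hold */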
+ old_prod = aq->q.prod;
+
+ if (ionic_queue_empty(&aq->q) && !list_empty(&aq->wr_post))
+ ionic_admin_reset_wdog(aq);
+
+ if (list_empty(&aq->wr_post))
+ return;
+
+ do {
+ u8 *src;
+ int i, src_len;
+ size_t stride_len;
+
+ wr = list_first_entry(&aq->wr_post, struct ionic_admin_wr,
+ aq_ent);
+ wr_strides = (le16_to_cpu(wr->wqe.len) + ADMIN_WQE_HDR_LEN +
+ (ADMIN_WQE_STRIDE - 1)) >> aq->q.stride_log2;
+ avlbl_strides = ionic_queue_length_remaining(&aq->q);
+
+ if (wr_strides > avlbl_strides)
+ break;
+
+ list_move(&wr->aq_ent, &aq->wr_prod);
+ wr->status = aq->q.prod;
+ aq->q_wr[aq->q.prod].wr = wr;
+ aq->q_wr[aq->q.prod].wqe_strides = wr_strides;
+
+ src_len = le16_to_cpu(wr->wqe.len);
+ src = (u8 *)&wr->wqe.cmd;
+
+ /* First stride */
+ memcpy(ionic_queue_at_prod(&aq->q), &wr->wqe,
+ ADMIN_WQE_HDR_LEN);
+ stride_len = ADMIN_WQE_STRIDE - ADMIN_WQE_HDR_LEN;
+ if (stride_len > src_len)
+ stride_len = src_len;
+ memcpy(ionic_queue_at_prod(&aq->q) + ADMIN_WQE_HDR_LEN,
+ src, stride_len);
+ ibdev_dbg(&dev->ibdev, "post admin prod %u (%u strides)\n",
+ aq->q.prod, wr_strides);
+ print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at_prod(&aq->q),
+ BIT(aq->q.stride_log2), true);
+ ionic_queue_produce(&aq->q);
+
+ /* Remaining strides */
+ for (i = stride_len; i < src_len; i += stride_len) {
+ stride_len = ADMIN_WQE_STRIDE;
+
+ if (i + stride_len > src_len)
+ stride_len = src_len - i;
+
+ memcpy(ionic_queue_at_prod(&aq->q), src + i,
+ stride_len);
+ print_hex_dump_debug("wqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ ionic_queue_at_prod(&aq->q),
+ BIT(aq->q.stride_log2), true);
+ ionic_queue_produce(&aq->q);
+ }
+ } while (!list_empty(&aq->wr_post));
+
+ if (old_prod != aq->q.prod)
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.aq_qtype,
+ ionic_queue_dbell_val(&aq->q));
+}
+
+static void ionic_admin_dwork(struct work_struct *ws)
+{
+ struct ionic_ibdev *dev =
+ container_of(ws, struct ionic_ibdev, admin_dwork.work);
+ struct ionic_aq *aq, *bad_aq = NULL;
+ bool do_reschedule = false;
+ unsigned long irqflags;
+ bool do_reset = false;
+ u16 pos;
+ int i;
+
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ aq = dev->aq_vec[i];
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+
+ if (ionic_queue_empty(&aq->q))
+ goto next_aq;
+
+ /* Reschedule if any queue has outstanding work */
+ do_reschedule = true;
+
+ if (time_is_after_eq_jiffies(aq->stamp + IONIC_ADMIN_WARN))
+ /* Warning threshold not met, nothing to do */
+ goto next_aq;
+
+ /* See if polling now makes some progress */
+ pos = aq->q.cons;
+ ionic_admin_poll_locked(aq);
+ if (pos != aq->q.cons) {
+ ibdev_dbg(&dev->ibdev,
+ "missed event for acq %d\n", aq->cqid);
+ goto next_aq;
+ }
+
+ if (time_is_after_eq_jiffies(aq->stamp +
+ IONIC_ADMIN_TIMEOUT)) {
+ /* Timeout threshold not met */
+ ibdev_dbg(&dev->ibdev, "no progress after %ums\n",
+ (u32)jiffies_to_msecs(jiffies - aq->stamp));
+ goto next_aq;
+ }
+
+ /* Queue timed out */
+ bad_aq = aq;
+ do_reset = true;
+next_aq:
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+ }
+
+ if (do_reset)
+ /* Reset RDMA lif on a timeout */
+ ionic_admin_timedout(bad_aq);
+ else if (do_reschedule)
+ /* Try to poll again later */
+ ionic_admin_reset_dwork(dev);
+}
+
+static void ionic_admin_work(struct work_struct *ws)
+{
+ struct ionic_aq *aq = container_of(ws, struct ionic_aq, work);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_admin_post_aq(struct ionic_aq *aq, struct ionic_admin_wr *wr)
+{
+ unsigned long irqflags;
+ bool poll;
+
+ wr->status = IONIC_ADMIN_POSTED;
+ wr->aq = aq;
+
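+ /* Kick the queue now if no other work request was already pending */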
+ spin_lock_irqsave(&aq->lock, irqflags);
+ poll = list_empty(&aq->wr_post);
+ list_add(&wr->aq_ent, &aq->wr_post);
+ if (poll)
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr)
+{
+ int aq_idx;
+
+ /* Use cpu id for the adminq selection */
+ aq_idx = raw_smp_processor_id() % dev->lif_cfg.aq_count;
+ ionic_admin_post_aq(dev->aq_vec[aq_idx], wr);
+}
+
+static void ionic_admin_cancel(struct ionic_admin_wr *wr)
+{
+ struct ionic_aq *aq = wr->aq;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+
+ if (!list_empty(&wr->aq_ent)) {
+ list_del(&wr->aq_ent);
+ if (wr->status != IONIC_ADMIN_POSTED)
+ aq->q_wr[wr->status].wr = NULL;
+ }
+
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static int ionic_admin_busy_wait(struct ionic_admin_wr *wr)
+{
+ struct ionic_aq *aq = wr->aq;
+ unsigned long irqflags;
+ int try_i;
+
+ for (try_i = 0; try_i < IONIC_ADMIN_BUSY_RETRY_COUNT; ++try_i) {
+ if (completion_done(&wr->work))
+ return 0;
+
+ mdelay(IONIC_ADMIN_BUSY_RETRY_MS);
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ ionic_admin_poll_locked(aq);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+ }
+
+ /*
+ * We timed out. Initiate an RDMA LIF reset and indicate
+ * the error to the caller.
+ */
+ ionic_admin_timedout(aq);
+ return -ETIMEDOUT;
+}
+
+int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
+ enum ionic_admin_flags flags)
+{
+ int rc, timo;
+
+ if (flags & IONIC_ADMIN_F_BUSYWAIT) {
+ /* Spin */
+ rc = ionic_admin_busy_wait(wr);
+ } else if (flags & IONIC_ADMIN_F_INTERRUPT) {
+ /*
+ * Interruptible sleep, 1s timeout
+ * This is used for commands which are safe for the caller
+ * to clean up without killing and resetting the adminq.
+ */
+ timo = wait_for_completion_interruptible_timeout(&wr->work,
+ HZ);
+ if (timo > 0)
+ rc = 0;
+ else if (timo == 0)
+ rc = -ETIMEDOUT;
+ else
+ rc = timo;
+ } else {
+ /*
+ * Uninterruptible sleep
+ * This is used for commands which are NOT safe for the
+ * caller to clean up. Cleanup must be handled by the
+ * adminq kill and reset process so that host memory is
+ * not corrupted by the device.
+ */
+ wait_for_completion(&wr->work);
+ rc = 0;
+ }
+
+ if (rc) {
+ ibdev_warn(&dev->ibdev, "wait status %d\n", rc);
+ ionic_admin_cancel(wr);
+ } else if (wr->status == IONIC_ADMIN_KILLED) {
+ ibdev_dbg(&dev->ibdev, "admin killed\n");
+
+ /* No error if admin already killed during teardown */
+ rc = (flags & IONIC_ADMIN_F_TEARDOWN) ? 0 : -ENODEV;
+ } else if (ionic_v1_cqe_error(&wr->cqe)) {
+ ibdev_warn(&dev->ibdev, "opcode %u error %u\n",
+ wr->wqe.op,
+ be32_to_cpu(wr->cqe.status_length));
+ rc = -EINVAL;
+ }
+ return rc;
+}
+
+static int ionic_rdma_devcmd(struct ionic_ibdev *dev,
+ struct ionic_admin_ctx *admin)
+{
+ int rc;
+
+ rc = ionic_adminq_post_wait(dev->lif_cfg.lif, admin);
+ if (rc)
+ return rc;
+
+ return ionic_error_to_errno(admin->comp.comp.status);
+}
+
+int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev)
+{
+ struct ionic_admin_ctx admin = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
+ .cmd.rdma_reset = {
+ .opcode = IONIC_CMD_RDMA_RESET_LIF,
+ .lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
+ },
+ };
+
+ return ionic_rdma_devcmd(dev, &admin);
+}
+
+static int ionic_rdma_queue_devcmd(struct ionic_ibdev *dev,
+ struct ionic_queue *q,
+ u32 qid, u32 cid, u16 opcode)
+{
+ struct ionic_admin_ctx admin = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(admin.work),
+ .cmd.rdma_queue = {
+ .opcode = opcode,
+ .lif_index = cpu_to_le16(dev->lif_cfg.lif_index),
+ .qid_ver = cpu_to_le32(qid),
+ .cid = cpu_to_le32(cid),
+ .dbid = cpu_to_le16(dev->lif_cfg.dbid),
+ .depth_log2 = q->depth_log2,
+ .stride_log2 = q->stride_log2,
+ .dma_addr = cpu_to_le64(q->dma),
+ },
+ };
+
+ return ionic_rdma_devcmd(dev, &admin);
+}
+
+static void ionic_rdma_admincq_comp(struct ib_cq *ibcq, void *cq_context)
+{
+ struct ionic_aq *aq = cq_context;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&aq->lock, irqflags);
+ aq->armed = false;
+ if (atomic_read(&aq->admin_state) < IONIC_ADMIN_KILLED)
+ queue_work(ionic_evt_workq, &aq->work);
+ spin_unlock_irqrestore(&aq->lock, irqflags);
+}
+
+static void ionic_rdma_admincq_event(struct ib_event *event, void *cq_context)
+{
+ struct ionic_aq *aq = cq_context;
+
+ ibdev_err(&aq->dev->ibdev, "admincq event %d\n", event->event);
+}
+
+static struct ionic_vcq *ionic_create_rdma_admincq(struct ionic_ibdev *dev,
+ int comp_vector)
+{
+ struct ib_cq_init_attr attr = {
+ .cqe = IONIC_AQ_DEPTH,
+ .comp_vector = comp_vector,
+ };
+ struct ionic_tbl_buf buf = {};
+ struct ionic_vcq *vcq;
+ struct ionic_cq *cq;
+ int rc;
+
+ vcq = kzalloc(sizeof(*vcq), GFP_KERNEL);
+ if (!vcq)
+ return ERR_PTR(-ENOMEM);
+
+ vcq->ibcq.device = &dev->ibdev;
+ vcq->ibcq.comp_handler = ionic_rdma_admincq_comp;
+ vcq->ibcq.event_handler = ionic_rdma_admincq_event;
+ atomic_set(&vcq->ibcq.usecnt, 0);
+
+ vcq->udma_mask = 1;
+ cq = &vcq->cq[0];
+
+ rc = ionic_create_cq_common(vcq, &buf, &attr, NULL, NULL,
+ NULL, NULL, 0);
+ if (rc)
+ goto err_init;
+
+ rc = ionic_rdma_queue_devcmd(dev, &cq->q, cq->cqid, cq->eqid,
+ IONIC_CMD_RDMA_CREATE_CQ);
+ if (rc)
+ goto err_cmd;
+
+ return vcq;
+
+err_cmd:
+ ionic_destroy_cq_common(dev, cq);
+err_init:
+ kfree(vcq);
+
+ return ERR_PTR(rc);
+}
+
+static struct ionic_aq *__ionic_create_rdma_adminq(struct ionic_ibdev *dev,
+ u32 aqid, u32 cqid)
+{
+ struct ionic_aq *aq;
+ int rc;
+
+ aq = kzalloc(sizeof(*aq), GFP_KERNEL);
+ if (!aq)
+ return ERR_PTR(-ENOMEM);
+
+ atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
+ aq->dev = dev;
+ aq->aqid = aqid;
+ aq->cqid = cqid;
+ spin_lock_init(&aq->lock);
+
+ rc = ionic_queue_init(&aq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
+ ADMIN_WQE_STRIDE);
+ if (rc)
+ goto err_q;
+
+ ionic_queue_dbell_init(&aq->q, aq->aqid);
+
+ aq->q_wr = kcalloc((u32)aq->q.mask + 1, sizeof(*aq->q_wr), GFP_KERNEL);
+ if (!aq->q_wr) {
+ rc = -ENOMEM;
+ goto err_wr;
+ }
+
+ INIT_LIST_HEAD(&aq->wr_prod);
+ INIT_LIST_HEAD(&aq->wr_post);
+
+ INIT_WORK(&aq->work, ionic_admin_work);
+ aq->armed = false;
+
+ return aq;
+
+err_wr:
+ ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
+err_q:
+ kfree(aq);
+
+ return ERR_PTR(rc);
+}
+
+static void __ionic_destroy_rdma_adminq(struct ionic_ibdev *dev,
+ struct ionic_aq *aq)
+{
+ kfree(aq->q_wr);
+ ionic_queue_destroy(&aq->q, dev->lif_cfg.hwdev);
+ kfree(aq);
+}
+
+static struct ionic_aq *ionic_create_rdma_adminq(struct ionic_ibdev *dev,
+ u32 aqid, u32 cqid)
+{
+ struct ionic_aq *aq;
+ int rc;
+
+ aq = __ionic_create_rdma_adminq(dev, aqid, cqid);
+ if (IS_ERR(aq))
+ return aq;
+
+ rc = ionic_rdma_queue_devcmd(dev, &aq->q, aq->aqid, aq->cqid,
+ IONIC_CMD_RDMA_CREATE_ADMINQ);
+ if (rc)
+ goto err_cmd;
+
+ return aq;
+
+err_cmd:
+ __ionic_destroy_rdma_adminq(dev, aq);
+
+ return ERR_PTR(rc);
+}
+
+static void ionic_flush_qs(struct ionic_ibdev *dev)
+{
+ struct ionic_qp *qp, *qp_tmp;
+ struct ionic_cq *cq, *cq_tmp;
+ LIST_HEAD(flush_list);
+ unsigned long index;
+
+ WARN_ON(!irqs_disabled());
+
+ /* Flush qp send and recv */
+ xa_lock(&dev->qp_tbl);
+ xa_for_each(&dev->qp_tbl, index, qp) {
+ kref_get(&qp->qp_kref);
+ list_add_tail(&qp->ibkill_flush_ent, &flush_list);
+ }
+ xa_unlock(&dev->qp_tbl);
+
+ list_for_each_entry_safe(qp, qp_tmp, &flush_list, ibkill_flush_ent) {
+ ionic_flush_qp(dev, qp);
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+ list_del(&qp->ibkill_flush_ent);
+ }
+
+ /* Notify completions */
+ xa_lock(&dev->cq_tbl);
+ xa_for_each(&dev->cq_tbl, index, cq) {
+ kref_get(&cq->cq_kref);
+ list_add_tail(&cq->ibkill_flush_ent, &flush_list);
+ }
+ xa_unlock(&dev->cq_tbl);
+
+ list_for_each_entry_safe(cq, cq_tmp, &flush_list, ibkill_flush_ent) {
+ ionic_notify_flush_cq(cq);
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+ list_del(&cq->ibkill_flush_ent);
+ }
+}
+
+static void ionic_kill_ibdev(struct ionic_ibdev *dev, bool fatal_path)
+{
+ unsigned long irqflags;
+ bool do_flush = false;
+ int i;
+
+ /* Mark AQs for drain and flush the QPs while irq is disabled */
+ local_irq_save(irqflags);
+
+ /* Mark the admin queue, flushing at most once */
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ struct ionic_aq *aq = dev->aq_vec[i];
+
+ spin_lock(&aq->lock);
+ if (atomic_read(&aq->admin_state) != IONIC_ADMIN_KILLED) {
+ atomic_set(&aq->admin_state, IONIC_ADMIN_KILLED);
+ /* Flush incomplete admin commands */
+ ionic_admin_poll_locked(aq);
+ do_flush = true;
+ }
+ spin_unlock(&aq->lock);
+ }
+
+ if (do_flush)
+ ionic_flush_qs(dev);
+
+ local_irq_restore(irqflags);
+
+ /* Post a fatal event if requested */
+ if (fatal_path) {
+ struct ib_event ev;
+
+ ev.device = &dev->ibdev;
+ ev.element.port_num = 1;
+ ev.event = IB_EVENT_DEVICE_FATAL;
+
+ ib_dispatch_event(&ev);
+ }
+
+ atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
+}
+
+void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path)
+{
+ enum ionic_admin_state old_state;
+ unsigned long irqflags = 0;
+ int i, rc;
+
+ if (!dev->aq_vec)
+ return;
+
+ /*
+ * Admin queues are transitioned from active to paused to killed state.
+ * When in paused state, no new commands are issued to the device,
+ * nor are any completed locally. After resetting the lif, it will be
+ * safe to resume the rdma admin queues in the killed state. Commands
+ * will not be issued to the device, but will complete locally with status
+ * IONIC_ADMIN_KILLED. Handling completion will ensure that creating or
+ * modifying resources fails, but destroying resources succeeds.
+ * If there was a failure resetting the lif using this strategy,
+ * then the state of the device is unknown.
+ */
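+ /* In brief, the lifecycle described above is:
+ *
+ * IONIC_ADMIN_ACTIVE -> IONIC_ADMIN_PAUSED -> (lif reset) ->
+ * IONIC_ADMIN_KILLED, where commands complete locally.
+ */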
+ old_state = atomic_cmpxchg(&dev->admin_state, IONIC_ADMIN_ACTIVE,
+ IONIC_ADMIN_PAUSED);
+ if (old_state != IONIC_ADMIN_ACTIVE)
+ return;
+
+ /* Pause all the AQs */
+ local_irq_save(irqflags);
+ for (i = 0; i < dev->lif_cfg.aq_count; i++) {
+ struct ionic_aq *aq = dev->aq_vec[i];
+
+ spin_lock(&aq->lock);
+ /* pause rdma admin queues to reset lif */
+ if (atomic_read(&aq->admin_state) == IONIC_ADMIN_ACTIVE)
+ atomic_set(&aq->admin_state, IONIC_ADMIN_PAUSED);
+ spin_unlock(&aq->lock);
+ }
+ local_irq_restore(irqflags);
+
+ rc = ionic_rdma_reset_devcmd(dev);
+ if (unlikely(rc)) {
+ ibdev_err(&dev->ibdev, "failed to reset rdma %d\n", rc);
+ ionic_request_rdma_reset(dev->lif_cfg.lif);
+ }
+
+ ionic_kill_ibdev(dev, fatal_path);
+}
+
+static void ionic_reset_work(struct work_struct *ws)
+{
+ struct ionic_ibdev *dev =
+ container_of(ws, struct ionic_ibdev, reset_work);
+
+ ionic_kill_rdma_admin(dev, true);
+}
+
+static bool ionic_next_eqe(struct ionic_eq *eq, struct ionic_v1_eqe *eqe)
+{
+ struct ionic_v1_eqe *qeqe;
+ bool color;
+
+ qeqe = ionic_queue_at_prod(&eq->q);
+ color = ionic_v1_eqe_color(qeqe);
+
+ /* cons is color for eq */
+ if (eq->q.cons != color)
+ return false;
+
+ /* Prevent out-of-order reads of the EQE */
+ dma_rmb();
+
+ ibdev_dbg(&eq->dev->ibdev, "poll eq prod %u\n", eq->q.prod);
+ print_hex_dump_debug("eqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ qeqe, BIT(eq->q.stride_log2), true);
+ *eqe = *qeqe;
+
+ return true;
+}
+
+static void ionic_cq_event(struct ionic_ibdev *dev, u32 cqid, u8 code)
+{
+ unsigned long irqflags;
+ struct ib_event ibev;
+ struct ionic_cq *cq;
+
+ xa_lock_irqsave(&dev->cq_tbl, irqflags);
+ cq = xa_load(&dev->cq_tbl, cqid);
+ if (cq)
+ kref_get(&cq->cq_kref);
+ xa_unlock_irqrestore(&dev->cq_tbl, irqflags);
+
+ if (!cq) {
+ ibdev_dbg(&dev->ibdev,
+ "missing cqid %#x code %u\n", cqid, code);
+ return;
+ }
+
+ switch (code) {
+ case IONIC_V1_EQE_CQ_NOTIFY:
+ if (cq->vcq->ibcq.comp_handler)
+ cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
+ cq->vcq->ibcq.cq_context);
+ break;
+
+ case IONIC_V1_EQE_CQ_ERR:
+ if (cq->vcq->ibcq.event_handler) {
+ ibev.event = IB_EVENT_CQ_ERR;
+ ibev.device = &dev->ibdev;
+ ibev.element.cq = &cq->vcq->ibcq;
+
+ cq->vcq->ibcq.event_handler(&ibev,
+ cq->vcq->ibcq.cq_context);
+ }
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unrecognized cqid %#x code %u\n", cqid, code);
+ break;
+ }
+
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+}
+
+static void ionic_qp_event(struct ionic_ibdev *dev, u32 qpid, u8 code)
+{
+ unsigned long irqflags;
+ struct ib_event ibev;
+ struct ionic_qp *qp;
+
+ xa_lock_irqsave(&dev->qp_tbl, irqflags);
+ qp = xa_load(&dev->qp_tbl, qpid);
+ if (qp)
+ kref_get(&qp->qp_kref);
+ xa_unlock_irqrestore(&dev->qp_tbl, irqflags);
+
+ if (!qp) {
+ ibdev_dbg(&dev->ibdev,
+ "missing qpid %#x code %u\n", qpid, code);
+ return;
+ }
+
+ ibev.device = &dev->ibdev;
+ ibev.element.qp = &qp->ibqp;
+
+ switch (code) {
+ case IONIC_V1_EQE_SQ_DRAIN:
+ ibev.event = IB_EVENT_SQ_DRAINED;
+ break;
+
+ case IONIC_V1_EQE_QP_COMM_EST:
+ ibev.event = IB_EVENT_COMM_EST;
+ break;
+
+ case IONIC_V1_EQE_QP_LAST_WQE:
+ ibev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR:
+ ibev.event = IB_EVENT_QP_FATAL;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR_REQUEST:
+ ibev.event = IB_EVENT_QP_REQ_ERR;
+ break;
+
+ case IONIC_V1_EQE_QP_ERR_ACCESS:
+ ibev.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unrecognized qpid %#x code %u\n", qpid, code);
+ goto out;
+ }
+
+ if (qp->ibqp.event_handler)
+ qp->ibqp.event_handler(&ibev, qp->ibqp.qp_context);
+
+out:
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+}
+
+static u16 ionic_poll_eq(struct ionic_eq *eq, u16 budget)
+{
+ struct ionic_ibdev *dev = eq->dev;
+ struct ionic_v1_eqe eqe;
+ u16 npolled = 0;
+ u8 type, code;
+ u32 evt, qid;
+
+ while (npolled < budget) {
+ if (!ionic_next_eqe(eq, &eqe))
+ break;
+
+ ionic_queue_produce(&eq->q);
+
+ /* cons is color for eq */
+ eq->q.cons = ionic_color_wrap(eq->q.prod, eq->q.cons);
+
+ ++npolled;
+
+ evt = ionic_v1_eqe_evt(&eqe);
+ type = ionic_v1_eqe_evt_type(evt);
+ code = ionic_v1_eqe_evt_code(evt);
+ qid = ionic_v1_eqe_evt_qid(evt);
+
+ switch (type) {
+ case IONIC_V1_EQE_TYPE_CQ:
+ ionic_cq_event(dev, qid, code);
+ break;
+
+ case IONIC_V1_EQE_TYPE_QP:
+ ionic_qp_event(dev, qid, code);
+ break;
+
+ default:
+ ibdev_dbg(&dev->ibdev,
+ "unknown event %#x type %u\n", evt, type);
+ }
+ }
+
+ return npolled;
+}
+
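+ /* Polling is bounded by a budget: when the budget is exhausted, the
+ * consumed credits are returned without unmasking and polling resumes
+ * from the workqueue; otherwise the EQ is re-armed and the interrupt
+ * is unmasked.
+ */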
+static void ionic_poll_eq_work(struct work_struct *work)
+{
+ struct ionic_eq *eq = container_of(work, struct ionic_eq, work);
+ u32 npolled;
+
+ if (unlikely(!eq->enable) || WARN_ON(eq->armed))
+ return;
+
+ npolled = ionic_poll_eq(eq, IONIC_EQ_WORK_BUDGET);
+ if (npolled == IONIC_EQ_WORK_BUDGET) {
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ npolled, 0);
+ queue_work(ionic_evt_workq, &eq->work);
+ } else {
+ xchg(&eq->armed, 1);
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ 0, IONIC_INTR_CRED_UNMASK);
+ }
+}
+
+static irqreturn_t ionic_poll_eq_isr(int irq, void *eqptr)
+{
+ struct ionic_eq *eq = eqptr;
+ int was_armed;
+ u32 npolled;
+
+ was_armed = xchg(&eq->armed, 0);
+
+ if (unlikely(!eq->enable) || !was_armed)
+ return IRQ_HANDLED;
+
+ npolled = ionic_poll_eq(eq, IONIC_EQ_ISR_BUDGET);
+ if (npolled == IONIC_EQ_ISR_BUDGET) {
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ npolled, 0);
+ queue_work(ionic_evt_workq, &eq->work);
+ } else {
+ xchg(&eq->armed, 1);
+ ionic_intr_credits(eq->dev->lif_cfg.intr_ctrl, eq->intr,
+ 0, IONIC_INTR_CRED_UNMASK);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct ionic_eq *ionic_create_eq(struct ionic_ibdev *dev, int eqid)
+{
+ struct ionic_intr_info intr_obj = { };
+ struct ionic_eq *eq;
+ int rc;
+
+ eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+ if (!eq)
+ return ERR_PTR(-ENOMEM);
+
+ eq->dev = dev;
+
+ rc = ionic_queue_init(&eq->q, dev->lif_cfg.hwdev, IONIC_EQ_DEPTH,
+ sizeof(struct ionic_v1_eqe));
+ if (rc)
+ goto err_q;
+
+ eq->eqid = eqid;
+
+ eq->armed = true;
+ eq->enable = false;
+ INIT_WORK(&eq->work, ionic_poll_eq_work);
+
+ rc = ionic_intr_alloc(dev->lif_cfg.lif, &intr_obj);
+ if (rc < 0)
+ goto err_intr;
+
+ eq->irq = intr_obj.vector;
+ eq->intr = intr_obj.index;
+
+ ionic_queue_dbell_init(&eq->q, eq->eqid);
+
+ /* cons is color for eq */
+ eq->q.cons = true;
+
+ snprintf(eq->name, sizeof(eq->name), "%s-%d-%d-eq",
+ "ionr", dev->lif_cfg.lif_index, eq->eqid);
+
+ ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
+ ionic_intr_mask_assert(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_SET);
+ ionic_intr_coal_init(dev->lif_cfg.intr_ctrl, eq->intr, 0);
+ ionic_intr_clean(dev->lif_cfg.intr_ctrl, eq->intr);
+
+ eq->enable = true;
+
+ rc = request_irq(eq->irq, ionic_poll_eq_isr, 0, eq->name, eq);
+ if (rc)
+ goto err_irq;
+
+ rc = ionic_rdma_queue_devcmd(dev, &eq->q, eq->eqid, eq->intr,
+ IONIC_CMD_RDMA_CREATE_EQ);
+ if (rc)
+ goto err_cmd;
+
+ ionic_intr_mask(dev->lif_cfg.intr_ctrl, eq->intr, IONIC_INTR_MASK_CLEAR);
+
+ return eq;
+
+err_cmd:
+ eq->enable = false;
+ free_irq(eq->irq, eq);
+ flush_work(&eq->work);
+err_irq:
+ ionic_intr_free(dev->lif_cfg.lif, eq->intr);
+err_intr:
+ ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
+err_q:
+ kfree(eq);
+
+ return ERR_PTR(rc);
+}
+
+static void ionic_destroy_eq(struct ionic_eq *eq)
+{
+ struct ionic_ibdev *dev = eq->dev;
+
+ eq->enable = false;
+ free_irq(eq->irq, eq);
+ flush_work(&eq->work);
+
+ ionic_intr_free(dev->lif_cfg.lif, eq->intr);
+ ionic_queue_destroy(&eq->q, dev->lif_cfg.hwdev);
+ kfree(eq);
+}
+
+int ionic_create_rdma_admin(struct ionic_ibdev *dev)
+{
+ int eq_i = 0, aq_i = 0, rc = 0;
+ struct ionic_vcq *vcq;
+ struct ionic_aq *aq;
+ struct ionic_eq *eq;
+
+ dev->eq_vec = NULL;
+ dev->aq_vec = NULL;
+
+ INIT_WORK(&dev->reset_work, ionic_reset_work);
+ INIT_DELAYED_WORK(&dev->admin_dwork, ionic_admin_dwork);
+ atomic_set(&dev->admin_state, IONIC_ADMIN_KILLED);
+
+ if (dev->lif_cfg.aq_count > IONIC_AQ_COUNT) {
+ ibdev_dbg(&dev->ibdev, "limiting adminq count to %d\n",
+ IONIC_AQ_COUNT);
+ dev->lif_cfg.aq_count = IONIC_AQ_COUNT;
+ }
+
+ if (dev->lif_cfg.eq_count > IONIC_EQ_COUNT) {
+ ibdev_dbg(&dev->ibdev, "limiting eventq count to %d\n",
+ IONIC_EQ_COUNT);
+ dev->lif_cfg.eq_count = IONIC_EQ_COUNT;
+ }
+
+ /* need at least two eqs and one aq */
+ if (dev->lif_cfg.eq_count < IONIC_EQ_COUNT_MIN ||
+ dev->lif_cfg.aq_count < IONIC_AQ_COUNT_MIN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ dev->eq_vec = kmalloc_array(dev->lif_cfg.eq_count, sizeof(*dev->eq_vec),
+ GFP_KERNEL);
+ if (!dev->eq_vec) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ for (eq_i = 0; eq_i < dev->lif_cfg.eq_count; ++eq_i) {
+ eq = ionic_create_eq(dev, eq_i + dev->lif_cfg.eq_base);
+ if (IS_ERR(eq)) {
+ rc = PTR_ERR(eq);
+
+ if (eq_i < IONIC_EQ_COUNT_MIN) {
+ ibdev_err(&dev->ibdev,
+ "failed to create eq %pe\n", eq);
+ goto out;
+ }
+
+ /* ok, just fewer eqs than the device supports */
+ ibdev_dbg(&dev->ibdev, "eq count %d want %d rc %pe\n",
+ eq_i, dev->lif_cfg.eq_count, eq);
+
+ rc = 0;
+ break;
+ }
+
+ dev->eq_vec[eq_i] = eq;
+ }
+
+ dev->lif_cfg.eq_count = eq_i;
+
+ dev->aq_vec = kmalloc_array(dev->lif_cfg.aq_count, sizeof(*dev->aq_vec),
+ GFP_KERNEL);
+ if (!dev->aq_vec) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Create one CQ per AQ, spread round-robin across the created EQs */
+ for (aq_i = 0; aq_i < dev->lif_cfg.aq_count; ++aq_i) {
+ vcq = ionic_create_rdma_admincq(dev, aq_i % eq_i);
+ if (IS_ERR(vcq)) {
+ rc = PTR_ERR(vcq);
+
+ if (!aq_i) {
+ ibdev_err(&dev->ibdev,
+ "failed to create acq %pe\n", vcq);
+ goto out;
+ }
+
+ /* ok, just fewer adminqs than the device supports */
+ ibdev_dbg(&dev->ibdev, "acq count %d want %d rc %pe\n",
+ aq_i, dev->lif_cfg.aq_count, vcq);
+ break;
+ }
+
+ aq = ionic_create_rdma_adminq(dev, aq_i + dev->lif_cfg.aq_base,
+ vcq->cq[0].cqid);
+ if (IS_ERR(aq)) {
+ /* Clean up the dangling CQ */
+ ionic_destroy_cq_common(dev, &vcq->cq[0]);
+ kfree(vcq);
+
+ rc = PTR_ERR(aq);
+
+ if (!aq_i) {
+ ibdev_err(&dev->ibdev,
+ "failed to create aq %pe\n", aq);
+ goto out;
+ }
+
+ /* ok, just fewer adminqs than the device supports */
+ ibdev_dbg(&dev->ibdev, "aq count %d want %d rc %pe\n",
+ aq_i, dev->lif_cfg.aq_count, aq);
+ break;
+ }
+
+ vcq->ibcq.cq_context = aq;
+ aq->vcq = vcq;
+
+ atomic_set(&aq->admin_state, IONIC_ADMIN_ACTIVE);
+ dev->aq_vec[aq_i] = aq;
+ }
+
+ atomic_set(&dev->admin_state, IONIC_ADMIN_ACTIVE);
+out:
+ dev->lif_cfg.eq_count = eq_i;
+ dev->lif_cfg.aq_count = aq_i;
+
+ return rc;
+}
+
+void ionic_destroy_rdma_admin(struct ionic_ibdev *dev)
+{
+ struct ionic_vcq *vcq;
+ struct ionic_aq *aq;
+ struct ionic_eq *eq;
+
+ /*
+ * Killing the admin queues before destroying them ensures that all
+ * admin commands and completions are flushed. Setting admin_state to
+ * IONIC_ADMIN_KILLED prevents further work from being queued.
+ */
+ cancel_delayed_work_sync(&dev->admin_dwork);
+ cancel_work_sync(&dev->reset_work);
+
+ if (dev->aq_vec) {
+ while (dev->lif_cfg.aq_count > 0) {
+ aq = dev->aq_vec[--dev->lif_cfg.aq_count];
+ vcq = aq->vcq;
+
+ cancel_work_sync(&aq->work);
+
+ __ionic_destroy_rdma_adminq(dev, aq);
+ if (vcq) {
+ ionic_destroy_cq_common(dev, &vcq->cq[0]);
+ kfree(vcq);
+ }
+ }
+
+ kfree(dev->aq_vec);
+ }
+
+ if (dev->eq_vec) {
+ while (dev->lif_cfg.eq_count > 0) {
+ eq = dev->eq_vec[--dev->lif_cfg.eq_count];
+ ionic_destroy_eq(eq);
+ }
+
+ kfree(dev->eq_vec);
+ }
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_controlpath.c b/drivers/infiniband/hw/ionic/ionic_controlpath.c
new file mode 100644
index 000000000000..ea12d9b8e125
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_controlpath.c
@@ -0,0 +1,2679 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <rdma/ib_user_verbs.h>
+#include <ionic_api.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
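+ /* The two low bits of the tos/traffic-class octet are the ECN field:
+ * ionic_set_ecn() forces ECT(0) (binary 10), e.g. 0x03 -> 0x02, and
+ * ionic_clear_ecn() zeroes both ECN bits, e.g. 0x03 -> 0x00.
+ */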
+#define ionic_set_ecn(tos) (((tos) | 2u) & ~1u)
+#define ionic_clear_ecn(tos) ((tos) & ~3u)
+
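+ /* A userspace queue descriptor must describe a page-aligned ring with
+ * a power-of-two depth (mask == depth - 1) whose mapping covers at
+ * least depth * stride bytes.
+ */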
+static int ionic_validate_qdesc(struct ionic_qdesc *q)
+{
+ if (!q->addr || !q->size || !q->mask ||
+ !q->depth_log2 || !q->stride_log2)
+ return -EINVAL;
+
+ if (q->addr & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ if (q->mask != BIT(q->depth_log2) - 1)
+ return -EINVAL;
+
+ if (q->size < BIT_ULL(q->depth_log2 + q->stride_log2))
+ return -EINVAL;
+
+ return 0;
+}
+
+static u32 ionic_get_eqid(struct ionic_ibdev *dev, u32 comp_vector, u8 udma_idx)
+{
+ /* There is one EQ per vector per udma, and the first EQs are reserved
+ * for async events. The rest of the vectors can be requested for
+ * completions.
+ */
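+ /* Illustrative example: with eq_count 8 and udma_count 2,
+ * comp_vec_count is 3; comp_vector 0 on udma 1 maps to eqid
+ * (0 % 3 + 1) * 2 + 1 = 3, while eqids 0 and 1 are reserved
+ * for async events.
+ */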
+ u32 comp_vec_count = dev->lif_cfg.eq_count / dev->lif_cfg.udma_count - 1;
+
+ return (comp_vector % comp_vec_count + 1) * dev->lif_cfg.udma_count + udma_idx;
+}
+
+static int ionic_get_cqid(struct ionic_ibdev *dev, u32 *cqid, u8 udma_idx)
+{
+ unsigned int size, base, bound;
+ int rc;
+
+ size = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
+ base = size * udma_idx;
+ bound = base + size;
+
+ rc = ionic_resid_get_shared(&dev->inuse_cqid, base, bound);
+ if (rc >= 0) {
+ /* cq_base is zero or a multiple of two queue groups */
+ *cqid = dev->lif_cfg.cq_base +
+ ionic_bitid_to_qid(rc, dev->lif_cfg.udma_qgrp_shift,
+ dev->half_cqid_udma_shift);
+
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static void ionic_put_cqid(struct ionic_ibdev *dev, u32 cqid)
+{
+ u32 bitid = ionic_qid_to_bitid(cqid - dev->lif_cfg.cq_base,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_cqid_udma_shift);
+
+ ionic_resid_put(&dev->inuse_cqid, bitid);
+}
+
+int ionic_create_cq_common(struct ionic_vcq *vcq,
+ struct ionic_tbl_buf *buf,
+ const struct ib_cq_init_attr *attr,
+ struct ionic_ctx *ctx,
+ struct ib_udata *udata,
+ struct ionic_qdesc *req_cq,
+ __u32 *resp_cqid,
+ int udma_idx)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(vcq->ibcq.device);
+ struct ionic_cq *cq = &vcq->cq[udma_idx];
+ void *entry;
+ int rc;
+
+ cq->vcq = vcq;
+
+ if (attr->cqe < 1 || attr->cqe + IONIC_CQ_GRACE > 0xffff) {
+ rc = -EINVAL;
+ goto err_args;
+ }
+
+ rc = ionic_get_cqid(dev, &cq->cqid, udma_idx);
+ if (rc)
+ goto err_args;
+
+ cq->eqid = ionic_get_eqid(dev, attr->comp_vector, udma_idx);
+
+ spin_lock_init(&cq->lock);
+ INIT_LIST_HEAD(&cq->poll_sq);
+ INIT_LIST_HEAD(&cq->flush_sq);
+ INIT_LIST_HEAD(&cq->flush_rq);
+
+ if (udata) {
+ rc = ionic_validate_qdesc(req_cq);
+ if (rc)
+ goto err_qdesc;
+
+ cq->umem = ib_umem_get(&dev->ibdev, req_cq->addr, req_cq->size,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(cq->umem)) {
+ rc = PTR_ERR(cq->umem);
+ goto err_qdesc;
+ }
+
+ cq->q.ptr = NULL;
+ cq->q.size = req_cq->size;
+ cq->q.mask = req_cq->mask;
+ cq->q.depth_log2 = req_cq->depth_log2;
+ cq->q.stride_log2 = req_cq->stride_log2;
+
+ *resp_cqid = cq->cqid;
+ } else {
+ rc = ionic_queue_init(&cq->q, dev->lif_cfg.hwdev,
+ attr->cqe + IONIC_CQ_GRACE,
+ sizeof(struct ionic_v1_cqe));
+ if (rc)
+ goto err_q_init;
+
+ ionic_queue_dbell_init(&cq->q, cq->cqid);
+ cq->color = true;
+ cq->credit = cq->q.mask;
+ }
+
+ rc = ionic_pgtbl_init(dev, buf, cq->umem, cq->q.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_pgtbl_init;
+
+ init_completion(&cq->cq_rel_comp);
+ kref_init(&cq->cq_kref);
+
+ entry = xa_store_irq(&dev->cq_tbl, cq->cqid, cq, GFP_KERNEL);
+ if (entry) {
+ if (!xa_is_err(entry))
+ rc = -EINVAL;
+ else
+ rc = xa_err(entry);
+
+ goto err_xa;
+ }
+
+ return 0;
+
+err_xa:
+ ionic_pgtbl_unbuf(dev, buf);
+err_pgtbl_init:
+ if (!udata)
+ ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
+err_q_init:
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+err_qdesc:
+ ionic_put_cqid(dev, cq->cqid);
+err_args:
+ cq->vcq = NULL;
+
+ return rc;
+}
+
+void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq)
+{
+ if (!cq->vcq)
+ return;
+
+ xa_erase_irq(&dev->cq_tbl, cq->cqid);
+
+ kref_put(&cq->cq_kref, ionic_cq_complete);
+ wait_for_completion(&cq->cq_rel_comp);
+
+ if (cq->umem)
+ ib_umem_release(cq->umem);
+ else
+ ionic_queue_destroy(&cq->q, dev->lif_cfg.hwdev);
+
+ ionic_put_cqid(dev, cq->cqid);
+
+ cq->vcq = NULL;
+}
+
+static int ionic_validate_qdesc_zero(struct ionic_qdesc *q)
+{
+ if (q->addr || q->size || q->mask || q->depth_log2 || q->stride_log2)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ionic_get_pdid(struct ionic_ibdev *dev, u32 *pdid)
+{
+ int rc;
+
+ rc = ionic_resid_get(&dev->inuse_pdid);
+ if (rc < 0)
+ return rc;
+
+ *pdid = rc;
+ return 0;
+}
+
+static int ionic_get_ahid(struct ionic_ibdev *dev, u32 *ahid)
+{
+ int rc;
+
+ rc = ionic_resid_get(&dev->inuse_ahid);
+ if (rc < 0)
+ return rc;
+
+ *ahid = rc;
+ return 0;
+}
+
+static int ionic_get_mrid(struct ionic_ibdev *dev, u32 *mrid)
+{
+ int rc;
+
+ /* wrap to 1, skip reserved lkey */
+ rc = ionic_resid_get_shared(&dev->inuse_mrid, 1,
+ dev->inuse_mrid.inuse_size);
+ if (rc < 0)
+ return rc;
+
+ *mrid = ionic_mrid(rc, dev->next_mrkey++);
+ return 0;
+}
+
+static int ionic_get_gsi_qpid(struct ionic_ibdev *dev, u32 *qpid)
+{
+ int rc = 0;
+
+ rc = ionic_resid_get_shared(&dev->inuse_qpid, IB_QPT_GSI, IB_QPT_GSI + 1);
+ if (rc < 0)
+ return rc;
+
+ *qpid = IB_QPT_GSI;
+ return 0;
+}
+
+static int ionic_get_qpid(struct ionic_ibdev *dev, u32 *qpid,
+ u8 *udma_idx, u8 udma_mask)
+{
+ unsigned int size, base, bound;
+ int udma_i, udma_x, udma_ix;
+ int rc = -EINVAL;
+
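+ /* Alternate the preferred udma between calls to spread QPs evenly;
+ * with udma_count 2, the XOR below toggles the starting index
+ * between 0 and 1.
+ */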
+ udma_x = dev->next_qpid_udma_idx;
+
+ dev->next_qpid_udma_idx ^= dev->lif_cfg.udma_count - 1;
+
+ for (udma_i = 0; udma_i < dev->lif_cfg.udma_count; ++udma_i) {
+ udma_ix = udma_i ^ udma_x;
+
+ if (!(udma_mask & BIT(udma_ix)))
+ continue;
+
+ size = dev->lif_cfg.qp_count / dev->lif_cfg.udma_count;
+ base = size * udma_ix;
+ bound = base + size;
+
+ /* skip reserved SMI and GSI qpids in group zero */
+ if (!base)
+ base = 2;
+
+ rc = ionic_resid_get_shared(&dev->inuse_qpid, base, bound);
+ if (rc >= 0) {
+ *qpid = ionic_bitid_to_qid(rc,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_qpid_udma_shift);
+ *udma_idx = udma_ix;
+
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int ionic_get_dbid(struct ionic_ibdev *dev, u32 *dbid, phys_addr_t *addr)
+{
+ int rc, dbpage_num;
+
+ /* wrap to 1, skip kernel reserved */
+ rc = ionic_resid_get_shared(&dev->inuse_dbid, 1,
+ dev->inuse_dbid.inuse_size);
+ if (rc < 0)
+ return rc;
+
+ dbpage_num = (dev->lif_cfg.lif_hw_index * dev->lif_cfg.dbid_count) + rc;
+ *addr = dev->lif_cfg.db_phys + ((phys_addr_t)dbpage_num << PAGE_SHIFT);
+
+ *dbid = rc;
+
+ return 0;
+}
+
+static void ionic_put_pdid(struct ionic_ibdev *dev, u32 pdid)
+{
+ ionic_resid_put(&dev->inuse_pdid, pdid);
+}
+
+static void ionic_put_ahid(struct ionic_ibdev *dev, u32 ahid)
+{
+ ionic_resid_put(&dev->inuse_ahid, ahid);
+}
+
+static void ionic_put_mrid(struct ionic_ibdev *dev, u32 mrid)
+{
+ ionic_resid_put(&dev->inuse_mrid, ionic_mrid_index(mrid));
+}
+
+static void ionic_put_qpid(struct ionic_ibdev *dev, u32 qpid)
+{
+ u32 bitid = ionic_qid_to_bitid(qpid,
+ dev->lif_cfg.udma_qgrp_shift,
+ dev->half_qpid_udma_shift);
+
+ ionic_resid_put(&dev->inuse_qpid, bitid);
+}
+
+static void ionic_put_dbid(struct ionic_ibdev *dev, u32 dbid)
+{
+ ionic_resid_put(&dev->inuse_dbid, dbid);
+}
+
+static struct rdma_user_mmap_entry *
+ionic_mmap_entry_insert(struct ionic_ctx *ctx, unsigned long size,
+ unsigned long pfn, u8 mmap_flags, u64 *offset)
+{
+ struct ionic_mmap_entry *entry;
+ int rc;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->size = size;
+ entry->pfn = pfn;
+ entry->mmap_flags = mmap_flags;
+
+ rc = rdma_user_mmap_entry_insert(&ctx->ibctx, &entry->rdma_entry,
+ entry->size);
+ if (rc) {
+ kfree(entry);
+ return NULL;
+ }
+
+ if (offset)
+ *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+ return &entry->rdma_entry;
+}
+
+int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+ struct ionic_ctx_resp resp = {};
+ struct ionic_ctx_req req;
+ phys_addr_t db_phys = 0;
+ int rc;
+
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+
+ /* try to allocate dbid for user ctx */
+ rc = ionic_get_dbid(dev, &ctx->dbid, &db_phys);
+ if (rc < 0)
+ return rc;
+
+ ibdev_dbg(&dev->ibdev, "user space dbid %u\n", ctx->dbid);
+
+ ctx->mmap_dbell = ionic_mmap_entry_insert(ctx, PAGE_SIZE,
+ PHYS_PFN(db_phys), 0, NULL);
+ if (!ctx->mmap_dbell) {
+ rc = -ENOMEM;
+ goto err_mmap_dbell;
+ }
+
+ resp.page_shift = PAGE_SHIFT;
+
+ resp.dbell_offset = db_phys & ~PAGE_MASK;
+
+ resp.version = dev->lif_cfg.rdma_version;
+ resp.qp_opcodes = dev->lif_cfg.qp_opcodes;
+ resp.admin_opcodes = dev->lif_cfg.admin_opcodes;
+
+ resp.sq_qtype = dev->lif_cfg.sq_qtype;
+ resp.rq_qtype = dev->lif_cfg.rq_qtype;
+ resp.cq_qtype = dev->lif_cfg.cq_qtype;
+ resp.admin_qtype = dev->lif_cfg.aq_qtype;
+ resp.max_stride = dev->lif_cfg.max_stride;
+ resp.max_spec = IONIC_SPEC_HIGH;
+
+ resp.udma_count = dev->lif_cfg.udma_count;
+ resp.expdb_mask = dev->lif_cfg.expdb_mask;
+
+ if (dev->lif_cfg.sq_expdb)
+ resp.expdb_qtypes |= IONIC_EXPDB_SQ;
+ if (dev->lif_cfg.rq_expdb)
+ resp.expdb_qtypes |= IONIC_EXPDB_RQ;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+
+ return 0;
+
+err_resp:
+ rdma_user_mmap_entry_remove(ctx->mmap_dbell);
+err_mmap_dbell:
+ ionic_put_dbid(dev, ctx->dbid);
+
+ return rc;
+}
+
+void ionic_dealloc_ucontext(struct ib_ucontext *ibctx)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+
+ rdma_user_mmap_entry_remove(ctx->mmap_dbell);
+ ionic_put_dbid(dev, ctx->dbid);
+}
+
+int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibctx->device);
+ struct ionic_ctx *ctx = to_ionic_ctx(ibctx);
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct ionic_mmap_entry *ionic_entry;
+ int rc = 0;
+
+ rdma_entry = rdma_user_mmap_entry_get(&ctx->ibctx, vma);
+ if (!rdma_entry) {
+ ibdev_dbg(&dev->ibdev, "not found %#lx\n",
+ vma->vm_pgoff << PAGE_SHIFT);
+ return -EINVAL;
+ }
+
+ ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
+ rdma_entry);
+
+ ibdev_dbg(&dev->ibdev, "writecombine? %d\n",
+ ionic_entry->mmap_flags & IONIC_MMAP_WC);
+ if (ionic_entry->mmap_flags & IONIC_MMAP_WC)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ibdev_dbg(&dev->ibdev, "remap st %#lx pf %#lx sz %#lx\n",
+ vma->vm_start, ionic_entry->pfn, ionic_entry->size);
+ rc = rdma_user_mmap_io(&ctx->ibctx, vma, ionic_entry->pfn,
+ ionic_entry->size, vma->vm_page_prot,
+ rdma_entry);
+ if (rc)
+ ibdev_dbg(&dev->ibdev, "remap failed %d\n", rc);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+ return rc;
+}
+
+void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct ionic_mmap_entry *ionic_entry;
+
+ ionic_entry = container_of(rdma_entry, struct ionic_mmap_entry,
+ rdma_entry);
+ kfree(ionic_entry);
+}
+
+int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+
+ return ionic_get_pdid(dev, &pd->pdid);
+}
+
+int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+
+ ionic_put_pdid(dev, pd->pdid);
+
+ return 0;
+}
+
+static int ionic_build_hdr(struct ionic_ibdev *dev,
+ struct ib_ud_header *hdr,
+ const struct rdma_ah_attr *attr,
+ u16 sport, bool want_ecn)
+{
+ const struct ib_global_route *grh;
+ enum rdma_network_type net;
+ u16 vlan;
+ int rc;
+
+ if (attr->ah_flags != IB_AH_GRH)
+ return -EINVAL;
+ if (attr->type != RDMA_AH_ATTR_TYPE_ROCE)
+ return -EINVAL;
+
+ grh = rdma_ah_read_grh(attr);
+
+ rc = rdma_read_gid_l2_fields(grh->sgid_attr, &vlan, &hdr->eth.smac_h[0]);
+ if (rc)
+ return rc;
+
+ net = rdma_gid_attr_network_type(grh->sgid_attr);
+
+ rc = ib_ud_header_init(0, /* no payload */
+ 0, /* no lrh */
+ 1, /* yes eth */
+ vlan != 0xffff,
+ 0, /* no grh */
+ net == RDMA_NETWORK_IPV4 ? 4 : 6,
+ 1, /* yes udp */
+ 0, /* no imm */
+ hdr);
+ if (rc)
+ return rc;
+
+ ether_addr_copy(hdr->eth.dmac_h, attr->roce.dmac);
+
+ if (net == RDMA_NETWORK_IPV4) {
+ hdr->eth.type = cpu_to_be16(ETH_P_IP);
+ hdr->ip4.frag_off = cpu_to_be16(0x4000); /* don't fragment */
+ hdr->ip4.ttl = grh->hop_limit;
+ hdr->ip4.tot_len = cpu_to_be16(0xffff);
+ hdr->ip4.saddr =
+ *(const __be32 *)(grh->sgid_attr->gid.raw + 12);
+ hdr->ip4.daddr = *(const __be32 *)(grh->dgid.raw + 12);
+
+ if (want_ecn)
+ hdr->ip4.tos = ionic_set_ecn(grh->traffic_class);
+ else
+ hdr->ip4.tos = ionic_clear_ecn(grh->traffic_class);
+ } else {
+ hdr->eth.type = cpu_to_be16(ETH_P_IPV6);
+ hdr->grh.flow_label = cpu_to_be32(grh->flow_label);
+ hdr->grh.hop_limit = grh->hop_limit;
+ hdr->grh.source_gid = grh->sgid_attr->gid;
+ hdr->grh.destination_gid = grh->dgid;
+
+ if (want_ecn)
+ hdr->grh.traffic_class =
+ ionic_set_ecn(grh->traffic_class);
+ else
+ hdr->grh.traffic_class =
+ ionic_clear_ecn(grh->traffic_class);
+ }
+
+ if (vlan != 0xffff) {
+ vlan |= rdma_ah_get_sl(attr) << VLAN_PRIO_SHIFT;
+ hdr->vlan.tag = cpu_to_be16(vlan);
+ hdr->vlan.type = hdr->eth.type;
+ hdr->eth.type = cpu_to_be16(ETH_P_8021Q);
+ }
+
+ hdr->udp.sport = cpu_to_be16(sport);
+ hdr->udp.dport = cpu_to_be16(ROCE_V2_UDP_DPORT);
+
+ return 0;
+}
+
+static void ionic_set_ah_attr(struct ionic_ibdev *dev,
+ struct rdma_ah_attr *ah_attr,
+ struct ib_ud_header *hdr,
+ int sgid_index)
+{
+ u32 flow_label;
+ u16 vlan = 0;
+ u8 tos, ttl;
+
+ if (hdr->vlan_present)
+ vlan = be16_to_cpu(hdr->vlan.tag);
+
+ if (hdr->ipv4_present) {
+ flow_label = 0;
+ ttl = hdr->ip4.ttl;
+ tos = hdr->ip4.tos;
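+ /* Synthesize the IPv4-mapped form ::ffff:a.b.c.d in the
+ * destination gid, which is used for the dgid below.
+ */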
+ *(__be16 *)(hdr->grh.destination_gid.raw + 10) = cpu_to_be16(0xffff);
+ *(__be32 *)(hdr->grh.destination_gid.raw + 12) = hdr->ip4.daddr;
+ } else {
+ flow_label = be32_to_cpu(hdr->grh.flow_label);
+ ttl = hdr->grh.hop_limit;
+ tos = hdr->grh.traffic_class;
+ }
+
+ memset(ah_attr, 0, sizeof(*ah_attr));
+ ah_attr->type = RDMA_AH_ATTR_TYPE_ROCE;
+ if (hdr->eth_present)
+ ether_addr_copy(ah_attr->roce.dmac, hdr->eth.dmac_h);
+ rdma_ah_set_sl(ah_attr, vlan >> VLAN_PRIO_SHIFT);
+ rdma_ah_set_port_num(ah_attr, 1);
+ rdma_ah_set_grh(ah_attr, NULL, flow_label, sgid_index, ttl, tos);
+ rdma_ah_set_dgid_raw(ah_attr, &hdr->grh.destination_gid);
+}
+
+static int ionic_create_ah_cmd(struct ionic_ibdev *dev,
+ struct ionic_ah *ah,
+ struct ionic_pd *pd,
+ struct rdma_ah_attr *attr,
+ u32 flags)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_AH,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_AH_IN_V1_LEN),
+ .cmd.create_ah = {
+ .pd_id = cpu_to_le32(pd->pdid),
+ .dbid_flags = cpu_to_le16(dev->lif_cfg.dbid),
+ .id_ver = cpu_to_le32(ah->ahid),
+ }
+ }
+ };
+ enum ionic_admin_flags admin_flags = 0;
+ dma_addr_t hdr_dma = 0;
+ void *hdr_buf;
+ gfp_t gfp = GFP_ATOMIC;
+ int rc, hdr_len = 0;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_AH)
+ return -EBADRQC;
+
+ if (flags & RDMA_CREATE_AH_SLEEPABLE)
+ gfp = GFP_KERNEL;
+ else
+ admin_flags |= IONIC_ADMIN_F_BUSYWAIT;
+
+ rc = ionic_build_hdr(dev, &ah->hdr, attr, IONIC_ROCE_UDP_SPORT, false);
+ if (rc)
+ return rc;
+
+ if (ah->hdr.eth.type == cpu_to_be16(ETH_P_8021Q)) {
+ if (ah->hdr.vlan.type == cpu_to_be16(ETH_P_IP))
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP;
+ else
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP;
+ } else {
+ if (ah->hdr.eth.type == cpu_to_be16(ETH_P_IP))
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
+ else
+ wr.wqe.cmd.create_ah.csum_profile =
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
+ }
+
+ ah->sgid_index = rdma_ah_read_grh(attr)->sgid_index;
+
+ hdr_buf = kmalloc(PAGE_SIZE, gfp);
+ if (!hdr_buf)
+ return -ENOMEM;
+
+ hdr_len = ib_ud_header_pack(&ah->hdr, hdr_buf);
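+ /* Trim BTH and DETH from the template length; presumably the
+ * device appends those itself.
+ */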
+ hdr_len -= IB_BTH_BYTES;
+ hdr_len -= IB_DETH_BYTES;
+ ibdev_dbg(&dev->ibdev, "roce packet header template\n");
+ print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr_buf, hdr_len, true);
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
+ DMA_TO_DEVICE);
+
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ wr.wqe.cmd.create_ah.dma_addr = cpu_to_le64(hdr_dma);
+ wr.wqe.cmd.create_ah.length = cpu_to_le32(hdr_len);
+
+ ionic_admin_post(dev, &wr);
+ rc = ionic_admin_wait(dev, &wr, admin_flags);
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
+ DMA_TO_DEVICE);
+err_dma:
+ kfree(hdr_buf);
+
+ return rc;
+}
+
+static int ionic_destroy_ah_cmd(struct ionic_ibdev *dev, u32 ahid, u32 flags)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_AH,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_AH_IN_V1_LEN),
+ .cmd.destroy_ah = {
+ .ah_id = cpu_to_le32(ahid),
+ },
+ }
+ };
+ enum ionic_admin_flags admin_flags = IONIC_ADMIN_F_TEARDOWN;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_AH)
+ return -EBADRQC;
+
+ if (!(flags & RDMA_CREATE_AH_SLEEPABLE))
+ admin_flags |= IONIC_ADMIN_F_BUSYWAIT;
+
+ ionic_admin_post(dev, &wr);
+ ionic_admin_wait(dev, &wr, admin_flags);
+
+ /* No host-memory resource is associated with the ah, so it is ok
+ * to "succeed" and complete this destroy ah request on the host.
+ */
+ return 0;
+}
+
+int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct rdma_ah_attr *attr = init_attr->ah_attr;
+ struct ionic_pd *pd = to_ionic_pd(ibah->pd);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+ struct ionic_ah_resp resp = {};
+ u32 flags = init_attr->flags;
+ int rc;
+
+ rc = ionic_get_ahid(dev, &ah->ahid);
+ if (rc)
+ return rc;
+
+ rc = ionic_create_ah_cmd(dev, ah, pd, attr, flags);
+ if (rc)
+ goto err_cmd;
+
+ if (udata) {
+ resp.ahid = ah->ahid;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ return 0;
+
+err_resp:
+ ionic_destroy_ah_cmd(dev, ah->ahid, flags);
+err_cmd:
+ ionic_put_ahid(dev, ah->ahid);
+ return rc;
+}
+
+int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+
+ ionic_set_ah_attr(dev, ah_attr, &ah->hdr, ah->sgid_index);
+
+ return 0;
+}
+
+int ionic_destroy_ah(struct ib_ah *ibah, u32 flags)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibah->device);
+ struct ionic_ah *ah = to_ionic_ah(ibah);
+ int rc;
+
+ rc = ionic_destroy_ah_cmd(dev, ah->ahid, flags);
+ if (rc)
+ return rc;
+
+ ionic_put_ahid(dev, ah->ahid);
+
+ return 0;
+}
+
+static int ionic_create_mr_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_mr *mr,
+ u64 addr,
+ u64 length)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_MR,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_MR_IN_V1_LEN),
+ .cmd.create_mr = {
+ .va = cpu_to_le64(addr),
+ .length = cpu_to_le64(length),
+ .pd_id = cpu_to_le32(pd->pdid),
+ .page_size_log2 = mr->buf.page_size_log2,
+ .tbl_index = cpu_to_le32(~0),
+ .map_count = cpu_to_le32(mr->buf.tbl_pages),
+ .dma_addr = ionic_pgtbl_dma(&mr->buf, addr),
+ .dbid_flags = cpu_to_le16(mr->flags),
+ .id_ver = cpu_to_le32(mr->mrid),
+ }
+ }
+ };
+ int rc;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_MR)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+ rc = ionic_admin_wait(dev, &wr, 0);
+ if (!rc)
+ mr->created = true;
+
+ return rc;
+}
+
+static int ionic_destroy_mr_cmd(struct ionic_ibdev *dev, u32 mrid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_MR,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_MR_IN_V1_LEN),
+ .cmd.destroy_mr = {
+ .mr_id = cpu_to_le32(mrid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_MR)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access)
+{
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->ibmr.lkey = IONIC_DMA_LKEY;
+ mr->ibmr.rkey = IONIC_DMA_RKEY;
+
+ if (pd)
+ pd->flags |= IONIC_QPF_PRIVILEGED;
+
+ return &mr->ibmr;
+}
+
+struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 addr, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+ unsigned long pg_sz;
+ int rc;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+ mr->ibmr.iova = addr;
+ mr->ibmr.length = length;
+
+ mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);
+
+ mr->umem = ib_umem_get(&dev->ibdev, start, length, access);
+ if (IS_ERR(mr->umem)) {
+ rc = PTR_ERR(mr->umem);
+ goto err_umem;
+ }
+
+ pg_sz = ib_umem_find_best_pgsz(mr->umem,
+ dev->lif_cfg.page_size_supported,
+ addr);
+ if (!pg_sz) {
+ rc = -EINVAL;
+ goto err_pgtbl;
+ }
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
+ if (rc)
+ goto err_pgtbl;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ib_umem_release(mr->umem);
+err_umem:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
+ u64 length, u64 addr, int fd, int access,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct ionic_mr *mr;
+ u64 pg_sz;
+ int rc;
+
+ if (dmah)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+ mr->ibmr.iova = addr;
+ mr->ibmr.length = length;
+
+ mr->flags = IONIC_MRF_USER_MR | to_ionic_mr_flags(access);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(&dev->ibdev, offset, length,
+ fd, access);
+ if (IS_ERR(umem_dmabuf)) {
+ rc = PTR_ERR(umem_dmabuf);
+ goto err_umem;
+ }
+
+ mr->umem = &umem_dmabuf->umem;
+
+ pg_sz = ib_umem_find_best_pgsz(mr->umem,
+ dev->lif_cfg.page_size_supported,
+ addr);
+ if (!pg_sz) {
+ rc = -EINVAL;
+ goto err_pgtbl;
+ }
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, 1, pg_sz);
+ if (rc)
+ goto err_pgtbl;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, addr, length);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ib_umem_release(mr->umem);
+err_umem:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+ int rc;
+
+ if (!mr->ibmr.lkey)
+ goto out;
+
+ if (mr->created) {
+ rc = ionic_destroy_mr_cmd(dev, mr->mrid);
+ if (rc)
+ return rc;
+ }
+
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+
+ if (mr->umem)
+ ib_umem_release(mr->umem);
+
+ ionic_put_mrid(dev, mr->mrid);
+
+out:
+ kfree(mr);
+
+ return 0;
+}
+
+struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
+ u32 max_sg)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibpd->device);
+ struct ionic_pd *pd = to_ionic_pd(ibpd);
+ struct ionic_mr *mr;
+ int rc;
+
+ if (type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ goto err_mrid;
+
+ mr->ibmr.lkey = mr->mrid;
+ mr->ibmr.rkey = mr->mrid;
+
+ mr->flags = IONIC_MRF_PHYS_MR;
+
+ rc = ionic_pgtbl_init(dev, &mr->buf, mr->umem, 0, max_sg, PAGE_SIZE);
+ if (rc)
+ goto err_pgtbl;
+
+ mr->buf.tbl_pages = 0;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
+ if (rc)
+ goto err_cmd;
+
+ return &mr->ibmr;
+
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &mr->buf);
+err_pgtbl:
+ ionic_put_mrid(dev, mr->mrid);
+err_mrid:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+static int ionic_map_mr_page(struct ib_mr *ibmr, u64 dma)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+
+ ibdev_dbg(&dev->ibdev, "dma %p\n", (void *)dma);
+ return ionic_pgtbl_page(&mr->buf, dma);
+}
+
+int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmr->device);
+ struct ionic_mr *mr = to_ionic_mr(ibmr);
+ int rc;
+
+ /* mr must be allocated using ib_alloc_mr() */
+ if (unlikely(!mr->buf.tbl_limit))
+ return -EINVAL;
+
+ mr->buf.tbl_pages = 0;
+
+ if (mr->buf.tbl_buf)
+ dma_sync_single_for_cpu(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
+ mr->buf.tbl_size, DMA_TO_DEVICE);
+
+ ibdev_dbg(&dev->ibdev, "sg %p nent %d\n", sg, sg_nents);
+ rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ionic_map_mr_page);
+
+ mr->buf.page_size_log2 = order_base_2(ibmr->page_size);
+
+ if (mr->buf.tbl_buf)
+ dma_sync_single_for_device(dev->lif_cfg.hwdev, mr->buf.tbl_dma,
+ mr->buf.tbl_size, DMA_TO_DEVICE);
+
+ return rc;
+}
+
+int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
+ struct ionic_pd *pd = to_ionic_pd(ibmw->pd);
+ struct ionic_mr *mr = to_ionic_mw(ibmw);
+ int rc;
+
+ rc = ionic_get_mrid(dev, &mr->mrid);
+ if (rc)
+ return rc;
+
+ mr->ibmw.rkey = mr->mrid;
+
+ if (mr->ibmw.type == IB_MW_TYPE_1)
+ mr->flags = IONIC_MRF_MW_1;
+ else
+ mr->flags = IONIC_MRF_MW_2;
+
+ rc = ionic_create_mr_cmd(dev, pd, mr, 0, 0);
+ if (rc)
+ goto err_cmd;
+
+ return 0;
+
+err_cmd:
+ ionic_put_mrid(dev, mr->mrid);
+ return rc;
+}
+
+int ionic_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibmw->device);
+ struct ionic_mr *mr = to_ionic_mw(ibmw);
+ int rc;
+
+ rc = ionic_destroy_mr_cmd(dev, mr->mrid);
+ if (rc)
+ return rc;
+
+ ionic_put_mrid(dev, mr->mrid);
+
+ return 0;
+}
+
+static int ionic_create_cq_cmd(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_cq *cq,
+ struct ionic_tbl_buf *buf)
+{
+ const u16 dbid = ionic_ctx_dbid(dev, ctx);
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_CQ,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_CQ_IN_V1_LEN),
+ .cmd.create_cq = {
+ .eq_id = cpu_to_le32(cq->eqid),
+ .depth_log2 = cq->q.depth_log2,
+ .stride_log2 = cq->q.stride_log2,
+ .page_size_log2 = buf->page_size_log2,
+ .tbl_index = cpu_to_le32(~0),
+ .map_count = cpu_to_le32(buf->tbl_pages),
+ .dma_addr = ionic_pgtbl_dma(buf, 0),
+ .dbid_flags = cpu_to_le16(dbid),
+ .id_ver = cpu_to_le32(cq->cqid),
+ }
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_CQ)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, 0);
+}
+
+static int ionic_destroy_cq_cmd(struct ionic_ibdev *dev, u32 cqid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_CQ,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN),
+ .cmd.destroy_cq = {
+ .cq_id = cpu_to_le32(cqid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_CQ)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ib_udata *udata = &attrs->driver_udata;
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ struct ionic_tbl_buf buf = {};
+ struct ionic_cq_resp resp;
+ struct ionic_cq_req req;
+ int udma_idx = 0, rc;
+
+ if (udata) {
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+ }
+
+ vcq->udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
+
+ if (udata)
+ vcq->udma_mask &= req.udma_mask;
+
+ if (!vcq->udma_mask) {
+ rc = -EINVAL;
+ goto err_init;
+ }
+
+ for (; udma_idx < dev->lif_cfg.udma_count; ++udma_idx) {
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+
+ rc = ionic_create_cq_common(vcq, &buf, attr, ctx, udata,
+ &req.cq[udma_idx],
+ &resp.cqid[udma_idx],
+ udma_idx);
+ if (rc)
+ goto err_init;
+
+ rc = ionic_create_cq_cmd(dev, ctx, &vcq->cq[udma_idx], &buf);
+ if (rc)
+ goto err_cmd;
+
+ ionic_pgtbl_unbuf(dev, &buf);
+ }
+
+ vcq->ibcq.cqe = attr->cqe;
+
+ if (udata) {
+ resp.udma_mask = vcq->udma_mask;
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ return 0;
+
+err_resp:
+ while (udma_idx) {
+ --udma_idx;
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+ ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &buf);
+ ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
+err_init:
+ ;
+ }
+
+ return rc;
+}
+
+int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int udma_idx, rc_tmp, rc = 0;
+
+ for (udma_idx = dev->lif_cfg.udma_count; udma_idx; ) {
+ --udma_idx;
+
+ if (!(vcq->udma_mask & BIT(udma_idx)))
+ continue;
+
+ rc_tmp = ionic_destroy_cq_cmd(dev, vcq->cq[udma_idx].cqid);
+ if (rc_tmp) {
+ if (!rc)
+ rc = rc_tmp;
+
+ continue;
+ }
+
+ ionic_destroy_cq_common(dev, &vcq->cq[udma_idx]);
+ }
+
+ return rc;
+}
+
+static bool pd_remote_privileged(struct ib_pd *pd)
+{
+ return pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+}
+
+static int ionic_create_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_cq *send_cq,
+ struct ionic_cq *recv_cq,
+ struct ionic_qp *qp,
+ struct ionic_tbl_buf *sq_buf,
+ struct ionic_tbl_buf *rq_buf,
+ struct ib_qp_init_attr *attr)
+{
+ const u16 dbid = ionic_obj_dbid(dev, pd->ibpd.uobject);
+ const u32 flags = to_ionic_qp_flags(0, 0,
+ qp->sq_cmb & IONIC_CMB_ENABLE,
+ qp->rq_cmb & IONIC_CMB_ENABLE,
+ qp->sq_spec, qp->rq_spec,
+ pd->flags & IONIC_QPF_PRIVILEGED,
+ pd_remote_privileged(&pd->ibpd));
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_CREATE_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_CREATE_QP_IN_V1_LEN),
+ .cmd.create_qp = {
+ .pd_id = cpu_to_le32(pd->pdid),
+ .priv_flags = cpu_to_be32(flags),
+ .type_state = to_ionic_qp_type(attr->qp_type),
+ .dbid_flags = cpu_to_le16(dbid),
+ .id_ver = cpu_to_le32(qp->qpid),
+ }
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_CREATE_QP)
+ return -EBADRQC;
+
+ if (qp->has_sq) {
+ wr.wqe.cmd.create_qp.sq_cq_id = cpu_to_le32(send_cq->cqid);
+ wr.wqe.cmd.create_qp.sq_depth_log2 = qp->sq.depth_log2;
+ wr.wqe.cmd.create_qp.sq_stride_log2 = qp->sq.stride_log2;
+ wr.wqe.cmd.create_qp.sq_page_size_log2 = sq_buf->page_size_log2;
+ wr.wqe.cmd.create_qp.sq_tbl_index_xrcd_id = cpu_to_le32(~0);
+ wr.wqe.cmd.create_qp.sq_map_count =
+ cpu_to_le32(sq_buf->tbl_pages);
+ wr.wqe.cmd.create_qp.sq_dma_addr = ionic_pgtbl_dma(sq_buf, 0);
+ }
+
+ if (qp->has_rq) {
+ wr.wqe.cmd.create_qp.rq_cq_id = cpu_to_le32(recv_cq->cqid);
+ wr.wqe.cmd.create_qp.rq_depth_log2 = qp->rq.depth_log2;
+ wr.wqe.cmd.create_qp.rq_stride_log2 = qp->rq.stride_log2;
+ wr.wqe.cmd.create_qp.rq_page_size_log2 = rq_buf->page_size_log2;
+ wr.wqe.cmd.create_qp.rq_tbl_index_srq_id = cpu_to_le32(~0);
+ wr.wqe.cmd.create_qp.rq_map_count =
+ cpu_to_le32(rq_buf->tbl_pages);
+ wr.wqe.cmd.create_qp.rq_dma_addr = ionic_pgtbl_dma(rq_buf, 0);
+ }
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, 0);
+}
+
+static int ionic_modify_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_pd *pd,
+ struct ionic_qp *qp,
+ struct ib_qp_attr *attr,
+ int mask)
+{
+ const u32 flags = to_ionic_qp_flags(attr->qp_access_flags,
+ attr->en_sqd_async_notify,
+ qp->sq_cmb & IONIC_CMB_ENABLE,
+ qp->rq_cmb & IONIC_CMB_ENABLE,
+ qp->sq_spec, qp->rq_spec,
+ pd->flags & IONIC_QPF_PRIVILEGED,
+ pd_remote_privileged(qp->ibqp.pd));
+ const u8 state = to_ionic_qp_modify_state(attr->qp_state,
+ attr->cur_qp_state);
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_MODIFY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_MODIFY_QP_IN_V1_LEN),
+ .cmd.mod_qp = {
+ .attr_mask = cpu_to_be32(mask),
+ .access_flags = cpu_to_be16(flags),
+ .rq_psn = cpu_to_le32(attr->rq_psn),
+ .sq_psn = cpu_to_le32(attr->sq_psn),
+ .rate_limit_kbps =
+ cpu_to_le32(attr->rate_limit),
+ .pmtu = (attr->path_mtu + 7),
+ .retry = (attr->retry_cnt |
+ (attr->rnr_retry << 4)),
+ .rnr_timer = attr->min_rnr_timer,
+ .retry_timeout = attr->timeout,
+ .type_state = state,
+ .id_ver = cpu_to_le32(qp->qpid),
+ }
+ }
+ };
+ const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
+ void *hdr_buf = NULL;
+ dma_addr_t hdr_dma = 0;
+ int rc, hdr_len = 0;
+ u16 sport;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_MODIFY_QP)
+ return -EBADRQC;
+
+ if ((mask & IB_QP_MAX_DEST_RD_ATOMIC) && attr->max_dest_rd_atomic) {
+ /* Note, rounding up/down was already done when allocating
+ * resources on the device. The allocation order is in cache
+ * line size. We can't use the order of the resource
+ * allocation to determine the order of wqes here, because for
+ * a queue length of one cache line or less it is not distinct.
+ *
+ * Therefore, the order of wqes is computed again here.
+ *
+ * Account for the hole and round up to the next order.
+ */
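+ /* Illustrative arithmetic: max_dest_rd_atomic 8 gives
+ * order_base_2(9) = 4, i.e. an RSQ depth of 16 entries.
+ */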
+ wr.wqe.cmd.mod_qp.rsq_depth =
+ order_base_2(attr->max_dest_rd_atomic + 1);
+ wr.wqe.cmd.mod_qp.rsq_index = cpu_to_le32(~0);
+ }
+
+ if ((mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ /* Account for the hole and round down to the next order */
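+ /* Illustrative arithmetic: max_rd_atomic 8 gives
+ * order_base_2(10) - 1 = 3, i.e. an RRQ depth of 8 entries.
+ */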
+ wr.wqe.cmd.mod_qp.rrq_depth =
+ order_base_2(attr->max_rd_atomic + 2) - 1;
+ wr.wqe.cmd.mod_qp.rrq_index = cpu_to_le32(~0);
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
+ wr.wqe.cmd.mod_qp.qkey_dest_qpn =
+ cpu_to_le32(attr->dest_qp_num);
+ else
+ wr.wqe.cmd.mod_qp.qkey_dest_qpn = cpu_to_le32(attr->qkey);
+
+ if (mask & IB_QP_AV) {
+ if (!qp->hdr)
+ return -ENOMEM;
+
+ sport = rdma_get_udp_sport(grh->flow_label,
+ qp->qpid,
+ attr->dest_qp_num);
+
+ rc = ionic_build_hdr(dev, qp->hdr, &attr->ah_attr, sport, true);
+ if (rc)
+ return rc;
+
+ qp->sgid_index = grh->sgid_index;
+
+ hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!hdr_buf)
+ return -ENOMEM;
+
+ hdr_len = ib_ud_header_pack(qp->hdr, hdr_buf);
+ hdr_len -= IB_BTH_BYTES;
+ hdr_len -= IB_DETH_BYTES;
+ ibdev_dbg(&dev->ibdev, "roce packet header template\n");
+ print_hex_dump_debug("hdr ", DUMP_PREFIX_OFFSET, 16, 1,
+ hdr_buf, hdr_len, true);
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf, hdr_len,
+ DMA_TO_DEVICE);
+
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ if (qp->hdr->ipv4_present) {
+ wr.wqe.cmd.mod_qp.tfp_csum_profile =
+ qp->hdr->vlan_present ?
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP :
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP;
+ } else {
+ wr.wqe.cmd.mod_qp.tfp_csum_profile =
+ qp->hdr->vlan_present ?
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP :
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP;
+ }
+
+ wr.wqe.cmd.mod_qp.ah_id_len =
+ cpu_to_le32(qp->ahid | (hdr_len << 24));
+ wr.wqe.cmd.mod_qp.dma_addr = cpu_to_le64(hdr_dma);
+
+ wr.wqe.cmd.mod_qp.en_pcp = attr->ah_attr.sl;
+ wr.wqe.cmd.mod_qp.ip_dscp = grh->traffic_class >> 2;
+ }
+
+ ionic_admin_post(dev, &wr);
+
+ rc = ionic_admin_wait(dev, &wr, 0);
+
+ if (mask & IB_QP_AV)
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, hdr_len,
+ DMA_TO_DEVICE);
+err_dma:
+ if (mask & IB_QP_AV)
+ kfree(hdr_buf);
+
+ return rc;
+}
+
+static int ionic_query_qp_cmd(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_qp_attr *attr,
+ int mask)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_QUERY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_QUERY_QP_IN_V1_LEN),
+ .cmd.query_qp = {
+ .id_ver = cpu_to_le32(qp->qpid),
+ },
+ }
+ };
+ struct ionic_v1_admin_query_qp_sq *query_sqbuf;
+ struct ionic_v1_admin_query_qp_rq *query_rqbuf;
+ dma_addr_t query_sqdma;
+ dma_addr_t query_rqdma;
+ dma_addr_t hdr_dma = 0;
+ void *hdr_buf = NULL;
+ int flags, rc;
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_QUERY_QP)
+ return -EBADRQC;
+
+ if (qp->has_sq) {
+ bool expdb = !!(qp->sq_cmb & IONIC_CMB_EXPDB);
+
+ attr->cap.max_send_sge =
+ ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ expdb);
+ attr->cap.max_inline_data =
+ ionic_v1_send_wqe_max_data(qp->sq.stride_log2, expdb);
+ }
+
+ if (qp->has_rq) {
+ attr->cap.max_recv_sge =
+ ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
+ qp->rq_spec,
+ qp->rq_cmb & IONIC_CMB_EXPDB);
+ }
+
+ query_sqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!query_sqbuf)
+ return -ENOMEM;
+
+ query_rqbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!query_rqbuf) {
+ rc = -ENOMEM;
+ goto err_rqbuf;
+ }
+
+ query_sqdma = dma_map_single(dev->lif_cfg.hwdev, query_sqbuf, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, query_sqdma);
+ if (rc)
+ goto err_sqdma;
+
+ query_rqdma = dma_map_single(dev->lif_cfg.hwdev, query_rqbuf, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, query_rqdma);
+ if (rc)
+ goto err_rqdma;
+
+ if (mask & IB_QP_AV) {
+ hdr_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!hdr_buf) {
+ rc = -ENOMEM;
+ goto err_hdrbuf;
+ }
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, hdr_buf,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_hdrdma;
+ }
+
+ wr.wqe.cmd.query_qp.sq_dma_addr = cpu_to_le64(query_sqdma);
+ wr.wqe.cmd.query_qp.rq_dma_addr = cpu_to_le64(query_rqdma);
+ wr.wqe.cmd.query_qp.hdr_dma_addr = cpu_to_le64(hdr_dma);
+ wr.wqe.cmd.query_qp.ah_id = cpu_to_le32(qp->ahid);
+
+ ionic_admin_post(dev, &wr);
+
+ rc = ionic_admin_wait(dev, &wr, 0);
+
+ if (rc)
+ goto err_hdrdma;
+
+ flags = be16_to_cpu(query_sqbuf->access_perms_flags |
+ query_rqbuf->access_perms_flags);
+
+ print_hex_dump_debug("sqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
+ query_sqbuf, sizeof(*query_sqbuf), true);
+ print_hex_dump_debug("rqbuf ", DUMP_PREFIX_OFFSET, 16, 1,
+ query_rqbuf, sizeof(*query_rqbuf), true);
+ ibdev_dbg(&dev->ibdev, "query qp %u state_pmtu %#x flags %#x\n",
+ qp->qpid, query_rqbuf->state_pmtu, flags);
+
+ attr->qp_state = from_ionic_qp_state(query_rqbuf->state_pmtu >> 4);
+ attr->cur_qp_state = attr->qp_state;
+ attr->path_mtu = (query_rqbuf->state_pmtu & 0xf) - 7;
+ attr->path_mig_state = IB_MIG_MIGRATED;
+ attr->qkey = be32_to_cpu(query_sqbuf->qkey_dest_qpn);
+ attr->rq_psn = be32_to_cpu(query_sqbuf->rq_psn);
+ attr->sq_psn = be32_to_cpu(query_rqbuf->sq_psn);
+ attr->dest_qp_num = attr->qkey;
+ attr->qp_access_flags = from_ionic_qp_flags(flags);
+ attr->pkey_index = 0;
+ attr->alt_pkey_index = 0;
+ attr->en_sqd_async_notify = !!(flags & IONIC_QPF_SQD_NOTIFY);
+ attr->sq_draining = !!(flags & IONIC_QPF_SQ_DRAINING);
+ attr->max_rd_atomic = BIT(query_rqbuf->rrq_depth) - 1;
+ attr->max_dest_rd_atomic = BIT(query_rqbuf->rsq_depth) - 1;
+ attr->min_rnr_timer = query_sqbuf->rnr_timer;
+ attr->port_num = 0;
+ attr->timeout = query_sqbuf->retry_timeout;
+ attr->retry_cnt = query_rqbuf->retry_rnrtry & 0xf;
+ attr->rnr_retry = query_rqbuf->retry_rnrtry >> 4;
+ attr->alt_port_num = 0;
+ attr->alt_timeout = 0;
+ attr->rate_limit = be32_to_cpu(query_sqbuf->rate_limit_kbps);
+
+ if (mask & IB_QP_AV)
+ ionic_set_ah_attr(dev, &attr->ah_attr,
+ qp->hdr, qp->sgid_index);
+
+err_hdrdma:
+ if (mask & IB_QP_AV) {
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ kfree(hdr_buf);
+ }
+err_hdrbuf:
+ dma_unmap_single(dev->lif_cfg.hwdev, query_rqdma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+err_rqdma:
+ dma_unmap_single(dev->lif_cfg.hwdev, query_sqdma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+err_sqdma:
+ kfree(query_rqbuf);
+err_rqbuf:
+ kfree(query_sqbuf);
+
+ return rc;
+}
+
+static int ionic_destroy_qp_cmd(struct ionic_ibdev *dev, u32 qpid)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = IONIC_V1_ADMIN_DESTROY_QP,
+ .len = cpu_to_le16(IONIC_ADMIN_DESTROY_QP_IN_V1_LEN),
+ .cmd.destroy_qp = {
+ .qp_id = cpu_to_le32(qpid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= IONIC_V1_ADMIN_DESTROY_QP)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_TEARDOWN);
+}
+
+static bool ionic_expdb_wqe_size_supported(struct ionic_ibdev *dev,
+ u32 wqe_size)
+{
+ switch (wqe_size) {
+ case 64: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_64;
+ case 128: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_128;
+ case 256: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_256;
+ case 512: return dev->lif_cfg.expdb_mask & IONIC_EXPDB_512;
+ }
+
+ return false;
+}
+
+static void ionic_qp_sq_init_cmb(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_udata *udata,
+ int max_data)
+{
+ u8 expdb_stride_log2 = 0;
+ bool expdb;
+ int rc;
+
+ if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
+ goto not_in_cmb;
+
+ if (qp->sq_cmb & ~IONIC_CMB_SUPPORTED) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->sq_cmb &= IONIC_CMB_SUPPORTED;
+ }
+
+ if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.sq_expdb) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ qp->sq_cmb_order = order_base_2(qp->sq.size / PAGE_SIZE);
+
+ if (qp->sq_cmb_order >= IONIC_SQCMB_ORDER)
+ goto not_in_cmb;
+
+ if (qp->sq_cmb & IONIC_CMB_EXPDB)
+ expdb_stride_log2 = qp->sq.stride_log2;
+
+ rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->sq_cmb_pgid,
+ &qp->sq_cmb_addr, qp->sq_cmb_order,
+ expdb_stride_log2, &expdb);
+ if (rc)
+ goto not_in_cmb;
+
+ if ((qp->sq_cmb & IONIC_CMB_EXPDB) && !expdb) {
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ goto err_map;
+
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ return;
+
+err_map:
+ ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
+not_in_cmb:
+ if (qp->sq_cmb & IONIC_CMB_REQUIRE)
+ ibdev_dbg(&dev->ibdev, "could not place sq in cmb as required\n");
+
+ qp->sq_cmb = 0;
+ qp->sq_cmb_order = IONIC_RES_INVALID;
+ qp->sq_cmb_pgid = 0;
+ qp->sq_cmb_addr = 0;
+}
+
+static void ionic_qp_sq_destroy_cmb(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!(qp->sq_cmb & IONIC_CMB_ENABLE))
+ return;
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
+
+ ionic_put_cmb(dev->lif_cfg.lif, qp->sq_cmb_pgid, qp->sq_cmb_order);
+}
+
+static int ionic_qp_sq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
+ struct ionic_qp *qp, struct ionic_qdesc *sq,
+ struct ionic_tbl_buf *buf, int max_wr, int max_sge,
+ int max_data, int sq_spec, struct ib_udata *udata)
+{
+ u32 wqe_size;
+ int rc = 0;
+
+ qp->sq_msn_prod = 0;
+ qp->sq_msn_cons = 0;
+
+ if (!qp->has_sq) {
+ if (buf) {
+ buf->tbl_buf = NULL;
+ buf->tbl_limit = 0;
+ buf->tbl_pages = 0;
+ }
+ if (udata)
+ rc = ionic_validate_qdesc_zero(sq);
+
+ return rc;
+ }
+
+ rc = -EINVAL;
+
+ if (max_wr < 0 || max_wr > 0xffff)
+ return rc;
+
+ if (max_sge < 1)
+ return rc;
+
+ if (max_sge > min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0,
+ qp->sq_cmb &
+ IONIC_CMB_EXPDB),
+ IONIC_SPEC_HIGH))
+ return rc;
+
+ if (max_data < 0)
+ return rc;
+
+ if (max_data > ionic_v1_send_wqe_max_data(dev->lif_cfg.max_stride,
+ qp->sq_cmb & IONIC_CMB_EXPDB))
+ return rc;
+
+ if (udata) {
+ rc = ionic_validate_qdesc(sq);
+ if (rc)
+ return rc;
+
+ qp->sq_spec = sq_spec;
+
+ qp->sq.ptr = NULL;
+ qp->sq.size = sq->size;
+ qp->sq.mask = sq->mask;
+ qp->sq.depth_log2 = sq->depth_log2;
+ qp->sq.stride_log2 = sq->stride_log2;
+
+ qp->sq_meta = NULL;
+ qp->sq_msn_idx = NULL;
+
+ qp->sq_umem = ib_umem_get(&dev->ibdev, sq->addr, sq->size, 0);
+ if (IS_ERR(qp->sq_umem))
+ return PTR_ERR(qp->sq_umem);
+ } else {
+ qp->sq_umem = NULL;
+
+ qp->sq_spec = ionic_v1_use_spec_sge(max_sge, sq_spec);
+ if (sq_spec && !qp->sq_spec)
+ ibdev_dbg(&dev->ibdev,
+ "init sq: max_sge %u disables spec\n",
+ max_sge);
+
+ if (qp->sq_cmb & IONIC_CMB_EXPDB) {
+ wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
+ qp->sq_spec,
+ true);
+
+ if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
+ qp->sq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ if (!(qp->sq_cmb & IONIC_CMB_EXPDB))
+ wqe_size = ionic_v1_send_wqe_min_size(max_sge, max_data,
+ qp->sq_spec,
+ false);
+
+ rc = ionic_queue_init(&qp->sq, dev->lif_cfg.hwdev,
+ max_wr, wqe_size);
+ if (rc)
+ return rc;
+
+ ionic_queue_dbell_init(&qp->sq, qp->qpid);
+
+ qp->sq_meta = kmalloc_array((u32)qp->sq.mask + 1,
+ sizeof(*qp->sq_meta),
+ GFP_KERNEL);
+ if (!qp->sq_meta) {
+ rc = -ENOMEM;
+ goto err_sq_meta;
+ }
+
+ qp->sq_msn_idx = kmalloc_array((u32)qp->sq.mask + 1,
+ sizeof(*qp->sq_msn_idx),
+ GFP_KERNEL);
+ if (!qp->sq_msn_idx) {
+ rc = -ENOMEM;
+ goto err_sq_msn;
+ }
+ }
+
+ ionic_qp_sq_init_cmb(dev, qp, udata, max_data);
+
+ if (qp->sq_cmb & IONIC_CMB_ENABLE)
+ rc = ionic_pgtbl_init(dev, buf, NULL,
+ (u64)qp->sq_cmb_pgid << PAGE_SHIFT,
+ 1, PAGE_SIZE);
+ else
+ rc = ionic_pgtbl_init(dev, buf,
+ qp->sq_umem, qp->sq.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_sq_tbl;
+
+ return 0;
+
+err_sq_tbl:
+ ionic_qp_sq_destroy_cmb(dev, ctx, qp);
+ kfree(qp->sq_msn_idx);
+err_sq_msn:
+ kfree(qp->sq_meta);
+err_sq_meta:
+ if (qp->sq_umem)
+ ib_umem_release(qp->sq_umem);
+ else
+ ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
+ return rc;
+}
+
+static void ionic_qp_sq_destroy(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!qp->has_sq)
+ return;
+
+ ionic_qp_sq_destroy_cmb(dev, ctx, qp);
+
+ kfree(qp->sq_msn_idx);
+ kfree(qp->sq_meta);
+
+ if (qp->sq_umem)
+ ib_umem_release(qp->sq_umem);
+ else
+ ionic_queue_destroy(&qp->sq, dev->lif_cfg.hwdev);
+}
+
+static void ionic_qp_rq_init_cmb(struct ionic_ibdev *dev,
+ struct ionic_qp *qp,
+ struct ib_udata *udata)
+{
+ u8 expdb_stride_log2 = 0;
+ bool expdb;
+ int rc;
+
+ if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
+ goto not_in_cmb;
+
+ if (qp->rq_cmb & ~IONIC_CMB_SUPPORTED) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->rq_cmb &= IONIC_CMB_SUPPORTED;
+ }
+
+ if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !dev->lif_cfg.rq_expdb) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto not_in_cmb;
+
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ qp->rq_cmb_order = order_base_2(qp->rq.size / PAGE_SIZE);
+
+ if (qp->rq_cmb_order >= IONIC_RQCMB_ORDER)
+ goto not_in_cmb;
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB)
+ expdb_stride_log2 = qp->rq.stride_log2;
+
+ rc = ionic_get_cmb(dev->lif_cfg.lif, &qp->rq_cmb_pgid,
+ &qp->rq_cmb_addr, qp->rq_cmb_order,
+ expdb_stride_log2, &expdb);
+ if (rc)
+ goto not_in_cmb;
+
+ if ((qp->rq_cmb & IONIC_CMB_EXPDB) && !expdb) {
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ goto err_map;
+
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ return;
+
+err_map:
+ ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
+not_in_cmb:
+ if (qp->rq_cmb & IONIC_CMB_REQUIRE)
+ ibdev_dbg(&dev->ibdev, "could not place rq in cmb as required\n");
+
+ qp->rq_cmb = 0;
+ qp->rq_cmb_order = IONIC_RES_INVALID;
+ qp->rq_cmb_pgid = 0;
+ qp->rq_cmb_addr = 0;
+}
+
+static void ionic_qp_rq_destroy_cmb(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!(qp->rq_cmb & IONIC_CMB_ENABLE))
+ return;
+
+ if (ctx)
+ rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
+
+ ionic_put_cmb(dev->lif_cfg.lif, qp->rq_cmb_pgid, qp->rq_cmb_order);
+}
+
+static int ionic_qp_rq_init(struct ionic_ibdev *dev, struct ionic_ctx *ctx,
+ struct ionic_qp *qp, struct ionic_qdesc *rq,
+ struct ionic_tbl_buf *buf, int max_wr, int max_sge,
+ int rq_spec, struct ib_udata *udata)
+{
+ int rc = 0, i;
+ u32 wqe_size;
+
+ if (!qp->has_rq) {
+ if (buf) {
+ buf->tbl_buf = NULL;
+ buf->tbl_limit = 0;
+ buf->tbl_pages = 0;
+ }
+ if (udata)
+ rc = ionic_validate_qdesc_zero(rq);
+
+ return rc;
+ }
+
+ rc = -EINVAL;
+
+ if (max_wr < 0 || max_wr > 0xffff)
+ return rc;
+
+ if (max_sge < 1)
+ return rc;
+
+ if (max_sge > min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH))
+ return rc;
+
+ if (udata) {
+ rc = ionic_validate_qdesc(rq);
+ if (rc)
+ return rc;
+
+ qp->rq_spec = rq_spec;
+
+ qp->rq.ptr = NULL;
+ qp->rq.size = rq->size;
+ qp->rq.mask = rq->mask;
+ qp->rq.depth_log2 = rq->depth_log2;
+ qp->rq.stride_log2 = rq->stride_log2;
+
+ qp->rq_meta = NULL;
+
+ qp->rq_umem = ib_umem_get(&dev->ibdev, rq->addr, rq->size, 0);
+ if (IS_ERR(qp->rq_umem))
+ return PTR_ERR(qp->rq_umem);
+ } else {
+ qp->rq_umem = NULL;
+
+ qp->rq_spec = ionic_v1_use_spec_sge(max_sge, rq_spec);
+ if (rq_spec && !qp->rq_spec)
+ ibdev_dbg(&dev->ibdev,
+ "init rq: max_sge %u disables spec\n",
+ max_sge);
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB) {
+ wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
+ qp->rq_spec,
+ true);
+
+ if (!ionic_expdb_wqe_size_supported(dev, wqe_size))
+ qp->rq_cmb &= ~IONIC_CMB_EXPDB;
+ }
+
+ if (!(qp->rq_cmb & IONIC_CMB_EXPDB))
+ wqe_size = ionic_v1_recv_wqe_min_size(max_sge,
+ qp->rq_spec,
+ false);
+
+ rc = ionic_queue_init(&qp->rq, dev->lif_cfg.hwdev,
+ max_wr, wqe_size);
+ if (rc)
+ return rc;
+
+ ionic_queue_dbell_init(&qp->rq, qp->qpid);
+
+ qp->rq_meta = kmalloc_array((u32)qp->rq.mask + 1,
+ sizeof(*qp->rq_meta),
+ GFP_KERNEL);
+ if (!qp->rq_meta) {
+ rc = -ENOMEM;
+ goto err_rq_meta;
+ }
+
+ for (i = 0; i < qp->rq.mask; ++i)
+ qp->rq_meta[i].next = &qp->rq_meta[i + 1];
+ qp->rq_meta[i].next = IONIC_META_LAST;
+ qp->rq_meta_head = &qp->rq_meta[0];
+ }
+
+ ionic_qp_rq_init_cmb(dev, qp, udata);
+
+ if (qp->rq_cmb & IONIC_CMB_ENABLE)
+ rc = ionic_pgtbl_init(dev, buf, NULL,
+ (u64)qp->rq_cmb_pgid << PAGE_SHIFT,
+ 1, PAGE_SIZE);
+ else
+ rc = ionic_pgtbl_init(dev, buf,
+ qp->rq_umem, qp->rq.dma, 1, PAGE_SIZE);
+ if (rc)
+ goto err_rq_tbl;
+
+ return 0;
+
+err_rq_tbl:
+ ionic_qp_rq_destroy_cmb(dev, ctx, qp);
+ kfree(qp->rq_meta);
+err_rq_meta:
+ if (qp->rq_umem)
+ ib_umem_release(qp->rq_umem);
+ else
+ ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
+ return rc;
+}
+
+static void ionic_qp_rq_destroy(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx,
+ struct ionic_qp *qp)
+{
+ if (!qp->has_rq)
+ return;
+
+ ionic_qp_rq_destroy_cmb(dev, ctx, qp);
+
+ kfree(qp->rq_meta);
+
+ if (qp->rq_umem)
+ ib_umem_release(qp->rq_umem);
+ else
+ ionic_queue_destroy(&qp->rq, dev->lif_cfg.hwdev);
+}
+
+int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_tbl_buf sq_buf = {}, rq_buf = {};
+ struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_qp_resp resp = {};
+ struct ionic_qp_req req = {};
+ struct ionic_cq *cq;
+ u8 udma_mask;
+ void *entry;
+ int rc;
+
+ if (udata) {
+ rc = ib_copy_from_udata(&req, udata, sizeof(req));
+ if (rc)
+ return rc;
+ } else {
+ req.sq_spec = IONIC_SPEC_HIGH;
+ req.rq_spec = IONIC_SPEC_HIGH;
+ }
+
+ if (attr->qp_type == IB_QPT_SMI || attr->qp_type > IB_QPT_UD)
+ return -EOPNOTSUPP;
+
+ qp->state = IB_QPS_RESET;
+
+ INIT_LIST_HEAD(&qp->cq_poll_sq);
+ INIT_LIST_HEAD(&qp->cq_flush_sq);
+ INIT_LIST_HEAD(&qp->cq_flush_rq);
+
+ spin_lock_init(&qp->sq_lock);
+ spin_lock_init(&qp->rq_lock);
+
+ qp->has_sq = 1;
+ qp->has_rq = 1;
+
+ if (attr->qp_type == IB_QPT_GSI) {
+ rc = ionic_get_gsi_qpid(dev, &qp->qpid);
+ } else {
+ udma_mask = BIT(dev->lif_cfg.udma_count) - 1;
+
+ if (qp->has_sq)
+ udma_mask &= to_ionic_vcq(attr->send_cq)->udma_mask;
+
+ if (qp->has_rq)
+ udma_mask &= to_ionic_vcq(attr->recv_cq)->udma_mask;
+
+ if (udata && req.udma_mask)
+ udma_mask &= req.udma_mask;
+
+ if (!udma_mask)
+ return -EINVAL;
+
+ rc = ionic_get_qpid(dev, &qp->qpid, &qp->udma_idx, udma_mask);
+ }
+ if (rc)
+ return rc;
+
+ qp->sig_all = attr->sq_sig_type == IB_SIGNAL_ALL_WR;
+ qp->has_ah = attr->qp_type == IB_QPT_RC;
+
+ if (qp->has_ah) {
+ qp->hdr = kzalloc(sizeof(*qp->hdr), GFP_KERNEL);
+ if (!qp->hdr) {
+ rc = -ENOMEM;
+ goto err_ah_alloc;
+ }
+
+ rc = ionic_get_ahid(dev, &qp->ahid);
+ if (rc)
+ goto err_ahid;
+ }
+
+ if (udata) {
+ if (req.rq_cmb & IONIC_CMB_ENABLE)
+ qp->rq_cmb = req.rq_cmb;
+
+ if (req.sq_cmb & IONIC_CMB_ENABLE)
+ qp->sq_cmb = req.sq_cmb;
+ }
+
+ rc = ionic_qp_sq_init(dev, ctx, qp, &req.sq, &sq_buf,
+ attr->cap.max_send_wr, attr->cap.max_send_sge,
+ attr->cap.max_inline_data, req.sq_spec, udata);
+ if (rc)
+ goto err_sq;
+
+ rc = ionic_qp_rq_init(dev, ctx, qp, &req.rq, &rq_buf,
+ attr->cap.max_recv_wr, attr->cap.max_recv_sge,
+ req.rq_spec, udata);
+ if (rc)
+ goto err_rq;
+
+ rc = ionic_create_qp_cmd(dev, pd,
+ to_ionic_vcq_cq(attr->send_cq, qp->udma_idx),
+ to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx),
+ qp, &sq_buf, &rq_buf, attr);
+ if (rc)
+ goto err_cmd;
+
+ if (udata) {
+ resp.qpid = qp->qpid;
+ resp.udma_idx = qp->udma_idx;
+
+ if (qp->sq_cmb & IONIC_CMB_ENABLE) {
+ bool wc;
+
+ if ((qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
+ (IONIC_CMB_WC | IONIC_CMB_UC)) {
+ ibdev_dbg(&dev->ibdev,
+ "Both sq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
+ qp->sq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
+ }
+
+ wc = (qp->sq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ != IONIC_CMB_UC;
+
+ /* let userspace know the mapping */
+ if (wc)
+ qp->sq_cmb |= IONIC_CMB_WC;
+ else
+ qp->sq_cmb |= IONIC_CMB_UC;
+
+ qp->mmap_sq_cmb =
+ ionic_mmap_entry_insert(ctx,
+ qp->sq.size,
+ PHYS_PFN(qp->sq_cmb_addr),
+ wc ? IONIC_MMAP_WC : 0,
+ &resp.sq_cmb_offset);
+ if (!qp->mmap_sq_cmb) {
+ rc = -ENOMEM;
+ goto err_mmap_sq;
+ }
+
+ resp.sq_cmb = qp->sq_cmb;
+ }
+
+ if (qp->rq_cmb & IONIC_CMB_ENABLE) {
+ bool wc;
+
+ if ((qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC)) ==
+ (IONIC_CMB_WC | IONIC_CMB_UC)) {
+ ibdev_dbg(&dev->ibdev,
+ "Both rq_cmb flags IONIC_CMB_WC and IONIC_CMB_UC are set, using default driver mapping\n");
+ qp->rq_cmb &= ~(IONIC_CMB_WC | IONIC_CMB_UC);
+ }
+
+ if (qp->rq_cmb & IONIC_CMB_EXPDB)
+ wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ == IONIC_CMB_WC;
+ else
+ wc = (qp->rq_cmb & (IONIC_CMB_WC | IONIC_CMB_UC))
+ != IONIC_CMB_UC;
+
+ /* let userspace know the mapping */
+ if (wc)
+ qp->rq_cmb |= IONIC_CMB_WC;
+ else
+ qp->rq_cmb |= IONIC_CMB_UC;
+
+ qp->mmap_rq_cmb =
+ ionic_mmap_entry_insert(ctx,
+ qp->rq.size,
+ PHYS_PFN(qp->rq_cmb_addr),
+ wc ? IONIC_MMAP_WC : 0,
+ &resp.rq_cmb_offset);
+ if (!qp->mmap_rq_cmb) {
+ rc = -ENOMEM;
+ goto err_mmap_rq;
+ }
+
+ resp.rq_cmb = qp->rq_cmb;
+ }
+
+ rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ if (rc)
+ goto err_resp;
+ }
+
+ ionic_pgtbl_unbuf(dev, &rq_buf);
+ ionic_pgtbl_unbuf(dev, &sq_buf);
+
+ qp->ibqp.qp_num = qp->qpid;
+
+ init_completion(&qp->qp_rel_comp);
+ kref_init(&qp->qp_kref);
+
+ entry = xa_store_irq(&dev->qp_tbl, qp->qpid, qp, GFP_KERNEL);
+ if (entry) {
+ if (!xa_is_err(entry))
+ rc = -EINVAL;
+ else
+ rc = xa_err(entry);
+
+ goto err_resp;
+ }
+
+ if (qp->has_sq) {
+ cq = to_ionic_vcq_cq(attr->send_cq, qp->udma_idx);
+
+ attr->cap.max_send_wr = qp->sq.mask;
+ attr->cap.max_send_sge =
+ ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ qp->sq_cmb & IONIC_CMB_EXPDB);
+ attr->cap.max_inline_data =
+ ionic_v1_send_wqe_max_data(qp->sq.stride_log2,
+ qp->sq_cmb &
+ IONIC_CMB_EXPDB);
+ qp->sq_cqid = cq->cqid;
+ }
+
+ if (qp->has_rq) {
+ cq = to_ionic_vcq_cq(attr->recv_cq, qp->udma_idx);
+
+ attr->cap.max_recv_wr = qp->rq.mask;
+ attr->cap.max_recv_sge =
+ ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2,
+ qp->rq_spec,
+ qp->rq_cmb & IONIC_CMB_EXPDB);
+ qp->rq_cqid = cq->cqid;
+ }
+
+ return 0;
+
+err_resp:
+ if (udata && (qp->rq_cmb & IONIC_CMB_ENABLE))
+ rdma_user_mmap_entry_remove(qp->mmap_rq_cmb);
+err_mmap_rq:
+ if (udata && (qp->sq_cmb & IONIC_CMB_ENABLE))
+ rdma_user_mmap_entry_remove(qp->mmap_sq_cmb);
+err_mmap_sq:
+ ionic_destroy_qp_cmd(dev, qp->qpid);
+err_cmd:
+ ionic_pgtbl_unbuf(dev, &rq_buf);
+ ionic_qp_rq_destroy(dev, ctx, qp);
+err_rq:
+ ionic_pgtbl_unbuf(dev, &sq_buf);
+ ionic_qp_sq_destroy(dev, ctx, qp);
+err_sq:
+ if (qp->has_ah)
+ ionic_put_ahid(dev, qp->ahid);
+err_ahid:
+ kfree(qp->hdr);
+err_ah_alloc:
+ ionic_put_qpid(dev, qp->qpid);
+ return rc;
+}
+
+void ionic_notify_flush_cq(struct ionic_cq *cq)
+{
+ if (cq->flush && cq->vcq->ibcq.comp_handler)
+ cq->vcq->ibcq.comp_handler(&cq->vcq->ibcq,
+ cq->vcq->ibcq.cq_context);
+}
+
+static void ionic_notify_qp_cqs(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ if (qp->ibqp.send_cq)
+ ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.send_cq,
+ qp->udma_idx));
+ if (qp->ibqp.recv_cq && qp->ibqp.recv_cq != qp->ibqp.send_cq)
+ ionic_notify_flush_cq(to_ionic_vcq_cq(qp->ibqp.recv_cq,
+ qp->udma_idx));
+}
+
+void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+
+ /* Hold the CQ lock and QP sq_lock to set up flush */
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->sq_lock);
+ qp->sq_flush = true;
+ if (!ionic_queue_empty(&qp->sq)) {
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+ spin_unlock(&qp->sq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+
+ /* Hold the CQ lock and QP rq_lock to set up flush */
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->rq_lock);
+ qp->rq_flush = true;
+ if (!ionic_queue_empty(&qp->rq)) {
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+ }
+ spin_unlock(&qp->rq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+}
+
+static void ionic_clean_cq(struct ionic_cq *cq, u32 qpid)
+{
+ struct ionic_v1_cqe *qcqe;
+ int prod, qtf, qid, type;
+ bool color;
+
+ if (!cq->q.ptr)
+ return;
+
+ color = cq->color;
+ prod = cq->q.prod;
+ qcqe = ionic_queue_at(&cq->q, prod);
+
+ while (color == ionic_v1_cqe_color(qcqe)) {
+ qtf = ionic_v1_cqe_qtf(qcqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ if (qid == qpid && type != IONIC_V1_CQE_TYPE_ADMIN)
+ ionic_v1_cqe_clean(qcqe);
+
+ prod = ionic_queue_next(&cq->q, prod);
+ qcqe = ionic_queue_at(&cq->q, prod);
+ color = ionic_color_wrap(prod, color);
+ }
+}
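+
+/*
+ * Illustrative note: ionic_clean_cq() walks the not-yet-polled region of
+ * the cq (from prod until the color bit flips) and neutralizes any
+ * non-admin cqe still referring to the given qpid, without consuming
+ * entries, so a later poll cannot hand completions to a destroyed or
+ * reset qp.
+ */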
+
+static void ionic_reset_qp(struct ionic_ibdev *dev, struct ionic_qp *qp)
+{
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+ int i;
+
+ local_irq_save(irqflags);
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+ spin_lock(&cq->lock);
+ ionic_clean_cq(cq, qp->qpid);
+ spin_unlock(&cq->lock);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+ spin_lock(&cq->lock);
+ ionic_clean_cq(cq, qp->qpid);
+ spin_unlock(&cq->lock);
+ }
+
+ if (qp->has_sq) {
+ spin_lock(&qp->sq_lock);
+ qp->sq_flush = false;
+ qp->sq_flush_rcvd = false;
+ qp->sq_msn_prod = 0;
+ qp->sq_msn_cons = 0;
+ qp->sq.prod = 0;
+ qp->sq.cons = 0;
+ spin_unlock(&qp->sq_lock);
+ }
+
+ if (qp->has_rq) {
+ spin_lock(&qp->rq_lock);
+ qp->rq_flush = false;
+ qp->rq.prod = 0;
+ qp->rq.cons = 0;
+ if (qp->rq_meta) {
+ for (i = 0; i < qp->rq.mask; ++i)
+ qp->rq_meta[i].next = &qp->rq_meta[i + 1];
+ qp->rq_meta[i].next = IONIC_META_LAST;
+ qp->rq_meta_head = &qp->rq_meta[0];
+ }
+ spin_unlock(&qp->rq_lock);
+ }
+
+ local_irq_restore(irqflags);
+}
+
+static bool ionic_qp_cur_state_is_ok(enum ib_qp_state q_state,
+ enum ib_qp_state attr_state)
+{
+ if (q_state == attr_state)
+ return true;
+
+ if (attr_state == IB_QPS_ERR)
+ return true;
+
+ if (attr_state == IB_QPS_SQE)
+ return q_state == IB_QPS_RTS || q_state == IB_QPS_SQD;
+
+ return false;
+}
+
+static int ionic_check_modify_qp(struct ionic_qp *qp, struct ib_qp_attr *attr,
+ int mask)
+{
+ enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state : qp->state;
+ enum ib_qp_state next_state = (mask & IB_QP_STATE) ?
+ attr->qp_state : cur_state;
+
+ if ((mask & IB_QP_CUR_STATE) &&
+ !ionic_qp_cur_state_is_ok(qp->state, attr->cur_qp_state))
+ return -EINVAL;
+
+ if (!ib_modify_qp_is_ok(cur_state, next_state, qp->ibqp.qp_type, mask))
+ return -EINVAL;
+
+ /* an unprivileged qp is not allowed a privileged qkey */
+ if ((mask & IB_QP_QKEY) && (attr->qkey & 0x80000000) &&
+ qp->ibqp.uobject)
+ return -EPERM;
+
+ return 0;
+}
+
+int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_pd *pd = to_ionic_pd(ibqp->pd);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ int rc;
+
+ rc = ionic_check_modify_qp(qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (mask & IB_QP_CAP)
+ return -EINVAL;
+
+ rc = ionic_modify_qp_cmd(dev, pd, qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (mask & IB_QP_STATE) {
+ qp->state = attr->qp_state;
+
+ if (attr->qp_state == IB_QPS_ERR) {
+ ionic_flush_qp(dev, qp);
+ ionic_notify_qp_cqs(dev, qp);
+ } else if (attr->qp_state == IB_QPS_RESET) {
+ ionic_reset_qp(dev, qp);
+ }
+ }
+
+ return 0;
+}
+
+int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+ int mask, struct ib_qp_init_attr *init_attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ int rc;
+
+ memset(attr, 0, sizeof(*attr));
+ memset(init_attr, 0, sizeof(*init_attr));
+
+ rc = ionic_query_qp_cmd(dev, qp, attr, mask);
+ if (rc)
+ return rc;
+
+ if (qp->has_sq)
+ attr->cap.max_send_wr = qp->sq.mask;
+
+ if (qp->has_rq)
+ attr->cap.max_recv_wr = qp->rq.mask;
+
+ init_attr->event_handler = ibqp->event_handler;
+ init_attr->qp_context = ibqp->qp_context;
+ init_attr->send_cq = ibqp->send_cq;
+ init_attr->recv_cq = ibqp->recv_cq;
+ init_attr->srq = ibqp->srq;
+ init_attr->xrcd = ibqp->xrcd;
+ init_attr->cap = attr->cap;
+ init_attr->sq_sig_type = qp->sig_all ?
+ IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
+ init_attr->qp_type = ibqp->qp_type;
+ init_attr->create_flags = 0;
+ init_attr->port_num = 0;
+ init_attr->rwq_ind_tbl = ibqp->rwq_ind_tbl;
+ init_attr->source_qpn = 0;
+
+ return rc;
+}
+
+int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+{
+ struct ionic_ctx *ctx =
+ rdma_udata_to_drv_context(udata, struct ionic_ctx, ibctx);
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ unsigned long irqflags;
+ struct ionic_cq *cq;
+ int rc;
+
+ rc = ionic_destroy_qp_cmd(dev, qp->qpid);
+ if (rc)
+ return rc;
+
+ xa_erase_irq(&dev->qp_tbl, qp->qpid);
+
+ kref_put(&qp->qp_kref, ionic_qp_complete);
+ wait_for_completion(&qp->qp_rel_comp);
+
+ if (qp->ibqp.send_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.send_cq, qp->udma_idx);
+ spin_lock_irqsave(&cq->lock, irqflags);
+ ionic_clean_cq(cq, qp->qpid);
+ list_del(&qp->cq_poll_sq);
+ list_del(&qp->cq_flush_sq);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ if (qp->ibqp.recv_cq) {
+ cq = to_ionic_vcq_cq(qp->ibqp.recv_cq, qp->udma_idx);
+ spin_lock_irqsave(&cq->lock, irqflags);
+ ionic_clean_cq(cq, qp->qpid);
+ list_del(&qp->cq_flush_rq);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+ }
+
+ ionic_qp_rq_destroy(dev, ctx, qp);
+ ionic_qp_sq_destroy(dev, ctx, qp);
+ if (qp->has_ah) {
+ ionic_put_ahid(dev, qp->ahid);
+ kfree(qp->hdr);
+ }
+ ionic_put_qpid(dev, qp->qpid);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_datapath.c b/drivers/infiniband/hw/ionic/ionic_datapath.c
new file mode 100644
index 000000000000..aa2944887f23
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_datapath.c
@@ -0,0 +1,1399 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_user_verbs.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+#define IONIC_OP(version, opname) \
+ ((version) < 2 ? IONIC_V1_OP_##opname : IONIC_V2_OP_##opname)
+
+static bool ionic_next_cqe(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_v1_cqe **cqe)
+{
+ struct ionic_v1_cqe *qcqe = ionic_queue_at_prod(&cq->q);
+
+ if (unlikely(cq->color != ionic_v1_cqe_color(qcqe)))
+ return false;
+
+ /* Prevent out-of-order reads of the CQE */
+ dma_rmb();
+
+ *cqe = qcqe;
+
+ return true;
+}
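+
+/*
+ * Illustrative note on the color protocol: the device flips a phase bit
+ * in each cqe every time it wraps the ring, and the consumer keeps its
+ * own expected color (cq->color, toggled at the wrap point by
+ * ionic_color_wrap()).  A cqe is new exactly when its color matches the
+ * consumer's, and dma_rmb() orders the payload reads after that check.
+ */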
+
+static int ionic_flush_recv(struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_rq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (!qp->rq_flush)
+ return 0;
+
+ if (ionic_queue_empty(&qp->rq))
+ return 0;
+
+ wqe = ionic_queue_at_cons(&qp->rq);
+
+ /* wqe_id must be a valid queue index */
+ if (unlikely(wqe->base.wqe_id >> qp->rq.depth_log2)) {
+ ibdev_warn(qp->ibqp.device,
+ "flush qp %u recv index %llu invalid\n",
+ qp->qpid, (unsigned long long)wqe->base.wqe_id);
+ return -EIO;
+ }
+
+ /* wqe_id must indicate a request that is outstanding */
+ meta = &qp->rq_meta[wqe->base.wqe_id];
+ if (unlikely(meta->next != IONIC_META_POSTED)) {
+ ibdev_warn(qp->ibqp.device,
+ "flush qp %u recv index %llu not posted\n",
+ qp->qpid, (unsigned long long)wqe->base.wqe_id);
+ return -EIO;
+ }
+
+ ionic_queue_consume(&qp->rq);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ meta->next = qp->rq_meta_head;
+ qp->rq_meta_head = meta;
+
+ return 1;
+}
+
+static int ionic_flush_recv_many(struct ionic_qp *qp,
+ struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_flush_recv(qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_flush_send(struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_sq_meta *meta;
+
+ if (!qp->sq_flush)
+ return 0;
+
+ if (ionic_queue_empty(&qp->sq))
+ return 0;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ ionic_queue_consume(&qp->sq);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = IB_WC_WR_FLUSH_ERR;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ return 1;
+}
+
+static int ionic_flush_send_many(struct ionic_qp *qp,
+ struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_flush_send(qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_poll_recv(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *cqe_qp, struct ionic_v1_cqe *cqe,
+ struct ib_wc *wc)
+{
+ struct ionic_qp *qp = NULL;
+ struct ionic_rq_meta *meta;
+ u32 src_qpn, st_len;
+ u16 vlan_tag;
+ u8 op;
+
+ if (cqe_qp->rq_flush)
+ return 0;
+
+ qp = cqe_qp;
+
+ st_len = be32_to_cpu(cqe->status_length);
+
+ /* ignore wqe_id in case of flush error */
+ if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
+ cqe_qp->rq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+
+ /* posted recvs (if any) flushed by ionic_flush_recv */
+ return 0;
+ }
+
+ /* there had better be something in the recv queue to complete */
+ if (ionic_queue_empty(&qp->rq)) {
+ ibdev_warn(&dev->ibdev, "qp %u is empty\n", qp->qpid);
+ return -EIO;
+ }
+
+ /* wqe_id must be a valid queue index */
+ if (unlikely(cqe->recv.wqe_id >> qp->rq.depth_log2)) {
+ ibdev_warn(&dev->ibdev,
+ "qp %u recv index %llu invalid\n",
+ qp->qpid, (unsigned long long)cqe->recv.wqe_id);
+ return -EIO;
+ }
+
+ /* wqe_id must indicate a request that is outstanding */
+ meta = &qp->rq_meta[cqe->recv.wqe_id];
+ if (unlikely(meta->next != IONIC_META_POSTED)) {
+ ibdev_warn(&dev->ibdev,
+ "qp %u recv index %llu not posted\n",
+ qp->qpid, (unsigned long long)cqe->recv.wqe_id);
+ return -EIO;
+ }
+
+ meta->next = qp->rq_meta_head;
+ qp->rq_meta_head = meta;
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->wr_id = meta->wrid;
+
+ wc->qp = &cqe_qp->ibqp;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ wc->vendor_err = st_len;
+ wc->status = ionic_to_ib_status(st_len);
+
+ cqe_qp->rq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+
+ ibdev_warn(&dev->ibdev,
+ "qp %d recv cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, BIT(cq->q.stride_log2), true);
+ goto out;
+ }
+
+ wc->vendor_err = 0;
+ wc->status = IB_WC_SUCCESS;
+
+ src_qpn = be32_to_cpu(cqe->recv.src_qpn_op);
+ op = src_qpn >> IONIC_V1_CQE_RECV_OP_SHIFT;
+
+ src_qpn &= IONIC_V1_CQE_RECV_QPN_MASK;
+ op &= IONIC_V1_CQE_RECV_OP_MASK;
+
+ wc->opcode = IB_WC_RECV;
+ switch (op) {
+ case IONIC_V1_CQE_RECV_OP_RDMA_IMM:
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
+ break;
+ case IONIC_V1_CQE_RECV_OP_SEND_IMM:
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ wc->ex.imm_data = cqe->recv.imm_data_rkey; /* be32 in wc */
+ break;
+ case IONIC_V1_CQE_RECV_OP_SEND_INV:
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->recv.imm_data_rkey);
+ break;
+ }
+
+ wc->byte_len = st_len;
+ wc->src_qp = src_qpn;
+
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI) {
+ wc->wc_flags |= IB_WC_GRH | IB_WC_WITH_SMAC;
+ ether_addr_copy(wc->smac, cqe->recv.src_mac);
+
+ wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+ if (ionic_v1_cqe_recv_is_ipv4(cqe))
+ wc->network_hdr_type = RDMA_NETWORK_IPV4;
+ else
+ wc->network_hdr_type = RDMA_NETWORK_IPV6;
+
+ if (ionic_v1_cqe_recv_is_vlan(cqe))
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+
+ /* vlan_tag in cqe is valid from the datapath even if there is no vlan */
+ vlan_tag = be16_to_cpu(cqe->recv.vlan_tag);
+ wc->vlan_id = vlan_tag & 0xfff; /* 802.1q VID */
+ wc->sl = vlan_tag >> VLAN_PRIO_SHIFT; /* 802.1q PCP */
+ }
+
+ wc->pkey_index = 0;
+ wc->port_num = 1;
+
+out:
+ ionic_queue_consume(&qp->rq);
+
+ return 1;
+}
+
+static bool ionic_peek_send(struct ionic_qp *qp)
+{
+ struct ionic_sq_meta *meta;
+
+ if (qp->sq_flush)
+ return false;
+
+ /* completed all send queue requests */
+ if (ionic_queue_empty(&qp->sq))
+ return false;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ /* waiting for remote completion */
+ if (meta->remote && meta->seq == qp->sq_msn_cons)
+ return false;
+
+ /* waiting for local completion */
+ if (!meta->remote && !meta->local_comp)
+ return false;
+
+ return true;
+}
+
+static int ionic_poll_send(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *qp, struct ib_wc *wc)
+{
+ struct ionic_sq_meta *meta;
+
+ if (qp->sq_flush)
+ return 0;
+
+ do {
+ /* completed all send queue requests */
+ if (ionic_queue_empty(&qp->sq))
+ goto out_empty;
+
+ meta = &qp->sq_meta[qp->sq.cons];
+
+ /* waiting for remote completion */
+ if (meta->remote && meta->seq == qp->sq_msn_cons)
+ goto out_empty;
+
+ /* waiting for local completion */
+ if (!meta->remote && !meta->local_comp)
+ goto out_empty;
+
+ ionic_queue_consume(&qp->sq);
+
+ /* produce wc only if signaled or error status */
+ } while (!meta->signal && meta->ibsts == IB_WC_SUCCESS);
+
+ memset(wc, 0, sizeof(*wc));
+
+ wc->status = meta->ibsts;
+ wc->wr_id = meta->wrid;
+ wc->qp = &qp->ibqp;
+
+ if (meta->ibsts == IB_WC_SUCCESS) {
+ wc->byte_len = meta->len;
+ wc->opcode = meta->ibop;
+ } else {
+ wc->vendor_err = meta->len;
+
+ qp->sq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+
+ return 1;
+
+out_empty:
+ if (qp->sq_flush_rcvd) {
+ qp->sq_flush = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+ return 0;
+}
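+
+/*
+ * Editorial sketch of the completion gating above: a consumed sq entry
+ * is completable once either its msn has been passed by sq_msn_cons
+ * (remote work, advanced in ionic_comp_msn()) or its local_comp flag was
+ * set by an npg completion (local work such as reg_mr or local_inv).
+ * Unsignaled successful wqes are consumed silently; only signaled or
+ * errored ones produce a work completion.
+ */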
+
+static int ionic_poll_send_many(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ struct ionic_qp *qp, struct ib_wc *wc, int nwc)
+{
+ int rc = 0, npolled = 0;
+
+ while (npolled < nwc) {
+ rc = ionic_poll_send(dev, cq, qp, wc + npolled);
+ if (rc <= 0)
+ break;
+
+ npolled += rc;
+ }
+
+ return npolled ?: rc;
+}
+
+static int ionic_validate_cons(u16 prod, u16 cons,
+ u16 comp, u16 mask)
+{
+ if (((prod - cons) & mask) <= ((comp - cons) & mask))
+ return -EIO;
+
+ return 0;
+}
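+
+/*
+ * A worked example of the wrap-safe window check above (illustrative
+ * values only): with mask 0xffff, prod 5 and cons 0xfffe there are
+ * (5 - 0xfffe) & 0xffff = 7 outstanding entries.  A completion index
+ * comp = 2 gives (2 - 0xfffe) & 0xffff = 4 < 7, inside the window, so
+ * it is accepted; comp = 6 gives 8 and is rejected with -EIO.
+ */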
+
+static int ionic_comp_msn(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
+{
+ struct ionic_sq_meta *meta;
+ u16 cqe_seq, cqe_idx;
+ int rc;
+
+ if (qp->sq_flush)
+ return 0;
+
+ cqe_seq = be32_to_cpu(cqe->send.msg_msn) & qp->sq.mask;
+
+ rc = ionic_validate_cons(qp->sq_msn_prod,
+ qp->sq_msn_cons,
+ cqe_seq - 1,
+ qp->sq.mask);
+ if (rc) {
+ ibdev_warn(qp->ibqp.device,
+ "qp %u bad msn %#x seq %u for prod %u cons %u\n",
+ qp->qpid, be32_to_cpu(cqe->send.msg_msn),
+ cqe_seq, qp->sq_msn_prod, qp->sq_msn_cons);
+ return rc;
+ }
+
+ qp->sq_msn_cons = cqe_seq;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ cqe_idx = qp->sq_msn_idx[(cqe_seq - 1) & qp->sq.mask];
+
+ meta = &qp->sq_meta[cqe_idx];
+ meta->len = be32_to_cpu(cqe->status_length);
+ meta->ibsts = ionic_to_ib_status(meta->len);
+
+ ibdev_warn(qp->ibqp.device,
+ "qp %d msn cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, sizeof(*cqe), true);
+ }
+
+ return 0;
+}
+
+static int ionic_comp_npg(struct ionic_qp *qp, struct ionic_v1_cqe *cqe)
+{
+ struct ionic_sq_meta *meta;
+ u16 cqe_idx;
+ u32 st_len;
+
+ if (qp->sq_flush)
+ return 0;
+
+ st_len = be32_to_cpu(cqe->status_length);
+
+ if (ionic_v1_cqe_error(cqe) && st_len == IONIC_STS_WQE_FLUSHED_ERR) {
+ /*
+ * A flush cqe does not consume a wqe on the device, and may not
+ * correspond to any posted work request.
+ *
+ * The driver should begin flushing after the last indicated
+ * normal or error completion. Here, only set a hint that the
+ * flush request was indicated. In poll_send, if nothing more
+ * can be polled normally, then begin flushing.
+ */
+ qp->sq_flush_rcvd = true;
+ return 0;
+ }
+
+ cqe_idx = cqe->send.npg_wqe_id & qp->sq.mask;
+ meta = &qp->sq_meta[cqe_idx];
+ meta->local_comp = true;
+
+ if (ionic_v1_cqe_error(cqe)) {
+ meta->len = st_len;
+ meta->ibsts = ionic_to_ib_status(st_len);
+ meta->remote = false;
+ ibdev_warn(qp->ibqp.device,
+ "qp %d npg cqe with error\n", qp->qpid);
+ print_hex_dump(KERN_WARNING, "cqe ", DUMP_PREFIX_OFFSET, 16, 1,
+ cqe, sizeof(*cqe), true);
+ }
+
+ return 0;
+}
+
+static void ionic_reserve_sync_cq(struct ionic_ibdev *dev, struct ionic_cq *cq)
+{
+ if (!ionic_queue_empty(&cq->q)) {
+ cq->credit += ionic_queue_length(&cq->q);
+ cq->q.cons = cq->q.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype,
+ ionic_queue_dbell_val(&cq->q));
+ }
+}
+
+static void ionic_reserve_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ int spend)
+{
+ cq->credit -= spend;
+
+ if (cq->credit <= 0)
+ ionic_reserve_sync_cq(dev, cq);
+}
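+
+/*
+ * Illustrative note: cq->credit tracks how many completions hardware may
+ * still produce before the consumer index must be synced.  Posting work
+ * spends credit up front via ionic_reserve_cq(); once it is exhausted,
+ * ionic_reserve_sync_cq() reports the polled entries back to hardware
+ * (cons = prod), reclaims them as credit, and rings the cq doorbell.
+ */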
+
+static int ionic_poll_vcq_cq(struct ionic_ibdev *dev,
+ struct ionic_cq *cq,
+ int nwc, struct ib_wc *wc)
+{
+ struct ionic_qp *qp, *qp_next;
+ struct ionic_v1_cqe *cqe;
+ int rc = 0, npolled = 0;
+ unsigned long irqflags;
+ u32 qtf, qid;
+ bool peek;
+ u8 type;
+
+ if (nwc < 1)
+ return 0;
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+
+ /* poll already indicated work completions for send queue */
+ list_for_each_entry_safe(qp, qp_next, &cq->poll_sq, cq_poll_sq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->sq_lock);
+ rc = ionic_poll_send_many(dev, cq, qp, wc + npolled,
+ nwc - npolled);
+ spin_unlock(&qp->sq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_poll_sq);
+ }
+
+ /* poll for more work completions */
+ while (likely(ionic_next_cqe(dev, cq, &cqe))) {
+ if (npolled == nwc)
+ goto out;
+
+ qtf = ionic_v1_cqe_qtf(cqe);
+ qid = ionic_v1_cqe_qtf_qid(qtf);
+ type = ionic_v1_cqe_qtf_type(qtf);
+
+ /*
+ * Safe to access the QP without an additional reference here because:
+ * 1. We hold cq->lock throughout
+ * 2. ionic_destroy_qp() acquires the same cq->lock before cleanup
+ * 3. QP is removed from qp_tbl before any cleanup begins
+ * This ensures no concurrent access between polling and destruction.
+ */
+ qp = xa_load(&dev->qp_tbl, qid);
+ if (unlikely(!qp)) {
+ ibdev_dbg(&dev->ibdev, "missing qp for qid %u\n", qid);
+ goto cq_next;
+ }
+
+ switch (type) {
+ case IONIC_V1_CQE_TYPE_RECV:
+ spin_lock(&qp->rq_lock);
+ rc = ionic_poll_recv(dev, cq, qp, cqe, wc + npolled);
+ spin_unlock(&qp->rq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ break;
+
+ case IONIC_V1_CQE_TYPE_SEND_MSN:
+ spin_lock(&qp->sq_lock);
+ rc = ionic_comp_msn(qp, cqe);
+ if (!rc) {
+ rc = ionic_poll_send_many(dev, cq, qp,
+ wc + npolled,
+ nwc - npolled);
+ peek = ionic_peek_send(qp);
+ }
+ spin_unlock(&qp->sq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ if (peek)
+ list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
+ break;
+
+ case IONIC_V1_CQE_TYPE_SEND_NPG:
+ spin_lock(&qp->sq_lock);
+ rc = ionic_comp_npg(qp, cqe);
+ if (!rc) {
+ rc = ionic_poll_send_many(dev, cq, qp,
+ wc + npolled,
+ nwc - npolled);
+ peek = ionic_peek_send(qp);
+ }
+ spin_unlock(&qp->sq_lock);
+
+ if (rc < 0)
+ goto out;
+
+ npolled += rc;
+
+ if (peek)
+ list_move_tail(&qp->cq_poll_sq, &cq->poll_sq);
+ break;
+
+ default:
+ ibdev_warn(&dev->ibdev,
+ "unexpected cqe type %u\n", type);
+ rc = -EIO;
+ goto out;
+ }
+
+cq_next:
+ ionic_queue_produce(&cq->q);
+ cq->color = ionic_color_wrap(cq->q.prod, cq->color);
+ }
+
+ /* lastly, flush send and recv queues */
+ if (likely(!cq->flush))
+ goto out;
+
+ cq->flush = false;
+
+ list_for_each_entry_safe(qp, qp_next, &cq->flush_sq, cq_flush_sq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->sq_lock);
+ rc = ionic_flush_send_many(qp, wc + npolled, nwc - npolled);
+ spin_unlock(&qp->sq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_flush_sq);
+ else
+ cq->flush = true;
+ }
+
+ list_for_each_entry_safe(qp, qp_next, &cq->flush_rq, cq_flush_rq) {
+ if (npolled == nwc)
+ goto out;
+
+ spin_lock(&qp->rq_lock);
+ rc = ionic_flush_recv_many(qp, wc + npolled, nwc - npolled);
+ spin_unlock(&qp->rq_lock);
+
+ if (rc > 0)
+ npolled += rc;
+
+ if (npolled < nwc)
+ list_del_init(&qp->cq_flush_rq);
+ else
+ cq->flush = true;
+ }
+
+out:
+ /* in case credit was depleted (more work posted than cq depth) */
+ if (cq->credit <= 0)
+ ionic_reserve_sync_cq(dev, cq);
+
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ return npolled ?: rc;
+}
+
+int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int rc_tmp, rc = 0, npolled = 0;
+ int cq_i, cq_x, cq_ix;
+
+ cq_x = vcq->poll_idx;
+ vcq->poll_idx ^= dev->lif_cfg.udma_count - 1;
+
+ for (cq_i = 0; npolled < nwc && cq_i < dev->lif_cfg.udma_count; ++cq_i) {
+ cq_ix = cq_i ^ cq_x;
+
+ if (!(vcq->udma_mask & BIT(cq_ix)))
+ continue;
+
+ rc_tmp = ionic_poll_vcq_cq(dev, &vcq->cq[cq_ix],
+ nwc - npolled,
+ wc + npolled);
+
+ if (rc_tmp >= 0)
+ npolled += rc_tmp;
+ else if (!rc)
+ rc = rc_tmp;
+ }
+
+ return npolled ?: rc;
+}
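+
+/*
+ * Editorial note: with two udmas, poll_idx alternates between 0 and 1 on
+ * each call (poll_idx ^= 1), and cq_ix = cq_i ^ cq_x then visits the two
+ * per-udma cqs in alternating order so that neither starves under a
+ * steady stream of completions on the other.
+ */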
+
+static int ionic_req_notify_vcq_cq(struct ionic_ibdev *dev, struct ionic_cq *cq,
+ enum ib_cq_notify_flags flags)
+{
+ u64 dbell_val = cq->q.dbell;
+
+ if (flags & IB_CQ_SOLICITED) {
+ cq->arm_sol_prod = ionic_queue_next(&cq->q, cq->arm_sol_prod);
+ dbell_val |= cq->arm_sol_prod | IONIC_CQ_RING_SOL;
+ } else {
+ cq->arm_any_prod = ionic_queue_next(&cq->q, cq->arm_any_prod);
+ dbell_val |= cq->arm_any_prod | IONIC_CQ_RING_ARM;
+ }
+
+ ionic_reserve_sync_cq(dev, cq);
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.cq_qtype, dbell_val);
+
+ /*
+ * IB_CQ_REPORT_MISSED_EVENTS:
+ *
+ * The queue index in ring zero guarantees no missed events.
+ *
+ * Here, we check if the color bit in the next cqe is flipped. If it
+ * is flipped, then progress can be made by immediately polling the cq.
+ * Still, the cq will be armed, and an event will be generated. The cq
+ * may be empty when polled after the event, because the next poll
+ * after arming the cq can empty it.
+ */
+ return (flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ cq->color == ionic_v1_cqe_color(ionic_queue_at_prod(&cq->q));
+}
+
+int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibcq->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibcq);
+ int rc = 0, cq_i;
+
+ for (cq_i = 0; cq_i < dev->lif_cfg.udma_count; ++cq_i) {
+ if (!(vcq->udma_mask & BIT(cq_i)))
+ continue;
+
+ if (ionic_req_notify_vcq_cq(dev, &vcq->cq[cq_i], flags))
+ rc = 1;
+ }
+
+ return rc;
+}
+
+static s64 ionic_prep_inline(void *data, u32 max_data,
+ const struct ib_sge *ib_sgl, int num_sge)
+{
+ static const s64 bit_31 = 1u << 31;
+ s64 len = 0, sg_len;
+ int sg_i;
+
+ for (sg_i = 0; sg_i < num_sge; ++sg_i) {
+ sg_len = ib_sgl[sg_i].length;
+
+ /* sge length zero means 2GB */
+ if (unlikely(sg_len == 0))
+ sg_len = bit_31;
+
+ /* greater than max inline data is invalid */
+ if (unlikely(len + sg_len > max_data))
+ return -EINVAL;
+
+ memcpy(data + len, (void *)ib_sgl[sg_i].addr, sg_len);
+
+ len += sg_len;
+ }
+
+ return len;
+}
+
+static s64 ionic_prep_pld(struct ionic_v1_wqe *wqe,
+ union ionic_v1_pld *pld,
+ int spec, u32 max_sge,
+ const struct ib_sge *ib_sgl,
+ int num_sge)
+{
+ static const s64 bit_31 = 1u << 31;
+ struct ionic_sge *sgl;
+ __be32 *spec32 = NULL;
+ __be16 *spec16 = NULL;
+ s64 len = 0, sg_len;
+ int sg_i = 0;
+
+ if (unlikely(num_sge < 0 || (u32)num_sge > max_sge))
+ return -EINVAL;
+
+ if (spec && num_sge > IONIC_V1_SPEC_FIRST_SGE) {
+ sg_i = IONIC_V1_SPEC_FIRST_SGE;
+
+ if (num_sge > 8) {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC16);
+ spec16 = pld->spec16;
+ } else {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SPEC32);
+ spec32 = pld->spec32;
+ }
+ }
+
+ sgl = &pld->sgl[sg_i];
+
+ for (sg_i = 0; sg_i < num_sge; ++sg_i) {
+ sg_len = ib_sgl[sg_i].length;
+
+ /* sge length zero means 2GB */
+ if (unlikely(sg_len == 0))
+ sg_len = bit_31;
+
+ /* greater than 2GB data is invalid */
+ if (unlikely(len + sg_len > bit_31))
+ return -EINVAL;
+
+ sgl[sg_i].va = cpu_to_be64(ib_sgl[sg_i].addr);
+ sgl[sg_i].len = cpu_to_be32(sg_len);
+ sgl[sg_i].lkey = cpu_to_be32(ib_sgl[sg_i].lkey);
+
+ if (spec32) {
+ spec32[sg_i] = sgl[sg_i].len;
+ } else if (spec16) {
+ if (unlikely(sg_len > U16_MAX))
+ return -EINVAL;
+ spec16[sg_i] = cpu_to_be16(sg_len);
+ }
+
+ len += sg_len;
+ }
+
+ return len;
+}
+
+static void ionic_prep_base(struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ struct ionic_sq_meta *meta,
+ struct ionic_v1_wqe *wqe)
+{
+ meta->wrid = wr->wr_id;
+ meta->ibsts = IB_WC_SUCCESS;
+ meta->signal = false;
+ meta->local_comp = false;
+
+ wqe->base.wqe_id = qp->sq.prod;
+
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_FENCE);
+
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SOL);
+
+ if (qp->sig_all || wr->send_flags & IB_SEND_SIGNALED) {
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_SIG);
+ meta->signal = true;
+ }
+
+ meta->seq = qp->sq_msn_prod;
+ meta->remote =
+ qp->ibqp.qp_type != IB_QPT_UD &&
+ qp->ibqp.qp_type != IB_QPT_GSI &&
+ !ionic_ibop_is_local(wr->opcode);
+
+ if (meta->remote) {
+ qp->sq_msn_idx[meta->seq] = qp->sq.prod;
+ qp->sq_msn_prod = ionic_queue_next(&qp->sq, qp->sq_msn_prod);
+ }
+
+ ionic_queue_produce(&qp->sq);
+}
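+
+/*
+ * Editorial sketch of the msn bookkeeping above: only "remote" work
+ * (non-UD qp types with a non-local opcode) consumes a message sequence
+ * number.  The sq_msn_idx[] ring maps each msn back to the sq producer
+ * index that issued it, which is how ionic_comp_msn() locates the wqe
+ * meta for an errored completion.
+ */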
+
+static int ionic_prep_common(struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ struct ionic_sq_meta *meta,
+ struct ionic_v1_wqe *wqe)
+{
+ s64 signed_len;
+ u32 mval;
+
+ if (wr->send_flags & IB_SEND_INLINE) {
+ wqe->base.num_sge_key = 0;
+ wqe->base.flags |= cpu_to_be16(IONIC_V1_FLAG_INL);
+ mval = ionic_v1_send_wqe_max_data(qp->sq.stride_log2, false);
+ signed_len = ionic_prep_inline(wqe->common.pld.data, mval,
+ wr->sg_list, wr->num_sge);
+ } else {
+ wqe->base.num_sge_key = wr->num_sge;
+ mval = ionic_v1_send_wqe_max_sge(qp->sq.stride_log2,
+ qp->sq_spec,
+ false);
+ signed_len = ionic_prep_pld(wqe, &wqe->common.pld,
+ qp->sq_spec, mval,
+ wr->sg_list, wr->num_sge);
+ }
+
+ if (unlikely(signed_len < 0))
+ return signed_len;
+
+ meta->len = signed_len;
+ wqe->common.length = cpu_to_be32(signed_len);
+
+ ionic_prep_base(qp, wr, meta, wqe);
+
+ return 0;
+}
+
+static void ionic_prep_sq_wqe(struct ionic_qp *qp, void *wqe)
+{
+ memset(wqe, 0, 1u << qp->sq.stride_log2);
+}
+
+static void ionic_prep_rq_wqe(struct ionic_qp *qp, void *wqe)
+{
+ memset(wqe, 0, 1u << qp->rq.stride_log2);
+}
+
+static int ionic_prep_send(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_SEND;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
+ wqe->base.imm_data_key = wr->ex.imm_data;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_INV);
+ wqe->base.imm_data_key =
+ cpu_to_be32(wr->ex.invalidate_rkey);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ionic_prep_common(qp, wr, meta, wqe);
+}
+
+static int ionic_prep_send_ud(struct ionic_qp *qp,
+ const struct ib_ud_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ struct ionic_ah *ah;
+
+ if (unlikely(!wr->ah))
+ return -EINVAL;
+
+ ah = to_ionic_ah(wr->ah);
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ wqe->common.send.ah_id = cpu_to_be32(ah->ahid);
+ wqe->common.send.dest_qpn = cpu_to_be32(wr->remote_qpn);
+ wqe->common.send.dest_qkey = cpu_to_be32(wr->remote_qkey);
+
+ meta->ibop = IB_WC_SEND;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_SEND:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND);
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, SEND_IMM);
+ wqe->base.imm_data_key = wr->wr.ex.imm_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_rdma(struct ionic_qp *qp,
+ const struct ib_rdma_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_RDMA_WRITE;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_RDMA_READ:
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+ meta->ibop = IB_WC_RDMA_READ;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_READ);
+ break;
+ case IB_WR_RDMA_WRITE:
+ if (wr->wr.send_flags & IB_SEND_SOLICITED)
+ return -EINVAL;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE);
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, RDMA_WRITE_IMM);
+ wqe->base.imm_data_key = wr->wr.ex.imm_data;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wqe->common.rdma.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
+ wqe->common.rdma.remote_va_low = cpu_to_be32(wr->remote_addr);
+ wqe->common.rdma.remote_rkey = cpu_to_be32(wr->rkey);
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_atomic(struct ionic_qp *qp,
+ const struct ib_atomic_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (wr->wr.num_sge != 1 || wr->wr.sg_list[0].length != 8)
+ return -EINVAL;
+
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ meta->ibop = IB_WC_RDMA_WRITE;
+
+ switch (wr->wr.opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ meta->ibop = IB_WC_COMP_SWAP;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_CS);
+ wqe->atomic.swap_add_high = cpu_to_be32(wr->swap >> 32);
+ wqe->atomic.swap_add_low = cpu_to_be32(wr->swap);
+ wqe->atomic.compare_high = cpu_to_be32(wr->compare_add >> 32);
+ wqe->atomic.compare_low = cpu_to_be32(wr->compare_add);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ meta->ibop = IB_WC_FETCH_ADD;
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, ATOMIC_FA);
+ wqe->atomic.swap_add_high = cpu_to_be32(wr->compare_add >> 32);
+ wqe->atomic.swap_add_low = cpu_to_be32(wr->compare_add);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ wqe->atomic.remote_va_high = cpu_to_be32(wr->remote_addr >> 32);
+ wqe->atomic.remote_va_low = cpu_to_be32(wr->remote_addr);
+ wqe->atomic.remote_rkey = cpu_to_be32(wr->rkey);
+
+ wqe->base.num_sge_key = 1;
+ wqe->atomic.sge.va = cpu_to_be64(wr->wr.sg_list[0].addr);
+ wqe->atomic.sge.len = cpu_to_be32(8);
+ wqe->atomic.sge.lkey = cpu_to_be32(wr->wr.sg_list[0].lkey);
+
+ return ionic_prep_common(qp, &wr->wr, meta, wqe);
+}
+
+static int ionic_prep_inv(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+
+ if (wr->send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, LOCAL_INV);
+ wqe->base.imm_data_key = cpu_to_be32(wr->ex.invalidate_rkey);
+
+ meta->len = 0;
+ meta->ibop = IB_WC_LOCAL_INV;
+
+ ionic_prep_base(qp, wr, meta, wqe);
+
+ return 0;
+}
+
+static int ionic_prep_reg(struct ionic_qp *qp,
+ const struct ib_reg_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ struct ionic_mr *mr = to_ionic_mr(wr->mr);
+ struct ionic_sq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ __le64 dma_addr;
+ int flags;
+
+ if (wr->wr.send_flags & (IB_SEND_SOLICITED | IB_SEND_INLINE))
+ return -EINVAL;
+
+ /* must call ib_map_mr_sg before posting reg wr */
+ if (!mr->buf.tbl_pages)
+ return -EINVAL;
+
+ meta = &qp->sq_meta[qp->sq.prod];
+ wqe = ionic_queue_at_prod(&qp->sq);
+
+ ionic_prep_sq_wqe(qp, wqe);
+
+ flags = to_ionic_mr_flags(wr->access);
+
+ wqe->base.op = IONIC_OP(dev->lif_cfg.rdma_version, REG_MR);
+ wqe->base.num_sge_key = wr->key;
+ wqe->base.imm_data_key = cpu_to_be32(mr->ibmr.lkey);
+ wqe->reg_mr.va = cpu_to_be64(mr->ibmr.iova);
+ wqe->reg_mr.length = cpu_to_be64(mr->ibmr.length);
+ wqe->reg_mr.offset = ionic_pgtbl_off(&mr->buf, mr->ibmr.iova);
+ dma_addr = ionic_pgtbl_dma(&mr->buf, mr->ibmr.iova);
+ wqe->reg_mr.dma_addr = cpu_to_be64(le64_to_cpu(dma_addr));
+
+ wqe->reg_mr.map_count = cpu_to_be32(mr->buf.tbl_pages);
+ wqe->reg_mr.flags = cpu_to_be16(flags);
+ wqe->reg_mr.dir_size_log2 = 0;
+ wqe->reg_mr.page_size_log2 = order_base_2(mr->ibmr.page_size);
+
+ meta->len = 0;
+ meta->ibop = IB_WC_REG_MR;
+
+ ionic_prep_base(qp, &wr->wr, meta, wqe);
+
+ return 0;
+}
+
+static int ionic_prep_one_rc(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ int rc = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ case IB_WR_SEND_WITH_INV:
+ rc = ionic_prep_send(qp, wr);
+ break;
+ case IB_WR_RDMA_READ:
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ rc = ionic_prep_rdma(qp, rdma_wr(wr));
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc = ionic_prep_atomic(qp, atomic_wr(wr));
+ break;
+ case IB_WR_LOCAL_INV:
+ rc = ionic_prep_inv(qp, wr);
+ break;
+ case IB_WR_REG_MR:
+ rc = ionic_prep_reg(qp, reg_wr(wr));
+ break;
+ default:
+ ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int ionic_prep_one_ud(struct ionic_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(qp->ibqp.device);
+ int rc = 0;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ rc = ionic_prep_send_ud(qp, ud_wr(wr));
+ break;
+ default:
+ ibdev_dbg(&dev->ibdev, "invalid opcode %d\n", wr->opcode);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int ionic_prep_recv(struct ionic_qp *qp,
+ const struct ib_recv_wr *wr)
+{
+ struct ionic_rq_meta *meta;
+ struct ionic_v1_wqe *wqe;
+ s64 signed_len;
+ u32 mval;
+
+ wqe = ionic_queue_at_prod(&qp->rq);
+
+ /* if wqe is owned by device, caller can try posting again soon */
+ if (wqe->base.flags & cpu_to_be16(IONIC_V1_FLAG_FENCE))
+ return -EAGAIN;
+
+ meta = qp->rq_meta_head;
+ if (unlikely(meta == IONIC_META_LAST) ||
+ unlikely(meta == IONIC_META_POSTED))
+ return -EIO;
+
+ ionic_prep_rq_wqe(qp, wqe);
+
+ mval = ionic_v1_recv_wqe_max_sge(qp->rq.stride_log2, qp->rq_spec,
+ false);
+ signed_len = ionic_prep_pld(wqe, &wqe->recv.pld,
+ qp->rq_spec, mval,
+ wr->sg_list, wr->num_sge);
+ if (signed_len < 0)
+ return signed_len;
+
+ meta->wrid = wr->wr_id;
+
+ wqe->base.wqe_id = meta - qp->rq_meta;
+ wqe->base.num_sge_key = wr->num_sge;
+
+ /* total length for recv goes in base imm_data_key */
+ wqe->base.imm_data_key = cpu_to_be32(signed_len);
+
+ ionic_queue_produce(&qp->rq);
+
+ qp->rq_meta_head = meta->next;
+ meta->next = IONIC_META_POSTED;
+
+ return 0;
+}
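+
+/*
+ * Illustrative note: rq_meta entries form a singly linked free list
+ * headed at rq_meta_head.  Posting pops the head and marks the entry
+ * IONIC_META_POSTED; completion or flush pushes it back, which is why
+ * a cqe's wqe_id can be validated against the POSTED marker before it
+ * is trusted.
+ */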
+
+static int ionic_post_send_common(struct ionic_ibdev *dev,
+ struct ionic_vcq *vcq,
+ struct ionic_cq *cq,
+ struct ionic_qp *qp,
+ const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad)
+{
+ unsigned long irqflags;
+ bool notify = false;
+ int spend, rc = 0;
+
+ if (!bad)
+ return -EINVAL;
+
+ if (!qp->has_sq) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ if (qp->state < IB_QPS_RTS) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->sq_lock, irqflags);
+
+ while (wr) {
+ if (ionic_queue_full(&qp->sq)) {
+ ibdev_dbg(&dev->ibdev, "queue full");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ rc = ionic_prep_one_ud(qp, wr);
+ else
+ rc = ionic_prep_one_rc(qp, wr);
+ if (rc)
+ goto out;
+
+ wr = wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&qp->sq_lock, irqflags);
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->sq_lock);
+
+ if (likely(qp->sq.prod != qp->sq_old_prod)) {
+ /* ring cq doorbell just in time */
+ spend = (qp->sq.prod - qp->sq_old_prod) & qp->sq.mask;
+ ionic_reserve_cq(dev, cq, spend);
+
+ qp->sq_old_prod = qp->sq.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.sq_qtype,
+ ionic_queue_dbell_val(&qp->sq));
+ }
+
+ if (qp->sq_flush) {
+ notify = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_sq, &cq->flush_sq);
+ }
+
+ spin_unlock(&qp->sq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ if (notify && vcq->ibcq.comp_handler)
+ vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
+
+ *bad = wr;
+ return rc;
+}
+
+static int ionic_post_recv_common(struct ionic_ibdev *dev,
+ struct ionic_vcq *vcq,
+ struct ionic_cq *cq,
+ struct ionic_qp *qp,
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad)
+{
+ unsigned long irqflags;
+ bool notify = false;
+ int spend, rc = 0;
+
+ if (!bad)
+ return -EINVAL;
+
+ if (!qp->has_rq) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ if (qp->state < IB_QPS_INIT) {
+ *bad = wr;
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&qp->rq_lock, irqflags);
+
+ while (wr) {
+ if (ionic_queue_full(&qp->rq)) {
+ ibdev_dbg(&dev->ibdev, "queue full");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = ionic_prep_recv(qp, wr);
+ if (rc)
+ goto out;
+
+ wr = wr->next;
+ }
+
+out:
+ if (!cq) {
+ spin_unlock_irqrestore(&qp->rq_lock, irqflags);
+ goto out_unlocked;
+ }
+ spin_unlock_irqrestore(&qp->rq_lock, irqflags);
+
+ spin_lock_irqsave(&cq->lock, irqflags);
+ spin_lock(&qp->rq_lock);
+
+ if (likely(qp->rq.prod != qp->rq_old_prod)) {
+ /* ring cq doorbell just in time */
+ spend = (qp->rq.prod - qp->rq_old_prod) & qp->rq.mask;
+ ionic_reserve_cq(dev, cq, spend);
+
+ qp->rq_old_prod = qp->rq.prod;
+
+ ionic_dbell_ring(dev->lif_cfg.dbpage, dev->lif_cfg.rq_qtype,
+ ionic_queue_dbell_val(&qp->rq));
+ }
+
+ if (qp->rq_flush) {
+ notify = true;
+ cq->flush = true;
+ list_move_tail(&qp->cq_flush_rq, &cq->flush_rq);
+ }
+
+ spin_unlock(&qp->rq_lock);
+ spin_unlock_irqrestore(&cq->lock, irqflags);
+
+ if (notify && vcq->ibcq.comp_handler)
+ vcq->ibcq.comp_handler(&vcq->ibcq, vcq->ibcq.cq_context);
+
+out_unlocked:
+ *bad = wr;
+ return rc;
+}
+
+int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibqp->send_cq);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_cq *cq =
+ to_ionic_vcq_cq(ibqp->send_cq, qp->udma_idx);
+
+ return ionic_post_send_common(dev, vcq, cq, qp, wr, bad);
+}
+
+int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibqp->device);
+ struct ionic_vcq *vcq = to_ionic_vcq(ibqp->recv_cq);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_cq *cq =
+ to_ionic_vcq_cq(ibqp->recv_cq, qp->udma_idx);
+
+ return ionic_post_recv_common(dev, vcq, cq, qp, wr, bad);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_fw.h b/drivers/infiniband/hw/ionic/ionic_fw.h
new file mode 100644
index 000000000000..adfbb89d856c
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_fw.h
@@ -0,0 +1,1029 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_FW_H_
+#define _IONIC_FW_H_
+
+#include <linux/kernel.h>
+#include <rdma/ib_verbs.h>
+
+/* common to the ib spec */
+
+#define IONIC_EXP_DBELL_SZ 8
+
+enum ionic_mrid_bits {
+ IONIC_MRID_INDEX_SHIFT = 8,
+};
+
+static inline u32 ionic_mrid(u32 index, u8 key)
+{
+ return (index << IONIC_MRID_INDEX_SHIFT) | key;
+}
+
+static inline u32 ionic_mrid_index(u32 lrkey)
+{
+ return lrkey >> IONIC_MRID_INDEX_SHIFT;
+}
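+
+/*
+ * Layout sketch (editorial, following the 8-bit shift above): the low
+ * byte of an lkey/rkey is the consumer-owned key and the upper bits are
+ * the mr index, so ionic_mrid(0x1234, 0xab) == 0x1234ab and
+ * ionic_mrid_index(0x1234ab) == 0x1234.
+ */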
+
+/* common to all versions */
+
+/* wqe scatter gather element */
+struct ionic_sge {
+ __be64 va;
+ __be32 len;
+ __be32 lkey;
+};
+
+/* admin queue mr type */
+enum ionic_mr_flags {
+ /* bits that determine mr access */
+ IONIC_MRF_LOCAL_WRITE = BIT(0),
+ IONIC_MRF_REMOTE_WRITE = BIT(1),
+ IONIC_MRF_REMOTE_READ = BIT(2),
+ IONIC_MRF_REMOTE_ATOMIC = BIT(3),
+ IONIC_MRF_MW_BIND = BIT(4),
+ IONIC_MRF_ZERO_BASED = BIT(5),
+ IONIC_MRF_ON_DEMAND = BIT(6),
+ IONIC_MRF_PB = BIT(7),
+ IONIC_MRF_ACCESS_MASK = BIT(12) - 1,
+
+ /* bits that determine mr type */
+ IONIC_MRF_UKEY_EN = BIT(13),
+ IONIC_MRF_IS_MW = BIT(14),
+ IONIC_MRF_INV_EN = BIT(15),
+
+ /* base flags combinations for mr types */
+ IONIC_MRF_USER_MR = 0,
+ IONIC_MRF_PHYS_MR = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_INV_EN),
+ IONIC_MRF_MW_1 = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_IS_MW),
+ IONIC_MRF_MW_2 = (IONIC_MRF_UKEY_EN |
+ IONIC_MRF_IS_MW |
+ IONIC_MRF_INV_EN),
+};
+
+static inline int to_ionic_mr_flags(int access)
+{
+ int flags = 0;
+
+ if (access & IB_ACCESS_LOCAL_WRITE)
+ flags |= IONIC_MRF_LOCAL_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_READ)
+ flags |= IONIC_MRF_REMOTE_READ;
+
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ flags |= IONIC_MRF_REMOTE_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= IONIC_MRF_REMOTE_ATOMIC;
+
+ if (access & IB_ACCESS_MW_BIND)
+ flags |= IONIC_MRF_MW_BIND;
+
+ if (access & IB_ZERO_BASED)
+ flags |= IONIC_MRF_ZERO_BASED;
+
+ return flags;
+}
+
+enum ionic_qp_flags {
+ /* bits that determine qp access */
+ IONIC_QPF_REMOTE_WRITE = BIT(0),
+ IONIC_QPF_REMOTE_READ = BIT(1),
+ IONIC_QPF_REMOTE_ATOMIC = BIT(2),
+
+ /* bits that determine other qp behavior */
+ IONIC_QPF_SQ_PB = BIT(6),
+ IONIC_QPF_RQ_PB = BIT(7),
+ IONIC_QPF_SQ_SPEC = BIT(8),
+ IONIC_QPF_RQ_SPEC = BIT(9),
+ IONIC_QPF_REMOTE_PRIVILEGED = BIT(10),
+ IONIC_QPF_SQ_DRAINING = BIT(11),
+ IONIC_QPF_SQD_NOTIFY = BIT(12),
+ IONIC_QPF_SQ_CMB = BIT(13),
+ IONIC_QPF_RQ_CMB = BIT(14),
+ IONIC_QPF_PRIVILEGED = BIT(15),
+};
+
+static inline int from_ionic_qp_flags(int flags)
+{
+ int access_flags = 0;
+
+ if (flags & IONIC_QPF_REMOTE_WRITE)
+ access_flags |= IB_ACCESS_REMOTE_WRITE;
+
+ if (flags & IONIC_QPF_REMOTE_READ)
+ access_flags |= IB_ACCESS_REMOTE_READ;
+
+ if (flags & IONIC_QPF_REMOTE_ATOMIC)
+ access_flags |= IB_ACCESS_REMOTE_ATOMIC;
+
+ return access_flags;
+}
+
+static inline int to_ionic_qp_flags(int access, bool sqd_notify,
+ bool sq_is_cmb, bool rq_is_cmb,
+ bool sq_spec, bool rq_spec,
+ bool privileged, bool remote_privileged)
+{
+ int flags = 0;
+
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ flags |= IONIC_QPF_REMOTE_WRITE;
+
+ if (access & IB_ACCESS_REMOTE_READ)
+ flags |= IONIC_QPF_REMOTE_READ;
+
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ flags |= IONIC_QPF_REMOTE_ATOMIC;
+
+ if (sqd_notify)
+ flags |= IONIC_QPF_SQD_NOTIFY;
+
+ if (sq_is_cmb)
+ flags |= IONIC_QPF_SQ_CMB;
+
+ if (rq_is_cmb)
+ flags |= IONIC_QPF_RQ_CMB;
+
+ if (sq_spec)
+ flags |= IONIC_QPF_SQ_SPEC;
+
+ if (rq_spec)
+ flags |= IONIC_QPF_RQ_SPEC;
+
+ if (privileged)
+ flags |= IONIC_QPF_PRIVILEGED;
+
+ if (remote_privileged)
+ flags |= IONIC_QPF_REMOTE_PRIVILEGED;
+
+ return flags;
+}
+
+/* cqe non-admin status indicated in status_length field when err bit is set */
+enum ionic_status {
+ IONIC_STS_OK,
+ IONIC_STS_LOCAL_LEN_ERR,
+ IONIC_STS_LOCAL_QP_OPER_ERR,
+ IONIC_STS_LOCAL_PROT_ERR,
+ IONIC_STS_WQE_FLUSHED_ERR,
+ IONIC_STS_MEM_MGMT_OPER_ERR,
+ IONIC_STS_BAD_RESP_ERR,
+ IONIC_STS_LOCAL_ACC_ERR,
+ IONIC_STS_REMOTE_INV_REQ_ERR,
+ IONIC_STS_REMOTE_ACC_ERR,
+ IONIC_STS_REMOTE_OPER_ERR,
+ IONIC_STS_RETRY_EXCEEDED,
+ IONIC_STS_RNR_RETRY_EXCEEDED,
+ IONIC_STS_XRC_VIO_ERR,
+ IONIC_STS_LOCAL_SGL_INV_ERR,
+};
+
+static inline int ionic_to_ib_status(int sts)
+{
+ switch (sts) {
+ case IONIC_STS_OK:
+ return IB_WC_SUCCESS;
+ case IONIC_STS_LOCAL_LEN_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case IONIC_STS_LOCAL_QP_OPER_ERR:
+ case IONIC_STS_LOCAL_SGL_INV_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case IONIC_STS_LOCAL_PROT_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case IONIC_STS_WQE_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case IONIC_STS_MEM_MGMT_OPER_ERR:
+ return IB_WC_MW_BIND_ERR;
+ case IONIC_STS_BAD_RESP_ERR:
+ return IB_WC_BAD_RESP_ERR;
+ case IONIC_STS_LOCAL_ACC_ERR:
+ return IB_WC_LOC_ACCESS_ERR;
+ case IONIC_STS_REMOTE_INV_REQ_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case IONIC_STS_REMOTE_ACC_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case IONIC_STS_REMOTE_OPER_ERR:
+ return IB_WC_REM_OP_ERR;
+ case IONIC_STS_RETRY_EXCEEDED:
+ return IB_WC_RETRY_EXC_ERR;
+ case IONIC_STS_RNR_RETRY_EXCEEDED:
+ return IB_WC_RNR_RETRY_EXC_ERR;
+ case IONIC_STS_XRC_VIO_ERR:
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+/* admin queue qp type */
+enum ionic_qp_type {
+ IONIC_QPT_RC,
+ IONIC_QPT_UC,
+ IONIC_QPT_RD,
+ IONIC_QPT_UD,
+ IONIC_QPT_SRQ,
+ IONIC_QPT_XRC_INI,
+ IONIC_QPT_XRC_TGT,
+ IONIC_QPT_XRC_SRQ,
+};
+
+static inline int to_ionic_qp_type(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_GSI:
+ case IB_QPT_UD:
+ return IONIC_QPT_UD;
+ case IB_QPT_RC:
+ return IONIC_QPT_RC;
+ case IB_QPT_UC:
+ return IONIC_QPT_UC;
+ case IB_QPT_XRC_INI:
+ return IONIC_QPT_XRC_INI;
+ case IB_QPT_XRC_TGT:
+ return IONIC_QPT_XRC_TGT;
+ default:
+ return -EINVAL;
+ }
+}
+
+/* admin queue qp state */
+enum ionic_qp_state {
+ IONIC_QPS_RESET,
+ IONIC_QPS_INIT,
+ IONIC_QPS_RTR,
+ IONIC_QPS_RTS,
+ IONIC_QPS_SQD,
+ IONIC_QPS_SQE,
+ IONIC_QPS_ERR,
+};
+
+static inline int from_ionic_qp_state(enum ionic_qp_state state)
+{
+ switch (state) {
+ case IONIC_QPS_RESET:
+ return IB_QPS_RESET;
+ case IONIC_QPS_INIT:
+ return IB_QPS_INIT;
+ case IONIC_QPS_RTR:
+ return IB_QPS_RTR;
+ case IONIC_QPS_RTS:
+ return IB_QPS_RTS;
+ case IONIC_QPS_SQD:
+ return IB_QPS_SQD;
+ case IONIC_QPS_SQE:
+ return IB_QPS_SQE;
+ case IONIC_QPS_ERR:
+ return IB_QPS_ERR;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int to_ionic_qp_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return IONIC_QPS_RESET;
+ case IB_QPS_INIT:
+ return IONIC_QPS_INIT;
+ case IB_QPS_RTR:
+ return IONIC_QPS_RTR;
+ case IB_QPS_RTS:
+ return IONIC_QPS_RTS;
+ case IB_QPS_SQD:
+ return IONIC_QPS_SQD;
+ case IB_QPS_SQE:
+ return IONIC_QPS_SQE;
+ case IB_QPS_ERR:
+ return IONIC_QPS_ERR;
+ default:
+ return 0;
+ }
+}
+
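+/*
+ * The device takes both states of a modify in a single byte: the
+ * destination state in the low nibble and the current state in the
+ * high nibble (presumably the type_state field of ionic_admin_mod_qp).
+ */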
+static inline int to_ionic_qp_modify_state(enum ib_qp_state to_state,
+ enum ib_qp_state from_state)
+{
+ return to_ionic_qp_state(to_state) |
+ (to_ionic_qp_state(from_state) << 4);
+}
+
+/* fw abi v1 */
+
+/* data payload part of v1 wqe */
+union ionic_v1_pld {
+ struct ionic_sge sgl[2];
+ __be32 spec32[8];
+ __be16 spec16[16];
+ __u8 data[32];
+};
+
+/* completion queue v1 cqe */
+struct ionic_v1_cqe {
+ union {
+ struct {
+ __be16 cmd_idx;
+ __u8 cmd_op;
+ __u8 rsvd[17];
+ __le16 old_sq_cindex;
+ __le16 old_rq_cq_cindex;
+ } admin;
+ struct {
+ __u64 wqe_id;
+ __be32 src_qpn_op;
+ __u8 src_mac[6];
+ __be16 vlan_tag;
+ __be32 imm_data_rkey;
+ } recv;
+ struct {
+ __u8 rsvd[4];
+ __be32 msg_msn;
+ __u8 rsvd2[8];
+ __u64 npg_wqe_id;
+ } send;
+ };
+ __be32 status_length;
+ __be32 qid_type_flags;
+};
+
+/* bits for cqe recv */
+enum ionic_v1_cqe_src_qpn_bits {
+ IONIC_V1_CQE_RECV_QPN_MASK = 0xffffff,
+ IONIC_V1_CQE_RECV_OP_SHIFT = 24,
+
+ /* the mask could be 0x3 for the four opcodes below, but 0x1f is
+ * needed to cover the makeshift values OP_TYPE_RDMA_OPER_WITH_IMM
+ * and OP_TYPE_SEND_RCVD
+ */
+ IONIC_V1_CQE_RECV_OP_MASK = 0x1f,
+ IONIC_V1_CQE_RECV_OP_SEND = 0,
+ IONIC_V1_CQE_RECV_OP_SEND_INV = 1,
+ IONIC_V1_CQE_RECV_OP_SEND_IMM = 2,
+ IONIC_V1_CQE_RECV_OP_RDMA_IMM = 3,
+
+ IONIC_V1_CQE_RECV_IS_IPV4 = BIT(7 + IONIC_V1_CQE_RECV_OP_SHIFT),
+ IONIC_V1_CQE_RECV_IS_VLAN = BIT(6 + IONIC_V1_CQE_RECV_OP_SHIFT),
+};
+
+/* bits for cqe qid_type_flags */
+enum ionic_v1_cqe_qtf_bits {
+ IONIC_V1_CQE_COLOR = BIT(0),
+ IONIC_V1_CQE_ERROR = BIT(1),
+ IONIC_V1_CQE_TYPE_SHIFT = 5,
+ IONIC_V1_CQE_TYPE_MASK = 0x7,
+ IONIC_V1_CQE_QID_SHIFT = 8,
+
+ IONIC_V1_CQE_TYPE_ADMIN = 0,
+ IONIC_V1_CQE_TYPE_RECV = 1,
+ IONIC_V1_CQE_TYPE_SEND_MSN = 2,
+ IONIC_V1_CQE_TYPE_SEND_NPG = 3,
+};
+
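+/* the color bit toggles on each pass through the cq ring, marking valid entries */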
+static inline bool ionic_v1_cqe_color(struct ionic_v1_cqe *cqe)
+{
+ return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_COLOR);
+}
+
+static inline bool ionic_v1_cqe_error(struct ionic_v1_cqe *cqe)
+{
+ return cqe->qid_type_flags & cpu_to_be32(IONIC_V1_CQE_ERROR);
+}
+
+static inline bool ionic_v1_cqe_recv_is_ipv4(struct ionic_v1_cqe *cqe)
+{
+ return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_IPV4);
+}
+
+static inline bool ionic_v1_cqe_recv_is_vlan(struct ionic_v1_cqe *cqe)
+{
+ return cqe->recv.src_qpn_op & cpu_to_be32(IONIC_V1_CQE_RECV_IS_VLAN);
+}
+
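+/* saturate the qid field, presumably so a stale cqe cannot match a live qid */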
+static inline void ionic_v1_cqe_clean(struct ionic_v1_cqe *cqe)
+{
+ cqe->qid_type_flags |= cpu_to_be32(~0u << IONIC_V1_CQE_QID_SHIFT);
+}
+
+static inline u32 ionic_v1_cqe_qtf(struct ionic_v1_cqe *cqe)
+{
+ return be32_to_cpu(cqe->qid_type_flags);
+}
+
+static inline u8 ionic_v1_cqe_qtf_type(u32 qtf)
+{
+ return (qtf >> IONIC_V1_CQE_TYPE_SHIFT) & IONIC_V1_CQE_TYPE_MASK;
+}
+
+static inline u32 ionic_v1_cqe_qtf_qid(u32 qtf)
+{
+ return qtf >> IONIC_V1_CQE_QID_SHIFT;
+}
+
+/* v1 base wqe header */
+struct ionic_v1_base_hdr {
+ __u64 wqe_id;
+ __u8 op;
+ __u8 num_sge_key;
+ __be16 flags;
+ __be32 imm_data_key;
+};
+
+/* v1 receive wqe body */
+struct ionic_v1_recv_bdy {
+ __u8 rsvd[16];
+ union ionic_v1_pld pld;
+};
+
+/* v1 send/rdma wqe body (common, has sgl) */
+struct ionic_v1_common_bdy {
+ union {
+ struct {
+ __be32 ah_id;
+ __be32 dest_qpn;
+ __be32 dest_qkey;
+ } send;
+ struct {
+ __be32 remote_va_high;
+ __be32 remote_va_low;
+ __be32 remote_rkey;
+ } rdma;
+ };
+ __be32 length;
+ union ionic_v1_pld pld;
+};
+
+/* v1 atomic wqe body */
+struct ionic_v1_atomic_bdy {
+ __be32 remote_va_high;
+ __be32 remote_va_low;
+ __be32 remote_rkey;
+ __be32 swap_add_high;
+ __be32 swap_add_low;
+ __be32 compare_high;
+ __be32 compare_low;
+ __u8 rsvd[4];
+ struct ionic_sge sge;
+};
+
+/* v1 reg mr wqe body */
+struct ionic_v1_reg_mr_bdy {
+ __be64 va;
+ __be64 length;
+ __be64 offset;
+ __be64 dma_addr;
+ __be32 map_count;
+ __be16 flags;
+ __u8 dir_size_log2;
+ __u8 page_size_log2;
+ __u8 rsvd[8];
+};
+
+/* v1 bind mw wqe body */
+struct ionic_v1_bind_mw_bdy {
+ __be64 va;
+ __be64 length;
+ __be32 lkey;
+ __be16 flags;
+ __u8 rsvd[26];
+};
+
+/* v1 send/recv wqe */
+struct ionic_v1_wqe {
+ struct ionic_v1_base_hdr base;
+ union {
+ struct ionic_v1_recv_bdy recv;
+ struct ionic_v1_common_bdy common;
+ struct ionic_v1_atomic_bdy atomic;
+ struct ionic_v1_reg_mr_bdy reg_mr;
+ struct ionic_v1_bind_mw_bdy bind_mw;
+ };
+};
+
+/* queue pair v1 send opcodes */
+enum ionic_v1_op {
+ IONIC_V1_OP_SEND,
+ IONIC_V1_OP_SEND_INV,
+ IONIC_V1_OP_SEND_IMM,
+ IONIC_V1_OP_RDMA_READ,
+ IONIC_V1_OP_RDMA_WRITE,
+ IONIC_V1_OP_RDMA_WRITE_IMM,
+ IONIC_V1_OP_ATOMIC_CS,
+ IONIC_V1_OP_ATOMIC_FA,
+ IONIC_V1_OP_REG_MR,
+ IONIC_V1_OP_LOCAL_INV,
+ IONIC_V1_OP_BIND_MW,
+
+ /* flags */
+ IONIC_V1_FLAG_FENCE = BIT(0),
+ IONIC_V1_FLAG_SOL = BIT(1),
+ IONIC_V1_FLAG_INL = BIT(2),
+ IONIC_V1_FLAG_SIG = BIT(3),
+
+ /* the high four flag bits select the sgl spec format */
+ IONIC_V1_FLAG_SPEC32 = (1u << 12),
+ IONIC_V1_FLAG_SPEC16 = (2u << 12),
+ IONIC_V1_SPEC_FIRST_SGE = 2,
+};
+
+/* queue pair v2 send opcodes */
+enum ionic_v2_op {
+ IONIC_V2_OPSL_OUT = 0x20,
+ IONIC_V2_OPSL_IMM = 0x40,
+ IONIC_V2_OPSL_INV = 0x80,
+
+ IONIC_V2_OP_SEND = 0x0 | IONIC_V2_OPSL_OUT,
+ IONIC_V2_OP_SEND_IMM = IONIC_V2_OP_SEND | IONIC_V2_OPSL_IMM,
+ IONIC_V2_OP_SEND_INV = IONIC_V2_OP_SEND | IONIC_V2_OPSL_INV,
+
+ IONIC_V2_OP_RDMA_WRITE = 0x1 | IONIC_V2_OPSL_OUT,
+ IONIC_V2_OP_RDMA_WRITE_IMM = IONIC_V2_OP_RDMA_WRITE | IONIC_V2_OPSL_IMM,
+
+ IONIC_V2_OP_RDMA_READ = 0x2,
+
+ IONIC_V2_OP_ATOMIC_CS = 0x4,
+ IONIC_V2_OP_ATOMIC_FA = 0x5,
+ IONIC_V2_OP_REG_MR = 0x6,
+ IONIC_V2_OP_LOCAL_INV = 0x7,
+ IONIC_V2_OP_BIND_MW = 0x8,
+};
+
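+/*
+ * Minimum wqe size needed to hold the requested sges or inline data.
+ * With express doorbell (expdb), the tail of the wqe apparently holds
+ * the doorbell data, costing one sge slot or IONIC_EXP_DBELL_SZ bytes.
+ */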
+static inline size_t ionic_v1_send_wqe_min_size(int min_sge, int min_data,
+ int spec, bool expdb)
+{
+ size_t sz_wqe, sz_sgl, sz_data;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ min_sge += IONIC_V1_SPEC_FIRST_SGE;
+
+ if (expdb) {
+ min_sge += 1;
+ min_data += IONIC_EXP_DBELL_SZ;
+ }
+
+ sz_wqe = sizeof(struct ionic_v1_wqe);
+ sz_sgl = offsetof(struct ionic_v1_wqe, common.pld.sgl[min_sge]);
+ sz_data = offsetof(struct ionic_v1_wqe, common.pld.data[min_data]);
+
+ if (sz_sgl > sz_wqe)
+ sz_wqe = sz_sgl;
+
+ if (sz_data > sz_wqe)
+ sz_wqe = sz_data;
+
+ return sz_wqe;
+}
+
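+/*
+ * How many sges fit in one wqe of the given stride: the sge pointer is
+ * set to the stride size and the wqe pointer to zero, so the pointer
+ * subtraction below yields the count purely from the struct layout.
+ */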
+static inline int ionic_v1_send_wqe_max_sge(u8 stride_log2, int spec,
+ bool expdb)
+{
+ struct ionic_sge *sge = (void *)(1ull << stride_log2);
+ struct ionic_v1_wqe *wqe = (void *)0;
+ int num_sge = 0;
+
+ if (expdb)
+ sge -= 1;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ num_sge = IONIC_V1_SPEC_FIRST_SGE;
+
+ num_sge = sge - &wqe->common.pld.sgl[num_sge];
+
+ if (spec && num_sge > spec)
+ num_sge = spec;
+
+ return num_sge;
+}
+
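+/* the same layout trick as above, for the inline data capacity of a stride */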
+static inline int ionic_v1_send_wqe_max_data(u8 stride_log2, bool expdb)
+{
+ struct ionic_v1_wqe *wqe = (void *)0;
+ __u8 *data = (void *)(1ull << stride_log2);
+
+ if (expdb)
+ data -= IONIC_EXP_DBELL_SZ;
+
+ return data - wqe->common.pld.data;
+}
+
+static inline size_t ionic_v1_recv_wqe_min_size(int min_sge, int spec,
+ bool expdb)
+{
+ size_t sz_wqe, sz_sgl;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ min_sge += IONIC_V1_SPEC_FIRST_SGE;
+
+ if (expdb)
+ min_sge += 1;
+
+ sz_wqe = sizeof(struct ionic_v1_wqe);
+ sz_sgl = offsetof(struct ionic_v1_wqe, recv.pld.sgl[min_sge]);
+
+ if (sz_sgl > sz_wqe)
+ sz_wqe = sz_sgl;
+
+ return sz_wqe;
+}
+
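+/* recv-side counterpart of ionic_v1_send_wqe_max_sge above */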
+static inline int ionic_v1_recv_wqe_max_sge(u8 stride_log2, int spec,
+ bool expdb)
+{
+ struct ionic_sge *sge = (void *)(1ull << stride_log2);
+ struct ionic_v1_wqe *wqe = (void *)0;
+ int num_sge = 0;
+
+ if (expdb)
+ sge -= 1;
+
+ if (spec > IONIC_V1_SPEC_FIRST_SGE)
+ num_sge = IONIC_V1_SPEC_FIRST_SGE;
+
+ num_sge = sge - &wqe->recv.pld.sgl[num_sge];
+
+ if (spec && num_sge > spec)
+ num_sge = spec;
+
+ return num_sge;
+}
+
+static inline int ionic_v1_use_spec_sge(int min_sge, int spec)
+{
+ if (!spec || min_sge > spec)
+ return 0;
+
+ if (min_sge <= IONIC_V1_SPEC_FIRST_SGE)
+ return IONIC_V1_SPEC_FIRST_SGE;
+
+ return spec;
+}
+
+struct ionic_admin_stats_hdr {
+ __le64 dma_addr;
+ __le32 length;
+ __le32 id_ver;
+ __u8 type_state;
+} __packed;
+
+#define IONIC_ADMIN_STATS_HDRS_IN_V1_LEN 17
+static_assert(sizeof(struct ionic_admin_stats_hdr) ==
+ IONIC_ADMIN_STATS_HDRS_IN_V1_LEN);
+
+struct ionic_admin_create_ah {
+ __le64 dma_addr;
+ __le32 length;
+ __le32 pd_id;
+ __le32 id_ver;
+ __le16 dbid_flags;
+ __u8 csum_profile;
+ __u8 crypto;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_AH_IN_V1_LEN 24
+static_assert(sizeof(struct ionic_admin_create_ah) ==
+ IONIC_ADMIN_CREATE_AH_IN_V1_LEN);
+
+struct ionic_admin_destroy_ah {
+ __le32 ah_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_AH_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_ah) ==
+ IONIC_ADMIN_DESTROY_AH_IN_V1_LEN);
+
+struct ionic_admin_query_ah {
+ __le64 dma_addr;
+} __packed;
+
+#define IONIC_ADMIN_QUERY_AH_IN_V1_LEN 8
+static_assert(sizeof(struct ionic_admin_query_ah) ==
+ IONIC_ADMIN_QUERY_AH_IN_V1_LEN);
+
+struct ionic_admin_create_mr {
+ __le64 va;
+ __le64 length;
+ __le32 pd_id;
+ __le32 id_ver;
+ __le32 tbl_index;
+ __le32 map_count;
+ __le64 dma_addr;
+ __le16 dbid_flags;
+ __u8 pt_type;
+ __u8 dir_size_log2;
+ __u8 page_size_log2;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_MR_IN_V1_LEN 45
+static_assert(sizeof(struct ionic_admin_create_mr) ==
+ IONIC_ADMIN_CREATE_MR_IN_V1_LEN);
+
+struct ionic_admin_destroy_mr {
+ __le32 mr_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_MR_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_mr) ==
+ IONIC_ADMIN_DESTROY_MR_IN_V1_LEN);
+
+struct ionic_admin_create_cq {
+ __le32 eq_id;
+ __u8 depth_log2;
+ __u8 stride_log2;
+ __u8 dir_size_log2_rsvd;
+ __u8 page_size_log2;
+ __le32 cq_flags;
+ __le32 id_ver;
+ __le32 tbl_index;
+ __le32 map_count;
+ __le64 dma_addr;
+ __le16 dbid_flags;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_CQ_IN_V1_LEN 34
+static_assert(sizeof(struct ionic_admin_create_cq) ==
+ IONIC_ADMIN_CREATE_CQ_IN_V1_LEN);
+
+struct ionic_admin_destroy_cq {
+ __le32 cq_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_cq) ==
+ IONIC_ADMIN_DESTROY_CQ_IN_V1_LEN);
+
+struct ionic_admin_create_qp {
+ __le32 pd_id;
+ __be32 priv_flags;
+ __le32 sq_cq_id;
+ __u8 sq_depth_log2;
+ __u8 sq_stride_log2;
+ __u8 sq_dir_size_log2_rsvd;
+ __u8 sq_page_size_log2;
+ __le32 sq_tbl_index_xrcd_id;
+ __le32 sq_map_count;
+ __le64 sq_dma_addr;
+ __le32 rq_cq_id;
+ __u8 rq_depth_log2;
+ __u8 rq_stride_log2;
+ __u8 rq_dir_size_log2_rsvd;
+ __u8 rq_page_size_log2;
+ __le32 rq_tbl_index_srq_id;
+ __le32 rq_map_count;
+ __le64 rq_dma_addr;
+ __le32 id_ver;
+ __le16 dbid_flags;
+ __u8 type_state;
+ __u8 rsvd;
+} __packed;
+
+#define IONIC_ADMIN_CREATE_QP_IN_V1_LEN 64
+static_assert(sizeof(struct ionic_admin_create_qp) ==
+ IONIC_ADMIN_CREATE_QP_IN_V1_LEN);
+
+struct ionic_admin_destroy_qp {
+ __le32 qp_id;
+} __packed;
+
+#define IONIC_ADMIN_DESTROY_QP_IN_V1_LEN 4
+static_assert(sizeof(struct ionic_admin_destroy_qp) ==
+ IONIC_ADMIN_DESTROY_QP_IN_V1_LEN);
+
+struct ionic_admin_mod_qp {
+ __be32 attr_mask;
+ __u8 dcqcn_profile;
+ __u8 tfp_csum_profile;
+ __be16 access_flags;
+ __le32 rq_psn;
+ __le32 sq_psn;
+ __le32 qkey_dest_qpn;
+ __le32 rate_limit_kbps;
+ __u8 pmtu;
+ __u8 retry;
+ __u8 rnr_timer;
+ __u8 retry_timeout;
+ __u8 rsq_depth;
+ __u8 rrq_depth;
+ __le16 pkey_id;
+ __le32 ah_id_len;
+ __u8 en_pcp;
+ __u8 ip_dscp;
+ __u8 rsvd2;
+ __u8 type_state;
+ union {
+ struct {
+ __le16 rsvd1;
+ };
+ __le32 rrq_index;
+ };
+ __le32 rsq_index;
+ __le64 dma_addr;
+ __le32 id_ver;
+} __packed;
+
+#define IONIC_ADMIN_MODIFY_QP_IN_V1_LEN 60
+static_assert(sizeof(struct ionic_admin_mod_qp) ==
+ IONIC_ADMIN_MODIFY_QP_IN_V1_LEN);
+
+struct ionic_admin_query_qp {
+ __le64 hdr_dma_addr;
+ __le64 sq_dma_addr;
+ __le64 rq_dma_addr;
+ __le32 ah_id;
+ __le32 id_ver;
+ __le16 dbid_flags;
+} __packed;
+
+#define IONIC_ADMIN_QUERY_QP_IN_V1_LEN 34
+static_assert(sizeof(struct ionic_admin_query_qp) ==
+ IONIC_ADMIN_QUERY_QP_IN_V1_LEN);
+
+#define ADMIN_WQE_STRIDE 64
+#define ADMIN_WQE_HDR_LEN 4
+
+/* admin queue v1 wqe */
+struct ionic_v1_admin_wqe {
+ __u8 op;
+ __u8 rsvd;
+ __le16 len;
+
+ union {
+ struct ionic_admin_stats_hdr stats;
+ struct ionic_admin_create_ah create_ah;
+ struct ionic_admin_destroy_ah destroy_ah;
+ struct ionic_admin_query_ah query_ah;
+ struct ionic_admin_create_mr create_mr;
+ struct ionic_admin_destroy_mr destroy_mr;
+ struct ionic_admin_create_cq create_cq;
+ struct ionic_admin_destroy_cq destroy_cq;
+ struct ionic_admin_create_qp create_qp;
+ struct ionic_admin_destroy_qp destroy_qp;
+ struct ionic_admin_mod_qp mod_qp;
+ struct ionic_admin_query_qp query_qp;
+ } cmd;
+};
+
+/* side data for query qp */
+struct ionic_v1_admin_query_qp_sq {
+ __u8 rnr_timer;
+ __u8 retry_timeout;
+ __be16 access_perms_flags;
+ __be16 rsvd;
+ __be16 pkey_id;
+ __be32 qkey_dest_qpn;
+ __be32 rate_limit_kbps;
+ __be32 rq_psn;
+};
+
+struct ionic_v1_admin_query_qp_rq {
+ __u8 state_pmtu;
+ __u8 retry_rnrtry;
+ __u8 rrq_depth;
+ __u8 rsq_depth;
+ __be32 sq_psn;
+ __be16 access_perms_flags;
+ __be16 rsvd;
+};
+
+/* admin queue v1 opcodes */
+enum ionic_v1_admin_op {
+ IONIC_V1_ADMIN_NOOP,
+ IONIC_V1_ADMIN_CREATE_CQ,
+ IONIC_V1_ADMIN_CREATE_QP,
+ IONIC_V1_ADMIN_CREATE_MR,
+ IONIC_V1_ADMIN_STATS_HDRS,
+ IONIC_V1_ADMIN_STATS_VALS,
+ IONIC_V1_ADMIN_DESTROY_MR,
+ IONIC_V1_ADMIN_RSVD_7, /* RESIZE_CQ */
+ IONIC_V1_ADMIN_DESTROY_CQ,
+ IONIC_V1_ADMIN_MODIFY_QP,
+ IONIC_V1_ADMIN_QUERY_QP,
+ IONIC_V1_ADMIN_DESTROY_QP,
+ IONIC_V1_ADMIN_DEBUG,
+ IONIC_V1_ADMIN_CREATE_AH,
+ IONIC_V1_ADMIN_QUERY_AH,
+ IONIC_V1_ADMIN_MODIFY_DCQCN,
+ IONIC_V1_ADMIN_DESTROY_AH,
+ IONIC_V1_ADMIN_QP_STATS_HDRS,
+ IONIC_V1_ADMIN_QP_STATS_VALS,
+ IONIC_V1_ADMIN_OPCODES_MAX,
+};
+
+/* admin queue v1 cqe status */
+enum ionic_v1_admin_status {
+ IONIC_V1_ASTS_OK,
+ IONIC_V1_ASTS_BAD_CMD,
+ IONIC_V1_ASTS_BAD_INDEX,
+ IONIC_V1_ASTS_BAD_STATE,
+ IONIC_V1_ASTS_BAD_TYPE,
+ IONIC_V1_ASTS_BAD_ATTR,
+ IONIC_V1_ASTS_MSG_TOO_BIG,
+};
+
+/* event queue v1 eqe */
+struct ionic_v1_eqe {
+ __be32 evt;
+};
+
+/* bits for eqe evt */
+enum ionic_v1_eqe_evt_bits {
+ IONIC_V1_EQE_COLOR = BIT(0),
+ IONIC_V1_EQE_TYPE_SHIFT = 1,
+ IONIC_V1_EQE_TYPE_MASK = 0x7,
+ IONIC_V1_EQE_CODE_SHIFT = 4,
+ IONIC_V1_EQE_CODE_MASK = 0xf,
+ IONIC_V1_EQE_QID_SHIFT = 8,
+
+ /* cq events */
+ IONIC_V1_EQE_TYPE_CQ = 0,
+ /* cq normal events */
+ IONIC_V1_EQE_CQ_NOTIFY = 0,
+ /* cq error events */
+ IONIC_V1_EQE_CQ_ERR = 8,
+
+ /* qp and srq events */
+ IONIC_V1_EQE_TYPE_QP = 1,
+ /* qp normal events */
+ IONIC_V1_EQE_SRQ_LEVEL = 0,
+ IONIC_V1_EQE_SQ_DRAIN = 1,
+ IONIC_V1_EQE_QP_COMM_EST = 2,
+ IONIC_V1_EQE_QP_LAST_WQE = 3,
+ /* qp error events */
+ IONIC_V1_EQE_QP_ERR = 8,
+ IONIC_V1_EQE_QP_ERR_REQUEST = 9,
+ IONIC_V1_EQE_QP_ERR_ACCESS = 10,
+};
+
+enum ionic_tfp_csum_profiles {
+ IONIC_TFP_CSUM_PROF_ETH_IPV4_UDP = 0,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP = 1,
+ IONIC_TFP_CSUM_PROF_ETH_IPV6_UDP = 2,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_UDP = 3,
+ IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 4,
+ IONIC_TFP_CSUM_PROF_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 5,
+ IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV4_UDP = 6,
+ IONIC_TFP_CSUM_PROF_QTAG_IPV4_UDP_VXLAN_ETH_QTAG_IPV6_UDP = 7,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_IPV4_UDP = 8,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_ESP_UDP = 9,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_ESP_UDP = 10,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV6_ESP_UDP = 11,
+ IONIC_TFP_CSUM_PROF_ETH_QTAG_IPV4_UDP_CSUM = 12,
+};
+
+static inline bool ionic_v1_eqe_color(struct ionic_v1_eqe *eqe)
+{
+ return eqe->evt & cpu_to_be32(IONIC_V1_EQE_COLOR);
+}
+
+static inline u32 ionic_v1_eqe_evt(struct ionic_v1_eqe *eqe)
+{
+ return be32_to_cpu(eqe->evt);
+}
+
+static inline u8 ionic_v1_eqe_evt_type(u32 evt)
+{
+ return (evt >> IONIC_V1_EQE_TYPE_SHIFT) & IONIC_V1_EQE_TYPE_MASK;
+}
+
+static inline u8 ionic_v1_eqe_evt_code(u32 evt)
+{
+ return (evt >> IONIC_V1_EQE_CODE_SHIFT) & IONIC_V1_EQE_CODE_MASK;
+}
+
+static inline u32 ionic_v1_eqe_evt_qid(u32 evt)
+{
+ return evt >> IONIC_V1_EQE_QID_SHIFT;
+}
+
+enum ionic_v1_stat_bits {
+ IONIC_V1_STAT_TYPE_SHIFT = 28,
+ IONIC_V1_STAT_TYPE_NONE = 0,
+ IONIC_V1_STAT_TYPE_8 = 1,
+ IONIC_V1_STAT_TYPE_LE16 = 2,
+ IONIC_V1_STAT_TYPE_LE32 = 3,
+ IONIC_V1_STAT_TYPE_LE64 = 4,
+ IONIC_V1_STAT_TYPE_BE16 = 5,
+ IONIC_V1_STAT_TYPE_BE32 = 6,
+ IONIC_V1_STAT_TYPE_BE64 = 7,
+ IONIC_V1_STAT_OFF_MASK = BIT(IONIC_V1_STAT_TYPE_SHIFT) - 1,
+};
+
+struct ionic_v1_stat {
+ union {
+ __be32 be_type_off;
+ u32 type_off;
+ };
+ char name[28];
+};
+
+static inline int ionic_v1_stat_type(struct ionic_v1_stat *hdr)
+{
+ return hdr->type_off >> IONIC_V1_STAT_TYPE_SHIFT;
+}
+
+static inline unsigned int ionic_v1_stat_off(struct ionic_v1_stat *hdr)
+{
+ return hdr->type_off & IONIC_V1_STAT_OFF_MASK;
+}
+
+#endif /* _IONIC_FW_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_hw_stats.c b/drivers/infiniband/hw/ionic/ionic_hw_stats.c
new file mode 100644
index 000000000000..244a80dde08f
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_hw_stats.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/dma-mapping.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
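+/*
+ * Byte-swap the stat headers in place, force-terminate the names, and
+ * return the count of entries before the IONIC_V1_STAT_TYPE_NONE
+ * sentinel.
+ */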
+static int ionic_v1_stat_normalize(struct ionic_v1_stat *hw_stats,
+ int hw_stats_count)
+{
+ int hw_stat_i;
+
+ for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
+ struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
+
+ stat->type_off = be32_to_cpu(stat->be_type_off);
+ stat->name[sizeof(stat->name) - 1] = 0;
+ if (ionic_v1_stat_type(stat) == IONIC_V1_STAT_TYPE_NONE)
+ break;
+ }
+
+ return hw_stat_i;
+}
+
+static void ionic_fill_stats_desc(struct rdma_stat_desc *hw_stats_hdrs,
+ struct ionic_v1_stat *hw_stats,
+ int hw_stats_count)
+{
+ int hw_stat_i;
+
+ for (hw_stat_i = 0; hw_stat_i < hw_stats_count; ++hw_stat_i) {
+ struct ionic_v1_stat *stat = &hw_stats[hw_stat_i];
+
+ hw_stats_hdrs[hw_stat_i].name = stat->name;
+ }
+}
+
+static u64 ionic_v1_stat_val(struct ionic_v1_stat *stat,
+ void *vals_buf, size_t vals_len)
+{
+ unsigned int off = ionic_v1_stat_off(stat);
+ int type = ionic_v1_stat_type(stat);
+
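+/* a stat value must lie entirely within the buffer and be naturally aligned */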
+#define __ionic_v1_stat_validate(__type) \
+ ((off + sizeof(__type) <= vals_len) && \
+ (IS_ALIGNED(off, sizeof(__type))))
+
+ switch (type) {
+ case IONIC_V1_STAT_TYPE_8:
+ if (__ionic_v1_stat_validate(u8))
+ return *(u8 *)(vals_buf + off);
+ break;
+ case IONIC_V1_STAT_TYPE_LE16:
+ if (__ionic_v1_stat_validate(__le16))
+ return le16_to_cpu(*(__le16 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_LE32:
+ if (__ionic_v1_stat_validate(__le32))
+ return le32_to_cpu(*(__le32 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_LE64:
+ if (__ionic_v1_stat_validate(__le64))
+ return le64_to_cpu(*(__le64 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE16:
+ if (__ionic_v1_stat_validate(__be16))
+ return be16_to_cpu(*(__be16 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE32:
+ if (__ionic_v1_stat_validate(__be32))
+ return be32_to_cpu(*(__be32 *)(vals_buf + off));
+ break;
+ case IONIC_V1_STAT_TYPE_BE64:
+ if (__ionic_v1_stat_validate(__be64))
+ return be64_to_cpu(*(__be64 *)(vals_buf + off));
+ break;
+ }
+
+ return ~0ull;
+#undef __ionic_v1_stat_validate
+}
+
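+/* issue a single stats admin command and wait for it to complete */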
+static int ionic_hw_stats_cmd(struct ionic_ibdev *dev,
+ dma_addr_t dma, size_t len, int qid, int op)
+{
+ struct ionic_admin_wr wr = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(wr.work),
+ .wqe = {
+ .op = op,
+ .len = cpu_to_le16(IONIC_ADMIN_STATS_HDRS_IN_V1_LEN),
+ .cmd.stats = {
+ .dma_addr = cpu_to_le64(dma),
+ .length = cpu_to_le32(len),
+ .id_ver = cpu_to_le32(qid),
+ },
+ }
+ };
+
+ if (dev->lif_cfg.admin_opcodes <= op)
+ return -EBADRQC;
+
+ ionic_admin_post(dev, &wr);
+
+ return ionic_admin_wait(dev, &wr, IONIC_ADMIN_F_INTERRUPT);
+}
+
+static int ionic_init_hw_stats(struct ionic_ibdev *dev)
+{
+ dma_addr_t hw_stats_dma;
+ int rc, hw_stats_count;
+
+ if (dev->hw_stats_hdrs)
+ return 0;
+
+ dev->hw_stats_count = 0;
+
+ /* buffer for current values from the device */
+ dev->hw_stats_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dev->hw_stats_buf) {
+ rc = -ENOMEM;
+ goto err_buf;
+ }
+
+ /* buffer for names, sizes, offsets of values */
+ dev->hw_stats = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!dev->hw_stats) {
+ rc = -ENOMEM;
+ goto err_hw_stats;
+ }
+
+ /* request the names, sizes, offsets */
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE, 0,
+ IONIC_V1_ADMIN_STATS_HDRS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* normalize and count the number of hw_stats */
+ hw_stats_count =
+ ionic_v1_stat_normalize(dev->hw_stats,
+ PAGE_SIZE / sizeof(*dev->hw_stats));
+ if (!hw_stats_count) {
+ rc = -ENODATA;
+ goto err_dma;
+ }
+
+ dev->hw_stats_count = hw_stats_count;
+
+ /* alloc and init array of names, for alloc_hw_stats */
+ dev->hw_stats_hdrs = kcalloc(hw_stats_count,
+ sizeof(*dev->hw_stats_hdrs),
+ GFP_KERNEL);
+ if (!dev->hw_stats_hdrs) {
+ rc = -ENOMEM;
+ goto err_dma;
+ }
+
+ ionic_fill_stats_desc(dev->hw_stats_hdrs, dev->hw_stats,
+ hw_stats_count);
+
+ return 0;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ kfree(dev->hw_stats);
+err_hw_stats:
+ kfree(dev->hw_stats_buf);
+err_buf:
+ dev->hw_stats_count = 0;
+ dev->hw_stats = NULL;
+ dev->hw_stats_buf = NULL;
+ dev->hw_stats_hdrs = NULL;
+ return rc;
+}
+
+static struct rdma_hw_stats *ionic_alloc_hw_stats(struct ib_device *ibdev,
+ u32 port)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ if (port != 1)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(dev->hw_stats_hdrs,
+ dev->hw_stats_count,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int ionic_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *hw_stats,
+ u32 port, int index)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ dma_addr_t hw_stats_dma;
+ int rc, hw_stat_i;
+
+ if (port != 1)
+ return -EINVAL;
+
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, dev->hw_stats_buf,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
+ 0, IONIC_V1_ADMIN_STATS_VALS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ for (hw_stat_i = 0; hw_stat_i < dev->hw_stats_count; ++hw_stat_i)
+ hw_stats->value[hw_stat_i] =
+ ionic_v1_stat_val(&dev->hw_stats[hw_stat_i],
+ dev->hw_stats_buf, PAGE_SIZE);
+
+ return hw_stat_i;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ return rc;
+}
+
+static struct rdma_hw_stats *
+ionic_counter_alloc_stats(struct rdma_counter *counter)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_counter *cntr;
+ int err;
+
+ cntr = kzalloc(sizeof(*cntr), GFP_KERNEL);
+ if (!cntr)
+ return NULL;
+
+ /* buffer for current values from the device */
+ cntr->vals = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!cntr->vals)
+ goto err_vals;
+
+ err = xa_alloc(&dev->counter_stats->xa_counters, &counter->id,
+ cntr,
+ XA_LIMIT(0, IONIC_MAX_QPID),
+ GFP_KERNEL);
+ if (err)
+ goto err_xa;
+
+ INIT_LIST_HEAD(&cntr->qp_list);
+
+ return rdma_alloc_hw_stats_struct(dev->counter_stats->stats_hdrs,
+ dev->counter_stats->queue_stats_count,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+err_xa:
+ kfree(cntr->vals);
+err_vals:
+ kfree(cntr);
+
+ return NULL;
+}
+
+static int ionic_counter_dealloc(struct rdma_counter *counter)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_counter *cntr;
+
+ cntr = xa_erase(&dev->counter_stats->xa_counters, counter->id);
+ if (!cntr)
+ return -EINVAL;
+
+ kfree(cntr->vals);
+ kfree(cntr);
+
+ return 0;
+}
+
+static int ionic_counter_bind_qp(struct rdma_counter *counter,
+ struct ib_qp *ibqp,
+ u32 port)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(counter->device);
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+ struct ionic_counter *cntr;
+
+ cntr = xa_load(&dev->counter_stats->xa_counters, counter->id);
+ if (!cntr)
+ return -EINVAL;
+
+ list_add_tail(&qp->qp_list_counter, &cntr->qp_list);
+ ibqp->counter = counter;
+
+ return 0;
+}
+
+static int ionic_counter_unbind_qp(struct ib_qp *ibqp, u32 port)
+{
+ struct ionic_qp *qp = to_ionic_qp(ibqp);
+
+ if (ibqp->counter) {
+ list_del(&qp->qp_list_counter);
+ ibqp->counter = NULL;
+ }
+
+ return 0;
+}
+
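+/* accumulate per-qp stats across every qp bound to this counter */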
+static int ionic_get_qp_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *hw_stats,
+ u32 counter_id)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ struct ionic_counter_stats *cs;
+ struct ionic_counter *cntr;
+ dma_addr_t hw_stats_dma;
+ struct ionic_qp *qp;
+ int rc, stat_i = 0;
+
+ cs = dev->counter_stats;
+ cntr = xa_load(&cs->xa_counters, counter_id);
+ if (!cntr)
+ return -EINVAL;
+
+ hw_stats_dma = dma_map_single(dev->lif_cfg.hwdev, cntr->vals,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hw_stats_dma);
+ if (rc)
+ return rc;
+
+ memset(hw_stats->value, 0, sizeof(u64) * hw_stats->num_counters);
+
+ list_for_each_entry(qp, &cntr->qp_list, qp_list_counter) {
+ rc = ionic_hw_stats_cmd(dev, hw_stats_dma, PAGE_SIZE,
+ qp->qpid,
+ IONIC_V1_ADMIN_QP_STATS_VALS);
+ if (rc)
+ goto err_cmd;
+
+ for (stat_i = 0; stat_i < cs->queue_stats_count; ++stat_i)
+ hw_stats->value[stat_i] +=
+ ionic_v1_stat_val(&cs->hdr[stat_i],
+ cntr->vals,
+ PAGE_SIZE);
+ }
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ return stat_i;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hw_stats_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ return rc;
+}
+
+static int ionic_counter_update_stats(struct rdma_counter *counter)
+{
+ return ionic_get_qp_stats(counter->device, counter->stats, counter->id);
+}
+
+static int ionic_alloc_counters(struct ionic_ibdev *dev)
+{
+ struct ionic_counter_stats *cs = dev->counter_stats;
+ int rc, hw_stats_count;
+ dma_addr_t hdr_dma;
+
+ /* buffer for names, sizes, offsets of values */
+ cs->hdr = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!cs->hdr)
+ return -ENOMEM;
+
+ hdr_dma = dma_map_single(dev->lif_cfg.hwdev, cs->hdr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, hdr_dma);
+ if (rc)
+ goto err_dma;
+
+ rc = ionic_hw_stats_cmd(dev, hdr_dma, PAGE_SIZE, 0,
+ IONIC_V1_ADMIN_QP_STATS_HDRS);
+ if (rc)
+ goto err_cmd;
+
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* normalize and count the number of hw_stats */
+ hw_stats_count = ionic_v1_stat_normalize(cs->hdr,
+ PAGE_SIZE / sizeof(*cs->hdr));
+ if (!hw_stats_count) {
+ rc = -ENODATA;
+ goto err_dma;
+ }
+
+ cs->queue_stats_count = hw_stats_count;
+
+ /* alloc and init array of names */
+ cs->stats_hdrs = kcalloc(hw_stats_count, sizeof(*cs->stats_hdrs),
+ GFP_KERNEL);
+ if (!cs->stats_hdrs) {
+ rc = -ENOMEM;
+ goto err_dma;
+ }
+
+ ionic_fill_stats_desc(cs->stats_hdrs, cs->hdr, hw_stats_count);
+
+ return 0;
+
+err_cmd:
+ dma_unmap_single(dev->lif_cfg.hwdev, hdr_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+err_dma:
+ kfree(cs->hdr);
+
+ return rc;
+}
+
+static const struct ib_device_ops ionic_hw_stats_ops = {
+ .driver_id = RDMA_DRIVER_IONIC,
+ .alloc_hw_port_stats = ionic_alloc_hw_stats,
+ .get_hw_stats = ionic_get_hw_stats,
+};
+
+static const struct ib_device_ops ionic_counter_stats_ops = {
+ .counter_alloc_stats = ionic_counter_alloc_stats,
+ .counter_dealloc = ionic_counter_dealloc,
+ .counter_bind_qp = ionic_counter_bind_qp,
+ .counter_unbind_qp = ionic_counter_unbind_qp,
+ .counter_update_stats = ionic_counter_update_stats,
+};
+
+void ionic_stats_init(struct ionic_ibdev *dev)
+{
+ u16 stats_type = dev->lif_cfg.stats_type;
+ int rc;
+
+ if (stats_type & IONIC_LIF_RDMA_STAT_GLOBAL) {
+ rc = ionic_init_hw_stats(dev);
+ if (rc)
+ ibdev_dbg(&dev->ibdev, "Failed to init hw stats\n");
+ else
+ ib_set_device_ops(&dev->ibdev, &ionic_hw_stats_ops);
+ }
+
+ if (stats_type & IONIC_LIF_RDMA_STAT_QP) {
+ dev->counter_stats = kzalloc(sizeof(*dev->counter_stats),
+ GFP_KERNEL);
+ if (!dev->counter_stats)
+ return;
+
+ rc = ionic_alloc_counters(dev);
+ if (rc) {
+ ibdev_dbg(&dev->ibdev, "Failed to init counter stats\n");
+ kfree(dev->counter_stats);
+ dev->counter_stats = NULL;
+ return;
+ }
+
+ xa_init_flags(&dev->counter_stats->xa_counters, XA_FLAGS_ALLOC);
+
+ ib_set_device_ops(&dev->ibdev, &ionic_counter_stats_ops);
+ }
+}
+
+void ionic_stats_cleanup(struct ionic_ibdev *dev)
+{
+ if (dev->counter_stats) {
+ xa_destroy(&dev->counter_stats->xa_counters);
+ kfree(dev->counter_stats->hdr);
+ kfree(dev->counter_stats->stats_hdrs);
+ kfree(dev->counter_stats);
+ dev->counter_stats = NULL;
+ }
+
+ kfree(dev->hw_stats);
+ kfree(dev->hw_stats_buf);
+ kfree(dev->hw_stats_hdrs);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.c b/drivers/infiniband/hw/ionic/ionic_ibdev.c
new file mode 100644
index 000000000000..164046d00e5d
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_ibdev.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <net/addrconf.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
+
+#include "ionic_ibdev.h"
+
+#define DRIVER_DESCRIPTION "AMD Pensando RoCE HCA driver"
+#define DEVICE_DESCRIPTION "AMD Pensando RoCE HCA"
+
+MODULE_AUTHOR("Allen Hubbe <allen.hubbe@amd.com>");
+MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("NET_IONIC");
+
+static int ionic_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *attr,
+ struct ib_udata *udata)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+ struct net_device *ndev;
+
+ ndev = ib_device_get_netdev(ibdev, 1);
+ addrconf_ifid_eui48((u8 *)&attr->sys_image_guid, ndev);
+ dev_put(ndev);
+ attr->max_mr_size = dev->lif_cfg.npts_per_lif * PAGE_SIZE / 2;
+ attr->page_size_cap = dev->lif_cfg.page_size_supported;
+
+ attr->vendor_id = to_pci_dev(dev->lif_cfg.hwdev)->vendor;
+ attr->vendor_part_id = to_pci_dev(dev->lif_cfg.hwdev)->device;
+
+ attr->hw_ver = ionic_lif_asic_rev(dev->lif_cfg.lif);
+ attr->fw_ver = 0;
+ attr->max_qp = dev->lif_cfg.qp_count;
+ attr->max_qp_wr = IONIC_MAX_DEPTH;
+ attr->device_cap_flags =
+ IB_DEVICE_MEM_WINDOW |
+ IB_DEVICE_MEM_MGT_EXTENSIONS |
+ IB_DEVICE_MEM_WINDOW_TYPE_2B |
+ 0;
+ attr->max_send_sge =
+ min(ionic_v1_send_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH);
+ attr->max_recv_sge =
+ min(ionic_v1_recv_wqe_max_sge(dev->lif_cfg.max_stride, 0, false),
+ IONIC_SPEC_HIGH);
+ attr->max_sge_rd = attr->max_send_sge;
+ attr->max_cq = dev->lif_cfg.cq_count / dev->lif_cfg.udma_count;
+ attr->max_cqe = IONIC_MAX_CQ_DEPTH - IONIC_CQ_GRACE;
+ attr->max_mr = dev->lif_cfg.nmrs_per_lif;
+ attr->max_pd = IONIC_MAX_PD;
+ attr->max_qp_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_ee_rd_atom = 0;
+ attr->max_res_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_qp_init_rd_atom = IONIC_MAX_RD_ATOM;
+ attr->max_ee_init_rd_atom = 0;
+ attr->atomic_cap = IB_ATOMIC_GLOB;
+ attr->masked_atomic_cap = IB_ATOMIC_GLOB;
+ attr->max_mw = dev->lif_cfg.nmrs_per_lif;
+ attr->max_mcast_grp = 0;
+ attr->max_mcast_qp_attach = 0;
+ attr->max_ah = dev->lif_cfg.nahs_per_lif;
+ attr->max_fast_reg_page_list_len = dev->lif_cfg.npts_per_lif / 2;
+ attr->max_pkeys = IONIC_PKEY_TBL_LEN;
+
+ return 0;
+}
+
+static int ionic_query_port(struct ib_device *ibdev, u32 port,
+ struct ib_port_attr *attr)
+{
+ struct net_device *ndev;
+
+ if (port != 1)
+ return -EINVAL;
+
+ ndev = ib_device_get_netdev(ibdev, port);
+
+ if (netif_running(ndev) && netif_carrier_ok(ndev)) {
+ attr->state = IB_PORT_ACTIVE;
+ attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ } else if (netif_running(ndev)) {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = IB_PORT_PHYS_STATE_POLLING;
+ } else {
+ attr->state = IB_PORT_DOWN;
+ attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ }
+
+ attr->max_mtu = iboe_get_mtu(ndev->max_mtu);
+ attr->active_mtu = min(attr->max_mtu, iboe_get_mtu(ndev->mtu));
+ attr->gid_tbl_len = IONIC_GID_TBL_LEN;
+ attr->ip_gids = true;
+ attr->port_cap_flags = 0;
+ attr->max_msg_sz = 0x80000000;
+ attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
+ attr->max_vl_num = 1;
+ attr->subnet_prefix = 0xfe80000000000000ull;
+
+ dev_put(ndev);
+
+ return ib_get_eth_speed(ibdev, port,
+ &attr->active_speed,
+ &attr->active_width);
+}
+
+static enum rdma_link_layer ionic_get_link_layer(struct ib_device *ibdev,
+ u32 port)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static int ionic_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
+ u16 *pkey)
+{
+ if (port != 1)
+ return -EINVAL;
+
+ if (index != 0)
+ return -EINVAL;
+
+ *pkey = IB_DEFAULT_PKEY_FULL;
+
+ return 0;
+}
+
+static int ionic_modify_device(struct ib_device *ibdev, int mask,
+ struct ib_device_modify *attr)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+ return -EOPNOTSUPP;
+
+ if (mask & IB_DEVICE_MODIFY_NODE_DESC)
+ memcpy(dev->ibdev.node_desc, attr->node_desc,
+ IB_DEVICE_NODE_DESC_MAX);
+
+ return 0;
+}
+
+static int ionic_get_port_immutable(struct ib_device *ibdev, u32 port,
+ struct ib_port_immutable *attr)
+{
+ if (port != 1)
+ return -EINVAL;
+
+ attr->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+
+ attr->pkey_tbl_len = IONIC_PKEY_TBL_LEN;
+ attr->gid_tbl_len = IONIC_GID_TBL_LEN;
+ attr->max_mad_size = IB_MGMT_MAD_SIZE;
+
+ return 0;
+}
+
+static void ionic_get_dev_fw_str(struct ib_device *ibdev, char *str)
+{
+ struct ionic_ibdev *dev = to_ionic_ibdev(ibdev);
+
+ ionic_lif_fw_version(dev->lif_cfg.lif, str, IB_FW_VERSION_NAME_MAX);
+}
+
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct ionic_ibdev *dev =
+ rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
+
+ return sysfs_emit(buf, "0x%x\n", ionic_lif_asic_rev(dev->lif_cfg.lif));
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ionic_ibdev *dev =
+ rdma_device_to_drv_device(device, struct ionic_ibdev, ibdev);
+
+ return sysfs_emit(buf, "%s\n", dev->ibdev.node_desc);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *ionic_rdma_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group ionic_rdma_attr_group = {
+ .attrs = ionic_rdma_attributes,
+};
+
+static void ionic_disassociate_ucontext(struct ib_ucontext *ibcontext)
+{
+ /*
+ * Provide a no-op disassociate_ucontext so that the core does not
+ * wait for the user context to exit before cleaning up hw resources.
+ */
+}
+
+static const struct ib_device_ops ionic_dev_ops = {
+ .owner = THIS_MODULE,
+ .driver_id = RDMA_DRIVER_IONIC,
+ .uverbs_abi_ver = IONIC_ABI_VERSION,
+
+ .alloc_ucontext = ionic_alloc_ucontext,
+ .dealloc_ucontext = ionic_dealloc_ucontext,
+ .mmap = ionic_mmap,
+ .mmap_free = ionic_mmap_free,
+ .alloc_pd = ionic_alloc_pd,
+ .dealloc_pd = ionic_dealloc_pd,
+ .create_ah = ionic_create_ah,
+ .query_ah = ionic_query_ah,
+ .destroy_ah = ionic_destroy_ah,
+ .create_user_ah = ionic_create_ah,
+ .get_dma_mr = ionic_get_dma_mr,
+ .reg_user_mr = ionic_reg_user_mr,
+ .reg_user_mr_dmabuf = ionic_reg_user_mr_dmabuf,
+ .dereg_mr = ionic_dereg_mr,
+ .alloc_mr = ionic_alloc_mr,
+ .map_mr_sg = ionic_map_mr_sg,
+ .alloc_mw = ionic_alloc_mw,
+ .dealloc_mw = ionic_dealloc_mw,
+ .create_cq = ionic_create_cq,
+ .destroy_cq = ionic_destroy_cq,
+ .create_qp = ionic_create_qp,
+ .modify_qp = ionic_modify_qp,
+ .query_qp = ionic_query_qp,
+ .destroy_qp = ionic_destroy_qp,
+
+ .post_send = ionic_post_send,
+ .post_recv = ionic_post_recv,
+ .poll_cq = ionic_poll_cq,
+ .req_notify_cq = ionic_req_notify_cq,
+
+ .query_device = ionic_query_device,
+ .query_port = ionic_query_port,
+ .get_link_layer = ionic_get_link_layer,
+ .query_pkey = ionic_query_pkey,
+ .modify_device = ionic_modify_device,
+ .get_port_immutable = ionic_get_port_immutable,
+ .get_dev_fw_str = ionic_get_dev_fw_str,
+ .device_group = &ionic_rdma_attr_group,
+ .disassociate_ucontext = ionic_disassociate_ucontext,
+
+ INIT_RDMA_OBJ_SIZE(ib_ucontext, ionic_ctx, ibctx),
+ INIT_RDMA_OBJ_SIZE(ib_pd, ionic_pd, ibpd),
+ INIT_RDMA_OBJ_SIZE(ib_ah, ionic_ah, ibah),
+ INIT_RDMA_OBJ_SIZE(ib_cq, ionic_vcq, ibcq),
+ INIT_RDMA_OBJ_SIZE(ib_qp, ionic_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_mw, ionic_mr, ibmw),
+};
+
+static void ionic_init_resids(struct ionic_ibdev *dev)
+{
+ ionic_resid_init(&dev->inuse_cqid, dev->lif_cfg.cq_count);
+ dev->half_cqid_udma_shift =
+ order_base_2(dev->lif_cfg.cq_count / dev->lif_cfg.udma_count);
+ ionic_resid_init(&dev->inuse_pdid, IONIC_MAX_PD);
+ ionic_resid_init(&dev->inuse_ahid, dev->lif_cfg.nahs_per_lif);
+ ionic_resid_init(&dev->inuse_mrid, dev->lif_cfg.nmrs_per_lif);
+ /* skip reserved lkey */
+ dev->next_mrkey = 1;
+ ionic_resid_init(&dev->inuse_qpid, dev->lif_cfg.qp_count);
+ /* skip reserved SMI and GSI qpids */
+ dev->half_qpid_udma_shift =
+ order_base_2(dev->lif_cfg.qp_count / dev->lif_cfg.udma_count);
+ ionic_resid_init(&dev->inuse_dbid, dev->lif_cfg.dbid_count);
+}
+
+static void ionic_destroy_resids(struct ionic_ibdev *dev)
+{
+ ionic_resid_destroy(&dev->inuse_cqid);
+ ionic_resid_destroy(&dev->inuse_pdid);
+ ionic_resid_destroy(&dev->inuse_ahid);
+ ionic_resid_destroy(&dev->inuse_mrid);
+ ionic_resid_destroy(&dev->inuse_qpid);
+ ionic_resid_destroy(&dev->inuse_dbid);
+}
+
+static void ionic_destroy_ibdev(struct ionic_ibdev *dev)
+{
+ ionic_kill_rdma_admin(dev, false);
+ ib_unregister_device(&dev->ibdev);
+ ionic_stats_cleanup(dev);
+ ionic_destroy_rdma_admin(dev);
+ ionic_destroy_resids(dev);
+ WARN_ON(!xa_empty(&dev->qp_tbl));
+ xa_destroy(&dev->qp_tbl);
+ WARN_ON(!xa_empty(&dev->cq_tbl));
+ xa_destroy(&dev->cq_tbl);
+ ib_dealloc_device(&dev->ibdev);
+}
+
+static struct ionic_ibdev *ionic_create_ibdev(struct ionic_aux_dev *ionic_adev)
+{
+ struct ib_device *ibdev;
+ struct ionic_ibdev *dev;
+ struct net_device *ndev;
+ int rc;
+
+ dev = ib_alloc_device(ionic_ibdev, ibdev);
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+
+ ionic_fill_lif_cfg(ionic_adev->lif, &dev->lif_cfg);
+
+ xa_init_flags(&dev->qp_tbl, GFP_ATOMIC);
+ xa_init_flags(&dev->cq_tbl, GFP_ATOMIC);
+
+ ionic_init_resids(dev);
+
+ rc = ionic_rdma_reset_devcmd(dev);
+ if (rc)
+ goto err_reset;
+
+ rc = ionic_create_rdma_admin(dev);
+ if (rc)
+ goto err_admin;
+
+ ibdev = &dev->ibdev;
+ ibdev->dev.parent = dev->lif_cfg.hwdev;
+
+ strscpy(ibdev->name, "ionic_%d", IB_DEVICE_NAME_MAX);
+ strscpy(ibdev->node_desc, DEVICE_DESCRIPTION, IB_DEVICE_NODE_DESC_MAX);
+
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ ibdev->phys_port_cnt = 1;
+
+ /* the first two eq are reserved for async events */
+ ibdev->num_comp_vectors = dev->lif_cfg.eq_count - 2;
+
+ ndev = ionic_lif_netdev(ionic_adev->lif);
+ addrconf_ifid_eui48((u8 *)&ibdev->node_guid, ndev);
+ rc = ib_device_set_netdev(ibdev, ndev, 1);
+ /* ionic_lif_netdev() returns ndev with refcount held */
+ dev_put(ndev);
+ if (rc)
+ goto err_admin;
+
+ ib_set_device_ops(&dev->ibdev, &ionic_dev_ops);
+
+ ionic_stats_init(dev);
+
+ rc = ib_register_device(ibdev, "ionic_%d", ibdev->dev.parent);
+ if (rc)
+ goto err_register;
+
+ return dev;
+
+err_register:
+ ionic_stats_cleanup(dev);
+err_admin:
+ ionic_kill_rdma_admin(dev, false);
+ ionic_destroy_rdma_admin(dev);
+err_reset:
+ ionic_destroy_resids(dev);
+ xa_destroy(&dev->qp_tbl);
+ xa_destroy(&dev->cq_tbl);
+ ib_dealloc_device(&dev->ibdev);
+
+ return ERR_PTR(rc);
+}
+
+static int ionic_aux_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct ionic_aux_dev *ionic_adev;
+ struct ionic_ibdev *dev;
+
+ ionic_adev = container_of(adev, struct ionic_aux_dev, adev);
+ dev = ionic_create_ibdev(ionic_adev);
+ if (IS_ERR(dev))
+ return dev_err_probe(&adev->dev, PTR_ERR(dev),
+ "Failed to register ibdev\n");
+
+ auxiliary_set_drvdata(adev, dev);
+ ibdev_dbg(&dev->ibdev, "registered\n");
+
+ return 0;
+}
+
+static void ionic_aux_remove(struct auxiliary_device *adev)
+{
+ struct ionic_ibdev *dev = auxiliary_get_drvdata(adev);
+
+ dev_dbg(&adev->dev, "unregister ibdev\n");
+ ionic_destroy_ibdev(dev);
+ dev_dbg(&adev->dev, "unregistered\n");
+}
+
+static const struct auxiliary_device_id ionic_aux_id_table[] = {
+ { .name = "ionic.rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ionic_aux_id_table);
+
+static struct auxiliary_driver ionic_aux_r_driver = {
+ .name = "rdma",
+ .probe = ionic_aux_probe,
+ .remove = ionic_aux_remove,
+ .id_table = ionic_aux_id_table,
+};
+
+static int __init ionic_mod_init(void)
+{
+ int rc;
+
+ ionic_evt_workq = create_workqueue(KBUILD_MODNAME "-evt");
+ if (!ionic_evt_workq)
+ return -ENOMEM;
+
+ rc = auxiliary_driver_register(&ionic_aux_r_driver);
+ if (rc)
+ goto err_aux;
+
+ return 0;
+
+err_aux:
+ destroy_workqueue(ionic_evt_workq);
+
+ return rc;
+}
+
+static void __exit ionic_mod_exit(void)
+{
+ auxiliary_driver_unregister(&ionic_aux_r_driver);
+ destroy_workqueue(ionic_evt_workq);
+}
+
+module_init(ionic_mod_init);
+module_exit(ionic_mod_exit);
diff --git a/drivers/infiniband/hw/ionic/ionic_ibdev.h b/drivers/infiniband/hw/ionic/ionic_ibdev.h
new file mode 100644
index 000000000000..82fda1e3cdb6
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_ibdev.h
@@ -0,0 +1,517 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_IBDEV_H_
+#define _IONIC_IBDEV_H_
+
+#include <rdma/ib_umem.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include <rdma/ionic-abi.h>
+#include <ionic_api.h>
+#include <ionic_regs.h>
+
+#include "ionic_fw.h"
+#include "ionic_queue.h"
+#include "ionic_res.h"
+
+#include "ionic_lif_cfg.h"
+
+/* Config knobs */
+#define IONIC_EQ_DEPTH 511
+#define IONIC_EQ_COUNT 32
+#define IONIC_AQ_DEPTH 63
+#define IONIC_AQ_COUNT 4
+#define IONIC_EQ_ISR_BUDGET 10
+#define IONIC_EQ_WORK_BUDGET 1000
+#define IONIC_MAX_RD_ATOM 16
+#define IONIC_PKEY_TBL_LEN 1
+#define IONIC_GID_TBL_LEN 256
+
+#define IONIC_MAX_QPID 0xffffff
+#define IONIC_SPEC_HIGH 8
+#define IONIC_MAX_PD 1024
+#define IONIC_SQCMB_ORDER 5
+#define IONIC_RQCMB_ORDER 0
+
+#define IONIC_META_LAST ((void *)1ul)
+#define IONIC_META_POSTED ((void *)2ul)
+
+#define IONIC_CQ_GRACE 100
+
+#define IONIC_ROCE_UDP_SPORT 28272
+#define IONIC_DMA_LKEY 0
+#define IONIC_DMA_RKEY IONIC_DMA_LKEY
+
+#define IONIC_CMB_SUPPORTED \
+ (IONIC_CMB_ENABLE | IONIC_CMB_REQUIRE | IONIC_CMB_EXPDB | \
+ IONIC_CMB_WC | IONIC_CMB_UC)
+
+/* resource is not reserved on the device, indicated in tbl_order */
+#define IONIC_RES_INVALID -1
+
+struct ionic_aq;
+struct ionic_cq;
+struct ionic_eq;
+struct ionic_vcq;
+
+enum ionic_admin_state {
+ IONIC_ADMIN_ACTIVE, /* submitting admin commands to queue */
+ IONIC_ADMIN_PAUSED, /* not submitting, but may complete normally */
+ IONIC_ADMIN_KILLED, /* not submitting, locally completed */
+};
+
+enum ionic_admin_flags {
+ IONIC_ADMIN_F_BUSYWAIT = BIT(0), /* Don't sleep */
+ IONIC_ADMIN_F_TEARDOWN = BIT(1), /* In destroy path */
+ IONIC_ADMIN_F_INTERRUPT = BIT(2), /* Interruptible w/timeout */
+};
+
+enum ionic_mmap_flag {
+ IONIC_MMAP_WC = BIT(0),
+};
+
+struct ionic_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ unsigned long size;
+ unsigned long pfn;
+ u8 mmap_flags;
+};
+
+struct ionic_ibdev {
+ struct ib_device ibdev;
+
+ struct ionic_lif_cfg lif_cfg;
+
+ struct xarray qp_tbl;
+ struct xarray cq_tbl;
+
+ struct ionic_resid_bits inuse_dbid;
+ struct ionic_resid_bits inuse_pdid;
+ struct ionic_resid_bits inuse_ahid;
+ struct ionic_resid_bits inuse_mrid;
+ struct ionic_resid_bits inuse_qpid;
+ struct ionic_resid_bits inuse_cqid;
+
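+ /*
+ * cq and qp id spaces appear to be split between two udma queue
+ * groups; these shifts select which half an id falls in (see
+ * ionic_init_resids())
+ */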
+ u8 half_cqid_udma_shift;
+ u8 half_qpid_udma_shift;
+ u8 next_qpid_udma_idx;
+ u8 next_mrkey;
+
+ struct work_struct reset_work;
+ bool reset_posted;
+ u32 reset_cnt;
+
+ struct delayed_work admin_dwork;
+ struct ionic_aq **aq_vec;
+ atomic_t admin_state;
+
+ struct ionic_eq **eq_vec;
+
+ struct ionic_v1_stat *hw_stats;
+ void *hw_stats_buf;
+ struct rdma_stat_desc *hw_stats_hdrs;
+ struct ionic_counter_stats *counter_stats;
+ int hw_stats_count;
+};
+
+struct ionic_eq {
+ struct ionic_ibdev *dev;
+
+ u32 eqid;
+ u32 intr;
+
+ struct ionic_queue q;
+
+ int armed;
+ bool enable;
+
+ struct work_struct work;
+
+ int irq;
+ char name[32];
+};
+
+struct ionic_admin_wr {
+ struct completion work;
+ struct list_head aq_ent;
+ struct ionic_v1_admin_wqe wqe;
+ struct ionic_v1_cqe cqe;
+ struct ionic_aq *aq;
+ int status;
+};
+
+struct ionic_admin_wr_q {
+ struct ionic_admin_wr *wr;
+ int wqe_strides;
+};
+
+struct ionic_aq {
+ struct ionic_ibdev *dev;
+ struct ionic_vcq *vcq;
+
+ struct work_struct work;
+
+ atomic_t admin_state;
+ unsigned long stamp;
+ bool armed;
+
+ u32 aqid;
+ u32 cqid;
+
+ spinlock_t lock; /* for posting */
+ struct ionic_queue q;
+ struct ionic_admin_wr_q *q_wr;
+ struct list_head wr_prod;
+ struct list_head wr_post;
+};
+
+struct ionic_ctx {
+ struct ib_ucontext ibctx;
+ u32 dbid;
+ struct rdma_user_mmap_entry *mmap_dbell;
+};
+
+struct ionic_tbl_buf {
+ u32 tbl_limit;
+ u32 tbl_pages;
+ size_t tbl_size;
+ __le64 *tbl_buf;
+ dma_addr_t tbl_dma;
+ u8 page_size_log2;
+};
+
+struct ionic_pd {
+ struct ib_pd ibpd;
+
+ u32 pdid;
+ u32 flags;
+};
+
+struct ionic_cq {
+ struct ionic_vcq *vcq;
+
+ u32 cqid;
+ u32 eqid;
+
+ spinlock_t lock; /* for polling */
+ struct list_head poll_sq;
+ bool flush;
+ struct list_head flush_sq;
+ struct list_head flush_rq;
+ struct list_head ibkill_flush_ent;
+
+ struct ionic_queue q;
+ bool color;
+ int credit;
+ u16 arm_any_prod;
+ u16 arm_sol_prod;
+
+ struct kref cq_kref;
+ struct completion cq_rel_comp;
+
+ /* infrequently accessed, keep at end */
+ struct ib_umem *umem;
+};
+
+struct ionic_vcq {
+ struct ib_cq ibcq;
+ struct ionic_cq cq[2];
+ u8 udma_mask;
+ u8 poll_idx;
+};
+
+struct ionic_sq_meta {
+ u64 wrid;
+ u32 len;
+ u16 seq;
+ u8 ibop;
+ u8 ibsts;
+ u8 remote:1;
+ u8 signal:1;
+ u8 local_comp:1;
+};
+
+struct ionic_rq_meta {
+ struct ionic_rq_meta *next;
+ u64 wrid;
+};
+
+struct ionic_qp {
+ struct ib_qp ibqp;
+ enum ib_qp_state state;
+
+ u32 qpid;
+ u32 ahid;
+ u32 sq_cqid;
+ u32 rq_cqid;
+ u8 udma_idx;
+ u8 has_ah:1;
+ u8 has_sq:1;
+ u8 has_rq:1;
+ u8 sig_all:1;
+
+ struct list_head qp_list_counter;
+
+ struct list_head cq_poll_sq;
+ struct list_head cq_flush_sq;
+ struct list_head cq_flush_rq;
+ struct list_head ibkill_flush_ent;
+
+ spinlock_t sq_lock; /* for posting and polling */
+ struct ionic_queue sq;
+ struct ionic_sq_meta *sq_meta;
+ u16 *sq_msn_idx;
+ int sq_spec;
+ u16 sq_old_prod;
+ u16 sq_msn_prod;
+ u16 sq_msn_cons;
+ u8 sq_cmb;
+ bool sq_flush;
+ bool sq_flush_rcvd;
+
+ spinlock_t rq_lock; /* for posting and polling */
+ struct ionic_queue rq;
+ struct ionic_rq_meta *rq_meta;
+ struct ionic_rq_meta *rq_meta_head;
+ int rq_spec;
+ u16 rq_old_prod;
+ u8 rq_cmb;
+ bool rq_flush;
+
+ struct kref qp_kref;
+ struct completion qp_rel_comp;
+
+ /* infrequently accessed, keep at end */
+ int sgid_index;
+ int sq_cmb_order;
+ u32 sq_cmb_pgid;
+ phys_addr_t sq_cmb_addr;
+ struct rdma_user_mmap_entry *mmap_sq_cmb;
+
+ struct ib_umem *sq_umem;
+
+ int rq_cmb_order;
+ u32 rq_cmb_pgid;
+ phys_addr_t rq_cmb_addr;
+ struct rdma_user_mmap_entry *mmap_rq_cmb;
+
+ struct ib_umem *rq_umem;
+
+ int dcqcn_profile;
+
+ struct ib_ud_header *hdr;
+};
+
+struct ionic_ah {
+ struct ib_ah ibah;
+ u32 ahid;
+ int sgid_index;
+ struct ib_ud_header hdr;
+};
+
+struct ionic_mr {
+ union {
+ struct ib_mr ibmr;
+ struct ib_mw ibmw;
+ };
+
+ u32 mrid;
+ int flags;
+
+ struct ib_umem *umem;
+ struct ionic_tbl_buf buf;
+ bool created;
+};
+
+struct ionic_counter_stats {
+ int queue_stats_count;
+ struct ionic_v1_stat *hdr;
+ struct rdma_stat_desc *stats_hdrs;
+ struct xarray xa_counters;
+};
+
+struct ionic_counter {
+ void *vals;
+ struct list_head qp_list;
+};
+
+static inline struct ionic_ibdev *to_ionic_ibdev(struct ib_device *ibdev)
+{
+ return container_of(ibdev, struct ionic_ibdev, ibdev);
+}
+
+static inline struct ionic_ctx *to_ionic_ctx(struct ib_ucontext *ibctx)
+{
+ return container_of(ibctx, struct ionic_ctx, ibctx);
+}
+
+static inline struct ionic_ctx *to_ionic_ctx_uobj(struct ib_uobject *uobj)
+{
+ if (!uobj)
+ return NULL;
+
+ if (!uobj->context)
+ return NULL;
+
+ return to_ionic_ctx(uobj->context);
+}
+
+static inline struct ionic_pd *to_ionic_pd(struct ib_pd *ibpd)
+{
+ return container_of(ibpd, struct ionic_pd, ibpd);
+}
+
+static inline struct ionic_mr *to_ionic_mr(struct ib_mr *ibmr)
+{
+ return container_of(ibmr, struct ionic_mr, ibmr);
+}
+
+static inline struct ionic_mr *to_ionic_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct ionic_mr, ibmw);
+}
+
+static inline struct ionic_vcq *to_ionic_vcq(struct ib_cq *ibcq)
+{
+ return container_of(ibcq, struct ionic_vcq, ibcq);
+}
+
+static inline struct ionic_cq *to_ionic_vcq_cq(struct ib_cq *ibcq,
+ uint8_t udma_idx)
+{
+ return &to_ionic_vcq(ibcq)->cq[udma_idx];
+}
+
+static inline struct ionic_qp *to_ionic_qp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct ionic_qp, ibqp);
+}
+
+static inline struct ionic_ah *to_ionic_ah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct ionic_ah, ibah);
+}
+
+static inline u32 ionic_ctx_dbid(struct ionic_ibdev *dev,
+ struct ionic_ctx *ctx)
+{
+ if (!ctx)
+ return dev->lif_cfg.dbid;
+
+ return ctx->dbid;
+}
+
+static inline u32 ionic_obj_dbid(struct ionic_ibdev *dev,
+ struct ib_uobject *uobj)
+{
+ return ionic_ctx_dbid(dev, to_ionic_ctx_uobj(uobj));
+}
+
+static inline bool ionic_ibop_is_local(enum ib_wr_opcode op)
+{
+ return op == IB_WR_LOCAL_INV || op == IB_WR_REG_MR;
+}
+
+static inline void ionic_qp_complete(struct kref *kref)
+{
+ struct ionic_qp *qp = container_of(kref, struct ionic_qp, qp_kref);
+
+ complete(&qp->qp_rel_comp);
+}
+
+static inline void ionic_cq_complete(struct kref *kref)
+{
+ struct ionic_cq *cq = container_of(kref, struct ionic_cq, cq_kref);
+
+ complete(&cq->cq_rel_comp);
+}
+
+/* ionic_admin.c */
+extern struct workqueue_struct *ionic_evt_workq;
+void ionic_admin_post(struct ionic_ibdev *dev, struct ionic_admin_wr *wr);
+int ionic_admin_wait(struct ionic_ibdev *dev, struct ionic_admin_wr *wr,
+ enum ionic_admin_flags);
+
+int ionic_rdma_reset_devcmd(struct ionic_ibdev *dev);
+
+int ionic_create_rdma_admin(struct ionic_ibdev *dev);
+void ionic_destroy_rdma_admin(struct ionic_ibdev *dev);
+void ionic_kill_rdma_admin(struct ionic_ibdev *dev, bool fatal_path);
+
+/* ionic_controlpath.c */
+int ionic_create_cq_common(struct ionic_vcq *vcq,
+ struct ionic_tbl_buf *buf,
+ const struct ib_cq_init_attr *attr,
+ struct ionic_ctx *ctx,
+ struct ib_udata *udata,
+ struct ionic_qdesc *req_cq,
+ __u32 *resp_cqid,
+ int udma_idx);
+void ionic_destroy_cq_common(struct ionic_ibdev *dev, struct ionic_cq *cq);
+void ionic_flush_qp(struct ionic_ibdev *dev, struct ionic_qp *qp);
+void ionic_notify_flush_cq(struct ionic_cq *cq);
+
+int ionic_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *udata);
+void ionic_dealloc_ucontext(struct ib_ucontext *ibctx);
+int ionic_mmap(struct ib_ucontext *ibctx, struct vm_area_struct *vma);
+void ionic_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
+int ionic_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int ionic_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
+int ionic_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
+ struct ib_udata *udata);
+int ionic_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
+int ionic_destroy_ah(struct ib_ah *ibah, u32 flags);
+struct ib_mr *ionic_get_dma_mr(struct ib_pd *ibpd, int access);
+struct ib_mr *ionic_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 addr, int access, struct ib_dmah *dmah,
+ struct ib_udata *udata);
+struct ib_mr *ionic_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 offset,
+ u64 length, u64 addr, int fd, int access,
+ struct ib_dmah *dmah,
+ struct uverbs_attr_bundle *attrs);
+int ionic_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
+struct ib_mr *ionic_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type type,
+ u32 max_sg);
+int ionic_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
+int ionic_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
+int ionic_dealloc_mw(struct ib_mw *ibmw);
+int ionic_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
+ struct uverbs_attr_bundle *attrs);
+int ionic_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
+int ionic_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
+ struct ib_udata *udata);
+int ionic_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_udata *udata);
+int ionic_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
+ struct ib_qp_init_attr *init_attr);
+int ionic_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
+
+/* ionic_datapath.c */
+int ionic_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad);
+int ionic_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad);
+int ionic_poll_cq(struct ib_cq *ibcq, int nwc, struct ib_wc *wc);
+int ionic_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
+
+/* ionic_hw_stats.c */
+void ionic_stats_init(struct ionic_ibdev *dev);
+void ionic_stats_cleanup(struct ionic_ibdev *dev);
+
+/* ionic_pgtbl.c */
+__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va);
+__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va);
+int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma);
+int ionic_pgtbl_init(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf,
+ struct ib_umem *umem,
+ dma_addr_t dma,
+ int limit,
+ u64 page_size);
+void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf);
+#endif /* _IONIC_IBDEV_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.c b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c
new file mode 100644
index 000000000000..f3cd281c3a2f
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/kernel.h>
+
+#include <ionic.h>
+#include <ionic_lif.h>
+
+#include "ionic_lif_cfg.h"
+
+#define IONIC_MIN_RDMA_VERSION 0
+#define IONIC_MAX_RDMA_VERSION 2
+
+static u8 ionic_get_expdb(struct ionic_lif *lif)
+{
+ u8 expdb_support = 0;
+
+ if (lif->ionic->idev.phy_cmb_expdb64_pages)
+ expdb_support |= IONIC_EXPDB_64B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb128_pages)
+ expdb_support |= IONIC_EXPDB_128B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb256_pages)
+ expdb_support |= IONIC_EXPDB_256B_WQE;
+ if (lif->ionic->idev.phy_cmb_expdb512_pages)
+ expdb_support |= IONIC_EXPDB_512B_WQE;
+
+ return expdb_support;
+}
+
+void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg)
+{
+ union ionic_lif_identity *ident = &lif->ionic->ident.lif;
+
+ cfg->lif = lif;
+ cfg->hwdev = &lif->ionic->pdev->dev;
+ cfg->lif_index = lif->index;
+ cfg->lif_hw_index = lif->hw_index;
+
+ cfg->dbid = lif->kern_pid;
+ cfg->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
+ cfg->dbpage = lif->kern_dbpage;
+ cfg->intr_ctrl = lif->ionic->idev.intr_ctrl;
+
+ cfg->db_phys = lif->ionic->bars[IONIC_PCI_BAR_DBELL].bus_addr;
+
+ if (IONIC_VERSION(ident->rdma.version, ident->rdma.minor_version) >=
+ IONIC_VERSION(2, 1))
+ cfg->page_size_supported =
+ le64_to_cpu(ident->rdma.page_size_cap);
+ else
+ cfg->page_size_supported = IONIC_PAGE_SIZE_SUPPORTED;
+
+ cfg->rdma_version = ident->rdma.version;
+ cfg->qp_opcodes = ident->rdma.qp_opcodes;
+ cfg->admin_opcodes = ident->rdma.admin_opcodes;
+
+ cfg->stats_type = le16_to_cpu(ident->rdma.stats_type);
+ cfg->npts_per_lif = le32_to_cpu(ident->rdma.npts_per_lif);
+ cfg->nmrs_per_lif = le32_to_cpu(ident->rdma.nmrs_per_lif);
+ cfg->nahs_per_lif = le32_to_cpu(ident->rdma.nahs_per_lif);
+
+ cfg->aq_base = le32_to_cpu(ident->rdma.aq_qtype.qid_base);
+ cfg->cq_base = le32_to_cpu(ident->rdma.cq_qtype.qid_base);
+ cfg->eq_base = le32_to_cpu(ident->rdma.eq_qtype.qid_base);
+
+ /*
+ * ionic_create_rdma_admin() may reduce aq_count or eq_count if
+ * it is unable to allocate all that were requested.
+ * aq_count is tunable; see ionic_aq_count
+ * eq_count is tunable; see ionic_eq_count
+ */
+ cfg->aq_count = le32_to_cpu(ident->rdma.aq_qtype.qid_count);
+ cfg->eq_count = le32_to_cpu(ident->rdma.eq_qtype.qid_count);
+ cfg->cq_count = le32_to_cpu(ident->rdma.cq_qtype.qid_count);
+ cfg->qp_count = le32_to_cpu(ident->rdma.sq_qtype.qid_count);
+
+ cfg->aq_qtype = ident->rdma.aq_qtype.qtype;
+ cfg->sq_qtype = ident->rdma.sq_qtype.qtype;
+ cfg->rq_qtype = ident->rdma.rq_qtype.qtype;
+ cfg->cq_qtype = ident->rdma.cq_qtype.qtype;
+ cfg->eq_qtype = ident->rdma.eq_qtype.qtype;
+ cfg->udma_qgrp_shift = ident->rdma.udma_shift;
+ cfg->udma_count = 2;
+
+ cfg->max_stride = ident->rdma.max_stride;
+ cfg->expdb_mask = ionic_get_expdb(lif);
+
+ cfg->sq_expdb =
+ !!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_EXPDB);
+ cfg->rq_expdb =
+ !!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_EXPDB);
+}
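
A quick worked example of the version gate above, for illustration:
IONIC_VERSION(2, 1) evaluates to (2 << 16) + (1 << 8) = 0x20100, so a
device reporting RDMA version 2.0 yields 0x20000 and falls back to the
IONIC_PAGE_SIZE_SUPPORTED default, while 2.1 and later use the
page_size_cap reported by the device.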
+
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif)
+{
+ struct net_device *netdev = lif->netdev;
+
+ dev_hold(netdev);
+ return netdev;
+}
+
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len)
+{
+ strscpy(str, lif->ionic->idev.dev_info.fw_version, len);
+}
+
+u8 ionic_lif_asic_rev(struct ionic_lif *lif)
+{
+ return lif->ionic->idev.dev_info.asic_rev;
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_lif_cfg.h b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
new file mode 100644
index 000000000000..20853429f623
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_lif_cfg.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_LIF_CFG_H_
+#define _IONIC_LIF_CFG_H_
+
+#define IONIC_VERSION(a, b) (((a) << 16) + ((b) << 8))
+#define IONIC_PAGE_SIZE_SUPPORTED 0x40201000 /* 4KB, 2MB, 1GB */
+
+#define IONIC_EXPDB_64B_WQE BIT(0)
+#define IONIC_EXPDB_128B_WQE BIT(1)
+#define IONIC_EXPDB_256B_WQE BIT(2)
+#define IONIC_EXPDB_512B_WQE BIT(3)
+
+struct ionic_lif_cfg {
+ struct device *hwdev;
+ struct ionic_lif *lif;
+
+ int lif_index;
+ int lif_hw_index;
+
+ u32 dbid;
+ int dbid_count;
+ u64 __iomem *dbpage;
+ struct ionic_intr __iomem *intr_ctrl;
+ phys_addr_t db_phys;
+
+ u64 page_size_supported;
+ u32 npts_per_lif;
+ u32 nmrs_per_lif;
+ u32 nahs_per_lif;
+
+ u32 aq_base;
+ u32 cq_base;
+ u32 eq_base;
+
+ int aq_count;
+ int eq_count;
+ int cq_count;
+ int qp_count;
+
+ u16 stats_type;
+ u8 aq_qtype;
+ u8 sq_qtype;
+ u8 rq_qtype;
+ u8 cq_qtype;
+ u8 eq_qtype;
+
+ u8 udma_count;
+ u8 udma_qgrp_shift;
+
+ u8 rdma_version;
+ u8 qp_opcodes;
+ u8 admin_opcodes;
+
+ u8 max_stride;
+ bool sq_expdb;
+ bool rq_expdb;
+ u8 expdb_mask;
+};
+
+void ionic_fill_lif_cfg(struct ionic_lif *lif, struct ionic_lif_cfg *cfg);
+struct net_device *ionic_lif_netdev(struct ionic_lif *lif);
+void ionic_lif_fw_version(struct ionic_lif *lif, char *str, size_t len);
+u8 ionic_lif_asic_rev(struct ionic_lif *lif);
+
+#endif /* _IONIC_LIF_CFG_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_pgtbl.c b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
new file mode 100644
index 000000000000..e74db73c9246
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_pgtbl.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/mman.h>
+#include <linux/dma-mapping.h>
+
+#include "ionic_fw.h"
+#include "ionic_ibdev.h"
+
+__le64 ionic_pgtbl_dma(struct ionic_tbl_buf *buf, u64 va)
+{
+ u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
+ u64 dma;
+
+ if (!buf->tbl_pages)
+ return cpu_to_le64(0);
+
+ if (buf->tbl_pages > 1)
+ return cpu_to_le64(buf->tbl_dma);
+
+ if (buf->tbl_buf)
+ dma = le64_to_cpu(buf->tbl_buf[0]);
+ else
+ dma = buf->tbl_dma;
+
+ return cpu_to_le64(dma + (va & pg_mask));
+}
+
+__be64 ionic_pgtbl_off(struct ionic_tbl_buf *buf, u64 va)
+{
+ if (buf->tbl_pages > 1) {
+ u64 pg_mask = BIT_ULL(buf->page_size_log2) - 1;
+
+ return cpu_to_be64(va & pg_mask);
+ }
+
+ return 0;
+}
+
+int ionic_pgtbl_page(struct ionic_tbl_buf *buf, u64 dma)
+{
+ if (unlikely(buf->tbl_pages == buf->tbl_limit))
+ return -ENOMEM;
+
+ if (buf->tbl_buf)
+ buf->tbl_buf[buf->tbl_pages] = cpu_to_le64(dma);
+ else
+ buf->tbl_dma = dma;
+
+ ++buf->tbl_pages;
+
+ return 0;
+}
+
+static int ionic_tbl_buf_alloc(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf)
+{
+ int rc;
+
+ buf->tbl_size = buf->tbl_limit * sizeof(*buf->tbl_buf);
+ buf->tbl_buf = kmalloc(buf->tbl_size, GFP_KERNEL);
+ if (!buf->tbl_buf)
+ return -ENOMEM;
+
+ buf->tbl_dma = dma_map_single(dev->lif_cfg.hwdev, buf->tbl_buf,
+ buf->tbl_size, DMA_TO_DEVICE);
+ rc = dma_mapping_error(dev->lif_cfg.hwdev, buf->tbl_dma);
+ if (rc) {
+ kfree(buf->tbl_buf);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int ionic_pgtbl_umem(struct ionic_tbl_buf *buf, struct ib_umem *umem)
+{
+ struct ib_block_iter biter;
+ u64 page_dma;
+ int rc;
+
+ rdma_umem_for_each_dma_block(umem, &biter, BIT_ULL(buf->page_size_log2)) {
+ page_dma = rdma_block_iter_dma_address(&biter);
+ rc = ionic_pgtbl_page(buf, page_dma);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+void ionic_pgtbl_unbuf(struct ionic_ibdev *dev, struct ionic_tbl_buf *buf)
+{
+ if (buf->tbl_buf)
+ dma_unmap_single(dev->lif_cfg.hwdev, buf->tbl_dma,
+ buf->tbl_size, DMA_TO_DEVICE);
+
+ kfree(buf->tbl_buf);
+ memset(buf, 0, sizeof(*buf));
+}
+
+int ionic_pgtbl_init(struct ionic_ibdev *dev,
+ struct ionic_tbl_buf *buf,
+ struct ib_umem *umem,
+ dma_addr_t dma,
+ int limit,
+ u64 page_size)
+{
+ int rc;
+
+ memset(buf, 0, sizeof(*buf));
+
+ if (umem) {
+ limit = ib_umem_num_dma_blocks(umem, page_size);
+ buf->page_size_log2 = order_base_2(page_size);
+ }
+
+ if (limit < 1)
+ return -EINVAL;
+
+ buf->tbl_limit = limit;
+
+ /* skip pgtbl if contiguous / direct translation */
+ if (limit > 1) {
+ rc = ionic_tbl_buf_alloc(dev, buf);
+ if (rc)
+ return rc;
+ }
+
+ if (umem)
+ rc = ionic_pgtbl_umem(buf, umem);
+ else
+ rc = ionic_pgtbl_page(buf, dma);
+
+ if (rc)
+ goto err_unbuf;
+
+ return 0;
+
+err_unbuf:
+ ionic_pgtbl_unbuf(dev, buf);
+ return rc;
+}
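
A minimal usage sketch of the page-table helpers above (not part of the
patch; example_map_umem() is a hypothetical caller):

	static int example_map_umem(struct ionic_ibdev *dev,
				    struct ib_umem *umem,
				    struct ionic_tbl_buf *buf)
	{
		int rc;

		/* one table entry per PAGE_SIZE DMA block of the umem */
		rc = ionic_pgtbl_init(dev, buf, umem, 0, 0, PAGE_SIZE);
		if (rc)
			return rc;

		/* hand ionic_pgtbl_dma(buf, va) to the device here */

		ionic_pgtbl_unbuf(dev, buf);
		return 0;
	}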
diff --git a/drivers/infiniband/hw/ionic/ionic_queue.c b/drivers/infiniband/hw/ionic/ionic_queue.c
new file mode 100644
index 000000000000..aa897ed2a412
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_queue.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#include <linux/dma-mapping.h>
+
+#include "ionic_queue.h"
+
+int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
+ int depth, size_t stride)
+{
+ if (depth < 0 || depth > 0xffff)
+ return -EINVAL;
+
+ if (stride == 0 || stride > 0x10000)
+ return -EINVAL;
+
+ if (depth == 0)
+ depth = 1;
+
+ q->depth_log2 = order_base_2(depth + 1);
+ q->stride_log2 = order_base_2(stride);
+
+ if (q->depth_log2 + q->stride_log2 < PAGE_SHIFT)
+ q->depth_log2 = PAGE_SHIFT - q->stride_log2;
+
+ if (q->depth_log2 > 16 || q->stride_log2 > 16)
+ return -EINVAL;
+
+ q->size = BIT_ULL(q->depth_log2 + q->stride_log2);
+ q->mask = BIT(q->depth_log2) - 1;
+
+ q->ptr = dma_alloc_coherent(dma_dev, q->size, &q->dma, GFP_KERNEL);
+ if (!q->ptr)
+ return -ENOMEM;
+
+ /* it will always be page aligned, but just to be sure... */
+ if (!PAGE_ALIGNED(q->ptr)) {
+ dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
+ return -ENOMEM;
+ }
+
+ q->prod = 0;
+ q->cons = 0;
+ q->dbell = 0;
+
+ return 0;
+}
+
+void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev)
+{
+ dma_free_coherent(dma_dev, q->size, q->ptr, q->dma);
+}
diff --git a/drivers/infiniband/hw/ionic/ionic_queue.h b/drivers/infiniband/hw/ionic/ionic_queue.h
new file mode 100644
index 000000000000..d18020d4cad5
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_queue.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_QUEUE_H_
+#define _IONIC_QUEUE_H_
+
+#include <linux/io.h>
+#include <ionic_regs.h>
+
+#define IONIC_MAX_DEPTH 0xffff
+#define IONIC_MAX_CQ_DEPTH 0xffff
+#define IONIC_CQ_RING_ARM IONIC_DBELL_RING_1
+#define IONIC_CQ_RING_SOL IONIC_DBELL_RING_2
+
+/**
+ * struct ionic_queue - Ring buffer used between device and driver
+ * @size: Size of the buffer, in bytes
+ * @dma: DMA address of the buffer
+ * @ptr: Buffer virtual address
+ * @prod: Driver position in the queue
+ * @cons: Device position in the queue
+ * @mask: Capacity of the queue, subtracting the hole
+ * This value is equal to ((1 << depth_log2) - 1)
+ * @depth_log2: Log base two of the queue depth
+ * @stride_log2: Log base two of the size of an element in the queue
+ * @dbell: Doorbell identifying bits
+ */
+struct ionic_queue {
+ size_t size;
+ dma_addr_t dma;
+ void *ptr;
+ u16 prod;
+ u16 cons;
+ u16 mask;
+ u8 depth_log2;
+ u8 stride_log2;
+ u64 dbell;
+};
+
+/**
+ * ionic_queue_init() - Initialize user space queue
+ * @q: Uninitialized queue structure
+ * @dma_dev: DMA device for mapping
+ * @depth: Depth of the queue
+ * @stride: Size of each element of the queue
+ *
+ * Return: status code
+ */
+int ionic_queue_init(struct ionic_queue *q, struct device *dma_dev,
+ int depth, size_t stride);
+
+/**
+ * ionic_queue_destroy() - Destroy user space queue
+ * @q: Queue structure
+ * @dma_dev: DMA device for mapping
+ */
+void ionic_queue_destroy(struct ionic_queue *q, struct device *dma_dev);
+
+/**
+ * ionic_queue_empty() - Test if queue is empty
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: is empty
+ */
+static inline bool ionic_queue_empty(struct ionic_queue *q)
+{
+ return q->prod == q->cons;
+}
+
+/**
+ * ionic_queue_length() - Get the current length of the queue
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: length
+ */
+static inline u16 ionic_queue_length(struct ionic_queue *q)
+{
+ return (q->prod - q->cons) & q->mask;
+}
+
+/**
+ * ionic_queue_length_remaining() - Get the remaining length of the queue
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: length remaining
+ */
+static inline u16 ionic_queue_length_remaining(struct ionic_queue *q)
+{
+ return q->mask - ionic_queue_length(q);
+}
+
+/**
+ * ionic_queue_full() - Test if queue is full
+ * @q: Queue structure
+ *
+ * This is only valid for to-device queues.
+ *
+ * Return: is full
+ */
+static inline bool ionic_queue_full(struct ionic_queue *q)
+{
+ return q->mask == ionic_queue_length(q);
+}
+
+/**
+ * ionic_color_wrap() - Flip the color if prod is wrapped
+ * @prod: Queue index just after advancing
+ * @color: Queue color just prior to advancing the index
+ *
+ * Return: color after advancing the index
+ */
+static inline bool ionic_color_wrap(u16 prod, bool color)
+{
+ /* logical xor color with (prod == 0) */
+ return color != (prod == 0);
+}
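
A brief illustration of the color protocol (not from the patch): in a
four-entry ring, prod advances 2 -> 3 -> 0; ionic_color_wrap(0, color)
returns !color, so the phase bit flips exactly once per lap and the
consumer can tell freshly written entries from stale ones left over
from the previous pass.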
+
+/**
+ * ionic_queue_at() - Get the element at the given index
+ * @q: Queue structure
+ * @idx: Index in the queue
+ *
+ * The index must be within the bounds of the queue. It is not checked here.
+ *
+ * Return: pointer to element at index
+ */
+static inline void *ionic_queue_at(struct ionic_queue *q, u16 idx)
+{
+ return q->ptr + ((unsigned long)idx << q->stride_log2);
+}
+
+/**
+ * ionic_queue_at_prod() - Get the element at the producer index
+ * @q: Queue structure
+ *
+ * Return: pointer to element at producer index
+ */
+static inline void *ionic_queue_at_prod(struct ionic_queue *q)
+{
+ return ionic_queue_at(q, q->prod);
+}
+
+/**
+ * ionic_queue_at_cons() - Get the element at the consumer index
+ * @q: Queue structure
+ *
+ * Return: pointer to element at consumer index
+ */
+static inline void *ionic_queue_at_cons(struct ionic_queue *q)
+{
+ return ionic_queue_at(q, q->cons);
+}
+
+/**
+ * ionic_queue_next() - Compute the next index
+ * @q: Queue structure
+ * @idx: Index
+ *
+ * Return: next index after idx
+ */
+static inline u16 ionic_queue_next(struct ionic_queue *q, u16 idx)
+{
+ return (idx + 1) & q->mask;
+}
+
+/**
+ * ionic_queue_produce() - Increase the producer index
+ * @q: Queue structure
+ *
+ * Caller must ensure that the queue is not full. It is not checked here.
+ */
+static inline void ionic_queue_produce(struct ionic_queue *q)
+{
+ q->prod = ionic_queue_next(q, q->prod);
+}
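
A minimal producer sketch built from the helpers above (not part of the
patch; example_post() and the element copy are hypothetical):

	static int example_post(struct ionic_queue *q, const void *elem)
	{
		if (ionic_queue_full(q))
			return -ENOMEM;

		/* fill the slot at the producer index, then advance prod */
		memcpy(ionic_queue_at_prod(q), elem, BIT(q->stride_log2));
		ionic_queue_produce(q);

		/* the caller would then write ionic_queue_dbell_val(q)
		 * to the device doorbell to publish the new element
		 */
		return 0;
	}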
+
+/**
+ * ionic_queue_consume() - Increase the consumer index
+ * @q: Queue structure
+ *
+ * Caller must ensure that the queue is not empty. It is not checked here.
+ *
+ * This is only valid for to-device queues.
+ */
+static inline void ionic_queue_consume(struct ionic_queue *q)
+{
+ q->cons = ionic_queue_next(q, q->cons);
+}
+
+/**
+ * ionic_queue_consume_entries() - Increase the consumer index by entries
+ * @q: Queue structure
+ * @entries: Number of entries to increment
+ *
+ * Caller must ensure that the queue is not empty. It is not checked here.
+ *
+ * This is only valid for to-device queues.
+ */
+static inline void ionic_queue_consume_entries(struct ionic_queue *q,
+ u16 entries)
+{
+ q->cons = (q->cons + entries) & q->mask;
+}
+
+/**
+ * ionic_queue_dbell_init() - Initialize doorbell bits for queue id
+ * @q: Queue structure
+ * @qid: Queue identifying number
+ */
+static inline void ionic_queue_dbell_init(struct ionic_queue *q, u32 qid)
+{
+ q->dbell = IONIC_DBELL_QID(qid);
+}
+
+/**
+ * ionic_queue_dbell_val() - Get current doorbell update value
+ * @q: Queue structure
+ *
+ * Return: current doorbell update value
+ */
+static inline u64 ionic_queue_dbell_val(struct ionic_queue *q)
+{
+ return q->dbell | q->prod;
+}
+
+#endif /* _IONIC_QUEUE_H_ */
diff --git a/drivers/infiniband/hw/ionic/ionic_res.h b/drivers/infiniband/hw/ionic/ionic_res.h
new file mode 100644
index 000000000000..46c8c584bd9a
--- /dev/null
+++ b/drivers/infiniband/hw/ionic/ionic_res.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2025, Advanced Micro Devices, Inc. */
+
+#ifndef _IONIC_RES_H_
+#define _IONIC_RES_H_
+
+#include <linux/kernel.h>
+#include <linux/idr.h>
+
+/**
+ * struct ionic_resid_bits - Number allocator based on IDA
+ *
+ * @inuse: IDA handle
+ * @inuse_size: Highest ID limit for IDA
+ */
+struct ionic_resid_bits {
+ struct ida inuse;
+ unsigned int inuse_size;
+};
+
+/**
+ * ionic_resid_init() - Initialize a resid allocator
+ * @resid: Uninitialized resid allocator
+ * @size: Capacity of the allocator
+ */
+static inline void ionic_resid_init(struct ionic_resid_bits *resid,
+ unsigned int size)
+{
+ resid->inuse_size = size;
+ ida_init(&resid->inuse);
+}
+
+/**
+ * ionic_resid_destroy() - Destroy a resid allocator
+ * @resid: Resid allocator
+ */
+static inline void ionic_resid_destroy(struct ionic_resid_bits *resid)
+{
+ ida_destroy(&resid->inuse);
+}
+
+/**
+ * ionic_resid_get_shared() - Allocate an available shared resource id
+ * @resid: Resid allocator
+ * @min: Smallest valid resource id
+ * @size: One after largest valid resource id
+ *
+ * Return: Resource id, or negative error number
+ */
+static inline int ionic_resid_get_shared(struct ionic_resid_bits *resid,
+ unsigned int min,
+ unsigned int size)
+{
+ return ida_alloc_range(&resid->inuse, min, size - 1, GFP_KERNEL);
+}
+
+/**
+ * ionic_resid_get() - Allocate an available resource id
+ * @resid: Resid allocator
+ *
+ * Return: Resource id, or negative error number
+ */
+static inline int ionic_resid_get(struct ionic_resid_bits *resid)
+{
+ return ionic_resid_get_shared(resid, 0, resid->inuse_size);
+}
+
+/**
+ * ionic_resid_put() - Free a resource id
+ * @resid: Resid allocator
+ * @id: Resource id
+ */
+static inline void ionic_resid_put(struct ionic_resid_bits *resid, int id)
+{
+ ida_free(&resid->inuse, id);
+}
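
A minimal lifecycle sketch for the allocator (not part of the patch;
the 64-entry capacity is an arbitrary example):

	static int example_resid_usage(void)
	{
		struct ionic_resid_bits resid;
		int id;

		ionic_resid_init(&resid, 64);

		id = ionic_resid_get(&resid);	/* 0..63, or -ENOSPC */
		if (id < 0)
			return id;

		ionic_resid_put(&resid, id);
		ionic_resid_destroy(&resid);

		return 0;
	}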
+
+/**
+ * ionic_bitid_to_qid() - Transform a resource bit index into a queue id
+ * @bitid: Bit index
+ * @qgrp_shift: Log2 number of queues per queue group
+ * @half_qid_shift: Log2 of half the total number of queues
+ *
+ * Return: Queue id
+ *
+ * Udma-constrained queues (QPs and CQs) are associated with their udma by
+ * queue group. Even queue groups are associated with udma0, and odd queue
+ * groups with udma1.
+ *
+ * For allocating queue ids, we want to arrange the bits into two halves,
+ * with the even queue groups of udma0 in the lower half of the bitset,
+ * and the odd queue groups of udma1 in the upper half of the bitset.
+ * Then, one or two calls of find_next_zero_bit can examine all the bits
+ * for queues of an entire udma.
+ *
+ * For example, assuming eight queue groups with qgrp qids per group:
+ *
+ * bitid 0*qgrp..1*qgrp-1 : qid 0*qgrp..1*qgrp-1
+ * bitid 1*qgrp..2*qgrp-1 : qid 2*qgrp..3*qgrp-1
+ * bitid 2*qgrp..3*qgrp-1 : qid 4*qgrp..5*qgrp-1
+ * bitid 3*qgrp..4*qgrp-1 : qid 6*qgrp..7*qgrp-1
+ * bitid 4*qgrp..5*qgrp-1 : qid 1*qgrp..2*qgrp-1
+ * bitid 5*qgrp..6*qgrp-1 : qid 3*qgrp..4*qgrp-1
+ * bitid 6*qgrp..7*qgrp-1 : qid 5*qgrp..6*qgrp-1
+ * bitid 7*qgrp..8*qgrp-1 : qid 7*qgrp..8*qgrp-1
+ *
+ * There are three important ranges of bits in the qid. There is the udma
+ * bit "U" at qgrp_shift, which is the least significant bit of the group
+ * index, and determines which udma a queue is associated with.
+ * The bits of lesser significance we can call the idx bits "I", which are
+ * the index of the queue within the group. The bits of greater significance
+ * we can call the grp bits "G", which are other bits of the group index that
+ * do not determine the udma. Those bits are just rearranged in the bit index
+ * in the bitset. A bitid has the udma bit in the most significant place,
+ * then the grp bits, then the idx bits.
+ *
+ * bitid: 00000000000000 U GGG IIIIII
+ * qid: 00000000000000 GGG U IIIIII
+ *
+ * Transforming from bit index to qid, or from qid to bit index, can be
+ * accomplished by rearranging the bits by masking and shifting.
+ */
+static inline u32 ionic_bitid_to_qid(u32 bitid, u8 qgrp_shift,
+ u8 half_qid_shift)
+{
+ u32 udma_bit =
+ (bitid & BIT(half_qid_shift)) >> (half_qid_shift - qgrp_shift);
+ u32 grp_bits = (bitid & GENMASK(half_qid_shift - 1, qgrp_shift)) << 1;
+ u32 idx_bits = bitid & (BIT(qgrp_shift) - 1);
+
+ return grp_bits | udma_bit | idx_bits;
+}
+
+/**
+ * ionic_qid_to_bitid() - Transform a queue id into a resource bit index
+ * @qid: queue index
+ * @qgrp_shift: Log2 number of queues per queue group
+ * @half_qid_shift: Log2 of half the total number of queues
+ *
+ * Return: Resource bit index
+ *
+ * This is the inverse of ionic_bitid_to_qid().
+ */
+static inline u32 ionic_qid_to_bitid(u32 qid, u8 qgrp_shift, u8 half_qid_shift)
+{
+ u32 udma_bit = (qid & BIT(qgrp_shift)) << (half_qid_shift - qgrp_shift);
+ u32 grp_bits = (qid & GENMASK(half_qid_shift, qgrp_shift + 1)) >> 1;
+ u32 idx_bits = qid & (BIT(qgrp_shift) - 1);
+
+ return udma_bit | grp_bits | idx_bits;
+}
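
A worked example of the mapping (illustrative values only): with
qgrp_shift = 6 (64 qids per group) and half_qid_shift = 8 (512 queues
total, 256 per udma):

	u32 bitid = BIT(8) | 5;	/* upper half: udma1, group bits 0, index 5 */
	u32 qid = ionic_bitid_to_qid(bitid, 6, 8);	/* BIT(6) | 5: group 1 (odd, udma1), index 5 */

	/* the transform round-trips */
	WARN_ON(ionic_qid_to_bitid(qid, 6, 8) != bitid);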
+#endif /* _IONIC_RES_H_ */
diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig
index 5f49a58590ed..0bd7e3fca1fb 100644
--- a/drivers/infiniband/hw/irdma/Kconfig
+++ b/drivers/infiniband/hw/irdma/Kconfig
@@ -4,10 +4,11 @@ config INFINIBAND_IRDMA
depends on INET
depends on IPV6 || !IPV6
depends on PCI
- depends on ICE && I40E
+ depends on IDPF && ICE && I40E
select GENERIC_ALLOCATOR
select AUXILIARY_BUS
select CRC32
help
- This is an Intel(R) Ethernet Protocol Driver for RDMA driver
- that support E810 (iWARP/RoCE) and X722 (iWARP) network devices.
+ This is an Intel(R) Ethernet Protocol Driver for RDMA that
+ supports IPU E2000 (RoCEv2), E810 (iWARP/RoCEv2) and X722 (iWARP)
+ network devices.
diff --git a/drivers/infiniband/hw/irdma/Makefile b/drivers/infiniband/hw/irdma/Makefile
index 48c3854235a0..03ceb9e5475f 100644
--- a/drivers/infiniband/hw/irdma/Makefile
+++ b/drivers/infiniband/hw/irdma/Makefile
@@ -13,7 +13,10 @@ irdma-objs := cm.o \
hw.o \
i40iw_hw.o \
i40iw_if.o \
+ ig3rdma_if.o\
+ icrdma_if.o \
icrdma_hw.o \
+ ig3rdma_hw.o\
main.o \
pble.o \
puda.o \
@@ -22,6 +25,7 @@ irdma-objs := cm.o \
uk.o \
utils.o \
verbs.o \
+ virtchnl.o \
ws.o \
CFLAGS_trace.o = -I$(src)
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index c6a0a661d6e7..f4f4f92ba63a 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -3710,7 +3710,7 @@ int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
iwpd = iwqp->iwpd;
tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
ibmr = irdma_reg_phys_mr(&iwpd->ibpd, iwqp->ietf_mem.pa, buf_len,
- IB_ACCESS_LOCAL_WRITE, &tagged_offset);
+ IB_ACCESS_LOCAL_WRITE, &tagged_offset, false);
if (IS_ERR(ibmr)) {
ret = -ENOMEM;
goto error;
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 99a7f1a6c0b5..ce5cf89c463c 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -74,6 +74,14 @@ static void irdma_set_qos_info(struct irdma_sc_vsi *vsi,
{
u8 i;
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ vsi->qos[i].qs_handle = vsi->dev->qos[i].qs_handle;
+ vsi->qos[i].valid = true;
+ }
+
+ return;
+ }
vsi->qos_rel_bw = l2p->vsi_rel_bw;
vsi->qos_prio_type = l2p->vsi_prio_type;
vsi->dscp_mode = l2p->dscp_mode;
@@ -404,7 +412,8 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) ||
- (info->virtual_map && info->rq_pa >= pble_obj_cnt))
+ (!info->qp_uk_init_info.srq_uk &&
+ info->virtual_map && info->rq_pa >= pble_obj_cnt))
return -EINVAL;
qp->llp_stream_handle = (void *)(-1);
@@ -439,6 +448,208 @@ int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info)
}
/**
+ * irdma_sc_srq_init - init sc_srq structure
+ * @srq: srq sc struct
+ * @info: parameters for srq init
+ */
+int irdma_sc_srq_init(struct irdma_sc_srq *srq,
+ struct irdma_srq_init_info *info)
+{
+ u32 srq_size_quanta;
+ int ret_code;
+
+ ret_code = irdma_uk_srq_init(&srq->srq_uk, &info->srq_uk_init_info);
+ if (ret_code)
+ return ret_code;
+
+ srq->dev = info->pd->dev;
+ srq->pd = info->pd;
+ srq->vsi = info->vsi;
+ srq->srq_pa = info->srq_pa;
+ srq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+ srq->pasid = info->pasid;
+ srq->pasid_valid = info->pasid_valid;
+ srq->srq_limit = info->srq_limit;
+ srq->leaf_pbl_size = info->leaf_pbl_size;
+ srq->virtual_map = info->virtual_map;
+ srq->tph_en = info->tph_en;
+ srq->arm_limit_event = info->arm_limit_event;
+ srq->tph_val = info->tph_value;
+ srq->shadow_area_pa = info->shadow_area_pa;
+
+ /* The smallest SRQ size is 256B, i.e. 8 quanta */
+ srq_size_quanta = max((u32)IRDMA_SRQ_MIN_QUANTA,
+ srq->srq_uk.srq_size *
+ srq->srq_uk.wqe_size_multiplier);
+ srq->hw_srq_size = irdma_get_encoded_wqe_size(srq_size_quanta,
+ IRDMA_QUEUE_TYPE_SRQ);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_create - send srq create CQP WQE
+ * @srq: srq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_create(struct irdma_sc_srq *srq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->pd->dev->cqp;
+ if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
+ srq->srq_uk.srq_id >
+ (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, srq->srq_limit) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
+ set_64bit_val(wqe, 8, (uintptr_t)srq);
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
+ srq->srq_pa >>
+ IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
+ srq->shadow_area_pa >>
+ IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
+ srq->first_pm_pbl_idx));
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
+ srq->arm_limit_event) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_CREATE WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
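
The SRQ create/modify/destroy builders above all follow the same
ordering discipline. A distilled sketch (field values hypothetical):

	/* 1. write all payload quanta of the WQE */
	set_64bit_val(wqe, 0, payload_qw0);
	set_64bit_val(wqe, 16, payload_qw2);

	/* 2. barrier: payload must be visible before the valid bit */
	dma_wmb();

	/* 3. the header at offset 24 carries IRDMA_CQPSQ_WQEVALID with
	 * the current polarity; hardware may consume the WQE as soon as
	 * it observes a matching valid bit
	 */
	set_64bit_val(wqe, 24, hdr);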
+
+/**
+ * irdma_sc_srq_modify - send modify_srq CQP WQE
+ * @srq: srq sc struct
+ * @info: parameters for srq modification
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_modify(struct irdma_sc_srq *srq,
+ struct irdma_modify_srq_info *info, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->dev->cqp;
+ if (srq->srq_uk.srq_id < cqp->dev->hw_attrs.min_hw_srq_id ||
+ srq->srq_uk.srq_id >
+ (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].max_cnt - 1))
+ return -EINVAL;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQ_LIMIT, info->srq_limit) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQSIZE, srq->hw_srq_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE, srq->srq_uk.wqe_size));
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_SRQCTX, srq->srq_uk.srq_id));
+ set_64bit_val(wqe, 16,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PD_ID, srq->pd->pd_id));
+ set_64bit_val(wqe, 32,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR,
+ srq->srq_pa >>
+ IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S));
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR,
+ srq->shadow_area_pa >>
+ IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX,
+ srq->first_pm_pbl_idx));
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_MODIFY_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE, srq->leaf_pbl_size) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_VIRTMAP, srq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT,
+ info->arm_limit_event) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_MODIFY WQE", DUMP_PREFIX_OFFSET, 16, 8,
+ wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
+ * irdma_sc_srq_destroy - send srq_destroy CQP WQE
+ * @srq: srq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static int irdma_sc_srq_destroy(struct irdma_sc_srq *srq, u64 scratch,
+ bool post_sq)
+{
+ struct irdma_sc_cqp *cqp;
+ __le64 *wqe;
+ u64 hdr;
+
+ cqp = srq->dev->cqp;
+
+ wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 8, (uintptr_t)srq);
+
+ hdr = srq->srq_uk.srq_id |
+ FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_SRQ) |
+ FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ dma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ print_hex_dump_debug("WQE: SRQ_DESTROY WQE", DUMP_PREFIX_OFFSET, 16,
+ 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ if (post_sq)
+ irdma_sc_cqp_post_sq(cqp);
+
+ return 0;
+}
+
+/**
* irdma_sc_qp_create - create qp
* @qp: sc qp
* @info: qp create info
@@ -629,13 +840,14 @@ static u8 irdma_sc_get_encoded_ird_size(u16 ird_size)
}
/**
- * irdma_sc_qp_setctx_roce - set qp's context
+ * irdma_sc_qp_setctx_roce_gen_2 - set qp's context
* @qp: sc qp
* @qp_ctx: context ptr
* @info: ctx info
*/
-void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
- struct irdma_qp_host_ctx_info *info)
+static void irdma_sc_qp_setctx_roce_gen_2(struct irdma_sc_qp *qp,
+ __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
{
struct irdma_roce_offload_info *roce_info;
struct irdma_udp_offload_info *udp;
@@ -753,6 +965,189 @@ void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
}
+/**
+ * irdma_sc_get_encoded_ird_size_gen_3 - get encoded IRD size for GEN 3
+ * @ird_size: IRD size
+ * The IRD from the connection is rounded to a supported HW setting and then
+ * encoded for the ird_size field of the qp_ctx. Consumers are expected to
+ * provide a valid IRD size based on hardware attributes. The IRD size
+ * defaults to 4 in case of invalid input.
+ */
+static u8 irdma_sc_get_encoded_ird_size_gen_3(u16 ird_size)
+{
+ switch (ird_size ?
+ roundup_pow_of_two(2 * ird_size) : 4) {
+ case 4096:
+ return IRDMA_IRD_HW_SIZE_4096_GEN3;
+ case 2048:
+ return IRDMA_IRD_HW_SIZE_2048_GEN3;
+ case 1024:
+ return IRDMA_IRD_HW_SIZE_1024_GEN3;
+ case 512:
+ return IRDMA_IRD_HW_SIZE_512_GEN3;
+ case 256:
+ return IRDMA_IRD_HW_SIZE_256_GEN3;
+ case 128:
+ return IRDMA_IRD_HW_SIZE_128_GEN3;
+ case 64:
+ return IRDMA_IRD_HW_SIZE_64_GEN3;
+ case 32:
+ return IRDMA_IRD_HW_SIZE_32_GEN3;
+ case 16:
+ return IRDMA_IRD_HW_SIZE_16_GEN3;
+ case 8:
+ return IRDMA_IRD_HW_SIZE_8_GEN3;
+ case 4:
+ default:
+ break;
+ }
+
+ return IRDMA_IRD_HW_SIZE_4_GEN3;
+}
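
A worked example of the encoding above (illustrative): ird_size = 100
rounds via roundup_pow_of_two(200) to 256 and encodes to
IRDMA_IRD_HW_SIZE_256_GEN3, while ird_size = 0 takes the default path
and encodes to IRDMA_IRD_HW_SIZE_4_GEN3.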
+
+/**
+ * irdma_sc_qp_setctx_roce_gen_3 - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+static void irdma_sc_qp_setctx_roce_gen_3(struct irdma_sc_qp *qp,
+ __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ struct irdma_roce_offload_info *roce_info = info->roce_info;
+ struct irdma_udp_offload_info *udp = info->udp_info;
+ u64 qw0, qw3, qw7 = 0, qw8 = 0;
+ u8 push_mode_en;
+ u32 push_idx;
+
+ qp->user_pri = info->user_pri;
+ if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) {
+ push_mode_en = 0;
+ push_idx = 0;
+ } else {
+ push_mode_en = 1;
+ push_idx = qp->push_idx;
+ }
+
+ qw0 = FIELD_PREP(IRDMAQPC_RQWQESIZE, qp->qp_uk.rq_wqe_size) |
+ FIELD_PREP(IRDMAQPC_RCVTPHEN, qp->rcv_tph_en) |
+ FIELD_PREP(IRDMAQPC_XMITTPHEN, qp->xmit_tph_en) |
+ FIELD_PREP(IRDMAQPC_RQTPHEN, qp->rq_tph_en) |
+ FIELD_PREP(IRDMAQPC_SQTPHEN, qp->sq_tph_en) |
+ FIELD_PREP(IRDMAQPC_PPIDX, push_idx) |
+ FIELD_PREP(IRDMAQPC_PMENA, push_mode_en) |
+ FIELD_PREP(IRDMAQPC_DC_TCP_EN, roce_info->dctcp_en) |
+ FIELD_PREP(IRDMAQPC_ISQP1, roce_info->is_qp1) |
+ FIELD_PREP(IRDMAQPC_ROCE_TVER, roce_info->roce_tver) |
+ FIELD_PREP(IRDMAQPC_IPV4, udp->ipv4) |
+ FIELD_PREP(IRDMAQPC_USE_SRQ, !qp->qp_uk.srq_uk ? 0 : 1) |
+ FIELD_PREP(IRDMAQPC_INSERTVLANTAG, udp->insert_vlan_tag);
+ set_64bit_val(qp_ctx, 0, qw0);
+ set_64bit_val(qp_ctx, 8, qp->sq_pa);
+ set_64bit_val(qp_ctx, 16, qp->rq_pa);
+ qw3 = FIELD_PREP(IRDMAQPC_RQSIZE, qp->hw_rq_size) |
+ FIELD_PREP(IRDMAQPC_SQSIZE, qp->hw_sq_size) |
+ FIELD_PREP(IRDMAQPC_TTL, udp->ttl) |
+ FIELD_PREP(IRDMAQPC_TOS, udp->tos) |
+ FIELD_PREP(IRDMAQPC_SRCPORTNUM, udp->src_port) |
+ FIELD_PREP(IRDMAQPC_DESTPORTNUM, udp->dst_port);
+ set_64bit_val(qp_ctx, 24, qw3);
+ set_64bit_val(qp_ctx, 32,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR2, udp->dest_ip_addr[2]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR3, udp->dest_ip_addr[3]));
+ set_64bit_val(qp_ctx, 40,
+ FIELD_PREP(IRDMAQPC_DESTIPADDR0, udp->dest_ip_addr[0]) |
+ FIELD_PREP(IRDMAQPC_DESTIPADDR1, udp->dest_ip_addr[1]));
+ set_64bit_val(qp_ctx, 48,
+ FIELD_PREP(IRDMAQPC_SNDMSS, udp->snd_mss) |
+ FIELD_PREP(IRDMAQPC_VLANTAG, udp->vlan_tag) |
+ FIELD_PREP(IRDMAQPC_ARPIDX, udp->arp_idx));
+ qw7 = FIELD_PREP(IRDMAQPC_PKEY, roce_info->p_key) |
+ FIELD_PREP(IRDMAQPC_ACKCREDITS, roce_info->ack_credits) |
+ FIELD_PREP(IRDMAQPC_FLOWLABEL, udp->flow_label);
+ set_64bit_val(qp_ctx, 56, qw7);
+ qw8 = FIELD_PREP(IRDMAQPC_QKEY, roce_info->qkey) |
+ FIELD_PREP(IRDMAQPC_DESTQP, roce_info->dest_qp);
+ set_64bit_val(qp_ctx, 64, qw8);
+ set_64bit_val(qp_ctx, 80,
+ FIELD_PREP(IRDMAQPC_PSNNXT, udp->psn_nxt) |
+ FIELD_PREP(IRDMAQPC_LSN, udp->lsn));
+ set_64bit_val(qp_ctx, 88,
+ FIELD_PREP(IRDMAQPC_EPSN, udp->epsn));
+ set_64bit_val(qp_ctx, 96,
+ FIELD_PREP(IRDMAQPC_PSNMAX, udp->psn_max) |
+ FIELD_PREP(IRDMAQPC_PSNUNA, udp->psn_una));
+ set_64bit_val(qp_ctx, 112,
+ FIELD_PREP(IRDMAQPC_CWNDROCE, udp->cwnd));
+ set_64bit_val(qp_ctx, 128,
+ FIELD_PREP(IRDMAQPC_MINRNR_TIMER, udp->min_rnr_timer) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_THRESH, udp->rnr_nak_thresh) |
+ FIELD_PREP(IRDMAQPC_REXMIT_THRESH, udp->rexmit_thresh) |
+ FIELD_PREP(IRDMAQPC_RNRNAK_TMR, udp->rnr_nak_tmr) |
+ FIELD_PREP(IRDMAQPC_RTOMIN, roce_info->rtomin));
+ set_64bit_val(qp_ctx, 136,
+ FIELD_PREP(IRDMAQPC_TXCQNUM, info->send_cq_num) |
+ FIELD_PREP(IRDMAQPC_RXCQNUM, info->rcv_cq_num));
+ set_64bit_val(qp_ctx, 152,
+ FIELD_PREP(IRDMAQPC_MACADDRESS,
+ ether_addr_to_u64(roce_info->mac_addr)) |
+ FIELD_PREP(IRDMAQPC_LOCALACKTIMEOUT,
+ roce_info->local_ack_timeout));
+ set_64bit_val(qp_ctx, 160,
+ FIELD_PREP(IRDMAQPC_ORDSIZE_GEN3, roce_info->ord_size) |
+ FIELD_PREP(IRDMAQPC_IRDSIZE_GEN3,
+ irdma_sc_get_encoded_ird_size_gen_3(roce_info->ird_size)) |
+ FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
+ FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
+ FIELD_PREP(IRDMAQPC_USESTATSINSTANCE,
+ info->stats_idx_valid) |
+ FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
+ FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
+ FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
+ FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
+ FIELD_PREP(IRDMAQPC_FW_CC_ENABLE,
+ roce_info->fw_cc_enable) |
+ FIELD_PREP(IRDMAQPC_UDPRIVCQENABLE,
+ roce_info->udprivcq_en) |
+ FIELD_PREP(IRDMAQPC_PRIVEN, roce_info->priv_mode_en) |
+ FIELD_PREP(IRDMAQPC_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
+ FIELD_PREP(IRDMAQPC_TIMELYENABLE, roce_info->timely_en));
+ set_64bit_val(qp_ctx, 168,
+ FIELD_PREP(IRDMAQPC_QPCOMPCTX, info->qp_compl_ctx));
+ set_64bit_val(qp_ctx, 176,
+ FIELD_PREP(IRDMAQPC_SQTPHVAL, qp->sq_tph_val) |
+ FIELD_PREP(IRDMAQPC_RQTPHVAL, qp->rq_tph_val) |
+ FIELD_PREP(IRDMAQPC_QSHANDLE, qp->qs_handle));
+ set_64bit_val(qp_ctx, 184,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR3, udp->local_ipaddr[3]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR2, udp->local_ipaddr[2]));
+ set_64bit_val(qp_ctx, 192,
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR1, udp->local_ipaddr[1]) |
+ FIELD_PREP(IRDMAQPC_LOCAL_IPADDR0, udp->local_ipaddr[0]));
+ set_64bit_val(qp_ctx, 200,
+ FIELD_PREP(IRDMAQPC_THIGH, roce_info->t_high) |
+ FIELD_PREP(IRDMAQPC_SRQ_ID,
+ !qp->qp_uk.srq_uk ?
+ 0 : qp->qp_uk.srq_uk->srq_id) |
+ FIELD_PREP(IRDMAQPC_TLOW, roce_info->t_low));
+ set_64bit_val(qp_ctx, 208, roce_info->pd_id |
+ FIELD_PREP(IRDMAQPC_STAT_INDEX_GEN3, info->stats_idx) |
+ FIELD_PREP(IRDMAQPC_PKT_LIMIT, qp->pkt_limit));
+
+ print_hex_dump_debug("WQE: QP_HOST ROCE CTX WQE", DUMP_PREFIX_OFFSET,
+ 16, 8, qp_ctx, IRDMA_QP_CTX_SIZE, false);
+}
+
+void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
+ struct irdma_qp_host_ctx_info *info)
+{
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2)
+ irdma_sc_qp_setctx_roce_gen_2(qp, qp_ctx, info);
+ else
+ irdma_sc_qp_setctx_roce_gen_3(qp, qp_ctx, info);
+}
+
/* irdma_sc_alloc_local_mac_entry - allocate a mac entry
* @cqp: struct for cqp hw
* @scratch: u64 saved to be used during cqp completion
@@ -1080,7 +1475,8 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) |
FIELD_PREP(IRDMA_CQPSQ_STAG_STAGLEN, info->total_len));
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
set_64bit_val(wqe, 40,
FIELD_PREP(IRDMA_CQPSQ_STAG_HMCFNIDX, info->hmc_fcn_index));
@@ -1096,6 +1492,8 @@ static int irdma_sc_alloc_stag(struct irdma_sc_dev *dev,
FIELD_PREP(IRDMA_CQPSQ_STAG_REMACCENABLED, info->remote_access) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1165,6 +1563,7 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, 16,
FIELD_PREP(IRDMA_CQPSQ_STAG_KEY, info->stag_key) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18) |
FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
if (!info->chunk_size) {
set_64bit_val(wqe, 32, info->reg_addr_pa);
@@ -1187,6 +1586,8 @@ static int irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev,
FIELD_PREP(IRDMA_CQPSQ_STAG_VABASEDTO, addr_type) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEHMCFNIDX, info->use_hmc_fcn_index) |
FIELD_PREP(IRDMA_CQPSQ_STAG_USEPFRID, info->use_pf_rid) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN,
+ info->remote_atomics_en) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1223,7 +1624,8 @@ static int irdma_sc_dealloc_stag(struct irdma_sc_dev *dev,
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->stag_idx) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DEALLOC_STAG) |
FIELD_PREP(IRDMA_CQPSQ_STAG_MR, info->mr) |
@@ -1263,7 +1665,8 @@ static int irdma_sc_mw_alloc(struct irdma_sc_dev *dev,
set_64bit_val(wqe, 8,
FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID));
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index));
+ FIELD_PREP(IRDMA_CQPSQ_STAG_IDX, info->mw_stag_index) |
+ FIELD_PREP(IRDMA_CQPSQ_STAG_PDID_HI, info->pd_id >> 18));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_ALLOC_STAG) |
FIELD_PREP(IRDMA_CQPSQ_STAG_MWTYPE, info->mw_wide) |
@@ -1343,6 +1746,7 @@ int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_ATOMICS_EN, info->remote_atomics_en) |
FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -1873,7 +2277,7 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
mutex_init(&vsi->qos[i].qos_mutex);
INIT_LIST_HEAD(&vsi->qos[i].qplist);
}
- if (vsi->register_qset) {
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) {
vsi->dev->ws_add = irdma_ws_add;
vsi->dev->ws_remove = irdma_ws_remove;
vsi->dev->ws_reset = irdma_ws_reset;
@@ -1888,7 +2292,7 @@ void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
* irdma_get_stats_idx - Return stats index
* @vsi: pointer to the vsi
*/
-static u8 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
+static u16 irdma_get_stats_idx(struct irdma_sc_vsi *vsi)
{
struct irdma_stats_inst_info stats_info = {};
struct irdma_sc_dev *dev = vsi->dev;
@@ -1964,12 +2368,13 @@ int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
(void *)((uintptr_t)stats_buff_mem->va +
IRDMA_GATHER_STATS_BUF_SIZE);
- irdma_hw_stats_start_timer(vsi);
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ irdma_hw_stats_start_timer(vsi);
/* when stat allocation is not required default to fcn_id. */
vsi->stats_idx = info->fcn_id;
if (info->alloc_stats_inst) {
- u8 stats_idx = irdma_get_stats_idx(vsi);
+ u16 stats_idx = irdma_get_stats_idx(vsi);
if (stats_idx != IRDMA_INVALID_STATS_IDX) {
vsi->stats_inst_alloc = true;
@@ -1993,7 +2398,7 @@ void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
{
struct irdma_stats_inst_info stats_info = {};
struct irdma_sc_dev *dev = vsi->dev;
- u8 stats_idx = vsi->stats_idx;
+ u16 stats_idx = vsi->stats_idx;
if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
if (vsi->stats_inst_alloc) {
@@ -2009,7 +2414,9 @@ void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi)
if (!vsi->pestat)
return;
- irdma_hw_stats_stop_timer(vsi);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ irdma_hw_stats_stop_timer(vsi);
dma_free_coherent(vsi->pestat->hw->device,
vsi->pestat->gather_info.stats_buff_mem.size,
vsi->pestat->gather_info.stats_buff_mem.va,
@@ -2026,6 +2433,14 @@ u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type)
{
u8 encoded_size = 0;
+ if (queue_type == IRDMA_QUEUE_TYPE_SRQ) {
+ /* The smallest SRQ size is 256B (8 quanta), which is
+ * encoded as 0.
+ */
+ encoded_size = ilog2(wqsize) - 3;
+
+ return encoded_size;
+ }
/* cqp sq's hw coded value starts from 1 for size of 4
* while it starts from 0 for qp' wq's.
*/
@@ -2259,6 +2674,12 @@ int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
info->ae_code | FIELD_PREP(IRDMA_CQPSQ_FWQE_AESOURCE,
info->ae_src) : 0;
set_64bit_val(wqe, 8, temp);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ set_64bit_val(wqe, 40,
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_SQ_IDX, info->err_sq_idx));
+ set_64bit_val(wqe, 48,
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_RQ_IDX, info->err_rq_idx));
+ }
hdr = qp->qp_uk.qp_id |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_FLUSH_WQES) |
@@ -2267,6 +2688,9 @@ int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHSQ, flush_sq) |
FIELD_PREP(IRDMA_CQPSQ_FWQE_FLUSHRQ, flush_rq) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ hdr |= FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_SQ_IDX_VALID, info->err_sq_idx_valid) |
+ FIELD_PREP(IRDMA_CQPSQ_FWQE_ERR_RQ_IDX_VALID, info->err_rq_idx_valid);
dma_wmb(); /* make sure WQE is written before valid bit is set */
set_64bit_val(wqe, 24, hdr);
@@ -2519,8 +2943,6 @@ static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
__le64 *wqe;
struct irdma_sc_cqp *cqp;
u64 hdr;
- struct irdma_sc_ceq *ceq;
- int ret_code = 0;
cqp = cq->dev->cqp;
if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
@@ -2529,19 +2951,9 @@ static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
return -EINVAL;
- ceq = cq->dev->ceq[cq->ceq_id];
- if (ceq && ceq->reg_cq)
- ret_code = irdma_sc_add_cq_ctx(ceq, cq);
-
- if (ret_code)
- return ret_code;
-
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
- if (!wqe) {
- if (ceq && ceq->reg_cq)
- irdma_sc_remove_cq_ctx(ceq, cq);
+ if (!wqe)
return -ENOMEM;
- }
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
@@ -2562,6 +2974,9 @@ static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_CQ_CHKOVERFLOW, check_overflow) |
FIELD_PREP(IRDMA_CQPSQ_CQ_VIRTMAP, cq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CQID_HIGH, cq->cq_uk.cq_id >> 22) |
+ FIELD_PREP(IRDMA_CQPSQ_CQ_CEQID_HIGH,
+ (cq->ceq_id_valid ? cq->ceq_id : 0) >> 10) |
FIELD_PREP(IRDMA_CQPSQ_CQ_ENCEQEMASK, cq->ceqe_mask) |
FIELD_PREP(IRDMA_CQPSQ_CQ_CEQIDVALID, cq->ceq_id_valid) |
FIELD_PREP(IRDMA_CQPSQ_TPHEN, cq->tph_en) |
@@ -2591,17 +3006,12 @@ int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
struct irdma_sc_cqp *cqp;
__le64 *wqe;
u64 hdr;
- struct irdma_sc_ceq *ceq;
cqp = cq->dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe)
return -ENOMEM;
- ceq = cq->dev->ceq[cq->ceq_id];
- if (ceq && ceq->reg_cq)
- irdma_sc_remove_cq_ctx(ceq, cq);
-
set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
set_64bit_val(wqe, 40, cq->shadow_area_pa);
@@ -2706,6 +3116,41 @@ static int irdma_sc_cq_modify(struct irdma_sc_cq *cq,
}
/**
+ * irdma_sc_get_decoded_ird_size_gen_3 - get decoded IRD size for GEN 3
+ * @ird_enc: IRD encoding
+ * IRD size defaults to a value of 4 in case of invalid input.
+ */
+static u16 irdma_sc_get_decoded_ird_size_gen_3(u8 ird_enc)
+{
+ switch (ird_enc) {
+ case IRDMA_IRD_HW_SIZE_4096_GEN3:
+ return 4096;
+ case IRDMA_IRD_HW_SIZE_2048_GEN3:
+ return 2048;
+ case IRDMA_IRD_HW_SIZE_1024_GEN3:
+ return 1024;
+ case IRDMA_IRD_HW_SIZE_512_GEN3:
+ return 512;
+ case IRDMA_IRD_HW_SIZE_256_GEN3:
+ return 256;
+ case IRDMA_IRD_HW_SIZE_128_GEN3:
+ return 128;
+ case IRDMA_IRD_HW_SIZE_64_GEN3:
+ return 64;
+ case IRDMA_IRD_HW_SIZE_32_GEN3:
+ return 32;
+ case IRDMA_IRD_HW_SIZE_16_GEN3:
+ return 16;
+ case IRDMA_IRD_HW_SIZE_8_GEN3:
+ return 8;
+ case IRDMA_IRD_HW_SIZE_4_GEN3:
+ return 4;
+ default:
+ return 4;
+ }
+}
+
+/**
* irdma_check_cqp_progress - check cqp processing progress
* @timeout: timeout info struct
* @dev: sc device struct
@@ -2738,6 +3183,89 @@ static inline void irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val,
}
/**
+ * irdma_sc_cqp_def_cmpl_ae_handler - remove completed requests from pending list
+ * @dev: sc device struct
+ * @info: AE entry info
+ * @first: true if this is the first call to this handler for a given AEQE
+ * @scratch: (out) scratch entry pointer
+ * @sw_def_info: (in/out) SW ticket value for this AE
+ *
+ * In case of AE_DEF_CMPL event, this function should be called in a loop
+ * until it returns NULL-ptr via scratch.
+ * For each call, it looks for a matching CQP request on the pending list,
+ * removes it from the list and returns the pointer to the associated scratch
+ * entry.
+ * If this is the first call to this function for a given AEQE, the
+ * sw_def_info value is not used to find matching requests. Instead, it is
+ * populated with the value from the first matching cqp_request on the list.
+ * For subsequent calls, ooo_op->sw_def_info needs to match the value passed
+ * in by the caller.
+ *
+ * Return: scratch entry pointer for cqp_request to be released or NULL
+ * if no matching request is found.
+ */
+void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
+ struct irdma_aeqe_info *info,
+ bool first, u64 *scratch,
+ u32 *sw_def_info)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ *scratch = 0;
+
+ spin_lock_irqsave(&dev->cqp->ooo_list_lock, flags);
+ list_for_each_entry(ooo_op, &dev->cqp->ooo_pnd, list_entry) {
+ if (ooo_op->deferred &&
+ ((first && ooo_op->def_info == info->def_info) ||
+ (!first && ooo_op->sw_def_info == *sw_def_info))) {
+ *sw_def_info = ooo_op->sw_def_info;
+ *scratch = ooo_op->scratch;
+
+ list_move(&ooo_op->list_entry, &dev->cqp->ooo_avail);
+ atomic64_inc(&dev->cqp->completed_ops);
+
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cqp->ooo_list_lock, flags);
+
+ if (first && !*scratch)
+ ibdev_dbg(to_ibdev(dev),
+ "AEQ: deferred completion with unknown ticket: def_info 0x%x\n",
+ info->def_info);
+}
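
A minimal sketch of the calling loop the kernel-doc above describes
(not part of the patch; releasing each cqp_request is left to the
caller):

	static void example_drain_def_cmpl(struct irdma_sc_dev *dev,
					   struct irdma_aeqe_info *info)
	{
		bool first = true;
		u32 sw_def_info = 0;
		u64 scratch;

		do {
			irdma_sc_cqp_def_cmpl_ae_handler(dev, info, first,
							 &scratch,
							 &sw_def_info);
			if (!scratch)
				break;	/* no more matching requests */

			/* release the cqp_request identified by scratch */
			first = false;
		} while (true);
	}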
+
+/**
+ * irdma_sc_cqp_cleanup_handler - remove requests from pending list
+ * @dev: sc device struct
+ *
+ * This function should be called in a loop from irdma_cleanup_pending_cqp_op.
+ * For each call, it takes the first CQP request on the pending list, removes
+ * it from the list and returns the pointer to the associated scratch entry.
+ *
+ * Return: scratch entry pointer for cqp_request to be released or NULL
+ * if pending list is empty.
+ */
+u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ u64 scratch = 0;
+
+ list_for_each_entry(ooo_op, &dev->cqp->ooo_pnd, list_entry) {
+ scratch = ooo_op->scratch;
+
+ list_del(&ooo_op->list_entry);
+ list_add(&ooo_op->list_entry, &dev->cqp->ooo_avail);
+ atomic64_inc(&dev->cqp->completed_ops);
+
+ break;
+ }
+
+ return scratch;
+}
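
And the matching cleanup loop (a sketch, not part of the patch; a real
caller such as irdma_cleanup_pending_cqp_op would also release each
request):

	static void example_cleanup_pending(struct irdma_sc_dev *dev)
	{
		u64 scratch;

		while ((scratch = irdma_sc_cqp_cleanup_handler(dev))) {
			/* release the cqp_request identified by scratch */
		}
	}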
+
+/**
* irdma_cqp_poll_registers - poll cqp registers
* @cqp: struct for cqp hw
* @tail: wqtail register value
@@ -2794,7 +3322,10 @@ static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 *buf,
obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT);
break;
case IRDMA_HMC_IW_APBVT_ENTRY:
- obj_info[rsrc_idx].cnt = 1;
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ obj_info[rsrc_idx].cnt = 1;
+ else
+ obj_info[rsrc_idx].cnt = 0;
break;
default:
obj_info[rsrc_idx].cnt = (u32)temp;
@@ -2829,7 +3360,8 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
IRDMA_HMC_IW_QP);
irdma_sc_decode_fpm_commit(dev, buf, 8, info,
IRDMA_HMC_IW_CQ);
- /* skiping RSRVD */
+ irdma_sc_decode_fpm_commit(dev, buf, 16, info,
+ IRDMA_HMC_IW_SRQ);
irdma_sc_decode_fpm_commit(dev, buf, 24, info,
IRDMA_HMC_IW_HTE);
irdma_sc_decode_fpm_commit(dev, buf, 32, info,
@@ -2864,15 +3396,17 @@ irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 *buf,
IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_commit(dev, buf, 152, info,
IRDMA_HMC_IW_MD);
- irdma_sc_decode_fpm_commit(dev, buf, 160, info,
- IRDMA_HMC_IW_OOISC);
- irdma_sc_decode_fpm_commit(dev, buf, 168, info,
- IRDMA_HMC_IW_OOISCFFL);
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_commit(dev, buf, 160, info,
+ IRDMA_HMC_IW_OOISC);
+ irdma_sc_decode_fpm_commit(dev, buf, 168, info,
+ IRDMA_HMC_IW_OOISCFFL);
+ }
}
/* searching for the last object in HMC to find the size of the HMC area. */
for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
- if (info[i].base > max_base) {
+ if (info[i].base > max_base && info[i].cnt) {
max_base = info[i].base;
last_hmc_obj = i;
}
@@ -2927,6 +3461,7 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
struct irdma_hmc_fpm_misc *hmc_fpm_misc)
{
struct irdma_hmc_obj_info *obj_info;
+ u8 ird_encoding;
u64 temp;
u32 size;
u16 max_pe_sds;
@@ -2935,7 +3470,19 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
get_64bit_val(buf, 0, &temp);
hmc_info->first_sd_index = (u16)FIELD_GET(IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX, temp);
- max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS_GEN3, temp);
+ else
+ max_pe_sds = (u16)FIELD_GET(IRDMA_QUERY_FPM_MAX_PE_SDS, temp);
+
+ /* Reduce SD count for unprivileged functions by 1 to account for PBLE
+ * backing page rounding
+ */
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2 &&
+ (hmc_info->hmc_fn_id >= dev->hw_attrs.first_hw_vf_fpm_id ||
+ !dev->privileged))
+ max_pe_sds--;
hmc_fpm_misc->max_sds = max_pe_sds;
hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
@@ -2949,11 +3496,17 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
size = (u32)(temp >> 32);
obj_info[IRDMA_HMC_IW_CQ].size = BIT_ULL(size);
+ irdma_sc_decode_fpm_query(buf, 24, obj_info, IRDMA_HMC_IW_SRQ);
irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE);
irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP);
- obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
- obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 0;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 0;
+ } else {
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192;
+ obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+ }
irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR);
irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF);
@@ -2962,7 +3515,7 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp;
obj_info[IRDMA_HMC_IW_XFFL].size = 4;
hmc_fpm_misc->xf_block_size = FIELD_GET(IRDMA_QUERY_FPM_XFBLOCKSIZE, temp);
- if (!hmc_fpm_misc->xf_block_size)
+ if (obj_info[IRDMA_HMC_IW_XF].max_cnt && !hmc_fpm_misc->xf_block_size)
return -EINVAL;
irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1);
@@ -2984,6 +3537,14 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
hmc_fpm_misc->max_ceqs = FIELD_GET(IRDMA_QUERY_FPM_MAX_CEQS, temp);
hmc_fpm_misc->ht_multiplier = FIELD_GET(IRDMA_QUERY_FPM_HTMULTIPLIER, temp);
hmc_fpm_misc->timer_bucket = FIELD_GET(IRDMA_QUERY_FPM_TIMERBUCKET, temp);
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2,
+ dev->feature_info[IRDMA_FTN_FLAGS])) {
+ ird_encoding = (u8)FIELD_GET(IRDMA_QUERY_FPM_MAX_IRD, temp);
+ hmc_fpm_misc->ird =
+ irdma_sc_get_decoded_ird_size_gen_3(ird_encoding) / 2;
+ dev->hw_attrs.max_hw_ird = hmc_fpm_misc->ird;
+ dev->hw_attrs.max_hw_ord = hmc_fpm_misc->ird;
+ }
if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
return 0;
irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC);
@@ -3000,85 +3561,30 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
- irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
-
- get_64bit_val(buf, 168, &temp);
- obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
- obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
- hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
- if (!hmc_fpm_misc->ooiscf_block_size &&
- obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
- return -EINVAL;
- return 0;
-}
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
-/**
- * irdma_sc_find_reg_cq - find cq ctx index
- * @ceq: ceq sc structure
- * @cq: cq sc structure
- */
-static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
- struct irdma_sc_cq *cq)
-{
- u32 i;
-
- for (i = 0; i < ceq->reg_cq_size; i++) {
- if (cq == ceq->reg_cq[i])
- return i;
+ get_64bit_val(buf, 168, &temp);
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
+ hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->ooiscf_block_size &&
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ return -EINVAL;
}
- return IRDMA_INVALID_CQ_IDX;
-}
-
-/**
- * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
- * @ceq: ceq sc structure
- * @cq: cq sc structure
- */
-int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ceq->req_cq_lock, flags);
-
- if (ceq->reg_cq_size == ceq->elem_cnt) {
- spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
- return -ENOMEM;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ get_64bit_val(buf, 176, &temp);
+ hmc_fpm_misc->loc_mem_pages = (u32)FIELD_GET(IRDMA_QUERY_FPM_LOC_MEM_PAGES, temp);
+ if (!hmc_fpm_misc->loc_mem_pages)
+ return -EINVAL;
}
- ceq->reg_cq[ceq->reg_cq_size++] = cq;
-
- spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
-
return 0;
}
/**
- * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
- * @ceq: ceq sc structure
- * @cq: cq sc structure
- */
-void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
-{
- unsigned long flags;
- u32 cq_ctx_idx;
-
- spin_lock_irqsave(&ceq->req_cq_lock, flags);
- cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
- if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
- goto exit;
-
- ceq->reg_cq_size--;
- if (cq_ctx_idx != ceq->reg_cq_size)
- ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
- ceq->reg_cq[ceq->reg_cq_size] = NULL;
-
-exit:
- spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
-}
-
-/**
* irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
* @cqp: IWARP control queue pair pointer
* @info: IWARP control queue pair init info pointer
@@ -3088,6 +3594,8 @@ exit:
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
struct irdma_cqp_init_info *info)
{
+ struct irdma_ooo_cqp_op *ooo_op;
+ u32 num_ooo_ops;
u8 hw_sq_size;
if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 ||
@@ -3118,17 +3626,43 @@ int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
cqp->rocev2_rto_policy = info->rocev2_rto_policy;
cqp->protocol_used = info->protocol_used;
memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params));
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ cqp->ooisc_blksize = info->ooisc_blksize;
+ cqp->rrsp_blksize = info->rrsp_blksize;
+ cqp->q1_blksize = info->q1_blksize;
+ cqp->xmit_blksize = info->xmit_blksize;
+ cqp->blksizes_valid = info->blksizes_valid;
+ cqp->ts_shift = info->ts_shift;
+ cqp->ts_override = info->ts_override;
+ cqp->en_fine_grained_timers = info->en_fine_grained_timers;
+ cqp->pe_en_vf_cnt = info->pe_en_vf_cnt;
+ cqp->ooo_op_array = info->ooo_op_array;
+ /* initialize the OOO lists */
+ INIT_LIST_HEAD(&cqp->ooo_avail);
+ INIT_LIST_HEAD(&cqp->ooo_pnd);
+ if (cqp->ooo_op_array) {
+ /* Populate avail list entries */
+ for (num_ooo_ops = 0, ooo_op = info->ooo_op_array;
+ num_ooo_ops < cqp->sq_size;
+ num_ooo_ops++, ooo_op++)
+ list_add(&ooo_op->list_entry, &cqp->ooo_avail);
+ }
+ }
info->dev->cqp = cqp;
IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size);
+ cqp->last_def_cmpl_ticket = 0;
+ cqp->sw_def_cmpl_ticket = 0;
cqp->requested_ops = 0;
atomic64_set(&cqp->completed_ops, 0);
/* for the cqp commands backlog. */
INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);
writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]);
- writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
- writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2) {
+ writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]);
+ writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]);
+ }
ibdev_dbg(to_ibdev(cqp->dev),
"WQE: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%p] cqp[%p] polarity[x%04x]\n",
@@ -3160,6 +3694,7 @@ int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
return -ENOMEM;
spin_lock_init(&cqp->dev->cqp_lock);
+ spin_lock_init(&cqp->ooo_list_lock);
temp = FIELD_PREP(IRDMA_CQPHC_SQSIZE, cqp->hw_sq_size) |
FIELD_PREP(IRDMA_CQPHC_SVER, cqp->struct_ver) |
@@ -3171,12 +3706,29 @@ int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err)
FIELD_PREP(IRDMA_CQPHC_PROTOCOL_USED,
cqp->protocol_used);
}
+ if (hw_rev >= IRDMA_GEN_3)
+ temp |= FIELD_PREP(IRDMA_CQPHC_EN_FINE_GRAINED_TIMERS,
+ cqp->en_fine_grained_timers);
set_64bit_val(cqp->host_ctx, 0, temp);
set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
+
+ if (hw_rev >= IRDMA_GEN_3)
+ temp |= FIELD_PREP(IRDMA_CQPHC_OOISC_BLKSIZE,
+ cqp->ooisc_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_RRSP_BLKSIZE,
+ cqp->rrsp_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_Q1_BLKSIZE, cqp->q1_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_XMIT_BLKSIZE,
+ cqp->xmit_blksize) |
+ FIELD_PREP(IRDMA_CQPHC_BLKSIZES_VALID,
+ cqp->blksizes_valid) |
+ FIELD_PREP(IRDMA_CQPHC_TIMESTAMP_OVERRIDE,
+ cqp->ts_override) |
+ FIELD_PREP(IRDMA_CQPHC_TS_SHIFT, cqp->ts_shift);
set_64bit_val(cqp->host_ctx, 16, temp);
set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
temp = FIELD_PREP(IRDMA_CQPHC_HW_MAJVER, cqp->hw_maj_ver) |
@@ -3316,11 +3868,13 @@ int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
*/
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
{
+ unsigned long flags;
u64 temp_val;
u16 sw_cq_sel;
u8 arm_next_se;
u8 arm_seq_num;
+ spin_lock_irqsave(&ccq->dev->cqp_lock, flags);
get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
@@ -3331,6 +3885,7 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
+ spin_unlock_irqrestore(&ccq->dev->cqp_lock, flags);
dma_wmb(); /* make sure shadow area is updated before arming */
@@ -3338,6 +3893,87 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
}
/**
+ * irdma_sc_process_def_cmpl - process deferred or pending completion
+ * @cqp: CQP sc struct
+ * @info: CQP CQE info
+ * @wqe_idx: CQP WQE descriptor index
+ * @def_info: deferred op ticket value or out-of-order completion id
+ * @def_cmpl: true for deferred completion, false for pending (RCA)
+ */
+static void irdma_sc_process_def_cmpl(struct irdma_sc_cqp *cqp,
+ struct irdma_ccq_cqe_info *info,
+ u32 wqe_idx, u32 def_info, bool def_cmpl)
+{
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ /* Deferred and out-of-order completions share the same list of pending
+ * completions. Since the list can also be accessed from the AE handler,
+ * it must be protected by a lock.
+ */
+ spin_lock_irqsave(&cqp->ooo_list_lock, flags);
+
+ /* For deferred completions, bump up the SW completion ticket value. */
+ if (def_cmpl) {
+ cqp->last_def_cmpl_ticket = def_info;
+ cqp->sw_def_cmpl_ticket++;
+ }
+ if (!list_empty(&cqp->ooo_avail)) {
+ ooo_op = (struct irdma_ooo_cqp_op *)
+ list_entry(cqp->ooo_avail.next,
+ struct irdma_ooo_cqp_op, list_entry);
+
+ list_del(&ooo_op->list_entry);
+ ooo_op->scratch = info->scratch;
+ ooo_op->def_info = def_info;
+ ooo_op->sw_def_info = cqp->sw_def_cmpl_ticket;
+ ooo_op->deferred = def_cmpl;
+ ooo_op->wqe_idx = wqe_idx;
+ /* Pending completions must be chronologically ordered,
+ * so add at the end of the list.
+ */
+ list_add_tail(&ooo_op->list_entry, &cqp->ooo_pnd);
+ }
+ spin_unlock_irqrestore(&cqp->ooo_list_lock, flags);
+
+ info->pending = true;
+}
+
+/**
+ * irdma_sc_process_ooo_cmpl - process out-of-order (final) completion
+ * @cqp: CQP sc struct
+ * @info: CQP CQE info
+ * @def_info: out-of-order completion id
+ */
+static void irdma_sc_process_ooo_cmpl(struct irdma_sc_cqp *cqp,
+ struct irdma_ccq_cqe_info *info,
+ u32 def_info)
+{
+ struct irdma_ooo_cqp_op *ooo_op_tmp;
+ struct irdma_ooo_cqp_op *ooo_op;
+ unsigned long flags;
+
+ info->scratch = 0;
+
+ spin_lock_irqsave(&cqp->ooo_list_lock, flags);
+ list_for_each_entry_safe(ooo_op, ooo_op_tmp, &cqp->ooo_pnd,
+ list_entry) {
+ if (!ooo_op->deferred && ooo_op->def_info == def_info) {
+ list_del(&ooo_op->list_entry);
+ info->scratch = ooo_op->scratch;
+ list_add(&ooo_op->list_entry, &cqp->ooo_avail);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cqp->ooo_list_lock, flags);
+
+ if (!info->scratch)
+ ibdev_dbg(to_ibdev(cqp->dev),
+ "CQP: DEBUG_FW_OOO out-of-order completion with unknown def_info = 0x%x\n",
+ def_info);
+}
+
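
The two helpers above implement a ticket scheme: a deferred completion records the firmware ticket (def_info) alongside an incrementing software ticket, and the parked scratch values are completed later, e.g. once the IRDMA_AE_CQP_DEFERRED_COMPLETE event added below arrives. A hedged sketch of such a sweep; the helper name and exact trigger are assumptions, not shown in this hunk:

	/* Sketch: finish every parked deferred op whose SW ticket is at or
	 * below the ticket reported by the async event. Relies on the same
	 * ooo_list_lock that protects cqp->ooo_pnd in the helpers above.
	 */
	static void demo_sweep_def_cmpls(struct irdma_sc_cqp *cqp, u32 sw_ticket)
	{
		struct irdma_ooo_cqp_op *op, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&cqp->ooo_list_lock, flags);
		list_for_each_entry_safe(op, tmp, &cqp->ooo_pnd, list_entry) {
			if (!op->deferred || op->sw_def_info > sw_ticket)
				continue;
			/* op->scratch still identifies the original request
			 * and would be completed back to the waiter here.
			 */
			list_move(&op->list_entry, &cqp->ooo_avail);
		}
		spin_unlock_irqrestore(&cqp->ooo_list_lock, flags);
	}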
+/**
* irdma_sc_ccq_get_cqe_info - get ccq's cq entry
* @ccq: ccq sc struct
* @info: completion q entry to return
@@ -3345,6 +3981,10 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
struct irdma_ccq_cqe_info *info)
{
+ u32 def_info;
+ bool def_cmpl = false;
+ bool pend_cmpl = false;
+ bool ooo_final_cmpl = false;
u64 qp_ctx, temp, temp1;
__le64 *cqe;
struct irdma_sc_cqp *cqp;
@@ -3352,6 +3992,7 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
u32 error;
u8 polarity;
int ret_code = 0;
+ unsigned long flags;
if (ccq->cq_uk.avoid_mem_cflct)
cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
@@ -3383,6 +4024,25 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
get_64bit_val(cqe, 16, &temp1);
info->op_ret_val = (u32)FIELD_GET(IRDMA_CCQ_OPRETVAL, temp1);
+ if (cqp->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ def_cmpl = info->maj_err_code == IRDMA_CQPSQ_MAJ_NO_ERROR &&
+ info->min_err_code == IRDMA_CQPSQ_MIN_DEF_CMPL;
+ def_info = (u32)FIELD_GET(IRDMA_CCQ_DEFINFO, temp1);
+
+ pend_cmpl = info->maj_err_code == IRDMA_CQPSQ_MAJ_NO_ERROR &&
+ info->min_err_code == IRDMA_CQPSQ_MIN_OOO_CMPL;
+
+ ooo_final_cmpl = (bool)FIELD_GET(IRDMA_OOO_CMPL, temp);
+
+ if (def_cmpl || pend_cmpl || ooo_final_cmpl) {
+ if (ooo_final_cmpl)
+ irdma_sc_process_ooo_cmpl(cqp, info, def_info);
+ else
+ irdma_sc_process_def_cmpl(cqp, info, wqe_idx,
+ def_info, def_cmpl);
+ }
+ }
+
get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
info->op_code = (u8)FIELD_GET(IRDMA_CQPSQ_OPCODE, temp1);
info->cqp = cqp;
@@ -3399,7 +4059,16 @@ int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
dma_wmb(); /* make sure shadow area is updated before moving tail */
- IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ spin_lock_irqsave(&cqp->dev->cqp_lock, flags);
+ if (!ooo_final_cmpl)
+ IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ spin_unlock_irqrestore(&cqp->dev->cqp_lock, flags);
+
+ /* Do not increment completed_ops counter on pending or deferred
+ * completions.
+ */
+ if (pend_cmpl || def_cmpl)
+ return ret_code;
atomic64_inc(&cqp->completed_ops);
return ret_code;
@@ -3639,15 +4308,12 @@ int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
ceq->ceq_elem_pa = info->ceqe_pa;
ceq->virtual_map = info->virtual_map;
ceq->itr_no_expire = info->itr_no_expire;
- ceq->reg_cq = info->reg_cq;
- ceq->reg_cq_size = 0;
- spin_lock_init(&ceq->req_cq_lock);
ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
ceq->tph_en = info->tph_en;
ceq->tph_val = info->tph_val;
- ceq->vsi = info->vsi;
+ ceq->vsi_idx = info->vsi_idx;
ceq->polarity = 1;
IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
ceq->dev->ceq[info->ceq_id] = ceq;
@@ -3680,13 +4346,16 @@ static int irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch,
(ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
set_64bit_val(wqe, 56,
FIELD_PREP(IRDMA_CQPSQ_TPHVAL, ceq->tph_val) |
- FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi->vsi_idx));
+ FIELD_PREP(IRDMA_CQPSQ_PASID, ceq->pasid) |
+ FIELD_PREP(IRDMA_CQPSQ_VSIIDX, ceq->vsi_idx));
hdr = FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID, ceq->ceq_id) |
+ FIELD_PREP(IRDMA_CQPSQ_CEQ_CEQID_HIGH, ceq->ceq_id >> 10) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CEQ) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_ITRNOEXPIRE, ceq->itr_no_expire) |
FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, ceq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3721,9 +4390,6 @@ int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
{
struct irdma_sc_cqp *cqp;
- if (ceq->reg_cq)
- irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
-
cqp = ceq->dev->cqp;
cqp->process_cqp_sds = irdma_update_sds_noccq;
@@ -3741,12 +4407,7 @@ int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
int ret_code;
struct irdma_sc_dev *dev = ceq->dev;
- dev->ccq->vsi = ceq->vsi;
- if (ceq->reg_cq) {
- ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
- if (ret_code)
- return ret_code;
- }
+ dev->ccq->vsi_idx = ceq->vsi_idx;
ret_code = irdma_sc_ceq_create(ceq, scratch, true);
if (!ret_code)
@@ -3774,11 +4435,14 @@ int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq)
set_64bit_val(wqe, 16, ceq->elem_cnt);
set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, ceq->pasid));
hdr = ceq->ceq_id |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_CEQ) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_LPBLSIZE, ceq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_CEQ_VMAP, ceq->virtual_map) |
FIELD_PREP(IRDMA_CQPSQ_TPHEN, ceq->tph_en) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, ceq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3808,7 +4472,6 @@ void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
struct irdma_sc_cq *temp_cq;
u8 polarity;
u32 cq_idx;
- unsigned long flags;
do {
cq_idx = 0;
@@ -3829,11 +4492,6 @@ void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
}
cq = temp_cq;
- if (ceq->reg_cq) {
- spin_lock_irqsave(&ceq->req_cq_lock, flags);
- cq_idx = irdma_sc_find_reg_cq(ceq, cq);
- spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
- }
IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
@@ -3942,10 +4600,13 @@ static int irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch,
(aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
set_64bit_val(wqe, 48,
(aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, aeq->pasid));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_AEQ) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, aeq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -3974,7 +4635,9 @@ static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
u64 hdr;
dev = aeq->dev;
- writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
+
+ if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -3982,9 +4645,12 @@ static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
return -ENOMEM;
set_64bit_val(wqe, 16, aeq->elem_cnt);
set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMA_CQPSQ_PASID, aeq->pasid));
hdr = FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_DESTROY_AEQ) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_LPBLSIZE, aeq->pbl_chunk_size) |
FIELD_PREP(IRDMA_CQPSQ_AEQ_VMAP, aeq->virtual_map) |
+ FIELD_PREP(IRDMA_CQPSQ_PASID_VALID, aeq->pasid_valid) |
FIELD_PREP(IRDMA_CQPSQ_WQEVALID, cqp->polarity);
dma_wmb(); /* make sure WQE is written before valid bit is set */
@@ -4025,18 +4691,39 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8,
aeqe, 16, false);
- ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
- info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
- info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC_GEN_3, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX_GEN_3,
+ temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_GEN_3, temp);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE_GEN_3, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE_GEN_3, compl_ctx);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE_GEN_3, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA_GEN_3, compl_ctx);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW_GEN_3, temp);
+ info->compl_ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, compl_ctx);
+ compl_ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, compl_ctx) << IRDMA_AEQE_CMPL_CTXT_S;
+ } else {
+ ae_src = (u8)FIELD_GET(IRDMA_AEQE_AESRC, temp);
+ info->wqe_idx = (u16)FIELD_GET(IRDMA_AEQE_WQDESCIDX, temp);
+ info->qp_cq_id = (u32)FIELD_GET(IRDMA_AEQE_QPCQID_LOW, temp) |
((u32)FIELD_GET(IRDMA_AEQE_QPCQID_HI, temp) << 18);
- info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
- info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
- info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
- info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
- info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW, temp);
+ info->ae_id = (u16)FIELD_GET(IRDMA_AEQE_AECODE, temp);
+ info->tcp_state = (u8)FIELD_GET(IRDMA_AEQE_TCPSTATE, temp);
+ info->iwarp_state = (u8)FIELD_GET(IRDMA_AEQE_IWSTATE, temp);
+ info->q2_data_written = (u8)FIELD_GET(IRDMA_AEQE_Q2DATA, temp);
+ info->aeqe_overflow = (bool)FIELD_GET(IRDMA_AEQE_OVERFLOW,
+ temp);
+ }
info->ae_src = ae_src;
switch (info->ae_id) {
+ case IRDMA_AE_SRQ_LIMIT:
+ info->srq = true;
+ /* [63:6] from CMPL_CTXT, [5:0] from WQDESCIDX. */
+ info->compl_ctx = compl_ctx;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
case IRDMA_AE_PRIV_OPERATION_DENIED:
case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW:
case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW:
@@ -4069,6 +4756,10 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
case IRDMA_AE_RESET_SENT:
@@ -4085,6 +4776,10 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->compl_ctx = compl_ctx << 1;
ae_src = IRDMA_AE_SOURCE_RSVD;
break;
+ case IRDMA_AE_CQP_DEFERRED_COMPLETE:
+ info->def_info = info->wqe_idx;
+ ae_src = IRDMA_AE_SOURCE_RSVD;
+ break;
case IRDMA_AE_ROCE_EMPTY_MCG:
case IRDMA_AE_ROCE_BAD_MC_IP_ADDR:
case IRDMA_AE_ROCE_BAD_MC_QPID:
@@ -4110,6 +4805,7 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->qp = true;
info->rq = true;
info->compl_ctx = compl_ctx;
+ info->err_rq_idx_valid = true;
break;
case IRDMA_AE_SOURCE_CQ:
case IRDMA_AE_SOURCE_CQ_0110:
@@ -4125,8 +4821,18 @@ int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
info->compl_ctx = compl_ctx;
break;
case IRDMA_AE_SOURCE_IN_RR_WR:
+ info->qp = true;
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ info->err_rq_idx_valid = true;
+ info->compl_ctx = compl_ctx;
+ info->in_rdrsp_wr = true;
+ break;
case IRDMA_AE_SOURCE_IN_RR_WR_1011:
info->qp = true;
+ if (aeq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ info->sq = true;
+ info->err_rq_idx_valid = true;
+ }
info->compl_ctx = compl_ctx;
info->in_rdrsp_wr = true;
break;
@@ -4336,6 +5042,26 @@ int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id)
}
/**
+ * irdma_set_loc_mem() - set a local memory bit field
+ * @buf: ptr to a buffer where local memory gets enabled
+ */
+static void irdma_set_loc_mem(__le64 *buf)
+{
+ u64 loc_mem_en = BIT_ULL(ENABLE_LOC_MEM);
+ u32 offset;
+ u64 temp;
+
+ for (offset = 0; offset < IRDMA_COMMIT_FPM_BUF_SIZE;
+ offset += sizeof(__le64)) {
+ if (offset == IRDMA_PBLE_COMMIT_OFFSET)
+ continue;
+ get_64bit_val(buf, offset, &temp);
+ if (temp)
+ set_64bit_val(buf, offset, temp | loc_mem_en);
+ }
+}
+
+/**
* irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp
* command and populates fpm base address in hmc_info
* @dev : ptr to irdma_dev struct
@@ -4356,7 +5082,7 @@ static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
set_64bit_val(buf, 0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt);
set_64bit_val(buf, 8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt);
- set_64bit_val(buf, 16, (u64)0); /* RSRVD */
+ set_64bit_val(buf, 16, (u64)obj_info[IRDMA_HMC_IW_SRQ].cnt);
set_64bit_val(buf, 24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt);
set_64bit_val(buf, 32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt);
set_64bit_val(buf, 40, (u64)0); /* RSVD */
@@ -4383,7 +5109,9 @@ static int irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id)
(u64)obj_info[IRDMA_HMC_IW_OOISC].cnt);
set_64bit_val(buf, 168,
(u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt);
-
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
+ dev->hmc_fpm_misc.loc_mem_pages)
+ irdma_set_loc_mem(buf);
commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
commit_fpm_mem.va = dev->fpm_commit_buf;
@@ -4592,6 +5320,7 @@ static bool irdma_cqp_ring_full(struct irdma_sc_cqp *cqp)
static u32 irdma_est_sd(struct irdma_sc_dev *dev,
struct irdma_hmc_info *hmc_info)
{
+ struct irdma_hmc_obj_info *pble_info;
int i;
u64 size = 0;
u64 sd;
@@ -4600,12 +5329,22 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
if (i != IRDMA_HMC_IW_PBLE)
size += round_up(hmc_info->hmc_obj[i].cnt *
hmc_info->hmc_obj[i].size, 512);
- size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt *
- hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512);
+
+ pble_info = &hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE];
+ if (dev->privileged)
+ size += round_up(pble_info->cnt * pble_info->size, 512);
if (size & 0x1FFFFF)
sd = (size >> 21) + 1; /* add 1 for remainder */
else
sd = size >> 21;
+ if (!dev->privileged && !dev->hmc_fpm_misc.loc_mem_pages) {
+ /* 2MB alignment for VF PBLE HMC */
+ size = pble_info->cnt * pble_info->size;
+ if (size & 0x1FFFFF)
+ sd += (size >> 21) + 1; /* add 1 for remainder */
+ else
+ sd += size >> 21;
+ }
if (sd > 0xFFFFFFFF) {
ibdev_dbg(to_ibdev(dev), "HMC: sd overflow[%lld]\n", sd);
sd = 0xFFFFFFFF - 1;
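
The SD count above is a 2MB round-up: one segment descriptor maps 1 << 21 bytes of HMC backing. A worked example of the arithmetic:

	/* 5 MB of HMC objects -> three 2MB segment descriptors */
	u64 size = 5ULL * 1024 * 1024;	/* 0x500000 */
	u64 sd = (size & 0x1FFFFF) ? (size >> 21) + 1 : size >> 21;
	/* size >> 21 == 2 and the low 21 bits are non-zero, so sd == 3 */

The unprivileged (VF) branch rounds its PBLE backing up separately, so the PBLE region gets its own 2MB-aligned descriptors.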
@@ -4615,17 +5354,6 @@ static u32 irdma_est_sd(struct irdma_sc_dev *dev,
}
/**
- * irdma_sc_query_rdma_features_done - poll cqp for query features done
- * @cqp: struct for cqp hw
- */
-static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
-{
- return irdma_sc_poll_for_cqp_op_done(cqp,
- IRDMA_CQP_OP_QUERY_RDMA_FEATURES,
- NULL);
-}
-
-/**
* irdma_sc_query_rdma_features - query RDMA features and FW ver
* @cqp: struct for cqp hw
* @buf: buffer to hold query info
@@ -4634,7 +5362,9 @@ static int irdma_sc_query_rdma_features_done(struct irdma_sc_cqp *cqp)
static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
struct irdma_dma_mem *buf, u64 scratch)
{
+ u32 tail, val, error;
__le64 *wqe;
+ int status;
u64 temp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
@@ -4654,9 +5384,15 @@ static int irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp,
print_hex_dump_debug("WQE: QUERY RDMA FEATURES", DUMP_PREFIX_OFFSET,
16, 8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
+ irdma_get_cqp_reg_info(cqp, &val, &tail, &error);
+
irdma_sc_cqp_post_sq(cqp);
+ status = irdma_cqp_poll_registers(cqp, tail,
+ cqp->dev->hw_attrs.max_done_count);
+ if (error || status)
+ status = -EINVAL;
- return 0;
+ return status;
}
/**
@@ -4678,8 +5414,6 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev)
return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
- if (!ret_code)
- ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
if (ret_code)
goto exit;
@@ -4703,8 +5437,6 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev)
return -ENOMEM;
ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0);
- if (!ret_code)
- ret_code = irdma_sc_query_rdma_features_done(dev->cqp);
if (ret_code)
goto exit;
@@ -4731,6 +5463,10 @@ int irdma_get_rdma_features(struct irdma_sc_dev *dev)
}
dev->feature_info[feat_type] = temp;
}
+
+ if (dev->feature_info[IRDMA_FTN_FLAGS] & IRDMA_ATOMICS_ALLOWED_BIT)
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_ATOMIC_OPS;
+
exit:
dma_free_coherent(dev->hw->device, feat_buf.size, feat_buf.va,
feat_buf.pa);
@@ -4786,22 +5522,354 @@ static void cfg_fpm_value_gen_2(struct irdma_sc_dev *dev,
}
/**
+ * irdma_get_rsrc_mem_config - configure resources for local or host memory
+ * @dev: sc device struct
+ * @is_mrte_loc_mem: if true, MRs are placed in local memory because all SDs
+ *                   are local-memory pages
+ *
+ * If QPs are in local memory, all resource objects are placed in local memory
+ * as well, with two exceptions: MRs may be configured as either host or local
+ * memory, and PBLEs always remain in host memory.
+ */
+static void irdma_get_rsrc_mem_config(struct irdma_sc_dev *dev, bool is_mrte_loc_mem)
+{
+ struct irdma_hmc_info *hmc_info = dev->hmc_info;
+ int i;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].mem_loc = IRDMA_LOC_MEM;
+
+ if (dev->feature_info[IRDMA_OBJ_1] && !is_mrte_loc_mem) {
+ u8 mem_type;
+
+ mem_type = (u8)FIELD_GET(IRDMA_MR_MEM_LOC, dev->feature_info[IRDMA_OBJ_1]);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc =
+ (mem_type & IRDMA_OBJ_LOC_MEM_BIT) ?
+ IRDMA_LOC_MEM : IRDMA_HOST_MEM;
+ } else {
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc = IRDMA_LOC_MEM;
+ }
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].mem_loc = IRDMA_HOST_MEM;
+
+ ibdev_dbg(to_ibdev(dev), "HMC: INFO: mrte_mem_loc = %d pble = %d\n",
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc,
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].mem_loc);
+}
+
+/**
+ * irdma_cfg_sd_mem - allocate sd memory
+ * @dev: sc device struct
+ * @hmc_info: ptr to irdma_hmc_info struct
+ */
+static int irdma_cfg_sd_mem(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info)
+{
+ struct irdma_virt_mem virt_mem;
+ u32 mem_size;
+
+ mem_size = sizeof(struct irdma_hmc_sd_entry) * hmc_info->sd_table.sd_cnt;
+ virt_mem.size = mem_size;
+ virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
+ if (!virt_mem.va)
+ return -ENOMEM;
+ hmc_info->sd_table.sd_entry = virt_mem.va;
+
+ return 0;
+}
+
+/**
+ * irdma_get_objs_pages - get number of 2M pages needed
+ * @dev: sc device struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @mem_loc: pages for local or host memory
+ */
+static u32 irdma_get_objs_pages(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ enum irdma_hmc_obj_mem mem_loc)
+{
+ u64 size = 0;
+ int i;
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
+ if (hmc_info->hmc_obj[i].mem_loc == mem_loc) {
+ size += round_up(hmc_info->hmc_obj[i].cnt *
+ hmc_info->hmc_obj[i].size, 512);
+ }
+ }
+
+ return DIV_ROUND_UP(size, IRDMA_HMC_PAGE_SIZE);
+}
+
+/**
+ * irdma_set_host_hmc_rsrc_gen_3 - calculate host hmc resources for gen 3
+ * @dev: sc device struct
+ */
+static void irdma_set_host_hmc_rsrc_gen_3(struct irdma_sc_dev *dev)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ struct irdma_hmc_info *hmc_info;
+ enum irdma_hmc_obj_mem mrte_loc;
+ u32 mrwanted, pblewanted;
+ u32 avail_sds, mr_sds;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ avail_sds = hmc_fpm_misc->max_sds;
+ mrte_loc = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc;
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
+ pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
+
+ if (mrte_loc == IRDMA_HOST_MEM && avail_sds > IRDMA_MIN_PBLE_PAGES) {
+ mr_sds = avail_sds - IRDMA_MIN_PBLE_PAGES;
+ mrwanted = min(mrwanted, mr_sds * MAX_MR_PER_SD);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
+ avail_sds -= DIV_ROUND_UP(mrwanted, MAX_MR_PER_SD);
+ }
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]) &&
+ pblewanted > avail_sds * MAX_PBLE_PER_SD)
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: Warn: Resource version 2: pble wanted = 0x%x available = 0x%x\n",
+ pblewanted, avail_sds * MAX_PBLE_PER_SD);
+
+ pblewanted = min(pblewanted, avail_sds * MAX_PBLE_PER_SD);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
+}
+
+/**
+ * irdma_verify_commit_fpm_gen_3 - verify query fpm values
+ * @dev: sc device struct
+ * @max_pages: max local memory pages available
+ * @qpwanted: number of QPs wanted
+ */
+static int irdma_verify_commit_fpm_gen_3(struct irdma_sc_dev *dev,
+ u32 max_pages,
+ u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ u32 rrf_cnt, xf_cnt, timer_cnt, pages_needed;
+ struct irdma_hmc_info *hmc_info;
+ u32 rrffl_cnt = 0;
+ u32 xffl_cnt = 0;
+ u32 q1fl_cnt;
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+ rrf_cnt = roundup_pow_of_two(IRDMA_RRF_MULTIPLIER * qpwanted);
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ rrffl_cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+
+ xf_cnt = roundup_pow_of_two(IRDMA_XF_MULTIPLIER * qpwanted);
+
+ if (xf_cnt)
+ xffl_cnt = xf_cnt / hmc_fpm_misc->xf_block_size;
+
+ timer_cnt = (round_up(qpwanted, 512) / 512 + 1) *
+ hmc_fpm_misc->timer_bucket;
+
+ q1fl_cnt = hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+
+ pages_needed = irdma_get_objs_pages(dev, hmc_info, IRDMA_LOC_MEM);
+ if (pages_needed > max_pages) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FAIL: SW counts rrf_cnt = %u rrffl_cnt = %u timer_cnt = %u",
+ rrf_cnt, rrffl_cnt, timer_cnt);
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: FAIL: SW counts xf_cnt = %u xffl_cnt = %u q1fl_cnt = %u",
+ xf_cnt, xffl_cnt, q1fl_cnt);
+
+ return -EINVAL;
+ }
+
+ hmc_fpm_misc->max_sds -= pages_needed;
+ hmc_fpm_misc->loc_mem_pages -= pages_needed;
+
+ return 0;
+}
+
+/**
+ * irdma_set_loc_hmc_rsrc_gen_3 - calculate hmc resources for gen 3
+ * @dev: sc device struct
+ * @max_pages: max local memory pages available
+ * @qpwanted: number of QPs wanted
+ */
+static int irdma_set_loc_hmc_rsrc_gen_3(struct irdma_sc_dev *dev,
+ u32 max_pages,
+ u32 qpwanted)
+{
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc;
+ u32 rrf_cnt, xf_cnt, timer_cnt, pages_needed;
+ struct irdma_hmc_info *hmc_info;
+ u32 ird, ord;
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ return irdma_verify_commit_fpm_gen_3(dev, max_pages, qpwanted);
+
+ hmc_info = dev->hmc_info;
+ hmc_fpm_misc = &dev->hmc_fpm_misc;
+ ird = dev->hw_attrs.max_hw_ird;
+ ord = dev->hw_attrs.max_hw_ord;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, qpwanted * 2);
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
+ min(qpwanted * 8, hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+
+ rrf_cnt = roundup_pow_of_two(IRDMA_RRF_MULTIPLIER * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt, rrf_cnt);
+
+ if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt)
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt /
+ hmc_fpm_misc->rrf_block_size;
+
+ xf_cnt = roundup_pow_of_two(IRDMA_XF_MULTIPLIER * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_XF].max_cnt, xf_cnt);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt =
+ xf_cnt / hmc_fpm_misc->xf_block_size;
+
+ timer_cnt = (round_up(qpwanted, 512) / 512 + 1) *
+ hmc_fpm_misc->timer_bucket;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt =
+ min(timer_cnt, hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt);
+
+ do {
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = roundup_pow_of_two(ird * 2 * qpwanted);
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt =
+ hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+
+ pages_needed = irdma_get_objs_pages(dev, hmc_info, IRDMA_LOC_MEM);
+ if (pages_needed <= max_pages)
+ break;
+
+ ird /= 2;
+ ord /= 2;
+ } while (ird >= IRDMA_MIN_IRD);
+
+ if (ird < IRDMA_MIN_IRD) {
+ ibdev_dbg(to_ibdev(dev), "HMC: FAIL: IRD=%u Q1 CNT = %u\n",
+ ird, hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt);
+ return -EINVAL;
+ }
+
+ dev->hw_attrs.max_hw_ird = ird;
+ dev->hw_attrs.max_hw_ord = ord;
+ hmc_fpm_misc->max_sds -= pages_needed;
+
+ return 0;
+}
+
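
The do/while above trades IRD for local-memory footprint: Q1 grows as roundup_pow_of_two(ird * 2 * qpwanted), so each halving of IRD roughly halves the Q1 object count until the page budget fits or IRD drops below IRDMA_MIN_IRD. Illustrative numbers only:

	u32 qpwanted = 64, ird = 128;			/* example values */
	u32 q1 = roundup_pow_of_two(ird * 2 * qpwanted);	/* 16384 */
	/* does not fit? halve and retry: */
	ird /= 2;
	q1 = roundup_pow_of_two(ird * 2 * qpwanted);	/* 8192 */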
+/**
+ * cfg_fpm_value_gen_3 - configure fpm for gen 3
+ * @dev: sc device struct
+ * @hmc_info: ptr to irdma_hmc_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ */
+static int cfg_fpm_value_gen_3(struct irdma_sc_dev *dev,
+ struct irdma_hmc_info *hmc_info,
+ struct irdma_hmc_fpm_misc *hmc_fpm_misc)
+{
+ enum irdma_hmc_obj_mem mrte_loc;
+ u32 mrwanted, qpwanted;
+ int i, ret_code = 0;
+ u32 loc_mem_pages;
+ bool is_mrte_loc_mem;
+
+ loc_mem_pages = hmc_fpm_misc->loc_mem_pages;
+ is_mrte_loc_mem = hmc_fpm_misc->loc_mem_pages == hmc_fpm_misc->max_sds;
+
+ irdma_get_rsrc_mem_config(dev, is_mrte_loc_mem);
+ mrte_loc = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc;
+
+ if (is_mrte_loc_mem)
+ loc_mem_pages -= IRDMA_MIN_PBLE_PAGES;
+
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: mrte_loc %d loc_mem %u fpm max sds %u host_obj %d\n",
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].mem_loc,
+ hmc_fpm_misc->loc_mem_pages, hmc_fpm_misc->max_sds,
+ is_mrte_loc_mem);
+
+ mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
+ qpwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted;
+
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].max_cnt = 0;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt = 0;
+
+ if (!FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt,
+ (u32)IRDMA_FSIAV_CNT_MAX);
+
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+
+ while (qpwanted >= IRDMA_MIN_QP_CNT) {
+ if (!irdma_set_loc_hmc_rsrc_gen_3(dev, loc_mem_pages, qpwanted))
+ break;
+
+ if (FIELD_GET(IRDMA_MANAGE_RSRC_VER2, dev->feature_info[IRDMA_FTN_FLAGS]))
+ return -EINVAL;
+
+ qpwanted /= 2;
+ if (mrte_loc == IRDMA_LOC_MEM) {
+ mrwanted = qpwanted * IRDMA_MIN_MR_PER_QP;
+ hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt =
+ min(hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt, mrwanted);
+ }
+ }
+
+ if (qpwanted < IRDMA_MIN_QP_CNT) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: ERROR: could not allocate fpm resources\n");
+ return -EINVAL;
+ }
+
+ irdma_set_host_hmc_rsrc_gen_3(dev);
+ ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "HMC: cfg_iw_fpm returned error_code[x%08X]\n",
+ readl(dev->hw_regs[IRDMA_CQPERRCODES]));
+
+ return ret_code;
+ }
+
+ return irdma_cfg_sd_mem(dev, hmc_info);
+}
+
+/**
* irdma_cfg_fpm_val - configure HMC objects
* @dev: sc device struct
* @qp_count: desired qp count
*/
int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
{
- struct irdma_virt_mem virt_mem;
- u32 i, mem_size;
u32 qpwanted, mrwanted, pblewanted;
- u32 powerof2, hte;
+ u32 powerof2, hte, i;
u32 sd_needed;
u32 sd_diff;
u32 loop_count = 0;
struct irdma_hmc_info *hmc_info;
struct irdma_hmc_fpm_misc *hmc_fpm_misc;
int ret_code = 0;
+ u32 max_sds;
hmc_info = dev->hmc_info;
hmc_fpm_misc = &dev->hmc_fpm_misc;
@@ -4814,14 +5882,16 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
return ret_code;
}
+ max_sds = hmc_fpm_misc->max_sds;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ return cfg_fpm_value_gen_3(dev, hmc_info, hmc_fpm_misc);
+
for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
sd_needed = irdma_est_sd(dev, hmc_info);
- ibdev_dbg(to_ibdev(dev),
- "HMC: FW max resources sd_needed[%08d] first_sd_index[%04d]\n",
- sd_needed, hmc_info->first_sd_index);
- ibdev_dbg(to_ibdev(dev), "HMC: sd count %d where max sd is %d\n",
- hmc_info->sd_table.sd_cnt, hmc_fpm_misc->max_sds);
+ ibdev_dbg(to_ibdev(dev), "HMC: sd count %u where max sd is %u\n",
+ hmc_info->sd_table.sd_cnt, max_sds);
qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
@@ -4835,21 +5905,21 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
ibdev_dbg(to_ibdev(dev),
- "HMC: req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n",
- qp_count, hmc_fpm_misc->max_sds,
+ "HMC: req_qp=%d max_sd=%u, max_qp = %u, max_cq=%u, max_mr=%u, max_pble=%u, mc=%d, av=%u\n",
+ qp_count, max_sds,
hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt,
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt);
+
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt;
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt;
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt =
hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt;
-
hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1;
while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt)
@@ -4860,7 +5930,7 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted;
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt =
min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt);
- hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */
+ hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt = 0; /* Reserved */
hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
@@ -4898,11 +5968,12 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (!(loop_count % 2) && qpwanted > 128) {
qpwanted /= 2;
} else {
- mrwanted /= 2;
pblewanted /= 2;
+ mrwanted /= 2;
}
continue;
}
+
if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF &&
pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) {
pblewanted -= 256 * FPM_MULTIPLIER * sd_diff;
@@ -4928,14 +5999,13 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
if (sd_needed > hmc_fpm_misc->max_sds) {
ibdev_dbg(to_ibdev(dev),
- "HMC: cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n",
+ "HMC: cfg_fpm failed loop_cnt=%u, sd_needed=%u, max sd count %u\n",
loop_count, sd_needed, hmc_info->sd_table.sd_cnt);
return -EINVAL;
}
- if (loop_count > 1 && sd_needed < hmc_fpm_misc->max_sds) {
- pblewanted += (hmc_fpm_misc->max_sds - sd_needed) * 256 *
- FPM_MULTIPLIER;
+ if (loop_count > 1 && sd_needed < max_sds) {
+ pblewanted += (max_sds - sd_needed) * 256 * FPM_MULTIPLIER;
hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted;
sd_needed = irdma_est_sd(dev, hmc_info);
}
@@ -4959,18 +6029,7 @@ int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
return ret_code;
}
- mem_size = sizeof(struct irdma_hmc_sd_entry) *
- (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
- virt_mem.size = mem_size;
- virt_mem.va = kzalloc(virt_mem.size, GFP_KERNEL);
- if (!virt_mem.va) {
- ibdev_dbg(to_ibdev(dev),
- "HMC: failed to allocate memory for sd_entry buffer\n");
- return -ENOMEM;
- }
- hmc_info->sd_table.sd_entry = virt_mem.va;
-
- return ret_code;
+ return irdma_cfg_sd_mem(dev, hmc_info);
}
/**
@@ -5242,6 +6301,22 @@ static int irdma_exec_cqp_cmd(struct irdma_sc_dev *dev,
&pcmdinfo->in.u.mc_modify.info,
pcmdinfo->in.u.mc_modify.scratch);
break;
+ case IRDMA_OP_SRQ_CREATE:
+ status = irdma_sc_srq_create(pcmdinfo->in.u.srq_create.srq,
+ pcmdinfo->in.u.srq_create.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_SRQ_MODIFY:
+ status = irdma_sc_srq_modify(pcmdinfo->in.u.srq_modify.srq,
+ &pcmdinfo->in.u.srq_modify.info,
+ pcmdinfo->in.u.srq_modify.scratch,
+ pcmdinfo->post_sq);
+ break;
+ case IRDMA_OP_SRQ_DESTROY:
+ status = irdma_sc_srq_destroy(pcmdinfo->in.u.srq_destroy.srq,
+ pcmdinfo->in.u.srq_destroy.scratch,
+ pcmdinfo->post_sq);
+ break;
default:
status = -EOPNOTSUPP;
break;
@@ -5314,14 +6389,26 @@ void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
*/
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
{
- struct irdma_gather_stats *gather_stats;
- struct irdma_gather_stats *last_gather_stats;
+ struct irdma_dev_hw_stats *hw_stats = &vsi->pestat->hw_stats;
+ struct irdma_gather_stats *gather_stats =
+ vsi->pestat->gather_info.gather_stats_va;
+ struct irdma_gather_stats *last_gather_stats =
+ vsi->pestat->gather_info.last_gather_stats_va;
+ const struct irdma_hw_stat_map *map = vsi->dev->hw_stats_map;
+ u16 max_stat_idx = vsi->dev->hw_attrs.max_stat_idx;
+ u16 i;
+
+ if (vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ for (i = 0; i < max_stat_idx; i++) {
+ u16 idx = map[i].byteoff / sizeof(u64);
- gather_stats = vsi->pestat->gather_info.gather_stats_va;
- last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
- irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
- last_gather_stats, vsi->dev->hw_stats_map,
- vsi->dev->hw_attrs.max_stat_idx);
+ hw_stats->stats_val[i] = gather_stats->val[idx];
+ }
+ return;
+ }
+
+ irdma_update_stats(hw_stats, gather_stats, last_gather_stats,
+ map, max_stat_idx);
}
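
The GEN3 branch above turns each stat's byte offset in the gather buffer into a u64 index and copies the counter directly, with no delta against last_gather_stats; the pre-GEN3 path keeps its rollover handling via irdma_update_stats(). For a hypothetical map entry with byteoff 24:

	u16 idx = 24 / sizeof(u64);		/* index 3 */
	hw_stats->stats_val[i] = gather_stats->val[idx];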
/**
@@ -5356,6 +6443,9 @@ static inline void irdma_sc_init_hw(struct irdma_sc_dev *dev)
case IRDMA_GEN_2:
icrdma_init_hw(dev);
break;
+ case IRDMA_GEN_3:
+ ig3rdma_init_hw(dev);
+ break;
}
}
@@ -5381,10 +6471,15 @@ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
dev->fpm_commit_buf = info->fpm_commit_buf;
dev->hw = info->hw;
dev->hw->hw_addr = info->bar0;
+ dev->protocol_used = info->protocol_used;
/* Setup the hardware limits, hmc may limit further */
dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
+ dev->hw_attrs.min_hw_srq_id = IRDMA_MIN_IW_SRQ_ID;
dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
- dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES_GEN_3;
+ else
+ dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
@@ -5409,21 +6504,39 @@ int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;
- dev->hw_attrs.uk_attrs.hw_rev = ver;
+ if (!dev->privileged) {
+ ret_code = irdma_vchnl_req_get_hmc_fcn(dev);
+ if (ret_code) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get HMC function ret = %d\n",
+ ret_code);
+
+ return ret_code;
+ }
+ }
+
irdma_sc_init_hw(dev);
- if (irdma_wait_pe_ready(dev))
- return -ETIMEDOUT;
+ if (dev->privileged) {
+ if (irdma_wait_pe_ready(dev))
+ return -ETIMEDOUT;
- val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
- db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
- if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
- ibdev_dbg(to_ibdev(dev),
- "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
- val, db_size);
- return -ENODEV;
+ val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
+ db_size = (u8)FIELD_GET(IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE, val);
+ if (db_size != IRDMA_PE_DB_SIZE_4M &&
+ db_size != IRDMA_PE_DB_SIZE_8M) {
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
+ val, db_size);
+ return -ENODEV;
+ }
+ } else {
+ ret_code = irdma_vchnl_req_get_reg_layout(dev);
+ if (ret_code)
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get Register layout failed ret = %d\n",
+ ret_code);
}
- dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
return ret_code;
}
diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h
index 2cb4b96db721..983b22d7ae23 100644
--- a/drivers/infiniband/hw/irdma/defs.h
+++ b/drivers/infiniband/hw/irdma/defs.h
@@ -14,6 +14,18 @@
#define IRDMA_PE_DB_SIZE_4M 1
#define IRDMA_PE_DB_SIZE_8M 2
+#define IRDMA_IRD_HW_SIZE_4_GEN3 0
+#define IRDMA_IRD_HW_SIZE_8_GEN3 1
+#define IRDMA_IRD_HW_SIZE_16_GEN3 2
+#define IRDMA_IRD_HW_SIZE_32_GEN3 3
+#define IRDMA_IRD_HW_SIZE_64_GEN3 4
+#define IRDMA_IRD_HW_SIZE_128_GEN3 5
+#define IRDMA_IRD_HW_SIZE_256_GEN3 6
+#define IRDMA_IRD_HW_SIZE_512_GEN3 7
+#define IRDMA_IRD_HW_SIZE_1024_GEN3 8
+#define IRDMA_IRD_HW_SIZE_2048_GEN3 9
+#define IRDMA_IRD_HW_SIZE_4096_GEN3 10
+
#define IRDMA_IRD_HW_SIZE_4 0
#define IRDMA_IRD_HW_SIZE_16 1
#define IRDMA_IRD_HW_SIZE_64 2
@@ -114,6 +126,13 @@ enum irdma_protocol_used {
#define IRDMA_UPDATE_SD_BUFF_SIZE 128
#define IRDMA_FEATURE_BUF_SIZE (8 * IRDMA_MAX_FEATURES)
+#define ENABLE_LOC_MEM 63
+#define IRDMA_ATOMICS_ALLOWED_BIT 1
+#define MAX_PBLE_PER_SD 0x40000
+#define MAX_PBLE_SD_PER_FCN 0x400
+#define MAX_MR_PER_SD 0x8000
+#define MAX_MR_SD_PER_FCN 0x80
+#define IRDMA_PBLE_COMMIT_OFFSET 112
#define IRDMA_MAX_QUANTA_PER_WR 8
#define IRDMA_QP_SW_MAX_WQ_QUANTA 32768
@@ -121,6 +140,10 @@ enum irdma_protocol_used {
#define IRDMA_QP_SW_MAX_RQ_QUANTA 32768
#define IRDMA_MAX_QP_WRS(max_quanta_per_wr) \
((IRDMA_QP_SW_MAX_WQ_QUANTA - IRDMA_SQ_RSVD) / (max_quanta_per_wr))
+#define IRDMA_SRQ_MIN_QUANTA 8
+#define IRDMA_SRQ_MAX_QUANTA 262144
+#define IRDMA_MAX_SRQ_WRS \
+ ((IRDMA_SRQ_MAX_QUANTA - IRDMA_RQ_RSVD) / IRDMA_MAX_QUANTA_PER_WR)
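
With IRDMA_RQ_RSVD = 1 and IRDMA_MAX_QUANTA_PER_WR = 8, both defined in this file, the macro evaluates to (262144 - 1) / 8 = 32767. A compile-time check one could drop in to confirm the arithmetic (not part of the patch):

	static_assert(IRDMA_MAX_SRQ_WRS == 32767);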
#define IRDMAQP_TERM_SEND_TERM_AND_FIN 0
#define IRDMAQP_TERM_SEND_TERM_ONLY 1
@@ -147,8 +170,13 @@ enum irdma_protocol_used {
#define IRDMA_SQ_RSVD 258
#define IRDMA_RQ_RSVD 1
-#define IRDMA_FEATURE_RTS_AE 1ULL
-#define IRDMA_FEATURE_CQ_RESIZE 2ULL
+#define IRDMA_FEATURE_RTS_AE BIT_ULL(0)
+#define IRDMA_FEATURE_CQ_RESIZE BIT_ULL(1)
+#define IRDMA_FEATURE_64_BYTE_CQE BIT_ULL(5)
+#define IRDMA_FEATURE_ATOMIC_OPS BIT_ULL(6)
+#define IRDMA_FEATURE_SRQ BIT_ULL(7)
+#define IRDMA_FEATURE_CQE_TIMESTAMPING BIT_ULL(8)
+
#define IRDMAQP_OP_RDMA_WRITE 0x00
#define IRDMAQP_OP_RDMA_READ 0x01
#define IRDMAQP_OP_RDMA_SEND 0x03
@@ -161,6 +189,8 @@ enum irdma_protocol_used {
#define IRDMAQP_OP_RDMA_READ_LOC_INV 0x0b
#define IRDMAQP_OP_NOP 0x0c
#define IRDMAQP_OP_RDMA_WRITE_SOL 0x0d
+#define IRDMAQP_OP_ATOMIC_FETCH_ADD 0x0f
+#define IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD 0x11
#define IRDMAQP_OP_GEN_RTS_AE 0x30
enum irdma_cqp_op_type {
@@ -212,9 +242,12 @@ enum irdma_cqp_op_type {
IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46,
IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47,
IRDMA_OP_CQ_MODIFY = 48,
+ IRDMA_OP_SRQ_CREATE = 49,
+ IRDMA_OP_SRQ_MODIFY = 50,
+ IRDMA_OP_SRQ_DESTROY = 51,
/* Must be last entry*/
- IRDMA_MAX_CQP_OPS = 49,
+ IRDMA_MAX_CQP_OPS = 52,
};
/* CQP SQ WQES */
@@ -224,6 +257,9 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_CREATE_CQ 0x03
#define IRDMA_CQP_OP_MODIFY_CQ 0x04
#define IRDMA_CQP_OP_DESTROY_CQ 0x05
+#define IRDMA_CQP_OP_CREATE_SRQ 0x06
+#define IRDMA_CQP_OP_MODIFY_SRQ 0x07
+#define IRDMA_CQP_OP_DESTROY_SRQ 0x08
#define IRDMA_CQP_OP_ALLOC_STAG 0x09
#define IRDMA_CQP_OP_REG_MR 0x0a
#define IRDMA_CQP_OP_QUERY_STAG 0x0b
@@ -265,97 +301,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQP_OP_GATHER_STATS 0x2e
#define IRDMA_CQP_OP_UP_MAP 0x2f
-/* Async Events codes */
-#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
-#define IRDMA_AE_AMP_INVALID_STAG 0x0103
-#define IRDMA_AE_AMP_BAD_QP 0x0104
-#define IRDMA_AE_AMP_BAD_PD 0x0105
-#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
-#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
-#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
-#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
-#define IRDMA_AE_AMP_TO_WRAP 0x010a
-#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
-#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
-#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
-#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
-#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
-#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
-#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
-#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
-#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
-#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
-#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
-#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
-#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
-#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
-#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
-#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
-#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
-#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
-#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
-#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
-#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
-#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
-#define IRDMA_AE_BAD_CLOSE 0x0201
-#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
-#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
-#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
-#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
-#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
-#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
-#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
-#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
-#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
-#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
-#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
-#define IRDMA_AE_INVALID_REQUEST 0x0223
-#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
-#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
-#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
-#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
-#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
-#define IRDMA_AE_DDP_NO_L_BIT 0x0308
-#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
-#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
-#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
-#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
-#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
-#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
-#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
-#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
-#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
-#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
-#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
-#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
-#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
-#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
-#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
-#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
-#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
-#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
-#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
-#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
-#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
-#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
-#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
-#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
-#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
-#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
-#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
-#define IRDMA_AE_RESET_SENT 0x0601
-#define IRDMA_AE_TERMINATE_SENT 0x0602
-#define IRDMA_AE_RESET_NOT_SENT 0x0603
-#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
-#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
-#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
-
#define FLD_LS_64(dev, val, field) \
(((u64)(val) << (dev)->hw_shifts[field ## _S]) & (dev)->hw_masks[field ## _M])
#define FLD_RS_64(dev, val, field) \
@@ -393,9 +338,13 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STATS_USE_INST BIT_ULL(61)
#define IRDMA_CQPSQ_STATS_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_STATS_INST_INDEX GENMASK_ULL(6, 0)
-#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_STATS_HMC_FCN_INDEX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_WS_WQEVALID BIT_ULL(63)
-#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(53, 52)
+#define IRDMA_CQPSQ_WS_NODEOP GENMASK_ULL(55, 52)
+#define IRDMA_SD_MAX GENMASK_ULL(15, 0)
+#define IRDMA_MEM_MAX GENMASK_ULL(15, 0)
+#define IRDMA_QP_MEM_LOC GENMASK_ULL(47, 44)
+#define IRDMA_MR_MEM_LOC GENMASK_ULL(27, 24)
#define IRDMA_CQPSQ_WS_ENABLENODE BIT_ULL(62)
#define IRDMA_CQPSQ_WS_NODETYPE BIT_ULL(61)
@@ -404,16 +353,16 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_WS_VMVFTYPE GENMASK_ULL(55, 54)
#define IRDMA_CQPSQ_WS_VMVFNUM GENMASK_ULL(51, 42)
#define IRDMA_CQPSQ_WS_OP GENMASK_ULL(37, 32)
-#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(25, 16)
-#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(9, 0)
-#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(57, 48)
+#define IRDMA_CQPSQ_WS_PARENTID GENMASK_ULL(29, 16)
+#define IRDMA_CQPSQ_WS_NODEID GENMASK_ULL(13, 0)
+#define IRDMA_CQPSQ_WS_VSI GENMASK_ULL(63, 48)
#define IRDMA_CQPSQ_WS_WEIGHT GENMASK_ULL(38, 32)
#define IRDMA_CQPSQ_UP_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_UP_USEVLAN BIT_ULL(62)
#define IRDMA_CQPSQ_UP_USEOVERRIDE BIT_ULL(61)
#define IRDMA_CQPSQ_UP_OP GENMASK_ULL(37, 32)
-#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_UP_HMCFCNIDX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_UP_CNPOVERRIDE GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN GENMASK_ULL(31, 0)
@@ -448,6 +397,16 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPHC_SVER GENMASK_ULL(31, 24)
#define IRDMA_CQPHC_SQBASE GENMASK_ULL(63, 9)
+#define IRDMA_CQPHC_TIMESTAMP_OVERRIDE BIT_ULL(5)
+#define IRDMA_CQPHC_TS_SHIFT GENMASK_ULL(12, 8)
+#define IRDMA_CQPHC_EN_FINE_GRAINED_TIMERS BIT_ULL(0)
+
+#define IRDMA_CQPHC_OOISC_BLKSIZE GENMASK_ULL(63, 60)
+#define IRDMA_CQPHC_RRSP_BLKSIZE GENMASK_ULL(59, 56)
+#define IRDMA_CQPHC_Q1_BLKSIZE GENMASK_ULL(55, 52)
+#define IRDMA_CQPHC_XMIT_BLKSIZE GENMASK_ULL(51, 48)
+#define IRDMA_CQPHC_BLKSIZES_VALID BIT_ULL(4)
+
#define IRDMA_CQPHC_QPCTX GENMASK_ULL(63, 0)
#define IRDMA_QP_DBSA_HW_SQ_TAIL GENMASK_ULL(14, 0)
#define IRDMA_CQ_DBSA_CQEIDX GENMASK_ULL(19, 0)
@@ -461,6 +420,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CCQ_OPRETVAL GENMASK_ULL(31, 0)
+#define IRDMA_CCQ_DEFINFO GENMASK_ULL(63, 32)
+
#define IRDMA_CQ_MINERR GENMASK_ULL(15, 0)
#define IRDMA_CQ_MAJERR GENMASK_ULL(31, 16)
#define IRDMA_CQ_WQEIDX GENMASK_ULL(46, 32)
@@ -469,6 +430,7 @@ enum irdma_cqp_op_type {
#define IRDMA_CQ_ERROR BIT_ULL(55)
#define IRDMA_CQ_SQ BIT_ULL(62)
+#define IRDMA_CQ_SRQ BIT_ULL(52)
#define IRDMA_CQ_VALID BIT_ULL(63)
#define IRDMA_CQ_IMMVALID BIT_ULL(62)
#define IRDMA_CQ_UDSMACVALID BIT_ULL(61)
@@ -476,8 +438,6 @@ enum irdma_cqp_op_type {
#define IRDMA_CQ_UDSMAC GENMASK_ULL(47, 0)
#define IRDMA_CQ_UDVLAN GENMASK_ULL(63, 48)
-#define IRDMA_CQ_IMMDATA_S 0
-#define IRDMA_CQ_IMMDATA_M (0xffffffffffffffffULL << IRDMA_CQ_IMMVALID_S)
#define IRDMA_CQ_IMMDATALOW32 GENMASK_ULL(31, 0)
#define IRDMA_CQ_IMMDATAUP32 GENMASK_ULL(63, 32)
#define IRDMACQ_PAYLDLEN GENMASK_ULL(31, 0)
@@ -508,6 +468,17 @@ enum irdma_cqp_op_type {
#define IRDMA_AEQE_Q2DATA GENMASK_ULL(62, 61)
#define IRDMA_AEQE_VALID BIT_ULL(63)
+#define IRDMA_AEQE_Q2DATA_GEN_3 GENMASK_ULL(5, 4)
+#define IRDMA_AEQE_TCPSTATE_GEN_3 GENMASK_ULL(3, 0)
+#define IRDMA_AEQE_QPCQID_GEN_3 GENMASK_ULL(24, 0)
+#define IRDMA_AEQE_AECODE_GEN_3 GENMASK_ULL(61, 50)
+#define IRDMA_AEQE_OVERFLOW_GEN_3 BIT_ULL(62)
+#define IRDMA_AEQE_WQDESCIDX_GEN_3 GENMASK_ULL(49, 32)
+#define IRDMA_AEQE_IWSTATE_GEN_3 GENMASK_ULL(31, 29)
+#define IRDMA_AEQE_AESRC_GEN_3 GENMASK_ULL(28, 25)
+#define IRDMA_AEQE_CMPL_CTXT_S 6
+#define IRDMA_AEQE_CMPL_CTXT GENMASK_ULL(63, 6)
+
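
These GEN3 AEQE masks split one 64-bit context across two quadwords: bits [63:6] come from the CMPL_CTXT field, and for SRQ limit events the low six bits are carried in WQDESCIDX (see the IRDMA_AE_SRQ_LIMIT case in ctrl.c above). A sketch of the reconstruction under that reading:

	/* Assumed stitching, per the SRQ limit comment in ctrl.c: */
	u64 ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, compl_ctx_qw) <<
		  IRDMA_AEQE_CMPL_CTXT_S;
	ctx |= wqe_idx & GENMASK(5, 0);	/* low bits from WQDESCIDX */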
#define IRDMA_UDA_QPSQ_NEXT_HDR GENMASK_ULL(23, 16)
#define IRDMA_UDA_QPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_QPSQ_L4LEN GENMASK_ULL(45, 42)
@@ -530,11 +501,14 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_WQEVALID BIT_ULL(63)
#define IRDMA_CQPSQ_TPHVAL GENMASK_ULL(7, 0)
-#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(17, 8)
+#define IRDMA_CQPSQ_VSIIDX GENMASK_ULL(23, 8)
#define IRDMA_CQPSQ_TPHEN BIT_ULL(60)
#define IRDMA_CQPSQ_PBUFADDR IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_PASID GENMASK_ULL(51, 32)
+#define IRDMA_CQPSQ_PASID_VALID BIT_ULL(62)
+
/* Create/Modify/Destroy QP */
#define IRDMA_CQPSQ_QP_NEWMSS GENMASK_ULL(45, 32)
@@ -566,10 +540,30 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_QP_DBSHADOWADDR IRDMA_CQPHC_QPCTX
+#define IRDMA_CQPSQ_SRQ_RQSIZE GENMASK_ULL(3, 0)
+#define IRDMA_CQPSQ_SRQ_RQ_WQE_SIZE GENMASK_ULL(5, 4)
+#define IRDMA_CQPSQ_SRQ_SRQ_LIMIT GENMASK_ULL(43, 32)
+#define IRDMA_CQPSQ_SRQ_SRQCTX GENMASK_ULL(63, 6)
+#define IRDMA_CQPSQ_SRQ_PD_ID GENMASK_ULL(39, 16)
+#define IRDMA_CQPSQ_SRQ_SRQ_ID GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_SRQ_OP GENMASK_ULL(37, 32)
+#define IRDMA_CQPSQ_SRQ_LEAF_PBL_SIZE GENMASK_ULL(45, 44)
+#define IRDMA_CQPSQ_SRQ_VIRTMAP BIT_ULL(47)
+#define IRDMA_CQPSQ_SRQ_TPH_EN BIT_ULL(60)
+#define IRDMA_CQPSQ_SRQ_ARM_LIMIT_EVENT BIT_ULL(61)
+#define IRDMA_CQPSQ_SRQ_FIRST_PM_PBL_IDX GENMASK_ULL(27, 0)
+#define IRDMA_CQPSQ_SRQ_TPH_VALUE GENMASK_ULL(7, 0)
+#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR_S 8
+#define IRDMA_CQPSQ_SRQ_PHYSICAL_BUFFER_ADDR GENMASK_ULL(63, 8)
+#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR_S 6
+#define IRDMA_CQPSQ_SRQ_DB_SHADOW_ADDR GENMASK_ULL(63, 6)
+
#define IRDMA_CQPSQ_CQ_CQSIZE GENMASK_ULL(20, 0)
#define IRDMA_CQPSQ_CQ_CQCTX GENMASK_ULL(62, 0)
#define IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD GENMASK(17, 0)
+#define IRDMA_CQPSQ_CQ_CQID_HIGH GENMASK_ULL(52, 50)
+#define IRDMA_CQPSQ_CQ_CEQID_HIGH GENMASK_ULL(59, 54)
#define IRDMA_CQPSQ_CQ_OP GENMASK_ULL(37, 32)
#define IRDMA_CQPSQ_CQ_CQRESIZE BIT_ULL(43)
#define IRDMA_CQPSQ_CQ_LPBLSIZE GENMASK_ULL(45, 44)
@@ -590,6 +584,7 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STAG_MR BIT_ULL(43)
#define IRDMA_CQPSQ_STAG_MWTYPE BIT_ULL(42)
#define IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY BIT_ULL(58)
+#define IRDMA_CQPSQ_STAG_PDID_HI GENMASK_ULL(59, 54)
#define IRDMA_CQPSQ_STAG_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
#define IRDMA_CQPSQ_STAG_HPAGESIZE GENMASK_ULL(47, 46)
@@ -600,7 +595,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_STAG_USEPFRID BIT_ULL(61)
#define IRDMA_CQPSQ_STAG_PBA IRDMA_CQPHC_QPCTX
-#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_STAG_HMCFNIDX GENMASK_ULL(15, 0)
+#define IRDMA_CQPSQ_STAG_REMOTE_ATOMIC_EN BIT_ULL(61)
#define IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
#define IRDMA_CQPSQ_QUERYSTAG_IDX IRDMA_CQPSQ_STAG_IDX
@@ -628,11 +624,8 @@ enum irdma_cqp_op_type {
/* Manage Push Page - MPP */
#define IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1 0xffff
#define IRDMA_INVALID_PUSH_PAGE_INDEX 0xffffffff
-
-#define IRDMA_CQPSQ_MPP_QS_HANDLE GENMASK_ULL(9, 0)
-#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_MPP_PPIDX GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_MPP_PPTYPE GENMASK_ULL(61, 60)
-
#define IRDMA_CQPSQ_MPP_FREE_PAGE BIT_ULL(62)
/* Upload Context - UCTX */
@@ -651,6 +644,8 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_CEQ_CEQSIZE GENMASK_ULL(21, 0)
#define IRDMA_CQPSQ_CEQ_CEQID GENMASK_ULL(9, 0)
+#define IRDMA_CQPSQ_CEQ_CEQID_HIGH GENMASK_ULL(15, 10)
+
#define IRDMA_CQPSQ_CEQ_LPBLSIZE IRDMA_CQPSQ_CQ_LPBLSIZE
#define IRDMA_CQPSQ_CEQ_VMAP BIT_ULL(47)
#define IRDMA_CQPSQ_CEQ_ITRNOEXPIRE BIT_ULL(46)
@@ -660,10 +655,10 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_AEQ_VMAP BIT_ULL(47)
#define IRDMA_CQPSQ_AEQ_FIRSTPMPBLIDX GENMASK_ULL(27, 0)
-#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(18, 0)
-
+#define IRDMA_COMMIT_FPM_QPCNT GENMASK_ULL(20, 0)
#define IRDMA_COMMIT_FPM_BASE_S 32
-#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(5, 0)
+#define IRDMA_CQPSQ_CFPM_HMCFNID GENMASK_ULL(15, 0)
+
#define IRDMA_CQPSQ_FWQE_AECODE GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_FWQE_AESOURCE GENMASK_ULL(19, 16)
#define IRDMA_CQPSQ_FWQE_RQMNERR GENMASK_ULL(15, 0)
@@ -675,6 +670,10 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_FWQE_USERFLCODE BIT_ULL(60)
#define IRDMA_CQPSQ_FWQE_FLUSHSQ BIT_ULL(61)
#define IRDMA_CQPSQ_FWQE_FLUSHRQ BIT_ULL(62)
+#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX_VALID BIT_ULL(42)
+#define IRDMA_CQPSQ_FWQE_ERR_SQ_IDX GENMASK_ULL(49, 32)
+#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX_VALID BIT_ULL(43)
+#define IRDMA_CQPSQ_FWQE_ERR_RQ_IDX GENMASK_ULL(46, 32)
#define IRDMA_CQPSQ_MAPT_PORT GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_MAPT_ADDPORT BIT_ULL(62)
#define IRDMA_CQPSQ_UPESD_SDCMD GENMASK_ULL(31, 0)
@@ -693,9 +692,12 @@ enum irdma_cqp_op_type {
#define IRDMA_CQPSQ_SUSPENDQP_QPID GENMASK_ULL(23, 0)
#define IRDMA_CQPSQ_RESUMEQP_QSHANDLE GENMASK_ULL(31, 0)
#define IRDMA_CQPSQ_RESUMEQP_QPID GENMASK(23, 0)
+#define IRDMA_MANAGE_RSRC_VER2 BIT_ULL(2)
#define IRDMA_CQPSQ_MIN_STAG_INVALID 0x0001
#define IRDMA_CQPSQ_MIN_SUSPEND_PND 0x0005
+#define IRDMA_CQPSQ_MIN_DEF_CMPL 0x0006
+#define IRDMA_CQPSQ_MIN_OOO_CMPL 0x0007
#define IRDMA_CQPSQ_MAJ_NO_ERROR 0x0000
#define IRDMA_CQPSQ_MAJ_OBJCACHE_ERROR 0xF000
@@ -712,6 +714,11 @@ enum irdma_cqp_op_type {
#define IRDMAQPC_INSERTL2TAG2 BIT_ULL(11)
#define IRDMAQPC_LIMIT GENMASK_ULL(13, 12)
+#define IRDMAQPC_USE_SRQ BIT_ULL(10)
+#define IRDMAQPC_SRQ_ID GENMASK_ULL(15, 0)
+#define IRDMAQPC_PASID GENMASK_ULL(19, 0)
+#define IRDMAQPC_PASID_VALID BIT_ULL(11)
+
#define IRDMAQPC_ECN_EN BIT_ULL(14)
#define IRDMAQPC_DROPOOOSEG BIT_ULL(15)
#define IRDMAQPC_DUPACK_THRESH GENMASK_ULL(18, 16)
@@ -782,21 +789,31 @@ enum irdma_cqp_op_type {
#define IRDMAQPC_CWNDROCE GENMASK_ULL(55, 32)
#define IRDMAQPC_SNDWL1 GENMASK_ULL(31, 0)
#define IRDMAQPC_SNDWL2 GENMASK_ULL(63, 32)
-#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(45, 32)
+#define IRDMAQPC_MINRNR_TIMER GENMASK_ULL(4, 0)
+#define IRDMAQPC_ERR_RQ_IDX GENMASK_ULL(46, 32)
#define IRDMAQPC_RTOMIN GENMASK_ULL(63, 57)
#define IRDMAQPC_MAXSNDWND GENMASK_ULL(31, 0)
#define IRDMAQPC_REXMIT_THRESH GENMASK_ULL(53, 48)
#define IRDMAQPC_RNRNAK_THRESH GENMASK_ULL(56, 54)
-#define IRDMAQPC_TXCQNUM GENMASK_ULL(18, 0)
-#define IRDMAQPC_RXCQNUM GENMASK_ULL(50, 32)
+#define IRDMAQPC_TXCQNUM GENMASK_ULL(24, 0)
+#define IRDMAQPC_RXCQNUM GENMASK_ULL(56, 32)
#define IRDMAQPC_STAT_INDEX GENMASK_ULL(6, 0)
#define IRDMAQPC_Q2ADDR GENMASK_ULL(63, 8)
#define IRDMAQPC_LASTBYTESENT GENMASK_ULL(7, 0)
#define IRDMAQPC_MACADDRESS GENMASK_ULL(63, 16)
#define IRDMAQPC_ORDSIZE GENMASK_ULL(7, 0)
+#define IRDMAQPC_LOCALACKTIMEOUT GENMASK_ULL(12, 8)
+#define IRDMAQPC_RNRNAK_TMR GENMASK_ULL(4, 0)
+#define IRDMAQPC_ORDSIZE_GEN3 GENMASK_ULL(10, 0)
+#define IRDMAQPC_REMOTE_ATOMIC_EN BIT_ULL(18)
+#define IRDMAQPC_STAT_INDEX_GEN3 GENMASK_ULL(47, 32)
+#define IRDMAQPC_PKT_LIMIT GENMASK_ULL(55, 48)
+
#define IRDMAQPC_IRDSIZE GENMASK_ULL(18, 16)
+#define IRDMAQPC_IRDSIZE_GEN3 GENMASK_ULL(17, 14)
+
#define IRDMAQPC_UDPRIVCQENABLE BIT_ULL(19)
#define IRDMAQPC_WRRDRSPOK BIT_ULL(20)
#define IRDMAQPC_RDOK BIT_ULL(21)
@@ -833,6 +850,7 @@ enum irdma_cqp_op_type {
#define IRDMA_FEATURE_INFO GENMASK_ULL(47, 0)
#define IRDMA_FEATURE_CNT GENMASK_ULL(47, 32)
#define IRDMA_FEATURE_TYPE GENMASK_ULL(63, 48)
+#define IRDMA_FEATURE_RSRC_MAX GENMASK_ULL(31, 0)
#define IRDMAQPSQ_OPCODE GENMASK_ULL(37, 32)
#define IRDMAQPSQ_COPY_HOST_PBL BIT_ULL(43)
@@ -856,7 +874,7 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_REMSTAGINV GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQKEY GENMASK_ULL(31, 0)
#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
-#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
+#define IRDMAQPSQ_AHID GENMASK_ULL(24, 0)
#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
#define IRDMA_INLINE_VALID_S 7
@@ -869,6 +887,9 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_REMTO IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_STAG GENMASK_ULL(31, 0)
+#define IRDMAQPSQ_REMOTE_STAG GENMASK_ULL(31, 0)
+
#define IRDMAQPSQ_STAGRIGHTS GENMASK_ULL(52, 48)
#define IRDMAQPSQ_VABASEDTO BIT_ULL(53)
#define IRDMAQPSQ_MEMWINDOWTYPE BIT_ULL(54)
@@ -879,6 +900,8 @@ enum irdma_cqp_op_type {
#define IRDMAQPSQ_BASEVA_TO_FBO IRDMA_CQPHC_QPCTX
+#define IRDMAQPSQ_REMOTE_ATOMICS_EN BIT_ULL(55)
+
#define IRDMAQPSQ_LOCSTAG GENMASK_ULL(31, 0)
#define IRDMAQPSQ_STAGKEY GENMASK_ULL(7, 0)
@@ -903,11 +926,14 @@ enum irdma_cqp_op_type {
#define IRDMAPFINT_OICR_PE_PUSH_M BIT(27)
#define IRDMAPFINT_OICR_PE_CRITERR_M BIT(28)
-#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(18, 0)
-#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(19, 0)
+#define IRDMA_QUERY_FPM_LOC_MEM_PAGES GENMASK_ULL(63, 32)
+#define IRDMA_QUERY_FPM_MAX_QPS GENMASK_ULL(31, 0)
+#define IRDMA_QUERY_FPM_MAX_CQS GENMASK_ULL(31, 0)
#define IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX GENMASK_ULL(13, 0)
-#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(45, 32)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS GENMASK_ULL(44, 32)
+#define IRDMA_QUERY_FPM_MAX_PE_SDS_GEN3 GENMASK_ULL(47, 32)
#define IRDMA_QUERY_FPM_MAX_CEQS GENMASK_ULL(9, 0)
+#define IRDMA_QUERY_FPM_MAX_IRD GENMASK_ULL(53, 50)
#define IRDMA_QUERY_FPM_XFBLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_Q1BLOCKSIZE GENMASK_ULL(63, 32)
#define IRDMA_QUERY_FPM_HTMULTIPLIER GENMASK_ULL(19, 16)
@@ -1103,7 +1129,7 @@ enum irdma_alignment {
IRDMA_CEQ_ALIGNMENT = 0x100,
IRDMA_CQ0_ALIGNMENT = 0x100,
IRDMA_SD_BUF_ALIGNMENT = 0x80,
- IRDMA_FEATURE_BUF_ALIGNMENT = 0x8,
+ IRDMA_FEATURE_BUF_ALIGNMENT = 0x10,
};
enum icrdma_protocol_used {
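Note: these single-field masks are consumed through the kernel's <linux/bitfield.h> helpers. A minimal sketch of decoding a GEN_3 AEQ entry with the masks added above (which fields share a quadword is hardware-defined; the `temp` source is an assumption for illustration):

	#include <linux/bitfield.h>

	/* Sketch: pull GEN_3 AEQE fields out of one quadword. */
	static void decode_aeqe_gen3(u64 temp, u32 *qp_cq_id, u8 *ae_src, u64 *compl_ctx)
	{
		*qp_cq_id = FIELD_GET(IRDMA_AEQE_QPCQID_GEN_3, temp);
		*ae_src = FIELD_GET(IRDMA_AEQE_AESRC_GEN_3, temp);
		/* the completion context is stored shifted down by CMPL_CTXT_S */
		*compl_ctx = FIELD_GET(IRDMA_AEQE_CMPL_CTXT, temp) << IRDMA_AEQE_CMPL_CTXT_S;
	}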
diff --git a/drivers/infiniband/hw/irdma/hmc.c b/drivers/infiniband/hw/irdma/hmc.c
index ac58088a8e41..da18add141da 100644
--- a/drivers/infiniband/hw/irdma/hmc.c
+++ b/drivers/infiniband/hw/irdma/hmc.c
@@ -5,6 +5,7 @@
#include "defs.h"
#include "type.h"
#include "protos.h"
+#include "virtchnl.h"
/**
* irdma_find_sd_index_limit - finds segment descriptor index limit
@@ -228,6 +229,10 @@ int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
bool pd_error = false;
int ret_code = 0;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3 &&
+ dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
+ return 0;
+
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
return -EINVAL;
@@ -330,7 +335,7 @@ static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
u32 i, sd_idx;
struct irdma_dma_mem *mem;
- if (!reset)
+ if (dev->privileged && !reset)
ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
info->hmc_info->sd_indexes[0],
info->del_sd_cnt, false);
@@ -376,6 +381,9 @@ int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
u32 i, j;
int ret_code = 0;
+ if (dev->hmc_info->hmc_obj[info->rsrc_type].mem_loc == IRDMA_LOC_MEM)
+ return 0;
+
if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
ibdev_dbg(to_ibdev(dev),
"HMC: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
@@ -589,7 +597,10 @@ int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
pd_entry->sd_index = sd_idx;
pd_entry->valid = true;
pd_table->use_cnt++;
- irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
+
+ if (hmc_info->hmc_fn_id < dev->hw_attrs.first_hw_vf_fpm_id &&
+ dev->privileged)
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
}
pd_entry->bp.use_cnt++;
@@ -640,7 +651,8 @@ int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
pd_addr = pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
- irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
+ if (dev->privileged && dev->hmc_fn_id == hmc_info->hmc_fn_id)
+ irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);
if (!pd_entry->rsrc_pg) {
mem = &pd_entry->bp.addr;
diff --git a/drivers/infiniband/hw/irdma/hmc.h b/drivers/infiniband/hw/irdma/hmc.h
index 415f9e23bbf6..257a5d22aa96 100644
--- a/drivers/infiniband/hw/irdma/hmc.h
+++ b/drivers/infiniband/hw/irdma/hmc.h
@@ -16,11 +16,21 @@
#define IRDMA_HMC_PD_BP_BUF_ALIGNMENT 4096
#define IRDMA_FIRST_VF_FPM_ID 8
#define FPM_MULTIPLIER 1024
+#define IRDMA_OBJ_LOC_MEM_BIT 0x4
+#define IRDMA_XF_MULTIPLIER 16
+#define IRDMA_RRF_MULTIPLIER 8
+#define IRDMA_MIN_PBLE_PAGES 3
+#define IRDMA_HMC_PAGE_SIZE 2097152
+#define IRDMA_MIN_MR_PER_QP 4
+#define IRDMA_MIN_QP_CNT 64
+#define IRDMA_FSIAV_CNT_MAX 1048576
+#define IRDMA_MIN_IRD 8
+#define IRDMA_HMC_MIN_RRF 16
enum irdma_hmc_rsrc_type {
IRDMA_HMC_IW_QP = 0,
IRDMA_HMC_IW_CQ = 1,
- IRDMA_HMC_IW_RESERVED = 2,
+ IRDMA_HMC_IW_SRQ = 2,
IRDMA_HMC_IW_HTE = 3,
IRDMA_HMC_IW_ARP = 4,
IRDMA_HMC_IW_APBVT_ENTRY = 5,
@@ -48,11 +58,17 @@ enum irdma_sd_entry_type {
IRDMA_SD_TYPE_DIRECT = 2,
};
+enum irdma_hmc_obj_mem {
+ IRDMA_HOST_MEM = 0,
+ IRDMA_LOC_MEM = 1,
+};
+
struct irdma_hmc_obj_info {
u64 base;
u32 max_cnt;
u32 cnt;
u64 size;
+ enum irdma_hmc_obj_mem mem_loc;
};
struct irdma_hmc_bp {
@@ -117,6 +133,7 @@ struct irdma_update_sds_info {
struct irdma_ccq_cqe_info;
struct irdma_hmc_fcn_info {
u32 vf_id;
+ u8 protocol_used;
u8 free_fcn;
};
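Per the hmc.c hunks above, GEN_3 skips host-memory backing for objects whose mem_loc is IRDMA_LOC_MEM. A minimal sketch of how an init path might tag such an object (the call site is hypothetical; only the field and enum come from this patch):

	/* Tag the SRQ object as device-local so irdma_sc_create_hmc_obj() and
	 * irdma_sc_del_hmc_obj() return early for it on GEN_3 hardware. */
	hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].mem_loc = IRDMA_LOC_MEM;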
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 69ce1862eabe..d1fc5726b979 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -33,6 +33,7 @@ static struct irdma_rsrc_limits rsrc_limits_table[] = {
static enum irdma_hmc_rsrc_type iw_hmc_obj_types[] = {
IRDMA_HMC_IW_QP,
IRDMA_HMC_IW_CQ,
+ IRDMA_HMC_IW_SRQ,
IRDMA_HMC_IW_HTE,
IRDMA_HMC_IW_ARP,
IRDMA_HMC_IW_APBVT_ENTRY,
@@ -134,75 +135,68 @@ static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
struct irdma_aeqe_info *info)
{
+ struct qp_err_code qp_err;
+
qp->sq_flush_code = info->sq;
qp->rq_flush_code = info->rq;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
-
- switch (info->ae_id) {
- case IRDMA_AE_AMP_BOUNDS_VIOLATION:
- case IRDMA_AE_AMP_INVALID_STAG:
- case IRDMA_AE_AMP_RIGHTS_VIOLATION:
- case IRDMA_AE_AMP_UNALLOCATED_STAG:
- case IRDMA_AE_AMP_BAD_PD:
- case IRDMA_AE_AMP_BAD_QP:
- case IRDMA_AE_AMP_BAD_STAG_KEY:
- case IRDMA_AE_AMP_BAD_STAG_INDEX:
- case IRDMA_AE_AMP_TO_WRAP:
- case IRDMA_AE_PRIV_OPERATION_DENIED:
- qp->flush_code = FLUSH_PROT_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_UDA_XMIT_BAD_PD:
- case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
- qp->flush_code = FLUSH_LOC_QP_OP_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- case IRDMA_AE_UDA_L4LEN_INVALID:
- case IRDMA_AE_DDP_UBE_INVALID_MO:
- case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- qp->flush_code = FLUSH_LOC_LEN_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
- qp->flush_code = FLUSH_REM_ACCESS_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
- case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
- case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
- qp->flush_code = FLUSH_REM_OP_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_LCE_QP_CATASTROPHIC:
- qp->flush_code = FLUSH_FATAL_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_IB_RREQ_AND_Q1_FULL:
- qp->flush_code = FLUSH_GENERAL_ERR;
- break;
- case IRDMA_AE_LLP_TOO_MANY_RETRIES:
- qp->flush_code = FLUSH_RETRY_EXC_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
- case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
- case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
- case IRDMA_AE_AMP_MWBIND_VALID_STAG:
- qp->flush_code = FLUSH_MW_BIND_ERR;
- qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_IB_INVALID_REQUEST:
- qp->flush_code = FLUSH_REM_INV_REQ_ERR;
- qp->event_type = IRDMA_QP_EVENT_REQ_ERR;
- break;
- default:
- qp->flush_code = FLUSH_GENERAL_ERR;
- qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
+ if (qp->qp_uk.uk_attrs->hw_rev >= IRDMA_GEN_3) {
+ if (info->sq) {
+ qp->err_sq_idx_valid = true;
+ qp->err_sq_idx = info->wqe_idx;
+ }
+ if (info->rq) {
+ qp->err_rq_idx_valid = true;
+ qp->err_rq_idx = info->wqe_idx;
+ }
+ }
+
+ qp_err = irdma_ae_to_qp_err_code(info->ae_id);
+ qp->flush_code = qp_err.flush_code;
+ qp->event_type = qp_err.event_type;
+}
+
+/**
+ * irdma_complete_cqp_request - perform post-completion cleanup
+ * @cqp: device CQP
+ * @cqp_request: CQP request
+ *
+ * Mark the CQP request as done, wake up the waiting thread or invoke
+ * the callback function, and release/free the CQP request.
+ */
+static void irdma_complete_cqp_request(struct irdma_cqp *cqp,
+ struct irdma_cqp_request *cqp_request)
+{
+ if (cqp_request->waiting) {
+ WRITE_ONCE(cqp_request->request_done, true);
+ wake_up(&cqp_request->waitq);
+ } else if (cqp_request->callback_fcn) {
+ cqp_request->callback_fcn(cqp_request);
+ }
+ irdma_put_cqp_request(cqp, cqp_request);
+}
+
+/**
+ * irdma_process_ae_def_cmpl - handle IRDMA_AE_CQP_DEFERRED_COMPLETE event
+ * @rf: RDMA PCI function
+ * @info: AEQ entry info
+ */
+static void irdma_process_ae_def_cmpl(struct irdma_pci_f *rf,
+ struct irdma_aeqe_info *info)
+{
+ u32 sw_def_info;
+ u64 scratch;
+
+ irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
+
+ irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, true,
+ &scratch, &sw_def_info);
+ while (scratch) {
+ struct irdma_cqp_request *cqp_request =
+ (struct irdma_cqp_request *)(uintptr_t)scratch;
+
+ irdma_complete_cqp_request(&rf->cqp, cqp_request);
+ irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, false,
+ &scratch, &sw_def_info);
}
}
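The handler above relies on the CQP scratch cookie round-tripping the request pointer; a sketch of the assumed producer side (the posting site is not part of this hunk, but the cast back in irdma_process_ae_def_cmpl() implies it):

	/* When the WQE is posted, the request pointer is carried as the
	 * 64-bit scratch value that the AE handler later hands back. */
	u64 scratch = (uintptr_t)cqp_request;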
@@ -223,6 +217,7 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
struct irdma_sc_qp *qp = NULL;
struct irdma_qp_host_ctx_info *ctx_info = NULL;
struct irdma_device *iwdev = rf->iwdev;
+ struct irdma_sc_srq *srq;
unsigned long flags;
u32 aeqcnt = 0;
@@ -236,6 +231,13 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
if (ret)
break;
+ if (info->aeqe_overflow) {
+ ibdev_err(&iwdev->ibdev, "AEQ has overflowed\n");
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ return;
+ }
+
aeqcnt++;
ibdev_dbg(&iwdev->ibdev,
"AEQ: ae_id = 0x%x bool qp=%d qp_id = %d tcp_state=%d iwarp_state=%d ae_src=%d\n",
@@ -266,9 +268,12 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
if (info->ae_id != IRDMA_AE_QP_SUSPEND_COMPLETE)
iwqp->last_aeq = info->ae_id;
spin_unlock_irqrestore(&iwqp->lock, flags);
- ctx_info = &iwqp->ctx_info;
+ } else if (info->srq) {
+ if (info->ae_id != IRDMA_AE_SRQ_LIMIT)
+ continue;
} else {
- if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR)
+ if (info->ae_id != IRDMA_AE_CQ_OPERATION_ERROR &&
+ info->ae_id != IRDMA_AE_CQP_DEFERRED_COMPLETE)
continue;
}
@@ -363,6 +368,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
}
irdma_cq_rem_ref(&iwcq->ibcq);
break;
+ case IRDMA_AE_SRQ_LIMIT:
+ srq = (struct irdma_sc_srq *)(uintptr_t)info->compl_ctx;
+ irdma_srq_event(srq);
+ break;
+ case IRDMA_AE_SRQ_CATASTROPHIC_ERROR:
+ break;
+ case IRDMA_AE_CQP_DEFERRED_COMPLETE:
+			/* Remove completed CQP requests from the pending list
+			 * and notify waiters about completion of those CQP ops.
+			 */
+ irdma_process_ae_def_cmpl(rf, info);
+ break;
case IRDMA_AE_RESET_NOT_SENT:
case IRDMA_AE_LLP_DOUBT_REACHABILITY:
case IRDMA_AE_RESOURCE_EXHAUSTION:
@@ -389,13 +406,18 @@ static void irdma_process_aeq(struct irdma_pci_f *rf)
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_LCE_CQ_CATASTROPHIC:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
ibdev_err(&iwdev->ibdev, "abnormal ae_id = 0x%x bool qp=%d qp_id = %d, ae_src=%d\n",
info->ae_id, info->qp, info->qp_cq_id, info->ae_src);
- if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
- ctx_info->roce_info->err_rq_idx_valid = info->rq;
- if (info->rq) {
+ ctx_info = &iwqp->ctx_info;
+ if (rdma_protocol_roce(&iwqp->iwdev->ibdev, 1)) {
+ ctx_info->roce_info->err_rq_idx_valid =
+ ctx_info->srq_valid ? false : info->err_rq_idx_valid;
+ if (ctx_info->roce_info->err_rq_idx_valid) {
ctx_info->roce_info->err_rq_idx = info->wqe_idx;
irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
ctx_info);
@@ -599,6 +621,8 @@ static void irdma_destroy_cqp(struct irdma_pci_f *rf)
dma_free_coherent(dev->hw->device, cqp->sq.size, cqp->sq.va,
cqp->sq.pa);
cqp->sq.va = NULL;
+ kfree(cqp->oop_op_array);
+ cqp->oop_op_array = NULL;
kfree(cqp->scratch_array);
cqp->scratch_array = NULL;
kfree(cqp->cqp_requests);
@@ -631,7 +655,9 @@ static void irdma_destroy_aeq(struct irdma_pci_f *rf)
int status = -EBUSY;
if (!rf->msix_shared) {
- rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, rf->iw_msixtbl->idx, false);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev,
+ rf->iw_msixtbl->idx, false);
irdma_destroy_irq(rf, rf->iw_msixtbl, rf);
}
if (rf->reset)
@@ -697,9 +723,10 @@ static void irdma_del_ceq_0(struct irdma_pci_f *rf)
if (rf->msix_shared) {
msix_vec = &rf->iw_msixtbl[0];
- rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
- msix_vec->ceq_id,
- msix_vec->idx, false);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
irdma_destroy_irq(rf, msix_vec, rf);
} else {
msix_vec = &rf->iw_msixtbl[1];
@@ -730,8 +757,10 @@ static void irdma_del_ceqs(struct irdma_pci_f *rf)
msix_vec = &rf->iw_msixtbl[2];
for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
- rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, msix_vec->ceq_id,
- msix_vec->idx, false);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev,
+ msix_vec->ceq_id,
+ msix_vec->idx, false);
irdma_destroy_irq(rf, msix_vec, iwceq);
irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
IRDMA_OP_CEQ_DESTROY);
@@ -942,6 +971,13 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
goto err_scratch;
}
+ cqp->oop_op_array = kcalloc(sqsize, sizeof(*cqp->oop_op_array),
+ GFP_KERNEL);
+ if (!cqp->oop_op_array) {
+ status = -ENOMEM;
+ goto err_oop;
+ }
+ cqp_init_info.ooo_op_array = cqp->oop_op_array;
dev->cqp = &cqp->sc_cqp;
dev->cqp->dev = dev;
cqp->sq.size = ALIGN(sizeof(struct irdma_cqp_sq_wqe) * sqsize,
@@ -978,6 +1014,10 @@ static int irdma_create_cqp(struct irdma_pci_f *rf)
case IRDMA_GEN_2:
cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_2;
break;
+ case IRDMA_GEN_3:
+ cqp_init_info.hw_maj_ver = IRDMA_CQPHC_HW_MAJVER_GEN_3;
+ cqp_init_info.ts_override = 1;
+ break;
}
status = irdma_sc_cqp_init(dev->cqp, &cqp_init_info);
if (status) {
@@ -1012,6 +1052,9 @@ err_ctx:
cqp->sq.va, cqp->sq.pa);
cqp->sq.va = NULL;
err_sq:
+ kfree(cqp->oop_op_array);
+ cqp->oop_op_array = NULL;
+err_oop:
kfree(cqp->scratch_array);
cqp->scratch_array = NULL;
err_scratch:
@@ -1033,13 +1076,15 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_ccq_init_info info = {};
struct irdma_ccq *ccq = &rf->ccq;
+ int ccq_size;
int status;
dev->ccq = &ccq->sc_cq;
dev->ccq->dev = dev;
info.dev = dev;
+ ccq_size = (rf->rdma_ver >= IRDMA_GEN_3) ? IW_GEN_3_CCQ_SIZE : IW_CCQ_SIZE;
ccq->shadow_area.size = sizeof(struct irdma_cq_shadow_area);
- ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * IW_CCQ_SIZE,
+ ccq->mem_cq.size = ALIGN(sizeof(struct irdma_cqe) * ccq_size,
IRDMA_CQ0_ALIGNMENT);
ccq->mem_cq.va = dma_alloc_coherent(dev->hw->device, ccq->mem_cq.size,
&ccq->mem_cq.pa, GFP_KERNEL);
@@ -1056,7 +1101,7 @@ static int irdma_create_ccq(struct irdma_pci_f *rf)
/* populate the ccq init info */
info.cq_base = ccq->mem_cq.va;
info.cq_pa = ccq->mem_cq.pa;
- info.num_elem = IW_CCQ_SIZE;
+ info.num_elem = ccq_size;
info.shadow_area = ccq->shadow_area.va;
info.shadow_area_pa = ccq->shadow_area.pa;
info.ceqe_mask = false;
@@ -1140,9 +1185,13 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
}
msix_vec->ceq_id = ceq_id;
- rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, msix_vec->idx, true);
-
- return 0;
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id,
+ msix_vec->idx, true);
+ else
+ status = irdma_vchnl_req_ceq_vec_map(&rf->sc_dev, ceq_id,
+ msix_vec->idx);
+ return status;
}
/**
@@ -1155,7 +1204,7 @@ static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
{
struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
- u32 ret = 0;
+ int ret = 0;
if (!rf->msix_shared) {
snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
@@ -1166,12 +1215,16 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
}
if (ret) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
- return -EINVAL;
+ return ret;
}
- rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, true);
+ if (rf->sc_dev.privileged)
+ rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx,
+ true);
+ else
+ ret = irdma_vchnl_req_aeq_vec_map(&rf->sc_dev, msix_vec->idx);
- return 0;
+ return ret;
}
/**
@@ -1179,13 +1232,13 @@ static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf)
* @rf: RDMA PCI function
* @iwceq: pointer to the ceq resources to be created
* @ceq_id: the id number of the iwceq
- * @vsi: SC vsi struct
+ * @vsi_idx: VSI index
*
* Return 0, if the ceq and the resources associated with it
* are successfully created, otherwise return error
*/
static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
- u32 ceq_id, struct irdma_sc_vsi *vsi)
+ u32 ceq_id, u16 vsi_idx)
{
int status;
struct irdma_ceq_init_info info = {};
@@ -1209,7 +1262,7 @@ static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
info.elem_cnt = ceq_size;
iwceq->sc_ceq.ceq_id = ceq_id;
info.dev = dev;
- info.vsi = vsi;
+ info.vsi_idx = vsi_idx;
status = irdma_sc_ceq_init(&iwceq->sc_ceq, &info);
if (!status) {
if (dev->ceq_valid)
@@ -1252,7 +1305,7 @@ static int irdma_setup_ceq_0(struct irdma_pci_f *rf)
}
iwceq = &rf->ceqlist[0];
- status = irdma_create_ceq(rf, iwceq, 0, &rf->default_vsi);
+ status = irdma_create_ceq(rf, iwceq, 0, rf->default_vsi.vsi_idx);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n",
status);
@@ -1287,13 +1340,13 @@ exit:
/**
* irdma_setup_ceqs - manage the device ceq's and their interrupt resources
* @rf: RDMA PCI function
- * @vsi: VSI structure for this CEQ
+ * @vsi_idx: VSI index for this CEQ
*
* Allocate a list for all device completion event queues
* Create the ceq's and configure their msix interrupt vectors
* Return 0, if ceqs are successfully set up, otherwise return error
*/
-static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
+static int irdma_setup_ceqs(struct irdma_pci_f *rf, u16 vsi_idx)
{
u32 i;
u32 ceq_id;
@@ -1306,7 +1359,7 @@ static int irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
i = (rf->msix_shared) ? 1 : 2;
for (ceq_id = 1; i < num_ceqs; i++, ceq_id++) {
iwceq = &rf->ceqlist[ceq_id];
- status = irdma_create_ceq(rf, iwceq, ceq_id, vsi);
+ status = irdma_create_ceq(rf, iwceq, ceq_id, vsi_idx);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev,
"ERR: create ceq status = %d\n", status);
@@ -1387,7 +1440,10 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
aeq_size = multiplier * hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt +
hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
aeq_size = min(aeq_size, dev->hw_attrs.max_hw_aeq_size);
-
+	/* GEN_3 does not support virtual AEQ. Cap at the max kernel alloc size */
+ if (rf->rdma_ver == IRDMA_GEN_3)
+ aeq_size = min(aeq_size, (u32)((PAGE_SIZE << MAX_PAGE_ORDER) /
+ sizeof(struct irdma_sc_aeqe)));
aeq->mem.size = ALIGN(sizeof(struct irdma_sc_aeqe) * aeq_size,
IRDMA_AEQ_ALIGNMENT);
aeq->mem.va = dma_alloc_coherent(dev->hw->device, aeq->mem.size,
@@ -1395,6 +1451,8 @@ static int irdma_create_aeq(struct irdma_pci_f *rf)
GFP_KERNEL | __GFP_NOWARN);
if (aeq->mem.va)
goto skip_virt_aeq;
+ else if (rf->rdma_ver == IRDMA_GEN_3)
+ return -ENOMEM;
/* physically mapped aeq failed. setup virtual aeq */
status = irdma_create_virt_aeq(rf, aeq_size);
@@ -1569,6 +1627,8 @@ static void irdma_del_init_mem(struct irdma_pci_f *rf)
{
struct irdma_sc_dev *dev = &rf->sc_dev;
+ if (!rf->sc_dev.privileged)
+ irdma_vchnl_req_put_hmc_fcn(&rf->sc_dev);
kfree(dev->hmc_info->sd_table.sd_entry);
dev->hmc_info->sd_table.sd_entry = NULL;
vfree(rf->mem_rsrc);
@@ -1635,6 +1695,7 @@ static int irdma_initialize_dev(struct irdma_pci_f *rf)
info.bar0 = rf->hw.hw_addr;
info.hmc_fn_id = rf->pf_id;
+ info.protocol_used = rf->protocol_used;
info.hw = &rf->hw;
status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info);
if (status)
@@ -1665,9 +1726,6 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
irdma_del_local_mac_entry(iwdev->rf,
(u8)iwdev->mac_ip_table_idx);
fallthrough;
- case AEQ_CREATED:
- case PBLE_CHUNK_MEM:
- case CEQS_CREATED:
case IEQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
@@ -1740,7 +1798,9 @@ static void irdma_get_used_rsrc(struct irdma_device *iwdev)
iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
iwdev->rf->max_qp);
iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
- iwdev->rf->max_cq);
+ iwdev->rf->max_cq);
+ iwdev->rf->used_srqs = find_first_zero_bit(iwdev->rf->allocated_srqs,
+ iwdev->rf->max_srq);
iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
iwdev->rf->max_mr);
}
@@ -1750,13 +1810,17 @@ void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
enum init_completion_state state = rf->init_state;
rf->init_state = INVALID_STATE;
- if (rf->rsrc_created) {
+
+ switch (state) {
+ case AEQ_CREATED:
irdma_destroy_aeq(rf);
+ fallthrough;
+ case PBLE_CHUNK_MEM:
irdma_destroy_pble_prm(rf->pble_rsrc);
+ fallthrough;
+ case CEQS_CREATED:
irdma_del_ceqs(rf);
- rf->rsrc_created = false;
- }
- switch (state) {
+ fallthrough;
case CEQ0_CREATED:
irdma_del_ceq_0(rf);
fallthrough;
@@ -1835,32 +1899,6 @@ int irdma_rt_init_hw(struct irdma_device *iwdev,
break;
iwdev->init_state = IEQ_CREATED;
}
- if (!rf->rsrc_created) {
- status = irdma_setup_ceqs(rf, &iwdev->vsi);
- if (status)
- break;
-
- iwdev->init_state = CEQS_CREATED;
-
- status = irdma_hmc_init_pble(&rf->sc_dev,
- rf->pble_rsrc);
- if (status) {
- irdma_del_ceqs(rf);
- break;
- }
-
- iwdev->init_state = PBLE_CHUNK_MEM;
-
- status = irdma_setup_aeq(rf);
- if (status) {
- irdma_destroy_pble_prm(rf->pble_rsrc);
- irdma_del_ceqs(rf);
- break;
- }
- iwdev->init_state = AEQ_CREATED;
- rf->rsrc_created = true;
- }
-
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
irdma_alloc_set_mac(iwdev);
irdma_add_ip(iwdev);
@@ -1907,6 +1945,13 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CQP_CREATED;
+ dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
+ if (rf->rdma_ver != IRDMA_GEN_1) {
+ status = irdma_get_rdma_features(dev);
+ if (status)
+ break;
+ }
+
status = irdma_hmc_setup(rf);
if (status)
break;
@@ -1922,13 +1967,6 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
break;
rf->init_state = CCQ_CREATED;
- dev->feature_info[IRDMA_FEATURE_FW_INFO] = IRDMA_FW_VER_DEFAULT;
- if (rf->rdma_ver != IRDMA_GEN_1) {
- status = irdma_get_rdma_features(dev);
- if (status)
- break;
- }
-
status = irdma_setup_ceq_0(rf);
if (status)
break;
@@ -1942,6 +1980,25 @@ int irdma_ctrl_init_hw(struct irdma_pci_f *rf)
}
INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker);
irdma_sc_ccq_arm(dev->ccq);
+
+ status = irdma_setup_ceqs(rf, rf->iwdev ? rf->iwdev->vsi_num : 0);
+ if (status)
+ break;
+
+ rf->init_state = CEQS_CREATED;
+
+ status = irdma_hmc_init_pble(&rf->sc_dev,
+ rf->pble_rsrc);
+ if (status)
+ break;
+
+ rf->init_state = PBLE_CHUNK_MEM;
+
+ status = irdma_setup_aeq(rf);
+ if (status)
+ break;
+ rf->init_state = AEQ_CREATED;
+
return 0;
} while (0);
@@ -1960,7 +2017,8 @@ static void irdma_set_hw_rsrc(struct irdma_pci_f *rf)
rf->allocated_qps = (void *)(rf->mem_rsrc +
(sizeof(struct irdma_arp_entry) * rf->arp_table_size));
rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)];
- rf->allocated_mrs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
+ rf->allocated_srqs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)];
+ rf->allocated_mrs = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)];
rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)];
rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)];
rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)];
@@ -1988,12 +2046,14 @@ static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf)
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq);
+ rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah);
rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg);
rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp;
rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq;
+ rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq;
return rsrc_size;
}
@@ -2021,6 +2081,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt;
rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt;
rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt;
+ rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt;
rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds;
rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt;
rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt;
@@ -2040,6 +2101,7 @@ u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf)
set_bit(0, rf->allocated_mrs);
set_bit(0, rf->allocated_qps);
set_bit(0, rf->allocated_cqs);
+ set_bit(0, rf->allocated_srqs);
set_bit(0, rf->allocated_pds);
set_bit(0, rf->allocated_arps);
set_bit(0, rf->allocated_ahs);
@@ -2100,15 +2162,16 @@ void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq)
cqp_request->compl_info.op_ret_val = info.op_ret_val;
cqp_request->compl_info.error = info.error;
- if (cqp_request->waiting) {
- WRITE_ONCE(cqp_request->request_done, true);
- wake_up(&cqp_request->waitq);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
- } else {
- if (cqp_request->callback_fcn)
- cqp_request->callback_fcn(cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
- }
+			/*
+			 * If this is a deferred or pending completion, mark the
+			 * CQP request as pending so it does not block the CQ, but
+			 * do not release it: the request is still on the OOO list.
+			 */
+ if (info.pending)
+ cqp_request->pending = true;
+ else
+ irdma_complete_cqp_request(&rf->cqp,
+ cqp_request);
}
cqe_count++;
@@ -2302,7 +2365,6 @@ static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_apbvt_entry.info;
- memset(info, 0, sizeof(*info));
info->add = add_port;
info->port = accel_local_port;
cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
@@ -2411,7 +2473,6 @@ void irdma_manage_arp_cache(struct irdma_pci_f *rf,
if (action == IRDMA_ARP_ADD) {
cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
info = &cqp_info->in.u.add_arp_cache_entry.info;
- memset(info, 0, sizeof(*info));
info->arp_index = (u16)arp_index;
info->permanent = true;
ether_addr_copy(info->mac_addr, mac_addr);
@@ -2470,7 +2531,6 @@ int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.manage_qhash_table_entry.info;
- memset(info, 0, sizeof(*info));
info->vsi = &iwdev->vsi;
info->manage = mtype;
info->entry_type = etype;
@@ -2718,7 +2778,9 @@ void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
struct irdma_pci_f *rf = iwqp->iwdev->rf;
u8 flush_code = iwqp->sc_qp.flush_code;
- if (!(flush_mask & IRDMA_FLUSH_SQ) && !(flush_mask & IRDMA_FLUSH_RQ))
+ if ((!(flush_mask & IRDMA_FLUSH_SQ) &&
+ !(flush_mask & IRDMA_FLUSH_RQ)) ||
+ ((flush_mask & IRDMA_REFLUSH) && rf->rdma_ver >= IRDMA_GEN_3))
return;
/* Set flush info fields*/
@@ -2731,6 +2793,10 @@ void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
info.rq_major_code = IRDMA_FLUSH_MAJOR_ERR;
info.rq_minor_code = FLUSH_GENERAL_ERR;
info.userflushcode = true;
+ info.err_sq_idx_valid = iwqp->sc_qp.err_sq_idx_valid;
+ info.err_sq_idx = iwqp->sc_qp.err_sq_idx;
+ info.err_rq_idx_valid = iwqp->sc_qp.err_rq_idx_valid;
+ info.err_rq_idx = iwqp->sc_qp.err_rq_idx;
if (flush_mask & IRDMA_REFLUSH) {
if (info.sq)
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c
index ce61a27cb1f6..60c1f2b1811d 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.c
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.c
@@ -85,6 +85,7 @@ static u64 i40iw_masks[IRDMA_MAX_MASKS] = {
I40E_CQPSQ_CQ_CEQID,
I40E_CQPSQ_CQ_CQID,
I40E_COMMIT_FPM_CQCNT,
+ I40E_CQPSQ_UPESD_HMCFNID,
};
static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
@@ -94,6 +95,7 @@ static u64 i40iw_shifts[IRDMA_MAX_SHIFTS] = {
I40E_CQPSQ_CQ_CEQID_S,
I40E_CQPSQ_CQ_CQID_S,
I40E_COMMIT_FPM_CQCNT_S,
+ I40E_CQPSQ_UPESD_HMCFNID_S,
};
/**
diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.h b/drivers/infiniband/hw/irdma/i40iw_hw.h
index e1db84d8a62c..0095b327afcc 100644
--- a/drivers/infiniband/hw/irdma/i40iw_hw.h
+++ b/drivers/infiniband/hw/irdma/i40iw_hw.h
@@ -123,6 +123,8 @@
#define I40E_CQPSQ_CQ_CQID GENMASK_ULL(15, 0)
#define I40E_COMMIT_FPM_CQCNT_S 0
#define I40E_COMMIT_FPM_CQCNT GENMASK_ULL(17, 0)
+#define I40E_CQPSQ_UPESD_HMCFNID_S 0
+#define I40E_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4))
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index cc50a7070371..15e036ddaffb 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -75,6 +75,9 @@ static void i40iw_fill_device_info(struct irdma_device *iwdev, struct i40e_info
struct irdma_pci_f *rf = iwdev->rf;
rf->rdma_ver = IRDMA_GEN_1;
+ rf->sc_dev.hw = &rf->hw;
+ rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_1;
+ rf->sc_dev.privileged = true;
rf->gen_ops.request_reset = i40iw_request_reset;
rf->pcidev = cdev_info->pcidev;
rf->pf_id = cdev_info->fid;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c
index 941d3edffadb..32f26284a788 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.c
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.c
@@ -38,6 +38,7 @@ static u64 icrdma_masks[IRDMA_MAX_MASKS] = {
ICRDMA_CQPSQ_CQ_CEQID,
ICRDMA_CQPSQ_CQ_CQID,
ICRDMA_COMMIT_FPM_CQCNT,
+ ICRDMA_CQPSQ_UPESD_HMCFNID,
};
static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
@@ -47,6 +48,7 @@ static u64 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
ICRDMA_CQPSQ_CQ_CEQID_S,
ICRDMA_CQPSQ_CQ_CQID_S,
ICRDMA_COMMIT_FPM_CQCNT_S,
+ ICRDMA_CQPSQ_UPESD_HMCFNID_S,
};
/**
@@ -194,6 +196,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+ dev->hw_attrs.max_hw_device_pages = ICRDMA_MAX_PUSH_PAGE_COUNT;
dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.h b/drivers/infiniband/hw/irdma/icrdma_hw.h
index 697b9572b5c6..d97944ab45da 100644
--- a/drivers/infiniband/hw/irdma/icrdma_hw.h
+++ b/drivers/infiniband/hw/irdma/icrdma_hw.h
@@ -58,14 +58,15 @@
#define ICRDMA_CQPSQ_CQ_CQID GENMASK_ULL(18, 0)
#define ICRDMA_COMMIT_FPM_CQCNT_S 0
#define ICRDMA_COMMIT_FPM_CQCNT GENMASK_ULL(19, 0)
-
+#define ICRDMA_CQPSQ_UPESD_HMCFNID_S 0
+#define ICRDMA_CQPSQ_UPESD_HMCFNID GENMASK_ULL(5, 0)
enum icrdma_device_caps_const {
ICRDMA_MAX_STATS_COUNT = 128,
ICRDMA_MAX_IRD_SIZE = 127,
ICRDMA_MAX_ORD_SIZE = 255,
ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
-
+ ICRDMA_MAX_PUSH_PAGE_COUNT = 256,
};
void icrdma_init_hw(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/icrdma_if.c b/drivers/infiniband/hw/irdma/icrdma_if.c
new file mode 100644
index 000000000000..b49fd9cf2476
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/icrdma_if.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+
+#include "main.h"
+#include <linux/net/intel/iidc_rdma_ice.h>
+
+static void icrdma_prep_tc_change(struct irdma_device *iwdev)
+{
+ iwdev->vsi.tc_change_pending = true;
+ irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
+
+	/* Wait for all QPs to suspend */
+ wait_event_timeout(iwdev->suspend_wq,
+ !atomic_read(&iwdev->vsi.qp_suspend_reqs),
+ msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
+ irdma_ws_reset(&iwdev->vsi);
+}
+
+static void icrdma_fill_qos_info(struct irdma_l2params *l2params,
+ struct iidc_rdma_qos_params *qos_info)
+{
+ int i;
+
+ l2params->num_tc = qos_info->num_tc;
+ l2params->vsi_prio_type = qos_info->vport_priority_type;
+ l2params->vsi_rel_bw = qos_info->vport_relative_bw;
+ for (i = 0; i < l2params->num_tc; i++) {
+ l2params->tc_info[i].egress_virt_up =
+ qos_info->tc_info[i].egress_virt_up;
+ l2params->tc_info[i].ingress_virt_up =
+ qos_info->tc_info[i].ingress_virt_up;
+ l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
+ l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
+ l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
+ }
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ l2params->up2tc[i] = qos_info->up2tc[i];
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
+ l2params->dscp_mode = true;
+ memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
+ }
+}
+
+static void icrdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
+{
+ struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
+ struct irdma_l2params l2params = {};
+
+ if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
+ ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu);
+ if (iwdev->vsi.mtu != iwdev->netdev->mtu) {
+ l2params.mtu = iwdev->netdev->mtu;
+ l2params.mtu_changed = true;
+ irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ }
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
+ if (iwdev->vsi.tc_change_pending)
+ return;
+
+ icrdma_prep_tc_change(iwdev);
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+
+ if (!iwdev->vsi.tc_change_pending)
+ return;
+
+ l2params.tc_changed = true;
+ ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
+
+ icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode =
+ l2params.num_tc > 1 && !l2params.dscp_mode;
+ irdma_change_l2params(&iwdev->vsi, &l2params);
+ } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
+ ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
+ event->reg);
+ if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
+ u32 pe_criterr;
+
+ pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
+#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
+ if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
+ ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
+ pe_criterr);
+ iwdev->rf->reset = true;
+ } else {
+ ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
+ }
+ }
+ if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
+ ibdev_err(&iwdev->ibdev, "HMC Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
+ ibdev_err(&iwdev->ibdev, "PE Push Error\n");
+ iwdev->rf->reset = true;
+ }
+ if (iwdev->rf->reset)
+ iwdev->rf->gen_ops.request_reset(iwdev->rf);
+ }
+}
+
+/**
+ * icrdma_lan_register_qset - Register qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static int icrdma_lan_register_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+ int ret;
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ ret = ice_add_rdma_qset(cdev_info, &qset);
+ if (ret) {
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
+ return ret;
+ }
+
+ tc_node->l2_sched_node_id = qset.teid;
+ vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
+
+ return 0;
+}
+
+/**
+ * icrdma_lan_unregister_qset - Unregister qset with LAN driver
+ * @vsi: vsi structure
+ * @tc_node: Traffic class node
+ */
+static void icrdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
+ struct irdma_ws_node *tc_node)
+{
+ struct irdma_device *iwdev = vsi->back_vsi;
+ struct iidc_rdma_core_dev_info *cdev_info = iwdev->rf->cdev;
+ struct iidc_rdma_qset_params qset = {};
+
+ qset.qs_handle = tc_node->qs_handle;
+ qset.tc = tc_node->traffic_class;
+ qset.vport_id = vsi->vsi_idx;
+ qset.teid = tc_node->l2_sched_node_id;
+
+ if (ice_del_rdma_qset(cdev_info, &qset))
+ ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
+}
+
+/**
+ * icrdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void icrdma_request_reset(struct irdma_pci_f *rf)
+{
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
+}
+
+static int icrdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
+{
+ int i;
+
+ rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
+ rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
+ GFP_KERNEL);
+ if (!rf->msix_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < rf->msix_count; i++)
+ if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
+ break;
+
+ if (i < IRDMA_MIN_MSIX) {
+ while (--i >= 0)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+ return -ENOMEM;
+ }
+
+ rf->msix_count = i;
+
+ return 0;
+}
+
+static void icrdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
+{
+ int i;
+
+ for (i = 0; i < rf->msix_count; i++)
+ ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+
+ kfree(rf->msix_entries);
+}
+
+static void icrdma_fill_device_info(struct irdma_device *iwdev,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct irdma_pci_f *rf = iwdev->rf;
+
+ rf->sc_dev.hw = &rf->hw;
+ rf->iwdev = iwdev;
+ rf->cdev = cdev_info;
+ rf->hw.hw_addr = idc_priv->hw_addr;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->pf_id = idc_priv->pf_id;
+ rf->rdma_ver = IRDMA_GEN_2;
+ rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_2;
+ rf->sc_dev.is_pf = true;
+ rf->sc_dev.privileged = true;
+
+ rf->gen_ops.register_qset = icrdma_lan_register_qset;
+ rf->gen_ops.unregister_qset = icrdma_lan_unregister_qset;
+
+ rf->default_vsi.vsi_idx = idc_priv->vport_id;
+ rf->protocol_used =
+ cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = icrdma_request_reset;
+ rf->limits_sel = 7;
+ mutex_init(&rf->ah_tbl_lock);
+
+ iwdev->netdev = idc_priv->netdev;
+ iwdev->vsi_num = idc_priv->vport_id;
+ iwdev->init_state = INITIAL_STATE;
+ iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
+ iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
+ iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
+ iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->roce_mode = true;
+}
+
+static int icrdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
+{
+ struct iidc_rdma_core_auxiliary_dev *iidc_adev;
+ struct iidc_rdma_core_dev_info *cdev_info;
+ struct iidc_rdma_priv_dev_info *idc_priv;
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ struct irdma_pci_f *rf;
+ int err;
+
+ iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ cdev_info = iidc_adev->cdev_info;
+ idc_priv = cdev_info->iidc_priv;
+
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+ iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!iwdev->rf) {
+ ib_dealloc_device(&iwdev->ibdev);
+ return -ENOMEM;
+ }
+
+ icrdma_fill_device_info(iwdev, cdev_info);
+ rf = iwdev->rf;
+
+ err = icrdma_init_interrupts(rf, cdev_info);
+ if (err)
+ goto err_init_interrupts;
+
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
+ goto err_ctrl_init;
+
+ l2params.mtu = iwdev->netdev->mtu;
+ icrdma_fill_qos_info(&l2params, &idc_priv->qos_info);
+ if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
+ iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
+
+ err = irdma_rt_init_hw(iwdev, &l2params);
+ if (err)
+ goto err_rt_init;
+
+ err = irdma_ib_register_device(iwdev);
+ if (err)
+ goto err_ibreg;
+
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
+
+ ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
+ auxiliary_set_drvdata(aux_dev, iwdev);
+
+ return 0;
+
+err_ibreg:
+ irdma_rt_deinit_hw(iwdev);
+err_rt_init:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ icrdma_deinit_interrupts(rf, cdev_info);
+err_init_interrupts:
+ mutex_destroy(&rf->ah_tbl_lock);
+ kfree(rf);
+ ib_dealloc_device(&iwdev->ibdev);
+
+ return err;
+}
+
+static void icrdma_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+ u8 rdma_ver = iwdev->rf->rdma_ver;
+
+ ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
+ irdma_ib_unregister_device(iwdev);
+ icrdma_deinit_interrupts(iwdev->rf, cdev_info);
+ mutex_destroy(&iwdev->rf->ah_tbl_lock);
+
+ kfree(iwdev->rf);
+
+ pr_debug("INIT: Gen[%d] func[%d] device remove success\n",
+ rdma_ver, PCI_FUNC(cdev_info->pdev->devfn));
+}
+
+static const struct auxiliary_device_id icrdma_auxiliary_id_table[] = {
+ {.name = "ice.iwarp", },
+ {.name = "ice.roce", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, icrdma_auxiliary_id_table);
+
+struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv = {
+ .adrv = {
+ .name = "gen_2",
+ .id_table = icrdma_auxiliary_id_table,
+ .probe = icrdma_probe,
+ .remove = icrdma_remove,
+ },
+ .event_handler = icrdma_iidc_event_handler,
+};
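For reference, a hedged sketch of the module-side registration that would bind this driver (the init function shown is an assumption; only icrdma_core_auxiliary_drv itself comes from this file):

	#include <linux/auxiliary_bus.h>

	static int __init irdma_init_module(void)
	{
		/* matches the "ice.iwarp" / "ice.roce" auxiliary devices above */
		return auxiliary_driver_register(&icrdma_core_auxiliary_drv.adrv);
	}
	module_init(irdma_init_module);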
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_hw.c b/drivers/infiniband/hw/irdma/ig3rdma_hw.c
new file mode 100644
index 000000000000..2e8bb475e22a
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_hw.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2018 - 2024 Intel Corporation */
+#include "osdep.h"
+#include "type.h"
+#include "protos.h"
+#include "ig3rdma_hw.h"
+
+/**
+ * ig3rdma_ena_irq - Enable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void ig3rdma_ena_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 val;
+ u32 int_stride = 1; /* one u32 per register */
+
+ if (dev->is_pf)
+ int_stride = 0x400;
+ else
+ idx--; /* VFs use DYN_CTL_N */
+
+ val = FIELD_PREP(IRDMA_GLINT_DYN_CTL_INTENA, 1) |
+ FIELD_PREP(IRDMA_GLINT_DYN_CTL_CLEARPBA, 1);
+
+ writel(val, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
+}
+
+/**
+ * ig3rdma_disable_irq - Disable interrupt
+ * @dev: pointer to the device structure
+ * @idx: vector index
+ */
+static void ig3rdma_disable_irq(struct irdma_sc_dev *dev, u32 idx)
+{
+ u32 int_stride = 1; /* one u32 per register */
+
+ if (dev->is_pf)
+ int_stride = 0x400;
+ else
+ idx--; /* VFs use DYN_CTL_N */
+
+ writel(0, dev->hw_regs[IRDMA_GLINT_DYN_CTL] + (idx * int_stride));
+}
+
+static const struct irdma_irq_ops ig3rdma_irq_ops = {
+ .irdma_dis_irq = ig3rdma_disable_irq,
+ .irdma_en_irq = ig3rdma_ena_irq,
+};
+
+static const struct irdma_hw_stat_map ig3rdma_hw_stat_map[] = {
+ [IRDMA_HW_STAT_INDEX_RXVLANERR] = { 0, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS] = { 8, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS] = { 16, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = { 24, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = { 32, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS] = { 40, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS] = { 48, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS] = { 56, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS] = { 64, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS] = { 72, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = { 80, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = { 88, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS] = { 96, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS] = { 104, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS] = { 112, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS] = { 120, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS] = { 128, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS] = { 136, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS] = { 144, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS] = { 152, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS] = { 160, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS] = { 168, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS] = { 176, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS] = { 184, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS] = { 192, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = { 200, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = { 208, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = { 216, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = { 224, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = { 232, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG] = { 240, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS] = { 248, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS] = { 256, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS] = { 264, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS] = { 272, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS] = { 280, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS] = { 288, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS] = { 296, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS] = { 304, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS] = { 312, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAVBND] = { 320, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAVINV] = { 328, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS] = { 336, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = { 344, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = { 352, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = { 360, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RNR_SENT] = { 368, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RNR_RCVD] = { 376, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT] = { 384, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT] = { 392, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMARXATS] = { 408, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RDMATXATS] = { 416, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR] = { 424, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED] = { 432, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RTO] = { 440, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_RXOOOPKTS] = { 448, 0, 0 },
+ [IRDMA_HW_STAT_INDEX_ICRCERR] = { 456, 0, 0 },
+};
+
+void ig3rdma_init_hw(struct irdma_sc_dev *dev)
+{
+ dev->irq_ops = &ig3rdma_irq_ops;
+ dev->hw_stats_map = ig3rdma_hw_stat_map;
+
+ dev->hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_3;
+ dev->hw_attrs.uk_attrs.max_hw_wq_frags = IG3RDMA_MAX_WQ_FRAGMENT_COUNT;
+ dev->hw_attrs.uk_attrs.max_hw_read_sges = IG3RDMA_MAX_SGE_RD;
+ dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
+ dev->hw_attrs.first_hw_vf_fpm_id = 0;
+ dev->hw_attrs.max_hw_vf_fpm_id = IG3_MAX_APFS + IG3_MAX_AVFS;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_64_BYTE_CQE;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_CQE_TIMESTAMPING;
+
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_SRQ;
+ dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
+ IRDMA_FEATURE_CQ_RESIZE;
+ dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G;
+ dev->hw_attrs.max_hw_ird = IG3RDMA_MAX_IRD_SIZE;
+ dev->hw_attrs.max_hw_ord = IG3RDMA_MAX_ORD_SIZE;
+ dev->hw_attrs.max_stat_inst = IG3RDMA_MAX_STATS_COUNT;
+ dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_3;
+ dev->hw_attrs.uk_attrs.min_hw_wq_size = IG3RDMA_MIN_WQ_SIZE;
+ dev->hw_attrs.uk_attrs.max_hw_srq_quanta = IRDMA_SRQ_MAX_QUANTA;
+ dev->hw_attrs.uk_attrs.max_hw_inline = IG3RDMA_MAX_INLINE_DATA_SIZE;
+ dev->hw_attrs.max_hw_device_pages =
+ dev->is_pf ? IG3RDMA_MAX_PF_PUSH_PAGE_COUNT : IG3RDMA_MAX_VF_PUSH_PAGE_COUNT;
+}
+
+static void __iomem *__ig3rdma_get_reg_addr(struct irdma_mmio_region *region, u64 reg_offset)
+{
+ if (reg_offset >= region->offset &&
+ reg_offset < (region->offset + region->len)) {
+ reg_offset -= region->offset;
+
+ return region->addr + reg_offset;
+ }
+
+ return NULL;
+}
+
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset)
+{
+ u8 __iomem *reg_addr;
+ int i;
+
+ reg_addr = __ig3rdma_get_reg_addr(&hw->rdma_reg, reg_offset);
+ if (reg_addr)
+ return reg_addr;
+
+ for (i = 0; i < hw->num_io_regions; i++) {
+ reg_addr = __ig3rdma_get_reg_addr(&hw->io_regs[i], reg_offset);
+ if (reg_addr)
+ return reg_addr;
+ }
+
+ WARN_ON_ONCE(1);
+
+ return NULL;
+}
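Usage sketch for the lookup above (the offset and value are hypothetical): a miss warns once and returns NULL, so callers should check the result before touching the mapping.

	void __iomem *addr = ig3rdma_get_reg_addr(&rf->hw, reg_offset);

	if (addr)
		writel(val, addr);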
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_hw.h b/drivers/infiniband/hw/irdma/ig3rdma_hw.h
new file mode 100644
index 000000000000..03d5f1188789
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_hw.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2021 - 2024 Intel Corporation */
+#ifndef IG3RDMA_HW_H
+#define IG3RDMA_HW_H
+
+#define IG3_MAX_APFS 1
+#define IG3_MAX_AVFS 0
+
+#define IG3_PF_RDMA_REGION_OFFSET 0xBC00000
+#define IG3_PF_RDMA_REGION_LEN 0x401000
+#define IG3_VF_RDMA_REGION_OFFSET 0x8C00
+#define IG3_VF_RDMA_REGION_LEN 0x8400
+
+enum ig3rdma_device_caps_const {
+ IG3RDMA_MAX_WQ_FRAGMENT_COUNT = 14,
+ IG3RDMA_MAX_SGE_RD = 14,
+
+ IG3RDMA_MAX_STATS_COUNT = 128,
+
+ IG3RDMA_MAX_IRD_SIZE = 64,
+ IG3RDMA_MAX_ORD_SIZE = 64,
+ IG3RDMA_MIN_WQ_SIZE = 16 /* WQEs */,
+ IG3RDMA_MAX_INLINE_DATA_SIZE = 216,
+ IG3RDMA_MAX_PF_PUSH_PAGE_COUNT = 8192,
+ IG3RDMA_MAX_VF_PUSH_PAGE_COUNT = 16,
+};
+
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
+int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
+ u8 *recv_msg, u16 *recv_len);
+
+#endif /* IG3RDMA_HW_H */
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_if.c b/drivers/infiniband/hw/irdma/ig3rdma_if.c
new file mode 100644
index 000000000000..e1d6670d9396
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/ig3rdma_if.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2023 - 2024 Intel Corporation */
+
+#include "main.h"
+#include <linux/net/intel/iidc_rdma_idpf.h>
+#include "ig3rdma_hw.h"
+
+static void ig3rdma_idc_core_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
+{
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(cdev_info->adev);
+
+ if (*event->type & BIT(IIDC_RDMA_EVENT_WARN_RESET)) {
+ rf->reset = true;
+ rf->sc_dev.vchnl_up = false;
+ }
+}
+
+int ig3rdma_vchnl_send_sync(struct irdma_sc_dev *dev, u8 *msg, u16 len,
+ u8 *recv_msg, u16 *recv_len)
+{
+ struct iidc_rdma_core_dev_info *cdev_info = dev_to_rf(dev)->cdev;
+ int ret;
+
+ ret = idpf_idc_rdma_vc_send_sync(cdev_info, msg, len, recv_msg,
+ recv_len);
+ if (ret == -ETIMEDOUT) {
+ ibdev_err(&(dev_to_rf(dev)->iwdev->ibdev),
+ "Virtual channel Req <-> Resp completion timeout\n");
+ dev->vchnl_up = false;
+ }
+
+ return ret;
+}
+
+static int ig3rdma_vchnl_init(struct irdma_pci_f *rf,
+ struct iidc_rdma_core_dev_info *cdev_info,
+ u8 *rdma_ver)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct irdma_vchnl_init_info virt_info;
+ u8 gen = rf->rdma_ver;
+ int ret;
+
+ rf->vchnl_wq = alloc_ordered_workqueue("irdma-virtchnl-wq", 0);
+ if (!rf->vchnl_wq)
+ return -ENOMEM;
+
+ mutex_init(&rf->sc_dev.vchnl_mutex);
+
+ virt_info.is_pf = !idc_priv->ftype;
+ virt_info.hw_rev = gen;
+ virt_info.privileged = gen == IRDMA_GEN_2;
+ virt_info.vchnl_wq = rf->vchnl_wq;
+ ret = irdma_sc_vchnl_init(&rf->sc_dev, &virt_info);
+ if (ret) {
+ destroy_workqueue(rf->vchnl_wq);
+ mutex_destroy(&rf->sc_dev.vchnl_mutex);
+ return ret;
+ }
+
+ *rdma_ver = rf->sc_dev.hw_attrs.uk_attrs.hw_rev;
+
+ return 0;
+}
+
+/**
+ * ig3rdma_request_reset - Request a reset
+ * @rf: RDMA PCI function
+ */
+static void ig3rdma_request_reset(struct irdma_pci_f *rf)
+{
+ ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
+ idpf_idc_request_reset(rf->cdev, IIDC_FUNC_RESET);
+}
+
+static int ig3rdma_cfg_regions(struct irdma_hw *hw,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ struct pci_dev *pdev = cdev_info->pdev;
+ int i;
+
+ switch (idc_priv->ftype) {
+ case IIDC_FUNCTION_TYPE_PF:
+ hw->rdma_reg.len = IG3_PF_RDMA_REGION_LEN;
+ hw->rdma_reg.offset = IG3_PF_RDMA_REGION_OFFSET;
+ break;
+ case IIDC_FUNCTION_TYPE_VF:
+ hw->rdma_reg.len = IG3_VF_RDMA_REGION_LEN;
+ hw->rdma_reg.offset = IG3_VF_RDMA_REGION_OFFSET;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ hw->rdma_reg.addr = ioremap(pci_resource_start(pdev, 0) + hw->rdma_reg.offset,
+ hw->rdma_reg.len);
+
+ if (!hw->rdma_reg.addr)
+ return -ENOMEM;
+
+ hw->num_io_regions = le16_to_cpu(idc_priv->num_memory_regions);
+ hw->io_regs = kcalloc(hw->num_io_regions,
+ sizeof(struct irdma_mmio_region), GFP_KERNEL);
+
+ if (!hw->io_regs) {
+ iounmap(hw->rdma_reg.addr);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hw->num_io_regions; i++) {
+ hw->io_regs[i].addr =
+ idc_priv->mapped_mem_regions[i].region_addr;
+ hw->io_regs[i].len =
+ le64_to_cpu(idc_priv->mapped_mem_regions[i].size);
+ hw->io_regs[i].offset =
+ le64_to_cpu(idc_priv->mapped_mem_regions[i].start_offset);
+ }
+
+ return 0;
+}
+
+static void ig3rdma_decfg_rf(struct irdma_pci_f *rf)
+{
+ struct irdma_hw *hw = &rf->hw;
+
+ mutex_destroy(&rf->ah_tbl_lock);
+ destroy_workqueue(rf->vchnl_wq);
+ mutex_destroy(&rf->sc_dev.vchnl_mutex);
+ kfree(hw->io_regs);
+ iounmap(hw->rdma_reg.addr);
+}
+
+static int ig3rdma_cfg_rf(struct irdma_pci_f *rf,
+ struct iidc_rdma_core_dev_info *cdev_info)
+{
+ struct iidc_rdma_priv_dev_info *idc_priv = cdev_info->iidc_priv;
+ int err;
+
+ rf->sc_dev.hw = &rf->hw;
+ rf->cdev = cdev_info;
+ rf->pcidev = cdev_info->pdev;
+ rf->hw.device = &rf->pcidev->dev;
+ rf->msix_count = idc_priv->msix_count;
+ rf->msix_entries = idc_priv->msix_entries;
+
+ err = ig3rdma_vchnl_init(rf, cdev_info, &rf->rdma_ver);
+ if (err)
+ return err;
+
+ err = ig3rdma_cfg_regions(&rf->hw, cdev_info);
+ if (err) {
+ destroy_workqueue(rf->vchnl_wq);
+ mutex_destroy(&rf->sc_dev.vchnl_mutex);
+ return err;
+ }
+
+ rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
+ rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
+ rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
+ rf->gen_ops.request_reset = ig3rdma_request_reset;
+ rf->limits_sel = 7;
+ mutex_init(&rf->ah_tbl_lock);
+
+ return 0;
+}
+
+static int ig3rdma_core_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_pci_f *rf;
+ int err;
+
+ rf = kzalloc(sizeof(*rf), GFP_KERNEL);
+ if (!rf)
+ return -ENOMEM;
+
+ err = ig3rdma_cfg_rf(rf, cdev_info);
+ if (err)
+ goto err_cfg_rf;
+
+ err = irdma_ctrl_init_hw(rf);
+ if (err)
+ goto err_ctrl_init;
+
+ auxiliary_set_drvdata(aux_dev, rf);
+
+ err = idpf_idc_vport_dev_ctrl(cdev_info, true);
+ if (err)
+ goto err_vport_ctrl;
+
+ return 0;
+
+err_vport_ctrl:
+ irdma_ctrl_deinit_hw(rf);
+err_ctrl_init:
+ ig3rdma_decfg_rf(rf);
+err_cfg_rf:
+ kfree(rf);
+
+ return err;
+}
+
+static void ig3rdma_core_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_core_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
+ struct iidc_rdma_core_dev_info *cdev_info = idc_adev->cdev_info;
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_dev);
+
+ idpf_idc_vport_dev_ctrl(cdev_info, false);
+ irdma_ctrl_deinit_hw(rf);
+ ig3rdma_decfg_rf(rf);
+ kfree(rf);
+}
+
+static const struct auxiliary_device_id ig3rdma_core_auxiliary_id_table[] = {
+ {.name = "idpf.8086.rdma.core", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ig3rdma_core_auxiliary_id_table);
+
+struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv = {
+ .adrv = {
+ .name = "core",
+ .id_table = ig3rdma_core_auxiliary_id_table,
+ .probe = ig3rdma_core_probe,
+ .remove = ig3rdma_core_remove,
+ },
+ .event_handler = ig3rdma_idc_core_event_handler,
+};
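
The probe path above follows the usual kernel goto-unwind convention: each error label undoes only the steps that already succeeded, in reverse order (vport ctrl -> ctrl init -> cfg rf -> kfree). A compilable userspace model of that shape, with hypothetical step/undo names standing in for the driver calls:

#include <stdio.h>

static int step_a(void)  { puts("a: setup"); return 0; }
static void undo_a(void) { puts("a: undo"); }
static int step_b(void)  { puts("b: setup"); return 0; }
static void undo_b(void) { puts("b: undo"); }
static int step_c(void)  { puts("c: fails"); return -1; }

static int demo_probe(void)
{
	int err;

	err = step_a();		/* e.g. ig3rdma_cfg_rf() */
	if (err)
		goto err_a;
	err = step_b();		/* e.g. irdma_ctrl_init_hw() */
	if (err)
		goto err_b;
	err = step_c();		/* e.g. idpf_idc_vport_dev_ctrl() */
	if (err)
		goto err_c;
	return 0;

err_c:
	undo_b();		/* undo of step_b only */
err_b:
	undo_a();		/* undo of step_a only */
err_a:
	return err;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}
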
diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h
index 20d2e7393e3d..ff938a01d70c 100644
--- a/drivers/infiniband/hw/irdma/irdma.h
+++ b/drivers/infiniband/hw/irdma/irdma.h
@@ -32,7 +32,16 @@
#define IRDMA_PFHMC_SDDATALOW_PMSDDATALOW GENMASK(31, 12)
#define IRDMA_PFHMC_SDCMD_PMSDWR BIT(31)
-#define IRDMA_INVALID_CQ_IDX 0xffffffff
+#define IRDMA_INVALID_CQ_IDX 0xffffffff
+#define IRDMA_Q_INVALID_IDX 0xffff
+
+enum irdma_dyn_idx_t {
+ IRDMA_IDX_ITR0 = 0,
+ IRDMA_IDX_ITR1 = 1,
+ IRDMA_IDX_ITR2 = 2,
+ IRDMA_IDX_NOITR = 3,
+};
+
enum irdma_registers {
IRDMA_CQPTAIL,
IRDMA_CQPDB,
@@ -67,6 +76,7 @@ enum irdma_shifts {
IRDMA_CQPSQ_CQ_CEQID_S,
IRDMA_CQPSQ_CQ_CQID_S,
IRDMA_COMMIT_FPM_CQCNT_S,
+ IRDMA_CQPSQ_UPESD_HMCFNID_S,
IRDMA_MAX_SHIFTS,
};
@@ -77,6 +87,7 @@ enum irdma_masks {
IRDMA_CQPSQ_CQ_CEQID_M,
IRDMA_CQPSQ_CQ_CQID_M,
IRDMA_COMMIT_FPM_CQCNT_M,
+ IRDMA_CQPSQ_UPESD_HMCFNID_M,
IRDMA_MAX_MASKS, /* Must be last entry */
};
@@ -92,7 +103,7 @@ struct irdma_mcast_grp_ctx_entry_info {
struct irdma_mcast_grp_info {
u8 dest_mac_addr[ETH_ALEN];
u16 vlan_id;
- u8 hmc_fcn_id;
+ u16 hmc_fcn_id;
bool ipv4_valid:1;
bool vlan_valid:1;
u16 mg_id;
@@ -107,6 +118,9 @@ enum irdma_vers {
IRDMA_GEN_RSVD,
IRDMA_GEN_1,
IRDMA_GEN_2,
+ IRDMA_GEN_3,
+ IRDMA_GEN_NEXT,
+ IRDMA_GEN_MAX = IRDMA_GEN_NEXT - 1
};
struct irdma_uk_attrs {
@@ -118,6 +132,7 @@ struct irdma_uk_attrs {
u32 max_hw_wq_quanta;
u32 min_hw_cq_size;
u32 max_hw_cq_size;
+ u32 max_hw_srq_quanta;
u16 max_hw_sq_chunk;
u16 min_hw_wq_size;
u8 hw_rev;
@@ -147,10 +162,13 @@ struct irdma_hw_attrs {
u32 max_done_count;
u32 max_sleep_count;
u32 max_cqp_compl_wait_time_ms;
+ u32 min_hw_srq_id;
u16 max_stat_inst;
u16 max_stat_idx;
};
void i40iw_init_hw(struct irdma_sc_dev *dev);
void icrdma_init_hw(struct irdma_sc_dev *dev);
+void ig3rdma_init_hw(struct irdma_sc_dev *dev);
+void __iomem *ig3rdma_get_reg_addr(struct irdma_hw *hw, u64 reg_offset);
#endif /* IRDMA_H */
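
The new IRDMA_GEN_3/IRDMA_GEN_NEXT enumerators are what later hunks (pble.c, uk.c) compare hw_rev against. A self-contained sketch of that gating, copying the enum from the hunk above; dev_hw_rev is a hypothetical stand-in for dev->hw_attrs.uk_attrs.hw_rev:

#include <stdio.h>

enum irdma_vers {
	IRDMA_GEN_RSVD,
	IRDMA_GEN_1,
	IRDMA_GEN_2,
	IRDMA_GEN_3,
	IRDMA_GEN_NEXT,
	IRDMA_GEN_MAX = IRDMA_GEN_NEXT - 1,
};

int main(void)
{
	enum irdma_vers dev_hw_rev = IRDMA_GEN_3;

	/* GEN3+ parts take the new paths (SRQ, 64-byte CQEs, FW-side
	 * flush CQEs); older parts keep the GEN1/GEN2 behavior.
	 */
	if (dev_hw_rev >= IRDMA_GEN_3)
		puts("use GEN3 path");
	else
		puts("use legacy GEN1/GEN2 path");

	printf("IRDMA_GEN_MAX = %d\n", IRDMA_GEN_MAX);
	return 0;
}
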
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 1e840bbd619d..95957d52883d 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "main.h"
+#include <linux/net/intel/iidc_rdma_idpf.h>
MODULE_ALIAS("i40iw");
MODULE_DESCRIPTION("Intel(R) Ethernet Protocol Driver for RDMA");
@@ -38,19 +39,7 @@ static void irdma_unregister_notifiers(void)
unregister_netdevice_notifier(&irdma_netdevice_notifier);
}
-static void irdma_prep_tc_change(struct irdma_device *iwdev)
-{
- iwdev->vsi.tc_change_pending = true;
- irdma_sc_suspend_resume_qps(&iwdev->vsi, IRDMA_OP_SUSPEND);
-
- /* Wait for all qp's to suspend */
- wait_event_timeout(iwdev->suspend_wq,
- !atomic_read(&iwdev->vsi.qp_suspend_reqs),
- msecs_to_jiffies(IRDMA_EVENT_TIMEOUT_MS));
- irdma_ws_reset(&iwdev->vsi);
-}
-
-static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
+void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
{
if (mtu < IRDMA_MIN_MTU_IPV4)
ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu);
@@ -58,35 +47,10 @@ static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev)
ibdev_warn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n", mtu);
}
-static void irdma_fill_qos_info(struct irdma_l2params *l2params,
- struct iidc_rdma_qos_params *qos_info)
+static void ig3rdma_idc_vport_event_handler(struct iidc_rdma_vport_dev_info *cdev_info,
+ struct iidc_rdma_event *event)
{
- int i;
-
- l2params->num_tc = qos_info->num_tc;
- l2params->vsi_prio_type = qos_info->vport_priority_type;
- l2params->vsi_rel_bw = qos_info->vport_relative_bw;
- for (i = 0; i < l2params->num_tc; i++) {
- l2params->tc_info[i].egress_virt_up =
- qos_info->tc_info[i].egress_virt_up;
- l2params->tc_info[i].ingress_virt_up =
- qos_info->tc_info[i].ingress_virt_up;
- l2params->tc_info[i].prio_type = qos_info->tc_info[i].prio_type;
- l2params->tc_info[i].rel_bw = qos_info->tc_info[i].rel_bw;
- l2params->tc_info[i].tc_ctx = qos_info->tc_info[i].tc_ctx;
- }
- for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
- l2params->up2tc[i] = qos_info->up2tc[i];
- if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) {
- l2params->dscp_mode = true;
- memcpy(l2params->dscp_map, qos_info->dscp_map, sizeof(l2params->dscp_map));
- }
-}
-
-static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
- struct iidc_rdma_event *event)
-{
- struct irdma_device *iwdev = dev_get_drvdata(&cdev_info->adev->dev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(cdev_info->adev);
struct irdma_l2params l2params = {};
if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE)) {
@@ -97,248 +61,39 @@ static void irdma_iidc_event_handler(struct iidc_rdma_core_dev_info *cdev_info,
irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
irdma_change_l2params(&iwdev->vsi, &l2params);
}
- } else if (*event->type & BIT(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE)) {
- if (iwdev->vsi.tc_change_pending)
- return;
-
- irdma_prep_tc_change(iwdev);
- } else if (*event->type & BIT(IIDC_RDMA_EVENT_AFTER_TC_CHANGE)) {
- struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
-
- if (!iwdev->vsi.tc_change_pending)
- return;
-
- l2params.tc_changed = true;
- ibdev_dbg(&iwdev->ibdev, "CLNT: TC Change\n");
-
- irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
- if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
- iwdev->dcb_vlan_mode =
- l2params.num_tc > 1 && !l2params.dscp_mode;
- irdma_change_l2params(&iwdev->vsi, &l2params);
- } else if (*event->type & BIT(IIDC_RDMA_EVENT_CRIT_ERR)) {
- ibdev_warn(&iwdev->ibdev, "ICE OICR event notification: oicr = 0x%08x\n",
- event->reg);
- if (event->reg & IRDMAPFINT_OICR_PE_CRITERR_M) {
- u32 pe_criterr;
-
- pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
-#define IRDMA_Q1_RESOURCE_ERR 0x0001024d
- if (pe_criterr != IRDMA_Q1_RESOURCE_ERR) {
- ibdev_err(&iwdev->ibdev, "critical PE Error, GLPE_CRITERR=0x%08x\n",
- pe_criterr);
- iwdev->rf->reset = true;
- } else {
- ibdev_warn(&iwdev->ibdev, "Q1 Resource Check\n");
- }
- }
- if (event->reg & IRDMAPFINT_OICR_HMC_ERR_M) {
- ibdev_err(&iwdev->ibdev, "HMC Error\n");
- iwdev->rf->reset = true;
- }
- if (event->reg & IRDMAPFINT_OICR_PE_PUSH_M) {
- ibdev_err(&iwdev->ibdev, "PE Push Error\n");
- iwdev->rf->reset = true;
- }
- if (iwdev->rf->reset)
- iwdev->rf->gen_ops.request_reset(iwdev->rf);
}
}
-/**
- * irdma_request_reset - Request a reset
- * @rf: RDMA PCI function
- */
-static void irdma_request_reset(struct irdma_pci_f *rf)
-{
- ibdev_warn(&rf->iwdev->ibdev, "Requesting a reset\n");
- ice_rdma_request_reset(rf->cdev, IIDC_FUNC_RESET);
-}
-
-/**
- * irdma_lan_register_qset - Register qset with LAN driver
- * @vsi: vsi structure
- * @tc_node: Traffic class node
- */
-static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node)
-{
- struct irdma_device *iwdev = vsi->back_vsi;
- struct iidc_rdma_core_dev_info *cdev_info;
- struct iidc_rdma_qset_params qset = {};
- int ret;
-
- cdev_info = iwdev->rf->cdev;
- qset.qs_handle = tc_node->qs_handle;
- qset.tc = tc_node->traffic_class;
- qset.vport_id = vsi->vsi_idx;
- ret = ice_add_rdma_qset(cdev_info, &qset);
- if (ret) {
- ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
- return ret;
- }
-
- tc_node->l2_sched_node_id = qset.teid;
- vsi->qos[tc_node->user_pri].l2_sched_node_id = qset.teid;
-
- return 0;
-}
-
-/**
- * irdma_lan_unregister_qset - Unregister qset with LAN driver
- * @vsi: vsi structure
- * @tc_node: Traffic class node
- */
-static void irdma_lan_unregister_qset(struct irdma_sc_vsi *vsi,
- struct irdma_ws_node *tc_node)
+static int ig3rdma_vport_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
{
- struct irdma_device *iwdev = vsi->back_vsi;
- struct iidc_rdma_core_dev_info *cdev_info;
- struct iidc_rdma_qset_params qset = {};
-
- cdev_info = iwdev->rf->cdev;
- qset.qs_handle = tc_node->qs_handle;
- qset.tc = tc_node->traffic_class;
- qset.vport_id = vsi->vsi_idx;
- qset.teid = tc_node->l2_sched_node_id;
-
- if (ice_del_rdma_qset(cdev_info, &qset))
- ibdev_dbg(&iwdev->ibdev, "WS: LAN free_res for rdma qset failed.\n");
-}
-
-static int irdma_init_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
-{
- int i;
-
- rf->msix_count = num_online_cpus() + IRDMA_NUM_AEQ_MSIX;
- rf->msix_entries = kcalloc(rf->msix_count, sizeof(*rf->msix_entries),
- GFP_KERNEL);
- if (!rf->msix_entries)
- return -ENOMEM;
-
- for (i = 0; i < rf->msix_count; i++)
- if (ice_alloc_rdma_qvector(cdev, &rf->msix_entries[i]))
- break;
-
- if (i < IRDMA_MIN_MSIX) {
- while (--i >= 0)
- ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
+ struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+ struct auxiliary_device *aux_core_dev = idc_adev->vdev_info->core_adev;
+ struct irdma_pci_f *rf = auxiliary_get_drvdata(aux_core_dev);
+ struct irdma_l2params l2params = {};
+ struct irdma_device *iwdev;
+ int err;
- kfree(rf->msix_entries);
+ if (!rf) {
+ WARN_ON_ONCE(1);
return -ENOMEM;
}
-
- rf->msix_count = i;
-
- return 0;
-}
-
-static void irdma_deinit_interrupts(struct irdma_pci_f *rf, struct iidc_rdma_core_dev_info *cdev)
-{
- int i;
-
- for (i = 0; i < rf->msix_count; i++)
- ice_free_rdma_qvector(cdev, &rf->msix_entries[i]);
-
- kfree(rf->msix_entries);
-}
-
-static void irdma_remove(struct auxiliary_device *aux_dev)
-{
- struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
- struct iidc_rdma_core_auxiliary_dev *iidc_adev;
- struct iidc_rdma_core_dev_info *cdev_info;
-
- iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
- cdev_info = iidc_adev->cdev_info;
-
- ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
- irdma_ib_unregister_device(iwdev);
- irdma_deinit_interrupts(iwdev->rf, cdev_info);
-
- kfree(iwdev->rf);
-
- pr_debug("INIT: Gen2 PF[%d] device remove success\n", PCI_FUNC(cdev_info->pdev->devfn));
-}
-
-static void irdma_fill_device_info(struct irdma_device *iwdev,
- struct iidc_rdma_core_dev_info *cdev_info)
-{
- struct iidc_rdma_priv_dev_info *iidc_priv = cdev_info->iidc_priv;
- struct irdma_pci_f *rf = iwdev->rf;
-
- rf->sc_dev.hw = &rf->hw;
- rf->iwdev = iwdev;
- rf->cdev = cdev_info;
- rf->hw.hw_addr = iidc_priv->hw_addr;
- rf->pcidev = cdev_info->pdev;
- rf->hw.device = &rf->pcidev->dev;
- rf->pf_id = iidc_priv->pf_id;
- rf->gen_ops.register_qset = irdma_lan_register_qset;
- rf->gen_ops.unregister_qset = irdma_lan_unregister_qset;
-
- rf->default_vsi.vsi_idx = iidc_priv->vport_id;
- rf->protocol_used =
- cdev_info->rdma_protocol == IIDC_RDMA_PROTOCOL_ROCEV2 ?
- IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
- rf->rdma_ver = IRDMA_GEN_2;
- rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
- rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
- rf->gen_ops.request_reset = irdma_request_reset;
- rf->limits_sel = 7;
- rf->iwdev = iwdev;
-
- mutex_init(&iwdev->ah_tbl_lock);
-
- iwdev->netdev = iidc_priv->netdev;
- iwdev->vsi_num = iidc_priv->vport_id;
+ iwdev = ib_alloc_device(irdma_device, ibdev);
+ if (!iwdev)
+ return -ENOMEM;
+
+ /* Fill iwdev info */
+ iwdev->is_vport = true;
+ iwdev->rf = rf;
+ iwdev->vport_id = idc_adev->vdev_info->vport_id;
+ iwdev->netdev = idc_adev->vdev_info->netdev;
iwdev->init_state = INITIAL_STATE;
iwdev->roce_cwnd = IRDMA_ROCE_CWND_DEFAULT;
iwdev->roce_ackcreds = IRDMA_ROCE_ACKCREDS_DEFAULT;
iwdev->rcv_wnd = IRDMA_CM_DEFAULT_RCV_WND_SCALED;
iwdev->rcv_wscale = IRDMA_CM_DEFAULT_RCV_WND_SCALE;
- if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY)
- iwdev->roce_mode = true;
-}
-
-static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_device_id *id)
-{
- struct iidc_rdma_core_auxiliary_dev *iidc_adev;
- struct iidc_rdma_core_dev_info *cdev_info;
- struct iidc_rdma_priv_dev_info *iidc_priv;
- struct irdma_l2params l2params = {};
- struct irdma_device *iwdev;
- struct irdma_pci_f *rf;
- int err;
-
- iidc_adev = container_of(aux_dev, struct iidc_rdma_core_auxiliary_dev, adev);
- cdev_info = iidc_adev->cdev_info;
- iidc_priv = cdev_info->iidc_priv;
-
- iwdev = ib_alloc_device(irdma_device, ibdev);
- if (!iwdev)
- return -ENOMEM;
- iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
- if (!iwdev->rf) {
- ib_dealloc_device(&iwdev->ibdev);
- return -ENOMEM;
- }
-
- irdma_fill_device_info(iwdev, cdev_info);
- rf = iwdev->rf;
-
- err = irdma_init_interrupts(rf, cdev_info);
- if (err)
- goto err_init_interrupts;
-
- err = irdma_ctrl_init_hw(rf);
- if (err)
- goto err_ctrl_init;
+ iwdev->roce_mode = true;
+ iwdev->push_mode = false;
l2params.mtu = iwdev->netdev->mtu;
- irdma_fill_qos_info(&l2params, &iidc_priv->qos_info);
- if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
- iwdev->dcb_vlan_mode = l2params.num_tc > 1 && !l2params.dscp_mode;
err = irdma_rt_init_hw(iwdev, &l2params);
if (err)
@@ -348,43 +103,57 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
if (err)
goto err_ibreg;
- ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, true);
-
- ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
auxiliary_set_drvdata(aux_dev, iwdev);
- return 0;
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: Gen[%d] vport[%d] probe success. dev_name = %s, core_dev_name = %s, netdev=%s\n",
+ rf->rdma_ver, idc_adev->vdev_info->vport_id,
+ dev_name(&aux_dev->dev),
+ dev_name(&idc_adev->vdev_info->core_adev->dev),
+ netdev_name(idc_adev->vdev_info->netdev));
+ return 0;
err_ibreg:
irdma_rt_deinit_hw(iwdev);
err_rt_init:
- irdma_ctrl_deinit_hw(rf);
-err_ctrl_init:
- irdma_deinit_interrupts(rf, cdev_info);
-err_init_interrupts:
- kfree(iwdev->rf);
ib_dealloc_device(&iwdev->ibdev);
return err;
}
-static const struct auxiliary_device_id irdma_auxiliary_id_table[] = {
- {.name = "ice.iwarp", },
- {.name = "ice.roce", },
+static void ig3rdma_vport_remove(struct auxiliary_device *aux_dev)
+{
+ struct iidc_rdma_vport_auxiliary_dev *idc_adev =
+ container_of(aux_dev, struct iidc_rdma_vport_auxiliary_dev, adev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
+
+ ibdev_dbg(&iwdev->ibdev,
+ "INIT: Gen[%d] dev_name = %s, core_dev_name = %s, netdev=%s\n",
+ iwdev->rf->rdma_ver, dev_name(&aux_dev->dev),
+ dev_name(&idc_adev->vdev_info->core_adev->dev),
+ netdev_name(idc_adev->vdev_info->netdev));
+
+ irdma_ib_unregister_device(iwdev);
+}
+
+static const struct auxiliary_device_id ig3rdma_vport_auxiliary_id_table[] = {
+ {.name = "idpf.8086.rdma.vdev", },
{},
};
-MODULE_DEVICE_TABLE(auxiliary, irdma_auxiliary_id_table);
+MODULE_DEVICE_TABLE(auxiliary, ig3rdma_vport_auxiliary_id_table);
-static struct iidc_rdma_core_auxiliary_drv irdma_auxiliary_drv = {
+static struct iidc_rdma_vport_auxiliary_drv ig3rdma_vport_auxiliary_drv = {
.adrv = {
- .id_table = irdma_auxiliary_id_table,
- .probe = irdma_probe,
- .remove = irdma_remove,
+ .name = "vdev",
+ .id_table = ig3rdma_vport_auxiliary_id_table,
+ .probe = ig3rdma_vport_probe,
+ .remove = ig3rdma_vport_remove,
},
- .event_handler = irdma_iidc_event_handler,
+ .event_handler = ig3rdma_idc_vport_event_handler,
};
+
static int __init irdma_init_module(void)
{
int ret;
@@ -396,14 +165,34 @@ static int __init irdma_init_module(void)
return ret;
}
- ret = auxiliary_driver_register(&irdma_auxiliary_drv.adrv);
+ ret = auxiliary_driver_register(&icrdma_core_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed icrdma(gen_2) auxiliary_driver_register() ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = auxiliary_driver_register(&ig3rdma_core_auxiliary_drv.adrv);
if (ret) {
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
- pr_err("Failed irdma auxiliary_driver_register() ret=%d\n",
+ pr_err("Failed ig3rdma(gen_3) core auxiliary_driver_register() ret=%d\n",
ret);
+
return ret;
}
+ ret = auxiliary_driver_register(&ig3rdma_vport_auxiliary_drv.adrv);
+ if (ret) {
+ auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ pr_err("Failed ig3rdma vport auxiliary_driver_register() ret=%d\n",
+ ret);
+
+ return ret;
+ }
irdma_register_notifiers();
return 0;
@@ -412,8 +201,10 @@ static int __init irdma_init_module(void)
static void __exit irdma_exit_module(void)
{
irdma_unregister_notifiers();
- auxiliary_driver_unregister(&irdma_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&icrdma_core_auxiliary_drv.adrv);
auxiliary_driver_unregister(&i40iw_auxiliary_drv);
+ auxiliary_driver_unregister(&ig3rdma_core_auxiliary_drv.adrv);
+ auxiliary_driver_unregister(&ig3rdma_vport_auxiliary_drv.adrv);
}
module_init(irdma_init_module);
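
irdma_init_module() now registers four auxiliary drivers and, when any registration fails, unregisters everything registered so far before bailing out. The patch writes that longhand; a table-driven userspace model of the same accumulate-then-unwind pattern (all names hypothetical):

#include <stdio.h>

struct demo_drv {
	const char *name;
	int (*reg)(void);
	void (*unreg)(void);
};

static int ok(void)    { return 0; }
static int fail(void)  { return -16; /* -EBUSY */ }
static void noop(void) { }

int main(void)
{
	struct demo_drv drvs[] = {
		{ "i40iw",         ok,   noop },
		{ "icrdma core",   ok,   noop },
		{ "ig3rdma core",  ok,   noop },
		{ "ig3rdma vport", fail, noop },
	};
	int i, n = sizeof(drvs) / sizeof(drvs[0]);
	int err;

	for (i = 0; i < n; i++) {
		err = drvs[i].reg();
		if (err) {
			fprintf(stderr, "Failed %s register, err=%d\n",
				drvs[i].name, err);
			/* unwind everything already registered */
			while (--i >= 0)
				drvs[i].unreg();
			return 1;
		}
	}
	puts("all drivers registered");
	return 0;
}
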
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 674acc952168..baab61e424a2 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -30,7 +30,6 @@
#endif
#include <linux/auxiliary_bus.h>
#include <linux/net/intel/iidc_rdma.h>
-#include <linux/net/intel/iidc_rdma_ice.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@@ -54,6 +53,8 @@
#include "puda.h"
extern struct auxiliary_driver i40iw_auxiliary_drv;
+extern struct iidc_rdma_core_auxiliary_drv icrdma_core_auxiliary_drv;
+extern struct iidc_rdma_core_auxiliary_drv ig3rdma_core_auxiliary_drv;
#define IRDMA_FW_VER_DEFAULT 2
#define IRDMA_HW_VER 2
@@ -65,7 +66,8 @@ extern struct auxiliary_driver i40iw_auxiliary_drv;
#define IRDMA_MACIP_ADD 1
#define IRDMA_MACIP_DELETE 2
-#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 1)
+#define IW_GEN_3_CCQ_SIZE (2 * IRDMA_CQP_SW_SQSIZE_2048 + 2)
+#define IW_CCQ_SIZE (IRDMA_CQP_SW_SQSIZE_2048 + 2)
#define IW_CEQ_SIZE 2048
#define IW_AEQ_SIZE 2048
@@ -127,12 +129,12 @@ enum init_completion_state {
HMC_OBJS_CREATED,
HW_RSRC_INITIALIZED,
CCQ_CREATED,
- CEQ0_CREATED, /* Last state of probe */
- ILQ_CREATED,
- IEQ_CREATED,
+ CEQ0_CREATED,
CEQS_CREATED,
PBLE_CHUNK_MEM,
AEQ_CREATED,
+ ILQ_CREATED,
+ IEQ_CREATED, /* Last state of probe */
IP_ADDR_REGISTERED, /* Last state of open */
};
@@ -167,6 +169,7 @@ struct irdma_cqp_request {
bool request_done; /* READ/WRITE_ONCE macros operate on it */
bool waiting:1;
bool dynamic:1;
+ bool pending:1;
};
struct irdma_cqp {
@@ -179,6 +182,7 @@ struct irdma_cqp {
struct irdma_dma_mem host_ctx;
u64 *scratch_array;
struct irdma_cqp_request *cqp_requests;
+ struct irdma_ooo_cqp_op *ooo_op_array;
struct list_head cqp_avail_reqs;
struct list_head cqp_pending_reqs;
};
@@ -257,6 +261,7 @@ struct irdma_pci_f {
bool reset:1;
bool rsrc_created:1;
bool msix_shared:1;
+ bool hwqp1_rsvd:1;
u8 rsrc_profile;
u8 *hmc_info_mem;
u8 *mem_rsrc;
@@ -269,6 +274,8 @@ struct irdma_pci_f {
u32 max_mr;
u32 max_qp;
u32 max_cq;
+ u32 max_srq;
+ u32 next_srq;
u32 max_ah;
u32 next_ah;
u32 max_mcg;
@@ -282,6 +289,7 @@ struct irdma_pci_f {
u32 mr_stagmask;
u32 used_pds;
u32 used_cqs;
+ u32 used_srqs;
u32 used_mrs;
u32 used_qps;
u32 arp_table_size;
@@ -293,6 +301,7 @@ struct irdma_pci_f {
unsigned long *allocated_ws_nodes;
unsigned long *allocated_qps;
unsigned long *allocated_cqs;
+ unsigned long *allocated_srqs;
unsigned long *allocated_mrs;
unsigned long *allocated_pds;
unsigned long *allocated_mcgs;
@@ -327,10 +336,13 @@ struct irdma_pci_f {
wait_queue_head_t vchnl_waitq;
struct workqueue_struct *cqp_cmpl_wq;
struct work_struct cqp_cmpl_work;
+ struct workqueue_struct *vchnl_wq;
struct irdma_sc_vsi default_vsi;
void *back_fcn;
struct irdma_gen_ops gen_ops;
struct irdma_device *iwdev;
+ DECLARE_HASHTABLE(ah_hash_tbl, 8);
+ struct mutex ah_tbl_lock; /* protect AH hash table access */
};
struct irdma_device {
@@ -340,8 +352,6 @@ struct irdma_device {
struct workqueue_struct *cleanup_wq;
struct irdma_sc_vsi vsi;
struct irdma_cm_core cm_core;
- DECLARE_HASHTABLE(ah_hash_tbl, 8);
- struct mutex ah_tbl_lock; /* protect AH hash table access */
u32 roce_cwnd;
u32 roce_ackcreds;
u32 vendor_id;
@@ -350,12 +360,14 @@ struct irdma_device {
u32 rcv_wnd;
u16 mac_ip_table_idx;
u16 vsi_num;
+ u16 vport_id;
u8 rcv_wscale;
u8 iw_status;
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb_vlan_mode:1;
bool iw_ooo:1;
+ bool is_vport:1;
enum init_completion_state init_state;
wait_queue_head_t suspend_wq;
@@ -413,6 +425,11 @@ static inline struct irdma_pci_f *dev_to_rf(struct irdma_sc_dev *dev)
return container_of(dev, struct irdma_pci_f, sc_dev);
}
+static inline struct irdma_srq *to_iwsrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct irdma_srq, ibsrq);
+}
+
/**
* irdma_alloc_resource - allocate a resource
* @iwdev: device pointer
@@ -508,7 +525,8 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
void irdma_cq_add_ref(struct ib_cq *ibcq);
void irdma_cq_rem_ref(struct ib_cq *ibcq);
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
-
+void irdma_srq_event(struct irdma_sc_srq *srq);
+void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
struct irdma_modify_qp_info *info, bool wait);
@@ -538,7 +556,7 @@ void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
u16 irdma_get_vlan_ipv4(u32 *addr);
void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
- int acc, u64 *iova_start);
+ int acc, u64 *iova_start, bool dma_mr);
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
@@ -546,7 +564,6 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
void *cb_param);
void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
-bool irdma_cq_empty(struct irdma_cq *iwcq);
int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
@@ -557,4 +574,5 @@ int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
void *ptr);
void irdma_add_ip(struct irdma_device *iwdev);
void cqp_compl_worker(struct work_struct *work);
+void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev);
#endif /* IRDMA_MAIN_H */
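
The new to_iwsrq() helper is the stock container_of() idiom: the verbs core hands the driver a pointer to the embedded ib_srq, and the driver recovers its wrapping irdma_srq. A standalone model (the demo_* types are hypothetical stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_ibsrq { int id; };

struct demo_srq {
	int depth;
	struct demo_ibsrq ibsrq;	/* embedded core object */
};

static struct demo_srq *to_demo_srq(struct demo_ibsrq *ibsrq)
{
	return container_of(ibsrq, struct demo_srq, ibsrq);
}

int main(void)
{
	struct demo_srq srq = { .depth = 128, .ibsrq = { .id = 7 } };
	struct demo_ibsrq *core = &srq.ibsrq;

	/* the verbs layer hands back &srq.ibsrq; recover the driver
	 * structure wrapped around it
	 */
	printf("depth via container_of: %d\n", to_demo_srq(core)->depth);
	return 0;
}
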
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index 37ce35cb10e7..28dfad7f940c 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
{
- idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
+ idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}
@@ -193,8 +193,15 @@ static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
{
enum irdma_sd_entry_type sd_entry_type;
- sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
- IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ sd_entry_type = (!idx->rel_pd_idx &&
+ pages == IRDMA_HMC_PD_CNT_IN_SD) ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
+ else
+ sd_entry_type = (!idx->rel_pd_idx &&
+ pages == IRDMA_HMC_PD_CNT_IN_SD &&
+ dev->privileged) ?
+ IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
return sd_entry_type;
}
@@ -279,10 +286,11 @@ static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
sd_entry->u.pd_table.pd_page_addr.pa :
sd_entry->u.bp.addr.pa;
-
- if (!sd_entry->valid) {
- ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
- idx->sd_idx, sd_entry->entry_type, true);
+ if ((dev->privileged && !sd_entry->valid) ||
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id,
+ sd_reg_val, idx->sd_idx,
+ sd_entry->entry_type, true);
if (ret_code)
goto error;
}
@@ -498,12 +506,14 @@ exit:
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc)
{
- pble_rsrc->freedpbles += palloc->total_cnt;
-
if (palloc->level == PBLE_LEVEL_2)
free_lvl2(pble_rsrc, palloc);
else
irdma_prm_return_pbles(&pble_rsrc->pinfo,
&palloc->level1.chunkinfo);
+
+ mutex_lock(&pble_rsrc->pble_mutex_lock);
+ pble_rsrc->freedpbles += palloc->total_cnt;
pble_rsrc->stats_alloc_freed++;
+ mutex_unlock(&pble_rsrc->pble_mutex_lock);
}
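
Two things are worth noting in the pble.c hunks: the dropped (u32) cast in get_sd_pd_idx() matters because the old code cast next_fpm_addr to 32 bits before dividing, truncating large FPM addresses; and the sd/pd indices are plain integer division and modulo. A sketch of that arithmetic, assuming the conventional HMC geometry (2MB direct backing pages, 4KB paged backing pages, 512 PDs per SD; these constants are assumptions, not quoted from the patch):

#include <stdint.h>
#include <stdio.h>

#define DEMO_DIRECT_BP_SIZE	0x200000u	/* 2MB per SD (assumed) */
#define DEMO_PAGED_BP_SIZE	0x1000u		/* 4KB per PD (assumed) */
#define DEMO_PD_CNT_IN_SD	512u

int main(void)
{
	uint64_t next_fpm_addr = 0x01234000;	/* example FPM offset */
	uint32_t sd_idx = next_fpm_addr / DEMO_DIRECT_BP_SIZE;
	uint32_t pd_idx = next_fpm_addr / DEMO_PAGED_BP_SIZE;
	uint32_t rel_pd_idx = pd_idx % DEMO_PD_CNT_IN_SD;

	/* rel_pd_idx == 0 means the address is SD-aligned, which is
	 * one precondition for choosing IRDMA_SD_TYPE_DIRECT
	 */
	printf("sd=%u pd=%u rel_pd=%u\n", sd_idx, pd_idx, rel_pd_idx);
	return 0;
}
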
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index c0c9441885d3..324cfbf21764 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -10,6 +10,7 @@
#define ALL_TC2PFC 0xff
#define CQP_COMPL_WAIT_TIME_MS 10
#define CQP_TIMEOUT_THRESHOLD 500
+#define CQP_DEF_CMPL_TIMEOUT_THRESHOLD 2500
/* init operations */
int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
index 694e5a9ed15d..cee47ddbd1b5 100644
--- a/drivers/infiniband/hw/irdma/puda.c
+++ b/drivers/infiniband/hw/irdma/puda.c
@@ -685,7 +685,6 @@ static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
ukqp->rq_size = rsrc->rq_size;
IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
- IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
@@ -726,7 +725,6 @@ static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
struct irdma_sc_cqp *cqp;
u64 hdr;
struct irdma_ccq_cqe_info compl_info;
- int status = 0;
cqp = dev->cqp;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
@@ -756,16 +754,8 @@ static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16,
8, wqe, IRDMA_CQP_WQE_SIZE * 8, false);
irdma_sc_cqp_post_sq(dev->cqp);
- status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
- &compl_info);
- if (!status) {
- struct irdma_sc_ceq *ceq = dev->ceq[0];
-
- if (ceq && ceq->reg_cq)
- status = irdma_sc_add_cq_ctx(ceq, cq);
- }
-
- return status;
+ return irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
+ &compl_info);
}
/**
@@ -897,23 +887,17 @@ void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
struct irdma_puda_buf *buf = NULL;
struct irdma_puda_buf *nextbuf = NULL;
struct irdma_virt_mem *vmem;
- struct irdma_sc_ceq *ceq;
- ceq = vsi->dev->ceq[0];
switch (type) {
case IRDMA_PUDA_RSRC_TYPE_ILQ:
rsrc = vsi->ilq;
vmem = &vsi->ilq_mem;
vsi->ilq = NULL;
- if (ceq && ceq->reg_cq)
- irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
break;
case IRDMA_PUDA_RSRC_TYPE_IEQ:
rsrc = vsi->ieq;
vmem = &vsi->ieq_mem;
vsi->ieq = NULL;
- if (ceq && ceq->reg_cq)
- irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
break;
default:
ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",
diff --git a/drivers/infiniband/hw/irdma/puda.h b/drivers/infiniband/hw/irdma/puda.h
index 2fc638f2b143..d65041bee667 100644
--- a/drivers/infiniband/hw/irdma/puda.h
+++ b/drivers/infiniband/hw/irdma/puda.h
@@ -91,7 +91,7 @@ struct irdma_puda_rsrc_info {
u32 rq_size;
u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */
u16 buf_size;
- u8 stats_idx;
+ u16 stats_idx;
bool stats_idx_valid:1;
int abi_ver;
};
@@ -140,7 +140,7 @@ struct irdma_puda_rsrc {
u64 crc_err;
u64 pmode_count;
u64 partials_handled;
- u8 stats_idx;
+ u16 stats_idx;
bool check_crc:1;
bool stats_idx_valid:1;
};
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 527c6da2c1ac..cab4896640a1 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -8,6 +8,8 @@
#include "hmc.h"
#include "uda.h"
#include "ws.h"
+#include "virtchnl.h"
+
#define IRDMA_DEBUG_ERR "ERR"
#define IRDMA_DEBUG_INIT "INIT"
#define IRDMA_DEBUG_DEV "DEV"
@@ -95,12 +97,6 @@ enum irdma_term_mpa_errors {
MPA_REQ_RSP = 0x04,
};
-enum irdma_qp_event_type {
- IRDMA_QP_EVENT_CATASTROPHIC,
- IRDMA_QP_EVENT_ACCESS_ERR,
- IRDMA_QP_EVENT_REQ_ERR,
-};
-
enum irdma_hw_stats_index {
/* gen1 - 32-bit */
IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
@@ -154,12 +150,46 @@ enum irdma_hw_stats_index {
IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 44,
IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 45,
IRDMA_HW_STAT_INDEX_MAX_GEN_2 = 46,
+
+ /* gen3 */
+ IRDMA_HW_STAT_INDEX_RNR_SENT = 46,
+ IRDMA_HW_STAT_INDEX_RNR_RCVD = 47,
+ IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT = 48,
+ IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT = 49,
+ IRDMA_HW_STAT_INDEX_RDMARXATS = 50,
+ IRDMA_HW_STAT_INDEX_RDMATXATS = 51,
+ IRDMA_HW_STAT_INDEX_NAKSEQERR = 52,
+ IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED = 53,
+ IRDMA_HW_STAT_INDEX_RTO = 54,
+ IRDMA_HW_STAT_INDEX_RXOOOPKTS = 55,
+ IRDMA_HW_STAT_INDEX_ICRCERR = 56,
+
+ IRDMA_HW_STAT_INDEX_MAX_GEN_3 = 57,
};
enum irdma_feature_type {
IRDMA_FEATURE_FW_INFO = 0,
IRDMA_HW_VERSION_INFO = 1,
+ IRDMA_QP_MAX_INCR = 2,
+ IRDMA_CQ_MAX_INCR = 3,
+ IRDMA_CEQ_MAX_INCR = 4,
+ IRDMA_SD_MAX_INCR = 5,
+ IRDMA_MR_MAX_INCR = 6,
+ IRDMA_Q1_MAX_INCR = 7,
+ IRDMA_AH_MAX_INCR = 8,
+ IRDMA_SRQ_MAX_INCR = 9,
+ IRDMA_TIMER_MAX_INCR = 10,
+ IRDMA_XF_MAX_INCR = 11,
+ IRDMA_RRF_MAX_INCR = 12,
+ IRDMA_PBLE_MAX_INCR = 13,
+ IRDMA_OBJ_1 = 22,
+ IRDMA_OBJ_2 = 23,
+ IRDMA_ENDPT_TRK = 24,
+ IRDMA_FTN_INLINE_MAX = 25,
IRDMA_QSETS_MAX = 26,
+ IRDMA_ASO = 27,
+ IRDMA_FTN_FLAGS = 32,
+ IRDMA_FTN_NOP = 33,
IRDMA_MAX_FEATURES, /* Must be last entry */
};
@@ -206,6 +236,7 @@ enum irdma_syn_rst_handling {
enum irdma_queue_type {
IRDMA_QUEUE_TYPE_SQ_RQ = 0,
IRDMA_QUEUE_TYPE_CQP,
+ IRDMA_QUEUE_TYPE_SRQ,
};
struct irdma_sc_dev;
@@ -233,12 +264,22 @@ struct irdma_cqp_init_info {
__le64 *host_ctx;
u64 *scratch_array;
u32 sq_size;
+ struct irdma_ooo_cqp_op *ooo_op_array;
+ u32 pe_en_vf_cnt;
u16 hw_maj_ver;
u16 hw_min_ver;
u8 struct_ver;
u8 hmc_profile;
u8 ena_vf_count;
u8 ceqs_per_vf;
+ u8 ooisc_blksize;
+ u8 rrsp_blksize;
+ u8 q1_blksize;
+ u8 xmit_blksize;
+ u8 ts_override;
+ u8 ts_shift;
+ u8 en_fine_grained_timers;
+ u8 blksizes_valid;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -310,9 +351,21 @@ struct irdma_vsi_pestat {
spinlock_t lock; /* rdma stats lock */
};
+struct irdma_mmio_region {
+ u8 __iomem *addr;
+ resource_size_t len;
+ resource_size_t offset;
+};
+
struct irdma_hw {
- u8 __iomem *hw_addr;
- u8 __iomem *priv_hw_addr;
+ union {
+ u8 __iomem *hw_addr;
+ struct {
+ struct irdma_mmio_region rdma_reg; /* RDMA region */
+ struct irdma_mmio_region *io_regs; /* Non-RDMA MMIO regions */
+ u16 num_io_regions; /* Number of Non-RDMA MMIO regions */
+ };
+ };
struct device *device;
struct irdma_hmc_info hmc;
};
@@ -351,7 +404,21 @@ struct irdma_cqp_quanta {
__le64 elem[IRDMA_CQP_WQE_SIZE];
};
+struct irdma_ooo_cqp_op {
+ struct list_head list_entry;
+ u64 scratch;
+ u32 def_info;
+ u32 sw_def_info;
+ u32 wqe_idx;
+ bool deferred:1;
+};
+
struct irdma_sc_cqp {
+ spinlock_t ooo_list_lock; /* protects list of pending completions */
+ struct list_head ooo_avail;
+ struct list_head ooo_pnd;
+ u32 last_def_cmpl_ticket;
+ u32 sw_def_cmpl_ticket;
u32 size;
u64 sq_pa;
u64 host_ctx_pa;
@@ -367,8 +434,10 @@ struct irdma_sc_cqp {
u64 *scratch_array;
u64 requested_ops;
atomic64_t completed_ops;
+ struct irdma_ooo_cqp_op *ooo_op_array;
u32 cqp_id;
u32 sq_size;
+ u32 pe_en_vf_cnt;
u32 hw_sq_size;
u16 hw_maj_ver;
u16 hw_min_ver;
@@ -378,6 +447,14 @@ struct irdma_sc_cqp {
u8 ena_vf_count;
u8 timeout_count;
u8 ceqs_per_vf;
+ u8 ooisc_blksize;
+ u8 rrsp_blksize;
+ u8 q1_blksize;
+ u8 xmit_blksize;
+ u8 ts_override;
+ u8 ts_shift;
+ u8 en_fine_grained_timers;
+ u8 blksizes_valid;
bool en_datacenter_tcp:1;
bool disable_packed:1;
bool rocev2_rto_policy:1;
@@ -397,6 +474,8 @@ struct irdma_sc_aeq {
u32 msix_idx;
u8 polarity;
bool virtual_map:1;
+ bool pasid_valid:1;
+ u32 pasid;
};
struct irdma_sc_ceq {
@@ -412,13 +491,12 @@ struct irdma_sc_ceq {
u8 tph_val;
u32 first_pm_pbl_idx;
u8 polarity;
- struct irdma_sc_vsi *vsi;
- struct irdma_sc_cq **reg_cq;
- u32 reg_cq_size;
- spinlock_t req_cq_lock; /* protect access to reg_cq array */
+ u16 vsi_idx;
bool virtual_map:1;
bool tph_en:1;
bool itr_no_expire:1;
+ bool pasid_valid:1;
+ u32 pasid;
};
struct irdma_sc_cq {
@@ -426,6 +504,7 @@ struct irdma_sc_cq {
u64 cq_pa;
u64 shadow_area_pa;
struct irdma_sc_dev *dev;
+ u16 vsi_idx;
struct irdma_sc_vsi *vsi;
void *pbl_list;
void *back_cq;
@@ -477,8 +556,13 @@ struct irdma_sc_qp {
bool virtual_map:1;
bool flush_sq:1;
bool flush_rq:1;
+ bool err_sq_idx_valid:1;
+ bool err_rq_idx_valid:1;
+ u32 err_sq_idx;
+ u32 err_rq_idx;
bool sq_flush_code:1;
bool rq_flush_code:1;
+ u32 pkt_limit;
enum irdma_flush_opcode flush_code;
enum irdma_qp_event_type event_type;
u8 term_flags;
@@ -489,13 +573,13 @@ struct irdma_sc_qp {
struct irdma_stats_inst_info {
bool use_hmc_fcn_index;
u8 hmc_fn_id;
- u8 stats_idx;
+ u16 stats_idx;
};
struct irdma_up_info {
u8 map[8];
u8 cnp_up_override;
- u8 hmc_fcn_idx;
+ u16 hmc_fcn_idx;
bool use_vlan:1;
bool use_cnp_up_override:1;
};
@@ -518,6 +602,8 @@ struct irdma_ws_node_info {
struct irdma_hmc_fpm_misc {
u32 max_ceqs;
u32 max_sds;
+ u32 loc_mem_pages;
+ u8 ird;
u32 xf_block_size;
u32 q1_block_size;
u32 ht_multiplier;
@@ -526,6 +612,7 @@ struct irdma_hmc_fpm_misc {
u32 ooiscf_block_size;
};
+#define IRDMA_VCHNL_MAX_MSG_SIZE 512
#define IRDMA_LEAF_DEFAULT_REL_BW 64
#define IRDMA_PARENT_DEFAULT_REL_BW 1
@@ -601,19 +688,28 @@ struct irdma_sc_dev {
u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
struct irdma_hw_attrs hw_attrs;
struct irdma_hmc_info *hmc_info;
+ struct irdma_vchnl_rdma_caps vc_caps;
+ u8 vc_recv_buf[IRDMA_VCHNL_MAX_MSG_SIZE];
+ u16 vc_recv_len;
struct irdma_sc_cqp *cqp;
struct irdma_sc_aeq *aeq;
struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
struct irdma_sc_cq *ccq;
const struct irdma_irq_ops *irq_ops;
+ struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
struct irdma_hmc_fpm_misc hmc_fpm_misc;
struct irdma_ws_node *ws_tree_root;
struct mutex ws_mutex; /* ws tree mutex */
+ u32 vchnl_ver;
u16 num_vfs;
- u8 hmc_fn_id;
- u8 vf_id;
+ u16 hmc_fn_id;
+ u16 vf_id;
+ bool privileged:1;
bool vchnl_up:1;
bool ceq_valid:1;
+ bool is_pf:1;
+ u8 protocol_used;
+ struct mutex vchnl_mutex; /* mutex to synchronize RDMA virtual channel messages */
u8 pci_rev;
int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
@@ -632,6 +728,51 @@ struct irdma_modify_cq_info {
bool cq_resize:1;
};
+struct irdma_srq_init_info {
+ struct irdma_sc_pd *pd;
+ struct irdma_sc_vsi *vsi;
+ u64 srq_pa;
+ u64 shadow_area_pa;
+ u32 first_pm_pbl_idx;
+ u32 pasid;
+ u32 srq_size;
+ u16 srq_limit;
+ u8 pasid_valid;
+ u8 wqe_size;
+ u8 leaf_pbl_size;
+ u8 virtual_map;
+ u8 tph_en;
+ u8 arm_limit_event;
+ u8 tph_value;
+ u8 pbl_chunk_size;
+ struct irdma_srq_uk_init_info srq_uk_init_info;
+};
+
+struct irdma_sc_srq {
+ struct irdma_sc_dev *dev;
+ struct irdma_sc_vsi *vsi;
+ struct irdma_sc_pd *pd;
+ struct irdma_srq_uk srq_uk;
+ void *back_srq;
+ u64 srq_pa;
+ u64 shadow_area_pa;
+ u32 first_pm_pbl_idx;
+ u32 pasid;
+ u32 hw_srq_size;
+ u16 srq_limit;
+ u8 pasid_valid;
+ u8 leaf_pbl_size;
+ u8 virtual_map;
+ u8 tph_en;
+ u8 arm_limit_event;
+ u8 tph_val;
+};
+
+struct irdma_modify_srq_info {
+ u16 srq_limit;
+ u8 arm_limit_event;
+};
+
struct irdma_create_qp_info {
bool ord_valid:1;
bool tcp_ctx_valid:1;
@@ -671,7 +812,8 @@ struct irdma_ccq_cqe_info {
u16 maj_err_code;
u16 min_err_code;
u8 op_code;
- bool error;
+ bool error:1;
+ bool pending:1;
};
struct irdma_dcb_app_info {
@@ -720,7 +862,7 @@ struct irdma_vsi_init_info {
struct irdma_vsi_stats_info {
struct irdma_vsi_pestat *pestat;
- u8 fcn_id;
+ u16 fcn_id;
bool alloc_stats_inst;
};
@@ -731,7 +873,8 @@ struct irdma_device_init_info {
__le64 *fpm_commit_buf;
struct irdma_hw *hw;
void __iomem *bar0;
- u8 hmc_fn_id;
+ enum irdma_protocol_used protocol_used;
+ u16 hmc_fn_id;
};
struct irdma_ceq_init_info {
@@ -746,10 +889,8 @@ struct irdma_ceq_init_info {
bool itr_no_expire:1;
u8 pbl_chunk_size;
u8 tph_val;
+ u16 vsi_idx;
u32 first_pm_pbl_idx;
- struct irdma_sc_vsi *vsi;
- struct irdma_sc_cq **reg_cq;
- u32 reg_cq_idx;
};
struct irdma_aeq_init_info {
@@ -807,6 +948,8 @@ struct irdma_udp_offload_info {
u32 cwnd;
u8 rexmit_thresh;
u8 rnr_nak_thresh;
+ u8 rnr_nak_tmr;
+ u8 min_rnr_timer;
};
struct irdma_roce_offload_info {
@@ -833,6 +976,7 @@ struct irdma_roce_offload_info {
bool dctcp_en:1;
bool fw_cc_enable:1;
bool use_stats_inst:1;
+ u8 local_ack_timeout;
u16 t_high;
u16 t_low;
u8 last_byte_sent;
@@ -933,8 +1077,10 @@ struct irdma_qp_host_ctx_info {
};
u32 send_cq_num;
u32 rcv_cq_num;
+ u32 srq_id;
u32 rem_endpoint_idx;
- u8 stats_idx;
+ u16 stats_idx;
+ bool remote_atomics_en:1;
bool srq_valid:1;
bool tcp_info_valid:1;
bool iwarp_info_valid:1;
@@ -945,6 +1091,7 @@ struct irdma_qp_host_ctx_info {
struct irdma_aeqe_info {
u64 compl_ctx;
u32 qp_cq_id;
+ u32 def_info; /* only valid for DEF_CMPL */
u16 ae_id;
u16 wqe_idx;
u8 tcp_state;
@@ -953,9 +1100,11 @@ struct irdma_aeqe_info {
bool cq:1;
bool sq:1;
bool rq:1;
+ bool srq:1;
bool in_rdrsp_wr:1;
bool out_rdrsp:1;
bool aeqe_overflow:1;
+ bool err_rq_idx_valid:1;
u8 q2_data_written;
u8 ae_src;
};
@@ -972,7 +1121,8 @@ struct irdma_allocate_stag_info {
bool use_hmc_fcn_index:1;
bool use_pf_rid:1;
bool all_memory:1;
- u8 hmc_fcn_index;
+ bool remote_atomics_en:1;
+ u16 hmc_fcn_index;
};
struct irdma_mw_alloc_info {
@@ -1000,6 +1150,7 @@ struct irdma_reg_ns_stag_info {
u8 hmc_fcn_index;
bool use_pf_rid:1;
bool all_memory:1;
+ bool remote_atomics_en:1;
};
struct irdma_fast_reg_stag_info {
@@ -1023,6 +1174,7 @@ struct irdma_fast_reg_stag_info {
u8 hmc_fcn_index;
bool use_pf_rid:1;
bool defer_flag:1;
+ bool remote_atomics_en:1;
};
struct irdma_dealloc_stag_info {
@@ -1130,6 +1282,8 @@ struct irdma_cqp_manage_push_page_info {
};
struct irdma_qp_flush_info {
+ u32 err_sq_idx;
+ u32 err_rq_idx;
u16 sq_minor_code;
u16 sq_major_code;
u16 rq_minor_code;
@@ -1140,6 +1294,8 @@ struct irdma_qp_flush_info {
bool rq:1;
bool userflushcode:1;
bool generate_ae:1;
+ bool err_sq_idx_valid:1;
+ bool err_rq_idx_valid:1;
};
struct irdma_gen_ae_info {
@@ -1189,6 +1345,11 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
struct irdma_sc_dev *dev);
+void irdma_sc_cqp_def_cmpl_ae_handler(struct irdma_sc_dev *dev,
+ struct irdma_aeqe_info *info,
+ bool first, u64 *scratch,
+ u32 *sw_def_info);
+u64 irdma_sc_cqp_cleanup_handler(struct irdma_sc_dev *dev);
int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
@@ -1224,6 +1385,8 @@ void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *inf
int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
u8 hmc_fn_id, bool post_sq,
bool poll_registers);
+int irdma_sc_srq_init(struct irdma_sc_srq *srq,
+ struct irdma_srq_init_info *info);
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
struct cqp_info {
@@ -1467,6 +1630,23 @@ struct cqp_info {
struct irdma_dma_mem query_buff_mem;
u64 scratch;
} query_rdma;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ u64 scratch;
+ } srq_create;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ struct irdma_modify_srq_info info;
+ u64 scratch;
+ } srq_modify;
+
+ struct {
+ struct irdma_sc_srq *srq;
+ u64 scratch;
+ } srq_destroy;
+
} u;
};
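
The new ooo_op_array/ooo_avail/ooo_pnd machinery tracks CQP ops whose completions arrive marked pending and are only retired later by a deferred-completion AE carrying a def_info ticket. The sketch below models one plausible retirement rule, assuming tickets increase monotonically and a completion retires every parked op up to its ticket; that rule is inferred from the last_def_cmpl_ticket/sw_def_cmpl_ticket fields above, not spelled out in this hunk:

#include <stdio.h>

#define DEMO_OPS 4

struct demo_ooo_op {
	unsigned long long scratch;	/* caller cookie */
	unsigned int def_info;		/* deferred-completion ticket */
	int pending;			/* parked, waiting for the AE */
};

int main(void)
{
	struct demo_ooo_op ops[DEMO_OPS] = { { 0, 0, 0 } };
	unsigned int ticket = 100;
	int i;

	/* a CQE with the pending bit parks the op instead of
	 * completing it
	 */
	for (i = 0; i < 3; i++) {
		ops[i].scratch = 0xabc0 + i;
		ops[i].def_info = ticket++;
		ops[i].pending = 1;
	}

	/* a later deferred-completion AE (ticket 101) retires every
	 * parked op up to and including that ticket
	 */
	for (i = 0; i < DEMO_OPS; i++) {
		if (ops[i].pending && ops[i].def_info <= 101) {
			printf("retire scratch=0x%llx ticket=%u\n",
			       ops[i].scratch, ops[i].def_info);
			ops[i].pending = 0;
		}
	}
	return 0;
}
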
diff --git a/drivers/infiniband/hw/irdma/uda_d.h b/drivers/infiniband/hw/irdma/uda_d.h
index 5a9e6eabf032..4fb4daa20722 100644
--- a/drivers/infiniband/hw/irdma/uda_d.h
+++ b/drivers/infiniband/hw/irdma/uda_d.h
@@ -78,8 +78,7 @@
#define IRDMA_UDAQPC_IPID GENMASK_ULL(47, 32)
#define IRDMA_UDAQPC_SNDMSS GENMASK_ULL(29, 16)
#define IRDMA_UDAQPC_VLANTAG GENMASK_ULL(15, 0)
-
-#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(21, 20)
+#define IRDMA_UDA_CQPSQ_MAV_PDINDEXHI GENMASK_ULL(27, 20)
#define IRDMA_UDA_CQPSQ_MAV_PDINDEXLO GENMASK_ULL(63, 48)
#define IRDMA_UDA_CQPSQ_MAV_SRCMACADDRINDEX GENMASK_ULL(29, 24)
#define IRDMA_UDA_CQPSQ_MAV_ARPINDEX GENMASK_ULL(63, 48)
@@ -94,7 +93,7 @@
#define IRDMA_UDA_CQPSQ_MAV_OPCODE GENMASK_ULL(37, 32)
#define IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK BIT_ULL(62)
#define IRDMA_UDA_CQPSQ_MAV_IPV4VALID BIT_ULL(59)
-#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(16, 0)
+#define IRDMA_UDA_CQPSQ_MAV_AVIDX GENMASK_ULL(23, 0)
#define IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG BIT_ULL(60)
#define IRDMA_UDA_MGCTX_VFFLAG BIT_ULL(29)
#define IRDMA_UDA_MGCTX_DESTPORT GENMASK_ULL(47, 32)
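
The uda_d.h hunk widens two CQP WQE fields: PDINDEXHI grows from bits 21:20 to 27:20, and AVIDX from 16:0 to 23:0, i.e. the AH index field goes from 17 to 24 bits. The sketch below shows why that matters, with minimal userspace stand-ins for GENMASK_ULL()/FIELD_PREP() (the real macros live in linux/bits.h and linux/bitfield.h):

#include <stdint.h>
#include <stdio.h>

#define DEMO_GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

static uint64_t demo_field_prep(uint64_t mask, uint64_t val)
{
	/* shift val to the mask's lowest set bit, then bound it */
	return (val << __builtin_ctzll(mask)) & mask;
}

int main(void)
{
	uint64_t old_mask = DEMO_GENMASK_ULL(16, 0);	/* 17-bit AVIDX */
	uint64_t new_mask = DEMO_GENMASK_ULL(23, 0);	/* 24-bit AVIDX */
	uint64_t av_idx = 0x20000;			/* needs 18 bits */

	/* the old 17-bit field silently truncates this AH index to 0;
	 * the widened 24-bit field holds it intact
	 */
	printf("old: 0x%llx\n",
	       (unsigned long long)demo_field_prep(old_mask, av_idx));
	printf("new: 0x%llx\n",
	       (unsigned long long)demo_field_prep(new_mask, av_idx));
	return 0;
}
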
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 38c54e59cc2e..f0846b800913 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -114,33 +114,8 @@ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
*/
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
{
- u64 temp;
- u32 hw_sq_tail;
- u32 sw_sq_head;
-
- /* valid bit is written and loads completed before reading shadow */
- mb();
-
- /* read the doorbell shadow area */
- get_64bit_val(qp->shadow_area, 0, &temp);
-
- hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
- sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
- if (sw_sq_head != qp->initial_ring.head) {
- if (sw_sq_head != hw_sq_tail) {
- if (sw_sq_head > qp->initial_ring.head) {
- if (hw_sq_tail >= qp->initial_ring.head &&
- hw_sq_tail < sw_sq_head)
- writel(qp->qp_id, qp->wqe_alloc_db);
- } else {
- if (hw_sq_tail >= qp->initial_ring.head ||
- hw_sq_tail < sw_sq_head)
- writel(qp->qp_id, qp->wqe_alloc_db);
- }
- }
- }
-
- qp->initial_ring.head = qp->sq_ring.head;
+ dma_wmb();
+ writel(qp->qp_id, qp->wqe_alloc_db);
}
/**
@@ -194,6 +169,27 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
+ qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;
+
+ return wqe;
+}
+
+__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx)
+{
+ int ret_code;
+ __le64 *wqe;
+
+ if (IRDMA_RING_FULL_ERR(srq->srq_ring))
+ return NULL;
+
+ IRDMA_ATOMIC_RING_MOVE_HEAD(srq->srq_ring, *wqe_idx, ret_code);
+ if (ret_code)
+ return NULL;
+
+ if (!*wqe_idx)
+ srq->srwqe_polarity = !srq->srwqe_polarity;
+ /* wqe_size_multiplier is the number of 32-byte quanta in one SRQ WQE */
+ wqe = srq->srq_base[*wqe_idx * (srq->wqe_size_multiplier)].elem;
return wqe;
}
@@ -318,6 +314,160 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
}
/**
+ * irdma_uk_atomic_fetch_add - atomic fetch and add operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_fetch_add *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_fetch_add;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_FETCH_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->fetch_add_data_bytes);
+ set_64bit_val(wqe, 40, 0);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_atomic_compare_swap - atomic compare and swap operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq)
+{
+ struct irdma_atomic_compare_swap *op_info;
+ u32 total_size = 0;
+ u16 quanta = 2;
+ u32 wqe_idx;
+ __le64 *wqe;
+ u64 hdr;
+
+ op_info = &info->op.atomic_compare_swap;
+ wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, quanta, total_size,
+ info);
+ if (!wqe)
+ return -ENOMEM;
+
+ set_64bit_val(wqe, 0, op_info->tagged_offset);
+ set_64bit_val(wqe, 8,
+ FIELD_PREP(IRDMAQPSQ_STAG, op_info->stag));
+ set_64bit_val(wqe, 16, op_info->remote_tagged_offset);
+
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, 1) |
+ FIELD_PREP(IRDMAQPSQ_REMOTE_STAG, op_info->remote_stag) |
+ FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_ATOMIC_COMPARE_SWAP_ADD) |
+ FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, info->local_fence) |
+ FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
+
+ set_64bit_val(wqe, 32, op_info->swap_data_bytes);
+ set_64bit_val(wqe, 40, op_info->compare_data_bytes);
+ set_64bit_val(wqe, 48, 0);
+ set_64bit_val(wqe, 56,
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity));
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ if (post_sq)
+ irdma_uk_qp_post_wr(qp);
+
+ return 0;
+}
+
+/**
+ * irdma_uk_srq_post_receive - post a receive wqe to a shared rq
+ * @srq: shared rq ptr
+ * @info: post rq information
+ */
+int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
+ struct irdma_post_rq_info *info)
+{
+ u32 wqe_idx, i, byte_off;
+ u32 addl_frag_cnt;
+ __le64 *wqe;
+ u64 hdr;
+
+ if (srq->max_srq_frag_cnt < info->num_sges)
+ return -EINVAL;
+
+ wqe = irdma_srq_get_next_recv_wqe(srq, &wqe_idx);
+ if (!wqe)
+ return -ENOMEM;
+
+ addl_frag_cnt = info->num_sges > 1 ? info->num_sges - 1 : 0;
+ srq->wqe_ops.iw_set_fragment(wqe, 0, info->sg_list,
+ srq->srwqe_polarity);
+
+ for (i = 1, byte_off = 32; i < info->num_sges; i++) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, &info->sg_list[i],
+ srq->srwqe_polarity);
+ byte_off += 16;
+ }
+
+ /* if an even number of fragments, set the valid bit in the next (unused) fragment */
+ if (srq->uk_attrs->hw_rev >= IRDMA_GEN_2 && !(info->num_sges & 0x01) &&
+ info->num_sges) {
+ srq->wqe_ops.iw_set_fragment(wqe, byte_off, NULL,
+ srq->srwqe_polarity);
+ if (srq->uk_attrs->hw_rev == IRDMA_GEN_2)
+ ++addl_frag_cnt;
+ }
+
+ set_64bit_val(wqe, 16, (u64)info->wr_id);
+ hdr = FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
+ FIELD_PREP(IRDMAQPSQ_VALID, srq->srwqe_polarity);
+
+ dma_wmb(); /* make sure WQE is populated before valid bit is set */
+
+ set_64bit_val(wqe, 24, hdr);
+
+ set_64bit_val(srq->shadow_area, 0, (wqe_idx + 1) % srq->srq_ring.size);
+
+ return 0;
+}
+
+/**
* irdma_uk_rdma_read - rdma read command
* @qp: hw qp ptr
* @info: post sq information
@@ -963,6 +1113,27 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
}
/**
+ * irdma_uk_cq_empty - Check if CQ is empty
+ * @cq: hw cq
+ */
+bool irdma_uk_cq_empty(struct irdma_cq_uk *cq)
+{
+ __le64 *cqe;
+ u8 polarity;
+ u64 qword3;
+
+ if (cq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
+
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ return polarity != cq->polarity;
+}
+
+/**
* irdma_uk_cq_poll_cmpl - get cq completion info
* @cq: hw cq
* @info: cq poll information returned
@@ -973,6 +1144,9 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
u64 comp_ctx, qword0, qword2, qword3;
__le64 *cqe;
struct irdma_qp_uk *qp;
+ struct irdma_srq_uk *srq;
+ struct qp_err_code qp_err;
+ u8 is_srq;
struct irdma_ring *pring = NULL;
u32 wqe_idx;
int ret_code;
@@ -1046,21 +1220,46 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
}
info->q_type = (u8)FIELD_GET(IRDMA_CQ_SQ, qword3);
+ is_srq = (u8)FIELD_GET(IRDMA_CQ_SRQ, qword3);
info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, qword3);
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
+ get_64bit_val(cqe, 8, &comp_ctx);
+ if (is_srq)
+ get_64bit_val(cqe, 40, (u64 *)&qp);
+ else
+ qp = (struct irdma_qp_uk *)(unsigned long)comp_ctx;
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
- if (info->major_err == IRDMA_FLUSH_MAJOR_ERR) {
- info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ switch (info->major_err) {
+ case IRDMA_SRQFLUSH_RSVD_MAJOR_ERR:
+ qp_err = irdma_ae_to_qp_err_code(info->minor_err);
+ info->minor_err = qp_err.flush_code;
+ fallthrough;
+ case IRDMA_FLUSH_MAJOR_ERR:
/* Set the min error to standard flush error code for remaining cqes */
if (info->minor_err != FLUSH_GENERAL_ERR) {
qword3 &= ~IRDMA_CQ_MINERR;
qword3 |= FIELD_PREP(IRDMA_CQ_MINERR, FLUSH_GENERAL_ERR);
set_64bit_val(cqe, 24, qword3);
}
- } else {
- info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ info->comp_status = IRDMA_COMPL_STATUS_FLUSHED;
+ break;
+ default:
+#define IRDMA_CIE_SIGNATURE 0xE
+#define IRDMA_CQMAJERR_HIGH_NIBBLE GENMASK(15, 12)
+ if (info->q_type == IRDMA_CQE_QTYPE_SQ &&
+ qp->qp_type == IRDMA_QP_TYPE_ROCE_UD &&
+ FIELD_GET(IRDMA_CQMAJERR_HIGH_NIBBLE, info->major_err)
+ == IRDMA_CIE_SIGNATURE) {
+ info->error = 0;
+ info->major_err = 0;
+ info->minor_err = 0;
+ info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
+ } else {
+ info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
+ }
+ break;
}
} else {
info->comp_status = IRDMA_COMPL_STATUS_SUCCESS;
@@ -1069,7 +1268,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
get_64bit_val(cqe, 0, &qword0);
get_64bit_val(cqe, 16, &qword2);
- info->tcp_seq_num_rtt = (u32)FIELD_GET(IRDMACQ_TCPSEQNUMRTT, qword0);
info->qp_id = (u32)FIELD_GET(IRDMACQ_QPID, qword2);
info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
@@ -1085,7 +1283,27 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->qp_handle = (irdma_qp_handle)(unsigned long)qp;
info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
- if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
+ if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
+ unsigned long flags;
+
+ srq = qp->srq_uk;
+
+ get_64bit_val(cqe, 8, &info->wr_id);
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG,
+ qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
+ spin_lock_irqsave(srq->lock, flags);
+ IRDMA_RING_MOVE_TAIL(srq->srq_ring);
+ spin_unlock_irqrestore(srq->lock, flags);
+ pring = &srq->srq_ring;
+
+ } else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
u32 array_idx;
array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
@@ -1139,6 +1357,10 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
if (!info->comp_status)
info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+ if (!qp->sq_wrtrk_array[wqe_idx].signaled) {
+ ret_code = -EFAULT;
+ goto exit;
+ }
info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
IRDMA_RING_SET_TAIL(qp->sq_ring,
wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
@@ -1180,9 +1402,15 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
ret_code = 0;
exit:
- if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED)
+ if (!ret_code && info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
if (pring && IRDMA_RING_MORE_WORK(*pring))
- move_cq_head = false;
+ /* Park the CQ head during a flush so SW can generate
+ * additional CQEs for all unprocessed WQEs. For GEN3 and
+ * beyond, FW generates/flushes these CQEs, so move on to
+ * the next CQE.
+ */
+ move_cq_head = qp->uk_attrs->hw_rev >= IRDMA_GEN_3;
+ }
if (move_cq_head) {
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
@@ -1198,8 +1426,9 @@ exit:
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
if (!cq->avoid_mem_cflct && ext_valid)
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
- set_64bit_val(cq->shadow_area, 0,
- IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ if (IRDMA_RING_CURRENT_HEAD(cq->cq_ring) & 0x3F || irdma_uk_cq_empty(cq))
+ set_64bit_val(cq->shadow_area, 0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
} else {
qword3 &= ~IRDMA_CQ_WQEIDX;
qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
@@ -1210,10 +1439,10 @@ exit:
}
/**
- * irdma_qp_round_up - return round up qp wq depth
+ * irdma_round_up_wq - return rounded-up WQ depth
* @wqdepth: wq depth in quanta to round up
*/
-static int irdma_qp_round_up(u32 wqdepth)
+static int irdma_round_up_wq(u32 wqdepth)
{
int scount = 1;
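
The body of irdma_round_up_wq() is cut off at this hunk boundary. Assuming it completes the usual bit-smearing idiom that the scount initialization suggests, a standalone sketch of the technique (a reconstruction, not the kernel source) is:

#include <stdint.h>
#include <stdio.h>

/* Round up to the next power of two: smear the highest set bit into
 * every lower position (shifts 1, 2, 4, 8, 16 cover 32 bits), then
 * add one. Hedged reconstruction of irdma_round_up_wq().
 */
static uint32_t round_up_pow2(uint32_t wqdepth)
{
    int scount;

    wqdepth--;
    for (scount = 1; scount <= 16; scount *= 2)
        wqdepth |= wqdepth >> scount;

    return wqdepth + 1;
}

int main(void)
{
    printf("%u -> %u\n", 100u, round_up_pow2(100));  /* 100 -> 128 */
    printf("%u -> %u\n", 256u, round_up_pow2(256));  /* 256 -> 256 */
    return 0;
}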
@@ -1268,7 +1497,7 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
{
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
- *sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);
+ *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
if (*sqdepth < min_size)
*sqdepth = min_size;
@@ -1290,7 +1519,7 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
{
u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
- *rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);
+ *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
if (*rqdepth < min_size)
*rqdepth = min_size;
@@ -1300,6 +1529,26 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
return 0;
}
+/**
+ * irdma_get_srqdepth - get SRQ depth (quanta)
+ * @uk_attrs: qp HW attributes
+ * @srq_size: SRQ size
+ * @shift: shift which determines size of WQE
+ * @srqdepth: depth of SRQ
+ */
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
+ u32 *srqdepth)
+{
+ *srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
+
+ if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
+ *srqdepth = uk_attrs->min_hw_wq_size << shift;
+ else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
+ return -EINVAL;
+
+ return 0;
+}
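
A worked example of the depth math, assuming shift = 1 (two 32-byte quanta per WQE) and one reserved slot for IRDMA_RQ_RSVD; both values are assumptions for illustration:

#include <stdio.h>

static unsigned int round_up_pow2(unsigned int v)
{
    v--;
    for (int s = 1; s <= 16; s *= 2)
        v |= v >> s;
    return v + 1;
}

int main(void)
{
    unsigned int srq_size = 100, shift = 1, rsvd = 1;  /* assumed values */
    unsigned int depth = round_up_pow2((srq_size << shift) + rsvd);

    /* (100 << 1) + 1 = 201 quanta, rounded up to 256 */
    printf("srqdepth = %u\n", depth);

    /* usable WRs reported back by irdma_create_srq():
     * (256 - 1) >> 1 = 127
     */
    printf("max_wr = %u\n", (depth - rsvd) >> shift);
    return 0;
}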
+
static const struct irdma_wqe_uk_ops iw_wqe_uk_ops = {
.iw_copy_inline_data = irdma_copy_inline_data,
.iw_inline_data_size_to_quanta = irdma_inline_data_size_to_quanta,
@@ -1332,7 +1581,42 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
qp->conn_wqes = move_cnt;
IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
- IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
+}
+
+/**
+ * irdma_uk_srq_init - initialize shared receive queue
+ * @srq: hw srq (user and kernel)
+ * @info: srq initialization info
+ *
+ * Initializes the variables used in both user and kernel mode.
+ * The WQE size depends on the maximum number of fragments
+ * allowed; WQE size * number of WQEs should equal the amount
+ * of memory allocated for the SRQ.
+ */
+int irdma_uk_srq_init(struct irdma_srq_uk *srq,
+ struct irdma_srq_uk_init_info *info)
+{
+ u8 rqshift;
+
+ srq->uk_attrs = info->uk_attrs;
+ if (info->max_srq_frag_cnt > srq->uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ irdma_get_wqe_shift(srq->uk_attrs, info->max_srq_frag_cnt, 0, &rqshift);
+ srq->srq_caps = info->srq_caps;
+ srq->srq_base = info->srq;
+ srq->shadow_area = info->shadow_area;
+ srq->srq_id = info->srq_id;
+ srq->srwqe_polarity = 0;
+ srq->srq_size = info->srq_size;
+ srq->wqe_size = rqshift;
+ srq->max_srq_frag_cnt = min(srq->uk_attrs->max_hw_wq_frags,
+ ((u32)2 << rqshift) - 1);
+ IRDMA_RING_INIT(srq->srq_ring, srq->srq_size);
+ srq->wqe_size_multiplier = 1 << rqshift;
+ srq->wqe_ops = iw_wqe_uk_ops;
+
+ return 0;
}
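
To make the derived fields concrete, here is a sketch of the sizing done in irdma_uk_srq_init(); the shift-from-fragment-count rule below is an assumed stand-in for irdma_get_wqe_shift(), and only the derived fields mirror the function above:

#include <stdio.h>

int main(void)
{
    unsigned int max_frags = 3, max_hw_wq_frags = 13;  /* assumed inputs */
    unsigned int rqshift = max_frags <= 1 ? 0 : (max_frags <= 3 ? 1 : 2);

    unsigned int wqe_quanta = 1u << rqshift;      /* wqe_size_multiplier */
    unsigned int frag_cap = (2u << rqshift) - 1;  /* frags one WQE holds */
    unsigned int eff = frag_cap < max_hw_wq_frags ? frag_cap
                                                  : max_hw_wq_frags;

    /* prints: shift=1 quanta/WQE=2 max_srq_frag_cnt=3 */
    printf("shift=%u quanta/WQE=%u max_srq_frag_cnt=%u\n",
           rqshift, wqe_quanta, eff);
    return 0;
}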
/**
@@ -1441,7 +1725,6 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
sq_ring_size = qp->sq_size << info->sq_shift;
IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
- IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
if (info->first_sq_wq) {
irdma_setup_connection_wqes(qp, info);
qp->swqe_polarity = 1;
@@ -1461,6 +1744,7 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
qp->wqe_ops = iw_wqe_uk_ops;
+ qp->srq_uk = info->srq_uk;
return ret_code;
}
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index 380e4a47aede..9eb7fd0b1cbf 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -41,10 +41,114 @@
#define IRDMA_OP_TYPE_INV_STAG 0x0a
#define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b
#define IRDMA_OP_TYPE_NOP 0x0c
+#define IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD 0x0f
+#define IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP 0x11
#define IRDMA_OP_TYPE_REC 0x3e
#define IRDMA_OP_TYPE_REC_IMM 0x3f
-#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_FLUSH_MAJOR_ERR 1
+#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
+
+/* Async event codes */
+#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
+#define IRDMA_AE_AMP_INVALID_STAG 0x0103
+#define IRDMA_AE_AMP_BAD_QP 0x0104
+#define IRDMA_AE_AMP_BAD_PD 0x0105
+#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
+#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
+#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
+#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
+#define IRDMA_AE_AMP_TO_WRAP 0x010a
+#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
+#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
+#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
+#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
+#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
+#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
+#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
+#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
+#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
+#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
+#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
+#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
+#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
+#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
+#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
+#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
+#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
+#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
+#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
+#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
+#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
+#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
+#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
+#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
+#define IRDMA_AE_BAD_CLOSE 0x0201
+#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
+#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
+#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
+#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
+#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
+#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
+#define IRDMA_AE_SRQ_LIMIT 0x0209
+#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
+#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
+#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
+#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
+#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
+#define IRDMA_AE_SRQ_CATASTROPHIC_ERROR 0x020f
+#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
+#define IRDMA_AE_ATOMIC_ALIGNMENT 0x0221
+#define IRDMA_AE_ATOMIC_MASK 0x0222
+#define IRDMA_AE_INVALID_REQUEST 0x0223
+#define IRDMA_AE_PCIE_ATOMIC_DISABLE 0x0224
+#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
+#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
+#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
+#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
+#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
+#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
+#define IRDMA_AE_DDP_NO_L_BIT 0x0308
+#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
+#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
+#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
+#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
+#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
+#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
+#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
+#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
+#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
+#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
+#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
+#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
+#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
+#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
+#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
+#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
+#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
+#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
+#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
+#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
+#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
+#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
+#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
+#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
+#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
+#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
+#define IRDMA_AE_RESET_SENT 0x0601
+#define IRDMA_AE_TERMINATE_SENT 0x0602
+#define IRDMA_AE_RESET_NOT_SENT 0x0603
+#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
+#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
+#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
+#define IRDMA_AE_REMOTE_QP_CATASTROPHIC 0x0703
+#define IRDMA_AE_LOCAL_QP_CATASTROPHIC 0x0704
+#define IRDMA_AE_RCE_QP_CATASTROPHIC 0x0705
+#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
+#define IRDMA_AE_CQP_DEFERRED_COMPLETE 0x0901
+#define IRDMA_AE_ADAPTER_CATASTROPHIC 0x0B0B
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
@@ -55,11 +159,12 @@ enum irdma_device_caps_const {
IRDMA_CEQE_SIZE = 1,
IRDMA_CQP_CTX_SIZE = 8,
IRDMA_SHADOW_AREA_SIZE = 8,
- IRDMA_QUERY_FPM_BUF_SIZE = 176,
- IRDMA_COMMIT_FPM_BUF_SIZE = 176,
+ IRDMA_QUERY_FPM_BUF_SIZE = 192,
+ IRDMA_COMMIT_FPM_BUF_SIZE = 192,
IRDMA_GATHER_STATS_BUF_SIZE = 1024,
IRDMA_MIN_IW_QP_ID = 0,
IRDMA_MAX_IW_QP_ID = 262143,
+ IRDMA_MIN_IW_SRQ_ID = 0,
IRDMA_MIN_CEQID = 0,
IRDMA_MAX_CEQID = 1023,
IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
@@ -67,6 +172,7 @@ enum irdma_device_caps_const {
IRDMA_MAX_CQID = 524287,
IRDMA_MIN_AEQ_ENTRIES = 1,
IRDMA_MAX_AEQ_ENTRIES = 524287,
+ IRDMA_MAX_AEQ_ENTRIES_GEN_3 = 262144,
IRDMA_MIN_CEQ_ENTRIES = 1,
IRDMA_MAX_CEQ_ENTRIES = 262143,
IRDMA_MIN_CQ_SIZE = 1,
@@ -105,6 +211,13 @@ enum irdma_flush_opcode {
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
+ FLUSH_RNR_RETRY_EXC_ERR,
+};
+
+enum irdma_qp_event_type {
+ IRDMA_QP_EVENT_CATASTROPHIC,
+ IRDMA_QP_EVENT_ACCESS_ERR,
+ IRDMA_QP_EVENT_REQ_ERR,
};
enum irdma_cmpl_status {
@@ -147,6 +260,8 @@ enum irdma_qp_caps {
IRDMA_PUSH_MODE = 8,
};
+struct irdma_srq_uk;
+struct irdma_srq_uk_init_info;
struct irdma_qp_uk;
struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
@@ -201,6 +316,24 @@ struct irdma_bind_window {
bool ena_writes:1;
irdma_stag mw_stag;
bool mem_window_type_1:1;
+ bool remote_atomics_en:1;
+};
+
+struct irdma_atomic_fetch_add {
+ u64 tagged_offset;
+ u64 remote_tagged_offset;
+ u64 fetch_add_data_bytes;
+ u32 stag;
+ u32 remote_stag;
+};
+
+struct irdma_atomic_compare_swap {
+ u64 tagged_offset;
+ u64 remote_tagged_offset;
+ u64 swap_data_bytes;
+ u64 compare_data_bytes;
+ u32 stag;
+ u32 remote_stag;
};
struct irdma_inv_local_stag {
@@ -219,6 +352,7 @@ struct irdma_post_sq_info {
bool report_rtt:1;
bool udp_hdr:1;
bool defer_flag:1;
+ bool remote_atomic_en:1;
u32 imm_data;
u32 stag_to_inv;
union {
@@ -227,6 +361,8 @@ struct irdma_post_sq_info {
struct irdma_rdma_read rdma_read;
struct irdma_bind_window bind_window;
struct irdma_inv_local_stag inv_local_stag;
+ struct irdma_atomic_fetch_add atomic_fetch_add;
+ struct irdma_atomic_compare_swap atomic_compare_swap;
} op;
};
@@ -255,6 +391,15 @@ struct irdma_cq_poll_info {
bool imm_valid:1;
};
+struct qp_err_code {
+ enum irdma_flush_opcode flush_code;
+ enum irdma_qp_event_type event_type;
+};
+
+int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
+int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp,
+ struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
@@ -284,6 +429,7 @@ struct irdma_wqe_uk_ops {
struct irdma_bind_window *op_info);
};
+bool irdma_uk_cq_empty(struct irdma_cq_uk *cq);
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
@@ -300,11 +446,45 @@ int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
u32 *rq_depth, u8 *rq_shift);
+int irdma_uk_srq_init(struct irdma_srq_uk *srq,
+ struct irdma_srq_uk_init_info *info);
+int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq,
+ struct irdma_post_rq_info *info);
+
+struct irdma_srq_uk {
+ u32 srq_caps;
+ struct irdma_qp_quanta *srq_base;
+ struct irdma_uk_attrs *uk_attrs;
+ __le64 *shadow_area;
+ struct irdma_ring srq_ring;
+ u32 srq_id;
+ u32 srq_size;
+ u32 max_srq_frag_cnt;
+ struct irdma_wqe_uk_ops wqe_ops;
+ u8 srwqe_polarity;
+ u8 wqe_size;
+ u8 wqe_size_multiplier;
+ u8 deferred_flag;
+ spinlock_t *lock;
+};
+
+struct irdma_srq_uk_init_info {
+ struct irdma_qp_quanta *srq;
+ struct irdma_uk_attrs *uk_attrs;
+ __le64 *shadow_area;
+ u64 *srq_wrid_array;
+ u32 srq_id;
+ u32 srq_caps;
+ u32 srq_size;
+ u32 max_srq_frag_cnt;
+};
+
struct irdma_sq_uk_wr_trk_info {
u64 wrid;
u32 wr_len;
u16 quanta;
- u8 reserved[2];
+ u8 signaled;
+ u8 reserved[1];
};
struct irdma_qp_quanta {
@@ -344,6 +524,7 @@ struct irdma_qp_uk {
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
u8 dbg_rq_flushed;
+ struct irdma_srq_uk *srq_uk;
u8 sq_flush_seen;
u8 rq_flush_seen;
};
@@ -383,6 +564,7 @@ struct irdma_qp_uk_init_info {
u8 rq_shift;
int abi_ver;
bool legacy_mode;
+ struct irdma_srq_uk *srq_uk;
};
struct irdma_cq_uk_init_info {
@@ -398,6 +580,7 @@ struct irdma_cq_uk_init_info {
__le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
u16 quanta, u32 total_size,
struct irdma_post_sq_info *info);
+__le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
@@ -409,5 +592,85 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
u32 *wqdepth);
int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
u32 *wqdepth);
+int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
+ u32 *srqdepth);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
+
+static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
+{
+ struct qp_err_code qp_err = {};
+
+ switch (ae_id) {
+ case IRDMA_AE_AMP_BOUNDS_VIOLATION:
+ case IRDMA_AE_AMP_INVALID_STAG:
+ case IRDMA_AE_AMP_RIGHTS_VIOLATION:
+ case IRDMA_AE_AMP_UNALLOCATED_STAG:
+ case IRDMA_AE_AMP_BAD_PD:
+ case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_AMP_BAD_STAG_KEY:
+ case IRDMA_AE_AMP_BAD_STAG_INDEX:
+ case IRDMA_AE_AMP_TO_WRAP:
+ case IRDMA_AE_PRIV_OPERATION_DENIED:
+ qp_err.flush_code = FLUSH_PROT_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_UDA_XMIT_BAD_PD:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
+ case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
+ case IRDMA_AE_UDA_L4LEN_INVALID:
+ case IRDMA_AE_DDP_UBE_INVALID_MO:
+ case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+ qp_err.flush_code = FLUSH_LOC_LEN_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+ case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+ qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ case IRDMA_AE_AMP_MWBIND_VALID_STAG:
+ qp_err.flush_code = FLUSH_MW_BIND_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_IB_INVALID_REQUEST:
+ qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
+ break;
+ case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
+ case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+ case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp_err.flush_code = FLUSH_REM_OP_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ qp_err.flush_code = FLUSH_RNR_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ case IRDMA_AE_LCE_QP_CATASTROPHIC:
+ case IRDMA_AE_REMOTE_QP_CATASTROPHIC:
+ case IRDMA_AE_LOCAL_QP_CATASTROPHIC:
+ case IRDMA_AE_RCE_QP_CATASTROPHIC:
+ qp_err.flush_code = FLUSH_FATAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ default:
+ qp_err.flush_code = FLUSH_GENERAL_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
+ }
+
+ return qp_err;
+}
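
Usage sketch for the mapping above, reduced to a standalone model with two AE codes from this header; the enum is abbreviated and its values are illustrative:

#include <stdio.h>

enum flush_code { FLUSH_GENERAL_ERR, FLUSH_PROT_ERR, FLUSH_RETRY_EXC_ERR };

/* Two-case reduction of irdma_ae_to_qp_err_code(); the AE values match
 * the defines above, everything else is trimmed for illustration.
 */
static enum flush_code ae_to_flush(unsigned short ae_id)
{
    switch (ae_id) {
    case 0x0108:  /* IRDMA_AE_AMP_BOUNDS_VIOLATION */
        return FLUSH_PROT_ERR;
    case 0x050a:  /* IRDMA_AE_LLP_TOO_MANY_RETRIES */
        return FLUSH_RETRY_EXC_ERR;
    default:
        return FLUSH_GENERAL_ERR;
    }
}

int main(void)
{
    /* The SRQ flush path in irdma_uk_cq_poll_cmpl() makes this exact
     * conversion: the CQE minor error carries the AE code, which is
     * rewritten to a flush code before the CQE is reported as flushed.
     */
    printf("%d\n", ae_to_flush(0x050a));  /* FLUSH_RETRY_EXC_ERR (2) */
    return 0;
}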
#endif /* IRDMA_USER_H */
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index b510ef747399..cc2a12f735d3 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -452,6 +452,7 @@ struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
cqp_request->waiting = wait;
refcount_set(&cqp_request->refcnt, 1);
memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
+ memset(&cqp_request->info, 0, sizeof(cqp_request->info));
return cqp_request;
}
@@ -481,6 +482,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
WRITE_ONCE(cqp_request->request_done, false);
cqp_request->callback_fcn = NULL;
cqp_request->waiting = false;
+ cqp_request->pending = false;
spin_lock_irqsave(&cqp->req_lock, flags);
list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
@@ -521,6 +523,22 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
}
/**
+ * irdma_cleanup_deferred_cqp_ops - clean up CQP requests with deferred completions
+ * @dev: sc_dev
+ * @cqp: cqp
+ */
+static void irdma_cleanup_deferred_cqp_ops(struct irdma_sc_dev *dev,
+ struct irdma_cqp *cqp)
+{
+ u64 scratch;
+
+ /* process all CQP requests with deferred/pending completions */
+ while ((scratch = irdma_sc_cqp_cleanup_handler(dev)))
+ irdma_free_pending_cqp_request(cqp, (struct irdma_cqp_request *)
+ (uintptr_t)scratch);
+}
+
+/**
* irdma_cleanup_pending_cqp_op - clean-up cqp with no
* completions
* @rf: RDMA PCI function
@@ -533,6 +551,8 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
struct cqp_cmds_info *pcmdinfo = NULL;
u32 i, pending_work, wqe_idx;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ irdma_cleanup_deferred_cqp_ops(dev, cqp);
pending_work = IRDMA_RING_USED_QUANTA(cqp->sc_cqp.sq_ring);
wqe_idx = IRDMA_RING_CURRENT_TAIL(cqp->sc_cqp.sq_ring);
for (i = 0; i < pending_work; i++) {
@@ -552,6 +572,26 @@ void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf)
}
}
+static int irdma_get_timeout_threshold(struct irdma_sc_dev *dev)
+{
+ u16 time_s = dev->vc_caps.cqp_timeout_s;
+
+ if (!time_s)
+ return CQP_TIMEOUT_THRESHOLD;
+
+ return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
+
+static int irdma_get_def_timeout_threshold(struct irdma_sc_dev *dev)
+{
+ u16 time_s = dev->vc_caps.cqp_def_timeout_s;
+
+ if (!time_s)
+ return CQP_DEF_CMPL_TIMEOUT_THRESHOLD;
+
+ return time_s * 1000 / dev->hw_attrs.max_cqp_compl_wait_time_ms;
+}
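
Both helpers convert a timeout budget in seconds into a count of polling iterations, where each iteration waits max_cqp_compl_wait_time_ms. A worked example with assumed values:

#include <stdio.h>

int main(void)
{
    unsigned short time_s = 4;  /* assumed vc_caps.cqp_timeout_s */
    unsigned int wait_ms = 10;  /* assumed max_cqp_compl_wait_time_ms */

    /* 4 s budget / 10 ms per poll = 400 iterations before declaring
     * the CQP command stuck.
     */
    printf("threshold = %u iterations\n", time_s * 1000 / wait_ms);
    return 0;
}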
+
/**
* irdma_wait_event - wait for completion
* @rf: RDMA PCI function
@@ -561,6 +601,7 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
struct irdma_cqp_request *cqp_request)
{
struct irdma_cqp_timeout cqp_timeout = {};
+ int timeout_threshold = irdma_get_timeout_threshold(&rf->sc_dev);
bool cqp_error = false;
int err_code = 0;
@@ -572,9 +613,17 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS)))
break;
+ if (cqp_request->pending)
+ /* There was a deferred or pending completion
+ * received for this CQP request, so we need
+ * to wait longer than usual.
+ */
+ timeout_threshold =
+ irdma_get_def_timeout_threshold(&rf->sc_dev);
+
irdma_check_cqp_progress(&cqp_timeout, &rf->sc_dev);
- if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD)
+ if (cqp_timeout.count < timeout_threshold)
continue;
if (!rf->reset) {
@@ -649,6 +698,9 @@ static const char *const irdma_cqp_cmd_names[IRDMA_MAX_CQP_OPS] = {
[IRDMA_OP_ADD_LOCAL_MAC_ENTRY] = "Add Local MAC Entry Cmd",
[IRDMA_OP_DELETE_LOCAL_MAC_ENTRY] = "Delete Local MAC Entry Cmd",
[IRDMA_OP_CQ_MODIFY] = "CQ Modify Cmd",
+ [IRDMA_OP_SRQ_CREATE] = "Create SRQ Cmd",
+ [IRDMA_OP_SRQ_MODIFY] = "Modify SRQ Cmd",
+ [IRDMA_OP_SRQ_DESTROY] = "Destroy SRQ Cmd",
};
static const struct irdma_cqp_err_info irdma_noncrit_err_list[] = {
@@ -1017,7 +1069,6 @@ int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
cqp_info = &cqp_request->info;
qp_info = &cqp_request->info.in.u.qp_create.info;
- memset(qp_info, 0, sizeof(*qp_info));
qp_info->cq_num_valid = true;
qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
@@ -1065,6 +1116,26 @@ static void irdma_dealloc_push_page(struct irdma_pci_f *rf,
irdma_put_cqp_request(&rf->cqp, cqp_request);
}
+static void irdma_free_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 qp_num)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ unsigned long flags;
+
+ if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev < IRDMA_GEN_3)
+ return;
+
+ irdma_vchnl_req_del_vport(&rf->sc_dev, iwdev->vport_id, qp_num);
+
+ if (qp_num == 1) {
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rf->hwqp1_rsvd = false;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ } else if (qp_num > 2) {
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ }
+}
+
/**
* irdma_free_qp_rsrc - free up memory resources for qp
* @iwqp: qp ptr (user or kernel)
@@ -1073,7 +1144,7 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
{
struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_pci_f *rf = iwdev->rf;
- u32 qp_num = iwqp->ibqp.qp_num;
+ u32 qp_num = iwqp->sc_qp.qp_uk.qp_id;
irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
irdma_dealloc_push_page(rf, &iwqp->sc_qp);
@@ -1083,8 +1154,12 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
iwqp->sc_qp.user_pri);
}
- if (qp_num > 2)
- irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ if (iwqp->ibqp.qp_type == IB_QPT_GSI) {
+ irdma_free_gsi_qp_rsrc(iwqp, qp_num);
+ } else {
+ if (qp_num > 2)
+ irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
+ }
dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
iwqp->q2_ctx_mem.va = NULL;
@@ -1096,6 +1171,30 @@ void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
}
/**
+ * irdma_srq_wq_destroy - send srq destroy cqp
+ * @rf: RDMA PCI function
+ * @srq: hardware control srq
+ */
+void irdma_srq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_srq *srq)
+{
+ struct irdma_cqp_request *cqp_request;
+ struct cqp_cmds_info *cqp_info;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_destroy.srq = srq;
+ cqp_info->in.u.srq_destroy.scratch = (uintptr_t)cqp_request;
+
+ irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+}
+
+/**
* irdma_cq_wq_destroy - send cq destroy cqp
* @rf: RDMA PCI function
* @cq: hardware control cq
@@ -1244,7 +1343,6 @@ int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
cqp_info->post_sq = 1;
cqp_info->in.u.qp_destroy.qp = qp;
@@ -1650,7 +1748,6 @@ int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
cqp_info->post_sq = 1;
cqp_info->in.u.stats_gather.info = pestat->gather_info;
@@ -1690,7 +1787,6 @@ int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = cmd;
cqp_info->post_sq = 1;
cqp_info->in.u.stats_manage.info = *stats_info;
@@ -1791,7 +1887,6 @@ int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
return -ENOMEM;
cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
cqp_info->cqp_cmd = cmd;
cqp_info->post_sq = 1;
cqp_info->in.u.ws_node.info = *node_info;
@@ -2258,21 +2353,6 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
}
-bool irdma_cq_empty(struct irdma_cq *iwcq)
-{
- struct irdma_cq_uk *ukcq;
- u64 qword3;
- __le64 *cqe;
- u8 polarity;
-
- ukcq = &iwcq->sc_cq.cq_uk;
- cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
- get_64bit_val(cqe, 24, &qword3);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
-
- return polarity != ukcq->polarity;
-}
-
void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
{
struct irdma_cmpl_gen *cmpl_node;
@@ -2334,6 +2414,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
struct irdma_ring *sq_ring = &qp->sq_ring;
struct irdma_ring *rq_ring = &qp->rq_ring;
+ struct irdma_cq *iwscq = iwqp->iwscq;
+ struct irdma_cq *iwrcq = iwqp->iwrcq;
struct irdma_cmpl_gen *cmpl;
__le64 *sw_wqe;
u64 wqe_qword;
@@ -2341,8 +2423,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
bool compl_generated = false;
unsigned long flags1;
- spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
- if (irdma_cq_empty(iwqp->iwscq)) {
+ spin_lock_irqsave(&iwscq->lock, flags1);
+ if (irdma_uk_cq_empty(&iwscq->sc_cq.cq_uk)) {
unsigned long flags2;
spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2350,7 +2432,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
spin_unlock_irqrestore(&iwqp->lock, flags2);
- spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ spin_unlock_irqrestore(&iwscq->lock, flags1);
return;
}
@@ -2369,24 +2451,24 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
kfree(cmpl);
continue;
}
- ibdev_dbg(iwqp->iwscq->ibcq.device,
+ ibdev_dbg(iwscq->ibcq.device,
"DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
__func__, cmpl->cpi.wr_id, qp->qp_id);
- list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
+ list_add_tail(&cmpl->list, &iwscq->cmpl_generated);
compl_generated = true;
}
spin_unlock_irqrestore(&iwqp->lock, flags2);
- spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ spin_unlock_irqrestore(&iwscq->lock, flags1);
if (compl_generated)
- irdma_comp_handler(iwqp->iwscq);
+ irdma_comp_handler(iwscq);
} else {
- spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+ spin_unlock_irqrestore(&iwscq->lock, flags1);
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
}
- spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
- if (irdma_cq_empty(iwqp->iwrcq)) {
+ spin_lock_irqsave(&iwrcq->lock, flags1);
+ if (irdma_uk_cq_empty(&iwrcq->sc_cq.cq_uk)) {
unsigned long flags2;
spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2394,7 +2476,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
if (!cmpl) {
spin_unlock_irqrestore(&iwqp->lock, flags2);
- spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ spin_unlock_irqrestore(&iwrcq->lock, flags1);
return;
}
@@ -2406,20 +2488,20 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
/* remove the RQ WR by moving RQ tail */
IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
- ibdev_dbg(iwqp->iwrcq->ibcq.device,
+ ibdev_dbg(iwrcq->ibcq.device,
"DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
__func__, cmpl->cpi.wr_id, qp->qp_id,
wqe_idx);
- list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
+ list_add_tail(&cmpl->list, &iwrcq->cmpl_generated);
compl_generated = true;
}
spin_unlock_irqrestore(&iwqp->lock, flags2);
- spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ spin_unlock_irqrestore(&iwrcq->lock, flags1);
if (compl_generated)
- irdma_comp_handler(iwqp->iwrcq);
+ irdma_comp_handler(iwrcq);
} else {
- spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+ spin_unlock_irqrestore(&iwrcq->lock, flags1);
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index da5a41b275d8..6d9af41a2884 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -27,7 +27,8 @@ static int irdma_query_device(struct ib_device *ibdev,
irdma_fw_minor_ver(&rf->sc_dev);
props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
IB_DEVICE_MEM_MGT_EXTENSIONS;
- props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+ if (hw_attrs->uk_attrs.hw_rev < IRDMA_GEN_3)
+ props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
props->vendor_id = pcidev->vendor;
props->vendor_part_id = pcidev->device;
@@ -41,7 +42,8 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_cq = rf->max_cq - rf->used_cqs;
props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
- props->max_mw = props->max_mr;
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3)
+ props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
@@ -56,9 +58,21 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
props->max_total_mcast_qp_attach = rf->max_qp * IRDMA_MAX_MGS_PER_CTX;
props->max_fast_reg_page_list_len = IRDMA_MAX_PAGES_PER_FMR;
-#define HCA_CLOCK_TIMESTAMP_MASK 0x1ffff
- if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_2)
- props->timestamp_mask = HCA_CLOCK_TIMESTAMP_MASK;
+ props->max_srq = rf->max_srq - rf->used_srqs;
+ props->max_srq_wr = IRDMA_MAX_SRQ_WRS;
+ props->max_srq_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
+ if (hw_attrs->uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ props->atomic_cap = IB_ATOMIC_HCA;
+ else
+ props->atomic_cap = IB_ATOMIC_NONE;
+ props->masked_atomic_cap = props->atomic_cap;
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3) {
+#define HCA_CORE_CLOCK_KHZ 1000000UL
+ props->timestamp_mask = GENMASK(31, 0);
+ props->hca_core_clock = HCA_CORE_CLOCK_KHZ;
+ }
+ if (hw_attrs->uk_attrs.hw_rev >= IRDMA_GEN_3)
+ props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
return 0;
}
@@ -292,6 +306,10 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
ucontext->iwdev = iwdev;
ucontext->abi_ver = req.userspace_ver;
+ if (!(req.comp_mask & IRDMA_SUPPORT_WQE_FORMAT_V2) &&
+ uk_attrs->hw_rev >= IRDMA_GEN_3)
+ return -EOPNOTSUPP;
+
if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
ucontext->use_raw_attrs = true;
@@ -332,6 +350,8 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
+ uresp.max_hw_srq_quanta = uk_attrs->max_hw_srq_quanta;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_MAX_HW_SRQ_QUANTA;
if (ib_copy_to_udata(udata, &uresp,
min(sizeof(uresp), udata->outlen))) {
rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
@@ -343,6 +363,8 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
spin_lock_init(&ucontext->cq_reg_mem_list_lock);
INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
spin_lock_init(&ucontext->qp_reg_mem_list_lock);
+ INIT_LIST_HEAD(&ucontext->srq_reg_mem_list);
+ spin_lock_init(&ucontext->srq_reg_mem_list_lock);
return 0;
@@ -521,7 +543,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
iwqp->sc_qp.qp_uk.destroy_pending = true;
- if (iwqp->iwarp_state == IRDMA_QP_STATE_RTS)
+ if (iwqp->iwarp_state >= IRDMA_QP_STATE_IDLE)
irdma_modify_qp_to_err(&iwqp->sc_qp);
if (!iwqp->user_mode)
@@ -541,6 +563,9 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
irdma_remove_push_mmap_entries(iwqp);
+
+ if (iwqp->sc_qp.qp_uk.qp_id == 1)
+ iwdev->rf->hwqp1_rsvd = false;
irdma_free_qp_rsrc(iwqp);
return 0;
@@ -564,7 +589,11 @@ static void irdma_setup_virt_qp(struct irdma_device *iwdev,
if (iwpbl->pbl_allocated) {
init_info->virtual_map = true;
init_info->sq_pa = qpmr->sq_pbl.idx;
- init_info->rq_pa = qpmr->rq_pbl.idx;
+ /* The QP's RQ must use a contiguous buffer
+ * when the QP is associated with an SRQ.
+ */
+ init_info->rq_pa = init_info->qp_uk_init_info.srq_uk ?
+ qpmr->rq_pa : qpmr->rq_pbl.idx;
} else {
init_info->sq_pa = qpmr->sq_pbl.addr;
init_info->rq_pa = qpmr->rq_pbl.addr;
@@ -719,6 +748,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
+ ukinfo->qp_id = info->qp_uk_init_info.qp_id;
iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
@@ -742,7 +772,6 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
cqp_info = &cqp_request->info;
qp_info = &cqp_request->info.in.u.qp_create.info;
- memset(qp_info, 0, sizeof(*qp_info));
qp_info->mac_valid = true;
qp_info->cq_num_valid = true;
qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
@@ -775,9 +804,12 @@ static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
roce_info = &iwqp->roce_info;
ether_addr_copy(roce_info->mac_addr, iwdev->netdev->dev_addr);
+ if (iwqp->ibqp.qp_type == IB_QPT_GSI && iwqp->ibqp.qp_num != 1)
+ roce_info->is_qp1 = true;
roce_info->rd_en = true;
roce_info->wr_rdresp_en = true;
- roce_info->bind_en = true;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ roce_info->bind_en = true;
roce_info->dcqcn_en = false;
roce_info->rtomin = 5;
@@ -808,7 +840,6 @@ static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
ether_addr_copy(iwarp_info->mac_addr, iwdev->netdev->dev_addr);
iwarp_info->rd_en = true;
iwarp_info->wr_rdresp_en = true;
- iwarp_info->bind_en = true;
iwarp_info->ecn_en = true;
iwarp_info->rtomin = 5;
@@ -864,6 +895,47 @@ static void irdma_flush_worker(struct work_struct *work)
irdma_generate_flush_completions(iwqp);
}
+static int irdma_setup_gsi_qp_rsrc(struct irdma_qp *iwqp, u32 *qp_num)
+{
+ struct irdma_device *iwdev = iwqp->iwdev;
+ struct irdma_pci_f *rf = iwdev->rf;
+ unsigned long flags;
+ int ret;
+
+ if (rf->rdma_ver <= IRDMA_GEN_2) {
+ *qp_num = 1;
+ return 0;
+ }
+
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ if (!rf->hwqp1_rsvd) {
+ *qp_num = 1;
+ rf->hwqp1_rsvd = true;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ ret = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
+ qp_num, &rf->next_qp);
+ if (ret)
+ return ret;
+ }
+
+ ret = irdma_vchnl_req_add_vport(&rf->sc_dev, iwdev->vport_id, *qp_num,
+ (&iwdev->vsi)->qos);
+ if (ret) {
+ if (*qp_num != 1) {
+ irdma_free_rsrc(rf, rf->allocated_qps, *qp_num);
+ } else {
+ spin_lock_irqsave(&rf->rsrc_lock, flags);
+ rf->hwqp1_rsvd = false;
+ spin_unlock_irqrestore(&rf->rsrc_lock, flags);
+ }
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* irdma_create_qp - create qp
* @ibqp: ptr of qp
@@ -889,6 +961,18 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct irdma_uk_attrs *uk_attrs = &dev->hw_attrs.uk_attrs;
struct irdma_qp_init_info init_info = {};
struct irdma_qp_host_ctx_info *ctx_info;
+ struct irdma_srq *iwsrq;
+ bool srq_valid = false;
+ u32 srq_id = 0;
+
+ if (init_attr->srq) {
+ iwsrq = to_iwsrq(init_attr->srq);
+ srq_valid = true;
+ srq_id = iwsrq->srq_num;
+ init_attr->cap.max_recv_sge = uk_attrs->max_hw_wq_frags;
+ init_attr->cap.max_recv_wr = 4;
+ init_info.qp_uk_init_info.srq_uk = &iwsrq->sc_srq.srq_uk;
+ }
err_code = irdma_validate_qp_attrs(init_attr, iwdev);
if (err_code)
@@ -925,16 +1009,20 @@ static int irdma_create_qp(struct ib_qp *ibqp,
init_info.host_ctx = (__le64 *)(init_info.q2 + IRDMA_Q2_BUF_SIZE);
init_info.host_ctx_pa = init_info.q2_pa + IRDMA_Q2_BUF_SIZE;
- if (init_attr->qp_type == IB_QPT_GSI)
- qp_num = 1;
- else
+ if (init_attr->qp_type == IB_QPT_GSI) {
+ err_code = irdma_setup_gsi_qp_rsrc(iwqp, &qp_num);
+ if (err_code)
+ goto error;
+ iwqp->ibqp.qp_num = 1;
+ } else {
err_code = irdma_alloc_rsrc(rf, rf->allocated_qps, rf->max_qp,
&qp_num, &rf->next_qp);
- if (err_code)
- goto error;
+ if (err_code)
+ goto error;
+ iwqp->ibqp.qp_num = qp_num;
+ }
iwqp->iwpd = iwpd;
- iwqp->ibqp.qp_num = qp_num;
qp = &iwqp->sc_qp;
iwqp->iwscq = to_iwcq(init_attr->send_cq);
iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
@@ -991,13 +1079,22 @@ static int irdma_create_qp(struct ib_qp *ibqp,
}
ctx_info = &iwqp->ctx_info;
+ ctx_info->srq_valid = srq_valid;
+ ctx_info->srq_id = srq_id;
ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
- if (rdma_protocol_roce(&iwdev->ibdev, 1))
+ if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
+ if (dev->ws_add(&iwdev->vsi, 0)) {
+ irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
+ err_code = -EINVAL;
+ goto error;
+ }
+ irdma_qp_add_qos(&iwqp->sc_qp);
irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
- else
+ } else {
irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
+ }
err_code = irdma_cqp_create_qp_cmd(iwqp);
if (err_code)
@@ -1009,16 +1106,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
rf->qp_table[qp_num] = iwqp;
- if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
- if (dev->ws_add(&iwdev->vsi, 0)) {
- irdma_cqp_qp_destroy_cmd(&rf->sc_dev, &iwqp->sc_qp);
- err_code = -EINVAL;
- goto error;
- }
-
- irdma_qp_add_qos(&iwqp->sc_qp);
- }
-
if (udata) {
/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
if (udata->outlen < sizeof(uresp)) {
@@ -1063,6 +1150,8 @@ static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
acc_flags |= IB_ACCESS_REMOTE_READ;
if (iwqp->roce_info.bind_en)
acc_flags |= IB_ACCESS_MW_BIND;
+ if (iwqp->ctx_info.remote_atomics_en)
+ acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
} else {
if (iwqp->iwarp_info.wr_rdresp_en) {
acc_flags |= IB_ACCESS_LOCAL_WRITE;
@@ -1070,8 +1159,8 @@ static int irdma_get_ib_acc_flags(struct irdma_qp *iwqp)
}
if (iwqp->iwarp_info.rd_en)
acc_flags |= IB_ACCESS_REMOTE_READ;
- if (iwqp->iwarp_info.bind_en)
- acc_flags |= IB_ACCESS_MW_BIND;
+ if (iwqp->ctx_info.remote_atomics_en)
+ acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
}
return acc_flags;
}
@@ -1110,6 +1199,7 @@ static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->pkey_index = iwqp->roce_info.p_key;
attr->retry_cnt = iwqp->udp_info.rexmit_thresh;
attr->rnr_retry = iwqp->udp_info.rnr_nak_thresh;
+ attr->min_rnr_timer = iwqp->udp_info.min_rnr_timer;
attr->max_rd_atomic = iwqp->roce_info.ord_size;
attr->max_dest_rd_atomic = iwqp->roce_info.ird_size;
}
@@ -1118,6 +1208,7 @@ static int irdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->qp_context = iwqp->ibqp.qp_context;
init_attr->send_cq = iwqp->ibqp.send_cq;
init_attr->recv_cq = iwqp->ibqp.recv_cq;
+ init_attr->srq = iwqp->ibqp.srq;
init_attr->cap = attr->cap;
return 0;
@@ -1242,6 +1333,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_RNR_RETRY)
udp_info->rnr_nak_thresh = attr->rnr_retry;
+ if (attr_mask & IB_QP_MIN_RNR_TIMER &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3)
+ udp_info->min_rnr_timer = attr->min_rnr_timer;
+
if (attr_mask & IB_QP_RETRY_CNT)
udp_info->rexmit_thresh = attr->retry_cnt;
@@ -1362,6 +1457,9 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
roce_info->wr_rdresp_en = true;
if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
roce_info->rd_en = true;
+ if (dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
+ ctx_info->remote_atomics_en = true;
}
wait_event(iwqp->mod_qp_waitq, !atomic_read(&iwqp->hw_mod_qp_pend));
@@ -1777,6 +1875,24 @@ exit:
}
/**
+ * irdma_srq_free_rsrc - free up resources for srq
+ * @rf: RDMA PCI function
+ * @iwsrq: srq ptr
+ */
+static void irdma_srq_free_rsrc(struct irdma_pci_f *rf, struct irdma_srq *iwsrq)
+{
+ struct irdma_sc_srq *srq = &iwsrq->sc_srq;
+
+ if (!iwsrq->user_mode) {
+ dma_free_coherent(rf->sc_dev.hw->device, iwsrq->kmem.size,
+ iwsrq->kmem.va, iwsrq->kmem.pa);
+ iwsrq->kmem.va = NULL;
+ }
+
+ irdma_free_rsrc(rf, rf->allocated_srqs, srq->srq_uk.srq_id);
+}
+
+/**
* irdma_cq_free_rsrc - free up resources for cq
* @rf: RDMA PCI function
* @iwcq: cq ptr
@@ -1840,6 +1956,22 @@ static int irdma_process_resize_list(struct irdma_cq *iwcq,
}
/**
+ * irdma_destroy_srq - destroy srq
+ * @ibsrq: srq pointer
+ * @udata: user data
+ */
+static int irdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_sc_srq *srq = &iwsrq->sc_srq;
+
+ irdma_srq_wq_destroy(iwdev->rf, srq);
+ irdma_srq_free_rsrc(iwdev->rf, iwsrq);
+ return 0;
+}
+
+/**
* irdma_destroy_cq - destroy cq
* @ib_cq: cq pointer
* @udata: user data
@@ -1897,6 +2029,7 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
struct irdma_pci_f *rf;
struct irdma_cq_buf *cq_buf = NULL;
unsigned long flags;
+ u8 cqe_size;
int ret;
iwdev = to_iwdev(ibcq->device);
@@ -1913,9 +2046,18 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
return -EINVAL;
if (!iwcq->user_mode) {
- entries++;
- if (rf->sc_dev.hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ entries += 2;
+
+ if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct &&
+ dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
+
+ if (entries & 1)
+ entries += 1; /* cq size must be an even number */
+
+ cqe_size = iwcq->sc_cq.cq_uk.avoid_mem_cflct ? 64 : 32;
+ if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
+ entries += 2;
}
info.cq_size = max(entries, 4);
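
The resize path applies the same sizing rules as irdma_create_cq() later in this patch: pad by two, double on GEN2+ when 32-byte CQEs are in use, keep the count even, and step over a ring that would land exactly on one page. A worked example (IRDMA_HW_PAGE_SIZE assumed to be 4096):

#include <stdio.h>

int main(void)
{
    unsigned int entries = 62, cqe_size = 32;  /* GEN2, no 64-byte CQEs */

    entries += 2;                    /* 64 */
    entries *= 2;                    /* 128: GEN2+, 32-byte CQEs */
    if (entries & 1)
        entries += 1;                /* already even here */
    if (entries * cqe_size == 4096)  /* 128 * 32 hits one page exactly */
        entries += 2;                /* 130 */

    printf("cq_size = %u\n", entries);  /* 130 */
    return 0;
}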
@@ -2022,10 +2164,298 @@ error:
return ret;
}
+/**
+ * irdma_srq_event - event notification for srq limit
+ * @srq: shared srq struct
+ */
+void irdma_srq_event(struct irdma_sc_srq *srq)
+{
+ struct irdma_srq *iwsrq = container_of(srq, struct irdma_srq, sc_srq);
+ struct ib_srq *ibsrq = &iwsrq->ibsrq;
+ struct ib_event event;
+
+ srq->srq_limit = 0;
+
+ if (!ibsrq->event_handler)
+ return;
+
+ event.device = ibsrq->device;
+ event.element.port_num = 1;
+ event.element.srq = ibsrq;
+ event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ ibsrq->event_handler(&event, ibsrq->srq_context);
+}
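
On the userspace side, the IB_EVENT_SRQ_LIMIT_REACHED dispatched here surfaces through the verbs async event channel. A hedged sketch of a consumer, with error handling trimmed for brevity:

#include <stdio.h>
#include <infiniband/verbs.h>

/* Sketch of the userspace side of the SRQ limit event: once the kernel
 * path above fires, a verbs consumer sees IBV_EVENT_SRQ_LIMIT_REACHED
 * on the async event channel.
 */
static void drain_async_events(struct ibv_context *ctx)
{
    struct ibv_async_event ev;

    if (ibv_get_async_event(ctx, &ev))  /* blocks for the next event */
        return;

    if (ev.event_type == IBV_EVENT_SRQ_LIMIT_REACHED) {
        /* The limit is disarmed after firing (srq->srq_limit = 0
         * above); replenish the SRQ and re-arm via ibv_modify_srq().
         */
        fprintf(stderr, "SRQ low-water mark hit\n");
    }

    ibv_ack_async_event(&ev);
}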
+
+/**
+ * irdma_modify_srq - modify srq request
+ * @ibsrq: srq's pointer for modify
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
+ */
+static int irdma_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+ enum ib_srq_attr_mask attr_mask,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_modify_srq_info *info;
+ struct cqp_cmds_info *cqp_info;
+ int status;
+
+ if (attr_mask & IB_SRQ_MAX_WR)
+ return -EINVAL;
+
+ if (!(attr_mask & IB_SRQ_LIMIT))
+ return 0;
+
+ if (attr->srq_limit > iwsrq->sc_srq.srq_uk.srq_size)
+ return -EINVAL;
+
+ /* Execute this cqp op synchronously, so we can update srq_limit
+ * upon successful completion.
+ */
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request)
+ return -ENOMEM;
+
+ cqp_info = &cqp_request->info;
+ info = &cqp_info->in.u.srq_modify.info;
+ info->srq_limit = attr->srq_limit;
+ if (info->srq_limit > 0xFFF)
+ info->srq_limit = 0xFFF;
+ info->arm_limit_event = 1;
+
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_MODIFY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_modify.srq = &iwsrq->sc_srq;
+ cqp_info->in.u.srq_modify.scratch = (uintptr_t)cqp_request;
+ status = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (status)
+ return status;
+
+ iwsrq->sc_srq.srq_limit = info->srq_limit;
+
+ return 0;
+}
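
The arming request that reaches this function typically originates from a verbs consumer; a hedged sketch of that call follows. Note the driver rejects IB_SRQ_MAX_WR (no resize) and clamps the limit to 0xFFF WRs.

#include <infiniband/verbs.h>

/* Sketch of arming the SRQ limit from userspace; this is the request
 * that lands in irdma_modify_srq() above with IB_SRQ_LIMIT set.
 */
static int arm_srq_limit(struct ibv_srq *srq, unsigned int low_water)
{
    struct ibv_srq_attr attr = { .srq_limit = low_water };

    /* Only IBV_SRQ_LIMIT is supported by this driver; IBV_SRQ_MAX_WR
     * returns -EINVAL.
     */
    return ibv_modify_srq(srq, &attr, IBV_SRQ_LIMIT);
}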
+
+static int irdma_setup_umode_srq(struct irdma_device *iwdev,
+ struct irdma_srq *iwsrq,
+ struct irdma_srq_init_info *info,
+ struct ib_udata *udata)
+{
+#define IRDMA_CREATE_SRQ_MIN_REQ_LEN \
+ offsetofend(struct irdma_create_srq_req, user_shadow_area)
+ struct irdma_create_srq_req req = {};
+ struct irdma_ucontext *ucontext;
+ struct irdma_srq_mr *srqmr;
+ struct irdma_pbl *iwpbl;
+ unsigned long flags;
+
+ iwsrq->user_mode = true;
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+
+ if (udata->inlen < IRDMA_CREATE_SRQ_MIN_REQ_LEN)
+ return -EINVAL;
+
+ if (ib_copy_from_udata(&req, udata,
+ min(sizeof(req), udata->inlen)))
+ return -EFAULT;
+
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ iwpbl = irdma_get_pbl((unsigned long)req.user_srq_buf,
+ &ucontext->srq_reg_mem_list);
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+ if (!iwpbl)
+ return -EPROTO;
+
+ iwsrq->iwpbl = iwpbl;
+ srqmr = &iwpbl->srq_mr;
+
+ if (iwpbl->pbl_allocated) {
+ info->virtual_map = true;
+ info->pbl_chunk_size = 1;
+ info->first_pm_pbl_idx = srqmr->srq_pbl.idx;
+ info->leaf_pbl_size = 1;
+ } else {
+ info->srq_pa = srqmr->srq_pbl.addr;
+ }
+ info->shadow_area_pa = srqmr->shadow;
+
+ return 0;
+}
+
+static int irdma_setup_kmode_srq(struct irdma_device *iwdev,
+ struct irdma_srq *iwsrq,
+ struct irdma_srq_init_info *info, u32 depth,
+ u8 shift)
+{
+ struct irdma_srq_uk_init_info *ukinfo = &info->srq_uk_init_info;
+ struct irdma_dma_mem *mem = &iwsrq->kmem;
+ u32 size, ring_size;
+
+ ring_size = depth * IRDMA_QP_WQE_MIN_SIZE;
+ size = ring_size + (IRDMA_SHADOW_AREA_SIZE << 3);
+
+ mem->size = ALIGN(size, 256);
+ mem->va = dma_alloc_coherent(iwdev->rf->hw.device, mem->size,
+ &mem->pa, GFP_KERNEL);
+ if (!mem->va)
+ return -ENOMEM;
+
+ ukinfo->srq = mem->va;
+ ukinfo->srq_size = depth >> shift;
+ ukinfo->shadow_area = mem->va + ring_size;
+
+ info->srq_pa = mem->pa;
+ info->shadow_area_pa = info->srq_pa + ring_size;
+
+ return 0;
+}
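
A worked example of the single-allocation layout built here: the WQE ring followed by the shadow area, rounded up to a 256-byte boundary. The 32-byte WQE quantum and 8-quadword shadow area mirror IRDMA_QP_WQE_MIN_SIZE and IRDMA_SHADOW_AREA_SIZE as used above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t depth = 256, shift = 1;            /* assumed inputs */
    uint32_t ring_size = depth * 32;            /* WQE ring bytes: 8192 */
    uint32_t size = ring_size + (8 << 3);       /* + 64B shadow: 8256 */
    uint32_t aligned = (size + 255) & ~255u;    /* ALIGN(size, 256): 8448 */

    printf("ring=%u shadow_off=%u total=%u srq_size=%u\n",
           ring_size, ring_size, aligned, depth >> shift);
    return 0;
}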
+
+/**
+ * irdma_create_srq - create srq
+ * @ibsrq: IB SRQ pointer
+ * @initattrs: attributes for srq
+ * @udata: user data for create srq
+ */
+static int irdma_create_srq(struct ib_srq *ibsrq,
+ struct ib_srq_init_attr *initattrs,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(ibsrq->device);
+ struct ib_srq_attr *attr = &initattrs->attr;
+ struct irdma_pd *iwpd = to_iwpd(ibsrq->pd);
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_srq_uk_init_info *ukinfo;
+ struct irdma_cqp_request *cqp_request;
+ struct irdma_srq_init_info info = {};
+ struct irdma_pci_f *rf = iwdev->rf;
+ struct irdma_uk_attrs *uk_attrs;
+ struct cqp_cmds_info *cqp_info;
+ int err_code = 0;
+ u32 depth;
+ u8 shift;
+
+ uk_attrs = &rf->sc_dev.hw_attrs.uk_attrs;
+ ukinfo = &info.srq_uk_init_info;
+
+ if (initattrs->srq_type != IB_SRQT_BASIC)
+ return -EOPNOTSUPP;
+
+ if (!(uk_attrs->feature_flags & IRDMA_FEATURE_SRQ) ||
+ attr->max_sge > uk_attrs->max_hw_wq_frags)
+ return -EINVAL;
+
+ refcount_set(&iwsrq->refcnt, 1);
+ spin_lock_init(&iwsrq->lock);
+ err_code = irdma_alloc_rsrc(rf, rf->allocated_srqs, rf->max_srq,
+ &iwsrq->srq_num, &rf->next_srq);
+ if (err_code)
+ return err_code;
+
+ ukinfo->max_srq_frag_cnt = attr->max_sge;
+ ukinfo->uk_attrs = uk_attrs;
+ ukinfo->srq_id = iwsrq->srq_num;
+
+ irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_srq_frag_cnt, 0,
+ &shift);
+
+ err_code = irdma_get_srqdepth(ukinfo->uk_attrs, attr->max_wr,
+ shift, &depth);
+ if (err_code)
+ return err_code;
+
+ /* Actual SRQ size in WRs for ring and HW */
+ ukinfo->srq_size = depth >> shift;
+
+ /* Max postable WRs to SRQ */
+ iwsrq->max_wr = (depth - IRDMA_RQ_RSVD) >> shift;
+ attr->max_wr = iwsrq->max_wr;
+
+ if (udata)
+ err_code = irdma_setup_umode_srq(iwdev, iwsrq, &info, udata);
+ else
+ err_code = irdma_setup_kmode_srq(iwdev, iwsrq, &info, depth,
+ shift);
+
+ if (err_code)
+ goto free_rsrc;
+
+ info.vsi = &iwdev->vsi;
+ info.pd = &iwpd->sc_pd;
+
+ iwsrq->sc_srq.srq_uk.lock = &iwsrq->lock;
+ err_code = irdma_sc_srq_init(&iwsrq->sc_srq, &info);
+ if (err_code)
+ goto free_dmem;
+
+ cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
+ if (!cqp_request) {
+ err_code = -ENOMEM;
+ goto free_dmem;
+ }
+
+ cqp_info = &cqp_request->info;
+ cqp_info->cqp_cmd = IRDMA_OP_SRQ_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.srq_create.srq = &iwsrq->sc_srq;
+ cqp_info->in.u.srq_create.scratch = (uintptr_t)cqp_request;
+ err_code = irdma_handle_cqp_op(rf, cqp_request);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (err_code)
+ goto free_dmem;
+
+ if (udata) {
+ struct irdma_create_srq_resp resp = {};
+
+ resp.srq_id = iwsrq->srq_num;
+ resp.srq_size = ukinfo->srq_size;
+ if (ib_copy_to_udata(udata, &resp,
+ min(sizeof(resp), udata->outlen))) {
+ err_code = -EPROTO;
+ goto srq_destroy;
+ }
+ }
+
+ return 0;
+
+srq_destroy:
+ irdma_srq_wq_destroy(rf, &iwsrq->sc_srq);
+
+free_dmem:
+ if (!iwsrq->user_mode)
+ dma_free_coherent(rf->hw.device, iwsrq->kmem.size,
+ iwsrq->kmem.va, iwsrq->kmem.pa);
+free_rsrc:
+ irdma_free_rsrc(rf, rf->allocated_srqs, iwsrq->srq_num);
+ return err_code;
+}
+
+/**
+ * irdma_query_srq - get SRQ attributes
+ * @ibsrq: the SRQ to query
+ * @attr: the attributes of the SRQ
+ */
+static int irdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+{
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+
+ attr->max_wr = iwsrq->max_wr;
+ attr->max_sge = iwsrq->sc_srq.srq_uk.max_srq_frag_cnt;
+ attr->srq_limit = iwsrq->sc_srq.srq_limit;
+
+ return 0;
+}
+
static inline int cq_validate_flags(u32 flags, u8 hw_rev)
{
- /* GEN1 does not support CQ create flags */
- if (hw_rev == IRDMA_GEN_1)
+ /* GEN1/GEN2 do not support CQ create flags */
+ if (hw_rev <= IRDMA_GEN_2)
return flags ? -EOPNOTSUPP : 0;
return flags & ~IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION ? -EOPNOTSUPP : 0;
@@ -2058,6 +2488,8 @@ static int irdma_create_cq(struct ib_cq *ibcq,
unsigned long flags;
int err_code;
int entries = attr->cqe;
+ bool cqe_64byte_ena;
+ u8 cqe_size;
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
@@ -2078,9 +2510,14 @@ static int irdma_create_cq(struct ib_cq *ibcq,
spin_lock_init(&iwcq->lock);
INIT_LIST_HEAD(&iwcq->resize_list);
INIT_LIST_HEAD(&iwcq->cmpl_generated);
+ iwcq->cq_num = cq_num;
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;
+ cqe_64byte_ena = !!(dev->hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_64_BYTE_CQE);
+ cqe_size = cqe_64byte_ena ? 64 : 32;
+ ukinfo->avoid_mem_cflct = cqe_64byte_ena;
iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
if (attr->comp_vector < rf->ceqs_count)
info.ceq_id = attr->comp_vector;
@@ -2116,8 +2553,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
goto cq_free_rsrc;
}
- iwcq->iwpbl = iwpbl;
- iwcq->cq_mem_size = 0;
cqmr = &iwpbl->cq_mr;
if (rf->sc_dev.hw_attrs.uk_attrs.feature_flags &
@@ -2132,7 +2567,6 @@ static int irdma_create_cq(struct ib_cq *ibcq,
err_code = -EPROTO;
goto cq_free_rsrc;
}
- iwcq->iwpbl_shadow = iwpbl_shadow;
cqmr_shadow = &iwpbl_shadow->cq_mr;
info.shadow_area_pa = cqmr_shadow->cq_pbl.addr;
cqmr->split = true;
@@ -2155,12 +2589,22 @@ static int irdma_create_cq(struct ib_cq *ibcq,
goto cq_free_rsrc;
}
- entries++;
- if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
+ entries += 2;
+ if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
entries *= 2;
+
+ if (entries & 1)
+ entries += 1; /* cq size must be an even number */
+
+ if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
+ entries += 2;
+
ukinfo->cq_size = entries;
- rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
+ if (cqe_64byte_ena)
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_extended_cqe);
+ else
+ rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);
iwcq->kmem.size = ALIGN(round_up(rsize, 256), 256);
iwcq->kmem.va = dma_alloc_coherent(dev->hw->device,
iwcq->kmem.size,
@@ -2240,8 +2684,9 @@ cq_free_rsrc:
/**
* irdma_get_mr_access - get hw MR access permissions from IB access flags
* @access: IB access flags
+ * @hw_rev: Hardware version
*/
-static inline u16 irdma_get_mr_access(int access)
+static inline u16 irdma_get_mr_access(int access, u8 hw_rev)
{
u16 hw_access = 0;
@@ -2251,8 +2696,10 @@ static inline u16 irdma_get_mr_access(int access)
IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
- hw_access |= (access & IB_ACCESS_MW_BIND) ?
- IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
+ if (hw_rev >= IRDMA_GEN_3) {
+ hw_access |= (access & IB_ACCESS_MW_BIND) ?
+ IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
+ }
hw_access |= (access & IB_ZERO_BASED) ?
IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
@@ -2463,6 +2910,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
struct irdma_mr *iwmr = iwpbl->iwmr;
struct irdma_qp_mr *qpmr = &iwpbl->qp_mr;
struct irdma_cq_mr *cqmr = &iwpbl->cq_mr;
+ struct irdma_srq_mr *srqmr = &iwpbl->srq_mr;
struct irdma_hmc_pble *hmc_p;
u64 *arr = iwmr->pgaddrmem;
u32 pg_size, total;
@@ -2482,7 +2930,10 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
total = req->sq_pages + req->rq_pages;
hmc_p = &qpmr->sq_pbl;
qpmr->shadow = (dma_addr_t)arr[total];
-
+ /* Save the RQ's physical address separately in
+ * case the QP is associated with an SRQ.
+ */
+ qpmr->rq_pa = (dma_addr_t)arr[req->sq_pages];
if (lvl) {
ret = irdma_check_mem_contiguous(arr, req->sq_pages,
pg_size);
@@ -2502,6 +2953,18 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
hmc_p->addr = arr[req->sq_pages];
}
break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ hmc_p = &srqmr->srq_pbl;
+ srqmr->shadow = (dma_addr_t)arr[req->rq_pages];
+ if (lvl)
+ ret = irdma_check_mem_contiguous(arr, req->rq_pages,
+ pg_size);
+
+ if (!ret)
+ hmc_p->idx = palloc->level1.idx;
+ else
+ hmc_p->addr = arr[0];
+ break;
case IRDMA_MEMREG_TYPE_CQ:
hmc_p = &cqmr->cq_pbl;
@@ -2651,12 +3114,10 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.alloc_stag.info;
- memset(info, 0, sizeof(*info));
info->page_size = PAGE_SIZE;
info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
info->pd_id = iwpd->sc_pd.pd_id;
info->total_len = iwmr->len;
- info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
info->remote_access = true;
cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
cqp_info->post_sq = 1;
@@ -2667,7 +3128,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
if (status)
return status;
- iwmr->is_hwreg = 1;
+ iwmr->is_hwreg = true;
return 0;
}
@@ -2801,14 +3262,16 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
cqp_info = &cqp_request->info;
stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
- memset(stag_info, 0, sizeof(*stag_info));
stag_info->va = iwpbl->user_base;
stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
stag_info->stag_key = (u8)iwmr->stag;
stag_info->total_len = iwmr->len;
- stag_info->access_rights = irdma_get_mr_access(access);
+ stag_info->access_rights = irdma_get_mr_access(access,
+ iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev);
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
+ stag_info->remote_atomics_en = (access & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
stag_info->pd_id = iwpd->sc_pd.pd_id;
- stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+ stag_info->all_memory = iwmr->dma_mr;
if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
else
@@ -2835,7 +3298,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
if (!ret)
- iwmr->is_hwreg = 1;
+ iwmr->is_hwreg = true;
return ret;
}
@@ -2972,6 +3435,37 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
return 0;
}
+static int irdma_reg_user_mr_type_srq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext;
+ unsigned long flags;
+ u32 total;
+ int err;
+ u8 lvl;
+
+ total = req.rq_pages + IRDMA_SHADOW_PGCNT;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ lvl = req.rq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->srq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
struct ib_udata *udata,
struct irdma_mr *iwmr)
@@ -3063,6 +3557,12 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
goto error;
break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ err = irdma_reg_user_mr_type_srq(req, udata, iwmr);
+ if (err)
+ goto error;
+
+ break;
case IRDMA_MEMREG_TYPE_CQ:
err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
if (err)
@@ -3106,9 +3606,9 @@ static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
if (IS_ERR(umem_dmabuf)) {
- err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
- return ERR_PTR(err);
+ ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%pe]\n",
+ umem_dmabuf);
+ return ERR_CAST(umem_dmabuf);
}
iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
@@ -3155,7 +3655,6 @@ static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
cqp_info = &cqp_request->info;
info = &cqp_info->in.u.dealloc_stag.info;
- memset(info, 0, sizeof(*info));
info->pd_id = iwpd->sc_pd.pd_id;
info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
info->mr = true;
@@ -3171,7 +3670,7 @@ static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
if (status)
return status;
- iwmr->is_hwreg = 0;
+ iwmr->is_hwreg = false;
return 0;
}
@@ -3294,9 +3793,10 @@ static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags,
* @size: size of memory to register
* @access: Access rights
* @iova_start: start of virtual address for physical buffers
+ * @dma_mr: Flag indicating whether this region is a PD DMA MR
*/
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
- u64 *iova_start)
+ u64 *iova_start, bool dma_mr)
{
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_pbl *iwpbl;
@@ -3313,6 +3813,7 @@ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access
iwpbl = &iwmr->iwpbl;
iwpbl->iwmr = iwmr;
iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+ iwmr->dma_mr = dma_mr;
iwpbl->user_base = *iova_start;
stag = irdma_create_stag(iwdev);
if (!stag) {
@@ -3351,7 +3852,7 @@ static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
{
u64 kva = 0;
- return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
+ return irdma_reg_phys_mr(pd, 0, 0, acc, &kva, true);
}
/**
@@ -3382,6 +3883,14 @@ static void irdma_del_memlist(struct irdma_mr *iwmr,
}
spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
break;
+ case IRDMA_MEMREG_TYPE_SRQ:
+ spin_lock_irqsave(&ucontext->srq_reg_mem_list_lock, flags);
+ if (iwpbl->on_list) {
+ iwpbl->on_list = false;
+ list_del(&iwpbl->list);
+ }
+ spin_unlock_irqrestore(&ucontext->srq_reg_mem_list_lock, flags);
+ break;
default:
break;
}
@@ -3461,6 +3970,40 @@ static int irdma_post_send(struct ib_qp *ibqp,
if (ib_wr->send_flags & IB_SEND_FENCE)
info.read_fence = true;
switch (ib_wr->opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_ATOMIC_OPS))) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP;
+ info.op.atomic_compare_swap.tagged_offset = ib_wr->sg_list[0].addr;
+ info.op.atomic_compare_swap.remote_tagged_offset =
+ atomic_wr(ib_wr)->remote_addr;
+ info.op.atomic_compare_swap.swap_data_bytes = atomic_wr(ib_wr)->swap;
+ info.op.atomic_compare_swap.compare_data_bytes =
+ atomic_wr(ib_wr)->compare_add;
+ info.op.atomic_compare_swap.stag = ib_wr->sg_list[0].lkey;
+ info.op.atomic_compare_swap.remote_stag = atomic_wr(ib_wr)->rkey;
+ err = irdma_uk_atomic_compare_swap(ukqp, &info, false);
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(dev->hw_attrs.uk_attrs.feature_flags &
+ IRDMA_FEATURE_ATOMIC_OPS))) {
+ err = -EINVAL;
+ break;
+ }
+ info.op_type = IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD;
+ info.op.atomic_fetch_add.tagged_offset = ib_wr->sg_list[0].addr;
+ info.op.atomic_fetch_add.remote_tagged_offset =
+ atomic_wr(ib_wr)->remote_addr;
+ info.op.atomic_fetch_add.fetch_add_data_bytes =
+ atomic_wr(ib_wr)->compare_add;
+ info.op.atomic_fetch_add.stag = ib_wr->sg_list[0].lkey;
+ info.op.atomic_fetch_add.remote_stag =
+ atomic_wr(ib_wr)->rkey;
+ err = irdma_uk_atomic_fetch_add(ukqp, &info, false);
+ break;
case IB_WR_SEND_WITH_IMM:
if (ukqp->qp_caps & IRDMA_SEND_WITH_IMM) {
info.imm_data_valid = true;
@@ -3544,7 +4087,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
break;
case IB_WR_LOCAL_INV:
info.op_type = IRDMA_OP_TYPE_INV_STAG;
- info.local_fence = info.read_fence;
+ info.local_fence = true;
info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
break;
@@ -3555,7 +4098,9 @@ static int irdma_post_send(struct ib_qp *ibqp,
stag_info.signaled = info.signaled;
stag_info.read_fence = info.read_fence;
- stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
+ stag_info.access_rights =
+ irdma_get_mr_access(reg_wr(ib_wr)->access,
+ dev->hw_attrs.uk_attrs.hw_rev);
stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
@@ -3594,6 +4139,48 @@ static int irdma_post_send(struct ib_qp *ibqp,
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
}
+
+ if (err)
+ *bad_wr = ib_wr;
+
+ return err;
+}
+
+/**
+ * irdma_post_srq_recv - post receive wr for kernel application
+ * @ibsrq: ib srq pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: bad wr that caused an error
+ */
+static int irdma_post_srq_recv(struct ib_srq *ibsrq,
+ const struct ib_recv_wr *ib_wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct irdma_srq *iwsrq = to_iwsrq(ibsrq);
+ struct irdma_srq_uk *uksrq = &iwsrq->sc_srq.srq_uk;
+ struct irdma_post_rq_info post_recv = {};
+ unsigned long flags;
+ int err = 0;
+
+ spin_lock_irqsave(&iwsrq->lock, flags);
+ while (ib_wr) {
+ if (ib_wr->num_sge > uksrq->max_srq_frag_cnt) {
+ err = -EINVAL;
+ goto out;
+ }
+ post_recv.num_sges = ib_wr->num_sge;
+ post_recv.wr_id = ib_wr->wr_id;
+ post_recv.sg_list = ib_wr->sg_list;
+ err = irdma_uk_srq_post_receive(uksrq, &post_recv);
+ if (err)
+ goto out;
+
+ ib_wr = ib_wr->next;
+ }
+
+out:
+ spin_unlock_irqrestore(&iwsrq->lock, flags);
+
if (err)
*bad_wr = ib_wr;
@@ -3619,6 +4206,11 @@ static int irdma_post_recv(struct ib_qp *ibqp,
iwqp = to_iwqp(ibqp);
ukqp = &iwqp->sc_qp.qp_uk;
+ if (ukqp->srq_uk) {
+ *bad_wr = ib_wr;
+ return -EINVAL;
+ }
+
spin_lock_irqsave(&iwqp->lock, flags);
while (ib_wr) {
post_recv.num_sges = ib_wr->num_sge;
@@ -3671,6 +4263,8 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_MW_BIND_ERR;
case FLUSH_REM_INV_REQ_ERR:
return IB_WC_REM_INV_REQ_ERR;
+ case FLUSH_RNR_RETRY_EXC_ERR:
+ return IB_WC_RNR_RETRY_EXC_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
@@ -3727,8 +4321,12 @@ static void irdma_process_cqe(struct ib_wc *entry,
if (cq_poll_info->q_type == IRDMA_CQE_QTYPE_SQ) {
set_ib_wc_op_sq(cq_poll_info, entry);
} else {
- set_ib_wc_op_rq(cq_poll_info, entry,
- qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
+ if (qp->dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
+ set_ib_wc_op_rq(cq_poll_info, entry,
+ !!(qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM));
+ else
+ set_ib_wc_op_rq_gen_3(cq_poll_info, entry);
if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
cq_poll_info->stag_invalid_set) {
entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
@@ -3916,47 +4514,14 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
}
if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
- (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
+ (!irdma_uk_cq_empty(ukcq) || !list_empty(&iwcq->cmpl_generated)))
ret = 1;
spin_unlock_irqrestore(&iwcq->lock, flags);
return ret;
}
-static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
-
- immutable->max_mad_size = IB_MGMT_MAD_SIZE;
- immutable->pkey_tbl_len = attr.pkey_tbl_len;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
- struct ib_port_immutable *immutable)
-{
- struct ib_port_attr attr;
- int err;
-
- immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
- err = ib_query_port(ibdev, port_num, &attr);
- if (err)
- return err;
- immutable->gid_tbl_len = attr.gid_tbl_len;
-
- return 0;
-}
-
-static const struct rdma_stat_desc irdma_hw_stat_names[] = {
+static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
/* gen1 - 32-bit */
[IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
[IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
@@ -3964,9 +4529,6 @@ static const struct rdma_stat_desc irdma_hw_stat_names[] = {
[IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
[IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
[IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
- [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
- [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
[IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
/* gen1 - 64-bit */
[IRDMA_HW_STAT_INDEX_IP4RXOCTS].name = "ip4InOctets",
@@ -3985,16 +4547,14 @@ static const struct rdma_stat_desc irdma_hw_stat_names[] = {
[IRDMA_HW_STAT_INDEX_IP6TXPKTS].name = "ip6OutPkts",
[IRDMA_HW_STAT_INDEX_IP6TXFRAGS].name = "ip6OutSegRqd",
[IRDMA_HW_STAT_INDEX_IP6TXMCPKTS].name = "ip6OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "tcpInSegs",
- [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "tcpOutSegs",
- [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "iwInRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "iwInRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "iwInRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "iwOutRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "iwOutRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "iwOutRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "iwRdmaBnd",
- [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "iwRdmaInv",
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS].name = "InRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS].name = "InRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS].name = "InRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS].name = "OutRdmaReads",
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS].name = "OutRdmaSends",
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS].name = "OutRdmaWrites",
+ [IRDMA_HW_STAT_INDEX_RDMAVBND].name = "RdmaBnd",
+ [IRDMA_HW_STAT_INDEX_RDMAVINV].name = "RdmaInv",
/* gen2 - 32-bit */
[IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
@@ -4008,9 +4568,59 @@ static const struct rdma_stat_desc irdma_hw_stat_names[] = {
[IRDMA_HW_STAT_INDEX_UDPRXPKTS].name = "RxUDP",
[IRDMA_HW_STAT_INDEX_UDPTXPKTS].name = "TxUDP",
[IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS].name = "RxECNMrkd",
-
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "RetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "InOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "InProtoErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS].name = "InSegs",
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG].name = "OutSegs",
+
+ /* gen3 */
+ [IRDMA_HW_STAT_INDEX_RNR_SENT].name = "RNR sent",
+ [IRDMA_HW_STAT_INDEX_RNR_RCVD].name = "RNR received",
+ [IRDMA_HW_STAT_INDEX_RDMAORDLMTCNT].name = "ord limit count",
+ [IRDMA_HW_STAT_INDEX_RDMAIRDLMTCNT].name = "ird limit count",
+ [IRDMA_HW_STAT_INDEX_RDMARXATS].name = "Rx atomics",
+ [IRDMA_HW_STAT_INDEX_RDMATXATS].name = "Tx atomics",
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR].name = "Nak Sequence Error",
+ [IRDMA_HW_STAT_INDEX_NAKSEQERR_IMPLIED].name = "Nak Sequence Error Implied",
+ [IRDMA_HW_STAT_INDEX_RTO].name = "RTO",
+ [IRDMA_HW_STAT_INDEX_RXOOOPKTS].name = "Rcvd Out of order packets",
+ [IRDMA_HW_STAT_INDEX_ICRCERR].name = "CRC errors",
};
+static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
+static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+
+ return 0;
+}
+
static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
{
struct irdma_device *iwdev = to_iwdev(dev);
@@ -4034,7 +4644,7 @@ static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
int num_counters = dev->hw_attrs.max_stat_idx;
unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
- return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
+ return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
lifespan);
}
@@ -4539,7 +5149,7 @@ static bool irdma_ah_exists(struct irdma_device *iwdev,
new_ah->sc_ah.ah_info.dest_ip_addr[2] ^
new_ah->sc_ah.ah_info.dest_ip_addr[3];
- hash_for_each_possible(iwdev->ah_hash_tbl, ah, list, key) {
+ hash_for_each_possible(iwdev->rf->ah_hash_tbl, ah, list, key) {
/* Set ah_valid and ah_id the same so memcmp can work */
new_ah->sc_ah.ah_info.ah_idx = ah->sc_ah.ah_info.ah_idx;
new_ah->sc_ah.ah_info.ah_valid = ah->sc_ah.ah_info.ah_valid;
@@ -4565,14 +5175,14 @@ static int irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
struct irdma_ah *ah = to_iwah(ibah);
if ((ah_flags & RDMA_DESTROY_AH_SLEEPABLE) && ah->parent_ah) {
- mutex_lock(&iwdev->ah_tbl_lock);
+ mutex_lock(&iwdev->rf->ah_tbl_lock);
if (!refcount_dec_and_test(&ah->parent_ah->refcnt)) {
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
return 0;
}
hash_del(&ah->parent_ah->list);
kfree(ah->parent_ah);
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
}
irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
@@ -4603,17 +5213,17 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
struct irdma_ah *parent_ah;
int err;
- if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+ if (udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
return -EINVAL;
err = irdma_setup_ah(ibah, attr);
if (err)
return err;
- mutex_lock(&iwdev->ah_tbl_lock);
+ mutex_lock(&iwdev->rf->ah_tbl_lock);
if (!irdma_ah_exists(iwdev, ah)) {
err = irdma_create_hw_ah(iwdev, ah, true);
if (err) {
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
return err;
}
/* Add new AH to list */
@@ -4625,11 +5235,11 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
parent_ah->sc_ah.ah_info.dest_ip_addr[3];
ah->parent_ah = parent_ah;
- hash_add(iwdev->ah_hash_tbl, &parent_ah->list, key);
+ hash_add(iwdev->rf->ah_hash_tbl, &parent_ah->list, key);
refcount_set(&parent_ah->refcnt, 1);
}
}
- mutex_unlock(&iwdev->ah_tbl_lock);
+ mutex_unlock(&iwdev->rf->ah_tbl_lock);
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
@@ -4691,6 +5301,20 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
}
+static const struct ib_device_ops irdma_gen1_dev_ops = {
+ .dealloc_driver = irdma_ib_dealloc_device,
+};
+
+static const struct ib_device_ops irdma_gen3_dev_ops = {
+ .alloc_mw = irdma_alloc_mw,
+ .create_srq = irdma_create_srq,
+ .dealloc_mw = irdma_dealloc_mw,
+ .destroy_srq = irdma_destroy_srq,
+ .modify_srq = irdma_modify_srq,
+ .post_srq_recv = irdma_post_srq_recv,
+ .query_srq = irdma_query_srq,
+};
+
static const struct ib_device_ops irdma_roce_dev_ops = {
.attach_mcast = irdma_attach_mcast,
.create_ah = irdma_create_ah,
@@ -4725,7 +5349,6 @@ static const struct ib_device_ops irdma_dev_ops = {
.alloc_hw_port_stats = irdma_alloc_hw_port_stats,
.alloc_mr = irdma_alloc_mr,
- .alloc_mw = irdma_alloc_mw,
.alloc_pd = irdma_alloc_pd,
.alloc_ucontext = irdma_alloc_ucontext,
.create_cq = irdma_create_cq,
@@ -4761,6 +5384,7 @@ static const struct ib_device_ops irdma_dev_ops = {
INIT_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_mw, irdma_mr, ibmw),
INIT_RDMA_OBJ_SIZE(ib_qp, irdma_qp, ibqp),
+ INIT_RDMA_OBJ_SIZE(ib_srq, irdma_srq, ibsrq),
};
/**
@@ -4808,6 +5432,10 @@ static void irdma_init_rdma_device(struct irdma_device *iwdev)
iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
iwdev->ibdev.dev.parent = &pcidev->dev;
ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
+ if (iwdev->rf->rdma_ver == IRDMA_GEN_1)
+ ib_set_device_ops(&iwdev->ibdev, &irdma_gen1_dev_ops);
+ if (iwdev->rf->rdma_ver >= IRDMA_GEN_3)
+ ib_set_device_ops(&iwdev->ibdev, &irdma_gen3_dev_ops);
}
/**
@@ -4879,5 +5507,11 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev)
struct irdma_device *iwdev = to_iwdev(ibdev);
irdma_rt_deinit_hw(iwdev);
- irdma_ctrl_deinit_hw(iwdev->rf);
+ if (!iwdev->is_vport) {
+ irdma_ctrl_deinit_hw(iwdev->rf);
+ if (iwdev->rf->vchnl_wq) {
+ destroy_workqueue(iwdev->rf->vchnl_wq);
+ mutex_destroy(&iwdev->rf->sc_dev.vchnl_mutex);
+ }
+ }
}
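
The new IB_WR_ATOMIC_CMP_AND_SWP / IB_WR_ATOMIC_FETCH_AND_ADD arms in irdma_post_send() above consume the standard ib_atomic_wr layout and are gated on IRDMA_FEATURE_ATOMIC_OPS. As a hedged sketch of the consumer side (the QP, keys, and DMA-mapped scratch address are assumed to exist; post_fetch_add is illustrative, not part of the patch), a kernel ULP would post a fetch-and-add like this:

#include <rdma/ib_verbs.h>

/* Illustrative only: post an 8-byte remote fetch-and-add on an RC QP. */
static int post_fetch_add(struct ib_qp *qp, u64 local_dma_addr, u32 lkey,
			  u64 remote_addr, u32 rkey, u64 add_val)
{
	struct ib_sge sge = {
		.addr = local_dma_addr,	/* where the old value lands */
		.length = sizeof(u64),
		.lkey = lkey,
	};
	struct ib_atomic_wr wr = {
		.wr = {
			.opcode = IB_WR_ATOMIC_FETCH_AND_ADD,
			.send_flags = IB_SEND_SIGNALED,
			.sg_list = &sge,
			.num_sge = 1,
		},
		.remote_addr = remote_addr,
		.compare_add = add_val,
		.rkey = rkey,
	};
	const struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr.wr, &bad_wr);
}

The old value is returned into sg_list[0], which is why the driver maps tagged_offset to ib_wr->sg_list[0].addr in the hunk above.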
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index cfa140b36395..aabbb3442098 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -8,6 +8,7 @@
#define IRDMA_PKEY_TBL_SZ 1
#define IRDMA_DEFAULT_PKEY 0xFFFF
+#define IRDMA_SHADOW_PGCNT 1
struct irdma_ucontext {
struct ib_ucontext ibucontext;
@@ -17,6 +18,8 @@ struct irdma_ucontext {
spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
struct list_head qp_reg_mem_list;
spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
+ struct list_head srq_reg_mem_list;
+ spinlock_t srq_reg_mem_list_lock; /* protect SRQ memory list */
int abi_ver;
u8 legacy_mode : 1;
u8 use_raw_attrs : 1;
@@ -65,10 +68,16 @@ struct irdma_cq_mr {
bool split;
};
+struct irdma_srq_mr {
+ struct irdma_hmc_pble srq_pbl;
+ dma_addr_t shadow;
+};
+
struct irdma_qp_mr {
struct irdma_hmc_pble sq_pbl;
struct irdma_hmc_pble rq_pbl;
dma_addr_t shadow;
+ dma_addr_t rq_pa;
struct page *sq_page;
};
@@ -85,6 +94,7 @@ struct irdma_pbl {
union {
struct irdma_qp_mr qp_mr;
struct irdma_cq_mr cq_mr;
+ struct irdma_srq_mr srq_mr;
};
bool pbl_allocated:1;
@@ -101,7 +111,8 @@ struct irdma_mr {
};
struct ib_umem *region;
int access;
- u8 is_hwreg;
+ bool is_hwreg:1;
+ bool dma_mr:1;
u16 type;
u32 page_cnt;
u64 page_size;
@@ -112,24 +123,33 @@ struct irdma_mr {
struct irdma_pbl iwpbl;
};
+struct irdma_srq {
+ struct ib_srq ibsrq;
+ struct irdma_sc_srq sc_srq __aligned(64);
+ struct irdma_dma_mem kmem;
+ u64 *srq_wrid_mem;
+ refcount_t refcnt;
+ spinlock_t lock; /* for poll srq */
+ struct irdma_pbl *iwpbl;
+ struct irdma_sge *sg_list;
+ u16 srq_head;
+ u32 srq_num;
+ u32 max_wr;
+ bool user_mode:1;
+};
+
struct irdma_cq {
struct ib_cq ibcq;
struct irdma_sc_cq sc_cq;
- u16 cq_head;
- u16 cq_size;
- u16 cq_num;
+ u32 cq_num;
bool user_mode;
atomic_t armed;
enum irdma_cmpl_notify last_notify;
- u32 polled_cmpls;
- u32 cq_mem_size;
struct irdma_dma_mem kmem;
struct irdma_dma_mem kmem_shadow;
struct completion free_cq;
refcount_t refcnt;
spinlock_t lock; /* for poll cq */
- struct irdma_pbl *iwpbl;
- struct irdma_pbl *iwpbl_shadow;
struct list_head resize_list;
struct irdma_cq_poll_info cur_cqe;
struct list_head cmpl_generated;
@@ -259,6 +279,12 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
case IRDMA_OP_TYPE_FAST_REG_NSMR:
entry->opcode = IB_WC_REG_MR;
break;
+ case IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP:
+ entry->opcode = IB_WC_COMP_SWAP;
+ break;
+ case IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD:
+ entry->opcode = IB_WC_FETCH_ADD;
+ break;
case IRDMA_OP_TYPE_INV_STAG:
entry->opcode = IB_WC_LOCAL_INV;
break;
@@ -267,6 +293,19 @@ static inline void set_ib_wc_op_sq(struct irdma_cq_poll_info *cq_poll_info,
}
}
+static inline void set_ib_wc_op_rq_gen_3(struct irdma_cq_poll_info *info,
+ struct ib_wc *entry)
+{
+ switch (info->op_type) {
+ case IRDMA_OP_TYPE_RDMA_WRITE:
+ case IRDMA_OP_TYPE_RDMA_WRITE_SOL:
+ entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ break;
+ default:
+ entry->opcode = IB_WC_RECV;
+ }
+}
+
static inline void set_ib_wc_op_rq(struct irdma_cq_poll_info *cq_poll_info,
struct ib_wc *entry, bool send_imm_support)
{
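
The irdma_srq object introduced above backs the new create_srq/post_srq_recv ops wired in for GEN_3 devices. A minimal sketch of a kernel consumer posting one buffer to an SRQ through the core verbs API (the SRQ and the DMA-mapped buffer are assumed to exist; the helper name is invented for illustration):

#include <rdma/ib_verbs.h>

/* Illustrative only: post one receive buffer to an SRQ. */
static int post_one_srq_recv(struct ib_srq *srq, u64 dma_addr, u32 len,
			     u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr = dma_addr,
		.length = len,
		.lkey = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id = wr_id,
		.sg_list = &sge,
		.num_sge = 1,
	};
	const struct ib_recv_wr *bad_wr;

	return ib_post_srq_recv(srq, &wr, &bad_wr);
}

This lands in irdma_post_srq_recv() above, which rejects the wr if num_sge exceeds max_srq_frag_cnt and otherwise hands it to irdma_uk_srq_post_receive().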
diff --git a/drivers/infiniband/hw/irdma/virtchnl.c b/drivers/infiniband/hw/irdma/virtchnl.c
new file mode 100644
index 000000000000..16ad27247527
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/virtchnl.c
@@ -0,0 +1,618 @@
+// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+
+#include "osdep.h"
+#include "hmc.h"
+#include "defs.h"
+#include "type.h"
+#include "protos.h"
+#include "virtchnl.h"
+#include "ws.h"
+#include "i40iw_hw.h"
+#include "ig3rdma_hw.h"
+
+struct vchnl_reg_map_elem {
+ u16 reg_id;
+ u16 reg_idx;
+ bool pg_rel;
+};
+
+struct vchnl_regfld_map_elem {
+ u16 regfld_id;
+ u16 regfld_idx;
+};
+
+static struct vchnl_reg_map_elem vchnl_reg_map[] = {
+ {IRDMA_VCHNL_REG_ID_CQPTAIL, IRDMA_CQPTAIL, false},
+ {IRDMA_VCHNL_REG_ID_CQPDB, IRDMA_CQPDB, false},
+ {IRDMA_VCHNL_REG_ID_CCQPSTATUS, IRDMA_CCQPSTATUS, false},
+ {IRDMA_VCHNL_REG_ID_CCQPHIGH, IRDMA_CCQPHIGH, false},
+ {IRDMA_VCHNL_REG_ID_CCQPLOW, IRDMA_CCQPLOW, false},
+ {IRDMA_VCHNL_REG_ID_CQARM, IRDMA_CQARM, false},
+ {IRDMA_VCHNL_REG_ID_CQACK, IRDMA_CQACK, false},
+ {IRDMA_VCHNL_REG_ID_AEQALLOC, IRDMA_AEQALLOC, false},
+ {IRDMA_VCHNL_REG_ID_CQPERRCODES, IRDMA_CQPERRCODES, false},
+ {IRDMA_VCHNL_REG_ID_WQEALLOC, IRDMA_WQEALLOC, false},
+ {IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET, IRDMA_DB_ADDR_OFFSET, false},
+ {IRDMA_VCHNL_REG_ID_DYN_CTL, IRDMA_GLINT_DYN_CTL, false},
+ {IRDMA_VCHNL_REG_INV_ID, IRDMA_VCHNL_REG_INV_ID, false}
+};
+
+static struct vchnl_regfld_map_elem vchnl_regfld_map[] = {
+ {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR, IRDMA_CCQPSTATUS_CCQP_ERR_M},
+ {IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE, IRDMA_CCQPSTATUS_CCQP_DONE_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID, IRDMA_CQPSQ_STAG_PDID_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID, IRDMA_CQPSQ_CQ_CEQID_M},
+ {IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID, IRDMA_CQPSQ_CQ_CQID_M},
+ {IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT, IRDMA_COMMIT_FPM_CQCNT_M},
+ {IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID, IRDMA_CQPSQ_UPESD_HMCFNID_M},
+ {IRDMA_VCHNL_REGFLD_INV_ID, IRDMA_VCHNL_REGFLD_INV_ID}
+};
+
+#define IRDMA_VCHNL_REG_COUNT ARRAY_SIZE(vchnl_reg_map)
+#define IRDMA_VCHNL_REGFLD_COUNT ARRAY_SIZE(vchnl_regfld_map)
+#define IRDMA_VCHNL_REGFLD_BUF_SIZE \
+ (IRDMA_VCHNL_REG_COUNT * sizeof(struct irdma_vchnl_reg_info) + \
+ IRDMA_VCHNL_REGFLD_COUNT * sizeof(struct irdma_vchnl_reg_field_info))
+#define IRDMA_REGMAP_RESP_BUF_SIZE (IRDMA_VCHNL_RESP_MIN_SIZE + IRDMA_VCHNL_REGFLD_BUF_SIZE)
+
+/**
+ * irdma_sc_vchnl_init - Initialize dev virtchannel and get hw_rev
+ * @dev: dev structure to update
+ * @info: virtchannel info parameters to fill into the dev structure
+ */
+int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_init_info *info)
+{
+ dev->vchnl_up = true;
+ dev->privileged = info->privileged;
+ dev->is_pf = info->is_pf;
+ dev->hw_attrs.uk_attrs.hw_rev = info->hw_rev;
+
+ if (!dev->privileged) {
+ int ret = irdma_vchnl_req_get_ver(dev, IRDMA_VCHNL_CHNL_VER_MAX,
+ &dev->vchnl_ver);
+
+ ibdev_dbg(to_ibdev(dev),
+ "DEV: Get Channel version ret = %d, version is %u\n",
+ ret, dev->vchnl_ver);
+
+ if (ret)
+ return ret;
+
+ ret = irdma_vchnl_req_get_caps(dev);
+ if (ret)
+ return ret;
+
+ dev->hw_attrs.uk_attrs.hw_rev = dev->vc_caps.hw_rev;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_verify_resp - Verify response size against the request
+ * @vchnl_req: vchnl message requested
+ * @resp_len: response length sent from vchnl peer
+ */
+static int irdma_vchnl_req_verify_resp(struct irdma_vchnl_req *vchnl_req,
+ u16 resp_len)
+{
+ switch (vchnl_req->vchnl_msg->op_code) {
+ case IRDMA_VCHNL_OP_GET_VER:
+ case IRDMA_VCHNL_OP_GET_HMC_FCN:
+ case IRDMA_VCHNL_OP_PUT_HMC_FCN:
+ if (resp_len != vchnl_req->parm_len)
+ return -EBADMSG;
+ break;
+ case IRDMA_VCHNL_OP_GET_RDMA_CAPS:
+ if (resp_len < IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE)
+ return -EBADMSG;
+ break;
+ case IRDMA_VCHNL_OP_GET_REG_LAYOUT:
+ case IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP:
+ case IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP:
+ case IRDMA_VCHNL_OP_ADD_VPORT:
+ case IRDMA_VCHNL_OP_DEL_VPORT:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void irdma_free_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req)
+{
+ kfree(vchnl_req->vchnl_msg);
+}
+
+static int irdma_alloc_vchnl_req_msg(struct irdma_vchnl_req *vchnl_req,
+ struct irdma_vchnl_req_init_info *info)
+{
+ struct irdma_vchnl_op_buf *vchnl_msg;
+
+ vchnl_msg = kzalloc(IRDMA_VCHNL_MAX_MSG_SIZE, GFP_KERNEL);
+ if (!vchnl_msg)
+ return -ENOMEM;
+
+ vchnl_msg->op_ctx = (uintptr_t)vchnl_req;
+ vchnl_msg->buf_len = sizeof(*vchnl_msg) + info->req_parm_len;
+ if (info->req_parm_len)
+ memcpy(vchnl_msg->buf, info->req_parm, info->req_parm_len);
+ vchnl_msg->op_code = info->op_code;
+ vchnl_msg->op_ver = info->op_ver;
+
+ vchnl_req->vchnl_msg = vchnl_msg;
+ vchnl_req->parm = info->resp_parm;
+ vchnl_req->parm_len = info->resp_parm_len;
+
+ return 0;
+}
+
+static int irdma_vchnl_req_send_sync(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req_init_info *info)
+{
+ u16 resp_len = sizeof(dev->vc_recv_buf);
+ struct irdma_vchnl_req vchnl_req = {};
+ u16 msg_len;
+ u8 *msg;
+ int ret;
+
+ ret = irdma_alloc_vchnl_req_msg(&vchnl_req, info);
+ if (ret)
+ return ret;
+
+ msg_len = vchnl_req.vchnl_msg->buf_len;
+ msg = (u8 *)vchnl_req.vchnl_msg;
+
+ mutex_lock(&dev->vchnl_mutex);
+ ret = ig3rdma_vchnl_send_sync(dev, msg, msg_len, dev->vc_recv_buf,
+ &resp_len);
+ dev->vc_recv_len = resp_len;
+ if (ret)
+ goto exit;
+
+ ret = irdma_vchnl_req_get_resp(dev, &vchnl_req);
+exit:
+ mutex_unlock(&dev->vchnl_mutex);
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: virtual channel send %s caller: %pS ret=%d op=%u op_ver=%u req_len=%u parm_len=%u resp_len=%u\n",
+ !ret ? "SUCCEEDS" : "FAILS", __builtin_return_address(0),
+ ret, vchnl_req.vchnl_msg->op_code,
+ vchnl_req.vchnl_msg->op_ver, vchnl_req.vchnl_msg->buf_len,
+ vchnl_req.parm_len, vchnl_req.resp_len);
+ irdma_free_vchnl_req_msg(&vchnl_req);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_get_reg_layout - Get Register Layout
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev)
+{
+ u16 reg_idx, reg_id, tmp_reg_id, regfld_idx, regfld_id, tmp_regfld_id;
+ struct irdma_vchnl_reg_field_info *regfld_array = NULL;
+ u8 resp_buffer[IRDMA_REGMAP_RESP_BUF_SIZE] = {};
+ struct vchnl_regfld_map_elem *regfld_map_array;
+ struct irdma_vchnl_req_init_info info = {};
+ struct vchnl_reg_map_elem *reg_map_array;
+ struct irdma_vchnl_reg_info *reg_array;
+ u8 num_bits, shift_cnt;
+ u16 buf_len = 0;
+ u64 bitmask;
+ u32 rindex;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_REG_LAYOUT;
+ info.op_ver = IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0;
+ info.resp_parm = resp_buffer;
+ info.resp_parm_len = sizeof(resp_buffer);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ /* Parse the response buffer and update reg info: walk the
+ * registers, then the register fields, stopping at the first
+ * invalid ID in each list.
+ */
+ reg_array = (struct irdma_vchnl_reg_info *)resp_buffer;
+ for (rindex = 0; rindex < IRDMA_VCHNL_REG_COUNT; rindex++) {
+ buf_len += sizeof(struct irdma_vchnl_reg_info);
+ if (buf_len >= sizeof(resp_buffer))
+ return -ENOMEM;
+
+ regfld_array =
+ (struct irdma_vchnl_reg_field_info *)&reg_array[rindex + 1];
+ reg_id = reg_array[rindex].reg_id;
+ if (reg_id == IRDMA_VCHNL_REG_INV_ID)
+ break;
+
+ reg_id &= ~IRDMA_VCHNL_REG_PAGE_REL;
+ if (reg_id >= IRDMA_VCHNL_REG_COUNT)
+ return -EINVAL;
+
+ /* Search regmap for the register index in hw_regs. */
+ reg_map_array = vchnl_reg_map;
+ do {
+ tmp_reg_id = reg_map_array->reg_id;
+ if (tmp_reg_id == reg_id)
+ break;
+
+ reg_map_array++;
+ } while (tmp_reg_id != IRDMA_VCHNL_REG_INV_ID);
+ if (tmp_reg_id != reg_id)
+ continue;
+
+ reg_idx = reg_map_array->reg_idx;
+
+ /* Page-relative registers and the DB offset do not need the BAR offset */
+ if (reg_idx == IRDMA_DB_ADDR_OFFSET ||
+ (reg_array[rindex].reg_id & IRDMA_VCHNL_REG_PAGE_REL)) {
+ dev->hw_regs[reg_idx] =
+ (u32 __iomem *)(uintptr_t)reg_array[rindex].reg_offset;
+ continue;
+ }
+
+ /* Update the local HW struct */
+ dev->hw_regs[reg_idx] = ig3rdma_get_reg_addr(dev->hw,
+ reg_array[rindex].reg_offset);
+ if (!dev->hw_regs[reg_idx])
+ return -EINVAL;
+ }
+
+ if (!regfld_array)
+ return -ENOMEM;
+
+ /* set up doorbell variables using mapped DB page */
+ dev->wqe_alloc_db = dev->hw_regs[IRDMA_WQEALLOC];
+ dev->cq_arm_db = dev->hw_regs[IRDMA_CQARM];
+ dev->aeq_alloc_db = dev->hw_regs[IRDMA_AEQALLOC];
+ dev->cqp_db = dev->hw_regs[IRDMA_CQPDB];
+ dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK];
+
+ for (rindex = 0; rindex < IRDMA_VCHNL_REGFLD_COUNT; rindex++) {
+ buf_len += sizeof(struct irdma_vchnl_reg_field_info);
+ if ((buf_len - 1) > sizeof(resp_buffer))
+ break;
+
+ if (regfld_array[rindex].fld_id == IRDMA_VCHNL_REGFLD_INV_ID)
+ break;
+
+ regfld_id = regfld_array[rindex].fld_id;
+ regfld_map_array = vchnl_regfld_map;
+ do {
+ tmp_regfld_id = regfld_map_array->regfld_id;
+ if (tmp_regfld_id == regfld_id)
+ break;
+
+ regfld_map_array++;
+ } while (tmp_regfld_id != IRDMA_VCHNL_REGFLD_INV_ID);
+
+ if (tmp_regfld_id != regfld_id)
+ continue;
+
+ regfld_idx = regfld_map_array->regfld_idx;
+
+ num_bits = regfld_array[rindex].fld_bits;
+ shift_cnt = regfld_array[rindex].fld_shift;
+ if ((num_bits + shift_cnt > 64) || !num_bits) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: Invalid field mask id %d bits %d shift %d",
+ regfld_id, num_bits, shift_cnt);
+
+ continue;
+ }
+
+ bitmask = (1ULL << num_bits) - 1;
+ dev->hw_masks[regfld_idx] = bitmask << shift_cnt;
+ dev->hw_shifts[regfld_idx] = shift_cnt;
+ }
+
+ return 0;
+}
+
+int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id, struct irdma_qos *qos)
+{
+ struct irdma_vchnl_resp_vport_info resp_vport = {};
+ struct irdma_vchnl_req_vport_info req_vport = {};
+ struct irdma_vchnl_req_init_info info = {};
+ int ret, i;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_ADD_VPORT;
+ info.op_ver = IRDMA_VCHNL_OP_ADD_VPORT_V0;
+ req_vport.vport_id = vport_id;
+ req_vport.qp1_id = qp1_id;
+ info.req_parm_len = sizeof(req_vport);
+ info.req_parm = &req_vport;
+ info.resp_parm = &resp_vport;
+ info.resp_parm_len = sizeof(resp_vport);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ qos[i].qs_handle = resp_vport.qs_handle[i];
+ qos[i].valid = true;
+ }
+
+ return 0;
+}
+
+int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id, u32 qp1_id)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ struct irdma_vchnl_req_vport_info req_vport = {};
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_DEL_VPORT;
+ info.op_ver = IRDMA_VCHNL_OP_DEL_VPORT_V0;
+ req_vport.vport_id = vport_id;
+ req_vport.qp1_id = qp1_id;
+ info.req_parm_len = sizeof(req_vport);
+ info.req_parm = &req_vport;
+
+ return irdma_vchnl_req_send_sync(dev, &info);
+}
+
+/**
+ * irdma_vchnl_req_aeq_vec_map - Map AEQ to vector on this function
+ * @dev: RDMA device pointer
+ * @v_idx: vector index
+ */
+int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ struct irdma_vchnl_qvlist_info *qvl;
+ struct irdma_vchnl_qv_info *qv;
+ u16 qvl_size, num_vectors = 1;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ qvl_size = struct_size(qvl, qv_info, num_vectors);
+
+ qvl = kzalloc(qvl_size, GFP_KERNEL);
+ if (!qvl)
+ return -ENOMEM;
+
+ qvl->num_vectors = 1;
+ qv = qvl->qv_info;
+
+ qv->ceq_idx = IRDMA_Q_INVALID_IDX;
+ qv->v_idx = v_idx;
+ qv->itr_idx = IRDMA_IDX_ITR0;
+
+ info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
+ info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
+ info.req_parm = qvl;
+ info.req_parm_len = qvl_size;
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ kfree(qvl);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_ceq_vec_map - Map CEQ to vector on this function
+ * @dev: RDMA device pointer
+ * @ceq_id: CEQ index
+ * @v_idx: vector index
+ */
+int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id, u32 v_idx)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ struct irdma_vchnl_qvlist_info *qvl;
+ struct irdma_vchnl_qv_info *qv;
+ u16 qvl_size, num_vectors = 1;
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ qvl_size = struct_size(qvl, qv_info, num_vectors);
+
+ qvl = kzalloc(qvl_size, GFP_KERNEL);
+ if (!qvl)
+ return -ENOMEM;
+
+ qvl->num_vectors = num_vectors;
+ qv = qvl->qv_info;
+
+ qv->aeq_idx = IRDMA_Q_INVALID_IDX;
+ qv->ceq_idx = ceq_id;
+ qv->v_idx = v_idx;
+ qv->itr_idx = IRDMA_IDX_ITR0;
+
+ info.op_code = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP;
+ info.op_ver = IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0;
+ info.req_parm = qvl;
+ info.req_parm_len = qvl_size;
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ kfree(qvl);
+
+ return ret;
+}
+
+/**
+ * irdma_vchnl_req_get_ver - Request Channel version
+ * @dev: RDMA device pointer
+ * @ver_req: Virtual channel version requested
+ * @ver_res: Virtual channel version response
+ */
+int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req, u32 *ver_res)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_VER;
+ info.op_ver = ver_req;
+ info.resp_parm = ver_res;
+ info.resp_parm_len = sizeof(*ver_res);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ if (*ver_res < IRDMA_VCHNL_CHNL_VER_MIN) {
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: %s unsupported vchnl version 0x%0x\n",
+ __func__, *ver_res);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_get_hmc_fcn - Request VF HMC Function
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_hmc_info req_hmc = {};
+ struct irdma_vchnl_resp_hmc_info resp_hmc = {};
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_HMC_FCN;
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ info.op_ver = IRDMA_VCHNL_OP_GET_HMC_FCN_V2;
+ req_hmc.protocol_used = dev->protocol_used;
+ info.req_parm_len = sizeof(req_hmc);
+ info.req_parm = &req_hmc;
+ info.resp_parm = &resp_hmc;
+ info.resp_parm_len = sizeof(resp_hmc);
+ }
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_3) {
+ int i;
+
+ dev->hmc_fn_id = resp_hmc.hmc_func;
+
+ for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
+ dev->qos[i].qs_handle = resp_hmc.qs_handle[i];
+ dev->qos[i].valid = true;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_put_hmc_fcn - Free VF HMC Function
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_init_info info = {};
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_PUT_HMC_FCN;
+ info.op_ver = IRDMA_VCHNL_OP_PUT_HMC_FCN_V0;
+
+ return irdma_vchnl_req_send_sync(dev, &info);
+}
+
+/**
+ * irdma_vchnl_req_get_caps - Request RDMA capabilities
+ * @dev: RDMA device pointer
+ */
+int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev)
+{
+ struct irdma_vchnl_req_init_info info = {};
+ int ret;
+
+ if (!dev->vchnl_up)
+ return -EBUSY;
+
+ info.op_code = IRDMA_VCHNL_OP_GET_RDMA_CAPS;
+ info.op_ver = IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0;
+ info.resp_parm = &dev->vc_caps;
+ info.resp_parm_len = sizeof(dev->vc_caps);
+
+ ret = irdma_vchnl_req_send_sync(dev, &info);
+ if (ret)
+ return ret;
+
+ if (dev->vc_caps.hw_rev > IRDMA_GEN_MAX ||
+ dev->vc_caps.hw_rev < IRDMA_GEN_2) {
+ ibdev_dbg(to_ibdev(dev),
+ "ERR: %s unsupported hw_rev version 0x%0x\n",
+ __func__, dev->vc_caps.hw_rev);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/**
+ * irdma_vchnl_req_get_resp - Receive the inbound vchnl response.
+ * @dev: Dev pointer
+ * @vchnl_req: Vchannel request
+ */
+int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req *vchnl_req)
+{
+ struct irdma_vchnl_resp_buf *vchnl_msg_resp =
+ (struct irdma_vchnl_resp_buf *)dev->vc_recv_buf;
+ u16 resp_len;
+ int ret;
+
+ if ((uintptr_t)vchnl_req != (uintptr_t)vchnl_msg_resp->op_ctx) {
+ ibdev_dbg(to_ibdev(dev),
+ "VIRT: error vchnl context value does not match\n");
+ return -EBADMSG;
+ }
+
+ resp_len = dev->vc_recv_len - sizeof(*vchnl_msg_resp);
+ resp_len = min(resp_len, vchnl_req->parm_len);
+
+ ret = irdma_vchnl_req_verify_resp(vchnl_req, resp_len);
+ if (ret)
+ return ret;
+
+ ret = (int)vchnl_msg_resp->op_ret;
+ if (ret)
+ return ret;
+
+ vchnl_req->resp_len = 0;
+ if (vchnl_req->parm_len && vchnl_req->parm && resp_len) {
+ memcpy(vchnl_req->parm, vchnl_msg_resp->buf, resp_len);
+ vchnl_req->resp_len = resp_len;
+ ibdev_dbg(to_ibdev(dev), "VIRT: Got response, data size %u\n",
+ resp_len);
+ }
+
+ return 0;
+}
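
All of the request helpers above follow one pattern: populate an irdma_vchnl_req_init_info and hand it to irdma_vchnl_req_send_sync(), which frames the message, blocks for the peer's answer, and copies the response parameters back. A hedged sketch of what a hypothetical new op would look like (IRDMA_VCHNL_OP_EXAMPLE and its u32 payload are invented for illustration, not part of this patch):

/* Illustrative pattern only: wiring a new request on top of
 * irdma_vchnl_req_send_sync(). This would live in virtchnl.c, where
 * the send helper is static.
 */
static int irdma_vchnl_req_example(struct irdma_sc_dev *dev, u32 arg,
				   u32 *result)
{
	struct irdma_vchnl_req_init_info info = {};

	if (!dev->vchnl_up)
		return -EBUSY;

	info.op_code = IRDMA_VCHNL_OP_EXAMPLE;	/* hypothetical op code */
	info.op_ver = 0;
	info.req_parm = &arg;
	info.req_parm_len = sizeof(arg);
	info.resp_parm = result;
	info.resp_parm_len = sizeof(*result);

	return irdma_vchnl_req_send_sync(dev, &info);
}

Note that irdma_vchnl_req_verify_resp() would also need a case for the new op code; otherwise the response is rejected with -EOPNOTSUPP.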
diff --git a/drivers/infiniband/hw/irdma/virtchnl.h b/drivers/infiniband/hw/irdma/virtchnl.h
new file mode 100644
index 000000000000..aa955a9125bd
--- /dev/null
+++ b/drivers/infiniband/hw/irdma/virtchnl.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
+/* Copyright (c) 2015 - 2024 Intel Corporation */
+#ifndef IRDMA_VIRTCHNL_H
+#define IRDMA_VIRTCHNL_H
+
+#include "hmc.h"
+#include "irdma.h"
+
+/* IRDMA_VCHNL_CHNL_VER_V0 is for legacy hw, no longer supported. */
+#define IRDMA_VCHNL_CHNL_VER_V2 2
+#define IRDMA_VCHNL_CHNL_VER_MIN IRDMA_VCHNL_CHNL_VER_V2
+#define IRDMA_VCHNL_CHNL_VER_MAX IRDMA_VCHNL_CHNL_VER_V2
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V0 0
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V1 1
+#define IRDMA_VCHNL_OP_GET_HMC_FCN_V2 2
+#define IRDMA_VCHNL_OP_PUT_HMC_FCN_V0 0
+#define IRDMA_VCHNL_OP_GET_REG_LAYOUT_V0 0
+#define IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP_V0 0
+#define IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP_V0 0
+#define IRDMA_VCHNL_OP_ADD_VPORT_V0 0
+#define IRDMA_VCHNL_OP_DEL_VPORT_V0 0
+#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_V0 0
+#define IRDMA_VCHNL_OP_GET_RDMA_CAPS_MIN_SIZE 1
+
+#define IRDMA_VCHNL_REG_ID_CQPTAIL 0
+#define IRDMA_VCHNL_REG_ID_CQPDB 1
+#define IRDMA_VCHNL_REG_ID_CCQPSTATUS 2
+#define IRDMA_VCHNL_REG_ID_CCQPHIGH 3
+#define IRDMA_VCHNL_REG_ID_CCQPLOW 4
+#define IRDMA_VCHNL_REG_ID_CQARM 5
+#define IRDMA_VCHNL_REG_ID_CQACK 6
+#define IRDMA_VCHNL_REG_ID_AEQALLOC 7
+#define IRDMA_VCHNL_REG_ID_CQPERRCODES 8
+#define IRDMA_VCHNL_REG_ID_WQEALLOC 9
+#define IRDMA_VCHNL_REG_ID_IPCONFIG0 10
+#define IRDMA_VCHNL_REG_ID_DB_ADDR_OFFSET 11
+#define IRDMA_VCHNL_REG_ID_DYN_CTL 12
+#define IRDMA_VCHNL_REG_ID_AEQITRMASK 13
+#define IRDMA_VCHNL_REG_ID_CEQITRMASK 14
+#define IRDMA_VCHNL_REG_INV_ID 0xFFFF
+#define IRDMA_VCHNL_REG_PAGE_REL 0x8000
+
+#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CQP_OP_ERR 2
+#define IRDMA_VCHNL_REGFLD_ID_CCQPSTATUS_CCQP_DONE 5
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_STAG_PDID 6
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CEQID 7
+#define IRDMA_VCHNL_REGFLD_ID_CQPSQ_CQ_CQID 8
+#define IRDMA_VCHNL_REGFLD_ID_COMMIT_FPM_CQCNT 9
+#define IRDMA_VCHNL_REGFLD_ID_UPESD_HMCN_ID 10
+#define IRDMA_VCHNL_REGFLD_INV_ID 0xFFFF
+
+#define IRDMA_VCHNL_RESP_MIN_SIZE (sizeof(struct irdma_vchnl_resp_buf))
+
+enum irdma_vchnl_ops {
+ IRDMA_VCHNL_OP_GET_VER = 0,
+ IRDMA_VCHNL_OP_GET_HMC_FCN = 1,
+ IRDMA_VCHNL_OP_PUT_HMC_FCN = 2,
+ IRDMA_VCHNL_OP_GET_REG_LAYOUT = 11,
+ IRDMA_VCHNL_OP_GET_RDMA_CAPS = 13,
+ IRDMA_VCHNL_OP_QUEUE_VECTOR_MAP = 14,
+ IRDMA_VCHNL_OP_QUEUE_VECTOR_UNMAP = 15,
+ IRDMA_VCHNL_OP_ADD_VPORT = 16,
+ IRDMA_VCHNL_OP_DEL_VPORT = 17,
+};
+
+struct irdma_vchnl_req_hmc_info {
+ u8 protocol_used;
+ u8 disable_qos;
+} __packed;
+
+struct irdma_vchnl_resp_hmc_info {
+ u16 hmc_func;
+ u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
+} __packed;
+
+struct irdma_vchnl_qv_info {
+ u32 v_idx;
+ u16 ceq_idx;
+ u16 aeq_idx;
+ u8 itr_idx;
+};
+
+struct irdma_vchnl_qvlist_info {
+ u32 num_vectors;
+ struct irdma_vchnl_qv_info qv_info[];
+};
+
+struct irdma_vchnl_req_vport_info {
+ u16 vport_id;
+ u32 qp1_id;
+};
+
+struct irdma_vchnl_resp_vport_info {
+ u16 qs_handle[IRDMA_MAX_USER_PRIORITY];
+};
+
+struct irdma_vchnl_op_buf {
+ u16 op_code;
+ u16 op_ver;
+ u16 buf_len;
+ u16 rsvd;
+ u64 op_ctx;
+ u8 buf[];
+} __packed;
+
+struct irdma_vchnl_resp_buf {
+ u64 op_ctx;
+ u16 buf_len;
+ s16 op_ret;
+ u16 rsvd[2];
+ u8 buf[];
+} __packed;
+
+struct irdma_vchnl_rdma_caps {
+ u8 hw_rev;
+ u16 cqp_timeout_s;
+ u16 cqp_def_timeout_s;
+ u16 max_hw_push_len;
+} __packed;
+
+struct irdma_vchnl_init_info {
+ struct workqueue_struct *vchnl_wq;
+ enum irdma_vers hw_rev;
+ bool privileged;
+ bool is_pf;
+};
+
+struct irdma_vchnl_reg_info {
+ u32 reg_offset;
+ u16 field_cnt;
+ u16 reg_id; /* High bit of reg_id: BAR- or page-relative */
+};
+
+struct irdma_vchnl_reg_field_info {
+ u8 fld_shift;
+ u8 fld_bits;
+ u16 fld_id;
+};
+
+struct irdma_vchnl_req {
+ struct irdma_vchnl_op_buf *vchnl_msg;
+ void *parm;
+ u32 vf_id;
+ u16 parm_len;
+ u16 resp_len;
+};
+
+struct irdma_vchnl_req_init_info {
+ void *req_parm;
+ void *resp_parm;
+ u16 req_parm_len;
+ u16 resp_parm_len;
+ u16 op_code;
+ u16 op_ver;
+} __packed;
+
+struct irdma_qos;
+
+int irdma_sc_vchnl_init(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_init_info *info);
+int irdma_vchnl_req_get_ver(struct irdma_sc_dev *dev, u16 ver_req,
+ u32 *ver_res);
+int irdma_vchnl_req_get_hmc_fcn(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_put_hmc_fcn(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_get_caps(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_get_resp(struct irdma_sc_dev *dev,
+ struct irdma_vchnl_req *vc_req);
+int irdma_vchnl_req_get_reg_layout(struct irdma_sc_dev *dev);
+int irdma_vchnl_req_aeq_vec_map(struct irdma_sc_dev *dev, u32 v_idx);
+int irdma_vchnl_req_ceq_vec_map(struct irdma_sc_dev *dev, u16 ceq_id,
+ u32 v_idx);
+int irdma_vchnl_req_add_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id, struct irdma_qos *qos);
+int irdma_vchnl_req_del_vport(struct irdma_sc_dev *dev, u16 vport_id,
+ u32 qp1_id);
+#endif /* IRDMA_VIRTCHNL_H */
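
Because irdma_vchnl_op_buf and irdma_vchnl_resp_buf are __packed wire formats, their layout is fixed entirely by field order. A small sketch of build-time checks one could add next to the declarations (the 16-byte header offsets are derived from the structs above and offered as a sanity check, not a documented ABI statement):

#include <linux/build_bug.h>
#include <linux/stddef.h>

/* Both headers are 16 bytes before the variable-length payload. */
static_assert(offsetof(struct irdma_vchnl_op_buf, buf) == 16);
static_assert(offsetof(struct irdma_vchnl_resp_buf, buf) == 16);
/* u8 + three u16 fields, packed: 7 bytes on the wire. */
static_assert(sizeof(struct irdma_vchnl_rdma_caps) == 7);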
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 28e154bbb50f..1becc8779123 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -291,6 +291,32 @@ out:
return wc_index;
}
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev)
+{
+ struct mana_ib_qp *qp = mana_get_qp_ref(mdev, MANA_GSI_QPN, false);
+ struct ud_sq_shadow_wqe *shadow_wqe;
+ struct mana_ib_cq *cq;
+ unsigned long flags;
+
+ if (!qp)
+ return;
+
+ cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ while ((shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq))
+ != NULL) {
+ shadow_wqe->header.error_code = IB_WC_GENERAL_ERR;
+ shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+ }
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ if (cq->ibcq.comp_handler)
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+
+ mana_put_qp_ref(qp);
+}
+
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
diff --git a/drivers/infiniband/hw/mana/device.c b/drivers/infiniband/hw/mana/device.c
index fa60872f169f..bdeddb642b87 100644
--- a/drivers/infiniband/hw/mana/device.c
+++ b/drivers/infiniband/hw/mana/device.c
@@ -230,6 +230,9 @@ static void mana_ib_remove(struct auxiliary_device *adev)
{
struct mana_ib_dev *dev = dev_get_drvdata(&adev->dev);
+ if (mana_ib_is_rnic(dev))
+ mana_drain_gsi_sqs(dev);
+
ib_unregister_device(&dev->ib_dev);
dma_pool_destroy(dev->av_pool);
if (mana_ib_is_rnic(dev)) {
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 6a2471f2e804..fac159f7128d 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -273,9 +273,8 @@ int mana_ib_create_queue(struct mana_ib_dev *mdev, u64 addr, u32 size,
umem = ib_umem_get(&mdev->ib_dev, addr, size, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(umem)) {
- err = PTR_ERR(umem);
- ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %d\n", err);
- return err;
+ ibdev_dbg(&mdev->ib_dev, "Failed to get umem, %pe\n", umem);
+ return PTR_ERR(umem);
}
err = mana_ib_create_zero_offset_dma_region(mdev, umem, &queue->gdma_region);
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 5d31034ac7fb..9d36232ed880 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -43,6 +43,8 @@
*/
#define MANA_AV_BUFFER_SIZE 64
+#define MANA_GSI_QPN (1)
+
struct mana_ib_adapter_caps {
u32 max_sq_id;
u32 max_rq_id;
@@ -410,7 +412,7 @@ struct mana_ib_ah_attr {
u8 traffic_class;
u16 src_port;
u16 dest_port;
- u32 reserved;
+ u32 flow_label;
};
struct mana_rnic_set_qp_state_req {
@@ -427,8 +429,15 @@ struct mana_rnic_set_qp_state_req {
u32 retry_cnt;
u32 rnr_retry;
u32 min_rnr_timer;
- u32 reserved;
+ u32 rate_limit;
struct mana_ib_ah_attr ah_attr;
+ u64 reserved1;
+ u32 qkey;
+ u32 qp_access_flags;
+ u8 local_ack_timeout;
+ u8 max_rd_atomic;
+ u16 reserved2;
+ u32 reserved3;
}; /* HW Data */
struct mana_rnic_set_qp_state_resp {
@@ -718,6 +727,7 @@ int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
+void mana_drain_gsi_sqs(struct mana_ib_dev *mdev);
int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 55701046ffba..3d0245a4c1ed 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -138,7 +138,8 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
ibdev_dbg(ibdev,
- "Failed to get umem for register user-mr, %d\n", err);
+ "Failed to get umem for register user-mr, %pe\n",
+ mr->umem);
goto err_free;
}
@@ -220,7 +221,8 @@ struct ib_mr *mana_ib_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, u64 leng
umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, start, length, fd, access_flags);
if (IS_ERR(umem_dmabuf)) {
err = PTR_ERR(umem_dmabuf);
- ibdev_dbg(ibdev, "Failed to get dmabuf umem, %d\n", err);
+ ibdev_dbg(ibdev, "Failed to get dmabuf umem, %pe\n",
+ umem_dmabuf);
goto err_free;
}
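
Several hunks in this series (irdma, mana, mlx4, mlx5) switch error logging from PTR_ERR()/%d to the %pe specifier, which takes the ERR_PTR itself. A minimal sketch of the difference; the symbolic output assumes CONFIG_SYMBOLIC_ERRNAME=y, otherwise %pe falls back to the numeric value:

#include <linux/err.h>
#include <linux/printk.h>

static void log_failure_example(void)
{
	void *p = ERR_PTR(-ENOMEM);

	pr_err("old style: %d\n", (int)PTR_ERR(p));	/* "old style: -12" */
	pr_err("new style: %pe\n", p);			/* "new style: -ENOMEM" */
}

Beyond readability, this lets the message print the pointer before any cleanup (as in the mlx4 create_pv_sqp hunk above, where tun_qp->qp is logged before being set to NULL).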
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index a6bf4d539e67..48c1f4977f21 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -735,6 +735,8 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int err;
mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
+
+ req.hdr.req.msg_version = GDMA_MESSAGE_V3;
req.hdr.dev_id = mdev->gdma_dev->dev_id;
req.adapter = mdev->adapter_handle;
req.qp_handle = qp->qp_handle;
@@ -748,6 +750,12 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
req.retry_cnt = attr->retry_cnt;
req.rnr_retry = attr->rnr_retry;
req.min_rnr_timer = attr->min_rnr_timer;
+ req.rate_limit = attr->rate_limit;
+ req.qkey = attr->qkey;
+ req.local_ack_timeout = attr->timeout;
+ req.qp_access_flags = attr->qp_access_flags;
+ req.max_rd_atomic = attr->max_rd_atomic;
+
if (attr_mask & IB_QP_AV) {
ndev = mana_ib_get_netdev(&mdev->ib_dev, ibqp->port);
if (!ndev) {
@@ -774,6 +782,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
ibqp->qp_num, attr->dest_qp_num);
req.ah_attr.traffic_class = attr->ah_attr.grh.traffic_class >> 2;
req.ah_attr.hop_limit = attr->ah_attr.grh.hop_limit;
+ req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
}
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
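
The widened mana_rnic_set_qp_state_req above forwards attr->rate_limit, qkey, timeout, qp_access_flags and max_rd_atomic to the RNIC under GDMA_MESSAGE_V3. For reference, a consumer reaches the rate limit through the generic modify-QP mask; this sketch assumes the provider accepts IB_QP_RATE_LIMIT in its attr_mask checks, and the units of rate_limit are provider-defined (commonly kbps), so check the device documentation before relying on a scale:

#include <rdma/ib_verbs.h>

/* Illustrative only: request a QP rate limit via the generic verbs mask. */
static int set_qp_rate_limit(struct ib_qp *qp, u32 limit)
{
	struct ib_qp_attr attr = {
		.rate_limit = limit,
	};

	return ib_modify_qp(qp, &attr, IB_QP_RATE_LIMIT);
}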
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 12b481d138cf..03aacd526860 100644
--- a/drivers/infiniband/hw/mlx4/cm.c
+++ b/drivers/infiniband/hw/mlx4/cm.c
@@ -591,7 +591,7 @@ void mlx4_ib_cm_paravirt_clean(struct mlx4_ib_dev *dev, int slave)
int mlx4_ib_cm_init(void)
{
- cm_wq = alloc_workqueue("mlx4_ib_cm", 0, 0);
+ cm_wq = alloc_workqueue("mlx4_ib_cm", WQ_PERCPU, 0);
if (!cm_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index e6e132f10625..91c714f72099 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1836,9 +1836,9 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
if (IS_ERR(tun_qp->qp)) {
ret = PTR_ERR(tun_qp->qp);
+ pr_err("Couldn't create %s QP (%pe)\n",
+ create_tun ? "tunnel" : "special", tun_qp->qp);
tun_qp->qp = NULL;
- pr_err("Couldn't create %s QP (%d)\n",
- create_tun ? "tunnel" : "special", ret);
return ret;
}
@@ -2017,14 +2017,14 @@ static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
NULL, ctx, &cq_attr);
if (IS_ERR(ctx->cq)) {
ret = PTR_ERR(ctx->cq);
- pr_err("Couldn't create tunnel CQ (%d)\n", ret);
+ pr_err("Couldn't create tunnel CQ (%pe)\n", ctx->cq);
goto err_buf;
}
ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
if (IS_ERR(ctx->pd)) {
ret = PTR_ERR(ctx->pd);
- pr_err("Couldn't create tunnel PD (%d)\n", ret);
+ pr_err("Couldn't create tunnel PD (%pe)\n", ctx->pd);
goto err_cq;
}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 50fd407103c7..f2887ae6390e 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1652,7 +1652,8 @@ int mlx4_ib_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
sqp->roce_v2_gsi = ib_create_qp(pd, init_attr);
if (IS_ERR(sqp->roce_v2_gsi)) {
- pr_err("Failed to create GSI QP for RoCEv2 (%ld)\n", PTR_ERR(sqp->roce_v2_gsi));
+ pr_err("Failed to create GSI QP for RoCEv2 (%pe)\n",
+ sqp->roce_v2_gsi);
sqp->roce_v2_gsi = NULL;
} else {
to_mqp(sqp->roce_v2_gsi)->flags |=
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 9c8003a78334..651d76bca114 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -648,7 +648,7 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
struct mlx5_ib_cq *cq = to_mcq(ibcq);
- void __iomem *uar_page = mdev->priv.uar->map;
+ void __iomem *uar_page = mdev->priv.bfreg.up->map;
unsigned long irq_flags;
int ret = 0;
@@ -923,7 +923,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
cq->buf.frag_buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
- *index = dev->mdev->priv.uar->index;
+ *index = dev->mdev->priv.bfreg.up->index;
return 0;
@@ -1020,15 +1020,18 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
MLX5_SET(cqc, cqc, oi, 1);
+ if (udata) {
+ cq->mcq.comp = mlx5_add_cq_to_tasklet;
+ cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
+ } else {
+ cq->mcq.comp = mlx5_ib_cq_comp;
+ }
+
err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
if (err)
goto err_cqb;
mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
- if (udata)
- cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
- else
- cq->mcq.comp = mlx5_ib_cq_comp;
cq->mcq.event = mlx5_ib_cq_event;
INIT_LIST_HEAD(&cq->wc_list);
diff --git a/drivers/infiniband/hw/mlx5/data_direct.c b/drivers/infiniband/hw/mlx5/data_direct.c
index b9ba84afaae2..b81ac5709b56 100644
--- a/drivers/infiniband/hw/mlx5/data_direct.c
+++ b/drivers/infiniband/hw/mlx5/data_direct.c
@@ -35,7 +35,7 @@ static int mlx5_data_direct_vpd_get_vuid(struct mlx5_data_direct_dev *dev)
vpd_data = pci_vpd_alloc(pdev, &vpd_size);
if (IS_ERR(vpd_data)) {
- pci_err(pdev, "Unable to read VPD, err=%ld\n", PTR_ERR(vpd_data));
+ pci_err(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
return PTR_ERR(vpd_data);
}
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 028d9f031dde..d31d7f3005c6 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -233,6 +233,7 @@ static u16 get_legacy_obj_type(u16 opcode)
{
switch (opcode) {
case MLX5_CMD_OP_CREATE_RQ:
+ case MLX5_CMD_OP_CREATE_RMP:
return MLX5_EVENT_QUEUE_TYPE_RQ;
case MLX5_CMD_OP_CREATE_QP:
return MLX5_EVENT_QUEUE_TYPE_QP;
@@ -1224,6 +1225,11 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_GET(create_flow_table_in, in, other_vport));
MLX5_SET(destroy_flow_table_in, din, vport_number,
MLX5_GET(create_flow_table_in, in, vport_number));
+ MLX5_SET(destroy_flow_table_in, din, other_eswitch,
+ MLX5_GET(create_flow_table_in, in, other_eswitch));
+ MLX5_SET(destroy_flow_table_in, din, eswitch_owner_vhca_id,
+ MLX5_GET(create_flow_table_in, in,
+ eswitch_owner_vhca_id));
MLX5_SET(destroy_flow_table_in, din, table_type,
MLX5_GET(create_flow_table_in, in, table_type));
MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
@@ -1236,6 +1242,11 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_GET(create_flow_group_in, in, other_vport));
MLX5_SET(destroy_flow_group_in, din, vport_number,
MLX5_GET(create_flow_group_in, in, vport_number));
+ MLX5_SET(destroy_flow_group_in, din, other_eswitch,
+ MLX5_GET(create_flow_group_in, in, other_eswitch));
+ MLX5_SET(destroy_flow_group_in, din, eswitch_owner_vhca_id,
+ MLX5_GET(create_flow_group_in, in,
+ eswitch_owner_vhca_id));
MLX5_SET(destroy_flow_group_in, din, table_type,
MLX5_GET(create_flow_group_in, in, table_type));
MLX5_SET(destroy_flow_group_in, din, table_id,
@@ -1250,6 +1261,10 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
MLX5_GET(set_fte_in, in, other_vport));
MLX5_SET(delete_fte_in, din, vport_number,
MLX5_GET(set_fte_in, in, vport_number));
+ MLX5_SET(delete_fte_in, din, other_eswitch,
+ MLX5_GET(set_fte_in, in, other_eswitch));
+ MLX5_SET(delete_fte_in, din, eswitch_owner_vhca_id,
+ MLX5_GET(set_fte_in, in, eswitch_owner_vhca_id));
MLX5_SET(delete_fte_in, din, table_type,
MLX5_GET(set_fte_in, in, table_type));
MLX5_SET(delete_fte_in, din, table_id,
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index b0f7663c24c1..d17823ce7f38 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -691,22 +691,13 @@ static bool __maybe_unused mlx5_ib_shared_ft_allowed(struct ib_device *device)
return MLX5_CAP_GEN(dev->mdev, shared_object_to_user_object_allowed);
}
-static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
- struct mlx5_flow_namespace *ns,
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
struct mlx5_ib_flow_prio *prio,
- int priority,
- int num_entries, int num_groups,
- u32 flags, u16 vport)
+ struct mlx5_flow_table_attr *ft_attr)
{
- struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_table *ft;
- ft_attr.prio = priority;
- ft_attr.max_fte = num_entries;
- ft_attr.flags = flags;
- ft_attr.vport = vport;
- ft_attr.autogroup.max_num_groups = num_groups;
- ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ ft = mlx5_create_auto_grouped_flow_table(ns, ft_attr);
if (IS_ERR(ft))
return ERR_CAST(ft);
@@ -720,6 +711,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
enum flow_table_type ft_type)
{
bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns = NULL;
enum mlx5_flow_namespace_type fn_type;
struct mlx5_ib_flow_prio *prio;
@@ -797,11 +789,14 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
max_table_size = min_t(int, num_entries, max_table_size);
ft = prio->flow_table;
- if (!ft)
- return _get_prio(dev, ns, prio, priority, max_table_size,
- num_groups, flags, 0);
+ if (ft)
+ return prio;
- return prio;
+ ft_attr.prio = priority;
+ ft_attr.max_fte = max_table_size;
+ ft_attr.flags = flags;
+ ft_attr.autogroup.max_num_groups = num_groups;
+ return _get_prio(ns, prio, &ft_attr);
}
enum {
@@ -950,6 +945,7 @@ static int get_per_qp_prio(struct mlx5_ib_dev *dev,
enum mlx5_ib_optional_counter_type type)
{
enum mlx5_ib_optional_counter_type per_qp_type;
+ struct mlx5_flow_table_attr ft_attr = {};
enum mlx5_flow_namespace_type fn_type;
struct mlx5_flow_namespace *ns;
struct mlx5_ib_flow_prio *prio;
@@ -1003,7 +999,10 @@ static int get_per_qp_prio(struct mlx5_ib_dev *dev,
if (prio->flow_table)
return 0;
- prio = _get_prio(dev, ns, prio, priority, MLX5_FS_MAX_POOL_SIZE, 1, 0, 0);
+ ft_attr.prio = priority;
+ ft_attr.max_fte = MLX5_FS_MAX_POOL_SIZE;
+ ft_attr.autogroup.max_num_groups = 1;
+ prio = _get_prio(ns, prio, &ft_attr);
if (IS_ERR(prio))
return PTR_ERR(prio);
@@ -1223,6 +1222,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
struct mlx5_ib_op_fc *opfc,
enum mlx5_ib_optional_counter_type type)
{
+ struct mlx5_flow_table_attr ft_attr = {};
enum mlx5_flow_namespace_type fn_type;
int priority, i, err, spec_num;
struct mlx5_flow_act flow_act = {};
@@ -1304,8 +1304,10 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
if (err)
goto free;
- prio = _get_prio(dev, ns, prio, priority,
- dev->num_ports * MAX_OPFC_RULES, 1, 0, 0);
+ ft_attr.prio = priority;
+ ft_attr.max_fte = dev->num_ports * MAX_OPFC_RULES;
+ ft_attr.autogroup.max_num_groups = 1;
+ prio = _get_prio(ns, prio, &ft_attr);
if (IS_ERR(prio)) {
err = PTR_ERR(prio);
goto put_prio;
@@ -1872,7 +1874,7 @@ static int mlx5_ib_fill_transport_ns_info(struct mlx5_ib_dev *dev,
u32 *flags, u16 *vport_idx,
u16 *vport,
struct mlx5_core_dev **ft_mdev,
- u32 ib_port)
+ u32 ib_port, u16 *esw_owner_vhca_id)
{
struct mlx5_core_dev *esw_mdev;
@@ -1886,8 +1888,13 @@ static int mlx5_ib_fill_transport_ns_info(struct mlx5_ib_dev *dev,
return -EINVAL;
esw_mdev = mlx5_eswitch_get_core_dev(dev->port[ib_port - 1].rep->esw);
- if (esw_mdev != dev->mdev)
- return -EOPNOTSUPP;
+ if (esw_mdev != dev->mdev) {
+ if (!MLX5_CAP_ADV_RDMA(dev->mdev,
+ rdma_transport_manager_other_eswitch))
+ return -EOPNOTSUPP;
+ *flags |= MLX5_FLOW_TABLE_OTHER_ESWITCH;
+ *esw_owner_vhca_id = MLX5_CAP_GEN(esw_mdev, vhca_id);
+ }
*flags |= MLX5_FLOW_TABLE_OTHER_VPORT;
*ft_mdev = esw_mdev;
@@ -1903,8 +1910,10 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
bool mcast, u32 ib_port)
{
struct mlx5_core_dev *ft_mdev = dev->mdev;
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio = NULL;
+ u16 esw_owner_vhca_id = 0;
int max_table_size = 0;
u16 vport_idx = 0;
bool esw_encap;
@@ -1966,7 +1975,8 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
return ERR_PTR(-EINVAL);
ret = mlx5_ib_fill_transport_ns_info(dev, ns_type, &flags,
&vport_idx, &vport,
- &ft_mdev, ib_port);
+ &ft_mdev, ib_port,
+ &esw_owner_vhca_id);
if (ret)
return ERR_PTR(ret);
@@ -2026,8 +2036,13 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
if (prio->flow_table)
return prio;
- return _get_prio(dev, ns, prio, priority, max_table_size,
- MLX5_FS_MAX_TYPES, flags, vport);
+ ft_attr.prio = priority;
+ ft_attr.max_fte = max_table_size;
+ ft_attr.flags = flags;
+ ft_attr.vport = vport;
+ ft_attr.esw_owner_vhca_id = esw_owner_vhca_id;
+ ft_attr.autogroup.max_num_groups = MLX5_FS_MAX_TYPES;
+ return _get_prio(ns, prio, &ft_attr);
}
static struct mlx5_ib_flow_handler *
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index b804f2dd5628..d5487834ed25 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -131,8 +131,9 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
gsi->cq = ib_alloc_cq(pd->device, gsi, attr->cap.max_send_wr, 0,
IB_POLL_SOFTIRQ);
if (IS_ERR(gsi->cq)) {
- mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
- PTR_ERR(gsi->cq));
+ mlx5_ib_warn(dev,
+ "unable to create send CQ for GSI QP. error %pe\n",
+ gsi->cq);
ret = PTR_ERR(gsi->cq);
goto err_free_wrs;
}
@@ -147,8 +148,9 @@ int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
if (IS_ERR(gsi->rx_qp)) {
- mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
- PTR_ERR(gsi->rx_qp));
+ mlx5_ib_warn(dev,
+ "unable to create hardware GSI QP. error %pe\n",
+ gsi->rx_qp);
ret = PTR_ERR(gsi->rx_qp);
goto err_destroy_cq;
}
@@ -294,8 +296,9 @@ static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
qp = create_gsi_ud_qp(gsi);
if (IS_ERR(qp)) {
- mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
- PTR_ERR(qp));
+ mlx5_ib_warn(dev,
+ "unable to create hardware UD QP for GSI: %pe\n",
+ qp);
return;
}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index cc8859d3c2f5..bbecca405171 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -44,6 +44,63 @@ static void mlx5_ib_num_ports_update(struct mlx5_core_dev *dev, u32 *num_ports)
}
}
+static int mlx5_ib_set_owner_transport(struct mlx5_core_dev *cur_owner,
+ struct mlx5_core_dev *new_owner)
+{
+ int ret;
+
+ if (!MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_TX(cur_owner, ft_support) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TRANSPORT_RX(cur_owner, ft_support))
+ return 0;
+
+ if (!MLX5_CAP_ADV_RDMA(new_owner, rdma_transport_manager) ||
+ !MLX5_CAP_ADV_RDMA(new_owner, rdma_transport_manager_other_eswitch))
+ return 0;
+
+ ret = mlx5_fs_set_root_dev(cur_owner, new_owner,
+ FS_FT_RDMA_TRANSPORT_TX);
+ if (ret)
+ return ret;
+
+ ret = mlx5_fs_set_root_dev(cur_owner, new_owner,
+ FS_FT_RDMA_TRANSPORT_RX);
+ if (ret) {
+ mlx5_fs_set_root_dev(cur_owner, cur_owner,
+ FS_FT_RDMA_TRANSPORT_TX);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mlx5_ib_release_transport(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_dev *peer_dev;
+ int i, ret;
+
+ mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
+ ret = mlx5_ib_set_owner_transport(peer_dev, peer_dev);
+ WARN_ON_ONCE(ret);
+ }
+}
+
+static int mlx5_ib_take_transport(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_dev *peer_dev;
+ int ret;
+ int i;
+
+ mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
+ ret = mlx5_ib_set_owner_transport(peer_dev, dev);
+ if (ret) {
+ mlx5_ib_release_transport(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
@@ -88,10 +145,18 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
else
return mlx5_ib_set_vport_rep(lag_master, rep, vport_index);
+ if (mlx5_lag_is_shared_fdb(dev)) {
+ ret = mlx5_ib_take_transport(lag_master);
+ if (ret)
+ return ret;
+ }
+
ibdev = ib_alloc_device_with_net(mlx5_ib_dev, ib_dev,
mlx5_core_net(lag_master));
- if (!ibdev)
- return -ENOMEM;
+ if (!ibdev) {
+ ret = -ENOMEM;
+ goto release_transport;
+ }
ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
GFP_KERNEL);
@@ -127,6 +192,10 @@ fail_add:
kfree(ibdev->port);
fail_port:
ib_dealloc_device(&ibdev->ib_dev);
+release_transport:
+ if (mlx5_lag_is_shared_fdb(lag_master))
+ mlx5_ib_release_transport(lag_master);
+
return ret;
}
@@ -182,6 +251,7 @@ mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
esw = peer_mdev->priv.eswitch;
mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
}
+ mlx5_ib_release_transport(mdev);
}
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d456e4fde3e1..40284bbb45d6 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
@@ -510,6 +511,10 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
*active_width = IB_WIDTH_4X;
*active_speed = IB_SPEED_XDR;
break;
+ case MLX5E_PROT_MASK(MLX5E_1600TAUI_8_1600TBASE_CR8_KR8):
+ *active_width = IB_WIDTH_8X;
+ *active_speed = IB_SPEED_XDR;
+ break;
default:
return -EINVAL;
}
@@ -841,7 +846,7 @@ static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
break;
case MLX5_VPORT_ACCESS_METHOD_NIC:
- err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
+ err = mlx5_query_nic_vport_node_guid(dev->mdev, 0, false, &tmp);
break;
default:
@@ -883,6 +888,51 @@ static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
}
+/*
+ * Calculate maximum SQ overhead across all QP types.
+ * Other QP types (REG_UMR, UC, RC, UD/SMI/GSI, XRC_TGT)
+ * have smaller overhead than the types calculated below,
+ * so they are implicitly included.
+ */
+static u32 mlx5_ib_calc_max_sq_overhead(void)
+{
+ u32 max_overhead_xrc, overhead_ud_lso, a, b;
+
+ /* XRC_INI */
+ max_overhead_xrc = sizeof(struct mlx5_wqe_xrc_seg);
+ max_overhead_xrc += sizeof(struct mlx5_wqe_ctrl_seg);
+ a = sizeof(struct mlx5_wqe_atomic_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg);
+ b = sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg) +
+ MLX5_IB_SQ_UMR_INLINE_THRESHOLD / MLX5_IB_UMR_OCTOWORD;
+ max_overhead_xrc += max(a, b);
+
+ /* UD with LSO */
+ overhead_ud_lso = sizeof(struct mlx5_wqe_ctrl_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_pad);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_eth_seg);
+ overhead_ud_lso += sizeof(struct mlx5_wqe_datagram_seg);
+
+ return max(max_overhead_xrc, overhead_ud_lso);
+}
+
+static u32 mlx5_ib_calc_max_qp_wr(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ u32 max_wqe_bb_units = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ u32 max_wqe_size;
+ /* max QP overhead + 1 SGE, no inline, no special features */
+ max_wqe_size = mlx5_ib_calc_max_sq_overhead() +
+ sizeof(struct mlx5_wqe_data_seg);
+
+ max_wqe_size = roundup_pow_of_two(max_wqe_size);
+
+ max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);
+
+ return (max_wqe_bb_units * MLX5_SEND_WQE_BB) / max_wqe_size;
+}
+
static int mlx5_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *uhw)
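
The new limit models the worst-case WQE footprint instead of trusting log_max_qp_sz alone. A worked sketch with hypothetical sizes (the real values come from the mlx5 segment structs used above):

	/* Hypothetical numbers for illustration only. */
	u32 max_wqe_size = 192 + 16;	/* worst-case overhead + one 16 B data seg */

	max_wqe_size = roundup_pow_of_two(max_wqe_size);	/* 208 -> 256    */
	max_wqe_size = ALIGN(max_wqe_size, MLX5_SEND_WQE_BB);	/* BB = 64 -> 256 */

	/* log_max_qp_sz = 15 -> 32768 basic blocks of 64 bytes each, so   */
	/* max_qp_wr = (32768 * 64) / 256 = 8192 work requests.            */
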
@@ -1041,7 +1091,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = ~(min_page_size - 1);
props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
- props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+ props->max_qp_wr = mlx5_ib_calc_max_qp_wr(dev);
max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
sizeof(struct mlx5_wqe_data_seg);
max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
@@ -1793,7 +1843,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
}
static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
{
int err;
@@ -1805,6 +1856,7 @@ static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master,
if (err)
goto out;
+ lb_state->force_enable = true;
return 0;
out:
@@ -1813,16 +1865,22 @@ out:
}
static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
+ struct mlx5_core_dev *slave,
+ struct mlx5_ib_lb_state *lb_state)
{
mlx5_nic_vport_update_local_lb(slave, false);
mlx5_nic_vport_update_local_lb(master, false);
+
+ lb_state->force_enable = false;
}
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
int err = 0;
+ if (dev->lb.force_enable)
+ return 0;
+
mutex_lock(&dev->lb.mutex);
if (td)
dev->lb.user_td++;
@@ -1844,6 +1902,9 @@ int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
{
+ if (dev->lb.force_enable)
+ return;
+
mutex_lock(&dev->lb.mutex);
if (td)
dev->lb.user_td--;
@@ -2994,14 +3055,16 @@ int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev)
pd = ib_alloc_pd(ibdev, 0);
if (IS_ERR(pd)) {
ret = PTR_ERR(pd);
- mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%d\n", ret);
+ mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%pe\n",
+ pd);
goto unlock;
}
cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr);
if (IS_ERR(cq)) {
ret = PTR_ERR(cq);
- mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%d\n", ret);
+ mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%pe\n",
+ cq);
ib_dealloc_pd(pd);
goto unlock;
}
@@ -3045,7 +3108,9 @@ int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
s0 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(s0)) {
ret = PTR_ERR(s0);
- mlx5_ib_err(dev, "Couldn't create SRQ 0 for res init, err=%d\n", ret);
+ mlx5_ib_err(dev,
+ "Couldn't create SRQ 0 for res init, err=%pe\n",
+ s0);
goto unlock;
}
@@ -3057,7 +3122,9 @@ int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev)
s1 = ib_create_srq(devr->p0, &attr);
if (IS_ERR(s1)) {
ret = PTR_ERR(s1);
- mlx5_ib_err(dev, "Couldn't create SRQ 1 for res init, err=%d\n", ret);
+ mlx5_ib_err(dev,
+ "Couldn't create SRQ 1 for res init, err=%pe\n",
+ s1);
ib_destroy_srq(s0);
}
@@ -3118,6 +3185,7 @@ mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_core_dev *mdev = dev->mdev;
+ bool ro_supp = false;
void *mkc;
u32 mkey;
u32 pdn;
@@ -3146,14 +3214,37 @@ mlx5_ib_create_data_direct_resources(struct mlx5_ib_dev *dev)
MLX5_SET(mkc, mkc, length64, 1);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
- kvfree(in);
if (err)
- goto err;
+ goto err_mkey;
dev->ddr.mkey = mkey;
dev->ddr.pdn = pdn;
+
+ /* create another mkey with RO support */
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write)) {
+ MLX5_SET(mkc, mkc, relaxed_ordering_write, 1);
+ ro_supp = true;
+ }
+
+ if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read)) {
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, 1);
+ ro_supp = true;
+ }
+
+ if (ro_supp) {
+ err = mlx5_core_create_mkey(mdev, &mkey, in, inlen);
+ /* RO is defined as best effort */
+ if (!err) {
+ dev->ddr.mkey_ro = mkey;
+ dev->ddr.mkey_ro_valid = true;
+ }
+ }
+
+ kvfree(in);
return 0;
+err_mkey:
+ kvfree(in);
err:
mlx5_core_dealloc_pd(mdev, pdn);
return err;
@@ -3162,6 +3253,10 @@ err:
static void
mlx5_ib_free_data_direct_resources(struct mlx5_ib_dev *dev)
{
+
+ if (dev->ddr.mkey_ro_valid)
+ mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey_ro);
+
mlx5_core_destroy_mkey(dev->mdev, dev->ddr.mkey);
mlx5_core_dealloc_pd(dev->mdev, dev->ddr.pdn);
}
@@ -3523,7 +3618,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
lockdep_assert_held(&mlx5_ib_multiport_mutex);
- mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev);
+ mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
mlx5_core_mp_event_replay(ibdev->mdev,
MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
@@ -3620,7 +3715,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
MLX5_DRIVER_EVENT_AFFILIATION_DONE,
&key);
- err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev);
+ err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev, &ibdev->lb);
if (err)
goto unbind;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7ffc7ee92cf0..09d82d5f95e3 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -854,6 +854,8 @@ struct mlx5_ib_port_resources {
struct mlx5_data_direct_resources {
u32 pdn;
u32 mkey;
+ u32 mkey_ro;
+ u8 mkey_ro_valid :1;
};
struct mlx5_ib_resources {
@@ -1109,6 +1111,7 @@ struct mlx5_ib_lb_state {
u32 user_td;
int qps;
bool enabled;
+ bool force_enable;
};
struct mlx5_ib_pf_eq {
@@ -1802,6 +1805,10 @@ mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
bitmap = GENMASK_ULL(max_log_entity_size_cap, min_log_entity_size_cap);
+	/* In KSM mode the HW requires the IOVA to be aligned to the mkey's page size */
+ if (access_mode == MLX5_MKC_ACCESS_MODE_KSM && iova)
+ bitmap &= GENMASK_ULL(__ffs64(iova), 0);
+
return ib_umem_find_best_pgsz(umem, bitmap, iova);
}
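
GENMASK_ULL(__ffs64(iova), 0) spans bit 0 through the lowest set bit of the IOVA, so ANDing it into the bitmap drops every page size larger than the IOVA's natural alignment. A worked example with hypothetical values:

	u64 iova   = 0x200000;			/* 2 MiB aligned, __ffs64() = 21 */
	u64 bitmap = GENMASK_ULL(30, 12);	/* page sizes 4 KiB .. 1 GiB     */

	bitmap &= GENMASK_ULL(__ffs64(iova), 0);	/* keeps bits 12..21 only */
	/* The largest usable mkey page size is now 2 MiB (bit 21). */
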
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1317f2cb38a4..325fa04cbe8a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1652,8 +1652,7 @@ reg_user_mr_dmabuf(struct ib_pd *pd, struct device *dma_device,
fd, access_flags);
if (IS_ERR(umem_dmabuf)) {
- mlx5_ib_dbg(dev, "umem_dmabuf get failed (%ld)\n",
- PTR_ERR(umem_dmabuf));
+ mlx5_ib_dbg(dev, "umem_dmabuf get failed (%pe)\n", umem_dmabuf);
return ERR_CAST(umem_dmabuf);
}
@@ -1717,11 +1716,11 @@ reg_user_mr_dmabuf_by_data_direct(struct ib_pd *pd, u64 offset,
goto end;
}
- /* The device's 'data direct mkey' was created without RO flags to
- * simplify things and allow for a single mkey per device.
- * Since RO is not a must, mask it out accordingly.
+	/* If the device has no 'data direct mkey' with RO flags,
+	 * mask RO out of the access flags accordingly.
*/
- access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
+ if (!dev->ddr.mkey_ro_valid)
+ access_flags &= ~IB_ACCESS_RELAXED_ORDERING;
crossed_mr = reg_user_mr_dmabuf(pd, &data_direct_dev->pdev->dev,
offset, length, virt_addr, fd,
access_flags, MLX5_MKC_ACCESS_MODE_KSM,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 0e8ae85af5a6..e71ee3d52eb0 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -97,33 +97,28 @@ struct mlx5_pagefault {
* a pagefault. */
#define MMU_NOTIFIER_TIMEOUT 1000
-#define MLX5_IMR_MTT_BITS (30 - PAGE_SHIFT)
-#define MLX5_IMR_MTT_SHIFT (MLX5_IMR_MTT_BITS + PAGE_SHIFT)
-#define MLX5_IMR_MTT_ENTRIES BIT_ULL(MLX5_IMR_MTT_BITS)
-#define MLX5_IMR_MTT_SIZE BIT_ULL(MLX5_IMR_MTT_SHIFT)
-#define MLX5_IMR_MTT_MASK (~(MLX5_IMR_MTT_SIZE - 1))
-
-#define MLX5_KSM_PAGE_SHIFT MLX5_IMR_MTT_SHIFT
-
static u64 mlx5_imr_ksm_entries;
+static u64 mlx5_imr_mtt_entries;
+static u64 mlx5_imr_mtt_size;
+static u8 mlx5_imr_mtt_shift;
+static u8 mlx5_imr_ksm_page_shift;
-static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
+static void populate_ksm(struct mlx5_ksm *pksm, size_t idx, size_t nentries,
struct mlx5_ib_mr *imr, int flags)
{
struct mlx5_core_dev *dev = mr_to_mdev(imr)->mdev;
- struct mlx5_klm *end = pklm + nentries;
- int step = MLX5_CAP_ODP(dev, mem_page_fault) ? MLX5_IMR_MTT_SIZE : 0;
+ struct mlx5_ksm *end = pksm + nentries;
+ u64 step = MLX5_CAP_ODP(dev, mem_page_fault) ? mlx5_imr_mtt_size : 0;
__be32 key = MLX5_CAP_ODP(dev, mem_page_fault) ?
cpu_to_be32(imr->null_mmkey.key) :
mr_to_mdev(imr)->mkeys.null_mkey;
u64 va =
- MLX5_CAP_ODP(dev, mem_page_fault) ? idx * MLX5_IMR_MTT_SIZE : 0;
+ MLX5_CAP_ODP(dev, mem_page_fault) ? idx * mlx5_imr_mtt_size : 0;
if (flags & MLX5_IB_UPD_XLT_ZAP) {
- for (; pklm != end; pklm++, idx++, va += step) {
- pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- pklm->key = key;
- pklm->va = cpu_to_be64(va);
+ for (; pksm != end; pksm++, idx++, va += step) {
+ pksm->key = key;
+ pksm->va = cpu_to_be64(va);
}
return;
}
@@ -147,16 +142,15 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
*/
lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
- for (; pklm != end; pklm++, idx++, va += step) {
+ for (; pksm != end; pksm++, idx++, va += step) {
struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
- pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
if (mtt) {
- pklm->key = cpu_to_be32(mtt->ibmr.lkey);
- pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
+ pksm->key = cpu_to_be32(mtt->ibmr.lkey);
+ pksm->va = cpu_to_be64(idx * mlx5_imr_mtt_size);
} else {
- pklm->key = key;
- pklm->va = cpu_to_be64(va);
+ pksm->key = key;
+ pksm->va = cpu_to_be64(va);
}
}
}
@@ -201,7 +195,7 @@ int mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags)
{
if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
- populate_klm(xlt, idx, nentries, mr, flags);
+ populate_ksm(xlt, idx, nentries, mr, flags);
return 0;
} else {
return populate_mtt(xlt, idx, nentries, mr, flags);
@@ -226,7 +220,7 @@ static void free_implicit_child_mr_work(struct work_struct *work)
mutex_lock(&odp_imr->umem_mutex);
mlx5r_umr_update_xlt(mr->parent,
- ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT, 1, 0,
+ ib_umem_start(odp) >> mlx5_imr_mtt_shift, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT | MLX5_IB_UPD_XLT_ATOMIC);
mutex_unlock(&odp_imr->umem_mutex);
mlx5_ib_dereg_mr(&mr->ibmr, NULL);
@@ -237,7 +231,7 @@ static void free_implicit_child_mr_work(struct work_struct *work)
static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
+ unsigned long idx = ib_umem_start(odp) >> mlx5_imr_mtt_shift;
struct mlx5_ib_mr *imr = mr->parent;
/*
@@ -265,7 +259,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
/* Freeing a MR is a sleeping operation, so bounce to a work queue */
INIT_WORK(&mr->odp_destroy.work, free_implicit_child_mr_work);
- queue_work(system_unbound_wq, &mr->odp_destroy.work);
+ queue_work(system_dfl_wq, &mr->odp_destroy.work);
}
static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
@@ -425,7 +419,10 @@ static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
MLX5_CAP_GEN(dev->mdev, null_mkey) &&
MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
- !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
+ !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled) &&
+ mlx5_imr_ksm_entries != 0 &&
+ !(mlx5_imr_ksm_page_shift >
+ get_max_log_entity_size_cap(dev, MLX5_MKC_ACCESS_MODE_KSM)))
caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;
}
@@ -476,14 +473,14 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
int err;
odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
- idx * MLX5_IMR_MTT_SIZE,
- MLX5_IMR_MTT_SIZE, &mlx5_mn_ops);
+ idx * mlx5_imr_mtt_size,
+ mlx5_imr_mtt_size, &mlx5_mn_ops);
if (IS_ERR(odp))
return ERR_CAST(odp);
mr = mlx5_mr_cache_alloc(dev, imr->access_flags,
MLX5_MKC_ACCESS_MODE_MTT,
- MLX5_IMR_MTT_ENTRIES);
+ mlx5_imr_mtt_entries);
if (IS_ERR(mr)) {
ib_umem_odp_release(odp);
return mr;
@@ -495,7 +492,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
- mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->ibmr.iova = idx * mlx5_imr_mtt_size;
mr->parent = imr;
odp->private = mr;
@@ -506,7 +503,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
refcount_set(&mr->mmkey.usecount, 2);
err = mlx5r_umr_update_xlt(mr, 0,
- MLX5_IMR_MTT_ENTRIES,
+ mlx5_imr_mtt_entries,
PAGE_SHIFT,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ENABLE);
@@ -611,7 +608,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
struct mlx5_ib_mr *imr;
int err;
- if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
+ if (!mlx5r_umr_can_load_pas(dev, mlx5_imr_mtt_entries * PAGE_SIZE))
return ERR_PTR(-EOPNOTSUPP);
umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
@@ -647,7 +644,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
err = mlx5r_umr_update_xlt(imr, 0,
mlx5_imr_ksm_entries,
- MLX5_KSM_PAGE_SHIFT,
+ mlx5_imr_ksm_page_shift,
MLX5_IB_UPD_XLT_INDIRECT |
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ENABLE);
@@ -750,20 +747,20 @@ static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
struct ib_umem_odp *odp_imr, u64 user_va,
size_t bcnt, u32 *bytes_mapped, u32 flags)
{
- unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
+ unsigned long end_idx = (user_va + bcnt - 1) >> mlx5_imr_mtt_shift;
unsigned long upd_start_idx = end_idx + 1;
unsigned long upd_len = 0;
unsigned long npages = 0;
int err;
int ret;
- if (unlikely(user_va >= mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE ||
- mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
+ if (unlikely(user_va >= mlx5_imr_ksm_entries * mlx5_imr_mtt_size ||
+ mlx5_imr_ksm_entries * mlx5_imr_mtt_size - user_va < bcnt))
return -EFAULT;
/* Fault each child mr that intersects with our interval. */
while (bcnt) {
- unsigned long idx = user_va >> MLX5_IMR_MTT_SHIFT;
+ unsigned long idx = user_va >> mlx5_imr_mtt_shift;
struct ib_umem_odp *umem_odp;
struct mlx5_ib_mr *mtt;
u64 len;
@@ -1924,9 +1921,25 @@ void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
int mlx5_ib_odp_init(void)
{
+ u32 log_va_pages = ilog2(TASK_SIZE) - PAGE_SHIFT;
+ u8 mlx5_imr_mtt_bits;
+
+	/* 48 bits is the default arm64 VA space and also covers x86-64 4-level paging (47 bits) */
+ if (log_va_pages <= 48 - PAGE_SHIFT)
+ mlx5_imr_mtt_shift = 30;
+	/* 56 bits is x86-64 with 5-level paging */
+ else if (log_va_pages <= 56 - PAGE_SHIFT)
+ mlx5_imr_mtt_shift = 34;
+ else
+ return 0;
+
+ mlx5_imr_mtt_size = BIT_ULL(mlx5_imr_mtt_shift);
+ mlx5_imr_mtt_bits = mlx5_imr_mtt_shift - PAGE_SHIFT;
+ mlx5_imr_mtt_entries = BIT_ULL(mlx5_imr_mtt_bits);
mlx5_imr_ksm_entries = BIT_ULL(get_order(TASK_SIZE) -
- MLX5_IMR_MTT_BITS);
+ mlx5_imr_mtt_bits);
+ mlx5_imr_ksm_page_shift = mlx5_imr_mtt_shift;
return 0;
}
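
The child-MR granularity now scales with the VA space instead of being a fixed 1 GiB. Worked through with 4 KiB pages (PAGE_SHIFT = 12), per the comments above:

	/* 4-level x86-64: TASK_SIZE ~ 2^47, log_va_pages = 47 - 12 = 35.   */
	/*   35 <= 48 - 12 = 36, so mlx5_imr_mtt_shift = 30 (1 GiB child).  */
	/* 5-level x86-64: TASK_SIZE ~ 2^56, log_va_pages = 56 - 12 = 44.   */
	/*   44 <= 56 - 12 = 44, so mlx5_imr_mtt_shift = 34 (16 GiB child). */
	/* Each child MR then covers 2^(shift - 12) pages, and the KSM      */
	/* table holds 2^(get_order(TASK_SIZE) - mtt_bits) entries.         */
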
@@ -2093,6 +2106,6 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
destroy_prefetch_work(work);
return rc;
}
- queue_work(system_unbound_wq, &work->work);
+ queue_work(system_dfl_wq, &work->work);
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 88724d15705d..69af20790481 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3451,10 +3451,11 @@ int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate)
{
u32 stat_rate_support;
- if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS)
+ if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS ||
+ rate == IB_RATE_1600_GBPS)
return 0;
- if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
+ if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_1600_GBPS)
return -EINVAL;
stat_rate_support = MLX5_CAP_GEN(dev->mdev, stat_rate_support);
diff --git a/drivers/infiniband/hw/mlx5/std_types.c b/drivers/infiniband/hw/mlx5/std_types.c
index bdb568411091..2fcf553044e1 100644
--- a/drivers/infiniband/hw/mlx5/std_types.c
+++ b/drivers/infiniband/hw/mlx5/std_types.c
@@ -83,33 +83,14 @@ static int fill_vport_icm_addr(struct mlx5_core_dev *mdev, u16 vport,
static int fill_vport_vhca_id(struct mlx5_core_dev *mdev, u16 vport,
struct mlx5_ib_uapi_query_port *info)
{
- size_t out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
- void *out;
- int err;
-
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
+ int err = mlx5_vport_get_vhca_id(mdev, vport, &info->vport_vhca_id);
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, other_function, true);
- MLX5_SET(query_hca_cap_in, in, function_id, vport);
- MLX5_SET(query_hca_cap_in, in, op_mod,
- MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE |
- HCA_CAP_OPMOD_GET_CUR);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, out_sz);
if (err)
- goto out;
-
- info->vport_vhca_id = MLX5_GET(query_hca_cap_out, out,
- capability.cmd_hca_cap.vhca_id);
+ return err;
info->flags |= MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID;
-out:
- kfree(out);
- return err;
+
+ return 0;
}
static int fill_multiport_info(struct mlx5_ib_dev *dev, u32 port_num,
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 7ef35cddce81..4e562e0dd9e1 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -761,7 +761,11 @@ _mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags, bool dd,
if (dd) {
cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
- cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
+ if (mr->access_flags & IB_ACCESS_RELAXED_ORDERING &&
+ dev->ddr.mkey_ro_valid)
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey_ro);
+ else
+ cur_ksm->key = cpu_to_be32(dev->ddr.mkey);
if (mr->umem->is_dmabuf &&
(flags & MLX5_IB_UPD_XLT_ZAP)) {
cur_ksm->va = 0;
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
index 1d7fc3226bca..cfb42a8f5768 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h
@@ -53,6 +53,10 @@ extern void
usnic_uiom_interval_tree_remove(struct usnic_uiom_interval_node *node,
struct rb_root_cached *root);
extern struct usnic_uiom_interval_node *
+usnic_uiom_interval_tree_subtree_search(struct usnic_uiom_interval_node *node,
+ unsigned long start,
+ unsigned long last);
+extern struct usnic_uiom_interval_node *
usnic_uiom_interval_tree_iter_first(struct rb_root_cached *root,
unsigned long start,
unsigned long last);
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index 0ca2743f1075..e7835ca70e2b 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -518,7 +518,8 @@ int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
*/
int rvt_driver_cq_init(void)
{
- comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
+ comp_vector_wq = alloc_workqueue("%s",
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_PERCPU,
0, "rdmavt_cq");
if (!comp_vector_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index e825e2ef7966..134a79eecfcb 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -492,7 +492,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
{
u32 i, offset, max_scan, qpn;
struct rvt_qpn_map *map;
- u32 ret;
+ int ret;
u32 max_qpn = exclude_prefix == RVT_AIP_QP_PREFIX ?
RVT_AIP_QPN_MAX : RVT_QPN_MAX;
@@ -510,7 +510,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
else
qpt->flags |= n;
spin_unlock(&qpt->lock);
- goto bail;
+
+ return ret;
}
qpn = qpt->last + qpt->incr;
@@ -530,7 +531,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
if (!test_and_set_bit(offset, map->page)) {
qpt->last = qpn;
ret = qpn;
- goto bail;
+
+ return ret;
}
offset += qpt->incr;
/*
@@ -565,10 +567,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
qpn = mk_qpn(qpt, map, offset);
}
- ret = -ENOMEM;
-
-bail:
- return ret;
+ return -ENOMEM;
}
/**
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index bcb97b3ea58a..b1df05238848 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -452,7 +452,6 @@ static int rxe_mr_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int leng
length -= bytes;
iova += bytes;
- page_offset = 0;
}
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index ac0183a2ff7a..0195d361e5e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -20,6 +20,54 @@
static struct rxe_recv_sockets recv_sockets;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * lockdep can report false positive circular dependencies
+ * when user-space socket API users or in-kernel users
+ * switch between a TCP and an RDMA transport.
+ * Switching between siw and rxe may also cause problems,
+ * since by default sockets are classified only by family
+ * and not by IP protocol, and the application and the
+ * low-level sockets may use different locks.
+ *
+ * Problems were seen with ksmbd.ko and cifs.ko when
+ * switching transports; use git blame for details.
+ */
+static struct lock_class_key rxe_recv_sk_key[2];
+static struct lock_class_key rxe_recv_slock_key[2];
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rxe_reclassify_recv_socket(struct socket *sock)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct sock *sk = sock->sk;
+
+ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+ return;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk,
+ "slock-AF_INET-RDMA-RXE-RECV",
+ &rxe_recv_slock_key[0],
+ "sk_lock-AF_INET-RDMA-RXE-RECV",
+ &rxe_recv_sk_key[0]);
+ break;
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk,
+ "slock-AF_INET6-RDMA-RXE-RECV",
+ &rxe_recv_slock_key[1],
+ "sk_lock-AF_INET6-RDMA-RXE-RECV",
+ &rxe_recv_sk_key[1]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+}
+
static struct dst_entry *rxe_find_route4(struct rxe_qp *qp,
struct net_device *ndev,
struct in_addr *saddr,
@@ -192,6 +240,7 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
err = udp_sock_create(net, &udp_cfg, &sock);
if (err < 0)
return ERR_PTR(err);
+ rxe_reclassify_recv_socket(sock);
tnl_cfg.encap_type = 1;
tnl_cfg.encap_rcv = rxe_udp_encap_recv;
diff --git a/drivers/infiniband/sw/rxe/rxe_odp.c b/drivers/infiniband/sw/rxe/rxe_odp.c
index f58e3ec6252f..ae71812bea82 100644
--- a/drivers/infiniband/sw/rxe/rxe_odp.c
+++ b/drivers/infiniband/sw/rxe/rxe_odp.c
@@ -358,7 +358,6 @@ int rxe_odp_flush_pmem_iova(struct rxe_mr *mr, u64 iova,
length -= bytes;
iova += bytes;
- page_offset = 0;
}
mutex_unlock(&umem_odp->umem_mutex);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 95f1c1c2949d..845bdd03ca28 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -15,6 +15,54 @@
#include "rxe_queue.h"
#include "rxe_task.h"
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * lockdep can report false positive circular dependencies
+ * when user-space socket API users or in-kernel users
+ * switch between a TCP and an RDMA transport.
+ * Switching between siw and rxe may also cause problems,
+ * since by default sockets are classified only by family
+ * and not by IP protocol, and the application and the
+ * low-level sockets may use different locks.
+ *
+ * Problems were seen with ksmbd.ko and cifs.ko when
+ * switching transports; use git blame for details.
+ */
+static struct lock_class_key rxe_send_sk_key[2];
+static struct lock_class_key rxe_send_slock_key[2];
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void rxe_reclassify_send_socket(struct socket *sock)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct sock *sk = sock->sk;
+
+ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+ return;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk,
+ "slock-AF_INET-RDMA-RXE-SEND",
+ &rxe_send_slock_key[0],
+ "sk_lock-AF_INET-RDMA-RXE-SEND",
+ &rxe_send_sk_key[0]);
+ break;
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk,
+ "slock-AF_INET6-RDMA-RXE-SEND",
+ &rxe_send_slock_key[1],
+ "sk_lock-AF_INET6-RDMA-RXE-SEND",
+ &rxe_send_sk_key[1]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+}
+
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
int has_srq)
{
@@ -244,6 +292,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
if (err < 0)
return err;
+ rxe_reclassify_send_socket(qp->sk);
qp->sk->sk->sk_user_data = qp;
/* pick a source UDP port number for this QP based on
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 3661cb627d28..2a234f26ac10 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -171,7 +171,7 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
udata, mi, &srq->rq.producer_lock,
&srq->rq.consumer_lock);
if (err)
- goto err_free;
+ return err;
srq->rq.max_wr = attr->max_wr;
}
@@ -180,11 +180,6 @@ int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->limit = attr->srq_limit;
return 0;
-
-err_free:
- rxe_queue_cleanup(q);
- srq->rq.queue = NULL;
- return err;
}
void rxe_srq_cleanup(struct rxe_pool_elem *elem)
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6f8f353e9583..f522820b950c 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -132,8 +132,12 @@ static void do_task(struct rxe_task *task)
* yield the cpu and reschedule the task
*/
if (!ret) {
- task->state = TASK_STATE_IDLE;
- resched = 1;
+ if (task->state != TASK_STATE_DRAINING) {
+ task->state = TASK_STATE_IDLE;
+ resched = 1;
+ } else {
+ cont = 1;
+ }
goto exit;
}
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 708b13993fdf..1d3de8209bfa 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -39,6 +39,55 @@ static void siw_cm_llp_error_report(struct sock *s);
static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
int status);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * lockdep can report false positive circular dependencies
+ * when user-space socket API users or in-kernel users
+ * switch between a TCP and an RDMA transport.
+ * Switching between siw and rxe may also cause problems,
+ * since by default sockets are classified only by family
+ * and not by IP protocol, and the application and the
+ * low-level sockets may use different locks.
+ *
+ * Problems were seen with ksmbd.ko and cifs.ko when
+ * switching transports; use git blame for details.
+ */
+static struct lock_class_key siw_sk_key[2];
+static struct lock_class_key siw_slock_key[2];
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+
+static inline void siw_reclassify_socket(struct socket *sock)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct sock *sk = sock->sk;
+
+ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+ return;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk,
+ "slock-AF_INET-RDMA-SIW",
+ &siw_slock_key[0],
+ "sk_lock-AF_INET-RDMA-SIW",
+ &siw_sk_key[0]);
+ break;
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk,
+ "slock-AF_INET6-RDMA-SIW",
+ &siw_slock_key[1],
+ "sk_lock-AF_INET6-RDMA-SIW",
+ &siw_sk_key[1]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
+}
+
static void siw_sk_assign_cm_upcalls(struct sock *sk)
{
struct siw_cep *cep = sk_to_cep(sk);
@@ -1340,11 +1389,11 @@ static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
return rv;
}
- rv = s->ops->bind(s, laddr, size);
+ rv = s->ops->bind(s, (struct sockaddr_unsized *)laddr, size);
if (rv < 0)
return rv;
- rv = s->ops->connect(s, raddr, size, flags);
+ rv = s->ops->connect(s, (struct sockaddr_unsized *)raddr, size, flags);
return rv < 0 ? rv : 0;
}
@@ -1394,6 +1443,7 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
goto error;
+ siw_reclassify_socket(s);
/*
* NOTE: For simplification, connect() is called in blocking
@@ -1770,6 +1820,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
return rv;
+ siw_reclassify_socket(s);
/*
* Allow binding local port when still in TIME_WAIT from last close.
@@ -1789,7 +1840,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
goto error;
}
}
- rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ rv = s->ops->bind(s, (struct sockaddr_unsized *)laddr,
sizeof(struct sockaddr_in));
} else {
struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
@@ -1813,7 +1864,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
goto error;
}
}
- rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ rv = s->ops->bind(s, (struct sockaddr_unsized *)laddr,
sizeof(struct sockaddr_in6));
}
if (rv) {
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 35c3bde0d00a..efa2f097b582 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -769,7 +769,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
struct siw_wqe *wqe = tx_wqe(qp);
unsigned long flags;
- int rv = 0;
+ int rv = 0, imm_err = 0;
if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
@@ -955,9 +955,17 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
* Send directly if SQ processing is not in progress.
* Eventual immediate errors (rv < 0) do not affect the involved
* RI resources (Verbs, 8.3.1) and thus do not prevent from SQ
- * processing, if new work is already pending. But rv must be passed
- * to caller.
+	 * processing, if new work is already pending. But rv and a pointer
+	 * to the failed work request must be passed to the caller.
*/
+ if (unlikely(rv < 0)) {
+ /*
+ * Immediate error
+ */
+ siw_dbg_qp(qp, "Immediate error %d\n", rv);
+ imm_err = rv;
+ *bad_wr = wr;
+ }
if (wqe->wr_status != SIW_WR_IDLE) {
spin_unlock_irqrestore(&qp->sq_lock, flags);
goto skip_direct_sending;
@@ -982,15 +990,10 @@ skip_direct_sending:
up_read(&qp->state_lock);
- if (rv >= 0)
- return 0;
- /*
- * Immediate error
- */
- siw_dbg_qp(qp, "error %d\n", rv);
+ if (unlikely(imm_err))
+ return imm_err;
- *bad_wr = wr;
- return rv;
+ return (rv >= 0) ? 0 : rv;
}
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 7acafc5c0e09..300afc27c561 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -351,26 +351,27 @@ static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
}
/*
- * Find the master net_device on top of the given net_device.
+ * Find the L2 master net_device on top of the given net_device.
* @dev: base IPoIB net_device
*
- * Returns the master net_device with a reference held, or the same net_device
- * if no master exists.
+ * Returns the L2 master net_device (such as a bond netdevice) with a
+ * reference held if one exists, or the same netdev with a reference held
+ * when no master exists or when the master is an L3 master (such as a
+ * VRF netdev).
*/
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
struct net_device *master;
rcu_read_lock();
+
master = netdev_master_upper_dev_get_rcu(dev);
+ if (!master || netif_is_l3_master(master))
+ master = dev;
+
dev_hold(master);
rcu_read_unlock();
- if (master)
- return master;
-
- dev_hold(dev);
- return dev;
+ return master;
}
struct ipoib_walk_data {
@@ -522,7 +523,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
if (ret)
return NULL;
- /* See if we can find a unique device matching the L2 parameters */
+ /* See if we can find a unique device matching the pkey and GID */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, NULL, &net_dev);
@@ -535,7 +536,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
dev_put(net_dev);
- /* Couldn't find a unique device with L2 parameters only. Use L3
+ /* Couldn't find a unique device with pkey and GID only. Use L3
* address to uniquely match the net device */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, addr, &net_dev);
@@ -1824,6 +1825,31 @@ static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
return priv->rn_ops->ndo_eth_ioctl(dev, ifr, cmd);
}
+static int ipoib_hwtstamp_get(struct net_device *dev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->rn_ops->ndo_hwtstamp_get)
+ /* legacy */
+ return dev_eth_ioctl(dev, config->ifr, SIOCGHWTSTAMP);
+
+ return priv->rn_ops->ndo_hwtstamp_get(dev, config);
+}
+
+static int ipoib_hwtstamp_set(struct net_device *dev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+ if (!priv->rn_ops->ndo_hwtstamp_set)
+ /* legacy */
+ return dev_eth_ioctl(dev, config->ifr, SIOCSHWTSTAMP);
+
+ return priv->rn_ops->ndo_hwtstamp_set(dev, config, extack);
+}
+
static int ipoib_dev_init(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
@@ -2148,6 +2174,8 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_set_mac_address = ipoib_set_mac,
.ndo_get_stats64 = ipoib_get_stats,
.ndo_eth_ioctl = ipoib_ioctl,
+ .ndo_hwtstamp_get = ipoib_hwtstamp_get,
+ .ndo_hwtstamp_set = ipoib_hwtstamp_set,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -2163,6 +2191,8 @@ static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_get_iflink = ipoib_get_iflink,
.ndo_get_stats64 = ipoib_get_stats,
.ndo_eth_ioctl = ipoib_ioctl,
+ .ndo_hwtstamp_get = ipoib_hwtstamp_get,
+ .ndo_hwtstamp_set = ipoib_hwtstamp_set,
};
static const struct net_device_ops ipoib_netdev_default_pf = {
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 2e3c0516ce8f..dc531fad73de 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -1029,7 +1029,7 @@ static int __init iser_init(void)
mutex_init(&ig.connlist_mutex);
INIT_LIST_HEAD(&ig.connlist);
- release_wq = alloc_workqueue("release workqueue", 0, 0);
+ release_wq = alloc_workqueue("release workqueue", WQ_PERCPU, 0);
if (!release_wq) {
iser_err("failed to allocate release workqueue\n");
err = -ENOMEM;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 42977a5326ee..af811d060cc8 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2613,7 +2613,7 @@ static struct iscsit_transport iser_target_transport = {
static int __init isert_init(void)
{
- isert_login_wq = alloc_workqueue("isert_login_wq", 0, 0);
+ isert_login_wq = alloc_workqueue("isert_login_wq", WQ_PERCPU, 0);
if (!isert_login_wq) {
isert_err("Unable to allocate isert_login_wq\n");
return -ENOMEM;
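
The bare 0 flags in these ULP workqueues relied on per-CPU placement being the implicit alloc_workqueue() default; with the workqueue API moving toward unbound as the default, that behaviour is now requested explicitly (a sketch, assuming these login/release queues really want per-CPU placement):

	wq = alloc_workqueue("isert_login_wq", WQ_PERCPU, 0);
	if (!wq)
		return -ENOMEM;
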
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index ef4abdea3c2d..9ecc6343455d 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -1450,7 +1450,7 @@ err_free_chunks:
kfree(srv->chunks);
err_free_srv:
- kfree(srv);
+ put_device(&srv->dev);
return ERR_PTR(-ENOMEM);
}
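
The rtrs-srv change is the usual struct device lifetime fix: once device_initialize() has run, the embedded kobject owns the memory, so error paths must drop the reference rather than free directly, leaving the ->release() callback as the single point that frees the object. The general pattern (a sketch, not the rtrs code):

	device_initialize(&srv->dev);		/* refcount now live        */
	if (setup_failed) {
		put_device(&srv->dev);		/* ->release() frees srv    */
		return ERR_PTR(-ENOMEM);	/* never kfree(srv) here    */
	}
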
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 5dfb4644446b..71269446353d 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -667,9 +667,9 @@ static int srpt_refresh_port(struct srpt_port *sport)
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(mad_agent)) {
- pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
+ pr_err("%s-%d: MAD agent registration failed (%pe). Note: this is expected if SR-IOV is enabled.\n",
dev_name(&sport->sdev->device->dev), sport->port,
- PTR_ERR(mad_agent));
+ mad_agent);
sport->mad_agent = NULL;
memset(&port_modify, 0, sizeof(port_modify));
port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
@@ -1865,8 +1865,8 @@ retry:
IB_POLL_WORKQUEUE);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
- pr_err("failed to create CQ cqe= %d ret= %d\n",
- ch->rq_size + sq_size, ret);
+ pr_err("failed to create CQ cqe= %d ret= %pe\n",
+ ch->rq_size + sq_size, ch->cq);
goto out;
}
ch->cq_size = ch->rq_size + sq_size;
@@ -3132,7 +3132,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
WARN_ON_ONCE(sdev->srq);
srq = ib_create_srq(sdev->pd, &srq_attr);
if (IS_ERR(srq)) {
- pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
+ pr_debug("ib_create_srq() failed: %pe\n", srq);
return PTR_ERR(srq);
}
@@ -3236,8 +3236,7 @@ static int srpt_add_one(struct ib_device *device)
if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
if (IS_ERR(sdev->cm_id)) {
- pr_info("ib_create_cm_id() failed: %ld\n",
- PTR_ERR(sdev->cm_id));
+ pr_info("ib_create_cm_id() failed: %pe\n", sdev->cm_id);
ret = PTR_ERR(sdev->cm_id);
sdev->cm_id = NULL;
if (!rdma_cm_id)
@@ -3687,8 +3686,7 @@ static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(rdma_cm_id)) {
- pr_err("RDMA/CM ID creation failed: %ld\n",
- PTR_ERR(rdma_cm_id));
+ pr_err("RDMA/CM ID creation failed: %pe\n", rdma_cm_id);
goto out;
}
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index b527308cb52e..66f7ffe8c7e0 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -8,9 +8,9 @@
/* #define DEBUG */
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/limits.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/sched.h>
diff --git a/drivers/input/ff-memless.c b/drivers/input/ff-memless.c
index 91636479ee3c..e0c1c61aae71 100644
--- a/drivers/input/ff-memless.c
+++ b/drivers/input/ff-memless.c
@@ -10,6 +10,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/module.h>
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index a832bc46bc92..f4f12dd00fff 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/export.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/io.h>
diff --git a/drivers/input/input-compat.c b/drivers/input/input-compat.c
index 2ccd3eedbd67..a5043193ead8 100644
--- a/drivers/input/input-compat.c
+++ b/drivers/input/input-compat.c
@@ -6,6 +6,7 @@
*/
#include <linux/export.h>
+#include <linux/sprintf.h>
#include <linux/uaccess.h>
#include "input-compat.h"
@@ -94,6 +95,28 @@ int input_ff_effect_from_user(const char __user *buffer, size_t size,
return 0;
}
+int input_bits_to_string(char *buf, int buf_size, unsigned long bits,
+ bool skip_empty)
+{
+ int len = 0;
+
+ if (in_compat_syscall()) {
+ u32 dword = bits >> 32;
+ if (dword || !skip_empty)
+ len += snprintf(buf, buf_size, "%x ", dword);
+
+ dword = bits & 0xffffffffUL;
+ if (dword || !skip_empty || len)
+ len += snprintf(buf + len, max(buf_size - len, 0),
+ "%x", dword);
+ } else {
+ if (bits || !skip_empty)
+ len += snprintf(buf, buf_size, "%lx", bits);
+ }
+
+ return len;
+}
+
#else
int input_event_from_user(const char __user *buffer,
@@ -126,6 +149,13 @@ int input_ff_effect_from_user(const char __user *buffer, size_t size,
return 0;
}
+int input_bits_to_string(char *buf, int buf_size, unsigned long bits,
+ bool skip_empty)
+{
+ return bits || !skip_empty ?
+ snprintf(buf, buf_size, "%lx", bits) : 0;
+}
+
#endif /* CONFIG_COMPAT */
EXPORT_SYMBOL_GPL(input_event_from_user);
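
For a 32-bit process on a 64-bit kernel the helper splits each long into two hex dwords, matching what a 32-bit kernel would have printed. A hypothetical usage sketch:

	char buf[20];
	int len;

	/* native 64-bit caller prints "123456789abcdef0";     */
	/* 32-bit compat caller prints "12345678 9abcdef0".    */
	len = input_bits_to_string(buf, sizeof(buf), 0x123456789abcdef0UL, false);
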
diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h
index 3b7bb12b023b..99c87ceb923d 100644
--- a/drivers/input/input-compat.h
+++ b/drivers/input/input-compat.h
@@ -75,4 +75,7 @@ int input_event_to_user(char __user *buffer,
int input_ff_effect_from_user(const char __user *buffer, size_t size,
struct ff_effect *effect);
+int input_bits_to_string(char *buf, int buf_size, unsigned long bits,
+ bool skip_empty);
+
#endif /* _INPUT_COMPAT_H */
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index 337006dd9dcf..09f518897d4a 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -198,6 +198,7 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
struct input_mt *mt = dev->mt;
struct input_mt_slot *oldest;
int oldid, count, i;
+ int p, reported_p = 0;
if (!mt)
return;
@@ -216,6 +217,13 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
oldest = ps;
oldid = id;
}
+ if (test_bit(ABS_MT_PRESSURE, dev->absbit)) {
+ p = input_mt_get_value(ps, ABS_MT_PRESSURE);
+ if (mt->flags & INPUT_MT_TOTAL_FORCE)
+ reported_p += p;
+ else if (oldid == id)
+ reported_p = p;
+ }
count++;
}
@@ -245,10 +253,8 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
input_event(dev, EV_ABS, ABS_X, x);
input_event(dev, EV_ABS, ABS_Y, y);
- if (test_bit(ABS_MT_PRESSURE, dev->absbit)) {
- int p = input_mt_get_value(oldest, ABS_MT_PRESSURE);
- input_event(dev, EV_ABS, ABS_PRESSURE, p);
- }
+ if (test_bit(ABS_MT_PRESSURE, dev->absbit))
+ input_event(dev, EV_ABS, ABS_PRESSURE, reported_p);
} else {
if (test_bit(ABS_MT_PRESSURE, dev->absbit))
input_event(dev, EV_ABS, ABS_PRESSURE, 0);
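
With INPUT_MT_TOTAL_FORCE set, the emulated ABS_PRESSURE becomes the sum of ABS_MT_PRESSURE over all active slots instead of only the oldest contact's value. A hypothetical two-finger example:

	/* Contacts reporting ABS_MT_PRESSURE = 30 and 50:                */
	/*   flag clear -> ABS_PRESSURE = the oldest contact's value      */
	/*   INPUT_MT_TOTAL_FORCE set -> ABS_PRESSURE = 30 + 50 = 80      */
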
diff --git a/drivers/input/input-poller.c b/drivers/input/input-poller.c
index 9c57713a6151..1ce83d6521bb 100644
--- a/drivers/input/input-poller.c
+++ b/drivers/input/input-poller.c
@@ -4,6 +4,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 1da41324362b..a500e1e276c2 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -8,6 +8,7 @@
#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/idr.h>
@@ -998,41 +999,6 @@ static int input_attach_handler(struct input_dev *dev, struct input_handler *han
return error;
}
-#ifdef CONFIG_COMPAT
-
-static int input_bits_to_string(char *buf, int buf_size,
- unsigned long bits, bool skip_empty)
-{
- int len = 0;
-
- if (in_compat_syscall()) {
- u32 dword = bits >> 32;
- if (dword || !skip_empty)
- len += snprintf(buf, buf_size, "%x ", dword);
-
- dword = bits & 0xffffffffUL;
- if (dword || !skip_empty || len)
- len += snprintf(buf + len, max(buf_size - len, 0),
- "%x", dword);
- } else {
- if (bits || !skip_empty)
- len += snprintf(buf, buf_size, "%lx", bits);
- }
-
- return len;
-}
-
-#else /* !CONFIG_COMPAT */
-
-static int input_bits_to_string(char *buf, int buf_size,
- unsigned long bits, bool skip_empty)
-{
- return bits || !skip_empty ?
- snprintf(buf, buf_size, "%lx", bits) : 0;
-}
-
-#endif
-
#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_bus_input_dir;
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index 55e6321adab9..86d09faa685c 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -6,6 +6,7 @@
* USB/RS232 I-Force joysticks and wheels.
*/
+#include <linux/export.h>
#include <linux/unaligned.h>
#include "iforce.h"
diff --git a/drivers/input/joystick/iforce/iforce-packets.c b/drivers/input/joystick/iforce/iforce-packets.c
index 74181d5123cd..fd1cd731d781 100644
--- a/drivers/input/joystick/iforce/iforce-packets.c
+++ b/drivers/input/joystick/iforce/iforce-packets.c
@@ -6,6 +6,7 @@
* USB/RS232 I-Force joysticks and wheels.
*/
+#include <linux/export.h>
#include <linux/unaligned.h>
#include "iforce.h"
diff --git a/drivers/input/joystick/psxpad-spi.c b/drivers/input/joystick/psxpad-spi.c
index c47fc5f34bd0..f902a56d011f 100644
--- a/drivers/input/joystick/psxpad-spi.c
+++ b/drivers/input/joystick/psxpad-spi.c
@@ -344,7 +344,11 @@ static int psxpad_spi_probe(struct spi_device *spi)
/* (PlayStation 1/2 joypad might be possible works 250kHz/500kHz) */
spi->controller->min_speed_hz = 125000;
spi->controller->max_speed_hz = 125000;
- spi_setup(spi);
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "failed to set up SPI: %d\n", err);
+ return err;
+ }
/* pad settings */
psxpad_set_motor_level(pad, 0, 0);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 7c4f309a4cb6..2ff4fef322c2 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -262,24 +262,6 @@ config KEYBOARD_GPIO_POLLED
To compile this driver as a module, choose M here: the
module will be called gpio_keys_polled.
-config KEYBOARD_TCA6416
- tristate "TCA6416/TCA6408A Keypad Support"
- depends on I2C
- help
- This driver implements basic keypad functionality
- for keys connected through TCA6416/TCA6408A IO expanders.
-
- Say Y here if your device has keys connected to
- TCA6416/TCA6408A IO expander. Your board-specific setup logic
- must also provide pin-mask details(of which TCA6416 pins
- are used for keypad).
-
- If enabled the entire TCA6416 device will be managed through
- this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called tca6416_keypad.
-
config KEYBOARD_TCA8418
tristate "TCA8418 Keypad Support"
depends on I2C
@@ -422,6 +404,18 @@ config KEYBOARD_MAX7359
To compile this driver as a module, choose M here: the
module will be called max7359_keypad.
+config KEYBOARD_MAX7360
+ tristate "Maxim MAX7360 Key Switch Controller"
+ select INPUT_MATRIXKMAP
+ depends on I2C
+ depends on MFD_MAX7360
+ help
+ If you say yes here you get support for the keypad controller on the
+ Maxim MAX7360 I/O Expander.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max7360_keypad.
+
config KEYBOARD_MPR121
tristate "Freescale MPR121 Touchkey"
depends on I2C
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 8bc20ab2b103..2d906e14f3e2 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
-obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
obj-$(CONFIG_KEYBOARD_TCA8418) += tca8418_keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
@@ -42,6 +41,7 @@ obj-$(CONFIG_KEYBOARD_LPC32XX) += lpc32xx-keys.o
obj-$(CONFIG_KEYBOARD_MAPLE) += maple_keyb.o
obj-$(CONFIG_KEYBOARD_MATRIX) += matrix_keypad.o
obj-$(CONFIG_KEYBOARD_MAX7359) += max7359_keypad.o
+obj-$(CONFIG_KEYBOARD_MAX7360) += max7360-keypad.o
obj-$(CONFIG_KEYBOARD_MPR121) += mpr121_touchkey.o
obj-$(CONFIG_KEYBOARD_MT6779) += mt6779-keypad.o
obj-$(CONFIG_KEYBOARD_MTK_PMIC) += mtk-pmic-keys.o
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index c1e53d87c8a7..1c6b0461dc35 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -261,6 +261,12 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_KEY_MATRIX:
pm_wakeup_event(ckdev->dev, 0);
+ if (!ckdev->idev) {
+ dev_warn_once(ckdev->dev,
+ "Unexpected key matrix event\n");
+ return NOTIFY_OK;
+ }
+
if (ckdev->ec->event_size != ckdev->cols) {
dev_err(ckdev->dev,
"Discarded incomplete key matrix event.\n");
@@ -705,6 +711,12 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
ec = dev_get_drvdata(pdev->dev.parent);
if (!ec)
return -EPROBE_DEFER;
+ /*
+ * Even if the cros_ec_device pointer is available, we still need to check
+ * if the device is fully registered before using it.
+ */
+ if (!cros_ec_device_registered(ec))
+ return -EPROBE_DEFER;
ckdev = devm_kzalloc(dev, sizeof(*ckdev), GFP_KERNEL);
if (!ckdev)
diff --git a/drivers/input/keyboard/imx_sc_key.c b/drivers/input/keyboard/imx_sc_key.c
index d18839f1f4f6..b620cd310cdb 100644
--- a/drivers/input/keyboard/imx_sc_key.c
+++ b/drivers/input/keyboard/imx_sc_key.c
@@ -158,7 +158,7 @@ static int imx_sc_key_probe(struct platform_device *pdev)
return error;
}
- error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv);
+ error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, priv);
if (error)
return error;
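The one-character fix above matters because devm_add_action_or_reset() was being handed &priv, the address of a local pointer variable, rather than priv, the object itself, so the cleanup action would later dereference a dead stack slot. A standalone sketch of the distinction (hypothetical names, not the driver code):

#include <stdio.h>
#include <stdlib.h>

static void cleanup_action(void *data)
{
	free(data);	/* expects the object itself */
}

int main(void)
{
	void *priv = malloc(16);

	if (!priv)
		return 1;
	/*
	 * Passing priv hands the callback the allocation; passing &priv
	 * would hand it the address of this function's local variable,
	 * which is invalid once the frame is gone.
	 */
	cleanup_action(priv);
	return 0;
}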
diff --git a/drivers/input/keyboard/max7360-keypad.c b/drivers/input/keyboard/max7360-keypad.c
new file mode 100644
index 000000000000..503be952b0a6
--- /dev/null
+++ b/drivers/input/keyboard/max7360-keypad.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/dev_printk.h>
+#include <linux/device/devres.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max7360.h>
+#include <linux/mod_devicetable.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+
+struct max7360_keypad {
+ struct input_dev *input;
+ unsigned int rows;
+ unsigned int cols;
+ unsigned int debounce_ms;
+ int irq;
+ struct regmap *regmap;
+ unsigned short keycodes[MAX7360_MAX_KEY_ROWS * MAX7360_MAX_KEY_COLS];
+};
+
+static irqreturn_t max7360_keypad_irq(int irq, void *data)
+{
+ struct max7360_keypad *max7360_keypad = data;
+ struct device *dev = max7360_keypad->input->dev.parent;
+ unsigned int val;
+ unsigned int row, col;
+ unsigned int release;
+ unsigned int code;
+ int error;
+
+ error = regmap_read(max7360_keypad->regmap, MAX7360_REG_KEYFIFO, &val);
+ if (error) {
+ dev_err(dev, "Failed to read MAX7360 FIFO");
+ return IRQ_NONE;
+ }
+
+ /* FIFO overflow: ignore it and get next event. */
+ if (val == MAX7360_FIFO_OVERFLOW) {
+ dev_warn(dev, "max7360 FIFO overflow");
+ error = regmap_read_poll_timeout(max7360_keypad->regmap, MAX7360_REG_KEYFIFO,
+ val, val != MAX7360_FIFO_OVERFLOW, 0, 1000);
+ if (error) {
+ dev_err(dev, "Failed to empty MAX7360 FIFO");
+ return IRQ_NONE;
+ }
+ }
+
+ if (val == MAX7360_FIFO_EMPTY) {
+ dev_dbg(dev, "Got a spurious interrupt");
+
+ return IRQ_NONE;
+ }
+
+ row = FIELD_GET(MAX7360_FIFO_ROW, val);
+ col = FIELD_GET(MAX7360_FIFO_COL, val);
+ release = val & MAX7360_FIFO_RELEASE;
+
+ code = MATRIX_SCAN_CODE(row, col, get_count_order(max7360_keypad->cols));
+
+ dev_dbg(dev, "key[%d:%d] %s\n", row, col, release ? "release" : "press");
+
+ input_event(max7360_keypad->input, EV_MSC, MSC_SCAN, code);
+ input_report_key(max7360_keypad->input, max7360_keypad->keycodes[code], !release);
+ input_sync(max7360_keypad->input);
+
+ return IRQ_HANDLED;
+}
+
+static int max7360_keypad_open(struct input_dev *pdev)
+{
+ struct max7360_keypad *max7360_keypad = input_get_drvdata(pdev);
+ struct device *dev = max7360_keypad->input->dev.parent;
+ int error;
+
+ /* Somebody is using the device: get out of sleep. */
+ error = regmap_write_bits(max7360_keypad->regmap, MAX7360_REG_CONFIG,
+ MAX7360_CFG_SLEEP, MAX7360_CFG_SLEEP);
+ if (error)
+ dev_err(dev, "Failed to write max7360 configuration: %d\n", error);
+
+ return error;
+}
+
+static void max7360_keypad_close(struct input_dev *pdev)
+{
+ struct max7360_keypad *max7360_keypad = input_get_drvdata(pdev);
+ struct device *dev = max7360_keypad->input->dev.parent;
+ int error;
+
+ /* Nobody is using the device anymore: go to sleep. */
+ error = regmap_write_bits(max7360_keypad->regmap, MAX7360_REG_CONFIG, MAX7360_CFG_SLEEP, 0);
+ if (error)
+ dev_err(dev, "Failed to write max7360 configuration: %d\n", error);
+}
+
+static int max7360_keypad_hw_init(struct max7360_keypad *max7360_keypad)
+{
+ struct device *dev = max7360_keypad->input->dev.parent;
+ unsigned int val;
+ int error;
+
+ val = max7360_keypad->debounce_ms - MAX7360_DEBOUNCE_MIN;
+ error = regmap_write_bits(max7360_keypad->regmap, MAX7360_REG_DEBOUNCE,
+ MAX7360_DEBOUNCE,
+ FIELD_PREP(MAX7360_DEBOUNCE, val));
+ if (error)
+ return dev_err_probe(dev, error,
+ "Failed to write max7360 debounce configuration\n");
+
+ error = regmap_write_bits(max7360_keypad->regmap, MAX7360_REG_INTERRUPT,
+ MAX7360_INTERRUPT_TIME_MASK,
+ FIELD_PREP(MAX7360_INTERRUPT_TIME_MASK, 1));
+ if (error)
+ return dev_err_probe(dev, error,
+ "Failed to write max7360 keypad interrupt configuration\n");
+
+ return 0;
+}
+
+static int max7360_keypad_build_keymap(struct max7360_keypad *max7360_keypad)
+{
+ struct input_dev *input_dev = max7360_keypad->input;
+ struct device *dev = input_dev->dev.parent->parent;
+ struct matrix_keymap_data keymap_data;
+ const char *propname = "linux,keymap";
+ unsigned int max_keys;
+ int error;
+ int size;
+
+ size = device_property_count_u32(dev, propname);
+ if (size <= 0) {
+ dev_err(dev, "missing or malformed property %s: %d\n", propname, size);
+ return size < 0 ? size : -EINVAL;
+ }
+
+ max_keys = max7360_keypad->cols * max7360_keypad->rows;
+ if (size > max_keys) {
+ dev_err(dev, "%s size overflow (%d vs max %u)\n", propname, size, max_keys);
+ return -EINVAL;
+ }
+
+ u32 *keys __free(kfree) = kmalloc_array(size, sizeof(*keys), GFP_KERNEL);
+ if (!keys)
+ return -ENOMEM;
+
+ error = device_property_read_u32_array(dev, propname, keys, size);
+ if (error) {
+ dev_err(dev, "failed to read %s property: %d\n", propname, error);
+ return error;
+ }
+
+ keymap_data.keymap = keys;
+ keymap_data.keymap_size = size;
+ error = matrix_keypad_build_keymap(&keymap_data, NULL,
+ max7360_keypad->rows, max7360_keypad->cols,
+ max7360_keypad->keycodes, max7360_keypad->input);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static int max7360_keypad_parse_fw(struct device *dev,
+ struct max7360_keypad *max7360_keypad,
+ bool *autorepeat)
+{
+ int error;
+
+ error = matrix_keypad_parse_properties(dev->parent, &max7360_keypad->rows,
+ &max7360_keypad->cols);
+ if (error)
+ return error;
+
+ if (!max7360_keypad->rows || !max7360_keypad->cols ||
+ max7360_keypad->rows > MAX7360_MAX_KEY_ROWS ||
+ max7360_keypad->cols > MAX7360_MAX_KEY_COLS) {
+ dev_err(dev, "Invalid number of columns or rows (%ux%u)\n",
+ max7360_keypad->cols, max7360_keypad->rows);
+ return -EINVAL;
+ }
+
+ *autorepeat = device_property_read_bool(dev->parent, "autorepeat");
+
+ max7360_keypad->debounce_ms = MAX7360_DEBOUNCE_MIN;
+ error = device_property_read_u32(dev->parent, "keypad-debounce-delay-ms",
+ &max7360_keypad->debounce_ms);
+ if (error == -EINVAL) {
+ dev_info(dev, "Using default keypad-debounce-delay-ms: %u\n",
+ max7360_keypad->debounce_ms);
+ } else if (error < 0) {
+ dev_err(dev, "Failed to read keypad-debounce-delay-ms property\n");
+ return error;
+ }
+
+ if (!in_range(max7360_keypad->debounce_ms, MAX7360_DEBOUNCE_MIN,
+ MAX7360_DEBOUNCE_MAX - MAX7360_DEBOUNCE_MIN + 1)) {
+ dev_err(dev, "Invalid keypad-debounce-delay-ms: %u, should be between %u and %u.\n",
+ max7360_keypad->debounce_ms, MAX7360_DEBOUNCE_MIN, MAX7360_DEBOUNCE_MAX);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int max7360_keypad_probe(struct platform_device *pdev)
+{
+ struct max7360_keypad *max7360_keypad;
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ struct regmap *regmap;
+ bool autorepeat;
+ int error;
+ int irq;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "Could not get parent regmap\n");
+
+ irq = fwnode_irq_get_byname(dev_fwnode(dev->parent), "intk");
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get IRQ\n");
+
+ max7360_keypad = devm_kzalloc(dev, sizeof(*max7360_keypad), GFP_KERNEL);
+ if (!max7360_keypad)
+ return -ENOMEM;
+
+ max7360_keypad->regmap = regmap;
+
+ error = max7360_keypad_parse_fw(dev, max7360_keypad, &autorepeat);
+ if (error)
+ return error;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ max7360_keypad->input = input;
+
+ input->id.bustype = BUS_I2C;
+ input->name = pdev->name;
+ input->open = max7360_keypad_open;
+ input->close = max7360_keypad_close;
+
+ error = max7360_keypad_build_keymap(max7360_keypad);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to build keymap\n");
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+ if (autorepeat)
+ __set_bit(EV_REP, input->evbit);
+
+ input_set_drvdata(input, max7360_keypad);
+
+ error = devm_request_threaded_irq(dev, irq, NULL, max7360_keypad_irq,
+ IRQF_ONESHOT,
+ "max7360-keypad", max7360_keypad);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to register interrupt\n");
+
+ error = input_register_device(input);
+ if (error)
+ return dev_err_probe(dev, error, "Could not register input device\n");
+
+ error = max7360_keypad_hw_init(max7360_keypad);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to initialize max7360 keypad\n");
+
+ device_init_wakeup(dev, true);
+ error = dev_pm_set_wake_irq(dev, irq);
+ if (error)
+ dev_warn(dev, "Failed to set up wakeup irq: %d\n", error);
+
+ return 0;
+}
+
+static void max7360_keypad_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+}
+
+static struct platform_driver max7360_keypad_driver = {
+ .driver = {
+ .name = "max7360-keypad",
+ },
+ .probe = max7360_keypad_probe,
+ .remove = max7360_keypad_remove,
+};
+module_platform_driver(max7360_keypad_driver);
+
+MODULE_DESCRIPTION("MAX7360 Keypad driver");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
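A standalone sketch of the scan-code arithmetic the new driver's IRQ handler relies on: MATRIX_SCAN_CODE() packs (row, col) as (row << shift) + col, where the shift is the count order of the column count, so an 8-column keypad uses a shift of 3. The helpers below approximate the kernel macros for illustration only:

#include <stdio.h>

#define MATRIX_SCAN_CODE(row, col, shift)	(((row) << (shift)) + (col))

/* smallest n such that (1 << n) >= x, like the kernel's get_count_order() */
static int count_order(unsigned int x)
{
	int n = 0;

	while ((1u << n) < x)
		n++;
	return n;
}

int main(void)
{
	unsigned int cols = 8, row = 2, col = 5;
	int shift = count_order(cols);	/* 3 for an 8-column keypad */

	printf("scan code = %d\n", MATRIX_SCAN_CODE(row, col, shift)); /* 21 */
	return 0;
}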
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 38ec619aa359..4519eecb317b 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -12,7 +12,8 @@
* on some suggestions by Nicolas Pitre <nico@fluxnic.net>.
*/
-
+#include <linux/bits.h>
+#include <linux/bitfield.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -20,124 +21,148 @@
#include <linux/io.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/input/matrix_keypad.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/platform_data/keypad-pxa27x.h>
/*
* Keypad Controller registers
*/
-#define KPC 0x0000 /* Keypad Control register */
-#define KPDK 0x0008 /* Keypad Direct Key register */
-#define KPREC 0x0010 /* Keypad Rotary Encoder register */
-#define KPMK 0x0018 /* Keypad Matrix Key register */
-#define KPAS 0x0020 /* Keypad Automatic Scan register */
+#define KPC 0x0000 /* Keypad Control register */
+#define KPDK 0x0008 /* Keypad Direct Key register */
+#define KPREC 0x0010 /* Keypad Rotary Encoder register */
+#define KPMK 0x0018 /* Keypad Matrix Key register */
+#define KPAS 0x0020 /* Keypad Automatic Scan register */
/* Keypad Automatic Scan Multiple Key Presser register 0-3 */
-#define KPASMKP0 0x0028
-#define KPASMKP1 0x0030
-#define KPASMKP2 0x0038
-#define KPASMKP3 0x0040
-#define KPKDI 0x0048
+#define KPASMKP0 0x0028
+#define KPASMKP1 0x0030
+#define KPASMKP2 0x0038
+#define KPASMKP3 0x0040
+#define KPKDI 0x0048
/* bit definitions */
-#define KPC_MKRN(n) ((((n) - 1) & 0x7) << 26) /* matrix key row number */
-#define KPC_MKCN(n) ((((n) - 1) & 0x7) << 23) /* matrix key column number */
-#define KPC_DKN(n) ((((n) - 1) & 0x7) << 6) /* direct key number */
-
-#define KPC_AS (0x1 << 30) /* Automatic Scan bit */
-#define KPC_ASACT (0x1 << 29) /* Automatic Scan on Activity */
-#define KPC_MI (0x1 << 22) /* Matrix interrupt bit */
-#define KPC_IMKP (0x1 << 21) /* Ignore Multiple Key Press */
-
-#define KPC_MS(n) (0x1 << (13 + (n))) /* Matrix scan line 'n' */
-#define KPC_MS_ALL (0xff << 13)
-
-#define KPC_ME (0x1 << 12) /* Matrix Keypad Enable */
-#define KPC_MIE (0x1 << 11) /* Matrix Interrupt Enable */
-#define KPC_DK_DEB_SEL (0x1 << 9) /* Direct Keypad Debounce Select */
-#define KPC_DI (0x1 << 5) /* Direct key interrupt bit */
-#define KPC_RE_ZERO_DEB (0x1 << 4) /* Rotary Encoder Zero Debounce */
-#define KPC_REE1 (0x1 << 3) /* Rotary Encoder1 Enable */
-#define KPC_REE0 (0x1 << 2) /* Rotary Encoder0 Enable */
-#define KPC_DE (0x1 << 1) /* Direct Keypad Enable */
-#define KPC_DIE (0x1 << 0) /* Direct Keypad interrupt Enable */
-
-#define KPDK_DKP (0x1 << 31)
-#define KPDK_DK(n) ((n) & 0xff)
-
-#define KPREC_OF1 (0x1 << 31)
-#define kPREC_UF1 (0x1 << 30)
-#define KPREC_OF0 (0x1 << 15)
-#define KPREC_UF0 (0x1 << 14)
-
-#define KPREC_RECOUNT0(n) ((n) & 0xff)
-#define KPREC_RECOUNT1(n) (((n) >> 16) & 0xff)
-
-#define KPMK_MKP (0x1 << 31)
-#define KPAS_SO (0x1 << 31)
-#define KPASMKPx_SO (0x1 << 31)
-
-#define KPAS_MUKP(n) (((n) >> 26) & 0x1f)
-#define KPAS_RP(n) (((n) >> 4) & 0xf)
-#define KPAS_CP(n) ((n) & 0xf)
-
-#define KPASMKP_MKC_MASK (0xff)
+#define KPC_MKRN_MASK GENMASK(28, 26)
+#define KPC_MKCN_MASK GENMASK(25, 23)
+#define KPC_DKN_MASK GENMASK(8, 6)
+#define KPC_MKRN(n) FIELD_PREP(KPC_MKRN_MASK, (n) - 1)
+#define KPC_MKCN(n) FIELD_PREP(KPC_MKCN_MASK, (n) - 1)
+#define KPC_DKN(n) FIELD_PREP(KPC_DKN_MASK, (n) - 1)
+
+#define KPC_AS BIT(30) /* Automatic Scan bit */
+#define KPC_ASACT BIT(29) /* Automatic Scan on Activity */
+#define KPC_MI BIT(22) /* Matrix interrupt bit */
+#define KPC_IMKP BIT(21) /* Ignore Multiple Key Press */
+
+#define KPC_MS(n) BIT(13 + (n)) /* Matrix scan line 'n' */
+#define KPC_MS_ALL GENMASK(20, 13)
+
+#define KPC_ME BIT(12) /* Matrix Keypad Enable */
+#define KPC_MIE BIT(11) /* Matrix Interrupt Enable */
+#define KPC_DK_DEB_SEL BIT(9) /* Direct Keypad Debounce Select */
+#define KPC_DI BIT(5) /* Direct key interrupt bit */
+#define KPC_RE_ZERO_DEB BIT(4) /* Rotary Encoder Zero Debounce */
+#define KPC_REE1 BIT(3) /* Rotary Encoder1 Enable */
+#define KPC_REE0 BIT(2) /* Rotary Encoder0 Enable */
+#define KPC_DE BIT(1) /* Direct Keypad Enable */
+#define KPC_DIE BIT(0) /* Direct Keypad interrupt Enable */
+
+#define KPDK_DKP BIT(31)
+#define KPDK_DK_MASK GENMASK(7, 0)
+#define KPDK_DK(n) FIELD_GET(KPDK_DK_MASK, n)
+
+#define KPREC_OF1 BIT(31)
+#define KPREC_UF1 BIT(30)
+#define KPREC_OF0 BIT(15)
+#define KPREC_UF0 BIT(14)
+
+#define KPREC_RECOUNT0_MASK GENMASK(7, 0)
+#define KPREC_RECOUNT1_MASK GENMASK(23, 16)
+#define KPREC_RECOUNT0(n) FIELD_GET(KPREC_RECOUNT0_MASK, n)
+#define KPREC_RECOUNT1(n) FIELD_GET(KPREC_RECOUNT1_MASK, n)
+
+#define KPMK_MKP BIT(31)
+#define KPAS_SO BIT(31)
+#define KPASMKPx_SO BIT(31)
+
+#define KPAS_MUKP_MASK GENMASK(30, 26)
+#define KPAS_RP_MASK GENMASK(7, 4)
+#define KPAS_CP_MASK GENMASK(3, 0)
+#define KPAS_MUKP(n) FIELD_GET(KPAS_MUKP_MASK, n)
+#define KPAS_RP(n) FIELD_GET(KPAS_RP_MASK, n)
+#define KPAS_CP(n) FIELD_GET(KPAS_CP_MASK, n)
+
+#define KPASMKP_MKC_MASK GENMASK(7, 0)
#define keypad_readl(off) __raw_readl(keypad->mmio_base + (off))
#define keypad_writel(off, v) __raw_writel((v), keypad->mmio_base + (off))
+#define MAX_MATRIX_KEY_ROWS 8
+#define MAX_MATRIX_KEY_COLS 8
+#define MAX_DIRECT_KEY_NUM 8
+#define MAX_ROTARY_ENCODERS 2
+
#define MAX_MATRIX_KEY_NUM (MAX_MATRIX_KEY_ROWS * MAX_MATRIX_KEY_COLS)
#define MAX_KEYPAD_KEYS (MAX_MATRIX_KEY_NUM + MAX_DIRECT_KEY_NUM)
-struct pxa27x_keypad {
- const struct pxa27x_keypad_platform_data *pdata;
+struct pxa27x_keypad_rotary {
+ unsigned short *key_codes;
+ int rel_code;
+ bool enabled;
+};
+struct pxa27x_keypad {
struct clk *clk;
struct input_dev *input_dev;
void __iomem *mmio_base;
int irq;
- unsigned short keycodes[MAX_KEYPAD_KEYS];
- int rotary_rel_code[2];
-
+ unsigned int matrix_key_rows;
+ unsigned int matrix_key_cols;
unsigned int row_shift;
+ unsigned int direct_key_num;
+ unsigned int direct_key_mask;
+ bool direct_key_low_active;
+
+ /* key debounce interval */
+ unsigned int debounce_interval;
+
+ unsigned short keycodes[MAX_KEYPAD_KEYS];
+
/* state row bits of each column scan */
- uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS];
- uint32_t direct_key_state;
+ u32 matrix_key_state[MAX_MATRIX_KEY_COLS];
+ u32 direct_key_state;
- unsigned int direct_key_mask;
+ struct pxa27x_keypad_rotary rotary[MAX_ROTARY_ENCODERS];
};
-#ifdef CONFIG_OF
-static int pxa27x_keypad_matrix_key_parse_dt(struct pxa27x_keypad *keypad,
- struct pxa27x_keypad_platform_data *pdata)
+static int pxa27x_keypad_matrix_key_parse(struct pxa27x_keypad *keypad)
{
struct input_dev *input_dev = keypad->input_dev;
struct device *dev = input_dev->dev.parent;
- u32 rows, cols;
int error;
- error = matrix_keypad_parse_properties(dev, &rows, &cols);
+ error = matrix_keypad_parse_properties(dev, &keypad->matrix_key_rows,
+ &keypad->matrix_key_cols);
if (error)
return error;
- if (rows > MAX_MATRIX_KEY_ROWS || cols > MAX_MATRIX_KEY_COLS) {
+ if (keypad->matrix_key_rows > MAX_MATRIX_KEY_ROWS ||
+ keypad->matrix_key_cols > MAX_MATRIX_KEY_COLS) {
dev_err(dev, "rows or cols exceeds maximum value\n");
return -EINVAL;
}
- pdata->matrix_key_rows = rows;
- pdata->matrix_key_cols = cols;
+ keypad->row_shift = get_count_order(keypad->matrix_key_cols);
error = matrix_keypad_build_keymap(NULL, NULL,
- pdata->matrix_key_rows,
- pdata->matrix_key_cols,
+ keypad->matrix_key_rows,
+ keypad->matrix_key_cols,
keypad->keycodes, input_dev);
if (error)
return error;
@@ -145,20 +170,17 @@ static int pxa27x_keypad_matrix_key_parse_dt(struct pxa27x_keypad *keypad,
return 0;
}
-static int pxa27x_keypad_direct_key_parse_dt(struct pxa27x_keypad *keypad,
- struct pxa27x_keypad_platform_data *pdata)
+static int pxa27x_keypad_direct_key_parse(struct pxa27x_keypad *keypad)
{
struct input_dev *input_dev = keypad->input_dev;
struct device *dev = input_dev->dev.parent;
- struct device_node *np = dev->of_node;
- const __be16 *prop;
unsigned short code;
- unsigned int proplen, size;
+ int count;
int i;
int error;
- error = of_property_read_u32(np, "marvell,direct-key-count",
- &pdata->direct_key_num);
+ error = device_property_read_u32(dev, "marvell,direct-key-count",
+ &keypad->direct_key_num);
if (error) {
/*
* If marvell,direct-key-count is not defined,
@@ -167,151 +189,121 @@ static int pxa27x_keypad_direct_key_parse_dt(struct pxa27x_keypad *keypad,
return error == -EINVAL ? 0 : error;
}
- error = of_property_read_u32(np, "marvell,direct-key-mask",
- &pdata->direct_key_mask);
+ error = device_property_read_u32(dev, "marvell,direct-key-mask",
+ &keypad->direct_key_mask);
if (error) {
if (error != -EINVAL)
return error;
/*
* If marvell,direct-key-mask is not defined, driver will use
- * default value. Default value is set when configure the keypad.
+ * a default value based on the number of direct keys set up.
+ * The default value is calculated in pxa27x_keypad_config().
*/
- pdata->direct_key_mask = 0;
+ keypad->direct_key_mask = 0;
}
- pdata->direct_key_low_active = of_property_read_bool(np,
- "marvell,direct-key-low-active");
-
- prop = of_get_property(np, "marvell,direct-key-map", &proplen);
- if (!prop)
- return -EINVAL;
+ keypad->direct_key_low_active =
+ device_property_read_bool(dev, "marvell,direct-key-low-active");
- if (proplen % sizeof(u16))
+ count = device_property_count_u16(dev, "marvell,direct-key-map");
+ if (count <= 0 || count > MAX_DIRECT_KEY_NUM)
return -EINVAL;
- size = proplen / sizeof(u16);
+ error = device_property_read_u16_array(dev, "marvell,direct-key-map",
+ &keypad->keycodes[MAX_MATRIX_KEY_NUM],
+ count);
- /* Only MAX_DIRECT_KEY_NUM is accepted.*/
- if (size > MAX_DIRECT_KEY_NUM)
- return -EINVAL;
-
- for (i = 0; i < size; i++) {
- code = be16_to_cpup(prop + i);
- keypad->keycodes[MAX_MATRIX_KEY_NUM + i] = code;
+ for (i = 0; i < count; i++) {
+ code = keypad->keycodes[MAX_MATRIX_KEY_NUM + i];
__set_bit(code, input_dev->keybit);
}
return 0;
}
-static int pxa27x_keypad_rotary_parse_dt(struct pxa27x_keypad *keypad,
- struct pxa27x_keypad_platform_data *pdata)
+static int pxa27x_keypad_rotary_parse(struct pxa27x_keypad *keypad)
{
- const __be32 *prop;
- int i, relkey_ret;
- unsigned int code, proplen;
- const char *rotaryname[2] = {
- "marvell,rotary0", "marvell,rotary1"};
- const char relkeyname[] = {"marvell,rotary-rel-key"};
+ static const char * const rotaryname[] = { "marvell,rotary0", "marvell,rotary1" };
struct input_dev *input_dev = keypad->input_dev;
struct device *dev = input_dev->dev.parent;
- struct device_node *np = dev->of_node;
-
- relkey_ret = of_property_read_u32(np, relkeyname, &code);
- /* if can read correct rotary key-code, we do not need this. */
- if (relkey_ret == 0) {
- unsigned short relcode;
+ struct pxa27x_keypad_rotary *encoder;
+ unsigned int code;
+ int i;
+ int error;
- /* rotary0 taks lower half, rotary1 taks upper half. */
- relcode = code & 0xffff;
- pdata->rotary0_rel_code = (code & 0xffff);
- __set_bit(relcode, input_dev->relbit);
+ error = device_property_read_u32(dev, "marvell,rotary-rel-key", &code);
+ if (!error) {
+ for (i = 0; i < MAX_ROTARY_ENCODERS; i++, code >>= 16) {
+ encoder = &keypad->rotary[i];
+ encoder->enabled = true;
+ encoder->rel_code = code & 0xffff;
+ input_set_capability(input_dev, EV_REL, encoder->rel_code);
+ }
- relcode = code >> 16;
- pdata->rotary1_rel_code = relcode;
- __set_bit(relcode, input_dev->relbit);
+ return 0;
}
- for (i = 0; i < 2; i++) {
- prop = of_get_property(np, rotaryname[i], &proplen);
+ for (i = 0; i < MAX_ROTARY_ENCODERS; i++) {
+ encoder = &keypad->rotary[i];
+
/*
* If the prop is not set, it means keypad does not need
* initialize the rotaryX.
*/
- if (!prop)
+ if (!device_property_present(dev, rotaryname[i]))
continue;
- code = be32_to_cpup(prop);
+ error = device_property_read_u32(dev, rotaryname[i], &code);
+ if (error)
+ return error;
+
/*
* Not all up/down key codes are valid.
* For now we depend on direct-rel-code.
*/
- if ((!(code & 0xffff) || !(code >> 16)) && relkey_ret) {
- return relkey_ret;
- } else {
- unsigned int n = MAX_MATRIX_KEY_NUM + (i << 1);
- unsigned short keycode;
-
- keycode = code & 0xffff;
- keypad->keycodes[n] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keycode = code >> 16;
- keypad->keycodes[n + 1] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- if (i == 0)
- pdata->rotary0_rel_code = -1;
- else
- pdata->rotary1_rel_code = -1;
- }
- if (i == 0)
- pdata->enable_rotary0 = 1;
- else
- pdata->enable_rotary1 = 1;
- }
+ if (!(code & 0xffff) || !(code >> 16))
+ return -EINVAL;
+
+ encoder->enabled = true;
+ encoder->rel_code = -1;
+ encoder->key_codes = &keypad->keycodes[MAX_MATRIX_KEY_NUM + i * 2];
+ encoder->key_codes[0] = code & 0xffff;
+ encoder->key_codes[1] = code >> 16;
- keypad->rotary_rel_code[0] = pdata->rotary0_rel_code;
- keypad->rotary_rel_code[1] = pdata->rotary1_rel_code;
+ input_set_capability(input_dev, EV_KEY, encoder->key_codes[0]);
+ input_set_capability(input_dev, EV_KEY, encoder->key_codes[1]);
+ }
return 0;
}
-static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
+static int pxa27x_keypad_parse_properties(struct pxa27x_keypad *keypad)
{
struct input_dev *input_dev = keypad->input_dev;
struct device *dev = input_dev->dev.parent;
- struct device_node *np = dev->of_node;
- struct pxa27x_keypad_platform_data *pdata;
int error;
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(dev, "failed to allocate memory for pdata\n");
- return -ENOMEM;
- }
-
- error = pxa27x_keypad_matrix_key_parse_dt(keypad, pdata);
+ error = pxa27x_keypad_matrix_key_parse(keypad);
if (error) {
dev_err(dev, "failed to parse matrix key\n");
return error;
}
- error = pxa27x_keypad_direct_key_parse_dt(keypad, pdata);
+ error = pxa27x_keypad_direct_key_parse(keypad);
if (error) {
dev_err(dev, "failed to parse direct key\n");
return error;
}
- error = pxa27x_keypad_rotary_parse_dt(keypad, pdata);
+ error = pxa27x_keypad_rotary_parse(keypad);
if (error) {
dev_err(dev, "failed to parse rotary key\n");
return error;
}
- error = of_property_read_u32(np, "marvell,debounce-interval",
- &pdata->debounce_interval);
+ error = device_property_read_u32(dev, "marvell,debounce-interval",
+ &keypad->debounce_interval);
if (error) {
dev_err(dev, "failed to parse debounce-interval\n");
return error;
@@ -323,95 +315,15 @@ static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
*/
input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);
- keypad->pdata = pdata;
- return 0;
-}
-
-#else
-
-static int pxa27x_keypad_build_keycode_from_dt(struct pxa27x_keypad *keypad)
-{
- dev_info(keypad->input_dev->dev.parent, "missing platform data\n");
-
- return -EINVAL;
-}
-
-#endif
-
-static int pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
-{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
- struct input_dev *input_dev = keypad->input_dev;
- unsigned short keycode;
- int i;
- int error;
-
- error = matrix_keypad_build_keymap(pdata->matrix_keymap_data, NULL,
- pdata->matrix_key_rows,
- pdata->matrix_key_cols,
- keypad->keycodes, input_dev);
- if (error)
- return error;
-
- /*
- * The keycodes may not only include matrix keys but also the direct
- * or rotary keys.
- */
- input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);
-
- /* For direct keys. */
- for (i = 0; i < pdata->direct_key_num; i++) {
- keycode = pdata->direct_key_map[i];
- keypad->keycodes[MAX_MATRIX_KEY_NUM + i] = keycode;
- __set_bit(keycode, input_dev->keybit);
- }
-
- if (pdata->enable_rotary0) {
- if (pdata->rotary0_up_key && pdata->rotary0_down_key) {
- keycode = pdata->rotary0_up_key;
- keypad->keycodes[MAX_MATRIX_KEY_NUM + 0] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keycode = pdata->rotary0_down_key;
- keypad->keycodes[MAX_MATRIX_KEY_NUM + 1] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keypad->rotary_rel_code[0] = -1;
- } else {
- keypad->rotary_rel_code[0] = pdata->rotary0_rel_code;
- __set_bit(pdata->rotary0_rel_code, input_dev->relbit);
- }
- }
-
- if (pdata->enable_rotary1) {
- if (pdata->rotary1_up_key && pdata->rotary1_down_key) {
- keycode = pdata->rotary1_up_key;
- keypad->keycodes[MAX_MATRIX_KEY_NUM + 2] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keycode = pdata->rotary1_down_key;
- keypad->keycodes[MAX_MATRIX_KEY_NUM + 3] = keycode;
- __set_bit(keycode, input_dev->keybit);
-
- keypad->rotary_rel_code[1] = -1;
- } else {
- keypad->rotary_rel_code[1] = pdata->rotary1_rel_code;
- __set_bit(pdata->rotary1_rel_code, input_dev->relbit);
- }
- }
-
- __clear_bit(KEY_RESERVED, input_dev->keybit);
-
return 0;
}
static void pxa27x_keypad_scan_matrix(struct pxa27x_keypad *keypad)
{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
struct input_dev *input_dev = keypad->input_dev;
int row, col, num_keys_pressed = 0;
- uint32_t new_state[MAX_MATRIX_KEY_COLS];
- uint32_t kpas = keypad_readl(KPAS);
+ u32 new_state[MAX_MATRIX_KEY_COLS];
+ u32 kpas = keypad_readl(KPAS);
num_keys_pressed = KPAS_MUKP(kpas);
@@ -425,19 +337,19 @@ static void pxa27x_keypad_scan_matrix(struct pxa27x_keypad *keypad)
row = KPAS_RP(kpas);
/* if invalid row/col, treat as no key pressed */
- if (col >= pdata->matrix_key_cols ||
- row >= pdata->matrix_key_rows)
+ if (col >= keypad->matrix_key_cols ||
+ row >= keypad->matrix_key_rows)
goto scan;
- new_state[col] = (1 << row);
+ new_state[col] = BIT(row);
goto scan;
}
if (num_keys_pressed > 1) {
- uint32_t kpasmkp0 = keypad_readl(KPASMKP0);
- uint32_t kpasmkp1 = keypad_readl(KPASMKP1);
- uint32_t kpasmkp2 = keypad_readl(KPASMKP2);
- uint32_t kpasmkp3 = keypad_readl(KPASMKP3);
+ u32 kpasmkp0 = keypad_readl(KPASMKP0);
+ u32 kpasmkp1 = keypad_readl(KPASMKP1);
+ u32 kpasmkp2 = keypad_readl(KPASMKP2);
+ u32 kpasmkp3 = keypad_readl(KPASMKP3);
new_state[0] = kpasmkp0 & KPASMKP_MKC_MASK;
new_state[1] = (kpasmkp0 >> 16) & KPASMKP_MKC_MASK;
@@ -449,23 +361,23 @@ static void pxa27x_keypad_scan_matrix(struct pxa27x_keypad *keypad)
new_state[7] = (kpasmkp3 >> 16) & KPASMKP_MKC_MASK;
}
scan:
- for (col = 0; col < pdata->matrix_key_cols; col++) {
- uint32_t bits_changed;
+ for (col = 0; col < keypad->matrix_key_cols; col++) {
+ u32 bits_changed;
int code;
bits_changed = keypad->matrix_key_state[col] ^ new_state[col];
if (bits_changed == 0)
continue;
- for (row = 0; row < pdata->matrix_key_rows; row++) {
- if ((bits_changed & (1 << row)) == 0)
+ for (row = 0; row < keypad->matrix_key_rows; row++) {
+ if ((bits_changed & BIT(row)) == 0)
continue;
code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev, keypad->keycodes[code],
- new_state[col] & (1 << row));
+ new_state[col] & BIT(row));
}
}
input_sync(input_dev);
@@ -474,7 +386,7 @@ scan:
#define DEFAULT_KPREC (0x007f007f)
-static inline int rotary_delta(uint32_t kprec)
+static inline int rotary_delta(u32 kprec)
{
if (kprec & KPREC_OF0)
return (kprec & 0xff) + 0x7f;
@@ -486,14 +398,16 @@ static inline int rotary_delta(uint32_t kprec)
static void report_rotary_event(struct pxa27x_keypad *keypad, int r, int delta)
{
+ struct pxa27x_keypad_rotary *encoder = &keypad->rotary[r];
struct input_dev *dev = keypad->input_dev;
- if (delta == 0)
+ if (!encoder->enabled || delta == 0)
return;
- if (keypad->rotary_rel_code[r] == -1) {
- int code = MAX_MATRIX_KEY_NUM + 2 * r + (delta > 0 ? 0 : 1);
- unsigned char keycode = keypad->keycodes[code];
+ if (encoder->rel_code == -1) {
+ int idx = delta > 0 ? 0 : 1;
+ int code = MAX_MATRIX_KEY_NUM + 2 * r + idx;
+ unsigned char keycode = encoder->key_codes[idx];
/* simulate a press-n-release */
input_event(dev, EV_MSC, MSC_SCAN, code);
@@ -503,45 +417,43 @@ static void report_rotary_event(struct pxa27x_keypad *keypad, int r, int delta)
input_report_key(dev, keycode, 0);
input_sync(dev);
} else {
- input_report_rel(dev, keypad->rotary_rel_code[r], delta);
+ input_report_rel(dev, encoder->rel_code, delta);
input_sync(dev);
}
}
static void pxa27x_keypad_scan_rotary(struct pxa27x_keypad *keypad)
{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
- uint32_t kprec;
+ u32 kprec;
+ int i;
/* read and reset to default count value */
kprec = keypad_readl(KPREC);
keypad_writel(KPREC, DEFAULT_KPREC);
- if (pdata->enable_rotary0)
+ for (i = 0; i < MAX_ROTARY_ENCODERS; i++) {
report_rotary_event(keypad, 0, rotary_delta(kprec));
-
- if (pdata->enable_rotary1)
- report_rotary_event(keypad, 1, rotary_delta(kprec >> 16));
+ kprec >>= 16;
+ }
}
static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
struct input_dev *input_dev = keypad->input_dev;
unsigned int new_state;
- uint32_t kpdk, bits_changed;
+ u32 kpdk, bits_changed;
int i;
kpdk = keypad_readl(KPDK);
- if (pdata->enable_rotary0 || pdata->enable_rotary1)
+ if (keypad->rotary[0].enabled || keypad->rotary[1].enabled)
pxa27x_keypad_scan_rotary(keypad);
/*
* KPDK_DK only outputs the key pin level, so it is board specific,
* and a low level may be the active one.
*/
- if (pdata->direct_key_low_active)
+ if (keypad->direct_key_low_active)
new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask;
else
new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
@@ -551,34 +463,24 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
if (bits_changed == 0)
return;
- for (i = 0; i < pdata->direct_key_num; i++) {
- if (bits_changed & (1 << i)) {
+ for (i = 0; i < keypad->direct_key_num; i++) {
+ if (bits_changed & BIT(i)) {
int code = MAX_MATRIX_KEY_NUM + i;
input_event(input_dev, EV_MSC, MSC_SCAN, code);
input_report_key(input_dev, keypad->keycodes[code],
- new_state & (1 << i));
+ new_state & BIT(i));
}
}
input_sync(input_dev);
keypad->direct_key_state = new_state;
}
-static void clear_wakeup_event(struct pxa27x_keypad *keypad)
-{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
-
- if (pdata->clear_wakeup_event)
- (pdata->clear_wakeup_event)();
-}
-
static irqreturn_t pxa27x_keypad_irq_handler(int irq, void *dev_id)
{
struct pxa27x_keypad *keypad = dev_id;
unsigned long kpc = keypad_readl(KPC);
- clear_wakeup_event(keypad);
-
if (kpc & KPC_DI)
pxa27x_keypad_scan_direct(keypad);
@@ -590,7 +492,6 @@ static irqreturn_t pxa27x_keypad_irq_handler(int irq, void *dev_id)
static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
{
- const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
unsigned int mask = 0, direct_key_num = 0;
unsigned long kpc = 0;
@@ -598,36 +499,34 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
keypad_readl(KPC);
/* enable matrix keys with automatic scan */
- if (pdata->matrix_key_rows && pdata->matrix_key_cols) {
+ if (keypad->matrix_key_rows && keypad->matrix_key_cols) {
kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
- kpc |= KPC_MKRN(pdata->matrix_key_rows) |
- KPC_MKCN(pdata->matrix_key_cols);
+ kpc |= KPC_MKRN(keypad->matrix_key_rows) |
+ KPC_MKCN(keypad->matrix_key_cols);
}
/* enable rotary key, debounce interval same as direct keys */
- if (pdata->enable_rotary0) {
+ if (keypad->rotary[0].enabled) {
mask |= 0x03;
direct_key_num = 2;
kpc |= KPC_REE0;
}
- if (pdata->enable_rotary1) {
+ if (keypad->rotary[1].enabled) {
mask |= 0x0c;
direct_key_num = 4;
kpc |= KPC_REE1;
}
- if (pdata->direct_key_num > direct_key_num)
- direct_key_num = pdata->direct_key_num;
+ if (keypad->direct_key_num > direct_key_num)
+ direct_key_num = keypad->direct_key_num;
/*
* Direct key usage may not start from KP_DKIN0; check the platform
* mask data to configure the specific pins.
*/
- if (pdata->direct_key_mask)
- keypad->direct_key_mask = pdata->direct_key_mask;
- else
- keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask;
+ if (!keypad->direct_key_mask)
+ keypad->direct_key_mask = GENMASK(direct_key_num - 1, 0) & ~mask;
/* enable direct key */
if (direct_key_num)
@@ -635,7 +534,7 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
keypad_writel(KPC, kpc | KPC_RE_ZERO_DEB);
keypad_writel(KPREC, DEFAULT_KPREC);
- keypad_writel(KPKDI, pdata->debounce_interval);
+ keypad_writel(KPKDI, keypad->debounce_interval);
}
static int pxa27x_keypad_open(struct input_dev *dev)
@@ -709,19 +608,12 @@ static int pxa27x_keypad_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(pxa27x_keypad_pm_ops,
pxa27x_keypad_suspend, pxa27x_keypad_resume);
-
static int pxa27x_keypad_probe(struct platform_device *pdev)
{
- const struct pxa27x_keypad_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
struct pxa27x_keypad *keypad;
struct input_dev *input_dev;
- int irq, error;
-
- /* Driver need build keycode from device tree or pdata */
- if (!np && !pdata)
- return -EINVAL;
+ int irq;
+ int error;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -736,7 +628,6 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
if (!input_dev)
return -ENOMEM;
- keypad->pdata = pdata;
keypad->input_dev = input_dev;
keypad->irq = irq;
@@ -765,29 +656,12 @@ static int pxa27x_keypad_probe(struct platform_device *pdev)
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
- if (pdata) {
- error = pxa27x_keypad_build_keycode(keypad);
- } else {
- error = pxa27x_keypad_build_keycode_from_dt(keypad);
- /*
- * Data that we get from DT resides in dynamically
- * allocated memory so we need to update our pdata
- * pointer.
- */
- pdata = keypad->pdata;
- }
+ error = pxa27x_keypad_parse_properties(keypad);
if (error) {
- dev_err(&pdev->dev, "failed to build keycode\n");
+ dev_err(&pdev->dev, "failed to parse keypad properties\n");
return error;
}
- keypad->row_shift = get_count_order(pdata->matrix_key_cols);
-
- if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) ||
- (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) {
- input_dev->evbit[0] |= BIT_MASK(EV_REL);
- }
-
error = devm_request_irq(&pdev->dev, irq, pxa27x_keypad_irq_handler,
0, pdev->name, keypad);
if (error) {
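The pxa27x conversion above replaces open-coded shift-and-mask macros with GENMASK()/FIELD_PREP()/FIELD_GET(). A standalone sketch showing the equivalence for one field (the defines below approximate the kernel helpers and assume 32-bit masks; illustrative only):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(m, v)	(((v) << __builtin_ctz(m)) & (m))
#define FIELD_GET(m, r)		(((r) & (m)) >> __builtin_ctz(m))

#define KPC_MKRN_MASK		GENMASK(28, 26)

int main(void)
{
	uint32_t n = 8;
	/* old: ((((n) - 1) & 0x7) << 26)  --  new: FIELD_PREP(mask, n - 1) */
	uint32_t old_val = ((n - 1) & 0x7) << 26;
	uint32_t new_val = FIELD_PREP(KPC_MKRN_MASK, n - 1);

	printf("%s\n", old_val == new_val ? "equal" : "different");
	return 0;
}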
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index 2fae337562a2..53f3ac64c980 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/input.h>
+#include <linux/input/matrix_keypad.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
@@ -22,7 +23,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/platform_data/keyboard-spear.h>
/* Keyboard Registers */
#define MODE_CTL_REG 0x00
@@ -56,13 +56,12 @@ struct spear_kbd {
void __iomem *io_base;
struct clk *clk;
unsigned int irq;
- unsigned int mode;
- unsigned int suspended_rate;
+ u32 mode;
+ u32 suspended_rate;
+ u32 mode_ctl_reg;
unsigned short last_key;
unsigned short keycodes[NUM_ROWS * NUM_COLS];
- bool rep;
bool irq_wake_enabled;
- u32 mode_ctl_reg;
};
static irqreturn_t spear_kbd_interrupt(int irq, void *dev_id)
@@ -143,46 +142,8 @@ static void spear_kbd_close(struct input_dev *dev)
kbd->last_key = KEY_RESERVED;
}
-#ifdef CONFIG_OF
-static int spear_kbd_parse_dt(struct platform_device *pdev,
- struct spear_kbd *kbd)
-{
- struct device_node *np = pdev->dev.of_node;
- int error;
- u32 val, suspended_rate;
-
- if (!np) {
- dev_err(&pdev->dev, "Missing DT data\n");
- return -EINVAL;
- }
-
- if (of_property_read_bool(np, "autorepeat"))
- kbd->rep = true;
-
- if (of_property_read_u32(np, "suspended_rate", &suspended_rate))
- kbd->suspended_rate = suspended_rate;
-
- error = of_property_read_u32(np, "st,mode", &val);
- if (error) {
- dev_err(&pdev->dev, "DT: Invalid or missing mode\n");
- return error;
- }
-
- kbd->mode = val;
- return 0;
-}
-#else
-static inline int spear_kbd_parse_dt(struct platform_device *pdev,
- struct spear_kbd *kbd)
-{
- return -ENOSYS;
-}
-#endif
-
static int spear_kbd_probe(struct platform_device *pdev)
{
- struct kbd_platform_data *pdata = dev_get_platdata(&pdev->dev);
- const struct matrix_keymap_data *keymap = pdata ? pdata->keymap : NULL;
struct spear_kbd *kbd;
struct input_dev *input_dev;
int irq;
@@ -198,6 +159,14 @@ static int spear_kbd_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ error = device_property_read_u32(&pdev->dev, "st,mode", &kbd->mode);
+ if (error) {
+ dev_err(&pdev->dev, "Invalid or missing mode\n");
+ return error;
+ }
+
+ device_property_read_u32(&pdev->dev, "suspended_rate", &kbd->suspended_rate);
+
input_dev = devm_input_allocate_device(&pdev->dev);
if (!input_dev) {
dev_err(&pdev->dev, "unable to allocate input device\n");
@@ -207,16 +176,6 @@ static int spear_kbd_probe(struct platform_device *pdev)
kbd->input = input_dev;
kbd->irq = irq;
- if (!pdata) {
- error = spear_kbd_parse_dt(pdev, kbd);
- if (error)
- return error;
- } else {
- kbd->mode = pdata->mode;
- kbd->rep = pdata->rep;
- kbd->suspended_rate = pdata->suspended_rate;
- }
-
kbd->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(kbd->io_base))
return PTR_ERR(kbd->io_base);
@@ -234,21 +193,21 @@ static int spear_kbd_probe(struct platform_device *pdev)
input_dev->open = spear_kbd_open;
input_dev->close = spear_kbd_close;
- error = matrix_keypad_build_keymap(keymap, NULL, NUM_ROWS, NUM_COLS,
+ error = matrix_keypad_build_keymap(NULL, NULL, NUM_ROWS, NUM_COLS,
kbd->keycodes, input_dev);
if (error) {
dev_err(&pdev->dev, "Failed to build keymap\n");
return error;
}
- if (kbd->rep)
+ if (device_property_read_bool(&pdev->dev, "autorepeat"))
__set_bit(EV_REP, input_dev->evbit);
input_set_capability(input_dev, EV_MSC, MSC_SCAN);
input_set_drvdata(input_dev, kbd);
error = devm_request_irq(&pdev->dev, irq, spear_kbd_interrupt, 0,
- "keyboard", kbd);
+ "keyboard", kbd);
if (error) {
dev_err(&pdev->dev, "request_irq failed\n");
return error;
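Worth noting about the spear-keyboard conversion: the removed DT parser assigned suspended_rate only when of_property_read_u32() returned non-zero, i.e. on failure, copying an uninitialized local. The new code is safe because it reads directly into the zero-initialized field, but the conventional guarded pattern (a sketch in kernel idiom, not part of the patch) would be:

u32 val;

if (!device_property_read_u32(dev, "suspended_rate", &val))
	kbd->suspended_rate = val;	/* property present and valid */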
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
deleted file mode 100644
index fbc674d7b9f0..000000000000
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ /dev/null
@@ -1,305 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for keys on TCA6416 I2C IO expander
- *
- * Copyright (C) 2010 Texas Instruments
- *
- * Author : Sriramakrishnan.A.G. <srk@ti.com>
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-#include <linux/i2c.h>
-#include <linux/input.h>
-#include <linux/tca6416_keypad.h>
-
-#define TCA6416_INPUT 0
-#define TCA6416_OUTPUT 1
-#define TCA6416_INVERT 2
-#define TCA6416_DIRECTION 3
-
-#define TCA6416_POLL_INTERVAL 100 /* msec */
-
-static const struct i2c_device_id tca6416_id[] = {
- { "tca6416-keys", 16, },
- { "tca6408-keys", 8, },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, tca6416_id);
-
-struct tca6416_keypad_chip {
- uint16_t reg_output;
- uint16_t reg_direction;
- uint16_t reg_input;
-
- struct i2c_client *client;
- struct input_dev *input;
- int io_size;
- u16 pinmask;
- bool use_polling;
- struct tca6416_button buttons[];
-};
-
-static int tca6416_write_reg(struct tca6416_keypad_chip *chip, int reg, u16 val)
-{
- int error;
-
- error = chip->io_size > 8 ?
- i2c_smbus_write_word_data(chip->client, reg << 1, val) :
- i2c_smbus_write_byte_data(chip->client, reg, val);
- if (error < 0) {
- dev_err(&chip->client->dev,
- "%s failed, reg: %d, val: %d, error: %d\n",
- __func__, reg, val, error);
- return error;
- }
-
- return 0;
-}
-
-static int tca6416_read_reg(struct tca6416_keypad_chip *chip, int reg, u16 *val)
-{
- int retval;
-
- retval = chip->io_size > 8 ?
- i2c_smbus_read_word_data(chip->client, reg << 1) :
- i2c_smbus_read_byte_data(chip->client, reg);
- if (retval < 0) {
- dev_err(&chip->client->dev, "%s failed, reg: %d, error: %d\n",
- __func__, reg, retval);
- return retval;
- }
-
- *val = (u16)retval;
- return 0;
-}
-
-static void tca6416_keys_scan(struct input_dev *input)
-{
- struct tca6416_keypad_chip *chip = input_get_drvdata(input);
- u16 reg_val, val;
- int error, i, pin_index;
-
- error = tca6416_read_reg(chip, TCA6416_INPUT, &reg_val);
- if (error)
- return;
-
- reg_val &= chip->pinmask;
-
- /* Figure out which lines have changed */
- val = reg_val ^ chip->reg_input;
- chip->reg_input = reg_val;
-
- for (i = 0, pin_index = 0; i < 16; i++) {
- if (val & (1 << i)) {
- struct tca6416_button *button = &chip->buttons[pin_index];
- unsigned int type = button->type ?: EV_KEY;
- int state = ((reg_val & (1 << i)) ? 1 : 0)
- ^ button->active_low;
-
- input_event(input, type, button->code, !!state);
- input_sync(input);
- }
-
- if (chip->pinmask & (1 << i))
- pin_index++;
- }
-}
-
-/*
- * This is threaded IRQ handler and this can (and will) sleep.
- */
-static irqreturn_t tca6416_keys_isr(int irq, void *dev_id)
-{
- tca6416_keys_scan(dev_id);
-
- return IRQ_HANDLED;
-}
-
-static int tca6416_keys_open(struct input_dev *dev)
-{
- struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
-
- if (!chip->use_polling) {
- /* Get initial device state in case it has switches */
- tca6416_keys_scan(dev);
- enable_irq(chip->client->irq);
- }
-
- return 0;
-}
-
-static void tca6416_keys_close(struct input_dev *dev)
-{
- struct tca6416_keypad_chip *chip = input_get_drvdata(dev);
-
- if (!chip->use_polling)
- disable_irq(chip->client->irq);
-}
-
-static int tca6416_setup_registers(struct tca6416_keypad_chip *chip)
-{
- int error;
-
- error = tca6416_read_reg(chip, TCA6416_OUTPUT, &chip->reg_output);
- if (error)
- return error;
-
- error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
- if (error)
- return error;
-
- /* ensure that keypad pins are set to input */
- error = tca6416_write_reg(chip, TCA6416_DIRECTION,
- chip->reg_direction | chip->pinmask);
- if (error)
- return error;
-
- error = tca6416_read_reg(chip, TCA6416_DIRECTION, &chip->reg_direction);
- if (error)
- return error;
-
- error = tca6416_read_reg(chip, TCA6416_INPUT, &chip->reg_input);
- if (error)
- return error;
-
- chip->reg_input &= chip->pinmask;
-
- return 0;
-}
-
-static int tca6416_keypad_probe(struct i2c_client *client)
-{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- struct tca6416_keys_platform_data *pdata;
- struct tca6416_keypad_chip *chip;
- struct input_dev *input;
- int error;
- int i;
-
- /* Check functionality */
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
- dev_err(&client->dev, "%s adapter not supported\n",
- dev_driver_string(&client->adapter->dev));
- return -ENODEV;
- }
-
- pdata = dev_get_platdata(&client->dev);
- if (!pdata) {
- dev_dbg(&client->dev, "no platform data\n");
- return -EINVAL;
- }
-
- chip = devm_kzalloc(&client->dev,
- struct_size(chip, buttons, pdata->nbuttons),
- GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- input = devm_input_allocate_device(&client->dev);
- if (!input)
- return -ENOMEM;
-
- chip->client = client;
- chip->input = input;
- chip->io_size = id->driver_data;
- chip->pinmask = pdata->pinmask;
- chip->use_polling = pdata->use_polling;
-
- input->phys = "tca6416-keys/input0";
- input->name = client->name;
-
- input->open = tca6416_keys_open;
- input->close = tca6416_keys_close;
-
- input->id.bustype = BUS_HOST;
- input->id.vendor = 0x0001;
- input->id.product = 0x0001;
- input->id.version = 0x0100;
-
- /* Enable auto repeat feature of Linux input subsystem */
- if (pdata->rep)
- __set_bit(EV_REP, input->evbit);
-
- for (i = 0; i < pdata->nbuttons; i++) {
- unsigned int type;
-
- chip->buttons[i] = pdata->buttons[i];
- type = (pdata->buttons[i].type) ?: EV_KEY;
- input_set_capability(input, type, pdata->buttons[i].code);
- }
-
- input_set_drvdata(input, chip);
-
- /*
- * Initialize cached registers from their original values.
- * we can't share this chip with another i2c master.
- */
- error = tca6416_setup_registers(chip);
- if (error)
- return error;
-
- if (chip->use_polling) {
- error = input_setup_polling(input, tca6416_keys_scan);
- if (error) {
- dev_err(&client->dev, "Failed to setup polling\n");
- return error;
- }
-
- input_set_poll_interval(input, TCA6416_POLL_INTERVAL);
- } else {
- error = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, tca6416_keys_isr,
- IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT |
- IRQF_NO_AUTOEN,
- "tca6416-keypad", input);
- if (error) {
- dev_dbg(&client->dev,
- "Unable to claim irq %d; error %d\n",
- client->irq, error);
- return error;
- }
- }
-
- error = input_register_device(input);
- if (error) {
- dev_dbg(&client->dev,
- "Unable to register input device, error: %d\n", error);
- return error;
- }
-
- i2c_set_clientdata(client, chip);
-
- return 0;
-}
-
-static struct i2c_driver tca6416_keypad_driver = {
- .driver = {
- .name = "tca6416-keypad",
- },
- .probe = tca6416_keypad_probe,
- .id_table = tca6416_id,
-};
-
-static int __init tca6416_keypad_init(void)
-{
- return i2c_add_driver(&tca6416_keypad_driver);
-}
-
-subsys_initcall(tca6416_keypad_init);
-
-static void __exit tca6416_keypad_exit(void)
-{
- i2c_del_driver(&tca6416_keypad_driver);
-}
-module_exit(tca6416_keypad_exit);
-
-MODULE_AUTHOR("Sriramakrishnan <srk@ti.com>");
-MODULE_DESCRIPTION("Keypad driver over tca6416 IO expander");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 76fc19ffe21d..68c0afafee7b 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -373,18 +373,7 @@ static struct i2c_driver tca8418_keypad_driver = {
.probe = tca8418_keypad_probe,
.id_table = tca8418_id,
};
-
-static int __init tca8418_keypad_init(void)
-{
- return i2c_add_driver(&tca8418_keypad_driver);
-}
-subsys_initcall(tca8418_keypad_init);
-
-static void __exit tca8418_keypad_exit(void)
-{
- i2c_del_driver(&tca8418_keypad_driver);
-}
-module_exit(tca8418_keypad_exit);
+module_i2c_driver(tca8418_keypad_driver);
MODULE_AUTHOR("Kyle Manna <kyle.manna@fuel7.com>");
MODULE_DESCRIPTION("Keypad driver for TCA8418");
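module_i2c_driver() generates essentially the init/exit boilerplate deleted above; the visible behavioural difference is that the driver now registers at module_init() time rather than via subsys_initcall(). Roughly what the macro expands to (paraphrased, not a verbatim expansion):

static int __init tca8418_keypad_driver_init(void)
{
	return i2c_add_driver(&tca8418_keypad_driver);
}
module_init(tca8418_keypad_driver_init);

static void __exit tca8418_keypad_driver_exit(void)
{
	i2c_del_driver(&tca8418_keypad_driver);
}
module_exit(tca8418_keypad_driver_exit);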
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index 77e0743a3cf8..5e3d17c5dc9b 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -28,10 +28,6 @@
* an internal state machine that decodes pressed keys, including
* multi-key combinations.
*
- * This driver lets boards define what keycodes they wish to report for
- * which scancodes, as part of the "struct twl4030_keypad_data" used in
- * the probe() routine.
- *
* See the TPS65950 documentation; that's the general availability
* version of the TWL5030 second generation part.
*/
@@ -47,7 +43,6 @@
struct twl4030_keypad {
unsigned short keymap[TWL4030_KEYMAP_SIZE];
u16 kp_state[TWL4030_MAX_ROWS];
- bool autorepeat;
unsigned int n_rows;
unsigned int n_cols;
int irq;
@@ -322,8 +317,6 @@ static int twl4030_kp_program(struct twl4030_keypad *kp)
*/
static int twl4030_kp_probe(struct platform_device *pdev)
{
- struct twl4030_keypad_data *pdata = dev_get_platdata(&pdev->dev);
- const struct matrix_keymap_data *keymap_data = NULL;
struct twl4030_keypad *kp;
struct input_dev *input;
u8 reg;
@@ -350,24 +343,10 @@ static int twl4030_kp_probe(struct platform_device *pdev)
input->id.product = 0x0001;
input->id.version = 0x0003;
- if (pdata) {
- if (!pdata->rows || !pdata->cols || !pdata->keymap_data) {
- dev_err(&pdev->dev, "Missing platform_data\n");
- return -EINVAL;
- }
-
- kp->n_rows = pdata->rows;
- kp->n_cols = pdata->cols;
- kp->autorepeat = pdata->rep;
- keymap_data = pdata->keymap_data;
- } else {
- error = matrix_keypad_parse_properties(&pdev->dev, &kp->n_rows,
- &kp->n_cols);
- if (error)
- return error;
-
- kp->autorepeat = true;
- }
+ error = matrix_keypad_parse_properties(&pdev->dev,
+ &kp->n_rows, &kp->n_cols);
+ if (error)
+ return error;
if (kp->n_rows > TWL4030_MAX_ROWS || kp->n_cols > TWL4030_MAX_COLS) {
dev_err(&pdev->dev,
@@ -379,7 +358,7 @@ static int twl4030_kp_probe(struct platform_device *pdev)
if (kp->irq < 0)
return kp->irq;
- error = matrix_keypad_build_keymap(keymap_data, NULL,
+ error = matrix_keypad_build_keymap(NULL, NULL,
TWL4030_MAX_ROWS,
1 << TWL4030_ROW_SHIFT,
kp->keymap, input);
@@ -389,9 +368,7 @@ static int twl4030_kp_probe(struct platform_device *pdev)
}
input_set_capability(input, EV_MSC, MSC_SCAN);
- /* Enable auto repeat feature of Linux input subsystem */
- if (kp->autorepeat)
- __set_bit(EV_REP, input->evbit);
+ __set_bit(EV_REP, input->evbit);
error = input_register_device(input);
if (error) {
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 0fb21c99a5e3..94a753fcb64f 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -126,6 +126,17 @@ config INPUT_ATMEL_CAPTOUCH
To compile this driver as a module, choose M here: the
module will be called atmel_captouch.
+config INPUT_AW86927
+ tristate "Awinic AW86927 Haptic Driver Support"
+ depends on I2C && INPUT
+ select INPUT_FF_MEMLESS
+ select REGMAP_I2C
+ help
+ Say Y here if you have an Awinic AW86927 haptic chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called aw86927.
+
config INPUT_BBNSM_PWRKEY
tristate "NXP BBNSM Power Key Driver"
depends on ARCH_MXC || COMPILE_TEST
@@ -179,6 +190,17 @@ config INPUT_PCSPKR
To compile this driver as a module, choose M here: the
module will be called pcspkr.
+config INPUT_PF1550_ONKEY
+ tristate "NXP PF1550 Onkey support"
+ depends on MFD_PF1550
+ help
+ Say Y here if you want support for the PF1550 PMIC onkey. The onkey
+ can trigger an interrupt on release and after 1s (push and hold), 2s,
+ 3s, 4s and 8s for long-press detection.
+
+ To compile this driver as a module, choose M here. The module will be
+ called pf1550-onkey.
+
config INPUT_PM8941_PWRKEY
tristate "Qualcomm PM8941 power key support"
depends on MFD_SPMI_PMIC
@@ -230,6 +252,16 @@ config INPUT_M68K_BEEP
tristate "M68k Beeper support"
depends on M68K
+config INPUT_MAX7360_ROTARY
+ tristate "Maxim MAX7360 Rotary Encoder"
+ depends on MFD_MAX7360
+ help
+ If you say yes here you get support for the rotary encoder on the
+ Maxim MAX7360 I/O Expander.
+
+ To compile this driver as a module, choose M here: the module will be
+ called max7360_rotary.
+
config INPUT_MAX77650_ONKEY
tristate "Maxim MAX77650 ONKEY support"
depends on MFD_MAX77650
@@ -506,6 +538,16 @@ config INPUT_TPS65219_PWRBUTTON
To compile this driver as a module, choose M here. The module will
be called tps65219-pwrbutton.
+config INPUT_TPS6594_PWRBUTTON
+ tristate "TPS6594 Power button driver"
+ depends on MFD_TPS6594
+ help
+ Say Y here if you want to enable power button reporting for
+ TPS6594 Power Management IC devices.
+
+ To compile this driver as a module, choose M here. The module will
+ be called tps6594-pwrbutton.
+
config INPUT_AXP20X_PEK
tristate "X-Powers AXP20X power button driver"
depends on MFD_AXP20X
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index d468c8140b93..415fc4e2918b 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_INPUT_ATC260X_ONKEY) += atc260x-onkey.o
obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o
obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o
obj-$(CONFIG_INPUT_ATMEL_CAPTOUCH) += atmel_captouch.o
+obj-$(CONFIG_INPUT_AW86927) += aw86927.o
obj-$(CONFIG_INPUT_BBNSM_PWRKEY) += nxp-bbnsm-pwrkey.o
obj-$(CONFIG_INPUT_BMA150) += bma150.o
obj-$(CONFIG_INPUT_CM109) += cm109.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_INPUT_IQS7222) += iqs7222.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
obj-$(CONFIG_INPUT_KXTJ9) += kxtj9.o
obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o
+obj-$(CONFIG_INPUT_MAX7360_ROTARY) += max7360-rotary.o
obj-$(CONFIG_INPUT_MAX77650_ONKEY) += max77650-onkey.o
obj-$(CONFIG_INPUT_MAX77693_HAPTIC) += max77693-haptic.o
obj-$(CONFIG_INPUT_MAX8925_ONKEY) += max8925_onkey.o
@@ -61,6 +63,7 @@ obj-$(CONFIG_INPUT_PALMAS_PWRBUTTON) += palmas-pwrbutton.o
obj-$(CONFIG_INPUT_PCAP) += pcap_keys.o
obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o
obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o
+obj-$(CONFIG_INPUT_PF1550_ONKEY) += pf1550-onkey.o
obj-$(CONFIG_INPUT_PM8941_PWRKEY) += pm8941-pwrkey.o
obj-$(CONFIG_INPUT_PM8XXX_VIBRATOR) += pm8xxx-vibrator.o
obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o
@@ -83,6 +86,7 @@ obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o
obj-$(CONFIG_INPUT_STPMIC1_ONKEY) += stpmic1_onkey.o
obj-$(CONFIG_INPUT_TPS65218_PWRBUTTON) += tps65218-pwrbutton.o
obj-$(CONFIG_INPUT_TPS65219_PWRBUTTON) += tps65219-pwrbutton.o
+obj-$(CONFIG_INPUT_TPS6594_PWRBUTTON) += tps6594-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_PWRBUTTON) += twl4030-pwrbutton.o
obj-$(CONFIG_INPUT_TWL4030_VIBRA) += twl4030-vibra.o
obj-$(CONFIG_INPUT_TWL6040_VIBRA) += twl6040-vibra.o
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index d106f37df6bc..c9fa789337ba 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -6,6 +6,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 7cafbf8d5f1a..ac7674647c09 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 5fa1c9438a85..bb1544d63c51 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -34,8 +34,6 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
- struct snd_soc_component *component =
- snd_soc_dapm_to_component(arizona->dapm);
int ret;
if (!haptics->arizona->dapm) {
@@ -65,7 +63,7 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}
- ret = snd_soc_component_enable_pin(component, "HAPTICS");
+ ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
@@ -80,7 +78,7 @@ static void arizona_haptics_work(struct work_struct *work)
}
} else {
/* This disable sequence will be a noop if already enabled */
- ret = snd_soc_component_disable_pin(component, "HAPTICS");
+ ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
@@ -139,14 +137,12 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
- struct snd_soc_component *component;
+ struct snd_soc_dapm_context *dapm = haptics->arizona->dapm;
cancel_work_sync(&haptics->work);
- if (haptics->arizona->dapm) {
- component = snd_soc_dapm_to_component(haptics->arizona->dapm);
- snd_soc_component_disable_pin(component, "HAPTICS");
- }
+ if (dapm)
+ snd_soc_dapm_disable_pin(dapm, "HAPTICS");
}
static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/input/misc/aw86927.c b/drivers/input/misc/aw86927.c
new file mode 100644
index 000000000000..8ad361239cfe
--- /dev/null
+++ b/drivers/input/misc/aw86927.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025 Griffin Kroah-Hartman <griffin.kroah@fairphone.com>
+ *
+ * Partially based on vendor driver:
+ * Copyright (c) 2021 AWINIC Technology CO., LTD
+ *
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#define AW86927_RSTCFG_REG 0x00
+#define AW86927_RSTCFG_SOFTRST 0xaa
+
+#define AW86927_SYSINT_REG 0x02
+#define AW86927_SYSINT_BST_SCPI BIT(7)
+#define AW86927_SYSINT_BST_OVPI BIT(6)
+#define AW86927_SYSINT_UVLI BIT(5)
+#define AW86927_SYSINT_FF_AEI BIT(4)
+#define AW86927_SYSINT_FF_AFI BIT(3)
+#define AW86927_SYSINT_OCDI BIT(2)
+#define AW86927_SYSINT_OTI BIT(1)
+#define AW86927_SYSINT_DONEI BIT(0)
+
+#define AW86927_SYSINTM_REG 0x03
+#define AW86927_SYSINTM_BST_OVPM BIT(6)
+#define AW86927_SYSINTM_FF_AEM BIT(4)
+#define AW86927_SYSINTM_FF_AFM BIT(3)
+#define AW86927_SYSINTM_DONEM BIT(0)
+
+#define AW86927_PLAYCFG1_REG 0x06
+#define AW86927_PLAYCFG1_BST_MODE_MASK GENMASK(7, 7)
+#define AW86927_PLAYCFG1_BST_MODE_BYPASS 0
+#define AW86927_PLAYCFG1_BST_VOUT_VREFSET_MASK GENMASK(6, 0)
+#define AW86927_PLAYCFG1_BST_8500MV 0x50
+
+#define AW86927_PLAYCFG2_REG 0x07
+
+#define AW86927_PLAYCFG3_REG 0x08
+#define AW86927_PLAYCFG3_AUTO_BST_MASK GENMASK(4, 4)
+#define AW86927_PLAYCFG3_AUTO_BST_ENABLE 1
+#define AW86927_PLAYCFG3_AUTO_BST_DISABLE 0
+#define AW86927_PLAYCFG3_PLAY_MODE_MASK GENMASK(1, 0)
+#define AW86927_PLAYCFG3_PLAY_MODE_RAM 0
+
+#define AW86927_PLAYCFG4_REG 0x09
+#define AW86927_PLAYCFG4_STOP BIT(1)
+#define AW86927_PLAYCFG4_GO BIT(0)
+
+#define AW86927_WAVCFG1_REG 0x0a
+#define AW86927_WAVCFG1_WAVSEQ1_MASK GENMASK(6, 0)
+
+#define AW86927_WAVCFG2_REG 0x0b
+#define AW86927_WAVCFG2_WAVSEQ2_MASK GENMASK(6, 0)
+
+#define AW86927_WAVCFG9_REG 0x12
+#define AW86927_WAVCFG9_SEQ1LOOP_MASK GENMASK(7, 4)
+#define AW86927_WAVCFG9_SEQ1LOOP_INFINITELY 0x0f
+
+#define AW86927_CONTCFG1_REG 0x18
+#define AW86927_CONTCFG1_BRK_BST_MD_MASK GENMASK(6, 6)
+
+#define AW86927_CONTCFG5_REG 0x1c
+#define AW86927_CONTCFG5_BST_BRK_GAIN_MASK GENMASK(7, 4)
+#define AW86927_CONTCFG5_BRK_GAIN_MASK GENMASK(3, 0)
+
+#define AW86927_CONTCFG10_REG 0x21
+#define AW86927_CONTCFG10_BRK_TIME_MASK GENMASK(7, 0)
+#define AW86927_CONTCFG10_BRK_TIME_DEFAULT 8
+
+#define AW86927_CONTCFG13_REG 0x24
+#define AW86927_CONTCFG13_TSET_MASK GENMASK(7, 4)
+#define AW86927_CONTCFG13_BEME_SET_MASK GENMASK(3, 0)
+
+#define AW86927_BASEADDRH_REG 0x2d
+#define AW86927_BASEADDRL_REG 0x2e
+
+#define AW86927_GLBRD5_REG 0x3f
+#define AW86927_GLBRD5_STATE_MASK GENMASK(3, 0)
+#define AW86927_GLBRD5_STATE_STANDBY 0
+
+#define AW86927_RAMADDRH_REG 0x40
+
+#define AW86927_RAMADDRL_REG 0x41
+
+#define AW86927_RAMDATA_REG 0x42
+
+#define AW86927_SYSCTRL3_REG 0x45
+#define AW86927_SYSCTRL3_STANDBY_MASK GENMASK(5, 5)
+#define AW86927_SYSCTRL3_STANDBY_ON 1
+#define AW86927_SYSCTRL3_STANDBY_OFF 0
+#define AW86927_SYSCTRL3_EN_RAMINIT_MASK GENMASK(2, 2)
+#define AW86927_SYSCTRL3_EN_RAMINIT_ON 1
+#define AW86927_SYSCTRL3_EN_RAMINIT_OFF 0
+
+#define AW86927_SYSCTRL4_REG 0x46
+#define AW86927_SYSCTRL4_WAVDAT_MODE_MASK GENMASK(6, 5)
+#define AW86927_SYSCTRL4_WAVDAT_24K 0
+#define AW86927_SYSCTRL4_INT_EDGE_MODE_MASK GENMASK(4, 4)
+#define AW86927_SYSCTRL4_INT_EDGE_MODE_POS 0
+#define AW86927_SYSCTRL4_INT_MODE_MASK GENMASK(3, 3)
+#define AW86927_SYSCTRL4_INT_MODE_EDGE 1
+#define AW86927_SYSCTRL4_GAIN_BYPASS_MASK GENMASK(0, 0)
+
+#define AW86927_PWMCFG1_REG 0x48
+#define AW86927_PWMCFG1_PRC_EN_MASK GENMASK(7, 7)
+#define AW86927_PWMCFG1_PRC_DISABLE 0
+
+#define AW86927_PWMCFG3_REG 0x4a
+#define AW86927_PWMCFG3_PR_EN_MASK GENMASK(7, 7)
+#define AW86927_PWMCFG3_PRCTIME_MASK GENMASK(6, 0)
+
+#define AW86927_PWMCFG4_REG 0x4b
+#define AW86927_PWMCFG4_PRTIME_MASK GENMASK(7, 0)
+
+#define AW86927_VBATCTRL_REG 0x4c
+#define AW86927_VBATCTRL_VBAT_MODE_MASK GENMASK(6, 6)
+#define AW86927_VBATCTRL_VBAT_MODE_SW 0
+
+#define AW86927_DETCFG1_REG 0x4d
+#define AW86927_DETCFG1_DET_GO_MASK GENMASK(1, 0)
+#define AW86927_DETCFG1_DET_GO_DET_SEQ0 1
+#define AW86927_DETCFG1_DET_GO_NA 0
+
+#define AW86927_DETCFG2_REG 0x4e
+#define AW86927_DETCFG2_DET_SEQ0_MASK GENMASK(6, 3)
+#define AW86927_DETCFG2_DET_SEQ0_VBAT 0
+#define AW86927_DETCFG2_D2S_GAIN_MASK GENMASK(2, 0)
+#define AW86927_DETCFG2_D2S_GAIN_10 4
+
+#define AW86927_CHIPIDH_REG 0x57
+#define AW86927_CHIPIDL_REG 0x58
+#define AW86927_CHIPID 0x9270
+
+#define AW86927_TMCFG_REG 0x5b
+#define AW86927_TMCFG_UNLOCK 0x7d
+#define AW86927_TMCFG_LOCK 0x00
+
+#define AW86927_ANACFG11_REG 0x70
+
+#define AW86927_ANACFG12_REG 0x71
+#define AW86927_ANACFG12_BST_SKIP_MASK GENMASK(7, 7)
+#define AW86927_ANACFG12_BST_SKIP_SHUTDOWN 1
+
+#define AW86927_ANACFG13_REG 0x72
+#define AW86927_ANACFG13_BST_PC_MASK GENMASK(7, 4)
+#define AW86927_ANACFG13_BST_PEAKCUR_3P45A 6
+
+#define AW86927_ANACFG15_REG 0x74
+#define AW86927_ANACFG15_BST_PEAK_MODE_MASK GENMASK(7, 7)
+#define AW86927_ANACFG15_BST_PEAK_BACK 1
+
+#define AW86927_ANACFG16_REG 0x75
+#define AW86927_ANACFG16_BST_SRC_MASK GENMASK(4, 4)
+#define AW86927_ANACFG16_BST_SRC_3NS 0
+
+/* default value of base addr */
+#define AW86927_RAM_BASE_ADDR 0x800
+#define AW86927_BASEADDRH_VAL 0x08
+#define AW86927_BASEADDRL_VAL 0x00
+
+enum aw86927_work_mode {
+ AW86927_STANDBY_MODE,
+ AW86927_RAM_MODE,
+};
+
+struct aw86927_data {
+ struct work_struct play_work;
+ struct device *dev;
+ struct input_dev *input_dev;
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ bool running;
+};
+
+static const struct regmap_config aw86927_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .cache_type = REGCACHE_NONE,
+ .max_register = 0x80,
+};
+
+/*
+ * Sine wave representing the magnitude of the drive to be used.
+ * Data is encoded in two's complement.
+ * round(84 * sin(x / 16.25))
+ */
+static const u8 aw86927_waveform[] = {
+ 0x00, 0x05, 0x0a, 0x0f, 0x14, 0x1a, 0x1f, 0x23, 0x28, 0x2d, 0x31, 0x35,
+ 0x39, 0x3d, 0x41, 0x44, 0x47, 0x4a, 0x4c, 0x4f, 0x51, 0x52, 0x53, 0x54,
+ 0x55, 0x55, 0x55, 0x55, 0x55, 0x54, 0x52, 0x51, 0x4f, 0x4d, 0x4a, 0x47,
+ 0x44, 0x41, 0x3d, 0x3a, 0x36, 0x31, 0x2d, 0x28, 0x24, 0x1f, 0x1a, 0x15,
+ 0x10, 0x0a, 0x05, 0x00, 0xfc, 0xf6, 0xf1, 0xec, 0xe7, 0xe2, 0xdd, 0xd8,
+ 0xd4, 0xcf, 0xcb, 0xc7, 0xc3, 0xbf, 0xbc, 0xb9, 0xb6, 0xb4, 0xb1, 0xb0,
+ 0xae, 0xad, 0xac, 0xab, 0xab, 0xab, 0xab, 0xab, 0xac, 0xae, 0xaf, 0xb1,
+ 0xb3, 0xb6, 0xb8, 0xbc, 0xbf, 0xc2, 0xc6, 0xca, 0xce, 0xd3, 0xd7, 0xdc,
+ 0xe1, 0xe6, 0xeb, 0xf0, 0xf5, 0xfb
+};
+
+struct aw86927_sram_waveform_header {
+ u8 version;
+ __be16 start_address;
+ __be16 end_address;
+} __packed;
+
+static const struct aw86927_sram_waveform_header sram_waveform_header = {
+ .version = 0x01,
+ .start_address = cpu_to_be16(AW86927_RAM_BASE_ADDR +
+ sizeof(struct aw86927_sram_waveform_header)),
+ .end_address = cpu_to_be16(AW86927_RAM_BASE_ADDR +
+ sizeof(struct aw86927_sram_waveform_header) +
+ ARRAY_SIZE(aw86927_waveform) - 1),
+};
+
+static int aw86927_wait_enter_standby(struct aw86927_data *haptics)
+{
+ unsigned int reg_val;
+ int err;
+
+ err = regmap_read_poll_timeout(haptics->regmap, AW86927_GLBRD5_REG, reg_val,
+ (FIELD_GET(AW86927_GLBRD5_STATE_MASK, reg_val) ==
+ AW86927_GLBRD5_STATE_STANDBY),
+ 2500, 2500 * 100);
+
+ if (err) {
+ dev_err(haptics->dev, "did not enter standby: %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int aw86927_play_mode(struct aw86927_data *haptics, u8 play_mode)
+{
+ int err;
+
+ switch (play_mode) {
+ case AW86927_STANDBY_MODE:
+ /* Briefly toggle standby, then toggle back to standby off */
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL3_REG,
+ AW86927_SYSCTRL3_STANDBY_MASK,
+ FIELD_PREP(AW86927_SYSCTRL3_STANDBY_MASK,
+ AW86927_SYSCTRL3_STANDBY_ON));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL3_REG,
+ AW86927_SYSCTRL3_STANDBY_MASK,
+ FIELD_PREP(AW86927_SYSCTRL3_STANDBY_MASK,
+ AW86927_SYSCTRL3_STANDBY_OFF));
+ if (err)
+ return err;
+
+ break;
+
+ case AW86927_RAM_MODE:
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PLAYCFG3_REG,
+ AW86927_PLAYCFG3_PLAY_MODE_MASK,
+ FIELD_PREP(AW86927_PLAYCFG3_PLAY_MODE_MASK,
+ AW86927_PLAYCFG3_PLAY_MODE_RAM));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PLAYCFG1_REG,
+ AW86927_PLAYCFG1_BST_MODE_MASK,
+ FIELD_PREP(AW86927_PLAYCFG1_BST_MODE_MASK,
+ AW86927_PLAYCFG1_BST_MODE_BYPASS));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_VBATCTRL_REG,
+ AW86927_VBATCTRL_VBAT_MODE_MASK,
+ FIELD_PREP(AW86927_VBATCTRL_VBAT_MODE_MASK,
+ AW86927_VBATCTRL_VBAT_MODE_SW));
+ if (err)
+ return err;
+
+ break;
+ }
+
+ return 0;
+}
+
+static int aw86927_stop(struct aw86927_data *haptics)
+{
+ int err;
+
+ err = regmap_write(haptics->regmap, AW86927_PLAYCFG4_REG, AW86927_PLAYCFG4_STOP);
+ if (err) {
+ dev_err(haptics->dev, "Failed to stop playback: %d\n", err);
+ return err;
+ }
+
+ err = aw86927_wait_enter_standby(haptics);
+ if (err) {
+ dev_err(haptics->dev, "Failed to enter standby, trying to force it\n");
+ err = aw86927_play_mode(haptics, AW86927_STANDBY_MODE);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int aw86927_haptics_play(struct input_dev *dev, void *data, struct ff_effect *effect)
+{
+ struct aw86927_data *haptics = input_get_drvdata(dev);
+ int level;
+
+ level = effect->u.rumble.strong_magnitude;
+ if (!level)
+ level = effect->u.rumble.weak_magnitude;
+
+ /* If already running, don't restart playback */
+ if (haptics->running && level)
+ return 0;
+
+ haptics->running = level;
+ schedule_work(&haptics->play_work);
+
+ return 0;
+}
+
+static int aw86927_play_sine(struct aw86927_data *haptics)
+{
+ int err;
+
+ err = aw86927_stop(haptics);
+ if (err)
+ return err;
+
+ err = aw86927_play_mode(haptics, AW86927_RAM_MODE);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap, AW86927_PLAYCFG3_REG,
+ AW86927_PLAYCFG3_AUTO_BST_MASK,
+ FIELD_PREP(AW86927_PLAYCFG3_AUTO_BST_MASK,
+ AW86927_PLAYCFG3_AUTO_BST_ENABLE));
+ if (err)
+ return err;
+
+	/* Set WAVSEQ1 to the first wave */
+ err = regmap_update_bits(haptics->regmap, AW86927_WAVCFG1_REG,
+ AW86927_WAVCFG1_WAVSEQ1_MASK,
+ FIELD_PREP(AW86927_WAVCFG1_WAVSEQ1_MASK, 1));
+ if (err)
+ return err;
+
+	/* Set WAVSEQ2 to zero */
+ err = regmap_update_bits(haptics->regmap, AW86927_WAVCFG2_REG,
+ AW86927_WAVCFG2_WAVSEQ2_MASK,
+ FIELD_PREP(AW86927_WAVCFG2_WAVSEQ2_MASK, 0));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_WAVCFG9_REG,
+ AW86927_WAVCFG9_SEQ1LOOP_MASK,
+ FIELD_PREP(AW86927_WAVCFG9_SEQ1LOOP_MASK,
+ AW86927_WAVCFG9_SEQ1LOOP_INFINITELY));
+ if (err)
+ return err;
+
+	/* Set gain to a value lower than 0x80 to avoid distorted playback */
+ err = regmap_write(haptics->regmap, AW86927_PLAYCFG2_REG, 0x7c);
+ if (err)
+ return err;
+
+ /* Start playback */
+ err = regmap_write(haptics->regmap, AW86927_PLAYCFG4_REG, AW86927_PLAYCFG4_GO);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void aw86927_close(struct input_dev *input)
+{
+ struct aw86927_data *haptics = input_get_drvdata(input);
+ struct device *dev = &haptics->client->dev;
+ int err;
+
+ cancel_work_sync(&haptics->play_work);
+
+ err = aw86927_stop(haptics);
+ if (err)
+ dev_err(dev, "Failed to close the Driver: %d\n", err);
+}
+
+static void aw86927_haptics_play_work(struct work_struct *work)
+{
+ struct aw86927_data *haptics =
+ container_of(work, struct aw86927_data, play_work);
+ struct device *dev = &haptics->client->dev;
+ int err;
+
+ if (haptics->running)
+ err = aw86927_play_sine(haptics);
+ else
+ err = aw86927_stop(haptics);
+
+ if (err)
+ dev_err(dev, "Failed to execute work command: %d\n", err);
+}
+
+static void aw86927_hw_reset(struct aw86927_data *haptics)
+{
+ /* Assert reset */
+ gpiod_set_value_cansleep(haptics->reset_gpio, 1);
+ /* Wait ~1ms */
+ usleep_range(1000, 2000);
+ /* Deassert reset */
+ gpiod_set_value_cansleep(haptics->reset_gpio, 0);
+ /* Wait ~8ms until I2C is accessible */
+ usleep_range(8000, 8500);
+}
+
+static int aw86927_haptic_init(struct aw86927_data *haptics)
+{
+ int err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL4_REG,
+ AW86927_SYSCTRL4_WAVDAT_MODE_MASK,
+ FIELD_PREP(AW86927_SYSCTRL4_WAVDAT_MODE_MASK,
+ AW86927_SYSCTRL4_WAVDAT_24K));
+ if (err)
+ return err;
+
+ /* enable gain bypass */
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL4_REG,
+ AW86927_SYSCTRL4_GAIN_BYPASS_MASK,
+ FIELD_PREP(AW86927_SYSCTRL4_GAIN_BYPASS_MASK,
+ 0x01));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_TMCFG_REG, AW86927_TMCFG_UNLOCK);
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap, AW86927_ANACFG11_REG, 0x0f);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_ANACFG12_REG,
+ AW86927_ANACFG12_BST_SKIP_MASK,
+ FIELD_PREP(AW86927_ANACFG12_BST_SKIP_MASK,
+ AW86927_ANACFG12_BST_SKIP_SHUTDOWN));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_ANACFG15_REG,
+ AW86927_ANACFG15_BST_PEAK_MODE_MASK,
+ FIELD_PREP(AW86927_ANACFG15_BST_PEAK_MODE_MASK,
+ AW86927_ANACFG15_BST_PEAK_BACK));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_ANACFG16_REG,
+ AW86927_ANACFG16_BST_SRC_MASK,
+ FIELD_PREP(AW86927_ANACFG16_BST_SRC_MASK,
+ AW86927_ANACFG16_BST_SRC_3NS));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_TMCFG_REG, AW86927_TMCFG_LOCK);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_CONTCFG1_REG,
+ AW86927_CONTCFG1_BRK_BST_MD_MASK,
+ FIELD_PREP(AW86927_CONTCFG1_BRK_BST_MD_MASK, 0x00));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_CONTCFG5_REG,
+ FIELD_PREP(AW86927_CONTCFG5_BST_BRK_GAIN_MASK, 0x05) |
+ FIELD_PREP(AW86927_CONTCFG5_BRK_GAIN_MASK, 0x08));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap, AW86927_CONTCFG10_REG,
+ AW86927_CONTCFG10_BRK_TIME_MASK,
+ FIELD_PREP(AW86927_CONTCFG10_BRK_TIME_MASK,
+ AW86927_CONTCFG10_BRK_TIME_DEFAULT));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_CONTCFG13_REG,
+ FIELD_PREP(AW86927_CONTCFG13_TSET_MASK, 0x06) |
+ FIELD_PREP(AW86927_CONTCFG13_BEME_SET_MASK, 0x02));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_DETCFG2_REG,
+ AW86927_DETCFG2_D2S_GAIN_MASK,
+ FIELD_PREP(AW86927_DETCFG2_D2S_GAIN_MASK,
+ AW86927_DETCFG2_D2S_GAIN_10));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PWMCFG1_REG,
+ AW86927_PWMCFG1_PRC_EN_MASK,
+ FIELD_PREP(AW86927_PWMCFG1_PRC_EN_MASK,
+ AW86927_PWMCFG1_PRC_DISABLE));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_PWMCFG3_REG,
+ FIELD_PREP(AW86927_PWMCFG3_PR_EN_MASK, 0x01) |
+ FIELD_PREP(AW86927_PWMCFG3_PRCTIME_MASK, 0x3f));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PWMCFG4_REG,
+ AW86927_PWMCFG4_PRTIME_MASK,
+ FIELD_PREP(AW86927_PWMCFG4_PRTIME_MASK, 0x32));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_TMCFG_REG, AW86927_TMCFG_UNLOCK);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_ANACFG13_REG,
+ AW86927_ANACFG13_BST_PC_MASK,
+ FIELD_PREP(AW86927_ANACFG13_BST_PC_MASK,
+ AW86927_ANACFG13_BST_PEAKCUR_3P45A));
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_TMCFG_REG, AW86927_TMCFG_LOCK);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PLAYCFG1_REG,
+ AW86927_PLAYCFG1_BST_VOUT_VREFSET_MASK,
+ FIELD_PREP(AW86927_PLAYCFG1_BST_VOUT_VREFSET_MASK,
+ AW86927_PLAYCFG1_BST_8500MV));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_PLAYCFG3_REG,
+ AW86927_PLAYCFG3_AUTO_BST_MASK,
+ FIELD_PREP(AW86927_PLAYCFG3_AUTO_BST_MASK,
+ AW86927_PLAYCFG3_AUTO_BST_DISABLE));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int aw86927_ram_init(struct aw86927_data *haptics)
+{
+ int err;
+
+ err = aw86927_wait_enter_standby(haptics);
+ if (err)
+ return err;
+
+ /* Enable SRAM init */
+	err = regmap_update_bits(haptics->regmap,
+				 AW86927_SYSCTRL3_REG,
+				 AW86927_SYSCTRL3_EN_RAMINIT_MASK,
+				 FIELD_PREP(AW86927_SYSCTRL3_EN_RAMINIT_MASK,
+					    AW86927_SYSCTRL3_EN_RAMINIT_ON));
+	if (err)
+		return err;
+
+ /* Set base address for the start of the SRAM waveforms */
+ err = regmap_write(haptics->regmap,
+ AW86927_BASEADDRH_REG, AW86927_BASEADDRH_VAL);
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_BASEADDRL_REG, AW86927_BASEADDRL_VAL);
+ if (err)
+ return err;
+
+	/* Set the SRAM write start; it matches the base address until data is written */
+ err = regmap_write(haptics->regmap,
+ AW86927_RAMADDRH_REG, AW86927_BASEADDRH_VAL);
+ if (err)
+ return err;
+
+ err = regmap_write(haptics->regmap,
+ AW86927_RAMADDRL_REG, AW86927_BASEADDRL_VAL);
+ if (err)
+ return err;
+
+ /* Write waveform header to SRAM */
+ err = regmap_noinc_write(haptics->regmap, AW86927_RAMDATA_REG,
+ &sram_waveform_header, sizeof(sram_waveform_header));
+ if (err)
+ return err;
+
+ /* Write waveform to SRAM */
+ err = regmap_noinc_write(haptics->regmap, AW86927_RAMDATA_REG,
+ aw86927_waveform, ARRAY_SIZE(aw86927_waveform));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_DETCFG2_REG,
+ AW86927_DETCFG2_DET_SEQ0_MASK,
+ FIELD_PREP(AW86927_DETCFG2_DET_SEQ0_MASK,
+ AW86927_DETCFG2_DET_SEQ0_VBAT));
+ if (err)
+ return err;
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_DETCFG1_REG,
+ AW86927_DETCFG1_DET_GO_MASK,
+ FIELD_PREP(AW86927_DETCFG1_DET_GO_MASK,
+ AW86927_DETCFG1_DET_GO_DET_SEQ0));
+ if (err)
+ return err;
+
+ usleep_range(3000, 3500);
+
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_DETCFG1_REG,
+ AW86927_DETCFG1_DET_GO_MASK,
+ FIELD_PREP(AW86927_DETCFG1_DET_GO_MASK,
+ AW86927_DETCFG1_DET_GO_NA));
+ if (err)
+ return err;
+
+ /* Disable SRAM init */
+ err = regmap_update_bits(haptics->regmap,
+ AW86927_SYSCTRL3_REG,
+ AW86927_SYSCTRL3_EN_RAMINIT_MASK,
+ FIELD_PREP(AW86927_SYSCTRL3_EN_RAMINIT_MASK,
+ AW86927_SYSCTRL3_EN_RAMINIT_OFF));
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static irqreturn_t aw86927_irq(int irq, void *data)
+{
+ struct aw86927_data *haptics = data;
+ struct device *dev = &haptics->client->dev;
+ unsigned int reg_val;
+ int err;
+
+ err = regmap_read(haptics->regmap, AW86927_SYSINT_REG, &reg_val);
+ if (err) {
+ dev_err(dev, "Failed to read SYSINT register: %d\n", err);
+ return IRQ_NONE;
+ }
+
+ if (reg_val & AW86927_SYSINT_BST_SCPI)
+ dev_err(dev, "Received a Short Circuit Protection interrupt\n");
+ if (reg_val & AW86927_SYSINT_BST_OVPI)
+ dev_err(dev, "Received an Over Voltage Protection interrupt\n");
+ if (reg_val & AW86927_SYSINT_UVLI)
+ dev_err(dev, "Received an Under Voltage Lock Out interrupt\n");
+ if (reg_val & AW86927_SYSINT_OCDI)
+ dev_err(dev, "Received an Over Current interrupt\n");
+ if (reg_val & AW86927_SYSINT_OTI)
+ dev_err(dev, "Received an Over Temperature interrupt\n");
+
+ if (reg_val & AW86927_SYSINT_DONEI)
+ dev_dbg(dev, "Chip playback done!\n");
+ if (reg_val & AW86927_SYSINT_FF_AFI)
+ dev_dbg(dev, "The RTP mode FIFO is almost full!\n");
+ if (reg_val & AW86927_SYSINT_FF_AEI)
+ dev_dbg(dev, "The RTP mode FIFO is almost empty!\n");
+
+ return IRQ_HANDLED;
+}
+
+static int aw86927_detect(struct aw86927_data *haptics)
+{
+ __be16 read_buf;
+ u16 chip_id;
+ int err;
+
+ err = regmap_bulk_read(haptics->regmap, AW86927_CHIPIDH_REG, &read_buf, 2);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to read CHIPID registers\n");
+
+ chip_id = be16_to_cpu(read_buf);
+
+ if (chip_id != AW86927_CHIPID) {
+ dev_err(haptics->dev, "Unexpected CHIPID value 0x%x\n", chip_id);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int aw86927_probe(struct i2c_client *client)
+{
+ struct aw86927_data *haptics;
+ int err;
+
+ haptics = devm_kzalloc(&client->dev, sizeof(struct aw86927_data), GFP_KERNEL);
+ if (!haptics)
+ return -ENOMEM;
+
+ haptics->dev = &client->dev;
+ haptics->client = client;
+
+ i2c_set_clientdata(client, haptics);
+
+ haptics->regmap = devm_regmap_init_i2c(client, &aw86927_regmap_config);
+ if (IS_ERR(haptics->regmap))
+ return dev_err_probe(haptics->dev, PTR_ERR(haptics->regmap),
+ "Failed to allocate register map\n");
+
+ haptics->input_dev = devm_input_allocate_device(haptics->dev);
+ if (!haptics->input_dev)
+ return -ENOMEM;
+
+ haptics->reset_gpio = devm_gpiod_get(haptics->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(haptics->reset_gpio))
+ return dev_err_probe(haptics->dev, PTR_ERR(haptics->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ /* Hardware reset */
+ aw86927_hw_reset(haptics);
+
+ /* Software reset */
+ err = regmap_write(haptics->regmap, AW86927_RSTCFG_REG, AW86927_RSTCFG_SOFTRST);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed Software reset\n");
+
+ /* Wait ~3ms until I2C is accessible */
+ usleep_range(3000, 3500);
+
+ err = aw86927_detect(haptics);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to find chip\n");
+
+ /* IRQ config */
+ err = regmap_write(haptics->regmap, AW86927_SYSCTRL4_REG,
+ FIELD_PREP(AW86927_SYSCTRL4_INT_MODE_MASK,
+ AW86927_SYSCTRL4_INT_MODE_EDGE) |
+ FIELD_PREP(AW86927_SYSCTRL4_INT_EDGE_MODE_MASK,
+ AW86927_SYSCTRL4_INT_EDGE_MODE_POS));
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to configure interrupt modes\n");
+
+ err = regmap_write(haptics->regmap, AW86927_SYSINTM_REG,
+ AW86927_SYSINTM_BST_OVPM |
+ AW86927_SYSINTM_FF_AEM |
+ AW86927_SYSINTM_FF_AFM |
+ AW86927_SYSINTM_DONEM);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to configure interrupt masks\n");
+
+ err = devm_request_threaded_irq(haptics->dev, client->irq, NULL,
+ aw86927_irq, IRQF_ONESHOT, NULL, haptics);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to request threaded irq\n");
+
+ INIT_WORK(&haptics->play_work, aw86927_haptics_play_work);
+
+ haptics->input_dev->name = "aw86927-haptics";
+ haptics->input_dev->close = aw86927_close;
+
+ input_set_drvdata(haptics->input_dev, haptics);
+ input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);
+
+ err = input_ff_create_memless(haptics->input_dev, NULL, aw86927_haptics_play);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to create FF dev\n");
+
+ /* Set up registers */
+ err = aw86927_play_mode(haptics, AW86927_STANDBY_MODE);
+ if (err)
+ return dev_err_probe(haptics->dev, err,
+ "Failed to enter standby for Haptic init\n");
+
+ err = aw86927_haptic_init(haptics);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Haptic init failed\n");
+
+ /* RAM init, upload the waveform for playback */
+ err = aw86927_ram_init(haptics);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to init aw86927 sram\n");
+
+ err = input_register_device(haptics->input_dev);
+ if (err)
+ return dev_err_probe(haptics->dev, err, "Failed to register input device\n");
+
+ return 0;
+}
+
+static const struct of_device_id aw86927_of_id[] = {
+ { .compatible = "awinic,aw86927" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, aw86927_of_id);
+
+static struct i2c_driver aw86927_driver = {
+ .driver = {
+ .name = "aw86927-haptics",
+ .of_match_table = aw86927_of_id,
+ },
+ .probe = aw86927_probe,
+};
+
+module_i2c_driver(aw86927_driver);
+
+MODULE_AUTHOR("Griffin Kroah-Hartman <griffin.kroah@fairphone.com>");
+MODULE_DESCRIPTION("AWINIC AW86927 LRA Haptic Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index cfc12332bee1..b4232b0a3957 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -6,6 +6,7 @@
* Author: Hemanth V <hemanthv@ti.com>
*/
+#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
diff --git a/drivers/input/misc/max7360-rotary.c b/drivers/input/misc/max7360-rotary.c
new file mode 100644
index 000000000000..385831ef34b6
--- /dev/null
+++ b/drivers/input/misc/max7360-rotary.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2025 Bootlin
+ *
+ * Author: Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device/devres.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/max7360.h>
+#include <linux/property.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#define MAX7360_ROTARY_DEFAULT_STEPS 24
+
+struct max7360_rotary {
+ struct input_dev *input;
+ struct regmap *regmap;
+ unsigned int debounce_ms;
+
+ unsigned int pos;
+
+ u32 steps;
+ u32 axis;
+ bool relative_axis;
+ bool rollover;
+};
+
+static void max7360_rotary_report_event(struct max7360_rotary *max7360_rotary, int steps)
+{
+ if (max7360_rotary->relative_axis) {
+ input_report_rel(max7360_rotary->input, max7360_rotary->axis, steps);
+ } else {
+ int pos = max7360_rotary->pos;
+ int maxval = max7360_rotary->steps;
+
+		/*
+		 * Add steps to the position.
+		 * Reducing steps modulo maxval keeps the increment in the
+		 * (-maxval, maxval) interval, so (pos + maxval) is always >= 0.
+		 * Then fold pos back into the [0, maxval) interval.
+		 */
+ pos += steps % maxval;
+ if (max7360_rotary->rollover)
+ pos = (pos + maxval) % maxval;
+ else
+ pos = clamp(pos, 0, maxval - 1);
+
+ max7360_rotary->pos = pos;
+ input_report_abs(max7360_rotary->input, max7360_rotary->axis, max7360_rotary->pos);
+ }
+
+ input_sync(max7360_rotary->input);
+}
+
+static irqreturn_t max7360_rotary_irq(int irq, void *data)
+{
+ struct max7360_rotary *max7360_rotary = data;
+ struct device *dev = max7360_rotary->input->dev.parent;
+ unsigned int val;
+ int error;
+
+ error = regmap_read(max7360_rotary->regmap, MAX7360_REG_RTR_CNT, &val);
+ if (error < 0) {
+ dev_err(dev, "Failed to read rotary counter\n");
+ return IRQ_NONE;
+ }
+
+ if (val == 0)
+ return IRQ_NONE;
+
+ max7360_rotary_report_event(max7360_rotary, sign_extend32(val, 7));
+
+ return IRQ_HANDLED;
+}
+
+static int max7360_rotary_hw_init(struct max7360_rotary *max7360_rotary)
+{
+ struct device *dev = max7360_rotary->input->dev.parent;
+ int val;
+ int error;
+
+ val = FIELD_PREP(MAX7360_ROT_DEBOUNCE, max7360_rotary->debounce_ms) |
+ FIELD_PREP(MAX7360_ROT_INTCNT, 1) | MAX7360_ROT_INTCNT_DLY;
+ error = regmap_write(max7360_rotary->regmap, MAX7360_REG_RTRCFG, val);
+ if (error)
+ dev_err(dev, "Failed to set max7360 rotary encoder configuration\n");
+
+ return error;
+}
+
+static int max7360_rotary_probe(struct platform_device *pdev)
+{
+ struct max7360_rotary *max7360_rotary;
+ struct device *dev = &pdev->dev;
+ struct input_dev *input;
+ struct regmap *regmap;
+ int irq;
+ int error;
+
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return dev_err_probe(dev, -ENODEV, "Could not get parent regmap\n");
+
+ irq = fwnode_irq_get_byname(dev_fwnode(dev->parent), "inti");
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "Failed to get IRQ\n");
+
+ max7360_rotary = devm_kzalloc(dev, sizeof(*max7360_rotary), GFP_KERNEL);
+ if (!max7360_rotary)
+ return -ENOMEM;
+
+ max7360_rotary->regmap = regmap;
+
+ device_property_read_u32(dev->parent, "linux,axis", &max7360_rotary->axis);
+ max7360_rotary->rollover = device_property_read_bool(dev->parent,
+ "rotary-encoder,rollover");
+ max7360_rotary->relative_axis =
+ device_property_read_bool(dev->parent, "rotary-encoder,relative-axis");
+
+ error = device_property_read_u32(dev->parent, "rotary-encoder,steps",
+ &max7360_rotary->steps);
+ if (error)
+ max7360_rotary->steps = MAX7360_ROTARY_DEFAULT_STEPS;
+
+ device_property_read_u32(dev->parent, "rotary-debounce-delay-ms",
+ &max7360_rotary->debounce_ms);
+ if (max7360_rotary->debounce_ms > MAX7360_ROT_DEBOUNCE_MAX)
+ return dev_err_probe(dev, -EINVAL, "Invalid debounce timing: %u\n",
+ max7360_rotary->debounce_ms);
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
+ max7360_rotary->input = input;
+
+ input->id.bustype = BUS_I2C;
+ input->name = pdev->name;
+
+ if (max7360_rotary->relative_axis)
+ input_set_capability(input, EV_REL, max7360_rotary->axis);
+ else
+ input_set_abs_params(input, max7360_rotary->axis, 0, max7360_rotary->steps, 0, 1);
+
+ error = devm_request_threaded_irq(dev, irq, NULL, max7360_rotary_irq,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "max7360-rotary", max7360_rotary);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to register interrupt\n");
+
+ error = input_register_device(input);
+ if (error)
+ return dev_err_probe(dev, error, "Could not register input device\n");
+
+ error = max7360_rotary_hw_init(max7360_rotary);
+ if (error)
+ return dev_err_probe(dev, error, "Failed to initialize max7360 rotary\n");
+
+ device_init_wakeup(dev, true);
+ error = dev_pm_set_wake_irq(dev, irq);
+ if (error)
+ dev_warn(dev, "Failed to set up wakeup irq: %d\n", error);
+
+ return 0;
+}
+
+static void max7360_rotary_remove(struct platform_device *pdev)
+{
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+}
+
+static struct platform_driver max7360_rotary_driver = {
+ .driver = {
+ .name = "max7360-rotary",
+ },
+ .probe = max7360_rotary_probe,
+ .remove = max7360_rotary_remove,
+};
+module_platform_driver(max7360_rotary_driver);
+
+MODULE_DESCRIPTION("MAX7360 Rotary driver");
+MODULE_AUTHOR("Mathieu Dubois-Briand <mathieu.dubois-briand@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 1c7faa9b7afe..b83d762ae2e9 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -57,7 +57,6 @@ static irqreturn_t button_irq(int irq, void *_priv)
struct mc13783_pwrb *priv = _priv;
int val;
- mc13xxx_irq_ack(priv->mc13783, irq);
mc13xxx_reg_read(priv->mc13783, MC13783_REG_INTERRUPT_SENSE_1, &val);
switch (irq) {
diff --git a/drivers/input/misc/pf1550-onkey.c b/drivers/input/misc/pf1550-onkey.c
new file mode 100644
index 000000000000..9be6377151cb
--- /dev/null
+++ b/drivers/input/misc/pf1550-onkey.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the PF1550 ONKEY
+ * Copyright (C) 2016 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Portions Copyright (c) 2025 Savoir-faire Linux Inc.
+ * Samuel Kayode <samuel.kayode@savoirfairelinux.com>
+ */
+
+#include <linux/err.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/pf1550.h>
+#include <linux/platform_device.h>
+
+#define PF1550_ONKEY_IRQ_NR 6
+
+struct onkey_drv_data {
+ struct device *dev;
+ const struct pf1550_ddata *pf1550;
+ bool wakeup;
+ struct input_dev *input;
+};
+
+static irqreturn_t pf1550_onkey_irq_handler(int irq, void *data)
+{
+ struct onkey_drv_data *onkey = data;
+ struct platform_device *pdev = to_platform_device(onkey->dev);
+ int i, state, irq_type = -1;
+
+ for (i = 0; i < PF1550_ONKEY_IRQ_NR; i++)
+ if (irq == platform_get_irq(pdev, i))
+ irq_type = i;
+
+ switch (irq_type) {
+ case PF1550_ONKEY_IRQ_PUSHI:
+ state = 0;
+ break;
+ case PF1550_ONKEY_IRQ_1SI:
+ case PF1550_ONKEY_IRQ_2SI:
+ case PF1550_ONKEY_IRQ_3SI:
+ case PF1550_ONKEY_IRQ_4SI:
+ case PF1550_ONKEY_IRQ_8SI:
+ state = 1;
+ break;
+ default:
+		dev_err(onkey->dev, "unexpected onkey interrupt: irq %d\n",
+			irq_type);
+ return IRQ_HANDLED;
+ }
+
+ input_event(onkey->input, EV_KEY, KEY_POWER, state);
+ input_sync(onkey->input);
+
+ return IRQ_HANDLED;
+}
+
+static int pf1550_onkey_probe(struct platform_device *pdev)
+{
+ struct onkey_drv_data *onkey;
+ struct input_dev *input;
+ bool key_power = false;
+ int i, irq, error;
+
+ onkey = devm_kzalloc(&pdev->dev, sizeof(*onkey), GFP_KERNEL);
+ if (!onkey)
+ return -ENOMEM;
+
+ onkey->dev = &pdev->dev;
+
+ onkey->pf1550 = dev_get_drvdata(pdev->dev.parent);
+ if (!onkey->pf1550->regmap)
+ return dev_err_probe(&pdev->dev, -ENODEV,
+ "failed to get regmap\n");
+
+ onkey->wakeup = device_property_read_bool(pdev->dev.parent,
+ "wakeup-source");
+
+ if (device_property_read_bool(pdev->dev.parent,
+ "nxp,disable-key-power")) {
+ error = regmap_clear_bits(onkey->pf1550->regmap,
+ PF1550_PMIC_REG_PWRCTRL1,
+ PF1550_ONKEY_RST_EN);
+ if (error)
+ return dev_err_probe(&pdev->dev, error,
+ "failed: disable turn system off");
+ } else {
+ key_power = true;
+ }
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return dev_err_probe(&pdev->dev, -ENOMEM,
+ "failed to allocate the input device\n");
+
+ input->name = pdev->name;
+ input->phys = "pf1550-onkey/input0";
+ input->id.bustype = BUS_HOST;
+
+ if (key_power)
+ input_set_capability(input, EV_KEY, KEY_POWER);
+
+ onkey->input = input;
+ platform_set_drvdata(pdev, onkey);
+
+ for (i = 0; i < PF1550_ONKEY_IRQ_NR; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ error = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ pf1550_onkey_irq_handler,
+ IRQF_NO_SUSPEND,
+ "pf1550-onkey", onkey);
+ if (error)
+ return dev_err_probe(&pdev->dev, error,
+ "failed: irq request (IRQ: %d)\n",
+ i);
+ }
+
+ error = input_register_device(input);
+ if (error)
+ return dev_err_probe(&pdev->dev, error,
+ "failed to register input device\n");
+
+ device_init_wakeup(&pdev->dev, onkey->wakeup);
+
+ return 0;
+}
+
+static int pf1550_onkey_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct onkey_drv_data *onkey = platform_get_drvdata(pdev);
+ int i, irq;
+
+ if (!device_may_wakeup(&pdev->dev))
+ regmap_write(onkey->pf1550->regmap,
+ PF1550_PMIC_REG_ONKEY_INT_MASK0,
+ ONKEY_IRQ_PUSHI | ONKEY_IRQ_1SI | ONKEY_IRQ_2SI |
+ ONKEY_IRQ_3SI | ONKEY_IRQ_4SI | ONKEY_IRQ_8SI);
+ else
+ for (i = 0; i < PF1550_ONKEY_IRQ_NR; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ enable_irq_wake(irq);
+ }
+
+ return 0;
+}
+
+static int pf1550_onkey_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct onkey_drv_data *onkey = platform_get_drvdata(pdev);
+ int i, irq;
+
+ if (!device_may_wakeup(&pdev->dev))
+ regmap_write(onkey->pf1550->regmap,
+ PF1550_PMIC_REG_ONKEY_INT_MASK0,
+ ~((u8)(ONKEY_IRQ_PUSHI | ONKEY_IRQ_1SI |
+ ONKEY_IRQ_2SI | ONKEY_IRQ_3SI | ONKEY_IRQ_4SI |
+ ONKEY_IRQ_8SI)));
+ else
+ for (i = 0; i < PF1550_ONKEY_IRQ_NR; i++) {
+ irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ disable_irq_wake(irq);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pf1550_onkey_pm_ops, pf1550_onkey_suspend,
+ pf1550_onkey_resume);
+
+static const struct platform_device_id pf1550_onkey_id[] = {
+ { "pf1550-onkey", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, pf1550_onkey_id);
+
+static struct platform_driver pf1550_onkey_driver = {
+ .driver = {
+ .name = "pf1550-onkey",
+ .pm = pm_sleep_ptr(&pf1550_onkey_pm_ops),
+ },
+ .probe = pf1550_onkey_probe,
+ .id_table = pf1550_onkey_id,
+};
+module_platform_driver(pf1550_onkey_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("PF1550 onkey Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index d952c16f2458..53249d2c081f 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -60,6 +60,7 @@ struct pm8941_data {
bool supports_ps_hold_poff_config;
bool supports_debounce_config;
bool has_pon_pbs;
+ bool wakeup_source_default;
const char *name;
const char *phys;
};
@@ -245,7 +246,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(pm8941_pwr_key_pm_ops,
static int pm8941_pwrkey_probe(struct platform_device *pdev)
{
struct pm8941_pwrkey *pwrkey;
- bool pull_up;
+ bool pull_up, wakeup;
struct device *parent;
struct device_node *regmap_node;
const __be32 *addr;
@@ -402,8 +403,11 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
}
}
+ wakeup = pwrkey->data->wakeup_source_default ||
+ of_property_read_bool(pdev->dev.of_node, "wakeup-source");
+
platform_set_drvdata(pdev, pwrkey);
- device_init_wakeup(&pdev->dev, 1);
+ device_init_wakeup(&pdev->dev, wakeup);
return 0;
}
@@ -424,6 +428,7 @@ static const struct pm8941_data pwrkey_data = {
.supports_ps_hold_poff_config = true,
.supports_debounce_config = true,
.has_pon_pbs = false,
+ .wakeup_source_default = true,
};
static const struct pm8941_data resin_data = {
@@ -434,6 +439,7 @@ static const struct pm8941_data resin_data = {
.supports_ps_hold_poff_config = true,
.supports_debounce_config = true,
.has_pon_pbs = false,
+ .wakeup_source_default = false,
};
static const struct pm8941_data pon_gen3_pwrkey_data = {
@@ -443,6 +449,7 @@ static const struct pm8941_data pon_gen3_pwrkey_data = {
.supports_ps_hold_poff_config = false,
.supports_debounce_config = false,
.has_pon_pbs = true,
+ .wakeup_source_default = true,
};
static const struct pm8941_data pon_gen3_resin_data = {
@@ -452,6 +459,7 @@ static const struct pm8941_data pon_gen3_resin_data = {
.supports_ps_hold_poff_config = false,
.supports_debounce_config = false,
.has_pon_pbs = true,
+ .wakeup_source_default = false,
};
static const struct of_device_id pm8941_pwr_key_id_table[] = {
diff --git a/drivers/input/misc/tps6594-pwrbutton.c b/drivers/input/misc/tps6594-pwrbutton.c
new file mode 100644
index 000000000000..cd039b3866dc
--- /dev/null
+++ b/drivers/input/misc/tps6594-pwrbutton.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * power button driver for TI TPS6594 PMICs
+ *
+ * Copyright (C) 2025 Critical Link LLC - https://www.criticallink.com/
+ */
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps6594.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+struct tps6594_pwrbutton {
+ struct device *dev;
+ struct input_dev *idev;
+ char phys[32];
+};
+
+static irqreturn_t tps6594_pb_push_irq(int irq, void *_pwr)
+{
+ struct tps6594_pwrbutton *pwr = _pwr;
+
+ input_report_key(pwr->idev, KEY_POWER, 1);
+ pm_wakeup_event(pwr->dev, 0);
+ input_sync(pwr->idev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tps6594_pb_release_irq(int irq, void *_pwr)
+{
+ struct tps6594_pwrbutton *pwr = _pwr;
+
+ input_report_key(pwr->idev, KEY_POWER, 0);
+ input_sync(pwr->idev);
+
+ return IRQ_HANDLED;
+}
+
+static int tps6594_pb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tps6594_pwrbutton *pwr;
+ struct input_dev *idev;
+ int error;
+ int push_irq;
+ int release_irq;
+
+ pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
+ if (!pwr)
+ return -ENOMEM;
+
+ idev = devm_input_allocate_device(dev);
+ if (!idev)
+ return -ENOMEM;
+
+ idev->name = pdev->name;
+ snprintf(pwr->phys, sizeof(pwr->phys), "%s/input0",
+ pdev->name);
+ idev->phys = pwr->phys;
+ idev->id.bustype = BUS_I2C;
+
+ input_set_capability(idev, EV_KEY, KEY_POWER);
+
+ pwr->dev = dev;
+ pwr->idev = idev;
+ device_init_wakeup(dev, true);
+
+ push_irq = platform_get_irq(pdev, 0);
+ if (push_irq < 0)
+ return -EINVAL;
+
+ release_irq = platform_get_irq(pdev, 1);
+ if (release_irq < 0)
+ return -EINVAL;
+
+ error = devm_request_threaded_irq(dev, push_irq, NULL,
+ tps6594_pb_push_irq,
+ IRQF_ONESHOT,
+ pdev->resource[0].name, pwr);
+ if (error) {
+ dev_err(dev, "failed to request push IRQ #%d: %d\n", push_irq,
+ error);
+ return error;
+ }
+
+ error = devm_request_threaded_irq(dev, release_irq, NULL,
+ tps6594_pb_release_irq,
+ IRQF_ONESHOT,
+ pdev->resource[1].name, pwr);
+ if (error) {
+ dev_err(dev, "failed to request release IRQ #%d: %d\n",
+ release_irq, error);
+ return error;
+ }
+
+ error = input_register_device(idev);
+ if (error) {
+ dev_err(dev, "Can't register power button: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct platform_device_id tps6594_pwrbtn_id_table[] = {
+ { "tps6594-pwrbutton", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, tps6594_pwrbtn_id_table);
+
+static struct platform_driver tps6594_pb_driver = {
+ .probe = tps6594_pb_probe,
+ .driver = {
+ .name = "tps6594_pwrbutton",
+ },
+ .id_table = tps6594_pwrbtn_id_table,
+};
+module_platform_driver(tps6594_pb_driver);
+
+MODULE_DESCRIPTION("TPS6594 Power Button");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 2c51ea9d01d7..13336a2fd49c 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -775,6 +775,7 @@ static int uinput_ff_upload_to_user(char __user *buffer,
if (in_compat_syscall()) {
struct uinput_ff_upload_compat ff_up_compat;
+ memset(&ff_up_compat, 0, sizeof(ff_up_compat));
ff_up_compat.request_id = ff_up->request_id;
ff_up_compat.retval = ff_up->retval;
/*
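
The memset() added to uinput_ff_upload_to_user() closes a classic compat-path
information leak: a struct declared on the kernel stack can contain padding
bytes that field-by-field assignment never initializes, and copying the struct
out verbatim would hand that stale stack data to userspace. A minimal
illustration of the pattern, using a hypothetical struct rather than the
actual uinput layout:

    #include <string.h>

    /* Hypothetical struct with implicit padding after 'flag'. */
    struct demo_compat {
    	char flag;   /* 3 padding bytes typically follow */
    	int value;
    };

    static void fill_reply(struct demo_compat *out, char flag, int value)
    {
    	/*
    	 * Zero the whole struct first: assigning only the named fields
    	 * leaves the padding holding whatever was on the stack, which
    	 * would leak out when the struct is copied to userspace.
    	 */
    	memset(out, 0, sizeof(*out));
    	out->flag = flag;
    	out->value = value;
    }
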
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
index b7fe6eb35a4e..ea3eb87a89af 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.c
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011 Unixphere
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/of.h>
diff --git a/drivers/input/rmi4/rmi_2d_sensor.h b/drivers/input/rmi4/rmi_2d_sensor.h
index 7d335d809710..61a99c8a7a26 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.h
+++ b/drivers/input/rmi4/rmi_2d_sensor.h
@@ -7,6 +7,9 @@
#ifndef _RMI_2D_SENSOR_H
#define _RMI_2D_SENSOR_H
+#include <linux/rmi.h>
+#include <linux/types.h>
+
enum rmi_2d_sensor_object_type {
RMI_2D_OBJECT_NONE,
RMI_2D_OBJECT_FINGER,
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index 5f98c3bcfd46..b85ee9db87b0 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011 Unixphere
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/irq.h>
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 2168b6cd7167..ccd9338a44db 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -21,6 +21,7 @@
#include <linux/irqdomain.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
+#include <linux/export.h>
#include "rmi_bus.h"
#include "rmi_driver.h"
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 17edc1597446..c7ef347a4dff 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -276,8 +276,8 @@ config SERIO_OLPC_APSP
config HYPERV_KEYBOARD
tristate "Microsoft Synthetic Keyboard driver"
- depends on HYPERV
- default HYPERV
+ depends on HYPERV_VMBUS
+ default HYPERV_VMBUS
help
Select this option to enable the Hyper-V Keyboard driver.
diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c
index 94e8bcbbf94d..3fedfc5abc73 100644
--- a/drivers/input/serio/hil_mlc.c
+++ b/drivers/input/serio/hil_mlc.c
@@ -54,6 +54,7 @@
#include <linux/hil_mlc.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 0eec4c5585cb..1461ef319f92 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -63,6 +63,7 @@
#include <linux/hp_sdc.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ioport.h>
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index cab5a4c5baf5..c135254665b6 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -10,6 +10,7 @@
#include <linux/types.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index c22ea532276e..269df83a167d 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c
index 93769910ce24..46fb7667b244 100644
--- a/drivers/input/serio/ps2-gpio.c
+++ b/drivers/input/serio/ps2-gpio.c
@@ -50,7 +50,7 @@
* interrupt interval should be ~60us. Let's allow +/- 20us for frequency
* deviations and interrupt latency.
*
- * The data line must be samples after ~30us to 50us after the falling edge,
+ * The data line must be sampled ~30us to 50us after the falling edge,
* since the device updates the data line at the rising edge.
*
* ___ ______ ______ ______ ___
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 4468018cef66..2b5ddc5dac19 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/export.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c
index 96f23ae57d5a..164f8fcfd1aa 100644
--- a/drivers/input/sparse-keymap.c
+++ b/drivers/input/sparse-keymap.c
@@ -10,6 +10,7 @@
* Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
*/
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/input/sparse-keymap.h>
#include <linux/module.h>
diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c
index 8d6b71d59793..eabb4a0b8a0d 100644
--- a/drivers/input/tablet/pegasus_notetaker.c
+++ b/drivers/input/tablet/pegasus_notetaker.c
@@ -63,6 +63,9 @@
#define BUTTON_PRESSED 0xb5
#define COMMAND_VERSION 0xa9
+/* 1 Status + 1 Color + 2 X + 2 Y = 6 bytes */
+#define NOTETAKER_PACKET_SIZE 6
+
/* in xy data packet */
#define BATTERY_NO_REPORT 0x40
#define BATTERY_LOW 0x41
@@ -311,6 +314,12 @@ static int pegasus_probe(struct usb_interface *intf,
}
pegasus->data_len = usb_maxpacket(dev, pipe);
+ if (pegasus->data_len < NOTETAKER_PACKET_SIZE) {
+ dev_err(&intf->dev, "packet size is too small (%d)\n",
+ pegasus->data_len);
+ error = -EINVAL;
+ goto err_free_mem;
+ }
pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
&pegasus->data_dma);
diff --git a/drivers/input/touch-overlay.c b/drivers/input/touch-overlay.c
index 8806373f7a4a..b9fd82c4829d 100644
--- a/drivers/input/touch-overlay.c
+++ b/drivers/input/touch-overlay.c
@@ -5,6 +5,7 @@
* Copyright (c) 2023 Javier Carrasco <javier.carrasco@wolfvision.net>
*/
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touch-overlay.h>
diff --git a/drivers/input/touchscreen.c b/drivers/input/touchscreen.c
index 4620e20d0190..d699b24bb548 100644
--- a/drivers/input/touchscreen.c
+++ b/drivers/input/touchscreen.c
@@ -6,6 +6,7 @@
* Copyright (c) 2014 Sebastian Reichel <sre@kernel.org>
*/
+#include <linux/export.h>
#include <linux/property.h>
#include <linux/input.h>
#include <linux/input/mt.h>
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 196905162945..7d5b72ee07fa 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -441,6 +441,16 @@ config TOUCHSCREEN_HIDEEP
To compile this driver as a module, choose M here : the
module will be called hideep_ts.
+config TOUCHSCREEN_HIMAX_HX852X
+ tristate "Himax HX852x(ES) touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a Himax HX852x(ES) touchscreen.
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the module
+ will be called himax_hx852x.
+
config TOUCHSCREEN_HYCON_HY46XX
tristate "Hycon hy46xx touchscreen support"
depends on I2C
@@ -465,6 +475,18 @@ config TOUCHSCREEN_HYNITRON_CSTXXX
To compile this driver as a module, choose M here: the
module will be called hynitron-cstxxx.
+config TOUCHSCREEN_HYNITRON_CST816X
+ tristate "Hynitron CST816x touchscreen"
+ depends on I2C
+ help
+ Say Y here if you have a touchscreen using a Hynitron
+ CST816x series touchscreen controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hynitron-cst816x.
+
config TOUCHSCREEN_ILI210X
tristate "Ilitek ILI210X based touchscreen"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 97a025c6a377..ab9abd151078 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -49,7 +49,9 @@ obj-$(CONFIG_TOUCHSCREEN_GOODIX_BERLIN_CORE) += goodix_berlin_core.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX_BERLIN_I2C) += goodix_berlin_i2c.o
obj-$(CONFIG_TOUCHSCREEN_GOODIX_BERLIN_SPI) += goodix_berlin_spi.o
obj-$(CONFIG_TOUCHSCREEN_HIDEEP) += hideep.o
+obj-$(CONFIG_TOUCHSCREEN_HIMAX_HX852X) += himax_hx852x.o
obj-$(CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX) += hynitron_cstxxx.o
+obj-$(CONFIG_TOUCHSCREEN_HYNITRON_CST816X) += hynitron-cst816x.o
obj-$(CONFIG_TOUCHSCREEN_ILI210X) += ili210x.o
obj-$(CONFIG_TOUCHSCREEN_ILITEK) += ilitek_ts_i2c.o
obj-$(CONFIG_TOUCHSCREEN_IMAGIS) += imagis.o
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 8b4f3e3660b8..4c448f39bf57 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -22,6 +22,7 @@
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 322d5a3d40a0..dd0544cc1bc1 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -19,6 +19,7 @@
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
@@ -355,6 +356,8 @@ struct mxt_data {
enum mxt_suspend_mode suspend_mode;
u32 wakeup_method;
+
+ struct touchscreen_properties prop;
};
struct mxt_vb2_buffer {
@@ -888,8 +891,7 @@ static void mxt_proc_t9_message(struct mxt_data *data, u8 *message)
/* Touch active */
input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, 1);
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ touchscreen_report_pos(input_dev, &data->prop, x, y, true);
input_report_abs(input_dev, ABS_MT_PRESSURE, amplitude);
input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, area);
} else {
@@ -1010,8 +1012,7 @@ static void mxt_proc_t100_message(struct mxt_data *data, u8 *message)
id, type, x, y, major, pressure, orientation);
input_mt_report_slot_state(input_dev, tool, 1);
- input_report_abs(input_dev, ABS_MT_POSITION_X, x);
- input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ touchscreen_report_pos(input_dev, &data->prop, x, y, true);
input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, major);
input_report_abs(input_dev, ABS_MT_PRESSURE, pressure);
input_report_abs(input_dev, ABS_MT_DISTANCE, distance);
@@ -2212,6 +2213,8 @@ static int mxt_initialize_input_device(struct mxt_data *data)
0, 255, 0, 0);
}
+ touchscreen_parse_properties(input_dev, true, &data->prop);
+
/* For T15 and T97 Key Array */
if (data->T15_reportid_min || data->T97_reportid_min) {
for (i = 0; i < data->t15_num_keys; i++)
@@ -3317,7 +3320,7 @@ static int mxt_probe(struct i2c_client *client)
if (data->reset_gpio) {
/* Wait a while and then de-assert the RESET GPIO line */
msleep(MXT_RESET_GPIO_TIME);
- gpiod_set_value(data->reset_gpio, 0);
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
msleep(MXT_RESET_INVALID_CHG);
}
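
The atmel_mxt_ts hunks above are the standard conversion to the generic
touchscreen helpers: touchscreen_parse_properties() reads the
touchscreen-inverted-x/y and touchscreen-swapped-x-y device properties once at
init time, and touchscreen_report_pos() applies them on every coordinate
report. A condensed sketch of the usual pattern (device specifics and error
handling elided; the example_* names are placeholders):

    #include <linux/input.h>
    #include <linux/input/touchscreen.h>

    static struct touchscreen_properties example_prop;

    static void example_init(struct input_dev *input)
    {
    	/* Parses inversion/swap properties from the firmware node;
    	 * 'true' selects the multitouch (ABS_MT_*) axis ranges. */
    	touchscreen_parse_properties(input, true, &example_prop);
    }

    static void example_report(struct input_dev *input,
    			   unsigned int x, unsigned int y)
    {
    	/* Applies the parsed inversion/swap before reporting. */
    	touchscreen_report_pos(input, &example_prop, x, y, true);
    }
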
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index b8ce6012364c..9e729910fbc8 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -14,6 +14,7 @@
*/
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/input.h>
#include <linux/input/mt.h>
#include <linux/input/touchscreen.h>
diff --git a/drivers/input/touchscreen/fsl-imx25-tcq.c b/drivers/input/touchscreen/fsl-imx25-tcq.c
index a32708652d10..ff270b3b8572 100644
--- a/drivers/input/touchscreen/fsl-imx25-tcq.c
+++ b/drivers/input/touchscreen/fsl-imx25-tcq.c
@@ -39,7 +39,6 @@ struct mx25_tcq_priv {
};
static const struct regmap_config mx25_tcq_regconfig = {
- .fast_io = true,
.max_register = 0x5c,
.reg_bits = 32,
.val_bits = 32,
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 252dcae039f8..f8798d11ec03 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -796,17 +796,6 @@ int goodix_reset_no_int_sync(struct goodix_ts_data *ts)
usleep_range(6000, 10000); /* T4: > 5ms */
- /*
- * Put the reset pin back in to input / high-impedance mode to save
- * power. Only do this in the non ACPI case since some ACPI boards
- * don't have a pull-up, so there the reset pin must stay active-high.
- */
- if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_GPIO) {
- error = gpiod_direction_input(ts->gpiod_rst);
- if (error)
- goto error;
- }
-
return 0;
error:
@@ -957,14 +946,6 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
return -EINVAL;
}
- /*
- * Normally we put the reset pin in input / high-impedance mode to save
- * power. But some x86/ACPI boards don't have a pull-up, so for the ACPI
- * case, leave the pin as is. This results in the pin not being touched
- * at all on x86/ACPI boards, except when needed for error-recover.
- */
- ts->gpiod_rst_flags = GPIOD_ASIS;
-
return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping);
}
#else
@@ -989,12 +970,6 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
return -EINVAL;
dev = &ts->client->dev;
- /*
- * By default we request the reset pin as input, leaving it in
- * high-impedance when not resetting the controller to save power.
- */
- ts->gpiod_rst_flags = GPIOD_IN;
-
ts->avdd28 = devm_regulator_get(dev, "AVDD28");
if (IS_ERR(ts->avdd28))
return dev_err_probe(dev, PTR_ERR(ts->avdd28), "Failed to get AVDD28 regulator\n");
@@ -1019,7 +994,7 @@ retry_get_irq_gpio:
ts->gpiod_int = gpiod;
/* Get the reset line GPIO pin number */
- gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
+ gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_ASIS);
if (IS_ERR(gpiod))
return dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get %s GPIO\n",
GOODIX_GPIO_RST_NAME);
@@ -1557,6 +1532,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
static const struct acpi_device_id goodix_acpi_match[] = {
{ "GDIX1001", 0 },
{ "GDIX1002", 0 },
+ { "GDIX1003", 0 },
{ "GDX9110", 0 },
{ }
};
diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h
index 87797cc88b32..0d1e8a8d2cba 100644
--- a/drivers/input/touchscreen/goodix.h
+++ b/drivers/input/touchscreen/goodix.h
@@ -88,7 +88,6 @@ struct goodix_ts_data {
struct gpio_desc *gpiod_rst;
int gpio_count;
int gpio_int_idx;
- enum gpiod_flags gpiod_rst_flags;
char id[GOODIX_ID_MAX_LEN + 1];
char cfg_name[64];
u16 version;
diff --git a/drivers/input/touchscreen/goodix_berlin_core.c b/drivers/input/touchscreen/goodix_berlin_core.c
index c78d512d97cd..83f28b870531 100644
--- a/drivers/input/touchscreen/goodix_berlin_core.c
+++ b/drivers/input/touchscreen/goodix_berlin_core.c
@@ -24,6 +24,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/input/mt.h>
diff --git a/drivers/input/touchscreen/himax_hx852x.c b/drivers/input/touchscreen/himax_hx852x.c
new file mode 100644
index 000000000000..83c60e137a55
--- /dev/null
+++ b/drivers/input/touchscreen/himax_hx852x.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Himax HX852x(ES) Touchscreen Driver
+ * Copyright (c) 2020-2024 Stephan Gerhold <stephan@gerhold.net>
+ * Copyright (c) 2020 Jonathan Albrieux <jonathan.albrieux@gmail.com>
+ *
+ * Based on the Himax Android Driver Sample Code Ver 0.3 for HMX852xES chipset:
+ * Copyright (c) 2014 Himax Corporation.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
+#define HX852X_COORD_SIZE(fingers) ((fingers) * sizeof(struct hx852x_coord))
+#define HX852X_WIDTH_SIZE(fingers) ALIGN(fingers, 4)
+#define HX852X_BUF_SIZE(fingers) (HX852X_COORD_SIZE(fingers) + \
+ HX852X_WIDTH_SIZE(fingers) + \
+ sizeof(struct hx852x_touch_info))
+
+#define HX852X_MAX_FINGERS 12
+#define HX852X_MAX_KEY_COUNT 4
+#define HX852X_MAX_BUF_SIZE HX852X_BUF_SIZE(HX852X_MAX_FINGERS)
+
+#define HX852X_TS_SLEEP_IN 0x80
+#define HX852X_TS_SLEEP_OUT 0x81
+#define HX852X_TS_SENSE_OFF 0x82
+#define HX852X_TS_SENSE_ON 0x83
+#define HX852X_READ_ONE_EVENT 0x85
+#define HX852X_READ_ALL_EVENTS 0x86
+#define HX852X_READ_LATEST_EVENT 0x87
+#define HX852X_CLEAR_EVENT_STACK 0x88
+
+#define HX852X_REG_SRAM_SWITCH 0x8c
+#define HX852X_REG_SRAM_ADDR 0x8b
+#define HX852X_REG_FLASH_RPLACE 0x5a
+
+#define HX852X_SRAM_SWITCH_TEST_MODE 0x14
+#define HX852X_SRAM_ADDR_CONFIG 0x7000
+
+struct hx852x {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ struct touchscreen_properties props;
+ struct gpio_desc *reset_gpiod;
+ struct regulator_bulk_data supplies[2];
+ unsigned int max_fingers;
+ unsigned int keycount;
+ unsigned int keycodes[HX852X_MAX_KEY_COUNT];
+};
+
+struct hx852x_config {
+ u8 rx_num;
+ u8 tx_num;
+ u8 max_pt;
+ u8 padding1[3];
+ __be16 x_res;
+ __be16 y_res;
+ u8 padding2[2];
+} __packed __aligned(4);
+
+struct hx852x_coord {
+ __be16 x;
+ __be16 y;
+} __packed __aligned(4);
+
+struct hx852x_touch_info {
+ u8 finger_num;
+ __le16 finger_pressed;
+ u8 padding;
+} __packed __aligned(4);
+
+static int hx852x_i2c_read(struct hx852x *hx, u8 cmd, void *data, u16 len)
+{
+ struct i2c_client *client = hx->client;
+ int error;
+ int ret;
+
+ struct i2c_msg msg[] = {
+ {
+ .addr = client->addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &cmd,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = len,
+ .buf = data,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&client->dev, "failed to read %#x: %d\n", cmd, error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int hx852x_power_on(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error;
+
+ error = regulator_bulk_enable(ARRAY_SIZE(hx->supplies), hx->supplies);
+ if (error) {
+ dev_err(dev, "failed to enable regulators: %d\n", error);
+ return error;
+ }
+
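+	/*
+	 * Pulse the reset line: assert it for 20 ms, then give the
+	 * controller 50 ms to start up after release.
+	 */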
+ gpiod_set_value_cansleep(hx->reset_gpiod, 1);
+ msleep(20);
+ gpiod_set_value_cansleep(hx->reset_gpiod, 0);
+ msleep(50);
+
+ return 0;
+}
+
+static int hx852x_start(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error;
+
+ error = i2c_smbus_write_byte(hx->client, HX852X_TS_SLEEP_OUT);
+ if (error) {
+ dev_err(dev, "failed to send TS_SLEEP_OUT: %d\n", error);
+ return error;
+ }
+ msleep(30);
+
+ error = i2c_smbus_write_byte(hx->client, HX852X_TS_SENSE_ON);
+ if (error) {
+ dev_err(dev, "failed to send TS_SENSE_ON: %d\n", error);
+ return error;
+ }
+ msleep(20);
+
+ return 0;
+}
+
+static int hx852x_stop(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error;
+
+ error = i2c_smbus_write_byte(hx->client, HX852X_TS_SENSE_OFF);
+ if (error) {
+ dev_err(dev, "failed to send TS_SENSE_OFF: %d\n", error);
+ return error;
+ }
+ msleep(20);
+
+ error = i2c_smbus_write_byte(hx->client, HX852X_TS_SLEEP_IN);
+ if (error) {
+ dev_err(dev, "failed to send TS_SLEEP_IN: %d\n", error);
+ return error;
+ }
+ msleep(30);
+
+ return 0;
+}
+
+static int hx852x_power_off(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error;
+
+ error = regulator_bulk_disable(ARRAY_SIZE(hx->supplies), hx->supplies);
+ if (error) {
+ dev_err(dev, "failed to disable regulators: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int hx852x_read_config(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ struct hx852x_config conf;
+ int x_res, y_res;
+ int error, error2;
+
+ error = hx852x_power_on(hx);
+ if (error)
+ return error;
+
+ /* Sensing must be turned on briefly to load the config */
+ error = hx852x_start(hx);
+ if (error)
+ goto err_power_off;
+
+ error = hx852x_stop(hx);
+ if (error)
+ goto err_power_off;
+
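+	/*
+	 * The config lives in SRAM: enter test mode, point the SRAM address
+	 * register at the config area and read it back through the
+	 * flash-replace register.
+	 */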
+ error = i2c_smbus_write_byte_data(hx->client, HX852X_REG_SRAM_SWITCH,
+ HX852X_SRAM_SWITCH_TEST_MODE);
+ if (error)
+ goto err_power_off;
+
+ error = i2c_smbus_write_word_data(hx->client, HX852X_REG_SRAM_ADDR,
+ HX852X_SRAM_ADDR_CONFIG);
+ if (error)
+ goto err_test_mode;
+
+ error = hx852x_i2c_read(hx, HX852X_REG_FLASH_RPLACE, &conf, sizeof(conf));
+ if (error)
+ goto err_test_mode;
+
+ x_res = be16_to_cpu(conf.x_res);
+ y_res = be16_to_cpu(conf.y_res);
+ hx->max_fingers = (conf.max_pt & 0xf0) >> 4;
+ dev_dbg(dev, "x res: %u, y res: %u, max fingers: %u\n",
+ x_res, y_res, hx->max_fingers);
+
+ if (hx->max_fingers > HX852X_MAX_FINGERS) {
+ dev_err(dev, "max supported fingers: %u, found: %u\n",
+ HX852X_MAX_FINGERS, hx->max_fingers);
+ error = -EINVAL;
+ goto err_test_mode;
+ }
+
+ if (x_res && y_res) {
+ input_set_abs_params(hx->input_dev, ABS_MT_POSITION_X, 0, x_res - 1, 0, 0);
+ input_set_abs_params(hx->input_dev, ABS_MT_POSITION_Y, 0, y_res - 1, 0, 0);
+ }
+
+err_test_mode:
+ error2 = i2c_smbus_write_byte_data(hx->client, HX852X_REG_SRAM_SWITCH, 0);
+ error = error ?: error2;
+err_power_off:
+ error2 = hx852x_power_off(hx);
+ return error ?: error2;
+}
+
+static int hx852x_handle_events(struct hx852x *hx)
+{
+ /*
+	 * The event packets have variable size, depending on the number of
+ * supported fingers (hx->max_fingers). They are laid out as follows:
+ * - struct hx852x_coord[hx->max_fingers]: Coordinates for each finger
+ * - u8[ALIGN(hx->max_fingers, 4)]: Touch width for each finger
+ * with padding for 32-bit alignment
+ * - struct hx852x_touch_info
+ *
+ * Load everything into a 32-bit aligned buffer so the coordinates
+ * can be assigned directly, without using get_unaligned_*().
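+	 *
+	 * For example, with hx->max_fingers = 2 the buffer holds 8 bytes of
+	 * coordinates, 4 width bytes (2 plus 2 bytes of padding) and the
+	 * 4-byte touch info, 16 bytes in total.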
+ */
+ u8 buf[HX852X_MAX_BUF_SIZE] __aligned(4);
+ struct hx852x_coord *coord = (struct hx852x_coord *)buf;
+ u8 *width = &buf[HX852X_COORD_SIZE(hx->max_fingers)];
+ struct hx852x_touch_info *info = (struct hx852x_touch_info *)
+ &width[HX852X_WIDTH_SIZE(hx->max_fingers)];
+ unsigned long finger_pressed, key_pressed;
+ unsigned int i, x, y, w;
+ int error;
+
+ error = hx852x_i2c_read(hx, HX852X_READ_ALL_EVENTS, buf,
+ HX852X_BUF_SIZE(hx->max_fingers));
+ if (error)
+ return error;
+
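+	/*
+	 * The lower 12 bits of finger_pressed carry the per-finger state,
+	 * the upper 4 bits the per-key state.
+	 */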
+ finger_pressed = get_unaligned_le16(&info->finger_pressed);
+ key_pressed = finger_pressed >> HX852X_MAX_FINGERS;
+
+ /* All bits are set when no touch is detected */
+ if (info->finger_num == 0xff || !(info->finger_num & 0x0f))
+ finger_pressed = 0;
+ if (key_pressed == 0xf)
+ key_pressed = 0;
+
+ for_each_set_bit(i, &finger_pressed, hx->max_fingers) {
+ x = be16_to_cpu(coord[i].x);
+ y = be16_to_cpu(coord[i].y);
+ w = width[i];
+
+ input_mt_slot(hx->input_dev, i);
+ input_mt_report_slot_state(hx->input_dev, MT_TOOL_FINGER, 1);
+ touchscreen_report_pos(hx->input_dev, &hx->props, x, y, true);
+ input_report_abs(hx->input_dev, ABS_MT_TOUCH_MAJOR, w);
+ }
+ input_mt_sync_frame(hx->input_dev);
+
+ for (i = 0; i < hx->keycount; i++)
+ input_report_key(hx->input_dev, hx->keycodes[i], key_pressed & BIT(i));
+
+ input_sync(hx->input_dev);
+ return 0;
+}
+
+static irqreturn_t hx852x_interrupt(int irq, void *ptr)
+{
+ struct hx852x *hx = ptr;
+ int error;
+
+ error = hx852x_handle_events(hx);
+ if (error) {
+ dev_err_ratelimited(&hx->client->dev,
+ "failed to handle events: %d\n", error);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hx852x_input_open(struct input_dev *dev)
+{
+ struct hx852x *hx = input_get_drvdata(dev);
+ int error;
+
+ error = hx852x_power_on(hx);
+ if (error)
+ return error;
+
+ error = hx852x_start(hx);
+ if (error) {
+ hx852x_power_off(hx);
+ return error;
+ }
+
+ enable_irq(hx->client->irq);
+ return 0;
+}
+
+static void hx852x_input_close(struct input_dev *dev)
+{
+ struct hx852x *hx = input_get_drvdata(dev);
+
+ hx852x_stop(hx);
+ disable_irq(hx->client->irq);
+ hx852x_power_off(hx);
+}
+
+static int hx852x_parse_properties(struct hx852x *hx)
+{
+ struct device *dev = &hx->client->dev;
+ int error, count;
+
+ count = device_property_count_u32(dev, "linux,keycodes");
+ if (count == -EINVAL) {
+ /* Property does not exist, keycodes are optional */
+ return 0;
+ } else if (count < 0) {
+		dev_err(dev, "failed to read linux,keycodes: %d\n", count);
+ return count;
+ } else if (count > HX852X_MAX_KEY_COUNT) {
+ dev_err(dev, "max supported keys: %u, found: %u\n",
+			HX852X_MAX_KEY_COUNT, count);
+ return -EINVAL;
+ }
+ hx->keycount = count;
+
+ error = device_property_read_u32_array(dev, "linux,keycodes",
+ hx->keycodes, hx->keycount);
+ if (error) {
+ dev_err(dev, "failed to read linux,keycodes: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int hx852x_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct hx852x *hx;
+ int error, i;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_WRITE_BYTE |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
+ I2C_FUNC_SMBUS_WRITE_WORD_DATA)) {
+ dev_err(dev, "not all required i2c functionality supported\n");
+ return -ENXIO;
+ }
+
+ hx = devm_kzalloc(dev, sizeof(*hx), GFP_KERNEL);
+ if (!hx)
+ return -ENOMEM;
+
+ hx->client = client;
+ hx->input_dev = devm_input_allocate_device(dev);
+ if (!hx->input_dev)
+ return -ENOMEM;
+
+ hx->input_dev->name = "Himax HX852x";
+ hx->input_dev->id.bustype = BUS_I2C;
+ hx->input_dev->open = hx852x_input_open;
+ hx->input_dev->close = hx852x_input_close;
+
+ i2c_set_clientdata(client, hx);
+ input_set_drvdata(hx->input_dev, hx);
+
+ hx->supplies[0].supply = "vcca";
+ hx->supplies[1].supply = "vccd";
+ error = devm_regulator_bulk_get(dev, ARRAY_SIZE(hx->supplies), hx->supplies);
+ if (error)
+ return dev_err_probe(dev, error, "failed to get regulators\n");
+
+ hx->reset_gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(hx->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(hx->reset_gpiod),
+ "failed to get reset gpio\n");
+
+ error = devm_request_threaded_irq(dev, client->irq, NULL, hx852x_interrupt,
+ IRQF_ONESHOT | IRQF_NO_AUTOEN, NULL, hx);
+ if (error)
+		return dev_err_probe(dev, error, "failed to request irq %d\n", client->irq);
+
+ error = hx852x_read_config(hx);
+ if (error)
+ return error;
+
+ input_set_capability(hx->input_dev, EV_ABS, ABS_MT_POSITION_X);
+ input_set_capability(hx->input_dev, EV_ABS, ABS_MT_POSITION_Y);
+ input_set_abs_params(hx->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0);
+
+ touchscreen_parse_properties(hx->input_dev, true, &hx->props);
+ error = hx852x_parse_properties(hx);
+ if (error)
+ return error;
+
+ hx->input_dev->keycode = hx->keycodes;
+ hx->input_dev->keycodemax = hx->keycount;
+ hx->input_dev->keycodesize = sizeof(hx->keycodes[0]);
+ for (i = 0; i < hx->keycount; i++)
+ input_set_capability(hx->input_dev, EV_KEY, hx->keycodes[i]);
+
+ error = input_mt_init_slots(hx->input_dev, hx->max_fingers,
+ INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
+ if (error)
+ return dev_err_probe(dev, error, "failed to init MT slots\n");
+
+ error = input_register_device(hx->input_dev);
+ if (error)
+ return dev_err_probe(dev, error, "failed to register input device\n");
+
+ return 0;
+}
+
+static int hx852x_suspend(struct device *dev)
+{
+ struct hx852x *hx = dev_get_drvdata(dev);
+
+ guard(mutex)(&hx->input_dev->mutex);
+
+ if (input_device_enabled(hx->input_dev))
+ return hx852x_stop(hx);
+
+ return 0;
+}
+
+static int hx852x_resume(struct device *dev)
+{
+ struct hx852x *hx = dev_get_drvdata(dev);
+
+ guard(mutex)(&hx->input_dev->mutex);
+
+ if (input_device_enabled(hx->input_dev))
+ return hx852x_start(hx);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(hx852x_pm_ops, hx852x_suspend, hx852x_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id hx852x_of_match[] = {
+ { .compatible = "himax,hx852es" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, hx852x_of_match);
+#endif
+
+static struct i2c_driver hx852x_driver = {
+ .probe = hx852x_probe,
+ .driver = {
+ .name = "himax_hx852x",
+ .pm = pm_sleep_ptr(&hx852x_pm_ops),
+ .of_match_table = of_match_ptr(hx852x_of_match),
+ },
+};
+module_i2c_driver(hx852x_driver);
+
+MODULE_DESCRIPTION("Himax HX852x(ES) Touchscreen Driver");
+MODULE_AUTHOR("Jonathan Albrieux <jonathan.albrieux@gmail.com>");
+MODULE_AUTHOR("Stephan Gerhold <stephan@gerhold.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/hynitron-cst816x.c b/drivers/input/touchscreen/hynitron-cst816x.c
new file mode 100644
index 000000000000..b64d7928e18f
--- /dev/null
+++ b/drivers/input/touchscreen/hynitron-cst816x.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Driver for I2C connected Hynitron CST816x Series Touchscreen
+ *
+ * Copyright (C) 2025 Oleh Kuzhylnyi <kuzhylol@gmail.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/unaligned.h>
+
+#define CST816X_RD_REG 0x01
+#define CST816X_NUM_KEYS 5
+
+struct cst816x_touch {
+ u8 gest;
+ u8 active;
+ u16 abs_x;
+ u16 abs_y;
+} __packed;
+
+struct cst816x_priv {
+ struct i2c_client *client;
+ struct gpio_desc *reset;
+ struct input_dev *input;
+ unsigned int keycode[CST816X_NUM_KEYS];
+ unsigned int keycodemax;
+};
+
+static int cst816x_parse_keycodes(struct device *dev, struct cst816x_priv *priv)
+{
+ int count;
+ int error;
+
+ if (device_property_present(dev, "linux,keycodes")) {
+ count = device_property_count_u32(dev, "linux,keycodes");
+ if (count < 0) {
+ error = count;
+ dev_err(dev, "failed to count keys: %d\n", error);
+ return error;
+ } else if (count > ARRAY_SIZE(priv->keycode)) {
+ dev_err(dev, "too many keys defined: %d\n", count);
+ return -EINVAL;
+ }
+ priv->keycodemax = count;
+
+ error = device_property_read_u32_array(dev, "linux,keycodes",
+ priv->keycode,
+ priv->keycodemax);
+ if (error) {
+ dev_err(dev, "failed to read keycodes: %d\n", error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int cst816x_i2c_read_register(struct cst816x_priv *priv, u8 reg,
+ void *buf, size_t len)
+{
+ struct i2c_msg xfer[] = {
+ {
+ .addr = priv->client->addr,
+ .flags = 0,
+ .buf = &reg,
+ .len = sizeof(reg),
+ },
+ {
+ .addr = priv->client->addr,
+ .flags = I2C_M_RD,
+ .buf = buf,
+ .len = len,
+ },
+ };
+ int error;
+ int ret;
+
+ ret = i2c_transfer(priv->client->adapter, xfer, ARRAY_SIZE(xfer));
+ if (ret != ARRAY_SIZE(xfer)) {
+ error = ret < 0 ? ret : -EIO;
+ dev_err(&priv->client->dev, "i2c rx err: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
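+/*
+ * Map a gesture code to an index into the keycode table: the four slide
+ * gestures use slots 0-3, long press and any unknown gesture fall back
+ * to the last slot.
+ */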
+static u8 cst816x_gest_idx(u8 gest)
+{
+ u8 index;
+
+ switch (gest) {
+ case 0x01: /* Slide up gesture */
+ case 0x02: /* Slide down gesture */
+ case 0x03: /* Slide left gesture */
+ case 0x04: /* Slide right gesture */
+ index = gest;
+ break;
+ case 0x0c: /* Long press gesture */
+ default:
+ index = CST816X_NUM_KEYS;
+ break;
+ }
+
+ return index - 1;
+}
+
+static bool cst816x_process_touch(struct cst816x_priv *priv,
+ struct cst816x_touch *tch)
+{
+ if (cst816x_i2c_read_register(priv, CST816X_RD_REG, tch, sizeof(*tch)))
+ return false;
+
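+	/* Coordinates are 12-bit values stored big-endian in the report */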
+ tch->abs_x = get_unaligned_be16(&tch->abs_x) & GENMASK(11, 0);
+ tch->abs_y = get_unaligned_be16(&tch->abs_y) & GENMASK(11, 0);
+
+ dev_dbg(&priv->client->dev, "x: %u, y: %u, t: %u, g: 0x%x\n",
+ tch->abs_x, tch->abs_y, tch->active, tch->gest);
+
+ return true;
+}
+
+static int cst816x_register_input(struct cst816x_priv *priv)
+{
+ priv->input = devm_input_allocate_device(&priv->client->dev);
+ if (!priv->input)
+ return -ENOMEM;
+
+ priv->input->name = "Hynitron CST816x Series Touchscreen";
+ priv->input->phys = "input/ts";
+ priv->input->id.bustype = BUS_I2C;
+
+ input_set_drvdata(priv->input, priv);
+
+ input_set_abs_params(priv->input, ABS_X, 0, 240, 0, 0);
+ input_set_abs_params(priv->input, ABS_Y, 0, 240, 0, 0);
+ input_set_capability(priv->input, EV_KEY, BTN_TOUCH);
+
+ priv->input->keycode = priv->keycode;
+ priv->input->keycodesize = sizeof(priv->keycode[0]);
+ priv->input->keycodemax = priv->keycodemax;
+
+ for (int i = 0; i < priv->keycodemax; i++) {
+ if (priv->keycode[i] == KEY_RESERVED)
+ continue;
+
+ input_set_capability(priv->input, EV_KEY, priv->keycode[i]);
+ }
+
+ return input_register_device(priv->input);
+}
+
+static void cst816x_reset(struct cst816x_priv *priv)
+{
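+	/* Hold reset for 50 ms, then give the controller 100 ms to start */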
+ gpiod_set_value_cansleep(priv->reset, 1);
+ msleep(50);
+ gpiod_set_value_cansleep(priv->reset, 0);
+ msleep(100);
+}
+
+static irqreturn_t cst816x_irq_cb(int irq, void *cookie)
+{
+ struct cst816x_priv *priv = cookie;
+ struct cst816x_touch tch;
+
+ if (!cst816x_process_touch(priv, &tch))
+ return IRQ_HANDLED;
+
+ input_report_abs(priv->input, ABS_X, tch.abs_x);
+ input_report_abs(priv->input, ABS_Y, tch.abs_y);
+
+ if (tch.gest)
+ input_report_key(priv->input,
+ priv->keycode[cst816x_gest_idx(tch.gest)],
+ tch.active);
+
+ input_report_key(priv->input, BTN_TOUCH, tch.active);
+
+ input_sync(priv->input);
+
+ return IRQ_HANDLED;
+}
+
+static int cst816x_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct cst816x_priv *priv;
+ int error;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+
+ priv->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(dev, PTR_ERR(priv->reset),
+ "gpio reset request failed\n");
+
+ if (priv->reset)
+ cst816x_reset(priv);
+
+ error = cst816x_parse_keycodes(dev, priv);
+ if (error)
+		dev_warn(dev, "failed to parse gesture keycodes: %d\n", error);
+
+ error = cst816x_register_input(priv);
+ if (error)
+ return dev_err_probe(dev, error, "input register failed\n");
+
+ error = devm_request_threaded_irq(dev, client->irq,
+ NULL, cst816x_irq_cb, IRQF_ONESHOT,
+ dev_driver_string(dev), priv);
+ if (error)
+ return dev_err_probe(dev, error, "irq request failed\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id cst816x_id[] = {
+	{ .name = "cst816s" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, cst816x_id);
+
+static const struct of_device_id cst816x_of_match[] = {
+ { .compatible = "hynitron,cst816s", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cst816x_of_match);
+
+static struct i2c_driver cst816x_driver = {
+ .driver = {
+ .name = "cst816x",
+ .of_match_table = cst816x_of_match,
+ },
+ .id_table = cst816x_id,
+ .probe = cst816x_probe,
+};
+
+module_i2c_driver(cst816x_driver);
+
+MODULE_AUTHOR("Oleh Kuzhylnyi <kuzhylol@gmail.com>");
+MODULE_DESCRIPTION("Hynitron CST816x Series Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index 6ac8fa84ed9f..85f697de2b7e 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -7,6 +7,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/bitfield.h>
#include <linux/gpio/consumer.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -20,25 +21,23 @@
#include <linux/log2.h>
/* ADC configuration registers field define */
-#define ADC_AIEN (0x1 << 7)
+#define ADC_AIEN BIT(7)
+#define ADC_ADCH_MASK GENMASK(4, 0)
#define ADC_CONV_DISABLE 0x1F
-#define ADC_AVGE (0x1 << 5)
-#define ADC_CAL (0x1 << 7)
-#define ADC_CALF 0x2
-#define ADC_12BIT_MODE (0x2 << 2)
-#define ADC_CONV_MODE_MASK (0x3 << 2)
+#define ADC_AVGE BIT(5)
+#define ADC_CAL BIT(7)
+#define ADC_CALF BIT(1)
+#define ADC_CONV_MODE_MASK GENMASK(3, 2)
+#define ADC_12BIT_MODE 0x2
#define ADC_IPG_CLK 0x00
-#define ADC_INPUT_CLK_MASK 0x3
-#define ADC_CLK_DIV_8 (0x03 << 5)
-#define ADC_CLK_DIV_MASK (0x3 << 5)
-#define ADC_SHORT_SAMPLE_MODE (0x0 << 4)
-#define ADC_SAMPLE_MODE_MASK (0x1 << 4)
-#define ADC_HARDWARE_TRIGGER (0x1 << 13)
-#define ADC_AVGS_SHIFT 14
-#define ADC_AVGS_MASK (0x3 << 14)
+#define ADC_INPUT_CLK_MASK GENMASK(1, 0)
+#define ADC_CLK_DIV_8 0x03
+#define ADC_CLK_DIV_MASK GENMASK(6, 5)
+#define ADC_SAMPLE_MODE BIT(4)
+#define ADC_HARDWARE_TRIGGER BIT(13)
+#define ADC_AVGS_MASK GENMASK(15, 14)
#define SELECT_CHANNEL_4 0x04
#define SELECT_CHANNEL_1 0x01
-#define DISABLE_CONVERSION_INT (0x0 << 7)
/* ADC registers */
#define REG_ADC_HC0 0x00
@@ -55,7 +54,7 @@
#define ADC_TIMEOUT msecs_to_jiffies(100)
/* TSC registers */
-#define REG_TSC_BASIC_SETING 0x00
+#define REG_TSC_BASIC_SETTING 0x00
#define REG_TSC_PRE_CHARGE_TIME 0x10
#define REG_TSC_FLOW_CONTROL 0x20
#define REG_TSC_MEASURE_VALUE 0x30
@@ -65,19 +64,26 @@
#define REG_TSC_DEBUG_MODE 0x70
#define REG_TSC_DEBUG_MODE2 0x80
+/* TSC_MEASURE_VALUE register field define */
+#define X_VALUE_MASK GENMASK(27, 16)
+#define Y_VALUE_MASK GENMASK(11, 0)
+
/* TSC configuration registers field define */
-#define DETECT_4_WIRE_MODE (0x0 << 4)
-#define AUTO_MEASURE 0x1
-#define MEASURE_SIGNAL 0x1
-#define DETECT_SIGNAL (0x1 << 4)
-#define VALID_SIGNAL (0x1 << 8)
-#define MEASURE_INT_EN 0x1
-#define MEASURE_SIG_EN 0x1
-#define VALID_SIG_EN (0x1 << 8)
-#define DE_GLITCH_2 (0x2 << 29)
-#define START_SENSE (0x1 << 12)
-#define TSC_DISABLE (0x1 << 16)
+#define MEASURE_DELAY_TIME_MASK GENMASK(31, 8)
+#define DETECT_5_WIRE_MODE BIT(4)
+#define AUTO_MEASURE BIT(0)
+#define MEASURE_SIGNAL BIT(0)
+#define DETECT_SIGNAL BIT(4)
+#define VALID_SIGNAL BIT(8)
+#define MEASURE_INT_EN BIT(0)
+#define MEASURE_SIG_EN BIT(0)
+#define VALID_SIG_EN BIT(8)
+#define DE_GLITCH_MASK GENMASK(30, 29)
+#define DE_GLITCH_DEF 0x02
+#define START_SENSE BIT(12)
+#define TSC_DISABLE BIT(16)
#define DETECT_MODE 0x2
+#define STATE_MACHINE_MASK GENMASK(22, 20)
struct imx6ul_tsc {
struct device *dev;
@@ -92,6 +98,7 @@ struct imx6ul_tsc {
u32 pre_charge_time;
bool average_enable;
u32 average_select;
+ u32 de_glitch;
struct completion completion;
};
@@ -112,19 +119,20 @@ static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
adc_cfg &= ~(ADC_CONV_MODE_MASK | ADC_INPUT_CLK_MASK);
- adc_cfg |= ADC_12BIT_MODE | ADC_IPG_CLK;
- adc_cfg &= ~(ADC_CLK_DIV_MASK | ADC_SAMPLE_MODE_MASK);
- adc_cfg |= ADC_CLK_DIV_8 | ADC_SHORT_SAMPLE_MODE;
+ adc_cfg |= FIELD_PREP(ADC_CONV_MODE_MASK, ADC_12BIT_MODE) |
+ FIELD_PREP(ADC_INPUT_CLK_MASK, ADC_IPG_CLK);
+ adc_cfg &= ~(ADC_CLK_DIV_MASK | ADC_SAMPLE_MODE);
+ adc_cfg |= FIELD_PREP(ADC_CLK_DIV_MASK, ADC_CLK_DIV_8);
if (tsc->average_enable) {
adc_cfg &= ~ADC_AVGS_MASK;
- adc_cfg |= (tsc->average_select) << ADC_AVGS_SHIFT;
+ adc_cfg |= FIELD_PREP(ADC_AVGS_MASK, tsc->average_select);
}
adc_cfg &= ~ADC_HARDWARE_TRIGGER;
writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
/* enable calibration interrupt */
adc_hc |= ADC_AIEN;
- adc_hc |= ADC_CONV_DISABLE;
+ adc_hc |= FIELD_PREP(ADC_ADCH_MASK, ADC_CONV_DISABLE);
writel(adc_hc, tsc->adc_regs + REG_ADC_HC0);
/* start ADC calibration */
@@ -164,19 +172,21 @@ static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
{
u32 adc_hc0, adc_hc1, adc_hc2, adc_hc3, adc_hc4;
- adc_hc0 = DISABLE_CONVERSION_INT;
+ adc_hc0 = FIELD_PREP(ADC_AIEN, 0);
writel(adc_hc0, tsc->adc_regs + REG_ADC_HC0);
- adc_hc1 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_4;
+ adc_hc1 = FIELD_PREP(ADC_AIEN, 0) |
+ FIELD_PREP(ADC_ADCH_MASK, SELECT_CHANNEL_4);
writel(adc_hc1, tsc->adc_regs + REG_ADC_HC1);
- adc_hc2 = DISABLE_CONVERSION_INT;
+ adc_hc2 = FIELD_PREP(ADC_AIEN, 0);
writel(adc_hc2, tsc->adc_regs + REG_ADC_HC2);
- adc_hc3 = DISABLE_CONVERSION_INT | SELECT_CHANNEL_1;
+ adc_hc3 = FIELD_PREP(ADC_AIEN, 0) |
+ FIELD_PREP(ADC_ADCH_MASK, SELECT_CHANNEL_1);
writel(adc_hc3, tsc->adc_regs + REG_ADC_HC3);
- adc_hc4 = DISABLE_CONVERSION_INT;
+ adc_hc4 = FIELD_PREP(ADC_AIEN, 0);
writel(adc_hc4, tsc->adc_regs + REG_ADC_HC4);
}
@@ -188,13 +198,16 @@ static void imx6ul_tsc_channel_config(struct imx6ul_tsc *tsc)
static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
{
u32 basic_setting = 0;
+ u32 debug_mode2;
u32 start;
- basic_setting |= tsc->measure_delay_time << 8;
- basic_setting |= DETECT_4_WIRE_MODE | AUTO_MEASURE;
- writel(basic_setting, tsc->tsc_regs + REG_TSC_BASIC_SETING);
+ basic_setting |= FIELD_PREP(MEASURE_DELAY_TIME_MASK,
+ tsc->measure_delay_time);
+ basic_setting |= AUTO_MEASURE;
+ writel(basic_setting, tsc->tsc_regs + REG_TSC_BASIC_SETTING);
- writel(DE_GLITCH_2, tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
+ debug_mode2 = FIELD_PREP(DE_GLITCH_MASK, tsc->de_glitch);
+ writel(debug_mode2, tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
writel(tsc->pre_charge_time, tsc->tsc_regs + REG_TSC_PRE_CHARGE_TIME);
writel(MEASURE_INT_EN, tsc->tsc_regs + REG_TSC_INT_EN);
@@ -250,7 +263,7 @@ static bool tsc_wait_detect_mode(struct imx6ul_tsc *tsc)
usleep_range(200, 400);
debug_mode2 = readl(tsc->tsc_regs + REG_TSC_DEBUG_MODE2);
- state_machine = (debug_mode2 >> 20) & 0x7;
+ state_machine = FIELD_GET(STATE_MACHINE_MASK, debug_mode2);
} while (state_machine != DETECT_MODE);
usleep_range(200, 400);
@@ -278,8 +291,8 @@ static irqreturn_t tsc_irq_fn(int irq, void *dev_id)
if (status & MEASURE_SIGNAL) {
value = readl(tsc->tsc_regs + REG_TSC_MEASURE_VALUE);
- x = (value >> 16) & 0x0fff;
- y = value & 0x0fff;
+ x = FIELD_GET(X_VALUE_MASK, value);
+ y = FIELD_GET(Y_VALUE_MASK, value);
/*
* In detect mode, we can get the xnur gpio value,
@@ -379,6 +392,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
int tsc_irq;
int adc_irq;
u32 average_samples;
+ u32 de_glitch;
tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
if (!tsc)
@@ -501,6 +515,25 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
return -EINVAL;
}
+ err = of_property_read_u32(np, "debounce-delay-us", &de_glitch);
+ if (err) {
+ tsc->de_glitch = DE_GLITCH_DEF;
+ } else {
+ u64 cycles;
+ unsigned long rate = clk_get_rate(tsc->tsc_clk);
+
+ cycles = DIV64_U64_ROUND_UP((u64)de_glitch * rate, USEC_PER_SEC);
+
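+		/*
+		 * Pick the smallest hardware de-glitch window that still
+		 * covers the requested debounce time; field value 3 selects
+		 * the shortest window and 0 the longest.
+		 */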
+ if (cycles <= 0x3ff)
+ tsc->de_glitch = 3;
+ else if (cycles <= 0x7ff)
+ tsc->de_glitch = 2;
+ else if (cycles <= 0xfff)
+ tsc->de_glitch = 1;
+ else
+ tsc->de_glitch = 0;
+ }
+
err = input_register_device(tsc->input);
if (err) {
dev_err(&pdev->dev,
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index 33635da85079..47b8da00027f 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -42,8 +42,6 @@ static irqreturn_t mc13783_ts_handler(int irq, void *data)
{
struct mc13783_ts_priv *priv = data;
- mc13xxx_irq_ack(priv->mc13xxx, irq);
-
/*
* Kick off reading coordinates. Note that if work happens already
* be queued for future execution (it rearms itself) it will not
@@ -137,8 +135,6 @@ static int mc13783_ts_open(struct input_dev *dev)
mc13xxx_lock(priv->mc13xxx);
- mc13xxx_irq_ack(priv->mc13xxx, MC13XXX_IRQ_TS);
-
ret = mc13xxx_irq_request(priv->mc13xxx, MC13XXX_IRQ_TS,
mc13783_ts_handler, MC13783_TS_NAME, priv);
if (ret)
diff --git a/drivers/input/touchscreen/tsc2007_core.c b/drivers/input/touchscreen/tsc2007_core.c
index 5252301686ec..948935de894b 100644
--- a/drivers/input/touchscreen/tsc2007_core.c
+++ b/drivers/input/touchscreen/tsc2007_core.c
@@ -23,6 +23,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
+#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/platform_data/tsc2007.h>
@@ -68,7 +69,7 @@ static void tsc2007_read_values(struct tsc2007 *tsc, struct ts_event *tc)
u32 tsc2007_calculate_resistance(struct tsc2007 *tsc, struct ts_event *tc)
{
- u32 rt = 0;
+ u64 rt = 0;
/* range filtering */
if (tc->x == MAX_12BIT)
@@ -79,11 +80,13 @@ u32 tsc2007_calculate_resistance(struct tsc2007 *tsc, struct ts_event *tc)
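+		/*
+		 * Standard 4-wire pressure estimate:
+		 * Rt = Rx_plate * (x / 4096) * (z2 - z1) / z1, computed in
+		 * 64 bits so the intermediate product cannot overflow.
+		 */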
rt = tc->z2 - tc->z1;
rt *= tc->x;
rt *= tsc->x_plate_ohms;
- rt /= tc->z1;
+ rt = div_u64(rt, tc->z1);
rt = (rt + 2047) >> 12;
}
- return rt;
+ if (rt > U32_MAX)
+ return U32_MAX;
+ return (u32) rt;
}
bool tsc2007_is_pen_down(struct tsc2007 *ts)
@@ -177,7 +180,8 @@ static void tsc2007_stop(struct tsc2007 *ts)
mb();
wake_up(&ts->wait);
- disable_irq(ts->irq);
+ if (ts->irq)
+ disable_irq(ts->irq);
}
static int tsc2007_open(struct input_dev *input_dev)
@@ -188,7 +192,8 @@ static int tsc2007_open(struct input_dev *input_dev)
ts->stopped = false;
mb();
- enable_irq(ts->irq);
+ if (ts->irq)
+ enable_irq(ts->irq);
/* Prepare for touch readings - power down ADC and enable PENIRQ */
err = tsc2007_xfer(ts, PWRDOWN);
@@ -253,7 +258,7 @@ static int tsc2007_probe_properties(struct device *dev, struct tsc2007 *ts)
if (ts->gpiod)
ts->get_pendown_state = tsc2007_get_pendown_state_gpio;
else
- dev_warn(dev, "Pen down GPIO is not specified in properties\n");
+ dev_dbg(dev, "Pen down GPIO is not specified in properties\n");
return 0;
}
@@ -361,17 +366,19 @@ static int tsc2007_probe(struct i2c_client *client)
pdata->init_platform_hw();
}
- err = devm_request_threaded_irq(&client->dev, ts->irq,
- NULL, tsc2007_soft_irq,
- IRQF_ONESHOT,
- client->dev.driver->name, ts);
- if (err) {
- dev_err(&client->dev, "Failed to request irq %d: %d\n",
- ts->irq, err);
- return err;
- }
+ if (ts->irq) {
+ err = devm_request_threaded_irq(&client->dev, ts->irq,
+ NULL, tsc2007_soft_irq,
+ IRQF_ONESHOT,
+ client->dev.driver->name, ts);
+ if (err) {
+ dev_err(&client->dev, "Failed to request irq %d: %d\n",
+ ts->irq, err);
+ return err;
+ }
- tsc2007_stop(ts);
+ tsc2007_stop(ts);
+ }
/* power down the chip (TSC2007_SETUP does not ACK on I2C) */
err = tsc2007_xfer(ts, PWRDOWN);
diff --git a/drivers/input/touchscreen/tsc200x-core.c b/drivers/input/touchscreen/tsc200x-core.c
index 82d7d1cf5010..eba53613b005 100644
--- a/drivers/input/touchscreen/tsc200x-core.c
+++ b/drivers/input/touchscreen/tsc200x-core.c
@@ -10,6 +10,7 @@
* based on TSC2301 driver by Klaus K. Pedersen <klaus.k.pedersen@nokia.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/input.h>
diff --git a/drivers/input/touchscreen/wm9705.c b/drivers/input/touchscreen/wm9705.c
index 4b55d5e1ea0f..96484aae030c 100644
--- a/drivers/input/touchscreen/wm9705.c
+++ b/drivers/input/touchscreen/wm9705.c
@@ -9,6 +9,7 @@
* Russell King <rmk@arm.linux.org.uk>
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c
index 6947714dfefa..087ece57741a 100644
--- a/drivers/input/touchscreen/wm9712.c
+++ b/drivers/input/touchscreen/wm9712.c
@@ -9,6 +9,7 @@
* Russell King <rmk@arm.linux.org.uk>
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c
index a67fbe304f92..6f13f46ce6e6 100644
--- a/drivers/input/touchscreen/wm9713.c
+++ b/drivers/input/touchscreen/wm9713.c
@@ -9,6 +9,7 @@
* Russell King <rmk@arm.linux.org.uk>
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index b25771a8df2b..96354c44af87 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -29,6 +29,7 @@
* - Support for async sampling control for noisy LCDs.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 3ebf37ddfc18..6cc979b26151 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -385,7 +385,7 @@ struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spe
mutex_lock(&icc_lock);
list_for_each_entry(provider, &icc_providers, provider_list) {
- if (provider->dev->of_node == spec->np) {
+ if (device_match_of_node(provider->dev, spec->np)) {
if (provider->xlate_extended) {
data = provider->xlate_extended(spec, provider->data);
if (!IS_ERR(data)) {
diff --git a/drivers/interconnect/debugfs-client.c b/drivers/interconnect/debugfs-client.c
index bc3fd8a7b9eb..778deeb4a7e8 100644
--- a/drivers/interconnect/debugfs-client.c
+++ b/drivers/interconnect/debugfs-client.c
@@ -117,7 +117,12 @@ static int icc_commit_set(void *data, u64 val)
mutex_lock(&debugfs_lock);
- if (IS_ERR_OR_NULL(cur_path)) {
+ if (!cur_path) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (IS_ERR(cur_path)) {
ret = PTR_ERR(cur_path);
goto out;
}
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 31dc4781abef..bb1cb8a640c1 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -8,6 +8,24 @@ config INTERCONNECT_QCOM
config INTERCONNECT_QCOM_BCM_VOTER
tristate
+config INTERCONNECT_QCOM_GLYMUR
+ tristate "Qualcomm GLYMUR interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on glymur-based
+ platforms.
+
+config INTERCONNECT_QCOM_KAANAPALI
+ tristate "Qualcomm KAANAPALI interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on kaanapali-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8909
tristate "Qualcomm MSM8909 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index f16ac242eba5..6eedff043b41 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -4,6 +4,8 @@ obj-$(CONFIG_INTERCONNECT_QCOM) += interconnect_qcom.o
interconnect_qcom-y := icc-common.o
icc-bcm-voter-objs := bcm-voter.o
+qnoc-glymur-objs := glymur.o
+qnoc-kaanapali-objs := kaanapali.o
qnoc-milos-objs := milos.o
qnoc-msm8909-objs := msm8909.o
qnoc-msm8916-objs := msm8916.o
@@ -46,6 +48,8 @@ qnoc-x1e80100-objs := x1e80100.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
+obj-$(CONFIG_INTERCONNECT_QCOM_GLYMUR) += qnoc-glymur.o
+obj-$(CONFIG_INTERCONNECT_QCOM_KAANAPALI) += qnoc-kaanapali.o
obj-$(CONFIG_INTERCONNECT_QCOM_MILOS) += qnoc-milos.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8909) += qnoc-msm8909.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
diff --git a/drivers/interconnect/qcom/glymur.c b/drivers/interconnect/qcom/glymur.c
new file mode 100644
index 000000000000..e5c07795a6c6
--- /dev/null
+++ b/drivers/interconnect/qcom/glymur.c
@@ -0,0 +1,2522 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,glymur-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
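+/*
+ * Slave (endpoint) nodes carry no links; master nodes reference their
+ * downstream targets through link_nodes.
+ */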
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+ .name = "qhs_ahb2phy2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy3 = {
+ .name = "qhs_ahb2phy3",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_av1_enc_cfg = {
+ .name = "qhs_av1_enc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie2_cfg = {
+ .name = "qhs_pcie2_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3a_cfg = {
+ .name = "qhs_pcie3a_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3b_cfg = {
+ .name = "qhs_pcie3b_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie4_cfg = {
+ .name = "qhs_pcie4_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie5_cfg = {
+ .name = "qhs_pcie5_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie6_cfg = {
+ .name = "qhs_pcie6_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_rscc = {
+ .name = "qhs_pcie_rscc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_smmuv3_cfg = {
+ .name = "qhs_smmuv3_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb2_0_cfg = {
+ .name = "qhs_usb2_0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0_cfg = {
+ .name = "qhs_usb3_0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1_cfg = {
+ .name = "qhs_usb3_1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_2_cfg = {
+ .name = "qhs_usb3_2_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_mp_cfg = {
+ .name = "qhs_usb3_mp_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_0_cfg = {
+ .name = "qhs_usb4_0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_1_cfg = {
+ .name = "qhs_usb4_1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_2_cfg = {
+ .name = "qhs_usb4_2_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qss_lpass_qtb_cfg = {
+ .name = "qss_lpass_qtb_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qss_nsp_qtb_cfg = {
+ .name = "qss_nsp_qtb_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_soccp = {
+ .name = "qhs_soccp",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_apss = {
+ .name = "qns_apss",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .channels = 12,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_nsinoc = {
+ .name = "srvc_nsinoc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_east_aggre_noc = {
+ .name = "srvc_pcie_east_aggre_noc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_hscnoc_pcie_east_ms_mpu_cfg = {
+ .name = "qhs_hscnoc_pcie_east_ms_mpu_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_east = {
+ .name = "srvc_pcie_east",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node xs_pcie_5 = {
+ .name = "xs_pcie_5",
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node srvc_pcie_west_aggre_noc = {
+ .name = "srvc_pcie_west_aggre_noc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_hscnoc_pcie_west_ms_mpu_cfg = {
+ .name = "qhs_hscnoc_pcie_west_ms_mpu_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_west = {
+ .name = "srvc_pcie_west",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_2 = {
+ .name = "xs_pcie_2",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_3a = {
+ .name = "xs_pcie_3a",
+ .channels = 1,
+ .buswidth = 64,
+};
+
+static struct qcom_icc_node xs_pcie_3b = {
+ .name = "xs_pcie_3b",
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node xs_pcie_4 = {
+ .name = "xs_pcie_4",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_6 = {
+ .name = "xs_pcie_6",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .channels = 12,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &ebi },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+ .name = "qsm_mnoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &srvc_mnoc },
+};
+
+static struct qcom_icc_node qsm_pcie_east_anoc_cfg = {
+ .name = "qsm_pcie_east_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &srvc_pcie_east_aggre_noc },
+};
+
+static struct qcom_icc_node qnm_hscnoc_pcie_east = {
+ .name = "qnm_hscnoc_pcie_east",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 3,
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1,
+ &xs_pcie_5 },
+};
+
+static struct qcom_icc_node qsm_cnoc_pcie_east_slave_cfg = {
+ .name = "qsm_cnoc_pcie_east_slave_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .link_nodes = { &qhs_hscnoc_pcie_east_ms_mpu_cfg,
+ &srvc_pcie_east },
+};
+
+static struct qcom_icc_node qsm_pcie_west_anoc_cfg = {
+ .name = "qsm_pcie_west_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &srvc_pcie_west_aggre_noc },
+};
+
+static struct qcom_icc_node qnm_hscnoc_pcie_west = {
+ .name = "qnm_hscnoc_pcie_west",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 5,
+ .link_nodes = { &xs_pcie_2, &xs_pcie_3a,
+ &xs_pcie_3b, &xs_pcie_4,
+ &xs_pcie_6 },
+};
+
+static struct qcom_icc_node qsm_cnoc_pcie_west_slave_cfg = {
+ .name = "qsm_cnoc_pcie_west_slave_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .link_nodes = { &qhs_hscnoc_pcie_west_ms_mpu_cfg,
+ &srvc_pcie_west },
+};
+
+static struct qcom_icc_node qss_cnoc_pcie_slave_east_cfg = {
+ .name = "qss_cnoc_pcie_slave_east_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qsm_cnoc_pcie_east_slave_cfg },
+};
+
+static struct qcom_icc_node qss_cnoc_pcie_slave_west_cfg = {
+ .name = "qss_cnoc_pcie_slave_west_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qsm_cnoc_pcie_west_slave_cfg },
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+ .name = "qss_mnoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qsm_mnoc_cfg },
+};
+
+static struct qcom_icc_node qss_pcie_east_anoc_cfg = {
+ .name = "qss_pcie_east_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qsm_pcie_east_anoc_cfg },
+};
+
+static struct qcom_icc_node qss_pcie_west_anoc_cfg = {
+ .name = "qss_pcie_west_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qsm_pcie_west_anoc_cfg },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .channels = 12,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &llcc_mc },
+};
+
+static struct qcom_icc_node qns_pcie_east = {
+ .name = "qns_pcie_east",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_hscnoc_pcie_east },
+};
+
+static struct qcom_icc_node qns_pcie_west = {
+ .name = "qns_pcie_west",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_hscnoc_pcie_west },
+};
+
+static struct qcom_icc_node qsm_cfg = {
+ .name = "qsm_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 51,
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_ahb2phy2, &qhs_ahb2phy3,
+ &qhs_av1_enc_cfg, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_crypto0_cfg,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie2_cfg,
+ &qhs_pcie3a_cfg, &qhs_pcie3b_cfg,
+ &qhs_pcie4_cfg, &qhs_pcie5_cfg,
+ &qhs_pcie6_cfg, &qhs_pcie_rscc,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_smmuv3_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg,
+ &qhs_usb3_0_cfg, &qhs_usb3_1_cfg,
+ &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg,
+ &qhs_usb4_0_cfg, &qhs_usb4_1_cfg,
+ &qhs_usb4_2_cfg, &qhs_venus_cfg,
+ &qss_cnoc_pcie_slave_east_cfg, &qss_cnoc_pcie_slave_west_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &qss_pcie_east_anoc_cfg,
+ &qss_pcie_west_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x33000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_node qss_cfg = {
+ .name = "qss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qsm_cfg },
+};
+
+static struct qcom_icc_node qnm_hscnoc_cnoc = {
+ .name = "qnm_hscnoc_cnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 8,
+ .link_nodes = { &qhs_aoss, &qhs_ipc_router,
+ &qhs_soccp, &qhs_tme_cfg,
+ &qns_apss, &qss_cfg,
+ &qxs_boot_imem, &qxs_imem },
+};
+
+static struct qcom_icc_node qns_hscnoc_cnoc = {
+ .name = "qns_hscnoc_cnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_hscnoc_cnoc },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x933000 },
+ .prio = 1,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node alm_pcie_qtc = {
+ .name = "alm_pcie_qtc",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x51f000 },
+ .prio = 3,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x51f080 },
+ .prio = 6,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .channels = 6,
+ .buswidth = 32,
+ .num_links = 4,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_aggre_noc_east = {
+ .name = "qnm_aggre_noc_east",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x934000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .channels = 4,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 4,
+ .port_offsets = { 0x935000, 0x936000, 0x937000, 0x938000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_lpass = {
+ .name = "qnm_lpass",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x939000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 4,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x721000, 0x721080 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 4,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x721100, 0x721180 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 4,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_nsp_noc = {
+ .name = "qnm_nsp_noc",
+ .channels = 4,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 4,
+ .port_offsets = { 0x816000, 0x816080, 0x816100, 0x816180 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_pcie_east = {
+ .name = "qnm_pcie_east",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x93a000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node qnm_pcie_west = {
+ .name = "qnm_pcie_west",
+ .channels = 1,
+ .buswidth = 64,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x721200 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .channels = 1,
+ .buswidth = 64,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x51f100 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qxm_wlan_q6 = {
+ .name = "qxm_wlan_q6",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 4,
+ .link_nodes = { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qns_a4noc_hscnoc = {
+ .name = "qns_a4noc_hscnoc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_aggre_noc_east },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+ .name = "qns_lpass_ag_noc_gemnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_lpass },
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_mnoc_hf },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_mnoc_sf },
+};
+
+static struct qcom_icc_node qns_nsp_hscnoc = {
+ .name = "qns_nsp_hscnoc",
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_nsp_noc },
+};
+
+static struct qcom_icc_node qns_pcie_east_mem_noc = {
+ .name = "qns_pcie_east_mem_noc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_pcie_east },
+};
+
+static struct qcom_icc_node qns_pcie_west_mem_noc = {
+ .name = "qns_pcie_west_mem_noc",
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .link_nodes = { &qnm_pcie_west },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .link_nodes = { &qnm_snoc_sf },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node xm_usb4_0 = {
+ .name = "xm_usb4_0",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node xm_usb4_1 = {
+ .name = "xm_usb4_1",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+ .name = "qnm_lpiaon_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_lpass_ag_noc_gemnoc },
+};
+
+static struct qcom_icc_node qnm_av1_enc = {
+ .name = "qnm_av1_enc",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x30000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x29000, 0x2a000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2b000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x2c000, 0x2d000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_eva = {
+ .name = "qnm_eva",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x34000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x2e000, 0x2f000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video = {
+ .name = "qnm_video",
+ .channels = 4,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 4,
+ .port_offsets = { 0x31000, 0x32000, 0x37000, 0x38000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x33000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x35000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_nsp = {
+ .name = "qnm_nsp",
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qns_nsp_hscnoc },
+};
+
+static struct qcom_icc_node xm_pcie_0 = {
+ .name = "xm_pcie_0",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_pcie_east_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_1 = {
+ .name = "xm_pcie_1",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_pcie_east_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_5 = {
+ .name = "xm_pcie_5",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_pcie_east_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_2 = {
+ .name = "xm_pcie_2",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_3a = {
+ .name = "xm_pcie_3a",
+ .channels = 1,
+ .buswidth = 64,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd200 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_3b = {
+ .name = "xm_pcie_3b",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd400 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_4 = {
+ .name = "xm_pcie_4",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd600 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_6 = {
+ .name = "xm_pcie_6",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd800 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_aggre3_noc = {
+ .name = "qnm_aggre3_noc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_nsi_noc = {
+ .name = "qnm_nsi_noc",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1c000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_oobmss = {
+ .name = "qnm_oobmss",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1b000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_node qns_a3noc_snoc = {
+ .name = "qns_a3noc_snoc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_aggre3_noc },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+ .name = "qns_lpass_aggnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_lpiaon_noc },
+};
+
+static struct qcom_icc_node qns_system_noc = {
+ .name = "qns_system_noc",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = { &qnm_nsi_noc },
+};
+
+static struct qcom_icc_node qns_oobmss_snoc = {
+ .name = "qns_oobmss_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_oobmss },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node qxm_soccp = {
+ .name = "qxm_soccp",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xe000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb3_2 = {
+ .name = "xm_usb3_2",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x8000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb4_2 = {
+ .name = "xm_usb4_2",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x9000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x10000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x11000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x12000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x13000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x18000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x14000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb2_0 = {
+ .name = "xm_usb2_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x15000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb3_mp = {
+ .name = "xm_usb3_mp",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x16000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+ .name = "qnm_lpass_lpinoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_node xm_cpucp = {
+ .name = "xm_cpucp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .link_nodes = { &qns_system_noc, &srvc_nsinoc },
+};
+
+static struct qcom_icc_node xm_mem_sp = {
+ .name = "xm_mem_sp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = { &qns_oobmss_snoc },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+ .name = "qns_lpi_aon_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_lpass_lpinoc },
+};
+
+static struct qcom_icc_node qnm_lpinoc_dsp_qns4m = {
+ .name = "qnm_lpinoc_dsp_qns4m",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_lpi_aon_noc },
+};
+
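+/*
+ * Bus Clock Manager (BCM) definitions: each BCM aggregates the bandwidth
+ * votes of its member nodes and forwards the result to RPMh via the BCM
+ * voter. Keepalive BCMs retain a minimum vote even when otherwise idle.
+ */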
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .enable_mask = BIT(0),
+ .num_nodes = 60,
+ .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+ &qhs_ahb2phy1, &qhs_ahb2phy2,
+ &qhs_ahb2phy3, &qhs_av1_enc_cfg,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_crypto0_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie2_cfg,
+ &qhs_pcie3a_cfg, &qhs_pcie3b_cfg,
+ &qhs_pcie4_cfg, &qhs_pcie5_cfg,
+ &qhs_pcie6_cfg, &qhs_pcie_rscc,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_smmuv3_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg,
+ &qhs_usb3_0_cfg, &qhs_usb3_1_cfg,
+ &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg,
+ &qhs_usb4_0_cfg, &qhs_usb4_1_cfg,
+ &qhs_usb4_2_cfg, &qhs_venus_cfg,
+ &qss_cnoc_pcie_slave_east_cfg, &qss_cnoc_pcie_slave_west_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &qss_pcie_east_anoc_cfg,
+ &qss_pcie_west_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg, &qnm_hscnoc_cnoc,
+ &qhs_aoss, &qhs_ipc_router,
+ &qhs_soccp, &qhs_tme_cfg,
+ &qns_apss, &qss_cfg,
+ &qxs_boot_imem, &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 1,
+ .nodes = { &qhs_display_cfg },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .enable_mask = BIT(0),
+ .num_nodes = 2,
+ .nodes = { &qnm_nsp, &qns_nsp_hscnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+ .name = "LP0",
+ .num_nodes = 2,
+ .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .enable_mask = BIT(0),
+ .num_nodes = 11,
+ .nodes = { &qnm_av1_enc, &qnm_camnoc_hf,
+ &qnm_camnoc_icp, &qnm_camnoc_sf,
+ &qnm_eva, &qnm_mdp,
+ &qnm_vapss_hcp, &qnm_video,
+ &qnm_video_cv_cpu, &qnm_video_v_cpu,
+ &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .vote_scale = 1,
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .vote_scale = 1,
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .vote_scale = 1,
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .enable_mask = BIT(0),
+ .num_nodes = 18,
+ .nodes = { &alm_gpu_tcu, &alm_pcie_qtc,
+ &alm_sys_tcu, &chm_apps,
+ &qnm_aggre_noc_east, &qnm_gpu,
+ &qnm_lpass, &qnm_mnoc_hf,
+ &qnm_mnoc_sf, &qnm_nsp_noc,
+ &qnm_pcie_east, &qnm_pcie_west,
+ &qnm_snoc_sf, &qxm_wlan_q6,
+ &xm_gic, &qns_hscnoc_cnoc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .enable_mask = BIT(0),
+ .num_nodes = 1,
+ .nodes = { &qnm_oobmss },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre3_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .num_nodes = 1,
+ .nodes = { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .num_nodes = 4,
+ .nodes = { &qns_pcie_east_mem_noc, &qnm_hscnoc_pcie_east,
+ &qns_pcie_west_mem_noc, &qnm_hscnoc_pcie_west },
+};
+
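+/*
+ * Per-NoC provider tables: the node arrays are indexed by the master and
+ * slave ids from the DT binding header, and each descriptor ties them to
+ * the BCMs and the regmap layout used for QoS programming.
+ */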
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_SOCCP_PROC] = &qxm_soccp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct regmap_config glymur_aggre1_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre1_noc = {
+ .config = &glymur_aggre1_noc_regmap_config,
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_2] = &xm_usb3_2,
+ [MASTER_USB4_2] = &xm_usb4_2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct regmap_config glymur_aggre2_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14400,
+ .fast_io = true,
+};
+
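+/* QoS registers on this NoC are only reachable with its bus clocks enabled */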
+static const struct qcom_icc_desc glymur_aggre2_noc = {
+ .config = &glymur_aggre2_noc_regmap_config,
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_node * const aggre3_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_USB2] = &xm_usb2_0,
+ [MASTER_USB3_MP] = &xm_usb3_mp,
+ [SLAVE_A3NOC_SNOC] = &qns_a3noc_snoc,
+};
+
+static const struct regmap_config glymur_aggre3_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1d400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre3_noc = {
+ .config = &glymur_aggre3_noc_regmap_config,
+ .nodes = aggre3_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre3_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const aggre4_noc_bcms[] = {
+ &bcm_sn5,
+};
+
+static struct qcom_icc_node * const aggre4_noc_nodes[] = {
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [MASTER_USB4_0] = &xm_usb4_0,
+ [MASTER_USB4_1] = &xm_usb4_1,
+ [SLAVE_A4NOC_HSCNOC] = &qns_a4noc_hscnoc,
+};
+
+static const struct regmap_config glymur_aggre4_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre4_noc = {
+ .config = &glymur_aggre4_noc_regmap_config,
+ .nodes = aggre4_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre4_noc_nodes),
+ .bcms = aggre4_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre4_noc_bcms),
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
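+/* clk_virt has no MMIO region; it only carries the QUP core clock votes */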
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc glymur_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_cfg_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const cnoc_cfg_nodes[] = {
+ [MASTER_CNOC_CFG] = &qsm_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+ [SLAVE_AHB2PHY_3] = &qhs_ahb2phy3,
+ [SLAVE_AV1_ENC_CFG] = &qhs_av1_enc_cfg,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_2_CFG] = &qhs_pcie2_cfg,
+ [SLAVE_PCIE_3A_CFG] = &qhs_pcie3a_cfg,
+ [SLAVE_PCIE_3B_CFG] = &qhs_pcie3b_cfg,
+ [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg,
+ [SLAVE_PCIE_5_CFG] = &qhs_pcie5_cfg,
+ [SLAVE_PCIE_6_CFG] = &qhs_pcie6_cfg,
+ [SLAVE_PCIE_RSCC] = &qhs_pcie_rscc,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB2] = &qhs_usb2_0_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0_cfg,
+ [SLAVE_USB3_1] = &qhs_usb3_1_cfg,
+ [SLAVE_USB3_2] = &qhs_usb3_2_cfg,
+ [SLAVE_USB3_MP] = &qhs_usb3_mp_cfg,
+ [SLAVE_USB4_0] = &qhs_usb4_0_cfg,
+ [SLAVE_USB4_1] = &qhs_usb4_1_cfg,
+ [SLAVE_USB4_2] = &qhs_usb4_2_cfg,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_CNOC_PCIE_SLAVE_EAST_CFG] = &qss_cnoc_pcie_slave_east_cfg,
+ [SLAVE_CNOC_PCIE_SLAVE_WEST_CFG] = &qss_cnoc_pcie_slave_west_cfg,
+ [SLAVE_LPASS_QTB_CFG] = &qss_lpass_qtb_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+ [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg,
+ [SLAVE_PCIE_EAST_ANOC_CFG] = &qss_pcie_east_anoc_cfg,
+ [SLAVE_PCIE_WEST_ANOC_CFG] = &qss_pcie_west_anoc_cfg,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct regmap_config glymur_cnoc_cfg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x6600,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_cnoc_cfg = {
+ .config = &glymur_cnoc_cfg_regmap_config,
+ .nodes = cnoc_cfg_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_cfg_nodes),
+ .bcms = cnoc_cfg_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_cfg_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+ [MASTER_HSCNOC_CNOC] = &qnm_hscnoc_cnoc,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_SOCCP] = &qhs_soccp,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_APPSS] = &qns_apss,
+ [SLAVE_CNOC_CFG] = &qss_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+};
+
+static const struct regmap_config glymur_cnoc_main_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x17080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_cnoc_main = {
+ .config = &glymur_cnoc_main_regmap_config,
+ .nodes = cnoc_main_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+ .bcms = cnoc_main_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+};
+
+static struct qcom_icc_bcm * const hscnoc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+};
+
+static struct qcom_icc_node * const hscnoc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_PCIE_TCU] = &alm_pcie_qtc,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_AGGRE_NOC_EAST] = &qnm_aggre_noc_east,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_LPASS_GEM_NOC] = &qnm_lpass,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_noc,
+ [MASTER_PCIE_EAST] = &qnm_pcie_east,
+ [MASTER_PCIE_WEST] = &qnm_pcie_west,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_WLAN_Q6] = &qxm_wlan_q6,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_HSCNOC_CNOC] = &qns_hscnoc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_PCIE_EAST] = &qns_pcie_east,
+ [SLAVE_PCIE_WEST] = &qns_pcie_west,
+};
+
+static const struct regmap_config glymur_hscnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x93a080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_hscnoc = {
+ .config = &glymur_hscnoc_regmap_config,
+ .nodes = hscnoc_nodes,
+ .num_nodes = ARRAY_SIZE(hscnoc_nodes),
+ .bcms = hscnoc_bcms,
+ .num_bcms = ARRAY_SIZE(hscnoc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+ [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct regmap_config glymur_lpass_ag_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_lpass_ag_noc = {
+ .config = &glymur_lpass_ag_noc_regmap_config,
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+ &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+ [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+ [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct regmap_config glymur_lpass_lpiaon_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x19080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_lpass_lpiaon_noc = {
+ .config = &glymur_lpass_lpiaon_noc_regmap_config,
+ .nodes = lpass_lpiaon_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+ .bcms = lpass_lpiaon_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+ [MASTER_LPASS_PROC] = &qnm_lpinoc_dsp_qns4m,
+ [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct regmap_config glymur_lpass_lpicx_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x44080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_lpass_lpicx_noc = {
+ .config = &glymur_lpass_lpicx_noc_regmap_config,
+ .nodes = lpass_lpicx_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
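+/* mc_virt models the LLCC-to-DDR (EBI) path; ACV and MC0 carry its votes */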
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc glymur_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_AV1_ENC] = &qnm_av1_enc,
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_EVA] = &qnm_eva,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO] = &qnm_video,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct regmap_config glymur_mmss_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5b800,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_mmss_noc = {
+ .config = &glymur_mmss_noc_regmap_config,
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_node * const nsinoc_nodes[] = {
+ [MASTER_CPUCP] = &xm_cpucp,
+ [SLAVE_NSINOC_SYSTEM_NOC] = &qns_system_noc,
+ [SLAVE_SERVICE_NSINOC] = &srvc_nsinoc,
+};
+
+static const struct regmap_config glymur_nsinoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_nsinoc = {
+ .config = &glymur_nsinoc_regmap_config,
+ .nodes = nsinoc_nodes,
+ .num_nodes = ARRAY_SIZE(nsinoc_nodes),
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+ [MASTER_CDSP_PROC] = &qnm_nsp,
+ [SLAVE_NSP0_HSC_NOC] = &qns_nsp_hscnoc,
+};
+
+static const struct regmap_config glymur_nsp_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x21280,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_nsp_noc = {
+ .config = &glymur_nsp_noc_regmap_config,
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_node * const oobm_ss_noc_nodes[] = {
+ [MASTER_OOBMSS_SP_PROC] = &xm_mem_sp,
+ [SLAVE_OOBMSS_SNOC] = &qns_oobmss_snoc,
+};
+
+static const struct regmap_config glymur_oobm_ss_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1e080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_oobm_ss_noc = {
+ .config = &glymur_oobm_ss_noc_regmap_config,
+ .nodes = oobm_ss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(oobm_ss_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const pcie_east_anoc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_east_anoc_nodes[] = {
+ [MASTER_PCIE_EAST_ANOC_CFG] = &qsm_pcie_east_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [MASTER_PCIE_1] = &xm_pcie_1,
+ [MASTER_PCIE_5] = &xm_pcie_5,
+ [SLAVE_PCIE_EAST_MEM_NOC] = &qns_pcie_east_mem_noc,
+ [SLAVE_SERVICE_PCIE_EAST_AGGRE_NOC] = &srvc_pcie_east_aggre_noc,
+};
+
+static const struct regmap_config glymur_pcie_east_anoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf300,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_east_anoc = {
+ .config = &glymur_pcie_east_anoc_regmap_config,
+ .nodes = pcie_east_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_east_anoc_nodes),
+ .bcms = pcie_east_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_east_anoc_bcms),
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const pcie_east_slv_noc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_east_slv_noc_nodes[] = {
+ [MASTER_HSCNOC_PCIE_EAST] = &qnm_hscnoc_pcie_east,
+ [MASTER_CNOC_PCIE_EAST_SLAVE_CFG] = &qsm_cnoc_pcie_east_slave_cfg,
+ [SLAVE_HSCNOC_PCIE_EAST_MS_MPU_CFG] = &qhs_hscnoc_pcie_east_ms_mpu_cfg,
+ [SLAVE_SERVICE_PCIE_EAST] = &srvc_pcie_east,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_PCIE_5] = &xs_pcie_5,
+};
+
+static const struct regmap_config glymur_pcie_east_slv_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_east_slv_noc = {
+ .config = &glymur_pcie_east_slv_noc_regmap_config,
+ .nodes = pcie_east_slv_noc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_east_slv_noc_nodes),
+ .bcms = pcie_east_slv_noc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_east_slv_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_west_anoc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_west_anoc_nodes[] = {
+ [MASTER_PCIE_WEST_ANOC_CFG] = &qsm_pcie_west_anoc_cfg,
+ [MASTER_PCIE_2] = &xm_pcie_2,
+ [MASTER_PCIE_3A] = &xm_pcie_3a,
+ [MASTER_PCIE_3B] = &xm_pcie_3b,
+ [MASTER_PCIE_4] = &xm_pcie_4,
+ [MASTER_PCIE_6] = &xm_pcie_6,
+ [SLAVE_PCIE_WEST_MEM_NOC] = &qns_pcie_west_mem_noc,
+ [SLAVE_SERVICE_PCIE_WEST_AGGRE_NOC] = &srvc_pcie_west_aggre_noc,
+};
+
+static const struct regmap_config glymur_pcie_west_anoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf580,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_west_anoc = {
+ .config = &glymur_pcie_west_anoc_regmap_config,
+ .nodes = pcie_west_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_west_anoc_nodes),
+ .bcms = pcie_west_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_west_anoc_bcms),
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const pcie_west_slv_noc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_west_slv_noc_nodes[] = {
+ [MASTER_HSCNOC_PCIE_WEST] = &qnm_hscnoc_pcie_west,
+ [MASTER_CNOC_PCIE_WEST_SLAVE_CFG] = &qsm_cnoc_pcie_west_slave_cfg,
+ [SLAVE_HSCNOC_PCIE_WEST_MS_MPU_CFG] = &qhs_hscnoc_pcie_west_ms_mpu_cfg,
+ [SLAVE_SERVICE_PCIE_WEST] = &srvc_pcie_west,
+ [SLAVE_PCIE_2] = &xs_pcie_2,
+ [SLAVE_PCIE_3A] = &xs_pcie_3a,
+ [SLAVE_PCIE_3B] = &xs_pcie_3b,
+ [SLAVE_PCIE_4] = &xs_pcie_4,
+ [SLAVE_PCIE_6] = &xs_pcie_6,
+};
+
+static const struct regmap_config glymur_pcie_west_slv_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf180,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_west_slv_noc = {
+ .config = &glymur_pcie_west_slv_noc_regmap_config,
+ .nodes = pcie_west_slv_noc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_west_slv_noc_nodes),
+ .bcms = pcie_west_slv_noc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_west_slv_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_A3NOC_SNOC] = &qnm_aggre3_noc,
+ [MASTER_NSINOC_SNOC] = &qnm_nsi_noc,
+ [MASTER_OOBMSS] = &qnm_oobmss,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct regmap_config glymur_system_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1c080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_system_noc = {
+ .config = &glymur_system_noc_regmap_config,
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
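+/* Map each NoC instance's DT compatible to its provider descriptor */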
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,glymur-aggre1-noc", .data = &glymur_aggre1_noc},
+ { .compatible = "qcom,glymur-aggre2-noc", .data = &glymur_aggre2_noc},
+ { .compatible = "qcom,glymur-aggre3-noc", .data = &glymur_aggre3_noc},
+ { .compatible = "qcom,glymur-aggre4-noc", .data = &glymur_aggre4_noc},
+ { .compatible = "qcom,glymur-clk-virt", .data = &glymur_clk_virt},
+ { .compatible = "qcom,glymur-cnoc-cfg", .data = &glymur_cnoc_cfg},
+ { .compatible = "qcom,glymur-cnoc-main", .data = &glymur_cnoc_main},
+ { .compatible = "qcom,glymur-hscnoc", .data = &glymur_hscnoc},
+ { .compatible = "qcom,glymur-lpass-ag-noc", .data = &glymur_lpass_ag_noc},
+ { .compatible = "qcom,glymur-lpass-lpiaon-noc", .data = &glymur_lpass_lpiaon_noc},
+ { .compatible = "qcom,glymur-lpass-lpicx-noc", .data = &glymur_lpass_lpicx_noc},
+ { .compatible = "qcom,glymur-mc-virt", .data = &glymur_mc_virt},
+ { .compatible = "qcom,glymur-mmss-noc", .data = &glymur_mmss_noc},
+ { .compatible = "qcom,glymur-nsinoc", .data = &glymur_nsinoc},
+ { .compatible = "qcom,glymur-nsp-noc", .data = &glymur_nsp_noc},
+ { .compatible = "qcom,glymur-oobm-ss-noc", .data = &glymur_oobm_ss_noc},
+ { .compatible = "qcom,glymur-pcie-east-anoc", .data = &glymur_pcie_east_anoc},
+ { .compatible = "qcom,glymur-pcie-east-slv-noc", .data = &glymur_pcie_east_slv_noc},
+ { .compatible = "qcom,glymur-pcie-west-anoc", .data = &glymur_pcie_west_anoc},
+ { .compatible = "qcom,glymur-pcie-west-slv-noc", .data = &glymur_pcie_west_slv_noc},
+ { .compatible = "qcom,glymur-system-noc", .data = &glymur_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-glymur",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
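+/*
+ * Register at core_initcall time so interconnect paths are available to
+ * consumers that probe early in boot.
+ */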
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("GLYMUR NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 001404e91041..3b445acefece 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -280,14 +280,10 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
if (!qn)
continue;
- if (desc->alloc_dyn_id) {
- if (!qn->node)
- qn->node = icc_node_create_dyn();
- node = qn->node;
- } else {
- node = icc_node_create(qn->id);
- }
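+ /* may already exist if another provider linked to this node first */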
+ if (!qn->node)
+ qn->node = icc_node_create_dyn();
+ node = qn->node;
if (IS_ERR(node)) {
ret = PTR_ERR(node);
goto err_remove_nodes;
@@ -302,12 +298,8 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
node->data = qn;
icc_node_add(node, provider);
- for (j = 0; j < qn->num_links; j++) {
- if (desc->alloc_dyn_id)
- icc_link_nodes(node, &qn->link_nodes[j]->node);
- else
- icc_link_create(node, qn->links[j]);
- }
+ for (j = 0; j < qn->num_links; j++)
+ icc_link_nodes(node, &qn->link_nodes[j]->node);
data->nodes[i] = node;
}
@@ -316,14 +308,19 @@ int qcom_icc_rpmh_probe(struct platform_device *pdev)
struct resource *res;
void __iomem *base;
- base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
- if (IS_ERR(base))
- goto skip_qos_config;
-
- qp->regmap = devm_regmap_init_mmio(dev, base, desc->config);
- if (IS_ERR(qp->regmap)) {
- dev_info(dev, "Skipping QoS, regmap failed; %ld\n", PTR_ERR(qp->regmap));
- goto skip_qos_config;
+ /* Reuse the parent device's regmap first, if it provides one */
+ qp->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!qp->regmap) {
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ goto skip_qos_config;
+
+ qp->regmap = devm_regmap_init_mmio(dev, base, desc->config);
+ if (IS_ERR(qp->regmap)) {
+ dev_info(dev, "Skipping QoS, regmap failed; %ld\n",
+ PTR_ERR(qp->regmap));
+ goto skip_qos_config;
+ }
}
qp->num_clks = devm_clk_bulk_get_all(qp->dev, &qp->clks);
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index bd8d730249b1..09d8791402dc 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -53,7 +53,7 @@ struct bcm_db {
u8 reserved;
};
-#define MAX_PORTS 2
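+/* Glymur masters expose up to four QoS ports per node */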
+#define MAX_PORTS 4
/**
* struct qcom_icc_qosbox - Qualcomm specific QoS config
@@ -81,8 +81,6 @@ struct qcom_icc_qosbox {
/**
* struct qcom_icc_node - Qualcomm specific interconnect nodes
* @name: the node name used in debugfs
- * @links: an array of nodes where we can go next while traversing
- * @id: a unique node identifier
* @link_nodes: links associated with this node
* @node: icc_node associated with this node
* @num_links: the total number of @links
@@ -96,9 +94,6 @@ struct qcom_icc_qosbox {
*/
struct qcom_icc_node {
const char *name;
- u16 links[MAX_LINKS];
- u16 id;
- struct qcom_icc_node **link_nodes;
struct icc_node *node;
u16 num_links;
u16 channels;
@@ -108,6 +103,7 @@ struct qcom_icc_node {
struct qcom_icc_bcm *bcms[MAX_BCM_PER_NODE];
size_t num_bcms;
const struct qcom_icc_qosbox *qosbox;
+ struct qcom_icc_node *link_nodes[];
};
/**
@@ -158,7 +154,6 @@ struct qcom_icc_desc {
struct qcom_icc_bcm * const *bcms;
size_t num_bcms;
bool qos_requires_clocks;
- bool alloc_dyn_id;
};
int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
diff --git a/drivers/interconnect/qcom/kaanapali.c b/drivers/interconnect/qcom/kaanapali.c
new file mode 100644
index 000000000000..d6e7327bfd7f
--- /dev/null
+++ b/drivers/interconnect/qcom/kaanapali.c
@@ -0,0 +1,1855 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,kaanapali-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
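+/*
+ * Pure slave (endpoint) nodes are defined first so that upstream nodes
+ * can reference them in their link_nodes initializers.
+ */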
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup3_core_slave = {
+ .name = "qup3_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup4_core_slave = {
+ .name = "qup4_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_eva_cfg = {
+ .name = "qhs_eva_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_i2c = {
+ .name = "qhs_i2c",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi0_cfg = {
+ .name = "qhs_i3c_ibi0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_i3c_ibi1_cfg = {
+ .name = "qhs_i3c_ibi1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .channels = 4,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_cfg = {
+ .name = "qhs_pcie_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup3 = {
+ .name = "qhs_qup3",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup4 = {
+ .name = "qhs_qup4",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3 = {
+ .name = "qhs_usb3",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router_fence = {
+ .name = "qhs_ipc_router_fence",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_soccp = {
+ .name = "qhs_soccp",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_apss = {
+ .name = "qns_apss",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qss_ddrss_cfg = {
+ .name = "qss_ddrss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie = {
+ .name = "xs_pcie",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .channels = 4,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_aggre_noc = {
+ .name = "srvc_pcie_aggre_noc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_node qup3_core_master = {
+ .name = "qup3_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qup3_core_slave },
+};
+
+static struct qcom_icc_node qup4_core_master = {
+ .name = "qup4_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qup4_core_slave },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = { &xs_pcie },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &ebi },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+ .name = "qsm_mnoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &srvc_mnoc },
+};
+
+static struct qcom_icc_node qsm_pcie_anoc_cfg = {
+ .name = "qsm_pcie_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &srvc_pcie_aggre_noc },
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+ .name = "qss_mnoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qsm_mnoc_cfg },
+};
+
+static struct qcom_icc_node qss_pcie_anoc_cfg = {
+ .name = "qss_pcie_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qsm_pcie_anoc_cfg },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &llcc_mc },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = { &qnm_gemnoc_pcie },
+};
+
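+/* qsm_cfg: config-NoC entry point fanning out to the peripheral CSR slaves */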
+static struct qcom_icc_node qsm_cfg = {
+ .name = "qsm_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 35,
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_crypto0_cfg, &qhs_display_cfg,
+ &qhs_eva_cfg, &qhs_gpuss_cfg,
+ &qhs_i2c, &qhs_i3c_ibi0_cfg,
+ &qhs_i3c_ibi1_cfg, &qhs_imem_cfg,
+ &qhs_ipc_router, &qhs_mss_cfg,
+ &qhs_pcie_cfg, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup1, &qhs_qup2,
+ &qhs_qup3, &qhs_qup4,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_spss_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_ufs_mem_cfg,
+ &qhs_usb3, &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg, &qss_mnoc_cfg,
+ &qss_pcie_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_node qnm_qpace = {
+ .name = "qnm_qpace",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x14e000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x145000 },
+ .prio = 4,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_node qss_cfg = {
+ .name = "qss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = { &qsm_cfg },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 10,
+ .link_nodes = { &qhs_aoss, &qhs_ipa,
+ &qhs_ipc_router_fence, &qhs_soccp,
+ &qhs_tme_cfg, &qns_apss,
+ &qss_cfg, &qss_ddrss_cfg,
+ &qxs_boot_imem, &qxs_imem },
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_gemnoc_cnoc },
+};
+
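+/* TCU masters: SMMU translation control units issuing page-table walks */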
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x13d000 },
+ .prio = 1,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x13f000 },
+ .prio = 6,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 3,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x31000, 0xb1000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 3,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_node qnm_lpass_gemnoc = {
+ .name = "qnm_lpass_gemnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x141000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 3,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_node qnm_mdsp = {
+ .name = "qnm_mdsp",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x33000, 0xb3000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 3,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x35000, 0xb5000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 3,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_node qnm_nsp_gemnoc = {
+ .name = "qnm_nsp_gemnoc",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x37000, 0xb7000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 3,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x143000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 2,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x147000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 3,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_node qnm_wlan_q6 = {
+ .name = "qnm_wlan_q6",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 3,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+ .name = "qns_lpass_ag_noc_gemnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_lpass_gemnoc },
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_mnoc_hf },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_mnoc_sf },
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qnm_nsp_gemnoc },
+};
+
+static struct qcom_icc_node qns_pcie_gemnoc = {
+ .name = "qns_pcie_gemnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_pcie },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_snoc_sf },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+ .name = "qnm_lpiaon_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_lpass_ag_noc_gemnoc },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x2a000, 0x2b000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_node qnm_camnoc_nrt_icp_sf = {
+ .name = "qnm_camnoc_nrt_icp_sf",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2c000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_camnoc_rt_cdm_sf = {
+ .name = "qnm_camnoc_rt_cdm_sf",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x38000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x2d000, 0x2e000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x2f000, 0x30000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_node qnm_mdss_dcp = {
+ .name = "qnm_mdss_dcp",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x39000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x34000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_eva = {
+ .name = "qnm_video_eva",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x35000, 0x36000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_mvp = {
+ .name = "qnm_video_mvp",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x32000, 0x33000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x37000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_nsp = {
+ .name = "qnm_nsp",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = { &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_node xm_pcie = {
+ .name = "xm_pcie",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 3,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_pcie_gemnoc },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_apss_noc = {
+ .name = "qnm_apss_noc",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1e000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_cnoc_data = {
+ .name = "qnm_cnoc_data",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1f000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+ .name = "qns_lpass_aggnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_lpiaon_noc },
+};
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x36000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node qxm_qup1 = {
+ .name = "qxm_qup1",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x11000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xe000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xf000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb3 = {
+ .name = "xm_usb3",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x10000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x35000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup3 = {
+ .name = "qhm_qup3",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x3c000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup4 = {
+ .name = "qhm_qup4",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x3d000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x37000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node qxm_soccp = {
+ .name = "qxm_soccp",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x3b000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x38000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x39000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x3a000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+ .name = "qnm_lpass_lpinoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+ .name = "qns_lpi_aon_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qnm_lpass_lpinoc },
+};
+
+static struct qcom_icc_node qnm_lpinoc_dsp_qns4m = {
+ .name = "qnm_lpinoc_dsp_qns4m",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = { &qns_lpi_aon_noc },
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .enable_mask = BIT(0),
+ .keepalive = true,
+ .num_nodes = 43,
+ .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+ &qhs_ahb2phy1, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_crypto0_cfg,
+ &qhs_eva_cfg, &qhs_gpuss_cfg,
+ &qhs_i3c_ibi0_cfg, &qhs_i3c_ibi1_cfg,
+ &qhs_imem_cfg, &qhs_ipc_router,
+ &qhs_mss_cfg, &qhs_pcie_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_sdc2,
+ &qhs_sdc4, &qhs_spss_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb3,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qss_mnoc_cfg, &qss_pcie_anoc_cfg,
+ &xs_qdss_stm, &xs_sys_tcu_cfg,
+ &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie,
+ &qhs_aoss, &qhs_ipa,
+ &qhs_ipc_router_fence, &qhs_soccp,
+ &qhs_tme_cfg, &qns_apss,
+ &qss_cfg, &qss_ddrss_cfg,
+ &qxs_boot_imem, &qxs_imem,
+ &xs_pcie },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 6,
+ .nodes = { &qhs_display_cfg, &qhs_i2c,
+ &qhs_qup1, &qhs_qup2,
+ &qhs_qup3, &qhs_qup4 },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .enable_mask = BIT(0),
+ .num_nodes = 2,
+ .nodes = { &qnm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+ .name = "LP0",
+ .num_nodes = 2,
+ .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .enable_mask = BIT(0),
+ .num_nodes = 9,
+ .nodes = { &qnm_camnoc_hf, &qnm_camnoc_nrt_icp_sf,
+ &qnm_camnoc_rt_cdm_sf, &qnm_camnoc_sf,
+ &qnm_vapss_hcp, &qnm_video_cv_cpu,
+ &qnm_video_mvp, &qnm_video_v_cpu,
+ &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qpc0 = {
+ .name = "QPC0",
+ .num_nodes = 1,
+ .nodes = { &qnm_qpace },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup3 = {
+ .name = "QUP3",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup3_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup4 = {
+ .name = "QUP4",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup4_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .enable_mask = BIT(0),
+ .num_nodes = 14,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu,
+ &chm_apps, &qnm_gpu,
+ &qnm_mdsp, &qnm_mnoc_hf,
+ &qnm_mnoc_sf, &qnm_nsp_gemnoc,
+ &qnm_pcie, &qnm_snoc_sf,
+ &qnm_wlan_q6, &xm_gic,
+ &qns_gem_noc_cnoc, &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_gemnoc },
+};
+
+static struct qcom_icc_bcm * const aggre_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_QUP_1] = &qxm_qup1,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3] = &xm_usb3,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_QUP_3] = &qhm_qup3,
+ [MASTER_QUP_4] = &qhm_qup4,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_SOCCP_PROC] = &qxm_soccp,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct regmap_config kaanapali_aggre_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x42400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_aggre_noc = {
+ .config = &kaanapali_aggre_noc_regmap_config,
+ .nodes = aggre_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre_noc_nodes),
+ .bcms = aggre_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre_noc_bcms),
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+ &bcm_qup3,
+ &bcm_qup4,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [MASTER_QUP_CORE_3] = &qup3_core_master,
+ [MASTER_QUP_CORE_4] = &qup4_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+ [SLAVE_QUP_CORE_3] = &qup3_core_slave,
+ [SLAVE_QUP_CORE_4] = &qup4_core_slave,
+};
+
+static const struct qcom_icc_desc kaanapali_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_cfg_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const cnoc_cfg_nodes[] = {
+ [MASTER_CNOC_CFG] = &qsm_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_EVA_CFG] = &qhs_eva_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_I2C] = &qhs_i2c,
+ [SLAVE_I3C_IBI0_CFG] = &qhs_i3c_ibi0_cfg,
+ [SLAVE_I3C_IBI1_CFG] = &qhs_i3c_ibi1_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_PCIE_CFG] = &qhs_pcie_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_QUP_3] = &qhs_qup3,
+ [SLAVE_QUP_4] = &qhs_qup4,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3] = &qhs_usb3,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+ [SLAVE_PCIE_ANOC_CFG] = &qss_pcie_anoc_cfg,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct regmap_config kaanapali_cnoc_cfg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x6200,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_cnoc_cfg = {
+ .config = &kaanapali_cnoc_cfg_regmap_config,
+ .nodes = cnoc_cfg_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_cfg_nodes),
+ .bcms = cnoc_cfg_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_cfg_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_FENCE] = &qhs_ipc_router_fence,
+ [SLAVE_SOCCP] = &qhs_soccp,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_APPSS] = &qns_apss,
+ [SLAVE_CNOC_CFG] = &qss_cfg,
+ [SLAVE_DDRSS_CFG] = &qss_ddrss_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PCIE_0] = &xs_pcie,
+};
+
+static const struct regmap_config kaanapali_cnoc_main_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1a080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_cnoc_main = {
+ .config = &kaanapali_cnoc_main_regmap_config,
+ .nodes = cnoc_main_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+ .bcms = cnoc_main_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_qpc0,
+ &bcm_sh0,
+ &bcm_sh1,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_LPASS_GEM_NOC] = &qnm_lpass_gemnoc,
+ [MASTER_MSS_PROC] = &qnm_mdsp,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_QPACE] = &qnm_qpace,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_WLAN_Q6] = &qnm_wlan_q6,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+};
+
+static const struct regmap_config kaanapali_gem_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x153080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_gem_noc = {
+ .config = &kaanapali_gem_noc_regmap_config,
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+ [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct regmap_config kaanapali_lpass_ag_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_lpass_ag_noc = {
+ .config = &kaanapali_lpass_ag_noc_regmap_config,
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+ &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+ [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+ [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct regmap_config kaanapali_lpass_lpiaon_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x19080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_lpass_lpiaon_noc = {
+ .config = &kaanapali_lpass_lpiaon_noc_regmap_config,
+ .nodes = lpass_lpiaon_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+ .bcms = lpass_lpiaon_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+ [MASTER_LPASS_PROC] = &qnm_lpinoc_dsp_qns4m,
+ [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct regmap_config kaanapali_lpass_lpicx_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x44080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_lpass_lpicx_noc = {
+ .config = &kaanapali_lpass_lpicx_noc_regmap_config,
+ .nodes = lpass_lpicx_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc kaanapali_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_NRT_ICP_SF] = &qnm_camnoc_nrt_icp_sf,
+ [MASTER_CAMNOC_RT_CDM_SF] = &qnm_camnoc_rt_cdm_sf,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_MDSS_DCP] = &qnm_mdss_dcp,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_EVA] = &qnm_video_eva,
+ [MASTER_VIDEO_MVP] = &qnm_video_mvp,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct regmap_config kaanapali_mmss_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5b800,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_mmss_noc = {
+ .config = &kaanapali_mmss_noc_regmap_config,
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+ [MASTER_CDSP_PROC] = &qnm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+};
+
+static const struct regmap_config kaanapali_nsp_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x21280,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_nsp_noc = {
+ .config = &kaanapali_nsp_noc_regmap_config,
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
+ [MASTER_PCIE_ANOC_CFG] = &qsm_pcie_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gemnoc,
+ [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
+};
+
+static const struct regmap_config kaanapali_pcie_anoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_pcie_anoc = {
+ .config = &kaanapali_pcie_anoc_regmap_config,
+ .nodes = pcie_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+ .bcms = pcie_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn2,
+ &bcm_sn3,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_APSS_NOC] = &qnm_apss_noc,
+ [MASTER_CNOC_SNOC] = &qnm_cnoc_data,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct regmap_config kaanapali_system_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc kaanapali_system_noc = {
+ .config = &kaanapali_system_noc_regmap_config,
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,kaanapali-aggre-noc", .data = &kaanapali_aggre_noc },
+ { .compatible = "qcom,kaanapali-clk-virt", .data = &kaanapali_clk_virt },
+ { .compatible = "qcom,kaanapali-cnoc-cfg", .data = &kaanapali_cnoc_cfg },
+ { .compatible = "qcom,kaanapali-cnoc-main", .data = &kaanapali_cnoc_main },
+ { .compatible = "qcom,kaanapali-gem-noc", .data = &kaanapali_gem_noc },
+ { .compatible = "qcom,kaanapali-lpass-ag-noc", .data = &kaanapali_lpass_ag_noc },
+ { .compatible = "qcom,kaanapali-lpass-lpiaon-noc", .data = &kaanapali_lpass_lpiaon_noc },
+ { .compatible = "qcom,kaanapali-lpass-lpicx-noc", .data = &kaanapali_lpass_lpicx_noc },
+ { .compatible = "qcom,kaanapali-mc-virt", .data = &kaanapali_mc_virt },
+ { .compatible = "qcom,kaanapali-mmss-noc", .data = &kaanapali_mmss_noc },
+ { .compatible = "qcom,kaanapali-nsp-noc", .data = &kaanapali_nsp_noc },
+ { .compatible = "qcom,kaanapali-pcie-anoc", .data = &kaanapali_pcie_anoc },
+ { .compatible = "qcom,kaanapali-system-noc", .data = &kaanapali_system_noc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-kaanapali",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("Qualcomm Kaanapali NoC driver");
+MODULE_LICENSE("GPL");
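
For context on how a provider driver like the one above is consumed: client drivers never reference these node tables directly, but vote bandwidth on a path between two endpoints through the generic interconnect API. The sketch below is illustrative only and not part of the patch; the device and the "ufs-ddr" path name are assumptions (the path a consumer sees comes from its own devicetree `interconnects` property), while of_icc_get(), icc_set_bw() and icc_put() are the standard in-kernel API, with bandwidth expressed in kBps.

	/* Hypothetical consumer sketch -- not part of this patch. */
	#include <linux/interconnect.h>

	static int example_vote_mem_bw(struct device *dev)
	{
		struct icc_path *path;
		int ret;

		/* "ufs-ddr" is an assumed path name from the consumer's DT. */
		path = of_icc_get(dev, "ufs-ddr");
		if (IS_ERR(path))
			return PTR_ERR(path);

		/* Vote 200 MB/s average, 800 MB/s peak (both in kBps). */
		ret = icc_set_bw(path, 200000, 800000);
		if (ret) {
			icc_put(path);	/* releasing the path drops the vote */
			return ret;
		}

		/* A real consumer keeps the path for the device's lifetime
		 * and calls icc_put() on remove.
		 */
		return 0;
	}
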
diff --git a/drivers/interconnect/qcom/milos.c b/drivers/interconnect/qcom/milos.c
index 167d479f7764..d010b106728a 100644
--- a/drivers/interconnect/qcom/milos.c
+++ b/drivers/interconnect/qcom/milos.c
@@ -151,7 +151,7 @@ static struct qcom_icc_node qhm_qup1 = {
.buswidth = 4,
.qosbox = &qhm_qup1_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_qosbox xm_ufs_mem_qos = {
@@ -168,7 +168,7 @@ static struct qcom_icc_node xm_ufs_mem = {
.buswidth = 8,
.qosbox = &xm_ufs_mem_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_qosbox xm_usb3_0_qos = {
@@ -185,7 +185,7 @@ static struct qcom_icc_node xm_usb3_0 = {
.buswidth = 8,
.qosbox = &xm_usb3_0_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_qosbox qhm_qdss_bam_qos = {
@@ -202,7 +202,7 @@ static struct qcom_icc_node qhm_qdss_bam = {
.buswidth = 4,
.qosbox = &qhm_qdss_bam_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox qhm_qspi_qos = {
@@ -219,7 +219,7 @@ static struct qcom_icc_node qhm_qspi = {
.buswidth = 4,
.qosbox = &qhm_qspi_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox qhm_qup0_qos = {
@@ -236,7 +236,7 @@ static struct qcom_icc_node qhm_qup0 = {
.buswidth = 4,
.qosbox = &qhm_qup0_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox qxm_crypto_qos = {
@@ -253,7 +253,7 @@ static struct qcom_icc_node qxm_crypto = {
.buswidth = 8,
.qosbox = &qxm_crypto_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox qxm_ipa_qos = {
@@ -270,7 +270,7 @@ static struct qcom_icc_node qxm_ipa = {
.buswidth = 8,
.qosbox = &qxm_ipa_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox xm_qdss_etr_0_qos = {
@@ -287,7 +287,7 @@ static struct qcom_icc_node xm_qdss_etr_0 = {
.buswidth = 8,
.qosbox = &xm_qdss_etr_0_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox xm_qdss_etr_1_qos = {
@@ -304,7 +304,7 @@ static struct qcom_icc_node xm_qdss_etr_1 = {
.buswidth = 8,
.qosbox = &xm_qdss_etr_1_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox xm_sdc1_qos = {
@@ -321,7 +321,7 @@ static struct qcom_icc_node xm_sdc1 = {
.buswidth = 8,
.qosbox = &xm_sdc1_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox xm_sdc2_qos = {
@@ -338,7 +338,7 @@ static struct qcom_icc_node xm_sdc2 = {
.buswidth = 8,
.qosbox = &xm_sdc2_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
@@ -346,7 +346,7 @@ static struct qcom_icc_node qup0_core_master = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qup0_core_slave },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
@@ -354,7 +354,7 @@ static struct qcom_icc_node qup1_core_master = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qup1_core_slave },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qsm_cfg = {
@@ -362,7 +362,7 @@ static struct qcom_icc_node qsm_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 35,
- .link_nodes = (struct qcom_icc_node *[]) { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
&qhs_camera_cfg, &qhs_clk_ctl,
&qhs_cpr_cx, &qhs_cpr_mxa,
&qhs_crypto0_cfg, &qhs_cx_rdpm,
@@ -387,7 +387,7 @@ static struct qcom_icc_node qnm_gemnoc_cnoc = {
.channels = 1,
.buswidth = 16,
.num_links = 14,
- .link_nodes = (struct qcom_icc_node *[]) { &qhs_aoss, &qhs_display_cfg,
+ .link_nodes = { &qhs_aoss, &qhs_display_cfg,
&qhs_ipa, &qhs_ipc_router,
&qhs_pcie0_cfg, &qhs_pcie1_cfg,
&qhs_prng, &qhs_tme_cfg,
@@ -401,7 +401,7 @@ static struct qcom_icc_node qnm_gemnoc_pcie = {
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &xs_pcie_0, &xs_pcie_1 },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_qosbox alm_gpu_tcu_qos = {
@@ -418,7 +418,7 @@ static struct qcom_icc_node alm_gpu_tcu = {
.buswidth = 8,
.qosbox = &alm_gpu_tcu_qos,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox alm_sys_tcu_qos = {
@@ -435,7 +435,7 @@ static struct qcom_icc_node alm_sys_tcu = {
.buswidth = 8,
.qosbox = &alm_sys_tcu_qos,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
@@ -443,7 +443,7 @@ static struct qcom_icc_node chm_apps = {
.channels = 3,
.buswidth = 32,
.num_links = 3,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
&qns_pcie },
};
@@ -461,7 +461,7 @@ static struct qcom_icc_node qnm_gpu = {
.buswidth = 32,
.qosbox = &qnm_gpu_qos,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox qnm_lpass_gemnoc_qos = {
@@ -478,7 +478,7 @@ static struct qcom_icc_node qnm_lpass_gemnoc = {
.buswidth = 16,
.qosbox = &qnm_lpass_gemnoc_qos,
.num_links = 3,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
&qns_pcie },
};
@@ -487,7 +487,7 @@ static struct qcom_icc_node qnm_mdsp = {
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
&qns_pcie },
};
@@ -505,7 +505,7 @@ static struct qcom_icc_node qnm_mnoc_hf = {
.buswidth = 32,
.qosbox = &qnm_mnoc_hf_qos,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox qnm_mnoc_sf_qos = {
@@ -522,7 +522,7 @@ static struct qcom_icc_node qnm_mnoc_sf = {
.buswidth = 32,
.qosbox = &qnm_mnoc_sf_qos,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox qnm_nsp_gemnoc_qos = {
@@ -539,7 +539,7 @@ static struct qcom_icc_node qnm_nsp_gemnoc = {
.buswidth = 32,
.qosbox = &qnm_nsp_gemnoc_qos,
.num_links = 3,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
&qns_pcie },
};
@@ -557,7 +557,7 @@ static struct qcom_icc_node qnm_pcie = {
.buswidth = 8,
.qosbox = &qnm_pcie_qos,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox qnm_snoc_gc_qos = {
@@ -574,7 +574,7 @@ static struct qcom_icc_node qnm_snoc_gc = {
.buswidth = 8,
.qosbox = &qnm_snoc_gc_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_llcc },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_qosbox qnm_snoc_sf_qos = {
@@ -591,7 +591,7 @@ static struct qcom_icc_node qnm_snoc_sf = {
.buswidth = 16,
.qosbox = &qnm_snoc_sf_qos,
.num_links = 3,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
&qns_pcie },
};
@@ -600,7 +600,7 @@ static struct qcom_icc_node qxm_wlan_q6 = {
.channels = 1,
.buswidth = 8,
.num_links = 3,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
&qns_pcie },
};
@@ -609,7 +609,7 @@ static struct qcom_icc_node qxm_lpass_dsp = {
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_lpass_ag_noc_gemnoc },
+ .link_nodes = { &qns_lpass_ag_noc_gemnoc },
};
static struct qcom_icc_node llcc_mc = {
@@ -617,7 +617,7 @@ static struct qcom_icc_node llcc_mc = {
.channels = 2,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &ebi },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_qosbox qnm_camnoc_hf_qos = {
@@ -634,7 +634,7 @@ static struct qcom_icc_node qnm_camnoc_hf = {
.buswidth = 32,
.qosbox = &qnm_camnoc_hf_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_qosbox qnm_camnoc_icp_qos = {
@@ -651,7 +651,7 @@ static struct qcom_icc_node qnm_camnoc_icp = {
.buswidth = 8,
.qosbox = &qnm_camnoc_icp_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_qosbox qnm_camnoc_sf_qos = {
@@ -668,7 +668,7 @@ static struct qcom_icc_node qnm_camnoc_sf = {
.buswidth = 32,
.qosbox = &qnm_camnoc_sf_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_qosbox qnm_mdp_qos = {
@@ -685,7 +685,7 @@ static struct qcom_icc_node qnm_mdp = {
.buswidth = 32,
.qosbox = &qnm_mdp_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_qosbox qnm_video_qos = {
@@ -702,7 +702,7 @@ static struct qcom_icc_node qnm_video = {
.buswidth = 32,
.qosbox = &qnm_video_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qsm_hf_mnoc_cfg = {
@@ -710,7 +710,7 @@ static struct qcom_icc_node qsm_hf_mnoc_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &srvc_mnoc_hf },
+ .link_nodes = { &srvc_mnoc_hf },
};
static struct qcom_icc_node qsm_sf_mnoc_cfg = {
@@ -718,7 +718,7 @@ static struct qcom_icc_node qsm_sf_mnoc_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &srvc_mnoc_sf },
+ .link_nodes = { &srvc_mnoc_sf },
};
static struct qcom_icc_node qxm_nsp = {
@@ -726,7 +726,7 @@ static struct qcom_icc_node qxm_nsp = {
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_nsp_gemnoc },
+ .link_nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_node qsm_pcie_anoc_cfg = {
@@ -734,7 +734,7 @@ static struct qcom_icc_node qsm_pcie_anoc_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &srvc_pcie_aggre_noc },
+ .link_nodes = { &srvc_pcie_aggre_noc },
};
static struct qcom_icc_qosbox xm_pcie3_0_qos = {
@@ -751,7 +751,7 @@ static struct qcom_icc_node xm_pcie3_0 = {
.buswidth = 8,
.qosbox = &xm_pcie3_0_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_mem_noc },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_qosbox xm_pcie3_1_qos = {
@@ -768,7 +768,7 @@ static struct qcom_icc_node xm_pcie3_1 = {
.buswidth = 8,
.qosbox = &xm_pcie3_1_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_mem_noc },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
@@ -776,7 +776,7 @@ static struct qcom_icc_node qnm_aggre1_noc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
@@ -784,7 +784,7 @@ static struct qcom_icc_node qnm_aggre2_noc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_qosbox qnm_apss_noc_qos = {
@@ -801,7 +801,7 @@ static struct qcom_icc_node qnm_apss_noc = {
.buswidth = 4,
.qosbox = &qnm_apss_noc_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_qosbox qnm_cnoc_data_qos = {
@@ -818,7 +818,7 @@ static struct qcom_icc_node qnm_cnoc_data = {
.buswidth = 8,
.qosbox = &qnm_cnoc_data_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_qosbox qxm_pimem_qos = {
@@ -835,7 +835,7 @@ static struct qcom_icc_node qxm_pimem = {
.buswidth = 8,
.qosbox = &qxm_pimem_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_gc },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_qosbox xm_gic_qos = {
@@ -852,7 +852,7 @@ static struct qcom_icc_node xm_gic = {
.buswidth = 8,
.qosbox = &xm_gic_qos,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_gc },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
@@ -860,7 +860,7 @@ static struct qcom_icc_node qns_a1noc_snoc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre1_noc },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_a2noc_snoc = {
@@ -868,7 +868,7 @@ static struct qcom_icc_node qns_a2noc_snoc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre2_noc },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qup0_core_slave = {
@@ -1079,7 +1079,7 @@ static struct qcom_icc_node qss_mnoc_hf_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qsm_hf_mnoc_cfg },
+ .link_nodes = { &qsm_hf_mnoc_cfg },
};
static struct qcom_icc_node qss_mnoc_sf_cfg = {
@@ -1087,7 +1087,7 @@ static struct qcom_icc_node qss_mnoc_sf_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qsm_sf_mnoc_cfg },
+ .link_nodes = { &qsm_sf_mnoc_cfg },
};
static struct qcom_icc_node qss_nsp_qtb_cfg = {
@@ -1102,7 +1102,7 @@ static struct qcom_icc_node qss_pcie_anoc_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qsm_pcie_anoc_cfg },
+ .link_nodes = { &qsm_pcie_anoc_cfg },
};
static struct qcom_icc_node qss_wlan_q6_throttle_cfg = {
@@ -1201,7 +1201,7 @@ static struct qcom_icc_node qss_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qsm_cfg },
+ .link_nodes = { &qsm_cfg },
};
static struct qcom_icc_node qss_ddrss_cfg = {
@@ -1251,7 +1251,7 @@ static struct qcom_icc_node qns_gem_noc_cnoc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_gemnoc_cnoc },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
@@ -1259,7 +1259,7 @@ static struct qcom_icc_node qns_llcc = {
.channels = 2,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &llcc_mc },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
@@ -1267,7 +1267,7 @@ static struct qcom_icc_node qns_pcie = {
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_gemnoc_pcie },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
@@ -1275,7 +1275,7 @@ static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_lpass_gemnoc },
+ .link_nodes = { &qnm_lpass_gemnoc },
};
static struct qcom_icc_node ebi = {
@@ -1290,7 +1290,7 @@ static struct qcom_icc_node qns_mem_noc_hf = {
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_hf },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
@@ -1298,7 +1298,7 @@ static struct qcom_icc_node qns_mem_noc_sf = {
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_sf },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc_hf = {
@@ -1320,7 +1320,7 @@ static struct qcom_icc_node qns_nsp_gemnoc = {
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_nsp_gemnoc },
+ .link_nodes = { &qnm_nsp_gemnoc },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
@@ -1328,7 +1328,7 @@ static struct qcom_icc_node qns_pcie_mem_noc = {
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_pcie },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_pcie_aggre_noc = {
@@ -1343,7 +1343,7 @@ static struct qcom_icc_node qns_gemnoc_gc = {
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_gc },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
@@ -1351,7 +1351,7 @@ static struct qcom_icc_node qns_gemnoc_sf = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_sf },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_bcm bcm_acv = {
@@ -1522,7 +1522,6 @@ static const struct qcom_icc_desc milos_aggre1_noc = {
.config = &milos_aggre1_noc_regmap_config,
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
@@ -1556,7 +1555,6 @@ static const struct qcom_icc_desc milos_aggre2_noc = {
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
@@ -1576,7 +1574,6 @@ static const struct qcom_icc_desc milos_clk_virt = {
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const cnoc_cfg_bcms[] = {
@@ -1637,7 +1634,6 @@ static const struct qcom_icc_desc milos_cnoc_cfg = {
.num_nodes = ARRAY_SIZE(cnoc_cfg_nodes),
.bcms = cnoc_cfg_bcms,
.num_bcms = ARRAY_SIZE(cnoc_cfg_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
@@ -1680,7 +1676,6 @@ static const struct qcom_icc_desc milos_cnoc_main = {
.num_nodes = ARRAY_SIZE(cnoc_main_nodes),
.bcms = cnoc_main_bcms,
.num_bcms = ARRAY_SIZE(cnoc_main_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
@@ -1721,7 +1716,6 @@ static const struct qcom_icc_desc milos_gem_noc = {
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
@@ -1741,7 +1735,6 @@ static const struct qcom_icc_desc milos_lpass_ag_noc = {
.config = &milos_lpass_ag_noc_regmap_config,
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
@@ -1759,7 +1752,6 @@ static const struct qcom_icc_desc milos_mc_virt = {
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
@@ -1795,7 +1787,6 @@ static const struct qcom_icc_desc milos_mmss_noc = {
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
@@ -1821,7 +1812,6 @@ static const struct qcom_icc_desc milos_nsp_noc = {
.num_nodes = ARRAY_SIZE(nsp_noc_nodes),
.bcms = nsp_noc_bcms,
.num_bcms = ARRAY_SIZE(nsp_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
@@ -1850,7 +1840,6 @@ static const struct qcom_icc_desc milos_pcie_anoc = {
.num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
.bcms = pcie_anoc_bcms,
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
@@ -1885,7 +1874,6 @@ static const struct qcom_icc_desc milos_system_noc = {
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
- .alloc_dyn_id = true,
};
static const struct of_device_id qnoc_of_match[] = {
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
index b73566c9b21f..84cfafb22aa1 100644
--- a/drivers/interconnect/qcom/msm8996.c
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -552,6 +552,7 @@ static struct qcom_icc_node mas_venus_vmem = {
static const u16 mas_snoc_pnoc_links[] = {
MSM8996_SLAVE_BLSP_1,
MSM8996_SLAVE_BLSP_2,
+ MSM8996_SLAVE_USB_HS,
MSM8996_SLAVE_SDCC_1,
MSM8996_SLAVE_SDCC_2,
MSM8996_SLAVE_SDCC_4,
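
The qcs615.c conversion that follows replaces numeric node IDs with direct pointers between nodes, which is why it opens with a long run of forward declarations: two file-scope objects that point at each other can only be initialized if each name is in scope before the other's initializer. A minimal standalone sketch of that C pattern (the names here are invented for illustration):

	struct node {
		const char *name;
		struct node *link_nodes[1];
	};

	/* Tentative definition: declares 'b' so 'a' can take its address. */
	static struct node b;

	static struct node a = {
		.name = "a",
		.link_nodes = { &b },	/* legal: &b only needs a declaration */
	};

	static struct node b = {	/* completes the earlier tentative definition */
		.name = "b",
		.link_nodes = { &a },
	};
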
diff --git a/drivers/interconnect/qcom/qcs615.c b/drivers/interconnect/qcom/qcs615.c
index 0549cfcbac64..797956eb6ff5 100644
--- a/drivers/interconnect/qcom/qcs615.c
+++ b/drivers/interconnect/qcom/qcs615.c
@@ -13,1041 +13,992 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "qcs615.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qnm_cnoc;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_emac_avb;
+static struct qcom_icc_node xm_pcie;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_sdc1;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb2;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp;
+static struct qcom_icc_node qxm_camnoc_hf1_uncomp;
+static struct qcom_icc_node qxm_camnoc_sf_uncomp;
+static struct qcom_icc_node qhm_spdm;
+static struct qcom_icc_node qnm_snoc;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node qhm_cnoc;
+static struct qcom_icc_node acm_apps;
+static struct qcom_icc_node acm_gpu_tcu;
+static struct qcom_icc_node acm_sys_tcu;
+static struct qcom_icc_node qhm_gemnoc_cfg;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qhm_mnoc_cfg;
+static struct qcom_icc_node qxm_camnoc_hf0;
+static struct qcom_icc_node qxm_camnoc_hf1;
+static struct qcom_icc_node qxm_camnoc_sf;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node qxm_rot;
+static struct qcom_icc_node qxm_venus0;
+static struct qcom_icc_node qxm_venus_arm9;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_gemnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node qnm_lpass_anoc;
+static struct qcom_icc_node qnm_pcie_anoc;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qns_lpass_snoc;
+static struct qcom_icc_node qns_pcie_snoc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qns_camnoc_uncomp;
+static struct qcom_icc_node qhs_a1_noc_cfg;
+static struct qcom_icc_node qhs_ahb2phy_east;
+static struct qcom_icc_node qhs_ahb2phy_west;
+static struct qcom_icc_node qhs_aop;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_emac_avb_cfg;
+static struct qcom_icc_node qhs_glm;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_mnoc_cfg;
+static struct qcom_icc_node qhs_pcie_config;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_sdc1;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_spdm;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm_east;
+static struct qcom_icc_node qhs_tlmm_south;
+static struct qcom_icc_node qhs_tlmm_west;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb2;
+static struct qcom_icc_node qhs_usb3;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_cnoc_a2noc;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node qhs_dc_noc_gemnoc;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node qns_gem_noc_snoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_sys_pcie;
+static struct qcom_icc_node srvc_gemnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns2_mem_noc;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qns_cnoc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node qns_memnoc_gc;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_pcie;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
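Throughout this series the per-SoC `.id` initializers are dropped and each `.links` array of integer IDs becomes a `.link_nodes` array of node pointers, which is why every converted file now opens with the block of forward declarations above: any node referenced before its definition must be declared first. Endpoint slaves with no outgoing links also drop their explicit `.num_links = 0`, since objects with static storage duration are zero-initialized anyway. A minimal sketch of the shape of the change, using illustrative names rather than real driver nodes:

/* Illustrative names only. Before: link targets named by per-SoC macros. */
static struct qcom_icc_node example_master = {
	.name = "example_master",
	.id = SOC_MASTER_EXAMPLE,	/* macro from the per-SoC header */
	.channels = 1,
	.buswidth = 4,
	.num_links = 1,
	.links = { SOC_SLAVE_EXAMPLE },
};

/* After: link targets are direct pointers, hence the forward declaration. */
static struct qcom_icc_node example_slave;

static struct qcom_icc_node example_master = {
	.name = "example_master",
	.channels = 1,
	.buswidth = 4,
	.num_links = 1,
	.link_nodes = { &example_slave },
};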
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
- .id = QCS615_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = QCS615_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = QCS615_MASTER_QSPI,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = QCS615_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = QCS615_MASTER_BLSP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
- .id = QCS615_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = QCS615_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = QCS615_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_LPASS_SNOC },
+ .link_nodes = { &qns_lpass_snoc },
};
static struct qcom_icc_node xm_emac_avb = {
.name = "xm_emac_avb",
- .id = QCS615_MASTER_EMAC_EVB,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_pcie = {
.name = "xm_pcie",
- .id = QCS615_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_ANOC_PCIE_SNOC },
+ .link_nodes = { &qns_pcie_snoc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = QCS615_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
- .id = QCS615_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = QCS615_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = QCS615_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb2 = {
.name = "xm_usb2",
- .id = QCS615_MASTER_USB2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = QCS615_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
- .id = QCS615_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
.name = "qxm_camnoc_hf1_uncomp",
- .id = QCS615_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
- .id = QCS615_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qhm_spdm = {
.name = "qhm_spdm",
- .id = QCS615_MASTER_SPDM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_SLAVE_CNOC_A2NOC },
+ .link_nodes = { &qns_cnoc_a2noc },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
- .id = QCS615_MASTER_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 39,
- .links = { QCS615_SLAVE_A1NOC_CFG, QCS615_SLAVE_AHB2PHY_EAST,
- QCS615_SLAVE_AHB2PHY_WEST, QCS615_SLAVE_AOP,
- QCS615_SLAVE_AOSS, QCS615_SLAVE_CAMERA_CFG,
- QCS615_SLAVE_CLK_CTL, QCS615_SLAVE_RBCPR_CX_CFG,
- QCS615_SLAVE_RBCPR_MX_CFG, QCS615_SLAVE_CRYPTO_0_CFG,
- QCS615_SLAVE_CNOC_DDRSS, QCS615_SLAVE_DISPLAY_CFG,
- QCS615_SLAVE_EMAC_AVB_CFG, QCS615_SLAVE_GLM,
- QCS615_SLAVE_GFX3D_CFG, QCS615_SLAVE_IMEM_CFG,
- QCS615_SLAVE_IPA_CFG, QCS615_SLAVE_CNOC_MNOC_CFG,
- QCS615_SLAVE_PCIE_CFG, QCS615_SLAVE_PIMEM_CFG,
- QCS615_SLAVE_PRNG, QCS615_SLAVE_QDSS_CFG,
- QCS615_SLAVE_QSPI, QCS615_SLAVE_QUP_0,
- QCS615_SLAVE_QUP_1, QCS615_SLAVE_SDCC_1,
- QCS615_SLAVE_SDCC_2, QCS615_SLAVE_SNOC_CFG,
- QCS615_SLAVE_SPDM_WRAPPER, QCS615_SLAVE_TCSR,
- QCS615_SLAVE_TLMM_EAST, QCS615_SLAVE_TLMM_SOUTH,
- QCS615_SLAVE_TLMM_WEST, QCS615_SLAVE_UFS_MEM_CFG,
- QCS615_SLAVE_USB2, QCS615_SLAVE_USB3,
- QCS615_SLAVE_VENUS_CFG, QCS615_SLAVE_VSENSE_CTRL_CFG,
- QCS615_SLAVE_SERVICE_CNOC },
+ .link_nodes = { &qhs_a1_noc_cfg, &qhs_ahb2phy_east,
+ &qhs_ahb2phy_west, &qhs_aop,
+ &qhs_aoss, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_cpr_cx,
+ &qhs_cpr_mx, &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg, &qhs_display_cfg,
+ &qhs_emac_avb_cfg, &qhs_glm,
+ &qhs_gpuss_cfg, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_mnoc_cfg,
+ &qhs_pcie_config, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_sdc1,
+ &qhs_sdc2, &qhs_snoc_cfg,
+ &qhs_spdm, &qhs_tcsr,
+ &qhs_tlmm_east, &qhs_tlmm_south,
+ &qhs_tlmm_west, &qhs_ufs_mem_cfg,
+ &qhs_usb2, &qhs_usb3,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &srvc_cnoc },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = QCS615_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 40,
- .links = { QCS615_SLAVE_A1NOC_CFG, QCS615_SLAVE_AHB2PHY_EAST,
- QCS615_SLAVE_AHB2PHY_WEST, QCS615_SLAVE_AOP,
- QCS615_SLAVE_AOSS, QCS615_SLAVE_CAMERA_CFG,
- QCS615_SLAVE_CLK_CTL, QCS615_SLAVE_RBCPR_CX_CFG,
- QCS615_SLAVE_RBCPR_MX_CFG, QCS615_SLAVE_CRYPTO_0_CFG,
- QCS615_SLAVE_CNOC_DDRSS, QCS615_SLAVE_DISPLAY_CFG,
- QCS615_SLAVE_EMAC_AVB_CFG, QCS615_SLAVE_GLM,
- QCS615_SLAVE_GFX3D_CFG, QCS615_SLAVE_IMEM_CFG,
- QCS615_SLAVE_IPA_CFG, QCS615_SLAVE_CNOC_MNOC_CFG,
- QCS615_SLAVE_PCIE_CFG, QCS615_SLAVE_PIMEM_CFG,
- QCS615_SLAVE_PRNG, QCS615_SLAVE_QDSS_CFG,
- QCS615_SLAVE_QSPI, QCS615_SLAVE_QUP_0,
- QCS615_SLAVE_QUP_1, QCS615_SLAVE_SDCC_1,
- QCS615_SLAVE_SDCC_2, QCS615_SLAVE_SNOC_CFG,
- QCS615_SLAVE_SPDM_WRAPPER, QCS615_SLAVE_TCSR,
- QCS615_SLAVE_TLMM_EAST, QCS615_SLAVE_TLMM_SOUTH,
- QCS615_SLAVE_TLMM_WEST, QCS615_SLAVE_UFS_MEM_CFG,
- QCS615_SLAVE_USB2, QCS615_SLAVE_USB3,
- QCS615_SLAVE_VENUS_CFG, QCS615_SLAVE_VSENSE_CTRL_CFG,
- QCS615_SLAVE_CNOC_A2NOC, QCS615_SLAVE_SERVICE_CNOC },
+ .link_nodes = { &qhs_a1_noc_cfg, &qhs_ahb2phy_east,
+ &qhs_ahb2phy_west, &qhs_aop,
+ &qhs_aoss, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_cpr_cx,
+ &qhs_cpr_mx, &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg, &qhs_display_cfg,
+ &qhs_emac_avb_cfg, &qhs_glm,
+ &qhs_gpuss_cfg, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_mnoc_cfg,
+ &qhs_pcie_config, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_sdc1,
+ &qhs_sdc2, &qhs_snoc_cfg,
+ &qhs_spdm, &qhs_tcsr,
+ &qhs_tlmm_east, &qhs_tlmm_south,
+ &qhs_tlmm_west, &qhs_ufs_mem_cfg,
+ &qhs_usb2, &qhs_usb3,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qns_cnoc_a2noc, &srvc_cnoc },
};
static struct qcom_icc_node qhm_cnoc = {
.name = "qhm_cnoc",
- .id = QCS615_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { QCS615_SLAVE_DC_NOC_GEMNOC, QCS615_SLAVE_LLCC_CFG },
+ .link_nodes = { &qhs_dc_noc_gemnoc, &qhs_llcc },
};
static struct qcom_icc_node acm_apps = {
.name = "acm_apps",
- .id = QCS615_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { QCS615_SLAVE_GEM_NOC_SNOC, QCS615_SLAVE_LLCC,
- QCS615_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_snoc, &qns_llcc,
+ &qns_sys_pcie },
};
static struct qcom_icc_node acm_gpu_tcu = {
.name = "acm_gpu_tcu",
- .id = QCS615_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { QCS615_SLAVE_GEM_NOC_SNOC, QCS615_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_snoc, &qns_llcc },
};
static struct qcom_icc_node acm_sys_tcu = {
.name = "acm_sys_tcu",
- .id = QCS615_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { QCS615_SLAVE_GEM_NOC_SNOC, QCS615_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_snoc, &qns_llcc },
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
- .id = QCS615_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { QCS615_SLAVE_MSS_PROC_MS_MPU_CFG, QCS615_SLAVE_SERVICE_GEM_NOC },
+ .link_nodes = { &qhs_mdsp_ms_mpu_cfg, &srvc_gemnoc },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = QCS615_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { QCS615_SLAVE_GEM_NOC_SNOC, QCS615_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_snoc, &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = QCS615_MASTER_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = QCS615_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { QCS615_SLAVE_GEM_NOC_SNOC, QCS615_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_snoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = QCS615_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = QCS615_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS615_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = QCS615_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
- .id = QCS615_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qxm_camnoc_hf0 = {
.name = "qxm_camnoc_hf0",
- .id = QCS615_MASTER_CAMNOC_HF0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_hf1 = {
.name = "qxm_camnoc_hf1",
- .id = QCS615_MASTER_CAMNOC_HF1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
- .id = QCS615_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = QCS615_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
- .id = QCS615_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
- .id = QCS615_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
- .id = QCS615_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = QCS615_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = QCS615_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 8,
- .links = { QCS615_SLAVE_APPSS, QCS615_SLAVE_SNOC_CNOC,
- QCS615_SLAVE_SNOC_GEM_NOC_SF, QCS615_SLAVE_IMEM,
- QCS615_SLAVE_PIMEM, QCS615_SLAVE_PCIE_0,
- QCS615_SLAVE_QDSS_STM, QCS615_SLAVE_TCU },
+ .link_nodes = { &qhs_apss, &qns_cnoc,
+ &qns_gemnoc_sf, &qxs_imem,
+ &qxs_pimem, &xs_pcie,
+ &xs_qdss_stm, &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
- .id = QCS615_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
- .links = { QCS615_SLAVE_APPSS, QCS615_SLAVE_SNOC_CNOC,
- QCS615_SLAVE_IMEM, QCS615_SLAVE_PIMEM,
- QCS615_SLAVE_QDSS_STM, QCS615_SLAVE_TCU },
+ .link_nodes = { &qhs_apss, &qns_cnoc,
+ &qxs_imem, &qxs_pimem,
+ &xs_qdss_stm, &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = QCS615_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_SLAVE_PCIE_0 },
+ .link_nodes = { &xs_pcie },
};
static struct qcom_icc_node qnm_lpass_anoc = {
.name = "qnm_lpass_anoc",
- .id = QCS615_MASTER_LPASS_ANOC,
.channels = 1,
.buswidth = 8,
.num_links = 7,
- .links = { QCS615_SLAVE_APPSS, QCS615_SLAVE_SNOC_CNOC,
- QCS615_SLAVE_SNOC_GEM_NOC_SF, QCS615_SLAVE_IMEM,
- QCS615_SLAVE_PIMEM, QCS615_SLAVE_PCIE_0,
- QCS615_SLAVE_QDSS_STM },
+ .link_nodes = { &qhs_apss, &qns_cnoc,
+ &qns_gemnoc_sf, &qxs_imem,
+ &qxs_pimem, &xs_pcie,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_pcie_anoc = {
.name = "qnm_pcie_anoc",
- .id = QCS615_MASTER_ANOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 5,
- .links = { QCS615_SLAVE_APPSS, QCS615_SLAVE_SNOC_CNOC,
- QCS615_SLAVE_SNOC_GEM_NOC_SF, QCS615_SLAVE_IMEM,
- QCS615_SLAVE_QDSS_STM },
+ .link_nodes = { &qhs_apss, &qns_cnoc,
+ &qns_gemnoc_sf, &qxs_imem,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = QCS615_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { QCS615_SLAVE_SNOC_MEM_NOC_GC, QCS615_SLAVE_IMEM },
+ .link_nodes = { &qns_memnoc_gc, &qxs_imem },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = QCS615_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { QCS615_SLAVE_SNOC_MEM_NOC_GC, QCS615_SLAVE_IMEM },
+ .link_nodes = { &qns_memnoc_gc, &qxs_imem },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = QCS615_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS615_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_lpass_snoc = {
.name = "qns_lpass_snoc",
- .id = QCS615_SLAVE_LPASS_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_MASTER_LPASS_ANOC },
+ .link_nodes = { &qnm_lpass_anoc },
};
static struct qcom_icc_node qns_pcie_snoc = {
.name = "qns_pcie_snoc",
- .id = QCS615_SLAVE_ANOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_MASTER_ANOC_PCIE_SNOC },
+ .link_nodes = { &qnm_pcie_anoc },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = QCS615_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
- .id = QCS615_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
- .num_links = 0,
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
- .id = QCS615_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_MASTER_A1NOC_CFG },
+ .link_nodes = { &qhm_a1noc_cfg },
};
static struct qcom_icc_node qhs_ahb2phy_east = {
.name = "qhs_ahb2phy_east",
- .id = QCS615_SLAVE_AHB2PHY_EAST,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy_west = {
.name = "qhs_ahb2phy_west",
- .id = QCS615_SLAVE_AHB2PHY_WEST,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
- .id = QCS615_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = QCS615_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = QCS615_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = QCS615_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = QCS615_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = QCS615_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = QCS615_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = QCS615_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qhm_cnoc },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = QCS615_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_emac_avb_cfg = {
.name = "qhs_emac_avb_cfg",
- .id = QCS615_SLAVE_EMAC_AVB_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
- .id = QCS615_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = QCS615_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = QCS615_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = QCS615_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
- .id = QCS615_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qhm_mnoc_cfg },
};
static struct qcom_icc_node qhs_pcie_config = {
.name = "qhs_pcie_config",
- .id = QCS615_SLAVE_PCIE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = QCS615_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = QCS615_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = QCS615_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = QCS615_SLAVE_QSPI,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = QCS615_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = QCS615_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
- .id = QCS615_SLAVE_SDCC_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = QCS615_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = QCS615_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_spdm = {
.name = "qhs_spdm",
- .id = QCS615_SLAVE_SPDM_WRAPPER,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = QCS615_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm_east = {
.name = "qhs_tlmm_east",
- .id = QCS615_SLAVE_TLMM_EAST,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm_south = {
.name = "qhs_tlmm_south",
- .id = QCS615_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm_west = {
.name = "qhs_tlmm_west",
- .id = QCS615_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = QCS615_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb2 = {
.name = "qhs_usb2",
- .id = QCS615_SLAVE_USB2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
- .id = QCS615_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = QCS615_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = QCS615_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
- .id = QCS615_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_MASTER_CNOC_A2NOC },
+ .link_nodes = { &qnm_cnoc },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = QCS615_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_dc_noc_gemnoc = {
.name = "qhs_dc_noc_gemnoc",
- .id = QCS615_SLAVE_DC_NOC_GEMNOC,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS615_MASTER_GEM_NOC_CFG },
+ .link_nodes = { &qhm_gemnoc_cfg },
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = QCS615_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
- .id = QCS615_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
- .id = QCS615_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_MASTER_GEM_NOC_SNOC },
+ .link_nodes = { &qnm_gemnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = QCS615_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS615_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_sys_pcie = {
.name = "qns_sys_pcie",
- .id = QCS615_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node srvc_gemnoc = {
.name = "srvc_gemnoc",
- .id = QCS615_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = QCS615_SLAVE_EBI1,
.channels = 2,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns2_mem_noc = {
.name = "qns2_mem_noc",
- .id = QCS615_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = QCS615_SLAVE_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS615_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = QCS615_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = QCS615_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
- .id = QCS615_SLAVE_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_MASTER_SNOC_CNOC },
+ .link_nodes = { &qnm_snoc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = QCS615_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS615_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qns_memnoc_gc = {
.name = "qns_memnoc_gc",
- .id = QCS615_SLAVE_SNOC_MEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS615_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = QCS615_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = QCS615_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = QCS615_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie = {
.name = "xs_pcie",
- .id = QCS615_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = QCS615_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = QCS615_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_bcm bcm_acv = {
diff --git a/drivers/interconnect/qcom/qcs615.h b/drivers/interconnect/qcom/qcs615.h
deleted file mode 100644
index 66e66c7e23d4..000000000000
--- a/drivers/interconnect/qcom/qcs615.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_QCS615_H
-#define __DRIVERS_INTERCONNECT_QCOM_QCS615_H
-
-#define QCS615_MASTER_A1NOC_CFG 1
-#define QCS615_MASTER_A1NOC_SNOC 2
-#define QCS615_MASTER_ANOC_PCIE_SNOC 3
-#define QCS615_MASTER_APPSS_PROC 4
-#define QCS615_MASTER_BLSP_1 5
-#define QCS615_MASTER_CAMNOC_HF0 6
-#define QCS615_MASTER_CAMNOC_HF0_UNCOMP 7
-#define QCS615_MASTER_CAMNOC_HF1 8
-#define QCS615_MASTER_CAMNOC_HF1_UNCOMP 9
-#define QCS615_MASTER_CAMNOC_SF 10
-#define QCS615_MASTER_CAMNOC_SF_UNCOMP 11
-#define QCS615_MASTER_CNOC_A2NOC 12
-#define QCS615_MASTER_CNOC_DC_NOC 13
-#define QCS615_MASTER_CNOC_MNOC_CFG 14
-#define QCS615_MASTER_CRYPTO 15
-#define QCS615_MASTER_EMAC_EVB 16
-#define QCS615_MASTER_GEM_NOC_CFG 17
-#define QCS615_MASTER_GEM_NOC_PCIE_SNOC 18
-#define QCS615_MASTER_GEM_NOC_SNOC 19
-#define QCS615_MASTER_GFX3D 20
-#define QCS615_MASTER_GIC 21
-#define QCS615_MASTER_GPU_TCU 22
-#define QCS615_MASTER_IPA 23
-#define QCS615_MASTER_IPA_CORE 24
-#define QCS615_MASTER_LLCC 25
-#define QCS615_MASTER_LPASS_ANOC 26
-#define QCS615_MASTER_MDP0 27
-#define QCS615_MASTER_MNOC_HF_MEM_NOC 28
-#define QCS615_MASTER_MNOC_SF_MEM_NOC 29
-#define QCS615_MASTER_PCIE 30
-#define QCS615_MASTER_PIMEM 31
-#define QCS615_MASTER_QDSS_BAM 32
-#define QCS615_MASTER_QDSS_DAP 33
-#define QCS615_MASTER_QDSS_ETR 34
-#define QCS615_MASTER_QSPI 35
-#define QCS615_MASTER_QUP_0 36
-#define QCS615_MASTER_ROTATOR 37
-#define QCS615_MASTER_SDCC_1 38
-#define QCS615_MASTER_SDCC_2 39
-#define QCS615_MASTER_SNOC_CFG 40
-#define QCS615_MASTER_SNOC_CNOC 41
-#define QCS615_MASTER_SNOC_GC_MEM_NOC 42
-#define QCS615_MASTER_SNOC_SF_MEM_NOC 43
-#define QCS615_MASTER_SPDM 44
-#define QCS615_MASTER_SYS_TCU 45
-#define QCS615_MASTER_UFS_MEM 46
-#define QCS615_MASTER_USB2 47
-#define QCS615_MASTER_USB3_0 48
-#define QCS615_MASTER_VIDEO_P0 49
-#define QCS615_MASTER_VIDEO_PROC 50
-#define QCS615_SLAVE_A1NOC_CFG 51
-#define QCS615_SLAVE_A1NOC_SNOC 52
-#define QCS615_SLAVE_AHB2PHY_EAST 53
-#define QCS615_SLAVE_AHB2PHY_WEST 54
-#define QCS615_SLAVE_ANOC_PCIE_SNOC 55
-#define QCS615_SLAVE_AOP 56
-#define QCS615_SLAVE_AOSS 57
-#define QCS615_SLAVE_APPSS 58
-#define QCS615_SLAVE_CAMERA_CFG 59
-#define QCS615_SLAVE_CAMNOC_UNCOMP 60
-#define QCS615_SLAVE_CLK_CTL 61
-#define QCS615_SLAVE_CNOC_A2NOC 62
-#define QCS615_SLAVE_CNOC_DDRSS 63
-#define QCS615_SLAVE_CNOC_MNOC_CFG 64
-#define QCS615_SLAVE_CRYPTO_0_CFG 65
-#define QCS615_SLAVE_DC_NOC_GEMNOC 66
-#define QCS615_SLAVE_DISPLAY_CFG 67
-#define QCS615_SLAVE_EBI1 68
-#define QCS615_SLAVE_EMAC_AVB_CFG 69
-#define QCS615_SLAVE_GEM_NOC_SNOC 70
-#define QCS615_SLAVE_GFX3D_CFG 71
-#define QCS615_SLAVE_GLM 72
-#define QCS615_SLAVE_IMEM 73
-#define QCS615_SLAVE_IMEM_CFG 74
-#define QCS615_SLAVE_IPA_CFG 75
-#define QCS615_SLAVE_IPA_CORE 76
-#define QCS615_SLAVE_LLCC 77
-#define QCS615_SLAVE_LLCC_CFG 78
-#define QCS615_SLAVE_LPASS_SNOC 79
-#define QCS615_SLAVE_MEM_NOC_PCIE_SNOC 80
-#define QCS615_SLAVE_MNOC_HF_MEM_NOC 81
-#define QCS615_SLAVE_MNOC_SF_MEM_NOC 82
-#define QCS615_SLAVE_MSS_PROC_MS_MPU_CFG 83
-#define QCS615_SLAVE_PCIE_0 84
-#define QCS615_SLAVE_PCIE_CFG 85
-#define QCS615_SLAVE_PIMEM 86
-#define QCS615_SLAVE_PIMEM_CFG 87
-#define QCS615_SLAVE_PRNG 88
-#define QCS615_SLAVE_QDSS_CFG 89
-#define QCS615_SLAVE_QDSS_STM 90
-#define QCS615_SLAVE_QSPI 91
-#define QCS615_SLAVE_QUP_0 92
-#define QCS615_SLAVE_QUP_1 93
-#define QCS615_SLAVE_RBCPR_CX_CFG 94
-#define QCS615_SLAVE_RBCPR_MX_CFG 95
-#define QCS615_SLAVE_SDCC_1 96
-#define QCS615_SLAVE_SDCC_2 97
-#define QCS615_SLAVE_SERVICE_A2NOC 98
-#define QCS615_SLAVE_SERVICE_CNOC 99
-#define QCS615_SLAVE_SERVICE_GEM_NOC 100
-#define QCS615_SLAVE_SERVICE_MNOC 101
-#define QCS615_SLAVE_SERVICE_SNOC 102
-#define QCS615_SLAVE_SNOC_CFG 103
-#define QCS615_SLAVE_SNOC_CNOC 104
-#define QCS615_SLAVE_SNOC_GEM_NOC_SF 105
-#define QCS615_SLAVE_SNOC_MEM_NOC_GC 106
-#define QCS615_SLAVE_SPDM_WRAPPER 107
-#define QCS615_SLAVE_TCSR 108
-#define QCS615_SLAVE_TCU 109
-#define QCS615_SLAVE_TLMM_EAST 110
-#define QCS615_SLAVE_TLMM_SOUTH 111
-#define QCS615_SLAVE_TLMM_WEST 112
-#define QCS615_SLAVE_UFS_MEM_CFG 113
-#define QCS615_SLAVE_USB2 114
-#define QCS615_SLAVE_USB3 115
-#define QCS615_SLAVE_VENUS_CFG 116
-#define QCS615_SLAVE_VSENSE_CTRL_CFG 117
-
-#endif
-
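With the `.id` macros no longer referenced, the per-SoC header has nothing left to define and is deleted outright; node IDs are instead assigned by the common provider code when the nodes are registered. A rough sketch of the idea, with a hypothetical helper rather than the actual icc-rpmh code: once registration has handed every node an id, a numeric link table can be derived from the pointer table.

/*
 * Hypothetical sketch only, not the real icc-rpmh implementation:
 * derive numeric link ids from link_nodes after the core has
 * assigned each registered node an id.
 */
static void fill_links_from_nodes(struct qcom_icc_node *node)
{
	int i;

	for (i = 0; i < node->num_links; i++)
		node->links[i] = node->link_nodes[i]->id;
}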
diff --git a/drivers/interconnect/qcom/qcs8300.c b/drivers/interconnect/qcom/qcs8300.c
index e7a1b2fc69ba..70a377bbcf29 100644
--- a/drivers/interconnect/qcom/qcs8300.c
+++ b/drivers/interconnect/qcom/qcs8300.c
@@ -13,1465 +13,1378 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "qcs8300.h"
+
+static struct qcom_icc_node qxm_qup3;
+static struct qcom_icc_node xm_emac_0;
+static struct qcom_icc_node xm_sdc1;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb2_2;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qnm_cnoc_datapath;
+static struct qcom_icc_node qxm_crypto_0;
+static struct qcom_icc_node qxm_crypto_1;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_qdss_etr_0;
+static struct qcom_icc_node xm_qdss_etr_1;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qup3_core_master;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node qnm_cnoc_dc_noc;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_pcie_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_cmpnoc0;
+static struct qcom_icc_node qnm_gemnoc_cfg;
+static struct qcom_icc_node qnm_gpdsp_sail;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qnm_sailss_md0;
+static struct qcom_icc_node qxm_dsp0;
+static struct qcom_icc_node qhm_config_noc;
+static struct qcom_icc_node qxm_lpass_dsp;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_icp;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_mdp0_0;
+static struct qcom_icc_node qnm_mdp0_1;
+static struct qcom_icc_node qnm_mnoc_hf_cfg;
+static struct qcom_icc_node qnm_mnoc_sf_cfg;
+static struct qcom_icc_node qnm_video0;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qnm_video_v_cpu;
+static struct qcom_icc_node qhm_nsp_noc_config;
+static struct qcom_icc_node qxm_nsp;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node qhm_gic;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_lpass_noc;
+static struct qcom_icc_node qnm_snoc_cfg;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qup3_core_slave;
+static struct qcom_icc_node qhs_ahb2phy2;
+static struct qcom_icc_node qhs_ahb2phy3;
+static struct qcom_icc_node qhs_anoc_throttle_cfg;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qhs_boot_rom;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_camera_nrt_throttle_cfg;
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute0_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_cpr_nspcx;
+static struct qcom_icc_node qhs_cpr_nsphmx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_display0_cfg;
+static struct qcom_icc_node qhs_display0_rt_throttle_cfg;
+static struct qcom_icc_node qhs_emac0_cfg;
+static struct qcom_icc_node qhs_gp_dsp0_cfg;
+static struct qcom_icc_node qhs_gpdsp0_throttle_cfg;
+static struct qcom_icc_node qhs_gpu_tcu_throttle_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_hwkm;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_lpass_cfg;
+static struct qcom_icc_node qhs_lpass_throttle_cfg;
+static struct qcom_icc_node qhs_mx_rdpm;
+static struct qcom_icc_node qhs_mxc_rdpm;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pcie_tcu_throttle_cfg;
+static struct qcom_icc_node qhs_pcie_throttle_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_pke_wrapper_cfg;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qm_cfg;
+static struct qcom_icc_node qhs_qm_mpu_cfg;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup3;
+static struct qcom_icc_node qhs_sail_throttle_cfg;
+static struct qcom_icc_node qhs_sdc1;
+static struct qcom_icc_node qhs_security;
+static struct qcom_icc_node qhs_snoc_throttle_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_tsc_cfg;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb2_0;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_venus_cvp_throttle_cfg;
+static struct qcom_icc_node qhs_venus_v_cpu_throttle_cfg;
+static struct qcom_icc_node qhs_venus_vcodec_throttle_cfg;
+static struct qcom_icc_node qns_ddrss_cfg;
+static struct qcom_icc_node qns_gpdsp_noc_cfg;
+static struct qcom_icc_node qns_mnoc_hf_cfg;
+static struct qcom_icc_node qns_mnoc_sf_cfg;
+static struct qcom_icc_node qns_pcie_anoc_cfg;
+static struct qcom_icc_node qns_snoc_cfg;
+static struct qcom_icc_node qxs_boot_imem;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qns_gemnoc;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node srvc_even_gemnoc;
+static struct qcom_icc_node srvc_odd_gemnoc;
+static struct qcom_icc_node srvc_sys_gemnoc;
+static struct qcom_icc_node srvc_sys_gemnoc_2;
+static struct qcom_icc_node qns_gp_dsp_sail_noc;
+static struct qcom_icc_node qhs_lpass_core;
+static struct qcom_icc_node qhs_lpass_lpi;
+static struct qcom_icc_node qhs_lpass_mpu;
+static struct qcom_icc_node qhs_lpass_top;
+static struct qcom_icc_node qns_sysnoc;
+static struct qcom_icc_node srvc_niu_aml_noc;
+static struct qcom_icc_node srvc_niu_lpass_agnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc_hf;
+static struct qcom_icc_node srvc_mnoc_sf;
+static struct qcom_icc_node qns_hcp;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node service_nsp_noc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node srvc_snoc;
static struct qcom_icc_node qxm_qup3 = {
.name = "qxm_qup3",
- .id = QCS8300_MASTER_QUP_3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emac_0 = {
.name = "xm_emac_0",
- .id = QCS8300_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
- .id = QCS8300_MASTER_SDC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = QCS8300_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb2_2 = {
.name = "xm_usb2_2",
- .id = QCS8300_MASTER_USB2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = QCS8300_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = QCS8300_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = QCS8300_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = QCS8300_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_cnoc_datapath = {
.name = "qnm_cnoc_datapath",
- .id = QCS8300_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto_0 = {
.name = "qxm_crypto_0",
- .id = QCS8300_MASTER_CRYPTO_CORE0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto_1 = {
.name = "qxm_crypto_1",
- .id = QCS8300_MASTER_CRYPTO_CORE1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = QCS8300_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
- .id = QCS8300_MASTER_QDSS_ETR_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
- .id = QCS8300_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = QCS8300_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = QCS8300_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qup3_core_master = {
.name = "qup3_core_master",
- .id = QCS8300_MASTER_QUP_CORE_3,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_QUP_CORE_3 },
+ .link_nodes = { &qup3_core_slave },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = QCS8300_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 71,
- .links = { QCS8300_SLAVE_AHB2PHY_2, QCS8300_SLAVE_AHB2PHY_3,
- QCS8300_SLAVE_ANOC_THROTTLE_CFG, QCS8300_SLAVE_AOSS,
- QCS8300_SLAVE_APPSS, QCS8300_SLAVE_BOOT_ROM,
- QCS8300_SLAVE_CAMERA_CFG, QCS8300_SLAVE_CAMERA_NRT_THROTTLE_CFG,
- QCS8300_SLAVE_CAMERA_RT_THROTTLE_CFG, QCS8300_SLAVE_CLK_CTL,
- QCS8300_SLAVE_CDSP_CFG, QCS8300_SLAVE_RBCPR_CX_CFG,
- QCS8300_SLAVE_RBCPR_MMCX_CFG, QCS8300_SLAVE_RBCPR_MX_CFG,
- QCS8300_SLAVE_CPR_NSPCX, QCS8300_SLAVE_CPR_NSPHMX,
- QCS8300_SLAVE_CRYPTO_0_CFG, QCS8300_SLAVE_CX_RDPM,
- QCS8300_SLAVE_DISPLAY_CFG, QCS8300_SLAVE_DISPLAY_RT_THROTTLE_CFG,
- QCS8300_SLAVE_EMAC_CFG, QCS8300_SLAVE_GP_DSP0_CFG,
- QCS8300_SLAVE_GPDSP0_THROTTLE_CFG, QCS8300_SLAVE_GPU_TCU_THROTTLE_CFG,
- QCS8300_SLAVE_GFX3D_CFG, QCS8300_SLAVE_HWKM,
- QCS8300_SLAVE_IMEM_CFG, QCS8300_SLAVE_IPA_CFG,
- QCS8300_SLAVE_IPC_ROUTER_CFG, QCS8300_SLAVE_LPASS,
- QCS8300_SLAVE_LPASS_THROTTLE_CFG, QCS8300_SLAVE_MX_RDPM,
- QCS8300_SLAVE_MXC_RDPM, QCS8300_SLAVE_PCIE_0_CFG,
- QCS8300_SLAVE_PCIE_1_CFG, QCS8300_SLAVE_PCIE_TCU_THROTTLE_CFG,
- QCS8300_SLAVE_PCIE_THROTTLE_CFG, QCS8300_SLAVE_PDM,
- QCS8300_SLAVE_PIMEM_CFG, QCS8300_SLAVE_PKA_WRAPPER_CFG,
- QCS8300_SLAVE_QDSS_CFG, QCS8300_SLAVE_QM_CFG,
- QCS8300_SLAVE_QM_MPU_CFG, QCS8300_SLAVE_QUP_0,
- QCS8300_SLAVE_QUP_1, QCS8300_SLAVE_QUP_3,
- QCS8300_SLAVE_SAIL_THROTTLE_CFG, QCS8300_SLAVE_SDC1,
- QCS8300_SLAVE_SECURITY, QCS8300_SLAVE_SNOC_THROTTLE_CFG,
- QCS8300_SLAVE_TCSR, QCS8300_SLAVE_TLMM,
- QCS8300_SLAVE_TSC_CFG, QCS8300_SLAVE_UFS_MEM_CFG,
- QCS8300_SLAVE_USB2, QCS8300_SLAVE_USB3_0,
- QCS8300_SLAVE_VENUS_CFG, QCS8300_SLAVE_VENUS_CVP_THROTTLE_CFG,
- QCS8300_SLAVE_VENUS_V_CPU_THROTTLE_CFG,
- QCS8300_SLAVE_VENUS_VCODEC_THROTTLE_CFG,
- QCS8300_SLAVE_DDRSS_CFG, QCS8300_SLAVE_GPDSP_NOC_CFG,
- QCS8300_SLAVE_CNOC_MNOC_HF_CFG, QCS8300_SLAVE_CNOC_MNOC_SF_CFG,
- QCS8300_SLAVE_PCIE_ANOC_CFG, QCS8300_SLAVE_SNOC_CFG,
- QCS8300_SLAVE_BOOT_IMEM, QCS8300_SLAVE_IMEM,
- QCS8300_SLAVE_PIMEM, QCS8300_SLAVE_QDSS_STM,
- QCS8300_SLAVE_TCU },
+ .link_nodes = { &qhs_ahb2phy2, &qhs_ahb2phy3,
+ &qhs_anoc_throttle_cfg, &qhs_aoss,
+ &qhs_apss, &qhs_boot_rom,
+ &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg,
+ &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl,
+ &qhs_compute0_cfg, &qhs_cpr_cx,
+ &qhs_cpr_mmcx, &qhs_cpr_mx,
+ &qhs_cpr_nspcx, &qhs_cpr_nsphmx,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display0_cfg, &qhs_display0_rt_throttle_cfg,
+ &qhs_emac0_cfg, &qhs_gp_dsp0_cfg,
+ &qhs_gpdsp0_throttle_cfg, &qhs_gpu_tcu_throttle_cfg,
+ &qhs_gpuss_cfg, &qhs_hwkm,
+ &qhs_imem_cfg, &qhs_ipa,
+ &qhs_ipc_router, &qhs_lpass_cfg,
+ &qhs_lpass_throttle_cfg, &qhs_mx_rdpm,
+ &qhs_mxc_rdpm, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie_tcu_throttle_cfg,
+ &qhs_pcie_throttle_cfg, &qhs_pdm,
+ &qhs_pimem_cfg, &qhs_pke_wrapper_cfg,
+ &qhs_qdss_cfg, &qhs_qm_cfg,
+ &qhs_qm_mpu_cfg, &qhs_qup0,
+ &qhs_qup1, &qhs_qup3,
+ &qhs_sail_throttle_cfg, &qhs_sdc1,
+ &qhs_security, &qhs_snoc_throttle_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_tsc_cfg, &qhs_ufs_mem_cfg,
+ &qhs_usb2_0, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_venus_cvp_throttle_cfg,
+ &qhs_venus_v_cpu_throttle_cfg,
+ &qhs_venus_vcodec_throttle_cfg,
+ &qns_ddrss_cfg, &qns_gpdsp_noc_cfg,
+ &qns_mnoc_hf_cfg, &qns_mnoc_sf_cfg,
+ &qns_pcie_anoc_cfg, &qns_snoc_cfg,
+ &qxs_boot_imem, &qxs_imem,
+ &qxs_pimem, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = QCS8300_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { QCS8300_SLAVE_PCIE_0, QCS8300_SLAVE_PCIE_1 },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
.name = "qnm_cnoc_dc_noc",
- .id = QCS8300_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { QCS8300_SLAVE_LLCC_CFG, QCS8300_SLAVE_GEM_NOC_CFG },
+ .link_nodes = { &qhs_llcc, &qns_gemnoc },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = QCS8300_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_pcie_tcu = {
.name = "alm_pcie_tcu",
- .id = QCS8300_MASTER_PCIE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = QCS8300_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = QCS8300_MASTER_APPSS_PROC,
.channels = 4,
.buswidth = 32,
.num_links = 3,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC,
- QCS8300_SLAVE_GEM_NOC_PCIE_CNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_cmpnoc0 = {
.name = "qnm_cmpnoc0",
- .id = QCS8300_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
- .id = QCS8300_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 4,
- .links = { QCS8300_SLAVE_SERVICE_GEM_NOC_1, QCS8300_SLAVE_SERVICE_GEM_NOC_2,
- QCS8300_SLAVE_SERVICE_GEM_NOC, QCS8300_SLAVE_SERVICE_GEM_NOC2 },
+ .link_nodes = { &srvc_even_gemnoc, &srvc_odd_gemnoc,
+ &srvc_sys_gemnoc, &srvc_sys_gemnoc_2 },
};
static struct qcom_icc_node qnm_gpdsp_sail = {
.name = "qnm_gpdsp_sail",
- .id = QCS8300_MASTER_GPDSP_SAIL,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = QCS8300_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = QCS8300_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { QCS8300_SLAVE_LLCC, QCS8300_SLAVE_GEM_NOC_PCIE_CNOC },
+ .link_nodes = { &qns_llcc, &qns_pcie },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = QCS8300_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC,
- QCS8300_SLAVE_GEM_NOC_PCIE_CNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = QCS8300_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = QCS8300_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = QCS8300_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { QCS8300_SLAVE_GEM_NOC_CNOC, QCS8300_SLAVE_LLCC,
- QCS8300_SLAVE_GEM_NOC_PCIE_CNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_sailss_md0 = {
.name = "qnm_sailss_md0",
- .id = QCS8300_MASTER_SAILSS_MD0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_SLAVE_GP_DSP_SAIL_NOC },
+ .link_nodes = { &qns_gp_dsp_sail_noc },
};
static struct qcom_icc_node qxm_dsp0 = {
.name = "qxm_dsp0",
- .id = QCS8300_MASTER_DSP0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_SLAVE_GP_DSP_SAIL_NOC },
+ .link_nodes = { &qns_gp_dsp_sail_noc },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
- .id = QCS8300_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
- .links = { QCS8300_SLAVE_LPASS_CORE_CFG, QCS8300_SLAVE_LPASS_LPI_CFG,
- QCS8300_SLAVE_LPASS_MPU_CFG, QCS8300_SLAVE_LPASS_TOP_CFG,
- QCS8300_SLAVE_SERVICES_LPASS_AML_NOC, QCS8300_SLAVE_SERVICE_LPASS_AG_NOC },
+ .link_nodes = { &qhs_lpass_core, &qhs_lpass_lpi,
+ &qhs_lpass_mpu, &qhs_lpass_top,
+ &srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node qxm_lpass_dsp = {
.name = "qxm_lpass_dsp",
- .id = QCS8300_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 4,
- .links = { QCS8300_SLAVE_LPASS_TOP_CFG, QCS8300_SLAVE_LPASS_SNOC,
- QCS8300_SLAVE_SERVICES_LPASS_AML_NOC, QCS8300_SLAVE_SERVICE_LPASS_AG_NOC },
+ .link_nodes = { &qhs_lpass_top, &qns_sysnoc,
+ &srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = QCS8300_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = QCS8300_MASTER_CAMNOC_HF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
- .id = QCS8300_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = QCS8300_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_mdp0_0 = {
.name = "qnm_mdp0_0",
- .id = QCS8300_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp0_1 = {
.name = "qnm_mdp0_1",
- .id = QCS8300_MASTER_MDP1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mnoc_hf_cfg = {
.name = "qnm_mnoc_hf_cfg",
- .id = QCS8300_MASTER_CNOC_MNOC_HF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_SERVICE_MNOC_HF },
+ .link_nodes = { &srvc_mnoc_hf },
};
static struct qcom_icc_node qnm_mnoc_sf_cfg = {
.name = "qnm_mnoc_sf_cfg",
- .id = QCS8300_MASTER_CNOC_MNOC_SF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_SERVICE_MNOC_SF },
+ .link_nodes = { &srvc_mnoc_sf },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
- .id = QCS8300_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = QCS8300_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
- .id = QCS8300_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
- .id = QCS8300_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_SERVICE_NSP_NOC },
+ .link_nodes = { &service_nsp_noc },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
- .id = QCS8300_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { QCS8300_SLAVE_HCP_A, QCS8300_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_hcp, &qns_nsp_gemnoc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = QCS8300_MASTER_PCIE_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = QCS8300_MASTER_PCIE_1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
- .id = QCS8300_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = QCS8300_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = QCS8300_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_lpass_noc = {
.name = "qnm_lpass_noc",
- .id = QCS8300_MASTER_LPASS_ANOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
- .id = QCS8300_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = QCS8300_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = QCS8300_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = QCS8300_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = QCS8300_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = QCS8300_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = QCS8300_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup3_core_slave = {
.name = "qup3_core_slave",
- .id = QCS8300_SLAVE_QUP_CORE_3,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
- .id = QCS8300_SLAVE_AHB2PHY_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy3 = {
.name = "qhs_ahb2phy3",
- .id = QCS8300_SLAVE_AHB2PHY_3,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_anoc_throttle_cfg = {
.name = "qhs_anoc_throttle_cfg",
- .id = QCS8300_SLAVE_ANOC_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = QCS8300_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = QCS8300_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_boot_rom = {
.name = "qhs_boot_rom",
- .id = QCS8300_SLAVE_BOOT_ROM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = QCS8300_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = {
.name = "qhs_camera_nrt_throttle_cfg",
- .id = QCS8300_SLAVE_CAMERA_NRT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
.name = "qhs_camera_rt_throttle_cfg",
- .id = QCS8300_SLAVE_CAMERA_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = QCS8300_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_compute0_cfg = {
.name = "qhs_compute0_cfg",
- .id = QCS8300_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_MASTER_CDSP_NOC_CFG },
+ .link_nodes = { &qhm_nsp_noc_config },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = QCS8300_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = QCS8300_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = QCS8300_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_nspcx = {
.name = "qhs_cpr_nspcx",
- .id = QCS8300_SLAVE_CPR_NSPCX,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_nsphmx = {
.name = "qhs_cpr_nsphmx",
- .id = QCS8300_SLAVE_CPR_NSPHMX,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = QCS8300_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = QCS8300_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_display0_cfg = {
.name = "qhs_display0_cfg",
- .id = QCS8300_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_display0_rt_throttle_cfg = {
.name = "qhs_display0_rt_throttle_cfg",
- .id = QCS8300_SLAVE_DISPLAY_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_emac0_cfg = {
.name = "qhs_emac0_cfg",
- .id = QCS8300_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gp_dsp0_cfg = {
.name = "qhs_gp_dsp0_cfg",
- .id = QCS8300_SLAVE_GP_DSP0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpdsp0_throttle_cfg = {
.name = "qhs_gpdsp0_throttle_cfg",
- .id = QCS8300_SLAVE_GPDSP0_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpu_tcu_throttle_cfg = {
.name = "qhs_gpu_tcu_throttle_cfg",
- .id = QCS8300_SLAVE_GPU_TCU_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = QCS8300_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_hwkm = {
.name = "qhs_hwkm",
- .id = QCS8300_SLAVE_HWKM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = QCS8300_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = QCS8300_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = QCS8300_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
- .id = QCS8300_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_MASTER_CNOC_LPASS_AG_NOC },
+ .link_nodes = { &qhm_config_noc },
};
static struct qcom_icc_node qhs_lpass_throttle_cfg = {
.name = "qhs_lpass_throttle_cfg",
- .id = QCS8300_SLAVE_LPASS_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
- .id = QCS8300_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mxc_rdpm = {
.name = "qhs_mxc_rdpm",
- .id = QCS8300_SLAVE_MXC_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = QCS8300_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = QCS8300_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie_tcu_throttle_cfg = {
.name = "qhs_pcie_tcu_throttle_cfg",
- .id = QCS8300_SLAVE_PCIE_TCU_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie_throttle_cfg = {
.name = "qhs_pcie_throttle_cfg",
- .id = QCS8300_SLAVE_PCIE_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = QCS8300_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = QCS8300_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pke_wrapper_cfg = {
.name = "qhs_pke_wrapper_cfg",
- .id = QCS8300_SLAVE_PKA_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = QCS8300_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qm_cfg = {
.name = "qhs_qm_cfg",
- .id = QCS8300_SLAVE_QM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qm_mpu_cfg = {
.name = "qhs_qm_mpu_cfg",
- .id = QCS8300_SLAVE_QM_MPU_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = QCS8300_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = QCS8300_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup3 = {
.name = "qhs_qup3",
- .id = QCS8300_SLAVE_QUP_3,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sail_throttle_cfg = {
.name = "qhs_sail_throttle_cfg",
- .id = QCS8300_SLAVE_SAIL_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
- .id = QCS8300_SLAVE_SDC1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
- .id = QCS8300_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_snoc_throttle_cfg = {
.name = "qhs_snoc_throttle_cfg",
- .id = QCS8300_SLAVE_SNOC_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = QCS8300_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = QCS8300_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tsc_cfg = {
.name = "qhs_tsc_cfg",
- .id = QCS8300_SLAVE_TSC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = QCS8300_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb2_0 = {
.name = "qhs_usb2_0",
- .id = QCS8300_SLAVE_USB2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = QCS8300_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = QCS8300_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cvp_throttle_cfg = {
.name = "qhs_venus_cvp_throttle_cfg",
- .id = QCS8300_SLAVE_VENUS_CVP_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_v_cpu_throttle_cfg = {
.name = "qhs_venus_v_cpu_throttle_cfg",
- .id = QCS8300_SLAVE_VENUS_V_CPU_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_vcodec_throttle_cfg = {
.name = "qhs_venus_vcodec_throttle_cfg",
- .id = QCS8300_SLAVE_VENUS_VCODEC_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
- .id = QCS8300_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qnm_cnoc_dc_noc },
};
static struct qcom_icc_node qns_gpdsp_noc_cfg = {
.name = "qns_gpdsp_noc_cfg",
- .id = QCS8300_SLAVE_GPDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mnoc_hf_cfg = {
.name = "qns_mnoc_hf_cfg",
- .id = QCS8300_SLAVE_CNOC_MNOC_HF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_MASTER_CNOC_MNOC_HF_CFG },
+ .link_nodes = { &qnm_mnoc_hf_cfg },
};
static struct qcom_icc_node qns_mnoc_sf_cfg = {
.name = "qns_mnoc_sf_cfg",
- .id = QCS8300_SLAVE_CNOC_MNOC_SF_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_MASTER_CNOC_MNOC_SF_CFG },
+ .link_nodes = { &qnm_mnoc_sf_cfg },
};
static struct qcom_icc_node qns_pcie_anoc_cfg = {
.name = "qns_pcie_anoc_cfg",
- .id = QCS8300_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
- .id = QCS8300_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_MASTER_SNOC_CFG },
+ .link_nodes = { &qnm_snoc_cfg },
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
- .id = QCS8300_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = QCS8300_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = QCS8300_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = QCS8300_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = QCS8300_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 32,
- .num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = QCS8300_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = QCS8300_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = QCS8300_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
- .id = QCS8300_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QCS8300_MASTER_GEM_NOC_CFG },
+ .link_nodes = { &qnm_gemnoc_cfg },
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = QCS8300_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = QCS8300_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = QCS8300_SLAVE_GEM_NOC_PCIE_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
- .id = QCS8300_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
- .id = QCS8300_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
- .id = QCS8300_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_sys_gemnoc_2 = {
.name = "srvc_sys_gemnoc_2",
- .id = QCS8300_SLAVE_SERVICE_GEM_NOC2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gp_dsp_sail_noc = {
.name = "qns_gp_dsp_sail_noc",
- .id = QCS8300_SLAVE_GP_DSP_SAIL_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_MASTER_GPDSP_SAIL },
+ .link_nodes = { &qnm_gpdsp_sail },
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
- .id = QCS8300_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
- .id = QCS8300_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
- .id = QCS8300_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
- .id = QCS8300_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_sysnoc = {
.name = "qns_sysnoc",
- .id = QCS8300_SLAVE_LPASS_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_MASTER_LPASS_ANOC },
+ .link_nodes = { &qnm_lpass_noc },
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
- .id = QCS8300_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
- .id = QCS8300_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = QCS8300_SLAVE_EBI1,
.channels = 8,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = QCS8300_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = QCS8300_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc_hf = {
.name = "srvc_mnoc_hf",
- .id = QCS8300_SLAVE_SERVICE_MNOC_HF,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_mnoc_sf = {
.name = "srvc_mnoc_sf",
- .id = QCS8300_SLAVE_SERVICE_MNOC_SF,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_hcp = {
.name = "qns_hcp",
- .id = QCS8300_SLAVE_HCP_A,
.channels = 2,
.buswidth = 32,
- .num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = QCS8300_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_cmpnoc0 },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
- .id = QCS8300_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = QCS8300_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { QCS8300_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = QCS8300_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QCS8300_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = QCS8300_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QCS8300_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = QCS8300_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_bcm bcm_acv = {
diff --git a/drivers/interconnect/qcom/qcs8300.h b/drivers/interconnect/qcom/qcs8300.h
deleted file mode 100644
index 6b9e2b424c2a..000000000000
--- a/drivers/interconnect/qcom/qcs8300.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_QCS8300_H
-#define __DRIVERS_INTERCONNECT_QCOM_QCS8300_H
-
-#define QCS8300_MASTER_GPU_TCU 0
-#define QCS8300_MASTER_PCIE_TCU 1
-#define QCS8300_MASTER_SYS_TCU 2
-#define QCS8300_MASTER_APPSS_PROC 3
-#define QCS8300_MASTER_LLCC 4
-#define QCS8300_MASTER_CNOC_LPASS_AG_NOC 5
-#define QCS8300_MASTER_GIC_AHB 6
-#define QCS8300_MASTER_CDSP_NOC_CFG 7
-#define QCS8300_MASTER_QDSS_BAM 8
-#define QCS8300_MASTER_QUP_0 9
-#define QCS8300_MASTER_QUP_1 10
-#define QCS8300_MASTER_A1NOC_SNOC 11
-#define QCS8300_MASTER_A2NOC_SNOC 12
-#define QCS8300_MASTER_CAMNOC_HF 13
-#define QCS8300_MASTER_CAMNOC_ICP 14
-#define QCS8300_MASTER_CAMNOC_SF 15
-#define QCS8300_MASTER_COMPUTE_NOC 16
-#define QCS8300_MASTER_CNOC_A2NOC 17
-#define QCS8300_MASTER_CNOC_DC_NOC 18
-#define QCS8300_MASTER_GEM_NOC_CFG 19
-#define QCS8300_MASTER_GEM_NOC_CNOC 20
-#define QCS8300_MASTER_GEM_NOC_PCIE_SNOC 21
-#define QCS8300_MASTER_GPDSP_SAIL 22
-#define QCS8300_MASTER_GFX3D 23
-#define QCS8300_MASTER_LPASS_ANOC 24
-#define QCS8300_MASTER_MDP0 25
-#define QCS8300_MASTER_MDP1 26
-#define QCS8300_MASTER_MNOC_HF_MEM_NOC 27
-#define QCS8300_MASTER_CNOC_MNOC_HF_CFG 28
-#define QCS8300_MASTER_MNOC_SF_MEM_NOC 29
-#define QCS8300_MASTER_CNOC_MNOC_SF_CFG 30
-#define QCS8300_MASTER_ANOC_PCIE_GEM_NOC 31
-#define QCS8300_MASTER_SAILSS_MD0 32
-#define QCS8300_MASTER_SNOC_CFG 33
-#define QCS8300_MASTER_SNOC_GC_MEM_NOC 34
-#define QCS8300_MASTER_SNOC_SF_MEM_NOC 35
-#define QCS8300_MASTER_VIDEO_P0 36
-#define QCS8300_MASTER_VIDEO_PROC 37
-#define QCS8300_MASTER_VIDEO_V_PROC 38
-#define QCS8300_MASTER_QUP_CORE_0 39
-#define QCS8300_MASTER_QUP_CORE_1 40
-#define QCS8300_MASTER_QUP_CORE_3 41
-#define QCS8300_MASTER_CRYPTO_CORE0 42
-#define QCS8300_MASTER_CRYPTO_CORE1 43
-#define QCS8300_MASTER_DSP0 44
-#define QCS8300_MASTER_IPA 45
-#define QCS8300_MASTER_LPASS_PROC 46
-#define QCS8300_MASTER_CDSP_PROC 47
-#define QCS8300_MASTER_PIMEM 48
-#define QCS8300_MASTER_QUP_3 49
-#define QCS8300_MASTER_EMAC 50
-#define QCS8300_MASTER_GIC 51
-#define QCS8300_MASTER_PCIE_0 52
-#define QCS8300_MASTER_PCIE_1 53
-#define QCS8300_MASTER_QDSS_ETR_0 54
-#define QCS8300_MASTER_QDSS_ETR_1 55
-#define QCS8300_MASTER_SDC 56
-#define QCS8300_MASTER_UFS_MEM 57
-#define QCS8300_MASTER_USB2 58
-#define QCS8300_MASTER_USB3_0 59
-#define QCS8300_SLAVE_EBI1 60
-#define QCS8300_SLAVE_AHB2PHY_2 61
-#define QCS8300_SLAVE_AHB2PHY_3 62
-#define QCS8300_SLAVE_ANOC_THROTTLE_CFG 63
-#define QCS8300_SLAVE_AOSS 64
-#define QCS8300_SLAVE_APPSS 65
-#define QCS8300_SLAVE_BOOT_ROM 66
-#define QCS8300_SLAVE_CAMERA_CFG 67
-#define QCS8300_SLAVE_CAMERA_NRT_THROTTLE_CFG 68
-#define QCS8300_SLAVE_CAMERA_RT_THROTTLE_CFG 69
-#define QCS8300_SLAVE_CLK_CTL 70
-#define QCS8300_SLAVE_CDSP_CFG 71
-#define QCS8300_SLAVE_RBCPR_CX_CFG 72
-#define QCS8300_SLAVE_RBCPR_MMCX_CFG 73
-#define QCS8300_SLAVE_RBCPR_MX_CFG 74
-#define QCS8300_SLAVE_CPR_NSPCX 75
-#define QCS8300_SLAVE_CPR_NSPHMX 76
-#define QCS8300_SLAVE_CRYPTO_0_CFG 77
-#define QCS8300_SLAVE_CX_RDPM 78
-#define QCS8300_SLAVE_DISPLAY_CFG 79
-#define QCS8300_SLAVE_DISPLAY_RT_THROTTLE_CFG 80
-#define QCS8300_SLAVE_EMAC_CFG 81
-#define QCS8300_SLAVE_GP_DSP0_CFG 82
-#define QCS8300_SLAVE_GPDSP0_THROTTLE_CFG 83
-#define QCS8300_SLAVE_GPU_TCU_THROTTLE_CFG 84
-#define QCS8300_SLAVE_GFX3D_CFG 85
-#define QCS8300_SLAVE_HWKM 86
-#define QCS8300_SLAVE_IMEM_CFG 87
-#define QCS8300_SLAVE_IPA_CFG 88
-#define QCS8300_SLAVE_IPC_ROUTER_CFG 89
-#define QCS8300_SLAVE_LLCC_CFG 90
-#define QCS8300_SLAVE_LPASS 91
-#define QCS8300_SLAVE_LPASS_CORE_CFG 92
-#define QCS8300_SLAVE_LPASS_LPI_CFG 93
-#define QCS8300_SLAVE_LPASS_MPU_CFG 94
-#define QCS8300_SLAVE_LPASS_THROTTLE_CFG 95
-#define QCS8300_SLAVE_LPASS_TOP_CFG 96
-#define QCS8300_SLAVE_MX_RDPM 97
-#define QCS8300_SLAVE_MXC_RDPM 98
-#define QCS8300_SLAVE_PCIE_0_CFG 99
-#define QCS8300_SLAVE_PCIE_1_CFG 100
-#define QCS8300_SLAVE_PCIE_TCU_THROTTLE_CFG 101
-#define QCS8300_SLAVE_PCIE_THROTTLE_CFG 102
-#define QCS8300_SLAVE_PDM 103
-#define QCS8300_SLAVE_PIMEM_CFG 104
-#define QCS8300_SLAVE_PKA_WRAPPER_CFG 105
-#define QCS8300_SLAVE_QDSS_CFG 106
-#define QCS8300_SLAVE_QM_CFG 107
-#define QCS8300_SLAVE_QM_MPU_CFG 108
-#define QCS8300_SLAVE_QUP_0 109
-#define QCS8300_SLAVE_QUP_1 110
-#define QCS8300_SLAVE_QUP_3 111
-#define QCS8300_SLAVE_SAIL_THROTTLE_CFG 112
-#define QCS8300_SLAVE_SDC1 113
-#define QCS8300_SLAVE_SECURITY 114
-#define QCS8300_SLAVE_SNOC_THROTTLE_CFG 115
-#define QCS8300_SLAVE_TCSR 116
-#define QCS8300_SLAVE_TLMM 117
-#define QCS8300_SLAVE_TSC_CFG 118
-#define QCS8300_SLAVE_UFS_MEM_CFG 119
-#define QCS8300_SLAVE_USB2 120
-#define QCS8300_SLAVE_USB3_0 121
-#define QCS8300_SLAVE_VENUS_CFG 122
-#define QCS8300_SLAVE_VENUS_CVP_THROTTLE_CFG 123
-#define QCS8300_SLAVE_VENUS_V_CPU_THROTTLE_CFG 124
-#define QCS8300_SLAVE_VENUS_VCODEC_THROTTLE_CFG 125
-#define QCS8300_SLAVE_A1NOC_SNOC 126
-#define QCS8300_SLAVE_A2NOC_SNOC 127
-#define QCS8300_SLAVE_DDRSS_CFG 128
-#define QCS8300_SLAVE_GEM_NOC_CNOC 129
-#define QCS8300_SLAVE_GEM_NOC_CFG 130
-#define QCS8300_SLAVE_SNOC_GEM_NOC_GC 131
-#define QCS8300_SLAVE_SNOC_GEM_NOC_SF 132
-#define QCS8300_SLAVE_GP_DSP_SAIL_NOC 133
-#define QCS8300_SLAVE_GPDSP_NOC_CFG 134
-#define QCS8300_SLAVE_HCP_A 135
-#define QCS8300_SLAVE_LLCC 136
-#define QCS8300_SLAVE_MNOC_HF_MEM_NOC 137
-#define QCS8300_SLAVE_MNOC_SF_MEM_NOC 138
-#define QCS8300_SLAVE_CNOC_MNOC_HF_CFG 139
-#define QCS8300_SLAVE_CNOC_MNOC_SF_CFG 140
-#define QCS8300_SLAVE_CDSP_MEM_NOC 141
-#define QCS8300_SLAVE_GEM_NOC_PCIE_CNOC 142
-#define QCS8300_SLAVE_PCIE_ANOC_CFG 143
-#define QCS8300_SLAVE_ANOC_PCIE_GEM_NOC 144
-#define QCS8300_SLAVE_SNOC_CFG 145
-#define QCS8300_SLAVE_LPASS_SNOC 146
-#define QCS8300_SLAVE_QUP_CORE_0 147
-#define QCS8300_SLAVE_QUP_CORE_1 148
-#define QCS8300_SLAVE_QUP_CORE_3 149
-#define QCS8300_SLAVE_BOOT_IMEM 150
-#define QCS8300_SLAVE_IMEM 151
-#define QCS8300_SLAVE_PIMEM 152
-#define QCS8300_SLAVE_SERVICE_NSP_NOC 153
-#define QCS8300_SLAVE_SERVICE_GEM_NOC_1 154
-#define QCS8300_SLAVE_SERVICE_MNOC_HF 155
-#define QCS8300_SLAVE_SERVICE_MNOC_SF 156
-#define QCS8300_SLAVE_SERVICES_LPASS_AML_NOC 157
-#define QCS8300_SLAVE_SERVICE_LPASS_AG_NOC 158
-#define QCS8300_SLAVE_SERVICE_GEM_NOC_2 159
-#define QCS8300_SLAVE_SERVICE_SNOC 160
-#define QCS8300_SLAVE_SERVICE_GEM_NOC 161
-#define QCS8300_SLAVE_SERVICE_GEM_NOC2 162
-#define QCS8300_SLAVE_PCIE_0 163
-#define QCS8300_SLAVE_PCIE_1 164
-#define QCS8300_SLAVE_QDSS_STM 165
-#define QCS8300_SLAVE_TCU 166
-
-#endif
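
The qcs8300 hunks above repeat one mechanical pattern: each node drops its per-SoC .id define, the integer .links table becomes a .link_nodes array of direct pointers, and the now-unreferenced ID header is deleted. Below is a minimal sketch of that pattern with an assumed, trimmed-down field set; the real struct qcom_icc_node in drivers/interconnect/qcom/icc-rpmh.h also carries QoS, BCM and voter state, and MAX_LINKS here is only a placeholder bound.

/*
 * Sketch only: trimmed-down node struct (assumption, not the real
 * icc-rpmh.h definition) showing how direct pointers replace ID tables.
 */
#include <stddef.h>
#include <stdint.h>

#define MAX_LINKS 128	/* placeholder bound, assumed */

struct qcom_icc_node {
	const char *name;
	uint16_t channels;
	uint16_t buswidth;
	size_t num_links;
	struct qcom_icc_node *link_nodes[MAX_LINKS];
};

/* Tentative definition: lets the master point at a slave defined later. */
static struct qcom_icc_node qns_gemnoc_sf;

static struct qcom_icc_node qhm_gic_sketch = {
	.name = "qhm_gic",
	.channels = 1,
	.buswidth = 4,
	.num_links = 1,
	/* Before: .id = <SoC>_MASTER_GIC_AHB, .links = { <SoC>_SLAVE_... } */
	.link_nodes = { &qns_gemnoc_sf },
};

static struct qcom_icc_node qns_gemnoc_sf = {
	.name = "qns_gemnoc_sf",
	.channels = 1,
	.buswidth = 16,
};

The tentative definition trick (a plain static declaration followed later by the initialized definition, valid C99) is how mutually referencing masters and slaves can point at each other without the ID indirection, and it is why the qdu1000 conversion below opens with a block of forward declarations.
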
diff --git a/drivers/interconnect/qcom/qdu1000.c b/drivers/interconnect/qcom/qdu1000.c
index a7392eb73d4a..0006413241dc 100644
--- a/drivers/interconnect/qcom/qdu1000.c
+++ b/drivers/interconnect/qcom/qdu1000.c
@@ -15,756 +15,710 @@
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
-#include "qdu1000.h"
+
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_ecpri_dma;
+static struct qcom_icc_node qnm_fec_2_gemnoc;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qxm_mdsp;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qhm_gic;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qpic;
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qhm_system_noc_cfg;
+static struct qcom_icc_node qnm_aggre_noc;
+static struct qcom_icc_node qnm_aggre_noc_gsi;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_modem_slave;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ecpri_gsi;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_ecpri_dma;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node xm_pcie;
+static struct qcom_icc_node xm_qdss_etr0;
+static struct qcom_icc_node xm_qdss_etr1;
+static struct qcom_icc_node xm_sdc;
+static struct qcom_icc_node xm_usb3;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_modem_slave;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qhs_ahb2phy0_south;
+static struct qcom_icc_node qhs_ahb2phy1_north;
+static struct qcom_icc_node qhs_ahb2phy2_east;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_crypto_cfg;
+static struct qcom_icc_node qhs_ecpri_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_pcie_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qpic;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_smbus_cfg;
+static struct qcom_icc_node qhs_system_noc_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_tme_cfg;
+static struct qcom_icc_node qhs_tsc_cfg;
+static struct qcom_icc_node qhs_usb3;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qns_anoc_snoc_gsi;
+static struct qcom_icc_node qns_ddrss_cfg;
+static struct qcom_icc_node qns_ecpri_gemnoc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node qns_modem;
+static struct qcom_icc_node qns_pcie_gemnoc;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_system_noc;
+static struct qcom_icc_node xs_ethernet_ss;
+static struct qcom_icc_node xs_pcie;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = QDU1000_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = QDU1000_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = QDU1000_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = QDU1000_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 4,
- .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
- QDU1000_SLAVE_GEMNOC_MODEM_CNOC, QDU1000_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_modem_slave, &qns_pcie },
};
static struct qcom_icc_node qnm_ecpri_dma = {
.name = "qnm_ecpri_dma",
- .id = QDU1000_MASTER_GEMNOC_ECPRI_DMA,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_fec_2_gemnoc = {
.name = "qnm_fec_2_gemnoc",
- .id = QDU1000_MASTER_FEC_2_GEMNOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = QDU1000_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 64,
.num_links = 3,
- .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
- QDU1000_SLAVE_GEMNOC_MODEM_CNOC
- },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_modem_slave },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = QDU1000_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = QDU1000_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 4,
- .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
- QDU1000_SLAVE_GEMNOC_MODEM_CNOC, QDU1000_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_modem_slave, &qns_pcie },
};
static struct qcom_icc_node qxm_mdsp = {
.name = "qxm_mdsp",
- .id = QDU1000_MASTER_MSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
- QDU1000_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = QDU1000_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
- .id = QDU1000_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = QDU1000_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qhm_qpic = {
.name = "qhm_qpic",
- .id = QDU1000_MASTER_QPIC,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = QDU1000_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = QDU1000_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = QDU1000_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_system_noc_cfg = {
.name = "qhm_system_noc_cfg",
- .id = QDU1000_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_system_noc },
};
static struct qcom_icc_node qnm_aggre_noc = {
.name = "qnm_aggre_noc",
- .id = QDU1000_MASTER_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre_noc_gsi = {
.name = "qnm_aggre_noc_gsi",
- .id = QDU1000_MASTER_ANOC_GSI,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = QDU1000_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 36,
- .links = { QDU1000_SLAVE_AHB2PHY_SOUTH, QDU1000_SLAVE_AHB2PHY_NORTH,
- QDU1000_SLAVE_AHB2PHY_EAST, QDU1000_SLAVE_AOSS,
- QDU1000_SLAVE_CLK_CTL, QDU1000_SLAVE_RBCPR_CX_CFG,
- QDU1000_SLAVE_RBCPR_MX_CFG, QDU1000_SLAVE_CRYPTO_0_CFG,
- QDU1000_SLAVE_ECPRI_CFG, QDU1000_SLAVE_IMEM_CFG,
- QDU1000_SLAVE_IPC_ROUTER_CFG, QDU1000_SLAVE_CNOC_MSS,
- QDU1000_SLAVE_PCIE_CFG, QDU1000_SLAVE_PDM,
- QDU1000_SLAVE_PIMEM_CFG, QDU1000_SLAVE_PRNG,
- QDU1000_SLAVE_QDSS_CFG, QDU1000_SLAVE_QPIC,
- QDU1000_SLAVE_QSPI_0, QDU1000_SLAVE_QUP_0,
- QDU1000_SLAVE_QUP_1, QDU1000_SLAVE_SDCC_2,
- QDU1000_SLAVE_SMBUS_CFG, QDU1000_SLAVE_SNOC_CFG,
- QDU1000_SLAVE_TCSR, QDU1000_SLAVE_TLMM,
- QDU1000_SLAVE_TME_CFG, QDU1000_SLAVE_TSC_CFG,
- QDU1000_SLAVE_USB3_0, QDU1000_SLAVE_VSENSE_CTRL_CFG,
- QDU1000_SLAVE_DDRSS_CFG, QDU1000_SLAVE_IMEM,
- QDU1000_SLAVE_PIMEM, QDU1000_SLAVE_ETHERNET_SS,
- QDU1000_SLAVE_QDSS_STM, QDU1000_SLAVE_TCU
- },
+ .link_nodes = { &qhs_ahb2phy0_south, &qhs_ahb2phy1_north,
+ &qhs_ahb2phy2_east, &qhs_aoss,
+ &qhs_clk_ctl, &qhs_cpr_cx,
+ &qhs_cpr_mx, &qhs_crypto_cfg,
+ &qhs_ecpri_cfg, &qhs_imem_cfg,
+ &qhs_ipc_router, &qhs_mss_cfg,
+ &qhs_pcie_cfg, &qhs_pdm,
+ &qhs_pimem_cfg, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qpic,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_sdc2,
+ &qhs_smbus_cfg, &qhs_system_noc_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_tme_cfg, &qhs_tsc_cfg,
+ &qhs_usb3, &qhs_vsense_ctrl_cfg,
+ &qns_ddrss_cfg, &qxs_imem,
+ &qxs_pimem, &xs_ethernet_ss,
+ &xs_qdss_stm, &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_modem_slave = {
.name = "qnm_gemnoc_modem_slave",
- .id = QDU1000_MASTER_GEMNOC_MODEM_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QDU1000_SLAVE_MODEM_OFFLINE },
+ .link_nodes = { &qns_modem },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = QDU1000_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QDU1000_SLAVE_PCIE_0 },
+ .link_nodes = { &xs_pcie },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = QDU1000_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qxm_ecpri_gsi = {
.name = "qxm_ecpri_gsi",
- .id = QDU1000_MASTER_ECPRI_GSI,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { QDU1000_SLAVE_ANOC_SNOC_GSI, QDU1000_SLAVE_PCIE_0 },
+ .link_nodes = { &qns_anoc_snoc_gsi, &xs_pcie },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = QDU1000_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_ecpri_dma = {
.name = "xm_ecpri_dma",
- .id = QDU1000_MASTER_SNOC_ECPRI_DMA,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { QDU1000_SLAVE_ECPRI_GEMNOC, QDU1000_SLAVE_PCIE_0 },
+ .link_nodes = { &qns_ecpri_gemnoc, &xs_pcie },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = QDU1000_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_pcie = {
.name = "xm_pcie",
- .id = QDU1000_MASTER_PCIE,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { QDU1000_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gemnoc },
};
static struct qcom_icc_node xm_qdss_etr0 = {
.name = "xm_qdss_etr0",
- .id = QDU1000_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node xm_qdss_etr1 = {
.name = "xm_qdss_etr1",
- .id = QDU1000_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node xm_sdc = {
.name = "xm_sdc",
- .id = QDU1000_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3 = {
.name = "xm_usb3",
- .id = QDU1000_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = QDU1000_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = QDU1000_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = QDU1000_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QDU1000_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = QDU1000_SLAVE_LLCC,
.channels = 8,
.buswidth = 16,
.num_links = 1,
- .links = { QDU1000_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_modem_slave = {
.name = "qns_modem_slave",
- .id = QDU1000_SLAVE_GEMNOC_MODEM_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QDU1000_MASTER_GEMNOC_MODEM_CNOC },
+ .link_nodes = { &qnm_gemnoc_modem_slave },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = QDU1000_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QDU1000_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = QDU1000_SLAVE_EBI1,
.channels = 8,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0_south = {
.name = "qhs_ahb2phy0_south",
- .id = QDU1000_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1_north = {
.name = "qhs_ahb2phy1_north",
- .id = QDU1000_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy2_east = {
.name = "qhs_ahb2phy2_east",
- .id = QDU1000_SLAVE_AHB2PHY_EAST,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = QDU1000_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = QDU1000_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = QDU1000_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = QDU1000_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto_cfg = {
.name = "qhs_crypto_cfg",
- .id = QDU1000_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ecpri_cfg = {
.name = "qhs_ecpri_cfg",
- .id = QDU1000_SLAVE_ECPRI_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = QDU1000_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = QDU1000_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = QDU1000_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie_cfg = {
.name = "qhs_pcie_cfg",
- .id = QDU1000_SLAVE_PCIE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = QDU1000_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = QDU1000_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = QDU1000_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = QDU1000_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qpic = {
.name = "qhs_qpic",
- .id = QDU1000_SLAVE_QPIC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = QDU1000_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = QDU1000_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = QDU1000_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = QDU1000_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_smbus_cfg = {
.name = "qhs_smbus_cfg",
- .id = QDU1000_SLAVE_SMBUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_system_noc_cfg = {
.name = "qhs_system_noc_cfg",
- .id = QDU1000_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { QDU1000_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_system_noc_cfg },
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = QDU1000_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = QDU1000_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
- .id = QDU1000_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tsc_cfg = {
.name = "qhs_tsc_cfg",
- .id = QDU1000_SLAVE_TSC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
- .id = QDU1000_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = QDU1000_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = QDU1000_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_MASTER_ANOC_SNOC },
+ .link_nodes = { &qnm_aggre_noc },
};
static struct qcom_icc_node qns_anoc_snoc_gsi = {
.name = "qns_anoc_snoc_gsi",
- .id = QDU1000_SLAVE_ANOC_SNOC_GSI,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_MASTER_ANOC_GSI },
+ .link_nodes = { &qnm_aggre_noc_gsi },
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
- .id = QDU1000_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_ecpri_gemnoc = {
.name = "qns_ecpri_gemnoc",
- .id = QDU1000_SLAVE_ECPRI_GEMNOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { QDU1000_MASTER_GEMNOC_ECPRI_DMA },
+ .link_nodes = { &qnm_ecpri_dma },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = QDU1000_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { QDU1000_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = QDU1000_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { QDU1000_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qns_modem = {
.name = "qns_modem",
- .id = QDU1000_SLAVE_MODEM_OFFLINE,
.channels = 1,
.buswidth = 32,
- .num_links = 0,
};
static struct qcom_icc_node qns_pcie_gemnoc = {
.name = "qns_pcie_gemnoc",
- .id = QDU1000_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { QDU1000_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = QDU1000_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = QDU1000_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node srvc_system_noc = {
.name = "srvc_system_noc",
- .id = QDU1000_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_ethernet_ss = {
.name = "xs_ethernet_ss",
- .id = QDU1000_SLAVE_ETHERNET_SS,
.channels = 1,
.buswidth = 32,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie = {
.name = "xs_pcie",
- .id = QDU1000_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 64,
- .num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = QDU1000_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = QDU1000_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_bcm bcm_acv = {
diff --git a/drivers/interconnect/qcom/qdu1000.h b/drivers/interconnect/qcom/qdu1000.h
deleted file mode 100644
index e75a6419df23..000000000000
--- a/drivers/interconnect/qcom/qdu1000.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_QDU1000_H
-#define __DRIVERS_INTERCONNECT_QCOM_QDU1000_H
-
-#define QDU1000_MASTER_SYS_TCU 0
-#define QDU1000_MASTER_APPSS_PROC 1
-#define QDU1000_MASTER_LLCC 2
-#define QDU1000_MASTER_GIC_AHB 3
-#define QDU1000_MASTER_QDSS_BAM 4
-#define QDU1000_MASTER_QPIC 5
-#define QDU1000_MASTER_QSPI_0 6
-#define QDU1000_MASTER_QUP_0 7
-#define QDU1000_MASTER_QUP_1 8
-#define QDU1000_MASTER_SNOC_CFG 9
-#define QDU1000_MASTER_ANOC_SNOC 10
-#define QDU1000_MASTER_ANOC_GSI 11
-#define QDU1000_MASTER_GEMNOC_ECPRI_DMA 12
-#define QDU1000_MASTER_FEC_2_GEMNOC 13
-#define QDU1000_MASTER_GEM_NOC_CNOC 14
-#define QDU1000_MASTER_GEMNOC_MODEM_CNOC 15
-#define QDU1000_MASTER_GEM_NOC_PCIE_SNOC 16
-#define QDU1000_MASTER_ANOC_PCIE_GEM_NOC 17
-#define QDU1000_MASTER_SNOC_GC_MEM_NOC 18
-#define QDU1000_MASTER_SNOC_SF_MEM_NOC 19
-#define QDU1000_MASTER_QUP_CORE_0 20
-#define QDU1000_MASTER_QUP_CORE_1 21
-#define QDU1000_MASTER_CRYPTO 22
-#define QDU1000_MASTER_ECPRI_GSI 23
-#define QDU1000_MASTER_MSS_PROC 24
-#define QDU1000_MASTER_PIMEM 25
-#define QDU1000_MASTER_SNOC_ECPRI_DMA 26
-#define QDU1000_MASTER_GIC 27
-#define QDU1000_MASTER_PCIE 28
-#define QDU1000_MASTER_QDSS_ETR 29
-#define QDU1000_MASTER_QDSS_ETR_1 30
-#define QDU1000_MASTER_SDCC_1 31
-#define QDU1000_MASTER_USB3 32
-#define QDU1000_SLAVE_EBI1 512
-#define QDU1000_SLAVE_AHB2PHY_SOUTH 513
-#define QDU1000_SLAVE_AHB2PHY_NORTH 514
-#define QDU1000_SLAVE_AHB2PHY_EAST 515
-#define QDU1000_SLAVE_AOSS 516
-#define QDU1000_SLAVE_CLK_CTL 517
-#define QDU1000_SLAVE_RBCPR_CX_CFG 518
-#define QDU1000_SLAVE_RBCPR_MX_CFG 519
-#define QDU1000_SLAVE_CRYPTO_0_CFG 520
-#define QDU1000_SLAVE_ECPRI_CFG 521
-#define QDU1000_SLAVE_IMEM_CFG 522
-#define QDU1000_SLAVE_IPC_ROUTER_CFG 523
-#define QDU1000_SLAVE_CNOC_MSS 524
-#define QDU1000_SLAVE_PCIE_CFG 525
-#define QDU1000_SLAVE_PDM 526
-#define QDU1000_SLAVE_PIMEM_CFG 527
-#define QDU1000_SLAVE_PRNG 528
-#define QDU1000_SLAVE_QDSS_CFG 529
-#define QDU1000_SLAVE_QPIC 530
-#define QDU1000_SLAVE_QSPI_0 531
-#define QDU1000_SLAVE_QUP_0 532
-#define QDU1000_SLAVE_QUP_1 533
-#define QDU1000_SLAVE_SDCC_2 534
-#define QDU1000_SLAVE_SMBUS_CFG 535
-#define QDU1000_SLAVE_SNOC_CFG 536
-#define QDU1000_SLAVE_TCSR 537
-#define QDU1000_SLAVE_TLMM 538
-#define QDU1000_SLAVE_TME_CFG 539
-#define QDU1000_SLAVE_TSC_CFG 540
-#define QDU1000_SLAVE_USB3_0 541
-#define QDU1000_SLAVE_VSENSE_CTRL_CFG 542
-#define QDU1000_SLAVE_A1NOC_SNOC 543
-#define QDU1000_SLAVE_ANOC_SNOC_GSI 544
-#define QDU1000_SLAVE_DDRSS_CFG 545
-#define QDU1000_SLAVE_ECPRI_GEMNOC 546
-#define QDU1000_SLAVE_GEM_NOC_CNOC 547
-#define QDU1000_SLAVE_SNOC_GEM_NOC_GC 548
-#define QDU1000_SLAVE_SNOC_GEM_NOC_SF 549
-#define QDU1000_SLAVE_LLCC 550
-#define QDU1000_SLAVE_MODEM_OFFLINE 551
-#define QDU1000_SLAVE_GEMNOC_MODEM_CNOC 552
-#define QDU1000_SLAVE_MEM_NOC_PCIE_SNOC 553
-#define QDU1000_SLAVE_ANOC_PCIE_GEM_NOC 554
-#define QDU1000_SLAVE_QUP_CORE_0 555
-#define QDU1000_SLAVE_QUP_CORE_1 556
-#define QDU1000_SLAVE_IMEM 557
-#define QDU1000_SLAVE_PIMEM 558
-#define QDU1000_SLAVE_SERVICE_SNOC 559
-#define QDU1000_SLAVE_ETHERNET_SS 560
-#define QDU1000_SLAVE_PCIE_0 561
-#define QDU1000_SLAVE_QDSS_STM 562
-#define QDU1000_SLAVE_TCU 563
-
-#endif
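
With every edge expressed as a pointer, nothing consults the deleted QDU1000_* defines anymore: a provider can wire up its topology by walking num_links and link_nodes directly. The following is a self-contained sketch of that idea; the struct node, topology[] and main() names are hypothetical illustration, not the real icc-rpmh API.

/* Hedged sketch: a flat walk over pointer-linked nodes, no ID header. */
#include <stdio.h>
#include <stddef.h>

#define MAX_LINKS 4	/* placeholder bound, assumed */

struct node {
	const char *name;
	size_t num_links;
	struct node *link_nodes[MAX_LINKS];
};

static struct node ebi;	/* forward (tentative) definition */

static struct node qns_llcc = {
	.name = "qns_llcc",
	.num_links = 1,
	.link_nodes = { &ebi },
};

static struct node ebi = { .name = "ebi" };

static struct node *topology[] = { &qns_llcc, &ebi };

int main(void)
{
	/* Print each edge by dereferencing link_nodes pointers directly. */
	for (size_t i = 0; i < sizeof(topology) / sizeof(topology[0]); i++)
		for (size_t j = 0; j < topology[i]->num_links; j++)
			printf("%s -> %s\n", topology[i]->name,
			       topology[i]->link_nodes[j]->name);
	return 0;
}
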
diff --git a/drivers/interconnect/qcom/sa8775p.c b/drivers/interconnect/qcom/sa8775p.c
index 04b4abbf4487..6a49abc96efe 100644
--- a/drivers/interconnect/qcom/sa8775p.c
+++ b/drivers/interconnect/qcom/sa8775p.c
@@ -213,152 +213,285 @@ static struct qcom_icc_node qxm_qup3 = {
.name = "qxm_qup3",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x11000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emac_0 = {
.name = "xm_emac_0",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x12000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emac_1 = {
.name = "xm_emac_1",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x13000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x14000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x15000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb2_2 = {
.name = "xm_usb2_2",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x16000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x17000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x18000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
.channels = 1,
.buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x14000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
.channels = 1,
.buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x17000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
.channels = 1,
.buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x12000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
.channels = 1,
.buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x15000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_cnoc_datapath = {
.name = "qnm_cnoc_datapath",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x16000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto_0 = {
.name = "qxm_crypto_0",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x18000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto_1 = {
.name = "qxm_crypto_1",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1a000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x11000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x13000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x19000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1b000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+ .link_nodes = { &qns_a2noc_snoc },
};
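/*
 * Each port_offsets[] value above is the base of that master's QoS
 * register block inside the NoC.  A sketch of how the common probe code
 * could walk a qosbox over a regmap; example_apply_qos() and the register
 * offsets are illustrative assumptions, not the real icc-rpmh layout:
 */
static void example_apply_qos(struct regmap *rmap,
			      const struct qcom_icc_qosbox *qos)
{
	u32 i;

	for (i = 0; i < qos->num_ports; i++) {
		u32 base = qos->port_offsets[i];

		/* hypothetical register offsets, for illustration only */
		regmap_update_bits(rmap, base + 0x8, GENMASK(2, 0), qos->prio);
		regmap_update_bits(rmap, base + 0xc, BIT(0), qos->urg_fwd);
	}
}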
static struct qcom_icc_node qup0_core_master = {
@@ -366,7 +499,7 @@ static struct qcom_icc_node qup0_core_master = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qup0_core_slave },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
@@ -374,7 +507,7 @@ static struct qcom_icc_node qup1_core_master = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qup1_core_slave },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qup2_core_master = {
@@ -382,7 +515,7 @@ static struct qcom_icc_node qup2_core_master = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qup2_core_slave },
+ .link_nodes = { &qup2_core_slave },
};
static struct qcom_icc_node qup3_core_master = {
@@ -390,7 +523,7 @@ static struct qcom_icc_node qup3_core_master = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qup3_core_slave },
+ .link_nodes = { &qup3_core_slave },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
@@ -398,7 +531,7 @@ static struct qcom_icc_node qnm_gemnoc_cnoc = {
.channels = 1,
.buswidth = 16,
.num_links = 82,
- .link_nodes = (struct qcom_icc_node *[]) { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
&qhs_ahb2phy2, &qhs_ahb2phy3,
&qhs_anoc_throttle_cfg, &qhs_aoss,
&qhs_apss, &qhs_boot_rom,
@@ -446,7 +579,7 @@ static struct qcom_icc_node qnm_gemnoc_pcie = {
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &xs_pcie_0, &xs_pcie_1 },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
@@ -454,31 +587,52 @@ static struct qcom_icc_node qnm_cnoc_dc_noc = {
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qhs_llcc, &qns_gemnoc },
+ .link_nodes = { &qhs_llcc, &qns_gemnoc },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb4000 },
+ .prio_fwd_disable = 1,
+ .prio = 1,
+ .urg_fwd = 0,
+ },
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_pcie_tcu = {
.name = "alm_pcie_tcu",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb5000 },
+ .prio_fwd_disable = 1,
+ .prio = 3,
+ .urg_fwd = 0,
+ },
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb6000 },
+ .prio_fwd_disable = 1,
+ .prio = 6,
+ .urg_fwd = 0,
+ },
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
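/*
 * Note the graded fixed priorities on the TCU masters: alm_gpu_tcu runs
 * at prio 1, alm_pcie_tcu at 3 and alm_sys_tcu at 6, so SMMU translation
 * traffic is ranked above ordinary bulk masters, with the system TCU
 * highest.
 */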
static struct qcom_icc_node chm_apps = {
@@ -486,7 +640,7 @@ static struct qcom_icc_node chm_apps = {
.channels = 4,
.buswidth = 32,
.num_links = 3,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
&qns_pcie },
};
@@ -494,16 +648,30 @@ static struct qcom_icc_node qnm_cmpnoc0 = {
.name = "qnm_cmpnoc0",
.channels = 2,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0xf3000, 0xf4000 },
+ .prio_fwd_disable = 1,
+ .prio = 0,
+ .urg_fwd = 0,
+ },
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_cmpnoc1 = {
.name = "qnm_cmpnoc1",
.channels = 2,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0xf5000, 0xf6000 },
+ .prio_fwd_disable = 1,
+ .prio = 0,
+ .urg_fwd = 0,
+ },
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
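/*
 * Multi-channel masters carry one port_offsets[] entry per channel:
 * qnm_cmpnoc0/1 above are two-channel, so num_ports = 2 with two
 * register offsets each.
 */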
static struct qcom_icc_node qnm_gemnoc_cfg = {
@@ -511,7 +679,7 @@ static struct qcom_icc_node qnm_gemnoc_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 4,
- .link_nodes = (struct qcom_icc_node *[]) { &srvc_even_gemnoc, &srvc_odd_gemnoc,
+ .link_nodes = { &srvc_even_gemnoc, &srvc_odd_gemnoc,
&srvc_sys_gemnoc, &srvc_sys_gemnoc_2 },
};
@@ -520,31 +688,52 @@ static struct qcom_icc_node qnm_gpdsp_sail = {
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
.channels = 2,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0xed000, 0xee000 },
+ .prio_fwd_disable = 1,
+ .prio = 0,
+ .urg_fwd = 0,
+ },
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
.channels = 2,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0xef000, 0xf0000 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_llcc, &qns_pcie },
+ .link_nodes = { &qns_llcc, &qns_pcie },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
.channels = 2,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0xf1000, 0xf2000 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 3,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
&qns_pcie },
};
@@ -552,24 +741,45 @@ static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb8000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb9000 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_llcc },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
.channels = 1,
.buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xba000 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 3,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gem_noc_cnoc, &qns_llcc,
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
&qns_pcie },
};
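/*
 * Two QoS policies appear in this file: fixed-function masters get a
 * static priority with forwarding disabled (prio_fwd_disable = 1,
 * urg_fwd = 0), while the mnoc and snoc aggregation paths keep
 * hardware-driven arbitration (prio 0, urg_fwd = 1) so display and
 * camera urgency propagates toward memory.
 */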
@@ -578,7 +788,7 @@ static struct qcom_icc_node qxm_dsp0 = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gp_dsp_sail_noc },
+ .link_nodes = { &qns_gp_dsp_sail_noc },
};
static struct qcom_icc_node qxm_dsp1 = {
@@ -586,7 +796,7 @@ static struct qcom_icc_node qxm_dsp1 = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gp_dsp_sail_noc },
+ .link_nodes = { &qns_gp_dsp_sail_noc },
};
static struct qcom_icc_node qhm_config_noc = {
@@ -594,7 +804,7 @@ static struct qcom_icc_node qhm_config_noc = {
.channels = 1,
.buswidth = 4,
.num_links = 6,
- .link_nodes = (struct qcom_icc_node *[]) { &qhs_lpass_core, &qhs_lpass_lpi,
+ .link_nodes = { &qhs_lpass_core, &qhs_lpass_lpi,
&qhs_lpass_mpu, &qhs_lpass_top,
&srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
@@ -604,7 +814,7 @@ static struct qcom_icc_node qxm_lpass_dsp = {
.channels = 1,
.buswidth = 8,
.num_links = 4,
- .link_nodes = (struct qcom_icc_node *[]) { &qhs_lpass_top, &qns_sysnoc,
+ .link_nodes = { &qhs_lpass_top, &qns_sysnoc,
&srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
@@ -613,63 +823,112 @@ static struct qcom_icc_node llcc_mc = {
.channels = 8,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &ebi },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa000 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2a000 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2a080 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_mdp0_0 = {
.name = "qnm_mdp0_0",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa080 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp0_1 = {
.name = "qnm_mdp0_1",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa180 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp1_0 = {
.name = "qnm_mdp1_0",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa100 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp1_1 = {
.name = "qnm_mdp1_1",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa200 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mnoc_hf_cfg = {
@@ -677,7 +936,7 @@ static struct qcom_icc_node qnm_mnoc_hf_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &srvc_mnoc_hf },
+ .link_nodes = { &srvc_mnoc_hf },
};
static struct qcom_icc_node qnm_mnoc_sf_cfg = {
@@ -685,39 +944,67 @@ static struct qcom_icc_node qnm_mnoc_sf_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &srvc_mnoc_sf },
+ .link_nodes = { &srvc_mnoc_sf },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2a100 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video1 = {
.name = "qnm_video1",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2a180 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2a200 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2a280 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
@@ -725,7 +1012,7 @@ static struct qcom_icc_node qhm_nsp_noc_config = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &service_nsp_noc },
+ .link_nodes = { &service_nsp_noc },
};
static struct qcom_icc_node qxm_nsp = {
@@ -733,7 +1020,7 @@ static struct qcom_icc_node qxm_nsp = {
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_hcp, &qns_nsp_gemnoc },
+ .link_nodes = { &qns_hcp, &qns_nsp_gemnoc },
};
static struct qcom_icc_node qhm_nspb_noc_config = {
@@ -741,7 +1028,7 @@ static struct qcom_icc_node qhm_nspb_noc_config = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &service_nspb_noc },
+ .link_nodes = { &service_nspb_noc },
};
static struct qcom_icc_node qxm_nspb = {
@@ -749,31 +1036,52 @@ static struct qcom_icc_node qxm_nspb = {
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_nspb_hcp, &qns_nspb_gemnoc },
+ .link_nodes = { &qns_nspb_hcp, &qns_nspb_gemnoc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
.channels = 1,
.buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_mem_noc },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
.channels = 1,
.buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_mem_noc },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
.channels = 1,
.buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x14000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre1_noc = {
@@ -781,7 +1089,7 @@ static struct qcom_icc_node qnm_aggre1_noc = {
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
@@ -789,15 +1097,22 @@ static struct qcom_icc_node qnm_aggre2_noc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_lpass_noc = {
.name = "qnm_lpass_noc",
.channels = 1,
.buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x12000 },
+ .prio_fwd_disable = 0,
+ .prio = 0,
+ .urg_fwd = 1,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_snoc_cfg = {
@@ -805,23 +1120,37 @@ static struct qcom_icc_node qnm_snoc_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &srvc_snoc },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x13000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_gc },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
.channels = 1,
.buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x15000 },
+ .prio_fwd_disable = 1,
+ .prio = 2,
+ .urg_fwd = 0,
+ },
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_gc },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
@@ -829,7 +1158,7 @@ static struct qcom_icc_node qns_a1noc_snoc = {
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre1_noc },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_a2noc_snoc = {
@@ -837,7 +1166,7 @@ static struct qcom_icc_node qns_a2noc_snoc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre2_noc },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qup0_core_slave = {
@@ -941,7 +1270,7 @@ static struct qcom_icc_node qhs_compute0_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qhm_nsp_noc_config },
+ .link_nodes = { &qhm_nsp_noc_config },
};
static struct qcom_icc_node qhs_compute1_cfg = {
@@ -949,7 +1278,7 @@ static struct qcom_icc_node qhs_compute1_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qhm_nspb_noc_config },
+ .link_nodes = { &qhm_nspb_noc_config },
};
static struct qcom_icc_node qhs_cpr_cx = {
@@ -1089,7 +1418,7 @@ static struct qcom_icc_node qhs_lpass_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qhm_config_noc },
+ .link_nodes = { &qhm_config_noc },
};
static struct qcom_icc_node qhs_lpass_throttle_cfg = {
@@ -1301,7 +1630,7 @@ static struct qcom_icc_node qns_ddrss_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_cnoc_dc_noc },
+ .link_nodes = { &qnm_cnoc_dc_noc },
};
static struct qcom_icc_node qns_gpdsp_noc_cfg = {
@@ -1315,7 +1644,7 @@ static struct qcom_icc_node qns_mnoc_hf_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_hf_cfg },
+ .link_nodes = { &qnm_mnoc_hf_cfg },
};
static struct qcom_icc_node qns_mnoc_sf_cfg = {
@@ -1323,7 +1652,7 @@ static struct qcom_icc_node qns_mnoc_sf_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_sf_cfg },
+ .link_nodes = { &qnm_mnoc_sf_cfg },
};
static struct qcom_icc_node qns_pcie_anoc_cfg = {
@@ -1337,7 +1666,7 @@ static struct qcom_icc_node qns_snoc_cfg = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_cfg },
+ .link_nodes = { &qnm_snoc_cfg },
};
static struct qcom_icc_node qxs_boot_imem = {
@@ -1393,7 +1722,7 @@ static struct qcom_icc_node qns_gemnoc = {
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_gemnoc_cfg },
+ .link_nodes = { &qnm_gemnoc_cfg },
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
@@ -1401,7 +1730,7 @@ static struct qcom_icc_node qns_gem_noc_cnoc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_gemnoc_cnoc },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
@@ -1409,7 +1738,7 @@ static struct qcom_icc_node qns_llcc = {
.channels = 6,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &llcc_mc },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
@@ -1417,7 +1746,7 @@ static struct qcom_icc_node qns_pcie = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_gemnoc_pcie },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node srvc_even_gemnoc = {
@@ -1449,7 +1778,7 @@ static struct qcom_icc_node qns_gp_dsp_sail_noc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_gpdsp_sail },
+ .link_nodes = { &qnm_gpdsp_sail },
};
static struct qcom_icc_node qhs_lpass_core = {
@@ -1481,7 +1810,7 @@ static struct qcom_icc_node qns_sysnoc = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_lpass_noc },
+ .link_nodes = { &qnm_lpass_noc },
};
static struct qcom_icc_node srvc_niu_aml_noc = {
@@ -1507,7 +1836,7 @@ static struct qcom_icc_node qns_mem_noc_hf = {
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_hf },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
@@ -1515,7 +1844,7 @@ static struct qcom_icc_node qns_mem_noc_sf = {
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_sf },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc_hf = {
@@ -1541,7 +1870,7 @@ static struct qcom_icc_node qns_nsp_gemnoc = {
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_cmpnoc0 },
+ .link_nodes = { &qnm_cmpnoc0 },
};
static struct qcom_icc_node service_nsp_noc = {
@@ -1555,7 +1884,7 @@ static struct qcom_icc_node qns_nspb_gemnoc = {
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_cmpnoc1 },
+ .link_nodes = { &qnm_cmpnoc1 },
};
static struct qcom_icc_node qns_nspb_hcp = {
@@ -1575,7 +1904,7 @@ static struct qcom_icc_node qns_pcie_mem_noc = {
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_pcie },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node qns_gemnoc_gc = {
@@ -1583,7 +1912,7 @@ static struct qcom_icc_node qns_gemnoc_gc = {
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_gc },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
@@ -1591,7 +1920,7 @@ static struct qcom_icc_node qns_gemnoc_sf = {
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_sf },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node srvc_snoc = {
@@ -1836,12 +2165,21 @@ static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
};
+static const struct regmap_config sa8775p_aggre1_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x18080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_aggre1_noc = {
+ .config = &sa8775p_aggre1_noc_regmap_config,
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
- .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
};
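/*
 * Every register-backed NoC now pairs its node table with a regmap
 * description, and qos_requires_clocks tells the core that the bus
 * clocks must be running before the qosboxes can be written.  A sketch
 * of the mapping step; example_map_noc() is illustrative, the real work
 * happens in the common icc-rpmh probe path:
 */
static struct regmap *example_map_noc(struct platform_device *pdev,
				      const struct regmap_config *cfg)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return ERR_CAST(base);

	return devm_regmap_init_mmio(&pdev->dev, base, cfg);
}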
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
@@ -1864,12 +2202,21 @@ static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
};
+static const struct regmap_config sa8775p_aggre2_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1b080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_aggre2_noc = {
+ .config = &sa8775p_aggre2_noc_regmap_config,
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
- .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
@@ -1894,7 +2241,6 @@ static const struct qcom_icc_desc sa8775p_clk_virt = {
.num_nodes = ARRAY_SIZE(clk_virt_nodes),
.bcms = clk_virt_bcms,
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
- .alloc_dyn_id = true,
};
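/*
 * clk_virt (and mc_virt below) stay regmap-less: virtual NoCs have no
 * MMIO QoS registers to program, so the only change here is dropping
 * the .alloc_dyn_id flag.
 */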
static struct qcom_icc_bcm * const config_noc_bcms[] = {
@@ -1995,12 +2341,20 @@ static struct qcom_icc_node * const config_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
+static const struct regmap_config sa8775p_config_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x13080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_config_noc = {
+ .config = &sa8775p_config_noc_regmap_config,
.nodes = config_noc_nodes,
.num_nodes = ARRAY_SIZE(config_noc_nodes),
.bcms = config_noc_bcms,
.num_bcms = ARRAY_SIZE(config_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
@@ -2012,12 +2366,20 @@ static struct qcom_icc_node * const dc_noc_nodes[] = {
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
};
+static const struct regmap_config sa8775p_dc_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_dc_noc = {
+ .config = &sa8775p_dc_noc_regmap_config,
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
@@ -2049,12 +2411,20 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC2] = &srvc_sys_gemnoc_2,
};
+static const struct regmap_config sa8775p_gem_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf6080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_gem_noc = {
+ .config = &sa8775p_gem_noc_regmap_config,
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const gpdsp_anoc_bcms[] = {
@@ -2068,12 +2438,20 @@ static struct qcom_icc_node * const gpdsp_anoc_nodes[] = {
[SLAVE_GP_DSP_SAIL_NOC] = &qns_gp_dsp_sail_noc,
};
+static const struct regmap_config sa8775p_gpdsp_anoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_gpdsp_anoc = {
+ .config = &sa8775p_gpdsp_anoc_regmap_config,
.nodes = gpdsp_anoc_nodes,
.num_nodes = ARRAY_SIZE(gpdsp_anoc_nodes),
.bcms = gpdsp_anoc_bcms,
.num_bcms = ARRAY_SIZE(gpdsp_anoc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
@@ -2092,12 +2470,20 @@ static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
};
+static const struct regmap_config sa8775p_lpass_ag_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x17200,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_lpass_ag_noc = {
+ .config = &sa8775p_lpass_ag_noc_regmap_config,
.nodes = lpass_ag_noc_nodes,
.num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
.bcms = lpass_ag_noc_bcms,
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
@@ -2115,7 +2501,6 @@ static const struct qcom_icc_desc sa8775p_mc_virt = {
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
.bcms = mc_virt_bcms,
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
@@ -2143,12 +2528,20 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC_SF] = &srvc_mnoc_sf,
};
+static const struct regmap_config sa8775p_mmss_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x40000,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_mmss_noc = {
+ .config = &sa8775p_mmss_noc_regmap_config,
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const nspa_noc_bcms[] = {
@@ -2164,12 +2557,20 @@ static struct qcom_icc_node * const nspa_noc_nodes[] = {
[SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
};
+static const struct regmap_config sa8775p_nspa_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x16080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_nspa_noc = {
+ .config = &sa8775p_nspa_noc_regmap_config,
.nodes = nspa_noc_nodes,
.num_nodes = ARRAY_SIZE(nspa_noc_nodes),
.bcms = nspa_noc_bcms,
.num_bcms = ARRAY_SIZE(nspa_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const nspb_noc_bcms[] = {
@@ -2177,6 +2578,14 @@ static struct qcom_icc_bcm * const nspb_noc_bcms[] = {
&bcm_nsb1,
};
+static const struct regmap_config sa8775p_nspb_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x16080,
+ .fast_io = true,
+};
+
static struct qcom_icc_node * const nspb_noc_nodes[] = {
[MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config,
[MASTER_CDSP_PROC_B] = &qxm_nspb,
@@ -2186,11 +2595,11 @@ static struct qcom_icc_node * const nspb_noc_nodes[] = {
};
static const struct qcom_icc_desc sa8775p_nspb_noc = {
+ .config = &sa8775p_nspb_noc_regmap_config,
.nodes = nspb_noc_nodes,
.num_nodes = ARRAY_SIZE(nspb_noc_nodes),
.bcms = nspb_noc_bcms,
.num_bcms = ARRAY_SIZE(nspb_noc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
@@ -2203,12 +2612,20 @@ static struct qcom_icc_node * const pcie_anoc_nodes[] = {
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
};
+static const struct regmap_config sa8775p_pcie_anoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_pcie_anoc = {
+ .config = &sa8775p_pcie_anoc_regmap_config,
.nodes = pcie_anoc_nodes,
.num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
.bcms = pcie_anoc_bcms,
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
- .alloc_dyn_id = true,
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
@@ -2232,12 +2649,20 @@ static struct qcom_icc_node * const system_noc_nodes[] = {
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
};
+static const struct regmap_config sa8775p_system_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x15080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sa8775p_system_noc = {
+ .config = &sa8775p_system_noc_regmap_config,
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
- .alloc_dyn_id = true,
};
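/*
 * Each desc above is selected through the qnoc_of_match table that
 * follows; a sketch of one entry, assuming the usual compatible naming
 * for this SoC:
 *
 *	{ .compatible = "qcom,sa8775p-aggre1-noc", .data = &sa8775p_aggre1_noc },
 */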
static const struct of_device_id qnoc_of_match[] = {
diff --git a/drivers/interconnect/qcom/sar2130p.c b/drivers/interconnect/qcom/sar2130p.c
index 9eac0ac76812..34cb3fc1f995 100644
--- a/drivers/interconnect/qcom/sar2130p.c
+++ b/drivers/interconnect/qcom/sar2130p.c
@@ -20,125 +20,123 @@
#include "icc-common.h"
#include "icc-rpmh.h"
-enum {
- SAR2130P_MASTER_QUP_CORE_0,
- SAR2130P_MASTER_QUP_CORE_1,
- SAR2130P_MASTER_GEM_NOC_CNOC,
- SAR2130P_MASTER_GEM_NOC_PCIE_SNOC,
- SAR2130P_MASTER_QDSS_DAP,
- SAR2130P_MASTER_GPU_TCU,
- SAR2130P_MASTER_SYS_TCU,
- SAR2130P_MASTER_APPSS_PROC,
- SAR2130P_MASTER_GFX3D,
- SAR2130P_MASTER_MNOC_HF_MEM_NOC,
- SAR2130P_MASTER_MNOC_SF_MEM_NOC,
- SAR2130P_MASTER_COMPUTE_NOC,
- SAR2130P_MASTER_ANOC_PCIE_GEM_NOC,
- SAR2130P_MASTER_SNOC_GC_MEM_NOC,
- SAR2130P_MASTER_SNOC_SF_MEM_NOC,
- SAR2130P_MASTER_WLAN_Q6,
- SAR2130P_MASTER_CNOC_LPASS_AG_NOC,
- SAR2130P_MASTER_LPASS_PROC,
- SAR2130P_MASTER_LLCC,
- SAR2130P_MASTER_CAMNOC_HF,
- SAR2130P_MASTER_CAMNOC_ICP,
- SAR2130P_MASTER_CAMNOC_SF,
- SAR2130P_MASTER_LSR,
- SAR2130P_MASTER_MDP,
- SAR2130P_MASTER_CNOC_MNOC_CFG,
- SAR2130P_MASTER_VIDEO,
- SAR2130P_MASTER_VIDEO_CV_PROC,
- SAR2130P_MASTER_VIDEO_PROC,
- SAR2130P_MASTER_VIDEO_V_PROC,
- SAR2130P_MASTER_CDSP_NOC_CFG,
- SAR2130P_MASTER_CDSP_PROC,
- SAR2130P_MASTER_PCIE_0,
- SAR2130P_MASTER_PCIE_1,
- SAR2130P_MASTER_GIC_AHB,
- SAR2130P_MASTER_QDSS_BAM,
- SAR2130P_MASTER_QSPI_0,
- SAR2130P_MASTER_QUP_0,
- SAR2130P_MASTER_QUP_1,
- SAR2130P_MASTER_A2NOC_SNOC,
- SAR2130P_MASTER_CNOC_DATAPATH,
- SAR2130P_MASTER_LPASS_ANOC,
- SAR2130P_MASTER_SNOC_CFG,
- SAR2130P_MASTER_CRYPTO,
- SAR2130P_MASTER_PIMEM,
- SAR2130P_MASTER_GIC,
- SAR2130P_MASTER_QDSS_ETR,
- SAR2130P_MASTER_QDSS_ETR_1,
- SAR2130P_MASTER_SDCC_1,
- SAR2130P_MASTER_USB3_0,
- SAR2130P_SLAVE_QUP_CORE_0,
- SAR2130P_SLAVE_QUP_CORE_1,
- SAR2130P_SLAVE_AHB2PHY_SOUTH,
- SAR2130P_SLAVE_AOSS,
- SAR2130P_SLAVE_CAMERA_CFG,
- SAR2130P_SLAVE_CLK_CTL,
- SAR2130P_SLAVE_CDSP_CFG,
- SAR2130P_SLAVE_RBCPR_CX_CFG,
- SAR2130P_SLAVE_RBCPR_MMCX_CFG,
- SAR2130P_SLAVE_RBCPR_MXA_CFG,
- SAR2130P_SLAVE_RBCPR_MXC_CFG,
- SAR2130P_SLAVE_CPR_NSPCX,
- SAR2130P_SLAVE_CRYPTO_0_CFG,
- SAR2130P_SLAVE_CX_RDPM,
- SAR2130P_SLAVE_DISPLAY_CFG,
- SAR2130P_SLAVE_GFX3D_CFG,
- SAR2130P_SLAVE_IMEM_CFG,
- SAR2130P_SLAVE_IPC_ROUTER_CFG,
- SAR2130P_SLAVE_LPASS,
- SAR2130P_SLAVE_MX_RDPM,
- SAR2130P_SLAVE_PCIE_0_CFG,
- SAR2130P_SLAVE_PCIE_1_CFG,
- SAR2130P_SLAVE_PDM,
- SAR2130P_SLAVE_PIMEM_CFG,
- SAR2130P_SLAVE_PRNG,
- SAR2130P_SLAVE_QDSS_CFG,
- SAR2130P_SLAVE_QSPI_0,
- SAR2130P_SLAVE_QUP_0,
- SAR2130P_SLAVE_QUP_1,
- SAR2130P_SLAVE_SDCC_1,
- SAR2130P_SLAVE_TCSR,
- SAR2130P_SLAVE_TLMM,
- SAR2130P_SLAVE_TME_CFG,
- SAR2130P_SLAVE_USB3_0,
- SAR2130P_SLAVE_VENUS_CFG,
- SAR2130P_SLAVE_VSENSE_CTRL_CFG,
- SAR2130P_SLAVE_WLAN_Q6_CFG,
- SAR2130P_SLAVE_DDRSS_CFG,
- SAR2130P_SLAVE_CNOC_MNOC_CFG,
- SAR2130P_SLAVE_SNOC_CFG,
- SAR2130P_SLAVE_IMEM,
- SAR2130P_SLAVE_PIMEM,
- SAR2130P_SLAVE_SERVICE_CNOC,
- SAR2130P_SLAVE_PCIE_0,
- SAR2130P_SLAVE_PCIE_1,
- SAR2130P_SLAVE_QDSS_STM,
- SAR2130P_SLAVE_TCU,
- SAR2130P_SLAVE_GEM_NOC_CNOC,
- SAR2130P_SLAVE_LLCC,
- SAR2130P_SLAVE_MEM_NOC_PCIE_SNOC,
- SAR2130P_SLAVE_LPASS_CORE_CFG,
- SAR2130P_SLAVE_LPASS_LPI_CFG,
- SAR2130P_SLAVE_LPASS_MPU_CFG,
- SAR2130P_SLAVE_LPASS_TOP_CFG,
- SAR2130P_SLAVE_LPASS_SNOC,
- SAR2130P_SLAVE_SERVICES_LPASS_AML_NOC,
- SAR2130P_SLAVE_SERVICE_LPASS_AG_NOC,
- SAR2130P_SLAVE_EBI1,
- SAR2130P_SLAVE_MNOC_HF_MEM_NOC,
- SAR2130P_SLAVE_MNOC_SF_MEM_NOC,
- SAR2130P_SLAVE_SERVICE_MNOC,
- SAR2130P_SLAVE_CDSP_MEM_NOC,
- SAR2130P_SLAVE_SERVICE_NSP_NOC,
- SAR2130P_SLAVE_ANOC_PCIE_GEM_NOC,
- SAR2130P_SLAVE_A2NOC_SNOC,
- SAR2130P_SLAVE_SNOC_GEM_NOC_GC,
- SAR2130P_SLAVE_SNOC_GEM_NOC_SF,
- SAR2130P_SLAVE_SERVICE_SNOC,
-};
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_nsp_gemnoc;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qxm_wlan_q6;
+static struct qcom_icc_node qhm_config_noc;
+static struct qcom_icc_node qxm_lpass_dsp;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_icp;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_lsr;
+static struct qcom_icc_node qnm_mdp;
+static struct qcom_icc_node qnm_mnoc_cfg;
+static struct qcom_icc_node qnm_video;
+static struct qcom_icc_node qnm_video_cv_cpu;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qnm_video_v_cpu;
+static struct qcom_icc_node qhm_nsp_noc_config;
+static struct qcom_icc_node qxm_nsp;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node qhm_gic;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_cnoc_datapath;
+static struct qcom_icc_node qnm_lpass_noc;
+static struct qcom_icc_node qnm_snoc_cfg;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node xm_qdss_etr_0;
+static struct qcom_icc_node xm_qdss_etr_1;
+static struct qcom_icc_node xm_sdc1;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mxa;
+static struct qcom_icc_node qhs_cpr_mxc;
+static struct qcom_icc_node qhs_cpr_nspcx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_lpass_cfg;
+static struct qcom_icc_node qhs_mx_rdpm;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_sdc1;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_tme_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qhs_wlan_q6;
+static struct qcom_icc_node qns_ddrss_cfg;
+static struct qcom_icc_node qns_mnoc_cfg;
+static struct qcom_icc_node qns_snoc_cfg;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node qhs_lpass_core;
+static struct qcom_icc_node qhs_lpass_lpi;
+static struct qcom_icc_node qhs_lpass_mpu;
+static struct qcom_icc_node qhs_lpass_top;
+static struct qcom_icc_node qns_sysnoc;
+static struct qcom_icc_node srvc_niu_aml_noc;
+static struct qcom_icc_node srvc_niu_lpass_agnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node service_nsp_noc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node srvc_snoc;
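/*
 * With the SAR2130P_MASTER_* / SLAVE_* enum gone, these forward
 * declarations let each node name its peers by address via .link_nodes;
 * node ids are handed out dynamically at probe instead of being fixed
 * per SoC.
 */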
static const struct regmap_config icc_regmap_config = {
.reg_bits = 32,
@@ -149,89 +147,84 @@ static const struct regmap_config icc_regmap_config = {
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SAR2130P_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SAR2130P_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SAR2130P_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 43,
- .links = { SAR2130P_SLAVE_AHB2PHY_SOUTH, SAR2130P_SLAVE_AOSS,
- SAR2130P_SLAVE_CAMERA_CFG, SAR2130P_SLAVE_CLK_CTL,
- SAR2130P_SLAVE_CDSP_CFG, SAR2130P_SLAVE_RBCPR_CX_CFG,
- SAR2130P_SLAVE_RBCPR_MMCX_CFG, SAR2130P_SLAVE_RBCPR_MXA_CFG,
- SAR2130P_SLAVE_RBCPR_MXC_CFG, SAR2130P_SLAVE_CPR_NSPCX,
- SAR2130P_SLAVE_CRYPTO_0_CFG, SAR2130P_SLAVE_CX_RDPM,
- SAR2130P_SLAVE_DISPLAY_CFG, SAR2130P_SLAVE_GFX3D_CFG,
- SAR2130P_SLAVE_IMEM_CFG, SAR2130P_SLAVE_IPC_ROUTER_CFG,
- SAR2130P_SLAVE_LPASS, SAR2130P_SLAVE_MX_RDPM,
- SAR2130P_SLAVE_PCIE_0_CFG, SAR2130P_SLAVE_PCIE_1_CFG,
- SAR2130P_SLAVE_PDM, SAR2130P_SLAVE_PIMEM_CFG,
- SAR2130P_SLAVE_PRNG, SAR2130P_SLAVE_QDSS_CFG,
- SAR2130P_SLAVE_QSPI_0, SAR2130P_SLAVE_QUP_0,
- SAR2130P_SLAVE_QUP_1, SAR2130P_SLAVE_SDCC_1,
- SAR2130P_SLAVE_TCSR, SAR2130P_SLAVE_TLMM,
- SAR2130P_SLAVE_TME_CFG, SAR2130P_SLAVE_USB3_0,
- SAR2130P_SLAVE_VENUS_CFG, SAR2130P_SLAVE_VSENSE_CTRL_CFG,
- SAR2130P_SLAVE_WLAN_Q6_CFG, SAR2130P_SLAVE_DDRSS_CFG,
- SAR2130P_SLAVE_CNOC_MNOC_CFG, SAR2130P_SLAVE_SNOC_CFG,
- SAR2130P_SLAVE_IMEM, SAR2130P_SLAVE_PIMEM,
- SAR2130P_SLAVE_SERVICE_CNOC, SAR2130P_SLAVE_QDSS_STM,
- SAR2130P_SLAVE_TCU },
+ .link_nodes = { &qhs_ahb2phy0, &qhs_aoss,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_compute_cfg, &qhs_cpr_cx,
+ &qhs_cpr_mmcx, &qhs_cpr_mxa,
+ &qhs_cpr_mxc, &qhs_cpr_nspcx,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_ipc_router,
+ &qhs_lpass_cfg, &qhs_mx_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pdm, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_sdc1,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_tme_cfg, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qhs_wlan_q6, &qns_ddrss_cfg,
+ &qns_mnoc_cfg, &qns_snoc_cfg,
+ &qxs_imem, &qxs_pimem,
+ &srvc_cnoc, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SAR2130P_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SAR2130P_SLAVE_PCIE_0, SAR2130P_SLAVE_PCIE_1 },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = SAR2130P_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 43,
- .links = { SAR2130P_SLAVE_AHB2PHY_SOUTH, SAR2130P_SLAVE_AOSS,
- SAR2130P_SLAVE_CAMERA_CFG, SAR2130P_SLAVE_CLK_CTL,
- SAR2130P_SLAVE_CDSP_CFG, SAR2130P_SLAVE_RBCPR_CX_CFG,
- SAR2130P_SLAVE_RBCPR_MMCX_CFG, SAR2130P_SLAVE_RBCPR_MXA_CFG,
- SAR2130P_SLAVE_RBCPR_MXC_CFG, SAR2130P_SLAVE_CPR_NSPCX,
- SAR2130P_SLAVE_CRYPTO_0_CFG, SAR2130P_SLAVE_CX_RDPM,
- SAR2130P_SLAVE_DISPLAY_CFG, SAR2130P_SLAVE_GFX3D_CFG,
- SAR2130P_SLAVE_IMEM_CFG, SAR2130P_SLAVE_IPC_ROUTER_CFG,
- SAR2130P_SLAVE_LPASS, SAR2130P_SLAVE_MX_RDPM,
- SAR2130P_SLAVE_PCIE_0_CFG, SAR2130P_SLAVE_PCIE_1_CFG,
- SAR2130P_SLAVE_PDM, SAR2130P_SLAVE_PIMEM_CFG,
- SAR2130P_SLAVE_PRNG, SAR2130P_SLAVE_QDSS_CFG,
- SAR2130P_SLAVE_QSPI_0, SAR2130P_SLAVE_QUP_0,
- SAR2130P_SLAVE_QUP_1, SAR2130P_SLAVE_SDCC_1,
- SAR2130P_SLAVE_TCSR, SAR2130P_SLAVE_TLMM,
- SAR2130P_SLAVE_TME_CFG, SAR2130P_SLAVE_USB3_0,
- SAR2130P_SLAVE_VENUS_CFG, SAR2130P_SLAVE_VSENSE_CTRL_CFG,
- SAR2130P_SLAVE_WLAN_Q6_CFG, SAR2130P_SLAVE_DDRSS_CFG,
- SAR2130P_SLAVE_CNOC_MNOC_CFG, SAR2130P_SLAVE_SNOC_CFG,
- SAR2130P_SLAVE_IMEM, SAR2130P_SLAVE_PIMEM,
- SAR2130P_SLAVE_SERVICE_CNOC, SAR2130P_SLAVE_QDSS_STM,
- SAR2130P_SLAVE_TCU },
+ .link_nodes = { &qhs_ahb2phy0, &qhs_aoss,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_compute_cfg, &qhs_cpr_cx,
+ &qhs_cpr_mmcx, &qhs_cpr_mxa,
+ &qhs_cpr_mxc, &qhs_cpr_nspcx,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_ipc_router,
+ &qhs_lpass_cfg, &qhs_mx_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pdm, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_sdc1,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_tme_cfg, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qhs_wlan_q6, &qns_ddrss_cfg,
+ &qns_mnoc_cfg, &qns_snoc_cfg,
+ &qxs_imem, &qxs_pimem,
+ &srvc_cnoc, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static const struct qcom_icc_qosbox alm_gpu_tcu_qos = {
@@ -244,12 +237,11 @@ static const struct qcom_icc_qosbox alm_gpu_tcu_qos = {
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SAR2130P_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.qosbox = &alm_gpu_tcu_qos,
.num_links = 2,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
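/*
 * Unlike the inline compound literals used in sa8775p.c above, this file
 * keeps its named qosbox structs (alm_gpu_tcu_qos and friends); both
 * styles feed the same .qosbox pointer.
 */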
static const struct qcom_icc_qosbox alm_sys_tcu_qos = {
@@ -262,22 +254,20 @@ static const struct qcom_icc_qosbox alm_sys_tcu_qos = {
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SAR2130P_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.qosbox = &alm_sys_tcu_qos,
.num_links = 2,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SAR2130P_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 3,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC,
- SAR2130P_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static const struct qcom_icc_qosbox qnm_gpu_qos = {
@@ -290,12 +280,11 @@ static const struct qcom_icc_qosbox qnm_gpu_qos = {
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SAR2130P_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_gpu_qos,
.num_links = 2,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static const struct qcom_icc_qosbox qnm_mnoc_hf_qos = {
@@ -307,12 +296,11 @@ static const struct qcom_icc_qosbox qnm_mnoc_hf_qos = {
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SAR2130P_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_mnoc_hf_qos,
.num_links = 2,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static const struct qcom_icc_qosbox qnm_mnoc_sf_qos = {
@@ -324,12 +312,11 @@ static const struct qcom_icc_qosbox qnm_mnoc_sf_qos = {
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SAR2130P_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.qosbox = &qnm_mnoc_sf_qos,
.num_links = 2,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static const struct qcom_icc_qosbox qnm_nsp_gemnoc_qos = {
@@ -342,12 +329,11 @@ static const struct qcom_icc_qosbox qnm_nsp_gemnoc_qos = {
static struct qcom_icc_node qnm_nsp_gemnoc = {
.name = "qnm_nsp_gemnoc",
- .id = SAR2130P_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_nsp_gemnoc_qos,
.num_links = 2,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static const struct qcom_icc_qosbox qnm_pcie_qos = {
@@ -359,12 +345,11 @@ static const struct qcom_icc_qosbox qnm_pcie_qos = {
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SAR2130P_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.qosbox = &qnm_pcie_qos,
.num_links = 2,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static const struct qcom_icc_qosbox qnm_snoc_gc_qos = {
@@ -376,12 +361,11 @@ static const struct qcom_icc_qosbox qnm_snoc_gc_qos = {
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SAR2130P_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.qosbox = &qnm_snoc_gc_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static const struct qcom_icc_qosbox qnm_snoc_sf_qos = {
@@ -393,53 +377,48 @@ static const struct qcom_icc_qosbox qnm_snoc_sf_qos = {
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SAR2130P_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.qosbox = &qnm_snoc_sf_qos,
.num_links = 3,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC,
- SAR2130P_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qxm_wlan_q6 = {
.name = "qxm_wlan_q6",
- .id = SAR2130P_MASTER_WLAN_Q6,
.channels = 1,
.buswidth = 8,
.num_links = 3,
- .links = { SAR2130P_SLAVE_GEM_NOC_CNOC, SAR2130P_SLAVE_LLCC,
- SAR2130P_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
- .id = SAR2130P_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
- .links = { SAR2130P_SLAVE_LPASS_CORE_CFG, SAR2130P_SLAVE_LPASS_LPI_CFG,
- SAR2130P_SLAVE_LPASS_MPU_CFG, SAR2130P_SLAVE_LPASS_TOP_CFG,
- SAR2130P_SLAVE_SERVICES_LPASS_AML_NOC, SAR2130P_SLAVE_SERVICE_LPASS_AG_NOC },
+ .link_nodes = { &qhs_lpass_core, &qhs_lpass_lpi,
+ &qhs_lpass_mpu, &qhs_lpass_top,
+ &srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node qxm_lpass_dsp = {
.name = "qxm_lpass_dsp",
- .id = SAR2130P_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 4,
- .links = { SAR2130P_SLAVE_LPASS_TOP_CFG, SAR2130P_SLAVE_LPASS_SNOC,
- SAR2130P_SLAVE_SERVICES_LPASS_AML_NOC, SAR2130P_SLAVE_SERVICE_LPASS_AG_NOC },
+ .link_nodes = { &qhs_lpass_top, &qns_sysnoc,
+ &srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SAR2130P_MASTER_LLCC,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static const struct qcom_icc_qosbox qnm_camnoc_hf_qos = {
@@ -451,12 +430,11 @@ static const struct qcom_icc_qosbox qnm_camnoc_hf_qos = {
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = SAR2130P_MASTER_CAMNOC_HF,
.channels = 1,
.buswidth = 32,
.qosbox = &qnm_camnoc_hf_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static const struct qcom_icc_qosbox qnm_camnoc_icp_qos = {
@@ -468,12 +446,11 @@ static const struct qcom_icc_qosbox qnm_camnoc_icp_qos = {
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
- .id = SAR2130P_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.qosbox = &qnm_camnoc_icp_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static const struct qcom_icc_qosbox qnm_camnoc_sf_qos = {
@@ -485,12 +462,11 @@ static const struct qcom_icc_qosbox qnm_camnoc_sf_qos = {
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = SAR2130P_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.qosbox = &qnm_camnoc_sf_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static const struct qcom_icc_qosbox qnm_lsr_qos = {
@@ -502,12 +478,11 @@ static const struct qcom_icc_qosbox qnm_lsr_qos = {
static struct qcom_icc_node qnm_lsr = {
.name = "qnm_lsr",
- .id = SAR2130P_MASTER_LSR,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_lsr_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static const struct qcom_icc_qosbox qnm_mdp_qos = {
@@ -519,21 +494,19 @@ static const struct qcom_icc_qosbox qnm_mdp_qos = {
static struct qcom_icc_node qnm_mdp = {
.name = "qnm_mdp",
- .id = SAR2130P_MASTER_MDP,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_mdp_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mnoc_cfg = {
.name = "qnm_mnoc_cfg",
- .id = SAR2130P_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static const struct qcom_icc_qosbox qnm_video_qos = {
@@ -545,12 +518,11 @@ static const struct qcom_icc_qosbox qnm_video_qos = {
static struct qcom_icc_node qnm_video = {
.name = "qnm_video",
- .id = SAR2130P_MASTER_VIDEO,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_video_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static const struct qcom_icc_qosbox qnm_video_cv_cpu_qos = {
@@ -562,12 +534,11 @@ static const struct qcom_icc_qosbox qnm_video_cv_cpu_qos = {
static struct qcom_icc_node qnm_video_cv_cpu = {
.name = "qnm_video_cv_cpu",
- .id = SAR2130P_MASTER_VIDEO_CV_PROC,
.channels = 1,
.buswidth = 8,
.qosbox = &qnm_video_cv_cpu_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static const struct qcom_icc_qosbox qnm_video_cvp_qos = {
@@ -579,12 +550,11 @@ static const struct qcom_icc_qosbox qnm_video_cvp_qos = {
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = SAR2130P_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.qosbox = &qnm_video_cvp_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static const struct qcom_icc_qosbox qnm_video_v_cpu_qos = {
@@ -596,30 +566,27 @@ static const struct qcom_icc_qosbox qnm_video_v_cpu_qos = {
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
- .id = SAR2130P_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.qosbox = &qnm_video_v_cpu_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
- .id = SAR2130P_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_SLAVE_SERVICE_NSP_NOC },
+ .link_nodes = { &service_nsp_noc },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
- .id = SAR2130P_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SAR2130P_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_nsp_gemnoc },
};
static const struct qcom_icc_qosbox xm_pcie3_0_qos = {
@@ -632,12 +599,11 @@ static const struct qcom_icc_qosbox xm_pcie3_0_qos = {
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SAR2130P_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_pcie3_0_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static const struct qcom_icc_qosbox xm_pcie3_1_qos = {
@@ -650,12 +616,11 @@ static const struct qcom_icc_qosbox xm_pcie3_1_qos = {
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SAR2130P_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_pcie3_1_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static const struct qcom_icc_qosbox qhm_gic_qos = {
@@ -668,12 +633,11 @@ static const struct qcom_icc_qosbox qhm_gic_qos = {
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
- .id = SAR2130P_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.qosbox = &qhm_gic_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static const struct qcom_icc_qosbox qhm_qdss_bam_qos = {
@@ -686,12 +650,11 @@ static const struct qcom_icc_qosbox qhm_qdss_bam_qos = {
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SAR2130P_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.qosbox = &qhm_qdss_bam_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static const struct qcom_icc_qosbox qhm_qspi_qos = {
@@ -704,12 +667,11 @@ static const struct qcom_icc_qosbox qhm_qspi_qos = {
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SAR2130P_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.qosbox = &qhm_qspi_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static const struct qcom_icc_qosbox qhm_qup0_qos = {
@@ -722,12 +684,11 @@ static const struct qcom_icc_qosbox qhm_qup0_qos = {
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = SAR2130P_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.qosbox = &qhm_qup0_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static const struct qcom_icc_qosbox qhm_qup1_qos = {
@@ -740,21 +701,19 @@ static const struct qcom_icc_qosbox qhm_qup1_qos = {
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SAR2130P_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.qosbox = &qhm_qup1_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SAR2130P_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SAR2130P_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static const struct qcom_icc_qosbox qnm_cnoc_datapath_qos = {
@@ -767,12 +726,11 @@ static const struct qcom_icc_qosbox qnm_cnoc_datapath_qos = {
static struct qcom_icc_node qnm_cnoc_datapath = {
.name = "qnm_cnoc_datapath",
- .id = SAR2130P_MASTER_CNOC_DATAPATH,
.channels = 1,
.buswidth = 8,
.qosbox = &qnm_cnoc_datapath_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static const struct qcom_icc_qosbox qnm_lpass_noc_qos = {
@@ -785,21 +743,19 @@ static const struct qcom_icc_qosbox qnm_lpass_noc_qos = {
static struct qcom_icc_node qnm_lpass_noc = {
.name = "qnm_lpass_noc",
- .id = SAR2130P_MASTER_LPASS_ANOC,
.channels = 1,
.buswidth = 16,
.qosbox = &qnm_lpass_noc_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
- .id = SAR2130P_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static const struct qcom_icc_qosbox qxm_crypto_qos = {
@@ -812,12 +768,11 @@ static const struct qcom_icc_qosbox qxm_crypto_qos = {
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SAR2130P_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.qosbox = &qxm_crypto_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static const struct qcom_icc_qosbox qxm_pimem_qos = {
@@ -830,12 +785,11 @@ static const struct qcom_icc_qosbox qxm_pimem_qos = {
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SAR2130P_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.qosbox = &qxm_pimem_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static const struct qcom_icc_qosbox xm_gic_qos = {
@@ -848,12 +802,11 @@ static const struct qcom_icc_qosbox xm_gic_qos = {
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SAR2130P_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_gic_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static const struct qcom_icc_qosbox xm_qdss_etr_0_qos = {
@@ -866,12 +819,11 @@ static const struct qcom_icc_qosbox xm_qdss_etr_0_qos = {
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
- .id = SAR2130P_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_qdss_etr_0_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static const struct qcom_icc_qosbox xm_qdss_etr_1_qos = {
@@ -884,12 +836,11 @@ static const struct qcom_icc_qosbox xm_qdss_etr_1_qos = {
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
- .id = SAR2130P_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_qdss_etr_1_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static const struct qcom_icc_qosbox xm_sdc1_qos = {
@@ -902,12 +853,11 @@ static const struct qcom_icc_qosbox xm_sdc1_qos = {
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
- .id = SAR2130P_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_sdc1_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static const struct qcom_icc_qosbox xm_usb3_0_qos = {
@@ -920,571 +870,449 @@ static const struct qcom_icc_qosbox xm_usb3_0_qos = {
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SAR2130P_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_usb3_0_qos,
.num_links = 1,
- .links = { SAR2130P_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SAR2130P_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
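The `.num_links = 0` deletions above and below are safe no-ops: with C designated initializers, any member left out of the initializer list is implicitly zero-initialized, so spelling out a zero link count was always redundant. A minimal standalone sketch of that rule (the struct below is a simplified stand-in, not the in-tree qcom_icc_node):

#include <stdio.h>

struct node {
	const char *name;
	int channels;
	int buswidth;
	int num_links;		/* deliberately omitted below */
};

/* Members omitted from a designated initializer are implicitly
 * initialized to zero (C11 6.7.9), so num_links is 0 here. */
static struct node leaf = {
	.name = "qup0_core_slave",
	.channels = 1,
	.buswidth = 4,
};

int main(void)
{
	printf("%s: num_links = %d\n", leaf.name, leaf.num_links);
	return 0;
}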
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SAR2130P_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SAR2130P_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SAR2130P_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SAR2130P_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SAR2130P_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_compute_cfg = {
.name = "qhs_compute_cfg",
- .id = SAR2130P_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_MASTER_CDSP_NOC_CFG },
+ .link_nodes = { &qhm_nsp_noc_config },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SAR2130P_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = SAR2130P_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxa = {
.name = "qhs_cpr_mxa",
- .id = SAR2130P_SLAVE_RBCPR_MXA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxc = {
.name = "qhs_cpr_mxc",
- .id = SAR2130P_SLAVE_RBCPR_MXC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_nspcx = {
.name = "qhs_cpr_nspcx",
- .id = SAR2130P_SLAVE_CPR_NSPCX,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SAR2130P_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = SAR2130P_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SAR2130P_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SAR2130P_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SAR2130P_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SAR2130P_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
- .id = SAR2130P_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_MASTER_CNOC_LPASS_AG_NOC },
+ .link_nodes = { &qhm_config_noc },
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
- .id = SAR2130P_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SAR2130P_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SAR2130P_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SAR2130P_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SAR2130P_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SAR2130P_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SAR2130P_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SAR2130P_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SAR2130P_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SAR2130P_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
- .id = SAR2130P_SLAVE_SDCC_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SAR2130P_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SAR2130P_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
- .id = SAR2130P_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SAR2130P_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SAR2130P_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SAR2130P_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_wlan_q6 = {
.name = "qhs_wlan_q6",
- .id = SAR2130P_SLAVE_WLAN_Q6_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
- .id = SAR2130P_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mnoc_cfg = {
.name = "qns_mnoc_cfg",
- .id = SAR2130P_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qnm_mnoc_cfg },
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
- .id = SAR2130P_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SAR2130P_MASTER_SNOC_CFG },
+ .link_nodes = { &qnm_snoc_cfg },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SAR2130P_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SAR2130P_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SAR2130P_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SAR2130P_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SAR2130P_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SAR2130P_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SAR2130P_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = SAR2130P_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SAR2130P_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SAR2130P_SLAVE_LLCC,
.channels = 2,
.buswidth = 16,
.num_links = 1,
- .links = { SAR2130P_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SAR2130P_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SAR2130P_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
- .id = SAR2130P_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
- .id = SAR2130P_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
- .id = SAR2130P_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
- .id = SAR2130P_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_sysnoc = {
.name = "qns_sysnoc",
- .id = SAR2130P_SLAVE_LPASS_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SAR2130P_MASTER_LPASS_ANOC },
+ .link_nodes = { &qnm_lpass_noc },
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
- .id = SAR2130P_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
- .id = SAR2130P_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SAR2130P_SLAVE_EBI1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SAR2130P_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SAR2130P_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SAR2130P_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SAR2130P_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SAR2130P_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = SAR2130P_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SAR2130P_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_nsp_gemnoc },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
- .id = SAR2130P_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SAR2130P_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SAR2130P_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SAR2130P_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SAR2130P_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SAR2130P_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SAR2130P_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SAR2130P_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SAR2130P_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SAR2130P_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_bcm bcm_acv = {
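Taken together, the hunks above all apply one mechanical transformation: the `.id` member and the `.links` arrays of per-SoC integer constants are replaced by `.link_nodes` arrays of direct `struct qcom_icc_node` pointers, so an edge names its target rather than an index that must be matched up at probe time. A self-contained model of the two styles, assuming nothing kernel-specific (the node and table types below are simplified stand-ins):

#include <stdio.h>

enum { MASTER_A2NOC_SNOC, NODE_COUNT };	/* per-SoC ID, old style only */

struct node {
	const char *name;
	int num_links;
	int links[1];			/* old: integer IDs into a table */
	struct node *link_nodes[1];	/* new: the target itself */
};

static struct node qnm_aggre2_noc;	/* tentative definition */

static struct node qns_a2noc_snoc = {
	.name = "qns_a2noc_snoc",
	.num_links = 1,
	.links = { MASTER_A2NOC_SNOC },
	.link_nodes = { &qnm_aggre2_noc },
};

static struct node qnm_aggre2_noc = { .name = "qnm_aggre2_noc" };

static struct node *table[NODE_COUNT];	/* only the ID style needs this */

int main(void)
{
	table[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc;

	/* The same edge, resolved both ways. */
	printf("by id:      %s -> %s\n", qns_a2noc_snoc.name,
	       table[qns_a2noc_snoc.links[0]]->name);
	printf("by pointer: %s -> %s\n", qns_a2noc_snoc.name,
	       qns_a2noc_snoc.link_nodes[0]->name);
	return 0;
}

Once every edge is a pointer, constants in the `SAR2130P_MASTER_*`/`SAR2130P_SLAVE_*` style have no remaining users, which is why the sc7180 conversion below can also delete its ID header, drivers/interconnect/qcom/sc7180.h, outright.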
diff --git a/drivers/interconnect/qcom/sc7180.c b/drivers/interconnect/qcom/sc7180.c
index af2be1543840..0ea06facf81e 100644
--- a/drivers/interconnect/qcom/sc7180.c
+++ b/drivers/interconnect/qcom/sc7180.c
@@ -14,1224 +14,1210 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sc7180.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg;
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup_0;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_emmc;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node qhm_a2noc_cfg;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup_1;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node qhm_usb3;
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp;
+static struct qcom_icc_node qxm_camnoc_hf1_uncomp;
+static struct qcom_icc_node qxm_camnoc_sf_uncomp;
+static struct qcom_icc_node qnm_npu;
+static struct qcom_icc_node qxm_npu_dsp;
+static struct qcom_icc_node qnm_snoc;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node qhm_cnoc_dc_noc;
+static struct qcom_icc_node acm_apps0;
+static struct qcom_icc_node acm_sys_tcu;
+static struct qcom_icc_node qhm_gemnoc_cfg;
+static struct qcom_icc_node qnm_cmpnoc;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qxm_gpu;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qhm_mnoc_cfg;
+static struct qcom_icc_node qxm_camnoc_hf0;
+static struct qcom_icc_node qxm_camnoc_hf1;
+static struct qcom_icc_node qxm_camnoc_sf;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node qxm_rot;
+static struct qcom_icc_node qxm_venus0;
+static struct qcom_icc_node qxm_venus_arm9;
+static struct qcom_icc_node amm_npu_sys;
+static struct qcom_icc_node qhm_npu_cfg;
+static struct qcom_icc_node qup_core_master_1;
+static struct qcom_icc_node qup_core_master_2;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_gemnoc;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qns_camnoc_uncomp;
+static struct qcom_icc_node qns_cdsp_gemnoc;
+static struct qcom_icc_node qhs_a1_noc_cfg;
+static struct qcom_icc_node qhs_a2_noc_cfg;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy2;
+static struct qcom_icc_node qhs_aop;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_boot_rom;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_camera_nrt_throttle_cfg;
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_dcc_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_display_rt_throttle_cfg;
+static struct qcom_icc_node qhs_display_throttle_cfg;
+static struct qcom_icc_node qhs_emmc_cfg;
+static struct qcom_icc_node qhs_glm;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_mnoc_cfg;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_npu_cfg;
+static struct qcom_icc_node qhs_npu_dma_throttle_cfg;
+static struct qcom_icc_node qhs_npu_dsp_throttle_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qm_cfg;
+static struct qcom_icc_node qhs_qm_mpu_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_security;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm_1;
+static struct qcom_icc_node qhs_tlmm_2;
+static struct qcom_icc_node qhs_tlmm_3;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_venus_throttle_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node qhs_gemnoc;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node qns_gem_noc_snoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node srvc_gemnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qhs_cal_dp0;
+static struct qcom_icc_node qhs_cp;
+static struct qcom_icc_node qhs_dma_bwmon;
+static struct qcom_icc_node qhs_dpm;
+static struct qcom_icc_node qhs_isense;
+static struct qcom_icc_node qhs_llm;
+static struct qcom_icc_node qhs_tcm;
+static struct qcom_icc_node qns_npu_sys;
+static struct qcom_icc_node srvc_noc;
+static struct qcom_icc_node qup_core_slave_1;
+static struct qcom_icc_node qup_core_slave_2;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qns_cnoc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
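The long run of `+static struct qcom_icc_node ...;` lines above consists of tentative definitions rather than a stylistic quirk: the interconnect graph is cyclic (masters initialize pointers to slaves defined further down, and config slaves point back at masters), so every node name must be in scope before any initializer references it. A small self-contained demonstration of the same C pattern, using three node names from this file:

#include <stdio.h>

struct node {
	const char *name;
	struct node *link;	/* one edge keeps the sketch short */
};

/* Tentative definitions: both identifiers become visible here, so the
 * initializers below may reference them in either order. */
static struct node qhm_snoc_cfg;
static struct node srvc_snoc;

static struct node qhs_snoc_cfg = { .name = "qhs_snoc_cfg", .link = &qhm_snoc_cfg };
static struct node qhm_snoc_cfg = { .name = "qhm_snoc_cfg", .link = &srvc_snoc };
static struct node srvc_snoc = { .name = "srvc_snoc" };

int main(void)
{
	/* Walks the chain: qhs_snoc_cfg -> qhm_snoc_cfg -> srvc_snoc */
	for (struct node *n = &qhs_snoc_cfg; n; n = n->link)
		printf("%s%s", n->name, n->link ? " -> " : "\n");
	return 0;
}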
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
- .id = SC7180_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SC7180_MASTER_QSPI,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup_0 = {
.name = "qhm_qup_0",
- .id = SC7180_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SC7180_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emmc = {
.name = "xm_emmc",
- .id = SC7180_MASTER_EMMC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SC7180_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
- .id = SC7180_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SC7180_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup_1 = {
.name = "qhm_qup_1",
- .id = SC7180_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SC7180_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SC7180_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SC7180_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_usb3 = {
.name = "qhm_usb3",
- .id = SC7180_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
- .id = SC7180_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
.name = "qxm_camnoc_hf1_uncomp",
- .id = SC7180_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
- .id = SC7180_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qnm_npu = {
.name = "qnm_npu",
- .id = SC7180_MASTER_NPU,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_CDSP_GEM_NOC },
+ .link_nodes = { &qns_cdsp_gemnoc },
};
static struct qcom_icc_node qxm_npu_dsp = {
.name = "qxm_npu_dsp",
- .id = SC7180_MASTER_NPU_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_CDSP_GEM_NOC },
+ .link_nodes = { &qns_cdsp_gemnoc },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
- .id = SC7180_MASTER_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 51,
- .links = { SC7180_SLAVE_A1NOC_CFG,
- SC7180_SLAVE_A2NOC_CFG,
- SC7180_SLAVE_AHB2PHY_SOUTH,
- SC7180_SLAVE_AHB2PHY_CENTER,
- SC7180_SLAVE_AOP,
- SC7180_SLAVE_AOSS,
- SC7180_SLAVE_BOOT_ROM,
- SC7180_SLAVE_CAMERA_CFG,
- SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG,
- SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG,
- SC7180_SLAVE_CLK_CTL,
- SC7180_SLAVE_RBCPR_CX_CFG,
- SC7180_SLAVE_RBCPR_MX_CFG,
- SC7180_SLAVE_CRYPTO_0_CFG,
- SC7180_SLAVE_DCC_CFG,
- SC7180_SLAVE_CNOC_DDRSS,
- SC7180_SLAVE_DISPLAY_CFG,
- SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG,
- SC7180_SLAVE_DISPLAY_THROTTLE_CFG,
- SC7180_SLAVE_EMMC_CFG,
- SC7180_SLAVE_GLM,
- SC7180_SLAVE_GFX3D_CFG,
- SC7180_SLAVE_IMEM_CFG,
- SC7180_SLAVE_IPA_CFG,
- SC7180_SLAVE_CNOC_MNOC_CFG,
- SC7180_SLAVE_CNOC_MSS,
- SC7180_SLAVE_NPU_CFG,
- SC7180_SLAVE_NPU_DMA_BWMON_CFG,
- SC7180_SLAVE_NPU_PROC_BWMON_CFG,
- SC7180_SLAVE_PDM,
- SC7180_SLAVE_PIMEM_CFG,
- SC7180_SLAVE_PRNG,
- SC7180_SLAVE_QDSS_CFG,
- SC7180_SLAVE_QM_CFG,
- SC7180_SLAVE_QM_MPU_CFG,
- SC7180_SLAVE_QSPI_0,
- SC7180_SLAVE_QUP_0,
- SC7180_SLAVE_QUP_1,
- SC7180_SLAVE_SDCC_2,
- SC7180_SLAVE_SECURITY,
- SC7180_SLAVE_SNOC_CFG,
- SC7180_SLAVE_TCSR,
- SC7180_SLAVE_TLMM_WEST,
- SC7180_SLAVE_TLMM_NORTH,
- SC7180_SLAVE_TLMM_SOUTH,
- SC7180_SLAVE_UFS_MEM_CFG,
- SC7180_SLAVE_USB3,
- SC7180_SLAVE_VENUS_CFG,
- SC7180_SLAVE_VENUS_THROTTLE_CFG,
- SC7180_SLAVE_VSENSE_CTRL_CFG,
- SC7180_SLAVE_SERVICE_CNOC
- },
+ .link_nodes = { &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_ahb2phy0,
+ &qhs_ahb2phy2,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_boot_rom,
+ &qhs_camera_cfg,
+ &qhs_camera_nrt_throttle_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_clk_ctl,
+ &qhs_cpr_cx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_display_rt_throttle_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_emmc_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_mss_cfg,
+ &qhs_npu_cfg,
+ &qhs_npu_dma_throttle_cfg,
+ &qhs_npu_dsp_throttle_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qm_cfg,
+ &qhs_qm_mpu_cfg,
+ &qhs_qspi,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_sdc2,
+ &qhs_security,
+ &qhs_snoc_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm_1,
+ &qhs_tlmm_2,
+ &qhs_tlmm_3,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3,
+ &qhs_venus_cfg,
+ &qhs_venus_throttle_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &srvc_cnoc },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = SC7180_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 51,
- .links = { SC7180_SLAVE_A1NOC_CFG,
- SC7180_SLAVE_A2NOC_CFG,
- SC7180_SLAVE_AHB2PHY_SOUTH,
- SC7180_SLAVE_AHB2PHY_CENTER,
- SC7180_SLAVE_AOP,
- SC7180_SLAVE_AOSS,
- SC7180_SLAVE_BOOT_ROM,
- SC7180_SLAVE_CAMERA_CFG,
- SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG,
- SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG,
- SC7180_SLAVE_CLK_CTL,
- SC7180_SLAVE_RBCPR_CX_CFG,
- SC7180_SLAVE_RBCPR_MX_CFG,
- SC7180_SLAVE_CRYPTO_0_CFG,
- SC7180_SLAVE_DCC_CFG,
- SC7180_SLAVE_CNOC_DDRSS,
- SC7180_SLAVE_DISPLAY_CFG,
- SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG,
- SC7180_SLAVE_DISPLAY_THROTTLE_CFG,
- SC7180_SLAVE_EMMC_CFG,
- SC7180_SLAVE_GLM,
- SC7180_SLAVE_GFX3D_CFG,
- SC7180_SLAVE_IMEM_CFG,
- SC7180_SLAVE_IPA_CFG,
- SC7180_SLAVE_CNOC_MNOC_CFG,
- SC7180_SLAVE_CNOC_MSS,
- SC7180_SLAVE_NPU_CFG,
- SC7180_SLAVE_NPU_DMA_BWMON_CFG,
- SC7180_SLAVE_NPU_PROC_BWMON_CFG,
- SC7180_SLAVE_PDM,
- SC7180_SLAVE_PIMEM_CFG,
- SC7180_SLAVE_PRNG,
- SC7180_SLAVE_QDSS_CFG,
- SC7180_SLAVE_QM_CFG,
- SC7180_SLAVE_QM_MPU_CFG,
- SC7180_SLAVE_QSPI_0,
- SC7180_SLAVE_QUP_0,
- SC7180_SLAVE_QUP_1,
- SC7180_SLAVE_SDCC_2,
- SC7180_SLAVE_SECURITY,
- SC7180_SLAVE_SNOC_CFG,
- SC7180_SLAVE_TCSR,
- SC7180_SLAVE_TLMM_WEST,
- SC7180_SLAVE_TLMM_NORTH,
- SC7180_SLAVE_TLMM_SOUTH,
- SC7180_SLAVE_UFS_MEM_CFG,
- SC7180_SLAVE_USB3,
- SC7180_SLAVE_VENUS_CFG,
- SC7180_SLAVE_VENUS_THROTTLE_CFG,
- SC7180_SLAVE_VSENSE_CTRL_CFG,
- SC7180_SLAVE_SERVICE_CNOC
- },
+ .link_nodes = { &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_ahb2phy0,
+ &qhs_ahb2phy2,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_boot_rom,
+ &qhs_camera_cfg,
+ &qhs_camera_nrt_throttle_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_clk_ctl,
+ &qhs_cpr_cx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_display_rt_throttle_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_emmc_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_mss_cfg,
+ &qhs_npu_cfg,
+ &qhs_npu_dma_throttle_cfg,
+ &qhs_npu_dsp_throttle_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qm_cfg,
+ &qhs_qm_mpu_cfg,
+ &qhs_qspi,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_sdc2,
+ &qhs_security,
+ &qhs_snoc_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm_1,
+ &qhs_tlmm_2,
+ &qhs_tlmm_3,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3,
+ &qhs_venus_cfg,
+ &qhs_venus_throttle_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &srvc_cnoc },
};
static struct qcom_icc_node qhm_cnoc_dc_noc = {
.name = "qhm_cnoc_dc_noc",
- .id = SC7180_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SC7180_SLAVE_GEM_NOC_CFG,
- SC7180_SLAVE_LLCC_CFG
- },
+ .link_nodes = { &qhs_gemnoc,
+ &qhs_llcc },
};
static struct qcom_icc_node acm_apps0 = {
.name = "acm_apps0",
- .id = SC7180_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SC7180_SLAVE_GEM_NOC_SNOC,
- SC7180_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_snoc,
+ &qns_llcc },
};
static struct qcom_icc_node acm_sys_tcu = {
.name = "acm_sys_tcu",
- .id = SC7180_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC7180_SLAVE_GEM_NOC_SNOC,
- SC7180_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_snoc,
+ &qns_llcc },
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
- .id = SC7180_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SC7180_SLAVE_MSS_PROC_MS_MPU_CFG,
- SC7180_SLAVE_SERVICE_GEM_NOC
- },
+ .link_nodes = { &qhs_mdsp_ms_mpu_cfg,
+ &srvc_gemnoc },
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
- .id = SC7180_MASTER_COMPUTE_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { SC7180_SLAVE_GEM_NOC_SNOC,
- SC7180_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_snoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SC7180_MASTER_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SC7180_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { SC7180_SLAVE_GEM_NOC_SNOC,
- SC7180_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_snoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SC7180_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SC7180_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7180_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qxm_gpu = {
.name = "qxm_gpu",
- .id = SC7180_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SC7180_SLAVE_GEM_NOC_SNOC,
- SC7180_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_snoc,
+ &qns_llcc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SC7180_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
- .id = SC7180_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qxm_camnoc_hf0 = {
.name = "qxm_camnoc_hf0",
- .id = SC7180_MASTER_CAMNOC_HF0,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_hf1 = {
.name = "qxm_camnoc_hf1",
- .id = SC7180_MASTER_CAMNOC_HF1,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
- .id = SC7180_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = SC7180_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
- .id = SC7180_MASTER_ROTATOR,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
- .id = SC7180_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
- .id = SC7180_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node amm_npu_sys = {
.name = "amm_npu_sys",
- .id = SC7180_MASTER_NPU_SYS,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_SLAVE_NPU_COMPUTE_NOC },
+ .link_nodes = { &qns_npu_sys },
};
static struct qcom_icc_node qhm_npu_cfg = {
.name = "qhm_npu_cfg",
- .id = SC7180_MASTER_NPU_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 8,
- .links = { SC7180_SLAVE_NPU_CAL_DP0,
- SC7180_SLAVE_NPU_CP,
- SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG,
- SC7180_SLAVE_NPU_DPM,
- SC7180_SLAVE_ISENSE_CFG,
- SC7180_SLAVE_NPU_LLM_CFG,
- SC7180_SLAVE_NPU_TCM,
- SC7180_SLAVE_SERVICE_NPU_NOC
- },
+ .link_nodes = { &qhs_cal_dp0,
+ &qhs_cp,
+ &qhs_dma_bwmon,
+ &qhs_dpm,
+ &qhs_isense,
+ &qhs_llm,
+ &qhs_tcm,
+ &srvc_noc },
};
static struct qcom_icc_node qup_core_master_1 = {
.name = "qup_core_master_1",
- .id = SC7180_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup_core_slave_1 },
};
static struct qcom_icc_node qup_core_master_2 = {
.name = "qup_core_master_2",
- .id = SC7180_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup_core_slave_2 },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = SC7180_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SC7180_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
- .links = { SC7180_SLAVE_APPSS,
- SC7180_SLAVE_SNOC_CNOC,
- SC7180_SLAVE_SNOC_GEM_NOC_SF,
- SC7180_SLAVE_IMEM,
- SC7180_SLAVE_PIMEM,
- SC7180_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qhs_apss,
+ &qns_cnoc,
+ &qns_gemnoc_sf,
+ &qxs_imem,
+ &qxs_pimem,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SC7180_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 7,
- .links = { SC7180_SLAVE_APPSS,
- SC7180_SLAVE_SNOC_CNOC,
- SC7180_SLAVE_SNOC_GEM_NOC_SF,
- SC7180_SLAVE_IMEM,
- SC7180_SLAVE_PIMEM,
- SC7180_SLAVE_QDSS_STM,
- SC7180_SLAVE_TCU
- },
+ .link_nodes = { &qhs_apss,
+ &qns_cnoc,
+ &qns_gemnoc_sf,
+ &qxs_imem,
+ &qxs_pimem,
+ &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
- .id = SC7180_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
- .links = { SC7180_SLAVE_APPSS,
- SC7180_SLAVE_SNOC_CNOC,
- SC7180_SLAVE_IMEM,
- SC7180_SLAVE_PIMEM,
- SC7180_SLAVE_QDSS_STM,
- SC7180_SLAVE_TCU
- },
+ .link_nodes = { &qhs_apss,
+ &qns_cnoc,
+ &qxs_imem,
+ &qxs_pimem,
+ &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SC7180_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC7180_SLAVE_SNOC_GEM_NOC_GC,
- SC7180_SLAVE_IMEM
- },
+ .link_nodes = { &qns_gemnoc_gc,
+ &qxs_imem },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SC7180_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7180_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SC7180_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SC7180_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7180_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SC7180_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
- .id = SC7180_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_cdsp_gemnoc = {
.name = "qns_cdsp_gemnoc",
- .id = SC7180_SLAVE_CDSP_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
- .id = SC7180_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_MASTER_A1NOC_CFG },
+ .link_nodes = { &qhm_a1noc_cfg },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
- .id = SC7180_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_MASTER_A2NOC_CFG },
+ .link_nodes = { &qhm_a2noc_cfg },
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SC7180_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
- .id = SC7180_SLAVE_AHB2PHY_CENTER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
- .id = SC7180_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SC7180_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_boot_rom = {
.name = "qhs_boot_rom",
- .id = SC7180_SLAVE_BOOT_ROM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SC7180_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = {
.name = "qhs_camera_nrt_throttle_cfg",
- .id = SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
.name = "qhs_camera_rt_throttle_cfg",
- .id = SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SC7180_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SC7180_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = SC7180_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SC7180_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
- .id = SC7180_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = SC7180_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qhm_cnoc_dc_noc },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SC7180_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_rt_throttle_cfg = {
.name = "qhs_display_rt_throttle_cfg",
- .id = SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_throttle_cfg = {
.name = "qhs_display_throttle_cfg",
- .id = SC7180_SLAVE_DISPLAY_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emmc_cfg = {
.name = "qhs_emmc_cfg",
- .id = SC7180_SLAVE_EMMC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
- .id = SC7180_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SC7180_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SC7180_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SC7180_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
- .id = SC7180_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qhm_mnoc_cfg },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SC7180_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_npu_cfg = {
.name = "qhs_npu_cfg",
- .id = SC7180_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_MASTER_NPU_NOC_CFG },
+ .link_nodes = { &qhm_npu_cfg },
};
static struct qcom_icc_node qhs_npu_dma_throttle_cfg = {
.name = "qhs_npu_dma_throttle_cfg",
- .id = SC7180_SLAVE_NPU_DMA_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_npu_dsp_throttle_cfg = {
.name = "qhs_npu_dsp_throttle_cfg",
- .id = SC7180_SLAVE_NPU_PROC_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SC7180_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SC7180_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SC7180_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SC7180_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_cfg = {
.name = "qhs_qm_cfg",
- .id = SC7180_SLAVE_QM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_mpu_cfg = {
.name = "qhs_qm_mpu_cfg",
- .id = SC7180_SLAVE_QM_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SC7180_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SC7180_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SC7180_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SC7180_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
- .id = SC7180_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = SC7180_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SC7180_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_1 = {
.name = "qhs_tlmm_1",
- .id = SC7180_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_2 = {
.name = "qhs_tlmm_2",
- .id = SC7180_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_3 = {
.name = "qhs_tlmm_3",
- .id = SC7180_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SC7180_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
- .id = SC7180_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SC7180_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_throttle_cfg = {
.name = "qhs_venus_throttle_cfg",
- .id = SC7180_SLAVE_VENUS_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SC7180_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SC7180_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gemnoc = {
.name = "qhs_gemnoc",
- .id = SC7180_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7180_MASTER_GEM_NOC_CFG },
+ .link_nodes = { &qhm_gemnoc_cfg },
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SC7180_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
- .id = SC7180_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
- .id = SC7180_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_MASTER_GEM_NOC_SNOC },
+ .link_nodes = { &qnm_gemnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SC7180_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7180_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node srvc_gemnoc = {
.name = "srvc_gemnoc",
- .id = SC7180_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SC7180_SLAVE_EBI1,
.channels = 2,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SC7180_SLAVE_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SC7180_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7180_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SC7180_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cal_dp0 = {
.name = "qhs_cal_dp0",
- .id = SC7180_SLAVE_NPU_CAL_DP0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cp = {
.name = "qhs_cp",
- .id = SC7180_SLAVE_NPU_CP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dma_bwmon = {
.name = "qhs_dma_bwmon",
- .id = SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dpm = {
.name = "qhs_dpm",
- .id = SC7180_SLAVE_NPU_DPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_isense = {
.name = "qhs_isense",
- .id = SC7180_SLAVE_ISENSE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llm = {
.name = "qhs_llm",
- .id = SC7180_SLAVE_NPU_LLM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcm = {
.name = "qhs_tcm",
- .id = SC7180_SLAVE_NPU_TCM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_npu_sys = {
.name = "qns_npu_sys",
- .id = SC7180_SLAVE_NPU_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node srvc_noc = {
.name = "srvc_noc",
- .id = SC7180_SLAVE_SERVICE_NPU_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup_core_slave_1 = {
.name = "qup_core_slave_1",
- .id = SC7180_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup_core_slave_2 = {
.name = "qup_core_slave_2",
- .id = SC7180_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SC7180_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
- .id = SC7180_SLAVE_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_MASTER_SNOC_CNOC },
+ .link_nodes = { &qnm_snoc },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SC7180_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7180_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SC7180_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7180_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SC7180_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SC7180_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SC7180_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SC7180_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SC7180_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
diff --git a/drivers/interconnect/qcom/sc7180.h b/drivers/interconnect/qcom/sc7180.h
deleted file mode 100644
index 2b718922c109..000000000000
--- a/drivers/interconnect/qcom/sc7180.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Qualcomm #define SC7180 interconnect IDs
- *
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SC7180_H
-#define __DRIVERS_INTERCONNECT_QCOM_SC7180_H
-
-#define SC7180_MASTER_APPSS_PROC 0
-#define SC7180_MASTER_SYS_TCU 1
-#define SC7180_MASTER_NPU_SYS 2
-/* 3 was used by MASTER_IPA_CORE, now represented as RPMh clock */
-#define SC7180_MASTER_LLCC 4
-#define SC7180_MASTER_A1NOC_CFG 5
-#define SC7180_MASTER_A2NOC_CFG 6
-#define SC7180_MASTER_CNOC_DC_NOC 7
-#define SC7180_MASTER_GEM_NOC_CFG 8
-#define SC7180_MASTER_CNOC_MNOC_CFG 9
-#define SC7180_MASTER_NPU_NOC_CFG 10
-#define SC7180_MASTER_QDSS_BAM 11
-#define SC7180_MASTER_QSPI 12
-#define SC7180_MASTER_QUP_0 13
-#define SC7180_MASTER_QUP_1 14
-#define SC7180_MASTER_SNOC_CFG 15
-#define SC7180_MASTER_A1NOC_SNOC 16
-#define SC7180_MASTER_A2NOC_SNOC 17
-#define SC7180_MASTER_COMPUTE_NOC 18
-#define SC7180_MASTER_GEM_NOC_SNOC 19
-#define SC7180_MASTER_MNOC_HF_MEM_NOC 20
-#define SC7180_MASTER_MNOC_SF_MEM_NOC 21
-#define SC7180_MASTER_NPU 22
-#define SC7180_MASTER_SNOC_CNOC 23
-#define SC7180_MASTER_SNOC_GC_MEM_NOC 24
-#define SC7180_MASTER_SNOC_SF_MEM_NOC 25
-#define SC7180_MASTER_QUP_CORE_0 26
-#define SC7180_MASTER_QUP_CORE_1 27
-#define SC7180_MASTER_CAMNOC_HF0 28
-#define SC7180_MASTER_CAMNOC_HF1 29
-#define SC7180_MASTER_CAMNOC_HF0_UNCOMP 30
-#define SC7180_MASTER_CAMNOC_HF1_UNCOMP 31
-#define SC7180_MASTER_CAMNOC_SF 32
-#define SC7180_MASTER_CAMNOC_SF_UNCOMP 33
-#define SC7180_MASTER_CRYPTO 34
-#define SC7180_MASTER_GFX3D 35
-#define SC7180_MASTER_IPA 36
-#define SC7180_MASTER_MDP0 37
-#define SC7180_MASTER_NPU_PROC 38
-#define SC7180_MASTER_PIMEM 39
-#define SC7180_MASTER_ROTATOR 40
-#define SC7180_MASTER_VIDEO_P0 41
-#define SC7180_MASTER_VIDEO_PROC 42
-#define SC7180_MASTER_QDSS_DAP 43
-#define SC7180_MASTER_QDSS_ETR 44
-#define SC7180_MASTER_SDCC_2 45
-#define SC7180_MASTER_UFS_MEM 46
-#define SC7180_MASTER_USB3 47
-#define SC7180_MASTER_EMMC 48
-#define SC7180_SLAVE_EBI1 49
-/* 50 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
-#define SC7180_SLAVE_A1NOC_CFG 51
-#define SC7180_SLAVE_A2NOC_CFG 52
-#define SC7180_SLAVE_AHB2PHY_SOUTH 53
-#define SC7180_SLAVE_AHB2PHY_CENTER 54
-#define SC7180_SLAVE_AOP 55
-#define SC7180_SLAVE_AOSS 56
-#define SC7180_SLAVE_APPSS 57
-#define SC7180_SLAVE_BOOT_ROM 58
-#define SC7180_SLAVE_NPU_CAL_DP0 59
-#define SC7180_SLAVE_CAMERA_CFG 60
-#define SC7180_SLAVE_CAMERA_NRT_THROTTLE_CFG 61
-#define SC7180_SLAVE_CAMERA_RT_THROTTLE_CFG 62
-#define SC7180_SLAVE_CLK_CTL 63
-#define SC7180_SLAVE_NPU_CP 64
-#define SC7180_SLAVE_RBCPR_CX_CFG 65
-#define SC7180_SLAVE_RBCPR_MX_CFG 66
-#define SC7180_SLAVE_CRYPTO_0_CFG 67
-#define SC7180_SLAVE_DCC_CFG 68
-#define SC7180_SLAVE_CNOC_DDRSS 69
-#define SC7180_SLAVE_DISPLAY_CFG 70
-#define SC7180_SLAVE_DISPLAY_RT_THROTTLE_CFG 71
-#define SC7180_SLAVE_DISPLAY_THROTTLE_CFG 72
-#define SC7180_SLAVE_NPU_INT_DMA_BWMON_CFG 73
-#define SC7180_SLAVE_NPU_DPM 74
-#define SC7180_SLAVE_EMMC_CFG 75
-#define SC7180_SLAVE_GEM_NOC_CFG 76
-#define SC7180_SLAVE_GLM 77
-#define SC7180_SLAVE_GFX3D_CFG 78
-#define SC7180_SLAVE_IMEM_CFG 79
-#define SC7180_SLAVE_IPA_CFG 80
-#define SC7180_SLAVE_ISENSE_CFG 81
-#define SC7180_SLAVE_LLCC_CFG 82
-#define SC7180_SLAVE_NPU_LLM_CFG 83
-#define SC7180_SLAVE_MSS_PROC_MS_MPU_CFG 84
-#define SC7180_SLAVE_CNOC_MNOC_CFG 85
-#define SC7180_SLAVE_CNOC_MSS 86
-#define SC7180_SLAVE_NPU_CFG 87
-#define SC7180_SLAVE_NPU_DMA_BWMON_CFG 88
-#define SC7180_SLAVE_NPU_PROC_BWMON_CFG 89
-#define SC7180_SLAVE_PDM 90
-#define SC7180_SLAVE_PIMEM_CFG 91
-#define SC7180_SLAVE_PRNG 92
-#define SC7180_SLAVE_QDSS_CFG 93
-#define SC7180_SLAVE_QM_CFG 94
-#define SC7180_SLAVE_QM_MPU_CFG 95
-#define SC7180_SLAVE_QSPI_0 96
-#define SC7180_SLAVE_QUP_0 97
-#define SC7180_SLAVE_QUP_1 98
-#define SC7180_SLAVE_SDCC_2 99
-#define SC7180_SLAVE_SECURITY 100
-#define SC7180_SLAVE_SNOC_CFG 101
-#define SC7180_SLAVE_NPU_TCM 102
-#define SC7180_SLAVE_TCSR 103
-#define SC7180_SLAVE_TLMM_WEST 104
-#define SC7180_SLAVE_TLMM_NORTH 105
-#define SC7180_SLAVE_TLMM_SOUTH 106
-#define SC7180_SLAVE_UFS_MEM_CFG 107
-#define SC7180_SLAVE_USB3 108
-#define SC7180_SLAVE_VENUS_CFG 109
-#define SC7180_SLAVE_VENUS_THROTTLE_CFG 110
-#define SC7180_SLAVE_VSENSE_CTRL_CFG 111
-#define SC7180_SLAVE_A1NOC_SNOC 112
-#define SC7180_SLAVE_A2NOC_SNOC 113
-#define SC7180_SLAVE_CAMNOC_UNCOMP 114
-#define SC7180_SLAVE_CDSP_GEM_NOC 115
-#define SC7180_SLAVE_SNOC_CNOC 116
-#define SC7180_SLAVE_GEM_NOC_SNOC 117
-#define SC7180_SLAVE_SNOC_GEM_NOC_GC 118
-#define SC7180_SLAVE_SNOC_GEM_NOC_SF 119
-#define SC7180_SLAVE_LLCC 120
-#define SC7180_SLAVE_MNOC_HF_MEM_NOC 121
-#define SC7180_SLAVE_MNOC_SF_MEM_NOC 122
-#define SC7180_SLAVE_NPU_COMPUTE_NOC 123
-#define SC7180_SLAVE_QUP_CORE_0 124
-#define SC7180_SLAVE_QUP_CORE_1 125
-#define SC7180_SLAVE_IMEM 126
-#define SC7180_SLAVE_PIMEM 127
-#define SC7180_SLAVE_SERVICE_A1NOC 128
-#define SC7180_SLAVE_SERVICE_A2NOC 129
-#define SC7180_SLAVE_SERVICE_CNOC 130
-#define SC7180_SLAVE_SERVICE_GEM_NOC 131
-#define SC7180_SLAVE_SERVICE_MNOC 132
-#define SC7180_SLAVE_SERVICE_NPU_NOC 133
-#define SC7180_SLAVE_SERVICE_SNOC 134
-#define SC7180_SLAVE_QDSS_STM 135
-#define SC7180_SLAVE_TCU 136
-
-#endif
diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c
index 905403a3a930..c4cb6443f2d4 100644
--- a/drivers/interconnect/qcom/sc7280.c
+++ b/drivers/interconnect/qcom/sc7280.c
@@ -15,11 +15,152 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sc7280.h"
+
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qnm_a1noc_cfg;
+static struct qcom_icc_node xm_sdc1;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb2;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qnm_a2noc_cfg;
+static struct qcom_icc_node qnm_cnoc_datapath;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qnm_cnoc3_cnoc2;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node qnm_cnoc2_cnoc3;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node qnm_cnoc_dc_noc;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_cmpnoc;
+static struct qcom_icc_node qnm_gemnoc_cfg;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qhm_config_noc;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_mnoc_cfg;
+static struct qcom_icc_node qnm_video0;
+static struct qcom_icc_node qnm_video_cpu;
+static struct qcom_icc_node qxm_camnoc_hf;
+static struct qcom_icc_node qxm_camnoc_icp;
+static struct qcom_icc_node qxm_camnoc_sf;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node qhm_nsp_noc_config;
+static struct qcom_icc_node qxm_nsp;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_snoc_cfg;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_dcc_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_hwkm;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_lpass_cfg;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_mx_rdpm;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_pka_wrapper_cfg;
+static struct qcom_icc_node qhs_pmu_wrapper_cfg;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_sdc1;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_security;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb2;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_a1_noc_cfg;
+static struct qcom_icc_node qns_a2_noc_cfg;
+static struct qcom_icc_node qns_cnoc2_cnoc3;
+static struct qcom_icc_node qns_mnoc_cfg;
+static struct qcom_icc_node qns_snoc_cfg;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qns_cnoc3_cnoc2;
+static struct qcom_icc_node qns_cnoc_a2noc;
+static struct qcom_icc_node qns_ddrss_cfg;
+static struct qcom_icc_node qxs_boot_imem;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qns_gemnoc;
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node qhs_modem_ms_mpu_cfg;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node srvc_even_gemnoc;
+static struct qcom_icc_node srvc_odd_gemnoc;
+static struct qcom_icc_node srvc_sys_gemnoc;
+static struct qcom_icc_node qhs_lpass_core;
+static struct qcom_icc_node qhs_lpass_lpi;
+static struct qcom_icc_node qhs_lpass_mpu;
+static struct qcom_icc_node qhs_lpass_top;
+static struct qcom_icc_node srvc_niu_aml_noc;
+static struct qcom_icc_node srvc_niu_lpass_agnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node service_nsp_noc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node srvc_snoc;
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SC7280_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -29,12 +170,11 @@ static struct qcom_icc_node qhm_qspi = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = SC7280_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -44,12 +184,11 @@ static struct qcom_icc_node qhm_qup0 = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SC7280_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -59,21 +198,19 @@ static struct qcom_icc_node qhm_qup1 = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qnm_a1noc_cfg = {
.name = "qnm_a1noc_cfg",
- .id = SC7280_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
- .id = SC7280_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -83,12 +220,11 @@ static struct qcom_icc_node xm_sdc1 = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SC7280_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -98,12 +234,11 @@ static struct qcom_icc_node xm_sdc2 = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SC7280_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -113,12 +248,11 @@ static struct qcom_icc_node xm_sdc4 = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SC7280_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -128,21 +262,19 @@ static struct qcom_icc_node xm_ufs_mem = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb2 = {
.name = "xm_usb2",
- .id = SC7280_MASTER_USB2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7280_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SC7280_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -152,12 +284,11 @@ static struct qcom_icc_node xm_usb3_0 = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SC7280_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -167,21 +298,19 @@ static struct qcom_icc_node qhm_qdss_bam = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_a2noc_cfg = {
.name = "qnm_a2noc_cfg",
- .id = SC7280_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qnm_cnoc_datapath = {
.name = "qnm_cnoc_datapath",
- .id = SC7280_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -191,12 +320,11 @@ static struct qcom_icc_node qnm_cnoc_datapath = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SC7280_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -206,12 +334,11 @@ static struct qcom_icc_node qxm_crypto = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SC7280_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -221,30 +348,27 @@ static struct qcom_icc_node qxm_ipa = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SC7280_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SC7280_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SC7280_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -254,135 +378,126 @@ static struct qcom_icc_node xm_qdss_etr = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SC7280_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SC7280_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qnm_cnoc3_cnoc2 = {
.name = "qnm_cnoc3_cnoc2",
- .id = SC7280_MASTER_CNOC3_CNOC2,
.channels = 1,
.buswidth = 8,
.num_links = 44,
- .links = { SC7280_SLAVE_AHB2PHY_SOUTH, SC7280_SLAVE_AHB2PHY_NORTH,
- SC7280_SLAVE_CAMERA_CFG, SC7280_SLAVE_CLK_CTL,
- SC7280_SLAVE_CDSP_CFG, SC7280_SLAVE_RBCPR_CX_CFG,
- SC7280_SLAVE_RBCPR_MX_CFG, SC7280_SLAVE_CRYPTO_0_CFG,
- SC7280_SLAVE_CX_RDPM, SC7280_SLAVE_DCC_CFG,
- SC7280_SLAVE_DISPLAY_CFG, SC7280_SLAVE_GFX3D_CFG,
- SC7280_SLAVE_HWKM, SC7280_SLAVE_IMEM_CFG,
- SC7280_SLAVE_IPA_CFG, SC7280_SLAVE_IPC_ROUTER_CFG,
- SC7280_SLAVE_LPASS, SC7280_SLAVE_CNOC_MSS,
- SC7280_SLAVE_MX_RDPM, SC7280_SLAVE_PCIE_0_CFG,
- SC7280_SLAVE_PCIE_1_CFG, SC7280_SLAVE_PDM,
- SC7280_SLAVE_PIMEM_CFG, SC7280_SLAVE_PKA_WRAPPER_CFG,
- SC7280_SLAVE_PMU_WRAPPER_CFG, SC7280_SLAVE_QDSS_CFG,
- SC7280_SLAVE_QSPI_0, SC7280_SLAVE_QUP_0,
- SC7280_SLAVE_QUP_1, SC7280_SLAVE_SDCC_1,
- SC7280_SLAVE_SDCC_2, SC7280_SLAVE_SDCC_4,
- SC7280_SLAVE_SECURITY, SC7280_SLAVE_TCSR,
- SC7280_SLAVE_TLMM, SC7280_SLAVE_UFS_MEM_CFG,
- SC7280_SLAVE_USB2, SC7280_SLAVE_USB3_0,
- SC7280_SLAVE_VENUS_CFG, SC7280_SLAVE_VSENSE_CTRL_CFG,
- SC7280_SLAVE_A1NOC_CFG, SC7280_SLAVE_A2NOC_CFG,
- SC7280_SLAVE_CNOC_MNOC_CFG, SC7280_SLAVE_SNOC_CFG },
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_compute_cfg, &qhs_cpr_cx,
+ &qhs_cpr_mx, &qhs_crypto0_cfg,
+ &qhs_cx_rdpm, &qhs_dcc_cfg,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_hwkm, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_lpass_cfg, &qhs_mss_cfg,
+ &qhs_mx_rdpm, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pdm,
+ &qhs_pimem_cfg, &qhs_pka_wrapper_cfg,
+ &qhs_pmu_wrapper_cfg, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_sdc1,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_security, &qhs_tcsr,
+ &qhs_tlmm, &qhs_ufs_mem_cfg,
+ &qhs_usb2, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qns_a1_noc_cfg, &qns_a2_noc_cfg,
+ &qns_mnoc_cfg, &qns_snoc_cfg },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = SC7280_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 45,
- .links = { SC7280_SLAVE_AHB2PHY_SOUTH, SC7280_SLAVE_AHB2PHY_NORTH,
- SC7280_SLAVE_CAMERA_CFG, SC7280_SLAVE_CLK_CTL,
- SC7280_SLAVE_CDSP_CFG, SC7280_SLAVE_RBCPR_CX_CFG,
- SC7280_SLAVE_RBCPR_MX_CFG, SC7280_SLAVE_CRYPTO_0_CFG,
- SC7280_SLAVE_CX_RDPM, SC7280_SLAVE_DCC_CFG,
- SC7280_SLAVE_DISPLAY_CFG, SC7280_SLAVE_GFX3D_CFG,
- SC7280_SLAVE_HWKM, SC7280_SLAVE_IMEM_CFG,
- SC7280_SLAVE_IPA_CFG, SC7280_SLAVE_IPC_ROUTER_CFG,
- SC7280_SLAVE_LPASS, SC7280_SLAVE_CNOC_MSS,
- SC7280_SLAVE_MX_RDPM, SC7280_SLAVE_PCIE_0_CFG,
- SC7280_SLAVE_PCIE_1_CFG, SC7280_SLAVE_PDM,
- SC7280_SLAVE_PIMEM_CFG, SC7280_SLAVE_PKA_WRAPPER_CFG,
- SC7280_SLAVE_PMU_WRAPPER_CFG, SC7280_SLAVE_QDSS_CFG,
- SC7280_SLAVE_QSPI_0, SC7280_SLAVE_QUP_0,
- SC7280_SLAVE_QUP_1, SC7280_SLAVE_SDCC_1,
- SC7280_SLAVE_SDCC_2, SC7280_SLAVE_SDCC_4,
- SC7280_SLAVE_SECURITY, SC7280_SLAVE_TCSR,
- SC7280_SLAVE_TLMM, SC7280_SLAVE_UFS_MEM_CFG,
- SC7280_SLAVE_USB2, SC7280_SLAVE_USB3_0,
- SC7280_SLAVE_VENUS_CFG, SC7280_SLAVE_VSENSE_CTRL_CFG,
- SC7280_SLAVE_A1NOC_CFG, SC7280_SLAVE_A2NOC_CFG,
- SC7280_SLAVE_CNOC2_CNOC3, SC7280_SLAVE_CNOC_MNOC_CFG,
- SC7280_SLAVE_SNOC_CFG },
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_compute_cfg, &qhs_cpr_cx,
+ &qhs_cpr_mx, &qhs_crypto0_cfg,
+ &qhs_cx_rdpm, &qhs_dcc_cfg,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_hwkm, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_lpass_cfg, &qhs_mss_cfg,
+ &qhs_mx_rdpm, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pdm,
+ &qhs_pimem_cfg, &qhs_pka_wrapper_cfg,
+ &qhs_pmu_wrapper_cfg, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_sdc1,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_security, &qhs_tcsr,
+ &qhs_tlmm, &qhs_ufs_mem_cfg,
+ &qhs_usb2, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qns_a1_noc_cfg, &qns_a2_noc_cfg,
+ &qns_cnoc2_cnoc3, &qns_mnoc_cfg,
+ &qns_snoc_cfg },
};
static struct qcom_icc_node qnm_cnoc2_cnoc3 = {
.name = "qnm_cnoc2_cnoc3",
- .id = SC7280_MASTER_CNOC2_CNOC3,
.channels = 1,
.buswidth = 8,
.num_links = 9,
- .links = { SC7280_SLAVE_AOSS, SC7280_SLAVE_APPSS,
- SC7280_SLAVE_CNOC_A2NOC, SC7280_SLAVE_DDRSS_CFG,
- SC7280_SLAVE_BOOT_IMEM, SC7280_SLAVE_IMEM,
- SC7280_SLAVE_PIMEM, SC7280_SLAVE_QDSS_STM,
- SC7280_SLAVE_TCU },
+ .link_nodes = { &qhs_aoss, &qhs_apss,
+ &qns_cnoc_a2noc, &qns_ddrss_cfg,
+ &qxs_boot_imem, &qxs_imem,
+ &qxs_pimem, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SC7280_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 9,
- .links = { SC7280_SLAVE_AOSS, SC7280_SLAVE_APPSS,
- SC7280_SLAVE_CNOC3_CNOC2, SC7280_SLAVE_DDRSS_CFG,
- SC7280_SLAVE_BOOT_IMEM, SC7280_SLAVE_IMEM,
- SC7280_SLAVE_PIMEM, SC7280_SLAVE_QDSS_STM,
- SC7280_SLAVE_TCU },
+ .link_nodes = { &qhs_aoss, &qhs_apss,
+ &qns_cnoc3_cnoc2, &qns_ddrss_cfg,
+ &qxs_boot_imem, &qxs_imem,
+ &qxs_pimem, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SC7280_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC7280_SLAVE_PCIE_0, SC7280_SLAVE_PCIE_1 },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
.name = "qnm_cnoc_dc_noc",
- .id = SC7280_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SC7280_SLAVE_LLCC_CFG, SC7280_SLAVE_GEM_NOC_CFG },
+ .link_nodes = { &qhs_llcc, &qns_gemnoc },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SC7280_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -392,12 +507,11 @@ static struct qcom_icc_node alm_gpu_tcu = {
.urg_fwd = 0,
},
.num_links = 2,
- .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SC7280_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -407,22 +521,20 @@ static struct qcom_icc_node alm_sys_tcu = {
.urg_fwd = 0,
},
.num_links = 2,
- .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SC7280_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 3,
- .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC,
- SC7280_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
- .id = SC7280_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -432,23 +544,21 @@ static struct qcom_icc_node qnm_cmpnoc = {
.urg_fwd = 1,
},
.num_links = 2,
- .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
- .id = SC7280_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 5,
- .links = { SC7280_SLAVE_MSS_PROC_MS_MPU_CFG, SC7280_SLAVE_MCDMA_MS_MPU_CFG,
- SC7280_SLAVE_SERVICE_GEM_NOC_1, SC7280_SLAVE_SERVICE_GEM_NOC_2,
- SC7280_SLAVE_SERVICE_GEM_NOC },
+ .link_nodes = { &qhs_mdsp_ms_mpu_cfg, &qhs_modem_ms_mpu_cfg,
+ &srvc_even_gemnoc, &srvc_odd_gemnoc,
+ &srvc_sys_gemnoc },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SC7280_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -458,12 +568,11 @@ static struct qcom_icc_node qnm_gpu = {
.urg_fwd = 0,
},
.num_links = 2,
- .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SC7280_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -473,12 +582,11 @@ static struct qcom_icc_node qnm_mnoc_hf = {
.urg_fwd = 1,
},
.num_links = 1,
- .links = { SC7280_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SC7280_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -488,21 +596,19 @@ static struct qcom_icc_node qnm_mnoc_sf = {
.urg_fwd = 1,
},
.num_links = 2,
- .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SC7280_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SC7280_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -512,12 +618,11 @@ static struct qcom_icc_node qnm_snoc_gc = {
.urg_fwd = 1,
},
.num_links = 1,
- .links = { SC7280_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SC7280_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -527,42 +632,38 @@ static struct qcom_icc_node qnm_snoc_sf = {
.urg_fwd = 1,
},
.num_links = 3,
- .links = { SC7280_SLAVE_GEM_NOC_CNOC, SC7280_SLAVE_LLCC,
- SC7280_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
- .id = SC7280_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
- .links = { SC7280_SLAVE_LPASS_CORE_CFG, SC7280_SLAVE_LPASS_LPI_CFG,
- SC7280_SLAVE_LPASS_MPU_CFG, SC7280_SLAVE_LPASS_TOP_CFG,
- SC7280_SLAVE_SERVICES_LPASS_AML_NOC, SC7280_SLAVE_SERVICE_LPASS_AG_NOC },
+ .link_nodes = { &qhs_lpass_core, &qhs_lpass_lpi,
+ &qhs_lpass_mpu, &qhs_lpass_top,
+ &srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SC7280_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qnm_mnoc_cfg = {
.name = "qnm_mnoc_cfg",
- .id = SC7280_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
- .id = SC7280_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -572,12 +673,11 @@ static struct qcom_icc_node qnm_video0 = {
.urg_fwd = 1,
},
.num_links = 1,
- .links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cpu = {
.name = "qnm_video_cpu",
- .id = SC7280_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -587,12 +687,11 @@ static struct qcom_icc_node qnm_video_cpu = {
.urg_fwd = 1,
},
.num_links = 1,
- .links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_camnoc_hf = {
.name = "qxm_camnoc_hf",
- .id = SC7280_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -602,12 +701,11 @@ static struct qcom_icc_node qxm_camnoc_hf = {
.urg_fwd = 1,
},
.num_links = 1,
- .links = { SC7280_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_icp = {
.name = "qxm_camnoc_icp",
- .id = SC7280_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -617,12 +715,11 @@ static struct qcom_icc_node qxm_camnoc_icp = {
.urg_fwd = 1,
},
.num_links = 1,
- .links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
- .id = SC7280_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -632,12 +729,11 @@ static struct qcom_icc_node qxm_camnoc_sf = {
.urg_fwd = 1,
},
.num_links = 1,
- .links = { SC7280_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = SC7280_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -647,57 +743,51 @@ static struct qcom_icc_node qxm_mdp0 = {
.urg_fwd = 1,
},
.num_links = 1,
- .links = { SC7280_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
- .id = SC7280_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_SLAVE_SERVICE_NSP_NOC },
+ .link_nodes = { &service_nsp_noc },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
- .id = SC7280_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC7280_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SC7280_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7280_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SC7280_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7280_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
- .id = SC7280_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SC7280_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -707,12 +797,11 @@ static struct qcom_icc_node qxm_pimem = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SC7280_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.qosbox = &(const struct qcom_icc_qosbox) {
@@ -722,741 +811,585 @@ static struct qcom_icc_node xm_gic = {
.urg_fwd = 0,
},
.num_links = 1,
- .links = { SC7280_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SC7280_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7280_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SC7280_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SC7280_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7280_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SC7280_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7280_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SC7280_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SC7280_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SC7280_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SC7280_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = SC7280_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SC7280_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SC7280_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_compute_cfg = {
.name = "qhs_compute_cfg",
- .id = SC7280_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_MASTER_CDSP_NOC_CFG },
+ .link_nodes = { &qhm_nsp_noc_config },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SC7280_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = SC7280_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SC7280_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = SC7280_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
- .id = SC7280_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SC7280_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SC7280_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_hwkm = {
.name = "qhs_hwkm",
- .id = SC7280_SLAVE_HWKM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SC7280_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SC7280_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SC7280_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
- .id = SC7280_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_MASTER_CNOC_LPASS_AG_NOC },
+ .link_nodes = { &qhm_config_noc },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SC7280_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
- .id = SC7280_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SC7280_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SC7280_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SC7280_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SC7280_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pka_wrapper_cfg = {
.name = "qhs_pka_wrapper_cfg",
- .id = SC7280_SLAVE_PKA_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
.name = "qhs_pmu_wrapper_cfg",
- .id = SC7280_SLAVE_PMU_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SC7280_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SC7280_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SC7280_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SC7280_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
- .id = SC7280_SLAVE_SDCC_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SC7280_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SC7280_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
- .id = SC7280_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SC7280_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SC7280_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SC7280_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb2 = {
.name = "qhs_usb2",
- .id = SC7280_SLAVE_USB2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SC7280_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SC7280_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SC7280_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_a1_noc_cfg = {
.name = "qns_a1_noc_cfg",
- .id = SC7280_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_MASTER_A1NOC_CFG },
+ .link_nodes = { &qnm_a1noc_cfg },
};
static struct qcom_icc_node qns_a2_noc_cfg = {
.name = "qns_a2_noc_cfg",
- .id = SC7280_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_MASTER_A2NOC_CFG },
+ .link_nodes = { &qnm_a2noc_cfg },
};
static struct qcom_icc_node qns_cnoc2_cnoc3 = {
.name = "qns_cnoc2_cnoc3",
- .id = SC7280_SLAVE_CNOC2_CNOC3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7280_MASTER_CNOC2_CNOC3 },
+ .link_nodes = { &qnm_cnoc2_cnoc3 },
};
static struct qcom_icc_node qns_mnoc_cfg = {
.name = "qns_mnoc_cfg",
- .id = SC7280_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qnm_mnoc_cfg },
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
- .id = SC7280_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_MASTER_SNOC_CFG },
+ .link_nodes = { &qnm_snoc_cfg },
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SC7280_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SC7280_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qns_cnoc3_cnoc2 = {
.name = "qns_cnoc3_cnoc2",
- .id = SC7280_SLAVE_CNOC3_CNOC2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7280_MASTER_CNOC3_CNOC2 },
+ .link_nodes = { &qnm_cnoc3_cnoc2 },
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
- .id = SC7280_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7280_MASTER_CNOC_A2NOC },
+ .link_nodes = { &qnm_cnoc_datapath },
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
- .id = SC7280_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qnm_cnoc_dc_noc },
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
- .id = SC7280_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SC7280_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SC7280_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SC7280_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SC7280_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SC7280_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SC7280_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SC7280_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
- .id = SC7280_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC7280_MASTER_GEM_NOC_CFG },
+ .link_nodes = { &qnm_gemnoc_cfg },
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
- .id = SC7280_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_modem_ms_mpu_cfg = {
.name = "qhs_modem_ms_mpu_cfg",
- .id = SC7280_SLAVE_MCDMA_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = SC7280_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7280_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SC7280_SLAVE_LLCC,
.channels = 2,
.buswidth = 16,
.num_links = 1,
- .links = { SC7280_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SC7280_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7280_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
- .id = SC7280_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
- .id = SC7280_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
- .id = SC7280_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
- .id = SC7280_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
- .id = SC7280_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
- .id = SC7280_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
- .id = SC7280_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
- .id = SC7280_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
- .id = SC7280_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SC7280_SLAVE_EBI1,
.channels = 2,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SC7280_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC7280_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SC7280_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC7280_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SC7280_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = SC7280_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC7280_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
- .id = SC7280_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SC7280_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC7280_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SC7280_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC7280_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SC7280_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_bcm bcm_acv = {
diff --git a/drivers/interconnect/qcom/sc7280.h b/drivers/interconnect/qcom/sc7280.h
deleted file mode 100644
index 175e400305c5..000000000000
--- a/drivers/interconnect/qcom/sc7280.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Qualcomm #define SC7280 interconnect IDs
- *
- * Copyright (c) 2021, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SC7280_H
-#define __DRIVERS_INTERCONNECT_QCOM_SC7280_H
-
-#define SC7280_MASTER_GPU_TCU 0
-#define SC7280_MASTER_SYS_TCU 1
-#define SC7280_MASTER_APPSS_PROC 2
-#define SC7280_MASTER_LLCC 3
-#define SC7280_MASTER_CNOC_LPASS_AG_NOC 4
-#define SC7280_MASTER_CDSP_NOC_CFG 5
-#define SC7280_MASTER_QDSS_BAM 6
-#define SC7280_MASTER_QSPI_0 7
-#define SC7280_MASTER_QUP_0 8
-#define SC7280_MASTER_QUP_1 9
-#define SC7280_MASTER_A1NOC_CFG 10
-#define SC7280_MASTER_A2NOC_CFG 11
-#define SC7280_MASTER_A1NOC_SNOC 12
-#define SC7280_MASTER_A2NOC_SNOC 13
-#define SC7280_MASTER_COMPUTE_NOC 14
-#define SC7280_MASTER_CNOC2_CNOC3 15
-#define SC7280_MASTER_CNOC3_CNOC2 16
-#define SC7280_MASTER_CNOC_A2NOC 17
-#define SC7280_MASTER_CNOC_DC_NOC 18
-#define SC7280_MASTER_GEM_NOC_CFG 19
-#define SC7280_MASTER_GEM_NOC_CNOC 20
-#define SC7280_MASTER_GEM_NOC_PCIE_SNOC 21
-#define SC7280_MASTER_GFX3D 22
-#define SC7280_MASTER_CNOC_MNOC_CFG 23
-#define SC7280_MASTER_MNOC_HF_MEM_NOC 24
-#define SC7280_MASTER_MNOC_SF_MEM_NOC 25
-#define SC7280_MASTER_ANOC_PCIE_GEM_NOC 26
-#define SC7280_MASTER_SNOC_CFG 27
-#define SC7280_MASTER_SNOC_GC_MEM_NOC 28
-#define SC7280_MASTER_SNOC_SF_MEM_NOC 29
-#define SC7280_MASTER_VIDEO_P0 30
-#define SC7280_MASTER_VIDEO_PROC 31
-#define SC7280_MASTER_QUP_CORE_0 32
-#define SC7280_MASTER_QUP_CORE_1 33
-#define SC7280_MASTER_CAMNOC_HF 34
-#define SC7280_MASTER_CAMNOC_ICP 35
-#define SC7280_MASTER_CAMNOC_SF 36
-#define SC7280_MASTER_CRYPTO 37
-#define SC7280_MASTER_IPA 38
-#define SC7280_MASTER_MDP0 39
-#define SC7280_MASTER_CDSP_PROC 40
-#define SC7280_MASTER_PIMEM 41
-#define SC7280_MASTER_GIC 42
-#define SC7280_MASTER_PCIE_0 43
-#define SC7280_MASTER_PCIE_1 44
-#define SC7280_MASTER_QDSS_DAP 45
-#define SC7280_MASTER_QDSS_ETR 46
-#define SC7280_MASTER_SDCC_1 47
-#define SC7280_MASTER_SDCC_2 48
-#define SC7280_MASTER_SDCC_4 49
-#define SC7280_MASTER_UFS_MEM 50
-#define SC7280_MASTER_USB2 51
-#define SC7280_MASTER_USB3_0 52
-#define SC7280_SLAVE_EBI1 53
-#define SC7280_SLAVE_AHB2PHY_SOUTH 54
-#define SC7280_SLAVE_AHB2PHY_NORTH 55
-#define SC7280_SLAVE_AOSS 56
-#define SC7280_SLAVE_APPSS 57
-#define SC7280_SLAVE_CAMERA_CFG 58
-#define SC7280_SLAVE_CLK_CTL 59
-#define SC7280_SLAVE_CDSP_CFG 60
-#define SC7280_SLAVE_RBCPR_CX_CFG 61
-#define SC7280_SLAVE_RBCPR_MX_CFG 62
-#define SC7280_SLAVE_CRYPTO_0_CFG 63
-#define SC7280_SLAVE_CX_RDPM 64
-#define SC7280_SLAVE_DCC_CFG 65
-#define SC7280_SLAVE_DISPLAY_CFG 66
-#define SC7280_SLAVE_GFX3D_CFG 67
-#define SC7280_SLAVE_HWKM 68
-#define SC7280_SLAVE_IMEM_CFG 69
-#define SC7280_SLAVE_IPA_CFG 70
-#define SC7280_SLAVE_IPC_ROUTER_CFG 71
-#define SC7280_SLAVE_LLCC_CFG 72
-#define SC7280_SLAVE_LPASS 73
-#define SC7280_SLAVE_LPASS_CORE_CFG 74
-#define SC7280_SLAVE_LPASS_LPI_CFG 75
-#define SC7280_SLAVE_LPASS_MPU_CFG 76
-#define SC7280_SLAVE_LPASS_TOP_CFG 77
-#define SC7280_SLAVE_MSS_PROC_MS_MPU_CFG 78
-#define SC7280_SLAVE_MCDMA_MS_MPU_CFG 79
-#define SC7280_SLAVE_CNOC_MSS 80
-#define SC7280_SLAVE_MX_RDPM 81
-#define SC7280_SLAVE_PCIE_0_CFG 82
-#define SC7280_SLAVE_PCIE_1_CFG 83
-#define SC7280_SLAVE_PDM 84
-#define SC7280_SLAVE_PIMEM_CFG 85
-#define SC7280_SLAVE_PKA_WRAPPER_CFG 86
-#define SC7280_SLAVE_PMU_WRAPPER_CFG 87
-#define SC7280_SLAVE_QDSS_CFG 88
-#define SC7280_SLAVE_QSPI_0 89
-#define SC7280_SLAVE_QUP_0 90
-#define SC7280_SLAVE_QUP_1 91
-#define SC7280_SLAVE_SDCC_1 92
-#define SC7280_SLAVE_SDCC_2 93
-#define SC7280_SLAVE_SDCC_4 94
-#define SC7280_SLAVE_SECURITY 95
-#define SC7280_SLAVE_TCSR 96
-#define SC7280_SLAVE_TLMM 97
-#define SC7280_SLAVE_UFS_MEM_CFG 98
-#define SC7280_SLAVE_USB2 99
-#define SC7280_SLAVE_USB3_0 100
-#define SC7280_SLAVE_VENUS_CFG 101
-#define SC7280_SLAVE_VSENSE_CTRL_CFG 102
-#define SC7280_SLAVE_A1NOC_CFG 103
-#define SC7280_SLAVE_A1NOC_SNOC 104
-#define SC7280_SLAVE_A2NOC_CFG 105
-#define SC7280_SLAVE_A2NOC_SNOC 106
-#define SC7280_SLAVE_CNOC2_CNOC3 107
-#define SC7280_SLAVE_CNOC3_CNOC2 108
-#define SC7280_SLAVE_CNOC_A2NOC 109
-#define SC7280_SLAVE_DDRSS_CFG 110
-#define SC7280_SLAVE_GEM_NOC_CNOC 111
-#define SC7280_SLAVE_GEM_NOC_CFG 112
-#define SC7280_SLAVE_SNOC_GEM_NOC_GC 113
-#define SC7280_SLAVE_SNOC_GEM_NOC_SF 114
-#define SC7280_SLAVE_LLCC 115
-#define SC7280_SLAVE_MNOC_HF_MEM_NOC 116
-#define SC7280_SLAVE_MNOC_SF_MEM_NOC 117
-#define SC7280_SLAVE_CNOC_MNOC_CFG 118
-#define SC7280_SLAVE_CDSP_MEM_NOC 119
-#define SC7280_SLAVE_MEM_NOC_PCIE_SNOC 120
-#define SC7280_SLAVE_ANOC_PCIE_GEM_NOC 121
-#define SC7280_SLAVE_SNOC_CFG 122
-#define SC7280_SLAVE_QUP_CORE_0 123
-#define SC7280_SLAVE_QUP_CORE_1 124
-#define SC7280_SLAVE_BOOT_IMEM 125
-#define SC7280_SLAVE_IMEM 126
-#define SC7280_SLAVE_PIMEM 127
-#define SC7280_SLAVE_SERVICE_NSP_NOC 128
-#define SC7280_SLAVE_SERVICE_A1NOC 129
-#define SC7280_SLAVE_SERVICE_A2NOC 130
-#define SC7280_SLAVE_SERVICE_GEM_NOC_1 131
-#define SC7280_SLAVE_SERVICE_MNOC 132
-#define SC7280_SLAVE_SERVICES_LPASS_AML_NOC 133
-#define SC7280_SLAVE_SERVICE_LPASS_AG_NOC 134
-#define SC7280_SLAVE_SERVICE_GEM_NOC_2 135
-#define SC7280_SLAVE_SERVICE_SNOC 136
-#define SC7280_SLAVE_SERVICE_GEM_NOC 137
-#define SC7280_SLAVE_PCIE_0 138
-#define SC7280_SLAVE_PCIE_1 139
-#define SC7280_SLAVE_QDSS_STM 140
-#define SC7280_SLAVE_TCU 141
-
-#endif
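The sc8180x.c conversion below follows the same recipe, only with mas_/slv_ prefixed node names. For contrast, a rough sketch of the indirection the old id scheme required at probe time, assuming a simplified node type and a hypothetical find_node() helper (the real resolution happens in the common qcom icc provider code, not shown here):

#include <stddef.h>

struct node_sketch {
	const char *name;
	int id;
	int num_links;
	int links[4];
};

/* Old scheme: each integer id in links[] had to be matched against the
 * provider's node table before the topology could be wired up. */
static struct node_sketch *find_node(struct node_sketch **tbl, size_t n,
				     int id)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i] && tbl[i]->id == id)
			return tbl[i];
	return NULL;	/* a stale id only shows up as a probe failure */
}

With link_nodes[] the pointer is the target, so a misspelled node name fails at compile time rather than at probe time.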
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
index 4dd1d2f2e821..c9bf1af54e37 100644
--- a/drivers/interconnect/qcom/sc8180x.c
+++ b/drivers/interconnect/qcom/sc8180x.c
@@ -14,1331 +14,1331 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sc8180x.h"
+
+static struct qcom_icc_node mas_qhm_a1noc_cfg;
+static struct qcom_icc_node mas_xm_ufs_card;
+static struct qcom_icc_node mas_xm_ufs_g4;
+static struct qcom_icc_node mas_xm_ufs_mem;
+static struct qcom_icc_node mas_xm_usb3_0;
+static struct qcom_icc_node mas_xm_usb3_1;
+static struct qcom_icc_node mas_xm_usb3_2;
+static struct qcom_icc_node mas_qhm_a2noc_cfg;
+static struct qcom_icc_node mas_qhm_qdss_bam;
+static struct qcom_icc_node mas_qhm_qspi;
+static struct qcom_icc_node mas_qhm_qspi1;
+static struct qcom_icc_node mas_qhm_qup0;
+static struct qcom_icc_node mas_qhm_qup1;
+static struct qcom_icc_node mas_qhm_qup2;
+static struct qcom_icc_node mas_qhm_sensorss_ahb;
+static struct qcom_icc_node mas_qxm_crypto;
+static struct qcom_icc_node mas_qxm_ipa;
+static struct qcom_icc_node mas_xm_emac;
+static struct qcom_icc_node mas_xm_pcie3_0;
+static struct qcom_icc_node mas_xm_pcie3_1;
+static struct qcom_icc_node mas_xm_pcie3_2;
+static struct qcom_icc_node mas_xm_pcie3_3;
+static struct qcom_icc_node mas_xm_qdss_etr;
+static struct qcom_icc_node mas_xm_sdc2;
+static struct qcom_icc_node mas_xm_sdc4;
+static struct qcom_icc_node mas_qxm_camnoc_hf0_uncomp;
+static struct qcom_icc_node mas_qxm_camnoc_hf1_uncomp;
+static struct qcom_icc_node mas_qxm_camnoc_sf_uncomp;
+static struct qcom_icc_node mas_qnm_npu;
+static struct qcom_icc_node mas_qnm_snoc;
+static struct qcom_icc_node mas_qhm_cnoc_dc_noc;
+static struct qcom_icc_node mas_acm_apps;
+static struct qcom_icc_node mas_acm_gpu_tcu;
+static struct qcom_icc_node mas_acm_sys_tcu;
+static struct qcom_icc_node mas_qhm_gemnoc_cfg;
+static struct qcom_icc_node mas_qnm_cmpnoc;
+static struct qcom_icc_node mas_qnm_gpu;
+static struct qcom_icc_node mas_qnm_mnoc_hf;
+static struct qcom_icc_node mas_qnm_mnoc_sf;
+static struct qcom_icc_node mas_qnm_pcie;
+static struct qcom_icc_node mas_qnm_snoc_gc;
+static struct qcom_icc_node mas_qnm_snoc_sf;
+static struct qcom_icc_node mas_qxm_ecc;
+static struct qcom_icc_node mas_llcc_mc;
+static struct qcom_icc_node mas_qhm_mnoc_cfg;
+static struct qcom_icc_node mas_qxm_camnoc_hf0;
+static struct qcom_icc_node mas_qxm_camnoc_hf1;
+static struct qcom_icc_node mas_qxm_camnoc_sf;
+static struct qcom_icc_node mas_qxm_mdp0;
+static struct qcom_icc_node mas_qxm_mdp1;
+static struct qcom_icc_node mas_qxm_rot;
+static struct qcom_icc_node mas_qxm_venus0;
+static struct qcom_icc_node mas_qxm_venus1;
+static struct qcom_icc_node mas_qxm_venus_arm9;
+static struct qcom_icc_node mas_qhm_snoc_cfg;
+static struct qcom_icc_node mas_qnm_aggre1_noc;
+static struct qcom_icc_node mas_qnm_aggre2_noc;
+static struct qcom_icc_node mas_qnm_gemnoc;
+static struct qcom_icc_node mas_qxm_pimem;
+static struct qcom_icc_node mas_xm_gic;
+static struct qcom_icc_node mas_qup_core_0;
+static struct qcom_icc_node mas_qup_core_1;
+static struct qcom_icc_node mas_qup_core_2;
+static struct qcom_icc_node slv_qns_a1noc_snoc;
+static struct qcom_icc_node slv_srvc_aggre1_noc;
+static struct qcom_icc_node slv_qns_a2noc_snoc;
+static struct qcom_icc_node slv_qns_pcie_mem_noc;
+static struct qcom_icc_node slv_srvc_aggre2_noc;
+static struct qcom_icc_node slv_qns_camnoc_uncomp;
+static struct qcom_icc_node slv_qns_cdsp_mem_noc;
+static struct qcom_icc_node slv_qhs_a1_noc_cfg;
+static struct qcom_icc_node slv_qhs_a2_noc_cfg;
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_center;
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_east;
+static struct qcom_icc_node slv_qhs_ahb2phy_refgen_west;
+static struct qcom_icc_node slv_qhs_ahb2phy_south;
+static struct qcom_icc_node slv_qhs_aop;
+static struct qcom_icc_node slv_qhs_aoss;
+static struct qcom_icc_node slv_qhs_camera_cfg;
+static struct qcom_icc_node slv_qhs_clk_ctl;
+static struct qcom_icc_node slv_qhs_compute_dsp;
+static struct qcom_icc_node slv_qhs_cpr_cx;
+static struct qcom_icc_node slv_qhs_cpr_mmcx;
+static struct qcom_icc_node slv_qhs_cpr_mx;
+static struct qcom_icc_node slv_qhs_crypto0_cfg;
+static struct qcom_icc_node slv_qhs_ddrss_cfg;
+static struct qcom_icc_node slv_qhs_display_cfg;
+static struct qcom_icc_node slv_qhs_emac_cfg;
+static struct qcom_icc_node slv_qhs_glm;
+static struct qcom_icc_node slv_qhs_gpuss_cfg;
+static struct qcom_icc_node slv_qhs_imem_cfg;
+static struct qcom_icc_node slv_qhs_ipa;
+static struct qcom_icc_node slv_qhs_mnoc_cfg;
+static struct qcom_icc_node slv_qhs_npu_cfg;
+static struct qcom_icc_node slv_qhs_pcie0_cfg;
+static struct qcom_icc_node slv_qhs_pcie1_cfg;
+static struct qcom_icc_node slv_qhs_pcie2_cfg;
+static struct qcom_icc_node slv_qhs_pcie3_cfg;
+static struct qcom_icc_node slv_qhs_pdm;
+static struct qcom_icc_node slv_qhs_pimem_cfg;
+static struct qcom_icc_node slv_qhs_prng;
+static struct qcom_icc_node slv_qhs_qdss_cfg;
+static struct qcom_icc_node slv_qhs_qspi_0;
+static struct qcom_icc_node slv_qhs_qspi_1;
+static struct qcom_icc_node slv_qhs_qupv3_east0;
+static struct qcom_icc_node slv_qhs_qupv3_east1;
+static struct qcom_icc_node slv_qhs_qupv3_west;
+static struct qcom_icc_node slv_qhs_sdc2;
+static struct qcom_icc_node slv_qhs_sdc4;
+static struct qcom_icc_node slv_qhs_security;
+static struct qcom_icc_node slv_qhs_snoc_cfg;
+static struct qcom_icc_node slv_qhs_spss_cfg;
+static struct qcom_icc_node slv_qhs_tcsr;
+static struct qcom_icc_node slv_qhs_tlmm_east;
+static struct qcom_icc_node slv_qhs_tlmm_south;
+static struct qcom_icc_node slv_qhs_tlmm_west;
+static struct qcom_icc_node slv_qhs_tsif;
+static struct qcom_icc_node slv_qhs_ufs_card_cfg;
+static struct qcom_icc_node slv_qhs_ufs_mem0_cfg;
+static struct qcom_icc_node slv_qhs_ufs_mem1_cfg;
+static struct qcom_icc_node slv_qhs_usb3_0;
+static struct qcom_icc_node slv_qhs_usb3_1;
+static struct qcom_icc_node slv_qhs_usb3_2;
+static struct qcom_icc_node slv_qhs_venus_cfg;
+static struct qcom_icc_node slv_qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node slv_srvc_cnoc;
+static struct qcom_icc_node slv_qhs_gemnoc;
+static struct qcom_icc_node slv_qhs_llcc;
+static struct qcom_icc_node slv_qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node slv_qns_ecc;
+static struct qcom_icc_node slv_qns_gem_noc_snoc;
+static struct qcom_icc_node slv_qns_llcc;
+static struct qcom_icc_node slv_srvc_gemnoc;
+static struct qcom_icc_node slv_srvc_gemnoc1;
+static struct qcom_icc_node slv_ebi;
+static struct qcom_icc_node slv_qns2_mem_noc;
+static struct qcom_icc_node slv_qns_mem_noc_hf;
+static struct qcom_icc_node slv_srvc_mnoc;
+static struct qcom_icc_node slv_qhs_apss;
+static struct qcom_icc_node slv_qns_cnoc;
+static struct qcom_icc_node slv_qns_gemnoc_gc;
+static struct qcom_icc_node slv_qns_gemnoc_sf;
+static struct qcom_icc_node slv_qxs_imem;
+static struct qcom_icc_node slv_qxs_pimem;
+static struct qcom_icc_node slv_srvc_snoc;
+static struct qcom_icc_node slv_xs_pcie_0;
+static struct qcom_icc_node slv_xs_pcie_1;
+static struct qcom_icc_node slv_xs_pcie_2;
+static struct qcom_icc_node slv_xs_pcie_3;
+static struct qcom_icc_node slv_xs_qdss_stm;
+static struct qcom_icc_node slv_xs_sys_tcu_cfg;
+static struct qcom_icc_node slv_qup_core_0;
+static struct qcom_icc_node slv_qup_core_1;
+static struct qcom_icc_node slv_qup_core_2;
static struct qcom_icc_node mas_qhm_a1noc_cfg = {
.name = "mas_qhm_a1noc_cfg",
- .id = SC8180X_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_SLAVE_SERVICE_A1NOC }
+ .link_nodes = { &slv_srvc_aggre1_noc },
};
static struct qcom_icc_node mas_xm_ufs_card = {
.name = "mas_xm_ufs_card",
- .id = SC8180X_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A1NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a1noc_snoc },
};
static struct qcom_icc_node mas_xm_ufs_g4 = {
.name = "mas_xm_ufs_g4",
- .id = SC8180X_MASTER_UFS_GEN4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A1NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a1noc_snoc },
};
static struct qcom_icc_node mas_xm_ufs_mem = {
.name = "mas_xm_ufs_mem",
- .id = SC8180X_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A1NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a1noc_snoc },
};
static struct qcom_icc_node mas_xm_usb3_0 = {
.name = "mas_xm_usb3_0",
- .id = SC8180X_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A1NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a1noc_snoc },
};
static struct qcom_icc_node mas_xm_usb3_1 = {
.name = "mas_xm_usb3_1",
- .id = SC8180X_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A1NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a1noc_snoc },
};
static struct qcom_icc_node mas_xm_usb3_2 = {
.name = "mas_xm_usb3_2",
- .id = SC8180X_MASTER_USB3_2,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8180X_A1NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a1noc_snoc },
};
static struct qcom_icc_node mas_qhm_a2noc_cfg = {
.name = "mas_qhm_a2noc_cfg",
- .id = SC8180X_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_SLAVE_SERVICE_A2NOC }
+ .link_nodes = { &slv_srvc_aggre2_noc },
};
static struct qcom_icc_node mas_qhm_qdss_bam = {
.name = "mas_qhm_qdss_bam",
- .id = SC8180X_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_qhm_qspi = {
.name = "mas_qhm_qspi",
- .id = SC8180X_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_qhm_qspi1 = {
.name = "mas_qhm_qspi1",
- .id = SC8180X_MASTER_QSPI_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_qhm_qup0 = {
.name = "mas_qhm_qup0",
- .id = SC8180X_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_qhm_qup1 = {
.name = "mas_qhm_qup1",
- .id = SC8180X_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_qhm_qup2 = {
.name = "mas_qhm_qup2",
- .id = SC8180X_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_qhm_sensorss_ahb = {
.name = "mas_qhm_sensorss_ahb",
- .id = SC8180X_MASTER_SENSORS_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_qxm_crypto = {
.name = "mas_qxm_crypto",
- .id = SC8180X_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_qxm_ipa = {
.name = "mas_qxm_ipa",
- .id = SC8180X_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_xm_emac = {
.name = "mas_xm_emac",
- .id = SC8180X_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_xm_pcie3_0 = {
.name = "mas_xm_pcie3_0",
- .id = SC8180X_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+ .link_nodes = { &slv_qns_pcie_mem_noc },
};
static struct qcom_icc_node mas_xm_pcie3_1 = {
.name = "mas_xm_pcie3_1",
- .id = SC8180X_MASTER_PCIE_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+ .link_nodes = { &slv_qns_pcie_mem_noc },
};
static struct qcom_icc_node mas_xm_pcie3_2 = {
.name = "mas_xm_pcie3_2",
- .id = SC8180X_MASTER_PCIE_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+ .link_nodes = { &slv_qns_pcie_mem_noc },
};
static struct qcom_icc_node mas_xm_pcie3_3 = {
.name = "mas_xm_pcie3_3",
- .id = SC8180X_MASTER_PCIE_3,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8180X_SLAVE_ANOC_PCIE_GEM_NOC }
+ .link_nodes = { &slv_qns_pcie_mem_noc },
};
static struct qcom_icc_node mas_xm_qdss_etr = {
.name = "mas_xm_qdss_etr",
- .id = SC8180X_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_xm_sdc2 = {
.name = "mas_xm_sdc2",
- .id = SC8180X_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_xm_sdc4 = {
.name = "mas_xm_sdc4",
- .id = SC8180X_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_SLV }
+ .link_nodes = { &slv_qns_a2noc_snoc },
};
static struct qcom_icc_node mas_qxm_camnoc_hf0_uncomp = {
.name = "mas_qxm_camnoc_hf0_uncomp",
- .id = SC8180X_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+ .link_nodes = { &slv_qns_camnoc_uncomp },
};
static struct qcom_icc_node mas_qxm_camnoc_hf1_uncomp = {
.name = "mas_qxm_camnoc_hf1_uncomp",
- .id = SC8180X_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+ .link_nodes = { &slv_qns_camnoc_uncomp },
};
static struct qcom_icc_node mas_qxm_camnoc_sf_uncomp = {
.name = "mas_qxm_camnoc_sf_uncomp",
- .id = SC8180X_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_CAMNOC_UNCOMP }
+ .link_nodes = { &slv_qns_camnoc_uncomp },
};
static struct qcom_icc_node mas_qnm_npu = {
.name = "mas_qnm_npu",
- .id = SC8180X_MASTER_NPU,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_CDSP_MEM_NOC }
+ .link_nodes = { &slv_qns_cdsp_mem_noc },
};
static struct qcom_icc_node mas_qnm_snoc = {
.name = "mas_qnm_snoc",
- .id = SC8180X_SNOC_CNOC_MAS,
.channels = 1,
.buswidth = 8,
.num_links = 56,
- .links = { SC8180X_SLAVE_TLMM_SOUTH,
- SC8180X_SLAVE_CDSP_CFG,
- SC8180X_SLAVE_SPSS_CFG,
- SC8180X_SLAVE_CAMERA_CFG,
- SC8180X_SLAVE_SDCC_4,
- SC8180X_SLAVE_AHB2PHY_CENTER,
- SC8180X_SLAVE_SDCC_2,
- SC8180X_SLAVE_PCIE_2_CFG,
- SC8180X_SLAVE_CNOC_MNOC_CFG,
- SC8180X_SLAVE_EMAC_CFG,
- SC8180X_SLAVE_QSPI_0,
- SC8180X_SLAVE_QSPI_1,
- SC8180X_SLAVE_TLMM_EAST,
- SC8180X_SLAVE_SNOC_CFG,
- SC8180X_SLAVE_AHB2PHY_EAST,
- SC8180X_SLAVE_GLM,
- SC8180X_SLAVE_PDM,
- SC8180X_SLAVE_PCIE_1_CFG,
- SC8180X_SLAVE_A2NOC_CFG,
- SC8180X_SLAVE_QDSS_CFG,
- SC8180X_SLAVE_DISPLAY_CFG,
- SC8180X_SLAVE_TCSR,
- SC8180X_SLAVE_UFS_MEM_0_CFG,
- SC8180X_SLAVE_CNOC_DDRSS,
- SC8180X_SLAVE_PCIE_0_CFG,
- SC8180X_SLAVE_QUP_1,
- SC8180X_SLAVE_QUP_2,
- SC8180X_SLAVE_NPU_CFG,
- SC8180X_SLAVE_CRYPTO_0_CFG,
- SC8180X_SLAVE_GRAPHICS_3D_CFG,
- SC8180X_SLAVE_VENUS_CFG,
- SC8180X_SLAVE_TSIF,
- SC8180X_SLAVE_IPA_CFG,
- SC8180X_SLAVE_CLK_CTL,
- SC8180X_SLAVE_SECURITY,
- SC8180X_SLAVE_AOP,
- SC8180X_SLAVE_AHB2PHY_WEST,
- SC8180X_SLAVE_AHB2PHY_SOUTH,
- SC8180X_SLAVE_SERVICE_CNOC,
- SC8180X_SLAVE_UFS_CARD_CFG,
- SC8180X_SLAVE_USB3_1,
- SC8180X_SLAVE_USB3_2,
- SC8180X_SLAVE_PCIE_3_CFG,
- SC8180X_SLAVE_RBCPR_CX_CFG,
- SC8180X_SLAVE_TLMM_WEST,
- SC8180X_SLAVE_A1NOC_CFG,
- SC8180X_SLAVE_AOSS,
- SC8180X_SLAVE_PRNG,
- SC8180X_SLAVE_VSENSE_CTRL_CFG,
- SC8180X_SLAVE_QUP_0,
- SC8180X_SLAVE_USB3,
- SC8180X_SLAVE_RBCPR_MMCX_CFG,
- SC8180X_SLAVE_PIMEM_CFG,
- SC8180X_SLAVE_UFS_MEM_1_CFG,
- SC8180X_SLAVE_RBCPR_MX_CFG,
- SC8180X_SLAVE_IMEM_CFG }
+ .link_nodes = { &slv_qhs_tlmm_south,
+ &slv_qhs_compute_dsp,
+ &slv_qhs_spss_cfg,
+ &slv_qhs_camera_cfg,
+ &slv_qhs_sdc4,
+ &slv_qhs_ahb2phy_refgen_center,
+ &slv_qhs_sdc2,
+ &slv_qhs_pcie2_cfg,
+ &slv_qhs_mnoc_cfg,
+ &slv_qhs_emac_cfg,
+ &slv_qhs_qspi_0,
+ &slv_qhs_qspi_1,
+ &slv_qhs_tlmm_east,
+ &slv_qhs_snoc_cfg,
+ &slv_qhs_ahb2phy_refgen_east,
+ &slv_qhs_glm,
+ &slv_qhs_pdm,
+ &slv_qhs_pcie1_cfg,
+ &slv_qhs_a2_noc_cfg,
+ &slv_qhs_qdss_cfg,
+ &slv_qhs_display_cfg,
+ &slv_qhs_tcsr,
+ &slv_qhs_ufs_mem0_cfg,
+ &slv_qhs_ddrss_cfg,
+ &slv_qhs_pcie0_cfg,
+ &slv_qhs_qupv3_east0,
+ &slv_qhs_qupv3_east1,
+ &slv_qhs_npu_cfg,
+ &slv_qhs_crypto0_cfg,
+ &slv_qhs_gpuss_cfg,
+ &slv_qhs_venus_cfg,
+ &slv_qhs_tsif,
+ &slv_qhs_ipa,
+ &slv_qhs_clk_ctl,
+ &slv_qhs_security,
+ &slv_qhs_aop,
+ &slv_qhs_ahb2phy_refgen_west,
+ &slv_qhs_ahb2phy_south,
+ &slv_srvc_cnoc,
+ &slv_qhs_ufs_card_cfg,
+ &slv_qhs_usb3_1,
+ &slv_qhs_usb3_2,
+ &slv_qhs_pcie3_cfg,
+ &slv_qhs_cpr_cx,
+ &slv_qhs_tlmm_west,
+ &slv_qhs_a1_noc_cfg,
+ &slv_qhs_aoss,
+ &slv_qhs_prng,
+ &slv_qhs_vsense_ctrl_cfg,
+ &slv_qhs_qupv3_west,
+ &slv_qhs_usb3_0,
+ &slv_qhs_cpr_mmcx,
+ &slv_qhs_pimem_cfg,
+ &slv_qhs_ufs_mem1_cfg,
+ &slv_qhs_cpr_mx,
+ &slv_qhs_imem_cfg },
};
static struct qcom_icc_node mas_qhm_cnoc_dc_noc = {
.name = "mas_qhm_cnoc_dc_noc",
- .id = SC8180X_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SC8180X_SLAVE_LLCC_CFG,
- SC8180X_SLAVE_GEM_NOC_CFG }
+ .link_nodes = { &slv_qhs_llcc,
+ &slv_qhs_gemnoc },
};
static struct qcom_icc_node mas_acm_apps = {
.name = "mas_acm_apps",
- .id = SC8180X_MASTER_AMPSS_M0,
.channels = 4,
.buswidth = 64,
.num_links = 3,
- .links = { SC8180X_SLAVE_ECC,
- SC8180X_SLAVE_LLCC,
- SC8180X_SLAVE_GEM_NOC_SNOC }
+ .link_nodes = { &slv_qns_ecc,
+ &slv_qns_llcc,
+ &slv_qns_gem_noc_snoc },
};
static struct qcom_icc_node mas_acm_gpu_tcu = {
.name = "mas_acm_gpu_tcu",
- .id = SC8180X_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC8180X_SLAVE_LLCC,
- SC8180X_SLAVE_GEM_NOC_SNOC }
+ .link_nodes = { &slv_qns_llcc,
+ &slv_qns_gem_noc_snoc },
};
static struct qcom_icc_node mas_acm_sys_tcu = {
.name = "mas_acm_sys_tcu",
- .id = SC8180X_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC8180X_SLAVE_LLCC,
- SC8180X_SLAVE_GEM_NOC_SNOC }
+ .link_nodes = { &slv_qns_llcc,
+ &slv_qns_gem_noc_snoc },
};
static struct qcom_icc_node mas_qhm_gemnoc_cfg = {
.name = "mas_qhm_gemnoc_cfg",
- .id = SC8180X_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 3,
- .links = { SC8180X_SLAVE_SERVICE_GEM_NOC_1,
- SC8180X_SLAVE_SERVICE_GEM_NOC,
- SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG }
+ .link_nodes = { &slv_srvc_gemnoc1,
+ &slv_srvc_gemnoc,
+ &slv_qhs_mdsp_ms_mpu_cfg },
};
static struct qcom_icc_node mas_qnm_cmpnoc = {
.name = "mas_qnm_cmpnoc",
- .id = SC8180X_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SC8180X_SLAVE_ECC,
- SC8180X_SLAVE_LLCC,
- SC8180X_SLAVE_GEM_NOC_SNOC }
+ .link_nodes = { &slv_qns_ecc,
+ &slv_qns_llcc,
+ &slv_qns_gem_noc_snoc },
};
static struct qcom_icc_node mas_qnm_gpu = {
.name = "mas_qnm_gpu",
- .id = SC8180X_MASTER_GRAPHICS_3D,
.channels = 4,
.buswidth = 32,
.num_links = 2,
- .links = { SC8180X_SLAVE_LLCC,
- SC8180X_SLAVE_GEM_NOC_SNOC }
+ .link_nodes = { &slv_qns_llcc,
+ &slv_qns_gem_noc_snoc },
};
static struct qcom_icc_node mas_qnm_mnoc_hf = {
.name = "mas_qnm_mnoc_hf",
- .id = SC8180X_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_LLCC }
+ .link_nodes = { &slv_qns_llcc },
};
static struct qcom_icc_node mas_qnm_mnoc_sf = {
.name = "mas_qnm_mnoc_sf",
- .id = SC8180X_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { SC8180X_SLAVE_LLCC,
- SC8180X_SLAVE_GEM_NOC_SNOC }
+ .link_nodes = { &slv_qns_llcc,
+ &slv_qns_gem_noc_snoc },
};
static struct qcom_icc_node mas_qnm_pcie = {
.name = "mas_qnm_pcie",
- .id = SC8180X_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { SC8180X_SLAVE_LLCC,
- SC8180X_SLAVE_GEM_NOC_SNOC }
+ .link_nodes = { &slv_qns_llcc,
+ &slv_qns_gem_noc_snoc },
};
static struct qcom_icc_node mas_qnm_snoc_gc = {
.name = "mas_qnm_snoc_gc",
- .id = SC8180X_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_SLAVE_LLCC }
+ .link_nodes = { &slv_qns_llcc },
};
static struct qcom_icc_node mas_qnm_snoc_sf = {
.name = "mas_qnm_snoc_sf",
- .id = SC8180X_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_LLCC }
+ .link_nodes = { &slv_qns_llcc },
};
static struct qcom_icc_node mas_qxm_ecc = {
.name = "mas_qxm_ecc",
- .id = SC8180X_MASTER_ECC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_LLCC }
+ .link_nodes = { &slv_qns_llcc },
};
static struct qcom_icc_node mas_llcc_mc = {
.name = "mas_llcc_mc",
- .id = SC8180X_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_SLAVE_EBI_CH0 }
+ .link_nodes = { &slv_ebi },
};
static struct qcom_icc_node mas_qhm_mnoc_cfg = {
.name = "mas_qhm_mnoc_cfg",
- .id = SC8180X_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_SLAVE_SERVICE_MNOC }
+ .link_nodes = { &slv_srvc_mnoc },
};
static struct qcom_icc_node mas_qxm_camnoc_hf0 = {
.name = "mas_qxm_camnoc_hf0",
- .id = SC8180X_MASTER_CAMNOC_HF0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+ .link_nodes = { &slv_qns_mem_noc_hf },
};
static struct qcom_icc_node mas_qxm_camnoc_hf1 = {
.name = "mas_qxm_camnoc_hf1",
- .id = SC8180X_MASTER_CAMNOC_HF1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+ .link_nodes = { &slv_qns_mem_noc_hf },
};
static struct qcom_icc_node mas_qxm_camnoc_sf = {
.name = "mas_qxm_camnoc_sf",
- .id = SC8180X_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+ .link_nodes = { &slv_qns2_mem_noc },
};
static struct qcom_icc_node mas_qxm_mdp0 = {
.name = "mas_qxm_mdp0",
- .id = SC8180X_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+ .link_nodes = { &slv_qns_mem_noc_hf },
};
static struct qcom_icc_node mas_qxm_mdp1 = {
.name = "mas_qxm_mdp1",
- .id = SC8180X_MASTER_MDP_PORT1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_MNOC_HF_MEM_NOC }
+ .link_nodes = { &slv_qns_mem_noc_hf },
};
static struct qcom_icc_node mas_qxm_rot = {
.name = "mas_qxm_rot",
- .id = SC8180X_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+ .link_nodes = { &slv_qns2_mem_noc },
};
static struct qcom_icc_node mas_qxm_venus0 = {
.name = "mas_qxm_venus0",
- .id = SC8180X_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+ .link_nodes = { &slv_qns2_mem_noc },
};
static struct qcom_icc_node mas_qxm_venus1 = {
.name = "mas_qxm_venus1",
- .id = SC8180X_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+ .link_nodes = { &slv_qns2_mem_noc },
};
static struct qcom_icc_node mas_qxm_venus_arm9 = {
.name = "mas_qxm_venus_arm9",
- .id = SC8180X_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_SLAVE_MNOC_SF_MEM_NOC }
+ .link_nodes = { &slv_qns2_mem_noc },
};
static struct qcom_icc_node mas_qhm_snoc_cfg = {
.name = "mas_qhm_snoc_cfg",
- .id = SC8180X_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_SLAVE_SERVICE_SNOC }
+ .link_nodes = { &slv_srvc_snoc },
};
static struct qcom_icc_node mas_qnm_aggre1_noc = {
.name = "mas_qnm_aggre1_noc",
- .id = SC8180X_A1NOC_SNOC_MAS,
.channels = 1,
.buswidth = 32,
.num_links = 6,
- .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
- SC8180X_SLAVE_PIMEM,
- SC8180X_SLAVE_OCIMEM,
- SC8180X_SLAVE_APPSS,
- SC8180X_SNOC_CNOC_SLV,
- SC8180X_SLAVE_QDSS_STM }
+ .link_nodes = { &slv_qns_gemnoc_sf,
+ &slv_qxs_pimem,
+ &slv_qxs_imem,
+ &slv_qhs_apss,
+ &slv_qns_cnoc,
+ &slv_xs_qdss_stm },
};
static struct qcom_icc_node mas_qnm_aggre2_noc = {
.name = "mas_qnm_aggre2_noc",
- .id = SC8180X_A2NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 11,
- .links = { SC8180X_SLAVE_SNOC_GEM_NOC_SF,
- SC8180X_SLAVE_PIMEM,
- SC8180X_SLAVE_PCIE_3,
- SC8180X_SLAVE_OCIMEM,
- SC8180X_SLAVE_APPSS,
- SC8180X_SLAVE_PCIE_2,
- SC8180X_SNOC_CNOC_SLV,
- SC8180X_SLAVE_PCIE_0,
- SC8180X_SLAVE_PCIE_1,
- SC8180X_SLAVE_TCU,
- SC8180X_SLAVE_QDSS_STM }
+ .link_nodes = { &slv_qns_gemnoc_sf,
+ &slv_qxs_pimem,
+ &slv_xs_pcie_3,
+ &slv_qxs_imem,
+ &slv_qhs_apss,
+ &slv_xs_pcie_2,
+ &slv_qns_cnoc,
+ &slv_xs_pcie_0,
+ &slv_xs_pcie_1,
+ &slv_xs_sys_tcu_cfg,
+ &slv_xs_qdss_stm },
};
static struct qcom_icc_node mas_qnm_gemnoc = {
.name = "mas_qnm_gemnoc",
- .id = SC8180X_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
- .links = { SC8180X_SLAVE_PIMEM,
- SC8180X_SLAVE_OCIMEM,
- SC8180X_SLAVE_APPSS,
- SC8180X_SNOC_CNOC_SLV,
- SC8180X_SLAVE_TCU,
- SC8180X_SLAVE_QDSS_STM }
+ .link_nodes = { &slv_qxs_pimem,
+ &slv_qxs_imem,
+ &slv_qhs_apss,
+ &slv_qns_cnoc,
+ &slv_xs_sys_tcu_cfg,
+ &slv_xs_qdss_stm },
};
static struct qcom_icc_node mas_qxm_pimem = {
.name = "mas_qxm_pimem",
- .id = SC8180X_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
- SC8180X_SLAVE_OCIMEM }
+ .link_nodes = { &slv_qns_gemnoc_gc,
+ &slv_qxs_imem },
};
static struct qcom_icc_node mas_xm_gic = {
.name = "mas_xm_gic",
- .id = SC8180X_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC8180X_SLAVE_SNOC_GEM_NOC_GC,
- SC8180X_SLAVE_OCIMEM }
+ .link_nodes = { &slv_qns_gemnoc_gc,
+ &slv_qxs_imem },
};
static struct qcom_icc_node mas_qup_core_0 = {
.name = "mas_qup_core_0",
- .id = SC8180X_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_SLAVE_QUP_CORE_0 }
+ .link_nodes = { &slv_qup_core_0 },
};
static struct qcom_icc_node mas_qup_core_1 = {
.name = "mas_qup_core_1",
- .id = SC8180X_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_SLAVE_QUP_CORE_1 }
+ .link_nodes = { &slv_qup_core_1 },
};
static struct qcom_icc_node mas_qup_core_2 = {
.name = "mas_qup_core_2",
- .id = SC8180X_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_SLAVE_QUP_CORE_2 }
+ .link_nodes = { &slv_qup_core_2 },
};
static struct qcom_icc_node slv_qns_a1noc_snoc = {
.name = "slv_qns_a1noc_snoc",
- .id = SC8180X_A1NOC_SNOC_SLV,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_A1NOC_SNOC_MAS }
+ .link_nodes = { &mas_qnm_aggre1_noc },
};
static struct qcom_icc_node slv_srvc_aggre1_noc = {
.name = "slv_srvc_aggre1_noc",
- .id = SC8180X_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qns_a2noc_snoc = {
.name = "slv_qns_a2noc_snoc",
- .id = SC8180X_A2NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8180X_A2NOC_SNOC_MAS }
+ .link_nodes = { &mas_qnm_aggre2_noc },
};
static struct qcom_icc_node slv_qns_pcie_mem_noc = {
.name = "slv_qns_pcie_mem_noc",
- .id = SC8180X_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_MASTER_GEM_NOC_PCIE_SNOC }
+ .link_nodes = { &mas_qnm_pcie },
};
static struct qcom_icc_node slv_srvc_aggre2_noc = {
.name = "slv_srvc_aggre2_noc",
- .id = SC8180X_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qns_camnoc_uncomp = {
.name = "slv_qns_camnoc_uncomp",
- .id = SC8180X_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32
};
static struct qcom_icc_node slv_qns_cdsp_mem_noc = {
.name = "slv_qns_cdsp_mem_noc",
- .id = SC8180X_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_MASTER_COMPUTE_NOC }
+ .link_nodes = { &mas_qnm_cmpnoc },
};
static struct qcom_icc_node slv_qhs_a1_noc_cfg = {
.name = "slv_qhs_a1_noc_cfg",
- .id = SC8180X_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_MASTER_A1NOC_CFG }
+ .link_nodes = { &mas_qhm_a1noc_cfg },
};
static struct qcom_icc_node slv_qhs_a2_noc_cfg = {
.name = "slv_qhs_a2_noc_cfg",
- .id = SC8180X_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_MASTER_A2NOC_CFG }
+ .link_nodes = { &mas_qhm_a2noc_cfg },
};
static struct qcom_icc_node slv_qhs_ahb2phy_refgen_center = {
.name = "slv_qhs_ahb2phy_refgen_center",
- .id = SC8180X_SLAVE_AHB2PHY_CENTER,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ahb2phy_refgen_east = {
.name = "slv_qhs_ahb2phy_refgen_east",
- .id = SC8180X_SLAVE_AHB2PHY_EAST,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ahb2phy_refgen_west = {
.name = "slv_qhs_ahb2phy_refgen_west",
- .id = SC8180X_SLAVE_AHB2PHY_WEST,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ahb2phy_south = {
.name = "slv_qhs_ahb2phy_south",
- .id = SC8180X_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_aop = {
.name = "slv_qhs_aop",
- .id = SC8180X_SLAVE_AOP,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_aoss = {
.name = "slv_qhs_aoss",
- .id = SC8180X_SLAVE_AOSS,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_camera_cfg = {
.name = "slv_qhs_camera_cfg",
- .id = SC8180X_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_clk_ctl = {
.name = "slv_qhs_clk_ctl",
- .id = SC8180X_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_compute_dsp = {
.name = "slv_qhs_compute_dsp",
- .id = SC8180X_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_cpr_cx = {
.name = "slv_qhs_cpr_cx",
- .id = SC8180X_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_cpr_mmcx = {
.name = "slv_qhs_cpr_mmcx",
- .id = SC8180X_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_cpr_mx = {
.name = "slv_qhs_cpr_mx",
- .id = SC8180X_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_crypto0_cfg = {
.name = "slv_qhs_crypto0_cfg",
- .id = SC8180X_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ddrss_cfg = {
.name = "slv_qhs_ddrss_cfg",
- .id = SC8180X_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_MASTER_CNOC_DC_NOC }
+ .link_nodes = { &mas_qhm_cnoc_dc_noc },
};
static struct qcom_icc_node slv_qhs_display_cfg = {
.name = "slv_qhs_display_cfg",
- .id = SC8180X_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_emac_cfg = {
.name = "slv_qhs_emac_cfg",
- .id = SC8180X_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_glm = {
.name = "slv_qhs_glm",
- .id = SC8180X_SLAVE_GLM,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_gpuss_cfg = {
.name = "slv_qhs_gpuss_cfg",
- .id = SC8180X_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_qhs_imem_cfg = {
.name = "slv_qhs_imem_cfg",
- .id = SC8180X_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ipa = {
.name = "slv_qhs_ipa",
- .id = SC8180X_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_mnoc_cfg = {
.name = "slv_qhs_mnoc_cfg",
- .id = SC8180X_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_MASTER_CNOC_MNOC_CFG }
+ .link_nodes = { &mas_qhm_mnoc_cfg },
};
static struct qcom_icc_node slv_qhs_npu_cfg = {
.name = "slv_qhs_npu_cfg",
- .id = SC8180X_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pcie0_cfg = {
.name = "slv_qhs_pcie0_cfg",
- .id = SC8180X_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pcie1_cfg = {
.name = "slv_qhs_pcie1_cfg",
- .id = SC8180X_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pcie2_cfg = {
.name = "slv_qhs_pcie2_cfg",
- .id = SC8180X_SLAVE_PCIE_2_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pcie3_cfg = {
.name = "slv_qhs_pcie3_cfg",
- .id = SC8180X_SLAVE_PCIE_3_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pdm = {
.name = "slv_qhs_pdm",
- .id = SC8180X_SLAVE_PDM,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_pimem_cfg = {
.name = "slv_qhs_pimem_cfg",
- .id = SC8180X_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_prng = {
.name = "slv_qhs_prng",
- .id = SC8180X_SLAVE_PRNG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qdss_cfg = {
.name = "slv_qhs_qdss_cfg",
- .id = SC8180X_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qspi_0 = {
.name = "slv_qhs_qspi_0",
- .id = SC8180X_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qspi_1 = {
.name = "slv_qhs_qspi_1",
- .id = SC8180X_SLAVE_QSPI_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qupv3_east0 = {
.name = "slv_qhs_qupv3_east0",
- .id = SC8180X_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qupv3_east1 = {
.name = "slv_qhs_qupv3_east1",
- .id = SC8180X_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_qupv3_west = {
.name = "slv_qhs_qupv3_west",
- .id = SC8180X_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_sdc2 = {
.name = "slv_qhs_sdc2",
- .id = SC8180X_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_sdc4 = {
.name = "slv_qhs_sdc4",
- .id = SC8180X_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_security = {
.name = "slv_qhs_security",
- .id = SC8180X_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_snoc_cfg = {
.name = "slv_qhs_snoc_cfg",
- .id = SC8180X_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_MASTER_SNOC_CFG }
+ .link_nodes = { &mas_qhm_snoc_cfg },
};
static struct qcom_icc_node slv_qhs_spss_cfg = {
.name = "slv_qhs_spss_cfg",
- .id = SC8180X_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tcsr = {
.name = "slv_qhs_tcsr",
- .id = SC8180X_SLAVE_TCSR,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tlmm_east = {
.name = "slv_qhs_tlmm_east",
- .id = SC8180X_SLAVE_TLMM_EAST,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tlmm_south = {
.name = "slv_qhs_tlmm_south",
- .id = SC8180X_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tlmm_west = {
.name = "slv_qhs_tlmm_west",
- .id = SC8180X_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_tsif = {
.name = "slv_qhs_tsif",
- .id = SC8180X_SLAVE_TSIF,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ufs_card_cfg = {
.name = "slv_qhs_ufs_card_cfg",
- .id = SC8180X_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ufs_mem0_cfg = {
.name = "slv_qhs_ufs_mem0_cfg",
- .id = SC8180X_SLAVE_UFS_MEM_0_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_ufs_mem1_cfg = {
.name = "slv_qhs_ufs_mem1_cfg",
- .id = SC8180X_SLAVE_UFS_MEM_1_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_usb3_0 = {
.name = "slv_qhs_usb3_0",
- .id = SC8180X_SLAVE_USB3,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_usb3_1 = {
.name = "slv_qhs_usb3_1",
- .id = SC8180X_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_usb3_2 = {
.name = "slv_qhs_usb3_2",
- .id = SC8180X_SLAVE_USB3_2,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_venus_cfg = {
.name = "slv_qhs_venus_cfg",
- .id = SC8180X_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_vsense_ctrl_cfg = {
.name = "slv_qhs_vsense_ctrl_cfg",
- .id = SC8180X_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_srvc_cnoc = {
.name = "slv_srvc_cnoc",
- .id = SC8180X_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_gemnoc = {
.name = "slv_qhs_gemnoc",
- .id = SC8180X_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8180X_MASTER_GEM_NOC_CFG }
+ .link_nodes = { &mas_qhm_gemnoc_cfg },
};
static struct qcom_icc_node slv_qhs_llcc = {
.name = "slv_qhs_llcc",
- .id = SC8180X_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_mdsp_ms_mpu_cfg = {
.name = "slv_qhs_mdsp_ms_mpu_cfg",
- .id = SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qns_ecc = {
.name = "slv_qns_ecc",
- .id = SC8180X_SLAVE_ECC,
.channels = 1,
.buswidth = 32
};
static struct qcom_icc_node slv_qns_gem_noc_snoc = {
.name = "slv_qns_gem_noc_snoc",
- .id = SC8180X_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_MASTER_GEM_NOC_SNOC }
+ .link_nodes = { &mas_qnm_gemnoc },
};
static struct qcom_icc_node slv_qns_llcc = {
.name = "slv_qns_llcc",
- .id = SC8180X_SLAVE_LLCC,
.channels = 8,
.buswidth = 16,
.num_links = 1,
- .links = { SC8180X_MASTER_LLCC }
+ .link_nodes = { &mas_llcc_mc },
};
static struct qcom_icc_node slv_srvc_gemnoc = {
.name = "slv_srvc_gemnoc",
- .id = SC8180X_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_srvc_gemnoc1 = {
.name = "slv_srvc_gemnoc1",
- .id = SC8180X_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_ebi = {
.name = "slv_ebi",
- .id = SC8180X_SLAVE_EBI_CH0,
.channels = 8,
.buswidth = 4
};
static struct qcom_icc_node slv_qns2_mem_noc = {
.name = "slv_qns2_mem_noc",
- .id = SC8180X_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_MASTER_MNOC_SF_MEM_NOC }
+ .link_nodes = { &mas_qnm_mnoc_sf },
};
static struct qcom_icc_node slv_qns_mem_noc_hf = {
.name = "slv_qns_mem_noc_hf",
- .id = SC8180X_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_MASTER_MNOC_HF_MEM_NOC }
+ .link_nodes = { &mas_qnm_mnoc_hf },
};
static struct qcom_icc_node slv_srvc_mnoc = {
.name = "slv_srvc_mnoc",
- .id = SC8180X_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qhs_apss = {
.name = "slv_qhs_apss",
- .id = SC8180X_SLAVE_APPSS,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_qns_cnoc = {
.name = "slv_qns_cnoc",
- .id = SC8180X_SNOC_CNOC_SLV,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_SNOC_CNOC_MAS }
+ .link_nodes = { &mas_qnm_snoc },
};
static struct qcom_icc_node slv_qns_gemnoc_gc = {
.name = "slv_qns_gemnoc_gc",
- .id = SC8180X_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8180X_MASTER_SNOC_GC_MEM_NOC }
+ .link_nodes = { &mas_qnm_snoc_gc },
};
static struct qcom_icc_node slv_qns_gemnoc_sf = {
.name = "slv_qns_gemnoc_sf",
- .id = SC8180X_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8180X_MASTER_SNOC_SF_MEM_NOC }
+ .link_nodes = { &mas_qnm_snoc_sf },
};
static struct qcom_icc_node slv_qxs_imem = {
.name = "slv_qxs_imem",
- .id = SC8180X_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_qxs_pimem = {
.name = "slv_qxs_pimem",
- .id = SC8180X_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_srvc_snoc = {
.name = "slv_srvc_snoc",
- .id = SC8180X_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_xs_pcie_0 = {
.name = "slv_xs_pcie_0",
- .id = SC8180X_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_xs_pcie_1 = {
.name = "slv_xs_pcie_1",
- .id = SC8180X_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_xs_pcie_2 = {
.name = "slv_xs_pcie_2",
- .id = SC8180X_SLAVE_PCIE_2,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_xs_pcie_3 = {
.name = "slv_xs_pcie_3",
- .id = SC8180X_SLAVE_PCIE_3,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_xs_qdss_stm = {
.name = "slv_xs_qdss_stm",
- .id = SC8180X_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_xs_sys_tcu_cfg = {
.name = "slv_xs_sys_tcu_cfg",
- .id = SC8180X_SLAVE_TCU,
.channels = 1,
.buswidth = 8
};
static struct qcom_icc_node slv_qup_core_0 = {
.name = "slv_qup_core_0",
- .id = SC8180X_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qup_core_1 = {
.name = "slv_qup_core_1",
- .id = SC8180X_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4
};
static struct qcom_icc_node slv_qup_core_2 = {
.name = "slv_qup_core_2",
- .id = SC8180X_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4
};
diff --git a/drivers/interconnect/qcom/sc8180x.h b/drivers/interconnect/qcom/sc8180x.h
deleted file mode 100644
index f8d90598335a..000000000000
--- a/drivers/interconnect/qcom/sc8180x.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Qualcomm #define SC8180X interconnect IDs
- *
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SC8180X_H
-#define __DRIVERS_INTERCONNECT_QCOM_SC8180X_H
-
-#define SC8180X_MASTER_A1NOC_CFG 1
-#define SC8180X_MASTER_UFS_CARD 2
-#define SC8180X_MASTER_UFS_GEN4 3
-#define SC8180X_MASTER_UFS_MEM 4
-#define SC8180X_MASTER_USB3 5
-#define SC8180X_MASTER_USB3_1 6
-#define SC8180X_MASTER_USB3_2 7
-#define SC8180X_MASTER_A2NOC_CFG 8
-#define SC8180X_MASTER_QDSS_BAM 9
-#define SC8180X_MASTER_QSPI_0 10
-#define SC8180X_MASTER_QSPI_1 11
-#define SC8180X_MASTER_QUP_0 12
-#define SC8180X_MASTER_QUP_1 13
-#define SC8180X_MASTER_QUP_2 14
-#define SC8180X_MASTER_SENSORS_AHB 15
-#define SC8180X_MASTER_CRYPTO_CORE_0 16
-#define SC8180X_MASTER_IPA 17
-#define SC8180X_MASTER_EMAC 18
-#define SC8180X_MASTER_PCIE 19
-#define SC8180X_MASTER_PCIE_1 20
-#define SC8180X_MASTER_PCIE_2 21
-#define SC8180X_MASTER_PCIE_3 22
-#define SC8180X_MASTER_QDSS_ETR 23
-#define SC8180X_MASTER_SDCC_2 24
-#define SC8180X_MASTER_SDCC_4 25
-#define SC8180X_MASTER_CAMNOC_HF0_UNCOMP 26
-#define SC8180X_MASTER_CAMNOC_HF1_UNCOMP 27
-#define SC8180X_MASTER_CAMNOC_SF_UNCOMP 28
-#define SC8180X_MASTER_NPU 29
-#define SC8180X_SNOC_CNOC_MAS 30
-#define SC8180X_MASTER_CNOC_DC_NOC 31
-#define SC8180X_MASTER_AMPSS_M0 32
-#define SC8180X_MASTER_GPU_TCU 33
-#define SC8180X_MASTER_SYS_TCU 34
-#define SC8180X_MASTER_GEM_NOC_CFG 35
-#define SC8180X_MASTER_COMPUTE_NOC 36
-#define SC8180X_MASTER_GRAPHICS_3D 37
-#define SC8180X_MASTER_MNOC_HF_MEM_NOC 38
-#define SC8180X_MASTER_MNOC_SF_MEM_NOC 39
-#define SC8180X_MASTER_GEM_NOC_PCIE_SNOC 40
-#define SC8180X_MASTER_SNOC_GC_MEM_NOC 41
-#define SC8180X_MASTER_SNOC_SF_MEM_NOC 42
-#define SC8180X_MASTER_ECC 43
-/* 44 was used by MASTER_IPA_CORE, now represented as RPMh clock */
-#define SC8180X_MASTER_LLCC 45
-#define SC8180X_MASTER_CNOC_MNOC_CFG 46
-#define SC8180X_MASTER_CAMNOC_HF0 47
-#define SC8180X_MASTER_CAMNOC_HF1 48
-#define SC8180X_MASTER_CAMNOC_SF 49
-#define SC8180X_MASTER_MDP_PORT0 50
-#define SC8180X_MASTER_MDP_PORT1 51
-#define SC8180X_MASTER_ROTATOR 52
-#define SC8180X_MASTER_VIDEO_P0 53
-#define SC8180X_MASTER_VIDEO_P1 54
-#define SC8180X_MASTER_VIDEO_PROC 55
-#define SC8180X_MASTER_SNOC_CFG 56
-#define SC8180X_A1NOC_SNOC_MAS 57
-#define SC8180X_A2NOC_SNOC_MAS 58
-#define SC8180X_MASTER_GEM_NOC_SNOC 59
-#define SC8180X_MASTER_PIMEM 60
-#define SC8180X_MASTER_GIC 61
-#define SC8180X_MASTER_MNOC_HF_MEM_NOC_DISPLAY 62
-#define SC8180X_MASTER_MNOC_SF_MEM_NOC_DISPLAY 63
-#define SC8180X_MASTER_LLCC_DISPLAY 64
-#define SC8180X_MASTER_MDP_PORT0_DISPLAY 65
-#define SC8180X_MASTER_MDP_PORT1_DISPLAY 66
-#define SC8180X_MASTER_ROTATOR_DISPLAY 67
-#define SC8180X_A1NOC_SNOC_SLV 68
-#define SC8180X_SLAVE_SERVICE_A1NOC 69
-#define SC8180X_A2NOC_SNOC_SLV 70
-#define SC8180X_SLAVE_ANOC_PCIE_GEM_NOC 71
-#define SC8180X_SLAVE_SERVICE_A2NOC 72
-#define SC8180X_SLAVE_CAMNOC_UNCOMP 73
-#define SC8180X_SLAVE_CDSP_MEM_NOC 74
-#define SC8180X_SLAVE_A1NOC_CFG 75
-#define SC8180X_SLAVE_A2NOC_CFG 76
-#define SC8180X_SLAVE_AHB2PHY_CENTER 77
-#define SC8180X_SLAVE_AHB2PHY_EAST 78
-#define SC8180X_SLAVE_AHB2PHY_WEST 79
-#define SC8180X_SLAVE_AHB2PHY_SOUTH 80
-#define SC8180X_SLAVE_AOP 81
-#define SC8180X_SLAVE_AOSS 82
-#define SC8180X_SLAVE_CAMERA_CFG 83
-#define SC8180X_SLAVE_CLK_CTL 84
-#define SC8180X_SLAVE_CDSP_CFG 85
-#define SC8180X_SLAVE_RBCPR_CX_CFG 86
-#define SC8180X_SLAVE_RBCPR_MMCX_CFG 87
-#define SC8180X_SLAVE_RBCPR_MX_CFG 88
-#define SC8180X_SLAVE_CRYPTO_0_CFG 89
-#define SC8180X_SLAVE_CNOC_DDRSS 90
-#define SC8180X_SLAVE_DISPLAY_CFG 91
-#define SC8180X_SLAVE_EMAC_CFG 92
-#define SC8180X_SLAVE_GLM 93
-#define SC8180X_SLAVE_GRAPHICS_3D_CFG 94
-#define SC8180X_SLAVE_IMEM_CFG 95
-#define SC8180X_SLAVE_IPA_CFG 96
-#define SC8180X_SLAVE_CNOC_MNOC_CFG 97
-#define SC8180X_SLAVE_NPU_CFG 98
-#define SC8180X_SLAVE_PCIE_0_CFG 99
-#define SC8180X_SLAVE_PCIE_1_CFG 100
-#define SC8180X_SLAVE_PCIE_2_CFG 101
-#define SC8180X_SLAVE_PCIE_3_CFG 102
-#define SC8180X_SLAVE_PDM 103
-#define SC8180X_SLAVE_PIMEM_CFG 104
-#define SC8180X_SLAVE_PRNG 105
-#define SC8180X_SLAVE_QDSS_CFG 106
-#define SC8180X_SLAVE_QSPI_0 107
-#define SC8180X_SLAVE_QSPI_1 108
-#define SC8180X_SLAVE_QUP_1 109
-#define SC8180X_SLAVE_QUP_2 110
-#define SC8180X_SLAVE_QUP_0 111
-#define SC8180X_SLAVE_SDCC_2 112
-#define SC8180X_SLAVE_SDCC_4 113
-#define SC8180X_SLAVE_SECURITY 114
-#define SC8180X_SLAVE_SNOC_CFG 115
-#define SC8180X_SLAVE_SPSS_CFG 116
-#define SC8180X_SLAVE_TCSR 117
-#define SC8180X_SLAVE_TLMM_EAST 118
-#define SC8180X_SLAVE_TLMM_SOUTH 119
-#define SC8180X_SLAVE_TLMM_WEST 120
-#define SC8180X_SLAVE_TSIF 121
-#define SC8180X_SLAVE_UFS_CARD_CFG 122
-#define SC8180X_SLAVE_UFS_MEM_0_CFG 123
-#define SC8180X_SLAVE_UFS_MEM_1_CFG 124
-#define SC8180X_SLAVE_USB3 125
-#define SC8180X_SLAVE_USB3_1 126
-#define SC8180X_SLAVE_USB3_2 127
-#define SC8180X_SLAVE_VENUS_CFG 128
-#define SC8180X_SLAVE_VSENSE_CTRL_CFG 129
-#define SC8180X_SLAVE_SERVICE_CNOC 130
-#define SC8180X_SLAVE_GEM_NOC_CFG 131
-#define SC8180X_SLAVE_LLCC_CFG 132
-#define SC8180X_SLAVE_MSS_PROC_MS_MPU_CFG 133
-#define SC8180X_SLAVE_ECC 134
-#define SC8180X_SLAVE_GEM_NOC_SNOC 135
-#define SC8180X_SLAVE_LLCC 136
-#define SC8180X_SLAVE_SERVICE_GEM_NOC 137
-#define SC8180X_SLAVE_SERVICE_GEM_NOC_1 138
-/* 139 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
-#define SC8180X_SLAVE_EBI_CH0 140
-#define SC8180X_SLAVE_MNOC_SF_MEM_NOC 141
-#define SC8180X_SLAVE_MNOC_HF_MEM_NOC 142
-#define SC8180X_SLAVE_SERVICE_MNOC 143
-#define SC8180X_SLAVE_APPSS 144
-#define SC8180X_SNOC_CNOC_SLV 145
-#define SC8180X_SLAVE_SNOC_GEM_NOC_GC 146
-#define SC8180X_SLAVE_SNOC_GEM_NOC_SF 147
-#define SC8180X_SLAVE_OCIMEM 148
-#define SC8180X_SLAVE_PIMEM 149
-#define SC8180X_SLAVE_SERVICE_SNOC 150
-#define SC8180X_SLAVE_PCIE_0 151
-#define SC8180X_SLAVE_PCIE_1 152
-#define SC8180X_SLAVE_PCIE_2 153
-#define SC8180X_SLAVE_PCIE_3 154
-#define SC8180X_SLAVE_QDSS_STM 155
-#define SC8180X_SLAVE_TCU 156
-#define SC8180X_SLAVE_LLCC_DISPLAY 157
-#define SC8180X_SLAVE_EBI_CH0_DISPLAY 158
-#define SC8180X_SLAVE_MNOC_SF_MEM_NOC_DISPLAY 159
-#define SC8180X_SLAVE_MNOC_HF_MEM_NOC_DISPLAY 160
-
-#define SC8180X_MASTER_QUP_CORE_0 163
-#define SC8180X_MASTER_QUP_CORE_1 164
-#define SC8180X_MASTER_QUP_CORE_2 165
-#define SC8180X_SLAVE_QUP_CORE_0 166
-#define SC8180X_SLAVE_QUP_CORE_1 167
-#define SC8180X_SLAVE_QUP_CORE_2 168
-
-#endif
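
With every .id assignment and .links table converted, nothing references the SC8180X_* constants any longer, so the per-SoC ID header above can be deleted outright; node identity now lives in the structure addresses themselves. For contrast, here is a hypothetical helper (not the driver's real probe code) showing the lookup the old numeric-ID scheme implied, reusing the sketch struct above with its former u16 id member:

/*
 * Hypothetical sketch: resolving a numeric ID from the old .links[]
 * array back to its node meant scanning the node table. With
 * .link_nodes the pointer is already in hand, so this scan, and its
 * failure mode, disappear.
 */
static struct qcom_icc_node *node_by_id(struct qcom_icc_node * const *nodes,
					size_t num_nodes, u16 id)
{
	size_t i;

	for (i = 0; i < num_nodes; i++)
		if (nodes[i]->id == id)
			return nodes[i];

	return NULL;	/* stale or mistyped ID: a runtime failure */
}

The failure mode is the point: a wrong number in .links was only caught at runtime, while a typo in a .link_nodes entry is an undeclared identifier and fails to compile.
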
diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
index c646cdf8a19b..ed2161da37bf 100644
--- a/drivers/interconnect/qcom/sc8280xp.c
+++ b/drivers/interconnect/qcom/sc8280xp.c
@@ -14,1699 +14,1682 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sc8280xp.h"
+
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qnm_a1noc_cfg;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_emac_1;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node xm_usb3_1;
+static struct qcom_icc_node xm_usb3_mp;
+static struct qcom_icc_node xm_usb4_host0;
+static struct qcom_icc_node xm_usb4_host1;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qnm_a2noc_cfg;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_sensorss_q6;
+static struct qcom_icc_node qxm_sp;
+static struct qcom_icc_node xm_emac_0;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node xm_pcie3_2a;
+static struct qcom_icc_node xm_pcie3_2b;
+static struct qcom_icc_node xm_pcie3_3a;
+static struct qcom_icc_node xm_pcie3_3b;
+static struct qcom_icc_node xm_pcie3_4;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_ufs_card;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qup2_core_master;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node qnm_cnoc_dc_noc;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_pcie_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_cmpnoc0;
+static struct qcom_icc_node qnm_cmpnoc1;
+static struct qcom_icc_node qnm_gemnoc_cfg;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qhm_config_noc;
+static struct qcom_icc_node qxm_lpass_dsp;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_mdp0_0;
+static struct qcom_icc_node qnm_mdp0_1;
+static struct qcom_icc_node qnm_mdp1_0;
+static struct qcom_icc_node qnm_mdp1_1;
+static struct qcom_icc_node qnm_mnoc_cfg;
+static struct qcom_icc_node qnm_rot_0;
+static struct qcom_icc_node qnm_rot_1;
+static struct qcom_icc_node qnm_video0;
+static struct qcom_icc_node qnm_video1;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qxm_camnoc_icp;
+static struct qcom_icc_node qxm_camnoc_sf;
+static struct qcom_icc_node qhm_nsp_noc_config;
+static struct qcom_icc_node qxm_nsp;
+static struct qcom_icc_node qhm_nspb_noc_config;
+static struct qcom_icc_node qxm_nspb;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_aggre_usb_noc;
+static struct qcom_icc_node qnm_lpass_noc;
+static struct qcom_icc_node qnm_snoc_cfg;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qns_aggre_usb_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qns_pcie_gem_noc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qup2_core_slave;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_ahb2phy2;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute0_cfg;
+static struct qcom_icc_node qhs_compute1_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_cpr_nspcx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_dcc_cfg;
+static struct qcom_icc_node qhs_display0_cfg;
+static struct qcom_icc_node qhs_display1_cfg;
+static struct qcom_icc_node qhs_emac0_cfg;
+static struct qcom_icc_node qhs_emac1_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_hwkm;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_lpass_cfg;
+static struct qcom_icc_node qhs_mx_rdpm;
+static struct qcom_icc_node qhs_mxc_rdpm;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pcie2a_cfg;
+static struct qcom_icc_node qhs_pcie2b_cfg;
+static struct qcom_icc_node qhs_pcie3a_cfg;
+static struct qcom_icc_node qhs_pcie3b_cfg;
+static struct qcom_icc_node qhs_pcie4_cfg;
+static struct qcom_icc_node qhs_pcie_rsc_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_pka_wrapper_cfg;
+static struct qcom_icc_node qhs_pmu_wrapper_cfg;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup2;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_security;
+static struct qcom_icc_node qhs_smmuv3_cfg;
+static struct qcom_icc_node qhs_smss_cfg;
+static struct qcom_icc_node qhs_spss_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_ufs_card_cfg;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_usb3_1;
+static struct qcom_icc_node qhs_usb3_mp;
+static struct qcom_icc_node qhs_usb4_host_0;
+static struct qcom_icc_node qhs_usb4_host_1;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_r_cfg;
+static struct qcom_icc_node qns_a1_noc_cfg;
+static struct qcom_icc_node qns_a2_noc_cfg;
+static struct qcom_icc_node qns_anoc_pcie_bridge_cfg;
+static struct qcom_icc_node qns_ddrss_cfg;
+static struct qcom_icc_node qns_mnoc_cfg;
+static struct qcom_icc_node qns_snoc_cfg;
+static struct qcom_icc_node qns_snoc_sf_bridge_cfg;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_pcie_2a;
+static struct qcom_icc_node xs_pcie_2b;
+static struct qcom_icc_node xs_pcie_3a;
+static struct qcom_icc_node xs_pcie_3b;
+static struct qcom_icc_node xs_pcie_4;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_smss;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qns_gemnoc;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node srvc_even_gemnoc;
+static struct qcom_icc_node srvc_odd_gemnoc;
+static struct qcom_icc_node srvc_sys_gemnoc;
+static struct qcom_icc_node qhs_lpass_core;
+static struct qcom_icc_node qhs_lpass_lpi;
+static struct qcom_icc_node qhs_lpass_mpu;
+static struct qcom_icc_node qhs_lpass_top;
+static struct qcom_icc_node qns_sysnoc;
+static struct qcom_icc_node srvc_niu_aml_noc;
+static struct qcom_icc_node srvc_niu_lpass_agnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node qxs_nsp_xfr;
+static struct qcom_icc_node service_nsp_noc;
+static struct qcom_icc_node qns_nspb_gemnoc;
+static struct qcom_icc_node qxs_nspb_xfr;
+static struct qcom_icc_node service_nspb_noc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node srvc_snoc;
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SC8280XP_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SC8280XP_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SC8280XP_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qnm_a1noc_cfg = {
.name = "qnm_a1noc_cfg",
- .id = SC8280XP_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SC8280XP_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emac_1 = {
.name = "xm_emac_1",
- .id = SC8280XP_MASTER_EMAC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SC8280XP_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SC8280XP_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SC8280XP_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+ .link_nodes = { &qns_aggre_usb_snoc },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
- .id = SC8280XP_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+ .link_nodes = { &qns_aggre_usb_snoc },
};
static struct qcom_icc_node xm_usb3_mp = {
.name = "xm_usb3_mp",
- .id = SC8280XP_MASTER_USB3_MP,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+ .link_nodes = { &qns_aggre_usb_snoc },
};
static struct qcom_icc_node xm_usb4_host0 = {
.name = "xm_usb4_host0",
- .id = SC8280XP_MASTER_USB4_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+ .link_nodes = { &qns_aggre_usb_snoc },
};
static struct qcom_icc_node xm_usb4_host1 = {
.name = "xm_usb4_host1",
- .id = SC8280XP_MASTER_USB4_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_USB_NOC_SNOC },
+ .link_nodes = { &qns_aggre_usb_snoc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SC8280XP_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = SC8280XP_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_a2noc_cfg = {
.name = "qnm_a2noc_cfg",
- .id = SC8280XP_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SC8280XP_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_sensorss_q6 = {
.name = "qxm_sensorss_q6",
- .id = SC8280XP_MASTER_SENSORS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_sp = {
.name = "qxm_sp",
- .id = SC8280XP_MASTER_SP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_emac_0 = {
.name = "xm_emac_0",
- .id = SC8280XP_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SC8280XP_MASTER_PCIE_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SC8280XP_MASTER_PCIE_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gem_noc },
};
static struct qcom_icc_node xm_pcie3_2a = {
.name = "xm_pcie3_2a",
- .id = SC8280XP_MASTER_PCIE_2A,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gem_noc },
};
static struct qcom_icc_node xm_pcie3_2b = {
.name = "xm_pcie3_2b",
- .id = SC8280XP_MASTER_PCIE_2B,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gem_noc },
};
static struct qcom_icc_node xm_pcie3_3a = {
.name = "xm_pcie3_3a",
- .id = SC8280XP_MASTER_PCIE_3A,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gem_noc },
};
static struct qcom_icc_node xm_pcie3_3b = {
.name = "xm_pcie3_3b",
- .id = SC8280XP_MASTER_PCIE_3B,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gem_noc },
};
static struct qcom_icc_node xm_pcie3_4 = {
.name = "xm_pcie3_4",
- .id = SC8280XP_MASTER_PCIE_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gem_noc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SC8280XP_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SC8280XP_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
- .id = SC8280XP_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SC8280XP_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SC8280XP_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
- .id = SC8280XP_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_QUP_CORE_2 },
+ .link_nodes = { &qup2_core_slave },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SC8280XP_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 76,
- .links = { SC8280XP_SLAVE_AHB2PHY_0,
- SC8280XP_SLAVE_AHB2PHY_1,
- SC8280XP_SLAVE_AHB2PHY_2,
- SC8280XP_SLAVE_AOSS,
- SC8280XP_SLAVE_APPSS,
- SC8280XP_SLAVE_CAMERA_CFG,
- SC8280XP_SLAVE_CLK_CTL,
- SC8280XP_SLAVE_CDSP_CFG,
- SC8280XP_SLAVE_CDSP1_CFG,
- SC8280XP_SLAVE_RBCPR_CX_CFG,
- SC8280XP_SLAVE_RBCPR_MMCX_CFG,
- SC8280XP_SLAVE_RBCPR_MX_CFG,
- SC8280XP_SLAVE_CPR_NSPCX,
- SC8280XP_SLAVE_CRYPTO_0_CFG,
- SC8280XP_SLAVE_CX_RDPM,
- SC8280XP_SLAVE_DCC_CFG,
- SC8280XP_SLAVE_DISPLAY_CFG,
- SC8280XP_SLAVE_DISPLAY1_CFG,
- SC8280XP_SLAVE_EMAC_CFG,
- SC8280XP_SLAVE_EMAC1_CFG,
- SC8280XP_SLAVE_GFX3D_CFG,
- SC8280XP_SLAVE_HWKM,
- SC8280XP_SLAVE_IMEM_CFG,
- SC8280XP_SLAVE_IPA_CFG,
- SC8280XP_SLAVE_IPC_ROUTER_CFG,
- SC8280XP_SLAVE_LPASS,
- SC8280XP_SLAVE_MX_RDPM,
- SC8280XP_SLAVE_MXC_RDPM,
- SC8280XP_SLAVE_PCIE_0_CFG,
- SC8280XP_SLAVE_PCIE_1_CFG,
- SC8280XP_SLAVE_PCIE_2A_CFG,
- SC8280XP_SLAVE_PCIE_2B_CFG,
- SC8280XP_SLAVE_PCIE_3A_CFG,
- SC8280XP_SLAVE_PCIE_3B_CFG,
- SC8280XP_SLAVE_PCIE_4_CFG,
- SC8280XP_SLAVE_PCIE_RSC_CFG,
- SC8280XP_SLAVE_PDM,
- SC8280XP_SLAVE_PIMEM_CFG,
- SC8280XP_SLAVE_PKA_WRAPPER_CFG,
- SC8280XP_SLAVE_PMU_WRAPPER_CFG,
- SC8280XP_SLAVE_QDSS_CFG,
- SC8280XP_SLAVE_QSPI_0,
- SC8280XP_SLAVE_QUP_0,
- SC8280XP_SLAVE_QUP_1,
- SC8280XP_SLAVE_QUP_2,
- SC8280XP_SLAVE_SDCC_2,
- SC8280XP_SLAVE_SDCC_4,
- SC8280XP_SLAVE_SECURITY,
- SC8280XP_SLAVE_SMMUV3_CFG,
- SC8280XP_SLAVE_SMSS_CFG,
- SC8280XP_SLAVE_SPSS_CFG,
- SC8280XP_SLAVE_TCSR,
- SC8280XP_SLAVE_TLMM,
- SC8280XP_SLAVE_UFS_CARD_CFG,
- SC8280XP_SLAVE_UFS_MEM_CFG,
- SC8280XP_SLAVE_USB3_0,
- SC8280XP_SLAVE_USB3_1,
- SC8280XP_SLAVE_USB3_MP,
- SC8280XP_SLAVE_USB4_0,
- SC8280XP_SLAVE_USB4_1,
- SC8280XP_SLAVE_VENUS_CFG,
- SC8280XP_SLAVE_VSENSE_CTRL_CFG,
- SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
- SC8280XP_SLAVE_A1NOC_CFG,
- SC8280XP_SLAVE_A2NOC_CFG,
- SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
- SC8280XP_SLAVE_DDRSS_CFG,
- SC8280XP_SLAVE_CNOC_MNOC_CFG,
- SC8280XP_SLAVE_SNOC_CFG,
- SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
- SC8280XP_SLAVE_IMEM,
- SC8280XP_SLAVE_PIMEM,
- SC8280XP_SLAVE_SERVICE_CNOC,
- SC8280XP_SLAVE_QDSS_STM,
- SC8280XP_SLAVE_SMSS,
- SC8280XP_SLAVE_TCU
- },
+ .link_nodes = { &qhs_ahb2phy0,
+ &qhs_ahb2phy1,
+ &qhs_ahb2phy2,
+ &qhs_aoss,
+ &qhs_apss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute0_cfg,
+ &qhs_compute1_cfg,
+ &qhs_cpr_cx,
+ &qhs_cpr_mmcx,
+ &qhs_cpr_mx,
+ &qhs_cpr_nspcx,
+ &qhs_crypto0_cfg,
+ &qhs_cx_rdpm,
+ &qhs_dcc_cfg,
+ &qhs_display0_cfg,
+ &qhs_display1_cfg,
+ &qhs_emac0_cfg,
+ &qhs_emac1_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_hwkm,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_ipc_router,
+ &qhs_lpass_cfg,
+ &qhs_mx_rdpm,
+ &qhs_mxc_rdpm,
+ &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg,
+ &qhs_pcie2a_cfg,
+ &qhs_pcie2b_cfg,
+ &qhs_pcie3a_cfg,
+ &qhs_pcie3b_cfg,
+ &qhs_pcie4_cfg,
+ &qhs_pcie_rsc_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_pka_wrapper_cfg,
+ &qhs_pmu_wrapper_cfg,
+ &qhs_qdss_cfg,
+ &qhs_qspi,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_qup2,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_security,
+ &qhs_smmuv3_cfg,
+ &qhs_smss_cfg,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_usb3_mp,
+ &qhs_usb4_host_0,
+ &qhs_usb4_host_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_vsense_ctrl_r_cfg,
+ &qns_a1_noc_cfg,
+ &qns_a2_noc_cfg,
+ &qns_anoc_pcie_bridge_cfg,
+ &qns_ddrss_cfg,
+ &qns_mnoc_cfg,
+ &qns_snoc_cfg,
+ &qns_snoc_sf_bridge_cfg,
+ &qxs_imem,
+ &qxs_pimem,
+ &srvc_cnoc,
+ &xs_qdss_stm,
+ &xs_smss,
+ &xs_sys_tcu_cfg,
+ NULL },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SC8280XP_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 7,
- .links = { SC8280XP_SLAVE_PCIE_0,
- SC8280XP_SLAVE_PCIE_1,
- SC8280XP_SLAVE_PCIE_2A,
- SC8280XP_SLAVE_PCIE_2B,
- SC8280XP_SLAVE_PCIE_3A,
- SC8280XP_SLAVE_PCIE_3B,
- SC8280XP_SLAVE_PCIE_4
- },
+ .link_nodes = { &xs_pcie_0,
+ &xs_pcie_1,
+ &xs_pcie_2a,
+ &xs_pcie_2b,
+ &xs_pcie_3a,
+ &xs_pcie_3b,
+ &xs_pcie_4 },
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
.name = "qnm_cnoc_dc_noc",
- .id = SC8280XP_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SC8280XP_SLAVE_LLCC_CFG,
- SC8280XP_SLAVE_GEM_NOC_CFG
- },
+ .link_nodes = { &qhs_llcc,
+ &qns_gemnoc },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SC8280XP_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node alm_pcie_tcu = {
.name = "alm_pcie_tcu",
- .id = SC8280XP_MASTER_PCIE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SC8280XP_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SC8280XP_MASTER_APPSS_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC,
- SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_cmpnoc0 = {
.name = "qnm_cmpnoc0",
- .id = SC8280XP_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_cmpnoc1 = {
.name = "qnm_cmpnoc1",
- .id = SC8280XP_MASTER_COMPUTE_NOC_1,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
- .id = SC8280XP_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 3,
- .links = { SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
- SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
- SC8280XP_SLAVE_SERVICE_GEM_NOC
- },
+ .link_nodes = { &srvc_even_gemnoc,
+ &srvc_odd_gemnoc,
+ &srvc_sys_gemnoc },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SC8280XP_MASTER_GFX3D,
.channels = 4,
.buswidth = 32,
.num_links = 2,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SC8280XP_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SC8280XP_SLAVE_LLCC,
- SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SC8280XP_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SC8280XP_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SC8280XP_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SC8280XP_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SC8280XP_SLAVE_GEM_NOC_CNOC,
- SC8280XP_SLAVE_LLCC,
- SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
- .id = SC8280XP_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
- .links = { SC8280XP_SLAVE_LPASS_CORE_CFG,
- SC8280XP_SLAVE_LPASS_LPI_CFG,
- SC8280XP_SLAVE_LPASS_MPU_CFG,
- SC8280XP_SLAVE_LPASS_TOP_CFG,
- SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
- SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
- },
+ .link_nodes = { &qhs_lpass_core,
+ &qhs_lpass_lpi,
+ &qhs_lpass_mpu,
+ &qhs_lpass_top,
+ &srvc_niu_aml_noc,
+ &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node qxm_lpass_dsp = {
.name = "qxm_lpass_dsp",
- .id = SC8280XP_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 4,
- .links = { SC8280XP_SLAVE_LPASS_TOP_CFG,
- SC8280XP_SLAVE_LPASS_SNOC,
- SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
- SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC
- },
+ .link_nodes = { &qhs_lpass_top,
+ &qns_sysnoc,
+ &srvc_niu_aml_noc,
+ &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SC8280XP_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = SC8280XP_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp0_0 = {
.name = "qnm_mdp0_0",
- .id = SC8280XP_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp0_1 = {
.name = "qnm_mdp0_1",
- .id = SC8280XP_MASTER_MDP1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp1_0 = {
.name = "qnm_mdp1_0",
- .id = SC8280XP_MASTER_MDP_CORE1_0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mdp1_1 = {
.name = "qnm_mdp1_1",
- .id = SC8280XP_MASTER_MDP_CORE1_1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mnoc_cfg = {
.name = "qnm_mnoc_cfg",
- .id = SC8280XP_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qnm_rot_0 = {
.name = "qnm_rot_0",
- .id = SC8280XP_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_rot_1 = {
.name = "qnm_rot_1",
- .id = SC8280XP_MASTER_ROTATOR_1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
- .id = SC8280XP_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video1 = {
.name = "qnm_video1",
- .id = SC8280XP_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = SC8280XP_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_camnoc_icp = {
.name = "qxm_camnoc_icp",
- .id = SC8280XP_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
- .id = SC8280XP_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
- .id = SC8280XP_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SERVICE_NSP_NOC },
+ .link_nodes = { &service_nsp_noc },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
- .id = SC8280XP_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SC8280XP_SLAVE_CDSP_MEM_NOC,
- SC8280XP_SLAVE_NSP_XFR
- },
+ .link_nodes = { &qns_nsp_gemnoc,
+ &qxs_nsp_xfr },
};
static struct qcom_icc_node qhm_nspb_noc_config = {
.name = "qhm_nspb_noc_config",
- .id = SC8280XP_MASTER_CDSPB_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SERVICE_NSPB_NOC },
+ .link_nodes = { &service_nspb_noc },
};
static struct qcom_icc_node qxm_nspb = {
.name = "qxm_nspb",
- .id = SC8280XP_MASTER_CDSP_PROC_B,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SC8280XP_SLAVE_CDSPB_MEM_NOC,
- SC8280XP_SLAVE_NSPB_XFR
- },
+ .link_nodes = { &qns_nspb_gemnoc,
+ &qxs_nspb_xfr },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SC8280XP_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SC8280XP_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre_usb_noc = {
.name = "qnm_aggre_usb_noc",
- .id = SC8280XP_MASTER_USB_NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_lpass_noc = {
.name = "qnm_lpass_noc",
- .id = SC8280XP_MASTER_LPASS_ANOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
- .id = SC8280XP_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SC8280XP_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SC8280XP_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SC8280XP_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_aggre_usb_snoc = {
.name = "qns_aggre_usb_snoc",
- .id = SC8280XP_SLAVE_USB_NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_MASTER_USB_NOC_SNOC },
+ .link_nodes = { &qnm_aggre_usb_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SC8280XP_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SC8280XP_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qns_pcie_gem_noc = {
.name = "qns_pcie_gem_noc",
- .id = SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SC8280XP_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SC8280XP_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SC8280XP_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
- .id = SC8280XP_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SC8280XP_SLAVE_AHB2PHY_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = SC8280XP_SLAVE_AHB2PHY_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
- .id = SC8280XP_SLAVE_AHB2PHY_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SC8280XP_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SC8280XP_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SC8280XP_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SC8280XP_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute0_cfg = {
.name = "qhs_compute0_cfg",
- .id = SC8280XP_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_MASTER_CDSP_NOC_CFG },
+ .link_nodes = { &qhm_nsp_noc_config },
};
static struct qcom_icc_node qhs_compute1_cfg = {
.name = "qhs_compute1_cfg",
- .id = SC8280XP_SLAVE_CDSP1_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_MASTER_CDSPB_NOC_CFG },
+ .link_nodes = { &qhm_nspb_noc_config },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SC8280XP_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = SC8280XP_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = SC8280XP_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_nspcx = {
.name = "qhs_cpr_nspcx",
- .id = SC8280XP_SLAVE_CPR_NSPCX,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SC8280XP_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = SC8280XP_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
- .id = SC8280XP_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display0_cfg = {
.name = "qhs_display0_cfg",
- .id = SC8280XP_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display1_cfg = {
.name = "qhs_display1_cfg",
- .id = SC8280XP_SLAVE_DISPLAY1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac0_cfg = {
.name = "qhs_emac0_cfg",
- .id = SC8280XP_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac1_cfg = {
.name = "qhs_emac1_cfg",
- .id = SC8280XP_SLAVE_EMAC1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SC8280XP_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_hwkm = {
.name = "qhs_hwkm",
- .id = SC8280XP_SLAVE_HWKM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SC8280XP_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SC8280XP_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SC8280XP_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
- .id = SC8280XP_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_MASTER_CNOC_LPASS_AG_NOC },
+ .link_nodes = { &qhm_config_noc },
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
- .id = SC8280XP_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mxc_rdpm = {
.name = "qhs_mxc_rdpm",
- .id = SC8280XP_SLAVE_MXC_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SC8280XP_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SC8280XP_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie2a_cfg = {
.name = "qhs_pcie2a_cfg",
- .id = SC8280XP_SLAVE_PCIE_2A_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie2b_cfg = {
.name = "qhs_pcie2b_cfg",
- .id = SC8280XP_SLAVE_PCIE_2B_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie3a_cfg = {
.name = "qhs_pcie3a_cfg",
- .id = SC8280XP_SLAVE_PCIE_3A_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie3b_cfg = {
.name = "qhs_pcie3b_cfg",
- .id = SC8280XP_SLAVE_PCIE_3B_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie4_cfg = {
.name = "qhs_pcie4_cfg",
- .id = SC8280XP_SLAVE_PCIE_4_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_rsc_cfg = {
.name = "qhs_pcie_rsc_cfg",
- .id = SC8280XP_SLAVE_PCIE_RSC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SC8280XP_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SC8280XP_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pka_wrapper_cfg = {
.name = "qhs_pka_wrapper_cfg",
- .id = SC8280XP_SLAVE_PKA_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
.name = "qhs_pmu_wrapper_cfg",
- .id = SC8280XP_SLAVE_PMU_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SC8280XP_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SC8280XP_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SC8280XP_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SC8280XP_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
- .id = SC8280XP_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SC8280XP_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SC8280XP_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
- .id = SC8280XP_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_smmuv3_cfg = {
.name = "qhs_smmuv3_cfg",
- .id = SC8280XP_SLAVE_SMMUV3_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_smss_cfg = {
.name = "qhs_smss_cfg",
- .id = SC8280XP_SLAVE_SMSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
- .id = SC8280XP_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SC8280XP_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SC8280XP_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
- .id = SC8280XP_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SC8280XP_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SC8280XP_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
- .id = SC8280XP_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_mp = {
.name = "qhs_usb3_mp",
- .id = SC8280XP_SLAVE_USB3_MP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb4_host_0 = {
.name = "qhs_usb4_host_0",
- .id = SC8280XP_SLAVE_USB4_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb4_host_1 = {
.name = "qhs_usb4_host_1",
- .id = SC8280XP_SLAVE_USB4_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SC8280XP_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SC8280XP_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_r_cfg = {
.name = "qhs_vsense_ctrl_r_cfg",
- .id = SC8280XP_SLAVE_VSENSE_CTRL_R_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a1_noc_cfg = {
.name = "qns_a1_noc_cfg",
- .id = SC8280XP_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_MASTER_A1NOC_CFG },
+ .link_nodes = { &qnm_a1noc_cfg },
};
static struct qcom_icc_node qns_a2_noc_cfg = {
.name = "qns_a2_noc_cfg",
- .id = SC8280XP_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_MASTER_A2NOC_CFG },
+ .link_nodes = { &qnm_a2noc_cfg },
};
static struct qcom_icc_node qns_anoc_pcie_bridge_cfg = {
.name = "qns_anoc_pcie_bridge_cfg",
- .id = SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
- .id = SC8280XP_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qnm_cnoc_dc_noc },
};
static struct qcom_icc_node qns_mnoc_cfg = {
.name = "qns_mnoc_cfg",
- .id = SC8280XP_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qnm_mnoc_cfg },
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
- .id = SC8280XP_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_MASTER_SNOC_CFG },
+ .link_nodes = { &qnm_snoc_cfg },
};
static struct qcom_icc_node qns_snoc_sf_bridge_cfg = {
.name = "qns_snoc_sf_bridge_cfg",
- .id = SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SC8280XP_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SC8280XP_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SC8280XP_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SC8280XP_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SC8280XP_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_2a = {
.name = "xs_pcie_2a",
- .id = SC8280XP_SLAVE_PCIE_2A,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_2b = {
.name = "xs_pcie_2b",
- .id = SC8280XP_SLAVE_PCIE_2B,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_3a = {
.name = "xs_pcie_3a",
- .id = SC8280XP_SLAVE_PCIE_3A,
.channels = 1,
.buswidth = 16,
};
static struct qcom_icc_node xs_pcie_3b = {
.name = "xs_pcie_3b",
- .id = SC8280XP_SLAVE_PCIE_3B,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_4 = {
.name = "xs_pcie_4",
- .id = SC8280XP_SLAVE_PCIE_4,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SC8280XP_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_smss = {
.name = "xs_smss",
- .id = SC8280XP_SLAVE_SMSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SC8280XP_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SC8280XP_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
- .id = SC8280XP_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SC8280XP_MASTER_GEM_NOC_CFG },
+ .link_nodes = { &qnm_gemnoc_cfg },
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = SC8280XP_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SC8280XP_SLAVE_LLCC,
.channels = 8,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
- .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
- .id = SC8280XP_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
- .id = SC8280XP_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
- .id = SC8280XP_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
- .id = SC8280XP_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
- .id = SC8280XP_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
- .id = SC8280XP_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_sysnoc = {
.name = "qns_sysnoc",
- .id = SC8280XP_SLAVE_LPASS_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_MASTER_LPASS_ANOC },
+ .link_nodes = { &qnm_lpass_noc },
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
- .id = SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
- .id = SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SC8280XP_SLAVE_EBI1,
.channels = 8,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SC8280XP_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SC8280XP_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SC8280XP_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = SC8280XP_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_cmpnoc0 },
};
static struct qcom_icc_node qxs_nsp_xfr = {
.name = "qxs_nsp_xfr",
- .id = SC8280XP_SLAVE_NSP_XFR,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
- .id = SC8280XP_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_nspb_gemnoc = {
.name = "qns_nspb_gemnoc",
- .id = SC8280XP_SLAVE_CDSPB_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SC8280XP_MASTER_COMPUTE_NOC_1 },
+ .link_nodes = { &qnm_cmpnoc1 },
};
static struct qcom_icc_node qxs_nspb_xfr = {
.name = "qxs_nspb_xfr",
- .id = SC8280XP_SLAVE_NSPB_XFR,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node service_nspb_noc = {
.name = "service_nspb_noc",
- .id = SC8280XP_SLAVE_SERVICE_NSPB_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SC8280XP_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SC8280XP_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SC8280XP_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SC8280XP_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SC8280XP_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
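
The hunks above replace each node's ID-based .links array, keyed by the per-SoC SC8280XP_* macros, with a .link_nodes array of direct struct qcom_icc_node pointers, so the topology is wired up by the compiler/linker rather than by an ID lookup at probe time. Below is a minimal standalone sketch of the pointer-based scheme; the type and field names are simplified assumptions for illustration, not the kernel's icc-rpmh definitions.

/* Simplified sketch: direct-pointer links, no per-SoC ID table needed. */
#include <stdio.h>

#define MAX_LINKS 128

struct icc_node {
	const char *name;
	unsigned int num_links;
	struct icc_node *link_nodes[MAX_LINKS];	/* direct pointers */
};

static struct icc_node qns_llcc_sketch = { .name = "qns_llcc" };
static struct icc_node qns_gem_noc_cnoc_sketch = { .name = "qns_gem_noc_cnoc" };

static struct icc_node qnm_gpu_sketch = {
	.name = "qnm_gpu",
	.num_links = 2,
	.link_nodes = { &qns_gem_noc_cnoc_sketch, &qns_llcc_sketch },
};

int main(void)
{
	for (unsigned int i = 0; i < qnm_gpu_sketch.num_links; i++)
		printf("%s -> %s\n", qnm_gpu_sketch.name,
		       qnm_gpu_sketch.link_nodes[i]->name);
	return 0;
}

Because the references are ordinary C pointers, a slave defined later in the file only needs a forward declaration, which is exactly what the block of "static struct qcom_icc_node ...;" lines in the sdm670.c hunk further down provides.
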
diff --git a/drivers/interconnect/qcom/sc8280xp.h b/drivers/interconnect/qcom/sc8280xp.h
deleted file mode 100644
index c5c410fd5ec3..000000000000
--- a/drivers/interconnect/qcom/sc8280xp.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2021, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
-#define __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
-
-#define SC8280XP_MASTER_GPU_TCU 0
-#define SC8280XP_MASTER_PCIE_TCU 1
-#define SC8280XP_MASTER_SYS_TCU 2
-#define SC8280XP_MASTER_APPSS_PROC 3
-/* 4 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
-#define SC8280XP_MASTER_LLCC 5
-#define SC8280XP_MASTER_CNOC_LPASS_AG_NOC 6
-#define SC8280XP_MASTER_CDSP_NOC_CFG 7
-#define SC8280XP_MASTER_CDSPB_NOC_CFG 8
-#define SC8280XP_MASTER_QDSS_BAM 9
-#define SC8280XP_MASTER_QSPI_0 10
-#define SC8280XP_MASTER_QUP_0 11
-#define SC8280XP_MASTER_QUP_1 12
-#define SC8280XP_MASTER_QUP_2 13
-#define SC8280XP_MASTER_A1NOC_CFG 14
-#define SC8280XP_MASTER_A2NOC_CFG 15
-#define SC8280XP_MASTER_A1NOC_SNOC 16
-#define SC8280XP_MASTER_A2NOC_SNOC 17
-#define SC8280XP_MASTER_USB_NOC_SNOC 18
-#define SC8280XP_MASTER_CAMNOC_HF 19
-#define SC8280XP_MASTER_COMPUTE_NOC 20
-#define SC8280XP_MASTER_COMPUTE_NOC_1 21
-#define SC8280XP_MASTER_CNOC_DC_NOC 22
-#define SC8280XP_MASTER_GEM_NOC_CFG 23
-#define SC8280XP_MASTER_GEM_NOC_CNOC 24
-#define SC8280XP_MASTER_GEM_NOC_PCIE_SNOC 25
-#define SC8280XP_MASTER_GFX3D 26
-#define SC8280XP_MASTER_LPASS_ANOC 27
-#define SC8280XP_MASTER_MDP0 28
-#define SC8280XP_MASTER_MDP1 29
-#define SC8280XP_MASTER_MDP_CORE1_0 30
-#define SC8280XP_MASTER_MDP_CORE1_1 31
-#define SC8280XP_MASTER_CNOC_MNOC_CFG 32
-#define SC8280XP_MASTER_MNOC_HF_MEM_NOC 33
-#define SC8280XP_MASTER_MNOC_SF_MEM_NOC 34
-#define SC8280XP_MASTER_ANOC_PCIE_GEM_NOC 35
-#define SC8280XP_MASTER_ROTATOR 36
-#define SC8280XP_MASTER_ROTATOR_1 37
-#define SC8280XP_MASTER_SNOC_CFG 38
-#define SC8280XP_MASTER_SNOC_GC_MEM_NOC 39
-#define SC8280XP_MASTER_SNOC_SF_MEM_NOC 40
-#define SC8280XP_MASTER_VIDEO_P0 41
-#define SC8280XP_MASTER_VIDEO_P1 42
-#define SC8280XP_MASTER_VIDEO_PROC 43
-#define SC8280XP_MASTER_QUP_CORE_0 44
-#define SC8280XP_MASTER_QUP_CORE_1 45
-#define SC8280XP_MASTER_QUP_CORE_2 46
-#define SC8280XP_MASTER_CAMNOC_ICP 47
-#define SC8280XP_MASTER_CAMNOC_SF 48
-#define SC8280XP_MASTER_CRYPTO 49
-#define SC8280XP_MASTER_IPA 50
-#define SC8280XP_MASTER_LPASS_PROC 51
-#define SC8280XP_MASTER_CDSP_PROC 52
-#define SC8280XP_MASTER_CDSP_PROC_B 53
-#define SC8280XP_MASTER_PIMEM 54
-#define SC8280XP_MASTER_SENSORS_PROC 55
-#define SC8280XP_MASTER_SP 56
-#define SC8280XP_MASTER_EMAC 57
-#define SC8280XP_MASTER_EMAC_1 58
-#define SC8280XP_MASTER_GIC 59
-#define SC8280XP_MASTER_PCIE_0 60
-#define SC8280XP_MASTER_PCIE_1 61
-#define SC8280XP_MASTER_PCIE_2A 62
-#define SC8280XP_MASTER_PCIE_2B 63
-#define SC8280XP_MASTER_PCIE_3A 64
-#define SC8280XP_MASTER_PCIE_3B 65
-#define SC8280XP_MASTER_PCIE_4 66
-#define SC8280XP_MASTER_QDSS_ETR 67
-#define SC8280XP_MASTER_SDCC_2 68
-#define SC8280XP_MASTER_SDCC_4 69
-#define SC8280XP_MASTER_UFS_CARD 70
-#define SC8280XP_MASTER_UFS_MEM 71
-#define SC8280XP_MASTER_USB3_0 72
-#define SC8280XP_MASTER_USB3_1 73
-#define SC8280XP_MASTER_USB3_MP 74
-#define SC8280XP_MASTER_USB4_0 75
-#define SC8280XP_MASTER_USB4_1 76
-#define SC8280XP_SLAVE_EBI1 512
-/* 513 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
-#define SC8280XP_SLAVE_AHB2PHY_0 514
-#define SC8280XP_SLAVE_AHB2PHY_1 515
-#define SC8280XP_SLAVE_AHB2PHY_2 516
-#define SC8280XP_SLAVE_AOSS 517
-#define SC8280XP_SLAVE_APPSS 518
-#define SC8280XP_SLAVE_CAMERA_CFG 519
-#define SC8280XP_SLAVE_CLK_CTL 520
-#define SC8280XP_SLAVE_CDSP_CFG 521
-#define SC8280XP_SLAVE_CDSP1_CFG 522
-#define SC8280XP_SLAVE_RBCPR_CX_CFG 523
-#define SC8280XP_SLAVE_RBCPR_MMCX_CFG 524
-#define SC8280XP_SLAVE_RBCPR_MX_CFG 525
-#define SC8280XP_SLAVE_CPR_NSPCX 526
-#define SC8280XP_SLAVE_CRYPTO_0_CFG 527
-#define SC8280XP_SLAVE_CX_RDPM 528
-#define SC8280XP_SLAVE_DCC_CFG 529
-#define SC8280XP_SLAVE_DISPLAY_CFG 530
-#define SC8280XP_SLAVE_DISPLAY1_CFG 531
-#define SC8280XP_SLAVE_EMAC_CFG 532
-#define SC8280XP_SLAVE_EMAC1_CFG 533
-#define SC8280XP_SLAVE_GFX3D_CFG 534
-#define SC8280XP_SLAVE_HWKM 535
-#define SC8280XP_SLAVE_IMEM_CFG 536
-#define SC8280XP_SLAVE_IPA_CFG 537
-#define SC8280XP_SLAVE_IPC_ROUTER_CFG 538
-#define SC8280XP_SLAVE_LLCC_CFG 539
-#define SC8280XP_SLAVE_LPASS 540
-#define SC8280XP_SLAVE_LPASS_CORE_CFG 541
-#define SC8280XP_SLAVE_LPASS_LPI_CFG 542
-#define SC8280XP_SLAVE_LPASS_MPU_CFG 543
-#define SC8280XP_SLAVE_LPASS_TOP_CFG 544
-#define SC8280XP_SLAVE_MX_RDPM 545
-#define SC8280XP_SLAVE_MXC_RDPM 546
-#define SC8280XP_SLAVE_PCIE_0_CFG 547
-#define SC8280XP_SLAVE_PCIE_1_CFG 548
-#define SC8280XP_SLAVE_PCIE_2A_CFG 549
-#define SC8280XP_SLAVE_PCIE_2B_CFG 550
-#define SC8280XP_SLAVE_PCIE_3A_CFG 551
-#define SC8280XP_SLAVE_PCIE_3B_CFG 552
-#define SC8280XP_SLAVE_PCIE_4_CFG 553
-#define SC8280XP_SLAVE_PCIE_RSC_CFG 554
-#define SC8280XP_SLAVE_PDM 555
-#define SC8280XP_SLAVE_PIMEM_CFG 556
-#define SC8280XP_SLAVE_PKA_WRAPPER_CFG 557
-#define SC8280XP_SLAVE_PMU_WRAPPER_CFG 558
-#define SC8280XP_SLAVE_QDSS_CFG 559
-#define SC8280XP_SLAVE_QSPI_0 560
-#define SC8280XP_SLAVE_QUP_0 561
-#define SC8280XP_SLAVE_QUP_1 562
-#define SC8280XP_SLAVE_QUP_2 563
-#define SC8280XP_SLAVE_SDCC_2 564
-#define SC8280XP_SLAVE_SDCC_4 565
-#define SC8280XP_SLAVE_SECURITY 566
-#define SC8280XP_SLAVE_SMMUV3_CFG 567
-#define SC8280XP_SLAVE_SMSS_CFG 568
-#define SC8280XP_SLAVE_SPSS_CFG 569
-#define SC8280XP_SLAVE_TCSR 570
-#define SC8280XP_SLAVE_TLMM 571
-#define SC8280XP_SLAVE_UFS_CARD_CFG 572
-#define SC8280XP_SLAVE_UFS_MEM_CFG 573
-#define SC8280XP_SLAVE_USB3_0 574
-#define SC8280XP_SLAVE_USB3_1 575
-#define SC8280XP_SLAVE_USB3_MP 576
-#define SC8280XP_SLAVE_USB4_0 577
-#define SC8280XP_SLAVE_USB4_1 578
-#define SC8280XP_SLAVE_VENUS_CFG 579
-#define SC8280XP_SLAVE_VSENSE_CTRL_CFG 580
-#define SC8280XP_SLAVE_VSENSE_CTRL_R_CFG 581
-#define SC8280XP_SLAVE_A1NOC_CFG 582
-#define SC8280XP_SLAVE_A1NOC_SNOC 583
-#define SC8280XP_SLAVE_A2NOC_CFG 584
-#define SC8280XP_SLAVE_A2NOC_SNOC 585
-#define SC8280XP_SLAVE_USB_NOC_SNOC 586
-#define SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG 587
-#define SC8280XP_SLAVE_DDRSS_CFG 588
-#define SC8280XP_SLAVE_GEM_NOC_CNOC 589
-#define SC8280XP_SLAVE_GEM_NOC_CFG 590
-#define SC8280XP_SLAVE_SNOC_GEM_NOC_GC 591
-#define SC8280XP_SLAVE_SNOC_GEM_NOC_SF 592
-#define SC8280XP_SLAVE_LLCC 593
-#define SC8280XP_SLAVE_MNOC_HF_MEM_NOC 594
-#define SC8280XP_SLAVE_MNOC_SF_MEM_NOC 595
-#define SC8280XP_SLAVE_CNOC_MNOC_CFG 596
-#define SC8280XP_SLAVE_CDSP_MEM_NOC 597
-#define SC8280XP_SLAVE_CDSPB_MEM_NOC 598
-#define SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC 599
-#define SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC 600
-#define SC8280XP_SLAVE_SNOC_CFG 601
-#define SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG 602
-#define SC8280XP_SLAVE_LPASS_SNOC 603
-#define SC8280XP_SLAVE_QUP_CORE_0 604
-#define SC8280XP_SLAVE_QUP_CORE_1 605
-#define SC8280XP_SLAVE_QUP_CORE_2 606
-#define SC8280XP_SLAVE_IMEM 607
-#define SC8280XP_SLAVE_NSP_XFR 608
-#define SC8280XP_SLAVE_NSPB_XFR 609
-#define SC8280XP_SLAVE_PIMEM 610
-#define SC8280XP_SLAVE_SERVICE_NSP_NOC 611
-#define SC8280XP_SLAVE_SERVICE_NSPB_NOC 612
-#define SC8280XP_SLAVE_SERVICE_A1NOC 613
-#define SC8280XP_SLAVE_SERVICE_A2NOC 614
-#define SC8280XP_SLAVE_SERVICE_CNOC 615
-#define SC8280XP_SLAVE_SERVICE_GEM_NOC_1 616
-#define SC8280XP_SLAVE_SERVICE_MNOC 617
-#define SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC 618
-#define SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC 619
-#define SC8280XP_SLAVE_SERVICE_GEM_NOC_2 620
-#define SC8280XP_SLAVE_SERVICE_SNOC 621
-#define SC8280XP_SLAVE_SERVICE_GEM_NOC 622
-#define SC8280XP_SLAVE_PCIE_0 623
-#define SC8280XP_SLAVE_PCIE_1 624
-#define SC8280XP_SLAVE_PCIE_2A 625
-#define SC8280XP_SLAVE_PCIE_2B 626
-#define SC8280XP_SLAVE_PCIE_3A 627
-#define SC8280XP_SLAVE_PCIE_3B 628
-#define SC8280XP_SLAVE_PCIE_4 629
-#define SC8280XP_SLAVE_QDSS_STM 630
-#define SC8280XP_SLAVE_SMSS 631
-#define SC8280XP_SLAVE_TCU 632
-
-#endif
-
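
With the links expressed as pointers, nothing references the SC8280XP_MASTER_*/SC8280XP_SLAVE_* numbers any more, so the whole per-SoC ID header can be deleted. For contrast, a hedged sketch of the kind of ID-to-node matching such headers existed to support (illustrative names only, not the kernel's API):

/* Old-style sketch: a numeric ID from the header keys a table lookup. */
#include <stddef.h>
#include <stdio.h>

#define SLAVE_LLCC 593	/* value taken from the deleted header above */

struct node_desc {
	const char *name;
	int id;
};

static struct node_desc table[] = {
	{ "qns_llcc", SLAVE_LLCC },
};

static struct node_desc *find_node(int id)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].id == id)
			return &table[i];
	return NULL;
}

int main(void)
{
	struct node_desc *n = find_node(SLAVE_LLCC);
	printf("%s\n", n ? n->name : "not found");
	return 0;
}

After the conversion, each driver dereferences one pointer instead of walking such a table for every link, and the ID namespace no longer has to be kept unique per SoC.
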
diff --git a/drivers/interconnect/qcom/sdm670.c b/drivers/interconnect/qcom/sdm670.c
index 907e1ff4ff81..88f4768b765c 100644
--- a/drivers/interconnect/qcom/sdm670.c
+++ b/drivers/interconnect/qcom/sdm670.c
@@ -13,1034 +13,1020 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sdm670.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qhm_tsif;
+static struct qcom_icc_node xm_emmc;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node qhm_a2noc_cfg;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qnm_cnoc;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp;
+static struct qcom_icc_node qxm_camnoc_hf1_uncomp;
+static struct qcom_icc_node qxm_camnoc_sf_uncomp;
+static struct qcom_icc_node qhm_spdm;
+static struct qcom_icc_node qnm_snoc;
+static struct qcom_icc_node qhm_cnoc;
+static struct qcom_icc_node acm_l3;
+static struct qcom_icc_node pm_gnoc_cfg;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node acm_tcu;
+static struct qcom_icc_node qhm_memnoc_cfg;
+static struct qcom_icc_node qnm_apps;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qxm_gpu;
+static struct qcom_icc_node qhm_mnoc_cfg;
+static struct qcom_icc_node qxm_camnoc_hf0;
+static struct qcom_icc_node qxm_camnoc_hf1;
+static struct qcom_icc_node qxm_camnoc_sf;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node qxm_mdp1;
+static struct qcom_icc_node qxm_rot;
+static struct qcom_icc_node qxm_venus0;
+static struct qcom_icc_node qxm_venus1;
+static struct qcom_icc_node qxm_venus_arm9;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_gladiator_sodv;
+static struct qcom_icc_node qnm_memnoc;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qns_camnoc_uncomp;
+static struct qcom_icc_node qhs_a1_noc_cfg;
+static struct qcom_icc_node qhs_a2_noc_cfg;
+static struct qcom_icc_node qhs_aop;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute_dsp_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_dcc_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_emmc_cfg;
+static struct qcom_icc_node qhs_glm;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_mnoc_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_phy_refgen_south;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qupv3_north;
+static struct qcom_icc_node qhs_qupv3_south;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_spdm;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm_north;
+static struct qcom_icc_node qhs_tlmm_south;
+static struct qcom_icc_node qhs_tsif;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_cnoc_a2noc;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qhs_memnoc;
+static struct qcom_icc_node qns_gladiator_sodv;
+static struct qcom_icc_node qns_gnoc_memnoc;
+static struct qcom_icc_node srvc_gnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node qns_apps_io;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_memnoc_snoc;
+static struct qcom_icc_node srvc_memnoc;
+static struct qcom_icc_node qns2_mem_noc;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qns_cnoc;
+static struct qcom_icc_node qns_memnoc_gc;
+static struct qcom_icc_node qns_memnoc_sf;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
- .id = SDM670_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SDM670_MASTER_BLSP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_tsif = {
.name = "qhm_tsif",
- .id = SDM670_MASTER_TSIF,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emmc = {
.name = "xm_emmc",
- .id = SDM670_MASTER_EMMC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SDM670_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SDM670_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SDM670_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
- .id = SDM670_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SDM670_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SDM670_MASTER_BLSP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
- .id = SDM670_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SDM670_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SDM670_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SDM670_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SDM670_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
- .id = SDM670_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
.name = "qxm_camnoc_hf1_uncomp",
- .id = SDM670_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
- .id = SDM670_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qhm_spdm = {
.name = "qhm_spdm",
- .id = SDM670_MASTER_SPDM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_CNOC_A2NOC },
+ .link_nodes = { &qns_cnoc_a2noc },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
- .id = SDM670_MASTER_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 38,
- .links = { SDM670_SLAVE_TLMM_SOUTH,
- SDM670_SLAVE_CAMERA_CFG,
- SDM670_SLAVE_SDCC_4,
- SDM670_SLAVE_SDCC_2,
- SDM670_SLAVE_CNOC_MNOC_CFG,
- SDM670_SLAVE_UFS_MEM_CFG,
- SDM670_SLAVE_GLM,
- SDM670_SLAVE_PDM,
- SDM670_SLAVE_A2NOC_CFG,
- SDM670_SLAVE_QDSS_CFG,
- SDM670_SLAVE_DISPLAY_CFG,
- SDM670_SLAVE_TCSR,
- SDM670_SLAVE_DCC_CFG,
- SDM670_SLAVE_CNOC_DDRSS,
- SDM670_SLAVE_SNOC_CFG,
- SDM670_SLAVE_SOUTH_PHY_CFG,
- SDM670_SLAVE_GRAPHICS_3D_CFG,
- SDM670_SLAVE_VENUS_CFG,
- SDM670_SLAVE_TSIF,
- SDM670_SLAVE_CDSP_CFG,
- SDM670_SLAVE_AOP,
- SDM670_SLAVE_BLSP_2,
- SDM670_SLAVE_SERVICE_CNOC,
- SDM670_SLAVE_USB3,
- SDM670_SLAVE_IPA_CFG,
- SDM670_SLAVE_RBCPR_CX_CFG,
- SDM670_SLAVE_A1NOC_CFG,
- SDM670_SLAVE_AOSS,
- SDM670_SLAVE_PRNG,
- SDM670_SLAVE_VSENSE_CTRL_CFG,
- SDM670_SLAVE_EMMC_CFG,
- SDM670_SLAVE_BLSP_1,
- SDM670_SLAVE_SPDM_WRAPPER,
- SDM670_SLAVE_CRYPTO_0_CFG,
- SDM670_SLAVE_PIMEM_CFG,
- SDM670_SLAVE_TLMM_NORTH,
- SDM670_SLAVE_CLK_CTL,
- SDM670_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &qhs_tlmm_south,
+ &qhs_camera_cfg,
+ &qhs_sdc4,
+ &qhs_sdc2,
+ &qhs_mnoc_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_glm,
+ &qhs_pdm,
+ &qhs_a2_noc_cfg,
+ &qhs_qdss_cfg,
+ &qhs_display_cfg,
+ &qhs_tcsr,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_snoc_cfg,
+ &qhs_phy_refgen_south,
+ &qhs_gpuss_cfg,
+ &qhs_venus_cfg,
+ &qhs_tsif,
+ &qhs_compute_dsp_cfg,
+ &qhs_aop,
+ &qhs_qupv3_north,
+ &srvc_cnoc,
+ &qhs_usb3_0,
+ &qhs_ipa,
+ &qhs_cpr_cx,
+ &qhs_a1_noc_cfg,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_emmc_cfg,
+ &qhs_qupv3_south,
+ &qhs_spdm,
+ &qhs_crypto0_cfg,
+ &qhs_pimem_cfg,
+ &qhs_tlmm_north,
+ &qhs_clk_ctl,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node qhm_cnoc = {
.name = "qhm_cnoc",
- .id = SDM670_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SDM670_SLAVE_MEM_NOC_CFG,
- SDM670_SLAVE_LLCC_CFG
- },
+ .link_nodes = { &qhs_memnoc,
+ &qhs_llcc },
};
static struct qcom_icc_node acm_l3 = {
.name = "acm_l3",
- .id = SDM670_MASTER_AMPSS_M0,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SDM670_SLAVE_SERVICE_GNOC,
- SDM670_SLAVE_GNOC_SNOC,
- SDM670_SLAVE_GNOC_MEM_NOC
- },
+ .link_nodes = { &srvc_gnoc,
+ &qns_gladiator_sodv,
+ &qns_gnoc_memnoc },
};
static struct qcom_icc_node pm_gnoc_cfg = {
.name = "pm_gnoc_cfg",
- .id = SDM670_MASTER_GNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_SERVICE_GNOC },
+ .link_nodes = { &srvc_gnoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SDM670_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_EBI_CH0 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node acm_tcu = {
.name = "acm_tcu",
- .id = SDM670_MASTER_TCU_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
- .links = { SDM670_SLAVE_MEM_NOC_GNOC,
- SDM670_SLAVE_LLCC,
- SDM670_SLAVE_MEM_NOC_SNOC
- },
+ .link_nodes = { &qns_apps_io,
+ &qns_llcc,
+ &qns_memnoc_snoc },
};
static struct qcom_icc_node qhm_memnoc_cfg = {
.name = "qhm_memnoc_cfg",
- .id = SDM670_MASTER_MEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SDM670_SLAVE_SERVICE_MEM_NOC,
- SDM670_SLAVE_MSS_PROC_MS_MPU_CFG
- },
+ .link_nodes = { &srvc_memnoc,
+ &qhs_mdsp_ms_mpu_cfg },
};
static struct qcom_icc_node qnm_apps = {
.name = "qnm_apps",
- .id = SDM670_MASTER_GNOC_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SDM670_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SDM670_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 3,
- .links = { SDM670_SLAVE_MEM_NOC_GNOC,
- SDM670_SLAVE_LLCC,
- SDM670_SLAVE_MEM_NOC_SNOC
- },
+ .link_nodes = { &qns_apps_io,
+ &qns_llcc,
+ &qns_memnoc_snoc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SDM670_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SDM670_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SDM670_SLAVE_MEM_NOC_GNOC,
- SDM670_SLAVE_LLCC
- },
+ .link_nodes = { &qns_apps_io,
+ &qns_llcc },
};
static struct qcom_icc_node qxm_gpu = {
.name = "qxm_gpu",
- .id = SDM670_MASTER_GRAPHICS_3D,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SDM670_SLAVE_MEM_NOC_GNOC,
- SDM670_SLAVE_LLCC,
- SDM670_SLAVE_MEM_NOC_SNOC
- },
+ .link_nodes = { &qns_apps_io,
+ &qns_llcc,
+ &qns_memnoc_snoc },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
- .id = SDM670_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qxm_camnoc_hf0 = {
.name = "qxm_camnoc_hf0",
- .id = SDM670_MASTER_CAMNOC_HF0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_hf1 = {
.name = "qxm_camnoc_hf1",
- .id = SDM670_MASTER_CAMNOC_HF1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
- .id = SDM670_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = SDM670_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
- .id = SDM670_MASTER_MDP_PORT1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
- .id = SDM670_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
- .id = SDM670_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus1 = {
.name = "qxm_venus1",
- .id = SDM670_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
- .id = SDM670_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = SDM670_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SDM670_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
- .links = { SDM670_SLAVE_PIMEM,
- SDM670_SLAVE_SNOC_MEM_NOC_SF,
- SDM670_SLAVE_OCIMEM,
- SDM670_SLAVE_APPSS,
- SDM670_SLAVE_SNOC_CNOC,
- SDM670_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qxs_pimem,
+ &qns_memnoc_sf,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SDM670_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 7,
- .links = { SDM670_SLAVE_PIMEM,
- SDM670_SLAVE_SNOC_MEM_NOC_SF,
- SDM670_SLAVE_OCIMEM,
- SDM670_SLAVE_APPSS,
- SDM670_SLAVE_SNOC_CNOC,
- SDM670_SLAVE_TCU,
- SDM670_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qxs_pimem,
+ &qns_memnoc_sf,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_sys_tcu_cfg,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_gladiator_sodv = {
.name = "qnm_gladiator_sodv",
- .id = SDM670_MASTER_GNOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
- .links = { SDM670_SLAVE_PIMEM,
- SDM670_SLAVE_OCIMEM,
- SDM670_SLAVE_APPSS,
- SDM670_SLAVE_SNOC_CNOC,
- SDM670_SLAVE_TCU,
- SDM670_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_sys_tcu_cfg,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_memnoc = {
.name = "qnm_memnoc",
- .id = SDM670_MASTER_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 5,
- .links = { SDM670_SLAVE_OCIMEM,
- SDM670_SLAVE_APPSS,
- SDM670_SLAVE_PIMEM,
- SDM670_SLAVE_SNOC_CNOC,
- SDM670_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qxs_imem,
+ &qhs_apss,
+ &qxs_pimem,
+ &qns_cnoc,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SDM670_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SDM670_SLAVE_OCIMEM,
- SDM670_SLAVE_SNOC_MEM_NOC_GC
- },
+ .link_nodes = { &qxs_imem,
+ &qns_memnoc_gc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SDM670_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SDM670_SLAVE_OCIMEM,
- SDM670_SLAVE_SNOC_MEM_NOC_GC
- },
+ .link_nodes = { &qxs_imem,
+ &qns_memnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SDM670_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDM670_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SDM670_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SDM670_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDM670_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SDM670_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
- .id = SDM670_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
- .id = SDM670_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_MASTER_A1NOC_CFG },
+ .link_nodes = { &qhm_a1noc_cfg },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
- .id = SDM670_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_MASTER_A2NOC_CFG },
+ .link_nodes = { &qhm_a2noc_cfg },
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
- .id = SDM670_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SDM670_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SDM670_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SDM670_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_dsp_cfg = {
.name = "qhs_compute_dsp_cfg",
- .id = SDM670_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SDM670_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SDM670_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
- .id = SDM670_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qhm_cnoc },
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = SDM670_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SDM670_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emmc_cfg = {
.name = "qhs_emmc_cfg",
- .id = SDM670_SLAVE_EMMC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
- .id = SDM670_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SDM670_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SDM670_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SDM670_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
- .id = SDM670_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qhm_mnoc_cfg },
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SDM670_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_phy_refgen_south = {
.name = "qhs_phy_refgen_south",
- .id = SDM670_SLAVE_SOUTH_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SDM670_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SDM670_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SDM670_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_north = {
.name = "qhs_qupv3_north",
- .id = SDM670_SLAVE_BLSP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_south = {
.name = "qhs_qupv3_south",
- .id = SDM670_SLAVE_BLSP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SDM670_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SDM670_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = SDM670_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_spdm = {
.name = "qhs_spdm",
- .id = SDM670_SLAVE_SPDM_WRAPPER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SDM670_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_north = {
.name = "qhs_tlmm_north",
- .id = SDM670_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_south = {
.name = "qhs_tlmm_south",
- .id = SDM670_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsif = {
.name = "qhs_tsif",
- .id = SDM670_SLAVE_TSIF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SDM670_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SDM670_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SDM670_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SDM670_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
- .id = SDM670_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_MASTER_CNOC_A2NOC },
+ .link_nodes = { &qnm_cnoc },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SDM670_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SDM670_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_memnoc = {
.name = "qhs_memnoc",
- .id = SDM670_SLAVE_MEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM670_MASTER_MEM_NOC_CFG },
+ .link_nodes = { &qhm_memnoc_cfg },
};
static struct qcom_icc_node qns_gladiator_sodv = {
.name = "qns_gladiator_sodv",
- .id = SDM670_SLAVE_GNOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_MASTER_GNOC_SNOC },
+ .link_nodes = { &qnm_gladiator_sodv },
};
static struct qcom_icc_node qns_gnoc_memnoc = {
.name = "qns_gnoc_memnoc",
- .id = SDM670_SLAVE_GNOC_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_MASTER_GNOC_MEM_NOC },
+ .link_nodes = { &qnm_apps },
};
static struct qcom_icc_node srvc_gnoc = {
.name = "srvc_gnoc",
- .id = SDM670_SLAVE_SERVICE_GNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SDM670_SLAVE_EBI_CH0,
.channels = 2,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
- .id = SDM670_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_apps_io = {
.name = "qns_apps_io",
- .id = SDM670_SLAVE_MEM_NOC_GNOC,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SDM670_SLAVE_LLCC,
.channels = 2,
.buswidth = 16,
.num_links = 1,
- .links = { SDM670_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_memnoc_snoc = {
.name = "qns_memnoc_snoc",
- .id = SDM670_SLAVE_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_MASTER_MEM_NOC_SNOC },
+ .link_nodes = { &qnm_memnoc },
};
static struct qcom_icc_node srvc_memnoc = {
.name = "srvc_memnoc",
- .id = SDM670_SLAVE_SERVICE_MEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns2_mem_noc = {
.name = "qns2_mem_noc",
- .id = SDM670_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SDM670_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SDM670_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SDM670_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SDM670_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
- .id = SDM670_SLAVE_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_MASTER_SNOC_CNOC },
+ .link_nodes = { &qnm_snoc },
};
static struct qcom_icc_node qns_memnoc_gc = {
.name = "qns_memnoc_gc",
- .id = SDM670_SLAVE_SNOC_MEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM670_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_memnoc_sf = {
.name = "qns_memnoc_sf",
- .id = SDM670_SLAVE_SNOC_MEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDM670_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SDM670_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SDM670_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SDM670_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SDM670_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SDM670_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
diff --git a/drivers/interconnect/qcom/sdm670.h b/drivers/interconnect/qcom/sdm670.h
deleted file mode 100644
index 14155f244c43..000000000000
--- a/drivers/interconnect/qcom/sdm670.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Qualcomm #define SDM670 interconnect IDs
- *
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SDM670_H
-#define __DRIVERS_INTERCONNECT_QCOM_SDM670_H
-
-#define SDM670_MASTER_A1NOC_CFG 0
-#define SDM670_MASTER_A1NOC_SNOC 1
-#define SDM670_MASTER_A2NOC_CFG 2
-#define SDM670_MASTER_A2NOC_SNOC 3
-#define SDM670_MASTER_AMPSS_M0 4
-#define SDM670_MASTER_BLSP_1 5
-#define SDM670_MASTER_BLSP_2 6
-#define SDM670_MASTER_CAMNOC_HF0 7
-#define SDM670_MASTER_CAMNOC_HF0_UNCOMP 8
-#define SDM670_MASTER_CAMNOC_HF1 9
-#define SDM670_MASTER_CAMNOC_HF1_UNCOMP 10
-#define SDM670_MASTER_CAMNOC_SF 11
-#define SDM670_MASTER_CAMNOC_SF_UNCOMP 12
-#define SDM670_MASTER_CNOC_A2NOC 13
-#define SDM670_MASTER_CNOC_DC_NOC 14
-#define SDM670_MASTER_CNOC_MNOC_CFG 15
-#define SDM670_MASTER_CRYPTO_CORE_0 16
-#define SDM670_MASTER_EMMC 17
-#define SDM670_MASTER_GIC 18
-#define SDM670_MASTER_GNOC_CFG 19
-#define SDM670_MASTER_GNOC_MEM_NOC 20
-#define SDM670_MASTER_GNOC_SNOC 21
-#define SDM670_MASTER_GRAPHICS_3D 22
-#define SDM670_MASTER_IPA 23
-#define SDM670_MASTER_LLCC 24
-#define SDM670_MASTER_MDP_PORT0 25
-#define SDM670_MASTER_MDP_PORT1 26
-#define SDM670_MASTER_MEM_NOC_CFG 27
-#define SDM670_MASTER_MEM_NOC_SNOC 28
-#define SDM670_MASTER_MNOC_HF_MEM_NOC 29
-#define SDM670_MASTER_MNOC_SF_MEM_NOC 30
-#define SDM670_MASTER_PIMEM 31
-#define SDM670_MASTER_QDSS_BAM 32
-#define SDM670_MASTER_QDSS_ETR 33
-#define SDM670_MASTER_ROTATOR 34
-#define SDM670_MASTER_SDCC_2 35
-#define SDM670_MASTER_SDCC_4 36
-#define SDM670_MASTER_SNOC_CFG 37
-#define SDM670_MASTER_SNOC_CNOC 38
-#define SDM670_MASTER_SNOC_GC_MEM_NOC 39
-#define SDM670_MASTER_SNOC_SF_MEM_NOC 40
-#define SDM670_MASTER_SPDM 41
-#define SDM670_MASTER_TCU_0 42
-#define SDM670_MASTER_TSIF 43
-#define SDM670_MASTER_UFS_MEM 44
-#define SDM670_MASTER_USB3 45
-#define SDM670_MASTER_VIDEO_P0 46
-#define SDM670_MASTER_VIDEO_P1 47
-#define SDM670_MASTER_VIDEO_PROC 48
-#define SDM670_SLAVE_A1NOC_CFG 49
-#define SDM670_SLAVE_A1NOC_SNOC 50
-#define SDM670_SLAVE_A2NOC_CFG 51
-#define SDM670_SLAVE_A2NOC_SNOC 52
-#define SDM670_SLAVE_AOP 53
-#define SDM670_SLAVE_AOSS 54
-#define SDM670_SLAVE_APPSS 55
-#define SDM670_SLAVE_BLSP_1 56
-#define SDM670_SLAVE_BLSP_2 57
-#define SDM670_SLAVE_CAMERA_CFG 58
-#define SDM670_SLAVE_CAMNOC_UNCOMP 59
-#define SDM670_SLAVE_CDSP_CFG 60
-#define SDM670_SLAVE_CLK_CTL 61
-#define SDM670_SLAVE_CNOC_A2NOC 62
-#define SDM670_SLAVE_CNOC_DDRSS 63
-#define SDM670_SLAVE_CNOC_MNOC_CFG 64
-#define SDM670_SLAVE_CRYPTO_0_CFG 65
-#define SDM670_SLAVE_DCC_CFG 66
-#define SDM670_SLAVE_DISPLAY_CFG 67
-#define SDM670_SLAVE_EBI_CH0 68
-#define SDM670_SLAVE_EMMC_CFG 69
-#define SDM670_SLAVE_GLM 70
-#define SDM670_SLAVE_GNOC_MEM_NOC 71
-#define SDM670_SLAVE_GNOC_SNOC 72
-#define SDM670_SLAVE_GRAPHICS_3D_CFG 73
-#define SDM670_SLAVE_IMEM_CFG 74
-#define SDM670_SLAVE_IPA_CFG 75
-#define SDM670_SLAVE_LLCC 76
-#define SDM670_SLAVE_LLCC_CFG 77
-#define SDM670_SLAVE_MEM_NOC_CFG 78
-#define SDM670_SLAVE_MEM_NOC_GNOC 79
-#define SDM670_SLAVE_MEM_NOC_SNOC 80
-#define SDM670_SLAVE_MNOC_HF_MEM_NOC 81
-#define SDM670_SLAVE_MNOC_SF_MEM_NOC 82
-#define SDM670_SLAVE_MSS_PROC_MS_MPU_CFG 83
-#define SDM670_SLAVE_OCIMEM 84
-#define SDM670_SLAVE_PDM 85
-#define SDM670_SLAVE_PIMEM 86
-#define SDM670_SLAVE_PIMEM_CFG 87
-#define SDM670_SLAVE_PRNG 88
-#define SDM670_SLAVE_QDSS_CFG 89
-#define SDM670_SLAVE_QDSS_STM 90
-#define SDM670_SLAVE_RBCPR_CX_CFG 91
-#define SDM670_SLAVE_SDCC_2 92
-#define SDM670_SLAVE_SDCC_4 93
-#define SDM670_SLAVE_SERVICE_A1NOC 94
-#define SDM670_SLAVE_SERVICE_A2NOC 95
-#define SDM670_SLAVE_SERVICE_CNOC 96
-#define SDM670_SLAVE_SERVICE_GNOC 97
-#define SDM670_SLAVE_SERVICE_MEM_NOC 98
-#define SDM670_SLAVE_SERVICE_MNOC 99
-#define SDM670_SLAVE_SERVICE_SNOC 100
-#define SDM670_SLAVE_SNOC_CFG 101
-#define SDM670_SLAVE_SNOC_CNOC 102
-#define SDM670_SLAVE_SNOC_MEM_NOC_GC 103
-#define SDM670_SLAVE_SNOC_MEM_NOC_SF 104
-#define SDM670_SLAVE_SOUTH_PHY_CFG 105
-#define SDM670_SLAVE_SPDM_WRAPPER 106
-#define SDM670_SLAVE_TCSR 107
-#define SDM670_SLAVE_TCU 108
-#define SDM670_SLAVE_TLMM_NORTH 109
-#define SDM670_SLAVE_TLMM_SOUTH 110
-#define SDM670_SLAVE_TSIF 111
-#define SDM670_SLAVE_UFS_MEM_CFG 112
-#define SDM670_SLAVE_USB3 113
-#define SDM670_SLAVE_VENUS_CFG 114
-#define SDM670_SLAVE_VSENSE_CTRL_CFG 115
-
-#endif
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 855802be93fe..6d5bbeda0689 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -14,1251 +14,1231 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sdm845.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qhm_tsif;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_card;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_pcie_0;
+static struct qcom_icc_node qhm_a2noc_cfg;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qnm_cnoc;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node xm_usb3_1;
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp;
+static struct qcom_icc_node qxm_camnoc_hf1_uncomp;
+static struct qcom_icc_node qxm_camnoc_sf_uncomp;
+static struct qcom_icc_node qhm_spdm;
+static struct qcom_icc_node qhm_tic;
+static struct qcom_icc_node qnm_snoc;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node qhm_cnoc;
+static struct qcom_icc_node acm_l3;
+static struct qcom_icc_node pm_gnoc_cfg;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node acm_tcu;
+static struct qcom_icc_node qhm_memnoc_cfg;
+static struct qcom_icc_node qnm_apps;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qxm_gpu;
+static struct qcom_icc_node qhm_mnoc_cfg;
+static struct qcom_icc_node qxm_camnoc_hf0;
+static struct qcom_icc_node qxm_camnoc_hf1;
+static struct qcom_icc_node qxm_camnoc_sf;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node qxm_mdp1;
+static struct qcom_icc_node qxm_rot;
+static struct qcom_icc_node qxm_venus0;
+static struct qcom_icc_node qxm_venus1;
+static struct qcom_icc_node qxm_venus_arm9;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_gladiator_sodv;
+static struct qcom_icc_node qnm_memnoc;
+static struct qcom_icc_node qnm_pcie_anoc;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_pcie_a1noc_snoc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qns_pcie_snoc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qns_camnoc_uncomp;
+static struct qcom_icc_node qhs_a1_noc_cfg;
+static struct qcom_icc_node qhs_a2_noc_cfg;
+static struct qcom_icc_node qhs_aop;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute_dsp_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_dcc_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_glm;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_mnoc_cfg;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie_gen3_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_phy_refgen_south;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qupv3_north;
+static struct qcom_icc_node qhs_qupv3_south;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_spdm;
+static struct qcom_icc_node qhs_spss_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm_north;
+static struct qcom_icc_node qhs_tlmm_south;
+static struct qcom_icc_node qhs_tsif;
+static struct qcom_icc_node qhs_ufs_card_cfg;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_usb3_1;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_cnoc_a2noc;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qhs_memnoc;
+static struct qcom_icc_node qns_gladiator_sodv;
+static struct qcom_icc_node qns_gnoc_memnoc;
+static struct qcom_icc_node srvc_gnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node qns_apps_io;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_memnoc_snoc;
+static struct qcom_icc_node srvc_memnoc;
+static struct qcom_icc_node qns2_mem_noc;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qns_cnoc;
+static struct qcom_icc_node qns_memnoc_gc;
+static struct qcom_icc_node qns_memnoc_sf;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pcie;
+static struct qcom_icc_node qxs_pcie_gen3;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
- .id = SDM845_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SDM845_MASTER_BLSP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_tsif = {
.name = "qhm_tsif",
- .id = SDM845_MASTER_TSIF,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SDM845_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SDM845_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
- .id = SDM845_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SDM845_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_pcie_0 = {
.name = "xm_pcie_0",
- .id = SDM845_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC },
+ .link_nodes = { &qns_pcie_a1noc_snoc },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
- .id = SDM845_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SDM845_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SDM845_MASTER_BLSP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
- .id = SDM845_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SDM845_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SDM845_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SDM845_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_ANOC_PCIE_SNOC },
+ .link_nodes = { &qns_pcie_snoc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SDM845_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SDM845_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
- .id = SDM845_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
- .id = SDM845_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
.name = "qxm_camnoc_hf1_uncomp",
- .id = SDM845_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
- .id = SDM845_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qhm_spdm = {
.name = "qhm_spdm",
- .id = SDM845_MASTER_SPDM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_CNOC_A2NOC },
+ .link_nodes = { &qns_cnoc_a2noc },
};
static struct qcom_icc_node qhm_tic = {
.name = "qhm_tic",
- .id = SDM845_MASTER_TIC,
.channels = 1,
.buswidth = 4,
.num_links = 43,
- .links = { SDM845_SLAVE_A1NOC_CFG,
- SDM845_SLAVE_A2NOC_CFG,
- SDM845_SLAVE_AOP,
- SDM845_SLAVE_AOSS,
- SDM845_SLAVE_CAMERA_CFG,
- SDM845_SLAVE_CLK_CTL,
- SDM845_SLAVE_CDSP_CFG,
- SDM845_SLAVE_RBCPR_CX_CFG,
- SDM845_SLAVE_CRYPTO_0_CFG,
- SDM845_SLAVE_DCC_CFG,
- SDM845_SLAVE_CNOC_DDRSS,
- SDM845_SLAVE_DISPLAY_CFG,
- SDM845_SLAVE_GLM,
- SDM845_SLAVE_GFX3D_CFG,
- SDM845_SLAVE_IMEM_CFG,
- SDM845_SLAVE_IPA_CFG,
- SDM845_SLAVE_CNOC_MNOC_CFG,
- SDM845_SLAVE_PCIE_0_CFG,
- SDM845_SLAVE_PCIE_1_CFG,
- SDM845_SLAVE_PDM,
- SDM845_SLAVE_SOUTH_PHY_CFG,
- SDM845_SLAVE_PIMEM_CFG,
- SDM845_SLAVE_PRNG,
- SDM845_SLAVE_QDSS_CFG,
- SDM845_SLAVE_BLSP_2,
- SDM845_SLAVE_BLSP_1,
- SDM845_SLAVE_SDCC_2,
- SDM845_SLAVE_SDCC_4,
- SDM845_SLAVE_SNOC_CFG,
- SDM845_SLAVE_SPDM_WRAPPER,
- SDM845_SLAVE_SPSS_CFG,
- SDM845_SLAVE_TCSR,
- SDM845_SLAVE_TLMM_NORTH,
- SDM845_SLAVE_TLMM_SOUTH,
- SDM845_SLAVE_TSIF,
- SDM845_SLAVE_UFS_CARD_CFG,
- SDM845_SLAVE_UFS_MEM_CFG,
- SDM845_SLAVE_USB3_0,
- SDM845_SLAVE_USB3_1,
- SDM845_SLAVE_VENUS_CFG,
- SDM845_SLAVE_VSENSE_CTRL_CFG,
- SDM845_SLAVE_CNOC_A2NOC,
- SDM845_SLAVE_SERVICE_CNOC
- },
+ .link_nodes = { &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_dsp_cfg,
+ &qhs_cpr_cx,
+ &qhs_crypto0_cfg,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_pcie0_cfg,
+ &qhs_pcie_gen3_cfg,
+ &qhs_pdm,
+ &qhs_phy_refgen_south,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qupv3_north,
+ &qhs_qupv3_south,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_snoc_cfg,
+ &qhs_spdm,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm_north,
+ &qhs_tlmm_south,
+ &qhs_tsif,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_cnoc_a2noc,
+ &srvc_cnoc },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
- .id = SDM845_MASTER_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 42,
- .links = { SDM845_SLAVE_A1NOC_CFG,
- SDM845_SLAVE_A2NOC_CFG,
- SDM845_SLAVE_AOP,
- SDM845_SLAVE_AOSS,
- SDM845_SLAVE_CAMERA_CFG,
- SDM845_SLAVE_CLK_CTL,
- SDM845_SLAVE_CDSP_CFG,
- SDM845_SLAVE_RBCPR_CX_CFG,
- SDM845_SLAVE_CRYPTO_0_CFG,
- SDM845_SLAVE_DCC_CFG,
- SDM845_SLAVE_CNOC_DDRSS,
- SDM845_SLAVE_DISPLAY_CFG,
- SDM845_SLAVE_GLM,
- SDM845_SLAVE_GFX3D_CFG,
- SDM845_SLAVE_IMEM_CFG,
- SDM845_SLAVE_IPA_CFG,
- SDM845_SLAVE_CNOC_MNOC_CFG,
- SDM845_SLAVE_PCIE_0_CFG,
- SDM845_SLAVE_PCIE_1_CFG,
- SDM845_SLAVE_PDM,
- SDM845_SLAVE_SOUTH_PHY_CFG,
- SDM845_SLAVE_PIMEM_CFG,
- SDM845_SLAVE_PRNG,
- SDM845_SLAVE_QDSS_CFG,
- SDM845_SLAVE_BLSP_2,
- SDM845_SLAVE_BLSP_1,
- SDM845_SLAVE_SDCC_2,
- SDM845_SLAVE_SDCC_4,
- SDM845_SLAVE_SNOC_CFG,
- SDM845_SLAVE_SPDM_WRAPPER,
- SDM845_SLAVE_SPSS_CFG,
- SDM845_SLAVE_TCSR,
- SDM845_SLAVE_TLMM_NORTH,
- SDM845_SLAVE_TLMM_SOUTH,
- SDM845_SLAVE_TSIF,
- SDM845_SLAVE_UFS_CARD_CFG,
- SDM845_SLAVE_UFS_MEM_CFG,
- SDM845_SLAVE_USB3_0,
- SDM845_SLAVE_USB3_1,
- SDM845_SLAVE_VENUS_CFG,
- SDM845_SLAVE_VSENSE_CTRL_CFG,
- SDM845_SLAVE_SERVICE_CNOC
- },
+ .link_nodes = { &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_dsp_cfg,
+ &qhs_cpr_cx,
+ &qhs_crypto0_cfg,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_pcie0_cfg,
+ &qhs_pcie_gen3_cfg,
+ &qhs_pdm,
+ &qhs_phy_refgen_south,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qupv3_north,
+ &qhs_qupv3_south,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_snoc_cfg,
+ &qhs_spdm,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm_north,
+ &qhs_tlmm_south,
+ &qhs_tsif,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &srvc_cnoc },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = SDM845_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 43,
- .links = { SDM845_SLAVE_A1NOC_CFG,
- SDM845_SLAVE_A2NOC_CFG,
- SDM845_SLAVE_AOP,
- SDM845_SLAVE_AOSS,
- SDM845_SLAVE_CAMERA_CFG,
- SDM845_SLAVE_CLK_CTL,
- SDM845_SLAVE_CDSP_CFG,
- SDM845_SLAVE_RBCPR_CX_CFG,
- SDM845_SLAVE_CRYPTO_0_CFG,
- SDM845_SLAVE_DCC_CFG,
- SDM845_SLAVE_CNOC_DDRSS,
- SDM845_SLAVE_DISPLAY_CFG,
- SDM845_SLAVE_GLM,
- SDM845_SLAVE_GFX3D_CFG,
- SDM845_SLAVE_IMEM_CFG,
- SDM845_SLAVE_IPA_CFG,
- SDM845_SLAVE_CNOC_MNOC_CFG,
- SDM845_SLAVE_PCIE_0_CFG,
- SDM845_SLAVE_PCIE_1_CFG,
- SDM845_SLAVE_PDM,
- SDM845_SLAVE_SOUTH_PHY_CFG,
- SDM845_SLAVE_PIMEM_CFG,
- SDM845_SLAVE_PRNG,
- SDM845_SLAVE_QDSS_CFG,
- SDM845_SLAVE_BLSP_2,
- SDM845_SLAVE_BLSP_1,
- SDM845_SLAVE_SDCC_2,
- SDM845_SLAVE_SDCC_4,
- SDM845_SLAVE_SNOC_CFG,
- SDM845_SLAVE_SPDM_WRAPPER,
- SDM845_SLAVE_SPSS_CFG,
- SDM845_SLAVE_TCSR,
- SDM845_SLAVE_TLMM_NORTH,
- SDM845_SLAVE_TLMM_SOUTH,
- SDM845_SLAVE_TSIF,
- SDM845_SLAVE_UFS_CARD_CFG,
- SDM845_SLAVE_UFS_MEM_CFG,
- SDM845_SLAVE_USB3_0,
- SDM845_SLAVE_USB3_1,
- SDM845_SLAVE_VENUS_CFG,
- SDM845_SLAVE_VSENSE_CTRL_CFG,
- SDM845_SLAVE_CNOC_A2NOC,
- SDM845_SLAVE_SERVICE_CNOC
- },
+ .link_nodes = { &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_dsp_cfg,
+ &qhs_cpr_cx,
+ &qhs_crypto0_cfg,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_pcie0_cfg,
+ &qhs_pcie_gen3_cfg,
+ &qhs_pdm,
+ &qhs_phy_refgen_south,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qupv3_north,
+ &qhs_qupv3_south,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_snoc_cfg,
+ &qhs_spdm,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm_north,
+ &qhs_tlmm_south,
+ &qhs_tsif,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_cnoc_a2noc,
+ &srvc_cnoc },
};
static struct qcom_icc_node qhm_cnoc = {
.name = "qhm_cnoc",
- .id = SDM845_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SDM845_SLAVE_LLCC_CFG,
- SDM845_SLAVE_MEM_NOC_CFG
- },
+ .link_nodes = { &qhs_llcc,
+ &qhs_memnoc },
};
static struct qcom_icc_node acm_l3 = {
.name = "acm_l3",
- .id = SDM845_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SDM845_SLAVE_GNOC_SNOC,
- SDM845_SLAVE_GNOC_MEM_NOC,
- SDM845_SLAVE_SERVICE_GNOC
- },
+ .link_nodes = { &qns_gladiator_sodv,
+ &qns_gnoc_memnoc,
+ &srvc_gnoc },
};
static struct qcom_icc_node pm_gnoc_cfg = {
.name = "pm_gnoc_cfg",
- .id = SDM845_MASTER_GNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_SERVICE_GNOC },
+ .link_nodes = { &srvc_gnoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SDM845_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node acm_tcu = {
.name = "acm_tcu",
- .id = SDM845_MASTER_TCU_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
- .links = { SDM845_SLAVE_MEM_NOC_GNOC,
- SDM845_SLAVE_LLCC,
- SDM845_SLAVE_MEM_NOC_SNOC
- },
+ .link_nodes = { &qns_apps_io,
+ &qns_llcc,
+ &qns_memnoc_snoc },
};
static struct qcom_icc_node qhm_memnoc_cfg = {
.name = "qhm_memnoc_cfg",
- .id = SDM845_MASTER_MEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SDM845_SLAVE_MSS_PROC_MS_MPU_CFG,
- SDM845_SLAVE_SERVICE_MEM_NOC
- },
+ .link_nodes = { &qhs_mdsp_ms_mpu_cfg,
+ &srvc_memnoc },
};
static struct qcom_icc_node qnm_apps = {
.name = "qnm_apps",
- .id = SDM845_MASTER_GNOC_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SDM845_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SDM845_SLAVE_MEM_NOC_GNOC,
- SDM845_SLAVE_LLCC
- },
+ .link_nodes = { &qns_apps_io,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SDM845_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 3,
- .links = { SDM845_SLAVE_MEM_NOC_GNOC,
- SDM845_SLAVE_LLCC,
- SDM845_SLAVE_MEM_NOC_SNOC
- },
+ .link_nodes = { &qns_apps_io,
+ &qns_llcc,
+ &qns_memnoc_snoc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SDM845_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SDM845_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SDM845_SLAVE_MEM_NOC_GNOC,
- SDM845_SLAVE_LLCC
- },
+ .link_nodes = { &qns_apps_io,
+ &qns_llcc },
};
static struct qcom_icc_node qxm_gpu = {
.name = "qxm_gpu",
- .id = SDM845_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SDM845_SLAVE_MEM_NOC_GNOC,
- SDM845_SLAVE_LLCC,
- SDM845_SLAVE_MEM_NOC_SNOC
- },
+ .link_nodes = { &qns_apps_io,
+ &qns_llcc,
+ &qns_memnoc_snoc },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
- .id = SDM845_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qxm_camnoc_hf0 = {
.name = "qxm_camnoc_hf0",
- .id = SDM845_MASTER_CAMNOC_HF0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_hf1 = {
.name = "qxm_camnoc_hf1",
- .id = SDM845_MASTER_CAMNOC_HF1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
- .id = SDM845_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = SDM845_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
- .id = SDM845_MASTER_MDP1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
- .id = SDM845_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
- .id = SDM845_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus1 = {
.name = "qxm_venus1",
- .id = SDM845_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
- .id = SDM845_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = SDM845_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SDM845_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
- .links = { SDM845_SLAVE_APPSS,
- SDM845_SLAVE_SNOC_CNOC,
- SDM845_SLAVE_SNOC_MEM_NOC_SF,
- SDM845_SLAVE_IMEM,
- SDM845_SLAVE_PIMEM,
- SDM845_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qhs_apss,
+ &qns_cnoc,
+ &qns_memnoc_sf,
+ &qxs_imem,
+ &qxs_pimem,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SDM845_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 9,
- .links = { SDM845_SLAVE_APPSS,
- SDM845_SLAVE_SNOC_CNOC,
- SDM845_SLAVE_SNOC_MEM_NOC_SF,
- SDM845_SLAVE_IMEM,
- SDM845_SLAVE_PCIE_0,
- SDM845_SLAVE_PCIE_1,
- SDM845_SLAVE_PIMEM,
- SDM845_SLAVE_QDSS_STM,
- SDM845_SLAVE_TCU
- },
+ .link_nodes = { &qhs_apss,
+ &qns_cnoc,
+ &qns_memnoc_sf,
+ &qxs_imem,
+ &qxs_pcie,
+ &qxs_pcie_gen3,
+ &qxs_pimem,
+ &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gladiator_sodv = {
.name = "qnm_gladiator_sodv",
- .id = SDM845_MASTER_GNOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 8,
- .links = { SDM845_SLAVE_APPSS,
- SDM845_SLAVE_SNOC_CNOC,
- SDM845_SLAVE_IMEM,
- SDM845_SLAVE_PCIE_0,
- SDM845_SLAVE_PCIE_1,
- SDM845_SLAVE_PIMEM,
- SDM845_SLAVE_QDSS_STM,
- SDM845_SLAVE_TCU
- },
+ .link_nodes = { &qhs_apss,
+ &qns_cnoc,
+ &qxs_imem,
+ &qxs_pcie,
+ &qxs_pcie_gen3,
+ &qxs_pimem,
+ &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_memnoc = {
.name = "qnm_memnoc",
- .id = SDM845_MASTER_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 5,
- .links = { SDM845_SLAVE_APPSS,
- SDM845_SLAVE_SNOC_CNOC,
- SDM845_SLAVE_IMEM,
- SDM845_SLAVE_PIMEM,
- SDM845_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qhs_apss,
+ &qns_cnoc,
+ &qxs_imem,
+ &qxs_pimem,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_pcie_anoc = {
.name = "qnm_pcie_anoc",
- .id = SDM845_MASTER_ANOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 5,
- .links = { SDM845_SLAVE_APPSS,
- SDM845_SLAVE_SNOC_CNOC,
- SDM845_SLAVE_SNOC_MEM_NOC_SF,
- SDM845_SLAVE_IMEM,
- SDM845_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qhs_apss,
+ &qns_cnoc,
+ &qns_memnoc_sf,
+ &qxs_imem,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SDM845_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SDM845_SLAVE_SNOC_MEM_NOC_GC,
- SDM845_SLAVE_IMEM
- },
+ .link_nodes = { &qns_memnoc_gc,
+ &qxs_imem },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SDM845_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SDM845_SLAVE_SNOC_MEM_NOC_GC,
- SDM845_SLAVE_IMEM
- },
+ .link_nodes = { &qns_memnoc_gc,
+ &qxs_imem },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SDM845_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDM845_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SDM845_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 1,
- .links = { 0 },
};
static struct qcom_icc_node qns_pcie_a1noc_snoc = {
.name = "qns_pcie_a1noc_snoc",
- .id = SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDM845_MASTER_ANOC_PCIE_SNOC },
+ .link_nodes = { &qnm_pcie_anoc },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SDM845_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDM845_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qns_pcie_snoc = {
.name = "qns_pcie_snoc",
- .id = SDM845_SLAVE_ANOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDM845_MASTER_ANOC_PCIE_SNOC },
+ .link_nodes = { &qnm_pcie_anoc },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SDM845_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
- .id = SDM845_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
- .id = SDM845_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_MASTER_A1NOC_CFG },
+ .link_nodes = { &qhm_a1noc_cfg },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
- .id = SDM845_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_MASTER_A2NOC_CFG },
+ .link_nodes = { &qhm_a2noc_cfg },
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
- .id = SDM845_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SDM845_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SDM845_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SDM845_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_dsp_cfg = {
.name = "qhs_compute_dsp_cfg",
- .id = SDM845_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SDM845_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SDM845_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
- .id = SDM845_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qhm_cnoc },
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = SDM845_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SDM845_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
- .id = SDM845_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SDM845_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SDM845_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SDM845_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
- .id = SDM845_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qhm_mnoc_cfg },
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SDM845_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_gen3_cfg = {
.name = "qhs_pcie_gen3_cfg",
- .id = SDM845_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SDM845_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_phy_refgen_south = {
.name = "qhs_phy_refgen_south",
- .id = SDM845_SLAVE_SOUTH_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SDM845_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SDM845_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SDM845_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_north = {
.name = "qhs_qupv3_north",
- .id = SDM845_SLAVE_BLSP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_south = {
.name = "qhs_qupv3_south",
- .id = SDM845_SLAVE_BLSP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SDM845_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SDM845_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = SDM845_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_spdm = {
.name = "qhs_spdm",
- .id = SDM845_SLAVE_SPDM_WRAPPER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
- .id = SDM845_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SDM845_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_north = {
.name = "qhs_tlmm_north",
- .id = SDM845_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_south = {
.name = "qhs_tlmm_south",
- .id = SDM845_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsif = {
.name = "qhs_tsif",
- .id = SDM845_SLAVE_TSIF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
- .id = SDM845_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SDM845_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SDM845_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
- .id = SDM845_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SDM845_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SDM845_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
- .id = SDM845_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_MASTER_CNOC_A2NOC },
+ .link_nodes = { &qnm_cnoc },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SDM845_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SDM845_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_memnoc = {
.name = "qhs_memnoc",
- .id = SDM845_SLAVE_MEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDM845_MASTER_MEM_NOC_CFG },
+ .link_nodes = { &qhm_memnoc_cfg },
};
static struct qcom_icc_node qns_gladiator_sodv = {
.name = "qns_gladiator_sodv",
- .id = SDM845_SLAVE_GNOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_MASTER_GNOC_SNOC },
+ .link_nodes = { &qnm_gladiator_sodv },
};
static struct qcom_icc_node qns_gnoc_memnoc = {
.name = "qns_gnoc_memnoc",
- .id = SDM845_SLAVE_GNOC_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_MASTER_GNOC_MEM_NOC },
+ .link_nodes = { &qnm_apps },
};
static struct qcom_icc_node srvc_gnoc = {
.name = "srvc_gnoc",
- .id = SDM845_SLAVE_SERVICE_GNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SDM845_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
- .id = SDM845_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_apps_io = {
.name = "qns_apps_io",
- .id = SDM845_SLAVE_MEM_NOC_GNOC,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SDM845_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { SDM845_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_memnoc_snoc = {
.name = "qns_memnoc_snoc",
- .id = SDM845_SLAVE_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_MASTER_MEM_NOC_SNOC },
+ .link_nodes = { &qnm_memnoc },
};
static struct qcom_icc_node srvc_memnoc = {
.name = "srvc_memnoc",
- .id = SDM845_SLAVE_SERVICE_MEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns2_mem_noc = {
.name = "qns2_mem_noc",
- .id = SDM845_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SDM845_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SDM845_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SDM845_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SDM845_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
- .id = SDM845_SLAVE_SNOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_MASTER_SNOC_CNOC },
+ .link_nodes = { &qnm_snoc },
};
static struct qcom_icc_node qns_memnoc_gc = {
.name = "qns_memnoc_gc",
- .id = SDM845_SLAVE_SNOC_MEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDM845_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_memnoc_sf = {
.name = "qns_memnoc_sf",
- .id = SDM845_SLAVE_SNOC_MEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDM845_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SDM845_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pcie = {
.name = "qxs_pcie",
- .id = SDM845_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pcie_gen3 = {
.name = "qxs_pcie_gen3",
- .id = SDM845_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SDM845_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SDM845_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SDM845_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SDM845_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
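
The change repeated across every node above is the same: the per-SoC numeric .id and the .links array of peer IDs are dropped, and each node now points at its peers directly through .link_nodes, so the ID header deleted below loses its last user. A minimal sketch of the pattern, using a deliberately simplified node type rather than the real struct qcom_icc_node from icc-rpmh.h:

/*
 * Illustration only: the real node struct carries more fields
 * (channels, buswidth, BCM data, ...) and a larger link array.
 */
struct sketch_node {
	const char *name;
	unsigned int num_links;
	struct sketch_node *link_nodes[4];
};

/*
 * Forward declaration, as in the blocks added at the top of each
 * converted driver: llcc_mc's initializer takes the address of a node
 * that is only defined further down the file.
 */
static struct sketch_node ebi;

static struct sketch_node llcc_mc = {
	.name = "llcc_mc",
	.num_links = 1,
	.link_nodes = { &ebi },		/* direct pointer, no numeric ID */
};

static struct sketch_node ebi = {
	.name = "ebi",			/* terminal slave, no onward links */
};

With the addresses wired at build time, the provider no longer needs the SDM845_MASTER_* / SDM845_SLAVE_* values to resolve link endpoints, which is why the header can be removed outright.
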
diff --git a/drivers/interconnect/qcom/sdm845.h b/drivers/interconnect/qcom/sdm845.h
deleted file mode 100644
index bc7e425ce985..000000000000
--- a/drivers/interconnect/qcom/sdm845.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SDM845_H__
-#define __DRIVERS_INTERCONNECT_QCOM_SDM845_H__
-
-#define SDM845_MASTER_A1NOC_CFG 1
-#define SDM845_MASTER_BLSP_1 2
-#define SDM845_MASTER_TSIF 3
-#define SDM845_MASTER_SDCC_2 4
-#define SDM845_MASTER_SDCC_4 5
-#define SDM845_MASTER_UFS_CARD 6
-#define SDM845_MASTER_UFS_MEM 7
-#define SDM845_MASTER_PCIE_0 8
-#define SDM845_MASTER_A2NOC_CFG 9
-#define SDM845_MASTER_QDSS_BAM 10
-#define SDM845_MASTER_BLSP_2 11
-#define SDM845_MASTER_CNOC_A2NOC 12
-#define SDM845_MASTER_CRYPTO 13
-#define SDM845_MASTER_IPA 14
-#define SDM845_MASTER_PCIE_1 15
-#define SDM845_MASTER_QDSS_ETR 16
-#define SDM845_MASTER_USB3_0 17
-#define SDM845_MASTER_USB3_1 18
-#define SDM845_MASTER_CAMNOC_HF0_UNCOMP 19
-#define SDM845_MASTER_CAMNOC_HF1_UNCOMP 20
-#define SDM845_MASTER_CAMNOC_SF_UNCOMP 21
-#define SDM845_MASTER_SPDM 22
-#define SDM845_MASTER_TIC 23
-#define SDM845_MASTER_SNOC_CNOC 24
-#define SDM845_MASTER_QDSS_DAP 25
-#define SDM845_MASTER_CNOC_DC_NOC 26
-#define SDM845_MASTER_APPSS_PROC 27
-#define SDM845_MASTER_GNOC_CFG 28
-#define SDM845_MASTER_LLCC 29
-#define SDM845_MASTER_TCU_0 30
-#define SDM845_MASTER_MEM_NOC_CFG 31
-#define SDM845_MASTER_GNOC_MEM_NOC 32
-#define SDM845_MASTER_MNOC_HF_MEM_NOC 33
-#define SDM845_MASTER_MNOC_SF_MEM_NOC 34
-#define SDM845_MASTER_SNOC_GC_MEM_NOC 35
-#define SDM845_MASTER_SNOC_SF_MEM_NOC 36
-#define SDM845_MASTER_GFX3D 37
-#define SDM845_MASTER_CNOC_MNOC_CFG 38
-#define SDM845_MASTER_CAMNOC_HF0 39
-#define SDM845_MASTER_CAMNOC_HF1 40
-#define SDM845_MASTER_CAMNOC_SF 41
-#define SDM845_MASTER_MDP0 42
-#define SDM845_MASTER_MDP1 43
-#define SDM845_MASTER_ROTATOR 44
-#define SDM845_MASTER_VIDEO_P0 45
-#define SDM845_MASTER_VIDEO_P1 46
-#define SDM845_MASTER_VIDEO_PROC 47
-#define SDM845_MASTER_SNOC_CFG 48
-#define SDM845_MASTER_A1NOC_SNOC 49
-#define SDM845_MASTER_A2NOC_SNOC 50
-#define SDM845_MASTER_GNOC_SNOC 51
-#define SDM845_MASTER_MEM_NOC_SNOC 52
-#define SDM845_MASTER_ANOC_PCIE_SNOC 53
-#define SDM845_MASTER_PIMEM 54
-#define SDM845_MASTER_GIC 55
-#define SDM845_SLAVE_A1NOC_SNOC 56
-#define SDM845_SLAVE_SERVICE_A1NOC 57
-#define SDM845_SLAVE_ANOC_PCIE_A1NOC_SNOC 58
-#define SDM845_SLAVE_A2NOC_SNOC 59
-#define SDM845_SLAVE_ANOC_PCIE_SNOC 60
-#define SDM845_SLAVE_SERVICE_A2NOC 61
-#define SDM845_SLAVE_CAMNOC_UNCOMP 62
-#define SDM845_SLAVE_A1NOC_CFG 63
-#define SDM845_SLAVE_A2NOC_CFG 64
-#define SDM845_SLAVE_AOP 65
-#define SDM845_SLAVE_AOSS 66
-#define SDM845_SLAVE_CAMERA_CFG 67
-#define SDM845_SLAVE_CLK_CTL 68
-#define SDM845_SLAVE_CDSP_CFG 69
-#define SDM845_SLAVE_RBCPR_CX_CFG 70
-#define SDM845_SLAVE_CRYPTO_0_CFG 71
-#define SDM845_SLAVE_DCC_CFG 72
-#define SDM845_SLAVE_CNOC_DDRSS 73
-#define SDM845_SLAVE_DISPLAY_CFG 74
-#define SDM845_SLAVE_GLM 75
-#define SDM845_SLAVE_GFX3D_CFG 76
-#define SDM845_SLAVE_IMEM_CFG 77
-#define SDM845_SLAVE_IPA_CFG 78
-#define SDM845_SLAVE_CNOC_MNOC_CFG 79
-#define SDM845_SLAVE_PCIE_0_CFG 80
-#define SDM845_SLAVE_PCIE_1_CFG 81
-#define SDM845_SLAVE_PDM 82
-#define SDM845_SLAVE_SOUTH_PHY_CFG 83
-#define SDM845_SLAVE_PIMEM_CFG 84
-#define SDM845_SLAVE_PRNG 85
-#define SDM845_SLAVE_QDSS_CFG 86
-#define SDM845_SLAVE_BLSP_2 87
-#define SDM845_SLAVE_BLSP_1 88
-#define SDM845_SLAVE_SDCC_2 89
-#define SDM845_SLAVE_SDCC_4 90
-#define SDM845_SLAVE_SNOC_CFG 91
-#define SDM845_SLAVE_SPDM_WRAPPER 92
-#define SDM845_SLAVE_SPSS_CFG 93
-#define SDM845_SLAVE_TCSR 94
-#define SDM845_SLAVE_TLMM_NORTH 95
-#define SDM845_SLAVE_TLMM_SOUTH 96
-#define SDM845_SLAVE_TSIF 97
-#define SDM845_SLAVE_UFS_CARD_CFG 98
-#define SDM845_SLAVE_UFS_MEM_CFG 99
-#define SDM845_SLAVE_USB3_0 100
-#define SDM845_SLAVE_USB3_1 101
-#define SDM845_SLAVE_VENUS_CFG 102
-#define SDM845_SLAVE_VSENSE_CTRL_CFG 103
-#define SDM845_SLAVE_CNOC_A2NOC 104
-#define SDM845_SLAVE_SERVICE_CNOC 105
-#define SDM845_SLAVE_LLCC_CFG 106
-#define SDM845_SLAVE_MEM_NOC_CFG 107
-#define SDM845_SLAVE_GNOC_SNOC 108
-#define SDM845_SLAVE_GNOC_MEM_NOC 109
-#define SDM845_SLAVE_SERVICE_GNOC 110
-#define SDM845_SLAVE_EBI1 111
-#define SDM845_SLAVE_MSS_PROC_MS_MPU_CFG 112
-#define SDM845_SLAVE_MEM_NOC_GNOC 113
-#define SDM845_SLAVE_LLCC 114
-#define SDM845_SLAVE_MEM_NOC_SNOC 115
-#define SDM845_SLAVE_SERVICE_MEM_NOC 116
-#define SDM845_SLAVE_MNOC_SF_MEM_NOC 117
-#define SDM845_SLAVE_MNOC_HF_MEM_NOC 118
-#define SDM845_SLAVE_SERVICE_MNOC 119
-#define SDM845_SLAVE_APPSS 120
-#define SDM845_SLAVE_SNOC_CNOC 121
-#define SDM845_SLAVE_SNOC_MEM_NOC_GC 122
-#define SDM845_SLAVE_SNOC_MEM_NOC_SF 123
-#define SDM845_SLAVE_IMEM 124
-#define SDM845_SLAVE_PCIE_0 125
-#define SDM845_SLAVE_PCIE_1 126
-#define SDM845_SLAVE_PIMEM 127
-#define SDM845_SLAVE_SERVICE_SNOC 128
-#define SDM845_SLAVE_QDSS_STM 129
-#define SDM845_SLAVE_TCU 130
-
-#endif /* __DRIVERS_INTERCONNECT_QCOM_SDM845_H__ */
diff --git a/drivers/interconnect/qcom/sdx55.c b/drivers/interconnect/qcom/sdx55.c
index 4117db046fa0..75ced1286919 100644
--- a/drivers/interconnect/qcom/sdx55.c
+++ b/drivers/interconnect/qcom/sdx55.c
@@ -17,628 +17,617 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sdx55.h"
+
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node acm_tcu;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node xm_apps_rdwr;
+static struct qcom_icc_node qhm_audio;
+static struct qcom_icc_node qhm_blsp1;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qpic;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qhm_spmi_fetcher1;
+static struct qcom_icc_node qnm_aggre_noc;
+static struct qcom_icc_node qnm_ipa;
+static struct qcom_icc_node qnm_memnoc;
+static struct qcom_icc_node qnm_memnoc_pcie;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node xm_emac;
+static struct qcom_icc_node xm_ipa2pcie_slv;
+static struct qcom_icc_node xm_pcie;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_sdc1;
+static struct qcom_icc_node xm_usb3;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_memnoc_snoc;
+static struct qcom_icc_node qns_sys_pcie;
+static struct qcom_icc_node qhs_aop;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qhs_audio;
+static struct qcom_icc_node qhs_blsp1;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_ecc_cfg;
+static struct qcom_icc_node qhs_emac_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_pcie_parf;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qpic;
+static struct qcom_icc_node qhs_sdc1;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_spmi_fetcher;
+static struct qcom_icc_node qhs_spmi_vgi_coex;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_usb3;
+static struct qcom_icc_node qhs_usb3_phy;
+static struct qcom_icc_node qns_aggre_noc;
+static struct qcom_icc_node qns_snoc_memnoc;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_pcie;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SDX55_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SDX55_SLAVE_EBI_CH0 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node acm_tcu = {
.name = "acm_tcu",
- .id = SDX55_MASTER_TCU_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
- .links = { SDX55_SLAVE_LLCC,
- SDX55_SLAVE_MEM_NOC_SNOC,
- SDX55_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_memnoc_snoc,
+ &qns_sys_pcie },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SDX55_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node xm_apps_rdwr = {
.name = "xm_apps_rdwr",
- .id = SDX55_MASTER_AMPSS_M0,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SDX55_SLAVE_LLCC,
- SDX55_SLAVE_MEM_NOC_SNOC,
- SDX55_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_memnoc_snoc,
+ &qns_sys_pcie },
};
static struct qcom_icc_node qhm_audio = {
.name = "qhm_audio",
- .id = SDX55_MASTER_AUDIO,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX55_SLAVE_ANOC_SNOC },
+ .link_nodes = { &qns_aggre_noc },
};
static struct qcom_icc_node qhm_blsp1 = {
.name = "qhm_blsp1",
- .id = SDX55_MASTER_BLSP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX55_SLAVE_ANOC_SNOC },
+ .link_nodes = { &qns_aggre_noc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SDX55_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 28,
- .links = { SDX55_SLAVE_SNOC_CFG,
- SDX55_SLAVE_EMAC_CFG,
- SDX55_SLAVE_USB3,
- SDX55_SLAVE_TLMM,
- SDX55_SLAVE_SPMI_FETCHER,
- SDX55_SLAVE_QDSS_CFG,
- SDX55_SLAVE_PDM,
- SDX55_SLAVE_SNOC_MEM_NOC_GC,
- SDX55_SLAVE_TCSR,
- SDX55_SLAVE_CNOC_DDRSS,
- SDX55_SLAVE_SPMI_VGI_COEX,
- SDX55_SLAVE_QPIC,
- SDX55_SLAVE_OCIMEM,
- SDX55_SLAVE_IPA_CFG,
- SDX55_SLAVE_USB3_PHY_CFG,
- SDX55_SLAVE_AOP,
- SDX55_SLAVE_BLSP_1,
- SDX55_SLAVE_SDCC_1,
- SDX55_SLAVE_CNOC_MSS,
- SDX55_SLAVE_PCIE_PARF,
- SDX55_SLAVE_ECC_CFG,
- SDX55_SLAVE_AUDIO,
- SDX55_SLAVE_AOSS,
- SDX55_SLAVE_PRNG,
- SDX55_SLAVE_CRYPTO_0_CFG,
- SDX55_SLAVE_TCU,
- SDX55_SLAVE_CLK_CTL,
- SDX55_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &qhs_snoc_cfg,
+ &qhs_emac_cfg,
+ &qhs_usb3,
+ &qhs_tlmm,
+ &qhs_spmi_fetcher,
+ &qhs_qdss_cfg,
+ &qhs_pdm,
+ &qns_snoc_memnoc,
+ &qhs_tcsr,
+ &qhs_ddrss_cfg,
+ &qhs_spmi_vgi_coex,
+ &qhs_qpic,
+ &qxs_imem,
+ &qhs_ipa,
+ &qhs_usb3_phy,
+ &qhs_aop,
+ &qhs_blsp1,
+ &qhs_sdc1,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_ecc_cfg,
+ &qhs_audio,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_crypto0_cfg,
+ &xs_sys_tcu_cfg,
+ &qhs_clk_ctl,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node qhm_qpic = {
.name = "qhm_qpic",
- .id = SDX55_MASTER_QPIC,
.channels = 1,
.buswidth = 4,
.num_links = 5,
- .links = { SDX55_SLAVE_AOSS,
- SDX55_SLAVE_IPA_CFG,
- SDX55_SLAVE_ANOC_SNOC,
- SDX55_SLAVE_AOP,
- SDX55_SLAVE_AUDIO
- },
+ .link_nodes = { &qhs_aoss,
+ &qhs_ipa,
+ &qns_aggre_noc,
+ &qhs_aop,
+ &qhs_audio },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = SDX55_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX55_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qhm_spmi_fetcher1 = {
.name = "qhm_spmi_fetcher1",
- .id = SDX55_MASTER_SPMI_FETCHER,
.channels = 1,
.buswidth = 4,
.num_links = 3,
- .links = { SDX55_SLAVE_AOSS,
- SDX55_SLAVE_ANOC_SNOC,
- SDX55_SLAVE_AOP
- },
+ .link_nodes = { &qhs_aoss,
+ &qns_aggre_noc,
+ &qhs_aop },
};
static struct qcom_icc_node qnm_aggre_noc = {
.name = "qnm_aggre_noc",
- .id = SDX55_MASTER_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 30,
- .links = { SDX55_SLAVE_PCIE_0,
- SDX55_SLAVE_SNOC_CFG,
- SDX55_SLAVE_SDCC_1,
- SDX55_SLAVE_TLMM,
- SDX55_SLAVE_SPMI_FETCHER,
- SDX55_SLAVE_QDSS_CFG,
- SDX55_SLAVE_PDM,
- SDX55_SLAVE_SNOC_MEM_NOC_GC,
- SDX55_SLAVE_TCSR,
- SDX55_SLAVE_CNOC_DDRSS,
- SDX55_SLAVE_SPMI_VGI_COEX,
- SDX55_SLAVE_QDSS_STM,
- SDX55_SLAVE_QPIC,
- SDX55_SLAVE_OCIMEM,
- SDX55_SLAVE_IPA_CFG,
- SDX55_SLAVE_USB3_PHY_CFG,
- SDX55_SLAVE_AOP,
- SDX55_SLAVE_BLSP_1,
- SDX55_SLAVE_USB3,
- SDX55_SLAVE_CNOC_MSS,
- SDX55_SLAVE_PCIE_PARF,
- SDX55_SLAVE_ECC_CFG,
- SDX55_SLAVE_APPSS,
- SDX55_SLAVE_AUDIO,
- SDX55_SLAVE_AOSS,
- SDX55_SLAVE_PRNG,
- SDX55_SLAVE_CRYPTO_0_CFG,
- SDX55_SLAVE_TCU,
- SDX55_SLAVE_CLK_CTL,
- SDX55_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &xs_pcie,
+ &qhs_snoc_cfg,
+ &qhs_sdc1,
+ &qhs_tlmm,
+ &qhs_spmi_fetcher,
+ &qhs_qdss_cfg,
+ &qhs_pdm,
+ &qns_snoc_memnoc,
+ &qhs_tcsr,
+ &qhs_ddrss_cfg,
+ &qhs_spmi_vgi_coex,
+ &xs_qdss_stm,
+ &qhs_qpic,
+ &qxs_imem,
+ &qhs_ipa,
+ &qhs_usb3_phy,
+ &qhs_aop,
+ &qhs_blsp1,
+ &qhs_usb3,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_ecc_cfg,
+ &qhs_apss,
+ &qhs_audio,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_crypto0_cfg,
+ &xs_sys_tcu_cfg,
+ &qhs_clk_ctl,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node qnm_ipa = {
.name = "qnm_ipa",
- .id = SDX55_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 27,
- .links = { SDX55_SLAVE_SNOC_CFG,
- SDX55_SLAVE_EMAC_CFG,
- SDX55_SLAVE_USB3,
- SDX55_SLAVE_AOSS,
- SDX55_SLAVE_SPMI_FETCHER,
- SDX55_SLAVE_QDSS_CFG,
- SDX55_SLAVE_PDM,
- SDX55_SLAVE_SNOC_MEM_NOC_GC,
- SDX55_SLAVE_TCSR,
- SDX55_SLAVE_CNOC_DDRSS,
- SDX55_SLAVE_QDSS_STM,
- SDX55_SLAVE_QPIC,
- SDX55_SLAVE_OCIMEM,
- SDX55_SLAVE_IPA_CFG,
- SDX55_SLAVE_USB3_PHY_CFG,
- SDX55_SLAVE_AOP,
- SDX55_SLAVE_BLSP_1,
- SDX55_SLAVE_SDCC_1,
- SDX55_SLAVE_CNOC_MSS,
- SDX55_SLAVE_PCIE_PARF,
- SDX55_SLAVE_ECC_CFG,
- SDX55_SLAVE_AUDIO,
- SDX55_SLAVE_TLMM,
- SDX55_SLAVE_PRNG,
- SDX55_SLAVE_CRYPTO_0_CFG,
- SDX55_SLAVE_CLK_CTL,
- SDX55_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &qhs_snoc_cfg,
+ &qhs_emac_cfg,
+ &qhs_usb3,
+ &qhs_aoss,
+ &qhs_spmi_fetcher,
+ &qhs_qdss_cfg,
+ &qhs_pdm,
+ &qns_snoc_memnoc,
+ &qhs_tcsr,
+ &qhs_ddrss_cfg,
+ &xs_qdss_stm,
+ &qhs_qpic,
+ &qxs_imem,
+ &qhs_ipa,
+ &qhs_usb3_phy,
+ &qhs_aop,
+ &qhs_blsp1,
+ &qhs_sdc1,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_ecc_cfg,
+ &qhs_audio,
+ &qhs_tlmm,
+ &qhs_prng,
+ &qhs_crypto0_cfg,
+ &qhs_clk_ctl,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node qnm_memnoc = {
.name = "qnm_memnoc",
- .id = SDX55_MASTER_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 29,
- .links = { SDX55_SLAVE_SNOC_CFG,
- SDX55_SLAVE_EMAC_CFG,
- SDX55_SLAVE_USB3,
- SDX55_SLAVE_TLMM,
- SDX55_SLAVE_SPMI_FETCHER,
- SDX55_SLAVE_QDSS_CFG,
- SDX55_SLAVE_PDM,
- SDX55_SLAVE_TCSR,
- SDX55_SLAVE_CNOC_DDRSS,
- SDX55_SLAVE_SPMI_VGI_COEX,
- SDX55_SLAVE_QDSS_STM,
- SDX55_SLAVE_QPIC,
- SDX55_SLAVE_OCIMEM,
- SDX55_SLAVE_IPA_CFG,
- SDX55_SLAVE_USB3_PHY_CFG,
- SDX55_SLAVE_AOP,
- SDX55_SLAVE_BLSP_1,
- SDX55_SLAVE_SDCC_1,
- SDX55_SLAVE_CNOC_MSS,
- SDX55_SLAVE_PCIE_PARF,
- SDX55_SLAVE_ECC_CFG,
- SDX55_SLAVE_APPSS,
- SDX55_SLAVE_AUDIO,
- SDX55_SLAVE_AOSS,
- SDX55_SLAVE_PRNG,
- SDX55_SLAVE_CRYPTO_0_CFG,
- SDX55_SLAVE_TCU,
- SDX55_SLAVE_CLK_CTL,
- SDX55_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &qhs_snoc_cfg,
+ &qhs_emac_cfg,
+ &qhs_usb3,
+ &qhs_tlmm,
+ &qhs_spmi_fetcher,
+ &qhs_qdss_cfg,
+ &qhs_pdm,
+ &qhs_tcsr,
+ &qhs_ddrss_cfg,
+ &qhs_spmi_vgi_coex,
+ &xs_qdss_stm,
+ &qhs_qpic,
+ &qxs_imem,
+ &qhs_ipa,
+ &qhs_usb3_phy,
+ &qhs_aop,
+ &qhs_blsp1,
+ &qhs_sdc1,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_ecc_cfg,
+ &qhs_apss,
+ &qhs_audio,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_crypto0_cfg,
+ &xs_sys_tcu_cfg,
+ &qhs_clk_ctl,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node qnm_memnoc_pcie = {
.name = "qnm_memnoc_pcie",
- .id = SDX55_MASTER_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_SLAVE_PCIE_0 },
+ .link_nodes = { &xs_pcie },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SDX55_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
- .links = { SDX55_SLAVE_AOSS,
- SDX55_SLAVE_ANOC_SNOC,
- SDX55_SLAVE_AOP
- },
+ .link_nodes = { &qhs_aoss,
+ &qns_aggre_noc,
+ &qhs_aop },
};
static struct qcom_icc_node xm_emac = {
.name = "xm_emac",
- .id = SDX55_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_SLAVE_ANOC_SNOC },
+ .link_nodes = { &qns_aggre_noc },
};
static struct qcom_icc_node xm_ipa2pcie_slv = {
.name = "xm_ipa2pcie_slv",
- .id = SDX55_MASTER_IPA_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_SLAVE_PCIE_0 },
+ .link_nodes = { &xs_pcie },
};
static struct qcom_icc_node xm_pcie = {
.name = "xm_pcie",
- .id = SDX55_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_SLAVE_ANOC_SNOC },
+ .link_nodes = { &qns_aggre_noc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SDX55_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 28,
- .links = { SDX55_SLAVE_SNOC_CFG,
- SDX55_SLAVE_EMAC_CFG,
- SDX55_SLAVE_USB3,
- SDX55_SLAVE_AOSS,
- SDX55_SLAVE_SPMI_FETCHER,
- SDX55_SLAVE_QDSS_CFG,
- SDX55_SLAVE_PDM,
- SDX55_SLAVE_SNOC_MEM_NOC_GC,
- SDX55_SLAVE_TCSR,
- SDX55_SLAVE_CNOC_DDRSS,
- SDX55_SLAVE_SPMI_VGI_COEX,
- SDX55_SLAVE_QPIC,
- SDX55_SLAVE_OCIMEM,
- SDX55_SLAVE_IPA_CFG,
- SDX55_SLAVE_USB3_PHY_CFG,
- SDX55_SLAVE_AOP,
- SDX55_SLAVE_BLSP_1,
- SDX55_SLAVE_SDCC_1,
- SDX55_SLAVE_CNOC_MSS,
- SDX55_SLAVE_PCIE_PARF,
- SDX55_SLAVE_ECC_CFG,
- SDX55_SLAVE_AUDIO,
- SDX55_SLAVE_AOSS,
- SDX55_SLAVE_PRNG,
- SDX55_SLAVE_CRYPTO_0_CFG,
- SDX55_SLAVE_TCU,
- SDX55_SLAVE_CLK_CTL,
- SDX55_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &qhs_snoc_cfg,
+ &qhs_emac_cfg,
+ &qhs_usb3,
+ &qhs_aoss,
+ &qhs_spmi_fetcher,
+ &qhs_qdss_cfg,
+ &qhs_pdm,
+ &qns_snoc_memnoc,
+ &qhs_tcsr,
+ &qhs_ddrss_cfg,
+ &qhs_spmi_vgi_coex,
+ &qhs_qpic,
+ &qxs_imem,
+ &qhs_ipa,
+ &qhs_usb3_phy,
+ &qhs_aop,
+ &qhs_blsp1,
+ &qhs_sdc1,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_ecc_cfg,
+ &qhs_audio,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_crypto0_cfg,
+ &xs_sys_tcu_cfg,
+ &qhs_clk_ctl,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
- .id = SDX55_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.num_links = 5,
- .links = { SDX55_SLAVE_AOSS,
- SDX55_SLAVE_IPA_CFG,
- SDX55_SLAVE_ANOC_SNOC,
- SDX55_SLAVE_AOP,
- SDX55_SLAVE_AUDIO
- },
+ .link_nodes = { &qhs_aoss,
+ &qhs_ipa,
+ &qns_aggre_noc,
+ &qhs_aop,
+ &qhs_audio },
};
static struct qcom_icc_node xm_usb3 = {
.name = "xm_usb3",
- .id = SDX55_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_SLAVE_ANOC_SNOC },
+ .link_nodes = { &qns_aggre_noc },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SDX55_SLAVE_EBI_CH0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SDX55_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDX55_SLAVE_EBI_CH0 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qns_memnoc_snoc = {
.name = "qns_memnoc_snoc",
- .id = SDX55_SLAVE_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_MASTER_MEM_NOC_SNOC },
+ .link_nodes = { &qnm_memnoc },
};
static struct qcom_icc_node qns_sys_pcie = {
.name = "qns_sys_pcie",
- .id = SDX55_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_MASTER_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_memnoc_pcie },
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
- .id = SDX55_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SDX55_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SDX55_SLAVE_APPSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_audio = {
.name = "qhs_audio",
- .id = SDX55_SLAVE_AUDIO,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_blsp1 = {
.name = "qhs_blsp1",
- .id = SDX55_SLAVE_BLSP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SDX55_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SDX55_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = SDX55_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ecc_cfg = {
.name = "qhs_ecc_cfg",
- .id = SDX55_SLAVE_ECC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac_cfg = {
.name = "qhs_emac_cfg",
- .id = SDX55_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SDX55_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SDX55_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SDX55_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_parf = {
.name = "qhs_pcie_parf",
- .id = SDX55_SLAVE_PCIE_PARF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SDX55_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SDX55_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SDX55_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qpic = {
.name = "qhs_qpic",
- .id = SDX55_SLAVE_QPIC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
- .id = SDX55_SLAVE_SDCC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = SDX55_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX55_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_spmi_fetcher = {
.name = "qhs_spmi_fetcher",
- .id = SDX55_SLAVE_SPMI_FETCHER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spmi_vgi_coex = {
.name = "qhs_spmi_vgi_coex",
- .id = SDX55_SLAVE_SPMI_VGI_COEX,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SDX55_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SDX55_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
- .id = SDX55_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_phy = {
.name = "qhs_usb3_phy",
- .id = SDX55_SLAVE_USB3_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_aggre_noc = {
.name = "qns_aggre_noc",
- .id = SDX55_SLAVE_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_MASTER_ANOC_SNOC },
+ .link_nodes = { &qnm_aggre_noc },
};
static struct qcom_icc_node qns_snoc_memnoc = {
.name = "qns_snoc_memnoc",
- .id = SDX55_SLAVE_SNOC_MEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX55_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SDX55_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SDX55_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie = {
.name = "xs_pcie",
- .id = SDX55_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SDX55_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SDX55_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
diff --git a/drivers/interconnect/qcom/sdx55.h b/drivers/interconnect/qcom/sdx55.h
deleted file mode 100644
index 46cbabec8aa1..000000000000
--- a/drivers/interconnect/qcom/sdx55.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2021, Linaro Ltd.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX55_H
-#define __DRIVERS_INTERCONNECT_QCOM_SDX55_H
-
-/* 0 was used by MASTER_IPA_CORE, now represented as RPMh clock */
-#define SDX55_MASTER_LLCC 1
-#define SDX55_MASTER_TCU_0 2
-#define SDX55_MASTER_SNOC_GC_MEM_NOC 3
-#define SDX55_MASTER_AMPSS_M0 4
-#define SDX55_MASTER_AUDIO 5
-#define SDX55_MASTER_BLSP_1 6
-#define SDX55_MASTER_QDSS_BAM 7
-#define SDX55_MASTER_QPIC 8
-#define SDX55_MASTER_SNOC_CFG 9
-#define SDX55_MASTER_SPMI_FETCHER 10
-#define SDX55_MASTER_ANOC_SNOC 11
-#define SDX55_MASTER_IPA 12
-#define SDX55_MASTER_MEM_NOC_SNOC 13
-#define SDX55_MASTER_MEM_NOC_PCIE_SNOC 14
-#define SDX55_MASTER_CRYPTO_CORE_0 15
-#define SDX55_MASTER_EMAC 16
-#define SDX55_MASTER_IPA_PCIE 17
-#define SDX55_MASTER_PCIE 18
-#define SDX55_MASTER_QDSS_ETR 19
-#define SDX55_MASTER_SDCC_1 20
-#define SDX55_MASTER_USB3 21
-/* 22 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
-#define SDX55_SLAVE_EBI_CH0 23
-#define SDX55_SLAVE_LLCC 24
-#define SDX55_SLAVE_MEM_NOC_SNOC 25
-#define SDX55_SLAVE_MEM_NOC_PCIE_SNOC 26
-#define SDX55_SLAVE_ANOC_SNOC 27
-#define SDX55_SLAVE_SNOC_CFG 28
-#define SDX55_SLAVE_EMAC_CFG 29
-#define SDX55_SLAVE_USB3 30
-#define SDX55_SLAVE_TLMM 31
-#define SDX55_SLAVE_SPMI_FETCHER 32
-#define SDX55_SLAVE_QDSS_CFG 33
-#define SDX55_SLAVE_PDM 34
-#define SDX55_SLAVE_SNOC_MEM_NOC_GC 35
-#define SDX55_SLAVE_TCSR 36
-#define SDX55_SLAVE_CNOC_DDRSS 37
-#define SDX55_SLAVE_SPMI_VGI_COEX 38
-#define SDX55_SLAVE_QPIC 39
-#define SDX55_SLAVE_OCIMEM 40
-#define SDX55_SLAVE_IPA_CFG 41
-#define SDX55_SLAVE_USB3_PHY_CFG 42
-#define SDX55_SLAVE_AOP 43
-#define SDX55_SLAVE_BLSP_1 44
-#define SDX55_SLAVE_SDCC_1 45
-#define SDX55_SLAVE_CNOC_MSS 46
-#define SDX55_SLAVE_PCIE_PARF 47
-#define SDX55_SLAVE_ECC_CFG 48
-#define SDX55_SLAVE_AUDIO 49
-#define SDX55_SLAVE_AOSS 51
-#define SDX55_SLAVE_PRNG 52
-#define SDX55_SLAVE_CRYPTO_0_CFG 53
-#define SDX55_SLAVE_TCU 54
-#define SDX55_SLAVE_CLK_CTL 55
-#define SDX55_SLAVE_IMEM_CFG 56
-#define SDX55_SLAVE_SERVICE_SNOC 57
-#define SDX55_SLAVE_PCIE_0 58
-#define SDX55_SLAVE_QDSS_STM 59
-#define SDX55_SLAVE_APPSS 60
-
-#endif
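
One hand-maintained invariant survives the conversion: .num_links must still match the number of entries in the .link_nodes initializer (28 pointers for sdx55's qhm_qdss_bam above, 26 for the sdx65 variant below). Nothing in the patch checks this. A hypothetical helper, not part of this series or of icc-rpmh.h, could derive the count at compile time from the initializer itself:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

/*
 * Hypothetical convenience macro (an assumption, not in this patch):
 * count the pointer list once via a compound literal so .num_links
 * can never drift from the initializer it describes.
 */
#define ICC_LINK_NODES(...)						\
	.num_links = ARRAY_SIZE(((struct qcom_icc_node *[]){ __VA_ARGS__ })), \
	.link_nodes = { __VA_ARGS__ }

/* Usage sketch:
 *
 *	static struct qcom_icc_node qhm_audio = {
 *		.name = "qhm_audio",
 *		.channels = 1,
 *		.buswidth = 4,
 *		ICC_LINK_NODES(&qns_aggre_noc),
 *	};
 */
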
diff --git a/drivers/interconnect/qcom/sdx65.c b/drivers/interconnect/qcom/sdx65.c
index d3a6c6c148e5..6c5b4e1ec82f 100644
--- a/drivers/interconnect/qcom/sdx65.c
+++ b/drivers/interconnect/qcom/sdx65.c
@@ -13,593 +13,582 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sdx65.h"
+
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node acm_tcu;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node xm_apps_rdwr;
+static struct qcom_icc_node qhm_audio;
+static struct qcom_icc_node qhm_blsp1;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qpic;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qhm_spmi_fetcher1;
+static struct qcom_icc_node qnm_aggre_noc;
+static struct qcom_icc_node qnm_ipa;
+static struct qcom_icc_node qnm_memnoc;
+static struct qcom_icc_node qnm_memnoc_pcie;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node xm_ipa2pcie_slv;
+static struct qcom_icc_node xm_pcie;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_sdc1;
+static struct qcom_icc_node xm_usb3;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_memnoc_snoc;
+static struct qcom_icc_node qns_sys_pcie;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qhs_audio;
+static struct qcom_icc_node qhs_blsp1;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_ecc_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_pcie_parf;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qpic;
+static struct qcom_icc_node qhs_sdc1;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_spmi_fetcher;
+static struct qcom_icc_node qhs_spmi_vgi_coex;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_usb3;
+static struct qcom_icc_node qhs_usb3_phy;
+static struct qcom_icc_node qns_aggre_noc;
+static struct qcom_icc_node qns_snoc_memnoc;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_pcie;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SDX65_MASTER_LLCC,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX65_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node acm_tcu = {
.name = "acm_tcu",
- .id = SDX65_MASTER_TCU_0,
.channels = 1,
.buswidth = 8,
.num_links = 3,
- .links = { SDX65_SLAVE_LLCC,
- SDX65_SLAVE_MEM_NOC_SNOC,
- SDX65_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_memnoc_snoc,
+ &qns_sys_pcie },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SDX65_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDX65_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node xm_apps_rdwr = {
.name = "xm_apps_rdwr",
- .id = SDX65_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SDX65_SLAVE_LLCC,
- SDX65_SLAVE_MEM_NOC_SNOC,
- SDX65_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_memnoc_snoc,
+ &qns_sys_pcie },
};
static struct qcom_icc_node qhm_audio = {
.name = "qhm_audio",
- .id = SDX65_MASTER_AUDIO,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX65_SLAVE_ANOC_SNOC },
+ .link_nodes = { &qns_aggre_noc },
};
static struct qcom_icc_node qhm_blsp1 = {
.name = "qhm_blsp1",
- .id = SDX65_MASTER_BLSP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX65_SLAVE_ANOC_SNOC },
+ .link_nodes = { &qns_aggre_noc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SDX65_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 26,
- .links = { SDX65_SLAVE_AOSS,
- SDX65_SLAVE_AUDIO,
- SDX65_SLAVE_BLSP_1,
- SDX65_SLAVE_CLK_CTL,
- SDX65_SLAVE_CRYPTO_0_CFG,
- SDX65_SLAVE_CNOC_DDRSS,
- SDX65_SLAVE_ECC_CFG,
- SDX65_SLAVE_IMEM_CFG,
- SDX65_SLAVE_IPA_CFG,
- SDX65_SLAVE_CNOC_MSS,
- SDX65_SLAVE_PCIE_PARF,
- SDX65_SLAVE_PDM,
- SDX65_SLAVE_PRNG,
- SDX65_SLAVE_QDSS_CFG,
- SDX65_SLAVE_QPIC,
- SDX65_SLAVE_SDCC_1,
- SDX65_SLAVE_SNOC_CFG,
- SDX65_SLAVE_SPMI_FETCHER,
- SDX65_SLAVE_SPMI_VGI_COEX,
- SDX65_SLAVE_TCSR,
- SDX65_SLAVE_TLMM,
- SDX65_SLAVE_USB3,
- SDX65_SLAVE_USB3_PHY_CFG,
- SDX65_SLAVE_SNOC_MEM_NOC_GC,
- SDX65_SLAVE_IMEM,
- SDX65_SLAVE_TCU
- },
+ .link_nodes = { &qhs_aoss,
+ &qhs_audio,
+ &qhs_blsp1,
+ &qhs_clk_ctl,
+ &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_ecc_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_pdm,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qpic,
+ &qhs_sdc1,
+ &qhs_snoc_cfg,
+ &qhs_spmi_fetcher,
+ &qhs_spmi_vgi_coex,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_usb3,
+ &qhs_usb3_phy,
+ &qns_snoc_memnoc,
+ &qxs_imem,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qhm_qpic = {
.name = "qhm_qpic",
- .id = SDX65_MASTER_QPIC,
.channels = 1,
.buswidth = 4,
.num_links = 4,
- .links = { SDX65_SLAVE_AOSS,
- SDX65_SLAVE_AUDIO,
- SDX65_SLAVE_IPA_CFG,
- SDX65_SLAVE_ANOC_SNOC
- },
+ .link_nodes = { &qhs_aoss,
+ &qhs_audio,
+ &qhs_ipa,
+ &qns_aggre_noc },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = SDX65_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX65_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qhm_spmi_fetcher1 = {
.name = "qhm_spmi_fetcher1",
- .id = SDX65_MASTER_SPMI_FETCHER,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SDX65_SLAVE_AOSS,
- SDX65_SLAVE_ANOC_SNOC
- },
+ .link_nodes = { &qhs_aoss,
+ &qns_aggre_noc },
};
static struct qcom_icc_node qnm_aggre_noc = {
.name = "qnm_aggre_noc",
- .id = SDX65_MASTER_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 29,
- .links = { SDX65_SLAVE_AOSS,
- SDX65_SLAVE_APPSS,
- SDX65_SLAVE_AUDIO,
- SDX65_SLAVE_BLSP_1,
- SDX65_SLAVE_CLK_CTL,
- SDX65_SLAVE_CRYPTO_0_CFG,
- SDX65_SLAVE_CNOC_DDRSS,
- SDX65_SLAVE_ECC_CFG,
- SDX65_SLAVE_IMEM_CFG,
- SDX65_SLAVE_IPA_CFG,
- SDX65_SLAVE_CNOC_MSS,
- SDX65_SLAVE_PCIE_PARF,
- SDX65_SLAVE_PDM,
- SDX65_SLAVE_PRNG,
- SDX65_SLAVE_QDSS_CFG,
- SDX65_SLAVE_QPIC,
- SDX65_SLAVE_SDCC_1,
- SDX65_SLAVE_SNOC_CFG,
- SDX65_SLAVE_SPMI_FETCHER,
- SDX65_SLAVE_SPMI_VGI_COEX,
- SDX65_SLAVE_TCSR,
- SDX65_SLAVE_TLMM,
- SDX65_SLAVE_USB3,
- SDX65_SLAVE_USB3_PHY_CFG,
- SDX65_SLAVE_SNOC_MEM_NOC_GC,
- SDX65_SLAVE_IMEM,
- SDX65_SLAVE_PCIE_0,
- SDX65_SLAVE_QDSS_STM,
- SDX65_SLAVE_TCU
- },
+ .link_nodes = { &qhs_aoss,
+ &qhs_apss,
+ &qhs_audio,
+ &qhs_blsp1,
+ &qhs_clk_ctl,
+ &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_ecc_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_pdm,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qpic,
+ &qhs_sdc1,
+ &qhs_snoc_cfg,
+ &qhs_spmi_fetcher,
+ &qhs_spmi_vgi_coex,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_usb3,
+ &qhs_usb3_phy,
+ &qns_snoc_memnoc,
+ &qxs_imem,
+ &xs_pcie,
+ &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_ipa = {
.name = "qnm_ipa",
- .id = SDX65_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 26,
- .links = { SDX65_SLAVE_AOSS,
- SDX65_SLAVE_AUDIO,
- SDX65_SLAVE_BLSP_1,
- SDX65_SLAVE_CLK_CTL,
- SDX65_SLAVE_CRYPTO_0_CFG,
- SDX65_SLAVE_CNOC_DDRSS,
- SDX65_SLAVE_ECC_CFG,
- SDX65_SLAVE_IMEM_CFG,
- SDX65_SLAVE_IPA_CFG,
- SDX65_SLAVE_CNOC_MSS,
- SDX65_SLAVE_PCIE_PARF,
- SDX65_SLAVE_PDM,
- SDX65_SLAVE_PRNG,
- SDX65_SLAVE_QDSS_CFG,
- SDX65_SLAVE_QPIC,
- SDX65_SLAVE_SDCC_1,
- SDX65_SLAVE_SNOC_CFG,
- SDX65_SLAVE_SPMI_FETCHER,
- SDX65_SLAVE_TCSR,
- SDX65_SLAVE_TLMM,
- SDX65_SLAVE_USB3,
- SDX65_SLAVE_USB3_PHY_CFG,
- SDX65_SLAVE_SNOC_MEM_NOC_GC,
- SDX65_SLAVE_IMEM,
- SDX65_SLAVE_PCIE_0,
- SDX65_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qhs_aoss,
+ &qhs_audio,
+ &qhs_blsp1,
+ &qhs_clk_ctl,
+ &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_ecc_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_pdm,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qpic,
+ &qhs_sdc1,
+ &qhs_snoc_cfg,
+ &qhs_spmi_fetcher,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_usb3,
+ &qhs_usb3_phy,
+ &qns_snoc_memnoc,
+ &qxs_imem,
+ &xs_pcie,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_memnoc = {
.name = "qnm_memnoc",
- .id = SDX65_MASTER_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 27,
- .links = { SDX65_SLAVE_AOSS,
- SDX65_SLAVE_APPSS,
- SDX65_SLAVE_AUDIO,
- SDX65_SLAVE_BLSP_1,
- SDX65_SLAVE_CLK_CTL,
- SDX65_SLAVE_CRYPTO_0_CFG,
- SDX65_SLAVE_CNOC_DDRSS,
- SDX65_SLAVE_ECC_CFG,
- SDX65_SLAVE_IMEM_CFG,
- SDX65_SLAVE_IPA_CFG,
- SDX65_SLAVE_CNOC_MSS,
- SDX65_SLAVE_PCIE_PARF,
- SDX65_SLAVE_PDM,
- SDX65_SLAVE_PRNG,
- SDX65_SLAVE_QDSS_CFG,
- SDX65_SLAVE_QPIC,
- SDX65_SLAVE_SDCC_1,
- SDX65_SLAVE_SNOC_CFG,
- SDX65_SLAVE_SPMI_FETCHER,
- SDX65_SLAVE_SPMI_VGI_COEX,
- SDX65_SLAVE_TCSR,
- SDX65_SLAVE_TLMM,
- SDX65_SLAVE_USB3,
- SDX65_SLAVE_USB3_PHY_CFG,
- SDX65_SLAVE_IMEM,
- SDX65_SLAVE_QDSS_STM,
- SDX65_SLAVE_TCU
- },
+ .link_nodes = { &qhs_aoss,
+ &qhs_apss,
+ &qhs_audio,
+ &qhs_blsp1,
+ &qhs_clk_ctl,
+ &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_ecc_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_pdm,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qpic,
+ &qhs_sdc1,
+ &qhs_snoc_cfg,
+ &qhs_spmi_fetcher,
+ &qhs_spmi_vgi_coex,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_usb3,
+ &qhs_usb3_phy,
+ &qxs_imem,
+ &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_memnoc_pcie = {
.name = "qnm_memnoc_pcie",
- .id = SDX65_MASTER_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX65_SLAVE_PCIE_0 },
+ .link_nodes = { &xs_pcie },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SDX65_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SDX65_SLAVE_AOSS,
- SDX65_SLAVE_ANOC_SNOC
- },
+ .link_nodes = { &qhs_aoss,
+ &qns_aggre_noc },
};
static struct qcom_icc_node xm_ipa2pcie_slv = {
.name = "xm_ipa2pcie_slv",
- .id = SDX65_MASTER_IPA_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX65_SLAVE_PCIE_0 },
+ .link_nodes = { &xs_pcie },
};
static struct qcom_icc_node xm_pcie = {
.name = "xm_pcie",
- .id = SDX65_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX65_SLAVE_ANOC_SNOC },
+ .link_nodes = { &qns_aggre_noc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SDX65_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 26,
- .links = { SDX65_SLAVE_AOSS,
- SDX65_SLAVE_AUDIO,
- SDX65_SLAVE_BLSP_1,
- SDX65_SLAVE_CLK_CTL,
- SDX65_SLAVE_CRYPTO_0_CFG,
- SDX65_SLAVE_CNOC_DDRSS,
- SDX65_SLAVE_ECC_CFG,
- SDX65_SLAVE_IMEM_CFG,
- SDX65_SLAVE_IPA_CFG,
- SDX65_SLAVE_CNOC_MSS,
- SDX65_SLAVE_PCIE_PARF,
- SDX65_SLAVE_PDM,
- SDX65_SLAVE_PRNG,
- SDX65_SLAVE_QDSS_CFG,
- SDX65_SLAVE_QPIC,
- SDX65_SLAVE_SDCC_1,
- SDX65_SLAVE_SNOC_CFG,
- SDX65_SLAVE_SPMI_FETCHER,
- SDX65_SLAVE_SPMI_VGI_COEX,
- SDX65_SLAVE_TCSR,
- SDX65_SLAVE_TLMM,
- SDX65_SLAVE_USB3,
- SDX65_SLAVE_USB3_PHY_CFG,
- SDX65_SLAVE_SNOC_MEM_NOC_GC,
- SDX65_SLAVE_IMEM,
- SDX65_SLAVE_TCU
- },
+ .link_nodes = { &qhs_aoss,
+ &qhs_audio,
+ &qhs_blsp1,
+ &qhs_clk_ctl,
+ &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_ecc_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mss_cfg,
+ &qhs_pcie_parf,
+ &qhs_pdm,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qpic,
+ &qhs_sdc1,
+ &qhs_snoc_cfg,
+ &qhs_spmi_fetcher,
+ &qhs_spmi_vgi_coex,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_usb3,
+ &qhs_usb3_phy,
+ &qns_snoc_memnoc,
+ &qxs_imem,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
- .id = SDX65_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.num_links = 4,
- .links = { SDX65_SLAVE_AOSS,
- SDX65_SLAVE_AUDIO,
- SDX65_SLAVE_IPA_CFG,
- SDX65_SLAVE_ANOC_SNOC
- },
+ .link_nodes = { &qhs_aoss,
+ &qhs_audio,
+ &qhs_ipa,
+ &qns_aggre_noc },
};
static struct qcom_icc_node xm_usb3 = {
.name = "xm_usb3",
- .id = SDX65_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX65_SLAVE_ANOC_SNOC },
+ .link_nodes = { &qns_aggre_noc },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SDX65_SLAVE_EBI1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SDX65_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDX65_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_memnoc_snoc = {
.name = "qns_memnoc_snoc",
- .id = SDX65_SLAVE_MEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX65_MASTER_MEM_NOC_SNOC },
+ .link_nodes = { &qnm_memnoc },
};
static struct qcom_icc_node qns_sys_pcie = {
.name = "qns_sys_pcie",
- .id = SDX65_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX65_MASTER_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_memnoc_pcie },
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SDX65_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SDX65_SLAVE_APPSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_audio = {
.name = "qhs_audio",
- .id = SDX65_SLAVE_AUDIO,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_blsp1 = {
.name = "qhs_blsp1",
- .id = SDX65_SLAVE_BLSP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SDX65_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SDX65_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = SDX65_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ecc_cfg = {
.name = "qhs_ecc_cfg",
- .id = SDX65_SLAVE_ECC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SDX65_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SDX65_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SDX65_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_parf = {
.name = "qhs_pcie_parf",
- .id = SDX65_SLAVE_PCIE_PARF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SDX65_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SDX65_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SDX65_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qpic = {
.name = "qhs_qpic",
- .id = SDX65_SLAVE_QPIC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
- .id = SDX65_SLAVE_SDCC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = SDX65_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX65_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_spmi_fetcher = {
.name = "qhs_spmi_fetcher",
- .id = SDX65_SLAVE_SPMI_FETCHER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spmi_vgi_coex = {
.name = "qhs_spmi_vgi_coex",
- .id = SDX65_SLAVE_SPMI_VGI_COEX,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SDX65_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SDX65_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
- .id = SDX65_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_phy = {
.name = "qhs_usb3_phy",
- .id = SDX65_SLAVE_USB3_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_aggre_noc = {
.name = "qns_aggre_noc",
- .id = SDX65_SLAVE_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX65_MASTER_ANOC_SNOC },
+ .link_nodes = { &qnm_aggre_noc },
};
static struct qcom_icc_node qns_snoc_memnoc = {
.name = "qns_snoc_memnoc",
- .id = SDX65_SLAVE_SNOC_MEM_NOC_GC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDX65_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SDX65_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SDX65_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie = {
.name = "xs_pcie",
- .id = SDX65_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SDX65_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SDX65_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
diff --git a/drivers/interconnect/qcom/sdx65.h b/drivers/interconnect/qcom/sdx65.h
deleted file mode 100644
index 5dca6e8b32c9..000000000000
--- a/drivers/interconnect/qcom/sdx65.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX65_H
-#define __DRIVERS_INTERCONNECT_QCOM_SDX65_H
-
-#define SDX65_MASTER_TCU_0 0
-#define SDX65_MASTER_LLCC 1
-#define SDX65_MASTER_AUDIO 2
-#define SDX65_MASTER_BLSP_1 3
-#define SDX65_MASTER_QDSS_BAM 4
-#define SDX65_MASTER_QPIC 5
-#define SDX65_MASTER_SNOC_CFG 6
-#define SDX65_MASTER_SPMI_FETCHER 7
-#define SDX65_MASTER_ANOC_SNOC 8
-#define SDX65_MASTER_IPA 9
-#define SDX65_MASTER_MEM_NOC_SNOC 10
-#define SDX65_MASTER_MEM_NOC_PCIE_SNOC 11
-#define SDX65_MASTER_SNOC_GC_MEM_NOC 12
-#define SDX65_MASTER_CRYPTO 13
-#define SDX65_MASTER_APPSS_PROC 14
-#define SDX65_MASTER_IPA_PCIE 15
-#define SDX65_MASTER_PCIE_0 16
-#define SDX65_MASTER_QDSS_ETR 17
-#define SDX65_MASTER_SDCC_1 18
-#define SDX65_MASTER_USB3 19
-#define SDX65_SLAVE_EBI1 512
-#define SDX65_SLAVE_AOSS 513
-#define SDX65_SLAVE_APPSS 514
-#define SDX65_SLAVE_AUDIO 515
-#define SDX65_SLAVE_BLSP_1 516
-#define SDX65_SLAVE_CLK_CTL 517
-#define SDX65_SLAVE_CRYPTO_0_CFG 518
-#define SDX65_SLAVE_CNOC_DDRSS 519
-#define SDX65_SLAVE_ECC_CFG 520
-#define SDX65_SLAVE_IMEM_CFG 521
-#define SDX65_SLAVE_IPA_CFG 522
-#define SDX65_SLAVE_CNOC_MSS 523
-#define SDX65_SLAVE_PCIE_PARF 524
-#define SDX65_SLAVE_PDM 525
-#define SDX65_SLAVE_PRNG 526
-#define SDX65_SLAVE_QDSS_CFG 527
-#define SDX65_SLAVE_QPIC 528
-#define SDX65_SLAVE_SDCC_1 529
-#define SDX65_SLAVE_SNOC_CFG 530
-#define SDX65_SLAVE_SPMI_FETCHER 531
-#define SDX65_SLAVE_SPMI_VGI_COEX 532
-#define SDX65_SLAVE_TCSR 533
-#define SDX65_SLAVE_TLMM 534
-#define SDX65_SLAVE_USB3 535
-#define SDX65_SLAVE_USB3_PHY_CFG 536
-#define SDX65_SLAVE_ANOC_SNOC 537
-#define SDX65_SLAVE_LLCC 538
-#define SDX65_SLAVE_MEM_NOC_SNOC 539
-#define SDX65_SLAVE_SNOC_MEM_NOC_GC 540
-#define SDX65_SLAVE_MEM_NOC_PCIE_SNOC 541
-#define SDX65_SLAVE_IMEM 542
-#define SDX65_SLAVE_SERVICE_SNOC 543
-#define SDX65_SLAVE_PCIE_0 544
-#define SDX65_SLAVE_QDSS_STM 545
-#define SDX65_SLAVE_TCU 546
-
-#endif
diff --git a/drivers/interconnect/qcom/sdx75.c b/drivers/interconnect/qcom/sdx75.c
index 7ef1f17f3292..5cfccc6cfd1b 100644
--- a/drivers/interconnect/qcom/sdx75.c
+++ b/drivers/interconnect/qcom/sdx75.c
@@ -14,782 +14,724 @@
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
-#include "sdx75.h"
-static struct qcom_icc_node qpic_core_master = {
- .name = "qpic_core_master",
- .id = SDX75_MASTER_QPIC_CORE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 1,
- .links = { SDX75_SLAVE_QPIC_CORE },
-};
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qnm_cnoc;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_gemnoc_cfg;
+static struct qcom_icc_node qnm_mdsp;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node xm_ipa2pcie;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node xm_pcie3_2;
+static struct qcom_icc_node qhm_audio;
+static struct qcom_icc_node qhm_gic;
+static struct qcom_icc_node qhm_pcie_rscc;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qpic;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qnm_aggre_noc;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node qnm_system_noc_cfg;
+static struct qcom_icc_node qnm_system_noc_pcie_cfg;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node qxm_mvmss;
+static struct qcom_icc_node xm_emac_0;
+static struct qcom_icc_node xm_emac_1;
+static struct qcom_icc_node xm_qdss_etr0;
+static struct qcom_icc_node xm_qdss_etr1;
+static struct qcom_icc_node xm_sdc1;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_usb3;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qhs_lagg;
+static struct qcom_icc_node qhs_mccc_master;
+static struct qcom_icc_node qns_gemnoc;
+static struct qcom_icc_node qss_snoop_bwmon;
+static struct qcom_icc_node qns_gemnoc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node srvc_gemnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_pcie_gemnoc;
+static struct qcom_icc_node ps_eth0_cfg;
+static struct qcom_icc_node ps_eth1_cfg;
+static struct qcom_icc_node qhs_audio;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_crypto_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_mvmss_cfg;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pcie2_cfg;
+static struct qcom_icc_node qhs_pcie_rscc;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qpic;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_sdc1;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_spmi_vgi_coex;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_usb3;
+static struct qcom_icc_node qhs_usb3_phy;
+static struct qcom_icc_node qns_a1noc;
+static struct qcom_icc_node qns_ddrss_cfg;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node qns_system_noc_cfg;
+static struct qcom_icc_node qns_system_noc_pcie_cfg;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node srvc_pcie_system_noc;
+static struct qcom_icc_node srvc_system_noc;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_pcie_2;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SDX75_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
- .id = SDX75_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 4,
- .links = { SDX75_SLAVE_LAGG_CFG, SDX75_SLAVE_MCCC_MASTER,
- SDX75_SLAVE_GEM_NOC_CFG, SDX75_SLAVE_SNOOP_BWMON },
+ .link_nodes = { &qhs_lagg, &qhs_mccc_master,
+ &qns_gemnoc, &qss_snoop_bwmon },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SDX75_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC },
+ .link_nodes = { &qns_gemnoc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SDX75_MASTER_APPSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC,
- SDX75_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gemnoc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
- .id = SDX75_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_SERVICE_GEM_NOC },
+ .link_nodes = { &srvc_gemnoc },
};
static struct qcom_icc_node qnm_mdsp = {
.name = "qnm_mdsp",
- .id = SDX75_MASTER_MSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC,
- SDX75_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gemnoc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SDX75_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC },
+ .link_nodes = { &qns_gemnoc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SDX75_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SDX75_SLAVE_GEM_NOC_CNOC, SDX75_SLAVE_LLCC,
- SDX75_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gemnoc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SDX75_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node xm_ipa2pcie = {
.name = "xm_ipa2pcie",
- .id = SDX75_MASTER_IPA_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_pcie },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SDX75_MASTER_LLCC,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SDX75_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gemnoc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SDX75_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gemnoc },
};
static struct qcom_icc_node xm_pcie3_2 = {
.name = "xm_pcie3_2",
- .id = SDX75_MASTER_PCIE_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gemnoc },
};
static struct qcom_icc_node qhm_audio = {
.name = "qhm_audio",
- .id = SDX75_MASTER_AUDIO,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
- .id = SDX75_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qhm_pcie_rscc = {
.name = "qhm_pcie_rscc",
- .id = SDX75_MASTER_PCIE_RSCC,
.channels = 1,
.buswidth = 4,
.num_links = 31,
- .links = { SDX75_SLAVE_ETH0_CFG, SDX75_SLAVE_ETH1_CFG,
- SDX75_SLAVE_AUDIO, SDX75_SLAVE_CLK_CTL,
- SDX75_SLAVE_CRYPTO_0_CFG, SDX75_SLAVE_IMEM_CFG,
- SDX75_SLAVE_IPA_CFG, SDX75_SLAVE_IPC_ROUTER_CFG,
- SDX75_SLAVE_CNOC_MSS, SDX75_SLAVE_ICBDI_MVMSS_CFG,
- SDX75_SLAVE_PCIE_0_CFG, SDX75_SLAVE_PCIE_1_CFG,
- SDX75_SLAVE_PCIE_2_CFG, SDX75_SLAVE_PDM,
- SDX75_SLAVE_PRNG, SDX75_SLAVE_QDSS_CFG,
- SDX75_SLAVE_QPIC, SDX75_SLAVE_QUP_0,
- SDX75_SLAVE_SDCC_1, SDX75_SLAVE_SDCC_4,
- SDX75_SLAVE_SPMI_VGI_COEX, SDX75_SLAVE_TCSR,
- SDX75_SLAVE_TLMM, SDX75_SLAVE_USB3,
- SDX75_SLAVE_USB3_PHY_CFG, SDX75_SLAVE_DDRSS_CFG,
- SDX75_SLAVE_SNOC_CFG, SDX75_SLAVE_PCIE_ANOC_CFG,
- SDX75_SLAVE_IMEM, SDX75_SLAVE_QDSS_STM,
- SDX75_SLAVE_TCU },
+ .link_nodes = { &ps_eth0_cfg, &ps_eth1_cfg,
+ &qhs_audio, &qhs_clk_ctl,
+ &qhs_crypto_cfg, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_mss_cfg, &qhs_mvmss_cfg,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pcie2_cfg, &qhs_pdm,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qpic, &qhs_qup0,
+ &qhs_sdc1, &qhs_sdc4,
+ &qhs_spmi_vgi_coex, &qhs_tcsr,
+ &qhs_tlmm, &qhs_usb3,
+ &qhs_usb3_phy, &qns_ddrss_cfg,
+ &qns_system_noc_cfg, &qns_system_noc_pcie_cfg,
+ &qxs_imem, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SDX75_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node qhm_qpic = {
.name = "qhm_qpic",
- .id = SDX75_MASTER_QPIC,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = SDX75_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node qnm_aggre_noc = {
.name = "qnm_aggre_noc",
- .id = SDX75_MASTER_ANOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SDX75_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 32,
- .links = { SDX75_SLAVE_ETH0_CFG, SDX75_SLAVE_ETH1_CFG,
- SDX75_SLAVE_AUDIO, SDX75_SLAVE_CLK_CTL,
- SDX75_SLAVE_CRYPTO_0_CFG, SDX75_SLAVE_IMEM_CFG,
- SDX75_SLAVE_IPA_CFG, SDX75_SLAVE_IPC_ROUTER_CFG,
- SDX75_SLAVE_CNOC_MSS, SDX75_SLAVE_ICBDI_MVMSS_CFG,
- SDX75_SLAVE_PCIE_0_CFG, SDX75_SLAVE_PCIE_1_CFG,
- SDX75_SLAVE_PCIE_2_CFG, SDX75_SLAVE_PCIE_RSC_CFG,
- SDX75_SLAVE_PDM, SDX75_SLAVE_PRNG,
- SDX75_SLAVE_QDSS_CFG, SDX75_SLAVE_QPIC,
- SDX75_SLAVE_QUP_0, SDX75_SLAVE_SDCC_1,
- SDX75_SLAVE_SDCC_4, SDX75_SLAVE_SPMI_VGI_COEX,
- SDX75_SLAVE_TCSR, SDX75_SLAVE_TLMM,
- SDX75_SLAVE_USB3, SDX75_SLAVE_USB3_PHY_CFG,
- SDX75_SLAVE_DDRSS_CFG, SDX75_SLAVE_SNOC_CFG,
- SDX75_SLAVE_PCIE_ANOC_CFG, SDX75_SLAVE_IMEM,
- SDX75_SLAVE_QDSS_STM, SDX75_SLAVE_TCU },
+ .link_nodes = { &ps_eth0_cfg, &ps_eth1_cfg,
+ &qhs_audio, &qhs_clk_ctl,
+ &qhs_crypto_cfg, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_mss_cfg, &qhs_mvmss_cfg,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pcie2_cfg, &qhs_pcie_rscc,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qpic,
+ &qhs_qup0, &qhs_sdc1,
+ &qhs_sdc4, &qhs_spmi_vgi_coex,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_usb3, &qhs_usb3_phy,
+ &qns_ddrss_cfg, &qns_system_noc_cfg,
+ &qns_system_noc_pcie_cfg, &qxs_imem,
+ &xs_qdss_stm, &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SDX75_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SDX75_SLAVE_PCIE_0, SDX75_SLAVE_PCIE_1,
- SDX75_SLAVE_PCIE_2 },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1,
+ &xs_pcie_2 },
};
static struct qcom_icc_node qnm_system_noc_cfg = {
.name = "qnm_system_noc_cfg",
- .id = SDX75_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_system_noc },
};
static struct qcom_icc_node qnm_system_noc_pcie_cfg = {
.name = "qnm_system_noc_pcie_cfg",
- .id = SDX75_MASTER_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_SLAVE_SERVICE_PCIE_ANOC },
+ .link_nodes = { &srvc_pcie_system_noc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SDX75_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SDX75_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qxm_mvmss = {
.name = "qxm_mvmss",
- .id = SDX75_MASTER_MVMSS,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node xm_emac_0 = {
.name = "xm_emac_0",
- .id = SDX75_MASTER_EMAC_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node xm_emac_1 = {
.name = "xm_emac_1",
- .id = SDX75_MASTER_EMAC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node xm_qdss_etr0 = {
.name = "xm_qdss_etr0",
- .id = SDX75_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node xm_qdss_etr1 = {
.name = "xm_qdss_etr1",
- .id = SDX75_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node xm_sdc1 = {
.name = "xm_sdc1",
- .id = SDX75_MASTER_SDCC_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SDX75_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node xm_usb3 = {
.name = "xm_usb3",
- .id = SDX75_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_SLAVE_A1NOC_CFG },
-};
-
-static struct qcom_icc_node qpic_core_slave = {
- .name = "qpic_core_slave",
- .id = SDX75_SLAVE_QPIC_CORE,
- .channels = 1,
- .buswidth = 4,
- .num_links = 0,
+ .link_nodes = { &qns_a1noc },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SDX75_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lagg = {
.name = "qhs_lagg",
- .id = SDX75_SLAVE_LAGG_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mccc_master = {
.name = "qhs_mccc_master",
- .id = SDX75_SLAVE_MCCC_MASTER,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
- .id = SDX75_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_snoop_bwmon = {
.name = "qss_snoop_bwmon",
- .id = SDX75_SLAVE_SNOOP_BWMON,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gemnoc_cnoc = {
.name = "qns_gemnoc_cnoc",
- .id = SDX75_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SDX75_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDX75_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SDX75_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDX75_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node srvc_gemnoc = {
.name = "srvc_gemnoc",
- .id = SDX75_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SDX75_SLAVE_EBI1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_pcie_gemnoc = {
.name = "qns_pcie_gemnoc",
- .id = SDX75_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDX75_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node ps_eth0_cfg = {
.name = "ps_eth0_cfg",
- .id = SDX75_SLAVE_ETH0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node ps_eth1_cfg = {
.name = "ps_eth1_cfg",
- .id = SDX75_SLAVE_ETH1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_audio = {
.name = "qhs_audio",
- .id = SDX75_SLAVE_AUDIO,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SDX75_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto_cfg = {
.name = "qhs_crypto_cfg",
- .id = SDX75_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SDX75_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SDX75_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SDX75_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SDX75_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mvmss_cfg = {
.name = "qhs_mvmss_cfg",
- .id = SDX75_SLAVE_ICBDI_MVMSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SDX75_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SDX75_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie2_cfg = {
.name = "qhs_pcie2_cfg",
- .id = SDX75_SLAVE_PCIE_2_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie_rscc = {
.name = "qhs_pcie_rscc",
- .id = SDX75_SLAVE_PCIE_RSC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SDX75_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SDX75_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SDX75_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qpic = {
.name = "qhs_qpic",
- .id = SDX75_SLAVE_QPIC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SDX75_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc1 = {
.name = "qhs_sdc1",
- .id = SDX75_SLAVE_SDCC_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SDX75_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_spmi_vgi_coex = {
.name = "qhs_spmi_vgi_coex",
- .id = SDX75_SLAVE_SPMI_VGI_COEX,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SDX75_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SDX75_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3 = {
.name = "qhs_usb3",
- .id = SDX75_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_phy = {
.name = "qhs_usb3_phy",
- .id = SDX75_SLAVE_USB3_PHY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_a1noc = {
.name = "qns_a1noc",
- .id = SDX75_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SDX75_MASTER_ANOC_SNOC },
+ .link_nodes = { &qnm_aggre_noc },
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
- .id = SDX75_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qnm_cnoc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SDX75_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SDX75_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qns_system_noc_cfg = {
.name = "qns_system_noc_cfg",
- .id = SDX75_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_MASTER_SNOC_CFG },
+ .link_nodes = { &qnm_system_noc_cfg },
};
static struct qcom_icc_node qns_system_noc_pcie_cfg = {
.name = "qns_system_noc_pcie_cfg",
- .id = SDX75_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SDX75_MASTER_PCIE_ANOC_CFG },
+ .link_nodes = { &qnm_system_noc_pcie_cfg },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SDX75_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node srvc_pcie_system_noc = {
.name = "srvc_pcie_system_noc",
- .id = SDX75_SLAVE_SERVICE_PCIE_ANOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_system_noc = {
.name = "srvc_system_noc",
- .id = SDX75_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SDX75_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SDX75_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_2 = {
.name = "xs_pcie_2",
- .id = SDX75_SLAVE_PCIE_2,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SDX75_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SDX75_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_bcm bcm_ce0 = {
@@ -831,12 +773,6 @@ static struct qcom_icc_bcm bcm_mc0 = {
.nodes = { &ebi },
};
-static struct qcom_icc_bcm bcm_qp0 = {
- .name = "QP0",
- .num_nodes = 1,
- .nodes = { &qpic_core_slave },
-};
-
static struct qcom_icc_bcm bcm_qup0 = {
.name = "QUP0",
.keepalive = true,
@@ -898,14 +834,11 @@ static struct qcom_icc_bcm bcm_sn4 = {
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
- &bcm_qp0,
&bcm_qup0,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
- [MASTER_QPIC_CORE] = &qpic_core_master,
[MASTER_QUP_CORE_0] = &qup0_core_master,
- [SLAVE_QPIC_CORE] = &qpic_core_slave,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
};
diff --git a/drivers/interconnect/qcom/sdx75.h b/drivers/interconnect/qcom/sdx75.h
deleted file mode 100644
index 24e887159920..000000000000
--- a/drivers/interconnect/qcom/sdx75.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX75_H
-#define __DRIVERS_INTERCONNECT_QCOM_SDX75_H
-
-#define SDX75_MASTER_ANOC_PCIE_GEM_NOC 0
-#define SDX75_MASTER_ANOC_SNOC 1
-#define SDX75_MASTER_APPSS_PROC 2
-#define SDX75_MASTER_AUDIO 3
-#define SDX75_MASTER_CNOC_DC_NOC 4
-#define SDX75_MASTER_CRYPTO 5
-#define SDX75_MASTER_EMAC_0 6
-#define SDX75_MASTER_EMAC_1 7
-#define SDX75_MASTER_GEM_NOC_CFG 8
-#define SDX75_MASTER_GEM_NOC_CNOC 9
-#define SDX75_MASTER_GEM_NOC_PCIE_SNOC 10
-#define SDX75_MASTER_GIC 11
-#define SDX75_MASTER_GIC_AHB 12
-#define SDX75_MASTER_IPA 13
-#define SDX75_MASTER_IPA_PCIE 14
-#define SDX75_MASTER_LLCC 15
-#define SDX75_MASTER_MSS_PROC 16
-#define SDX75_MASTER_MVMSS 17
-#define SDX75_MASTER_PCIE_0 18
-#define SDX75_MASTER_PCIE_1 19
-#define SDX75_MASTER_PCIE_2 20
-#define SDX75_MASTER_PCIE_ANOC_CFG 21
-#define SDX75_MASTER_PCIE_RSCC 22
-#define SDX75_MASTER_QDSS_BAM 23
-#define SDX75_MASTER_QDSS_ETR 24
-#define SDX75_MASTER_QDSS_ETR_1 25
-#define SDX75_MASTER_QPIC 26
-#define SDX75_MASTER_QPIC_CORE 27
-#define SDX75_MASTER_QUP_0 28
-#define SDX75_MASTER_QUP_CORE_0 29
-#define SDX75_MASTER_SDCC_1 30
-#define SDX75_MASTER_SDCC_4 31
-#define SDX75_MASTER_SNOC_CFG 32
-#define SDX75_MASTER_SNOC_SF_MEM_NOC 33
-#define SDX75_MASTER_SYS_TCU 34
-#define SDX75_MASTER_USB3_0 35
-#define SDX75_SLAVE_A1NOC_CFG 36
-#define SDX75_SLAVE_ANOC_PCIE_GEM_NOC 37
-#define SDX75_SLAVE_AUDIO 38
-#define SDX75_SLAVE_CLK_CTL 39
-#define SDX75_SLAVE_CRYPTO_0_CFG 40
-#define SDX75_SLAVE_CNOC_MSS 41
-#define SDX75_SLAVE_DDRSS_CFG 42
-#define SDX75_SLAVE_EBI1 43
-#define SDX75_SLAVE_ETH0_CFG 44
-#define SDX75_SLAVE_ETH1_CFG 45
-#define SDX75_SLAVE_GEM_NOC_CFG 46
-#define SDX75_SLAVE_GEM_NOC_CNOC 47
-#define SDX75_SLAVE_ICBDI_MVMSS_CFG 48
-#define SDX75_SLAVE_IMEM 49
-#define SDX75_SLAVE_IMEM_CFG 50
-#define SDX75_SLAVE_IPA_CFG 51
-#define SDX75_SLAVE_IPC_ROUTER_CFG 52
-#define SDX75_SLAVE_LAGG_CFG 53
-#define SDX75_SLAVE_LLCC 54
-#define SDX75_SLAVE_MCCC_MASTER 55
-#define SDX75_SLAVE_MEM_NOC_PCIE_SNOC 56
-#define SDX75_SLAVE_PCIE_0 57
-#define SDX75_SLAVE_PCIE_1 58
-#define SDX75_SLAVE_PCIE_2 59
-#define SDX75_SLAVE_PCIE_0_CFG 60
-#define SDX75_SLAVE_PCIE_1_CFG 61
-#define SDX75_SLAVE_PCIE_2_CFG 62
-#define SDX75_SLAVE_PCIE_ANOC_CFG 63
-#define SDX75_SLAVE_PCIE_RSC_CFG 64
-#define SDX75_SLAVE_PDM 65
-#define SDX75_SLAVE_PRNG 66
-#define SDX75_SLAVE_QDSS_CFG 67
-#define SDX75_SLAVE_QDSS_STM 68
-#define SDX75_SLAVE_QPIC 69
-#define SDX75_SLAVE_QPIC_CORE 70
-#define SDX75_SLAVE_QUP_0 71
-#define SDX75_SLAVE_QUP_CORE_0 72
-#define SDX75_SLAVE_SDCC_1 73
-#define SDX75_SLAVE_SDCC_4 74
-#define SDX75_SLAVE_SERVICE_GEM_NOC 75
-#define SDX75_SLAVE_SERVICE_PCIE_ANOC 76
-#define SDX75_SLAVE_SERVICE_SNOC 77
-#define SDX75_SLAVE_SNOC_CFG 78
-#define SDX75_SLAVE_SNOC_GEM_NOC_SF 79
-#define SDX75_SLAVE_SNOOP_BWMON 80
-#define SDX75_SLAVE_SPMI_VGI_COEX 81
-#define SDX75_SLAVE_TCSR 82
-#define SDX75_SLAVE_TCU 83
-#define SDX75_SLAVE_TLMM 84
-#define SDX75_SLAVE_USB3 85
-#define SDX75_SLAVE_USB3_PHY_CFG 86
-
-#endif
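/*
 * Illustrative aside, not part of the patch: the change repeated above
 * swaps index-based links for direct pointers, which is what lets the
 * per-SoC header (sdx75.h, deleted here) go away. The node names below
 * are hypothetical; the fields mirror struct qcom_icc_node as used above.
 *
 * Before: every driver kept a private header of SoC-local indices, and
 * .links[] held those indices for the framework to resolve at probe time:
 *
 *	static struct qcom_icc_node mas_example = {
 *		.name = "mas_example",
 *		.id = SOC_MASTER_EXAMPLE,
 *		.channels = 1,
 *		.buswidth = 4,
 *		.num_links = 1,
 *		.links = { SOC_SLAVE_EXAMPLE },
 *	};
 *
 * After: .id and the header are gone; links are plain pointers, with
 * forward declarations (as at the top of sm6350.c below) resolving
 * definition-order cycles between masters and slaves:
 *
 *	static struct qcom_icc_node slv_example;
 *
 *	static struct qcom_icc_node mas_example = {
 *		.name = "mas_example",
 *		.channels = 1,
 *		.buswidth = 4,
 *		.num_links = 1,
 *		.link_nodes = { &slv_example },
 *	};
 */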
diff --git a/drivers/interconnect/qcom/sm6350.c b/drivers/interconnect/qcom/sm6350.c
index f41d7e19ba26..d96bec1cbb26 100644
--- a/drivers/interconnect/qcom/sm6350.c
+++ b/drivers/interconnect/qcom/sm6350.c
@@ -13,1151 +13,1359 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sm6350.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg;
+static struct qcom_icc_node qhm_qup_0;
+static struct qcom_icc_node xm_emmc;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node qhm_a2noc_cfg;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup_1;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp;
+static struct qcom_icc_node qxm_camnoc_icp_uncomp;
+static struct qcom_icc_node qxm_camnoc_sf_uncomp;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qnm_npu;
+static struct qcom_icc_node qxm_npu_dsp;
+static struct qcom_icc_node qnm_snoc;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node qhm_cnoc_dc_noc;
+static struct qcom_icc_node acm_apps;
+static struct qcom_icc_node acm_sys_tcu;
+static struct qcom_icc_node qhm_gemnoc_cfg;
+static struct qcom_icc_node qnm_cmpnoc;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qxm_gpu;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qhm_mnoc_cfg;
+static struct qcom_icc_node qnm_video0;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qxm_camnoc_hf;
+static struct qcom_icc_node qxm_camnoc_icp;
+static struct qcom_icc_node qxm_camnoc_sf;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node amm_npu_sys;
+static struct qcom_icc_node qhm_npu_cfg;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_gemnoc;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qns_camnoc_uncomp;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qns_cdsp_gemnoc;
+static struct qcom_icc_node qhs_a1_noc_cfg;
+static struct qcom_icc_node qhs_a2_noc_cfg;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy2;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_boot_rom;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_camera_nrt_thrott_cfg;
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_dcc_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_display_throttle_cfg;
+static struct qcom_icc_node qhs_emmc_cfg;
+static struct qcom_icc_node qhs_glm;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_mnoc_cfg;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_npu_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qm_cfg;
+static struct qcom_icc_node qhs_qm_mpu_cfg;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_security;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_venus_throttle_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node qhs_gemnoc;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qhs_mcdma_ms_mpu_cfg;
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node qns_gem_noc_snoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node srvc_gemnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qhs_cal_dp0;
+static struct qcom_icc_node qhs_cp;
+static struct qcom_icc_node qhs_dma_bwmon;
+static struct qcom_icc_node qhs_dpm;
+static struct qcom_icc_node qhs_isense;
+static struct qcom_icc_node qhs_llm;
+static struct qcom_icc_node qhs_tcm;
+static struct qcom_icc_node qns_npu_sys;
+static struct qcom_icc_node srvc_noc;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qns_cnoc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
- .id = SM6350_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
+};
+
+static struct qcom_icc_qosbox qhm_qup_0_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xa000 },
+ .prio = 2,
+ .urg_fwd = 0,
};
static struct qcom_icc_node qhm_qup_0 = {
.name = "qhm_qup_0",
- .id = SM6350_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
+ .qosbox = &qhm_qup_0_qos,
.num_links = 1,
- .links = { SM6350_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_qosbox xm_emmc_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x7000 },
+ .prio = 2,
+ .urg_fwd = 0,
};
static struct qcom_icc_node xm_emmc = {
.name = "xm_emmc",
- .id = SM6350_MASTER_EMMC,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_emmc_qos,
.num_links = 1,
- .links = { SM6350_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_qosbox xm_ufs_mem_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x8000 },
+ .prio = 4,
+ .urg_fwd = 0,
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SM6350_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_ufs_mem_qos,
.num_links = 1,
- .links = { SM6350_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
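/*
 * Illustrative aside, not part of the patch: alongside the link_nodes
 * conversion, masters on this NoC gain a qcom_icc_qosbox describing how
 * their QoS registers are programmed. port_offsets are register offsets
 * inside the NoC's MMIO region (mapped via the regmap_config blocks added
 * near the end of this file), prio is the transaction priority, and
 * urg_fwd enables urgency forwarding. A hypothetical two-port master:
 *
 *	static struct qcom_icc_qosbox xm_example_qos = {
 *		.num_ports = 2,
 *		.port_offsets = { 0x9000, 0xa000 },
 *		.prio = 2,
 *		.urg_fwd = 0,
 *	};
 *
 * attached with .qosbox = &xm_example_qos in the node definition.
 */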
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
- .id = SM6350_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
+};
+
+static struct qcom_icc_qosbox qhm_qdss_bam_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 2,
+ .urg_fwd = 0,
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SM6350_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
+ .qosbox = &qhm_qdss_bam_qos,
.num_links = 1,
- .links = { SM6350_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
+static struct qcom_icc_qosbox qhm_qup_1_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x9000 },
+ .prio = 2,
+ .urg_fwd = 0,
+};
static struct qcom_icc_node qhm_qup_1 = {
.name = "qhm_qup_1",
- .id = SM6350_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
+ .qosbox = &qhm_qup_1_qos,
.num_links = 1,
- .links = { SM6350_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_qosbox qxm_crypto_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x6000 },
+ .prio = 2,
+ .urg_fwd = 0,
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SM6350_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qxm_crypto_qos,
.num_links = 1,
- .links = { SM6350_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_qosbox qxm_ipa_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x7000 },
+ .prio = 2,
+ .urg_fwd = 0,
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SM6350_MASTER_IPA,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qxm_ipa_qos,
.num_links = 1,
- .links = { SM6350_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_qosbox xm_qdss_etr_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 2,
+ .urg_fwd = 0,
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SM6350_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_qdss_etr_qos,
.num_links = 1,
- .links = { SM6350_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_qosbox xm_sdc2_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x18000 },
+ .prio = 2,
+ .urg_fwd = 0,
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SM6350_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_sdc2_qos,
.num_links = 1,
- .links = { SM6350_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_qosbox xm_usb3_0_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 2,
+ .urg_fwd = 0,
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SM6350_MASTER_USB3,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_usb3_0_qos,
.num_links = 1,
- .links = { SM6350_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
- .id = SM6350_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM6350_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_icp_uncomp = {
.name = "qxm_camnoc_icp_uncomp",
- .id = SM6350_MASTER_CAMNOC_ICP_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM6350_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
- .id = SM6350_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM6350_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SM6350_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SM6350_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_qosbox qnm_npu_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0xf000, 0x11000 },
+ .prio = 0,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qnm_npu = {
.name = "qnm_npu",
- .id = SM6350_MASTER_NPU,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qnm_npu_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_CDSP_GEM_NOC },
+ .link_nodes = { &qns_cdsp_gemnoc },
+};
+
+static struct qcom_icc_qosbox qxm_npu_dsp_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x13000 },
+ .prio = 0,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qxm_npu_dsp = {
.name = "qxm_npu_dsp",
- .id = SM6350_MASTER_NPU_PROC,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qxm_npu_dsp_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_CDSP_GEM_NOC },
+ .link_nodes = { &qns_cdsp_gemnoc },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
- .id = SM6350_SNOC_CNOC_MAS,
.channels = 1,
.buswidth = 8,
.num_links = 42,
- .links = { SM6350_SLAVE_CAMERA_CFG,
- SM6350_SLAVE_SDCC_2,
- SM6350_SLAVE_CNOC_MNOC_CFG,
- SM6350_SLAVE_UFS_MEM_CFG,
- SM6350_SLAVE_QM_CFG,
- SM6350_SLAVE_SNOC_CFG,
- SM6350_SLAVE_QM_MPU_CFG,
- SM6350_SLAVE_GLM,
- SM6350_SLAVE_PDM,
- SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG,
- SM6350_SLAVE_A2NOC_CFG,
- SM6350_SLAVE_QDSS_CFG,
- SM6350_SLAVE_VSENSE_CTRL_CFG,
- SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG,
- SM6350_SLAVE_DISPLAY_CFG,
- SM6350_SLAVE_TCSR,
- SM6350_SLAVE_DCC_CFG,
- SM6350_SLAVE_CNOC_DDRSS,
- SM6350_SLAVE_DISPLAY_THROTTLE_CFG,
- SM6350_SLAVE_NPU_CFG,
- SM6350_SLAVE_AHB2PHY,
- SM6350_SLAVE_GRAPHICS_3D_CFG,
- SM6350_SLAVE_BOOT_ROM,
- SM6350_SLAVE_VENUS_CFG,
- SM6350_SLAVE_IPA_CFG,
- SM6350_SLAVE_SECURITY,
- SM6350_SLAVE_IMEM_CFG,
- SM6350_SLAVE_CNOC_MSS,
- SM6350_SLAVE_SERVICE_CNOC,
- SM6350_SLAVE_USB3,
- SM6350_SLAVE_VENUS_THROTTLE_CFG,
- SM6350_SLAVE_RBCPR_CX_CFG,
- SM6350_SLAVE_A1NOC_CFG,
- SM6350_SLAVE_AOSS,
- SM6350_SLAVE_PRNG,
- SM6350_SLAVE_EMMC_CFG,
- SM6350_SLAVE_CRYPTO_0_CFG,
- SM6350_SLAVE_PIMEM_CFG,
- SM6350_SLAVE_RBCPR_MX_CFG,
- SM6350_SLAVE_QUP_0,
- SM6350_SLAVE_QUP_1,
- SM6350_SLAVE_CLK_CTL
- },
+ .link_nodes = { &qhs_camera_cfg,
+ &qhs_sdc2,
+ &qhs_mnoc_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_qm_cfg,
+ &qhs_snoc_cfg,
+ &qhs_qm_mpu_cfg,
+ &qhs_glm,
+ &qhs_pdm,
+ &qhs_camera_nrt_thrott_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_qdss_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_display_cfg,
+ &qhs_tcsr,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_npu_cfg,
+ &qhs_ahb2phy0,
+ &qhs_gpuss_cfg,
+ &qhs_boot_rom,
+ &qhs_venus_cfg,
+ &qhs_ipa,
+ &qhs_security,
+ &qhs_imem_cfg,
+ &qhs_mss_cfg,
+ &srvc_cnoc,
+ &qhs_usb3_0,
+ &qhs_venus_throttle_cfg,
+ &qhs_cpr_cx,
+ &qhs_a1_noc_cfg,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_emmc_cfg,
+ &qhs_crypto0_cfg,
+ &qhs_pimem_cfg,
+ &qhs_cpr_mx,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_clk_ctl },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = SM6350_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 42,
- .links = { SM6350_SLAVE_CAMERA_CFG,
- SM6350_SLAVE_SDCC_2,
- SM6350_SLAVE_CNOC_MNOC_CFG,
- SM6350_SLAVE_UFS_MEM_CFG,
- SM6350_SLAVE_QM_CFG,
- SM6350_SLAVE_SNOC_CFG,
- SM6350_SLAVE_QM_MPU_CFG,
- SM6350_SLAVE_GLM,
- SM6350_SLAVE_PDM,
- SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG,
- SM6350_SLAVE_A2NOC_CFG,
- SM6350_SLAVE_QDSS_CFG,
- SM6350_SLAVE_VSENSE_CTRL_CFG,
- SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG,
- SM6350_SLAVE_DISPLAY_CFG,
- SM6350_SLAVE_TCSR,
- SM6350_SLAVE_DCC_CFG,
- SM6350_SLAVE_CNOC_DDRSS,
- SM6350_SLAVE_DISPLAY_THROTTLE_CFG,
- SM6350_SLAVE_NPU_CFG,
- SM6350_SLAVE_AHB2PHY,
- SM6350_SLAVE_GRAPHICS_3D_CFG,
- SM6350_SLAVE_BOOT_ROM,
- SM6350_SLAVE_VENUS_CFG,
- SM6350_SLAVE_IPA_CFG,
- SM6350_SLAVE_SECURITY,
- SM6350_SLAVE_IMEM_CFG,
- SM6350_SLAVE_CNOC_MSS,
- SM6350_SLAVE_SERVICE_CNOC,
- SM6350_SLAVE_USB3,
- SM6350_SLAVE_VENUS_THROTTLE_CFG,
- SM6350_SLAVE_RBCPR_CX_CFG,
- SM6350_SLAVE_A1NOC_CFG,
- SM6350_SLAVE_AOSS,
- SM6350_SLAVE_PRNG,
- SM6350_SLAVE_EMMC_CFG,
- SM6350_SLAVE_CRYPTO_0_CFG,
- SM6350_SLAVE_PIMEM_CFG,
- SM6350_SLAVE_RBCPR_MX_CFG,
- SM6350_SLAVE_QUP_0,
- SM6350_SLAVE_QUP_1,
- SM6350_SLAVE_CLK_CTL
- },
+ .link_nodes = { &qhs_camera_cfg,
+ &qhs_sdc2,
+ &qhs_mnoc_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_qm_cfg,
+ &qhs_snoc_cfg,
+ &qhs_qm_mpu_cfg,
+ &qhs_glm,
+ &qhs_pdm,
+ &qhs_camera_nrt_thrott_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_qdss_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_display_cfg,
+ &qhs_tcsr,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_npu_cfg,
+ &qhs_ahb2phy0,
+ &qhs_gpuss_cfg,
+ &qhs_boot_rom,
+ &qhs_venus_cfg,
+ &qhs_ipa,
+ &qhs_security,
+ &qhs_imem_cfg,
+ &qhs_mss_cfg,
+ &srvc_cnoc,
+ &qhs_usb3_0,
+ &qhs_venus_throttle_cfg,
+ &qhs_cpr_cx,
+ &qhs_a1_noc_cfg,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_emmc_cfg,
+ &qhs_crypto0_cfg,
+ &qhs_pimem_cfg,
+ &qhs_cpr_mx,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_clk_ctl },
};
static struct qcom_icc_node qhm_cnoc_dc_noc = {
.name = "qhm_cnoc_dc_noc",
- .id = SM6350_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SM6350_SLAVE_LLCC_CFG,
- SM6350_SLAVE_GEM_NOC_CFG
- },
+ .link_nodes = { &qhs_llcc,
+ &qhs_gemnoc },
+};
+
+static struct qcom_icc_qosbox acm_apps_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x2f100, 0x2f000 },
+ .prio = 0,
+ .urg_fwd = 0,
};
static struct qcom_icc_node acm_apps = {
.name = "acm_apps",
- .id = SM6350_MASTER_AMPSS_M0,
.channels = 1,
.buswidth = 16,
+ .qosbox = &acm_apps_qos,
.num_links = 2,
- .links = { SM6350_SLAVE_LLCC,
- SM6350_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
+};
+
+static struct qcom_icc_qosbox acm_sys_tcu_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x35000 },
+ .prio = 6,
+ .urg_fwd = 0,
};
static struct qcom_icc_node acm_sys_tcu = {
.name = "acm_sys_tcu",
- .id = SM6350_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
+ .qosbox = &acm_sys_tcu_qos,
.num_links = 2,
- .links = { SM6350_SLAVE_LLCC,
- SM6350_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
- .id = SM6350_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 3,
- .links = { SM6350_SLAVE_MCDMA_MS_MPU_CFG,
- SM6350_SLAVE_SERVICE_GEM_NOC,
- SM6350_SLAVE_MSS_PROC_MS_MPU_CFG
- },
+ .link_nodes = { &qhs_mcdma_ms_mpu_cfg,
+ &srvc_gemnoc,
+ &qhs_mdsp_ms_mpu_cfg },
+};
+
+static struct qcom_icc_qosbox qnm_cmpnoc_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x2e000 },
+ .prio = 0,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
- .id = SM6350_MASTER_COMPUTE_NOC,
.channels = 1,
.buswidth = 32,
+ .qosbox = &qnm_cmpnoc_qos,
.num_links = 2,
- .links = { SM6350_SLAVE_LLCC,
- SM6350_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
+};
+
+static struct qcom_icc_qosbox qnm_mnoc_hf_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x30000 },
+ .prio = 0,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SM6350_MASTER_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
+ .qosbox = &qnm_mnoc_hf_qos,
.num_links = 2,
- .links = { SM6350_SLAVE_LLCC,
- SM6350_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
+};
+
+static struct qcom_icc_qosbox qnm_mnoc_sf_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x34000 },
+ .prio = 0,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SM6350_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
+ .qosbox = &qnm_mnoc_sf_qos,
.num_links = 2,
- .links = { SM6350_SLAVE_LLCC,
- SM6350_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
+};
+
+static struct qcom_icc_qosbox qnm_snoc_gc_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x32000 },
+ .prio = 0,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SM6350_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qnm_snoc_gc_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_qosbox qnm_snoc_sf_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x31000 },
+ .prio = 0,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SM6350_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
+ .qosbox = &qnm_snoc_sf_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_qosbox qxm_gpu_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0x33000, 0x33080 },
+ .prio = 0,
+ .urg_fwd = 0,
};
static struct qcom_icc_node qxm_gpu = {
.name = "qxm_gpu",
- .id = SM6350_MASTER_GRAPHICS_3D,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qxm_gpu_qos,
.num_links = 2,
- .links = { SM6350_SLAVE_LLCC,
- SM6350_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SM6350_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_SLAVE_EBI_CH0 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
- .id = SM6350_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
+};
+
+static struct qcom_icc_qosbox qnm_video0_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xf000 },
+ .prio = 2,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
- .id = SM6350_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
+ .qosbox = &qnm_video0_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_qosbox qnm_video_cvp_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xe000 },
+ .prio = 5,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = SM6350_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qnm_video_cvp_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_qosbox qxm_camnoc_hf_qos = {
+ .num_ports = 2,
+ .port_offsets = { 0xa000, 0xb000 },
+ .prio = 3,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qxm_camnoc_hf = {
.name = "qxm_camnoc_hf",
- .id = SM6350_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
+ .qosbox = &qxm_camnoc_hf_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_qosbox qxm_camnoc_icp_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 5,
+ .urg_fwd = 0,
};
static struct qcom_icc_node qxm_camnoc_icp = {
.name = "qxm_camnoc_icp",
- .id = SM6350_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qxm_camnoc_icp_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_qosbox qxm_camnoc_sf_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0x9000 },
+ .prio = 3,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
- .id = SM6350_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
+ .qosbox = &qxm_camnoc_sf_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_qosbox qxm_mdp0_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 3,
+ .urg_fwd = 1,
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = SM6350_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
+ .qosbox = &qxm_mdp0_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node amm_npu_sys = {
.name = "amm_npu_sys",
- .id = SM6350_MASTER_NPU_SYS,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM6350_SLAVE_NPU_COMPUTE_NOC },
+ .link_nodes = { &qns_npu_sys },
};
static struct qcom_icc_node qhm_npu_cfg = {
.name = "qhm_npu_cfg",
- .id = SM6350_MASTER_NPU_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 8,
- .links = { SM6350_SLAVE_SERVICE_NPU_NOC,
- SM6350_SLAVE_ISENSE_CFG,
- SM6350_SLAVE_NPU_LLM_CFG,
- SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG,
- SM6350_SLAVE_NPU_CP,
- SM6350_SLAVE_NPU_TCM,
- SM6350_SLAVE_NPU_CAL_DP0,
- SM6350_SLAVE_NPU_DPM
- },
+ .link_nodes = { &srvc_noc,
+ &qhs_isense,
+ &qhs_llm,
+ &qhs_dma_bwmon,
+ &qhs_cp,
+ &qhs_tcm,
+ &qhs_cal_dp0,
+ &qhs_dpm },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = SM6350_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SM6350_A1NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 6,
- .links = { SM6350_SLAVE_SNOC_GEM_NOC_SF,
- SM6350_SLAVE_PIMEM,
- SM6350_SLAVE_OCIMEM,
- SM6350_SLAVE_APPSS,
- SM6350_SNOC_CNOC_SLV,
- SM6350_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qns_gemnoc_sf,
+ &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SM6350_A2NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 7,
- .links = { SM6350_SLAVE_SNOC_GEM_NOC_SF,
- SM6350_SLAVE_PIMEM,
- SM6350_SLAVE_OCIMEM,
- SM6350_SLAVE_APPSS,
- SM6350_SNOC_CNOC_SLV,
- SM6350_SLAVE_TCU,
- SM6350_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qns_gemnoc_sf,
+ &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_sys_tcu_cfg,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
- .id = SM6350_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
- .links = { SM6350_SLAVE_PIMEM,
- SM6350_SLAVE_OCIMEM,
- SM6350_SLAVE_APPSS,
- SM6350_SNOC_CNOC_SLV,
- SM6350_SLAVE_TCU,
- SM6350_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_sys_tcu_cfg,
+ &xs_qdss_stm },
+};
+
+static struct qcom_icc_qosbox qxm_pimem_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 2,
+ .urg_fwd = 0,
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SM6350_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
+ .qosbox = &qxm_pimem_qos,
.num_links = 2,
- .links = { SM6350_SLAVE_SNOC_GEM_NOC_GC,
- SM6350_SLAVE_OCIMEM
- },
+ .link_nodes = { &qns_gemnoc_gc,
+ &qxs_imem },
+};
+
+static struct qcom_icc_qosbox xm_gic_qos = {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 3,
+ .urg_fwd = 0,
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SM6350_MASTER_GIC,
.channels = 1,
.buswidth = 8,
+ .qosbox = &xm_gic_qos,
.num_links = 1,
- .links = { SM6350_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SM6350_A1NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM6350_A1NOC_SNOC_MAS },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SM6350_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SM6350_A2NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM6350_A2NOC_SNOC_MAS },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SM6350_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
- .id = SM6350_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SM6350_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SM6350_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cdsp_gemnoc = {
.name = "qns_cdsp_gemnoc",
- .id = SM6350_SLAVE_CDSP_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM6350_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
- .id = SM6350_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_MASTER_A1NOC_CFG },
+ .link_nodes = { &qhm_a1noc_cfg },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
- .id = SM6350_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_MASTER_A2NOC_CFG },
+ .link_nodes = { &qhm_a2noc_cfg },
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SM6350_SLAVE_AHB2PHY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
- .id = SM6350_SLAVE_AHB2PHY_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SM6350_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_boot_rom = {
.name = "qhs_boot_rom",
- .id = SM6350_SLAVE_BOOT_ROM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SM6350_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_nrt_thrott_cfg = {
.name = "qhs_camera_nrt_thrott_cfg",
- .id = SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
.name = "qhs_camera_rt_throttle_cfg",
- .id = SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SM6350_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SM6350_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = SM6350_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SM6350_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
- .id = SM6350_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = SM6350_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qhm_cnoc_dc_noc },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SM6350_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_throttle_cfg = {
.name = "qhs_display_throttle_cfg",
- .id = SM6350_SLAVE_DISPLAY_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emmc_cfg = {
.name = "qhs_emmc_cfg",
- .id = SM6350_SLAVE_EMMC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
- .id = SM6350_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SM6350_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SM6350_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SM6350_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
- .id = SM6350_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qhm_mnoc_cfg },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SM6350_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_npu_cfg = {
.name = "qhs_npu_cfg",
- .id = SM6350_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_MASTER_NPU_NOC_CFG },
+ .link_nodes = { &qhm_npu_cfg },
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SM6350_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SM6350_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SM6350_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SM6350_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_cfg = {
.name = "qhs_qm_cfg",
- .id = SM6350_SLAVE_QM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qm_mpu_cfg = {
.name = "qhs_qm_mpu_cfg",
- .id = SM6350_SLAVE_QM_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SM6350_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SM6350_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SM6350_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
- .id = SM6350_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = SM6350_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SM6350_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SM6350_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SM6350_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SM6350_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_throttle_cfg = {
.name = "qhs_venus_throttle_cfg",
- .id = SM6350_SLAVE_VENUS_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SM6350_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SM6350_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gemnoc = {
.name = "qhs_gemnoc",
- .id = SM6350_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM6350_MASTER_GEM_NOC_CFG },
+ .link_nodes = { &qhm_gemnoc_cfg },
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SM6350_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mcdma_ms_mpu_cfg = {
.name = "qhs_mcdma_ms_mpu_cfg",
- .id = SM6350_SLAVE_MCDMA_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
- .id = SM6350_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
- .id = SM6350_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM6350_MASTER_GEM_NOC_SNOC },
+ .link_nodes = { &qnm_gemnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SM6350_SLAVE_LLCC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM6350_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node srvc_gemnoc = {
.name = "srvc_gemnoc",
- .id = SM6350_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SM6350_SLAVE_EBI_CH0,
.channels = 2,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SM6350_SLAVE_MNOC_HF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM6350_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SM6350_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM6350_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SM6350_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cal_dp0 = {
.name = "qhs_cal_dp0",
- .id = SM6350_SLAVE_NPU_CAL_DP0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cp = {
.name = "qhs_cp",
- .id = SM6350_SLAVE_NPU_CP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dma_bwmon = {
.name = "qhs_dma_bwmon",
- .id = SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dpm = {
.name = "qhs_dpm",
- .id = SM6350_SLAVE_NPU_DPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_isense = {
.name = "qhs_isense",
- .id = SM6350_SLAVE_ISENSE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llm = {
.name = "qhs_llm",
- .id = SM6350_SLAVE_NPU_LLM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcm = {
.name = "qhs_tcm",
- .id = SM6350_SLAVE_NPU_TCM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_npu_sys = {
.name = "qns_npu_sys",
- .id = SM6350_SLAVE_NPU_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node srvc_noc = {
.name = "srvc_noc",
- .id = SM6350_SLAVE_SERVICE_NPU_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SM6350_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
- .id = SM6350_SNOC_CNOC_SLV,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM6350_SNOC_CNOC_MAS },
+ .link_nodes = { &qnm_snoc },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SM6350_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM6350_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SM6350_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM6350_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SM6350_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SM6350_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SM6350_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SM6350_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SM6350_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
@@ -1403,11 +1611,21 @@ static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
+static const struct regmap_config sm6350_aggre1_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x15080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sm6350_aggre1_noc = {
+ .config = &sm6350_aggre1_noc_regmap_config,
.nodes = aggre1_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
.bcms = aggre1_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+ .qos_requires_clocks = true,
};
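/*
 * Illustrative aside, not part of the patch: each NoC descriptor now
 * carries a regmap_config so the provider can map its register space and
 * program the qosboxes declared above at probe time. Setting
 * qos_requires_clocks appears to mark NoCs whose QoS registers are only
 * reachable while the bus clocks are running, so they are enabled around
 * the QoS writes.
 */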
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
@@ -1428,11 +1646,21 @@ static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
+static const struct regmap_config sm6350_aggre2_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f880,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sm6350_aggre2_noc = {
+ .config = &sm6350_aggre2_noc_regmap_config,
.nodes = aggre2_noc_nodes,
.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
.bcms = aggre2_noc_bcms,
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+ .qos_requires_clocks = true,
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
@@ -1474,7 +1702,16 @@ static struct qcom_icc_node * const compute_noc_nodes[] = {
[SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
};
+static const struct regmap_config sm6350_compute_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f880,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sm6350_compute_noc = {
+ .config = &sm6350_compute_noc_regmap_config,
.nodes = compute_noc_nodes,
.num_nodes = ARRAY_SIZE(compute_noc_nodes),
.bcms = compute_noc_bcms,
@@ -1541,20 +1778,24 @@ static const struct qcom_icc_desc sm6350_config_noc = {
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm * const dc_noc_bcms[] = {
-};
-
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
};
+static const struct regmap_config sm6350_dc_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3200,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sm6350_dc_noc = {
+ .config = &sm6350_dc_noc_regmap_config,
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
- .bcms = dc_noc_bcms,
- .num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
@@ -1581,7 +1822,16 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
};
+static const struct regmap_config sm6350_gem_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3e200,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sm6350_gem_noc = {
+ .config = &sm6350_gem_noc_regmap_config,
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
@@ -1608,16 +1858,22 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = {
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
+static const struct regmap_config sm6350_mmss_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1c100,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sm6350_mmss_noc = {
+ .config = &sm6350_mmss_noc_regmap_config,
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm * const npu_noc_bcms[] = {
-};
-
static struct qcom_icc_node * const npu_noc_nodes[] = {
[MASTER_NPU_SYS] = &amm_npu_sys,
[MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
@@ -1635,8 +1891,6 @@ static struct qcom_icc_node * const npu_noc_nodes[] = {
static const struct qcom_icc_desc sm6350_npu_noc = {
.nodes = npu_noc_nodes,
.num_nodes = ARRAY_SIZE(npu_noc_nodes),
- .bcms = npu_noc_bcms,
- .num_bcms = ARRAY_SIZE(npu_noc_bcms),
};
static struct qcom_icc_bcm * const system_noc_bcms[] = {
@@ -1668,7 +1922,16 @@ static struct qcom_icc_node * const system_noc_nodes[] = {
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
+static const struct regmap_config sm6350_system_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x17080,
+ .fast_io = true,
+};
+
static const struct qcom_icc_desc sm6350_system_noc = {
+ .config = &sm6350_system_noc_regmap_config,
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
diff --git a/drivers/interconnect/qcom/sm6350.h b/drivers/interconnect/qcom/sm6350.h
deleted file mode 100644
index 43cf2930c88a..000000000000
--- a/drivers/interconnect/qcom/sm6350.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Qualcomm #define SM6350 interconnect IDs
- *
- * Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SM6350_H
-#define __DRIVERS_INTERCONNECT_QCOM_SM6350_H
-
-#define SM6350_A1NOC_SNOC_MAS 0
-#define SM6350_A1NOC_SNOC_SLV 1
-#define SM6350_A2NOC_SNOC_MAS 2
-#define SM6350_A2NOC_SNOC_SLV 3
-#define SM6350_MASTER_A1NOC_CFG 4
-#define SM6350_MASTER_A2NOC_CFG 5
-#define SM6350_MASTER_AMPSS_M0 6
-#define SM6350_MASTER_CAMNOC_HF 7
-#define SM6350_MASTER_CAMNOC_HF0_UNCOMP 8
-#define SM6350_MASTER_CAMNOC_ICP 9
-#define SM6350_MASTER_CAMNOC_ICP_UNCOMP 10
-#define SM6350_MASTER_CAMNOC_SF 11
-#define SM6350_MASTER_CAMNOC_SF_UNCOMP 12
-#define SM6350_MASTER_CNOC_DC_NOC 13
-#define SM6350_MASTER_CNOC_MNOC_CFG 14
-#define SM6350_MASTER_COMPUTE_NOC 15
-#define SM6350_MASTER_CRYPTO_CORE_0 16
-#define SM6350_MASTER_EMMC 17
-#define SM6350_MASTER_GEM_NOC_CFG 18
-#define SM6350_MASTER_GEM_NOC_SNOC 19
-#define SM6350_MASTER_GIC 20
-#define SM6350_MASTER_GRAPHICS_3D 21
-#define SM6350_MASTER_IPA 22
-#define SM6350_MASTER_LLCC 23
-#define SM6350_MASTER_MDP_PORT0 24
-#define SM6350_MASTER_MNOC_HF_MEM_NOC 25
-#define SM6350_MASTER_MNOC_SF_MEM_NOC 26
-#define SM6350_MASTER_NPU 27
-#define SM6350_MASTER_NPU_NOC_CFG 28
-#define SM6350_MASTER_NPU_PROC 29
-#define SM6350_MASTER_NPU_SYS 30
-#define SM6350_MASTER_PIMEM 31
-#define SM6350_MASTER_QDSS_BAM 32
-#define SM6350_MASTER_QDSS_DAP 33
-#define SM6350_MASTER_QDSS_ETR 34
-#define SM6350_MASTER_QUP_0 35
-#define SM6350_MASTER_QUP_1 36
-#define SM6350_MASTER_QUP_CORE_0 37
-#define SM6350_MASTER_QUP_CORE_1 38
-#define SM6350_MASTER_SDCC_2 39
-#define SM6350_MASTER_SNOC_CFG 40
-#define SM6350_MASTER_SNOC_GC_MEM_NOC 41
-#define SM6350_MASTER_SNOC_SF_MEM_NOC 42
-#define SM6350_MASTER_SYS_TCU 43
-#define SM6350_MASTER_UFS_MEM 44
-#define SM6350_MASTER_USB3 45
-#define SM6350_MASTER_VIDEO_P0 46
-#define SM6350_MASTER_VIDEO_PROC 47
-#define SM6350_SLAVE_A1NOC_CFG 48
-#define SM6350_SLAVE_A2NOC_CFG 49
-#define SM6350_SLAVE_AHB2PHY 50
-#define SM6350_SLAVE_AHB2PHY_2 51
-#define SM6350_SLAVE_AOSS 52
-#define SM6350_SLAVE_APPSS 53
-#define SM6350_SLAVE_BOOT_ROM 54
-#define SM6350_SLAVE_CAMERA_CFG 55
-#define SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG 56
-#define SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG 57
-#define SM6350_SLAVE_CAMNOC_UNCOMP 58
-#define SM6350_SLAVE_CDSP_GEM_NOC 59
-#define SM6350_SLAVE_CLK_CTL 60
-#define SM6350_SLAVE_CNOC_DDRSS 61
-#define SM6350_SLAVE_CNOC_MNOC_CFG 62
-#define SM6350_SLAVE_CNOC_MSS 63
-#define SM6350_SLAVE_CRYPTO_0_CFG 64
-#define SM6350_SLAVE_DCC_CFG 65
-#define SM6350_SLAVE_DISPLAY_CFG 66
-#define SM6350_SLAVE_DISPLAY_THROTTLE_CFG 67
-#define SM6350_SLAVE_EBI_CH0 68
-#define SM6350_SLAVE_EMMC_CFG 69
-#define SM6350_SLAVE_GEM_NOC_CFG 70
-#define SM6350_SLAVE_GEM_NOC_SNOC 71
-#define SM6350_SLAVE_GLM 72
-#define SM6350_SLAVE_GRAPHICS_3D_CFG 73
-#define SM6350_SLAVE_IMEM_CFG 74
-#define SM6350_SLAVE_IPA_CFG 75
-#define SM6350_SLAVE_ISENSE_CFG 76
-#define SM6350_SLAVE_LLCC 77
-#define SM6350_SLAVE_LLCC_CFG 78
-#define SM6350_SLAVE_MCDMA_MS_MPU_CFG 79
-#define SM6350_SLAVE_MNOC_HF_MEM_NOC 80
-#define SM6350_SLAVE_MNOC_SF_MEM_NOC 81
-#define SM6350_SLAVE_MSS_PROC_MS_MPU_CFG 82
-#define SM6350_SLAVE_NPU_CAL_DP0 83
-#define SM6350_SLAVE_NPU_CFG 84
-#define SM6350_SLAVE_NPU_COMPUTE_NOC 85
-#define SM6350_SLAVE_NPU_CP 86
-#define SM6350_SLAVE_NPU_DPM 87
-#define SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG 88
-#define SM6350_SLAVE_NPU_LLM_CFG 89
-#define SM6350_SLAVE_NPU_TCM 90
-#define SM6350_SLAVE_OCIMEM 91
-#define SM6350_SLAVE_PDM 92
-#define SM6350_SLAVE_PIMEM 93
-#define SM6350_SLAVE_PIMEM_CFG 94
-#define SM6350_SLAVE_PRNG 95
-#define SM6350_SLAVE_QDSS_CFG 96
-#define SM6350_SLAVE_QDSS_STM 97
-#define SM6350_SLAVE_QM_CFG 98
-#define SM6350_SLAVE_QM_MPU_CFG 99
-#define SM6350_SLAVE_QUP_0 100
-#define SM6350_SLAVE_QUP_1 101
-#define SM6350_SLAVE_QUP_CORE_0 102
-#define SM6350_SLAVE_QUP_CORE_1 103
-#define SM6350_SLAVE_RBCPR_CX_CFG 104
-#define SM6350_SLAVE_RBCPR_MX_CFG 105
-#define SM6350_SLAVE_SDCC_2 106
-#define SM6350_SLAVE_SECURITY 107
-#define SM6350_SLAVE_SERVICE_A1NOC 108
-#define SM6350_SLAVE_SERVICE_A2NOC 109
-#define SM6350_SLAVE_SERVICE_CNOC 110
-#define SM6350_SLAVE_SERVICE_GEM_NOC 111
-#define SM6350_SLAVE_SERVICE_MNOC 112
-#define SM6350_SLAVE_SERVICE_NPU_NOC 113
-#define SM6350_SLAVE_SERVICE_SNOC 114
-#define SM6350_SLAVE_SNOC_CFG 115
-#define SM6350_SLAVE_SNOC_GEM_NOC_GC 116
-#define SM6350_SLAVE_SNOC_GEM_NOC_SF 117
-#define SM6350_SLAVE_TCSR 118
-#define SM6350_SLAVE_TCU 119
-#define SM6350_SLAVE_UFS_MEM_CFG 120
-#define SM6350_SLAVE_USB3 121
-#define SM6350_SLAVE_VENUS_CFG 122
-#define SM6350_SLAVE_VENUS_THROTTLE_CFG 123
-#define SM6350_SLAVE_VSENSE_CTRL_CFG 124
-#define SM6350_SNOC_CNOC_MAS 125
-#define SM6350_SNOC_CNOC_SLV 126
-
-#endif
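With links expressed as pointers, the private 0..126 ID space above has no remaining users: the provider node tables were already indexed by the shared dt-bindings constants, as the dc_noc hunk earlier in this patch shows. A short sketch of that indexing, assuming the usual dt-bindings header path for this SoC:

#include <dt-bindings/interconnect/qcom,sm6350.h>

/* Node tables index by the public binding IDs, so the deleted
 * SM6350_* macros served only the removed .links arrays. */
static struct qcom_icc_node * const example_dc_noc_nodes[] = {
	[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
	[SLAVE_LLCC_CFG] = &qhs_llcc,
};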
diff --git a/drivers/interconnect/qcom/sm7150.c b/drivers/interconnect/qcom/sm7150.c
index c8c77407cd50..0390d0468b48 100644
--- a/drivers/interconnect/qcom/sm7150.c
+++ b/drivers/interconnect/qcom/sm7150.c
@@ -14,1169 +14,1154 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sm7150.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg;
+static struct qcom_icc_node qhm_qup_center;
+static struct qcom_icc_node qhm_tsif;
+static struct qcom_icc_node xm_emmc;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node qhm_a2noc_cfg;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup_north;
+static struct qcom_icc_node qnm_cnoc;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp;
+static struct qcom_icc_node qxm_camnoc_rt_uncomp;
+static struct qcom_icc_node qxm_camnoc_sf_uncomp;
+static struct qcom_icc_node qxm_camnoc_nrt_uncomp;
+static struct qcom_icc_node qnm_npu;
+static struct qcom_icc_node qhm_spdm;
+static struct qcom_icc_node qnm_snoc;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node qhm_cnoc_dc_noc;
+static struct qcom_icc_node acm_apps;
+static struct qcom_icc_node acm_sys_tcu;
+static struct qcom_icc_node qhm_gemnoc_cfg;
+static struct qcom_icc_node qnm_cmpnoc;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qxm_gpu;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qhm_mnoc_cfg;
+static struct qcom_icc_node qxm_camnoc_hf;
+static struct qcom_icc_node qxm_camnoc_nrt;
+static struct qcom_icc_node qxm_camnoc_rt;
+static struct qcom_icc_node qxm_camnoc_sf;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node qxm_mdp1;
+static struct qcom_icc_node qxm_rot;
+static struct qcom_icc_node qxm_venus0;
+static struct qcom_icc_node qxm_venus1;
+static struct qcom_icc_node qxm_venus_arm9;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_gemnoc;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qns_pcie_gemnoc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qns_camnoc_uncomp;
+static struct qcom_icc_node qns_cdsp_gemnoc;
+static struct qcom_icc_node qhs_a1_noc_cfg;
+static struct qcom_icc_node qhs_a2_noc_cfg;
+static struct qcom_icc_node qhs_ahb2phy_north;
+static struct qcom_icc_node qhs_ahb2phy_south;
+static struct qcom_icc_node qhs_ahb2phy_west;
+static struct qcom_icc_node qhs_aop;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_camera_nrt_thrott_cfg;
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute_dsp_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_display_throttle_cfg;
+static struct qcom_icc_node qhs_emmc_cfg;
+static struct qcom_icc_node qhs_glm;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_mnoc_cfg;
+static struct qcom_icc_node qhs_pcie_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qupv3_center;
+static struct qcom_icc_node qhs_qupv3_north;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_spdm;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm_north;
+static struct qcom_icc_node qhs_tlmm_south;
+static struct qcom_icc_node qhs_tlmm_west;
+static struct qcom_icc_node qhs_tsif;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_venus_cvp_throttle_cfg;
+static struct qcom_icc_node qhs_venus_throttle_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_cnoc_a2noc;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node qhs_gemnoc;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node qns_gem_noc_snoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node srvc_gemnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns2_mem_noc;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qns_cnoc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
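The block of tentative declarations above is what lets .link_nodes initializers take the address of peers defined later in the file — qhm_a1noc_cfg at the top of this file links to srvc_aggre1_noc, which is defined hundreds of lines below. C's tentative-definition rule for objects with internal linkage makes a bare declaration followed by a full definition legal; a two-node sketch of the idiom:

/* Tentative definition: the name is in scope before the
 * initializer below needs its address. */
static struct qcom_icc_node node_b;

static struct qcom_icc_node node_a = {
	.name = "node_a",
	.num_links = 1,
	.link_nodes = { &node_b },	/* address of a later definition */
};

static struct qcom_icc_node node_b = {
	.name = "node_b",
	.channels = 1,
	.buswidth = 4,
};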
static struct qcom_icc_node qhm_a1noc_cfg = {
- .name = "qhm-a1noc-cfg",
- .id = SM7150_MASTER_A1NOC_CFG,
+ .name = "qhm_a1noc_cfg",
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node qhm_qup_center = {
.name = "qhm_qup_center",
- .id = SM7150_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_tsif = {
.name = "qhm_tsif",
- .id = SM7150_MASTER_TSIF,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emmc = {
.name = "xm_emmc",
- .id = SM7150_MASTER_EMMC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SM7150_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SM7150_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SM7150_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
- .id = SM7150_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SM7150_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup_north = {
.name = "qhm_qup_north",
- .id = SM7150_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
- .id = SM7150_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SM7150_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SM7150_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SM7150_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_gemnoc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SM7150_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SM7150_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
- .id = SM7150_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_rt_uncomp = {
.name = "qxm_camnoc_rt_uncomp",
- .id = SM7150_MASTER_CAMNOC_RT_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
- .id = SM7150_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_nrt_uncomp = {
.name = "qxm_camnoc_nrt_uncomp",
- .id = SM7150_MASTER_CAMNOC_NRT_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qnm_npu = {
.name = "qnm_npu",
- .id = SM7150_MASTER_NPU,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_CDSP_GEM_NOC },
+ .link_nodes = { &qns_cdsp_gemnoc },
};
static struct qcom_icc_node qhm_spdm = {
.name = "qhm_spdm",
- .id = SM7150_MASTER_SPDM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_SLAVE_CNOC_A2NOC },
+ .link_nodes = { &qns_cnoc_a2noc },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
- .id = SM7150_SNOC_CNOC_MAS,
.channels = 1,
.buswidth = 8,
.num_links = 47,
- .links = { SM7150_SLAVE_TLMM_SOUTH,
- SM7150_SLAVE_CAMERA_CFG,
- SM7150_SLAVE_SDCC_4,
- SM7150_SLAVE_SDCC_2,
- SM7150_SLAVE_CNOC_MNOC_CFG,
- SM7150_SLAVE_UFS_MEM_CFG,
- SM7150_SLAVE_QUP_0,
- SM7150_SLAVE_GLM,
- SM7150_SLAVE_PDM,
- SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG,
- SM7150_SLAVE_A2NOC_CFG,
- SM7150_SLAVE_QDSS_CFG,
- SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG,
- SM7150_SLAVE_DISPLAY_CFG,
- SM7150_SLAVE_PCIE_CFG,
- SM7150_SLAVE_DISPLAY_THROTTLE_CFG,
- SM7150_SLAVE_TCSR,
- SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG,
- SM7150_SLAVE_CNOC_DDRSS,
- SM7150_SLAVE_AHB2PHY_NORTH,
- SM7150_SLAVE_SNOC_CFG,
- SM7150_SLAVE_GRAPHICS_3D_CFG,
- SM7150_SLAVE_VENUS_CFG,
- SM7150_SLAVE_TSIF,
- SM7150_SLAVE_CDSP_CFG,
- SM7150_SLAVE_CLK_CTL,
- SM7150_SLAVE_AOP,
- SM7150_SLAVE_QUP_1,
- SM7150_SLAVE_AHB2PHY_SOUTH,
- SM7150_SLAVE_SERVICE_CNOC,
- SM7150_SLAVE_AHB2PHY_WEST,
- SM7150_SLAVE_USB3,
- SM7150_SLAVE_VENUS_THROTTLE_CFG,
- SM7150_SLAVE_IPA_CFG,
- SM7150_SLAVE_RBCPR_CX_CFG,
- SM7150_SLAVE_TLMM_WEST,
- SM7150_SLAVE_A1NOC_CFG,
- SM7150_SLAVE_AOSS,
- SM7150_SLAVE_PRNG,
- SM7150_SLAVE_VSENSE_CTRL_CFG,
- SM7150_SLAVE_EMMC_CFG,
- SM7150_SLAVE_SPDM_WRAPPER,
- SM7150_SLAVE_CRYPTO_0_CFG,
- SM7150_SLAVE_PIMEM_CFG,
- SM7150_SLAVE_TLMM_NORTH,
- SM7150_SLAVE_RBCPR_MX_CFG,
- SM7150_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &qhs_tlmm_south,
+ &qhs_camera_cfg,
+ &qhs_sdc4,
+ &qhs_sdc2,
+ &qhs_mnoc_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_qupv3_center,
+ &qhs_glm,
+ &qhs_pdm,
+ &qhs_camera_nrt_thrott_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_qdss_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_display_cfg,
+ &qhs_pcie_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_tcsr,
+ &qhs_venus_cvp_throttle_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_ahb2phy_north,
+ &qhs_snoc_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_venus_cfg,
+ &qhs_tsif,
+ &qhs_compute_dsp_cfg,
+ &qhs_clk_ctl,
+ &qhs_aop,
+ &qhs_qupv3_north,
+ &qhs_ahb2phy_south,
+ &srvc_cnoc,
+ &qhs_ahb2phy_west,
+ &qhs_usb3_0,
+ &qhs_venus_throttle_cfg,
+ &qhs_ipa,
+ &qhs_cpr_cx,
+ &qhs_tlmm_west,
+ &qhs_a1_noc_cfg,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_emmc_cfg,
+ &qhs_spdm,
+ &qhs_crypto0_cfg,
+ &qhs_pimem_cfg,
+ &qhs_tlmm_north,
+ &qhs_cpr_mx,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = SM7150_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 48,
- .links = { SM7150_SLAVE_TLMM_SOUTH,
- SM7150_SLAVE_CAMERA_CFG,
- SM7150_SLAVE_SDCC_4,
- SM7150_SLAVE_SDCC_2,
- SM7150_SLAVE_CNOC_MNOC_CFG,
- SM7150_SLAVE_UFS_MEM_CFG,
- SM7150_SLAVE_QUP_0,
- SM7150_SLAVE_GLM,
- SM7150_SLAVE_PDM,
- SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG,
- SM7150_SLAVE_A2NOC_CFG,
- SM7150_SLAVE_QDSS_CFG,
- SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG,
- SM7150_SLAVE_DISPLAY_CFG,
- SM7150_SLAVE_PCIE_CFG,
- SM7150_SLAVE_DISPLAY_THROTTLE_CFG,
- SM7150_SLAVE_TCSR,
- SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG,
- SM7150_SLAVE_CNOC_DDRSS,
- SM7150_SLAVE_CNOC_A2NOC,
- SM7150_SLAVE_AHB2PHY_NORTH,
- SM7150_SLAVE_SNOC_CFG,
- SM7150_SLAVE_GRAPHICS_3D_CFG,
- SM7150_SLAVE_VENUS_CFG,
- SM7150_SLAVE_TSIF,
- SM7150_SLAVE_CDSP_CFG,
- SM7150_SLAVE_CLK_CTL,
- SM7150_SLAVE_AOP,
- SM7150_SLAVE_QUP_1,
- SM7150_SLAVE_AHB2PHY_SOUTH,
- SM7150_SLAVE_SERVICE_CNOC,
- SM7150_SLAVE_AHB2PHY_WEST,
- SM7150_SLAVE_USB3,
- SM7150_SLAVE_VENUS_THROTTLE_CFG,
- SM7150_SLAVE_IPA_CFG,
- SM7150_SLAVE_RBCPR_CX_CFG,
- SM7150_SLAVE_TLMM_WEST,
- SM7150_SLAVE_A1NOC_CFG,
- SM7150_SLAVE_AOSS,
- SM7150_SLAVE_PRNG,
- SM7150_SLAVE_VSENSE_CTRL_CFG,
- SM7150_SLAVE_EMMC_CFG,
- SM7150_SLAVE_SPDM_WRAPPER,
- SM7150_SLAVE_CRYPTO_0_CFG,
- SM7150_SLAVE_PIMEM_CFG,
- SM7150_SLAVE_TLMM_NORTH,
- SM7150_SLAVE_RBCPR_MX_CFG,
- SM7150_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &qhs_tlmm_south,
+ &qhs_camera_cfg,
+ &qhs_sdc4,
+ &qhs_sdc2,
+ &qhs_mnoc_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_qupv3_center,
+ &qhs_glm,
+ &qhs_pdm,
+ &qhs_camera_nrt_thrott_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_qdss_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_display_cfg,
+ &qhs_pcie_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_tcsr,
+ &qhs_venus_cvp_throttle_cfg,
+ &qhs_ddrss_cfg,
+ &qns_cnoc_a2noc,
+ &qhs_ahb2phy_north,
+ &qhs_snoc_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_venus_cfg,
+ &qhs_tsif,
+ &qhs_compute_dsp_cfg,
+ &qhs_clk_ctl,
+ &qhs_aop,
+ &qhs_qupv3_north,
+ &qhs_ahb2phy_south,
+ &srvc_cnoc,
+ &qhs_ahb2phy_west,
+ &qhs_usb3_0,
+ &qhs_venus_throttle_cfg,
+ &qhs_ipa,
+ &qhs_cpr_cx,
+ &qhs_tlmm_west,
+ &qhs_a1_noc_cfg,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_emmc_cfg,
+ &qhs_spdm,
+ &qhs_crypto0_cfg,
+ &qhs_pimem_cfg,
+ &qhs_tlmm_north,
+ &qhs_cpr_mx,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node qhm_cnoc_dc_noc = {
.name = "qhm_cnoc_dc_noc",
- .id = SM7150_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SM7150_SLAVE_LLCC_CFG,
- SM7150_SLAVE_GEM_NOC_CFG
- },
+ .link_nodes = { &qhs_llcc,
+ &qhs_gemnoc },
};
static struct qcom_icc_node acm_apps = {
.name = "acm_apps",
- .id = SM7150_MASTER_AMPSS_M0,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SM7150_SLAVE_LLCC,
- SM7150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node acm_sys_tcu = {
.name = "acm_sys_tcu",
- .id = SM7150_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM7150_SLAVE_LLCC,
- SM7150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
- .id = SM7150_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SM7150_SLAVE_SERVICE_GEM_NOC,
- SM7150_SLAVE_MSS_PROC_MS_MPU_CFG
- },
+ .link_nodes = { &srvc_gemnoc,
+ &qhs_mdsp_ms_mpu_cfg },
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
- .id = SM7150_MASTER_COMPUTE_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { SM7150_SLAVE_LLCC,
- SM7150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SM7150_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SM7150_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { SM7150_SLAVE_LLCC,
- SM7150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SM7150_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM7150_SLAVE_LLCC,
- SM7150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SM7150_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SM7150_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM7150_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qxm_gpu = {
.name = "qxm_gpu",
- .id = SM7150_MASTER_GRAPHICS_3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM7150_SLAVE_LLCC,
- SM7150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SM7150_MASTER_LLCC,
.channels = 2,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_SLAVE_EBI_CH0 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
- .id = SM7150_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qxm_camnoc_hf = {
.name = "qxm_camnoc_hf",
- .id = SM7150_MASTER_CAMNOC_HF0,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_nrt = {
.name = "qxm_camnoc_nrt",
- .id = SM7150_MASTER_CAMNOC_NRT,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_camnoc_rt = {
.name = "qxm_camnoc_rt",
- .id = SM7150_MASTER_CAMNOC_RT,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
- .id = SM7150_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = SM7150_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
- .id = SM7150_MASTER_MDP_PORT1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
- .id = SM7150_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
- .id = SM7150_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus1 = {
.name = "qxm_venus1",
- .id = SM7150_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
- .id = SM7150_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = SM7150_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SM7150_A1NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 6,
- .links = { SM7150_SLAVE_SNOC_GEM_NOC_SF,
- SM7150_SLAVE_PIMEM,
- SM7150_SLAVE_OCIMEM,
- SM7150_SLAVE_APPSS,
- SM7150_SNOC_CNOC_SLV,
- SM7150_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qns_gemnoc_sf,
+ &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SM7150_A2NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 7,
- .links = { SM7150_SLAVE_SNOC_GEM_NOC_SF,
- SM7150_SLAVE_PIMEM,
- SM7150_SLAVE_OCIMEM,
- SM7150_SLAVE_APPSS,
- SM7150_SNOC_CNOC_SLV,
- SM7150_SLAVE_TCU,
- SM7150_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qns_gemnoc_sf,
+ &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_sys_tcu_cfg,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
- .id = SM7150_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
- .links = { SM7150_SLAVE_PIMEM,
- SM7150_SLAVE_OCIMEM,
- SM7150_SLAVE_APPSS,
- SM7150_SNOC_CNOC_SLV,
- SM7150_SLAVE_TCU,
- SM7150_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_sys_tcu_cfg,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SM7150_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM7150_SLAVE_SNOC_GEM_NOC_GC,
- SM7150_SLAVE_OCIMEM
- },
+ .link_nodes = { &qns_gemnoc_gc,
+ &qxs_imem },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SM7150_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM7150_SLAVE_SNOC_GEM_NOC_GC,
- SM7150_SLAVE_OCIMEM
- },
+ .link_nodes = { &qns_gemnoc_gc,
+ &qxs_imem },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SM7150_A1NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM7150_A1NOC_SNOC_MAS },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SM7150_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SM7150_A2NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM7150_A2NOC_SNOC_MAS },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qns_pcie_gemnoc = {
.name = "qns_pcie_gemnoc",
- .id = SM7150_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SM7150_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
- .id = SM7150_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_cdsp_gemnoc = {
.name = "qns_cdsp_gemnoc",
- .id = SM7150_SLAVE_CDSP_GEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
- .id = SM7150_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_MASTER_A1NOC_CFG },
+ .link_nodes = { &qhm_a1noc_cfg },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
- .id = SM7150_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_MASTER_A2NOC_CFG },
+ .link_nodes = { &qhm_a2noc_cfg },
};
static struct qcom_icc_node qhs_ahb2phy_north = {
.name = "qhs_ahb2phy_north",
- .id = SM7150_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy_south = {
.name = "qhs_ahb2phy_south",
- .id = SM7150_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy_west = {
.name = "qhs_ahb2phy_west",
- .id = SM7150_SLAVE_AHB2PHY_WEST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
- .id = SM7150_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SM7150_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SM7150_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_nrt_thrott_cfg = {
.name = "qhs_camera_nrt_thrott_cfg",
- .id = SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
.name = "qhs_camera_rt_throttle_cfg",
- .id = SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SM7150_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_dsp_cfg = {
.name = "qhs_compute_dsp_cfg",
- .id = SM7150_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SM7150_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = SM7150_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SM7150_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = SM7150_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qhm_cnoc_dc_noc },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SM7150_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_throttle_cfg = {
.name = "qhs_display_throttle_cfg",
- .id = SM7150_SLAVE_DISPLAY_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emmc_cfg = {
.name = "qhs_emmc_cfg",
- .id = SM7150_SLAVE_EMMC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
- .id = SM7150_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SM7150_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SM7150_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SM7150_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
- .id = SM7150_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qhm_mnoc_cfg },
};
static struct qcom_icc_node qhs_pcie_cfg = {
.name = "qhs_pcie_cfg",
- .id = SM7150_SLAVE_PCIE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SM7150_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SM7150_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SM7150_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SM7150_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_center = {
.name = "qhs_qupv3_center",
- .id = SM7150_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_north = {
.name = "qhs_qupv3_north",
- .id = SM7150_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SM7150_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SM7150_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = SM7150_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_spdm = {
.name = "qhs_spdm",
- .id = SM7150_SLAVE_SPDM_WRAPPER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SM7150_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_north = {
.name = "qhs_tlmm_north",
- .id = SM7150_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_south = {
.name = "qhs_tlmm_south",
- .id = SM7150_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_west = {
.name = "qhs_tlmm_west",
- .id = SM7150_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsif = {
.name = "qhs_tsif",
- .id = SM7150_SLAVE_TSIF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SM7150_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SM7150_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SM7150_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cvp_throttle_cfg = {
.name = "qhs_venus_cvp_throttle_cfg",
- .id = SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_throttle_cfg = {
.name = "qhs_venus_throttle_cfg",
- .id = SM7150_SLAVE_VENUS_THROTTLE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SM7150_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
- .id = SM7150_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_MASTER_CNOC_A2NOC },
+ .link_nodes = { &qnm_cnoc },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SM7150_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gemnoc = {
.name = "qhs_gemnoc",
- .id = SM7150_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM7150_MASTER_GEM_NOC_CFG },
+ .link_nodes = { &qhm_gemnoc_cfg },
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SM7150_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
- .id = SM7150_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
- .id = SM7150_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_MASTER_GEM_NOC_SNOC },
+ .link_nodes = { &qnm_gemnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SM7150_SLAVE_LLCC,
.channels = 2,
.buswidth = 16,
.num_links = 1,
- .links = { SM7150_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node srvc_gemnoc = {
.name = "srvc_gemnoc",
- .id = SM7150_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SM7150_SLAVE_EBI_CH0,
.channels = 2,
.buswidth = 4,
};
static struct qcom_icc_node qns2_mem_noc = {
.name = "qns2_mem_noc",
- .id = SM7150_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SM7150_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM7150_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SM7150_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SM7150_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
- .id = SM7150_SNOC_CNOC_SLV,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_SNOC_CNOC_MAS },
+ .link_nodes = { &qnm_snoc },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SM7150_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM7150_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SM7150_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM7150_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SM7150_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SM7150_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SM7150_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SM7150_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SM7150_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
diff --git a/drivers/interconnect/qcom/sm7150.h b/drivers/interconnect/qcom/sm7150.h
deleted file mode 100644
index e00a9b0c1279..000000000000
--- a/drivers/interconnect/qcom/sm7150.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Qualcomm #define SM7150 interconnect IDs
- *
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SM7150_H
-#define __DRIVERS_INTERCONNECT_QCOM_SM7150_H
-
-#define SM7150_A1NOC_SNOC_MAS 0
-#define SM7150_A1NOC_SNOC_SLV 1
-#define SM7150_A2NOC_SNOC_MAS 2
-#define SM7150_A2NOC_SNOC_SLV 3
-#define SM7150_MASTER_A1NOC_CFG 4
-#define SM7150_MASTER_A2NOC_CFG 5
-#define SM7150_MASTER_AMPSS_M0 6
-#define SM7150_MASTER_CAMNOC_HF0 7
-#define SM7150_MASTER_CAMNOC_HF0_UNCOMP 8
-#define SM7150_MASTER_CAMNOC_NRT 9
-#define SM7150_MASTER_CAMNOC_NRT_UNCOMP 10
-#define SM7150_MASTER_CAMNOC_RT 11
-#define SM7150_MASTER_CAMNOC_RT_UNCOMP 12
-#define SM7150_MASTER_CAMNOC_SF 13
-#define SM7150_MASTER_CAMNOC_SF_UNCOMP 14
-#define SM7150_MASTER_CNOC_A2NOC 15
-#define SM7150_MASTER_CNOC_DC_NOC 16
-#define SM7150_MASTER_CNOC_MNOC_CFG 17
-#define SM7150_MASTER_COMPUTE_NOC 18
-#define SM7150_MASTER_CRYPTO_CORE_0 19
-#define SM7150_MASTER_EMMC 20
-#define SM7150_MASTER_GEM_NOC_CFG 21
-#define SM7150_MASTER_GEM_NOC_PCIE_SNOC 22
-#define SM7150_MASTER_GEM_NOC_SNOC 23
-#define SM7150_MASTER_GIC 24
-#define SM7150_MASTER_GRAPHICS_3D 25
-#define SM7150_MASTER_IPA 26
-#define SM7150_MASTER_LLCC 27
-#define SM7150_MASTER_MDP_PORT0 28
-#define SM7150_MASTER_MDP_PORT1 29
-#define SM7150_MASTER_MNOC_HF_MEM_NOC 30
-#define SM7150_MASTER_MNOC_SF_MEM_NOC 31
-#define SM7150_MASTER_NPU 32
-#define SM7150_MASTER_PCIE 33
-#define SM7150_MASTER_PIMEM 34
-#define SM7150_MASTER_QDSS_BAM 35
-#define SM7150_MASTER_QDSS_DAP 36
-#define SM7150_MASTER_QDSS_ETR 37
-#define SM7150_MASTER_QUP_0 38
-#define SM7150_MASTER_QUP_1 39
-#define SM7150_MASTER_ROTATOR 40
-#define SM7150_MASTER_SDCC_2 41
-#define SM7150_MASTER_SDCC_4 42
-#define SM7150_MASTER_SNOC_CFG 43
-#define SM7150_MASTER_SNOC_GC_MEM_NOC 44
-#define SM7150_MASTER_SNOC_SF_MEM_NOC 45
-#define SM7150_MASTER_SPDM 46
-#define SM7150_MASTER_SYS_TCU 47
-#define SM7150_MASTER_TSIF 48
-#define SM7150_MASTER_UFS_MEM 49
-#define SM7150_MASTER_USB3 50
-#define SM7150_MASTER_VIDEO_P0 51
-#define SM7150_MASTER_VIDEO_P1 52
-#define SM7150_MASTER_VIDEO_PROC 53
-#define SM7150_SLAVE_A1NOC_CFG 54
-#define SM7150_SLAVE_A2NOC_CFG 55
-#define SM7150_SLAVE_AHB2PHY_NORTH 56
-#define SM7150_SLAVE_AHB2PHY_SOUTH 57
-#define SM7150_SLAVE_AHB2PHY_WEST 58
-#define SM7150_SLAVE_ANOC_PCIE_GEM_NOC 59
-#define SM7150_SLAVE_AOP 60
-#define SM7150_SLAVE_AOSS 61
-#define SM7150_SLAVE_APPSS 62
-#define SM7150_SLAVE_CAMERA_CFG 63
-#define SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG 64
-#define SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG 65
-#define SM7150_SLAVE_CAMNOC_UNCOMP 66
-#define SM7150_SLAVE_CDSP_CFG 67
-#define SM7150_SLAVE_CDSP_GEM_NOC 68
-#define SM7150_SLAVE_CLK_CTL 69
-#define SM7150_SLAVE_CNOC_A2NOC 70
-#define SM7150_SLAVE_CNOC_DDRSS 71
-#define SM7150_SLAVE_CNOC_MNOC_CFG 72
-#define SM7150_SLAVE_CRYPTO_0_CFG 73
-#define SM7150_SLAVE_DISPLAY_CFG 74
-#define SM7150_SLAVE_DISPLAY_THROTTLE_CFG 75
-#define SM7150_SLAVE_EBI_CH0 76
-#define SM7150_SLAVE_EMMC_CFG 77
-#define SM7150_SLAVE_GEM_NOC_CFG 78
-#define SM7150_SLAVE_GEM_NOC_SNOC 79
-#define SM7150_SLAVE_GLM 80
-#define SM7150_SLAVE_GRAPHICS_3D_CFG 81
-#define SM7150_SLAVE_IMEM_CFG 82
-#define SM7150_SLAVE_IPA_CFG 83
-#define SM7150_SLAVE_LLCC 84
-#define SM7150_SLAVE_LLCC_CFG 85
-#define SM7150_SLAVE_MNOC_HF_MEM_NOC 86
-#define SM7150_SLAVE_MNOC_SF_MEM_NOC 87
-#define SM7150_SLAVE_MSS_PROC_MS_MPU_CFG 88
-#define SM7150_SLAVE_OCIMEM 89
-#define SM7150_SLAVE_PCIE_CFG 90
-#define SM7150_SLAVE_PDM 91
-#define SM7150_SLAVE_PIMEM 92
-#define SM7150_SLAVE_PIMEM_CFG 93
-#define SM7150_SLAVE_PRNG 94
-#define SM7150_SLAVE_QDSS_CFG 95
-#define SM7150_SLAVE_QDSS_STM 96
-#define SM7150_SLAVE_QUP_0 97
-#define SM7150_SLAVE_QUP_1 98
-#define SM7150_SLAVE_RBCPR_CX_CFG 99
-#define SM7150_SLAVE_RBCPR_MX_CFG 100
-#define SM7150_SLAVE_SDCC_2 101
-#define SM7150_SLAVE_SDCC_4 102
-#define SM7150_SLAVE_SERVICE_A1NOC 103
-#define SM7150_SLAVE_SERVICE_A2NOC 104
-#define SM7150_SLAVE_SERVICE_CNOC 105
-#define SM7150_SLAVE_SERVICE_GEM_NOC 106
-#define SM7150_SLAVE_SERVICE_MNOC 107
-#define SM7150_SLAVE_SERVICE_SNOC 108
-#define SM7150_SLAVE_SNOC_CFG 109
-#define SM7150_SLAVE_SNOC_GEM_NOC_GC 110
-#define SM7150_SLAVE_SNOC_GEM_NOC_SF 111
-#define SM7150_SLAVE_SPDM_WRAPPER 112
-#define SM7150_SLAVE_TCSR 113
-#define SM7150_SLAVE_TCU 114
-#define SM7150_SLAVE_TLMM_NORTH 115
-#define SM7150_SLAVE_TLMM_SOUTH 116
-#define SM7150_SLAVE_TLMM_WEST 117
-#define SM7150_SLAVE_TSIF 118
-#define SM7150_SLAVE_UFS_MEM_CFG 119
-#define SM7150_SLAVE_USB3 120
-#define SM7150_SLAVE_VENUS_CFG 121
-#define SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG 122
-#define SM7150_SLAVE_VENUS_THROTTLE_CFG 123
-#define SM7150_SLAVE_VSENSE_CTRL_CFG 124
-#define SM7150_SNOC_CNOC_MAS 125
-#define SM7150_SNOC_CNOC_SLV 126
-
-#endif
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index edfe824cad35..ae732afbd155 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -14,1268 +14,1252 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sm8150.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node xm_emac;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node xm_usb3_1;
+static struct qcom_icc_node qhm_a2noc_cfg;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qhm_sensorss_ahb;
+static struct qcom_icc_node qhm_tsif;
+static struct qcom_icc_node qnm_cnoc;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp;
+static struct qcom_icc_node qxm_camnoc_hf1_uncomp;
+static struct qcom_icc_node qxm_camnoc_sf_uncomp;
+static struct qcom_icc_node qnm_npu;
+static struct qcom_icc_node qhm_spdm;
+static struct qcom_icc_node qnm_snoc;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node qhm_cnoc_dc_noc;
+static struct qcom_icc_node acm_apps;
+static struct qcom_icc_node acm_gpu_tcu;
+static struct qcom_icc_node acm_sys_tcu;
+static struct qcom_icc_node qhm_gemnoc_cfg;
+static struct qcom_icc_node qnm_cmpnoc;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qxm_ecc;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qhm_mnoc_cfg;
+static struct qcom_icc_node qxm_camnoc_hf0;
+static struct qcom_icc_node qxm_camnoc_hf1;
+static struct qcom_icc_node qxm_camnoc_sf;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node qxm_mdp1;
+static struct qcom_icc_node qxm_rot;
+static struct qcom_icc_node qxm_venus0;
+static struct qcom_icc_node qxm_venus1;
+static struct qcom_icc_node qxm_venus_arm9;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_gemnoc;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qns_camnoc_uncomp;
+static struct qcom_icc_node qns_cdsp_mem_noc;
+static struct qcom_icc_node qhs_a1_noc_cfg;
+static struct qcom_icc_node qhs_a2_noc_cfg;
+static struct qcom_icc_node qhs_ahb2phy_south;
+static struct qcom_icc_node qhs_aop;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute_dsp;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_emac_cfg;
+static struct qcom_icc_node qhs_glm;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_mnoc_cfg;
+static struct qcom_icc_node qhs_npu_cfg;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_phy_refgen_north;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qupv3_east;
+static struct qcom_icc_node qhs_qupv3_north;
+static struct qcom_icc_node qhs_qupv3_south;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_spdm;
+static struct qcom_icc_node qhs_spss_cfg;
+static struct qcom_icc_node qhs_ssc_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm_east;
+static struct qcom_icc_node qhs_tlmm_north;
+static struct qcom_icc_node qhs_tlmm_south;
+static struct qcom_icc_node qhs_tlmm_west;
+static struct qcom_icc_node qhs_tsif;
+static struct qcom_icc_node qhs_ufs_card_cfg;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_usb3_1;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_cnoc_a2noc;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qhs_memnoc;
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node qns_ecc;
+static struct qcom_icc_node qns_gem_noc_snoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node srvc_gemnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns2_mem_noc;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qns_cnoc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
- .id = SM8150_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = SM8150_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_emac = {
.name = "xm_emac",
- .id = SM8150_MASTER_EMAC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SM8150_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SM8150_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
- .id = SM8150_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
- .id = SM8150_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SM8150_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SM8150_MASTER_QSPI,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SM8150_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SM8150_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_sensorss_ahb = {
.name = "qhm_sensorss_ahb",
- .id = SM8150_MASTER_SENSORS_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_tsif = {
.name = "qhm_tsif",
- .id = SM8150_MASTER_TSIF,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
- .id = SM8150_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SM8150_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SM8150_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SM8150_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SM8150_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SM8150_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SM8150_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SM8150_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
.name = "qxm_camnoc_hf0_uncomp",
- .id = SM8150_MASTER_CAMNOC_HF0_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_hf1_uncomp = {
.name = "qxm_camnoc_hf1_uncomp",
- .id = SM8150_MASTER_CAMNOC_HF1_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
.name = "qxm_camnoc_sf_uncomp",
- .id = SM8150_MASTER_CAMNOC_SF_UNCOMP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_CAMNOC_UNCOMP },
+ .link_nodes = { &qns_camnoc_uncomp },
};
static struct qcom_icc_node qnm_npu = {
.name = "qnm_npu",
- .id = SM8150_MASTER_NPU,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_cdsp_mem_noc },
};
static struct qcom_icc_node qhm_spdm = {
.name = "qhm_spdm",
- .id = SM8150_MASTER_SPDM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_SLAVE_CNOC_A2NOC },
+ .link_nodes = { &qns_cnoc_a2noc },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
- .id = SM8150_SNOC_CNOC_MAS,
.channels = 1,
.buswidth = 8,
.num_links = 50,
- .links = { SM8150_SLAVE_TLMM_SOUTH,
- SM8150_SLAVE_CDSP_CFG,
- SM8150_SLAVE_SPSS_CFG,
- SM8150_SLAVE_CAMERA_CFG,
- SM8150_SLAVE_SDCC_4,
- SM8150_SLAVE_SDCC_2,
- SM8150_SLAVE_CNOC_MNOC_CFG,
- SM8150_SLAVE_EMAC_CFG,
- SM8150_SLAVE_UFS_MEM_CFG,
- SM8150_SLAVE_TLMM_EAST,
- SM8150_SLAVE_SSC_CFG,
- SM8150_SLAVE_SNOC_CFG,
- SM8150_SLAVE_NORTH_PHY_CFG,
- SM8150_SLAVE_QUP_0,
- SM8150_SLAVE_GLM,
- SM8150_SLAVE_PCIE_1_CFG,
- SM8150_SLAVE_A2NOC_CFG,
- SM8150_SLAVE_QDSS_CFG,
- SM8150_SLAVE_DISPLAY_CFG,
- SM8150_SLAVE_TCSR,
- SM8150_SLAVE_CNOC_DDRSS,
- SM8150_SLAVE_RBCPR_MMCX_CFG,
- SM8150_SLAVE_NPU_CFG,
- SM8150_SLAVE_PCIE_0_CFG,
- SM8150_SLAVE_GRAPHICS_3D_CFG,
- SM8150_SLAVE_VENUS_CFG,
- SM8150_SLAVE_TSIF,
- SM8150_SLAVE_IPA_CFG,
- SM8150_SLAVE_CLK_CTL,
- SM8150_SLAVE_AOP,
- SM8150_SLAVE_QUP_1,
- SM8150_SLAVE_AHB2PHY_SOUTH,
- SM8150_SLAVE_USB3_1,
- SM8150_SLAVE_SERVICE_CNOC,
- SM8150_SLAVE_UFS_CARD_CFG,
- SM8150_SLAVE_QUP_2,
- SM8150_SLAVE_RBCPR_CX_CFG,
- SM8150_SLAVE_TLMM_WEST,
- SM8150_SLAVE_A1NOC_CFG,
- SM8150_SLAVE_AOSS,
- SM8150_SLAVE_PRNG,
- SM8150_SLAVE_VSENSE_CTRL_CFG,
- SM8150_SLAVE_QSPI,
- SM8150_SLAVE_USB3,
- SM8150_SLAVE_SPDM_WRAPPER,
- SM8150_SLAVE_CRYPTO_0_CFG,
- SM8150_SLAVE_PIMEM_CFG,
- SM8150_SLAVE_TLMM_NORTH,
- SM8150_SLAVE_RBCPR_MX_CFG,
- SM8150_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &qhs_tlmm_south,
+ &qhs_compute_dsp,
+ &qhs_spss_cfg,
+ &qhs_camera_cfg,
+ &qhs_sdc4,
+ &qhs_sdc2,
+ &qhs_mnoc_cfg,
+ &qhs_emac_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_tlmm_east,
+ &qhs_ssc_cfg,
+ &qhs_snoc_cfg,
+ &qhs_phy_refgen_north,
+ &qhs_qupv3_south,
+ &qhs_glm,
+ &qhs_pcie1_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_qdss_cfg,
+ &qhs_display_cfg,
+ &qhs_tcsr,
+ &qhs_ddrss_cfg,
+ &qhs_cpr_mmcx,
+ &qhs_npu_cfg,
+ &qhs_pcie0_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_venus_cfg,
+ &qhs_tsif,
+ &qhs_ipa,
+ &qhs_clk_ctl,
+ &qhs_aop,
+ &qhs_qupv3_north,
+ &qhs_ahb2phy_south,
+ &qhs_usb3_1,
+ &srvc_cnoc,
+ &qhs_ufs_card_cfg,
+ &qhs_qupv3_east,
+ &qhs_cpr_cx,
+ &qhs_tlmm_west,
+ &qhs_a1_noc_cfg,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_qspi,
+ &qhs_usb3_0,
+ &qhs_spdm,
+ &qhs_crypto0_cfg,
+ &qhs_pimem_cfg,
+ &qhs_tlmm_north,
+ &qhs_cpr_mx,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = SM8150_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 51,
- .links = { SM8150_SLAVE_TLMM_SOUTH,
- SM8150_SLAVE_CDSP_CFG,
- SM8150_SLAVE_SPSS_CFG,
- SM8150_SLAVE_CAMERA_CFG,
- SM8150_SLAVE_SDCC_4,
- SM8150_SLAVE_SDCC_2,
- SM8150_SLAVE_CNOC_MNOC_CFG,
- SM8150_SLAVE_EMAC_CFG,
- SM8150_SLAVE_UFS_MEM_CFG,
- SM8150_SLAVE_TLMM_EAST,
- SM8150_SLAVE_SSC_CFG,
- SM8150_SLAVE_SNOC_CFG,
- SM8150_SLAVE_NORTH_PHY_CFG,
- SM8150_SLAVE_QUP_0,
- SM8150_SLAVE_GLM,
- SM8150_SLAVE_PCIE_1_CFG,
- SM8150_SLAVE_A2NOC_CFG,
- SM8150_SLAVE_QDSS_CFG,
- SM8150_SLAVE_DISPLAY_CFG,
- SM8150_SLAVE_TCSR,
- SM8150_SLAVE_CNOC_DDRSS,
- SM8150_SLAVE_CNOC_A2NOC,
- SM8150_SLAVE_RBCPR_MMCX_CFG,
- SM8150_SLAVE_NPU_CFG,
- SM8150_SLAVE_PCIE_0_CFG,
- SM8150_SLAVE_GRAPHICS_3D_CFG,
- SM8150_SLAVE_VENUS_CFG,
- SM8150_SLAVE_TSIF,
- SM8150_SLAVE_IPA_CFG,
- SM8150_SLAVE_CLK_CTL,
- SM8150_SLAVE_AOP,
- SM8150_SLAVE_QUP_1,
- SM8150_SLAVE_AHB2PHY_SOUTH,
- SM8150_SLAVE_USB3_1,
- SM8150_SLAVE_SERVICE_CNOC,
- SM8150_SLAVE_UFS_CARD_CFG,
- SM8150_SLAVE_QUP_2,
- SM8150_SLAVE_RBCPR_CX_CFG,
- SM8150_SLAVE_TLMM_WEST,
- SM8150_SLAVE_A1NOC_CFG,
- SM8150_SLAVE_AOSS,
- SM8150_SLAVE_PRNG,
- SM8150_SLAVE_VSENSE_CTRL_CFG,
- SM8150_SLAVE_QSPI,
- SM8150_SLAVE_USB3,
- SM8150_SLAVE_SPDM_WRAPPER,
- SM8150_SLAVE_CRYPTO_0_CFG,
- SM8150_SLAVE_PIMEM_CFG,
- SM8150_SLAVE_TLMM_NORTH,
- SM8150_SLAVE_RBCPR_MX_CFG,
- SM8150_SLAVE_IMEM_CFG
- },
+ .link_nodes = { &qhs_tlmm_south,
+ &qhs_compute_dsp,
+ &qhs_spss_cfg,
+ &qhs_camera_cfg,
+ &qhs_sdc4,
+ &qhs_sdc2,
+ &qhs_mnoc_cfg,
+ &qhs_emac_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_tlmm_east,
+ &qhs_ssc_cfg,
+ &qhs_snoc_cfg,
+ &qhs_phy_refgen_north,
+ &qhs_qupv3_south,
+ &qhs_glm,
+ &qhs_pcie1_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_qdss_cfg,
+ &qhs_display_cfg,
+ &qhs_tcsr,
+ &qhs_ddrss_cfg,
+ &qns_cnoc_a2noc,
+ &qhs_cpr_mmcx,
+ &qhs_npu_cfg,
+ &qhs_pcie0_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_venus_cfg,
+ &qhs_tsif,
+ &qhs_ipa,
+ &qhs_clk_ctl,
+ &qhs_aop,
+ &qhs_qupv3_north,
+ &qhs_ahb2phy_south,
+ &qhs_usb3_1,
+ &srvc_cnoc,
+ &qhs_ufs_card_cfg,
+ &qhs_qupv3_east,
+ &qhs_cpr_cx,
+ &qhs_tlmm_west,
+ &qhs_a1_noc_cfg,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_qspi,
+ &qhs_usb3_0,
+ &qhs_spdm,
+ &qhs_crypto0_cfg,
+ &qhs_pimem_cfg,
+ &qhs_tlmm_north,
+ &qhs_cpr_mx,
+ &qhs_imem_cfg },
};
static struct qcom_icc_node qhm_cnoc_dc_noc = {
.name = "qhm_cnoc_dc_noc",
- .id = SM8150_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SM8150_SLAVE_GEM_NOC_CFG,
- SM8150_SLAVE_LLCC_CFG
- },
+ .link_nodes = { &qhs_memnoc,
+ &qhs_llcc },
};
static struct qcom_icc_node acm_apps = {
.name = "acm_apps",
- .id = SM8150_MASTER_AMPSS_M0,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SM8150_SLAVE_ECC,
- SM8150_SLAVE_LLCC,
- SM8150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_ecc,
+ &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node acm_gpu_tcu = {
.name = "acm_gpu_tcu",
- .id = SM8150_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8150_SLAVE_LLCC,
- SM8150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node acm_sys_tcu = {
.name = "acm_sys_tcu",
- .id = SM8150_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8150_SLAVE_LLCC,
- SM8150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
- .id = SM8150_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SM8150_SLAVE_SERVICE_GEM_NOC,
- SM8150_SLAVE_MSS_PROC_MS_MPU_CFG
- },
+ .link_nodes = { &srvc_gemnoc,
+ &qhs_mdsp_ms_mpu_cfg },
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
- .id = SM8150_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SM8150_SLAVE_ECC,
- SM8150_SLAVE_LLCC,
- SM8150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_ecc,
+ &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SM8150_MASTER_GRAPHICS_3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8150_SLAVE_LLCC,
- SM8150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SM8150_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SM8150_MASTER_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 2,
- .links = { SM8150_SLAVE_LLCC,
- SM8150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SM8150_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SM8150_SLAVE_LLCC,
- SM8150_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SM8150_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SM8150_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8150_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qxm_ecc = {
.name = "qxm_ecc",
- .id = SM8150_MASTER_ECC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SM8150_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_SLAVE_EBI_CH0 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
- .id = SM8150_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qxm_camnoc_hf0 = {
.name = "qxm_camnoc_hf0",
- .id = SM8150_MASTER_CAMNOC_HF0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_hf1 = {
.name = "qxm_camnoc_hf1",
- .id = SM8150_MASTER_CAMNOC_HF1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_camnoc_sf = {
.name = "qxm_camnoc_sf",
- .id = SM8150_MASTER_CAMNOC_SF,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = SM8150_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
- .id = SM8150_MASTER_MDP_PORT1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
- .id = SM8150_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus0 = {
.name = "qxm_venus0",
- .id = SM8150_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus1 = {
.name = "qxm_venus1",
- .id = SM8150_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qxm_venus_arm9 = {
.name = "qxm_venus_arm9",
- .id = SM8150_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns2_mem_noc },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = SM8150_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SM8150_A1NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 6,
- .links = { SM8150_SLAVE_SNOC_GEM_NOC_SF,
- SM8150_SLAVE_PIMEM,
- SM8150_SLAVE_OCIMEM,
- SM8150_SLAVE_APPSS,
- SM8150_SNOC_CNOC_SLV,
- SM8150_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qns_gemnoc_sf,
+ &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SM8150_A2NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 9,
- .links = { SM8150_SLAVE_SNOC_GEM_NOC_SF,
- SM8150_SLAVE_PIMEM,
- SM8150_SLAVE_OCIMEM,
- SM8150_SLAVE_APPSS,
- SM8150_SNOC_CNOC_SLV,
- SM8150_SLAVE_PCIE_0,
- SM8150_SLAVE_PCIE_1,
- SM8150_SLAVE_TCU,
- SM8150_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qns_gemnoc_sf,
+ &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_pcie_0,
+ &xs_pcie_1,
+ &xs_sys_tcu_cfg,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
- .id = SM8150_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 6,
- .links = { SM8150_SLAVE_PIMEM,
- SM8150_SLAVE_OCIMEM,
- SM8150_SLAVE_APPSS,
- SM8150_SNOC_CNOC_SLV,
- SM8150_SLAVE_TCU,
- SM8150_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_sys_tcu_cfg,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SM8150_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8150_SLAVE_SNOC_GEM_NOC_GC,
- SM8150_SLAVE_OCIMEM
- },
+ .link_nodes = { &qns_gemnoc_gc,
+ &qxs_imem },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SM8150_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8150_SLAVE_SNOC_GEM_NOC_GC,
- SM8150_SLAVE_OCIMEM
- },
+ .link_nodes = { &qns_gemnoc_gc,
+ &qxs_imem },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SM8150_A1NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8150_A1NOC_SNOC_MAS },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SM8150_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SM8150_A2NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8150_A2NOC_SNOC_MAS },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SM8150_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8150_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SM8150_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_camnoc_uncomp = {
.name = "qns_camnoc_uncomp",
- .id = SM8150_SLAVE_CAMNOC_UNCOMP,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_cdsp_mem_noc = {
.name = "qns_cdsp_mem_noc",
- .id = SM8150_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
- .id = SM8150_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_MASTER_A1NOC_CFG },
+ .link_nodes = { &qhm_a1noc_cfg },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
- .id = SM8150_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_MASTER_A2NOC_CFG },
+ .link_nodes = { &qhm_a2noc_cfg },
};
static struct qcom_icc_node qhs_ahb2phy_south = {
.name = "qhs_ahb2phy_south",
- .id = SM8150_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aop = {
.name = "qhs_aop",
- .id = SM8150_SLAVE_AOP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SM8150_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SM8150_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SM8150_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_dsp = {
.name = "qhs_compute_dsp",
- .id = SM8150_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SM8150_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = SM8150_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = SM8150_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SM8150_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = SM8150_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qhm_cnoc_dc_noc },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SM8150_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_emac_cfg = {
.name = "qhs_emac_cfg",
- .id = SM8150_SLAVE_EMAC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_glm = {
.name = "qhs_glm",
- .id = SM8150_SLAVE_GLM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SM8150_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SM8150_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SM8150_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
- .id = SM8150_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qhm_mnoc_cfg },
};
static struct qcom_icc_node qhs_npu_cfg = {
.name = "qhs_npu_cfg",
- .id = SM8150_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SM8150_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SM8150_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_phy_refgen_north = {
.name = "qhs_phy_refgen_north",
- .id = SM8150_SLAVE_NORTH_PHY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SM8150_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SM8150_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SM8150_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SM8150_SLAVE_QSPI,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_east = {
.name = "qhs_qupv3_east",
- .id = SM8150_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_north = {
.name = "qhs_qupv3_north",
- .id = SM8150_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qupv3_south = {
.name = "qhs_qupv3_south",
- .id = SM8150_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SM8150_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SM8150_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = SM8150_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_spdm = {
.name = "qhs_spdm",
- .id = SM8150_SLAVE_SPDM_WRAPPER,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
- .id = SM8150_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ssc_cfg = {
.name = "qhs_ssc_cfg",
- .id = SM8150_SLAVE_SSC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SM8150_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_east = {
.name = "qhs_tlmm_east",
- .id = SM8150_SLAVE_TLMM_EAST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_north = {
.name = "qhs_tlmm_north",
- .id = SM8150_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_south = {
.name = "qhs_tlmm_south",
- .id = SM8150_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm_west = {
.name = "qhs_tlmm_west",
- .id = SM8150_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsif = {
.name = "qhs_tsif",
- .id = SM8150_SLAVE_TSIF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
- .id = SM8150_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SM8150_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SM8150_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
- .id = SM8150_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SM8150_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SM8150_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
- .id = SM8150_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_MASTER_CNOC_A2NOC },
+ .link_nodes = { &qnm_cnoc },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SM8150_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SM8150_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_memnoc = {
.name = "qhs_memnoc",
- .id = SM8150_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8150_MASTER_GEM_NOC_CFG },
+ .link_nodes = { &qhm_gemnoc_cfg },
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
- .id = SM8150_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_ecc = {
.name = "qns_ecc",
- .id = SM8150_SLAVE_ECC,
.channels = 1,
.buswidth = 32,
};
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
- .id = SM8150_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_MASTER_GEM_NOC_SNOC },
+ .link_nodes = { &qnm_gemnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SM8150_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { SM8150_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node srvc_gemnoc = {
.name = "srvc_gemnoc",
- .id = SM8150_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SM8150_SLAVE_EBI_CH0,
.channels = 4,
.buswidth = 4,
};
static struct qcom_icc_node qns2_mem_noc = {
.name = "qns2_mem_noc",
- .id = SM8150_SLAVE_MNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SM8150_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8150_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SM8150_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SM8150_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
- .id = SM8150_SNOC_CNOC_SLV,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_SNOC_CNOC_MAS },
+ .link_nodes = { &qnm_snoc },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SM8150_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8150_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SM8150_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8150_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SM8150_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SM8150_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SM8150_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SM8150_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SM8150_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SM8150_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SM8150_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
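The pattern repeats for every node above: the driver-private `.id` macro is dropped, and the `.links` array of SM8150_* IDs becomes a `.link_nodes` array of direct pointers to the peer nodes, which is why the file now opens with a long block of forward declarations. A minimal sketch of the resulting linkage, assuming a simplified struct layout and a hypothetical MAX_LINKS bound (the real struct lives in icc-rpmh.h and carries more state, e.g. BCM bookkeeping):

#include <stddef.h>
#include <stdint.h>

#define MAX_LINKS 128	/* hypothetical bound, for this sketch only */

struct qcom_icc_node {
	const char *name;
	uint16_t channels;
	uint16_t buswidth;
	size_t num_links;
	struct qcom_icc_node *link_nodes[MAX_LINKS];
};

/* Forward declaration: definitions appear in topology order, so a
 * master may reference a slave node that is defined later in the file.
 */
static struct qcom_icc_node qns_a1noc_snoc;

static struct qcom_icc_node qhm_qup0 = {
	.name = "qhm_qup0",
	.channels = 1,
	.buswidth = 4,
	.num_links = 1,
	/* was: .id = SM8150_MASTER_QUP_0 and .links = { SM8150_A1NOC_SNOC_SLV } */
	.link_nodes = { &qns_a1noc_snoc },
};

static struct qcom_icc_node qns_a1noc_snoc = {
	.name = "qns_a1noc_snoc",
	.channels = 1,
	.buswidth = 16,
	/* onward link to qnm_aggre1_noc omitted in this sketch */
};

The pointers are compile-time constants, so a mistyped link fails to build instead of surfacing as an ID mismatch at probe time.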
diff --git a/drivers/interconnect/qcom/sm8150.h b/drivers/interconnect/qcom/sm8150.h
deleted file mode 100644
index 1d587c94eb06..000000000000
--- a/drivers/interconnect/qcom/sm8150.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Qualcomm SM8150 interconnect IDs
- *
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8150_H
-#define __DRIVERS_INTERCONNECT_QCOM_SM8150_H
-
-#define SM8150_A1NOC_SNOC_MAS 0
-#define SM8150_A1NOC_SNOC_SLV 1
-#define SM8150_A2NOC_SNOC_MAS 2
-#define SM8150_A2NOC_SNOC_SLV 3
-#define SM8150_MASTER_A1NOC_CFG 4
-#define SM8150_MASTER_A2NOC_CFG 5
-#define SM8150_MASTER_AMPSS_M0 6
-#define SM8150_MASTER_CAMNOC_HF0 7
-#define SM8150_MASTER_CAMNOC_HF0_UNCOMP 8
-#define SM8150_MASTER_CAMNOC_HF1 9
-#define SM8150_MASTER_CAMNOC_HF1_UNCOMP 10
-#define SM8150_MASTER_CAMNOC_SF 11
-#define SM8150_MASTER_CAMNOC_SF_UNCOMP 12
-#define SM8150_MASTER_CNOC_A2NOC 13
-#define SM8150_MASTER_CNOC_DC_NOC 14
-#define SM8150_MASTER_CNOC_MNOC_CFG 15
-#define SM8150_MASTER_COMPUTE_NOC 16
-#define SM8150_MASTER_CRYPTO_CORE_0 17
-#define SM8150_MASTER_ECC 18
-#define SM8150_MASTER_EMAC 19
-#define SM8150_MASTER_GEM_NOC_CFG 20
-#define SM8150_MASTER_GEM_NOC_PCIE_SNOC 21
-#define SM8150_MASTER_GEM_NOC_SNOC 22
-#define SM8150_MASTER_GIC 23
-#define SM8150_MASTER_GPU_TCU 24
-#define SM8150_MASTER_GRAPHICS_3D 25
-#define SM8150_MASTER_IPA 26
-/* 27 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
-#define SM8150_MASTER_LLCC 28
-#define SM8150_MASTER_MDP_PORT0 29
-#define SM8150_MASTER_MDP_PORT1 30
-#define SM8150_MASTER_MNOC_HF_MEM_NOC 31
-#define SM8150_MASTER_MNOC_SF_MEM_NOC 32
-#define SM8150_MASTER_NPU 33
-#define SM8150_MASTER_PCIE 34
-#define SM8150_MASTER_PCIE_1 35
-#define SM8150_MASTER_PIMEM 36
-#define SM8150_MASTER_QDSS_BAM 37
-#define SM8150_MASTER_QDSS_DAP 38
-#define SM8150_MASTER_QDSS_ETR 39
-#define SM8150_MASTER_QSPI 40
-#define SM8150_MASTER_QUP_0 41
-#define SM8150_MASTER_QUP_1 42
-#define SM8150_MASTER_QUP_2 43
-#define SM8150_MASTER_ROTATOR 44
-#define SM8150_MASTER_SDCC_2 45
-#define SM8150_MASTER_SDCC_4 46
-#define SM8150_MASTER_SENSORS_AHB 47
-#define SM8150_MASTER_SNOC_CFG 48
-#define SM8150_MASTER_SNOC_GC_MEM_NOC 49
-#define SM8150_MASTER_SNOC_SF_MEM_NOC 50
-#define SM8150_MASTER_SPDM 51
-#define SM8150_MASTER_SYS_TCU 52
-#define SM8150_MASTER_TSIF 53
-#define SM8150_MASTER_UFS_MEM 54
-#define SM8150_MASTER_USB3 55
-#define SM8150_MASTER_USB3_1 56
-#define SM8150_MASTER_VIDEO_P0 57
-#define SM8150_MASTER_VIDEO_P1 58
-#define SM8150_MASTER_VIDEO_PROC 59
-#define SM8150_SLAVE_A1NOC_CFG 60
-#define SM8150_SLAVE_A2NOC_CFG 61
-#define SM8150_SLAVE_AHB2PHY_SOUTH 62
-#define SM8150_SLAVE_ANOC_PCIE_GEM_NOC 63
-#define SM8150_SLAVE_AOP 64
-#define SM8150_SLAVE_AOSS 65
-#define SM8150_SLAVE_APPSS 66
-#define SM8150_SLAVE_CAMERA_CFG 67
-#define SM8150_SLAVE_CAMNOC_UNCOMP 68
-#define SM8150_SLAVE_CDSP_CFG 69
-#define SM8150_SLAVE_CDSP_MEM_NOC 70
-#define SM8150_SLAVE_CLK_CTL 71
-#define SM8150_SLAVE_CNOC_A2NOC 72
-#define SM8150_SLAVE_CNOC_DDRSS 73
-#define SM8150_SLAVE_CNOC_MNOC_CFG 74
-#define SM8150_SLAVE_CRYPTO_0_CFG 75
-#define SM8150_SLAVE_DISPLAY_CFG 76
-#define SM8150_SLAVE_EBI_CH0 77
-#define SM8150_SLAVE_ECC 78
-#define SM8150_SLAVE_EMAC_CFG 79
-#define SM8150_SLAVE_GEM_NOC_CFG 80
-#define SM8150_SLAVE_GEM_NOC_SNOC 81
-#define SM8150_SLAVE_GLM 82
-#define SM8150_SLAVE_GRAPHICS_3D_CFG 83
-#define SM8150_SLAVE_IMEM_CFG 84
-#define SM8150_SLAVE_IPA_CFG 85
-/* 86 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
-#define SM8150_SLAVE_LLCC 87
-#define SM8150_SLAVE_LLCC_CFG 88
-#define SM8150_SLAVE_MNOC_HF_MEM_NOC 89
-#define SM8150_SLAVE_MNOC_SF_MEM_NOC 90
-#define SM8150_SLAVE_MSS_PROC_MS_MPU_CFG 91
-#define SM8150_SLAVE_NORTH_PHY_CFG 92
-#define SM8150_SLAVE_NPU_CFG 93
-#define SM8150_SLAVE_OCIMEM 94
-#define SM8150_SLAVE_PCIE_0 95
-#define SM8150_SLAVE_PCIE_0_CFG 96
-#define SM8150_SLAVE_PCIE_1 97
-#define SM8150_SLAVE_PCIE_1_CFG 98
-#define SM8150_SLAVE_PIMEM 99
-#define SM8150_SLAVE_PIMEM_CFG 100
-#define SM8150_SLAVE_PRNG 101
-#define SM8150_SLAVE_QDSS_CFG 102
-#define SM8150_SLAVE_QDSS_STM 103
-#define SM8150_SLAVE_QSPI 104
-#define SM8150_SLAVE_QUP_0 105
-#define SM8150_SLAVE_QUP_1 106
-#define SM8150_SLAVE_QUP_2 107
-#define SM8150_SLAVE_RBCPR_CX_CFG 108
-#define SM8150_SLAVE_RBCPR_MMCX_CFG 109
-#define SM8150_SLAVE_RBCPR_MX_CFG 110
-#define SM8150_SLAVE_SDCC_2 111
-#define SM8150_SLAVE_SDCC_4 112
-#define SM8150_SLAVE_SERVICE_A1NOC 113
-#define SM8150_SLAVE_SERVICE_A2NOC 114
-#define SM8150_SLAVE_SERVICE_CNOC 115
-#define SM8150_SLAVE_SERVICE_GEM_NOC 116
-#define SM8150_SLAVE_SERVICE_MNOC 117
-#define SM8150_SLAVE_SERVICE_SNOC 118
-#define SM8150_SLAVE_SNOC_CFG 119
-#define SM8150_SLAVE_SNOC_GEM_NOC_GC 120
-#define SM8150_SLAVE_SNOC_GEM_NOC_SF 121
-#define SM8150_SLAVE_SPDM_WRAPPER 122
-#define SM8150_SLAVE_SPSS_CFG 123
-#define SM8150_SLAVE_SSC_CFG 124
-#define SM8150_SLAVE_TCSR 125
-#define SM8150_SLAVE_TCU 126
-#define SM8150_SLAVE_TLMM_EAST 127
-#define SM8150_SLAVE_TLMM_NORTH 128
-#define SM8150_SLAVE_TLMM_SOUTH 129
-#define SM8150_SLAVE_TLMM_WEST 130
-#define SM8150_SLAVE_TSIF 131
-#define SM8150_SLAVE_UFS_CARD_CFG 132
-#define SM8150_SLAVE_UFS_MEM_CFG 133
-#define SM8150_SLAVE_USB3 134
-#define SM8150_SLAVE_USB3_1 135
-#define SM8150_SLAVE_VENUS_CFG 136
-#define SM8150_SLAVE_VSENSE_CTRL_CFG 137
-#define SM8150_SNOC_CNOC_MAS 138
-#define SM8150_SNOC_CNOC_SLV 139
-
-#endif
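With every `.id = SM8150_*` assignment gone and every `.links` array rewritten in terms of `.link_nodes` pointers, the driver-internal ID macros above have no remaining users, so the whole header can be deleted; the DT-binding IDs in include/dt-bindings/interconnect/qcom,sm8150.h are a separate namespace and are unaffected. The sm8250.c diff that follows applies the same transformation.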
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index cc1b14c13529..2ed112eab155 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -14,1383 +14,1369 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sm8250.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg;
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qhm_tsif;
+static struct qcom_icc_node xm_pcie3_modem;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node xm_usb3_1;
+static struct qcom_icc_node qhm_a2noc_cfg;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qnm_cnoc;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_ufs_card;
+static struct qcom_icc_node qnm_npu;
+static struct qcom_icc_node qnm_snoc;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node qhm_cnoc_dc_noc;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qhm_gemnoc_cfg;
+static struct qcom_icc_node qnm_cmpnoc;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qhm_mnoc_cfg;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_icp;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_video0;
+static struct qcom_icc_node qnm_video1;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node qxm_mdp1;
+static struct qcom_icc_node qxm_rot;
+static struct qcom_icc_node amm_npu_sys;
+static struct qcom_icc_node amm_npu_sys_cdp_w;
+static struct qcom_icc_node qhm_cfg;
+static struct qcom_icc_node qhm_snoc_cfg;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_gemnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qns_pcie_modem_mem_noc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qns_cdsp_mem_noc;
+static struct qcom_icc_node qhs_a1_noc_cfg;
+static struct qcom_icc_node qhs_a2_noc_cfg;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute_dsp;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_dcc_cfg;
+static struct qcom_icc_node qhs_ddrss_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_lpass_cfg;
+static struct qcom_icc_node qhs_mnoc_cfg;
+static struct qcom_icc_node qhs_npu_cfg;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pcie_modem_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup2;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_snoc_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm0;
+static struct qcom_icc_node qhs_tlmm1;
+static struct qcom_icc_node qhs_tlmm2;
+static struct qcom_icc_node qhs_tsif;
+static struct qcom_icc_node qhs_ufs_card_cfg;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_usb3_1;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_cnoc_a2noc;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qhs_memnoc;
+static struct qcom_icc_node qns_gem_noc_snoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_sys_pcie;
+static struct qcom_icc_node srvc_even_gemnoc;
+static struct qcom_icc_node srvc_odd_gemnoc;
+static struct qcom_icc_node srvc_sys_gemnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qhs_cal_dp0;
+static struct qcom_icc_node qhs_cal_dp1;
+static struct qcom_icc_node qhs_cp;
+static struct qcom_icc_node qhs_dma_bwmon;
+static struct qcom_icc_node qhs_dpm;
+static struct qcom_icc_node qhs_isense;
+static struct qcom_icc_node qhs_llm;
+static struct qcom_icc_node qhs_tcm;
+static struct qcom_icc_node qns_npu_sys;
+static struct qcom_icc_node srvc_noc;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qns_cnoc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_pcie_modem;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qup2_core_master;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qup2_core_slave;
static struct qcom_icc_node qhm_a1noc_cfg = {
.name = "qhm_a1noc_cfg",
- .id = SM8250_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SM8250_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SM8250_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SM8250_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_tsif = {
.name = "qhm_tsif",
- .id = SM8250_MASTER_TSIF,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_pcie3_modem = {
.name = "xm_pcie3_modem",
- .id = SM8250_MASTER_PCIE_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1 },
+ .link_nodes = { &qns_pcie_modem_mem_noc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SM8250_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SM8250_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SM8250_MASTER_USB3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
- .id = SM8250_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A1NOC_SNOC_SLV },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_a2noc_cfg = {
.name = "qhm_a2noc_cfg",
- .id = SM8250_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SM8250_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = SM8250_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_cnoc = {
.name = "qnm_cnoc",
- .id = SM8250_MASTER_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SM8250_MASTER_CRYPTO_CORE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SM8250_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SM8250_MASTER_PCIE,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SM8250_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SM8250_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SM8250_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
- .id = SM8250_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_A2NOC_SNOC_SLV },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_npu = {
.name = "qnm_npu",
- .id = SM8250_MASTER_NPU,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_cdsp_mem_noc },
};
static struct qcom_icc_node qnm_snoc = {
.name = "qnm_snoc",
- .id = SM8250_SNOC_CNOC_MAS,
.channels = 1,
.buswidth = 8,
.num_links = 49,
- .links = { SM8250_SLAVE_CDSP_CFG,
- SM8250_SLAVE_CAMERA_CFG,
- SM8250_SLAVE_TLMM_SOUTH,
- SM8250_SLAVE_TLMM_NORTH,
- SM8250_SLAVE_SDCC_4,
- SM8250_SLAVE_TLMM_WEST,
- SM8250_SLAVE_SDCC_2,
- SM8250_SLAVE_CNOC_MNOC_CFG,
- SM8250_SLAVE_UFS_MEM_CFG,
- SM8250_SLAVE_SNOC_CFG,
- SM8250_SLAVE_PDM,
- SM8250_SLAVE_CX_RDPM,
- SM8250_SLAVE_PCIE_1_CFG,
- SM8250_SLAVE_A2NOC_CFG,
- SM8250_SLAVE_QDSS_CFG,
- SM8250_SLAVE_DISPLAY_CFG,
- SM8250_SLAVE_PCIE_2_CFG,
- SM8250_SLAVE_TCSR,
- SM8250_SLAVE_DCC_CFG,
- SM8250_SLAVE_CNOC_DDRSS,
- SM8250_SLAVE_IPC_ROUTER_CFG,
- SM8250_SLAVE_PCIE_0_CFG,
- SM8250_SLAVE_RBCPR_MMCX_CFG,
- SM8250_SLAVE_NPU_CFG,
- SM8250_SLAVE_AHB2PHY_SOUTH,
- SM8250_SLAVE_AHB2PHY_NORTH,
- SM8250_SLAVE_GRAPHICS_3D_CFG,
- SM8250_SLAVE_VENUS_CFG,
- SM8250_SLAVE_TSIF,
- SM8250_SLAVE_IPA_CFG,
- SM8250_SLAVE_IMEM_CFG,
- SM8250_SLAVE_USB3,
- SM8250_SLAVE_SERVICE_CNOC,
- SM8250_SLAVE_UFS_CARD_CFG,
- SM8250_SLAVE_USB3_1,
- SM8250_SLAVE_LPASS,
- SM8250_SLAVE_RBCPR_CX_CFG,
- SM8250_SLAVE_A1NOC_CFG,
- SM8250_SLAVE_AOSS,
- SM8250_SLAVE_PRNG,
- SM8250_SLAVE_VSENSE_CTRL_CFG,
- SM8250_SLAVE_QSPI_0,
- SM8250_SLAVE_CRYPTO_0_CFG,
- SM8250_SLAVE_PIMEM_CFG,
- SM8250_SLAVE_RBCPR_MX_CFG,
- SM8250_SLAVE_QUP_0,
- SM8250_SLAVE_QUP_1,
- SM8250_SLAVE_QUP_2,
- SM8250_SLAVE_CLK_CTL
- },
+ .link_nodes = { &qhs_compute_dsp,
+ &qhs_camera_cfg,
+ &qhs_tlmm1,
+ &qhs_tlmm0,
+ &qhs_sdc4,
+ &qhs_tlmm2,
+ &qhs_sdc2,
+ &qhs_mnoc_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_snoc_cfg,
+ &qhs_pdm,
+ &qhs_cx_rdpm,
+ &qhs_pcie1_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_qdss_cfg,
+ &qhs_display_cfg,
+ &qhs_pcie_modem_cfg,
+ &qhs_tcsr,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_ipc_router,
+ &qhs_pcie0_cfg,
+ &qhs_cpr_mmcx,
+ &qhs_npu_cfg,
+ &qhs_ahb2phy0,
+ &qhs_ahb2phy1,
+ &qhs_gpuss_cfg,
+ &qhs_venus_cfg,
+ &qhs_tsif,
+ &qhs_ipa,
+ &qhs_imem_cfg,
+ &qhs_usb3_0,
+ &srvc_cnoc,
+ &qhs_ufs_card_cfg,
+ &qhs_usb3_1,
+ &qhs_lpass_cfg,
+ &qhs_cpr_cx,
+ &qhs_a1_noc_cfg,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_qspi,
+ &qhs_crypto0_cfg,
+ &qhs_pimem_cfg,
+ &qhs_cpr_mx,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_qup2,
+ &qhs_clk_ctl },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = SM8250_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 50,
- .links = { SM8250_SLAVE_CDSP_CFG,
- SM8250_SLAVE_CAMERA_CFG,
- SM8250_SLAVE_TLMM_SOUTH,
- SM8250_SLAVE_TLMM_NORTH,
- SM8250_SLAVE_SDCC_4,
- SM8250_SLAVE_TLMM_WEST,
- SM8250_SLAVE_SDCC_2,
- SM8250_SLAVE_CNOC_MNOC_CFG,
- SM8250_SLAVE_UFS_MEM_CFG,
- SM8250_SLAVE_SNOC_CFG,
- SM8250_SLAVE_PDM,
- SM8250_SLAVE_CX_RDPM,
- SM8250_SLAVE_PCIE_1_CFG,
- SM8250_SLAVE_A2NOC_CFG,
- SM8250_SLAVE_QDSS_CFG,
- SM8250_SLAVE_DISPLAY_CFG,
- SM8250_SLAVE_PCIE_2_CFG,
- SM8250_SLAVE_TCSR,
- SM8250_SLAVE_DCC_CFG,
- SM8250_SLAVE_CNOC_DDRSS,
- SM8250_SLAVE_IPC_ROUTER_CFG,
- SM8250_SLAVE_CNOC_A2NOC,
- SM8250_SLAVE_PCIE_0_CFG,
- SM8250_SLAVE_RBCPR_MMCX_CFG,
- SM8250_SLAVE_NPU_CFG,
- SM8250_SLAVE_AHB2PHY_SOUTH,
- SM8250_SLAVE_AHB2PHY_NORTH,
- SM8250_SLAVE_GRAPHICS_3D_CFG,
- SM8250_SLAVE_VENUS_CFG,
- SM8250_SLAVE_TSIF,
- SM8250_SLAVE_IPA_CFG,
- SM8250_SLAVE_IMEM_CFG,
- SM8250_SLAVE_USB3,
- SM8250_SLAVE_SERVICE_CNOC,
- SM8250_SLAVE_UFS_CARD_CFG,
- SM8250_SLAVE_USB3_1,
- SM8250_SLAVE_LPASS,
- SM8250_SLAVE_RBCPR_CX_CFG,
- SM8250_SLAVE_A1NOC_CFG,
- SM8250_SLAVE_AOSS,
- SM8250_SLAVE_PRNG,
- SM8250_SLAVE_VSENSE_CTRL_CFG,
- SM8250_SLAVE_QSPI_0,
- SM8250_SLAVE_CRYPTO_0_CFG,
- SM8250_SLAVE_PIMEM_CFG,
- SM8250_SLAVE_RBCPR_MX_CFG,
- SM8250_SLAVE_QUP_0,
- SM8250_SLAVE_QUP_1,
- SM8250_SLAVE_QUP_2,
- SM8250_SLAVE_CLK_CTL
- },
+ .link_nodes = { &qhs_compute_dsp,
+ &qhs_camera_cfg,
+ &qhs_tlmm1,
+ &qhs_tlmm0,
+ &qhs_sdc4,
+ &qhs_tlmm2,
+ &qhs_sdc2,
+ &qhs_mnoc_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_snoc_cfg,
+ &qhs_pdm,
+ &qhs_cx_rdpm,
+ &qhs_pcie1_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_qdss_cfg,
+ &qhs_display_cfg,
+ &qhs_pcie_modem_cfg,
+ &qhs_tcsr,
+ &qhs_dcc_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_ipc_router,
+ &qns_cnoc_a2noc,
+ &qhs_pcie0_cfg,
+ &qhs_cpr_mmcx,
+ &qhs_npu_cfg,
+ &qhs_ahb2phy0,
+ &qhs_ahb2phy1,
+ &qhs_gpuss_cfg,
+ &qhs_venus_cfg,
+ &qhs_tsif,
+ &qhs_ipa,
+ &qhs_imem_cfg,
+ &qhs_usb3_0,
+ &srvc_cnoc,
+ &qhs_ufs_card_cfg,
+ &qhs_usb3_1,
+ &qhs_lpass_cfg,
+ &qhs_cpr_cx,
+ &qhs_a1_noc_cfg,
+ &qhs_aoss,
+ &qhs_prng,
+ &qhs_vsense_ctrl_cfg,
+ &qhs_qspi,
+ &qhs_crypto0_cfg,
+ &qhs_pimem_cfg,
+ &qhs_cpr_mx,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_qup2,
+ &qhs_clk_ctl },
};
static struct qcom_icc_node qhm_cnoc_dc_noc = {
.name = "qhm_cnoc_dc_noc",
- .id = SM8250_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SM8250_SLAVE_GEM_NOC_CFG,
- SM8250_SLAVE_LLCC_CFG
- },
+ .link_nodes = { &qhs_memnoc,
+ &qhs_llcc },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SM8250_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8250_SLAVE_LLCC,
- SM8250_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SM8250_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8250_SLAVE_LLCC,
- SM8250_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SM8250_MASTER_AMPSS_M0,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SM8250_SLAVE_LLCC,
- SM8250_SLAVE_GEM_NOC_SNOC,
- SM8250_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc,
+ &qns_sys_pcie },
};
static struct qcom_icc_node qhm_gemnoc_cfg = {
.name = "qhm_gemnoc_cfg",
- .id = SM8250_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 3,
- .links = { SM8250_SLAVE_SERVICE_GEM_NOC_2,
- SM8250_SLAVE_SERVICE_GEM_NOC_1,
- SM8250_SLAVE_SERVICE_GEM_NOC
- },
+ .link_nodes = { &srvc_odd_gemnoc,
+ &srvc_even_gemnoc,
+ &srvc_sys_gemnoc },
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
- .id = SM8250_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8250_SLAVE_LLCC,
- SM8250_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SM8250_MASTER_GRAPHICS_3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8250_SLAVE_LLCC,
- SM8250_SLAVE_GEM_NOC_SNOC },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SM8250_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SM8250_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8250_SLAVE_LLCC,
- SM8250_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SM8250_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SM8250_SLAVE_LLCC,
- SM8250_SLAVE_GEM_NOC_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SM8250_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SM8250_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8250_SLAVE_LLCC,
- SM8250_SLAVE_GEM_NOC_SNOC,
- SM8250_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_llcc,
+ &qns_gem_noc_snoc,
+ &qns_sys_pcie },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SM8250_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_SLAVE_EBI_CH0 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qhm_mnoc_cfg = {
.name = "qhm_mnoc_cfg",
- .id = SM8250_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = SM8250_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
- .id = SM8250_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = SM8250_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
- .id = SM8250_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video1 = {
.name = "qnm_video1",
- .id = SM8250_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = SM8250_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = SM8250_MASTER_MDP_PORT0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
- .id = SM8250_MASTER_MDP_PORT1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
- .id = SM8250_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node amm_npu_sys = {
.name = "amm_npu_sys",
- .id = SM8250_MASTER_NPU_SYS,
.channels = 4,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_SLAVE_NPU_COMPUTE_NOC },
+ .link_nodes = { &qns_npu_sys },
};
static struct qcom_icc_node amm_npu_sys_cdp_w = {
.name = "amm_npu_sys_cdp_w",
- .id = SM8250_MASTER_NPU_CDP,
.channels = 2,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_SLAVE_NPU_COMPUTE_NOC },
+ .link_nodes = { &qns_npu_sys },
};
static struct qcom_icc_node qhm_cfg = {
.name = "qhm_cfg",
- .id = SM8250_MASTER_NPU_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 9,
- .links = { SM8250_SLAVE_SERVICE_NPU_NOC,
- SM8250_SLAVE_ISENSE_CFG,
- SM8250_SLAVE_NPU_LLM_CFG,
- SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG,
- SM8250_SLAVE_NPU_CP,
- SM8250_SLAVE_NPU_TCM,
- SM8250_SLAVE_NPU_CAL_DP0,
- SM8250_SLAVE_NPU_CAL_DP1,
- SM8250_SLAVE_NPU_DPM
- },
+ .link_nodes = { &srvc_noc,
+ &qhs_isense,
+ &qhs_llm,
+ &qhs_dma_bwmon,
+ &qhs_cp,
+ &qhs_tcm,
+ &qhs_cal_dp0,
+ &qhs_cal_dp1,
+ &qhs_dpm },
};
static struct qcom_icc_node qhm_snoc_cfg = {
.name = "qhm_snoc_cfg",
- .id = SM8250_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SM8250_A1NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SM8250_A2NOC_SNOC_MAS,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_gemnoc = {
.name = "qnm_gemnoc",
- .id = SM8250_MASTER_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
- .links = { SM8250_SLAVE_PIMEM,
- SM8250_SLAVE_OCIMEM,
- SM8250_SLAVE_APPSS,
- SM8250_SNOC_CNOC_SLV,
- SM8250_SLAVE_TCU,
- SM8250_SLAVE_QDSS_STM
- },
+ .link_nodes = { &qxs_pimem,
+ &qxs_imem,
+ &qhs_apss,
+ &qns_cnoc,
+ &xs_sys_tcu_cfg,
+ &xs_qdss_stm },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SM8250_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 3,
- .links = { SM8250_SLAVE_PCIE_2,
- SM8250_SLAVE_PCIE_0,
- SM8250_SLAVE_PCIE_1
- },
+ .link_nodes = { &xs_pcie_modem,
+ &xs_pcie_0,
+ &xs_pcie_1 },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SM8250_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SM8250_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SM8250_A1NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_A1NOC_SNOC_MAS },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_pcie_modem_mem_noc = {
.name = "qns_pcie_modem_mem_noc",
- .id = SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SM8250_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SM8250_A2NOC_SNOC_SLV,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_A2NOC_SNOC_MAS },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SM8250_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SM8250_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cdsp_mem_noc = {
.name = "qns_cdsp_mem_noc",
- .id = SM8250_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_node qhs_a1_noc_cfg = {
.name = "qhs_a1_noc_cfg",
- .id = SM8250_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_MASTER_A1NOC_CFG },
+ .link_nodes = { &qhm_a1noc_cfg },
};
static struct qcom_icc_node qhs_a2_noc_cfg = {
.name = "qhs_a2_noc_cfg",
- .id = SM8250_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_MASTER_A2NOC_CFG },
+ .link_nodes = { &qhm_a2noc_cfg },
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SM8250_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = SM8250_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SM8250_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SM8250_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SM8250_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_dsp = {
.name = "qhs_compute_dsp",
- .id = SM8250_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SM8250_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = SM8250_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = SM8250_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SM8250_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = SM8250_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
- .id = SM8250_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ddrss_cfg = {
.name = "qhs_ddrss_cfg",
- .id = SM8250_SLAVE_CNOC_DDRSS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_MASTER_CNOC_DC_NOC },
+ .link_nodes = { &qhm_cnoc_dc_noc },
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SM8250_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SM8250_SLAVE_GRAPHICS_3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SM8250_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SM8250_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SM8250_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
- .id = SM8250_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mnoc_cfg = {
.name = "qhs_mnoc_cfg",
- .id = SM8250_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qhm_mnoc_cfg },
};
static struct qcom_icc_node qhs_npu_cfg = {
.name = "qhs_npu_cfg",
- .id = SM8250_SLAVE_NPU_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_MASTER_NPU_NOC_CFG },
+ .link_nodes = { &qhm_cfg },
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SM8250_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SM8250_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie_modem_cfg = {
.name = "qhs_pcie_modem_cfg",
- .id = SM8250_SLAVE_PCIE_2_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SM8250_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SM8250_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SM8250_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SM8250_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SM8250_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SM8250_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SM8250_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
- .id = SM8250_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SM8250_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SM8250_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_snoc_cfg = {
.name = "qhs_snoc_cfg",
- .id = SM8250_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_MASTER_SNOC_CFG },
+ .link_nodes = { &qhm_snoc_cfg },
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SM8250_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm0 = {
.name = "qhs_tlmm0",
- .id = SM8250_SLAVE_TLMM_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm1 = {
.name = "qhs_tlmm1",
- .id = SM8250_SLAVE_TLMM_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm2 = {
.name = "qhs_tlmm2",
- .id = SM8250_SLAVE_TLMM_WEST,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tsif = {
.name = "qhs_tsif",
- .id = SM8250_SLAVE_TSIF,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
- .id = SM8250_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SM8250_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SM8250_SLAVE_USB3,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
- .id = SM8250_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SM8250_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SM8250_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_cnoc_a2noc = {
.name = "qns_cnoc_a2noc",
- .id = SM8250_SLAVE_CNOC_A2NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_MASTER_CNOC_A2NOC },
+ .link_nodes = { &qnm_cnoc },
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SM8250_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SM8250_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_memnoc = {
.name = "qhs_memnoc",
- .id = SM8250_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_MASTER_GEM_NOC_CFG },
+ .link_nodes = { &qhm_gemnoc_cfg },
};
static struct qcom_icc_node qns_gem_noc_snoc = {
.name = "qns_gem_noc_snoc",
- .id = SM8250_SLAVE_GEM_NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_MASTER_GEM_NOC_SNOC },
+ .link_nodes = { &qnm_gemnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SM8250_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_sys_pcie = {
.name = "qns_sys_pcie",
- .id = SM8250_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
- .id = SM8250_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
- .id = SM8250_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
- .id = SM8250_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SM8250_SLAVE_EBI_CH0,
.channels = 4,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SM8250_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SM8250_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8250_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SM8250_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cal_dp0 = {
.name = "qhs_cal_dp0",
- .id = SM8250_SLAVE_NPU_CAL_DP0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cal_dp1 = {
.name = "qhs_cal_dp1",
- .id = SM8250_SLAVE_NPU_CAL_DP1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cp = {
.name = "qhs_cp",
- .id = SM8250_SLAVE_NPU_CP,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dma_bwmon = {
.name = "qhs_dma_bwmon",
- .id = SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dpm = {
.name = "qhs_dpm",
- .id = SM8250_SLAVE_NPU_DPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_isense = {
.name = "qhs_isense",
- .id = SM8250_SLAVE_ISENSE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_llm = {
.name = "qhs_llm",
- .id = SM8250_SLAVE_NPU_LLM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcm = {
.name = "qhs_tcm",
- .id = SM8250_SLAVE_NPU_TCM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_npu_sys = {
.name = "qns_npu_sys",
- .id = SM8250_SLAVE_NPU_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
};
static struct qcom_icc_node srvc_noc = {
.name = "srvc_noc",
- .id = SM8250_SLAVE_SERVICE_NPU_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SM8250_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qns_cnoc = {
.name = "qns_cnoc",
- .id = SM8250_SNOC_CNOC_SLV,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_SNOC_CNOC_MAS },
+ .link_nodes = { &qnm_snoc },
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SM8250_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8250_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SM8250_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8250_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SM8250_SLAVE_OCIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SM8250_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SM8250_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SM8250_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SM8250_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_modem = {
.name = "xs_pcie_modem",
- .id = SM8250_SLAVE_PCIE_2,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SM8250_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SM8250_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SM8250_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SM8250_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
- .id = SM8250_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8250_SLAVE_QUP_CORE_2 },
+ .link_nodes = { &qup2_core_slave },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SM8250_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SM8250_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
- .id = SM8250_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
};
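
The hunks above all make the same two-part change: each node drops its .id = SM8250_* member, and its .links array of integer IDs becomes a .link_nodes array of pointers to the target node structs. Below is a minimal standalone sketch of that pointer-linking pattern — not the kernel driver; struct icc_node_sketch and the *_s node names are hypothetical — including the tentative declarations that let an early node reference one initialized later in the file, mirroring the forward declarations the patch adds at the top of each driver.

/*
 * Minimal standalone sketch of pointer-based node linking (hypothetical
 * names; illustration only, not the kernel driver).
 */
#include <stdio.h>

struct icc_node_sketch {
	const char *name;
	int num_links;
	struct icc_node_sketch *link_nodes[4];
};

/* Tentative declarations: the initialized definitions follow below. */
static struct icc_node_sketch llcc_mc_s;
static struct icc_node_sketch ebi_s;

static struct icc_node_sketch qns_llcc_s = {
	.name = "qns_llcc",
	.num_links = 1,
	.link_nodes = { &llcc_mc_s },	/* pointer, not an integer ID */
};

static struct icc_node_sketch llcc_mc_s = {
	.name = "llcc_mc",
	.num_links = 1,
	.link_nodes = { &ebi_s },
};

static struct icc_node_sketch ebi_s = {
	.name = "ebi",
};

int main(void)
{
	/* Follow first links: qns_llcc -> llcc_mc -> ebi, no lookup table. */
	for (struct icc_node_sketch *n = &qns_llcc_s; n;
	     n = n->num_links ? n->link_nodes[0] : NULL)
		printf("%s\n", n->name);
	return 0;
}
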
diff --git a/drivers/interconnect/qcom/sm8250.h b/drivers/interconnect/qcom/sm8250.h
deleted file mode 100644
index 032665093c5b..000000000000
--- a/drivers/interconnect/qcom/sm8250.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Qualcomm SM8250 interconnect IDs
- *
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8250_H
-#define __DRIVERS_INTERCONNECT_QCOM_SM8250_H
-
-#define SM8250_A1NOC_SNOC_MAS 0
-#define SM8250_A1NOC_SNOC_SLV 1
-#define SM8250_A2NOC_SNOC_MAS 2
-#define SM8250_A2NOC_SNOC_SLV 3
-#define SM8250_MASTER_A1NOC_CFG 4
-#define SM8250_MASTER_A2NOC_CFG 5
-#define SM8250_MASTER_AMPSS_M0 6
-#define SM8250_MASTER_ANOC_PCIE_GEM_NOC 7
-#define SM8250_MASTER_CAMNOC_HF 8
-#define SM8250_MASTER_CAMNOC_ICP 9
-#define SM8250_MASTER_CAMNOC_SF 10
-#define SM8250_MASTER_CNOC_A2NOC 11
-#define SM8250_MASTER_CNOC_DC_NOC 12
-#define SM8250_MASTER_CNOC_MNOC_CFG 13
-#define SM8250_MASTER_COMPUTE_NOC 14
-#define SM8250_MASTER_CRYPTO_CORE_0 15
-#define SM8250_MASTER_GEM_NOC_CFG 16
-#define SM8250_MASTER_GEM_NOC_PCIE_SNOC 17
-#define SM8250_MASTER_GEM_NOC_SNOC 18
-#define SM8250_MASTER_GIC 19
-#define SM8250_MASTER_GPU_TCU 20
-#define SM8250_MASTER_GRAPHICS_3D 21
-#define SM8250_MASTER_IPA 22
-/* 23 was used by MASTER_IPA_CORE, now represented as RPMh clock */
-#define SM8250_MASTER_LLCC 24
-#define SM8250_MASTER_MDP_PORT0 25
-#define SM8250_MASTER_MDP_PORT1 26
-#define SM8250_MASTER_MNOC_HF_MEM_NOC 27
-#define SM8250_MASTER_MNOC_SF_MEM_NOC 28
-#define SM8250_MASTER_NPU 29
-#define SM8250_MASTER_NPU_CDP 30
-#define SM8250_MASTER_NPU_NOC_CFG 31
-#define SM8250_MASTER_NPU_SYS 32
-#define SM8250_MASTER_PCIE 33
-#define SM8250_MASTER_PCIE_1 34
-#define SM8250_MASTER_PCIE_2 35
-#define SM8250_MASTER_PIMEM 36
-#define SM8250_MASTER_QDSS_BAM 37
-#define SM8250_MASTER_QDSS_DAP 38
-#define SM8250_MASTER_QDSS_ETR 39
-#define SM8250_MASTER_QSPI_0 40
-#define SM8250_MASTER_QUP_0 41
-#define SM8250_MASTER_QUP_1 42
-#define SM8250_MASTER_QUP_2 43
-#define SM8250_MASTER_ROTATOR 44
-#define SM8250_MASTER_SDCC_2 45
-#define SM8250_MASTER_SDCC_4 46
-#define SM8250_MASTER_SNOC_CFG 47
-#define SM8250_MASTER_SNOC_GC_MEM_NOC 48
-#define SM8250_MASTER_SNOC_SF_MEM_NOC 49
-#define SM8250_MASTER_SYS_TCU 50
-#define SM8250_MASTER_TSIF 51
-#define SM8250_MASTER_UFS_CARD 52
-#define SM8250_MASTER_UFS_MEM 53
-#define SM8250_MASTER_USB3 54
-#define SM8250_MASTER_USB3_1 55
-#define SM8250_MASTER_VIDEO_P0 56
-#define SM8250_MASTER_VIDEO_P1 57
-#define SM8250_MASTER_VIDEO_PROC 58
-#define SM8250_SLAVE_A1NOC_CFG 59
-#define SM8250_SLAVE_A2NOC_CFG 60
-#define SM8250_SLAVE_AHB2PHY_NORTH 61
-#define SM8250_SLAVE_AHB2PHY_SOUTH 62
-#define SM8250_SLAVE_ANOC_PCIE_GEM_NOC 63
-#define SM8250_SLAVE_ANOC_PCIE_GEM_NOC_1 64
-#define SM8250_SLAVE_AOSS 65
-#define SM8250_SLAVE_APPSS 66
-#define SM8250_SLAVE_CAMERA_CFG 67
-#define SM8250_SLAVE_CDSP_CFG 68
-#define SM8250_SLAVE_CDSP_MEM_NOC 69
-#define SM8250_SLAVE_CLK_CTL 70
-#define SM8250_SLAVE_CNOC_A2NOC 71
-#define SM8250_SLAVE_CNOC_DDRSS 72
-#define SM8250_SLAVE_CNOC_MNOC_CFG 73
-#define SM8250_SLAVE_CRYPTO_0_CFG 74
-#define SM8250_SLAVE_CX_RDPM 75
-#define SM8250_SLAVE_DCC_CFG 76
-#define SM8250_SLAVE_DISPLAY_CFG 77
-#define SM8250_SLAVE_EBI_CH0 78
-#define SM8250_SLAVE_GEM_NOC_CFG 79
-#define SM8250_SLAVE_GEM_NOC_SNOC 80
-#define SM8250_SLAVE_GRAPHICS_3D_CFG 81
-#define SM8250_SLAVE_IMEM_CFG 82
-#define SM8250_SLAVE_IPA_CFG 83
-/* 84 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
-#define SM8250_SLAVE_IPC_ROUTER_CFG 85
-#define SM8250_SLAVE_ISENSE_CFG 86
-#define SM8250_SLAVE_LLCC 87
-#define SM8250_SLAVE_LLCC_CFG 88
-#define SM8250_SLAVE_LPASS 89
-#define SM8250_SLAVE_MEM_NOC_PCIE_SNOC 90
-#define SM8250_SLAVE_MNOC_HF_MEM_NOC 91
-#define SM8250_SLAVE_MNOC_SF_MEM_NOC 92
-#define SM8250_SLAVE_NPU_CAL_DP0 93
-#define SM8250_SLAVE_NPU_CAL_DP1 94
-#define SM8250_SLAVE_NPU_CFG 95
-#define SM8250_SLAVE_NPU_COMPUTE_NOC 96
-#define SM8250_SLAVE_NPU_CP 97
-#define SM8250_SLAVE_NPU_DPM 98
-#define SM8250_SLAVE_NPU_INT_DMA_BWMON_CFG 99
-#define SM8250_SLAVE_NPU_LLM_CFG 100
-#define SM8250_SLAVE_NPU_TCM 101
-#define SM8250_SLAVE_OCIMEM 102
-#define SM8250_SLAVE_PCIE_0 103
-#define SM8250_SLAVE_PCIE_0_CFG 104
-#define SM8250_SLAVE_PCIE_1 105
-#define SM8250_SLAVE_PCIE_1_CFG 106
-#define SM8250_SLAVE_PCIE_2 107
-#define SM8250_SLAVE_PCIE_2_CFG 108
-#define SM8250_SLAVE_PDM 109
-#define SM8250_SLAVE_PIMEM 110
-#define SM8250_SLAVE_PIMEM_CFG 111
-#define SM8250_SLAVE_PRNG 112
-#define SM8250_SLAVE_QDSS_CFG 113
-#define SM8250_SLAVE_QDSS_STM 114
-#define SM8250_SLAVE_QSPI_0 115
-#define SM8250_SLAVE_QUP_0 116
-#define SM8250_SLAVE_QUP_1 117
-#define SM8250_SLAVE_QUP_2 118
-#define SM8250_SLAVE_RBCPR_CX_CFG 119
-#define SM8250_SLAVE_RBCPR_MMCX_CFG 120
-#define SM8250_SLAVE_RBCPR_MX_CFG 121
-#define SM8250_SLAVE_SDCC_2 122
-#define SM8250_SLAVE_SDCC_4 123
-#define SM8250_SLAVE_SERVICE_A1NOC 124
-#define SM8250_SLAVE_SERVICE_A2NOC 125
-#define SM8250_SLAVE_SERVICE_CNOC 126
-#define SM8250_SLAVE_SERVICE_GEM_NOC 127
-#define SM8250_SLAVE_SERVICE_GEM_NOC_1 128
-#define SM8250_SLAVE_SERVICE_GEM_NOC_2 129
-#define SM8250_SLAVE_SERVICE_MNOC 130
-#define SM8250_SLAVE_SERVICE_NPU_NOC 131
-#define SM8250_SLAVE_SERVICE_SNOC 132
-#define SM8250_SLAVE_SNOC_CFG 133
-#define SM8250_SLAVE_SNOC_GEM_NOC_GC 134
-#define SM8250_SLAVE_SNOC_GEM_NOC_SF 135
-#define SM8250_SLAVE_TCSR 136
-#define SM8250_SLAVE_TCU 137
-#define SM8250_SLAVE_TLMM_NORTH 138
-#define SM8250_SLAVE_TLMM_SOUTH 139
-#define SM8250_SLAVE_TLMM_WEST 140
-#define SM8250_SLAVE_TSIF 141
-#define SM8250_SLAVE_UFS_CARD_CFG 142
-#define SM8250_SLAVE_UFS_MEM_CFG 143
-#define SM8250_SLAVE_USB3 144
-#define SM8250_SLAVE_USB3_1 145
-#define SM8250_SLAVE_VENUS_CFG 146
-#define SM8250_SLAVE_VSENSE_CTRL_CFG 147
-#define SM8250_SNOC_CNOC_MAS 148
-#define SM8250_SNOC_CNOC_SLV 149
-#define SM8250_MASTER_QUP_CORE_0 150
-#define SM8250_MASTER_QUP_CORE_1 151
-#define SM8250_MASTER_QUP_CORE_2 152
-#define SM8250_SLAVE_QUP_CORE_0 153
-#define SM8250_SLAVE_QUP_CORE_1 154
-#define SM8250_SLAVE_QUP_CORE_2 155
-
-#endif
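
Deleting sm8250.h wholesale follows from the conversion above: the header existed only to give every endpoint a stable integer so .links entries could be resolved against a table at probe time. For contrast, here is a small hypothetical sketch of that older ID-indexed scheme (names invented for illustration); with pointer links, both the table and the per-SoC ID namespace become unnecessary, which is what lets the whole header go.

/* Hypothetical sketch of the ID-based linking the deleted header served. */
#include <stdio.h>

enum { SKETCH_MASTER_LLCC, SKETCH_SLAVE_EBI, SKETCH_NODE_COUNT };

struct id_node_sketch {
	const char *name;
	int num_links;
	int links[4];		/* integer IDs, resolved through a table */
};

static struct id_node_sketch table[SKETCH_NODE_COUNT] = {
	[SKETCH_MASTER_LLCC] = {
		.name = "llcc_mc",
		.num_links = 1,
		.links = { SKETCH_SLAVE_EBI },
	},
	[SKETCH_SLAVE_EBI] = { .name = "ebi" },
};

int main(void)
{
	/* Every link costs an extra indirection through the ID table. */
	struct id_node_sketch *src = &table[SKETCH_MASTER_LLCC];
	struct id_node_sketch *dst = &table[src->links[0]];
	printf("%s -> %s\n", src->name, dst->name);
	return 0;
}
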
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index 38105ead4f29..bb793d724893 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -13,1255 +13,1241 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#include "sm8350.h"
+
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qnm_a1noc_cfg;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node xm_usb3_1;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qnm_a2noc_cfg;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node xm_qdss_etr;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node xm_ufs_card;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node xm_qdss_dap;
+static struct qcom_icc_node qnm_cnoc_dc_noc;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_cmpnoc;
+static struct qcom_icc_node qnm_gemnoc_cfg;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qhm_config_noc;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_icp;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_mnoc_cfg;
+static struct qcom_icc_node qnm_video0;
+static struct qcom_icc_node qnm_video1;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qxm_mdp0;
+static struct qcom_icc_node qxm_mdp1;
+static struct qcom_icc_node qxm_rot;
+static struct qcom_icc_node qhm_nsp_noc_config;
+static struct qcom_icc_node qxm_nsp;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_snoc_cfg;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_dcc_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_hwkm;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_lpass_cfg;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_mx_rdpm;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_pka_wrapper_cfg;
+static struct qcom_icc_node qhs_pmu_wrapper_cfg;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup2;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_security;
+static struct qcom_icc_node qhs_spss_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_ufs_card_cfg;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_usb3_1;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_a1_noc_cfg;
+static struct qcom_icc_node qns_a2_noc_cfg;
+static struct qcom_icc_node qns_ddrss_cfg;
+static struct qcom_icc_node qns_mnoc_cfg;
+static struct qcom_icc_node qns_snoc_cfg;
+static struct qcom_icc_node qxs_boot_imem;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qhs_llcc;
+static struct qcom_icc_node qns_gemnoc;
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg;
+static struct qcom_icc_node qhs_modem_ms_mpu_cfg;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node srvc_even_gemnoc;
+static struct qcom_icc_node srvc_odd_gemnoc;
+static struct qcom_icc_node srvc_sys_gemnoc;
+static struct qcom_icc_node qhs_lpass_core;
+static struct qcom_icc_node qhs_lpass_lpi;
+static struct qcom_icc_node qhs_lpass_mpu;
+static struct qcom_icc_node qhs_lpass_top;
+static struct qcom_icc_node srvc_niu_aml_noc;
+static struct qcom_icc_node srvc_niu_lpass_agnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node service_nsp_noc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node srvc_snoc;
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SM8350_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = SM8350_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SM8350_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SM8350_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_a1noc_cfg = {
.name = "qnm_a1noc_cfg",
- .id = SM8350_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SM8350_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SM8350_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SM8350_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
- .id = SM8350_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SM8350_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_a2noc_cfg = {
.name = "qnm_a2noc_cfg",
- .id = SM8350_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SM8350_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SM8350_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SM8350_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SM8350_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_qdss_etr = {
.name = "xm_qdss_etr",
- .id = SM8350_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SM8350_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_ufs_card = {
.name = "xm_ufs_card",
- .id = SM8350_MASTER_UFS_CARD,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SM8350_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 56,
- .links = { SM8350_SLAVE_AHB2PHY_SOUTH,
- SM8350_SLAVE_AHB2PHY_NORTH,
- SM8350_SLAVE_AOSS,
- SM8350_SLAVE_APPSS,
- SM8350_SLAVE_CAMERA_CFG,
- SM8350_SLAVE_CLK_CTL,
- SM8350_SLAVE_CDSP_CFG,
- SM8350_SLAVE_RBCPR_CX_CFG,
- SM8350_SLAVE_RBCPR_MMCX_CFG,
- SM8350_SLAVE_RBCPR_MX_CFG,
- SM8350_SLAVE_CRYPTO_0_CFG,
- SM8350_SLAVE_CX_RDPM,
- SM8350_SLAVE_DCC_CFG,
- SM8350_SLAVE_DISPLAY_CFG,
- SM8350_SLAVE_GFX3D_CFG,
- SM8350_SLAVE_HWKM,
- SM8350_SLAVE_IMEM_CFG,
- SM8350_SLAVE_IPA_CFG,
- SM8350_SLAVE_IPC_ROUTER_CFG,
- SM8350_SLAVE_LPASS,
- SM8350_SLAVE_CNOC_MSS,
- SM8350_SLAVE_MX_RDPM,
- SM8350_SLAVE_PCIE_0_CFG,
- SM8350_SLAVE_PCIE_1_CFG,
- SM8350_SLAVE_PDM,
- SM8350_SLAVE_PIMEM_CFG,
- SM8350_SLAVE_PKA_WRAPPER_CFG,
- SM8350_SLAVE_PMU_WRAPPER_CFG,
- SM8350_SLAVE_QDSS_CFG,
- SM8350_SLAVE_QSPI_0,
- SM8350_SLAVE_QUP_0,
- SM8350_SLAVE_QUP_1,
- SM8350_SLAVE_QUP_2,
- SM8350_SLAVE_SDCC_2,
- SM8350_SLAVE_SDCC_4,
- SM8350_SLAVE_SECURITY,
- SM8350_SLAVE_SPSS_CFG,
- SM8350_SLAVE_TCSR,
- SM8350_SLAVE_TLMM,
- SM8350_SLAVE_UFS_CARD_CFG,
- SM8350_SLAVE_UFS_MEM_CFG,
- SM8350_SLAVE_USB3_0,
- SM8350_SLAVE_USB3_1,
- SM8350_SLAVE_VENUS_CFG,
- SM8350_SLAVE_VSENSE_CTRL_CFG,
- SM8350_SLAVE_A1NOC_CFG,
- SM8350_SLAVE_A2NOC_CFG,
- SM8350_SLAVE_DDRSS_CFG,
- SM8350_SLAVE_CNOC_MNOC_CFG,
- SM8350_SLAVE_SNOC_CFG,
- SM8350_SLAVE_BOOT_IMEM,
- SM8350_SLAVE_IMEM,
- SM8350_SLAVE_PIMEM,
- SM8350_SLAVE_SERVICE_CNOC,
- SM8350_SLAVE_QDSS_STM,
- SM8350_SLAVE_TCU
- },
+ .link_nodes = { &qhs_ahb2phy0,
+ &qhs_ahb2phy1,
+ &qhs_aoss,
+ &qhs_apss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_cfg,
+ &qhs_cpr_cx,
+ &qhs_cpr_mmcx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_cx_rdpm,
+ &qhs_dcc_cfg,
+ &qhs_display_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_hwkm,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_ipc_router,
+ &qhs_lpass_cfg,
+ &qhs_mss_cfg,
+ &qhs_mx_rdpm,
+ &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_pka_wrapper_cfg,
+ &qhs_pmu_wrapper_cfg,
+ &qhs_qdss_cfg,
+ &qhs_qspi,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_qup2,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_security,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_a1_noc_cfg,
+ &qns_a2_noc_cfg,
+ &qns_ddrss_cfg,
+ &qns_mnoc_cfg,
+ &qns_snoc_cfg,
+ &qxs_boot_imem,
+ &qxs_imem,
+ &qxs_pimem,
+ &srvc_cnoc,
+ &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SM8350_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8350_SLAVE_PCIE_0,
- SM8350_SLAVE_PCIE_1
- },
+ .link_nodes = { &xs_pcie_0,
+ &xs_pcie_1 },
};
static struct qcom_icc_node xm_qdss_dap = {
.name = "xm_qdss_dap",
- .id = SM8350_MASTER_QDSS_DAP,
.channels = 1,
.buswidth = 8,
.num_links = 56,
- .links = { SM8350_SLAVE_AHB2PHY_SOUTH,
- SM8350_SLAVE_AHB2PHY_NORTH,
- SM8350_SLAVE_AOSS,
- SM8350_SLAVE_APPSS,
- SM8350_SLAVE_CAMERA_CFG,
- SM8350_SLAVE_CLK_CTL,
- SM8350_SLAVE_CDSP_CFG,
- SM8350_SLAVE_RBCPR_CX_CFG,
- SM8350_SLAVE_RBCPR_MMCX_CFG,
- SM8350_SLAVE_RBCPR_MX_CFG,
- SM8350_SLAVE_CRYPTO_0_CFG,
- SM8350_SLAVE_CX_RDPM,
- SM8350_SLAVE_DCC_CFG,
- SM8350_SLAVE_DISPLAY_CFG,
- SM8350_SLAVE_GFX3D_CFG,
- SM8350_SLAVE_HWKM,
- SM8350_SLAVE_IMEM_CFG,
- SM8350_SLAVE_IPA_CFG,
- SM8350_SLAVE_IPC_ROUTER_CFG,
- SM8350_SLAVE_LPASS,
- SM8350_SLAVE_CNOC_MSS,
- SM8350_SLAVE_MX_RDPM,
- SM8350_SLAVE_PCIE_0_CFG,
- SM8350_SLAVE_PCIE_1_CFG,
- SM8350_SLAVE_PDM,
- SM8350_SLAVE_PIMEM_CFG,
- SM8350_SLAVE_PKA_WRAPPER_CFG,
- SM8350_SLAVE_PMU_WRAPPER_CFG,
- SM8350_SLAVE_QDSS_CFG,
- SM8350_SLAVE_QSPI_0,
- SM8350_SLAVE_QUP_0,
- SM8350_SLAVE_QUP_1,
- SM8350_SLAVE_QUP_2,
- SM8350_SLAVE_SDCC_2,
- SM8350_SLAVE_SDCC_4,
- SM8350_SLAVE_SECURITY,
- SM8350_SLAVE_SPSS_CFG,
- SM8350_SLAVE_TCSR,
- SM8350_SLAVE_TLMM,
- SM8350_SLAVE_UFS_CARD_CFG,
- SM8350_SLAVE_UFS_MEM_CFG,
- SM8350_SLAVE_USB3_0,
- SM8350_SLAVE_USB3_1,
- SM8350_SLAVE_VENUS_CFG,
- SM8350_SLAVE_VSENSE_CTRL_CFG,
- SM8350_SLAVE_A1NOC_CFG,
- SM8350_SLAVE_A2NOC_CFG,
- SM8350_SLAVE_DDRSS_CFG,
- SM8350_SLAVE_CNOC_MNOC_CFG,
- SM8350_SLAVE_SNOC_CFG,
- SM8350_SLAVE_BOOT_IMEM,
- SM8350_SLAVE_IMEM,
- SM8350_SLAVE_PIMEM,
- SM8350_SLAVE_SERVICE_CNOC,
- SM8350_SLAVE_QDSS_STM,
- SM8350_SLAVE_TCU
- },
+ .link_nodes = { &qhs_ahb2phy0,
+ &qhs_ahb2phy1,
+ &qhs_aoss,
+ &qhs_apss,
+ &qhs_camera_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_cfg,
+ &qhs_cpr_cx,
+ &qhs_cpr_mmcx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_cx_rdpm,
+ &qhs_dcc_cfg,
+ &qhs_display_cfg,
+ &qhs_gpuss_cfg,
+ &qhs_hwkm,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_ipc_router,
+ &qhs_lpass_cfg,
+ &qhs_mss_cfg,
+ &qhs_mx_rdpm,
+ &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_pka_wrapper_cfg,
+ &qhs_pmu_wrapper_cfg,
+ &qhs_qdss_cfg,
+ &qhs_qspi,
+ &qhs_qup0,
+ &qhs_qup1,
+ &qhs_qup2,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_security,
+ &qhs_spss_cfg,
+ &qhs_tcsr,
+ &qhs_tlmm,
+ &qhs_ufs_card_cfg,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_usb3_1,
+ &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_a1_noc_cfg,
+ &qns_a2_noc_cfg,
+ &qns_ddrss_cfg,
+ &qns_mnoc_cfg,
+ &qns_snoc_cfg,
+ &qxs_boot_imem,
+ &qxs_imem,
+ &qxs_pimem,
+ &srvc_cnoc,
+ &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_cnoc_dc_noc = {
.name = "qnm_cnoc_dc_noc",
- .id = SM8350_MASTER_CNOC_DC_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 2,
- .links = { SM8350_SLAVE_LLCC_CFG,
- SM8350_SLAVE_GEM_NOC_CFG
- },
+ .link_nodes = { &qhs_llcc,
+ &qns_gemnoc },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SM8350_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8350_SLAVE_GEM_NOC_CNOC,
- SM8350_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SM8350_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8350_SLAVE_GEM_NOC_CNOC,
- SM8350_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SM8350_MASTER_APPSS_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SM8350_SLAVE_GEM_NOC_CNOC,
- SM8350_SLAVE_LLCC,
- SM8350_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_cmpnoc = {
.name = "qnm_cmpnoc",
- .id = SM8350_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8350_SLAVE_GEM_NOC_CNOC,
- SM8350_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_gemnoc_cfg = {
.name = "qnm_gemnoc_cfg",
- .id = SM8350_MASTER_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 5,
- .links = { SM8350_SLAVE_MSS_PROC_MS_MPU_CFG,
- SM8350_SLAVE_MCDMA_MS_MPU_CFG,
- SM8350_SLAVE_SERVICE_GEM_NOC_1,
- SM8350_SLAVE_SERVICE_GEM_NOC_2,
- SM8350_SLAVE_SERVICE_GEM_NOC
- },
+ .link_nodes = { &qhs_mdsp_ms_mpu_cfg,
+ &qhs_modem_ms_mpu_cfg,
+ &srvc_even_gemnoc,
+ &srvc_odd_gemnoc,
+ &srvc_sys_gemnoc },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SM8350_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8350_SLAVE_GEM_NOC_CNOC,
- SM8350_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SM8350_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SM8350_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8350_SLAVE_GEM_NOC_CNOC,
- SM8350_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SM8350_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SM8350_SLAVE_GEM_NOC_CNOC,
- SM8350_SLAVE_LLCC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SM8350_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SM8350_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8350_SLAVE_GEM_NOC_CNOC,
- SM8350_SLAVE_LLCC,
- SM8350_SLAVE_MEM_NOC_PCIE_SNOC
- },
+ .link_nodes = { &qns_gem_noc_cnoc,
+ &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
- .id = SM8350_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
- .links = { SM8350_SLAVE_LPASS_CORE_CFG,
- SM8350_SLAVE_LPASS_LPI_CFG,
- SM8350_SLAVE_LPASS_MPU_CFG,
- SM8350_SLAVE_LPASS_TOP_CFG,
- SM8350_SLAVE_SERVICES_LPASS_AML_NOC,
- SM8350_SLAVE_SERVICE_LPASS_AG_NOC
- },
+ .link_nodes = { &qhs_lpass_core,
+ &qhs_lpass_lpi,
+ &qhs_lpass_mpu,
+ &qhs_lpass_top,
+ &srvc_niu_aml_noc,
+ &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SM8350_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = SM8350_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
- .id = SM8350_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = SM8350_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_mnoc_cfg = {
.name = "qnm_mnoc_cfg",
- .id = SM8350_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qnm_video0 = {
.name = "qnm_video0",
- .id = SM8350_MASTER_VIDEO_P0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video1 = {
.name = "qnm_video1",
- .id = SM8350_MASTER_VIDEO_P1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = SM8350_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qxm_mdp0 = {
.name = "qxm_mdp0",
- .id = SM8350_MASTER_MDP0,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_mdp1 = {
.name = "qxm_mdp1",
- .id = SM8350_MASTER_MDP1,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qxm_rot = {
.name = "qxm_rot",
- .id = SM8350_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
- .id = SM8350_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_SERVICE_NSP_NOC },
+ .link_nodes = { &service_nsp_noc },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
- .id = SM8350_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SM8350_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8350_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SM8350_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8350_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
- .id = SM8350_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SM8350_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SM8350_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SM8350_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8350_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SM8350_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SM8350_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8350_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SM8350_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8350_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SM8350_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SM8350_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = SM8350_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SM8350_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SM8350_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SM8350_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SM8350_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_compute_cfg = {
.name = "qhs_compute_cfg",
- .id = SM8350_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SM8350_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = SM8350_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cpr_mx = {
.name = "qhs_cpr_mx",
- .id = SM8350_SLAVE_RBCPR_MX_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SM8350_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = SM8350_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_dcc_cfg = {
.name = "qhs_dcc_cfg",
- .id = SM8350_SLAVE_DCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SM8350_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SM8350_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_hwkm = {
.name = "qhs_hwkm",
- .id = SM8350_SLAVE_HWKM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SM8350_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SM8350_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SM8350_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
- .id = SM8350_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8350_MASTER_CNOC_LPASS_AG_NOC },
+ .link_nodes = { &qhm_config_noc },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SM8350_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
- .id = SM8350_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SM8350_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SM8350_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SM8350_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SM8350_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pka_wrapper_cfg = {
.name = "qhs_pka_wrapper_cfg",
- .id = SM8350_SLAVE_PKA_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_pmu_wrapper_cfg = {
.name = "qhs_pmu_wrapper_cfg",
- .id = SM8350_SLAVE_PMU_WRAPPER_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SM8350_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SM8350_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SM8350_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SM8350_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
- .id = SM8350_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SM8350_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SM8350_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_security = {
.name = "qhs_security",
- .id = SM8350_SLAVE_SECURITY,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
- .id = SM8350_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SM8350_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SM8350_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_card_cfg = {
.name = "qhs_ufs_card_cfg",
- .id = SM8350_SLAVE_UFS_CARD_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SM8350_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SM8350_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_usb3_1 = {
.name = "qhs_usb3_1",
- .id = SM8350_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SM8350_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SM8350_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a1_noc_cfg = {
.name = "qns_a1_noc_cfg",
- .id = SM8350_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_a2_noc_cfg = {
.name = "qns_a2_noc_cfg",
- .id = SM8350_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
- .id = SM8350_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_mnoc_cfg = {
.name = "qns_mnoc_cfg",
- .id = SM8350_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
- .id = SM8350_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
- .id = SM8350_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SM8350_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SM8350_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SM8350_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SM8350_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SM8350_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SM8350_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SM8350_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node qhs_llcc = {
.name = "qhs_llcc",
- .id = SM8350_SLAVE_LLCC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc = {
.name = "qns_gemnoc",
- .id = SM8350_SLAVE_GEM_NOC_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
.name = "qhs_mdsp_ms_mpu_cfg",
- .id = SM8350_SLAVE_MSS_PROC_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_modem_ms_mpu_cfg = {
.name = "qhs_modem_ms_mpu_cfg",
- .id = SM8350_SLAVE_MCDMA_MS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = SM8350_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8350_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SM8350_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { SM8350_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SM8350_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
};
static struct qcom_icc_node srvc_even_gemnoc = {
.name = "srvc_even_gemnoc",
- .id = SM8350_SLAVE_SERVICE_GEM_NOC_1,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_odd_gemnoc = {
.name = "srvc_odd_gemnoc",
- .id = SM8350_SLAVE_SERVICE_GEM_NOC_2,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_sys_gemnoc = {
.name = "srvc_sys_gemnoc",
- .id = SM8350_SLAVE_SERVICE_GEM_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
- .id = SM8350_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
- .id = SM8350_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
- .id = SM8350_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
- .id = SM8350_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
- .id = SM8350_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
- .id = SM8350_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SM8350_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SM8350_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SM8350_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SM8350_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = SM8350_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8350_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_cmpnoc },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
- .id = SM8350_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SM8350_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8350_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SM8350_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8350_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SM8350_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
};
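
The hunks above apply one mechanical transformation per node: drop the per-SoC integer .id, and rewrite each .links array of IDs as a .link_nodes array of pointers to the target nodes. A minimal compilable sketch of the pattern, assuming a simplified stand-in layout rather than the real struct qcom_icc_node from icc-rpmh.h (sketch_node and SKETCH_MAX_LINKS are invented names):

#include <stddef.h>

#define SKETCH_MAX_LINKS 4

struct sketch_node {
	const char *name;
	int channels;
	int buswidth;
	size_t num_links;
	/* was: an array of per-SoC integer IDs named links[] */
	struct sketch_node *link_nodes[SKETCH_MAX_LINKS];
};

/* A node may point at one defined later in the file, hence the block
 * of forward (tentative) definitions that now opens each converted
 * driver, as in the sm8450.c hunk below. */
static struct sketch_node llcc_mc;

static struct sketch_node qns_llcc = {
	.name = "qns_llcc",
	.channels = 4,
	.buswidth = 16,
	.num_links = 1,
	.link_nodes = { &llcc_mc },	/* was: .links = { SM8350_MASTER_LLCC } */
};

static struct sketch_node llcc_mc = {
	.name = "llcc_mc",
	.channels = 4,
	.buswidth = 4,
	/* no outgoing links in this sketch */
};
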
diff --git a/drivers/interconnect/qcom/sm8350.h b/drivers/interconnect/qcom/sm8350.h
deleted file mode 100644
index 074c6131ab36..000000000000
--- a/drivers/interconnect/qcom/sm8350.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Qualcomm SM8350 interconnect IDs
- *
- * Copyright (c) 2021, Linaro Limited
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8350_H
-#define __DRIVERS_INTERCONNECT_QCOM_SM8350_H
-
-#define SM8350_MASTER_GPU_TCU 0
-#define SM8350_MASTER_SYS_TCU 1
-#define SM8350_MASTER_APPSS_PROC 2
-#define SM8350_MASTER_LLCC 3
-#define SM8350_MASTER_CNOC_LPASS_AG_NOC 4
-#define SM8350_MASTER_CDSP_NOC_CFG 5
-#define SM8350_MASTER_QDSS_BAM 6
-#define SM8350_MASTER_QSPI_0 7
-#define SM8350_MASTER_QUP_0 8
-#define SM8350_MASTER_QUP_1 9
-#define SM8350_MASTER_QUP_2 10
-#define SM8350_MASTER_A1NOC_CFG 11
-#define SM8350_MASTER_A2NOC_CFG 12
-#define SM8350_MASTER_A1NOC_SNOC 13
-#define SM8350_MASTER_A2NOC_SNOC 14
-#define SM8350_MASTER_CAMNOC_HF 15
-#define SM8350_MASTER_CAMNOC_ICP 16
-#define SM8350_MASTER_CAMNOC_SF 17
-#define SM8350_MASTER_COMPUTE_NOC 18
-#define SM8350_MASTER_CNOC_DC_NOC 19
-#define SM8350_MASTER_GEM_NOC_CFG 20
-#define SM8350_MASTER_GEM_NOC_CNOC 21
-#define SM8350_MASTER_GEM_NOC_PCIE_SNOC 22
-#define SM8350_MASTER_GFX3D 23
-#define SM8350_MASTER_CNOC_MNOC_CFG 24
-#define SM8350_MASTER_MNOC_HF_MEM_NOC 25
-#define SM8350_MASTER_MNOC_SF_MEM_NOC 26
-#define SM8350_MASTER_ANOC_PCIE_GEM_NOC 27
-#define SM8350_MASTER_SNOC_CFG 28
-#define SM8350_MASTER_SNOC_GC_MEM_NOC 29
-#define SM8350_MASTER_SNOC_SF_MEM_NOC 30
-#define SM8350_MASTER_VIDEO_P0 31
-#define SM8350_MASTER_VIDEO_P1 32
-#define SM8350_MASTER_VIDEO_PROC 33
-#define SM8350_MASTER_QUP_CORE_0 34
-#define SM8350_MASTER_QUP_CORE_1 35
-#define SM8350_MASTER_QUP_CORE_2 36
-#define SM8350_MASTER_CRYPTO 37
-#define SM8350_MASTER_IPA 38
-#define SM8350_MASTER_MDP0 39
-#define SM8350_MASTER_MDP1 40
-#define SM8350_MASTER_CDSP_PROC 41
-#define SM8350_MASTER_PIMEM 42
-#define SM8350_MASTER_ROTATOR 43
-#define SM8350_MASTER_GIC 44
-#define SM8350_MASTER_PCIE_0 45
-#define SM8350_MASTER_PCIE_1 46
-#define SM8350_MASTER_QDSS_DAP 47
-#define SM8350_MASTER_QDSS_ETR 48
-#define SM8350_MASTER_SDCC_2 49
-#define SM8350_MASTER_SDCC_4 50
-#define SM8350_MASTER_UFS_CARD 51
-#define SM8350_MASTER_UFS_MEM 52
-#define SM8350_MASTER_USB3_0 53
-#define SM8350_MASTER_USB3_1 54
-#define SM8350_SLAVE_EBI1 55
-#define SM8350_SLAVE_AHB2PHY_SOUTH 56
-#define SM8350_SLAVE_AHB2PHY_NORTH 57
-#define SM8350_SLAVE_AOSS 58
-#define SM8350_SLAVE_APPSS 59
-#define SM8350_SLAVE_CAMERA_CFG 60
-#define SM8350_SLAVE_CLK_CTL 61
-#define SM8350_SLAVE_CDSP_CFG 62
-#define SM8350_SLAVE_RBCPR_CX_CFG 63
-#define SM8350_SLAVE_RBCPR_MMCX_CFG 64
-#define SM8350_SLAVE_RBCPR_MX_CFG 65
-#define SM8350_SLAVE_CRYPTO_0_CFG 66
-#define SM8350_SLAVE_CX_RDPM 67
-#define SM8350_SLAVE_DCC_CFG 68
-#define SM8350_SLAVE_DISPLAY_CFG 69
-#define SM8350_SLAVE_GFX3D_CFG 70
-#define SM8350_SLAVE_HWKM 71
-#define SM8350_SLAVE_IMEM_CFG 72
-#define SM8350_SLAVE_IPA_CFG 73
-#define SM8350_SLAVE_IPC_ROUTER_CFG 74
-#define SM8350_SLAVE_LLCC_CFG 75
-#define SM8350_SLAVE_LPASS 76
-#define SM8350_SLAVE_LPASS_CORE_CFG 77
-#define SM8350_SLAVE_LPASS_LPI_CFG 78
-#define SM8350_SLAVE_LPASS_MPU_CFG 79
-#define SM8350_SLAVE_LPASS_TOP_CFG 80
-#define SM8350_SLAVE_MSS_PROC_MS_MPU_CFG 81
-#define SM8350_SLAVE_MCDMA_MS_MPU_CFG 82
-#define SM8350_SLAVE_CNOC_MSS 83
-#define SM8350_SLAVE_MX_RDPM 84
-#define SM8350_SLAVE_PCIE_0_CFG 85
-#define SM8350_SLAVE_PCIE_1_CFG 86
-#define SM8350_SLAVE_PDM 87
-#define SM8350_SLAVE_PIMEM_CFG 88
-#define SM8350_SLAVE_PKA_WRAPPER_CFG 89
-#define SM8350_SLAVE_PMU_WRAPPER_CFG 90
-#define SM8350_SLAVE_QDSS_CFG 91
-#define SM8350_SLAVE_QSPI_0 92
-#define SM8350_SLAVE_QUP_0 93
-#define SM8350_SLAVE_QUP_1 94
-#define SM8350_SLAVE_QUP_2 95
-#define SM8350_SLAVE_SDCC_2 96
-#define SM8350_SLAVE_SDCC_4 97
-#define SM8350_SLAVE_SECURITY 98
-#define SM8350_SLAVE_SPSS_CFG 99
-#define SM8350_SLAVE_TCSR 100
-#define SM8350_SLAVE_TLMM 101
-#define SM8350_SLAVE_UFS_CARD_CFG 102
-#define SM8350_SLAVE_UFS_MEM_CFG 103
-#define SM8350_SLAVE_USB3_0 104
-#define SM8350_SLAVE_USB3_1 105
-#define SM8350_SLAVE_VENUS_CFG 106
-#define SM8350_SLAVE_VSENSE_CTRL_CFG 107
-#define SM8350_SLAVE_A1NOC_CFG 108
-#define SM8350_SLAVE_A1NOC_SNOC 109
-#define SM8350_SLAVE_A2NOC_CFG 110
-#define SM8350_SLAVE_A2NOC_SNOC 111
-#define SM8350_SLAVE_DDRSS_CFG 112
-#define SM8350_SLAVE_GEM_NOC_CNOC 113
-#define SM8350_SLAVE_GEM_NOC_CFG 114
-#define SM8350_SLAVE_SNOC_GEM_NOC_GC 115
-#define SM8350_SLAVE_SNOC_GEM_NOC_SF 116
-#define SM8350_SLAVE_LLCC 117
-#define SM8350_SLAVE_MNOC_HF_MEM_NOC 118
-#define SM8350_SLAVE_MNOC_SF_MEM_NOC 119
-#define SM8350_SLAVE_CNOC_MNOC_CFG 120
-#define SM8350_SLAVE_CDSP_MEM_NOC 121
-#define SM8350_SLAVE_MEM_NOC_PCIE_SNOC 122
-#define SM8350_SLAVE_ANOC_PCIE_GEM_NOC 123
-#define SM8350_SLAVE_SNOC_CFG 124
-#define SM8350_SLAVE_QUP_CORE_0 125
-#define SM8350_SLAVE_QUP_CORE_1 126
-#define SM8350_SLAVE_QUP_CORE_2 127
-#define SM8350_SLAVE_BOOT_IMEM 128
-#define SM8350_SLAVE_IMEM 129
-#define SM8350_SLAVE_PIMEM 130
-#define SM8350_SLAVE_SERVICE_NSP_NOC 131
-#define SM8350_SLAVE_SERVICE_A1NOC 132
-#define SM8350_SLAVE_SERVICE_A2NOC 133
-#define SM8350_SLAVE_SERVICE_CNOC 134
-#define SM8350_SLAVE_SERVICE_GEM_NOC_1 135
-#define SM8350_SLAVE_SERVICE_MNOC 136
-#define SM8350_SLAVE_SERVICES_LPASS_AML_NOC 137
-#define SM8350_SLAVE_SERVICE_LPASS_AG_NOC 138
-#define SM8350_SLAVE_SERVICE_GEM_NOC_2 139
-#define SM8350_SLAVE_SERVICE_SNOC 140
-#define SM8350_SLAVE_SERVICE_GEM_NOC 141
-#define SM8350_SLAVE_PCIE_0 142
-#define SM8350_SLAVE_PCIE_1 143
-#define SM8350_SLAVE_QDSS_STM 144
-#define SM8350_SLAVE_TCU 145
-
-#endif
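
With every .links array rewritten this way, the SM8350_* macros deleted above have no remaining users: their only jobs were to fill .id fields and to name .links entries, and both are gone. What the pointer form buys at provider setup, in illustrative pseudologic only (create_edge, nodes[] and num_nodes are hypothetical stand-ins, not the actual icc-rpmh probe path):

/* old: each integer link ID had to be resolved against the table of
 * registered nodes before an edge could be created */
for (i = 0; i < node->num_links; i++)
	for (j = 0; j < num_nodes; j++)
		if (nodes[j]->id == node->links[i])
			create_edge(node, nodes[j]);

/* new: the edge target is already known at compile time */
for (i = 0; i < node->num_links; i++)
	create_edge(node, node->link_nodes[i]);
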
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
index eb7e17df32ba..669a638bf3ef 100644
--- a/drivers/interconnect/qcom/sm8450.c
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -16,1325 +16,1262 @@
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
-#include "sm8450.h"
+
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qnm_a1noc_cfg;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qnm_a2noc_cfg;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node qxm_sensorss_q6;
+static struct qcom_icc_node qxm_sp;
+static struct qcom_icc_node xm_qdss_etr_0;
+static struct qcom_icc_node xm_qdss_etr_1;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qup2_core_master;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_mdsp;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_nsp_gemnoc;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qhm_config_noc;
+static struct qcom_icc_node qxm_lpass_dsp;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_icp;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_mdp;
+static struct qcom_icc_node qnm_mnoc_cfg;
+static struct qcom_icc_node qnm_rot;
+static struct qcom_icc_node qnm_vapss_hcp;
+static struct qcom_icc_node qnm_video;
+static struct qcom_icc_node qnm_video_cv_cpu;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qnm_video_v_cpu;
+static struct qcom_icc_node qhm_nsp_noc_config;
+static struct qcom_icc_node qxm_nsp;
+static struct qcom_icc_node qnm_pcie_anoc_cfg;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node qhm_gic;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_lpass_noc;
+static struct qcom_icc_node qnm_snoc_cfg;
+static struct qcom_icc_node qxm_pimem;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qnm_mnoc_hf_disp;
+static struct qcom_icc_node qnm_mnoc_sf_disp;
+static struct qcom_icc_node qnm_pcie_disp;
+static struct qcom_icc_node llcc_mc_disp;
+static struct qcom_icc_node qnm_mdp_disp;
+static struct qcom_icc_node qnm_rot_disp;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node srvc_aggre1_noc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node srvc_aggre2_noc;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qup2_core_slave;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_compute_cfg;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mxa;
+static struct qcom_icc_node qhs_cpr_mxc;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_lpass_cfg;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_mx_rdpm;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup2;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_spss_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_tme_cfg;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qns_a1_noc_cfg;
+static struct qcom_icc_node qns_a2_noc_cfg;
+static struct qcom_icc_node qns_ddrss_cfg;
+static struct qcom_icc_node qns_mnoc_cfg;
+static struct qcom_icc_node qns_pcie_anoc_cfg;
+static struct qcom_icc_node qns_snoc_cfg;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_pimem;
+static struct qcom_icc_node srvc_cnoc;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node qhs_lpass_core;
+static struct qcom_icc_node qhs_lpass_lpi;
+static struct qcom_icc_node qhs_lpass_mpu;
+static struct qcom_icc_node qhs_lpass_top;
+static struct qcom_icc_node qns_sysnoc;
+static struct qcom_icc_node srvc_niu_aml_noc;
+static struct qcom_icc_node srvc_niu_lpass_agnoc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node service_nsp_noc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node srvc_pcie_aggre_noc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node srvc_snoc;
+static struct qcom_icc_node qns_llcc_disp;
+static struct qcom_icc_node ebi_disp;
+static struct qcom_icc_node qns_mem_noc_hf_disp;
+static struct qcom_icc_node qns_mem_noc_sf_disp;
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SM8450_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SM8450_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qnm_a1noc_cfg = {
.name = "qnm_a1noc_cfg",
- .id = SM8450_MASTER_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_SERVICE_A1NOC },
+ .link_nodes = { &srvc_aggre1_noc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SM8450_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SM8450_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SM8450_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SM8450_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = SM8450_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SM8450_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qnm_a2noc_cfg = {
.name = "qnm_a2noc_cfg",
- .id = SM8450_MASTER_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_SERVICE_A2NOC },
+ .link_nodes = { &srvc_aggre2_noc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SM8450_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SM8450_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_sensorss_q6 = {
.name = "qxm_sensorss_q6",
- .id = SM8450_MASTER_SENSORS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_sp = {
.name = "qxm_sp",
- .id = SM8450_MASTER_SP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
- .id = SM8450_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
- .id = SM8450_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SM8450_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SM8450_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SM8450_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
- .id = SM8450_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_QUP_CORE_2 },
+ .link_nodes = { &qup2_core_slave },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SM8450_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 51,
- .links = { SM8450_SLAVE_AHB2PHY_SOUTH, SM8450_SLAVE_AHB2PHY_NORTH,
- SM8450_SLAVE_AOSS, SM8450_SLAVE_CAMERA_CFG,
- SM8450_SLAVE_CLK_CTL, SM8450_SLAVE_CDSP_CFG,
- SM8450_SLAVE_RBCPR_CX_CFG, SM8450_SLAVE_RBCPR_MMCX_CFG,
- SM8450_SLAVE_RBCPR_MXA_CFG, SM8450_SLAVE_RBCPR_MXC_CFG,
- SM8450_SLAVE_CRYPTO_0_CFG, SM8450_SLAVE_CX_RDPM,
- SM8450_SLAVE_DISPLAY_CFG, SM8450_SLAVE_GFX3D_CFG,
- SM8450_SLAVE_IMEM_CFG, SM8450_SLAVE_IPA_CFG,
- SM8450_SLAVE_IPC_ROUTER_CFG, SM8450_SLAVE_LPASS,
- SM8450_SLAVE_CNOC_MSS, SM8450_SLAVE_MX_RDPM,
- SM8450_SLAVE_PCIE_0_CFG, SM8450_SLAVE_PCIE_1_CFG,
- SM8450_SLAVE_PDM, SM8450_SLAVE_PIMEM_CFG,
- SM8450_SLAVE_PRNG, SM8450_SLAVE_QDSS_CFG,
- SM8450_SLAVE_QSPI_0, SM8450_SLAVE_QUP_0,
- SM8450_SLAVE_QUP_1, SM8450_SLAVE_QUP_2,
- SM8450_SLAVE_SDCC_2, SM8450_SLAVE_SDCC_4,
- SM8450_SLAVE_SPSS_CFG, SM8450_SLAVE_TCSR,
- SM8450_SLAVE_TLMM, SM8450_SLAVE_TME_CFG,
- SM8450_SLAVE_UFS_MEM_CFG, SM8450_SLAVE_USB3_0,
- SM8450_SLAVE_VENUS_CFG, SM8450_SLAVE_VSENSE_CTRL_CFG,
- SM8450_SLAVE_A1NOC_CFG, SM8450_SLAVE_A2NOC_CFG,
- SM8450_SLAVE_DDRSS_CFG, SM8450_SLAVE_CNOC_MNOC_CFG,
- SM8450_SLAVE_PCIE_ANOC_CFG, SM8450_SLAVE_SNOC_CFG,
- SM8450_SLAVE_IMEM, SM8450_SLAVE_PIMEM,
- SM8450_SLAVE_SERVICE_CNOC, SM8450_SLAVE_QDSS_STM,
- SM8450_SLAVE_TCU },
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_aoss, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_compute_cfg,
+ &qhs_cpr_cx, &qhs_cpr_mmcx,
+ &qhs_cpr_mxa, &qhs_cpr_mxc,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_ipa,
+ &qhs_ipc_router, &qhs_lpass_cfg,
+ &qhs_mss_cfg, &qhs_mx_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pdm, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_qup2,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_spss_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_tme_cfg,
+ &qhs_ufs_mem_cfg, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qns_a1_noc_cfg, &qns_a2_noc_cfg,
+ &qns_ddrss_cfg, &qns_mnoc_cfg,
+ &qns_pcie_anoc_cfg, &qns_snoc_cfg,
+ &qxs_imem, &qxs_pimem,
+ &srvc_cnoc, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SM8450_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8450_SLAVE_PCIE_0, SM8450_SLAVE_PCIE_1 },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SM8450_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SM8450_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SM8450_MASTER_APPSS_PROC,
.channels = 3,
.buswidth = 32,
.num_links = 3,
- .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
- SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SM8450_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_mdsp = {
.name = "qnm_mdsp",
- .id = SM8450_MASTER_MSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
- SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SM8450_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SM8450_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_nsp_gemnoc = {
.name = "qnm_nsp_gemnoc",
- .id = SM8450_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SM8450_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SM8450_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SM8450_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
- SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qhm_config_noc = {
.name = "qhm_config_noc",
- .id = SM8450_MASTER_CNOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
.num_links = 6,
- .links = { SM8450_SLAVE_LPASS_CORE_CFG, SM8450_SLAVE_LPASS_LPI_CFG,
- SM8450_SLAVE_LPASS_MPU_CFG, SM8450_SLAVE_LPASS_TOP_CFG,
- SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC },
+ .link_nodes = { &qhs_lpass_core, &qhs_lpass_lpi,
+ &qhs_lpass_mpu, &qhs_lpass_top,
+ &srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node qxm_lpass_dsp = {
.name = "qxm_lpass_dsp",
- .id = SM8450_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 4,
- .links = { SM8450_SLAVE_LPASS_TOP_CFG, SM8450_SLAVE_LPASS_SNOC,
- SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC },
+ .link_nodes = { &qhs_lpass_top, &qns_sysnoc,
+ &srvc_niu_aml_noc, &srvc_niu_lpass_agnoc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SM8450_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = SM8450_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
- .id = SM8450_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = SM8450_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_mdp = {
.name = "qnm_mdp",
- .id = SM8450_MASTER_MDP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_mnoc_cfg = {
.name = "qnm_mnoc_cfg",
- .id = SM8450_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qnm_rot = {
.name = "qnm_rot",
- .id = SM8450_MASTER_ROTATOR,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_vapss_hcp = {
.name = "qnm_vapss_hcp",
- .id = SM8450_MASTER_CDSP_HCP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video = {
.name = "qnm_video",
- .id = SM8450_MASTER_VIDEO,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cv_cpu = {
.name = "qnm_video_cv_cpu",
- .id = SM8450_MASTER_VIDEO_CV_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = SM8450_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
- .id = SM8450_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qhm_nsp_noc_config = {
.name = "qhm_nsp_noc_config",
- .id = SM8450_MASTER_CDSP_NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_SERVICE_NSP_NOC },
+ .link_nodes = { &service_nsp_noc },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
- .id = SM8450_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_node qnm_pcie_anoc_cfg = {
.name = "qnm_pcie_anoc_cfg",
- .id = SM8450_MASTER_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_SERVICE_PCIE_ANOC },
+ .link_nodes = { &srvc_pcie_aggre_noc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SM8450_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SM8450_MASTER_PCIE_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
- .id = SM8450_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SM8450_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SM8450_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_lpass_noc = {
.name = "qnm_lpass_noc",
- .id = SM8450_MASTER_LPASS_ANOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_snoc_cfg = {
.name = "qnm_snoc_cfg",
- .id = SM8450_MASTER_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_SERVICE_SNOC },
+ .link_nodes = { &srvc_snoc },
};
static struct qcom_icc_node qxm_pimem = {
.name = "qxm_pimem",
- .id = SM8450_MASTER_PIMEM,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SM8450_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qnm_mnoc_hf_disp = {
.name = "qnm_mnoc_hf_disp",
- .id = SM8450_MASTER_MNOC_HF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_LLCC_DISP },
+ .link_nodes = { &qns_llcc_disp },
};
static struct qcom_icc_node qnm_mnoc_sf_disp = {
.name = "qnm_mnoc_sf_disp",
- .id = SM8450_MASTER_MNOC_SF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_LLCC_DISP },
+ .link_nodes = { &qns_llcc_disp },
};
static struct qcom_icc_node qnm_pcie_disp = {
.name = "qnm_pcie_disp",
- .id = SM8450_MASTER_ANOC_PCIE_GEM_NOC_DISP,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_SLAVE_LLCC_DISP },
+ .link_nodes = { &qns_llcc_disp },
};
static struct qcom_icc_node llcc_mc_disp = {
.name = "llcc_mc_disp",
- .id = SM8450_MASTER_LLCC_DISP,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_SLAVE_EBI1_DISP },
+ .link_nodes = { &ebi_disp },
};
static struct qcom_icc_node qnm_mdp_disp = {
.name = "qnm_mdp_disp",
- .id = SM8450_MASTER_MDP_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP },
+ .link_nodes = { &qns_mem_noc_hf_disp },
};
static struct qcom_icc_node qnm_rot_disp = {
.name = "qnm_rot_disp",
- .id = SM8450_MASTER_ROTATOR_DISP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP },
+ .link_nodes = { &qns_mem_noc_sf_disp },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SM8450_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node srvc_aggre1_noc = {
.name = "srvc_aggre1_noc",
- .id = SM8450_SLAVE_SERVICE_A1NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SM8450_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node srvc_aggre2_noc = {
.name = "srvc_aggre2_noc",
- .id = SM8450_SLAVE_SERVICE_A2NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SM8450_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SM8450_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
- .id = SM8450_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SM8450_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = SM8450_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SM8450_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SM8450_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SM8450_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_compute_cfg = {
.name = "qhs_compute_cfg",
- .id = SM8450_SLAVE_CDSP_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { MASTER_CDSP_NOC_CFG },
+ .link_nodes = { &qhm_nsp_noc_config },
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SM8450_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = SM8450_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxa = {
.name = "qhs_cpr_mxa",
- .id = SM8450_SLAVE_RBCPR_MXA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxc = {
.name = "qhs_cpr_mxc",
- .id = SM8450_SLAVE_RBCPR_MXC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SM8450_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = SM8450_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SM8450_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SM8450_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SM8450_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SM8450_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SM8450_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_cfg = {
.name = "qhs_lpass_cfg",
- .id = SM8450_SLAVE_LPASS,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { MASTER_CNOC_LPASS_AG_NOC },
+ .link_nodes = { &qhm_config_noc },
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SM8450_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
- .id = SM8450_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SM8450_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SM8450_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SM8450_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SM8450_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SM8450_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SM8450_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SM8450_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = SM8450_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SM8450_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
- .id = SM8450_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SM8450_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SM8450_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
- .id = SM8450_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SM8450_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SM8450_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
- .id = SM8450_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SM8450_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SM8450_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SM8450_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SM8450_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_a1_noc_cfg = {
.name = "qns_a1_noc_cfg",
- .id = SM8450_SLAVE_A1NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_MASTER_A1NOC_CFG },
+ .link_nodes = { &qnm_a1noc_cfg },
};
static struct qcom_icc_node qns_a2_noc_cfg = {
.name = "qns_a2_noc_cfg",
- .id = SM8450_SLAVE_A2NOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_MASTER_A2NOC_CFG },
+ .link_nodes = { &qnm_a2noc_cfg },
};
static struct qcom_icc_node qns_ddrss_cfg = {
.name = "qns_ddrss_cfg",
- .id = SM8450_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 1,
//FIXME where is link
};
static struct qcom_icc_node qns_mnoc_cfg = {
.name = "qns_mnoc_cfg",
- .id = SM8450_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qnm_mnoc_cfg },
};
static struct qcom_icc_node qns_pcie_anoc_cfg = {
.name = "qns_pcie_anoc_cfg",
- .id = SM8450_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_MASTER_PCIE_ANOC_CFG },
+ .link_nodes = { &qnm_pcie_anoc_cfg },
};
static struct qcom_icc_node qns_snoc_cfg = {
.name = "qns_snoc_cfg",
- .id = SM8450_SLAVE_SNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8450_MASTER_SNOC_CFG },
+ .link_nodes = { &qnm_snoc_cfg },
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SM8450_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qxs_pimem = {
.name = "qxs_pimem",
- .id = SM8450_SLAVE_PIMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node srvc_cnoc = {
.name = "srvc_cnoc",
- .id = SM8450_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SM8450_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SM8450_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SM8450_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SM8450_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = SM8450_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SM8450_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SM8450_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node qhs_lpass_core = {
.name = "qhs_lpass_core",
- .id = SM8450_SLAVE_LPASS_CORE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_lpi = {
.name = "qhs_lpass_lpi",
- .id = SM8450_SLAVE_LPASS_LPI_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_mpu = {
.name = "qhs_lpass_mpu",
- .id = SM8450_SLAVE_LPASS_MPU_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_lpass_top = {
.name = "qhs_lpass_top",
- .id = SM8450_SLAVE_LPASS_TOP_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_sysnoc = {
.name = "qns_sysnoc",
- .id = SM8450_SLAVE_LPASS_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_MASTER_LPASS_ANOC },
+ .link_nodes = { &qnm_lpass_noc },
};
static struct qcom_icc_node srvc_niu_aml_noc = {
.name = "srvc_niu_aml_noc",
- .id = SM8450_SLAVE_SERVICES_LPASS_AML_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node srvc_niu_lpass_agnoc = {
.name = "srvc_niu_lpass_agnoc",
- .id = SM8450_SLAVE_SERVICE_LPASS_AG_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SM8450_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SM8450_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SM8450_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SM8450_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = SM8450_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_nsp_gemnoc },
};
static struct qcom_icc_node service_nsp_noc = {
.name = "service_nsp_noc",
- .id = SM8450_SLAVE_SERVICE_NSP_NOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SM8450_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_pcie_aggre_noc = {
.name = "srvc_pcie_aggre_noc",
- .id = SM8450_SLAVE_SERVICE_PCIE_ANOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SM8450_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8450_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SM8450_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node srvc_snoc = {
.name = "srvc_snoc",
- .id = SM8450_SLAVE_SERVICE_SNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_llcc_disp = {
.name = "qns_llcc_disp",
- .id = SM8450_SLAVE_LLCC_DISP,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { SM8450_MASTER_LLCC_DISP },
+ .link_nodes = { &llcc_mc_disp },
};
static struct qcom_icc_node ebi_disp = {
.name = "ebi_disp",
- .id = SM8450_SLAVE_EBI1_DISP,
.channels = 4,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf_disp = {
.name = "qns_mem_noc_hf_disp",
- .id = SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_MASTER_MNOC_HF_MEM_NOC_DISP },
+ .link_nodes = { &qnm_mnoc_hf_disp },
};
static struct qcom_icc_node qns_mem_noc_sf_disp = {
.name = "qns_mem_noc_sf_disp",
- .id = SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8450_MASTER_MNOC_SF_MEM_NOC_DISP },
+ .link_nodes = { &qnm_mnoc_sf_disp },
};
static struct qcom_icc_bcm bcm_acv = {
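
Two hunks in the sm8450.c diff above deserve a close read: qhs_compute_cfg and qhs_lpass_cfg are the two nodes in this file whose .links entries used unprefixed macro names (from the dt-binding header) rather than SM8450_* IDs, and a bare macro is not a valid .link_nodes entry. The conversion therefore resolves each by name against the .id values removed in the same diff:

/*
 * MASTER_CDSP_NOC_CFG      -> &qhm_nsp_noc_config
 *                             (its removed .id was SM8450_MASTER_CDSP_NOC_CFG)
 * MASTER_CNOC_LPASS_AG_NOC -> &qhm_config_noc
 *                             (its removed .id was SM8450_MASTER_CNOC_LPASS_AG_NOC)
 */
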
diff --git a/drivers/interconnect/qcom/sm8450.h b/drivers/interconnect/qcom/sm8450.h
deleted file mode 100644
index a5790ec6767b..000000000000
--- a/drivers/interconnect/qcom/sm8450.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SM8450 interconnect IDs
- *
- * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, Linaro Limited
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8450_H
-#define __DRIVERS_INTERCONNECT_QCOM_SM8450_H
-
-#define SM8450_MASTER_GPU_TCU 0
-#define SM8450_MASTER_SYS_TCU 1
-#define SM8450_MASTER_APPSS_PROC 2
-#define SM8450_MASTER_LLCC 3
-#define SM8450_MASTER_CNOC_LPASS_AG_NOC 4
-#define SM8450_MASTER_GIC_AHB 5
-#define SM8450_MASTER_CDSP_NOC_CFG 6
-#define SM8450_MASTER_QDSS_BAM 7
-#define SM8450_MASTER_QSPI_0 8
-#define SM8450_MASTER_QUP_0 9
-#define SM8450_MASTER_QUP_1 10
-#define SM8450_MASTER_QUP_2 11
-#define SM8450_MASTER_A1NOC_CFG 12
-#define SM8450_MASTER_A2NOC_CFG 13
-#define SM8450_MASTER_A1NOC_SNOC 14
-#define SM8450_MASTER_A2NOC_SNOC 15
-#define SM8450_MASTER_CAMNOC_HF 16
-#define SM8450_MASTER_CAMNOC_ICP 17
-#define SM8450_MASTER_CAMNOC_SF 18
-#define SM8450_MASTER_GEM_NOC_CNOC 19
-#define SM8450_MASTER_GEM_NOC_PCIE_SNOC 20
-#define SM8450_MASTER_GFX3D 21
-#define SM8450_MASTER_LPASS_ANOC 22
-#define SM8450_MASTER_MDP 23
-#define SM8450_MASTER_MDP0 SM8450_MASTER_MDP
-#define SM8450_MASTER_MDP1 SM8450_MASTER_MDP
-#define SM8450_MASTER_MSS_PROC 24
-#define SM8450_MASTER_CNOC_MNOC_CFG 25
-#define SM8450_MASTER_MNOC_HF_MEM_NOC 26
-#define SM8450_MASTER_MNOC_SF_MEM_NOC 27
-#define SM8450_MASTER_COMPUTE_NOC 28
-#define SM8450_MASTER_ANOC_PCIE_GEM_NOC 29
-#define SM8450_MASTER_PCIE_ANOC_CFG 30
-#define SM8450_MASTER_ROTATOR 31
-#define SM8450_MASTER_SNOC_CFG 32
-#define SM8450_MASTER_SNOC_GC_MEM_NOC 33
-#define SM8450_MASTER_SNOC_SF_MEM_NOC 34
-#define SM8450_MASTER_CDSP_HCP 35
-#define SM8450_MASTER_VIDEO 36
-#define SM8450_MASTER_VIDEO_P0 SM8450_MASTER_VIDEO
-#define SM8450_MASTER_VIDEO_P1 SM8450_MASTER_VIDEO
-#define SM8450_MASTER_VIDEO_CV_PROC 37
-#define SM8450_MASTER_VIDEO_PROC 38
-#define SM8450_MASTER_VIDEO_V_PROC 39
-#define SM8450_MASTER_QUP_CORE_0 40
-#define SM8450_MASTER_QUP_CORE_1 41
-#define SM8450_MASTER_QUP_CORE_2 42
-#define SM8450_MASTER_CRYPTO 43
-#define SM8450_MASTER_IPA 44
-#define SM8450_MASTER_LPASS_PROC 45
-#define SM8450_MASTER_CDSP_PROC 46
-#define SM8450_MASTER_PIMEM 47
-#define SM8450_MASTER_SENSORS_PROC 48
-#define SM8450_MASTER_SP 49
-#define SM8450_MASTER_GIC 50
-#define SM8450_MASTER_PCIE_0 51
-#define SM8450_MASTER_PCIE_1 52
-#define SM8450_MASTER_QDSS_ETR 53
-#define SM8450_MASTER_QDSS_ETR_1 54
-#define SM8450_MASTER_SDCC_2 55
-#define SM8450_MASTER_SDCC_4 56
-#define SM8450_MASTER_UFS_MEM 57
-#define SM8450_MASTER_USB3_0 58
-#define SM8450_SLAVE_EBI1 512
-#define SM8450_SLAVE_AHB2PHY_SOUTH 513
-#define SM8450_SLAVE_AHB2PHY_NORTH 514
-#define SM8450_SLAVE_AOSS 515
-#define SM8450_SLAVE_CAMERA_CFG 516
-#define SM8450_SLAVE_CLK_CTL 517
-#define SM8450_SLAVE_CDSP_CFG 518
-#define SM8450_SLAVE_RBCPR_CX_CFG 519
-#define SM8450_SLAVE_RBCPR_MMCX_CFG 520
-#define SM8450_SLAVE_RBCPR_MXA_CFG 521
-#define SM8450_SLAVE_RBCPR_MXC_CFG 522
-#define SM8450_SLAVE_CRYPTO_0_CFG 523
-#define SM8450_SLAVE_CX_RDPM 524
-#define SM8450_SLAVE_DISPLAY_CFG 525
-#define SM8450_SLAVE_GFX3D_CFG 526
-#define SM8450_SLAVE_IMEM_CFG 527
-#define SM8450_SLAVE_IPA_CFG 528
-#define SM8450_SLAVE_IPC_ROUTER_CFG 529
-#define SM8450_SLAVE_LPASS 530
-#define SM8450_SLAVE_LPASS_CORE_CFG 531
-#define SM8450_SLAVE_LPASS_LPI_CFG 532
-#define SM8450_SLAVE_LPASS_MPU_CFG 533
-#define SM8450_SLAVE_LPASS_TOP_CFG 534
-#define SM8450_SLAVE_CNOC_MSS 535
-#define SM8450_SLAVE_MX_RDPM 536
-#define SM8450_SLAVE_PCIE_0_CFG 537
-#define SM8450_SLAVE_PCIE_1_CFG 538
-#define SM8450_SLAVE_PDM 539
-#define SM8450_SLAVE_PIMEM_CFG 540
-#define SM8450_SLAVE_PRNG 541
-#define SM8450_SLAVE_QDSS_CFG 542
-#define SM8450_SLAVE_QSPI_0 543
-#define SM8450_SLAVE_QUP_0 544
-#define SM8450_SLAVE_QUP_1 545
-#define SM8450_SLAVE_QUP_2 546
-#define SM8450_SLAVE_SDCC_2 547
-#define SM8450_SLAVE_SDCC_4 548
-#define SM8450_SLAVE_SPSS_CFG 549
-#define SM8450_SLAVE_TCSR 550
-#define SM8450_SLAVE_TLMM 551
-#define SM8450_SLAVE_TME_CFG 552
-#define SM8450_SLAVE_UFS_MEM_CFG 553
-#define SM8450_SLAVE_USB3_0 554
-#define SM8450_SLAVE_VENUS_CFG 555
-#define SM8450_SLAVE_VSENSE_CTRL_CFG 556
-#define SM8450_SLAVE_A1NOC_CFG 557
-#define SM8450_SLAVE_A1NOC_SNOC 558
-#define SM8450_SLAVE_A2NOC_CFG 559
-#define SM8450_SLAVE_A2NOC_SNOC 560
-#define SM8450_SLAVE_DDRSS_CFG 561
-#define SM8450_SLAVE_GEM_NOC_CNOC 562
-#define SM8450_SLAVE_SNOC_GEM_NOC_GC 563
-#define SM8450_SLAVE_SNOC_GEM_NOC_SF 564
-#define SM8450_SLAVE_LLCC 565
-#define SM8450_SLAVE_MNOC_HF_MEM_NOC 566
-#define SM8450_SLAVE_MNOC_SF_MEM_NOC 567
-#define SM8450_SLAVE_CNOC_MNOC_CFG 568
-#define SM8450_SLAVE_CDSP_MEM_NOC 569
-#define SM8450_SLAVE_MEM_NOC_PCIE_SNOC 570
-#define SM8450_SLAVE_PCIE_ANOC_CFG 571
-#define SM8450_SLAVE_ANOC_PCIE_GEM_NOC 572
-#define SM8450_SLAVE_SNOC_CFG 573
-#define SM8450_SLAVE_LPASS_SNOC 574
-#define SM8450_SLAVE_QUP_CORE_0 575
-#define SM8450_SLAVE_QUP_CORE_1 576
-#define SM8450_SLAVE_QUP_CORE_2 577
-#define SM8450_SLAVE_IMEM 578
-#define SM8450_SLAVE_PIMEM 579
-#define SM8450_SLAVE_SERVICE_NSP_NOC 580
-#define SM8450_SLAVE_SERVICE_A1NOC 581
-#define SM8450_SLAVE_SERVICE_A2NOC 582
-#define SM8450_SLAVE_SERVICE_CNOC 583
-#define SM8450_SLAVE_SERVICE_MNOC 584
-#define SM8450_SLAVE_SERVICES_LPASS_AML_NOC 585
-#define SM8450_SLAVE_SERVICE_LPASS_AG_NOC 586
-#define SM8450_SLAVE_SERVICE_PCIE_ANOC 587
-#define SM8450_SLAVE_SERVICE_SNOC 588
-#define SM8450_SLAVE_PCIE_0 589
-#define SM8450_SLAVE_PCIE_1 590
-#define SM8450_SLAVE_QDSS_STM 591
-#define SM8450_SLAVE_TCU 592
-#define SM8450_MASTER_LLCC_DISP 1000
-#define SM8450_MASTER_MDP_DISP 1001
-#define SM8450_MASTER_MDP0_DISP SM8450_MASTER_MDP_DISP
-#define SM8450_MASTER_MDP1_DISP SM8450_MASTER_MDP_DISP
-#define SM8450_MASTER_MNOC_HF_MEM_NOC_DISP 1002
-#define SM8450_MASTER_MNOC_SF_MEM_NOC_DISP 1003
-#define SM8450_MASTER_ANOC_PCIE_GEM_NOC_DISP 1004
-#define SM8450_MASTER_ROTATOR_DISP 1005
-#define SM8450_SLAVE_EBI1_DISP 1512
-#define SM8450_SLAVE_LLCC_DISP 1513
-#define SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP 1514
-#define SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP 1515
-
-#endif
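
One last detail of the deleted header: the alias macros for merged nodes, e.g. SM8450_MASTER_MDP0 and SM8450_MASTER_MDP1 both expanding to SM8450_MASTER_MDP (likewise VIDEO_P0/P1 and the MDP _DISP variants), presumably retained so references to the pre-merge names kept compiling. With pointer links there is a single node object to take the address of, so the alias layer has nothing left to alias:

/*
 *   .links = { SM8450_MASTER_MDP0 }  and
 *   .links = { SM8450_MASTER_MDP1 }  both named the qnm_mdp node;
 *   .link_nodes = { &qnm_mdp }       says so directly.
 */
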
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index fdb97d1f1d07..d01762e13272 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -18,1103 +18,1048 @@
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
-#include "sm8550.h"
+
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node qxm_sp;
+static struct qcom_icc_node xm_qdss_etr_0;
+static struct qcom_icc_node xm_qdss_etr_1;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qup2_core_master;
+static struct qcom_icc_node qsm_cfg;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_lpass_gemnoc;
+static struct qcom_icc_node qnm_mdsp;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_nsp_gemnoc;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_gc;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qnm_lpiaon_noc;
+static struct qcom_icc_node qnm_lpass_lpinoc;
+static struct qcom_icc_node qxm_lpinoc_dsp_axim;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_icp;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_mdp;
+static struct qcom_icc_node qnm_vapss_hcp;
+static struct qcom_icc_node qnm_video;
+static struct qcom_icc_node qnm_video_cv_cpu;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qnm_video_v_cpu;
+static struct qcom_icc_node qsm_mnoc_cfg;
+static struct qcom_icc_node qxm_nsp;
+static struct qcom_icc_node qsm_pcie_anoc_cfg;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node qhm_gic;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qup2_core_slave;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_apss;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mxa;
+static struct qcom_icc_node qhs_cpr_mxc;
+static struct qcom_icc_node qhs_cpr_nspcx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_i2c;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_mx_rdpm;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_pimem_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup2;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_spss_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qss_lpass_qtb_cfg;
+static struct qcom_icc_node qss_mnoc_cfg;
+static struct qcom_icc_node qss_nsp_qtb_cfg;
+static struct qcom_icc_node qss_pcie_anoc_cfg;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_tme_cfg;
+static struct qcom_icc_node qss_cfg;
+static struct qcom_icc_node qss_ddrss_cfg;
+static struct qcom_icc_node qxs_boot_imem;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc;
+static struct qcom_icc_node qns_lpass_aggnoc;
+static struct qcom_icc_node qns_lpi_aon_noc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node srvc_pcie_aggre_noc;
+static struct qcom_icc_node qns_gemnoc_gc;
+static struct qcom_icc_node qns_gemnoc_sf;
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SM8550_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SM8550_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SM8550_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SM8550_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SM8550_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SM8550_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SM8550_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SM8550_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SM8550_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_sp = {
.name = "qxm_sp",
- .id = SM8550_MASTER_SP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
- .id = SM8550_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
- .id = SM8550_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SM8550_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SM8550_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SM8550_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
- .id = SM8550_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_QUP_CORE_2 },
+ .link_nodes = { &qup2_core_slave },
};
static struct qcom_icc_node qsm_cfg = {
.name = "qsm_cfg",
- .id = SM8550_MASTER_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 44,
- .links = { SM8550_SLAVE_AHB2PHY_SOUTH, SM8550_SLAVE_AHB2PHY_NORTH,
- SM8550_SLAVE_APPSS, SM8550_SLAVE_CAMERA_CFG,
- SM8550_SLAVE_CLK_CTL, SM8550_SLAVE_RBCPR_CX_CFG,
- SM8550_SLAVE_RBCPR_MMCX_CFG, SM8550_SLAVE_RBCPR_MXA_CFG,
- SM8550_SLAVE_RBCPR_MXC_CFG, SM8550_SLAVE_CPR_NSPCX,
- SM8550_SLAVE_CRYPTO_0_CFG, SM8550_SLAVE_CX_RDPM,
- SM8550_SLAVE_DISPLAY_CFG, SM8550_SLAVE_GFX3D_CFG,
- SM8550_SLAVE_I2C, SM8550_SLAVE_IMEM_CFG,
- SM8550_SLAVE_IPA_CFG, SM8550_SLAVE_IPC_ROUTER_CFG,
- SM8550_SLAVE_CNOC_MSS, SM8550_SLAVE_MX_RDPM,
- SM8550_SLAVE_PCIE_0_CFG, SM8550_SLAVE_PCIE_1_CFG,
- SM8550_SLAVE_PDM, SM8550_SLAVE_PIMEM_CFG,
- SM8550_SLAVE_PRNG, SM8550_SLAVE_QDSS_CFG,
- SM8550_SLAVE_QSPI_0, SM8550_SLAVE_QUP_1,
- SM8550_SLAVE_QUP_2, SM8550_SLAVE_SDCC_2,
- SM8550_SLAVE_SDCC_4, SM8550_SLAVE_SPSS_CFG,
- SM8550_SLAVE_TCSR, SM8550_SLAVE_TLMM,
- SM8550_SLAVE_UFS_MEM_CFG, SM8550_SLAVE_USB3_0,
- SM8550_SLAVE_VENUS_CFG, SM8550_SLAVE_VSENSE_CTRL_CFG,
- SM8550_SLAVE_LPASS_QTB_CFG, SM8550_SLAVE_CNOC_MNOC_CFG,
- SM8550_SLAVE_NSP_QTB_CFG, SM8550_SLAVE_PCIE_ANOC_CFG,
- SM8550_SLAVE_QDSS_STM, SM8550_SLAVE_TCU },
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_apss, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_cpr_cx,
+ &qhs_cpr_mmcx, &qhs_cpr_mxa,
+ &qhs_cpr_mxc, &qhs_cpr_nspcx,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_i2c, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_mss_cfg, &qhs_mx_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pdm, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_spss_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &qss_pcie_anoc_cfg,
+ &xs_qdss_stm, &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SM8550_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
- .links = { SM8550_SLAVE_AOSS, SM8550_SLAVE_TME_CFG,
- SM8550_SLAVE_CNOC_CFG, SM8550_SLAVE_DDRSS_CFG,
- SM8550_SLAVE_BOOT_IMEM, SM8550_SLAVE_IMEM },
+ .link_nodes = { &qhs_aoss, &qhs_tme_cfg,
+ &qss_cfg, &qss_ddrss_cfg,
+ &qxs_boot_imem, &qxs_imem },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SM8550_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8550_SLAVE_PCIE_0, SM8550_SLAVE_PCIE_1 },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SM8550_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SM8550_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SM8550_MASTER_APPSS_PROC,
.channels = 3,
.buswidth = 32,
.num_links = 3,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
- SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SM8550_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_lpass_gemnoc = {
.name = "qnm_lpass_gemnoc",
- .id = SM8550_MASTER_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
- SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_mdsp = {
.name = "qnm_mdsp",
- .id = SM8550_MASTER_MSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
- SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SM8550_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SM8550_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_nsp_gemnoc = {
.name = "qnm_nsp_gemnoc",
- .id = SM8550_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_gc = {
.name = "qnm_snoc_gc",
- .id = SM8550_MASTER_SNOC_GC_MEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SM8550_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
- SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_lpiaon_noc = {
.name = "qnm_lpiaon_noc",
- .id = SM8550_MASTER_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_SLAVE_LPASS_GEM_NOC },
+ .link_nodes = { &qns_lpass_ag_noc_gemnoc },
};
static struct qcom_icc_node qnm_lpass_lpinoc = {
.name = "qnm_lpass_lpinoc",
- .id = SM8550_MASTER_LPASS_LPINOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+ .link_nodes = { &qns_lpass_aggnoc },
};
static struct qcom_icc_node qxm_lpinoc_dsp_axim = {
.name = "qxm_lpinoc_dsp_axim",
- .id = SM8550_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_SLAVE_LPICX_NOC_LPIAON_NOC },
+ .link_nodes = { &qns_lpi_aon_noc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SM8550_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = SM8550_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
- .id = SM8550_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = SM8550_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_mdp = {
.name = "qnm_mdp",
- .id = SM8550_MASTER_MDP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_vapss_hcp = {
.name = "qnm_vapss_hcp",
- .id = SM8550_MASTER_CDSP_HCP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video = {
.name = "qnm_video",
- .id = SM8550_MASTER_VIDEO,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cv_cpu = {
.name = "qnm_video_cv_cpu",
- .id = SM8550_MASTER_VIDEO_CV_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = SM8550_MASTER_VIDEO_PROC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
- .id = SM8550_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qsm_mnoc_cfg = {
.name = "qsm_mnoc_cfg",
- .id = SM8550_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
- .id = SM8550_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_node qsm_pcie_anoc_cfg = {
.name = "qsm_pcie_anoc_cfg",
- .id = SM8550_MASTER_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_SERVICE_PCIE_ANOC },
+ .link_nodes = { &srvc_pcie_aggre_noc },
};
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SM8550_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SM8550_MASTER_PCIE_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node qhm_gic = {
.name = "qhm_gic",
- .id = SM8550_MASTER_GIC_AHB,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SM8550_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SM8550_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SM8550_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_SLAVE_SNOC_GEM_NOC_GC },
+ .link_nodes = { &qns_gemnoc_gc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SM8550_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SM8550_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SM8550_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SM8550_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
- .id = SM8550_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SM8550_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = SM8550_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_apss = {
.name = "qhs_apss",
- .id = SM8550_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SM8550_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SM8550_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SM8550_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = SM8550_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxa = {
.name = "qhs_cpr_mxa",
- .id = SM8550_SLAVE_RBCPR_MXA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxc = {
.name = "qhs_cpr_mxc",
- .id = SM8550_SLAVE_RBCPR_MXC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_nspcx = {
.name = "qhs_cpr_nspcx",
- .id = SM8550_SLAVE_CPR_NSPCX,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SM8550_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = SM8550_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SM8550_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SM8550_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_i2c = {
.name = "qhs_i2c",
- .id = SM8550_SLAVE_I2C,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SM8550_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SM8550_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SM8550_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SM8550_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
- .id = SM8550_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SM8550_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SM8550_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SM8550_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pimem_cfg = {
.name = "qhs_pimem_cfg",
- .id = SM8550_SLAVE_PIMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SM8550_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SM8550_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SM8550_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SM8550_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
- .id = SM8550_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SM8550_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SM8550_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
- .id = SM8550_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SM8550_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SM8550_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SM8550_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SM8550_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SM8550_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SM8550_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_lpass_qtb_cfg = {
.name = "qss_lpass_qtb_cfg",
- .id = SM8550_SLAVE_LPASS_QTB_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_mnoc_cfg = {
.name = "qss_mnoc_cfg",
- .id = SM8550_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qsm_mnoc_cfg },
};
static struct qcom_icc_node qss_nsp_qtb_cfg = {
.name = "qss_nsp_qtb_cfg",
- .id = SM8550_SLAVE_NSP_QTB_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_pcie_anoc_cfg = {
.name = "qss_pcie_anoc_cfg",
- .id = SM8550_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_MASTER_PCIE_ANOC_CFG },
+ .link_nodes = { &qsm_pcie_anoc_cfg },
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SM8550_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SM8550_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SM8550_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
- .id = SM8550_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_cfg = {
.name = "qss_cfg",
- .id = SM8550_SLAVE_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8550_MASTER_CNOC_CFG },
+ .link_nodes = { &qsm_cfg },
};
static struct qcom_icc_node qss_ddrss_cfg = {
.name = "qss_ddrss_cfg",
- .id = SM8550_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
- .id = SM8550_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SM8550_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SM8550_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SM8550_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = SM8550_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SM8550_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SM8550_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
.name = "qns_lpass_ag_noc_gemnoc",
- .id = SM8550_SLAVE_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_MASTER_LPASS_GEM_NOC },
+ .link_nodes = { &qnm_lpass_gemnoc },
};
static struct qcom_icc_node qns_lpass_aggnoc = {
.name = "qns_lpass_aggnoc",
- .id = SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_MASTER_LPIAON_NOC },
+ .link_nodes = { &qnm_lpiaon_noc },
};
static struct qcom_icc_node qns_lpi_aon_noc = {
.name = "qns_lpi_aon_noc",
- .id = SM8550_SLAVE_LPICX_NOC_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_MASTER_LPASS_LPINOC },
+ .link_nodes = { &qnm_lpass_lpinoc },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SM8550_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SM8550_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SM8550_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SM8550_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = SM8550_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8550_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_nsp_gemnoc },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SM8550_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_pcie_aggre_noc = {
.name = "srvc_pcie_aggre_noc",
- .id = SM8550_SLAVE_SERVICE_PCIE_ANOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gemnoc_gc = {
.name = "qns_gemnoc_gc",
- .id = SM8550_SLAVE_SNOC_GEM_NOC_GC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8550_MASTER_SNOC_GC_MEM_NOC },
+ .link_nodes = { &qnm_snoc_gc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SM8550_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8550_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_bcm bcm_acv = {
diff --git a/drivers/interconnect/qcom/sm8550.h b/drivers/interconnect/qcom/sm8550.h
deleted file mode 100644
index c9b2986e1293..000000000000
--- a/drivers/interconnect/qcom/sm8550.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SM8450 interconnect IDs
- *
- * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, Linaro Limited
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8450_H
-#define __DRIVERS_INTERCONNECT_QCOM_SM8450_H
-
-#define SM8550_MASTER_A1NOC_SNOC 0
-#define SM8550_MASTER_A2NOC_SNOC 1
-#define SM8550_MASTER_ANOC_PCIE_GEM_NOC 2
-#define SM8550_MASTER_APPSS_PROC 3
-#define SM8550_MASTER_CAMNOC_HF 4
-#define SM8550_MASTER_CAMNOC_ICP 5
-#define SM8550_MASTER_CAMNOC_SF 6
-#define SM8550_MASTER_CDSP_HCP 7
-#define SM8550_MASTER_CDSP_PROC 8
-#define SM8550_MASTER_CNOC_CFG 9
-#define SM8550_MASTER_CNOC_MNOC_CFG 10
-#define SM8550_MASTER_COMPUTE_NOC 11
-#define SM8550_MASTER_CRYPTO 12
-#define SM8550_MASTER_GEM_NOC_CNOC 13
-#define SM8550_MASTER_GEM_NOC_PCIE_SNOC 14
-#define SM8550_MASTER_GFX3D 15
-#define SM8550_MASTER_GIC 16
-#define SM8550_MASTER_GIC_AHB 17
-#define SM8550_MASTER_GPU_TCU 18
-#define SM8550_MASTER_IPA 19
-#define SM8550_MASTER_LLCC 20
-#define SM8550_MASTER_LPASS_GEM_NOC 21
-#define SM8550_MASTER_LPASS_LPINOC 22
-#define SM8550_MASTER_LPASS_PROC 23
-#define SM8550_MASTER_LPIAON_NOC 24
-#define SM8550_MASTER_MDP 25
-#define SM8550_MASTER_MNOC_HF_MEM_NOC 26
-#define SM8550_MASTER_MNOC_SF_MEM_NOC 27
-#define SM8550_MASTER_MSS_PROC 28
-#define SM8550_MASTER_PCIE_0 29
-#define SM8550_MASTER_PCIE_1 30
-#define SM8550_MASTER_PCIE_ANOC_CFG 31
-#define SM8550_MASTER_QDSS_BAM 32
-#define SM8550_MASTER_QDSS_ETR 33
-#define SM8550_MASTER_QDSS_ETR_1 34
-#define SM8550_MASTER_QSPI_0 35
-#define SM8550_MASTER_QUP_1 36
-#define SM8550_MASTER_QUP_2 37
-#define SM8550_MASTER_QUP_CORE_0 38
-#define SM8550_MASTER_QUP_CORE_1 39
-#define SM8550_MASTER_QUP_CORE_2 40
-#define SM8550_MASTER_SDCC_2 41
-#define SM8550_MASTER_SDCC_4 42
-#define SM8550_MASTER_SNOC_GC_MEM_NOC 43
-#define SM8550_MASTER_SNOC_SF_MEM_NOC 44
-#define SM8550_MASTER_SP 45
-#define SM8550_MASTER_SYS_TCU 46
-#define SM8550_MASTER_UFS_MEM 47
-#define SM8550_MASTER_USB3_0 48
-#define SM8550_MASTER_VIDEO 49
-#define SM8550_MASTER_VIDEO_CV_PROC 50
-#define SM8550_MASTER_VIDEO_PROC 51
-#define SM8550_MASTER_VIDEO_V_PROC 52
-#define SM8550_SLAVE_A1NOC_SNOC 53
-#define SM8550_SLAVE_A2NOC_SNOC 54
-#define SM8550_SLAVE_AHB2PHY_NORTH 55
-#define SM8550_SLAVE_AHB2PHY_SOUTH 56
-#define SM8550_SLAVE_ANOC_PCIE_GEM_NOC 57
-#define SM8550_SLAVE_AOSS 58
-#define SM8550_SLAVE_APPSS 59
-#define SM8550_SLAVE_BOOT_IMEM 60
-#define SM8550_SLAVE_CAMERA_CFG 61
-#define SM8550_SLAVE_CDSP_MEM_NOC 62
-#define SM8550_SLAVE_CLK_CTL 63
-#define SM8550_SLAVE_CNOC_CFG 64
-#define SM8550_SLAVE_CNOC_MNOC_CFG 65
-#define SM8550_SLAVE_CNOC_MSS 66
-#define SM8550_SLAVE_CPR_NSPCX 67
-#define SM8550_SLAVE_CRYPTO_0_CFG 68
-#define SM8550_SLAVE_CX_RDPM 69
-#define SM8550_SLAVE_DDRSS_CFG 70
-#define SM8550_SLAVE_DISPLAY_CFG 71
-#define SM8550_SLAVE_EBI1 72
-#define SM8550_SLAVE_GEM_NOC_CNOC 73
-#define SM8550_SLAVE_GFX3D_CFG 74
-#define SM8550_SLAVE_I2C 75
-#define SM8550_SLAVE_IMEM 76
-#define SM8550_SLAVE_IMEM_CFG 77
-#define SM8550_SLAVE_IPA_CFG 78
-#define SM8550_SLAVE_IPC_ROUTER_CFG 79
-#define SM8550_SLAVE_LLCC 80
-#define SM8550_SLAVE_LPASS_GEM_NOC 81
-#define SM8550_SLAVE_LPASS_QTB_CFG 82
-#define SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC 83
-#define SM8550_SLAVE_LPICX_NOC_LPIAON_NOC 84
-#define SM8550_SLAVE_MEM_NOC_PCIE_SNOC 85
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC 86
-#define SM8550_SLAVE_MNOC_SF_MEM_NOC 87
-#define SM8550_SLAVE_MX_RDPM 88
-#define SM8550_SLAVE_NSP_QTB_CFG 89
-#define SM8550_SLAVE_PCIE_0 90
-#define SM8550_SLAVE_PCIE_0_CFG 91
-#define SM8550_SLAVE_PCIE_1 92
-#define SM8550_SLAVE_PCIE_1_CFG 93
-#define SM8550_SLAVE_PCIE_ANOC_CFG 94
-#define SM8550_SLAVE_PDM 95
-#define SM8550_SLAVE_PIMEM_CFG 96
-#define SM8550_SLAVE_PRNG 97
-#define SM8550_SLAVE_QDSS_CFG 98
-#define SM8550_SLAVE_QDSS_STM 99
-#define SM8550_SLAVE_QSPI_0 100
-#define SM8550_SLAVE_QUP_1 101
-#define SM8550_SLAVE_QUP_2 102
-#define SM8550_SLAVE_QUP_CORE_0 103
-#define SM8550_SLAVE_QUP_CORE_1 104
-#define SM8550_SLAVE_QUP_CORE_2 105
-#define SM8550_SLAVE_RBCPR_CX_CFG 106
-#define SM8550_SLAVE_RBCPR_MMCX_CFG 107
-#define SM8550_SLAVE_RBCPR_MXA_CFG 108
-#define SM8550_SLAVE_RBCPR_MXC_CFG 109
-#define SM8550_SLAVE_SDCC_2 110
-#define SM8550_SLAVE_SDCC_4 111
-#define SM8550_SLAVE_SERVICE_MNOC 112
-#define SM8550_SLAVE_SERVICE_PCIE_ANOC 113
-#define SM8550_SLAVE_SNOC_GEM_NOC_GC 114
-#define SM8550_SLAVE_SNOC_GEM_NOC_SF 115
-#define SM8550_SLAVE_SPSS_CFG 116
-#define SM8550_SLAVE_TCSR 117
-#define SM8550_SLAVE_TCU 118
-#define SM8550_SLAVE_TLMM 119
-#define SM8550_SLAVE_TME_CFG 120
-#define SM8550_SLAVE_UFS_MEM_CFG 121
-#define SM8550_SLAVE_USB3_0 122
-#define SM8550_SLAVE_VENUS_CFG 123
-#define SM8550_SLAVE_VSENSE_CTRL_CFG 124
-
-#endif
diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c
index b7c321f4e4b5..cf3ae734d4c3 100644
--- a/drivers/interconnect/qcom/sm8650.c
+++ b/drivers/interconnect/qcom/sm8650.c
@@ -15,8 +15,138 @@
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
-#include "sm8650.h"
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qxm_qup02;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node qxm_sp;
+static struct qcom_icc_node xm_qdss_etr_0;
+static struct qcom_icc_node xm_qdss_etr_1;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qup2_core_master;
+static struct qcom_icc_node qsm_cfg;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node alm_ubwc_p_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_lpass_gemnoc;
+static struct qcom_icc_node qnm_mdsp;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_nsp_gemnoc;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qnm_ubwc_p;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qnm_lpiaon_noc;
+static struct qcom_icc_node qnm_lpass_lpinoc;
+static struct qcom_icc_node qxm_lpinoc_dsp_axim;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_icp;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_mdp;
+static struct qcom_icc_node qnm_vapss_hcp;
+static struct qcom_icc_node qnm_video;
+static struct qcom_icc_node qnm_video_cv_cpu;
+static struct qcom_icc_node qnm_video_cvp;
+static struct qcom_icc_node qnm_video_v_cpu;
+static struct qcom_icc_node qsm_mnoc_cfg;
+static struct qcom_icc_node qnm_nsp;
+static struct qcom_icc_node qsm_pcie_anoc_cfg;
+static struct qcom_icc_node xm_pcie3_0;
+static struct qcom_icc_node xm_pcie3_1;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_apss_noc;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qup2_core_slave;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_cpr_cx;
+static struct qcom_icc_node qhs_cpr_hmx;
+static struct qcom_icc_node qhs_cpr_mmcx;
+static struct qcom_icc_node qhs_cpr_mxa;
+static struct qcom_icc_node qhs_cpr_mxc;
+static struct qcom_icc_node qhs_cpr_nspcx;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_cx_rdpm;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_i2c;
+static struct qcom_icc_node qhs_i3c_ibi0_cfg;
+static struct qcom_icc_node qhs_i3c_ibi1_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_mx_2_rdpm;
+static struct qcom_icc_node qhs_mx_rdpm;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pcie_rscc;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup02;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup2;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_spss_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qss_mnoc_cfg;
+static struct qcom_icc_node qss_nsp_qtb_cfg;
+static struct qcom_icc_node qss_pcie_anoc_cfg;
+static struct qcom_icc_node srvc_cnoc_cfg;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_tme_cfg;
+static struct qcom_icc_node qss_apss;
+static struct qcom_icc_node qss_cfg;
+static struct qcom_icc_node qss_ddrss_cfg;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node srvc_cnoc_main;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc;
+static struct qcom_icc_node qns_lpass_aggnoc;
+static struct qcom_icc_node qns_lpi_aon_noc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node srvc_pcie_aggre_noc;
+static struct qcom_icc_node qns_gemnoc_sf;
static const struct regmap_config icc_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -34,12 +164,11 @@ static struct qcom_icc_qosbox qhm_qspi_qos = {
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SM8650_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.qosbox = &qhm_qspi_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_qosbox qhm_qup1_qos = {
@@ -52,21 +181,19 @@ static struct qcom_icc_qosbox qhm_qup1_qos = {
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SM8650_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.qosbox = &qhm_qup1_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qxm_qup02 = {
.name = "qxm_qup02",
- .id = SM8650_MASTER_QUP_3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8650_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_qosbox xm_sdc4_qos = {
@@ -79,12 +206,11 @@ static struct qcom_icc_qosbox xm_sdc4_qos = {
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SM8650_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_sdc4_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_qosbox xm_ufs_mem_qos = {
@@ -97,12 +223,11 @@ static struct qcom_icc_qosbox xm_ufs_mem_qos = {
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SM8650_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 16,
.qosbox = &xm_ufs_mem_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_qosbox xm_usb3_0_qos = {
@@ -115,12 +240,11 @@ static struct qcom_icc_qosbox xm_usb3_0_qos = {
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SM8650_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_usb3_0_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_qosbox qhm_qdss_bam_qos = {
@@ -133,12 +257,11 @@ static struct qcom_icc_qosbox qhm_qdss_bam_qos = {
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SM8650_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.qosbox = &qhm_qdss_bam_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox qhm_qup2_qos = {
@@ -151,12 +274,11 @@ static struct qcom_icc_qosbox qhm_qup2_qos = {
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SM8650_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.qosbox = &qhm_qup2_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox qxm_crypto_qos = {
@@ -169,12 +291,11 @@ static struct qcom_icc_qosbox qxm_crypto_qos = {
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SM8650_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.qosbox = &qxm_crypto_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox qxm_ipa_qos = {
@@ -187,21 +308,19 @@ static struct qcom_icc_qosbox qxm_ipa_qos = {
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SM8650_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.qosbox = &qxm_ipa_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_sp = {
.name = "qxm_sp",
- .id = SM8650_MASTER_SP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8650_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox xm_qdss_etr_0_qos = {
@@ -214,12 +333,11 @@ static struct qcom_icc_qosbox xm_qdss_etr_0_qos = {
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
- .id = SM8650_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_qdss_etr_0_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox xm_qdss_etr_1_qos = {
@@ -232,12 +350,11 @@ static struct qcom_icc_qosbox xm_qdss_etr_1_qos = {
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
- .id = SM8650_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_qdss_etr_1_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_qosbox xm_sdc2_qos = {
@@ -250,92 +367,85 @@ static struct qcom_icc_qosbox xm_sdc2_qos = {
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SM8650_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_sdc2_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SM8650_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8650_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SM8650_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8650_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
- .id = SM8650_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8650_SLAVE_QUP_CORE_2 },
+ .link_nodes = { &qup2_core_slave },
};
static struct qcom_icc_node qsm_cfg = {
.name = "qsm_cfg",
- .id = SM8650_MASTER_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 46,
- .links = { SM8650_SLAVE_AHB2PHY_SOUTH, SM8650_SLAVE_AHB2PHY_NORTH,
- SM8650_SLAVE_CAMERA_CFG, SM8650_SLAVE_CLK_CTL,
- SM8650_SLAVE_RBCPR_CX_CFG, SM8650_SLAVE_CPR_HMX,
- SM8650_SLAVE_RBCPR_MMCX_CFG, SM8650_SLAVE_RBCPR_MXA_CFG,
- SM8650_SLAVE_RBCPR_MXC_CFG, SM8650_SLAVE_CPR_NSPCX,
- SM8650_SLAVE_CRYPTO_0_CFG, SM8650_SLAVE_CX_RDPM,
- SM8650_SLAVE_DISPLAY_CFG, SM8650_SLAVE_GFX3D_CFG,
- SM8650_SLAVE_I2C, SM8650_SLAVE_I3C_IBI0_CFG,
- SM8650_SLAVE_I3C_IBI1_CFG, SM8650_SLAVE_IMEM_CFG,
- SM8650_SLAVE_CNOC_MSS, SM8650_SLAVE_MX_2_RDPM,
- SM8650_SLAVE_MX_RDPM, SM8650_SLAVE_PCIE_0_CFG,
- SM8650_SLAVE_PCIE_1_CFG, SM8650_SLAVE_PCIE_RSCC,
- SM8650_SLAVE_PDM, SM8650_SLAVE_PRNG,
- SM8650_SLAVE_QDSS_CFG, SM8650_SLAVE_QSPI_0,
- SM8650_SLAVE_QUP_3, SM8650_SLAVE_QUP_1,
- SM8650_SLAVE_QUP_2, SM8650_SLAVE_SDCC_2,
- SM8650_SLAVE_SDCC_4, SM8650_SLAVE_SPSS_CFG,
- SM8650_SLAVE_TCSR, SM8650_SLAVE_TLMM,
- SM8650_SLAVE_UFS_MEM_CFG, SM8650_SLAVE_USB3_0,
- SM8650_SLAVE_VENUS_CFG, SM8650_SLAVE_VSENSE_CTRL_CFG,
- SM8650_SLAVE_CNOC_MNOC_CFG, SM8650_SLAVE_NSP_QTB_CFG,
- SM8650_SLAVE_PCIE_ANOC_CFG, SM8650_SLAVE_SERVICE_CNOC_CFG,
- SM8650_SLAVE_QDSS_STM, SM8650_SLAVE_TCU },
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_cpr_cx, &qhs_cpr_hmx,
+ &qhs_cpr_mmcx, &qhs_cpr_mxa,
+ &qhs_cpr_mxc, &qhs_cpr_nspcx,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_i2c, &qhs_i3c_ibi0_cfg,
+ &qhs_i3c_ibi1_cfg, &qhs_imem_cfg,
+ &qhs_mss_cfg, &qhs_mx_2_rdpm,
+ &qhs_mx_rdpm, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie_rscc,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup02, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_spss_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qss_mnoc_cfg, &qss_nsp_qtb_cfg,
+ &qss_pcie_anoc_cfg, &srvc_cnoc_cfg,
+ &xs_qdss_stm, &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SM8650_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 9,
- .links = { SM8650_SLAVE_AOSS, SM8650_SLAVE_IPA_CFG,
- SM8650_SLAVE_IPC_ROUTER_CFG, SM8650_SLAVE_TME_CFG,
- SM8650_SLAVE_APPSS, SM8650_SLAVE_CNOC_CFG,
- SM8650_SLAVE_DDRSS_CFG, SM8650_SLAVE_IMEM,
- SM8650_SLAVE_SERVICE_CNOC },
+ .link_nodes = { &qhs_aoss, &qhs_ipa,
+ &qhs_ipc_router, &qhs_tme_cfg,
+ &qss_apss, &qss_cfg,
+ &qss_ddrss_cfg, &qxs_imem,
+ &srvc_cnoc_main },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SM8650_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 2,
- .links = { SM8650_SLAVE_PCIE_0, SM8650_SLAVE_PCIE_1 },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1 },
};
static struct qcom_icc_qosbox alm_gpu_tcu_qos = {
@@ -348,12 +458,11 @@ static struct qcom_icc_qosbox alm_gpu_tcu_qos = {
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SM8650_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.qosbox = &alm_gpu_tcu_qos,
.num_links = 2,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox alm_sys_tcu_qos = {
@@ -366,12 +475,11 @@ static struct qcom_icc_qosbox alm_sys_tcu_qos = {
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SM8650_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.qosbox = &alm_sys_tcu_qos,
.num_links = 2,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox alm_ubwc_p_tcu_qos = {
@@ -384,22 +492,20 @@ static struct qcom_icc_qosbox alm_ubwc_p_tcu_qos = {
static struct qcom_icc_node alm_ubwc_p_tcu = {
.name = "alm_ubwc_p_tcu",
- .id = SM8650_MASTER_UBWC_P_TCU,
.channels = 1,
.buswidth = 8,
.qosbox = &alm_ubwc_p_tcu_qos,
.num_links = 2,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SM8650_MASTER_APPSS_PROC,
.channels = 3,
.buswidth = 32,
.num_links = 3,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
- SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_qosbox qnm_gpu_qos = {
@@ -412,12 +518,11 @@ static struct qcom_icc_qosbox qnm_gpu_qos = {
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SM8650_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_gpu_qos,
.num_links = 2,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox qnm_lpass_gemnoc_qos = {
@@ -430,23 +535,21 @@ static struct qcom_icc_qosbox qnm_lpass_gemnoc_qos = {
static struct qcom_icc_node qnm_lpass_gemnoc = {
.name = "qnm_lpass_gemnoc",
- .id = SM8650_MASTER_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.qosbox = &qnm_lpass_gemnoc_qos,
.num_links = 3,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
- SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_mdsp = {
.name = "qnm_mdsp",
- .id = SM8650_MASTER_MSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
- SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_qosbox qnm_mnoc_hf_qos = {
@@ -459,12 +562,11 @@ static struct qcom_icc_qosbox qnm_mnoc_hf_qos = {
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SM8650_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_mnoc_hf_qos,
.num_links = 2,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox qnm_mnoc_sf_qos = {
@@ -477,12 +579,11 @@ static struct qcom_icc_qosbox qnm_mnoc_sf_qos = {
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SM8650_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_mnoc_sf_qos,
.num_links = 2,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox qnm_nsp_gemnoc_qos = {
@@ -495,13 +596,12 @@ static struct qcom_icc_qosbox qnm_nsp_gemnoc_qos = {
static struct qcom_icc_node qnm_nsp_gemnoc = {
.name = "qnm_nsp_gemnoc",
- .id = SM8650_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_nsp_gemnoc_qos,
.num_links = 3,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
- SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_qosbox qnm_pcie_qos = {
@@ -514,12 +614,11 @@ static struct qcom_icc_qosbox qnm_pcie_qos = {
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SM8650_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.qosbox = &qnm_pcie_qos,
.num_links = 2,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_qosbox qnm_snoc_sf_qos = {
@@ -532,13 +631,12 @@ static struct qcom_icc_qosbox qnm_snoc_sf_qos = {
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SM8650_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.qosbox = &qnm_snoc_sf_qos,
.num_links = 3,
- .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC,
- SM8650_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_qosbox qnm_ubwc_p_qos = {
@@ -551,12 +649,11 @@ static struct qcom_icc_qosbox qnm_ubwc_p_qos = {
static struct qcom_icc_node qnm_ubwc_p = {
.name = "qnm_ubwc_p",
- .id = SM8650_MASTER_UBWC_P,
.channels = 1,
.buswidth = 32,
.qosbox = &qnm_ubwc_p_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_qosbox xm_gic_qos = {
@@ -569,48 +666,43 @@ static struct qcom_icc_qosbox xm_gic_qos = {
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SM8650_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_gic_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_lpiaon_noc = {
.name = "qnm_lpiaon_noc",
- .id = SM8650_MASTER_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_SLAVE_LPASS_GEM_NOC },
+ .link_nodes = { &qns_lpass_ag_noc_gemnoc },
};
static struct qcom_icc_node qnm_lpass_lpinoc = {
.name = "qnm_lpass_lpinoc",
- .id = SM8650_MASTER_LPASS_LPINOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+ .link_nodes = { &qns_lpass_aggnoc },
};
static struct qcom_icc_node qxm_lpinoc_dsp_axim = {
.name = "qxm_lpinoc_dsp_axim",
- .id = SM8650_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_SLAVE_LPICX_NOC_LPIAON_NOC },
+ .link_nodes = { &qns_lpi_aon_noc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SM8650_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SM8650_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_qosbox qnm_camnoc_hf_qos = {
@@ -623,12 +715,11 @@ static struct qcom_icc_qosbox qnm_camnoc_hf_qos = {
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = SM8650_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_camnoc_hf_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_qosbox qnm_camnoc_icp_qos = {
@@ -641,12 +732,11 @@ static struct qcom_icc_qosbox qnm_camnoc_icp_qos = {
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
- .id = SM8650_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.qosbox = &qnm_camnoc_icp_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_qosbox qnm_camnoc_sf_qos = {
@@ -659,12 +749,11 @@ static struct qcom_icc_qosbox qnm_camnoc_sf_qos = {
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = SM8650_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_camnoc_sf_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_qosbox qnm_mdp_qos = {
@@ -677,21 +766,19 @@ static struct qcom_icc_qosbox qnm_mdp_qos = {
static struct qcom_icc_node qnm_mdp = {
.name = "qnm_mdp",
- .id = SM8650_MASTER_MDP,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_mdp_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_vapss_hcp = {
.name = "qnm_vapss_hcp",
- .id = SM8650_MASTER_CDSP_HCP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_qosbox qnm_video_qos = {
@@ -704,12 +791,11 @@ static struct qcom_icc_qosbox qnm_video_qos = {
static struct qcom_icc_node qnm_video = {
.name = "qnm_video",
- .id = SM8650_MASTER_VIDEO,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_video_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_qosbox qnm_video_cv_cpu_qos = {
@@ -722,12 +808,11 @@ static struct qcom_icc_qosbox qnm_video_cv_cpu_qos = {
static struct qcom_icc_node qnm_video_cv_cpu = {
.name = "qnm_video_cv_cpu",
- .id = SM8650_MASTER_VIDEO_CV_PROC,
.channels = 1,
.buswidth = 8,
.qosbox = &qnm_video_cv_cpu_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_qosbox qnm_video_cvp_qos = {
@@ -740,12 +825,11 @@ static struct qcom_icc_qosbox qnm_video_cvp_qos = {
static struct qcom_icc_node qnm_video_cvp = {
.name = "qnm_video_cvp",
- .id = SM8650_MASTER_VIDEO_PROC,
.channels = 2,
.buswidth = 32,
.qosbox = &qnm_video_cvp_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_qosbox qnm_video_v_cpu_qos = {
@@ -758,39 +842,35 @@ static struct qcom_icc_qosbox qnm_video_v_cpu_qos = {
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
- .id = SM8650_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.qosbox = &qnm_video_v_cpu_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qsm_mnoc_cfg = {
.name = "qsm_mnoc_cfg",
- .id = SM8650_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8650_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qnm_nsp = {
.name = "qnm_nsp",
- .id = SM8650_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8650_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_node qsm_pcie_anoc_cfg = {
.name = "qsm_pcie_anoc_cfg",
- .id = SM8650_MASTER_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8650_SLAVE_SERVICE_PCIE_ANOC },
+ .link_nodes = { &srvc_pcie_aggre_noc },
};
static struct qcom_icc_qosbox xm_pcie3_0_qos = {
@@ -803,12 +883,11 @@ static struct qcom_icc_qosbox xm_pcie3_0_qos = {
static struct qcom_icc_node xm_pcie3_0 = {
.name = "xm_pcie3_0",
- .id = SM8650_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.qosbox = &xm_pcie3_0_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_qosbox xm_pcie3_1_qos = {
@@ -821,30 +900,27 @@ static struct qcom_icc_qosbox xm_pcie3_1_qos = {
static struct qcom_icc_node xm_pcie3_1 = {
.name = "xm_pcie3_1",
- .id = SM8650_MASTER_PCIE_1,
.channels = 1,
.buswidth = 16,
.qosbox = &xm_pcie3_1_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SM8650_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SM8650_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_qosbox qnm_apss_noc_qos = {
@@ -857,636 +933,499 @@ static struct qcom_icc_qosbox qnm_apss_noc_qos = {
static struct qcom_icc_node qnm_apss_noc = {
.name = "qnm_apss_noc",
- .id = SM8650_MASTER_APSS_NOC,
.channels = 1,
.buswidth = 4,
.qosbox = &qnm_apss_noc_qos,
.num_links = 1,
- .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SM8650_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SM8650_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SM8650_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SM8650_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
- .id = SM8650_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SM8650_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = SM8650_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SM8650_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SM8650_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_cx = {
.name = "qhs_cpr_cx",
- .id = SM8650_SLAVE_RBCPR_CX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_hmx = {
.name = "qhs_cpr_hmx",
- .id = SM8650_SLAVE_CPR_HMX,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mmcx = {
.name = "qhs_cpr_mmcx",
- .id = SM8650_SLAVE_RBCPR_MMCX_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxa = {
.name = "qhs_cpr_mxa",
- .id = SM8650_SLAVE_RBCPR_MXA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_mxc = {
.name = "qhs_cpr_mxc",
- .id = SM8650_SLAVE_RBCPR_MXC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cpr_nspcx = {
.name = "qhs_cpr_nspcx",
- .id = SM8650_SLAVE_CPR_NSPCX,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SM8650_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_cx_rdpm = {
.name = "qhs_cx_rdpm",
- .id = SM8650_SLAVE_CX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SM8650_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SM8650_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_i2c = {
.name = "qhs_i2c",
- .id = SM8650_SLAVE_I2C,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_i3c_ibi0_cfg = {
.name = "qhs_i3c_ibi0_cfg",
- .id = SM8650_SLAVE_I3C_IBI0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_i3c_ibi1_cfg = {
.name = "qhs_i3c_ibi1_cfg",
- .id = SM8650_SLAVE_I3C_IBI1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SM8650_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SM8650_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mx_2_rdpm = {
.name = "qhs_mx_2_rdpm",
- .id = SM8650_SLAVE_MX_2_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mx_rdpm = {
.name = "qhs_mx_rdpm",
- .id = SM8650_SLAVE_MX_RDPM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = SM8650_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = SM8650_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie_rscc = {
.name = "qhs_pcie_rscc",
- .id = SM8650_SLAVE_PCIE_RSCC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = SM8650_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SM8650_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SM8650_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SM8650_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup02 = {
.name = "qhs_qup02",
- .id = SM8650_SLAVE_QUP_3,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SM8650_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
- .id = SM8650_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SM8650_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SM8650_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
- .id = SM8650_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SM8650_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SM8650_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SM8650_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SM8650_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SM8650_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SM8650_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_mnoc_cfg = {
.name = "qss_mnoc_cfg",
- .id = SM8650_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8650_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qsm_mnoc_cfg },
};
static struct qcom_icc_node qss_nsp_qtb_cfg = {
.name = "qss_nsp_qtb_cfg",
- .id = SM8650_SLAVE_NSP_QTB_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_pcie_anoc_cfg = {
.name = "qss_pcie_anoc_cfg",
- .id = SM8650_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8650_MASTER_PCIE_ANOC_CFG },
+ .link_nodes = { &qsm_pcie_anoc_cfg },
};
static struct qcom_icc_node srvc_cnoc_cfg = {
.name = "srvc_cnoc_cfg",
- .id = SM8650_SLAVE_SERVICE_CNOC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SM8650_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SM8650_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SM8650_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SM8650_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SM8650_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
- .id = SM8650_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_apss = {
.name = "qss_apss",
- .id = SM8650_SLAVE_APPSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_cfg = {
.name = "qss_cfg",
- .id = SM8650_SLAVE_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8650_MASTER_CNOC_CFG },
+ .link_nodes = { &qsm_cfg },
};
static struct qcom_icc_node qss_ddrss_cfg = {
.name = "qss_ddrss_cfg",
- .id = SM8650_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SM8650_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node srvc_cnoc_main = {
.name = "srvc_cnoc_main",
- .id = SM8650_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = SM8650_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = SM8650_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = SM8650_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SM8650_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SM8650_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8650_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
.name = "qns_lpass_ag_noc_gemnoc",
- .id = SM8650_SLAVE_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_MASTER_LPASS_GEM_NOC },
+ .link_nodes = { &qnm_lpass_gemnoc },
};
static struct qcom_icc_node qns_lpass_aggnoc = {
.name = "qns_lpass_aggnoc",
- .id = SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_MASTER_LPIAON_NOC },
+ .link_nodes = { &qnm_lpiaon_noc },
};
static struct qcom_icc_node qns_lpi_aon_noc = {
.name = "qns_lpi_aon_noc",
- .id = SM8650_SLAVE_LPICX_NOC_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_MASTER_LPASS_LPINOC },
+ .link_nodes = { &qnm_lpass_lpinoc },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SM8650_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SM8650_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8650_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SM8650_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8650_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SM8650_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = SM8650_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8650_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_nsp_gemnoc },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SM8650_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_pcie_aggre_noc = {
.name = "srvc_pcie_aggre_noc",
- .id = SM8650_SLAVE_SERVICE_PCIE_ANOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SM8650_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8650_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_bcm bcm_acv = {
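
The hunks above all apply one mechanical pattern: each node loses its .id member and its .links array of integer endpoint IDs, and instead gains a .link_nodes array of direct pointers to the peer node structs. That is also why the file grows a block of forward declarations, and why the per-SoC ID header (sm8650.h, deleted just below) becomes dead weight. A minimal standalone sketch of the idea, using hypothetical names rather than the kernel's structs:

/*
 * Hypothetical model of the pointer-linked topology (not the kernel's
 * qcom_icc_node): peers are referenced directly, so no ID-to-node
 * lookup table or per-SoC #define header is needed.
 */
#include <stdio.h>

struct icc_node_model {
	const char *name;
	int num_links;
	struct icc_node_model *link_nodes[4];	/* peers, by pointer */
};

/* Forward declaration lets a master reference a slave defined later. */
static struct icc_node_model slave_llcc;

static struct icc_node_model master_mnoc_hf = {
	.name = "qnm_mnoc_hf",
	.num_links = 1,
	.link_nodes = { &slave_llcc },
};

static struct icc_node_model slave_llcc = {
	.name = "qns_llcc",
};

int main(void)
{
	/* Walk the graph directly; no ID resolution step required. */
	for (int i = 0; i < master_mnoc_hf.num_links; i++)
		printf("%s -> %s\n", master_mnoc_hf.name,
		       master_mnoc_hf.link_nodes[i]->name);
	return 0;
}

Because a master can point at a slave defined further down the file (the tentative definition above becomes the real one once the initializer appears), the forward-declaration block is the only extra cost of dropping the ID indirection.
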
diff --git a/drivers/interconnect/qcom/sm8650.h b/drivers/interconnect/qcom/sm8650.h
deleted file mode 100644
index b6610225b38a..000000000000
--- a/drivers/interconnect/qcom/sm8650.h
+++ /dev/null
@@ -1,144 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * SM8650 interconnect IDs
- *
- * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2023, Linaro Limited
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8650_H
-#define __DRIVERS_INTERCONNECT_QCOM_SM8650_H
-
-#define SM8650_MASTER_A1NOC_SNOC 0
-#define SM8650_MASTER_A2NOC_SNOC 1
-#define SM8650_MASTER_ANOC_PCIE_GEM_NOC 2
-#define SM8650_MASTER_APPSS_PROC 3
-#define SM8650_MASTER_CAMNOC_HF 4
-#define SM8650_MASTER_CAMNOC_ICP 5
-#define SM8650_MASTER_CAMNOC_SF 6
-#define SM8650_MASTER_CDSP_HCP 7
-#define SM8650_MASTER_CDSP_PROC 8
-#define SM8650_MASTER_CNOC_CFG 9
-#define SM8650_MASTER_CNOC_MNOC_CFG 10
-#define SM8650_MASTER_COMPUTE_NOC 11
-#define SM8650_MASTER_CRYPTO 12
-#define SM8650_MASTER_GEM_NOC_CNOC 13
-#define SM8650_MASTER_GEM_NOC_PCIE_SNOC 14
-#define SM8650_MASTER_GFX3D 15
-#define SM8650_MASTER_GIC 16
-#define SM8650_MASTER_GPU_TCU 17
-#define SM8650_MASTER_IPA 18
-#define SM8650_MASTER_LLCC 19
-#define SM8650_MASTER_LPASS_GEM_NOC 20
-#define SM8650_MASTER_LPASS_LPINOC 21
-#define SM8650_MASTER_LPASS_PROC 22
-#define SM8650_MASTER_LPIAON_NOC 23
-#define SM8650_MASTER_MDP 24
-#define SM8650_MASTER_MNOC_HF_MEM_NOC 25
-#define SM8650_MASTER_MNOC_SF_MEM_NOC 26
-#define SM8650_MASTER_MSS_PROC 27
-#define SM8650_MASTER_PCIE_0 28
-#define SM8650_MASTER_PCIE_1 29
-#define SM8650_MASTER_PCIE_ANOC_CFG 30
-#define SM8650_MASTER_QDSS_BAM 31
-#define SM8650_MASTER_QDSS_ETR 32
-#define SM8650_MASTER_QDSS_ETR_1 33
-#define SM8650_MASTER_QSPI_0 34
-#define SM8650_MASTER_QUP_1 35
-#define SM8650_MASTER_QUP_2 36
-#define SM8650_MASTER_QUP_3 37
-#define SM8650_MASTER_QUP_CORE_0 38
-#define SM8650_MASTER_QUP_CORE_1 39
-#define SM8650_MASTER_QUP_CORE_2 40
-#define SM8650_MASTER_SDCC_2 41
-#define SM8650_MASTER_SDCC_4 42
-#define SM8650_MASTER_SNOC_SF_MEM_NOC 43
-#define SM8650_MASTER_SP 44
-#define SM8650_MASTER_SYS_TCU 45
-#define SM8650_MASTER_UBWC_P 46
-#define SM8650_MASTER_UBWC_P_TCU 47
-#define SM8650_MASTER_UFS_MEM 48
-#define SM8650_MASTER_USB3_0 49
-#define SM8650_MASTER_VIDEO 50
-#define SM8650_MASTER_VIDEO_CV_PROC 51
-#define SM8650_MASTER_VIDEO_PROC 52
-#define SM8650_MASTER_VIDEO_V_PROC 53
-#define SM8650_SLAVE_A1NOC_SNOC 54
-#define SM8650_SLAVE_A2NOC_SNOC 55
-#define SM8650_SLAVE_AHB2PHY_NORTH 56
-#define SM8650_SLAVE_AHB2PHY_SOUTH 57
-#define SM8650_SLAVE_ANOC_PCIE_GEM_NOC 58
-#define SM8650_SLAVE_AOSS 59
-#define SM8650_SLAVE_APPSS 60
-#define SM8650_SLAVE_CAMERA_CFG 61
-#define SM8650_SLAVE_CDSP_MEM_NOC 62
-#define SM8650_SLAVE_CLK_CTL 63
-#define SM8650_SLAVE_CNOC_CFG 64
-#define SM8650_SLAVE_CNOC_MNOC_CFG 65
-#define SM8650_SLAVE_CNOC_MSS 66
-#define SM8650_SLAVE_CPR_HMX 67
-#define SM8650_SLAVE_CPR_NSPCX 68
-#define SM8650_SLAVE_CRYPTO_0_CFG 69
-#define SM8650_SLAVE_CX_RDPM 70
-#define SM8650_SLAVE_DDRSS_CFG 71
-#define SM8650_SLAVE_DISPLAY_CFG 72
-#define SM8650_SLAVE_EBI1 73
-#define SM8650_SLAVE_GEM_NOC_CNOC 74
-#define SM8650_SLAVE_GFX3D_CFG 75
-#define SM8650_SLAVE_I2C 76
-#define SM8650_SLAVE_I3C_IBI0_CFG 77
-#define SM8650_SLAVE_I3C_IBI1_CFG 78
-#define SM8650_SLAVE_IMEM 79
-#define SM8650_SLAVE_IMEM_CFG 80
-#define SM8650_SLAVE_IPA_CFG 81
-#define SM8650_SLAVE_IPC_ROUTER_CFG 82
-#define SM8650_SLAVE_LLCC 83
-#define SM8650_SLAVE_LPASS_GEM_NOC 84
-#define SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC 85
-#define SM8650_SLAVE_LPICX_NOC_LPIAON_NOC 86
-#define SM8650_SLAVE_MEM_NOC_PCIE_SNOC 87
-#define SM8650_SLAVE_MNOC_HF_MEM_NOC 88
-#define SM8650_SLAVE_MNOC_SF_MEM_NOC 89
-#define SM8650_SLAVE_MX_2_RDPM 90
-#define SM8650_SLAVE_MX_RDPM 91
-#define SM8650_SLAVE_NSP_QTB_CFG 92
-#define SM8650_SLAVE_PCIE_0 93
-#define SM8650_SLAVE_PCIE_1 94
-#define SM8650_SLAVE_PCIE_0_CFG 95
-#define SM8650_SLAVE_PCIE_1_CFG 96
-#define SM8650_SLAVE_PCIE_ANOC_CFG 97
-#define SM8650_SLAVE_PCIE_RSCC 98
-#define SM8650_SLAVE_PDM 99
-#define SM8650_SLAVE_PRNG 100
-#define SM8650_SLAVE_QDSS_CFG 101
-#define SM8650_SLAVE_QDSS_STM 102
-#define SM8650_SLAVE_QSPI_0 103
-#define SM8650_SLAVE_QUP_1 104
-#define SM8650_SLAVE_QUP_2 105
-#define SM8650_SLAVE_QUP_3 106
-#define SM8650_SLAVE_QUP_CORE_0 107
-#define SM8650_SLAVE_QUP_CORE_1 108
-#define SM8650_SLAVE_QUP_CORE_2 109
-#define SM8650_SLAVE_RBCPR_CX_CFG 110
-#define SM8650_SLAVE_RBCPR_MMCX_CFG 111
-#define SM8650_SLAVE_RBCPR_MXA_CFG 112
-#define SM8650_SLAVE_RBCPR_MXC_CFG 113
-#define SM8650_SLAVE_SDCC_2 114
-#define SM8650_SLAVE_SDCC_4 115
-#define SM8650_SLAVE_SERVICE_CNOC 116
-#define SM8650_SLAVE_SERVICE_CNOC_CFG 117
-#define SM8650_SLAVE_SERVICE_MNOC 118
-#define SM8650_SLAVE_SERVICE_PCIE_ANOC 119
-#define SM8650_SLAVE_SNOC_GEM_NOC_SF 120
-#define SM8650_SLAVE_SPSS_CFG 121
-#define SM8650_SLAVE_TCSR 122
-#define SM8650_SLAVE_TCU 123
-#define SM8650_SLAVE_TLMM 124
-#define SM8650_SLAVE_TME_CFG 125
-#define SM8650_SLAVE_UFS_MEM_CFG 126
-#define SM8650_SLAVE_USB3_0 127
-#define SM8650_SLAVE_VENUS_CFG 128
-#define SM8650_SLAVE_VSENSE_CTRL_CFG 129
-#define SM8650_MASTER_APSS_NOC 130
-
-#endif
diff --git a/drivers/interconnect/qcom/sm8750.c b/drivers/interconnect/qcom/sm8750.c
index 69bc22222075..1486c0b8f4c1 100644
--- a/drivers/interconnect/qcom/sm8750.c
+++ b/drivers/interconnect/qcom/sm8750.c
@@ -14,1181 +14,1011 @@
#include "bcm-voter.h"
#include "icc-rpmh.h"
-#define SM8750_MASTER_GPU_TCU 0
-#define SM8750_MASTER_SYS_TCU 1
-#define SM8750_MASTER_APPSS_PROC 2
-#define SM8750_MASTER_LLCC 3
-#define SM8750_MASTER_QDSS_BAM 4
-#define SM8750_MASTER_QSPI_0 5
-#define SM8750_MASTER_QUP_1 6
-#define SM8750_MASTER_QUP_2 7
-#define SM8750_MASTER_A1NOC_SNOC 8
-#define SM8750_MASTER_A2NOC_SNOC 9
-#define SM8750_MASTER_CAMNOC_HF 10
-#define SM8750_MASTER_CAMNOC_NRT_ICP_SF 11
-#define SM8750_MASTER_CAMNOC_RT_CDM_SF 12
-#define SM8750_MASTER_CAMNOC_SF 13
-#define SM8750_MASTER_GEM_NOC_CNOC 14
-#define SM8750_MASTER_GEM_NOC_PCIE_SNOC 15
-#define SM8750_MASTER_GFX3D 16
-#define SM8750_MASTER_LPASS_GEM_NOC 17
-#define SM8750_MASTER_LPASS_LPINOC 18
-#define SM8750_MASTER_LPIAON_NOC 19
-#define SM8750_MASTER_LPASS_PROC 20
-#define SM8750_MASTER_MDP 21
-#define SM8750_MASTER_MSS_PROC 22
-#define SM8750_MASTER_MNOC_HF_MEM_NOC 23
-#define SM8750_MASTER_MNOC_SF_MEM_NOC 24
-#define SM8750_MASTER_CDSP_PROC 25
-#define SM8750_MASTER_COMPUTE_NOC 26
-#define SM8750_MASTER_ANOC_PCIE_GEM_NOC 27
-#define SM8750_MASTER_SNOC_SF_MEM_NOC 28
-#define SM8750_MASTER_UBWC_P 29
-#define SM8750_MASTER_CDSP_HCP 30
-#define SM8750_MASTER_VIDEO_CV_PROC 31
-#define SM8750_MASTER_VIDEO_EVA 32
-#define SM8750_MASTER_VIDEO_MVP 33
-#define SM8750_MASTER_VIDEO_V_PROC 34
-#define SM8750_MASTER_CNOC_CFG 35
-#define SM8750_MASTER_CNOC_MNOC_CFG 36
-#define SM8750_MASTER_PCIE_ANOC_CFG 37
-#define SM8750_MASTER_QUP_CORE_0 38
-#define SM8750_MASTER_QUP_CORE_1 39
-#define SM8750_MASTER_QUP_CORE_2 40
-#define SM8750_MASTER_CRYPTO 41
-#define SM8750_MASTER_IPA 42
-#define SM8750_MASTER_QUP_3 43
-#define SM8750_MASTER_SOCCP_AGGR_NOC 44
-#define SM8750_MASTER_SP 45
-#define SM8750_MASTER_GIC 46
-#define SM8750_MASTER_PCIE_0 47
-#define SM8750_MASTER_QDSS_ETR 48
-#define SM8750_MASTER_QDSS_ETR_1 49
-#define SM8750_MASTER_SDCC_2 50
-#define SM8750_MASTER_SDCC_4 51
-#define SM8750_MASTER_UFS_MEM 52
-#define SM8750_MASTER_USB3_0 53
-#define SM8750_SLAVE_UBWC_P 54
-#define SM8750_SLAVE_EBI1 55
-#define SM8750_SLAVE_AHB2PHY_SOUTH 56
-#define SM8750_SLAVE_AHB2PHY_NORTH 57
-#define SM8750_SLAVE_AOSS 58
-#define SM8750_SLAVE_CAMERA_CFG 59
-#define SM8750_SLAVE_CLK_CTL 60
-#define SM8750_SLAVE_CRYPTO_0_CFG 61
-#define SM8750_SLAVE_DISPLAY_CFG 62
-#define SM8750_SLAVE_EVA_CFG 63
-#define SM8750_SLAVE_GFX3D_CFG 64
-#define SM8750_SLAVE_I2C 65
-#define SM8750_SLAVE_I3C_IBI0_CFG 66
-#define SM8750_SLAVE_I3C_IBI1_CFG 67
-#define SM8750_SLAVE_IMEM_CFG 68
-#define SM8750_SLAVE_IPA_CFG 69
-#define SM8750_SLAVE_IPC_ROUTER_CFG 70
-#define SM8750_SLAVE_CNOC_MSS 71
-#define SM8750_SLAVE_PCIE_CFG 72
-#define SM8750_SLAVE_PRNG 73
-#define SM8750_SLAVE_QDSS_CFG 74
-#define SM8750_SLAVE_QSPI_0 75
-#define SM8750_SLAVE_QUP_3 76
-#define SM8750_SLAVE_QUP_1 77
-#define SM8750_SLAVE_QUP_2 78
-#define SM8750_SLAVE_SDCC_2 79
-#define SM8750_SLAVE_SDCC_4 80
-#define SM8750_SLAVE_SOCCP 81
-#define SM8750_SLAVE_SPSS_CFG 82
-#define SM8750_SLAVE_TCSR 83
-#define SM8750_SLAVE_TLMM 84
-#define SM8750_SLAVE_TME_CFG 85
-#define SM8750_SLAVE_UFS_MEM_CFG 86
-#define SM8750_SLAVE_USB3_0 87
-#define SM8750_SLAVE_VENUS_CFG 88
-#define SM8750_SLAVE_VSENSE_CTRL_CFG 89
-#define SM8750_SLAVE_A1NOC_SNOC 90
-#define SM8750_SLAVE_A2NOC_SNOC 91
-#define SM8750_SLAVE_APPSS 92
-#define SM8750_SLAVE_GEM_NOC_CNOC 93
-#define SM8750_SLAVE_SNOC_GEM_NOC_SF 94
-#define SM8750_SLAVE_LLCC 95
-#define SM8750_SLAVE_LPASS_GEM_NOC 96
-#define SM8750_SLAVE_LPIAON_NOC_LPASS_AG_NOC 97
-#define SM8750_SLAVE_LPICX_NOC_LPIAON_NOC 98
-#define SM8750_SLAVE_MNOC_HF_MEM_NOC 99
-#define SM8750_SLAVE_MNOC_SF_MEM_NOC 100
-#define SM8750_SLAVE_CDSP_MEM_NOC 101
-#define SM8750_SLAVE_MEM_NOC_PCIE_SNOC 102
-#define SM8750_SLAVE_ANOC_PCIE_GEM_NOC 103
-#define SM8750_SLAVE_CNOC_CFG 104
-#define SM8750_SLAVE_DDRSS_CFG 105
-#define SM8750_SLAVE_CNOC_MNOC_CFG 106
-#define SM8750_SLAVE_PCIE_ANOC_CFG 107
-#define SM8750_SLAVE_QUP_CORE_0 108
-#define SM8750_SLAVE_QUP_CORE_1 109
-#define SM8750_SLAVE_QUP_CORE_2 110
-#define SM8750_SLAVE_BOOT_IMEM 111
-#define SM8750_SLAVE_IMEM 112
-#define SM8750_SLAVE_BOOT_IMEM_2 113
-#define SM8750_SLAVE_SERVICE_CNOC 114
-#define SM8750_SLAVE_SERVICE_MNOC 115
-#define SM8750_SLAVE_SERVICE_PCIE_ANOC 116
-#define SM8750_SLAVE_PCIE_0 117
-#define SM8750_SLAVE_QDSS_STM 118
-#define SM8750_SLAVE_TCU 119
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node qxm_qup02;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node qhm_qdss_bam;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_ipa;
+static struct qcom_icc_node qxm_soccp;
+static struct qcom_icc_node qxm_sp;
+static struct qcom_icc_node xm_qdss_etr_0;
+static struct qcom_icc_node xm_qdss_etr_1;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qup2_core_master;
+static struct qcom_icc_node qsm_cfg;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_lpass_gemnoc;
+static struct qcom_icc_node qnm_mdsp;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_nsp_gemnoc;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node qnm_ubwc_p;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qnm_lpiaon_noc;
+static struct qcom_icc_node qnm_lpass_lpinoc;
+static struct qcom_icc_node qnm_lpinoc_dsp_qns4m;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_nrt_icp_sf;
+static struct qcom_icc_node qnm_camnoc_rt_cdm_sf;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_mdp;
+static struct qcom_icc_node qnm_vapss_hcp;
+static struct qcom_icc_node qnm_video_cv_cpu;
+static struct qcom_icc_node qnm_video_eva;
+static struct qcom_icc_node qnm_video_mvp;
+static struct qcom_icc_node qnm_video_v_cpu;
+static struct qcom_icc_node qsm_mnoc_cfg;
+static struct qcom_icc_node qnm_nsp;
+static struct qcom_icc_node qsm_pcie_anoc_cfg;
+static struct qcom_icc_node xm_pcie3;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qup2_core_slave;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_eva_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_i2c;
+static struct qcom_icc_node qhs_i3c_ibi0_cfg;
+static struct qcom_icc_node qhs_i3c_ibi1_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_mss_cfg;
+static struct qcom_icc_node qhs_pcie_cfg;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup02;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup2;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_spss_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb3_0;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qhs_vsense_ctrl_cfg;
+static struct qcom_icc_node qss_mnoc_cfg;
+static struct qcom_icc_node qss_pcie_anoc_cfg;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_ipa;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_soccp;
+static struct qcom_icc_node qhs_tme_cfg;
+static struct qcom_icc_node qns_apss;
+static struct qcom_icc_node qss_cfg;
+static struct qcom_icc_node qss_ddrss_cfg;
+static struct qcom_icc_node qxs_boot_imem;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node qxs_modem_boot_imem;
+static struct qcom_icc_node srvc_cnoc_main;
+static struct qcom_icc_node xs_pcie;
+static struct qcom_icc_node chs_ubwc_p;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc;
+static struct qcom_icc_node qns_lpass_aggnoc;
+static struct qcom_icc_node qns_lpi_aon_noc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node srvc_pcie_aggre_noc;
+static struct qcom_icc_node qns_gemnoc_sf;
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = SM8750_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = SM8750_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qxm_qup02 = {
.name = "qxm_qup02",
- .id = SM8750_MASTER_QUP_3,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = SM8750_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = SM8750_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = SM8750_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qdss_bam = {
.name = "qhm_qdss_bam",
- .id = SM8750_MASTER_QDSS_BAM,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = SM8750_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = SM8750_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_ipa = {
.name = "qxm_ipa",
- .id = SM8750_MASTER_IPA,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_soccp = {
.name = "qxm_soccp",
- .id = SM8750_MASTER_SOCCP_AGGR_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_sp = {
.name = "qxm_sp",
- .id = SM8750_MASTER_SP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
- .id = SM8750_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
- .id = SM8750_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = SM8750_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = SM8750_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = SM8750_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
- .id = SM8750_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_QUP_CORE_2 },
+ .link_nodes = { &qup2_core_slave },
};
static struct qcom_icc_node qsm_cfg = {
.name = "qsm_cfg",
- .id = SM8750_MASTER_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 33,
- .links = { SM8750_SLAVE_AHB2PHY_SOUTH, SM8750_SLAVE_AHB2PHY_NORTH,
- SM8750_SLAVE_CAMERA_CFG, SM8750_SLAVE_CLK_CTL,
- SM8750_SLAVE_CRYPTO_0_CFG, SM8750_SLAVE_DISPLAY_CFG,
- SM8750_SLAVE_EVA_CFG, SM8750_SLAVE_GFX3D_CFG,
- SM8750_SLAVE_I2C, SM8750_SLAVE_I3C_IBI0_CFG,
- SM8750_SLAVE_I3C_IBI1_CFG, SM8750_SLAVE_IMEM_CFG,
- SM8750_SLAVE_CNOC_MSS, SM8750_SLAVE_PCIE_CFG,
- SM8750_SLAVE_PRNG, SM8750_SLAVE_QDSS_CFG,
- SM8750_SLAVE_QSPI_0, SM8750_SLAVE_QUP_3,
- SM8750_SLAVE_QUP_1, SM8750_SLAVE_QUP_2,
- SM8750_SLAVE_SDCC_2, SM8750_SLAVE_SDCC_4,
- SM8750_SLAVE_SPSS_CFG, SM8750_SLAVE_TCSR,
- SM8750_SLAVE_TLMM, SM8750_SLAVE_UFS_MEM_CFG,
- SM8750_SLAVE_USB3_0, SM8750_SLAVE_VENUS_CFG,
- SM8750_SLAVE_VSENSE_CTRL_CFG, SM8750_SLAVE_CNOC_MNOC_CFG,
- SM8750_SLAVE_PCIE_ANOC_CFG, SM8750_SLAVE_QDSS_STM,
- SM8750_SLAVE_TCU },
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_crypto0_cfg, &qhs_display_cfg,
+ &qhs_eva_cfg, &qhs_gpuss_cfg,
+ &qhs_i2c, &qhs_i3c_ibi0_cfg,
+ &qhs_i3c_ibi1_cfg, &qhs_imem_cfg,
+ &qhs_mss_cfg, &qhs_pcie_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup02,
+ &qhs_qup1, &qhs_qup2,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_spss_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_ufs_mem_cfg,
+ &qhs_usb3_0, &qhs_venus_cfg,
+ &qhs_vsense_ctrl_cfg, &qss_mnoc_cfg,
+ &qss_pcie_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = SM8750_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 12,
- .links = { SM8750_SLAVE_AOSS, SM8750_SLAVE_IPA_CFG,
- SM8750_SLAVE_IPC_ROUTER_CFG, SM8750_SLAVE_SOCCP,
- SM8750_SLAVE_TME_CFG, SM8750_SLAVE_APPSS,
- SM8750_SLAVE_CNOC_CFG, SM8750_SLAVE_DDRSS_CFG,
- SM8750_SLAVE_BOOT_IMEM, SM8750_SLAVE_IMEM,
- SM8750_SLAVE_BOOT_IMEM_2, SM8750_SLAVE_SERVICE_CNOC },
+ .link_nodes = { &qhs_aoss, &qhs_ipa,
+ &qhs_ipc_router, &qhs_soccp,
+ &qhs_tme_cfg, &qns_apss,
+ &qss_cfg, &qss_ddrss_cfg,
+ &qxs_boot_imem, &qxs_imem,
+ &qxs_modem_boot_imem, &srvc_cnoc_main },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = SM8750_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_PCIE_0 },
+ .link_nodes = { &xs_pcie },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = SM8750_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = SM8750_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = SM8750_MASTER_APPSS_PROC,
.channels = 4,
.buswidth = 32,
.num_links = 4,
- .links = { SM8750_SLAVE_UBWC_P, SM8750_SLAVE_GEM_NOC_CNOC,
- SM8750_SLAVE_LLCC, SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &chs_ubwc_p, &qns_gem_noc_cnoc,
+ &qns_llcc, &qns_pcie },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = SM8750_MASTER_GFX3D,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_lpass_gemnoc = {
.name = "qnm_lpass_gemnoc",
- .id = SM8750_MASTER_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
- SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_mdsp = {
.name = "qnm_mdsp",
- .id = SM8750_MASTER_MSS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
- SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = SM8750_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = SM8750_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_nsp_gemnoc = {
.name = "qnm_nsp_gemnoc",
- .id = SM8750_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
- SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = SM8750_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = SM8750_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { SM8750_SLAVE_GEM_NOC_CNOC, SM8750_SLAVE_LLCC,
- SM8750_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_ubwc_p = {
.name = "qnm_ubwc_p",
- .id = SM8750_MASTER_UBWC_P,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = SM8750_MASTER_GIC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_lpiaon_noc = {
.name = "qnm_lpiaon_noc",
- .id = SM8750_MASTER_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_SLAVE_LPASS_GEM_NOC },
+ .link_nodes = { &qns_lpass_ag_noc_gemnoc },
};
static struct qcom_icc_node qnm_lpass_lpinoc = {
.name = "qnm_lpass_lpinoc",
- .id = SM8750_MASTER_LPASS_LPINOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+ .link_nodes = { &qns_lpass_aggnoc },
};
static struct qcom_icc_node qnm_lpinoc_dsp_qns4m = {
.name = "qnm_lpinoc_dsp_qns4m",
- .id = SM8750_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_SLAVE_LPICX_NOC_LPIAON_NOC },
+ .link_nodes = { &qns_lpi_aon_noc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = SM8750_MASTER_LLCC,
.channels = 4,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = SM8750_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_camnoc_nrt_icp_sf = {
.name = "qnm_camnoc_nrt_icp_sf",
- .id = SM8750_MASTER_CAMNOC_NRT_ICP_SF,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_rt_cdm_sf = {
.name = "qnm_camnoc_rt_cdm_sf",
- .id = SM8750_MASTER_CAMNOC_RT_CDM_SF,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = SM8750_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_mdp = {
.name = "qnm_mdp",
- .id = SM8750_MASTER_MDP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_vapss_hcp = {
.name = "qnm_vapss_hcp",
- .id = SM8750_MASTER_CDSP_HCP,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cv_cpu = {
.name = "qnm_video_cv_cpu",
- .id = SM8750_MASTER_VIDEO_CV_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_eva = {
.name = "qnm_video_eva",
- .id = SM8750_MASTER_VIDEO_EVA,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_mvp = {
.name = "qnm_video_mvp",
- .id = SM8750_MASTER_VIDEO_MVP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
- .id = SM8750_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qsm_mnoc_cfg = {
.name = "qsm_mnoc_cfg",
- .id = SM8750_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qnm_nsp = {
.name = "qnm_nsp",
- .id = SM8750_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_node qsm_pcie_anoc_cfg = {
.name = "qsm_pcie_anoc_cfg",
- .id = SM8750_MASTER_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_SLAVE_SERVICE_PCIE_ANOC },
+ .link_nodes = { &srvc_pcie_aggre_noc },
};
static struct qcom_icc_node xm_pcie3 = {
.name = "xm_pcie3",
- .id = SM8750_MASTER_PCIE_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = SM8750_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = SM8750_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = SM8750_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = SM8750_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = SM8750_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = SM8750_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
- .id = SM8750_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = SM8750_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = SM8750_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = SM8750_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = SM8750_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = SM8750_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = SM8750_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_eva_cfg = {
.name = "qhs_eva_cfg",
- .id = SM8750_SLAVE_EVA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = SM8750_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_i2c = {
.name = "qhs_i2c",
- .id = SM8750_SLAVE_I2C,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_i3c_ibi0_cfg = {
.name = "qhs_i3c_ibi0_cfg",
- .id = SM8750_SLAVE_I3C_IBI0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_i3c_ibi1_cfg = {
.name = "qhs_i3c_ibi1_cfg",
- .id = SM8750_SLAVE_I3C_IBI1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = SM8750_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_mss_cfg = {
.name = "qhs_mss_cfg",
- .id = SM8750_SLAVE_CNOC_MSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie_cfg = {
.name = "qhs_pcie_cfg",
- .id = SM8750_SLAVE_PCIE_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = SM8750_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = SM8750_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = SM8750_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup02 = {
.name = "qhs_qup02",
- .id = SM8750_SLAVE_QUP_3,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = SM8750_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
- .id = SM8750_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = SM8750_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = SM8750_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_spss_cfg = {
.name = "qhs_spss_cfg",
- .id = SM8750_SLAVE_SPSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = SM8750_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = SM8750_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = SM8750_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0 = {
.name = "qhs_usb3_0",
- .id = SM8750_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = SM8750_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
.name = "qhs_vsense_ctrl_cfg",
- .id = SM8750_SLAVE_VSENSE_CTRL_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_mnoc_cfg = {
.name = "qss_mnoc_cfg",
- .id = SM8750_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qsm_mnoc_cfg },
};
static struct qcom_icc_node qss_pcie_anoc_cfg = {
.name = "qss_pcie_anoc_cfg",
- .id = SM8750_SLAVE_PCIE_ANOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_MASTER_PCIE_ANOC_CFG },
+ .link_nodes = { &qsm_pcie_anoc_cfg },
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = SM8750_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = SM8750_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = SM8750_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipa = {
.name = "qhs_ipa",
- .id = SM8750_SLAVE_IPA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = SM8750_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_soccp = {
.name = "qhs_soccp",
- .id = SM8750_SLAVE_SOCCP,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
- .id = SM8750_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_apss = {
.name = "qns_apss",
- .id = SM8750_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qss_cfg = {
.name = "qss_cfg",
- .id = SM8750_SLAVE_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { SM8750_MASTER_CNOC_CFG },
+ .link_nodes = { &qsm_cfg },
};
static struct qcom_icc_node qss_ddrss_cfg = {
.name = "qss_ddrss_cfg",
- .id = SM8750_SLAVE_DDRSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
- .id = SM8750_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = SM8750_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qxs_modem_boot_imem = {
.name = "qxs_modem_boot_imem",
- .id = SM8750_SLAVE_BOOT_IMEM_2,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node srvc_cnoc_main = {
.name = "srvc_cnoc_main",
- .id = SM8750_SLAVE_SERVICE_CNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie = {
.name = "xs_pcie",
- .id = SM8750_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node chs_ubwc_p = {
.name = "chs_ubwc_p",
- .id = SM8750_SLAVE_UBWC_P,
.channels = 1,
.buswidth = 32,
- .num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = SM8750_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = SM8750_SLAVE_LLCC,
.channels = 4,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = SM8750_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
.name = "qns_lpass_ag_noc_gemnoc",
- .id = SM8750_SLAVE_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_MASTER_LPASS_GEM_NOC },
+ .link_nodes = { &qnm_lpass_gemnoc },
};
static struct qcom_icc_node qns_lpass_aggnoc = {
.name = "qns_lpass_aggnoc",
- .id = SM8750_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_MASTER_LPIAON_NOC },
+ .link_nodes = { &qnm_lpiaon_noc },
};
static struct qcom_icc_node qns_lpi_aon_noc = {
.name = "qns_lpi_aon_noc",
- .id = SM8750_SLAVE_LPICX_NOC_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_MASTER_LPASS_LPINOC },
+ .link_nodes = { &qnm_lpass_lpinoc },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = SM8750_SLAVE_EBI1,
.channels = 4,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = SM8750_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = SM8750_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = SM8750_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = SM8750_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { SM8750_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_nsp_gemnoc },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = SM8750_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { SM8750_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node srvc_pcie_aggre_noc = {
.name = "srvc_pcie_aggre_noc",
- .id = SM8750_SLAVE_SERVICE_PCIE_ANOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = SM8750_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { SM8750_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_bcm bcm_acv = {
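The diff above applies one conversion uniformly to every node: the per-SoC integer `.id` and the ID-based `.links` array are removed, and each node instead names its endpoints through `.link_nodes`, an array of pointers to the destination node structs. A compact sketch of the pattern, using a stand-in struct and placeholder names rather than anything from this patch:

struct icc_node_sketch {
	const char *name;
	int id;					/* dropped by the conversion */
	int num_links;
	int links[1];				/* before: endpoint by integer ID */
	struct icc_node_sketch *link_nodes[1];	/* after: endpoint by pointer */
};

static struct icc_node_sketch slave_b = {
	.name = "slave_b",
};

/* Before: endpoints held as IDs, resolved through a lookup table at probe */
static struct icc_node_sketch master_before = {
	.name = "master_a",
	.id = 1,		/* stood for a SOC_MASTER_A style macro */
	.num_links = 1,
	.links = { 2 },		/* stood for a SOC_SLAVE_B style macro */
};

/* After: the link is a direct pointer; no ID and no lookup step needed */
static struct icc_node_sketch master_after = {
	.name = "master_a",
	.num_links = 1,
	.link_nodes = { &slave_b },
};

With pointers wired at compile time, the probe-time ID-to-node resolution pass and the header full of ID macros both become dead weight, which is what the x1e80100.c diff below removes.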
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index 2c46fdb4a054..2ba2823c7860 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -15,1342 +15,1278 @@
#include "bcm-voter.h"
#include "icc-common.h"
#include "icc-rpmh.h"
-#include "x1e80100.h"
+
+static struct qcom_icc_node qhm_qspi;
+static struct qcom_icc_node qhm_qup1;
+static struct qcom_icc_node xm_sdc4;
+static struct qcom_icc_node xm_ufs_mem;
+static struct qcom_icc_node qhm_qup0;
+static struct qcom_icc_node qhm_qup2;
+static struct qcom_icc_node qxm_crypto;
+static struct qcom_icc_node qxm_sp;
+static struct qcom_icc_node xm_qdss_etr_0;
+static struct qcom_icc_node xm_qdss_etr_1;
+static struct qcom_icc_node xm_sdc2;
+static struct qcom_icc_node qup0_core_master;
+static struct qcom_icc_node qup1_core_master;
+static struct qcom_icc_node qup2_core_master;
+static struct qcom_icc_node qsm_cfg;
+static struct qcom_icc_node qnm_gemnoc_cnoc;
+static struct qcom_icc_node qnm_gemnoc_pcie;
+static struct qcom_icc_node alm_gpu_tcu;
+static struct qcom_icc_node alm_pcie_tcu;
+static struct qcom_icc_node alm_sys_tcu;
+static struct qcom_icc_node chm_apps;
+static struct qcom_icc_node qnm_gpu;
+static struct qcom_icc_node qnm_lpass;
+static struct qcom_icc_node qnm_mnoc_hf;
+static struct qcom_icc_node qnm_mnoc_sf;
+static struct qcom_icc_node qnm_nsp_noc;
+static struct qcom_icc_node qnm_pcie;
+static struct qcom_icc_node qnm_snoc_sf;
+static struct qcom_icc_node xm_gic;
+static struct qcom_icc_node qnm_lpiaon_noc;
+static struct qcom_icc_node qnm_lpass_lpinoc;
+static struct qcom_icc_node qxm_lpinoc_dsp_axim;
+static struct qcom_icc_node llcc_mc;
+static struct qcom_icc_node qnm_av1_enc;
+static struct qcom_icc_node qnm_camnoc_hf;
+static struct qcom_icc_node qnm_camnoc_icp;
+static struct qcom_icc_node qnm_camnoc_sf;
+static struct qcom_icc_node qnm_eva;
+static struct qcom_icc_node qnm_mdp;
+static struct qcom_icc_node qnm_video;
+static struct qcom_icc_node qnm_video_cv_cpu;
+static struct qcom_icc_node qnm_video_v_cpu;
+static struct qcom_icc_node qsm_mnoc_cfg;
+static struct qcom_icc_node qxm_nsp;
+static struct qcom_icc_node qnm_pcie_north_gem_noc;
+static struct qcom_icc_node qnm_pcie_south_gem_noc;
+static struct qcom_icc_node xm_pcie_3;
+static struct qcom_icc_node xm_pcie_4;
+static struct qcom_icc_node xm_pcie_5;
+static struct qcom_icc_node xm_pcie_0;
+static struct qcom_icc_node xm_pcie_1;
+static struct qcom_icc_node xm_pcie_2;
+static struct qcom_icc_node xm_pcie_6a;
+static struct qcom_icc_node xm_pcie_6b;
+static struct qcom_icc_node qnm_aggre1_noc;
+static struct qcom_icc_node qnm_aggre2_noc;
+static struct qcom_icc_node qnm_gic;
+static struct qcom_icc_node qnm_usb_anoc;
+static struct qcom_icc_node qnm_aggre_usb_north_snoc;
+static struct qcom_icc_node qnm_aggre_usb_south_snoc;
+static struct qcom_icc_node xm_usb2_0;
+static struct qcom_icc_node xm_usb3_mp;
+static struct qcom_icc_node xm_usb3_0;
+static struct qcom_icc_node xm_usb3_1;
+static struct qcom_icc_node xm_usb3_2;
+static struct qcom_icc_node xm_usb4_0;
+static struct qcom_icc_node xm_usb4_1;
+static struct qcom_icc_node xm_usb4_2;
+static struct qcom_icc_node qns_a1noc_snoc;
+static struct qcom_icc_node qns_a2noc_snoc;
+static struct qcom_icc_node qup0_core_slave;
+static struct qcom_icc_node qup1_core_slave;
+static struct qcom_icc_node qup2_core_slave;
+static struct qcom_icc_node qhs_ahb2phy0;
+static struct qcom_icc_node qhs_ahb2phy1;
+static struct qcom_icc_node qhs_ahb2phy2;
+static struct qcom_icc_node qhs_av1_enc_cfg;
+static struct qcom_icc_node qhs_camera_cfg;
+static struct qcom_icc_node qhs_clk_ctl;
+static struct qcom_icc_node qhs_crypto0_cfg;
+static struct qcom_icc_node qhs_display_cfg;
+static struct qcom_icc_node qhs_gpuss_cfg;
+static struct qcom_icc_node qhs_imem_cfg;
+static struct qcom_icc_node qhs_ipc_router;
+static struct qcom_icc_node qhs_pcie0_cfg;
+static struct qcom_icc_node qhs_pcie1_cfg;
+static struct qcom_icc_node qhs_pcie2_cfg;
+static struct qcom_icc_node qhs_pcie3_cfg;
+static struct qcom_icc_node qhs_pcie4_cfg;
+static struct qcom_icc_node qhs_pcie5_cfg;
+static struct qcom_icc_node qhs_pcie6a_cfg;
+static struct qcom_icc_node qhs_pcie6b_cfg;
+static struct qcom_icc_node qhs_pcie_rsc_cfg;
+static struct qcom_icc_node qhs_pdm;
+static struct qcom_icc_node qhs_prng;
+static struct qcom_icc_node qhs_qdss_cfg;
+static struct qcom_icc_node qhs_qspi;
+static struct qcom_icc_node qhs_qup0;
+static struct qcom_icc_node qhs_qup1;
+static struct qcom_icc_node qhs_qup2;
+static struct qcom_icc_node qhs_sdc2;
+static struct qcom_icc_node qhs_sdc4;
+static struct qcom_icc_node qhs_smmuv3_cfg;
+static struct qcom_icc_node qhs_tcsr;
+static struct qcom_icc_node qhs_tlmm;
+static struct qcom_icc_node qhs_ufs_mem_cfg;
+static struct qcom_icc_node qhs_usb2_0_cfg;
+static struct qcom_icc_node qhs_usb3_0_cfg;
+static struct qcom_icc_node qhs_usb3_1_cfg;
+static struct qcom_icc_node qhs_usb3_2_cfg;
+static struct qcom_icc_node qhs_usb3_mp_cfg;
+static struct qcom_icc_node qhs_usb4_0_cfg;
+static struct qcom_icc_node qhs_usb4_1_cfg;
+static struct qcom_icc_node qhs_usb4_2_cfg;
+static struct qcom_icc_node qhs_venus_cfg;
+static struct qcom_icc_node qss_lpass_qtb_cfg;
+static struct qcom_icc_node qss_mnoc_cfg;
+static struct qcom_icc_node qss_nsp_qtb_cfg;
+static struct qcom_icc_node xs_qdss_stm;
+static struct qcom_icc_node xs_sys_tcu_cfg;
+static struct qcom_icc_node qhs_aoss;
+static struct qcom_icc_node qhs_tme_cfg;
+static struct qcom_icc_node qns_apss;
+static struct qcom_icc_node qss_cfg;
+static struct qcom_icc_node qxs_boot_imem;
+static struct qcom_icc_node qxs_imem;
+static struct qcom_icc_node xs_pcie_0;
+static struct qcom_icc_node xs_pcie_1;
+static struct qcom_icc_node xs_pcie_2;
+static struct qcom_icc_node xs_pcie_3;
+static struct qcom_icc_node xs_pcie_4;
+static struct qcom_icc_node xs_pcie_5;
+static struct qcom_icc_node xs_pcie_6a;
+static struct qcom_icc_node xs_pcie_6b;
+static struct qcom_icc_node qns_gem_noc_cnoc;
+static struct qcom_icc_node qns_llcc;
+static struct qcom_icc_node qns_pcie;
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc;
+static struct qcom_icc_node qns_lpass_aggnoc;
+static struct qcom_icc_node qns_lpi_aon_noc;
+static struct qcom_icc_node ebi;
+static struct qcom_icc_node qns_mem_noc_hf;
+static struct qcom_icc_node qns_mem_noc_sf;
+static struct qcom_icc_node srvc_mnoc;
+static struct qcom_icc_node qns_nsp_gemnoc;
+static struct qcom_icc_node qns_pcie_mem_noc;
+static struct qcom_icc_node qns_pcie_north_gem_noc;
+static struct qcom_icc_node qns_pcie_south_gem_noc;
+static struct qcom_icc_node qns_gemnoc_sf;
+static struct qcom_icc_node qns_aggre_usb_snoc;
+static struct qcom_icc_node qns_aggre_usb_north_snoc;
+static struct qcom_icc_node qns_aggre_usb_south_snoc;
static struct qcom_icc_node qhm_qspi = {
.name = "qhm_qspi",
- .id = X1E80100_MASTER_QSPI_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup1 = {
.name = "qhm_qup1",
- .id = X1E80100_MASTER_QUP_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_sdc4 = {
.name = "xm_sdc4",
- .id = X1E80100_MASTER_SDCC_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node xm_ufs_mem = {
.name = "xm_ufs_mem",
- .id = X1E80100_MASTER_UFS_MEM,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_A1NOC_SNOC },
+ .link_nodes = { &qns_a1noc_snoc },
};
static struct qcom_icc_node qhm_qup0 = {
.name = "qhm_qup0",
- .id = X1E80100_MASTER_QUP_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qhm_qup2 = {
.name = "qhm_qup2",
- .id = X1E80100_MASTER_QUP_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_crypto = {
.name = "qxm_crypto",
- .id = X1E80100_MASTER_CRYPTO,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qxm_sp = {
.name = "qxm_sp",
- .id = X1E80100_MASTER_SP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_0 = {
.name = "xm_qdss_etr_0",
- .id = X1E80100_MASTER_QDSS_ETR,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_qdss_etr_1 = {
.name = "xm_qdss_etr_1",
- .id = X1E80100_MASTER_QDSS_ETR_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node xm_sdc2 = {
.name = "xm_sdc2",
- .id = X1E80100_MASTER_SDCC_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_A2NOC_SNOC },
+ .link_nodes = { &qns_a2noc_snoc },
};
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
- .id = X1E80100_MASTER_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_SLAVE_QUP_CORE_0 },
+ .link_nodes = { &qup0_core_slave },
};
static struct qcom_icc_node qup1_core_master = {
.name = "qup1_core_master",
- .id = X1E80100_MASTER_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_SLAVE_QUP_CORE_1 },
+ .link_nodes = { &qup1_core_slave },
};
static struct qcom_icc_node qup2_core_master = {
.name = "qup2_core_master",
- .id = X1E80100_MASTER_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_SLAVE_QUP_CORE_2 },
+ .link_nodes = { &qup2_core_slave },
};
static struct qcom_icc_node qsm_cfg = {
.name = "qsm_cfg",
- .id = X1E80100_MASTER_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 47,
- .links = { X1E80100_SLAVE_AHB2PHY_SOUTH, X1E80100_SLAVE_AHB2PHY_NORTH,
- X1E80100_SLAVE_AHB2PHY_2, X1E80100_SLAVE_AV1_ENC_CFG,
- X1E80100_SLAVE_CAMERA_CFG, X1E80100_SLAVE_CLK_CTL,
- X1E80100_SLAVE_CRYPTO_0_CFG, X1E80100_SLAVE_DISPLAY_CFG,
- X1E80100_SLAVE_GFX3D_CFG, X1E80100_SLAVE_IMEM_CFG,
- X1E80100_SLAVE_IPC_ROUTER_CFG, X1E80100_SLAVE_PCIE_0_CFG,
- X1E80100_SLAVE_PCIE_1_CFG, X1E80100_SLAVE_PCIE_2_CFG,
- X1E80100_SLAVE_PCIE_3_CFG, X1E80100_SLAVE_PCIE_4_CFG,
- X1E80100_SLAVE_PCIE_5_CFG, X1E80100_SLAVE_PCIE_6A_CFG,
- X1E80100_SLAVE_PCIE_6B_CFG, X1E80100_SLAVE_PCIE_RSC_CFG,
- X1E80100_SLAVE_PDM, X1E80100_SLAVE_PRNG,
- X1E80100_SLAVE_QDSS_CFG, X1E80100_SLAVE_QSPI_0,
- X1E80100_SLAVE_QUP_0, X1E80100_SLAVE_QUP_1,
- X1E80100_SLAVE_QUP_2, X1E80100_SLAVE_SDCC_2,
- X1E80100_SLAVE_SDCC_4, X1E80100_SLAVE_SMMUV3_CFG,
- X1E80100_SLAVE_TCSR, X1E80100_SLAVE_TLMM,
- X1E80100_SLAVE_UFS_MEM_CFG, X1E80100_SLAVE_USB2,
- X1E80100_SLAVE_USB3_0, X1E80100_SLAVE_USB3_1,
- X1E80100_SLAVE_USB3_2, X1E80100_SLAVE_USB3_MP,
- X1E80100_SLAVE_USB4_0, X1E80100_SLAVE_USB4_1,
- X1E80100_SLAVE_USB4_2, X1E80100_SLAVE_VENUS_CFG,
- X1E80100_SLAVE_LPASS_QTB_CFG, X1E80100_SLAVE_CNOC_MNOC_CFG,
- X1E80100_SLAVE_NSP_QTB_CFG, X1E80100_SLAVE_QDSS_STM,
- X1E80100_SLAVE_TCU },
+ .link_nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_ahb2phy2, &qhs_av1_enc_cfg,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_crypto0_cfg, &qhs_display_cfg,
+ &qhs_gpuss_cfg, &qhs_imem_cfg,
+ &qhs_ipc_router, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie2_cfg,
+ &qhs_pcie3_cfg, &qhs_pcie4_cfg,
+ &qhs_pcie5_cfg, &qhs_pcie6a_cfg,
+ &qhs_pcie6b_cfg, &qhs_pcie_rsc_cfg,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_smmuv3_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg,
+ &qhs_usb3_0_cfg, &qhs_usb3_1_cfg,
+ &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg,
+ &qhs_usb4_0_cfg, &qhs_usb4_1_cfg,
+ &qhs_usb4_2_cfg, &qhs_venus_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
};
static struct qcom_icc_node qnm_gemnoc_cnoc = {
.name = "qnm_gemnoc_cnoc",
- .id = X1E80100_MASTER_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 6,
- .links = { X1E80100_SLAVE_AOSS, X1E80100_SLAVE_TME_CFG,
- X1E80100_SLAVE_APPSS, X1E80100_SLAVE_CNOC_CFG,
- X1E80100_SLAVE_BOOT_IMEM, X1E80100_SLAVE_IMEM },
+ .link_nodes = { &qhs_aoss, &qhs_tme_cfg,
+ &qns_apss, &qss_cfg,
+ &qxs_boot_imem, &qxs_imem },
};
static struct qcom_icc_node qnm_gemnoc_pcie = {
.name = "qnm_gemnoc_pcie",
- .id = X1E80100_MASTER_GEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 8,
- .links = { X1E80100_SLAVE_PCIE_0, X1E80100_SLAVE_PCIE_1,
- X1E80100_SLAVE_PCIE_2, X1E80100_SLAVE_PCIE_3,
- X1E80100_SLAVE_PCIE_4, X1E80100_SLAVE_PCIE_5,
- X1E80100_SLAVE_PCIE_6A, X1E80100_SLAVE_PCIE_6B },
+ .link_nodes = { &xs_pcie_0, &xs_pcie_1,
+ &xs_pcie_2, &xs_pcie_3,
+ &xs_pcie_4, &xs_pcie_5,
+ &xs_pcie_6a, &xs_pcie_6b },
};
static struct qcom_icc_node alm_gpu_tcu = {
.name = "alm_gpu_tcu",
- .id = X1E80100_MASTER_GPU_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_pcie_tcu = {
.name = "alm_pcie_tcu",
- .id = X1E80100_MASTER_PCIE_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node alm_sys_tcu = {
.name = "alm_sys_tcu",
- .id = X1E80100_MASTER_SYS_TCU,
.channels = 1,
.buswidth = 8,
.num_links = 2,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node chm_apps = {
.name = "chm_apps",
- .id = X1E80100_MASTER_APPSS_PROC,
.channels = 6,
.buswidth = 32,
.num_links = 3,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
- X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_gpu = {
.name = "qnm_gpu",
- .id = X1E80100_MASTER_GFX3D,
.channels = 4,
.buswidth = 32,
.num_links = 2,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_lpass = {
.name = "qnm_lpass",
- .id = X1E80100_MASTER_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 3,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
- X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_mnoc_hf = {
.name = "qnm_mnoc_hf",
- .id = X1E80100_MASTER_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_mnoc_sf = {
.name = "qnm_mnoc_sf",
- .id = X1E80100_MASTER_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 2,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_nsp_noc = {
.name = "qnm_nsp_noc",
- .id = X1E80100_MASTER_COMPUTE_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 3,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
- X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node qnm_pcie = {
.name = "qnm_pcie",
- .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 64,
.num_links = 2,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc },
};
static struct qcom_icc_node qnm_snoc_sf = {
.name = "qnm_snoc_sf",
- .id = X1E80100_MASTER_SNOC_SF_MEM_NOC,
.channels = 1,
.buswidth = 64,
.num_links = 3,
- .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC,
- X1E80100_SLAVE_MEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qns_gem_noc_cnoc, &qns_llcc,
+ &qns_pcie },
};
static struct qcom_icc_node xm_gic = {
.name = "xm_gic",
- .id = X1E80100_MASTER_GIC2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_LLCC },
+ .link_nodes = { &qns_llcc },
};
static struct qcom_icc_node qnm_lpiaon_noc = {
.name = "qnm_lpiaon_noc",
- .id = X1E80100_MASTER_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_LPASS_GEM_NOC },
+ .link_nodes = { &qns_lpass_ag_noc_gemnoc },
};
static struct qcom_icc_node qnm_lpass_lpinoc = {
.name = "qnm_lpass_lpinoc",
- .id = X1E80100_MASTER_LPASS_LPINOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+ .link_nodes = { &qns_lpass_aggnoc },
};
static struct qcom_icc_node qxm_lpinoc_dsp_axim = {
.name = "qxm_lpinoc_dsp_axim",
- .id = X1E80100_MASTER_LPASS_PROC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC },
+ .link_nodes = { &qns_lpi_aon_noc },
};
static struct qcom_icc_node llcc_mc = {
.name = "llcc_mc",
- .id = X1E80100_MASTER_LLCC,
.channels = 8,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_SLAVE_EBI1 },
+ .link_nodes = { &ebi },
};
static struct qcom_icc_node qnm_av1_enc = {
.name = "qnm_av1_enc",
- .id = X1E80100_MASTER_AV1_ENC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_hf = {
.name = "qnm_camnoc_hf",
- .id = X1E80100_MASTER_CAMNOC_HF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_camnoc_icp = {
.name = "qnm_camnoc_icp",
- .id = X1E80100_MASTER_CAMNOC_ICP,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_camnoc_sf = {
.name = "qnm_camnoc_sf",
- .id = X1E80100_MASTER_CAMNOC_SF,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_eva = {
.name = "qnm_eva",
- .id = X1E80100_MASTER_EVA,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_mdp = {
.name = "qnm_mdp",
- .id = X1E80100_MASTER_MDP,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_hf },
};
static struct qcom_icc_node qnm_video = {
.name = "qnm_video",
- .id = X1E80100_MASTER_VIDEO,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_cv_cpu = {
.name = "qnm_video_cv_cpu",
- .id = X1E80100_MASTER_VIDEO_CV_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qnm_video_v_cpu = {
.name = "qnm_video_v_cpu",
- .id = X1E80100_MASTER_VIDEO_V_PROC,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qns_mem_noc_sf },
};
static struct qcom_icc_node qsm_mnoc_cfg = {
.name = "qsm_mnoc_cfg",
- .id = X1E80100_MASTER_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_SLAVE_SERVICE_MNOC },
+ .link_nodes = { &srvc_mnoc },
};
static struct qcom_icc_node qxm_nsp = {
.name = "qxm_nsp",
- .id = X1E80100_MASTER_CDSP_PROC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_SLAVE_CDSP_MEM_NOC },
+ .link_nodes = { &qns_nsp_gemnoc },
};
static struct qcom_icc_node qnm_pcie_north_gem_noc = {
.name = "qnm_pcie_north_gem_noc",
- .id = X1E80100_MASTER_PCIE_NORTH,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node qnm_pcie_south_gem_noc = {
.name = "qnm_pcie_south_gem_noc",
- .id = X1E80100_MASTER_PCIE_SOUTH,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qns_pcie_mem_noc },
};
static struct qcom_icc_node xm_pcie_3 = {
.name = "xm_pcie_3",
- .id = X1E80100_MASTER_PCIE_3,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_NORTH },
+ .link_nodes = { &qns_pcie_north_gem_noc },
};
static struct qcom_icc_node xm_pcie_4 = {
.name = "xm_pcie_4",
- .id = X1E80100_MASTER_PCIE_4,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_NORTH },
+ .link_nodes = { &qns_pcie_north_gem_noc },
};
static struct qcom_icc_node xm_pcie_5 = {
.name = "xm_pcie_5",
- .id = X1E80100_MASTER_PCIE_5,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_NORTH },
+ .link_nodes = { &qns_pcie_north_gem_noc },
};
static struct qcom_icc_node xm_pcie_0 = {
.name = "xm_pcie_0",
- .id = X1E80100_MASTER_PCIE_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH },
+ .link_nodes = { &qns_pcie_south_gem_noc },
};
static struct qcom_icc_node xm_pcie_1 = {
.name = "xm_pcie_1",
- .id = X1E80100_MASTER_PCIE_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH },
+ .link_nodes = { &qns_pcie_south_gem_noc },
};
static struct qcom_icc_node xm_pcie_2 = {
.name = "xm_pcie_2",
- .id = X1E80100_MASTER_PCIE_2,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH },
+ .link_nodes = { &qns_pcie_south_gem_noc },
};
static struct qcom_icc_node xm_pcie_6a = {
.name = "xm_pcie_6a",
- .id = X1E80100_MASTER_PCIE_6A,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH },
+ .link_nodes = { &qns_pcie_south_gem_noc },
};
static struct qcom_icc_node xm_pcie_6b = {
.name = "xm_pcie_6b",
- .id = X1E80100_MASTER_PCIE_6B,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH },
+ .link_nodes = { &qns_pcie_south_gem_noc },
};
static struct qcom_icc_node qnm_aggre1_noc = {
.name = "qnm_aggre1_noc",
- .id = X1E80100_MASTER_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre2_noc = {
.name = "qnm_aggre2_noc",
- .id = X1E80100_MASTER_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_gic = {
.name = "qnm_gic",
- .id = X1E80100_MASTER_GIC1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_usb_anoc = {
.name = "qnm_usb_anoc",
- .id = X1E80100_MASTER_USB_NOC_SNOC,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF },
+ .link_nodes = { &qns_gemnoc_sf },
};
static struct qcom_icc_node qnm_aggre_usb_north_snoc = {
.name = "qnm_aggre_usb_north_snoc",
- .id = X1E80100_MASTER_AGGRE_USB_NORTH,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_SLAVE_USB_NOC_SNOC },
+ .link_nodes = { &qns_aggre_usb_snoc },
};
static struct qcom_icc_node qnm_aggre_usb_south_snoc = {
.name = "qnm_aggre_usb_south_snoc",
- .id = X1E80100_MASTER_AGGRE_USB_SOUTH,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_SLAVE_USB_NOC_SNOC },
+ .link_nodes = { &qns_aggre_usb_snoc },
};
static struct qcom_icc_node xm_usb2_0 = {
.name = "xm_usb2_0",
- .id = X1E80100_MASTER_USB2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_AGGRE_USB_NORTH },
+ .link_nodes = { &qns_aggre_usb_north_snoc },
};
static struct qcom_icc_node xm_usb3_mp = {
.name = "xm_usb3_mp",
- .id = X1E80100_MASTER_USB3_MP,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_AGGRE_USB_NORTH },
+ .link_nodes = { &qns_aggre_usb_north_snoc },
};
static struct qcom_icc_node xm_usb3_0 = {
.name = "xm_usb3_0",
- .id = X1E80100_MASTER_USB3_0,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+ .link_nodes = { &qns_aggre_usb_south_snoc },
};
static struct qcom_icc_node xm_usb3_1 = {
.name = "xm_usb3_1",
- .id = X1E80100_MASTER_USB3_1,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+ .link_nodes = { &qns_aggre_usb_south_snoc },
};
static struct qcom_icc_node xm_usb3_2 = {
.name = "xm_usb3_2",
- .id = X1E80100_MASTER_USB3_2,
.channels = 1,
.buswidth = 8,
.num_links = 1,
- .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+ .link_nodes = { &qns_aggre_usb_south_snoc },
};
static struct qcom_icc_node xm_usb4_0 = {
.name = "xm_usb4_0",
- .id = X1E80100_MASTER_USB4_0,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+ .link_nodes = { &qns_aggre_usb_south_snoc },
};
static struct qcom_icc_node xm_usb4_1 = {
.name = "xm_usb4_1",
- .id = X1E80100_MASTER_USB4_1,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+ .link_nodes = { &qns_aggre_usb_south_snoc },
};
static struct qcom_icc_node xm_usb4_2 = {
.name = "xm_usb4_2",
- .id = X1E80100_MASTER_USB4_2,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
+ .link_nodes = { &qns_aggre_usb_south_snoc },
};
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
- .id = X1E80100_SLAVE_A1NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_MASTER_A1NOC_SNOC },
+ .link_nodes = { &qnm_aggre1_noc },
};
static struct qcom_icc_node qns_a2noc_snoc = {
.name = "qns_a2noc_snoc",
- .id = X1E80100_SLAVE_A2NOC_SNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_MASTER_A2NOC_SNOC },
+ .link_nodes = { &qnm_aggre2_noc },
};
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
- .id = X1E80100_SLAVE_QUP_CORE_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup1_core_slave = {
.name = "qup1_core_slave",
- .id = X1E80100_SLAVE_QUP_CORE_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qup2_core_slave = {
.name = "qup2_core_slave",
- .id = X1E80100_SLAVE_QUP_CORE_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy0 = {
.name = "qhs_ahb2phy0",
- .id = X1E80100_SLAVE_AHB2PHY_SOUTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy1 = {
.name = "qhs_ahb2phy1",
- .id = X1E80100_SLAVE_AHB2PHY_NORTH,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ahb2phy2 = {
.name = "qhs_ahb2phy2",
- .id = X1E80100_SLAVE_AHB2PHY_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_av1_enc_cfg = {
.name = "qhs_av1_enc_cfg",
- .id = X1E80100_SLAVE_AV1_ENC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_camera_cfg = {
.name = "qhs_camera_cfg",
- .id = X1E80100_SLAVE_CAMERA_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_clk_ctl = {
.name = "qhs_clk_ctl",
- .id = X1E80100_SLAVE_CLK_CTL,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_crypto0_cfg = {
.name = "qhs_crypto0_cfg",
- .id = X1E80100_SLAVE_CRYPTO_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_display_cfg = {
.name = "qhs_display_cfg",
- .id = X1E80100_SLAVE_DISPLAY_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_gpuss_cfg = {
.name = "qhs_gpuss_cfg",
- .id = X1E80100_SLAVE_GFX3D_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_imem_cfg = {
.name = "qhs_imem_cfg",
- .id = X1E80100_SLAVE_IMEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ipc_router = {
.name = "qhs_ipc_router",
- .id = X1E80100_SLAVE_IPC_ROUTER_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie0_cfg = {
.name = "qhs_pcie0_cfg",
- .id = X1E80100_SLAVE_PCIE_0_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie1_cfg = {
.name = "qhs_pcie1_cfg",
- .id = X1E80100_SLAVE_PCIE_1_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie2_cfg = {
.name = "qhs_pcie2_cfg",
- .id = X1E80100_SLAVE_PCIE_2_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie3_cfg = {
.name = "qhs_pcie3_cfg",
- .id = X1E80100_SLAVE_PCIE_3_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie4_cfg = {
.name = "qhs_pcie4_cfg",
- .id = X1E80100_SLAVE_PCIE_4_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie5_cfg = {
.name = "qhs_pcie5_cfg",
- .id = X1E80100_SLAVE_PCIE_5_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie6a_cfg = {
.name = "qhs_pcie6a_cfg",
- .id = X1E80100_SLAVE_PCIE_6A_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie6b_cfg = {
.name = "qhs_pcie6b_cfg",
- .id = X1E80100_SLAVE_PCIE_6B_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pcie_rsc_cfg = {
.name = "qhs_pcie_rsc_cfg",
- .id = X1E80100_SLAVE_PCIE_RSC_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_pdm = {
.name = "qhs_pdm",
- .id = X1E80100_SLAVE_PDM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_prng = {
.name = "qhs_prng",
- .id = X1E80100_SLAVE_PRNG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qdss_cfg = {
.name = "qhs_qdss_cfg",
- .id = X1E80100_SLAVE_QDSS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qspi = {
.name = "qhs_qspi",
- .id = X1E80100_SLAVE_QSPI_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup0 = {
.name = "qhs_qup0",
- .id = X1E80100_SLAVE_QUP_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup1 = {
.name = "qhs_qup1",
- .id = X1E80100_SLAVE_QUP_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_qup2 = {
.name = "qhs_qup2",
- .id = X1E80100_SLAVE_QUP_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc2 = {
.name = "qhs_sdc2",
- .id = X1E80100_SLAVE_SDCC_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_sdc4 = {
.name = "qhs_sdc4",
- .id = X1E80100_SLAVE_SDCC_4,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_smmuv3_cfg = {
.name = "qhs_smmuv3_cfg",
- .id = X1E80100_SLAVE_SMMUV3_CFG,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tcsr = {
.name = "qhs_tcsr",
- .id = X1E80100_SLAVE_TCSR,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tlmm = {
.name = "qhs_tlmm",
- .id = X1E80100_SLAVE_TLMM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_ufs_mem_cfg = {
.name = "qhs_ufs_mem_cfg",
- .id = X1E80100_SLAVE_UFS_MEM_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb2_0_cfg = {
.name = "qhs_usb2_0_cfg",
- .id = X1E80100_SLAVE_USB2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_0_cfg = {
.name = "qhs_usb3_0_cfg",
- .id = X1E80100_SLAVE_USB3_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_1_cfg = {
.name = "qhs_usb3_1_cfg",
- .id = X1E80100_SLAVE_USB3_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_2_cfg = {
.name = "qhs_usb3_2_cfg",
- .id = X1E80100_SLAVE_USB3_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb3_mp_cfg = {
.name = "qhs_usb3_mp_cfg",
- .id = X1E80100_SLAVE_USB3_MP,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb4_0_cfg = {
.name = "qhs_usb4_0_cfg",
- .id = X1E80100_SLAVE_USB4_0,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb4_1_cfg = {
.name = "qhs_usb4_1_cfg",
- .id = X1E80100_SLAVE_USB4_1,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_usb4_2_cfg = {
.name = "qhs_usb4_2_cfg",
- .id = X1E80100_SLAVE_USB4_2,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_venus_cfg = {
.name = "qhs_venus_cfg",
- .id = X1E80100_SLAVE_VENUS_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_lpass_qtb_cfg = {
.name = "qss_lpass_qtb_cfg",
- .id = X1E80100_SLAVE_LPASS_QTB_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qss_mnoc_cfg = {
.name = "qss_mnoc_cfg",
- .id = X1E80100_SLAVE_CNOC_MNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_MASTER_CNOC_MNOC_CFG },
+ .link_nodes = { &qsm_mnoc_cfg },
};
static struct qcom_icc_node qss_nsp_qtb_cfg = {
.name = "qss_nsp_qtb_cfg",
- .id = X1E80100_SLAVE_NSP_QTB_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_qdss_stm = {
.name = "xs_qdss_stm",
- .id = X1E80100_SLAVE_QDSS_STM,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node xs_sys_tcu_cfg = {
.name = "xs_sys_tcu_cfg",
- .id = X1E80100_SLAVE_TCU,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qhs_aoss = {
.name = "qhs_aoss",
- .id = X1E80100_SLAVE_AOSS,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qhs_tme_cfg = {
.name = "qhs_tme_cfg",
- .id = X1E80100_SLAVE_TME_CFG,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_apss = {
.name = "qns_apss",
- .id = X1E80100_SLAVE_APPSS,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node qss_cfg = {
.name = "qss_cfg",
- .id = X1E80100_SLAVE_CNOC_CFG,
.channels = 1,
.buswidth = 4,
.num_links = 1,
- .links = { X1E80100_MASTER_CNOC_CFG },
+ .link_nodes = { &qsm_cfg },
};
static struct qcom_icc_node qxs_boot_imem = {
.name = "qxs_boot_imem",
- .id = X1E80100_SLAVE_BOOT_IMEM,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node qxs_imem = {
.name = "qxs_imem",
- .id = X1E80100_SLAVE_IMEM,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_0 = {
.name = "xs_pcie_0",
- .id = X1E80100_SLAVE_PCIE_0,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_1 = {
.name = "xs_pcie_1",
- .id = X1E80100_SLAVE_PCIE_1,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_2 = {
.name = "xs_pcie_2",
- .id = X1E80100_SLAVE_PCIE_2,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_3 = {
.name = "xs_pcie_3",
- .id = X1E80100_SLAVE_PCIE_3,
.channels = 1,
.buswidth = 64,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_4 = {
.name = "xs_pcie_4",
- .id = X1E80100_SLAVE_PCIE_4,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_5 = {
.name = "xs_pcie_5",
- .id = X1E80100_SLAVE_PCIE_5,
.channels = 1,
.buswidth = 8,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_6a = {
.name = "xs_pcie_6a",
- .id = X1E80100_SLAVE_PCIE_6A,
.channels = 1,
.buswidth = 32,
- .num_links = 0,
};
static struct qcom_icc_node xs_pcie_6b = {
.name = "xs_pcie_6b",
- .id = X1E80100_SLAVE_PCIE_6B,
.channels = 1,
.buswidth = 16,
- .num_links = 0,
};
static struct qcom_icc_node qns_gem_noc_cnoc = {
.name = "qns_gem_noc_cnoc",
- .id = X1E80100_SLAVE_GEM_NOC_CNOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_MASTER_GEM_NOC_CNOC },
+ .link_nodes = { &qnm_gemnoc_cnoc },
};
static struct qcom_icc_node qns_llcc = {
.name = "qns_llcc",
- .id = X1E80100_SLAVE_LLCC,
.channels = 8,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_MASTER_LLCC },
+ .link_nodes = { &llcc_mc },
};
static struct qcom_icc_node qns_pcie = {
.name = "qns_pcie",
- .id = X1E80100_SLAVE_MEM_NOC_PCIE_SNOC,
.channels = 1,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_MASTER_GEM_NOC_PCIE_SNOC },
+ .link_nodes = { &qnm_gemnoc_pcie },
};
static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
.name = "qns_lpass_ag_noc_gemnoc",
- .id = X1E80100_SLAVE_LPASS_GEM_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_MASTER_LPASS_GEM_NOC },
+ .link_nodes = { &qnm_lpass },
};
static struct qcom_icc_node qns_lpass_aggnoc = {
.name = "qns_lpass_aggnoc",
- .id = X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_MASTER_LPIAON_NOC },
+ .link_nodes = { &qnm_lpiaon_noc },
};
static struct qcom_icc_node qns_lpi_aon_noc = {
.name = "qns_lpi_aon_noc",
- .id = X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC,
.channels = 1,
.buswidth = 16,
.num_links = 1,
- .links = { X1E80100_MASTER_LPASS_LPINOC },
+ .link_nodes = { &qnm_lpass_lpinoc },
};
static struct qcom_icc_node ebi = {
.name = "ebi",
- .id = X1E80100_SLAVE_EBI1,
.channels = 8,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_mem_noc_hf = {
.name = "qns_mem_noc_hf",
- .id = X1E80100_SLAVE_MNOC_HF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_MASTER_MNOC_HF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_hf },
};
static struct qcom_icc_node qns_mem_noc_sf = {
.name = "qns_mem_noc_sf",
- .id = X1E80100_SLAVE_MNOC_SF_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_MASTER_MNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_mnoc_sf },
};
static struct qcom_icc_node srvc_mnoc = {
.name = "srvc_mnoc",
- .id = X1E80100_SLAVE_SERVICE_MNOC,
.channels = 1,
.buswidth = 4,
- .num_links = 0,
};
static struct qcom_icc_node qns_nsp_gemnoc = {
.name = "qns_nsp_gemnoc",
- .id = X1E80100_SLAVE_CDSP_MEM_NOC,
.channels = 2,
.buswidth = 32,
.num_links = 1,
- .links = { X1E80100_MASTER_COMPUTE_NOC },
+ .link_nodes = { &qnm_nsp_noc },
};
static struct qcom_icc_node qns_pcie_mem_noc = {
.name = "qns_pcie_mem_noc",
- .id = X1E80100_SLAVE_ANOC_PCIE_GEM_NOC,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_MASTER_ANOC_PCIE_GEM_NOC },
+ .link_nodes = { &qnm_pcie },
};
static struct qcom_icc_node qns_pcie_north_gem_noc = {
.name = "qns_pcie_north_gem_noc",
- .id = X1E80100_SLAVE_PCIE_NORTH,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_MASTER_PCIE_NORTH },
+ .link_nodes = { &qnm_pcie_north_gem_noc },
};
static struct qcom_icc_node qns_pcie_south_gem_noc = {
.name = "qns_pcie_south_gem_noc",
- .id = X1E80100_SLAVE_PCIE_SOUTH,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_MASTER_PCIE_SOUTH },
+ .link_nodes = { &qnm_pcie_south_gem_noc },
};
static struct qcom_icc_node qns_gemnoc_sf = {
.name = "qns_gemnoc_sf",
- .id = X1E80100_SLAVE_SNOC_GEM_NOC_SF,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_MASTER_SNOC_SF_MEM_NOC },
+ .link_nodes = { &qnm_snoc_sf },
};
static struct qcom_icc_node qns_aggre_usb_snoc = {
.name = "qns_aggre_usb_snoc",
- .id = X1E80100_SLAVE_USB_NOC_SNOC,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_MASTER_USB_NOC_SNOC },
+ .link_nodes = { &qnm_usb_anoc },
};
static struct qcom_icc_node qns_aggre_usb_north_snoc = {
.name = "qns_aggre_usb_north_snoc",
- .id = X1E80100_SLAVE_AGGRE_USB_NORTH,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_MASTER_AGGRE_USB_NORTH },
+ .link_nodes = { &qnm_aggre_usb_north_snoc },
};
static struct qcom_icc_node qns_aggre_usb_south_snoc = {
.name = "qns_aggre_usb_south_snoc",
- .id = X1E80100_SLAVE_AGGRE_USB_SOUTH,
.channels = 1,
.buswidth = 64,
.num_links = 1,
- .links = { X1E80100_MASTER_AGGRE_USB_SOUTH },
+ .link_nodes = { &qnm_aggre_usb_south_snoc },
};
static struct qcom_icc_bcm bcm_acv = {
diff --git a/drivers/interconnect/qcom/x1e80100.h b/drivers/interconnect/qcom/x1e80100.h
deleted file mode 100644
index 2e14264f4c2b..000000000000
--- a/drivers/interconnect/qcom/x1e80100.h
+++ /dev/null
@@ -1,192 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * X1E80100 interconnect IDs
- *
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
- * Copyright (c) 2023, Linaro Limited
- */
-
-#ifndef __DRIVERS_INTERCONNECT_QCOM_X1E80100_H
-#define __DRIVERS_INTERCONNECT_QCOM_X1E80100_H
-
-#define X1E80100_MASTER_A1NOC_SNOC 0
-#define X1E80100_MASTER_A2NOC_SNOC 1
-#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC 2
-#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC_DISP 3
-#define X1E80100_MASTER_APPSS_PROC 4
-#define X1E80100_MASTER_CAMNOC_HF 5
-#define X1E80100_MASTER_CAMNOC_ICP 6
-#define X1E80100_MASTER_CAMNOC_SF 7
-#define X1E80100_MASTER_CDSP_PROC 8
-#define X1E80100_MASTER_CNOC_CFG 9
-#define X1E80100_MASTER_CNOC_MNOC_CFG 10
-#define X1E80100_MASTER_COMPUTE_NOC 11
-#define X1E80100_MASTER_CRYPTO 12
-#define X1E80100_MASTER_GEM_NOC_CNOC 13
-#define X1E80100_MASTER_GEM_NOC_PCIE_SNOC 14
-#define X1E80100_MASTER_GFX3D 15
-#define X1E80100_MASTER_GPU_TCU 16
-#define X1E80100_MASTER_IPA 17
-#define X1E80100_MASTER_LLCC 18
-#define X1E80100_MASTER_LLCC_DISP 19
-#define X1E80100_MASTER_LPASS_GEM_NOC 20
-#define X1E80100_MASTER_LPASS_LPINOC 21
-#define X1E80100_MASTER_LPASS_PROC 22
-#define X1E80100_MASTER_LPIAON_NOC 23
-#define X1E80100_MASTER_MDP 24
-#define X1E80100_MASTER_MDP_DISP 25
-#define X1E80100_MASTER_MNOC_HF_MEM_NOC 26
-#define X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP 27
-#define X1E80100_MASTER_MNOC_SF_MEM_NOC 28
-#define X1E80100_MASTER_PCIE_0 29
-#define X1E80100_MASTER_PCIE_1 30
-#define X1E80100_MASTER_QDSS_ETR 31
-#define X1E80100_MASTER_QDSS_ETR_1 32
-#define X1E80100_MASTER_QSPI_0 33
-#define X1E80100_MASTER_QUP_0 34
-#define X1E80100_MASTER_QUP_1 35
-#define X1E80100_MASTER_QUP_2 36
-#define X1E80100_MASTER_QUP_CORE_0 37
-#define X1E80100_MASTER_QUP_CORE_1 38
-#define X1E80100_MASTER_SDCC_2 39
-#define X1E80100_MASTER_SDCC_4 40
-#define X1E80100_MASTER_SNOC_SF_MEM_NOC 41
-#define X1E80100_MASTER_SP 42
-#define X1E80100_MASTER_SYS_TCU 43
-#define X1E80100_MASTER_UFS_MEM 44
-#define X1E80100_MASTER_USB3_0 45
-#define X1E80100_MASTER_VIDEO 46
-#define X1E80100_MASTER_VIDEO_CV_PROC 47
-#define X1E80100_MASTER_VIDEO_V_PROC 48
-#define X1E80100_SLAVE_A1NOC_SNOC 49
-#define X1E80100_SLAVE_A2NOC_SNOC 50
-#define X1E80100_SLAVE_AHB2PHY_NORTH 51
-#define X1E80100_SLAVE_AHB2PHY_SOUTH 52
-#define X1E80100_SLAVE_ANOC_PCIE_GEM_NOC 53
-#define X1E80100_SLAVE_AOSS 54
-#define X1E80100_SLAVE_APPSS 55
-#define X1E80100_SLAVE_BOOT_IMEM 56
-#define X1E80100_SLAVE_CAMERA_CFG 57
-#define X1E80100_SLAVE_CDSP_MEM_NOC 58
-#define X1E80100_SLAVE_CLK_CTL 59
-#define X1E80100_SLAVE_CNOC_CFG 60
-#define X1E80100_SLAVE_CNOC_MNOC_CFG 61
-#define X1E80100_SLAVE_CRYPTO_0_CFG 62
-#define X1E80100_SLAVE_DISPLAY_CFG 63
-#define X1E80100_SLAVE_EBI1 64
-#define X1E80100_SLAVE_EBI1_DISP 65
-#define X1E80100_SLAVE_GEM_NOC_CNOC 66
-#define X1E80100_SLAVE_GFX3D_CFG 67
-#define X1E80100_SLAVE_IMEM 68
-#define X1E80100_SLAVE_IMEM_CFG 69
-#define X1E80100_SLAVE_IPC_ROUTER_CFG 70
-#define X1E80100_SLAVE_LLCC 71
-#define X1E80100_SLAVE_LLCC_DISP 72
-#define X1E80100_SLAVE_LPASS_GEM_NOC 73
-#define X1E80100_SLAVE_LPASS_QTB_CFG 74
-#define X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC 75
-#define X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC 76
-#define X1E80100_SLAVE_MEM_NOC_PCIE_SNOC 77
-#define X1E80100_SLAVE_MNOC_HF_MEM_NOC 78
-#define X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP 79
-#define X1E80100_SLAVE_MNOC_SF_MEM_NOC 80
-#define X1E80100_SLAVE_NSP_QTB_CFG 81
-#define X1E80100_SLAVE_PCIE_0 82
-#define X1E80100_SLAVE_PCIE_0_CFG 83
-#define X1E80100_SLAVE_PCIE_1 84
-#define X1E80100_SLAVE_PCIE_1_CFG 85
-#define X1E80100_SLAVE_PDM 86
-#define X1E80100_SLAVE_PRNG 87
-#define X1E80100_SLAVE_QDSS_CFG 88
-#define X1E80100_SLAVE_QDSS_STM 89
-#define X1E80100_SLAVE_QSPI_0 90
-#define X1E80100_SLAVE_QUP_1 91
-#define X1E80100_SLAVE_QUP_2 92
-#define X1E80100_SLAVE_QUP_CORE_0 93
-#define X1E80100_SLAVE_QUP_CORE_1 94
-#define X1E80100_SLAVE_QUP_CORE_2 95
-#define X1E80100_SLAVE_SDCC_2 96
-#define X1E80100_SLAVE_SDCC_4 97
-#define X1E80100_SLAVE_SERVICE_MNOC 98
-#define X1E80100_SLAVE_SNOC_GEM_NOC_SF 99
-#define X1E80100_SLAVE_TCSR 100
-#define X1E80100_SLAVE_TCU 101
-#define X1E80100_SLAVE_TLMM 102
-#define X1E80100_SLAVE_TME_CFG 103
-#define X1E80100_SLAVE_UFS_MEM_CFG 104
-#define X1E80100_SLAVE_USB3_0 105
-#define X1E80100_SLAVE_VENUS_CFG 106
-#define X1E80100_MASTER_DDR_PERF_MODE 107
-#define X1E80100_MASTER_QUP_CORE_2 108
-#define X1E80100_MASTER_PCIE_TCU 109
-#define X1E80100_MASTER_GIC2 110
-#define X1E80100_MASTER_AV1_ENC 111
-#define X1E80100_MASTER_EVA 112
-#define X1E80100_MASTER_PCIE_NORTH 113
-#define X1E80100_MASTER_PCIE_SOUTH 114
-#define X1E80100_MASTER_PCIE_3 115
-#define X1E80100_MASTER_PCIE_4 116
-#define X1E80100_MASTER_PCIE_5 117
-#define X1E80100_MASTER_PCIE_2 118
-#define X1E80100_MASTER_PCIE_6A 119
-#define X1E80100_MASTER_PCIE_6B 120
-#define X1E80100_MASTER_GIC1 121
-#define X1E80100_MASTER_USB_NOC_SNOC 122
-#define X1E80100_MASTER_AGGRE_USB_NORTH 123
-#define X1E80100_MASTER_AGGRE_USB_SOUTH 124
-#define X1E80100_MASTER_USB2 125
-#define X1E80100_MASTER_USB3_MP 126
-#define X1E80100_MASTER_USB3_1 127
-#define X1E80100_MASTER_USB3_2 128
-#define X1E80100_MASTER_USB4_0 129
-#define X1E80100_MASTER_USB4_1 130
-#define X1E80100_MASTER_USB4_2 131
-#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE 132
-#define X1E80100_MASTER_LLCC_PCIE 133
-#define X1E80100_MASTER_PCIE_NORTH_PCIE 134
-#define X1E80100_MASTER_PCIE_SOUTH_PCIE 135
-#define X1E80100_MASTER_PCIE_3_PCIE 136
-#define X1E80100_MASTER_PCIE_4_PCIE 137
-#define X1E80100_MASTER_PCIE_5_PCIE 138
-#define X1E80100_MASTER_PCIE_0_PCIE 139
-#define X1E80100_MASTER_PCIE_1_PCIE 140
-#define X1E80100_MASTER_PCIE_2_PCIE 141
-#define X1E80100_MASTER_PCIE_6A_PCIE 142
-#define X1E80100_MASTER_PCIE_6B_PCIE 143
-#define X1E80100_SLAVE_AHB2PHY_2 144
-#define X1E80100_SLAVE_AV1_ENC_CFG 145
-#define X1E80100_SLAVE_PCIE_2_CFG 146
-#define X1E80100_SLAVE_PCIE_3_CFG 147
-#define X1E80100_SLAVE_PCIE_4_CFG 148
-#define X1E80100_SLAVE_PCIE_5_CFG 149
-#define X1E80100_SLAVE_PCIE_6A_CFG 150
-#define X1E80100_SLAVE_PCIE_6B_CFG 151
-#define X1E80100_SLAVE_PCIE_RSC_CFG 152
-#define X1E80100_SLAVE_QUP_0 153
-#define X1E80100_SLAVE_SMMUV3_CFG 154
-#define X1E80100_SLAVE_USB2 155
-#define X1E80100_SLAVE_USB3_1 156
-#define X1E80100_SLAVE_USB3_2 157
-#define X1E80100_SLAVE_USB3_MP 158
-#define X1E80100_SLAVE_USB4_0 159
-#define X1E80100_SLAVE_USB4_1 160
-#define X1E80100_SLAVE_USB4_2 161
-#define X1E80100_SLAVE_PCIE_2 162
-#define X1E80100_SLAVE_PCIE_3 163
-#define X1E80100_SLAVE_PCIE_4 164
-#define X1E80100_SLAVE_PCIE_5 165
-#define X1E80100_SLAVE_PCIE_6A 166
-#define X1E80100_SLAVE_PCIE_6B 167
-#define X1E80100_SLAVE_DDR_PERF_MODE 168
-#define X1E80100_SLAVE_PCIE_NORTH 169
-#define X1E80100_SLAVE_PCIE_SOUTH 170
-#define X1E80100_SLAVE_USB_NOC_SNOC 171
-#define X1E80100_SLAVE_AGGRE_USB_NORTH 172
-#define X1E80100_SLAVE_AGGRE_USB_SOUTH 173
-#define X1E80100_SLAVE_LLCC_PCIE 174
-#define X1E80100_SLAVE_EBI1_PCIE 175
-#define X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE 176
-#define X1E80100_SLAVE_PCIE_NORTH_PCIE 177
-#define X1E80100_SLAVE_PCIE_SOUTH_PCIE 178
-
-#endif
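Deleting x1e80100.h is possible because nothing resolves nodes by numeric ID any more. The trade-off is the long block of forward declarations at the top of the new x1e80100.c: an interconnect graph is cyclic (for example, qss_cfg's .link_nodes points at qsm_cfg, whose own links point back into the config slaves), and C only lets a static initializer take &obj if obj was declared earlier. A self-contained illustration of why the declarations are required, with hypothetical names:

#include <stdio.h>

struct node {
	const char *name;
	struct node *link_nodes[4];
};

static struct node slave;		/* tentative definition: &slave usable below */

static struct node master = {
	.name = "master",
	.link_nodes = { &slave },
};

static struct node slave = {		/* completed here, after it was referenced */
	.name = "slave",
	.link_nodes = { &master },	/* cycle back to master is fine */
};

int main(void)
{
	printf("%s -> %s\n", master.name, master.link_nodes[0]->name);
	return 0;
}

The first `static struct node slave;` is a tentative definition, which C allows to be completed later in the same translation unit; that is exactly the shape of the declaration block the patch adds.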
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 70d29b14d851..99095645134f 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -40,12 +40,13 @@ config IOMMU_IO_PGTABLE_LPAE
sizes at both stage-1 and stage-2, as well as address spaces
up to 48-bits in size.
-config IOMMU_IO_PGTABLE_LPAE_SELFTEST
- bool "LPAE selftests"
- depends on IOMMU_IO_PGTABLE_LPAE
+config IOMMU_IO_PGTABLE_LPAE_KUNIT_TEST
+ tristate "KUnit tests for LPAE"
+ depends on IOMMU_IO_PGTABLE_LPAE && KUNIT
+ default KUNIT_ALL_TESTS
help
- Enable self-tests for LPAE page table allocator. This performs
- a series of page-table consistency checks during boot.
+ Enable KUnit tests for the LPAE page table allocator. This performs
+ a series of page-table consistency checks.
If unsure, say N here.
@@ -247,7 +248,7 @@ config SUN50I_IOMMU
config TEGRA_IOMMU_SMMU
bool "NVIDIA Tegra SMMU Support"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
depends on TEGRA_AHB
depends on TEGRA_MC
select IOMMU_API
@@ -384,3 +385,5 @@ config SPRD_IOMMU
Say Y here if you want to use the multimedia devices listed above.
endif # IOMMU_SUPPORT
+
+source "drivers/iommu/generic_pt/Kconfig"
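The first hunk above converts the LPAE self-test from a boot-time check into a KUnit test: tristate so it can be built as a module, gated on KUNIT, and enabled by default under KUNIT_ALL_TESTS. As a rough sketch of what such a conversion looks like (the test and suite names here are placeholders, not the ones in io-pgtable-arm-selftests.c):

#include <kunit/test.h>

/* Each former boot-time consistency check becomes a test function that
 * reports through KUnit assertions instead of printing or panicking at boot.
 */
static void lpae_pgtable_consistency_test(struct kunit *test)
{
	/* allocate a table, map, walk, unmap, asserting at each step */
	KUNIT_EXPECT_EQ(test, 0, 0);	/* placeholder assertion */
}

static struct kunit_case lpae_test_cases[] = {
	KUNIT_CASE(lpae_pgtable_consistency_test),
	{}
};

static struct kunit_suite lpae_test_suite = {
	.name = "io-pgtable-lpae",
	.test_cases = lpae_test_cases,
};
kunit_test_suite(lpae_test_suite);

The matching Makefile hunk further down then builds the object only when the new CONFIG symbol is y or m, so the checks can run on demand (e.g. under kunit.py) rather than on every boot.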
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 355294fa9033..8e8843316c4b 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -3,6 +3,7 @@ obj-y += arm/ iommufd/
obj-$(CONFIG_AMD_IOMMU) += amd/
obj-$(CONFIG_INTEL_IOMMU) += intel/
obj-$(CONFIG_RISCV_IOMMU) += riscv/
+obj-$(CONFIG_GENERIC_PT) += generic_pt/fmt/
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_IOMMU_SUPPORT) += iommu-pages.o
obj-$(CONFIG_IOMMU_API) += iommu-traces.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE_KUNIT_TEST) += io-pgtable-arm-selftests.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_DART) += io-pgtable-dart.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig
index ecef69c11144..f2acf471cb5d 100644
--- a/drivers/iommu/amd/Kconfig
+++ b/drivers/iommu/amd/Kconfig
@@ -11,10 +11,13 @@ config AMD_IOMMU
select MMU_NOTIFIER
select IOMMU_API
select IOMMU_IOVA
- select IOMMU_IO_PGTABLE
select IOMMU_SVA
select IOMMU_IOPF
select IOMMUFD_DRIVER if IOMMUFD
+ select GENERIC_PT
+ select IOMMU_PT
+ select IOMMU_PT_AMDV1
+ select IOMMU_PT_X86_64
depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
help
With this option you can enable support for AMD IOMMU hardware in
diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile
index 59c04a67f398..5412a563c697 100644
--- a/drivers/iommu/amd/Makefile
+++ b/drivers/iommu/amd/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o ppr.o pasid.o
+obj-y += iommu.o init.o quirks.o ppr.o pasid.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index 9b4b589a54b5..25044d28f28a 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -88,7 +88,6 @@ int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag);
* the IOMMU used by this driver.
*/
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
-void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
void amd_iommu_domain_flush_pages(struct protection_domain *domain,
u64 address, size_t size);
void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 5219d7ddfdaa..320733e7d8b4 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -18,7 +18,7 @@
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>
-#include <linux/io-pgtable.h>
+#include <linux/generic_pt/iommu.h>
/*
* Maximum number of IOMMUs supported
@@ -107,6 +107,7 @@
/* Extended Feature 2 Bits */
+#define FEATURE_SEVSNPIO_SUP BIT_ULL(1)
#define FEATURE_SNPAVICSUP GENMASK_ULL(7, 5)
#define FEATURE_SNPAVICSUP_GAM(x) \
(FIELD_GET(FEATURE_SNPAVICSUP, x) == 0x1)
@@ -247,6 +248,10 @@
#define CMD_BUFFER_ENTRIES 512
#define MMIO_CMD_SIZE_SHIFT 56
#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
+#define MMIO_CMD_HEAD_MASK GENMASK_ULL(18, 4) /* Command buffer head ptr field [18:4] */
+#define MMIO_CMD_BUFFER_HEAD(x) FIELD_GET(MMIO_CMD_HEAD_MASK, (x))
+#define MMIO_CMD_TAIL_MASK GENMASK_ULL(18, 4) /* Command buffer tail ptr field [18:4] */
+#define MMIO_CMD_BUFFER_TAIL(x) FIELD_GET(MMIO_CMD_TAIL_MASK, (x))
/* constants for event buffer handling */
#define EVT_BUFFER_SIZE 8192 /* 512 entries */
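The new head/tail accessors added just above lean on <linux/bitfield.h>: GENMASK_ULL(18, 4) builds a mask covering bits 18 down to 4, and FIELD_GET() masks and right-shifts the field in one step. A userspace illustration of the same arithmetic, assuming nothing beyond the documented [18:4] field layout:

#include <stdint.h>
#include <stdio.h>

/* Equivalent of the kernel's GENMASK_ULL for 0 <= l <= h <= 63 */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define MMIO_CMD_HEAD_MASK	GENMASK_ULL(18, 4)

int main(void)
{
	uint64_t reg = 0x12340;	/* example register value */

	/* FIELD_GET(mask, reg): mask the field, then shift down by the
	 * mask's least significant set bit (4 here).
	 */
	uint64_t head = (reg & MMIO_CMD_HEAD_MASK) >> 4;

	printf("head field = 0x%llx\n", (unsigned long long)head);	/* 0x1234 */
	return 0;
}

Using FIELD_GET() with a named GENMASK keeps the shift amount and the mask in a single definition, so the [18:4] layout is stated once instead of being repeated as magic shifts at each use site.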
@@ -337,76 +342,7 @@
#define GUEST_PGTABLE_4_LEVEL 0x00
#define GUEST_PGTABLE_5_LEVEL 0x01
-#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9))
-#define PM_LEVEL_SIZE(x) (((x) < 6) ? \
- ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \
- (0xffffffffffffffffULL))
-#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
-#define PM_LEVEL_ENC(x) (((x) << 9) & 0xe00ULL)
-#define PM_LEVEL_PDE(x, a) ((a) | PM_LEVEL_ENC((x)) | \
- IOMMU_PTE_PR | IOMMU_PTE_IR | IOMMU_PTE_IW)
-#define PM_PTE_LEVEL(pte) (((pte) >> 9) & 0x7ULL)
-
-#define PM_MAP_4k 0
#define PM_ADDR_MASK 0x000ffffffffff000ULL
-#define PM_MAP_MASK(lvl) (PM_ADDR_MASK & \
- (~((1ULL << (12 + ((lvl) * 9))) - 1)))
-#define PM_ALIGNED(lvl, addr) ((PM_MAP_MASK(lvl) & (addr)) == (addr))
-
-/*
- * Returns the page table level to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_LEVEL(pagesize) \
- ((__ffs(pagesize) - 12) / 9)
-/*
- * Returns the number of ptes to use for a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_PTE_COUNT(pagesize) \
- (1ULL << ((__ffs(pagesize) - 12) % 9))
-
-/*
- * Aligns a given io-virtual address to a given page size
- * Pagesize is expected to be a power-of-two
- */
-#define PAGE_SIZE_ALIGN(address, pagesize) \
- ((address) & ~((pagesize) - 1))
-/*
- * Creates an IOMMU PTE for an address and a given pagesize
- * The PTE has no permission bits set
- * Pagesize is expected to be a power-of-two larger than 4096
- */
-#define PAGE_SIZE_PTE(address, pagesize) \
- (((address) | ((pagesize) - 1)) & \
- (~(pagesize >> 1)) & PM_ADDR_MASK)
-
-/*
- * Takes a PTE value with mode=0x07 and returns the page size it maps
- */
-#define PTE_PAGE_SIZE(pte) \
- (1ULL << (1 + ffz(((pte) | 0xfffULL))))
-
-/*
- * Takes a page-table level and returns the default page-size for this level
- */
-#define PTE_LEVEL_PAGE_SIZE(level) \
- (1ULL << (12 + (9 * (level))))
-
-/*
- * The IOPTE dirty bit
- */
-#define IOMMU_PTE_HD_BIT (6)
-
-/*
- * Bit value definition for I/O PTE fields
- */
-#define IOMMU_PTE_PR BIT_ULL(0)
-#define IOMMU_PTE_HD BIT_ULL(IOMMU_PTE_HD_BIT)
-#define IOMMU_PTE_U BIT_ULL(59)
-#define IOMMU_PTE_FC BIT_ULL(60)
-#define IOMMU_PTE_IR BIT_ULL(61)
-#define IOMMU_PTE_IW BIT_ULL(62)
/*
* Bit value definition for DTE fields
@@ -436,12 +372,6 @@
/* DTE[128:179] | DTE[184:191] */
#define DTE_DATA2_INTR_MASK ~GENMASK_ULL(55, 52)
-#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
-#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_PR)
-#define IOMMU_PTE_DIRTY(pte) ((pte) & IOMMU_PTE_HD)
-#define IOMMU_PTE_PAGE(pte) (iommu_phys_to_virt((pte) & IOMMU_PAGE_MASK))
-#define IOMMU_PTE_MODE(pte) (((pte) >> 9) & 0x07)
-
#define IOMMU_PROT_MASK 0x03
#define IOMMU_PROT_IR 0x01
#define IOMMU_PROT_IW 0x02
@@ -534,19 +464,6 @@ struct amd_irte_ops;
#define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0)
-#define io_pgtable_to_data(x) \
- container_of((x), struct amd_io_pgtable, pgtbl)
-
-#define io_pgtable_ops_to_data(x) \
- io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
-
-#define io_pgtable_ops_to_domain(x) \
- container_of(io_pgtable_ops_to_data(x), \
- struct protection_domain, iop)
-
-#define io_pgtable_cfg_to_data(x) \
- container_of((x), struct amd_io_pgtable, pgtbl.cfg)
-
struct gcr3_tbl_info {
u64 *gcr3_tbl; /* Guest CR3 table */
int glx; /* Number of levels for GCR3 table */
@@ -554,13 +471,6 @@ struct gcr3_tbl_info {
u16 domid; /* Per device domain ID */
};
-struct amd_io_pgtable {
- struct io_pgtable pgtbl;
- int mode;
- u64 *root;
- u64 *pgd; /* v2 pgtable pgd pointer */
-};
-
enum protection_domain_mode {
PD_MODE_NONE,
PD_MODE_V1,
@@ -588,10 +498,13 @@ struct pdom_iommu_info {
* independent of their use.
*/
struct protection_domain {
+ union {
+ struct iommu_domain domain;
+ struct pt_iommu iommu;
+ struct pt_iommu_amdv1 amdv1;
+ struct pt_iommu_x86_64 amdv2;
+ };
struct list_head dev_list; /* List of all devices in this domain */
- struct iommu_domain domain; /* generic domain handle used by
- iommu core code */
- struct amd_io_pgtable iop;
spinlock_t lock; /* mostly used to lock the page table*/
u16 id; /* the domain id written to the device table */
enum protection_domain_mode pd_mode; /* Track page table type */
@@ -601,6 +514,9 @@ struct protection_domain {
struct mmu_notifier mn; /* mmu notifier for the SVA domain */
struct list_head dev_data_list; /* List of pdom_dev_data */
};
+PT_IOMMU_CHECK_DOMAIN(struct protection_domain, iommu, domain);
+PT_IOMMU_CHECK_DOMAIN(struct protection_domain, amdv1.iommu, domain);
+PT_IOMMU_CHECK_DOMAIN(struct protection_domain, amdv2.iommu, domain);
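The union above lets the same allocation be viewed as a plain iommu_domain, the generic pt_iommu, or one of the format-specific tables; the PT_IOMMU_CHECK_DOMAIN() lines presumably assert at compile time that every view places the embedded iommu_domain at the same offset, so container_of() back to the protection_domain works from any of them. An illustrative standalone sketch of that invariant (the struct stand-ins are hypothetical, not the kernel definitions):

    #include <assert.h>
    #include <stddef.h>

    struct iommu_domain { int type; };
    /* Hypothetical stand-ins with the domain embedded first. */
    struct pt_iommu { struct iommu_domain domain; };
    struct pt_iommu_amdv1 { struct pt_iommu iommu; };

    struct protection_domain {
        union {
            struct iommu_domain domain;
            struct pt_iommu iommu;
            struct pt_iommu_amdv1 amdv1;
        };
    };

    int main(void)
    {
        /* All union views must alias the same iommu_domain, so
         * container_of() is valid whichever member is used. */
        static_assert(offsetof(struct protection_domain, domain) ==
                      offsetof(struct protection_domain, amdv1.iommu.domain),
                      "embedded domains must alias");
        return 0;
    }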
/*
* This structure contains information about one PCI segment in the system.
@@ -791,6 +707,11 @@ struct amd_iommu {
u32 flags;
volatile u64 *cmd_sem;
atomic64_t cmd_sem_val;
+ /*
+ * Track the physical address so build_completion_wait() can use it
+ * directly, avoiding special-case checks and handling for kdump.
+ */
+ u64 cmd_sem_paddr;
#ifdef CONFIG_AMD_IOMMU_DEBUGFS
/* DebugFS Info */
diff --git a/drivers/iommu/amd/debugfs.c b/drivers/iommu/amd/debugfs.c
index 10fa217a7119..20b04996441d 100644
--- a/drivers/iommu/amd/debugfs.c
+++ b/drivers/iommu/amd/debugfs.c
@@ -37,7 +37,7 @@ static ssize_t iommu_mmio_write(struct file *filp, const char __user *ubuf,
if (ret)
return ret;
- if (iommu->dbg_mmio_offset > iommu->mmio_phys_end - 4) {
+ if (iommu->dbg_mmio_offset > iommu->mmio_phys_end - sizeof(u64)) {
iommu->dbg_mmio_offset = -1;
return -EINVAL;
}
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 8de689b2c5ed..4b2953418977 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -406,6 +406,9 @@ static void iommu_set_device_table(struct amd_iommu *iommu)
BUG_ON(iommu->mmio_base == NULL);
+ if (is_kdump_kernel())
+ return;
+
entry = iommu_virt_to_phys(dev_table);
entry |= (dev_table_size >> 12) - 1;
memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
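As the hunk above shows, the device-table base register packs the table size, as a count of 4 KiB pages minus one, into the low bits alongside the base address. A round-trip sketch of that encoding (the 9-bit field width used for decoding is an assumption for illustration):

    #include <stdint.h>
    #include <stdio.h>

    /* Encode: base | (size_in_4k_pages - 1); decode reverses it. */
    static uint64_t devtab_encode(uint64_t base, uint64_t size)
    {
        return base | ((size >> 12) - 1);
    }

    static uint64_t devtab_size(uint64_t entry)
    {
        return ((entry & 0x1FF) + 1) << 12; /* assumed 9-bit size field */
    }

    int main(void)
    {
        uint64_t e = devtab_encode(0x100000, 2 << 20); /* 2 MiB table */
        printf("size = %llu\n", (unsigned long long)devtab_size(e));
        return 0;
    }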
@@ -646,7 +649,10 @@ static inline int __init alloc_dev_table(struct amd_iommu_pci_seg *pci_seg)
static inline void free_dev_table(struct amd_iommu_pci_seg *pci_seg)
{
- iommu_free_pages(pci_seg->dev_table);
+ if (is_kdump_kernel())
+ memunmap((void *)pci_seg->dev_table);
+ else
+ iommu_free_pages(pci_seg->dev_table);
pci_seg->dev_table = NULL;
}
@@ -710,6 +716,26 @@ static void __init free_alias_table(struct amd_iommu_pci_seg *pci_seg)
pci_seg->alias_table = NULL;
}
+static inline void *iommu_memremap(unsigned long paddr, size_t size)
+{
+ phys_addr_t phys;
+
+ if (!paddr)
+ return NULL;
+
+ /*
+ * Obtain the true physical address in the kdump kernel when SME is
+ * enabled. Note that a previous kernel with SME enabled combined with
+ * a kdump kernel that has SME support disabled is not supported.
+ */
+ phys = __sme_clr(paddr);
+
+ if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ return (__force void *)ioremap_encrypted(phys, size);
+ else
+ return memremap(phys, size, MEMREMAP_WB);
+}
+
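iommu_memremap() strips the SME C-bit from the register-provided address before remapping; __sme_clr() is effectively a mask-clear of that bit. A hedged standalone illustration (the bit position is machine-specific; bit 47 below is only an example):

    #include <stdint.h>
    #include <stdio.h>

    #define SME_C_BIT (1ULL << 47) /* example position only */

    /* Mirrors the semantics of __sme_clr(): drop the encryption bit
     * so the remaining bits form a plain physical address. */
    static uint64_t sme_clr(uint64_t paddr)
    {
        return paddr & ~SME_C_BIT;
    }

    int main(void)
    {
        uint64_t reg = SME_C_BIT | 0x12345000ULL;
        printf("phys = %#llx\n", (unsigned long long)sme_clr(reg));
        return 0;
    }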
/*
* Allocates the command buffer. This buffer is per AMD IOMMU. We can
* write commands to that buffer later and the IOMMU will execute them
@@ -795,11 +821,16 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
BUG_ON(iommu->cmd_buf == NULL);
- entry = iommu_virt_to_phys(iommu->cmd_buf);
- entry |= MMIO_CMD_SIZE_512;
-
- memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
- &entry, sizeof(entry));
+ if (!is_kdump_kernel()) {
+ /*
+ * The command buffer is re-used by the kdump kernel, so setting
+ * the MMIO register is not required.
+ */
+ entry = iommu_virt_to_phys(iommu->cmd_buf);
+ entry |= MMIO_CMD_SIZE_512;
+ memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
+ &entry, sizeof(entry));
+ }
amd_iommu_reset_cmd_buffer(iommu);
}
@@ -850,10 +881,15 @@ static void iommu_enable_event_buffer(struct amd_iommu *iommu)
BUG_ON(iommu->evt_buf == NULL);
- entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
-
- memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
- &entry, sizeof(entry));
+ if (!is_kdump_kernel()) {
+ /*
+ * The event buffer is re-used by the kdump kernel, so setting
+ * the MMIO register is not required.
+ */
+ entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+ memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
+ &entry, sizeof(entry));
+ }
/* set head and tail to zero manually */
writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
@@ -942,8 +978,91 @@ err_out:
static int __init alloc_cwwb_sem(struct amd_iommu *iommu)
{
iommu->cmd_sem = iommu_alloc_4k_pages(iommu, GFP_KERNEL, 1);
+ if (!iommu->cmd_sem)
+ return -ENOMEM;
+ iommu->cmd_sem_paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
+ return 0;
+}
+
+static int __init remap_event_buffer(struct amd_iommu *iommu)
+{
+ u64 paddr;
+
+ pr_info_once("Re-using event buffer from the previous kernel\n");
+ paddr = readq(iommu->mmio_base + MMIO_EVT_BUF_OFFSET) & PM_ADDR_MASK;
+ iommu->evt_buf = iommu_memremap(paddr, EVT_BUFFER_SIZE);
+
+ return iommu->evt_buf ? 0 : -ENOMEM;
+}
+
+static int __init remap_command_buffer(struct amd_iommu *iommu)
+{
+ u64 paddr;
+
+ pr_info_once("Re-using command buffer from the previous kernel\n");
+ paddr = readq(iommu->mmio_base + MMIO_CMD_BUF_OFFSET) & PM_ADDR_MASK;
+ iommu->cmd_buf = iommu_memremap(paddr, CMD_BUFFER_SIZE);
+
+ return iommu->cmd_buf ? 0 : -ENOMEM;
+}
+
+static int __init remap_or_alloc_cwwb_sem(struct amd_iommu *iommu)
+{
+ u64 paddr;
+
+ if (check_feature(FEATURE_SNP)) {
+ /*
+ * When SNP is enabled, the exclusion base register is used for the
+ * completion wait buffer (CWB) address. Read and re-use it.
+ */
+ pr_info_once("Re-using CWB buffers from the previous kernel\n");
+ paddr = readq(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET) & PM_ADDR_MASK;
+ iommu->cmd_sem = iommu_memremap(paddr, PAGE_SIZE);
+ if (!iommu->cmd_sem)
+ return -ENOMEM;
+ iommu->cmd_sem_paddr = paddr;
+ } else {
+ return alloc_cwwb_sem(iommu);
+ }
+
+ return 0;
+}
+
+static int __init alloc_iommu_buffers(struct amd_iommu *iommu)
+{
+ int ret;
+
+ /*
+ * Remap and reuse the completion-wait, command and event buffers
+ * allocated by the previous kernel for a kdump boot.
+ */
+ if (is_kdump_kernel()) {
+ ret = remap_or_alloc_cwwb_sem(iommu);
+ if (ret)
+ return ret;
+
+ ret = remap_command_buffer(iommu);
+ if (ret)
+ return ret;
+
+ ret = remap_event_buffer(iommu);
+ if (ret)
+ return ret;
+ } else {
+ ret = alloc_cwwb_sem(iommu);
+ if (ret)
+ return ret;
- return iommu->cmd_sem ? 0 : -ENOMEM;
+ ret = alloc_command_buffer(iommu);
+ if (ret)
+ return ret;
+
+ ret = alloc_event_buffer(iommu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void __init free_cwwb_sem(struct amd_iommu *iommu)
@@ -951,6 +1070,38 @@ static void __init free_cwwb_sem(struct amd_iommu *iommu)
if (iommu->cmd_sem)
iommu_free_pages((void *)iommu->cmd_sem);
}
+static void __init unmap_cwwb_sem(struct amd_iommu *iommu)
+{
+ if (iommu->cmd_sem) {
+ if (check_feature(FEATURE_SNP))
+ memunmap((void *)iommu->cmd_sem);
+ else
+ iommu_free_pages((void *)iommu->cmd_sem);
+ }
+}
+
+static void __init unmap_command_buffer(struct amd_iommu *iommu)
+{
+ memunmap((void *)iommu->cmd_buf);
+}
+
+static void __init unmap_event_buffer(struct amd_iommu *iommu)
+{
+ memunmap(iommu->evt_buf);
+}
+
+static void __init free_iommu_buffers(struct amd_iommu *iommu)
+{
+ if (is_kdump_kernel()) {
+ unmap_cwwb_sem(iommu);
+ unmap_command_buffer(iommu);
+ unmap_event_buffer(iommu);
+ } else {
+ free_cwwb_sem(iommu);
+ free_command_buffer(iommu);
+ free_event_buffer(iommu);
+ }
+}
static void iommu_enable_xt(struct amd_iommu *iommu)
{
@@ -982,15 +1133,12 @@ static void set_dte_bit(struct dev_table_entry *dte, u8 bit)
dte->data[i] |= (1UL << _bit);
}
-static bool __copy_device_table(struct amd_iommu *iommu)
+static bool __reuse_device_table(struct amd_iommu *iommu)
{
- u64 int_ctl, int_tab_len, entry = 0;
struct amd_iommu_pci_seg *pci_seg = iommu->pci_seg;
- struct dev_table_entry *old_devtb = NULL;
- u32 lo, hi, devid, old_devtb_size;
+ u32 lo, hi, old_devtb_size;
phys_addr_t old_devtb_phys;
- u16 dom_id, dte_v, irq_v;
- u64 tmp;
+ u64 entry;
/* Each IOMMU use separate device table with the same size */
lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
@@ -1015,66 +1163,20 @@ static bool __copy_device_table(struct amd_iommu *iommu)
pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
- old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
- ? (__force void *)ioremap_encrypted(old_devtb_phys,
- pci_seg->dev_table_size)
- : memremap(old_devtb_phys, pci_seg->dev_table_size, MEMREMAP_WB);
- if (!old_devtb)
- return false;
-
- pci_seg->old_dev_tbl_cpy = iommu_alloc_pages_sz(
- GFP_KERNEL | GFP_DMA32, pci_seg->dev_table_size);
+ /*
+ * Re-use the previous kernel's device table for kdump.
+ */
+ pci_seg->old_dev_tbl_cpy = iommu_memremap(old_devtb_phys, pci_seg->dev_table_size);
if (pci_seg->old_dev_tbl_cpy == NULL) {
- pr_err("Failed to allocate memory for copying old device table!\n");
- memunmap(old_devtb);
+ pr_err("Failed to remap memory for reusing old device table!\n");
return false;
}
- for (devid = 0; devid <= pci_seg->last_bdf; ++devid) {
- pci_seg->old_dev_tbl_cpy[devid] = old_devtb[devid];
- dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
- dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
-
- if (dte_v && dom_id) {
- pci_seg->old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
- pci_seg->old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
- /* Reserve the Domain IDs used by previous kernel */
- if (ida_alloc_range(&pdom_ids, dom_id, dom_id, GFP_ATOMIC) != dom_id) {
- pr_err("Failed to reserve domain ID 0x%x\n", dom_id);
- memunmap(old_devtb);
- return false;
- }
- /* If gcr3 table existed, mask it out */
- if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
- tmp = (DTE_GCR3_30_15 | DTE_GCR3_51_31);
- pci_seg->old_dev_tbl_cpy[devid].data[1] &= ~tmp;
- tmp = (DTE_GCR3_14_12 | DTE_FLAG_GV);
- pci_seg->old_dev_tbl_cpy[devid].data[0] &= ~tmp;
- }
- }
-
- irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
- int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
- int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK;
- if (irq_v && (int_ctl || int_tab_len)) {
- if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
- (int_tab_len != DTE_INTTABLEN_512 &&
- int_tab_len != DTE_INTTABLEN_2K)) {
- pr_err("Wrong old irq remapping flag: %#x\n", devid);
- memunmap(old_devtb);
- return false;
- }
-
- pci_seg->old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
- }
- }
- memunmap(old_devtb);
-
return true;
}
-static bool copy_device_table(void)
+static bool reuse_device_table(void)
{
struct amd_iommu *iommu;
struct amd_iommu_pci_seg *pci_seg;
@@ -1082,17 +1184,17 @@ static bool copy_device_table(void)
if (!amd_iommu_pre_enabled)
return false;
- pr_warn("Translation is already enabled - trying to copy translation structures\n");
+ pr_warn("Translation is already enabled - trying to reuse translation structures\n");
/*
 * All IOMMUs within a PCI segment share a common device table.
- * Hence copy device table only once per PCI segment.
+ * Hence reuse the device table only once per PCI segment.
*/
for_each_pci_segment(pci_seg) {
for_each_iommu(iommu) {
if (pci_seg->id != iommu->pci_seg->id)
continue;
- if (!__copy_device_table(iommu))
+ if (!__reuse_device_table(iommu))
return false;
break;
}
@@ -1455,12 +1557,12 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
PCI_FUNC(e->devid));
devid = e->devid;
- for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
- if (alias)
+ if (alias) {
+ for (dev_i = devid_start; dev_i <= devid; ++dev_i)
pci_seg->alias_table[dev_i] = devid_to;
+ set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
}
set_dev_entry_from_acpi_range(iommu, devid_start, devid, flags, ext_flags);
- set_dev_entry_from_acpi(iommu, devid_to, flags, ext_flags);
break;
case IVHD_DEV_SPECIAL: {
u8 handle, type;
@@ -1608,13 +1710,22 @@ static struct amd_iommu_pci_seg *__init alloc_pci_segment(u16 id,
list_add_tail(&pci_seg->list, &amd_iommu_pci_seg_list);
if (alloc_dev_table(pci_seg))
- return NULL;
+ goto err_free_pci_seg;
if (alloc_alias_table(pci_seg))
- return NULL;
+ goto err_free_dev_table;
if (alloc_rlookup_table(pci_seg))
- return NULL;
+ goto err_free_alias_table;
return pci_seg;
+
+err_free_alias_table:
+ free_alias_table(pci_seg);
+err_free_dev_table:
+ free_dev_table(pci_seg);
+err_free_pci_seg:
+ list_del(&pci_seg->list);
+ kfree(pci_seg);
+ return NULL;
}
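The new error labels follow the usual kernel goto-unwind idiom: each label releases exactly one successfully acquired resource, in the reverse order of acquisition. A generic self-contained sketch of the pattern (hypothetical types, not the driver's allocators):

    #include <stdlib.h>

    struct seg { void *a, *b, *c; };

    static struct seg *seg_alloc(void)
    {
        struct seg *s = calloc(1, sizeof(*s));

        if (!s)
            return NULL;
        s->a = malloc(16);
        if (!s->a)
            goto err_free_seg;
        s->b = malloc(16);
        if (!s->b)
            goto err_free_a;
        s->c = malloc(16);
        if (!s->c)
            goto err_free_b;
        return s;

    err_free_b: /* unwind in reverse allocation order */
        free(s->b);
    err_free_a:
        free(s->a);
    err_free_seg:
        free(s);
        return NULL;
    }

    int main(void)
    {
        struct seg *s = seg_alloc();

        if (s) {
            free(s->c);
            free(s->b);
            free(s->a);
            free(s);
        }
        return 0;
    }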
static struct amd_iommu_pci_seg *__init get_pci_segment(u16 id,
@@ -1655,9 +1766,7 @@ static void __init free_sysfs(struct amd_iommu *iommu)
static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_sysfs(iommu);
- free_cwwb_sem(iommu);
- free_command_buffer(iommu);
- free_event_buffer(iommu);
+ free_iommu_buffers(iommu);
amd_iommu_free_ppr_log(iommu);
free_ga_log(iommu);
iommu_unmap_mmio_space(iommu);
@@ -1821,14 +1930,9 @@ static int __init init_iommu_one_late(struct amd_iommu *iommu)
{
int ret;
- if (alloc_cwwb_sem(iommu))
- return -ENOMEM;
-
- if (alloc_command_buffer(iommu))
- return -ENOMEM;
-
- if (alloc_event_buffer(iommu))
- return -ENOMEM;
+ ret = alloc_iommu_buffers(iommu);
+ if (ret)
+ return ret;
iommu->int_enabled = false;
@@ -2157,6 +2261,9 @@ static void print_iommu_info(void)
if (check_feature(FEATURE_SNP))
pr_cont(" SNP");
+ if (check_feature2(FEATURE_SEVSNPIO_SUP))
+ pr_cont(" SEV-TIO");
+
pr_cont("\n");
}
@@ -2778,8 +2885,8 @@ static void early_enable_iommu(struct amd_iommu *iommu)
* This function finally enables all IOMMUs found in the system after
* they have been initialized.
*
- * Or if in kdump kernel and IOMMUs are all pre-enabled, try to copy
- * the old content of device table entries. Not this case or copy failed,
+ * Or if in kdump kernel and IOMMUs are all pre-enabled, try to reuse
+ * the old content of device table entries. Not this case or reuse failed,
* just continue as normal kernel does.
*/
static void early_enable_iommus(void)
@@ -2787,18 +2894,25 @@ static void early_enable_iommus(void)
struct amd_iommu *iommu;
struct amd_iommu_pci_seg *pci_seg;
- if (!copy_device_table()) {
+ if (!reuse_device_table()) {
/*
- * If we get here because copying the device table from the old
+ * If we get here because reusing the device table from the old
 * kernel with all IOMMUs enabled failed, print an error message
 * and try to free the allocated old_dev_tbl_cpy.
*/
- if (amd_iommu_pre_enabled)
- pr_err("Failed to copy DEV table from previous kernel.\n");
+ if (amd_iommu_pre_enabled) {
+ pr_err("Failed to reuse DEV table from previous kernel.\n");
+ /*
+ * Bail out early if the DEV table from the previous kernel
+ * cannot be remapped/reused while SNP is enabled, as IOMMU
+ * commands will time out without a DEV table and panic the
+ * kdump boot.
+ */
+ BUG_ON(check_feature(FEATURE_SNP));
+ }
for_each_pci_segment(pci_seg) {
if (pci_seg->old_dev_tbl_cpy != NULL) {
- iommu_free_pages(pci_seg->old_dev_tbl_cpy);
+ memunmap((void *)pci_seg->old_dev_tbl_cpy);
pci_seg->old_dev_tbl_cpy = NULL;
}
}
@@ -2808,7 +2922,7 @@ static void early_enable_iommus(void)
early_enable_iommu(iommu);
}
} else {
- pr_info("Copied DEV table from previous kernel.\n");
+ pr_info("Reused DEV table from previous kernel.\n");
for_each_pci_segment(pci_seg) {
iommu_free_pages(pci_seg->dev_table);
@@ -2922,7 +3036,7 @@ static void disable_iommus(void)
* disable suspend until real resume implemented
*/
-static void amd_iommu_resume(void)
+static void amd_iommu_resume(void *data)
{
struct amd_iommu *iommu;
@@ -2936,7 +3050,7 @@ static void amd_iommu_resume(void)
amd_iommu_enable_interrupts();
}
-static int amd_iommu_suspend(void)
+static int amd_iommu_suspend(void *data)
{
/* disable IOMMUs to go out of the way for BIOS */
disable_iommus();
@@ -2944,11 +3058,15 @@ static int amd_iommu_suspend(void)
return 0;
}
-static struct syscore_ops amd_iommu_syscore_ops = {
+static const struct syscore_ops amd_iommu_syscore_ops = {
.suspend = amd_iommu_suspend,
.resume = amd_iommu_resume,
};
+static struct syscore amd_iommu_syscore = {
+ .ops = &amd_iommu_syscore_ops,
+};
+
static void __init free_iommu_resources(void)
{
free_iommu_all();
@@ -3067,7 +3185,8 @@ static int __init early_amd_iommu_init(void)
if (!boot_cpu_has(X86_FEATURE_CX16)) {
pr_err("Failed to initialize. The CMPXCHG16B feature is required.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/*
@@ -3292,7 +3411,7 @@ static int __init state_next(void)
init_state = IOMMU_ENABLED;
break;
case IOMMU_ENABLED:
- register_syscore_ops(&amd_iommu_syscore_ops);
+ register_syscore(&amd_iommu_syscore);
iommu_snp_enable();
ret = amd_iommu_init_pci();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
@@ -3395,12 +3514,12 @@ int __init amd_iommu_enable(void)
void amd_iommu_disable(void)
{
- amd_iommu_suspend();
+ amd_iommu_suspend(NULL);
}
int amd_iommu_reenable(int mode)
{
- amd_iommu_resume();
+ amd_iommu_resume(NULL);
return 0;
}
@@ -3912,4 +4031,10 @@ int amd_iommu_snp_disable(void)
return 0;
}
EXPORT_SYMBOL_GPL(amd_iommu_snp_disable);
+
+bool amd_iommu_sev_tio_supported(void)
+{
+ return check_feature2(FEATURE_SEVSNPIO_SUP);
+}
+EXPORT_SYMBOL_GPL(amd_iommu_sev_tio_supported);
#endif
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
deleted file mode 100644
index a91e71f981ef..000000000000
--- a/drivers/iommu/amd/io_pgtable.c
+++ /dev/null
@@ -1,560 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * CPU-agnostic AMD IO page table allocator.
- *
- * Copyright (C) 2020 Advanced Micro Devices, Inc.
- * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
- */
-
-#define pr_fmt(fmt) "AMD-Vi: " fmt
-#define dev_fmt(fmt) pr_fmt(fmt)
-
-#include <linux/atomic.h>
-#include <linux/bitops.h>
-#include <linux/io-pgtable.h>
-#include <linux/kernel.h>
-#include <linux/sizes.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-
-#include <asm/barrier.h>
-
-#include "amd_iommu_types.h"
-#include "amd_iommu.h"
-#include "../iommu-pages.h"
-
-/*
- * Helper function to get the first pte of a large mapping
- */
-static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
- unsigned long *count)
-{
- unsigned long pte_mask, pg_size, cnt;
- u64 *fpte;
-
- pg_size = PTE_PAGE_SIZE(*pte);
- cnt = PAGE_SIZE_PTE_COUNT(pg_size);
- pte_mask = ~((cnt << 3) - 1);
- fpte = (u64 *)(((unsigned long)pte) & pte_mask);
-
- if (page_size)
- *page_size = pg_size;
-
- if (count)
- *count = cnt;
-
- return fpte;
-}
-
-static void free_pt_lvl(u64 *pt, struct iommu_pages_list *freelist, int lvl)
-{
- u64 *p;
- int i;
-
- for (i = 0; i < 512; ++i) {
- /* PTE present? */
- if (!IOMMU_PTE_PRESENT(pt[i]))
- continue;
-
- /* Large PTE? */
- if (PM_PTE_LEVEL(pt[i]) == 0 ||
- PM_PTE_LEVEL(pt[i]) == 7)
- continue;
-
- /*
- * Free the next level. No need to look at l1 tables here since
- * they can only contain leaf PTEs; just free them directly.
- */
- p = IOMMU_PTE_PAGE(pt[i]);
- if (lvl > 2)
- free_pt_lvl(p, freelist, lvl - 1);
- else
- iommu_pages_list_add(freelist, p);
- }
-
- iommu_pages_list_add(freelist, pt);
-}
-
-static void free_sub_pt(u64 *root, int mode, struct iommu_pages_list *freelist)
-{
- switch (mode) {
- case PAGE_MODE_NONE:
- case PAGE_MODE_7_LEVEL:
- break;
- case PAGE_MODE_1_LEVEL:
- iommu_pages_list_add(freelist, root);
- break;
- case PAGE_MODE_2_LEVEL:
- case PAGE_MODE_3_LEVEL:
- case PAGE_MODE_4_LEVEL:
- case PAGE_MODE_5_LEVEL:
- case PAGE_MODE_6_LEVEL:
- free_pt_lvl(root, freelist, mode);
- break;
- default:
- BUG();
- }
-}
-
-/*
- * This function is used to add another level to an IO page table. Adding
- * another level increases the size of the address space by 9 bits to a size up
- * to 64 bits.
- */
-static bool increase_address_space(struct amd_io_pgtable *pgtable,
- unsigned long address,
- unsigned int page_size_level,
- gfp_t gfp)
-{
- struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
- struct protection_domain *domain =
- container_of(pgtable, struct protection_domain, iop);
- unsigned long flags;
- bool ret = true;
- u64 *pte;
-
- pte = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp, SZ_4K);
- if (!pte)
- return false;
-
- spin_lock_irqsave(&domain->lock, flags);
-
- if (address <= PM_LEVEL_SIZE(pgtable->mode) &&
- pgtable->mode - 1 >= page_size_level)
- goto out;
-
- ret = false;
- if (WARN_ON_ONCE(pgtable->mode == amd_iommu_hpt_level))
- goto out;
-
- *pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));
-
- pgtable->root = pte;
- pgtable->mode += 1;
- amd_iommu_update_and_flush_device_table(domain);
-
- pte = NULL;
- ret = true;
-
-out:
- spin_unlock_irqrestore(&domain->lock, flags);
- iommu_free_pages(pte);
-
- return ret;
-}
-
-static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
- unsigned long address,
- unsigned long page_size,
- u64 **pte_page,
- gfp_t gfp,
- bool *updated)
-{
- unsigned long last_addr = address + (page_size - 1);
- struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
- int level, end_lvl;
- u64 *pte, *page;
-
- BUG_ON(!is_power_of_2(page_size));
-
- while (last_addr > PM_LEVEL_SIZE(pgtable->mode) ||
- pgtable->mode - 1 < PAGE_SIZE_LEVEL(page_size)) {
- /*
- * Return an error if there is no memory to update the
- * page-table.
- */
- if (!increase_address_space(pgtable, last_addr,
- PAGE_SIZE_LEVEL(page_size), gfp))
- return NULL;
- }
-
-
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
- address = PAGE_SIZE_ALIGN(address, page_size);
- end_lvl = PAGE_SIZE_LEVEL(page_size);
-
- while (level > end_lvl) {
- u64 __pte, __npte;
- int pte_level;
-
- __pte = *pte;
- pte_level = PM_PTE_LEVEL(__pte);
-
- /*
- * If we replace a series of large PTEs, we need
- * to tear down all of them.
- */
- if (IOMMU_PTE_PRESENT(__pte) &&
- pte_level == PAGE_MODE_7_LEVEL) {
- unsigned long count, i;
- u64 *lpte;
-
- lpte = first_pte_l7(pte, NULL, &count);
-
- /*
- * Unmap the replicated PTEs that still match the
- * original large mapping
- */
- for (i = 0; i < count; ++i)
- cmpxchg64(&lpte[i], __pte, 0ULL);
-
- *updated = true;
- continue;
- }
-
- if (!IOMMU_PTE_PRESENT(__pte) ||
- pte_level == PAGE_MODE_NONE) {
- page = iommu_alloc_pages_node_sz(cfg->amd.nid, gfp,
- SZ_4K);
-
- if (!page)
- return NULL;
-
- __npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));
-
- /* pte could have been changed somewhere. */
- if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_pages(page);
- else if (IOMMU_PTE_PRESENT(__pte))
- *updated = true;
-
- continue;
- }
-
- /* No level skipping support yet */
- if (pte_level != level)
- return NULL;
-
- level -= 1;
-
- pte = IOMMU_PTE_PAGE(__pte);
-
- if (pte_page && level == end_lvl)
- *pte_page = pte;
-
- pte = &pte[PM_LEVEL_INDEX(level, address)];
- }
-
- return pte;
-}
-
-/*
- * This function checks if there is a PTE for a given dma address. If
- * there is one, it returns the pointer to it.
- */
-static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
- unsigned long address,
- unsigned long *page_size)
-{
- int level;
- u64 *pte;
-
- *page_size = 0;
-
- if (address > PM_LEVEL_SIZE(pgtable->mode))
- return NULL;
-
- level = pgtable->mode - 1;
- pte = &pgtable->root[PM_LEVEL_INDEX(level, address)];
- *page_size = PTE_LEVEL_PAGE_SIZE(level);
-
- while (level > 0) {
-
- /* Not Present */
- if (!IOMMU_PTE_PRESENT(*pte))
- return NULL;
-
- /* Large PTE */
- if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL ||
- PM_PTE_LEVEL(*pte) == PAGE_MODE_NONE)
- break;
-
- /* No level skipping support yet */
- if (PM_PTE_LEVEL(*pte) != level)
- return NULL;
-
- level -= 1;
-
- /* Walk to the next level */
- pte = IOMMU_PTE_PAGE(*pte);
- pte = &pte[PM_LEVEL_INDEX(level, address)];
- *page_size = PTE_LEVEL_PAGE_SIZE(level);
- }
-
- /*
- * If we have a series of large PTEs, make
- * sure to return a pointer to the first one.
- */
- if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
- pte = first_pte_l7(pte, page_size, NULL);
-
- return pte;
-}
-
-static void free_clear_pte(u64 *pte, u64 pteval,
- struct iommu_pages_list *freelist)
-{
- u64 *pt;
- int mode;
-
- while (!try_cmpxchg64(pte, &pteval, 0))
- pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");
-
- if (!IOMMU_PTE_PRESENT(pteval))
- return;
-
- pt = IOMMU_PTE_PAGE(pteval);
- mode = IOMMU_PTE_MODE(pteval);
-
- free_sub_pt(pt, mode, freelist);
-}
-
-/*
- * Generic mapping functions. It maps a physical address into a DMA
- * address space. It allocates the page table pages if necessary.
- * In the future it can be extended to a generic mapping function
- * supporting all features of AMD IOMMU page tables like level skipping
- * and full 64 bit address spaces.
- */
-static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t pgsize, size_t pgcount,
- int prot, gfp_t gfp, size_t *mapped)
-{
- struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
- bool updated = false;
- u64 __pte, *pte;
- int ret, i, count;
- size_t size = pgcount << __ffs(pgsize);
- unsigned long o_iova = iova;
-
- BUG_ON(!IS_ALIGNED(iova, pgsize));
- BUG_ON(!IS_ALIGNED(paddr, pgsize));
-
- ret = -EINVAL;
- if (!(prot & IOMMU_PROT_MASK))
- goto out;
-
- while (pgcount > 0) {
- count = PAGE_SIZE_PTE_COUNT(pgsize);
- pte = alloc_pte(pgtable, iova, pgsize, NULL, gfp, &updated);
-
- ret = -ENOMEM;
- if (!pte)
- goto out;
-
- for (i = 0; i < count; ++i)
- free_clear_pte(&pte[i], pte[i], &freelist);
-
- if (!iommu_pages_list_empty(&freelist))
- updated = true;
-
- if (count > 1) {
- __pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
- __pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
- } else
- __pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;
-
- if (prot & IOMMU_PROT_IR)
- __pte |= IOMMU_PTE_IR;
- if (prot & IOMMU_PROT_IW)
- __pte |= IOMMU_PTE_IW;
-
- for (i = 0; i < count; ++i)
- pte[i] = __pte;
-
- iova += pgsize;
- paddr += pgsize;
- pgcount--;
- if (mapped)
- *mapped += pgsize;
- }
-
- ret = 0;
-
-out:
- if (updated) {
- struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
- unsigned long flags;
-
- spin_lock_irqsave(&dom->lock, flags);
- /*
- * Flush domain TLB(s) and wait for completion. Any Device-Table
- * Updates and flushing already happened in
- * increase_address_space().
- */
- amd_iommu_domain_flush_pages(dom, o_iova, size);
- spin_unlock_irqrestore(&dom->lock, flags);
- }
-
- /* Everything flushed out, free pages now */
- iommu_put_pages_list(&freelist);
-
- return ret;
-}
-
-static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
- unsigned long iova,
- size_t pgsize, size_t pgcount,
- struct iommu_iotlb_gather *gather)
-{
- struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- unsigned long long unmapped;
- unsigned long unmap_size;
- u64 *pte;
- size_t size = pgcount << __ffs(pgsize);
-
- BUG_ON(!is_power_of_2(pgsize));
-
- unmapped = 0;
-
- while (unmapped < size) {
- pte = fetch_pte(pgtable, iova, &unmap_size);
- if (pte) {
- int i, count;
-
- count = PAGE_SIZE_PTE_COUNT(unmap_size);
- for (i = 0; i < count; i++)
- pte[i] = 0ULL;
- } else {
- return unmapped;
- }
-
- iova = (iova & ~(unmap_size - 1)) + unmap_size;
- unmapped += unmap_size;
- }
-
- return unmapped;
-}
-
-static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
-{
- struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- unsigned long offset_mask, pte_pgsize;
- u64 *pte, __pte;
-
- pte = fetch_pte(pgtable, iova, &pte_pgsize);
-
- if (!pte || !IOMMU_PTE_PRESENT(*pte))
- return 0;
-
- offset_mask = pte_pgsize - 1;
- __pte = __sme_clr(*pte & PM_ADDR_MASK);
-
- return (__pte & ~offset_mask) | (iova & offset_mask);
-}
-
-static bool pte_test_and_clear_dirty(u64 *ptep, unsigned long size,
- unsigned long flags)
-{
- bool test_only = flags & IOMMU_DIRTY_NO_CLEAR;
- bool dirty = false;
- int i, count;
-
- /*
- * 2.2.3.2 Host Dirty Support
- * When a non-default page size is used, software must OR the
- * Dirty bits in all of the replicated host PTEs used to map
- * the page. The IOMMU does not guarantee the Dirty bits are
- * set in all of the replicated PTEs. Any portion of the page
- * may have been written even if the Dirty bit is set in only
- * one of the replicated PTEs.
- */
- count = PAGE_SIZE_PTE_COUNT(size);
- for (i = 0; i < count && test_only; i++) {
- if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) {
- dirty = true;
- break;
- }
- }
-
- for (i = 0; i < count && !test_only; i++) {
- if (test_and_clear_bit(IOMMU_PTE_HD_BIT,
- (unsigned long *)&ptep[i])) {
- dirty = true;
- }
- }
-
- return dirty;
-}
-
-static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
- unsigned long iova, size_t size,
- unsigned long flags,
- struct iommu_dirty_bitmap *dirty)
-{
- struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- unsigned long end = iova + size - 1;
-
- do {
- unsigned long pgsize = 0;
- u64 *ptep, pte;
-
- ptep = fetch_pte(pgtable, iova, &pgsize);
- if (ptep)
- pte = READ_ONCE(*ptep);
- if (!ptep || !IOMMU_PTE_PRESENT(pte)) {
- pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0);
- iova += pgsize;
- continue;
- }
-
- /*
- * Mark the whole IOVA range as dirty even if only one of
- * the replicated PTEs were marked dirty.
- */
- if (pte_test_and_clear_dirty(ptep, pgsize, flags))
- iommu_dirty_bitmap_record(dirty, iova, pgsize);
- iova += pgsize;
- } while (iova < end);
-
- return 0;
-}
-
-/*
- * ----------------------------------------------------
- */
-static void v1_free_pgtable(struct io_pgtable *iop)
-{
- struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
- struct iommu_pages_list freelist = IOMMU_PAGES_LIST_INIT(freelist);
-
- if (pgtable->mode == PAGE_MODE_NONE)
- return;
-
- /* Page-table is not visible to IOMMU anymore, so free it */
- BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
- pgtable->mode > amd_iommu_hpt_level);
-
- free_sub_pt(pgtable->root, pgtable->mode, &freelist);
- iommu_put_pages_list(&freelist);
-}
-
-static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
-{
- struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
-
- pgtable->root =
- iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
- if (!pgtable->root)
- return NULL;
- pgtable->mode = PAGE_MODE_3_LEVEL;
-
- cfg->pgsize_bitmap = amd_iommu_pgsize_bitmap;
- cfg->ias = IOMMU_IN_ADDR_BIT_SIZE;
- cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
-
- pgtable->pgtbl.ops.map_pages = iommu_v1_map_pages;
- pgtable->pgtbl.ops.unmap_pages = iommu_v1_unmap_pages;
- pgtable->pgtbl.ops.iova_to_phys = iommu_v1_iova_to_phys;
- pgtable->pgtbl.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;
-
- return &pgtable->pgtbl;
-}
-
-struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
- .alloc = v1_alloc_pgtable,
- .free = v1_free_pgtable,
-};
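For reference, the level arithmetic of the just-deleted v1 walker (now supplied by the generic_pt library) is straightforward: each level decodes 9 bits of IOVA above the 12-bit page offset. A standalone recap using the removed PM_LEVEL_SHIFT()/PM_LEVEL_INDEX() definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define PM_LEVEL_SHIFT(x)    (12 + ((x) * 9))
    #define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT(x)) & 0x1FFULL)

    int main(void)
    {
        uint64_t iova = 0x0000123456789000ULL;

        /* A 3-level table (mode 3) walks levels 2..0. */
        for (int lvl = 2; lvl >= 0; lvl--)
            printf("level %d index = %llu\n", lvl,
                   (unsigned long long)PM_LEVEL_INDEX(lvl, iova));
        return 0;
    }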
diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c
deleted file mode 100644
index b47941353ccb..000000000000
--- a/drivers/iommu/amd/io_pgtable_v2.c
+++ /dev/null
@@ -1,370 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * CPU-agnostic AMD IO page table v2 allocator.
- *
- * Copyright (C) 2022, 2023 Advanced Micro Devices, Inc.
- * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
- * Author: Vasant Hegde <vasant.hegde@amd.com>
- */
-
-#define pr_fmt(fmt) "AMD-Vi: " fmt
-#define dev_fmt(fmt) pr_fmt(fmt)
-
-#include <linux/bitops.h>
-#include <linux/io-pgtable.h>
-#include <linux/kernel.h>
-
-#include <asm/barrier.h>
-
-#include "amd_iommu_types.h"
-#include "amd_iommu.h"
-#include "../iommu-pages.h"
-
-#define IOMMU_PAGE_PRESENT BIT_ULL(0) /* Is present */
-#define IOMMU_PAGE_RW BIT_ULL(1) /* Writeable */
-#define IOMMU_PAGE_USER BIT_ULL(2) /* Userspace addressable */
-#define IOMMU_PAGE_PWT BIT_ULL(3) /* Page write through */
-#define IOMMU_PAGE_PCD BIT_ULL(4) /* Page cache disabled */
-#define IOMMU_PAGE_ACCESS BIT_ULL(5) /* Was accessed (updated by IOMMU) */
-#define IOMMU_PAGE_DIRTY BIT_ULL(6) /* Was written to (updated by IOMMU) */
-#define IOMMU_PAGE_PSE BIT_ULL(7) /* Page Size Extensions */
-#define IOMMU_PAGE_NX BIT_ULL(63) /* No execute */
-
-#define MAX_PTRS_PER_PAGE 512
-
-#define IOMMU_PAGE_SIZE_2M BIT_ULL(21)
-#define IOMMU_PAGE_SIZE_1G BIT_ULL(30)
-
-
-static inline int get_pgtable_level(void)
-{
- return amd_iommu_gpt_level;
-}
-
-static inline bool is_large_pte(u64 pte)
-{
- return (pte & IOMMU_PAGE_PSE);
-}
-
-static inline u64 set_pgtable_attr(u64 *page)
-{
- u64 prot;
-
- prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
- prot |= IOMMU_PAGE_ACCESS;
-
- return (iommu_virt_to_phys(page) | prot);
-}
-
-static inline void *get_pgtable_pte(u64 pte)
-{
- return iommu_phys_to_virt(pte & PM_ADDR_MASK);
-}
-
-static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
-{
- u64 pte;
-
- pte = __sme_set(paddr & PM_ADDR_MASK);
- pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
- pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;
-
- if (prot & IOMMU_PROT_IW)
- pte |= IOMMU_PAGE_RW;
-
- /* Large page */
- if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
- pte |= IOMMU_PAGE_PSE;
-
- return pte;
-}
-
-static inline u64 get_alloc_page_size(u64 size)
-{
- if (size >= IOMMU_PAGE_SIZE_1G)
- return IOMMU_PAGE_SIZE_1G;
-
- if (size >= IOMMU_PAGE_SIZE_2M)
- return IOMMU_PAGE_SIZE_2M;
-
- return PAGE_SIZE;
-}
-
-static inline int page_size_to_level(u64 pg_size)
-{
- if (pg_size == IOMMU_PAGE_SIZE_1G)
- return PAGE_MODE_3_LEVEL;
- if (pg_size == IOMMU_PAGE_SIZE_2M)
- return PAGE_MODE_2_LEVEL;
-
- return PAGE_MODE_1_LEVEL;
-}
-
-static void free_pgtable(u64 *pt, int level)
-{
- u64 *p;
- int i;
-
- for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
- /* PTE present? */
- if (!IOMMU_PTE_PRESENT(pt[i]))
- continue;
-
- if (is_large_pte(pt[i]))
- continue;
-
- /*
- * Free the next level. No need to look at l1 tables here since
- * they can only contain leaf PTEs; just free them directly.
- */
- p = get_pgtable_pte(pt[i]);
- if (level > 2)
- free_pgtable(p, level - 1);
- else
- iommu_free_pages(p);
- }
-
- iommu_free_pages(pt);
-}
-
-/* Allocate page table */
-static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
- unsigned long pg_size, gfp_t gfp, bool *updated)
-{
- u64 *pte, *page;
- int level, end_level;
-
- level = get_pgtable_level() - 1;
- end_level = page_size_to_level(pg_size);
- pte = &pgd[PM_LEVEL_INDEX(level, iova)];
- iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);
-
- while (level >= end_level) {
- u64 __pte, __npte;
-
- __pte = *pte;
-
- if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
- /* Unmap large pte */
- cmpxchg64(pte, *pte, 0ULL);
- *updated = true;
- continue;
- }
-
- if (!IOMMU_PTE_PRESENT(__pte)) {
- page = iommu_alloc_pages_node_sz(nid, gfp, SZ_4K);
- if (!page)
- return NULL;
-
- __npte = set_pgtable_attr(page);
- /* pte could have been changed somewhere. */
- if (!try_cmpxchg64(pte, &__pte, __npte))
- iommu_free_pages(page);
- else if (IOMMU_PTE_PRESENT(__pte))
- *updated = true;
-
- continue;
- }
-
- level -= 1;
- pte = get_pgtable_pte(__pte);
- pte = &pte[PM_LEVEL_INDEX(level, iova)];
- }
-
- /* Tear down existing pte entries */
- if (IOMMU_PTE_PRESENT(*pte)) {
- u64 *__pte;
-
- *updated = true;
- __pte = get_pgtable_pte(*pte);
- cmpxchg64(pte, *pte, 0ULL);
- if (pg_size == IOMMU_PAGE_SIZE_1G)
- free_pgtable(__pte, end_level - 1);
- else if (pg_size == IOMMU_PAGE_SIZE_2M)
- iommu_free_pages(__pte);
- }
-
- return pte;
-}
-
-/*
- * This function checks if there is a PTE for a given dma address.
- * If there is one, it returns the pointer to it.
- */
-static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
- unsigned long iova, unsigned long *page_size)
-{
- u64 *pte;
- int level;
-
- level = get_pgtable_level() - 1;
- pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
- /* Default page size is 4K */
- *page_size = PAGE_SIZE;
-
- while (level) {
- /* Not present */
- if (!IOMMU_PTE_PRESENT(*pte))
- return NULL;
-
- /* Walk to the next level */
- pte = get_pgtable_pte(*pte);
- pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];
-
- /* Large page */
- if (is_large_pte(*pte)) {
- if (level == PAGE_MODE_3_LEVEL)
- *page_size = IOMMU_PAGE_SIZE_1G;
- else if (level == PAGE_MODE_2_LEVEL)
- *page_size = IOMMU_PAGE_SIZE_2M;
- else
- return NULL; /* Wrongly set PSE bit in PTE */
-
- break;
- }
-
- level -= 1;
- }
-
- return pte;
-}
-
-static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t pgsize, size_t pgcount,
- int prot, gfp_t gfp, size_t *mapped)
-{
- struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
- u64 *pte;
- unsigned long map_size;
- unsigned long mapped_size = 0;
- unsigned long o_iova = iova;
- size_t size = pgcount << __ffs(pgsize);
- int ret = 0;
- bool updated = false;
-
- if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
- return -EINVAL;
-
- if (!(prot & IOMMU_PROT_MASK))
- return -EINVAL;
-
- while (mapped_size < size) {
- map_size = get_alloc_page_size(pgsize);
- pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
- iova, map_size, gfp, &updated);
- if (!pte) {
- ret = -ENOMEM;
- goto out;
- }
-
- *pte = set_pte_attr(paddr, map_size, prot);
-
- iova += map_size;
- paddr += map_size;
- mapped_size += map_size;
- }
-
-out:
- if (updated) {
- struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
- unsigned long flags;
-
- spin_lock_irqsave(&pdom->lock, flags);
- amd_iommu_domain_flush_pages(pdom, o_iova, size);
- spin_unlock_irqrestore(&pdom->lock, flags);
- }
-
- if (mapped)
- *mapped += mapped_size;
-
- return ret;
-}
-
-static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
- unsigned long iova,
- size_t pgsize, size_t pgcount,
- struct iommu_iotlb_gather *gather)
-{
- struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
- unsigned long unmap_size;
- unsigned long unmapped = 0;
- size_t size = pgcount << __ffs(pgsize);
- u64 *pte;
-
- if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
- return 0;
-
- while (unmapped < size) {
- pte = fetch_pte(pgtable, iova, &unmap_size);
- if (!pte)
- return unmapped;
-
- *pte = 0ULL;
-
- iova = (iova & ~(unmap_size - 1)) + unmap_size;
- unmapped += unmap_size;
- }
-
- return unmapped;
-}
-
-static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
-{
- struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
- unsigned long offset_mask, pte_pgsize;
- u64 *pte, __pte;
-
- pte = fetch_pte(pgtable, iova, &pte_pgsize);
- if (!pte || !IOMMU_PTE_PRESENT(*pte))
- return 0;
-
- offset_mask = pte_pgsize - 1;
- __pte = __sme_clr(*pte & PM_ADDR_MASK);
-
- return (__pte & ~offset_mask) | (iova & offset_mask);
-}
-
-/*
- * ----------------------------------------------------
- */
-static void v2_free_pgtable(struct io_pgtable *iop)
-{
- struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
-
- if (!pgtable || !pgtable->pgd)
- return;
-
- /* Free page table */
- free_pgtable(pgtable->pgd, get_pgtable_level());
- pgtable->pgd = NULL;
-}
-
-static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
-{
- struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);
- int ias = IOMMU_IN_ADDR_BIT_SIZE;
-
- pgtable->pgd = iommu_alloc_pages_node_sz(cfg->amd.nid, GFP_KERNEL, SZ_4K);
- if (!pgtable->pgd)
- return NULL;
-
- if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
- ias = 57;
-
- pgtable->pgtbl.ops.map_pages = iommu_v2_map_pages;
- pgtable->pgtbl.ops.unmap_pages = iommu_v2_unmap_pages;
- pgtable->pgtbl.ops.iova_to_phys = iommu_v2_iova_to_phys;
-
- cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
- cfg->ias = ias;
- cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
-
- return &pgtable->pgtbl;
-}
-
-struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
- .alloc = v2_alloc_pgtable,
- .free = v2_free_pgtable,
-};
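The deleted v2 format is the x86 long-mode layout: present/RW/user/accessed/dirty flags in the low byte, with PSE marking 2 MiB and 1 GiB leaves. A standalone sketch of the encoding that set_pte_attr() performed, built from the bit definitions removed above:

    #include <stdint.h>
    #include <stdio.h>

    #define PG_PRESENT (1ULL << 0)
    #define PG_RW      (1ULL << 1)
    #define PG_USER    (1ULL << 2)
    #define PG_ACCESS  (1ULL << 5)
    #define PG_DIRTY   (1ULL << 6)
    #define PG_PSE     (1ULL << 7) /* large-page leaf */
    #define ADDR_MASK  0x000FFFFFFFFFF000ULL
    #define SZ_2M      (1ULL << 21)

    static uint64_t make_pte(uint64_t paddr, uint64_t pg_size, int writable)
    {
        uint64_t pte = (paddr & ADDR_MASK) | PG_PRESENT | PG_USER |
                       PG_ACCESS | PG_DIRTY;

        if (writable)
            pte |= PG_RW;
        if (pg_size >= SZ_2M) /* 2M and 1G leaves set PSE */
            pte |= PG_PSE;
        return pte;
    }

    int main(void)
    {
        printf("pte = %#llx\n",
               (unsigned long long)make_pte(0x40000000, SZ_2M, 1));
        return 0;
    }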
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index eb348c63a8d0..9f1d56a5e145 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -14,6 +14,7 @@
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/debugfs.h>
#include <linux/scatterlist.h>
#include <linux/dma-map-ops.h>
@@ -29,7 +30,6 @@
#include <linux/msi.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
-#include <linux/io-pgtable.h>
#include <linux/cc_platform.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
@@ -40,9 +40,9 @@
#include <asm/gart.h>
#include <asm/dma.h>
#include <uapi/linux/iommufd.h>
+#include <linux/generic_pt/iommu.h>
#include "amd_iommu.h"
-#include "../dma-iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
@@ -59,7 +59,6 @@ LIST_HEAD(hpet_map);
LIST_HEAD(acpihid_map);
const struct iommu_ops amd_iommu_ops;
-static const struct iommu_dirty_ops amd_dirty_ops;
int amd_iommu_max_glx_val = -1;
@@ -69,15 +68,22 @@ int amd_iommu_max_glx_val = -1;
*/
DEFINE_IDA(pdom_ids);
-static int amd_iommu_attach_device(struct iommu_domain *dom,
- struct device *dev);
+static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
+ struct iommu_domain *old);
static void set_dte_entry(struct amd_iommu *iommu,
- struct iommu_dev_data *dev_data);
+ struct iommu_dev_data *dev_data,
+ phys_addr_t top_paddr, unsigned int top_level);
+
+static void amd_iommu_change_top(struct pt_iommu *iommu_table,
+ phys_addr_t top_paddr, unsigned int top_level);
static void iommu_flush_dte_sync(struct amd_iommu *iommu, u16 devid);
static struct iommu_dev_data *find_dev_data(struct amd_iommu *iommu, u16 devid);
+static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain);
+static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
+ bool enable);
/****************************************************************************
*
@@ -265,7 +271,7 @@ static inline int get_acpihid_device_id(struct device *dev,
return -EINVAL;
if (fw_bug)
dev_err_once(dev, FW_BUG "No ACPI device matched UID, but %d device%s matched HID.\n",
- hid_count, hid_count > 1 ? "s" : "");
+ hid_count, str_plural(hid_count));
if (hid_count > 1)
return -EINVAL;
if (entry)
@@ -1156,6 +1162,25 @@ irqreturn_t amd_iommu_int_handler(int irq, void *data)
*
****************************************************************************/
+static void dump_command_buffer(struct amd_iommu *iommu)
+{
+ struct iommu_cmd *cmd;
+ u32 head, tail;
+ int i;
+
+ head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
+ pr_err("CMD Buffer head=%llu tail=%llu\n", MMIO_CMD_BUFFER_HEAD(head),
+ MMIO_CMD_BUFFER_TAIL(tail));
+
+ for (i = 0; i < CMD_BUFFER_ENTRIES; i++) {
+ cmd = (struct iommu_cmd *)(iommu->cmd_buf + i * sizeof(*cmd));
+ pr_err("%3d: %08x %08x %08x %08x\n", i, cmd->data[0], cmd->data[1], cmd->data[2],
+ cmd->data[3]);
+ }
+}
+
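dump_command_buffer() prints the decoded head/tail indices and then every ring entry. Given those indices, the number of commands the IOMMU has not yet consumed in a power-of-two ring follows from the usual wraparound arithmetic; a hedged standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define ENTRIES 512U /* CMD_BUFFER_ENTRIES, a power of two */

    /* Commands queued but not yet consumed (tail chases head). */
    static uint32_t ring_pending(uint32_t head, uint32_t tail)
    {
        return (tail - head) & (ENTRIES - 1);
    }

    int main(void)
    {
        printf("%u pending\n", ring_pending(510, 3)); /* wraps: 5 */
        return 0;
    }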
static int wait_on_sem(struct amd_iommu *iommu, u64 data)
{
int i = 0;
@@ -1166,7 +1191,14 @@ static int wait_on_sem(struct amd_iommu *iommu, u64 data)
}
if (i == LOOP_TIMEOUT) {
- pr_alert("Completion-Wait loop timed out\n");
+
+ pr_alert("IOMMU %04x:%02x:%02x.%01x: Completion-Wait loop timed out\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(iommu->devid),
+ PCI_SLOT(iommu->devid), PCI_FUNC(iommu->devid));
+
+ if (amd_iommu_dump)
+ DO_ONCE_LITE(dump_command_buffer, iommu);
+
return -EIO;
}
@@ -1195,7 +1227,7 @@ static void build_completion_wait(struct iommu_cmd *cmd,
struct amd_iommu *iommu,
u64 data)
{
- u64 paddr = iommu_virt_to_phys((void *)iommu->cmd_sem);
+ u64 paddr = iommu->cmd_sem_paddr;
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
@@ -1755,42 +1787,6 @@ static void dev_flush_pasid_all(struct iommu_dev_data *dev_data,
CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
}
-/* Flush the not present cache if it exists */
-static void domain_flush_np_cache(struct protection_domain *domain,
- dma_addr_t iova, size_t size)
-{
- if (unlikely(amd_iommu_np_cache)) {
- unsigned long flags;
-
- spin_lock_irqsave(&domain->lock, flags);
- amd_iommu_domain_flush_pages(domain, iova, size);
- spin_unlock_irqrestore(&domain->lock, flags);
- }
-}
-
-
-/*
- * This function flushes the DTEs for all devices in domain
- */
-void amd_iommu_update_and_flush_device_table(struct protection_domain *domain)
-{
- struct iommu_dev_data *dev_data;
-
- lockdep_assert_held(&domain->lock);
-
- list_for_each_entry(dev_data, &domain->dev_list, list) {
- struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
-
- set_dte_entry(iommu, dev_data);
- clone_aliases(iommu, dev_data->dev);
- }
-
- list_for_each_entry(dev_data, &domain->dev_list, list)
- device_flush_dte(dev_data);
-
- domain_flush_complete(domain);
-}
-
int amd_iommu_complete_ppr(struct device *dev, u32 pasid, int status, int tag)
{
struct iommu_dev_data *dev_data;
@@ -2050,7 +2046,8 @@ static void set_dte_gcr3_table(struct amd_iommu *iommu,
}
static void set_dte_entry(struct amd_iommu *iommu,
- struct iommu_dev_data *dev_data)
+ struct iommu_dev_data *dev_data,
+ phys_addr_t top_paddr, unsigned int top_level)
{
u16 domid;
u32 old_domid;
@@ -2059,19 +2056,36 @@ static void set_dte_entry(struct amd_iommu *iommu,
struct protection_domain *domain = dev_data->domain;
struct gcr3_tbl_info *gcr3_info = &dev_data->gcr3_info;
struct dev_table_entry *dte = &get_dev_table(iommu)[dev_data->devid];
+ struct pt_iommu_amdv1_hw_info pt_info;
+
+ make_clear_dte(dev_data, dte, &new);
if (gcr3_info && gcr3_info->gcr3_tbl)
domid = dev_data->gcr3_info.domid;
- else
+ else {
domid = domain->id;
- make_clear_dte(dev_data, dte, &new);
-
- if (domain->iop.mode != PAGE_MODE_NONE)
- new.data[0] |= iommu_virt_to_phys(domain->iop.root);
+ if (domain->domain.type & __IOMMU_DOMAIN_PAGING) {
+ /*
+ * When updating the IO pagetable, the new top and level
+ * are provided as parameters. For other operations, e.g.
+ * device attach, retrieve the current pagetable info
+ * via the IOMMU PT API.
+ */
+ if (top_paddr) {
+ pt_info.host_pt_root = top_paddr;
+ pt_info.mode = top_level + 1;
+ } else {
+ WARN_ON(top_paddr || top_level);
+ pt_iommu_amdv1_hw_info(&domain->amdv1,
+ &pt_info);
+ }
- new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
- << DEV_ENTRY_MODE_SHIFT;
+ new.data[0] |= __sme_set(pt_info.host_pt_root) |
+ (pt_info.mode & DEV_ENTRY_MODE_MASK)
+ << DEV_ENTRY_MODE_SHIFT;
+ }
+ }
new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;
@@ -2137,7 +2151,7 @@ static void dev_update_dte(struct iommu_dev_data *dev_data, bool set)
struct amd_iommu *iommu = get_amd_iommu_from_dev(dev_data->dev);
if (set)
- set_dte_entry(iommu, dev_data);
+ set_dte_entry(iommu, dev_data, 0, 0);
else
clear_dte_entry(iommu, dev_data);
@@ -2155,6 +2169,7 @@ static int init_gcr3_table(struct iommu_dev_data *dev_data,
{
struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
int max_pasids = dev_data->max_pasids;
+ struct pt_iommu_x86_64_hw_info pt_info;
int ret = 0;
/*
@@ -2177,7 +2192,8 @@ static int init_gcr3_table(struct iommu_dev_data *dev_data,
if (!pdom_is_v2_pgtbl_mode(pdom))
return ret;
- ret = update_gcr3(dev_data, 0, iommu_virt_to_phys(pdom->iop.pgd), true);
+ pt_iommu_x86_64_hw_info(&pdom->amdv2, &pt_info);
+ ret = update_gcr3(dev_data, 0, __sme_set(pt_info.gcr3_pt), true);
if (ret)
free_gcr3_table(&dev_data->gcr3_info);
@@ -2499,94 +2515,240 @@ struct protection_domain *protection_domain_alloc(void)
return domain;
}
-static int pdom_setup_pgtable(struct protection_domain *domain,
- struct device *dev)
+static bool amd_iommu_hd_support(struct amd_iommu *iommu)
+{
+ if (amd_iommu_hatdis)
+ return false;
+
+ return iommu && (iommu->features & FEATURE_HDSUP);
+}
+
+static spinlock_t *amd_iommu_get_top_lock(struct pt_iommu *iommupt)
{
- struct io_pgtable_ops *pgtbl_ops;
- enum io_pgtable_fmt fmt;
+ struct protection_domain *pdom =
+ container_of(iommupt, struct protection_domain, iommu);
- switch (domain->pd_mode) {
- case PD_MODE_V1:
- fmt = AMD_IOMMU_V1;
- break;
- case PD_MODE_V2:
- fmt = AMD_IOMMU_V2;
- break;
- case PD_MODE_NONE:
- WARN_ON_ONCE(1);
- return -EPERM;
+ return &pdom->lock;
+}
+
+/*
+ * Update all HW references to the domain with a new pgtable configuration.
+ */
+static void amd_iommu_change_top(struct pt_iommu *iommu_table,
+ phys_addr_t top_paddr, unsigned int top_level)
+{
+ struct protection_domain *pdom =
+ container_of(iommu_table, struct protection_domain, iommu);
+ struct iommu_dev_data *dev_data;
+
+ lockdep_assert_held(&pdom->lock);
+
+ /* Update the DTE for all devices attached to this domain */
+ list_for_each_entry(dev_data, &pdom->dev_list, list) {
+ struct amd_iommu *iommu = rlookup_amd_iommu(dev_data->dev);
+
+ /* Update the HW references with the new level and top ptr */
+ set_dte_entry(iommu, dev_data, top_paddr, top_level);
+ clone_aliases(iommu, dev_data->dev);
}
- domain->iop.pgtbl.cfg.amd.nid = dev_to_node(dev);
- pgtbl_ops = alloc_io_pgtable_ops(fmt, &domain->iop.pgtbl.cfg, domain);
- if (!pgtbl_ops)
- return -ENOMEM;
+ list_for_each_entry(dev_data, &pdom->dev_list, list)
+ device_flush_dte(dev_data);
+
+ domain_flush_complete(pdom);
+}
+
+/*
+ * amd_iommu_iotlb_sync_map() is used to generate flushes for non-present to
+ * present (i.e. mapping) operations. It is a NOP if the IOMMU doesn't have
+ * non-present caching (like hypervisor shadowing).
+ */
+static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+ unsigned long iova, size_t size)
+{
+ struct protection_domain *domain = to_pdomain(dom);
+ unsigned long flags;
+ if (likely(!amd_iommu_np_cache))
+ return 0;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ amd_iommu_domain_flush_pages(domain, iova, size);
+ spin_unlock_irqrestore(&domain->lock, flags);
return 0;
}
-static inline u64 dma_max_address(enum protection_domain_mode pgtable)
+static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
- if (pgtable == PD_MODE_V1)
- return PM_LEVEL_SIZE(amd_iommu_hpt_level);
+ struct protection_domain *dom = to_pdomain(domain);
+ unsigned long flags;
- /*
- * V2 with 4/5 level page table. Note that "2.2.6.5 AMD64 4-Kbyte Page
- * Translation" shows that the V2 table sign extends the top of the
- * address space creating a reserved region in the middle of the
- * translation, just like the CPU does. Further Vasant says the docs are
- * incomplete and this only applies to non-zero PASIDs. If the AMDv2
- * page table is assigned to the 0 PASID then there is no sign extension
- * check.
- *
- * Since the IOMMU must have a fixed geometry, and the core code does
- * not understand sign extended addressing, we have to chop off the high
- * bit to get consistent behavior with attachments of the domain to any
- * PASID.
- */
- return ((1ULL << (PM_LEVEL_SHIFT(amd_iommu_gpt_level) - 1)) - 1);
+ spin_lock_irqsave(&dom->lock, flags);
+ amd_iommu_domain_flush_all(dom);
+ spin_unlock_irqrestore(&dom->lock, flags);
}
-static bool amd_iommu_hd_support(struct amd_iommu *iommu)
+static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
{
- if (amd_iommu_hatdis)
- return false;
+ struct protection_domain *dom = to_pdomain(domain);
+ unsigned long flags;
- return iommu && (iommu->features & FEATURE_HDSUP);
+ spin_lock_irqsave(&dom->lock, flags);
+ amd_iommu_domain_flush_pages(dom, gather->start,
+ gather->end - gather->start + 1);
+ spin_unlock_irqrestore(&dom->lock, flags);
+ iommu_put_pages_list(&gather->freelist);
}
-static struct iommu_domain *
-do_iommu_domain_alloc(struct device *dev, u32 flags,
- enum protection_domain_mode pgtable)
+static const struct pt_iommu_driver_ops amd_hw_driver_ops_v1 = {
+ .get_top_lock = amd_iommu_get_top_lock,
+ .change_top = amd_iommu_change_top,
+};
+
+static const struct iommu_domain_ops amdv1_ops = {
+ IOMMU_PT_DOMAIN_OPS(amdv1),
+ .iotlb_sync_map = amd_iommu_iotlb_sync_map,
+ .flush_iotlb_all = amd_iommu_flush_iotlb_all,
+ .iotlb_sync = amd_iommu_iotlb_sync,
+ .attach_dev = amd_iommu_attach_device,
+ .free = amd_iommu_domain_free,
+ .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
+};
+
+static const struct iommu_dirty_ops amdv1_dirty_ops = {
+ IOMMU_PT_DIRTY_OPS(amdv1),
+ .set_dirty_tracking = amd_iommu_set_dirty_tracking,
+};
+
+static struct iommu_domain *amd_iommu_domain_alloc_paging_v1(struct device *dev,
+ u32 flags)
{
- bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
- struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
+ struct pt_iommu_amdv1_cfg cfg = {};
struct protection_domain *domain;
int ret;
+ if (amd_iommu_hatdis)
+ return ERR_PTR(-EOPNOTSUPP);
+
domain = protection_domain_alloc();
if (!domain)
return ERR_PTR(-ENOMEM);
- domain->pd_mode = pgtable;
- ret = pdom_setup_pgtable(domain, dev);
+ domain->pd_mode = PD_MODE_V1;
+ domain->iommu.driver_ops = &amd_hw_driver_ops_v1;
+ domain->iommu.nid = dev_to_node(dev);
+ if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+ domain->domain.dirty_ops = &amdv1_dirty_ops;
+
+ /*
+ * Someday FORCE_COHERENCE should be set by
+ * amd_iommu_enforce_cache_coherency() like VT-d does.
+ */
+ cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
+ BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
+ BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
+
+ /*
+ * AMD's IOMMU can flush as many pages as necessary in a single flush.
+ * Unless we run in a virtual machine, which can be inferred according
+ * to whether "non-present cache" is on, it is probably best to prefer
+ * (potentially) too extensive TLB flushing (i.e., more misses) over
+ * multiple TLB flushes (i.e., more flushes). For virtual machines the
+ * hypervisor needs to synchronize the host IOMMU PTEs with those of
+ * the guest, and the trade-off is different: unnecessary TLB flushes
+ * should be avoided.
+ */
+ if (amd_iommu_np_cache)
+ cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
+ else
+ cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
+
+ cfg.common.hw_max_vasz_lg2 =
+ min(64, (amd_iommu_hpt_level - 1) * 9 + 21);
+ cfg.common.hw_max_oasz_lg2 = 52;
+ cfg.starting_level = 2;
+ domain->domain.ops = &amdv1_ops;
+
+ ret = pt_iommu_amdv1_init(&domain->amdv1, &cfg, GFP_KERNEL);
if (ret) {
- pdom_id_free(domain->id);
- kfree(domain);
+ amd_iommu_domain_free(&domain->domain);
return ERR_PTR(ret);
}
- domain->domain.geometry.aperture_start = 0;
- domain->domain.geometry.aperture_end = dma_max_address(pgtable);
- domain->domain.geometry.force_aperture = true;
- domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
+ /*
+ * Narrow the supported page sizes to those selected by the kernel
+ * command line.
+ */
+ domain->domain.pgsize_bitmap &= amd_iommu_pgsize_bitmap;
+ return &domain->domain;
+}
- domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
- domain->domain.ops = iommu->iommu.ops->default_domain_ops;
+static const struct iommu_domain_ops amdv2_ops = {
+ IOMMU_PT_DOMAIN_OPS(x86_64),
+ .iotlb_sync_map = amd_iommu_iotlb_sync_map,
+ .flush_iotlb_all = amd_iommu_flush_iotlb_all,
+ .iotlb_sync = amd_iommu_iotlb_sync,
+ .attach_dev = amd_iommu_attach_device,
+ .free = amd_iommu_domain_free,
+ /*
+ * Note the AMDv2 page table format does not support a Force Coherency
+ * bit, so enforce_cache_coherency should not be set. However VFIO is
+ * not prepared to handle a case where some domains will support
+ * enforcement and others do not. VFIO and iommufd will have to be fixed
+ * before it can fully use the V2 page table. See the comment in
+ * iommufd_hwpt_paging_alloc(). For now leave things as they have
+ * historically been and lie about enforce_cache_coherency.
+ */
+ .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
+};
- if (dirty_tracking)
- domain->domain.dirty_ops = &amd_dirty_ops;
+static struct iommu_domain *amd_iommu_domain_alloc_paging_v2(struct device *dev,
+ u32 flags)
+{
+ struct pt_iommu_x86_64_cfg cfg = {};
+ struct protection_domain *domain;
+ int ret;
+ if (!amd_iommu_v2_pgtbl_supported())
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = protection_domain_alloc();
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->pd_mode = PD_MODE_V2;
+ domain->iommu.nid = dev_to_node(dev);
+
+ cfg.common.features = BIT(PT_FEAT_X86_64_AMD_ENCRYPT_TABLES);
+ if (amd_iommu_np_cache)
+ cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS);
+ else
+ cfg.common.features |= BIT(PT_FEAT_FLUSH_RANGE);
+
+ /*
+ * The v2 table behaves differently if it is attached to PASID 0 vs a
+ * non-zero PASID. On PASID 0 it has no sign extension and the full
+ * 57/48 bits decode the lower addresses. Otherwise it behaves like a
+ * normal sign-extended x86 page table. Since we want the domain to work
+ * in both modes, the top bit is removed and PT_FEAT_SIGN_EXTEND is not
+ * set, which creates a table that is compatible with both modes.
+ */
+ if (amd_iommu_gpt_level == PAGE_MODE_5_LEVEL) {
+ cfg.common.hw_max_vasz_lg2 = 56;
+ cfg.top_level = 4;
+ } else {
+ cfg.common.hw_max_vasz_lg2 = 47;
+ cfg.top_level = 3;
+ }
+ cfg.common.hw_max_oasz_lg2 = 52;
+ domain->domain.ops = &amdv2_ops;
+
+ ret = pt_iommu_x86_64_init(&domain->amdv2, &cfg, GFP_KERNEL);
+ if (ret) {
+ amd_iommu_domain_free(&domain->domain);
+ return ERR_PTR(ret);
+ }
return &domain->domain;
}
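To make the address-width arithmetic in the two allocators concrete, here is a minimal standalone sketch (not kernel code; both helpers are hypothetical). Each AMDv1 table level resolves 9 IOVA bits above the 12-bit page offset, so a tree rooted at hpt_level decodes (hpt_level - 1) * 9 + 21 bits, capped at 64 because the level-6 top table only decodes IOVA bits [63:57]; the v2 path simply drops the top translated bit of the 57/48-bit x86 layout:

#include <stdio.h>

/* hypothetical helpers mirroring the formulas above */
static unsigned int amdv1_max_va_bits(unsigned int hpt_level)
{
	unsigned int bits = (hpt_level - 1) * 9 + 21;

	return bits < 64 ? bits : 64;
}

static unsigned int amdv2_max_va_bits(int five_level)
{
	/* 57/48 translated bits minus the removed top bit */
	return (five_level ? 57 : 48) - 1;
}

int main(void)
{
	for (unsigned int level = 1; level <= 6; level++)
		printf("v1 hpt_level %u -> %u VA bits\n",
		       level, amdv1_max_va_bits(level));
	/* prints 21, 30, 39, 48, 57, 64 */
	printf("v2 4-level -> %u, 5-level -> %u\n",
	       amdv2_max_va_bits(0), amdv2_max_va_bits(1));
	/* prints 47 and 56, matching hw_max_vasz_lg2 above */
	return 0;
}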
@@ -2607,15 +2769,27 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
/* Allocate domain with v1 page table for dirty tracking */
if (!amd_iommu_hd_support(iommu))
break;
- return do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
+ return amd_iommu_domain_alloc_paging_v1(dev, flags);
case IOMMU_HWPT_ALLOC_PASID:
/* Allocate domain with v2 page table if IOMMU supports PASID. */
if (!amd_iommu_pasid_supported())
break;
- return do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
- case 0:
+ return amd_iommu_domain_alloc_paging_v2(dev, flags);
+ case 0: {
+ struct iommu_domain *ret;
+
/* If nothing specific is required use the kernel commandline default */
- return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
+ if (amd_iommu_pgtable == PD_MODE_V1) {
+ ret = amd_iommu_domain_alloc_paging_v1(dev, flags);
+ if (ret != ERR_PTR(-EOPNOTSUPP))
+ return ret;
+ return amd_iommu_domain_alloc_paging_v2(dev, flags);
+ }
+ ret = amd_iommu_domain_alloc_paging_v2(dev, flags);
+ if (ret != ERR_PTR(-EOPNOTSUPP))
+ return ret;
+ return amd_iommu_domain_alloc_paging_v1(dev, flags);
+ }
default:
break;
}
@@ -2627,14 +2801,14 @@ void amd_iommu_domain_free(struct iommu_domain *dom)
struct protection_domain *domain = to_pdomain(dom);
WARN_ON(!list_empty(&domain->dev_list));
- if (domain->domain.type & __IOMMU_DOMAIN_PAGING)
- free_io_pgtable_ops(&domain->iop.pgtbl.ops);
+ pt_iommu_deinit(&domain->iommu);
pdom_id_free(domain->id);
kfree(domain);
}
static int blocked_domain_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
@@ -2684,16 +2858,8 @@ void amd_iommu_init_identity_domain(void)
protection_domain_init(&identity_domain);
}
-/* Same as blocked domain except it supports only ops->attach_dev() */
-static struct iommu_domain release_domain = {
- .type = IOMMU_DOMAIN_BLOCKED,
- .ops = &(const struct iommu_domain_ops) {
- .attach_dev = blocked_domain_attach_device,
- }
-};
-
-static int amd_iommu_attach_device(struct iommu_domain *dom,
- struct device *dev)
+static int amd_iommu_attach_device(struct iommu_domain *dom, struct device *dev,
+ struct iommu_domain *old)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
struct protection_domain *domain = to_pdomain(dom);
@@ -2733,93 +2899,6 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
return ret;
}
-static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
- unsigned long iova, size_t size)
-{
- struct protection_domain *domain = to_pdomain(dom);
- struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
-
- if (ops->map_pages)
- domain_flush_np_cache(domain, iova, size);
- return 0;
-}
-
-static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, size_t pgsize, size_t pgcount,
- int iommu_prot, gfp_t gfp, size_t *mapped)
-{
- struct protection_domain *domain = to_pdomain(dom);
- struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
- int prot = 0;
- int ret = -EINVAL;
-
- if ((domain->pd_mode == PD_MODE_V1) &&
- (domain->iop.mode == PAGE_MODE_NONE))
- return -EINVAL;
-
- if (iommu_prot & IOMMU_READ)
- prot |= IOMMU_PROT_IR;
- if (iommu_prot & IOMMU_WRITE)
- prot |= IOMMU_PROT_IW;
-
- if (ops->map_pages) {
- ret = ops->map_pages(ops, iova, paddr, pgsize,
- pgcount, prot, gfp, mapped);
- }
-
- return ret;
-}
-
-static void amd_iommu_iotlb_gather_add_page(struct iommu_domain *domain,
- struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t size)
-{
- /*
- * AMD's IOMMU can flush as many pages as necessary in a single flush.
- * Unless we run in a virtual machine, which can be inferred according
- * to whether "non-present cache" is on, it is probably best to prefer
- * (potentially) too extensive TLB flushing (i.e., more misses) over
- * mutliple TLB flushes (i.e., more flushes). For virtual machines the
- * hypervisor needs to synchronize the host IOMMU PTEs with those of
- * the guest, and the trade-off is different: unnecessary TLB flushes
- * should be avoided.
- */
- if (amd_iommu_np_cache &&
- iommu_iotlb_gather_is_disjoint(gather, iova, size))
- iommu_iotlb_sync(domain, gather);
-
- iommu_iotlb_gather_add_range(gather, iova, size);
-}
-
-static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
- size_t pgsize, size_t pgcount,
- struct iommu_iotlb_gather *gather)
-{
- struct protection_domain *domain = to_pdomain(dom);
- struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
- size_t r;
-
- if ((domain->pd_mode == PD_MODE_V1) &&
- (domain->iop.mode == PAGE_MODE_NONE))
- return 0;
-
- r = (ops->unmap_pages) ? ops->unmap_pages(ops, iova, pgsize, pgcount, NULL) : 0;
-
- if (r)
- amd_iommu_iotlb_gather_add_page(dom, gather, iova, r);
-
- return r;
-}
-
-static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
- dma_addr_t iova)
-{
- struct protection_domain *domain = to_pdomain(dom);
- struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
-
- return ops->iova_to_phys(ops, iova);
-}
-
static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
{
switch (cap) {
@@ -2886,28 +2965,6 @@ static int amd_iommu_set_dirty_tracking(struct iommu_domain *domain,
return 0;
}
-static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- unsigned long flags,
- struct iommu_dirty_bitmap *dirty)
-{
- struct protection_domain *pdomain = to_pdomain(domain);
- struct io_pgtable_ops *ops = &pdomain->iop.pgtbl.ops;
- unsigned long lflags;
-
- if (!ops || !ops->read_and_clear_dirty)
- return -EOPNOTSUPP;
-
- spin_lock_irqsave(&pdomain->lock, lflags);
- if (!pdomain->dirty_tracking && dirty->bitmap) {
- spin_unlock_irqrestore(&pdomain->lock, lflags);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&pdomain->lock, lflags);
-
- return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
-}
-
static void amd_iommu_get_resv_regions(struct device *dev,
struct list_head *head)
{
@@ -2977,28 +3034,6 @@ static bool amd_iommu_is_attach_deferred(struct device *dev)
return dev_data->defer_attach;
}
-static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
-{
- struct protection_domain *dom = to_pdomain(domain);
- unsigned long flags;
-
- spin_lock_irqsave(&dom->lock, flags);
- amd_iommu_domain_flush_all(dom);
- spin_unlock_irqrestore(&dom->lock, flags);
-}
-
-static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
- struct iommu_iotlb_gather *gather)
-{
- struct protection_domain *dom = to_pdomain(domain);
- unsigned long flags;
-
- spin_lock_irqsave(&dom->lock, flags);
- amd_iommu_domain_flush_pages(dom, gather->start,
- gather->end - gather->start + 1);
- spin_unlock_irqrestore(&dom->lock, flags);
-}
-
static int amd_iommu_def_domain_type(struct device *dev)
{
struct iommu_dev_data *dev_data;
@@ -3033,15 +3068,10 @@ static bool amd_iommu_enforce_cache_coherency(struct iommu_domain *domain)
return true;
}
-static const struct iommu_dirty_ops amd_dirty_ops = {
- .set_dirty_tracking = amd_iommu_set_dirty_tracking,
- .read_and_clear_dirty = amd_iommu_read_and_clear_dirty,
-};
-
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.blocked_domain = &blocked_domain,
- .release_domain = &release_domain,
+ .release_domain = &blocked_domain,
.identity_domain = &identity_domain.domain,
.domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
.domain_alloc_sva = amd_iommu_domain_alloc_sva,
@@ -3052,17 +3082,6 @@ const struct iommu_ops amd_iommu_ops = {
.is_attach_deferred = amd_iommu_is_attach_deferred,
.def_domain_type = amd_iommu_def_domain_type,
.page_response = amd_iommu_page_response,
- .default_domain_ops = &(const struct iommu_domain_ops) {
- .attach_dev = amd_iommu_attach_device,
- .map_pages = amd_iommu_map_pages,
- .unmap_pages = amd_iommu_unmap_pages,
- .iotlb_sync_map = amd_iommu_iotlb_sync_map,
- .iova_to_phys = amd_iommu_iova_to_phys,
- .flush_iotlb_all = amd_iommu_flush_iotlb_all,
- .iotlb_sync = amd_iommu_iotlb_sync,
- .free = amd_iommu_domain_free,
- .enforce_cache_coherency = amd_iommu_enforce_cache_coherency,
- }
};
#ifdef CONFIG_IRQ_REMAP
@@ -3353,7 +3372,7 @@ static int __modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
static int modify_irte_ga(struct amd_iommu *iommu, u16 devid, int index,
struct irte_ga *irte)
{
- bool ret;
+ int ret;
ret = __modify_irte_ga(iommu, devid, index, irte);
if (ret)
@@ -4071,3 +4090,5 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
return 0;
}
#endif
+
+MODULE_IMPORT_NS("GENERIC_PT_IOMMU");
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 190f28d76615..83a5aabcd15d 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -122,6 +122,8 @@
#define DART_T8110_ERROR_ADDR_LO 0x170
#define DART_T8110_ERROR_ADDR_HI 0x174
+#define DART_T8110_ERROR_STREAMS 0x1c0
+
#define DART_T8110_PROTECT 0x200
#define DART_T8110_UNPROTECT 0x204
#define DART_T8110_PROTECT_LOCK 0x208
@@ -133,6 +135,7 @@
#define DART_T8110_TCR 0x1000
#define DART_T8110_TCR_REMAP GENMASK(11, 8)
#define DART_T8110_TCR_REMAP_EN BIT(7)
+#define DART_T8110_TCR_FOUR_LEVEL BIT(3)
#define DART_T8110_TCR_BYPASS_DAPF BIT(2)
#define DART_T8110_TCR_BYPASS_DART BIT(1)
#define DART_T8110_TCR_TRANSLATE_ENABLE BIT(0)
@@ -166,22 +169,23 @@ struct apple_dart_hw {
int max_sid_count;
- u64 lock;
- u64 lock_bit;
+ u32 lock;
+ u32 lock_bit;
- u64 error;
+ u32 error;
- u64 enable_streams;
+ u32 enable_streams;
- u64 tcr;
- u64 tcr_enabled;
- u64 tcr_disabled;
- u64 tcr_bypass;
+ u32 tcr;
+ u32 tcr_enabled;
+ u32 tcr_disabled;
+ u32 tcr_bypass;
+ u32 tcr_4level;
- u64 ttbr;
- u64 ttbr_valid;
- u64 ttbr_addr_field_shift;
- u64 ttbr_shift;
+ u32 ttbr;
+ u32 ttbr_valid;
+ u32 ttbr_addr_field_shift;
+ u32 ttbr_shift;
int ttbr_count;
};
@@ -217,6 +221,7 @@ struct apple_dart {
u32 pgsize;
u32 num_streams;
u32 supports_bypass : 1;
+ u32 four_level : 1;
struct iommu_group *sid2group[DART_MAX_STREAMS];
struct iommu_device iommu;
@@ -305,13 +310,19 @@ static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
}
static void
-apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
+apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map, int levels)
{
struct apple_dart *dart = stream_map->dart;
+ u32 tcr = dart->hw->tcr_enabled;
int sid;
+ if (levels == 4)
+ tcr |= dart->hw->tcr_4level;
+
+ WARN_ON(levels != 3 && levels != 4);
+ WARN_ON(levels == 4 && !dart->four_level);
for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
- writel(dart->hw->tcr_enabled, dart->regs + DART_TCR(dart, sid));
+ writel(tcr, dart->regs + DART_TCR(dart, sid));
}
static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
@@ -569,7 +580,8 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
for (; i < stream_map->dart->hw->ttbr_count; ++i)
apple_dart_hw_clear_ttbr(stream_map, i);
- apple_dart_hw_enable_translation(stream_map);
+ apple_dart_hw_enable_translation(stream_map,
+ pgtbl_cfg->apple_dart_cfg.n_levels);
stream_map->dart->hw->invalidate_tlb(stream_map);
}
@@ -614,7 +626,7 @@ static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
dart_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
dart_domain->domain.geometry.aperture_start = 0;
dart_domain->domain.geometry.aperture_end =
- (dma_addr_t)DMA_BIT_MASK(dart->ias);
+ (dma_addr_t)DMA_BIT_MASK(pgtbl_cfg.ias);
dart_domain->domain.geometry.force_aperture = true;
dart_domain->finalized = true;
@@ -660,7 +672,8 @@ static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
}
static int apple_dart_attach_dev_paging(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
int ret, i;
struct apple_dart_stream_map *stream_map;
@@ -681,7 +694,8 @@ static int apple_dart_attach_dev_paging(struct iommu_domain *domain,
}
static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct apple_dart_stream_map *stream_map;
@@ -705,7 +719,8 @@ static struct iommu_domain apple_dart_identity_domain = {
};
static int apple_dart_attach_dev_blocked(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct apple_dart_stream_map *stream_map;
@@ -790,6 +805,8 @@ static int apple_dart_of_xlate(struct device *dev,
struct apple_dart *cfg_dart;
int i, sid;
+ put_device(&iommu_pdev->dev);
+
if (args->args_count != 1)
return -EINVAL;
sid = args->args[0];
@@ -807,6 +824,8 @@ static int apple_dart_of_xlate(struct device *dev,
if (cfg_dart) {
if (cfg_dart->pgsize != dart->pgsize)
return -EINVAL;
+ if (cfg_dart->ias != dart->ias)
+ return -EINVAL;
}
cfg->supports_bypass &= dart->supports_bypass;
@@ -1077,6 +1096,9 @@ static irqreturn_t apple_dart_t8110_irq(int irq, void *dev)
error, stream_idx, error_code, fault_name, addr);
writel(error, dart->regs + DART_T8110_ERROR);
+ for (int i = 0; i < BITS_TO_U32(dart->num_streams); i++)
+ writel(U32_MAX, dart->regs + DART_T8110_ERROR_STREAMS + 4 * i);
+
return IRQ_HANDLED;
}
@@ -1137,6 +1159,7 @@ static int apple_dart_probe(struct platform_device *pdev)
dart->ias = FIELD_GET(DART_T8110_PARAMS3_VA_WIDTH, dart_params[2]);
dart->oas = FIELD_GET(DART_T8110_PARAMS3_PA_WIDTH, dart_params[2]);
dart->num_streams = FIELD_GET(DART_T8110_PARAMS4_NUM_SIDS, dart_params[3]);
+ dart->four_level = dart->ias > 36;
break;
}
@@ -1169,9 +1192,9 @@ static int apple_dart_probe(struct platform_device *pdev)
dev_info(
&pdev->dev,
- "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
+ "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d, AS %d -> %d] initialized\n",
dart->pgsize, dart->num_streams, dart->supports_bypass,
- dart->pgsize > PAGE_SIZE);
+ dart->pgsize > PAGE_SIZE, dart->ias, dart->oas);
return 0;
err_sysfs_remove:
@@ -1292,6 +1315,7 @@ static const struct apple_dart_hw apple_dart_hw_t8110 = {
.tcr_enabled = DART_T8110_TCR_TRANSLATE_ENABLE,
.tcr_disabled = 0,
.tcr_bypass = DART_T8110_TCR_BYPASS_DAPF | DART_T8110_TCR_BYPASS_DART,
+ .tcr_4level = DART_T8110_TCR_FOUR_LEVEL,
.ttbr = DART_T8110_TTBR,
.ttbr_valid = DART_T8110_TTBR_VALID,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index 8cd8929bbfdf..93fdadd07431 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -99,6 +99,8 @@ static void arm_smmu_make_nested_domain_ste(
int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
struct arm_smmu_nested_domain *nested_domain)
{
+ unsigned int cfg =
+ FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0]));
struct arm_smmu_vmaster *vmaster;
unsigned long vsid;
int ret;
@@ -107,8 +109,17 @@ int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
ret = iommufd_viommu_get_vdev_id(&nested_domain->vsmmu->core,
state->master->dev, &vsid);
- if (ret)
+ /*
+ * Attaching to a translating nested domain requires a vDEVICE to have
+ * been allocated first, as CD/ATS invalidations and vevents need a vSID
+ * to work properly. An abort/bypass domain is allowed to attach without
+ * a vmaster for the GBPA case.
+ */
+ if (ret) {
+ if (cfg == STRTAB_STE_0_CFG_ABORT ||
+ cfg == STRTAB_STE_0_CFG_BYPASS)
+ return 0;
return ret;
+ }
vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
if (!vmaster)
@@ -138,14 +149,15 @@ void arm_smmu_master_clear_vmaster(struct arm_smmu_master *master)
}
static int arm_smmu_attach_dev_nested(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old_domain)
{
struct arm_smmu_nested_domain *nested_domain =
to_smmu_nested_domain(domain);
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct arm_smmu_attach_state state = {
.master = master,
- .old_domain = iommu_get_domain_for_dev(dev),
+ .old_domain = old_domain,
.ssid = IOMMU_NO_PASID,
};
struct arm_smmu_ste ste;
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 2a8b46b948f0..d16d35c78c06 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1464,7 +1464,7 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
cd_table->l2.l1tab = dma_alloc_coherent(smmu->dev, l1size,
&cd_table->cdtab_dma,
GFP_KERNEL);
- if (!cd_table->l2.l2ptrs) {
+ if (!cd_table->l2.l1tab) {
ret = -ENOMEM;
goto err_free_l2ptrs;
}
@@ -3002,7 +3002,8 @@ void arm_smmu_attach_commit(struct arm_smmu_attach_state *state)
master->ats_enabled = state->ats_enabled;
}
-static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev,
+ struct iommu_domain *old_domain)
{
int ret = 0;
struct arm_smmu_ste target;
@@ -3010,7 +3011,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_attach_state state = {
- .old_domain = iommu_get_domain_for_dev(dev),
+ .old_domain = old_domain,
.ssid = IOMMU_NO_PASID,
};
struct arm_smmu_master *master;
@@ -3186,7 +3187,7 @@ static int arm_smmu_blocking_set_dev_pasid(struct iommu_domain *new_domain,
/*
* When the last user of the CD table goes away downgrade the STE back
- * to a non-cd_table one.
+ * to a non-cd_table one, by re-attaching its sid_domain.
*/
if (!arm_smmu_ssids_in_use(&master->cd_table)) {
struct iommu_domain *sid_domain =
@@ -3194,12 +3195,14 @@ static int arm_smmu_blocking_set_dev_pasid(struct iommu_domain *new_domain,
if (sid_domain->type == IOMMU_DOMAIN_IDENTITY ||
sid_domain->type == IOMMU_DOMAIN_BLOCKED)
- sid_domain->ops->attach_dev(sid_domain, dev);
+ sid_domain->ops->attach_dev(sid_domain, dev,
+ sid_domain);
}
return 0;
}
static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
+ struct iommu_domain *old_domain,
struct device *dev,
struct arm_smmu_ste *ste,
unsigned int s1dss)
@@ -3207,7 +3210,7 @@ static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct arm_smmu_attach_state state = {
.master = master,
- .old_domain = iommu_get_domain_for_dev(dev),
+ .old_domain = old_domain,
.ssid = IOMMU_NO_PASID,
};
@@ -3248,14 +3251,16 @@ static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
}
static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old_domain)
{
struct arm_smmu_ste ste;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
arm_smmu_master_clear_vmaster(master);
arm_smmu_make_bypass_ste(master->smmu, &ste);
- arm_smmu_attach_dev_ste(domain, dev, &ste, STRTAB_STE_1_S1DSS_BYPASS);
+ arm_smmu_attach_dev_ste(domain, old_domain, dev, &ste,
+ STRTAB_STE_1_S1DSS_BYPASS);
return 0;
}
@@ -3269,14 +3274,15 @@ static struct iommu_domain arm_smmu_identity_domain = {
};
static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old_domain)
{
struct arm_smmu_ste ste;
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
arm_smmu_master_clear_vmaster(master);
arm_smmu_make_abort_ste(&ste);
- arm_smmu_attach_dev_ste(domain, dev, &ste,
+ arm_smmu_attach_dev_ste(domain, old_domain, dev, &ste,
STRTAB_STE_1_S1DSS_TERMINATE);
return 0;
}
@@ -3582,12 +3588,6 @@ static void arm_smmu_release_device(struct device *dev)
WARN_ON(master->iopf_refcount);
- /* Put the STE back to what arm_smmu_init_strtab() sets */
- if (dev->iommu->require_direct)
- arm_smmu_attach_dev_identity(&arm_smmu_identity_domain, dev);
- else
- arm_smmu_attach_dev_blocked(&arm_smmu_blocked_domain, dev);
-
arm_smmu_disable_pasid(master);
arm_smmu_remove_master(master);
if (arm_smmu_cdtab_allocated(&master->cd_table))
@@ -3678,6 +3678,7 @@ static int arm_smmu_def_domain_type(struct device *dev)
static const struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
+ .release_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable,
.hw_info = arm_smmu_hw_info,
.domain_alloc_sva = arm_smmu_sva_domain_alloc,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 57c097e87613..573085349df3 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -367,6 +367,7 @@ static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain,
static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,adreno-gmu" },
+ { .compatible = "qcom,glymur-mdss" },
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
{ .compatible = "qcom,qcm2290-mdss" },
@@ -431,17 +432,19 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
/*
* Some platforms support more than the Arm SMMU architected maximum of
- * 128 stream matching groups. For unknown reasons, the additional
- * groups don't exhibit the same behavior as the architected registers,
- * so limit the groups to 128 until the behavior is fixed for the other
- * groups.
+ * 128 stream matching groups. The additional registers appear to have
+ * the same behavior as the architected registers in the hardware.
+ * However, on some firmware versions, the hypervisor does not
+ * correctly trap and emulate accesses to the additional registers,
+ * resulting in unexpected behavior.
+ *
+ * If there are more than 128 groups, use the last reliable group to
+ * detect if we need to apply the bypass quirk.
*/
- if (smmu->num_mapping_groups > 128) {
- dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
- smmu->num_mapping_groups = 128;
- }
-
- last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
+ if (smmu->num_mapping_groups > 128)
+ last_s2cr = ARM_SMMU_GR0_S2CR(127);
+ else
+ last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1);
/*
* With some firmware versions writes to S2CR of type FAULT are
@@ -464,6 +467,11 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
+
+ if (smmu->num_mapping_groups > 128) {
+ dev_notice(smmu->dev, "\tLimiting the stream matching groups to 128\n");
+ smmu->num_mapping_groups = 128;
+ }
}
for (i = 0; i < smmu->num_mapping_groups; i++) {
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index 4ced4b5bee4d..5e690cf85ec9 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1165,7 +1165,8 @@ static void arm_smmu_master_install_s2crs(struct arm_smmu_master_cfg *cfg,
}
}
-static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev,
+ struct iommu_domain *old)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -1234,7 +1235,8 @@ static int arm_smmu_attach_dev_type(struct device *dev,
}
static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
return arm_smmu_attach_dev_type(dev, S2CR_TYPE_BYPASS);
}
@@ -1249,7 +1251,8 @@ static struct iommu_domain arm_smmu_identity_domain = {
};
static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
return arm_smmu_attach_dev_type(dev, S2CR_TYPE_FAULT);
}
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index c5be95e56031..f69d9276dc55 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -359,7 +359,8 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
kfree(qcom_domain);
}
-static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+static int qcom_iommu_attach_dev(struct iommu_domain *domain,
+ struct device *dev, struct iommu_domain *old)
{
struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
@@ -388,18 +389,18 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
}
static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct qcom_iommu_domain *qcom_domain;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct qcom_iommu_dev *qcom_iommu = dev_iommu_priv_get(dev);
unsigned int i;
- if (domain == identity_domain || !domain)
+ if (old == identity_domain || !old)
return 0;
- qcom_domain = to_qcom_iommu_domain(domain);
+ qcom_domain = to_qcom_iommu_domain(old);
if (WARN_ON(!qcom_domain->iommu))
return -EINVAL;
@@ -565,14 +566,14 @@ static int qcom_iommu_of_xlate(struct device *dev,
qcom_iommu = platform_get_drvdata(iommu_pdev);
+ put_device(&iommu_pdev->dev);
+
/* make sure the asid specified in dt is valid, so we don't have
* to sanity check this elsewhere:
*/
if (WARN_ON(asid > qcom_iommu->max_asid) ||
- WARN_ON(qcom_iommu->ctxs[asid] == NULL)) {
- put_device(&iommu_pdev->dev);
+ WARN_ON(qcom_iommu->ctxs[asid] == NULL))
return -EINVAL;
- }
if (!dev_iommu_priv_get(dev)) {
dev_iommu_priv_set(dev, qcom_iommu);
@@ -581,10 +582,8 @@ static int qcom_iommu_of_xlate(struct device *dev,
* multiple different iommu devices. Multiple context
* banks are ok, but multiple devices are not:
*/
- if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) {
- put_device(&iommu_pdev->dev);
+ if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
return -EINVAL;
- }
}
return iommu_fwspec_add_ids(dev, &asid, 1);
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index ea2ef53bd4fe..c92088855450 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -724,7 +724,12 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, struct device *dev
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
unsigned long attrs)
{
- int prot = coherent ? IOMMU_CACHE : 0;
+ int prot;
+
+ if (attrs & DMA_ATTR_MMIO)
+ prot = IOMMU_MMIO;
+ else
+ prot = coherent ? IOMMU_CACHE : 0;
if (attrs & DMA_ATTR_PRIVILEGED)
prot |= IOMMU_PRIV;
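A hedged usage sketch of the new attribute: DMA_ATTR_MMIO now selects IOMMU_MMIO instead of IOMMU_CACHE and, further down in this file, suppresses CPU cache maintenance and swiotlb bouncing. The dma_map_phys() entry point is assumed from the same series that reworks iommu_dma_map_phys() below; the device and BAR address are illustrative:

/* Illustrative only: map a device BAR for peer-style DMA through the
 * phys-based path, assuming dma_map_phys() from this series. */
static dma_addr_t map_bar_for_dma(struct device *dev, phys_addr_t bar_phys,
				  size_t len)
{
	dma_addr_t dma;

	/* DMA_ATTR_MMIO: IOMMU_MMIO protection, no arch_sync_dma_*()
	 * cache maintenance, and no swiotlb bounce buffering. */
	dma = dma_map_phys(dev, bar_phys, len, DMA_TO_DEVICE, DMA_ATTR_MMIO);
	if (dma_mapping_error(dev, dma))
		return DMA_MAPPING_ERROR;
	return dma;
}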
@@ -1190,11 +1195,9 @@ static inline size_t iova_unaligned(struct iova_domain *iovad, phys_addr_t phys,
return iova_offset(iovad, phys | size);
}
-dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+dma_addr_t iommu_dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
{
- phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
int prot = dma_info_to_prot(dir, coherent, attrs);
struct iommu_domain *domain = iommu_get_dma_domain(dev);
@@ -1208,27 +1211,34 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
*/
if (dev_use_swiotlb(dev, size, dir) &&
iova_unaligned(iovad, phys, size)) {
+ if (attrs & DMA_ATTR_MMIO)
+ return DMA_MAPPING_ERROR;
+
phys = iommu_dma_map_swiotlb(dev, phys, size, dir, attrs);
if (phys == (phys_addr_t)DMA_MAPPING_ERROR)
return DMA_MAPPING_ERROR;
}
- if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
arch_sync_dma_for_device(phys, size, dir);
iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
- if (iova == DMA_MAPPING_ERROR)
+ if (iova == DMA_MAPPING_ERROR && !(attrs & DMA_ATTR_MMIO))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
return iova;
}
-void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+void iommu_dma_unmap_phys(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- struct iommu_domain *domain = iommu_get_dma_domain(dev);
phys_addr_t phys;
- phys = iommu_iova_to_phys(domain, dma_handle);
+ if (attrs & DMA_ATTR_MMIO) {
+ __iommu_dma_unmap(dev, dma_handle, size);
+ return;
+ }
+
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (WARN_ON(!phys))
return;
@@ -1341,7 +1351,7 @@ static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *s
int i;
for_each_sg(sg, s, nents, i)
- iommu_dma_unmap_page(dev, sg_dma_address(s),
+ iommu_dma_unmap_phys(dev, sg_dma_address(s),
sg_dma_len(s), dir, attrs);
}
@@ -1354,8 +1364,8 @@ static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
sg_dma_mark_swiotlb(sg);
for_each_sg(sg, s, nents, i) {
- sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
- s->offset, s->length, dir, attrs);
+ sg_dma_address(s) = iommu_dma_map_phys(dev, sg_phys(s),
+ s->length, dir, attrs);
if (sg_dma_address(s) == DMA_MAPPING_ERROR)
goto out_unmap;
sg_dma_len(s) = s->length;
@@ -1429,8 +1439,8 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
* as a bus address, __finalise_sg() will copy the dma
* address into the output segment.
*/
- s->dma_address = pci_p2pdma_bus_addr_map(&p2pdma_state,
- sg_phys(s));
+ s->dma_address = pci_p2pdma_bus_addr_map(
+ p2pdma_state.mem, sg_phys(s));
sg_dma_len(s) = sg->length;
sg_dma_mark_bus_address(s);
continue;
@@ -1546,20 +1556,6 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
__iommu_dma_unmap(dev, start, end - start);
}
-dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
- dma_get_mask(dev));
-}
-
-void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
- __iommu_dma_unmap(dev, handle, size);
-}
-
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
size_t alloc_size = PAGE_ALIGN(size);
@@ -1838,12 +1834,13 @@ static int __dma_iova_link(struct device *dev, dma_addr_t addr,
unsigned long attrs)
{
bool coherent = dev_is_dma_coherent(dev);
+ int prot = dma_info_to_prot(dir, coherent, attrs);
- if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ if (!coherent && !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
arch_sync_dma_for_device(phys, size, dir);
return iommu_map_nosync(iommu_get_dma_domain(dev), addr, phys, size,
- dma_info_to_prot(dir, coherent, attrs), GFP_ATOMIC);
+ prot, GFP_ATOMIC);
}
static int iommu_dma_iova_bounce_and_link(struct device *dev, dma_addr_t addr,
@@ -1949,9 +1946,13 @@ int dma_iova_link(struct device *dev, struct dma_iova_state *state,
return -EIO;
if (dev_use_swiotlb(dev, size, dir) &&
- iova_unaligned(iovad, phys, size))
+ iova_unaligned(iovad, phys, size)) {
+ if (attrs & DMA_ATTR_MMIO)
+ return -EPERM;
+
return iommu_dma_iova_link_swiotlb(dev, state, phys, offset,
size, dir, attrs);
+ }
return __dma_iova_link(dev, state->addr + offset - iova_start_pad,
phys - iova_start_pad,
@@ -2007,7 +2008,7 @@ static void iommu_dma_iova_unlink_range_slow(struct device *dev,
end - addr, iovad->granule - iova_start_pad);
if (!dev_is_dma_coherent(dev) &&
- !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO)))
arch_sync_dma_for_cpu(phys, len, dir);
swiotlb_tbl_unmap_single(dev, phys, len, dir, attrs);
@@ -2031,7 +2032,8 @@ static void __iommu_dma_iova_unlink(struct device *dev,
size_t unmapped;
if ((state->__size & DMA_IOVA_USE_SWIOTLB) ||
- (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)))
+ (!dev_is_dma_coherent(dev) &&
+ !(attrs & (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_MMIO))))
iommu_dma_iova_unlink_range_slow(dev, addr, size, dir, attrs);
iommu_iotlb_gather_init(&iotlb_gather);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index b6edd178fe25..b512c6b939ac 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -984,7 +984,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
}
static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct exynos_iommu_domain *domain;
@@ -1035,7 +1036,8 @@ static struct iommu_domain exynos_identity_domain = {
};
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
@@ -1044,7 +1046,7 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
unsigned long flags;
int err;
- err = exynos_iommu_identity_attach(&exynos_identity_domain, dev);
+ err = exynos_iommu_identity_attach(&exynos_identity_domain, dev, old);
if (err)
return err;
@@ -1429,8 +1431,6 @@ static void exynos_iommu_release_device(struct device *dev)
struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
struct sysmmu_drvdata *data;
- WARN_ON(exynos_iommu_identity_attach(&exynos_identity_domain, dev));
-
list_for_each_entry(data, &owner->controllers, owner_node)
device_link_del(data->link);
}
@@ -1446,17 +1446,14 @@ static int exynos_iommu_of_xlate(struct device *dev,
return -ENODEV;
data = platform_get_drvdata(sysmmu);
- if (!data) {
- put_device(&sysmmu->dev);
+ put_device(&sysmmu->dev);
+ if (!data)
return -ENODEV;
- }
if (!owner) {
owner = kzalloc(sizeof(*owner), GFP_KERNEL);
- if (!owner) {
- put_device(&sysmmu->dev);
+ if (!owner)
return -ENOMEM;
- }
INIT_LIST_HEAD(&owner->controllers);
mutex_init(&owner->rpm_lock);
@@ -1476,6 +1473,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
static const struct iommu_ops exynos_iommu_ops = {
.identity_domain = &exynos_identity_domain,
+ .release_domain = &exynos_identity_domain,
.domain_alloc_paging = exynos_iommu_domain_alloc_paging,
.device_group = generic_device_group,
.probe_device = exynos_iommu_probe_device,
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 5f08523f97cb..9664ef9840d2 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -238,7 +238,7 @@ static int update_domain_stash(struct fsl_dma_domain *dma_domain, u32 val)
}
static int fsl_pamu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
unsigned long flags;
@@ -298,9 +298,9 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
* switches to what looks like BLOCKING.
*/
static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct fsl_dma_domain *dma_domain;
const u32 *prop;
int len;
@@ -311,11 +311,11 @@ static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
* Hack to keep things working as they always have, only leaving an
* UNMANAGED domain makes it BLOCKING.
*/
- if (domain == platform_domain || !domain ||
- domain->type != IOMMU_DOMAIN_UNMANAGED)
+ if (old == platform_domain || !old ||
+ old->type != IOMMU_DOMAIN_UNMANAGED)
return 0;
- dma_domain = to_fsl_dma_domain(domain);
+ dma_domain = to_fsl_dma_domain(old);
/*
* Use LIODN of the PCI controller while detaching a
diff --git a/drivers/iommu/generic_pt/.kunitconfig b/drivers/iommu/generic_pt/.kunitconfig
new file mode 100644
index 000000000000..52ac9e661ffd
--- /dev/null
+++ b/drivers/iommu/generic_pt/.kunitconfig
@@ -0,0 +1,14 @@
+CONFIG_KUNIT=y
+CONFIG_GENERIC_PT=y
+CONFIG_DEBUG_GENERIC_PT=y
+CONFIG_IOMMU_PT=y
+CONFIG_IOMMU_PT_AMDV1=y
+CONFIG_IOMMU_PT_VTDSS=y
+CONFIG_IOMMU_PT_X86_64=y
+CONFIG_IOMMU_PT_KUNIT_TEST=y
+
+CONFIG_IOMMUFD=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_RUNTIME_TESTING_MENU=y
+CONFIG_IOMMUFD_TEST=y
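To run the bundled tests against this config, something along these lines should work (the exact kunit.py flags may vary by kernel version):

	./tools/testing/kunit/kunit.py run \
		--kunitconfig drivers/iommu/generic_pt/.kunitconfig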
diff --git a/drivers/iommu/generic_pt/Kconfig b/drivers/iommu/generic_pt/Kconfig
new file mode 100644
index 000000000000..ce4fb4786914
--- /dev/null
+++ b/drivers/iommu/generic_pt/Kconfig
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+menuconfig GENERIC_PT
+ bool "Generic Radix Page Table" if COMPILE_TEST
+ help
+ Generic library for building radix tree page tables.
+
+ Generic PT provides a set of HW page table formats and a common
+ set of APIs to work with them.
+
+if GENERIC_PT
+config DEBUG_GENERIC_PT
+ bool "Extra debugging checks for GENERIC_PT"
+ help
+ Enable extra runtime debugging checks for GENERIC_PT code. This
+ incurs a runtime cost and should not be enabled for production
+ kernels.
+
+ The kunit tests require this to be enabled to get full coverage.
+
+config IOMMU_PT
+ tristate "IOMMU Page Tables"
+ select IOMMU_API
+ depends on IOMMU_SUPPORT
+ depends on GENERIC_PT
+ help
+ Generic library for building IOMMU page tables.
+
+ IOMMU_PT provides an implementation of the page table operations
+ related to struct iommu_domain using GENERIC_PT. It provides a single
+ implementation of the page table operations that can be shared by
+ multiple drivers.
+
+if IOMMU_PT
+config IOMMU_PT_AMDV1
+ tristate "IOMMU page table for 64-bit AMD IOMMU v1"
+ depends on !GENERIC_ATOMIC64 # for cmpxchg64
+ help
+ iommu_domain implementation for the AMD v1 page table. AMDv1 is the
+ "host" page table. It supports granular page sizes of almost every
+ power of 2 and decodes the full 64-bit IOVA space.
+
+ Selected automatically by an IOMMU driver that uses this format.
+
+config IOMMU_PT_VTDSS
+ tristate "IOMMU page table for Intel VT-d Second Stage"
+ depends on !GENERIC_ATOMIC64 # for cmpxchg64
+ help
+ iommu_domain implementation for Intel VT-d's 64-bit 3/4/5-level
+ Second Stage page table. It is similar to the X86_64 format with
+ 4K/2M/1G page sizes.
+
+ Selected automatically by an IOMMU driver that uses this format.
+
+config IOMMU_PT_X86_64
+ tristate "IOMMU page table for x86 64-bit, 4/5 levels"
+ depends on !GENERIC_ATOMIC64 # for cmpxchg64
+ help
+ iommu_domain implementation for the x86 64-bit 4/5 level page table.
+ It supports 4K/2M/1G page sizes and can decode a sign-extended
+ portion of the 64-bit IOVA space.
+
+ Selected automatically by an IOMMU driver that uses this format.
+
+config IOMMU_PT_KUNIT_TEST
+ tristate "IOMMU Page Table KUnit Test" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ depends on IOMMU_PT_AMDV1 || !IOMMU_PT_AMDV1
+ depends on IOMMU_PT_X86_64 || !IOMMU_PT_X86_64
+ depends on IOMMU_PT_VTDSS || !IOMMU_PT_VTDSS
+ default KUNIT_ALL_TESTS
+ help
+ Enable kunit tests for GENERIC_PT and IOMMU_PT that cover all the
+ enabled page table formats. The tests cover most of the GENERIC_PT
+ functions provided by each page table format, as well as the
+ iommu_domain related functions.
+
+endif
+endif
diff --git a/drivers/iommu/generic_pt/fmt/Makefile b/drivers/iommu/generic_pt/fmt/Makefile
new file mode 100644
index 000000000000..976b49ec97dc
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+
+iommu_pt_fmt-$(CONFIG_IOMMU_PT_AMDV1) += amdv1
+iommu_pt_fmt-$(CONFIG_IOMMUFD_TEST) += mock
+
+iommu_pt_fmt-$(CONFIG_IOMMU_PT_VTDSS) += vtdss
+
+iommu_pt_fmt-$(CONFIG_IOMMU_PT_X86_64) += x86_64
+
+IOMMU_PT_KUNIT_TEST :=
+define create_format
+obj-$(2) += iommu_$(1).o
+iommu_pt_kunit_test-y += kunit_iommu_$(1).o
+CFLAGS_kunit_iommu_$(1).o += -DGENERIC_PT_KUNIT=1
+IOMMU_PT_KUNIT_TEST := iommu_pt_kunit_test.o
+
+endef
+
+$(eval $(foreach fmt,$(iommu_pt_fmt-y),$(call create_format,$(fmt),y)))
+$(eval $(foreach fmt,$(iommu_pt_fmt-m),$(call create_format,$(fmt),m)))
+
+# The kunit objects are constructed by compiling the main source
+# with -DGENERIC_PT_KUNIT
+$(obj)/kunit_iommu_%.o: $(src)/iommu_%.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,cc_o_c)
+
+obj-$(CONFIG_IOMMU_PT_KUNIT_TEST) += $(IOMMU_PT_KUNIT_TEST)
diff --git a/drivers/iommu/generic_pt/fmt/amdv1.h b/drivers/iommu/generic_pt/fmt/amdv1.h
new file mode 100644
index 000000000000..aa8e1a8ec95f
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/amdv1.h
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * AMD IOMMU v1 page table
+ *
+ * This is described in Section "2.2.3 I/O Page Tables for Host Translations"
+ * of the "AMD I/O Virtualization Technology (IOMMU) Specification"
+ *
+ * Note the level numbering here matches the core code, so level 0 is the same
+ * as mode 1.
+ *
+ */
+#ifndef __GENERIC_PT_FMT_AMDV1_H
+#define __GENERIC_PT_FMT_AMDV1_H
+
+#include "defs_amdv1.h"
+#include "../pt_defs.h"
+
+#include <asm/page.h>
+#include <linux/bitfield.h>
+#include <linux/container_of.h>
+#include <linux/mem_encrypt.h>
+#include <linux/minmax.h>
+#include <linux/sizes.h>
+#include <linux/string.h>
+
+enum {
+ PT_ITEM_WORD_SIZE = sizeof(u64),
+ /*
+ * The IOMMUFD selftest uses the AMDv1 format with some alterations. It
+ * uses a 2k page size to test cases where the CPU page size is not the
+ * same.
+ */
+#ifdef AMDV1_IOMMUFD_SELFTEST
+ PT_MAX_VA_ADDRESS_LG2 = 56,
+ PT_MAX_OUTPUT_ADDRESS_LG2 = 51,
+ PT_MAX_TOP_LEVEL = 4,
+ PT_GRANULE_LG2SZ = 11,
+#else
+ PT_MAX_VA_ADDRESS_LG2 = 64,
+ PT_MAX_OUTPUT_ADDRESS_LG2 = 52,
+ PT_MAX_TOP_LEVEL = 5,
+ PT_GRANULE_LG2SZ = 12,
+#endif
+ PT_TABLEMEM_LG2SZ = 12,
+
+ /* The DTE only has these bits for the top physical address */
+ PT_TOP_PHYS_MASK = GENMASK_ULL(51, 12),
+};
+
+/* PTE bits */
+enum {
+ AMDV1PT_FMT_PR = BIT(0),
+ AMDV1PT_FMT_D = BIT(6),
+ AMDV1PT_FMT_NEXT_LEVEL = GENMASK_ULL(11, 9),
+ AMDV1PT_FMT_OA = GENMASK_ULL(51, 12),
+ AMDV1PT_FMT_FC = BIT_ULL(60),
+ AMDV1PT_FMT_IR = BIT_ULL(61),
+ AMDV1PT_FMT_IW = BIT_ULL(62),
+};
+
+/*
+ * gcc 13 has a bug where it thinks the output of FIELD_GET() is an enum, make
+ * these defines to avoid it.
+ */
+#define AMDV1PT_FMT_NL_DEFAULT 0
+#define AMDV1PT_FMT_NL_SIZE 7
+
+static inline pt_oaddr_t amdv1pt_table_pa(const struct pt_state *pts)
+{
+ u64 entry = pts->entry;
+
+ if (pts_feature(pts, PT_FEAT_AMDV1_ENCRYPT_TABLES))
+ entry = __sme_clr(entry);
+ return oalog2_mul(FIELD_GET(AMDV1PT_FMT_OA, entry), PT_GRANULE_LG2SZ);
+}
+#define pt_table_pa amdv1pt_table_pa
+
+/* Returns the oa for the start of the contiguous entry */
+static inline pt_oaddr_t amdv1pt_entry_oa(const struct pt_state *pts)
+{
+ u64 entry = pts->entry;
+ pt_oaddr_t oa;
+
+ if (pts_feature(pts, PT_FEAT_AMDV1_ENCRYPT_TABLES))
+ entry = __sme_clr(entry);
+ oa = FIELD_GET(AMDV1PT_FMT_OA, entry);
+
+ if (FIELD_GET(AMDV1PT_FMT_NEXT_LEVEL, entry) == AMDV1PT_FMT_NL_SIZE) {
+ unsigned int sz_bits = oaffz(oa);
+
+ oa = oalog2_set_mod(oa, 0, sz_bits);
+ } else if (PT_WARN_ON(FIELD_GET(AMDV1PT_FMT_NEXT_LEVEL, entry) !=
+ AMDV1PT_FMT_NL_DEFAULT))
+ return 0;
+ return oalog2_mul(oa, PT_GRANULE_LG2SZ);
+}
+#define pt_entry_oa amdv1pt_entry_oa
+
+static inline bool amdv1pt_can_have_leaf(const struct pt_state *pts)
+{
+ /*
+ * Table 15: Page Table Level Parameters
+ * The top most level cannot have translation entries
+ */
+ return pts->level < PT_MAX_TOP_LEVEL;
+}
+#define pt_can_have_leaf amdv1pt_can_have_leaf
+
+/* Body in pt_fmt_defaults.h */
+static inline unsigned int pt_table_item_lg2sz(const struct pt_state *pts);
+
+static inline unsigned int
+amdv1pt_entry_num_contig_lg2(const struct pt_state *pts)
+{
+ u32 code;
+
+ if (FIELD_GET(AMDV1PT_FMT_NEXT_LEVEL, pts->entry) ==
+ AMDV1PT_FMT_NL_DEFAULT)
+ return ilog2(1);
+
+ PT_WARN_ON(FIELD_GET(AMDV1PT_FMT_NEXT_LEVEL, pts->entry) !=
+ AMDV1PT_FMT_NL_SIZE);
+
+ /*
+ * The contiguous size is encoded in the length of a string of 1's in
+ * the low bits of the OA. Reverse the equation:
+ * code = log2_to_int(num_contig_lg2 + item_lg2sz -
+ * PT_GRANULE_LG2SZ - 1) - 1
+ * Which can be expressed as:
+ * num_contig_lg2 = oalog2_ffz(code) + 1 -
+ * item_lg2sz - PT_GRANULE_LG2SZ
+ *
+ * Assume the bit layout is correct and remove the masking. Reorganize
+ * the equation to move all the arithmetic before the ffz.
+ */
+ code = pts->entry >> (__bf_shf(AMDV1PT_FMT_OA) - 1 +
+ pt_table_item_lg2sz(pts) - PT_GRANULE_LG2SZ);
+ return ffz_t(u32, code);
+}
+#define pt_entry_num_contig_lg2 amdv1pt_entry_num_contig_lg2
+
+static inline unsigned int amdv1pt_num_items_lg2(const struct pt_state *pts)
+{
+ /*
+ * Top entry covers bits [63:57] only, this is handled through
+ * max_vasz_lg2.
+ */
+ if (PT_WARN_ON(pts->level == 5))
+ return 7;
+ return PT_TABLEMEM_LG2SZ - ilog2(sizeof(u64));
+}
+#define pt_num_items_lg2 amdv1pt_num_items_lg2
+
+static inline pt_vaddr_t amdv1pt_possible_sizes(const struct pt_state *pts)
+{
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+
+ if (!amdv1pt_can_have_leaf(pts))
+ return 0;
+
+ /*
+ * Table 14: Example Page Size Encodings
+ * Address bits 51:32 can be used to encode page sizes greater than 4
+ * Gbytes. Address bits 63:52 are zero-extended.
+ *
+ * 512GB Pages are not supported due to a hardware bug.
+ * Otherwise every power of two size is supported.
+ */
+ return GENMASK_ULL(min(51, isz_lg2 + amdv1pt_num_items_lg2(pts) - 1),
+ isz_lg2) & ~SZ_512G;
+}
+#define pt_possible_sizes amdv1pt_possible_sizes
+
+static inline enum pt_entry_type amdv1pt_load_entry_raw(struct pt_state *pts)
+{
+ const u64 *tablep = pt_cur_table(pts, u64) + pts->index;
+ unsigned int next_level;
+ u64 entry;
+
+ pts->entry = entry = READ_ONCE(*tablep);
+ if (!(entry & AMDV1PT_FMT_PR))
+ return PT_ENTRY_EMPTY;
+
+ next_level = FIELD_GET(AMDV1PT_FMT_NEXT_LEVEL, pts->entry);
+ if (pts->level == 0 || next_level == AMDV1PT_FMT_NL_DEFAULT ||
+ next_level == AMDV1PT_FMT_NL_SIZE)
+ return PT_ENTRY_OA;
+ return PT_ENTRY_TABLE;
+}
+#define pt_load_entry_raw amdv1pt_load_entry_raw
+
+static inline void
+amdv1pt_install_leaf_entry(struct pt_state *pts, pt_oaddr_t oa,
+ unsigned int oasz_lg2,
+ const struct pt_write_attrs *attrs)
+{
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+ u64 *tablep = pt_cur_table(pts, u64) + pts->index;
+ u64 entry;
+
+ if (!pt_check_install_leaf_args(pts, oa, oasz_lg2))
+ return;
+
+ entry = AMDV1PT_FMT_PR |
+ FIELD_PREP(AMDV1PT_FMT_OA, log2_div(oa, PT_GRANULE_LG2SZ)) |
+ attrs->descriptor_bits;
+
+ if (oasz_lg2 == isz_lg2) {
+ entry |= FIELD_PREP(AMDV1PT_FMT_NEXT_LEVEL,
+ AMDV1PT_FMT_NL_DEFAULT);
+ WRITE_ONCE(*tablep, entry);
+ } else {
+ unsigned int num_contig_lg2 = oasz_lg2 - isz_lg2;
+ u64 *end = tablep + log2_to_int(num_contig_lg2);
+
+ entry |= FIELD_PREP(AMDV1PT_FMT_NEXT_LEVEL,
+ AMDV1PT_FMT_NL_SIZE) |
+ FIELD_PREP(AMDV1PT_FMT_OA,
+ oalog2_to_int(oasz_lg2 - PT_GRANULE_LG2SZ -
+ 1) -
+ 1);
+
+ /* See amdv1pt_clear_entries() */
+ if (num_contig_lg2 <= ilog2(32)) {
+ for (; tablep != end; tablep++)
+ WRITE_ONCE(*tablep, entry);
+ } else {
+ memset64(tablep, entry, log2_to_int(num_contig_lg2));
+ }
+ }
+ pts->entry = entry;
+}
+#define pt_install_leaf_entry amdv1pt_install_leaf_entry
+
+static inline bool amdv1pt_install_table(struct pt_state *pts,
+ pt_oaddr_t table_pa,
+ const struct pt_write_attrs *attrs)
+{
+ u64 entry;
+
+ /*
+ * IR and IW are ANDed from the table levels along with the PTE. We
+ * always control permissions from the PTE, so always set IR and IW for
+ * tables.
+ */
+ entry = AMDV1PT_FMT_PR |
+ FIELD_PREP(AMDV1PT_FMT_NEXT_LEVEL, pts->level) |
+ FIELD_PREP(AMDV1PT_FMT_OA,
+ log2_div(table_pa, PT_GRANULE_LG2SZ)) |
+ AMDV1PT_FMT_IR | AMDV1PT_FMT_IW;
+ if (pts_feature(pts, PT_FEAT_AMDV1_ENCRYPT_TABLES))
+ entry = __sme_set(entry);
+ return pt_table_install64(pts, entry);
+}
+#define pt_install_table amdv1pt_install_table
+
+static inline void amdv1pt_attr_from_entry(const struct pt_state *pts,
+ struct pt_write_attrs *attrs)
+{
+ attrs->descriptor_bits =
+ pts->entry & (AMDV1PT_FMT_FC | AMDV1PT_FMT_IR | AMDV1PT_FMT_IW);
+}
+#define pt_attr_from_entry amdv1pt_attr_from_entry
+
+static inline void amdv1pt_clear_entries(struct pt_state *pts,
+ unsigned int num_contig_lg2)
+{
+ u64 *tablep = pt_cur_table(pts, u64) + pts->index;
+ u64 *end = tablep + log2_to_int(num_contig_lg2);
+
+ /*
+ * gcc generates rep stos for the io-pgtable code, and this difference
+ * can show in microbenchmarks with larger contiguous page sizes.
+ * rep is slower for small cases.
+ */
+ if (num_contig_lg2 <= ilog2(32)) {
+ for (; tablep != end; tablep++)
+ WRITE_ONCE(*tablep, 0);
+ } else {
+ memset64(tablep, 0, log2_to_int(num_contig_lg2));
+ }
+}
+#define pt_clear_entries amdv1pt_clear_entries
+
+static inline bool amdv1pt_entry_is_write_dirty(const struct pt_state *pts)
+{
+ unsigned int num_contig_lg2 = amdv1pt_entry_num_contig_lg2(pts);
+ u64 *tablep = pt_cur_table(pts, u64) +
+ log2_set_mod(pts->index, 0, num_contig_lg2);
+ u64 *end = tablep + log2_to_int(num_contig_lg2);
+
+ for (; tablep != end; tablep++)
+ if (READ_ONCE(*tablep) & AMDV1PT_FMT_D)
+ return true;
+ return false;
+}
+#define pt_entry_is_write_dirty amdv1pt_entry_is_write_dirty
+
+static inline void amdv1pt_entry_make_write_clean(struct pt_state *pts)
+{
+ unsigned int num_contig_lg2 = amdv1pt_entry_num_contig_lg2(pts);
+ u64 *tablep = pt_cur_table(pts, u64) +
+ log2_set_mod(pts->index, 0, num_contig_lg2);
+ u64 *end = tablep + log2_to_int(num_contig_lg2);
+
+ for (; tablep != end; tablep++)
+ WRITE_ONCE(*tablep, READ_ONCE(*tablep) & ~(u64)AMDV1PT_FMT_D);
+}
+#define pt_entry_make_write_clean amdv1pt_entry_make_write_clean
+
+static inline bool amdv1pt_entry_make_write_dirty(struct pt_state *pts)
+{
+ u64 *tablep = pt_cur_table(pts, u64) + pts->index;
+ u64 new = pts->entry | AMDV1PT_FMT_D;
+
+ return try_cmpxchg64(tablep, &pts->entry, new);
+}
+#define pt_entry_make_write_dirty amdv1pt_entry_make_write_dirty
+
+/* --- iommu */
+#include <linux/generic_pt/iommu.h>
+#include <linux/iommu.h>
+
+#define pt_iommu_table pt_iommu_amdv1
+
+/* The common struct is in the per-format common struct */
+static inline struct pt_common *common_from_iommu(struct pt_iommu *iommu_table)
+{
+ return &container_of(iommu_table, struct pt_iommu_amdv1, iommu)
+ ->amdpt.common;
+}
+
+static inline struct pt_iommu *iommu_from_common(struct pt_common *common)
+{
+ return &container_of(common, struct pt_iommu_amdv1, amdpt.common)->iommu;
+}
+
+static inline int amdv1pt_iommu_set_prot(struct pt_common *common,
+ struct pt_write_attrs *attrs,
+ unsigned int iommu_prot)
+{
+ u64 pte = 0;
+
+ if (pt_feature(common, PT_FEAT_AMDV1_FORCE_COHERENCE))
+ pte |= AMDV1PT_FMT_FC;
+ if (iommu_prot & IOMMU_READ)
+ pte |= AMDV1PT_FMT_IR;
+ if (iommu_prot & IOMMU_WRITE)
+ pte |= AMDV1PT_FMT_IW;
+
+ /*
+ * Ideally we'd have an IOMMU_ENCRYPTED flag set by higher levels to
+ * control this. For now, if the tables use sme_set() then so do the PTEs.
+ */
+ if (pt_feature(common, PT_FEAT_AMDV1_ENCRYPT_TABLES))
+ pte = __sme_set(pte);
+
+ attrs->descriptor_bits = pte;
+ return 0;
+}
+#define pt_iommu_set_prot amdv1pt_iommu_set_prot
+
+static inline int amdv1pt_iommu_fmt_init(struct pt_iommu_amdv1 *iommu_table,
+ const struct pt_iommu_amdv1_cfg *cfg)
+{
+ struct pt_amdv1 *table = &iommu_table->amdpt;
+ unsigned int max_vasz_lg2 = PT_MAX_VA_ADDRESS_LG2;
+
+ if (cfg->starting_level == 0 || cfg->starting_level > PT_MAX_TOP_LEVEL)
+ return -EINVAL;
+
+ if (!pt_feature(&table->common, PT_FEAT_DYNAMIC_TOP) &&
+ cfg->starting_level != PT_MAX_TOP_LEVEL)
+ max_vasz_lg2 = PT_GRANULE_LG2SZ +
+ (PT_TABLEMEM_LG2SZ - ilog2(sizeof(u64))) *
+ (cfg->starting_level + 1);
+
+ table->common.max_vasz_lg2 =
+ min(max_vasz_lg2, cfg->common.hw_max_vasz_lg2);
+ table->common.max_oasz_lg2 =
+ min(PT_MAX_OUTPUT_ADDRESS_LG2, cfg->common.hw_max_oasz_lg2);
+ pt_top_set_level(&table->common, cfg->starting_level);
+ return 0;
+}
+#define pt_iommu_fmt_init amdv1pt_iommu_fmt_init
+
+#ifndef PT_FMT_VARIANT
+static inline void
+amdv1pt_iommu_fmt_hw_info(struct pt_iommu_amdv1 *table,
+ const struct pt_range *top_range,
+ struct pt_iommu_amdv1_hw_info *info)
+{
+ info->host_pt_root = virt_to_phys(top_range->top_table);
+ PT_WARN_ON(info->host_pt_root & ~PT_TOP_PHYS_MASK);
+ info->mode = top_range->top_level + 1;
+}
+#define pt_iommu_fmt_hw_info amdv1pt_iommu_fmt_hw_info
+#endif
+
+#if defined(GENERIC_PT_KUNIT)
+static const struct pt_iommu_amdv1_cfg amdv1_kunit_fmt_cfgs[] = {
+ /* Matches what io_pgtable does */
+ [0] = { .starting_level = 2 },
+};
+#define kunit_fmt_cfgs amdv1_kunit_fmt_cfgs
+enum { KUNIT_FMT_FEATURES = 0 };
+#endif
+
+#endif
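The NL_SIZE encoding above deserves a worked example: the contiguous run length is stored as a string of ones in the low bits of the OA field, so ffz() of the OA recovers the page size on decode. A standalone sketch (not kernel code; both helpers are hypothetical), using the default 4KiB granule and a 2MiB mapping:

#include <assert.h>
#include <stdint.h>

#define GRANULE_LG2 12

/* Valid only for NL_SIZE entries, i.e. pgsz_lg2 > GRANULE_LG2 */
static uint64_t encode_size_oa(uint64_t pfn, unsigned int pgsz_lg2)
{
	/* the low (pgsz_lg2 - GRANULE_LG2 - 1) bits become a run of ones */
	return pfn | ((UINT64_C(1) << (pgsz_lg2 - GRANULE_LG2 - 1)) - 1);
}

static unsigned int decode_size_lg2(uint64_t oa)
{
	unsigned int ffz = __builtin_ctzll(~oa); /* first zero bit */

	return GRANULE_LG2 + ffz + 1;
}

int main(void)
{
	/* 2MiB page at PA 0x40000000: PFN 0x40000, low 8 bits set */
	uint64_t oa = encode_size_oa(UINT64_C(0x40000000) >> GRANULE_LG2, 21);

	assert(oa == 0x400ff);
	assert(decode_size_lg2(oa) == 21);
	return 0;
}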
diff --git a/drivers/iommu/generic_pt/fmt/defs_amdv1.h b/drivers/iommu/generic_pt/fmt/defs_amdv1.h
new file mode 100644
index 000000000000..0b9614ca6d10
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/defs_amdv1.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ */
+#ifndef __GENERIC_PT_FMT_DEFS_AMDV1_H
+#define __GENERIC_PT_FMT_DEFS_AMDV1_H
+
+#include <linux/generic_pt/common.h>
+#include <linux/types.h>
+
+typedef u64 pt_vaddr_t;
+typedef u64 pt_oaddr_t;
+
+struct amdv1pt_write_attrs {
+ u64 descriptor_bits;
+ gfp_t gfp;
+};
+#define pt_write_attrs amdv1pt_write_attrs
+
+#endif
diff --git a/drivers/iommu/generic_pt/fmt/defs_vtdss.h b/drivers/iommu/generic_pt/fmt/defs_vtdss.h
new file mode 100644
index 000000000000..4a239bcaae2a
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/defs_vtdss.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ *
+ */
+#ifndef __GENERIC_PT_FMT_DEFS_VTDSS_H
+#define __GENERIC_PT_FMT_DEFS_VTDSS_H
+
+#include <linux/generic_pt/common.h>
+#include <linux/types.h>
+
+typedef u64 pt_vaddr_t;
+typedef u64 pt_oaddr_t;
+
+struct vtdss_pt_write_attrs {
+ u64 descriptor_bits;
+ gfp_t gfp;
+};
+#define pt_write_attrs vtdss_pt_write_attrs
+
+#endif
diff --git a/drivers/iommu/generic_pt/fmt/defs_x86_64.h b/drivers/iommu/generic_pt/fmt/defs_x86_64.h
new file mode 100644
index 000000000000..6f589e1f55d3
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/defs_x86_64.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ */
+#ifndef __GENERIC_PT_FMT_DEFS_X86_64_H
+#define __GENERIC_PT_FMT_DEFS_X86_64_H
+
+#include <linux/generic_pt/common.h>
+#include <linux/types.h>
+
+typedef u64 pt_vaddr_t;
+typedef u64 pt_oaddr_t;
+
+struct x86_64_pt_write_attrs {
+ u64 descriptor_bits;
+ gfp_t gfp;
+};
+#define pt_write_attrs x86_64_pt_write_attrs
+
+#endif
diff --git a/drivers/iommu/generic_pt/fmt/iommu_amdv1.c b/drivers/iommu/generic_pt/fmt/iommu_amdv1.c
new file mode 100644
index 000000000000..72a2337d0c55
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/iommu_amdv1.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#define PT_FMT amdv1
+#define PT_SUPPORTED_FEATURES \
+ (BIT(PT_FEAT_FULL_VA) | BIT(PT_FEAT_DYNAMIC_TOP) | \
+ BIT(PT_FEAT_FLUSH_RANGE) | BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS) | \
+ BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) | \
+ BIT(PT_FEAT_AMDV1_FORCE_COHERENCE))
+#define PT_FORCE_ENABLED_FEATURES \
+ (BIT(PT_FEAT_DYNAMIC_TOP) | BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) | \
+ BIT(PT_FEAT_AMDV1_FORCE_COHERENCE))
+
+#include "iommu_template.h"
diff --git a/drivers/iommu/generic_pt/fmt/iommu_mock.c b/drivers/iommu/generic_pt/fmt/iommu_mock.c
new file mode 100644
index 000000000000..74e597cba9d9
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/iommu_mock.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#define AMDV1_IOMMUFD_SELFTEST 1
+#define PT_FMT amdv1
+#define PT_FMT_VARIANT mock
+#define PT_SUPPORTED_FEATURES 0
+
+#include "iommu_template.h"
diff --git a/drivers/iommu/generic_pt/fmt/iommu_template.h b/drivers/iommu/generic_pt/fmt/iommu_template.h
new file mode 100644
index 000000000000..d28e86abdf2e
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/iommu_template.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * Template to build the iommu module and kunit from the format and
+ * implementation headers.
+ *
+ * The format should have:
+ * #define PT_FMT <name>
+ * #define PT_SUPPORTED_FEATURES (BIT(PT_FEAT_xx) | BIT(PT_FEAT_yy))
+ * And optionally:
+ * #define PT_FORCE_ENABLED_FEATURES ..
+ * #define PT_FMT_VARIANT <suffix>
+ */
+#include <linux/args.h>
+#include <linux/stringify.h>
+
+#ifdef PT_FMT_VARIANT
+#define PTPFX_RAW \
+ CONCATENATE(CONCATENATE(PT_FMT, _), PT_FMT_VARIANT)
+#else
+#define PTPFX_RAW PT_FMT
+#endif
+
+#define PTPFX CONCATENATE(PTPFX_RAW, _)
+
+#define _PT_FMT_H PT_FMT.h
+#define PT_FMT_H __stringify(_PT_FMT_H)
+
+#define _PT_DEFS_H CONCATENATE(defs_, _PT_FMT_H)
+#define PT_DEFS_H __stringify(_PT_DEFS_H)
+
+#include <linux/generic_pt/common.h>
+#include PT_DEFS_H
+#include "../pt_defs.h"
+#include PT_FMT_H
+#include "../pt_common.h"
+
+#ifndef GENERIC_PT_KUNIT
+#include "../iommu_pt.h"
+#else
+/*
+ * The makefile will compile the .c file twice; when GENERIC_PT_KUNIT is set
+ * we are building the kunit module.
+ */
+#include "../kunit_generic_pt.h"
+#include "../kunit_iommu_pt.h"
+#endif
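+
+/*
+ * Expansion example (derived from the macros above): for the mock variant
+ * in iommu_mock.c, PT_FMT = amdv1 and PT_FMT_VARIANT = mock, so:
+ *
+ *   PTPFX_RAW -> amdv1_mock
+ *   PTPFX     -> amdv1_mock_
+ *   PT_FMT_H  -> "amdv1.h"
+ *   PT_DEFS_H -> "defs_amdv1.h"
+ *
+ * and, e.g., DOMAIN_NS(map_pages) in iommu_pt.h becomes
+ * pt_iommu_amdv1_mock_map_pages().
+ */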
diff --git a/drivers/iommu/generic_pt/fmt/iommu_vtdss.c b/drivers/iommu/generic_pt/fmt/iommu_vtdss.c
new file mode 100644
index 000000000000..f551711e2a33
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/iommu_vtdss.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ */
+#define PT_FMT vtdss
+#define PT_SUPPORTED_FEATURES \
+ (BIT(PT_FEAT_FLUSH_RANGE) | BIT(PT_FEAT_VTDSS_FORCE_COHERENCE) | \
+ BIT(PT_FEAT_VTDSS_FORCE_WRITEABLE) | BIT(PT_FEAT_DMA_INCOHERENT))
+
+#include "iommu_template.h"
diff --git a/drivers/iommu/generic_pt/fmt/iommu_x86_64.c b/drivers/iommu/generic_pt/fmt/iommu_x86_64.c
new file mode 100644
index 000000000000..5472660c2d71
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/iommu_x86_64.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#define PT_FMT x86_64
+#define PT_SUPPORTED_FEATURES \
+ (BIT(PT_FEAT_SIGN_EXTEND) | BIT(PT_FEAT_FLUSH_RANGE) | \
+ BIT(PT_FEAT_FLUSH_RANGE_NO_GAPS) | \
+ BIT(PT_FEAT_X86_64_AMD_ENCRYPT_TABLES) | BIT(PT_FEAT_DMA_INCOHERENT))
+
+#include "iommu_template.h"
diff --git a/drivers/iommu/generic_pt/fmt/vtdss.h b/drivers/iommu/generic_pt/fmt/vtdss.h
new file mode 100644
index 000000000000..f5f8981edde7
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/vtdss.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ *
+ * Intel VT-d Second Stage 5/4 level page table
+ *
+ * This is described in
+ * Section "3.7 Second-Stage Translation"
+ * Section "9.8 Second-Stage Paging Entries"
+ *
+ * Of the "Intel Virtualization Technology for Directed I/O Architecture
+ * Specification".
+ *
+ * The named levels in the spec map to the pts->level as:
+ * Table/SS-PTE - 0
+ * Directory/SS-PDE - 1
+ * Directory Ptr/SS-PDPTE - 2
+ * PML4/SS-PML4E - 3
+ * PML5/SS-PML5E - 4
+ */
+#ifndef __GENERIC_PT_FMT_VTDSS_H
+#define __GENERIC_PT_FMT_VTDSS_H
+
+#include "defs_vtdss.h"
+#include "../pt_defs.h"
+
+#include <linux/bitfield.h>
+#include <linux/container_of.h>
+#include <linux/log2.h>
+
+enum {
+ PT_MAX_OUTPUT_ADDRESS_LG2 = 52,
+ PT_MAX_VA_ADDRESS_LG2 = 57,
+ PT_ITEM_WORD_SIZE = sizeof(u64),
+ PT_MAX_TOP_LEVEL = 4,
+ PT_GRANULE_LG2SZ = 12,
+ PT_TABLEMEM_LG2SZ = 12,
+
+ /* SSPTPTR is 4k aligned and limited by HAW */
+ PT_TOP_PHYS_MASK = GENMASK_ULL(63, 12),
+};
+
+/* Shared descriptor bits */
+enum {
+ VTDSS_FMT_R = BIT(0),
+ VTDSS_FMT_W = BIT(1),
+ VTDSS_FMT_A = BIT(8),
+ VTDSS_FMT_D = BIT(9),
+ VTDSS_FMT_SNP = BIT(11),
+ VTDSS_FMT_OA = GENMASK_ULL(51, 12),
+};
+
+/* PDPTE/PDE */
+enum {
+ VTDSS_FMT_PS = BIT(7),
+};
+
+#define common_to_vtdss_pt(common_ptr) \
+ container_of_const(common_ptr, struct pt_vtdss, common)
+#define to_vtdss_pt(pts) common_to_vtdss_pt((pts)->range->common)
+
+static inline pt_oaddr_t vtdss_pt_table_pa(const struct pt_state *pts)
+{
+ return oalog2_mul(FIELD_GET(VTDSS_FMT_OA, pts->entry),
+ PT_TABLEMEM_LG2SZ);
+}
+#define pt_table_pa vtdss_pt_table_pa
+
+static inline pt_oaddr_t vtdss_pt_entry_oa(const struct pt_state *pts)
+{
+ return oalog2_mul(FIELD_GET(VTDSS_FMT_OA, pts->entry),
+ PT_GRANULE_LG2SZ);
+}
+#define pt_entry_oa vtdss_pt_entry_oa
+
+static inline bool vtdss_pt_can_have_leaf(const struct pt_state *pts)
+{
+ return pts->level <= 2;
+}
+#define pt_can_have_leaf vtdss_pt_can_have_leaf
+
+static inline unsigned int vtdss_pt_num_items_lg2(const struct pt_state *pts)
+{
+ return PT_TABLEMEM_LG2SZ - ilog2(sizeof(u64));
+}
+#define pt_num_items_lg2 vtdss_pt_num_items_lg2
+
+static inline enum pt_entry_type vtdss_pt_load_entry_raw(struct pt_state *pts)
+{
+ const u64 *tablep = pt_cur_table(pts, u64);
+ u64 entry;
+
+ pts->entry = entry = READ_ONCE(tablep[pts->index]);
+ if (!entry)
+ return PT_ENTRY_EMPTY;
+ if (pts->level == 0 ||
+ (vtdss_pt_can_have_leaf(pts) && (pts->entry & VTDSS_FMT_PS)))
+ return PT_ENTRY_OA;
+ return PT_ENTRY_TABLE;
+}
+#define pt_load_entry_raw vtdss_pt_load_entry_raw
+
+static inline void
+vtdss_pt_install_leaf_entry(struct pt_state *pts, pt_oaddr_t oa,
+ unsigned int oasz_lg2,
+ const struct pt_write_attrs *attrs)
+{
+ u64 *tablep = pt_cur_table(pts, u64);
+ u64 entry;
+
+ if (!pt_check_install_leaf_args(pts, oa, oasz_lg2))
+ return;
+
+ entry = FIELD_PREP(VTDSS_FMT_OA, log2_div(oa, PT_GRANULE_LG2SZ)) |
+ attrs->descriptor_bits;
+ if (pts->level != 0)
+ entry |= VTDSS_FMT_PS;
+
+ WRITE_ONCE(tablep[pts->index], entry);
+ pts->entry = entry;
+}
+#define pt_install_leaf_entry vtdss_pt_install_leaf_entry
+
+static inline bool vtdss_pt_install_table(struct pt_state *pts,
+ pt_oaddr_t table_pa,
+ const struct pt_write_attrs *attrs)
+{
+ u64 entry;
+
+ entry = VTDSS_FMT_R | VTDSS_FMT_W |
+ FIELD_PREP(VTDSS_FMT_OA, log2_div(table_pa, PT_GRANULE_LG2SZ));
+ return pt_table_install64(pts, entry);
+}
+#define pt_install_table vtdss_pt_install_table
+
+static inline void vtdss_pt_attr_from_entry(const struct pt_state *pts,
+ struct pt_write_attrs *attrs)
+{
+ attrs->descriptor_bits = pts->entry &
+ (VTDSS_FMT_R | VTDSS_FMT_W | VTDSS_FMT_SNP);
+}
+#define pt_attr_from_entry vtdss_pt_attr_from_entry
+
+static inline bool vtdss_pt_entry_is_write_dirty(const struct pt_state *pts)
+{
+ u64 *tablep = pt_cur_table(pts, u64) + pts->index;
+
+ return READ_ONCE(*tablep) & VTDSS_FMT_D;
+}
+#define pt_entry_is_write_dirty vtdss_pt_entry_is_write_dirty
+
+static inline void vtdss_pt_entry_make_write_clean(struct pt_state *pts)
+{
+ u64 *tablep = pt_cur_table(pts, u64) + pts->index;
+
+ WRITE_ONCE(*tablep, READ_ONCE(*tablep) & ~(u64)VTDSS_FMT_D);
+}
+#define pt_entry_make_write_clean vtdss_pt_entry_make_write_clean
+
+static inline bool vtdss_pt_entry_make_write_dirty(struct pt_state *pts)
+{
+ u64 *tablep = pt_cur_table(pts, u64) + pts->index;
+ u64 new = pts->entry | VTDSS_FMT_D;
+
+ return try_cmpxchg64(tablep, &pts->entry, new);
+}
+#define pt_entry_make_write_dirty vtdss_pt_entry_make_write_dirty
+
+static inline unsigned int vtdss_pt_max_sw_bit(struct pt_common *common)
+{
+ return 10;
+}
+#define pt_max_sw_bit vtdss_pt_max_sw_bit
+
+static inline u64 vtdss_pt_sw_bit(unsigned int bitnr)
+{
+ if (__builtin_constant_p(bitnr) && bitnr > 10)
+ BUILD_BUG();
+
+ /* Bits marked Ignored in the specification */
+ switch (bitnr) {
+ case 0:
+ return BIT(10);
+ case 1 ... 9:
+ return BIT_ULL((bitnr - 1) + 52);
+ case 10:
+ return BIT_ULL(63);
+ /* Some bits in 9-3 are available in some entries */
+ default:
+ PT_WARN_ON(true);
+ return 0;
+ }
+}
+#define pt_sw_bit vtdss_pt_sw_bit
+
+/* --- iommu */
+#include <linux/generic_pt/iommu.h>
+#include <linux/iommu.h>
+
+#define pt_iommu_table pt_iommu_vtdss
+
+/* The common struct is in the per-format common struct */
+static inline struct pt_common *common_from_iommu(struct pt_iommu *iommu_table)
+{
+ return &container_of(iommu_table, struct pt_iommu_table, iommu)
+ ->vtdss_pt.common;
+}
+
+static inline struct pt_iommu *iommu_from_common(struct pt_common *common)
+{
+ return &container_of(common, struct pt_iommu_table, vtdss_pt.common)
+ ->iommu;
+}
+
+static inline int vtdss_pt_iommu_set_prot(struct pt_common *common,
+ struct pt_write_attrs *attrs,
+ unsigned int iommu_prot)
+{
+ u64 pte = 0;
+
+ /*
+ * VTDSS does not have a present bit, so we tell if any entry is present
+ * by checking for R or W.
+ */
+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+ return -EINVAL;
+
+ if (iommu_prot & IOMMU_READ)
+ pte |= VTDSS_FMT_R;
+ if (iommu_prot & IOMMU_WRITE)
+ pte |= VTDSS_FMT_W;
+ if (pt_feature(common, PT_FEAT_VTDSS_FORCE_COHERENCE))
+ pte |= VTDSS_FMT_SNP;
+
+ if (pt_feature(common, PT_FEAT_VTDSS_FORCE_WRITEABLE) &&
+ !(iommu_prot & IOMMU_WRITE)) {
+ pr_err_ratelimited(
+ "Read-only mapping is disallowed on the domain which serves as the parent in a nested configuration, due to HW errata (ERRATA_772415_SPR17)\n");
+ return -EINVAL;
+ }
+
+ attrs->descriptor_bits = pte;
+ return 0;
+}
+#define pt_iommu_set_prot vtdss_pt_iommu_set_prot
+
+static inline int vtdss_pt_iommu_fmt_init(struct pt_iommu_vtdss *iommu_table,
+ const struct pt_iommu_vtdss_cfg *cfg)
+{
+ struct pt_vtdss *table = &iommu_table->vtdss_pt;
+
+ if (cfg->top_level > 4 || cfg->top_level < 2)
+ return -EOPNOTSUPP;
+
+ pt_top_set_level(&table->common, cfg->top_level);
+ return 0;
+}
+#define pt_iommu_fmt_init vtdss_pt_iommu_fmt_init
+
+static inline void
+vtdss_pt_iommu_fmt_hw_info(struct pt_iommu_vtdss *table,
+ const struct pt_range *top_range,
+ struct pt_iommu_vtdss_hw_info *info)
+{
+ info->ssptptr = virt_to_phys(top_range->top_table);
+ PT_WARN_ON(info->ssptptr & ~PT_TOP_PHYS_MASK);
+ /*
+ * top_level = 2 -> 3 level table, aw = 1
+ * top_level = 3 -> 4 level table, aw = 2
+ * top_level = 4 -> 5 level table, aw = 3
+ */
+ info->aw = top_range->top_level - 1;
+}
+#define pt_iommu_fmt_hw_info vtdss_pt_iommu_fmt_hw_info
+
+#if defined(GENERIC_PT_KUNIT)
+static const struct pt_iommu_vtdss_cfg vtdss_kunit_fmt_cfgs[] = {
+ [0] = { .common.hw_max_vasz_lg2 = 39, .top_level = 2},
+ [1] = { .common.hw_max_vasz_lg2 = 48, .top_level = 3},
+ [2] = { .common.hw_max_vasz_lg2 = 57, .top_level = 4},
+};
+#define kunit_fmt_cfgs vtdss_kunit_fmt_cfgs
+enum { KUNIT_FMT_FEATURES = BIT(PT_FEAT_VTDSS_FORCE_WRITEABLE) };
+#endif
+#endif
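+
+/*
+ * Usage sketch (illustrative; fmt_table is a driver's embedded
+ * struct pt_iommu_vtdss): the aw value is what a driver programs into the
+ * VT-d address-width field:
+ *
+ *   struct pt_iommu_vtdss_hw_info info;
+ *
+ *   pt_iommu_vtdss_hw_info(fmt_table, &info);
+ *
+ * where info.ssptptr is the 4 KiB aligned SSPTPTR to program and info.aw is
+ * 1, 2 or 3 for a 3, 4 or 5 level table respectively.
+ */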
diff --git a/drivers/iommu/generic_pt/fmt/x86_64.h b/drivers/iommu/generic_pt/fmt/x86_64.h
new file mode 100644
index 000000000000..210748d9d6e8
--- /dev/null
+++ b/drivers/iommu/generic_pt/fmt/x86_64.h
@@ -0,0 +1,279 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * x86 page table. Supports the 4 and 5 level variations.
+ *
+ * The 4 and 5 level version is described in:
+ * Section "4.4 4-Level Paging and 5-Level Paging" of the Intel Software
+ * Developer's Manual Volume 3
+ *
+ * Section "9.7 First-Stage Paging Entries" of the "Intel Virtualization
+ * Technology for Directed I/O Architecture Specification"
+ *
+ * Section "2.2.6 I/O Page Tables for Guest Translations" of the "AMD I/O
+ * Virtualization Technology (IOMMU) Specification"
+ *
+ * It is used by x86 CPUs, AMD and VT-d IOMMU HW.
+ *
+ * Note the 3 level format is very similar and could almost be implemented
+ * here, but the reserved/ignored layout is different and there are
+ * functional bit differences.
+ *
+ * This format uses PT_FEAT_SIGN_EXTEND to have an upper/non-canonical/lower
+ * split. PT_FEAT_SIGN_EXTEND is optional as AMD IOMMU sometimes uses non-sign
+ * extended addressing with this page table format.
+ *
+ * The named levels in the spec map to the pts->level as:
+ * Table/PTE - 0
+ * Directory/PDE - 1
+ * Directory Ptr/PDPTE - 2
+ * PML4/PML4E - 3
+ * PML5/PML5E - 4
+ */
+#ifndef __GENERIC_PT_FMT_X86_64_H
+#define __GENERIC_PT_FMT_X86_64_H
+
+#include "defs_x86_64.h"
+#include "../pt_defs.h"
+
+#include <linux/bitfield.h>
+#include <linux/container_of.h>
+#include <linux/log2.h>
+#include <linux/mem_encrypt.h>
+
+enum {
+ PT_MAX_OUTPUT_ADDRESS_LG2 = 52,
+ PT_MAX_VA_ADDRESS_LG2 = 57,
+ PT_ITEM_WORD_SIZE = sizeof(u64),
+ PT_MAX_TOP_LEVEL = 4,
+ PT_GRANULE_LG2SZ = 12,
+ PT_TABLEMEM_LG2SZ = 12,
+
+ /*
+ * For AMD the GCR3 Base only has these bits. For VT-d FSPTPTR is 4k
+ * aligned and is limited by the architected HAW
+ */
+ PT_TOP_PHYS_MASK = GENMASK_ULL(51, 12),
+};
+
+/* Shared descriptor bits */
+enum {
+ X86_64_FMT_P = BIT(0),
+ X86_64_FMT_RW = BIT(1),
+ X86_64_FMT_U = BIT(2),
+ X86_64_FMT_A = BIT(5),
+ X86_64_FMT_D = BIT(6),
+ X86_64_FMT_OA = GENMASK_ULL(51, 12),
+ X86_64_FMT_XD = BIT_ULL(63),
+};
+
+/* PDPTE/PDE */
+enum {
+ X86_64_FMT_PS = BIT(7),
+};
+
+static inline pt_oaddr_t x86_64_pt_table_pa(const struct pt_state *pts)
+{
+ u64 entry = pts->entry;
+
+ if (pts_feature(pts, PT_FEAT_X86_64_AMD_ENCRYPT_TABLES))
+ entry = __sme_clr(entry);
+ return oalog2_mul(FIELD_GET(X86_64_FMT_OA, entry),
+ PT_TABLEMEM_LG2SZ);
+}
+#define pt_table_pa x86_64_pt_table_pa
+
+static inline pt_oaddr_t x86_64_pt_entry_oa(const struct pt_state *pts)
+{
+ u64 entry = pts->entry;
+
+ if (pts_feature(pts, PT_FEAT_X86_64_AMD_ENCRYPT_TABLES))
+ entry = __sme_clr(entry);
+ return oalog2_mul(FIELD_GET(X86_64_FMT_OA, entry),
+ PT_GRANULE_LG2SZ);
+}
+#define pt_entry_oa x86_64_pt_entry_oa
+
+static inline bool x86_64_pt_can_have_leaf(const struct pt_state *pts)
+{
+ return pts->level <= 2;
+}
+#define pt_can_have_leaf x86_64_pt_can_have_leaf
+
+static inline unsigned int x86_64_pt_num_items_lg2(const struct pt_state *pts)
+{
+ return PT_TABLEMEM_LG2SZ - ilog2(sizeof(u64));
+}
+#define pt_num_items_lg2 x86_64_pt_num_items_lg2
+
+static inline enum pt_entry_type x86_64_pt_load_entry_raw(struct pt_state *pts)
+{
+ const u64 *tablep = pt_cur_table(pts, u64);
+ u64 entry;
+
+ pts->entry = entry = READ_ONCE(tablep[pts->index]);
+ if (!(entry & X86_64_FMT_P))
+ return PT_ENTRY_EMPTY;
+ if (pts->level == 0 ||
+ (x86_64_pt_can_have_leaf(pts) && (entry & X86_64_FMT_PS)))
+ return PT_ENTRY_OA;
+ return PT_ENTRY_TABLE;
+}
+#define pt_load_entry_raw x86_64_pt_load_entry_raw
+
+static inline void
+x86_64_pt_install_leaf_entry(struct pt_state *pts, pt_oaddr_t oa,
+ unsigned int oasz_lg2,
+ const struct pt_write_attrs *attrs)
+{
+ u64 *tablep = pt_cur_table(pts, u64);
+ u64 entry;
+
+ if (!pt_check_install_leaf_args(pts, oa, oasz_lg2))
+ return;
+
+ entry = X86_64_FMT_P |
+ FIELD_PREP(X86_64_FMT_OA, log2_div(oa, PT_GRANULE_LG2SZ)) |
+ attrs->descriptor_bits;
+ if (pts->level != 0)
+ entry |= X86_64_FMT_PS;
+
+ WRITE_ONCE(tablep[pts->index], entry);
+ pts->entry = entry;
+}
+#define pt_install_leaf_entry x86_64_pt_install_leaf_entry
+
+static inline bool x86_64_pt_install_table(struct pt_state *pts,
+ pt_oaddr_t table_pa,
+ const struct pt_write_attrs *attrs)
+{
+ u64 entry;
+
+ entry = X86_64_FMT_P | X86_64_FMT_RW | X86_64_FMT_U | X86_64_FMT_A |
+ FIELD_PREP(X86_64_FMT_OA, log2_div(table_pa, PT_GRANULE_LG2SZ));
+ if (pts_feature(pts, PT_FEAT_X86_64_AMD_ENCRYPT_TABLES))
+ entry = __sme_set(entry);
+ return pt_table_install64(pts, entry);
+}
+#define pt_install_table x86_64_pt_install_table
+
+static inline void x86_64_pt_attr_from_entry(const struct pt_state *pts,
+ struct pt_write_attrs *attrs)
+{
+ attrs->descriptor_bits = pts->entry &
+ (X86_64_FMT_RW | X86_64_FMT_U | X86_64_FMT_A |
+ X86_64_FMT_D | X86_64_FMT_XD);
+}
+#define pt_attr_from_entry x86_64_pt_attr_from_entry
+
+static inline unsigned int x86_64_pt_max_sw_bit(struct pt_common *common)
+{
+ return 12;
+}
+#define pt_max_sw_bit x86_64_pt_max_sw_bit
+
+static inline u64 x86_64_pt_sw_bit(unsigned int bitnr)
+{
+ if (__builtin_constant_p(bitnr) && bitnr > 12)
+ BUILD_BUG();
+
+ /* Bits marked Ignored/AVL in the specification */
+ switch (bitnr) {
+ case 0:
+ return BIT(9);
+ case 1:
+ return BIT(11);
+ case 2 ... 12:
+ return BIT_ULL((bitnr - 2) + 52);
+ /* Some bits in 8,6,4,3 are available in some entries */
+ default:
+ PT_WARN_ON(true);
+ return 0;
+ }
+}
+#define pt_sw_bit x86_64_pt_sw_bit
+
+/* --- iommu */
+#include <linux/generic_pt/iommu.h>
+#include <linux/iommu.h>
+
+#define pt_iommu_table pt_iommu_x86_64
+
+/* The common struct is in the per-format common struct */
+static inline struct pt_common *common_from_iommu(struct pt_iommu *iommu_table)
+{
+ return &container_of(iommu_table, struct pt_iommu_table, iommu)
+ ->x86_64_pt.common;
+}
+
+static inline struct pt_iommu *iommu_from_common(struct pt_common *common)
+{
+ return &container_of(common, struct pt_iommu_table, x86_64_pt.common)
+ ->iommu;
+}
+
+static inline int x86_64_pt_iommu_set_prot(struct pt_common *common,
+ struct pt_write_attrs *attrs,
+ unsigned int iommu_prot)
+{
+ u64 pte;
+
+ pte = X86_64_FMT_U | X86_64_FMT_A;
+ if (iommu_prot & IOMMU_WRITE)
+ pte |= X86_64_FMT_RW | X86_64_FMT_D;
+
+ /*
+ * Ideally we'd have an IOMMU_ENCRYPTED flag set by higher levels to
+ * control this. For now, if the tables use __sme_set() then so do the PTEs.
+ */
+ if (pt_feature(common, PT_FEAT_X86_64_AMD_ENCRYPT_TABLES))
+ pte = __sme_set(pte);
+
+ attrs->descriptor_bits = pte;
+ return 0;
+}
+#define pt_iommu_set_prot x86_64_pt_iommu_set_prot
+
+static inline int
+x86_64_pt_iommu_fmt_init(struct pt_iommu_x86_64 *iommu_table,
+ const struct pt_iommu_x86_64_cfg *cfg)
+{
+ struct pt_x86_64 *table = &iommu_table->x86_64_pt;
+
+ if (cfg->top_level < 3 || cfg->top_level > 4)
+ return -EOPNOTSUPP;
+
+ pt_top_set_level(&table->common, cfg->top_level);
+
+ table->common.max_oasz_lg2 =
+ min(PT_MAX_OUTPUT_ADDRESS_LG2, cfg->common.hw_max_oasz_lg2);
+ return 0;
+}
+#define pt_iommu_fmt_init x86_64_pt_iommu_fmt_init
+
+static inline void
+x86_64_pt_iommu_fmt_hw_info(struct pt_iommu_x86_64 *table,
+ const struct pt_range *top_range,
+ struct pt_iommu_x86_64_hw_info *info)
+{
+ info->gcr3_pt = virt_to_phys(top_range->top_table);
+ PT_WARN_ON(info->gcr3_pt & ~PT_TOP_PHYS_MASK);
+ info->levels = top_range->top_level + 1;
+}
+#define pt_iommu_fmt_hw_info x86_64_pt_iommu_fmt_hw_info
+
+#if defined(GENERIC_PT_KUNIT)
+static const struct pt_iommu_x86_64_cfg x86_64_kunit_fmt_cfgs[] = {
+ [0] = { .common.features = BIT(PT_FEAT_SIGN_EXTEND),
+ .common.hw_max_vasz_lg2 = 48, .top_level = 3 },
+ [1] = { .common.features = BIT(PT_FEAT_SIGN_EXTEND),
+ .common.hw_max_vasz_lg2 = 57, .top_level = 4 },
+ /* AMD IOMMU PASID 0 formats with no SIGN_EXTEND */
+ [2] = { .common.hw_max_vasz_lg2 = 47, .top_level = 3 },
+ [3] = { .common.hw_max_vasz_lg2 = 56, .top_level = 4},
+};
+#define kunit_fmt_cfgs x86_64_kunit_fmt_cfgs
+enum { KUNIT_FMT_FEATURES = BIT(PT_FEAT_SIGN_EXTEND)};
+#endif
+#endif
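+
+/*
+ * Software-bit layout summary (derived from x86_64_pt_sw_bit() above):
+ *
+ *   bitnr 0     -> PTE bit 9        (AVL)
+ *   bitnr 1     -> PTE bit 11       (AVL)
+ *   bitnr 2..12 -> PTE bits 52..62  (ignored range)
+ */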
diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h
new file mode 100644
index 000000000000..97aeda1ad01c
--- /dev/null
+++ b/drivers/iommu/generic_pt/iommu_pt.h
@@ -0,0 +1,1289 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * "Templated C code" for implementing the iommu operations for page tables.
+ * This is compiled multiple times, over all the page table formats to pick up
+ * the per-format definitions.
+ */
+#ifndef __GENERIC_PT_IOMMU_PT_H
+#define __GENERIC_PT_IOMMU_PT_H
+
+#include "pt_iter.h"
+
+#include <linux/export.h>
+#include <linux/iommu.h>
+#include "../iommu-pages.h"
+#include <linux/cleanup.h>
+#include <linux/dma-mapping.h>
+
+enum {
+ SW_BIT_CACHE_FLUSH_DONE = 0,
+};
+
+static void flush_writes_range(const struct pt_state *pts,
+ unsigned int start_index, unsigned int end_index)
+{
+ if (pts_feature(pts, PT_FEAT_DMA_INCOHERENT))
+ iommu_pages_flush_incoherent(
+ iommu_from_common(pts->range->common)->iommu_device,
+ pts->table, start_index * PT_ITEM_WORD_SIZE,
+ (end_index - start_index) * PT_ITEM_WORD_SIZE);
+}
+
+static void flush_writes_item(const struct pt_state *pts)
+{
+ if (pts_feature(pts, PT_FEAT_DMA_INCOHERENT))
+ iommu_pages_flush_incoherent(
+ iommu_from_common(pts->range->common)->iommu_device,
+ pts->table, pts->index * PT_ITEM_WORD_SIZE,
+ PT_ITEM_WORD_SIZE);
+}
+
+static void gather_range_pages(struct iommu_iotlb_gather *iotlb_gather,
+ struct pt_iommu *iommu_table, pt_vaddr_t iova,
+ pt_vaddr_t len,
+ struct iommu_pages_list *free_list)
+{
+ struct pt_common *common = common_from_iommu(iommu_table);
+
+ if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
+ iommu_pages_stop_incoherent_list(free_list,
+ iommu_table->iommu_device);
+
+ if (pt_feature(common, PT_FEAT_FLUSH_RANGE_NO_GAPS) &&
+ iommu_iotlb_gather_is_disjoint(iotlb_gather, iova, len)) {
+ iommu_iotlb_sync(&iommu_table->domain, iotlb_gather);
+ /*
+ * Note that the sync frees the gather's free list, so we must
+ * not have any pages on that list that are covered by iova/len
+ */
+ } else if (pt_feature(common, PT_FEAT_FLUSH_RANGE)) {
+ iommu_iotlb_gather_add_range(iotlb_gather, iova, len);
+ }
+
+ iommu_pages_list_splice(free_list, &iotlb_gather->freelist);
+}
+
+#define DOMAIN_NS(op) CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), op)
+
+static int make_range_ul(struct pt_common *common, struct pt_range *range,
+ unsigned long iova, unsigned long len)
+{
+ unsigned long last;
+
+ if (unlikely(len == 0))
+ return -EINVAL;
+
+ if (check_add_overflow(iova, len - 1, &last))
+ return -EOVERFLOW;
+
+ *range = pt_make_range(common, iova, last);
+ if (sizeof(iova) > sizeof(range->va)) {
+ if (unlikely(range->va != iova || range->last_va != last))
+ return -EOVERFLOW;
+ }
+ return 0;
+}
+
+static __maybe_unused int make_range_u64(struct pt_common *common,
+ struct pt_range *range, u64 iova,
+ u64 len)
+{
+ if (unlikely(iova > ULONG_MAX || len > ULONG_MAX))
+ return -EOVERFLOW;
+ return make_range_ul(common, range, iova, len);
+}
+
+/*
+ * Some APIs use unsigned long, while others use dma_addr_t as the type.
+ * Dispatch to the correct validation based on the type.
+ */
+#define make_range_no_check(common, range, iova, len) \
+ ({ \
+ int ret; \
+ if (sizeof(iova) > sizeof(unsigned long) || \
+ sizeof(len) > sizeof(unsigned long)) \
+ ret = make_range_u64(common, range, iova, len); \
+ else \
+ ret = make_range_ul(common, range, iova, len); \
+ ret; \
+ })
+
+#define make_range(common, range, iova, len) \
+ ({ \
+ int ret = make_range_no_check(common, range, iova, len); \
+ if (!ret) \
+ ret = pt_check_range(range); \
+ ret; \
+ })
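+
+/*
+ * Behavior sketch (illustrative values, assuming they fall inside the
+ * table's aperture):
+ *
+ *   make_range(common, &range, 0x1000, SZ_2M);    ok, last_va = 0x200fff
+ *   make_range(common, &range, 0x1000, 0);        -EINVAL
+ *   make_range(common, &range, -4096UL, SZ_2M);   -EOVERFLOW (last wraps)
+ *
+ * On success range.va = iova and range.last_va = iova + len - 1, and
+ * pt_check_range() has validated the range against the current top.
+ */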
+
+static inline unsigned int compute_best_pgsize(struct pt_state *pts,
+ pt_oaddr_t oa)
+{
+ struct pt_iommu *iommu_table = iommu_from_common(pts->range->common);
+
+ if (!pt_can_have_leaf(pts))
+ return 0;
+
+ /*
+ * The page size is limited by the domain's bitmap. This allows the core
+ * code to reduce the supported page sizes by changing the bitmap.
+ */
+ return pt_compute_best_pgsize(pt_possible_sizes(pts) &
+ iommu_table->domain.pgsize_bitmap,
+ pts->range->va, pts->range->last_va, oa);
+}
+
+static __always_inline int __do_iova_to_phys(struct pt_range *range, void *arg,
+ unsigned int level,
+ struct pt_table_p *table,
+ pt_level_fn_t descend_fn)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ pt_oaddr_t *res = arg;
+
+ switch (pt_load_single_entry(&pts)) {
+ case PT_ENTRY_EMPTY:
+ return -ENOENT;
+ case PT_ENTRY_TABLE:
+ return pt_descend(&pts, arg, descend_fn);
+ case PT_ENTRY_OA:
+ *res = pt_entry_oa_exact(&pts);
+ return 0;
+ }
+ return -ENOENT;
+}
+PT_MAKE_LEVELS(__iova_to_phys, __do_iova_to_phys);
+
+/**
+ * iova_to_phys() - Return the output address for the given IOVA
+ * @domain: Table to query
+ * @iova: IO virtual address to query
+ *
+ * Determine the output address from the given IOVA. @iova may have any
+ * alignment, the returned physical will be adjusted with any sub page offset.
+ *
+ * Context: The caller must hold a read range lock that includes @iova.
+ *
+ * Return: The translated physical address, or 0 if there is no translation
+ * for the given iova.
+ */
+phys_addr_t DOMAIN_NS(iova_to_phys)(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct pt_iommu *iommu_table =
+ container_of(domain, struct pt_iommu, domain);
+ struct pt_range range;
+ pt_oaddr_t res;
+ int ret;
+
+ ret = make_range(common_from_iommu(iommu_table), &range, iova, 1);
+ if (ret)
+ return ret;
+
+ ret = pt_walk_range(&range, __iova_to_phys, &res);
+ /* PHYS_ADDR_MAX would be a better error code */
+ if (ret)
+ return 0;
+ return res;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(iova_to_phys), "GENERIC_PT_IOMMU");
+
+struct pt_iommu_dirty_args {
+ struct iommu_dirty_bitmap *dirty;
+ unsigned int flags;
+};
+
+static void record_dirty(struct pt_state *pts,
+ struct pt_iommu_dirty_args *dirty,
+ unsigned int num_contig_lg2)
+{
+ pt_vaddr_t dirty_len;
+
+ if (num_contig_lg2 != ilog2(1)) {
+ unsigned int index = pts->index;
+ unsigned int end_index = log2_set_mod_max_t(
+ unsigned int, pts->index, num_contig_lg2);
+
+ /* Adjust for being contained inside a contiguous page */
+ end_index = min(end_index, pts->end_index);
+ dirty_len = (end_index - index) *
+ log2_to_int(pt_table_item_lg2sz(pts));
+ } else {
+ dirty_len = log2_to_int(pt_table_item_lg2sz(pts));
+ }
+
+ if (dirty->dirty->bitmap)
+ iova_bitmap_set(dirty->dirty->bitmap, pts->range->va,
+ dirty_len);
+
+ if (!(dirty->flags & IOMMU_DIRTY_NO_CLEAR)) {
+ /*
+ * No write log required because DMA incoherence and atomic
+ * dirty tracking bits can't work together
+ */
+ pt_entry_make_write_clean(pts);
+ iommu_iotlb_gather_add_range(dirty->dirty->gather,
+ pts->range->va, dirty_len);
+ }
+}
+
+static inline int __read_and_clear_dirty(struct pt_range *range, void *arg,
+ unsigned int level,
+ struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct pt_iommu_dirty_args *dirty = arg;
+ int ret;
+
+ for_each_pt_level_entry(&pts) {
+ if (pts.type == PT_ENTRY_TABLE) {
+ ret = pt_descend(&pts, arg, __read_and_clear_dirty);
+ if (ret)
+ return ret;
+ continue;
+ }
+ if (pts.type == PT_ENTRY_OA && pt_entry_is_write_dirty(&pts))
+ record_dirty(&pts, dirty,
+ pt_entry_num_contig_lg2(&pts));
+ }
+ return 0;
+}
+
+/**
+ * read_and_clear_dirty() - Manipulate the HW set write dirty state
+ * @domain: Domain to manipulate
+ * @iova: IO virtual address to start
+ * @size: Length of the IOVA
+ * @flags: A bitmap of IOMMU_DIRTY_NO_CLEAR
+ * @dirty: Place to store the dirty bits
+ *
+ * Iterate over all the entries in the mapped range and record their write
+ * dirty status in iommu_dirty_bitmap. If IOMMU_DIRTY_NO_CLEAR is specified
+ * then the entries are left dirty, otherwise they are returned to being not
+ * write dirty.
+ *
+ * Context: The caller must hold a read range lock that includes @iova.
+ *
+ * Returns: -ERRNO on failure, 0 on success.
+ */
+int DOMAIN_NS(read_and_clear_dirty)(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty)
+{
+ struct pt_iommu *iommu_table =
+ container_of(domain, struct pt_iommu, domain);
+ struct pt_iommu_dirty_args dirty_args = {
+ .dirty = dirty,
+ .flags = flags,
+ };
+ struct pt_range range;
+ int ret;
+
+#if !IS_ENABLED(CONFIG_IOMMUFD_DRIVER) || !defined(pt_entry_is_write_dirty)
+ return -EOPNOTSUPP;
+#endif
+
+ ret = make_range(common_from_iommu(iommu_table), &range, iova, size);
+ if (ret)
+ return ret;
+
+ ret = pt_walk_range(&range, __read_and_clear_dirty, &dirty_args);
+ PT_WARN_ON(ret);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(read_and_clear_dirty), "GENERIC_PT_IOMMU");
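+
+/*
+ * Caller sketch (hedged; assumes an IOMMUFD-style caller that already has an
+ * iova_bitmap for the range, the vtdss prefix is just one instantiation):
+ *
+ *   struct iommu_iotlb_gather gather;
+ *   struct iommu_dirty_bitmap dirty;
+ *
+ *   iommu_iotlb_gather_init(&gather);
+ *   iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
+ *   ret = pt_iommu_vtdss_read_and_clear_dirty(domain, iova, size, 0, &dirty);
+ *   iommu_iotlb_sync(domain, &gather);
+ *
+ * The final sync makes the cleared dirty bits visible to the device before
+ * the bitmap is reported.
+ */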
+
+static inline int __set_dirty(struct pt_range *range, void *arg,
+ unsigned int level, struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+
+ switch (pt_load_single_entry(&pts)) {
+ case PT_ENTRY_EMPTY:
+ return -ENOENT;
+ case PT_ENTRY_TABLE:
+ return pt_descend(&pts, arg, __set_dirty);
+ case PT_ENTRY_OA:
+ if (!pt_entry_make_write_dirty(&pts))
+ return -EAGAIN;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int __maybe_unused NS(set_dirty)(struct pt_iommu *iommu_table,
+ dma_addr_t iova)
+{
+ struct pt_range range;
+ int ret;
+
+ ret = make_range(common_from_iommu(iommu_table), &range, iova, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * Note: There is no locking here yet, if the test suite races this it
+ * can crash. It should use RCU locking eventually.
+ */
+ return pt_walk_range(&range, __set_dirty, NULL);
+}
+
+struct pt_iommu_collect_args {
+ struct iommu_pages_list free_list;
+ /* Fail if any OAs are within the range */
+ u8 check_mapped : 1;
+};
+
+static int __collect_tables(struct pt_range *range, void *arg,
+ unsigned int level, struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct pt_iommu_collect_args *collect = arg;
+ int ret;
+
+ if (!collect->check_mapped && !pt_can_have_table(&pts))
+ return 0;
+
+ for_each_pt_level_entry(&pts) {
+ if (pts.type == PT_ENTRY_TABLE) {
+ iommu_pages_list_add(&collect->free_list, pts.table_lower);
+ ret = pt_descend(&pts, arg, __collect_tables);
+ if (ret)
+ return ret;
+ continue;
+ }
+ if (pts.type == PT_ENTRY_OA && collect->check_mapped)
+ return -EADDRINUSE;
+ }
+ return 0;
+}
+
+enum alloc_mode { ALLOC_NORMAL, ALLOC_DEFER_COHERENT_FLUSH };
+
+/* Allocate a table, the empty table will be ready to be installed. */
+static inline struct pt_table_p *_table_alloc(struct pt_common *common,
+ size_t lg2sz, gfp_t gfp,
+ enum alloc_mode mode)
+{
+ struct pt_iommu *iommu_table = iommu_from_common(common);
+ struct pt_table_p *table_mem;
+
+ table_mem = iommu_alloc_pages_node_sz(iommu_table->nid, gfp,
+ log2_to_int(lg2sz));
+ if (pt_feature(common, PT_FEAT_DMA_INCOHERENT) &&
+ mode == ALLOC_NORMAL) {
+ int ret = iommu_pages_start_incoherent(
+ table_mem, iommu_table->iommu_device);
+ if (ret) {
+ iommu_free_pages(table_mem);
+ return ERR_PTR(ret);
+ }
+ }
+ return table_mem;
+}
+
+static inline struct pt_table_p *table_alloc_top(struct pt_common *common,
+ uintptr_t top_of_table,
+ gfp_t gfp,
+ enum alloc_mode mode)
+{
+ /*
+ * The top doesn't need the free list or the rest of the iommu-pages
+ * machinery, so it technically doesn't need to use iommu pages. Use
+ * the API anyhow, as the top is usually not smaller than PAGE_SIZE,
+ * to keep things simple.
+ */
+ return _table_alloc(common, pt_top_memsize_lg2(common, top_of_table),
+ gfp, mode);
+}
+
+/* Allocate an interior table */
+static inline struct pt_table_p *table_alloc(const struct pt_state *parent_pts,
+ gfp_t gfp, enum alloc_mode mode)
+{
+ struct pt_state child_pts =
+ pt_init(parent_pts->range, parent_pts->level - 1, NULL);
+
+ return _table_alloc(parent_pts->range->common,
+ pt_num_items_lg2(&child_pts) +
+ ilog2(PT_ITEM_WORD_SIZE),
+ gfp, mode);
+}
+
+static inline int pt_iommu_new_table(struct pt_state *pts,
+ struct pt_write_attrs *attrs)
+{
+ struct pt_table_p *table_mem;
+ phys_addr_t phys;
+
+ /* Given PA/VA/length can't be represented */
+ if (PT_WARN_ON(!pt_can_have_table(pts)))
+ return -ENXIO;
+
+ table_mem = table_alloc(pts, attrs->gfp, ALLOC_NORMAL);
+ if (IS_ERR(table_mem))
+ return PTR_ERR(table_mem);
+
+ phys = virt_to_phys(table_mem);
+ if (!pt_install_table(pts, phys, attrs)) {
+ iommu_pages_free_incoherent(
+ table_mem,
+ iommu_from_common(pts->range->common)->iommu_device);
+ return -EAGAIN;
+ }
+
+ if (pts_feature(pts, PT_FEAT_DMA_INCOHERENT)) {
+ flush_writes_item(pts);
+ pt_set_sw_bit_release(pts, SW_BIT_CACHE_FLUSH_DONE);
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_GENERIC_PT)) {
+ /*
+ * The underlying format can't store this physical table address.
+ * This can happen when kunit testing formats outside their normal
+ * environment, where the CPU's addressing exceeds what the format
+ * can encode.
+ */
+ pt_load_single_entry(pts);
+ if (PT_WARN_ON(pt_table_pa(pts) != phys)) {
+ pt_clear_entries(pts, ilog2(1));
+ iommu_pages_free_incoherent(
+ table_mem, iommu_from_common(pts->range->common)
+ ->iommu_device);
+ return -EINVAL;
+ }
+ }
+
+ pts->table_lower = table_mem;
+ return 0;
+}
+
+struct pt_iommu_map_args {
+ struct iommu_iotlb_gather *iotlb_gather;
+ struct pt_write_attrs attrs;
+ pt_oaddr_t oa;
+ unsigned int leaf_pgsize_lg2;
+ unsigned int leaf_level;
+};
+
+/*
+ * This will recursively check any tables in the block to validate they are
+ * empty and then free them through the gather.
+ */
+static int clear_contig(const struct pt_state *start_pts,
+ struct iommu_iotlb_gather *iotlb_gather,
+ unsigned int step, unsigned int pgsize_lg2)
+{
+ struct pt_iommu *iommu_table =
+ iommu_from_common(start_pts->range->common);
+ struct pt_range range = *start_pts->range;
+ struct pt_state pts =
+ pt_init(&range, start_pts->level, start_pts->table);
+ struct pt_iommu_collect_args collect = { .check_mapped = true };
+ int ret;
+
+ pts.index = start_pts->index;
+ pts.end_index = start_pts->index + step;
+ for (; _pt_iter_load(&pts); pt_next_entry(&pts)) {
+ if (pts.type == PT_ENTRY_TABLE) {
+ collect.free_list =
+ IOMMU_PAGES_LIST_INIT(collect.free_list);
+ ret = pt_walk_descend_all(&pts, __collect_tables,
+ &collect);
+ if (ret)
+ return ret;
+
+ /*
+ * The table item must be cleared before we can update
+ * the gather
+ */
+ pt_clear_entries(&pts, ilog2(1));
+ flush_writes_item(&pts);
+
+ iommu_pages_list_add(&collect.free_list,
+ pt_table_ptr(&pts));
+ gather_range_pages(
+ iotlb_gather, iommu_table, range.va,
+ log2_to_int(pt_table_item_lg2sz(&pts)),
+ &collect.free_list);
+ } else if (pts.type != PT_ENTRY_EMPTY) {
+ return -EADDRINUSE;
+ }
+ }
+ return 0;
+}
+
+static int __map_range_leaf(struct pt_range *range, void *arg,
+ unsigned int level, struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct pt_iommu_map_args *map = arg;
+ unsigned int leaf_pgsize_lg2 = map->leaf_pgsize_lg2;
+ unsigned int start_index;
+ pt_oaddr_t oa = map->oa;
+ unsigned int step;
+ bool need_contig;
+ int ret = 0;
+
+ PT_WARN_ON(map->leaf_level != level);
+ PT_WARN_ON(!pt_can_have_leaf(&pts));
+
+ step = log2_to_int_t(unsigned int,
+ leaf_pgsize_lg2 - pt_table_item_lg2sz(&pts));
+ need_contig = leaf_pgsize_lg2 != pt_table_item_lg2sz(&pts);
+
+ _pt_iter_first(&pts);
+ start_index = pts.index;
+ do {
+ pts.type = pt_load_entry_raw(&pts);
+ if (pts.type != PT_ENTRY_EMPTY || need_contig) {
+ if (pts.index != start_index)
+ pt_index_to_va(&pts);
+ ret = clear_contig(&pts, map->iotlb_gather, step,
+ leaf_pgsize_lg2);
+ if (ret)
+ break;
+ }
+
+ if (IS_ENABLED(CONFIG_DEBUG_GENERIC_PT)) {
+ pt_index_to_va(&pts);
+ PT_WARN_ON(compute_best_pgsize(&pts, oa) !=
+ leaf_pgsize_lg2);
+ }
+ pt_install_leaf_entry(&pts, oa, leaf_pgsize_lg2, &map->attrs);
+
+ oa += log2_to_int(leaf_pgsize_lg2);
+ pts.index += step;
+ } while (pts.index < pts.end_index);
+
+ flush_writes_range(&pts, start_index, pts.index);
+
+ map->oa = oa;
+ return ret;
+}
+
+static int __map_range(struct pt_range *range, void *arg, unsigned int level,
+ struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct pt_iommu_map_args *map = arg;
+ int ret;
+
+ PT_WARN_ON(map->leaf_level == level);
+ PT_WARN_ON(!pt_can_have_table(&pts));
+
+ _pt_iter_first(&pts);
+
+ /* Descend to a child table */
+ do {
+ pts.type = pt_load_entry_raw(&pts);
+
+ if (pts.type != PT_ENTRY_TABLE) {
+ if (pts.type != PT_ENTRY_EMPTY)
+ return -EADDRINUSE;
+ ret = pt_iommu_new_table(&pts, &map->attrs);
+ if (ret) {
+ /*
+ * Racing with another thread installing a table
+ */
+ if (ret == -EAGAIN)
+ continue;
+ return ret;
+ }
+ } else {
+ pts.table_lower = pt_table_ptr(&pts);
+ /*
+ * Racing with a shared pt_iommu_new_table()? The other
+ * thread is still flushing the cache, so we have to
+ * also flush it to ensure that when our thread's map
+ * completes all the table items leading to our mapping
+ * are visible.
+ *
+ * This requires the pt_set_sw_bit_release() to be a
+ * release of the cache flush so that this can acquire
+ * visibility at the iommu.
+ */
+ if (pts_feature(&pts, PT_FEAT_DMA_INCOHERENT) &&
+ !pt_test_sw_bit_acquire(&pts,
+ SW_BIT_CACHE_FLUSH_DONE))
+ flush_writes_item(&pts);
+ }
+
+ /*
+ * The already present table can possibly be shared with another
+ * concurrent map.
+ */
+ if (map->leaf_level == level - 1)
+ ret = pt_descend(&pts, arg, __map_range_leaf);
+ else
+ ret = pt_descend(&pts, arg, __map_range);
+ if (ret)
+ return ret;
+
+ pts.index++;
+ pt_index_to_va(&pts);
+ if (pts.index >= pts.end_index)
+ break;
+ } while (true);
+ return 0;
+}
+
+/*
+ * Fast path for the easy case of mapping a 4k page to an already allocated
+ * table. This is a common workload. If it returns -EAGAIN, run the full
+ * algorithm instead.
+ */
+static __always_inline int __do_map_single_page(struct pt_range *range,
+ void *arg, unsigned int level,
+ struct pt_table_p *table,
+ pt_level_fn_t descend_fn)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct pt_iommu_map_args *map = arg;
+
+ pts.type = pt_load_single_entry(&pts);
+ if (level == 0) {
+ if (pts.type != PT_ENTRY_EMPTY)
+ return -EADDRINUSE;
+ pt_install_leaf_entry(&pts, map->oa, PAGE_SHIFT,
+ &map->attrs);
+ /* No flush needed; this fast path is not used when incoherent */
+ map->oa += PAGE_SIZE;
+ return 0;
+ }
+ if (pts.type == PT_ENTRY_TABLE)
+ return pt_descend(&pts, arg, descend_fn);
+ /* Something else, use the slow path */
+ return -EAGAIN;
+}
+PT_MAKE_LEVELS(__map_single_page, __do_map_single_page);
+
+/*
+ * Add a table to the top, increasing the top level as much as necessary to
+ * encompass range.
+ */
+static int increase_top(struct pt_iommu *iommu_table, struct pt_range *range,
+ struct pt_iommu_map_args *map)
+{
+ struct iommu_pages_list free_list = IOMMU_PAGES_LIST_INIT(free_list);
+ struct pt_common *common = common_from_iommu(iommu_table);
+ uintptr_t top_of_table = READ_ONCE(common->top_of_table);
+ uintptr_t new_top_of_table = top_of_table;
+ struct pt_table_p *table_mem;
+ unsigned int new_level;
+ spinlock_t *domain_lock;
+ unsigned long flags;
+ int ret;
+
+ while (true) {
+ struct pt_range top_range =
+ _pt_top_range(common, new_top_of_table);
+ struct pt_state pts = pt_init_top(&top_range);
+
+ top_range.va = range->va;
+ top_range.last_va = range->last_va;
+
+ if (!pt_check_range(&top_range) &&
+ map->leaf_level <= pts.level) {
+ new_level = pts.level;
+ break;
+ }
+
+ pts.level++;
+ if (pts.level > PT_MAX_TOP_LEVEL ||
+ pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2) {
+ ret = -ERANGE;
+ goto err_free;
+ }
+
+ table_mem =
+ table_alloc_top(common, _pt_top_set(NULL, pts.level),
+ map->attrs.gfp, ALLOC_DEFER_COHERENT_FLUSH);
+ if (IS_ERR(table_mem)) {
+ ret = PTR_ERR(table_mem);
+ goto err_free;
+ }
+ iommu_pages_list_add(&free_list, table_mem);
+
+ /* The new table links to the lower table always at index 0 */
+ top_range.va = 0;
+ top_range.top_level = pts.level;
+ pts.table_lower = pts.table;
+ pts.table = table_mem;
+ pt_load_single_entry(&pts);
+ PT_WARN_ON(pts.index != 0);
+ pt_install_table(&pts, virt_to_phys(pts.table_lower),
+ &map->attrs);
+ new_top_of_table = _pt_top_set(pts.table, pts.level);
+ }
+
+ /*
+ * Avoid double flushing; flush once after all the pt_install_table() calls.
+ */
+ if (pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
+ ret = iommu_pages_start_incoherent_list(
+ &free_list, iommu_table->iommu_device);
+ if (ret)
+ goto err_free;
+ }
+
+ /*
+ * top_of_table is write locked by the spinlock, but readers can use
+ * READ_ONCE() to get the value. Since we encode both the level and the
+ * pointer in a single value the lockless reader will always see something
+ * valid. The HW must be updated to the new level under the spinlock
+ * before top_of_table is updated so that concurrent readers don't map
+ * into the new level until it is fully functional. If another thread
+ * already updated it while we were working then throw everything away
+ * and try again.
+ */
+ domain_lock = iommu_table->driver_ops->get_top_lock(iommu_table);
+ spin_lock_irqsave(domain_lock, flags);
+ if (common->top_of_table != top_of_table ||
+ top_of_table == new_top_of_table) {
+ spin_unlock_irqrestore(domain_lock, flags);
+ ret = -EAGAIN;
+ goto err_free;
+ }
+
+ /*
+ * We do not issue any flushes for change_top on the expectation that
+ * any walk cache will not become a problem by adding another layer to
+ * the tree. Misses will rewalk from the updated top pointer, hits
+ * continue to be correct. Negative caching is fine too since all the
+ * new IOVA added by the new top is non-present.
+ */
+ iommu_table->driver_ops->change_top(
+ iommu_table, virt_to_phys(table_mem), new_level);
+ WRITE_ONCE(common->top_of_table, new_top_of_table);
+ spin_unlock_irqrestore(domain_lock, flags);
+ return 0;
+
+err_free:
+ if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
+ iommu_pages_stop_incoherent_list(&free_list,
+ iommu_table->iommu_device);
+ iommu_put_pages_list(&free_list);
+ return ret;
+}
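+
+/*
+ * Encoding sketch: the reason the lockless read side works is that the level
+ * and the table pointer travel together in one word (helper names follow the
+ * _pt_top_*() helpers used above):
+ *
+ *   Writer (under the top lock):
+ *       WRITE_ONCE(common->top_of_table, _pt_top_set(table, level));
+ *   Reader (no lock):
+ *       uintptr_t top = READ_ONCE(common->top_of_table);
+ *       decodes to the matching (table, level) pair
+ *
+ * A reader therefore sees either the old pair or the new pair, never a mix.
+ */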
+
+static int check_map_range(struct pt_iommu *iommu_table, struct pt_range *range,
+ struct pt_iommu_map_args *map)
+{
+ struct pt_common *common = common_from_iommu(iommu_table);
+ int ret;
+
+ do {
+ ret = pt_check_range(range);
+ if (!pt_feature(common, PT_FEAT_DYNAMIC_TOP))
+ return ret;
+
+ if (!ret && map->leaf_level <= range->top_level)
+ break;
+
+ ret = increase_top(iommu_table, range, map);
+ if (ret && ret != -EAGAIN)
+ return ret;
+
+ /* Reload the new top */
+ *range = pt_make_range(common, range->va, range->last_va);
+ } while (ret);
+ PT_WARN_ON(pt_check_range(range));
+ return 0;
+}
+
+static int do_map(struct pt_range *range, struct pt_common *common,
+ bool single_page, struct pt_iommu_map_args *map)
+{
+ /*
+ * The __map_single_page() fast path does not support DMA_INCOHERENT
+ * flushing to keep its .text small.
+ */
+ if (single_page && !pt_feature(common, PT_FEAT_DMA_INCOHERENT)) {
+ int ret;
+
+ ret = pt_walk_range(range, __map_single_page, map);
+ if (ret != -EAGAIN)
+ return ret;
+ /* EAGAIN falls through to the full path */
+ }
+
+ if (map->leaf_level == range->top_level)
+ return pt_walk_range(range, __map_range_leaf, map);
+ return pt_walk_range(range, __map_range, map);
+}
+
+/**
+ * map_pages() - Install translation for an IOVA range
+ * @domain: Domain to manipulate
+ * @iova: IO virtual address to start
+ * @paddr: Physical/Output address to start
+ * @pgsize: Length of each page
+ * @pgcount: Length of the range in pgsize units starting from @iova
+ * @prot: A bitmap of IOMMU_READ/WRITE/CACHE/NOEXEC/MMIO
+ * @gfp: GFP flags for any memory allocations
+ * @mapped: Total bytes successfully mapped
+ *
+ * The range starting at IOVA will have paddr installed into it. The caller
+ * must specify a valid pgsize and pgcount to segment the range into compatible
+ * blocks.
+ *
+ * On error the caller will probably want to invoke unmap on the range from iova
+ * up to the amount indicated by @mapped to return the table back to an
+ * unchanged state.
+ *
+ * Context: The caller must hold a write range lock that includes the whole
+ * range.
+ *
+ * Returns: -ERRNO on failure, 0 on success. The number of bytes of VA that
+ * were mapped is added to @mapped; @mapped is not zeroed first.
+ */
+int DOMAIN_NS(map_pages)(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
+{
+ struct pt_iommu *iommu_table =
+ container_of(domain, struct pt_iommu, domain);
+ pt_vaddr_t pgsize_bitmap = iommu_table->domain.pgsize_bitmap;
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct iommu_iotlb_gather iotlb_gather;
+ pt_vaddr_t len = pgsize * pgcount;
+ struct pt_iommu_map_args map = {
+ .iotlb_gather = &iotlb_gather,
+ .oa = paddr,
+ .leaf_pgsize_lg2 = vaffs(pgsize),
+ };
+ bool single_page = false;
+ struct pt_range range;
+ int ret;
+
+ iommu_iotlb_gather_init(&iotlb_gather);
+
+ if (WARN_ON(!(prot & (IOMMU_READ | IOMMU_WRITE))))
+ return -EINVAL;
+
+ /* Check the paddr doesn't exceed what the table can store */
+ if ((sizeof(pt_oaddr_t) < sizeof(paddr) &&
+ (pt_vaddr_t)paddr > PT_VADDR_MAX) ||
+ (common->max_oasz_lg2 != PT_VADDR_MAX_LG2 &&
+ oalog2_div(paddr, common->max_oasz_lg2)))
+ return -ERANGE;
+
+ ret = pt_iommu_set_prot(common, &map.attrs, prot);
+ if (ret)
+ return ret;
+ map.attrs.gfp = gfp;
+
+ ret = make_range_no_check(common, &range, iova, len);
+ if (ret)
+ return ret;
+
+ /* Calculate target page size and level for the leaves */
+ if (pt_has_system_page_size(common) && pgsize == PAGE_SIZE &&
+ pgcount == 1) {
+ PT_WARN_ON(!(pgsize_bitmap & PAGE_SIZE));
+ if (log2_mod(iova | paddr, PAGE_SHIFT))
+ return -ENXIO;
+ map.leaf_pgsize_lg2 = PAGE_SHIFT;
+ map.leaf_level = 0;
+ single_page = true;
+ } else {
+ map.leaf_pgsize_lg2 = pt_compute_best_pgsize(
+ pgsize_bitmap, range.va, range.last_va, paddr);
+ if (!map.leaf_pgsize_lg2)
+ return -ENXIO;
+ map.leaf_level =
+ pt_pgsz_lg2_to_level(common, map.leaf_pgsize_lg2);
+ }
+
+ ret = check_map_range(iommu_table, &range, &map);
+ if (ret)
+ return ret;
+
+ PT_WARN_ON(map.leaf_level > range.top_level);
+
+ ret = do_map(&range, common, single_page, &map);
+
+ /*
+ * Table levels were freed and replaced with large items, flush any walk
+ * cache that may refer to the freed levels.
+ */
+ if (!iommu_pages_list_empty(&iotlb_gather.freelist))
+ iommu_iotlb_sync(&iommu_table->domain, &iotlb_gather);
+
+ /* Bytes successfully mapped */
+ PT_WARN_ON(!ret && map.oa - paddr != len);
+ *mapped += map.oa - paddr;
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(map_pages), "GENERIC_PT_IOMMU");
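+
+/*
+ * Error-handling sketch per the note above (the amdv1 prefix is arbitrary,
+ * every format instantiates the same entry points; npages is illustrative):
+ *
+ *   size_t mapped = 0;
+ *
+ *   ret = pt_iommu_amdv1_map_pages(domain, iova, paddr, SZ_4K, npages,
+ *                                  IOMMU_READ | IOMMU_WRITE, GFP_KERNEL,
+ *                                  &mapped);
+ *   if (ret && mapped) {
+ *           struct iommu_iotlb_gather gather;
+ *
+ *           iommu_iotlb_gather_init(&gather);
+ *           pt_iommu_amdv1_unmap_pages(domain, iova, SZ_4K,
+ *                                      mapped / SZ_4K, &gather);
+ *           iommu_iotlb_sync(domain, &gather);
+ *   }
+ */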
+
+struct pt_unmap_args {
+ struct iommu_pages_list free_list;
+ pt_vaddr_t unmapped;
+};
+
+static __maybe_unused int __unmap_range(struct pt_range *range, void *arg,
+ unsigned int level,
+ struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct pt_unmap_args *unmap = arg;
+ unsigned int num_oas = 0;
+ unsigned int start_index;
+ int ret = 0;
+
+ _pt_iter_first(&pts);
+ start_index = pts.index;
+ pts.type = pt_load_entry_raw(&pts);
+ /*
+ * A starting index is in the middle of a contiguous entry
+ *
+ * The IOMMU API does not require drivers to support unmapping parts of
+ * large pages. Long ago VFIO would try to split maps but the current
+ * version never does.
+ *
+ * Instead when unmap reaches a partial unmap of the start of a large
+ * IOPTE it should remove the entire IOPTE and return that size to the
+ * caller.
+ */
+ if (pts.type == PT_ENTRY_OA) {
+ if (log2_mod(range->va, pt_entry_oa_lg2sz(&pts)))
+ return -EINVAL;
+ /* Micro optimization */
+ goto start_oa;
+ }
+
+ do {
+ if (pts.type != PT_ENTRY_OA) {
+ bool fully_covered;
+
+ if (pts.type != PT_ENTRY_TABLE) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (pts.index != start_index)
+ pt_index_to_va(&pts);
+ pts.table_lower = pt_table_ptr(&pts);
+
+ fully_covered = pt_entry_fully_covered(
+ &pts, pt_table_item_lg2sz(&pts));
+
+ ret = pt_descend(&pts, arg, __unmap_range);
+ if (ret)
+ break;
+
+ /*
+ * If the unmapping range fully covers the table then we
+ * can free it as well. The clear is delayed until we
+ * succeed in clearing the lower table levels.
+ */
+ if (fully_covered) {
+ iommu_pages_list_add(&unmap->free_list,
+ pts.table_lower);
+ pt_clear_entries(&pts, ilog2(1));
+ }
+ pts.index++;
+ } else {
+ unsigned int num_contig_lg2;
+start_oa:
+ /*
+ * If the caller requested a last address that falls within a
+ * single entry then the entire entry is unmapped and
+ * the length returned will be larger than requested.
+ */
+ num_contig_lg2 = pt_entry_num_contig_lg2(&pts);
+ pt_clear_entries(&pts, num_contig_lg2);
+ num_oas += log2_to_int(num_contig_lg2);
+ pts.index += log2_to_int(num_contig_lg2);
+ }
+ if (pts.index >= pts.end_index)
+ break;
+ pts.type = pt_load_entry_raw(&pts);
+ } while (true);
+
+ unmap->unmapped += log2_mul(num_oas, pt_table_item_lg2sz(&pts));
+ flush_writes_range(&pts, start_index, pts.index);
+
+ return ret;
+}
+
+/**
+ * unmap_pages() - Make a range of IOVA empty/not present
+ * @domain: Domain to manipulate
+ * @iova: IO virtual address to start
+ * @pgsize: Length of each page
+ * @pgcount: Length of the range in pgsize units starting from @iova
+ * @iotlb_gather: Gather struct that must be flushed on return
+ *
+ * unmap_pages() will remove a translation created by map_pages(). It cannot
+ * subdivide a mapping created by map_pages(), so it should be called with IOVA
+ * ranges that match those passed to map_pages(). The IOVA range can aggregate
+ * contiguous map_pages() calls so long as no individual range is split.
+ *
+ * Context: The caller must hold a write range lock that includes
+ * the whole range.
+ *
+ * Returns: Number of bytes of VA unmapped. iova + res will be the point at
+ * which unmapping stopped.
+ */
+size_t DOMAIN_NS(unmap_pages)(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *iotlb_gather)
+{
+ struct pt_iommu *iommu_table =
+ container_of(domain, struct pt_iommu, domain);
+ struct pt_unmap_args unmap = { .free_list = IOMMU_PAGES_LIST_INIT(
+ unmap.free_list) };
+ pt_vaddr_t len = pgsize * pgcount;
+ struct pt_range range;
+ int ret;
+
+ ret = make_range(common_from_iommu(iommu_table), &range, iova, len);
+ if (ret)
+ return 0;
+
+ pt_walk_range(&range, __unmap_range, &unmap);
+
+ gather_range_pages(iotlb_gather, iommu_table, iova, len,
+ &unmap.free_list);
+
+ return unmap.unmapped;
+}
+EXPORT_SYMBOL_NS_GPL(DOMAIN_NS(unmap_pages), "GENERIC_PT_IOMMU");
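+
+/*
+ * Caller sketch, mirroring what the iommu core does around unmap (names as
+ * in the previous sketch):
+ *
+ *   struct iommu_iotlb_gather gather;
+ *
+ *   iommu_iotlb_gather_init(&gather);
+ *   unmapped = pt_iommu_amdv1_unmap_pages(domain, iova, SZ_4K, npages,
+ *                                         &gather);
+ *   iommu_iotlb_sync(domain, &gather);
+ *
+ * The sync both invalidates the IOTLB and frees any table pages that
+ * gather_range_pages() queued on the gather's free list.
+ */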
+
+static void NS(get_info)(struct pt_iommu *iommu_table,
+ struct pt_iommu_info *info)
+{
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_range range = pt_top_range(common);
+ struct pt_state pts = pt_init_top(&range);
+ pt_vaddr_t pgsize_bitmap = 0;
+
+ if (pt_feature(common, PT_FEAT_DYNAMIC_TOP)) {
+ for (pts.level = 0; pts.level <= PT_MAX_TOP_LEVEL;
+ pts.level++) {
+ if (pt_table_item_lg2sz(&pts) >= common->max_vasz_lg2)
+ break;
+ pgsize_bitmap |= pt_possible_sizes(&pts);
+ }
+ } else {
+ for (pts.level = 0; pts.level <= range.top_level; pts.level++)
+ pgsize_bitmap |= pt_possible_sizes(&pts);
+ }
+
+ /* Hide page sizes larger than the maximum OA */
+ info->pgsize_bitmap = oalog2_mod(pgsize_bitmap, common->max_oasz_lg2);
+}
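+
+/*
+ * Worked example (sketch): for a format shaped like vtdss (4 KiB tables,
+ * 9 index bits per level, leaves possible at levels 0-2), and ignoring any
+ * contiguous-entry sizes a format may add, the levels contribute lg2 sizes
+ * 12, 21 and 30, i.e.:
+ *
+ *   info->pgsize_bitmap == SZ_4K | SZ_2M | SZ_1G
+ */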
+
+static void NS(deinit)(struct pt_iommu *iommu_table)
+{
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_range range = pt_all_range(common);
+ struct pt_iommu_collect_args collect = {
+ .free_list = IOMMU_PAGES_LIST_INIT(collect.free_list),
+ };
+
+ iommu_pages_list_add(&collect.free_list, range.top_table);
+ pt_walk_range(&range, __collect_tables, &collect);
+
+ /*
+ * The driver has to already have fenced the HW access to the page table
+ * and invalidated any caching referring to this memory.
+ */
+ if (pt_feature(common, PT_FEAT_DMA_INCOHERENT))
+ iommu_pages_stop_incoherent_list(&collect.free_list,
+ iommu_table->iommu_device);
+ iommu_put_pages_list(&collect.free_list);
+}
+
+static const struct pt_iommu_ops NS(ops) = {
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER) && defined(pt_entry_is_write_dirty) && \
+ IS_ENABLED(CONFIG_IOMMUFD_TEST) && defined(pt_entry_make_write_dirty)
+ .set_dirty = NS(set_dirty),
+#endif
+ .get_info = NS(get_info),
+ .deinit = NS(deinit),
+};
+
+static int pt_init_common(struct pt_common *common)
+{
+ struct pt_range top_range = pt_top_range(common);
+
+ if (PT_WARN_ON(top_range.top_level > PT_MAX_TOP_LEVEL))
+ return -EINVAL;
+
+ if (top_range.top_level == PT_MAX_TOP_LEVEL ||
+ common->max_vasz_lg2 == top_range.max_vasz_lg2)
+ common->features &= ~BIT(PT_FEAT_DYNAMIC_TOP);
+
+ if (top_range.max_vasz_lg2 == PT_VADDR_MAX_LG2)
+ common->features |= BIT(PT_FEAT_FULL_VA);
+
+ /* Requested features must match features compiled into this format */
+ if ((common->features & ~(unsigned int)PT_SUPPORTED_FEATURES) ||
+ (!IS_ENABLED(CONFIG_DEBUG_GENERIC_PT) &&
+ (common->features & PT_FORCE_ENABLED_FEATURES) !=
+ PT_FORCE_ENABLED_FEATURES))
+ return -EOPNOTSUPP;
+
+ /*
+ * Check if the top level of the page table is too small to hold the
+ * specified max_vasz_lg2.
+ */
+ if (!pt_feature(common, PT_FEAT_DYNAMIC_TOP) &&
+ top_range.top_level != PT_MAX_TOP_LEVEL) {
+ struct pt_state pts = { .range = &top_range,
+ .level = top_range.top_level };
+
+ if (common->max_vasz_lg2 >
+ pt_num_items_lg2(&pts) + pt_table_item_lg2sz(&pts))
+ return -EOPNOTSUPP;
+ }
+
+ if (common->max_oasz_lg2 == 0)
+ common->max_oasz_lg2 = pt_max_oa_lg2(common);
+ else
+ common->max_oasz_lg2 = min(common->max_oasz_lg2,
+ pt_max_oa_lg2(common));
+ return 0;
+}
+
+static int pt_iommu_init_domain(struct pt_iommu *iommu_table,
+ struct iommu_domain *domain)
+{
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_iommu_info info;
+ struct pt_range range;
+
+ NS(get_info)(iommu_table, &info);
+
+ domain->type = __IOMMU_DOMAIN_PAGING;
+ domain->pgsize_bitmap = info.pgsize_bitmap;
+
+ if (pt_feature(common, PT_FEAT_DYNAMIC_TOP))
+ range = _pt_top_range(common,
+ _pt_top_set(NULL, PT_MAX_TOP_LEVEL));
+ else
+ range = pt_top_range(common);
+
+ /* A 64-bit high address space table on a 32-bit system cannot work. */
+ domain->geometry.aperture_start = (unsigned long)range.va;
+ if ((pt_vaddr_t)domain->geometry.aperture_start != range.va)
+ return -EOVERFLOW;
+
+ /*
+ * The aperture is limited to what the API can do after considering all
+ * the different types dma_addr_t/unsigned long/pt_vaddr_t that are used
+ * to store a VA. Set the aperture to something that is valid for all
+ * cases. Saturate instead of truncate the end if the types are smaller
+ * than the top range. aperture_end should be called aperture_last.
+ */
+ domain->geometry.aperture_end = (unsigned long)range.last_va;
+ if ((pt_vaddr_t)domain->geometry.aperture_end != range.last_va) {
+ domain->geometry.aperture_end = ULONG_MAX;
+ domain->pgsize_bitmap &= ULONG_MAX;
+ }
+ domain->geometry.force_aperture = true;
+
+ return 0;
+}
+
+static void pt_iommu_zero(struct pt_iommu_table *fmt_table)
+{
+ struct pt_iommu *iommu_table = &fmt_table->iommu;
+ struct pt_iommu cfg = *iommu_table;
+
+ static_assert(offsetof(struct pt_iommu_table, iommu.domain) == 0);
+ memset_after(fmt_table, 0, iommu.domain);
+
+ /* The caller can initialize some of these values */
+ iommu_table->iommu_device = cfg.iommu_device;
+ iommu_table->driver_ops = cfg.driver_ops;
+ iommu_table->nid = cfg.nid;
+}
+
+#define pt_iommu_table_cfg CONCATENATE(pt_iommu_table, _cfg)
+#define pt_iommu_init CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), init)
+
+int pt_iommu_init(struct pt_iommu_table *fmt_table,
+ const struct pt_iommu_table_cfg *cfg, gfp_t gfp)
+{
+ struct pt_iommu *iommu_table = &fmt_table->iommu;
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_table_p *table_mem;
+ int ret;
+
+ if (cfg->common.hw_max_vasz_lg2 > PT_MAX_VA_ADDRESS_LG2 ||
+ !cfg->common.hw_max_vasz_lg2 || !cfg->common.hw_max_oasz_lg2)
+ return -EINVAL;
+
+ pt_iommu_zero(fmt_table);
+ common->features = cfg->common.features;
+ common->max_vasz_lg2 = cfg->common.hw_max_vasz_lg2;
+ common->max_oasz_lg2 = cfg->common.hw_max_oasz_lg2;
+ ret = pt_iommu_fmt_init(fmt_table, cfg);
+ if (ret)
+ return ret;
+
+ if (cfg->common.hw_max_oasz_lg2 > pt_max_oa_lg2(common))
+ return -EINVAL;
+
+ ret = pt_init_common(common);
+ if (ret)
+ return ret;
+
+ if (pt_feature(common, PT_FEAT_DYNAMIC_TOP) &&
+ WARN_ON(!iommu_table->driver_ops ||
+ !iommu_table->driver_ops->change_top ||
+ !iommu_table->driver_ops->get_top_lock))
+ return -EINVAL;
+
+ if (pt_feature(common, PT_FEAT_SIGN_EXTEND) &&
+ (pt_feature(common, PT_FEAT_FULL_VA) ||
+ pt_feature(common, PT_FEAT_DYNAMIC_TOP)))
+ return -EINVAL;
+
+ if (pt_feature(common, PT_FEAT_DMA_INCOHERENT) &&
+ WARN_ON(!iommu_table->iommu_device))
+ return -EINVAL;
+
+ ret = pt_iommu_init_domain(iommu_table, &iommu_table->domain);
+ if (ret)
+ return ret;
+
+ table_mem = table_alloc_top(common, common->top_of_table, gfp,
+ ALLOC_NORMAL);
+ if (IS_ERR(table_mem))
+ return PTR_ERR(table_mem);
+ pt_top_set(common, table_mem, pt_top_get_level(common));
+
+ /* Must be last, see pt_iommu_deinit() */
+ iommu_table->ops = &NS(ops);
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(pt_iommu_init, "GENERIC_PT_IOMMU");
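+
+/*
+ * A driver-side usage sketch (hedged; the values and the error label are
+ * illustrative, not from a specific driver). With no features that need
+ * driver_ops or iommu_device, only the cfg and nid have to be filled in:
+ *
+ *	struct pt_iommu_table table = {};
+ *	struct pt_iommu_table_cfg cfg = {
+ *		.common = {
+ *			.hw_max_vasz_lg2 = 48,
+ *			.hw_max_oasz_lg2 = 52,
+ *		},
+ *	};
+ *
+ *	table.iommu.nid = NUMA_NO_NODE;
+ *	if (pt_iommu_init(&table, &cfg, GFP_KERNEL))
+ *		goto err;
+ */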
+
+#ifdef pt_iommu_fmt_hw_info
+#define pt_iommu_table_hw_info CONCATENATE(pt_iommu_table, _hw_info)
+#define pt_iommu_hw_info CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), hw_info)
+void pt_iommu_hw_info(struct pt_iommu_table *fmt_table,
+ struct pt_iommu_table_hw_info *info)
+{
+ struct pt_iommu *iommu_table = &fmt_table->iommu;
+ struct pt_common *common = common_from_iommu(iommu_table);
+ struct pt_range top_range = pt_top_range(common);
+
+ pt_iommu_fmt_hw_info(fmt_table, &top_range, info);
+}
+EXPORT_SYMBOL_NS_GPL(pt_iommu_hw_info, "GENERIC_PT_IOMMU");
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IOMMU Page table implementation for " __stringify(PTPFX_RAW));
+MODULE_IMPORT_NS("GENERIC_PT");
+/* For iommu_dirty_bitmap_record() */
+MODULE_IMPORT_NS("IOMMUFD");
+
+#endif /* __GENERIC_PT_IOMMU_PT_H */
diff --git a/drivers/iommu/generic_pt/kunit_generic_pt.h b/drivers/iommu/generic_pt/kunit_generic_pt.h
new file mode 100644
index 000000000000..68278bf15cfe
--- /dev/null
+++ b/drivers/iommu/generic_pt/kunit_generic_pt.h
@@ -0,0 +1,823 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * Test the format API directly.
+ */
+#include "kunit_iommu.h"
+#include "pt_iter.h"
+
+static void do_map(struct kunit *test, pt_vaddr_t va, pt_oaddr_t pa,
+ pt_vaddr_t len)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ int ret;
+
+ KUNIT_ASSERT_EQ(test, len, (size_t)len);
+
+ ret = iommu_map(&priv->domain, va, pa, len, IOMMU_READ | IOMMU_WRITE,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NO_ERRNO_FN(test, "map_pages", ret);
+}
+
+#define KUNIT_ASSERT_PT_LOAD(test, pts, entry) \
+ ({ \
+ pt_load_entry(pts); \
+ KUNIT_ASSERT_EQ(test, (pts)->type, entry); \
+ })
+
+struct check_levels_arg {
+ struct kunit *test;
+ void *fn_arg;
+ void (*fn)(struct kunit *test, struct pt_state *pts, void *arg);
+};
+
+static int __check_all_levels(struct pt_range *range, void *arg,
+ unsigned int level, struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct check_levels_arg *chk = arg;
+ struct kunit *test = chk->test;
+ int ret;
+
+ _pt_iter_first(&pts);
+
+ /*
+ * If we were able to use the full VA space, this should always be the
+ * last index in each table.
+ */
+ if (!(IS_32BIT && range->max_vasz_lg2 > 32)) {
+ if (pt_feature(range->common, PT_FEAT_SIGN_EXTEND) &&
+ pts.level == pts.range->top_level)
+ KUNIT_ASSERT_EQ(test, pts.index,
+ log2_to_int(range->max_vasz_lg2 - 1 -
+ pt_table_item_lg2sz(&pts)) -
+ 1);
+ else
+ KUNIT_ASSERT_EQ(test, pts.index,
+ log2_to_int(pt_table_oa_lg2sz(&pts) -
+ pt_table_item_lg2sz(&pts)) -
+ 1);
+ }
+
+ if (pt_can_have_table(&pts)) {
+ pt_load_single_entry(&pts);
+ KUNIT_ASSERT_EQ(test, pts.type, PT_ENTRY_TABLE);
+ ret = pt_descend(&pts, arg, __check_all_levels);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Index 0 is used by the test */
+ if (IS_32BIT && !pts.index)
+ return 0;
+ KUNIT_ASSERT_NE(chk->test, pts.index, 0);
+ }
+
+ /*
+ * A format should not create a table with only one entry; at the least,
+ * this test approach won't work on such a format.
+ */
+ KUNIT_ASSERT_GT(chk->test, pts.end_index, 1);
+
+ /*
+ * When the top has been increased, index 0 holds the original top's
+ * tree, so use index 1 for testing instead.
+ */
+ pts.index = 0;
+ pt_index_to_va(&pts);
+ pt_load_single_entry(&pts);
+ if (pts.type == PT_ENTRY_TABLE && pts.end_index > 2) {
+ pts.index = 1;
+ pt_index_to_va(&pts);
+ }
+ (*chk->fn)(chk->test, &pts, chk->fn_arg);
+ return 0;
+}
+
+/*
+ * Call fn for each level in the table with a pts set up to index 0 in a table
+ * for that level. This allows writing tests that run on every level.
+ * The test can use every index in the table except the last one.
+ */
+static void check_all_levels(struct kunit *test,
+ void (*fn)(struct kunit *test,
+ struct pt_state *pts, void *arg),
+ void *fn_arg)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_range range = pt_top_range(priv->common);
+ struct check_levels_arg chk = {
+ .test = test,
+ .fn = fn,
+ .fn_arg = fn_arg,
+ };
+ int ret;
+
+ if (pt_feature(priv->common, PT_FEAT_DYNAMIC_TOP) &&
+ priv->common->max_vasz_lg2 > range.max_vasz_lg2)
+ range.last_va = fvalog2_set_mod_max(range.va,
+ priv->common->max_vasz_lg2);
+
+ /*
+ * Map a page at the highest VA; this will populate all the levels so we
+ * can then iterate over them. Index 0 will be used for testing.
+ */
+ if (IS_32BIT && range.max_vasz_lg2 > 32)
+ range.last_va = (u32)range.last_va;
+ range.va = range.last_va - (priv->smallest_pgsz - 1);
+ do_map(test, range.va, 0, priv->smallest_pgsz);
+
+ range = pt_make_range(priv->common, range.va, range.last_va);
+ ret = pt_walk_range(&range, __check_all_levels, &chk);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+}
+
+static void test_init(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+
+ /* Fixture does the setup */
+ KUNIT_ASSERT_NE(test, priv->info.pgsize_bitmap, 0);
+}
+
+/*
+ * Basic check that the log2_* functions are working, especially at the integer
+ * limits.
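+ *
+ * For example, matching the assertions below: fls_t(u32, BIT(2)) == 3,
+ * ffs_t(u32, BIT(2)) == 2 and ffz_t(u32, BIT(3) - 1) == 3.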
+ */
+static void test_bitops(struct kunit *test)
+{
+ int i;
+
+ KUNIT_ASSERT_EQ(test, fls_t(u32, 0), 0);
+ KUNIT_ASSERT_EQ(test, fls_t(u32, 1), 1);
+ KUNIT_ASSERT_EQ(test, fls_t(u32, BIT(2)), 3);
+ KUNIT_ASSERT_EQ(test, fls_t(u32, U32_MAX), 32);
+
+ KUNIT_ASSERT_EQ(test, fls_t(u64, 0), 0);
+ KUNIT_ASSERT_EQ(test, fls_t(u64, 1), 1);
+ KUNIT_ASSERT_EQ(test, fls_t(u64, BIT(2)), 3);
+ KUNIT_ASSERT_EQ(test, fls_t(u64, U64_MAX), 64);
+
+ KUNIT_ASSERT_EQ(test, ffs_t(u32, 1), 0);
+ KUNIT_ASSERT_EQ(test, ffs_t(u32, BIT(2)), 2);
+ KUNIT_ASSERT_EQ(test, ffs_t(u32, BIT(31)), 31);
+
+ KUNIT_ASSERT_EQ(test, ffs_t(u64, 1), 0);
+ KUNIT_ASSERT_EQ(test, ffs_t(u64, BIT(2)), 2);
+ KUNIT_ASSERT_EQ(test, ffs_t(u64, BIT_ULL(63)), 63);
+
+ for (i = 0; i != 31; i++)
+ KUNIT_ASSERT_EQ(test, ffz_t(u32, BIT(i) - 1), i);
+
+ for (i = 0; i != 63; i++)
+ KUNIT_ASSERT_EQ(test, ffz_t(u64, BIT_ULL(i) - 1), i);
+
+ for (i = 0; i != 32; i++) {
+ u64 val = get_random_u64();
+
+ KUNIT_ASSERT_EQ(test, log2_mod_t(u32, val, ffs_t(u32, val)), 0);
+ KUNIT_ASSERT_EQ(test, log2_mod_t(u64, val, ffs_t(u64, val)), 0);
+
+ KUNIT_ASSERT_EQ(test, log2_mod_t(u32, val, ffz_t(u32, val)),
+ log2_to_max_int_t(u32, ffz_t(u32, val)));
+ KUNIT_ASSERT_EQ(test, log2_mod_t(u64, val, ffz_t(u64, val)),
+ log2_to_max_int_t(u64, ffz_t(u64, val)));
+ }
+}
+
+static unsigned int ref_best_pgsize(pt_vaddr_t pgsz_bitmap, pt_vaddr_t va,
+ pt_vaddr_t last_va, pt_oaddr_t oa)
+{
+ pt_vaddr_t pgsz_lg2;
+
+ /* Brute force the constraints described in pt_compute_best_pgsize() */
+ for (pgsz_lg2 = PT_VADDR_MAX_LG2 - 1; pgsz_lg2 != 0; pgsz_lg2--) {
+ if ((pgsz_bitmap & log2_to_int(pgsz_lg2)) &&
+ log2_mod(va, pgsz_lg2) == 0 &&
+ oalog2_mod(oa, pgsz_lg2) == 0 &&
+ va + log2_to_int(pgsz_lg2) - 1 <= last_va &&
+ log2_div_eq(va, va + log2_to_int(pgsz_lg2) - 1, pgsz_lg2) &&
+ oalog2_div_eq(oa, oa + log2_to_int(pgsz_lg2) - 1, pgsz_lg2))
+ return pgsz_lg2;
+ }
+ return 0;
+}
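+
+/*
+ * As a concrete instance of those constraints (illustrative numbers):
+ * with pgsz_bitmap == (SZ_4K | SZ_2M), va == 0x200000, oa == 0x400000
+ * and last_va == 0x5fffff, both addresses are 2M aligned and the 2M
+ * entry still ends at or before last_va, so the best size is 2M
+ * (pgsz_lg2 == 21).
+ */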
+
+/* Check that the bit logic in pt_compute_best_pgsize() works. */
+static void test_best_pgsize(struct kunit *test)
+{
+ unsigned int a_lg2;
+ unsigned int b_lg2;
+ unsigned int c_lg2;
+
+ /* Try random prefixes with every suffix combination */
+ for (a_lg2 = 1; a_lg2 != 10; a_lg2++) {
+ for (b_lg2 = 1; b_lg2 != 10; b_lg2++) {
+ for (c_lg2 = 1; c_lg2 != 10; c_lg2++) {
+ pt_vaddr_t pgsz_bitmap = get_random_u64();
+ pt_vaddr_t va = get_random_u64() << a_lg2;
+ pt_oaddr_t oa = get_random_u64() << b_lg2;
+ pt_vaddr_t last_va = log2_set_mod_max(
+ get_random_u64(), c_lg2);
+
+ if (va > last_va)
+ swap(va, last_va);
+ KUNIT_ASSERT_EQ(
+ test,
+ pt_compute_best_pgsize(pgsz_bitmap, va,
+ last_va, oa),
+ ref_best_pgsize(pgsz_bitmap, va,
+ last_va, oa));
+ }
+ }
+ }
+
+ /* 0 prefix, every suffix */
+ for (c_lg2 = 1; c_lg2 != PT_VADDR_MAX_LG2 - 1; c_lg2++) {
+ pt_vaddr_t pgsz_bitmap = get_random_u64();
+ pt_vaddr_t va = 0;
+ pt_oaddr_t oa = 0;
+ pt_vaddr_t last_va = log2_set_mod_max(0, c_lg2);
+
+ KUNIT_ASSERT_EQ(test,
+ pt_compute_best_pgsize(pgsz_bitmap, va, last_va,
+ oa),
+ ref_best_pgsize(pgsz_bitmap, va, last_va, oa));
+ }
+
+ /* 1's prefix, every suffix */
+ for (a_lg2 = 1; a_lg2 != 10; a_lg2++) {
+ for (b_lg2 = 1; b_lg2 != 10; b_lg2++) {
+ for (c_lg2 = 1; c_lg2 != 10; c_lg2++) {
+ pt_vaddr_t pgsz_bitmap = get_random_u64();
+ pt_vaddr_t va = PT_VADDR_MAX << a_lg2;
+ pt_oaddr_t oa = PT_VADDR_MAX << b_lg2;
+ pt_vaddr_t last_va = PT_VADDR_MAX;
+
+ KUNIT_ASSERT_EQ(
+ test,
+ pt_compute_best_pgsize(pgsz_bitmap, va,
+ last_va, oa),
+ ref_best_pgsize(pgsz_bitmap, va,
+ last_va, oa));
+ }
+ }
+ }
+
+ /* pgsize_bitmap is always 0 */
+ for (a_lg2 = 1; a_lg2 != 10; a_lg2++) {
+ for (b_lg2 = 1; b_lg2 != 10; b_lg2++) {
+ for (c_lg2 = 1; c_lg2 != 10; c_lg2++) {
+ pt_vaddr_t pgsz_bitmap = 0;
+ pt_vaddr_t va = get_random_u64() << a_lg2;
+ pt_oaddr_t oa = get_random_u64() << b_lg2;
+ pt_vaddr_t last_va = log2_set_mod_max(
+ get_random_u64(), c_lg2);
+
+ if (va > last_va)
+ swap(va, last_va);
+ KUNIT_ASSERT_EQ(
+ test,
+ pt_compute_best_pgsize(pgsz_bitmap, va,
+ last_va, oa),
+ 0);
+ }
+ }
+ }
+
+ if (sizeof(pt_vaddr_t) <= 4)
+ return;
+
+ /* over 32 bit page sizes */
+ for (a_lg2 = 32; a_lg2 != 42; a_lg2++) {
+ for (b_lg2 = 32; b_lg2 != 42; b_lg2++) {
+ for (c_lg2 = 32; c_lg2 != 42; c_lg2++) {
+ pt_vaddr_t pgsz_bitmap = get_random_u64();
+ pt_vaddr_t va = get_random_u64() << a_lg2;
+ pt_oaddr_t oa = get_random_u64() << b_lg2;
+ pt_vaddr_t last_va = log2_set_mod_max(
+ get_random_u64(), c_lg2);
+
+ if (va > last_va)
+ swap(va, last_va);
+ KUNIT_ASSERT_EQ(
+ test,
+ pt_compute_best_pgsize(pgsz_bitmap, va,
+ last_va, oa),
+ ref_best_pgsize(pgsz_bitmap, va,
+ last_va, oa));
+ }
+ }
+ }
+}
+
+/*
+ * Check that pt_install_table() and pt_table_pa() match
+ */
+static void test_lvl_table_ptr(struct kunit *test, struct pt_state *pts,
+ void *arg)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ pt_oaddr_t paddr =
+ log2_set_mod(priv->test_oa, 0, priv->smallest_pgsz_lg2);
+ struct pt_write_attrs attrs = {};
+
+ if (!pt_can_have_table(pts))
+ return;
+
+ KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
+ pt_iommu_set_prot(pts->range->common, &attrs,
+ IOMMU_READ));
+
+ pt_load_single_entry(pts);
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
+
+ KUNIT_ASSERT_TRUE(test, pt_install_table(pts, paddr, &attrs));
+
+ /* A second install should pass because install updates pts->entry. */
+ KUNIT_ASSERT_TRUE(test, pt_install_table(pts, paddr, &attrs));
+
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_TABLE);
+ KUNIT_ASSERT_EQ(test, pt_table_pa(pts), paddr);
+
+ pt_clear_entries(pts, ilog2(1));
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
+}
+
+static void test_table_ptr(struct kunit *test)
+{
+ check_all_levels(test, test_lvl_table_ptr, NULL);
+}
+
+struct lvl_radix_arg {
+ pt_vaddr_t vbits;
+};
+
+/*
+ * Check pt_table_oa_lg2sz() and pt_table_item_lg2sz(); together they need to
+ * decode a contiguous run of VA bits across all the levels, covering the
+ * entire advertised VA space.
+ */
+static void test_lvl_radix(struct kunit *test, struct pt_state *pts, void *arg)
+{
+ unsigned int table_lg2sz = pt_table_oa_lg2sz(pts);
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+ struct lvl_radix_arg *radix = arg;
+
+ /* Every bit below us is decoded */
+ KUNIT_ASSERT_EQ(test, log2_set_mod_max(0, isz_lg2), radix->vbits);
+
+ /* We are not decoding bits someone else is */
+ KUNIT_ASSERT_EQ(test, log2_div(radix->vbits, isz_lg2), 0);
+
+ /* Can't decode past the pt_vaddr_t size */
+ KUNIT_ASSERT_LE(test, table_lg2sz, PT_VADDR_MAX_LG2);
+ KUNIT_ASSERT_EQ(test, fvalog2_div(table_lg2sz, PT_MAX_VA_ADDRESS_LG2),
+ 0);
+
+ radix->vbits = fvalog2_set_mod_max(0, table_lg2sz);
+}
+
+static void test_max_va(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_range range = pt_top_range(priv->common);
+
+ KUNIT_ASSERT_GE(test, priv->common->max_vasz_lg2, range.max_vasz_lg2);
+}
+
+static void test_table_radix(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct lvl_radix_arg radix = { .vbits = priv->smallest_pgsz - 1 };
+ struct pt_range range;
+
+ check_all_levels(test, test_lvl_radix, &radix);
+
+ range = pt_top_range(priv->common);
+ if (range.max_vasz_lg2 == PT_VADDR_MAX_LG2) {
+ KUNIT_ASSERT_EQ(test, radix.vbits, PT_VADDR_MAX);
+ } else {
+ if (!IS_32BIT)
+ KUNIT_ASSERT_EQ(test,
+ log2_set_mod_max(0, range.max_vasz_lg2),
+ radix.vbits);
+ KUNIT_ASSERT_EQ(test, log2_div(radix.vbits, range.max_vasz_lg2),
+ 0);
+ }
+}
+
+static unsigned int safe_pt_num_items_lg2(const struct pt_state *pts)
+{
+ struct pt_range top_range = pt_top_range(pts->range->common);
+ struct pt_state top_pts = pt_init_top(&top_range);
+
+ /*
+ * Avoid calling pt_num_items_lg2() on the top; instead we can derive
+ * the size of the top table from the top range.
+ */
+ if (pts->level == top_range.top_level)
+ return ilog2(pt_range_to_end_index(&top_pts));
+ return pt_num_items_lg2(pts);
+}
+
+static void test_lvl_possible_sizes(struct kunit *test, struct pt_state *pts,
+ void *arg)
+{
+ unsigned int num_items_lg2 = safe_pt_num_items_lg2(pts);
+ pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+
+ if (!pt_can_have_leaf(pts)) {
+ KUNIT_ASSERT_EQ(test, pgsize_bitmap, 0);
+ return;
+ }
+
+ /* No bits for sizes that would be outside this table */
+ KUNIT_ASSERT_EQ(test, log2_mod(pgsize_bitmap, isz_lg2), 0);
+ KUNIT_ASSERT_EQ(
+ test, fvalog2_div(pgsize_bitmap, num_items_lg2 + isz_lg2), 0);
+
+ /*
+ * Non-contiguous entries must be supported. AMDv1 has a HW bug where one
+ * of its levels does not support them.
+ */
+ if ((u64)pgsize_bitmap != 0xff0000000000ULL ||
+ strcmp(__stringify(PTPFX_RAW), "amdv1") != 0)
+ KUNIT_ASSERT_TRUE(test, pgsize_bitmap & log2_to_int(isz_lg2));
+ else
+ KUNIT_ASSERT_NE(test, pgsize_bitmap, 0);
+
+ /* A contiguous entry should not span the whole table */
+ if (num_items_lg2 + isz_lg2 != PT_VADDR_MAX_LG2)
+ KUNIT_ASSERT_FALSE(
+ test,
+ pgsize_bitmap & log2_to_int(num_items_lg2 + isz_lg2));
+}
+
+static void test_entry_possible_sizes(struct kunit *test)
+{
+ check_all_levels(test, test_lvl_possible_sizes, NULL);
+}
+
+static void sweep_all_pgsizes(struct kunit *test, struct pt_state *pts,
+ struct pt_write_attrs *attrs,
+ pt_oaddr_t test_oaddr)
+{
+ pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+ unsigned int len_lg2;
+
+ if (pts->index != 0)
+ return;
+
+ for (len_lg2 = 0; len_lg2 < PT_VADDR_MAX_LG2 - 1; len_lg2++) {
+ struct pt_state sub_pts = *pts;
+ pt_oaddr_t oaddr;
+
+ if (!(pgsize_bitmap & log2_to_int(len_lg2)))
+ continue;
+
+ oaddr = log2_set_mod(test_oaddr, 0, len_lg2);
+ pt_install_leaf_entry(pts, oaddr, len_lg2, attrs);
+ /* Verify that every contiguous item translates correctly */
+ for (sub_pts.index = 0;
+ sub_pts.index != log2_to_int(len_lg2 - isz_lg2);
+ sub_pts.index++) {
+ KUNIT_ASSERT_PT_LOAD(test, &sub_pts, PT_ENTRY_OA);
+ KUNIT_ASSERT_EQ(test, pt_item_oa(&sub_pts),
+ oaddr + sub_pts.index *
+ oalog2_mul(1, isz_lg2));
+ KUNIT_ASSERT_EQ(test, pt_entry_oa(&sub_pts), oaddr);
+ KUNIT_ASSERT_EQ(test, pt_entry_num_contig_lg2(&sub_pts),
+ len_lg2 - isz_lg2);
+ }
+
+ pt_clear_entries(pts, len_lg2 - isz_lg2);
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
+ }
+}
+
+/*
+ * Check that pt_install_leaf_entry() and pt_entry_oa() match.
+ * Check that pt_clear_entries() works.
+ */
+static void test_lvl_entry_oa(struct kunit *test, struct pt_state *pts,
+ void *arg)
+{
+ unsigned int max_oa_lg2 = pts->range->common->max_oasz_lg2;
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_write_attrs attrs = {};
+
+ if (!pt_can_have_leaf(pts))
+ return;
+
+ KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
+ pt_iommu_set_prot(pts->range->common, &attrs,
+ IOMMU_READ));
+
+ sweep_all_pgsizes(test, pts, &attrs, priv->test_oa);
+
+ /* Check that the table can store the boundary OAs */
+ sweep_all_pgsizes(test, pts, &attrs, 0);
+ if (max_oa_lg2 == PT_OADDR_MAX_LG2)
+ sweep_all_pgsizes(test, pts, &attrs, PT_OADDR_MAX);
+ else
+ sweep_all_pgsizes(test, pts, &attrs,
+ oalog2_to_max_int(max_oa_lg2));
+}
+
+static void test_entry_oa(struct kunit *test)
+{
+ check_all_levels(test, test_lvl_entry_oa, NULL);
+}
+
+/* Test pt_attr_from_entry() */
+static void test_lvl_attr_from_entry(struct kunit *test, struct pt_state *pts,
+ void *arg)
+{
+ pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+ struct kunit_iommu_priv *priv = test->priv;
+ unsigned int len_lg2;
+ unsigned int prot;
+
+ if (!pt_can_have_leaf(pts))
+ return;
+
+ for (len_lg2 = 0; len_lg2 < PT_VADDR_MAX_LG2; len_lg2++) {
+ if (!(pgsize_bitmap & log2_to_int(len_lg2)))
+ continue;
+ for (prot = 0; prot <= (IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE |
+ IOMMU_NOEXEC | IOMMU_MMIO);
+ prot++) {
+ pt_oaddr_t oaddr;
+ struct pt_write_attrs attrs = {};
+ u64 good_entry;
+
+ /*
+ * If the format doesn't support this combination of
+ * prot bits, skip it.
+ */
+ if (pt_iommu_set_prot(pts->range->common, &attrs,
+ prot)) {
+ /* But RW has to be supported */
+ KUNIT_ASSERT_NE(test, prot,
+ IOMMU_READ | IOMMU_WRITE);
+ continue;
+ }
+
+ oaddr = log2_set_mod(priv->test_oa, 0, len_lg2);
+ pt_install_leaf_entry(pts, oaddr, len_lg2, &attrs);
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_OA);
+
+ good_entry = pts->entry;
+
+ memset(&attrs, 0, sizeof(attrs));
+ pt_attr_from_entry(pts, &attrs);
+
+ pt_clear_entries(pts, len_lg2 - isz_lg2);
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
+
+ pt_install_leaf_entry(pts, oaddr, len_lg2, &attrs);
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_OA);
+
+ /*
+ * The attrs produced by pt_attr_from_entry() must
+ * produce an identical entry value when re-written
+ */
+ KUNIT_ASSERT_EQ(test, good_entry, pts->entry);
+
+ pt_clear_entries(pts, len_lg2 - isz_lg2);
+ }
+ }
+}
+
+static void test_attr_from_entry(struct kunit *test)
+{
+ check_all_levels(test, test_lvl_attr_from_entry, NULL);
+}
+
+static void test_lvl_dirty(struct kunit *test, struct pt_state *pts, void *arg)
+{
+ pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+ struct kunit_iommu_priv *priv = test->priv;
+ unsigned int start_idx = pts->index;
+ struct pt_write_attrs attrs = {};
+ unsigned int len_lg2;
+
+ if (!pt_can_have_leaf(pts))
+ return;
+
+ KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
+ pt_iommu_set_prot(pts->range->common, &attrs,
+ IOMMU_READ | IOMMU_WRITE));
+
+ for (len_lg2 = 0; len_lg2 < PT_VADDR_MAX_LG2; len_lg2++) {
+ pt_oaddr_t oaddr;
+ unsigned int i;
+
+ if (!(pgsize_bitmap & log2_to_int(len_lg2)))
+ continue;
+
+ oaddr = log2_set_mod(priv->test_oa, 0, len_lg2);
+ pt_install_leaf_entry(pts, oaddr, len_lg2, &attrs);
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_OA);
+
+ pt_load_entry(pts);
+ pt_entry_make_write_clean(pts);
+ pt_load_entry(pts);
+ KUNIT_ASSERT_FALSE(test, pt_entry_is_write_dirty(pts));
+
+ for (i = 0; i != log2_to_int(len_lg2 - isz_lg2); i++) {
+ /* dirty every contiguous entry */
+ pts->index = start_idx + i;
+ pt_load_entry(pts);
+ KUNIT_ASSERT_TRUE(test, pt_entry_make_write_dirty(pts));
+ pts->index = start_idx;
+ pt_load_entry(pts);
+ KUNIT_ASSERT_TRUE(test, pt_entry_is_write_dirty(pts));
+
+ pt_entry_make_write_clean(pts);
+ pt_load_entry(pts);
+ KUNIT_ASSERT_FALSE(test, pt_entry_is_write_dirty(pts));
+ }
+
+ pt_clear_entries(pts, len_lg2 - isz_lg2);
+ }
+}
+
+static __maybe_unused void test_dirty(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+
+ if (!pt_dirty_supported(priv->common))
+ kunit_skip(test,
+ "Page table features do not support dirty tracking");
+
+ check_all_levels(test, test_lvl_dirty, NULL);
+}
+
+static void test_lvl_sw_bit_leaf(struct kunit *test, struct pt_state *pts,
+ void *arg)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ pt_vaddr_t pgsize_bitmap = pt_possible_sizes(pts);
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+ struct pt_write_attrs attrs = {};
+ unsigned int len_lg2;
+
+ if (!pt_can_have_leaf(pts))
+ return;
+ if (pts->index != 0)
+ return;
+
+ KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
+ pt_iommu_set_prot(pts->range->common, &attrs,
+ IOMMU_READ));
+
+ for (len_lg2 = 0; len_lg2 < PT_VADDR_MAX_LG2 - 1; len_lg2++) {
+ pt_oaddr_t paddr = log2_set_mod(priv->test_oa, 0, len_lg2);
+ struct pt_write_attrs new_attrs = {};
+ unsigned int bitnr;
+
+ if (!(pgsize_bitmap & log2_to_int(len_lg2)))
+ continue;
+
+ pt_install_leaf_entry(pts, paddr, len_lg2, &attrs);
+
+ for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common);
+ bitnr++)
+ KUNIT_ASSERT_FALSE(test,
+ pt_test_sw_bit_acquire(pts, bitnr));
+
+ for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common);
+ bitnr++) {
+ KUNIT_ASSERT_FALSE(test,
+ pt_test_sw_bit_acquire(pts, bitnr));
+ pt_set_sw_bit_release(pts, bitnr);
+ KUNIT_ASSERT_TRUE(test,
+ pt_test_sw_bit_acquire(pts, bitnr));
+ }
+
+ for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common);
+ bitnr++)
+ KUNIT_ASSERT_TRUE(test,
+ pt_test_sw_bit_acquire(pts, bitnr));
+
+ KUNIT_ASSERT_EQ(test, pt_item_oa(pts), paddr);
+
+ /* SW bits didn't leak into the attrs */
+ pt_attr_from_entry(pts, &new_attrs);
+ KUNIT_ASSERT_MEMEQ(test, &new_attrs, &attrs, sizeof(attrs));
+
+ pt_clear_entries(pts, len_lg2 - isz_lg2);
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
+ }
+}
+
+static __maybe_unused void test_sw_bit_leaf(struct kunit *test)
+{
+ check_all_levels(test, test_lvl_sw_bit_leaf, NULL);
+}
+
+static void test_lvl_sw_bit_table(struct kunit *test, struct pt_state *pts,
+ void *arg)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_write_attrs attrs = {};
+ pt_oaddr_t paddr =
+ log2_set_mod(priv->test_oa, 0, priv->smallest_pgsz_lg2);
+ unsigned int bitnr;
+
+ if (!pt_can_have_leaf(pts))
+ return;
+ if (pts->index != 0)
+ return;
+
+ KUNIT_ASSERT_NO_ERRNO_FN(test, "pt_iommu_set_prot",
+ pt_iommu_set_prot(pts->range->common, &attrs,
+ IOMMU_READ));
+
+ KUNIT_ASSERT_TRUE(test, pt_install_table(pts, paddr, &attrs));
+
+ for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common); bitnr++)
+ KUNIT_ASSERT_FALSE(test, pt_test_sw_bit_acquire(pts, bitnr));
+
+ for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common); bitnr++) {
+ KUNIT_ASSERT_FALSE(test, pt_test_sw_bit_acquire(pts, bitnr));
+ pt_set_sw_bit_release(pts, bitnr);
+ KUNIT_ASSERT_TRUE(test, pt_test_sw_bit_acquire(pts, bitnr));
+ }
+
+ for (bitnr = 0; bitnr <= pt_max_sw_bit(pts->range->common); bitnr++)
+ KUNIT_ASSERT_TRUE(test, pt_test_sw_bit_acquire(pts, bitnr));
+
+ KUNIT_ASSERT_EQ(test, pt_table_pa(pts), paddr);
+
+ pt_clear_entries(pts, ilog2(1));
+ KUNIT_ASSERT_PT_LOAD(test, pts, PT_ENTRY_EMPTY);
+}
+
+static __maybe_unused void test_sw_bit_table(struct kunit *test)
+{
+ check_all_levels(test, test_lvl_sw_bit_table, NULL);
+}
+
+static struct kunit_case generic_pt_test_cases[] = {
+ KUNIT_CASE_FMT(test_init),
+ KUNIT_CASE_FMT(test_bitops),
+ KUNIT_CASE_FMT(test_best_pgsize),
+ KUNIT_CASE_FMT(test_table_ptr),
+ KUNIT_CASE_FMT(test_max_va),
+ KUNIT_CASE_FMT(test_table_radix),
+ KUNIT_CASE_FMT(test_entry_possible_sizes),
+ KUNIT_CASE_FMT(test_entry_oa),
+ KUNIT_CASE_FMT(test_attr_from_entry),
+#ifdef pt_entry_is_write_dirty
+ KUNIT_CASE_FMT(test_dirty),
+#endif
+#ifdef pt_sw_bit
+ KUNIT_CASE_FMT(test_sw_bit_leaf),
+ KUNIT_CASE_FMT(test_sw_bit_table),
+#endif
+ {},
+};
+
+static int pt_kunit_generic_pt_init(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ ret = pt_kunit_priv_init(test, priv);
+ if (ret) {
+ kunit_kfree(test, priv);
+ return ret;
+ }
+ test->priv = priv;
+ return 0;
+}
+
+static void pt_kunit_generic_pt_exit(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+
+ if (!test->priv)
+ return;
+
+ pt_iommu_deinit(priv->iommu);
+ kunit_kfree(test, test->priv);
+}
+
+static struct kunit_suite NS(generic_pt_suite) = {
+ .name = __stringify(NS(fmt_test)),
+ .init = pt_kunit_generic_pt_init,
+ .exit = pt_kunit_generic_pt_exit,
+ .test_cases = generic_pt_test_cases,
+};
+kunit_test_suites(&NS(generic_pt_suite));
diff --git a/drivers/iommu/generic_pt/kunit_iommu.h b/drivers/iommu/generic_pt/kunit_iommu.h
new file mode 100644
index 000000000000..22c9e4c4dd97
--- /dev/null
+++ b/drivers/iommu/generic_pt/kunit_iommu.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ */
+#ifndef __GENERIC_PT_KUNIT_IOMMU_H
+#define __GENERIC_PT_KUNIT_IOMMU_H
+
+#define GENERIC_PT_KUNIT 1
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include "../iommu-pages.h"
+#include "pt_iter.h"
+
+#define pt_iommu_table_cfg CONCATENATE(pt_iommu_table, _cfg)
+#define pt_iommu_init CONCATENATE(CONCATENATE(pt_iommu_, PTPFX), init)
+int pt_iommu_init(struct pt_iommu_table *fmt_table,
+ const struct pt_iommu_table_cfg *cfg, gfp_t gfp);
+
+/* The format can provide a list of configurations it would like to test */
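+/*
+ * For example (hypothetical names), a format's kunit glue could supply:
+ *
+ *	static const struct pt_iommu_table_cfg my_fmt_kunit_cfgs[] = {
+ *		{ .common = { .hw_max_vasz_lg2 = 39 } },
+ *		{ .common = { .hw_max_vasz_lg2 = 48 } },
+ *	};
+ *	#define kunit_fmt_cfgs my_fmt_kunit_cfgs
+ */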
+#ifdef kunit_fmt_cfgs
+static const void *kunit_pt_gen_params_cfg(struct kunit *test, const void *prev,
+ char *desc)
+{
+ uintptr_t cfg_id = (uintptr_t)prev;
+
+ cfg_id++;
+ if (cfg_id >= ARRAY_SIZE(kunit_fmt_cfgs) + 1)
+ return NULL;
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s_cfg_%u",
+ __stringify(PTPFX_RAW), (unsigned int)(cfg_id - 1));
+ return (void *)cfg_id;
+}
+#define KUNIT_CASE_FMT(test_name) \
+ KUNIT_CASE_PARAM(test_name, kunit_pt_gen_params_cfg)
+#else
+#define KUNIT_CASE_FMT(test_name) KUNIT_CASE(test_name)
+#endif
+
+#define KUNIT_ASSERT_NO_ERRNO(test, ret) \
+ KUNIT_ASSERT_EQ_MSG(test, ret, 0, KUNIT_SUBSUBTEST_INDENT "errno %pe", \
+ ERR_PTR(ret))
+
+#define KUNIT_ASSERT_NO_ERRNO_FN(test, fn, ret) \
+ KUNIT_ASSERT_EQ_MSG(test, ret, 0, \
+ KUNIT_SUBSUBTEST_INDENT "errno %pe from %s", \
+ ERR_PTR(ret), fn)
+
+/*
+ * When the test is run on a 32 bit system unsigned long can be 32 bits, which
+ * restricts the iommu op signatures to 32 bits. This means the test has to be
+ * mindful not to create any VAs over the 32 bit limit. Reduce the scope of the
+ * testing, as the main purpose of checking on full 32 bit is to look for
+ * 32bitisms in the core code. Run the test on i386 with X86_PAE=y to get full
+ * coverage when dma_addr_t & phys_addr_t are 8 bytes.
+ */
+#define IS_32BIT (sizeof(unsigned long) == 4)
+
+struct kunit_iommu_priv {
+ union {
+ struct iommu_domain domain;
+ struct pt_iommu_table fmt_table;
+ };
+ spinlock_t top_lock;
+ struct device *dummy_dev;
+ struct pt_iommu *iommu;
+ struct pt_common *common;
+ struct pt_iommu_table_cfg cfg;
+ struct pt_iommu_info info;
+ unsigned int smallest_pgsz_lg2;
+ pt_vaddr_t smallest_pgsz;
+ unsigned int largest_pgsz_lg2;
+ pt_oaddr_t test_oa;
+ pt_vaddr_t safe_pgsize_bitmap;
+ unsigned long orig_nr_secondary_pagetable;
+};
+PT_IOMMU_CHECK_DOMAIN(struct kunit_iommu_priv, fmt_table.iommu, domain);
+
+static void pt_kunit_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+{
+ iommu_put_pages_list(&gather->freelist);
+}
+
+#define IOMMU_PT_DOMAIN_OPS1(x) IOMMU_PT_DOMAIN_OPS(x)
+static const struct iommu_domain_ops kunit_pt_ops = {
+ IOMMU_PT_DOMAIN_OPS1(PTPFX_RAW),
+ .iotlb_sync = &pt_kunit_iotlb_sync,
+};
+
+static void pt_kunit_change_top(struct pt_iommu *iommu_table,
+ phys_addr_t top_paddr, unsigned int top_level)
+{
+}
+
+static spinlock_t *pt_kunit_get_top_lock(struct pt_iommu *iommu_table)
+{
+ struct kunit_iommu_priv *priv = container_of(
+ iommu_table, struct kunit_iommu_priv, fmt_table.iommu);
+
+ return &priv->top_lock;
+}
+
+static const struct pt_iommu_driver_ops pt_kunit_driver_ops = {
+ .change_top = &pt_kunit_change_top,
+ .get_top_lock = &pt_kunit_get_top_lock,
+};
+
+static int pt_kunit_priv_init(struct kunit *test, struct kunit_iommu_priv *priv)
+{
+ unsigned int va_lg2sz;
+ int ret;
+
+ /* Enough so the memory allocator works */
+ priv->dummy_dev = kunit_device_register(test, "pt_kunit_dev");
+ if (IS_ERR(priv->dummy_dev))
+ return PTR_ERR(priv->dummy_dev);
+ set_dev_node(priv->dummy_dev, NUMA_NO_NODE);
+
+ spin_lock_init(&priv->top_lock);
+
+#ifdef kunit_fmt_cfgs
+ priv->cfg = kunit_fmt_cfgs[((uintptr_t)test->param_value) - 1];
+ /*
+ * The format can set a list of features that the kunit_fmt_cfgs
+ * controls; other features default to on.
+ */
+ priv->cfg.common.features |= PT_SUPPORTED_FEATURES &
+ (~KUNIT_FMT_FEATURES);
+#else
+ priv->cfg.common.features = PT_SUPPORTED_FEATURES;
+#endif
+
+ /* Defaults, for the kunit */
+ if (!priv->cfg.common.hw_max_vasz_lg2)
+ priv->cfg.common.hw_max_vasz_lg2 = PT_MAX_VA_ADDRESS_LG2;
+ if (!priv->cfg.common.hw_max_oasz_lg2)
+ priv->cfg.common.hw_max_oasz_lg2 = pt_max_oa_lg2(NULL);
+
+ priv->fmt_table.iommu.nid = NUMA_NO_NODE;
+ priv->fmt_table.iommu.driver_ops = &pt_kunit_driver_ops;
+ priv->fmt_table.iommu.iommu_device = priv->dummy_dev;
+ priv->domain.ops = &kunit_pt_ops;
+ ret = pt_iommu_init(&priv->fmt_table, &priv->cfg, GFP_KERNEL);
+ if (ret) {
+ if (ret == -EOVERFLOW)
+ kunit_skip(
+ test,
+ "This configuration cannot be tested on 32 bit");
+ return ret;
+ }
+
+ priv->iommu = &priv->fmt_table.iommu;
+ priv->common = common_from_iommu(&priv->fmt_table.iommu);
+ priv->iommu->ops->get_info(priv->iommu, &priv->info);
+
+ /*
+ * size_t is used to pass the mapping length and it can be 32 bits;
+ * truncate the page sizes so we don't use overly large sizes.
+ */
+ priv->info.pgsize_bitmap = (size_t)priv->info.pgsize_bitmap;
+
+ priv->smallest_pgsz_lg2 = vaffs(priv->info.pgsize_bitmap);
+ priv->smallest_pgsz = log2_to_int(priv->smallest_pgsz_lg2);
+ priv->largest_pgsz_lg2 =
+ vafls((dma_addr_t)priv->info.pgsize_bitmap) - 1;
+
+ priv->test_oa =
+ oalog2_mod(0x74a71445deadbeef, priv->common->max_oasz_lg2);
+
+ /*
+ * We run out of VA space if the mappings get too big; make a smaller
+ * bitmap that can safely pass through the dma_addr_t API.
+ */
+ va_lg2sz = priv->common->max_vasz_lg2;
+ if (IS_32BIT && va_lg2sz > 32)
+ va_lg2sz = 32;
+ priv->safe_pgsize_bitmap =
+ log2_mod(priv->info.pgsize_bitmap, va_lg2sz - 1);
+
+ return 0;
+}
+
+#endif
diff --git a/drivers/iommu/generic_pt/kunit_iommu_pt.h b/drivers/iommu/generic_pt/kunit_iommu_pt.h
new file mode 100644
index 000000000000..e8a63c8ea850
--- /dev/null
+++ b/drivers/iommu/generic_pt/kunit_iommu_pt.h
@@ -0,0 +1,487 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
+ */
+#include "kunit_iommu.h"
+#include "pt_iter.h"
+#include <linux/generic_pt/iommu.h>
+#include <linux/iommu.h>
+
+static void do_map(struct kunit *test, pt_vaddr_t va, pt_oaddr_t pa,
+ pt_vaddr_t len);
+
+struct count_valids {
+ u64 per_size[PT_VADDR_MAX_LG2];
+};
+
+static int __count_valids(struct pt_range *range, void *arg, unsigned int level,
+ struct pt_table_p *table)
+{
+ struct pt_state pts = pt_init(range, level, table);
+ struct count_valids *valids = arg;
+
+ for_each_pt_level_entry(&pts) {
+ if (pts.type == PT_ENTRY_TABLE) {
+ pt_descend(&pts, arg, __count_valids);
+ continue;
+ }
+ if (pts.type == PT_ENTRY_OA) {
+ valids->per_size[pt_entry_oa_lg2sz(&pts)]++;
+ continue;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Number of valid table entries. This counts contiguous entries as a single
+ * valid.
+ */
+static unsigned int count_valids(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_range range = pt_top_range(priv->common);
+ struct count_valids valids = {};
+ u64 total = 0;
+ unsigned int i;
+
+ KUNIT_ASSERT_NO_ERRNO(test,
+ pt_walk_range(&range, __count_valids, &valids));
+
+ for (i = 0; i != ARRAY_SIZE(valids.per_size); i++)
+ total += valids.per_size[i];
+ return total;
+}
+
+/* Only a single page size is present, count the number of valid entries */
+static unsigned int count_valids_single(struct kunit *test, pt_vaddr_t pgsz)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_range range = pt_top_range(priv->common);
+ struct count_valids valids = {};
+ u64 total = 0;
+ unsigned int i;
+
+ KUNIT_ASSERT_NO_ERRNO(test,
+ pt_walk_range(&range, __count_valids, &valids));
+
+ for (i = 0; i != ARRAY_SIZE(valids.per_size); i++) {
+ if ((1ULL << i) == pgsz)
+ total = valids.per_size[i];
+ else
+ KUNIT_ASSERT_EQ(test, valids.per_size[i], 0);
+ }
+ return total;
+}
+
+static void do_unmap(struct kunit *test, pt_vaddr_t va, pt_vaddr_t len)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ size_t ret;
+
+ ret = iommu_unmap(&priv->domain, va, len);
+ KUNIT_ASSERT_EQ(test, ret, len);
+}
+
+static void check_iova(struct kunit *test, pt_vaddr_t va, pt_oaddr_t pa,
+ pt_vaddr_t len)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ pt_vaddr_t pfn = log2_div(va, priv->smallest_pgsz_lg2);
+ pt_vaddr_t end_pfn = pfn + log2_div(len, priv->smallest_pgsz_lg2);
+
+ for (; pfn != end_pfn; pfn++) {
+ phys_addr_t res = iommu_iova_to_phys(&priv->domain,
+ pfn * priv->smallest_pgsz);
+
+ KUNIT_ASSERT_EQ(test, res, (phys_addr_t)pa);
+ if (res != pa)
+ break;
+ pa += priv->smallest_pgsz;
+ }
+}
+
+static void test_increase_level(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_common *common = priv->common;
+
+ if (!pt_feature(common, PT_FEAT_DYNAMIC_TOP))
+ kunit_skip(test, "PT_FEAT_DYNAMIC_TOP not set for this format");
+
+ if (IS_32BIT)
+ kunit_skip(test, "Unable to test on 32bit");
+
+ KUNIT_ASSERT_GT(test, common->max_vasz_lg2,
+ pt_top_range(common).max_vasz_lg2);
+
+ /* Add every possible level to the max */
+ while (common->max_vasz_lg2 != pt_top_range(common).max_vasz_lg2) {
+ struct pt_range top_range = pt_top_range(common);
+
+ if (top_range.va == 0)
+ do_map(test, top_range.last_va + 1, 0,
+ priv->smallest_pgsz);
+ else
+ do_map(test, top_range.va - priv->smallest_pgsz, 0,
+ priv->smallest_pgsz);
+
+ KUNIT_ASSERT_EQ(test, pt_top_range(common).top_level,
+ top_range.top_level + 1);
+ KUNIT_ASSERT_GE(test, common->max_vasz_lg2,
+ pt_top_range(common).max_vasz_lg2);
+ }
+}
+
+static void test_map_simple(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_range range = pt_top_range(priv->common);
+ struct count_valids valids = {};
+ pt_vaddr_t pgsize_bitmap = priv->safe_pgsize_bitmap;
+ unsigned int pgsz_lg2;
+ pt_vaddr_t cur_va;
+
+ /* Map every reported page size */
+ cur_va = range.va + priv->smallest_pgsz * 256;
+ for (pgsz_lg2 = 0; pgsz_lg2 != PT_VADDR_MAX_LG2; pgsz_lg2++) {
+ pt_oaddr_t paddr = log2_set_mod(priv->test_oa, 0, pgsz_lg2);
+ u64 len = log2_to_int(pgsz_lg2);
+
+ if (!(pgsize_bitmap & len))
+ continue;
+
+ cur_va = ALIGN(cur_va, len);
+ do_map(test, cur_va, paddr, len);
+ if (len <= SZ_2G)
+ check_iova(test, cur_va, paddr, len);
+ cur_va += len;
+ }
+
+ /* The read interface reports that every page size was created */
+ range = pt_top_range(priv->common);
+ KUNIT_ASSERT_NO_ERRNO(test,
+ pt_walk_range(&range, __count_valids, &valids));
+ for (pgsz_lg2 = 0; pgsz_lg2 != PT_VADDR_MAX_LG2; pgsz_lg2++) {
+ if (pgsize_bitmap & (1ULL << pgsz_lg2))
+ KUNIT_ASSERT_EQ(test, valids.per_size[pgsz_lg2], 1);
+ else
+ KUNIT_ASSERT_EQ(test, valids.per_size[pgsz_lg2], 0);
+ }
+
+ /* Unmap works */
+ range = pt_top_range(priv->common);
+ cur_va = range.va + priv->smallest_pgsz * 256;
+ for (pgsz_lg2 = 0; pgsz_lg2 != PT_VADDR_MAX_LG2; pgsz_lg2++) {
+ u64 len = log2_to_int(pgsz_lg2);
+
+ if (!(pgsize_bitmap & len))
+ continue;
+ cur_va = ALIGN(cur_va, len);
+ do_unmap(test, cur_va, len);
+ cur_va += len;
+ }
+ KUNIT_ASSERT_EQ(test, count_valids(test), 0);
+}
+
+/*
+ * Test to convert a table pointer into an OA by mapping something small,
+ * unmapping it so as to leave behind a table pointer, then mapping something
+ * larger that will convert the table into an OA.
+ */
+static void test_map_table_to_oa(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ pt_vaddr_t limited_pgbitmap =
+ priv->info.pgsize_bitmap % (IS_32BIT ? SZ_2G : SZ_16G);
+ struct pt_range range = pt_top_range(priv->common);
+ unsigned int pgsz_lg2;
+ pt_vaddr_t max_pgsize;
+ pt_vaddr_t cur_va;
+
+ max_pgsize = 1ULL << (vafls(limited_pgbitmap) - 1);
+ KUNIT_ASSERT_TRUE(test, priv->info.pgsize_bitmap & max_pgsize);
+
+ for (pgsz_lg2 = 0; pgsz_lg2 != PT_VADDR_MAX_LG2; pgsz_lg2++) {
+ pt_oaddr_t paddr = log2_set_mod(priv->test_oa, 0, pgsz_lg2);
+ u64 len = log2_to_int(pgsz_lg2);
+ pt_vaddr_t offset;
+
+ if (!(priv->info.pgsize_bitmap & len))
+ continue;
+ if (len > max_pgsize)
+ break;
+
+ cur_va = ALIGN(range.va + priv->smallest_pgsz * 256,
+ max_pgsize);
+ for (offset = 0; offset != max_pgsize; offset += len)
+ do_map(test, cur_va + offset, paddr + offset, len);
+ check_iova(test, cur_va, paddr, max_pgsize);
+ KUNIT_ASSERT_EQ(test, count_valids_single(test, len),
+ log2_div(max_pgsize, pgsz_lg2));
+
+ if (len == max_pgsize) {
+ do_unmap(test, cur_va, max_pgsize);
+ } else {
+ do_unmap(test, cur_va, max_pgsize / 2);
+ for (offset = max_pgsize / 2; offset != max_pgsize;
+ offset += len)
+ do_unmap(test, cur_va + offset, len);
+ }
+
+ KUNIT_ASSERT_EQ(test, count_valids(test), 0);
+ }
+}
+
+/*
+ * Test unmapping a small page at the start of a large page. This always unmaps
+ * the large page.
+ */
+static void test_unmap_split(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_range top_range = pt_top_range(priv->common);
+ pt_vaddr_t pgsize_bitmap = priv->safe_pgsize_bitmap;
+ unsigned int pgsz_lg2;
+ unsigned int count = 0;
+
+ for (pgsz_lg2 = 0; pgsz_lg2 != PT_VADDR_MAX_LG2; pgsz_lg2++) {
+ pt_vaddr_t base_len = log2_to_int(pgsz_lg2);
+ unsigned int next_pgsz_lg2;
+
+ if (!(pgsize_bitmap & base_len))
+ continue;
+
+ for (next_pgsz_lg2 = pgsz_lg2 + 1;
+ next_pgsz_lg2 != PT_VADDR_MAX_LG2; next_pgsz_lg2++) {
+ pt_vaddr_t next_len = log2_to_int(next_pgsz_lg2);
+ pt_vaddr_t vaddr = top_range.va;
+ pt_oaddr_t paddr = 0;
+ size_t unmapped;
+
+ if (!(pgsize_bitmap & next_len))
+ continue;
+
+ do_map(test, vaddr, paddr, next_len);
+ unmapped = iommu_unmap(&priv->domain, vaddr, base_len);
+ KUNIT_ASSERT_EQ(test, unmapped, next_len);
+
+ /* Make sure unmap doesn't keep going */
+ do_map(test, vaddr, paddr, next_len);
+ do_map(test, vaddr + next_len, paddr, next_len);
+ unmapped = iommu_unmap(&priv->domain, vaddr, base_len);
+ KUNIT_ASSERT_EQ(test, unmapped, next_len);
+ unmapped = iommu_unmap(&priv->domain, vaddr + next_len,
+ next_len);
+ KUNIT_ASSERT_EQ(test, unmapped, next_len);
+
+ count++;
+ }
+ }
+
+ if (count == 0)
+ kunit_skip(test, "Test needs two page sizes");
+}
+
+static void unmap_collisions(struct kunit *test, struct maple_tree *mt,
+ pt_vaddr_t start, pt_vaddr_t last)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ MA_STATE(mas, mt, start, last);
+ void *entry;
+
+ mtree_lock(mt);
+ mas_for_each(&mas, entry, last) {
+ pt_vaddr_t mas_start = mas.index;
+ pt_vaddr_t len = (mas.last - mas_start) + 1;
+ pt_oaddr_t paddr;
+
+ mas_erase(&mas);
+ mas_pause(&mas);
+ mtree_unlock(mt);
+
+ paddr = oalog2_mod(mas_start, priv->common->max_oasz_lg2);
+ check_iova(test, mas_start, paddr, len);
+ do_unmap(test, mas_start, len);
+ mtree_lock(mt);
+ }
+ mtree_unlock(mt);
+}
+
+static void clamp_range(struct kunit *test, struct pt_range *range)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+
+ if (range->last_va - range->va > SZ_1G)
+ range->last_va = range->va + SZ_1G;
+ KUNIT_ASSERT_NE(test, range->last_va, PT_VADDR_MAX);
+ if (range->va <= MAPLE_RESERVED_RANGE)
+ range->va =
+ ALIGN(MAPLE_RESERVED_RANGE, priv->smallest_pgsz);
+}
+
+/*
+ * Randomly map and unmap ranges that can use large physical pages. If a random
+ * range overlaps with existing ranges then unmap them. This hits all the
+ * special cases.
+ */
+static void test_random_map(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_range upper_range = pt_upper_range(priv->common);
+ struct pt_range top_range = pt_top_range(priv->common);
+ struct maple_tree mt;
+ unsigned int iter;
+
+ mt_init(&mt);
+
+ /*
+ * Shrink the range so randomization is more likely to have
+ * intersections
+ */
+ clamp_range(test, &top_range);
+ clamp_range(test, &upper_range);
+
+ for (iter = 0; iter != 1000; iter++) {
+ struct pt_range *range = &top_range;
+ pt_oaddr_t paddr;
+ pt_vaddr_t start;
+ pt_vaddr_t end;
+ int ret;
+
+ if (pt_feature(priv->common, PT_FEAT_SIGN_EXTEND) &&
+ ULONG_MAX >= PT_VADDR_MAX && get_random_u32_inclusive(0, 1))
+ range = &upper_range;
+
+ start = get_random_u32_below(
+ min(U32_MAX, range->last_va - range->va));
+ end = get_random_u32_below(
+ min(U32_MAX, range->last_va - start));
+
+ start = ALIGN_DOWN(start, priv->smallest_pgsz);
+ end = ALIGN(end, priv->smallest_pgsz);
+ start += range->va;
+ end += start;
+ if (start < range->va || end > range->last_va + 1 ||
+ start >= end)
+ continue;
+
+ /* Try overmapping to test the failure handling */
+ paddr = oalog2_mod(start, priv->common->max_oasz_lg2);
+ ret = iommu_map(&priv->domain, start, paddr, end - start,
+ IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
+ if (ret) {
+ KUNIT_ASSERT_EQ(test, ret, -EADDRINUSE);
+ unmap_collisions(test, &mt, start, end - 1);
+ do_map(test, start, paddr, end - start);
+ }
+
+ KUNIT_ASSERT_NO_ERRNO_FN(test, "mtree_insert_range",
+ mtree_insert_range(&mt, start, end - 1,
+ XA_ZERO_ENTRY,
+ GFP_KERNEL));
+
+ check_iova(test, start, paddr, end - start);
+ if (iter % 100)
+ cond_resched();
+ }
+
+ unmap_collisions(test, &mt, 0, PT_VADDR_MAX);
+ KUNIT_ASSERT_EQ(test, count_valids(test), 0);
+
+ mtree_destroy(&mt);
+}
+
+/* See https://lore.kernel.org/r/b9b18a03-63a2-4065-a27e-d92dd5c860bc@amd.com */
+static void test_pgsize_boundary(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_range top_range = pt_top_range(priv->common);
+
+ if (top_range.va != 0 || top_range.last_va < 0xfef9ffff ||
+ priv->smallest_pgsz != SZ_4K)
+ kunit_skip(test, "Format does not have the required range");
+
+ do_map(test, 0xfef80000, 0x208b95d000, 0xfef9ffff - 0xfef80000 + 1);
+}
+
+/* See https://lore.kernel.org/r/20250826143816.38686-1-eugkoira@amazon.com */
+static void test_mixed(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+ struct pt_range top_range = pt_top_range(priv->common);
+ u64 start = 0x3fe400ULL << 12;
+ u64 end = 0x4c0600ULL << 12;
+ pt_vaddr_t len = end - start;
+ pt_oaddr_t oa = start;
+
+ if (top_range.last_va <= start || IS_32BIT)
+ kunit_skip(test, "range is too small");
+ if ((priv->safe_pgsize_bitmap & GENMASK(30, 21)) != (BIT(30) | BIT(21)))
+ kunit_skip(test, "incompatible psize");
+
+ do_map(test, start, oa, len);
+ /* 14 2M, 3 1G, 3 2M */
+ KUNIT_ASSERT_EQ(test, count_valids(test), 20);
+ check_iova(test, start, oa, len);
+}
+
+static struct kunit_case iommu_test_cases[] = {
+ KUNIT_CASE_FMT(test_increase_level),
+ KUNIT_CASE_FMT(test_map_simple),
+ KUNIT_CASE_FMT(test_map_table_to_oa),
+ KUNIT_CASE_FMT(test_unmap_split),
+ KUNIT_CASE_FMT(test_random_map),
+ KUNIT_CASE_FMT(test_pgsize_boundary),
+ KUNIT_CASE_FMT(test_mixed),
+ {},
+};
+
+static int pt_kunit_iommu_init(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->orig_nr_secondary_pagetable =
+ global_node_page_state(NR_SECONDARY_PAGETABLE);
+ ret = pt_kunit_priv_init(test, priv);
+ if (ret) {
+ kunit_kfree(test, priv);
+ return ret;
+ }
+ test->priv = priv;
+ return 0;
+}
+
+static void pt_kunit_iommu_exit(struct kunit *test)
+{
+ struct kunit_iommu_priv *priv = test->priv;
+
+ if (!test->priv)
+ return;
+
+ pt_iommu_deinit(priv->iommu);
+ /*
+ * Look for memory leaks, assumes kunit is running isolated and nothing
+ * else is using secondary page tables.
+ */
+ KUNIT_ASSERT_EQ(test, priv->orig_nr_secondary_pagetable,
+ global_node_page_state(NR_SECONDARY_PAGETABLE));
+ kunit_kfree(test, test->priv);
+}
+
+static struct kunit_suite NS(iommu_suite) = {
+ .name = __stringify(NS(iommu_test)),
+ .init = pt_kunit_iommu_init,
+ .exit = pt_kunit_iommu_exit,
+ .test_cases = iommu_test_cases,
+};
+kunit_test_suites(&NS(iommu_suite));
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kunit for generic page table");
+MODULE_IMPORT_NS("GENERIC_PT_IOMMU");
diff --git a/drivers/iommu/generic_pt/pt_common.h b/drivers/iommu/generic_pt/pt_common.h
new file mode 100644
index 000000000000..e1123d35c907
--- /dev/null
+++ b/drivers/iommu/generic_pt/pt_common.h
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * This header is included after the format. It contains definitions
+ * that build on the format definitions to create the basic format API.
+ *
+ * The format API is listed here, with kdocs. The functions without bodies are
+ * implemented in the format using the pattern:
+ * static inline FMTpt_XXX(..) {..}
+ * #define pt_XXX FMTpt_XXX
+ *
+ * If the format doesn't implement a function then pt_fmt_defaults.h can provide
+ * a generic version.
+ *
+ * The routines marked "@pts: Entry to query" operate on the entire contiguous
+ * entry and can be called with a pts->index pointing to any sub item that makes
+ * up that entry.
+ *
+ * The header order is:
+ * pt_defs.h
+ * FMT.h
+ * pt_common.h
+ */
+#ifndef __GENERIC_PT_PT_COMMON_H
+#define __GENERIC_PT_PT_COMMON_H
+
+#include "pt_defs.h"
+#include "pt_fmt_defaults.h"
+
+/**
+ * pt_attr_from_entry() - Convert the permission bits back to attrs
+ * @pts: Entry to convert from
+ * @attrs: Resulting attrs
+ *
+ * Fill in the attrs with the permission bits encoded in the current leaf entry.
+ * The attrs should be usable with pt_install_leaf_entry() to reconstruct the
+ * same entry.
+ */
+static inline void pt_attr_from_entry(const struct pt_state *pts,
+ struct pt_write_attrs *attrs);
+
+/**
+ * pt_can_have_leaf() - True if the current level can have an OA entry
+ * @pts: The current level
+ *
+ * True if the current level can support pt_install_leaf_entry(). A leaf
+ * entry produces an OA.
+ */
+static inline bool pt_can_have_leaf(const struct pt_state *pts);
+
+/**
+ * pt_can_have_table() - True if the current level can have a lower table
+ * @pts: The current level
+ *
+ * Every level except 0 is allowed to have a lower table.
+ */
+static inline bool pt_can_have_table(const struct pt_state *pts)
+{
+ /* No further tables at level 0 */
+ return pts->level > 0;
+}
+
+/**
+ * pt_clear_entries() - Make entries empty (non-present)
+ * @pts: Starting table index
+ * @num_contig_lg2: Number of contiguous items to clear
+ *
+ * Clear a run of entries. A cleared entry will load back as PT_ENTRY_EMPTY
+ * and does not have any effect on table walking. The starting index must be
+ * aligned to num_contig_lg2.
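+ *
+ * For example, clearing a single item is pt_clear_entries(pts, ilog2(1)),
+ * as the kunit does after installing a table entry.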
+ */
+static inline void pt_clear_entries(struct pt_state *pts,
+ unsigned int num_contig_lg2);
+
+/**
+ * pt_entry_make_write_dirty() - Make an entry dirty
+ * @pts: Table entry to change
+ *
+ * Make pt_entry_is_write_dirty() return true for this entry. This can be called
+ * asynchronously with any other table manipulation under an RCU lock and must
+ * not corrupt the table.
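+ *
+ * Returns: true if the entry is now write dirty; the kunit asserts this
+ * succeeds on a present entry.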
+ */
+static inline bool pt_entry_make_write_dirty(struct pt_state *pts);
+
+/**
+ * pt_entry_make_write_clean() - Make the entry write clean
+ * @pts: Table entry to change
+ *
+ * Modify the entry so that pt_entry_is_write_dirty() == false. The HW will
+ * eventually be notified of this change via a TLB flush, which is the point
+ * that the HW must become synchronized. Any "write dirty" prior to the TLB
+ * flush can be lost, but once the TLB flush completes all writes must make
+ * their entries write dirty.
+ *
+ * The format should alter the entry in a way that is compatible with any
+ * concurrent update from HW. The entire contiguous entry is changed.
+ */
+static inline void pt_entry_make_write_clean(struct pt_state *pts);
+
+/**
+ * pt_entry_is_write_dirty() - True if the entry has been written to
+ * @pts: Entry to query
+ *
+ * "write dirty" means that the HW has written to the OA translated
+ * by this entry. If the entry is contiguous then the consolidated
+ * "write dirty" for all the items must be returned.
+ */
+static inline bool pt_entry_is_write_dirty(const struct pt_state *pts);
+
+/**
+ * pt_dirty_supported() - True if the page table supports dirty tracking
+ * @common: Page table to query
+ */
+static inline bool pt_dirty_supported(struct pt_common *common);
+
+/**
+ * pt_entry_num_contig_lg2() - Number of contiguous items for this leaf entry
+ * @pts: Entry to query
+ *
+ * Return the number of contiguous items this leaf entry spans. If the entry
+ * is single item it returns ilog2(1).
+ */
+static inline unsigned int pt_entry_num_contig_lg2(const struct pt_state *pts);
+
+/**
+ * pt_entry_oa() - Output Address for this leaf entry
+ * @pts: Entry to query
+ *
+ * Return the output address for the start of the entry. If the entry
+ * is contiguous this returns the same value for each sub-item. I.e.::
+ *
+ * log2_mod(pt_entry_oa(), pt_entry_oa_lg2sz()) == 0
+ *
+ * See pt_item_oa(). The format should implement one of these two functions
+ * depending on how it stores the OAs in the table.
+ */
+static inline pt_oaddr_t pt_entry_oa(const struct pt_state *pts);
+
+/**
+ * pt_entry_oa_lg2sz() - Return the size of an OA entry
+ * @pts: Entry to query
+ *
+ * If the entry is not contiguous this returns pt_table_item_lg2sz(), otherwise
+ * it returns the total VA/OA size of the entire contiguous entry.
+ */
+static inline unsigned int pt_entry_oa_lg2sz(const struct pt_state *pts)
+{
+ return pt_entry_num_contig_lg2(pts) + pt_table_item_lg2sz(pts);
+}
+
+/**
+ * pt_entry_oa_exact() - Return the complete OA for an entry
+ * @pts: Entry to query
+ *
+ * During iteration the first entry could have a VA with an offset from the
+ * natural start of the entry. Return the exact OA including the pts's VA
+ * offset.
+ */
+static inline pt_oaddr_t pt_entry_oa_exact(const struct pt_state *pts)
+{
+ return _pt_entry_oa_fast(pts) |
+ log2_mod(pts->range->va, pt_entry_oa_lg2sz(pts));
+}
+
+/**
+ * pt_full_va_prefix() - The top bits of the VA
+ * @common: Page table to query
+ *
+ * This is usually 0, but some formats have their VA space going downward from
+ * PT_VADDR_MAX, and will return that instead. This value must always be
+ * adjusted by struct pt_common max_vasz_lg2.
+ */
+static inline pt_vaddr_t pt_full_va_prefix(const struct pt_common *common);
+
+/**
+ * pt_has_system_page_size() - True if level 0 can install a PAGE_SHIFT entry
+ * @common: Page table to query
+ *
+ * If true the caller can use, at level 0, pt_install_leaf_entry(PAGE_SHIFT).
+ * This is useful to create optimized paths for common cases of PAGE_SIZE
+ * mappings.
+ */
+static inline bool pt_has_system_page_size(const struct pt_common *common);
+
+/**
+ * pt_install_leaf_entry() - Write a leaf entry to the table
+ * @pts: Table index to change
+ * @oa: Output Address for this leaf
+ * @oasz_lg2: Size in VA/OA for this leaf
+ * @attrs: Attributes to modify the entry
+ *
+ * A leaf OA entry will return PT_ENTRY_OA from pt_load_entry(). It translates
+ * the VA indicated by pts to the given OA.
+ *
+ * For a single item non-contiguous entry oasz_lg2 is pt_table_item_lg2sz().
+ * For contiguous it is pt_table_item_lg2sz() + num_contig_lg2.
+ *
+ * This must not be called if pt_can_have_leaf() == false. Contiguous sizes
+ * not indicated by pt_possible_sizes() must not be specified.
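+ *
+ * For example (illustrative), on a level with 4KiB items a single-item
+ * entry uses oasz_lg2 == 12, while a 16-item contiguous entry uses
+ * oasz_lg2 == 16.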
+ */
+static inline void pt_install_leaf_entry(struct pt_state *pts, pt_oaddr_t oa,
+ unsigned int oasz_lg2,
+ const struct pt_write_attrs *attrs);
+
+/**
+ * pt_install_table() - Write a table entry to the table
+ * @pts: Table index to change
+ * @table_pa: CPU physical address of the lower table's memory
+ * @attrs: Attributes to modify the table index
+ *
+ * A table entry will return PT_ENTRY_TABLE from pt_load_entry(). The table_pa
+ * is the table at pts->level - 1. This is done by cmpxchg so pts must have the
+ * current entry loaded. The pts is updated with the installed entry.
+ *
+ * This must not be called if pt_can_have_table() == false.
+ *
+ * Returns: true if the table was installed successfully.
+ */
+static inline bool pt_install_table(struct pt_state *pts, pt_oaddr_t table_pa,
+ const struct pt_write_attrs *attrs);
+
+/**
+ * pt_item_oa() - Output Address for this leaf item
+ * @pts: Item to query
+ *
+ * Return the output address for this item. If the item is part of a contiguous
+ * entry it returns the value of the OA for this individual sub item.
+ *
+ * See pt_entry_oa(). The format should implement one of these two functions
+ * depending on how it stores the OAs in the table.
+ */
+static inline pt_oaddr_t pt_item_oa(const struct pt_state *pts);
+
+/**
+ * pt_load_entry_raw() - Read from the location pts points at into the pts
+ * @pts: Table index to load
+ *
+ * Return the type of entry that was loaded. pts->entry will be filled in with
+ * the entry's content. See pt_load_entry()
+ */
+static inline enum pt_entry_type pt_load_entry_raw(struct pt_state *pts);
+
+/**
+ * pt_max_oa_lg2() - Return the maximum OA the table format can hold
+ * @common: Page table to query
+ *
+ * The value oalog2_to_max_int(pt_max_oa_lg2()) is the MAX for the
+ * OA. This is the absolute maximum address the table can hold. struct pt_common
+ * max_oasz_lg2 sets a lower dynamic maximum based on HW capability.
+ */
+static inline unsigned int
+pt_max_oa_lg2(const struct pt_common *common);
+
+/**
+ * pt_num_items_lg2() - Return the number of items in this table level
+ * @pts: The current level
+ *
+ * The number of items in a table level defines the number of bits this level
+ * decodes from the VA. This function is not called for the top level,
+ * so it does not need to compute a special value for the top case. The
+ * result for the top is based on pt_common max_vasz_lg2.
+ *
+ * The value is used as part of determining the table indexes via the
+ * equation::
+ *
+ * log2_mod(log2_div(VA, pt_table_item_lg2sz()), pt_num_items_lg2())
+ */
+static inline unsigned int pt_num_items_lg2(const struct pt_state *pts);
+
+/**
+ * pt_pgsz_lg2_to_level - Return the level that maps the page size
+ * @common: Page table to query
+ * @pgsize_lg2: Log2 page size
+ *
+ * Returns the table level that will map the given page size. The page
+ * size must be part of the pt_possible_sizes() for some level.
+ */
+static inline unsigned int pt_pgsz_lg2_to_level(struct pt_common *common,
+ unsigned int pgsize_lg2);
+
+/**
+ * pt_possible_sizes() - Return a bitmap of possible output sizes at this level
+ * @pts: The current level
+ *
+ * Each level has a list of possible output sizes that can be installed as
+ * leaf entries. If pt_can_have_leaf() is false returns zero.
+ *
+ * Otherwise the bit in position pt_table_item_lg2sz() should be set indicating
+ * that a non-contiguous single item leaf entry is supported. The following
+ * pt_num_items_lg2() number of bits can be set indicating contiguous entries
+ * are supported. Bit pt_table_item_lg2sz() + pt_num_items_lg2() must not be
+ * set, contiguous entries cannot span the entire table.
+ *
+ * The OR of pt_possible_sizes() of all levels is the typical bitmask of all
+ * supported sizes in the entire table.
+ */
+static inline pt_vaddr_t pt_possible_sizes(const struct pt_state *pts);
+
+/**
+ * pt_table_item_lg2sz() - Size of a single item entry in this table level
+ * @pts: The current level
+ *
+ * The size of the item specifies how much VA and OA a single item occupies.
+ *
+ * See pt_entry_oa_lg2sz() for the same value including the effect of contiguous
+ * entries.
+ */
+static inline unsigned int pt_table_item_lg2sz(const struct pt_state *pts);
+
+/**
+ * pt_table_oa_lg2sz() - Return the VA/OA size of the entire table
+ * @pts: The current level
+ *
+ * Return the size of VA decoded by the entire table level.
+ */
+static inline unsigned int pt_table_oa_lg2sz(const struct pt_state *pts)
+{
+ if (pts->range->top_level == pts->level)
+ return pts->range->max_vasz_lg2;
+ return min_t(unsigned int, pts->range->common->max_vasz_lg2,
+ pt_num_items_lg2(pts) + pt_table_item_lg2sz(pts));
+}
+
+/**
+ * pt_table_pa() - Return the CPU physical address of the table entry
+ * @pts: Entry to query
+ *
+ * This is only ever called on PT_ENTRY_TABLE entries. Must return the same
+ * value passed to pt_install_table().
+ */
+static inline pt_oaddr_t pt_table_pa(const struct pt_state *pts);
+
+/**
+ * pt_table_ptr() - Return a CPU pointer for a table item
+ * @pts: Entry to query
+ *
+ * Same as pt_table_pa() but returns a CPU pointer.
+ */
+static inline struct pt_table_p *pt_table_ptr(const struct pt_state *pts)
+{
+ return __va(pt_table_pa(pts));
+}
+
+/**
+ * pt_max_sw_bit() - Return the maximum software bit usable for any level and
+ * entry
+ * @common: Page table
+ *
+ * The returned bit number can be passed as bitnr to the other sw_bit
+ * functions.
+ */
+static inline unsigned int pt_max_sw_bit(struct pt_common *common);
+
+/**
+ * pt_test_sw_bit_acquire() - Read a software bit in an item
+ * @pts: Entry to read
+ * @bitnr: Bit to read
+ *
+ * Software bits are ignored by HW and can be used for any purpose by the
+ * software. This does a test bit and acquire operation.
+ */
+static inline bool pt_test_sw_bit_acquire(struct pt_state *pts,
+ unsigned int bitnr);
+
+/**
+ * pt_set_sw_bit_release() - Set a software bit in an item
+ * @pts: Entry to set
+ * @bitnr: Bit to set
+ *
+ * Software bits are ignored by HW and can be used for any purpose by the
+ * software. This does a set bit and release operation.
+ */
+static inline void pt_set_sw_bit_release(struct pt_state *pts,
+ unsigned int bitnr);
+
+/**
+ * pt_load_entry() - Read from the location pts points at into the pts
+ * @pts: Table index to load
+ *
+ * Set the type of entry that was loaded. pts->entry and pts->table_lower
+ * will be filled in with the entry's content.
+ */
+static inline void pt_load_entry(struct pt_state *pts)
+{
+ pts->type = pt_load_entry_raw(pts);
+ if (pts->type == PT_ENTRY_TABLE)
+ pts->table_lower = pt_table_ptr(pts);
+}
+#endif
diff --git a/drivers/iommu/generic_pt/pt_defs.h b/drivers/iommu/generic_pt/pt_defs.h
new file mode 100644
index 000000000000..c25544d72f97
--- /dev/null
+++ b/drivers/iommu/generic_pt/pt_defs.h
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * This header is included before the format. It contains definitions
+ * that are required to compile the format. The header order is:
+ * pt_defs.h
+ * fmt_XX.h
+ * pt_common.h
+ */
+#ifndef __GENERIC_PT_DEFS_H
+#define __GENERIC_PT_DEFS_H
+
+#include <linux/generic_pt/common.h>
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/bits.h>
+#include <linux/limits.h>
+#include <linux/bug.h>
+#include <linux/kconfig.h>
+#include "pt_log2.h"
+
+/* Header self-compile default defines */
+#ifndef pt_write_attrs
+typedef u64 pt_vaddr_t;
+typedef u64 pt_oaddr_t;
+#endif
+
+struct pt_table_p;
+
+enum {
+ PT_VADDR_MAX = sizeof(pt_vaddr_t) == 8 ? U64_MAX : U32_MAX,
+ PT_VADDR_MAX_LG2 = sizeof(pt_vaddr_t) == 8 ? 64 : 32,
+ PT_OADDR_MAX = sizeof(pt_oaddr_t) == 8 ? U64_MAX : U32_MAX,
+ PT_OADDR_MAX_LG2 = sizeof(pt_oaddr_t) == 8 ? 64 : 32,
+};
+
+/*
+ * The format instantiation can have features wired off or on to optimize the
+ * code gen. Supported features are just a reflection of what the current set of
+ * kernel users want to use.
+ */
+#ifndef PT_SUPPORTED_FEATURES
+#define PT_SUPPORTED_FEATURES 0
+#endif
+
+/*
+ * When in debug mode we compile all formats with all features. This allows the
+ * kunit to test the full matrix. SIGN_EXTEND can't co-exist with DYNAMIC_TOP or
+ * FULL_VA. DMA_INCOHERENT requires a SW bit that not all formats have.
+ */
+#if IS_ENABLED(CONFIG_DEBUG_GENERIC_PT)
+enum {
+ PT_ORIG_SUPPORTED_FEATURES = PT_SUPPORTED_FEATURES,
+ PT_DEBUG_SUPPORTED_FEATURES =
+ UINT_MAX &
+ ~((PT_ORIG_SUPPORTED_FEATURES & BIT(PT_FEAT_DMA_INCOHERENT) ?
+ 0 :
+ BIT(PT_FEAT_DMA_INCOHERENT))) &
+ ~((PT_ORIG_SUPPORTED_FEATURES & BIT(PT_FEAT_SIGN_EXTEND)) ?
+ BIT(PT_FEAT_DYNAMIC_TOP) | BIT(PT_FEAT_FULL_VA) :
+ BIT(PT_FEAT_SIGN_EXTEND)),
+};
+#undef PT_SUPPORTED_FEATURES
+#define PT_SUPPORTED_FEATURES PT_DEBUG_SUPPORTED_FEATURES
+#endif
+
+#ifndef PT_FORCE_ENABLED_FEATURES
+#define PT_FORCE_ENABLED_FEATURES 0
+#endif
+
+/**
+ * DOC: Generic Page Table Language
+ *
+ * Language used in Generic Page Table
+ * VA
+ * The input address to the page table, often the virtual address.
+ * OA
+ * The output address from the page table, often the physical address.
+ * leaf
+ * An entry that results in an output address.
+ * start/end
+ * A half-open range, e.g. [0,0) refers to no VA.
+ * start/last
+ * An inclusive closed range, e.g. [0,0] refers to the VA 0
+ * common
+ * The generic page table container struct pt_common
+ * level
+ * Level 0 is always a table of only leaves with no further table pointers.
+ * Increasing levels increase the size of the table items. The least
+ * significant VA bits used to index page tables are used to index the Level
+ * 0 table. The various labels for table levels used by HW descriptions are
+ * not used.
+ * top_level
+ * The inclusive highest level of the table. A two-level table
+ * has a top level of 1.
+ * table
+ * A linear array of translation items for that level.
+ * index
+ * The position in a table of an element: item = table[index]
+ * item
+ * A single element in a table, i.e. table[index]
+ * entry
+ * A single logical element in a table. If contiguous pages are not
+ * supported then item and entry are the same thing, otherwise entry refers
+ * to all the items that comprise a single contiguous translation.
+ * item/entry_size
+ * The number of bytes of VA a single item or entry translates.
+ * If the item points to a lower table then the next table covers
+ * this size. If the entry translates to an output address then the
+ * full OA is: OA | (VA % entry_size)
+ * contig_count
+ * The number of consecutive items fused into a single entry.
+ * item_size * contig_count is the size of that entry's translation.
+ * lg2
+ * Indicates the value is encoded as log2, i.e. 1<<x is the actual value.
+ * Normally the compiler can optimize divide and mod with log2 values
+ * automatically when inlining, however it can't if the values are not
+ * constant expressions. So we do it by hand, since we want to avoid
+ * 64-bit divmod.
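+ *
+ * As a worked example of these terms: at a level where item_size is 4KiB
+ * (lg2 12) and contig_count is 16 (lg2 4), each entry is made of 16 items,
+ * entry_size is 64KiB, and an entry outputting OA translates a given VA to
+ * OA | (VA % SZ_64K).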
+ */
+
+/* Returned by pt_load_entry() and for_each_pt_level_entry() */
+enum pt_entry_type {
+ PT_ENTRY_EMPTY,
+ /* Entry is valid and points to a lower table level */
+ PT_ENTRY_TABLE,
+ /* Entry is valid and returns an output address */
+ PT_ENTRY_OA,
+};
+
+struct pt_range {
+ struct pt_common *common;
+ struct pt_table_p *top_table;
+ pt_vaddr_t va;
+ pt_vaddr_t last_va;
+ u8 top_level;
+ u8 max_vasz_lg2;
+};
+
+/*
+ * Similar to xa_state, this records information about an in-progress parse at a
+ * single level.
+ */
+struct pt_state {
+ struct pt_range *range;
+ struct pt_table_p *table;
+ struct pt_table_p *table_lower;
+ u64 entry;
+ enum pt_entry_type type;
+ unsigned short index;
+ unsigned short end_index;
+ u8 level;
+};
+
+#define pt_cur_table(pts, type) ((type *)((pts)->table))
+
+/*
+ * Try to install a new table pointer. The locking methodology requires this to
+ * be atomic (multiple threads can race to install a pointer). The losing
+ * threads will fail the atomic and return false. They should free any memory
+ * and reparse the table level again.
+ */
+#if !IS_ENABLED(CONFIG_GENERIC_ATOMIC64)
+static inline bool pt_table_install64(struct pt_state *pts, u64 table_entry)
+{
+ u64 *entryp = pt_cur_table(pts, u64) + pts->index;
+ u64 old_entry = pts->entry;
+ bool ret;
+
+ /*
+	 * Ensure the zero'd table content itself is visible before its PTE can
+	 * be seen. The release is a NOP on !SMP, but the HW walker is still
+	 * doing an acquire.
+ */
+ if (!IS_ENABLED(CONFIG_SMP))
+ dma_wmb();
+ ret = try_cmpxchg64_release(entryp, &old_entry, table_entry);
+ if (ret)
+ pts->entry = table_entry;
+ return ret;
+}
+#endif
+
+static inline bool pt_table_install32(struct pt_state *pts, u32 table_entry)
+{
+ u32 *entryp = pt_cur_table(pts, u32) + pts->index;
+ u32 old_entry = pts->entry;
+ bool ret;
+
+ /*
+	 * Ensure the zero'd table content itself is visible before its PTE can
+	 * be seen. The release is a NOP on !SMP, but the HW walker is still
+	 * doing an acquire.
+ */
+ if (!IS_ENABLED(CONFIG_SMP))
+ dma_wmb();
+ ret = try_cmpxchg_release(entryp, &old_entry, table_entry);
+ if (ret)
+ pts->entry = table_entry;
+ return ret;
+}
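+
+/*
+ * A minimal sketch of the retry pattern a writer is expected to use with the
+ * installers; the alloc/free helpers named here are hypothetical, not part of
+ * this header:
+ *
+ *	pt_load_entry(pts);
+ *	while (pts->type == PT_ENTRY_EMPTY) {
+ *		u64 new_entry = alloc_and_format_table(pts);
+ *
+ *		if (pt_table_install64(pts, new_entry))
+ *			break;
+ *		free_formatted_table(new_entry);
+ *		pt_load_entry(pts);
+ *	}
+ */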
+
+#define PT_SUPPORTED_FEATURE(feature_nr) (PT_SUPPORTED_FEATURES & BIT(feature_nr))
+
+static inline bool pt_feature(const struct pt_common *common,
+ unsigned int feature_nr)
+{
+ if (PT_FORCE_ENABLED_FEATURES & BIT(feature_nr))
+ return true;
+ if (!PT_SUPPORTED_FEATURE(feature_nr))
+ return false;
+ return common->features & BIT(feature_nr);
+}
+
+static inline bool pts_feature(const struct pt_state *pts,
+ unsigned int feature_nr)
+{
+ return pt_feature(pts->range->common, feature_nr);
+}
+
+/*
+ * PT_WARN_ON is used for invariants that the kunit verifies can never
+ * happen.
+ */
+#if IS_ENABLED(CONFIG_DEBUG_GENERIC_PT)
+#define PT_WARN_ON WARN_ON
+#else
+static inline bool PT_WARN_ON(bool condition)
+{
+ return false;
+}
+#endif
+
+/* These all work on the VA type */
+#define log2_to_int(a_lg2) log2_to_int_t(pt_vaddr_t, a_lg2)
+#define log2_to_max_int(a_lg2) log2_to_max_int_t(pt_vaddr_t, a_lg2)
+#define log2_div(a, b_lg2) log2_div_t(pt_vaddr_t, a, b_lg2)
+#define log2_div_eq(a, b, c_lg2) log2_div_eq_t(pt_vaddr_t, a, b, c_lg2)
+#define log2_mod(a, b_lg2) log2_mod_t(pt_vaddr_t, a, b_lg2)
+#define log2_mod_eq_max(a, b_lg2) log2_mod_eq_max_t(pt_vaddr_t, a, b_lg2)
+#define log2_set_mod(a, val, b_lg2) log2_set_mod_t(pt_vaddr_t, a, val, b_lg2)
+#define log2_set_mod_max(a, b_lg2) log2_set_mod_max_t(pt_vaddr_t, a, b_lg2)
+#define log2_mul(a, b_lg2) log2_mul_t(pt_vaddr_t, a, b_lg2)
+#define vaffs(a) ffs_t(pt_vaddr_t, a)
+#define vafls(a) fls_t(pt_vaddr_t, a)
+#define vaffz(a) ffz_t(pt_vaddr_t, a)
+
+/*
+ * The full VA (fva) versions permit the lg2 value to be == PT_VADDR_MAX_LG2
+ * and generate a useful defined result; the non-fva versions would shift by
+ * the full type width, which is undefined, at this extreme.
+ */
+static inline pt_vaddr_t fvalog2_div(pt_vaddr_t a, unsigned int b_lg2)
+{
+ if (PT_SUPPORTED_FEATURE(PT_FEAT_FULL_VA) && b_lg2 == PT_VADDR_MAX_LG2)
+ return 0;
+ return log2_div_t(pt_vaddr_t, a, b_lg2);
+}
+
+static inline pt_vaddr_t fvalog2_mod(pt_vaddr_t a, unsigned int b_lg2)
+{
+ if (PT_SUPPORTED_FEATURE(PT_FEAT_FULL_VA) && b_lg2 == PT_VADDR_MAX_LG2)
+ return a;
+ return log2_mod_t(pt_vaddr_t, a, b_lg2);
+}
+
+static inline bool fvalog2_div_eq(pt_vaddr_t a, pt_vaddr_t b,
+ unsigned int c_lg2)
+{
+ if (PT_SUPPORTED_FEATURE(PT_FEAT_FULL_VA) && c_lg2 == PT_VADDR_MAX_LG2)
+ return true;
+ return log2_div_eq_t(pt_vaddr_t, a, b, c_lg2);
+}
+
+static inline pt_vaddr_t fvalog2_set_mod(pt_vaddr_t a, pt_vaddr_t val,
+ unsigned int b_lg2)
+{
+ if (PT_SUPPORTED_FEATURE(PT_FEAT_FULL_VA) && b_lg2 == PT_VADDR_MAX_LG2)
+ return val;
+ return log2_set_mod_t(pt_vaddr_t, a, val, b_lg2);
+}
+
+static inline pt_vaddr_t fvalog2_set_mod_max(pt_vaddr_t a, unsigned int b_lg2)
+{
+ if (PT_SUPPORTED_FEATURE(PT_FEAT_FULL_VA) && b_lg2 == PT_VADDR_MAX_LG2)
+ return PT_VADDR_MAX;
+ return log2_set_mod_max_t(pt_vaddr_t, a, b_lg2);
+}
+
+/* These all work on the OA type */
+#define oalog2_to_int(a_lg2) log2_to_int_t(pt_oaddr_t, a_lg2)
+#define oalog2_to_max_int(a_lg2) log2_to_max_int_t(pt_oaddr_t, a_lg2)
+#define oalog2_div(a, b_lg2) log2_div_t(pt_oaddr_t, a, b_lg2)
+#define oalog2_div_eq(a, b, c_lg2) log2_div_eq_t(pt_oaddr_t, a, b, c_lg2)
+#define oalog2_mod(a, b_lg2) log2_mod_t(pt_oaddr_t, a, b_lg2)
+#define oalog2_mod_eq_max(a, b_lg2) log2_mod_eq_max_t(pt_oaddr_t, a, b_lg2)
+#define oalog2_set_mod(a, val, b_lg2) log2_set_mod_t(pt_oaddr_t, a, val, b_lg2)
+#define oalog2_set_mod_max(a, b_lg2) log2_set_mod_max_t(pt_oaddr_t, a, b_lg2)
+#define oalog2_mul(a, b_lg2) log2_mul_t(pt_oaddr_t, a, b_lg2)
+#define oaffs(a) ffs_t(pt_oaddr_t, a)
+#define oafls(a) fls_t(pt_oaddr_t, a)
+#define oaffz(a) ffz_t(pt_oaddr_t, a)
+
+static inline uintptr_t _pt_top_set(struct pt_table_p *table_mem,
+ unsigned int top_level)
+{
+ return top_level | (uintptr_t)table_mem;
+}
+
+static inline void pt_top_set(struct pt_common *common,
+ struct pt_table_p *table_mem,
+ unsigned int top_level)
+{
+ WRITE_ONCE(common->top_of_table, _pt_top_set(table_mem, top_level));
+}
+
+static inline void pt_top_set_level(struct pt_common *common,
+ unsigned int top_level)
+{
+ pt_top_set(common, NULL, top_level);
+}
+
+static inline unsigned int pt_top_get_level(const struct pt_common *common)
+{
+ return READ_ONCE(common->top_of_table) % (1 << PT_TOP_LEVEL_BITS);
+}
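+
+/*
+ * Illustrative only: if, say, PT_TOP_LEVEL_BITS == 3, a page aligned top
+ * table at 0x1000 with top_level 2 is encoded by _pt_top_set() as 0x1002.
+ * pt_top_get_level() recovers the 2 from the low bits and masking with
+ * PT_TOP_LEVEL_MASK recovers the table pointer.
+ */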
+
+static inline bool pt_check_install_leaf_args(struct pt_state *pts,
+ pt_oaddr_t oa,
+ unsigned int oasz_lg2);
+
+#endif
diff --git a/drivers/iommu/generic_pt/pt_fmt_defaults.h b/drivers/iommu/generic_pt/pt_fmt_defaults.h
new file mode 100644
index 000000000000..69fb7c2314ca
--- /dev/null
+++ b/drivers/iommu/generic_pt/pt_fmt_defaults.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * Default definitions for formats that don't define these functions.
+ */
+#ifndef __GENERIC_PT_PT_FMT_DEFAULTS_H
+#define __GENERIC_PT_PT_FMT_DEFAULTS_H
+
+#include "pt_defs.h"
+#include <linux/log2.h>
+
+/* Header self-compile default defines */
+#ifndef pt_load_entry_raw
+#include "fmt/amdv1.h"
+#endif
+
+/*
+ * The format must provide PT_GRANULE_LG2SZ, PT_TABLEMEM_LG2SZ, and
+ * PT_ITEM_WORD_SIZE. They must be the same at every level excluding the top.
+ */
+#ifndef pt_table_item_lg2sz
+static inline unsigned int pt_table_item_lg2sz(const struct pt_state *pts)
+{
+ return PT_GRANULE_LG2SZ +
+ (PT_TABLEMEM_LG2SZ - ilog2(PT_ITEM_WORD_SIZE)) * pts->level;
+}
+#endif
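+
+/*
+ * For illustration only: with x86-like constants (PT_GRANULE_LG2SZ == 12,
+ * PT_TABLEMEM_LG2SZ == 12, PT_ITEM_WORD_SIZE == 8) the default above yields
+ * item sizes of 2^12/2^21/2^30/2^39 for levels 0/1/2/3, i.e. 4K, 2M, 1G and
+ * 512G.
+ */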
+
+#ifndef pt_pgsz_lg2_to_level
+static inline unsigned int pt_pgsz_lg2_to_level(struct pt_common *common,
+ unsigned int pgsize_lg2)
+{
+ return ((unsigned int)(pgsize_lg2 - PT_GRANULE_LG2SZ)) /
+ (PT_TABLEMEM_LG2SZ - ilog2(PT_ITEM_WORD_SIZE));
+}
+#endif
+
+/*
+ * If not supplied by the format then contiguous pages are not supported.
+ *
+ * If contiguous pages are supported then the format must also provide
+ * pt_contig_count_lg2() if it supports a single contiguous size per level,
+ * or pt_possible_sizes() if it supports multiple sizes per level.
+ */
+#ifndef pt_entry_num_contig_lg2
+static inline unsigned int pt_entry_num_contig_lg2(const struct pt_state *pts)
+{
+ return ilog2(1);
+}
+
+/*
+ * Return the number of contiguous OA items forming an entry at this table level
+ */
+static inline unsigned short pt_contig_count_lg2(const struct pt_state *pts)
+{
+ return ilog2(1);
+}
+#endif
+
+/* If not supplied by the format then dirty tracking is not supported */
+#ifndef pt_entry_is_write_dirty
+static inline bool pt_entry_is_write_dirty(const struct pt_state *pts)
+{
+ return false;
+}
+
+static inline void pt_entry_make_write_clean(struct pt_state *pts)
+{
+}
+
+static inline bool pt_dirty_supported(struct pt_common *common)
+{
+ return false;
+}
+#else
+/* If not supplied then dirty tracking is always supported */
+#ifndef pt_dirty_supported
+static inline bool pt_dirty_supported(struct pt_common *common)
+{
+ return true;
+}
+#endif
+#endif
+
+#ifndef pt_entry_make_write_dirty
+static inline bool pt_entry_make_write_dirty(struct pt_state *pts)
+{
+ return false;
+}
+#endif
+
+/*
+ * Format supplies either:
+ * pt_entry_oa - OA is at the start of a contiguous entry
+ * or
+ * pt_item_oa - OA is adjusted for every item in a contiguous entry
+ *
+ * Build the missing one
+ *
+ * The internal helper _pt_entry_oa_fast() allows generating
+ * an efficient pt_entry_oa_exact(); it doesn't care which
+ * option is selected.
+ */
+#ifdef pt_entry_oa
+static inline pt_oaddr_t pt_item_oa(const struct pt_state *pts)
+{
+ return pt_entry_oa(pts) |
+ log2_mul(pts->index, pt_table_item_lg2sz(pts));
+}
+#define _pt_entry_oa_fast pt_entry_oa
+#endif
+
+#ifdef pt_item_oa
+static inline pt_oaddr_t pt_entry_oa(const struct pt_state *pts)
+{
+ return log2_set_mod(pt_item_oa(pts), 0,
+ pt_entry_num_contig_lg2(pts) +
+ pt_table_item_lg2sz(pts));
+}
+#define _pt_entry_oa_fast pt_item_oa
+#endif
+
+/*
+ * If not supplied by the format then use the constant
+ * PT_MAX_OUTPUT_ADDRESS_LG2.
+ */
+#ifndef pt_max_oa_lg2
+static inline unsigned int
+pt_max_oa_lg2(const struct pt_common *common)
+{
+ return PT_MAX_OUTPUT_ADDRESS_LG2;
+}
+#endif
+
+#ifndef pt_has_system_page_size
+static inline bool pt_has_system_page_size(const struct pt_common *common)
+{
+ return PT_GRANULE_LG2SZ == PAGE_SHIFT;
+}
+#endif
+
+/*
+ * If not supplied by the format then assume only one contiguous size determined
+ * by pt_contig_count_lg2()
+ */
+#ifndef pt_possible_sizes
+static inline unsigned short pt_contig_count_lg2(const struct pt_state *pts);
+
+/* Return a bitmap of possible leaf page sizes at this level */
+static inline pt_vaddr_t pt_possible_sizes(const struct pt_state *pts)
+{
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+
+ if (!pt_can_have_leaf(pts))
+ return 0;
+ return log2_to_int(isz_lg2) |
+ log2_to_int(pt_contig_count_lg2(pts) + isz_lg2);
+}
+#endif
+
+/* If not supplied by the format then use 0. */
+#ifndef pt_full_va_prefix
+static inline pt_vaddr_t pt_full_va_prefix(const struct pt_common *common)
+{
+ return 0;
+}
+#endif
+
+/* If not supplied by the format then zero fill using PT_ITEM_WORD_SIZE */
+#ifndef pt_clear_entries
+static inline void pt_clear_entries64(struct pt_state *pts,
+ unsigned int num_contig_lg2)
+{
+ u64 *tablep = pt_cur_table(pts, u64) + pts->index;
+ u64 *end = tablep + log2_to_int(num_contig_lg2);
+
+ PT_WARN_ON(log2_mod(pts->index, num_contig_lg2));
+ for (; tablep != end; tablep++)
+ WRITE_ONCE(*tablep, 0);
+}
+
+static inline void pt_clear_entries32(struct pt_state *pts,
+ unsigned int num_contig_lg2)
+{
+ u32 *tablep = pt_cur_table(pts, u32) + pts->index;
+ u32 *end = tablep + log2_to_int(num_contig_lg2);
+
+ PT_WARN_ON(log2_mod(pts->index, num_contig_lg2));
+ for (; tablep != end; tablep++)
+ WRITE_ONCE(*tablep, 0);
+}
+
+static inline void pt_clear_entries(struct pt_state *pts,
+ unsigned int num_contig_lg2)
+{
+ if (PT_ITEM_WORD_SIZE == sizeof(u32))
+ pt_clear_entries32(pts, num_contig_lg2);
+ else
+ pt_clear_entries64(pts, num_contig_lg2);
+}
+#define pt_clear_entries pt_clear_entries
+#endif
+
+/* If not supplied then SW bits are not supported */
+#ifdef pt_sw_bit
+static inline bool pt_test_sw_bit_acquire(struct pt_state *pts,
+ unsigned int bitnr)
+{
+ /* Acquire, pairs with pt_set_sw_bit_release() */
+ smp_mb();
+ /* For a contiguous entry the sw bit is only stored in the first item. */
+ return pts->entry & pt_sw_bit(bitnr);
+}
+#define pt_test_sw_bit_acquire pt_test_sw_bit_acquire
+
+static inline void pt_set_sw_bit_release(struct pt_state *pts,
+ unsigned int bitnr)
+{
+#if !IS_ENABLED(CONFIG_GENERIC_ATOMIC64)
+ if (PT_ITEM_WORD_SIZE == sizeof(u64)) {
+ u64 *entryp = pt_cur_table(pts, u64) + pts->index;
+ u64 old_entry = pts->entry;
+ u64 new_entry;
+
+ do {
+ new_entry = old_entry | pt_sw_bit(bitnr);
+ } while (!try_cmpxchg64_release(entryp, &old_entry, new_entry));
+ pts->entry = new_entry;
+ return;
+ }
+#endif
+ if (PT_ITEM_WORD_SIZE == sizeof(u32)) {
+ u32 *entryp = pt_cur_table(pts, u32) + pts->index;
+ u32 old_entry = pts->entry;
+ u32 new_entry;
+
+ do {
+ new_entry = old_entry | pt_sw_bit(bitnr);
+ } while (!try_cmpxchg_release(entryp, &old_entry, new_entry));
+ pts->entry = new_entry;
+ } else
+ BUILD_BUG();
+}
+#define pt_set_sw_bit_release pt_set_sw_bit_release
+#else
+static inline unsigned int pt_max_sw_bit(struct pt_common *common)
+{
+ return 0;
+}
+
+extern void __pt_no_sw_bit(void);
+static inline bool pt_test_sw_bit_acquire(struct pt_state *pts,
+ unsigned int bitnr)
+{
+ __pt_no_sw_bit();
+ return false;
+}
+
+static inline void pt_set_sw_bit_release(struct pt_state *pts,
+ unsigned int bitnr)
+{
+ __pt_no_sw_bit();
+}
+#endif
+
+/*
+ * The format can call this from its pt_install_leaf_entry() to check that
+ * the arguments are all aligned correctly.
+ */
+static inline bool pt_check_install_leaf_args(struct pt_state *pts,
+ pt_oaddr_t oa,
+ unsigned int oasz_lg2)
+{
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+
+ if (PT_WARN_ON(oalog2_mod(oa, oasz_lg2)))
+ return false;
+
+#ifdef pt_possible_sizes
+ if (PT_WARN_ON(isz_lg2 > oasz_lg2 ||
+ oasz_lg2 > isz_lg2 + pt_num_items_lg2(pts)))
+ return false;
+#else
+ if (PT_WARN_ON(oasz_lg2 != isz_lg2 &&
+ oasz_lg2 != isz_lg2 + pt_contig_count_lg2(pts)))
+ return false;
+#endif
+
+ if (PT_WARN_ON(oalog2_mod(pts->index, oasz_lg2 - isz_lg2)))
+ return false;
+ return true;
+}
+
+#endif
diff --git a/drivers/iommu/generic_pt/pt_iter.h b/drivers/iommu/generic_pt/pt_iter.h
new file mode 100644
index 000000000000..c0d8617cce29
--- /dev/null
+++ b/drivers/iommu/generic_pt/pt_iter.h
@@ -0,0 +1,636 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * Iterators for Generic Page Table
+ */
+#ifndef __GENERIC_PT_PT_ITER_H
+#define __GENERIC_PT_PT_ITER_H
+
+#include "pt_common.h"
+
+#include <linux/errno.h>
+
+/*
+ * Use to mangle symbols so that backtraces and the symbol table are
+ * understandable. Any non-inlined function should get mangled like this.
+ */
+#define NS(fn) CONCATENATE(PTPFX, fn)
+
+/**
+ * pt_check_range() - Validate the range can be iterated
+ * @range: Range to validate
+ *
+ * Check that VA and last_va fall within the permitted range of VAs. If the
+ * format is using PT_FEAT_SIGN_EXTEND then this also checks the sign extension
+ * is correct.
+ */
+static inline int pt_check_range(struct pt_range *range)
+{
+ pt_vaddr_t prefix;
+
+ PT_WARN_ON(!range->max_vasz_lg2);
+
+ if (pt_feature(range->common, PT_FEAT_SIGN_EXTEND)) {
+ PT_WARN_ON(range->common->max_vasz_lg2 != range->max_vasz_lg2);
+ prefix = fvalog2_div(range->va, range->max_vasz_lg2 - 1) ?
+ PT_VADDR_MAX :
+ 0;
+ } else {
+ prefix = pt_full_va_prefix(range->common);
+ }
+
+ if (!fvalog2_div_eq(range->va, prefix, range->max_vasz_lg2) ||
+ !fvalog2_div_eq(range->last_va, prefix, range->max_vasz_lg2))
+ return -ERANGE;
+ return 0;
+}
+
+/**
+ * pt_index_to_va() - Update range->va to the current pts->index
+ * @pts: Iteration State
+ *
+ * Adjust range->va to match the current index. This is done in a lazy manner
+ * since computing the VA takes several instructions and is rarely required.
+ */
+static inline void pt_index_to_va(struct pt_state *pts)
+{
+ pt_vaddr_t lower_va;
+
+ lower_va = log2_mul(pts->index, pt_table_item_lg2sz(pts));
+ pts->range->va = fvalog2_set_mod(pts->range->va, lower_va,
+ pt_table_oa_lg2sz(pts));
+}
+
+/*
+ * Add 2^index_count_lg2 entries to pts's index. If the index is currently in
+ * the middle of a contiguous block it is advanced to the end of the block.
+ */
+static inline void _pt_advance(struct pt_state *pts,
+ unsigned int index_count_lg2)
+{
+ pts->index = log2_set_mod(pts->index + log2_to_int(index_count_lg2), 0,
+ index_count_lg2);
+}
+
+/**
+ * pt_entry_fully_covered() - Check if the item or entry is entirely contained
+ * within pts->range
+ * @pts: Iteration State
+ * @oasz_lg2: The size of the item to check, pt_table_item_lg2sz() or
+ * pt_entry_oa_lg2sz()
+ *
+ * Returns: true if the item is fully enclosed by the pts->range.
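+ *
+ * For example, with oasz_lg2 == 21 (a 2M entry) the range
+ * [0x200000, 0x3fffff] fully covers the entry while [0x200000, 0x2fffff]
+ * does not.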
+ */
+static inline bool pt_entry_fully_covered(const struct pt_state *pts,
+ unsigned int oasz_lg2)
+{
+ struct pt_range *range = pts->range;
+
+	/* Range must begin at the start of the entry */
+ if (log2_mod(pts->range->va, oasz_lg2))
+ return false;
+
+ /* Range ends past the end of the entry */
+ if (!log2_div_eq(range->va, range->last_va, oasz_lg2))
+ return true;
+
+ /* Range ends at the end of the entry */
+ return log2_mod_eq_max(range->last_va, oasz_lg2);
+}
+
+/**
+ * pt_range_to_index() - Starting index for an iteration
+ * @pts: Iteration State
+ *
+ * Return: the starting index for the iteration in pts.
+ */
+static inline unsigned int pt_range_to_index(const struct pt_state *pts)
+{
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+
+ PT_WARN_ON(pts->level > pts->range->top_level);
+ if (pts->range->top_level == pts->level)
+ return log2_div(fvalog2_mod(pts->range->va,
+ pts->range->max_vasz_lg2),
+ isz_lg2);
+ return log2_mod(log2_div(pts->range->va, isz_lg2),
+ pt_num_items_lg2(pts));
+}
+
+/**
+ * pt_range_to_end_index() - Ending index for an iteration
+ * @pts: Iteration State
+ *
+ * Return: one past the last index for the iteration in pts.
+ */
+static inline unsigned int pt_range_to_end_index(const struct pt_state *pts)
+{
+ unsigned int isz_lg2 = pt_table_item_lg2sz(pts);
+ struct pt_range *range = pts->range;
+ unsigned int num_entries_lg2;
+
+ if (range->va == range->last_va)
+ return pts->index + 1;
+
+ if (pts->range->top_level == pts->level)
+ return log2_div(fvalog2_mod(pts->range->last_va,
+ pts->range->max_vasz_lg2),
+ isz_lg2) +
+ 1;
+
+ num_entries_lg2 = pt_num_items_lg2(pts);
+
+ /* last_va falls within this table */
+ if (log2_div_eq(range->va, range->last_va, num_entries_lg2 + isz_lg2))
+ return log2_mod(log2_div(pts->range->last_va, isz_lg2),
+ num_entries_lg2) +
+ 1;
+
+ return log2_to_int(num_entries_lg2);
+}
+
+static inline void _pt_iter_first(struct pt_state *pts)
+{
+ pts->index = pt_range_to_index(pts);
+ pts->end_index = pt_range_to_end_index(pts);
+ PT_WARN_ON(pts->index > pts->end_index);
+}
+
+static inline bool _pt_iter_load(struct pt_state *pts)
+{
+ if (pts->index >= pts->end_index)
+ return false;
+ pt_load_entry(pts);
+ return true;
+}
+
+/**
+ * pt_next_entry() - Advance pts to the next entry
+ * @pts: Iteration State
+ *
+ * Update pts to go to the next index at this level. If pts is pointing at a
+ * contiguous entry then the index may advance by more than one.
+ */
+static inline void pt_next_entry(struct pt_state *pts)
+{
+ if (pts->type == PT_ENTRY_OA &&
+ !__builtin_constant_p(pt_entry_num_contig_lg2(pts) == 0))
+ _pt_advance(pts, pt_entry_num_contig_lg2(pts));
+ else
+ pts->index++;
+ pt_index_to_va(pts);
+}
+
+/**
+ * for_each_pt_level_entry() - For loop wrapper over entries in the range
+ * @pts: Iteration State
+ *
+ * This is the basic iteration primitive. It iterates over all the entries in
+ * pts->range that fall within the pts's current table level. Each step does
+ * pt_load_entry(pts).
+ */
+#define for_each_pt_level_entry(pts) \
+ for (_pt_iter_first(pts); _pt_iter_load(pts); pt_next_entry(pts))
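+
+/*
+ * A sketch of a walker level function built on this iterator; the function
+ * and counter names are illustrative only, not part of this header:
+ *
+ *	static int count_oas(struct pt_range *range, void *arg,
+ *			     unsigned int level, struct pt_table_p *table)
+ *	{
+ *		struct pt_state pts = pt_init(range, level, table);
+ *		u64 *count = arg;
+ *		int ret;
+ *
+ *		for_each_pt_level_entry(&pts) {
+ *			if (pts.type == PT_ENTRY_TABLE) {
+ *				ret = pt_descend(&pts, arg, count_oas);
+ *				if (ret)
+ *					return ret;
+ *			} else if (pts.type == PT_ENTRY_OA) {
+ *				(*count)++;
+ *			}
+ *		}
+ *		return 0;
+ *	}
+ */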
+
+/**
+ * pt_load_single_entry() - Version of pt_load_entry() usable within a walker
+ * @pts: Iteration State
+ *
+ * Alternative to for_each_pt_level_entry() if the walker function uses only a
+ * single entry.
+ */
+static inline enum pt_entry_type pt_load_single_entry(struct pt_state *pts)
+{
+ pts->index = pt_range_to_index(pts);
+ pt_load_entry(pts);
+ return pts->type;
+}
+
+static __always_inline struct pt_range _pt_top_range(struct pt_common *common,
+ uintptr_t top_of_table)
+{
+ struct pt_range range = {
+ .common = common,
+ .top_table =
+ (struct pt_table_p *)(top_of_table &
+ ~(uintptr_t)PT_TOP_LEVEL_MASK),
+ .top_level = top_of_table % (1 << PT_TOP_LEVEL_BITS),
+ };
+ struct pt_state pts = { .range = &range, .level = range.top_level };
+ unsigned int max_vasz_lg2;
+
+ max_vasz_lg2 = common->max_vasz_lg2;
+ if (pt_feature(common, PT_FEAT_DYNAMIC_TOP) &&
+ pts.level != PT_MAX_TOP_LEVEL)
+ max_vasz_lg2 = min_t(unsigned int, common->max_vasz_lg2,
+ pt_num_items_lg2(&pts) +
+ pt_table_item_lg2sz(&pts));
+
+ /*
+	 * With PT_FEAT_SIGN_EXTEND the top range defaults to the lower half
+	 * of the VA space only.
+ */
+ range.max_vasz_lg2 = max_vasz_lg2;
+ if (pt_feature(common, PT_FEAT_SIGN_EXTEND))
+ max_vasz_lg2--;
+
+ range.va = fvalog2_set_mod(pt_full_va_prefix(common), 0, max_vasz_lg2);
+ range.last_va =
+ fvalog2_set_mod_max(pt_full_va_prefix(common), max_vasz_lg2);
+ return range;
+}
+
+/**
+ * pt_top_range() - Return a range that spans part of the top level
+ * @common: Table
+ *
+ * For PT_FEAT_SIGN_EXTEND this will return the lower range, covering half the
+ * total page table. Otherwise it returns the entire page table.
+ */
+static __always_inline struct pt_range pt_top_range(struct pt_common *common)
+{
+ /*
+ * The top pointer can change without locking. We capture the value and
+	 * its level here and are safe to walk it so long as both values are
+ * captured without tearing.
+ */
+ return _pt_top_range(common, READ_ONCE(common->top_of_table));
+}
+
+/**
+ * pt_all_range() - Return a range that spans the entire page table
+ * @common: Table
+ *
+ * The returned range spans the whole page table. Due to how PT_FEAT_SIGN_EXTEND
+ * is supported range->va and range->last_va will be incorrect during the
+ * iteration and must not be accessed.
+ */
+static inline struct pt_range pt_all_range(struct pt_common *common)
+{
+ struct pt_range range = pt_top_range(common);
+
+ if (!pt_feature(common, PT_FEAT_SIGN_EXTEND))
+ return range;
+
+ /*
+ * Pretend the table is linear from 0 without a sign extension. This
+ * generates the correct indexes for iteration.
+ */
+ range.last_va = fvalog2_set_mod_max(0, range.max_vasz_lg2);
+ return range;
+}
+
+/**
+ * pt_upper_range() - Return a range that spans part of the top level
+ * @common: Table
+ *
+ * For PT_FEAT_SIGN_EXTEND this will return the upper range, covering half the
+ * total page table. Otherwise it returns the entire page table.
+ */
+static inline struct pt_range pt_upper_range(struct pt_common *common)
+{
+ struct pt_range range = pt_top_range(common);
+
+ if (!pt_feature(common, PT_FEAT_SIGN_EXTEND))
+ return range;
+
+ range.va = fvalog2_set_mod(PT_VADDR_MAX, 0, range.max_vasz_lg2 - 1);
+ range.last_va = PT_VADDR_MAX;
+ return range;
+}
+
+/**
+ * pt_make_range() - Return a range that spans part of the table
+ * @common: Table
+ * @va: Start address
+ * @last_va: Last address
+ *
+ * The caller must validate the range with pt_check_range() before using it.
+ */
+static __always_inline struct pt_range
+pt_make_range(struct pt_common *common, pt_vaddr_t va, pt_vaddr_t last_va)
+{
+ struct pt_range range =
+ _pt_top_range(common, READ_ONCE(common->top_of_table));
+
+ range.va = va;
+ range.last_va = last_va;
+
+ return range;
+}
+
+/*
+ * Span a slice of the table starting at a lower table level from an active
+ * walk.
+ */
+static __always_inline struct pt_range
+pt_make_child_range(const struct pt_range *parent, pt_vaddr_t va,
+ pt_vaddr_t last_va)
+{
+ struct pt_range range = *parent;
+
+ range.va = va;
+ range.last_va = last_va;
+
+ PT_WARN_ON(last_va < va);
+ PT_WARN_ON(pt_check_range(&range));
+
+ return range;
+}
+
+/**
+ * pt_init() - Initialize a pt_state on the stack
+ * @range: Range pointer to embed in the state
+ * @level: Table level for the state
+ * @table: Pointer to the table memory at level
+ *
+ * Helper to initialize the on-stack pt_state from walker arguments.
+ */
+static __always_inline struct pt_state
+pt_init(struct pt_range *range, unsigned int level, struct pt_table_p *table)
+{
+ struct pt_state pts = {
+ .range = range,
+ .table = table,
+ .level = level,
+ };
+ return pts;
+}
+
+/**
+ * pt_init_top() - Initialize a pt_state on the stack
+ * @range: Range pointer to embed in the state
+ *
+ * The pt_state points to the topmost level.
+ */
+static __always_inline struct pt_state pt_init_top(struct pt_range *range)
+{
+ return pt_init(range, range->top_level, range->top_table);
+}
+
+typedef int (*pt_level_fn_t)(struct pt_range *range, void *arg,
+ unsigned int level, struct pt_table_p *table);
+
+/**
+ * pt_descend() - Recursively invoke the walker for the lower level
+ * @pts: Iteration State
+ * @arg: Value to pass to the function
+ * @fn: Walker function to call
+ *
+ * pts must point to a table entry. Invoke fn as a walker on the lower table
+ * pts points to.
+ */
+static __always_inline int pt_descend(struct pt_state *pts, void *arg,
+ pt_level_fn_t fn)
+{
+ int ret;
+
+ if (PT_WARN_ON(!pts->table_lower))
+ return -EINVAL;
+
+ ret = (*fn)(pts->range, arg, pts->level - 1, pts->table_lower);
+ return ret;
+}
+
+/**
+ * pt_walk_range() - Walk over a VA range
+ * @range: Range pointer
+ * @fn: Walker function to call
+ * @arg: Value to pass to the function
+ *
+ * Walk over a VA range. The caller should have done a validity check, at
+ * least calling pt_check_range(), when building range. The walk will
+ * start at the topmost table.
+ */
+static __always_inline int pt_walk_range(struct pt_range *range,
+ pt_level_fn_t fn, void *arg)
+{
+ return fn(range, arg, range->top_level, range->top_table);
+}
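+
+/*
+ * Typical top level usage, reusing the hypothetical count_oas() walker
+ * sketched above for some caller supplied va/last_va:
+ *
+ *	struct pt_range range = pt_make_range(common, va, last_va);
+ *	u64 count = 0;
+ *	int ret = pt_check_range(&range);
+ *
+ *	if (!ret)
+ *		ret = pt_walk_range(&range, count_oas, &count);
+ */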
+
+/*
+ * pt_walk_descend() - Recursively invoke the walker for a slice of a lower
+ * level
+ * @pts: Iteration State
+ * @va: Start address
+ * @last_va: Last address
+ * @fn: Walker function to call
+ * @arg: Value to pass to the function
+ *
+ * With pts pointing at a table entry this will descend and iterate over a
+ * slice of the lower table. The caller must ensure that va/last_va are within
+ * the table
+ * item. This creates a new walk and does not alter pts or pts->range.
+ */
+static __always_inline int pt_walk_descend(const struct pt_state *pts,
+ pt_vaddr_t va, pt_vaddr_t last_va,
+ pt_level_fn_t fn, void *arg)
+{
+ struct pt_range range = pt_make_child_range(pts->range, va, last_va);
+
+ if (PT_WARN_ON(!pt_can_have_table(pts)) ||
+ PT_WARN_ON(!pts->table_lower))
+ return -EINVAL;
+
+ return fn(&range, arg, pts->level - 1, pts->table_lower);
+}
+
+/*
+ * pt_walk_descend_all() - Recursively invoke the walker for a table item
+ * @parent_pts: Iteration State
+ * @fn: Walker function to call
+ * @arg: Value to pass to the function
+ *
+ * With parent_pts pointing at a table entry this will descend and iterate
+ * over the entire lower table. This creates a new walk and does not alter
+ * parent_pts or its range.
+ */
+static __always_inline int
+pt_walk_descend_all(const struct pt_state *parent_pts, pt_level_fn_t fn,
+ void *arg)
+{
+ unsigned int isz_lg2 = pt_table_item_lg2sz(parent_pts);
+
+ return pt_walk_descend(parent_pts,
+ log2_set_mod(parent_pts->range->va, 0, isz_lg2),
+ log2_set_mod_max(parent_pts->range->va, isz_lg2),
+ fn, arg);
+}
+
+/**
+ * pt_range_slice() - Return a range that spans indexes
+ * @pts: Iteration State
+ * @start_index: Starting index within pts
+ * @end_index: Ending index within pts
+ *
+ * Create a range that spans an index range of the current table level that
+ * pt_state points at.
+ */
+static inline struct pt_range pt_range_slice(const struct pt_state *pts,
+ unsigned int start_index,
+ unsigned int end_index)
+{
+ unsigned int table_lg2sz = pt_table_oa_lg2sz(pts);
+ pt_vaddr_t last_va;
+ pt_vaddr_t va;
+
+ va = fvalog2_set_mod(pts->range->va,
+ log2_mul(start_index, pt_table_item_lg2sz(pts)),
+ table_lg2sz);
+ last_va = fvalog2_set_mod(
+ pts->range->va,
+ log2_mul(end_index, pt_table_item_lg2sz(pts)) - 1, table_lg2sz);
+ return pt_make_child_range(pts->range, va, last_va);
+}
+
+/**
+ * pt_top_memsize_lg2() - Log2 size of the top table allocation
+ * @common: Table
+ * @top_of_table: Top of table value from _pt_top_set()
+ *
+ * Compute the allocation size of the top table. For PT_FEAT_DYNAMIC_TOP this
+ * will compute the top size assuming the table will grow.
+ */
+static inline unsigned int pt_top_memsize_lg2(struct pt_common *common,
+ uintptr_t top_of_table)
+{
+ struct pt_range range = _pt_top_range(common, top_of_table);
+ struct pt_state pts = pt_init_top(&range);
+ unsigned int num_items_lg2;
+
+ num_items_lg2 = common->max_vasz_lg2 - pt_table_item_lg2sz(&pts);
+ if (range.top_level != PT_MAX_TOP_LEVEL &&
+ pt_feature(common, PT_FEAT_DYNAMIC_TOP))
+ num_items_lg2 = min(num_items_lg2, pt_num_items_lg2(&pts));
+
+ /* Round up the allocation size to the minimum alignment */
+ return max(ffs_t(u64, PT_TOP_PHYS_MASK),
+ num_items_lg2 + ilog2(PT_ITEM_WORD_SIZE));
+}
+
+/**
+ * pt_compute_best_pgsize() - Determine the best page size for leaf entries
+ * @pgsz_bitmap: Permitted page sizes
+ * @va: Starting virtual address for the leaf entry
+ * @last_va: Last virtual address for the leaf entry, sets the max page size
+ * @oa: Starting output address for the leaf entry
+ *
+ * Compute the largest page size for va, last_va, and oa together and return it
+ * in lg2. The largest page size depends on the format's supported page sizes at
+ * this level, and the relative alignment of the VA and OA addresses. 0 means
+ * the OA cannot be stored with the provided pgsz_bitmap.
+ */
+static inline unsigned int pt_compute_best_pgsize(pt_vaddr_t pgsz_bitmap,
+ pt_vaddr_t va,
+ pt_vaddr_t last_va,
+ pt_oaddr_t oa)
+{
+ unsigned int best_pgsz_lg2;
+ unsigned int pgsz_lg2;
+ pt_vaddr_t len = last_va - va + 1;
+ pt_vaddr_t mask;
+
+ if (PT_WARN_ON(va >= last_va))
+ return 0;
+
+ /*
+ * Given a VA/OA pair the best page size is the largest page size
+ * where:
+ *
+ * 1) VA and OA start at the page. Bitwise this is the count of least
+ * significant 0 bits.
+ * This also implies that last_va/oa has the same prefix as va/oa.
+ */
+ mask = va | oa;
+
+ /*
+ * 2) The page size is not larger than the last_va (length). Since page
+ * sizes are always power of two this can't be larger than the
+ * largest power of two factor of the length.
+ */
+ mask |= log2_to_int(vafls(len) - 1);
+
+ best_pgsz_lg2 = vaffs(mask);
+
+ /* Choose the highest bit <= best_pgsz_lg2 */
+ if (best_pgsz_lg2 < PT_VADDR_MAX_LG2 - 1)
+ pgsz_bitmap = log2_mod(pgsz_bitmap, best_pgsz_lg2 + 1);
+
+ pgsz_lg2 = vafls(pgsz_bitmap);
+ if (!pgsz_lg2)
+ return 0;
+
+ pgsz_lg2--;
+
+ PT_WARN_ON(log2_mod(va, pgsz_lg2) != 0);
+ PT_WARN_ON(oalog2_mod(oa, pgsz_lg2) != 0);
+ PT_WARN_ON(va + log2_to_int(pgsz_lg2) - 1 > last_va);
+ PT_WARN_ON(!log2_div_eq(va, va + log2_to_int(pgsz_lg2) - 1, pgsz_lg2));
+ PT_WARN_ON(
+ !oalog2_div_eq(oa, oa + log2_to_int(pgsz_lg2) - 1, pgsz_lg2));
+ return pgsz_lg2;
+}
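+
+/*
+ * Worked example: for va == 0x200000, last_va == 0x3fffff, oa == 0x400000
+ * and pgsz_bitmap == SZ_4K | SZ_2M: mask = va | oa has its lowest set bit at
+ * bit 21, the length 0x200000 also caps the size at bit 21, the SZ_2M bit
+ * survives the bitmap trim, and the function returns 21 (a 2M page).
+ */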
+
+#define _PT_MAKE_CALL_LEVEL(fn) \
+ static __always_inline int fn(struct pt_range *range, void *arg, \
+ unsigned int level, \
+ struct pt_table_p *table) \
+ { \
+ static_assert(PT_MAX_TOP_LEVEL <= 5); \
+ if (level == 0) \
+ return CONCATENATE(fn, 0)(range, arg, 0, table); \
+ if (level == 1 || PT_MAX_TOP_LEVEL == 1) \
+ return CONCATENATE(fn, 1)(range, arg, 1, table); \
+ if (level == 2 || PT_MAX_TOP_LEVEL == 2) \
+ return CONCATENATE(fn, 2)(range, arg, 2, table); \
+ if (level == 3 || PT_MAX_TOP_LEVEL == 3) \
+ return CONCATENATE(fn, 3)(range, arg, 3, table); \
+ if (level == 4 || PT_MAX_TOP_LEVEL == 4) \
+ return CONCATENATE(fn, 4)(range, arg, 4, table); \
+ return CONCATENATE(fn, 5)(range, arg, 5, table); \
+ }
+
+static inline int __pt_make_level_fn_err(struct pt_range *range, void *arg,
+ unsigned int unused_level,
+ struct pt_table_p *table)
+{
+ static_assert(PT_MAX_TOP_LEVEL <= 5);
+ return -EPROTOTYPE;
+}
+
+#define __PT_MAKE_LEVEL_FN(fn, level, descend_fn, do_fn) \
+ static inline int fn(struct pt_range *range, void *arg, \
+ unsigned int unused_level, \
+ struct pt_table_p *table) \
+ { \
+ return do_fn(range, arg, level, table, descend_fn); \
+ }
+
+/**
+ * PT_MAKE_LEVELS() - Build an unwound walker
+ * @fn: Name of the walker function
+ * @do_fn: Function to call at each level
+ *
+ * This builds a function call tree that can be fully inlined.
+ * The caller must provide a function body in an __always_inline function::
+ *
+ * static __always_inline int do_fn(struct pt_range *range, void *arg,
+ * unsigned int level, struct pt_table_p *table,
+ * pt_level_fn_t descend_fn)
+ *
+ * An inline function will be created for each table level that calls do_fn with
+ * a compile time constant for level and a pointer to the next lower function.
+ * This generates an optimally inlined walk where each of the functions sees a
+ * constant level and can codegen the exact constants/etc for that level.
+ *
+ * Note this can produce a lot of code!
+ */
+#define PT_MAKE_LEVELS(fn, do_fn) \
+ __PT_MAKE_LEVEL_FN(CONCATENATE(fn, 0), 0, __pt_make_level_fn_err, \
+ do_fn); \
+ __PT_MAKE_LEVEL_FN(CONCATENATE(fn, 1), 1, CONCATENATE(fn, 0), do_fn); \
+ __PT_MAKE_LEVEL_FN(CONCATENATE(fn, 2), 2, CONCATENATE(fn, 1), do_fn); \
+ __PT_MAKE_LEVEL_FN(CONCATENATE(fn, 3), 3, CONCATENATE(fn, 2), do_fn); \
+ __PT_MAKE_LEVEL_FN(CONCATENATE(fn, 4), 4, CONCATENATE(fn, 3), do_fn); \
+ __PT_MAKE_LEVEL_FN(CONCATENATE(fn, 5), 5, CONCATENATE(fn, 4), do_fn); \
+ _PT_MAKE_CALL_LEVEL(fn)
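+
+/*
+ * A sketch of a do_fn body usable with PT_MAKE_LEVELS(); my_walk and
+ * __my_walk are hypothetical names:
+ *
+ *	static __always_inline int __my_walk(struct pt_range *range, void *arg,
+ *					     unsigned int level,
+ *					     struct pt_table_p *table,
+ *					     pt_level_fn_t descend_fn)
+ *	{
+ *		struct pt_state pts = pt_init(range, level, table);
+ *		int ret;
+ *
+ *		for_each_pt_level_entry(&pts) {
+ *			if (pts.type == PT_ENTRY_TABLE) {
+ *				ret = pt_descend(&pts, arg, descend_fn);
+ *				if (ret)
+ *					return ret;
+ *			}
+ *		}
+ *		return 0;
+ *	}
+ *	PT_MAKE_LEVELS(my_walk, __my_walk);
+ *
+ * Each generated level function then sees a compile time constant level, with
+ * the level 0 instance receiving __pt_make_level_fn_err as its descend_fn,
+ * which is never called since level 0 has no table entries.
+ */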
+
+#endif
diff --git a/drivers/iommu/generic_pt/pt_log2.h b/drivers/iommu/generic_pt/pt_log2.h
new file mode 100644
index 000000000000..6dbbed119238
--- /dev/null
+++ b/drivers/iommu/generic_pt/pt_log2.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2024-2025, NVIDIA CORPORATION & AFFILIATES
+ *
+ * Helper macros for working with log2 values
+ *
+ */
+#ifndef __GENERIC_PT_LOG2_H
+#define __GENERIC_PT_LOG2_H
+#include <linux/bitops.h>
+#include <linux/limits.h>
+
+/* Compute a */
+#define log2_to_int_t(type, a_lg2) ((type)(((type)1) << (a_lg2)))
+static_assert(log2_to_int_t(unsigned int, 0) == 1);
+
+/* Compute a - 1 (aka all low bits set) */
+#define log2_to_max_int_t(type, a_lg2) ((type)(log2_to_int_t(type, a_lg2) - 1))
+
+/* Compute a / b */
+#define log2_div_t(type, a, b_lg2) ((type)(((type)a) >> (b_lg2)))
+static_assert(log2_div_t(unsigned int, 4, 2) == 1);
+
+/*
+ * Compute:
+ * a / c == b / c
+ * aka the high bits are equal
+ */
+#define log2_div_eq_t(type, a, b, c_lg2) \
+ (log2_div_t(type, (a) ^ (b), c_lg2) == 0)
+static_assert(log2_div_eq_t(unsigned int, 1, 1, 2));
+
+/* Compute a % b */
+#define log2_mod_t(type, a, b_lg2) \
+ ((type)(((type)a) & log2_to_max_int_t(type, b_lg2)))
+static_assert(log2_mod_t(unsigned int, 1, 2) == 1);
+
+/*
+ * Compute:
+ * a % b == b - 1
+ * aka the low bits are all 1s
+ */
+#define log2_mod_eq_max_t(type, a, b_lg2) \
+ (log2_mod_t(type, a, b_lg2) == log2_to_max_int_t(type, b_lg2))
+static_assert(log2_mod_eq_max_t(unsigned int, 3, 2));
+
+/*
+ * Return a value such that:
+ * a / b == ret / b
+ * ret % b == val
+ * aka set the low bits to val. val must be < b
+ */
+#define log2_set_mod_t(type, a, val, b_lg2) \
+ ((((type)(a)) & (~log2_to_max_int_t(type, b_lg2))) | ((type)(val)))
+static_assert(log2_set_mod_t(unsigned int, 3, 1, 2) == 1);
+
+/*
+ * Return a value such that:
+ * a / b == ret / b
+ * ret % b == b - 1
+ * aka set the low bits to all 1s
+ */
+#define log2_set_mod_max_t(type, a, b_lg2) \
+ (((type)(a)) | log2_to_max_int_t(type, b_lg2))
+static_assert(log2_set_mod_max_t(unsigned int, 2, 2) == 3);
+
+/* Compute a * b */
+#define log2_mul_t(type, a, b_lg2) ((type)(((type)a) << (b_lg2)))
+static_assert(log2_mul_t(unsigned int, 2, 2) == 8);
+
+#define _dispatch_sz(type, fn, a) \
+ (sizeof(type) == 4 ? fn##32((u32)a) : fn##64(a))
+
+/*
+ * Return the highest value such that:
+ * fls_t(u32, 0) == 0
+ * fls_t(u32, 1) == 1
+ * a >= log2_to_int(ret - 1)
+ * aka find last set bit
+ */
+static inline unsigned int fls32(u32 a)
+{
+ return fls(a);
+}
+#define fls_t(type, a) _dispatch_sz(type, fls, a)
+
+/*
+ * Return the highest value such that:
+ * ffs_t(u32, 0) == UNDEFINED
+ * ffs_t(u32, 1) == 0
+ * log2_mod(a, ret) == 0
+ * aka find first set bit
+ */
+static inline unsigned int __ffs32(u32 a)
+{
+ return __ffs(a);
+}
+#define ffs_t(type, a) _dispatch_sz(type, __ffs, a)
+
+/*
+ * Return the highest value such that:
+ * ffz_t(u32, U32_MAX) == UNDEFINED
+ * ffz_t(u32, 0) == 0
+ * ffz_t(u32, 1) == 1
+ * log2_mod(a, ret) == log2_to_max_int(ret)
+ * aka find first zero bit
+ */
+static inline unsigned int ffz32(u32 a)
+{
+ return ffz(a);
+}
+static inline unsigned int ffz64(u64 a)
+{
+ if (sizeof(u64) == sizeof(unsigned long))
+ return ffz(a);
+
+ if ((u32)a == U32_MAX)
+ return ffz32(a >> 32) + 32;
+ return ffz32(a);
+}
+#define ffz_t(type, a) _dispatch_sz(type, ffz, a)
+
+#endif
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index f2f538c70650..5471f814e073 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -13,6 +13,10 @@ config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && X86
select IOMMU_API
+ select GENERIC_PT
+ select IOMMU_PT
+ select IOMMU_PT_X86_64
+ select IOMMU_PT_VTDSS
select IOMMU_IOVA
select IOMMU_IOPF
select IOMMUFD_DRIVER if IOMMUFD
@@ -66,7 +70,7 @@ config INTEL_IOMMU_DEFAULT_ON
config INTEL_IOMMU_FLOPPY_WA
def_bool y
- depends on X86
+ depends on X86 && BLK_DEV_FD
help
Floppy disk drivers are known to bypass DMA API calls
thereby failing to work when IOMMU is enabled. This
diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c
index affbf4a1558d..617fd81a80f0 100644
--- a/drivers/iommu/intel/debugfs.c
+++ b/drivers/iommu/intel/debugfs.c
@@ -62,8 +62,6 @@ static const struct iommu_regset iommu_regs_64[] = {
IOMMU_REGSET_ENTRY(CAP),
IOMMU_REGSET_ENTRY(ECAP),
IOMMU_REGSET_ENTRY(RTADDR),
- IOMMU_REGSET_ENTRY(CCMD),
- IOMMU_REGSET_ENTRY(AFLOG),
IOMMU_REGSET_ENTRY(PHMBASE),
IOMMU_REGSET_ENTRY(PHMLIMIT),
IOMMU_REGSET_ENTRY(IQH),
@@ -435,8 +433,21 @@ static int domain_translation_struct_show(struct seq_file *m,
}
pgd &= VTD_PAGE_MASK;
} else { /* legacy mode */
- pgd = context->lo & VTD_PAGE_MASK;
- agaw = context->hi & 7;
+ u8 tt = (u8)(context->lo & GENMASK_ULL(3, 2)) >> 2;
+
+ /*
+		 * According to the Translation Type (TT), get the page
+		 * table pointer (SSPTPTR).
+ */
+ switch (tt) {
+ case CONTEXT_TT_MULTI_LEVEL:
+ case CONTEXT_TT_DEV_IOTLB:
+ pgd = context->lo & VTD_PAGE_MASK;
+ agaw = context->hi & 7;
+ break;
+ default:
+ goto iommu_unlock;
+ }
}
seq_printf(m, "Device %04x:%02x:%02x.%x ",
@@ -648,17 +659,11 @@ DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
struct dmar_drhd_unit *drhd)
{
- int ret;
-
seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
iommu->name, drhd->reg_base_addr);
- ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
- if (ret < 0)
- seq_puts(m, "Failed to get latency snapshot");
- else
- seq_puts(m, debug_buf);
- seq_puts(m, "\n");
+ dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
+ seq_printf(m, "%s\n", debug_buf);
}
static int latency_show(struct seq_file *m, void *v)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 9c3ab9d9f69a..134302fbcd92 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -45,16 +45,9 @@
#define DEFAULT_DOMAIN_ADDRESS_WIDTH 57
-#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1)
-#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1)
-
-/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
- to match. That way, we can use 'unsigned long' for PFNs with impunity. */
-#define DOMAIN_MAX_PFN(gaw) ((unsigned long) min_t(uint64_t, \
- __DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
-#define DOMAIN_MAX_ADDR(gaw) (((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
-
static void __init check_tylersburg_isoch(void);
+static int intel_iommu_set_dirty_tracking(struct iommu_domain *domain,
+ bool enable);
static int rwbf_quirk;
#define rwbf_required(iommu) (rwbf_quirk || cap_rwbf((iommu)->cap))
@@ -217,7 +210,6 @@ static int disable_igfx_iommu;
#define IDENTMAP_AZALIA 4
const struct iommu_ops intel_iommu_ops;
-static const struct iommu_dirty_ops intel_dirty_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
@@ -285,13 +277,6 @@ static int __init intel_iommu_setup(char *str)
}
__setup("intel_iommu=", intel_iommu_setup);
-static int domain_pfn_supported(struct dmar_domain *domain, unsigned long pfn)
-{
- int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-
- return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
-}
-
/*
* Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
* Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
@@ -353,23 +338,6 @@ static bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
}
-/* Return the super pagesize bitmap if supported. */
-static unsigned long domain_super_pgsize_bitmap(struct dmar_domain *domain)
-{
- unsigned long bitmap = 0;
-
- /*
- * 1-level super page supports page size of 2MiB, 2-level super page
- * supports page size of both 2MiB and 1GiB.
- */
- if (domain->iommu_superpage == 1)
- bitmap |= SZ_2M;
- else if (domain->iommu_superpage == 2)
- bitmap |= SZ_2M | SZ_1G;
-
- return bitmap;
-}
-
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc)
{
@@ -556,13 +524,6 @@ out:
return iommu;
}
-static void domain_flush_cache(struct dmar_domain *domain,
- void *addr, int size)
-{
- if (!domain->iommu_coherency)
- clflush_cache_range(addr, size);
-}
-
static void free_context_table(struct intel_iommu *iommu)
{
struct context_entry *context;
@@ -707,280 +668,6 @@ pgtable_walk:
}
#endif
-static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn, int *target_level,
- gfp_t gfp)
-{
- struct dma_pte *parent, *pte;
- int level = agaw_to_level(domain->agaw);
- int offset;
-
- if (!domain_pfn_supported(domain, pfn))
- /* Address beyond IOMMU's addressing capabilities. */
- return NULL;
-
- parent = domain->pgd;
-
- while (1) {
- void *tmp_page;
-
- offset = pfn_level_offset(pfn, level);
- pte = &parent[offset];
- if (!*target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
- break;
- if (level == *target_level)
- break;
-
- if (!dma_pte_present(pte)) {
- uint64_t pteval, tmp;
-
- tmp_page = iommu_alloc_pages_node_sz(domain->nid, gfp,
- SZ_4K);
-
- if (!tmp_page)
- return NULL;
-
- domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
- pteval = virt_to_phys(tmp_page) | DMA_PTE_READ |
- DMA_PTE_WRITE;
- if (domain->use_first_level)
- pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
-
- tmp = 0ULL;
- if (!try_cmpxchg64(&pte->val, &tmp, pteval))
- /* Someone else set it while we were thinking; use theirs. */
- iommu_free_pages(tmp_page);
- else
- domain_flush_cache(domain, pte, sizeof(*pte));
- }
- if (level == 1)
- break;
-
- parent = phys_to_virt(dma_pte_addr(pte));
- level--;
- }
-
- if (!*target_level)
- *target_level = level;
-
- return pte;
-}
-
-/* return address's pte at specific level */
-static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
- unsigned long pfn,
- int level, int *large_page)
-{
- struct dma_pte *parent, *pte;
- int total = agaw_to_level(domain->agaw);
- int offset;
-
- parent = domain->pgd;
- while (level <= total) {
- offset = pfn_level_offset(pfn, total);
- pte = &parent[offset];
- if (level == total)
- return pte;
-
- if (!dma_pte_present(pte)) {
- *large_page = total;
- break;
- }
-
- if (dma_pte_superpage(pte)) {
- *large_page = total;
- return pte;
- }
-
- parent = phys_to_virt(dma_pte_addr(pte));
- total--;
- }
- return NULL;
-}
-
-/* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn)
-{
- unsigned int large_page;
- struct dma_pte *first_pte, *pte;
-
- if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
- WARN_ON(start_pfn > last_pfn))
- return;
-
- /* we don't need lock here; nobody else touches the iova range */
- do {
- large_page = 1;
- first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
- if (!pte) {
- start_pfn = align_to_level(start_pfn + 1, large_page + 1);
- continue;
- }
- do {
- dma_clear_pte(pte);
- start_pfn += lvl_to_nr_pages(large_page);
- pte++;
- } while (start_pfn <= last_pfn && !first_pte_in_page(pte));
-
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
-
- } while (start_pfn && start_pfn <= last_pfn);
-}
-
-static void dma_pte_free_level(struct dmar_domain *domain, int level,
- int retain_level, struct dma_pte *pte,
- unsigned long pfn, unsigned long start_pfn,
- unsigned long last_pfn)
-{
- pfn = max(start_pfn, pfn);
- pte = &pte[pfn_level_offset(pfn, level)];
-
- do {
- unsigned long level_pfn;
- struct dma_pte *level_pte;
-
- if (!dma_pte_present(pte) || dma_pte_superpage(pte))
- goto next;
-
- level_pfn = pfn & level_mask(level);
- level_pte = phys_to_virt(dma_pte_addr(pte));
-
- if (level > 2) {
- dma_pte_free_level(domain, level - 1, retain_level,
- level_pte, level_pfn, start_pfn,
- last_pfn);
- }
-
- /*
- * Free the page table if we're below the level we want to
- * retain and the range covers the entire table.
- */
- if (level < retain_level && !(start_pfn > level_pfn ||
- last_pfn < level_pfn + level_size(level) - 1)) {
- dma_clear_pte(pte);
- domain_flush_cache(domain, pte, sizeof(*pte));
- iommu_free_pages(level_pte);
- }
-next:
- pfn += level_size(level);
- } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
-}
-
-/*
- * clear last level (leaf) ptes and free page table pages below the
- * level we wish to keep intact.
- */
-static void dma_pte_free_pagetable(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn,
- int retain_level)
-{
- dma_pte_clear_range(domain, start_pfn, last_pfn);
-
- /* We don't need lock here; nobody else touches the iova range */
- dma_pte_free_level(domain, agaw_to_level(domain->agaw), retain_level,
- domain->pgd, 0, start_pfn, last_pfn);
-
- /* free pgd */
- if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- iommu_free_pages(domain->pgd);
- domain->pgd = NULL;
- }
-}
-
-/* When a page at a given level is being unlinked from its parent, we don't
- need to *modify* it at all. All we need to do is make a list of all the
- pages which can be freed just as soon as we've flushed the IOTLB and we
- know the hardware page-walk will no longer touch them.
- The 'pte' argument is the *parent* PTE, pointing to the page that is to
- be freed. */
-static void dma_pte_list_pagetables(struct dmar_domain *domain,
- int level, struct dma_pte *parent_pte,
- struct iommu_pages_list *freelist)
-{
- struct dma_pte *pte = phys_to_virt(dma_pte_addr(parent_pte));
-
- iommu_pages_list_add(freelist, pte);
-
- if (level == 1)
- return;
-
- do {
- if (dma_pte_present(pte) && !dma_pte_superpage(pte))
- dma_pte_list_pagetables(domain, level - 1, pte, freelist);
- pte++;
- } while (!first_pte_in_page(pte));
-}
-
-static void dma_pte_clear_level(struct dmar_domain *domain, int level,
- struct dma_pte *pte, unsigned long pfn,
- unsigned long start_pfn, unsigned long last_pfn,
- struct iommu_pages_list *freelist)
-{
- struct dma_pte *first_pte = NULL, *last_pte = NULL;
-
- pfn = max(start_pfn, pfn);
- pte = &pte[pfn_level_offset(pfn, level)];
-
- do {
- unsigned long level_pfn = pfn & level_mask(level);
-
- if (!dma_pte_present(pte))
- goto next;
-
- /* If range covers entire pagetable, free it */
- if (start_pfn <= level_pfn &&
- last_pfn >= level_pfn + level_size(level) - 1) {
- /* These suborbinate page tables are going away entirely. Don't
- bother to clear them; we're just going to *free* them. */
- if (level > 1 && !dma_pte_superpage(pte))
- dma_pte_list_pagetables(domain, level - 1, pte, freelist);
-
- dma_clear_pte(pte);
- if (!first_pte)
- first_pte = pte;
- last_pte = pte;
- } else if (level > 1) {
- /* Recurse down into a level that isn't *entirely* obsolete */
- dma_pte_clear_level(domain, level - 1,
- phys_to_virt(dma_pte_addr(pte)),
- level_pfn, start_pfn, last_pfn,
- freelist);
- }
-next:
- pfn = level_pfn + level_size(level);
- } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
-
- if (first_pte)
- domain_flush_cache(domain, first_pte,
- (void *)++last_pte - (void *)first_pte);
-}
-
-/* We can't just free the pages because the IOMMU may still be walking
- the page tables, and may have cached the intermediate levels. The
- pages can only be freed after the IOTLB flush has been done. */
-static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
- unsigned long last_pfn,
- struct iommu_pages_list *freelist)
-{
- if (WARN_ON(!domain_pfn_supported(domain, last_pfn)) ||
- WARN_ON(start_pfn > last_pfn))
- return;
-
- /* we don't need lock here; nobody else touches the iova range */
- dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
- domain->pgd, 0, start_pfn, last_pfn, freelist);
-
- /* free pgd */
- if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
- iommu_pages_list_add(freelist, domain->pgd);
- domain->pgd = NULL;
- }
-}
-
/* iommu handling */
static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
@@ -1460,13 +1147,15 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
domain_lookup_dev_info(domain, iommu, bus, devfn);
u16 did = domain_id_iommu(domain, iommu);
int translation = CONTEXT_TT_MULTI_LEVEL;
- struct dma_pte *pgd = domain->pgd;
+ struct pt_iommu_vtdss_hw_info pt_info;
struct context_entry *context;
int ret;
if (WARN_ON(!intel_domain_is_ss_paging(domain)))
return -EINVAL;
+ pt_iommu_vtdss_hw_info(&domain->sspt, &pt_info);
+
pr_debug("Set context mapping for %02x:%02x.%d\n",
bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -1489,8 +1178,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
else
translation = CONTEXT_TT_MULTI_LEVEL;
- context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, domain->agaw);
+ context_set_address_root(context, pt_info.ssptptr);
+ context_set_address_width(context, pt_info.aw);
context_set_translation_type(context, translation);
context_set_fault_enable(context);
context_set_present(context);
@@ -1537,172 +1226,6 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
return 0;
}
-/* Return largest possible superpage level for a given mapping */
-static int hardware_largepage_caps(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phy_pfn, unsigned long pages)
-{
- int support, level = 1;
- unsigned long pfnmerge;
-
- support = domain->iommu_superpage;
-
- /* To use a large page, the virtual *and* physical addresses
- must be aligned to 2MiB/1GiB/etc. Lower bits set in either
- of them will mean we have to use smaller pages. So just
- merge them and check both at once. */
- pfnmerge = iov_pfn | phy_pfn;
-
- while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
- pages >>= VTD_STRIDE_SHIFT;
- if (!pages)
- break;
- pfnmerge >>= VTD_STRIDE_SHIFT;
- level++;
- support--;
- }
- return level;
-}
-
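
The alignment trick in the removed hardware_largepage_caps() is worth spelling out: OR-ing the IOVA and physical PFNs lets a single mask test check both alignments at once. A compilable toy version, with STRIDE_SHIFT standing in for VTD_STRIDE_SHIFT and the names invented for illustration:

#include <stdio.h>

#define STRIDE_SHIFT 9  /* 512 entries per table level */
#define STRIDE_MASK  ((1UL << STRIDE_SHIFT) - 1)

/* Highest superpage level usable for this mapping; level 1 = 4KiB */
static int largepage_level(unsigned long iov_pfn, unsigned long phy_pfn,
                           unsigned long pages, int hw_levels)
{
        unsigned long merged = iov_pfn | phy_pfn;  /* both must be aligned */
        int level = 1;

        while (hw_levels && !(merged & STRIDE_MASK)) {
                pages >>= STRIDE_SHIFT;
                if (!pages)
                        break;
                merged >>= STRIDE_SHIFT;
                level++;
                hw_levels--;
        }
        return level;
}

int main(void)
{
        /* 2MiB-aligned IOVA and PA covering 512 4KiB pages -> level 2 */
        printf("%d\n", largepage_level(0x200, 0x400, 512, 1));
        return 0;
}
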
-/*
- * Ensure that old small page tables are removed to make room for superpage(s).
- * We're going to add new large pages, so make sure we don't remove their parent
- * tables. The IOTLB/devTLBs should be flushed if any PDE/PTEs are cleared.
- */
-static void switch_to_super_page(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long end_pfn, int level)
-{
- unsigned long lvl_pages = lvl_to_nr_pages(level);
- struct dma_pte *pte = NULL;
-
- while (start_pfn <= end_pfn) {
- if (!pte)
- pte = pfn_to_dma_pte(domain, start_pfn, &level,
- GFP_ATOMIC);
-
- if (dma_pte_present(pte)) {
- dma_pte_free_pagetable(domain, start_pfn,
- start_pfn + lvl_pages - 1,
- level + 1);
-
- cache_tag_flush_range(domain, start_pfn << VTD_PAGE_SHIFT,
- end_pfn << VTD_PAGE_SHIFT, 0);
- }
-
- pte++;
- start_pfn += lvl_pages;
- if (first_pte_in_page(pte))
- pte = NULL;
- }
-}
-
-static int
-__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages, int prot,
- gfp_t gfp)
-{
- struct dma_pte *first_pte = NULL, *pte = NULL;
- unsigned int largepage_lvl = 0;
- unsigned long lvl_pages = 0;
- phys_addr_t pteval;
- u64 attr;
-
- if (unlikely(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)))
- return -EINVAL;
-
- if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0)
- return -EINVAL;
-
- if (!(prot & DMA_PTE_WRITE) && domain->nested_parent) {
- pr_err_ratelimited("Read-only mapping is disallowed on the domain which serves as the parent in a nested configuration, due to HW errata (ERRATA_772415_SPR17)\n");
- return -EINVAL;
- }
-
- attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
- if (domain->use_first_level) {
- attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
- if (prot & DMA_PTE_WRITE)
- attr |= DMA_FL_PTE_DIRTY;
- }
-
- domain->has_mappings = true;
-
- pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
-
- while (nr_pages > 0) {
- uint64_t tmp;
-
- if (!pte) {
- largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
- phys_pfn, nr_pages);
-
- pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl,
- gfp);
- if (!pte)
- return -ENOMEM;
- first_pte = pte;
-
- lvl_pages = lvl_to_nr_pages(largepage_lvl);
-
- /* It is a large page */
- if (largepage_lvl > 1) {
- unsigned long end_pfn;
- unsigned long pages_to_remove;
-
- pteval |= DMA_PTE_LARGE_PAGE;
- pages_to_remove = min_t(unsigned long, nr_pages,
- nr_pte_to_next_page(pte) * lvl_pages);
- end_pfn = iov_pfn + pages_to_remove - 1;
- switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
- } else {
- pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
- }
-
- }
- /* We don't need lock here, nobody else
- * touches the iova range
- */
- tmp = 0ULL;
- if (!try_cmpxchg64_local(&pte->val, &tmp, pteval)) {
- static int dumps = 5;
- pr_crit("ERROR: DMA PTE for vPFN 0x%lx already set (to %llx not %llx)\n",
- iov_pfn, tmp, (unsigned long long)pteval);
- if (dumps) {
- dumps--;
- debug_dma_dump_mappings(NULL);
- }
- WARN_ON(1);
- }
-
- nr_pages -= lvl_pages;
- iov_pfn += lvl_pages;
- phys_pfn += lvl_pages;
- pteval += lvl_pages * VTD_PAGE_SIZE;
-
- /* If the next PTE would be the first in a new page, then we
- * need to flush the cache on the entries we've just written.
- * And then we'll need to recalculate 'pte', so clear it and
- * let it get set again in the if (!pte) block above.
- *
- * If we're done (!nr_pages) we need to flush the cache too.
- *
- * Also if we've been setting superpages, we may need to
- * recalculate 'pte' and switch back to smaller pages for the
- * end of the mapping, if the trailing size is not enough to
- * use another superpage (i.e. nr_pages < lvl_pages).
- */
- pte++;
- if (!nr_pages || first_pte_in_page(pte) ||
- (largepage_lvl > 1 && nr_pages < lvl_pages)) {
- domain_flush_cache(domain, first_pte,
- (void *)pte - (void *)first_pte);
- pte = NULL;
- }
- }
-
- return 0;
-}
-
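
The removed __domain_mapping() published each PTE with a lock-free compare-and-swap (try_cmpxchg64_local), relying on the invariant that the slot must still be empty. A userspace analogue of that publish step with C11 atomics, purely illustrative:

#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>

/* Install a 64-bit PTE only if the slot is still zero. */
static int install_pte(_Atomic uint64_t *slot, uint64_t pteval)
{
        uint64_t expected = 0;

        if (!atomic_compare_exchange_strong(slot, &expected, pteval)) {
                /* Slot already held a value: a double-map bookkeeping bug. */
                fprintf(stderr, "PTE already set to 0x%" PRIx64 "\n", expected);
                return -1;
        }
        return 0;
}
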
static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
{
struct intel_iommu *iommu = info->iommu;
@@ -1764,22 +1287,26 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
struct device *dev,
u32 pasid, struct iommu_domain *old)
{
- struct dma_pte *pgd = domain->pgd;
- int level, flags = 0;
+ struct pt_iommu_x86_64_hw_info pt_info;
+ unsigned int flags = 0;
- level = agaw_to_level(domain->agaw);
- if (level != 4 && level != 5)
+ pt_iommu_x86_64_hw_info(&domain->fspt, &pt_info);
+ if (WARN_ON(pt_info.levels != 4 && pt_info.levels != 5))
return -EINVAL;
- if (level == 5)
+ if (pt_info.levels == 5)
flags |= PASID_FLAG_FL5LP;
if (domain->force_snooping)
flags |= PASID_FLAG_PAGE_SNOOP;
+ if (!(domain->fspt.x86_64_pt.common.features &
+ BIT(PT_FEAT_DMA_INCOHERENT)))
+ flags |= PASID_FLAG_PWSNP;
+
return __domain_setup_first_level(iommu, dev, pasid,
domain_id_iommu(domain, iommu),
- __pa(pgd), flags, old);
+ pt_info.gcr3_pt, flags, old);
}
static int dmar_domain_attach_device(struct dmar_domain *domain,
@@ -2298,7 +1825,7 @@ static void iommu_flush_all(void)
}
}
-static int iommu_suspend(void)
+static int iommu_suspend(void *data)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
@@ -2325,7 +1852,7 @@ static int iommu_suspend(void)
return 0;
}
-static void iommu_resume(void)
+static void iommu_resume(void *data)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu = NULL;
@@ -2356,14 +1883,18 @@ static void iommu_resume(void)
}
}
-static struct syscore_ops iommu_syscore_ops = {
+static const struct syscore_ops iommu_syscore_ops = {
.resume = iommu_resume,
.suspend = iommu_suspend,
};
+static struct syscore iommu_syscore = {
+ .ops = &iommu_syscore_ops,
+};
+
static void __init init_iommu_pm_ops(void)
{
- register_syscore_ops(&iommu_syscore_ops);
+ register_syscore(&iommu_syscore);
}
#else
@@ -3225,7 +2756,8 @@ void device_block_translation(struct device *dev)
}
static int blocking_domain_attach_dev(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -3246,23 +2778,9 @@ static struct iommu_domain blocking_domain = {
}
};
-static int iommu_superpage_capability(struct intel_iommu *iommu, bool first_stage)
-{
- if (!intel_iommu_superpage)
- return 0;
-
- if (first_stage)
- return cap_fl1gp_support(iommu->cap) ? 2 : 1;
-
- return fls(cap_super_page_val(iommu->cap));
-}
-
-static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_stage)
+static struct dmar_domain *paging_domain_alloc(void)
{
- struct device_domain_info *info = dev_iommu_priv_get(dev);
- struct intel_iommu *iommu = info->iommu;
struct dmar_domain *domain;
- int addr_width;
domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
@@ -3277,56 +2795,38 @@ static struct dmar_domain *paging_domain_alloc(struct device *dev, bool first_st
INIT_LIST_HEAD(&domain->s1_domains);
spin_lock_init(&domain->s1_lock);
- domain->nid = dev_to_node(dev);
- domain->use_first_level = first_stage;
-
- domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
-
- /* calculate the address width */
- addr_width = agaw_to_width(iommu->agaw);
- if (addr_width > cap_mgaw(iommu->cap))
- addr_width = cap_mgaw(iommu->cap);
- domain->gaw = addr_width;
- domain->agaw = iommu->agaw;
- domain->max_addr = __DOMAIN_MAX_ADDR(addr_width);
-
- /* iommu memory access coherency */
- domain->iommu_coherency = iommu_paging_structure_coherency(iommu);
+ return domain;
+}
- /* pagesize bitmap */
- domain->domain.pgsize_bitmap = SZ_4K;
- domain->iommu_superpage = iommu_superpage_capability(iommu, first_stage);
- domain->domain.pgsize_bitmap |= domain_super_pgsize_bitmap(domain);
+static unsigned int compute_vasz_lg2_fs(struct intel_iommu *iommu,
+ unsigned int *top_level)
+{
+ unsigned int mgaw = cap_mgaw(iommu->cap);
/*
- * IOVA aperture: First-level translation restricts the input-address
- * to a canonical address (i.e., address bits 63:N have the same value
- * as address bit [N-1], where N is 48-bits with 4-level paging and
- * 57-bits with 5-level paging). Hence, skip bit [N-1].
+ * Spec 3.6 First-Stage Translation:
+ *
+ * Software must limit addresses to less than the minimum of MGAW
+ * and the lower canonical address width implied by FSPM (i.e.,
+ * 47-bit when FSPM is 4-level and 56-bit when FSPM is 5-level).
*/
- domain->domain.geometry.force_aperture = true;
- domain->domain.geometry.aperture_start = 0;
- if (first_stage)
- domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
- else
- domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
-
- /* always allocate the top pgd */
- domain->pgd = iommu_alloc_pages_node_sz(domain->nid, GFP_KERNEL, SZ_4K);
- if (!domain->pgd) {
- kfree(domain);
- return ERR_PTR(-ENOMEM);
+ if (mgaw > 48 && cap_fl5lp_support(iommu->cap)) {
+ *top_level = 4;
+ return min(57, mgaw);
}
- domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
- return domain;
+ /* Four level is always supported */
+ *top_level = 3;
+ return min(48, mgaw);
}
static struct iommu_domain *
intel_iommu_domain_alloc_first_stage(struct device *dev,
struct intel_iommu *iommu, u32 flags)
{
+ struct pt_iommu_x86_64_cfg cfg = {};
struct dmar_domain *dmar_domain;
+ int ret;
if (flags & ~IOMMU_HWPT_ALLOC_PASID)
return ERR_PTR(-EOPNOTSUPP);
@@ -3335,10 +2835,20 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
return ERR_PTR(-EOPNOTSUPP);
- dmar_domain = paging_domain_alloc(dev, true);
+ dmar_domain = paging_domain_alloc();
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
+ cfg.common.hw_max_vasz_lg2 =
+ compute_vasz_lg2_fs(iommu, &cfg.top_level);
+ cfg.common.hw_max_oasz_lg2 = 52;
+ cfg.common.features = BIT(PT_FEAT_SIGN_EXTEND) |
+ BIT(PT_FEAT_FLUSH_RANGE);
+ /* First stage always uses scalable mode */
+ if (!ecap_smpwc(iommu->ecap))
+ cfg.common.features |= BIT(PT_FEAT_DMA_INCOHERENT);
+ dmar_domain->iommu.iommu_device = dev;
+ dmar_domain->iommu.nid = dev_to_node(dev);
dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
/*
* iotlb sync for map is only needed for legacy implementations that
@@ -3348,14 +2858,58 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
if (rwbf_required(iommu))
dmar_domain->iotlb_sync_map = true;
+ ret = pt_iommu_x86_64_init(&dmar_domain->fspt, &cfg, GFP_KERNEL);
+ if (ret) {
+ kfree(dmar_domain);
+ return ERR_PTR(ret);
+ }
+
+ if (!cap_fl1gp_support(iommu->cap))
+ dmar_domain->domain.pgsize_bitmap &= ~(u64)SZ_1G;
+ if (!intel_iommu_superpage)
+ dmar_domain->domain.pgsize_bitmap = SZ_4K;
+
return &dmar_domain->domain;
}
+static unsigned int compute_vasz_lg2_ss(struct intel_iommu *iommu,
+ unsigned int *top_level)
+{
+ unsigned int sagaw = cap_sagaw(iommu->cap);
+ unsigned int mgaw = cap_mgaw(iommu->cap);
+
+ /*
+ * Find the largest table size that both the mgaw and sagaw support.
+ * This sets the valid range of IOVA and the top starting level.
+ * Some HW may only support a 4 or 5 level walk but must limit IOVA to
+ * 3 levels.
+ */
+ if (mgaw > 48 && sagaw >= BIT(3)) {
+ *top_level = 4;
+ return min(57, mgaw);
+ } else if (mgaw > 39 && sagaw >= BIT(2)) {
+ *top_level = 3 + ffs(sagaw >> 3);
+ return min(48, mgaw);
+ } else if (mgaw > 30 && sagaw >= BIT(1)) {
+ *top_level = 2 + ffs(sagaw >> 2);
+ return min(39, mgaw);
+ }
+ return 0;
+}
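
The decode above packs two results into one call: how deep the walk is (*top_level, a zero-based top index) and how wide the IOVA may be. A standalone rendering for experimentation, with made-up capability values; POSIX ffs() from <strings.h> behaves like the kernel helper here:

#include <stdio.h>
#include <strings.h>

static unsigned int vasz_lg2_ss(unsigned int mgaw, unsigned int sagaw,
                                unsigned int *top_level)
{
        if (mgaw > 48 && sagaw >= (1u << 3)) {
                *top_level = 4;
                return mgaw < 57 ? mgaw : 57;
        } else if (mgaw > 39 && sagaw >= (1u << 2)) {
                *top_level = 3 + ffs(sagaw >> 3);
                return mgaw < 48 ? mgaw : 48;
        } else if (mgaw > 30 && sagaw >= (1u << 1)) {
                *top_level = 2 + ffs(sagaw >> 2);
                return mgaw < 39 ? mgaw : 39;
        }
        return 0;
}

int main(void)
{
        unsigned int top;

        /* 4-level-only walker with a 39-bit mgaw: walk depth stays 4
           (top index 3) while IOVA is capped at 39 bits */
        printf("vasz=%u top=%u\n", vasz_lg2_ss(39, 1u << 2, &top), top);
        return 0;
}
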
+
+static const struct iommu_dirty_ops intel_second_stage_dirty_ops = {
+ IOMMU_PT_DIRTY_OPS(vtdss),
+ .set_dirty_tracking = intel_iommu_set_dirty_tracking,
+};
+
static struct iommu_domain *
intel_iommu_domain_alloc_second_stage(struct device *dev,
struct intel_iommu *iommu, u32 flags)
{
+ struct pt_iommu_vtdss_cfg cfg = {};
struct dmar_domain *dmar_domain;
+ unsigned int sslps;
+ int ret;
if (flags &
(~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
@@ -3372,15 +2926,46 @@ intel_iommu_domain_alloc_second_stage(struct device *dev,
if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
return ERR_PTR(-EOPNOTSUPP);
- dmar_domain = paging_domain_alloc(dev, false);
+ dmar_domain = paging_domain_alloc();
if (IS_ERR(dmar_domain))
return ERR_CAST(dmar_domain);
+ cfg.common.hw_max_vasz_lg2 = compute_vasz_lg2_ss(iommu, &cfg.top_level);
+ cfg.common.hw_max_oasz_lg2 = 52;
+ cfg.common.features = BIT(PT_FEAT_FLUSH_RANGE);
+
+ /*
+ * Read-only mapping is disallowed on the domain which serves as the
+ * parent in a nested configuration, due to HW errata
+ * (ERRATA_772415_SPR17)
+ */
+ if (flags & IOMMU_HWPT_ALLOC_NEST_PARENT)
+ cfg.common.features |= BIT(PT_FEAT_VTDSS_FORCE_WRITEABLE);
+
+ if (!iommu_paging_structure_coherency(iommu))
+ cfg.common.features |= BIT(PT_FEAT_DMA_INCOHERENT);
+ dmar_domain->iommu.iommu_device = dev;
+ dmar_domain->iommu.nid = dev_to_node(dev);
dmar_domain->domain.ops = &intel_ss_paging_domain_ops;
dmar_domain->nested_parent = flags & IOMMU_HWPT_ALLOC_NEST_PARENT;
if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
- dmar_domain->domain.dirty_ops = &intel_dirty_ops;
+ dmar_domain->domain.dirty_ops = &intel_second_stage_dirty_ops;
+
+ ret = pt_iommu_vtdss_init(&dmar_domain->sspt, &cfg, GFP_KERNEL);
+ if (ret) {
+ kfree(dmar_domain);
+ return ERR_PTR(ret);
+ }
+
+ /* Adjust the supported page sizes to HW capability */
+ sslps = cap_super_page_val(iommu->cap);
+ if (!(sslps & BIT(0)))
+ dmar_domain->domain.pgsize_bitmap &= ~(u64)SZ_2M;
+ if (!(sslps & BIT(1)))
+ dmar_domain->domain.pgsize_bitmap &= ~(u64)SZ_1G;
+ if (!intel_iommu_superpage)
+ dmar_domain->domain.pgsize_bitmap = SZ_4K;
/*
* Besides the internal write buffer flush, the caching mode used for
@@ -3422,14 +3007,7 @@ static void intel_iommu_domain_free(struct iommu_domain *domain)
if (WARN_ON(!list_empty(&dmar_domain->devices)))
return;
- if (dmar_domain->pgd) {
- struct iommu_pages_list freelist =
- IOMMU_PAGES_LIST_INIT(freelist);
-
- domain_unmap(dmar_domain, 0, DOMAIN_MAX_PFN(dmar_domain->gaw),
- &freelist);
- iommu_put_pages_list(&freelist);
- }
+ pt_iommu_deinit(&dmar_domain->iommu);
kfree(dmar_domain->qi_batch);
kfree(dmar_domain);
@@ -3446,6 +3024,16 @@ static int paging_domain_compatible_first_stage(struct dmar_domain *dmar_domain,
if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
return -EINVAL;
+ if (!ecap_smpwc(iommu->ecap) &&
+ !(dmar_domain->fspt.x86_64_pt.common.features &
+ BIT(PT_FEAT_DMA_INCOHERENT)))
+ return -EINVAL;
+
+ /* Check support for the number of table levels */
+ if (!cap_fl5lp_support(iommu->cap) &&
+ dmar_domain->fspt.x86_64_pt.common.max_vasz_lg2 > 48)
+ return -EINVAL;
+
/* Same page size support */
if (!cap_fl1gp_support(iommu->cap) &&
(dmar_domain->domain.pgsize_bitmap & SZ_1G))
@@ -3462,7 +3050,11 @@ static int
paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
struct intel_iommu *iommu)
{
+ unsigned int vasz_lg2 = dmar_domain->sspt.vtdss_pt.common.max_vasz_lg2;
unsigned int sslps = cap_super_page_val(iommu->cap);
+ struct pt_iommu_vtdss_hw_info pt_info;
+
+ pt_iommu_vtdss_hw_info(&dmar_domain->sspt, &pt_info);
if (dmar_domain->domain.dirty_ops && !ssads_supported(iommu))
return -EINVAL;
@@ -3473,6 +3065,19 @@ paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
if (sm_supported(iommu) && !ecap_slts(iommu->ecap))
return -EINVAL;
+ if (!iommu_paging_structure_coherency(iommu) &&
+ !(dmar_domain->sspt.vtdss_pt.common.features &
+ BIT(PT_FEAT_DMA_INCOHERENT)))
+ return -EINVAL;
+
+ /* Address width falls within the capability */
+ if (cap_mgaw(iommu->cap) < vasz_lg2)
+ return -EINVAL;
+
+ /* Check support for the page table level. */
+ if (!(cap_sagaw(iommu->cap) & BIT(pt_info.aw)))
+ return -EINVAL;
+
/* Same page size support */
if (!(sslps & BIT(0)) && (dmar_domain->domain.pgsize_bitmap & SZ_2M))
return -EINVAL;
@@ -3484,6 +3089,14 @@ paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
!dmar_domain->iotlb_sync_map)
return -EINVAL;
+ /*
+ * FIXME this is locked wrong, it needs to be under the
+ * dmar_domain->lock
+ */
+ if ((dmar_domain->sspt.vtdss_pt.common.features &
+ BIT(PT_FEAT_VTDSS_FORCE_COHERENCE)) &&
+ !ecap_sc_support(iommu->ecap))
+ return -EINVAL;
return 0;
}
@@ -3493,7 +3106,6 @@ int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
struct intel_iommu *iommu = info->iommu;
int ret = -EINVAL;
- int addr_width;
if (intel_domain_is_fs_paging(dmar_domain))
ret = paging_domain_compatible_first_stage(dmar_domain, iommu);
@@ -3504,26 +3116,6 @@ int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
if (ret)
return ret;
- /*
- * FIXME this is locked wrong, it needs to be under the
- * dmar_domain->lock
- */
- if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
- return -EINVAL;
-
- if (dmar_domain->iommu_coherency !=
- iommu_paging_structure_coherency(iommu))
- return -EINVAL;
-
-
- /* check if this iommu agaw is sufficient for max mapped address */
- addr_width = agaw_to_width(iommu->agaw);
- if (addr_width > cap_mgaw(iommu->cap))
- addr_width = cap_mgaw(iommu->cap);
-
- if (dmar_domain->gaw > addr_width || dmar_domain->agaw > iommu->agaw)
- return -EINVAL;
-
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
context_copied(iommu, info->bus, info->devfn))
return intel_pasid_setup_sm_context(dev);
@@ -3532,7 +3124,8 @@ int paging_domain_compatible(struct iommu_domain *domain, struct device *dev)
}
static int intel_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
int ret;
@@ -3553,110 +3146,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return ret;
}
-static int intel_iommu_map(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t hpa,
- size_t size, int iommu_prot, gfp_t gfp)
-{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- u64 max_addr;
- int prot = 0;
-
- if (iommu_prot & IOMMU_READ)
- prot |= DMA_PTE_READ;
- if (iommu_prot & IOMMU_WRITE)
- prot |= DMA_PTE_WRITE;
- if (dmar_domain->set_pte_snp)
- prot |= DMA_PTE_SNP;
-
- max_addr = iova + size;
- if (dmar_domain->max_addr < max_addr) {
- u64 end;
-
- /* check if minimum agaw is sufficient for mapped address */
- end = __DOMAIN_MAX_ADDR(dmar_domain->gaw) + 1;
- if (end < max_addr) {
- pr_err("%s: iommu width (%d) is not "
- "sufficient for the mapped address (%llx)\n",
- __func__, dmar_domain->gaw, max_addr);
- return -EFAULT;
- }
- dmar_domain->max_addr = max_addr;
- }
- /* Round up size to next multiple of PAGE_SIZE, if it and
- the low bits of hpa would take us onto the next page */
- size = aligned_nrpages(hpa, size);
- return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot, gfp);
-}
-
-static int intel_iommu_map_pages(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t paddr,
- size_t pgsize, size_t pgcount,
- int prot, gfp_t gfp, size_t *mapped)
-{
- unsigned long pgshift = __ffs(pgsize);
- size_t size = pgcount << pgshift;
- int ret;
-
- if (pgsize != SZ_4K && pgsize != SZ_2M && pgsize != SZ_1G)
- return -EINVAL;
-
- if (!IS_ALIGNED(iova | paddr, pgsize))
- return -EINVAL;
-
- ret = intel_iommu_map(domain, iova, paddr, size, prot, gfp);
- if (!ret && mapped)
- *mapped = size;
-
- return ret;
-}
-
-static size_t intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- struct iommu_iotlb_gather *gather)
-{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long start_pfn, last_pfn;
- int level = 0;
-
- /* Cope with horrid API which requires us to unmap more than the
- size argument if it happens to be a large-page mapping. */
- if (unlikely(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT,
- &level, GFP_ATOMIC)))
- return 0;
-
- if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
- size = VTD_PAGE_SIZE << level_to_offset_bits(level);
-
- start_pfn = iova >> VTD_PAGE_SHIFT;
- last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
-
- domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist);
-
- if (dmar_domain->max_addr == iova + size)
- dmar_domain->max_addr = iova;
-
- /*
- * We do not use page-selective IOTLB invalidation in flush queue,
- * so there is no need to track page and sync iotlb.
- */
- if (!iommu_iotlb_gather_queued(gather))
- iommu_iotlb_gather_add_page(domain, gather, iova, size);
-
- return size;
-}
-
-static size_t intel_iommu_unmap_pages(struct iommu_domain *domain,
- unsigned long iova,
- size_t pgsize, size_t pgcount,
- struct iommu_iotlb_gather *gather)
-{
- unsigned long pgshift = __ffs(pgsize);
- size_t size = pgcount << pgshift;
-
- return intel_iommu_unmap(domain, iova, size, gather);
-}
-
static void intel_iommu_tlb_sync(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather)
{
@@ -3666,24 +3155,6 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
iommu_put_pages_list(&gather->freelist);
}
-static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
-{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- struct dma_pte *pte;
- int level = 0;
- u64 phys = 0;
-
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
- GFP_ATOMIC);
- if (pte && dma_pte_present(pte))
- phys = dma_pte_addr(pte) +
- (iova & (BIT_MASK(level_to_offset_bits(level) +
- VTD_PAGE_SHIFT) - 1));
-
- return phys;
-}
-
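
The removed lookup walked to the leaf and then added back the untranslated low bits: for a level-N leaf, the low level_to_offset_bits(N) + 12 bits of the IOVA pass straight through. Toy arithmetic for a 2MiB (level-2) leaf, with example addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t pte_addr = 0x40000000ULL;              /* 2MiB-aligned leaf target */
        uint64_t iova     = 0x123456ULL;
        uint64_t off_mask = (1ULL << (9 + 12)) - 1;     /* low 21 bits survive */

        printf("phys = 0x%llx\n",
               (unsigned long long)(pte_addr + (iova & off_mask)));
        return 0;
}
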
static bool domain_support_force_snooping(struct dmar_domain *domain)
{
struct device_domain_info *info;
@@ -3725,15 +3196,15 @@ static bool intel_iommu_enforce_cache_coherency_ss(struct iommu_domain *domain)
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
guard(spinlock_irqsave)(&dmar_domain->lock);
- if (!domain_support_force_snooping(dmar_domain) ||
- dmar_domain->has_mappings)
+ if (!domain_support_force_snooping(dmar_domain))
return false;
/*
* Second level page table supports per-PTE snoop control. The
* iommu_map() interface will handle this by setting SNP bit.
*/
- dmar_domain->set_pte_snp = true;
+ dmar_domain->sspt.vtdss_pt.common.features |=
+ BIT(PT_FEAT_VTDSS_FORCE_COHERENCE);
dmar_domain->force_snooping = true;
return true;
}
@@ -3812,7 +3283,7 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
}
if (info->ats_supported && ecap_prs(iommu->ecap) &&
- pci_pri_supported(pdev))
+ ecap_pds(iommu->ecap) && pci_pri_supported(pdev))
info->pri_supported = 1;
}
}
@@ -4297,49 +3768,6 @@ err_unwind:
return ret;
}
-static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- unsigned long flags,
- struct iommu_dirty_bitmap *dirty)
-{
- struct dmar_domain *dmar_domain = to_dmar_domain(domain);
- unsigned long end = iova + size - 1;
- unsigned long pgsize;
-
- /*
- * IOMMUFD core calls into a dirty tracking disabled domain without an
- * IOVA bitmap set in order to clean dirty bits in all PTEs that might
- * have occurred when we stopped dirty tracking. This ensures that we
- * never inherit dirtied bits from a previous cycle.
- */
- if (!dmar_domain->dirty_tracking && dirty->bitmap)
- return -EINVAL;
-
- do {
- struct dma_pte *pte;
- int lvl = 0;
-
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &lvl,
- GFP_ATOMIC);
- pgsize = level_size(lvl) << VTD_PAGE_SHIFT;
- if (!pte || !dma_pte_present(pte)) {
- iova += pgsize;
- continue;
- }
-
- if (dma_sl_pte_test_and_clear_dirty(pte, flags))
- iommu_dirty_bitmap_record(dirty, iova, pgsize);
- iova += pgsize;
- } while (iova < end);
-
- return 0;
-}
-
-static const struct iommu_dirty_ops intel_dirty_ops = {
- .set_dirty_tracking = intel_iommu_set_dirty_tracking,
- .read_and_clear_dirty = intel_iommu_read_and_clear_dirty,
-};
-
static int context_setup_pass_through(struct device *dev, u8 bus, u8 devfn)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
@@ -4396,7 +3824,9 @@ static int device_setup_pass_through(struct device *dev)
context_setup_pass_through_cb, dev);
}
-static int identity_domain_attach_dev(struct iommu_domain *domain, struct device *dev)
+static int identity_domain_attach_dev(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_domain *old)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
@@ -4457,27 +3887,23 @@ static struct iommu_domain identity_domain = {
};
const struct iommu_domain_ops intel_fs_paging_domain_ops = {
+ IOMMU_PT_DOMAIN_OPS(x86_64),
.attach_dev = intel_iommu_attach_device,
.set_dev_pasid = intel_iommu_set_dev_pasid,
- .map_pages = intel_iommu_map_pages,
- .unmap_pages = intel_iommu_unmap_pages,
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
.flush_iotlb_all = intel_flush_iotlb_all,
.iotlb_sync = intel_iommu_tlb_sync,
- .iova_to_phys = intel_iommu_iova_to_phys,
.free = intel_iommu_domain_free,
.enforce_cache_coherency = intel_iommu_enforce_cache_coherency_fs,
};
const struct iommu_domain_ops intel_ss_paging_domain_ops = {
+ IOMMU_PT_DOMAIN_OPS(vtdss),
.attach_dev = intel_iommu_attach_device,
.set_dev_pasid = intel_iommu_set_dev_pasid,
- .map_pages = intel_iommu_map_pages,
- .unmap_pages = intel_iommu_unmap_pages,
.iotlb_sync_map = intel_iommu_iotlb_sync_map,
.flush_iotlb_all = intel_flush_iotlb_all,
.iotlb_sync = intel_iommu_tlb_sync,
- .iova_to_phys = intel_iommu_iova_to_phys,
.free = intel_iommu_domain_free,
.enforce_cache_coherency = intel_iommu_enforce_cache_coherency_ss,
};
@@ -4792,3 +4218,5 @@ err:
return ret;
}
+
+MODULE_IMPORT_NS("GENERIC_PT_IOMMU");
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index d09b92871659..25c5e22096d4 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -23,8 +23,8 @@
#include <linux/xarray.h>
#include <linux/perf_event.h>
#include <linux/pci.h>
+#include <linux/generic_pt/iommu.h>
-#include <asm/cacheflush.h>
#include <asm/iommu.h>
#include <uapi/linux/iommufd.h>
@@ -77,7 +77,6 @@
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
-#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
@@ -173,8 +172,6 @@
#define cap_pgsel_inv(c) (((c) >> 39) & 1)
#define cap_super_page_val(c) (((c) >> 34) & 0xf)
-#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
- * OFFSET_STRIDE) + 21)
#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
@@ -462,7 +459,6 @@ enum {
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
/* Page group response descriptor QW1 */
-#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
@@ -541,7 +537,8 @@ enum {
#define pasid_supported(iommu) (sm_supported(iommu) && \
ecap_pasid((iommu)->ecap))
#define ssads_supported(iommu) (sm_supported(iommu) && \
- ecap_slads((iommu)->ecap))
+ ecap_slads((iommu)->ecap) && \
+ ecap_smpwc(iommu->ecap))
#define nested_supported(iommu) (sm_supported(iommu) && \
ecap_nest((iommu)->ecap))
@@ -598,22 +595,20 @@ struct qi_batch {
};
struct dmar_domain {
- int nid; /* node id */
+ union {
+ struct iommu_domain domain;
+ struct pt_iommu iommu;
+ /* First stage page table */
+ struct pt_iommu_x86_64 fspt;
+ /* Second stage page table */
+ struct pt_iommu_vtdss sspt;
+ };
+
struct xarray iommu_array; /* Attached IOMMU array */
- u8 iommu_coherency: 1; /* indicate coherency of iommu access */
- u8 force_snooping : 1; /* Create IOPTEs with snoop control */
- u8 set_pte_snp:1;
- u8 use_first_level:1; /* DMA translation for the domain goes
- * through the first level page table,
- * otherwise, goes through the second
- * level.
- */
+ u8 force_snooping:1; /* Create PASID entry with snoop control */
u8 dirty_tracking:1; /* Dirty tracking is enabled */
u8 nested_parent:1; /* Has other domains nested on it */
- u8 has_mappings:1; /* Has mappings configured through
- * iommu_map() interface.
- */
u8 iotlb_sync_map:1; /* Need to flush IOTLB cache or write
* buffer when creating mappings.
*/
@@ -626,26 +621,9 @@ struct dmar_domain {
struct list_head cache_tags; /* Cache tag list */
struct qi_batch *qi_batch; /* Batched QI descriptors */
- int iommu_superpage;/* Level of superpages supported:
- 0 == 4KiB (no superpages), 1 == 2MiB,
- 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
union {
/* DMA remapping domain */
struct {
- /* virtual address */
- struct dma_pte *pgd;
- /* max guest address width */
- int gaw;
- /*
- * adjusted guest address width:
- * 0: level 2 30-bit
- * 1: level 3 39-bit
- * 2: level 4 48-bit
- * 3: level 5 57-bit
- */
- int agaw;
- /* maximum mapped address */
- u64 max_addr;
/* Protect the s1_domains list */
spinlock_t s1_lock;
/* Track s1_domains nested on this domain */
@@ -667,10 +645,10 @@ struct dmar_domain {
struct mmu_notifier notifier;
};
};
-
- struct iommu_domain domain; /* generic domain data structure for
- iommu core */
};
+PT_IOMMU_CHECK_DOMAIN(struct dmar_domain, iommu, domain);
+PT_IOMMU_CHECK_DOMAIN(struct dmar_domain, sspt.iommu, domain);
+PT_IOMMU_CHECK_DOMAIN(struct dmar_domain, fspt.iommu, domain);
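
The PT_IOMMU_CHECK_DOMAIN() lines presumably assert at compile time that every union view places the embedded iommu_domain at the same offset, so casting between the dmar_domain, pt_iommu and per-format views stays valid. A sketch of that kind of layout check with invented types (the macro's real body is not shown in this diff):

#include <assert.h>
#include <stddef.h>

struct core { int domain; };
struct variant_a { struct core core; long extra_a; };
struct variant_b { struct core core; char extra_b[8]; };

struct wrapper {
        union {
                struct core core;
                struct variant_a a;
                struct variant_b b;
        };
};

static_assert(offsetof(struct wrapper, a.core.domain) ==
              offsetof(struct wrapper, core.domain), "views must overlay");
static_assert(offsetof(struct wrapper, b.core.domain) ==
              offsetof(struct wrapper, core.domain), "views must overlay");
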
/*
* In theory, the VT-d 4.0 spec can support up to 2 ^ 16 counters.
@@ -869,11 +847,6 @@ struct dma_pte {
u64 val;
};
-static inline void dma_clear_pte(struct dma_pte *pte)
-{
- pte->val = 0;
-}
-
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
@@ -889,32 +862,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
return (pte->val & 3) != 0;
}
-static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte,
- unsigned long flags)
-{
- if (flags & IOMMU_DIRTY_NO_CLEAR)
- return (pte->val & DMA_SL_PTE_DIRTY) != 0;
-
- return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT,
- (unsigned long *)&pte->val);
-}
-
static inline bool dma_pte_superpage(struct dma_pte *pte)
{
return (pte->val & DMA_PTE_LARGE_PAGE);
}
-static inline bool first_pte_in_page(struct dma_pte *pte)
-{
- return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
-}
-
-static inline int nr_pte_to_next_page(struct dma_pte *pte)
-{
- return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
- (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
-}
-
static inline bool context_present(struct context_entry *context)
{
return (context->lo & 1);
@@ -930,11 +882,6 @@ static inline int agaw_to_level(int agaw)
return agaw + 2;
}
-static inline int agaw_to_width(int agaw)
-{
- return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
-}
-
static inline int width_to_agaw(int width)
{
return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
@@ -950,25 +897,6 @@ static inline int pfn_level_offset(u64 pfn, int level)
return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
}
-static inline u64 level_mask(int level)
-{
- return -1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 level_size(int level)
-{
- return 1ULL << level_to_offset_bits(level);
-}
-
-static inline u64 align_to_level(u64 pfn, int level)
-{
- return (pfn + level_size(level) - 1) & level_mask(level);
-}
-
-static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
-{
- return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
-}
static inline void context_set_present(struct context_entry *context)
{
@@ -1100,7 +1028,7 @@ static inline void qi_desc_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
struct qi_desc *desc)
{
u8 dw = 0, dr = 0;
- int ih = 0;
+ int ih = addr & 1;
if (cap_write_drain(iommu->cap))
dw = 1;
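
The ih change above reads the invalidation hint out of bit 0 of addr instead of hardcoding it to zero; because the address is page-aligned, its low bits are free to carry flags. A toy illustration of that packing, not kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x123000ULL | 1;        /* bit 0 carries the hint */

        printf("ih=%u addr=0x%llx\n",
               (unsigned int)(addr & 1),
               (unsigned long long)(addr & ~1ULL));
        return 0;
}
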
diff --git a/drivers/iommu/intel/nested.c b/drivers/iommu/intel/nested.c
index 1b6ad9c900a5..a3fb8c193ca6 100644
--- a/drivers/iommu/intel/nested.c
+++ b/drivers/iommu/intel/nested.c
@@ -19,7 +19,7 @@
#include "pasid.h"
static int intel_nested_attach_dev(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
@@ -29,11 +29,6 @@ static int intel_nested_attach_dev(struct iommu_domain *domain,
device_block_translation(dev);
- if (iommu->agaw < dmar_domain->s2_domain->agaw) {
- dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
- return -ENODEV;
- }
-
/*
* Stage-1 domain cannot work alone, it is nested on a s2_domain.
* The s2_domain will be used in nested translation, hence needs
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 52f678975da7..3e2255057079 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -366,7 +366,7 @@ static void pasid_pte_config_first_level(struct intel_iommu *iommu,
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
- pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ pasid_set_page_snoop(pte, flags & PASID_FLAG_PWSNP);
/* Setup Present and PASID Granular Transfer Type: */
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
@@ -461,19 +461,22 @@ int intel_pasid_replace_first_level(struct intel_iommu *iommu,
*/
static void pasid_pte_config_second_level(struct intel_iommu *iommu,
struct pasid_entry *pte,
- u64 pgd_val, int agaw, u16 did,
- bool dirty_tracking)
+ struct dmar_domain *domain, u16 did)
{
+ struct pt_iommu_vtdss_hw_info pt_info;
+
lockdep_assert_held(&iommu->lock);
+ pt_iommu_vtdss_hw_info(&domain->sspt, &pt_info);
pasid_clear_entry(pte);
pasid_set_domain_id(pte, did);
- pasid_set_slptr(pte, pgd_val);
- pasid_set_address_width(pte, agaw);
+ pasid_set_slptr(pte, pt_info.ssptptr);
+ pasid_set_address_width(pte, pt_info.aw);
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
pasid_set_fault_enable(pte);
- pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
- if (dirty_tracking)
+ pasid_set_page_snoop(pte, !(domain->sspt.vtdss_pt.common.features &
+ BIT(PT_FEAT_DMA_INCOHERENT)));
+ if (domain->dirty_tracking)
pasid_set_ssade(pte);
pasid_set_present(pte);
@@ -484,10 +487,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
struct device *dev, u32 pasid)
{
struct pasid_entry *pte;
- struct dma_pte *pgd;
- u64 pgd_val;
u16 did;
+
/*
* If hardware advertises no support for second level
* translation, return directly.
@@ -498,8 +500,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
return -EINVAL;
}
- pgd = domain->pgd;
- pgd_val = virt_to_phys(pgd);
did = domain_id_iommu(domain, iommu);
spin_lock(&iommu->lock);
@@ -514,8 +514,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
return -EBUSY;
}
- pasid_pte_config_second_level(iommu, pte, pgd_val, domain->agaw,
- did, domain->dirty_tracking);
+ pasid_pte_config_second_level(iommu, pte, domain, did);
spin_unlock(&iommu->lock);
pasid_flush_caches(iommu, pte, pasid, did);
@@ -529,8 +528,6 @@ int intel_pasid_replace_second_level(struct intel_iommu *iommu,
u32 pasid)
{
struct pasid_entry *pte, new_pte;
- struct dma_pte *pgd;
- u64 pgd_val;
u16 did;
/*
@@ -543,13 +540,9 @@ int intel_pasid_replace_second_level(struct intel_iommu *iommu,
return -EINVAL;
}
- pgd = domain->pgd;
- pgd_val = virt_to_phys(pgd);
did = domain_id_iommu(domain, iommu);
- pasid_pte_config_second_level(iommu, &new_pte, pgd_val,
- domain->agaw, did,
- domain->dirty_tracking);
+ pasid_pte_config_second_level(iommu, &new_pte, domain, did);
spin_lock(&iommu->lock);
pte = intel_pasid_get_entry(dev, pasid);
@@ -747,10 +740,12 @@ static void pasid_pte_config_nestd(struct intel_iommu *iommu,
struct dmar_domain *s2_domain,
u16 did)
{
- struct dma_pte *pgd = s2_domain->pgd;
+ struct pt_iommu_vtdss_hw_info pt_info;
lockdep_assert_held(&iommu->lock);
+ pt_iommu_vtdss_hw_info(&s2_domain->sspt, &pt_info);
+
pasid_clear_entry(pte);
if (s1_cfg->addr_width == ADDR_WIDTH_5LEVEL)
@@ -770,11 +765,12 @@ static void pasid_pte_config_nestd(struct intel_iommu *iommu,
if (s2_domain->force_snooping)
pasid_set_pgsnp(pte);
- pasid_set_slptr(pte, virt_to_phys(pgd));
+ pasid_set_slptr(pte, pt_info.ssptptr);
pasid_set_fault_enable(pte);
pasid_set_domain_id(pte, did);
- pasid_set_address_width(pte, s2_domain->agaw);
- pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ pasid_set_address_width(pte, pt_info.aw);
+ pasid_set_page_snoop(pte, !(s2_domain->sspt.vtdss_pt.common.features &
+ BIT(PT_FEAT_DMA_INCOHERENT)));
if (s2_domain->dirty_tracking)
pasid_set_ssade(pte);
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index a771a77d4239..b4c85242dc79 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -24,6 +24,7 @@
#define PASID_FLAG_NESTED BIT(1)
#define PASID_FLAG_PAGE_SNOOP BIT(2)
+#define PASID_FLAG_PWSNP BIT(2)
/*
* The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index adc4de6bbd88..dceeadc3ee7c 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -113,7 +113,7 @@ static char *latency_type_names[] = {
" svm_prq"
};
-int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
+void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
{
struct latency_statistic *lstat = iommu->perf_statistic;
unsigned long flags;
@@ -122,7 +122,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
memset(str, 0, size);
for (i = 0; i < COUNTS_NUM; i++)
- bytes += snprintf(str + bytes, size - bytes,
+ bytes += scnprintf(str + bytes, size - bytes,
"%s", latency_counter_names[i]);
spin_lock_irqsave(&latency_lock, flags);
@@ -130,7 +130,7 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
if (!dmar_latency_enabled(iommu, i))
continue;
- bytes += snprintf(str + bytes, size - bytes,
+ bytes += scnprintf(str + bytes, size - bytes,
"\n%s", latency_type_names[i]);
for (j = 0; j < COUNTS_NUM; j++) {
@@ -156,11 +156,9 @@ int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
break;
}
- bytes += snprintf(str + bytes, size - bytes,
+ bytes += scnprintf(str + bytes, size - bytes,
"%12lld", val);
}
}
spin_unlock_irqrestore(&latency_lock, flags);
-
- return bytes;
}
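
The snprintf() -> scnprintf() swap fixes a classic bug: snprintf() returns the length the output *would* have had, so once the buffer fills, bytes can exceed size and the next size - bytes underflows. scnprintf() returns what was actually written. A userspace stand-in showing the clamp (bounded_printf is an invented name):

#include <stdio.h>

static size_t bounded_printf(char *buf, size_t size, const char *s)
{
        int n;

        if (!size)
                return 0;
        n = snprintf(buf, size, "%s", s);
        if (n < 0)
                return 0;
        /* clamp to what actually landed in buf, like scnprintf() */
        return (size_t)n < size ? (size_t)n : size - 1;
}
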
diff --git a/drivers/iommu/intel/perf.h b/drivers/iommu/intel/perf.h
index df9a36942d64..1d4baad7e852 100644
--- a/drivers/iommu/intel/perf.h
+++ b/drivers/iommu/intel/perf.h
@@ -40,7 +40,7 @@ void dmar_latency_disable(struct intel_iommu *iommu, enum latency_type type);
bool dmar_latency_enabled(struct intel_iommu *iommu, enum latency_type type);
void dmar_latency_update(struct intel_iommu *iommu, enum latency_type type,
u64 latency);
-int dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
+void dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size);
#else
static inline int
dmar_latency_enable(struct intel_iommu *iommu, enum latency_type type)
@@ -64,9 +64,8 @@ dmar_latency_update(struct intel_iommu *iommu, enum latency_type type, u64 laten
{
}
-static inline int
+static inline void
dmar_latency_snapshot(struct intel_iommu *iommu, char *str, size_t size)
{
- return 0;
}
#endif /* CONFIG_DMAR_PERF */
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 52570e42a14c..ff63c228e6e1 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -151,8 +151,7 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
QI_PGRP_PASID_P(req->pasid_present) |
QI_PGRP_RESP_CODE(result) |
QI_PGRP_RESP_TYPE;
- desc.qw1 = QI_PGRP_IDX(req->prg_index) |
- QI_PGRP_LPIG(req->lpig);
+ desc.qw1 = QI_PGRP_IDX(req->prg_index);
qi_submit_sync(iommu, &desc, 1, 0);
}
@@ -379,19 +378,17 @@ void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
struct iommu_fault_page_request *prm;
struct qi_desc desc;
bool pasid_present;
- bool last_page;
u16 sid;
prm = &evt->fault.prm;
sid = PCI_DEVID(bus, devfn);
pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
- last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
QI_PGRP_PASID_P(pasid_present) |
QI_PGRP_RESP_CODE(msg->code) |
QI_PGRP_RESP_TYPE;
- desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
+ desc.qw1 = QI_PGRP_IDX(prm->grpid);
desc.qw2 = 0;
desc.qw3 = 0;
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index e147f71f91b7..71de7947971f 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -170,6 +170,7 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
/* Setup the pasid table: */
sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
+ sflags |= PASID_FLAG_PWSNP;
ret = __domain_setup_first_level(iommu, dev, pasid,
FLPT_DEFAULT_DID, __pa(mm->pgd),
sflags, old);
diff --git a/drivers/iommu/io-pgtable-arm-selftests.c b/drivers/iommu/io-pgtable-arm-selftests.c
new file mode 100644
index 000000000000..334e70350924
--- /dev/null
+++ b/drivers/iommu/io-pgtable-arm-selftests.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CPU-agnostic ARM page table allocator.
+ *
+ * Copyright (C) 2014 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/io-pgtable.h>
+#include <linux/kernel.h>
+
+#include "io-pgtable-arm.h"
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_flush(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+{
+ WARN_ON(cookie != cfg_cookie);
+ WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+{
+ dummy_tlb_flush(iova, granule, granule, cookie);
+}
+
+static const struct iommu_flush_ops dummy_tlb_ops = {
+ .tlb_flush_all = dummy_tlb_flush_all,
+ .tlb_flush_walk = dummy_tlb_flush,
+ .tlb_add_page = dummy_tlb_add_page,
+};
+
+#define __FAIL(test, i) ({ \
+ KUNIT_FAIL(test, "test failed for fmt idx %d\n", (i)); \
+ -EFAULT; \
+})
+
+static int arm_lpae_run_tests(struct kunit *test, struct io_pgtable_cfg *cfg)
+{
+ static const enum io_pgtable_fmt fmts[] = {
+ ARM_64_LPAE_S1,
+ ARM_64_LPAE_S2,
+ };
+
+ int i, j;
+ unsigned long iova;
+ size_t size, mapped;
+ struct io_pgtable_ops *ops;
+
+ for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+ cfg_cookie = cfg;
+ ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
+ if (!ops) {
+ kunit_err(test, "failed to allocate io pgtable ops\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Initial sanity checks.
+ * Empty page tables shouldn't provide any translations.
+ */
+ if (ops->iova_to_phys(ops, 42))
+ return __FAIL(test, i);
+
+ if (ops->iova_to_phys(ops, SZ_1G + 42))
+ return __FAIL(test, i);
+
+ if (ops->iova_to_phys(ops, SZ_2G + 42))
+ return __FAIL(test, i);
+
+ /*
+ * Distinct mappings of different granule sizes.
+ */
+ iova = 0;
+ for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->map_pages(ops, iova, iova, size, 1,
+ IOMMU_READ | IOMMU_WRITE |
+ IOMMU_NOEXEC | IOMMU_CACHE,
+ GFP_KERNEL, &mapped))
+ return __FAIL(test, i);
+
+ /* Overlapping mappings */
+ if (!ops->map_pages(ops, iova, iova + size, size, 1,
+ IOMMU_READ | IOMMU_NOEXEC,
+ GFP_KERNEL, &mapped))
+ return __FAIL(test, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(test, i);
+
+ iova += SZ_1G;
+ }
+
+ /* Full unmap */
+ iova = 0;
+ for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
+ size = 1UL << j;
+
+ if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
+ return __FAIL(test, i);
+
+ if (ops->iova_to_phys(ops, iova + 42))
+ return __FAIL(test, i);
+
+ /* Remap full block */
+ if (ops->map_pages(ops, iova, iova, size, 1,
+ IOMMU_WRITE, GFP_KERNEL, &mapped))
+ return __FAIL(test, i);
+
+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+ return __FAIL(test, i);
+
+ iova += SZ_1G;
+ }
+
+ /*
+ * Map/unmap the last largest supported page of the IAS, which can
+ * trigger corner cases in the concatenated page tables.
+ */
+ mapped = 0;
+ size = 1UL << __fls(cfg->pgsize_bitmap);
+ iova = (1UL << cfg->ias) - size;
+ if (ops->map_pages(ops, iova, iova, size, 1,
+ IOMMU_READ | IOMMU_WRITE |
+ IOMMU_NOEXEC | IOMMU_CACHE,
+ GFP_KERNEL, &mapped))
+ return __FAIL(test, i);
+ if (mapped != size)
+ return __FAIL(test, i);
+ if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
+ return __FAIL(test, i);
+
+ free_io_pgtable_ops(ops);
+ }
+
+ return 0;
+}
+
+static void arm_lpae_do_selftests(struct kunit *test)
+{
+ static const unsigned long pgsize[] = {
+ SZ_4K | SZ_2M | SZ_1G,
+ SZ_16K | SZ_32M,
+ SZ_64K | SZ_512M,
+ };
+
+ static const unsigned int address_size[] = {
+ 32, 36, 40, 42, 44, 48,
+ };
+
+ int i, j, k, pass = 0, fail = 0;
+ struct device *dev;
+ struct io_pgtable_cfg cfg = {
+ .tlb = &dummy_tlb_ops,
+ .coherent_walk = true,
+ .quirks = IO_PGTABLE_QUIRK_NO_WARN,
+ };
+
+ dev = kunit_device_register(test, "io-pgtable-test");
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, dev);
+ if (IS_ERR_OR_NULL(dev))
+ return;
+
+ cfg.iommu_dev = dev;
+
+ for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
+ for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
+ /* Don't use ias > oas as it is not valid for stage-2. */
+ for (k = 0; k <= j; ++k) {
+ cfg.pgsize_bitmap = pgsize[i];
+ cfg.ias = address_size[k];
+ cfg.oas = address_size[j];
+ kunit_info(test, "pgsize_bitmap 0x%08lx, IAS %u OAS %u\n",
+ pgsize[i], cfg.ias, cfg.oas);
+ if (arm_lpae_run_tests(test, &cfg))
+ fail++;
+ else
+ pass++;
+ }
+ }
+ }
+
+ kunit_info(test, "completed with %d PASS %d FAIL\n", pass, fail);
+}
+
+static struct kunit_case io_pgtable_arm_test_cases[] = {
+ KUNIT_CASE(arm_lpae_do_selftests),
+ {},
+};
+
+static struct kunit_suite io_pgtable_arm_test = {
+ .name = "io-pgtable-arm-test",
+ .test_cases = io_pgtable_arm_test_cases,
+};
+
+kunit_test_suite(io_pgtable_arm_test);
+
+MODULE_DESCRIPTION("io-pgtable-arm library kunit tests");
+MODULE_LICENSE("GPL");
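
Assuming the new suite is enabled in the kunitconfig, it should be runnable through the standard KUnit wrapper, along the lines of:

        ./tools/testing/kunit/kunit.py run 'io-pgtable-arm-test'
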
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 7e8e2216c294..e6626004b323 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -12,8 +12,6 @@
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
-#include <linux/kernel.h>
-#include <linux/device/faux.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -1267,204 +1265,3 @@ struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
.alloc = arm_mali_lpae_alloc_pgtable,
.free = arm_lpae_free_pgtable,
};
-
-#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST
-
-static struct io_pgtable_cfg *cfg_cookie __initdata;
-
-static void __init dummy_tlb_flush_all(void *cookie)
-{
- WARN_ON(cookie != cfg_cookie);
-}
-
-static void __init dummy_tlb_flush(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- WARN_ON(cookie != cfg_cookie);
- WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
-}
-
-static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
- unsigned long iova, size_t granule,
- void *cookie)
-{
- dummy_tlb_flush(iova, granule, granule, cookie);
-}
-
-static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
- .tlb_flush_all = dummy_tlb_flush_all,
- .tlb_flush_walk = dummy_tlb_flush,
- .tlb_add_page = dummy_tlb_add_page,
-};
-
-static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
-{
- struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- struct io_pgtable_cfg *cfg = &data->iop.cfg;
-
- pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
- cfg->pgsize_bitmap, cfg->ias);
- pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
- ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
- ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
-}
-
-#define __FAIL(ops, i) ({ \
- WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \
- arm_lpae_dump_ops(ops); \
- -EFAULT; \
-})
-
-static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
-{
- static const enum io_pgtable_fmt fmts[] __initconst = {
- ARM_64_LPAE_S1,
- ARM_64_LPAE_S2,
- };
-
- int i, j;
- unsigned long iova;
- size_t size, mapped;
- struct io_pgtable_ops *ops;
-
- for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
- cfg_cookie = cfg;
- ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
- if (!ops) {
- pr_err("selftest: failed to allocate io pgtable ops\n");
- return -ENOMEM;
- }
-
- /*
- * Initial sanity checks.
- * Empty page tables shouldn't provide any translations.
- */
- if (ops->iova_to_phys(ops, 42))
- return __FAIL(ops, i);
-
- if (ops->iova_to_phys(ops, SZ_1G + 42))
- return __FAIL(ops, i);
-
- if (ops->iova_to_phys(ops, SZ_2G + 42))
- return __FAIL(ops, i);
-
- /*
- * Distinct mappings of different granule sizes.
- */
- iova = 0;
- for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
- size = 1UL << j;
-
- if (ops->map_pages(ops, iova, iova, size, 1,
- IOMMU_READ | IOMMU_WRITE |
- IOMMU_NOEXEC | IOMMU_CACHE,
- GFP_KERNEL, &mapped))
- return __FAIL(ops, i);
-
- /* Overlapping mappings */
- if (!ops->map_pages(ops, iova, iova + size, size, 1,
- IOMMU_READ | IOMMU_NOEXEC,
- GFP_KERNEL, &mapped))
- return __FAIL(ops, i);
-
- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
- return __FAIL(ops, i);
-
- iova += SZ_1G;
- }
-
- /* Full unmap */
- iova = 0;
- for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
- size = 1UL << j;
-
- if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
- return __FAIL(ops, i);
-
- if (ops->iova_to_phys(ops, iova + 42))
- return __FAIL(ops, i);
-
- /* Remap full block */
- if (ops->map_pages(ops, iova, iova, size, 1,
- IOMMU_WRITE, GFP_KERNEL, &mapped))
- return __FAIL(ops, i);
-
- if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
- return __FAIL(ops, i);
-
- iova += SZ_1G;
- }
-
- /*
- * Map/unmap the last largest supported page of the IAS, which can
- * trigger corner cases in the concatenated page tables.
- */
- mapped = 0;
- size = 1UL << __fls(cfg->pgsize_bitmap);
- iova = (1UL << cfg->ias) - size;
- if (ops->map_pages(ops, iova, iova, size, 1,
- IOMMU_READ | IOMMU_WRITE |
- IOMMU_NOEXEC | IOMMU_CACHE,
- GFP_KERNEL, &mapped))
- return __FAIL(ops, i);
- if (mapped != size)
- return __FAIL(ops, i);
- if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
- return __FAIL(ops, i);
-
- free_io_pgtable_ops(ops);
- }
-
- return 0;
-}
-
-static int __init arm_lpae_do_selftests(void)
-{
- static const unsigned long pgsize[] __initconst = {
- SZ_4K | SZ_2M | SZ_1G,
- SZ_16K | SZ_32M,
- SZ_64K | SZ_512M,
- };
-
- static const unsigned int address_size[] __initconst = {
- 32, 36, 40, 42, 44, 48,
- };
-
- int i, j, k, pass = 0, fail = 0;
- struct faux_device *dev;
- struct io_pgtable_cfg cfg = {
- .tlb = &dummy_tlb_ops,
- .coherent_walk = true,
- .quirks = IO_PGTABLE_QUIRK_NO_WARN,
- };
-
- dev = faux_device_create("io-pgtable-test", NULL, 0);
- if (!dev)
- return -ENOMEM;
-
- cfg.iommu_dev = &dev->dev;
-
- for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
- for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
- /* Don't use ias > oas as it is not valid for stage-2. */
- for (k = 0; k <= j; ++k) {
- cfg.pgsize_bitmap = pgsize[i];
- cfg.ias = address_size[k];
- cfg.oas = address_size[j];
- pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u OAS %u\n",
- pgsize[i], cfg.ias, cfg.oas);
- if (arm_lpae_run_tests(&cfg))
- fail++;
- else
- pass++;
- }
- }
- }
-
- pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
- faux_device_destroy(dev);
-
- return fail ? -EFAULT : 0;
-}
-subsys_initcall(arm_lpae_do_selftests);
-#endif
diff --git a/drivers/iommu/io-pgtable-dart.c b/drivers/iommu/io-pgtable-dart.c
index 679bda104797..54d287cc0dd1 100644
--- a/drivers/iommu/io-pgtable-dart.c
+++ b/drivers/iommu/io-pgtable-dart.c
@@ -27,8 +27,9 @@
#define DART1_MAX_ADDR_BITS 36
-#define DART_MAX_TABLES 4
-#define DART_LEVELS 2
+#define DART_MAX_TABLE_BITS 2
+#define DART_MAX_TABLES BIT(DART_MAX_TABLE_BITS)
+#define DART_MAX_LEVELS 4 /* Includes TTBR level */
/* Struct accessors */
#define io_pgtable_to_data(x) \
@@ -68,6 +69,7 @@
struct dart_io_pgtable {
struct io_pgtable iop;
+ int levels;
int tbl_bits;
int bits_per_level;
@@ -156,44 +158,45 @@ static dart_iopte dart_install_table(dart_iopte *table,
return old;
}
-static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova)
+static int dart_get_index(struct dart_io_pgtable *data, unsigned long iova, int level)
{
- return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
- ((1 << data->tbl_bits) - 1);
+ return (iova >> (level * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
+ ((1 << data->bits_per_level) - 1);
}
-static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova)
-{
-
- return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
- ((1 << data->bits_per_level) - 1);
-}
-
-static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova)
+static int dart_get_last_index(struct dart_io_pgtable *data, unsigned long iova)
{
return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
((1 << data->bits_per_level) - 1);
}
-static dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova)
+static dart_iopte *dart_get_last(struct dart_io_pgtable *data, unsigned long iova)
{
dart_iopte pte, *ptep;
- int tbl = dart_get_table(data, iova);
+ int level = data->levels;
+ int tbl = dart_get_index(data, iova, level);
+
+ if (tbl >= (1 << data->tbl_bits))
+ return NULL;
ptep = data->pgd[tbl];
if (!ptep)
return NULL;
- ptep += dart_get_l1_index(data, iova);
- pte = READ_ONCE(*ptep);
+ while (--level > 1) {
+ ptep += dart_get_index(data, iova, level);
+ pte = READ_ONCE(*ptep);
- /* Valid entry? */
- if (!pte)
- return NULL;
+ /* Valid entry? */
+ if (!pte)
+ return NULL;
- /* Deref to get level 2 table */
- return iopte_deref(pte, data);
+ /* Deref to get next level table */
+ ptep = iopte_deref(pte, data);
+ }
+
+ return ptep;
}
static dart_iopte dart_prot_to_pte(struct dart_io_pgtable *data,
@@ -230,6 +233,7 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
int ret = 0, tbl, num_entries, max_entries, map_idx_start;
dart_iopte pte, *cptep, *ptep;
dart_iopte prot;
+ int level = data->levels;
if (WARN_ON(pgsize != cfg->pgsize_bitmap))
return -EINVAL;
@@ -240,31 +244,36 @@ static int dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return -EINVAL;
- tbl = dart_get_table(data, iova);
+ tbl = dart_get_index(data, iova, level);
+
+ if (tbl >= (1 << data->tbl_bits))
+ return -ENOMEM;
ptep = data->pgd[tbl];
- ptep += dart_get_l1_index(data, iova);
- pte = READ_ONCE(*ptep);
+ while (--level > 1) {
+ ptep += dart_get_index(data, iova, level);
+ pte = READ_ONCE(*ptep);
- /* no L2 table present */
- if (!pte) {
- cptep = iommu_alloc_pages_sz(gfp, tblsz);
- if (!cptep)
- return -ENOMEM;
+ /* no table present */
+ if (!pte) {
+ cptep = iommu_alloc_pages_sz(gfp, tblsz);
+ if (!cptep)
+ return -ENOMEM;
- pte = dart_install_table(cptep, ptep, 0, data);
- if (pte)
- iommu_free_pages(cptep);
+ pte = dart_install_table(cptep, ptep, 0, data);
+ if (pte)
+ iommu_free_pages(cptep);
- /* L2 table is present (now) */
- pte = READ_ONCE(*ptep);
- }
+ /* L2 table is present (now) */
+ pte = READ_ONCE(*ptep);
+ }
- ptep = iopte_deref(pte, data);
+ ptep = iopte_deref(pte, data);
+ }
/* install leaf entries into the L2 table */
prot = dart_prot_to_pte(data, iommu_prot);
- map_idx_start = dart_get_l2_index(data, iova);
+ map_idx_start = dart_get_last_index(data, iova);
max_entries = DART_PTES_PER_TABLE(data) - map_idx_start;
num_entries = min_t(int, pgcount, max_entries);
ptep += map_idx_start;
@@ -293,13 +302,13 @@ static size_t dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
if (WARN_ON(pgsize != cfg->pgsize_bitmap || !pgcount))
return 0;
- ptep = dart_get_l2(data, iova);
+ ptep = dart_get_last(data, iova);
/* Valid last level IOPTE pointer? */
if (WARN_ON(!ptep))
return 0;
- unmap_idx_start = dart_get_l2_index(data, iova);
+ unmap_idx_start = dart_get_last_index(data, iova);
ptep += unmap_idx_start;
max_entries = DART_PTES_PER_TABLE(data) - unmap_idx_start;
@@ -330,13 +339,13 @@ static phys_addr_t dart_iova_to_phys(struct io_pgtable_ops *ops,
struct dart_io_pgtable *data = io_pgtable_ops_to_data(ops);
dart_iopte pte, *ptep;
- ptep = dart_get_l2(data, iova);
+ ptep = dart_get_last(data, iova);
/* Valid last level IOPTE pointer? */
if (!ptep)
return 0;
- ptep += dart_get_l2_index(data, iova);
+ ptep += dart_get_last_index(data, iova);
pte = READ_ONCE(*ptep);
/* Found translation */
@@ -353,21 +362,37 @@ static struct dart_io_pgtable *
dart_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
struct dart_io_pgtable *data;
- int tbl_bits, bits_per_level, va_bits, pg_shift;
+ int levels, max_tbl_bits, tbl_bits, bits_per_level, va_bits, pg_shift;
+
+ /*
+ * Old 4K page DARTs can use up to 4 top-level tables.
+ * Newer ones only ever use a maximum of 1.
+ */
+ if (cfg->pgsize_bitmap == SZ_4K)
+ max_tbl_bits = DART_MAX_TABLE_BITS;
+ else
+ max_tbl_bits = 0;
pg_shift = __ffs(cfg->pgsize_bitmap);
bits_per_level = pg_shift - ilog2(sizeof(dart_iopte));
va_bits = cfg->ias - pg_shift;
- tbl_bits = max_t(int, 0, va_bits - (bits_per_level * DART_LEVELS));
- if ((1 << tbl_bits) > DART_MAX_TABLES)
+ levels = max_t(int, 2, (va_bits - max_tbl_bits + bits_per_level - 1) / bits_per_level);
+
+ if (levels > (DART_MAX_LEVELS - 1))
+ return NULL;
+
+ tbl_bits = max_t(int, 0, va_bits - (bits_per_level * levels));
+
+ if (tbl_bits > max_tbl_bits)
return NULL;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return NULL;
+ data->levels = levels + 1; /* Table level counts as one level */
data->tbl_bits = tbl_bits;
data->bits_per_level = bits_per_level;
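
The sizing math above is easiest to see with concrete numbers. A standalone sketch using hypothetical values (16K pages, 38-bit IAS, max_tbl_bits = 0 as for a non-4K DART); the names mirror dart_alloc_pgtable() but nothing here is taken from real hardware:

#include <stdio.h>

int main(void)
{
	int pg_shift = 14;			/* 16K pages */
	int bits_per_level = pg_shift - 3;	/* 8-byte PTEs -> 11 */
	int va_bits = 38 - pg_shift;		/* 24 */
	int max_tbl_bits = 0;			/* newer DARTs: 1 top table */
	int levels, tbl_bits;

	/* Same round-up division as dart_alloc_pgtable() */
	levels = (va_bits - max_tbl_bits + bits_per_level - 1) / bits_per_level;
	if (levels < 2)
		levels = 2;

	tbl_bits = va_bits - bits_per_level * levels;
	if (tbl_bits < 0)
		tbl_bits = 0;

	/* Prints levels=3 tbl_bits=0; the driver then stores levels + 1 */
	printf("levels=%d tbl_bits=%d\n", levels, tbl_bits);
	return 0;
}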
@@ -403,6 +428,7 @@ apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
return NULL;
cfg->apple_dart_cfg.n_ttbrs = 1 << data->tbl_bits;
+ cfg->apple_dart_cfg.n_levels = data->levels;
for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i) {
data->pgd[i] =
@@ -422,24 +448,31 @@ out_free_data:
return NULL;
}
-static void apple_dart_free_pgtable(struct io_pgtable *iop)
+static void apple_dart_free_pgtables(struct dart_io_pgtable *data, dart_iopte *ptep, int level)
{
- struct dart_io_pgtable *data = io_pgtable_to_data(iop);
- dart_iopte *ptep, *end;
- int i;
+ dart_iopte *end;
+ dart_iopte *start = ptep;
- for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i) {
- ptep = data->pgd[i];
+ if (level > 1) {
end = (void *)ptep + DART_GRANULE(data);
while (ptep != end) {
dart_iopte pte = *ptep++;
if (pte)
- iommu_free_pages(iopte_deref(pte, data));
+ apple_dart_free_pgtables(data, iopte_deref(pte, data), level - 1);
}
- iommu_free_pages(data->pgd[i]);
}
+ iommu_free_pages(start);
+}
+
+static void apple_dart_free_pgtable(struct io_pgtable *iop)
+{
+ struct dart_io_pgtable *data = io_pgtable_to_data(iop);
+ int i;
+
+ for (i = 0; i < (1 << data->tbl_bits) && data->pgd[i]; ++i)
+ apple_dart_free_pgtables(data, data->pgd[i], data->levels - 1);
kfree(data);
}
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 8841c1487f00..843fec8e8a51 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -28,10 +28,6 @@ io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
#endif
-#ifdef CONFIG_AMD_IOMMU
- [AMD_IOMMU_V1] = &io_pgtable_amd_iommu_v1_init_fns,
- [AMD_IOMMU_V2] = &io_pgtable_amd_iommu_v2_init_fns,
-#endif
};
static int check_custom_allocator(enum io_pgtable_fmt fmt,
diff --git a/drivers/iommu/iommu-pages.c b/drivers/iommu/iommu-pages.c
index 238c09e5166b..3bab175d8557 100644
--- a/drivers/iommu/iommu-pages.c
+++ b/drivers/iommu/iommu-pages.c
@@ -4,6 +4,7 @@
* Pasha Tatashin <pasha.tatashin@soleen.com>
*/
#include "iommu-pages.h"
+#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
@@ -22,6 +23,11 @@ IOPTDESC_MATCH(memcg_data, memcg_data);
#undef IOPTDESC_MATCH
static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
+static inline size_t ioptdesc_mem_size(struct ioptdesc *desc)
+{
+ return 1UL << (folio_order(ioptdesc_folio(desc)) + PAGE_SHIFT);
+}
+
/**
* iommu_alloc_pages_node_sz - Allocate a zeroed page of a given size from
* specific NUMA node
@@ -36,6 +42,7 @@ static_assert(sizeof(struct ioptdesc) <= sizeof(struct page));
*/
void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
{
+ struct ioptdesc *iopt;
unsigned long pgcnt;
struct folio *folio;
unsigned int order;
@@ -60,6 +67,9 @@ void *iommu_alloc_pages_node_sz(int nid, gfp_t gfp, size_t size)
if (unlikely(!folio))
return NULL;
+ iopt = folio_ioptdesc(folio);
+ iopt->incoherent = false;
+
/*
* All page allocations that should be reported as "iommu-pagetables"
* to userspace must use one of the functions below. This includes
@@ -80,7 +90,10 @@ EXPORT_SYMBOL_GPL(iommu_alloc_pages_node_sz);
static void __iommu_free_desc(struct ioptdesc *iopt)
{
struct folio *folio = ioptdesc_folio(iopt);
- const unsigned long pgcnt = 1UL << folio_order(folio);
+ const unsigned long pgcnt = folio_nr_pages(folio);
+
+ if (IOMMU_PAGES_USE_DMA_API)
+ WARN_ON_ONCE(iopt->incoherent);
mod_node_page_state(folio_pgdat(folio), NR_IOMMU_PAGES, -pgcnt);
lruvec_stat_mod_folio(folio, NR_SECONDARY_PAGETABLE, -pgcnt);
@@ -117,3 +130,124 @@ void iommu_put_pages_list(struct iommu_pages_list *list)
__iommu_free_desc(iopt);
}
EXPORT_SYMBOL_GPL(iommu_put_pages_list);
+
+/**
+ * iommu_pages_start_incoherent - Setup the page for cache incoherent operation
+ * @virt: The page to setup
+ * @dma_dev: The iommu device
+ *
+ * For incoherent memory this will use the DMA API to manage the cache flushing
+ * on some arches. This is a lot of complexity compared to just calling
+ * arch_sync_dma_for_device(), but it is what the existing ARM iommu drivers
+ * have been doing. The DMA API requires keeping track of the DMA map and
+ * freeing it when required. This keeps track of the dma map inside the ioptdesc
+ * so that error paths are simple for the caller.
+ */
+int iommu_pages_start_incoherent(void *virt, struct device *dma_dev)
+{
+ struct ioptdesc *iopt = virt_to_ioptdesc(virt);
+ dma_addr_t dma;
+
+ if (WARN_ON(iopt->incoherent))
+ return -EINVAL;
+
+ if (!IOMMU_PAGES_USE_DMA_API) {
+ iommu_pages_flush_incoherent(dma_dev, virt, 0,
+ ioptdesc_mem_size(iopt));
+ } else {
+ dma = dma_map_single(dma_dev, virt, ioptdesc_mem_size(iopt),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma))
+ return -EINVAL;
+
+ /*
+ * The DMA API is not allowed to do anything other than DMA
+ * direct. It would be nice to also check
+ * dev_is_dma_coherent(dma_dev) here.
+ */
+ if (WARN_ON(dma != virt_to_phys(virt))) {
+ dma_unmap_single(dma_dev, dma, ioptdesc_mem_size(iopt),
+ DMA_TO_DEVICE);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ iopt->incoherent = 1;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_pages_start_incoherent);
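
A sketch of how a driver for a non-coherent IOMMU might pair these helpers; mydrv_alloc_table()/mydrv_free_table() are hypothetical names, not part of this patch:

/* Hypothetical caller, illustrating the intended alloc/undo pairing. */
static void *mydrv_alloc_table(struct device *dma_dev, gfp_t gfp, size_t sz)
{
	void *tbl = iommu_alloc_pages_sz(gfp, sz);

	if (!tbl)
		return NULL;

	/* DMA map (or flush) for the device; nothing is tracked on failure */
	if (iommu_pages_start_incoherent(tbl, dma_dev)) {
		iommu_free_pages(tbl);
		return NULL;
	}
	return tbl;
}

static void mydrv_free_table(struct device *dma_dev, void *tbl)
{
	/* Undoes the DMA map, if any, before freeing */
	iommu_pages_free_incoherent(tbl, dma_dev);
}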
+
+/**
+ * iommu_pages_start_incoherent_list - Make a list of pages incoherent
+ * @list: The list of pages to setup
+ * @dma_dev: The iommu device
+ *
+ * Perform iommu_pages_start_incoherent() across all of the list.
+ *
+ * If this fails the caller must call iommu_pages_stop_incoherent_list().
+ */
+int iommu_pages_start_incoherent_list(struct iommu_pages_list *list,
+ struct device *dma_dev)
+{
+ struct ioptdesc *cur;
+ int ret;
+
+ list_for_each_entry(cur, &list->pages, iopt_freelist_elm) {
+ if (WARN_ON(cur->incoherent))
+ continue;
+
+ ret = iommu_pages_start_incoherent(
+ folio_address(ioptdesc_folio(cur)), dma_dev);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_pages_start_incoherent_list);
+
+/**
+ * iommu_pages_stop_incoherent_list - Undo incoherence across a list
+ * @list: The list of pages to release
+ * @dma_dev: The iommu device
+ *
+ * Revert iommu_pages_start_incoherent() across all of the list. Pages for
+ * which iommu_pages_start_incoherent() was not called, or did not succeed,
+ * are ignored.
+ */
+#if IOMMU_PAGES_USE_DMA_API
+void iommu_pages_stop_incoherent_list(struct iommu_pages_list *list,
+ struct device *dma_dev)
+{
+ struct ioptdesc *cur;
+
+ list_for_each_entry(cur, &list->pages, iopt_freelist_elm) {
+ struct folio *folio = ioptdesc_folio(cur);
+
+ if (!cur->incoherent)
+ continue;
+ dma_unmap_single(dma_dev, virt_to_phys(folio_address(folio)),
+ ioptdesc_mem_size(cur), DMA_TO_DEVICE);
+ cur->incoherent = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(iommu_pages_stop_incoherent_list);
+
+/**
+ * iommu_pages_free_incoherent - Free an incoherent page
+ * @virt: virtual address of the page to be freed.
+ * @dma_dev: The iommu device
+ *
+ * If the page is incoherent it is made coherent again and then freed.
+ */
+void iommu_pages_free_incoherent(void *virt, struct device *dma_dev)
+{
+ struct ioptdesc *iopt = virt_to_ioptdesc(virt);
+
+ if (iopt->incoherent) {
+ dma_unmap_single(dma_dev, virt_to_phys(virt),
+ ioptdesc_mem_size(iopt), DMA_TO_DEVICE);
+ iopt->incoherent = 0;
+ }
+ __iommu_free_desc(iopt);
+}
+EXPORT_SYMBOL_GPL(iommu_pages_free_incoherent);
+#endif
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index b3af2813ed0c..ae9da4f571f6 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -21,7 +21,10 @@ struct ioptdesc {
struct list_head iopt_freelist_elm;
unsigned long __page_mapping;
- pgoff_t __index;
+ union {
+ u8 incoherent;
+ pgoff_t __index;
+ };
void *_private;
unsigned int __page_type;
@@ -98,4 +101,48 @@ static inline void *iommu_alloc_pages_sz(gfp_t gfp, size_t size)
return iommu_alloc_pages_node_sz(NUMA_NO_NODE, gfp, size);
}
-#endif /* __IOMMU_PAGES_H */
+int iommu_pages_start_incoherent(void *virt, struct device *dma_dev);
+int iommu_pages_start_incoherent_list(struct iommu_pages_list *list,
+ struct device *dma_dev);
+
+#ifdef CONFIG_X86
+#define IOMMU_PAGES_USE_DMA_API 0
+#include <linux/cacheflush.h>
+
+static inline void iommu_pages_flush_incoherent(struct device *dma_dev,
+ void *virt, size_t offset,
+ size_t len)
+{
+ clflush_cache_range(virt + offset, len);
+}
+static inline void
+iommu_pages_stop_incoherent_list(struct iommu_pages_list *list,
+ struct device *dma_dev)
+{
+ /*
+ * For performance, leave the incoherent flag alone, which turns this into
+ * a NOP. On x86 the rest of the stop/free flow ignores the flag.
+ */
+}
+static inline void iommu_pages_free_incoherent(void *virt,
+ struct device *dma_dev)
+{
+ iommu_free_pages(virt);
+}
+#else
+#define IOMMU_PAGES_USE_DMA_API 1
+#include <linux/dma-mapping.h>
+
+static inline void iommu_pages_flush_incoherent(struct device *dma_dev,
+ void *virt, size_t offset,
+ size_t len)
+{
+ dma_sync_single_for_device(dma_dev, (uintptr_t)virt + offset, len,
+ DMA_TO_DEVICE);
+}
+void iommu_pages_stop_incoherent_list(struct iommu_pages_list *list,
+ struct device *dma_dev);
+void iommu_pages_free_incoherent(void *virt, struct device *dma_dev);
+#endif
+
+#endif /* __IOMMU_PAGES_H */
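
Once a table is live and incoherent, only the dirtied range needs flushing after a PTE update. A minimal sketch with hypothetical names; tablep must be the start of the allocation so the offset arithmetic matches the DMA mapping:

/* Hypothetical helper: write n PTEs, then make them visible to the walker */
static void mydrv_set_ptes(struct device *dma_dev, u64 *tablep,
			   unsigned int idx, u64 pte, unsigned int n)
{
	unsigned int i;

	for (i = 0; i != n; i++)
		WRITE_ONCE(tablep[idx + i], pte + i * SZ_4K);

	/* offset/len select only the touched PTEs, not the whole table */
	iommu_pages_flush_incoherent(dma_dev, tablep, idx * sizeof(*tablep),
				     n * sizeof(*tablep));
}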
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index e236b932e766..c95394cd03a7 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -37,6 +37,8 @@ void iommu_device_unregister_bus(struct iommu_device *iommu,
const struct bus_type *bus,
struct notifier_block *nb);
+int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu);
+
struct iommu_attach_handle *iommu_attach_handle_get(struct iommu_group *group,
ioasid_t pasid,
unsigned int type);
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index 1a51cfd82808..d236aef80a8d 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -10,6 +10,8 @@
#include "iommu-priv.h"
static DEFINE_MUTEX(iommu_sva_lock);
+static bool iommu_sva_present;
+static LIST_HEAD(iommu_sva_mms);
static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
struct mm_struct *mm);
@@ -42,6 +44,7 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
return ERR_PTR(-ENOSPC);
}
iommu_mm->pasid = pasid;
+ iommu_mm->mm = mm;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
/*
* Make sure the write to mm->iommu_mm is not reordered in front of
@@ -132,8 +135,13 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
if (ret)
goto out_free_domain;
domain->users = 1;
- list_add(&domain->next, &mm->iommu_mm->sva_domains);
+ if (list_empty(&iommu_mm->sva_domains)) {
+ if (list_empty(&iommu_sva_mms))
+ iommu_sva_present = true;
+ list_add(&iommu_mm->mm_list_elm, &iommu_sva_mms);
+ }
+ list_add(&domain->next, &iommu_mm->sva_domains);
out:
refcount_set(&handle->users, 1);
mutex_unlock(&iommu_sva_lock);
@@ -175,6 +183,13 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
list_del(&domain->next);
iommu_domain_free(domain);
}
+
+ if (list_empty(&iommu_mm->sva_domains)) {
+ list_del(&iommu_mm->mm_list_elm);
+ if (list_empty(&iommu_sva_mms))
+ iommu_sva_present = false;
+ }
+
mutex_unlock(&iommu_sva_lock);
kfree(handle);
}
@@ -312,3 +327,15 @@ static struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
return domain;
}
+
+void iommu_sva_invalidate_kva_range(unsigned long start, unsigned long end)
+{
+ struct iommu_mm_data *iommu_mm;
+
+ guard(mutex)(&iommu_sva_lock);
+ if (!iommu_sva_present)
+ return;
+
+ list_for_each_entry(iommu_mm, &iommu_sva_mms, mm_list_elm)
+ mmu_notifier_arch_invalidate_secondary_tlbs(iommu_mm->mm, start, end);
+}
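
A hypothetical call site for the new helper; the actual wiring is architecture-specific and not part of this hunk. The intent is that a kernel VA teardown flushes CPU TLBs first, then the secondary (IOMMU) TLBs of any live SVA mm:

/* Hypothetical example, assuming a kernel VA range is being torn down */
static void example_kva_flush(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range(start, end);
	iommu_sva_invalidate_kva_range(start, end);
}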
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 060ebe330ee1..2ca990dfbb88 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -100,7 +100,7 @@ static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data);
static void iommu_release_device(struct device *dev);
static int __iommu_attach_device(struct iommu_domain *domain,
- struct device *dev);
+ struct device *dev, struct iommu_domain *old);
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
static struct iommu_domain *__iommu_paging_domain_alloc_flags(struct device *dev,
@@ -114,6 +114,7 @@ enum {
static int __iommu_device_set_domain(struct iommu_group *group,
struct device *dev,
struct iommu_domain *new_domain,
+ struct iommu_domain *old_domain,
unsigned int flags);
static int __iommu_group_set_domain_internal(struct iommu_group *group,
struct iommu_domain *new_domain,
@@ -304,6 +305,7 @@ void iommu_device_unregister_bus(struct iommu_device *iommu,
struct notifier_block *nb)
{
bus_unregister_notifier(bus, nb);
+ fwnode_remove_software_node(iommu->fwnode);
iommu_device_unregister(iommu);
}
EXPORT_SYMBOL_GPL(iommu_device_unregister_bus);
@@ -326,6 +328,12 @@ int iommu_device_register_bus(struct iommu_device *iommu,
if (err)
return err;
+ iommu->fwnode = fwnode_create_software_node(NULL, NULL);
+ if (IS_ERR(iommu->fwnode)) {
+ bus_unregister_notifier(bus, nb);
+ return PTR_ERR(iommu->fwnode);
+ }
+
spin_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
spin_unlock(&iommu_device_lock);
@@ -335,9 +343,28 @@ int iommu_device_register_bus(struct iommu_device *iommu,
iommu_device_unregister_bus(iommu, bus, nb);
return err;
}
+ WRITE_ONCE(iommu->ready, true);
return 0;
}
EXPORT_SYMBOL_GPL(iommu_device_register_bus);
+
+int iommu_mock_device_add(struct device *dev, struct iommu_device *iommu)
+{
+ int rc;
+
+ mutex_lock(&iommu_probe_device_lock);
+ rc = iommu_fwspec_init(dev, iommu->fwnode);
+ mutex_unlock(&iommu_probe_device_lock);
+
+ if (rc)
+ return rc;
+
+ rc = device_add(dev);
+ if (rc)
+ iommu_fwspec_free(dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(iommu_mock_device_add);
#endif
static struct dev_iommu *dev_iommu_get(struct device *dev)
@@ -516,8 +543,21 @@ static void iommu_deinit_device(struct device *dev)
* Regardless, if a delayed attach never occurred, then the release
* should still avoid touching any hardware configuration either.
*/
- if (!dev->iommu->attach_deferred && ops->release_domain)
- ops->release_domain->ops->attach_dev(ops->release_domain, dev);
+ if (!dev->iommu->attach_deferred && ops->release_domain) {
+ struct iommu_domain *release_domain = ops->release_domain;
+
+ /*
+ * If the device requires direct mappings then it should not
+ * be parked on a BLOCKED domain during release as that would
+ * break the direct mappings.
+ */
+ if (dev->iommu->require_direct && ops->identity_domain &&
+ release_domain == ops->blocked_domain)
+ release_domain = ops->identity_domain;
+
+ release_domain->ops->attach_dev(release_domain, dev,
+ group->domain);
+ }
if (ops->release_device)
ops->release_device(dev);
@@ -602,7 +642,8 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
if (group->default_domain)
iommu_create_device_direct_mappings(group->default_domain, dev);
if (group->domain) {
- ret = __iommu_device_set_domain(group, dev, group->domain, 0);
+ ret = __iommu_device_set_domain(group, dev, group->domain, NULL,
+ 0);
if (ret)
goto err_remove_gdev;
} else if (!group->default_domain && !group_list) {
@@ -2089,14 +2130,14 @@ static void __iommu_group_set_core_domain(struct iommu_group *group)
}
static int __iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
int ret;
if (unlikely(domain->ops->attach_dev == NULL))
return -ENODEV;
- ret = domain->ops->attach_dev(domain, dev);
+ ret = domain->ops->attach_dev(domain, dev, old);
if (ret)
return ret;
dev->iommu->attach_deferred = 0;
@@ -2145,7 +2186,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
if (dev->iommu && dev->iommu->attach_deferred)
- return __iommu_attach_device(domain, dev);
+ return __iommu_attach_device(domain, dev, NULL);
return 0;
}
@@ -2258,6 +2299,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_group);
static int __iommu_device_set_domain(struct iommu_group *group,
struct device *dev,
struct iommu_domain *new_domain,
+ struct iommu_domain *old_domain,
unsigned int flags)
{
int ret;
@@ -2283,7 +2325,7 @@ static int __iommu_device_set_domain(struct iommu_group *group,
dev->iommu->attach_deferred = 0;
}
- ret = __iommu_attach_device(new_domain, dev);
+ ret = __iommu_attach_device(new_domain, dev, old_domain);
if (ret) {
/*
* If we have a blocking domain then try to attach that in hopes
@@ -2293,7 +2335,8 @@ static int __iommu_device_set_domain(struct iommu_group *group,
if ((flags & IOMMU_SET_DOMAIN_MUST_SUCCEED) &&
group->blocking_domain &&
group->blocking_domain != new_domain)
- __iommu_attach_device(group->blocking_domain, dev);
+ __iommu_attach_device(group->blocking_domain, dev,
+ old_domain);
return ret;
}
return 0;
@@ -2340,7 +2383,7 @@ static int __iommu_group_set_domain_internal(struct iommu_group *group,
result = 0;
for_each_group_device(group, gdev) {
ret = __iommu_device_set_domain(group, gdev->dev, new_domain,
- flags);
+ group->domain, flags);
if (ret) {
result = ret;
/*
@@ -2365,6 +2408,9 @@ err_revert:
*/
last_gdev = gdev;
for_each_group_device(group, gdev) {
+ /* No need to revert the last gdev that failed to set domain */
+ if (gdev == last_gdev)
+ break;
/*
* A NULL domain can happen only for first probe, in which case
* we leave group->domain as NULL and let release clean
@@ -2372,10 +2418,8 @@ err_revert:
*/
if (group->domain)
WARN_ON(__iommu_device_set_domain(
- group, gdev->dev, group->domain,
+ group, gdev->dev, group->domain, new_domain,
IOMMU_SET_DOMAIN_MUST_SUCCEED));
- if (gdev == last_gdev)
- break;
}
return ret;
}
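
The new third argument threads the previously attached domain down to drivers. A sketch of the updated callback with a hypothetical driver name; this section only shows the core plumbing, and individual driver conversions live elsewhere:

static int mydrv_attach_dev(struct iommu_domain *domain, struct device *dev,
			    struct iommu_domain *old)
{
	/*
	 * 'old' is the domain the device is currently attached to (NULL on
	 * a first or deferred attach), letting the driver reuse or cleanly
	 * tear down existing hardware state instead of guessing.
	 */
	return 0;
}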
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index 2beeb4f60ee5..eae3f03629b0 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -41,6 +41,7 @@ config IOMMUFD_TEST
depends on DEBUG_KERNEL
depends on FAULT_INJECTION
depends on RUNTIME_TESTING_MENU
+ depends on IOMMU_PT_AMDV1
select IOMMUFD_DRIVER
default n
help
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 65fbd098f9e9..4c842368289f 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -711,6 +711,8 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev, ioasid_t pasid)
iopt_remove_reserved_iova(&hwpt_paging->ioas->iopt, idev->dev);
mutex_unlock(&igroup->lock);
+ iommufd_hw_pagetable_put(idev->ictx, hwpt);
+
/* Caller must destroy hwpt */
return hwpt;
}
@@ -1057,7 +1059,6 @@ void iommufd_device_detach(struct iommufd_device *idev, ioasid_t pasid)
hwpt = iommufd_hw_pagetable_detach(idev, pasid);
if (!hwpt)
return;
- iommufd_hw_pagetable_put(idev->ictx, hwpt);
refcount_dec(&idev->obj.users);
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, "IOMMUFD");
diff --git a/drivers/iommu/iommufd/driver.c b/drivers/iommu/iommufd/driver.c
index 6f1010da221c..21d4a35538f6 100644
--- a/drivers/iommu/iommufd/driver.c
+++ b/drivers/iommu/iommufd/driver.c
@@ -161,8 +161,8 @@ int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
vevent = &veventq->lost_events_header;
goto out_set_header;
}
- memcpy(vevent->event_data, event_data, data_len);
vevent->data_len = data_len;
+ memcpy(vevent->event_data, event_data, data_len);
veventq->num_events++;
out_set_header:
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index fc4de63b0bce..e23d9ee4fe38 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -393,12 +393,12 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
const struct file_operations *fops)
{
struct file *filep;
- int fdno;
spin_lock_init(&eventq->lock);
INIT_LIST_HEAD(&eventq->deliver);
init_waitqueue_head(&eventq->wait_queue);
+ /* The filep is fput() by the core code on failure */
filep = anon_inode_getfile(name, fops, eventq, O_RDWR);
if (IS_ERR(filep))
return PTR_ERR(filep);
@@ -408,10 +408,7 @@ static int iommufd_eventq_init(struct iommufd_eventq *eventq, char *name,
eventq->filep = filep;
refcount_inc(&eventq->obj.users);
- fdno = get_unused_fd_flags(O_CLOEXEC);
- if (fdno < 0)
- fput(filep);
- return fdno;
+ return get_unused_fd_flags(O_CLOEXEC);
}
static const struct file_operations iommufd_fault_fops =
@@ -452,7 +449,6 @@ int iommufd_fault_alloc(struct iommufd_ucmd *ucmd)
return 0;
out_put_fdno:
put_unused_fd(fdno);
- fput(fault->common.filep);
return rc;
}
@@ -536,7 +532,6 @@ int iommufd_veventq_alloc(struct iommufd_ucmd *ucmd)
out_put_fdno:
put_unused_fd(fdno);
- fput(veventq->common.filep);
out_abort:
iommufd_object_abort_and_destroy(ucmd->ictx, &veventq->common.obj);
out_unlock_veventqs:
diff --git a/drivers/iommu/iommufd/io_pagetable.c b/drivers/iommu/iommufd/io_pagetable.c
index c0360c450880..54cf4d856179 100644
--- a/drivers/iommu/iommufd/io_pagetable.c
+++ b/drivers/iommu/iommufd/io_pagetable.c
@@ -8,8 +8,10 @@
* The datastructure uses the iopt_pages to optimize the storage of the PFNs
* between the domains and xarray.
*/
+#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/file.h>
#include <linux/iommu.h>
#include <linux/iommufd.h>
#include <linux/lockdep.h>
@@ -284,6 +286,9 @@ static int iopt_alloc_area_pages(struct io_pagetable *iopt,
case IOPT_ADDRESS_FILE:
start = elm->start_byte + elm->pages->start;
break;
+ case IOPT_ADDRESS_DMABUF:
+ start = elm->start_byte + elm->pages->dmabuf.start;
+ break;
}
rc = iopt_alloc_iova(iopt, dst_iova, start, length);
if (rc)
@@ -468,25 +473,53 @@ int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
* @iopt: io_pagetable to act on
* @iova: If IOPT_ALLOC_IOVA is set this is unused on input and contains
* the chosen iova on output. Otherwise it is the iova to map to on input
- * @file: file to map
+ * @fd: file descriptor of the file or dma-buf to map
* @start: map file starting at this byte offset
* @length: Number of bytes to map
* @iommu_prot: Combination of IOMMU_READ/WRITE/etc bits for the mapping
* @flags: IOPT_ALLOC_IOVA or zero
*/
int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
- unsigned long *iova, struct file *file,
- unsigned long start, unsigned long length,
- int iommu_prot, unsigned int flags)
+ unsigned long *iova, int fd, unsigned long start,
+ unsigned long length, int iommu_prot,
+ unsigned int flags)
{
struct iopt_pages *pages;
+ struct dma_buf *dmabuf;
+ unsigned long start_byte;
+ unsigned long last;
+
+ if (!length)
+ return -EINVAL;
+ if (check_add_overflow(start, length - 1, &last))
+ return -EOVERFLOW;
+
+ start_byte = start - ALIGN_DOWN(start, PAGE_SIZE);
+ dmabuf = dma_buf_get(fd);
+ if (!IS_ERR(dmabuf)) {
+ pages = iopt_alloc_dmabuf_pages(ictx, dmabuf, start_byte, start,
+ length,
+ iommu_prot & IOMMU_WRITE);
+ if (IS_ERR(pages)) {
+ dma_buf_put(dmabuf);
+ return PTR_ERR(pages);
+ }
+ } else {
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return -EBADF;
+
+ pages = iopt_alloc_file_pages(file, start_byte, start, length,
+ iommu_prot & IOMMU_WRITE);
+ fput(file);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+ }
- pages = iopt_alloc_file_pages(file, start, length,
- iommu_prot & IOMMU_WRITE);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
return iopt_map_common(ictx, iopt, pages, iova, length,
- start - pages->start, iommu_prot, flags);
+ start_byte, iommu_prot, flags);
}
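
From userspace the same IOMMU_IOAS_MAP_FILE ioctl now accepts either a regular file fd or a dma-buf fd, since the kernel tries dma_buf_get() first. A usage sketch, assuming the uapi struct iommu_ioas_map_file and flag names from include/uapi/linux/iommufd.h:

#include <sys/ioctl.h>
#include <linux/iommufd.h>

static int map_fd(int iommufd, __u32 ioas_id, int fd, __u64 len, __u64 *iova)
{
	struct iommu_ioas_map_file cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
		.ioas_id = ioas_id,
		.fd = fd,	/* regular file or dma-buf */
		.start = 0,
		.length = len,
	};
	int rc = ioctl(iommufd, IOMMU_IOAS_MAP_FILE, &cmd);

	if (!rc)
		*iova = cmd.iova;	/* kernel-chosen IOVA */
	return rc;
}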
struct iova_bitmap_fn_arg {
@@ -707,7 +740,8 @@ static int iopt_unmap_iova_range(struct io_pagetable *iopt, unsigned long start,
struct iopt_area *area;
unsigned long unmapped_bytes = 0;
unsigned int tries = 0;
- int rc = -ENOENT;
+ /* If there are no mapped entries then success */
+ int rc = 0;
/*
* The domains_rwsem must be held in read mode any time any area->pages
@@ -777,8 +811,6 @@ again:
down_write(&iopt->iova_rwsem);
}
- if (unmapped_bytes)
- rc = 0;
out_unlock_iova:
up_write(&iopt->iova_rwsem);
@@ -815,13 +847,8 @@ int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
int iopt_unmap_all(struct io_pagetable *iopt, unsigned long *unmapped)
{
- int rc;
-
- rc = iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
/* If the IOVAs are empty then unmap all succeeds */
- if (rc == -ENOENT)
- return 0;
- return rc;
+ return iopt_unmap_iova_range(iopt, 0, ULONG_MAX, unmapped);
}
/* The caller must always free all the nodes in the allowed_iova rb_root. */
@@ -967,9 +994,15 @@ static void iopt_unfill_domain(struct io_pagetable *iopt,
WARN_ON(!area->storage_domain);
if (area->storage_domain == domain)
area->storage_domain = storage_domain;
+ if (iopt_is_dmabuf(pages)) {
+ if (!iopt_dmabuf_revoked(pages))
+ iopt_area_unmap_domain(area, domain);
+ iopt_dmabuf_untrack_domain(pages, area, domain);
+ }
mutex_unlock(&pages->mutex);
- iopt_area_unmap_domain(area, domain);
+ if (!iopt_is_dmabuf(pages))
+ iopt_area_unmap_domain(area, domain);
}
return;
}
@@ -986,6 +1019,8 @@ static void iopt_unfill_domain(struct io_pagetable *iopt,
WARN_ON(area->storage_domain != domain);
area->storage_domain = NULL;
iopt_area_unfill_domain(area, pages, domain);
+ if (iopt_is_dmabuf(pages))
+ iopt_dmabuf_untrack_domain(pages, area, domain);
mutex_unlock(&pages->mutex);
}
}
@@ -1015,10 +1050,16 @@ static int iopt_fill_domain(struct io_pagetable *iopt,
if (!pages)
continue;
- mutex_lock(&pages->mutex);
+ guard(mutex)(&pages->mutex);
+ if (iopt_is_dmabuf(pages)) {
+ rc = iopt_dmabuf_track_domain(pages, area, domain);
+ if (rc)
+ goto out_unfill;
+ }
rc = iopt_area_fill_domain(area, domain);
if (rc) {
- mutex_unlock(&pages->mutex);
+ if (iopt_is_dmabuf(pages))
+ iopt_dmabuf_untrack_domain(pages, area, domain);
goto out_unfill;
}
if (!area->storage_domain) {
@@ -1027,7 +1068,6 @@ static int iopt_fill_domain(struct io_pagetable *iopt,
interval_tree_insert(&area->pages_node,
&pages->domains_itree);
}
- mutex_unlock(&pages->mutex);
}
return 0;
@@ -1048,6 +1088,8 @@ out_unfill:
area->storage_domain = NULL;
}
iopt_area_unfill_domain(area, pages, domain);
+ if (iopt_is_dmabuf(pages))
+ iopt_dmabuf_untrack_domain(pages, area, domain);
mutex_unlock(&pages->mutex);
}
return rc;
@@ -1258,6 +1300,10 @@ static int iopt_area_split(struct iopt_area *area, unsigned long iova)
if (!pages || area->prevent_access)
return -EBUSY;
+ /* Maintaining the domains_itree below is a bit complicated */
+ if (iopt_is_dmabuf(pages))
+ return -EOPNOTSUPP;
+
if (new_start & (alignment - 1) ||
iopt_area_start_byte(area, new_start) & (alignment - 1))
return -EINVAL;
diff --git a/drivers/iommu/iommufd/io_pagetable.h b/drivers/iommu/iommufd/io_pagetable.h
index b6064f4ce4af..14cd052fd320 100644
--- a/drivers/iommu/iommufd/io_pagetable.h
+++ b/drivers/iommu/iommufd/io_pagetable.h
@@ -5,6 +5,7 @@
#ifndef __IO_PAGETABLE_H
#define __IO_PAGETABLE_H
+#include <linux/dma-buf.h>
#include <linux/interval_tree.h>
#include <linux/kref.h>
#include <linux/mutex.h>
@@ -69,6 +70,16 @@ void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
void iopt_area_unmap_domain(struct iopt_area *area,
struct iommu_domain *domain);
+int iopt_dmabuf_track_domain(struct iopt_pages *pages, struct iopt_area *area,
+ struct iommu_domain *domain);
+void iopt_dmabuf_untrack_domain(struct iopt_pages *pages,
+ struct iopt_area *area,
+ struct iommu_domain *domain);
+int iopt_dmabuf_track_all_domains(struct iopt_area *area,
+ struct iopt_pages *pages);
+void iopt_dmabuf_untrack_all_domains(struct iopt_area *area,
+ struct iopt_pages *pages);
+
static inline unsigned long iopt_area_index(struct iopt_area *area)
{
return area->pages_node.start;
@@ -179,7 +190,22 @@ enum {
enum iopt_address_type {
IOPT_ADDRESS_USER = 0,
- IOPT_ADDRESS_FILE = 1,
+ IOPT_ADDRESS_FILE,
+ IOPT_ADDRESS_DMABUF,
+};
+
+struct iopt_pages_dmabuf_track {
+ struct iommu_domain *domain;
+ struct iopt_area *area;
+ struct list_head elm;
+};
+
+struct iopt_pages_dmabuf {
+ struct dma_buf_attachment *attach;
+ struct dma_buf_phys_vec phys;
+ /* Always PAGE_SIZE aligned */
+ unsigned long start;
+ struct list_head tracker;
};
/*
@@ -209,6 +235,8 @@ struct iopt_pages {
struct file *file;
unsigned long start;
};
+ /* IOPT_ADDRESS_DMABUF */
+ struct iopt_pages_dmabuf dmabuf;
};
bool writable:1;
u8 account_mode;
@@ -220,10 +248,32 @@ struct iopt_pages {
struct rb_root_cached domains_itree;
};
+static inline bool iopt_is_dmabuf(struct iopt_pages *pages)
+{
+ if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
+ return false;
+ return pages->type == IOPT_ADDRESS_DMABUF;
+}
+
+static inline bool iopt_dmabuf_revoked(struct iopt_pages *pages)
+{
+ lockdep_assert_held(&pages->mutex);
+ if (iopt_is_dmabuf(pages))
+ return pages->dmabuf.phys.len == 0;
+ return false;
+}
+
struct iopt_pages *iopt_alloc_user_pages(void __user *uptr,
unsigned long length, bool writable);
-struct iopt_pages *iopt_alloc_file_pages(struct file *file, unsigned long start,
+struct iopt_pages *iopt_alloc_file_pages(struct file *file,
+ unsigned long start_byte,
+ unsigned long start,
unsigned long length, bool writable);
+struct iopt_pages *iopt_alloc_dmabuf_pages(struct iommufd_ctx *ictx,
+ struct dma_buf *dmabuf,
+ unsigned long start_byte,
+ unsigned long start,
+ unsigned long length, bool writable);
void iopt_release_pages(struct kref *kref);
static inline void iopt_put_pages(struct iopt_pages *pages)
{
diff --git a/drivers/iommu/iommufd/ioas.c b/drivers/iommu/iommufd/ioas.c
index 1542c5fd10a8..f4721afedadc 100644
--- a/drivers/iommu/iommufd/ioas.c
+++ b/drivers/iommu/iommufd/ioas.c
@@ -207,7 +207,6 @@ int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd)
unsigned long iova = cmd->iova;
struct iommufd_ioas *ioas;
unsigned int flags = 0;
- struct file *file;
int rc;
if (cmd->flags &
@@ -229,11 +228,7 @@ int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd)
if (!(cmd->flags & IOMMU_IOAS_MAP_FIXED_IOVA))
flags = IOPT_ALLOC_IOVA;
- file = fget(cmd->fd);
- if (!file)
- return -EBADF;
-
- rc = iopt_map_file_pages(ucmd->ictx, &ioas->iopt, &iova, file,
+ rc = iopt_map_file_pages(ucmd->ictx, &ioas->iopt, &iova, cmd->fd,
cmd->start, cmd->length,
conv_iommu_prot(cmd->flags), flags);
if (rc)
@@ -243,7 +238,6 @@ int iommufd_ioas_map_file(struct iommufd_ucmd *ucmd)
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
out_put:
iommufd_put_object(ucmd->ictx, &ioas->obj);
- fput(file);
return rc;
}
@@ -367,6 +361,10 @@ int iommufd_ioas_unmap(struct iommufd_ucmd *ucmd)
&unmapped);
if (rc)
goto out_put;
+ if (!unmapped) {
+ rc = -ENOENT;
+ goto out_put;
+ }
}
cmd->length = unmapped;
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 0da2a81eedfa..eb6d1a70f673 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -19,6 +19,8 @@ struct iommu_domain;
struct iommu_group;
struct iommu_option;
struct iommufd_device;
+struct dma_buf_attachment;
+struct dma_buf_phys_vec;
struct iommufd_sw_msi_map {
struct list_head sw_msi_item;
@@ -108,7 +110,7 @@ int iopt_map_user_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
unsigned long length, int iommu_prot,
unsigned int flags);
int iopt_map_file_pages(struct iommufd_ctx *ictx, struct io_pagetable *iopt,
- unsigned long *iova, struct file *file,
+ unsigned long *iova, int fd,
unsigned long start, unsigned long length,
int iommu_prot, unsigned int flags);
int iopt_map_pages(struct io_pagetable *iopt, struct list_head *pages_list,
@@ -454,9 +456,8 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
if (hwpt->obj.type == IOMMUFD_OBJ_HWPT_PAGING) {
struct iommufd_hwpt_paging *hwpt_paging = to_hwpt_paging(hwpt);
- lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
-
if (hwpt_paging->auto_domain) {
+ lockdep_assert_not_held(&hwpt_paging->ioas->mutex);
iommufd_object_put_and_try_destroy(ictx, &hwpt->obj);
return;
}
@@ -505,6 +506,8 @@ void iommufd_device_pre_destroy(struct iommufd_object *obj);
void iommufd_device_destroy(struct iommufd_object *obj);
int iommufd_get_hw_info(struct iommufd_ucmd *ucmd);
+struct device *iommufd_global_device(void);
+
struct iommufd_access {
struct iommufd_object obj;
struct iommufd_ctx *ictx;
@@ -615,7 +618,6 @@ struct iommufd_veventq {
struct iommufd_eventq common;
struct iommufd_viommu *viommu;
struct list_head node; /* for iommufd_viommu::veventqs */
- struct iommufd_vevent lost_events_header;
enum iommu_veventq_type type;
unsigned int depth;
@@ -623,6 +625,9 @@ struct iommufd_veventq {
/* Use common.lock for protection */
u32 num_events;
u32 sequence;
+
+ /* Must be last as it ends in a flexible-array member. */
+ struct iommufd_vevent lost_events_header;
};
static inline struct iommufd_veventq *
@@ -712,6 +717,8 @@ bool iommufd_should_fail(void);
int __init iommufd_test_init(void);
void iommufd_test_exit(void);
bool iommufd_selftest_is_mock_dev(struct device *dev);
+int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
+ struct dma_buf_phys_vec *phys);
#else
static inline void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
unsigned int ioas_id,
@@ -733,5 +740,11 @@ static inline bool iommufd_selftest_is_mock_dev(struct device *dev)
{
return false;
}
+static inline int
+iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
+ struct dma_buf_phys_vec *phys)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index 8fc618b2bcf9..73e73e1ec158 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -29,11 +29,22 @@ enum {
IOMMU_TEST_OP_PASID_REPLACE,
IOMMU_TEST_OP_PASID_DETACH,
IOMMU_TEST_OP_PASID_CHECK_HWPT,
+ IOMMU_TEST_OP_DMABUF_GET,
+ IOMMU_TEST_OP_DMABUF_REVOKE,
};
enum {
+ MOCK_IOMMUPT_DEFAULT = 0,
+ MOCK_IOMMUPT_HUGE,
+ MOCK_IOMMUPT_AMDV1,
+};
+
+/* These values are true for MOCK_IOMMUPT_DEFAULT */
+enum {
MOCK_APERTURE_START = 1UL << 24,
MOCK_APERTURE_LAST = (1UL << 31) - 1,
+ MOCK_PAGE_SIZE = 2048,
+ MOCK_HUGE_PAGE_SIZE = 512 * MOCK_PAGE_SIZE,
};
enum {
@@ -52,7 +63,6 @@ enum {
enum {
MOCK_FLAGS_DEVICE_NO_DIRTY = 1 << 0,
- MOCK_FLAGS_DEVICE_HUGE_IOVA = 1 << 1,
MOCK_FLAGS_DEVICE_PASID = 1 << 2,
};
@@ -176,6 +186,14 @@ struct iommu_test_cmd {
__u32 hwpt_id;
/* @id is stdev_id */
} pasid_check;
+ struct {
+ __u32 length;
+ __u32 open_flags;
+ } dmabuf_get;
+ struct {
+ __s32 dmabuf_fd;
+ __u32 revoked;
+ } dmabuf_revoke;
};
__u32 last;
};
@@ -205,6 +223,7 @@ struct iommu_test_hw_info {
*/
struct iommu_hwpt_selftest {
__u32 iotlb;
+ __u32 pagetable_type;
};
/* Should not be equal to any defined value in enum iommu_hwpt_invalidate_data_type */
diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
index 4514575818fc..b5b67a9d3fb3 100644
--- a/drivers/iommu/iommufd/iova_bitmap.c
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -130,9 +130,8 @@ struct iova_bitmap {
static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap,
unsigned long iova)
{
- unsigned long pgsize = 1UL << bitmap->mapped.pgshift;
-
- return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize);
+ return (iova >> bitmap->mapped.pgshift) /
+ BITS_PER_TYPE(*bitmap->bitmap);
}
/*
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 15af7ced0501..5cc4b08c25f5 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -23,6 +23,7 @@
#include "iommufd_test.h"
struct iommufd_object_ops {
+ size_t file_offset;
void (*pre_destroy)(struct iommufd_object *obj);
void (*destroy)(struct iommufd_object *obj);
void (*abort)(struct iommufd_object *obj);
@@ -121,6 +122,10 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
old = xas_store(&xas, NULL);
xa_unlock(&ictx->objects);
WARN_ON(old != XA_ZERO_ENTRY);
+
+ if (WARN_ON(!refcount_dec_and_test(&obj->users)))
+ return;
+
kfree(obj);
}
@@ -131,10 +136,30 @@ void iommufd_object_abort(struct iommufd_ctx *ictx, struct iommufd_object *obj)
void iommufd_object_abort_and_destroy(struct iommufd_ctx *ictx,
struct iommufd_object *obj)
{
- if (iommufd_object_ops[obj->type].abort)
- iommufd_object_ops[obj->type].abort(obj);
+ const struct iommufd_object_ops *ops = &iommufd_object_ops[obj->type];
+
+ if (ops->file_offset) {
+ struct file **filep = ((void *)obj) + ops->file_offset;
+
+ /*
+ * A file should hold a users refcount while the file is open
+ * and put it back in its release. The file should hold a
+ * pointer to obj in its private data. Normal fput() is
+ * deferred to a workqueue and can get out of order with the
+ * following kfree(obj). Using the sync version ensures the
+ * release happens immediately. During abort we require that the
+ * file refcount is one at this point - meaning the object alloc
+ * function cannot have done anything to allow another thread to
+ * take a refcount before a guaranteed success.
+ */
+ if (*filep)
+ __fput_sync(*filep);
+ }
+
+ if (ops->abort)
+ ops->abort(obj);
else
- iommufd_object_ops[obj->type].destroy(obj);
+ ops->destroy(obj);
iommufd_object_abort(ictx, obj);
}
@@ -550,16 +575,23 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_flags & VM_EXEC)
return -EPERM;
+ mtree_lock(&ictx->mt_mmap);
/* vma->vm_pgoff carries a page-shifted start position to an immap */
immap = mtree_load(&ictx->mt_mmap, vma->vm_pgoff << PAGE_SHIFT);
- if (!immap)
+ if (!immap || !refcount_inc_not_zero(&immap->owner->users)) {
+ mtree_unlock(&ictx->mt_mmap);
return -ENXIO;
+ }
+ mtree_unlock(&ictx->mt_mmap);
+
/*
* mtree_load() returns the immap for any contained mmio_addr, so only
* allow the exact immap itself to be mapped
*/
- if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length)
- return -ENXIO;
+ if (vma->vm_pgoff != immap->vm_pgoff || length != immap->length) {
+ rc = -ENXIO;
+ goto err_refcount;
+ }
vma->vm_pgoff = 0;
vma->vm_private_data = immap;
@@ -570,10 +602,11 @@ static int iommufd_fops_mmap(struct file *filp, struct vm_area_struct *vma)
immap->mmio_addr >> PAGE_SHIFT, length,
vma->vm_page_prot);
if (rc)
- return rc;
+ goto err_refcount;
+ return 0;
- /* vm_ops.open won't be called for mmap itself. */
- refcount_inc(&immap->owner->users);
+err_refcount:
+ refcount_dec(&immap->owner->users);
return rc;
}
@@ -651,6 +684,12 @@ void iommufd_ctx_put(struct iommufd_ctx *ictx)
}
EXPORT_SYMBOL_NS_GPL(iommufd_ctx_put, "IOMMUFD");
+#define IOMMUFD_FILE_OFFSET(_struct, _filep, _obj) \
+ .file_offset = (offsetof(_struct, _filep) + \
+ BUILD_BUG_ON_ZERO(!__same_type( \
+ struct file *, ((_struct *)NULL)->_filep)) + \
+ BUILD_BUG_ON_ZERO(offsetof(_struct, _obj)))
+
static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_ACCESS] = {
.destroy = iommufd_access_destroy_object,
@@ -661,6 +700,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
},
[IOMMUFD_OBJ_FAULT] = {
.destroy = iommufd_fault_destroy,
+ IOMMUFD_FILE_OFFSET(struct iommufd_fault, common.filep, common.obj),
},
[IOMMUFD_OBJ_HW_QUEUE] = {
.destroy = iommufd_hw_queue_destroy,
@@ -683,6 +723,7 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
[IOMMUFD_OBJ_VEVENTQ] = {
.destroy = iommufd_veventq_destroy,
.abort = iommufd_veventq_abort,
+ IOMMUFD_FILE_OFFSET(struct iommufd_veventq, common.filep, common.obj),
},
[IOMMUFD_OBJ_VIOMMU] = {
.destroy = iommufd_viommu_destroy,
@@ -710,6 +751,15 @@ static struct miscdevice vfio_misc_dev = {
.mode = 0666,
};
+/*
+ * Used only by DMABUF, returns a valid struct device to use as a dummy struct
+ * device for attachment.
+ */
+struct device *iommufd_global_device(void)
+{
+ return iommu_misc_dev.this_device;
+}
+
static int __init iommufd_init(void)
{
int ret;
@@ -753,5 +803,6 @@ MODULE_ALIAS("devname:vfio/vfio");
#endif
MODULE_IMPORT_NS("IOMMUFD_INTERNAL");
MODULE_IMPORT_NS("IOMMUFD");
+MODULE_IMPORT_NS("DMA_BUF");
MODULE_DESCRIPTION("I/O Address Space Management for passthrough devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index c3433b845561..dbe51ecb9a20 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -45,6 +45,8 @@
* last_iova + 1 can overflow. An iopt_pages index will always be much less than
* ULONG_MAX so last_index + 1 cannot overflow.
*/
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
@@ -53,6 +55,7 @@
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
+#include <linux/vfio_pci_core.h>
#include "double_span.h"
#include "io_pagetable.h"
@@ -258,6 +261,11 @@ static struct iopt_area *iopt_pages_find_domain_area(struct iopt_pages *pages,
return container_of(node, struct iopt_area, pages_node);
}
+enum batch_kind {
+ BATCH_CPU_MEMORY = 0,
+ BATCH_MMIO,
+};
+
/*
* A simple datastructure to hold a vector of PFNs, optimized for contiguous
* PFNs. This is used as a temporary holding memory for shuttling pfns from one
@@ -271,7 +279,9 @@ struct pfn_batch {
unsigned int array_size;
unsigned int end;
unsigned int total_pfns;
+ enum batch_kind kind;
};
+enum { MAX_NPFNS = type_max(typeof(((struct pfn_batch *)0)->npfns[0])) };
static void batch_clear(struct pfn_batch *batch)
{
@@ -348,11 +358,17 @@ static void batch_destroy(struct pfn_batch *batch, void *backup)
}
static bool batch_add_pfn_num(struct pfn_batch *batch, unsigned long pfn,
- u32 nr)
+ u32 nr, enum batch_kind kind)
{
- const unsigned int MAX_NPFNS = type_max(typeof(*batch->npfns));
unsigned int end = batch->end;
+ if (batch->kind != kind) {
+ /* One kind per batch */
+ if (batch->end != 0)
+ return false;
+ batch->kind = kind;
+ }
+
if (end && pfn == batch->pfns[end - 1] + batch->npfns[end - 1] &&
nr <= MAX_NPFNS - batch->npfns[end - 1]) {
batch->npfns[end - 1] += nr;
@@ -379,7 +395,7 @@ static void batch_remove_pfn_num(struct pfn_batch *batch, unsigned long nr)
/* true if the pfn was added, false otherwise */
static bool batch_add_pfn(struct pfn_batch *batch, unsigned long pfn)
{
- return batch_add_pfn_num(batch, pfn, 1);
+ return batch_add_pfn_num(batch, pfn, 1, BATCH_CPU_MEMORY);
}
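
Since a batch now carries exactly one kind, a caller that crosses from CPU memory into MMIO must map and clear the batch before continuing. A unit-style sketch of the invariant, reusing the file-local helpers above with made-up pfn values:

/* Hypothetical check of the one-kind-per-batch rule */
static void example_batch_kinds(struct pfn_batch *batch)
{
	batch_clear(batch);

	/* CPU pfns accumulate normally */
	WARN_ON(!batch_add_pfn_num(batch, 0x1000, 8, BATCH_CPU_MEMORY));

	/* A different kind on a non-empty batch is refused... */
	WARN_ON(batch_add_pfn_num(batch, 0x2000, 4, BATCH_MMIO));

	/* ...so the caller maps the batch, clears it, and retries */
	batch_clear(batch);
	WARN_ON(!batch_add_pfn_num(batch, 0x2000, 4, BATCH_MMIO));
}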
/*
@@ -492,6 +508,7 @@ static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
{
bool disable_large_pages = area->iopt->disable_large_pages;
unsigned long last_iova = iopt_area_last_iova(area);
+ int iommu_prot = area->iommu_prot;
unsigned int page_offset = 0;
unsigned long start_iova;
unsigned long next_iova;
@@ -499,6 +516,11 @@ static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
unsigned long iova;
int rc;
+ if (batch->kind == BATCH_MMIO) {
+ iommu_prot &= ~IOMMU_CACHE;
+ iommu_prot |= IOMMU_MMIO;
+ }
+
/* The first index might be a partial page */
if (start_index == iopt_area_index(area))
page_offset = area->page_offset;
@@ -512,11 +534,11 @@ static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
rc = batch_iommu_map_small(
domain, iova,
PFN_PHYS(batch->pfns[cur]) + page_offset,
- next_iova - iova, area->iommu_prot);
+ next_iova - iova, iommu_prot);
else
rc = iommu_map(domain, iova,
PFN_PHYS(batch->pfns[cur]) + page_offset,
- next_iova - iova, area->iommu_prot,
+ next_iova - iova, iommu_prot,
GFP_KERNEL_ACCOUNT);
if (rc)
goto err_unmap;
@@ -652,7 +674,7 @@ static int batch_from_folios(struct pfn_batch *batch, struct folio ***folios_p,
nr = min(nr, npages);
npages -= nr;
- if (!batch_add_pfn_num(batch, pfn, nr))
+ if (!batch_add_pfn_num(batch, pfn, nr, BATCH_CPU_MEMORY))
break;
if (nr > 1) {
rc = folio_add_pins(folio, nr - 1);
@@ -1054,6 +1076,41 @@ static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
return iopt_pages_update_pinned(pages, npages, inc, user);
}
+struct pfn_reader_dmabuf {
+ struct dma_buf_phys_vec phys;
+ unsigned long start_offset;
+};
+
+static int pfn_reader_dmabuf_init(struct pfn_reader_dmabuf *dmabuf,
+ struct iopt_pages *pages)
+{
+ /* Callers must not get here if the dmabuf was already revoked */
+ if (WARN_ON(iopt_dmabuf_revoked(pages)))
+ return -EINVAL;
+
+ dmabuf->phys = pages->dmabuf.phys;
+ dmabuf->start_offset = pages->dmabuf.start;
+ return 0;
+}
+
+static int pfn_reader_fill_dmabuf(struct pfn_reader_dmabuf *dmabuf,
+ struct pfn_batch *batch,
+ unsigned long start_index,
+ unsigned long last_index)
+{
+ unsigned long start = dmabuf->start_offset + start_index * PAGE_SIZE;
+
+ /*
+ * start_index/last_index and start are all PAGE_SIZE aligned; the batch is
+ * always filled using page size aligned PFNs just like the other types.
+ * If the dmabuf has been sliced on a sub page offset then the common
+ * batch to domain code will adjust it before mapping to the domain.
+ */
+ batch_add_pfn_num(batch, PHYS_PFN(dmabuf->phys.paddr + start),
+ last_index - start_index + 1, BATCH_MMIO);
+ return 0;
+}
+
/*
* PFNs are stored in three places, in order of preference:
* - The iopt_pages xarray. This is only populated if there is a
@@ -1072,7 +1129,10 @@ struct pfn_reader {
unsigned long batch_end_index;
unsigned long last_index;
- struct pfn_reader_user user;
+ union {
+ struct pfn_reader_user user;
+ struct pfn_reader_dmabuf dmabuf;
+ };
};
static int pfn_reader_update_pinned(struct pfn_reader *pfns)
@@ -1108,7 +1168,7 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns)
{
struct interval_tree_double_span_iter *span = &pfns->span;
unsigned long start_index = pfns->batch_end_index;
- struct pfn_reader_user *user = &pfns->user;
+ struct pfn_reader_user *user;
unsigned long npages;
struct iopt_area *area;
int rc;
@@ -1140,8 +1200,13 @@ static int pfn_reader_fill_span(struct pfn_reader *pfns)
return 0;
}
- if (start_index >= pfns->user.upages_end) {
- rc = pfn_reader_user_pin(&pfns->user, pfns->pages, start_index,
+ if (iopt_is_dmabuf(pfns->pages))
+ return pfn_reader_fill_dmabuf(&pfns->dmabuf, &pfns->batch,
+ start_index, span->last_hole);
+
+ user = &pfns->user;
+ if (start_index >= user->upages_end) {
+ rc = pfn_reader_user_pin(user, pfns->pages, start_index,
span->last_hole);
if (rc)
return rc;
@@ -1209,7 +1274,10 @@ static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages,
pfns->batch_start_index = start_index;
pfns->batch_end_index = start_index;
pfns->last_index = last_index;
- pfn_reader_user_init(&pfns->user, pages);
+ if (iopt_is_dmabuf(pages))
+ pfn_reader_dmabuf_init(&pfns->dmabuf, pages);
+ else
+ pfn_reader_user_init(&pfns->user, pages);
rc = batch_init(&pfns->batch, last_index - start_index + 1);
if (rc)
return rc;
@@ -1230,8 +1298,12 @@ static int pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages,
static void pfn_reader_release_pins(struct pfn_reader *pfns)
{
struct iopt_pages *pages = pfns->pages;
- struct pfn_reader_user *user = &pfns->user;
+ struct pfn_reader_user *user;
+
+ if (iopt_is_dmabuf(pages))
+ return;
+ user = &pfns->user;
if (user->upages_end > pfns->batch_end_index) {
/* Any pages not transferred to the batch are just unpinned */
@@ -1261,7 +1333,8 @@ static void pfn_reader_destroy(struct pfn_reader *pfns)
struct iopt_pages *pages = pfns->pages;
pfn_reader_release_pins(pfns);
- pfn_reader_user_destroy(&pfns->user, pfns->pages);
+ if (!iopt_is_dmabuf(pfns->pages))
+ pfn_reader_user_destroy(&pfns->user, pfns->pages);
batch_destroy(&pfns->batch, NULL);
WARN_ON(pages->last_npinned != pages->npinned);
}
@@ -1340,26 +1413,234 @@ struct iopt_pages *iopt_alloc_user_pages(void __user *uptr,
return pages;
}
-struct iopt_pages *iopt_alloc_file_pages(struct file *file, unsigned long start,
+struct iopt_pages *iopt_alloc_file_pages(struct file *file,
+ unsigned long start_byte,
+ unsigned long start,
unsigned long length, bool writable)
{
struct iopt_pages *pages;
- unsigned long start_down = ALIGN_DOWN(start, PAGE_SIZE);
- unsigned long end;
- if (length && check_add_overflow(start, length - 1, &end))
- return ERR_PTR(-EOVERFLOW);
-
- pages = iopt_alloc_pages(start - start_down, length, writable);
+ pages = iopt_alloc_pages(start_byte, length, writable);
if (IS_ERR(pages))
return pages;
pages->file = get_file(file);
- pages->start = start_down;
+ pages->start = start - start_byte;
pages->type = IOPT_ADDRESS_FILE;
return pages;
}
+static void iopt_revoke_notify(struct dma_buf_attachment *attach)
+{
+ struct iopt_pages *pages = attach->importer_priv;
+ struct iopt_pages_dmabuf_track *track;
+
+ guard(mutex)(&pages->mutex);
+ if (iopt_dmabuf_revoked(pages))
+ return;
+
+ list_for_each_entry(track, &pages->dmabuf.tracker, elm) {
+ struct iopt_area *area = track->area;
+
+ iopt_area_unmap_domain_range(area, track->domain,
+ iopt_area_index(area),
+ iopt_area_last_index(area));
+ }
+ pages->dmabuf.phys.len = 0;
+}
+
+static struct dma_buf_attach_ops iopt_dmabuf_attach_revoke_ops = {
+ .allow_peer2peer = true,
+ .move_notify = iopt_revoke_notify,
+};
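
For context, the revoke above is driven from the exporter side. A sketch of a vfio-style exporter triggering it, assuming the exporter uses dma_buf_move_notify() while holding the reservation lock:

static void example_revoke(struct dma_buf *dmabuf)
{
	dma_resv_lock(dmabuf->resv, NULL);
	/* Invokes move_notify (iopt_revoke_notify here) on every importer */
	dma_buf_move_notify(dmabuf);
	dma_resv_unlock(dmabuf->resv);
}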
+
+/*
+ * iommufd and vfio have a circular dependency. Future work for a phys
+ * based private interconnect will remove this.
+ */
+static int
+sym_vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
+ struct dma_buf_phys_vec *phys)
+{
+ typeof(&vfio_pci_dma_buf_iommufd_map) fn;
+ int rc;
+
+ rc = iommufd_test_dma_buf_iommufd_map(attachment, phys);
+ if (rc != -EOPNOTSUPP)
+ return rc;
+
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_DMABUF))
+ return -EOPNOTSUPP;
+
+ fn = symbol_get(vfio_pci_dma_buf_iommufd_map);
+ if (!fn)
+ return -EOPNOTSUPP;
+ rc = fn(attachment, phys);
+ symbol_put(vfio_pci_dma_buf_iommufd_map);
+ return rc;
+}
+
+static int iopt_map_dmabuf(struct iommufd_ctx *ictx, struct iopt_pages *pages,
+ struct dma_buf *dmabuf)
+{
+ struct dma_buf_attachment *attach;
+ int rc;
+
+ attach = dma_buf_dynamic_attach(dmabuf, iommufd_global_device(),
+ &iopt_dmabuf_attach_revoke_ops, pages);
+ if (IS_ERR(attach))
+ return PTR_ERR(attach);
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ /*
+ * Lock ordering requires the mutex to be taken inside the reservation,
+ * make sure lockdep sees this.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ mutex_lock(&pages->mutex);
+ mutex_unlock(&pages->mutex);
+ }
+
+ rc = sym_vfio_pci_dma_buf_iommufd_map(attach, &pages->dmabuf.phys);
+ if (rc)
+ goto err_detach;
+
+ dma_resv_unlock(dmabuf->resv);
+
+ /* On success iopt_release_pages() will detach and put the dmabuf. */
+ pages->dmabuf.attach = attach;
+ return 0;
+
+err_detach:
+ dma_resv_unlock(dmabuf->resv);
+ dma_buf_detach(dmabuf, attach);
+ return rc;
+}
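
The lock/unlock pair above exists only to teach lockdep the resv -> pages->mutex ordering before it can occur for real. The general idiom, as a standalone sketch with hypothetical locks:

/* Generic lockdep-priming idiom: record outer -> inner ordering early */
static void prime_ordering(struct mutex *outer, struct mutex *inner)
{
	mutex_lock(outer);
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		mutex_lock(inner);	/* records outer -> inner */
		mutex_unlock(inner);
	}
	mutex_unlock(outer);
}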
+
+struct iopt_pages *iopt_alloc_dmabuf_pages(struct iommufd_ctx *ictx,
+ struct dma_buf *dmabuf,
+ unsigned long start_byte,
+ unsigned long start,
+ unsigned long length, bool writable)
+{
+ static struct lock_class_key pages_dmabuf_mutex_key;
+ struct iopt_pages *pages;
+ int rc;
+
+ if (!IS_ENABLED(CONFIG_DMA_SHARED_BUFFER))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (dmabuf->size <= (start + length - 1) ||
+ length / PAGE_SIZE >= MAX_NPFNS)
+ return ERR_PTR(-EINVAL);
+
+ pages = iopt_alloc_pages(start_byte, length, writable);
+ if (IS_ERR(pages))
+ return pages;
+
+ /*
+ * The mmap_lock can be held when obtaining the dmabuf reservation lock,
+ * which creates a locking cycle with the pages mutex that is held while
+ * obtaining the mmap_lock. This locking path is not present for
+ * IOPT_ADDRESS_DMABUF, so split the lock class.
+ */
+ lockdep_set_class(&pages->mutex, &pages_dmabuf_mutex_key);
+
+ /* dmabuf does not use pinned page accounting. */
+ pages->account_mode = IOPT_PAGES_ACCOUNT_NONE;
+ pages->type = IOPT_ADDRESS_DMABUF;
+ pages->dmabuf.start = start - start_byte;
+ INIT_LIST_HEAD(&pages->dmabuf.tracker);
+
+ rc = iopt_map_dmabuf(ictx, pages, dmabuf);
+ if (rc) {
+ iopt_put_pages(pages);
+ return ERR_PTR(rc);
+ }
+
+ return pages;
+}
+
+int iopt_dmabuf_track_domain(struct iopt_pages *pages, struct iopt_area *area,
+ struct iommu_domain *domain)
+{
+ struct iopt_pages_dmabuf_track *track;
+
+ lockdep_assert_held(&pages->mutex);
+ if (WARN_ON(!iopt_is_dmabuf(pages)))
+ return -EINVAL;
+
+ list_for_each_entry(track, &pages->dmabuf.tracker, elm)
+ if (WARN_ON(track->domain == domain && track->area == area))
+ return -EINVAL;
+
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (!track)
+ return -ENOMEM;
+ track->domain = domain;
+ track->area = area;
+ list_add_tail(&track->elm, &pages->dmabuf.tracker);
+
+ return 0;
+}
+
+void iopt_dmabuf_untrack_domain(struct iopt_pages *pages,
+ struct iopt_area *area,
+ struct iommu_domain *domain)
+{
+ struct iopt_pages_dmabuf_track *track;
+
+ lockdep_assert_held(&pages->mutex);
+ WARN_ON(!iopt_is_dmabuf(pages));
+
+ list_for_each_entry(track, &pages->dmabuf.tracker, elm) {
+ if (track->domain == domain && track->area == area) {
+ list_del(&track->elm);
+ kfree(track);
+ return;
+ }
+ }
+ WARN_ON(true);
+}
+
+int iopt_dmabuf_track_all_domains(struct iopt_area *area,
+ struct iopt_pages *pages)
+{
+ struct iopt_pages_dmabuf_track *track;
+ struct iommu_domain *domain;
+ unsigned long index;
+ int rc;
+
+ list_for_each_entry(track, &pages->dmabuf.tracker, elm)
+ if (WARN_ON(track->area == area))
+ return -EINVAL;
+
+ xa_for_each(&area->iopt->domains, index, domain) {
+ rc = iopt_dmabuf_track_domain(pages, area, domain);
+ if (rc)
+ goto err_untrack;
+ }
+ return 0;
+err_untrack:
+ iopt_dmabuf_untrack_all_domains(area, pages);
+ return rc;
+}
+
+void iopt_dmabuf_untrack_all_domains(struct iopt_area *area,
+ struct iopt_pages *pages)
+{
+ struct iopt_pages_dmabuf_track *track;
+ struct iopt_pages_dmabuf_track *tmp;
+
+ list_for_each_entry_safe(track, tmp, &pages->dmabuf.tracker,
+ elm) {
+ if (track->area == area) {
+ list_del(&track->elm);
+ kfree(track);
+ }
+ }
+}
+
void iopt_release_pages(struct kref *kref)
{
struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref);
@@ -1372,8 +1653,15 @@ void iopt_release_pages(struct kref *kref)
mutex_destroy(&pages->mutex);
put_task_struct(pages->source_task);
free_uid(pages->source_user);
- if (pages->type == IOPT_ADDRESS_FILE)
+ if (iopt_is_dmabuf(pages) && pages->dmabuf.attach) {
+ struct dma_buf *dmabuf = pages->dmabuf.attach->dmabuf;
+
+ dma_buf_detach(dmabuf, pages->dmabuf.attach);
+ dma_buf_put(dmabuf);
+ WARN_ON(!list_empty(&pages->dmabuf.tracker));
+ } else if (pages->type == IOPT_ADDRESS_FILE) {
fput(pages->file);
+ }
kfree(pages);
}
@@ -1451,6 +1739,14 @@ static void __iopt_area_unfill_domain(struct iopt_area *area,
lockdep_assert_held(&pages->mutex);
+ if (iopt_is_dmabuf(pages)) {
+ if (WARN_ON(iopt_dmabuf_revoked(pages)))
+ return;
+ iopt_area_unmap_domain_range(area, domain, start_index,
+ last_index);
+ return;
+ }
+
/*
* For security we must not unpin something that is still DMA mapped,
* so this must unmap any IOVA before we go ahead and unpin the pages.
@@ -1526,6 +1822,9 @@ void iopt_area_unmap_domain(struct iopt_area *area, struct iommu_domain *domain)
void iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages,
struct iommu_domain *domain)
{
+ if (iopt_dmabuf_revoked(pages))
+ return;
+
__iopt_area_unfill_domain(area, pages, domain,
iopt_area_last_index(area));
}
@@ -1546,6 +1845,9 @@ int iopt_area_fill_domain(struct iopt_area *area, struct iommu_domain *domain)
lockdep_assert_held(&area->pages->mutex);
+ if (iopt_dmabuf_revoked(area->pages))
+ return 0;
+
rc = pfn_reader_first(&pfns, area->pages, iopt_area_index(area),
iopt_area_last_index(area));
if (rc)
@@ -1605,33 +1907,44 @@ int iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages)
return 0;
mutex_lock(&pages->mutex);
- rc = pfn_reader_first(&pfns, pages, iopt_area_index(area),
- iopt_area_last_index(area));
- if (rc)
- goto out_unlock;
+ if (iopt_is_dmabuf(pages)) {
+ rc = iopt_dmabuf_track_all_domains(area, pages);
+ if (rc)
+ goto out_unlock;
+ }
- while (!pfn_reader_done(&pfns)) {
- done_first_end_index = pfns.batch_end_index;
- done_all_end_index = pfns.batch_start_index;
- xa_for_each(&area->iopt->domains, index, domain) {
- rc = batch_to_domain(&pfns.batch, domain, area,
- pfns.batch_start_index);
+ if (!iopt_dmabuf_revoked(pages)) {
+ rc = pfn_reader_first(&pfns, pages, iopt_area_index(area),
+ iopt_area_last_index(area));
+ if (rc)
+ goto out_untrack;
+
+ while (!pfn_reader_done(&pfns)) {
+ done_first_end_index = pfns.batch_end_index;
+ done_all_end_index = pfns.batch_start_index;
+ xa_for_each(&area->iopt->domains, index, domain) {
+ rc = batch_to_domain(&pfns.batch, domain, area,
+ pfns.batch_start_index);
+ if (rc)
+ goto out_unmap;
+ }
+ done_all_end_index = done_first_end_index;
+
+ rc = pfn_reader_next(&pfns);
if (rc)
goto out_unmap;
}
- done_all_end_index = done_first_end_index;
-
- rc = pfn_reader_next(&pfns);
+ rc = pfn_reader_update_pinned(&pfns);
if (rc)
goto out_unmap;
+
+ pfn_reader_destroy(&pfns);
}
- rc = pfn_reader_update_pinned(&pfns);
- if (rc)
- goto out_unmap;
area->storage_domain = xa_load(&area->iopt->domains, 0);
interval_tree_insert(&area->pages_node, &pages->domains_itree);
- goto out_destroy;
+ mutex_unlock(&pages->mutex);
+ return 0;
out_unmap:
pfn_reader_release_pins(&pfns);
@@ -1658,8 +1971,10 @@ out_unmap:
end_index);
}
}
-out_destroy:
pfn_reader_destroy(&pfns);
+out_untrack:
+ if (iopt_is_dmabuf(pages))
+ iopt_dmabuf_untrack_all_domains(area, pages);
out_unlock:
mutex_unlock(&pages->mutex);
return rc;
@@ -1685,16 +2000,22 @@ void iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
if (!area->storage_domain)
goto out_unlock;
- xa_for_each(&iopt->domains, index, domain)
- if (domain != area->storage_domain)
+ xa_for_each(&iopt->domains, index, domain) {
+ if (domain == area->storage_domain)
+ continue;
+
+ if (!iopt_dmabuf_revoked(pages))
iopt_area_unmap_domain_range(
area, domain, iopt_area_index(area),
iopt_area_last_index(area));
+ }
if (IS_ENABLED(CONFIG_IOMMUFD_TEST))
WARN_ON(RB_EMPTY_NODE(&area->pages_node.rb));
interval_tree_remove(&area->pages_node, &pages->domains_itree);
iopt_area_unfill_domain(area, pages, area->storage_domain);
+ if (iopt_is_dmabuf(pages))
+ iopt_dmabuf_untrack_all_domains(area, pages);
area->storage_domain = NULL;
out_unlock:
mutex_unlock(&pages->mutex);
@@ -2031,15 +2352,14 @@ int iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte,
if ((flags & IOMMUFD_ACCESS_RW_WRITE) && !pages->writable)
return -EPERM;
- if (pages->type == IOPT_ADDRESS_FILE)
+ if (iopt_is_dmabuf(pages))
+ return -EINVAL;
+
+ if (pages->type != IOPT_ADDRESS_USER)
return iopt_pages_rw_slow(pages, start_index, last_index,
start_byte % PAGE_SIZE, data, length,
flags);
- if (IS_ENABLED(CONFIG_IOMMUFD_TEST) &&
- WARN_ON(pages->type != IOPT_ADDRESS_USER))
- return -EINVAL;
-
if (!(flags & IOMMUFD_ACCESS_RW_KTHREAD) && change_mm) {
if (start_index == last_index)
return iopt_pages_rw_page(pages, start_index,
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index 61686603c769..c4322fd26f93 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -5,6 +5,8 @@
*/
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <linux/fault-inject.h>
#include <linux/file.h>
#include <linux/iommu.h>
@@ -12,6 +14,8 @@
#include <linux/slab.h>
#include <linux/xarray.h>
#include <uapi/linux/iommufd.h>
+#include <linux/generic_pt/iommu.h>
+#include "../iommu-pages.h"
#include "../iommu-priv.h"
#include "io_pagetable.h"
@@ -41,21 +45,6 @@ static DEFINE_IDA(mock_dev_ida);
enum {
MOCK_DIRTY_TRACK = 1,
- MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
- MOCK_HUGE_PAGE_SIZE = 512 * MOCK_IO_PAGE_SIZE,
-
- /*
- * Like a real page table alignment requires the low bits of the address
- * to be zero. xarray also requires the high bit to be zero, so we store
- * the pfns shifted. The upper bits are used for metadata.
- */
- MOCK_PFN_MASK = ULONG_MAX / MOCK_IO_PAGE_SIZE,
-
- _MOCK_PFN_START = MOCK_PFN_MASK + 1,
- MOCK_PFN_START_IOVA = _MOCK_PFN_START,
- MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
- MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
- MOCK_PFN_HUGE_IOVA = _MOCK_PFN_START << 2,
};
static int mock_dev_enable_iopf(struct device *dev, struct iommu_domain *domain);
@@ -124,10 +113,15 @@ void iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd,
}
struct mock_iommu_domain {
+ union {
+ struct iommu_domain domain;
+ struct pt_iommu iommu;
+ struct pt_iommu_amdv1 amdv1;
+ };
unsigned long flags;
- struct iommu_domain domain;
- struct xarray pfns;
};
+PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, iommu, domain);
+PT_IOMMU_CHECK_DOMAIN(struct mock_iommu_domain, amdv1.iommu, domain);
static inline struct mock_iommu_domain *
to_mock_domain(struct iommu_domain *domain)
@@ -216,7 +210,7 @@ static inline struct selftest_obj *to_selftest_obj(struct iommufd_object *obj)
}
static int mock_domain_nop_attach(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
struct mock_dev *mdev = to_mock_dev(dev);
struct mock_viommu *new_viommu = NULL;
@@ -344,74 +338,6 @@ static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
return 0;
}
-static bool mock_test_and_clear_dirty(struct mock_iommu_domain *mock,
- unsigned long iova, size_t page_size,
- unsigned long flags)
-{
- unsigned long cur, end = iova + page_size - 1;
- bool dirty = false;
- void *ent, *old;
-
- for (cur = iova; cur < end; cur += MOCK_IO_PAGE_SIZE) {
- ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
- if (!ent || !(xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA))
- continue;
-
- dirty = true;
- /* Clear dirty */
- if (!(flags & IOMMU_DIRTY_NO_CLEAR)) {
- unsigned long val;
-
- val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
- old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
- xa_mk_value(val), GFP_KERNEL);
- WARN_ON_ONCE(ent != old);
- }
- }
-
- return dirty;
-}
-
-static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
- unsigned long iova, size_t size,
- unsigned long flags,
- struct iommu_dirty_bitmap *dirty)
-{
- struct mock_iommu_domain *mock = to_mock_domain(domain);
- unsigned long end = iova + size;
- void *ent;
-
- if (!(mock->flags & MOCK_DIRTY_TRACK) && dirty->bitmap)
- return -EINVAL;
-
- do {
- unsigned long pgsize = MOCK_IO_PAGE_SIZE;
- unsigned long head;
-
- ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
- if (!ent) {
- iova += pgsize;
- continue;
- }
-
- if (xa_to_value(ent) & MOCK_PFN_HUGE_IOVA)
- pgsize = MOCK_HUGE_PAGE_SIZE;
- head = iova & ~(pgsize - 1);
-
- /* Clear dirty */
- if (mock_test_and_clear_dirty(mock, head, pgsize, flags))
- iommu_dirty_bitmap_record(dirty, iova, pgsize);
- iova += pgsize;
- } while (iova < end);
-
- return 0;
-}
-
-static const struct iommu_dirty_ops dirty_ops = {
- .set_dirty_tracking = mock_domain_set_dirty_tracking,
- .read_and_clear_dirty = mock_domain_read_and_clear_dirty,
-};
-
static struct mock_iommu_domain_nested *
__mock_domain_alloc_nested(const struct iommu_user_data *user_data)
{
@@ -446,7 +372,7 @@ mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
if (flags & ~IOMMU_HWPT_ALLOC_PASID)
return ERR_PTR(-EOPNOTSUPP);
- if (!parent || parent->ops != mock_ops.default_domain_ops)
+ if (!parent || !(parent->type & __IOMMU_DOMAIN_PAGING))
return ERR_PTR(-EINVAL);
mock_parent = to_mock_domain(parent);
@@ -459,159 +385,170 @@ mock_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
return &mock_nested->domain;
}
-static struct iommu_domain *
-mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
- const struct iommu_user_data *user_data)
-{
- bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
- const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
- IOMMU_HWPT_ALLOC_NEST_PARENT |
- IOMMU_HWPT_ALLOC_PASID;
- struct mock_dev *mdev = to_mock_dev(dev);
- bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
- struct mock_iommu_domain *mock;
-
- if (user_data)
- return ERR_PTR(-EOPNOTSUPP);
- if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
- return ERR_PTR(-EOPNOTSUPP);
-
- mock = kzalloc(sizeof(*mock), GFP_KERNEL);
- if (!mock)
- return ERR_PTR(-ENOMEM);
- mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
- mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
- mock->domain.pgsize_bitmap = MOCK_IO_PAGE_SIZE;
- if (dev && mdev->flags & MOCK_FLAGS_DEVICE_HUGE_IOVA)
- mock->domain.pgsize_bitmap |= MOCK_HUGE_PAGE_SIZE;
- mock->domain.ops = mock_ops.default_domain_ops;
- mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
- xa_init(&mock->pfns);
-
- if (has_dirty_flag)
- mock->domain.dirty_ops = &dirty_ops;
- return &mock->domain;
-}
-
static void mock_domain_free(struct iommu_domain *domain)
{
struct mock_iommu_domain *mock = to_mock_domain(domain);
- WARN_ON(!xa_empty(&mock->pfns));
+ pt_iommu_deinit(&mock->iommu);
kfree(mock);
}
-static int mock_domain_map_pages(struct iommu_domain *domain,
- unsigned long iova, phys_addr_t paddr,
- size_t pgsize, size_t pgcount, int prot,
- gfp_t gfp, size_t *mapped)
+static void mock_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
{
- struct mock_iommu_domain *mock = to_mock_domain(domain);
- unsigned long flags = MOCK_PFN_START_IOVA;
- unsigned long start_iova = iova;
+ iommu_put_pages_list(&gather->freelist);
+}
- /*
- * xarray does not reliably work with fault injection because it does a
- * retry allocation, so put our own failure point.
- */
- if (iommufd_should_fail())
- return -ENOENT;
+static const struct iommu_domain_ops amdv1_mock_ops = {
+ IOMMU_PT_DOMAIN_OPS(amdv1_mock),
+ .free = mock_domain_free,
+ .attach_dev = mock_domain_nop_attach,
+ .set_dev_pasid = mock_domain_set_dev_pasid_nop,
+ .iotlb_sync = &mock_iotlb_sync,
+};
- WARN_ON(iova % MOCK_IO_PAGE_SIZE);
- WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
- for (; pgcount; pgcount--) {
- size_t cur;
+static const struct iommu_domain_ops amdv1_mock_huge_ops = {
+ IOMMU_PT_DOMAIN_OPS(amdv1_mock),
+ .free = mock_domain_free,
+ .attach_dev = mock_domain_nop_attach,
+ .set_dev_pasid = mock_domain_set_dev_pasid_nop,
+ .iotlb_sync = &mock_iotlb_sync,
+};
+#undef pt_iommu_amdv1_mock_map_pages
- for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
- void *old;
+static const struct iommu_dirty_ops amdv1_mock_dirty_ops = {
+ IOMMU_PT_DIRTY_OPS(amdv1_mock),
+ .set_dirty_tracking = mock_domain_set_dirty_tracking,
+};
- if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
- flags = MOCK_PFN_LAST_IOVA;
- if (pgsize != MOCK_IO_PAGE_SIZE) {
- flags |= MOCK_PFN_HUGE_IOVA;
- }
- old = xa_store(&mock->pfns, iova / MOCK_IO_PAGE_SIZE,
- xa_mk_value((paddr / MOCK_IO_PAGE_SIZE) |
- flags),
- gfp);
- if (xa_is_err(old)) {
- for (; start_iova != iova;
- start_iova += MOCK_IO_PAGE_SIZE)
- xa_erase(&mock->pfns,
- start_iova /
- MOCK_IO_PAGE_SIZE);
- return xa_err(old);
- }
- WARN_ON(old);
- iova += MOCK_IO_PAGE_SIZE;
- paddr += MOCK_IO_PAGE_SIZE;
- *mapped += MOCK_IO_PAGE_SIZE;
- flags = 0;
- }
- }
- return 0;
-}
+static const struct iommu_domain_ops amdv1_ops = {
+ IOMMU_PT_DOMAIN_OPS(amdv1),
+ .free = mock_domain_free,
+ .attach_dev = mock_domain_nop_attach,
+ .set_dev_pasid = mock_domain_set_dev_pasid_nop,
+ .iotlb_sync = &mock_iotlb_sync,
+};
-static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
- unsigned long iova, size_t pgsize,
- size_t pgcount,
- struct iommu_iotlb_gather *iotlb_gather)
+static const struct iommu_dirty_ops amdv1_dirty_ops = {
+ IOMMU_PT_DIRTY_OPS(amdv1),
+ .set_dirty_tracking = mock_domain_set_dirty_tracking,
+};
+
+static struct mock_iommu_domain *
+mock_domain_alloc_pgtable(struct device *dev,
+ const struct iommu_hwpt_selftest *user_cfg, u32 flags)
{
- struct mock_iommu_domain *mock = to_mock_domain(domain);
- bool first = true;
- size_t ret = 0;
- void *ent;
+ struct mock_iommu_domain *mock;
+ int rc;
- WARN_ON(iova % MOCK_IO_PAGE_SIZE);
- WARN_ON(pgsize % MOCK_IO_PAGE_SIZE);
+ mock = kzalloc(sizeof(*mock), GFP_KERNEL);
+ if (!mock)
+ return ERR_PTR(-ENOMEM);
+ mock->domain.type = IOMMU_DOMAIN_UNMANAGED;
- for (; pgcount; pgcount--) {
- size_t cur;
+ mock->amdv1.iommu.nid = NUMA_NO_NODE;
+
+ switch (user_cfg->pagetable_type) {
+ case MOCK_IOMMUPT_DEFAULT:
+ case MOCK_IOMMUPT_HUGE: {
+ struct pt_iommu_amdv1_cfg cfg = {};
+
+ /* The mock version has a 2k page size */
+ cfg.common.hw_max_vasz_lg2 = 56;
+ cfg.common.hw_max_oasz_lg2 = 51;
+ cfg.starting_level = 2;
+ if (user_cfg->pagetable_type == MOCK_IOMMUPT_HUGE)
+ mock->domain.ops = &amdv1_mock_huge_ops;
+ else
+ mock->domain.ops = &amdv1_mock_ops;
+ rc = pt_iommu_amdv1_mock_init(&mock->amdv1, &cfg, GFP_KERNEL);
+ if (rc)
+ goto err_free;
+
+ /*
+ * In huge mode userspace should only provide huge pages, but we
+ * still have to include PAGE_SIZE for the domain to be accepted by
+ * iommufd.
+ */
+ if (user_cfg->pagetable_type == MOCK_IOMMUPT_HUGE)
+ mock->domain.pgsize_bitmap = MOCK_HUGE_PAGE_SIZE |
+ PAGE_SIZE;
+ if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+ mock->domain.dirty_ops = &amdv1_mock_dirty_ops;
+ break;
+ }
- for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
- ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+ case MOCK_IOMMUPT_AMDV1: {
+ struct pt_iommu_amdv1_cfg cfg = {};
+
+ cfg.common.hw_max_vasz_lg2 = 64;
+ cfg.common.hw_max_oasz_lg2 = 52;
+ cfg.common.features = BIT(PT_FEAT_DYNAMIC_TOP) |
+ BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
+ BIT(PT_FEAT_AMDV1_FORCE_COHERENCE);
+ cfg.starting_level = 2;
+ mock->domain.ops = &amdv1_ops;
+ rc = pt_iommu_amdv1_init(&mock->amdv1, &cfg, GFP_KERNEL);
+ if (rc)
+ goto err_free;
+ if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
+ mock->domain.dirty_ops = &amdv1_dirty_ops;
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ goto err_free;
+ }
- /*
- * iommufd generates unmaps that must be a strict
- * superset of the map's performend So every
- * starting/ending IOVA should have been an iova passed
- * to map.
- *
- * This simple logic doesn't work when the HUGE_PAGE is
- * turned on since the core code will automatically
- * switch between the two page sizes creating a break in
- * the unmap calls. The break can land in the middle of
- * contiguous IOVA.
- */
- if (!(domain->pgsize_bitmap & MOCK_HUGE_PAGE_SIZE)) {
- if (first) {
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_START_IOVA));
- first = false;
- }
- if (pgcount == 1 &&
- cur + MOCK_IO_PAGE_SIZE == pgsize)
- WARN_ON(ent && !(xa_to_value(ent) &
- MOCK_PFN_LAST_IOVA));
- }
+ /*
+ * Override the real aperture with the MOCK aperture for test purposes.
+ */
+ if (user_cfg->pagetable_type == MOCK_IOMMUPT_DEFAULT) {
+ WARN_ON(mock->domain.geometry.aperture_start != 0);
+ WARN_ON(mock->domain.geometry.aperture_end < MOCK_APERTURE_LAST);
- iova += MOCK_IO_PAGE_SIZE;
- ret += MOCK_IO_PAGE_SIZE;
- }
+ mock->domain.geometry.aperture_start = MOCK_APERTURE_START;
+ mock->domain.geometry.aperture_end = MOCK_APERTURE_LAST;
}
- return ret;
+
+ return mock;
+err_free:
+ kfree(mock);
+ return ERR_PTR(rc);
}
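With the hardwired MOCK_IO_PAGE_SIZE gone, consumers derive the test page size from the domain's pgsize_bitmap, as the check_pa path further below does. A short sketch of the derivation (standalone; __ffs() returns the index of the lowest set bit, i.e. the smallest supported page size):

	/* Smallest IO page size advertised by the mock domain */
	unsigned int page_size = 1 << __ffs(mock->domain.pgsize_bitmap);

	/*
	 * MOCK_IOMMUPT_HUGE advertises MOCK_HUGE_PAGE_SIZE | PAGE_SIZE,
	 * so even in huge mode this resolves to PAGE_SIZE.
	 */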
-static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
- dma_addr_t iova)
+static struct iommu_domain *
+mock_domain_alloc_paging_flags(struct device *dev, u32 flags,
+ const struct iommu_user_data *user_data)
{
- struct mock_iommu_domain *mock = to_mock_domain(domain);
- void *ent;
+ bool has_dirty_flag = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
+ const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
+ IOMMU_HWPT_ALLOC_NEST_PARENT |
+ IOMMU_HWPT_ALLOC_PASID;
+ struct mock_dev *mdev = to_mock_dev(dev);
+ bool no_dirty_ops = mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY;
+ struct iommu_hwpt_selftest user_cfg = {};
+ struct mock_iommu_domain *mock;
+ int rc;
- WARN_ON(iova % MOCK_IO_PAGE_SIZE);
- ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
- WARN_ON(!ent);
- return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
+ if ((flags & ~PAGING_FLAGS) || (has_dirty_flag && no_dirty_ops))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (user_data && (user_data->type != IOMMU_HWPT_DATA_SELFTEST &&
+ user_data->type != IOMMU_HWPT_DATA_NONE))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (user_data) {
+ rc = iommu_copy_struct_from_user(
+ &user_cfg, user_data, IOMMU_HWPT_DATA_SELFTEST, iotlb);
+ if (rc)
+ return ERR_PTR(rc);
+ }
+
+ mock = mock_domain_alloc_pgtable(dev, &user_cfg, flags);
+ if (IS_ERR(mock))
+ return ERR_CAST(mock);
+ return &mock->domain;
}
static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
@@ -955,15 +892,6 @@ static const struct iommu_ops mock_ops = {
.user_pasid_table = true,
.get_viommu_size = mock_get_viommu_size,
.viommu_init = mock_viommu_init,
- .default_domain_ops =
- &(struct iommu_domain_ops){
- .free = mock_domain_free,
- .attach_dev = mock_domain_nop_attach,
- .map_pages = mock_domain_map_pages,
- .unmap_pages = mock_domain_unmap_pages,
- .iova_to_phys = mock_domain_iova_to_phys,
- .set_dev_pasid = mock_domain_set_dev_pasid_nop,
- },
};
static void mock_domain_free_nested(struct iommu_domain *domain)
@@ -1047,7 +975,7 @@ get_md_pagetable(struct iommufd_ucmd *ucmd, u32 mockpt_id,
if (IS_ERR(hwpt))
return hwpt;
if (hwpt->domain->type != IOMMU_DOMAIN_UNMANAGED ||
- hwpt->domain->ops != mock_ops.default_domain_ops) {
+ hwpt->domain->owner != &mock_ops) {
iommufd_put_object(ucmd->ictx, &hwpt->obj);
return ERR_PTR(-EINVAL);
}
@@ -1088,7 +1016,6 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
{},
};
const u32 valid_flags = MOCK_FLAGS_DEVICE_NO_DIRTY |
- MOCK_FLAGS_DEVICE_HUGE_IOVA |
MOCK_FLAGS_DEVICE_PASID;
struct mock_dev *mdev;
int rc, i;
@@ -1126,7 +1053,7 @@ static struct mock_dev *mock_dev_create(unsigned long dev_flags)
goto err_put;
}
- rc = device_add(&mdev->dev);
+ rc = iommu_mock_device_add(&mdev->dev, &mock_iommu.iommu_dev);
if (rc)
goto err_put;
return mdev;
@@ -1277,23 +1204,25 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
{
struct iommufd_hw_pagetable *hwpt;
struct mock_iommu_domain *mock;
+ unsigned int page_size;
uintptr_t end;
int rc;
- if (iova % MOCK_IO_PAGE_SIZE || length % MOCK_IO_PAGE_SIZE ||
- (uintptr_t)uptr % MOCK_IO_PAGE_SIZE ||
- check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
- return -EINVAL;
-
hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
if (IS_ERR(hwpt))
return PTR_ERR(hwpt);
- for (; length; length -= MOCK_IO_PAGE_SIZE) {
+ page_size = 1 << __ffs(mock->domain.pgsize_bitmap);
+ if (iova % page_size || length % page_size ||
+ (uintptr_t)uptr % page_size ||
+ check_add_overflow((uintptr_t)uptr, (uintptr_t)length, &end))
+ return -EINVAL;
+
+ for (; length; length -= page_size) {
struct page *pages[1];
+ phys_addr_t io_phys;
unsigned long pfn;
long npages;
- void *ent;
npages = get_user_pages_fast((uintptr_t)uptr & PAGE_MASK, 1, 0,
pages);
@@ -1308,15 +1237,14 @@ static int iommufd_test_md_check_pa(struct iommufd_ucmd *ucmd,
pfn = page_to_pfn(pages[0]);
put_page(pages[0]);
- ent = xa_load(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
- if (!ent ||
- (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE !=
- pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
+ io_phys = mock->domain.ops->iova_to_phys(&mock->domain, iova);
+ if (io_phys !=
+ pfn * PAGE_SIZE + ((uintptr_t)uptr % PAGE_SIZE)) {
rc = -EINVAL;
goto out_put;
}
- iova += MOCK_IO_PAGE_SIZE;
- uptr += MOCK_IO_PAGE_SIZE;
+ iova += page_size;
+ uptr += page_size;
}
rc = 0;
@@ -1795,7 +1723,7 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
if (IS_ERR(hwpt))
return PTR_ERR(hwpt);
- if (!(mock->flags & MOCK_DIRTY_TRACK)) {
+ if (!(mock->flags & MOCK_DIRTY_TRACK) || !mock->iommu.ops->set_dirty) {
rc = -EINVAL;
goto out_put;
}
@@ -1814,22 +1742,10 @@ static int iommufd_test_dirty(struct iommufd_ucmd *ucmd, unsigned int mockpt_id,
}
for (i = 0; i < max; i++) {
- unsigned long cur = iova + i * page_size;
- void *ent, *old;
-
if (!test_bit(i, (unsigned long *)tmp))
continue;
-
- ent = xa_load(&mock->pfns, cur / page_size);
- if (ent) {
- unsigned long val;
-
- val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
- old = xa_store(&mock->pfns, cur / page_size,
- xa_mk_value(val), GFP_KERNEL);
- WARN_ON_ONCE(ent != old);
- count++;
- }
+ mock->iommu.ops->set_dirty(&mock->iommu, iova + i * page_size);
+ count++;
}
cmd->dirty.out_nr_dirty = count;
@@ -2031,6 +1947,140 @@ void iommufd_selftest_destroy(struct iommufd_object *obj)
}
}
+struct iommufd_test_dma_buf {
+ void *memory;
+ size_t length;
+ bool revoked;
+};
+
+static int iommufd_test_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ return 0;
+}
+
+static void iommufd_test_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+}
+
+static struct sg_table *
+iommufd_test_dma_buf_map(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static void iommufd_test_dma_buf_unmap(struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+}
+
+static void iommufd_test_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct iommufd_test_dma_buf *priv = dmabuf->priv;
+
+ kfree(priv->memory);
+ kfree(priv);
+}
+
+static const struct dma_buf_ops iommufd_test_dmabuf_ops = {
+ .attach = iommufd_test_dma_buf_attach,
+ .detach = iommufd_test_dma_buf_detach,
+ .map_dma_buf = iommufd_test_dma_buf_map,
+ .release = iommufd_test_dma_buf_release,
+ .unmap_dma_buf = iommufd_test_dma_buf_unmap,
+};
+
+int iommufd_test_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
+ struct dma_buf_phys_vec *phys)
+{
+ struct iommufd_test_dma_buf *priv = attachment->dmabuf->priv;
+
+ dma_resv_assert_held(attachment->dmabuf->resv);
+
+ if (attachment->dmabuf->ops != &iommufd_test_dmabuf_ops)
+ return -EOPNOTSUPP;
+
+ if (priv->revoked)
+ return -ENODEV;
+
+ phys->paddr = virt_to_phys(priv->memory);
+ phys->len = priv->length;
+ return 0;
+}
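Callers of this helper are expected to hold the dma-buf's reservation lock across the physical-address query, matching the dma_resv_assert_held() above. A hedged caller sketch (the locking calls are the standard dma-resv API; everything else mirrors the helper's signature):

	struct dma_buf_phys_vec phys;
	int rc;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	rc = iommufd_test_dma_buf_iommufd_map(attach, &phys);
	dma_resv_unlock(attach->dmabuf->resv);
	if (rc)
		return rc;	/* -ENODEV once the exporter has revoked */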
+
+static int iommufd_test_dmabuf_get(struct iommufd_ucmd *ucmd,
+ unsigned int open_flags,
+ size_t len)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct iommufd_test_dma_buf *priv;
+ struct dma_buf *dmabuf;
+ int rc;
+
+ len = ALIGN(len, PAGE_SIZE);
+ if (len == 0 || len > PAGE_SIZE * 512)
+ return -EINVAL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->length = len;
+ priv->memory = kzalloc(len, GFP_KERNEL);
+ if (!priv->memory) {
+ rc = -ENOMEM;
+ goto err_free;
+ }
+
+ exp_info.ops = &iommufd_test_dmabuf_ops;
+ exp_info.size = len;
+ exp_info.flags = open_flags;
+ exp_info.priv = priv;
+
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf)) {
+ rc = PTR_ERR(dmabuf);
+ goto err_free;
+ }
+
+ return dma_buf_fd(dmabuf, open_flags);
+
+err_free:
+ kfree(priv->memory);
+ kfree(priv);
+ return rc;
+}
+
+static int iommufd_test_dmabuf_revoke(struct iommufd_ucmd *ucmd, int fd,
+ bool revoked)
+{
+ struct iommufd_test_dma_buf *priv;
+ struct dma_buf *dmabuf;
+ int rc = 0;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ if (dmabuf->ops != &iommufd_test_dmabuf_ops) {
+ rc = -EOPNOTSUPP;
+ goto err_put;
+ }
+
+ priv = dmabuf->priv;
+ dma_resv_lock(dmabuf->resv, NULL);
+ priv->revoked = revoked;
+ dma_buf_move_notify(dmabuf);
+ dma_resv_unlock(dmabuf->resv);
+
+err_put:
+ dma_buf_put(dmabuf);
+ return rc;
+}
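Taken together, the two handlers give a selftest a revocable dma-buf: IOMMU_TEST_OP_DMABUF_GET returns a dma-buf fd backed by kernel memory, and IOMMU_TEST_OP_DMABUF_REVOKE flips its revoked state and fires move_notify. A userspace sketch, assuming the selftest's usual single IOMMU_TEST_CMD ioctl number and the field names used by the dispatch below (both defined in the selftest uapi header, not shown in this diff):

	struct iommu_test_cmd cmd = {
		.op = IOMMU_TEST_OP_DMABUF_GET,
		.dmabuf_get = { .open_flags = O_RDWR | O_CLOEXEC,
				.length = 2 * 1024 * 1024 },
	};
	/* The GET handler returns the new dma-buf fd on success */
	int dmabuf_fd = ioctl(iommufd, IOMMU_TEST_CMD, &cmd);

	/* ... map the dma-buf into an IOAS, then revoke access ... */
	cmd.op = IOMMU_TEST_OP_DMABUF_REVOKE;
	cmd.dmabuf_revoke.dmabuf_fd = dmabuf_fd;
	cmd.dmabuf_revoke.revoked = true;
	ioctl(iommufd, IOMMU_TEST_CMD, &cmd);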
+
int iommufd_test(struct iommufd_ucmd *ucmd)
{
struct iommu_test_cmd *cmd = ucmd->cmd;
@@ -2109,6 +2159,13 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
return iommufd_test_pasid_detach(ucmd, cmd);
case IOMMU_TEST_OP_PASID_CHECK_HWPT:
return iommufd_test_pasid_check_hwpt(ucmd, cmd);
+ case IOMMU_TEST_OP_DMABUF_GET:
+ return iommufd_test_dmabuf_get(ucmd, cmd->dmabuf_get.open_flags,
+ cmd->dmabuf_get.length);
+ case IOMMU_TEST_OP_DMABUF_REVOKE:
+ return iommufd_test_dmabuf_revoke(ucmd,
+ cmd->dmabuf_revoke.dmabuf_fd,
+ cmd->dmabuf_revoke.revoked);
default:
return -EOPNOTSUPP;
}
@@ -2202,3 +2259,5 @@ void iommufd_test_exit(void)
platform_device_unregister(selftest_iommu_dev);
debugfs_remove_recursive(dbgfs_root);
}
+
+MODULE_IMPORT_NS("GENERIC_PT_IOMMU");
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ffa892f65714..ca848288dbf2 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -590,7 +590,7 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain)
}
static int ipmmu_attach_device(struct iommu_domain *io_domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
@@ -637,17 +637,17 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
}
static int ipmmu_iommu_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct iommu_domain *io_domain = iommu_get_domain_for_dev(dev);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct ipmmu_vmsa_domain *domain;
unsigned int i;
- if (io_domain == identity_domain || !io_domain)
+ if (old == identity_domain || !old)
return 0;
- domain = to_vmsa_domain(io_domain);
+ domain = to_vmsa_domain(old);
for (i = 0; i < fwspec->num_ids; ++i)
ipmmu_utlb_disable(domain, fwspec->ids[i]);
@@ -720,6 +720,8 @@ static int ipmmu_init_platform_device(struct device *dev,
dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
+ put_device(&ipmmu_pdev->dev);
+
return 0;
}
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 43a61ba021a5..819add75a665 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -391,7 +391,8 @@ static struct iommu_device *msm_iommu_probe_device(struct device *dev)
return &iommu->iommu;
}
-static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev,
+ struct iommu_domain *old)
{
int ret = 0;
unsigned long flags;
@@ -441,19 +442,19 @@ fail:
}
static int msm_iommu_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct msm_priv *priv;
unsigned long flags;
struct msm_iommu_dev *iommu;
struct msm_iommu_ctx_dev *master;
int ret = 0;
- if (domain == identity_domain || !domain)
+ if (old == identity_domain || !old)
return 0;
- priv = to_msm_priv(domain);
+ priv = to_msm_priv(old);
free_io_pgtable_ops(priv->iop);
spin_lock_irqsave(&msm_iommu_lock, flags);
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 0e0285348d2b..60fcd3d3b5eb 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -139,6 +139,7 @@
/* 2 bits: iommu type */
#define MTK_IOMMU_TYPE_MM (0x0 << 13)
#define MTK_IOMMU_TYPE_INFRA (0x1 << 13)
+#define MTK_IOMMU_TYPE_APU (0x2 << 13)
#define MTK_IOMMU_TYPE_MASK (0x3 << 13)
/* PM and clock always on. e.g. infra iommu */
#define PM_CLK_AO BIT(15)
@@ -147,6 +148,7 @@
#define TF_PORT_TO_ADDR_MT8173 BIT(18)
#define INT_ID_PORT_WIDTH_6 BIT(19)
#define CFG_IFA_MASTER_IN_ATF BIT(20)
+#define DL_WITH_MULTI_LARB BIT(21)
#define MTK_IOMMU_HAS_FLAG_MASK(pdata, _x, mask) \
((((pdata)->flags) & (mask)) == (_x))
@@ -172,6 +174,7 @@ enum mtk_iommu_plat {
M4U_MT8183,
M4U_MT8186,
M4U_MT8188,
+ M4U_MT8189,
M4U_MT8192,
M4U_MT8195,
M4U_MT8365,
@@ -335,6 +338,8 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data, unsigned int ban
*/
#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL
+static LIST_HEAD(apulist); /* List the apu iommu HWs */
+static LIST_HEAD(infralist); /* List the iommu_infra HW */
static LIST_HEAD(m4ulist); /* List all the M4U HWs */
#define for_each_m4u(data, head) list_for_each_entry(data, head, list)
@@ -350,6 +355,15 @@ static const struct mtk_iommu_iova_region single_domain[] = {
#define MT8192_MULTI_REGION_NR (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) ? \
MT8192_MULTI_REGION_NR_MAX : 1)
+static const struct mtk_iommu_iova_region mt8189_multi_dom_apu[] = {
+ { .iova_base = 0x200000ULL, .size = SZ_512M}, /* APU SECURE */
+#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+ { .iova_base = SZ_1G, .size = 0xc0000000}, /* APU CODE */
+ { .iova_base = 0x70000000ULL, .size = 0x12600000}, /* APU VLM */
+ { .iova_base = SZ_4G, .size = SZ_4G * 3}, /* APU VPU */
+#endif
+};
+
static const struct mtk_iommu_iova_region mt8192_multi_dom[MT8192_MULTI_REGION_NR] = {
{ .iova_base = 0x0, .size = MTK_IOMMU_IOVA_SZ_4G}, /* 0 ~ 4G, */
#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
@@ -705,7 +719,7 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
}
static int mtk_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
struct mtk_iommu_data *data = dev_iommu_priv_get(dev), *frstdata;
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
@@ -773,12 +787,12 @@ err_unlock:
}
static int mtk_iommu_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
- if (domain == identity_domain || !domain)
+ if (old == identity_domain || !old)
return 0;
mtk_iommu_config(data, dev, false, 0);
@@ -865,6 +879,7 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct device_link *link;
struct device *larbdev;
+ unsigned long larbid_msk = 0;
unsigned int larbid, larbidx, i;
if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
@@ -872,30 +887,50 @@ static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
/*
* Link the consumer device with the smi-larb device(supplier).
- * The device that connects with each a larb is a independent HW.
- * All the ports in each a device should be in the same larbs.
+ * With DL_WITH_MULTI_LARB: the master may connect to multiple larbs,
+ * so create a device link with each of them.
+ * Without DL_WITH_MULTI_LARB: the master must connect to exactly one
+ * larb; otherwise fail.
*/
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
if (larbid >= MTK_LARB_NR_MAX)
return ERR_PTR(-EINVAL);
+ larbid_msk |= BIT(larbid);
+
for (i = 1; i < fwspec->num_ids; i++) {
larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]);
- if (larbid != larbidx) {
+ if (MTK_IOMMU_HAS_FLAG(data->plat_data, DL_WITH_MULTI_LARB)) {
+ larbid_msk |= BIT(larbidx);
+ } else if (larbid != larbidx) {
dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
larbid, larbidx);
return ERR_PTR(-EINVAL);
}
}
- larbdev = data->larb_imu[larbid].dev;
- if (!larbdev)
- return ERR_PTR(-EINVAL);
- link = device_link_add(dev, larbdev,
- DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
- if (!link)
- dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
+ for_each_set_bit(larbid, &larbid_msk, 32) {
+ larbdev = data->larb_imu[larbid].dev;
+ if (!larbdev)
+ return ERR_PTR(-EINVAL);
+
+ link = device_link_add(dev, larbdev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+ if (!link) {
+ dev_err(dev, "Unable to link %s\n", dev_name(larbdev));
+ goto link_remove;
+ }
+ }
+
return &data->iommu;
+
+link_remove:
+ for_each_set_bit(i, &larbid_msk, larbid) {
+ larbdev = data->larb_imu[i].dev;
+ device_link_remove(dev, larbdev);
+ }
+
+ return ERR_PTR(-ENODEV);
}
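The unwind above leans on for_each_set_bit()'s size argument: passing the failing larbid as the limit walks only the set bits below it, i.e. only the links that were actually created. A standalone illustration with made-up values:

	unsigned long msk = BIT(1) | BIT(4) | BIT(7);
	unsigned int i;

	/* With a limit of 7 this visits bits 1 and 4, but not bit 7 */
	for_each_set_bit(i, &msk, 7)
		pr_info("remove link for larb %u\n", i);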
static void mtk_iommu_release_device(struct device *dev)
@@ -903,11 +938,19 @@ static void mtk_iommu_release_device(struct device *dev)
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct mtk_iommu_data *data;
struct device *larbdev;
- unsigned int larbid;
+ unsigned int larbid, i;
+ unsigned long larbid_msk = 0;
data = dev_iommu_priv_get(dev);
- if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
- larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
+ if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
+ return;
+
+ for (i = 0; i < fwspec->num_ids; i++) {
+ larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
+ larbid_msk |= BIT(larbid);
+ }
+
+ for_each_set_bit(larbid, &larbid_msk, 32) {
larbdev = data->larb_imu[larbid].dev;
device_link_remove(dev, larbdev);
}
@@ -974,6 +1017,8 @@ static int mtk_iommu_of_xlate(struct device *dev,
return -EINVAL;
dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
+
+ put_device(&m4updev->dev);
}
return iommu_fwspec_add_ids(dev, args->args, 1);
@@ -1211,16 +1256,19 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m
}
component_match_add(dev, match, component_compare_dev, &plarbdev->dev);
- platform_device_put(plarbdev);
}
- if (!frst_avail_smicomm_node)
- return -EINVAL;
+ if (!frst_avail_smicomm_node) {
+ ret = -EINVAL;
+ goto err_larbdev_put;
+ }
pcommdev = of_find_device_by_node(frst_avail_smicomm_node);
of_node_put(frst_avail_smicomm_node);
- if (!pcommdev)
- return -ENODEV;
+ if (!pcommdev) {
+ ret = -ENODEV;
+ goto err_larbdev_put;
+ }
data->smicomm_dev = &pcommdev->dev;
link = device_link_add(data->smicomm_dev, dev,
@@ -1228,16 +1276,16 @@ static int mtk_iommu_mm_dts_parse(struct device *dev, struct component_match **m
platform_device_put(pcommdev);
if (!link) {
dev_err(dev, "Unable to link %s.\n", dev_name(data->smicomm_dev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_larbdev_put;
}
return 0;
err_larbdev_put:
- for (i = MTK_LARB_NR_MAX - 1; i >= 0; i--) {
- if (!data->larb_imu[i].dev)
- continue;
+ /* The ID mapping may not be linear, so loop over the whole array */
+ for (i = 0; i < MTK_LARB_NR_MAX; i++)
put_device(data->larb_imu[i].dev);
- }
+
return ret;
}
@@ -1400,8 +1448,12 @@ out_sysfs_remove:
iommu_device_sysfs_remove(&data->iommu);
out_list_del:
list_del(&data->list);
- if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM))
+ if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
device_link_remove(data->smicomm_dev, dev);
+
+ for (i = 0; i < MTK_LARB_NR_MAX; i++)
+ put_device(data->larb_imu[i].dev);
+ }
out_runtime_disable:
pm_runtime_disable(dev);
return ret;
@@ -1421,6 +1473,9 @@ static void mtk_iommu_remove(struct platform_device *pdev)
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
device_link_remove(data->smicomm_dev, &pdev->dev);
component_master_del(&pdev->dev, &mtk_iommu_com_ops);
+
+ for (i = 0; i < MTK_LARB_NR_MAX; i++)
+ put_device(data->larb_imu[i].dev);
}
pm_runtime_disable(&pdev->dev);
for (i = 0; i < data->plat_data->banks_num; i++) {
@@ -1695,6 +1750,66 @@ static const struct mtk_iommu_plat_data mt8188_data_vpp = {
27, 28 /* ccu0 */, MTK_INVALID_LARBID}, {4, 6}},
};
+static const unsigned int mt8189_apu_region_msk[][MTK_LARB_NR_MAX] = {
+ [0] = {[0] = BIT(2)}, /* Region0: fake larb 0 APU_SECURE */
+ [1] = {[0] = BIT(1)}, /* Region1: fake larb 0 APU_CODE */
+ [2] = {[0] = BIT(3)}, /* Region2: fake larb 0 APU_VLM */
+ [3] = {[0] = BIT(0)}, /* Region3: fake larb 0 APU_DATA */
+};
+
+static const struct mtk_iommu_plat_data mt8189_data_apu = {
+ .m4u_plat = M4U_MT8189,
+ .flags = IOVA_34_EN | DCM_DISABLE |
+ MTK_IOMMU_TYPE_APU | PGTABLE_PA_35_EN,
+ .hw_list = &apulist,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .iova_region = mt8189_multi_dom_apu,
+ .iova_region_nr = ARRAY_SIZE(mt8189_multi_dom_apu),
+ .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
+ .iova_region_larb_msk = mt8189_apu_region_msk,
+};
+
+static const struct mtk_iommu_plat_data mt8189_data_infra = {
+ .m4u_plat = M4U_MT8189,
+ .flags = WR_THROT_EN | DCM_DISABLE | MTK_IOMMU_TYPE_INFRA |
+ CFG_IFA_MASTER_IN_ATF | SHARE_PGTABLE | PGTABLE_PA_35_EN,
+ .hw_list = &infralist,
+ .banks_num = 1,
+ .banks_enable = {true},
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .iova_region = single_domain,
+ .iova_region_nr = ARRAY_SIZE(single_domain),
+};
+
+static const u32 mt8189_larb_region_msk[MT8192_MULTI_REGION_NR_MAX][MTK_LARB_NR_MAX] = {
+ [0] = {~0, ~0, ~0, [22] = BIT(0)}, /* Region0: all ports for larb0/1/2 */
+ [1] = {[3] = ~0, [4] = ~0}, /* Region1: all ports for larb4(3)/7(4) */
+ [2] = {[5] = ~0, [6] = ~0, /* Region2: all ports for larb9(5)/11(6) */
+ [7] = ~0, [8] = ~0, /* Region2: all ports for larb13(7)/14(8) */
+ [9] = ~0, [10] = ~0, /* Region2: all ports for larb16(9)/17(10) */
+ [11] = ~0, [12] = ~0, /* Region2: all ports for larb19(11)/20(12) */
+ [21] = ~0}, /* Region2: larb21 fake GCE larb */
+};
+
+static const struct mtk_iommu_plat_data mt8189_data_mm = {
+ .m4u_plat = M4U_MT8189,
+ .flags = HAS_BCLK | HAS_SUB_COMM_3BITS | OUT_ORDER_WR_EN |
+ WR_THROT_EN | IOVA_34_EN | MTK_IOMMU_TYPE_MM |
+ PGTABLE_PA_35_EN | DL_WITH_MULTI_LARB,
+ .hw_list = &m4ulist,
+ .inv_sel_reg = REG_MMU_INV_SEL_GEN2,
+ .banks_num = 5,
+ .banks_enable = {true, false, false, false, false},
+ .iova_region = mt8192_multi_dom,
+ .iova_region_nr = ARRAY_SIZE(mt8192_multi_dom),
+ .iova_region_larb_msk = mt8189_larb_region_msk,
+ .larbid_remap = {{0}, {1}, {21/* GCE_D */, 21/* GCE_M */, 2},
+ {19, 20, 9, 11}, {7}, {4},
+ {13, 17}, {14, 16}},
+};
+
static const struct mtk_iommu_plat_data mt8192_data = {
.m4u_plat = M4U_MT8192,
.flags = HAS_BCLK | HAS_SUB_COMM_2BITS | OUT_ORDER_WR_EN |
@@ -1796,6 +1911,9 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
{ .compatible = "mediatek,mt8188-iommu-infra", .data = &mt8188_data_infra},
{ .compatible = "mediatek,mt8188-iommu-vdo", .data = &mt8188_data_vdo},
{ .compatible = "mediatek,mt8188-iommu-vpp", .data = &mt8188_data_vpp},
+ { .compatible = "mediatek,mt8189-iommu-apu", .data = &mt8189_data_apu},
+ { .compatible = "mediatek,mt8189-iommu-infra", .data = &mt8189_data_infra},
+ { .compatible = "mediatek,mt8189-iommu-mm", .data = &mt8189_data_mm},
{ .compatible = "mediatek,mt8192-m4u", .data = &mt8192_data},
{ .compatible = "mediatek,mt8195-iommu-infra", .data = &mt8195_data_infra},
{ .compatible = "mediatek,mt8195-iommu-vdo", .data = &mt8195_data_vdo},
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index 10cc0b1197e8..c8d8eff5373d 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -303,7 +303,9 @@ static void mtk_iommu_v1_domain_free(struct iommu_domain *domain)
kfree(to_mtk_domain(domain));
}
-static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device *dev)
+static int mtk_iommu_v1_attach_device(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_domain *old)
{
struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
struct mtk_iommu_v1_domain *dom = to_mtk_domain(domain);
@@ -329,7 +331,8 @@ static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device
}
static int mtk_iommu_v1_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
@@ -435,6 +438,8 @@ static int mtk_iommu_v1_create_mapping(struct device *dev,
return -EINVAL;
dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
+
+ put_device(&m4updev->dev);
}
ret = iommu_fwspec_add_ids(dev, args->args, 1);
@@ -641,13 +646,18 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
if (larb_nr < 0)
return larb_nr;
+ if (larb_nr > MTK_LARB_NR_MAX)
+ return -EINVAL;
+
for (i = 0; i < larb_nr; i++) {
struct device_node *larbnode;
struct platform_device *plarbdev;
larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
- if (!larbnode)
- return -EINVAL;
+ if (!larbnode) {
+ ret = -EINVAL;
+ goto out_put_larbs;
+ }
if (!of_device_is_available(larbnode)) {
of_node_put(larbnode);
@@ -657,11 +667,14 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
plarbdev = of_find_device_by_node(larbnode);
if (!plarbdev) {
of_node_put(larbnode);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_put_larbs;
}
if (!plarbdev->dev.driver) {
of_node_put(larbnode);
- return -EPROBE_DEFER;
+ put_device(&plarbdev->dev);
+ ret = -EPROBE_DEFER;
+ goto out_put_larbs;
}
data->larb_imu[i].dev = &plarbdev->dev;
@@ -673,7 +686,7 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
ret = mtk_iommu_v1_hw_init(data);
if (ret)
- return ret;
+ goto out_put_larbs;
ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
dev_name(&pdev->dev));
@@ -695,12 +708,17 @@ out_sysfs_remove:
iommu_device_sysfs_remove(&data->iommu);
out_clk_unprepare:
clk_disable_unprepare(data->bclk);
+out_put_larbs:
+ for (i = 0; i < MTK_LARB_NR_MAX; i++)
+ put_device(data->larb_imu[i].dev);
+
return ret;
}
static void mtk_iommu_v1_remove(struct platform_device *pdev)
{
struct mtk_iommu_v1_data *data = platform_get_drvdata(pdev);
+ int i;
iommu_device_sysfs_remove(&data->iommu);
iommu_device_unregister(&data->iommu);
@@ -708,6 +726,9 @@ static void mtk_iommu_v1_remove(struct platform_device *pdev)
clk_disable_unprepare(data->bclk);
devm_free_irq(&pdev->dev, data->irq, data);
component_master_del(&pdev->dev, &mtk_iommu_v1_com_ops);
+
+ for (i = 0; i < MTK_LARB_NR_MAX; i++)
+ put_device(data->larb_imu[i].dev);
}
static int __maybe_unused mtk_iommu_v1_suspend(struct device *dev)
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6fb93927bdb9..768973b7e511 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1303,8 +1303,8 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
struct omap_iommu_device *iommu;
struct omap_iommu *oiommu;
struct iotlb_entry e;
+ int ret = -EINVAL;
int omap_pgsz;
- u32 ret = -EINVAL;
int i;
omap_pgsz = bytes_to_iopgsz(bytes);
@@ -1431,8 +1431,8 @@ static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain)
odomain->iommus = NULL;
}
-static int
-omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+static int omap_iommu_attach_dev(struct iommu_domain *domain,
+ struct device *dev, struct iommu_domain *old)
{
struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
@@ -1536,15 +1536,15 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
}
static int omap_iommu_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct omap_iommu_domain *omap_domain;
- if (domain == identity_domain || !domain)
+ if (old == identity_domain || !old)
return 0;
- omap_domain = to_omap_domain(domain);
+ omap_domain = to_omap_domain(old);
spin_lock(&omap_domain->lock);
_omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
@@ -1668,23 +1668,20 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev)
}
pdev = of_find_device_by_node(np);
+ of_node_put(np);
if (!pdev) {
- of_node_put(np);
kfree(arch_data);
return ERR_PTR(-ENODEV);
}
oiommu = platform_get_drvdata(pdev);
+ put_device(&pdev->dev);
if (!oiommu) {
- of_node_put(np);
kfree(arch_data);
return ERR_PTR(-EINVAL);
}
tmp->iommu_dev = oiommu;
- tmp->dev = &pdev->dev;
-
- of_node_put(np);
}
dev_iommu_priv_set(dev, arch_data);
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 27697109ec79..50b39be61abc 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -88,7 +88,6 @@ struct omap_iommu {
/**
* struct omap_iommu_arch_data - omap iommu private data
* @iommu_dev: handle of the OMAP iommu device
- * @dev: handle of the iommu device
*
* This is an omap iommu private data object, which binds an iommu user
* to its iommu device. This object should be placed at the iommu user's
@@ -97,7 +96,6 @@ struct omap_iommu {
*/
struct omap_iommu_arch_data {
struct omap_iommu *iommu_dev;
- struct device *dev;
};
struct cr_regs {
diff --git a/drivers/iommu/riscv/iommu-platform.c b/drivers/iommu/riscv/iommu-platform.c
index 725e919b97ef..83a28c83f991 100644
--- a/drivers/iommu/riscv/iommu-platform.c
+++ b/drivers/iommu/riscv/iommu-platform.c
@@ -10,6 +10,8 @@
* Tomasz Jeznach <tjeznach@rivosinc.com>
*/
+#include <linux/acpi.h>
+#include <linux/irqchip/riscv-imsic.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -46,6 +48,7 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
enum riscv_iommu_igs_settings igs;
struct device *dev = &pdev->dev;
struct riscv_iommu_device *iommu = NULL;
+ struct irq_domain *msi_domain;
struct resource *res = NULL;
int vec, ret;
@@ -76,8 +79,13 @@ static int riscv_iommu_platform_probe(struct platform_device *pdev)
switch (igs) {
case RISCV_IOMMU_CAPABILITIES_IGS_BOTH:
case RISCV_IOMMU_CAPABILITIES_IGS_MSI:
- if (is_of_node(dev->fwnode))
+ if (is_of_node(dev_fwnode(dev))) {
of_msi_configure(dev, to_of_node(dev->fwnode));
+ } else {
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
if (!dev_get_msi_domain(dev)) {
dev_warn(dev, "failed to find an MSI domain\n");
@@ -150,6 +158,12 @@ static const struct of_device_id riscv_iommu_of_match[] = {
{},
};
+static const struct acpi_device_id riscv_iommu_acpi_match[] = {
+ { "RSCV0004", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, riscv_iommu_acpi_match);
+
static struct platform_driver riscv_iommu_platform_driver = {
.probe = riscv_iommu_platform_probe,
.remove = riscv_iommu_platform_remove,
@@ -158,6 +172,7 @@ static struct platform_driver riscv_iommu_platform_driver = {
.name = "riscv,iommu",
.of_match_table = riscv_iommu_of_match,
.suppress_bind_attrs = true,
+ .acpi_match_table = riscv_iommu_acpi_match,
},
};
diff --git a/drivers/iommu/riscv/iommu.c b/drivers/iommu/riscv/iommu.c
index 0eae2f4bdc5e..d9429097a2b5 100644
--- a/drivers/iommu/riscv/iommu.c
+++ b/drivers/iommu/riscv/iommu.c
@@ -12,6 +12,8 @@
#define pr_fmt(fmt) "riscv-iommu: " fmt
+#include <linux/acpi.h>
+#include <linux/acpi_rimt.h>
#include <linux/compiler.h>
#include <linux/crash_dump.h>
#include <linux/init.h>
@@ -1319,7 +1321,8 @@ static bool riscv_iommu_pt_supported(struct riscv_iommu_device *iommu, int pgd_m
}
static int riscv_iommu_attach_paging_domain(struct iommu_domain *iommu_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct riscv_iommu_domain *domain = iommu_domain_to_riscv(iommu_domain);
struct riscv_iommu_device *iommu = dev_to_iommu(dev);
@@ -1424,7 +1427,8 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
}
static int riscv_iommu_attach_blocking_domain(struct iommu_domain *iommu_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct riscv_iommu_device *iommu = dev_to_iommu(dev);
struct riscv_iommu_info *info = dev_iommu_priv_get(dev);
@@ -1445,7 +1449,8 @@ static struct iommu_domain riscv_iommu_blocking_domain = {
};
static int riscv_iommu_attach_identity_domain(struct iommu_domain *iommu_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct riscv_iommu_device *iommu = dev_to_iommu(dev);
struct riscv_iommu_info *info = dev_iommu_priv_get(dev);
@@ -1650,6 +1655,14 @@ int riscv_iommu_init(struct riscv_iommu_device *iommu)
goto err_iodir_off;
}
+ if (!acpi_disabled) {
+ rc = rimt_iommu_register(iommu->dev);
+ if (rc) {
+ dev_err_probe(iommu->dev, rc, "cannot register iommu with RIMT\n");
+ goto err_remove_sysfs;
+ }
+ }
+
rc = iommu_device_register(&iommu->iommu, &riscv_iommu_ops, iommu->dev);
if (rc) {
dev_err_probe(iommu->dev, rc, "cannot register iommu interface\n");
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 0861dd469bd8..85f3667e797c 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -960,7 +960,8 @@ out_disable_clocks:
}
static int rk_iommu_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain;
@@ -1005,7 +1006,7 @@ static struct iommu_domain rk_identity_domain = {
};
static int rk_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
struct rk_iommu *iommu;
struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
@@ -1026,7 +1027,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
if (iommu->domain == domain)
return 0;
- ret = rk_iommu_identity_attach(&rk_identity_domain, dev);
+ ret = rk_iommu_identity_attach(&rk_identity_domain, dev, old);
if (ret)
return ret;
@@ -1041,8 +1042,17 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
return 0;
ret = rk_iommu_enable(iommu);
- if (ret)
- WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev));
+ if (ret) {
+ /*
+ * Note rk_iommu_identity_attach() might fail before physically
+ * attaching the dev to iommu->domain, in which case the actual
+ * old domain for this revert should be rk_identity_domain rather than
+ * iommu->domain. Since rk_iommu_identity_attach() does not care
+ * about the old domain argument for now, this is not a problem.
+ */
+ WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev,
+ iommu->domain));
+ }
pm_runtime_put(iommu->dev);
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index 9c80d61deb2c..fe679850af28 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -612,6 +612,23 @@ static u64 get_iota_region_flag(struct s390_domain *domain)
}
}
+static bool reg_ioat_propagate_error(int cc, u8 status)
+{
+ /*
+ * If the device is in the error state the reset routine
+ * will register the IOAT of the newly set domain on re-enable
+ */
+ if (cc == ZPCI_CC_ERR && status == ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ return false;
+ /*
+ * If the device was removed, treat registration as success
+ * and let the subsequent error event trigger the tear-down.
+ */
+ if (cc == ZPCI_CC_INVAL_HANDLE)
+ return false;
+ return cc != ZPCI_CC_OK;
+}
+
static int s390_iommu_domain_reg_ioat(struct zpci_dev *zdev,
struct iommu_domain *domain, u8 *status)
{
@@ -653,7 +670,8 @@ int zpci_iommu_register_ioat(struct zpci_dev *zdev, u8 *status)
}
static int blocking_domain_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct zpci_dev *zdev = to_zpci_dev(dev);
struct s390_domain *s390_domain;
@@ -677,7 +695,8 @@ static int blocking_domain_attach_device(struct iommu_domain *domain,
}
static int s390_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct s390_domain *s390_domain = to_s390_domain(domain);
struct zpci_dev *zdev = to_zpci_dev(dev);
@@ -692,11 +711,11 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
domain->geometry.aperture_end < zdev->start_dma))
return -EINVAL;
- blocking_domain_attach_device(&blocking_domain, dev);
+ blocking_domain_attach_device(&blocking_domain, dev, old);
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev->dma_table = s390_domain->dma_table;
zdev_s390_domain_update(zdev, domain);
@@ -1032,7 +1051,8 @@ struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
lockdep_assert_held(&zdev->dom_lock);
- if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED)
+ if (zdev->s390_domain->type == IOMMU_DOMAIN_BLOCKED ||
+ zdev->s390_domain->type == IOMMU_DOMAIN_IDENTITY)
return NULL;
s390_domain = to_s390_domain(zdev->s390_domain);
@@ -1113,22 +1133,18 @@ static int __init s390_iommu_init(void)
subsys_initcall(s390_iommu_init);
static int s390_attach_dev_identity(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct zpci_dev *zdev = to_zpci_dev(dev);
u8 status;
int cc;
- blocking_domain_attach_device(&blocking_domain, dev);
+ blocking_domain_attach_device(&blocking_domain, dev, old);
/* If we fail now DMA remains blocked via blocking domain */
cc = s390_iommu_domain_reg_ioat(zdev, domain, &status);
-
- /*
- * If the device is undergoing error recovery the reset code
- * will re-establish the new domain.
- */
- if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
+ if (reg_ioat_propagate_error(cc, status))
return -EIO;
zdev_s390_domain_update(zdev, domain);
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index c7ca1d8a0b15..555d4505c747 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -247,7 +247,8 @@ static void sprd_iommu_domain_free(struct iommu_domain *domain)
}
static int sprd_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
struct sprd_iommu_domain *dom = to_sprd_domain(domain);
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index de10b569d9a9..90b26fe21817 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -771,7 +771,8 @@ static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
}
static int sun50i_iommu_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
struct sun50i_iommu_domain *sun50i_domain;
@@ -797,7 +798,8 @@ static struct iommu_domain sun50i_iommu_identity_domain = {
};
static int sun50i_iommu_attach_device(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
struct sun50i_iommu *iommu;
@@ -813,7 +815,7 @@ static int sun50i_iommu_attach_device(struct iommu_domain *domain,
if (iommu->domain == domain)
return 0;
- sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev);
+ sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev, old);
sun50i_iommu_attach_domain(iommu, sun50i_domain);
@@ -839,6 +841,8 @@ static int sun50i_iommu_of_xlate(struct device *dev,
dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));
+ put_device(&iommu_pdev->dev);
+
return iommu_fwspec_add_ids(dev, &id, 1);
}
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 36cdd5fbab07..c391e7f2cde6 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -490,7 +490,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
}
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev, struct iommu_domain *old)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct tegra_smmu *smmu = dev_iommu_priv_get(dev);
@@ -524,9 +524,9 @@ disable:
}
static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct tegra_smmu_as *as;
struct tegra_smmu *smmu;
@@ -535,10 +535,10 @@ static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
if (!fwspec)
return -ENODEV;
- if (domain == identity_domain || !domain)
+ if (old == identity_domain || !old)
return 0;
- as = to_smmu_as(domain);
+ as = to_smmu_as(old);
smmu = as->smmu;
for (index = 0; index < fwspec->num_ids; index++) {
tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
@@ -830,10 +830,9 @@ static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
return NULL;
mc = platform_get_drvdata(pdev);
- if (!mc) {
- put_device(&pdev->dev);
+ put_device(&pdev->dev);
+ if (!mc)
return NULL;
- }
return mc->smmu;
}
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index b39d6f134ab2..d314fa5cd847 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -730,7 +730,8 @@ static struct iommu_domain *viommu_domain_alloc_identity(struct device *dev)
return domain;
}
-static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev,
+ struct iommu_domain *old)
{
int ret = 0;
struct virtio_iommu_req_attach req;
@@ -781,7 +782,8 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
static int viommu_attach_identity_domain(struct iommu_domain *domain,
- struct device *dev)
+ struct device *dev,
+ struct iommu_domain *old)
{
int ret = 0;
struct virtio_iommu_req_attach req;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 6d12c6ab9ea4..f334f49c9791 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -36,7 +36,6 @@ config GIC_NON_BANKED
config ARM_GIC_V3
bool
select IRQ_DOMAIN_HIERARCHY
- select PARTITION_PERCPU
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select HAVE_ARM_SMCCC_DISCOVERY
select IRQ_MSI_IOMMU
@@ -151,7 +150,7 @@ config BCM6345_L1_IRQ
config BCM7038_L1_IRQ
tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
@@ -159,14 +158,14 @@ config BCM7038_L1_IRQ
config BCM7120_L2_IRQ
tristate "Broadcom STB 7120-style L2 interrupt controller driver"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
config BRCMSTB_L2_IRQ
tristate "Broadcom STB generic L2 interrupt controller driver"
- depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
default ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
@@ -451,9 +450,6 @@ config LS_SCFG_MSI
depends on PCI_MSI
select IRQ_MSI_LIB
-config PARTITION_PERCPU
- bool
-
config STM32MP_EXTI
tristate "STM32MP extended interrupts and event controller"
depends on (ARCH_STM32 && !ARM_SINGLE_ARMV7M) || COMPILE_TEST
@@ -634,6 +630,13 @@ config RISCV_IMSIC
select GENERIC_MSI_IRQ
select IRQ_MSI_LIB
+config RISCV_RPMI_SYSMSI
+ bool
+ depends on RISCV && MAILBOX
+ select IRQ_DOMAIN_HIERARCHY
+ select GENERIC_MSI_IRQ
+ default RISCV
+
config SIFIVE_PLIC
bool
depends on RISCV
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 93e3ced023bb..6a229443efe0 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
obj-$(CONFIG_ARM_GIC_ITS_PARENT) += irq-gic-its-msi-parent.o
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v4.o
obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o
-obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_ARM_GIC_V5) += irq-gic-v5.o irq-gic-v5-irs.o irq-gic-v5-its.o \
irq-gic-v5-iwb.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
@@ -106,6 +105,7 @@ obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o
obj-$(CONFIG_RISCV_APLIC_MSI) += irq-riscv-aplic-msi.o
obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o
+obj-$(CONFIG_RISCV_RPMI_SYSMSI) += irq-riscv-rpmi-sysmsi.o
obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o
obj-$(CONFIG_ACLINT_SSWI) += irq-aclint-sswi.o
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index e7dfcf0cda43..495848442b35 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -200,12 +200,13 @@ static void __init combiner_init(void __iomem *combiner_base,
/**
* combiner_suspend - save interrupt combiner state before suspend
+ * @data: syscore context
*
* Save the interrupt enable set register for all combiner groups since
* the state is lost when the system enters into a sleep state.
*
*/
-static int combiner_suspend(void)
+static int combiner_suspend(void *data)
{
int i;
@@ -218,12 +219,13 @@ static int combiner_suspend(void)
/**
* combiner_resume - restore interrupt combiner state after resume
+ * @data: syscore context
*
* Restore the interrupt enable set register for all combiner groups since
* the state is lost when the system enters into a sleep state on suspend.
*
*/
-static void combiner_resume(void)
+static void combiner_resume(void *data)
{
int i;
@@ -240,11 +242,15 @@ static void combiner_resume(void)
#define combiner_resume NULL
#endif
-static struct syscore_ops combiner_syscore_ops = {
+static const struct syscore_ops combiner_syscore_ops = {
.suspend = combiner_suspend,
.resume = combiner_resume,
};
+static struct syscore combiner_syscore = {
+ .ops = &combiner_syscore_ops,
+};
+
static int __init combiner_of_init(struct device_node *np,
struct device_node *parent)
{
@@ -264,7 +270,7 @@ static int __init combiner_of_init(struct device_node *np,
combiner_init(combiner_base, np);
- register_syscore_ops(&combiner_syscore_ops);
+ register_syscore(&combiner_syscore);
return 0;
}
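The syscore conversion repeated below (armada-370-xp, bcm7038-l1, gic-v3-its, i8259, imx-gpcv2) follows one pattern: the callbacks take a void *data cookie, the ops table becomes const, and registration wraps the ops in a struct syscore instance. A minimal sketch for a hypothetical foo driver, assuming the register_syscore()/struct syscore API these hunks rely on (the data cookie plumbing is an assumption):

static int foo_suspend(void *data)
{
	/* save hardware state; data is an optional per-instance cookie (assumption) */
	return 0;
}

static void foo_resume(void *data)
{
	/* restore hardware state */
}

static const struct syscore_ops foo_syscore_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
};

static struct syscore foo_syscore = {
	.ops = &foo_syscore_ops,
};

/* at init time: register_syscore(&foo_syscore); */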
diff --git a/drivers/irqchip/irq-aclint-sswi.c b/drivers/irqchip/irq-aclint-sswi.c
index 93e28e9f281f..fee30f3bc5ac 100644
--- a/drivers/irqchip/irq-aclint-sswi.c
+++ b/drivers/irqchip/irq-aclint-sswi.c
@@ -175,7 +175,8 @@ static int __init generic_aclint_sswi_early_probe(struct device_node *node,
{
return generic_aclint_sswi_probe(&node->fwnode);
}
-IRQCHIP_DECLARE(generic_aclint_sswi, "mips,p8700-aclint-sswi", generic_aclint_sswi_early_probe);
+IRQCHIP_DECLARE(mips_p8700_sswi, "mips,p8700-aclint-sswi", generic_aclint_sswi_early_probe);
+IRQCHIP_DECLARE(nuclei_ux900_sswi, "nuclei,ux900-aclint-sswi", generic_aclint_sswi_early_probe);
/* THEAD variant */
#define THEAD_C9XX_CSR_SXSTATUS 0x5c0
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 032d66dceb8e..3c70364e7cdd 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -411,12 +411,15 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
if (is_kernel_in_hyp_mode() &&
(read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
+ u64 val;
+
generic_handle_domain_irq(aic_irqc->hw_domain,
AIC_FIQ_HWIRQ(AIC_VGIC_MI));
if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EL2_En) &&
- read_sysreg_s(SYS_ICH_MISR_EL2))) {
- pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
+ (val = read_sysreg_s(SYS_ICH_MISR_EL2)))) {
+ pr_err_ratelimited("vGIC IRQ fired and not handled by KVM (MISR=%llx), disabling.\n",
+ val);
sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EL2_En, 0);
}
}
@@ -578,16 +581,9 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
}
if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
- (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
- int irq;
- if (cpumask_test_cpu(smp_processor_id(),
- &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
- irq = AIC_CPU_PMU_P;
- else
- irq = AIC_CPU_PMU_E;
+ (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT))
generic_handle_domain_irq(aic_irqc->hw_domain,
- AIC_FIQ_HWIRQ(irq));
- }
+ AIC_FIQ_HWIRQ(AIC_CPU_PMU_P));
if (static_branch_likely(&use_fast_ipi) &&
(FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ) &&
@@ -632,18 +628,7 @@ static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
handle_fasteoi_irq, NULL, NULL);
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
} else {
- int fiq = FIELD_GET(AIC_EVENT_NUM, hw);
-
- switch (fiq) {
- case AIC_CPU_PMU_P:
- case AIC_CPU_PMU_E:
- irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
- break;
- default:
- irq_set_percpu_devid(irq);
- break;
- }
-
+ irq_set_percpu_devid(irq);
irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
handle_percpu_devid_irq, NULL, NULL);
}
@@ -651,6 +636,33 @@ static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
return 0;
}
+static int aic_irq_get_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
+{
+ const struct cpumask *mask;
+ u32 intid;
+
+ info->flags = 0;
+ info->affinity = NULL;
+
+ if (fwspec->param[0] != AIC_FIQ)
+ return 0;
+
+ if (fwspec->param_count == 3)
+ intid = fwspec->param[1];
+ else
+ intid = fwspec->param[2];
+
+ if (aic_irqc->fiq_aff[intid])
+ mask = &aic_irqc->fiq_aff[intid]->aff;
+ else
+ mask = cpu_possible_mask;
+
+ info->affinity = mask;
+ info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID;
+
+ return 0;
+}
+
static int aic_irq_domain_translate(struct irq_domain *id,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
@@ -705,6 +717,10 @@ static int aic_irq_domain_translate(struct irq_domain *id,
break;
}
}
+
+ /* Merge the two PMUs on a single interrupt */
+ if (*hwirq == AIC_CPU_PMU_E)
+ *hwirq = AIC_CPU_PMU_P;
break;
default:
return -EINVAL;
@@ -750,9 +766,10 @@ static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
}
static const struct irq_domain_ops aic_irq_domain_ops = {
- .translate = aic_irq_domain_translate,
- .alloc = aic_irq_domain_alloc,
- .free = aic_irq_domain_free,
+ .translate = aic_irq_domain_translate,
+ .alloc = aic_irq_domain_alloc,
+ .free = aic_irq_domain_free,
+ .get_fwspec_info = aic_irq_get_fwspec_info,
};
/*
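The new .get_fwspec_info callback (also added to GICv3 later in this series) lets the core query an interrupt's CPU affinity straight from the firmware specifier, replacing the per-FIQ partition setup removed above. A minimal sketch of the contract, assuming the struct irq_fwspec_info fields used in aic_irq_get_fwspec_info():

static int foo_get_fwspec_info(struct irq_fwspec *fwspec,
			       struct irq_fwspec_info *info)
{
	/* default: no affinity information for this specifier */
	info->flags = 0;
	info->affinity = NULL;

	if (!foo_fwspec_is_percpu(fwspec))		/* hypothetical check */
		return 0;

	info->affinity = foo_lookup_affinity(fwspec);	/* hypothetical lookup */
	info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID;
	return 0;
}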
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index a44c49e985b7..a4d03a2d1569 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -726,7 +726,7 @@ static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
} while (1);
}
-static int mpic_suspend(void)
+static int mpic_suspend(void *data)
{
struct mpic *mpic = mpic_data;
@@ -735,7 +735,7 @@ static int mpic_suspend(void)
return 0;
}
-static void mpic_resume(void)
+static void mpic_resume(void *data)
{
struct mpic *mpic = mpic_data;
bool src0, src1;
@@ -788,11 +788,15 @@ static void mpic_resume(void)
mpic_ipi_resume(mpic);
}
-static struct syscore_ops mpic_syscore_ops = {
+static const struct syscore_ops mpic_syscore_ops = {
.suspend = mpic_suspend,
.resume = mpic_resume,
};
+static struct syscore mpic_syscore = {
+ .ops = &mpic_syscore_ops,
+};
+
static int __init mpic_map_region(struct device_node *np, int index,
void __iomem **base, phys_addr_t *phys_base)
{
@@ -905,7 +909,7 @@ static int __init mpic_of_init(struct device_node *node, struct device_node *par
mpic_handle_cascade_irq, mpic);
}
- register_syscore_ops(&mpic_syscore_ops);
+ register_syscore(&mpic_syscore);
return 0;
}
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index 1c7045467c48..bee59c8c4c93 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -1,61 +1,78 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Aspeed AST24XX, AST25XX, and AST26XX SCU Interrupt Controller
+ * Aspeed AST24XX, AST25XX, AST26XX, and AST27XX SCU Interrupt Controller
* Copyright 2019 IBM Corporation
*
* Eddie James <eajames@linux.ibm.com>
*/
#include <linux/bitops.h>
+#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/mfd/syscon.h>
+#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/regmap.h>
-#define ASPEED_SCU_IC_REG 0x018
-#define ASPEED_SCU_IC_SHIFT 0
-#define ASPEED_SCU_IC_ENABLE GENMASK(15, ASPEED_SCU_IC_SHIFT)
-#define ASPEED_SCU_IC_NUM_IRQS 7
#define ASPEED_SCU_IC_STATUS GENMASK(28, 16)
#define ASPEED_SCU_IC_STATUS_SHIFT 16
+#define AST2700_SCU_IC_STATUS GENMASK(15, 0)
+
+struct aspeed_scu_ic_variant {
+ const char *compatible;
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ unsigned long ier;
+ unsigned long isr;
+};
-#define ASPEED_AST2600_SCU_IC0_REG 0x560
-#define ASPEED_AST2600_SCU_IC0_SHIFT 0
-#define ASPEED_AST2600_SCU_IC0_ENABLE \
- GENMASK(5, ASPEED_AST2600_SCU_IC0_SHIFT)
-#define ASPEED_AST2600_SCU_IC0_NUM_IRQS 6
+#define SCU_VARIANT(_compat, _shift, _enable, _num, _ier, _isr) { \
+ .compatible = _compat, \
+ .irq_shift = _shift, \
+ .irq_enable = _enable, \
+ .num_irqs = _num, \
+ .ier = _ier, \
+ .isr = _isr, \
+}
-#define ASPEED_AST2600_SCU_IC1_REG 0x570
-#define ASPEED_AST2600_SCU_IC1_SHIFT 4
-#define ASPEED_AST2600_SCU_IC1_ENABLE \
- GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
-#define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2
+static const struct aspeed_scu_ic_variant scu_ic_variants[] __initconst = {
+ SCU_VARIANT("aspeed,ast2400-scu-ic", 0, GENMASK(15, 0), 7, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2500-scu-ic", 0, GENMASK(15, 0), 7, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2600-scu-ic0", 0, GENMASK(5, 0), 6, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2600-scu-ic1", 4, GENMASK(5, 4), 2, 0x00, 0x00),
+ SCU_VARIANT("aspeed,ast2700-scu-ic0", 0, GENMASK(3, 0), 4, 0x00, 0x04),
+ SCU_VARIANT("aspeed,ast2700-scu-ic1", 0, GENMASK(3, 0), 4, 0x00, 0x04),
+ SCU_VARIANT("aspeed,ast2700-scu-ic2", 0, GENMASK(3, 0), 4, 0x04, 0x00),
+ SCU_VARIANT("aspeed,ast2700-scu-ic3", 0, GENMASK(1, 0), 2, 0x04, 0x00),
+};
struct aspeed_scu_ic {
- unsigned long irq_enable;
- unsigned long irq_shift;
- unsigned int num_irqs;
- unsigned int reg;
- struct regmap *scu;
- struct irq_domain *irq_domain;
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
+ void __iomem *base;
+ struct irq_domain *irq_domain;
+ unsigned long ier;
+ unsigned long isr;
};
-static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+static inline bool scu_has_split_isr(struct aspeed_scu_ic *scu)
+{
+ return scu->ier != scu->isr;
+}
+
+static void aspeed_scu_ic_irq_handler_combined(struct irq_desc *desc)
{
- unsigned int sts;
- unsigned long bit;
- unsigned long enabled;
- unsigned long max;
- unsigned long status;
struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
+ unsigned long bit, enabled, max, status;
+ unsigned int sts, mask;
chained_irq_enter(chip, desc);
+ mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
/*
* The SCU IC has just one register to control its operation and read
* status. The interrupt enable bits occupy the lower 16 bits of the
@@ -66,7 +83,7 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
* shifting the status down to get the mapping and then back up to
* clear the bit.
*/
- regmap_read(scu_ic->scu, scu_ic->reg, &sts);
+ sts = readl(scu_ic->base);
enabled = sts & scu_ic->irq_enable;
status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
@@ -74,43 +91,83 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
max = scu_ic->num_irqs + bit;
for_each_set_bit_from(bit, &status, max) {
- generic_handle_domain_irq(scu_ic->irq_domain,
- bit - scu_ic->irq_shift);
+ generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift);
+ writel((readl(scu_ic->base) & ~mask) | BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT),
+ scu_ic->base);
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void aspeed_scu_ic_irq_handler_split(struct irq_desc *desc)
+{
+ struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long bit, enabled, max, status;
+ unsigned int sts, mask;
- regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
- BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
+ chained_irq_enter(chip, desc);
+
+ mask = scu_ic->irq_enable;
+ sts = readl(scu_ic->base + scu_ic->ier);
+ enabled = sts & scu_ic->irq_enable;
+ sts = readl(scu_ic->base + scu_ic->isr);
+ status = sts & enabled;
+
+ bit = scu_ic->irq_shift;
+ max = scu_ic->num_irqs + bit;
+
+ for_each_set_bit_from(bit, &status, max) {
+ generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift);
+ /* Clear interrupt */
+ writel(BIT(bit), scu_ic->base + scu_ic->isr);
}
chained_irq_exit(chip, desc);
}
-static void aspeed_scu_ic_irq_mask(struct irq_data *data)
+static void aspeed_scu_ic_irq_mask_combined(struct irq_data *data)
{
struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
- unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+ unsigned int mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
/*
* Status bits are cleared by writing 1. In order to prevent the mask
* operation from clearing the status bits, they should be under the
* mask and written with 0.
*/
- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
+ writel(readl(scu_ic->base) & ~mask, scu_ic->base);
}
-static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
+static void aspeed_scu_ic_irq_unmask_combined(struct irq_data *data)
{
struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
- unsigned int mask = bit |
- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+ unsigned int mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
/*
* Status bits are cleared by writing 1. In order to prevent the unmask
* operation from clearing the status bits, they should be under the
* mask and written with 0.
*/
- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
+ writel((readl(scu_ic->base) & ~mask) | bit, scu_ic->base);
+}
+
+static void aspeed_scu_ic_irq_mask_split(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift);
+
+ writel(readl(scu_ic->base) & ~mask, scu_ic->base + scu_ic->ier);
+}
+
+static void aspeed_scu_ic_irq_unmask_split(struct irq_data *data)
+{
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+
+ writel(readl(scu_ic->base) | bit, scu_ic->base + scu_ic->ier);
}
static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
@@ -120,17 +177,29 @@ static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
return -EINVAL;
}
-static struct irq_chip aspeed_scu_ic_chip = {
+static struct irq_chip aspeed_scu_ic_chip_combined = {
.name = "aspeed-scu-ic",
- .irq_mask = aspeed_scu_ic_irq_mask,
- .irq_unmask = aspeed_scu_ic_irq_unmask,
- .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+ .irq_mask = aspeed_scu_ic_irq_mask_combined,
+ .irq_unmask = aspeed_scu_ic_irq_unmask_combined,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
+};
+
+static struct irq_chip aspeed_scu_ic_chip_split = {
+ .name = "ast2700-scu-ic",
+ .irq_mask = aspeed_scu_ic_irq_mask_split,
+ .irq_unmask = aspeed_scu_ic_irq_unmask_split,
+ .irq_set_affinity = aspeed_scu_ic_irq_set_affinity,
};
static int aspeed_scu_ic_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
- irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
+ struct aspeed_scu_ic *scu_ic = domain->host_data;
+
+ if (scu_has_split_isr(scu_ic))
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_split, handle_level_irq);
+ else
+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip_combined, handle_level_irq);
irq_set_chip_data(irq, domain->host_data);
return 0;
@@ -143,21 +212,21 @@ static const struct irq_domain_ops aspeed_scu_ic_domain_ops = {
static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
struct device_node *node)
{
- int irq;
- int rc = 0;
+ int irq, rc = 0;
- if (!node->parent) {
- rc = -ENODEV;
+ scu_ic->base = of_iomap(node, 0);
+ if (!scu_ic->base) {
+ rc = -ENOMEM;
goto err;
}
- scu_ic->scu = syscon_node_to_regmap(node->parent);
- if (IS_ERR(scu_ic->scu)) {
- rc = PTR_ERR(scu_ic->scu);
- goto err;
+ if (scu_has_split_isr(scu_ic)) {
+ writel(AST2700_SCU_IC_STATUS, scu_ic->base + scu_ic->isr);
+ writel(0, scu_ic->base + scu_ic->ier);
+ } else {
+ writel(ASPEED_SCU_IC_STATUS, scu_ic->base);
+ writel(0, scu_ic->base);
}
- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_ENABLE, 0);
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
@@ -166,75 +235,60 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
}
scu_ic->irq_domain = irq_domain_create_linear(of_fwnode_handle(node), scu_ic->num_irqs,
- &aspeed_scu_ic_domain_ops,
- scu_ic);
+ &aspeed_scu_ic_domain_ops, scu_ic);
if (!scu_ic->irq_domain) {
rc = -ENOMEM;
goto err;
}
- irq_set_chained_handler_and_data(irq, aspeed_scu_ic_irq_handler,
+ irq_set_chained_handler_and_data(irq, scu_has_split_isr(scu_ic) ?
+ aspeed_scu_ic_irq_handler_split :
+ aspeed_scu_ic_irq_handler_combined,
scu_ic);
return 0;
err:
kfree(scu_ic);
-
return rc;
}
-static int __init aspeed_scu_ic_of_init(struct device_node *node,
- struct device_node *parent)
+static const struct aspeed_scu_ic_variant *aspeed_scu_ic_find_variant(struct device_node *np)
{
- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
-
- if (!scu_ic)
- return -ENOMEM;
-
- scu_ic->irq_enable = ASPEED_SCU_IC_ENABLE;
- scu_ic->irq_shift = ASPEED_SCU_IC_SHIFT;
- scu_ic->num_irqs = ASPEED_SCU_IC_NUM_IRQS;
- scu_ic->reg = ASPEED_SCU_IC_REG;
-
- return aspeed_scu_ic_of_init_common(scu_ic, node);
+ for (int i = 0; i < ARRAY_SIZE(scu_ic_variants); i++) {
+ if (of_device_is_compatible(np, scu_ic_variants[i].compatible))
+ return &scu_ic_variants[i];
+ }
+ return NULL;
}
-static int __init aspeed_ast2600_scu_ic0_of_init(struct device_node *node,
- struct device_node *parent)
+static int __init aspeed_scu_ic_of_init(struct device_node *node, struct device_node *parent)
{
- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+ const struct aspeed_scu_ic_variant *variant;
+ struct aspeed_scu_ic *scu_ic;
- if (!scu_ic)
- return -ENOMEM;
-
- scu_ic->irq_enable = ASPEED_AST2600_SCU_IC0_ENABLE;
- scu_ic->irq_shift = ASPEED_AST2600_SCU_IC0_SHIFT;
- scu_ic->num_irqs = ASPEED_AST2600_SCU_IC0_NUM_IRQS;
- scu_ic->reg = ASPEED_AST2600_SCU_IC0_REG;
-
- return aspeed_scu_ic_of_init_common(scu_ic, node);
-}
-
-static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
- struct device_node *parent)
-{
- struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
+ variant = aspeed_scu_ic_find_variant(node);
+ if (!variant)
+ return -ENODEV;
+ scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
if (!scu_ic)
return -ENOMEM;
- scu_ic->irq_enable = ASPEED_AST2600_SCU_IC1_ENABLE;
- scu_ic->irq_shift = ASPEED_AST2600_SCU_IC1_SHIFT;
- scu_ic->num_irqs = ASPEED_AST2600_SCU_IC1_NUM_IRQS;
- scu_ic->reg = ASPEED_AST2600_SCU_IC1_REG;
+ scu_ic->irq_enable = variant->irq_enable;
+ scu_ic->irq_shift = variant->irq_shift;
+ scu_ic->num_irqs = variant->num_irqs;
+ scu_ic->ier = variant->ier;
+ scu_ic->isr = variant->isr;
return aspeed_scu_ic_of_init_common(scu_ic, node);
}
IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
-IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
- aspeed_ast2600_scu_ic0_of_init);
-IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
- aspeed_ast2600_scu_ic1_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic0, "aspeed,ast2700-scu-ic0", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic1, "aspeed,ast2700-scu-ic1", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic2, "aspeed,ast2700-scu-ic2", aspeed_scu_ic_of_init);
+IRQCHIP_DECLARE(ast2700_scu_ic3, "aspeed,ast2700-scu-ic3", aspeed_scu_ic_of_init);
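In the combined layout a single SCU register holds the enables in bits 15:0 and write-one-to-clear status in bits 28:16, so every read-modify-write must strip the status field to avoid acknowledging pending interrupts by accident; the AST2700 split layout gives IER and ISR their own offsets instead. A worked sketch of the combined-register ack, assuming the layout described in the handler comment above:

/* Combined layout: enables in [15:0], W1C status in [28:16]. */
static void foo_ack_combined(void __iomem *base, unsigned int hwirq,
			     u32 enable_mask)
{
	u32 sts = readl(base);
	u32 w1c = enable_mask << 16;	/* status bits that clear on a write of 1 */

	/* drop all status bits from the RMW, then write back only ours */
	writel((sts & ~w1c) | BIT(hwirq + 16), base);
}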
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 3cad30a40c19..e68853815c7a 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -187,20 +187,11 @@ void __init aic_common_rtt_irq_fixup(void)
static void __init aic_common_irq_fixup(const struct of_device_id *matches)
{
- struct device_node *root = of_find_node_by_path("/");
- const struct of_device_id *match;
+ void (*fixup)(void);
- if (!root)
- return;
-
- match = of_match_node(matches, root);
-
- if (match) {
- void (*fixup)(void) = match->data;
+ fixup = of_machine_get_match_data(matches);
+ if (fixup)
fixup();
- }
-
- of_node_put(root);
}
struct irq_domain *__init aic_common_of_init(struct device_node *node,
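of_machine_get_match_data() collapses the old find-root/match/put sequence into a single call: it matches the machine root node against the table and returns the matching entry's .data, or NULL. A minimal usage sketch with a hypothetical fixup table:

static void board_a_fixup(void);	/* hypothetical */

static const struct of_device_id foo_fixups[] __initconst = {
	{ .compatible = "vendor,board-a", .data = board_a_fixup },
	{ /* sentinel */ }
};

static void __init foo_apply_fixup(void)
{
	void (*fixup)(void) = of_machine_get_match_data(foo_fixups);

	if (fixup)
		fixup();
}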
diff --git a/drivers/irqchip/irq-bcm2712-mip.c b/drivers/irqchip/irq-bcm2712-mip.c
index 9bd7bc0bf6d5..4761974ad650 100644
--- a/drivers/irqchip/irq-bcm2712-mip.c
+++ b/drivers/irqchip/irq-bcm2712-mip.c
@@ -232,17 +232,12 @@ err_put:
return ret;
}
-static int __init mip_of_msi_init(struct device_node *node, struct device_node *parent)
+static int mip_msi_probe(struct platform_device *pdev, struct device_node *parent)
{
- struct platform_device *pdev;
+ struct device_node *node = pdev->dev.of_node;
struct mip_priv *mip;
int ret;
- pdev = of_find_device_by_node(node);
- of_node_put(node);
- if (!pdev)
- return -EPROBE_DEFER;
-
mip = kzalloc(sizeof(*mip), GFP_KERNEL);
if (!mip)
return -ENOMEM;
@@ -285,7 +280,7 @@ err_priv:
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(mip_msi)
-IRQCHIP_MATCH("brcm,bcm2712-mip", mip_of_msi_init)
+IRQCHIP_MATCH("brcm,bcm2712-mip", mip_msi_probe)
IRQCHIP_PLATFORM_DRIVER_END(mip_msi)
MODULE_DESCRIPTION("Broadcom BCM2712 MSI-X interrupt controller");
MODULE_AUTHOR("Phil Elwell <phil@raspberrypi.com>");
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 04fac0cc857f..45c4824be92f 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -82,12 +82,6 @@ static inline unsigned int reg_status(struct bcm7038_l1_chip *intc,
return (0 * intc->n_words + word) * sizeof(u32);
}
-static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc,
- unsigned int word)
-{
- return (1 * intc->n_words + word) * sizeof(u32);
-}
-
static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc,
unsigned int word)
{
@@ -219,9 +213,8 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
}
#endif
-static int __init bcm7038_l1_init_one(struct device_node *dn,
- unsigned int idx,
- struct bcm7038_l1_chip *intc)
+static int bcm7038_l1_init_one(struct device_node *dn, unsigned int idx,
+ struct bcm7038_l1_chip *intc)
{
struct resource res;
resource_size_t sz;
@@ -292,7 +285,7 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
static LIST_HEAD(bcm7038_l1_intcs_list);
static DEFINE_RAW_SPINLOCK(bcm7038_l1_intcs_lock);
-static int bcm7038_l1_suspend(void)
+static int bcm7038_l1_suspend(void *data)
{
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
@@ -318,7 +311,7 @@ static int bcm7038_l1_suspend(void)
return 0;
}
-static void bcm7038_l1_resume(void)
+static void bcm7038_l1_resume(void *data)
{
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
@@ -339,11 +332,15 @@ static void bcm7038_l1_resume(void)
}
}
-static struct syscore_ops bcm7038_l1_syscore_ops = {
+static const struct syscore_ops bcm7038_l1_syscore_ops = {
.suspend = bcm7038_l1_suspend,
.resume = bcm7038_l1_resume,
};
+static struct syscore bcm7038_l1_syscore = {
+ .ops = &bcm7038_l1_syscore_ops,
+};
+
static int bcm7038_l1_set_wake(struct irq_data *d, unsigned int on)
{
struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d);
@@ -395,9 +392,9 @@ static const struct irq_domain_ops bcm7038_l1_domain_ops = {
.map = bcm7038_l1_map,
};
-static int __init bcm7038_l1_of_init(struct device_node *dn,
- struct device_node *parent)
+static int bcm7038_l1_probe(struct platform_device *pdev, struct device_node *parent)
{
+ struct device_node *dn = pdev->dev.of_node;
struct bcm7038_l1_chip *intc;
int idx, ret;
@@ -431,7 +428,7 @@ static int __init bcm7038_l1_of_init(struct device_node *dn,
raw_spin_unlock(&bcm7038_l1_intcs_lock);
if (list_is_singular(&bcm7038_l1_intcs_list))
- register_syscore_ops(&bcm7038_l1_syscore_ops);
+ register_syscore(&bcm7038_l1_syscore);
#endif
pr_info("registered BCM7038 L1 intc (%pOF, IRQs: %d)\n",
@@ -455,7 +452,7 @@ out_free:
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7038_l1)
-IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_of_init)
+IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_probe)
IRQCHIP_PLATFORM_DRIVER_END(bcm7038_l1)
MODULE_DESCRIPTION("Broadcom STB 7038-style L1/L2 interrupt controller");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index ff22c3104401..518c9d4366a5 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -143,8 +143,7 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
return 0;
}
-static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn,
- struct bcm7120_l2_intc_data *data)
+static int bcm7120_l2_intc_iomap_7120(struct device_node *dn, struct bcm7120_l2_intc_data *data)
{
int ret;
@@ -177,8 +176,7 @@ static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn,
return 0;
}
-static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
- struct bcm7120_l2_intc_data *data)
+static int bcm7120_l2_intc_iomap_3380(struct device_node *dn, struct bcm7120_l2_intc_data *data)
{
unsigned int gc_idx;
@@ -208,15 +206,14 @@ static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn,
return 0;
}
-static int __init bcm7120_l2_intc_probe(struct device_node *dn,
- struct device_node *parent,
+static int bcm7120_l2_intc_probe(struct platform_device *pdev, struct device_node *parent,
int (*iomap_regs_fn)(struct device_node *,
- struct bcm7120_l2_intc_data *),
+ struct bcm7120_l2_intc_data *),
const char *intc_name)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct device_node *dn = pdev->dev.of_node;
struct bcm7120_l2_intc_data *data;
- struct platform_device *pdev;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
int ret = 0;
@@ -227,14 +224,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (!data)
return -ENOMEM;
- pdev = of_find_device_by_node(dn);
- if (!pdev) {
- ret = -ENODEV;
- goto out_free_data;
- }
-
data->num_parent_irqs = platform_irq_count(pdev);
- put_device(&pdev->dev);
if (data->num_parent_irqs <= 0) {
pr_err("invalid number of parent interrupts\n");
ret = -ENOMEM;
@@ -334,22 +324,19 @@ out_unmap:
if (data->map_base[idx])
iounmap(data->map_base[idx]);
}
-out_free_data:
kfree(data);
return ret;
}
-static int __init bcm7120_l2_intc_probe_7120(struct device_node *dn,
- struct device_node *parent)
+static int bcm7120_l2_intc_probe_7120(struct platform_device *pdev, struct device_node *parent)
{
- return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120,
+ return bcm7120_l2_intc_probe(pdev, parent, bcm7120_l2_intc_iomap_7120,
"BCM7120 L2");
}
-static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
- struct device_node *parent)
+static int bcm7120_l2_intc_probe_3380(struct platform_device *pdev, struct device_node *parent)
{
- return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380,
+ return bcm7120_l2_intc_probe(pdev, parent, bcm7120_l2_intc_iomap_3380,
"BCM3380 L2");
}
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 1bec5b2cd3f0..bb7078d6524f 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -138,13 +138,12 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
}
-static int __init brcmstb_l2_intc_of_init(struct device_node *np,
- struct device_node *parent,
- const struct brcmstb_intc_init_params
- *init_params)
+static int brcmstb_l2_intc_probe(struct platform_device *pdev, struct device_node *parent,
+ const struct brcmstb_intc_init_params *init_params)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
unsigned int set = 0;
+ struct device_node *np = pdev->dev.of_node;
struct brcmstb_l2_intc_data *data;
struct irq_chip_type *ct;
int ret;
@@ -257,23 +256,21 @@ out_free:
return ret;
}
-static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
- struct device_node *parent)
+static int brcmstb_l2_edge_intc_probe(struct platform_device *pdev, struct device_node *parent)
{
- return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
+ return brcmstb_l2_intc_probe(pdev, parent, &l2_edge_intc_init);
}
-static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
- struct device_node *parent)
+static int brcmstb_l2_lvl_intc_probe(struct platform_device *pdev, struct device_node *parent)
{
- return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
+ return brcmstb_l2_intc_probe(pdev, parent, &l2_lvl_intc_init);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(brcmstb_l2)
-IRQCHIP_MATCH("brcm,l2-intc", brcmstb_l2_edge_intc_of_init)
-IRQCHIP_MATCH("brcm,hif-spi-l2-intc", brcmstb_l2_edge_intc_of_init)
-IRQCHIP_MATCH("brcm,upg-aux-aon-l2-intc", brcmstb_l2_edge_intc_of_init)
-IRQCHIP_MATCH("brcm,bcm7271-l2-intc", brcmstb_l2_lvl_intc_of_init)
+IRQCHIP_MATCH("brcm,l2-intc", brcmstb_l2_edge_intc_probe)
+IRQCHIP_MATCH("brcm,hif-spi-l2-intc", brcmstb_l2_edge_intc_probe)
+IRQCHIP_MATCH("brcm,upg-aux-aon-l2-intc", brcmstb_l2_edge_intc_probe)
+IRQCHIP_MATCH("brcm,bcm7271-l2-intc", brcmstb_l2_lvl_intc_probe)
IRQCHIP_PLATFORM_DRIVER_END(brcmstb_l2)
MODULE_DESCRIPTION("Broadcom STB generic L2 interrupt controller");
MODULE_LICENSE("GPL v2");
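The bcm2712-mip, bcm7038-l1, bcm7120-l2 and brcmstb-l2 hunks all make the same conversion: IRQCHIP_PLATFORM_DRIVER match callbacks now receive the platform device directly, so the of_find_device_by_node()/put_device() dance and the -EPROBE_DEFER fallback disappear. A minimal sketch of the converted shape for a hypothetical foo driver:

static int foo_intc_probe(struct platform_device *pdev,
			  struct device_node *parent)
{
	struct device_node *np = pdev->dev.of_node;

	/* the core already resolved pdev; no of_find_device_by_node() */
	return foo_intc_init(np, platform_irq_count(pdev));	/* hypothetical */
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(foo_intc)
IRQCHIP_MATCH("vendor,foo-intc", foo_intc_probe)
IRQCHIP_PLATFORM_DRIVER_END(foo_intc)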
diff --git a/drivers/irqchip/irq-gic-its-msi-parent.c b/drivers/irqchip/irq-gic-its-msi-parent.c
index eb1473f1448a..12f45228c867 100644
--- a/drivers/irqchip/irq-gic-its-msi-parent.c
+++ b/drivers/irqchip/irq-gic-its-msi-parent.c
@@ -142,83 +142,38 @@ static int its_v5_pci_msi_prepare(struct irq_domain *domain, struct device *dev,
#define its_v5_pci_msi_prepare NULL
#endif /* !CONFIG_PCI_MSI */
-static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
- u32 *dev_id)
+static int of_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev, u32 *dev_id,
+ phys_addr_t *pa)
{
- int ret, index = 0;
+ struct of_phandle_iterator it;
+ int ret;
/* Suck the DeviceID out of the msi-parent property */
- do {
- struct of_phandle_args args;
-
- ret = of_parse_phandle_with_args(dev->of_node,
- "msi-parent", "#msi-cells",
- index, &args);
- if (args.np == irq_domain_get_of_node(domain)) {
- if (WARN_ON(args.args_count != 1))
- return -EINVAL;
- *dev_id = args.args[0];
- break;
- }
- index++;
- } while (!ret);
-
- if (ret) {
- struct device_node *np = NULL;
+ of_for_each_phandle(&it, ret, dev->of_node, "msi-parent", "#msi-cells", -1) {
+ /* GICv5 ITS domain matches the MSI controller node parent */
+ struct device_node *np __free(device_node) = pa ? of_get_parent(it.node)
+ : of_node_get(it.node);
- ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id);
- if (np)
- of_node_put(np);
- }
+ if (np == irq_domain_get_of_node(domain)) {
+ u32 args;
- return ret;
-}
+ if (WARN_ON(of_phandle_iterator_args(&it, &args, 1) != 1))
+ ret = -EINVAL;
-static int of_v5_pmsi_get_msi_info(struct irq_domain *domain, struct device *dev,
- u32 *dev_id, phys_addr_t *pa)
-{
- int ret, index = 0;
- /*
- * Retrieve the DeviceID and the ITS translate frame node pointer
- * out of the msi-parent property.
- */
- do {
- struct of_phandle_args args;
-
- ret = of_parse_phandle_with_args(dev->of_node,
- "msi-parent", "#msi-cells",
- index, &args);
- if (ret)
- break;
- /*
- * The IRQ domain fwnode is the msi controller parent
- * in GICv5 (where the msi controller nodes are the
- * ITS translate frames).
- */
- if (args.np->parent == irq_domain_get_of_node(domain)) {
- if (WARN_ON(args.args_count != 1))
- return -EINVAL;
- *dev_id = args.args[0];
-
- ret = its_translate_frame_address(args.np, pa);
- if (ret)
- return -ENODEV;
- break;
- }
- index++;
- } while (!ret);
+ if (!ret && pa)
+ ret = its_translate_frame_address(it.node, pa);
- if (ret) {
- struct device_node *np = NULL;
+ if (!ret)
+ *dev_id = args;
- ret = of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &np, dev_id);
- if (np) {
- ret = its_translate_frame_address(np, pa);
- of_node_put(np);
+ of_node_put(it.node);
+ return ret;
}
}
- return ret;
+ struct device_node *msi_ctrl __free(device_node) = NULL;
+
+ return of_map_id(dev->of_node, dev->id, "msi-map", "msi-map-mask", &msi_ctrl, dev_id);
}
int __weak iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
@@ -234,7 +189,7 @@ static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev,
int ret;
if (dev->of_node)
- ret = of_pmsi_get_dev_id(domain->parent, dev, &dev_id);
+ ret = of_pmsi_get_msi_info(domain->parent, dev, &dev_id, NULL);
else
ret = iort_pmsi_get_dev_id(dev, &dev_id);
if (ret)
@@ -262,7 +217,7 @@ static int its_v5_pmsi_prepare(struct irq_domain *domain, struct device *dev,
if (!dev->of_node)
return -ENODEV;
- ret = of_v5_pmsi_get_msi_info(domain->parent, dev, &dev_id, &pa);
+ ret = of_pmsi_get_msi_info(domain->parent, dev, &dev_id, &pa);
if (ret)
return ret;
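The rewrite above folds the v1 and v5 msi-parent walkers into one of_for_each_phandle() loop, with __free(device_node) dropping the extra node reference automatically at the end of each iteration scope. A minimal sketch of the scoped-cleanup idiom, assuming the cleanup.h-based device_node auto-put used in the hunk:

struct of_phandle_iterator it;
int err;

of_for_each_phandle(&it, err, dev->of_node, "msi-parent", "#msi-cells", -1) {
	/* extra ref released automatically when np goes out of scope */
	struct device_node *np __free(device_node) = of_node_get(it.node);

	if (np == target_node) {		/* hypothetical match */
		of_node_put(it.node);		/* the iterator's own reference */
		return 0;
	}
}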
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 24ef5af569fe..8a3410c2b7b5 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -153,14 +153,19 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
{
msi_alloc_info_t *info = args;
struct v2m_data *v2m = NULL, *tmp;
- int hwirq, offset, i, err = 0;
+ int hwirq, i, err = 0;
+ unsigned long offset;
+ unsigned long align_mask = nr_irqs - 1;
spin_lock(&v2m_lock);
list_for_each_entry(tmp, &v2m_nodes, entry) {
- offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
- get_count_order(nr_irqs));
- if (offset >= 0) {
+ unsigned long align_off = tmp->spi_start - (tmp->spi_start & ~align_mask);
+
+ offset = bitmap_find_next_zero_area_off(tmp->bm, tmp->nr_spis, 0,
+ nr_irqs, align_mask, align_off);
+ if (offset < tmp->nr_spis) {
v2m = tmp;
+ bitmap_set(v2m->bm, offset, nr_irqs);
break;
}
}
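The gicv2m change switches multi-MSI allocation to bitmap_find_next_zero_area_off(): PCI multi-MSI requires the block of vectors to be size-aligned in hardware numbering, and since the bitmap starts at spi_start rather than zero, the search is biased by the base's misalignment (note that spi_start - (spi_start & ~align_mask) is simply spi_start & align_mask). A worked sketch of the arithmetic, assuming bm and nr_spis from the surrounding code and an example base of SPI 98:

unsigned long nr_irqs = 4;				/* power-of-two multi-MSI block */
unsigned long align_mask = nr_irqs - 1;			/* 0x3 */
unsigned long spi_start = 98;				/* example SPI base */
unsigned long align_off = spi_start & align_mask;	/* 98 & 3 == 2 */
unsigned long offset;

offset = bitmap_find_next_zero_area_off(bm, nr_spis, 0, nr_irqs,
					align_mask, align_off);
/* spi_start + offset is then 4-aligned, as multi-MSI requires */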
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 11549d85f23b..b5785472765a 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -30,7 +30,7 @@ static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain,
u32 out_id;
of_node = irq_domain_get_of_node(domain);
- out_id = of_node ? of_msi_map_id(&mc_dev->dev, of_node, mc_dev->icid) :
+ out_id = of_node ? of_msi_xlate(&mc_dev->dev, &of_node, mc_dev->icid) :
iort_msi_map_id(&mc_dev->dev, mc_dev->icid);
return out_id;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 467cb78435a9..ada585bfa451 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4992,7 +4992,7 @@ static void its_enable_quirks(struct its_node *its)
its_quirks, its);
}
-static int its_save_disable(void)
+static int its_save_disable(void *data)
{
struct its_node *its;
int err = 0;
@@ -5028,7 +5028,7 @@ err:
return err;
}
-static void its_restore_enable(void)
+static void its_restore_enable(void *data)
{
struct its_node *its;
int ret;
@@ -5088,11 +5088,15 @@ static void its_restore_enable(void)
raw_spin_unlock(&its_lock);
}
-static struct syscore_ops its_syscore_ops = {
+static const struct syscore_ops its_syscore_ops = {
.suspend = its_save_disable,
.resume = its_restore_enable,
};
+static struct syscore its_syscore = {
+ .ops = &its_syscore_ops,
+};
+
static void __init __iomem *its_map_one(struct resource *res, int *err)
{
void __iomem *its_base;
@@ -5864,7 +5868,7 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}
}
- register_syscore_ops(&its_syscore_ops);
+ register_syscore(&its_syscore);
return 0;
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index dbeb85677b08..6607ab58f72e 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -26,7 +26,6 @@
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v3-prio.h>
-#include <linux/irqchip/irq-partition-percpu.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/arm-smccc.h>
@@ -46,8 +45,6 @@ static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2)
#define FLAGS_WORKAROUND_INSECURE (1ULL << 3)
-#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
-
static struct cpumask broken_rdists __read_mostly __maybe_unused;
struct redist_region {
@@ -68,7 +65,13 @@ struct gic_chip_data {
u64 flags;
bool has_rss;
unsigned int ppi_nr;
- struct partition_desc **ppi_descs;
+ struct partition_affinity *parts;
+ unsigned int nr_parts;
+};
+
+struct partition_affinity {
+ cpumask_t mask;
+ struct fwnode_handle *partition_id;
};
#define T241_CHIPS_MAX 4
@@ -228,9 +231,6 @@ static void __init gic_prio_init(void)
!cpus_have_group0);
}
-/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
-static refcount_t *rdist_nmi_refs;
-
static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);
@@ -594,36 +594,6 @@ static void gic_irq_set_prio(struct irq_data *d, u8 prio)
writeb_relaxed(prio, base + offset + index);
}
-static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
-{
- switch (__get_intid_range(hwirq)) {
- case PPI_RANGE:
- return hwirq - 16;
- case EPPI_RANGE:
- return hwirq - EPPI_BASE_INTID + 16;
- default:
- unreachable();
- }
-}
-
-static u32 __gic_get_rdist_index(irq_hw_number_t hwirq)
-{
- switch (__get_intid_range(hwirq)) {
- case SGI_RANGE:
- case PPI_RANGE:
- return hwirq;
- case EPPI_RANGE:
- return hwirq - EPPI_BASE_INTID + 32;
- default:
- unreachable();
- }
-}
-
-static u32 gic_get_rdist_index(struct irq_data *d)
-{
- return __gic_get_rdist_index(d->hwirq);
-}
-
static int gic_irq_nmi_setup(struct irq_data *d)
{
struct irq_desc *desc = irq_to_desc(d->irq);
@@ -644,20 +614,8 @@ static int gic_irq_nmi_setup(struct irq_data *d)
return -EINVAL;
/* desc lock should already be held */
- if (gic_irq_in_rdist(d)) {
- u32 idx = gic_get_rdist_index(d);
-
- /*
- * Setting up a percpu interrupt as NMI, only switch handler
- * for first NMI
- */
- if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) {
- refcount_set(&rdist_nmi_refs[idx], 1);
- desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
- }
- } else {
+ if (!gic_irq_in_rdist(d))
desc->handle_irq = handle_fasteoi_nmi;
- }
gic_irq_set_prio(d, dist_prio_nmi);
@@ -684,15 +642,8 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
return;
/* desc lock should already be held */
- if (gic_irq_in_rdist(d)) {
- u32 idx = gic_get_rdist_index(d);
-
- /* Tearing down NMI, only switch handler for last NMI */
- if (refcount_dec_and_test(&rdist_nmi_refs[idx]))
- desc->handle_irq = handle_percpu_devid_irq;
- } else {
+ if (!gic_irq_in_rdist(d))
desc->handle_irq = handle_fasteoi_irq;
- }
gic_irq_set_prio(d, dist_prio_irq);
}
@@ -1666,13 +1617,6 @@ static int gic_irq_domain_translate(struct irq_domain *d,
case GIC_IRQ_TYPE_LPI: /* LPI */
*hwirq = fwspec->param[1];
break;
- case GIC_IRQ_TYPE_PARTITION:
- *hwirq = fwspec->param[1];
- if (fwspec->param[1] >= 16)
- *hwirq += EPPI_BASE_INTID - 16;
- else
- *hwirq += 16;
- break;
default:
return -EINVAL;
}
@@ -1681,10 +1625,8 @@ static int gic_irq_domain_translate(struct irq_domain *d,
/*
* Make it clear that broken DTs are... broken.
- * Partitioned PPIs are an unfortunate exception.
*/
- WARN_ON(*type == IRQ_TYPE_NONE &&
- fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
+ WARN_ON(*type == IRQ_TYPE_NONE);
return 0;
}
@@ -1741,33 +1683,13 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
}
}
-static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
- irq_hw_number_t hwirq)
-{
- enum gic_intid_range range;
-
- if (!gic_data.ppi_descs)
- return false;
-
- if (!is_of_node(fwspec->fwnode))
- return false;
-
- if (fwspec->param_count < 4 || !fwspec->param[3])
- return false;
-
- range = __get_intid_range(hwirq);
- if (range != PPI_RANGE && range != EPPI_RANGE)
- return false;
-
- return true;
-}
-
static int gic_irq_domain_select(struct irq_domain *d,
struct irq_fwspec *fwspec,
enum irq_domain_bus_token bus_token)
{
- unsigned int type, ret, ppi_idx;
irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
/* Not for us */
if (fwspec->fwnode != d->fwnode)
@@ -1785,60 +1707,61 @@ static int gic_irq_domain_select(struct irq_domain *d,
if (WARN_ON_ONCE(ret))
return 0;
- if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
- return d == gic_data.domain;
-
- /*
- * If this is a PPI and we have a 4th (non-null) parameter,
- * then we need to match the partition domain.
- */
- ppi_idx = __gic_get_ppi_index(hwirq);
- return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
+ return d == gic_data.domain;
}
-static const struct irq_domain_ops gic_irq_domain_ops = {
- .translate = gic_irq_domain_translate,
- .alloc = gic_irq_domain_alloc,
- .free = gic_irq_domain_free,
- .select = gic_irq_domain_select,
-};
-
-static int partition_domain_translate(struct irq_domain *d,
- struct irq_fwspec *fwspec,
- unsigned long *hwirq,
- unsigned int *type)
+static int gic_irq_get_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
{
- unsigned long ppi_intid;
- struct device_node *np;
- unsigned int ppi_idx;
- int ret;
-
- if (!gic_data.ppi_descs)
- return -ENOMEM;
+ const struct cpumask *mask = NULL;
- np = of_find_node_by_phandle(fwspec->param[3]);
- if (WARN_ON(!np))
- return -EINVAL;
+ info->flags = 0;
+ info->affinity = NULL;
- ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
- if (WARN_ON_ONCE(ret))
+ /* ACPI is not capable of describing PPI affinity -- yet */
+ if (!is_of_node(fwspec->fwnode))
return 0;
- ppi_idx = __gic_get_ppi_index(ppi_intid);
- ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
- of_fwnode_handle(np));
- if (ret < 0)
- return ret;
+ /* If the specifier provides an affinity, use it */
+ if (fwspec->param_count == 4 && fwspec->param[3]) {
+ struct fwnode_handle *fw;
+
+ switch (fwspec->param[0]) {
+ case 1: /* PPI */
+ case 3: /* EPPI */
+ break;
+ default:
+ return 0;
+ }
+
+ fw = of_fwnode_handle(of_find_node_by_phandle(fwspec->param[3]));
+ if (!fw)
+ return -ENOENT;
+
+ for (int i = 0; i < gic_data.nr_parts; i++) {
+ if (gic_data.parts[i].partition_id == fw) {
+ mask = &gic_data.parts[i].mask;
+ break;
+ }
+ }
+
+ if (!mask)
+ return -ENOENT;
+ } else {
+ mask = cpu_possible_mask;
+ }
- *hwirq = ret;
- *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ info->affinity = mask;
+ info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID;
return 0;
}
-static const struct irq_domain_ops partition_domain_ops = {
- .translate = partition_domain_translate,
+static const struct irq_domain_ops gic_irq_domain_ops = {
+ .translate = gic_irq_domain_translate,
+ .alloc = gic_irq_domain_alloc,
+ .free = gic_irq_domain_free,
.select = gic_irq_domain_select,
+ .get_fwspec_info = gic_irq_get_fwspec_info,
};
static bool gic_enable_quirk_msm8996(void *data)
@@ -2029,19 +1952,9 @@ static const struct gic_quirk gic_quirks[] = {
static void gic_enable_nmi_support(void)
{
- int i;
-
if (!gic_prio_masking_enabled() || nmi_support_forbidden)
return;
- rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
- sizeof(*rdist_nmi_refs), GFP_KERNEL);
- if (!rdist_nmi_refs)
- return;
-
- for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++)
- refcount_set(&rdist_nmi_refs[i], 0);
-
pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
@@ -2173,12 +2086,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
if (!parts_node)
return;
- gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
- if (!gic_data.ppi_descs)
- goto out_put_node;
-
nr_parts = of_get_child_count(parts_node);
-
if (!nr_parts)
goto out_put_node;
@@ -2231,29 +2139,8 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
part_idx++;
}
- for (i = 0; i < gic_data.ppi_nr; i++) {
- unsigned int irq;
- struct partition_desc *desc;
- struct irq_fwspec ppi_fwspec = {
- .fwnode = gic_data.fwnode,
- .param_count = 3,
- .param = {
- [0] = GIC_IRQ_TYPE_PARTITION,
- [1] = i,
- [2] = IRQ_TYPE_NONE,
- },
- };
-
- irq = irq_create_fwspec_mapping(&ppi_fwspec);
- if (WARN_ON(!irq))
- continue;
- desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
- irq, &partition_domain_ops);
- if (WARN_ON(!desc))
- continue;
-
- gic_data.ppi_descs[i] = desc;
- }
+ gic_data.parts = parts;
+ gic_data.nr_parts = nr_parts;
out_put_node:
of_node_put(parts_node);
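With the partition domains gone, the partition list parsed here survives only as a flat table that gic_irq_get_fwspec_info() searches by partition phandle to recover a CPU mask. A minimal sketch of that lookup, assuming the partition_affinity layout defined earlier in this hunk:

static const struct cpumask *gic_partition_mask(struct fwnode_handle *fw)
{
	for (int i = 0; i < gic_data.nr_parts; i++)
		if (gic_data.parts[i].partition_id == fw)
			return &gic_data.parts[i].mask;

	return NULL;	/* unknown partition phandle */
}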
diff --git a/drivers/irqchip/irq-gic-v5-irs.c b/drivers/irqchip/irq-gic-v5-irs.c
index 13c035727e32..ce2732d649a3 100644
--- a/drivers/irqchip/irq-gic-v5-irs.c
+++ b/drivers/irqchip/irq-gic-v5-irs.c
@@ -571,7 +571,7 @@ static void __init gicv5_irs_init_bases(struct gicv5_irs_chip_data *irs_data,
FIELD_PREP(GICV5_IRS_CR1_IST_RA, GICV5_NO_READ_ALLOC) |
FIELD_PREP(GICV5_IRS_CR1_IC, GICV5_NON_CACHE) |
FIELD_PREP(GICV5_IRS_CR1_OC, GICV5_NON_CACHE);
- irs_data->flags |= IRS_FLAGS_NON_COHERENT;
+ irs_data->flags |= IRS_FLAGS_NON_COHERENT;
} else {
cr1 = FIELD_PREP(GICV5_IRS_CR1_VPED_WA, GICV5_WRITE_ALLOC) |
FIELD_PREP(GICV5_IRS_CR1_VPED_RA, GICV5_READ_ALLOC) |
diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c
index 9290ac741949..554485f0be1f 100644
--- a/drivers/irqchip/irq-gic-v5-its.c
+++ b/drivers/irqchip/irq-gic-v5-its.c
@@ -191,9 +191,9 @@ static int gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its,
unsigned int num_events)
{
unsigned int l1_bits, l2_bits, span, events_per_l2_table;
- unsigned int i, complete_tables, final_span, num_ents;
+ unsigned int complete_tables, final_span, num_ents;
__le64 *itt_l1, *itt_l2, **l2ptrs;
- int ret;
+ int i, ret;
u64 val;
ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz);
@@ -768,8 +768,6 @@ static struct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *
goto out_dev_free;
}
- gicv5_its_device_cache_inv(its, its_dev);
-
its_dev->its_node = its;
its_dev->event_map = (unsigned long *)bitmap_zalloc(its_dev->num_events, GFP_KERNEL);
@@ -949,15 +947,18 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi
device_id = its_dev->device_id;
for (i = 0; i < nr_irqs; i++) {
- lpi = gicv5_alloc_lpi();
+ ret = gicv5_alloc_lpi();
if (ret < 0) {
pr_debug("Failed to find free LPI!\n");
- goto out_eventid;
+ goto out_free_irqs;
}
+ lpi = ret;
ret = irq_domain_alloc_irqs_parent(domain, virq + i, 1, &lpi);
- if (ret)
- goto out_free_lpi;
+ if (ret) {
+ gicv5_free_lpi(lpi);
+ goto out_free_irqs;
+ }
/*
* Store eventid and deviceid into the hwirq for later use.
@@ -977,8 +978,13 @@ static int gicv5_its_irq_domain_alloc(struct irq_domain *domain, unsigned int vi
return 0;
-out_free_lpi:
- gicv5_free_lpi(lpi);
+out_free_irqs:
+ while (--i >= 0) {
+ irqd = irq_domain_get_irq_data(domain, virq + i);
+ gicv5_free_lpi(irqd->parent_data->hwirq);
+ irq_domain_reset_irq_data(irqd);
+ irq_domain_free_irqs_parent(domain, virq + i, 1);
+ }
out_eventid:
gicv5_its_free_eventid(its_dev, event_id_base, nr_irqs);
return ret;
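Besides the full unwind, the first hunk fixes a wrong-variable check: the old code assigned gicv5_alloc_lpi()'s result to lpi but tested ret, so a failed allocation was never caught. The corrected shape assigns to ret, tests it, and only then commits the value:

ret = gicv5_alloc_lpi();	/* negative errno on failure */
if (ret < 0)
	goto out_free_irqs;
lpi = ret;			/* only now is this a valid LPI number */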
diff --git a/drivers/irqchip/irq-gic-v5.c b/drivers/irqchip/irq-gic-v5.c
index 4bd224f359a7..41ef286c4d78 100644
--- a/drivers/irqchip/irq-gic-v5.c
+++ b/drivers/irqchip/irq-gic-v5.c
@@ -1062,16 +1062,9 @@ static void gicv5_set_cpuif_idbits(void)
#ifdef CONFIG_KVM
static struct gic_kvm_info gic_v5_kvm_info __initdata;
-static bool __init gicv5_cpuif_has_gcie_legacy(void)
-{
- u64 idr0 = read_sysreg_s(SYS_ICC_IDR0_EL1);
- return !!FIELD_GET(ICC_IDR0_EL1_GCIE_LEGACY, idr0);
-}
-
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
gic_v5_kvm_info.type = GIC_V5;
- gic_v5_kvm_info.has_gcie_v3_compat = gicv5_cpuif_has_gcie_legacy();
/* GIC Virtual CPU interface maintenance interrupt */
gic_v5_kvm_info.no_maint_irq_mask = false;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1269ab8eb726..ec70c84e9f91 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1459,6 +1459,8 @@ static void __init gic_of_setup_kvm_info(struct device_node *node)
if (ret)
return;
+ gic_v2_kvm_info.gicc_base = gic_data[0].cpu_base.common_base;
+
if (static_branch_likely(&supports_deactivate_key))
vgic_set_kvm_info(&gic_v2_kvm_info);
}
@@ -1620,6 +1622,7 @@ static void __init gic_acpi_setup_kvm_info(void)
return;
gic_v2_kvm_info.maint_irq = irq;
+ gic_v2_kvm_info.gicc_base = gic_data[0].cpu_base.common_base;
vgic_set_kvm_info(&gic_v2_kvm_info);
}
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 91b2f587119c..cca77f9948a3 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -202,13 +202,13 @@ spurious_8259A_irq:
}
}
-static void i8259A_resume(void)
+static void i8259A_resume(void *data)
{
if (i8259A_auto_eoi >= 0)
init_8259A(i8259A_auto_eoi);
}
-static void i8259A_shutdown(void)
+static void i8259A_shutdown(void *data)
{
/* Put the i8259A into a quiescent state that
* the kernel initialization code can get it
@@ -220,11 +220,15 @@ static void i8259A_shutdown(void)
}
}
-static struct syscore_ops i8259_syscore_ops = {
+static const struct syscore_ops i8259_syscore_ops = {
.resume = i8259A_resume,
.shutdown = i8259A_shutdown,
};
+static struct syscore i8259_syscore = {
+ .ops = &i8259_syscore_ops,
+};
+
static void init_8259A(int auto_eoi)
{
unsigned long flags;
@@ -320,7 +324,7 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
if (request_irq(irq, no_action, IRQF_NO_THREAD, "cascade", NULL))
pr_err("Failed to register cascade interrupt\n");
- register_syscore_ops(&i8259_syscore_ops);
+ register_syscore(&i8259_syscore);
return domain;
}
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index b91f5c14b405..04f7ba0657be 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -33,7 +33,7 @@ static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
return cd->gpc_base + cd->cpu2wakeup + i * 4;
}
-static int gpcv2_wakeup_source_save(void)
+static int gpcv2_wakeup_source_save(void *data)
{
struct gpcv2_irqchip_data *cd;
void __iomem *reg;
@@ -52,7 +52,7 @@ static int gpcv2_wakeup_source_save(void)
return 0;
}
-static void gpcv2_wakeup_source_restore(void)
+static void gpcv2_wakeup_source_restore(void *data)
{
struct gpcv2_irqchip_data *cd;
int i;
@@ -65,9 +65,13 @@ static void gpcv2_wakeup_source_restore(void)
writel_relaxed(cd->saved_irq_mask[i], gpcv2_idx_to_reg(cd, i));
}
-static struct syscore_ops imx_gpcv2_syscore_ops = {
- .suspend = gpcv2_wakeup_source_save,
- .resume = gpcv2_wakeup_source_restore,
+static const struct syscore_ops gpcv2_syscore_ops = {
+ .suspend = gpcv2_wakeup_source_save,
+ .resume = gpcv2_wakeup_source_restore,
+};
+
+static struct syscore gpcv2_syscore = {
+ .ops = &gpcv2_syscore_ops,
};
static int imx_gpcv2_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -276,7 +280,7 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
writel_relaxed(~0x1, cd->gpc_base + cd->cpu2wakeup);
imx_gpcv2_instance = cd;
- register_syscore_ops(&imx_gpcv2_syscore_ops);
+ register_syscore(&gpcv2_syscore);
/*
* Clear the OF_POPULATED flag set in of_irq_init so that
diff --git a/drivers/irqchip/irq-imx-mu-msi.c b/drivers/irqchip/irq-imx-mu-msi.c
index d2a4e8a61a42..c598f2f52fc6 100644
--- a/drivers/irqchip/irq-imx-mu-msi.c
+++ b/drivers/irqchip/irq-imx-mu-msi.c
@@ -296,11 +296,9 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
},
};
-static int __init imx_mu_of_init(struct device_node *dn,
- struct device_node *parent,
- const struct imx_mu_dcfg *cfg)
+static int imx_mu_probe(struct platform_device *pdev, struct device_node *parent,
+ const struct imx_mu_dcfg *cfg)
{
- struct platform_device *pdev = of_find_device_by_node(dn);
struct device_link *pd_link_a;
struct device_link *pd_link_b;
struct imx_mu_msi *msi_data;
@@ -416,31 +414,27 @@ static const struct dev_pm_ops imx_mu_pm_ops = {
imx_mu_runtime_resume, NULL)
};
-static int __init imx_mu_imx7ulp_of_init(struct device_node *dn,
- struct device_node *parent)
+static int imx_mu_imx7ulp_probe(struct platform_device *pdev, struct device_node *parent)
{
- return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx7ulp);
+ return imx_mu_probe(pdev, parent, &imx_mu_cfg_imx7ulp);
}
-static int __init imx_mu_imx6sx_of_init(struct device_node *dn,
- struct device_node *parent)
+static int imx_mu_imx6sx_probe(struct platform_device *pdev, struct device_node *parent)
{
- return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx6sx);
+ return imx_mu_probe(pdev, parent, &imx_mu_cfg_imx6sx);
}
-static int __init imx_mu_imx8ulp_of_init(struct device_node *dn,
- struct device_node *parent)
+static int imx_mu_imx8ulp_probe(struct platform_device *pdev, struct device_node *parent)
{
- return imx_mu_of_init(dn, parent, &imx_mu_cfg_imx8ulp);
+ return imx_mu_probe(pdev, parent, &imx_mu_cfg_imx8ulp);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(imx_mu_msi)
-IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_of_init)
-IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_of_init)
-IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_of_init)
+IRQCHIP_MATCH("fsl,imx7ulp-mu-msi", imx_mu_imx7ulp_probe)
+IRQCHIP_MATCH("fsl,imx6sx-mu-msi", imx_mu_imx6sx_probe)
+IRQCHIP_MATCH("fsl,imx8ulp-mu-msi", imx_mu_imx8ulp_probe)
IRQCHIP_PLATFORM_DRIVER_END(imx_mu_msi, .pm = &imx_mu_pm_ops)
-
MODULE_AUTHOR("Frank Li <Frank.Li@nxp.com>");
MODULE_DESCRIPTION("Freescale MU MSI controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index b2860eb2d32c..ad2105685b48 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -46,6 +46,7 @@
#define EIOINTC_ALL_ENABLE_VEC_MASK(vector) (EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1f))
#define EIOINTC_REG_ENABLE_VEC(vector) (EIOINTC_REG_ENABLE + ((vector >> 5) << 2))
#define EIOINTC_USE_CPU_ENCODE BIT(0)
+#define EIOINTC_ROUTE_MULT_IP BIT(1)
#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE)
@@ -59,6 +60,14 @@
#define EIOINTC_REG_ROUTE_VEC_MASK(vector) (0xff << EIOINTC_REG_ROUTE_VEC_SHIFT(vector))
static int nr_pics;
+struct eiointc_priv;
+
+struct eiointc_ip_route {
+ struct eiointc_priv *priv;
+ /* Offsets of the routed destination IP */
+ int start;
+ int end;
+};
struct eiointc_priv {
u32 node;
@@ -68,6 +77,8 @@ struct eiointc_priv {
struct fwnode_handle *domain_handle;
struct irq_domain *eiointc_domain;
int flags;
+ irq_hw_number_t parent_hwirq;
+ struct eiointc_ip_route route_info[VEC_REG_COUNT];
};
static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
@@ -188,6 +199,7 @@ static int eiointc_router_init(unsigned int cpu)
{
int i, bit, cores, index, node;
unsigned int data;
+ int hwirq, mask;
node = cpu_to_eio_node(cpu);
index = eiointc_index(node);
@@ -197,6 +209,13 @@ static int eiointc_router_init(unsigned int cpu)
return -EINVAL;
}
+ /* Enable cpu interrupt pin from eiointc */
+ hwirq = eiointc_priv[index]->parent_hwirq;
+ mask = BIT(hwirq);
+ if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP)
+ mask |= BIT(hwirq + 1) | BIT(hwirq + 2) | BIT(hwirq + 3);
+ set_csr_ecfg(mask);
+
if (!(eiointc_priv[index]->flags & EIOINTC_USE_CPU_ENCODE))
cores = CORES_PER_EIO_NODE;
else
@@ -211,8 +230,31 @@ static int eiointc_router_init(unsigned int cpu)
}
for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
- bit = BIT(1 + index); /* Route to IP[1 + index] */
- data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ /*
+ * Route to an interrupt pin; a relative offset is used here,
+ * so offset 0 means routing to IP0 and so on.
+ *
+ * If EIOINTC_ROUTE_MULT_IP is set in flags, every block of
+ * 64 vectors routes to a different consecutive IP; otherwise
+ * all vectors route to the same IP.
+ */
+ if (eiointc_priv[index]->flags & EIOINTC_ROUTE_MULT_IP) {
+ /* The first 64 vectors route to hwirq */
+ bit = BIT(hwirq++ - INT_HWI0);
+ data = bit | (bit << 8);
+
+ /* The second 64 vectors route to hwirq + 1 */
+ bit = BIT(hwirq++ - INT_HWI0);
+ data |= (bit << 16) | (bit << 24);
+
+ /*
+ * Route to hwirq + 2 and hwirq + 3 separately
+ * in the next loop iteration
+ */
+ } else {
+ bit = BIT(hwirq - INT_HWI0);
+ data = bit | (bit << 8) | (bit << 16) | (bit << 24);
+ }
iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
}
@@ -241,15 +283,22 @@ static int eiointc_router_init(unsigned int cpu)
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
- int i;
- u64 pending;
- bool handled = false;
+ struct eiointc_ip_route *info = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- struct eiointc_priv *priv = irq_desc_get_handler_data(desc);
+ bool handled = false;
+ u64 pending;
+ int i;
chained_irq_enter(chip, desc);
- for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
+ /*
+ * If EIOINTC_ROUTE_MULT_IP is set, every block of 64 interrupt
+ * vectors in the eiointc interrupt controller routes to a
+ * different CPU interrupt pin.
+ *
+ * Every CPU interrupt pin has its own IRQ handler, so it is safe
+ * to read the ISR for just these 64 interrupt vectors rather than
+ * all vectors
+ */
+ for (i = info->start; i < info->end; i++) {
pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
/* Skip handling if pending bitmap is zero */
@@ -262,7 +311,7 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
int bit = __ffs(pending);
int irq = bit + VEC_COUNT_PER_REG * i;
- generic_handle_domain_irq(priv->eiointc_domain, irq);
+ generic_handle_domain_irq(info->priv->eiointc_domain, irq);
pending &= ~BIT(bit);
handled = true;
}
@@ -358,21 +407,25 @@ static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group
return NULL;
}
-static int eiointc_suspend(void)
+static int eiointc_suspend(void *data)
{
return 0;
}
-static void eiointc_resume(void)
+static void eiointc_resume(void *data)
{
eiointc_router_init(0);
}
-static struct syscore_ops eiointc_syscore_ops = {
+static const struct syscore_ops eiointc_syscore_ops = {
.suspend = eiointc_suspend,
.resume = eiointc_resume,
};
+static struct syscore eiointc_syscore = {
+ .ops = &eiointc_syscore_ops,
+};
+
static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
const unsigned long end)
{
@@ -462,11 +515,36 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
}
eiointc_priv[nr_pics++] = priv;
+ /*
+ * Only the first eiointc device on a VM supports routing to
+ * different CPU interrupt pins. Any later eiointc devices fall
+ * back to the generic method should multiple eiointc devices
+ * exist in the future
+ */
+ if (cpu_has_hypervisor && (nr_pics == 1)) {
+ priv->flags |= EIOINTC_ROUTE_MULT_IP;
+ priv->parent_hwirq = INT_HWI0;
+ }
+
+ if (priv->flags & EIOINTC_ROUTE_MULT_IP) {
+ for (i = 0; i < priv->vec_count / VEC_COUNT_PER_REG; i++) {
+ priv->route_info[i].start = priv->parent_hwirq - INT_HWI0 + i;
+ priv->route_info[i].end = priv->route_info[i].start + 1;
+ priv->route_info[i].priv = priv;
+ parent_irq = get_percpu_irq(priv->parent_hwirq + i);
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
+ &priv->route_info[i]);
+ }
+ } else {
+ priv->route_info[0].start = 0;
+ priv->route_info[0].end = priv->vec_count / VEC_COUNT_PER_REG;
+ priv->route_info[0].priv = priv;
+ irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch,
+ &priv->route_info[0]);
+ }
eiointc_router_init(0);
- irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
if (nr_pics == 1) {
- register_syscore_ops(&eiointc_syscore_ops);
+ register_syscore(&eiointc_syscore);
cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_EIOINTC_STARTING,
"irqchip/loongarch/eiointc:starting",
eiointc_router_init, NULL);
@@ -495,7 +573,7 @@ int __init eiointc_acpi_init(struct irq_domain *parent,
priv->vec_count = VEC_COUNT;
priv->node = acpi_eiointc->node;
-
+ priv->parent_hwirq = acpi_eiointc->cascade;
parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
@@ -527,8 +605,9 @@ out_free_priv:
static int __init eiointc_of_init(struct device_node *of_node,
struct device_node *parent)
{
- int parent_irq, ret;
struct eiointc_priv *priv;
+ struct irq_data *irq_data;
+ int parent_irq, ret;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -544,6 +623,12 @@ static int __init eiointc_of_init(struct device_node *of_node,
if (ret < 0)
goto out_free_priv;
+ irq_data = irq_get_irq_data(parent_irq);
+ if (!irq_data) {
+ ret = -ENODEV;
+ goto out_free_priv;
+ }
+
/*
* In particular, the number of devices supported by the LS2K0500
* extended I/O interrupt vector is 128.
@@ -552,7 +637,7 @@ static int __init eiointc_of_init(struct device_node *of_node,
priv->vec_count = 128;
else
priv->vec_count = VEC_COUNT;
-
+ priv->parent_hwirq = irqd_to_hwirq(irq_data);
priv->node = 0;
priv->domain_handle = of_fwnode_handle(of_node);
diff --git a/drivers/irqchip/irq-loongson-htpic.c b/drivers/irqchip/irq-loongson-htpic.c
index f4abdf156de7..1c691c4be989 100644
--- a/drivers/irqchip/irq-loongson-htpic.c
+++ b/drivers/irqchip/irq-loongson-htpic.c
@@ -71,15 +71,19 @@ static void htpic_reg_init(void)
writel(0xffff, htpic->base + HTINT_EN_OFF);
}
-static void htpic_resume(void)
+static void htpic_resume(void *data)
{
htpic_reg_init();
}
-struct syscore_ops htpic_syscore_ops = {
+static const struct syscore_ops htpic_syscore_ops = {
.resume = htpic_resume,
};
+static struct syscore htpic_syscore = {
+ .ops = &htpic_syscore_ops,
+};
+
static int __init htpic_of_init(struct device_node *node, struct device_node *parent)
{
unsigned int parent_irq[4];
@@ -130,7 +134,7 @@ static int __init htpic_of_init(struct device_node *node, struct device_node *pa
htpic_irq_dispatch, htpic);
}
- register_syscore_ops(&htpic_syscore_ops);
+ register_syscore(&htpic_syscore);
return 0;
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index d8558eb35044..d2be8e954e92 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -159,7 +159,7 @@ static void htvec_reset(struct htvec *priv)
}
}
-static int htvec_suspend(void)
+static int htvec_suspend(void *data)
{
int i;
@@ -169,7 +169,7 @@ static int htvec_suspend(void)
return 0;
}
-static void htvec_resume(void)
+static void htvec_resume(void *data)
{
int i;
@@ -177,11 +177,15 @@ static void htvec_resume(void)
writel(htvec_priv->saved_vec_en[i], htvec_priv->base + HTVEC_EN_OFF + 4 * i);
}
-static struct syscore_ops htvec_syscore_ops = {
+static const struct syscore_ops htvec_syscore_ops = {
.suspend = htvec_suspend,
.resume = htvec_resume,
};
+static struct syscore htvec_syscore = {
+ .ops = &htvec_syscore_ops,
+};
+
static int htvec_init(phys_addr_t addr, unsigned long size,
int num_parents, int parent_irq[], struct fwnode_handle *domain_handle)
{
@@ -214,7 +218,7 @@ static int htvec_init(phys_addr_t addr, unsigned long size,
htvec_priv = priv;
- register_syscore_ops(&htvec_syscore_ops);
+ register_syscore(&htvec_syscore);
return 0;
diff --git a/drivers/irqchip/irq-loongson-pch-lpc.c b/drivers/irqchip/irq-loongson-pch-lpc.c
index 2d4c3ec128b8..3a125f3e4287 100644
--- a/drivers/irqchip/irq-loongson-pch-lpc.c
+++ b/drivers/irqchip/irq-loongson-pch-lpc.c
@@ -151,7 +151,7 @@ static int pch_lpc_disabled(struct pch_lpc *priv)
(readl(priv->base + LPC_INT_STS) == 0xffffffff);
}
-static int pch_lpc_suspend(void)
+static int pch_lpc_suspend(void *data)
{
pch_lpc_priv->saved_reg_ctl = readl(pch_lpc_priv->base + LPC_INT_CTL);
pch_lpc_priv->saved_reg_ena = readl(pch_lpc_priv->base + LPC_INT_ENA);
@@ -159,18 +159,22 @@ static int pch_lpc_suspend(void)
return 0;
}
-static void pch_lpc_resume(void)
+static void pch_lpc_resume(void *data)
{
writel(pch_lpc_priv->saved_reg_ctl, pch_lpc_priv->base + LPC_INT_CTL);
writel(pch_lpc_priv->saved_reg_ena, pch_lpc_priv->base + LPC_INT_ENA);
writel(pch_lpc_priv->saved_reg_pol, pch_lpc_priv->base + LPC_INT_POL);
}
-static struct syscore_ops pch_lpc_syscore_ops = {
+static const struct syscore_ops pch_lpc_syscore_ops = {
.suspend = pch_lpc_suspend,
.resume = pch_lpc_resume,
};
+static struct syscore pch_lpc_syscore = {
+ .ops = &pch_lpc_syscore_ops,
+};
+
int __init pch_lpc_acpi_init(struct irq_domain *parent,
struct acpi_madt_lpc_pic *acpi_pchlpc)
{
@@ -200,8 +204,13 @@ int __init pch_lpc_acpi_init(struct irq_domain *parent,
goto iounmap_base;
}
- priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT,
- &pch_lpc_domain_ops, priv);
+ /*
+ * The LPC interrupt controller is a legacy i8259-compatible device,
+ * which requires a static 1:1 mapping for IRQs 0-15.
+ * Use irq_domain_create_legacy to establish this static mapping early.
+ */
+ priv->lpc_domain = irq_domain_create_legacy(irq_handle, LPC_COUNT, 0, 0,
+ &pch_lpc_domain_ops, priv);
if (!priv->lpc_domain) {
pr_err("Failed to create IRQ domain\n");
goto free_irq_handle;
@@ -217,7 +226,7 @@ int __init pch_lpc_acpi_init(struct irq_domain *parent,
pch_lpc_priv = priv;
pch_lpc_handle = irq_handle;
- register_syscore_ops(&pch_lpc_syscore_ops);
+ register_syscore(&pch_lpc_syscore);
return 0;
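The irq_domain_create_legacy() switch above relies on the semantic difference between the two constructors: a linear domain creates hwirq-to-virq mappings lazily on irq_create_mapping(), while a legacy domain associates the whole [first_hwirq, first_hwirq + size) range at creation time, which suits an i8259-compatible block whose 16 lines are fixed by hardware. A sketch of the call shape (fragment only, not from this patch):

/* All mappings for the fixed legacy lines exist as soon as this returns;
 * no further irq_create_mapping() calls are needed for hwirqs 0-15. */
domain = irq_domain_create_legacy(fwnode,
				  16,	/* size: the fixed legacy lines */
				  0,	/* first_irq */
				  0,	/* first_hwirq */
				  &ops, host_data);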
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index 62e6bf3a0611..c6b369a974a7 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -278,7 +278,7 @@ static void pch_pic_reset(struct pch_pic *priv)
}
}
-static int pch_pic_suspend(void)
+static int pch_pic_suspend(void *data)
{
int i, j;
@@ -296,7 +296,7 @@ static int pch_pic_suspend(void)
return 0;
}
-static void pch_pic_resume(void)
+static void pch_pic_resume(void *data)
{
int i, j;
@@ -313,11 +313,15 @@ static void pch_pic_resume(void)
}
}
-static struct syscore_ops pch_pic_syscore_ops = {
+static const struct syscore_ops pch_pic_syscore_ops = {
.suspend = pch_pic_suspend,
.resume = pch_pic_resume,
};
+static struct syscore pch_pic_syscore = {
+ .ops = &pch_pic_syscore_ops,
+};
+
static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
struct irq_domain *parent_domain, struct fwnode_handle *domain_handle,
u32 gsi_base)
@@ -356,7 +360,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
pch_pic_priv[nr_pics++] = priv;
if (nr_pics == 1)
- register_syscore_ops(&pch_pic_syscore_ops);
+ register_syscore(&pch_pic_syscore);
return 0;
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
index 516a3a0e359c..2474fa467a05 100644
--- a/drivers/irqchip/irq-mchp-eic.c
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -109,7 +109,7 @@ static int mchp_eic_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
-static int mchp_eic_irq_suspend(void)
+static int mchp_eic_irq_suspend(void *data)
{
unsigned int hwirq;
@@ -123,7 +123,7 @@ static int mchp_eic_irq_suspend(void)
return 0;
}
-static void mchp_eic_irq_resume(void)
+static void mchp_eic_irq_resume(void *data)
{
unsigned int hwirq;
@@ -135,11 +135,15 @@ static void mchp_eic_irq_resume(void)
MCHP_EIC_SCFG(hwirq));
}
-static struct syscore_ops mchp_eic_syscore_ops = {
+static const struct syscore_ops mchp_eic_syscore_ops = {
.suspend = mchp_eic_irq_suspend,
.resume = mchp_eic_irq_resume,
};
+static struct syscore mchp_eic_syscore = {
+ .ops = &mchp_eic_syscore_ops,
+};
+
static struct irq_chip mchp_eic_chip = {
.name = "eic",
.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED,
@@ -199,8 +203,9 @@ static const struct irq_domain_ops mchp_eic_domain_ops = {
.free = irq_domain_free_irqs_common,
};
-static int mchp_eic_init(struct device_node *node, struct device_node *parent)
+static int mchp_eic_probe(struct platform_device *pdev, struct device_node *parent)
{
+ struct device_node *node = pdev->dev.of_node;
struct irq_domain *parent_domain = NULL;
int ret, i;
@@ -257,7 +262,7 @@ static int mchp_eic_init(struct device_node *node, struct device_node *parent)
goto clk_unprepare;
}
- register_syscore_ops(&mchp_eic_syscore_ops);
+ register_syscore(&mchp_eic_syscore);
pr_info("%pOF: EIC registered, nr_irqs %u\n", node, MCHP_EIC_NIRQ);
@@ -273,7 +278,7 @@ free:
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(mchp_eic)
-IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_init)
+IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_probe)
IRQCHIP_PLATFORM_DRIVER_END(mchp_eic)
MODULE_DESCRIPTION("Microchip External Interrupt Controller");
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index 7d177626d64b..3fcbb044ae60 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -174,6 +174,14 @@ static const struct meson_gpio_irq_params s4_params = {
INIT_MESON_S4_COMMON_DATA(82)
};
+static const struct meson_gpio_irq_params s6_params = {
+ INIT_MESON_S4_COMMON_DATA(100)
+};
+
+static const struct meson_gpio_irq_params s7_params = {
+ INIT_MESON_S4_COMMON_DATA(84)
+};
+
static const struct meson_gpio_irq_params c3_params = {
INIT_MESON_S4_COMMON_DATA(55)
};
@@ -195,6 +203,9 @@ static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
{ .compatible = "amlogic,a4-gpio-ao-intc", .data = &a4_ao_params },
{ .compatible = "amlogic,a4-gpio-intc", .data = &a4_params },
{ .compatible = "amlogic,a5-gpio-intc", .data = &a5_params },
+ { .compatible = "amlogic,s6-gpio-intc", .data = &s6_params },
+ { .compatible = "amlogic,s7-gpio-intc", .data = &s7_params },
+ { .compatible = "amlogic,s7d-gpio-intc", .data = &s7_params },
{ .compatible = "amlogic,c3-gpio-intc", .data = &c3_params },
{ .compatible = "amlogic,t7-gpio-intc", .data = &t7_params },
{ }
@@ -572,8 +583,9 @@ static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_i
return 0;
}
-static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *parent)
+static int meson_gpio_irq_probe(struct platform_device *pdev, struct device_node *parent)
{
+ struct device_node *node = pdev->dev.of_node;
struct irq_domain *domain, *parent_domain;
struct meson_gpio_irq_controller *ctl;
int ret;
@@ -630,10 +642,9 @@ free_ctl:
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(meson_gpio_intc)
-IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init)
+IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_probe)
IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc)
MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
MODULE_DESCRIPTION("Meson GPIO Interrupt Multiplexer driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:meson-gpio-intc");
diff --git a/drivers/irqchip/irq-msi-lib.c b/drivers/irqchip/irq-msi-lib.c
index 908944009c21..d5eefc3d7215 100644
--- a/drivers/irqchip/irq-msi-lib.c
+++ b/drivers/irqchip/irq-msi-lib.c
@@ -112,6 +112,20 @@ bool msi_lib_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
*/
if (!chip->irq_set_affinity && !(info->flags & MSI_FLAG_NO_AFFINITY))
chip->irq_set_affinity = msi_domain_set_affinity;
+
+ /*
+ * If the parent domain insists on being in charge of masking, obey
+ * blindly. The interrupt is unmasked at the PCI level on startup
+ * and masked on shutdown to prevent rogue interrupts after the
+ * driver has freed the interrupt. Not masking it at the PCI level
+ * speeds up operation for disable/enable_irq() as it avoids
+ * getting all the way out to the PCI device.
+ */
+ if (info->flags & MSI_FLAG_PCI_MSI_MASK_PARENT) {
+ chip->irq_mask = irq_chip_mask_parent;
+ chip->irq_unmask = irq_chip_unmask_parent;
+ }
+
return true;
}
EXPORT_SYMBOL_GPL(msi_lib_init_dev_msi_info);
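A parent domain opts into this behaviour through its required flags; the SG2042/SG2044 hunks later in this diff do exactly that. A hedged sketch of the opt-in (names hypothetical, fields per struct msi_parent_ops):

static const struct msi_parent_ops foo_msi_parent_ops = {
	.required_flags		= MSI_FLAG_USE_DEF_DOM_OPS |
				  MSI_FLAG_USE_DEF_CHIP_OPS |
				  MSI_FLAG_PCI_MSI_MASK_PARENT,
	.supported_flags	= MSI_GENERIC_FLAGS_MASK,
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

With the flag present, msi_lib_init_dev_msi_info() rewires the per-device chip to irq_chip_mask_parent()/irq_chip_unmask_parent(), so disable_irq()/enable_irq() stop at the parent instead of reaching out to PCI config space.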
diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c
index 9643cc3a77d7..7f760f555a76 100644
--- a/drivers/irqchip/irq-mst-intc.c
+++ b/drivers/irqchip/irq-mst-intc.c
@@ -143,7 +143,7 @@ static void mst_intc_polarity_restore(struct mst_intc_chip_data *cd)
writew_relaxed(cd->saved_polarity_conf[i], addr + i * 4);
}
-static void mst_irq_resume(void)
+static void mst_irq_resume(void *data)
{
struct mst_intc_chip_data *cd;
@@ -151,7 +151,7 @@ static void mst_irq_resume(void)
mst_intc_polarity_restore(cd);
}
-static int mst_irq_suspend(void)
+static int mst_irq_suspend(void *data)
{
struct mst_intc_chip_data *cd;
@@ -160,14 +160,18 @@ static int mst_irq_suspend(void)
return 0;
}
-static struct syscore_ops mst_irq_syscore_ops = {
+static const struct syscore_ops mst_irq_syscore_ops = {
.suspend = mst_irq_suspend,
.resume = mst_irq_resume,
};
+static struct syscore mst_irq_syscore = {
+ .ops = &mst_irq_syscore_ops,
+};
+
static int __init mst_irq_pm_init(void)
{
- register_syscore_ops(&mst_irq_syscore_ops);
+ register_syscore(&mst_irq_syscore);
return 0;
}
late_initcall(mst_irq_pm_init);
diff --git a/drivers/irqchip/irq-mtk-cirq.c b/drivers/irqchip/irq-mtk-cirq.c
index de481ba340f8..9571f622774e 100644
--- a/drivers/irqchip/irq-mtk-cirq.c
+++ b/drivers/irqchip/irq-mtk-cirq.c
@@ -199,7 +199,7 @@ static const struct irq_domain_ops cirq_domain_ops = {
};
#ifdef CONFIG_PM_SLEEP
-static int mtk_cirq_suspend(void)
+static int mtk_cirq_suspend(void *data)
{
void __iomem *reg;
u32 value, mask;
@@ -257,7 +257,7 @@ static int mtk_cirq_suspend(void)
return 0;
}
-static void mtk_cirq_resume(void)
+static void mtk_cirq_resume(void *data)
{
void __iomem *reg = mtk_cirq_reg(cirq_data, CIRQ_CONTROL);
u32 value;
@@ -272,14 +272,18 @@ static void mtk_cirq_resume(void)
writel_relaxed(value, reg);
}
-static struct syscore_ops mtk_cirq_syscore_ops = {
+static const struct syscore_ops mtk_cirq_syscore_ops = {
.suspend = mtk_cirq_suspend,
.resume = mtk_cirq_resume,
};
+static struct syscore mtk_cirq_syscore = {
+ .ops = &mtk_cirq_syscore_ops,
+};
+
static void mtk_cirq_syscore_init(void)
{
- register_syscore_ops(&mtk_cirq_syscore_ops);
+ register_syscore(&mtk_cirq_syscore);
}
#else
static inline void mtk_cirq_syscore_init(void) {}
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index cd8b73482b9f..10b85128183a 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -195,5 +195,3 @@ MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_DESCRIPTION("Marvell Armada 7K/8K PIC driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:mvebu_pic");
-
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index 76e11cac9631..2191a2b79578 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -73,8 +73,9 @@ static int __init nvic_of_init(struct device_node *node,
struct device_node *parent)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
- unsigned int irqs, i, ret, numbanks;
+ unsigned int irqs, i, numbanks;
void __iomem *nvic_base;
+ int ret;
numbanks = (readl_relaxed(V7M_SCS_ICTR) &
V7M_SCS_ICTR_INTLINESNUM_MASK) + 1;
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
deleted file mode 100644
index 4441ffe149ea..000000000000
--- a/drivers/irqchip/irq-partition-percpu.c
+++ /dev/null
@@ -1,241 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2016 ARM Limited, All Rights Reserved.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/bitops.h>
-#include <linux/interrupt.h>
-#include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqchip/irq-partition-percpu.h>
-#include <linux/irqdomain.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-
-struct partition_desc {
- int nr_parts;
- struct partition_affinity *parts;
- struct irq_domain *domain;
- struct irq_desc *chained_desc;
- unsigned long *bitmap;
- struct irq_domain_ops ops;
-};
-
-static bool partition_check_cpu(struct partition_desc *part,
- unsigned int cpu, unsigned int hwirq)
-{
- return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
-}
-
-static void partition_irq_mask(struct irq_data *d)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
- chip->irq_mask)
- chip->irq_mask(data);
-}
-
-static void partition_irq_unmask(struct irq_data *d)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
- chip->irq_unmask)
- chip->irq_unmask(data);
-}
-
-static int partition_irq_set_irqchip_state(struct irq_data *d,
- enum irqchip_irq_state which,
- bool val)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
- chip->irq_set_irqchip_state)
- return chip->irq_set_irqchip_state(data, which, val);
-
- return -EINVAL;
-}
-
-static int partition_irq_get_irqchip_state(struct irq_data *d,
- enum irqchip_irq_state which,
- bool *val)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
- chip->irq_get_irqchip_state)
- return chip->irq_get_irqchip_state(data, which, val);
-
- return -EINVAL;
-}
-
-static int partition_irq_set_type(struct irq_data *d, unsigned int type)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- if (chip->irq_set_type)
- return chip->irq_set_type(data, type);
-
- return -EINVAL;
-}
-
-static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
-{
- struct partition_desc *part = irq_data_get_irq_chip_data(d);
- struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
- struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
-
- seq_printf(p, "%5s-%lu", chip->name, data->hwirq);
-}
-
-static struct irq_chip partition_irq_chip = {
- .irq_mask = partition_irq_mask,
- .irq_unmask = partition_irq_unmask,
- .irq_set_type = partition_irq_set_type,
- .irq_get_irqchip_state = partition_irq_get_irqchip_state,
- .irq_set_irqchip_state = partition_irq_set_irqchip_state,
- .irq_print_chip = partition_irq_print_chip,
-};
-
-static void partition_handle_irq(struct irq_desc *desc)
-{
- struct partition_desc *part = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int cpu = smp_processor_id();
- int hwirq;
-
- chained_irq_enter(chip, desc);
-
- for_each_set_bit(hwirq, part->bitmap, part->nr_parts) {
- if (partition_check_cpu(part, cpu, hwirq))
- break;
- }
-
- if (unlikely(hwirq == part->nr_parts))
- handle_bad_irq(desc);
- else
- generic_handle_domain_irq(part->domain, hwirq);
-
- chained_irq_exit(chip, desc);
-}
-
-static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *arg)
-{
- int ret;
- irq_hw_number_t hwirq;
- unsigned int type;
- struct irq_fwspec *fwspec = arg;
- struct partition_desc *part;
-
- BUG_ON(nr_irqs != 1);
- ret = domain->ops->translate(domain, fwspec, &hwirq, &type);
- if (ret)
- return ret;
-
- part = domain->host_data;
-
- set_bit(hwirq, part->bitmap);
- irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc),
- partition_handle_irq, part);
- irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask);
- irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part,
- handle_percpu_devid_irq, NULL, NULL);
- irq_set_status_flags(virq, IRQ_NOAUTOEN);
-
- return 0;
-}
-
-static void partition_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *d;
-
- BUG_ON(nr_irqs != 1);
-
- d = irq_domain_get_irq_data(domain, virq);
- irq_set_handler(virq, NULL);
- irq_domain_reset_irq_data(d);
-}
-
-int partition_translate_id(struct partition_desc *desc, void *partition_id)
-{
- struct partition_affinity *part = NULL;
- int i;
-
- for (i = 0; i < desc->nr_parts; i++) {
- if (desc->parts[i].partition_id == partition_id) {
- part = &desc->parts[i];
- break;
- }
- }
-
- if (WARN_ON(!part)) {
- pr_err("Failed to find partition\n");
- return -EINVAL;
- }
-
- return i;
-}
-
-struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
- struct partition_affinity *parts,
- int nr_parts,
- int chained_irq,
- const struct irq_domain_ops *ops)
-{
- struct partition_desc *desc;
- struct irq_domain *d;
-
- BUG_ON(!ops->select || !ops->translate);
-
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- if (!desc)
- return NULL;
-
- desc->ops = *ops;
- desc->ops.free = partition_domain_free;
- desc->ops.alloc = partition_domain_alloc;
-
- d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc);
- if (!d)
- goto out;
- desc->domain = d;
-
- desc->bitmap = bitmap_zalloc(nr_parts, GFP_KERNEL);
- if (WARN_ON(!desc->bitmap))
- goto out;
-
- desc->chained_desc = irq_to_desc(chained_irq);
- desc->nr_parts = nr_parts;
- desc->parts = parts;
-
- return desc;
-out:
- if (d)
- irq_domain_remove(d);
- kfree(desc);
-
- return NULL;
-}
-
-struct irq_domain *partition_get_domain(struct partition_desc *dsc)
-{
- if (dsc)
- return dsc->domain;
-
- return NULL;
-}
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index 8d569f7c5a7a..83f31ea657b7 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -320,9 +320,9 @@ static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
return false;
}
-static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
+static int qcom_mpm_probe(struct platform_device *pdev, struct device_node *parent)
{
- struct platform_device *pdev = of_find_device_by_node(np);
+ struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct irq_domain *parent_domain;
struct generic_pm_domain *genpd;
@@ -478,7 +478,7 @@ remove_genpd:
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
-IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
+IRQCHIP_MATCH("qcom,mpm", qcom_mpm_probe)
IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index a697eb55ac90..6047a524ac77 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -142,11 +142,12 @@ static const struct irq_domain_ops rza1_irqc_domain_ops = {
static int rza1_irqc_parse_map(struct rza1_irqc_priv *priv,
struct device_node *gic_node)
{
- unsigned int imaplen, i, j, ret;
struct device *dev = priv->dev;
+ unsigned int imaplen, i, j;
struct device_node *ipar;
const __be32 *imap;
u32 intsize;
+ int ret;
imap = of_get_property(dev->of_node, "interrupt-map", &imaplen);
if (!imap)
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 360d88687e4f..e73d426cea6d 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -8,7 +8,6 @@
*/
#include <linux/bitfield.h>
-#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -399,7 +398,7 @@ static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static int rzg2l_irqc_irq_suspend(void)
+static int rzg2l_irqc_irq_suspend(void *data)
{
struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
void __iomem *base = rzg2l_irqc_data->base;
@@ -411,7 +410,7 @@ static int rzg2l_irqc_irq_suspend(void)
return 0;
}
-static void rzg2l_irqc_irq_resume(void)
+static void rzg2l_irqc_irq_resume(void *data)
{
struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
void __iomem *base = rzg2l_irqc_data->base;
@@ -426,11 +425,15 @@ static void rzg2l_irqc_irq_resume(void)
writel_relaxed(cache->iitsr, base + IITSR);
}
-static struct syscore_ops rzg2l_irqc_syscore_ops = {
+static const struct syscore_ops rzg2l_irqc_syscore_ops = {
.suspend = rzg2l_irqc_irq_suspend,
.resume = rzg2l_irqc_irq_resume,
};
+static struct syscore rzg2l_irqc_syscore = {
+ .ops = &rzg2l_irqc_syscore_ops,
+};
+
static const struct irq_chip rzg2l_irqc_chip = {
.name = "rzg2l-irqc",
.irq_eoi = rzg2l_irqc_eoi,
@@ -528,18 +531,15 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
return 0;
}
-static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
- const struct irq_chip *irq_chip)
+static int rzg2l_irqc_common_probe(struct platform_device *pdev, struct device_node *parent,
+ const struct irq_chip *irq_chip)
{
- struct platform_device *pdev = of_find_device_by_node(node);
- struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
struct irq_domain *irq_domain, *parent_domain;
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct reset_control *resetn;
int ret;
- if (!pdev)
- return -ENODEV;
-
parent_domain = irq_find_host(parent);
if (!parent_domain)
return dev_err_probe(dev, -ENODEV, "cannot find parent domain\n");
@@ -578,40 +578,27 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
&rzg2l_irqc_domain_ops, rzg2l_irqc_data);
if (!irq_domain) {
pm_runtime_put(dev);
- return dev_err_probe(dev, -ENOMEM, "failed to add irq domain\n");
+ return -ENOMEM;
}
- register_syscore_ops(&rzg2l_irqc_syscore_ops);
-
- /*
- * Prevent the cleanup function from invoking put_device by assigning
- * NULL to dev.
- *
- * make coccicheck will complain about missing put_device calls, but
- * those are false positives, as dev will be automatically "put" via
- * __free_put_device on the failing path.
- * On the successful path we don't actually want to "put" dev.
- */
- dev = NULL;
+ register_syscore(&rzg2l_irqc_syscore);
return 0;
}
-static int __init rzg2l_irqc_init(struct device_node *node,
- struct device_node *parent)
+static int rzg2l_irqc_probe(struct platform_device *pdev, struct device_node *parent)
{
- return rzg2l_irqc_common_init(node, parent, &rzg2l_irqc_chip);
+ return rzg2l_irqc_common_probe(pdev, parent, &rzg2l_irqc_chip);
}
-static int __init rzfive_irqc_init(struct device_node *node,
- struct device_node *parent)
+static int rzfive_irqc_probe(struct platform_device *pdev, struct device_node *parent)
{
- return rzg2l_irqc_common_init(node, parent, &rzfive_irqc_chip);
+ return rzg2l_irqc_common_probe(pdev, parent, &rzfive_irqc_chip);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
-IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
-IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_init)
+IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_probe)
+IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_probe)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index 9018d9c3911e..899a423b5da8 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -490,29 +490,15 @@ static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device
return 0;
}
-static void rzv2h_icu_put_device(void *data)
-{
- put_device(data);
-}
-
-static int rzv2h_icu_init_common(struct device_node *node, struct device_node *parent,
- const struct rzv2h_hw_info *hw_info)
+static int rzv2h_icu_probe_common(struct platform_device *pdev, struct device_node *parent,
+ const struct rzv2h_hw_info *hw_info)
{
struct irq_domain *irq_domain, *parent_domain;
+ struct device_node *node = pdev->dev.of_node;
struct rzv2h_icu_priv *rzv2h_icu_data;
- struct platform_device *pdev;
struct reset_control *resetn;
int ret;
- pdev = of_find_device_by_node(node);
- if (!pdev)
- return -ENODEV;
-
- ret = devm_add_action_or_reset(&pdev->dev, rzv2h_icu_put_device,
- &pdev->dev);
- if (ret < 0)
- return ret;
-
parent_domain = irq_find_host(parent);
if (!parent_domain) {
dev_err(&pdev->dev, "cannot find parent domain\n");
@@ -618,19 +604,19 @@ static const struct rzv2h_hw_info rzv2h_hw_params = {
.field_width = 8,
};
-static int rzg3e_icu_init(struct device_node *node, struct device_node *parent)
+static int rzg3e_icu_probe(struct platform_device *pdev, struct device_node *parent)
{
- return rzv2h_icu_init_common(node, parent, &rzg3e_hw_params);
+ return rzv2h_icu_probe_common(pdev, parent, &rzg3e_hw_params);
}
-static int rzv2h_icu_init(struct device_node *node, struct device_node *parent)
+static int rzv2h_icu_probe(struct platform_device *pdev, struct device_node *parent)
{
- return rzv2h_icu_init_common(node, parent, &rzv2h_hw_params);
+ return rzv2h_icu_probe_common(pdev, parent, &rzv2h_hw_params);
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
-IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_init)
-IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_init)
+IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_probe)
+IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_probe)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/V2H(P) ICU Driver");
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index 2709cacf4855..6bac67cc0b6d 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "riscv-imsic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -90,9 +91,8 @@ static int __init imsic_ipi_domain_init(void) { return 0; }
*/
static void imsic_handle_irq(struct irq_desc *desc)
{
+ struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
struct irq_chip *chip = irq_desc_get_chip(desc);
- int cpu = smp_processor_id();
- struct imsic_vector *vec;
unsigned long local_id;
/*
@@ -112,16 +112,12 @@ static void imsic_handle_irq(struct irq_desc *desc)
continue;
}
- if (unlikely(!imsic->base_domain))
- continue;
-
- vec = imsic_vector_from_local_id(cpu, local_id);
- if (!vec) {
+ if (unlikely(local_id > imsic->global.nr_ids)) {
pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id);
continue;
}
- generic_handle_irq(vec->irq);
+ generic_handle_irq(lpriv->vectors[local_id].irq);
}
chained_irq_exit(chip, desc);
@@ -233,6 +229,7 @@ struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev)
{
return imsic_acpi_fwnode;
}
+EXPORT_SYMBOL_GPL(imsic_acpi_get_fwnode);
static int __init imsic_early_acpi_init(union acpi_subtable_headers *header,
const unsigned long end)
diff --git a/drivers/irqchip/irq-riscv-imsic-platform.c b/drivers/irqchip/irq-riscv-imsic-platform.c
index 643c8e459611..7228a33f6c37 100644
--- a/drivers/irqchip/irq-riscv-imsic-platform.c
+++ b/drivers/irqchip/irq-riscv-imsic-platform.c
@@ -158,11 +158,11 @@ static int imsic_irq_set_affinity(struct irq_data *d, const struct cpumask *mask
tmp_vec.local_id = new_vec->local_id;
/* Point device to the temporary vector */
- imsic_msi_update_msg(irq_get_irq_data(d->irq), &tmp_vec);
+ imsic_msi_update_msg(d, &tmp_vec);
}
/* Point device to the new vector */
- imsic_msi_update_msg(irq_get_irq_data(d->irq), new_vec);
+ imsic_msi_update_msg(d, new_vec);
/* Update irq descriptors with the new vector */
d->chip_data = new_vec;
diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c
index dc95ad856d80..385368052d5c 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.c
+++ b/drivers/irqchip/irq-riscv-imsic-state.c
@@ -434,16 +434,6 @@ void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
}
#endif
-struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id)
-{
- struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu);
-
- if (!lpriv || imsic->global.nr_ids < local_id)
- return NULL;
-
- return &lpriv->vectors[local_id];
-}
-
struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask)
{
struct imsic_vector *vec = NULL;
@@ -487,7 +477,6 @@ static void __init imsic_local_cleanup(void)
lpriv = per_cpu_ptr(imsic->lpriv, cpu);
bitmap_free(lpriv->dirty_bitmap);
- kfree(lpriv->vectors);
}
free_percpu(imsic->lpriv);
@@ -501,7 +490,8 @@ static int __init imsic_local_init(void)
int cpu, i;
/* Allocate per-CPU private state */
- imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
+ imsic->lpriv = __alloc_percpu(struct_size(imsic->lpriv, vectors, global->nr_ids + 1),
+ __alignof__(*imsic->lpriv));
if (!imsic->lpriv)
return -ENOMEM;
@@ -521,12 +511,6 @@ static int __init imsic_local_init(void)
timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED);
#endif
- /* Allocate vector array */
- lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors),
- GFP_KERNEL);
- if (!lpriv->vectors)
- goto fail_local_cleanup;
-
/* Setup vector array */
for (i = 0; i <= global->nr_ids; i++) {
vec = &lpriv->vectors[i];
diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h
index 57f951952b0c..6332501dcbd8 100644
--- a/drivers/irqchip/irq-riscv-imsic-state.h
+++ b/drivers/irqchip/irq-riscv-imsic-state.h
@@ -40,7 +40,7 @@ struct imsic_local_priv {
#endif
/* Local vector table */
- struct imsic_vector *vectors;
+ struct imsic_vector vectors[];
};
struct imsic_priv {
@@ -95,8 +95,6 @@ static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *ve
void imsic_vector_force_move_cleanup(struct imsic_vector *vec);
void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec);
-struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id);
-
struct imsic_vector *imsic_vector_alloc(unsigned int irq, const struct cpumask *mask);
void imsic_vector_free(struct imsic_vector *vector);
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index e5805885394e..70290b35b317 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -166,7 +166,8 @@ static int riscv_intc_domain_alloc(struct irq_domain *domain,
static const struct irq_domain_ops riscv_intc_domain_ops = {
.map = riscv_intc_domain_map,
.xlate = irq_domain_xlate_onecell,
- .alloc = riscv_intc_domain_alloc
+ .alloc = riscv_intc_domain_alloc,
+ .free = irq_domain_free_irqs_top,
};
static struct fwnode_handle *riscv_intc_hwnode(void)
diff --git a/drivers/irqchip/irq-riscv-rpmi-sysmsi.c b/drivers/irqchip/irq-riscv-rpmi-sysmsi.c
new file mode 100644
index 000000000000..5c74c561ce31
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-rpmi-sysmsi.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2025 Ventana Micro Systems Inc. */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/device/devres.h>
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/mailbox_client.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+struct rpmi_sysmsi_get_attrs_rx {
+ __le32 status;
+ __le32 sys_num_msi;
+ __le32 flag0;
+ __le32 flag1;
+};
+
+#define RPMI_SYSMSI_MSI_ATTRIBUTES_FLAG0_PREF_PRIV BIT(0)
+
+struct rpmi_sysmsi_set_msi_state_tx {
+ __le32 sys_msi_index;
+ __le32 sys_msi_state;
+};
+
+struct rpmi_sysmsi_set_msi_state_rx {
+ __le32 status;
+};
+
+#define RPMI_SYSMSI_MSI_STATE_ENABLE BIT(0)
+#define RPMI_SYSMSI_MSI_STATE_PENDING BIT(1)
+
+struct rpmi_sysmsi_set_msi_target_tx {
+ __le32 sys_msi_index;
+ __le32 sys_msi_address_low;
+ __le32 sys_msi_address_high;
+ __le32 sys_msi_data;
+};
+
+struct rpmi_sysmsi_set_msi_target_rx {
+ __le32 status;
+};
+
+struct rpmi_sysmsi_priv {
+ struct device *dev;
+ struct mbox_client client;
+ struct mbox_chan *chan;
+ u32 nr_irqs;
+ u32 gsi_base;
+};
+
+static int rpmi_sysmsi_get_num_msi(struct rpmi_sysmsi_priv *priv)
+{
+ struct rpmi_sysmsi_get_attrs_rx rx;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_GET_ATTRIBUTES,
+ NULL, 0, &rx, sizeof(rx));
+ ret = rpmi_mbox_send_message(priv->chan, &msg);
+ if (ret)
+ return ret;
+ if (rx.status)
+ return rpmi_to_linux_error(le32_to_cpu(rx.status));
+
+ return le32_to_cpu(rx.sys_num_msi);
+}
+
+static int rpmi_sysmsi_set_msi_state(struct rpmi_sysmsi_priv *priv,
+ u32 sys_msi_index, u32 sys_msi_state)
+{
+ struct rpmi_sysmsi_set_msi_state_tx tx;
+ struct rpmi_sysmsi_set_msi_state_rx rx;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ tx.sys_msi_index = cpu_to_le32(sys_msi_index);
+ tx.sys_msi_state = cpu_to_le32(sys_msi_state);
+ rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_SET_MSI_STATE,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+ ret = rpmi_mbox_send_message(priv->chan, &msg);
+ if (ret)
+ return ret;
+ if (rx.status)
+ return rpmi_to_linux_error(le32_to_cpu(rx.status));
+
+ return 0;
+}
+
+static int rpmi_sysmsi_set_msi_target(struct rpmi_sysmsi_priv *priv,
+ u32 sys_msi_index, struct msi_msg *m)
+{
+ struct rpmi_sysmsi_set_msi_target_tx tx;
+ struct rpmi_sysmsi_set_msi_target_rx rx;
+ struct rpmi_mbox_message msg;
+ int ret;
+
+ tx.sys_msi_index = cpu_to_le32(sys_msi_index);
+ tx.sys_msi_address_low = cpu_to_le32(m->address_lo);
+ tx.sys_msi_address_high = cpu_to_le32(m->address_hi);
+ tx.sys_msi_data = cpu_to_le32(m->data);
+ rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_SET_MSI_TARGET,
+ &tx, sizeof(tx), &rx, sizeof(rx));
+ ret = rpmi_mbox_send_message(priv->chan, &msg);
+ if (ret)
+ return ret;
+ if (rx.status)
+ return rpmi_to_linux_error(le32_to_cpu(rx.status));
+
+ return 0;
+}
+
+static void rpmi_sysmsi_irq_mask(struct irq_data *d)
+{
+ struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int ret;
+
+ ret = rpmi_sysmsi_set_msi_state(priv, hwirq, 0);
+ if (ret)
+ dev_warn(priv->dev, "Failed to mask hwirq %lu (error %d)\n", hwirq, ret);
+ irq_chip_mask_parent(d);
+}
+
+static void rpmi_sysmsi_irq_unmask(struct irq_data *d)
+{
+ struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int ret;
+
+ irq_chip_unmask_parent(d);
+ ret = rpmi_sysmsi_set_msi_state(priv, hwirq, RPMI_SYSMSI_MSI_STATE_ENABLE);
+ if (ret)
+ dev_warn(priv->dev, "Failed to unmask hwirq %lu (error %d)\n", hwirq, ret);
+}
+
+static void rpmi_sysmsi_write_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct rpmi_sysmsi_priv *priv = irq_data_get_irq_chip_data(d);
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ int ret;
+
+ /* For a zeroed MSI message, do nothing for now */
+ if (!msg->address_hi && !msg->address_lo && !msg->data)
+ return;
+
+ ret = rpmi_sysmsi_set_msi_target(priv, hwirq, msg);
+ if (ret)
+ dev_warn(priv->dev, "Failed to set target for hwirq %lu (error %d)\n", hwirq, ret);
+}
+
+static void rpmi_sysmsi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = desc->data.icookie.value;
+}
+
+static int rpmi_sysmsi_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ struct msi_domain_info *info = d->host_data;
+ struct rpmi_sysmsi_priv *priv = info->data;
+
+ if (WARN_ON(fwspec->param_count < 1))
+ return -EINVAL;
+
+ /* For DT, gsi_base is always zero. */
+ *hwirq = fwspec->param[0] - priv->gsi_base;
+ *type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+static const struct msi_domain_template rpmi_sysmsi_template = {
+ .chip = {
+ .name = "RPMI-SYSMSI",
+ .irq_mask = rpmi_sysmsi_irq_mask,
+ .irq_unmask = rpmi_sysmsi_irq_unmask,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .irq_write_msi_msg = rpmi_sysmsi_write_msg,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_MASK_ON_SUSPEND,
+ },
+
+ .ops = {
+ .set_desc = rpmi_sysmsi_set_desc,
+ .msi_translate = rpmi_sysmsi_translate,
+ },
+
+ .info = {
+ .bus_token = DOMAIN_BUS_WIRED_TO_MSI,
+ .flags = MSI_FLAG_USE_DEV_FWNODE,
+ .handler = handle_simple_irq,
+ .handler_name = "simple",
+ },
+};
+
+static int rpmi_sysmsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rpmi_sysmsi_priv *priv;
+ struct fwnode_handle *fwnode;
+ u32 id;
+ int rc;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ priv->dev = dev;
+
+ /* Setup mailbox client */
+ priv->client.dev = priv->dev;
+ priv->client.rx_callback = NULL;
+ priv->client.tx_block = false;
+ priv->client.knows_txdone = true;
+ priv->client.tx_tout = 0;
+
+ /* Request mailbox channel */
+ priv->chan = mbox_request_channel(&priv->client, 0);
+ if (IS_ERR(priv->chan))
+ return PTR_ERR(priv->chan);
+
+ /* Get number of system MSIs */
+ rc = rpmi_sysmsi_get_num_msi(priv);
+ if (rc < 1) {
+ mbox_free_channel(priv->chan);
+ if (rc)
+ return dev_err_probe(dev, rc, "Failed to get number of system MSIs\n");
+ else
+ return dev_err_probe(dev, -ENODEV, "No system MSIs found\n");
+ }
+ priv->nr_irqs = rc;
+
+ fwnode = dev_fwnode(dev);
+ if (is_acpi_node(fwnode)) {
+ u32 nr_irqs;
+
+ rc = riscv_acpi_get_gsi_info(fwnode, &priv->gsi_base, &id,
+ &nr_irqs, NULL);
+ if (rc) {
+ dev_err(dev, "failed to find GSI mapping\n");
+ return rc;
+ }
+
+ /* Update with actual GSI range */
+ if (nr_irqs != priv->nr_irqs)
+ riscv_acpi_update_gsi_range(priv->gsi_base, priv->nr_irqs);
+ }
+
+ /*
+ * The device MSI domain for platform devices on the RISC-V
+ * architecture is only available after the MSI controller driver
+ * has been probed, so configure it explicitly here.
+ */
+ if (!dev_get_msi_domain(dev)) {
+ /*
+ * The device MSI domain for OF devices is only set when the
+ * OF device is populated/created. If the device MSI domain is
+ * discovered only after the OF device has been created, it
+ * must be set explicitly before any platform MSI functions
+ * are used.
+ */
+ if (is_of_node(fwnode)) {
+ of_msi_configure(dev, dev_of_node(dev));
+ } else if (is_acpi_device_node(fwnode)) {
+ struct irq_domain *msi_domain;
+
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
+
+ if (!dev_get_msi_domain(dev)) {
+ mbox_free_channel(priv->chan);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ if (!msi_create_device_irq_domain(dev, MSI_DEFAULT_DOMAIN,
+ &rpmi_sysmsi_template,
+ priv->nr_irqs, priv, priv)) {
+ mbox_free_channel(priv->chan);
+ return dev_err_probe(dev, -ENOMEM, "failed to create MSI irq domain\n");
+ }
+
+#ifdef CONFIG_ACPI
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev)
+ acpi_dev_clear_dependencies(adev);
+#endif
+
+ dev_info(dev, "%u system MSIs registered\n", priv->nr_irqs);
+ return 0;
+}
+
+static const struct of_device_id rpmi_sysmsi_match[] = {
+ { .compatible = "riscv,rpmi-system-msi" },
+ {}
+};
+
+static const struct acpi_device_id acpi_rpmi_sysmsi_match[] = {
+ { "RSCV0006" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_rpmi_sysmsi_match);
+
+static struct platform_driver rpmi_sysmsi_driver = {
+ .driver = {
+ .name = "rpmi-sysmsi",
+ .of_match_table = rpmi_sysmsi_match,
+ .acpi_match_table = acpi_rpmi_sysmsi_match,
+ },
+ .probe = rpmi_sysmsi_probe,
+};
+builtin_platform_driver(rpmi_sysmsi_driver);
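Every service call in this new driver follows the same round trip: fill a little-endian tx struct, bind tx and rx to the message with rpmi_mbox_init_send_with_response(), send it over the channel, then translate a non-zero rx.status with rpmi_to_linux_error(). A sketch of how a further query would look in the same idiom; the RPMI_SYSMSI_SRV_GET_MSI_STATE service ID and its message layout are assumptions for illustration, not part of this patch:

struct rpmi_sysmsi_get_msi_state_tx { __le32 sys_msi_index; };
struct rpmi_sysmsi_get_msi_state_rx { __le32 status; __le32 sys_msi_state; };

static int rpmi_sysmsi_get_msi_state(struct rpmi_sysmsi_priv *priv,
				     u32 sys_msi_index, u32 *state)
{
	struct rpmi_sysmsi_get_msi_state_tx tx = {
		.sys_msi_index = cpu_to_le32(sys_msi_index),
	};
	struct rpmi_sysmsi_get_msi_state_rx rx;
	struct rpmi_mbox_message msg;
	int ret;

	rpmi_mbox_init_send_with_response(&msg, RPMI_SYSMSI_SRV_GET_MSI_STATE,
					  &tx, sizeof(tx), &rx, sizeof(rx));
	ret = rpmi_mbox_send_message(priv->chan, &msg);
	if (ret)
		return ret;
	if (rx.status)
		return rpmi_to_linux_error(le32_to_cpu(rx.status));

	*state = le32_to_cpu(rx.sys_msi_state);
	return 0;
}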
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index d8d4dff16276..e5f24c5f3f41 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -85,7 +85,7 @@ static struct sa1100irq_state {
unsigned int iccr;
} sa1100irq_state;
-static int sa1100irq_suspend(void)
+static int sa1100irq_suspend(void *data)
{
struct sa1100irq_state *st = &sa1100irq_state;
@@ -102,7 +102,7 @@ static int sa1100irq_suspend(void)
return 0;
}
-static void sa1100irq_resume(void)
+static void sa1100irq_resume(void *data)
{
struct sa1100irq_state *st = &sa1100irq_state;
@@ -114,14 +114,18 @@ static void sa1100irq_resume(void)
}
}
-static struct syscore_ops sa1100irq_syscore_ops = {
+static const struct syscore_ops sa1100irq_syscore_ops = {
.suspend = sa1100irq_suspend,
.resume = sa1100irq_resume,
};
+static struct syscore sa1100irq_syscore = {
+ .ops = &sa1100irq_syscore_ops,
+};
+
static int __init sa1100irq_init_devicefs(void)
{
- register_syscore_ops(&sa1100irq_syscore_ops);
+ register_syscore(&sa1100irq_syscore);
return 0;
}
diff --git a/drivers/irqchip/irq-sg2042-msi.c b/drivers/irqchip/irq-sg2042-msi.c
index bcfddc51bc6a..f7cf0dc72eab 100644
--- a/drivers/irqchip/irq-sg2042-msi.c
+++ b/drivers/irqchip/irq-sg2042-msi.c
@@ -30,6 +30,7 @@ struct sg204x_msi_chip_info {
* @doorbell_addr: see TRM, 10.1.32, GP_INTR0_SET
* @irq_first: First vector number at which MSIs start
* @num_irqs: Number of vectors for MSIs
+ * @irq_type: IRQ type for MSIs
* @msi_map: mapping for allocated MSI vectors.
* @msi_map_lock: Lock for msi_map
* @chip_info: chip-specific information
@@ -41,6 +42,7 @@ struct sg204x_msi_chipdata {
u32 irq_first;
u32 num_irqs;
+ unsigned int irq_type;
unsigned long *msi_map;
struct mutex msi_map_lock;
@@ -85,6 +87,8 @@ static void sg2042_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *m
static const struct irq_chip sg2042_msi_middle_irq_chip = {
.name = "SG2042 MSI",
+ .irq_startup = irq_chip_startup_parent,
+ .irq_shutdown = irq_chip_shutdown_parent,
.irq_ack = sg2042_msi_irq_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
@@ -114,6 +118,8 @@ static void sg2044_msi_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *m
static struct irq_chip sg2044_msi_middle_irq_chip = {
.name = "SG2044 MSI",
+ .irq_startup = irq_chip_startup_parent,
+ .irq_shutdown = irq_chip_shutdown_parent,
.irq_ack = sg2044_msi_irq_ack,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
@@ -133,14 +139,14 @@ static int sg204x_msi_parent_domain_alloc(struct irq_domain *domain, unsigned in
fwspec.fwnode = domain->parent->fwnode;
fwspec.param_count = 2;
fwspec.param[0] = data->irq_first + hwirq;
- fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+ fwspec.param[1] = data->irq_type;
ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
if (ret)
return ret;
d = irq_domain_get_irq_data(domain->parent, virq);
- return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+ return d->chip->irq_set_type(d, data->irq_type);
}
static int sg204x_msi_middle_domain_alloc(struct irq_domain *domain, unsigned int virq,
@@ -185,8 +191,10 @@ static const struct irq_domain_ops sg204x_msi_middle_domain_ops = {
.select = msi_lib_irq_domain_select,
};
-#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
- MSI_FLAG_USE_DEF_CHIP_OPS)
+#define SG2042_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT)
#define SG2042_MSI_FLAGS_SUPPORTED MSI_GENERIC_FLAGS_MASK
@@ -200,10 +208,13 @@ static const struct msi_parent_ops sg2042_msi_parent_ops = {
.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
-#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
- MSI_FLAG_USE_DEF_CHIP_OPS)
+#define SG2044_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
+ MSI_FLAG_USE_DEF_CHIP_OPS | \
+ MSI_FLAG_PCI_MSI_MASK_PARENT | \
+ MSI_FLAG_PCI_MSI_STARTUP_PARENT)
-#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+#define SG2044_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
+ MSI_FLAG_MULTI_PCI_MSI | \
MSI_FLAG_PCI_MSIX)
static const struct msi_parent_ops sg2044_msi_parent_ops = {
@@ -289,6 +300,7 @@ static int sg2042_msi_probe(struct platform_device *pdev)
}
data->irq_first = (u32)args.args[0];
+ data->irq_type = (unsigned int)args.args[1];
data->num_irqs = (u32)args.args[args.nargs - 1];
mutex_init(&data->msi_map_lock);
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index bf69a4802b71..210a57959637 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -49,6 +49,8 @@
#define CONTEXT_ENABLE_BASE 0x2000
#define CONTEXT_ENABLE_SIZE 0x80
+#define PENDING_BASE 0x1000
+
/*
* Each hart context has a set of control registers associated with it. Right
* now there's only two: a source priority threshold over which the hart will
@@ -63,6 +65,7 @@
#define PLIC_ENABLE_THRESHOLD 0
#define PLIC_QUIRK_EDGE_INTERRUPT 0
+#define PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM 1
struct plic_priv {
struct fwnode_handle *fwnode;
@@ -94,15 +97,22 @@ static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
static int plic_irq_set_type(struct irq_data *d, unsigned int type);
-static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
+static void __plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
- u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
+ u32 __iomem *base = handler->enable_base;
u32 hwirq_mask = 1 << (hwirq % 32);
+ int group = hwirq / 32;
+ u32 value;
+
+ value = readl(base + group);
if (enable)
- writel(readl(reg) | hwirq_mask, reg);
+ value |= hwirq_mask;
else
- writel(readl(reg) & ~hwirq_mask, reg);
+ value &= ~hwirq_mask;
+
+ handler->enable_save[group] = value;
+ writel(value, base + group);
}
static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
@@ -110,7 +120,7 @@ static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
unsigned long flags;
raw_spin_lock_irqsave(&handler->enable_lock, flags);
- __plic_toggle(handler->enable_base, hwirq, enable);
+ __plic_toggle(handler, hwirq, enable);
raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
@@ -179,12 +189,14 @@ static int plic_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
- plic_irq_disable(d);
+ /* Invalidate the original routing entry */
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ /* Set the new routing entry if the IRQ is enabled */
if (!irqd_irq_disabled(d))
- plic_irq_enable(d);
+ plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
return IRQ_SET_MASK_OK_DONE;
}
@@ -243,38 +255,22 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
return IRQ_SET_MASK_OK;
}
-static int plic_irq_suspend(void)
+static int plic_irq_suspend(void *data)
{
- unsigned int i, cpu;
- unsigned long flags;
- u32 __iomem *reg;
struct plic_priv *priv;
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
- for (i = 0; i < priv->nr_irqs; i++) {
+ /* irq ID 0 is reserved */
+ for (unsigned int i = 1; i < priv->nr_irqs; i++) {
__assign_bit(i, priv->prio_save,
readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
}
- for_each_cpu(cpu, cpu_present_mask) {
- struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
-
- if (!handler->present)
- continue;
-
- raw_spin_lock_irqsave(&handler->enable_lock, flags);
- for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
- reg = handler->enable_base + i * sizeof(u32);
- handler->enable_save[i] = readl(reg);
- }
- raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
- }
-
return 0;
}
-static void plic_irq_resume(void)
+static void plic_irq_resume(void *data)
{
unsigned int i, index, cpu;
unsigned long flags;
@@ -283,13 +279,14 @@ static void plic_irq_resume(void)
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
- for (i = 0; i < priv->nr_irqs; i++) {
+ /* irq ID 0 is reserved */
+ for (i = 1; i < priv->nr_irqs; i++) {
index = BIT_WORD(i);
writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
}
- for_each_cpu(cpu, cpu_present_mask) {
+ for_each_present_cpu(cpu) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present)
@@ -304,11 +301,15 @@ static void plic_irq_resume(void)
}
}
-static struct syscore_ops plic_irq_syscore_ops = {
+static const struct syscore_ops plic_irq_syscore_ops = {
.suspend = plic_irq_suspend,
.resume = plic_irq_resume,
};
+static struct syscore plic_irq_syscore = {
+ .ops = &plic_irq_syscore_ops,
+};
+
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -394,6 +395,98 @@ static void plic_handle_irq(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static u32 cp100_isolate_pending_irq(int nr_irq_groups, struct plic_handler *handler)
+{
+ u32 __iomem *pending = handler->priv->regs + PENDING_BASE;
+ u32 __iomem *enable = handler->enable_base;
+ u32 pending_irqs = 0;
+ int i, j;
+
+ /* Look for first pending interrupt */
+ for (i = 0; i < nr_irq_groups; i++) {
+ /* The enable mask would clear any pending bits in this group anyway, so skip the read */
+ if (!handler->enable_save[i])
+ continue;
+
+ pending_irqs = handler->enable_save[i] & readl_relaxed(pending + i);
+ if (pending_irqs)
+ break;
+ }
+
+ if (!pending_irqs)
+ return 0;
+
+ /* Isolate lowest set bit */
+ pending_irqs &= -pending_irqs;
+
+ /* Disable all interrupts but the first pending one */
+ for (j = 0; j < nr_irq_groups; j++) {
+ u32 new_mask = j == i ? pending_irqs : 0;
+
+ if (new_mask != handler->enable_save[j])
+ writel_relaxed(new_mask, enable + j);
+ }
+ return pending_irqs;
+}
+
+static irq_hw_number_t cp100_get_hwirq(struct plic_handler *handler, void __iomem *claim)
+{
+ int nr_irq_groups = DIV_ROUND_UP(handler->priv->nr_irqs, 32);
+ u32 __iomem *enable = handler->enable_base;
+ irq_hw_number_t hwirq = 0;
+ u32 iso_mask;
+ int i;
+
+ guard(raw_spinlock)(&handler->enable_lock);
+
+ /* Existing enable state is already cached in enable_save */
+ iso_mask = cp100_isolate_pending_irq(nr_irq_groups, handler);
+ if (!iso_mask)
+ return 0;
+
+ /*
+ * Interrupts delivered to the hardware still become pending, but only
+ * interrupts that are both pending and enabled can be claimed.
+ * Clearing the enable bit for all interrupts but the first pending
+ * one avoids a hardware bug triggered when the claim register is
+ * read while more than one interrupt is eligible.
+ */
+ hwirq = readl(claim);
+
+ /* Restore previous state */
+ for (i = 0; i < nr_irq_groups; i++) {
+ u32 written = i == hwirq / 32 ? iso_mask : 0;
+ u32 stored = handler->enable_save[i];
+
+ if (stored != written)
+ writel_relaxed(stored, enable + i);
+ }
+ return hwirq;
+}
+
+static void plic_handle_irq_cp100(struct irq_desc *desc)
+{
+ struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
+ irq_hw_number_t hwirq;
+
+ WARN_ON_ONCE(!handler->present);
+
+ chained_irq_enter(chip, desc);
+
+ while ((hwirq = cp100_get_hwirq(handler, claim))) {
+ int err = generic_handle_domain_irq(handler->priv->irqdomain, hwirq);
+
+ if (unlikely(err)) {
+ pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
+ handler->priv->fwnode, hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
/* priority must be > threshold to trigger an interrupt */
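
The isolation step in cp100_isolate_pending_irq() above uses the two's-complement identity x & -x, which keeps only the lowest set bit of x. A self-contained worked example:

#include <stdio.h>

int main(void)
{
	unsigned int pending = 0x28;		/* 0b101000: sources 3 and 5 pending */
	unsigned int lowest = pending & -pending;

	printf("0x%x\n", lowest);		/* prints 0x8: only bit 3 survives */
	return 0;
}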
@@ -430,6 +523,8 @@ static const struct of_device_id plic_match[] = {
.data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
{ .compatible = "thead,c900-plic",
.data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
+ { .compatible = "ultrarisc,cp100-plic",
+ .data = (const void *)BIT(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM) },
{}
};
@@ -588,12 +683,11 @@ static int plic_probe(struct fwnode_handle *fwnode)
if (parent_hwirq != RV_IRQ_EXT) {
/* Disable S-mode enable bits if running in M-mode. */
if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
- void __iomem *enable_base = priv->regs +
- CONTEXT_ENABLE_BASE +
- i * CONTEXT_ENABLE_SIZE;
+ u32 __iomem *enable_base = priv->regs + CONTEXT_ENABLE_BASE +
+ i * CONTEXT_ENABLE_SIZE;
- for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
- __plic_toggle(enable_base, hwirq, 0);
+ for (int j = 0; j <= nr_irqs / 32; j++)
+ writel(0, enable_base + j);
}
continue;
}
@@ -664,17 +758,22 @@ done:
}
if (global_setup) {
+ void (*handler_fn)(struct irq_desc *) = plic_handle_irq;
+
+ if (test_bit(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM, &handler->priv->plic_quirks))
+ handler_fn = plic_handle_irq_cp100;
+
/* Find parent domain and register chained handler */
domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
if (domain)
plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
if (plic_parent_irq)
- irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
+ irq_set_chained_handler(plic_parent_irq, handler_fn);
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
- register_syscore_ops(&plic_irq_syscore_ops);
+ register_syscore(&plic_irq_syscore);
plic_global_setup_done = true;
}
}
diff --git a/drivers/irqchip/irq-starfive-jh8100-intc.c b/drivers/irqchip/irq-starfive-jh8100-intc.c
index 2460798ec158..705361b4ebe0 100644
--- a/drivers/irqchip/irq-starfive-jh8100-intc.c
+++ b/drivers/irqchip/irq-starfive-jh8100-intc.c
@@ -114,9 +114,9 @@ static void starfive_intc_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static int __init starfive_intc_init(struct device_node *intc,
- struct device_node *parent)
+static int starfive_intc_probe(struct platform_device *pdev, struct device_node *parent)
{
+ struct device_node *intc = pdev->dev.of_node;
struct starfive_irq_chip *irqc;
struct reset_control *rst;
struct clk *clk;
@@ -199,7 +199,7 @@ err_free:
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(starfive_intc)
-IRQCHIP_MATCH("starfive,jh8100-intc", starfive_intc_init)
+IRQCHIP_MATCH("starfive,jh8100-intc", starfive_intc_probe)
IRQCHIP_PLATFORM_DRIVER_END(starfive_intc)
MODULE_DESCRIPTION("StarFive JH8100 External Interrupt Controller");
diff --git a/drivers/irqchip/irq-sun6i-r.c b/drivers/irqchip/irq-sun6i-r.c
index 37d4b29763bc..23251831c06e 100644
--- a/drivers/irqchip/irq-sun6i-r.c
+++ b/drivers/irqchip/irq-sun6i-r.c
@@ -268,7 +268,7 @@ static const struct irq_domain_ops sun6i_r_intc_domain_ops = {
.free = irq_domain_free_irqs_common,
};
-static int sun6i_r_intc_suspend(void)
+static int sun6i_r_intc_suspend(void *data)
{
u32 buf[BITS_TO_U32(MAX(SUN6I_NR_TOP_LEVEL_IRQS, SUN6I_NR_MUX_BITS))];
int i;
@@ -284,7 +284,7 @@ static int sun6i_r_intc_suspend(void)
return 0;
}
-static void sun6i_r_intc_resume(void)
+static void sun6i_r_intc_resume(void *data)
{
int i;
@@ -294,17 +294,21 @@ static void sun6i_r_intc_resume(void)
writel_relaxed(0, base + SUN6I_IRQ_ENABLE(i));
}
-static void sun6i_r_intc_shutdown(void)
+static void sun6i_r_intc_shutdown(void *data)
{
- sun6i_r_intc_suspend();
+ sun6i_r_intc_suspend(data);
}
-static struct syscore_ops sun6i_r_intc_syscore_ops = {
+static const struct syscore_ops sun6i_r_intc_syscore_ops = {
.suspend = sun6i_r_intc_suspend,
.resume = sun6i_r_intc_resume,
.shutdown = sun6i_r_intc_shutdown,
};
+static struct syscore sun6i_r_intc_syscore = {
+ .ops = &sun6i_r_intc_syscore_ops,
+};
+
static int __init sun6i_r_intc_init(struct device_node *node,
struct device_node *parent,
const struct sun6i_r_intc_variant *v)
@@ -346,10 +350,10 @@ static int __init sun6i_r_intc_init(struct device_node *node,
return -ENOMEM;
}
- register_syscore_ops(&sun6i_r_intc_syscore_ops);
+ register_syscore(&sun6i_r_intc_syscore);
sun6i_r_intc_ack_nmi();
- sun6i_r_intc_resume();
+ sun6i_r_intc_resume(NULL);
return 0;
}
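
The syscore conversion above follows the same pattern applied to the PLIC earlier and to the Tegra, VIC, ledtrig-cpu and via-pmu hunks below: the callbacks gain a void *data argument, the ops table becomes const, and registration goes through a struct syscore wrapper passed to register_syscore(). A minimal sketch of the new shape, using only what these hunks show (driver name hypothetical, <linux/syscore_ops.h> assumed):

static int foo_syscore_suspend(void *data)
{
	/* save hardware state */
	return 0;
}

static void foo_syscore_resume(void *data)
{
	/* restore hardware state */
}

static const struct syscore_ops foo_syscore_ops = {
	.suspend = foo_syscore_suspend,
	.resume = foo_syscore_resume,
};

static struct syscore foo_syscore = {
	.ops = &foo_syscore_ops,
};

static int __init foo_syscore_init(void)
{
	register_syscore(&foo_syscore);
	return 0;
}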
diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c
index 66cbb9f77ff3..b6382cf6359a 100644
--- a/drivers/irqchip/irq-tegra.c
+++ b/drivers/irqchip/irq-tegra.c
@@ -132,7 +132,7 @@ static int tegra_set_wake(struct irq_data *d, unsigned int enable)
return 0;
}
-static int tegra_ictlr_suspend(void)
+static int tegra_ictlr_suspend(void *data)
{
unsigned long flags;
unsigned int i;
@@ -161,7 +161,7 @@ static int tegra_ictlr_suspend(void)
return 0;
}
-static void tegra_ictlr_resume(void)
+static void tegra_ictlr_resume(void *data)
{
unsigned long flags;
unsigned int i;
@@ -184,14 +184,18 @@ static void tegra_ictlr_resume(void)
local_irq_restore(flags);
}
-static struct syscore_ops tegra_ictlr_syscore_ops = {
+static const struct syscore_ops tegra_ictlr_syscore_ops = {
.suspend = tegra_ictlr_suspend,
.resume = tegra_ictlr_resume,
};
+static struct syscore tegra_ictlr_syscore = {
+ .ops = &tegra_ictlr_syscore_ops,
+};
+
static void tegra_ictlr_syscore_init(void)
{
- register_syscore_ops(&tegra_ictlr_syscore_ops);
+ register_syscore(&tegra_ictlr_syscore);
}
#else
#define tegra_set_wake NULL
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 1e236d5b7516..2e4013c6834d 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -165,4 +165,3 @@ module_platform_driver(ts4800_ic_driver);
MODULE_AUTHOR("Damien Riegel <damien.riegel@savoirfairelinux.com>");
MODULE_DESCRIPTION("Multiplexed-IRQs driver for TS-4800's FPGA");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:ts4800_irqc");
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 2bcdf216a000..e38104c5064e 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -120,7 +120,7 @@ static void resume_one_vic(struct vic_device *vic)
writel(~vic->soft_int, base + VIC_INT_SOFT_CLEAR);
}
-static void vic_resume(void)
+static void vic_resume(void *data)
{
int id;
@@ -146,7 +146,7 @@ static void suspend_one_vic(struct vic_device *vic)
writel(~vic->resume_irqs, base + VIC_INT_ENABLE_CLEAR);
}
-static int vic_suspend(void)
+static int vic_suspend(void *data)
{
int id;
@@ -156,11 +156,15 @@ static int vic_suspend(void)
return 0;
}
-static struct syscore_ops vic_syscore_ops = {
+static const struct syscore_ops vic_syscore_ops = {
.suspend = vic_suspend,
.resume = vic_resume,
};
+static struct syscore vic_syscore = {
+ .ops = &vic_syscore_ops,
+};
+
/**
* vic_pm_init - initcall to register VIC pm
*
@@ -171,7 +175,7 @@ static struct syscore_ops vic_syscore_ops = {
static int __init vic_pm_init(void)
{
if (vic_id > 0)
- register_syscore_ops(&vic_syscore_ops);
+ register_syscore(&vic_syscore);
return 0;
}
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 0ee7b6b71f5f..689c8e448901 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -36,11 +36,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device_node *par_np __free(device_node) = of_irq_find_parent(np);
- of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
+ platform_irq_probe_t irq_probe = of_device_get_match_data(&pdev->dev);
- if (!irq_init_cb) {
+ if (!irq_probe)
return -EINVAL;
- }
if (par_np == np)
par_np = NULL;
@@ -53,10 +52,9 @@ int platform_irqchip_probe(struct platform_device *pdev)
* interrupt controller. The actual initialization callback of this
* interrupt controller can check for specific domains as necessary.
*/
- if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
+ if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY))
return -EPROBE_DEFER;
- }
- return irq_init_cb(np, par_np);
+ return irq_probe(pdev, par_np);
}
EXPORT_SYMBOL_GPL(platform_irqchip_probe);
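
The signature change above is what drives the *_init to *_probe renames in the StarFive and Qualcomm PDC hunks: the match data now carries a probe callback that receives the platform_device rather than a bare device_node. A minimal sketch of the inferred shape (typedef name taken from this hunk, everything else hypothetical):

typedef int (*platform_irq_probe_t)(struct platform_device *pdev,
				    struct device_node *parent);

static int foo_intc_probe(struct platform_device *pdev,
			  struct device_node *parent)
{
	struct device_node *np = pdev->dev.of_node;

	/* map registers, create the irq_domain against np, ... */
	return np ? 0 : -EINVAL;
}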
diff --git a/drivers/irqchip/qcom-irq-combiner.c b/drivers/irqchip/qcom-irq-combiner.c
index 18e696dc7f4d..09819007d08e 100644
--- a/drivers/irqchip/qcom-irq-combiner.c
+++ b/drivers/irqchip/qcom-irq-combiner.c
@@ -222,7 +222,7 @@ static int get_registers(struct platform_device *pdev, struct combiner *comb)
return 0;
}
-static int __init combiner_probe(struct platform_device *pdev)
+static int combiner_probe(struct platform_device *pdev)
{
struct combiner *combiner;
int nregs;
@@ -266,11 +266,11 @@ static const struct acpi_device_id qcom_irq_combiner_ids[] = {
{ }
};
-static struct platform_driver qcom_irq_combiner_probe = {
+static struct platform_driver qcom_irq_combiner_driver = {
.driver = {
.name = "qcom-irq-combiner",
.acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids),
},
.probe = combiner_probe,
};
-builtin_platform_driver(qcom_irq_combiner_probe);
+builtin_platform_driver(qcom_irq_combiner_driver);
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 52d77546aacb..518f7f0f3dab 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -350,9 +350,10 @@ static int pdc_setup_pin_mapping(struct device_node *np)
#define QCOM_PDC_SIZE 0x30000
-static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
+static int qcom_pdc_probe(struct platform_device *pdev, struct device_node *parent)
{
struct irq_domain *parent_domain, *pdc_domain;
+ struct device_node *node = pdev->dev.of_node;
resource_size_t res_size;
struct resource res;
int ret;
@@ -428,7 +429,7 @@ fail:
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_pdc)
-IRQCHIP_MATCH("qcom,pdc", qcom_pdc_init)
+IRQCHIP_MATCH("qcom,pdc", qcom_pdc_probe)
IRQCHIP_PLATFORM_DRIVER_END(qcom_pdc)
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index c5d13bdc239b..e8f7e52354bc 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -907,7 +907,7 @@ int __init kcapi_init(void)
{
int err;
- kcapi_wq = alloc_workqueue("kcapi", 0, 0);
+ kcapi_wq = alloc_workqueue("kcapi", WQ_PERCPU, 0);
if (!kcapi_wq)
return -ENOMEM;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index e54419a4e731..541a20cb58f1 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1904,13 +1904,13 @@ out:
mISDN_freebchannel(&hw->bch[1]);
mISDN_freebchannel(&hw->bch[0]);
mISDN_freedchannel(&hw->dch);
- kfree(hw);
return err;
}
static int
hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
+ int err;
struct hfcsusb *hw;
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *iface = intf->cur_altsetting;
@@ -2101,20 +2101,28 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!hw->ctrl_urb) {
pr_warn("%s: No memory for control urb\n",
driver_info->vend_name);
- kfree(hw);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_free_hw;
}
pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
hw->name, __func__, driver_info->vend_name,
conf_str[small_match], ifnum, alt_used);
- if (setup_instance(hw, dev->dev.parent))
- return -EIO;
+ if (setup_instance(hw, dev->dev.parent)) {
+ err = -EIO;
+ goto err_free_urb;
+ }
hw->intf = intf;
usb_set_intfdata(hw->intf, hw);
return 0;
+
+err_free_urb:
+ usb_free_urb(hw->ctrl_urb);
+err_free_hw:
+ kfree(hw);
+ return err;
}
/* function called when an active device is removed */
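
The leak fix above is the kernel's standard goto-unwind idiom: one label per acquired resource, released in reverse order of acquisition. A generic, self-contained sketch (all names made up):

#include <stdlib.h>
#include <errno.h>

static int foo_probe(int fail_setup)
{
	void *a, *b;
	int err;

	a = malloc(16);
	if (!a)
		return -ENOMEM;

	b = malloc(16);			/* stands in for the ctrl URB */
	if (!b) {
		err = -ENOMEM;
		goto err_free_a;
	}

	if (fail_setup) {		/* stands in for setup_instance() failing */
		err = -EIO;
		goto err_free_b;
	}
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
	return err;
}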
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index f732f6614d37..6ab036e4a35f 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -676,7 +676,7 @@ l1oip_socket_thread(void *data)
hc->sin_remote.sin_port = htons((unsigned short)hc->remoteport);
/* bind to incoming port */
- if (socket->ops->bind(socket, (struct sockaddr *)&hc->sin_local,
+ if (socket->ops->bind(socket, (struct sockaddr_unsized *)&hc->sin_local,
sizeof(hc->sin_local))) {
printk(KERN_ERR "%s: Failed to bind socket to port %d.\n",
__func__, hc->localport);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index b215b28cad7b..77b900db1cac 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -462,7 +462,7 @@ static int data_sock_getsockopt(struct socket *sock, int level, int optname,
}
static int
-data_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+data_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
@@ -696,7 +696,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
}
static int
-base_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+base_sock_bind(struct socket *sock, struct sockaddr_unsized *addr, int addr_len)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 6e3dce7e35a4..11e7282dc297 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -214,10 +214,6 @@ config LEDS_EL15203000
To compile this driver as a module, choose M here: the module
will be called leds-el15203000.
-config LEDS_EXPRESSWIRE
- bool
- depends on GPIOLIB
-
config LEDS_TURRIS_OMNIA
tristate "LED support for CZ.NIC's Turris Omnia"
depends on LEDS_CLASS_MULTICOLOR
@@ -443,8 +439,8 @@ config LEDS_LP55XX_COMMON
depends on LEDS_CLASS_MULTICOLOR
depends on OF
depends on I2C
- select FW_LOADER
- select FW_LOADER_USER_HELPER
+ imply FW_LOADER
+ imply FW_LOADER_USER_HELPER
help
This option supports common operations for LP5521/5523/55231/5562/5569/
8501 devices.
@@ -674,7 +670,7 @@ config LEDS_BD2606MVV
help
This option enables support for BD2606MVV LED driver chips
accessed via the I2C bus. It supports setting brightness, with
- the limitiation that there are groups of two channels sharing
+ the limitation that there are groups of two channels sharing
a brightness setting, but not the on/off setting.
To compile this driver as a module, choose M here: the module will
diff --git a/drivers/leds/flash/leds-qcom-flash.c b/drivers/leds/flash/leds-qcom-flash.c
index 89cf5120f5d5..b03a6833e3e3 100644
--- a/drivers/leds/flash/leds-qcom-flash.c
+++ b/drivers/leds/flash/leds-qcom-flash.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitfield.h>
@@ -114,36 +114,55 @@ enum {
REG_THERM_THRSH1,
REG_THERM_THRSH2,
REG_THERM_THRSH3,
+ REG_TORCH_CLAMP,
REG_MAX_COUNT,
};
+static const struct reg_field mvflash_3ch_pmi8998_regs[REG_MAX_COUNT] = {
+ [REG_STATUS1] = REG_FIELD(0x08, 0, 5),
+ [REG_STATUS2] = REG_FIELD(0x09, 0, 7),
+ [REG_STATUS3] = REG_FIELD(0x0a, 0, 7),
+ [REG_CHAN_TIMER] = REG_FIELD_ID(0x40, 0, 7, 3, 1),
+ [REG_ITARGET] = REG_FIELD_ID(0x43, 0, 6, 3, 1),
+ [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7),
+ [REG_IRESOLUTION] = REG_FIELD(0x47, 0, 5),
+ [REG_CHAN_STROBE] = REG_FIELD_ID(0x49, 0, 2, 3, 1),
+ [REG_CHAN_EN] = REG_FIELD(0x4c, 0, 2),
+ [REG_THERM_THRSH1] = REG_FIELD(0x56, 0, 2),
+ [REG_THERM_THRSH2] = REG_FIELD(0x57, 0, 2),
+ [REG_THERM_THRSH3] = REG_FIELD(0x58, 0, 2),
+ [REG_TORCH_CLAMP] = REG_FIELD(0xea, 0, 6),
+};
+
static const struct reg_field mvflash_3ch_regs[REG_MAX_COUNT] = {
- REG_FIELD(0x08, 0, 7), /* status1 */
- REG_FIELD(0x09, 0, 7), /* status2 */
- REG_FIELD(0x0a, 0, 7), /* status3 */
- REG_FIELD_ID(0x40, 0, 7, 3, 1), /* chan_timer */
- REG_FIELD_ID(0x43, 0, 6, 3, 1), /* itarget */
- REG_FIELD(0x46, 7, 7), /* module_en */
- REG_FIELD(0x47, 0, 5), /* iresolution */
- REG_FIELD_ID(0x49, 0, 2, 3, 1), /* chan_strobe */
- REG_FIELD(0x4c, 0, 2), /* chan_en */
- REG_FIELD(0x56, 0, 2), /* therm_thrsh1 */
- REG_FIELD(0x57, 0, 2), /* therm_thrsh2 */
- REG_FIELD(0x58, 0, 2), /* therm_thrsh3 */
+ [REG_STATUS1] = REG_FIELD(0x08, 0, 7),
+ [REG_STATUS2] = REG_FIELD(0x09, 0, 7),
+ [REG_STATUS3] = REG_FIELD(0x0a, 0, 7),
+ [REG_CHAN_TIMER] = REG_FIELD_ID(0x40, 0, 7, 3, 1),
+ [REG_ITARGET] = REG_FIELD_ID(0x43, 0, 6, 3, 1),
+ [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7),
+ [REG_IRESOLUTION] = REG_FIELD(0x47, 0, 5),
+ [REG_CHAN_STROBE] = REG_FIELD_ID(0x49, 0, 2, 3, 1),
+ [REG_CHAN_EN] = REG_FIELD(0x4c, 0, 2),
+ [REG_THERM_THRSH1] = REG_FIELD(0x56, 0, 2),
+ [REG_THERM_THRSH2] = REG_FIELD(0x57, 0, 2),
+ [REG_THERM_THRSH3] = REG_FIELD(0x58, 0, 2),
+ [REG_TORCH_CLAMP] = REG_FIELD(0xec, 0, 6),
};
static const struct reg_field mvflash_4ch_regs[REG_MAX_COUNT] = {
- REG_FIELD(0x06, 0, 7), /* status1 */
- REG_FIELD(0x07, 0, 6), /* status2 */
- REG_FIELD(0x09, 0, 7), /* status3 */
- REG_FIELD_ID(0x3e, 0, 7, 4, 1), /* chan_timer */
- REG_FIELD_ID(0x42, 0, 6, 4, 1), /* itarget */
- REG_FIELD(0x46, 7, 7), /* module_en */
- REG_FIELD(0x49, 0, 3), /* iresolution */
- REG_FIELD_ID(0x4a, 0, 6, 4, 1), /* chan_strobe */
- REG_FIELD(0x4e, 0, 3), /* chan_en */
- REG_FIELD(0x7a, 0, 2), /* therm_thrsh1 */
- REG_FIELD(0x78, 0, 2), /* therm_thrsh2 */
+ [REG_STATUS1] = REG_FIELD(0x06, 0, 7),
+ [REG_STATUS2] = REG_FIELD(0x07, 0, 6),
+ [REG_STATUS3] = REG_FIELD(0x09, 0, 7),
+ [REG_CHAN_TIMER] = REG_FIELD_ID(0x3e, 0, 7, 4, 1),
+ [REG_ITARGET] = REG_FIELD_ID(0x42, 0, 6, 4, 1),
+ [REG_MODULE_EN] = REG_FIELD(0x46, 7, 7),
+ [REG_IRESOLUTION] = REG_FIELD(0x49, 0, 3),
+ [REG_CHAN_STROBE] = REG_FIELD_ID(0x4a, 0, 6, 4, 1),
+ [REG_CHAN_EN] = REG_FIELD(0x4e, 0, 3),
+ [REG_THERM_THRSH1] = REG_FIELD(0x7a, 0, 2),
+ [REG_THERM_THRSH2] = REG_FIELD(0x78, 0, 2),
+ [REG_TORCH_CLAMP] = REG_FIELD(0xed, 0, 6),
};
struct qcom_flash_data {
@@ -156,6 +175,7 @@ struct qcom_flash_data {
u8 max_channels;
u8 chan_en_bits;
u8 revision;
+ u8 torch_clamp;
};
struct qcom_flash_led {
@@ -702,6 +722,7 @@ static int qcom_flash_register_led_device(struct device *dev,
u32 current_ua, timeout_us;
u32 channels[4];
int i, rc, count;
+ u8 torch_clamp;
count = fwnode_property_count_u32(node, "led-sources");
if (count <= 0) {
@@ -751,6 +772,12 @@ static int qcom_flash_register_led_device(struct device *dev,
current_ua = min_t(u32, current_ua, TORCH_CURRENT_MAX_UA * led->chan_count);
led->max_torch_current_ma = current_ua / UA_PER_MA;
+ torch_clamp = (current_ua / led->chan_count) / TORCH_IRES_UA;
+ if (torch_clamp != 0)
+ torch_clamp--;
+
+ flash_data->torch_clamp = max_t(u8, flash_data->torch_clamp, torch_clamp);
+
if (fwnode_property_present(node, "flash-max-microamp")) {
flash->led_cdev.flags |= LED_DEV_CAP_FLASH;
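
A worked example of the clamp arithmetic above, with illustrative numbers only: TORCH_IRES_UA is taken to be the per-step torch current resolution and the trailing decrement suggests a zero-based register field (both are assumptions, and the 5000 uA step is made up):

#include <stdio.h>

int main(void)
{
	unsigned int current_ua = 500000;	/* illustrative total torch current */
	unsigned int chan_count = 2;
	unsigned int ires_ua = 5000;		/* stand-in for TORCH_IRES_UA */
	unsigned int torch_clamp;

	torch_clamp = (current_ua / chan_count) / ires_ua;	/* 250000 / 5000 = 50 */
	if (torch_clamp != 0)
		torch_clamp--;			/* 49, if the field is zero-based */

	printf("%u\n", torch_clamp);		/* prints 49 */
	return 0;
}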
@@ -851,13 +878,20 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
return rc;
}
- if (val == FLASH_SUBTYPE_3CH_PM8150_VAL || val == FLASH_SUBTYPE_3CH_PMI8998_VAL) {
+ if (val == FLASH_SUBTYPE_3CH_PM8150_VAL) {
flash_data->hw_type = QCOM_MVFLASH_3CH;
flash_data->max_channels = 3;
regs = devm_kmemdup(dev, mvflash_3ch_regs, sizeof(mvflash_3ch_regs),
GFP_KERNEL);
if (!regs)
return -ENOMEM;
+ } else if (val == FLASH_SUBTYPE_3CH_PMI8998_VAL) {
+ flash_data->hw_type = QCOM_MVFLASH_3CH;
+ flash_data->max_channels = 3;
+ regs = devm_kmemdup(dev, mvflash_3ch_pmi8998_regs,
+ sizeof(mvflash_3ch_pmi8998_regs), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
} else if (val == FLASH_SUBTYPE_4CH_VAL) {
flash_data->hw_type = QCOM_MVFLASH_4CH;
flash_data->max_channels = 4;
@@ -917,8 +951,7 @@ static int qcom_flash_led_probe(struct platform_device *pdev)
flash_data->leds_count++;
}
- return 0;
-
+ return regmap_field_write(flash_data->r_fields[REG_TORCH_CLAMP], flash_data->torch_clamp);
release:
while (flash_data->v4l2_flash[flash_data->leds_count] && flash_data->leds_count)
v4l2_flash_release(flash_data->v4l2_flash[flash_data->leds_count--]);
diff --git a/drivers/leds/flash/leds-rt4505.c b/drivers/leds/flash/leds-rt4505.c
index f16358b8dfc1..18fd5b7e528f 100644
--- a/drivers/leds/flash/leds-rt4505.c
+++ b/drivers/leds/flash/leds-rt4505.c
@@ -365,7 +365,7 @@ static int rt4505_probe(struct i2c_client *client)
return ret;
}
- child = fwnode_get_next_available_child_node(client->dev.fwnode, NULL);
+ child = device_get_next_child_node(&client->dev, NULL);
if (!child) {
dev_err(priv->dev, "Failed to get child node\n");
return -EINVAL;
diff --git a/drivers/leds/flash/leds-rt8515.c b/drivers/leds/flash/leds-rt8515.c
index 6af0d2c7fc56..f6b439674c03 100644
--- a/drivers/leds/flash/leds-rt8515.c
+++ b/drivers/leds/flash/leds-rt8515.c
@@ -304,7 +304,7 @@ static int rt8515_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(rt->enable_torch),
"cannot get ENT (enable torch) GPIO\n");
- child = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+ child = device_get_next_child_node(dev, NULL);
if (!child) {
dev_err(dev,
"No fwnode child node found for connected LED.\n");
diff --git a/drivers/leds/flash/leds-sgm3140.c b/drivers/leds/flash/leds-sgm3140.c
index 3e83200675f2..dc6840357370 100644
--- a/drivers/leds/flash/leds-sgm3140.c
+++ b/drivers/leds/flash/leds-sgm3140.c
@@ -214,8 +214,7 @@ static int sgm3140_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, ret,
"Failed to request regulator\n");
- child_node = fwnode_get_next_available_child_node(pdev->dev.fwnode,
- NULL);
+ child_node = device_get_next_child_node(&pdev->dev, NULL);
if (!child_node) {
dev_err(&pdev->dev,
"No fwnode child node found for connected LED.\n");
diff --git a/drivers/leds/flash/leds-tps6131x.c b/drivers/leds/flash/leds-tps6131x.c
index 6f4d4fd55361..f0f1f2b77d5a 100644
--- a/drivers/leds/flash/leds-tps6131x.c
+++ b/drivers/leds/flash/leds-tps6131x.c
@@ -544,7 +544,7 @@ static int tps6131x_parse_node(struct tps6131x *tps6131x)
tps6131x->valley_current_limit = device_property_read_bool(dev, "ti,valley-current-limit");
- tps6131x->led_node = fwnode_get_next_available_child_node(dev->fwnode, NULL);
+ tps6131x->led_node = device_get_next_child_node(dev, NULL);
if (!tps6131x->led_node) {
dev_err(dev, "Missing LED node\n");
return -EINVAL;
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 15633fbf3c16..885399ed0776 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -38,7 +38,7 @@ static ssize_t brightness_show(struct device *dev,
brightness = led_cdev->brightness;
mutex_unlock(&led_cdev->led_access);
- return sprintf(buf, "%u\n", brightness);
+ return sysfs_emit(buf, "%u\n", brightness);
}
static ssize_t brightness_store(struct device *dev,
@@ -80,7 +80,7 @@ static ssize_t max_brightness_show(struct device *dev,
max_brightness = led_cdev->max_brightness;
mutex_unlock(&led_cdev->led_access);
- return sprintf(buf, "%u\n", max_brightness);
+ return sysfs_emit(buf, "%u\n", max_brightness);
}
static DEVICE_ATTR_RO(max_brightness);
@@ -122,7 +122,7 @@ static ssize_t brightness_hw_changed_show(struct device *dev,
if (led_cdev->brightness_hw_changed == -1)
return -ENODATA;
- return sprintf(buf, "%u\n", led_cdev->brightness_hw_changed);
+ return sysfs_emit(buf, "%u\n", led_cdev->brightness_hw_changed);
}
static DEVICE_ATTR_RO(brightness_hw_changed);
@@ -252,15 +252,23 @@ static const struct class leds_class = {
* of_led_get() - request a LED device via the LED framework
* @np: device node to get the LED device from
* @index: the index of the LED
+ * @name: the name of the LED used to map it to its function, if present
*
* Returns the LED device parsed from the phandle specified in the "leds"
* property of a device tree node or a negative error-code on failure.
*/
-static struct led_classdev *of_led_get(struct device_node *np, int index)
+static struct led_classdev *of_led_get(struct device_node *np, int index,
+ const char *name)
{
struct device *led_dev;
struct device_node *led_node;
+ /*
+ * For named LEDs, first look up the name in the "led-names" property.
+ * If it cannot be found, then of_parse_phandle() will propagate the error.
+ */
+ if (name)
+ index = of_property_match_string(np, "led-names", name);
led_node = of_parse_phandle(np, "leds", index);
if (!led_node)
return ERR_PTR(-ENOENT);
@@ -324,7 +332,7 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
if (!dev)
return ERR_PTR(-EINVAL);
- led = of_led_get(dev->of_node, index);
+ led = of_led_get(dev->of_node, index, NULL);
if (IS_ERR(led))
return led;
@@ -342,9 +350,14 @@ EXPORT_SYMBOL_GPL(devm_of_led_get);
struct led_classdev *led_get(struct device *dev, char *con_id)
{
struct led_lookup_data *lookup;
+ struct led_classdev *led_cdev;
const char *provider = NULL;
struct device *led_dev;
+ led_cdev = of_led_get(dev->of_node, -1, con_id);
+ if (!IS_ERR(led_cdev) || PTR_ERR(led_cdev) != -ENOENT)
+ return led_cdev;
+
mutex_lock(&leds_lookup_lock);
list_for_each_entry(lookup, &leds_lookup_list, list) {
if (!strcmp(lookup->dev_id, dev_name(dev)) &&
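
A minimal consumer-side sketch of the new lookup order (device and LED function names made up): led_get() now first tries to match con_id against the consumer node's "led-names" property and takes the corresponding "leds" phandle, falling back to the static lookup list only when that returns -ENOENT:

static int foo_consumer_probe(struct platform_device *pdev)
{
	struct led_classdev *led;

	led = led_get(&pdev->dev, "power");	/* resolved via led-names first */
	if (IS_ERR(led))
		return PTR_ERR(led);

	led_set_brightness(led, LED_ON);
	return 0;
}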
diff --git a/drivers/leds/leds-cros_ec.c b/drivers/leds/leds-cros_ec.c
index 377cf04e202a..bea3cc3fbfd2 100644
--- a/drivers/leds/leds-cros_ec.c
+++ b/drivers/leds/leds-cros_ec.c
@@ -142,9 +142,6 @@ static int cros_ec_led_count_subleds(struct device *dev,
}
}
- if (!num_subleds)
- return -EINVAL;
-
*max_brightness = common_range;
return num_subleds;
}
@@ -189,6 +186,8 @@ static int cros_ec_led_probe_one(struct device *dev, struct cros_ec_device *cros
&priv->led_mc_cdev.led_cdev.max_brightness);
if (num_subleds < 0)
return num_subleds;
+ if (num_subleds == 0)
+ return 0; /* LED without any colors, skip */
priv->cros_ec = cros_ec;
priv->led_id = id;
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index 27bfab3da479..e411cee06dab 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -483,11 +483,6 @@ static inline int is31fl3196_db_to_gain(u32 dezibel)
return dezibel / IS31FL3196_AUDIO_GAIN_DB_STEP;
}
-static void is31f1319x_mutex_destroy(void *lock)
-{
- mutex_destroy(lock);
-}
-
static int is31fl319x_probe(struct i2c_client *client)
{
struct is31fl319x_chip *is31;
@@ -503,8 +498,7 @@ static int is31fl319x_probe(struct i2c_client *client)
if (!is31)
return -ENOMEM;
- mutex_init(&is31->lock);
- err = devm_add_action_or_reset(dev, is31f1319x_mutex_destroy, &is31->lock);
+ err = devm_mutex_init(dev, &is31->lock);
if (err)
return err;
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 8793330dd414..dc9349f9d350 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -32,6 +32,8 @@
#define IS31FL3216_CONFIG_SSD_ENABLE BIT(7)
#define IS31FL3216_CONFIG_SSD_DISABLE 0
+#define IS31FL32XX_PWM_FREQUENCY_22KHZ 0x01
+
struct is31fl32xx_priv;
struct is31fl32xx_led_data {
struct led_classdev cdev;
@@ -53,6 +55,7 @@ struct is31fl32xx_priv {
* @pwm_update_reg : address of PWM Update register
* @global_control_reg : address of Global Control register (optional)
* @reset_reg : address of Reset register (optional)
+ * @output_frequency_setting_reg: address of output frequency register (optional)
* @pwm_register_base : address of first PWM register
* @pwm_registers_reversed: : true if PWM registers count down instead of up
* @led_control_register_base : address of first LED control register (optional)
@@ -76,6 +79,7 @@ struct is31fl32xx_chipdef {
u8 pwm_update_reg;
u8 global_control_reg;
u8 reset_reg;
+ u8 output_frequency_setting_reg;
u8 pwm_register_base;
bool pwm_registers_reversed;
u8 led_control_register_base;
@@ -90,6 +94,19 @@ static const struct is31fl32xx_chipdef is31fl3236_cdef = {
.pwm_update_reg = 0x25,
.global_control_reg = 0x4a,
.reset_reg = 0x4f,
+ .output_frequency_setting_reg = IS31FL32XX_REG_NONE,
+ .pwm_register_base = 0x01,
+ .led_control_register_base = 0x26,
+ .enable_bits_per_led_control_register = 1,
+};
+
+static const struct is31fl32xx_chipdef is31fl3236a_cdef = {
+ .channels = 36,
+ .shutdown_reg = 0x00,
+ .pwm_update_reg = 0x25,
+ .global_control_reg = 0x4a,
+ .reset_reg = 0x4f,
+ .output_frequency_setting_reg = 0x4b,
.pwm_register_base = 0x01,
.led_control_register_base = 0x26,
.enable_bits_per_led_control_register = 1,
@@ -101,6 +118,7 @@ static const struct is31fl32xx_chipdef is31fl3235_cdef = {
.pwm_update_reg = 0x25,
.global_control_reg = 0x4a,
.reset_reg = 0x4f,
+ .output_frequency_setting_reg = IS31FL32XX_REG_NONE,
.pwm_register_base = 0x05,
.led_control_register_base = 0x2a,
.enable_bits_per_led_control_register = 1,
@@ -112,6 +130,7 @@ static const struct is31fl32xx_chipdef is31fl3218_cdef = {
.pwm_update_reg = 0x16,
.global_control_reg = IS31FL32XX_REG_NONE,
.reset_reg = 0x17,
+ .output_frequency_setting_reg = IS31FL32XX_REG_NONE,
.pwm_register_base = 0x01,
.led_control_register_base = 0x13,
.enable_bits_per_led_control_register = 6,
@@ -126,6 +145,7 @@ static const struct is31fl32xx_chipdef is31fl3216_cdef = {
.pwm_update_reg = 0xB0,
.global_control_reg = IS31FL32XX_REG_NONE,
.reset_reg = IS31FL32XX_REG_NONE,
+ .output_frequency_setting_reg = IS31FL32XX_REG_NONE,
.pwm_register_base = 0x10,
.pwm_registers_reversed = true,
.led_control_register_base = 0x01,
@@ -363,8 +383,21 @@ static struct is31fl32xx_led_data *is31fl32xx_find_led_data(
static int is31fl32xx_parse_dt(struct device *dev,
struct is31fl32xx_priv *priv)
{
+ const struct is31fl32xx_chipdef *cdef = priv->cdef;
int ret = 0;
+ if ((cdef->output_frequency_setting_reg != IS31FL32XX_REG_NONE) &&
+ of_property_read_bool(dev_of_node(dev), "issi,22khz-pwm")) {
+
+ ret = is31fl32xx_write(priv, cdef->output_frequency_setting_reg,
+ IS31FL32XX_PWM_FREQUENCY_22KHZ);
+
+ if (ret) {
+ dev_err(dev, "Failed to write output PWM frequency register\n");
+ return ret;
+ }
+ }
+
for_each_available_child_of_node_scoped(dev_of_node(dev), child) {
struct led_init_data init_data = {};
struct is31fl32xx_led_data *led_data =
@@ -404,12 +437,13 @@ static int is31fl32xx_parse_dt(struct device *dev,
}
static const struct of_device_id of_is31fl32xx_match[] = {
- { .compatible = "issi,is31fl3236", .data = &is31fl3236_cdef, },
- { .compatible = "issi,is31fl3235", .data = &is31fl3235_cdef, },
- { .compatible = "issi,is31fl3218", .data = &is31fl3218_cdef, },
- { .compatible = "si-en,sn3218", .data = &is31fl3218_cdef, },
- { .compatible = "issi,is31fl3216", .data = &is31fl3216_cdef, },
- { .compatible = "si-en,sn3216", .data = &is31fl3216_cdef, },
+ { .compatible = "issi,is31fl3236", .data = &is31fl3236_cdef, },
+ { .compatible = "issi,is31fl3236a", .data = &is31fl3236a_cdef, },
+ { .compatible = "issi,is31fl3235", .data = &is31fl3235_cdef, },
+ { .compatible = "issi,is31fl3218", .data = &is31fl3218_cdef, },
+ { .compatible = "si-en,sn3218", .data = &is31fl3218_cdef, },
+ { .compatible = "issi,is31fl3216", .data = &is31fl3216_cdef, },
+ { .compatible = "si-en,sn3216", .data = &is31fl3216_cdef, },
{},
};
@@ -466,6 +500,7 @@ static void is31fl32xx_remove(struct i2c_client *client)
*/
static const struct i2c_device_id is31fl32xx_id[] = {
{ "is31fl3236" },
+ { "is31fl3236a" },
{ "is31fl3235" },
{ "is31fl3218" },
{ "sn3218" },
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index 94f8ef6b482c..e2a9c8592953 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -50,11 +50,17 @@
#define LP50XX_SW_RESET 0xff
#define LP50XX_CHIP_EN BIT(6)
+#define LP50XX_CHIP_DISABLE 0x00
+#define LP50XX_START_TIME_US 500
+#define LP50XX_RESET_TIME_US 3
+
+#define LP50XX_EN_GPIO_LOW 0
+#define LP50XX_EN_GPIO_HIGH 1
/* There are 3 LED outputs per bank */
#define LP50XX_LEDS_PER_MODULE 3
-#define LP5009_MAX_LED_MODULES 2
+#define LP5009_MAX_LED_MODULES 3
#define LP5012_MAX_LED_MODULES 4
#define LP5018_MAX_LED_MODULES 6
#define LP5024_MAX_LED_MODULES 8
@@ -341,17 +347,15 @@ out:
return ret;
}
-static int lp50xx_set_banks(struct lp50xx *priv, u32 led_banks[])
+static int lp50xx_set_banks(struct lp50xx *priv, u32 led_banks[], int num_leds)
{
u8 led_config_lo, led_config_hi;
u32 bank_enable_mask = 0;
int ret;
int i;
- for (i = 0; i < priv->chip_info->max_modules; i++) {
- if (led_banks[i])
- bank_enable_mask |= (1 << led_banks[i]);
- }
+ for (i = 0; i < num_leds; i++)
+ bank_enable_mask |= (1 << led_banks[i]);
led_config_lo = bank_enable_mask;
led_config_hi = bank_enable_mask >> 8;
@@ -371,19 +375,42 @@ static int lp50xx_reset(struct lp50xx *priv)
return regmap_write(priv->regmap, priv->chip_info->reset_reg, LP50XX_SW_RESET);
}
-static int lp50xx_enable_disable(struct lp50xx *priv, int enable_disable)
+static int lp50xx_enable(struct lp50xx *priv)
{
int ret;
- ret = gpiod_direction_output(priv->enable_gpio, enable_disable);
+ if (priv->enable_gpio) {
+ ret = gpiod_direction_output(priv->enable_gpio, LP50XX_EN_GPIO_HIGH);
+ if (ret)
+ return ret;
+
+ udelay(LP50XX_START_TIME_US);
+ }
+
+ ret = lp50xx_reset(priv);
if (ret)
return ret;
- if (enable_disable)
- return regmap_write(priv->regmap, LP50XX_DEV_CFG0, LP50XX_CHIP_EN);
- else
- return regmap_write(priv->regmap, LP50XX_DEV_CFG0, 0);
+ return regmap_write(priv->regmap, LP50XX_DEV_CFG0, LP50XX_CHIP_EN);
+}
+
+static int lp50xx_disable(struct lp50xx *priv)
+{
+ int ret;
+
+ ret = regmap_write(priv->regmap, LP50XX_DEV_CFG0, LP50XX_CHIP_DISABLE);
+ if (ret)
+ return ret;
+ if (priv->enable_gpio) {
+ ret = gpiod_direction_output(priv->enable_gpio, LP50XX_EN_GPIO_LOW);
+ if (ret)
+ return ret;
+
+ udelay(LP50XX_RESET_TIME_US);
+ }
+
+ return 0;
}
static int lp50xx_probe_leds(struct fwnode_handle *child, struct lp50xx *priv,
@@ -405,7 +432,7 @@ static int lp50xx_probe_leds(struct fwnode_handle *child, struct lp50xx *priv,
return ret;
}
- ret = lp50xx_set_banks(priv, led_banks);
+ ret = lp50xx_set_banks(priv, led_banks, num_leds);
if (ret) {
dev_err(priv->dev, "Cannot setup banked LEDs\n");
return ret;
@@ -447,6 +474,10 @@ static int lp50xx_probe_dt(struct lp50xx *priv)
return dev_err_probe(priv->dev, PTR_ERR(priv->enable_gpio),
"Failed to get enable GPIO\n");
+ ret = lp50xx_enable(priv);
+ if (ret)
+ return ret;
+
priv->regulator = devm_regulator_get(priv->dev, "vled");
if (IS_ERR(priv->regulator))
priv->regulator = NULL;
@@ -547,14 +578,6 @@ static int lp50xx_probe(struct i2c_client *client)
return ret;
}
- ret = lp50xx_reset(led);
- if (ret)
- return ret;
-
- ret = lp50xx_enable_disable(led, 1);
- if (ret)
- return ret;
-
return lp50xx_probe_dt(led);
}
@@ -563,7 +586,7 @@ static void lp50xx_remove(struct i2c_client *client)
struct lp50xx *led = i2c_get_clientdata(client);
int ret;
- ret = lp50xx_enable_disable(led, 0);
+ ret = lp50xx_disable(led);
if (ret)
dev_err(led->dev, "Failed to disable chip\n");
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index e71456a56ab8..fd447eb7eb15 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -212,7 +212,7 @@ int lp55xx_update_program_memory(struct lp55xx_chip *chip,
* For LED chip that support page, PAGE is already set in load_engine.
*/
if (!cfg->pages_per_engine)
- start_addr += LP55xx_BYTES_PER_PAGE * idx;
+ start_addr += LP55xx_BYTES_PER_PAGE * (idx - 1);
for (page = 0; page < program_length / LP55xx_BYTES_PER_PAGE; page++) {
/* Write to the next page each 32 bytes (if supported) */
diff --git a/drivers/leds/leds-max5970.c b/drivers/leds/leds-max5970.c
index 285074c53b23..a1e91a06249c 100644
--- a/drivers/leds/leds-max5970.c
+++ b/drivers/leds/leds-max5970.c
@@ -60,7 +60,7 @@ static int max5970_led_probe(struct platform_device *pdev)
if (!led_node)
return -ENODEV;
- fwnode_for_each_available_child_node(led_node, child) {
+ fwnode_for_each_child_node(led_node, child) {
u32 reg;
if (fwnode_property_read_u32(child, "reg", &reg))
diff --git a/drivers/leds/leds-max77705.c b/drivers/leds/leds-max77705.c
index 933cb4f19be9..1e2054c1bf80 100644
--- a/drivers/leds/leds-max77705.c
+++ b/drivers/leds/leds-max77705.c
@@ -180,7 +180,7 @@ static int max77705_add_led(struct device *dev, struct regmap *regmap, struct fw
ret = fwnode_property_read_u32(np, "reg", &reg);
if (ret || reg >= MAX77705_LED_NUM_LEDS)
- ret = -EINVAL;
+ return -EINVAL;
info = devm_kcalloc(dev, num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -191,7 +191,7 @@ static int max77705_add_led(struct device *dev, struct regmap *regmap, struct fw
cdev->brightness_set_blocking = max77705_led_brightness_set_multi;
cdev->blink_set = max77705_rgb_blink;
- fwnode_for_each_available_child_node(np, child) {
+ fwnode_for_each_child_node(np, child) {
ret = max77705_parse_subled(dev, child, &info[i]);
if (ret < 0)
return ret;
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index e95287416ef8..99df46f2d9f5 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -364,6 +364,9 @@ static int netxbig_gpio_ext_get(struct device *dev,
if (!addr)
return -ENOMEM;
+ gpio_ext->addr = addr;
+ gpio_ext->num_addr = 0;
+
/*
* We cannot use devm_ managed resources with these GPIO descriptors
* since they are associated with the "GPIO extension device" which
@@ -375,45 +378,58 @@ static int netxbig_gpio_ext_get(struct device *dev,
gpiod = gpiod_get_index(gpio_ext_dev, "addr", i,
GPIOD_OUT_LOW);
if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
+ goto err_set_code;
gpiod_set_consumer_name(gpiod, "GPIO extension addr");
addr[i] = gpiod;
+ gpio_ext->num_addr++;
}
- gpio_ext->addr = addr;
- gpio_ext->num_addr = num_addr;
ret = gpiod_count(gpio_ext_dev, "data");
if (ret < 0) {
dev_err(dev,
"Failed to count GPIOs in DT property data-gpios\n");
- return ret;
+ goto err_free_addr;
}
num_data = ret;
data = devm_kcalloc(dev, num_data, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (!data) {
+ ret = -ENOMEM;
+ goto err_free_addr;
+ }
+
+ gpio_ext->data = data;
+ gpio_ext->num_data = 0;
for (i = 0; i < num_data; i++) {
gpiod = gpiod_get_index(gpio_ext_dev, "data", i,
GPIOD_OUT_LOW);
if (IS_ERR(gpiod))
- return PTR_ERR(gpiod);
+ goto err_free_data;
gpiod_set_consumer_name(gpiod, "GPIO extension data");
data[i] = gpiod;
+ gpio_ext->num_data++;
}
- gpio_ext->data = data;
- gpio_ext->num_data = num_data;
gpiod = gpiod_get(gpio_ext_dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
dev_err(dev,
"Failed to get GPIO from DT property enable-gpio\n");
- return PTR_ERR(gpiod);
+ goto err_free_data;
}
gpiod_set_consumer_name(gpiod, "GPIO extension enable");
gpio_ext->enable = gpiod;
return devm_add_action_or_reset(dev, netxbig_gpio_ext_remove, gpio_ext);
+
+err_free_data:
+ for (i = 0; i < gpio_ext->num_data; i++)
+ gpiod_put(gpio_ext->data[i]);
+err_set_code:
+ ret = PTR_ERR(gpiod);
+err_free_addr:
+ for (i = 0; i < gpio_ext->num_addr; i++)
+ gpiod_put(gpio_ext->addr[i]);
+ return ret;
}
static int netxbig_leds_get_of_pdata(struct device *dev,
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index c73134e7b951..6c1f2f50ff85 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -9,12 +9,13 @@
* based on leds-gpio.c by Raphael Assenat <raph@8d.com>
*/
-#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
#include <linux/leds.h>
-#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -26,6 +27,7 @@ struct led_pwm {
};
struct led_pwm_data {
+ struct gpio_desc *enable_gpio;
struct led_classdev cdev;
struct pwm_device *pwm;
struct pwm_state pwmstate;
@@ -51,6 +53,8 @@ static int led_pwm_set(struct led_classdev *led_cdev,
if (led_dat->active_low)
duty = led_dat->pwmstate.period - duty;
+ gpiod_set_value_cansleep(led_dat->enable_gpio, !!brightness);
+
led_dat->pwmstate.duty_cycle = duty;
/*
* Disabling a PWM doesn't guarantee that it emits the inactive level.
@@ -132,6 +136,21 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
break;
}
+ /*
+ * Claim the GPIO as GPIOD_ASIS and set the value
+ * later on to honor the different default states
+ */
+ led_data->enable_gpio = devm_fwnode_gpiod_get(dev, fwnode, "enable", GPIOD_ASIS, NULL);
+ if (IS_ERR(led_data->enable_gpio)) {
+ if (PTR_ERR(led_data->enable_gpio) == -ENOENT)
+ /* Enable GPIO is optional */
+ led_data->enable_gpio = NULL;
+ else
+ return PTR_ERR(led_data->enable_gpio);
+ }
+
+ gpiod_direction_output(led_data->enable_gpio, !!led_data->cdev.brightness);
+
ret = devm_led_classdev_register_ext(dev, &led_data->cdev, &init_data);
if (ret) {
dev_err(dev, "failed to register PWM led for %s: %d\n",
diff --git a/drivers/leds/leds-qnap-mcu.c b/drivers/leds/leds-qnap-mcu.c
index 4e4709456261..6df110e33ac9 100644
--- a/drivers/leds/leds-qnap-mcu.c
+++ b/drivers/leds/leds-qnap-mcu.c
@@ -104,9 +104,9 @@ static int qnap_mcu_register_err_led(struct device *dev, struct qnap_mcu *mcu, i
}
enum qnap_mcu_usb_led_mode {
- QNAP_MCU_USB_LED_ON = 1,
- QNAP_MCU_USB_LED_OFF = 3,
- QNAP_MCU_USB_LED_BLINK = 2,
+ QNAP_MCU_USB_LED_ON = 0,
+ QNAP_MCU_USB_LED_OFF = 2,
+ QNAP_MCU_USB_LED_BLINK = 1,
};
struct qnap_mcu_usb_led {
@@ -137,7 +137,7 @@ static int qnap_mcu_usb_led_set(struct led_classdev *led_cdev,
* Byte 3 is shared between the usb led target on/off/blink
* and also the buzzer control (in the input driver)
*/
- cmd[2] = 'D' + usb_led->mode;
+ cmd[2] = 'E' + usb_led->mode;
return qnap_mcu_exec_with_ack(usb_led->mcu, cmd, sizeof(cmd));
}
@@ -161,7 +161,7 @@ static int qnap_mcu_usb_led_blink_set(struct led_classdev *led_cdev,
* Byte 3 is shared between the USB LED target on/off/blink
* and also the buzzer control (in the input driver)
*/
- cmd[2] = 'D' + usb_led->mode;
+ cmd[2] = 'E' + usb_led->mode;
return qnap_mcu_exec_with_ack(usb_led->mcu, cmd, sizeof(cmd));
}
@@ -190,6 +190,166 @@ static int qnap_mcu_register_usb_led(struct device *dev, struct qnap_mcu *mcu)
return qnap_mcu_usb_led_set(&usb_led->cdev, 0);
}
+enum qnap_mcu_status_led_mode {
+ QNAP_MCU_STATUS_LED_OFF = 0,
+ QNAP_MCU_STATUS_LED_ON = 1,
+ QNAP_MCU_STATUS_LED_BLINK_FAST = 2, /* 500ms / 500ms */
+ QNAP_MCU_STATUS_LED_BLINK_SLOW = 3, /* 1s / 1s */
+};
+
+struct qnap_mcu_status_led {
+ struct led_classdev cdev;
+ struct qnap_mcu_status_led *red;
+ u8 mode;
+};
+
+struct qnap_mcu_status {
+ struct qnap_mcu *mcu;
+ struct qnap_mcu_status_led red;
+ struct qnap_mcu_status_led green;
+};
+
+static inline struct qnap_mcu_status_led *cdev_to_qnap_mcu_status_led(struct led_classdev *led_cdev)
+{
+ return container_of(led_cdev, struct qnap_mcu_status_led, cdev);
+}
+
+static inline struct qnap_mcu_status *statusled_to_qnap_mcu_status(struct qnap_mcu_status_led *led)
+{
+ return container_of(led->red, struct qnap_mcu_status, red);
+}
+
+static u8 qnap_mcu_status_led_encode(struct qnap_mcu_status *status)
+{
+ if (status->red.mode == QNAP_MCU_STATUS_LED_OFF) {
+ switch (status->green.mode) {
+ case QNAP_MCU_STATUS_LED_OFF:
+ return '9';
+ case QNAP_MCU_STATUS_LED_ON:
+ return '6';
+ case QNAP_MCU_STATUS_LED_BLINK_FAST:
+ return '5';
+ case QNAP_MCU_STATUS_LED_BLINK_SLOW:
+ return 'A';
+ }
+ } else if (status->green.mode == QNAP_MCU_STATUS_LED_OFF) {
+ switch (status->red.mode) {
+ case QNAP_MCU_STATUS_LED_OFF:
+ return '9';
+ case QNAP_MCU_STATUS_LED_ON:
+ return '7';
+ case QNAP_MCU_STATUS_LED_BLINK_FAST:
+ return '4';
+ case QNAP_MCU_STATUS_LED_BLINK_SLOW:
+ return 'B';
+ }
+ } else if (status->green.mode == QNAP_MCU_STATUS_LED_ON &&
+ status->red.mode == QNAP_MCU_STATUS_LED_ON) {
+ return 'D';
+ } else if (status->green.mode == QNAP_MCU_STATUS_LED_BLINK_SLOW &&
+ status->red.mode == QNAP_MCU_STATUS_LED_BLINK_SLOW) {
+ return 'C';
+ }
+
+ /*
+ * Here both LEDs are on in some fashion, either both blinking fast,
+ * or at different speeds, so default to fast blinking for both.
+ */
+ return '8';
+}
+
+static int qnap_mcu_status_led_update(struct qnap_mcu *mcu,
+ struct qnap_mcu_status *status)
+{
+ u8 cmd[] = { '@', 'C', 0 };
+
+ cmd[2] = qnap_mcu_status_led_encode(status);
+
+ return qnap_mcu_exec_with_ack(mcu, cmd, sizeof(cmd));
+}
+
+static int qnap_mcu_status_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct qnap_mcu_status_led *status_led = cdev_to_qnap_mcu_status_led(led_cdev);
+ struct qnap_mcu_status *base = statusled_to_qnap_mcu_status(status_led);
+
+ /* Don't disturb an already-set blink mode if the LED stays on */
+ if (brightness != 0 && status_led->mode >= QNAP_MCU_STATUS_LED_BLINK_FAST)
+ return 0;
+
+ status_led->mode = brightness ? QNAP_MCU_STATUS_LED_ON :
+ QNAP_MCU_STATUS_LED_OFF;
+
+ return qnap_mcu_status_led_update(base->mcu, base);
+}
+
+static int qnap_mcu_status_led_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct qnap_mcu_status_led *status_led = cdev_to_qnap_mcu_status_led(led_cdev);
+ struct qnap_mcu_status *base = statusled_to_qnap_mcu_status(status_led);
+
+ if (status_led->mode == QNAP_MCU_STATUS_LED_OFF)
+ return 0;
+
+ if (*delay_on <= 500) {
+ *delay_on = 500;
+ *delay_off = 500;
+ status_led->mode = QNAP_MCU_STATUS_LED_BLINK_FAST;
+ } else {
+ *delay_on = 1000;
+ *delay_off = 1000;
+ status_led->mode = QNAP_MCU_STATUS_LED_BLINK_SLOW;
+ }
+
+ return qnap_mcu_status_led_update(base->mcu, base);
+}
+
+static int qnap_mcu_register_status_leds(struct device *dev, struct qnap_mcu *mcu)
+{
+ struct qnap_mcu_status *status;
+ int ret;
+
+ status = devm_kzalloc(dev, sizeof(*status), GFP_KERNEL);
+ if (!status)
+ return -ENOMEM;
+
+ status->mcu = mcu;
+
+ /*
+ * Point both LEDs at the red LED so that statusled_to_qnap_mcu_status()
+ * can resolve the main status struct containing both LEDs.
+ */
+ status->red.red = &status->red;
+ status->green.red = &status->red;
+
+ status->red.mode = QNAP_MCU_STATUS_LED_OFF;
+ status->red.cdev.name = "red:status";
+ status->red.cdev.brightness_set_blocking = qnap_mcu_status_led_set;
+ status->red.cdev.blink_set = qnap_mcu_status_led_blink_set;
+ status->red.cdev.brightness = 0;
+ status->red.cdev.max_brightness = 1;
+
+ status->green.mode = QNAP_MCU_STATUS_LED_OFF;
+ status->green.cdev.name = "green:status";
+ status->green.cdev.brightness_set_blocking = qnap_mcu_status_led_set;
+ status->green.cdev.blink_set = qnap_mcu_status_led_blink_set;
+ status->green.cdev.brightness = 0;
+ status->green.cdev.max_brightness = 1;
+
+ ret = devm_led_classdev_register(dev, &status->red.cdev);
+ if (ret)
+ return ret;
+
+ ret = devm_led_classdev_register(dev, &status->green.cdev);
+ if (ret)
+ return ret;
+
+ return qnap_mcu_status_led_update(status->mcu, status);
+}
+
static int qnap_mcu_leds_probe(struct platform_device *pdev)
{
struct qnap_mcu *mcu = dev_get_drvdata(pdev->dev.parent);
@@ -210,6 +370,11 @@ static int qnap_mcu_leds_probe(struct platform_device *pdev)
"failed to register USB LED\n");
}
+ ret = qnap_mcu_register_status_leds(&pdev->dev, mcu);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to register status LEDs\n");
+
return 0;
}
diff --git a/drivers/leds/leds-upboard.c b/drivers/leds/leds-upboard.c
index b350eb294280..12989b2f1953 100644
--- a/drivers/leds/leds-upboard.c
+++ b/drivers/leds/leds-upboard.c
@@ -123,4 +123,4 @@ MODULE_AUTHOR("Gary Wang <garywang@aaeon.com.tw>");
MODULE_AUTHOR("Thomas Richard <thomas.richard@bootlin.com>");
MODULE_DESCRIPTION("UP Board LED driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:upboard-led");
+MODULE_ALIAS("platform:upboard-leds");
diff --git a/drivers/leds/rgb/leds-ktd202x.c b/drivers/leds/rgb/leds-ktd202x.c
index 04e62faa3a00..e4f0f25a5e45 100644
--- a/drivers/leds/rgb/leds-ktd202x.c
+++ b/drivers/leds/rgb/leds-ktd202x.c
@@ -391,7 +391,7 @@ static int ktd202x_setup_led_rgb(struct ktd202x *chip, struct fwnode_handle *fwn
int i = 0;
num_channels = 0;
- fwnode_for_each_available_child_node(fwnode, child)
+ fwnode_for_each_child_node(fwnode, child)
num_channels++;
if (!num_channels || num_channels > chip->num_leds)
@@ -401,7 +401,7 @@ static int ktd202x_setup_led_rgb(struct ktd202x *chip, struct fwnode_handle *fwn
if (!info)
return -ENOMEM;
- fwnode_for_each_available_child_node(fwnode, child) {
+ fwnode_for_each_child_node(fwnode, child) {
u32 mono_color;
u32 reg;
int ret;
diff --git a/drivers/leds/rgb/leds-ncp5623.c b/drivers/leds/rgb/leds-ncp5623.c
index 7c7d44623a9e..85d6be6fff2b 100644
--- a/drivers/leds/rgb/leds-ncp5623.c
+++ b/drivers/leds/rgb/leds-ncp5623.c
@@ -180,7 +180,7 @@ static int ncp5623_probe(struct i2c_client *client)
goto release_mc_node;
}
- fwnode_for_each_available_child_node(mc_node, led_node) {
+ fwnode_for_each_child_node(mc_node, led_node) {
ret = fwnode_property_read_u32(led_node, "color", &color_index);
if (ret)
goto release_led_node;
diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
index 4f2a178e3d26..72da0bf469ad 100644
--- a/drivers/leds/rgb/leds-qcom-lpg.c
+++ b/drivers/leds/rgb/leds-qcom-lpg.c
@@ -2,7 +2,7 @@
/*
* Copyright (c) 2017-2022 Linaro Ltd
* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
- * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include <linux/bits.h>
#include <linux/bitfield.h>
@@ -1247,8 +1247,6 @@ static int lpg_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
lpg_apply(chan);
- triled_set(lpg, chan->triled_mask, chan->enabled ? chan->triled_mask : 0);
-
out_unlock:
mutex_unlock(&lpg->lock);
@@ -1382,7 +1380,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
return dev_err_probe(lpg->dev, ret,
"failed to parse \"color\" of %pOF\n", np);
- if (color == LED_COLOR_ID_RGB)
+ if (color == LED_COLOR_ID_RGB || color == LED_COLOR_ID_MULTI)
num_channels = of_get_available_child_count(np);
else
num_channels = 1;
@@ -1394,7 +1392,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
led->lpg = lpg;
led->num_channels = num_channels;
- if (color == LED_COLOR_ID_RGB) {
+ if (color == LED_COLOR_ID_RGB || color == LED_COLOR_ID_MULTI) {
info = devm_kcalloc(lpg->dev, num_channels, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -1454,7 +1452,7 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
init_data.fwnode = of_fwnode_handle(np);
- if (color == LED_COLOR_ID_RGB)
+ if (color == LED_COLOR_ID_RGB || color == LED_COLOR_ID_MULTI)
ret = devm_led_classdev_multicolor_register_ext(lpg->dev, &led->mcdev, &init_data);
else
ret = devm_led_classdev_register_ext(lpg->dev, &led->cdev, &init_data);
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 05848a2fecff..679323c2ccda 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -94,28 +94,32 @@ void ledtrig_cpu(enum cpu_led_event ledevt)
}
EXPORT_SYMBOL(ledtrig_cpu);
-static int ledtrig_cpu_syscore_suspend(void)
+static int ledtrig_cpu_syscore_suspend(void *data)
{
ledtrig_cpu(CPU_LED_STOP);
return 0;
}
-static void ledtrig_cpu_syscore_resume(void)
+static void ledtrig_cpu_syscore_resume(void *data)
{
ledtrig_cpu(CPU_LED_START);
}
-static void ledtrig_cpu_syscore_shutdown(void)
+static void ledtrig_cpu_syscore_shutdown(void *data)
{
ledtrig_cpu(CPU_LED_HALTED);
}
-static struct syscore_ops ledtrig_cpu_syscore_ops = {
+static const struct syscore_ops ledtrig_cpu_syscore_ops = {
.shutdown = ledtrig_cpu_syscore_shutdown,
.suspend = ledtrig_cpu_syscore_suspend,
.resume = ledtrig_cpu_syscore_resume,
};
+static struct syscore ledtrig_cpu_syscore = {
+ .ops = &ledtrig_cpu_syscore_ops,
+};
+
static int ledtrig_online_cpu(unsigned int cpu)
{
ledtrig_cpu(CPU_LED_START);
@@ -157,7 +161,7 @@ static int __init ledtrig_cpu_init(void)
led_trigger_register_simple(trig->name, &trig->_trig);
}
- register_syscore_ops(&ledtrig_cpu_syscore_ops);
+ register_syscore(&ledtrig_cpu_syscore);
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "leds/trigger:starting",
ledtrig_online_cpu, ledtrig_prepare_down_cpu);
diff --git a/drivers/leds/trigger/ledtrig-input-events.c b/drivers/leds/trigger/ledtrig-input-events.c
index 1c79731562c2..3c6414259c27 100644
--- a/drivers/leds/trigger/ledtrig-input-events.c
+++ b/drivers/leds/trigger/ledtrig-input-events.c
@@ -66,7 +66,7 @@ static void input_events_event(struct input_handle *handle, unsigned int type,
spin_unlock_irqrestore(&data->lock, flags);
- mod_delayed_work(system_wq, &data->work, led_off_delay);
+ mod_delayed_work(system_percpu_wq, &data->work, led_off_delay);
}
static int input_events_connect(struct input_handler *handler, struct input_dev *dev,
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 369d72f59b3c..06fd910b3fd1 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -187,13 +187,14 @@ static int mac_hid_toggle_emumouse(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
int *valp = table->data;
- int old_val = *valp;
+ int old_val;
int rc;
rc = mutex_lock_killable(&mac_hid_emumouse_mutex);
if (rc)
return rc;
+ old_val = *valp;
rc = proc_dointvec(table, write, buffer, lenp, ppos);
if (rc == 0 && write && *valp != old_val) {
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 26bd9ed5e664..d91825bb0a5c 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -11,6 +11,8 @@
#include <asm/ptrace.h>
#include <linux/adb.h>
#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/of.h>
#include <linux/pmu.h>
#include <asm/backlight.h>
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index b0f09c70f1ff..5fe47e784d43 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2600,7 +2600,7 @@ void pmu_blink(int n)
#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC32)
int pmu_sys_suspended;
-static int pmu_syscore_suspend(void)
+static int pmu_syscore_suspend(void *data)
{
/* Suspend PMU event interrupts */
pmu_suspend();
@@ -2614,7 +2614,7 @@ static int pmu_syscore_suspend(void)
return 0;
}
-static void pmu_syscore_resume(void)
+static void pmu_syscore_resume(void *data)
{
struct adb_request req;
@@ -2634,14 +2634,18 @@ static void pmu_syscore_resume(void)
pmu_sys_suspended = 0;
}
-static struct syscore_ops pmu_syscore_ops = {
+static const struct syscore_ops pmu_syscore_ops = {
.suspend = pmu_syscore_suspend,
.resume = pmu_syscore_resume,
};
+static struct syscore pmu_syscore = {
+ .ops = &pmu_syscore_ops,
+};
+
static int pmu_syscore_register(void)
{
- register_syscore_ops(&pmu_syscore_ops);
+ register_syscore(&pmu_syscore);
return 0;
}
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 02432d4a5ccd..29f16f220384 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -294,6 +294,16 @@ config MTK_CMDQ_MBOX
critical time limitation, such as updating display configuration
during the vblank.
+config MTK_GPUEB_MBOX
+ tristate "MediaTek GPUEB Mailbox Support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ The MediaTek GPUEB mailbox is used to communicate with the embedded
+ controller in charge of GPU frequency and power management on some
+ MediaTek SoCs, such as the MT8196.
+ Say Y or M here if you want to support the MT8196 SoC in your kernel
+ build.
+
config ZYNQMP_IPI_MBOX
tristate "Xilinx ZynqMP IPI Mailbox"
depends on ARCH_ZYNQMP && OF
@@ -369,4 +379,15 @@ config BCM74110_MAILBOX
processor and coprocessor that handles various power management tasks
and more.
+config RISCV_SBI_MPXY_MBOX
+ tristate "RISC-V SBI Message Proxy (MPXY) Mailbox"
+ depends on RISCV_SBI
+ default RISCV
+ help
+ Mailbox driver implementation for RISC-V SBI Message Proxy (MPXY)
+ extension. This mailbox driver is used to send messages to the
+ remote processor through the SBI implementation (M-mode firmware
+ or HS-mode hypervisor). Say Y or M here if you want this support.
+ If unsure, say N.
+
endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 98a68f838486..81820a4f5528 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -63,6 +63,8 @@ obj-$(CONFIG_MTK_ADSP_MBOX) += mtk-adsp-mailbox.o
obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
+obj-$(CONFIG_MTK_GPUEB_MBOX) += mtk-gpueb-mailbox.o
+
obj-$(CONFIG_ZYNQMP_IPI_MBOX) += zynqmp-ipi-mailbox.o
obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
@@ -78,3 +80,5 @@ obj-$(CONFIG_THEAD_TH1520_MBOX) += mailbox-th1520.o
obj-$(CONFIG_CIX_MBOX) += cix-mailbox.o
obj-$(CONFIG_BCM74110_MAILBOX) += bcm74110-mailbox.o
+
+obj-$(CONFIG_RISCV_SBI_MPXY_MBOX) += riscv-sbi-mpxy-mbox.o
diff --git a/drivers/mailbox/arm_mhuv3.c b/drivers/mailbox/arm_mhuv3.c
index b97e79a5870f..0910da67f8a1 100644
--- a/drivers/mailbox/arm_mhuv3.c
+++ b/drivers/mailbox/arm_mhuv3.c
@@ -945,7 +945,7 @@ static irqreturn_t mhuv3_mbx_comb_interrupt(int irq, void *arg)
if (IS_ERR(data)) {
dev_err(dev,
"Failed to read in-band data. err:%ld\n",
- PTR_ERR(no_free_ptr(data)));
+ PTR_ERR(data));
goto rx_ack;
}
}
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index c9dd8c42c0cd..3a28ab5c42e5 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -268,7 +268,7 @@ static int mbox_test_add_debugfs(struct platform_device *pdev,
return 0;
tdev->root_debugfs_dir = debugfs_create_dir(dev_name(&pdev->dev), NULL);
- if (!tdev->root_debugfs_dir) {
+ if (IS_ERR(tdev->root_debugfs_dir)) {
dev_err(&pdev->dev, "Failed to create Mailbox debugfs\n");
return -EINVAL;
}
diff --git a/drivers/mailbox/mailbox-th1520.c b/drivers/mailbox/mailbox-th1520.c
index a6b2aa9ae952..626957c2e435 100644
--- a/drivers/mailbox/mailbox-th1520.c
+++ b/drivers/mailbox/mailbox-th1520.c
@@ -435,10 +435,8 @@ static int th1520_mbox_probe(struct platform_device *pdev)
}
ret = devm_add_action_or_reset(dev, th1520_disable_clk, priv);
- if (ret) {
- clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clocks), priv->clocks);
+ if (ret)
return ret;
- }
/*
* The address mappings in the device tree align precisely with those
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 5cd8ae222073..2acc6ec229a4 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/property.h>
#include <linux/spinlock.h>
#include "mailbox.h"
@@ -383,34 +384,56 @@ EXPORT_SYMBOL_GPL(mbox_bind_client);
*/
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
- struct device *dev = cl->dev;
+ struct fwnode_reference_args fwspec;
+ struct fwnode_handle *fwnode;
struct mbox_controller *mbox;
struct of_phandle_args spec;
struct mbox_chan *chan;
+ struct device *dev;
+ unsigned int i;
int ret;
- if (!dev || !dev->of_node) {
- pr_debug("%s: No owner device node\n", __func__);
+ dev = cl->dev;
+ if (!dev) {
+ pr_debug("No owner device\n");
return ERR_PTR(-ENODEV);
}
- ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
- index, &spec);
+ fwnode = dev_fwnode(dev);
+ if (!fwnode) {
+ dev_dbg(dev, "No owner fwnode\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ ret = fwnode_property_get_reference_args(fwnode, "mboxes", "#mbox-cells",
+ 0, index, &fwspec);
if (ret) {
- dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__);
+ dev_err(dev, "%s: can't parse \"%s\" property\n", __func__, "mboxes");
return ERR_PTR(ret);
}
+ spec.np = to_of_node(fwspec.fwnode);
+ spec.args_count = fwspec.nargs;
+ for (i = 0; i < spec.args_count; i++)
+ spec.args[i] = fwspec.args[i];
+
scoped_guard(mutex, &con_mutex) {
chan = ERR_PTR(-EPROBE_DEFER);
- list_for_each_entry(mbox, &mbox_cons, node)
- if (mbox->dev->of_node == spec.np) {
- chan = mbox->of_xlate(mbox, &spec);
- if (!IS_ERR(chan))
- break;
+ list_for_each_entry(mbox, &mbox_cons, node) {
+ if (device_match_fwnode(mbox->dev, fwspec.fwnode)) {
+ if (mbox->fw_xlate) {
+ chan = mbox->fw_xlate(mbox, &fwspec);
+ if (!IS_ERR(chan))
+ break;
+ } else if (mbox->of_xlate) {
+ chan = mbox->of_xlate(mbox, &spec);
+ if (!IS_ERR(chan))
+ break;
+ }
}
+ }
- of_node_put(spec.np);
+ fwnode_handle_put(fwspec.fwnode);
if (IS_ERR(chan))
return chan;
@@ -427,15 +450,8 @@ EXPORT_SYMBOL_GPL(mbox_request_channel);
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
const char *name)
{
- struct device_node *np = cl->dev->of_node;
- int index;
-
- if (!np) {
- dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
- return ERR_PTR(-EINVAL);
- }
+ int index = device_property_match_string(cl->dev, "mbox-names", name);
- index = of_property_match_string(np, "mbox-names", name);
if (index < 0) {
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name);
@@ -470,9 +486,8 @@ void mbox_free_channel(struct mbox_chan *chan)
}
EXPORT_SYMBOL_GPL(mbox_free_channel);
-static struct mbox_chan *
-of_mbox_index_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *sp)
+static struct mbox_chan *fw_mbox_index_xlate(struct mbox_controller *mbox,
+ const struct fwnode_reference_args *sp)
{
int ind = sp->args[0];
@@ -523,8 +538,8 @@ int mbox_controller_register(struct mbox_controller *mbox)
spin_lock_init(&chan->lock);
}
- if (!mbox->of_xlate)
- mbox->of_xlate = of_mbox_index_xlate;
+ if (!mbox->fw_xlate && !mbox->of_xlate)
+ mbox->fw_xlate = fw_mbox_index_xlate;
scoped_guard(mutex, &con_mutex)
list_add_tail(&mbox->node, &mbox_cons);
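
A consumer-side sketch of the client API after this fwnode conversion. The client driver below is hypothetical; only mbox_client, mbox_request_channel_byname() and mbox_send_message() from the existing mailbox client API are assumed. The lookup now resolves the "mbox-names" and "mboxes" device properties through the fwnode layer, so DT keeps working and other firmware nodes become possible:

#include <linux/mailbox_client.h>
#include <linux/platform_device.h>

static int example_client_probe(struct platform_device *pdev)
{
	struct mbox_client *cl;
	struct mbox_chan *chan;
	static u32 msg = 0xcafe;	/* payload format is controller-specific */
	int ret;

	cl = devm_kzalloc(&pdev->dev, sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return -ENOMEM;
	cl->dev = &pdev->dev;
	cl->tx_block = true;
	cl->tx_tout = 500;	/* ms */

	/* Resolved via the "mbox-names"/"mboxes" device properties */
	chan = mbox_request_channel_byname(cl, "tx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);
	return ret < 0 ? ret : 0;
}
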
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 532929916e99..5791f80f995a 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -92,6 +92,18 @@ struct gce_plat {
u32 gce_num;
};
+static inline u32 cmdq_convert_gce_addr(dma_addr_t addr, const struct gce_plat *pdata)
+{
+ /* Convert DMA addr (PA or IOVA) to GCE readable addr */
+ return addr >> pdata->shift;
+}
+
+static inline dma_addr_t cmdq_revert_gce_addr(u32 addr, const struct gce_plat *pdata)
+{
+ /* Revert GCE readable addr to DMA addr (PA or IOVA) */
+ return (dma_addr_t)addr << pdata->shift;
+}
+
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);
@@ -188,13 +200,12 @@ static void cmdq_task_insert_into_thread(struct cmdq_task *task)
struct cmdq_task *prev_task = list_last_entry(
&thread->task_busy_list, typeof(*task), list_entry);
u64 *prev_task_base = prev_task->pkt->va_base;
+ u32 gce_addr = cmdq_convert_gce_addr(task->pa_base, task->cmdq->pdata);
/* let previous task jump to this task */
dma_sync_single_for_cpu(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
- prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
- (u64)CMDQ_JUMP_BY_PA << 32 |
- (task->pa_base >> task->cmdq->pdata->shift);
+ prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] = (u64)CMDQ_JUMP_BY_PA << 32 | gce_addr;
dma_sync_single_for_device(dev, prev_task->pa_base,
prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
@@ -237,7 +248,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
struct cmdq_thread *thread)
{
struct cmdq_task *task, *tmp, *curr_task = NULL;
- u32 curr_pa, irq_flag, task_end_pa;
+ u32 irq_flag, gce_addr;
+ dma_addr_t curr_pa, task_end_pa;
bool err;
irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
@@ -259,7 +271,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
else
return;
- curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->pdata->shift;
+ gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
list_entry) {
@@ -378,21 +391,15 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
struct cmdq_task *task;
- unsigned long curr_pa, end_pa;
- int ret;
+ u32 gce_addr;
+ dma_addr_t curr_pa, end_pa;
/* Client should not flush new tasks if suspended. */
WARN_ON(cmdq->suspended);
- ret = pm_runtime_get_sync(cmdq->mbox.dev);
- if (ret < 0)
- return ret;
-
task = kzalloc(sizeof(*task), GFP_ATOMIC);
- if (!task) {
- pm_runtime_put_autosuspend(cmdq->mbox.dev);
+ if (!task)
return -ENOMEM;
- }
task->cmdq = cmdq;
INIT_LIST_HEAD(&task->list_entry);
@@ -409,20 +416,20 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
*/
WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
- writel(task->pa_base >> cmdq->pdata->shift,
- thread->base + CMDQ_THR_CURR_ADDR);
- writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->pdata->shift,
- thread->base + CMDQ_THR_END_ADDR);
+ gce_addr = cmdq_convert_gce_addr(task->pa_base, cmdq->pdata);
+ writel(gce_addr, thread->base + CMDQ_THR_CURR_ADDR);
+ gce_addr = cmdq_convert_gce_addr(task->pa_base + pkt->cmd_buf_size, cmdq->pdata);
+ writel(gce_addr, thread->base + CMDQ_THR_END_ADDR);
writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
} else {
WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
- curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
- cmdq->pdata->shift;
- end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
- cmdq->pdata->shift;
+ gce_addr = readl(thread->base + CMDQ_THR_CURR_ADDR);
+ curr_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
+ gce_addr = readl(thread->base + CMDQ_THR_END_ADDR);
+ end_pa = cmdq_revert_gce_addr(gce_addr, cmdq->pdata);
/* check boundary */
if (curr_pa == end_pa - CMDQ_INST_SIZE ||
curr_pa == end_pa) {
@@ -439,9 +446,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
}
list_move_tail(&task->list_entry, &thread->task_busy_list);
- pm_runtime_mark_last_busy(cmdq->mbox.dev);
- pm_runtime_put_autosuspend(cmdq->mbox.dev);
-
return 0;
}
@@ -656,6 +660,9 @@ static int cmdq_probe(struct platform_device *pdev)
if (err)
return err;
+ dma_set_coherent_mask(dev,
+ DMA_BIT_MASK(sizeof(u32) * BITS_PER_BYTE + cmdq->pdata->shift));
+
cmdq->mbox.dev = dev;
cmdq->mbox.chans = devm_kcalloc(dev, cmdq->pdata->thread_nr,
sizeof(*cmdq->mbox.chans), GFP_KERNEL);
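
The helper pair makes the GCE address arithmetic explicit. A worked example, assuming a platform where pdata->shift is 3 (the values are illustrative only):

	/* Illustration only, assuming pdata->shift == 3 */
	dma_addr_t dma = 0x400000008ULL;	/* 35-bit address, low 3 bits zero */
	u32 gce = dma >> 3;			/* 0x80000001, fits the 32-bit register */
	dma_addr_t back = (dma_addr_t)gce << 3;	/* 0x400000008 again, lossless */

The dma_set_coherent_mask() call added to probe follows the same reasoning: a 32-bit GCE register left-shifted by pdata->shift spans DMA_BIT_MASK(32 + shift) of address space.
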
diff --git a/drivers/mailbox/mtk-gpueb-mailbox.c b/drivers/mailbox/mtk-gpueb-mailbox.c
new file mode 100644
index 000000000000..f6d2beccd91b
--- /dev/null
+++ b/drivers/mailbox/mtk-gpueb-mailbox.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MediaTek GPUEB mailbox driver for SoCs such as the MT8196
+ *
+ * Copyright (C) 2025, Collabora Ltd.
+ *
+ * Developers harmed in the making of this driver:
+ * - Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define GPUEB_MBOX_CTL_TX_STS 0x00
+#define GPUEB_MBOX_CTL_IRQ_SET 0x04
+#define GPUEB_MBOX_CTL_IRQ_CLR 0x74
+#define GPUEB_MBOX_CTL_RX_STS 0x78
+
+#define GPUEB_MBOX_FULL BIT(0) /* i.e. we've received data */
+#define GPUEB_MBOX_BLOCKED BIT(1) /* i.e. the channel is shut down */
+
+#define GPUEB_MBOX_MAX_RX_SIZE 32 /* in bytes */
+
+struct mtk_gpueb_mbox {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *mbox_mmio;
+ void __iomem *mbox_ctl;
+ struct mbox_controller mbox;
+ struct mtk_gpueb_mbox_chan *ch;
+ int irq;
+ const struct mtk_gpueb_mbox_variant *v;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan - per-channel runtime data
+ * @ebm: pointer to the parent &struct mtk_gpueb_mbox mailbox
+ * @full_name: descriptive name of channel for IRQ subsystem
+ * @num: channel number, starting at 0
+ * @rx_status: signifies whether channel reception is turned off, or full
+ * @c: pointer to the constant &struct mtk_gpueb_mbox_chan_desc channel data
+ */
+struct mtk_gpueb_mbox_chan {
+ struct mtk_gpueb_mbox *ebm;
+ char *full_name;
+ u8 num;
+ atomic_t rx_status;
+ const struct mtk_gpueb_mbox_chan_desc *c;
+};
+
+/**
+ * struct mtk_gpueb_mbox_chan_desc - per-channel constant data
+ * @name: name of this channel
+ * @num: index of this channel, starting at 0
+ * @tx_offset: byte offset measured from mmio base for outgoing data
+ * @tx_len: size, in bytes, of the outgoing data on this channel
+ * @rx_offset: byte offset measured from mmio base for incoming data
+ * @rx_len: size, in bytes, of the incoming data on this channel
+ */
+struct mtk_gpueb_mbox_chan_desc {
+ const char *name;
+ const u8 num;
+ const u16 tx_offset;
+ const u8 tx_len;
+ const u16 rx_offset;
+ const u8 rx_len;
+};
+
+struct mtk_gpueb_mbox_variant {
+ const u8 num_channels;
+ const struct mtk_gpueb_mbox_chan_desc channels[] __counted_by(num_channels);
+};
+
+/**
+ * mtk_gpueb_mbox_read_rx - read RX buffer from MMIO into channel's RX buffer
+ * @buf: buffer to read into
+ * @chan: pointer to the channel to read
+ */
+static void mtk_gpueb_mbox_read_rx(void *buf, struct mtk_gpueb_mbox_chan *chan)
+{
+ memcpy_fromio(buf, chan->ebm->mbox_mmio + chan->c->rx_offset, chan->c->rx_len);
+}
+
+static irqreturn_t mtk_gpueb_mbox_isr(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ u32 rx_sts;
+
+ rx_sts = readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_RX_STS);
+
+ if (rx_sts & BIT(ch->num)) {
+ if (!atomic_cmpxchg(&ch->rx_status, 0, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED))
+ return IRQ_WAKE_THREAD;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t mtk_gpueb_mbox_thread(int irq, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = data;
+ int status;
+
+ status = atomic_cmpxchg(&ch->rx_status, GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED,
+ GPUEB_MBOX_FULL);
+ if (status == (GPUEB_MBOX_FULL | GPUEB_MBOX_BLOCKED)) {
+ u8 buf[GPUEB_MBOX_MAX_RX_SIZE] = {};
+
+ mtk_gpueb_mbox_read_rx(buf, ch);
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+ mbox_chan_received_data(&ch->ebm->mbox.chans[ch->num], buf);
+ atomic_set(&ch->rx_status, 0);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mtk_gpueb_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ u32 *values = data;
+ int i;
+
+ if (atomic_read(&ch->rx_status))
+ return -EBUSY;
+
+ /*
+ * We don't want any fancy nonsense, just write the 32-bit values in
+ * order. memcpy_toio/__iowrite32_copy don't work here, as they may use
+ * writes of different sizes or memory ordering characteristics depending
+ * on the architecture, alignment and the current phase of the moon.
+ */
+ for (i = 0; i < ch->c->tx_len; i += 4)
+ writel(values[i / 4], ch->ebm->mbox_mmio + ch->c->tx_offset + i);
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_SET);
+
+ return 0;
+}
+
+static int mtk_gpueb_mbox_startup(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+ int ret;
+
+ atomic_set(&ch->rx_status, 0);
+
+ ret = clk_enable(ch->ebm->clk);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to enable EB clock: %pe\n",
+ ERR_PTR(ret));
+ goto err_block;
+ }
+
+ writel(BIT(ch->num), ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_IRQ_CLR);
+
+ ret = devm_request_threaded_irq(ch->ebm->dev, ch->ebm->irq, mtk_gpueb_mbox_isr,
+ mtk_gpueb_mbox_thread, IRQF_SHARED | IRQF_ONESHOT,
+ ch->full_name, ch);
+ if (ret) {
+ dev_err(ch->ebm->dev, "Failed to request IRQ: %pe\n",
+ ERR_PTR(ret));
+ goto err_unclk;
+ }
+
+ return 0;
+
+err_unclk:
+ clk_disable(ch->ebm->clk);
+err_block:
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ return ret;
+}
+
+static void mtk_gpueb_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+
+ devm_free_irq(ch->ebm->dev, ch->ebm->irq, ch);
+
+ clk_disable(ch->ebm->clk);
+}
+
+static bool mtk_gpueb_mbox_last_tx_done(struct mbox_chan *chan)
+{
+ struct mtk_gpueb_mbox_chan *ch = chan->con_priv;
+
+ return !(readl(ch->ebm->mbox_ctl + GPUEB_MBOX_CTL_TX_STS) & BIT(ch->num));
+}
+
+static const struct mbox_chan_ops mtk_gpueb_mbox_ops = {
+ .send_data = mtk_gpueb_mbox_send_data,
+ .startup = mtk_gpueb_mbox_startup,
+ .shutdown = mtk_gpueb_mbox_shutdown,
+ .last_tx_done = mtk_gpueb_mbox_last_tx_done,
+};
+
+static int mtk_gpueb_mbox_probe(struct platform_device *pdev)
+{
+ struct mtk_gpueb_mbox_chan *ch;
+ struct mtk_gpueb_mbox *ebm;
+ unsigned int i;
+
+ ebm = devm_kzalloc(&pdev->dev, sizeof(*ebm), GFP_KERNEL);
+ if (!ebm)
+ return -ENOMEM;
+
+ ebm->dev = &pdev->dev;
+ ebm->v = of_device_get_match_data(ebm->dev);
+
+ ebm->irq = platform_get_irq(pdev, 0);
+ if (ebm->irq < 0)
+ return ebm->irq;
+
+ ebm->clk = devm_clk_get_prepared(ebm->dev, NULL);
+ if (IS_ERR(ebm->clk))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->clk),
+ "Failed to get 'eb' clock\n");
+
+ ebm->mbox_mmio = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ebm->mbox_mmio))
+ return dev_err_probe(ebm->dev, PTR_ERR(ebm->mbox_mmio),
+ "Couldn't map mailbox data registers\n");
+
+ ebm->mbox_ctl = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ebm->mbox_ctl))
+ return dev_err_probe(
+ ebm->dev, PTR_ERR(ebm->mbox_ctl),
+ "Couldn't map mailbox control registers\n");
+
+ ebm->ch = devm_kmalloc_array(ebm->dev, ebm->v->num_channels,
+ sizeof(*ebm->ch), GFP_KERNEL);
+ if (!ebm->ch)
+ return -ENOMEM;
+
+ ebm->mbox.chans = devm_kcalloc(ebm->dev, ebm->v->num_channels,
+ sizeof(struct mbox_chan), GFP_KERNEL);
+ if (!ebm->mbox.chans)
+ return -ENOMEM;
+
+ for (i = 0; i < ebm->v->num_channels; i++) {
+ ch = &ebm->ch[i];
+ ch->c = &ebm->v->channels[i];
+ if (ch->c->rx_len > GPUEB_MBOX_MAX_RX_SIZE) {
+ dev_err(ebm->dev, "Channel %s RX size (%d) too large\n",
+ ch->c->name, ch->c->rx_len);
+ return -EINVAL;
+ }
+ ch->full_name = devm_kasprintf(ebm->dev, GFP_KERNEL, "%s:%s",
+ dev_name(ebm->dev), ch->c->name);
+ if (!ch->full_name)
+ return -ENOMEM;
+
+ ch->ebm = ebm;
+ ch->num = i;
+ spin_lock_init(&ebm->mbox.chans[i].lock);
+ ebm->mbox.chans[i].con_priv = ch;
+ atomic_set(&ch->rx_status, GPUEB_MBOX_BLOCKED);
+ }
+
+ ebm->mbox.dev = ebm->dev;
+ ebm->mbox.num_chans = ebm->v->num_channels;
+ ebm->mbox.txdone_poll = true;
+ ebm->mbox.txpoll_period = 0; /* minimum hrtimer interval */
+ ebm->mbox.ops = &mtk_gpueb_mbox_ops;
+
+ dev_set_drvdata(ebm->dev, ebm);
+
+ return devm_mbox_controller_register(ebm->dev, &ebm->mbox);
+}
+
+static const struct mtk_gpueb_mbox_variant mtk_gpueb_mbox_mt8196 = {
+ .num_channels = 12,
+ .channels = {
+ { "fast-dvfs-event", 0, 0x0000, 16, 0x00e0, 16 },
+ { "gpufreq", 1, 0x0010, 32, 0x00f0, 32 },
+ { "sleep", 2, 0x0030, 12, 0x0110, 4 },
+ { "timer", 3, 0x003c, 24, 0x0114, 4 },
+ { "fhctl", 4, 0x0054, 36, 0x0118, 4 },
+ { "ccf", 5, 0x0078, 16, 0x011c, 16 },
+ { "gpumpu", 6, 0x0088, 24, 0x012c, 4 },
+ { "fast-dvfs", 7, 0x00a0, 24, 0x0130, 24 },
+ { "ipir-c-met", 8, 0x00b8, 4, 0x0148, 16 },
+ { "ipis-c-met", 9, 0x00bc, 16, 0x0158, 4 },
+ { "brisket", 10, 0x00cc, 16, 0x015c, 16 },
+ { "ppb", 11, 0x00dc, 4, 0x016c, 4 },
+ },
+};
+
+static const struct of_device_id mtk_gpueb_mbox_of_ids[] = {
+ { .compatible = "mediatek,mt8196-gpueb-mbox", .data = &mtk_gpueb_mbox_mt8196 },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mtk_gpueb_mbox_of_ids);
+
+static struct platform_driver mtk_gpueb_mbox_drv = {
+ .probe = mtk_gpueb_mbox_probe,
+ .driver = {
+ .name = "mtk-gpueb-mbox",
+ .of_match_table = mtk_gpueb_mbox_of_ids,
+ }
+};
+module_platform_driver(mtk_gpueb_mbox_drv);
+
+MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
+MODULE_DESCRIPTION("MediaTek GPUEB mailbox driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 680243751d62..17fe6545875d 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -68,6 +68,7 @@ struct omap_mbox_fifo {
struct omap_mbox_match_data {
u32 intr_type;
+ bool is_exclusive;
};
struct omap_mbox_device {
@@ -78,6 +79,7 @@ struct omap_mbox_device {
u32 num_users;
u32 num_fifos;
u32 intr_type;
+ const struct omap_mbox_match_data *mbox_data;
};
struct omap_mbox {
@@ -341,11 +343,13 @@ static int omap_mbox_suspend(struct device *dev)
if (pm_runtime_status_suspended(dev))
return 0;
- for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
- if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
- dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
- fifo);
- return -EBUSY;
+ if (mdev->mbox_data->is_exclusive) {
+ for (fifo = 0; fifo < mdev->num_fifos; fifo++) {
+ if (mbox_read_reg(mdev, MAILBOX_MSGSTATUS(fifo))) {
+ dev_err(mdev->dev, "fifo %d has unexpected unread messages\n",
+ fifo);
+ return -EBUSY;
+ }
}
}
@@ -378,8 +382,9 @@ static const struct dev_pm_ops omap_mbox_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
};
-static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
-static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
+static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1, true };
+static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2, true };
+static const struct omap_mbox_match_data am654_data = { MBOX_INTR_CFG_TYPE2, false };
static const struct of_device_id omap_mailbox_of_match[] = {
{
@@ -396,11 +401,11 @@ static const struct of_device_id omap_mailbox_of_match[] = {
},
{
.compatible = "ti,am654-mailbox",
- .data = &omap4_data,
+ .data = &am654_data,
},
{
.compatible = "ti,am64-mailbox",
- .data = &omap4_data,
+ .data = &am654_data,
},
{
/* end */
@@ -449,7 +454,6 @@ static int omap_mbox_probe(struct platform_device *pdev)
struct omap_mbox_fifo *fifo;
struct device_node *node = pdev->dev.of_node;
struct device_node *child;
- const struct omap_mbox_match_data *match_data;
struct mbox_controller *controller;
u32 intr_type, info_count;
u32 num_users, num_fifos;
@@ -462,11 +466,6 @@ static int omap_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- match_data = of_device_get_match_data(&pdev->dev);
- if (!match_data)
- return -ENODEV;
- intr_type = match_data->intr_type;
-
if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
return -ENODEV;
@@ -483,6 +482,12 @@ static int omap_mbox_probe(struct platform_device *pdev)
if (!mdev)
return -ENOMEM;
+ mdev->mbox_data = device_get_match_data(&pdev->dev);
+ if (!mdev->mbox_data)
+ return -ENODEV;
+
+ intr_type = mdev->mbox_data->intr_type;
+
mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->mbox_base))
return PTR_ERR(mdev->mbox_base);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0a00719b2482..ff292b9e0be9 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -276,9 +276,8 @@ static int pcc_mbox_error_check_and_clear(struct pcc_chan_info *pchan)
if (ret)
return ret;
- val &= pchan->error.status_mask;
- if (val) {
- val &= ~pchan->error.status_mask;
+ if (val & pchan->error.status_mask) {
+ val &= pchan->error.preserve_mask;
pcc_chan_reg_write(&pchan->error, val);
return -EIO;
}
@@ -745,7 +744,8 @@ static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan,
ret = pcc_chan_reg_init(&pchan->error,
&pcct_ext->error_status_register,
- 0, 0, pcct_ext->error_status_mask,
+ ~pcct_ext->error_status_mask, 0,
+ pcct_ext->error_status_mask,
"Error Status");
}
return ret;
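
For clarity: the old code first masked val down to the error bits and then cleared those same bits before writing back, so it always wrote zero and lost any non-error bits the platform expected to survive. A sketch of the fixed flow, assuming an 8-bit register whose bit 0 is the error bit:

	/* preserve_mask was initialized to ~error_status_mask (0xfe here) */
	val = 0x81;				/* error bit 0 set, platform bit 7 set */
	if (val & pchan->error.status_mask) {	/* 0x81 & 0x01: error present */
		val &= pchan->error.preserve_mask;	/* 0x81 & 0xfe = 0x80 */
		pcc_chan_reg_write(&pchan->error, val);	/* clears bit 0, keeps bit 7 */
		return -EIO;
	}
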
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 8b24ec0fa191..d3a8f6b4a03b 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -58,7 +58,6 @@ static const struct regmap_config apcs_regmap_config = {
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x1008,
- .fast_io = true,
};
static int qcom_apcs_ipc_send_data(struct mbox_chan *chan, void *data)
diff --git a/drivers/mailbox/riscv-sbi-mpxy-mbox.c b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
new file mode 100644
index 000000000000..7c9c006b7244
--- /dev/null
+++ b/drivers/mailbox/riscv-sbi-mpxy-mbox.c
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RISC-V SBI Message Proxy (MPXY) mailbox controller driver
+ *
+ * Copyright (C) 2025 Ventana Micro Systems Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/irqchip/riscv-imsic.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/riscv-rpmi-message.h>
+#include <linux/minmax.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/sbi.h>
+
+/* ====== SBI MPXY extension data structures ====== */
+
+/* SBI MPXY MSI related channel attributes */
+struct sbi_mpxy_msi_info {
+ /* Lower 32-bits of the MSI target address */
+ u32 msi_addr_lo;
+ /* Upper 32-bits of the MSI target address */
+ u32 msi_addr_hi;
+ /* MSI data value */
+ u32 msi_data;
+};
+
+/*
+ * SBI MPXY standard channel attributes.
+ *
+ * NOTE: The order of the attribute fields follows the
+ * attribute table defined in the spec (i.e. the order of
+ * enum sbi_mpxy_attribute_id).
+ */
+struct sbi_mpxy_channel_attrs {
+ /* Message protocol ID */
+ u32 msg_proto_id;
+ /* Message protocol version */
+ u32 msg_proto_version;
+ /* Message protocol maximum message length */
+ u32 msg_max_len;
+ /* Message protocol message send timeout in microseconds */
+ u32 msg_send_timeout;
+ /* Message protocol message completion timeout in microseconds */
+ u32 msg_completion_timeout;
+ /* Bit array for channel capabilities */
+ u32 capability;
+ /* SSE event ID */
+ u32 sse_event_id;
+ /* MSI enable/disable control knob */
+ u32 msi_control;
+ /* Channel MSI info */
+ struct sbi_mpxy_msi_info msi_info;
+ /* Events state control */
+ u32 events_state_ctrl;
+};
+
+/*
+ * RPMI specific SBI MPXY channel attributes.
+ *
+ * NOTE: The order of the attribute fields follows the
+ * attribute table defined in the spec (i.e. the order of
+ * enum sbi_mpxy_rpmi_attribute_id).
+ */
+struct sbi_mpxy_rpmi_channel_attrs {
+ /* RPMI service group ID */
+ u32 servicegroup_id;
+ /* RPMI service group version */
+ u32 servicegroup_version;
+ /* RPMI implementation ID */
+ u32 impl_id;
+ /* RPMI implementation version */
+ u32 impl_version;
+};
+
+/* SBI MPXY channel IDs data in shared memory */
+struct sbi_mpxy_channel_ids_data {
+ /* Remaining number of channel ids */
+ __le32 remaining;
+ /* Returned channel ids in current function call */
+ __le32 returned;
+ /* Returned channel id array */
+ __le32 channel_array[];
+};
+
+/* SBI MPXY notification data in shared memory */
+struct sbi_mpxy_notification_data {
+ /* Remaining number of notification events */
+ __le32 remaining;
+ /* Number of notification events returned */
+ __le32 returned;
+ /* Number of notification events lost */
+ __le32 lost;
+ /* Reserved for future use */
+ __le32 reserved;
+ /* Notification events data */
+ u8 events_data[];
+};
+
+/* ====== MPXY data structures & helper routines ====== */
+
+/* MPXY Per-CPU or local context */
+struct mpxy_local {
+ /* Shared memory base address */
+ void *shmem;
+ /* Shared memory physical address */
+ phys_addr_t shmem_phys_addr;
+ /* Flag representing whether shared memory is active or not */
+ bool shmem_active;
+};
+
+static DEFINE_PER_CPU(struct mpxy_local, mpxy_local);
+static unsigned long mpxy_shmem_size;
+static bool mpxy_shmem_init_done;
+
+static int mpxy_get_channel_count(u32 *channel_count)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Get the remaining and returned fields to calculate total */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+ *channel_count = remaining + returned;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_channel_ids(u32 channel_count, u32 *channel_ids)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbi_mpxy_channel_ids_data *sdata = mpxy->shmem;
+ u32 remaining, returned, count, start_index = 0;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!channel_count || !channel_ids)
+ return -EINVAL;
+
+ get_cpu();
+
+ do {
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_CHANNEL_IDS,
+ start_index, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ remaining = le32_to_cpu(sdata->remaining);
+ returned = le32_to_cpu(sdata->returned);
+
+ count = min(returned, channel_count - start_index);
+ memcpy_from_le32(&channel_ids[start_index], sdata->channel_array, count);
+ start_index += count;
+ } while (remaining && start_index < channel_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_read_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_READ_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ memcpy_from_le32(attrs_buf, (__le32 *)mpxy->shmem, attr_count);
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_write_attrs(u32 channel_id, u32 base_attrid, u32 attr_count,
+ u32 *attrs_buf)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!attr_count || !attrs_buf)
+ return -EINVAL;
+
+ get_cpu();
+
+ memcpy_to_le32((__le32 *)mpxy->shmem, attrs_buf, attr_count);
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_WRITE_ATTRS,
+ channel_id, base_attrid, attr_count, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_with_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len,
+ void *rx, unsigned long max_rx_len,
+ unsigned long *rx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ unsigned long rx_bytes;
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Message protocols are allowed to have no data in messages */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITH_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+ if (rx && !sret.error) {
+ rx_bytes = sret.value;
+ if (rx_bytes > max_rx_len) {
+ put_cpu();
+ return -ENOSPC;
+ }
+
+ memcpy(rx, mpxy->shmem, rx_bytes);
+ if (rx_len)
+ *rx_len = rx_bytes;
+ }
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_send_message_without_resp(u32 channel_id, u32 msg_id,
+ void *tx, unsigned long tx_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!tx && tx_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ /* Message protocols are allowed to have no data in messages */
+ if (tx_len)
+ memcpy(mpxy->shmem, tx, tx_len);
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SEND_MSG_WITHOUT_RESP,
+ channel_id, msg_id, tx_len, 0, 0, 0);
+
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_notifications(u32 channel_id,
+ struct sbi_mpxy_notification_data *notif_data,
+ unsigned long *events_data_len)
+{
+ struct mpxy_local *mpxy = this_cpu_ptr(&mpxy_local);
+ struct sbiret sret;
+
+ if (!mpxy->shmem_active)
+ return -ENODEV;
+ if (!notif_data || !events_data_len)
+ return -EINVAL;
+
+ get_cpu();
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_NOTIFICATION_EVENTS,
+ channel_id, 0, 0, 0, 0, 0);
+ if (sret.error)
+ goto err_put_cpu;
+
+ memcpy(notif_data, mpxy->shmem, sizeof(*notif_data) + sret.value);
+ *events_data_len = sret.value;
+
+err_put_cpu:
+ put_cpu();
+ return sbi_err_map_linux_errno(sret.error);
+}
+
+static int mpxy_get_shmem_size(unsigned long *shmem_size)
+{
+ struct sbiret sret;
+
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_GET_SHMEM_SIZE,
+ 0, 0, 0, 0, 0, 0);
+ if (sret.error)
+ return sbi_err_map_linux_errno(sret.error);
+ if (shmem_size)
+ *shmem_size = sret.value;
+ return 0;
+}
+
+static int mpxy_setup_shmem(unsigned int cpu)
+{
+ struct page *shmem_page;
+ struct mpxy_local *mpxy;
+ struct sbiret sret;
+
+ mpxy = per_cpu_ptr(&mpxy_local, cpu);
+ if (mpxy->shmem_active)
+ return 0;
+
+ shmem_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(mpxy_shmem_size));
+ if (!shmem_page)
+ return -ENOMEM;
+
+ /*
+ * Linux setup of shmem is done in mpxy OVERWRITE mode.
+ * flags[1:0] = 00b
+ */
+ sret = sbi_ecall(SBI_EXT_MPXY, SBI_EXT_MPXY_SET_SHMEM,
+ page_to_phys(shmem_page), 0, 0, 0, 0, 0);
+ if (sret.error) {
+ free_pages((unsigned long)page_to_virt(shmem_page),
+ get_order(mpxy_shmem_size));
+ return sbi_err_map_linux_errno(sret.error);
+ }
+
+ mpxy->shmem = page_to_virt(shmem_page);
+ mpxy->shmem_phys_addr = page_to_phys(shmem_page);
+ mpxy->shmem_active = true;
+
+ return 0;
+}
+
+/* ====== MPXY mailbox data structures ====== */
+
+/* MPXY mailbox channel */
+struct mpxy_mbox_channel {
+ struct mpxy_mbox *mbox;
+ u32 channel_id;
+ struct sbi_mpxy_channel_attrs attrs;
+ struct sbi_mpxy_rpmi_channel_attrs rpmi_attrs;
+ struct sbi_mpxy_notification_data *notif;
+ u32 max_xfer_len;
+ bool have_events_state;
+ u32 msi_index;
+ u32 msi_irq;
+ bool started;
+};
+
+/* MPXY mailbox */
+struct mpxy_mbox {
+ struct device *dev;
+ u32 channel_count;
+ struct mpxy_mbox_channel *channels;
+ u32 msi_count;
+ struct mpxy_mbox_channel **msi_index_to_channel;
+ struct mbox_controller controller;
+};
+
+/* ====== MPXY RPMI processing ====== */
+
+static void mpxy_mbox_send_rpmi_data(struct mpxy_mbox_channel *mchan,
+ struct rpmi_mbox_message *msg)
+{
+ msg->error = 0;
+ switch (msg->type) {
+ case RPMI_MBOX_MSG_TYPE_GET_ATTRIBUTE:
+ switch (msg->attr.id) {
+ case RPMI_MBOX_ATTR_SPEC_VERSION:
+ msg->attr.value = mchan->attrs.msg_proto_version;
+ break;
+ case RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE:
+ msg->attr.value = mchan->max_xfer_len;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_ID:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_id;
+ break;
+ case RPMI_MBOX_ATTR_SERVICEGROUP_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.servicegroup_version;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_ID:
+ msg->attr.value = mchan->rpmi_attrs.impl_id;
+ break;
+ case RPMI_MBOX_ATTR_IMPL_VERSION:
+ msg->attr.value = mchan->rpmi_attrs.impl_version;
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+ break;
+ case RPMI_MBOX_MSG_TYPE_SET_ATTRIBUTE:
+ /* None of the RPMI Linux mailbox attributes are writable */
+ msg->error = -EOPNOTSUPP;
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITH_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len) ||
+ (!msg->data.response && msg->data.max_response_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITH_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_with_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len,
+ msg->data.response,
+ msg->data.max_response_len,
+ &msg->data.out_response_len);
+ break;
+ case RPMI_MBOX_MSG_TYPE_SEND_WITHOUT_RESPONSE:
+ if ((!msg->data.request && msg->data.request_len) ||
+ (msg->data.request && msg->data.request_len > mchan->max_xfer_len)) {
+ msg->error = -EINVAL;
+ break;
+ }
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_SEND_WITHOUT_RESP)) {
+ msg->error = -EIO;
+ break;
+ }
+ msg->error = mpxy_send_message_without_resp(mchan->channel_id,
+ msg->data.service_id,
+ msg->data.request,
+ msg->data.request_len);
+ break;
+ default:
+ msg->error = -EOPNOTSUPP;
+ break;
+ }
+}
+
+static void mpxy_mbox_peek_rpmi_data(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan,
+ struct sbi_mpxy_notification_data *notif,
+ unsigned long events_data_len)
+{
+ struct rpmi_notification_event *event;
+ struct rpmi_mbox_message msg;
+ unsigned long pos = 0;
+
+ while (pos < events_data_len && (events_data_len - pos) >= sizeof(*event)) {
+ event = (struct rpmi_notification_event *)(notif->events_data + pos);
+
+ msg.type = RPMI_MBOX_MSG_TYPE_NOTIFICATION_EVENT;
+ msg.notif.event_datalen = le16_to_cpu(event->event_datalen);
+ msg.notif.event_id = event->event_id;
+ msg.notif.event_data = event->event_data;
+ msg.error = 0;
+
+ mbox_chan_received_data(chan, &msg);
+ pos += sizeof(*event) + msg.notif.event_datalen;
+ }
+}
+
+static int mpxy_mbox_read_rpmi_attrs(struct mpxy_mbox_channel *mchan)
+{
+ return mpxy_read_attrs(mchan->channel_id,
+ SBI_MPXY_ATTR_MSGPROTO_ATTR_START,
+ sizeof(mchan->rpmi_attrs) / sizeof(u32),
+ (u32 *)&mchan->rpmi_attrs);
+}
+
+/* ====== MPXY mailbox callbacks ====== */
+
+static int mpxy_mbox_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ mpxy_mbox_send_rpmi_data(mchan, data);
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool mpxy_mbox_peek_data(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ struct sbi_mpxy_notification_data *notif = mchan->notif;
+ bool have_notifications = false;
+ unsigned long data_len;
+ int rc;
+
+ if (!(mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS))
+ return false;
+
+ do {
+ rc = mpxy_get_notifications(mchan->channel_id, notif, &data_len);
+ if (rc || !data_len)
+ break;
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID)
+ mpxy_mbox_peek_rpmi_data(chan, mchan, notif, data_len);
+
+ have_notifications = true;
+ } while (1);
+
+ return have_notifications;
+}
+
+static irqreturn_t mpxy_mbox_irq_thread(int irq, void *dev_id)
+{
+ mpxy_mbox_peek_data(dev_id);
+ return IRQ_HANDLED;
+}
+
+static int mpxy_mbox_setup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return 0;
+
+ /* Fail if MSI already enabled */
+ if (mchan->attrs.msi_control)
+ return -EALREADY;
+
+ /* Request channel MSI handler */
+ rc = request_threaded_irq(mchan->msi_irq, NULL, mpxy_mbox_irq_thread,
+ 0, dev_name(dev), chan);
+ if (rc) {
+ dev_err(dev, "failed to request MPXY channel 0x%x IRQ\n",
+ mchan->channel_id);
+ return rc;
+ }
+
+ /* Enable channel MSI control */
+ mchan->attrs.msi_control = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "enable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.msi_control = 0;
+ free_irq(mchan->msi_irq, chan);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_msi(struct mbox_chan *chan,
+ struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if MSI not supported */
+ if (mchan->msi_irq == U32_MAX)
+ return;
+
+ /* Do nothing if MSI already disabled */
+ if (!mchan->attrs.msi_control)
+ return;
+
+ /* Disable channel MSI control */
+ mchan->attrs.msi_control = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_CONTROL,
+ 1, &mchan->attrs.msi_control);
+ if (rc) {
+ dev_err(dev, "disable MSI control failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+
+ /* Free channel MSI handler */
+ free_irq(mchan->msi_irq, chan);
+}
+
+static int mpxy_mbox_setup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return 0;
+
+ /* Fail if events state already enabled */
+ if (mchan->attrs.events_state_ctrl)
+ return -EALREADY;
+
+ /* Enable channel events state */
+ mchan->attrs.events_state_ctrl = 1;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc) {
+ dev_err(dev, "enable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ mchan->attrs.events_state_ctrl = 0;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void mpxy_mbox_cleanup_events(struct mpxy_mbox_channel *mchan)
+{
+ struct device *dev = mchan->mbox->dev;
+ int rc;
+
+ /* Do nothing if events state not supported */
+ if (!mchan->have_events_state)
+ return;
+
+ /* Do nothing if events state already disabled */
+ if (!mchan->attrs.events_state_ctrl)
+ return;
+
+ /* Disable channel events state */
+ mchan->attrs.events_state_ctrl = 0;
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_EVENTS_STATE_CONTROL,
+ 1, &mchan->attrs.events_state_ctrl);
+ if (rc)
+ dev_err(dev, "disable events state failed for MPXY channel 0x%x\n",
+ mchan->channel_id);
+}
+
+static int mpxy_mbox_startup(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+ int rc;
+
+ if (mchan->started)
+ return -EALREADY;
+
+ /* Setup channel MSI */
+ rc = mpxy_mbox_setup_msi(chan, mchan);
+ if (rc)
+ return rc;
+
+ /* Setup channel notification events */
+ rc = mpxy_mbox_setup_events(mchan);
+ if (rc) {
+ mpxy_mbox_cleanup_msi(chan, mchan);
+ return rc;
+ }
+
+ /* Mark the channel as started */
+ mchan->started = true;
+
+ return 0;
+}
+
+static void mpxy_mbox_shutdown(struct mbox_chan *chan)
+{
+ struct mpxy_mbox_channel *mchan = chan->con_priv;
+
+ if (!mchan->started)
+ return;
+
+ /* Mark the channel as stopped */
+ mchan->started = false;
+
+ /* Cleanup channel notification events */
+ mpxy_mbox_cleanup_events(mchan);
+
+ /* Cleanup channel MSI */
+ mpxy_mbox_cleanup_msi(chan, mchan);
+}
+
+static const struct mbox_chan_ops mpxy_mbox_ops = {
+ .send_data = mpxy_mbox_send_data,
+ .peek_data = mpxy_mbox_peek_data,
+ .startup = mpxy_mbox_startup,
+ .shutdown = mpxy_mbox_shutdown,
+};
+
+/* ====== MPXY platform driver ===== */
+
+static void mpxy_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct mpxy_mbox *mbox = dev_get_drvdata(dev);
+ struct mpxy_mbox_channel *mchan;
+ struct sbi_mpxy_msi_info *minfo;
+ int rc;
+
+ mchan = mbox->msi_index_to_channel[desc->msi_index];
+ if (!mchan) {
+ dev_warn(dev, "MPXY channel not available for MSI index %d\n",
+ desc->msi_index);
+ return;
+ }
+
+ minfo = &mchan->attrs.msi_info;
+ minfo->msi_addr_lo = msg->address_lo;
+ minfo->msi_addr_hi = msg->address_hi;
+ minfo->msi_data = msg->data;
+
+ rc = mpxy_write_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSI_ADDR_LO,
+ sizeof(*minfo) / sizeof(u32), (u32 *)minfo);
+ if (rc) {
+ dev_warn(dev, "failed to write MSI info for MPXY channel 0x%x\n",
+ mchan->channel_id);
+ }
+}
+
+static struct mbox_chan *mpxy_mbox_fw_xlate(struct mbox_controller *ctlr,
+ const struct fwnode_reference_args *pa)
+{
+ struct mpxy_mbox *mbox = container_of(ctlr, struct mpxy_mbox, controller);
+ struct mpxy_mbox_channel *mchan;
+ u32 i;
+
+ if (pa->nargs != 2)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->channel_id == pa->args[0] &&
+ mchan->attrs.msg_proto_id == pa->args[1])
+ return &mbox->controller.chans[i];
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static int mpxy_mbox_populate_channels(struct mpxy_mbox *mbox)
+{
+ u32 i, *channel_ids __free(kfree) = NULL;
+ struct mpxy_mbox_channel *mchan;
+ int rc;
+
+ /* Find out the number of channels */
+ rc = mpxy_get_channel_count(&mbox->channel_count);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get number of MPXY channels\n");
+ if (!mbox->channel_count)
+ return dev_err_probe(mbox->dev, -ENODEV, "no MPXY channels available\n");
+
+ /* Allocate and fetch all channel IDs */
+ channel_ids = kcalloc(mbox->channel_count, sizeof(*channel_ids), GFP_KERNEL);
+ if (!channel_ids)
+ return -ENOMEM;
+ rc = mpxy_get_channel_ids(mbox->channel_count, channel_ids);
+ if (rc)
+ return dev_err_probe(mbox->dev, rc, "failed to get MPXY channel IDs\n");
+
+ /* Populate all channels */
+ mbox->channels = devm_kcalloc(mbox->dev, mbox->channel_count,
+ sizeof(*mbox->channels), GFP_KERNEL);
+ if (!mbox->channels)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ mchan->mbox = mbox;
+ mchan->channel_id = channel_ids[i];
+
+ rc = mpxy_read_attrs(mchan->channel_id, SBI_MPXY_ATTR_MSG_PROT_ID,
+ sizeof(mchan->attrs) / sizeof(u32),
+ (u32 *)&mchan->attrs);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read attrs failed\n",
+ mchan->channel_id);
+ }
+
+ if (mchan->attrs.msg_proto_id == SBI_MPXY_MSGPROTO_RPMI_ID) {
+ rc = mpxy_mbox_read_rpmi_attrs(mchan);
+ if (rc) {
+ return dev_err_probe(mbox->dev, rc,
+ "MPXY channel 0x%x read RPMI attrs failed\n",
+ mchan->channel_id);
+ }
+ }
+
+ mchan->notif = devm_kzalloc(mbox->dev, mpxy_shmem_size, GFP_KERNEL);
+ if (!mchan->notif)
+ return -ENOMEM;
+
+ mchan->max_xfer_len = min(mpxy_shmem_size, mchan->attrs.msg_max_len);
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_EVENTS_STATE))
+ mchan->have_events_state = true;
+
+ if ((mchan->attrs.capability & SBI_MPXY_CHAN_CAP_GET_NOTIFICATIONS) &&
+ (mchan->attrs.capability & SBI_MPXY_CHAN_CAP_MSI))
+ mchan->msi_index = mbox->msi_count++;
+ else
+ mchan->msi_index = U32_MAX;
+ mchan->msi_irq = U32_MAX;
+ }
+
+ return 0;
+}
+
+static int mpxy_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mpxy_mbox_channel *mchan;
+ struct mpxy_mbox *mbox;
+ int msi_idx, rc;
+ u32 i;
+
+ /*
+ * Initialize MPXY shared memory only once. This also ensures
+ * that SBI MPXY mailbox is probed only once.
+ */
+ if (mpxy_shmem_init_done) {
+ dev_err(dev, "SBI MPXY mailbox already initialized\n");
+ return -EALREADY;
+ }
+
+ /* Probe for SBI MPXY extension */
+ if (sbi_spec_version < sbi_mk_version(1, 0) ||
+ sbi_probe_extension(SBI_EXT_MPXY) <= 0) {
+ dev_info(dev, "SBI MPXY extension not available\n");
+ return -ENODEV;
+ }
+
+ /* Find out the shared memory size */
+ rc = mpxy_get_shmem_size(&mpxy_shmem_size);
+ if (rc)
+ return dev_err_probe(dev, rc, "failed to get MPXY shared memory size\n");
+
+ /*
+ * Setup MPXY shared memory on each CPU
+ *
+ * Note: Don't clean up MPXY shared memory upon CPU power-down
+ * because the RPMI System MSI irqchip driver needs it to be
+ * available when migrating IRQs in the CPU power-down path.
+ */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/sbi-mpxy-shmem",
+ mpxy_setup_shmem, NULL);
+
+ /* Mark MPXY shared memory initialization as done */
+ mpxy_shmem_init_done = true;
+
+ /* Allocate mailbox instance */
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ mbox->dev = dev;
+ platform_set_drvdata(pdev, mbox);
+
+ /* Populate mailbox channels */
+ rc = mpxy_mbox_populate_channels(mbox);
+ if (rc)
+ return rc;
+
+ /* Initialize mailbox controller */
+ mbox->controller.txdone_irq = false;
+ mbox->controller.txdone_poll = false;
+ mbox->controller.ops = &mpxy_mbox_ops;
+ mbox->controller.dev = dev;
+ mbox->controller.num_chans = mbox->channel_count;
+ mbox->controller.fw_xlate = mpxy_mbox_fw_xlate;
+ mbox->controller.chans = devm_kcalloc(dev, mbox->channel_count,
+ sizeof(*mbox->controller.chans),
+ GFP_KERNEL);
+ if (!mbox->controller.chans)
+ return -ENOMEM;
+ for (i = 0; i < mbox->channel_count; i++)
+ mbox->controller.chans[i].con_priv = &mbox->channels[i];
+
+ /* Setup MSIs for mailbox (if required) */
+ if (mbox->msi_count) {
+ /*
+ * The device MSI domain for platform devices on RISC-V architecture
+ * is only available after the MSI controller driver is probed so,
+ * explicitly configure here.
+ */
+ if (!dev_get_msi_domain(dev)) {
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+
+ /*
+ * The device MSI domain for OF devices is only set at the
+ * time of populating/creating OF device. If the device MSI
+ * domain is discovered later after the OF device is created
+ * then we need to set it explicitly before using any platform
+ * MSI functions.
+ */
+ if (is_of_node(fwnode)) {
+ of_msi_configure(dev, dev_of_node(dev));
+ } else if (is_acpi_device_node(fwnode)) {
+ struct irq_domain *msi_domain;
+
+ msi_domain = irq_find_matching_fwnode(imsic_acpi_get_fwnode(dev),
+ DOMAIN_BUS_PLATFORM_MSI);
+ dev_set_msi_domain(dev, msi_domain);
+ }
+
+ if (!dev_get_msi_domain(dev))
+ return -EPROBE_DEFER;
+ }
+
+ mbox->msi_index_to_channel = devm_kcalloc(dev, mbox->msi_count,
+ sizeof(*mbox->msi_index_to_channel),
+ GFP_KERNEL);
+ if (!mbox->msi_index_to_channel)
+ return -ENOMEM;
+
+ for (msi_idx = 0; msi_idx < mbox->msi_count; msi_idx++) {
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == msi_idx) {
+ mbox->msi_index_to_channel[msi_idx] = mchan;
+ break;
+ }
+ }
+ }
+
+ rc = platform_device_msi_init_and_alloc_irqs(dev, mbox->msi_count,
+ mpxy_mbox_msi_write);
+ if (rc) {
+ return dev_err_probe(dev, rc, "Failed to allocate %d MSIs\n",
+ mbox->msi_count);
+ }
+
+ for (i = 0; i < mbox->channel_count; i++) {
+ mchan = &mbox->channels[i];
+ if (mchan->msi_index == U32_MAX)
+ continue;
+ mchan->msi_irq = msi_get_virq(dev, mchan->msi_index);
+ }
+ }
+
+ /* Register mailbox controller */
+ rc = devm_mbox_controller_register(dev, &mbox->controller);
+ if (rc) {
+ dev_err_probe(dev, rc, "Registering SBI MPXY mailbox failed\n");
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(dev);
+ return rc;
+ }
+
+#ifdef CONFIG_ACPI
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev)
+ acpi_dev_clear_dependencies(adev);
+#endif
+
+ dev_info(dev, "mailbox registered with %d channels\n",
+ mbox->channel_count);
+ return 0;
+}
+
+static void mpxy_mbox_remove(struct platform_device *pdev)
+{
+ struct mpxy_mbox *mbox = platform_get_drvdata(pdev);
+
+ if (mbox->msi_count)
+ platform_device_msi_free_irqs_all(mbox->dev);
+}
+
+static const struct of_device_id mpxy_mbox_of_match[] = {
+ { .compatible = "riscv,sbi-mpxy-mbox" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpxy_mbox_of_match);
+
+static const struct acpi_device_id mpxy_mbox_acpi_match[] = {
+ { "RSCV0005" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, mpxy_mbox_acpi_match);
+
+static struct platform_driver mpxy_mbox_driver = {
+ .driver = {
+ .name = "riscv-sbi-mpxy-mbox",
+ .of_match_table = mpxy_mbox_of_match,
+ .acpi_match_table = mpxy_mbox_acpi_match,
+ },
+ .probe = mpxy_mbox_probe,
+ .remove = mpxy_mbox_remove,
+};
+module_platform_driver(mpxy_mbox_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anup Patel <apatel@ventanamicro.com>");
+MODULE_DESCRIPTION("RISC-V SBI MPXY mailbox controller driver");
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index 0c143beaafda..967967b2b8a9 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -62,7 +62,8 @@
#define DST_BIT_POS 9U
#define SRC_BITMASK GENMASK(11, 8)
-#define MAX_SGI 16
+/* Macro to represent SGI type for IPI IRQs */
+#define IPI_IRQ_TYPE_SGI 2
/*
* Module parameters
@@ -121,6 +122,7 @@ struct zynqmp_ipi_mbox {
* @dev: device pointer corresponding to the Xilinx ZynqMP
* IPI agent
* @irq: IPI agent interrupt ID
+ * @irq_type: IPI SGI or SPI IRQ type
* @method: IPI SMC or HVC is going to be used
* @local_id: local IPI agent ID
* @virq_sgi: IRQ number mapped to SGI
@@ -130,6 +132,7 @@ struct zynqmp_ipi_mbox {
struct zynqmp_ipi_pdata {
struct device *dev;
int irq;
+ unsigned int irq_type;
unsigned int method;
u32 local_id;
int virq_sgi;
@@ -887,17 +890,14 @@ static void zynqmp_ipi_free_mboxes(struct zynqmp_ipi_pdata *pdata)
struct zynqmp_ipi_mbox *ipi_mbox;
int i;
- if (pdata->irq < MAX_SGI)
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI)
xlnx_mbox_cleanup_sgi(pdata);
- i = pdata->num_mboxes;
+ i = pdata->num_mboxes - 1;
for (; i >= 0; i--) {
ipi_mbox = &pdata->ipi_mboxes[i];
- if (ipi_mbox->dev.parent) {
- mbox_controller_unregister(&ipi_mbox->mbox);
- if (device_is_registered(&ipi_mbox->dev))
- device_unregister(&ipi_mbox->dev);
- }
+ if (device_is_registered(&ipi_mbox->dev))
+ device_unregister(&ipi_mbox->dev);
}
}
@@ -959,14 +959,16 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
dev_err(dev, "failed to parse interrupts\n");
goto free_mbox_dev;
}
- ret = out_irq.args[1];
+
+ /* Use interrupt type to distinguish SGI and SPI interrupts */
+ pdata->irq_type = out_irq.args[0];
/*
* If the interrupt type is SGI, request an SGI; otherwise request
* an IPI system IRQ.
*/
- if (ret < MAX_SGI) {
- pdata->irq = ret;
+ if (pdata->irq_type == IPI_IRQ_TYPE_SGI) {
+ pdata->irq = out_irq.args[1];
ret = xlnx_mbox_init_sgi(pdev, pdata->irq, pdata);
if (ret)
goto free_mbox_dev;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index ddb37f6670de..104aa5355090 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -37,6 +37,32 @@ config BLK_DEV_MD
If unsure, say N.
+config MD_BITMAP
+ bool "MD RAID bitmap support"
+ default y
+ depends on BLK_DEV_MD
+ help
+ If you say Y here, support for the write intent bitmap will be
+ enabled. The bitmap can be used to optimize resync speed after a
+ power failure or after re-adding a disk, by limiting the resync to
+ the dirty sectors recorded in the bitmap.
+
+ The bitmap can be added to an existing MD array, or the array can be
+ created with a bitmap from the start, via mdadm(8).
+
+ If unsure, say Y.
+
+config MD_LLBITMAP
+ bool "MD RAID lockless bitmap support"
+ depends on BLK_DEV_MD
+ help
+ If you say Y here, support for the lockless write intent bitmap will
+ be enabled.
+
+ Note, this is an experimental feature.
+
+ If unsure, say N.
+
config MD_AUTODETECT
bool "Autodetect RAID arrays during kernel boot"
depends on BLK_DEV_MD=y
@@ -54,6 +80,7 @@ config MD_AUTODETECT
config MD_BITMAP_FILE
bool "MD bitmap file support (deprecated)"
default y
+ depends on MD_BITMAP
help
If you say Y here, support for write intent bitmaps in files on an
external file system is enabled. This is an alternative to the internal
@@ -174,6 +201,7 @@ config MD_RAID456
config MD_CLUSTER
tristate "Cluster Support for MD"
+ select MD_BITMAP
depends on BLK_DEV_MD
depends on DLM
default n
@@ -393,6 +421,7 @@ config DM_RAID
select MD_RAID1
select MD_RAID10
select MD_RAID456
+ select MD_BITMAP
select BLK_DEV_MD
help
A dm target that supports RAID1, RAID10, RAID4, RAID5 and RAID6 mappings
@@ -659,4 +688,6 @@ config DM_AUDIT
source "drivers/md/dm-vdo/Kconfig"
+source "drivers/md/dm-pcache/Kconfig"
+
endif # MD
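As the new help text notes, the write-intent bitmap is managed entirely from
userspace; for example, `mdadm --grow --bitmap=internal /dev/md0` adds an
internal bitmap to a running array, and `mdadm --grow --bitmap=none /dev/md0`
removes it again.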
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 87bdfc9fe14c..c338cc6fbe2e 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -27,7 +27,9 @@ dm-clone-y += dm-clone-target.o dm-clone-metadata.o
dm-verity-y += dm-verity-target.o
dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
-md-mod-y += md.o md-bitmap.o
+md-mod-y += md.o
+md-mod-$(CONFIG_MD_BITMAP) += md-bitmap.o
+md-mod-$(CONFIG_MD_LLBITMAP) += md-llbitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
linear-y += md-linear.o
@@ -71,6 +73,7 @@ obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_VDO) += dm-vdo/
+obj-$(CONFIG_DM_PCACHE) += dm-pcache/
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
obj-$(CONFIG_DM_EBS) += dm-ebs.o
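The Makefile hunk uses the usual kbuild idiom: `md-mod-$(CONFIG_MD_BITMAP)`
expands to `md-mod-y` when the option is set, adding md-bitmap.o to the
composite md-mod object, and to the unused `md-mod-` list otherwise, so
bitmap support drops out of the build entirely with CONFIG_MD_BITMAP=n.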
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 48ce750bf70a..7708d92df23e 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -24,21 +24,18 @@
* Since the gens and priorities are all stored contiguously on disk, we can
* batch this up: We fill up the free_inc list with freshly invalidated buckets,
* call prio_write(), and when prio_write() finishes we pull buckets off the
- * free_inc list and optionally discard them.
+ * free_inc list.
*
* free_inc isn't the only freelist - if it was, we'd often have to sleep while
* priorities and gens were being written before we could allocate. c->free is a
* smaller freelist, and buckets on that list are always ready to be used.
*
- * If we've got discards enabled, that happens when a bucket moves from the
- * free_inc list to the free list.
- *
* There is another freelist, because sometimes we have buckets that we know
* have nothing pointing into them - these we can reuse without waiting for
* priorities to be rewritten. These come from freed btree nodes and buckets
* that garbage collection discovered no longer had valid keys pointing into
* them (because they were overwritten). That's the unused list - buckets on the
- * unused list move to the free list, optionally being discarded in the process.
+ * unused list move to the free list.
*
* It's also important to ensure that gens don't wrap around - with respect to
* either the oldest gen in the btree or the gen on disk. This is quite
@@ -118,8 +115,7 @@ void bch_rescale_priorities(struct cache_set *c, int sectors)
/*
* Background allocation thread: scans for buckets to be invalidated,
* invalidates them, rewrites prios/gens (marking them as invalidated on disk),
- * then optionally issues discard commands to the newly free buckets, then puts
- * them on the various freelists.
+ * then puts them on the various freelists.
*/
static inline bool can_inc_bucket_gen(struct bucket *b)
@@ -321,8 +317,7 @@ static int bch_allocator_thread(void *arg)
while (1) {
/*
* First, we pull buckets off of the unused and free_inc lists,
- * possibly issue discards to them, then we add the bucket to
- * the free list:
+ * then we add the bucket to the free list:
*/
while (1) {
long bucket;
@@ -330,14 +325,6 @@ static int bch_allocator_thread(void *arg)
if (!fifo_pop(&ca->free_inc, bucket))
break;
- if (ca->discard) {
- mutex_unlock(&ca->set->bucket_lock);
- blkdev_issue_discard(ca->bdev,
- bucket_to_sector(ca->set, bucket),
- ca->sb.bucket_size, GFP_KERNEL);
- mutex_lock(&ca->set->bucket_lock);
- }
-
allocator_wait(ca, bch_allocator_push(ca, bucket));
wake_up(&ca->set->btree_cache_wait);
wake_up(&ca->set->bucket_wait);
@@ -412,7 +399,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
TASK_UNINTERRUPTIBLE);
mutex_unlock(&ca->set->bucket_lock);
+
+ atomic_inc(&ca->set->bucket_wait_cnt);
schedule();
+ atomic_dec(&ca->set->bucket_wait_cnt);
+
mutex_lock(&ca->set->bucket_lock);
} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
!fifo_pop(&ca->free[reserve], r));
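The bucket_wait_cnt counter added above gives the GC thread a cheap signal
that allocators are blocked waiting for free buckets. The two cooperating
sides, which sit far apart in this patch, condense to the following sketch:

	/* Allocator side (bch_bucket_alloc): account the sleeper. */
	atomic_inc(&ca->set->bucket_wait_cnt);
	schedule();
	atomic_dec(&ca->set->bucket_wait_cnt);

	/*
	 * GC side (btree_gc_sleep_ms, added below): back off for 10ms
	 * instead of 100ms while any allocator is blocked on free buckets.
	 */
	sleep_ms = atomic_read(&c->bucket_wait_cnt) > 0 ? GC_SLEEP_MS_MIN
							: GC_SLEEP_MS;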
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 1d33e40d26ea..8ccacba85547 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -447,8 +447,7 @@ struct cache {
* free_inc: Incoming buckets - these are buckets that currently have
* cached data in them, and we can't reuse them until after we write
* their new gen to disk. After prio_write() finishes writing the new
- * gens/prios, they'll be moved to the free list (and possibly discarded
- * in the process)
+ * gens/prios, they'll be moved to the free list.
*/
DECLARE_FIFO(long, free)[RESERVE_NR];
DECLARE_FIFO(long, free_inc);
@@ -467,8 +466,6 @@ struct cache {
*/
unsigned int invalidate_needs_gc;
- bool discard; /* Get rid of? */
-
struct journal_device journal;
/* The rest of this all shows up in sysfs */
@@ -607,6 +604,7 @@ struct cache_set {
*/
atomic_t prio_blocked;
wait_queue_head_t bucket_wait;
+ atomic_t bucket_wait_cnt;
/*
* For any bio we don't skip we subtract the number of sectors from
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index 011f6062c4c0..6ee2c6a506a2 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -327,9 +327,13 @@ struct btree_iter {
/* Fixed-size btree_iter that can be allocated on the stack */
struct btree_iter_stack {
- struct btree_iter iter;
- struct btree_iter_set stack_data[MAX_BSETS];
+ /* Must be last as it ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct btree_iter, iter, data,
+ struct btree_iter_set stack_data[MAX_BSETS];
+ );
};
+static_assert(offsetof(struct btree_iter_stack, iter.data) ==
+ offsetof(struct btree_iter_stack, stack_data));
typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
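TRAILING_OVERLAP() lays out the containing struct and a fixed-size array so
that the flexible-array member and the backing storage share one offset; the
static_assert then pins that layout. Conceptually it behaves like the
following hand-rolled union (a sketch of the idea, not the macro's exact
expansion):

	struct btree_iter_stack {
		union {
			struct btree_iter iter;
			struct {
				u8 pad[offsetof(struct btree_iter, data)];
				struct btree_iter_set stack_data[MAX_BSETS];
			};
		};
	};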
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 210b59007d98..3ed39c823826 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -89,8 +89,9 @@
* Test module load/unload
*/
-#define MAX_GC_TIMES 100
-#define MIN_GC_NODES 100
+#define MAX_GC_TIMES_SHIFT 7 /* 128 loops */
+#define GC_NODES_MIN 10
+#define GC_SLEEP_MS_MIN 10
#define GC_SLEEP_MS 100
#define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
@@ -371,7 +372,7 @@ static void do_btree_node_write(struct btree *b)
SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
bset_sector_offset(&b->keys, i));
- if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
+ if (!bch_bio_alloc_pages(b->bio, GFP_NOWAIT)) {
struct bio_vec *bv;
void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
struct bvec_iter_all iter_all;
@@ -1578,29 +1579,29 @@ static unsigned int btree_gc_count_keys(struct btree *b)
static size_t btree_gc_min_nodes(struct cache_set *c)
{
- size_t min_nodes;
+ size_t min_nodes = GC_NODES_MIN;
- /*
- * Since incremental GC would stop 100ms when front
- * side I/O comes, so when there are many btree nodes,
- * if GC only processes constant (100) nodes each time,
- * GC would last a long time, and the front side I/Os
- * would run out of the buckets (since no new bucket
- * can be allocated during GC), and be blocked again.
- * So GC should not process constant nodes, but varied
- * nodes according to the number of btree nodes, which
- * realized by dividing GC into constant(100) times,
- * so when there are many btree nodes, GC can process
- * more nodes each time, otherwise, GC will process less
- * nodes each time (but no less than MIN_GC_NODES)
- */
- min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
- if (min_nodes < MIN_GC_NODES)
- min_nodes = MIN_GC_NODES;
+ if (atomic_read(&c->search_inflight) == 0) {
+ size_t n = c->gc_stats.nodes >> MAX_GC_TIMES_SHIFT;
+
+ if (min_nodes < n)
+ min_nodes = n;
+ }
return min_nodes;
}
+static uint64_t btree_gc_sleep_ms(struct cache_set *c)
+{
+ uint64_t sleep_ms;
+
+ if (atomic_read(&c->bucket_wait_cnt) > 0)
+ sleep_ms = GC_SLEEP_MS_MIN;
+ else
+ sleep_ms = GC_SLEEP_MS;
+
+ return sleep_ms;
+}
static int btree_gc_recurse(struct btree *b, struct btree_op *op,
struct closure *writes, struct gc_stat *gc)
@@ -1668,8 +1669,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
r->b = NULL;
- if (atomic_read(&b->c->search_inflight) &&
- gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
+ if (gc->nodes >= (gc->nodes_pre + btree_gc_min_nodes(b->c))) {
gc->nodes_pre = gc->nodes;
ret = -EAGAIN;
break;
@@ -1846,8 +1846,8 @@ static void bch_btree_gc(struct cache_set *c)
cond_resched();
if (ret == -EAGAIN)
- schedule_timeout_interruptible(msecs_to_jiffies
- (GC_SLEEP_MS));
+ schedule_timeout_interruptible(
+ msecs_to_jiffies(btree_gc_sleep_ms(c)));
else if (ret)
pr_warn("gc failed!\n");
} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
@@ -2822,7 +2822,8 @@ void bch_btree_exit(void)
int __init bch_btree_init(void)
{
- btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
+ btree_io_wq = alloc_workqueue("bch_btree_io",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!btree_io_wq)
return -ENOMEM;
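The shift makes the GC slice size proportional to tree size: with
MAX_GC_TIMES_SHIFT of 7, a cache set holding, say, 51,200 btree nodes
processes 51200 >> 7 = 400 nodes per slice (so roughly 128 slices for a full
pass), while small trees fall back to the GC_NODES_MIN floor of 10; the
proportional slice is only used when no foreground searches are in flight.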
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 7510d1c983a5..f327456fc4e0 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -115,8 +115,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
check = bio_kmalloc(nr_segs, GFP_NOIO);
if (!check)
return;
- bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs,
- REQ_OP_READ);
+ bio_init_inline(check, bio->bi_bdev, nr_segs, REQ_OP_READ);
check->bi_iter.bi_sector = bio->bi_iter.bi_sector;
check->bi_iter.bi_size = bio->bi_iter.bi_size;
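Several hunks in this series replace the bio_init(bio, bdev,
bio->bi_inline_vecs, ...) pattern with bio_init_inline(). Judging from the
call sites, the helper is shorthand for passing the bio's own inline vector
array; a sketch of the assumed equivalence:

	static inline void bio_init_inline(struct bio *bio,
					   struct block_device *bdev,
					   unsigned short max_vecs,
					   blk_opf_t opf)
	{
		bio_init(bio, bdev, bio->bi_inline_vecs, max_vecs, opf);
	}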
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 020712c5203f..2386d08bf4e4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -26,8 +26,7 @@ struct bio *bch_bbio_alloc(struct cache_set *c)
struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
struct bio *bio = &b->bio;
- bio_init(bio, NULL, bio->bi_inline_vecs,
- meta_bucket_pages(&c->cache->sb), 0);
+ bio_init_inline(bio, NULL, meta_bucket_pages(&c->cache->sb), 0);
return bio;
}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 7ff14bd2feb8..144693b7c46a 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -275,8 +275,7 @@ bsearch:
* ja->cur_idx
*/
ja->cur_idx = i;
- ja->last_idx = ja->discard_idx = (i + 1) %
- ca->sb.njournal_buckets;
+ ja->last_idx = (i + 1) % ca->sb.njournal_buckets;
}
@@ -336,16 +335,6 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
}
}
-static bool is_discard_enabled(struct cache_set *s)
-{
- struct cache *ca = s->cache;
-
- if (ca->discard)
- return true;
-
- return false;
-}
-
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
int ret = 0, keys = 0, entries = 0;
@@ -360,15 +349,10 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
BUG_ON(i->pin && atomic_read(i->pin) != 1);
if (n != i->j.seq) {
- if (n == start && is_discard_enabled(s))
- pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
- n, i->j.seq - 1, start, end);
- else {
- pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
- n, i->j.seq - 1, start, end);
- ret = -EIO;
- goto err;
- }
+ pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
+ n, i->j.seq - 1, start, end);
+ ret = -EIO;
+ goto err;
}
for (k = i->j.start;
@@ -568,65 +552,6 @@ out:
#define last_seq(j) ((j)->seq - fifo_used(&(j)->pin) + 1)
-static void journal_discard_endio(struct bio *bio)
-{
- struct journal_device *ja =
- container_of(bio, struct journal_device, discard_bio);
- struct cache *ca = container_of(ja, struct cache, journal);
-
- atomic_set(&ja->discard_in_flight, DISCARD_DONE);
-
- closure_wake_up(&ca->set->journal.wait);
- closure_put(&ca->set->cl);
-}
-
-static void journal_discard_work(struct work_struct *work)
-{
- struct journal_device *ja =
- container_of(work, struct journal_device, discard_work);
-
- submit_bio(&ja->discard_bio);
-}
-
-static void do_journal_discard(struct cache *ca)
-{
- struct journal_device *ja = &ca->journal;
- struct bio *bio = &ja->discard_bio;
-
- if (!ca->discard) {
- ja->discard_idx = ja->last_idx;
- return;
- }
-
- switch (atomic_read(&ja->discard_in_flight)) {
- case DISCARD_IN_FLIGHT:
- return;
-
- case DISCARD_DONE:
- ja->discard_idx = (ja->discard_idx + 1) %
- ca->sb.njournal_buckets;
-
- atomic_set(&ja->discard_in_flight, DISCARD_READY);
- fallthrough;
-
- case DISCARD_READY:
- if (ja->discard_idx == ja->last_idx)
- return;
-
- atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
-
- bio_init(bio, ca->bdev, bio->bi_inline_vecs, 1, REQ_OP_DISCARD);
- bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
- ca->sb.d[ja->discard_idx]);
- bio->bi_iter.bi_size = bucket_bytes(ca);
- bio->bi_end_io = journal_discard_endio;
-
- closure_get(&ca->set->cl);
- INIT_WORK(&ja->discard_work, journal_discard_work);
- queue_work(bch_journal_wq, &ja->discard_work);
- }
-}
-
static unsigned int free_journal_buckets(struct cache_set *c)
{
struct journal *j = &c->journal;
@@ -635,10 +560,10 @@ static unsigned int free_journal_buckets(struct cache_set *c)
unsigned int n;
/* In case njournal_buckets is not power of 2 */
- if (ja->cur_idx >= ja->discard_idx)
- n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
+ if (ja->cur_idx >= ja->last_idx)
+ n = ca->sb.njournal_buckets + ja->last_idx - ja->cur_idx;
else
- n = ja->discard_idx - ja->cur_idx;
+ n = ja->last_idx - ja->cur_idx;
if (n > (1 + j->do_reserve))
return n - (1 + j->do_reserve);
@@ -668,8 +593,6 @@ static void journal_reclaim(struct cache_set *c)
ja->last_idx = (ja->last_idx + 1) %
ca->sb.njournal_buckets;
- do_journal_discard(ca);
-
if (c->journal.blocks_free)
goto out;
diff --git a/drivers/md/bcache/journal.h b/drivers/md/bcache/journal.h
index cd316b4a1e95..9e9d1b3016a5 100644
--- a/drivers/md/bcache/journal.h
+++ b/drivers/md/bcache/journal.h
@@ -139,19 +139,6 @@ struct journal_device {
/* Last journal bucket that still contains an open journal entry */
unsigned int last_idx;
- /* Next journal bucket to be discarded */
- unsigned int discard_idx;
-
-#define DISCARD_READY 0
-#define DISCARD_IN_FLIGHT 1
-#define DISCARD_DONE 2
- /* 1 - discard in flight, -1 - discard completed */
- atomic_t discard_in_flight;
-
- struct work_struct discard_work;
- struct bio discard_bio;
- struct bio_vec discard_bv;
-
/* Bio for journal reads/writes to this device */
struct bio bio;
struct bio_vec bv[8];
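With journal discards gone, last_idx takes over the role discard_idx used to
play as the reclaim boundary: free_journal_buckets() now measures free space
from cur_idx to last_idx directly, and journal_reclaim() simply advances
last_idx without queuing discard bios.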
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 26a6a535ec32..73918e55bf04 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -79,7 +79,7 @@ static void moving_init(struct moving_io *io)
{
struct bio *bio = &io->bio.bio;
- bio_init(bio, NULL, bio->bi_inline_vecs,
+ bio_init_inline(bio, NULL,
DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS), 0);
bio_get(bio);
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
@@ -145,9 +145,9 @@ static void read_moving(struct cache_set *c)
continue;
}
- io = kzalloc(struct_size(io, bio.bio.bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
- GFP_KERNEL);
+ io = kzalloc(sizeof(*io) + sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
if (!io)
goto err;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1492c8552255..c17d4517af22 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1388,7 +1388,7 @@ static CLOSURE_CALLBACK(cached_dev_flush)
bch_cache_accounting_destroy(&dc->accounting);
kobject_del(&d->kobj);
- continue_at(cl, cached_dev_free, system_wq);
+ continue_at(cl, cached_dev_free, system_percpu_wq);
}
static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
@@ -1400,7 +1400,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
__module_get(THIS_MODULE);
INIT_LIST_HEAD(&dc->list);
closure_init(&dc->disk.cl, NULL);
- set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
+ set_closure_fn(&dc->disk.cl, cached_dev_flush, system_percpu_wq);
kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
INIT_WORK(&dc->detach, cached_dev_detach_finish);
sema_init(&dc->sb_write_mutex, 1);
@@ -1513,7 +1513,7 @@ static CLOSURE_CALLBACK(flash_dev_flush)
bcache_device_unlink(d);
mutex_unlock(&bch_register_lock);
kobject_del(&d->kobj);
- continue_at(cl, flash_dev_free, system_wq);
+ continue_at(cl, flash_dev_free, system_percpu_wq);
}
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
@@ -1525,7 +1525,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
goto err_ret;
closure_init(&d->cl, NULL);
- set_closure_fn(&d->cl, flash_dev_flush, system_wq);
+ set_closure_fn(&d->cl, flash_dev_flush, system_percpu_wq);
kobject_init(&d->kobj, &bch_flash_dev_ktype);
@@ -1833,7 +1833,7 @@ static CLOSURE_CALLBACK(__cache_set_unregister)
mutex_unlock(&bch_register_lock);
- continue_at(cl, cache_set_flush, system_wq);
+ continue_at(cl, cache_set_flush, system_percpu_wq);
}
void bch_cache_set_stop(struct cache_set *c)
@@ -1863,10 +1863,10 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
__module_get(THIS_MODULE);
closure_init(&c->cl, NULL);
- set_closure_fn(&c->cl, cache_set_free, system_wq);
+ set_closure_fn(&c->cl, cache_set_free, system_percpu_wq);
closure_init(&c->caching, &c->cl);
- set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
+ set_closure_fn(&c->caching, __cache_set_unregister, system_percpu_wq);
/* Maybe create continue_at_noreturn() and use it here? */
closure_set_stopped(&c->cl);
@@ -1939,7 +1939,8 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
if (!c->uuids)
goto err;
- c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
+ c->moving_gc_wq = alloc_workqueue("bcache_gc",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!c->moving_gc_wq)
goto err;
@@ -2236,7 +2237,7 @@ static int cache_alloc(struct cache *ca)
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
- bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
+ bio_init_inline(&ca->journal.bio, NULL, 8, 0);
/*
* When the cache disk is first registered, ca->sb.njournal_buckets
@@ -2382,9 +2383,6 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
ca->bdev = file_bdev(bdev_file);
ca->sb_disk = sb_disk;
- if (bdev_max_discard_sectors(file_bdev(bdev_file)))
- ca->discard = CACHE_DISCARD(&ca->sb);
-
ret = cache_alloc(ca);
if (ret != 0) {
if (ret == -ENOMEM)
@@ -2531,7 +2529,7 @@ static void register_device_async(struct async_reg_args *args)
INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);
/* 10 jiffies is enough for a delay */
- queue_delayed_work(system_wq, &args->reg_work, 10);
+ queue_delayed_work(system_percpu_wq, &args->reg_work, 10);
}
static void *alloc_holder_object(struct cache_sb *sb)
@@ -2905,24 +2903,25 @@ static int __init bcache_init(void)
if (bch_btree_init())
goto err;
- bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
+ bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!bcache_wq)
goto err;
/*
* Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
*
- * 1. It used `system_wq` before which also does no memory reclaim.
+ * 1. It used `system_percpu_wq` before which also does no memory reclaim.
* 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and
* reduced throughput can be observed.
*
- * We still want to user our own queue to not congest the `system_wq`.
+ * We still want to use our own queue to not congest the `system_percpu_wq`.
*/
- bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
+ bch_flush_wq = alloc_workqueue("bch_flush", WQ_PERCPU, 0);
if (!bch_flush_wq)
goto err;
- bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+ bch_journal_wq = alloc_workqueue("bch_journal",
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!bch_journal_wq)
goto err;
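The system_wq to system_percpu_wq and WQ_PERCPU conversions above appear to
be part of the tree-wide push to make per-CPU workqueue behaviour explicit
(with unbound becoming the unadorned default); the changes here preserve the
existing per-CPU semantics rather than altering behaviour.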
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 826b14cae4e5..72f38e5b6f5c 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -134,7 +134,6 @@ read_attribute(partial_stripes_expensive);
rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
-rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(errors);
@@ -1036,7 +1035,6 @@ SHOW(__bch_cache)
sysfs_hprint(bucket_size, bucket_bytes(ca));
sysfs_hprint(block_size, block_bytes(ca));
sysfs_print(nbuckets, ca->sb.nbuckets);
- sysfs_print(discard, ca->discard);
sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
sysfs_hprint(btree_written,
atomic_long_read(&ca->btree_sectors_written) << 9);
@@ -1142,18 +1140,6 @@ STORE(__bch_cache)
if (bcache_is_reboot)
return -EBUSY;
- if (attr == &sysfs_discard) {
- bool v = strtoul_or_return(buf);
-
- if (bdev_max_discard_sectors(ca->bdev))
- ca->discard = v;
-
- if (v != CACHE_DISCARD(&ca->sb)) {
- SET_CACHE_DISCARD(&ca->sb, v);
- bcache_write_super(ca->set);
- }
- }
-
if (attr == &sysfs_cache_replacement_policy) {
v = __sysfs_match_string(cache_replacement_policies, -1, buf);
if (v < 0)
@@ -1185,7 +1171,6 @@ static struct attribute *bch_cache_attrs[] = {
&sysfs_block_size,
&sysfs_nbuckets,
&sysfs_priority_stats,
- &sysfs_discard,
&sysfs_written,
&sysfs_btree_written,
&sysfs_metadata_written,
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 302e75f1fc4b..4b237074f453 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -331,7 +331,7 @@ static void dirty_init(struct keybuf_key *w)
struct dirty_io *io = w->private;
struct bio *bio = &io->bio;
- bio_init(bio, NULL, bio->bi_inline_vecs,
+ bio_init_inline(bio, NULL,
DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS), 0);
if (!io->dc->writeback_percent)
bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0);
@@ -536,9 +536,9 @@ static void read_dirty(struct cached_dev *dc)
for (i = 0; i < nk; i++) {
w = keys[i];
- io = kzalloc(struct_size(io, bio.bi_inline_vecs,
- DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
- GFP_KERNEL);
+ io = kzalloc(sizeof(*io) + sizeof(struct bio_vec) *
+ DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
+ GFP_KERNEL);
if (!io)
goto err;
@@ -805,8 +805,7 @@ static int bch_writeback_thread(void *arg)
* may set BCH_ENABLE_AUTO_GC via sysfs, then when
* BCH_DO_AUTO_GC is set, garbage collection thread
* will be wake up here. After moving gc, the shrunk
- * btree and discarded free buckets SSD space may be
- * helpful for following write requests.
+ * btree may be helpful for following write requests.
*/
if (c->gc_after_writeback ==
(BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
@@ -1076,7 +1075,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
- WQ_MEM_RECLAIM, 0);
+ WQ_MEM_RECLAIM | WQ_PERCPU, 0);
if (!dc->writeback_write_wq)
return -ENOMEM;
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index ff7595caf440..e6d28be11c5c 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1337,12 +1337,12 @@ static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
char *ptr;
unsigned int len;
- bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
+ bio = bio_kmalloc(1, GFP_NOWAIT);
if (!bio) {
use_dmio(b, op, sector, n_sectors, offset, ioprio);
return;
}
- bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
+ bio_init_inline(bio, b->c->bdev, 1, op);
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = bio_complete;
bio->bi_private = b;
@@ -1601,18 +1601,18 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
* dm-bufio is resistant to allocation failures (it just keeps
* one buffer reserved in cases all the allocations fail).
* So set flags to not try too hard:
- * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
- * mutex and wait ourselves.
+ * GFP_NOWAIT: don't wait and don't print a warning in case of
+ * failure; if we need to sleep we'll release our mutex
+ * and wait ourselves.
* __GFP_NORETRY: don't retry and rather return failure
* __GFP_NOMEMALLOC: don't use emergency reserves
- * __GFP_NOWARN: don't print a warning in case of failure
*
* For debugging, if we set the cache size to 1, no new buffers will
* be allocated.
*/
while (1) {
if (dm_bufio_cache_size_latch != 1) {
- b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC);
if (b)
return b;
}
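Dropping the explicit __GFP_NOWARN next to GFP_NOWAIT is safe because
GFP_NOWAIT has included __GFP_NOWARN for some time now, which is also why the
reworded comment folds the "don't print a warning" wording into the
GFP_NOWAIT line.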
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 2ed894155cab..7e1e8cc0e33a 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -590,7 +590,7 @@ static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned in
nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
ht->hash_bits = __ffs(nr_buckets);
- ht->buckets = vmalloc(array_size(nr_buckets, sizeof(*ht->buckets)));
+ ht->buckets = vmalloc_array(nr_buckets, sizeof(*ht->buckets));
if (!ht->buckets)
return -ENOMEM;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index c889332e533b..a3c9f74fe2dc 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -162,6 +162,7 @@ struct mapped_device {
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
+#define DMF_QUEUE_STOPPED 10
static inline sector_t dm_get_size(struct mapped_device *md)
{
@@ -291,6 +292,7 @@ struct dm_io {
struct dm_io *next;
struct dm_stats_aux stats_aux;
blk_status_t status;
+ bool requeue_flush_with_data;
atomic_t io_count;
struct mapped_device *md;
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index cf17fd46e255..08925aca838c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -441,7 +441,7 @@ static struct bio *clone_bio(struct dm_target *ti, struct flakey_c *fc, struct b
if (!clone)
return NULL;
- bio_init(clone, fc->dev->bdev, clone->bi_inline_vecs, nr_iovecs, bio->bi_opf);
+ bio_init_inline(clone, fc->dev->bdev, nr_iovecs, bio->bi_opf);
clone->bi_iter.bi_sector = flakey_map_sector(ti, bio->bi_iter.bi_sector);
clone->bi_private = bio;
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index 8b50c908c6f4..efb3cd4f9cd4 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -45,7 +45,7 @@ static void fix_separator_chars(char **buf)
/*
* Internal function to allocate memory for IMA measurements.
*/
-static void *dm_ima_alloc(size_t len, gfp_t flags, bool noio)
+static void *dm_ima_alloc(size_t len, bool noio)
{
unsigned int noio_flag;
void *ptr;
@@ -53,7 +53,7 @@ static void *dm_ima_alloc(size_t len, gfp_t flags, bool noio)
if (noio)
noio_flag = memalloc_noio_save();
- ptr = kzalloc(len, flags);
+ ptr = kzalloc(len, GFP_KERNEL);
if (noio)
memalloc_noio_restore(noio_flag);
@@ -68,13 +68,13 @@ static int dm_ima_alloc_and_copy_name_uuid(struct mapped_device *md, char **dev_
char **dev_uuid, bool noio)
{
int r;
- *dev_name = dm_ima_alloc(DM_NAME_LEN*2, GFP_KERNEL, noio);
+ *dev_name = dm_ima_alloc(DM_NAME_LEN*2, noio);
if (!(*dev_name)) {
r = -ENOMEM;
goto error;
}
- *dev_uuid = dm_ima_alloc(DM_UUID_LEN*2, GFP_KERNEL, noio);
+ *dev_uuid = dm_ima_alloc(DM_UUID_LEN*2, noio);
if (!(*dev_uuid)) {
r = -ENOMEM;
goto error;
@@ -109,7 +109,7 @@ static int dm_ima_alloc_and_copy_device_data(struct mapped_device *md, char **de
if (r)
return r;
- *device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ *device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, noio);
if (!(*device_data)) {
r = -ENOMEM;
goto error;
@@ -153,14 +153,12 @@ static int dm_ima_alloc_and_copy_capacity_str(struct mapped_device *md, char **c
capacity = get_capacity(md->disk);
- *capacity_str = dm_ima_alloc(DM_IMA_DEVICE_CAPACITY_BUF_LEN, GFP_KERNEL, noio);
+ *capacity_str = dm_ima_alloc(DM_IMA_DEVICE_CAPACITY_BUF_LEN, noio);
if (!(*capacity_str))
return -ENOMEM;
- scnprintf(*capacity_str, DM_IMA_DEVICE_BUF_LEN, "current_device_capacity=%llu;",
- capacity);
-
- return 0;
+ return scnprintf(*capacity_str, DM_IMA_DEVICE_BUF_LEN, "current_device_capacity=%llu;",
+ capacity);
}
/*
@@ -195,15 +193,15 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
const size_t hash_alg_prefix_len = strlen(DM_IMA_TABLE_HASH_ALG) + 1;
char table_load_event_name[] = "dm_table_load";
- ima_buf = dm_ima_alloc(DM_IMA_MEASUREMENT_BUF_LEN, GFP_KERNEL, noio);
+ ima_buf = dm_ima_alloc(DM_IMA_MEASUREMENT_BUF_LEN, noio);
if (!ima_buf)
return;
- target_metadata_buf = dm_ima_alloc(DM_IMA_TARGET_METADATA_BUF_LEN, GFP_KERNEL, noio);
+ target_metadata_buf = dm_ima_alloc(DM_IMA_TARGET_METADATA_BUF_LEN, noio);
if (!target_metadata_buf)
goto error;
- target_data_buf = dm_ima_alloc(DM_IMA_TARGET_DATA_BUF_LEN, GFP_KERNEL, noio);
+ target_data_buf = dm_ima_alloc(DM_IMA_TARGET_DATA_BUF_LEN, noio);
if (!target_data_buf)
goto error;
@@ -218,7 +216,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
shash->tfm = tfm;
digest_size = crypto_shash_digestsize(tfm);
- digest = dm_ima_alloc(digest_size, GFP_KERNEL, noio);
+ digest = dm_ima_alloc(digest_size, noio);
if (!digest)
goto error;
@@ -327,7 +325,7 @@ void dm_ima_measure_on_table_load(struct dm_table *table, unsigned int status_fl
if (r < 0)
goto error;
- digest_buf = dm_ima_alloc((digest_size*2) + hash_alg_prefix_len + 1, GFP_KERNEL, noio);
+ digest_buf = dm_ima_alloc((digest_size*2) + hash_alg_prefix_len + 1, noio);
if (!digest_buf)
goto error;
@@ -371,18 +369,18 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
{
char *device_table_data, *dev_name = NULL, *dev_uuid = NULL, *capacity_str = NULL;
char active[] = "active_table_hash=";
- unsigned int active_len = strlen(active), capacity_len = 0;
+ unsigned int active_len = strlen(active);
unsigned int l = 0;
bool noio = true;
bool nodata = true;
- int r;
+ int capacity_len;
- device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, noio);
if (!device_table_data)
return;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r)
+ capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (capacity_len < 0)
goto error;
memcpy(device_table_data + l, DM_IMA_VERSION_STR, md->ima.dm_version_str_len);
@@ -445,8 +443,7 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
}
if (nodata) {
- r = dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio);
- if (r)
+ if (dm_ima_alloc_and_copy_name_uuid(md, &dev_name, &dev_uuid, noio))
goto error;
l = scnprintf(device_table_data, DM_IMA_DEVICE_BUF_LEN,
@@ -454,7 +451,6 @@ void dm_ima_measure_on_device_resume(struct mapped_device *md, bool swap)
DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
- capacity_len = strlen(capacity_str);
memcpy(device_table_data + l, capacity_str, capacity_len);
l += capacity_len;
@@ -483,18 +479,17 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
unsigned int device_active_len = strlen(device_active_str);
unsigned int device_inactive_len = strlen(device_inactive_str);
unsigned int remove_all_len = strlen(remove_all_str);
- unsigned int capacity_len = 0;
unsigned int l = 0;
bool noio = true;
bool nodata = true;
- int r;
+ int capacity_len;
- device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN*2, GFP_KERNEL, noio);
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN*2, noio);
if (!device_table_data)
goto exit;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r) {
+ capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (capacity_len < 0) {
kfree(device_table_data);
goto exit;
}
@@ -570,7 +565,6 @@ void dm_ima_measure_on_device_remove(struct mapped_device *md, bool remove_all)
memcpy(device_table_data + l, remove_all ? "y;" : "n;", 2);
l += 2;
- capacity_len = strlen(capacity_str);
memcpy(device_table_data + l, capacity_str, capacity_len);
l += capacity_len;
@@ -602,20 +596,20 @@ exit:
*/
void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
{
- unsigned int l = 0, capacity_len = 0;
+ unsigned int l = 0;
char *device_table_data = NULL, *dev_name = NULL, *dev_uuid = NULL, *capacity_str = NULL;
char inactive_str[] = "inactive_table_hash=";
unsigned int inactive_len = strlen(inactive_str);
bool noio = true;
bool nodata = true;
- int r;
+ int capacity_len;
- device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, GFP_KERNEL, noio);
+ device_table_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN, noio);
if (!device_table_data)
return;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r)
+ capacity_len = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
+ if (capacity_len < 0)
goto error1;
memcpy(device_table_data + l, DM_IMA_VERSION_STR, md->ima.dm_version_str_len);
@@ -650,7 +644,6 @@ void dm_ima_measure_on_table_clear(struct mapped_device *md, bool new_map)
DM_IMA_VERSION_STR, dev_name, dev_uuid);
}
- capacity_len = strlen(capacity_str);
memcpy(device_table_data + l, capacity_str, capacity_len);
l += capacity_len;
@@ -703,7 +696,7 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
char *old_device_data = NULL, *new_device_data = NULL, *combined_device_data = NULL;
char *new_dev_name = NULL, *new_dev_uuid = NULL, *capacity_str = NULL;
bool noio = true;
- int r, len;
+ int len;
if (dm_ima_alloc_and_copy_device_data(md, &new_device_data,
md->ima.active_table.num_targets, noio))
@@ -712,12 +705,11 @@ void dm_ima_measure_on_device_rename(struct mapped_device *md)
if (dm_ima_alloc_and_copy_name_uuid(md, &new_dev_name, &new_dev_uuid, noio))
goto error;
- combined_device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN * 2, GFP_KERNEL, noio);
+ combined_device_data = dm_ima_alloc(DM_IMA_DEVICE_BUF_LEN * 2, noio);
if (!combined_device_data)
goto error;
- r = dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio);
- if (r)
+ if (dm_ima_alloc_and_copy_capacity_str(md, &capacity_str, noio) < 0)
goto error;
old_device_data = md->ima.active_table.device_metadata;
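The dm-ima change tightens the helper's contract:
dm_ima_alloc_and_copy_capacity_str() now returns the scnprintf() length on
success (or a negative errno), so callers keep the length in capacity_len,
test for `< 0`, and drop the later strlen(capacity_str) recomputation before
the memcpy.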
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index efeee0a873c0..170bf67a2edd 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -133,7 +133,7 @@ struct journal_sector {
commit_id_t commit_id;
};
-#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))
+#define MAX_TAG_SIZE 255
#define METADATA_PADDING_SECTORS 8
@@ -219,10 +219,13 @@ struct dm_integrity_c {
__u8 log2_blocks_per_bitmap_bit;
unsigned char mode;
+ bool internal_hash;
int failed;
- struct crypto_shash *internal_hash;
+ struct crypto_shash *internal_shash;
+ struct crypto_ahash *internal_ahash;
+ unsigned int internal_hash_digestsize;
struct dm_target *ti;
@@ -277,6 +280,9 @@ struct dm_integrity_c {
bool fix_hmac;
bool legacy_recalculate;
+ mempool_t ahash_req_pool;
+ struct ahash_request *journal_ahash_req;
+
struct alg_spec internal_hash_alg;
struct alg_spec journal_crypt_alg;
struct alg_spec journal_mac_alg;
@@ -326,6 +332,8 @@ struct dm_integrity_io {
unsigned payload_len;
bool integrity_payload_from_mempool;
bool integrity_range_locked;
+
+ struct ahash_request *ahash_req;
};
struct journal_completion {
@@ -352,6 +360,7 @@ struct bitmap_block_status {
static struct kmem_cache *journal_io_cache;
#define JOURNAL_IO_MEMPOOL 32
+#define AHASH_MEMPOOL 32
#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
@@ -1634,15 +1643,15 @@ static void integrity_end_io(struct bio *bio)
dec_in_flight(dio);
}
-static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
- const char *data, char *result)
+static void integrity_sector_checksum_shash(struct dm_integrity_c *ic, sector_t sector,
+ const char *data, unsigned offset, char *result)
{
__le64 sector_le = cpu_to_le64(sector);
- SHASH_DESC_ON_STACK(req, ic->internal_hash);
+ SHASH_DESC_ON_STACK(req, ic->internal_shash);
int r;
unsigned int digest_size;
- req->tfm = ic->internal_hash;
+ req->tfm = ic->internal_shash;
r = crypto_shash_init(req);
if (unlikely(r < 0)) {
@@ -1664,7 +1673,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
goto failed;
}
- r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
+ r = crypto_shash_update(req, data + offset, ic->sectors_per_block << SECTOR_SHIFT);
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto failed;
@@ -1676,7 +1685,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
goto failed;
}
- digest_size = crypto_shash_digestsize(ic->internal_hash);
+ digest_size = ic->internal_hash_digestsize;
if (unlikely(digest_size < ic->tag_size))
memset(result + digest_size, 0, ic->tag_size - digest_size);
@@ -1687,6 +1696,104 @@ failed:
get_random_bytes(result, ic->tag_size);
}
+static void integrity_sector_checksum_ahash(struct dm_integrity_c *ic, struct ahash_request **ahash_req,
+ sector_t sector, struct page *page, unsigned offset, char *result)
+{
+ __le64 sector_le = cpu_to_le64(sector);
+ struct ahash_request *req;
+ DECLARE_CRYPTO_WAIT(wait);
+ struct scatterlist sg[3], *s = sg;
+ int r;
+ unsigned int digest_size;
+ unsigned int nbytes = 0;
+
+ might_sleep();
+
+ req = *ahash_req;
+ if (unlikely(!req)) {
+ req = mempool_alloc(&ic->ahash_req_pool, GFP_NOIO);
+ *ahash_req = req;
+ }
+
+ ahash_request_set_tfm(req, ic->internal_ahash);
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait);
+
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
+ sg_init_table(sg, 3);
+ sg_set_buf(s, (const __u8 *)&ic->sb->salt, SALT_SIZE);
+ nbytes += SALT_SIZE;
+ s++;
+ } else {
+ sg_init_table(sg, 2);
+ }
+
+ if (likely(!is_vmalloc_addr(&sector_le))) {
+ sg_set_buf(s, &sector_le, sizeof(sector_le));
+ } else {
+ struct page *sec_page = vmalloc_to_page(&sector_le);
+ unsigned int sec_off = offset_in_page(&sector_le);
+ sg_set_page(s, sec_page, sizeof(sector_le), sec_off);
+ }
+ nbytes += sizeof(sector_le);
+ s++;
+
+ sg_set_page(s, page, ic->sectors_per_block << SECTOR_SHIFT, offset);
+ nbytes += ic->sectors_per_block << SECTOR_SHIFT;
+
+ ahash_request_set_crypt(req, sg, result, nbytes);
+
+ r = crypto_wait_req(crypto_ahash_digest(req), &wait);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "crypto_ahash_digest", r);
+ goto failed;
+ }
+
+ digest_size = ic->internal_hash_digestsize;
+ if (unlikely(digest_size < ic->tag_size))
+ memset(result + digest_size, 0, ic->tag_size - digest_size);
+
+ return;
+
+failed:
+ /* this shouldn't happen anyway, the hash functions have no reason to fail */
+ get_random_bytes(result, ic->tag_size);
+}
+
+static void integrity_sector_checksum(struct dm_integrity_c *ic, struct ahash_request **ahash_req,
+ sector_t sector, const char *data, unsigned offset, char *result)
+{
+ if (likely(ic->internal_shash != NULL))
+ integrity_sector_checksum_shash(ic, sector, data, offset, result);
+ else
+ integrity_sector_checksum_ahash(ic, ahash_req, sector, (struct page *)data, offset, result);
+}
+
+static void *integrity_kmap(struct dm_integrity_c *ic, struct page *p)
+{
+ if (likely(ic->internal_shash != NULL))
+ return kmap_local_page(p);
+ else
+ return p;
+}
+
+static void integrity_kunmap(struct dm_integrity_c *ic, const void *ptr)
+{
+ if (likely(ic->internal_shash != NULL))
+ kunmap_local(ptr);
+}
+
+static void *integrity_identity(struct dm_integrity_c *ic, void *data)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(offset_in_page(data));
+ BUG_ON(!virt_addr_valid(data));
+#endif
+ if (likely(ic->internal_shash != NULL))
+ return data;
+ else
+ return virt_to_page(data);
+}
+
static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
{
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
@@ -1711,6 +1818,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
sector_t alignment;
char *mem;
char *buffer = page_to_virt(page);
+ unsigned int buffer_offset;
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1728,7 +1836,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
alignment &= -alignment;
io_loc.sector = round_down(io_loc.sector, alignment);
io_loc.count += sector - io_loc.sector;
- buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
+ buffer_offset = (sector - io_loc.sector) << SECTOR_SHIFT;
io_loc.count = round_up(io_loc.count, alignment);
r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
@@ -1737,7 +1845,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
goto free_ret;
}
- integrity_sector_checksum(ic, logical_sector, buffer, checksum);
+ integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, integrity_identity(ic, buffer), buffer_offset, checksum);
r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
&dio->metadata_offset, ic->tag_size, TAG_CMP);
if (r) {
@@ -1754,7 +1862,7 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
}
mem = bvec_kmap_local(&bv);
- memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
+ memcpy(mem + pos, buffer + buffer_offset, ic->sectors_per_block << SECTOR_SHIFT);
kunmap_local(mem);
pos += ic->sectors_per_block << SECTOR_SHIFT;
@@ -1776,7 +1884,7 @@ static void integrity_metadata(struct work_struct *w)
if (ic->internal_hash) {
struct bvec_iter iter;
struct bio_vec bv;
- unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = ic->internal_hash_digestsize;
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
@@ -1837,17 +1945,17 @@ static void integrity_metadata(struct work_struct *w)
char *mem, *checksums_ptr;
again:
- mem = bvec_kmap_local(&bv_copy);
+ mem = integrity_kmap(ic, bv_copy.bv_page);
pos = 0;
checksums_ptr = checksums;
do {
- integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
+ integrity_sector_checksum(ic, &dio->ahash_req, sector, mem, bv_copy.bv_offset + pos, checksums_ptr);
checksums_ptr += ic->tag_size;
sectors_to_process -= ic->sectors_per_block;
pos += ic->sectors_per_block << SECTOR_SHIFT;
sector += ic->sectors_per_block;
} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
- kunmap_local(mem);
+ integrity_kunmap(ic, mem);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
@@ -1949,6 +2057,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
dio->ic = ic;
dio->bi_status = 0;
dio->op = bio_op(bio);
+ dio->ahash_req = NULL;
if (ic->mode == 'I') {
bio->bi_iter.bi_sector = dm_target_offset(ic->ti, bio->bi_iter.bi_sector);
@@ -2071,19 +2180,6 @@ retry_kmap:
js++;
mem_ptr += 1 << SECTOR_SHIFT;
} while (++s < ic->sectors_per_block);
-#ifdef INTERNAL_VERIFY
- if (ic->internal_hash) {
- char checksums_onstack[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
-
- integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
- if (unlikely(crypto_memneq(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
- DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
- logical_sector);
- dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
- bio, logical_sector, 0);
- }
- }
-#endif
}
if (!ic->internal_hash) {
@@ -2124,15 +2220,17 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
if (ic->internal_hash) {
- unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = ic->internal_hash_digestsize;
+ void *js_page = integrity_identity(ic, (char *)js - offset_in_page(js));
+ unsigned js_offset = offset_in_page(js);
if (unlikely(digest_size > ic->tag_size)) {
char checksums_onstack[HASH_MAX_DIGESTSIZE];
- integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
+ integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, checksums_onstack);
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
} else
- integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
+ integrity_sector_checksum(ic, &dio->ahash_req, logical_sector, js_page, js_offset, journal_entry_tag(ic, je));
}
journal_entry_set_sector(je, logical_sector);
@@ -2428,7 +2526,7 @@ retry:
if (!dio->integrity_payload) {
unsigned digest_size, extra_size;
dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);
- digest_size = crypto_shash_digestsize(ic->internal_hash);
+ digest_size = ic->internal_hash_digestsize;
extra_size = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
dio->payload_len += extra_size;
dio->integrity_payload = kmalloc(dio->payload_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
@@ -2505,11 +2603,11 @@ skip_spinlock:
unsigned pos = 0;
while (dio->bio_details.bi_iter.bi_size) {
struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
- const char *mem = bvec_kmap_local(&bv);
+ const char *mem = integrity_kmap(ic, bv.bv_page);
if (ic->tag_size < ic->tuple_size)
memset(dio->integrity_payload + pos + ic->tag_size, 0, ic->tuple_size - ic->tag_size);
- integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, dio->integrity_payload + pos);
- kunmap_local(mem);
+ integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, dio->integrity_payload + pos);
+ integrity_kunmap(ic, mem);
pos += ic->tuple_size;
bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
}
@@ -2588,8 +2686,8 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
}
bio_put(outgoing_bio);
- integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, outgoing_data, digest);
- if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
+ integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, integrity_identity(ic, outgoing_data), 0, digest);
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload, min(ic->internal_hash_digestsize, ic->tag_size)))) {
DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
ic->dev->bdev, dio->bio_details.bi_iter.bi_sector);
atomic64_inc(&ic->number_of_mismatches);
@@ -2612,33 +2710,58 @@ static void dm_integrity_inline_recheck(struct work_struct *w)
bio_endio(bio);
}
+static inline bool dm_integrity_check(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+ unsigned pos = 0;
+
+ while (dio->bio_details.bi_iter.bi_size) {
+ char digest[HASH_MAX_DIGESTSIZE];
+ struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
+ char *mem = integrity_kmap(ic, bv.bv_page);
+ integrity_sector_checksum(ic, &dio->ahash_req, dio->bio_details.bi_iter.bi_sector, mem, bv.bv_offset, digest);
+ if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
+ min(ic->internal_hash_digestsize, ic->tag_size)))) {
+ integrity_kunmap(ic, mem);
+ dm_integrity_free_payload(dio);
+ INIT_WORK(&dio->work, dm_integrity_inline_recheck);
+ queue_work(ic->offload_wq, &dio->work);
+ return false;
+ }
+ integrity_kunmap(ic, mem);
+ pos += ic->tuple_size;
+ bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
+ }
+
+ return true;
+}
+
+static void dm_integrity_inline_async_check(struct work_struct *w)
+{
+ struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
+ struct dm_integrity_c *ic = dio->ic;
+ struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
+
+ if (likely(dm_integrity_check(ic, dio)))
+ bio_endio(bio);
+}
+
static int dm_integrity_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
{
struct dm_integrity_c *ic = ti->private;
+ struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
if (ic->mode == 'I') {
- struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
- if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK)) {
- unsigned pos = 0;
+ if (dio->op == REQ_OP_READ && likely(*status == BLK_STS_OK) && likely(dio->bio_details.bi_iter.bi_size != 0)) {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
unlikely(dio->integrity_range_locked))
- goto skip_check;
- while (dio->bio_details.bi_iter.bi_size) {
- char digest[HASH_MAX_DIGESTSIZE];
- struct bio_vec bv = bio_iter_iovec(bio, dio->bio_details.bi_iter);
- char *mem = bvec_kmap_local(&bv);
- //memset(mem, 0xff, ic->sectors_per_block << SECTOR_SHIFT);
- integrity_sector_checksum(ic, dio->bio_details.bi_iter.bi_sector, mem, digest);
- if (unlikely(crypto_memneq(digest, dio->integrity_payload + pos,
- min(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)))) {
- kunmap_local(mem);
- dm_integrity_free_payload(dio);
- INIT_WORK(&dio->work, dm_integrity_inline_recheck);
- queue_work(ic->offload_wq, &dio->work);
+ goto skip_check;
+ if (likely(ic->internal_shash != NULL)) {
+ if (unlikely(!dm_integrity_check(ic, dio)))
return DM_ENDIO_INCOMPLETE;
- }
- kunmap_local(mem);
- pos += ic->tuple_size;
- bio_advance_iter_single(bio, &dio->bio_details.bi_iter, ic->sectors_per_block << SECTOR_SHIFT);
+ } else {
+ INIT_WORK(&dio->work, dm_integrity_inline_async_check);
+ queue_work(ic->offload_wq, &dio->work);
+ return DM_ENDIO_INCOMPLETE;
}
}
skip_check:
@@ -2646,6 +2769,8 @@ skip_check:
if (unlikely(dio->integrity_range_locked))
remove_range(ic, &dio->range);
}
+ if (unlikely(dio->ahash_req))
+ mempool_free(dio->ahash_req, &ic->ahash_req_pool);
return DM_ENDIO_DONE;
}
@@ -2902,9 +3027,12 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start
#endif
ic->internal_hash) {
char test_tag[MAX_T(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ struct journal_sector *js = access_journal_data(ic, i, l);
+ void *js_page = integrity_identity(ic, (char *)js - offset_in_page(js));
+ unsigned js_offset = offset_in_page(js);
- integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
- (char *)access_journal_data(ic, i, l), test_tag);
+ integrity_sector_checksum(ic, &ic->journal_ahash_req, sec + ((l - j) << ic->sb->log2_sectors_per_block),
+ js_page, js_offset, test_tag);
if (unlikely(crypto_memneq(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
@@ -2987,6 +3115,7 @@ static void integrity_recalc(struct work_struct *w)
size_t recalc_tags_size;
u8 *recalc_buffer = NULL;
u8 *recalc_tags = NULL;
+ struct ahash_request *ahash_req = NULL;
struct dm_integrity_range range;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -3001,7 +3130,7 @@ static void integrity_recalc(struct work_struct *w)
unsigned recalc_sectors = RECALC_SECTORS;
retry:
- recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO);
+ recalc_buffer = kmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO | __GFP_NOWARN);
if (!recalc_buffer) {
oom:
recalc_sectors >>= 1;
@@ -3011,11 +3140,11 @@ oom:
goto free_ret;
}
recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
- if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
- recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
+ if (ic->internal_hash_digestsize > ic->tag_size)
+ recalc_tags_size += ic->internal_hash_digestsize - ic->tag_size;
recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
if (!recalc_tags) {
- vfree(recalc_buffer);
+ kfree(recalc_buffer);
recalc_buffer = NULL;
goto oom;
}
@@ -3081,7 +3210,7 @@ next_chunk:
goto err;
io_req.bi_opf = REQ_OP_READ;
- io_req.mem.type = DM_IO_VMA;
+ io_req.mem.type = DM_IO_KMEM;
io_req.mem.ptr.addr = recalc_buffer;
io_req.notify.fn = NULL;
io_req.client = ic->io;
@@ -3097,7 +3226,10 @@ next_chunk:
t = recalc_tags;
for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
+ void *ptr = recalc_buffer + (i << SECTOR_SHIFT);
+ void *ptr_page = integrity_identity(ic, (char *)ptr - offset_in_page(ptr));
+ unsigned ptr_offset = offset_in_page(ptr);
+ integrity_sector_checksum(ic, &ahash_req, logical_sector + i, ptr_page, ptr_offset, t);
t += ic->tag_size;
}
@@ -3139,8 +3271,9 @@ unlock_ret:
recalc_write_super(ic);
free_ret:
- vfree(recalc_buffer);
+ kfree(recalc_buffer);
kvfree(recalc_tags);
+ mempool_free(ahash_req, &ic->ahash_req_pool);
}
static void integrity_recalc_inline(struct work_struct *w)
@@ -3149,6 +3282,7 @@ static void integrity_recalc_inline(struct work_struct *w)
size_t recalc_tags_size;
u8 *recalc_buffer = NULL;
u8 *recalc_tags = NULL;
+ struct ahash_request *ahash_req = NULL;
struct dm_integrity_range range;
struct bio *bio;
struct bio_integrity_payload *bip;
@@ -3171,8 +3305,8 @@ oom:
}
recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tuple_size;
- if (crypto_shash_digestsize(ic->internal_hash) > ic->tuple_size)
- recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tuple_size;
+ if (ic->internal_hash_digestsize > ic->tuple_size)
+ recalc_tags_size += ic->internal_hash_digestsize - ic->tuple_size;
recalc_tags = kmalloc(recalc_tags_size, GFP_NOIO | __GFP_NOWARN);
if (!recalc_tags) {
kfree(recalc_buffer);
@@ -3217,8 +3351,11 @@ next_chunk:
t = recalc_tags;
for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
+ void *ptr = recalc_buffer + (i << SECTOR_SHIFT);
+ void *ptr_page = integrity_identity(ic, (char *)ptr - offset_in_page(ptr));
+ unsigned ptr_offset = offset_in_page(ptr);
memset(t, 0, ic->tuple_size);
- integrity_sector_checksum(ic, range.logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
+ integrity_sector_checksum(ic, &ahash_req, range.logical_sector + i, ptr_page, ptr_offset, t);
t += ic->tuple_size;
}
@@ -3270,6 +3407,7 @@ unlock_ret:
free_ret:
kfree(recalc_buffer);
kfree(recalc_tags);
+ mempool_free(ahash_req, &ic->ahash_req_pool);
}
static void bitmap_block_work(struct work_struct *w)
@@ -4210,30 +4348,53 @@ nomem:
return -ENOMEM;
}
-static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
- char *error_alg, char *error_key)
+static int get_mac(struct crypto_shash **shash, struct crypto_ahash **ahash,
+ struct alg_spec *a, char **error, char *error_alg, char *error_key)
{
int r;
if (a->alg_string) {
- *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
- if (IS_ERR(*hash)) {
- *error = error_alg;
- r = PTR_ERR(*hash);
- *hash = NULL;
- return r;
- }
-
- if (a->key) {
- r = crypto_shash_setkey(*hash, a->key, a->key_size);
- if (r) {
+ if (shash) {
+ *shash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(*shash)) {
+ *shash = NULL;
+ goto try_ahash;
+ }
+ if (a->key) {
+ r = crypto_shash_setkey(*shash, a->key, a->key_size);
+ if (r) {
+ *error = error_key;
+ return r;
+ }
+ } else if (crypto_shash_get_flags(*shash) & CRYPTO_TFM_NEED_KEY) {
*error = error_key;
+ return -ENOKEY;
+ }
+ return 0;
+ }
+try_ahash:
+ if (ahash) {
+ *ahash = crypto_alloc_ahash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
+ if (IS_ERR(*ahash)) {
+ *error = error_alg;
+ r = PTR_ERR(*ahash);
+ *ahash = NULL;
return r;
}
- } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
- *error = error_key;
- return -ENOKEY;
+ if (a->key) {
+ r = crypto_ahash_setkey(*ahash, a->key, a->key_size);
+ if (r) {
+ *error = error_key;
+ return r;
+ }
+ } else if (crypto_ahash_get_flags(*ahash) & CRYPTO_TFM_NEED_KEY) {
+ *error = error_key;
+ return -ENOKEY;
+ }
+ return 0;
}
+ *error = error_alg;
+ return -ENOENT;
}
return 0;
@@ -4690,12 +4851,26 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
buffer_sectors = 1;
ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
- r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
+ r = get_mac(&ic->internal_shash, &ic->internal_ahash, &ic->internal_hash_alg, &ti->error,
"Invalid internal hash", "Error setting internal hash key");
if (r)
goto bad;
+ if (ic->internal_shash) {
+ ic->internal_hash = true;
+ ic->internal_hash_digestsize = crypto_shash_digestsize(ic->internal_shash);
+ }
+ if (ic->internal_ahash) {
+ ic->internal_hash = true;
+ ic->internal_hash_digestsize = crypto_ahash_digestsize(ic->internal_ahash);
+ r = mempool_init_kmalloc_pool(&ic->ahash_req_pool, AHASH_MEMPOOL,
+ sizeof(struct ahash_request) + crypto_ahash_reqsize(ic->internal_ahash));
+ if (r) {
+ ti->error = "Cannot allocate mempool";
+ goto bad;
+ }
+ }
- r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
+ r = get_mac(&ic->journal_mac, NULL, &ic->journal_mac_alg, &ti->error,
"Invalid journal mac", "Error setting journal mac key");
if (r)
goto bad;
@@ -4706,7 +4881,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
r = -EINVAL;
goto bad;
}
- ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
+ ic->tag_size = ic->internal_hash_digestsize;
}
if (ic->tag_size > MAX_TAG_SIZE) {
ti->error = "Too big tag size";
@@ -5178,6 +5353,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
kvfree(ic->bbs);
if (ic->bufio)
dm_bufio_client_destroy(ic->bufio);
+ mempool_free(ic->journal_ahash_req, &ic->ahash_req_pool);
+ mempool_exit(&ic->ahash_req_pool);
bioset_exit(&ic->recalc_bios);
bioset_exit(&ic->recheck_bios);
mempool_exit(&ic->recheck_pool);
@@ -5215,8 +5392,10 @@ static void dm_integrity_dtr(struct dm_target *ti)
if (ic->sb)
free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
- if (ic->internal_hash)
- crypto_free_shash(ic->internal_hash);
+ if (ic->internal_shash)
+ crypto_free_shash(ic->internal_shash);
+ if (ic->internal_ahash)
+ crypto_free_ahash(ic->internal_ahash);
free_alg(&ic->internal_hash_alg);
if (ic->journal_crypt)
@@ -5233,7 +5412,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 13, 0},
+ .version = {1, 14, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 679b07dee229..7bb7174f8f4f 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -414,7 +414,7 @@ static int log_super(struct log_writes_c *lc)
}
/*
- * Super sector should be writen in-order, otherwise the
+ * Super sector should be written in-order, otherwise the
* nr_entries could be rewritten incorrectly by an old bio.
*/
wait_for_completion_io(&lc->super_done);
diff --git a/drivers/md/dm-pcache/Kconfig b/drivers/md/dm-pcache/Kconfig
new file mode 100644
index 000000000000..0e251eca892e
--- /dev/null
+++ b/drivers/md/dm-pcache/Kconfig
@@ -0,0 +1,17 @@
+config DM_PCACHE
+	tristate "Persistent cache for block devices (Experimental)"
+ depends on BLK_DEV_DM
+ depends on DEV_DAX
+ help
+ PCACHE provides a mechanism to use persistent memory (e.g., CXL persistent memory,
+ DAX-enabled devices) as a high-performance cache layer in front of
+ traditional block devices such as SSDs or HDDs.
+
+ PCACHE is implemented as a kernel module that integrates with the block
+ layer and supports direct access (DAX) to persistent memory for low-latency,
+ byte-addressable caching.
+
+ Note: This feature is experimental and should be tested thoroughly
+ before use in production environments.
+
+ If unsure, say 'N'.
diff --git a/drivers/md/dm-pcache/Makefile b/drivers/md/dm-pcache/Makefile
new file mode 100644
index 000000000000..cedfd38854f6
--- /dev/null
+++ b/drivers/md/dm-pcache/Makefile
@@ -0,0 +1,3 @@
+dm-pcache-y := dm_pcache.o cache_dev.o segment.o backing_dev.o cache.o cache_gc.o cache_writeback.o cache_segment.o cache_key.o cache_req.o
+
+obj-$(CONFIG_DM_PCACHE) += dm-pcache.o
diff --git a/drivers/md/dm-pcache/backing_dev.c b/drivers/md/dm-pcache/backing_dev.c
new file mode 100644
index 000000000000..7165fc0364bb
--- /dev/null
+++ b/drivers/md/dm-pcache/backing_dev.c
@@ -0,0 +1,374 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/blkdev.h>
+
+#include "../dm-core.h"
+#include "pcache_internal.h"
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "cache.h"
+#include "dm_pcache.h"
+
+static struct kmem_cache *backing_req_cache;
+static struct kmem_cache *backing_bvec_cache;
+
+static void backing_dev_exit(struct pcache_backing_dev *backing_dev)
+{
+ mempool_exit(&backing_dev->req_pool);
+ mempool_exit(&backing_dev->bvec_pool);
+}
+
+static void req_submit_fn(struct work_struct *work);
+static void req_complete_fn(struct work_struct *work);
+static int backing_dev_init(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ int ret;
+
+ ret = mempool_init_slab_pool(&backing_dev->req_pool, 128, backing_req_cache);
+ if (ret)
+ goto err;
+
+ ret = mempool_init_slab_pool(&backing_dev->bvec_pool, 128, backing_bvec_cache);
+ if (ret)
+ goto req_pool_exit;
+
+ INIT_LIST_HEAD(&backing_dev->submit_list);
+ INIT_LIST_HEAD(&backing_dev->complete_list);
+ spin_lock_init(&backing_dev->submit_lock);
+ spin_lock_init(&backing_dev->complete_lock);
+ INIT_WORK(&backing_dev->req_submit_work, req_submit_fn);
+ INIT_WORK(&backing_dev->req_complete_work, req_complete_fn);
+ atomic_set(&backing_dev->inflight_reqs, 0);
+ init_waitqueue_head(&backing_dev->inflight_wq);
+
+ return 0;
+
+req_pool_exit:
+ mempool_exit(&backing_dev->req_pool);
+err:
+ return ret;
+}
+
+int backing_dev_start(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ int ret;
+
+ ret = backing_dev_init(pcache);
+ if (ret)
+ return ret;
+
+ backing_dev->dev_size = bdev_nr_sectors(backing_dev->dm_dev->bdev);
+
+ return 0;
+}
+
+void backing_dev_stop(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+
+	/*
+	 * There should not be any new requests coming in; just wait
+	 * for the inflight requests to finish.
+	 */
+ wait_event(backing_dev->inflight_wq,
+ atomic_read(&backing_dev->inflight_reqs) == 0);
+
+ flush_work(&backing_dev->req_submit_work);
+ flush_work(&backing_dev->req_complete_work);
+
+ backing_dev_exit(backing_dev);
+}
+
+/* pcache_backing_dev_req functions */
+void backing_dev_req_end(struct pcache_backing_dev_req *backing_req)
+{
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+
+ if (backing_req->end_req)
+ backing_req->end_req(backing_req, backing_req->ret);
+
+ switch (backing_req->type) {
+ case BACKING_DEV_REQ_TYPE_REQ:
+ if (backing_req->req.upper_req)
+ pcache_req_put(backing_req->req.upper_req, backing_req->ret);
+ break;
+ case BACKING_DEV_REQ_TYPE_KMEM:
+ if (backing_req->kmem.bvecs != backing_req->kmem.inline_bvecs)
+ mempool_free(backing_req->kmem.bvecs, &backing_dev->bvec_pool);
+ break;
+ default:
+ BUG();
+ }
+
+ mempool_free(backing_req, &backing_dev->req_pool);
+
+ if (atomic_dec_and_test(&backing_dev->inflight_reqs))
+ wake_up(&backing_dev->inflight_wq);
+}
+
+static void req_complete_fn(struct work_struct *work)
+{
+ struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_complete_work);
+ struct pcache_backing_dev_req *backing_req;
+ LIST_HEAD(tmp_list);
+
+ spin_lock_irq(&backing_dev->complete_lock);
+ list_splice_init(&backing_dev->complete_list, &tmp_list);
+ spin_unlock_irq(&backing_dev->complete_lock);
+
+ while (!list_empty(&tmp_list)) {
+ backing_req = list_first_entry(&tmp_list,
+ struct pcache_backing_dev_req, node);
+ list_del_init(&backing_req->node);
+ backing_dev_req_end(backing_req);
+ }
+}
+
+static void backing_dev_bio_end(struct bio *bio)
+{
+ struct pcache_backing_dev_req *backing_req = bio->bi_private;
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+ unsigned long flags;
+
+ backing_req->ret = blk_status_to_errno(bio->bi_status);
+
+ spin_lock_irqsave(&backing_dev->complete_lock, flags);
+ list_move_tail(&backing_req->node, &backing_dev->complete_list);
+ queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_complete_work);
+ spin_unlock_irqrestore(&backing_dev->complete_lock, flags);
+}
+
+static void req_submit_fn(struct work_struct *work)
+{
+ struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_submit_work);
+ struct pcache_backing_dev_req *backing_req;
+ LIST_HEAD(tmp_list);
+
+ spin_lock(&backing_dev->submit_lock);
+ list_splice_init(&backing_dev->submit_list, &tmp_list);
+ spin_unlock(&backing_dev->submit_lock);
+
+ while (!list_empty(&tmp_list)) {
+ backing_req = list_first_entry(&tmp_list,
+ struct pcache_backing_dev_req, node);
+ list_del_init(&backing_req->node);
+ submit_bio_noacct(&backing_req->bio);
+ }
+}
+
+void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct)
+{
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+
+ if (direct) {
+ submit_bio_noacct(&backing_req->bio);
+ return;
+ }
+
+ spin_lock(&backing_dev->submit_lock);
+ list_add_tail(&backing_req->node, &backing_dev->submit_list);
+ queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_submit_work);
+ spin_unlock(&backing_dev->submit_lock);
+}
+
+static void bio_map(struct bio *bio, void *base, size_t size)
+{
+ struct page *page;
+ unsigned int offset;
+ unsigned int len;
+
+ if (!is_vmalloc_addr(base)) {
+ page = virt_to_page(base);
+ offset = offset_in_page(base);
+
+ BUG_ON(!bio_add_page(bio, page, size, offset));
+ return;
+ }
+
+ flush_kernel_vmap_range(base, size);
+ while (size) {
+ page = vmalloc_to_page(base);
+ offset = offset_in_page(base);
+ len = min_t(size_t, PAGE_SIZE - offset, size);
+
+ BUG_ON(!bio_add_page(bio, page, len, offset));
+ size -= len;
+ base += len;
+ }
+}
+
+static struct pcache_backing_dev_req *req_type_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_request *pcache_req = opts->req.upper_req;
+ struct pcache_backing_dev_req *backing_req;
+ struct bio *orig = pcache_req->bio;
+
+ backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
+ if (!backing_req)
+ return NULL;
+
+ memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));
+
+ bio_init_clone(backing_dev->dm_dev->bdev, &backing_req->bio, orig, opts->gfp_mask);
+
+ backing_req->type = BACKING_DEV_REQ_TYPE_REQ;
+ backing_req->backing_dev = backing_dev;
+ atomic_inc(&backing_dev->inflight_reqs);
+
+ return backing_req;
+}
+
+static struct pcache_backing_dev_req *kmem_type_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_backing_dev_req *backing_req;
+ u32 n_vecs = bio_add_max_vecs(opts->kmem.data, opts->kmem.len);
+
+ backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
+ if (!backing_req)
+ return NULL;
+
+ memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));
+
+ if (n_vecs > BACKING_DEV_REQ_INLINE_BVECS) {
+ backing_req->kmem.bvecs = mempool_alloc(&backing_dev->bvec_pool, opts->gfp_mask);
+ if (!backing_req->kmem.bvecs)
+ goto free_backing_req;
+ } else {
+ backing_req->kmem.bvecs = backing_req->kmem.inline_bvecs;
+ }
+
+ backing_req->kmem.n_vecs = n_vecs;
+ backing_req->type = BACKING_DEV_REQ_TYPE_KMEM;
+ backing_req->backing_dev = backing_dev;
+ atomic_inc(&backing_dev->inflight_reqs);
+
+ return backing_req;
+
+free_backing_req:
+ mempool_free(backing_req, &backing_dev->req_pool);
+ return NULL;
+}
+
+struct pcache_backing_dev_req *backing_dev_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
+ return req_type_req_alloc(backing_dev, opts);
+
+ if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
+ return kmem_type_req_alloc(backing_dev, opts);
+
+ BUG();
+}
+
+static void req_type_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_request *pcache_req = opts->req.upper_req;
+ struct bio *clone;
+ u32 off = opts->req.req_off;
+ u32 len = opts->req.len;
+
+ clone = &backing_req->bio;
+ BUG_ON(off & SECTOR_MASK);
+ BUG_ON(len & SECTOR_MASK);
+ bio_trim(clone, off >> SECTOR_SHIFT, len >> SECTOR_SHIFT);
+
+ clone->bi_iter.bi_sector = (pcache_req->off + off) >> SECTOR_SHIFT;
+ clone->bi_private = backing_req;
+ clone->bi_end_io = backing_dev_bio_end;
+
+ INIT_LIST_HEAD(&backing_req->node);
+ backing_req->end_req = opts->end_fn;
+
+ pcache_req_get(pcache_req);
+ backing_req->req.upper_req = pcache_req;
+ backing_req->req.bio_off = off;
+}
+
+static void kmem_type_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
+ struct bio *backing_bio;
+
+ bio_init(&backing_req->bio, backing_dev->dm_dev->bdev, backing_req->kmem.bvecs,
+ backing_req->kmem.n_vecs, opts->kmem.opf);
+
+ backing_bio = &backing_req->bio;
+ bio_map(backing_bio, opts->kmem.data, opts->kmem.len);
+
+ backing_bio->bi_iter.bi_sector = (opts->kmem.backing_off) >> SECTOR_SHIFT;
+ backing_bio->bi_private = backing_req;
+ backing_bio->bi_end_io = backing_dev_bio_end;
+
+ INIT_LIST_HEAD(&backing_req->node);
+ backing_req->end_req = opts->end_fn;
+ backing_req->priv_data = opts->priv_data;
+}
+
+void backing_dev_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
+ return req_type_req_init(backing_req, opts);
+
+ if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
+ return kmem_type_req_init(backing_req, opts);
+
+ BUG();
+}
+
+struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts)
+{
+ struct pcache_backing_dev_req *backing_req;
+
+ backing_req = backing_dev_req_alloc(backing_dev, opts);
+ if (!backing_req)
+ return NULL;
+
+ backing_dev_req_init(backing_req, opts);
+
+ return backing_req;
+}
+
+void backing_dev_flush(struct pcache_backing_dev *backing_dev)
+{
+ blkdev_issue_flush(backing_dev->dm_dev->bdev);
+}
+
+int pcache_backing_init(void)
+{
+ u32 max_bvecs = (PCACHE_CACHE_SUBTREE_SIZE >> PAGE_SHIFT) + 1;
+ int ret;
+
+ backing_req_cache = KMEM_CACHE(pcache_backing_dev_req, 0);
+ if (!backing_req_cache) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ backing_bvec_cache = kmem_cache_create("pcache-bvec-slab",
+ max_bvecs * sizeof(struct bio_vec),
+ 0, 0, NULL);
+ if (!backing_bvec_cache) {
+ ret = -ENOMEM;
+ goto destroy_req_cache;
+ }
+
+ return 0;
+destroy_req_cache:
+ kmem_cache_destroy(backing_req_cache);
+err:
+ return ret;
+}
+
+void pcache_backing_exit(void)
+{
+ kmem_cache_destroy(backing_bvec_cache);
+ kmem_cache_destroy(backing_req_cache);
+}
diff --git a/drivers/md/dm-pcache/backing_dev.h b/drivers/md/dm-pcache/backing_dev.h
new file mode 100644
index 000000000000..b371cba483b9
--- /dev/null
+++ b/drivers/md/dm-pcache/backing_dev.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _BACKING_DEV_H
+#define _BACKING_DEV_H
+
+#include <linux/device-mapper.h>
+
+#include "pcache_internal.h"
+
+struct pcache_backing_dev_req;
+typedef void (*backing_req_end_fn_t)(struct pcache_backing_dev_req *backing_req, int ret);
+
+#define BACKING_DEV_REQ_TYPE_REQ 1
+#define BACKING_DEV_REQ_TYPE_KMEM 2
+
+#define BACKING_DEV_REQ_INLINE_BVECS 4
+
+struct pcache_request;
+struct pcache_backing_dev_req {
+ u8 type;
+ struct bio bio;
+ struct pcache_backing_dev *backing_dev;
+
+ void *priv_data;
+ backing_req_end_fn_t end_req;
+
+ struct list_head node;
+ int ret;
+
+ union {
+ struct {
+ struct pcache_request *upper_req;
+ u32 bio_off;
+ } req;
+ struct {
+ struct bio_vec inline_bvecs[BACKING_DEV_REQ_INLINE_BVECS];
+ struct bio_vec *bvecs;
+ u32 n_vecs;
+ } kmem;
+ };
+};
+
+struct pcache_backing_dev {
+ struct pcache_cache *cache;
+
+ struct dm_dev *dm_dev;
+ mempool_t req_pool;
+ mempool_t bvec_pool;
+
+ struct list_head submit_list;
+ spinlock_t submit_lock;
+ struct work_struct req_submit_work;
+
+ struct list_head complete_list;
+ spinlock_t complete_lock;
+ struct work_struct req_complete_work;
+
+ atomic_t inflight_reqs;
+ wait_queue_head_t inflight_wq;
+
+ u64 dev_size;
+};
+
+struct dm_pcache;
+int backing_dev_start(struct dm_pcache *pcache);
+void backing_dev_stop(struct dm_pcache *pcache);
+
+struct pcache_backing_dev_req_opts {
+ u32 type;
+ union {
+ struct {
+ struct pcache_request *upper_req;
+ u32 req_off;
+ u32 len;
+ } req;
+ struct {
+ void *data;
+ blk_opf_t opf;
+ u32 len;
+ u64 backing_off;
+ } kmem;
+ };
+
+ gfp_t gfp_mask;
+ backing_req_end_fn_t end_fn;
+ void *priv_data;
+};
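+
+/*
+ * A minimal usage sketch for a KMEM-type request; the identifiers
+ * buf, len, off and my_end_io below are hypothetical, for illustration:
+ *
+ *	struct pcache_backing_dev_req_opts opts = {
+ *		.type = BACKING_DEV_REQ_TYPE_KMEM,
+ *		.kmem = {
+ *			.data = buf,
+ *			.opf = REQ_OP_WRITE,
+ *			.len = len,
+ *			.backing_off = off,
+ *		},
+ *		.gfp_mask = GFP_NOIO,
+ *		.end_fn = my_end_io,
+ *	};
+ *	struct pcache_backing_dev_req *req;
+ *
+ *	req = backing_dev_req_create(backing_dev, &opts);
+ *	if (req)
+ *		backing_dev_req_submit(req, false);
+ */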
+
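+/*
+ * backing_dev_req_coalesced_max_len() reports how many bytes from @data
+ * can be carried by a single backing_dev request: a directly-mapped
+ * buffer is fully coalescable, while a vmalloc buffer is walked page by
+ * page and coalescing stops at the first page whose pgmap differs from
+ * that of the first page.
+ */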
+static inline u32 backing_dev_req_coalesced_max_len(const void *data, u32 len)
+{
+ const void *p = data;
+ u32 done = 0, in_page, to_advance;
+ struct page *first_page, *next_page;
+
+ if (!is_vmalloc_addr(data))
+ return len;
+
+ first_page = vmalloc_to_page(p);
+advance:
+ in_page = PAGE_SIZE - offset_in_page(p);
+ to_advance = min_t(u32, in_page, len - done);
+
+ done += to_advance;
+ p += to_advance;
+
+ if (done == len)
+ return done;
+
+ next_page = vmalloc_to_page(p);
+ if (zone_device_pages_have_same_pgmap(first_page, next_page))
+ goto advance;
+
+ return done;
+}
+
+void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct);
+void backing_dev_req_end(struct pcache_backing_dev_req *backing_req);
+struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts);
+struct pcache_backing_dev_req *backing_dev_req_alloc(struct pcache_backing_dev *backing_dev,
+ struct pcache_backing_dev_req_opts *opts);
+void backing_dev_req_init(struct pcache_backing_dev_req *backing_req,
+ struct pcache_backing_dev_req_opts *opts);
+void backing_dev_flush(struct pcache_backing_dev *backing_dev);
+
+int pcache_backing_init(void);
+void pcache_backing_exit(void);
+#endif /* _BACKING_DEV_H */
diff --git a/drivers/md/dm-pcache/cache.c b/drivers/md/dm-pcache/cache.c
new file mode 100644
index 000000000000..698697a7a73c
--- /dev/null
+++ b/drivers/md/dm-pcache/cache.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/blk_types.h>
+
+#include "cache.h"
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "dm_pcache.h"
+
+struct kmem_cache *key_cache;
+
+static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache)
+{
+ return cache->cache_info_addr + cache->info_index;
+}
+
+static void cache_info_write(struct pcache_cache *cache)
+{
+ struct pcache_cache_info *cache_info = &cache->cache_info;
+
+ cache_info->header.seq++;
+ cache_info->header.crc = pcache_meta_crc(&cache_info->header,
+ sizeof(struct pcache_cache_info));
+
+ memcpy_flushcache(get_cache_info_addr(cache), cache_info,
+ sizeof(struct pcache_cache_info));
+
+ cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX;
+}
+
+static void cache_info_init_default(struct pcache_cache *cache);
+static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_options *opts)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_info *cache_info_addr;
+
+ cache_info_addr = pcache_meta_find_latest(&cache->cache_info_addr->header,
+ sizeof(struct pcache_cache_info),
+ PCACHE_CACHE_INFO_SIZE,
+ &cache->cache_info);
+ if (IS_ERR(cache_info_addr))
+ return PTR_ERR(cache_info_addr);
+
+ if (cache_info_addr) {
+ if (opts->data_crc !=
+ (cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC)) {
+ pcache_dev_err(pcache, "invalid option for data_crc: %s, expected: %s",
+ opts->data_crc ? "true" : "false",
+ cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC ? "true" : "false");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ /* init cache_info for new cache */
+ cache_info_init_default(cache);
+ cache_mode_set(cache, opts->cache_mode);
+ if (opts->data_crc)
+ cache->cache_info.flags |= PCACHE_CACHE_FLAGS_DATA_CRC;
+
+ return 0;
+}
+
+static void cache_info_set_gc_percent(struct pcache_cache_info *cache_info, u8 percent)
+{
+ cache_info->flags &= ~PCACHE_CACHE_FLAGS_GC_PERCENT_MASK;
+ cache_info->flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, percent);
+}
+
+int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent)
+{
+ if (percent > PCACHE_CACHE_GC_PERCENT_MAX || percent < PCACHE_CACHE_GC_PERCENT_MIN)
+ return -EINVAL;
+
+ mutex_lock(&cache->cache_info_lock);
+ cache_info_set_gc_percent(&cache->cache_info, percent);
+
+ cache_info_write(cache);
+ mutex_unlock(&cache->cache_info_lock);
+
+ return 0;
+}
+
+void cache_pos_encode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia_base,
+ struct pcache_cache_pos *pos, u64 seq, u32 *index)
+{
+ struct pcache_cache_pos_onmedia pos_onmedia;
+ struct pcache_cache_pos_onmedia *pos_onmedia_addr = pos_onmedia_base + *index;
+
+ pos_onmedia.cache_seg_id = pos->cache_seg->cache_seg_id;
+ pos_onmedia.seg_off = pos->seg_off;
+ pos_onmedia.header.seq = seq;
+ pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia);
+
+ memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia));
+ pmem_wmb();
+
+ *index = (*index + 1) % PCACHE_META_INDEX_MAX;
+}
+
+int cache_pos_decode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia,
+ struct pcache_cache_pos *pos, u64 *seq, u32 *index)
+{
+ struct pcache_cache_pos_onmedia latest, *latest_addr;
+
+ latest_addr = pcache_meta_find_latest(&pos_onmedia->header,
+ sizeof(struct pcache_cache_pos_onmedia),
+ sizeof(struct pcache_cache_pos_onmedia),
+ &latest);
+ if (IS_ERR(latest_addr))
+ return PTR_ERR(latest_addr);
+
+ if (!latest_addr)
+ return -EIO;
+
+ pos->cache_seg = &cache->segments[latest.cache_seg_id];
+ pos->seg_off = latest.seg_off;
+ *seq = latest.header.seq;
+ *index = (latest_addr - pos_onmedia);
+
+ return 0;
+}
+
+static inline void cache_info_set_seg_id(struct pcache_cache *cache, u32 seg_id)
+{
+ cache->cache_info.seg_id = seg_id;
+}
+
+static int cache_init(struct dm_pcache *pcache)
+{
+ struct pcache_cache *cache = &pcache->cache;
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+ int ret;
+
+ cache->segments = kvcalloc(cache_dev->seg_num, sizeof(struct pcache_cache_segment), GFP_KERNEL);
+ if (!cache->segments) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ cache->seg_map = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
+ if (!cache->seg_map) {
+ ret = -ENOMEM;
+ goto free_segments;
+ }
+
+ cache->backing_dev = backing_dev;
+ cache->cache_dev = &pcache->cache_dev;
+ cache->n_segs = cache_dev->seg_num;
+ atomic_set(&cache->gc_errors, 0);
+ spin_lock_init(&cache->seg_map_lock);
+ spin_lock_init(&cache->key_head_lock);
+
+ mutex_init(&cache->cache_info_lock);
+ mutex_init(&cache->key_tail_lock);
+ mutex_init(&cache->dirty_tail_lock);
+ mutex_init(&cache->writeback_lock);
+
+ INIT_DELAYED_WORK(&cache->writeback_work, cache_writeback_fn);
+ INIT_DELAYED_WORK(&cache->gc_work, pcache_cache_gc_fn);
+ INIT_WORK(&cache->clean_work, clean_fn);
+
+ return 0;
+
+free_segments:
+ kvfree(cache->segments);
+err:
+ return ret;
+}
+
+static void cache_exit(struct pcache_cache *cache)
+{
+ kvfree(cache->seg_map);
+ kvfree(cache->segments);
+}
+
+static void cache_info_init_default(struct pcache_cache *cache)
+{
+ struct pcache_cache_info *cache_info = &cache->cache_info;
+
+ memset(cache_info, 0, sizeof(*cache_info));
+ cache_info->n_segs = cache->cache_dev->seg_num;
+ cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
+}
+
+static int cache_tail_init(struct pcache_cache *cache)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
+
+ if (new_cache) {
+ __set_bit(0, cache->seg_map);
+
+ cache->key_head.cache_seg = &cache->segments[0];
+ cache->key_head.seg_off = 0;
+ cache_pos_copy(&cache->key_tail, &cache->key_head);
+ cache_pos_copy(&cache->dirty_tail, &cache->key_head);
+
+ cache_encode_dirty_tail(cache);
+ cache_encode_key_tail(cache);
+ } else {
+ if (cache_decode_key_tail(cache) || cache_decode_dirty_tail(cache)) {
+ pcache_dev_err(pcache, "Corrupted key tail or dirty tail.\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int get_seg_id(struct pcache_cache *cache,
+ struct pcache_cache_segment *prev_cache_seg,
+ bool new_cache, u32 *seg_id)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_dev *cache_dev = cache->cache_dev;
+ int ret;
+
+ if (new_cache) {
+ ret = cache_dev_get_empty_segment_id(cache_dev, seg_id);
+ if (ret) {
+ pcache_dev_err(pcache, "no available segment\n");
+ goto err;
+ }
+
+ if (prev_cache_seg)
+ cache_seg_set_next_seg(prev_cache_seg, *seg_id);
+ else
+ cache_info_set_seg_id(cache, *seg_id);
+ } else {
+ if (prev_cache_seg) {
+ struct pcache_segment_info *prev_seg_info;
+
+ prev_seg_info = &prev_cache_seg->cache_seg_info;
+ if (!segment_info_has_next(prev_seg_info)) {
+ ret = -EFAULT;
+ goto err;
+ }
+ *seg_id = prev_cache_seg->cache_seg_info.next_seg;
+ } else {
+ *seg_id = cache->cache_info.seg_id;
+ }
+ }
+ return 0;
+err:
+ return ret;
+}
+
+static int cache_segs_init(struct pcache_cache *cache)
+{
+ struct pcache_cache_segment *prev_cache_seg = NULL;
+ struct pcache_cache_info *cache_info = &cache->cache_info;
+ bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
+ u32 seg_id;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < cache_info->n_segs; i++) {
+ ret = get_seg_id(cache, prev_cache_seg, new_cache, &seg_id);
+ if (ret)
+ goto err;
+
+ ret = cache_seg_init(cache, seg_id, i, new_cache);
+ if (ret)
+ goto err;
+
+ prev_cache_seg = &cache->segments[i];
+ }
+ return 0;
+err:
+ return ret;
+}
+
+static int cache_init_req_keys(struct pcache_cache *cache, u32 n_paral)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ u32 n_subtrees;
+ int ret;
+ u32 i, cpu;
+
+ /* Calculate number of cache trees based on the device size */
+ n_subtrees = DIV_ROUND_UP(cache->dev_size << SECTOR_SHIFT, PCACHE_CACHE_SUBTREE_SIZE);
+ ret = cache_tree_init(cache, &cache->req_key_tree, n_subtrees);
+ if (ret)
+ goto err;
+
+ cache->n_ksets = n_paral;
+ cache->ksets = kvcalloc(cache->n_ksets, PCACHE_KSET_SIZE, GFP_KERNEL);
+ if (!cache->ksets) {
+ ret = -ENOMEM;
+ goto req_tree_exit;
+ }
+
+ /*
+ * Initialize each kset with a spinlock and delayed work for flushing.
+ * Each kset is associated with one queue to ensure independent handling
+ * of cache keys across multiple queues, maximizing multiqueue concurrency.
+ */
+ for (i = 0; i < cache->n_ksets; i++) {
+ struct pcache_cache_kset *kset = get_kset(cache, i);
+
+ kset->cache = cache;
+ spin_lock_init(&kset->kset_lock);
+ INIT_DELAYED_WORK(&kset->flush_work, kset_flush_fn);
+ }
+
+ cache->data_heads = alloc_percpu(struct pcache_cache_data_head);
+ if (!cache->data_heads) {
+ ret = -ENOMEM;
+ goto free_kset;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct pcache_cache_data_head *h =
+ per_cpu_ptr(cache->data_heads, cpu);
+ h->head_pos.cache_seg = NULL;
+ }
+
+ /*
+ * Replay persisted cache keys using cache_replay.
+ * This function loads and replays cache keys from previously stored
+ * ksets, allowing the cache to restore its state after a restart.
+ */
+ ret = cache_replay(cache);
+ if (ret) {
+ pcache_dev_err(pcache, "failed to replay keys\n");
+ goto free_heads;
+ }
+
+ return 0;
+
+free_heads:
+ free_percpu(cache->data_heads);
+free_kset:
+ kvfree(cache->ksets);
+req_tree_exit:
+ cache_tree_exit(&cache->req_key_tree);
+err:
+ return ret;
+}
+
+static void cache_destroy_req_keys(struct pcache_cache *cache)
+{
+ u32 i;
+
+ for (i = 0; i < cache->n_ksets; i++) {
+ struct pcache_cache_kset *kset = get_kset(cache, i);
+
+ cancel_delayed_work_sync(&kset->flush_work);
+ }
+
+ free_percpu(cache->data_heads);
+ kvfree(cache->ksets);
+ cache_tree_exit(&cache->req_key_tree);
+}
+
+int pcache_cache_start(struct dm_pcache *pcache)
+{
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ struct pcache_cache *cache = &pcache->cache;
+ struct pcache_cache_options *opts = &pcache->opts;
+ int ret;
+
+ ret = cache_init(pcache);
+ if (ret)
+ return ret;
+
+ cache->cache_info_addr = CACHE_DEV_CACHE_INFO(cache->cache_dev);
+ cache->cache_ctrl = CACHE_DEV_CACHE_CTRL(cache->cache_dev);
+ backing_dev->cache = cache;
+ cache->dev_size = backing_dev->dev_size;
+
+ ret = cache_info_init(cache, opts);
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_segs_init(cache);
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_tail_init(cache);
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_init_req_keys(cache, num_online_cpus());
+ if (ret)
+ goto cache_exit;
+
+ ret = cache_writeback_init(cache);
+ if (ret)
+ goto destroy_keys;
+
+ cache->cache_info.flags |= PCACHE_CACHE_FLAGS_INIT_DONE;
+ cache_info_write(cache);
+ queue_delayed_work(cache_get_wq(cache), &cache->gc_work, 0);
+
+ return 0;
+
+destroy_keys:
+ cache_destroy_req_keys(cache);
+cache_exit:
+ cache_exit(cache);
+
+ return ret;
+}
+
+void pcache_cache_stop(struct dm_pcache *pcache)
+{
+ struct pcache_cache *cache = &pcache->cache;
+
+ pcache_cache_flush(cache);
+
+ cancel_delayed_work_sync(&cache->gc_work);
+ flush_work(&cache->clean_work);
+ cache_writeback_exit(cache);
+
+ if (cache->req_key_tree.n_subtrees)
+ cache_destroy_req_keys(cache);
+
+ cache_exit(cache);
+}
+
+struct workqueue_struct *cache_get_wq(struct pcache_cache *cache)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+
+ return pcache->task_wq;
+}
+
+int pcache_cache_init(void)
+{
+ key_cache = KMEM_CACHE(pcache_cache_key, 0);
+ if (!key_cache)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void pcache_cache_exit(void)
+{
+ kmem_cache_destroy(key_cache);
+}
diff --git a/drivers/md/dm-pcache/cache.h b/drivers/md/dm-pcache/cache.h
new file mode 100644
index 000000000000..27613b56be54
--- /dev/null
+++ b/drivers/md/dm-pcache/cache.h
@@ -0,0 +1,635 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_CACHE_H
+#define _PCACHE_CACHE_H
+
+#include "segment.h"
+
+/* Garbage collection thresholds */
+#define PCACHE_CACHE_GC_PERCENT_MIN 0 /* Minimum GC percentage */
+#define PCACHE_CACHE_GC_PERCENT_MAX 90 /* Maximum GC percentage */
+#define PCACHE_CACHE_GC_PERCENT_DEFAULT 70 /* Default GC percentage */
+
+#define PCACHE_CACHE_SUBTREE_SIZE		(4 * PCACHE_MB)	/* 4MB per subtree */
+#define PCACHE_CACHE_SUBTREE_SIZE_MASK		0x3FFFFF	/* Mask for offset within a subtree */
+#define PCACHE_CACHE_SUBTREE_SIZE_SHIFT		22		/* Shift to convert offset to subtree index */
+
+/* Maximum number of keys per key set */
+#define PCACHE_KSET_KEYS_MAX 128
+#define PCACHE_CACHE_SEGS_MAX (1024 * 1024) /* maximum cache size for each device is 16T */
+#define PCACHE_KSET_ONMEDIA_SIZE_MAX struct_size_t(struct pcache_cache_kset_onmedia, data, PCACHE_KSET_KEYS_MAX)
+#define PCACHE_KSET_SIZE (sizeof(struct pcache_cache_kset) + sizeof(struct pcache_cache_key_onmedia) * PCACHE_KSET_KEYS_MAX)
+
+/* Maximum number of keys to clean in one round of clean_work */
+#define PCACHE_CLEAN_KEYS_MAX 10
+
+/* Writeback and garbage collection intervals in jiffies */
+#define PCACHE_CACHE_WRITEBACK_INTERVAL (5 * HZ)
+#define PCACHE_CACHE_GC_INTERVAL (5 * HZ)
+
+/* Macro to get the cache key structure from an rb_node pointer */
+#define CACHE_KEY(node) (container_of(node, struct pcache_cache_key, rb_node))
+
+struct pcache_cache_pos_onmedia {
+ struct pcache_meta_header header;
+ __u32 cache_seg_id;
+ __u32 seg_off;
+};
+
+/* Offset and size definitions for cache segment control */
+#define PCACHE_CACHE_SEG_CTRL_OFF (PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX)
+#define PCACHE_CACHE_SEG_CTRL_SIZE (4 * PCACHE_KB)
+
+struct pcache_cache_seg_gen {
+ struct pcache_meta_header header;
+ __u64 gen;
+};
+
+/* Control structure for cache segments */
+struct pcache_cache_seg_ctrl {
+ struct pcache_cache_seg_gen gen[PCACHE_META_INDEX_MAX];
+ __u64 res[64];
+};
+
+#define PCACHE_CACHE_FLAGS_DATA_CRC BIT(0)
+#define PCACHE_CACHE_FLAGS_INIT_DONE BIT(1)
+
+#define PCACHE_CACHE_FLAGS_CACHE_MODE_MASK GENMASK(5, 2)
+#define PCACHE_CACHE_MODE_WRITEBACK 0
+#define PCACHE_CACHE_MODE_WRITETHROUGH 1
+#define PCACHE_CACHE_MODE_WRITEAROUND 2
+#define PCACHE_CACHE_MODE_WRITEONLY 3
+
+#define PCACHE_CACHE_FLAGS_GC_PERCENT_MASK GENMASK(12, 6)
+
+struct pcache_cache_info {
+ struct pcache_meta_header header;
+ __u32 seg_id;
+ __u32 n_segs;
+ __u32 flags;
+ __u32 reserved;
+};
+
+struct pcache_cache_pos {
+ struct pcache_cache_segment *cache_seg;
+ u32 seg_off;
+};
+
+struct pcache_cache_segment {
+ struct pcache_cache *cache;
+ u32 cache_seg_id; /* Index in cache->segments */
+ struct pcache_segment segment;
+ atomic_t refs;
+
+ struct pcache_segment_info cache_seg_info;
+ struct mutex info_lock;
+ u32 info_index;
+
+ spinlock_t gen_lock;
+ u64 gen;
+ u64 gen_seq;
+ u32 gen_index;
+
+ struct pcache_cache_seg_ctrl *cache_seg_ctrl;
+};
+
+/* rbtree for cache entries */
+struct pcache_cache_subtree {
+ struct rb_root root;
+ spinlock_t tree_lock;
+};
+
+struct pcache_cache_tree {
+ struct pcache_cache *cache;
+ u32 n_subtrees;
+ mempool_t key_pool;
+ struct pcache_cache_subtree *subtrees;
+};
+
+extern struct kmem_cache *key_cache;
+
+struct pcache_cache_key {
+ struct pcache_cache_tree *cache_tree;
+ struct pcache_cache_subtree *cache_subtree;
+ struct kref ref;
+ struct rb_node rb_node;
+ struct list_head list_node;
+ u64 off;
+ u32 len;
+ u32 flags;
+ struct pcache_cache_pos cache_pos;
+ u64 seg_gen;
+};
+
+#define PCACHE_CACHE_KEY_FLAGS_EMPTY BIT(0)
+#define PCACHE_CACHE_KEY_FLAGS_CLEAN BIT(1)
+
+struct pcache_cache_key_onmedia {
+ __u64 off;
+ __u32 len;
+ __u32 flags;
+ __u32 cache_seg_id;
+ __u32 cache_seg_off;
+ __u64 seg_gen;
+ __u32 data_crc;
+ __u32 reserved;
+};
+
+struct pcache_cache_kset_onmedia {
+ __u32 crc;
+ union {
+ __u32 key_num;
+ __u32 next_cache_seg_id;
+ };
+ __u64 magic;
+ __u64 flags;
+ struct pcache_cache_key_onmedia data[];
+};
+
+struct pcache_cache {
+ struct pcache_backing_dev *backing_dev;
+ struct pcache_cache_dev *cache_dev;
+ struct pcache_cache_ctrl *cache_ctrl;
+ u64 dev_size;
+
+ struct pcache_cache_data_head __percpu *data_heads;
+
+ spinlock_t key_head_lock;
+ struct pcache_cache_pos key_head;
+ u32 n_ksets;
+ struct pcache_cache_kset *ksets;
+
+ struct mutex key_tail_lock;
+ struct pcache_cache_pos key_tail;
+ u64 key_tail_seq;
+ u32 key_tail_index;
+
+ struct mutex dirty_tail_lock;
+ struct pcache_cache_pos dirty_tail;
+ u64 dirty_tail_seq;
+ u32 dirty_tail_index;
+
+ struct pcache_cache_tree req_key_tree;
+ struct work_struct clean_work;
+
+ struct mutex writeback_lock;
+ char wb_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
+ struct pcache_cache_tree writeback_key_tree;
+ struct delayed_work writeback_work;
+ struct {
+ atomic_t pending;
+ u32 advance;
+ int ret;
+ } writeback_ctx;
+
+ char gc_kset_onmedia_buf[PCACHE_KSET_ONMEDIA_SIZE_MAX];
+ struct delayed_work gc_work;
+ atomic_t gc_errors;
+
+ struct mutex cache_info_lock;
+ struct pcache_cache_info cache_info;
+ struct pcache_cache_info *cache_info_addr;
+ u32 info_index;
+
+ u32 n_segs;
+ unsigned long *seg_map;
+ u32 last_cache_seg;
+ bool cache_full;
+ spinlock_t seg_map_lock;
+ struct pcache_cache_segment *segments;
+};
+
+struct workqueue_struct *cache_get_wq(struct pcache_cache *cache);
+
+struct dm_pcache;
+struct pcache_cache_options {
+ u32 cache_mode:4;
+ u32 data_crc:1;
+};
+int pcache_cache_start(struct dm_pcache *pcache);
+void pcache_cache_stop(struct dm_pcache *pcache);
+
+struct pcache_cache_ctrl {
+ /* Updated by gc_thread */
+ struct pcache_cache_pos_onmedia key_tail_pos[PCACHE_META_INDEX_MAX];
+
+ /* Updated by writeback_thread */
+ struct pcache_cache_pos_onmedia dirty_tail_pos[PCACHE_META_INDEX_MAX];
+};
+
+struct pcache_cache_data_head {
+ struct pcache_cache_pos head_pos;
+};
+
+static inline u16 pcache_cache_get_gc_percent(struct pcache_cache *cache)
+{
+ return FIELD_GET(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, cache->cache_info.flags);
+}
+
+int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent);
+
+/* cache key */
+struct pcache_cache_key *cache_key_alloc(struct pcache_cache_tree *cache_tree, gfp_t gfp_mask);
+void cache_key_init(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key);
+void cache_key_get(struct pcache_cache_key *key);
+void cache_key_put(struct pcache_cache_key *key);
+int cache_key_append(struct pcache_cache *cache, struct pcache_cache_key *key, bool force_close);
+void cache_key_insert(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key, bool fixup);
+int cache_key_decode(struct pcache_cache *cache,
+ struct pcache_cache_key_onmedia *key_onmedia,
+ struct pcache_cache_key *key);
+void cache_pos_advance(struct pcache_cache_pos *pos, u32 len);
+
+#define PCACHE_KSET_FLAGS_LAST BIT(0)
+#define PCACHE_KSET_MAGIC 0x676894a64e164f1aULL
+
+struct pcache_cache_kset {
+ struct pcache_cache *cache;
+ spinlock_t kset_lock;
+ struct delayed_work flush_work;
+ struct pcache_cache_kset_onmedia kset_onmedia;
+};
+
+extern struct pcache_cache_kset_onmedia pcache_empty_kset;
+
+#define SUBTREE_WALK_RET_OK 0
+#define SUBTREE_WALK_RET_ERR 1
+#define SUBTREE_WALK_RET_NEED_KEY 2
+#define SUBTREE_WALK_RET_NEED_REQ 3
+#define SUBTREE_WALK_RET_RESEARCH 4
+
+struct pcache_cache_subtree_walk_ctx {
+ struct pcache_cache_tree *cache_tree;
+ struct rb_node *start_node;
+ struct pcache_request *pcache_req;
+ struct pcache_cache_key *key;
+ u32 req_done;
+ int ret;
+
+ /* pre-allocated key and backing_dev_req */
+ struct pcache_cache_key *pre_alloc_key;
+ struct pcache_backing_dev_req *pre_alloc_req;
+
+ struct list_head *delete_key_list;
+ struct list_head *submit_req_list;
+
+ /*
+ * |--------| key_tmp
+ * |====| key
+ */
+ int (*before)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |----------| key_tmp
+ * |=====| key
+ */
+ int (*after)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |----------------| key_tmp
+ * |===========| key
+ */
+ int (*overlap_tail)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |--------| key_tmp
+ * |==========| key
+ */
+ int (*overlap_head)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |----| key_tmp
+ * |==========| key
+ */
+ int (*overlap_contain)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ /*
+ * |-----------| key_tmp
+ * |====| key
+ */
+ int (*overlap_contained)(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx);
+
+ int (*walk_finally)(struct pcache_cache_subtree_walk_ctx *ctx, int ret);
+ bool (*walk_done)(struct pcache_cache_subtree_walk_ctx *ctx);
+};
+
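+/*
+ * A walker typically fills in only the callbacks it needs; a minimal
+ * sketch (read_miss_before() and the local variables are hypothetical):
+ *
+ *	struct pcache_cache_subtree_walk_ctx ctx = {
+ *		.cache_tree = &cache->req_key_tree,
+ *		.start_node = start_node,
+ *		.pcache_req = pcache_req,
+ *		.key = key,
+ *		.before = read_miss_before,
+ *	};
+ *
+ *	ret = cache_subtree_walk(&ctx);
+ *
+ * Each callback returns one of the SUBTREE_WALK_RET_* codes above to
+ * tell the walker whether to continue, restart the search, or back off
+ * so the caller can pre-allocate a key or backing_dev request first.
+ */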
+int cache_subtree_walk(struct pcache_cache_subtree_walk_ctx *ctx);
+struct rb_node *cache_subtree_search(struct pcache_cache_subtree *cache_subtree, struct pcache_cache_key *key,
+ struct rb_node **parentp, struct rb_node ***newp,
+ struct list_head *delete_key_list);
+int cache_kset_close(struct pcache_cache *cache, struct pcache_cache_kset *kset);
+void clean_fn(struct work_struct *work);
+void kset_flush_fn(struct work_struct *work);
+int cache_replay(struct pcache_cache *cache);
+int cache_tree_init(struct pcache_cache *cache, struct pcache_cache_tree *cache_tree, u32 n_subtrees);
+void cache_tree_clear(struct pcache_cache_tree *cache_tree);
+void cache_tree_exit(struct pcache_cache_tree *cache_tree);
+
+/* cache segments */
+struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache);
+int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
+ bool new_cache);
+void cache_seg_get(struct pcache_cache_segment *cache_seg);
+void cache_seg_put(struct pcache_cache_segment *cache_seg);
+void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id);
+
+/* cache request */
+int pcache_cache_flush(struct pcache_cache *cache);
+void miss_read_end_work_fn(struct work_struct *work);
+int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req);
+
+/* gc */
+void pcache_cache_gc_fn(struct work_struct *work);
+
+/* writeback */
+void cache_writeback_exit(struct pcache_cache *cache);
+int cache_writeback_init(struct pcache_cache *cache);
+void cache_writeback_fn(struct work_struct *work);
+
+/* inline functions */
+static inline struct pcache_cache_subtree *get_subtree(struct pcache_cache_tree *cache_tree, u64 off)
+{
+ if (cache_tree->n_subtrees == 1)
+ return &cache_tree->subtrees[0];
+
+ return &cache_tree->subtrees[off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT];
+}
+
+static inline void *cache_pos_addr(struct pcache_cache_pos *pos)
+{
+ return (pos->cache_seg->segment.data + pos->seg_off);
+}
+
+static inline void *get_key_head_addr(struct pcache_cache *cache)
+{
+ return cache_pos_addr(&cache->key_head);
+}
+
+static inline u32 get_kset_id(struct pcache_cache *cache, u64 off)
+{
+ u32 kset_id;
+
+ div_u64_rem(off >> PCACHE_CACHE_SUBTREE_SIZE_SHIFT, cache->n_ksets, &kset_id);
+
+ return kset_id;
+}
+
+static inline struct pcache_cache_kset *get_kset(struct pcache_cache *cache, u32 kset_id)
+{
+ return (void *)cache->ksets + PCACHE_KSET_SIZE * kset_id;
+}
+
+static inline struct pcache_cache_data_head *get_data_head(struct pcache_cache *cache)
+{
+ return this_cpu_ptr(cache->data_heads);
+}
+
+static inline bool cache_key_empty(struct pcache_cache_key *key)
+{
+ return key->flags & PCACHE_CACHE_KEY_FLAGS_EMPTY;
+}
+
+static inline bool cache_key_clean(struct pcache_cache_key *key)
+{
+ return key->flags & PCACHE_CACHE_KEY_FLAGS_CLEAN;
+}
+
+static inline void cache_pos_copy(struct pcache_cache_pos *dst, struct pcache_cache_pos *src)
+{
+ memcpy(dst, src, sizeof(struct pcache_cache_pos));
+}
+
+/**
+ * cache_seg_is_ctrl_seg - Checks if a cache segment is a cache ctrl segment.
+ * @cache_seg_id: ID of the cache segment.
+ *
+ * Returns true if the cache segment ID corresponds to a cache ctrl segment.
+ *
+ * Note: We extend the segment control of the first cache segment
+ * (cache segment ID 0) to serve as the cache control (pcache_cache_ctrl)
+ * for the entire PCACHE cache. This function determines whether the given
+ * cache segment is the one storing the pcache_cache_ctrl information.
+ */
+static inline bool cache_seg_is_ctrl_seg(u32 cache_seg_id)
+{
+ return (cache_seg_id == 0);
+}
+
+/**
+ * cache_key_cutfront - Cuts a specified length from the front of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ * @cut_len: Length to cut from the front.
+ *
+ * Advances the cache key position by cut_len and adjusts offset and length accordingly.
+ */
+static inline void cache_key_cutfront(struct pcache_cache_key *key, u32 cut_len)
+{
+ if (key->cache_pos.cache_seg)
+ cache_pos_advance(&key->cache_pos, cut_len);
+
+ key->off += cut_len;
+ key->len -= cut_len;
+}
+
+/**
+ * cache_key_cutback - Cuts a specified length from the back of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ * @cut_len: Length to cut from the back.
+ *
+ * Reduces the length of the cache key by cut_len.
+ */
+static inline void cache_key_cutback(struct pcache_cache_key *key, u32 cut_len)
+{
+ key->len -= cut_len;
+}
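+
+/*
+ * Worked example: for a key covering [4K, 12K) (off = 4K, len = 8K),
+ * cache_key_cutback(key, 4K) keeps [4K, 8K), while
+ * cache_key_cutfront(key, 4K) keeps [8K, 12K) and advances the cached
+ * data position by the same 4K.
+ */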
+
+static inline void cache_key_delete(struct pcache_cache_key *key)
+{
+ struct pcache_cache_subtree *cache_subtree;
+
+ cache_subtree = key->cache_subtree;
+ BUG_ON(!cache_subtree);
+
+ rb_erase(&key->rb_node, &cache_subtree->root);
+ key->flags = 0;
+ cache_key_put(key);
+}
+
+static inline bool cache_data_crc_on(struct pcache_cache *cache)
+{
+ return (cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC);
+}
+
+static inline u32 cache_mode_get(struct pcache_cache *cache)
+{
+ return FIELD_GET(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache->cache_info.flags);
+}
+
+static inline void cache_mode_set(struct pcache_cache *cache, u32 cache_mode)
+{
+ cache->cache_info.flags &= ~PCACHE_CACHE_FLAGS_CACHE_MODE_MASK;
+ cache->cache_info.flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_CACHE_MODE_MASK, cache_mode);
+}
+
+/**
+ * cache_key_data_crc - Calculates CRC for data in a cache key.
+ * @key: Pointer to the pcache_cache_key structure.
+ *
+ * Returns the CRC-32 checksum of the data within the cache key's position.
+ */
+static inline u32 cache_key_data_crc(struct pcache_cache_key *key)
+{
+ void *data;
+
+ data = cache_pos_addr(&key->cache_pos);
+
+ return crc32c(PCACHE_CRC_SEED, data, key->len);
+}
+
+static inline u32 cache_kset_crc(struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ u32 crc_size;
+
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST)
+ crc_size = sizeof(struct pcache_cache_kset_onmedia) - 4;
+ else
+ crc_size = struct_size(kset_onmedia, data, kset_onmedia->key_num) - 4;
+
+ return crc32c(PCACHE_CRC_SEED, (void *)kset_onmedia + 4, crc_size);
+}
+
+static inline u32 get_kset_onmedia_size(struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ return struct_size_t(struct pcache_cache_kset_onmedia, data, kset_onmedia->key_num);
+}
+
+/**
+ * cache_seg_remain - Computes remaining space in a cache segment.
+ * @pos: Pointer to pcache_cache_pos structure.
+ *
+ * Returns the amount of remaining space in the segment data starting from
+ * the current position offset.
+ */
+static inline u32 cache_seg_remain(struct pcache_cache_pos *pos)
+{
+ struct pcache_cache_segment *cache_seg;
+ struct pcache_segment *segment;
+ u32 seg_remain;
+
+ cache_seg = pos->cache_seg;
+ segment = &cache_seg->segment;
+ seg_remain = segment->data_size - pos->seg_off;
+
+ return seg_remain;
+}
+
+/**
+ * cache_key_invalid - Checks if a cache key is invalid.
+ * @key: Pointer to pcache_cache_key structure.
+ *
+ * Returns true if the cache key is invalid due to its generation being
+ * less than the generation of its segment; otherwise returns false.
+ *
+ * When the GC (garbage collection) thread identifies a segment
+ * as reclaimable, it increments the segment's generation (gen). However,
+ * it does not immediately remove all related cache keys. When accessing
+ * such a cache key, this function can be used to determine if the cache
+ * key has already become invalid.
+ */
+static inline bool cache_key_invalid(struct pcache_cache_key *key)
+{
+ if (cache_key_empty(key))
+ return false;
+
+ return (key->seg_gen < key->cache_pos.cache_seg->gen);
+}
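+
+/*
+ * Example: when GC bumps a segment's gen from 3 to 4, a key recorded
+ * with seg_gen == 3 in that segment becomes invalid and is cleaned up
+ * when next visited, while keys appended afterwards carry seg_gen == 4
+ * and stay valid.
+ */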
+
+/**
+ * cache_key_lstart - Retrieves the logical start offset of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ *
+ * Returns the logical start offset for the cache key.
+ */
+static inline u64 cache_key_lstart(struct pcache_cache_key *key)
+{
+ return key->off;
+}
+
+/**
+ * cache_key_lend - Retrieves the logical end offset of a cache key.
+ * @key: Pointer to pcache_cache_key structure.
+ *
+ * Returns the logical end offset for the cache key.
+ */
+static inline u64 cache_key_lend(struct pcache_cache_key *key)
+{
+ return key->off + key->len;
+}
+
+static inline void cache_key_copy(struct pcache_cache_key *key_dst, struct pcache_cache_key *key_src)
+{
+ key_dst->off = key_src->off;
+ key_dst->len = key_src->len;
+ key_dst->seg_gen = key_src->seg_gen;
+ key_dst->cache_tree = key_src->cache_tree;
+ key_dst->cache_subtree = key_src->cache_subtree;
+ key_dst->flags = key_src->flags;
+
+ cache_pos_copy(&key_dst->cache_pos, &key_src->cache_pos);
+}
+
+/**
+ * cache_pos_onmedia_crc - Calculates the CRC for an on-media cache position.
+ * @pos_om: Pointer to pcache_cache_pos_onmedia structure.
+ *
+ * Calculates the CRC-32 checksum of the position, excluding the first 4 bytes.
+ * Returns the computed CRC value.
+ */
+static inline u32 cache_pos_onmedia_crc(struct pcache_cache_pos_onmedia *pos_om)
+{
+ return pcache_meta_crc(&pos_om->header, sizeof(struct pcache_cache_pos_onmedia));
+}
+
+void cache_pos_encode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia,
+ struct pcache_cache_pos *pos, u64 seq, u32 *index);
+int cache_pos_decode(struct pcache_cache *cache,
+ struct pcache_cache_pos_onmedia *pos_onmedia,
+ struct pcache_cache_pos *pos, u64 *seq, u32 *index);
+
+static inline void cache_encode_key_tail(struct pcache_cache *cache)
+{
+ cache_pos_encode(cache, cache->cache_ctrl->key_tail_pos,
+ &cache->key_tail, ++cache->key_tail_seq,
+ &cache->key_tail_index);
+}
+
+static inline int cache_decode_key_tail(struct pcache_cache *cache)
+{
+ return cache_pos_decode(cache, cache->cache_ctrl->key_tail_pos,
+ &cache->key_tail, &cache->key_tail_seq,
+ &cache->key_tail_index);
+}
+
+static inline void cache_encode_dirty_tail(struct pcache_cache *cache)
+{
+ cache_pos_encode(cache, cache->cache_ctrl->dirty_tail_pos,
+ &cache->dirty_tail, ++cache->dirty_tail_seq,
+ &cache->dirty_tail_index);
+}
+
+static inline int cache_decode_dirty_tail(struct pcache_cache *cache)
+{
+ return cache_pos_decode(cache, cache->cache_ctrl->dirty_tail_pos,
+ &cache->dirty_tail, &cache->dirty_tail_seq,
+ &cache->dirty_tail_index);
+}
+
+int pcache_cache_init(void);
+void pcache_cache_exit(void);
+#endif /* _PCACHE_CACHE_H */
diff --git a/drivers/md/dm-pcache/cache_dev.c b/drivers/md/dm-pcache/cache_dev.c
new file mode 100644
index 000000000000..ece689e6ce59
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_dev.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/blkdev.h>
+#include <linux/dax.h>
+#include <linux/vmalloc.h>
+#include <linux/parser.h>
+
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "cache.h"
+#include "dm_pcache.h"
+
+static void cache_dev_dax_exit(struct pcache_cache_dev *cache_dev)
+{
+ if (cache_dev->use_vmap)
+ vunmap(cache_dev->mapping);
+}
+
+static int build_vmap(struct dax_device *dax_dev, long total_pages, void **vaddr)
+{
+ struct page **pages;
+ long i = 0, chunk;
+ unsigned long pfn;
+ int ret;
+
+ pages = vmalloc_array(total_pages, sizeof(struct page *));
+ if (!pages)
+ return -ENOMEM;
+
+ do {
+ chunk = dax_direct_access(dax_dev, i, total_pages - i,
+ DAX_ACCESS, NULL, &pfn);
+ if (chunk <= 0) {
+ ret = chunk ? chunk : -EINVAL;
+ goto out_free;
+ }
+
+ if (!pfn_valid(pfn)) {
+ ret = -EOPNOTSUPP;
+ goto out_free;
+ }
+
+ while (chunk-- && i < total_pages) {
+ pages[i++] = pfn_to_page(pfn);
+ pfn++;
+ if (!(i & 15))
+ cond_resched();
+ }
+ } while (i < total_pages);
+
+ *vaddr = vmap(pages, total_pages, VM_MAP, PAGE_KERNEL);
+ if (!*vaddr) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ vfree(pages);
+ return ret;
+}
+
+static int cache_dev_dax_init(struct pcache_cache_dev *cache_dev)
+{
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ struct dax_device *dax_dev;
+ long total_pages, mapped_pages;
+ u64 bdev_size;
+ void *vaddr;
+ int ret;
+ int id;
+ unsigned long pfn;
+
+ dax_dev = cache_dev->dm_dev->dax_dev;
+ /* total size check */
+ bdev_size = bdev_nr_bytes(cache_dev->dm_dev->bdev);
+ if (bdev_size < PCACHE_CACHE_DEV_SIZE_MIN) {
+ pcache_dev_err(pcache, "dax device is too small, required at least %llu",
+ PCACHE_CACHE_DEV_SIZE_MIN);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ total_pages = bdev_size >> PAGE_SHIFT;
+ /* attempt: direct-map the whole range */
+ id = dax_read_lock();
+ mapped_pages = dax_direct_access(dax_dev, 0, total_pages,
+ DAX_ACCESS, &vaddr, &pfn);
+ if (mapped_pages < 0) {
+ pcache_dev_err(pcache, "dax_direct_access failed: %ld\n", mapped_pages);
+ ret = mapped_pages;
+ goto unlock;
+ }
+
+ if (!pfn_valid(pfn)) {
+ ret = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ if (mapped_pages == total_pages) {
+ /* success: contiguous direct mapping */
+ cache_dev->mapping = vaddr;
+ } else {
+ /* need vmap fallback */
+ ret = build_vmap(dax_dev, total_pages, &vaddr);
+ if (ret) {
+ pcache_dev_err(pcache, "vmap fallback failed: %d\n", ret);
+ goto unlock;
+ }
+
+ cache_dev->mapping = vaddr;
+ cache_dev->use_vmap = true;
+ }
+ dax_read_unlock(id);
+
+ return 0;
+unlock:
+ dax_read_unlock(id);
+out:
+ return ret;
+}
+
+void cache_dev_zero_range(struct pcache_cache_dev *cache_dev, void *pos, u32 size)
+{
+ memset(pos, 0, size);
+ dax_flush(cache_dev->dm_dev->dax_dev, pos, size);
+}
+
+static int sb_read(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct pcache_sb *sb_addr = CACHE_DEV_SB(cache_dev);
+
+ if (copy_mc_to_kernel(sb, sb_addr, sizeof(struct pcache_sb)))
+ return -EIO;
+
+ return 0;
+}
+
+static void sb_write(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct pcache_sb *sb_addr = CACHE_DEV_SB(cache_dev);
+
+ memcpy_flushcache(sb_addr, sb, sizeof(struct pcache_sb));
+ pmem_wmb();
+}
+
+static int sb_init(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ u64 nr_segs;
+ u64 cache_dev_size;
+ u64 magic;
+ u32 flags = 0;
+
+ magic = le64_to_cpu(sb->magic);
+ if (magic)
+ return -EEXIST;
+
+ cache_dev_size = bdev_nr_bytes(file_bdev(cache_dev->dm_dev->bdev_file));
+ if (cache_dev_size < PCACHE_CACHE_DEV_SIZE_MIN) {
+ pcache_dev_err(pcache, "dax device is too small, required at least %llu",
+ PCACHE_CACHE_DEV_SIZE_MIN);
+ return -ENOSPC;
+ }
+
+	nr_segs = (cache_dev_size - PCACHE_SEGMENTS_OFF) / PCACHE_SEG_SIZE;
+
+#if defined(__BYTE_ORDER) ? (__BIG_ENDIAN == __BYTE_ORDER) : defined(__BIG_ENDIAN)
+ flags |= PCACHE_SB_F_BIGENDIAN;
+#endif
+ sb->flags = cpu_to_le32(flags);
+ sb->magic = cpu_to_le64(PCACHE_MAGIC);
+ sb->seg_num = cpu_to_le32(nr_segs);
+ sb->crc = cpu_to_le32(crc32c(PCACHE_CRC_SEED, (void *)(sb) + 4, sizeof(struct pcache_sb) - 4));
+
+ cache_dev_zero_range(cache_dev, CACHE_DEV_CACHE_INFO(cache_dev),
+ PCACHE_CACHE_INFO_SIZE * PCACHE_META_INDEX_MAX +
+ PCACHE_CACHE_CTRL_SIZE);
+
+ return 0;
+}
+
+static int sb_validate(struct pcache_cache_dev *cache_dev, struct pcache_sb *sb)
+{
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ u32 flags;
+ u32 crc;
+
+ if (le64_to_cpu(sb->magic) != PCACHE_MAGIC) {
+ pcache_dev_err(pcache, "unexpected magic: %llx\n",
+ le64_to_cpu(sb->magic));
+ return -EINVAL;
+ }
+
+ crc = crc32c(PCACHE_CRC_SEED, (void *)(sb) + 4, sizeof(struct pcache_sb) - 4);
+ if (crc != le32_to_cpu(sb->crc)) {
+ pcache_dev_err(pcache, "corrupted sb: %u, expected: %u\n", crc, le32_to_cpu(sb->crc));
+ return -EINVAL;
+ }
+
+ flags = le32_to_cpu(sb->flags);
+#if defined(__BYTE_ORDER) ? (__BIG_ENDIAN == __BYTE_ORDER) : defined(__BIG_ENDIAN)
+ if (!(flags & PCACHE_SB_F_BIGENDIAN)) {
+ pcache_dev_err(pcache, "cache_dev is not big endian\n");
+ return -EINVAL;
+ }
+#else
+ if (flags & PCACHE_SB_F_BIGENDIAN) {
+ pcache_dev_err(pcache, "cache_dev is big endian\n");
+ return -EINVAL;
+ }
+#endif
+ return 0;
+}
+
+static int cache_dev_init(struct pcache_cache_dev *cache_dev, u32 seg_num)
+{
+ cache_dev->seg_num = seg_num;
+ cache_dev->seg_bitmap = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
+ if (!cache_dev->seg_bitmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void cache_dev_exit(struct pcache_cache_dev *cache_dev)
+{
+ kvfree(cache_dev->seg_bitmap);
+}
+
+void cache_dev_stop(struct dm_pcache *pcache)
+{
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+
+ cache_dev_exit(cache_dev);
+ cache_dev_dax_exit(cache_dev);
+}
+
+int cache_dev_start(struct dm_pcache *pcache)
+{
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+ struct pcache_sb sb;
+ bool format = false;
+ int ret;
+
+ mutex_init(&cache_dev->seg_lock);
+
+ ret = cache_dev_dax_init(cache_dev);
+ if (ret) {
+ pcache_dev_err(pcache, "failed to init cache_dev %s via dax way: %d.",
+ cache_dev->dm_dev->name, ret);
+ goto err;
+ }
+
+ ret = sb_read(cache_dev, &sb);
+ if (ret)
+ goto dax_release;
+
+ if (le64_to_cpu(sb.magic) == 0) {
+ format = true;
+ ret = sb_init(cache_dev, &sb);
+ if (ret < 0)
+ goto dax_release;
+ }
+
+ ret = sb_validate(cache_dev, &sb);
+ if (ret)
+ goto dax_release;
+
+ cache_dev->sb_flags = le32_to_cpu(sb.flags);
+ ret = cache_dev_init(cache_dev, le32_to_cpu(sb.seg_num));
+ if (ret)
+ goto dax_release;
+
+ if (format)
+ sb_write(cache_dev, &sb);
+
+ return 0;
+
+dax_release:
+ cache_dev_dax_exit(cache_dev);
+err:
+ return ret;
+}
+
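+/*
+ * cache_dev_get_empty_segment_id - Claim the first free segment.
+ *
+ * Returns 0 and stores the claimed segment id in *seg_id, or -ENOSPC
+ * when every segment is already in use.
+ */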
+int cache_dev_get_empty_segment_id(struct pcache_cache_dev *cache_dev, u32 *seg_id)
+{
+ int ret;
+
+ mutex_lock(&cache_dev->seg_lock);
+ *seg_id = find_first_zero_bit(cache_dev->seg_bitmap, cache_dev->seg_num);
+ if (*seg_id == cache_dev->seg_num) {
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ __set_bit(*seg_id, cache_dev->seg_bitmap);
+ ret = 0;
+unlock:
+ mutex_unlock(&cache_dev->seg_lock);
+ return ret;
+}
diff --git a/drivers/md/dm-pcache/cache_dev.h b/drivers/md/dm-pcache/cache_dev.h
new file mode 100644
index 000000000000..6251eb4ebe96
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_dev.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_CACHE_DEV_H
+#define _PCACHE_CACHE_DEV_H
+
+#include <linux/device.h>
+#include <linux/device-mapper.h>
+
+#include "pcache_internal.h"
+
+#define PCACHE_MAGIC 0x65B05EFA96C596EFULL
+
+#define PCACHE_SB_OFF (4 * PCACHE_KB)
+#define PCACHE_SB_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_CACHE_INFO_OFF (PCACHE_SB_OFF + PCACHE_SB_SIZE)
+#define PCACHE_CACHE_INFO_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_CACHE_CTRL_OFF (PCACHE_CACHE_INFO_OFF + (PCACHE_CACHE_INFO_SIZE * PCACHE_META_INDEX_MAX))
+#define PCACHE_CACHE_CTRL_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_SEGMENTS_OFF (PCACHE_CACHE_CTRL_OFF + PCACHE_CACHE_CTRL_SIZE)
+#define PCACHE_SEG_INFO_SIZE (4 * PCACHE_KB)
+
+#define PCACHE_CACHE_DEV_SIZE_MIN (512 * PCACHE_MB) /* 512 MB */
+#define PCACHE_SEG_SIZE (16 * PCACHE_MB) /* Size of each PCACHE segment (16 MB) */
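+
+/*
+ * On-media layout implied by the offsets above:
+ *
+ *   [0, 4K)    reserved
+ *   [4K, 8K)   superblock
+ *   then       cache_info x PCACHE_META_INDEX_MAX (4K each)
+ *   then       cache_ctrl (4K)
+ *   then       data segments, PCACHE_SEG_SIZE each
+ */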
+
+#define CACHE_DEV_SB(cache_dev) ((struct pcache_sb *)(cache_dev->mapping + PCACHE_SB_OFF))
+#define CACHE_DEV_CACHE_INFO(cache_dev) ((void *)cache_dev->mapping + PCACHE_CACHE_INFO_OFF)
+#define CACHE_DEV_CACHE_CTRL(cache_dev) ((void *)cache_dev->mapping + PCACHE_CACHE_CTRL_OFF)
+#define CACHE_DEV_SEGMENTS(cache_dev) ((void *)cache_dev->mapping + PCACHE_SEGMENTS_OFF)
+#define CACHE_DEV_SEGMENT(cache_dev, id) ((void *)CACHE_DEV_SEGMENTS(cache_dev) + (u64)id * PCACHE_SEG_SIZE)
+
+/*
+ * PCACHE SB flags configured during formatting
+ *
+ * The PCACHE_SB_F_xxx flags define registration requirements based on cache_dev
+ * formatting. For a machine to register a cache_dev:
+ * - PCACHE_SB_F_BIGENDIAN: Requires a big-endian machine.
+ */
+#define PCACHE_SB_F_BIGENDIAN BIT(0)
+
+struct pcache_sb {
+ __le32 crc;
+ __le32 flags;
+ __le64 magic;
+
+ __le32 seg_num;
+};
+
+struct pcache_cache_dev {
+ u32 sb_flags;
+ u32 seg_num;
+ void *mapping;
+ bool use_vmap;
+
+ struct dm_dev *dm_dev;
+
+ struct mutex seg_lock;
+ unsigned long *seg_bitmap;
+};
+
+struct dm_pcache;
+int cache_dev_start(struct dm_pcache *pcache);
+void cache_dev_stop(struct dm_pcache *pcache);
+
+void cache_dev_zero_range(struct pcache_cache_dev *cache_dev, void *pos, u32 size);
+
+int cache_dev_get_empty_segment_id(struct pcache_cache_dev *cache_dev, u32 *seg_id);
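+
+/*
+ * Illustrative usage sketch (not part of the driver): claim a free
+ * segment and address its data area:
+ *
+ *	u32 seg_id;
+ *	void *data;
+ *
+ *	if (!cache_dev_get_empty_segment_id(cache_dev, &seg_id))
+ *		data = CACHE_DEV_SEGMENT(cache_dev, seg_id);
+ */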
+
+#endif /* _PCACHE_CACHE_DEV_H */
diff --git a/drivers/md/dm-pcache/cache_gc.c b/drivers/md/dm-pcache/cache_gc.c
new file mode 100644
index 000000000000..94f8b276a021
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_gc.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+/**
+ * cache_key_gc - Releases the reference of a cache key segment.
+ * @cache: Pointer to the pcache_cache structure.
+ * @key: Pointer to the cache key to be garbage collected.
+ *
+ * This function decrements the reference count of the cache segment
+ * associated with the given key. If the reference count drops to zero,
+ * the segment may be invalidated and reused.
+ */
+static void cache_key_gc(struct pcache_cache *cache, struct pcache_cache_key *key)
+{
+ cache_seg_put(key->cache_pos.cache_seg);
+}
+
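+/*
+ * need_gc - Decide whether gc should advance key_tail.
+ *
+ * Returns true only if there are flushed keys between key_tail and
+ * dirty_tail, the kset at key_tail is intact (magic and crc match),
+ * and segment usage has reached the configured gc threshold.
+ */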
+static bool need_gc(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail, struct pcache_cache_pos *key_tail)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ void *dirty_addr, *key_addr;
+ u32 segs_used, segs_gc_threshold, to_copy;
+ int ret;
+
+ dirty_addr = cache_pos_addr(dirty_tail);
+ key_addr = cache_pos_addr(key_tail);
+ if (dirty_addr == key_addr) {
+ pcache_dev_debug(pcache, "key tail is equal to dirty tail: %u:%u\n",
+ dirty_tail->cache_seg->cache_seg_id,
+ dirty_tail->seg_off);
+ return false;
+ }
+
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;
+
+ to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - key_tail->seg_off);
+ ret = copy_mc_to_kernel(kset_onmedia, key_addr, to_copy);
+ if (ret) {
+ pcache_dev_err(pcache, "error to read kset: %d", ret);
+ return false;
+ }
+
+ /* Check if kset_onmedia is corrupted */
+ if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
+ pcache_dev_debug(pcache, "gc error: magic is not as expected. key_tail: %u:%u magic: %llx, expected: %llx\n",
+ key_tail->cache_seg->cache_seg_id, key_tail->seg_off,
+ kset_onmedia->magic, PCACHE_KSET_MAGIC);
+ return false;
+ }
+
+ /* Verify the CRC of the kset_onmedia */
+ if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
+ pcache_dev_debug(pcache, "gc error: crc is not as expected. crc: %x, expected: %x\n",
+ cache_kset_crc(kset_onmedia), kset_onmedia->crc);
+ return false;
+ }
+
+ segs_used = bitmap_weight(cache->seg_map, cache->n_segs);
+ segs_gc_threshold = cache->n_segs * pcache_cache_get_gc_percent(cache) / 100;
+ if (segs_used < segs_gc_threshold) {
+ pcache_dev_debug(pcache, "segs_used: %u, segs_gc_threshold: %u\n", segs_used, segs_gc_threshold);
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * last_kset_gc - Advances garbage collection past a segment's last kset.
+ * @cache: Pointer to the pcache_cache structure.
+ * @kset_onmedia: Pointer to the kset_onmedia structure for the last kset.
+ *
+ * Moves key_tail to the next cache segment recorded in the last kset,
+ * then clears the fully collected segment from the segment map.
+ */
+static void last_kset_gc(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_segment *cur_seg, *next_seg;
+
+ cur_seg = cache->key_tail.cache_seg;
+
+ next_seg = &cache->segments[kset_onmedia->next_cache_seg_id];
+
+ mutex_lock(&cache->key_tail_lock);
+ cache->key_tail.cache_seg = next_seg;
+ cache->key_tail.seg_off = 0;
+ cache_encode_key_tail(cache);
+ mutex_unlock(&cache->key_tail_lock);
+
+ pcache_dev_debug(pcache, "gc advance kset seg: %u\n", cur_seg->cache_seg_id);
+
+ spin_lock(&cache->seg_map_lock);
+ __clear_bit(cur_seg->cache_seg_id, cache->seg_map);
+ spin_unlock(&cache->seg_map_lock);
+}
+
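+/*
+ * pcache_cache_gc_fn - Delayed work that advances key_tail toward
+ * dirty_tail, releasing the segment reference held by every key of each
+ * fully written-back kset. It re-arms itself unless pcache is stopping
+ * or a gc error has been recorded.
+ */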
+void pcache_cache_gc_fn(struct work_struct *work)
+{
+ struct pcache_cache *cache = container_of(work, struct pcache_cache, gc_work.work);
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_pos dirty_tail, key_tail;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ struct pcache_cache_key_onmedia *key_onmedia;
+ struct pcache_cache_key *key;
+ int ret;
+ int i;
+
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;
+
+ while (true) {
+ if (pcache_is_stopping(pcache) || atomic_read(&cache->gc_errors))
+ return;
+
+ /* Get new tail positions */
+ mutex_lock(&cache->dirty_tail_lock);
+ cache_pos_copy(&dirty_tail, &cache->dirty_tail);
+ mutex_unlock(&cache->dirty_tail_lock);
+
+ mutex_lock(&cache->key_tail_lock);
+ cache_pos_copy(&key_tail, &cache->key_tail);
+ mutex_unlock(&cache->key_tail_lock);
+
+ if (!need_gc(cache, &dirty_tail, &key_tail))
+ break;
+
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
+ /* Don't move to the next segment if dirty_tail has not moved */
+ if (dirty_tail.cache_seg == key_tail.cache_seg)
+ break;
+
+ last_kset_gc(cache, kset_onmedia);
+ continue;
+ }
+
+ for (i = 0; i < kset_onmedia->key_num; i++) {
+ struct pcache_cache_key key_tmp = { 0 };
+
+ key_onmedia = &kset_onmedia->data[i];
+
+ key = &key_tmp;
+ cache_key_init(&cache->req_key_tree, key);
+
+ ret = cache_key_decode(cache, key_onmedia, key);
+ if (ret) {
+ /* Return without re-arming the gc work, and prevent any
+ * future gc: a partially GC-ed kset cannot be retried.
+ */
+ atomic_inc(&cache->gc_errors);
+ pcache_dev_err(pcache, "failed to decode cache key in gc\n");
+ return;
+ }
+
+ cache_key_gc(cache, key);
+ }
+
+ pcache_dev_debug(pcache, "gc advance: %u:%u %u\n",
+ key_tail.cache_seg->cache_seg_id,
+ key_tail.seg_off,
+ get_kset_onmedia_size(kset_onmedia));
+
+ mutex_lock(&cache->key_tail_lock);
+ cache_pos_advance(&cache->key_tail, get_kset_onmedia_size(kset_onmedia));
+ cache_encode_key_tail(cache);
+ mutex_unlock(&cache->key_tail_lock);
+ }
+
+ queue_delayed_work(cache_get_wq(cache), &cache->gc_work, PCACHE_CACHE_GC_INTERVAL);
+}
diff --git a/drivers/md/dm-pcache/cache_key.c b/drivers/md/dm-pcache/cache_key.c
new file mode 100644
index 000000000000..2b77e121f89b
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_key.c
@@ -0,0 +1,888 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+struct pcache_cache_kset_onmedia pcache_empty_kset = { 0 };
+
+void cache_key_init(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key)
+{
+ kref_init(&key->ref);
+ key->cache_tree = cache_tree;
+ INIT_LIST_HEAD(&key->list_node);
+ RB_CLEAR_NODE(&key->rb_node);
+}
+
+struct pcache_cache_key *cache_key_alloc(struct pcache_cache_tree *cache_tree, gfp_t gfp_mask)
+{
+ struct pcache_cache_key *key;
+
+ key = mempool_alloc(&cache_tree->key_pool, gfp_mask);
+ if (!key)
+ return NULL;
+
+ memset(key, 0, sizeof(struct pcache_cache_key));
+ cache_key_init(cache_tree, key);
+
+ return key;
+}
+
+/**
+ * cache_key_get - Increment the reference count of a cache key.
+ * @key: Pointer to the pcache_cache_key structure.
+ *
+ * This function increments the reference count of the specified cache key,
+ * ensuring that it is not freed while still in use.
+ */
+void cache_key_get(struct pcache_cache_key *key)
+{
+ kref_get(&key->ref);
+}
+
+/**
+ * cache_key_destroy - Free a cache key structure when its reference count drops to zero.
+ * @ref: Pointer to the kref structure.
+ *
+ * This function is called when the reference count of the cache key reaches zero.
+ * It frees the allocated cache key back to the slab cache.
+ */
+static void cache_key_destroy(struct kref *ref)
+{
+ struct pcache_cache_key *key = container_of(ref, struct pcache_cache_key, ref);
+ struct pcache_cache_tree *cache_tree = key->cache_tree;
+
+ mempool_free(key, &cache_tree->key_pool);
+}
+
+void cache_key_put(struct pcache_cache_key *key)
+{
+ kref_put(&key->ref, cache_key_destroy);
+}
+
+void cache_pos_advance(struct pcache_cache_pos *pos, u32 len)
+{
+ /* Ensure enough space remains in the current segment */
+ BUG_ON(cache_seg_remain(pos) < len);
+
+ pos->seg_off += len;
+}
+
+static void cache_key_encode(struct pcache_cache *cache,
+ struct pcache_cache_key_onmedia *key_onmedia,
+ struct pcache_cache_key *key)
+{
+ key_onmedia->off = key->off;
+ key_onmedia->len = key->len;
+
+ key_onmedia->cache_seg_id = key->cache_pos.cache_seg->cache_seg_id;
+ key_onmedia->cache_seg_off = key->cache_pos.seg_off;
+
+ key_onmedia->seg_gen = key->seg_gen;
+ key_onmedia->flags = key->flags;
+
+ if (cache_data_crc_on(cache))
+ key_onmedia->data_crc = cache_key_data_crc(key);
+}
+
+int cache_key_decode(struct pcache_cache *cache,
+ struct pcache_cache_key_onmedia *key_onmedia,
+ struct pcache_cache_key *key)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+
+ key->off = key_onmedia->off;
+ key->len = key_onmedia->len;
+
+ key->cache_pos.cache_seg = &cache->segments[key_onmedia->cache_seg_id];
+ key->cache_pos.seg_off = key_onmedia->cache_seg_off;
+
+ key->seg_gen = key_onmedia->seg_gen;
+ key->flags = key_onmedia->flags;
+
+ if (cache_data_crc_on(cache) &&
+ key_onmedia->data_crc != cache_key_data_crc(key)) {
+ pcache_dev_err(pcache, "key: %llu:%u seg %u:%u data_crc error: %x, expected: %x\n",
+ key->off, key->len, key->cache_pos.cache_seg->cache_seg_id,
+ key->cache_pos.seg_off, cache_key_data_crc(key), key_onmedia->data_crc);
+ return -EIO;
+ }
+
+ return 0;
+}
+
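+/*
+ * append_last_kset - Write a terminating "last" kset that records the
+ * next cache segment, so gc and replay know where the key log continues.
+ */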
+static void append_last_kset(struct pcache_cache *cache, u32 next_seg)
+{
+ struct pcache_cache_kset_onmedia kset_onmedia = { 0 };
+
+ kset_onmedia.flags |= PCACHE_KSET_FLAGS_LAST;
+ kset_onmedia.next_cache_seg_id = next_seg;
+ kset_onmedia.magic = PCACHE_KSET_MAGIC;
+ kset_onmedia.crc = cache_kset_crc(&kset_onmedia);
+
+ memcpy_flushcache(get_key_head_addr(cache), &kset_onmedia, sizeof(struct pcache_cache_kset_onmedia));
+ pmem_wmb();
+ cache_pos_advance(&cache->key_head, sizeof(struct pcache_cache_kset_onmedia));
+}
+
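+/*
+ * cache_kset_close - Write the in-memory kset onto the cache_dev.
+ *
+ * Space for a trailing last kset is always reserved; when the current
+ * segment cannot hold both, a new segment is claimed and linked via
+ * append_last_kset(). Returns -EBUSY if no free segment is available.
+ */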
+int cache_kset_close(struct pcache_cache *cache, struct pcache_cache_kset *kset)
+{
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 kset_onmedia_size;
+ int ret;
+
+ kset_onmedia = &kset->kset_onmedia;
+
+ if (!kset_onmedia->key_num)
+ return 0;
+
+ kset_onmedia_size = struct_size(kset_onmedia, data, kset_onmedia->key_num);
+
+ spin_lock(&cache->key_head_lock);
+again:
+ /* Reserve space for the last kset */
+ if (cache_seg_remain(&cache->key_head) < kset_onmedia_size + sizeof(struct pcache_cache_kset_onmedia)) {
+ struct pcache_cache_segment *next_seg;
+
+ next_seg = get_cache_segment(cache);
+ if (!next_seg) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* clear outdated kset in next seg */
+ memcpy_flushcache(next_seg->segment.data, &pcache_empty_kset,
+ sizeof(struct pcache_cache_kset_onmedia));
+ append_last_kset(cache, next_seg->cache_seg_id);
+ cache->key_head.cache_seg = next_seg;
+ cache->key_head.seg_off = 0;
+ goto again;
+ }
+
+ kset_onmedia->magic = PCACHE_KSET_MAGIC;
+ kset_onmedia->crc = cache_kset_crc(kset_onmedia);
+
+ /* clear outdated kset after current kset */
+ memcpy_flushcache(get_key_head_addr(cache) + kset_onmedia_size, &pcache_empty_kset,
+ sizeof(struct pcache_cache_kset_onmedia));
+ /* write current kset into segment */
+ memcpy_flushcache(get_key_head_addr(cache), kset_onmedia, kset_onmedia_size);
+ pmem_wmb();
+
+ /* reset kset_onmedia */
+ memset(kset_onmedia, 0, sizeof(struct pcache_cache_kset_onmedia));
+ cache_pos_advance(&cache->key_head, kset_onmedia_size);
+
+ ret = 0;
+out:
+ spin_unlock(&cache->key_head_lock);
+
+ return ret;
+}
+
+/**
+ * cache_key_append - Append a cache key to the related kset.
+ * @cache: Pointer to the pcache_cache structure.
+ * @key: Pointer to the cache key structure to append.
+ * @force_close: Close the current kset immediately if true.
+ *
+ * This function appends a cache key to the appropriate kset. If the kset
+ * is full, it closes the kset. If not, it queues a flush work to write
+ * the kset to media.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int cache_key_append(struct pcache_cache *cache, struct pcache_cache_key *key, bool force_close)
+{
+ struct pcache_cache_kset *kset;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ struct pcache_cache_key_onmedia *key_onmedia;
+ u32 kset_id = get_kset_id(cache, key->off);
+ int ret = 0;
+
+ kset = get_kset(cache, kset_id);
+ kset_onmedia = &kset->kset_onmedia;
+
+ spin_lock(&kset->kset_lock);
+ key_onmedia = &kset_onmedia->data[kset_onmedia->key_num];
+ cache_key_encode(cache, key_onmedia, key);
+
+ /* Check if the current kset has reached the maximum number of keys */
+ if (++kset_onmedia->key_num == PCACHE_KSET_KEYS_MAX || force_close) {
+ /* If full, close the kset */
+ ret = cache_kset_close(cache, kset);
+ if (ret) {
+ kset_onmedia->key_num--;
+ goto out;
+ }
+ } else {
+ /* If not full, queue a delayed work to flush the kset */
+ queue_delayed_work(cache_get_wq(cache), &kset->flush_work, 1 * HZ);
+ }
+out:
+ spin_unlock(&kset->kset_lock);
+
+ return ret;
+}
+
+/**
+ * cache_subtree_walk - Traverse the cache tree.
+ * @ctx: Pointer to the context structure for traversal.
+ *
+ * This function traverses the cache tree starting from the specified node.
+ * It calls the appropriate callback functions based on the relationships
+ * between the keys in the cache tree.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int cache_subtree_walk(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache_key *key_tmp, *key;
+ struct rb_node *node_tmp;
+ int ret = SUBTREE_WALK_RET_OK;
+
+ key = ctx->key;
+ node_tmp = ctx->start_node;
+
+ while (node_tmp) {
+ if (ctx->walk_done && ctx->walk_done(ctx))
+ break;
+
+ key_tmp = CACHE_KEY(node_tmp);
+ /*
+ * If key_tmp ends before the start of key, continue to the next node.
+ * |----------|
+ * |=====|
+ */
+ if (cache_key_lend(key_tmp) <= cache_key_lstart(key)) {
+ if (ctx->after) {
+ ret = ctx->after(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ goto next;
+ }
+
+ /*
+ * If key_tmp starts after the end of key, stop traversing.
+ * |--------|
+ * |====|
+ */
+ if (cache_key_lstart(key_tmp) >= cache_key_lend(key)) {
+ if (ctx->before) {
+ ret = ctx->before(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+
+ /* Handle overlapping keys */
+ if (cache_key_lstart(key_tmp) >= cache_key_lstart(key)) {
+ /*
+ * If key_tmp encompasses key.
+ * |----------------| key_tmp
+ * |===========| key
+ */
+ if (cache_key_lend(key_tmp) >= cache_key_lend(key)) {
+ if (ctx->overlap_tail) {
+ ret = ctx->overlap_tail(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+
+ /*
+ * If key_tmp is contained within key.
+ * |----| key_tmp
+ * |==========| key
+ */
+ if (ctx->overlap_contain) {
+ ret = ctx->overlap_contain(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+
+ goto next;
+ }
+
+ /*
+ * If key_tmp starts before key ends but ends after key.
+ * |-----------| key_tmp
+ * |====| key
+ */
+ if (cache_key_lend(key_tmp) > cache_key_lend(key)) {
+ if (ctx->overlap_contained) {
+ ret = ctx->overlap_contained(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+ break;
+ }
+
+ /*
+ * If key_tmp starts before key and ends within key.
+ * |--------| key_tmp
+ * |==========| key
+ */
+ if (ctx->overlap_head) {
+ ret = ctx->overlap_head(key, key_tmp, ctx);
+ if (ret)
+ goto out;
+ }
+next:
+ node_tmp = rb_next(node_tmp);
+ }
+
+out:
+ if (ctx->walk_finally)
+ ret = ctx->walk_finally(ctx, ret);
+
+ return ret;
+}
+
+/**
+ * cache_subtree_search - Search for a key in the cache tree.
+ * @cache_subtree: Pointer to the cache tree structure.
+ * @key: Pointer to the cache key to search for.
+ * @parentp: Pointer to store the parent node of the found node.
+ * @newp: Pointer to store the location where the new node should be inserted.
+ * @delete_key_list: List to collect invalid keys for deletion.
+ *
+ * This function searches the cache tree for a specific key and returns
+ * the node that is the predecessor of the key, or the first node if the key is
+ * less than all keys in the tree. If any invalid keys are found during
+ * the search, they are added to the delete_key_list for later cleanup.
+ *
+ * Returns a pointer to the previous node.
+ */
+struct rb_node *cache_subtree_search(struct pcache_cache_subtree *cache_subtree, struct pcache_cache_key *key,
+ struct rb_node **parentp, struct rb_node ***newp,
+ struct list_head *delete_key_list)
+{
+ struct rb_node **new, *parent = NULL;
+ struct pcache_cache_key *key_tmp;
+ struct rb_node *prev_node = NULL;
+
+ new = &(cache_subtree->root.rb_node);
+ while (*new) {
+ key_tmp = container_of(*new, struct pcache_cache_key, rb_node);
+ if (cache_key_invalid(key_tmp))
+ list_add(&key_tmp->list_node, delete_key_list);
+
+ parent = *new;
+ if (key_tmp->off >= key->off) {
+ new = &((*new)->rb_left);
+ } else {
+ prev_node = *new;
+ new = &((*new)->rb_right);
+ }
+ }
+
+ if (!prev_node)
+ prev_node = rb_first(&cache_subtree->root);
+
+ if (parentp)
+ *parentp = parent;
+
+ if (newp)
+ *newp = new;
+
+ return prev_node;
+}
+
+static struct pcache_cache_key *get_pre_alloc_key(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache_key *key;
+
+ if (ctx->pre_alloc_key) {
+ key = ctx->pre_alloc_key;
+ ctx->pre_alloc_key = NULL;
+
+ return key;
+ }
+
+ return cache_key_alloc(ctx->cache_tree, GFP_NOWAIT);
+}
+
+/**
+ * fixup_overlap_tail - Adjust the key when it overlaps at the tail.
+ * @key: Pointer to the new cache key being inserted.
+ * @key_tmp: Pointer to the existing key that overlaps.
+ * @ctx: Pointer to the context for walking the cache tree.
+ *
+ * This function modifies the existing key (key_tmp) when there is an
+ * overlap at the tail with the new key. If the modified key becomes
+ * empty, it is deleted.
+ */
+static int fixup_overlap_tail(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ /*
+ * |----------------| key_tmp
+ * |===========| key
+ */
+ BUG_ON(cache_key_empty(key));
+ if (cache_key_empty(key_tmp)) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ cache_key_cutfront(key_tmp, cache_key_lend(key) - cache_key_lstart(key_tmp));
+ if (key_tmp->len == 0) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * fixup_overlap_contain - Handle case where new key completely contains an existing key.
+ * @key: Pointer to the new cache key being inserted.
+ * @key_tmp: Pointer to the existing key that is being contained.
+ * @ctx: Pointer to the context for walking the cache tree.
+ *
+ * This function deletes the existing key (key_tmp) when the new key
+ * completely contains it. It returns SUBTREE_WALK_RET_RESEARCH to indicate that the
+ * tree structure may have changed, necessitating a re-insertion of
+ * the new key.
+ */
+static int fixup_overlap_contain(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ /*
+ * |----| key_tmp
+ * |==========| key
+ */
+ BUG_ON(cache_key_empty(key));
+ cache_key_delete(key_tmp);
+
+ return SUBTREE_WALK_RET_RESEARCH;
+}
+
+/**
+ * fixup_overlap_contained - Handle overlap when a new key is contained in an existing key.
+ * @key: The new cache key being inserted.
+ * @key_tmp: The existing cache key that overlaps with the new key.
+ * @ctx: Context for the cache tree walk.
+ *
+ * This function adjusts the existing key if the new key is contained
+ * within it. If the existing key is empty, it indicates a placeholder key
+ * that was inserted during a miss read. This placeholder will later be
+ * updated with real data from the backing_dev, making it no longer an empty key.
+ *
+ * If we delete key or insert a key, the structure of the entire cache tree may change,
+ * requiring a full research of the tree to find a new insertion point.
+ */
+static int fixup_overlap_contained(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp, struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache_tree *cache_tree = ctx->cache_tree;
+
+ /*
+ * |-----------| key_tmp
+ * |====| key
+ */
+ BUG_ON(cache_key_empty(key));
+ if (cache_key_empty(key_tmp)) {
+ /* If key_tmp is empty, don't split it;
+ * it's a placeholder key for miss reads that will be updated later.
+ */
+ cache_key_cutback(key_tmp, cache_key_lend(key_tmp) - cache_key_lstart(key));
+ if (key_tmp->len == 0) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+ } else {
+ struct pcache_cache_key *key_fixup;
+ bool need_research = false;
+
+ key_fixup = get_pre_alloc_key(ctx);
+ if (!key_fixup)
+ return SUBTREE_WALK_RET_NEED_KEY;
+
+ cache_key_copy(key_fixup, key_tmp);
+
+ /* Split key_tmp based on the new key's range */
+ cache_key_cutback(key_tmp, cache_key_lend(key_tmp) - cache_key_lstart(key));
+ if (key_tmp->len == 0) {
+ cache_key_delete(key_tmp);
+ need_research = true;
+ }
+
+ /* Create a new portion for key_fixup */
+ cache_key_cutfront(key_fixup, cache_key_lend(key) - cache_key_lstart(key_tmp));
+ if (key_fixup->len == 0) {
+ cache_key_put(key_fixup);
+ } else {
+ /* Insert the new key into the cache */
+ cache_key_insert(cache_tree, key_fixup, false);
+ need_research = true;
+ }
+
+ if (need_research)
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * fixup_overlap_head - Handle overlap when a new key overlaps with the head of an existing key.
+ * @key: The new cache key being inserted.
+ * @key_tmp: The existing cache key that overlaps with the new key.
+ * @ctx: Context for the cache tree walk.
+ *
+ * This function adjusts the existing key if the new key overlaps
+ * with the beginning of it. If the resulting key length is zero
+ * after the adjustment, the key is deleted. This indicates that
+ * the key no longer holds valid data and requires the tree to be
+ * re-researched for a new insertion point.
+ */
+static int fixup_overlap_head(struct pcache_cache_key *key,
+ struct pcache_cache_key *key_tmp, struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ /*
+ * |--------| key_tmp
+ * |==========| key
+ */
+ BUG_ON(cache_key_empty(key));
+ /* Adjust key_tmp by cutting back based on the new key's start */
+ cache_key_cutback(key_tmp, cache_key_lend(key_tmp) - cache_key_lstart(key));
+ if (key_tmp->len == 0) {
+ /* If the adjusted key_tmp length is zero, delete it */
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * cache_key_insert - Insert a new cache key into the cache tree.
+ * @cache_tree: Pointer to the cache_tree structure.
+ * @key: The cache key to insert.
+ * @fixup: Fix up overlaps with existing keys when true.
+ *
+ * This function searches for the appropriate location to insert
+ * a new cache key into the cache tree. It handles key overlaps
+ * and ensures any invalid keys are removed before insertion.
+ */
+void cache_key_insert(struct pcache_cache_tree *cache_tree, struct pcache_cache_key *key, bool fixup)
+{
+ struct pcache_cache *cache = cache_tree->cache;
+ struct pcache_cache_subtree_walk_ctx walk_ctx = { 0 };
+ struct rb_node **new, *parent = NULL;
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key_tmp = NULL, *key_next;
+ struct rb_node *prev_node = NULL;
+ LIST_HEAD(delete_key_list);
+ int ret;
+
+ cache_subtree = get_subtree(cache_tree, key->off);
+ key->cache_subtree = cache_subtree;
+search:
+ prev_node = cache_subtree_search(cache_subtree, key, &parent, &new, &delete_key_list);
+ if (!list_empty(&delete_key_list)) {
+ /* Remove invalid keys from the delete list */
+ list_for_each_entry_safe(key_tmp, key_next, &delete_key_list, list_node) {
+ list_del_init(&key_tmp->list_node);
+ cache_key_delete(key_tmp);
+ }
+ goto search;
+ }
+
+ if (fixup) {
+ /* Set up the context with the cache, start node, and new key */
+ walk_ctx.cache_tree = cache_tree;
+ walk_ctx.start_node = prev_node;
+ walk_ctx.key = key;
+
+ /* Assign overlap handling functions for different scenarios */
+ walk_ctx.overlap_tail = fixup_overlap_tail;
+ walk_ctx.overlap_head = fixup_overlap_head;
+ walk_ctx.overlap_contain = fixup_overlap_contain;
+ walk_ctx.overlap_contained = fixup_overlap_contained;
+
+ ret = cache_subtree_walk(&walk_ctx);
+ switch (ret) {
+ case SUBTREE_WALK_RET_OK:
+ break;
+ case SUBTREE_WALK_RET_RESEARCH:
+ goto search;
+ case SUBTREE_WALK_RET_NEED_KEY:
+ spin_unlock(&cache_subtree->tree_lock);
+ pcache_dev_debug(CACHE_TO_PCACHE(cache), "allocate pre_alloc_key with GFP_NOIO");
+ walk_ctx.pre_alloc_key = cache_key_alloc(cache_tree, GFP_NOIO);
+ spin_lock(&cache_subtree->tree_lock);
+ goto search;
+ default:
+ BUG();
+ }
+ }
+
+ if (walk_ctx.pre_alloc_key)
+ cache_key_put(walk_ctx.pre_alloc_key);
+
+ /* Link and insert the new key into the red-black tree */
+ rb_link_node(&key->rb_node, parent, new);
+ rb_insert_color(&key->rb_node, &cache_subtree->root);
+}
+
+/**
+ * clean_fn - Cleanup function to remove invalid keys from the cache tree.
+ * @work: Pointer to the work_struct associated with the cleanup.
+ *
+ * This function cleans up invalid keys from the cache tree in the background
+ * after a cache segment has been invalidated during cache garbage collection.
+ * It processes a maximum of PCACHE_CLEAN_KEYS_MAX keys per iteration and holds
+ * the tree lock to ensure thread safety.
+ */
+void clean_fn(struct work_struct *work)
+{
+ struct pcache_cache *cache = container_of(work, struct pcache_cache, clean_work);
+ struct pcache_cache_subtree *cache_subtree;
+ struct rb_node *node;
+ struct pcache_cache_key *key;
+ int i, count;
+
+ for (i = 0; i < cache->req_key_tree.n_subtrees; i++) {
+ cache_subtree = &cache->req_key_tree.subtrees[i];
+
+again:
+ if (pcache_is_stopping(CACHE_TO_PCACHE(cache)))
+ return;
+
+ /* Delete up to PCACHE_CLEAN_KEYS_MAX keys in one iteration */
+ count = 0;
+ spin_lock(&cache_subtree->tree_lock);
+ node = rb_first(&cache_subtree->root);
+ while (node) {
+ key = CACHE_KEY(node);
+ node = rb_next(node);
+ if (cache_key_invalid(key)) {
+ count++;
+ cache_key_delete(key);
+ }
+
+ if (count >= PCACHE_CLEAN_KEYS_MAX) {
+ /* Unlock and pause before continuing cleanup */
+ spin_unlock(&cache_subtree->tree_lock);
+ usleep_range(1000, 2000);
+ goto again;
+ }
+ }
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+}
+
+/*
+ * kset_flush_fn - Flush work for a cache kset.
+ *
+ * This work is queued from cache_key_append() when a kset is appended
+ * to but not yet full; it closes the kset so its keys reach the media.
+ *
+ * If cache_kset_close() detects that a new segment is required to store
+ * the kset and no segment is available, it returns an error and the
+ * flush work is retried after a short delay.
+ */
+void kset_flush_fn(struct work_struct *work)
+{
+ struct pcache_cache_kset *kset = container_of(work, struct pcache_cache_kset, flush_work.work);
+ struct pcache_cache *cache = kset->cache;
+ int ret;
+
+ if (pcache_is_stopping(CACHE_TO_PCACHE(cache)))
+ return;
+
+ spin_lock(&kset->kset_lock);
+ ret = cache_kset_close(cache, kset);
+ spin_unlock(&kset->kset_lock);
+
+ if (ret) {
+ /* Failed to flush kset, schedule a retry. */
+ queue_delayed_work(cache_get_wq(cache), &kset->flush_work, msecs_to_jiffies(100));
+ }
+}
+
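+/*
+ * kset_replay - Re-insert all keys of one on-media kset into the
+ * in-memory tree during replay, marking the referenced segments used in
+ * seg_map and taking a segment reference for each decoded key.
+ */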
+static int kset_replay(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ struct pcache_cache_key_onmedia *key_onmedia;
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key;
+ int ret;
+ int i;
+
+ for (i = 0; i < kset_onmedia->key_num; i++) {
+ key_onmedia = &kset_onmedia->data[i];
+
+ key = cache_key_alloc(&cache->req_key_tree, GFP_NOIO);
+ ret = cache_key_decode(cache, key_onmedia, key);
+ if (ret) {
+ cache_key_put(key);
+ goto err;
+ }
+
+ __set_bit(key->cache_pos.cache_seg->cache_seg_id, cache->seg_map);
+
+ /* Check if the segment generation is valid for insertion. */
+ if (key->seg_gen < key->cache_pos.cache_seg->gen) {
+ cache_key_put(key);
+ } else {
+ cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+ cache_key_insert(&cache->req_key_tree, key, true);
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+
+ cache_seg_get(key->cache_pos.cache_seg);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
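+/*
+ * cache_replay - Rebuild the in-memory key tree from the on-media key
+ * log at startup, walking ksets from key_tail until an invalid magic or
+ * crc marks the end of the log, then placing key_head there.
+ */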
+int cache_replay(struct pcache_cache *cache)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_pos pos_tail;
+ struct pcache_cache_pos *pos;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 to_copy, count = 0;
+ int ret = 0;
+
+ kset_onmedia = kzalloc(PCACHE_KSET_ONMEDIA_SIZE_MAX, GFP_KERNEL);
+ if (!kset_onmedia)
+ return -ENOMEM;
+
+ cache_pos_copy(&pos_tail, &cache->key_tail);
+ pos = &pos_tail;
+
+ /*
+ * During the cache replay stage nothing else accesses
+ * cache->seg_map, so bits can be set without holding
+ * cache->seg_map_lock.
+ */
+ __set_bit(pos->cache_seg->cache_seg_id, cache->seg_map);
+
+ while (true) {
+ to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - pos->seg_off);
+ ret = copy_mc_to_kernel(kset_onmedia, cache_pos_addr(pos), to_copy);
+ if (ret) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (kset_onmedia->magic != PCACHE_KSET_MAGIC ||
+ kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
+ break;
+ }
+
+ /* Process the last kset and prepare for the next segment. */
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
+ struct pcache_cache_segment *next_seg;
+
+ pcache_dev_debug(pcache, "last kset replay, next: %u\n", kset_onmedia->next_cache_seg_id);
+
+ next_seg = &cache->segments[kset_onmedia->next_cache_seg_id];
+
+ pos->cache_seg = next_seg;
+ pos->seg_off = 0;
+
+ __set_bit(pos->cache_seg->cache_seg_id, cache->seg_map);
+ continue;
+ }
+
+ /* Replay the kset and check for errors. */
+ ret = kset_replay(cache, kset_onmedia);
+ if (ret)
+ goto out;
+
+ /* Advance the position after processing the kset. */
+ cache_pos_advance(pos, get_kset_onmedia_size(kset_onmedia));
+ if (++count > 512) {
+ cond_resched();
+ count = 0;
+ }
+ }
+
+ /* Update the key_head position after replaying. */
+ spin_lock(&cache->key_head_lock);
+ cache_pos_copy(&cache->key_head, pos);
+ spin_unlock(&cache->key_head_lock);
+out:
+ kfree(kset_onmedia);
+ return ret;
+}
+
+int cache_tree_init(struct pcache_cache *cache, struct pcache_cache_tree *cache_tree, u32 n_subtrees)
+{
+ int ret;
+ u32 i;
+
+ cache_tree->cache = cache;
+ cache_tree->n_subtrees = n_subtrees;
+
+ ret = mempool_init_slab_pool(&cache_tree->key_pool, 1024, key_cache);
+ if (ret)
+ goto err;
+
+ /*
+ * Allocate and initialize the subtrees array.
+ * Each element is a cache tree structure that contains
+ * an RB tree root and a spinlock for protecting its contents.
+ */
+ cache_tree->subtrees = kvcalloc(cache_tree->n_subtrees, sizeof(struct pcache_cache_subtree), GFP_KERNEL);
+ if (!cache_tree->subtrees) {
+ ret = -ENOMEM;
+ goto key_pool_exit;
+ }
+
+ for (i = 0; i < cache_tree->n_subtrees; i++) {
+ struct pcache_cache_subtree *cache_subtree = &cache_tree->subtrees[i];
+
+ cache_subtree->root = RB_ROOT;
+ spin_lock_init(&cache_subtree->tree_lock);
+ }
+
+ return 0;
+
+key_pool_exit:
+ mempool_exit(&cache_tree->key_pool);
+err:
+ return ret;
+}
+
+void cache_tree_clear(struct pcache_cache_tree *cache_tree)
+{
+ struct pcache_cache_subtree *cache_subtree;
+ struct rb_node *node;
+ struct pcache_cache_key *key;
+ u32 i;
+
+ for (i = 0; i < cache_tree->n_subtrees; i++) {
+ cache_subtree = &cache_tree->subtrees[i];
+
+ spin_lock(&cache_subtree->tree_lock);
+ node = rb_first(&cache_subtree->root);
+ while (node) {
+ key = CACHE_KEY(node);
+ node = rb_next(node);
+
+ cache_key_delete(key);
+ }
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+}
+
+void cache_tree_exit(struct pcache_cache_tree *cache_tree)
+{
+ cache_tree_clear(cache_tree);
+ kvfree(cache_tree->subtrees);
+ mempool_exit(&cache_tree->key_pool);
+}
diff --git a/drivers/md/dm-pcache/cache_req.c b/drivers/md/dm-pcache/cache_req.c
new file mode 100644
index 000000000000..7854a30e07b7
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_req.c
@@ -0,0 +1,836 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+static int cache_data_head_init(struct pcache_cache *cache)
+{
+ struct pcache_cache_segment *next_seg;
+ struct pcache_cache_data_head *data_head;
+
+ data_head = get_data_head(cache);
+ next_seg = get_cache_segment(cache);
+ if (!next_seg)
+ return -EBUSY;
+
+ cache_seg_get(next_seg);
+ data_head->head_pos.cache_seg = next_seg;
+ data_head->head_pos.seg_off = 0;
+
+ return 0;
+}
+
+/**
+ * cache_data_alloc - Allocate data for a cache key.
+ * @cache: Pointer to the cache structure.
+ * @key: Pointer to the cache key to allocate data for.
+ *
+ * This function tries to allocate space from the cache segment specified by the
+ * data head. If the remaining space in the segment is insufficient to allocate
+ * the requested length for the cache key, it will allocate whatever is available
+ * and adjust the key's length accordingly. This function does not allocate
+ * space that crosses segment boundaries.
+ */
+static int cache_data_alloc(struct pcache_cache *cache, struct pcache_cache_key *key)
+{
+ struct pcache_cache_data_head *data_head;
+ struct pcache_cache_pos *head_pos;
+ struct pcache_cache_segment *cache_seg;
+ u32 seg_remain;
+ u32 allocated = 0, to_alloc;
+ int ret = 0;
+
+ preempt_disable();
+ data_head = get_data_head(cache);
+again:
+ to_alloc = key->len - allocated;
+ if (!data_head->head_pos.cache_seg) {
+ seg_remain = 0;
+ } else {
+ cache_pos_copy(&key->cache_pos, &data_head->head_pos);
+ key->seg_gen = key->cache_pos.cache_seg->gen;
+
+ head_pos = &data_head->head_pos;
+ cache_seg = head_pos->cache_seg;
+ seg_remain = cache_seg_remain(head_pos);
+ }
+
+ if (seg_remain > to_alloc) {
+ /* If remaining space in segment is sufficient for the cache key, allocate it. */
+ cache_pos_advance(head_pos, to_alloc);
+ allocated += to_alloc;
+ cache_seg_get(cache_seg);
+ } else if (seg_remain) {
+ /* If remaining space is not enough, allocate the remaining space and adjust the cache key length. */
+ cache_pos_advance(head_pos, seg_remain);
+ key->len = seg_remain;
+
+ /* Get for key: obtain a reference to the cache segment for the key. */
+ cache_seg_get(cache_seg);
+ /* Put for head_pos->cache_seg: release the reference for the current head's segment. */
+ cache_seg_put(head_pos->cache_seg);
+ head_pos->cache_seg = NULL;
+ } else {
+ /* Initialize a new data head if no segment is available. */
+ ret = cache_data_head_init(cache);
+ if (ret)
+ goto out;
+
+ goto again;
+ }
+
+out:
+ preempt_enable();
+
+ return ret;
+}
+
+static int cache_copy_from_req_bio(struct pcache_cache *cache, struct pcache_cache_key *key,
+ struct pcache_request *pcache_req, u32 bio_off)
+{
+ struct pcache_cache_pos *pos = &key->cache_pos;
+ struct pcache_segment *segment;
+
+ segment = &pos->cache_seg->segment;
+
+ return segment_copy_from_bio(segment, pos->seg_off, key->len, pcache_req->bio, bio_off);
+}
+
+static int cache_copy_to_req_bio(struct pcache_cache *cache, struct pcache_request *pcache_req,
+ u32 bio_off, u32 len, struct pcache_cache_pos *pos, u64 key_gen)
+{
+ struct pcache_cache_segment *cache_seg = pos->cache_seg;
+ struct pcache_segment *segment = &cache_seg->segment;
+ int ret;
+
+ spin_lock(&cache_seg->gen_lock);
+ if (key_gen < cache_seg->gen) {
+ spin_unlock(&cache_seg->gen_lock);
+ return -EINVAL;
+ }
+
+ ret = segment_copy_to_bio(segment, pos->seg_off, len, pcache_req->bio, bio_off);
+ spin_unlock(&cache_seg->gen_lock);
+
+ return ret;
+}
+
+/**
+ * miss_read_end_req - Handle the end of a miss read request.
+ * @backing_req: Pointer to the request structure.
+ * @read_ret: Return value of read.
+ *
+ * This function is called when a backing request to read data from
+ * the backing_dev is completed. If the key associated with the request
+ * is empty (a placeholder), it allocates cache space for the key,
+ * copies the data read from the bio into the cache, and updates
+ * the key's status. If the key has been overwritten by a write
+ * request during this process, it will be deleted from the cache
+ * tree and no further action will be taken.
+ */
+static void miss_read_end_req(struct pcache_backing_dev_req *backing_req, int read_ret)
+{
+ void *priv_data = backing_req->priv_data;
+ struct pcache_request *pcache_req = backing_req->req.upper_req;
+ struct pcache_cache *cache = backing_req->backing_dev->cache;
+ int ret;
+
+ if (priv_data) {
+ struct pcache_cache_key *key;
+ struct pcache_cache_subtree *cache_subtree;
+
+ key = (struct pcache_cache_key *)priv_data;
+ cache_subtree = key->cache_subtree;
+
+ /* If this key was deleted from the cache_subtree by a write, its
+ * flags were cleared, so cache_key_empty() returning true means
+ * the key is still in the cache_subtree.
+ */
+ spin_lock(&cache_subtree->tree_lock);
+ if (cache_key_empty(key)) {
+ /* Check if the backing request was successful. */
+ if (read_ret) {
+ cache_key_delete(key);
+ goto unlock;
+ }
+
+ /* Allocate cache space for the key and copy data from the backing_dev. */
+ ret = cache_data_alloc(cache, key);
+ if (ret) {
+ cache_key_delete(key);
+ goto unlock;
+ }
+
+ ret = cache_copy_from_req_bio(cache, key, pcache_req, backing_req->req.bio_off);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_delete(key);
+ goto unlock;
+ }
+ key->flags &= ~PCACHE_CACHE_KEY_FLAGS_EMPTY;
+ key->flags |= PCACHE_CACHE_KEY_FLAGS_CLEAN;
+
+ /* Append the key to the cache. */
+ ret = cache_key_append(cache, key, false);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_delete(key);
+ goto unlock;
+ }
+ }
+unlock:
+ spin_unlock(&cache_subtree->tree_lock);
+ cache_key_put(key);
+ }
+}
+
+/**
+ * submit_cache_miss_req - Submit a backing request when cache data is missing
+ * @cache: The cache context that manages cache operations
+ * @backing_req: The cache request containing information about the read request
+ *
+ * This function is used to handle cases where a cache read request cannot locate
+ * the required data in the cache. When such a miss occurs during `cache_subtree_walk`,
+ * it triggers a backing read request to fetch data from the backing storage.
+ *
+ * If `backing_req->priv_data` is set, it points to a `pcache_cache_key`
+ * representing a new placeholder key, which is inserted into the cache
+ * tree via `cache_key_insert` before the backing request is submitted.
+ */
+static void submit_cache_miss_req(struct pcache_cache *cache, struct pcache_backing_dev_req *backing_req)
+{
+ if (backing_req->priv_data) {
+ struct pcache_cache_key *key;
+
+ /* Attempt to insert the key into the cache if priv_data is set */
+ key = (struct pcache_cache_key *)backing_req->priv_data;
+ cache_key_insert(&cache->req_key_tree, key, true);
+ }
+ backing_dev_req_submit(backing_req, false);
+}
+
+static void cache_miss_req_free(struct pcache_backing_dev_req *backing_req)
+{
+ struct pcache_cache_key *key;
+
+ if (backing_req->priv_data) {
+ key = backing_req->priv_data;
+ backing_req->priv_data = NULL;
+ cache_key_put(key); /* for ->priv_data */
+ cache_key_put(key); /* for init ref in alloc */
+ }
+
+ backing_dev_req_end(backing_req);
+}
+
+static struct pcache_backing_dev_req *cache_miss_req_alloc(struct pcache_cache *cache,
+ struct pcache_request *parent,
+ gfp_t gfp_mask)
+{
+ struct pcache_backing_dev *backing_dev = cache->backing_dev;
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache_key *key = NULL;
+ struct pcache_backing_dev_req_opts req_opts = { 0 };
+
+ req_opts.type = BACKING_DEV_REQ_TYPE_REQ;
+ req_opts.gfp_mask = gfp_mask;
+ req_opts.req.upper_req = parent;
+
+ backing_req = backing_dev_req_alloc(backing_dev, &req_opts);
+ if (!backing_req)
+ return NULL;
+
+ key = cache_key_alloc(&cache->req_key_tree, gfp_mask);
+ if (!key)
+ goto free_backing_req;
+
+ cache_key_get(key);
+ backing_req->priv_data = key;
+
+ return backing_req;
+
+free_backing_req:
+ cache_miss_req_free(backing_req);
+ return NULL;
+}
+
+static void cache_miss_req_init(struct pcache_cache *cache,
+ struct pcache_backing_dev_req *backing_req,
+ struct pcache_request *parent,
+ u32 off, u32 len, bool insert_key)
+{
+ struct pcache_cache_key *key;
+ struct pcache_backing_dev_req_opts req_opts = { 0 };
+
+ req_opts.type = BACKING_DEV_REQ_TYPE_REQ;
+ req_opts.req.upper_req = parent;
+ req_opts.req.req_off = off;
+ req_opts.req.len = len;
+ req_opts.end_fn = miss_read_end_req;
+
+ backing_dev_req_init(backing_req, &req_opts);
+
+ if (insert_key) {
+ key = backing_req->priv_data;
+ key->off = parent->off + off;
+ key->len = len;
+ key->flags |= PCACHE_CACHE_KEY_FLAGS_EMPTY;
+ } else {
+ key = backing_req->priv_data;
+ backing_req->priv_data = NULL;
+ cache_key_put(key); /* for ->priv_data */
+ cache_key_put(key); /* for init ref in alloc */
+ }
+}
+
+static struct pcache_backing_dev_req *get_pre_alloc_req(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_request *pcache_req = ctx->pcache_req;
+ struct pcache_backing_dev_req *backing_req;
+
+ if (ctx->pre_alloc_req) {
+ backing_req = ctx->pre_alloc_req;
+ ctx->pre_alloc_req = NULL;
+
+ return backing_req;
+ }
+
+ return cache_miss_req_alloc(cache, pcache_req, GFP_NOWAIT);
+}
+
+/*
+ * In the process of walking the cache tree to locate cached data, this
+ * function handles the situation where the requested data range lies
+ * entirely before an existing cache node (`key_tmp`). This outcome
+ * signifies that the target data is absent from the cache (cache miss).
+ *
+ * To fulfill this portion of the read request, the function creates a
+ * backing request (`backing_req`) for the missing data range represented
+ * by `key`. It then appends this request to the submission list in the
+ * `ctx`, which will later be processed to retrieve the data from backing
+ * storage. After setting up the backing request, `req_done` in `ctx` is
+ * updated to reflect the length of the handled range, and the range
+ * in `key` is adjusted by trimming off the portion that is now handled.
+ *
+ * The scenario handled here:
+ *
+ * |--------| key_tmp (existing cached range)
+ * |====| key (requested range, preceding key_tmp)
+ *
+ * Since `key` is before `key_tmp`, it signifies that the requested data
+ * range is missing in the cache (cache miss) and needs retrieval from
+ * backing storage.
+ */
+static int read_before(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+
+ /*
+ * In this scenario, `key` represents a range that precedes `key_tmp`,
+ * meaning the requested data range is missing from the cache tree
+ * and must be retrieved from the backing_dev.
+ */
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, key->len, true);
+
+ list_add(&backing_req->node, ctx->submit_req_list);
+ ctx->req_done += key->len;
+ cache_key_cutfront(key, key->len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * During cache_subtree_walk, this function manages a scenario where part of the
+ * requested data range overlaps with an existing cache node (`key_tmp`).
+ *
+ * |----------------| key_tmp (existing cached range)
+ * |===========| key (requested range, overlapping the tail of key_tmp)
+ */
+static int read_overlap_tail(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ u32 io_len;
+ int ret;
+
+ /*
+ * Calculate the length of the non-overlapping portion of `key`
+ * before `key_tmp`, representing the data missing in the cache.
+ */
+ io_len = cache_key_lstart(key_tmp) - cache_key_lstart(key);
+ if (io_len) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, true);
+
+ list_add(&backing_req->node, ctx->submit_req_list);
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+ }
+
+ /*
+ * Handle the overlapping portion by calculating the length of
+ * the remaining data in `key` that coincides with `key_tmp`.
+ */
+ io_len = cache_key_lend(key) - cache_key_lstart(key_tmp);
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ io_len, &key_tmp->cache_pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * |----| key_tmp (existing cached range)
+ * |==========| key (requested range)
+ */
+static int read_overlap_contain(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ u32 io_len;
+ int ret;
+
+ /*
+ * Calculate the non-overlapping part of `key` before `key_tmp`
+ * to identify the missing data length.
+ */
+ io_len = cache_key_lstart(key_tmp) - cache_key_lstart(key);
+ if (io_len) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, true);
+
+ list_add(&backing_req->node, ctx->submit_req_list);
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+ }
+
+ /*
+ * Handle the overlapping portion between `key` and `key_tmp`.
+ */
+ io_len = key_tmp->len;
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ io_len, &key_tmp->cache_pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * |-----------| key_tmp (existing cached range)
+ * |====| key (requested range, fully within key_tmp)
+ *
+ * If `key_tmp` contains valid cached data, this function copies the relevant
+ * portion to the request's bio. Otherwise, it sends a backing request to
+ * fetch the required data range.
+ */
+static int read_overlap_contained(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache_pos pos;
+ int ret;
+
+ /*
+ * Check if `key_tmp` is empty, indicating a miss. If so, initiate
+ * a backing request to fetch the required data for `key`.
+ */
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, key->len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ cache_pos_copy(&pos, &key_tmp->cache_pos);
+ cache_pos_advance(&pos, cache_key_lstart(key) - cache_key_lstart(key_tmp));
+
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ key->len, &pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += key->len;
+ cache_key_cutfront(key, key->len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * |--------| key_tmp (existing cached range)
+ * |==========| key (requested range, overlapping the head of key_tmp)
+ */
+static int read_overlap_head(struct pcache_cache_key *key, struct pcache_cache_key *key_tmp,
+ struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req;
+ struct pcache_cache_pos pos;
+ u32 io_len;
+ int ret;
+
+ io_len = cache_key_lend(key_tmp) - cache_key_lstart(key);
+
+ if (cache_key_empty(key_tmp)) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, io_len, false);
+ submit_cache_miss_req(cache, backing_req);
+ } else {
+ cache_pos_copy(&pos, &key_tmp->cache_pos);
+ cache_pos_advance(&pos, cache_key_lstart(key) - cache_key_lstart(key_tmp));
+
+ ret = cache_copy_to_req_bio(ctx->cache_tree->cache, ctx->pcache_req, ctx->req_done,
+ io_len, &pos, key_tmp->seg_gen);
+ if (ret) {
+ if (ret == -EINVAL) {
+ cache_key_delete(key_tmp);
+ return SUBTREE_WALK_RET_RESEARCH;
+ }
+
+ ctx->ret = ret;
+ return SUBTREE_WALK_RET_ERR;
+ }
+ }
+
+ ctx->req_done += io_len;
+ cache_key_cutfront(key, io_len);
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/**
+ * read_walk_finally - Finalizes the cache read tree walk by submitting any
+ * remaining backing requests
+ * @ctx: Context structure holding information about the cache,
+ * read request, and submission list
+ * @ret: the return value after this walk.
+ *
+ * This function is called at the end of the `cache_subtree_walk` during a
+ * cache read operation. It first iterates through the submission list
+ * of backing requests created during the walk, removing each request
+ * from the list and submitting it. Then, if the walk completed
+ * successfully but part of `key` was not found in the cache tree, it
+ * sends one more backing request to retrieve that remaining range.
+ *
+ * The scenario managed here includes:
+ * - Sending a backing request for the remaining length of `key` if it was
+ * not fulfilled by existing cache entries.
+ * - Iterating through `ctx->submit_req_list` to submit each backing request
+ * enqueued during the walk.
+ *
+ * This ensures all necessary backing requests for cache misses are submitted
+ * to the backing storage to retrieve any data that could not be found in
+ * the cache.
+ */
+static int read_walk_finally(struct pcache_cache_subtree_walk_ctx *ctx, int ret)
+{
+ struct pcache_cache *cache = ctx->cache_tree->cache;
+ struct pcache_backing_dev_req *backing_req, *next_req;
+ struct pcache_cache_key *key = ctx->key;
+
+ list_for_each_entry_safe(backing_req, next_req, ctx->submit_req_list, node) {
+ list_del_init(&backing_req->node);
+ submit_cache_miss_req(ctx->cache_tree->cache, backing_req);
+ }
+
+ if (ret != SUBTREE_WALK_RET_OK)
+ return ret;
+
+ if (key->len) {
+ backing_req = get_pre_alloc_req(ctx);
+ if (!backing_req)
+ return SUBTREE_WALK_RET_NEED_REQ;
+
+ cache_miss_req_init(cache, backing_req, ctx->pcache_req, ctx->req_done, key->len, true);
+ submit_cache_miss_req(cache, backing_req);
+ ctx->req_done += key->len;
+ }
+
+ return SUBTREE_WALK_RET_OK;
+}
+
+/*
+ * This function is used within `cache_subtree_walk` to determine whether the
+ * read operation has covered the requested data length. It compares the
+ * amount of data processed (`ctx->req_done`) with the total data length
+ * specified in the original request (`ctx->pcache_req->data_len`).
+ *
+ * If `req_done` meets or exceeds the required data length, the function
+ * returns `true`, indicating the walk is complete. Otherwise, it returns `false`,
+ * signaling that additional data processing is needed to fulfill the request.
+ */
+static bool read_walk_done(struct pcache_cache_subtree_walk_ctx *ctx)
+{
+ return (ctx->req_done >= ctx->pcache_req->data_len);
+}
+
+/**
+ * cache_read - Process a read request by traversing the cache tree
+ * @cache: Cache structure holding cache trees and related configurations
+ * @pcache_req: Request structure with information about the data to read
+ *
+ * This function attempts to fulfill a read request by traversing the cache tree(s)
+ * to locate cached data for the requested range. If parts of the data are missing
+ * in the cache, backing requests are generated to retrieve the required segments.
+ *
+ * The function operates by initializing a key for the requested data range and
+ * preparing a context (`walk_ctx`) to manage the cache tree traversal. The context
+ * includes pointers to functions (e.g., `read_before`, `read_overlap_tail`) that handle
+ * specific conditions encountered during the traversal. The `walk_finally` and `walk_done`
+ * functions manage the end stages of the traversal, while the `delete_key_list` and
+ * `submit_req_list` lists track any keys to be deleted or requests to be submitted.
+ *
+ * The function first calculates the requested range and checks if it fits within the
+ * current cache tree (based on the tree's size limits). It then locks the cache tree
+ * and performs a search to locate any matching keys. If there are outdated keys,
+ * these are deleted, and the search is restarted to ensure accurate data retrieval.
+ *
+ * If the requested range spans multiple cache trees, the function moves on to the
+ * next tree once the current range has been processed. This continues until the
+ * entire requested data length has been handled.
+ */
+static int cache_read(struct pcache_cache *cache, struct pcache_request *pcache_req)
+{
+ struct pcache_cache_key key_data = { .off = pcache_req->off, .len = pcache_req->data_len };
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key_tmp = NULL, *key_next;
+ struct rb_node *prev_node = NULL;
+ struct pcache_cache_key *key = &key_data;
+ struct pcache_cache_subtree_walk_ctx walk_ctx = { 0 };
+ struct pcache_backing_dev_req *backing_req, *next_req;
+ LIST_HEAD(delete_key_list);
+ LIST_HEAD(submit_req_list);
+ int ret;
+
+ walk_ctx.cache_tree = &cache->req_key_tree;
+ walk_ctx.req_done = 0;
+ walk_ctx.pcache_req = pcache_req;
+ walk_ctx.before = read_before;
+ walk_ctx.overlap_tail = read_overlap_tail;
+ walk_ctx.overlap_head = read_overlap_head;
+ walk_ctx.overlap_contain = read_overlap_contain;
+ walk_ctx.overlap_contained = read_overlap_contained;
+ walk_ctx.walk_finally = read_walk_finally;
+ walk_ctx.walk_done = read_walk_done;
+ walk_ctx.delete_key_list = &delete_key_list;
+ walk_ctx.submit_req_list = &submit_req_list;
+
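+ /*
+ * Process the request one cache subtree at a time, restarting the
+ * walk from the current offset until the whole range is handled.
+ */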
+next:
+ key->off = pcache_req->off + walk_ctx.req_done;
+ key->len = pcache_req->data_len - walk_ctx.req_done;
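+ /* clamp the key so it does not cross a subtree boundary */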
+ if (key->len > PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK))
+ key->len = PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK);
+
+ cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+search:
+ prev_node = cache_subtree_search(cache_subtree, key, NULL, NULL, &delete_key_list);
+ if (!list_empty(&delete_key_list)) {
+ list_for_each_entry_safe(key_tmp, key_next, &delete_key_list, list_node) {
+ list_del_init(&key_tmp->list_node);
+ cache_key_delete(key_tmp);
+ }
+ goto search;
+ }
+
+ walk_ctx.start_node = prev_node;
+ walk_ctx.key = key;
+
+ ret = cache_subtree_walk(&walk_ctx);
+ if (ret == SUBTREE_WALK_RET_RESEARCH)
+ goto search;
+ spin_unlock(&cache_subtree->tree_lock);
+
+ if (ret == SUBTREE_WALK_RET_ERR) {
+ ret = walk_ctx.ret;
+ goto out;
+ }
+
+ if (ret == SUBTREE_WALK_RET_NEED_REQ) {
+ walk_ctx.pre_alloc_req = cache_miss_req_alloc(cache, pcache_req, GFP_NOIO);
+ pcache_dev_debug(CACHE_TO_PCACHE(cache), "allocate pre_alloc_req with GFP_NOIO");
+ }
+
+ if (walk_ctx.req_done < pcache_req->data_len)
+ goto next;
+ ret = 0;
+out:
+ if (walk_ctx.pre_alloc_req)
+ cache_miss_req_free(walk_ctx.pre_alloc_req);
+
+ list_for_each_entry_safe(backing_req, next_req, &submit_req_list, node) {
+ list_del_init(&backing_req->node);
+ backing_dev_req_end(backing_req);
+ }
+
+ return ret;
+}
+
+static int cache_write(struct pcache_cache *cache, struct pcache_request *pcache_req)
+{
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key;
+ u64 offset = pcache_req->off;
+ u32 length = pcache_req->data_len;
+ u32 io_done = 0;
+ int ret;
+
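+ /*
+ * Split the write into per-subtree keys: copy the data into cache
+ * segments, index each key in the tree, then persist it in a kset.
+ */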
+ while (true) {
+ if (io_done >= length)
+ break;
+
+ key = cache_key_alloc(&cache->req_key_tree, GFP_NOIO);
+ key->off = offset + io_done;
+ key->len = length - io_done;
+ if (key->len > PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK))
+ key->len = PCACHE_CACHE_SUBTREE_SIZE - (key->off & PCACHE_CACHE_SUBTREE_SIZE_MASK);
+
+ ret = cache_data_alloc(cache, key);
+ if (ret) {
+ cache_key_put(key);
+ goto err;
+ }
+
+ ret = cache_copy_from_req_bio(cache, key, pcache_req, io_done);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_put(key);
+ goto err;
+ }
+
+ cache_subtree = get_subtree(&cache->req_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+ cache_key_insert(&cache->req_key_tree, key, true);
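+ /* append the key to the current kset to persist it on the cache device */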
+ ret = cache_key_append(cache, key, pcache_req->bio->bi_opf & REQ_FUA);
+ if (ret) {
+ cache_seg_put(key->cache_pos.cache_seg);
+ cache_key_delete(key);
+ goto unlock;
+ }
+
+ io_done += key->len;
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+
+ return 0;
+unlock:
+ spin_unlock(&cache_subtree->tree_lock);
+err:
+ return ret;
+}
+
+/**
+ * pcache_cache_flush - Flush all ksets to persist any pending cache data
+ * @cache: Pointer to the cache structure
+ *
+ * This function iterates through all ksets associated with the provided `cache`
+ * and ensures that any data marked for persistence is written to media. For each
+ * kset, it acquires the kset lock, then invokes `cache_kset_close`, which handles
+ * the persistence logic for that kset.
+ *
+ * If `cache_kset_close` encounters an error, the function exits immediately with
+ * the respective error code, preventing the flush operation from proceeding to
+ * subsequent ksets.
+ */
+int pcache_cache_flush(struct pcache_cache *cache)
+{
+ struct pcache_cache_kset *kset;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < cache->n_ksets; i++) {
+ kset = get_kset(cache, i);
+
+ spin_lock(&kset->kset_lock);
+ ret = cache_kset_close(cache, kset);
+ spin_unlock(&kset->kset_lock);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req)
+{
+ struct bio *bio = pcache_req->bio;
+
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH))
+ return pcache_cache_flush(cache);
+
+ if (bio_data_dir(bio) == READ)
+ return cache_read(cache, pcache_req);
+
+ return cache_write(cache, pcache_req);
+}
diff --git a/drivers/md/dm-pcache/cache_segment.c b/drivers/md/dm-pcache/cache_segment.c
new file mode 100644
index 000000000000..f0b58980806e
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_segment.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "cache_dev.h"
+#include "cache.h"
+#include "backing_dev.h"
+#include "dm_pcache.h"
+
+static inline struct pcache_segment_info *get_seg_info_addr(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_segment_info *seg_info_addr;
+ u32 seg_id = cache_seg->segment.seg_id;
+ void *seg_addr;
+
+ seg_addr = CACHE_DEV_SEGMENT(cache_seg->cache->cache_dev, seg_id);
+ seg_info_addr = seg_addr + PCACHE_SEG_INFO_SIZE * cache_seg->info_index;
+
+ return seg_info_addr;
+}
+
+static void cache_seg_info_write(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_segment_info *seg_info_addr;
+ struct pcache_segment_info *seg_info = &cache_seg->cache_seg_info;
+
+ mutex_lock(&cache_seg->info_lock);
+ seg_info->header.seq++;
+ seg_info->header.crc = pcache_meta_crc(&seg_info->header, sizeof(struct pcache_segment_info));
+
+ seg_info_addr = get_seg_info_addr(cache_seg);
+ memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info));
+ pmem_wmb();
+
+ cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX;
+ mutex_unlock(&cache_seg->info_lock);
+}
+
+static int cache_seg_info_load(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_segment_info *cache_seg_info_addr_base, *cache_seg_info_addr;
+ struct pcache_cache_dev *cache_dev = cache_seg->cache->cache_dev;
+ struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
+ u32 seg_id = cache_seg->segment.seg_id;
+ int ret = 0;
+
+ cache_seg_info_addr_base = CACHE_DEV_SEGMENT(cache_dev, seg_id);
+
+ mutex_lock(&cache_seg->info_lock);
+ cache_seg_info_addr = pcache_meta_find_latest(&cache_seg_info_addr_base->header,
+ sizeof(struct pcache_segment_info),
+ PCACHE_SEG_INFO_SIZE,
+ &cache_seg->cache_seg_info);
+ if (IS_ERR(cache_seg_info_addr)) {
+ ret = PTR_ERR(cache_seg_info_addr);
+ goto out;
+ } else if (!cache_seg_info_addr) {
+ ret = -EIO;
+ goto out;
+ }
+ cache_seg->info_index = cache_seg_info_addr - cache_seg_info_addr_base;
+out:
+ mutex_unlock(&cache_seg->info_lock);
+
+ if (ret)
+ pcache_dev_err(pcache, "can't read segment info of segment: %u, ret: %d\n",
+ cache_seg->segment.seg_id, ret);
+ return ret;
+}
+
+static int cache_seg_ctrl_load(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
+ struct pcache_cache_seg_gen cache_seg_gen, *cache_seg_gen_addr;
+ int ret = 0;
+
+ cache_seg_gen_addr = pcache_meta_find_latest(&cache_seg_ctrl->gen->header,
+ sizeof(struct pcache_cache_seg_gen),
+ sizeof(struct pcache_cache_seg_gen),
+ &cache_seg_gen);
+ if (IS_ERR(cache_seg_gen_addr)) {
+ ret = PTR_ERR(cache_seg_gen_addr);
+ goto out;
+ }
+
+ if (!cache_seg_gen_addr) {
+ cache_seg->gen = 0;
+ cache_seg->gen_seq = 0;
+ cache_seg->gen_index = 0;
+ goto out;
+ }
+
+ cache_seg->gen = cache_seg_gen.gen;
+ cache_seg->gen_seq = cache_seg_gen.header.seq;
+ cache_seg->gen_index = (cache_seg_gen_addr - cache_seg_ctrl->gen);
+out:
+ return ret;
+}
+
+static inline struct pcache_cache_seg_gen *get_cache_seg_gen_addr(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
+
+ return (cache_seg_ctrl->gen + cache_seg->gen_index);
+}
+
+/*
+ * cache_seg_ctrl_write - write cache segment control information
+ * @cache_seg: the cache segment to update
+ *
+ * This function writes the control information of a cache segment to media.
+ *
+ * Although this updates shared control data, we intentionally do not use
+ * any locking here. All accesses to control information are single-threaded:
+ *
+ * - All reads occur during the init phase, where no concurrent writes
+ * can happen.
+ * - Writes happen once during init and once when the last reference
+ * to the segment is dropped in cache_seg_put().
+ *
+ * Both cases are guaranteed to be single-threaded, so there is no risk
+ * of concurrent read/write races.
+ */
+static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache_seg_gen cache_seg_gen;
+
+ cache_seg_gen.gen = cache_seg->gen;
+ cache_seg_gen.header.seq = ++cache_seg->gen_seq;
+ cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header,
+ sizeof(struct pcache_cache_seg_gen));
+
+ memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen, sizeof(struct pcache_cache_seg_gen));
+ pmem_wmb();
+
+ cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;
+}
+
+static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg)
+{
+ cache_seg->gen = 0;
+ cache_seg->gen_seq = 0;
+ cache_seg->gen_index = 0;
+ cache_seg_ctrl_write(cache_seg);
+}
+
+static int cache_seg_meta_load(struct pcache_cache_segment *cache_seg)
+{
+ int ret;
+
+ ret = cache_seg_info_load(cache_seg);
+ if (ret)
+ goto err;
+
+ ret = cache_seg_ctrl_load(cache_seg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ return ret;
+}
+
+/**
+ * cache_seg_set_next_seg - Sets the ID of the next segment
+ * @cache_seg: Pointer to the cache segment structure.
+ * @seg_id: The segment ID to set as the next segment.
+ *
+ * A pcache_cache allocates multiple cache segments, which are linked together
+ * through next_seg. When loading a pcache_cache, the first cache segment can
+ * be found using cache->seg_id, which allows access to all the cache segments.
+ */
+void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id)
+{
+ cache_seg->cache_seg_info.flags |= PCACHE_SEG_INFO_FLAGS_HAS_NEXT;
+ cache_seg->cache_seg_info.next_seg = seg_id;
+ cache_seg_info_write(cache_seg);
+}
+
+int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
+ bool new_cache)
+{
+ struct pcache_cache_dev *cache_dev = cache->cache_dev;
+ struct pcache_cache_segment *cache_seg = &cache->segments[cache_seg_id];
+ struct pcache_segment_init_options seg_options = { 0 };
+ struct pcache_segment *segment = &cache_seg->segment;
+ int ret;
+
+ cache_seg->cache = cache;
+ cache_seg->cache_seg_id = cache_seg_id;
+ spin_lock_init(&cache_seg->gen_lock);
+ atomic_set(&cache_seg->refs, 0);
+ mutex_init(&cache_seg->info_lock);
+
+ /* init pcache_segment */
+ seg_options.type = PCACHE_SEGMENT_TYPE_CACHE_DATA;
+ seg_options.data_off = PCACHE_CACHE_SEG_CTRL_OFF + PCACHE_CACHE_SEG_CTRL_SIZE;
+ seg_options.seg_id = seg_id;
+ seg_options.seg_info = &cache_seg->cache_seg_info;
+ pcache_segment_init(cache_dev, segment, &seg_options);
+
+ cache_seg->cache_seg_ctrl = CACHE_DEV_SEGMENT(cache_dev, seg_id) + PCACHE_CACHE_SEG_CTRL_OFF;
+
+ if (new_cache) {
+ cache_dev_zero_range(cache_dev, CACHE_DEV_SEGMENT(cache_dev, seg_id),
+ PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX +
+ PCACHE_CACHE_SEG_CTRL_SIZE);
+
+ cache_seg_ctrl_init(cache_seg);
+
+ cache_seg->info_index = 0;
+ cache_seg_info_write(cache_seg);
+
+ /* clear outdated kset in segment */
+ memcpy_flushcache(segment->data, &pcache_empty_kset, sizeof(struct pcache_cache_kset_onmedia));
+ pmem_wmb();
+ } else {
+ ret = cache_seg_meta_load(cache_seg);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+/**
+ * get_cache_segment - Retrieves a free cache segment from the cache.
+ * @cache: Pointer to the cache structure.
+ *
+ * This function attempts to find a free cache segment that can be used.
+ * It locks the segment map and searches for the next available segment ID.
+ * If a free segment is found, it is marked used in the segment map and a
+ * pointer to the cache segment structure is returned. Returns NULL if no
+ * segments are available.
+ */
+struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache)
+{
+ struct pcache_cache_segment *cache_seg;
+ u32 seg_id;
+
+ spin_lock(&cache->seg_map_lock);
+again:
+ seg_id = find_next_zero_bit(cache->seg_map, cache->n_segs, cache->last_cache_seg);
+ if (seg_id == cache->n_segs) {
+ /* reset the hint of ->last_cache_seg and retry */
+ if (cache->last_cache_seg) {
+ cache->last_cache_seg = 0;
+ goto again;
+ }
+ cache->cache_full = true;
+ spin_unlock(&cache->seg_map_lock);
+ return NULL;
+ }
+
+ /*
+ * found an available cache_seg, mark it used in seg_map
+ * and update the search hint ->last_cache_seg
+ */
+ __set_bit(seg_id, cache->seg_map);
+ cache->last_cache_seg = seg_id;
+ spin_unlock(&cache->seg_map_lock);
+
+ cache_seg = &cache->segments[seg_id];
+ cache_seg->cache_seg_id = seg_id;
+
+ return cache_seg;
+}
+
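+ /*
+ * Bumping the generation makes any cache keys that still reference
+ * this segment stale; the new generation is then persisted.
+ */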
+static void cache_seg_gen_increase(struct pcache_cache_segment *cache_seg)
+{
+ spin_lock(&cache_seg->gen_lock);
+ cache_seg->gen++;
+ spin_unlock(&cache_seg->gen_lock);
+
+ cache_seg_ctrl_write(cache_seg);
+}
+
+void cache_seg_get(struct pcache_cache_segment *cache_seg)
+{
+ atomic_inc(&cache_seg->refs);
+}
+
+static void cache_seg_invalidate(struct pcache_cache_segment *cache_seg)
+{
+ struct pcache_cache *cache;
+
+ cache = cache_seg->cache;
+ cache_seg_gen_increase(cache_seg);
+
+ spin_lock(&cache->seg_map_lock);
+ if (cache->cache_full)
+ cache->cache_full = false;
+ __clear_bit(cache_seg->cache_seg_id, cache->seg_map);
+ spin_unlock(&cache->seg_map_lock);
+
+ pcache_defer_reqs_kick(CACHE_TO_PCACHE(cache));
+ /* clean_work will clean up the invalid keys in key_tree */
+ queue_work(cache_get_wq(cache), &cache->clean_work);
+}
+
+void cache_seg_put(struct pcache_cache_segment *cache_seg)
+{
+ if (atomic_dec_and_test(&cache_seg->refs))
+ cache_seg_invalidate(cache_seg);
+}
diff --git a/drivers/md/dm-pcache/cache_writeback.c b/drivers/md/dm-pcache/cache_writeback.c
new file mode 100644
index 000000000000..87a82b3fe836
--- /dev/null
+++ b/drivers/md/dm-pcache/cache_writeback.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/bio.h>
+
+#include "cache.h"
+#include "backing_dev.h"
+#include "cache_dev.h"
+#include "dm_pcache.h"
+
+static void writeback_ctx_end(struct pcache_cache *cache, int ret)
+{
+ if (ret && !cache->writeback_ctx.ret) {
+ pcache_dev_err(CACHE_TO_PCACHE(cache), "writeback error: %d", ret);
+ cache->writeback_ctx.ret = ret;
+ }
+
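+ /* only the last completing writeback request advances dirty_tail */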
+ if (!atomic_dec_and_test(&cache->writeback_ctx.pending))
+ return;
+
+ if (!cache->writeback_ctx.ret) {
+ backing_dev_flush(cache->backing_dev);
+
+ mutex_lock(&cache->dirty_tail_lock);
+ cache_pos_advance(&cache->dirty_tail, cache->writeback_ctx.advance);
+ cache_encode_dirty_tail(cache);
+ mutex_unlock(&cache->dirty_tail_lock);
+ }
+ queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, 0);
+}
+
+static void writeback_end_req(struct pcache_backing_dev_req *backing_req, int ret)
+{
+ struct pcache_cache *cache = backing_req->priv_data;
+
+ mutex_lock(&cache->writeback_lock);
+ writeback_ctx_end(cache, ret);
+ mutex_unlock(&cache->writeback_lock);
+}
+
+static inline bool is_cache_clean(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 to_copy;
+ void *addr;
+ int ret;
+
+ addr = cache_pos_addr(dirty_tail);
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->wb_kset_onmedia_buf;
+
+ to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - dirty_tail->seg_off);
+ ret = copy_mc_to_kernel(kset_onmedia, addr, to_copy);
+ if (ret) {
+ pcache_dev_err(pcache, "failed to read kset: %d", ret);
+ return true;
+ }
+
+ /* Check if the magic number matches the expected value */
+ if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
+ pcache_dev_debug(pcache, "dirty_tail: %u:%u magic: %llx, not expected: %llx\n",
+ dirty_tail->cache_seg->cache_seg_id, dirty_tail->seg_off,
+ kset_onmedia->magic, PCACHE_KSET_MAGIC);
+ return true;
+ }
+
+ /* Verify the CRC checksum for data integrity */
+ if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
+ pcache_dev_debug(pcache, "dirty_tail: %u:%u crc: %x, not expected: %x\n",
+ dirty_tail->cache_seg->cache_seg_id, dirty_tail->seg_off,
+ cache_kset_crc(kset_onmedia), kset_onmedia->crc);
+ return true;
+ }
+
+ return false;
+}
+
+void cache_writeback_exit(struct pcache_cache *cache)
+{
+ cancel_delayed_work_sync(&cache->writeback_work);
+ backing_dev_flush(cache->backing_dev);
+ cache_tree_exit(&cache->writeback_key_tree);
+}
+
+int cache_writeback_init(struct pcache_cache *cache)
+{
+ int ret;
+
+ ret = cache_tree_init(cache, &cache->writeback_key_tree, 1);
+ if (ret)
+ goto err;
+
+ atomic_set(&cache->writeback_ctx.pending, 0);
+
+ /* Queue delayed work to start writeback handling */
+ queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, 0);
+
+ return 0;
+err:
+ return ret;
+}
+
+static void cache_key_writeback(struct pcache_cache *cache, struct pcache_cache_key *key)
+{
+ struct pcache_backing_dev_req *writeback_req;
+ struct pcache_backing_dev_req_opts writeback_req_opts = { 0 };
+ struct pcache_cache_pos *pos;
+ void *addr;
+ u32 seg_remain, req_len, done = 0;
+
+ if (cache_key_clean(key))
+ return;
+
+ pos = &key->cache_pos;
+
+ seg_remain = cache_seg_remain(pos);
+ BUG_ON(seg_remain < key->len);
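+ /*
+ * A single key may be written back as several requests when its
+ * cached data cannot be coalesced into one backing request.
+ */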
+next_req:
+ addr = cache_pos_addr(pos) + done;
+ req_len = backing_dev_req_coalesced_max_len(addr, key->len - done);
+
+ writeback_req_opts.type = BACKING_DEV_REQ_TYPE_KMEM;
+ writeback_req_opts.gfp_mask = GFP_NOIO;
+ writeback_req_opts.end_fn = writeback_end_req;
+ writeback_req_opts.priv_data = cache;
+
+ writeback_req_opts.kmem.data = addr;
+ writeback_req_opts.kmem.opf = REQ_OP_WRITE;
+ writeback_req_opts.kmem.len = req_len;
+ writeback_req_opts.kmem.backing_off = key->off + done;
+
+ writeback_req = backing_dev_req_create(cache->backing_dev, &writeback_req_opts);
+
+ atomic_inc(&cache->writeback_ctx.pending);
+ backing_dev_req_submit(writeback_req, true);
+
+ done += req_len;
+ if (done < key->len)
+ goto next_req;
+}
+
+static void cache_wb_tree_writeback(struct pcache_cache *cache, u32 advance)
+{
+ struct pcache_cache_tree *cache_tree = &cache->writeback_key_tree;
+ struct pcache_cache_subtree *cache_subtree;
+ struct rb_node *node;
+ struct pcache_cache_key *key;
+ u32 i;
+
+ cache->writeback_ctx.ret = 0;
+ cache->writeback_ctx.advance = advance;
+ atomic_set(&cache->writeback_ctx.pending, 1);
+
+ for (i = 0; i < cache_tree->n_subtrees; i++) {
+ cache_subtree = &cache_tree->subtrees[i];
+
+ node = rb_first(&cache_subtree->root);
+ while (node) {
+ key = CACHE_KEY(node);
+ node = rb_next(node);
+
+ cache_key_writeback(cache, key);
+ cache_key_delete(key);
+ }
+ }
+ writeback_ctx_end(cache, 0);
+}
+
+static int cache_kset_insert_tree(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
+{
+ struct pcache_cache_key_onmedia *key_onmedia;
+ struct pcache_cache_subtree *cache_subtree;
+ struct pcache_cache_key *key;
+ int ret;
+ u32 i;
+
+ /* Iterate through all keys in the kset and write each back to storage */
+ for (i = 0; i < kset_onmedia->key_num; i++) {
+ key_onmedia = &kset_onmedia->data[i];
+
+ key = cache_key_alloc(&cache->writeback_key_tree, GFP_NOIO);
+ ret = cache_key_decode(cache, key_onmedia, key);
+ if (ret) {
+ cache_key_put(key);
+ goto clear_tree;
+ }
+
+ cache_subtree = get_subtree(&cache->writeback_key_tree, key->off);
+ spin_lock(&cache_subtree->tree_lock);
+ cache_key_insert(&cache->writeback_key_tree, key, true);
+ spin_unlock(&cache_subtree->tree_lock);
+ }
+
+ return 0;
+clear_tree:
+ cache_tree_clear(&cache->writeback_key_tree);
+ return ret;
+}
+
+static void last_kset_writeback(struct pcache_cache *cache,
+ struct pcache_cache_kset_onmedia *last_kset_onmedia)
+{
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_segment *next_seg;
+
+ pcache_dev_debug(pcache, "last kset, next: %u\n", last_kset_onmedia->next_cache_seg_id);
+
+ next_seg = &cache->segments[last_kset_onmedia->next_cache_seg_id];
+
+ mutex_lock(&cache->dirty_tail_lock);
+ cache->dirty_tail.cache_seg = next_seg;
+ cache->dirty_tail.seg_off = 0;
+ cache_encode_dirty_tail(cache);
+ mutex_unlock(&cache->dirty_tail_lock);
+}
+
+void cache_writeback_fn(struct work_struct *work)
+{
+ struct pcache_cache *cache = container_of(work, struct pcache_cache, writeback_work.work);
+ struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
+ struct pcache_cache_pos dirty_tail;
+ struct pcache_cache_kset_onmedia *kset_onmedia;
+ u32 delay;
+ int ret;
+
+ mutex_lock(&cache->writeback_lock);
+ if (atomic_read(&cache->writeback_ctx.pending))
+ goto unlock;
+
+ if (pcache_is_stopping(pcache))
+ goto unlock;
+
+ kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->wb_kset_onmedia_buf;
+
+ mutex_lock(&cache->dirty_tail_lock);
+ cache_pos_copy(&dirty_tail, &cache->dirty_tail);
+ mutex_unlock(&cache->dirty_tail_lock);
+
+ if (is_cache_clean(cache, &dirty_tail)) {
+ delay = PCACHE_CACHE_WRITEBACK_INTERVAL;
+ goto queue_work;
+ }
+
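+ /* a LAST kset marks the end of this segment; move dirty_tail to the next one */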
+ if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
+ last_kset_writeback(cache, kset_onmedia);
+ delay = 0;
+ goto queue_work;
+ }
+
+ ret = cache_kset_insert_tree(cache, kset_onmedia);
+ if (ret) {
+ delay = PCACHE_CACHE_WRITEBACK_INTERVAL;
+ goto queue_work;
+ }
+
+ cache_wb_tree_writeback(cache, get_kset_onmedia_size(kset_onmedia));
+ delay = 0;
+queue_work:
+ queue_delayed_work(cache_get_wq(cache), &cache->writeback_work, delay);
+unlock:
+ mutex_unlock(&cache->writeback_lock);
+}
diff --git a/drivers/md/dm-pcache/dm_pcache.c b/drivers/md/dm-pcache/dm_pcache.c
new file mode 100644
index 000000000000..e5f5936fa6f0
--- /dev/null
+++ b/drivers/md/dm-pcache/dm_pcache.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+
+#include "../dm-core.h"
+#include "cache_dev.h"
+#include "backing_dev.h"
+#include "cache.h"
+#include "dm_pcache.h"
+
+void pcache_defer_reqs_kick(struct dm_pcache *pcache)
+{
+ struct pcache_cache *cache = &pcache->cache;
+
+ spin_lock(&cache->seg_map_lock);
+ if (!cache->cache_full)
+ queue_work(pcache->task_wq, &pcache->defered_req_work);
+ spin_unlock(&cache->seg_map_lock);
+}
+
+static void defer_req(struct pcache_request *pcache_req)
+{
+ struct dm_pcache *pcache = pcache_req->pcache;
+
+ BUG_ON(!list_empty(&pcache_req->list_node));
+
+ spin_lock(&pcache->defered_req_list_lock);
+ list_add(&pcache_req->list_node, &pcache->defered_req_list);
+ pcache_defer_reqs_kick(pcache);
+ spin_unlock(&pcache->defered_req_list_lock);
+}
+
+static void defered_req_fn(struct work_struct *work)
+{
+ struct dm_pcache *pcache = container_of(work, struct dm_pcache, defered_req_work);
+ struct pcache_request *pcache_req;
+ LIST_HEAD(tmp_list);
+ int ret;
+
+ if (pcache_is_stopping(pcache))
+ return;
+
+ spin_lock(&pcache->defered_req_list_lock);
+ list_splice_init(&pcache->defered_req_list, &tmp_list);
+ spin_unlock(&pcache->defered_req_list_lock);
+
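+ /* retry each deferred request; busy ones are queued up again */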
+ while (!list_empty(&tmp_list)) {
+ pcache_req = list_first_entry(&tmp_list,
+ struct pcache_request, list_node);
+ list_del_init(&pcache_req->list_node);
+ pcache_req->ret = 0;
+ ret = pcache_cache_handle_req(&pcache->cache, pcache_req);
+ if (ret == -EBUSY)
+ defer_req(pcache_req);
+ else
+ pcache_req_put(pcache_req, ret);
+ }
+}
+
+void pcache_req_get(struct pcache_request *pcache_req)
+{
+ kref_get(&pcache_req->ref);
+}
+
+static void end_req(struct kref *ref)
+{
+ struct pcache_request *pcache_req = container_of(ref, struct pcache_request, ref);
+ struct dm_pcache *pcache = pcache_req->pcache;
+ struct bio *bio = pcache_req->bio;
+ int ret = pcache_req->ret;
+
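+ /* the request could not make progress; retry it via the deferred list */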
+ if (ret == -EBUSY) {
+ pcache_req_get(pcache_req);
+ defer_req(pcache_req);
+ } else {
+ bio->bi_status = errno_to_blk_status(ret);
+ bio_endio(bio);
+
+ if (atomic_dec_and_test(&pcache->inflight_reqs))
+ wake_up(&pcache->inflight_wq);
+ }
+}
+
+void pcache_req_put(struct pcache_request *pcache_req, int ret)
+{
+ /* Set the return status if it is not already set */
+ if (ret && !pcache_req->ret)
+ pcache_req->ret = ret;
+
+ kref_put(&pcache_req->ref, end_req);
+}
+
+static bool at_least_one_arg(struct dm_arg_set *as, char **error)
+{
+ if (!as->argc) {
+ *error = "Insufficient args";
+ return false;
+ }
+
+ return true;
+}
+
+static int parse_cache_dev(struct dm_pcache *pcache, struct dm_arg_set *as,
+ char **error)
+{
+ int ret;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+ ret = dm_get_device(pcache->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE,
+ &pcache->cache_dev.dm_dev);
+ if (ret) {
+ *error = "Error opening cache device";
+ return ret;
+ }
+
+ return 0;
+}
+
+static int parse_backing_dev(struct dm_pcache *pcache, struct dm_arg_set *as,
+ char **error)
+{
+ int ret;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ ret = dm_get_device(pcache->ti, dm_shift_arg(as),
+ BLK_OPEN_READ | BLK_OPEN_WRITE,
+ &pcache->backing_dev.dm_dev);
+ if (ret) {
+ *error = "Error opening backing device";
+ return ret;
+ }
+
+ return 0;
+}
+
+static void pcache_init_opts(struct pcache_cache_options *opts)
+{
+ opts->cache_mode = PCACHE_CACHE_MODE_WRITEBACK;
+ opts->data_crc = false;
+}
+
+static int parse_cache_opts(struct dm_pcache *pcache, struct dm_arg_set *as,
+ char **error)
+{
+ struct pcache_cache_options *opts = &pcache->opts;
+ static const struct dm_arg _args[] = {
+ {0, 4, "Invalid number of cache option arguments"},
+ };
+ unsigned int argc;
+ const char *arg;
+ int ret;
+
+ pcache_init_opts(opts);
+ if (!as->argc)
+ return 0;
+
+ ret = dm_read_arg_group(_args, as, &argc, error);
+ if (ret)
+ return -EINVAL;
+
+ while (argc) {
+ arg = dm_shift_arg(as);
+ argc--;
+
+ if (!strcmp(arg, "cache_mode")) {
+ arg = dm_shift_arg(as);
+ if (!strcmp(arg, "writeback")) {
+ opts->cache_mode = PCACHE_CACHE_MODE_WRITEBACK;
+ } else {
+ *error = "Invalid cache mode parameter";
+ return -EINVAL;
+ }
+ argc--;
+ } else if (!strcmp(arg, "data_crc")) {
+ arg = dm_shift_arg(as);
+ if (!strcmp(arg, "true")) {
+ opts->data_crc = true;
+ } else if (!strcmp(arg, "false")) {
+ opts->data_crc = false;
+ } else {
+ *error = "Invalid data crc parameter";
+ return -EINVAL;
+ }
+ argc--;
+ } else {
+ *error = "Unrecognised cache option requested";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
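+
+ /*
+ * Illustrative example of a table line accepted by the parsers above
+ * (device paths are placeholders):
+ *
+ * pcache /dev/pmem0 /dev/sdb 4 cache_mode writeback data_crc true
+ */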
+
+static int pcache_start(struct dm_pcache *pcache, char **error)
+{
+ int ret;
+
+ ret = cache_dev_start(pcache);
+ if (ret) {
+ *error = "Failed to start cache dev";
+ return ret;
+ }
+
+ ret = backing_dev_start(pcache);
+ if (ret) {
+ *error = "Failed to start backing dev";
+ goto stop_cache;
+ }
+
+ ret = pcache_cache_start(pcache);
+ if (ret) {
+ *error = "Failed to start pcache";
+ goto stop_backing;
+ }
+
+ return 0;
+stop_backing:
+ backing_dev_stop(pcache);
+stop_cache:
+ cache_dev_stop(pcache);
+
+ return ret;
+}
+
+static void pcache_destroy_args(struct dm_pcache *pcache)
+{
+ if (pcache->cache_dev.dm_dev)
+ dm_put_device(pcache->ti, pcache->cache_dev.dm_dev);
+ if (pcache->backing_dev.dm_dev)
+ dm_put_device(pcache->ti, pcache->backing_dev.dm_dev);
+}
+
+static int pcache_parse_args(struct dm_pcache *pcache, unsigned int argc, char **argv,
+ char **error)
+{
+ struct dm_arg_set as;
+ int ret;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ /*
+ * Parse cache device
+ */
+ ret = parse_cache_dev(pcache, &as, error);
+ if (ret)
+ return ret;
+ /*
+ * Parse backing device
+ */
+ ret = parse_backing_dev(pcache, &as, error);
+ if (ret)
+ goto out;
+ /*
+ * Parse optional arguments
+ */
+ ret = parse_cache_opts(pcache, &as, error);
+ if (ret)
+ goto out;
+
+ return 0;
+out:
+ pcache_destroy_args(pcache);
+ return ret;
+}
+
+static int dm_pcache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct mapped_device *md = ti->table->md;
+ struct dm_pcache *pcache;
+ int ret;
+
+ if (md->map) {
+ ti->error = "Don't support table loading for live md";
+ return -EOPNOTSUPP;
+ }
+
+ /* Allocate memory for the cache structure */
+ pcache = kzalloc(sizeof(struct dm_pcache), GFP_KERNEL);
+ if (!pcache)
+ return -ENOMEM;
+
+ pcache->task_wq = alloc_workqueue("pcache-%s-wq", WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 0, md->name);
+ if (!pcache->task_wq) {
+ ret = -ENOMEM;
+ goto free_pcache;
+ }
+
+ spin_lock_init(&pcache->defered_req_list_lock);
+ INIT_LIST_HEAD(&pcache->defered_req_list);
+ INIT_WORK(&pcache->defered_req_work, defered_req_fn);
+ pcache->ti = ti;
+
+ ret = pcache_parse_args(pcache, argc, argv, &ti->error);
+ if (ret)
+ goto destroy_wq;
+
+ ret = pcache_start(pcache, &ti->error);
+ if (ret)
+ goto destroy_args;
+
+ ti->num_flush_bios = 1;
+ ti->flush_supported = true;
+ ti->per_io_data_size = sizeof(struct pcache_request);
+ ti->private = pcache;
+ atomic_set(&pcache->inflight_reqs, 0);
+ atomic_set(&pcache->state, PCACHE_STATE_RUNNING);
+ init_waitqueue_head(&pcache->inflight_wq);
+
+ return 0;
+destroy_args:
+ pcache_destroy_args(pcache);
+destroy_wq:
+ destroy_workqueue(pcache->task_wq);
+free_pcache:
+ kfree(pcache);
+
+ return ret;
+}
+
+static void defer_req_stop(struct dm_pcache *pcache)
+{
+ struct pcache_request *pcache_req;
+ LIST_HEAD(tmp_list);
+
+ flush_work(&pcache->defered_req_work);
+
+ spin_lock(&pcache->defered_req_list_lock);
+ list_splice_init(&pcache->defered_req_list, &tmp_list);
+ spin_unlock(&pcache->defered_req_list_lock);
+
+ while (!list_empty(&tmp_list)) {
+ pcache_req = list_first_entry(&tmp_list,
+ struct pcache_request, list_node);
+ list_del_init(&pcache_req->list_node);
+ pcache_req_put(pcache_req, -EIO);
+ }
+}
+
+static void dm_pcache_dtr(struct dm_target *ti)
+{
+ struct dm_pcache *pcache;
+
+ pcache = ti->private;
+ atomic_set(&pcache->state, PCACHE_STATE_STOPPING);
+ defer_req_stop(pcache);
+
+ wait_event(pcache->inflight_wq,
+ atomic_read(&pcache->inflight_reqs) == 0);
+
+ pcache_cache_stop(pcache);
+ backing_dev_stop(pcache);
+ cache_dev_stop(pcache);
+
+ pcache_destroy_args(pcache);
+ drain_workqueue(pcache->task_wq);
+ destroy_workqueue(pcache->task_wq);
+
+ kfree(pcache);
+}
+
+static int dm_pcache_map_bio(struct dm_target *ti, struct bio *bio)
+{
+ struct pcache_request *pcache_req = dm_per_bio_data(bio, sizeof(struct pcache_request));
+ struct dm_pcache *pcache = ti->private;
+ int ret;
+
+ pcache_req->pcache = pcache;
+ kref_init(&pcache_req->ref);
+ pcache_req->ret = 0;
+ pcache_req->bio = bio;
+ pcache_req->off = (u64)bio->bi_iter.bi_sector << SECTOR_SHIFT;
+ pcache_req->data_len = bio->bi_iter.bi_size;
+ INIT_LIST_HEAD(&pcache_req->list_node);
+ atomic_inc(&pcache->inflight_reqs);
+
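+ /* -EBUSY defers the bio; any other result completes it via pcache_req_put() */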
+ ret = pcache_cache_handle_req(&pcache->cache, pcache_req);
+ if (ret == -EBUSY)
+ defer_req(pcache_req);
+ else
+ pcache_req_put(pcache_req, ret);
+
+ return DM_MAPIO_SUBMITTED;
+}
+
+static void dm_pcache_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
+{
+ struct dm_pcache *pcache = ti->private;
+ struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
+ struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
+ struct pcache_cache *cache = &pcache->cache;
+ unsigned int sz = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%x %u %u %u %u %x %u:%u %u:%u %u:%u",
+ cache_dev->sb_flags,
+ cache_dev->seg_num,
+ cache->n_segs,
+ bitmap_weight(cache->seg_map, cache->n_segs),
+ pcache_cache_get_gc_percent(cache),
+ cache->cache_info.flags,
+ cache->key_head.cache_seg->cache_seg_id,
+ cache->key_head.seg_off,
+ cache->dirty_tail.cache_seg->cache_seg_id,
+ cache->dirty_tail.seg_off,
+ cache->key_tail.cache_seg->cache_seg_id,
+ cache->key_tail.seg_off);
+ break;
+ case STATUSTYPE_TABLE:
+ DMEMIT("%s %s 4 cache_mode writeback crc %s",
+ cache_dev->dm_dev->name,
+ backing_dev->dm_dev->name,
+ cache_data_crc_on(cache) ? "true" : "false");
+ break;
+ case STATUSTYPE_IMA:
+ *result = '\0';
+ break;
+ }
+}
+
+static int dm_pcache_message(struct dm_target *ti, unsigned int argc,
+ char **argv, char *result, unsigned int maxlen)
+{
+ struct dm_pcache *pcache = ti->private;
+ unsigned long val;
+
+ if (argc != 2)
+ goto err;
+
+ if (!strcasecmp(argv[0], "gc_percent")) {
+ if (kstrtoul(argv[1], 10, &val))
+ goto err;
+
+ return pcache_cache_set_gc_percent(&pcache->cache, val);
+ }
+err:
+ return -EINVAL;
+}
+
+static struct target_type dm_pcache_target = {
+ .name = "pcache",
+ .version = {0, 1, 0},
+ .module = THIS_MODULE,
+ .features = DM_TARGET_SINGLETON,
+ .ctr = dm_pcache_ctr,
+ .dtr = dm_pcache_dtr,
+ .map = dm_pcache_map_bio,
+ .status = dm_pcache_status,
+ .message = dm_pcache_message,
+};
+
+static int __init dm_pcache_init(void)
+{
+ int ret;
+
+ ret = pcache_backing_init();
+ if (ret)
+ goto err;
+
+ ret = pcache_cache_init();
+ if (ret)
+ goto backing_exit;
+
+ ret = dm_register_target(&dm_pcache_target);
+ if (ret)
+ goto cache_exit;
+ return 0;
+
+cache_exit:
+ pcache_cache_exit();
+backing_exit:
+ pcache_backing_exit();
+err:
+ return ret;
+}
+module_init(dm_pcache_init);
+
+static void __exit dm_pcache_exit(void)
+{
+ dm_unregister_target(&dm_pcache_target);
+ pcache_cache_exit();
+ pcache_backing_exit();
+}
+module_exit(dm_pcache_exit);
+
+MODULE_DESCRIPTION("dm-pcache Persistent Cache for block device");
+MODULE_AUTHOR("Dongsheng Yang <dongsheng.yang@linux.dev>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-pcache/dm_pcache.h b/drivers/md/dm-pcache/dm_pcache.h
new file mode 100644
index 000000000000..b4e06be0c0b9
--- /dev/null
+++ b/drivers/md/dm-pcache/dm_pcache.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _DM_PCACHE_H
+#define _DM_PCACHE_H
+#include <linux/device-mapper.h>
+
+#include "../dm-core.h"
+
+#define CACHE_DEV_TO_PCACHE(cache_dev) (container_of(cache_dev, struct dm_pcache, cache_dev))
+#define BACKING_DEV_TO_PCACHE(backing_dev) (container_of(backing_dev, struct dm_pcache, backing_dev))
+#define CACHE_TO_PCACHE(cache) (container_of(cache, struct dm_pcache, cache))
+
+#define PCACHE_STATE_RUNNING 1
+#define PCACHE_STATE_STOPPING 2
+
+struct pcache_cache_dev;
+struct pcache_backing_dev;
+struct pcache_cache;
+struct pcache_cache_options;
+struct dm_pcache {
+ struct dm_target *ti;
+ struct pcache_cache_dev cache_dev;
+ struct pcache_backing_dev backing_dev;
+ struct pcache_cache cache;
+ struct pcache_cache_options opts;
+
+ spinlock_t defered_req_list_lock;
+ struct list_head defered_req_list;
+ struct workqueue_struct *task_wq;
+
+ struct work_struct defered_req_work;
+
+ atomic_t state;
+ atomic_t inflight_reqs;
+ wait_queue_head_t inflight_wq;
+};
+
+static inline bool pcache_is_stopping(struct dm_pcache *pcache)
+{
+ return (atomic_read(&pcache->state) == PCACHE_STATE_STOPPING);
+}
+
+#define pcache_dev_err(pcache, fmt, ...) \
+ pcache_err("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
+#define pcache_dev_info(pcache, fmt, ...) \
+ pcache_info("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
+#define pcache_dev_debug(pcache, fmt, ...) \
+ pcache_debug("%s " fmt, pcache->ti->table->md->name, ##__VA_ARGS__)
+
+struct pcache_request {
+ struct dm_pcache *pcache;
+ struct bio *bio;
+
+ u64 off;
+ u32 data_len;
+
+ struct kref ref;
+ int ret;
+
+ struct list_head list_node;
+};
+
+void pcache_req_get(struct pcache_request *pcache_req);
+void pcache_req_put(struct pcache_request *pcache_req, int ret);
+
+void pcache_defer_reqs_kick(struct dm_pcache *pcache);
+
+#endif /* _DM_PCACHE_H */
diff --git a/drivers/md/dm-pcache/pcache_internal.h b/drivers/md/dm-pcache/pcache_internal.h
new file mode 100644
index 000000000000..b7a3319d2bd3
--- /dev/null
+++ b/drivers/md/dm-pcache/pcache_internal.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_INTERNAL_H
+#define _PCACHE_INTERNAL_H
+
+#include <linux/delay.h>
+#include <linux/crc32c.h>
+
+#define pcache_err(fmt, ...) \
+ pr_err("dm-pcache: %s:%u " fmt, __func__, __LINE__, ##__VA_ARGS__)
+#define pcache_info(fmt, ...) \
+ pr_info("dm-pcache: %s:%u " fmt, __func__, __LINE__, ##__VA_ARGS__)
+#define pcache_debug(fmt, ...) \
+ pr_debug("dm-pcache: %s:%u " fmt, __func__, __LINE__, ##__VA_ARGS__)
+
+#define PCACHE_KB (1024ULL)
+#define PCACHE_MB (1024 * PCACHE_KB)
+
+/* Maximum number of metadata indices */
+#define PCACHE_META_INDEX_MAX 2
+
+#define PCACHE_CRC_SEED 0x3B15A
+/*
+ * struct pcache_meta_header - PCACHE metadata header structure
+ * @crc: CRC checksum for validating metadata integrity.
+ * @seq: Sequence number to track metadata updates.
+ * @version: Metadata version.
+ * @res: Reserved space for future use.
+ */
+struct pcache_meta_header {
+ __u32 crc;
+ __u8 seq;
+ __u8 version;
+ __u16 res;
+};
+
+/*
+ * pcache_meta_crc - Calculate CRC for the given metadata header.
+ * @header: Pointer to the metadata header.
+ * @meta_size: Size of the metadata structure.
+ *
+ * Returns the CRC checksum calculated by excluding the CRC field itself.
+ */
+static inline u32 pcache_meta_crc(struct pcache_meta_header *header, u32 meta_size)
+{
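+ /* crc is the first field of the header; skip its 4 bytes */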
+ return crc32c(PCACHE_CRC_SEED, (void *)header + 4, meta_size - 4);
+}
+
+/*
+ * pcache_meta_seq_after - Check if a sequence number is more recent, accounting for overflow.
+ * @seq1: First sequence number.
+ * @seq2: Second sequence number.
+ *
+ * Determines if @seq1 is more recent than @seq2 by calculating the signed
+ * difference between them. This approach allows handling sequence number
+ * overflow correctly because the difference wraps naturally, and any value
+ * greater than zero indicates that @seq1 is "after" @seq2. This method
+ * assumes 8-bit unsigned sequence numbers, where the difference wraps
+ * around if seq1 overflows past seq2.
+ *
+ * Returns:
+ * - true if @seq1 is more recent than @seq2, indicating it comes "after"
+ * - false otherwise.
+ */
+static inline bool pcache_meta_seq_after(u8 seq1, u8 seq2)
+{
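+ /* e.g. seq1 == 0, seq2 == 255: (s8)(0 - 255) == 1 > 0, so 0 is "after" 255 */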
+ return (s8)(seq1 - seq2) > 0;
+}
+
+/*
+ * pcache_meta_find_latest - Find the latest valid metadata.
+ * @header: Pointer to the metadata header.
+ * @meta_size: Size of each metadata block.
+ * @meta_max_size: On-media stride between metadata slots.
+ * @meta_ret: Buffer that receives a copy of the latest metadata.
+ *
+ * Finds the latest valid metadata by checking sequence numbers. If a
+ * valid entry with the highest sequence number is found, its on-media
+ * pointer is returned and a copy is placed in @meta_ret. Returns NULL
+ * if no valid metadata is found, or an ERR_PTR on hardware memory error.
+ */
+static inline void __must_check *pcache_meta_find_latest(struct pcache_meta_header *header,
+ u32 meta_size, u32 meta_max_size,
+ void *meta_ret)
+{
+ struct pcache_meta_header *meta, *latest = NULL;
+ u32 i, seq_latest = 0;
+ void *meta_addr;
+
+ meta = meta_ret;
+
+ for (i = 0; i < PCACHE_META_INDEX_MAX; i++) {
+ meta_addr = (void *)header + (i * meta_max_size);
+ if (copy_mc_to_kernel(meta, meta_addr, meta_size)) {
+ pcache_err("hardware memory error when copy meta");
+ return ERR_PTR(-EIO);
+ }
+
+ /* Skip if CRC check fails, which means corrupted */
+ if (meta->crc != pcache_meta_crc(meta, meta_size))
+ continue;
+
+ /* Update latest if a more recent sequence is found */
+ if (!latest || pcache_meta_seq_after(meta->seq, seq_latest)) {
+ seq_latest = meta->seq;
+ latest = meta_addr;
+ }
+ }
+
+ if (!latest)
+ return NULL;
+
+ if (copy_mc_to_kernel(meta_ret, latest, meta_size)) {
+ pcache_err("hardware memory error");
+ return ERR_PTR(-EIO);
+ }
+
+ return latest;
+}
+
+#endif /* _PCACHE_INTERNAL_H */
diff --git a/drivers/md/dm-pcache/segment.c b/drivers/md/dm-pcache/segment.c
new file mode 100644
index 000000000000..7e9818701445
--- /dev/null
+++ b/drivers/md/dm-pcache/segment.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/dax.h>
+
+#include "pcache_internal.h"
+#include "cache_dev.h"
+#include "segment.h"
+
+int segment_copy_to_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off)
+{
+ struct iov_iter iter;
+ size_t copied;
+ void *src;
+
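+ /* build an iterator over the bio's bvecs, honoring any partially consumed bvec */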
+ iov_iter_bvec(&iter, ITER_DEST, &bio->bi_io_vec[bio->bi_iter.bi_idx],
+ bio_segments(bio), bio->bi_iter.bi_size);
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ if (bio_off)
+ iov_iter_advance(&iter, bio_off);
+
+ src = segment->data + data_off;
+ copied = _copy_mc_to_iter(src, data_len, &iter);
+ if (copied != data_len)
+ return -EIO;
+
+ return 0;
+}
+
+int segment_copy_from_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off)
+{
+ struct iov_iter iter;
+ size_t copied;
+ void *dst;
+
+ iov_iter_bvec(&iter, ITER_SOURCE, &bio->bi_io_vec[bio->bi_iter.bi_idx],
+ bio_segments(bio), bio->bi_iter.bi_size);
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ if (bio_off)
+ iov_iter_advance(&iter, bio_off);
+
+ dst = segment->data + data_off;
+ copied = _copy_from_iter_flushcache(dst, data_len, &iter);
+ if (copied != data_len)
+ return -EIO;
+ pmem_wmb();
+
+ return 0;
+}
+
+void pcache_segment_init(struct pcache_cache_dev *cache_dev, struct pcache_segment *segment,
+ struct pcache_segment_init_options *options)
+{
+ segment->seg_info = options->seg_info;
+ segment_info_set_type(segment->seg_info, options->type);
+
+ segment->cache_dev = cache_dev;
+ segment->seg_id = options->seg_id;
+ segment->data_size = PCACHE_SEG_SIZE - options->data_off;
+ segment->data = CACHE_DEV_SEGMENT(cache_dev, options->seg_id) + options->data_off;
+}
diff --git a/drivers/md/dm-pcache/segment.h b/drivers/md/dm-pcache/segment.h
new file mode 100644
index 000000000000..deca1ddcb02b
--- /dev/null
+++ b/drivers/md/dm-pcache/segment.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PCACHE_SEGMENT_H
+#define _PCACHE_SEGMENT_H
+
+#include <linux/bio.h>
+#include <linux/bitfield.h>
+
+#include "pcache_internal.h"
+
+struct pcache_segment_info {
+ struct pcache_meta_header header;
+ __u32 flags;
+ __u32 next_seg;
+};
+
+#define PCACHE_SEG_INFO_FLAGS_HAS_NEXT BIT(0)
+
+#define PCACHE_SEG_INFO_FLAGS_TYPE_MASK GENMASK(4, 1)
+#define PCACHE_SEGMENT_TYPE_CACHE_DATA 1
+
+static inline bool segment_info_has_next(struct pcache_segment_info *seg_info)
+{
+ return (seg_info->flags & PCACHE_SEG_INFO_FLAGS_HAS_NEXT);
+}
+
+static inline void segment_info_set_type(struct pcache_segment_info *seg_info, u8 type)
+{
+ seg_info->flags &= ~PCACHE_SEG_INFO_FLAGS_TYPE_MASK;
+ seg_info->flags |= FIELD_PREP(PCACHE_SEG_INFO_FLAGS_TYPE_MASK, type);
+}
+
+static inline u8 segment_info_get_type(struct pcache_segment_info *seg_info)
+{
+ return FIELD_GET(PCACHE_SEG_INFO_FLAGS_TYPE_MASK, seg_info->flags);
+}
+
+struct pcache_segment_pos {
+ struct pcache_segment *segment; /* Segment associated with the position */
+ u32 off; /* Offset within the segment */
+};
+
+struct pcache_segment_init_options {
+ u8 type;
+ u32 seg_id;
+ u32 data_off;
+
+ struct pcache_segment_info *seg_info;
+};
+
+struct pcache_segment {
+ struct pcache_cache_dev *cache_dev;
+
+ void *data;
+ u32 data_size;
+ u32 seg_id;
+
+ struct pcache_segment_info *seg_info;
+};
+
+int segment_copy_to_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off);
+int segment_copy_from_bio(struct pcache_segment *segment,
+ u32 data_off, u32 data_len, struct bio *bio, u32 bio_off);
+
+static inline void segment_pos_advance(struct pcache_segment_pos *seg_pos, u32 len)
+{
+ BUG_ON(seg_pos->off + len > seg_pos->segment->data_size);
+
+ seg_pos->off += len;
+}
+
+void pcache_segment_init(struct pcache_cache_dev *cache_dev, struct pcache_segment *segment,
+ struct pcache_segment_init_options *options);
+#endif /* _PCACHE_SEGMENT_H */
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 79ea85d18e24..c6f7129e43d3 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3247,7 +3247,7 @@ size_check:
rs_reset_inconclusive_reshape(rs);
/* Start raid set read-only and assumed clean to change in raid_resume() */
- rs->md.ro = 1;
+ rs->md.ro = MD_RDONLY;
rs->md.in_sync = 1;
/* Has to be held on running the array */
@@ -3385,7 +3385,7 @@ static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long r
/* The MD sync thread can be done with io or be interrupted but still be running */
if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
(test_bit(MD_RECOVERY_RUNNING, &recovery) ||
- (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
+ (md_is_rdwr(mddev) && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
return st_reshape;
@@ -3775,11 +3775,11 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
} else
return -EINVAL;
}
- if (mddev->ro == 2) {
+ if (mddev->ro == MD_AUTO_READ) {
/* A write to sync_action is enough to justify
* canceling read-auto mode
*/
- mddev->ro = 0;
+ mddev->ro = MD_RDWR;
if (!mddev->suspended)
md_wakeup_thread(mddev->sync_thread);
}
@@ -3813,8 +3813,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
struct raid_set *rs = ti->private;
unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);
- limits->io_min = chunk_size_bytes;
- limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+ if (chunk_size_bytes) {
+ limits->io_min = chunk_size_bytes;
+ limits->io_opt = chunk_size_bytes * mddev_data_stripes(rs);
+ }
}
static void raid_presuspend(struct dm_target *ti)
@@ -3858,6 +3860,7 @@ static void raid_postsuspend(struct dm_target *ti)
*/
md_stop_writes(&rs->md);
mddev_suspend(&rs->md, false);
+ rs->md.ro = MD_RDONLY;
}
}
@@ -3953,9 +3956,11 @@ static int __load_dirty_region_bitmap(struct raid_set *rs)
!test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
struct mddev *mddev = &rs->md;
- r = mddev->bitmap_ops->load(mddev);
- if (r)
- DMERR("Failed to load bitmap");
+ if (md_bitmap_enabled(mddev, false)) {
+ r = mddev->bitmap_ops->load(mddev);
+ if (r)
+ DMERR("Failed to load bitmap");
+ }
}
return r;
@@ -3968,7 +3973,7 @@ static void rs_update_sbs(struct raid_set *rs)
int ro = mddev->ro;
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
- mddev->ro = 0;
+ mddev->ro = MD_RDWR;
md_update_sb(mddev, 1);
mddev->ro = ro;
}
@@ -4070,10 +4075,12 @@ static int raid_preresume(struct dm_target *ti)
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
- r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors,
- chunksize, false);
- if (r)
- DMERR("Failed to resize bitmap");
+ if (md_bitmap_enabled(mddev, false)) {
+ r = mddev->bitmap_ops->resize(mddev, mddev->dev_sectors,
+ chunksize);
+ if (r)
+ DMERR("Failed to resize bitmap");
+ }
}
/* Check for any resize/reshape on @rs and adjust/initiate */
@@ -4125,7 +4132,7 @@ static void raid_resume(struct dm_target *ti)
WARN_ON_ONCE(rcu_dereference_protected(mddev->sync_thread,
lockdep_is_held(&mddev->reconfig_mutex)));
clear_bit(RT_FLAG_RS_FROZEN, &rs->runtime_flags);
- mddev->ro = 0;
+ mddev->ro = MD_RDWR;
mddev->in_sync = 0;
md_unfrozen_sync_thread(mddev);
mddev_unlock_and_resume(mddev);
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index a4550975c27d..e9b47b659976 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -206,7 +206,7 @@ struct dm_region_hash *dm_region_hash_create(
rh->shift = RH_HASH_SHIFT;
rh->prime = RH_HASH_MULT;
- rh->buckets = vmalloc(array_size(nr_buckets, sizeof(*rh->buckets)));
+ rh->buckets = vmalloc_array(nr_buckets, sizeof(*rh->buckets));
if (!rh->buckets) {
DMERR("unable to allocate region hash bucket memory");
kfree(rh);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 58902091bf79..1461dc740dae 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -456,11 +456,15 @@ static void stripe_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
struct stripe_c *sc = ti->private;
- unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ unsigned int io_min, io_opt;
limits->chunk_sectors = sc->chunk_size;
- limits->io_min = chunk_size;
- limits->io_opt = chunk_size * sc->stripes;
+
+ if (!check_shl_overflow(sc->chunk_size, SECTOR_SHIFT, &io_min) &&
+ !check_mul_overflow(io_min, sc->stripes, &io_opt)) {
+ limits->io_min = io_min;
+ limits->io_opt = io_opt;
+ }
}
static struct target_type stripe_target = {
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index bb1a70b5a215..50a52ca50b34 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -114,8 +114,8 @@ static int alloc_region_table(struct dm_target *ti, unsigned int nr_paths)
return -EINVAL;
}
- sctx->region_table = vmalloc(array_size(nr_slots,
- sizeof(region_table_slot_t)));
+ sctx->region_table = vmalloc_array(nr_slots,
+ sizeof(region_table_slot_t));
if (!sctx->region_table) {
ti->error = "Cannot allocate region table";
return -ENOMEM;
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 2af5a9514c05..8fede41adec0 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -263,7 +263,8 @@ static long io_err_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
static struct target_type error_target = {
.name = "error",
.version = {1, 7, 0},
- .features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM,
+ .features = DM_TARGET_WILDCARD | DM_TARGET_ZONED_HM |
+ DM_TARGET_PASSES_INTEGRITY,
.ctr = io_err_ctr,
.dtr = io_err_dtr,
.map = io_err_map,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 007bb93e5fca..c84149ba4e38 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3031,8 +3031,8 @@ static struct pool *pool_create(struct mapped_device *pool_md,
}
pool->cell_sort_array =
- vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
- sizeof(*pool->cell_sort_array)));
+ vmalloc_array(CELL_SORT_ARRAY_SIZE,
+ sizeof(*pool->cell_sort_array));
if (!pool->cell_sort_array) {
*error = "Error allocating cell sort array";
err_p = ERR_PTR(-ENOMEM);
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
index 810002747091..262e11581f2d 100644
--- a/drivers/md/dm-vdo/data-vio.c
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -17,6 +17,7 @@
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/wait.h>
#include "logger.h"
@@ -509,18 +510,6 @@ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lb
vdo_enqueue_completion(completion, VDO_DEFAULT_Q_MAP_BIO_PRIORITY);
}
-static bool is_zero_block(char *block)
-{
- int i;
-
- for (i = 0; i < VDO_BLOCK_SIZE; i += sizeof(u64)) {
- if (*((u64 *) &block[i]))
- return false;
- }
-
- return true;
-}
-
static void copy_from_bio(struct bio *bio, char *data_ptr)
{
struct bio_vec biovec;
@@ -572,7 +561,7 @@ static void launch_bio(struct vdo *vdo, struct data_vio *data_vio, struct bio *b
* we acknowledge the bio.
*/
copy_from_bio(bio, data_vio->vio.data);
- data_vio->is_zero = is_zero_block(data_vio->vio.data);
+ data_vio->is_zero = mem_is_zero(data_vio->vio.data, VDO_BLOCK_SIZE);
data_vio->write = true;
}
@@ -1459,7 +1448,7 @@ static void modify_for_partial_write(struct vdo_completion *completion)
copy_from_bio(bio, data + data_vio->offset);
}
- data_vio->is_zero = is_zero_block(data);
+ data_vio->is_zero = mem_is_zero(data, VDO_BLOCK_SIZE);
data_vio->read = false;
launch_data_vio_logical_callback(data_vio,
continue_data_vio_with_block_map_slot);
diff --git a/drivers/md/dm-vdo/indexer/volume-index.c b/drivers/md/dm-vdo/indexer/volume-index.c
index 12f954a0c532..afb062e1f1fb 100644
--- a/drivers/md/dm-vdo/indexer/volume-index.c
+++ b/drivers/md/dm-vdo/indexer/volume-index.c
@@ -836,7 +836,7 @@ static int start_restoring_volume_sub_index(struct volume_sub_index *sub_index,
"%zu bytes decoded of %zu expected", offset,
sizeof(buffer));
if (result != VDO_SUCCESS)
- result = UDS_CORRUPT_DATA;
+ return UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_5, MAGIC_SIZE) != 0) {
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
@@ -928,7 +928,7 @@ static int start_restoring_volume_index(struct volume_index *volume_index,
"%zu bytes decoded of %zu expected", offset,
sizeof(buffer));
if (result != VDO_SUCCESS)
- result = UDS_CORRUPT_DATA;
+ return UDS_CORRUPT_DATA;
if (memcmp(header.magic, MAGIC_START_6, MAGIC_SIZE) != 0)
return vdo_log_warning_strerror(UDS_CORRUPT_DATA,
diff --git a/drivers/md/dm-vdo/logger.c b/drivers/md/dm-vdo/logger.c
index 3f7dc2cb6b98..76a987ccf926 100644
--- a/drivers/md/dm-vdo/logger.c
+++ b/drivers/md/dm-vdo/logger.c
@@ -34,7 +34,7 @@ static const char *get_current_interrupt_type(void)
if (in_nmi())
return "NMI";
- if (in_irq())
+ if (in_hardirq())
return "HI";
if (in_softirq())
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
index e7f4153e55e3..8fc22fb14196 100644
--- a/drivers/md/dm-vdo/vio.c
+++ b/drivers/md/dm-vdo/vio.c
@@ -212,7 +212,7 @@ int vio_reset_bio_with_size(struct vio *vio, char *data, int size, bio_end_io_t
return VDO_SUCCESS;
bio->bi_ioprio = 0;
- bio->bi_io_vec = bio->bi_inline_vecs;
+ bio->bi_io_vec = bio_inline_vecs(bio);
bio->bi_max_vecs = vio->block_count + 1;
if (VDO_ASSERT(size <= vio_size, "specified size %d is not greater than allocated %d",
size, vio_size) != VDO_SUCCESS)
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index d382a390d39a..72047b47a7a0 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -320,11 +320,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOWAIT);
- if (unlikely(!fio->bufs[n])) {
- DMERR("failed to allocate FEC buffer");
- return -ENOMEM;
- }
+ fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOIO);
}
/* try to allocate the maximum number of buffers */
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 78e17dd4d01b..5a840c4ae316 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -17,33 +17,26 @@
* For internal zone reports bypassing the top BIO submission path.
*/
static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t,
- sector_t sector, unsigned int nr_zones,
- report_zones_cb cb, void *data)
+ unsigned int nr_zones,
+ struct dm_report_zones_args *args)
{
- struct gendisk *disk = md->disk;
- int ret;
- struct dm_report_zones_args args = {
- .next_sector = sector,
- .orig_data = data,
- .orig_cb = cb,
- };
-
do {
struct dm_target *tgt;
+ int ret;
- tgt = dm_table_find_target(t, args.next_sector);
+ tgt = dm_table_find_target(t, args->next_sector);
if (WARN_ON_ONCE(!tgt->type->report_zones))
return -EIO;
- args.tgt = tgt;
- ret = tgt->type->report_zones(tgt, &args,
- nr_zones - args.zone_idx);
+ args->tgt = tgt;
+ ret = tgt->type->report_zones(tgt, args,
+ nr_zones - args->zone_idx);
if (ret < 0)
return ret;
- } while (args.zone_idx < nr_zones &&
- args.next_sector < get_capacity(disk));
+ } while (args->zone_idx < nr_zones &&
+ args->next_sector < get_capacity(md->disk));
- return args.zone_idx;
+ return args->zone_idx;
}
/*
@@ -52,7 +45,8 @@ static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t,
* generally implemented by targets using dm_report_zones().
*/
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data)
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args)
{
struct mapped_device *md = disk->private_data;
struct dm_table *map;
@@ -76,9 +70,14 @@ int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
map = zone_revalidate_map;
}
- if (map)
- ret = dm_blk_do_report_zones(md, map, sector, nr_zones, cb,
- data);
+ if (map) {
+ struct dm_report_zones_args dm_args = {
+ .disk = md->disk,
+ .next_sector = sector,
+ .rep_args = args,
+ };
+ ret = dm_blk_do_report_zones(md, map, nr_zones, &dm_args);
+ }
if (put_table)
dm_put_live_table(md, srcu_idx);
@@ -113,7 +112,18 @@ static int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx,
}
args->next_sector = zone->start + zone->len;
- return args->orig_cb(zone, args->zone_idx++, args->orig_data);
+
+ /* If we have an internal callback, call it first. */
+ if (args->cb) {
+ int ret;
+
+ ret = args->cb(zone, args->zone_idx, args->data);
+ if (ret)
+ return ret;
+ }
+
+ return disk_report_zone(args->disk, zone, args->zone_idx++,
+ args->rep_args);
}
/*
@@ -492,10 +502,15 @@ int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
sector_t sector, unsigned int nr_zones,
unsigned long *need_reset)
{
+ struct dm_report_zones_args args = {
+ .disk = md->disk,
+ .next_sector = sector,
+ .cb = dm_zone_need_reset_cb,
+ .data = need_reset,
+ };
int ret;
- ret = dm_blk_do_report_zones(md, t, sector, nr_zones,
- dm_zone_need_reset_cb, need_reset);
+ ret = dm_blk_do_report_zones(md, t, nr_zones, &args);
if (ret != nr_zones) {
DMERR("Get %s zone reset bitmap failed\n",
md->disk->disk_name);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a44e8c2dccee..6c83ab940af7 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -403,9 +403,9 @@ static void do_deferred_remove(struct work_struct *w)
dm_deferred_remove();
}
-static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int dm_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mapped_device *md = bdev->bd_disk->private_data;
+ struct mapped_device *md = disk->private_data;
return dm_get_geometry(md, geo);
}
@@ -490,18 +490,13 @@ u64 dm_start_time_ns_from_clone(struct bio *bio)
}
EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
-static inline bool bio_is_flush_with_data(struct bio *bio)
-{
- return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
-}
-
static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
{
/*
* If REQ_PREFLUSH set, don't account payload, it will be
* submitted (and accounted) after this flush completes.
*/
- if (bio_is_flush_with_data(bio))
+ if (io->requeue_flush_with_data)
return 0;
if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
return io->sectors;
@@ -590,6 +585,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t g
io = container_of(tio, struct dm_io, tio);
io->magic = DM_IO_MAGIC;
io->status = BLK_STS_OK;
+ io->requeue_flush_with_data = false;
/* one ref is for submission, the other is for completion */
atomic_set(&io->io_count, 2);
@@ -948,6 +944,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
struct mapped_device *md = io->md;
blk_status_t io_error;
bool requeued;
+ bool requeue_flush_with_data;
requeued = dm_handle_requeue(io, first_stage);
if (requeued && first_stage)
@@ -964,6 +961,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
__dm_start_io_acct(io);
dm_end_io_acct(io);
}
+ requeue_flush_with_data = io->requeue_flush_with_data;
free_io(io);
smp_wmb();
this_cpu_dec(*md->pending_io);
@@ -976,7 +974,7 @@ static void __dm_io_complete(struct dm_io *io, bool first_stage)
if (requeued)
return;
- if (bio_is_flush_with_data(bio)) {
+ if (unlikely(requeue_flush_with_data)) {
/*
* Preflush done for flush with data, reissue
* without REQ_PREFLUSH.
@@ -1996,12 +1994,30 @@ static void dm_split_and_process_bio(struct mapped_device *md,
}
init_clone_info(&ci, io, map, bio, is_abnormal);
- if (bio->bi_opf & REQ_PREFLUSH) {
+ if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) {
+ /*
+ * The "flush_bypasses_map" is set on targets where it is safe
+ * to skip the map function and submit bios directly to the
+ * underlying block devices - currently, it is set for dm-linear
+ * and dm-stripe.
+ *
+ * If we have just one underlying device (i.e. there is one
+ * linear target or multiple linear targets pointing to the same
+ * device), we can send the flush with data directly to it.
+ */
+ if (bio->bi_iter.bi_size && map->flush_bypasses_map) {
+ struct list_head *devices = dm_table_get_devices(map);
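+
+			/* a list where next == prev holds at most one entry */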
+ if (devices->next == devices->prev)
+ goto send_preflush_with_data;
+ }
+ if (bio->bi_iter.bi_size)
+ io->requeue_flush_with_data = true;
__send_empty_flush(&ci);
/* dm_io_complete submits any data associated with flush */
goto out;
}
+send_preflush_with_data:
if (static_branch_unlikely(&zoned_enabled) &&
(bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) {
error = __send_zone_reset_all(&ci);
@@ -2908,7 +2924,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
- int r;
+ int r = 0;
lockdep_assert_held(&md->suspend_lock);
@@ -2960,8 +2976,10 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
- if (dm_request_based(md))
+ if (map && dm_request_based(md)) {
dm_stop_queue(md->queue);
+ set_bit(DMF_QUEUE_STOPPED, &md->flags);
+ }
flush_workqueue(md->wq);
@@ -2970,7 +2988,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
- r = dm_wait_for_completion(md, task_state);
+ if (map)
+ r = dm_wait_for_completion(md, task_state);
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
@@ -2983,7 +3002,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (r < 0) {
dm_queue_flush(md);
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
@@ -3067,7 +3086,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
* so that mapping of targets can work correctly.
* Request-based dm is queueing the deferred I/Os in its request_queue.
*/
- if (dm_request_based(md))
+ if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
dm_start_queue(md->queue);
unlock_fs(md);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 245f52b59215..7a795979ec72 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -109,7 +109,8 @@ void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
- unsigned int nr_zones, report_zones_cb cb, void *data);
+ unsigned int nr_zones,
+ struct blk_report_zones_args *args);
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
sector_t sector, unsigned int nr_zones,
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 334b71404930..84b7e2af6dba 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -34,15 +34,6 @@
#include "md-bitmap.h"
#include "md-cluster.h"
-#define BITMAP_MAJOR_LO 3
-/* version 4 insists the bitmap is in little-endian order
- * with version 3, it is host-endian which is non-portable
- * Version 5 is currently set only for clustered devices
- */
-#define BITMAP_MAJOR_HI 4
-#define BITMAP_MAJOR_CLUSTERED 5
-#define BITMAP_MAJOR_HOSTENDIAN 3
-
/*
* in-memory bitmap:
*
@@ -224,6 +215,8 @@ struct bitmap {
int cluster_slot;
};
+static struct workqueue_struct *md_bitmap_wq;
+
static int __bitmap_resize(struct bitmap *bitmap, sector_t blocks,
int chunksize, bool init);
@@ -232,20 +225,19 @@ static inline char *bmname(struct bitmap *bitmap)
return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}
-static bool __bitmap_enabled(struct bitmap *bitmap)
-{
- return bitmap->storage.filemap &&
- !test_bit(BITMAP_STALE, &bitmap->flags);
-}
-
-static bool bitmap_enabled(struct mddev *mddev)
+static bool bitmap_enabled(void *data, bool flush)
{
- struct bitmap *bitmap = mddev->bitmap;
+ struct bitmap *bitmap = data;
- if (!bitmap)
- return false;
+ if (!flush)
+ return true;
- return __bitmap_enabled(bitmap);
+ /*
+	 * If the caller wants to flush bitmap pages to the underlying disks,
+	 * check whether there are cached pages in the filemap.
+ */
+ return !test_bit(BITMAP_STALE, &bitmap->flags) &&
+ bitmap->storage.filemap != NULL;
}
/*
@@ -484,7 +476,8 @@ static int __write_sb_page(struct md_rdev *rdev, struct bitmap *bitmap,
return -EINVAL;
}
- md_super_write(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit), page);
+ md_write_metadata(mddev, rdev, sboff + ps, (int)min(size, bitmap_limit),
+ page, 0);
return 0;
}
@@ -1244,7 +1237,7 @@ static void __bitmap_unplug(struct bitmap *bitmap)
int dirty, need_write;
int writing = 0;
- if (!__bitmap_enabled(bitmap))
+ if (!bitmap_enabled(bitmap, true))
return;
/* look at each page to see if there are any set bits that need to be
@@ -1788,15 +1781,9 @@ static bool __bitmap_start_sync(struct bitmap *bitmap, sector_t offset,
sector_t *blocks, bool degraded)
{
bitmap_counter_t *bmc;
- bool rv;
+ bool rv = false;
- if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
- *blocks = 1024;
- return true; /* always resync if no bitmap */
- }
spin_lock_irq(&bitmap->counts.lock);
-
- rv = false;
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
if (bmc) {
/* locked */
@@ -1845,10 +1832,6 @@ static void __bitmap_end_sync(struct bitmap *bitmap, sector_t offset,
bitmap_counter_t *bmc;
unsigned long flags;
- if (bitmap == NULL) {
- *blocks = 1024;
- return;
- }
spin_lock_irqsave(&bitmap->counts.lock, flags);
bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
if (bmc == NULL)
@@ -2060,9 +2043,6 @@ static void bitmap_start_behind_write(struct mddev *mddev)
struct bitmap *bitmap = mddev->bitmap;
int bw;
- if (!bitmap)
- return;
-
atomic_inc(&bitmap->behind_writes);
bw = atomic_read(&bitmap->behind_writes);
if (bw > bitmap->behind_writes_used)
@@ -2076,9 +2056,6 @@ static void bitmap_end_behind_write(struct mddev *mddev)
{
struct bitmap *bitmap = mddev->bitmap;
- if (!bitmap)
- return;
-
if (atomic_dec_and_test(&bitmap->behind_writes))
wake_up(&bitmap->behind_wait);
pr_debug("dec write-behind count %d/%lu\n",
@@ -2593,15 +2570,14 @@ err:
return ret;
}
-static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize,
- bool init)
+static int bitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize)
{
struct bitmap *bitmap = mddev->bitmap;
if (!bitmap)
return 0;
- return __bitmap_resize(bitmap, blocks, chunksize, init);
+ return __bitmap_resize(bitmap, blocks, chunksize, false);
}
static ssize_t
@@ -2990,12 +2966,19 @@ static struct attribute *md_bitmap_attrs[] = {
&max_backlog_used.attr,
NULL
};
-const struct attribute_group md_bitmap_group = {
+
+static struct attribute_group md_bitmap_group = {
.name = "bitmap",
.attrs = md_bitmap_attrs,
};
static struct bitmap_operations bitmap_ops = {
+ .head = {
+ .type = MD_BITMAP,
+ .id = ID_BITMAP,
+ .name = "bitmap",
+ },
+
.enabled = bitmap_enabled,
.create = bitmap_create,
.resize = bitmap_resize,
@@ -3013,6 +2996,9 @@ static struct bitmap_operations bitmap_ops = {
.start_write = bitmap_start_write,
.end_write = bitmap_end_write,
+ .start_discard = bitmap_start_write,
+ .end_discard = bitmap_end_write,
+
.start_sync = bitmap_start_sync,
.end_sync = bitmap_end_sync,
.cond_end_sync = bitmap_cond_end_sync,
@@ -3026,9 +3012,22 @@ static struct bitmap_operations bitmap_ops = {
.copy_from_slot = bitmap_copy_from_slot,
.set_pages = bitmap_set_pages,
.free = md_bitmap_free,
+
+ .group = &md_bitmap_group,
};
-void mddev_set_bitmap_ops(struct mddev *mddev)
+int md_bitmap_init(void)
+{
+ md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 0);
+ if (!md_bitmap_wq)
+ return -ENOMEM;
+
+ return register_md_submodule(&bitmap_ops.head);
+}
+
+void md_bitmap_exit(void)
{
- mddev->bitmap_ops = &bitmap_ops;
+ destroy_workqueue(md_bitmap_wq);
+ unregister_md_submodule(&bitmap_ops.head);
}
diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h
index 59e9dd45cfde..b42a28fa83a0 100644
--- a/drivers/md/md-bitmap.h
+++ b/drivers/md/md-bitmap.h
@@ -9,10 +9,26 @@
#define BITMAP_MAGIC 0x6d746962
+/*
+ * version 3 is host-endian order; it is deprecated and not used for new
+ * arrays
+ */
+#define BITMAP_MAJOR_LO 3
+#define BITMAP_MAJOR_HOSTENDIAN 3
+/* version 4 is little-endian order, the default value */
+#define BITMAP_MAJOR_HI 4
+/* version 5 is only used for clustered arrays */
+#define BITMAP_MAJOR_CLUSTERED 5
+/* version 6 is only used for the lockless bitmap */
+#define BITMAP_MAJOR_LOCKLESS 6
+
/* use these for bitmap->flags and bitmap->sb->state bit-fields */
enum bitmap_state {
- BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */
+ BITMAP_STALE = 1, /* the bitmap file is out of date or had -EIO */
BITMAP_WRITE_ERROR = 2, /* A write error has occurred */
+ BITMAP_FIRST_USE = 3, /* llbitmap is just created */
+ BITMAP_CLEAN = 4, /* llbitmap is created with assume_clean */
+ BITMAP_DAEMON_BUSY = 5, /* llbitmap daemon is not finished after daemon_sleep */
BITMAP_HOSTENDIAN =15,
};
@@ -61,11 +77,15 @@ struct md_bitmap_stats {
struct file *file;
};
+typedef void (md_bitmap_fn)(struct mddev *mddev, sector_t offset,
+ unsigned long sectors);
+
struct bitmap_operations {
- bool (*enabled)(struct mddev *mddev);
+ struct md_submodule_head head;
+
+ bool (*enabled)(void *data, bool flush);
int (*create)(struct mddev *mddev);
- int (*resize)(struct mddev *mddev, sector_t blocks, int chunksize,
- bool init);
+ int (*resize)(struct mddev *mddev, sector_t blocks, int chunksize);
int (*load)(struct mddev *mddev);
void (*destroy)(struct mddev *mddev);
@@ -80,10 +100,13 @@ struct bitmap_operations {
void (*end_behind_write)(struct mddev *mddev);
void (*wait_behind_writes)(struct mddev *mddev);
- void (*start_write)(struct mddev *mddev, sector_t offset,
- unsigned long sectors);
- void (*end_write)(struct mddev *mddev, sector_t offset,
- unsigned long sectors);
+ md_bitmap_fn *start_write;
+ md_bitmap_fn *end_write;
+ md_bitmap_fn *start_discard;
+ md_bitmap_fn *end_discard;
+
+ sector_t (*skip_sync_blocks)(struct mddev *mddev, sector_t offset);
+ bool (*blocks_synced)(struct mddev *mddev, sector_t offset);
bool (*start_sync)(struct mddev *mddev, sector_t offset,
sector_t *blocks, bool degraded);
void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
@@ -101,9 +124,75 @@ struct bitmap_operations {
sector_t *hi, bool clear_bits);
void (*set_pages)(void *data, unsigned long pages);
void (*free)(void *data);
+
+ struct attribute_group *group;
};
/* the bitmap API */
-void mddev_set_bitmap_ops(struct mddev *mddev);
+static inline bool md_bitmap_registered(struct mddev *mddev)
+{
+ return mddev->bitmap_ops != NULL;
+}
+
+static inline bool md_bitmap_enabled(struct mddev *mddev, bool flush)
+{
+ /* bitmap_ops must be registered before creating bitmap. */
+ if (!md_bitmap_registered(mddev))
+ return false;
+
+ if (!mddev->bitmap)
+ return false;
+
+ return mddev->bitmap_ops->enabled(mddev->bitmap, flush);
+}
+
+static inline bool md_bitmap_start_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded)
+{
+ /* always resync if no bitmap */
+ if (!md_bitmap_enabled(mddev, false)) {
+ *blocks = 1024;
+ return true;
+ }
+
+ return mddev->bitmap_ops->start_sync(mddev, offset, blocks, degraded);
+}
+
+static inline void md_bitmap_end_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks)
+{
+ if (!md_bitmap_enabled(mddev, false)) {
+ *blocks = 1024;
+ return;
+ }
+
+ mddev->bitmap_ops->end_sync(mddev, offset, blocks);
+}
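+
+/*
+ * Rough sketch of how a raid personality's resync loop is expected to use the
+ * helpers above (the real callers are e.g. raid1/raid10 sync_request):
+ *
+ *	sector_t blocks;
+ *
+ *	if (!md_bitmap_start_sync(mddev, sector, &blocks, degraded))
+ *		return blocks;	// region already in sync, skip it
+ *	// ... resync up to 'blocks' sectors from 'sector' ...
+ *	// on failure, md_bitmap_end_sync() marks the region for retry
+ */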
+
+#ifdef CONFIG_MD_BITMAP
+int md_bitmap_init(void);
+void md_bitmap_exit(void);
+#else
+static inline int md_bitmap_init(void)
+{
+ return 0;
+}
+static inline void md_bitmap_exit(void)
+{
+}
+#endif
+
+#ifdef CONFIG_MD_LLBITMAP
+int md_llbitmap_init(void);
+void md_llbitmap_exit(void);
+#else
+static inline int md_llbitmap_init(void)
+{
+ return 0;
+}
+static inline void md_llbitmap_exit(void)
+{
+}
+#endif
#endif
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 5497eaee96e7..11f1e91d387d 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -630,7 +630,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0))
ret = mddev->bitmap_ops->resize(mddev,
le64_to_cpu(msg->high),
- 0, false);
+ 0);
break;
default:
ret = -1;
@@ -979,7 +979,7 @@ err:
lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
if (cinfo->lockspace)
- dlm_release_lockspace(cinfo->lockspace, 2);
+ dlm_release_lockspace(cinfo->lockspace, DLM_RELEASE_NORMAL);
mddev->cluster_info = NULL;
kfree(cinfo);
return ret;
@@ -1042,7 +1042,7 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->resync_lockres);
lockres_free(cinfo->bitmap_lockres);
unlock_all_bitmaps(mddev);
- dlm_release_lockspace(cinfo->lockspace, 2);
+ dlm_release_lockspace(cinfo->lockspace, DLM_RELEASE_NORMAL);
kfree(cinfo);
return 0;
}
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index 5d9b08115375..8d7b82c4a723 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -72,8 +72,11 @@ static int linear_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
+ lim.logical_block_size = mddev->logical_block_size;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
+ lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
return err;
@@ -256,18 +259,10 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
if (unlikely(bio_end_sector(bio) > end_sector)) {
/* This bio crosses a device boundary, so we have to split it */
- struct bio *split = bio_split(bio, end_sector - bio_sector,
- GFP_NOIO, &mddev->bio_set);
-
- if (IS_ERR(split)) {
- bio->bi_status = errno_to_blk_status(PTR_ERR(split));
- bio_endio(bio);
+ bio = bio_submit_split_bioset(bio, end_sector - bio_sector,
+ &mddev->bio_set);
+ if (!bio)
return true;
- }
-
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
}
md_account_bio(mddev, &bio);
diff --git a/drivers/md/md-llbitmap.c b/drivers/md/md-llbitmap.c
new file mode 100644
index 000000000000..9c1ade19b774
--- /dev/null
+++ b/drivers/md/md-llbitmap.c
@@ -0,0 +1,1626 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/timer.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <trace/events/block.h>
+
+#include "md.h"
+#include "md-bitmap.h"
+
+/*
+ * #### Background
+ *
+ * Redundant data is used to enhance data fault tolerance, and the storage
+ * methods for redundant data vary depending on the RAID level. It is
+ * important to maintain the consistency of this redundant data.
+ *
+ * The bitmap is used to record which data blocks have been synchronized and
+ * which ones need to be resynchronized or recovered. Each bit in the bitmap
+ * represents a segment of data in the array. When a bit is set, it indicates
+ * that the multiple redundant copies of that data segment may not be
+ * consistent. Data synchronization can be performed based on the bitmap after
+ * a power failure or after re-adding a disk. If there is no bitmap, a full
+ * disk synchronization is required.
+ *
+ * #### Key Features
+ *
+ * - The IO fast path is lockless; if the user issues lots of write IO to the
+ *   same bitmap bit in a short time, only the first write has the additional
+ *   overhead of updating the bitmap bit, and there is no additional overhead
+ *   for the following writes;
+ * - Only written data is resynced or recovered, meaning that when creating a
+ *   new array or replacing a disk with a new one, there is no need to do a
+ *   full disk resync/recovery;
+ *
+ * #### Key Concept
+ *
+ * ##### State Machine
+ *
+ * Each bit is one byte and can hold 6 different states, see llbitmap_state.
+ * There are 8 different actions, see llbitmap_action, that can change state:
+ *
+ * llbitmap state machine: transitions between states
+ *
+ * | | Startwrite | Startsync | Endsync | Abortsync|
+ * | --------- | ---------- | --------- | ------- | ------- |
+ * | Unwritten | Dirty | x | x | x |
+ * | Clean | Dirty | x | x | x |
+ * | Dirty | x | x | x | x |
+ * | NeedSync | x | Syncing | x | x |
+ * | Syncing | x | Syncing | Dirty | NeedSync |
+ *
+ * | | Reload | Daemon | Discard | Stale |
+ * | --------- | -------- | ------ | --------- | --------- |
+ * | Unwritten | x | x | x | x |
+ * | Clean | x | x | Unwritten | NeedSync |
+ * | Dirty | NeedSync | Clean | Unwritten | NeedSync |
+ * | NeedSync | x | x | Unwritten | x |
+ * | Syncing | NeedSync | x | Unwritten | NeedSync |
+ *
+ * Typical scenarios:
+ *
+ * 1) Create new array
+ * All bits will be set to Unwritten by default, if --assume-clean is set,
+ * all bits will be set to Clean instead.
+ *
+ * 2) write data; raid1/raid10 have a full copy of the data, while raid456
+ * doesn't and relies on xor data
+ *
+ * 2.1) write new data to raid1/raid10:
+ * Unwritten --StartWrite--> Dirty
+ *
+ * 2.2) write new data to raid456:
+ * Unwritten --StartWrite--> NeedSync
+ *
+ * Because the initial recovery for raid456 is skipped, the xor data is not
+ * built yet; the bit must be set to NeedSync first, and after the lazy initial
+ * recovery is finished, the bit will finally be set to Dirty (see 5.1 and 5.4);
+ *
+ * 2.3) overwrite existing data
+ * Clean --StartWrite--> Dirty
+ *
+ * 3) daemon, if the array is not degraded:
+ * Dirty --Daemon--> Clean
+ *
+ * 4) discard
+ * {Clean, Dirty, NeedSync, Syncing} --Discard--> Unwritten
+ *
+ * 5) resync and recover
+ *
+ * 5.1) common process
+ * NeedSync --Startsync--> Syncing --Endsync--> Dirty --Daemon--> Clean
+ *
+ * 5.2) resync after power failure
+ * Dirty --Reload--> NeedSync
+ *
+ * 5.3) recovery while replacing with a new disk
+ * By default, the old bitmap framework will recover all data; llbitmap
+ * changes this with a new helper, see llbitmap_skip_sync_blocks:
+ *
+ * skip recovery for bits other than dirty or clean;
+ *
+ * 5.4) lazy initial recovery for raid5:
+ * By default, the old bitmap framework only allows a new recovery when there
+ * are spares (new disks); a new recovery flag MD_RECOVERY_LAZY_RECOVER is
+ * added to perform raid456 lazy recovery for set bits (from 2.2).
+ *
+ * 6) special handling for degraded arrays:
+ *
+ * - Dirty bits will never be cleared, the daemon will just do nothing, so
+ *   that if a disk is re-added, Clean bits can be skipped during recovery;
+ * - Dirty bits will convert to Syncing from start sync, to do data recovery
+ *   for newly added disks;
+ * - New writes will convert bits to NeedSync directly;
+ *
+ * ##### Bitmap IO
+ *
+ * ##### Chunksize
+ *
+ * The default bitmap size is 128k, including the 1k bitmap super block, and
+ * the default size of the segment of data in the array covered by each bit
+ * (the chunksize) is 64k. The chunksize is doubled each time the total number
+ * of bits would otherwise exceed the available space, 127k by default (see
+ * llbitmap_init).
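+ *
+ * A worked example (assuming the default 127k of usable bits): for a 1 TiB
+ * array (2^31 sectors), a 64k chunk would need 2^24 bits, so the chunksize
+ * is doubled 8 times to 16 MiB, leaving 2^31 / 2^15 = 65536 chunks, which
+ * fits.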
+ *
+ * ##### READ
+ *
+ * When the bitmap is created, all pages will be allocated and read for
+ * llbitmap; there won't be any reads afterwards.
+ *
+ * ##### WRITE
+ *
+ * WRITE IO is divided into blocks of the array's logical_block_size, and the
+ * dirty state of each block is tracked independently. For example:
+ *
+ * each page is 4k and contains 8 blocks; each block is 512 bytes and contains
+ * 512 bits;
+ *
+ * | page0 | page1 | ... | page 31 |
+ * | |
+ * | \-----------------------\
+ * | |
+ * | block0 | block1 | ... | block 8|
+ * | |
+ * | \-----------------\
+ * | |
+ * | bit0 | bit1 | ... | bit511 |
+ *
+ * From the IO path, if one bit is changed to Dirty or NeedSync, the
+ * corresponding subpage will be marked dirty, and such a block must be written
+ * first, before the IO is issued. This behaviour will affect IO performance;
+ * to reduce the impact, if multiple bits are changed in the same block in a
+ * short time, all bits in this block will be changed to Dirty/NeedSync, so
+ * that there won't be any further overhead until the daemon clears the dirty
+ * bits.
+ *
+ * ##### Dirty Bits synchronization
+ *
+ * The IO fast path will set bits to dirty, and those dirty bits will be
+ * cleared by the daemon after the IO is done. llbitmap_page_ctl is used to
+ * synchronize between the IO path and the daemon;
+ *
+ * IO path:
+ * 1) try to grab a reference; if this succeeds, set the expire time 5s ahead
+ * and return;
+ * 2) if grabbing a reference fails, wait for the daemon to finish clearing
+ * dirty bits;
+ *
+ * Daemon (the daemon will be woken up every daemon_sleep seconds):
+ * For each page:
+ * 1) check if the page expired; if not, skip this page; for an expired page:
+ * 2) suspend the page and wait for inflight write IO to be done;
+ * 3) change the dirty page to clean;
+ * 4) resume the page;
+ */
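+
+/*
+ * A minimal sketch of the protocol above, mirroring the percpu_ref helpers
+ * defined later in this file (llbitmap_raise_barrier, llbitmap_suspend_timeout
+ * and llbitmap_resume); clear_dirty_bits() stands in for the state-machine
+ * call made by the daemon:
+ *
+ *	void io_path(struct llbitmap *llbitmap, struct llbitmap_page_ctl *pctl)
+ *	{
+ *		while (!percpu_ref_tryget_live(&pctl->active))
+ *			wait_event(pctl->wait,
+ *				   !percpu_ref_is_dying(&pctl->active));
+ *		WRITE_ONCE(pctl->expire,
+ *			   jiffies + llbitmap->barrier_idle * HZ);
+ *	}
+ *
+ *	void daemon(struct llbitmap_page_ctl *pctl)
+ *	{
+ *		percpu_ref_kill(&pctl->active);
+ *		wait_event(pctl->wait, percpu_ref_is_zero(&pctl->active));
+ *		clear_dirty_bits(pctl);
+ *		percpu_ref_resurrect(&pctl->active);
+ *		wake_up(&pctl->wait);
+ *	}
+ */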
+
+#define BITMAP_DATA_OFFSET 1024
+
+/* 64k is the max IO size of sync IO for raid1/raid10 */
+#define MIN_CHUNK_SIZE (64 * 2)
+
+/* By default, daemon will be woken up every 30s */
+#define DEFAULT_DAEMON_SLEEP 30
+
+/*
+ * Dirtied bits that have not been accessed for more than 5s will be cleared
+ * by the daemon.
+ */
+#define DEFAULT_BARRIER_IDLE 5
+
+enum llbitmap_state {
+	/* No valid data; initial state after assembling the array */
+ BitUnwritten = 0,
+ /* data is consistent */
+ BitClean,
+ /* data will be consistent after IO is done, set directly for writes */
+ BitDirty,
+ /*
+	 * data needs to be resynchronized:
+	 * 1) set directly for writes if the array is degraded, to prevent full
+	 * disk synchronization after re-adding a disk;
+	 * 2) reassemble the array after power failure, and dirty bits are
+	 * found after reloading the bitmap;
+	 * 3) set on the first write for raid5, to build initial xor data lazily
+ */
+ BitNeedSync,
+ /* data is synchronizing */
+ BitSyncing,
+ BitStateCount,
+ BitNone = 0xff,
+};
+
+enum llbitmap_action {
+ /* User write new data, this is the only action from IO fast path */
+ BitmapActionStartwrite = 0,
+ /* Start recovery */
+ BitmapActionStartsync,
+ /* Finish recovery */
+ BitmapActionEndsync,
+ /* Failed recovery */
+ BitmapActionAbortsync,
+ /* Reassemble the array */
+ BitmapActionReload,
+ /* Daemon thread is trying to clear dirty bits */
+ BitmapActionDaemon,
+ /* Data is deleted */
+ BitmapActionDiscard,
+ /*
+	 * Bitmap is stale, mark all bits other than BitUnwritten as
+	 * BitNeedSync.
+ */
+ BitmapActionStale,
+ BitmapActionCount,
+ /* Init state is BitUnwritten */
+ BitmapActionInit,
+};
+
+enum llbitmap_page_state {
+ LLPageFlush = 0,
+ LLPageDirty,
+};
+
+struct llbitmap_page_ctl {
+ char *state;
+ struct page *page;
+ unsigned long expire;
+ unsigned long flags;
+ wait_queue_head_t wait;
+ struct percpu_ref active;
+ /* Per block size dirty state, maximum 64k page / 1 sector = 128 */
+ unsigned long dirty[];
+};
+
+struct llbitmap {
+ struct mddev *mddev;
+ struct llbitmap_page_ctl **pctl;
+
+ unsigned int nr_pages;
+ unsigned int io_size;
+ unsigned int blocks_per_page;
+
+ /* shift of one chunk */
+ unsigned long chunkshift;
+	/* size of one chunk in sectors */
+ unsigned long chunksize;
+ /* total number of chunks */
+ unsigned long chunks;
+ unsigned long last_end_sync;
+ /*
+	 * time in seconds after which dirty bits will be cleared if the page
+	 * is not accessed.
+ */
+ unsigned long barrier_idle;
+ /* fires on first BitDirty state */
+ struct timer_list pending_timer;
+ struct work_struct daemon_work;
+
+ unsigned long flags;
+ __u64 events_cleared;
+
+ /* for slow disks */
+ atomic_t behind_writes;
+ wait_queue_head_t behind_wait;
+};
+
+struct llbitmap_unplug_work {
+ struct work_struct work;
+ struct llbitmap *llbitmap;
+ struct completion *done;
+};
+
+static struct workqueue_struct *md_llbitmap_io_wq;
+static struct workqueue_struct *md_llbitmap_unplug_wq;
+
+static char state_machine[BitStateCount][BitmapActionCount] = {
+ [BitUnwritten] = {
+ [BitmapActionStartwrite] = BitDirty,
+ [BitmapActionStartsync] = BitNone,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNone,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitNone,
+ [BitmapActionStale] = BitNone,
+ },
+ [BitClean] = {
+ [BitmapActionStartwrite] = BitDirty,
+ [BitmapActionStartsync] = BitNone,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNone,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNeedSync,
+ },
+ [BitDirty] = {
+ [BitmapActionStartwrite] = BitNone,
+ [BitmapActionStartsync] = BitNone,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNeedSync,
+ [BitmapActionDaemon] = BitClean,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNeedSync,
+ },
+ [BitNeedSync] = {
+ [BitmapActionStartwrite] = BitNone,
+ [BitmapActionStartsync] = BitSyncing,
+ [BitmapActionEndsync] = BitNone,
+ [BitmapActionAbortsync] = BitNone,
+ [BitmapActionReload] = BitNone,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNone,
+ },
+ [BitSyncing] = {
+ [BitmapActionStartwrite] = BitNone,
+ [BitmapActionStartsync] = BitSyncing,
+ [BitmapActionEndsync] = BitDirty,
+ [BitmapActionAbortsync] = BitNeedSync,
+ [BitmapActionReload] = BitNeedSync,
+ [BitmapActionDaemon] = BitNone,
+ [BitmapActionDiscard] = BitUnwritten,
+ [BitmapActionStale] = BitNeedSync,
+ },
+};
+
+static void __llbitmap_flush(struct mddev *mddev);
+
+static enum llbitmap_state llbitmap_read(struct llbitmap *llbitmap, loff_t pos)
+{
+ unsigned int idx;
+ unsigned int offset;
+
+ pos += BITMAP_DATA_OFFSET;
+ idx = pos >> PAGE_SHIFT;
+ offset = offset_in_page(pos);
+
+ return llbitmap->pctl[idx]->state[offset];
+}
+
+/* set all the bits in the subpage as dirty */
+static void llbitmap_infect_dirty_bits(struct llbitmap *llbitmap,
+ struct llbitmap_page_ctl *pctl,
+ unsigned int block)
+{
+ bool level_456 = raid_is_456(llbitmap->mddev);
+ unsigned int io_size = llbitmap->io_size;
+ int pos;
+
+ for (pos = block * io_size; pos < (block + 1) * io_size; pos++) {
+ switch (pctl->state[pos]) {
+ case BitUnwritten:
+ pctl->state[pos] = level_456 ? BitNeedSync : BitDirty;
+ break;
+ case BitClean:
+ pctl->state[pos] = BitDirty;
+ break;
+ }
+ }
+}
+
+static void llbitmap_set_page_dirty(struct llbitmap *llbitmap, int idx,
+ int offset)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[idx];
+ unsigned int io_size = llbitmap->io_size;
+ int block = offset / io_size;
+ int pos;
+
+ if (!test_bit(LLPageDirty, &pctl->flags))
+ set_bit(LLPageDirty, &pctl->flags);
+
+ /*
+	 * For a degraded array, dirty bits will never be cleared, and we must
+	 * resync all the dirty bits; hence skip infecting new dirty bits to
+	 * avoid resyncing unnecessary data.
+ */
+ if (llbitmap->mddev->degraded) {
+ set_bit(block, pctl->dirty);
+ return;
+ }
+
+ /*
+ * The subpage usually contains a total of 512 bits. If any single bit
+ * within the subpage is marked as dirty, the entire sector will be
+ * written. To avoid impacting write performance, when multiple bits
+ * within the same sector are modified within llbitmap->barrier_idle,
+ * all bits in the sector will be collectively marked as dirty at once.
+ */
+ if (test_and_set_bit(block, pctl->dirty)) {
+ llbitmap_infect_dirty_bits(llbitmap, pctl, block);
+ return;
+ }
+
+ for (pos = block * io_size; pos < (block + 1) * io_size; pos++) {
+ if (pos == offset)
+ continue;
+ if (pctl->state[pos] == BitDirty ||
+ pctl->state[pos] == BitNeedSync) {
+ llbitmap_infect_dirty_bits(llbitmap, pctl, block);
+ return;
+ }
+ }
+}
+
+static void llbitmap_write(struct llbitmap *llbitmap, enum llbitmap_state state,
+ loff_t pos)
+{
+ unsigned int idx;
+ unsigned int bit;
+
+ pos += BITMAP_DATA_OFFSET;
+ idx = pos >> PAGE_SHIFT;
+ bit = offset_in_page(pos);
+
+ llbitmap->pctl[idx]->state[bit] = state;
+ if (state == BitDirty || state == BitNeedSync)
+ llbitmap_set_page_dirty(llbitmap, idx, bit);
+}
+
+static struct page *llbitmap_read_page(struct llbitmap *llbitmap, int idx)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ struct page *page = NULL;
+ struct md_rdev *rdev;
+
+ if (llbitmap->pctl && llbitmap->pctl[idx])
+ page = llbitmap->pctl[idx]->page;
+ if (page)
+ return page;
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ rdev_for_each(rdev, mddev) {
+ sector_t sector;
+
+ if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ sector = mddev->bitmap_info.offset +
+ (idx << PAGE_SECTORS_SHIFT);
+
+ if (sync_page_io(rdev, sector, PAGE_SIZE, page, REQ_OP_READ,
+ true))
+ return page;
+
+ md_error(mddev, rdev);
+ }
+
+ __free_page(page);
+ return ERR_PTR(-EIO);
+}
+
+static void llbitmap_write_page(struct llbitmap *llbitmap, int idx)
+{
+ struct page *page = llbitmap->pctl[idx]->page;
+ struct mddev *mddev = llbitmap->mddev;
+ struct md_rdev *rdev;
+ int block;
+
+ for (block = 0; block < llbitmap->blocks_per_page; block++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[idx];
+
+ if (!test_and_clear_bit(block, pctl->dirty))
+ continue;
+
+ rdev_for_each(rdev, mddev) {
+ sector_t sector;
+ sector_t bit_sector = llbitmap->io_size >> SECTOR_SHIFT;
+
+ if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags))
+ continue;
+
+ sector = mddev->bitmap_info.offset + rdev->sb_start +
+ (idx << PAGE_SECTORS_SHIFT) +
+ block * bit_sector;
+ md_write_metadata(mddev, rdev, sector,
+ llbitmap->io_size, page,
+ block * llbitmap->io_size);
+ }
+ }
+}
+
+static void active_release(struct percpu_ref *ref)
+{
+ struct llbitmap_page_ctl *pctl =
+ container_of(ref, struct llbitmap_page_ctl, active);
+
+ wake_up(&pctl->wait);
+}
+
+static void llbitmap_free_pages(struct llbitmap *llbitmap)
+{
+ int i;
+
+ if (!llbitmap->pctl)
+ return;
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ if (!pctl || !pctl->page)
+ break;
+
+ __free_page(pctl->page);
+ percpu_ref_exit(&pctl->active);
+ }
+
+ kfree(llbitmap->pctl[0]);
+ kfree(llbitmap->pctl);
+ llbitmap->pctl = NULL;
+}
+
+static int llbitmap_cache_pages(struct llbitmap *llbitmap)
+{
+ struct llbitmap_page_ctl *pctl;
+ unsigned int nr_pages = DIV_ROUND_UP(llbitmap->chunks +
+ BITMAP_DATA_OFFSET, PAGE_SIZE);
+ unsigned int size = struct_size(pctl, dirty, BITS_TO_LONGS(
+ llbitmap->blocks_per_page));
+ int i;
+
+ llbitmap->pctl = kmalloc_array(nr_pages, sizeof(void *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!llbitmap->pctl)
+ return -ENOMEM;
+
+ size = round_up(size, cache_line_size());
+ pctl = kmalloc_array(nr_pages, size, GFP_KERNEL | __GFP_ZERO);
+ if (!pctl) {
+ kfree(llbitmap->pctl);
+ return -ENOMEM;
+ }
+
+ llbitmap->nr_pages = nr_pages;
+
+ for (i = 0; i < nr_pages; i++, pctl = (void *)pctl + size) {
+ struct page *page = llbitmap_read_page(llbitmap, i);
+
+ llbitmap->pctl[i] = pctl;
+
+ if (IS_ERR(page)) {
+ llbitmap_free_pages(llbitmap);
+ return PTR_ERR(page);
+ }
+
+ if (percpu_ref_init(&pctl->active, active_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
+ __free_page(page);
+ llbitmap_free_pages(llbitmap);
+ return -ENOMEM;
+ }
+
+ pctl->page = page;
+ pctl->state = page_address(page);
+ init_waitqueue_head(&pctl->wait);
+ }
+
+ return 0;
+}
+
+static void llbitmap_init_state(struct llbitmap *llbitmap)
+{
+ enum llbitmap_state state = BitUnwritten;
+ unsigned long i;
+
+ if (test_and_clear_bit(BITMAP_CLEAN, &llbitmap->flags))
+ state = BitClean;
+
+ for (i = 0; i < llbitmap->chunks; i++)
+ llbitmap_write(llbitmap, state, i);
+}
+
+/* The return value is only used from resync, where @start == @end. */
+static enum llbitmap_state llbitmap_state_machine(struct llbitmap *llbitmap,
+ unsigned long start,
+ unsigned long end,
+ enum llbitmap_action action)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ enum llbitmap_state state = BitNone;
+ bool level_456 = raid_is_456(llbitmap->mddev);
+ bool need_resync = false;
+ bool need_recovery = false;
+
+ if (test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags))
+ return BitNone;
+
+ if (action == BitmapActionInit) {
+ llbitmap_init_state(llbitmap);
+ return BitNone;
+ }
+
+ while (start <= end) {
+ enum llbitmap_state c = llbitmap_read(llbitmap, start);
+
+ if (c < 0 || c >= BitStateCount) {
+ pr_err("%s: invalid bit %lu state %d action %d, forcing resync\n",
+ __func__, start, c, action);
+ state = BitNeedSync;
+ goto write_bitmap;
+ }
+
+ if (c == BitNeedSync)
+ need_resync = !mddev->degraded;
+
+ state = state_machine[c][action];
+
+write_bitmap:
+ if (unlikely(mddev->degraded)) {
+ /* For degraded array, mark new data as need sync. */
+ if (state == BitDirty &&
+ action == BitmapActionStartwrite)
+ state = BitNeedSync;
+ /*
+			 * For a degraded array, resync dirty data as well; note
+			 * that if the array is still degraded after resync is
+			 * done, all new data will still be dirty until the
+			 * array is clean.
+ */
+ else if (c == BitDirty &&
+ action == BitmapActionStartsync)
+ state = BitSyncing;
+ } else if (c == BitUnwritten && state == BitDirty &&
+ action == BitmapActionStartwrite && level_456) {
+ /* Delay raid456 initial recovery to first write. */
+ state = BitNeedSync;
+ }
+
+ if (state == BitNone) {
+ start++;
+ continue;
+ }
+
+ llbitmap_write(llbitmap, state, start);
+
+ if (state == BitNeedSync)
+ need_resync = !mddev->degraded;
+ else if (state == BitDirty &&
+ !timer_pending(&llbitmap->pending_timer))
+ mod_timer(&llbitmap->pending_timer,
+ jiffies + mddev->bitmap_info.daemon_sleep * HZ);
+
+ start++;
+ }
+
+ if (need_resync && level_456)
+ need_recovery = true;
+
+ if (need_recovery) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ set_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ } else if (need_resync) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ md_wakeup_thread(mddev->thread);
+ }
+
+ return state;
+}
+
+static void llbitmap_raise_barrier(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+retry:
+ if (likely(percpu_ref_tryget_live(&pctl->active))) {
+ WRITE_ONCE(pctl->expire, jiffies + llbitmap->barrier_idle * HZ);
+ return;
+ }
+
+ wait_event(pctl->wait, !percpu_ref_is_dying(&pctl->active));
+ goto retry;
+}
+
+static void llbitmap_release_barrier(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+ percpu_ref_put(&pctl->active);
+}
+
+static int llbitmap_suspend_timeout(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+ percpu_ref_kill(&pctl->active);
+
+ if (!wait_event_timeout(pctl->wait, percpu_ref_is_zero(&pctl->active),
+ llbitmap->mddev->bitmap_info.daemon_sleep * HZ))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static void llbitmap_resume(struct llbitmap *llbitmap, int page_idx)
+{
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[page_idx];
+
+ pctl->expire = LONG_MAX;
+ percpu_ref_resurrect(&pctl->active);
+ wake_up(&pctl->wait);
+}
+
+static int llbitmap_check_support(struct mddev *mddev)
+{
+ if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
+ pr_notice("md/llbitmap: %s: array with journal cannot have bitmap\n",
+ mdname(mddev));
+ return -EBUSY;
+ }
+
+ if (mddev->bitmap_info.space == 0) {
+ if (mddev->bitmap_info.default_space == 0) {
+ pr_notice("md/llbitmap: %s: no space for bitmap\n",
+ mdname(mddev));
+ return -ENOSPC;
+ }
+ }
+
+ if (!mddev->persistent) {
+ pr_notice("md/llbitmap: %s: array must be persistent\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ if (mddev->bitmap_info.file) {
+ pr_notice("md/llbitmap: %s: doesn't support bitmap file\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ if (mddev->bitmap_info.external) {
+ pr_notice("md/llbitmap: %s: doesn't support external metadata\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ if (mddev_is_dm(mddev)) {
+ pr_notice("md/llbitmap: %s: doesn't support dm-raid\n",
+ mdname(mddev));
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int llbitmap_init(struct llbitmap *llbitmap)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ sector_t blocks = mddev->resync_max_sectors;
+ unsigned long chunksize = MIN_CHUNK_SIZE;
+ unsigned long chunks = DIV_ROUND_UP(blocks, chunksize);
+ unsigned long space = mddev->bitmap_info.space << SECTOR_SHIFT;
+ int ret;
+
+ while (chunks > space) {
+ chunksize = chunksize << 1;
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ }
+
+ llbitmap->barrier_idle = DEFAULT_BARRIER_IDLE;
+ llbitmap->chunkshift = ffz(~chunksize);
+ llbitmap->chunksize = chunksize;
+ llbitmap->chunks = chunks;
+ mddev->bitmap_info.daemon_sleep = DEFAULT_DAEMON_SLEEP;
+
+ ret = llbitmap_cache_pages(llbitmap);
+ if (ret)
+ return ret;
+
+ llbitmap_state_machine(llbitmap, 0, llbitmap->chunks - 1,
+ BitmapActionInit);
+ /* flush initial llbitmap to disk */
+ __llbitmap_flush(mddev);
+
+ return 0;
+}
+
+static int llbitmap_read_sb(struct llbitmap *llbitmap)
+{
+ struct mddev *mddev = llbitmap->mddev;
+ unsigned long daemon_sleep;
+ unsigned long chunksize;
+ unsigned long events;
+ struct page *sb_page;
+ bitmap_super_t *sb;
+ int ret = -EINVAL;
+
+ if (!mddev->bitmap_info.offset) {
+ pr_err("md/llbitmap: %s: no super block found", mdname(mddev));
+ return -EINVAL;
+ }
+
+ sb_page = llbitmap_read_page(llbitmap, 0);
+ if (IS_ERR(sb_page)) {
+ pr_err("md/llbitmap: %s: read super block failed",
+ mdname(mddev));
+ return -EIO;
+ }
+
+ sb = kmap_local_page(sb_page);
+ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) {
+ pr_err("md/llbitmap: %s: invalid super block magic number",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (sb->version != cpu_to_le32(BITMAP_MAJOR_LOCKLESS)) {
+ pr_err("md/llbitmap: %s: invalid super block version",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (memcmp(sb->uuid, mddev->uuid, 16)) {
+ pr_err("md/llbitmap: %s: bitmap superblock UUID mismatch\n",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (mddev->bitmap_info.space == 0) {
+ int room = le32_to_cpu(sb->sectors_reserved);
+
+ if (room)
+ mddev->bitmap_info.space = room;
+ else
+ mddev->bitmap_info.space = mddev->bitmap_info.default_space;
+ }
+ llbitmap->flags = le32_to_cpu(sb->state);
+ if (test_and_clear_bit(BITMAP_FIRST_USE, &llbitmap->flags)) {
+ ret = llbitmap_init(llbitmap);
+ goto out_put_page;
+ }
+
+ chunksize = le32_to_cpu(sb->chunksize);
+ if (!is_power_of_2(chunksize)) {
+ pr_err("md/llbitmap: %s: chunksize not a power of 2",
+ mdname(mddev));
+ goto out_put_page;
+ }
+
+ if (chunksize < DIV_ROUND_UP_SECTOR_T(mddev->resync_max_sectors,
+ mddev->bitmap_info.space << SECTOR_SHIFT)) {
+ pr_err("md/llbitmap: %s: chunksize too small %lu < %llu / %lu",
+ mdname(mddev), chunksize, mddev->resync_max_sectors,
+ mddev->bitmap_info.space);
+ goto out_put_page;
+ }
+
+ daemon_sleep = le32_to_cpu(sb->daemon_sleep);
+ if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT / HZ) {
+ pr_err("md/llbitmap: %s: daemon sleep %lu period out of range",
+ mdname(mddev), daemon_sleep);
+ goto out_put_page;
+ }
+
+ events = le64_to_cpu(sb->events);
+ if (events < mddev->events) {
+ pr_warn("md/llbitmap :%s: bitmap file is out of date (%lu < %llu) -- forcing full recovery",
+ mdname(mddev), events, mddev->events);
+ set_bit(BITMAP_STALE, &llbitmap->flags);
+ }
+
+ sb->sync_size = cpu_to_le64(mddev->resync_max_sectors);
+ mddev->bitmap_info.chunksize = chunksize;
+ mddev->bitmap_info.daemon_sleep = daemon_sleep;
+
+ llbitmap->barrier_idle = DEFAULT_BARRIER_IDLE;
+ llbitmap->chunksize = chunksize;
+ llbitmap->chunks = DIV_ROUND_UP_SECTOR_T(mddev->resync_max_sectors, chunksize);
+ llbitmap->chunkshift = ffz(~chunksize);
+ ret = llbitmap_cache_pages(llbitmap);
+
+out_put_page:
+	kunmap_local(sb);
+	__free_page(sb_page);
+ return ret;
+}
+
+static void llbitmap_pending_timer_fn(struct timer_list *pending_timer)
+{
+ struct llbitmap *llbitmap =
+ container_of(pending_timer, struct llbitmap, pending_timer);
+
+ if (work_busy(&llbitmap->daemon_work)) {
+ pr_warn("md/llbitmap: %s daemon_work not finished in %lu seconds\n",
+ mdname(llbitmap->mddev),
+ llbitmap->mddev->bitmap_info.daemon_sleep);
+ set_bit(BITMAP_DAEMON_BUSY, &llbitmap->flags);
+ return;
+ }
+
+ queue_work(md_llbitmap_io_wq, &llbitmap->daemon_work);
+}
+
+static void md_llbitmap_daemon_fn(struct work_struct *work)
+{
+ struct llbitmap *llbitmap =
+ container_of(work, struct llbitmap, daemon_work);
+ unsigned long start;
+ unsigned long end;
+ bool restart;
+ int idx;
+
+ if (llbitmap->mddev->degraded)
+ return;
+retry:
+ start = 0;
+ end = min(llbitmap->chunks, PAGE_SIZE - BITMAP_DATA_OFFSET) - 1;
+ restart = false;
+
+ for (idx = 0; idx < llbitmap->nr_pages; idx++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[idx];
+
+ if (idx > 0) {
+ start = end + 1;
+ end = min(end + PAGE_SIZE, llbitmap->chunks - 1);
+ }
+
+ if (!test_bit(LLPageFlush, &pctl->flags) &&
+ time_before(jiffies, pctl->expire)) {
+ restart = true;
+ continue;
+ }
+
+ if (llbitmap_suspend_timeout(llbitmap, idx) < 0) {
+ pr_warn("md/llbitmap: %s: %s waiting for page %d timeout\n",
+ mdname(llbitmap->mddev), __func__, idx);
+ continue;
+ }
+
+ llbitmap_state_machine(llbitmap, start, end, BitmapActionDaemon);
+ llbitmap_resume(llbitmap, idx);
+ }
+
+ /*
+ * If the daemon took a long time to finish, retry to prevent missing
+ * clearing dirty bits.
+ */
+ if (test_and_clear_bit(BITMAP_DAEMON_BUSY, &llbitmap->flags))
+ goto retry;
+
+ /* If some page is dirty but not expired, setup timer again */
+ if (restart)
+ mod_timer(&llbitmap->pending_timer,
+ jiffies + llbitmap->mddev->bitmap_info.daemon_sleep * HZ);
+}
+
+static int llbitmap_create(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap;
+ int ret;
+
+ ret = llbitmap_check_support(mddev);
+ if (ret)
+ return ret;
+
+ llbitmap = kzalloc(sizeof(*llbitmap), GFP_KERNEL);
+ if (!llbitmap)
+ return -ENOMEM;
+
+ llbitmap->mddev = mddev;
+ llbitmap->io_size = bdev_logical_block_size(mddev->gendisk->part0);
+ llbitmap->blocks_per_page = PAGE_SIZE / llbitmap->io_size;
+
+ timer_setup(&llbitmap->pending_timer, llbitmap_pending_timer_fn, 0);
+ INIT_WORK(&llbitmap->daemon_work, md_llbitmap_daemon_fn);
+ atomic_set(&llbitmap->behind_writes, 0);
+ init_waitqueue_head(&llbitmap->behind_wait);
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ mddev->bitmap = llbitmap;
+ ret = llbitmap_read_sb(llbitmap);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ if (ret) {
+ kfree(llbitmap);
+ mddev->bitmap = NULL;
+ }
+
+ return ret;
+}
+
+static int llbitmap_resize(struct mddev *mddev, sector_t blocks, int chunksize)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long chunks;
+
+ if (chunksize == 0)
+ chunksize = llbitmap->chunksize;
+
+ /* If there is enough space, leave the chunksize unchanged. */
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ while (chunks > mddev->bitmap_info.space << SECTOR_SHIFT) {
+ chunksize = chunksize << 1;
+ chunks = DIV_ROUND_UP_SECTOR_T(blocks, chunksize);
+ }
+
+ llbitmap->chunkshift = ffz(~chunksize);
+ llbitmap->chunksize = chunksize;
+ llbitmap->chunks = chunks;
+
+ return 0;
+}
+
+static int llbitmap_load(struct mddev *mddev)
+{
+ enum llbitmap_action action = BitmapActionReload;
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (test_and_clear_bit(BITMAP_STALE, &llbitmap->flags))
+ action = BitmapActionStale;
+
+ llbitmap_state_machine(llbitmap, 0, llbitmap->chunks - 1, action);
+ return 0;
+}
+
+static void llbitmap_destroy(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (!llbitmap)
+ return;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+
+ timer_delete_sync(&llbitmap->pending_timer);
+ flush_workqueue(md_llbitmap_io_wq);
+ flush_workqueue(md_llbitmap_unplug_wq);
+
+ mddev->bitmap = NULL;
+ llbitmap_free_pages(llbitmap);
+ kfree(llbitmap);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+}
+
+static void llbitmap_start_write(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = offset >> llbitmap->chunkshift;
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ llbitmap_state_machine(llbitmap, start, end, BitmapActionStartwrite);
+
+ while (page_start <= page_end) {
+ llbitmap_raise_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_end_write(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = offset >> llbitmap->chunkshift;
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ while (page_start <= page_end) {
+ llbitmap_release_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_start_discard(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = DIV_ROUND_UP_SECTOR_T(offset, llbitmap->chunksize);
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ llbitmap_state_machine(llbitmap, start, end, BitmapActionDiscard);
+
+ while (page_start <= page_end) {
+ llbitmap_raise_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_end_discard(struct mddev *mddev, sector_t offset,
+ unsigned long sectors)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long start = DIV_ROUND_UP_SECTOR_T(offset, llbitmap->chunksize);
+ unsigned long end = (offset + sectors - 1) >> llbitmap->chunkshift;
+ int page_start = (start + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+ int page_end = (end + BITMAP_DATA_OFFSET) >> PAGE_SHIFT;
+
+ while (page_start <= page_end) {
+ llbitmap_release_barrier(llbitmap, page_start);
+ page_start++;
+ }
+}
+
+static void llbitmap_unplug_fn(struct work_struct *work)
+{
+ struct llbitmap_unplug_work *unplug_work =
+ container_of(work, struct llbitmap_unplug_work, work);
+ struct llbitmap *llbitmap = unplug_work->llbitmap;
+ struct blk_plug plug;
+ int i;
+
+ blk_start_plug(&plug);
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ if (!test_bit(LLPageDirty, &llbitmap->pctl[i]->flags) ||
+ !test_and_clear_bit(LLPageDirty, &llbitmap->pctl[i]->flags))
+ continue;
+
+ llbitmap_write_page(llbitmap, i);
+ }
+
+ blk_finish_plug(&plug);
+ md_super_wait(llbitmap->mddev);
+ complete(unplug_work->done);
+}
+
+static bool llbitmap_dirty(struct llbitmap *llbitmap)
+{
+ int i;
+
+ for (i = 0; i < llbitmap->nr_pages; i++)
+ if (test_bit(LLPageDirty, &llbitmap->pctl[i]->flags))
+ return true;
+
+ return false;
+}
+
+static void llbitmap_unplug(struct mddev *mddev, bool sync)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct llbitmap *llbitmap = mddev->bitmap;
+ struct llbitmap_unplug_work unplug_work = {
+ .llbitmap = llbitmap,
+ .done = &done,
+ };
+
+ if (!llbitmap_dirty(llbitmap))
+ return;
+
+ /*
+	 * Issuing new bitmap IO under the submit_bio() context will deadlock:
+ * - the bio will wait for bitmap bio to be done, before it can be
+ * issued;
+ * - bitmap bio will be added to current->bio_list and wait for this
+ * bio to be issued;
+ */
+ INIT_WORK_ONSTACK(&unplug_work.work, llbitmap_unplug_fn);
+ queue_work(md_llbitmap_unplug_wq, &unplug_work.work);
+ wait_for_completion(&done);
+ destroy_work_on_stack(&unplug_work.work);
+}
+
+/*
+ * Force writing all bitmap pages to disk; called when stopping the array, or
+ * every daemon_sleep seconds when sync_thread is running.
+ */
+static void __llbitmap_flush(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ struct blk_plug plug;
+ int i;
+
+ blk_start_plug(&plug);
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ /* mark all blocks as dirty */
+ set_bit(LLPageDirty, &pctl->flags);
+ bitmap_fill(pctl->dirty, llbitmap->blocks_per_page);
+ llbitmap_write_page(llbitmap, i);
+ }
+ blk_finish_plug(&plug);
+ md_super_wait(llbitmap->mddev);
+}
+
+static void llbitmap_flush(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ int i;
+
+ for (i = 0; i < llbitmap->nr_pages; i++)
+ set_bit(LLPageFlush, &llbitmap->pctl[i]->flags);
+
+ timer_delete_sync(&llbitmap->pending_timer);
+ queue_work(md_llbitmap_io_wq, &llbitmap->daemon_work);
+ flush_work(&llbitmap->daemon_work);
+
+ __llbitmap_flush(mddev);
+}
+
+/* This is used for raid5 lazy initial recovery */
+static bool llbitmap_blocks_synced(struct mddev *mddev, sector_t offset)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+ enum llbitmap_state c = llbitmap_read(llbitmap, p);
+
+ return c == BitClean || c == BitDirty;
+}
+
+static sector_t llbitmap_skip_sync_blocks(struct mddev *mddev, sector_t offset)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+ int blocks = llbitmap->chunksize - (offset & (llbitmap->chunksize - 1));
+ enum llbitmap_state c = llbitmap_read(llbitmap, p);
+
+ /* always skip unwritten blocks */
+ if (c == BitUnwritten)
+ return blocks;
+
+ /* For degraded array, don't skip */
+ if (mddev->degraded)
+ return 0;
+
+ /* For resync also skip clean/dirty blocks */
+ if ((c == BitClean || c == BitDirty) &&
+ test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
+ return blocks;
+
+ return 0;
+}
+
+static bool llbitmap_start_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks, bool degraded)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+
+ /*
+	 * Handle one bit at a time, this is much simpler, and it doesn't
+	 * matter if md_do_sync() loops more times.
+ */
+ *blocks = llbitmap->chunksize - (offset & (llbitmap->chunksize - 1));
+ return llbitmap_state_machine(llbitmap, p, p,
+ BitmapActionStartsync) == BitSyncing;
+}
+
+/* Something went wrong, sync_thread stopped at @offset */
+static void llbitmap_end_sync(struct mddev *mddev, sector_t offset,
+ sector_t *blocks)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long p = offset >> llbitmap->chunkshift;
+
+ *blocks = llbitmap->chunksize - (offset & (llbitmap->chunksize - 1));
+ llbitmap_state_machine(llbitmap, p, llbitmap->chunks - 1,
+ BitmapActionAbortsync);
+}
+
+/* A full sync_thread is finished */
+static void llbitmap_close_sync(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ int i;
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ /* let daemon_fn clear dirty bits immediately */
+ WRITE_ONCE(pctl->expire, jiffies);
+ }
+
+ llbitmap_state_machine(llbitmap, 0, llbitmap->chunks - 1,
+ BitmapActionEndsync);
+}
+
+/*
+ * sync_thread has reached @sector; update metadata every daemon_sleep seconds,
+ * just in case sync_thread has to restart after a power failure.
+ */
+static void llbitmap_cond_end_sync(struct mddev *mddev, sector_t sector,
+ bool force)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (sector == 0) {
+ llbitmap->last_end_sync = jiffies;
+ return;
+ }
+
+ if (time_before(jiffies, llbitmap->last_end_sync +
+ HZ * mddev->bitmap_info.daemon_sleep))
+ return;
+
+ wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
+
+ mddev->curr_resync_completed = sector;
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ llbitmap_state_machine(llbitmap, 0, sector >> llbitmap->chunkshift,
+ BitmapActionEndsync);
+ __llbitmap_flush(mddev);
+
+ llbitmap->last_end_sync = jiffies;
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
+}
+
+static bool llbitmap_enabled(void *data, bool flush)
+{
+ struct llbitmap *llbitmap = data;
+
+ return llbitmap && !test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags);
+}
+
+static void llbitmap_dirty_bits(struct mddev *mddev, unsigned long s,
+ unsigned long e)
+{
+ llbitmap_state_machine(mddev->bitmap, s, e, BitmapActionStartwrite);
+}
+
+static void llbitmap_write_sb(struct llbitmap *llbitmap)
+{
+ int nr_blocks = DIV_ROUND_UP(BITMAP_DATA_OFFSET, llbitmap->io_size);
+
+ bitmap_fill(llbitmap->pctl[0]->dirty, nr_blocks);
+ llbitmap_write_page(llbitmap, 0);
+ md_super_wait(llbitmap->mddev);
+}
+
+static void llbitmap_update_sb(void *data)
+{
+ struct llbitmap *llbitmap = data;
+ struct mddev *mddev = llbitmap->mddev;
+ struct page *sb_page;
+ bitmap_super_t *sb;
+
+ if (test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags))
+ return;
+
+ sb_page = llbitmap_read_page(llbitmap, 0);
+ if (IS_ERR(sb_page)) {
+ pr_err("%s: %s: read super block failed", __func__,
+ mdname(mddev));
+ set_bit(BITMAP_WRITE_ERROR, &llbitmap->flags);
+ return;
+ }
+
+ if (mddev->events < llbitmap->events_cleared)
+ llbitmap->events_cleared = mddev->events;
+
+ sb = kmap_local_page(sb_page);
+ sb->events = cpu_to_le64(mddev->events);
+ sb->state = cpu_to_le32(llbitmap->flags);
+ sb->chunksize = cpu_to_le32(llbitmap->chunksize);
+ sb->sync_size = cpu_to_le64(mddev->resync_max_sectors);
+ sb->events_cleared = cpu_to_le64(llbitmap->events_cleared);
+ sb->sectors_reserved = cpu_to_le32(mddev->bitmap_info.space);
+ sb->daemon_sleep = cpu_to_le32(mddev->bitmap_info.daemon_sleep);
+
+ kunmap_local(sb);
+ llbitmap_write_sb(llbitmap);
+}
+
+static int llbitmap_get_stats(void *data, struct md_bitmap_stats *stats)
+{
+ struct llbitmap *llbitmap = data;
+
+ memset(stats, 0, sizeof(*stats));
+
+ stats->missing_pages = 0;
+ stats->pages = llbitmap->nr_pages;
+ stats->file_pages = llbitmap->nr_pages;
+
+ stats->behind_writes = atomic_read(&llbitmap->behind_writes);
+ stats->behind_wait = wq_has_sleeper(&llbitmap->behind_wait);
+ stats->events_cleared = llbitmap->events_cleared;
+
+ return 0;
+}
+
+/* just flag all pages as needing to be written */
+static void llbitmap_write_all(struct mddev *mddev)
+{
+ int i;
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ for (i = 0; i < llbitmap->nr_pages; i++) {
+ struct llbitmap_page_ctl *pctl = llbitmap->pctl[i];
+
+ set_bit(LLPageDirty, &pctl->flags);
+ bitmap_fill(pctl->dirty, llbitmap->blocks_per_page);
+ }
+}
+
+static void llbitmap_start_behind_write(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ atomic_inc(&llbitmap->behind_writes);
+}
+
+static void llbitmap_end_behind_write(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (atomic_dec_and_test(&llbitmap->behind_writes))
+ wake_up(&llbitmap->behind_wait);
+}
+
+static void llbitmap_wait_behind_writes(struct mddev *mddev)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ if (!llbitmap)
+ return;
+
+ wait_event(llbitmap->behind_wait,
+ atomic_read(&llbitmap->behind_writes) == 0);
+}
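
A hedged usage sketch for the three behind-write hooks above; the pairing is
inferred from raid1's write-behind path and simplified (the hooks are static
in this file, so this is illustrative only):

	/* Editorial sketch: the writer brackets each behind IO... */
	static void submit_behind(struct mddev *mddev, struct bio *bio)
	{
		llbitmap_start_behind_write(mddev);	/* before issuing */
		submit_bio_noacct(bio);
		/* the completion path calls llbitmap_end_behind_write() */
	}

	/* ...and a reader of a WriteMostly device drains them first. */
	static void read_after_behind(struct mddev *mddev)
	{
		llbitmap_wait_behind_writes(mddev);
	}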
+
+static ssize_t bits_show(struct mddev *mddev, char *page)
+{
+ struct llbitmap *llbitmap;
+ int bits[BitStateCount] = {0};
+ loff_t start = 0;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ llbitmap = mddev->bitmap;
+ if (!llbitmap || !llbitmap->pctl) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "no bitmap\n");
+ }
+
+ if (test_bit(BITMAP_WRITE_ERROR, &llbitmap->flags)) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "bitmap io error\n");
+ }
+
+ while (start < llbitmap->chunks) {
+ enum llbitmap_state c = llbitmap_read(llbitmap, start);
+
+ if (c < 0 || c >= BitStateCount)
+ pr_err("%s: invalid bit %llu state %d\n",
+ __func__, start, c);
+ else
+ bits[c]++;
+ start++;
+ }
+
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "unwritten %d\nclean %d\ndirty %d\nneed sync %d\nsyncing %d\n",
+ bits[BitUnwritten], bits[BitClean], bits[BitDirty],
+ bits[BitNeedSync], bits[BitSyncing]);
+}
+
+static struct md_sysfs_entry llbitmap_bits = __ATTR_RO(bits);
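
For orientation, a hedged example of the output bits_show() produces for a
mostly clean array, assuming the usual /sys/block/mdX/md layout (values are
illustrative only):

	/*
	 *   $ cat /sys/block/md0/md/llbitmap/bits
	 *   unwritten 0
	 *   clean 1021
	 *   dirty 3
	 *   need sync 0
	 *   syncing 0
	 */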
+
+static ssize_t metadata_show(struct mddev *mddev, char *page)
+{
+ struct llbitmap *llbitmap;
+ ssize_t ret;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ llbitmap = mddev->bitmap;
+ if (!llbitmap) {
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ return sprintf(page, "no bitmap\n");
+ }
+
+ ret = sprintf(page, "chunksize %lu\nchunkshift %lu\nchunks %lu\noffset %llu\ndaemon_sleep %lu\n",
+ llbitmap->chunksize, llbitmap->chunkshift,
+ llbitmap->chunks, mddev->bitmap_info.offset,
+ llbitmap->mddev->bitmap_info.daemon_sleep);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+
+ return ret;
+}
+
+static struct md_sysfs_entry llbitmap_metadata = __ATTR_RO(metadata);
+
+static ssize_t
+daemon_sleep_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%lu\n", mddev->bitmap_info.daemon_sleep);
+}
+
+static ssize_t
+daemon_sleep_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ unsigned long timeout;
+ int rv = kstrtoul(buf, 10, &timeout);
+
+ if (rv)
+ return rv;
+
+ mddev->bitmap_info.daemon_sleep = timeout;
+ return len;
+}
+
+static struct md_sysfs_entry llbitmap_daemon_sleep = __ATTR_RW(daemon_sleep);
+
+static ssize_t
+barrier_idle_show(struct mddev *mddev, char *page)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+
+ return sprintf(page, "%lu\n", llbitmap->barrier_idle);
+}
+
+static ssize_t
+barrier_idle_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ struct llbitmap *llbitmap = mddev->bitmap;
+ unsigned long timeout;
+ int rv = kstrtoul(buf, 10, &timeout);
+
+ if (rv)
+ return rv;
+
+ llbitmap->barrier_idle = timeout;
+ return len;
+}
+
+static struct md_sysfs_entry llbitmap_barrier_idle = __ATTR_RW(barrier_idle);
+
+static struct attribute *md_llbitmap_attrs[] = {
+ &llbitmap_bits.attr,
+ &llbitmap_metadata.attr,
+ &llbitmap_daemon_sleep.attr,
+ &llbitmap_barrier_idle.attr,
+ NULL
+};
+
+static struct attribute_group md_llbitmap_group = {
+ .name = "llbitmap",
+ .attrs = md_llbitmap_attrs,
+};
+
+static struct bitmap_operations llbitmap_ops = {
+ .head = {
+ .type = MD_BITMAP,
+ .id = ID_LLBITMAP,
+ .name = "llbitmap",
+ },
+
+ .enabled = llbitmap_enabled,
+ .create = llbitmap_create,
+ .resize = llbitmap_resize,
+ .load = llbitmap_load,
+ .destroy = llbitmap_destroy,
+
+ .start_write = llbitmap_start_write,
+ .end_write = llbitmap_end_write,
+ .start_discard = llbitmap_start_discard,
+ .end_discard = llbitmap_end_discard,
+ .unplug = llbitmap_unplug,
+ .flush = llbitmap_flush,
+
+ .start_behind_write = llbitmap_start_behind_write,
+ .end_behind_write = llbitmap_end_behind_write,
+ .wait_behind_writes = llbitmap_wait_behind_writes,
+
+ .blocks_synced = llbitmap_blocks_synced,
+ .skip_sync_blocks = llbitmap_skip_sync_blocks,
+ .start_sync = llbitmap_start_sync,
+ .end_sync = llbitmap_end_sync,
+ .close_sync = llbitmap_close_sync,
+ .cond_end_sync = llbitmap_cond_end_sync,
+
+ .update_sb = llbitmap_update_sb,
+ .get_stats = llbitmap_get_stats,
+ .dirty_bits = llbitmap_dirty_bits,
+ .write_all = llbitmap_write_all,
+
+ .group = &md_llbitmap_group,
+};
+
+int md_llbitmap_init(void)
+{
+ md_llbitmap_io_wq = alloc_workqueue("md_llbitmap_io",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!md_llbitmap_io_wq)
+ return -ENOMEM;
+
+ md_llbitmap_unplug_wq = alloc_workqueue("md_llbitmap_unplug",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!md_llbitmap_unplug_wq) {
+ destroy_workqueue(md_llbitmap_io_wq);
+ md_llbitmap_io_wq = NULL;
+ return -ENOMEM;
+ }
+
+ return register_md_submodule(&llbitmap_ops.head);
+}
+
+void md_llbitmap_exit(void)
+{
+ destroy_workqueue(md_llbitmap_io_wq);
+ md_llbitmap_io_wq = NULL;
+ destroy_workqueue(md_llbitmap_unplug_wq);
+ md_llbitmap_unplug_wq = NULL;
+ unregister_md_submodule(&llbitmap_ops.head);
+}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e033c26fdd4..e5922a682953 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -94,13 +94,12 @@ static struct workqueue_struct *md_wq;
* workqueue with reconfig_mutex grabbed.
*/
static struct workqueue_struct *md_misc_wq;
-struct workqueue_struct *md_bitmap_wq;
static int remove_and_add_spares(struct mddev *mddev,
struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);
static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
-static void md_wakeup_thread_directly(struct md_thread __rcu *thread);
+static void md_wakeup_thread_directly(struct md_thread __rcu **thread);
/*
* Default number of read corrections we'll attempt on an rdev
@@ -340,6 +339,7 @@ static int start_readonly;
*/
static bool create_on_open = true;
static bool legacy_async_del_gendisk = true;
+static bool check_new_feature = true;
/*
* We have a system wide 'event count' that is incremented
@@ -677,8 +677,66 @@ static void active_io_release(struct percpu_ref *ref)
static void no_op(struct percpu_ref *r) {}
+static bool mddev_set_bitmap_ops(struct mddev *mddev)
+{
+ struct bitmap_operations *old = mddev->bitmap_ops;
+ struct md_submodule_head *head;
+
+ if (mddev->bitmap_id == ID_BITMAP_NONE ||
+ (old && old->head.id == mddev->bitmap_id))
+ return true;
+
+ xa_lock(&md_submodule);
+ head = xa_load(&md_submodule, mddev->bitmap_id);
+
+ if (!head) {
+ pr_warn("md: can't find bitmap id %d\n", mddev->bitmap_id);
+ goto err;
+ }
+
+ if (head->type != MD_BITMAP) {
+ pr_warn("md: invalid bitmap id %d\n", mddev->bitmap_id);
+ goto err;
+ }
+
+ mddev->bitmap_ops = (void *)head;
+ xa_unlock(&md_submodule);
+
+ if (!mddev_is_dm(mddev) && mddev->bitmap_ops->group) {
+ if (sysfs_create_group(&mddev->kobj, mddev->bitmap_ops->group))
+ pr_warn("md: cannot register extra bitmap attributes for %s\n",
+ mdname(mddev));
+ else
+ /*
+ * Inform user with KOBJ_CHANGE about new bitmap
+ * attributes.
+ */
+ kobject_uevent(&mddev->kobj, KOBJ_CHANGE);
+ }
+ return true;
+
+err:
+ xa_unlock(&md_submodule);
+ return false;
+}
+
+static void mddev_clear_bitmap_ops(struct mddev *mddev)
+{
+ if (!mddev_is_dm(mddev) && mddev->bitmap_ops &&
+ mddev->bitmap_ops->group)
+ sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->group);
+
+ mddev->bitmap_ops = NULL;
+}
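
A hedged sketch of the registration side this lookup assumes:
register_md_submodule() is expected to insert the head into the md_submodule
xarray keyed by its id (a sketch of the assumption, not the actual
implementation):

	/* Editorial sketch of the assumed registration counterpart. */
	static int register_md_submodule_sketch(struct md_submodule_head *head)
	{
		return xa_insert(&md_submodule, head->id, head, GFP_KERNEL);
	}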
+
int mddev_init(struct mddev *mddev)
{
+ int err = 0;
+
+ if (!IS_ENABLED(CONFIG_MD_BITMAP))
+ mddev->bitmap_id = ID_BITMAP_NONE;
+ else
+ mddev->bitmap_id = ID_BITMAP;
if (percpu_ref_init(&mddev->active_io, active_io_release,
PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
@@ -686,10 +744,23 @@ int mddev_init(struct mddev *mddev)
if (percpu_ref_init(&mddev->writes_pending, no_op,
PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
- percpu_ref_exit(&mddev->active_io);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto exit_active_io;
}
+ err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ if (err)
+ goto exit_writes_pending;
+
+ err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ if (err)
+ goto exit_bio_set;
+
+ err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE,
+ offsetof(struct md_io_clone, bio_clone), 0);
+ if (err)
+ goto exit_sync_set;
+
/* We want to start with the refcount at zero */
percpu_ref_put(&mddev->writes_pending);
@@ -713,17 +784,29 @@ int mddev_init(struct mddev *mddev)
mddev->resync_min = 0;
mddev->resync_max = MaxSector;
mddev->level = LEVEL_NONE;
- mddev_set_bitmap_ops(mddev);
INIT_WORK(&mddev->sync_work, md_start_sync);
INIT_WORK(&mddev->del_work, mddev_delayed_delete);
return 0;
+
+exit_sync_set:
+ bioset_exit(&mddev->sync_set);
+exit_bio_set:
+ bioset_exit(&mddev->bio_set);
+exit_writes_pending:
+ percpu_ref_exit(&mddev->writes_pending);
+exit_active_io:
+ percpu_ref_exit(&mddev->active_io);
+ return err;
}
EXPORT_SYMBOL_GPL(mddev_init);
void mddev_destroy(struct mddev *mddev)
{
+ bioset_exit(&mddev->bio_set);
+ bioset_exit(&mddev->sync_set);
+ bioset_exit(&mddev->io_clone_set);
percpu_ref_exit(&mddev->active_io);
percpu_ref_exit(&mddev->writes_pending);
}
@@ -887,8 +970,11 @@ void mddev_unlock(struct mddev *mddev)
* do_md_stop. dm raid only uses md_stop to stop. So dm raid
* doesn't need to check MD_DELETED when getting reconfig lock
*/
- if (test_bit(MD_DELETED, &mddev->flags))
+ if (test_bit(MD_DELETED, &mddev->flags) &&
+ !test_and_set_bit(MD_DO_DELETE, &mddev->flags)) {
+ kobject_del(&mddev->kobj);
del_gendisk(mddev->gendisk);
+ }
}
}
EXPORT_SYMBOL_GPL(mddev_unlock);
@@ -1020,15 +1106,26 @@ static void super_written(struct bio *bio)
wake_up(&mddev->sb_wait);
}
-void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
- sector_t sector, int size, struct page *page)
+/**
+ * md_write_metadata - write metadata to underlying disk, including
+ * array superblock, badblocks, bitmap superblock and bitmap bits.
+ * @mddev: the array to write
+ * @rdev: the underlying disk to write
+ * @sector: the offset to @rdev
+ * @size: the length of the metadata
+ * @page: the metadata
+ * @offset: the offset to @page
+ *
+ * Write @size bytes of @page, starting at @offset, to @sector of @rdev.
+ * Increment mddev->pending_writes before returning, and decrement it on
+ * completion, waking up sb_wait. The caller must call md_super_wait() after
+ * issuing IO to all rdevs. If an error occurs, md_error() will be called and
+ * @rdev will be kicked out of @mddev.
+ */
+void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
+ sector_t sector, int size, struct page *page,
+ unsigned int offset)
{
- /* write first size bytes of page to sector of rdev
- * Increment mddev->pending_writes before returning
- * and decrement it on completion, waking up sb_wait
- * if zero is reached.
- * If an error occurred, call md_error
- */
struct bio *bio;
if (!page)
@@ -1046,7 +1143,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector;
- __bio_add_page(bio, page, size, 0);
+ __bio_add_page(bio, page, size, offset);
bio->bi_private = rdev;
bio->bi_end_io = super_written;
@@ -1356,6 +1453,9 @@ static u64 md_bitmap_events_cleared(struct mddev *mddev)
struct md_bitmap_stats stats;
int err;
+ if (!md_bitmap_enabled(mddev, false))
+ return 0;
+
err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
if (err)
return 0;
@@ -1653,8 +1753,8 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
num_sectors = (sector_t)(2ULL << 32) - 2;
do {
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
+ md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
+ rdev->sb_size, rdev->sb_page, 0);
} while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
}
@@ -1752,9 +1852,13 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
}
if (sb->pad0 ||
sb->pad3[0] ||
- memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1])))
- /* Some padding is non-zero, might be a new feature */
- return -EINVAL;
+ memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) {
+ pr_warn("Some padding is non-zero on %pg, might be a new feature\n",
+ rdev->bdev);
+ if (check_new_feature)
+ return -EINVAL;
+ pr_warn("check_new_feature is disabled, data corruption possible\n");
+ }
rdev->preferred_minor = 0xffff;
rdev->data_offset = le64_to_cpu(sb->data_offset);
@@ -1895,6 +1999,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
mddev->layout = le32_to_cpu(sb->layout);
mddev->raid_disks = le32_to_cpu(sb->raid_disks);
mddev->dev_sectors = le64_to_cpu(sb->size);
+ mddev->logical_block_size = le32_to_cpu(sb->logical_block_size);
mddev->events = ev1;
mddev->bitmap_info.offset = 0;
mddev->bitmap_info.space = 0;
@@ -2104,6 +2209,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
sb->level = cpu_to_le32(mddev->level);
sb->layout = cpu_to_le32(mddev->layout);
+ sb->logical_block_size = cpu_to_le32(mddev->logical_block_size);
if (test_bit(FailFast, &rdev->flags))
sb->devflags |= FailFast1;
else
@@ -2302,8 +2408,8 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
do {
- md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
+ md_write_metadata(rdev->mddev, rdev, rdev->sb_start,
+ rdev->sb_size, rdev->sb_page, 0);
} while (md_super_wait(rdev->mddev) < 0);
return num_sectors;
@@ -2313,13 +2419,15 @@ static int
super_1_allow_new_offset(struct md_rdev *rdev,
unsigned long long new_offset)
{
+ struct mddev *mddev = rdev->mddev;
+
/* All necessary checks on new >= old have been done */
if (new_offset >= rdev->data_offset)
return 1;
/* with 1.0 metadata, there is no metadata to tread on
* so we can always move back */
- if (rdev->mddev->minor_version == 0)
+ if (mddev->minor_version == 0)
return 1;
/* otherwise we must be sure not to step on
@@ -2331,8 +2439,7 @@ super_1_allow_new_offset(struct md_rdev *rdev,
if (rdev->sb_start + (32+4)*2 > new_offset)
return 0;
- if (!rdev->mddev->bitmap_info.file) {
- struct mddev *mddev = rdev->mddev;
+ if (md_bitmap_registered(mddev) && !mddev->bitmap_info.file) {
struct md_bitmap_stats stats;
int err;
@@ -2681,6 +2788,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
if (!md_is_rdwr(mddev)) {
if (force_change)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ pr_err("%s: can't update sb for read-only array %s\n", __func__, mdname(mddev));
return;
}
@@ -2804,24 +2912,24 @@ repeat:
mddev_add_trace_msg(mddev, "md md_update_sb");
rewrite:
- mddev->bitmap_ops->update_sb(mddev->bitmap);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->update_sb(mddev->bitmap);
rdev_for_each(rdev, mddev) {
if (rdev->sb_loaded != 1)
continue; /* no noise on spare devices */
if (!test_bit(Faulty, &rdev->flags)) {
- md_super_write(mddev,rdev,
- rdev->sb_start, rdev->sb_size,
- rdev->sb_page);
+ md_write_metadata(mddev, rdev, rdev->sb_start,
+ rdev->sb_size, rdev->sb_page, 0);
pr_debug("md: (write) %pg's sb offset: %llu\n",
rdev->bdev,
(unsigned long long)rdev->sb_start);
rdev->sb_events = mddev->events;
if (rdev->badblocks.size) {
- md_super_write(mddev, rdev,
- rdev->badblocks.sector,
- rdev->badblocks.size << 9,
- rdev->bb_page);
+ md_write_metadata(mddev, rdev,
+ rdev->badblocks.sector,
+ rdev->badblocks.size << 9,
+ rdev->bb_page, 0);
rdev->badblocks.size = 0;
}
@@ -4150,6 +4258,86 @@ static struct md_sysfs_entry md_new_level =
__ATTR(new_level, 0664, new_level_show, new_level_store);
static ssize_t
+bitmap_type_show(struct mddev *mddev, char *page)
+{
+ struct md_submodule_head *head;
+ unsigned long i;
+ ssize_t len = 0;
+
+ if (mddev->bitmap_id == ID_BITMAP_NONE)
+ len += sprintf(page + len, "[none] ");
+ else
+ len += sprintf(page + len, "none ");
+
+ xa_lock(&md_submodule);
+ xa_for_each(&md_submodule, i, head) {
+ if (head->type != MD_BITMAP)
+ continue;
+
+ if (mddev->bitmap_id == head->id)
+ len += sprintf(page + len, "[%s] ", head->name);
+ else
+ len += sprintf(page + len, "%s ", head->name);
+ }
+ xa_unlock(&md_submodule);
+
+ len += sprintf(page + len, "\n");
+ return len;
+}
+
+static ssize_t
+bitmap_type_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ struct md_submodule_head *head;
+ int id;
+ unsigned long i;
+ int err = 0;
+
+ xa_lock(&md_submodule);
+
+ if (mddev->bitmap_ops) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (cmd_match(buf, "none")) {
+ mddev->bitmap_id = ID_BITMAP_NONE;
+ goto out;
+ }
+
+ xa_for_each(&md_submodule, i, head) {
+ if (head->type == MD_BITMAP && cmd_match(buf, head->name)) {
+ mddev->bitmap_id = head->id;
+ goto out;
+ }
+ }
+
+ err = kstrtoint(buf, 10, &id);
+ if (err)
+ goto out;
+
+ if (id == ID_BITMAP_NONE) {
+ mddev->bitmap_id = id;
+ goto out;
+ }
+
+ head = xa_load(&md_submodule, id);
+ if (head && head->type == MD_BITMAP) {
+ mddev->bitmap_id = id;
+ goto out;
+ }
+
+ err = -ENOENT;
+
+out:
+ xa_unlock(&md_submodule);
+ return err ? err : len;
+}
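
The store path above accepts either a submodule name or a numeric id.
cmd_match() is md's existing keyword matcher; its behaviour (exact match with
an optional trailing newline) is sketched here as an assumption:

	/* Editorial sketch of the assumed cmd_match() semantics. */
	static int cmd_match_sketch(const char *cmd, const char *str)
	{
		while (*cmd && *str && *cmd == *str) {
			cmd++;
			str++;
		}
		if (*cmd == '\n')
			cmd++;
		return !*cmd && !*str;
	}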
+
+static struct md_sysfs_entry md_bitmap_type =
+__ATTR(bitmap_type, 0664, bitmap_type_show, bitmap_type_store);
+
+static ssize_t
layout_show(struct mddev *mddev, char *page)
{
/* just a number, not meaningful for all levels */
@@ -4680,6 +4868,9 @@ bitmap_store(struct mddev *mddev, const char *buf, size_t len)
unsigned long chunk, end_chunk;
int err;
+ if (!md_bitmap_enabled(mddev, false))
+ return len;
+
err = mddev_lock(mddev);
if (err)
return err;
@@ -4982,7 +5173,7 @@ static void stop_sync_thread(struct mddev *mddev, bool locked)
* Thread might be blocked waiting for metadata update which will now
* never happen
*/
- md_wakeup_thread_directly(mddev->sync_thread);
+ md_wakeup_thread_directly(&mddev->sync_thread);
if (work_pending(&mddev->sync_work))
flush_work(&mddev->sync_work);
@@ -5748,10 +5939,73 @@ static struct md_sysfs_entry md_serialize_policy =
__ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show,
serialize_policy_store);
+static int mddev_set_logical_block_size(struct mddev *mddev,
+ unsigned int lbs)
+{
+ int err = 0;
+ struct queue_limits lim;
+
+ if (queue_logical_block_size(mddev->gendisk->queue) >= lbs) {
+ pr_err("%s: Cannot set LBS smaller than mddev LBS %u\n",
+ mdname(mddev), lbs);
+ return -EINVAL;
+ }
+
+ lim = queue_limits_start_update(mddev->gendisk->queue);
+ lim.logical_block_size = lbs;
+ pr_info("%s: logical_block_size is changed, data may be lost\n",
+ mdname(mddev));
+ err = queue_limits_commit_update(mddev->gendisk->queue, &lim);
+ if (err)
+ return err;
+
+ mddev->logical_block_size = lbs;
+ /* New lbs will be written to superblock after array is running */
+ set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
+ return 0;
+}
+
+static ssize_t
+lbs_show(struct mddev *mddev, char *page)
+{
+ return sprintf(page, "%u\n", mddev->logical_block_size);
+}
+
+static ssize_t
+lbs_store(struct mddev *mddev, const char *buf, size_t len)
+{
+ unsigned int lbs;
+ int err;
+
+ /* Only 1.x meta supports configurable LBS */
+ if (mddev->major_version == 0)
+ return -EINVAL;
+
+ if (mddev->pers)
+ return -EBUSY;
+
+ err = kstrtouint(buf, 10, &lbs);
+ if (err < 0)
+ return -EINVAL;
+
+ err = mddev_lock(mddev);
+ if (err)
+ return err;
+
+ err = mddev_set_logical_block_size(mddev, lbs);
+ mddev_unlock(mddev);
+ return err ?: len;
+}
+
+static struct md_sysfs_entry md_logical_block_size =
+__ATTR(logical_block_size, 0644, lbs_show, lbs_store);
static struct attribute *md_default_attrs[] = {
&md_level.attr,
&md_new_level.attr,
+ &md_bitmap_type.attr,
&md_layout.attr,
&md_raid_disks.attr,
&md_uuid.attr,
@@ -5769,6 +6023,7 @@ static struct attribute *md_default_attrs[] = {
&md_consistency_policy.attr,
&md_fail_last_dev.attr,
&md_serialize_policy.attr,
+ &md_logical_block_size.attr,
NULL,
};
@@ -5801,7 +6056,6 @@ static const struct attribute_group md_redundancy_group = {
static const struct attribute_group *md_attr_groups[] = {
&md_default_group,
- &md_bitmap_group,
NULL,
};
@@ -5900,6 +6154,17 @@ int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim,
return -EINVAL;
}
+ /*
+ * Before RAID adding folio support, the logical_block_size
+ * should be smaller than the page size.
+ */
+ if (lim->logical_block_size > PAGE_SIZE) {
+ pr_err("%s: logical_block_size must not larger than PAGE_SIZE\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+ mddev->logical_block_size = lim->logical_block_size;
+
return 0;
}
EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits);
@@ -5912,6 +6177,13 @@ int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev)
if (mddev_is_dm(mddev))
return 0;
+ if (queue_logical_block_size(rdev->bdev->bd_disk->queue) >
+ queue_logical_block_size(mddev->gendisk->queue)) {
+ pr_err("%s: incompatible logical_block_size, can not add\n",
+ mdname(mddev));
+ return -EINVAL;
+ }
+
lim = queue_limits_start_update(mddev->gendisk->queue);
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset,
mddev->gendisk->disk_name);
@@ -6133,6 +6405,26 @@ static void md_safemode_timeout(struct timer_list *t)
static int start_dirty_degraded;
+static int md_bitmap_create(struct mddev *mddev)
+{
+ if (mddev->bitmap_id == ID_BITMAP_NONE)
+ return -EINVAL;
+
+ if (!mddev_set_bitmap_ops(mddev))
+ return -ENOENT;
+
+ return mddev->bitmap_ops->create(mddev);
+}
+
+static void md_bitmap_destroy(struct mddev *mddev)
+{
+ if (!md_bitmap_registered(mddev))
+ return;
+
+ mddev->bitmap_ops->destroy(mddev);
+ mddev_clear_bitmap_ops(mddev);
+}
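
A hedged bring-up sketch combining the two helpers above, following the
create/load/destroy-on-failure order used by do_md_run() and
set_bitmap_file() later in this diff:

	/* Editorial sketch: bitmap bring-up with cleanup on load failure. */
	static int bitmap_bringup(struct mddev *mddev)
	{
		int err = md_bitmap_create(mddev);

		if (err)
			return err;

		err = mddev->bitmap_ops->load(mddev);
		if (err)
			md_bitmap_destroy(mddev);
		return err;
	}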
+
int md_run(struct mddev *mddev)
{
int err;
@@ -6212,29 +6504,9 @@ int md_run(struct mddev *mddev)
nowait = nowait && bdev_nowait(rdev->bdev);
}
- if (!bioset_initialized(&mddev->bio_set)) {
- err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- if (err)
- return err;
- }
- if (!bioset_initialized(&mddev->sync_set)) {
- err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- if (err)
- goto exit_bio_set;
- }
-
- if (!bioset_initialized(&mddev->io_clone_set)) {
- err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE,
- offsetof(struct md_io_clone, bio_clone), 0);
- if (err)
- goto exit_sync_set;
- }
-
pers = get_pers(mddev->level, mddev->clevel);
- if (!pers) {
- err = -EINVAL;
- goto abort;
- }
+ if (!pers)
+ return -EINVAL;
if (mddev->level != pers->head.id) {
mddev->level = pers->head.id;
mddev->new_level = pers->head.id;
@@ -6245,8 +6517,7 @@ int md_run(struct mddev *mddev)
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
put_pers(pers);
- err = -EINVAL;
- goto abort;
+ return -EINVAL;
}
if (pers->sync_request) {
@@ -6299,7 +6570,7 @@ int md_run(struct mddev *mddev)
}
if (err == 0 && pers->sync_request &&
(mddev->bitmap_info.file || mddev->bitmap_info.offset)) {
- err = mddev->bitmap_ops->create(mddev);
+ err = md_bitmap_create(mddev);
if (err)
pr_warn("%s: failed to create bitmap (%d)\n",
mdname(mddev), err);
@@ -6372,13 +6643,7 @@ bitmap_abort:
pers->free(mddev, mddev->private);
mddev->private = NULL;
put_pers(pers);
- mddev->bitmap_ops->destroy(mddev);
-abort:
- bioset_exit(&mddev->io_clone_set);
-exit_sync_set:
- bioset_exit(&mddev->sync_set);
-exit_bio_set:
- bioset_exit(&mddev->bio_set);
+ md_bitmap_destroy(mddev);
return err;
}
EXPORT_SYMBOL_GPL(md_run);
@@ -6392,10 +6657,12 @@ int do_md_run(struct mddev *mddev)
if (err)
goto out;
- err = mddev->bitmap_ops->load(mddev);
- if (err) {
- mddev->bitmap_ops->destroy(mddev);
- goto out;
+ if (md_bitmap_registered(mddev)) {
+ err = mddev->bitmap_ops->load(mddev);
+ if (err) {
+ md_bitmap_destroy(mddev);
+ goto out;
+ }
}
if (mddev_is_clustered(mddev))
@@ -6509,6 +6776,7 @@ static void md_clean(struct mddev *mddev)
mddev->chunk_sectors = 0;
mddev->ctime = mddev->utime = 0;
mddev->layout = 0;
+ mddev->logical_block_size = 0;
mddev->max_disks = 0;
mddev->events = 0;
mddev->can_decrease_events = 0;
@@ -6546,7 +6814,8 @@ static void __md_stop_writes(struct mddev *mddev)
mddev->pers->quiesce(mddev, 0);
}
- mddev->bitmap_ops->flush(mddev);
+ if (md_bitmap_enabled(mddev, true))
+ mddev->bitmap_ops->flush(mddev);
if (md_is_rdwr(mddev) &&
((!mddev->in_sync && !mddev_is_clustered(mddev)) ||
@@ -6573,7 +6842,8 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
- mddev->bitmap_ops->wait_behind_writes(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->wait_behind_writes(mddev);
if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
@@ -6589,7 +6859,7 @@ static void __md_stop(struct mddev *mddev)
{
struct md_personality *pers = mddev->pers;
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev_detach(mddev);
spin_lock(&mddev->lock);
mddev->pers = NULL;
@@ -6599,10 +6869,6 @@ static void __md_stop(struct mddev *mddev)
mddev->private = NULL;
put_pers(pers);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-
- bioset_exit(&mddev->bio_set);
- bioset_exit(&mddev->sync_set);
- bioset_exit(&mddev->io_clone_set);
}
void md_stop(struct mddev *mddev)
@@ -6693,6 +6959,10 @@ static int do_md_stop(struct mddev *mddev, int mode)
if (!md_is_rdwr(mddev))
set_disk_ro(disk, 0);
+ if (mode == 2 && mddev->pers->sync_request &&
+ mddev->to_remove == NULL)
+ mddev->to_remove = &md_redundancy_group;
+
__md_stop_writes(mddev);
__md_stop(mddev);
@@ -7307,6 +7577,9 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
{
int err = 0;
+ if (!md_bitmap_registered(mddev))
+ return -EINVAL;
+
if (mddev->pers) {
if (!mddev->pers->quiesce || !mddev->thread)
return -EBUSY;
@@ -7363,16 +7636,16 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
err = 0;
if (mddev->pers) {
if (fd >= 0) {
- err = mddev->bitmap_ops->create(mddev);
+ err = md_bitmap_create(mddev);
if (!err)
err = mddev->bitmap_ops->load(mddev);
if (err) {
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
fd = -1;
}
} else if (fd < 0) {
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
}
}
@@ -7679,12 +7952,12 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->bitmap_info.default_offset;
mddev->bitmap_info.space =
mddev->bitmap_info.default_space;
- rv = mddev->bitmap_ops->create(mddev);
+ rv = md_bitmap_create(mddev);
if (!rv)
rv = mddev->bitmap_ops->load(mddev);
if (rv)
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
} else {
struct md_bitmap_stats stats;
@@ -7710,7 +7983,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
put_cluster_ops(mddev);
mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY;
}
- mddev->bitmap_ops->destroy(mddev);
+ md_bitmap_destroy(mddev);
mddev->bitmap_info.offset = 0;
}
}
@@ -7747,9 +8020,9 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
* 4 sectors (with a BIG number of cylinders...). This drives
* dosfs just mad... ;-)
*/
-static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+static int md_getgeo(struct gendisk *disk, struct hd_geometry *geo)
{
- struct mddev *mddev = bdev->bd_disk->private_data;
+ struct mddev *mddev = disk->private_data;
geo->heads = 2;
geo->sectors = 4;
@@ -8194,22 +8467,21 @@ static int md_thread(void *arg)
return 0;
}
-static void md_wakeup_thread_directly(struct md_thread __rcu *thread)
+static void md_wakeup_thread_directly(struct md_thread __rcu **thread)
{
struct md_thread *t;
rcu_read_lock();
- t = rcu_dereference(thread);
+ t = rcu_dereference(*thread);
if (t)
wake_up_process(t->tsk);
rcu_read_unlock();
}
-void md_wakeup_thread(struct md_thread __rcu *thread)
+void __md_wakeup_thread(struct md_thread __rcu *thread)
{
struct md_thread *t;
- rcu_read_lock();
t = rcu_dereference(thread);
if (t) {
pr_debug("md: waking up MD thread %s.\n", t->tsk->comm);
@@ -8217,9 +8489,8 @@ void md_wakeup_thread(struct md_thread __rcu *thread)
if (wq_has_sleeper(&t->wqueue))
wake_up(&t->wqueue);
}
- rcu_read_unlock();
}
-EXPORT_SYMBOL(md_wakeup_thread);
+EXPORT_SYMBOL(__md_wakeup_thread);
struct md_thread *md_register_thread(void (*run) (struct md_thread *),
struct mddev *mddev, const char *name)
@@ -8491,6 +8762,9 @@ static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev)
unsigned long chunk_kb;
int err;
+ if (!md_bitmap_enabled(mddev, false))
+ return;
+
err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
if (err)
return;
@@ -8873,18 +9147,24 @@ EXPORT_SYMBOL_GPL(md_submit_discard_bio);
static void md_bitmap_start(struct mddev *mddev,
struct md_io_clone *md_io_clone)
{
+ md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
+ mddev->bitmap_ops->start_discard :
+ mddev->bitmap_ops->start_write;
+
if (mddev->pers->bitmap_sector)
mddev->pers->bitmap_sector(mddev, &md_io_clone->offset,
&md_io_clone->sectors);
- mddev->bitmap_ops->start_write(mddev, md_io_clone->offset,
- md_io_clone->sectors);
+ fn(mddev, md_io_clone->offset, md_io_clone->sectors);
}
static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone)
{
- mddev->bitmap_ops->end_write(mddev, md_io_clone->offset,
- md_io_clone->sectors);
+ md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ?
+ mddev->bitmap_ops->end_discard :
+ mddev->bitmap_ops->end_write;
+
+ fn(mddev, md_io_clone->offset, md_io_clone->sectors);
}
static void md_end_clone_io(struct bio *bio)
@@ -8893,7 +9173,7 @@ static void md_end_clone_io(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
- if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
md_bitmap_end(mddev, md_io_clone);
if (bio->bi_status && !orig_bio->bi_status)
@@ -8920,9 +9200,10 @@ static void md_clone_bio(struct mddev *mddev, struct bio **bio)
if (blk_queue_io_stat(bdev->bd_disk->queue))
md_io_clone->start_time = bio_start_io_acct(*bio);
- if (bio_data_dir(*bio) == WRITE && mddev->bitmap) {
+ if (bio_data_dir(*bio) == WRITE && md_bitmap_enabled(mddev, false)) {
md_io_clone->offset = (*bio)->bi_iter.bi_sector;
md_io_clone->sectors = bio_sectors(*bio);
+ md_io_clone->rw = op_stat_group(bio_op(*bio));
md_bitmap_start(mddev, md_io_clone);
}
@@ -8944,7 +9225,7 @@ void md_free_cloned_bio(struct bio *bio)
struct bio *orig_bio = md_io_clone->orig_bio;
struct mddev *mddev = md_io_clone->mddev;
- if (bio_data_dir(orig_bio) == WRITE && mddev->bitmap)
+ if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false))
md_bitmap_end(mddev, md_io_clone);
if (bio->bi_status && !orig_bio->bi_status)
@@ -9010,6 +9291,39 @@ static sector_t md_sync_max_sectors(struct mddev *mddev,
}
}
+/*
+ * If lazy recovery is requested and all rdevs are in sync, select the rdev
+ * with the highest index to perform recovery and build the initial xor data;
+ * this matches the old bitmap behaviour.
+ */
+static bool mddev_select_lazy_recover_rdev(struct mddev *mddev)
+{
+ struct md_rdev *recover_rdev = NULL;
+ struct md_rdev *rdev;
+ bool ret = false;
+
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev) {
+ if (rdev->raid_disk < 0)
+ continue;
+
+ if (test_bit(Faulty, &rdev->flags) ||
+ !test_bit(In_sync, &rdev->flags))
+ break;
+
+ if (!recover_rdev || recover_rdev->raid_disk < rdev->raid_disk)
+ recover_rdev = rdev;
+ }
+
+ if (recover_rdev) {
+ clear_bit(In_sync, &recover_rdev->flags);
+ ret = true;
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
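
A short worked example of the selection above:

	/*
	 * Editorial worked example (illustrative values): with member disks
	 * 0..3 all In_sync and no spares, disk 3 has the highest raid_disk,
	 * so In_sync is cleared on it and recovery rebuilds it from the XOR
	 * of disks 0..2 - the same initial-build effect as a full resync,
	 * driven as a recovery instead.
	 */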
+
static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
{
sector_t start = 0;
@@ -9041,6 +9355,14 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
start = rdev->recovery_offset;
rcu_read_unlock();
+ /*
+ * If there are no spares and raid456 lazy initial recovery is
+ * requested, recover from the start of the array.
+ */
+ if (test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery) &&
+ start == MaxSector && mddev_select_lazy_recover_rdev(mddev))
+ start = 0;
+
/* If there is a bitmap, we need to make sure all
* writes that started before we added a spare
* complete before we start doing a recovery.
@@ -9061,19 +9383,12 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
static bool sync_io_within_limit(struct mddev *mddev)
{
- int io_sectors;
-
/*
* For raid456, sync IO is stripe(4k) per IO, for other levels, it's
* RESYNC_PAGES(64k) per IO.
*/
- if (mddev->level == 4 || mddev->level == 5 || mddev->level == 6)
- io_sectors = 8;
- else
- io_sectors = 128;
-
return atomic_read(&mddev->recovery_active) <
- io_sectors * sync_io_depth(mddev);
+ (raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev);
}
#define SYNC_MARKS 10
@@ -9283,6 +9598,12 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
+ if (mddev->bitmap_ops && mddev->bitmap_ops->skip_sync_blocks) {
+ sectors = mddev->bitmap_ops->skip_sync_blocks(mddev, j);
+ if (sectors)
+ goto update;
+ }
+
sectors = mddev->pers->sync_request(mddev, j, max_sectors,
&skipped);
if (sectors == 0) {
@@ -9298,6 +9619,7 @@ void md_do_sync(struct md_thread *thread)
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
break;
+update:
j += sectors;
if (j > max_sectors)
/* when skipping, extra large numbers can be returned. */
@@ -9607,6 +9929,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
return true;
}
@@ -9615,6 +9938,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
remove_spares(mddev, NULL);
set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
return true;
}
@@ -9624,7 +9948,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
* re-add.
*/
*spares = remove_and_add_spares(mddev, NULL);
- if (*spares) {
+ if (*spares || test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery)) {
clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
@@ -9682,7 +10006,7 @@ static void md_start_sync(struct work_struct *ws)
* We are adding a device or devices to an array which has the bitmap
* stored on all devices. So make sure all bitmap pages get written.
*/
- if (spares)
+ if (spares && md_bitmap_enabled(mddev, true))
mddev->bitmap_ops->write_all(mddev);
name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ?
@@ -9746,6 +10070,52 @@ static void unregister_sync_thread(struct mddev *mddev)
md_reap_sync_thread(mddev);
}
+static bool md_should_do_recovery(struct mddev *mddev)
+{
+ /*
+ * As long as one of the following flags is set,
+ * recovery needs to run or be cleaned up.
+ */
+ if (test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+ return true;
+
+ /*
+ * If no flags are set and the array is read-only,
+ * there is nothing to do.
+ */
+ if (!md_is_rdwr(mddev))
+ return false;
+
+ /*
+ * MD_SB_CHANGE_PENDING indicates that the array is switching from clean to
+ * active, and no action is needed for now.
+ * All other MD_SB_* flags require a superblock update.
+ */
+ if (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING))
+ return true;
+
+ /*
+ * If the array is not using external metadata and there has been no data
+ * written for some time, then the array's status needs to be set to
+ * in_sync.
+ */
+ if (mddev->external == 0 && mddev->safemode == 1)
+ return true;
+
+ /*
+ * When the system is about to restart or the process receives a signal,
+ * the array needs to be synchronized as soon as possible.
+ * Once data synchronization completes, the array status needs to change
+ * to in_sync.
+ */
+ if (mddev->safemode == 2 && !mddev->in_sync &&
+ mddev->resync_offset == MaxSector)
+ return true;
+
+ return false;
+}
+
/*
* This routine is regularly called by all per-raid-array threads to
* deal with generic issues like resync and super-block update.
@@ -9770,7 +10140,7 @@ static void unregister_sync_thread(struct mddev *mddev)
*/
void md_check_recovery(struct mddev *mddev)
{
- if (mddev->bitmap)
+ if (md_bitmap_enabled(mddev, false) && mddev->bitmap_ops->daemon_work)
mddev->bitmap_ops->daemon_work(mddev);
if (signal_pending(current)) {
@@ -9782,18 +10152,7 @@ void md_check_recovery(struct mddev *mddev)
flush_signals(current);
}
- if (!md_is_rdwr(mddev) &&
- !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) &&
- !test_bit(MD_RECOVERY_DONE, &mddev->recovery))
- return;
- if ( ! (
- (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING)) ||
- test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
- test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
- (mddev->external == 0 && mddev->safemode == 1) ||
- (mddev->safemode == 2
- && !mddev->in_sync && mddev->resync_offset == MaxSector)
- ))
+ if (!md_should_do_recovery(mddev))
return;
if (mddev_trylock(mddev)) {
@@ -9837,6 +10196,7 @@ void md_check_recovery(struct mddev *mddev)
}
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
@@ -9947,6 +10307,7 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery);
/*
* We call mddev->cluster_ops->update_size here because sync_size could
* be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
@@ -10047,7 +10408,6 @@ static int md_notify_reboot(struct notifier_block *this,
unsigned long code, void *x)
{
struct mddev *mddev;
- int need_delay = 0;
spin_lock(&all_mddevs_lock);
list_for_each_entry(mddev, &all_mddevs, all_mddevs) {
@@ -10061,21 +10421,11 @@ static int md_notify_reboot(struct notifier_block *this,
mddev->safemode = 2;
mddev_unlock(mddev);
}
- need_delay = 1;
spin_lock(&all_mddevs_lock);
mddev_put_locked(mddev);
}
spin_unlock(&all_mddevs_lock);
- /*
- * certain more exotic SCSI devices are known to be
- * volatile wrt too early system reboots. While the
- * right place to handle this issue is the given
- * driver, we do want to have a safe RAID driver ...
- */
- if (need_delay)
- msleep(1000);
-
return NOTIFY_DONE;
}
@@ -10094,8 +10444,16 @@ static void md_geninit(void)
static int __init md_init(void)
{
- int ret = -ENOMEM;
+ int ret = md_bitmap_init();
+
+ if (ret)
+ return ret;
+ ret = md_llbitmap_init();
+ if (ret)
+ goto err_bitmap;
+
+ ret = -ENOMEM;
md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
if (!md_wq)
goto err_wq;
@@ -10104,11 +10462,6 @@ static int __init md_init(void)
if (!md_misc_wq)
goto err_misc_wq;
- md_bitmap_wq = alloc_workqueue("md_bitmap", WQ_MEM_RECLAIM | WQ_UNBOUND,
- 0);
- if (!md_bitmap_wq)
- goto err_bitmap_wq;
-
ret = __register_blkdev(MD_MAJOR, "md", md_probe);
if (ret < 0)
goto err_md;
@@ -10127,12 +10480,13 @@ static int __init md_init(void)
err_mdp:
unregister_blkdev(MD_MAJOR, "md");
err_md:
- destroy_workqueue(md_bitmap_wq);
-err_bitmap_wq:
destroy_workqueue(md_misc_wq);
err_misc_wq:
destroy_workqueue(md_wq);
err_wq:
+ md_llbitmap_exit();
+err_bitmap:
+ md_bitmap_exit();
return ret;
}
@@ -10150,7 +10504,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size));
if (ret)
pr_info("md-cluster: resize failed\n");
- else
+ else if (md_bitmap_enabled(mddev, false))
mddev->bitmap_ops->update_sb(mddev->bitmap);
}
@@ -10438,8 +10792,8 @@ static __exit void md_exit(void)
spin_unlock(&all_mddevs_lock);
destroy_workqueue(md_misc_wq);
- destroy_workqueue(md_bitmap_wq);
destroy_workqueue(md_wq);
+ md_bitmap_exit();
}
subsys_initcall(md_init);
@@ -10459,6 +10813,7 @@ module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);
module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
module_param(create_on_open, bool, S_IRUSR|S_IWUSR);
module_param(legacy_async_del_gendisk, bool, 0600);
+module_param(check_new_feature, bool, 0600);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 51af29a03079..6985f2829bbd 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -26,7 +26,7 @@
enum md_submodule_type {
MD_PERSONALITY = 0,
MD_CLUSTER,
- MD_BITMAP, /* TODO */
+ MD_BITMAP,
};
enum md_submodule_id {
@@ -38,8 +38,9 @@ enum md_submodule_id {
ID_RAID6 = 6,
ID_RAID10 = 10,
ID_CLUSTER,
- ID_BITMAP, /* TODO */
- ID_LLBITMAP, /* TODO */
+ ID_BITMAP,
+ ID_LLBITMAP,
+ ID_BITMAP_NONE,
};
struct md_submodule_head {
@@ -353,6 +354,7 @@ enum mddev_flags {
MD_HAS_MULTIPLE_PPLS,
MD_NOT_READY,
MD_BROKEN,
+ MD_DO_DELETE,
MD_DELETED,
};
@@ -431,6 +433,7 @@ struct mddev {
sector_t array_sectors; /* exported array size */
int external_size; /* size managed
* externally */
+ unsigned int logical_block_size;
__u64 events;
/* If the last 'event' was simply a clean->dirty transition, and
* we didn't write it to the spares, then it is safe and simple
@@ -565,6 +568,7 @@ struct mddev {
struct percpu_ref writes_pending;
int sync_checkers; /* # of threads checking writes_pending */
+ enum md_submodule_id bitmap_id;
void *bitmap; /* the bitmap for the device */
struct bitmap_operations *bitmap_ops;
struct {
@@ -665,6 +669,8 @@ enum recovery_flags {
MD_RECOVERY_RESHAPE,
/* remote node is running resync thread */
MD_RESYNCING_REMOTE,
+ /* raid456 lazy initial recover */
+ MD_RECOVERY_LAZY_RECOVER,
};
enum md_ro_state {
@@ -796,7 +802,6 @@ struct md_sysfs_entry {
ssize_t (*show)(struct mddev *, char *);
ssize_t (*store)(struct mddev *, const char *, size_t);
};
-extern const struct attribute_group md_bitmap_group;
static inline struct kernfs_node *sysfs_get_dirent_safe(struct kernfs_node *sd, char *name)
{
@@ -873,11 +878,18 @@ struct md_io_clone {
unsigned long start_time;
sector_t offset;
unsigned long sectors;
+ enum stat_group rw;
struct bio bio_clone;
};
#define THREAD_WAKEUP 0
+#define md_wakeup_thread(thread) do { \
+ rcu_read_lock(); \
+ __md_wakeup_thread(thread); \
+ rcu_read_unlock(); \
+} while (0)
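
A hedged sketch of why the macro/helper split is useful: a caller that wakes
more than one thread can share a single RCU read-side section (illustrative
only):

	/* Editorial sketch: batch two wakeups under one rcu_read_lock(). */
	static inline void wake_main_and_sync(struct mddev *mddev)
	{
		rcu_read_lock();
		__md_wakeup_thread(mddev->thread);
		__md_wakeup_thread(mddev->sync_thread);
		rcu_read_unlock();
	}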
+
static inline void safe_put_page(struct page *p)
{
if (p) put_page(p);
@@ -891,7 +903,7 @@ extern struct md_thread *md_register_thread(
struct mddev *mddev,
const char *name);
extern void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp);
-extern void md_wakeup_thread(struct md_thread __rcu *thread);
+extern void __md_wakeup_thread(struct md_thread __rcu *thread);
extern void md_check_recovery(struct mddev *mddev);
extern void md_reap_sync_thread(struct mddev *mddev);
extern enum sync_action md_sync_action(struct mddev *mddev);
@@ -909,8 +921,9 @@ void md_account_bio(struct mddev *mddev, struct bio **bio);
void md_free_cloned_bio(struct bio *bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
-extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
- sector_t sector, int size, struct page *page);
+void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev,
+ sector_t sector, int size, struct page *page,
+ unsigned int offset);
extern int md_super_wait(struct mddev *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, blk_opf_t opf, bool metadata_op);
@@ -1013,7 +1026,6 @@ struct mdu_array_info_s;
struct mdu_disk_info_s;
extern int mdp_major;
-extern struct workqueue_struct *md_bitmap_wq;
void md_autostart_arrays(int part);
int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info);
int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info);
@@ -1034,6 +1046,12 @@ static inline bool mddev_is_dm(struct mddev *mddev)
return !mddev->gendisk;
}
+static inline bool raid_is_456(struct mddev *mddev)
+{
+ return mddev->level == ID_RAID4 || mddev->level == ID_RAID5 ||
+ mddev->level == ID_RAID6;
+}
+
static inline void mddev_trace_remap(struct mddev *mddev, struct bio *bio,
sector_t sector)
{
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f1d8811a542a..985c377356eb 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -68,7 +68,10 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
struct strip_zone *zone;
int cnt;
struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
- unsigned blksize = 512;
+ unsigned int blksize = 512;
+
+ if (!mddev_is_dm(mddev))
+ blksize = queue_logical_block_size(mddev->gendisk->queue);
*private_conf = ERR_PTR(-ENOMEM);
if (!conf)
@@ -84,7 +87,8 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
sector_div(sectors, mddev->chunk_sectors);
rdev1->sectors = sectors * mddev->chunk_sectors;
- blksize = max(blksize, queue_logical_block_size(
+ if (mddev_is_dm(mddev))
+ blksize = max(blksize, queue_logical_block_size(
rdev1->bdev->bd_disk->queue));
rdev_for_each(rdev2, mddev) {
@@ -382,6 +386,8 @@ static int raid0_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_hw_sectors = mddev->chunk_sectors;
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
+ lim.max_hw_wzeroes_unmap_sectors = mddev->chunk_sectors;
+ lim.logical_block_size = mddev->logical_block_size;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.chunk_sectors = mddev->chunk_sectors;
@@ -404,6 +410,12 @@ static int raid0_run(struct mddev *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
+ if (!mddev_is_dm(mddev)) {
+ ret = raid0_set_limits(mddev);
+ if (ret)
+ return ret;
+ }
+
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
ret = create_strip_zones(mddev, &conf);
@@ -412,11 +424,6 @@ static int raid0_run(struct mddev *mddev)
mddev->private = conf;
}
conf = mddev->private;
- if (!mddev_is_dm(mddev)) {
- ret = raid0_set_limits(mddev);
- if (ret)
- return ret;
- }
/* calculate array device size */
md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
@@ -463,21 +470,16 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
zone = find_zone(conf, &start);
if (bio_end_sector(bio) > zone->zone_end) {
- struct bio *split = bio_split(bio,
- zone->zone_end - bio->bi_iter.bi_sector, GFP_NOIO,
- &mddev->bio_set);
-
- if (IS_ERR(split)) {
- bio->bi_status = errno_to_blk_status(PTR_ERR(split));
- bio_endio(bio);
+ bio = bio_submit_split_bioset(bio,
+ zone->zone_end - bio->bi_iter.bi_sector,
+ &mddev->bio_set);
+ if (!bio)
return;
- }
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
+
end = zone->zone_end;
- } else
+ } else {
end = bio_end_sector(bio);
+ }
orig_end = end;
if (zone != conf->strip_zone)
@@ -612,17 +614,10 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
: sector_div(sector, chunk_sects));
if (sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, sectors, GFP_NOIO,
+ bio = bio_submit_split_bioset(bio, sectors,
&mddev->bio_set);
-
- if (IS_ERR(split)) {
- bio->bi_status = errno_to_blk_status(PTR_ERR(split));
- bio_endio(bio);
+ if (!bio)
return true;
- }
- bio_chain(split, bio);
- raid0_map_submit_bio(mddev, bio);
- bio = split;
}
raid0_map_submit_bio(mddev, bio);
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 52881e6032da..521625756128 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -140,7 +140,7 @@ static inline bool raid1_add_bio_to_plug(struct mddev *mddev, struct bio *bio,
* If bitmap is not enabled, it's safe to submit the io directly, and
* this can get optimal performance.
*/
- if (!mddev->bitmap_ops->enabled(mddev)) {
+ if (!md_bitmap_enabled(mddev, true)) {
raid1_submit_write(bio);
return true;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index bf44878ec640..57d50465eed1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -167,7 +167,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r1_bio->bios[j] = bio;
}
/*
@@ -1317,7 +1317,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
struct raid1_info *mirror;
struct bio *read_bio;
int max_sectors;
- int rdisk, error;
+ int rdisk;
bool r1bio_existed = !!r1_bio;
/*
@@ -1366,7 +1366,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
(unsigned long long)r1_bio->sector,
mirror->rdev->bdev);
- if (test_bit(WriteMostly, &mirror->rdev->flags)) {
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ md_bitmap_enabled(mddev, false)) {
/*
* Reading from a write-mostly device must take care not to
* over-take any writes that are 'behind'
@@ -1376,16 +1377,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
}
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- gfp, &conf->bio_split);
-
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
+ if (!bio) {
+ set_bit(R1BIO_Returned, &r1_bio->state);
goto err_handle;
}
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
+
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
}
@@ -1413,8 +1411,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
err_handle:
atomic_dec(&mirror->rdev->nr_pending);
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R1BIO_Uptodate, &r1_bio->state);
raid_end_bio_io(r1_bio);
}
@@ -1452,12 +1448,36 @@ retry:
return true;
}
+static void raid1_start_write_behind(struct mddev *mddev, struct r1bio *r1_bio,
+ struct bio *bio)
+{
+ unsigned long max_write_behind = mddev->bitmap_info.max_write_behind;
+ struct md_bitmap_stats stats;
+ int err;
+
+ /* behind writes rely on the bitmap, see bitmap_operations */
+ if (!md_bitmap_enabled(mddev, false))
+ return;
+
+ err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
+ if (err)
+ return;
+
+ /* Don't do behind IO if reader is waiting, or there are too many. */
+ if (!stats.behind_wait && stats.behind_writes < max_write_behind)
+ alloc_behind_master_bio(r1_bio, bio);
+
+ if (test_bit(R1BIO_BehindIO, &r1_bio->state))
+ mddev->bitmap_ops->start_behind_write(mddev);
+}
+
static void raid1_write_request(struct mddev *mddev, struct bio *bio,
int max_write_sectors)
{
struct r1conf *conf = mddev->private;
struct r1bio *r1_bio;
- int i, disks, k, error;
+ int i, disks, k;
unsigned long flags;
int first_clone;
int max_sectors;
@@ -1561,10 +1581,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* complexity of supporting that is not worth
* the benefit.
*/
- if (bio->bi_opf & REQ_ATOMIC) {
- error = -EIO;
+ if (bio->bi_opf & REQ_ATOMIC)
goto err_handle;
- }
good_sectors = first_bad - r1_bio->sector;
if (good_sectors < max_sectors)
@@ -1584,16 +1602,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
max_sectors = min_t(int, max_sectors,
BIO_MAX_VECS * (PAGE_SIZE >> 9));
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- GFP_NOIO, &conf->bio_split);
-
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
+ if (!bio) {
+ set_bit(R1BIO_Returned, &r1_bio->state);
goto err_handle;
}
- bio_chain(split, bio);
- submit_bio_noacct(bio);
- bio = split;
+
r1_bio->master_bio = bio;
r1_bio->sectors = max_sectors;
}
@@ -1612,22 +1627,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
continue;
if (first_clone) {
- unsigned long max_write_behind =
- mddev->bitmap_info.max_write_behind;
- struct md_bitmap_stats stats;
- int err;
-
- /* do behind I/O ?
- * Not if there are too many, or cannot
- * allocate memory, or a reader on WriteMostly
- * is waiting for behind writes to flush */
- err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats);
- if (!err && write_behind && !stats.behind_wait &&
- stats.behind_writes < max_write_behind)
- alloc_behind_master_bio(r1_bio, bio);
-
- if (test_bit(R1BIO_BehindIO, &r1_bio->state))
- mddev->bitmap_ops->start_behind_write(mddev);
+ if (write_behind)
+ raid1_start_write_behind(mddev, r1_bio, bio);
first_clone = 0;
}
@@ -1683,8 +1684,6 @@ err_handle:
}
}
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R1BIO_Uptodate, &r1_bio->state);
raid_end_bio_io(r1_bio);
}
@@ -2057,7 +2056,7 @@ static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
/* make sure these bits don't get cleared. */
do {
- mddev->bitmap_ops->end_sync(mddev, s, &sync_blocks);
+ md_bitmap_end_sync(mddev, s, &sync_blocks);
s += sync_blocks;
sectors_to_go -= sync_blocks;
} while (sectors_to_go > 0);
@@ -2804,12 +2803,13 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
* We can find the current address in mddev->curr_resync
*/
if (mddev->curr_resync < max_sector) /* aborted */
- mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else /* completed sync */
conf->fullsync = 0;
- mddev->bitmap_ops->close_sync(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
close_sync(conf);
if (mddev_is_clustered(mddev)) {
@@ -2829,7 +2829,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* before building a request, check if we can skip these blocks..
* This call to bitmap_start_sync doesn't actually record anything
*/
- if (!mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks, true) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* We can skip this block, and probably several more */
*skipped = 1;
@@ -2846,10 +2846,11 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
/* we are incrementing sector_nr below. To be safe, we check against
* sector_nr + two times RESYNC_SECTORS
*/
-
- mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
- mddev_is_clustered(mddev) &&
- (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
+ mddev_is_clustered(mddev) &&
+ (sector_nr + 2 * RESYNC_SECTORS >
+ conf->cluster_sync_high));
if (raise_barrier(conf, sector_nr))
return 0;
@@ -3004,8 +3005,8 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (len == 0)
break;
if (sync_blocks == 0) {
- if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
- &sync_blocks, still_degraded) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr,
+ &sync_blocks, still_degraded) &&
!conf->fullsync &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
break;
@@ -3211,6 +3212,8 @@ static int raid1_set_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
+ lim.logical_block_size = mddev->logical_block_size;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
if (err)
@@ -3324,15 +3327,17 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
* worth it.
*/
sector_t newsize = raid1_size(mddev, sectors, 0);
- int ret;
if (mddev->external_size &&
mddev->array_sectors > newsize)
return -EINVAL;
- ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
- if (ret)
- return ret;
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
+
+ if (ret)
+ return ret;
+ }
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index d236ef179cfb..2ebe35aaa534 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -178,7 +178,9 @@ enum r1bio_state {
* any write was successful. Otherwise we call when
* any write-behind write succeeds, otherwise we call
* with failure when last write completes (and all failed).
- * Record that bi_end_io was called with this flag...
+ *
+ * And for bio_split errors, record that bi_end_io was called
+ * with this flag...
*/
R1BIO_Returned,
/* If a write for this request means we can clear some
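
The expanded comment pairs with a test_and_set_bit() guard in the end-io path (see the raid10 raid_end_bio_io hunk below) so the master bio is completed exactly once, even when a failed bio_split() has already returned it. A minimal sketch of the idiom:

	/* Sketch: complete the parent bio at most once, whoever gets there first. */
	static void sketch_end_request(struct r1bio *r1_bio)
	{
		struct bio *bio = r1_bio->master_bio;

		if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
			if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
				bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
		}
	}
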
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b60c30bfb6c7..84be4cc7e873 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -163,14 +163,14 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r10_bio->devs[j].bio = bio;
if (!conf->have_replacement)
continue;
bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
if (!bio)
goto out_free_bio;
- bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
+ bio_init_inline(bio, NULL, RESYNC_PAGES, 0);
r10_bio->devs[j].repl_bio = bio;
}
/*
@@ -322,10 +322,12 @@ static void raid_end_bio_io(struct r10bio *r10_bio)
struct bio *bio = r10_bio->master_bio;
struct r10conf *conf = r10_bio->mddev->private;
- if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
- bio->bi_status = BLK_STS_IOERR;
+ if (!test_and_set_bit(R10BIO_Returned, &r10_bio->state)) {
+ if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
+ }
- bio_endio(bio);
/*
* Wake up any possible resync thread that waits for the device
* to go idle.
@@ -1154,7 +1156,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
int slot = r10_bio->read_slot;
struct md_rdev *err_rdev = NULL;
gfp_t gfp = GFP_NOIO;
- int error;
if (slot >= 0 && r10_bio->devs[slot].rdev) {
/*
@@ -1203,17 +1204,15 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
rdev->bdev,
(unsigned long long)r10_bio->sector);
if (max_sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, max_sectors,
- gfp, &conf->bio_split);
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
- goto err_handle;
- }
- bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ bio = bio_submit_split_bioset(bio, max_sectors,
+ &conf->bio_split);
wait_barrier(conf, false);
- bio = split;
+ if (!bio) {
+ set_bit(R10BIO_Returned, &r10_bio->state);
+ goto err_handle;
+ }
+
r10_bio->master_bio = bio;
r10_bio->sectors = max_sectors;
}
@@ -1241,8 +1240,6 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
return;
err_handle:
atomic_dec(&rdev->nr_pending);
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
}
@@ -1351,7 +1348,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
int i, k;
sector_t sectors;
int max_sectors;
- int error;
if ((mddev_is_clustered(mddev) &&
mddev->cluster_ops->area_resyncing(mddev, WRITE,
@@ -1465,10 +1461,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
* complexity of supporting that is not worth
* the benefit.
*/
- if (bio->bi_opf & REQ_ATOMIC) {
- error = -EIO;
+ if (bio->bi_opf & REQ_ATOMIC)
goto err_handle;
- }
good_sectors = first_bad - dev_sector;
if (good_sectors < max_sectors)
@@ -1489,17 +1483,15 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
r10_bio->sectors = max_sectors;
if (r10_bio->sectors < bio_sectors(bio)) {
- struct bio *split = bio_split(bio, r10_bio->sectors,
- GFP_NOIO, &conf->bio_split);
- if (IS_ERR(split)) {
- error = PTR_ERR(split);
- goto err_handle;
- }
- bio_chain(split, bio);
allow_barrier(conf);
- submit_bio_noacct(bio);
+ bio = bio_submit_split_bioset(bio, r10_bio->sectors,
+ &conf->bio_split);
wait_barrier(conf, false);
- bio = split;
+ if (!bio) {
+ set_bit(R10BIO_Returned, &r10_bio->state);
+ goto err_handle;
+ }
+
r10_bio->master_bio = bio;
}
@@ -1531,8 +1523,6 @@ err_handle:
}
}
- bio->bi_status = errno_to_blk_status(error);
- set_bit(R10BIO_Uptodate, &r10_bio->state);
raid_end_bio_io(r10_bio);
}
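
Both request paths now delegate the split to bio_submit_split_bioset(). Reconstructed from the removed lines, the helper plausibly bundles roughly the following; on failure it is assumed to complete the parent bio itself, which is why the callers only mark R10BIO_Returned before unwinding (a sketch, not the block-layer implementation):

	/* What the removed open-coded sequence did; bio_submit_split_bioset()
	 * is assumed to fold this up, returning the split or NULL on error. */
	static struct bio *open_coded_split(struct bio *bio, unsigned int sectors,
					    struct bio_set *bs)
	{
		struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

		if (IS_ERR(split))
			return NULL;		/* parent assumed ended by helper */
		bio_chain(split, bio);
		submit_bio_noacct(bio);		/* resubmit the remainder */
		return split;
	}
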
@@ -1679,7 +1669,9 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
return 0;
}
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
/* Resend the first split part */
submit_bio_noacct(split);
@@ -1694,7 +1686,9 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
bio_endio(bio);
return 0;
}
+
bio_chain(split, bio);
+ trace_block_split(split, bio->bi_iter.bi_sector);
allow_barrier(conf);
/* Resend the second split part */
submit_bio_noacct(bio);
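
The discard path keeps its manual splits but now emits the block:block_split tracepoint for each half, so blktrace/blkparse report an 'X' event for RAID10 discard splits just as the core block layer does for ordinary ones. The idiom, condensed from this hunk:

	/* Chain, trace, resubmit one half of a manual split. */
	static void sketch_send_half(struct bio *split, struct bio *parent)
	{
		bio_chain(split, parent);	/* parent completes after split */
		trace_block_split(split, parent->bi_iter.bi_sector);
		submit_bio_noacct(split);
	}
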
@@ -3221,15 +3215,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (mddev->curr_resync < max_sector) { /* aborted */
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
- mddev->bitmap_ops->end_sync(mddev,
- mddev->curr_resync,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else for (i = 0; i < conf->geo.raid_disks; i++) {
sector_t sect =
raid10_find_virt(conf, mddev->curr_resync, i);
- mddev->bitmap_ops->end_sync(mddev, sect,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, sect, &sync_blocks);
}
} else {
/* completed sync */
@@ -3249,7 +3241,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
conf->fullsync = 0;
}
- mddev->bitmap_ops->close_sync(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
close_sync(conf);
*skipped = 1;
return sectors_skipped;
@@ -3351,9 +3344,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* we only need to recover the block if it is set in
* the bitmap
*/
- must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
- &sync_blocks,
- true);
+ must_sync = md_bitmap_start_sync(mddev, sect,
+ &sync_blocks, true);
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&
@@ -3396,9 +3388,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
}
}
- must_sync = mddev->bitmap_ops->start_sync(mddev, sect,
- &sync_blocks, still_degraded);
-
+ md_bitmap_start_sync(mddev, sect, &sync_blocks,
+ still_degraded);
any_working = 0;
for (j=0; j<conf->copies;j++) {
int k;
@@ -3570,13 +3561,13 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
* safety reason, which ensures curr_resync_completed is
* updated in bitmap_cond_end_sync.
*/
- mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr,
mddev_is_clustered(mddev) &&
(sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
- if (!mddev->bitmap_ops->start_sync(mddev, sector_nr,
- &sync_blocks,
- mddev->degraded) &&
+ if (!md_bitmap_start_sync(mddev, sector_nr, &sync_blocks,
+ mddev->degraded) &&
!conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
&mddev->recovery)) {
/* We can skip this block */
@@ -4008,6 +3999,8 @@ static int raid10_set_queue_limits(struct mddev *mddev)
md_init_stacking_limits(&lim);
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
+ lim.logical_block_size = mddev->logical_block_size;
lim.io_min = mddev->chunk_sectors << 9;
lim.chunk_sectors = mddev->chunk_sectors;
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
@@ -4225,7 +4218,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
*/
struct r10conf *conf = mddev->private;
sector_t oldsize, size;
- int ret;
if (mddev->reshape_position != MaxSector)
return -EBUSY;
@@ -4239,9 +4231,12 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > size)
return -EINVAL;
- ret = mddev->bitmap_ops->resize(mddev, size, 0, false);
- if (ret)
- return ret;
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, size, 0);
+
+ if (ret)
+ return ret;
+ }
md_set_array_sectors(mddev, size);
if (sectors > mddev->dev_sectors &&
@@ -4507,8 +4502,9 @@ static int raid10_start_reshape(struct mddev *mddev)
oldsize = raid10_size(mddev, 0, 0);
newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
- if (!mddev_is_clustered(mddev)) {
- ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
+ if (!mddev_is_clustered(mddev) &&
+ md_bitmap_enabled(mddev, false)) {
+ ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
if (ret)
goto abort;
else
@@ -4530,13 +4526,14 @@ static int raid10_start_reshape(struct mddev *mddev)
MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
goto out;
- ret = mddev->bitmap_ops->resize(mddev, newsize, 0, false);
+ /* cluster can't be set up without a bitmap */
+ ret = mddev->bitmap_ops->resize(mddev, newsize, 0);
if (ret)
goto abort;
ret = mddev->cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
if (ret) {
- mddev->bitmap_ops->resize(mddev, oldsize, 0, false);
+ mddev->bitmap_ops->resize(mddev, oldsize, 0);
goto abort;
}
}
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 3f16ad6904a9..da00a55f7a55 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -165,6 +165,8 @@ enum r10bio_state {
* so that raid10d knows what to do with them.
*/
R10BIO_ReadError,
+/* For bio_split errors, record that bi_end_io was called. */
+ R10BIO_Returned,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag.
*/
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index ba768ca7f422..e29e69335c69 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -3104,7 +3104,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
goto out_mempool;
spin_lock_init(&log->tree_lock);
- INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
+ INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT);
thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
"reclaim");
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 023649fe2476..e57ce3295292 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4097,7 +4097,8 @@ static int handle_stripe_dirtying(struct r5conf *conf,
int disks)
{
int rmw = 0, rcw = 0, i;
- sector_t resync_offset = conf->mddev->resync_offset;
+ struct mddev *mddev = conf->mddev;
+ sector_t resync_offset = mddev->resync_offset;
/* Check whether resync is now happening or should start.
* If yes, then the array is dirty (after unclean shutdown or
@@ -4116,6 +4117,12 @@ static int handle_stripe_dirtying(struct r5conf *conf,
pr_debug("force RCW rmw_level=%u, resync_offset=%llu sh->sector=%llu\n",
conf->rmw_level, (unsigned long long)resync_offset,
(unsigned long long)sh->sector);
+ } else if (mddev->bitmap_ops && mddev->bitmap_ops->blocks_synced &&
+ !mddev->bitmap_ops->blocks_synced(mddev, sh->sector)) {
+ /* The initial recover is not done, must read everything */
+ rcw = 1; rmw = 2;
+ pr_debug("force RCW by lazy recovery, sh->sector=%llu\n",
+ sh->sector);
} else for (i = disks; i--; ) {
/* would I have to read this buffer for read_modify_write */
struct r5dev *dev = &sh->dev[i];
@@ -4148,7 +4155,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_HANDLE, &sh->state);
if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
/* prefer read-modify-write, but need to get some data */
- mddev_add_trace_msg(conf->mddev, "raid5 rmw %llu %d",
+ mddev_add_trace_msg(mddev, "raid5 rmw %llu %d",
sh->sector, rmw);
for (i = disks; i--; ) {
@@ -4227,8 +4234,8 @@ static int handle_stripe_dirtying(struct r5conf *conf,
set_bit(STRIPE_DELAYED, &sh->state);
}
}
- if (rcw && !mddev_is_dm(conf->mddev))
- blk_add_trace_msg(conf->mddev->gendisk->queue,
+ if (rcw && !mddev_is_dm(mddev))
+ blk_add_trace_msg(mddev->gendisk->queue,
"raid5 rcw %llu %d %d %d",
(unsigned long long)sh->sector, rcw, qread,
test_bit(STRIPE_DELAYED, &sh->state));
@@ -4698,10 +4705,21 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset)
- /* in sync if before recovery_offset */
- set_bit(R5_Insync, &dev->flags);
- else if (test_bit(R5_UPTODATE, &dev->flags) &&
+ else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <=
+ rdev->recovery_offset) {
+ /*
+ * in sync if:
+ * - normal IO, or
+ * - resync IO that is not lazy recovery
+ *
+ * For lazy recovery, an rdev without In_sync must be treated
+ * as failed so the initial xor data can be built.
+ */
+ if (!test_bit(STRIPE_SYNCING, &sh->state) ||
+ !test_bit(MD_RECOVERY_LAZY_RECOVER,
+ &conf->mddev->recovery))
+ set_bit(R5_Insync, &dev->flags);
+ } else if (test_bit(R5_UPTODATE, &dev->flags) &&
test_bit(R5_Expanded, &dev->flags))
/* If we've reshaped into here, we assume it is Insync.
* We will shortly update recovery_offset to make
@@ -4938,7 +4956,8 @@ static void handle_stripe(struct stripe_head *sh)
goto finish;
if (s.handle_bad_blocks ||
- test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
+ (md_is_rdwr(conf->mddev) &&
+ test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags))) {
set_bit(STRIPE_HANDLE, &sh->state);
goto finish;
}
@@ -5468,17 +5487,17 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
{
- struct bio *split;
sector_t sector = raid_bio->bi_iter.bi_sector;
unsigned chunk_sects = mddev->chunk_sectors;
unsigned sectors = chunk_sects - (sector & (chunk_sects-1));
if (sectors < bio_sectors(raid_bio)) {
struct r5conf *conf = mddev->private;
- split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
- bio_chain(split, raid_bio);
- submit_bio_noacct(raid_bio);
- raid_bio = split;
+
+ raid_bio = bio_submit_split_bioset(raid_bio, sectors,
+ &conf->bio_split);
+ if (!raid_bio)
+ return NULL;
}
if (!raid5_read_one_chunk(mddev, raid_bio))
@@ -6492,11 +6511,12 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (mddev->curr_resync < max_sector) /* aborted */
- mddev->bitmap_ops->end_sync(mddev, mddev->curr_resync,
- &sync_blocks);
+ md_bitmap_end_sync(mddev, mddev->curr_resync,
+ &sync_blocks);
else /* completed sync */
conf->fullsync = 0;
- mddev->bitmap_ops->close_sync(mddev);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->close_sync(mddev);
return 0;
}
@@ -6525,8 +6545,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
}
if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
!conf->fullsync &&
- !mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
- true) &&
+ !md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, true) &&
sync_blocks >= RAID5_STRIPE_SECTORS(conf)) {
/* we can skip this block, and probably more */
do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf));
@@ -6535,7 +6554,8 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
return sync_blocks * RAID5_STRIPE_SECTORS(conf);
}
- mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false);
+ if (md_bitmap_enabled(mddev, false))
+ mddev->bitmap_ops->cond_end_sync(mddev, sector_nr, false);
sh = raid5_get_active_stripe(conf, NULL, sector_nr,
R5_GAS_NOBLOCK);
@@ -6557,9 +6577,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
still_degraded = true;
}
- mddev->bitmap_ops->start_sync(mddev, sector_nr, &sync_blocks,
- still_degraded);
-
+ md_bitmap_start_sync(mddev, sector_nr, &sync_blocks, still_degraded);
set_bit(STRIPE_SYNC_REQUESTED, &sh->state);
set_bit(STRIPE_HANDLE, &sh->state);
@@ -6751,7 +6769,8 @@ static void raid5d(struct md_thread *thread)
int batch_size, released;
unsigned int offset;
- if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
+ if (md_is_rdwr(mddev) &&
+ test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
break;
released = release_stripe_list(conf, conf->temp_inactive_list);
@@ -6763,7 +6782,8 @@ static void raid5d(struct md_thread *thread)
/* Now is a good time to flush some bitmap updates */
conf->seq_flush++;
spin_unlock_irq(&conf->device_lock);
- mddev->bitmap_ops->unplug(mddev, true);
+ if (md_bitmap_enabled(mddev, true))
+ mddev->bitmap_ops->unplug(mddev, true);
spin_lock_irq(&conf->device_lock);
conf->seq_write = conf->seq_flush;
activate_bit_delay(conf, conf->temp_inactive_list);
@@ -7727,11 +7747,13 @@ static int raid5_set_limits(struct mddev *mddev)
stripe = roundup_pow_of_two(data_disks * (mddev->chunk_sectors << 9));
md_init_stacking_limits(&lim);
+ lim.logical_block_size = mddev->logical_block_size;
lim.io_min = mddev->chunk_sectors << 9;
lim.io_opt = lim.io_min * (conf->raid_disks - conf->max_degraded);
lim.features |= BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE;
lim.discard_granularity = stripe;
lim.max_write_zeroes_sectors = 0;
+ lim.max_hw_wzeroes_unmap_sectors = 0;
mddev_stack_rdev_limits(mddev, &lim, 0);
rdev_for_each(rdev, mddev)
queue_limits_stack_bdev(&lim, rdev->bdev, rdev->new_data_offset,
@@ -8312,7 +8334,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
*/
sector_t newsize;
struct r5conf *conf = mddev->private;
- int ret;
if (raid5_has_log(conf) || raid5_has_ppl(conf))
return -EINVAL;
@@ -8322,9 +8343,12 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
mddev->array_sectors > newsize)
return -EINVAL;
- ret = mddev->bitmap_ops->resize(mddev, sectors, 0, false);
- if (ret)
- return ret;
+ if (md_bitmap_enabled(mddev, false)) {
+ int ret = mddev->bitmap_ops->resize(mddev, sectors, 0);
+
+ if (ret)
+ return ret;
+ }
md_set_array_sectors(mddev, newsize);
if (sectors > mddev->dev_sectors &&
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index e10bd588a586..dd6e24a0899b 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -421,6 +421,7 @@ static int __init cec_devnode_init(void)
ret = bus_register(&cec_bus_type);
if (ret < 0) {
+ debugfs_remove_recursive(top_cec_dir);
unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
pr_warn("cec: bus_register failed\n");
return -EIO;
@@ -439,6 +440,6 @@ static void __exit cec_devnode_exit(void)
subsys_initcall(cec_devnode_init);
module_exit(cec_devnode_exit)
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Device node registration for cec drivers");
MODULE_LICENSE("GPL");
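
The fix restores the usual unwind rule: an error path releases everything acquired earlier, in reverse order. A sketch of the shape, using the names from this hunk with the acquisition order assumed for illustration:

	static int __init sketch_devnode_init(void)
	{
		int ret;

		top_cec_dir = debugfs_create_dir("cec", NULL);

		ret = alloc_chrdev_region(&cec_dev_t, 0, CEC_NUM_DEVICES, "cec");
		if (ret)
			goto err_debugfs;

		ret = bus_register(&cec_bus_type);
		if (ret < 0)
			goto err_chrdev;
		return 0;

	err_chrdev:
		unregister_chrdev_region(cec_dev_t, CEC_NUM_DEVICES);
	err_debugfs:
		debugfs_remove_recursive(top_cec_dir);
		return ret;
	}
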
diff --git a/drivers/media/cec/platform/cec-gpio/cec-gpio.c b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
index 3c27789d8657..842555ed42c7 100644
--- a/drivers/media/cec/platform/cec-gpio/cec-gpio.c
+++ b/drivers/media/cec/platform/cec-gpio/cec-gpio.c
@@ -291,6 +291,6 @@ static struct platform_driver cec_gpio_pdrv = {
module_platform_driver(cec_gpio_pdrv);
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CEC GPIO driver");
diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
index fea2d65acffc..1ec0cece0a5b 100644
--- a/drivers/media/cec/platform/stm32/stm32-cec.c
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
@@ -248,7 +248,6 @@ static const struct regmap_config stm32_cec_regmap_cfg = {
.val_bits = 32,
.reg_stride = sizeof(u32),
.max_register = 0x14,
- .fast_io = true,
};
static int stm32_cec_probe(struct platform_device *pdev)
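
Dropping .fast_io switches this regmap from spinlock to mutex locking; fast_io is only warranted when register access must happen from atomic context, and a mutex lets the I/O paths sleep safely. The remaining configuration, with the change annotated (a sketch mirroring the fields shown above):

	static const struct regmap_config sketch_cec_regmap_cfg = {
		.val_bits = 32,
		.reg_stride = sizeof(u32),
		.max_register = 0x14,
		/* no .fast_io: regmap guards accesses with a mutex */
	};
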
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
index 2e8f7f60263f..08d58524419f 100644
--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/Makefile
@@ -1,8 +1,2 @@
extron-da-hd-4k-plus-cec-objs := extron-da-hd-4k-plus.o cec-splitter.o
obj-$(CONFIG_USB_EXTRON_DA_HD_4K_PLUS_CEC) := extron-da-hd-4k-plus-cec.o
-
-all:
- $(MAKE) -C $(KDIR) M=$(shell pwd) modules
-
-install:
- $(MAKE) -C $(KDIR) M=$(shell pwd) modules_install
diff --git a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
index 41d019b01ec0..e2eff17952ab 100644
--- a/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
+++ b/drivers/media/cec/usb/extron-da-hd-4k-plus/extron-da-hd-4k-plus.c
@@ -28,7 +28,7 @@
#include "extron-da-hd-4k-plus.h"
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Extron DA HD 4K PLUS HDMI CEC driver");
MODULE_LICENSE("GPL");
@@ -1252,7 +1252,7 @@ static int extron_s_output(struct file *file, void *priv, unsigned int o)
return o ? -EINVAL : 0;
}
-static int extron_g_edid(struct file *file, void *_fh,
+static int extron_g_edid(struct file *file, void *priv,
struct v4l2_edid *edid)
{
struct extron_port *port = video_drvdata(file);
@@ -1280,7 +1280,7 @@ static int extron_g_edid(struct file *file, void *_fh,
return 0;
}
-static int extron_s_edid(struct file *file, void *_fh, struct v4l2_edid *edid)
+static int extron_s_edid(struct file *file, void *priv, struct v4l2_edid *edid)
{
struct extron_port *port = video_drvdata(file);
diff --git a/drivers/media/cec/usb/pulse8/pulse8-cec.c b/drivers/media/cec/usb/pulse8/pulse8-cec.c
index 171366fe3544..60569f1670fe 100644
--- a/drivers/media/cec/usb/pulse8/pulse8-cec.c
+++ b/drivers/media/cec/usb/pulse8/pulse8-cec.c
@@ -2,7 +2,7 @@
/*
* Pulse Eight HDMI CEC driver
*
- * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl
+ * Copyright 2016 Hans Verkuil <hverkuil@kernel.org>
*/
/*
@@ -41,7 +41,7 @@
#include <media/cec.h>
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("Pulse Eight HDMI CEC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
index 6f8d6797c614..08f58456d682 100644
--- a/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
+++ b/drivers/media/cec/usb/rainshadow/rainshadow-cec.c
@@ -2,7 +2,7 @@
/*
* RainShadow Tech HDMI CEC driver
*
- * Copyright 2016 Hans Verkuil <hverkuil@xs4all.nl
+ * Copyright 2016 Hans Verkuil <hverkuil@kernel.org>
*/
/*
@@ -31,7 +31,7 @@
#include <media/cec.h>
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_DESCRIPTION("RainShadow Tech HDMI CEC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/b2c2/flexcop-sram.c b/drivers/media/common/b2c2/flexcop-sram.c
index d97962eb0112..dba03769f263 100644
--- a/drivers/media/common/b2c2/flexcop-sram.c
+++ b/drivers/media/common/b2c2/flexcop-sram.c
@@ -352,7 +352,7 @@ static int flexcop_sram_detect(struct flexcop_device *fc)
sram_set_size(adapter, 0x10000);
sram_init(adapter);
write_reg_dw(adapter, 0x208, tmp);
- dprintk("%s: SRAM detection failed. Set to 32K \n", __func__);
+ dprintk("%s: SRAM detection failed. Set to 32K\n", __func__);
return 0;
}
diff --git a/drivers/media/common/b2c2/flexcop.c b/drivers/media/common/b2c2/flexcop.c
index e7a88a2d248c..8506de48ba45 100644
--- a/drivers/media/common/b2c2/flexcop.c
+++ b/drivers/media/common/b2c2/flexcop.c
@@ -170,7 +170,7 @@ static void flexcop_reset(struct flexcop_device *fc)
flexcop_ibi_value v210, v204;
/* reset the flexcop itself */
- fc->write_ibi_reg(fc,ctrl_208,ibi_zero);
+ fc->write_ibi_reg(fc, ctrl_208, ibi_zero);
v210.raw = 0;
v210.sw_reset_210.reset_block_000 = 1;
@@ -183,17 +183,17 @@ static void flexcop_reset(struct flexcop_device *fc)
v210.sw_reset_210.reset_block_700 = 1;
v210.sw_reset_210.Block_reset_enable = 0xb2;
v210.sw_reset_210.Special_controls = 0xc259;
- fc->write_ibi_reg(fc,sw_reset_210,v210);
+ fc->write_ibi_reg(fc, sw_reset_210, v210);
msleep(1);
/* reset the peripheral devices */
- v204 = fc->read_ibi_reg(fc,misc_204);
+ v204 = fc->read_ibi_reg(fc, misc_204);
v204.misc_204.Per_reset_sig = 0;
- fc->write_ibi_reg(fc,misc_204,v204);
+ fc->write_ibi_reg(fc, misc_204, v204);
msleep(1);
v204.misc_204.Per_reset_sig = 1;
- fc->write_ibi_reg(fc,misc_204,v204);
+ fc->write_ibi_reg(fc, misc_204, v204);
}
void flexcop_reset_block_300(struct flexcop_device *fc)
@@ -202,13 +202,13 @@ void flexcop_reset_block_300(struct flexcop_device *fc)
v210 = fc->read_ibi_reg(fc, sw_reset_210);
deb_rdump("208: %08x, 210: %08x\n", v208_save.raw, v210.raw);
- fc->write_ibi_reg(fc,ctrl_208,ibi_zero);
+ fc->write_ibi_reg(fc, ctrl_208, ibi_zero);
v210.sw_reset_210.reset_block_300 = 1;
v210.sw_reset_210.Block_reset_enable = 0xb2;
- fc->write_ibi_reg(fc,sw_reset_210,v210);
- fc->write_ibi_reg(fc,ctrl_208,v208_save);
+ fc->write_ibi_reg(fc, sw_reset_210, v210);
+ fc->write_ibi_reg(fc, ctrl_208, v208_save);
}
struct flexcop_device *flexcop_device_kmalloc(size_t bus_specific_len)
@@ -266,8 +266,8 @@ int flexcop_device_initialize(struct flexcop_device *fc)
if (fc->get_mac_addr(fc, 0) == 0) {
u8 *b = fc->dvb_adapter.proposed_mac;
info("MAC address = %pM", b);
- flexcop_set_mac_filter(fc,b);
- flexcop_mac_filter_ctrl(fc,1);
+ flexcop_set_mac_filter(fc, b);
+ flexcop_mac_filter_ctrl(fc, 1);
} else
warn("reading of MAC address failed.\n");
@@ -275,7 +275,7 @@ int flexcop_device_initialize(struct flexcop_device *fc)
if (ret)
goto error;
- flexcop_device_name(fc,"initialization of","complete");
+ flexcop_device_name(fc, "initialization of", "complete");
return 0;
error:
diff --git a/drivers/media/common/cx2341x.c b/drivers/media/common/cx2341x.c
index 1392bd6b0026..1ee159ef7f38 100644
--- a/drivers/media/common/cx2341x.c
+++ b/drivers/media/common/cx2341x.c
@@ -2,7 +2,7 @@
/*
* cx2341x - generic code for cx23415/6/8 based devices
*
- * Copyright (C) 2006 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2006 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/common/saa7146/saa7146_fops.c b/drivers/media/common/saa7146/saa7146_fops.c
index 9d0362a75ecd..a9e3bad76d54 100644
--- a/drivers/media/common/saa7146/saa7146_fops.c
+++ b/drivers/media/common/saa7146/saa7146_fops.c
@@ -186,11 +186,11 @@ static ssize_t fops_write(struct file *file, const char __user *data, size_t cou
struct saa7146_dev *dev = video_drvdata(file);
int ret;
- if (vdev->vfl_type != VFL_TYPE_VBI || !dev->ext_vv_data->vbi_fops.write)
+ if (vdev->vfl_type != VFL_TYPE_VBI || !dev->ext_vv_data->vbi_write)
return -EINVAL;
if (mutex_lock_interruptible(vdev->lock))
return -ERESTARTSYS;
- ret = dev->ext_vv_data->vbi_fops.write(file, data, count, ppos);
+ ret = dev->ext_vv_data->vbi_write(file, data, count, ppos);
mutex_unlock(vdev->lock);
return ret;
}
diff --git a/drivers/media/common/siano/smsir.c b/drivers/media/common/siano/smsir.c
index d85c78c104b9..af07fed21ae1 100644
--- a/drivers/media/common/siano/smsir.c
+++ b/drivers/media/common/siano/smsir.c
@@ -28,7 +28,7 @@ void sms_ir_event(struct smscore_device_t *coredev, const char *buf, int len)
for (i = 0; i < len >> 2; i++) {
struct ir_raw_event ev = {
.duration = abs(samples[i]),
- .pulse = (samples[i] > 0) ? false : true
+ .pulse = samples[i] <= 0
};
ir_raw_event_store(coredev->ir.dev, &ev);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index a13ec569c82f..7123c5fae92c 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -258,6 +258,7 @@ static void *vb2_dc_alloc(struct vb2_buffer *vb,
if (ret) {
dev_err(dev, "dma alloc of size %lu failed\n", size);
+ put_device(buf->dev);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
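
The one-line fix balances a device reference taken when buf->dev was assigned: once the buffer owns that reference, every error exit must drop it before freeing. A minimal sketch of the ownership rule (do_alloc() is a hypothetical stand-in for the DMA allocation step):

	static void *sketch_alloc(struct device *dev, unsigned long size)
	{
		struct vb2_dc_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

		if (!buf)
			return ERR_PTR(-ENOMEM);
		buf->dev = get_device(dev);	/* buf now owns a reference */
		if (do_alloc(buf, size)) {	/* hypothetical allocation */
			put_device(buf->dev);	/* drop the ref before freeing */
			kfree(buf);
			return ERR_PTR(-ENOMEM);
		}
		return buf;
	}
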
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 1cd26faee503..83862d57b126 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -973,18 +973,14 @@ EXPORT_SYMBOL_GPL(vb2_queue_change_type);
__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
- struct video_device *vfd = video_devdata(file);
+ struct v4l2_fh *fh = file_to_v4l2_fh(file);
__poll_t res;
res = vb2_core_poll(q, file, wait);
- if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
- struct v4l2_fh *fh = file->private_data;
-
- poll_wait(file, &fh->wait, wait);
- if (v4l2_event_pending(fh))
- res |= EPOLLPRI;
- }
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ res |= EPOLLPRI;
return res;
}
@@ -1014,6 +1010,11 @@ int vb2_ioctl_remove_bufs(struct file *file, void *priv,
if (vb2_queue_is_busy(vdev->queue, file))
return -EBUSY;
+ if (vb2_fileio_is_active(vdev->queue)) {
+ dprintk(vdev->queue, 1, "file io in progress\n");
+ return -EBUSY;
+ }
+
return vb2_core_remove_bufs(vdev->queue, d->index, d->count);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_remove_bufs);
diff --git a/drivers/media/dvb-core/dmxdev.c b/drivers/media/dvb-core/dmxdev.c
index 151177e5a06d..8c6f5aafda1d 100644
--- a/drivers/media/dvb-core/dmxdev.c
+++ b/drivers/media/dvb-core/dmxdev.c
@@ -1414,8 +1414,8 @@ int dvb_dmxdev_init(struct dmxdev *dmxdev, struct dvb_adapter *dvb_adapter)
if (dmxdev->demux->open(dmxdev->demux) < 0)
return -EUSERS;
- dmxdev->filter = vmalloc(array_size(sizeof(struct dmxdev_filter),
- dmxdev->filternum));
+ dmxdev->filter = vmalloc_array(dmxdev->filternum,
+ sizeof(struct dmxdev_filter));
if (!dmxdev->filter)
return -ENOMEM;
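
vmalloc_array(n, size) is the overflow-checked, count-first counterpart of vmalloc(array_size(size, n)); both refuse an n * size product that overflows (returning NULL), but the dedicated helper matches the kcalloc()-style convention and reads more naturally. The two equivalent forms:

	/* Before: overflow-safe size computed by hand. */
	dmxdev->filter = vmalloc(array_size(sizeof(struct dmxdev_filter),
					    dmxdev->filternum));
	/* After: same allocation, count first. */
	dmxdev->filter = vmalloc_array(dmxdev->filternum,
				       sizeof(struct dmxdev_filter));
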
diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c
index baf64540dc00..7b591aa1179f 100644
--- a/drivers/media/dvb-core/dvb_ca_en50221.c
+++ b/drivers/media/dvb-core/dvb_ca_en50221.c
@@ -785,7 +785,7 @@ exit:
* be written.
* @bytes_write: Size of ebuf.
* @size_write_flag: A flag on Command Register which says whether the link size
- * information will be writen or not.
+ * information will be written or not.
*
* return: Number of bytes written, or < 0 on error.
*/
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 7c4d86bfdd6c..290fc7961647 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -744,7 +744,8 @@ static int dmx_ts_feed_start_filtering(struct dmx_ts_feed *ts_feed)
return -ENODEV;
}
- if ((ret = demux->start_feed(feed)) < 0) {
+ ret = demux->start_feed(feed);
+ if (ret < 0) {
mutex_unlock(&demux->mutex);
return ret;
}
@@ -797,7 +798,8 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
if (mutex_lock_interruptible(&demux->mutex))
return -ERESTARTSYS;
- if (!(feed = dvb_dmx_feed_alloc(demux))) {
+ feed = dvb_dmx_feed_alloc(demux);
+ if (!feed) {
mutex_unlock(&demux->mutex);
return -EBUSY;
}
@@ -817,7 +819,8 @@ static int dvbdmx_allocate_ts_feed(struct dmx_demux *dmx,
(*ts_feed)->stop_filtering = dmx_ts_feed_stop_filtering;
(*ts_feed)->set = dmx_ts_feed_set;
- if (!(feed->filter = dvb_dmx_filter_alloc(demux))) {
+ feed->filter = dvb_dmx_filter_alloc(demux);
+ if (!feed->filter) {
feed->state = DMX_STATE_FREE;
mutex_unlock(&demux->mutex);
return -EBUSY;
@@ -923,7 +926,8 @@ static void prepare_secfilters(struct dvb_demux_feed *dvbdmxfeed)
struct dmx_section_filter *sf;
u8 mask, mode, doneq;
- if (!(f = dvbdmxfeed->filter))
+ f = dvbdmxfeed->filter;
+ if (!f)
return;
do {
sf = &f->filter;
@@ -970,7 +974,8 @@ static int dmx_section_feed_start_filtering(struct dmx_section_feed *feed)
prepare_secfilters(dvbdmxfeed);
- if ((ret = dvbdmx->start_feed(dvbdmxfeed)) < 0) {
+ ret = dvbdmx->start_feed(dvbdmxfeed);
+ if (ret < 0) {
mutex_unlock(&dvbdmx->mutex);
return ret;
}
@@ -1057,7 +1062,8 @@ static int dvbdmx_allocate_section_feed(struct dmx_demux *demux,
if (mutex_lock_interruptible(&dvbdmx->mutex))
return -ERESTARTSYS;
- if (!(dvbdmxfeed = dvb_dmx_feed_alloc(dvbdmx))) {
+ dvbdmxfeed = dvb_dmx_feed_alloc(dvbdmx);
+ if (!dvbdmxfeed) {
mutex_unlock(&dvbdmx->mutex);
return -EBUSY;
}
@@ -1223,7 +1229,7 @@ static int dvbdmx_disconnect_frontend(struct dmx_demux *demux)
return 0;
}
-static int dvbdmx_get_pes_pids(struct dmx_demux *demux, u16 * pids)
+static int dvbdmx_get_pes_pids(struct dmx_demux *demux, u16 *pids)
{
struct dvb_demux *dvbdemux = (struct dvb_demux *)demux;
@@ -1238,14 +1244,14 @@ int dvb_dmx_init(struct dvb_demux *dvbdemux)
dvbdemux->cnt_storage = NULL;
dvbdemux->users = 0;
- dvbdemux->filter = vmalloc(array_size(sizeof(struct dvb_demux_filter),
- dvbdemux->filternum));
+ dvbdemux->filter = vmalloc_array(dvbdemux->filternum,
+ sizeof(struct dvb_demux_filter));
if (!dvbdemux->filter)
return -ENOMEM;
- dvbdemux->feed = vmalloc(array_size(sizeof(struct dvb_demux_feed),
- dvbdemux->feednum));
+ dvbdemux->feed = vmalloc_array(dvbdemux->feednum,
+ sizeof(struct dvb_demux_feed));
if (!dvbdemux->feed) {
vfree(dvbdemux->filter);
dvbdemux->filter = NULL;
diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c
index 7d4558de8e83..de6226556826 100644
--- a/drivers/media/dvb-core/dvb_ringbuffer.c
+++ b/drivers/media/dvb-core/dvb_ringbuffer.c
@@ -37,10 +37,11 @@
void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len)
{
- rbuf->pread=rbuf->pwrite=0;
- rbuf->data=data;
- rbuf->size=len;
- rbuf->error=0;
+ rbuf->pread = 0;
+ rbuf->pwrite = 0;
+ rbuf->data = data;
+ rbuf->size = len;
+ rbuf->error = 0;
init_waitqueue_head(&rbuf->queue);
@@ -235,7 +236,7 @@ ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf,
return len;
}
-ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf, size_t len)
+ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len)
{
int status;
ssize_t oldpwrite = rbuf->pwrite;
@@ -245,7 +246,8 @@ ssize_t dvb_ringbuffer_pkt_write(struct dvb_ringbuffer *rbuf, u8* buf, size_t le
DVB_RINGBUFFER_WRITE_BYTE(rbuf, PKT_READY);
status = dvb_ringbuffer_write(rbuf, buf, len);
- if (status < 0) rbuf->pwrite = oldpwrite;
+ if (status < 0)
+ rbuf->pwrite = oldpwrite;
return status;
}
@@ -258,8 +260,10 @@ ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
pktlen = rbuf->data[idx] << 8;
pktlen |= rbuf->data[(idx + 1) % rbuf->size];
- if (offset > pktlen) return -EINVAL;
- if ((offset + len) > pktlen) len = pktlen - offset;
+ if (offset > pktlen)
+ return -EINVAL;
+ if ((offset + len) > pktlen)
+ len = pktlen - offset;
idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size;
todo = len;
@@ -278,7 +282,7 @@ ssize_t dvb_ringbuffer_pkt_read_user(struct dvb_ringbuffer *rbuf, size_t idx,
}
ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
- int offset, u8* buf, size_t len)
+ int offset, u8 *buf, size_t len)
{
size_t todo;
size_t split;
@@ -286,8 +290,10 @@ ssize_t dvb_ringbuffer_pkt_read(struct dvb_ringbuffer *rbuf, size_t idx,
pktlen = rbuf->data[idx] << 8;
pktlen |= rbuf->data[(idx + 1) % rbuf->size];
- if (offset > pktlen) return -EINVAL;
- if ((offset + len) > pktlen) len = pktlen - offset;
+ if (offset > pktlen)
+ return -EINVAL;
+ if ((offset + len) > pktlen)
+ len = pktlen - offset;
idx = (idx + DVB_RINGBUFFER_PKTHDRSIZE + offset) % rbuf->size;
todo = len;
@@ -309,7 +315,7 @@ void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
rbuf->data[(idx + 2) % rbuf->size] = PKT_DISPOSED;
// clean up disposed packets
- while(dvb_ringbuffer_avail(rbuf) > DVB_RINGBUFFER_PKTHDRSIZE) {
+ while (dvb_ringbuffer_avail(rbuf) > DVB_RINGBUFFER_PKTHDRSIZE) {
if (DVB_RINGBUFFER_PEEK(rbuf, 2) == PKT_DISPOSED) {
pktlen = DVB_RINGBUFFER_PEEK(rbuf, 0) << 8;
pktlen |= DVB_RINGBUFFER_PEEK(rbuf, 1);
@@ -321,14 +327,14 @@ void dvb_ringbuffer_pkt_dispose(struct dvb_ringbuffer *rbuf, size_t idx)
}
}
-ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t* pktlen)
+ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t *pktlen)
{
int consumed;
int curpktlen;
int curpktstatus;
if (idx == -1) {
- idx = rbuf->pread;
+ idx = rbuf->pread;
} else {
curpktlen = rbuf->data[idx] << 8;
curpktlen |= rbuf->data[(idx + 1) % rbuf->size];
@@ -339,7 +345,7 @@ ssize_t dvb_ringbuffer_pkt_next(struct dvb_ringbuffer *rbuf, size_t idx, size_t*
if (consumed < 0)
consumed += rbuf->size;
- while((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) {
+ while ((dvb_ringbuffer_avail(rbuf) - consumed) > DVB_RINGBUFFER_PKTHDRSIZE) {
curpktlen = rbuf->data[idx] << 8;
curpktlen |= rbuf->data[(idx + 1) % rbuf->size];
diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
index 9df7c213716a..8b980d371a45 100644
--- a/drivers/media/dvb-core/dvbdev.c
+++ b/drivers/media/dvb-core/dvbdev.c
@@ -571,8 +571,8 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
MKDEV(DVB_MAJOR, minor),
dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
if (IS_ERR(clsdev)) {
- pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
- __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
+ pr_err("%s: failed to create device dvb%d.%s%d (%pe)\n",
+ __func__, adap->num, dnames[type], id, clsdev);
if (new_node) {
list_del(&new_node->list_head);
kfree(dvbdevfops);
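
%pe prints an error pointer symbolically (e.g. "-ENOMEM") instead of the raw errno from PTR_ERR(), so the cast and the extra argument disappear; the same conversion appears in mn88443x further down. The pattern:

	if (IS_ERR(clsdev))
		pr_err("failed to create device (%pe)\n", clsdev);
	/* logs e.g.: failed to create device (-ENOMEM) */
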
diff --git a/drivers/media/dvb-frontends/Kconfig b/drivers/media/dvb-frontends/Kconfig
index 2ef2ff2a38ff..bcc97ca86ed5 100644
--- a/drivers/media/dvb-frontends/Kconfig
+++ b/drivers/media/dvb-frontends/Kconfig
@@ -163,7 +163,7 @@ config DVB_CX24123
A DVB-S tuner module. Say Y when you want to support this frontend.
config DVB_DS3000
- tristate "Montage Tehnology DS3000 based"
+ tristate "Montage Technology DS3000 based"
depends on DVB_CORE && I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
help
@@ -270,7 +270,7 @@ config DVB_TDA826X
A DVB-S silicon tuner module. Say Y when you want to support this tuner.
config DVB_TS2020
- tristate "Montage Tehnology TS2020 based tuners"
+ tristate "Montage Technology TS2020 based tuners"
depends on DVB_CORE && I2C
select REGMAP_I2C
default m if !MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 415f1f91cc30..8fcb4417ba22 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -1936,7 +1936,8 @@ static void cxd2841er_read_ber(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct cxd2841er_priv *priv = fe->demodulator_priv;
- u32 ret, bit_error = 0, bit_count = 0;
+ u32 bit_error = 0, bit_count = 0;
+ int ret;
dev_dbg(&priv->i2c->dev, "%s()\n", __func__);
switch (p->delivery_system) {
diff --git a/drivers/media/dvb-frontends/drx39xyj/drxj.c b/drivers/media/dvb-frontends/drx39xyj/drxj.c
index 779cce93e94a..428b31e60874 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drxj.c
+++ b/drivers/media/dvb-frontends/drx39xyj/drxj.c
@@ -1270,7 +1270,7 @@ static const u16 nicam_presc_table_val[43] = {
TODO: check ignoring single/multimaster is ok for AUD access ?
*/
-#define DRXJ_ISAUDWRITE(addr) (((((addr)>>16)&1) == 1) ? true : false)
+#define DRXJ_ISAUDWRITE(addr) ((((addr) >> 16) & 1) == 1)
#define DRXJ_DAP_AUDTRIF_TIMEOUT 80 /* millisec */
/*============================================================================*/
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index 87f3d4f0eb8c..9ef367918824 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -6324,8 +6324,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe)
case SYS_DVBC_ANNEX_C:
if (!state->m_has_dvbc)
return -EINVAL;
- state->m_itut_annex_c = (delsys == SYS_DVBC_ANNEX_C) ?
- true : false;
+ state->m_itut_annex_c = delsys == SYS_DVBC_ANNEX_C;
if (state->m_itut_annex_c)
setoperation_mode(state, OM_QAM_ITU_C);
else
diff --git a/drivers/media/dvb-frontends/lgdt330x.c b/drivers/media/dvb-frontends/lgdt330x.c
index cab442a350a5..8c34a5b850bc 100644
--- a/drivers/media/dvb-frontends/lgdt330x.c
+++ b/drivers/media/dvb-frontends/lgdt330x.c
@@ -124,7 +124,6 @@ static int i2c_read_demod_bytes(struct lgdt330x_state *state,
/* Software reset */
static int lgdt3302_sw_reset(struct lgdt330x_state *state)
{
- u8 ret;
u8 reset[] = {
IRQ_MASK,
/*
@@ -133,6 +132,7 @@ static int lgdt3302_sw_reset(struct lgdt330x_state *state)
*/
0x00
};
+ int ret;
ret = i2c_write_demod_bytes(state,
reset, sizeof(reset));
@@ -147,11 +147,11 @@ static int lgdt3302_sw_reset(struct lgdt330x_state *state)
static int lgdt3303_sw_reset(struct lgdt330x_state *state)
{
- u8 ret;
u8 reset[] = {
0x02,
0x00 /* bit 0 is active low software reset */
};
+ int ret;
ret = i2c_write_demod_bytes(state,
reset, sizeof(reset));
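
Both lgdt330x resets, like the cxd2841er change above, fix the same class of bug: a negative errno stored in an unsigned type. With u8 ret, a returned -EIO (-5) wraps to 251, so the subsequent 'ret < 0' check can never fire and errors are silently swallowed. Minimal illustration:

	u8 bad = -EIO;		/* wraps to 251: (bad < 0) is always false */
	int good = -EIO;	/* stays -5:    (good < 0) detects the error */
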
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index 7a58f53ab999..818c4e67364c 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -694,8 +694,7 @@ static int mn88443x_probe(struct i2c_client *client)
chip->mclk = devm_clk_get(dev, "mclk");
if (IS_ERR(chip->mclk) && !conf) {
- dev_err(dev, "Failed to request mclk: %ld\n",
- PTR_ERR(chip->mclk));
+ dev_err(dev, "Failed to request mclk: %pe\n", chip->mclk);
return PTR_ERR(chip->mclk);
}
@@ -709,8 +708,8 @@ static int mn88443x_probe(struct i2c_client *client)
chip->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio)) {
- dev_err(dev, "Failed to request reset_gpio: %ld\n",
- PTR_ERR(chip->reset_gpio));
+ dev_err(dev, "Failed to request reset_gpio: %pe\n",
+ chip->reset_gpio);
return PTR_ERR(chip->reset_gpio);
}
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 6237fe804a5c..4b4db8c4f496 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -27,7 +27,7 @@ config VIDEO_IR_I2C
menuconfig VIDEO_CAMERA_SENSOR
bool "Camera sensor devices"
- depends on MEDIA_CAMERA_SUPPORT && I2C
+ depends on MEDIA_CAMERA_SUPPORT && I2C && HAVE_CLK
select MEDIA_CONTROLLER
select V4L2_FWNODE
select VIDEO_V4L2_SUBDEV_API
@@ -70,6 +70,16 @@ config VIDEO_GC0308
To compile this driver as a module, choose M here: the
module will be called gc0308.
+config VIDEO_GC0310
+ tristate "GalaxyCore GC0310 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor-level driver for the Galaxycore
+ GC0310 0.3MP sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gc0310.
+
config VIDEO_GC05A2
tristate "GalaxyCore gc05a2 sensor support"
select V4L2_CCI_I2C
@@ -127,6 +137,16 @@ config VIDEO_HI847
To compile this driver as a module, choose M here: the
module will be called hi847.
+config VIDEO_IMX111
+ tristate "Sony IMX111 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a V4L2 sensor driver for the Sony IMX111 camera
+ sensors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx111.
+
config VIDEO_IMX208
tristate "Sony IMX208 sensor support"
help
@@ -317,6 +337,7 @@ config VIDEO_MT9V011
config VIDEO_MT9V032
tristate "Micron MT9V032 sensor support"
+ depends on OF
select REGMAP_I2C
help
This is a Video4Linux2 sensor driver for the Micron
@@ -340,6 +361,16 @@ config VIDEO_OG01A1B
To compile this driver as a module, choose M here: the
module will be called og01a1b.
+config VIDEO_OG0VE1B
+ tristate "OmniVision OG0VE1B sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OG0VE1B camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called og0ve1b.
+
config VIDEO_OV01A10
tristate "OmniVision OV01A10 sensor support"
help
@@ -446,6 +477,16 @@ config VIDEO_OV2685
To compile this driver as a module, choose M here: the
module will be called ov2685.
+config VIDEO_OV2735
+ tristate "OmniVision OV2735 sensor support"
+ select V4L2_CCI_I2C
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV2735 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ov2735.
+
config VIDEO_OV2740
tristate "OmniVision OV2740 sensor support"
depends on ACPI || COMPILE_TEST
@@ -542,24 +583,25 @@ config VIDEO_OV5695
To compile this driver as a module, choose M here: the
module will be called ov5695.
-config VIDEO_OV64A40
- tristate "OmniVision OV64A40 sensor support"
+config VIDEO_OV6211
+ tristate "OmniVision OV6211 sensor support"
select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
- OV64A40 camera.
+ OV6211 camera.
To compile this driver as a module, choose M here: the
- module will be called ov64a40.
+ module will be called ov6211.
-config VIDEO_OV6650
- tristate "OmniVision OV6650 sensor support"
+config VIDEO_OV64A40
+ tristate "OmniVision OV64A40 sensor support"
+ select V4L2_CCI_I2C
help
This is a Video4Linux2 sensor driver for the OmniVision
- OV6650 camera.
+ OV64A40 camera.
To compile this driver as a module, choose M here: the
- module will be called ov6650.
+ module will be called ov64a40.
config VIDEO_OV7251
tristate "OmniVision OV7251 sensor support"
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 5873d29433ee..c5f17602454f 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_VIDEO_DW9768) += dw9768.o
obj-$(CONFIG_VIDEO_DW9807_VCM) += dw9807-vcm.o
obj-$(CONFIG_VIDEO_ET8EK8) += et8ek8/
obj-$(CONFIG_VIDEO_GC0308) += gc0308.o
+obj-$(CONFIG_VIDEO_GC0310) += gc0310.o
obj-$(CONFIG_VIDEO_GC05A2) += gc05a2.o
obj-$(CONFIG_VIDEO_GC08A3) += gc08a3.o
obj-$(CONFIG_VIDEO_GC2145) += gc2145.o
@@ -45,6 +46,7 @@ obj-$(CONFIG_VIDEO_HI556) += hi556.o
obj-$(CONFIG_VIDEO_HI846) += hi846.o
obj-$(CONFIG_VIDEO_HI847) += hi847.o
obj-$(CONFIG_VIDEO_I2C) += video-i2c.o
+obj-$(CONFIG_VIDEO_IMX111) += imx111.o
obj-$(CONFIG_VIDEO_IMX208) += imx208.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX219) += imx219.o
@@ -81,6 +83,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
obj-$(CONFIG_VIDEO_MT9V111) += mt9v111.o
obj-$(CONFIG_VIDEO_OG01A1B) += og01a1b.o
+obj-$(CONFIG_VIDEO_OG0VE1B) += og0ve1b.o
obj-$(CONFIG_VIDEO_OV01A10) += ov01a10.o
obj-$(CONFIG_VIDEO_OV02A10) += ov02a10.o
obj-$(CONFIG_VIDEO_OV02C10) += ov02c10.o
@@ -93,6 +96,7 @@ obj-$(CONFIG_VIDEO_OV2640) += ov2640.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_OV2680) += ov2680.o
obj-$(CONFIG_VIDEO_OV2685) += ov2685.o
+obj-$(CONFIG_VIDEO_OV2735) += ov2735.o
obj-$(CONFIG_VIDEO_OV2740) += ov2740.o
obj-$(CONFIG_VIDEO_OV4689) += ov4689.o
obj-$(CONFIG_VIDEO_OV5640) += ov5640.o
@@ -103,8 +107,8 @@ obj-$(CONFIG_VIDEO_OV5670) += ov5670.o
obj-$(CONFIG_VIDEO_OV5675) += ov5675.o
obj-$(CONFIG_VIDEO_OV5693) += ov5693.o
obj-$(CONFIG_VIDEO_OV5695) += ov5695.o
+obj-$(CONFIG_VIDEO_OV6211) += ov6211.o
obj-$(CONFIG_VIDEO_OV64A40) += ov64a40.o
-obj-$(CONFIG_VIDEO_OV6650) += ov6650.o
obj-$(CONFIG_VIDEO_OV7251) += ov7251.o
obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c
index 5d90b8ab9b6d..378f4e6af12c 100644
--- a/drivers/media/i2c/adv7180.c
+++ b/drivers/media/i2c/adv7180.c
@@ -214,7 +214,6 @@ struct adv7180_state {
struct gpio_desc *pwdn_gpio;
struct gpio_desc *rst_gpio;
v4l2_std_id curr_norm;
- bool powered;
bool streaming;
u8 input;
@@ -274,6 +273,38 @@ static int adv7180_vpp_write(struct adv7180_state *state, unsigned int reg,
return i2c_smbus_write_byte_data(state->vpp_client, reg, value);
}
+static int adv7180_set_power(struct adv7180_state *state, bool on)
+{
+ u8 val;
+ int ret;
+
+ if (on)
+ val = ADV7180_PWR_MAN_ON;
+ else
+ val = ADV7180_PWR_MAN_OFF;
+
+ ret = adv7180_write(state, ADV7180_REG_PWR_MAN, val);
+ if (ret)
+ return ret;
+
+ if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
+ if (on) {
+ adv7180_csi_write(state, 0xDE, 0x02);
+ adv7180_csi_write(state, 0xD2, 0xF7);
+ adv7180_csi_write(state, 0xD8, 0x65);
+ adv7180_csi_write(state, 0xE0, 0x09);
+ adv7180_csi_write(state, 0x2C, 0x00);
+ if (state->field == V4L2_FIELD_NONE)
+ adv7180_csi_write(state, 0x1D, 0x80);
+ adv7180_csi_write(state, 0x00, 0x00);
+ } else {
+ adv7180_csi_write(state, 0x00, 0x80);
+ }
+ }
+
+ return 0;
+}
+
static v4l2_std_id adv7180_std_to_v4l2(u8 status1)
{
/* in case V4L2_IN_ST_NO_SIGNAL */
@@ -357,32 +388,27 @@ static inline struct adv7180_state *to_state(struct v4l2_subdev *sd)
static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct adv7180_state *state = to_state(sd);
- int err = mutex_lock_interruptible(&state->mutex);
- if (err)
- return err;
-
- if (state->streaming) {
- err = -EBUSY;
- goto unlock;
- }
+ int ret;
- err = adv7180_set_video_standard(state,
- ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
- if (err)
- goto unlock;
+ guard(mutex)(&state->mutex);
- msleep(100);
- __adv7180_status(state, NULL, std);
+ /*
+ * We can't sample the standard while the device is streaming, since
+ * doing so touches the VID_SEL register and would interfere with the
+ * capture session.
+ */
+ if (state->streaming)
+ return -EBUSY;
- err = v4l2_std_to_adv7180(state->curr_norm);
- if (err < 0)
- goto unlock;
+ /* Set the standard to autodetect PAL B/G/H/I/D, NTSC J or SECAM */
+ ret = adv7180_set_video_standard(state,
+ ADV7180_STD_AD_PAL_BG_NTSC_J_SECAM);
+ if (ret)
+ return ret;
- err = adv7180_set_video_standard(state, err);
+ /* Allow some time for the autodetection to run. */
+ msleep(100);
-unlock:
- mutex_unlock(&state->mutex);
- return err;
+ return __adv7180_status(state, NULL, std);
}
static int adv7180_s_routing(struct v4l2_subdev *sd, u32 input,
@@ -437,22 +463,18 @@ static int adv7180_program_std(struct adv7180_state *state)
static int adv7180_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7180_state *state = to_state(sd);
- int ret = mutex_lock_interruptible(&state->mutex);
+ int ret;
- if (ret)
- return ret;
+ guard(mutex)(&state->mutex);
/* Make sure we can support this std */
ret = v4l2_std_to_adv7180(std);
if (ret < 0)
- goto out;
+ return ret;
state->curr_norm = std;
- ret = adv7180_program_std(state);
-out:
- mutex_unlock(&state->mutex);
- return ret;
+ return 0;
}
static int adv7180_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
@@ -514,55 +536,6 @@ static void adv7180_set_reset_pin(struct adv7180_state *state, bool on)
}
}
-static int adv7180_set_power(struct adv7180_state *state, bool on)
-{
- u8 val;
- int ret;
-
- if (on)
- val = ADV7180_PWR_MAN_ON;
- else
- val = ADV7180_PWR_MAN_OFF;
-
- ret = adv7180_write(state, ADV7180_REG_PWR_MAN, val);
- if (ret)
- return ret;
-
- if (state->chip_info->flags & ADV7180_FLAG_MIPI_CSI2) {
- if (on) {
- adv7180_csi_write(state, 0xDE, 0x02);
- adv7180_csi_write(state, 0xD2, 0xF7);
- adv7180_csi_write(state, 0xD8, 0x65);
- adv7180_csi_write(state, 0xE0, 0x09);
- adv7180_csi_write(state, 0x2C, 0x00);
- if (state->field == V4L2_FIELD_NONE)
- adv7180_csi_write(state, 0x1D, 0x80);
- adv7180_csi_write(state, 0x00, 0x00);
- } else {
- adv7180_csi_write(state, 0x00, 0x80);
- }
- }
-
- return 0;
-}
-
-static int adv7180_s_power(struct v4l2_subdev *sd, int on)
-{
- struct adv7180_state *state = to_state(sd);
- int ret;
-
- ret = mutex_lock_interruptible(&state->mutex);
- if (ret)
- return ret;
-
- ret = adv7180_set_power(state, on);
- if (ret == 0)
- state->powered = on;
-
- mutex_unlock(&state->mutex);
- return ret;
-}
-
static const char * const test_pattern_menu[] = {
"Single color",
"Color bars",
@@ -601,11 +574,11 @@ static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_adv7180_sd(ctrl);
struct adv7180_state *state = to_state(sd);
- int ret = mutex_lock_interruptible(&state->mutex);
+ int ret = 0;
int val;
- if (ret)
- return ret;
+ lockdep_assert_held(&state->mutex);
+
val = ctrl->val;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -647,7 +620,6 @@ static int adv7180_s_ctrl(struct v4l2_ctrl *ctrl)
ret = -EINVAL;
}
- mutex_unlock(&state->mutex);
return ret;
}
@@ -668,6 +640,7 @@ static const struct v4l2_ctrl_config adv7180_ctrl_fast_switch = {
static int adv7180_init_controls(struct adv7180_state *state)
{
v4l2_ctrl_handler_init(&state->ctrl_hdl, 4);
+ state->ctrl_hdl.lock = &state->mutex;
v4l2_ctrl_new_std(&state->ctrl_hdl, &adv7180_ctrl_ops,
V4L2_CID_BRIGHTNESS, ADV7180_BRI_MIN,
@@ -700,7 +673,6 @@ static int adv7180_init_controls(struct adv7180_state *state)
v4l2_ctrl_handler_free(&state->ctrl_hdl);
return err;
}
- v4l2_ctrl_handler_setup(&state->ctrl_hdl);
return 0;
}
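
Pointing state->ctrl_hdl.lock at the driver mutex makes the control framework take that mutex around every s_ctrl call, which is what lets adv7180_s_ctrl() drop its own locking in favor of lockdep_assert_held(). Order matters: assign the lock right after v4l2_ctrl_handler_init() and before creating any controls. Sketch:

	mutex_init(&state->mutex);
	v4l2_ctrl_handler_init(&state->ctrl_hdl, 4);
	state->ctrl_hdl.lock = &state->mutex;	/* framework now uses our mutex */
	/* ... v4l2_ctrl_new_std() calls ... */

	/* later, inside s_ctrl: */
	lockdep_assert_held(&state->mutex);	/* guaranteed by the shared lock */
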
@@ -812,12 +784,7 @@ static int adv7180_set_pad_format(struct v4l2_subdev *sd,
ret = adv7180_mbus_fmt(sd, &format->format);
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- if (state->field != format->format.field) {
- state->field = format->format.field;
- adv7180_set_power(state, false);
- adv7180_set_field_mode(state);
- adv7180_set_power(state, true);
- }
+ state->field = format->format.field;
} else {
framefmt = v4l2_subdev_state_get_format(sd_state, 0);
*framefmt = format->format;
@@ -874,23 +841,117 @@ static int adv7180_g_tvnorms(struct v4l2_subdev *sd, v4l2_std_id *norm)
return 0;
}
-static int adv7180_s_stream(struct v4l2_subdev *sd, int enable)
+static int init_device(struct adv7180_state *state)
{
- struct adv7180_state *state = to_state(sd);
int ret;
- /* It's always safe to stop streaming, no need to take the lock */
- if (!enable) {
- state->streaming = enable;
- return 0;
+ lockdep_assert_held(&state->mutex);
+
+ ret = adv7180_program_std(state);
+ if (ret)
+ return ret;
+
+ adv7180_set_field_mode(state);
+
+ __v4l2_ctrl_handler_setup(&state->ctrl_hdl);
+
+ return ret;
+}
+
+static int adv7180_reset_device(struct adv7180_state *state)
+{
+ int ret;
+
+ lockdep_assert_held(&state->mutex);
+
+ adv7180_set_power_pin(state, true);
+ adv7180_set_reset_pin(state, false);
+
+ adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES);
+ usleep_range(5000, 10000);
+
+ /*
+ * If the device's decoder is powered on after reset, power it off
+ * so the device can be configured.
+ */
+ if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
+ adv7180_set_power(state, false);
+
+ ret = state->chip_info->init(state);
+ if (ret)
+ return ret;
+
+ ret = init_device(state);
+ if (ret)
+ return ret;
+
+ /* register for interrupts */
+ if (state->irq > 0) {
+ /* config the Interrupt pin to be active low */
+ ret = adv7180_write(state, ADV7180_REG_ICONF1,
+ ADV7180_ICONF1_ACTIVE_LOW |
+ ADV7180_ICONF1_PSYNC_ONLY);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_write(state, ADV7180_REG_IMR1, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_write(state, ADV7180_REG_IMR2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* enable AD change interrupts */
+ ret = adv7180_write(state, ADV7180_REG_IMR3,
+ ADV7180_IRQ3_AD_CHANGE);
+ if (ret < 0)
+ return ret;
+
+ ret = adv7180_write(state, ADV7180_REG_IMR4, 0);
+ if (ret < 0)
+ return ret;
}
+ /*
+ * If the device's decoder is powered on after reset, restore the power
+ * after configuration. This preserves the driver's existing behavior;
+ * not doing so results in the first 35+ frames captured being garbage.
+ */
+ if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
+ adv7180_set_power(state, true);
+
+ return 0;
+}
+
+static int adv7180_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct adv7180_state *state = to_state(sd);
+ int ret;
+
/* Must wait until querystd released the lock */
- ret = mutex_lock_interruptible(&state->mutex);
+ guard(mutex)(&state->mutex);
+
+ /*
+ * Always power off the decoder even if streaming is to be enabled, the
+ * decoder needs to be off for the device to be configured.
+ */
+ ret = adv7180_set_power(state, false);
if (ret)
return ret;
+
+ if (enable) {
+ ret = init_device(state);
+ if (ret)
+ return ret;
+
+ ret = adv7180_set_power(state, true);
+ if (ret)
+ return ret;
+ }
+
state->streaming = enable;
- mutex_unlock(&state->mutex);
+
return 0;
}
@@ -919,7 +980,6 @@ static const struct v4l2_subdev_video_ops adv7180_video_ops = {
};
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
- .s_power = adv7180_s_power,
.subscribe_event = adv7180_subscribe_event,
.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};
@@ -1343,62 +1403,6 @@ static const struct adv7180_chip_info adv7282_m_info = {
.select_input = adv7182_select_input,
};
-static int init_device(struct adv7180_state *state)
-{
- int ret;
-
- mutex_lock(&state->mutex);
-
- adv7180_set_power_pin(state, true);
- adv7180_set_reset_pin(state, false);
-
- adv7180_write(state, ADV7180_REG_PWR_MAN, ADV7180_PWR_MAN_RES);
- usleep_range(5000, 10000);
-
- ret = state->chip_info->init(state);
- if (ret)
- goto out_unlock;
-
- ret = adv7180_program_std(state);
- if (ret)
- goto out_unlock;
-
- adv7180_set_field_mode(state);
-
- /* register for interrupts */
- if (state->irq > 0) {
- /* config the Interrupt pin to be active low */
- ret = adv7180_write(state, ADV7180_REG_ICONF1,
- ADV7180_ICONF1_ACTIVE_LOW |
- ADV7180_ICONF1_PSYNC_ONLY);
- if (ret < 0)
- goto out_unlock;
-
- ret = adv7180_write(state, ADV7180_REG_IMR1, 0);
- if (ret < 0)
- goto out_unlock;
-
- ret = adv7180_write(state, ADV7180_REG_IMR2, 0);
- if (ret < 0)
- goto out_unlock;
-
- /* enable AD change interrupts interrupts */
- ret = adv7180_write(state, ADV7180_REG_IMR3,
- ADV7180_IRQ3_AD_CHANGE);
- if (ret < 0)
- goto out_unlock;
-
- ret = adv7180_write(state, ADV7180_REG_IMR4, 0);
- if (ret < 0)
- goto out_unlock;
- }
-
-out_unlock:
- mutex_unlock(&state->mutex);
-
- return ret;
-}
-
static int adv7180_probe(struct i2c_client *client)
{
struct device_node *np = client->dev.of_node;
@@ -1457,10 +1461,7 @@ static int adv7180_probe(struct i2c_client *client)
state->irq = client->irq;
mutex_init(&state->mutex);
state->curr_norm = V4L2_STD_NTSC;
- if (state->chip_info->flags & ADV7180_FLAG_RESET_POWERED)
- state->powered = true;
- else
- state->powered = false;
+
state->input = 0;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
@@ -1477,7 +1478,9 @@ static int adv7180_probe(struct i2c_client *client)
if (ret)
goto err_free_ctrl;
- ret = init_device(state);
+ mutex_lock(&state->mutex);
+ ret = adv7180_reset_device(state);
+ mutex_unlock(&state->mutex);
if (ret)
goto err_media_entity_cleanup;
@@ -1549,6 +1552,8 @@ static int adv7180_suspend(struct device *dev)
struct v4l2_subdev *sd = dev_get_drvdata(dev);
struct adv7180_state *state = to_state(sd);
+ guard(mutex)(&state->mutex);
+
return adv7180_set_power(state, false);
}
@@ -1558,13 +1563,18 @@ static int adv7180_resume(struct device *dev)
struct adv7180_state *state = to_state(sd);
int ret;
- ret = init_device(state);
+ guard(mutex)(&state->mutex);
+
+ ret = adv7180_reset_device(state);
if (ret < 0)
return ret;
- ret = adv7180_set_power(state, state->powered);
- if (ret)
- return ret;
+ /* If we were streaming when suspended, restart the decoder. */
+ if (state->streaming) {
+ ret = adv7180_set_power(state, true);
+ if (ret)
+ return ret;
+ }
return 0;
}
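
The adv7180 changes above drop the explicit mutex_lock()/mutex_unlock() pairs in favour of guard(mutex) from <linux/cleanup.h>, which ties the mutex to the enclosing scope so every return path unlocks automatically. A minimal sketch of the pattern, with illustrative names:

	#include <linux/cleanup.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static int demo_state;

	static int demo_update(int val)
	{
		guard(mutex)(&demo_lock);	/* released when the scope exits */

		if (val < 0)
			return -EINVAL;		/* early return still drops the lock */

		demo_state = val;
		return 0;
	}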
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index afed38596362..516553fb17e9 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -42,7 +42,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_DESCRIPTION("Analog Devices ADV7604/10/11/12 video decoder driver");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_AUTHOR("Mats Randgaard <mats.randgaard@cisco.com>");
MODULE_LICENSE("GPL");
@@ -3670,7 +3670,7 @@ static int adv76xx_probe(struct i2c_client *client)
err = media_entity_pads_init(&sd->entity, state->source_pad + 1,
state->pads);
if (err)
- goto err_work_queues;
+ goto err_i2c;
/* Configure regmaps */
err = configure_regmaps(state);
@@ -3711,8 +3711,6 @@ static int adv76xx_probe(struct i2c_client *client)
err_entity:
media_entity_cleanup(&sd->entity);
-err_work_queues:
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
err_i2c:
adv76xx_unregister_clients(state);
err_hdl:
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 5545cd23e113..ea6966c0605e 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -38,7 +38,7 @@ module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
MODULE_DESCRIPTION("Analog Devices ADV7842 video decoder driver");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_AUTHOR("Martin Bugge <marbugge@cisco.com>");
MODULE_LICENSE("GPL");
@@ -2699,6 +2699,7 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
/* CP block */
struct adv7842_state *state = to_state(sd);
struct v4l2_dv_timings timings;
+ int temp;
u8 reg_io_0x02 = io_read(sd, 0x02);
u8 reg_io_0x21 = io_read(sd, 0x21);
u8 reg_rep_0x77 = rep_read(sd, 0x77);
@@ -2821,8 +2822,9 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
(((reg_io_0x02 >> 2) & 0x01) ^ (reg_io_0x02 & 0x01)) ?
"(16-235)" : "(0-255)",
(reg_io_0x02 & 0x08) ? "enabled" : "disabled");
+ temp = cp_read(sd, 0xf4) >> 4;
v4l2_info(sd, "Color space conversion: %s\n",
- csc_coeff_sel_rb[cp_read(sd, 0xf4) >> 4]);
+ temp < 0 ? "" : csc_coeff_sel_rb[temp]);
if (!is_digital_input(sd))
return 0;
@@ -2852,8 +2854,9 @@ static int adv7842_cp_log_status(struct v4l2_subdev *sd)
hdmi_read(sd, 0x5f));
v4l2_info(sd, "AV Mute: %s\n",
(hdmi_read(sd, 0x04) & 0x40) ? "on" : "off");
+ temp = hdmi_read(sd, 0x0b) >> 6;
v4l2_info(sd, "Deep color mode: %s\n",
- deep_color_mode_txt[hdmi_read(sd, 0x0b) >> 6]);
+ temp < 0 ? "" : deep_color_mode_txt[temp]);
adv7842_log_infoframes(sd);
@@ -3466,8 +3469,8 @@ static struct i2c_client *adv7842_dummy_client(struct v4l2_subdev *sd, const cha
cp = i2c_new_dummy_device(client->adapter, io_read(sd, io_reg) >> 1);
if (IS_ERR(cp)) {
- v4l2_err(sd, "register %s on i2c addr 0x%x failed with %ld\n",
- desc, addr, PTR_ERR(cp));
+ v4l2_err(sd, "register %s on i2c addr 0x%x failed with %pe\n",
+ desc, addr, cp);
cp = NULL;
}
@@ -3626,7 +3629,7 @@ static int adv7842_probe(struct i2c_client *client)
err = media_entity_pads_init(&sd->entity, ADV7842_PAD_SOURCE + 1,
state->pads);
if (err)
- goto err_work_queues;
+ goto err_i2c;
err = adv7842_core_init(sd);
if (err)
@@ -3647,8 +3650,6 @@ static int adv7842_probe(struct i2c_client *client)
err_entity:
media_entity_cleanup(&sd->entity);
-err_work_queues:
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
err_i2c:
adv7842_unregister_clients(sd);
err_hdl:
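
Several hunks here and below (adv7842, ar0521, ccs, ds90ub913/953) switch error logging from printing PTR_ERR() with %ld to the %pe format, which decodes an ERR_PTR-encoded pointer into a symbolic errno. A short sketch of the idiom, on a hypothetical probe path:

	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk)) {
		/* %pe prints e.g. "-EPROBE_DEFER" instead of a raw number */
		dev_err(dev, "could not get clock (%pe)\n", clk);
		return PTR_ERR(clk);
	}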
diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c
index 24873149096c..f156058500e3 100644
--- a/drivers/media/i2c/ar0521.c
+++ b/drivers/media/i2c/ar0521.c
@@ -1077,11 +1077,10 @@ static int ar0521_probe(struct i2c_client *client)
}
/* Get master clock (extclk) */
- sensor->extclk = devm_clk_get(dev, "extclk");
- if (IS_ERR(sensor->extclk)) {
- dev_err(dev, "failed to get extclk\n");
- return PTR_ERR(sensor->extclk);
- }
+ sensor->extclk = devm_v4l2_sensor_clk_get(dev, "extclk");
+ if (IS_ERR(sensor->extclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->extclk),
+ "failed to get extclk\n");
sensor->extclk_freq = clk_get_rate(sensor->extclk);
@@ -1110,8 +1109,8 @@ static int ar0521_probe(struct i2c_client *client)
ar0521_supply_names[cnt]);
if (IS_ERR(supply)) {
- dev_info(dev, "no %s regulator found: %li\n",
- ar0521_supply_names[cnt], PTR_ERR(supply));
+ dev_info(dev, "no %s regulator found: %pe\n",
+ ar0521_supply_names[cnt], supply);
return PTR_ERR(supply);
}
sensor->supplies[cnt] = supply;
diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c
index 1c889c878abd..f8523140784c 100644
--- a/drivers/media/i2c/ccs/ccs-core.c
+++ b/drivers/media/i2c/ccs/ccs-core.c
@@ -3237,8 +3237,8 @@ static int ccs_probe(struct i2c_client *client)
dev_info(&client->dev, "no clock defined, continuing...\n");
sensor->ext_clk = NULL;
} else if (IS_ERR(sensor->ext_clk)) {
- dev_err(&client->dev, "could not get clock (%ld)\n",
- PTR_ERR(sensor->ext_clk));
+ dev_err(&client->dev, "could not get clock (%pe)\n",
+ sensor->ext_clk);
return -EPROBE_DEFER;
}
@@ -3294,8 +3294,8 @@ static int ccs_probe(struct i2c_client *client)
sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
if (IS_ERR(sensor->regmap)) {
- dev_err(&client->dev, "can't initialise CCI (%ld)\n",
- PTR_ERR(sensor->regmap));
+ dev_err(&client->dev, "can't initialise CCI (%pe)\n",
+ sensor->regmap);
return PTR_ERR(sensor->regmap);
}
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index a90a9e5705a0..a86306304330 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -9,10 +9,10 @@
* Changes by Tyler Trafford <tatrafford@comcast.net>
* - cleanup/rewrite for V4L2 API (2005)
*
- * VBI support by Hans Verkuil <hverkuil@xs4all.nl>.
+ * VBI support by Hans Verkuil <hverkuil@kernel.org>.
*
* NTSC sliced VBI support by Christopher Neufeld <television@cneufeld.ca>
- * with additional fixes by Hans Verkuil <hverkuil@xs4all.nl>.
+ * with additional fixes by Hans Verkuil <hverkuil@kernel.org>.
*
* CX23885 support by Steven Toth <stoth@linuxtv.org>.
*
diff --git a/drivers/media/i2c/ds90ub913.c b/drivers/media/i2c/ds90ub913.c
index a80da2b4a8fa..e97e499b04e6 100644
--- a/drivers/media/i2c/ds90ub913.c
+++ b/drivers/media/i2c/ds90ub913.c
@@ -333,8 +333,7 @@ static int _ub913_set_routing(struct v4l2_subdev *sd,
.quantization = V4L2_QUANTIZATION_LIM_RANGE,
.xfer_func = V4L2_XFER_FUNC_SRGB,
};
- struct v4l2_subdev_stream_configs *stream_configs;
- unsigned int i;
+ struct v4l2_subdev_route *route;
int ret;
ret = v4l2_subdev_routing_validate(sd, routing,
@@ -346,13 +345,15 @@ static int _ub913_set_routing(struct v4l2_subdev *sd,
if (ret)
return ret;
- stream_configs = &state->stream_configs;
+ for_each_active_route(&state->routing, route) {
+ struct v4l2_mbus_framefmt *fmt;
- for (i = 0; i < stream_configs->num_configs; i++) {
- if (stream_configs->configs[i].pad == UB913_PAD_SINK)
- stream_configs->configs[i].fmt = in_format;
- else
- stream_configs->configs[i].fmt = out_format;
+ fmt = v4l2_subdev_state_get_format(state, route->sink_pad,
+ route->sink_stream);
+ *fmt = in_format;
+ fmt = v4l2_subdev_state_get_format(state, route->source_pad,
+ route->source_stream);
+ *fmt = out_format;
}
return 0;
@@ -621,7 +622,7 @@ static int ub913_v4l2_notifier_register(struct ub913_data *priv)
fwnode_handle_put(ep_fwnode);
if (IS_ERR(asd)) {
- dev_err(dev, "Failed to add subdev: %ld", PTR_ERR(asd));
+ dev_err(dev, "Failed to add subdev: %pe", asd);
v4l2_async_nf_cleanup(&priv->notifier);
return PTR_ERR(asd);
}
diff --git a/drivers/media/i2c/ds90ub953.c b/drivers/media/i2c/ds90ub953.c
index e3fc9d66970a..daefdb108fbf 100644
--- a/drivers/media/i2c/ds90ub953.c
+++ b/drivers/media/i2c/ds90ub953.c
@@ -776,7 +776,7 @@ static int ub953_v4l2_notifier_register(struct ub953_data *priv)
fwnode_handle_put(ep_fwnode);
if (IS_ERR(asd)) {
- dev_err(dev, "Failed to add subdev: %ld", PTR_ERR(asd));
+ dev_err(dev, "Failed to add subdev: %pe", asd);
v4l2_async_nf_cleanup(&priv->notifier);
return PTR_ERR(asd);
}
@@ -1023,15 +1023,17 @@ static unsigned long ub953_clkout_recalc_rate(struct clk_hw *hw,
return rate;
}
-static long ub953_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int ub953_clkout_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct ub953_data *priv = container_of(hw, struct ub953_data, clkout_clk_hw);
struct ub953_clkout_data clkout_data;
- ub953_calc_clkout_params(priv, rate, &clkout_data);
+ ub953_calc_clkout_params(priv, req->rate, &clkout_data);
+
+ req->rate = clkout_data.rate;
- return clkout_data.rate;
+ return 0;
}
static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1050,7 +1052,7 @@ static int ub953_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
static const struct clk_ops ub953_clkout_ops = {
.recalc_rate = ub953_clkout_recalc_rate,
- .round_rate = ub953_clkout_round_rate,
+ .determine_rate = ub953_clkout_determine_rate,
.set_rate = ub953_clkout_set_rate,
};
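
The ub953 clkout conversion follows the generic migration from the clk_ops .round_rate callback to .determine_rate: rather than returning the rounded rate, the callback stores the achievable rate back into the clk_rate_request. A minimal sketch, assuming hypothetical hardware limited to whole multiples of 1 MHz:

	#include <linux/clk-provider.h>
	#include <linux/math.h>
	#include <linux/units.h>

	static int demo_clk_determine_rate(struct clk_hw *hw,
					   struct clk_rate_request *req)
	{
		/* Report the closest rate the hardware can actually produce. */
		req->rate = rounddown(req->rate, HZ_PER_MHZ);
		return 0;
	}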
diff --git a/drivers/media/i2c/dw9719.c b/drivers/media/i2c/dw9719.c
index 032fbcb981f2..59558335989e 100644
--- a/drivers/media/i2c/dw9719.c
+++ b/drivers/media/i2c/dw9719.c
@@ -23,6 +23,25 @@
#define DW9719_CTRL_STEPS 16
#define DW9719_CTRL_DELAY_US 1000
+#define DW9718S_PD CCI_REG8(0)
+
+#define DW9718S_CONTROL CCI_REG8(1)
+#define DW9718S_CONTROL_SW_LINEAR BIT(0)
+#define DW9718S_CONTROL_SAC_SHIFT 1
+#define DW9718S_CONTROL_SAC_MASK 0x7
+#define DW9718S_CONTROL_OCP_DISABLE BIT(4)
+#define DW9718S_CONTROL_UVLO_DISABLE BIT(5)
+#define DW9718S_DEFAULT_SAC 4
+
+#define DW9718S_VCM_CURRENT CCI_REG16(2)
+
+#define DW9718S_SW CCI_REG8(4)
+#define DW9718S_SW_VCM_FREQ_MASK 0xF
+#define DW9718S_DEFAULT_VCM_FREQ 0
+
+#define DW9718S_SACT CCI_REG8(5)
+#define DW9718S_SACT_PERIOD_8_8MS 0x19
+
#define DW9719_INFO CCI_REG8(0)
#define DW9719_ID 0xF1
#define DW9761_ID 0xF4
@@ -49,12 +68,17 @@
#define DW9761_VCM_PRELOAD CCI_REG8(8)
#define DW9761_DEFAULT_VCM_PRELOAD 0x73
+#define DW9800K_DEFAULT_SAC 1
+#define DW9800K_MODE_SAC_SHIFT 6
+#define DW9800K_DEFAULT_VCM_FREQ 0x10
#define to_dw9719_device(x) container_of(x, struct dw9719_device, sd)
enum dw9719_model {
+ DW9718S,
DW9719,
DW9761,
+ DW9800K,
};
struct dw9719_device {
@@ -75,26 +99,55 @@ struct dw9719_device {
static int dw9719_power_down(struct dw9719_device *dw9719)
{
+ u32 reg_pwr = dw9719->model == DW9718S ? DW9718S_PD : DW9719_CONTROL;
+
+ /*
+ * It is worth engaging the internal SHUTDOWN mode here, especially
+ * since the regulator may be shared with other devices.
+ */
+ if (cci_write(dw9719->regmap, reg_pwr, DW9719_SHUTDOWN, NULL))
+ dev_err(dw9719->dev, "Error writing to power register\n");
return regulator_disable(dw9719->regulator);
}
static int dw9719_power_up(struct dw9719_device *dw9719, bool detect)
{
+ u32 reg_pwr = dw9719->model == DW9718S ? DW9718S_PD : DW9719_CONTROL;
u64 val;
int ret;
+ int err;
ret = regulator_enable(dw9719->regulator);
if (ret)
return ret;
- /* Jiggle SCL pin to wake up device */
- cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_SHUTDOWN, &ret);
- fsleep(100);
- cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_STANDBY, &ret);
- /* Need 100us to transit from SHUTDOWN to STANDBY */
- fsleep(100);
+ /*
+ * The device needs 100us to transition from SHUTDOWN to STANDBY.
+ * Jiggle the SCL pin to wake the device up (even when the regulator
+ * is shared), wait double that time to be sure (100us is not enough,
+ * at least on the DW9718S found in the motorola-nora smartphone),
+ * then retry the write.
+ */
+ cci_write(dw9719->regmap, reg_pwr, DW9719_STANDBY, NULL);
+ /* The jiggle is expected to fail; don't even log it as an error */
+ fsleep(200);
+ cci_write(dw9719->regmap, reg_pwr, DW9719_STANDBY, &ret);
if (detect) {
+ /* These models do not have an INFO register */
+ switch (dw9719->model) {
+ case DW9718S:
+ dw9719->sac_mode = DW9718S_DEFAULT_SAC;
+ dw9719->vcm_freq = DW9718S_DEFAULT_VCM_FREQ;
+ goto props;
+ case DW9800K:
+ dw9719->sac_mode = DW9800K_DEFAULT_SAC;
+ dw9719->vcm_freq = DW9800K_DEFAULT_VCM_FREQ;
+ goto props;
+ default:
+ break;
+ }
+
ret = cci_read(dw9719->regmap, DW9719_INFO, &val, NULL);
if (ret < 0)
return ret;
@@ -118,23 +171,52 @@ static int dw9719_power_up(struct dw9719_device *dw9719, bool detect)
return -ENXIO;
}
+props:
/* Optional indication of SAC mode select */
device_property_read_u32(dw9719->dev, "dongwoon,sac-mode",
&dw9719->sac_mode);
/* Optional indication of VCM frequency */
- device_property_read_u32(dw9719->dev, "dongwoon,vcm-freq",
+ err = device_property_read_u32(dw9719->dev, "dongwoon,vcm-freq",
+ &dw9719->vcm_freq);
+ if (err == 0)
+ dev_warn(dw9719->dev, "dongwoon,vcm-freq property is deprecated, please use dongwoon,vcm-prescale\n");
+
+ /* Optional indication of VCM prescale */
+ device_property_read_u32(dw9719->dev, "dongwoon,vcm-prescale",
&dw9719->vcm_freq);
}
- cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_ENABLE_RINGING, &ret);
- cci_write(dw9719->regmap, DW9719_MODE, dw9719->mode_low_bits |
- (dw9719->sac_mode << DW9719_MODE_SAC_SHIFT), &ret);
- cci_write(dw9719->regmap, DW9719_VCM_FREQ, dw9719->vcm_freq, &ret);
-
- if (dw9719->model == DW9761)
+ switch (dw9719->model) {
+ case DW9800K:
+ cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_ENABLE_RINGING, &ret);
+ cci_write(dw9719->regmap, DW9719_MODE,
+ dw9719->sac_mode << DW9800K_MODE_SAC_SHIFT, &ret);
+ cci_write(dw9719->regmap, DW9719_VCM_FREQ, dw9719->vcm_freq, &ret);
+ break;
+ case DW9718S:
+ /* Datasheet says [OCP/UVLO] should be disabled below 2.5V */
+ dw9719->sac_mode &= DW9718S_CONTROL_SAC_MASK;
+ cci_write(dw9719->regmap, DW9718S_CONTROL,
+ DW9718S_CONTROL_SW_LINEAR |
+ (dw9719->sac_mode << DW9718S_CONTROL_SAC_SHIFT) |
+ DW9718S_CONTROL_OCP_DISABLE |
+ DW9718S_CONTROL_UVLO_DISABLE, &ret);
+ cci_write(dw9719->regmap, DW9718S_SACT,
+ DW9718S_SACT_PERIOD_8_8MS, &ret);
+ cci_write(dw9719->regmap, DW9718S_SW,
+ dw9719->vcm_freq & DW9718S_SW_VCM_FREQ_MASK, &ret);
+ break;
+ case DW9761:
cci_write(dw9719->regmap, DW9761_VCM_PRELOAD,
DW9761_DEFAULT_VCM_PRELOAD, &ret);
+ fallthrough;
+ case DW9719:
+ cci_write(dw9719->regmap, DW9719_CONTROL, DW9719_ENABLE_RINGING, &ret);
+ cci_write(dw9719->regmap, DW9719_MODE, dw9719->mode_low_bits |
+ (dw9719->sac_mode << DW9719_MODE_SAC_SHIFT), &ret);
+ cci_write(dw9719->regmap, DW9719_VCM_FREQ, dw9719->vcm_freq, &ret);
+ }
if (ret)
dw9719_power_down(dw9719);
@@ -144,7 +226,9 @@ static int dw9719_power_up(struct dw9719_device *dw9719, bool detect)
static int dw9719_t_focus_abs(struct dw9719_device *dw9719, s32 value)
{
- return cci_write(dw9719->regmap, DW9719_VCM_CURRENT, value, NULL);
+ u32 reg = dw9719->model == DW9718S ? DW9718S_VCM_CURRENT
+ : DW9719_VCM_CURRENT;
+ return cci_write(dw9719->regmap, reg, value, NULL);
}
static int dw9719_set_ctrl(struct v4l2_ctrl *ctrl)
@@ -229,7 +313,7 @@ static int dw9719_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static int dw9719_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- pm_runtime_put(sd->dev);
+ pm_runtime_put_autosuspend(sd->dev);
return 0;
}
@@ -275,6 +359,8 @@ static int dw9719_probe(struct i2c_client *client)
if (!dw9719)
return -ENOMEM;
+ dw9719->model = (enum dw9719_model)(uintptr_t)i2c_get_match_data(client);
+
dw9719->regmap = devm_cci_regmap_init_i2c(client, 8);
if (IS_ERR(dw9719->regmap))
return PTR_ERR(dw9719->regmap);
@@ -353,12 +439,14 @@ static void dw9719_remove(struct i2c_client *client)
pm_runtime_set_suspended(&client->dev);
}
-static const struct i2c_device_id dw9719_id_table[] = {
- { "dw9719" },
- { "dw9761" },
+static const struct of_device_id dw9719_of_table[] = {
+ { .compatible = "dongwoon,dw9718s", .data = (const void *)DW9718S },
+ { .compatible = "dongwoon,dw9719", .data = (const void *)DW9719 },
+ { .compatible = "dongwoon,dw9761", .data = (const void *)DW9761 },
+ { .compatible = "dongwoon,dw9800k", .data = (const void *)DW9800K },
{ }
};
-MODULE_DEVICE_TABLE(i2c, dw9719_id_table);
+MODULE_DEVICE_TABLE(of, dw9719_of_table);
static DEFINE_RUNTIME_DEV_PM_OPS(dw9719_pm_ops, dw9719_suspend, dw9719_resume,
NULL);
@@ -367,10 +455,10 @@ static struct i2c_driver dw9719_i2c_driver = {
.driver = {
.name = "dw9719",
.pm = pm_sleep_ptr(&dw9719_pm_ops),
+ .of_match_table = dw9719_of_table,
},
.probe = dw9719_probe,
.remove = dw9719_remove,
- .id_table = dw9719_id_table,
};
module_i2c_driver(dw9719_i2c_driver);
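
The dw9719 rework selects per-model behaviour through OF match data: the enum stashed in of_device_id.data is recovered at probe time with i2c_get_match_data(). A sketch of the round trip, with illustrative names:

	enum demo_model { DEMO_A, DEMO_B };

	static const struct of_device_id demo_of_table[] = {
		{ .compatible = "vendor,demo-a", .data = (const void *)DEMO_A },
		{ .compatible = "vendor,demo-b", .data = (const void *)DEMO_B },
		{ }
	};
	MODULE_DEVICE_TABLE(of, demo_of_table);

	static int demo_probe(struct i2c_client *client)
	{
		/* Cast through uintptr_t since the data is a pointer-sized int. */
		enum demo_model model =
			(enum demo_model)(uintptr_t)i2c_get_match_data(client);

		dev_info(&client->dev, "model %d\n", model);
		return 0;
	}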
diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c
index 7519863d77b1..2cb7b718782b 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_driver.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c
@@ -816,7 +816,6 @@ static int et8ek8_power_on(struct et8ek8_sensor *sensor)
{
struct v4l2_subdev *subdev = &sensor->subdev;
struct i2c_client *client = v4l2_get_subdevdata(subdev);
- unsigned int xclk_freq;
int val, rval;
rval = regulator_enable(sensor->vana);
@@ -825,17 +824,6 @@ static int et8ek8_power_on(struct et8ek8_sensor *sensor)
return rval;
}
- if (sensor->current_reglist)
- xclk_freq = sensor->current_reglist->mode.ext_clock;
- else
- xclk_freq = sensor->xclk_freq;
-
- rval = clk_set_rate(sensor->ext_clk, xclk_freq);
- if (rval < 0) {
- dev_err(&client->dev, "unable to set extclk clock freq to %u\n",
- xclk_freq);
- goto out;
- }
rval = clk_prepare_enable(sensor->ext_clk);
if (rval < 0) {
dev_err(&client->dev, "failed to enable extclk\n");
@@ -849,7 +837,7 @@ static int et8ek8_power_on(struct et8ek8_sensor *sensor)
gpiod_set_value(sensor->reset, 1);
- msleep(5000 * 1000 / xclk_freq + 1); /* Wait 5000 cycles */
+ msleep(5000 * 1000 / sensor->xclk_freq + 1); /* Wait 5000 cycles */
rval = et8ek8_i2c_reglist_find_write(client, &meta_reglist,
ET8EK8_REGLIST_POWERON);
@@ -1085,9 +1073,6 @@ static int et8ek8_set_frame_interval(struct v4l2_subdev *subdev,
if (!reglist)
return -EINVAL;
- if (sensor->current_reglist->mode.ext_clock != reglist->mode.ext_clock)
- return -EINVAL;
-
sensor->current_reglist = reglist;
et8ek8_update_controls(sensor);
@@ -1433,18 +1418,13 @@ static int et8ek8_probe(struct i2c_client *client)
return PTR_ERR(sensor->vana);
}
- sensor->ext_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->ext_clk)) {
- dev_err(&client->dev, "could not get clock\n");
- return PTR_ERR(sensor->ext_clk);
- }
+ sensor->ext_clk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
+ 9600000);
+ if (IS_ERR(sensor->ext_clk))
+ return dev_err_probe(&client->dev, PTR_ERR(sensor->ext_clk),
+ "could not get clock\n");
- ret = of_property_read_u32(dev->of_node, "clock-frequency",
- &sensor->xclk_freq);
- if (ret) {
- dev_warn(dev, "can't get clock-frequency\n");
- return ret;
- }
+ sensor->xclk_freq = clk_get_rate(sensor->ext_clk);
mutex_init(&sensor->power_lock);
diff --git a/drivers/media/i2c/et8ek8/et8ek8_mode.c b/drivers/media/i2c/et8ek8/et8ek8_mode.c
index c9088eb0a812..914be1007099 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_mode.c
+++ b/drivers/media/i2c/et8ek8/et8ek8_mode.c
@@ -44,7 +44,6 @@ static struct et8ek8_reglist mode1_poweron_mode2_16vga_2592x1968_12_07fps = {
.window_width = 2592,
.window_height = 1968,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 1207
@@ -145,7 +144,6 @@ static struct et8ek8_reglist mode1_16vga_2592x1968_13_12fps_dpcm10_8 = {
.window_width = 2592,
.window_height = 1968,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 1292
@@ -201,7 +199,6 @@ static struct et8ek8_reglist mode3_4vga_1296x984_29_99fps_dpcm10_8 = {
.window_width = 1296,
.window_height = 984,
.pixel_clock = 96533333,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 3000
@@ -257,7 +254,6 @@ static struct et8ek8_reglist mode4_svga_864x656_29_88fps = {
.window_width = 864,
.window_height = 656,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 2988
@@ -313,7 +309,6 @@ static struct et8ek8_reglist mode5_vga_648x492_29_93fps = {
.window_width = 648,
.window_height = 492,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 2993
@@ -369,7 +364,6 @@ static struct et8ek8_reglist mode2_16vga_2592x1968_3_99fps = {
.window_width = 2592,
.window_height = 1968,
.pixel_clock = 80000000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 399
@@ -424,7 +418,6 @@ static struct et8ek8_reglist mode_648x492_5fps = {
.window_width = 648,
.window_height = 492,
.pixel_clock = 13333333,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 499
@@ -480,7 +473,6 @@ static struct et8ek8_reglist mode3_4vga_1296x984_5fps = {
.window_width = 1296,
.window_height = 984,
.pixel_clock = 49400000,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 501
@@ -536,7 +528,6 @@ static struct et8ek8_reglist mode_4vga_1296x984_25fps_dpcm10_8 = {
.window_width = 1296,
.window_height = 984,
.pixel_clock = 84266667,
- .ext_clock = 9600000,
.timeperframe = {
.numerator = 100,
.denominator = 2500
diff --git a/drivers/media/i2c/et8ek8/et8ek8_reg.h b/drivers/media/i2c/et8ek8/et8ek8_reg.h
index c90e74935f12..3305986c7c9c 100644
--- a/drivers/media/i2c/et8ek8/et8ek8_reg.h
+++ b/drivers/media/i2c/et8ek8/et8ek8_reg.h
@@ -37,7 +37,6 @@ struct et8ek8_mode {
u16 window_height;
u32 pixel_clock; /* in Hz */
- u32 ext_clock; /* in Hz */
struct v4l2_fract timeperframe;
u32 max_exp; /* Maximum exposure value */
u32 bus_format; /* MEDIA_BUS_FMT_ */
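
The et8ek8 conversion (like the gc05a2, gc08a3, gc2145, hi556 and hi846 ones below) replaces open-coded devm_clk_get() plus clk_set_rate()/"clock-frequency" handling with the devm_v4l2_sensor_clk_get*() helpers. Judging purely from the call sites in this series, the _legacy variant additionally takes a flag and a fallback rate so the clock ends up at the frequency the driver used to program itself. A sketch patterned on the hunk above, with the semantics assumed from those call sites:

	/* Assumed signature, taken from the call sites in this series. */
	sensor->ext_clk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
							  9600000);
	if (IS_ERR(sensor->ext_clk))
		return dev_err_probe(dev, PTR_ERR(sensor->ext_clk),
				     "could not get clock\n");

	sensor->xclk_freq = clk_get_rate(sensor->ext_clk);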
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c b/drivers/media/i2c/gc0310.c
index 7af4d66f42a0..7af4d66f42a0 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
+++ b/drivers/media/i2c/gc0310.c
diff --git a/drivers/media/i2c/gc05a2.c b/drivers/media/i2c/gc05a2.c
index 3f7f3d5abeeb..8ba17f80fffe 100644
--- a/drivers/media/i2c/gc05a2.c
+++ b/drivers/media/i2c/gc05a2.c
@@ -1235,16 +1235,12 @@ static int gc05a2_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(gc05a2->regmap),
"failed to init CCI\n");
- gc05a2->xclk = devm_clk_get(dev, NULL);
+ gc05a2->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
+ GC05A2_DEFAULT_CLK_FREQ);
if (IS_ERR(gc05a2->xclk))
return dev_err_probe(dev, PTR_ERR(gc05a2->xclk),
"failed to get xclk\n");
- ret = clk_set_rate(gc05a2->xclk, GC05A2_DEFAULT_CLK_FREQ);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set xclk frequency\n");
-
ret = gc05a2_get_regulators(dev, gc05a2);
if (ret < 0)
return dev_err_probe(dev, ret,
diff --git a/drivers/media/i2c/gc08a3.c b/drivers/media/i2c/gc08a3.c
index 938709a677b6..11fd936db9c3 100644
--- a/drivers/media/i2c/gc08a3.c
+++ b/drivers/media/i2c/gc08a3.c
@@ -1199,16 +1199,12 @@ static int gc08a3_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(gc08a3->regmap),
"failed to init CCI\n");
- gc08a3->xclk = devm_clk_get(dev, NULL);
+ gc08a3->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, true,
+ GC08A3_DEFAULT_CLK_FREQ);
if (IS_ERR(gc08a3->xclk))
return dev_err_probe(dev, PTR_ERR(gc08a3->xclk),
"failed to get xclk\n");
- ret = clk_set_rate(gc08a3->xclk, GC08A3_DEFAULT_CLK_FREQ);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set xclk frequency\n");
-
ret = gc08a3_get_regulators(dev, gc08a3);
if (ret < 0)
return dev_err_probe(dev, ret,
diff --git a/drivers/media/i2c/gc2145.c b/drivers/media/i2c/gc2145.c
index 559a851669aa..b215963a2648 100644
--- a/drivers/media/i2c/gc2145.c
+++ b/drivers/media/i2c/gc2145.c
@@ -1331,7 +1331,7 @@ static int gc2145_probe(struct i2c_client *client)
return -EINVAL;
/* Get system clock (xclk) */
- gc2145->xclk = devm_clk_get(dev, NULL);
+ gc2145->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(gc2145->xclk))
return dev_err_probe(dev, PTR_ERR(gc2145->xclk),
"failed to get xclk\n");
diff --git a/drivers/media/i2c/hi556.c b/drivers/media/i2c/hi556.c
index 076c19fcf99c..de573cee4451 100644
--- a/drivers/media/i2c/hi556.c
+++ b/drivers/media/i2c/hi556.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -10,6 +9,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -631,6 +632,8 @@ static const char * const hi556_supply_names[] = {
};
struct hi556 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -715,7 +718,6 @@ static int hi556_write_reg(struct hi556 *hi556, u16 reg, u16 len, u32 val)
static int hi556_write_reg_list(struct hi556 *hi556,
const struct hi556_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
unsigned int i;
int ret;
@@ -724,7 +726,7 @@ static int hi556_write_reg_list(struct hi556 *hi556,
HI556_REG_VALUE_16BIT,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(hi556->dev,
"failed to write reg 0x%4.4x. error = %d\n",
r_list->regs[i].address, ret);
return ret;
@@ -785,7 +787,6 @@ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct hi556 *hi556 = container_of(ctrl->handler,
struct hi556, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
s64 exposure_max;
int ret = 0;
@@ -801,7 +802,7 @@ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(hi556->dev))
return 0;
switch (ctrl->id) {
@@ -835,7 +836,7 @@ static int hi556_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi556->dev);
return ret;
}
@@ -921,7 +922,6 @@ static void hi556_assign_pad_format(const struct hi556_mode *mode,
static int hi556_identify_module(struct hi556 *hi556)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
int ret;
u32 val;
@@ -934,7 +934,7 @@ static int hi556_identify_module(struct hi556 *hi556)
return ret;
if (val != HI556_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(hi556->dev, "chip id mismatch: %x!=%x\n",
HI556_CHIP_ID, val);
return -ENXIO;
}
@@ -998,7 +998,6 @@ static int hi556_get_selection(struct v4l2_subdev *sd,
static int hi556_start_streaming(struct hi556 *hi556)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
const struct hi556_reg_list *reg_list;
int link_freq_index, ret;
@@ -1010,14 +1009,14 @@ static int hi556_start_streaming(struct hi556 *hi556)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls\n");
+ dev_err(hi556->dev, "failed to set plls\n");
return ret;
}
reg_list = &hi556->cur_mode->reg_list;
ret = hi556_write_reg_list(hi556, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(hi556->dev, "failed to set mode\n");
return ret;
}
@@ -1029,7 +1028,7 @@ static int hi556_start_streaming(struct hi556 *hi556)
HI556_REG_VALUE_16BIT, HI556_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream\n");
+ dev_err(hi556->dev, "failed to set stream\n");
return ret;
}
@@ -1038,22 +1037,19 @@ static int hi556_start_streaming(struct hi556 *hi556)
static void hi556_stop_streaming(struct hi556 *hi556)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi556->sd);
-
if (hi556_write_reg(hi556, HI556_REG_MODE_SELECT,
HI556_REG_VALUE_16BIT, HI556_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream\n");
+ dev_err(hi556->dev, "failed to set stream\n");
}
static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
{
struct hi556 *hi556 = to_hi556(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&hi556->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(hi556->dev);
if (ret < 0) {
mutex_unlock(&hi556->mutex);
return ret;
@@ -1062,11 +1058,11 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
ret = hi556_start_streaming(hi556);
if (ret) {
hi556_stop_streaming(hi556);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi556->dev);
}
} else {
hi556_stop_streaming(hi556);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi556->dev);
}
mutex_unlock(&hi556->mutex);
@@ -1217,7 +1213,6 @@ static int hi556_check_hwcfg(struct device *dev)
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret = 0;
unsigned int i, j;
@@ -1235,18 +1230,6 @@ static int hi556_check_hwcfg(struct device *dev)
if (ret)
return dev_err_probe(dev, ret, "parsing endpoint failed\n");
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- dev_err(dev, "can't get clock frequency\n");
- goto check_hwcfg_error;
- }
-
- if (mclk != HI556_MCLK) {
- dev_err(dev, "external clock %d is not supported\n", mclk);
- ret = -EINVAL;
- goto check_hwcfg_error;
- }
-
if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
@@ -1289,7 +1272,7 @@ static void hi556_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(hi556->dev);
mutex_destroy(&hi556->mutex);
}
@@ -1336,6 +1319,7 @@ static int hi556_resume(struct device *dev)
static int hi556_probe(struct i2c_client *client)
{
struct hi556 *hi556;
+ unsigned long freq;
bool full_power;
int i, ret;
@@ -1347,40 +1331,48 @@ static int hi556_probe(struct i2c_client *client)
if (!hi556)
return -ENOMEM;
+ hi556->dev = &client->dev;
+
v4l2_i2c_subdev_init(&hi556->sd, client, &hi556_subdev_ops);
- hi556->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ hi556->reset_gpio = devm_gpiod_get_optional(hi556->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(hi556->reset_gpio))
- return dev_err_probe(&client->dev, PTR_ERR(hi556->reset_gpio),
+ return dev_err_probe(hi556->dev, PTR_ERR(hi556->reset_gpio),
"failed to get reset GPIO\n");
- hi556->clk = devm_clk_get_optional(&client->dev, "clk");
+ hi556->clk = devm_v4l2_sensor_clk_get(hi556->dev, "clk");
if (IS_ERR(hi556->clk))
- return dev_err_probe(&client->dev, PTR_ERR(hi556->clk),
+ return dev_err_probe(hi556->dev, PTR_ERR(hi556->clk),
"failed to get clock\n");
+ freq = clk_get_rate(hi556->clk);
+ if (freq != HI556_MCLK)
+ return dev_err_probe(hi556->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
for (i = 0; i < ARRAY_SIZE(hi556_supply_names); i++)
hi556->supplies[i].supply = hi556_supply_names[i];
- ret = devm_regulator_bulk_get(&client->dev,
+ ret = devm_regulator_bulk_get(hi556->dev,
ARRAY_SIZE(hi556_supply_names),
hi556->supplies);
if (ret)
- return dev_err_probe(&client->dev, ret,
+ return dev_err_probe(hi556->dev, ret,
"failed to get regulators\n");
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(hi556->dev);
if (full_power) {
/* Ensure non ACPI managed resources are enabled */
- ret = hi556_resume(&client->dev);
+ ret = hi556_resume(hi556->dev);
if (ret)
- return dev_err_probe(&client->dev, ret,
+ return dev_err_probe(hi556->dev, ret,
"failed to power on sensor\n");
ret = hi556_identify_module(hi556);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(hi556->dev, "failed to find sensor: %d\n", ret);
goto probe_error_power_off;
}
}
@@ -1389,7 +1381,7 @@ static int hi556_probe(struct i2c_client *client)
hi556->cur_mode = &supported_modes[0];
ret = hi556_init_controls(hi556);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d\n", ret);
+ dev_err(hi556->dev, "failed to init controls: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1400,22 +1392,22 @@ static int hi556_probe(struct i2c_client *client)
hi556->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&hi556->sd.entity, 1, &hi556->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d\n", ret);
+ dev_err(hi556->dev, "failed to init entity pads: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&hi556->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d\n",
+ dev_err(hi556->dev, "failed to register V4L2 subdev: %d\n",
ret);
goto probe_error_media_entity_cleanup;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(hi556->dev);
+ pm_runtime_enable(hi556->dev);
+ pm_runtime_idle(hi556->dev);
return 0;
@@ -1428,7 +1420,7 @@ probe_error_v4l2_ctrl_handler_free:
probe_error_power_off:
if (full_power)
- hi556_suspend(&client->dev);
+ hi556_suspend(hi556->dev);
return ret;
}
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
index 172772decd3d..a3f77b8434ca 100644
--- a/drivers/media/i2c/hi846.c
+++ b/drivers/media/i2c/hi846.c
@@ -2052,12 +2052,11 @@ static int hi846_probe(struct i2c_client *client)
return ret;
}
- hi846->clock = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(hi846->clock)) {
- dev_err(&client->dev, "failed to get clock: %pe\n",
- hi846->clock);
- return PTR_ERR(hi846->clock);
- }
+ hi846->clock = devm_v4l2_sensor_clk_get(&client->dev, NULL);
+ if (IS_ERR(hi846->clock))
+ return dev_err_probe(&client->dev, PTR_ERR(hi846->clock),
+ "failed to get clock: %pe\n",
+ hi846->clock);
mclk_freq = clk_get_rate(hi846->clock);
if (mclk_freq != 25000000)
diff --git a/drivers/media/i2c/hi847.c b/drivers/media/i2c/hi847.c
index 546833f5b5f5..def01aa07b2f 100644
--- a/drivers/media/i2c/hi847.c
+++ b/drivers/media/i2c/hi847.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -2166,6 +2168,9 @@ static const struct hi847_mode supported_modes[] = {
};
struct hi847 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -2244,7 +2249,6 @@ static int hi847_write_reg(struct hi847 *hi847, u16 reg, u16 len, u32 val)
static int hi847_write_reg_list(struct hi847 *hi847,
const struct hi847_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
unsigned int i;
int ret;
@@ -2253,7 +2257,7 @@ static int hi847_write_reg_list(struct hi847 *hi847,
HI847_REG_VALUE_16BIT,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(hi847->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -2408,7 +2412,6 @@ static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct hi847 *hi847 = container_of(ctrl->handler,
struct hi847, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
s64 exposure_max;
int ret = 0;
@@ -2424,7 +2427,7 @@ static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(hi847->dev))
return 0;
switch (ctrl->id) {
@@ -2466,7 +2469,7 @@ static int hi847_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi847->dev);
return ret;
}
@@ -2557,7 +2560,6 @@ static void hi847_assign_pad_format(const struct hi847_mode *mode,
static int hi847_start_streaming(struct hi847 *hi847)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
const struct hi847_reg_list *reg_list;
int link_freq_index, ret;
@@ -2565,14 +2567,14 @@ static int hi847_start_streaming(struct hi847 *hi847)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = hi847_write_reg_list(hi847, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(hi847->dev, "failed to set plls");
return ret;
}
reg_list = &hi847->cur_mode->reg_list;
ret = hi847_write_reg_list(hi847, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(hi847->dev, "failed to set mode");
return ret;
}
@@ -2587,7 +2589,7 @@ static int hi847_start_streaming(struct hi847 *hi847)
HI847_REG_VALUE_16BIT, HI847_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(hi847->dev, "failed to set stream");
return ret;
}
@@ -2596,28 +2598,25 @@ static int hi847_start_streaming(struct hi847 *hi847)
static void hi847_stop_streaming(struct hi847 *hi847)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
-
if (hi847_write_reg(hi847, HI847_REG_MODE_TG,
HI847_REG_VALUE_16BIT, HI847_REG_MODE_TG_DISABLE))
- dev_err(&client->dev, "failed to set stream 0x%x",
+ dev_err(hi847->dev, "failed to set stream 0x%x",
HI847_REG_MODE_TG);
if (hi847_write_reg(hi847, HI847_REG_MODE_SELECT,
HI847_REG_VALUE_16BIT, HI847_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream 0x%x",
+ dev_err(hi847->dev, "failed to set stream 0x%x",
HI847_REG_MODE_SELECT);
}
static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
{
struct hi847 *hi847 = to_hi847(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&hi847->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(hi847->dev);
if (ret) {
mutex_unlock(&hi847->mutex);
return ret;
@@ -2627,11 +2626,11 @@ static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
hi847_stop_streaming(hi847);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi847->dev);
}
} else {
hi847_stop_streaming(hi847);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(hi847->dev);
}
mutex_unlock(&hi847->mutex);
@@ -2768,7 +2767,6 @@ static const struct v4l2_subdev_internal_ops hi847_internal_ops = {
static int hi847_identify_module(struct hi847 *hi847)
{
- struct i2c_client *client = v4l2_get_subdevdata(&hi847->sd);
int ret;
u32 val;
@@ -2778,7 +2776,7 @@ static int hi847_identify_module(struct hi847 *hi847)
return ret;
if (val != HI847_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(hi847->dev, "chip id mismatch: %x!=%x",
HI847_CHIP_ID, val);
return -ENXIO;
}
@@ -2793,24 +2791,12 @@ static int hi847_check_hwcfg(struct device *dev)
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
-
- if (mclk != HI847_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
- }
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -2862,22 +2848,36 @@ static void hi847_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(hi847->dev);
mutex_destroy(&hi847->mutex);
}
static int hi847_probe(struct i2c_client *client)
{
struct hi847 *hi847;
+ unsigned long freq;
int ret;
hi847 = devm_kzalloc(&client->dev, sizeof(*hi847), GFP_KERNEL);
if (!hi847)
return -ENOMEM;
- ret = hi847_check_hwcfg(&client->dev);
+ hi847->dev = &client->dev;
+
+ hi847->clk = devm_v4l2_sensor_clk_get(hi847->dev, NULL);
+ if (IS_ERR(hi847->clk))
+ return dev_err_probe(hi847->dev, PTR_ERR(hi847->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(hi847->clk);
+ if (freq != HI847_MCLK)
+ return dev_err_probe(hi847->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
+ ret = hi847_check_hwcfg(hi847->dev);
if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
+ dev_err(hi847->dev, "failed to get HW configuration: %d",
ret);
return ret;
}
@@ -2885,7 +2885,7 @@ static int hi847_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&hi847->sd, client, &hi847_subdev_ops);
ret = hi847_identify_module(hi847);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(hi847->dev, "failed to find sensor: %d", ret);
return ret;
}
@@ -2893,7 +2893,7 @@ static int hi847_probe(struct i2c_client *client)
hi847->cur_mode = &supported_modes[0];
ret = hi847_init_controls(hi847);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(hi847->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -2904,20 +2904,20 @@ static int hi847_probe(struct i2c_client *client)
hi847->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&hi847->sd.entity, 1, &hi847->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(hi847->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&hi847->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(hi847->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(hi847->dev);
+ pm_runtime_enable(hi847->dev);
+ pm_runtime_idle(hi847->dev);
return 0;
diff --git a/drivers/media/i2c/imx111.c b/drivers/media/i2c/imx111.c
new file mode 100644
index 000000000000..8eb919788ef7
--- /dev/null
+++ b/drivers/media/i2c/imx111.c
@@ -0,0 +1,1610 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/media.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+#include <linux/units.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-mediabus.h>
+
+/* product information registers */
+#define IMX111_PRODUCT_ID CCI_REG16(0x0000)
+#define IMX111_CHIP_ID 0x111
+#define IMX111_REVISION CCI_REG8(0x0002)
+#define IMX111_MANUFACTURER_ID CCI_REG8(0x0003)
+#define IMX111_FRAME_COUNTER CCI_REG8(0x0005)
+#define IMX111_PIXEL_ORDER CCI_REG8(0x0006)
+
+/* general configuration registers */
+#define IMX111_STREAMING_MODE CCI_REG8(0x0100)
+#define IMX111_MODE_STANDBY 0
+#define IMX111_MODE_STREAMING 1
+#define IMX111_IMAGE_ORIENTATION CCI_REG8(0x0101)
+#define IMX111_IMAGE_HFLIP BIT(0)
+#define IMX111_IMAGE_VFLIP BIT(1)
+#define IMX111_SOFTWARE_RESET CCI_REG8(0x0103)
+#define IMX111_RESET_ON 1
+#define IMX111_GROUP_WRITE CCI_REG8(0x0104)
+#define IMX111_GROUP_WRITE_ON 1
+#define IMX111_FRAME_DROP CCI_REG8(0x0105)
+#define IMX111_FRAME_DROP_ON 1
+#define IMX111_CHANNEL_ID CCI_REG8(0x0110)
+#define IMX111_SIGNALLING_MODE CCI_REG8(0x0111)
+#define IMX111_DATA_DEPTH CCI_REG16(0x0112)
+#define IMX111_DATA_DEPTH_RAW8 0x08
+#define IMX111_DATA_DEPTH_RAW10 0x0a
+
+/* integration time registers */
+#define IMX111_INTEGRATION_TIME CCI_REG16(0x0202)
+#define IMX111_INTEGRATION_TIME_MIN 0x1
+#define IMX111_INTEGRATION_TIME_MAX 0xffff
+#define IMX111_INTEGRATION_TIME_STEP 1
+#define IMX111_INTEGRATION_TIME_OFFSET 5
+
+/* analog gain control */
+#define IMX111_REG_ANALOG_GAIN CCI_REG8(0x0205)
+#define IMX111_ANA_GAIN_MIN 0
+#define IMX111_ANA_GAIN_MAX 240
+#define IMX111_ANA_GAIN_STEP 1
+#define IMX111_ANA_GAIN_DEFAULT 0
+
+/* digital gain control */
+#define IMX111_REG_DIG_GAIN_GREENR CCI_REG16(0x020e)
+#define IMX111_REG_DIG_GAIN_RED CCI_REG16(0x0210)
+#define IMX111_REG_DIG_GAIN_BLUE CCI_REG16(0x0212)
+#define IMX111_REG_DIG_GAIN_GREENB CCI_REG16(0x0214)
+#define IMX111_DGTL_GAIN_MIN 0x0100
+#define IMX111_DGTL_GAIN_MAX 0x0fff
+#define IMX111_DGTL_GAIN_DEFAULT 0x0100
+#define IMX111_DGTL_GAIN_STEP 1
+
+/* clock configuration registers */
+#define IMX111_PIXEL_CLK_DIVIDER_PLL1 CCI_REG8(0x0301)
+#define IMX111_SYSTEM_CLK_DIVIDER_PLL1 CCI_REG8(0x0303)
+#define IMX111_PRE_PLL_CLK_DIVIDER_PLL1 CCI_REG8(0x0305)
+#define IMX111_PLL_MULTIPLIER_PLL1 CCI_REG8(0x0307)
+#define IMX111_PLL_SETTLING_TIME CCI_REG8(0x303c)
+#define IMX111_PLL_SETTLING_TIME_DEFAULT 200
+#define IMX111_POST_DIVIDER CCI_REG8(0x30a4)
+#define IMX111_POST_DIVIDER_DIV1 2
+#define IMX111_POST_DIVIDER_DIV2 0
+#define IMX111_POST_DIVIDER_DIV4 1
+
+/* frame timing registers */
+#define IMX111_VERTICAL_TOTAL_LENGTH CCI_REG16(0x0340)
+#define IMX111_VTL_MAX 0x09d8
+#define IMX111_VBLANK_MIN 16
+#define IMX111_HORIZONTAL_TOTAL_LENGTH CCI_REG16(0x0342)
+#define IMX111_HTL_MAX 0x0dd0
+#define IMX111_HBLANK_MIN 16
+
+/* image size registers */
+#define IMX111_HORIZONTAL_START CCI_REG16(0x0344)
+#define IMX111_VERTICAL_START CCI_REG16(0x0346)
+#define IMX111_HORIZONTAL_END CCI_REG16(0x0348)
+#define IMX111_VERTICAL_END CCI_REG16(0x034a)
+#define IMX111_IMAGE_WIDTH CCI_REG16(0x034c)
+#define IMX111_IMAGE_HEIGHT CCI_REG16(0x034e)
+#define IMX111_H_EVEN_INC CCI_REG8(0x0381)
+#define IMX111_H_ODD_INC CCI_REG8(0x0383)
+#define IMX111_W_EVEN_INC CCI_REG8(0x0385)
+#define IMX111_W_ODD_INC CCI_REG8(0x0387)
+
+/* test pattern registers */
+#define IMX111_TEST_PATTERN CCI_REG8(0x0601)
+#define IMX111_TEST_PATTERN_NONE 0
+#define IMX111_TEST_PATTERN_SOLID 1
+#define IMX111_TEST_PATTERN_BARS 2
+#define IMX111_TEST_PATTERN_FADE 3
+#define IMX111_TEST_PATTERN_PN9 4
+#define IMX111_SOLID_COLOR_RED CCI_REG16(0x0602)
+#define IMX111_SOLID_COLOR_GR CCI_REG16(0x0604)
+#define IMX111_SOLID_COLOR_BLUE CCI_REG16(0x0606)
+#define IMX111_SOLID_COLOR_GB CCI_REG16(0x0608)
+#define IMX111_TESTP_COLOUR_MIN 0
+#define IMX111_TESTP_COLOUR_MAX 0x03ff
+#define IMX111_TESTP_COLOUR_STEP 1
+
+#define IMX111_FRAME_RATE_STEP 5
+
+#define IMX111_PIXEL_ARRAY_WIDTH 3280U
+#define IMX111_PIXEL_ARRAY_HEIGHT 2464U
+
+enum {
+ IMX111_MODE_3280x2464,
+ IMX111_MODE_3280x1848,
+ IMX111_MODE_3280x1098,
+ IMX111_MODE_2100x1200,
+ IMX111_MODE_1952x1098,
+ IMX111_MODE_1920x1080,
+ IMX111_MODE_1640x1232,
+ IMX111_MODE_1440x1080,
+ IMX111_MODE_1640x924,
+ IMX111_MODE_1308x736,
+ IMX111_MODE_1280x720,
+ IMX111_MODE_820x614,
+ IMX111_MODE_640x480,
+};
+
+static const struct regulator_bulk_data imx111_supplies[] = {
+ { .supply = "iovdd" },
+ { .supply = "dvdd" },
+ { .supply = "avdd" },
+};
+
+struct imx111_mode {
+ u32 width;
+ u32 height;
+
+ /* Default vertical and horizontal total length */
+ u32 vtl_def;
+ u32 htl_def;
+
+ struct {
+ const struct cci_reg_sequence *regs;
+ u32 num_of_regs;
+ } reg_list;
+};
+
+struct imx111_pll {
+ u64 extclk_rate;
+ u8 pre_div;
+ u8 mult;
+};
+
+struct imx111 {
+ struct regmap *regmap;
+
+ struct clk *extclk;
+ struct gpio_desc *reset;
+ struct regulator_bulk_data *supplies;
+
+ struct v4l2_fwnode_endpoint bus_cfg;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+
+ /* Current mode */
+ const struct imx111_mode *cur_mode;
+ const struct imx111_pll *pll;
+ u32 data_depth;
+
+ u64 pixel_clk_raw;
+ s64 default_link_freq;
+};
+
+static const struct imx111_pll imx111_pll[] = {
+ { .extclk_rate = 6000000, .pre_div = 1, .mult = 113, },
+ { .extclk_rate = 12000000, .pre_div = 2, .mult = 113, },
+ { .extclk_rate = 13500000, .pre_div = 1, .mult = 50, },
+ { .extclk_rate = 18000000, .pre_div = 2, .mult = 75, },
+ { .extclk_rate = 24000000, .pre_div = 4, .mult = 113, },
+ { .extclk_rate = 27000000, .pre_div = 2, .mult = 50, },
+ { .extclk_rate = 36000000, .pre_div = 4, .mult = 75, },
+ { .extclk_rate = 54000000, .pre_div = 4, .mult = 50, },
+};
+
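+/*
+ * Every entry above resolves to (extclk_rate / pre_div) * mult of
+ * roughly 675-678 MHz; e.g. 24 MHz / 4 * 113 = 678 MHz and
+ * 27 MHz / 2 * 50 = 675 MHz. The PLL therefore runs at effectively the
+ * same frequency whichever supported external clock is used.
+ */
+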
+/*
+ * This table MUST contain 4 entries per format, to cover the various flip
+ * combinations in the order
+ * - no flip
+ * - h flip
+ * - v flip
+ * - h&v flips
+ */
+static const u32 imx111_mbus_formats[] = {
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+};
+
+static const struct cci_reg_sequence imx111_global_init[] = {
+ { CCI_REG8(0x3080), 0x50 },
+ { CCI_REG8(0x3087), 0x53 },
+ { CCI_REG8(0x309d), 0x94 },
+ { CCI_REG8(0x30b1), 0x03 },
+ { CCI_REG8(0x30c6), 0x00 },
+ { CCI_REG8(0x30c7), 0x00 },
+ { CCI_REG8(0x3115), 0x0b },
+ { CCI_REG8(0x3118), 0x30 },
+ { CCI_REG8(0x311d), 0x25 },
+ { CCI_REG8(0x3121), 0x0a },
+ { CCI_REG8(0x3212), 0xf2 },
+ { CCI_REG8(0x3213), 0x0f },
+ { CCI_REG8(0x3215), 0x0f },
+ { CCI_REG8(0x3217), 0x0b },
+ { CCI_REG8(0x3219), 0x0b },
+ { CCI_REG8(0x321b), 0x0d },
+ { CCI_REG8(0x321d), 0x0d },
+ { CCI_REG8(0x32aa), 0x11 },
+ { CCI_REG8(0x3032), 0x40 },
+};
+
+static const struct cci_reg_sequence mode_820x614[] = {
+ { IMX111_GROUP_WRITE, 1 },
+ { IMX111_HORIZONTAL_START, 0x0008 }, { IMX111_VERTICAL_START, 0x0034 },
+ { IMX111_HORIZONTAL_END, 0x0cd7 }, { IMX111_VERTICAL_END, 0x09cb },
+ { IMX111_IMAGE_WIDTH, 0x0334 }, { IMX111_IMAGE_HEIGHT, 0x0266 },
+ { IMX111_GROUP_WRITE, 0 },
+ { IMX111_H_EVEN_INC, 0x05 }, { IMX111_H_ODD_INC, 0x03 },
+ { IMX111_W_EVEN_INC, 0x05 }, { IMX111_W_ODD_INC, 0x03 },
+ { CCI_REG8(0x3033), 0x00 }, { CCI_REG8(0x303d), 0x10 },
+ { CCI_REG8(0x303e), 0x40 }, { CCI_REG8(0x3040), 0x08 },
+ { CCI_REG8(0x3041), 0x97 }, { CCI_REG8(0x3048), 0x01 },
+ { CCI_REG8(0x304c), 0x6f }, { CCI_REG8(0x304d), 0x03 },
+ { CCI_REG8(0x3064), 0x12 }, { CCI_REG8(0x3073), 0x00 },
+ { CCI_REG8(0x3074), 0x11 }, { CCI_REG8(0x3075), 0x11 },
+ { CCI_REG8(0x3076), 0x11 }, { CCI_REG8(0x3077), 0x11 },
+ { CCI_REG8(0x3079), 0x00 }, { CCI_REG8(0x307a), 0x00 },
+ { CCI_REG8(0x309b), 0x28 }, { CCI_REG8(0x309c), 0x13 },
+ { CCI_REG8(0x309e), 0x00 }, { CCI_REG8(0x30a0), 0x14 },
+ { CCI_REG8(0x30a1), 0x09 }, { CCI_REG8(0x30aa), 0x03 },
+ { CCI_REG8(0x30b2), 0x03 }, { CCI_REG8(0x30d5), 0x09 },
+ { CCI_REG8(0x30d6), 0x00 }, { CCI_REG8(0x30d7), 0x00 },
+ { CCI_REG8(0x30d8), 0x00 }, { CCI_REG8(0x30d9), 0x00 },
+ { CCI_REG8(0x30de), 0x04 }, { CCI_REG8(0x30df), 0x20 },
+ { CCI_REG8(0x3102), 0x08 }, { CCI_REG8(0x3103), 0x22 },
+ { CCI_REG8(0x3104), 0x20 }, { CCI_REG8(0x3105), 0x00 },
+ { CCI_REG8(0x3106), 0x87 }, { CCI_REG8(0x3107), 0x00 },
+ { CCI_REG8(0x3108), 0x03 }, { CCI_REG8(0x3109), 0x02 },
+ { CCI_REG8(0x310a), 0x03 }, { CCI_REG8(0x315c), 0x9c },
+ { CCI_REG8(0x315d), 0x9b }, { CCI_REG8(0x316e), 0x9d },
+ { CCI_REG8(0x316f), 0x9c }, { CCI_REG8(0x3318), 0x7a },
+ { CCI_REG8(0x3348), 0xe0 },
+};
+
+static const struct cci_reg_sequence mode_1308x736[] = {
+ { IMX111_GROUP_WRITE, 1 },
+ { IMX111_HORIZONTAL_START, 0x0154 }, { IMX111_VERTICAL_START, 0x0220 },
+ { IMX111_HORIZONTAL_END, 0x0b8b }, { IMX111_VERTICAL_END, 0x07df },
+ { IMX111_IMAGE_WIDTH, 0x051c }, { IMX111_IMAGE_HEIGHT, 0x02e0 },
+ { IMX111_GROUP_WRITE, 0 },
+ { IMX111_H_EVEN_INC, 0x01 }, { IMX111_H_ODD_INC, 0x01 },
+ { IMX111_W_EVEN_INC, 0x01 }, { IMX111_W_ODD_INC, 0x03 },
+ { CCI_REG8(0x3033), 0x84 }, { CCI_REG8(0x303d), 0x10 },
+ { CCI_REG8(0x303e), 0x40 }, { CCI_REG8(0x3040), 0x08 },
+ { CCI_REG8(0x3041), 0x97 }, { CCI_REG8(0x3048), 0x01 },
+ { CCI_REG8(0x304c), 0xd7 }, { CCI_REG8(0x304d), 0x01 },
+ { CCI_REG8(0x3064), 0x12 }, { CCI_REG8(0x3073), 0x00 },
+ { CCI_REG8(0x3074), 0x11 }, { CCI_REG8(0x3075), 0x11 },
+ { CCI_REG8(0x3076), 0x11 }, { CCI_REG8(0x3077), 0x11 },
+ { CCI_REG8(0x3079), 0x00 }, { CCI_REG8(0x307a), 0x00 },
+ { CCI_REG8(0x309b), 0x48 }, { CCI_REG8(0x309c), 0x12 },
+ { CCI_REG8(0x309e), 0x04 }, { CCI_REG8(0x30a0), 0x14 },
+ { CCI_REG8(0x30a1), 0x0a }, { CCI_REG8(0x30aa), 0x01 },
+ { CCI_REG8(0x30b2), 0x05 }, { CCI_REG8(0x30d5), 0x04 },
+ { CCI_REG8(0x30d6), 0x85 }, { CCI_REG8(0x30d7), 0x2a },
+ { CCI_REG8(0x30d8), 0x64 }, { CCI_REG8(0x30d9), 0x89 },
+ { CCI_REG8(0x30de), 0x00 }, { CCI_REG8(0x30df), 0x20 },
+ { CCI_REG8(0x3102), 0x08 }, { CCI_REG8(0x3103), 0x22 },
+ { CCI_REG8(0x3104), 0x20 }, { CCI_REG8(0x3105), 0x00 },
+ { CCI_REG8(0x3106), 0x87 }, { CCI_REG8(0x3107), 0x00 },
+ { CCI_REG8(0x3108), 0x03 }, { CCI_REG8(0x3109), 0x02 },
+ { CCI_REG8(0x310a), 0x03 }, { CCI_REG8(0x315c), 0x42 },
+ { CCI_REG8(0x315d), 0x41 }, { CCI_REG8(0x316e), 0x43 },
+ { CCI_REG8(0x316f), 0x42 }, { CCI_REG8(0x3318), 0x62 },
+ { CCI_REG8(0x3348), 0xe0 },
+};
+
+static const struct cci_reg_sequence mode_1640x924[] = {
+ { IMX111_GROUP_WRITE, 1 },
+ { IMX111_HORIZONTAL_START, 0x0008 }, { IMX111_VERTICAL_START, 0x0164 },
+ { IMX111_HORIZONTAL_END, 0x0cd7 }, { IMX111_VERTICAL_END, 0x089b },
+ { IMX111_IMAGE_WIDTH, 0x0668 }, { IMX111_IMAGE_HEIGHT, 0x039c },
+ { IMX111_GROUP_WRITE, 0 },
+ { IMX111_H_EVEN_INC, 0x01 }, { IMX111_H_ODD_INC, 0x03 },
+ { IMX111_W_EVEN_INC, 0x01 }, { IMX111_W_ODD_INC, 0x03 },
+ { CCI_REG8(0x3033), 0x00 }, { CCI_REG8(0x303d), 0x10 },
+ { CCI_REG8(0x303e), 0x40 }, { CCI_REG8(0x3040), 0x08 },
+ { CCI_REG8(0x3041), 0x97 }, { CCI_REG8(0x3048), 0x01 },
+ { CCI_REG8(0x304c), 0x6f }, { CCI_REG8(0x304d), 0x03 },
+ { CCI_REG8(0x3064), 0x12 }, { CCI_REG8(0x3073), 0x00 },
+ { CCI_REG8(0x3074), 0x11 }, { CCI_REG8(0x3075), 0x11 },
+ { CCI_REG8(0x3076), 0x11 }, { CCI_REG8(0x3077), 0x11 },
+ { CCI_REG8(0x3079), 0x00 }, { CCI_REG8(0x307a), 0x00 },
+ { CCI_REG8(0x309b), 0x28 }, { CCI_REG8(0x309c), 0x13 },
+ { CCI_REG8(0x309e), 0x00 }, { CCI_REG8(0x30a0), 0x14 },
+ { CCI_REG8(0x30a1), 0x09 }, { CCI_REG8(0x30aa), 0x03 },
+ { CCI_REG8(0x30b2), 0x05 }, { CCI_REG8(0x30d5), 0x09 },
+ { CCI_REG8(0x30d6), 0x01 }, { CCI_REG8(0x30d7), 0x01 },
+ { CCI_REG8(0x30d8), 0x64 }, { CCI_REG8(0x30d9), 0x89 },
+ { CCI_REG8(0x30de), 0x02 }, { CCI_REG8(0x30df), 0x20 },
+ { CCI_REG8(0x3102), 0x08 }, { CCI_REG8(0x3103), 0x22 },
+ { CCI_REG8(0x3104), 0x20 }, { CCI_REG8(0x3105), 0x00 },
+ { CCI_REG8(0x3106), 0x87 }, { CCI_REG8(0x3107), 0x00 },
+ { CCI_REG8(0x3108), 0x03 }, { CCI_REG8(0x3109), 0x02 },
+ { CCI_REG8(0x310a), 0x03 }, { CCI_REG8(0x315c), 0x9c },
+ { CCI_REG8(0x315d), 0x9b }, { CCI_REG8(0x316e), 0x9d },
+ { CCI_REG8(0x316f), 0x9c }, { CCI_REG8(0x3318), 0x72 },
+ { CCI_REG8(0x3348), 0xe0 },
+};
+
+static const struct cci_reg_sequence mode_1640x1232[] = {
+ { IMX111_GROUP_WRITE, 1 },
+ { IMX111_HORIZONTAL_START, 0x0008 }, { IMX111_VERTICAL_START, 0x0030 },
+ { IMX111_HORIZONTAL_END, 0x0cd7 }, { IMX111_VERTICAL_END, 0x09cf },
+ { IMX111_IMAGE_WIDTH, 0x0668 }, { IMX111_IMAGE_HEIGHT, 0x04d0 },
+ { IMX111_GROUP_WRITE, 0 },
+ { IMX111_H_EVEN_INC, 0x01 }, { IMX111_H_ODD_INC, 0x03 },
+ { IMX111_W_EVEN_INC, 0x01 }, { IMX111_W_ODD_INC, 0x03 },
+ { CCI_REG8(0x3033), 0x00 }, { CCI_REG8(0x303d), 0x10 },
+ { CCI_REG8(0x303e), 0x40 }, { CCI_REG8(0x3040), 0x08 },
+ { CCI_REG8(0x3041), 0x97 }, { CCI_REG8(0x3048), 0x01 },
+ { CCI_REG8(0x304c), 0x6f }, { CCI_REG8(0x304d), 0x03 },
+ { CCI_REG8(0x3064), 0x12 }, { CCI_REG8(0x3073), 0x00 },
+ { CCI_REG8(0x3074), 0x11 }, { CCI_REG8(0x3075), 0x11 },
+ { CCI_REG8(0x3076), 0x11 }, { CCI_REG8(0x3077), 0x11 },
+ { CCI_REG8(0x3079), 0x00 }, { CCI_REG8(0x307a), 0x00 },
+ { CCI_REG8(0x309b), 0x28 }, { CCI_REG8(0x309c), 0x13 },
+ { CCI_REG8(0x309e), 0x00 }, { CCI_REG8(0x30a0), 0x14 },
+ { CCI_REG8(0x30a1), 0x09 }, { CCI_REG8(0x30aa), 0x03 },
+ { CCI_REG8(0x30b2), 0x05 }, { CCI_REG8(0x30d5), 0x09 },
+ { CCI_REG8(0x30d6), 0x01 }, { CCI_REG8(0x30d7), 0x01 },
+ { CCI_REG8(0x30d8), 0x64 }, { CCI_REG8(0x30d9), 0x89 },
+ { CCI_REG8(0x30de), 0x02 }, { CCI_REG8(0x30df), 0x20 },
+ { CCI_REG8(0x3102), 0x08 }, { CCI_REG8(0x3103), 0x22 },
+ { CCI_REG8(0x3104), 0x20 }, { CCI_REG8(0x3105), 0x00 },
+ { CCI_REG8(0x3106), 0x87 }, { CCI_REG8(0x3107), 0x00 },
+ { CCI_REG8(0x3108), 0x03 }, { CCI_REG8(0x3109), 0x02 },
+ { CCI_REG8(0x310a), 0x03 }, { CCI_REG8(0x315c), 0x9c },
+ { CCI_REG8(0x315d), 0x9b }, { CCI_REG8(0x316e), 0x9d },
+ { CCI_REG8(0x316f), 0x9c }, { CCI_REG8(0x3318), 0x72 },
+ { CCI_REG8(0x3348), 0xe0 },
+};
+
+static const struct cci_reg_sequence mode_1952x1098[] = {
+ { IMX111_GROUP_WRITE, 1 },
+ { IMX111_HORIZONTAL_START, 0x0016 }, { IMX111_VERTICAL_START, 0x016e },
+ { IMX111_HORIZONTAL_END, 0x0ccb }, { IMX111_VERTICAL_END, 0x0893 },
+ { IMX111_IMAGE_WIDTH, 0x07a0 }, { IMX111_IMAGE_HEIGHT, 0x044a },
+ { IMX111_GROUP_WRITE, 0 },
+ { IMX111_H_EVEN_INC, 0x01 }, { IMX111_H_ODD_INC, 0x01 },
+ { IMX111_W_EVEN_INC, 0x01 }, { IMX111_W_ODD_INC, 0x01 },
+ { CCI_REG8(0x3033), 0x00 }, { CCI_REG8(0x303d), 0x10 },
+ { CCI_REG8(0x303e), 0x00 }, { CCI_REG8(0x3040), 0x08 },
+ { CCI_REG8(0x3041), 0x91 }, { CCI_REG8(0x3048), 0x00 },
+ { CCI_REG8(0x304c), 0x67 }, { CCI_REG8(0x304d), 0x03 },
+ { CCI_REG8(0x3064), 0x10 }, { CCI_REG8(0x3073), 0xa0 },
+ { CCI_REG8(0x3074), 0x12 }, { CCI_REG8(0x3075), 0x12 },
+ { CCI_REG8(0x3076), 0x12 }, { CCI_REG8(0x3077), 0x11 },
+ { CCI_REG8(0x3079), 0x0a }, { CCI_REG8(0x307a), 0x0a },
+ { CCI_REG8(0x309b), 0x60 }, { CCI_REG8(0x309e), 0x04 },
+ { CCI_REG8(0x30a0), 0x15 }, { CCI_REG8(0x30a1), 0x08 },
+ { CCI_REG8(0x30aa), 0x03 }, { CCI_REG8(0x30b2), 0x05 },
+ { CCI_REG8(0x30d5), 0x20 }, { CCI_REG8(0x30d6), 0x85 },
+ { CCI_REG8(0x30d7), 0x2a }, { CCI_REG8(0x30d8), 0x64 },
+ { CCI_REG8(0x30d9), 0x89 }, { CCI_REG8(0x30de), 0x00 },
+ { CCI_REG8(0x30df), 0x21 }, { CCI_REG8(0x3102), 0x08 },
+ { CCI_REG8(0x3103), 0x1d }, { CCI_REG8(0x3104), 0x1e },
+ { CCI_REG8(0x3105), 0x00 }, { CCI_REG8(0x3106), 0x74 },
+ { CCI_REG8(0x3107), 0x00 }, { CCI_REG8(0x3108), 0x03 },
+ { CCI_REG8(0x3109), 0x02 }, { CCI_REG8(0x310a), 0x03 },
+ { CCI_REG8(0x315c), 0x37 }, { CCI_REG8(0x315d), 0x36 },
+ { CCI_REG8(0x316e), 0x38 }, { CCI_REG8(0x316f), 0x37 },
+ { CCI_REG8(0x3318), 0x63 }, { CCI_REG8(0x3348), 0xa0 },
+};
+
+static const struct cci_reg_sequence mode_2100x1200[] = {
+ { IMX111_GROUP_WRITE, 1 },
+ { IMX111_HORIZONTAL_START, 0x0256 }, { IMX111_VERTICAL_START, 0x02a8 },
+ { IMX111_HORIZONTAL_END, 0x0a89 }, { IMX111_VERTICAL_END, 0x0757 },
+ { IMX111_IMAGE_WIDTH, 0x0834 }, { IMX111_IMAGE_HEIGHT, 0x04b0 },
+ { IMX111_GROUP_WRITE, 0 },
+ { IMX111_H_EVEN_INC, 0x01 }, { IMX111_H_ODD_INC, 0x01 },
+ { IMX111_W_EVEN_INC, 0x01 }, { IMX111_W_ODD_INC, 0x01 },
+ { CCI_REG8(0x3033), 0x00 }, { CCI_REG8(0x303d), 0x10 },
+ { CCI_REG8(0x303e), 0x40 }, { CCI_REG8(0x3040), 0x08 },
+ { CCI_REG8(0x3041), 0x97 }, { CCI_REG8(0x3048), 0x00 },
+ { CCI_REG8(0x304c), 0x6f }, { CCI_REG8(0x304d), 0x03 },
+ { CCI_REG8(0x3064), 0x12 }, { CCI_REG8(0x3073), 0x00 },
+ { CCI_REG8(0x3074), 0x11 }, { CCI_REG8(0x3075), 0x11 },
+ { CCI_REG8(0x3076), 0x11 }, { CCI_REG8(0x3077), 0x11 },
+ { CCI_REG8(0x3079), 0x00 }, { CCI_REG8(0x307a), 0x00 },
+ { CCI_REG8(0x309b), 0x20 }, { CCI_REG8(0x309c), 0x13 },
+ { CCI_REG8(0x309e), 0x00 }, { CCI_REG8(0x30a0), 0x14 },
+ { CCI_REG8(0x30a1), 0x08 }, { CCI_REG8(0x30aa), 0x03 },
+ { CCI_REG8(0x30b2), 0x07 }, { CCI_REG8(0x30d5), 0x00 },
+ { CCI_REG8(0x30d6), 0x85 }, { CCI_REG8(0x30d7), 0x2a },
+ { CCI_REG8(0x30d8), 0x64 }, { CCI_REG8(0x30d9), 0x89 },
+ { CCI_REG8(0x30de), 0x00 }, { CCI_REG8(0x30df), 0x20 },
+ { CCI_REG8(0x3102), 0x08 }, { CCI_REG8(0x3103), 0x22 },
+ { CCI_REG8(0x3104), 0x20 }, { CCI_REG8(0x3105), 0x00 },
+ { CCI_REG8(0x3106), 0x87 }, { CCI_REG8(0x3107), 0x00 },
+ { CCI_REG8(0x3108), 0x03 }, { CCI_REG8(0x3109), 0x02 },
+ { CCI_REG8(0x310a), 0x03 }, { CCI_REG8(0x315c), 0x9c },
+ { CCI_REG8(0x315d), 0x9b }, { CCI_REG8(0x316e), 0x9d },
+ { CCI_REG8(0x316f), 0x9c }, { CCI_REG8(0x3318), 0x62 },
+ { CCI_REG8(0x3348), 0xe0 },
+};
+
+static const struct cci_reg_sequence mode_3280x1098[] = {
+ { IMX111_GROUP_WRITE, 1 },
+ { IMX111_HORIZONTAL_START, 0x0008 }, { IMX111_VERTICAL_START, 0x01f6 },
+ { IMX111_HORIZONTAL_END, 0x0cd7 }, { IMX111_VERTICAL_END, 0x080b },
+ { IMX111_IMAGE_WIDTH, 0x0cd0 }, { IMX111_IMAGE_HEIGHT, 0x044a },
+ { IMX111_GROUP_WRITE, 0 },
+ { IMX111_H_EVEN_INC, 0x01 }, { IMX111_H_ODD_INC, 0x01 },
+ { IMX111_W_EVEN_INC, 0x01 }, { IMX111_W_ODD_INC, 0x01 },
+ { CCI_REG8(0x3033), 0x00 }, { CCI_REG8(0x303d), 0x10 },
+ { CCI_REG8(0x303e), 0x40 }, { CCI_REG8(0x3040), 0x08 },
+ { CCI_REG8(0x3041), 0x93 }, { CCI_REG8(0x3048), 0x00 },
+ { CCI_REG8(0x304c), 0x67 }, { CCI_REG8(0x304d), 0x03 },
+ { CCI_REG8(0x3064), 0x12 }, { CCI_REG8(0x3073), 0xe0 },
+ { CCI_REG8(0x3074), 0x12 }, { CCI_REG8(0x3075), 0x12 },
+ { CCI_REG8(0x3076), 0x12 }, { CCI_REG8(0x3077), 0x12 },
+ { CCI_REG8(0x3079), 0x2a }, { CCI_REG8(0x307a), 0x0a },
+ { CCI_REG8(0x309b), 0x60 }, { CCI_REG8(0x309e), 0x04 },
+ { CCI_REG8(0x30a0), 0x15 }, { CCI_REG8(0x30a1), 0x08 },
+ { CCI_REG8(0x30aa), 0x03 }, { CCI_REG8(0x30b2), 0x05 },
+ { CCI_REG8(0x30d5), 0x00 }, { CCI_REG8(0x30d6), 0x85 },
+ { CCI_REG8(0x30d7), 0x2a }, { CCI_REG8(0x30d8), 0x64 },
+ { CCI_REG8(0x30d9), 0x89 }, { CCI_REG8(0x30de), 0x00 },
+ { CCI_REG8(0x30df), 0x20 }, { CCI_REG8(0x3102), 0x08 },
+ { CCI_REG8(0x3103), 0x1d }, { CCI_REG8(0x3104), 0x1e },
+ { CCI_REG8(0x3105), 0x00 }, { CCI_REG8(0x3106), 0x74 },
+ { CCI_REG8(0x3107), 0x00 }, { CCI_REG8(0x3108), 0x03 },
+ { CCI_REG8(0x3109), 0x02 }, { CCI_REG8(0x310a), 0x03 },
+ { CCI_REG8(0x315c), 0x37 }, { CCI_REG8(0x315d), 0x36 },
+ { CCI_REG8(0x316e), 0x38 }, { CCI_REG8(0x316f), 0x37 },
+ { CCI_REG8(0x3318), 0x63 }, { CCI_REG8(0x3348), 0xe0 },
+};
+
+static const struct cci_reg_sequence mode_3280x1848[] = {
+ { IMX111_GROUP_WRITE, 1 },
+ { IMX111_HORIZONTAL_START, 0x0008 }, { IMX111_VERTICAL_START, 0x0164 },
+ { IMX111_HORIZONTAL_END, 0x0cd7 }, { IMX111_VERTICAL_END, 0x089b },
+ { IMX111_IMAGE_WIDTH, 0x0cd0 }, { IMX111_IMAGE_HEIGHT, 0x0738 },
+ { IMX111_GROUP_WRITE, 0 },
+ { IMX111_H_EVEN_INC, 0x01 }, { IMX111_H_ODD_INC, 0x01 },
+ { IMX111_W_EVEN_INC, 0x01 }, { IMX111_W_ODD_INC, 0x01 },
+ { CCI_REG8(0x3033), 0x00 }, { CCI_REG8(0x303d), 0x00 },
+ { CCI_REG8(0x303e), 0x41 }, { CCI_REG8(0x3040), 0x08 },
+ { CCI_REG8(0x3041), 0x97 }, { CCI_REG8(0x3048), 0x00 },
+ { CCI_REG8(0x304c), 0x6f }, { CCI_REG8(0x304d), 0x03 },
+ { CCI_REG8(0x3064), 0x12 }, { CCI_REG8(0x3073), 0x00 },
+ { CCI_REG8(0x3074), 0x11 }, { CCI_REG8(0x3075), 0x11 },
+ { CCI_REG8(0x3076), 0x11 }, { CCI_REG8(0x3077), 0x11 },
+ { CCI_REG8(0x3079), 0x00 }, { CCI_REG8(0x307a), 0x00 },
+ { CCI_REG8(0x309b), 0x20 }, { CCI_REG8(0x309c), 0x13 },
+ { CCI_REG8(0x309e), 0x00 }, { CCI_REG8(0x30a0), 0x14 },
+ { CCI_REG8(0x30a1), 0x08 }, { CCI_REG8(0x30aa), 0x03 },
+ { CCI_REG8(0x30b2), 0x07 }, { CCI_REG8(0x30d5), 0x00 },
+ { CCI_REG8(0x30d6), 0x85 }, { CCI_REG8(0x30d7), 0x2a },
+ { CCI_REG8(0x30d8), 0x64 }, { CCI_REG8(0x30d9), 0x89 },
+ { CCI_REG8(0x30de), 0x00 }, { CCI_REG8(0x30df), 0x20 },
+ { CCI_REG8(0x3102), 0x10 }, { CCI_REG8(0x3103), 0x44 },
+ { CCI_REG8(0x3104), 0x40 }, { CCI_REG8(0x3105), 0x00 },
+ { CCI_REG8(0x3106), 0x0d }, { CCI_REG8(0x3107), 0x01 },
+ { CCI_REG8(0x3108), 0x09 }, { CCI_REG8(0x3109), 0x08 },
+ { CCI_REG8(0x310a), 0x0f }, { CCI_REG8(0x315c), 0x5d },
+ { CCI_REG8(0x315d), 0x5c }, { CCI_REG8(0x316e), 0x5e },
+ { CCI_REG8(0x316f), 0x5d }, { CCI_REG8(0x3318), 0x60 },
+ { CCI_REG8(0x3348), 0xe0 },
+};
+
+static const struct cci_reg_sequence mode_3280x2464[] = {
+ { IMX111_GROUP_WRITE, 1 },
+ { IMX111_HORIZONTAL_START, 0x0008 }, { IMX111_VERTICAL_START, 0x0030 },
+ { IMX111_HORIZONTAL_END, 0x0cd7 }, { IMX111_VERTICAL_END, 0x09cf },
+ { IMX111_IMAGE_WIDTH, 0x0cd0 }, { IMX111_IMAGE_HEIGHT, 0x09a0 },
+ { IMX111_GROUP_WRITE, 0 },
+ { IMX111_H_EVEN_INC, 0x01 }, { IMX111_H_ODD_INC, 0x01 },
+ { IMX111_W_EVEN_INC, 0x01 }, { IMX111_W_ODD_INC, 0x01 },
+ { CCI_REG8(0x3033), 0x00 }, { CCI_REG8(0x303d), 0x00 },
+ { CCI_REG8(0x303e), 0x41 }, { CCI_REG8(0x3040), 0x08 },
+ { CCI_REG8(0x3041), 0x97 }, { CCI_REG8(0x3048), 0x00 },
+ { CCI_REG8(0x304c), 0x6f }, { CCI_REG8(0x304d), 0x03 },
+ { CCI_REG8(0x3064), 0x12 }, { CCI_REG8(0x3073), 0x00 },
+ { CCI_REG8(0x3074), 0x11 }, { CCI_REG8(0x3075), 0x11 },
+ { CCI_REG8(0x3076), 0x11 }, { CCI_REG8(0x3077), 0x11 },
+ { CCI_REG8(0x3079), 0x00 }, { CCI_REG8(0x307a), 0x00 },
+ { CCI_REG8(0x309b), 0x20 }, { CCI_REG8(0x309c), 0x13 },
+ { CCI_REG8(0x309e), 0x00 }, { CCI_REG8(0x30a0), 0x14 },
+ { CCI_REG8(0x30a1), 0x08 }, { CCI_REG8(0x30aa), 0x03 },
+ { CCI_REG8(0x30b2), 0x07 }, { CCI_REG8(0x30d5), 0x00 },
+ { CCI_REG8(0x30d6), 0x85 }, { CCI_REG8(0x30d7), 0x2a },
+ { CCI_REG8(0x30d8), 0x64 }, { CCI_REG8(0x30d9), 0x89 },
+ { CCI_REG8(0x30de), 0x00 }, { CCI_REG8(0x30df), 0x20 },
+ { CCI_REG8(0x3102), 0x10 }, { CCI_REG8(0x3103), 0x44 },
+ { CCI_REG8(0x3104), 0x40 }, { CCI_REG8(0x3105), 0x00 },
+ { CCI_REG8(0x3106), 0x0d }, { CCI_REG8(0x3107), 0x01 },
+ { CCI_REG8(0x3108), 0x09 }, { CCI_REG8(0x3109), 0x08 },
+ { CCI_REG8(0x310a), 0x0f }, { CCI_REG8(0x315c), 0x5d },
+ { CCI_REG8(0x315d), 0x5c }, { CCI_REG8(0x316e), 0x5e },
+ { CCI_REG8(0x316f), 0x5d }, { CCI_REG8(0x3318), 0x60 },
+ { CCI_REG8(0x3348), 0xe0 },
+};
+
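+/*
+ * Some entries below reuse the register list of the nearest larger native
+ * mode (e.g. 1920x1080 uses the 1952x1098 registers); the sensor presumably
+ * outputs the larger frame and the excess pixels are cropped downstream.
+ */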
+static const struct imx111_mode imx111_modes[] = {
+ [IMX111_MODE_3280x2464] = {
+ .width = 3280,
+ .height = 2464,
+ .vtl_def = 2490,
+ .htl_def = 3536,
+ .reg_list = {
+ .regs = mode_3280x2464,
+ .num_of_regs = ARRAY_SIZE(mode_3280x2464),
+ },
+ },
+ [IMX111_MODE_3280x1848] = {
+ .width = 3280,
+ .height = 1848,
+ .vtl_def = 1874,
+ .htl_def = 3536,
+ .reg_list = {
+ .regs = mode_3280x1848,
+ .num_of_regs = ARRAY_SIZE(mode_3280x1848),
+ },
+ },
+ [IMX111_MODE_3280x1098] = {
+ .width = 3280,
+ .height = 1098,
+ .vtl_def = 1130,
+ .htl_def = 3500,
+ .reg_list = {
+ .regs = mode_3280x1098,
+ .num_of_regs = ARRAY_SIZE(mode_3280x1098),
+ },
+ },
+ [IMX111_MODE_2100x1200] = {
+ .width = 2100,
+ .height = 1200,
+ .vtl_def = 1260,
+ .htl_def = 3536,
+ .reg_list = {
+ .regs = mode_2100x1200,
+ .num_of_regs = ARRAY_SIZE(mode_2100x1200),
+ },
+ },
+ [IMX111_MODE_1952x1098] = {
+ .width = 1952,
+ .height = 1098,
+ .vtl_def = 1884,
+ .htl_def = 3500,
+ .reg_list = {
+ .regs = mode_1952x1098,
+ .num_of_regs = ARRAY_SIZE(mode_1952x1098),
+ },
+ },
+ [IMX111_MODE_1920x1080] = {
+ .width = 1920,
+ .height = 1080,
+ .vtl_def = 1884,
+ .htl_def = 3500,
+ .reg_list = {
+ .regs = mode_1952x1098,
+ .num_of_regs = ARRAY_SIZE(mode_1952x1098),
+ },
+ },
+ [IMX111_MODE_1640x1232] = {
+ .width = 1640,
+ .height = 1232,
+ .vtl_def = 1254,
+ .htl_def = 3536,
+ .reg_list = {
+ .regs = mode_1640x1232,
+ .num_of_regs = ARRAY_SIZE(mode_1640x1232),
+ },
+ },
+ [IMX111_MODE_1440x1080] = {
+ .width = 1440,
+ .height = 1080,
+ .vtl_def = 1254,
+ .htl_def = 3536,
+ .reg_list = {
+ .regs = mode_1640x1232,
+ .num_of_regs = ARRAY_SIZE(mode_1640x1232),
+ },
+ },
+ [IMX111_MODE_1640x924] = {
+ .width = 1640,
+ .height = 924,
+ .vtl_def = 946,
+ .htl_def = 3536,
+ .reg_list = {
+ .regs = mode_1640x924,
+ .num_of_regs = ARRAY_SIZE(mode_1640x924),
+ },
+ },
+ [IMX111_MODE_1308x736] = {
+ .width = 1308,
+ .height = 736,
+ .vtl_def = 2369,
+ .htl_def = 1896,
+ .reg_list = {
+ .regs = mode_1308x736,
+ .num_of_regs = ARRAY_SIZE(mode_1308x736),
+ },
+ },
+ [IMX111_MODE_1280x720] = {
+ .width = 1280,
+ .height = 720,
+ .vtl_def = 2369,
+ .htl_def = 1896,
+ .reg_list = {
+ .regs = mode_1308x736,
+ .num_of_regs = ARRAY_SIZE(mode_1308x736),
+ },
+ },
+ [IMX111_MODE_820x614] = {
+ .width = 820,
+ .height = 614,
+ .vtl_def = 1260,
+ .htl_def = 3536,
+ .reg_list = {
+ .regs = mode_820x614,
+ .num_of_regs = ARRAY_SIZE(mode_820x614),
+ },
+ },
+ [IMX111_MODE_640x480] = {
+ .width = 640,
+ .height = 480,
+ .vtl_def = 1260,
+ .htl_def = 3536,
+ .reg_list = {
+ .regs = mode_820x614,
+ .num_of_regs = ARRAY_SIZE(mode_820x614),
+ },
+ },
+};
+
+static inline struct imx111 *sd_to_imx111(struct v4l2_subdev *sd)
+{
+ return container_of_const(sd, struct imx111, sd);
+}
+
+static inline struct imx111 *ctrl_to_imx111(struct v4l2_ctrl *ctrl)
+{
+ return container_of_const(ctrl->handler, struct imx111, hdl);
+}
+
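+/*
+ * Convert the default PLL settling time to the register's unit, which
+ * appears to be blocks of 64 EXTCLK cycles: the expression below reduces
+ * to (settling_time * extclk_mhz) / 64, i.e. the settling time counted in
+ * EXTCLK cycles, divided by 64.
+ */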
+static u8 to_settle_delay(u64 extclk_rate)
+{
+ u64 extclk_mhz = div_u64(extclk_rate, MEGA);
+
+ return DIV_ROUND_UP(IMX111_PLL_SETTLING_TIME_DEFAULT * extclk_mhz - 63,
+ 64);
+}
+
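+/*
+ * imx111_mbus_formats is assumed to be laid out in groups of four Bayer
+ * orders: the low two bits of the index encode vflip (bit 1) and hflip
+ * (bit 0), so unless @test is set the returned code tracks the current
+ * flip settings.
+ */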
+static u32 imx111_get_format_code(struct imx111 *sensor, u32 code, bool test)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(imx111_mbus_formats); i++)
+ if (imx111_mbus_formats[i] == code)
+ break;
+
+ if (i >= ARRAY_SIZE(imx111_mbus_formats))
+ i = 0;
+
+ if (test)
+ return imx111_mbus_formats[i];
+
+ i = (i & ~3) | (sensor->vflip->val ? 2 : 0) |
+ (sensor->hflip->val ? 1 : 0);
+
+ return imx111_mbus_formats[i];
+}
+
+static u32 imx111_get_format_bpp(const struct v4l2_mbus_framefmt *format)
+{
+ switch (format->code) {
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ return 8;
+
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ default:
+ return 10;
+ }
+}
+
+static int imx111_update_digital_gain(struct imx111 *sensor, u32 val)
+{
+ int ret = 0;
+
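+ /*
+ * Hold the grouped-parameter update around the four per-channel gain
+ * writes so they all take effect in the same frame.
+ */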
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON,
+ IMX111_GROUP_WRITE_ON, &ret);
+
+ cci_write(sensor->regmap, IMX111_REG_DIG_GAIN_GREENR, val, &ret);
+ cci_write(sensor->regmap, IMX111_REG_DIG_GAIN_RED, val, &ret);
+ cci_write(sensor->regmap, IMX111_REG_DIG_GAIN_BLUE, val, &ret);
+ cci_write(sensor->regmap, IMX111_REG_DIG_GAIN_GREENB, val, &ret);
+
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON, 0, &ret);
+
+ return ret;
+}
+
+static int imx111_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx111 *sensor = ctrl_to_imx111(ctrl);
+ struct device *dev = regmap_get_device(sensor->regmap);
+ int ret = 0;
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
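+ /*
+ * The frame length (mode height + vblank) bounds the maximum
+ * exposure, so keep the exposure range in sync with vblank.
+ */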
+ s64 max = sensor->cur_mode->height + ctrl->val -
+ IMX111_INTEGRATION_TIME_OFFSET;
+
+ ret = __v4l2_ctrl_modify_range(sensor->exposure,
+ sensor->exposure->minimum,
+ max, sensor->exposure->step,
+ max);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * V4L2 control values are only applied while the sensor is powered
+ * up for streaming.
+ */
+ if (!pm_runtime_get_if_in_use(dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ cci_write(sensor->regmap, IMX111_REG_ANALOG_GAIN, ctrl->val,
+ &ret);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = imx111_update_digital_gain(sensor, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON,
+ IMX111_GROUP_WRITE_ON, &ret);
+ cci_write(sensor->regmap, IMX111_INTEGRATION_TIME, ctrl->val,
+ &ret);
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON, 0, &ret);
+ break;
+ case V4L2_CID_HBLANK:
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON,
+ IMX111_GROUP_WRITE_ON, &ret);
+ cci_write(sensor->regmap, IMX111_HORIZONTAL_TOTAL_LENGTH,
+ sensor->cur_mode->width + ctrl->val, &ret);
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON, 0, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON,
+ IMX111_GROUP_WRITE_ON, &ret);
+ cci_write(sensor->regmap, IMX111_VERTICAL_TOTAL_LENGTH,
+ sensor->cur_mode->height + ctrl->val, &ret);
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON, 0, &ret);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ cci_write(sensor->regmap, IMX111_IMAGE_ORIENTATION,
+ sensor->hflip->val | sensor->vflip->val << 1, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ cci_write(sensor->regmap, IMX111_TEST_PATTERN, ctrl->val,
+ &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_RED:
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON,
+ IMX111_GROUP_WRITE_ON, &ret);
+ cci_write(sensor->regmap, IMX111_SOLID_COLOR_RED, ctrl->val,
+ &ret);
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON, 0, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENR:
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON,
+ IMX111_GROUP_WRITE_ON, &ret);
+ cci_write(sensor->regmap, IMX111_SOLID_COLOR_GR, ctrl->val,
+ &ret);
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON, 0, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_BLUE:
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON,
+ IMX111_GROUP_WRITE_ON, &ret);
+ cci_write(sensor->regmap, IMX111_SOLID_COLOR_BLUE, ctrl->val,
+ &ret);
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON, 0, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN_GREENB:
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON,
+ IMX111_GROUP_WRITE_ON, &ret);
+ cci_write(sensor->regmap, IMX111_SOLID_COLOR_GB, ctrl->val,
+ &ret);
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON, 0, &ret);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ pm_runtime_put(dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx111_ctrl_ops = {
+ .s_ctrl = imx111_set_ctrl,
+};
+
+static const char * const test_pattern_menu[] = {
+ "Disabled",
+ "Solid Color Fill",
+ "Standard Color Bars",
+ "Fade To Grey Color Bars",
+ "Pseudorandom data",
+};
+
+static int imx111_init_controls(struct imx111 *sensor)
+{
+ const struct v4l2_ctrl_ops *ops = &imx111_ctrl_ops;
+ struct device *dev = regmap_get_device(sensor->regmap);
+ const struct imx111_mode *mode = sensor->cur_mode;
+ struct v4l2_fwnode_device_properties props;
+ struct v4l2_ctrl_handler *hdl = &sensor->hdl;
+ s64 pixel_rate_min, pixel_rate_max;
+ int i, ret;
+
+ ret = v4l2_fwnode_device_parse(dev, &props);
+ if (ret < 0)
+ return ret;
+
+ v4l2_ctrl_handler_init(hdl, 15);
+
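+ /*
+ * The pixel rate depends on the sample depth: raw pixel clock divided
+ * by 2 * bits per sample (the factor of two presumably reflecting DDR
+ * clocking), so RAW8 gives the maximum rate and RAW10 the minimum.
+ */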
+ pixel_rate_min = div_u64(sensor->pixel_clk_raw,
+ 2 * IMX111_DATA_DEPTH_RAW10);
+ pixel_rate_max = div_u64(sensor->pixel_clk_raw,
+ 2 * IMX111_DATA_DEPTH_RAW8);
+ sensor->pixel_rate = v4l2_ctrl_new_std(hdl, NULL, V4L2_CID_PIXEL_RATE,
+ pixel_rate_min, pixel_rate_max,
+ 1,
+ div_u64(sensor->pixel_clk_raw,
+ 2 *
+ sensor->data_depth));
+
+ sensor->link_freq = v4l2_ctrl_new_int_menu(hdl, NULL,
+ V4L2_CID_LINK_FREQ, 0, 0,
+ &sensor->default_link_freq);
+ if (sensor->link_freq)
+ sensor->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_ANALOGUE_GAIN,
+ IMX111_ANA_GAIN_MIN, IMX111_ANA_GAIN_MAX,
+ IMX111_ANA_GAIN_STEP, IMX111_ANA_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_DIGITAL_GAIN,
+ IMX111_DGTL_GAIN_MIN, IMX111_DGTL_GAIN_MAX,
+ IMX111_DGTL_GAIN_STEP, IMX111_DGTL_GAIN_DEFAULT);
+
+ sensor->hflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1,
+ 0);
+ if (sensor->hflip)
+ sensor->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ sensor->vflip = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1,
+ 0);
+ if (sensor->vflip)
+ sensor->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ sensor->vblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK,
+ IMX111_VBLANK_MIN,
+ IMX111_VTL_MAX - mode->height, 1,
+ mode->vtl_def - mode->height);
+ sensor->hblank = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK,
+ IMX111_HBLANK_MIN,
+ IMX111_HTL_MAX - mode->width, 1,
+ mode->htl_def - mode->width);
+
+ /*
+ * The maximum coarse integration time is the frame length in lines
+ * minus five.
+ */
+ sensor->exposure = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_EXPOSURE,
+ IMX111_INTEGRATION_TIME_MIN,
+ IMX111_PIXEL_ARRAY_HEIGHT -
+ IMX111_INTEGRATION_TIME_OFFSET,
+ IMX111_INTEGRATION_TIME_STEP,
+ IMX111_PIXEL_ARRAY_HEIGHT -
+ IMX111_INTEGRATION_TIME_OFFSET);
+
+ v4l2_ctrl_new_fwnode_properties(hdl, ops, &props);
+
+ v4l2_ctrl_new_std_menu_items(hdl, ops, V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(test_pattern_menu) - 1, 0, 0,
+ test_pattern_menu);
+ for (i = 0; i < 4; i++) {
+ /*
+ * The assumption is that
+ * TEST_PATTERN_GREENR == TEST_PATTERN_RED + 1
+ * TEST_PATTERN_BLUE == TEST_PATTERN_RED + 2
+ * TEST_PATTERN_GREENB == TEST_PATTERN_RED + 3
+ */
+ v4l2_ctrl_new_std(hdl, ops, V4L2_CID_TEST_PATTERN_RED + i,
+ IMX111_TESTP_COLOUR_MIN,
+ IMX111_TESTP_COLOUR_MAX,
+ IMX111_TESTP_COLOUR_STEP,
+ IMX111_TESTP_COLOUR_MAX);
+ /* The "Solid color" pattern is white by default */
+ }
+
+ if (hdl->error)
+ return hdl->error;
+
+ sensor->sd.ctrl_handler = hdl;
+
+ return 0;
+}
+
+static int imx111_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct imx111 *sensor = sd_to_imx111(sd);
+ struct device *dev = regmap_get_device(sensor->regmap);
+ const struct imx111_mode *mode = sensor->cur_mode;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Apply default values of current mode */
+ ret = cci_multi_reg_write(sensor->regmap, mode->reg_list.regs,
+ mode->reg_list.num_of_regs, NULL);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize the sensor\n");
+ goto err_rpm_put;
+ }
+
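+ /*
+ * Program the CSI data format: both bytes of the register carry the
+ * sample depth (e.g. 0x0a0a for RAW10), matching the usual SMIA-style
+ * CSI_DATA_FORMAT layout.
+ */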
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON,
+ IMX111_GROUP_WRITE_ON, &ret);
+ cci_write(sensor->regmap, IMX111_DATA_DEPTH,
+ sensor->data_depth | sensor->data_depth << 8, &ret);
+ cci_update_bits(sensor->regmap, IMX111_GROUP_WRITE,
+ IMX111_GROUP_WRITE_ON, 0, &ret);
+
+ if (ret)
+ goto err_rpm_put;
+
+ ret = __v4l2_ctrl_handler_setup(&sensor->hdl);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = cci_write(sensor->regmap, IMX111_STREAMING_MODE,
+ IMX111_MODE_STREAMING, NULL);
+ if (ret) {
+ dev_err(dev, "failed to start stream\n");
+ goto err_rpm_put;
+ }
+
+ /* vflip and hflip cannot change during streaming */
+ __v4l2_ctrl_grab(sensor->vflip, true);
+ __v4l2_ctrl_grab(sensor->hflip, true);
+
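+ /* Delay (presumably empirical) to let streaming settle before returning. */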
+ msleep(30);
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+static int imx111_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct imx111 *sensor = sd_to_imx111(sd);
+ struct device *dev = regmap_get_device(sensor->regmap);
+ int ret;
+
+ ret = cci_write(sensor->regmap, IMX111_STREAMING_MODE,
+ IMX111_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(dev, "failed to stop stream\n");
+
+ __v4l2_ctrl_grab(sensor->vflip, false);
+ __v4l2_ctrl_grab(sensor->hflip, false);
+
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int imx111_initialize(struct imx111 *sensor)
+{
+ struct device *dev = regmap_get_device(sensor->regmap);
+ int ret = 0;
+
+ /* Configure the PLL. */
+ cci_write(sensor->regmap, IMX111_PRE_PLL_CLK_DIVIDER_PLL1,
+ sensor->pll->pre_div, &ret);
+ cci_write(sensor->regmap, IMX111_PLL_MULTIPLIER_PLL1,
+ sensor->pll->mult, &ret);
+ cci_write(sensor->regmap, IMX111_POST_DIVIDER,
+ IMX111_POST_DIVIDER_DIV1, &ret);
+ cci_write(sensor->regmap, IMX111_PLL_SETTLING_TIME,
+ to_settle_delay(sensor->pll->extclk_rate), &ret);
+
+ cci_multi_reg_write(sensor->regmap, imx111_global_init,
+ ARRAY_SIZE(imx111_global_init), &ret);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize the sensor\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* ----------------------------------------------------------------------------
+ * IMX111 Pad Subdev Init and Operations
+ */
+static int imx111_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx111 *sensor = sd_to_imx111(sd);
+
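+ /* Formats come in groups of four Bayer orders; report one per group. */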
+ if (code->index >= ARRAY_SIZE(imx111_mbus_formats) / 4)
+ return -EINVAL;
+
+ code->code = imx111_get_format_code(sensor,
+ imx111_mbus_formats[code->index *
+ 4], false);
+
+ return 0;
+}
+
+static int imx111_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct imx111 *sensor = sd_to_imx111(sd);
+ u32 code;
+
+ if (fse->index >= ARRAY_SIZE(imx111_modes))
+ return -EINVAL;
+
+ code = imx111_get_format_code(sensor, fse->code, true);
+ if (fse->code != code)
+ return -EINVAL;
+
+ fse->min_width = imx111_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = imx111_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int imx111_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct imx111 *sensor = sd_to_imx111(sd);
+ struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
+ struct v4l2_mbus_framefmt *fmt;
+ const struct imx111_mode *mode;
+
+ mode = v4l2_find_nearest_size(imx111_modes, ARRAY_SIZE(imx111_modes),
+ width, height,
+ mbus_fmt->width, mbus_fmt->height);
+
+ fmt = v4l2_subdev_state_get_format(state, format->pad);
+ fmt->code = imx111_get_format_code(sensor, mbus_fmt->code, false);
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ int ret;
+
+ sensor->cur_mode = mode;
+ sensor->data_depth = imx111_get_format_bpp(fmt);
+
+ ret = __v4l2_ctrl_s_ctrl_int64(sensor->pixel_rate,
+ div_u64(sensor->pixel_clk_raw,
+ 2 *
+ sensor->data_depth));
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_modify_range(sensor->vblank,
+ IMX111_VBLANK_MIN,
+ IMX111_VTL_MAX - mode->height,
+ 1,
+ mode->vtl_def - mode->height);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl(sensor->vblank, mode->vtl_def -
+ mode->height);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_modify_range(sensor->hblank,
+ IMX111_HBLANK_MIN,
+ IMX111_HTL_MAX - mode->width,
+ 1,
+ mode->htl_def - mode->width);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl(sensor->hblank, mode->htl_def -
+ mode->width);
+ if (ret)
+ return ret;
+ }
+
+ *mbus_fmt = *fmt;
+
+ return 0;
+}
+
+static int imx111_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct imx111 *sensor = sd_to_imx111(sd);
+ const struct imx111_mode *mode = sensor->cur_mode;
+ struct v4l2_mbus_framefmt *fmt;
+
+ fmt = v4l2_subdev_state_get_format(sd_state, 0);
+
+ fmt->code = MEDIA_BUS_FMT_SGBRG10_1X10;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops imx111_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops imx111_pad_ops = {
+ .enum_mbus_code = imx111_enum_mbus_code,
+ .enum_frame_size = imx111_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = imx111_set_format,
+ .enable_streams = imx111_enable_streams,
+ .disable_streams = imx111_disable_streams,
+};
+
+static const struct v4l2_subdev_ops imx111_subdev_ops = {
+ .video = &imx111_video_ops,
+ .pad = &imx111_pad_ops,
+};
+
+static const struct media_entity_operations imx111_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops imx111_internal_ops = {
+ .init_state = imx111_init_state,
+};
+
+static int imx111_init_subdev(struct imx111 *sensor, struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct v4l2_subdev *sd = &sensor->sd;
+ struct media_pad *pad = &sensor->pad;
+ struct v4l2_ctrl_handler *hdl = &sensor->hdl;
+ int ret;
+
+ /* Initialize the subdev. */
+ v4l2_i2c_subdev_init(sd, client, &imx111_subdev_ops);
+
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ sd->internal_ops = &imx111_internal_ops;
+
+ /* Initialize the media entity. */
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ sd->entity.ops = &imx111_subdev_entity_ops;
+ pad->flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&sd->entity, 1, pad);
+ if (ret < 0) {
+ dev_err(dev, "failed to init entity pads: %d", ret);
+ return ret;
+ }
+
+ /* Initialize the control handler. */
+ ret = imx111_init_controls(sensor);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ v4l2_ctrl_handler_free(hdl);
+ media_entity_cleanup(&sd->entity);
+ return ret;
+}
+
+/* ----------------------------------------------------------------------------
+ * Power Management
+ */
+
+static int imx111_power_on(struct imx111 *sensor)
+{
+ int ret;
+
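+ /* Assert reset while the supplies ramp up; release it before EXTCLK. */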
+ if (sensor->reset)
+ gpiod_set_value(sensor->reset, 1);
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(imx111_supplies),
+ sensor->supplies);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(500, 600);
+
+ if (sensor->reset)
+ gpiod_set_value(sensor->reset, 0);
+
+ usleep_range(200, 250);
+
+ ret = clk_prepare_enable(sensor->extclk);
+ if (ret < 0)
+ goto error_regulator;
+
+ usleep_range(200, 250);
+
+ return 0;
+
+error_regulator:
+ regulator_bulk_disable(ARRAY_SIZE(imx111_supplies), sensor->supplies);
+ return ret;
+}
+
+static void imx111_power_off(struct imx111 *sensor)
+{
+ if (sensor->reset)
+ gpiod_set_value(sensor->reset, 1);
+ usleep_range(1000, 2000);
+
+ clk_disable_unprepare(sensor->extclk);
+ regulator_bulk_disable(ARRAY_SIZE(imx111_supplies), sensor->supplies);
+}
+
+static int __maybe_unused imx111_pm_runtime_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx111 *sensor = sd_to_imx111(sd);
+ int ret;
+
+ ret = imx111_power_on(sensor);
+ if (ret)
+ return ret;
+
+ ret = imx111_initialize(sensor);
+ if (ret) {
+ imx111_power_off(sensor);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused imx111_pm_runtime_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct imx111 *sensor = sd_to_imx111(sd);
+
+ imx111_power_off(sensor);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx111_pm_ops = {
+ SET_RUNTIME_PM_OPS(imx111_pm_runtime_suspend,
+ imx111_pm_runtime_resume, NULL)
+};
+
+/* ----------------------------------------------------------------------------
+ * Probe & Remove
+ */
+
+static int imx111_identify_module(struct imx111 *sensor)
+{
+ struct device *dev = regmap_get_device(sensor->regmap);
+ u64 value, revision, manufacturer;
+ int ret = 0;
+
+ ret = cci_read(sensor->regmap, IMX111_PRODUCT_ID, &value, NULL);
+ if (ret)
+ return ret;
+
+ if (value != IMX111_CHIP_ID) {
+ dev_err(dev, "chip id mismatch: %x!=%04llx", IMX111_CHIP_ID,
+ value);
+ return -ENXIO;
+ }
+
+ cci_read(sensor->regmap, IMX111_REVISION, &revision, &ret);
+ cci_read(sensor->regmap, IMX111_MANUFACTURER_ID, &manufacturer, &ret);
+
+ dev_dbg(dev, "module IMX%03llx rev. %llu manufacturer %llu\n",
+ value, revision, manufacturer);
+
+ return ret;
+}
+
+static int imx111_clk_init(struct imx111 *sensor)
+{
+ struct device *dev = regmap_get_device(sensor->regmap);
+ u32 ndata_lanes = sensor->bus_cfg.bus.mipi_csi2.num_data_lanes;
+ u64 extclk_rate, system_clk;
+ unsigned int i;
+
+ extclk_rate = clk_get_rate(sensor->extclk);
+ if (!extclk_rate)
+ return dev_err_probe(dev, -EINVAL, "EXTCLK rate unknown\n");
+
+ for (i = 0; i < ARRAY_SIZE(imx111_pll); i++) {
+ if (extclk_rate == imx111_pll[i].extclk_rate) {
+ sensor->pll = &imx111_pll[i];
+ break;
+ }
+ }
+ if (!sensor->pll)
+ return dev_err_probe(dev, -EINVAL,
+ "Unsupported EXTCLK rate %llu\n",
+ extclk_rate);
+
+ system_clk = div_u64(extclk_rate, sensor->pll->pre_div) *
+ sensor->pll->mult;
+
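+ /*
+ * E.g. with hypothetical table values of EXTCLK = 24 MHz, pre_div = 2
+ * and mult = 85, system_clk = 24 MHz / 2 * 85 = 1020 MHz.
+ */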
+ /*
+ * The pixel clock (logic clock) used for internal image processing
+ * is generated by dividing the system clock down to 1/10 or 1/8 of
+ * its frequency, according to the word length on the CSI-2 interface.
+ * It designates the pixel rate and is used as the base for the
+ * integration time, frame rate, etc.
+ */
+ sensor->pixel_clk_raw = system_clk * ndata_lanes;
+
+ /*
+ * The CSI-2 bus is clocked at 16 bits per pixel, transmitted in DDR
+ * over n lanes for the default RAW10 format.
+ */
+ sensor->default_link_freq = div_u64(sensor->pixel_clk_raw * 8,
+ 2 * IMX111_DATA_DEPTH_RAW10);
+
+ if (sensor->bus_cfg.nr_of_link_frequencies != 1 ||
+ sensor->bus_cfg.link_frequencies[0] != sensor->default_link_freq)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid link-frequency, expected %llu\n",
+ sensor->default_link_freq);
+
+ return 0;
+}
+
+static int imx111_parse_dt(struct imx111 *sensor)
+{
+ struct device *dev = regmap_get_device(sensor->regmap);
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct fwnode_handle *ep;
+ int ret;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep) {
+ dev_err(dev, "No endpoint found\n");
+ return -EINVAL;
+ }
+
+ sensor->bus_cfg.bus_type = V4L2_MBUS_CSI2_DPHY;
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &sensor->bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret < 0) {
+ dev_err(dev, "Failed to parse endpoint\n");
+ goto error;
+ }
+
+ /* Check the number of MIPI CSI2 data lanes */
+ if (sensor->bus_cfg.bus.mipi_csi2.num_data_lanes > 2) {
+ dev_err(dev, "number of lanes is more than 2\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ v4l2_fwnode_endpoint_free(&sensor->bus_cfg);
+ return ret;
+}
+
+static int imx111_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct imx111 *sensor;
+ int ret;
+
+ sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(sensor->regmap))
+ return dev_err_probe(dev, PTR_ERR(sensor->regmap),
+ "Failed to allocate register map\n");
+
+ sensor->extclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->extclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->extclk),
+ "Failed to get clock\n");
+
+ sensor->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(sensor->reset))
+ return dev_err_probe(dev, PTR_ERR(sensor->reset),
+ "Failed to get reset GPIO\n");
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(imx111_supplies),
+ imx111_supplies,
+ &sensor->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get regulators\n");
+
+ ret = imx111_parse_dt(sensor);
+ if (ret < 0)
+ return ret;
+
+ ret = imx111_clk_init(sensor);
+ if (ret < 0)
+ goto error_ep_free;
+
+ ret = imx111_power_on(sensor);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Could not power on the device\n");
+ goto error_ep_free;
+ }
+
+ ret = imx111_identify_module(sensor);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Could not identify module\n");
+ goto error_power_off;
+ }
+
+ sensor->cur_mode = &imx111_modes[IMX111_MODE_3280x2464];
+ sensor->data_depth = IMX111_DATA_DEPTH_RAW10;
+
+ ret = imx111_initialize(sensor);
+ if (ret < 0)
+ goto error_power_off;
+
+ ret = imx111_init_subdev(sensor, client);
+ if (ret < 0) {
+ dev_err(dev, "failed to init subdev: %d\n", ret);
+ goto error_power_off;
+ }
+
+ ret = v4l2_subdev_init_finalize(&sensor->sd);
+ if (ret)
+ goto error_v4l2_ctrl_handler_free;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = v4l2_async_register_subdev_sensor(&sensor->sd);
+ if (ret < 0) {
+ dev_err(dev, "failed to register V4L2 subdev: %d", ret);
+ goto error_pm;
+ }
+
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_idle(dev);
+
+ return 0;
+
+error_pm:
+ v4l2_subdev_cleanup(&sensor->sd);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+error_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(&sensor->hdl);
+ media_entity_cleanup(&sensor->sd.entity);
+
+error_power_off:
+ imx111_power_off(sensor);
+
+error_ep_free:
+ v4l2_fwnode_endpoint_free(&sensor->bus_cfg);
+
+ return ret;
+}
+
+static void imx111_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx111 *sensor = sd_to_imx111(sd);
+
+ v4l2_async_unregister_subdev(&sensor->sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sensor->sd.entity);
+ v4l2_ctrl_handler_free(&sensor->hdl);
+ v4l2_fwnode_endpoint_free(&sensor->bus_cfg);
+
+ /*
+ * Disable runtime PM. In case runtime PM is disabled in the kernel,
+ * make sure to turn power off manually.
+ */
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev)) {
+ imx111_power_off(sensor);
+ pm_runtime_set_suspended(&client->dev);
+ }
+}
+
+static const struct of_device_id imx111_of_match[] = {
+ { .compatible = "sony,imx111" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx111_of_match);
+
+static struct i2c_driver imx111_i2c_driver = {
+ .driver = {
+ .name = "imx111",
+ .of_match_table = imx111_of_match,
+ .pm = &imx111_pm_ops,
+ },
+ .probe = imx111_probe,
+ .remove = imx111_remove,
+};
+module_i2c_driver(imx111_i2c_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Sony IMX111 CMOS Image Sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
index 2b5a6ce7b1ae..d5350bb46f14 100644
--- a/drivers/media/i2c/imx208.c
+++ b/drivers/media/i2c/imx208.c
@@ -2,13 +2,15 @@
// Copyright (C) 2021 Intel Corporation
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
-#include <linux/unaligned.h>
#define IMX208_REG_MODE_SELECT 0x0100
#define IMX208_MODE_STANDBY 0x00
@@ -268,6 +270,9 @@ static const struct imx208_mode supported_modes[] = {
};
struct imx208 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -372,7 +377,6 @@ static int imx208_write_reg(struct imx208 *imx208, u16 reg, u32 len, u32 val)
static int imx208_write_regs(struct imx208 *imx208,
const struct imx208_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
unsigned int i;
int ret;
@@ -380,7 +384,7 @@ static int imx208_write_regs(struct imx208 *imx208,
ret = imx208_write_reg(imx208, regs[i].address, 1,
regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(imx208->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -431,14 +435,13 @@ static int imx208_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx208 *imx208 =
container_of(ctrl->handler, struct imx208, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
int ret;
/*
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(imx208->dev))
return 0;
switch (ctrl->id) {
@@ -471,13 +474,13 @@ static int imx208_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_err(&client->dev,
+ dev_err(imx208->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
return ret;
}
@@ -620,7 +623,6 @@ static int imx208_set_pad_format(struct v4l2_subdev *sd,
static int imx208_identify_module(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
int ret;
u32 val;
@@ -630,13 +632,13 @@ static int imx208_identify_module(struct imx208 *imx208)
ret = imx208_read_reg(imx208, IMX208_REG_CHIP_ID,
2, &val);
if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
+ dev_err(imx208->dev, "failed to read chip id %x\n",
IMX208_CHIP_ID);
return ret;
}
if (val != IMX208_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(imx208->dev, "chip id mismatch: %x!=%x\n",
IMX208_CHIP_ID, val);
return -EIO;
}
@@ -649,7 +651,6 @@ static int imx208_identify_module(struct imx208 *imx208)
/* Start streaming */
static int imx208_start_streaming(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
const struct imx208_reg_list *reg_list;
int ret, link_freq_index;
@@ -662,7 +663,7 @@ static int imx208_start_streaming(struct imx208 *imx208)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(imx208->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -670,7 +671,7 @@ static int imx208_start_streaming(struct imx208 *imx208)
reg_list = &imx208->cur_mode->reg_list;
ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(imx208->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -687,14 +688,13 @@ static int imx208_start_streaming(struct imx208 *imx208)
/* Stop streaming */
static int imx208_stop_streaming(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
int ret;
/* set stream off register */
ret = imx208_write_reg(imx208, IMX208_REG_MODE_SELECT,
1, IMX208_MODE_STANDBY);
if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(imx208->dev, "%s failed to set stream\n", __func__);
/*
* Return success even if it was an error, as there is nothing the
@@ -706,13 +706,12 @@ static int imx208_stop_streaming(struct imx208 *imx208)
static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx208 *imx208 = to_imx208(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx208->imx208_mx);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx208->dev);
if (ret) {
mutex_unlock(&imx208->imx208_mx);
return ret;
@@ -727,7 +726,7 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx208_stop_streaming(imx208);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
}
mutex_unlock(&imx208->imx208_mx);
@@ -739,7 +738,7 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
mutex_unlock(&imx208->imx208_mx);
return ret;
@@ -778,7 +777,7 @@ static int imx208_read_otp(struct imx208 *imx208)
if (imx208->otp_read)
goto out_unlock;
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx208->dev);
if (ret)
goto out_unlock;
@@ -805,7 +804,7 @@ static int imx208_read_otp(struct imx208 *imx208)
}
out_pm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx208->dev);
out_unlock:
mutex_unlock(&imx208->imx208_mx);
@@ -835,7 +834,6 @@ static const BIN_ATTR_RO(otp, IMX208_OTP_SIZE);
/* Initialize control handlers */
static int imx208_init_controls(struct imx208 *imx208)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
struct v4l2_ctrl_handler *ctrl_hdlr = &imx208->ctrl_handler;
s64 exposure_max;
s64 vblank_def;
@@ -914,7 +912,7 @@ static int imx208_init_controls(struct imx208 *imx208)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(imx208->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
@@ -938,31 +936,36 @@ static void imx208_free_controls(struct imx208 *imx208)
static int imx208_probe(struct i2c_client *client)
{
struct imx208 *imx208;
+ unsigned long freq;
int ret;
bool full_power;
- u32 val = 0;
-
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != 19200000) {
- dev_err(&client->dev,
- "Unsupported clock-frequency %u. Expected 19200000.\n",
- val);
- return -EINVAL;
- }
imx208 = devm_kzalloc(&client->dev, sizeof(*imx208), GFP_KERNEL);
if (!imx208)
return -ENOMEM;
+ imx208->dev = &client->dev;
+
+ imx208->clk = devm_v4l2_sensor_clk_get(imx208->dev, NULL);
+ if (IS_ERR(imx208->clk))
+ return dev_err_probe(imx208->dev, PTR_ERR(imx208->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(imx208->clk);
+ if (freq != 19200000)
+ return dev_err_probe(imx208->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx208->sd, client, &imx208_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(imx208->dev);
if (full_power) {
/* Check module identity */
ret = imx208_identify_module(imx208);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(imx208->dev, "failed to find sensor: %d", ret);
goto error_probe;
}
}
@@ -972,7 +975,7 @@ static int imx208_probe(struct i2c_client *client)
ret = imx208_init_controls(imx208);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(imx208->dev, "failed to init controls: %d", ret);
goto error_probe;
}
@@ -985,7 +988,7 @@ static int imx208_probe(struct i2c_client *client)
imx208->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx208->sd.entity, 1, &imx208->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(imx208->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -993,17 +996,17 @@ static int imx208_probe(struct i2c_client *client)
if (ret < 0)
goto error_media_entity;
- ret = device_create_bin_file(&client->dev, &bin_attr_otp);
+ ret = device_create_bin_file(imx208->dev, &bin_attr_otp);
if (ret) {
- dev_err(&client->dev, "sysfs otp creation failed\n");
+ dev_err(imx208->dev, "sysfs otp creation failed\n");
goto error_async_subdev;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(imx208->dev);
+ pm_runtime_enable(imx208->dev);
+ pm_runtime_idle(imx208->dev);
return 0;
@@ -1027,13 +1030,13 @@ static void imx208_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct imx208 *imx208 = to_imx208(sd);
- device_remove_bin_file(&client->dev, &bin_attr_otp);
+ device_remove_bin_file(imx208->dev, &bin_attr_otp);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
imx208_free_controls(imx208);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx208->dev);
+ pm_runtime_set_suspended(imx208->dev);
mutex_destroy(&imx208->imx208_mx);
}
diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
index a0cef9e61b41..d4945b192776 100644
--- a/drivers/media/i2c/imx214.c
+++ b/drivers/media/i2c/imx214.c
@@ -881,6 +881,109 @@ static const struct v4l2_ctrl_ops imx214_ctrl_ops = {
.s_ctrl = imx214_set_ctrl,
};
+static int imx214_pll_calculate(struct imx214 *imx214, struct ccs_pll *pll,
+ unsigned int link_freq)
+{
+ struct ccs_pll_limits limits = {
+ .min_ext_clk_freq_hz = 6000000,
+ .max_ext_clk_freq_hz = 27000000,
+
+ .vt_fr = {
+ .min_pre_pll_clk_div = 1,
+ .max_pre_pll_clk_div = 15,
+ /* Value is educated guess as we don't have a spec */
+ .min_pll_ip_clk_freq_hz = 6000000,
+ /* Value is educated guess as we don't have a spec */
+ .max_pll_ip_clk_freq_hz = 12000000,
+ .min_pll_multiplier = 12,
+ .max_pll_multiplier = 1200,
+ .min_pll_op_clk_freq_hz = 338000000,
+ .max_pll_op_clk_freq_hz = 1200000000,
+ },
+ .vt_bk = {
+ .min_sys_clk_div = 2,
+ .max_sys_clk_div = 4,
+ .min_pix_clk_div = 5,
+ .max_pix_clk_div = 10,
+ .min_pix_clk_freq_hz = 30000000,
+ .max_pix_clk_freq_hz = 120000000,
+ },
+ .op_bk = {
+ .min_sys_clk_div = 1,
+ .max_sys_clk_div = 2,
+ .min_pix_clk_div = 6,
+ .max_pix_clk_div = 10,
+ .min_pix_clk_freq_hz = 30000000,
+ .max_pix_clk_freq_hz = 120000000,
+ },
+
+ .min_line_length_pck_bin = IMX214_PPL_DEFAULT,
+ .min_line_length_pck = IMX214_PPL_DEFAULT,
+ };
+ unsigned int num_lanes = imx214->bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+ /*
+ * There are no documented constraints on the sys clock frequency, for
+ * either branch. Recover them based on the PLL output clock frequency
+ * and sys_clk_div limits on one hand, and the pix clock frequency and
+ * the pix_clk_div limits on the other hand.
+ */
+ limits.vt_bk.min_sys_clk_freq_hz =
+ max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.vt_bk.max_sys_clk_div,
+ limits.vt_bk.min_pix_clk_freq_hz * limits.vt_bk.min_pix_clk_div);
+ limits.vt_bk.max_sys_clk_freq_hz =
+ min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.vt_bk.min_sys_clk_div,
+ limits.vt_bk.max_pix_clk_freq_hz * limits.vt_bk.max_pix_clk_div);
+
+ limits.op_bk.min_sys_clk_freq_hz =
+ max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.op_bk.max_sys_clk_div,
+ limits.op_bk.min_pix_clk_freq_hz * limits.op_bk.min_pix_clk_div);
+ limits.op_bk.max_sys_clk_freq_hz =
+ min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.op_bk.min_sys_clk_div,
+ limits.op_bk.max_pix_clk_freq_hz * limits.op_bk.max_pix_clk_div);
+
+ memset(pll, 0, sizeof(*pll));
+
+ pll->bus_type = CCS_PLL_BUS_TYPE_CSI2_DPHY;
+ pll->op_lanes = num_lanes;
+ pll->vt_lanes = num_lanes;
+ pll->csi2.lanes = num_lanes;
+
+ pll->binning_horizontal = 1;
+ pll->binning_vertical = 1;
+ pll->scale_m = 1;
+ pll->scale_n = 1;
+ pll->bits_per_pixel =
+ IMX214_CSI_DATA_FORMAT_RAW10 & IMX214_BITS_PER_PIXEL_MASK;
+ pll->flags = CCS_PLL_FLAG_LANE_SPEED_MODEL;
+ pll->link_freq = link_freq;
+ pll->ext_clk_freq_hz = clk_get_rate(imx214->xclk);
+
+ return ccs_pll_calculate(imx214->dev, &limits, pll);
+}
+
+static int imx214_pll_update(struct imx214 *imx214)
+{
+ u64 link_freq;
+ int ret;
+
+ link_freq = imx214->bus_cfg.link_frequencies[imx214->link_freq->val];
+ ret = imx214_pll_calculate(imx214, &imx214->pll, link_freq);
+ if (ret) {
+ dev_err(imx214->dev, "PLL calculations failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = v4l2_ctrl_s_ctrl_int64(imx214->pixel_rate,
+ imx214->pll.pixel_rate_pixel_array);
+ if (ret) {
+ dev_err(imx214->dev, "failed to set pixel rate\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int imx214_ctrls_init(struct imx214 *imx214)
{
static const struct v4l2_area unit_size = {
@@ -911,8 +1014,6 @@ static int imx214_ctrls_init(struct imx214 *imx214)
V4L2_CID_LINK_FREQ,
imx214->bus_cfg.nr_of_link_frequencies - 1,
0, imx214->bus_cfg.link_frequencies);
- if (imx214->link_freq)
- imx214->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
/*
* WARNING!
@@ -935,9 +1036,6 @@ static int imx214_ctrls_init(struct imx214 *imx214)
imx214->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
V4L2_CID_HBLANK, hblank, hblank,
1, hblank);
- if (imx214->hblank)
- imx214->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-
exposure_max = mode->vts_def - IMX214_EXPOSURE_OFFSET;
exposure_def = min(exposure_max, IMX214_EXPOSURE_DEFAULT);
imx214->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
@@ -957,13 +1055,9 @@ static int imx214_ctrls_init(struct imx214 *imx214)
imx214->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
V4L2_CID_HFLIP, 0, 1, 1, 0);
- if (imx214->hflip)
- imx214->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
imx214->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx214_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
- if (imx214->vflip)
- imx214->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
v4l2_ctrl_cluster(2, &imx214->hflip);
@@ -1003,6 +1097,19 @@ static int imx214_ctrls_init(struct imx214 *imx214)
return ret;
}
+ /* Now that the controls have been properly created, set their flags. */
+ imx214->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ imx214->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ imx214->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+ imx214->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ ret = imx214_pll_update(imx214);
+ if (ret < 0) {
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ dev_err(imx214->dev, "failed to update PLL\n");
+ return ret;
+ }
+
imx214->sd.ctrl_handler = ctrl_hdlr;
return 0;
@@ -1029,8 +1136,8 @@ static int imx214_start_streaming(struct imx214 *imx214)
return ret;
}
- bit_rate_mbps = (imx214->pll.pixel_rate_csi / 1000000)
- * imx214->pll.bits_per_pixel;
+ bit_rate_mbps = imx214->pll.pixel_rate_csi / 1000000
+ * imx214->pll.bits_per_pixel;
ret = cci_write(imx214->regmap, IMX214_REG_REQ_LINK_BIT_RATE,
IMX214_LINK_BIT_RATE_MBPS(bit_rate_mbps), NULL);
if (ret) {
@@ -1115,109 +1222,6 @@ err_rpm_put:
return ret;
}
-static int imx214_pll_calculate(struct imx214 *imx214, struct ccs_pll *pll,
- unsigned int link_freq)
-{
- struct ccs_pll_limits limits = {
- .min_ext_clk_freq_hz = 6000000,
- .max_ext_clk_freq_hz = 27000000,
-
- .vt_fr = {
- .min_pre_pll_clk_div = 1,
- .max_pre_pll_clk_div = 15,
- /* Value is educated guess as we don't have a spec */
- .min_pll_ip_clk_freq_hz = 6000000,
- /* Value is educated guess as we don't have a spec */
- .max_pll_ip_clk_freq_hz = 12000000,
- .min_pll_multiplier = 12,
- .max_pll_multiplier = 1200,
- .min_pll_op_clk_freq_hz = 338000000,
- .max_pll_op_clk_freq_hz = 1200000000,
- },
- .vt_bk = {
- .min_sys_clk_div = 2,
- .max_sys_clk_div = 4,
- .min_pix_clk_div = 5,
- .max_pix_clk_div = 10,
- .min_pix_clk_freq_hz = 30000000,
- .max_pix_clk_freq_hz = 120000000,
- },
- .op_bk = {
- .min_sys_clk_div = 1,
- .max_sys_clk_div = 2,
- .min_pix_clk_div = 6,
- .max_pix_clk_div = 10,
- .min_pix_clk_freq_hz = 30000000,
- .max_pix_clk_freq_hz = 120000000,
- },
-
- .min_line_length_pck_bin = IMX214_PPL_DEFAULT,
- .min_line_length_pck = IMX214_PPL_DEFAULT,
- };
- unsigned int num_lanes = imx214->bus_cfg.bus.mipi_csi2.num_data_lanes;
-
- /*
- * There are no documented constraints on the sys clock frequency, for
- * either branch. Recover them based on the PLL output clock frequency
- * and sys_clk_div limits on one hand, and the pix clock frequency and
- * the pix_clk_div limits on the other hand.
- */
- limits.vt_bk.min_sys_clk_freq_hz =
- max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.vt_bk.max_sys_clk_div,
- limits.vt_bk.min_pix_clk_freq_hz * limits.vt_bk.min_pix_clk_div);
- limits.vt_bk.max_sys_clk_freq_hz =
- min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.vt_bk.min_sys_clk_div,
- limits.vt_bk.max_pix_clk_freq_hz * limits.vt_bk.max_pix_clk_div);
-
- limits.op_bk.min_sys_clk_freq_hz =
- max(limits.vt_fr.min_pll_op_clk_freq_hz / limits.op_bk.max_sys_clk_div,
- limits.op_bk.min_pix_clk_freq_hz * limits.op_bk.min_pix_clk_div);
- limits.op_bk.max_sys_clk_freq_hz =
- min(limits.vt_fr.max_pll_op_clk_freq_hz / limits.op_bk.min_sys_clk_div,
- limits.op_bk.max_pix_clk_freq_hz * limits.op_bk.max_pix_clk_div);
-
- memset(pll, 0, sizeof(*pll));
-
- pll->bus_type = CCS_PLL_BUS_TYPE_CSI2_DPHY;
- pll->op_lanes = num_lanes;
- pll->vt_lanes = num_lanes;
- pll->csi2.lanes = num_lanes;
-
- pll->binning_horizontal = 1;
- pll->binning_vertical = 1;
- pll->scale_m = 1;
- pll->scale_n = 1;
- pll->bits_per_pixel =
- IMX214_CSI_DATA_FORMAT_RAW10 & IMX214_BITS_PER_PIXEL_MASK;
- pll->flags = CCS_PLL_FLAG_LANE_SPEED_MODEL;
- pll->link_freq = link_freq;
- pll->ext_clk_freq_hz = clk_get_rate(imx214->xclk);
-
- return ccs_pll_calculate(imx214->dev, &limits, pll);
-}
-
-static int imx214_pll_update(struct imx214 *imx214)
-{
- u64 link_freq;
- int ret;
-
- link_freq = imx214->bus_cfg.link_frequencies[imx214->link_freq->val];
- ret = imx214_pll_calculate(imx214, &imx214->pll, link_freq);
- if (ret) {
- dev_err(imx214->dev, "PLL calculations failed: %d\n", ret);
- return ret;
- }
-
- ret = v4l2_ctrl_s_ctrl_int64(imx214->pixel_rate,
- imx214->pll.pixel_rate_pixel_array);
- if (ret) {
- dev_err(imx214->dev, "failed to set pixel rate\n");
- return ret;
- }
-
- return 0;
-}
-
static int imx214_get_frame_interval(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_interval *fival)
@@ -1324,10 +1328,11 @@ static int imx214_identify_module(struct imx214 *imx214)
return 0;
}
-static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
+static int imx214_parse_fwnode(struct imx214 *imx214)
{
+ struct fwnode_handle *endpoint __free(fwnode_handle) = NULL;
struct v4l2_fwnode_endpoint *bus_cfg = &imx214->bus_cfg;
- struct fwnode_handle *endpoint;
+ struct device *dev = imx214->dev;
unsigned int i;
int ret;
@@ -1337,11 +1342,8 @@ static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
bus_cfg->bus_type = V4L2_MBUS_CSI2_DPHY;
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, bus_cfg);
- fwnode_handle_put(endpoint);
- if (ret) {
- dev_err_probe(dev, ret, "parsing endpoint node failed\n");
- goto error;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "parsing endpoint node failed\n");
/* Check the number of MIPI CSI2 data lanes */
if (bus_cfg->bus.mipi_csi2.num_data_lanes != 4) {
@@ -1357,18 +1359,16 @@ static int imx214_parse_fwnode(struct device *dev, struct imx214 *imx214)
u64 freq = bus_cfg->link_frequencies[i];
struct ccs_pll pll;
- if (!imx214_pll_calculate(imx214, &pll, freq))
- break;
if (freq == IMX214_DEFAULT_LINK_FREQ_LEGACY) {
dev_warn(dev,
"link-frequencies %d not supported, please review your DT. Continuing anyway\n",
IMX214_DEFAULT_LINK_FREQ);
freq = IMX214_DEFAULT_LINK_FREQ;
- if (imx214_pll_calculate(imx214, &pll, freq))
- continue;
bus_cfg->link_frequencies[i] = freq;
- break;
}
+
+ if (!imx214_pll_calculate(imx214, &pll, freq))
+ break;
}
if (i == bus_cfg->nr_of_link_frequencies)
@@ -1396,7 +1396,7 @@ static int imx214_probe(struct i2c_client *client)
imx214->dev = dev;
- imx214->xclk = devm_clk_get(dev, NULL);
+ imx214->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(imx214->xclk))
return dev_err_probe(dev, PTR_ERR(imx214->xclk),
"failed to get xclk\n");
@@ -1415,7 +1415,7 @@ static int imx214_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(imx214->regmap),
"failed to initialize CCI\n");
- ret = imx214_parse_fwnode(dev, imx214);
+ ret = imx214_parse_fwnode(imx214);
if (ret)
return ret;
@@ -1459,12 +1459,6 @@ static int imx214_probe(struct i2c_client *client)
pm_runtime_set_active(imx214->dev);
pm_runtime_enable(imx214->dev);
- ret = imx214_pll_update(imx214);
- if (ret < 0) {
- dev_err_probe(dev, ret, "failed to update PLL\n");
- goto error_subdev_cleanup;
- }
-
ret = v4l2_async_register_subdev_sensor(&imx214->sd);
if (ret < 0) {
dev_err_probe(dev, ret,
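The removed imx214_pll_calculate() recovered the undocumented sys-clock limits by intersecting two constraints: what the PLL output can reach through the sys-clock divider, and what the pix-clock branch can consume through its own divider. A standalone sketch of that clamping arithmetic follows; the numeric limits are invented for illustration and are not IMX214 datasheet values.

    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        /* Hypothetical CCS PLL branch limits (Hz and divider bounds). */
        unsigned long min_pll_op = 500000000UL, max_pll_op = 1000000000UL;
        unsigned long min_sys_clk_div = 1, max_sys_clk_div = 8;
        unsigned long min_pix_clk = 30000000UL, max_pix_clk = 120000000UL;
        unsigned long min_pix_clk_div = 4, max_pix_clk_div = 10;

        /* Intersect "reachable from the PLL" with "usable by pix clock". */
        unsigned long min_sys = MAX(min_pll_op / max_sys_clk_div,
                                    min_pix_clk * min_pix_clk_div);
        unsigned long max_sys = MIN(max_pll_op / min_sys_clk_div,
                                    max_pix_clk * max_pix_clk_div);

        printf("sys clk range: %lu..%lu Hz\n", min_sys, max_sys);
        return 0;
    }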
diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
index 3faf48f34af4..bc55fe2a93b4 100644
--- a/drivers/media/i2c/imx219.c
+++ b/drivers/media/i2c/imx219.c
@@ -68,6 +68,7 @@
#define IMX219_EXPOSURE_STEP 1
#define IMX219_EXPOSURE_DEFAULT 0x640
#define IMX219_EXPOSURE_MAX 65535
+#define IMX219_EXPOSURE_OFFSET 4
/* V_TIMING internal */
#define IMX219_REG_FRM_LENGTH_A CCI_REG16(0x0160)
@@ -409,24 +410,14 @@ static void imx219_get_binning(struct v4l2_subdev_state *state, u8 *bin_h,
u32 hbin = crop->width / format->width;
u32 vbin = crop->height / format->height;
- *bin_h = IMX219_BINNING_NONE;
- *bin_v = IMX219_BINNING_NONE;
-
- /*
- * Use analog binning only if both dimensions are binned, as it crops
- * the other dimension.
- */
if (hbin == 2 && vbin == 2) {
*bin_h = IMX219_BINNING_X2_ANALOG;
*bin_v = IMX219_BINNING_X2_ANALOG;
-
- return;
+ } else {
+ *bin_h = IMX219_BINNING_NONE;
+ *bin_v = IMX219_BINNING_NONE;
}
- if (hbin == 2)
- *bin_h = IMX219_BINNING_X2;
- if (vbin == 2)
- *bin_v = IMX219_BINNING_X2;
}
static inline u32 imx219_get_rate_factor(struct v4l2_subdev_state *state)
@@ -460,13 +451,17 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
int exposure_max, exposure_def;
/* Update max exposure while meeting expected vblanking */
- exposure_max = format->height + ctrl->val - 4;
+ exposure_max = format->height + ctrl->val - IMX219_EXPOSURE_OFFSET;
exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
- exposure_max : IMX219_EXPOSURE_DEFAULT;
- __v4l2_ctrl_modify_range(imx219->exposure,
- imx219->exposure->minimum,
- exposure_max, imx219->exposure->step,
- exposure_def);
+ exposure_max : IMX219_EXPOSURE_DEFAULT;
+ ret = __v4l2_ctrl_modify_range(imx219->exposure,
+ imx219->exposure->minimum,
+ exposure_max,
+ imx219->exposure->step,
+ exposure_def);
+ if (ret)
+ return ret;
+
}
/*
@@ -585,9 +580,9 @@ static int imx219_init_controls(struct imx219 *imx219)
IMX219_LLP_MIN - mode->width,
IMX219_LLP_MAX - mode->width, 1,
IMX219_LLP_MIN - mode->width);
- exposure_max = mode->fll_def - 4;
+ exposure_max = mode->fll_def - IMX219_EXPOSURE_OFFSET;
exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
- exposure_max : IMX219_EXPOSURE_DEFAULT;
+ exposure_max : IMX219_EXPOSURE_DEFAULT;
imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
V4L2_CID_EXPOSURE,
IMX219_EXPOSURE_MIN, exposure_max,
@@ -856,8 +851,9 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
const struct imx219_mode *mode;
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
- u8 bin_h, bin_v;
+ u8 bin_h, bin_v, binning;
u32 prev_line_len;
+ int ret;
format = v4l2_subdev_state_get_format(state, 0);
prev_line_len = format->width + imx219->hblank->val;
@@ -877,9 +873,12 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
bin_h = min(IMX219_PIXEL_ARRAY_WIDTH / format->width, 2U);
bin_v = min(IMX219_PIXEL_ARRAY_HEIGHT / format->height, 2U);
+ /* Ensure bin_h and bin_v are the same to avoid 1:2 or 2:1 stretching */
+ binning = min(bin_h, bin_v);
+
crop = v4l2_subdev_state_get_crop(state, 0);
- crop->width = format->width * bin_h;
- crop->height = format->height * bin_v;
+ crop->width = format->width * binning;
+ crop->height = format->height * binning;
crop->left = (IMX219_NATIVE_WIDTH - crop->width) / 2;
crop->top = (IMX219_NATIVE_HEIGHT - crop->height) / 2;
@@ -890,19 +889,28 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
int pixel_rate;
/* Update limits and set FPS to default */
- __v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
- IMX219_FLL_MAX - mode->height, 1,
+ ret = __v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
+ IMX219_FLL_MAX - mode->height, 1,
+ mode->fll_def - mode->height);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_s_ctrl(imx219->vblank,
mode->fll_def - mode->height);
- __v4l2_ctrl_s_ctrl(imx219->vblank,
- mode->fll_def - mode->height);
+ if (ret)
+ return ret;
+
/* Update max exposure while meeting expected vblanking */
- exposure_max = mode->fll_def - 4;
+ exposure_max = mode->fll_def - IMX219_EXPOSURE_OFFSET;
exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
- exposure_max : IMX219_EXPOSURE_DEFAULT;
- __v4l2_ctrl_modify_range(imx219->exposure,
- imx219->exposure->minimum,
- exposure_max, imx219->exposure->step,
- exposure_def);
+ exposure_max : IMX219_EXPOSURE_DEFAULT;
+ ret = __v4l2_ctrl_modify_range(imx219->exposure,
+ imx219->exposure->minimum,
+ exposure_max,
+ imx219->exposure->step,
+ exposure_def);
+ if (ret)
+ return ret;
/*
* With analog binning the default minimum line length of 3448
@@ -913,9 +921,12 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
imx219_get_binning(state, &bin_h, &bin_v);
llp_min = (bin_h & bin_v) == IMX219_BINNING_X2_ANALOG ?
IMX219_BINNED_LLP_MIN : IMX219_LLP_MIN;
- __v4l2_ctrl_modify_range(imx219->hblank, llp_min - mode->width,
- IMX219_LLP_MAX - mode->width, 1,
- llp_min - mode->width);
+ ret = __v4l2_ctrl_modify_range(imx219->hblank,
+ llp_min - mode->width,
+ IMX219_LLP_MAX - mode->width, 1,
+ llp_min - mode->width);
+ if (ret)
+ return ret;
/*
* Retain PPL setting from previous mode so that the
* line time does not change on a mode change.
@@ -924,13 +935,17 @@ static int imx219_set_pad_format(struct v4l2_subdev *sd,
* mode width subtracted.
*/
hblank = prev_line_len - mode->width;
- __v4l2_ctrl_s_ctrl(imx219->hblank, hblank);
+ ret = __v4l2_ctrl_s_ctrl(imx219->hblank, hblank);
+ if (ret)
+ return ret;
/* Scale the pixel rate based on the mode specific factor */
pixel_rate = imx219_get_pixel_rate(imx219) *
imx219_get_rate_factor(state);
- __v4l2_ctrl_modify_range(imx219->pixel_rate, pixel_rate,
- pixel_rate, 1, pixel_rate);
+ ret = __v4l2_ctrl_modify_range(imx219->pixel_rate, pixel_rate,
+ pixel_rate, 1, pixel_rate);
+ if (ret)
+ return ret;
}
return 0;
@@ -979,9 +994,7 @@ static int imx219_init_state(struct v4l2_subdev *sd,
},
};
- imx219_set_pad_format(sd, state, &fmt);
-
- return 0;
+ return imx219_set_pad_format(sd, state, &fmt);
}
static const struct v4l2_subdev_video_ops imx219_video_ops = {
@@ -1032,6 +1045,10 @@ static int imx219_power_on(struct device *dev)
goto reg_off;
}
+ /*
+ * Note: this misinterprets reset assertion - do not re-use this code.
+ * The XCLR pin is driven with the wrong logical level for a reset signal.
+ */
gpiod_set_value_cansleep(imx219->reset_gpio, 1);
usleep_range(IMX219_XCLR_MIN_DELAY_US,
IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
@@ -1186,7 +1203,7 @@ static int imx219_probe(struct i2c_client *client)
"failed to initialize CCI\n");
/* Get system clock (xclk) */
- imx219->xclk = devm_clk_get(dev, NULL);
+ imx219->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(imx219->xclk))
return dev_err_probe(dev, PTR_ERR(imx219->xclk),
"failed to get xclk\n");
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index 9e30fce1f223..e50dcfd830f5 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -8,11 +8,12 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
-#include <linux/unaligned.h>
#define IMX258_REG_MODE_SELECT CCI_REG8(0x0100)
#define IMX258_MODE_STANDBY 0x00
@@ -645,6 +646,8 @@ static const struct imx258_mode supported_modes[] = {
};
struct imx258 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct regmap *regmap;
@@ -751,7 +754,6 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx258 *imx258 =
container_of(ctrl->handler, struct imx258, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
int ret = 0;
/*
@@ -765,7 +767,7 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (pm_runtime_get_if_in_use(&client->dev) == 0)
+ if (pm_runtime_get_if_in_use(imx258->dev) == 0)
return 0;
switch (ctrl->id) {
@@ -811,14 +813,14 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
NULL);
break;
default:
- dev_info(&client->dev,
+ dev_info(imx258->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
ret = -EINVAL;
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx258->dev);
return ret;
}
@@ -1013,14 +1015,13 @@ static int imx258_get_selection(struct v4l2_subdev *sd,
/* Start streaming */
static int imx258_start_streaming(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
const struct imx258_reg_list *reg_list;
const struct imx258_link_freq_config *link_freq_cfg;
int ret, link_freq_index;
ret = cci_write(imx258->regmap, IMX258_REG_RESET, 0x01, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to reset sensor\n", __func__);
+ dev_err(imx258->dev, "%s failed to reset sensor\n", __func__);
return ret;
}
@@ -1034,21 +1035,21 @@ static int imx258_start_streaming(struct imx258 *imx258)
reg_list = &link_freq_cfg->link_cfg[imx258->lane_mode_idx].reg_list;
ret = cci_multi_reg_write(imx258->regmap, reg_list->regs, reg_list->num_of_regs, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(imx258->dev, "%s failed to set plls\n", __func__);
return ret;
}
ret = cci_multi_reg_write(imx258->regmap, mode_common_regs,
ARRAY_SIZE(mode_common_regs), NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set common regs\n", __func__);
+ dev_err(imx258->dev, "%s failed to set common regs\n", __func__);
return ret;
}
ret = cci_multi_reg_write(imx258->regmap, imx258->variant_cfg->regs,
imx258->variant_cfg->num_regs, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set variant config\n",
+ dev_err(imx258->dev, "%s failed to set variant config\n",
__func__);
return ret;
}
@@ -1057,7 +1058,7 @@ static int imx258_start_streaming(struct imx258 *imx258)
!!(imx258->csi2_flags & V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK),
NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set clock lane mode\n", __func__);
+ dev_err(imx258->dev, "%s failed to set clock lane mode\n", __func__);
return ret;
}
@@ -1065,7 +1066,7 @@ static int imx258_start_streaming(struct imx258 *imx258)
reg_list = &imx258->cur_mode->reg_list;
ret = cci_multi_reg_write(imx258->regmap, reg_list->regs, reg_list->num_of_regs, NULL);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(imx258->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1082,14 +1083,13 @@ static int imx258_start_streaming(struct imx258 *imx258)
/* Stop streaming */
static int imx258_stop_streaming(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
int ret;
/* set stream off register */
ret = cci_write(imx258->regmap, IMX258_REG_MODE_SELECT,
IMX258_MODE_STANDBY, NULL);
if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(imx258->dev, "%s failed to set stream\n", __func__);
/*
* Return success even if it was an error, as there is nothing the
@@ -1135,13 +1135,12 @@ static int imx258_power_off(struct device *dev)
static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx258 *imx258 = to_imx258(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx258->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx258->dev);
if (ret < 0)
goto err_unlock;
@@ -1154,7 +1153,7 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx258_stop_streaming(imx258);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx258->dev);
}
mutex_unlock(&imx258->mutex);
@@ -1162,7 +1161,7 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx258->dev);
err_unlock:
mutex_unlock(&imx258->mutex);
@@ -1172,20 +1171,19 @@ err_unlock:
/* Verify chip ID */
static int imx258_identify_module(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
int ret;
u64 val;
ret = cci_read(imx258->regmap, IMX258_REG_CHIP_ID,
&val, NULL);
if (ret) {
- dev_err(&client->dev, "failed to read chip id %x\n",
+ dev_err(imx258->dev, "failed to read chip id %x\n",
IMX258_CHIP_ID);
return ret;
}
if (val != IMX258_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
+ dev_err(imx258->dev, "chip id mismatch: %x!=%llx\n",
IMX258_CHIP_ID, val);
return -EIO;
}
@@ -1217,7 +1215,6 @@ static const struct v4l2_subdev_internal_ops imx258_internal_ops = {
/* Initialize control handlers */
static int imx258_init_controls(struct imx258 *imx258)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
const struct imx258_link_freq_config *link_freq_cfgs;
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
@@ -1308,12 +1305,12 @@ static int imx258_init_controls(struct imx258 *imx258)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(imx258->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(imx258->dev, &props);
if (ret)
goto error;
@@ -1339,15 +1336,14 @@ static void imx258_free_controls(struct imx258 *imx258)
mutex_destroy(&imx258->mutex);
}
-static int imx258_get_regulators(struct imx258 *imx258,
- struct i2c_client *client)
+static int imx258_get_regulators(struct imx258 *imx258)
{
unsigned int i;
for (i = 0; i < IMX258_NUM_SUPPLIES; i++)
imx258->supplies[i].supply = imx258_supply_name[i];
- return devm_regulator_bulk_get(&client->dev,
+ return devm_regulator_bulk_get(imx258->dev,
IMX258_NUM_SUPPLIES, imx258->supplies);
}
@@ -1365,30 +1361,27 @@ static int imx258_probe(struct i2c_client *client)
if (!imx258)
return -ENOMEM;
+ imx258->dev = &client->dev;
+
imx258->regmap = devm_cci_regmap_init_i2c(client, 16);
if (IS_ERR(imx258->regmap)) {
ret = PTR_ERR(imx258->regmap);
- dev_err(&client->dev, "failed to initialize CCI: %d\n", ret);
+ dev_err(imx258->dev, "failed to initialize CCI: %d\n", ret);
return ret;
}
- ret = imx258_get_regulators(imx258, client);
+ ret = imx258_get_regulators(imx258);
if (ret)
- return dev_err_probe(&client->dev, ret,
+ return dev_err_probe(imx258->dev, ret,
"failed to get regulators\n");
- imx258->clk = devm_clk_get_optional(&client->dev, NULL);
+ imx258->clk = devm_v4l2_sensor_clk_get_legacy(imx258->dev, NULL, false,
+ 0);
if (IS_ERR(imx258->clk))
- return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
+ return dev_err_probe(imx258->dev, PTR_ERR(imx258->clk),
"error getting clock\n");
- if (!imx258->clk) {
- dev_dbg(&client->dev,
- "no clock provided, using clock-frequency property\n");
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- } else {
- val = clk_get_rate(imx258->clk);
- }
+ val = clk_get_rate(imx258->clk);
switch (val) {
case 19200000:
@@ -1400,32 +1393,32 @@ static int imx258_probe(struct i2c_client *client)
imx258->link_freq_menu_items = link_freq_menu_items_24;
break;
default:
- dev_err(&client->dev, "input clock frequency of %u not supported\n",
+ dev_err(imx258->dev, "input clock frequency of %u not supported\n",
val);
return -EINVAL;
}
- endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ endpoint = fwnode_graph_get_next_endpoint(dev_fwnode(imx258->dev), NULL);
if (!endpoint) {
- dev_err(&client->dev, "Endpoint node not found\n");
+ dev_err(imx258->dev, "Endpoint node not found\n");
return -EINVAL;
}
ret = v4l2_fwnode_endpoint_alloc_parse(endpoint, &ep);
fwnode_handle_put(endpoint);
if (ret) {
- dev_err(&client->dev, "Parsing endpoint node failed\n");
+ dev_err(imx258->dev, "Parsing endpoint node failed\n");
return ret;
}
- ret = v4l2_link_freq_to_bitmap(&client->dev,
+ ret = v4l2_link_freq_to_bitmap(imx258->dev,
ep.link_frequencies,
ep.nr_of_link_frequencies,
imx258->link_freq_menu_items,
ARRAY_SIZE(link_freq_menu_items_19_2),
&imx258->link_freq_bitmap);
if (ret) {
- dev_err(&client->dev, "Link frequency not supported\n");
+ dev_err(imx258->dev, "Link frequency not supported\n");
goto error_endpoint_free;
}
@@ -1438,7 +1431,7 @@ static int imx258_probe(struct i2c_client *client)
imx258->lane_mode_idx = IMX258_4_LANE_MODE;
break;
default:
- dev_err(&client->dev, "Invalid data lanes: %u\n",
+ dev_err(imx258->dev, "Invalid data lanes: %u\n",
ep.bus.mipi_csi2.num_data_lanes);
ret = -EINVAL;
goto error_endpoint_free;
@@ -1446,7 +1439,7 @@ static int imx258_probe(struct i2c_client *client)
imx258->csi2_flags = ep.bus.mipi_csi2.flags;
- imx258->variant_cfg = device_get_match_data(&client->dev);
+ imx258->variant_cfg = device_get_match_data(imx258->dev);
if (!imx258->variant_cfg)
imx258->variant_cfg = &imx258_cfg;
@@ -1454,7 +1447,7 @@ static int imx258_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&imx258->sd, client, &imx258_subdev_ops);
/* Will be powered off via pm_runtime_idle */
- ret = imx258_power_on(&client->dev);
+ ret = imx258_power_on(imx258->dev);
if (ret)
goto error_endpoint_free;
@@ -1486,9 +1479,9 @@ static int imx258_probe(struct i2c_client *client)
if (ret < 0)
goto error_media_entity;
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(imx258->dev);
+ pm_runtime_enable(imx258->dev);
+ pm_runtime_idle(imx258->dev);
v4l2_fwnode_endpoint_free(&ep);
return 0;
@@ -1500,7 +1493,7 @@ error_handler_free:
imx258_free_controls(imx258);
error_identify:
- imx258_power_off(&client->dev);
+ imx258_power_off(imx258->dev);
error_endpoint_free:
v4l2_fwnode_endpoint_free(&ep);
@@ -1517,10 +1510,10 @@ static void imx258_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
imx258_free_controls(imx258);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- imx258_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx258->dev);
+ if (!pm_runtime_status_suspended(imx258->dev))
+ imx258_power_off(imx258->dev);
+ pm_runtime_set_suspended(imx258->dev);
}
static const struct dev_pm_ops imx258_pm_ops = {
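The imx258 changes are mostly mechanical: a struct device pointer is cached in the driver state at probe time, replacing the repeated v4l2_get_subdevdata()/&client->dev lookups. A minimal sketch of the pattern; names are illustrative, not the exact imx258 layout:

    struct sensor {
        struct device *dev;        /* cached once in probe */
        struct v4l2_subdev sd;
    };

    static int sensor_probe(struct i2c_client *client)
    {
        struct sensor *s = devm_kzalloc(&client->dev, sizeof(*s), GFP_KERNEL);

        if (!s)
            return -ENOMEM;

        s->dev = &client->dev;
        /* All later logging and runtime PM use s->dev directly, without
         * fetching the client back from the subdev. */
        return 0;
    }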
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index a2b824986027..8ec78b60bea6 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -826,6 +826,8 @@ static int imx274_start_stream(struct stimx274 *priv)
* if rst = 0, keep it in reset;
* if rst = 1, bring it out of reset.
*
+ * Note: this misinterprets reset assertion - do not re-use this code.
+ * The XCLR pin is driven with the wrong logical level for a reset signal.
*/
static void imx274_reset(struct stimx274 *priv, int rst)
{
@@ -2032,8 +2034,7 @@ static int imx274_probe(struct i2c_client *client)
/* initialize regmap */
imx274->regmap = devm_regmap_init_i2c(client, &imx274_regmap_config);
if (IS_ERR(imx274->regmap)) {
- dev_err(dev,
- "regmap init failed: %ld\n", PTR_ERR(imx274->regmap));
+ dev_err(dev, "regmap init failed: %pe\n", imx274->regmap);
ret = -ENODEV;
goto err_regmap;
}
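The imx274 logging fix switches to the %pe printk specifier, which prints an ERR_PTR()-encoded pointer as a symbolic errno name (when symbolic error names are enabled in the kernel config) instead of going through PTR_ERR() and a raw number. A one-line sketch:

    /* With CONFIG_SYMBOLIC_ERRNAME this prints "... failed: -ENOMEM". */
    dev_err(dev, "regmap init failed: %pe\n", ERR_PTR(-ENOMEM));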
diff --git a/drivers/media/i2c/imx283.c b/drivers/media/i2c/imx283.c
index 67e8bb432d10..8ab63ad8f385 100644
--- a/drivers/media/i2c/imx283.c
+++ b/drivers/media/i2c/imx283.c
@@ -1460,11 +1460,10 @@ static int imx283_probe(struct i2c_client *client)
}
/* Get system clock (xclk) */
- imx283->xclk = devm_clk_get(imx283->dev, NULL);
- if (IS_ERR(imx283->xclk)) {
+ imx283->xclk = devm_v4l2_sensor_clk_get(imx283->dev, NULL);
+ if (IS_ERR(imx283->xclk))
return dev_err_probe(imx283->dev, PTR_ERR(imx283->xclk),
"failed to get xclk\n");
- }
xclk_freq = clk_get_rate(imx283->xclk);
for (i = 0; i < ARRAY_SIZE(imx283_frequencies); i++) {
diff --git a/drivers/media/i2c/imx290.c b/drivers/media/i2c/imx290.c
index ec172556612e..21cbc81cb2ed 100644
--- a/drivers/media/i2c/imx290.c
+++ b/drivers/media/i2c/imx290.c
@@ -1422,14 +1422,14 @@ static int imx290_get_regulators(struct device *dev, struct imx290 *imx290)
static int imx290_init_clk(struct imx290 *imx290)
{
u32 xclk_freq;
- int ret;
- ret = device_property_read_u32(imx290->dev, "clock-frequency",
- &xclk_freq);
- if (ret) {
- dev_err(imx290->dev, "Could not get xclk frequency\n");
- return ret;
- }
+ imx290->xclk = devm_v4l2_sensor_clk_get_legacy(imx290->dev, "xclk",
+ false, 0);
+ if (IS_ERR(imx290->xclk))
+ return dev_err_probe(imx290->dev, PTR_ERR(imx290->xclk),
+ "Could not get xclk\n");
+
+ xclk_freq = clk_get_rate(imx290->xclk);
/* external clock must be 37.125 MHz or 74.25MHz */
switch (xclk_freq) {
@@ -1445,12 +1445,6 @@ static int imx290_init_clk(struct imx290 *imx290)
return -EINVAL;
}
- ret = clk_set_rate(imx290->xclk, xclk_freq);
- if (ret) {
- dev_err(imx290->dev, "Could not set xclk frequency\n");
- return ret;
- }
-
return 0;
}
@@ -1596,11 +1590,6 @@ static int imx290_probe(struct i2c_client *client)
return ret;
/* Acquire resources. */
- imx290->xclk = devm_clk_get(dev, "xclk");
- if (IS_ERR(imx290->xclk))
- return dev_err_probe(dev, PTR_ERR(imx290->xclk),
- "Could not get xclk\n");
-
ret = imx290_get_regulators(dev, imx290);
if (ret < 0)
return dev_err_probe(dev, ret, "Cannot get regulators\n");
@@ -1611,7 +1600,7 @@ static int imx290_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(imx290->rst_gpio),
"Cannot get reset gpio\n");
- /* Initialize external clock frequency. */
+ /* Initialize external clock. */
ret = imx290_init_clk(imx290);
if (ret)
return ret;
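imx290 now obtains its external clock through devm_v4l2_sensor_clk_get_legacy() and only validates the resulting rate. Based on this series, the helper appears to fall back to a firmware-described fixed rate (e.g. a "clock-frequency" property) when no real clock reference exists, which is why the hand-rolled property read and the clk_set_rate() call could be dropped; treat that description as an assumption from context. A condensed consumer-side sketch:

    /* Sketch: acquire the clock (or its firmware fallback), then
     * validate whatever rate it reports. */
    imx290->xclk = devm_v4l2_sensor_clk_get_legacy(imx290->dev, "xclk",
                                                   false, 0);
    if (IS_ERR(imx290->xclk))
        return dev_err_probe(imx290->dev, PTR_ERR(imx290->xclk),
                             "Could not get xclk\n");

    switch (clk_get_rate(imx290->xclk)) {
    case 37125000:    /* 37.125 MHz */
    case 74250000:    /* 74.25 MHz */
        break;
    default:
        return -EINVAL;
    }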
diff --git a/drivers/media/i2c/imx296.c b/drivers/media/i2c/imx296.c
index 61116f4e3f76..69636db11a2b 100644
--- a/drivers/media/i2c/imx296.c
+++ b/drivers/media/i2c/imx296.c
@@ -921,7 +921,7 @@ static int imx296_read_temperature(struct imx296 *sensor, int *temp)
tmdout &= IMX296_TMDOUT_MASK;
- /* T(°C) = 246.312 - 0.304 * TMDOUT */;
+ /* T(°C) = 246.312 - 0.304 * TMDOUT */
*temp = 246312 - 304 * tmdout;
return imx296_write(sensor, IMX296_TMDCTRL, 0, NULL);
@@ -1043,7 +1043,7 @@ static int imx296_probe(struct i2c_client *client)
return dev_err_probe(sensor->dev, PTR_ERR(sensor->reset),
"failed to get reset GPIO\n");
- sensor->clk = devm_clk_get(sensor->dev, "inck");
+ sensor->clk = devm_v4l2_sensor_clk_get(sensor->dev, "inck");
if (IS_ERR(sensor->clk))
return dev_err_probe(sensor->dev, PTR_ERR(sensor->clk),
"failed to get clock\n");
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
index 701840f4a5cc..953310ef3046 100644
--- a/drivers/media/i2c/imx319.c
+++ b/drivers/media/i2c/imx319.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -106,11 +108,12 @@ struct imx319_mode {
};
struct imx319_hwcfg {
- u32 ext_clk; /* sensor external clk */
unsigned long link_freq_bitmap;
};
struct imx319 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1839,14 +1842,13 @@ static int imx319_write_reg(struct imx319 *imx319, u16 reg, u32 len, u32 val)
static int imx319_write_regs(struct imx319 *imx319,
const struct imx319_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
int ret;
u32 i;
for (i = 0; i < len; i++) {
ret = imx319_write_reg(imx319, regs[i].address, 1, regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(imx319->dev,
"write reg 0x%4.4x return err %d",
regs[i].address, ret);
return ret;
@@ -1880,7 +1882,6 @@ static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx319 *imx319 = container_of(ctrl->handler,
struct imx319, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
s64 max;
int ret;
@@ -1899,7 +1900,7 @@ static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(imx319->dev))
return 0;
switch (ctrl->id) {
@@ -1933,12 +1934,12 @@ static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
+ dev_info(imx319->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx319->dev);
return ret;
}
@@ -2087,7 +2088,6 @@ imx319_set_pad_format(struct v4l2_subdev *sd,
/* Verify chip ID */
static int imx319_identify_module(struct imx319 *imx319)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
int ret;
u32 val;
@@ -2099,7 +2099,7 @@ static int imx319_identify_module(struct imx319 *imx319)
return ret;
if (val != IMX319_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx319->dev, "chip id mismatch: %x!=%x",
IMX319_CHIP_ID, val);
return -EIO;
}
@@ -2112,7 +2112,6 @@ static int imx319_identify_module(struct imx319 *imx319)
/* Start streaming */
static int imx319_start_streaming(struct imx319 *imx319)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
const struct imx319_reg_list *reg_list;
int ret;
@@ -2124,7 +2123,7 @@ static int imx319_start_streaming(struct imx319 *imx319)
reg_list = &imx319_global_setting;
ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set global settings");
+ dev_err(imx319->dev, "failed to set global settings");
return ret;
}
@@ -2132,7 +2131,7 @@ static int imx319_start_streaming(struct imx319 *imx319)
reg_list = &imx319->cur_mode->reg_list;
ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(imx319->dev, "failed to set mode");
return ret;
}
@@ -2160,13 +2159,12 @@ static int imx319_stop_streaming(struct imx319 *imx319)
static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx319 *imx319 = to_imx319(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx319->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx319->dev);
if (ret < 0)
goto err_unlock;
@@ -2179,7 +2177,7 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx319_stop_streaming(imx319);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx319->dev);
}
/* vflip and hflip cannot change during streaming */
@@ -2191,7 +2189,7 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx319->dev);
err_unlock:
mutex_unlock(&imx319->mutex);
@@ -2231,7 +2229,6 @@ static const struct v4l2_subdev_internal_ops imx319_internal_ops = {
/* Initialize control handlers */
static int imx319_init_controls(struct imx319 *imx319)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
s64 vblank_def;
@@ -2311,7 +2308,7 @@ static int imx319_init_controls(struct imx319 *imx319)
0, 0, imx319_test_pattern_menu);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "control init failed: %d", ret);
+ dev_err(imx319->dev, "control init failed: %d", ret);
goto error;
}
@@ -2350,20 +2347,6 @@ static struct imx319_hwcfg *imx319_get_hwcfg(struct device *dev)
if (!cfg)
goto out_err;
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &cfg->ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- goto out_err;
- }
-
- dev_dbg(dev, "ext clk: %d", cfg->ext_clk);
- if (cfg->ext_clk != IMX319_EXT_CLK) {
- dev_err(dev, "external clock %d is not supported",
- cfg->ext_clk);
- goto out_err;
- }
-
ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
bus_cfg.nr_of_link_frequencies,
link_freq_menu_items,
@@ -2385,6 +2368,8 @@ out_err:
static int imx319_probe(struct i2c_client *client)
{
struct imx319 *imx319;
+ unsigned long freq;
+ struct clk *clk;
bool full_power;
int ret;
@@ -2392,24 +2377,37 @@ static int imx319_probe(struct i2c_client *client)
if (!imx319)
return -ENOMEM;
+ imx319->dev = &client->dev;
+
mutex_init(&imx319->mutex);
+ clk = devm_v4l2_sensor_clk_get(imx319->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(imx319->dev, PTR_ERR(clk),
+ "failed to acquire clock\n");
+
+ freq = clk_get_rate(clk);
+ if (freq != IMX319_EXT_CLK)
+ return dev_err_probe(imx319->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx319->sd, client, &imx319_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(imx319->dev);
if (full_power) {
/* Check module identity */
ret = imx319_identify_module(imx319);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(imx319->dev, "failed to find sensor: %d", ret);
goto error_probe;
}
}
- imx319->hwcfg = imx319_get_hwcfg(&client->dev);
+ imx319->hwcfg = imx319_get_hwcfg(imx319->dev);
if (!imx319->hwcfg) {
- dev_err(&client->dev, "failed to get hwcfg");
+ dev_err(imx319->dev, "failed to get hwcfg");
ret = -ENODEV;
goto error_probe;
}
@@ -2419,7 +2417,7 @@ static int imx319_probe(struct i2c_client *client)
ret = imx319_init_controls(imx319);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(imx319->dev, "failed to init controls: %d", ret);
goto error_probe;
}
@@ -2434,27 +2432,27 @@ static int imx319_probe(struct i2c_client *client)
imx319->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx319->sd.entity, 1, &imx319->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx319->dev, "failed to init entity pads: %d", ret);
goto error_handler_free;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(imx319->dev);
+ pm_runtime_enable(imx319->dev);
ret = v4l2_async_register_subdev_sensor(&imx319->sd);
if (ret < 0)
goto error_media_entity_pm;
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(imx319->dev);
return 0;
error_media_entity_pm:
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(imx319->dev);
if (full_power)
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(imx319->dev);
media_entity_cleanup(&imx319->sd.entity);
error_handler_free:
@@ -2475,9 +2473,9 @@ static void imx319_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx319->dev);
+ if (!pm_runtime_status_suspended(imx319->dev))
+ pm_runtime_set_suspended(imx319->dev);
mutex_destroy(&imx319->mutex);
}
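Like imx258 above, imx319 only writes control values to the hardware while the sensor is powered: pm_runtime_get_if_in_use() returning 0 means the device is suspended, so the value is merely cached and later applied by __v4l2_ctrl_handler_setup() at stream-on. A sketch of the guard; the helper names are hypothetical:

    static int sensor_set_ctrl(struct v4l2_ctrl *ctrl)
    {
        struct sensor *s = ctrl_to_sensor(ctrl);        /* hypothetical */
        int ret;

        /* Suspended: keep the cached value, write it at power-up. */
        if (!pm_runtime_get_if_in_use(s->dev))
            return 0;

        ret = sensor_write_ctrl(s, ctrl);               /* hypothetical */

        pm_runtime_put(s->dev);
        return ret;
    }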
diff --git a/drivers/media/i2c/imx334.c b/drivers/media/i2c/imx334.c
index 846b9928d4e8..9654f9268056 100644
--- a/drivers/media/i2c/imx334.c
+++ b/drivers/media/i2c/imx334.c
@@ -118,6 +118,9 @@
#define IMX334_REG_TP CCI_REG8(0x329e)
#define IMX334_TP_COLOR_HBARS 0xa
#define IMX334_TP_COLOR_VBARS 0xb
+#define IMX334_TP_BLACK 0x0
+#define IMX334_TP_WHITE 0x1
+#define IMX334_TP_BLACK_GREY 0xc
#define IMX334_TPG_EN_DOUT CCI_REG8(0x329c)
#define IMX334_TP_ENABLE 0x1
@@ -398,12 +401,18 @@ static const char * const imx334_test_pattern_menu[] = {
"Disabled",
"Vertical Color Bars",
"Horizontal Color Bars",
+ "Black and Grey Bars",
+ "Black Color",
+ "White Color",
};
static const int imx334_test_pattern_val[] = {
IMX334_TP_DISABLE,
IMX334_TP_COLOR_HBARS,
IMX334_TP_COLOR_VBARS,
+ IMX334_TP_BLACK_GREY,
+ IMX334_TP_BLACK,
+ IMX334_TP_WHITE,
};
static const struct cci_reg_sequence raw10_framefmt_regs[] = {
@@ -997,7 +1006,7 @@ static int imx334_parse_hw_config(struct imx334 *imx334)
"failed to get reset gpio\n");
/* Get sensor input clock */
- imx334->inclk = devm_clk_get(imx334->dev, NULL);
+ imx334->inclk = devm_v4l2_sensor_clk_get(imx334->dev, NULL);
if (IS_ERR(imx334->inclk))
return dev_err_probe(imx334->dev, PTR_ERR(imx334->inclk),
"could not get inclk\n");
@@ -1070,6 +1079,10 @@ static int imx334_power_on(struct device *dev)
struct imx334 *imx334 = to_imx334(sd);
int ret;
+ /*
+ * Note: this misinterprets reset assertion - do not re-use this code.
+ * The XCLR pin is driven with the wrong logical level for a reset signal.
+ */
gpiod_set_value_cansleep(imx334->reset_gpio, 1);
ret = clk_prepare_enable(imx334->inclk);
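The new imx334 test patterns extend two parallel arrays: the menu strings registered with V4L2 and the register values written to IMX334_REG_TP. Both are indexed by the same control value, so their order must stay in lockstep. A reduced sketch of the lookup (the real handler also toggles the TPG enable registers):

    /* ctrl->val indexes both imx334_test_pattern_menu[] and
     * imx334_test_pattern_val[]; the two arrays must stay in order. */
    ret = cci_write(imx334->cci, IMX334_REG_TP,
                    imx334_test_pattern_val[ctrl->val], NULL);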
diff --git a/drivers/media/i2c/imx335.c b/drivers/media/i2c/imx335.c
index 9b4db4cd4929..5790aa4fabeb 100644
--- a/drivers/media/i2c/imx335.c
+++ b/drivers/media/i2c/imx335.c
@@ -35,6 +35,7 @@
/* Lines per frame */
#define IMX335_REG_VMAX CCI_REG24_LE(0x3030)
+#define IMX335_REG_HMAX CCI_REG16_LE(0x3034)
#define IMX335_REG_OPB_SIZE_V CCI_REG8(0x304c)
#define IMX335_REG_ADBIT CCI_REG8(0x3050)
@@ -42,10 +43,13 @@
#define IMX335_REG_SHUTTER CCI_REG24_LE(0x3058)
#define IMX335_EXPOSURE_MIN 1
-#define IMX335_EXPOSURE_OFFSET 9
+#define IMX335_SHUTTER_MIN 9
+#define IMX335_SHUTTER_MIN_BINNED 17
#define IMX335_EXPOSURE_STEP 1
#define IMX335_EXPOSURE_DEFAULT 0x0648
+#define IMX335_REG_AREA2_WIDTH_1 CCI_REG16_LE(0x3072)
+
#define IMX335_REG_AREA3_ST_ADR_1 CCI_REG16_LE(0x3074)
#define IMX335_REG_AREA3_WIDTH_1 CCI_REG16_LE(0x3076)
@@ -56,6 +60,9 @@
#define IMX335_AGAIN_STEP 1
#define IMX335_AGAIN_DEFAULT 0
+/* Vertical flip */
+#define IMX335_REG_VREVERSE CCI_REG8(0x304f)
+
#define IMX335_REG_TPG_TESTCLKEN CCI_REG8(0x3148)
#define IMX335_REG_INCLKSEL1 CCI_REG16_LE(0x314c)
@@ -121,12 +128,19 @@
#define IMX335_NUM_DATA_LANES 4
/* IMX335 native and active pixel array size. */
-#define IMX335_NATIVE_WIDTH 2616U
-#define IMX335_NATIVE_HEIGHT 1964U
-#define IMX335_PIXEL_ARRAY_LEFT 12U
-#define IMX335_PIXEL_ARRAY_TOP 12U
-#define IMX335_PIXEL_ARRAY_WIDTH 2592U
-#define IMX335_PIXEL_ARRAY_HEIGHT 1944U
+static const struct v4l2_rect imx335_native_area = {
+ .top = 0,
+ .left = 0,
+ .width = 2696,
+ .height = 2044,
+};
+
+static const struct v4l2_rect imx335_active_area = {
+ .top = 50,
+ .left = 36,
+ .width = 2624,
+ .height = 1944,
+};
/**
* struct imx335_reg_list - imx335 sensor register list
@@ -144,8 +158,14 @@ static const char * const imx335_supply_name[] = {
"dvdd", /* Digital Core (1.2V) supply */
};
+enum imx335_scan_mode {
+ IMX335_ALL_PIXEL,
+ IMX335_2_2_BINNING,
+};
+
/**
* struct imx335_mode - imx335 sensor mode structure
+ * @scan_mode: Configuration scan mode (all-pixel / 2x2 binning)
* @width: Frame width
* @height: Frame height
* @code: Format code
@@ -155,8 +175,11 @@ static const char * const imx335_supply_name[] = {
* @vblank_max: Maximum vertical blanking in lines
* @pclk: Sensor pixel clock
* @reg_list: Register list for sensor mode
+ * @vflip_normal: Register list for vflip (normal readout)
+ * @vflip_inverted: Register list for vflip (inverted readout)
*/
struct imx335_mode {
+ enum imx335_scan_mode scan_mode;
u32 width;
u32 height;
u32 code;
@@ -166,6 +189,8 @@ struct imx335_mode {
u32 vblank_max;
u64 pclk;
struct imx335_reg_list reg_list;
+ struct imx335_reg_list vflip_normal;
+ struct imx335_reg_list vflip_inverted;
};
/**
@@ -183,12 +208,12 @@ struct imx335_mode {
* @pclk_ctrl: Pointer to pixel clock control
* @hblank_ctrl: Pointer to horizontal blanking control
* @vblank_ctrl: Pointer to vertical blanking control
+ * @vflip: Pointer to vertical flip control
* @exp_ctrl: Pointer to exposure control
* @again_ctrl: Pointer to analog gain control
* @vblank: Vertical blanking in lines
* @lane_mode: Mode for number of connected data lanes
* @cur_mode: Pointer to current selected sensor mode
- * @mutex: Mutex for serializing sensor controls
* @link_freq_bitmap: Menu bitmap for link_freq_ctrl
* @cur_mbus_code: Currently selected media bus format code
*/
@@ -207,6 +232,7 @@ struct imx335 {
struct v4l2_ctrl *pclk_ctrl;
struct v4l2_ctrl *hblank_ctrl;
struct v4l2_ctrl *vblank_ctrl;
+ struct v4l2_ctrl *vflip;
struct {
struct v4l2_ctrl *exp_ctrl;
struct v4l2_ctrl *again_ctrl;
@@ -214,7 +240,6 @@ struct imx335 {
u32 vblank;
u32 lane_mode;
const struct imx335_mode *cur_mode;
- struct mutex mutex;
unsigned long link_freq_bitmap;
u32 cur_mbus_code;
};
@@ -252,17 +277,37 @@ static const int imx335_tpg_val[] = {
};
/* Sensor mode registers */
-static const struct cci_reg_sequence mode_2592x1940_regs[] = {
+static const struct cci_reg_sequence mode_2592x1944_regs[] = {
{ IMX335_REG_MODE_SELECT, IMX335_MODE_STANDBY },
{ IMX335_REG_MASTER_MODE, 0x00 },
{ IMX335_REG_WINMODE, 0x04 },
+ { IMX335_REG_HMAX, 550 },
{ IMX335_REG_HTRIMMING_START, 48 },
{ IMX335_REG_HNUM, 2592 },
{ IMX335_REG_Y_OUT_SIZE, 1944 },
- { IMX335_REG_AREA3_ST_ADR_1, 176 },
+ { IMX335_REG_AREA2_WIDTH_1, 40 },
{ IMX335_REG_AREA3_WIDTH_1, 3928 },
{ IMX335_REG_OPB_SIZE_V, 0 },
{ IMX335_REG_XVS_XHS_DRV, 0x00 },
+};
+
+static const struct cci_reg_sequence mode_1312x972_regs[] = {
+ { IMX335_REG_MODE_SELECT, IMX335_MODE_STANDBY },
+ { IMX335_REG_MASTER_MODE, 0x00 },
+ { IMX335_REG_WINMODE, 0x01 },
+ { IMX335_REG_HMAX, 275 },
+ { IMX335_REG_HTRIMMING_START, 48 },
+ { IMX335_REG_HNUM, 2600 },
+ { IMX335_REG_Y_OUT_SIZE, 972 },
+ { IMX335_REG_AREA2_WIDTH_1, 48 },
+ { IMX335_REG_AREA3_WIDTH_1, 3936 },
+ { IMX335_REG_OPB_SIZE_V, 0 },
+ { IMX335_REG_XVS_XHS_DRV, 0x00 },
+ { CCI_REG8(0x3300), 1 }, /* TCYCLE */
+ { CCI_REG8(0x3199), 0x30 }, /* HADD/VADD */
+};
+
+static const struct cci_reg_sequence imx335_common_regs[] = {
{ CCI_REG8(0x3288), 0x21 },
{ CCI_REG8(0x328a), 0x02 },
{ CCI_REG8(0x3414), 0x05 },
@@ -333,16 +378,92 @@ static const struct cci_reg_sequence mode_2592x1940_regs[] = {
{ CCI_REG8(0x3a00), 0x00 },
};
-static const struct cci_reg_sequence raw10_framefmt_regs[] = {
- { IMX335_REG_ADBIT, 0x00 },
- { IMX335_REG_MDBIT, 0x00 },
- { IMX335_REG_ADBIT1, 0x1ff },
+static const struct cci_reg_sequence mode_2592x1944_vflip_normal[] = {
+ { IMX335_REG_AREA3_ST_ADR_1, 176 },
+
+ /* Undocumented V-Flip related registers on page 55 of the datasheet. */
+ { CCI_REG8(0x3081), 0x02, },
+ { CCI_REG8(0x3083), 0x02, },
+ { CCI_REG16_LE(0x30b6), 0x00 },
+ { CCI_REG16_LE(0x3116), 0x08 },
};
-static const struct cci_reg_sequence raw12_framefmt_regs[] = {
- { IMX335_REG_ADBIT, 0x01 },
- { IMX335_REG_MDBIT, 0x01 },
- { IMX335_REG_ADBIT1, 0x47 },
+static const struct cci_reg_sequence mode_2592x1944_vflip_inverted[] = {
+ { IMX335_REG_AREA3_ST_ADR_1, 4112 },
+
+ /* Undocumented V-Flip related registers on page 55 of the datasheet. */
+ { CCI_REG8(0x3081), 0xfe, },
+ { CCI_REG8(0x3083), 0xfe, },
+ { CCI_REG16_LE(0x30b6), 0x1fa },
+ { CCI_REG16_LE(0x3116), 0x002 },
+};
+
+static const struct cci_reg_sequence mode_1312x972_vflip_normal[] = {
+ { IMX335_REG_AREA3_ST_ADR_1, 176 },
+
+ /* Undocumented */
+ { CCI_REG8(0x3078), 0x04 },
+ { CCI_REG8(0x3079), 0xfd },
+ { CCI_REG8(0x307a), 0x04 },
+ { CCI_REG8(0x307b), 0xfe },
+ { CCI_REG8(0x307c), 0x04 },
+ { CCI_REG8(0x307d), 0xfb },
+ { CCI_REG8(0x307e), 0x04 },
+ { CCI_REG8(0x307f), 0x02 },
+ { CCI_REG8(0x3080), 0x04 },
+ { CCI_REG8(0x3081), 0xfd },
+ { CCI_REG8(0x3082), 0x04 },
+ { CCI_REG8(0x3083), 0xfe },
+ { CCI_REG8(0x3084), 0x04 },
+ { CCI_REG8(0x3085), 0xfb },
+ { CCI_REG8(0x3086), 0x04 },
+ { CCI_REG8(0x3087), 0x02 },
+ { CCI_REG8(0x30a4), 0x77 },
+ { CCI_REG8(0x30a8), 0x20 },
+ { CCI_REG8(0x30a9), 0x00 },
+ { CCI_REG8(0x30ac), 0x08 },
+ { CCI_REG8(0x30ad), 0x08 },
+ { CCI_REG8(0x30b0), 0x20 },
+ { CCI_REG8(0x30b1), 0x00 },
+ { CCI_REG8(0x30b4), 0x10 },
+ { CCI_REG8(0x30b5), 0x10 },
+ { CCI_REG16_LE(0x30b6), 0x00 },
+ { CCI_REG16_LE(0x3112), 0x10 },
+ { CCI_REG16_LE(0x3116), 0x10 },
+};
+
+static const struct cci_reg_sequence mode_1312x972_vflip_inverted[] = {
+ { IMX335_REG_AREA3_ST_ADR_1, 4112 },
+
+ /* Undocumented */
+ { CCI_REG8(0x3078), 0x04 },
+ { CCI_REG8(0x3079), 0xfd },
+ { CCI_REG8(0x307a), 0x04 },
+ { CCI_REG8(0x307b), 0xfe },
+ { CCI_REG8(0x307c), 0x04 },
+ { CCI_REG8(0x307d), 0xfb },
+ { CCI_REG8(0x307e), 0x04 },
+ { CCI_REG8(0x307f), 0x02 },
+ { CCI_REG8(0x3080), 0xfc },
+ { CCI_REG8(0x3081), 0x05 },
+ { CCI_REG8(0x3082), 0xfc },
+ { CCI_REG8(0x3083), 0x02 },
+ { CCI_REG8(0x3084), 0xfc },
+ { CCI_REG8(0x3085), 0x03 },
+ { CCI_REG8(0x3086), 0xfc },
+ { CCI_REG8(0x3087), 0xfe },
+ { CCI_REG8(0x30a4), 0x77 },
+ { CCI_REG8(0x30a8), 0x20 },
+ { CCI_REG8(0x30a9), 0x00 },
+ { CCI_REG8(0x30ac), 0x08 },
+ { CCI_REG8(0x30ad), 0x78 },
+ { CCI_REG8(0x30b0), 0x20 },
+ { CCI_REG8(0x30b1), 0x00 },
+ { CCI_REG8(0x30b4), 0x10 },
+ { CCI_REG8(0x30b5), 0x70 },
+ { CCI_REG16_LE(0x30b6), 0x01f2 },
+ { CCI_REG16_LE(0x3112), 0x10 },
+ { CCI_REG16_LE(0x3116), 0x02 },
};
static const struct cci_reg_sequence mipi_data_rate_1188Mbps[] = {
@@ -407,17 +528,49 @@ static const u32 imx335_mbus_codes[] = {
};
/* Supported sensor mode configurations */
-static const struct imx335_mode supported_mode = {
- .width = 2592,
- .height = 1944,
- .hblank = 342,
- .vblank = 2556,
- .vblank_min = 2556,
- .vblank_max = 133060,
- .pclk = 396000000,
- .reg_list = {
- .num_of_regs = ARRAY_SIZE(mode_2592x1940_regs),
- .regs = mode_2592x1940_regs,
+static const struct imx335_mode supported_modes[] = {
+ {
+ .scan_mode = IMX335_ALL_PIXEL,
+ .width = 2592,
+ .height = 1944,
+ .hblank = 342,
+ .vblank = 2556,
+ .vblank_min = 2556,
+ .vblank_max = 133060,
+ .pclk = 396000000,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2592x1944_regs),
+ .regs = mode_2592x1944_regs,
+ },
+ .vflip_normal = {
+ .num_of_regs = ARRAY_SIZE(mode_2592x1944_vflip_normal),
+ .regs = mode_2592x1944_vflip_normal,
+ },
+ .vflip_inverted = {
+ .num_of_regs = ARRAY_SIZE(mode_2592x1944_vflip_inverted),
+ .regs = mode_2592x1944_vflip_inverted,
+ }
+ }, {
+ .scan_mode = IMX335_2_2_BINNING,
+ .width = 1312,
+ .height = 972,
+ .hblank = 155,
+ .vblank = 3528,
+ .vblank_min = 3528,
+ .vblank_max = 133060,
+ .pclk = 396000000,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1312x972_regs),
+ .regs = mode_1312x972_regs,
+ },
+ .vflip_normal = {
+ .num_of_regs = ARRAY_SIZE(mode_1312x972_vflip_normal),
+ .regs = mode_1312x972_vflip_normal,
+ },
+ .vflip_inverted = {
+ .num_of_regs = ARRAY_SIZE(mode_1312x972_vflip_inverted),
+ .regs = mode_1312x972_vflip_inverted,
+ },
},
};
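Given the per-mode timing above, the nominal frame rate follows the usual line/frame model (an assumption, but the standard one for these sensors): pclk / ((width + hblank) * (height + vblank)). For the 2592x1944 all-pixel mode this works out to roughly 30 fps:

    #include <stdio.h>

    int main(void)
    {
        /* Timing from the 2592x1944 all-pixel mode above. */
        unsigned long pclk = 396000000UL;
        unsigned long llp = 2592 + 342;         /* width + hblank */
        unsigned long fll = 1944 + 2556;        /* height + vblank */

        /* 396000000 / (2934 * 4500) ~= 29.99 fps */
        printf("%.2f fps\n", (double)pclk / (llp * fll));
        return 0;
    }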
@@ -449,7 +602,8 @@ static int imx335_update_controls(struct imx335 *imx335,
if (ret)
return ret;
- ret = __v4l2_ctrl_s_ctrl(imx335->hblank_ctrl, mode->hblank);
+ ret = __v4l2_ctrl_modify_range(imx335->hblank_ctrl, mode->hblank,
+ mode->hblank, 1, mode->hblank);
if (ret)
return ret;
@@ -492,6 +646,19 @@ static int imx335_update_exp_gain(struct imx335 *imx335, u32 exposure, u32 gain)
return ret;
}
+static int imx335_update_vertical_flip(struct imx335 *imx335, u32 vflip)
+{
+ const struct imx335_reg_list * const vflip_regs =
+ vflip ? &imx335->cur_mode->vflip_inverted :
+ &imx335->cur_mode->vflip_normal;
+ int ret = 0;
+
+ cci_multi_reg_write(imx335->cci, vflip_regs->regs,
+ vflip_regs->num_of_regs, &ret);
+
+ return cci_write(imx335->cci, IMX335_REG_VREVERSE, vflip, &ret);
+}
+
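imx335_update_vertical_flip() leans on the v4l2-cci error-accumulation idiom: cci_write() and cci_multi_reg_write() become no-ops once *err is non-zero, so a sequence of writes needs only one error variable and one final check. In sketch form (REG_FOO is illustrative):

    int ret = 0;

    /* Each call short-circuits if ret is already non-zero. */
    cci_multi_reg_write(cci, regs, num_regs, &ret);
    cci_write(cci, REG_FOO, val, &ret);

    return ret;    /* first failure, or 0 on success */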
static int imx335_update_test_pattern(struct imx335 *imx335, u32 pattern_index)
{
int ret = 0;
@@ -553,18 +720,22 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
/* Propagate change of current control to all related controls */
if (ctrl->id == V4L2_CID_VBLANK) {
+ u32 shutter_min = IMX335_SHUTTER_MIN;
+ u32 lpfr;
+
imx335->vblank = imx335->vblank_ctrl->val;
+ lpfr = imx335->vblank + imx335->cur_mode->height;
dev_dbg(imx335->dev, "Received vblank %u, new lpfr %u\n",
- imx335->vblank,
- imx335->vblank + imx335->cur_mode->height);
+ imx335->vblank, lpfr);
+
+ if (imx335->cur_mode->scan_mode == IMX335_2_2_BINNING)
+ shutter_min = IMX335_SHUTTER_MIN_BINNED;
ret = __v4l2_ctrl_modify_range(imx335->exp_ctrl,
IMX335_EXPOSURE_MIN,
- imx335->vblank +
- imx335->cur_mode->height -
- IMX335_EXPOSURE_OFFSET,
- 1, IMX335_EXPOSURE_DEFAULT);
+ lpfr - shutter_min, 1,
+ IMX335_EXPOSURE_DEFAULT);
if (ret)
return ret;
}
@@ -594,6 +765,10 @@ static int imx335_set_ctrl(struct v4l2_ctrl *ctrl)
ret = imx335_update_exp_gain(imx335, exposure, analog_gain);
break;
+ case V4L2_CID_VFLIP:
+ ret = imx335_update_vertical_flip(imx335, ctrl->val);
+
+ break;
case V4L2_CID_TEST_PATTERN:
ret = imx335_update_test_pattern(imx335, ctrl->val);
@@ -660,17 +835,16 @@ static int imx335_enum_frame_size(struct v4l2_subdev *sd,
struct imx335 *imx335 = to_imx335(sd);
u32 code;
- /* Only a single supported_mode available. */
- if (fsize->index > 0)
+ if (fsize->index >= ARRAY_SIZE(supported_modes))
return -EINVAL;
code = imx335_get_format_code(imx335, fsize->code);
if (fsize->code != code)
return -EINVAL;
- fsize->min_width = supported_mode.width;
+ fsize->min_width = supported_modes[fsize->index].width;
fsize->max_width = fsize->min_width;
- fsize->min_height = supported_mode.height;
+ fsize->min_height = supported_modes[fsize->index].height;
fsize->max_height = fsize->min_height;
return 0;
@@ -698,36 +872,6 @@ static void imx335_fill_pad_format(struct imx335 *imx335,
}
/**
- * imx335_get_pad_format() - Get subdevice pad format
- * @sd: pointer to imx335 V4L2 sub-device structure
- * @sd_state: V4L2 sub-device configuration
- * @fmt: V4L2 sub-device format need to be set
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx335_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
-{
- struct imx335 *imx335 = to_imx335(sd);
-
- mutex_lock(&imx335->mutex);
-
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *framefmt;
-
- framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
- fmt->format = *framefmt;
- } else {
- imx335_fill_pad_format(imx335, imx335->cur_mode, fmt);
- }
-
- mutex_unlock(&imx335->mutex);
-
- return 0;
-}
-
-/**
* imx335_set_pad_format() - Set subdevice pad format
* @sd: pointer to imx335 V4L2 sub-device structure
* @sd_state: V4L2 sub-device configuration
@@ -740,12 +884,16 @@ static int imx335_set_pad_format(struct v4l2_subdev *sd,
struct v4l2_subdev_format *fmt)
{
struct imx335 *imx335 = to_imx335(sd);
+ struct v4l2_mbus_framefmt *format;
const struct imx335_mode *mode;
+ struct v4l2_rect *crop;
int i, ret = 0;
- mutex_lock(&imx335->mutex);
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
- mode = &supported_mode;
for (i = 0; i < ARRAY_SIZE(imx335_mbus_codes); i++) {
if (imx335_mbus_codes[i] == fmt->format.code)
imx335->cur_mbus_code = imx335_mbus_codes[i];
@@ -753,19 +901,25 @@ static int imx335_set_pad_format(struct v4l2_subdev *sd,
imx335_fill_pad_format(imx335, mode, fmt);
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *framefmt;
+ format = v4l2_subdev_state_get_format(sd_state, fmt->pad);
+ *format = fmt->format;
- framefmt = v4l2_subdev_state_get_format(sd_state, fmt->pad);
- *framefmt = fmt->format;
- } else {
+ crop = v4l2_subdev_state_get_crop(sd_state, fmt->pad);
+ crop->width = fmt->format.width;
+ crop->height = fmt->format.height;
+ if (mode->scan_mode == IMX335_2_2_BINNING) {
+ crop->width *= 2;
+ crop->height *= 2;
+ }
+ crop->left = (imx335_native_area.width - crop->width) / 2;
+ crop->top = (imx335_native_area.height - crop->height) / 2;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
ret = imx335_update_controls(imx335, mode);
if (!ret)
imx335->cur_mode = mode;
}
- mutex_unlock(&imx335->mutex);
-
return ret;
}
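The crop rectangle computed in imx335_set_pad_format() doubles the format size for the 2x2-binned mode and centres it in the native array, which is exactly how the imx335_active_area rectangle above comes about. Worked numbers for the 1312x972 mode:

    #include <stdio.h>

    int main(void)
    {
        unsigned int native_w = 2696, native_h = 2044;  /* native area */
        unsigned int w = 1312 * 2, h = 972 * 2;         /* 2x2 binned crop */

        /* left = (2696 - 2624) / 2 = 36, top = (2044 - 1944) / 2 = 50:
         * the imx335_active_area rectangle. */
        printf("crop (%u,%u)/%ux%u\n",
               (native_w - w) / 2, (native_h - h) / 2, w, h);
        return 0;
    }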
@@ -783,14 +937,12 @@ static int imx335_init_state(struct v4l2_subdev *sd,
struct v4l2_subdev_format fmt = { 0 };
fmt.which = sd_state ? V4L2_SUBDEV_FORMAT_TRY : V4L2_SUBDEV_FORMAT_ACTIVE;
- imx335_fill_pad_format(imx335, &supported_mode, &fmt);
+ imx335_fill_pad_format(imx335, &supported_modes[0], &fmt);
- mutex_lock(&imx335->mutex);
__v4l2_ctrl_modify_range(imx335->link_freq_ctrl, 0,
__fls(imx335->link_freq_bitmap),
~(imx335->link_freq_bitmap),
__ffs(imx335->link_freq_bitmap));
- mutex_unlock(&imx335->mutex);
return imx335_set_pad_format(sd, sd_state, &fmt);
}
@@ -808,22 +960,18 @@ static int imx335_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_selection *sel)
{
switch (sel->target) {
- case V4L2_SEL_TGT_NATIVE_SIZE:
- sel->r.top = 0;
- sel->r.left = 0;
- sel->r.width = IMX335_NATIVE_WIDTH;
- sel->r.height = IMX335_NATIVE_HEIGHT;
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
return 0;
- case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r = imx335_native_area;
+ return 0;
+
case V4L2_SEL_TGT_CROP_DEFAULT:
case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.top = IMX335_PIXEL_ARRAY_TOP;
- sel->r.left = IMX335_PIXEL_ARRAY_LEFT;
- sel->r.width = IMX335_PIXEL_ARRAY_WIDTH;
- sel->r.height = IMX335_PIXEL_ARRAY_HEIGHT;
-
+ sel->r = imx335_active_area;
return 0;
}
@@ -832,39 +980,65 @@ static int imx335_get_selection(struct v4l2_subdev *sd,
static int imx335_set_framefmt(struct imx335 *imx335)
{
- switch (imx335->cur_mbus_code) {
- case MEDIA_BUS_FMT_SRGGB10_1X10:
- return cci_multi_reg_write(imx335->cci, raw10_framefmt_regs,
- ARRAY_SIZE(raw10_framefmt_regs),
- NULL);
-
- case MEDIA_BUS_FMT_SRGGB12_1X12:
- return cci_multi_reg_write(imx335->cci, raw12_framefmt_regs,
- ARRAY_SIZE(raw12_framefmt_regs),
- NULL);
+ /*
+ * In the all-pixel scan mode the AD conversion must match the output
+ * bit width requested.
+ *
+ * However, when 2x2 binning is enabled, the AD conversion is always
+ * 10-bit, so we ensure ADBIT is clear and ADBIT1 is assigned 0x1ff.
+ * That's as much as the documentation gives us...
+ */
+ int ret = 0;
+ u8 bpp = imx335->cur_mbus_code == MEDIA_BUS_FMT_SRGGB10_1X10 ? 10 : 12;
+ u8 ad_conv = bpp;
+
+ /* Start with the output mode */
+ cci_write(imx335->cci, IMX335_REG_MDBIT, bpp == 12, &ret);
+
+ /* Enforce 10 bit AD on binning modes */
+ if (imx335->cur_mode->scan_mode == IMX335_2_2_BINNING)
+ ad_conv = 10;
+
+ /* AD Conversion configuration */
+ if (ad_conv == 10) {
+ cci_write(imx335->cci, IMX335_REG_ADBIT, 0x00, &ret);
+ cci_write(imx335->cci, IMX335_REG_ADBIT1, 0x1ff, &ret);
+ } else { /* 12 bit AD Conversion */
+ cci_write(imx335->cci, IMX335_REG_ADBIT, 0x01, &ret);
+ cci_write(imx335->cci, IMX335_REG_ADBIT1, 0x47, &ret);
}
- return -EINVAL;
+ return ret;
}
/**
- * imx335_start_streaming() - Start sensor stream
- * @imx335: pointer to imx335 device
+ * imx335_enable_streams() - Enable sensor streams
+ * @sd: V4L2 subdevice
+ * @state: V4L2 subdevice state
+ * @pad: The pad to enable
+ * @streams_mask: Bitmask of streams to enable
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx335_start_streaming(struct imx335 *imx335)
+static int imx335_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
+ struct imx335 *imx335 = to_imx335(sd);
const struct imx335_reg_list *reg_list;
int ret;
+ ret = pm_runtime_resume_and_get(imx335->dev);
+ if (ret < 0)
+ return ret;
+
/* Setup PLL */
reg_list = &link_freq_reglist[__ffs(imx335->link_freq_bitmap)];
ret = cci_multi_reg_write(imx335->cci, reg_list->regs,
reg_list->num_of_regs, NULL);
if (ret) {
dev_err(imx335->dev, "%s failed to set plls\n", __func__);
- return ret;
+ goto err_rpm_put;
}
/* Write sensor mode registers */
@@ -873,27 +1047,35 @@ static int imx335_start_streaming(struct imx335 *imx335)
reg_list->num_of_regs, NULL);
if (ret) {
dev_err(imx335->dev, "fail to write initial registers\n");
- return ret;
+ goto err_rpm_put;
+ }
+
+ /* Write sensor common registers */
+ ret = cci_multi_reg_write(imx335->cci, imx335_common_regs,
+ ARRAY_SIZE(imx335_common_regs), NULL);
+ if (ret) {
+ dev_err(imx335->dev, "fail to write initial registers\n");
+ goto err_rpm_put;
}
ret = imx335_set_framefmt(imx335);
if (ret) {
dev_err(imx335->dev, "%s failed to set frame format: %d\n",
__func__, ret);
- return ret;
+ goto err_rpm_put;
}
/* Configure lanes */
ret = cci_write(imx335->cci, IMX335_REG_LANEMODE,
imx335->lane_mode, NULL);
if (ret)
- return ret;
+ goto err_rpm_put;
/* Setup handler will write actual exposure and gain */
ret = __v4l2_ctrl_handler_setup(imx335->sd.ctrl_handler);
if (ret) {
dev_err(imx335->dev, "fail to setup handler\n");
- return ret;
+ goto err_rpm_put;
}
/* Start streaming */
@@ -901,62 +1083,39 @@ static int imx335_start_streaming(struct imx335 *imx335)
IMX335_MODE_STREAMING, NULL);
if (ret) {
dev_err(imx335->dev, "fail to start streaming\n");
- return ret;
+ goto err_rpm_put;
}
/* Initial regulator stabilization period */
usleep_range(18000, 20000);
return 0;
-}
-/**
- * imx335_stop_streaming() - Stop sensor stream
- * @imx335: pointer to imx335 device
- *
- * Return: 0 if successful, error code otherwise.
- */
-static int imx335_stop_streaming(struct imx335 *imx335)
-{
- return cci_write(imx335->cci, IMX335_REG_MODE_SELECT,
- IMX335_MODE_STANDBY, NULL);
+err_rpm_put:
+ pm_runtime_put(imx335->dev);
+
+ return ret;
}
/**
- * imx335_set_stream() - Enable sensor streaming
- * @sd: pointer to imx335 subdevice
- * @enable: set to enable sensor streaming
+ * imx335_disable_streams() - Disable sensor streams
+ * @sd: V4L2 subdevice
+ * @state: V4L2 subdevice state
+ * @pad: The pad to disable
+ * @streams_mask: Bitmask of streams to disable
*
* Return: 0 if successful, error code otherwise.
*/
-static int imx335_set_stream(struct v4l2_subdev *sd, int enable)
+static int imx335_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
{
struct imx335 *imx335 = to_imx335(sd);
int ret;
- mutex_lock(&imx335->mutex);
-
- if (enable) {
- ret = pm_runtime_resume_and_get(imx335->dev);
- if (ret)
- goto error_unlock;
-
- ret = imx335_start_streaming(imx335);
- if (ret)
- goto error_power_off;
- } else {
- imx335_stop_streaming(imx335);
- pm_runtime_put(imx335->dev);
- }
-
- mutex_unlock(&imx335->mutex);
-
- return 0;
-
-error_power_off:
+ ret = cci_write(imx335->cci, IMX335_REG_MODE_SELECT,
+ IMX335_MODE_STANDBY, NULL);
pm_runtime_put(imx335->dev);
-error_unlock:
- mutex_unlock(&imx335->mutex);
return ret;
}
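The conversion above replaces the driver's own s_stream with the enable_streams/disable_streams pad operations; v4l2_subdev_s_stream_helper() keeps the legacy .s_stream entry point working on top of them, and runtime PM moves inside the pad ops. A reduced sketch of the shape, with to_sensor() and start_streaming() as hypothetical stand-ins:

    static int sensor_enable_streams(struct v4l2_subdev *sd,
                                     struct v4l2_subdev_state *state, u32 pad,
                                     u64 streams_mask)
    {
        struct sensor *s = to_sensor(sd);       /* hypothetical container_of */
        int ret;

        ret = pm_runtime_resume_and_get(s->dev);
        if (ret < 0)
            return ret;

        ret = start_streaming(s);               /* hypothetical */
        if (ret)
            pm_runtime_put(s->dev);             /* drop power on failure */

        return ret;
    }

    static const struct v4l2_subdev_video_ops sensor_video_ops = {
        .s_stream = v4l2_subdev_s_stream_helper,
    };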
@@ -1009,8 +1168,8 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
imx335->reset_gpio = devm_gpiod_get_optional(imx335->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(imx335->reset_gpio)) {
- dev_err(imx335->dev, "failed to get reset gpio %ld\n",
- PTR_ERR(imx335->reset_gpio));
+ dev_err(imx335->dev, "failed to get reset gpio %pe\n",
+ imx335->reset_gpio);
return PTR_ERR(imx335->reset_gpio);
}
@@ -1026,11 +1185,10 @@ static int imx335_parse_hw_config(struct imx335 *imx335)
}
/* Get sensor input clock */
- imx335->inclk = devm_clk_get(imx335->dev, NULL);
- if (IS_ERR(imx335->inclk)) {
- dev_err(imx335->dev, "could not get inclk\n");
- return PTR_ERR(imx335->inclk);
- }
+ imx335->inclk = devm_v4l2_sensor_clk_get(imx335->dev, NULL);
+ if (IS_ERR(imx335->inclk))
+ return dev_err_probe(imx335->dev, PTR_ERR(imx335->inclk),
+ "could not get inclk\n");
rate = clk_get_rate(imx335->inclk);
if (rate != IMX335_INCLK_RATE) {
@@ -1077,7 +1235,7 @@ done_endpoint_free:
/* V4l2 subdevice ops */
static const struct v4l2_subdev_video_ops imx335_video_ops = {
- .s_stream = imx335_set_stream,
+ .s_stream = v4l2_subdev_s_stream_helper,
};
static const struct v4l2_subdev_pad_ops imx335_pad_ops = {
@@ -1085,8 +1243,10 @@ static const struct v4l2_subdev_pad_ops imx335_pad_ops = {
.enum_frame_size = imx335_enum_frame_size,
.get_selection = imx335_get_selection,
.set_selection = imx335_get_selection,
- .get_fmt = imx335_get_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
.set_fmt = imx335_set_pad_format,
+ .enable_streams = imx335_enable_streams,
+ .disable_streams = imx335_disable_streams,
};
static const struct v4l2_subdev_ops imx335_subdev_ops = {
@@ -1168,7 +1328,7 @@ static int imx335_init_controls(struct imx335 *imx335)
struct v4l2_ctrl_handler *ctrl_hdlr = &imx335->ctrl_handler;
const struct imx335_mode *mode = imx335->cur_mode;
struct v4l2_fwnode_device_properties props;
- u32 lpfr;
+ u32 lpfr, shutter_min;
int ret;
ret = v4l2_fwnode_device_parse(imx335->dev, &props);
@@ -1176,20 +1336,20 @@ static int imx335_init_controls(struct imx335 *imx335)
return ret;
/* v4l2_fwnode_device_properties can add two more controls */
- ret = v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
if (ret)
return ret;
- /* Serialize controls with sensor device */
- ctrl_hdlr->lock = &imx335->mutex;
-
/* Initialize exposure and gain */
lpfr = mode->vblank + mode->height;
+ shutter_min = IMX335_SHUTTER_MIN;
+ if (mode->scan_mode == IMX335_2_2_BINNING)
+ shutter_min = IMX335_SHUTTER_MIN_BINNED;
imx335->exp_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
&imx335_ctrl_ops,
V4L2_CID_EXPOSURE,
IMX335_EXPOSURE_MIN,
- lpfr - IMX335_EXPOSURE_OFFSET,
+ lpfr - shutter_min,
IMX335_EXPOSURE_STEP,
IMX335_EXPOSURE_DEFAULT);
@@ -1211,6 +1371,13 @@ static int imx335_init_controls(struct imx335 *imx335)
v4l2_ctrl_cluster(2, &imx335->exp_ctrl);
+ imx335->vflip = v4l2_ctrl_new_std(ctrl_hdlr,
+ &imx335_ctrl_ops,
+ V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ if (imx335->vflip)
+ imx335->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
imx335->vblank_ctrl = v4l2_ctrl_new_std(ctrl_hdlr,
&imx335_ctrl_ops,
V4L2_CID_VBLANK,
@@ -1295,12 +1462,10 @@ static int imx335_probe(struct i2c_client *client)
return ret;
}
- mutex_init(&imx335->mutex);
-
ret = imx335_power_on(imx335->dev);
if (ret) {
dev_err(imx335->dev, "failed to power-on the sensor\n");
- goto error_mutex_destroy;
+ return ret;
}
/* Check module identity */
@@ -1311,7 +1476,7 @@ static int imx335_probe(struct i2c_client *client)
}
/* Set default mode to max resolution */
- imx335->cur_mode = &supported_mode;
+ imx335->cur_mode = &supported_modes[0];
imx335->cur_mbus_code = imx335_mbus_codes[0];
imx335->vblank = imx335->cur_mode->vblank;
@@ -1333,11 +1498,18 @@ static int imx335_probe(struct i2c_client *client)
goto error_handler_free;
}
+ imx335->sd.state_lock = imx335->ctrl_handler.lock;
+ ret = v4l2_subdev_init_finalize(&imx335->sd);
+ if (ret < 0) {
+ dev_err(imx335->dev, "subdev init error\n");
+ goto error_media_entity;
+ }
+
ret = v4l2_async_register_subdev_sensor(&imx335->sd);
if (ret < 0) {
dev_err(imx335->dev,
"failed to register async subdev: %d\n", ret);
- goto error_media_entity;
+ goto error_subdev_cleanup;
}
pm_runtime_set_active(imx335->dev);
@@ -1346,14 +1518,14 @@ static int imx335_probe(struct i2c_client *client)
return 0;
+error_subdev_cleanup:
+ v4l2_subdev_cleanup(&imx335->sd);
error_media_entity:
media_entity_cleanup(&imx335->sd.entity);
error_handler_free:
v4l2_ctrl_handler_free(imx335->sd.ctrl_handler);
error_power_off:
imx335_power_off(imx335->dev);
-error_mutex_destroy:
- mutex_destroy(&imx335->mutex);
return ret;
}
@@ -1367,9 +1539,9 @@ error_mutex_destroy:
static void imx335_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
- struct imx335 *imx335 = to_imx335(sd);
v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
@@ -1377,8 +1549,6 @@ static void imx335_remove(struct i2c_client *client)
if (!pm_runtime_status_suspended(&client->dev))
imx335_power_off(&client->dev);
pm_runtime_set_suspended(&client->dev);
-
- mutex_destroy(&imx335->mutex);
}
static const struct dev_pm_ops imx335_pm_ops = {
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
index b2dce67c0b6b..776107efe386 100644
--- a/drivers/media/i2c/imx355.c
+++ b/drivers/media/i2c/imx355.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Intel Corporation
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -92,11 +94,13 @@ struct imx355_mode {
};
struct imx355_hwcfg {
- u32 ext_clk; /* sensor external clk */
unsigned long link_freq_bitmap;
};
struct imx355 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1136,14 +1140,13 @@ static int imx355_write_reg(struct imx355 *imx355, u16 reg, u32 len, u32 val)
static int imx355_write_regs(struct imx355 *imx355,
const struct imx355_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
int ret;
u32 i;
for (i = 0; i < len; i++) {
ret = imx355_write_reg(imx355, regs[i].address, 1, regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(imx355->dev,
"write reg 0x%4.4x return err %d",
regs[i].address, ret);
@@ -1178,7 +1181,6 @@ static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct imx355 *imx355 = container_of(ctrl->handler,
struct imx355, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
s64 max;
int ret;
@@ -1197,7 +1199,7 @@ static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
* V4L2 control values are only applied
* when the sensor is powered up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(imx355->dev))
return 0;
switch (ctrl->id) {
@@ -1231,12 +1233,12 @@ static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
+ dev_info(imx355->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx355->dev);
return ret;
}
@@ -1385,7 +1387,6 @@ imx355_set_pad_format(struct v4l2_subdev *sd,
/* Start streaming */
static int imx355_start_streaming(struct imx355 *imx355)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
const struct imx355_reg_list *reg_list;
int ret;
@@ -1393,7 +1394,7 @@ static int imx355_start_streaming(struct imx355 *imx355)
reg_list = &imx355_global_setting;
ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set global settings");
+ dev_err(imx355->dev, "failed to set global settings");
return ret;
}
@@ -1401,7 +1402,7 @@ static int imx355_start_streaming(struct imx355 *imx355)
reg_list = &imx355->cur_mode->reg_list;
ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(imx355->dev, "failed to set mode");
return ret;
}
@@ -1429,13 +1430,12 @@ static int imx355_stop_streaming(struct imx355 *imx355)
static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
{
struct imx355 *imx355 = to_imx355(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&imx355->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(imx355->dev);
if (ret < 0)
goto err_unlock;
@@ -1448,7 +1448,7 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
imx355_stop_streaming(imx355);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx355->dev);
}
/* vflip and hflip cannot change during streaming */
@@ -1460,7 +1460,7 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(imx355->dev);
err_unlock:
mutex_unlock(&imx355->mutex);
@@ -1470,7 +1470,6 @@ err_unlock:
/* Verify chip ID */
static int imx355_identify_module(struct imx355 *imx355)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
int ret;
u32 val;
@@ -1479,7 +1478,7 @@ static int imx355_identify_module(struct imx355 *imx355)
return ret;
if (val != IMX355_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(imx355->dev, "chip id mismatch: %x!=%x",
IMX355_CHIP_ID, val);
return -EIO;
}
@@ -1519,7 +1518,6 @@ static const struct v4l2_subdev_internal_ops imx355_internal_ops = {
/* Initialize control handlers */
static int imx355_init_controls(struct imx355 *imx355)
{
- struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -1600,11 +1598,11 @@ static int imx355_init_controls(struct imx355 *imx355)
0, 0, imx355_test_pattern_menu);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "control init failed: %d", ret);
+ dev_err(imx355->dev, "control init failed: %d", ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(imx355->dev, &props);
if (ret)
goto error;
@@ -1648,20 +1646,6 @@ static struct imx355_hwcfg *imx355_get_hwcfg(struct device *dev)
if (!cfg)
goto out_err;
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &cfg->ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- goto out_err;
- }
-
- dev_dbg(dev, "ext clk: %d", cfg->ext_clk);
- if (cfg->ext_clk != IMX355_EXT_CLK) {
- dev_err(dev, "external clock %d is not supported",
- cfg->ext_clk);
- goto out_err;
- }
-
ret = v4l2_link_freq_to_bitmap(dev, bus_cfg.link_frequencies,
bus_cfg.nr_of_link_frequencies,
link_freq_menu_items,
@@ -1683,27 +1667,41 @@ out_err:
static int imx355_probe(struct i2c_client *client)
{
struct imx355 *imx355;
+ unsigned long freq;
int ret;
imx355 = devm_kzalloc(&client->dev, sizeof(*imx355), GFP_KERNEL);
if (!imx355)
return -ENOMEM;
+ imx355->dev = &client->dev;
+
mutex_init(&imx355->mutex);
+ imx355->clk = devm_v4l2_sensor_clk_get(imx355->dev, NULL);
+ if (IS_ERR(imx355->clk))
+ return dev_err_probe(imx355->dev, PTR_ERR(imx355->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(imx355->clk);
+ if (freq != IMX355_EXT_CLK)
+ return dev_err_probe(imx355->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&imx355->sd, client, &imx355_subdev_ops);
/* Check module identity */
ret = imx355_identify_module(imx355);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(imx355->dev, "failed to find sensor: %d", ret);
goto error_probe;
}
- imx355->hwcfg = imx355_get_hwcfg(&client->dev);
+ imx355->hwcfg = imx355_get_hwcfg(imx355->dev);
if (!imx355->hwcfg) {
- dev_err(&client->dev, "failed to get hwcfg");
+ dev_err(imx355->dev, "failed to get hwcfg");
ret = -ENODEV;
goto error_probe;
}
@@ -1713,7 +1711,7 @@ static int imx355_probe(struct i2c_client *client)
ret = imx355_init_controls(imx355);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(imx355->dev, "failed to init controls: %d", ret);
goto error_probe;
}
@@ -1728,7 +1726,7 @@ static int imx355_probe(struct i2c_client *client)
imx355->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&imx355->sd.entity, 1, &imx355->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(imx355->dev, "failed to init entity pads: %d", ret);
goto error_handler_free;
}
@@ -1736,9 +1734,9 @@ static int imx355_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(imx355->dev);
+ pm_runtime_enable(imx355->dev);
+ pm_runtime_idle(imx355->dev);
ret = v4l2_async_register_subdev_sensor(&imx355->sd);
if (ret < 0)
@@ -1747,8 +1745,8 @@ static int imx355_probe(struct i2c_client *client)
return 0;
error_media_entity_runtime_pm:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx355->dev);
+ pm_runtime_set_suspended(imx355->dev);
media_entity_cleanup(&imx355->sd.entity);
error_handler_free:
@@ -1769,8 +1767,8 @@ static void imx355_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(imx355->dev);
+ pm_runtime_set_suspended(imx355->dev);
mutex_destroy(&imx355->mutex);
}
diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c
index c74097a59c42..b3826f803547 100644
--- a/drivers/media/i2c/imx412.c
+++ b/drivers/media/i2c/imx412.c
@@ -927,17 +927,16 @@ static int imx412_parse_hw_config(struct imx412 *imx412)
imx412->reset_gpio = devm_gpiod_get_optional(imx412->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(imx412->reset_gpio)) {
- dev_err(imx412->dev, "failed to get reset gpio %ld\n",
- PTR_ERR(imx412->reset_gpio));
+ dev_err(imx412->dev, "failed to get reset gpio %pe\n",
+ imx412->reset_gpio);
return PTR_ERR(imx412->reset_gpio);
}
/* Get sensor input clock */
- imx412->inclk = devm_clk_get(imx412->dev, NULL);
- if (IS_ERR(imx412->inclk)) {
- dev_err(imx412->dev, "could not get inclk\n");
- return PTR_ERR(imx412->inclk);
- }
+ imx412->inclk = devm_v4l2_sensor_clk_get(imx412->dev, NULL);
+ if (IS_ERR(imx412->inclk))
+ return dev_err_probe(imx412->dev, PTR_ERR(imx412->inclk),
+ "could not get inclk\n");
rate = clk_get_rate(imx412->inclk);
if (rate != IMX412_INCLK_RATE) {
diff --git a/drivers/media/i2c/imx415.c b/drivers/media/i2c/imx415.c
index 276bf4d6f39d..0b424c17e880 100644
--- a/drivers/media/i2c/imx415.c
+++ b/drivers/media/i2c/imx415.c
@@ -1250,7 +1250,7 @@ static int imx415_parse_hw_config(struct imx415 *sensor)
return dev_err_probe(sensor->dev, PTR_ERR(sensor->reset),
"failed to get reset GPIO\n");
- sensor->clk = devm_clk_get(sensor->dev, NULL);
+ sensor->clk = devm_v4l2_sensor_clk_get(sensor->dev, NULL);
if (IS_ERR(sensor->clk))
return dev_err_probe(sensor->dev, PTR_ERR(sensor->clk),
"failed to get clock\n");
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index c84e1e0e6109..5588cdd7ec20 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -321,9 +321,9 @@ static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_proto *protocol,
static int ir_key_poll(struct IR_i2c *ir)
{
- enum rc_proto protocol;
- u32 scancode;
- u8 toggle;
+ enum rc_proto protocol = 0;
+ u32 scancode = 0;
+ u8 toggle = 0;
int rc;
dev_dbg(&ir->rc->dev, "%s\n", __func__);
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 7c0961688d61..e6e214f8294b 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -751,8 +751,8 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
mas = v4l2_async_nf_add_fwnode(&priv->notifier, source->fwnode,
struct max9286_asd);
if (IS_ERR(mas)) {
- dev_err(dev, "Failed to add subdev for source %u: %ld",
- i, PTR_ERR(mas));
+ dev_err(dev, "Failed to add subdev for source %u: %pe",
+ i, mas);
v4l2_async_nf_cleanup(&priv->notifier);
return PTR_ERR(mas);
}
diff --git a/drivers/media/i2c/max96717.c b/drivers/media/i2c/max96717.c
index c8ae7890d9fa..72f021b1a7b9 100644
--- a/drivers/media/i2c/max96717.c
+++ b/drivers/media/i2c/max96717.c
@@ -650,7 +650,7 @@ static int max96717_v4l2_notifier_register(struct max96717_priv *priv)
fwnode_handle_put(ep_fwnode);
if (IS_ERR(asd)) {
- dev_err(dev, "Failed to add subdev: %ld", PTR_ERR(asd));
+ dev_err(dev, "Failed to add subdev: %pe", asd);
v4l2_async_nf_cleanup(&priv->notifier);
return PTR_ERR(asd);
}
@@ -782,21 +782,23 @@ static unsigned int max96717_clk_find_best_index(struct max96717_priv *priv,
return idx;
}
-static long max96717_clk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int max96717_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct max96717_priv *priv = clk_hw_to_max96717(hw);
struct device *dev = &priv->client->dev;
unsigned int idx;
- idx = max96717_clk_find_best_index(priv, rate);
+ idx = max96717_clk_find_best_index(priv, req->rate);
- if (rate != max96717_predef_freqs[idx].freq) {
+ if (req->rate != max96717_predef_freqs[idx].freq) {
dev_warn(dev, "Request CLK freq:%lu, found CLK freq:%lu\n",
- rate, max96717_predef_freqs[idx].freq);
+ req->rate, max96717_predef_freqs[idx].freq);
}
- return max96717_predef_freqs[idx].freq;
+ req->rate = max96717_predef_freqs[idx].freq;
+
+ return 0;
}
static int max96717_clk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -847,7 +849,7 @@ static const struct clk_ops max96717_clk_ops = {
.unprepare = max96717_clk_unprepare,
.set_rate = max96717_clk_set_rate,
.recalc_rate = max96717_clk_recalc_rate,
- .round_rate = max96717_clk_round_rate,
+ .determine_rate = max96717_clk_determine_rate,
};
static int max96717_register_clkout(struct max96717_priv *priv)
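The round_rate → determine_rate change follows the clk framework's migration away from .round_rate: the new callback takes a struct clk_rate_request, adjusts req->rate in place (optionally honouring req->min_rate/max_rate), and returns 0 or a negative errno rather than encoding the result in a long return value. A minimal sketch with hypothetical bounds:

    static int my_clk_determine_rate(struct clk_hw *hw,
                                     struct clk_rate_request *req)
    {
            /* MY_MIN_HZ/MY_MAX_HZ are hypothetical hardware limits */
            req->rate = clamp(req->rate, MY_MIN_HZ, MY_MAX_HZ);
            return 0;
    }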
diff --git a/drivers/media/i2c/msp3400-kthreads.c b/drivers/media/i2c/msp3400-kthreads.c
index ecabc0e1d32e..1d9f41dd7c21 100644
--- a/drivers/media/i2c/msp3400-kthreads.c
+++ b/drivers/media/i2c/msp3400-kthreads.c
@@ -596,6 +596,8 @@ restart:
"carrier2 val: %5d / %s\n", val, cd[i].name);
}
+ if (max1 < 0 || max1 > 3)
+ goto restart;
/* program the msp3400 according to the results */
state->main = msp3400c_carrier_detect_main[max1].cdo;
switch (max1) {
diff --git a/drivers/media/i2c/mt9m001.c b/drivers/media/i2c/mt9m001.c
index 12d3e86bdc0f..7a6114d18dfc 100644
--- a/drivers/media/i2c/mt9m001.c
+++ b/drivers/media/i2c/mt9m001.c
@@ -743,9 +743,10 @@ static int mt9m001_probe(struct i2c_client *client)
if (!mt9m001)
return -ENOMEM;
- mt9m001->clk = devm_clk_get(&client->dev, NULL);
+ mt9m001->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(mt9m001->clk))
- return PTR_ERR(mt9m001->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9m001->clk),
+ "failed to get the clock\n");
mt9m001->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
GPIOD_OUT_LOW);
diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
index 9aa5dcda3805..3532c7c38bec 100644
--- a/drivers/media/i2c/mt9m111.c
+++ b/drivers/media/i2c/mt9m111.c
@@ -1279,14 +1279,15 @@ static int mt9m111_probe(struct i2c_client *client)
return ret;
}
- mt9m111->clk = devm_clk_get(&client->dev, "mclk");
+ mt9m111->clk = devm_v4l2_sensor_clk_get(&client->dev, "mclk");
if (IS_ERR(mt9m111->clk))
- return PTR_ERR(mt9m111->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9m111->clk),
+ "failed to get mclk\n");
mt9m111->regulator = devm_regulator_get(&client->dev, "vdd");
if (IS_ERR(mt9m111->regulator)) {
- dev_err(&client->dev, "regulator not found: %ld\n",
- PTR_ERR(mt9m111->regulator));
+ dev_err(&client->dev, "regulator not found: %pe\n",
+ mt9m111->regulator);
return PTR_ERR(mt9m111->regulator);
}
diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
index aa3fd6c6c76c..51ebbe7ae996 100644
--- a/drivers/media/i2c/mt9m114.c
+++ b/drivers/media/i2c/mt9m114.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/types.h>
@@ -42,6 +43,9 @@
#define MT9M114_RESET_AND_MISC_CONTROL CCI_REG16(0x001a)
#define MT9M114_RESET_SOC BIT(0)
#define MT9M114_PAD_SLEW CCI_REG16(0x001e)
+#define MT9M114_PAD_SLEW_MIN 0
+#define MT9M114_PAD_SLEW_MAX 7
+#define MT9M114_PAD_SLEW_DEFAULT 7
#define MT9M114_PAD_CONTROL CCI_REG16(0x0032)
/* XDMA registers */
@@ -388,6 +392,7 @@ struct mt9m114 {
unsigned int pixrate;
bool streaming;
+ u32 pad_slew_rate;
/* Pixel Array */
struct {
@@ -645,9 +650,6 @@ static const struct cci_reg_sequence mt9m114_init[] = {
{ MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MAX, 1459 },
{ MT9M114_CAM_SENSOR_CFG_FINE_CORRECTION, 96 },
{ MT9M114_CAM_SENSOR_CFG_REG_0_DATA, 32 },
-
- /* Miscellaneous settings */
- { MT9M114_PAD_SLEW, 0x0777 },
};
/* -----------------------------------------------------------------------------
@@ -779,6 +781,13 @@ static int mt9m114_initialize(struct mt9m114 *sensor)
if (ret < 0)
return ret;
+ value = sensor->pad_slew_rate
+ | sensor->pad_slew_rate << 4
+ | sensor->pad_slew_rate << 8;
+ cci_write(sensor->regmap, MT9M114_PAD_SLEW, value, &ret);
+ if (ret < 0)
+ return ret;
+
ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
if (ret < 0)
return ret;
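The three shifted copies pack the same 3-bit slew rate into the three pad-group fields of MT9M114_PAD_SLEW; for the default rate of 7 this reproduces the 0x0777 value the series removes from the static mt9m114_init[] table. Worked example:

    u32 rate = 7;                              /* MT9M114_PAD_SLEW_DEFAULT */
    u32 value = rate | rate << 4 | rate << 8;  /* == 0x777 */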
@@ -1283,6 +1292,7 @@ static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
struct mt9m114 *sensor = pa_to_mt9m114(sd);
struct v4l2_mbus_framefmt *format;
struct v4l2_rect *crop;
+ int ret = 0;
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
@@ -1298,25 +1308,41 @@ static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
* binning, but binning is configured after setting the selection, so
* we can't tell here if it will be used.
*/
- crop->left = ALIGN(sel->r.left, 4);
- crop->top = ALIGN(sel->r.top, 2);
- crop->width = clamp_t(unsigned int, ALIGN(sel->r.width, 4),
- MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
- MT9M114_PIXEL_ARRAY_WIDTH - crop->left);
- crop->height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
- MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
- MT9M114_PIXEL_ARRAY_HEIGHT - crop->top);
-
- sel->r = *crop;
+ sel->r.left = ALIGN(sel->r.left, 4);
+ sel->r.top = ALIGN(sel->r.top, 2);
+ sel->r.width = clamp_t(unsigned int, ALIGN(sel->r.width, 4),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
+ MT9M114_PIXEL_ARRAY_WIDTH - sel->r.left);
+ sel->r.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+ MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
+ MT9M114_PIXEL_ARRAY_HEIGHT - sel->r.top);
+
+ /* Changing the selection size is not allowed in streaming state. */
+ if (sensor->streaming &&
+ (sel->r.height != crop->height || sel->r.width != crop->width))
+ return -EBUSY;
+
+ *crop = sel->r;
/* Reset the format. */
format->width = crop->width;
format->height = crop->height;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
- mt9m114_pa_ctrl_update_blanking(sensor, format);
+ if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+ return ret;
- return 0;
+ mt9m114_pa_ctrl_update_blanking(sensor, format);
+
+ /* Apply values immediately if streaming. */
+ if (sensor->streaming) {
+ ret = mt9m114_configure_pa(sensor, state);
+ if (ret)
+ return ret;
+ /* Changing the cropping config requires a CONFIG_CHANGE. */
+ ret = mt9m114_set_state(sensor,
+ MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
+ }
+ return ret;
}
static const struct v4l2_subdev_pad_ops mt9m114_pa_pad_ops = {
@@ -2360,6 +2386,17 @@ static int mt9m114_parse_dt(struct mt9m114 *sensor)
goto error;
}
+ sensor->pad_slew_rate = MT9M114_PAD_SLEW_DEFAULT;
+ device_property_read_u32(&sensor->client->dev, "slew-rate",
+ &sensor->pad_slew_rate);
+
+ if (sensor->pad_slew_rate < MT9M114_PAD_SLEW_MIN ||
+ sensor->pad_slew_rate > MT9M114_PAD_SLEW_MAX) {
+ dev_err(&sensor->client->dev, "Invalid slew-rate %u\n",
+ sensor->pad_slew_rate);
+ return -EINVAL;
+ }
+
return 0;
error:
@@ -2390,10 +2427,10 @@ static int mt9m114_probe(struct i2c_client *client)
return ret;
/* Acquire clocks, GPIOs and regulators. */
- sensor->clk = devm_clk_get(dev, NULL);
+ sensor->clk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->clk)) {
- ret = PTR_ERR(sensor->clk);
- dev_err_probe(dev, ret, "Failed to get clock\n");
+ ret = dev_err_probe(dev, PTR_ERR(sensor->clk),
+ "Failed to get clock\n");
goto error_ep_free;
}
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 4ef5fb06131d..1500ee4db47e 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -233,9 +233,10 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
unsigned long ext_freq;
int ret;
- mt9p031->clk = devm_clk_get(&client->dev, NULL);
+ mt9p031->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(mt9p031->clk))
- return PTR_ERR(mt9p031->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9p031->clk),
+ "failed to get the clock\n");
ret = clk_set_rate(mt9p031->clk, mt9p031->ext_freq);
if (ret < 0)
@@ -1092,6 +1093,7 @@ static int mt9p031_parse_properties(struct mt9p031 *mt9p031, struct device *dev)
static int mt9p031_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
+ const struct mt9p031_model_info *info;
struct mt9p031 *mt9p031;
unsigned int i;
int ret;
@@ -1112,7 +1114,8 @@ static int mt9p031_probe(struct i2c_client *client)
mt9p031->output_control = MT9P031_OUTPUT_CONTROL_DEF;
mt9p031->mode2 = MT9P031_READ_MODE_2_ROW_BLC;
- mt9p031->code = (uintptr_t)device_get_match_data(&client->dev);
+ info = device_get_match_data(&client->dev);
+ mt9p031->code = info->code;
mt9p031->regulators[0].supply = "vdd";
mt9p031->regulators[1].supply = "vdd_io";
diff --git a/drivers/media/i2c/mt9t112.c b/drivers/media/i2c/mt9t112.c
index 878dff9b7577..2d2c840fc002 100644
--- a/drivers/media/i2c/mt9t112.c
+++ b/drivers/media/i2c/mt9t112.c
@@ -1078,13 +1078,12 @@ static int mt9t112_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
- priv->clk = devm_clk_get(&client->dev, "extclk");
- if (PTR_ERR(priv->clk) == -ENOENT) {
+ priv->clk = devm_v4l2_sensor_clk_get(&client->dev, "extclk");
+ if (PTR_ERR(priv->clk) == -ENOENT)
priv->clk = NULL;
- } else if (IS_ERR(priv->clk)) {
- dev_err(&client->dev, "Unable to get clock \"extclk\"\n");
- return PTR_ERR(priv->clk);
- }
+ else if (IS_ERR(priv->clk))
+ return dev_err_probe(&client->dev, PTR_ERR(priv->clk),
+ "Unable to get clock \"extclk\"\n");
priv->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
GPIOD_OUT_HIGH);
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 302120ff125e..d4359d5b92bb 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -15,16 +15,15 @@
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/videodev2.h>
#include <linux/v4l2-mediabus.h>
-#include <linux/module.h>
+#include <linux/videodev2.h>
-#include <media/i2c/mt9v032.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -182,7 +181,16 @@ static const struct mt9v032_model_version mt9v032_versions[] = {
{ MT9V034_CHIP_ID_REV1, "MT9V024/MT9V034 rev1" },
};
+struct mt9v032_platform_data {
+ unsigned int clk_pol:1;
+
+ const s64 *link_freqs;
+ s64 link_def_freq;
+};
+
struct mt9v032 {
+ struct device *dev;
+
struct v4l2_subdev subdev;
struct media_pad pad;
@@ -205,7 +213,7 @@ struct mt9v032 {
struct gpio_desc *reset_gpio;
struct gpio_desc *standby_gpio;
- struct mt9v032_platform_data *pdata;
+ struct mt9v032_platform_data pdata;
const struct mt9v032_model_info *model;
const struct mt9v032_model_version *version;
@@ -330,7 +338,7 @@ static int __mt9v032_set_power(struct mt9v032 *mt9v032, bool on)
return ret;
/* Configure the pixel clock polarity */
- if (mt9v032->pdata && mt9v032->pdata->clk_pol) {
+ if (mt9v032->pdata.clk_pol) {
ret = regmap_write(map, mt9v032->model->data->pclk_reg,
MT9V032_PIXEL_CLOCK_INV_PXL_CLK);
if (ret < 0)
@@ -473,13 +481,12 @@ static int mt9v032_get_format(struct v4l2_subdev *subdev,
static void mt9v032_configure_pixel_rate(struct mt9v032 *mt9v032)
{
- struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
int ret;
ret = v4l2_ctrl_s_ctrl_int64(mt9v032->pixel_rate,
mt9v032->sysclk / mt9v032->hratio);
if (ret < 0)
- dev_warn(&client->dev, "failed to set pixel rate (%d)\n", ret);
+ dev_warn(mt9v032->dev, "failed to set pixel rate (%d)\n", ret);
}
static unsigned int mt9v032_calc_ratio(unsigned int input, unsigned int output)
@@ -682,7 +689,7 @@ static int mt9v032_s_ctrl(struct v4l2_ctrl *ctrl)
if (mt9v032->link_freq == NULL)
break;
- freq = mt9v032->pdata->link_freqs[mt9v032->link_freq->val];
+ freq = mt9v032->pdata.link_freqs[mt9v032->link_freq->val];
*mt9v032->pixel_rate->p_new.p_s64 = freq;
mt9v032->sysclk = freq;
break;
@@ -883,12 +890,12 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
u32 version;
int ret;
- dev_info(&client->dev, "Probing MT9V032 at address 0x%02x\n",
+ dev_info(mt9v032->dev, "Probing MT9V032 at address 0x%02x\n",
client->addr);
ret = mt9v032_power_on(mt9v032);
if (ret < 0) {
- dev_err(&client->dev, "MT9V032 power up failed\n");
+ dev_err(mt9v032->dev, "MT9V032 power up failed\n");
return ret;
}
@@ -898,7 +905,7 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
mt9v032_power_off(mt9v032);
if (ret < 0) {
- dev_err(&client->dev, "Failed reading chip version\n");
+ dev_err(mt9v032->dev, "Failed reading chip version\n");
return ret;
}
@@ -910,12 +917,12 @@ static int mt9v032_registered(struct v4l2_subdev *subdev)
}
if (mt9v032->version == NULL) {
- dev_err(&client->dev, "Unsupported chip version 0x%04x\n",
+ dev_err(mt9v032->dev, "Unsupported chip version 0x%04x\n",
version);
return -ENODEV;
}
- dev_info(&client->dev, "%s detected at address 0x%02x\n",
+ dev_info(mt9v032->dev, "%s detected at address 0x%02x\n",
mt9v032->version->name, client->addr);
mt9v032_configure_pixel_rate(mt9v032);
@@ -995,41 +1002,33 @@ static const struct regmap_config mt9v032_regmap_config = {
* Driver initialization and probing
*/
-static struct mt9v032_platform_data *
-mt9v032_get_pdata(struct i2c_client *client)
+static int mt9v032_get_pdata(struct mt9v032 *mt9v032)
{
- struct mt9v032_platform_data *pdata = NULL;
+ struct mt9v032_platform_data *pdata = &mt9v032->pdata;
struct v4l2_fwnode_endpoint endpoint = { .bus_type = 0 };
- struct device_node *np;
+ struct device_node *np __free(device_node) = NULL;
struct property *prop;
- if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
- return client->dev.platform_data;
-
- np = of_graph_get_endpoint_by_regs(client->dev.of_node, 0, -1);
+ np = of_graph_get_endpoint_by_regs(mt9v032->dev->of_node, 0, -1);
if (!np)
- return NULL;
+ return -EINVAL;
if (v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &endpoint) < 0)
- goto done;
-
- pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- goto done;
+ return -EINVAL;
prop = of_find_property(np, "link-frequencies", NULL);
if (prop) {
u64 *link_freqs;
size_t size = prop->length / sizeof(*link_freqs);
- link_freqs = devm_kcalloc(&client->dev, size,
+ link_freqs = devm_kcalloc(mt9v032->dev, size,
sizeof(*link_freqs), GFP_KERNEL);
if (!link_freqs)
- goto done;
+ return -EINVAL;
if (of_property_read_u64_array(np, "link-frequencies",
link_freqs, size) < 0)
- goto done;
+ return -EINVAL;
pdata->link_freqs = link_freqs;
pdata->link_def_freq = link_freqs[0];
@@ -1038,14 +1037,11 @@ mt9v032_get_pdata(struct i2c_client *client)
pdata->clk_pol = !!(endpoint.bus.parallel.flags &
V4L2_MBUS_PCLK_SAMPLE_RISING);
-done:
- of_node_put(np);
- return pdata;
+ return 0;
}
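mt9v032_get_pdata() now leans on scope-based cleanup: the __free(device_node) annotation arranges for of_node_put() to run automatically when np leaves scope, which is why the done:/of_node_put() unwind above disappears. The idiom, sketched:

    /* <linux/of.h> provides the device_node cleanup class */
    struct device_node *np __free(device_node) =
            of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
    if (!np)
            return -EINVAL;
    /* every return path below drops the reference automatically */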
static int mt9v032_probe(struct i2c_client *client)
{
- struct mt9v032_platform_data *pdata = mt9v032_get_pdata(client);
struct mt9v032 *mt9v032;
unsigned int i;
int ret;
@@ -1054,27 +1050,35 @@ static int mt9v032_probe(struct i2c_client *client)
if (!mt9v032)
return -ENOMEM;
+ mt9v032->dev = &client->dev;
+
mt9v032->regmap = devm_regmap_init_i2c(client, &mt9v032_regmap_config);
if (IS_ERR(mt9v032->regmap))
return PTR_ERR(mt9v032->regmap);
- mt9v032->clk = devm_clk_get(&client->dev, NULL);
+ mt9v032->clk = devm_v4l2_sensor_clk_get(mt9v032->dev, NULL);
if (IS_ERR(mt9v032->clk))
- return PTR_ERR(mt9v032->clk);
+ return dev_err_probe(mt9v032->dev, PTR_ERR(mt9v032->clk),
+ "failed to get the clock\n");
- mt9v032->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ mt9v032->reset_gpio = devm_gpiod_get_optional(mt9v032->dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(mt9v032->reset_gpio))
return PTR_ERR(mt9v032->reset_gpio);
- mt9v032->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
+ mt9v032->standby_gpio = devm_gpiod_get_optional(mt9v032->dev, "standby",
GPIOD_OUT_LOW);
if (IS_ERR(mt9v032->standby_gpio))
return PTR_ERR(mt9v032->standby_gpio);
mutex_init(&mt9v032->power_lock);
- mt9v032->pdata = pdata;
- mt9v032->model = i2c_get_match_data(client);
+
+ ret = mt9v032_get_pdata(mt9v032);
+ if (ret)
+ return dev_err_probe(mt9v032->dev, -EINVAL,
+ "Failed to parse DT properties\n");
+
+ mt9v032->model = device_get_match_data(mt9v032->dev);
v4l2_ctrl_handler_init(&mt9v032->ctrls, 11 +
ARRAY_SIZE(mt9v032_aegc_controls));
@@ -1119,7 +1123,8 @@ static int mt9v032_probe(struct i2c_client *client)
v4l2_ctrl_new_std(&mt9v032->ctrls, &mt9v032_ctrl_ops,
V4L2_CID_PIXEL_RATE, 1, INT_MAX, 1, 1);
- if (pdata && pdata->link_freqs) {
+ if (mt9v032->pdata.link_freqs) {
+ const struct mt9v032_platform_data *pdata = &mt9v032->pdata;
unsigned int def = 0;
for (i = 0; pdata->link_freqs[i]; ++i) {
@@ -1139,7 +1144,7 @@ static int mt9v032_probe(struct i2c_client *client)
mt9v032->subdev.ctrl_handler = &mt9v032->ctrls;
if (mt9v032->ctrls.error) {
- dev_err(&client->dev, "control initialization error %d\n",
+ dev_err(mt9v032->dev, "control initialization error %d\n",
mt9v032->ctrls.error);
ret = mt9v032->ctrls.error;
goto err;
@@ -1177,7 +1182,7 @@ static int mt9v032_probe(struct i2c_client *client)
if (ret < 0)
goto err;
- mt9v032->subdev.dev = &client->dev;
+ mt9v032->subdev.dev = mt9v032->dev;
ret = v4l2_async_register_subdev(&mt9v032->subdev);
if (ret < 0)
goto err;
@@ -1261,19 +1266,6 @@ static const struct mt9v032_model_info mt9v032_models[] = {
},
};
-static const struct i2c_device_id mt9v032_id[] = {
- { "mt9v022", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V022_COLOR] },
- { "mt9v022m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V022_MONO] },
- { "mt9v024", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V024_COLOR] },
- { "mt9v024m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V024_MONO] },
- { "mt9v032", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_COLOR] },
- { "mt9v032m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_MONO] },
- { "mt9v034", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_COLOR] },
- { "mt9v034m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_MONO] },
- { /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(i2c, mt9v032_id);
-
static const struct of_device_id mt9v032_of_match[] = {
{ .compatible = "aptina,mt9v022", .data = &mt9v032_models[MT9V032_MODEL_V022_COLOR] },
{ .compatible = "aptina,mt9v022m", .data = &mt9v032_models[MT9V032_MODEL_V022_MONO] },
@@ -1294,7 +1286,6 @@ static struct i2c_driver mt9v032_driver = {
},
.probe = mt9v032_probe,
.remove = mt9v032_remove,
- .id_table = mt9v032_id,
};
module_i2c_driver(mt9v032_driver);
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index 723fe138e7bc..64a758c95ab7 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -365,8 +365,6 @@ static int __mt9v111_power_on(struct v4l2_subdev *sd)
if (ret)
return ret;
- clk_set_rate(mt9v111->clk, mt9v111->sysclk);
-
gpiod_set_value(mt9v111->standby, 0);
usleep_range(500, 1000);
@@ -532,8 +530,8 @@ static int mt9v111_calc_frame_rate(struct mt9v111_dev *mt9v111,
static int mt9v111_hw_config(struct mt9v111_dev *mt9v111)
{
struct i2c_client *c = mt9v111->client;
- unsigned int ret;
u16 outfmtctrl2;
+ int ret;
/* Force device reset. */
ret = __mt9v111_hw_reset(mt9v111);
@@ -1129,9 +1127,10 @@ static int mt9v111_probe(struct i2c_client *client)
mt9v111->dev = &client->dev;
mt9v111->client = client;
- mt9v111->clk = devm_clk_get(&client->dev, NULL);
+ mt9v111->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(mt9v111->clk))
- return PTR_ERR(mt9v111->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(mt9v111->clk),
+ "failed to get the clock\n");
mt9v111->sysclk = clk_get_rate(mt9v111->clk);
if (mt9v111->sysclk > MT9V111_MAX_CLKIN)
@@ -1140,24 +1139,24 @@ static int mt9v111_probe(struct i2c_client *client)
mt9v111->oe = devm_gpiod_get_optional(&client->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(mt9v111->oe)) {
- dev_err(&client->dev, "Unable to get GPIO \"enable\": %ld\n",
- PTR_ERR(mt9v111->oe));
+ dev_err(&client->dev, "Unable to get GPIO \"enable\": %pe\n",
+ mt9v111->oe);
return PTR_ERR(mt9v111->oe);
}
mt9v111->standby = devm_gpiod_get_optional(&client->dev, "standby",
GPIOD_OUT_HIGH);
if (IS_ERR(mt9v111->standby)) {
- dev_err(&client->dev, "Unable to get GPIO \"standby\": %ld\n",
- PTR_ERR(mt9v111->standby));
+ dev_err(&client->dev, "Unable to get GPIO \"standby\": %pe\n",
+ mt9v111->standby);
return PTR_ERR(mt9v111->standby);
}
mt9v111->reset = devm_gpiod_get_optional(&client->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(mt9v111->reset)) {
- dev_err(&client->dev, "Unable to get GPIO \"reset\": %ld\n",
- PTR_ERR(mt9v111->reset));
+ dev_err(&client->dev, "Unable to get GPIO \"reset\": %pe\n",
+ mt9v111->reset);
return PTR_ERR(mt9v111->reset);
}
diff --git a/drivers/media/i2c/og01a1b.c b/drivers/media/i2c/og01a1b.c
index 78d5d406e4b7..c7184de6251a 100644
--- a/drivers/media/i2c/og01a1b.c
+++ b/drivers/media/i2c/og01a1b.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -10,6 +9,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -421,6 +422,7 @@ static const struct og01a1b_mode supported_modes[] = {
};
struct og01a1b {
+ struct device *dev;
struct clk *xvclk;
struct gpio_desc *reset_gpio;
struct regulator *avdd;
@@ -512,7 +514,6 @@ static int og01a1b_write_reg(struct og01a1b *og01a1b, u16 reg, u16 len, u32 val)
static int og01a1b_write_reg_list(struct og01a1b *og01a1b,
const struct og01a1b_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
unsigned int i;
int ret;
@@ -520,7 +521,7 @@ static int og01a1b_write_reg_list(struct og01a1b *og01a1b,
ret = og01a1b_write_reg(og01a1b, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(og01a1b->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -544,7 +545,6 @@ static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct og01a1b *og01a1b = container_of(ctrl->handler,
struct og01a1b, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
s64 exposure_max;
int ret = 0;
@@ -560,7 +560,7 @@ static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 control values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(og01a1b->dev))
return 0;
switch (ctrl->id) {
@@ -596,7 +596,7 @@ static int og01a1b_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(og01a1b->dev);
return ret;
}
@@ -682,13 +682,12 @@ static void og01a1b_update_pad_format(const struct og01a1b_mode *mode,
{
fmt->width = mode->width;
fmt->height = mode->height;
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->code = MEDIA_BUS_FMT_Y10_1X10;
fmt->field = V4L2_FIELD_NONE;
}
static int og01a1b_start_streaming(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
const struct og01a1b_reg_list *reg_list;
int link_freq_index, ret;
@@ -697,14 +696,14 @@ static int og01a1b_start_streaming(struct og01a1b *og01a1b)
ret = og01a1b_write_reg_list(og01a1b, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(og01a1b->dev, "failed to set plls");
return ret;
}
reg_list = &og01a1b->cur_mode->reg_list;
ret = og01a1b_write_reg_list(og01a1b, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(og01a1b->dev, "failed to set mode");
return ret;
}
@@ -716,7 +715,7 @@ static int og01a1b_start_streaming(struct og01a1b *og01a1b)
OG01A1B_REG_VALUE_08BIT,
OG01A1B_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(og01a1b->dev, "failed to set stream");
return ret;
}
@@ -725,22 +724,19 @@ static int og01a1b_start_streaming(struct og01a1b *og01a1b)
static void og01a1b_stop_streaming(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
-
if (og01a1b_write_reg(og01a1b, OG01A1B_REG_MODE_SELECT,
OG01A1B_REG_VALUE_08BIT, OG01A1B_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(og01a1b->dev, "failed to set stream");
}
static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
{
struct og01a1b *og01a1b = to_og01a1b(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&og01a1b->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(og01a1b->dev);
if (ret) {
mutex_unlock(&og01a1b->mutex);
return ret;
@@ -750,11 +746,11 @@ static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
og01a1b_stop_streaming(og01a1b);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(og01a1b->dev);
}
} else {
og01a1b_stop_streaming(og01a1b);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(og01a1b->dev);
}
mutex_unlock(&og01a1b->mutex);
@@ -828,7 +824,7 @@ static int og01a1b_enum_mbus_code(struct v4l2_subdev *sd,
if (code->index > 0)
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ code->code = MEDIA_BUS_FMT_Y10_1X10;
return 0;
}
@@ -840,7 +836,7 @@ static int og01a1b_enum_frame_size(struct v4l2_subdev *sd,
if (fse->index >= ARRAY_SIZE(supported_modes))
return -EINVAL;
- if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ if (fse->code != MEDIA_BUS_FMT_Y10_1X10)
return -EINVAL;
fse->min_width = supported_modes[fse->index].width;
@@ -889,7 +885,6 @@ static const struct v4l2_subdev_internal_ops og01a1b_internal_ops = {
static int og01a1b_identify_module(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
int ret;
u32 val;
@@ -899,7 +894,7 @@ static int og01a1b_identify_module(struct og01a1b *og01a1b)
return ret;
if (val != OG01A1B_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(og01a1b->dev, "chip id mismatch: %x!=%x",
OG01A1B_CHIP_ID, val);
return -ENXIO;
}
@@ -909,35 +904,18 @@ static int og01a1b_identify_module(struct og01a1b *og01a1b)
static int og01a1b_check_hwcfg(struct og01a1b *og01a1b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&og01a1b->sd);
- struct device *dev = &client->dev;
+ struct device *dev = og01a1b->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- if (!og01a1b->xvclk) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
-
- mclk = clk_get_rate(og01a1b->xvclk);
- }
-
- if (mclk != OG01A1B_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
- }
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -1066,47 +1044,54 @@ static void og01a1b_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(og01a1b->dev);
mutex_destroy(&og01a1b->mutex);
}
static int og01a1b_probe(struct i2c_client *client)
{
struct og01a1b *og01a1b;
+ unsigned long freq;
int ret;
og01a1b = devm_kzalloc(&client->dev, sizeof(*og01a1b), GFP_KERNEL);
if (!og01a1b)
return -ENOMEM;
+ og01a1b->dev = &client->dev;
+
v4l2_i2c_subdev_init(&og01a1b->sd, client, &og01a1b_subdev_ops);
- og01a1b->xvclk = devm_clk_get_optional(&client->dev, NULL);
- if (IS_ERR(og01a1b->xvclk)) {
- ret = PTR_ERR(og01a1b->xvclk);
- dev_err(&client->dev, "failed to get xvclk clock: %d\n", ret);
- return ret;
- }
+ og01a1b->xvclk = devm_v4l2_sensor_clk_get(og01a1b->dev, NULL);
+ if (IS_ERR(og01a1b->xvclk))
+ return dev_err_probe(og01a1b->dev, PTR_ERR(og01a1b->xvclk),
+ "failed to get xvclk clock\n");
+
+ freq = clk_get_rate(og01a1b->xvclk);
+ if (freq != OG01A1B_MCLK)
+ return dev_err_probe(og01a1b->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
ret = og01a1b_check_hwcfg(og01a1b);
if (ret) {
- dev_err(&client->dev, "failed to check HW configuration: %d",
+ dev_err(og01a1b->dev, "failed to check HW configuration: %d",
ret);
return ret;
}
- og01a1b->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ og01a1b->reset_gpio = devm_gpiod_get_optional(og01a1b->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(og01a1b->reset_gpio)) {
- dev_err(&client->dev, "cannot get reset GPIO\n");
+ dev_err(og01a1b->dev, "cannot get reset GPIO\n");
return PTR_ERR(og01a1b->reset_gpio);
}
- og01a1b->avdd = devm_regulator_get_optional(&client->dev, "avdd");
+ og01a1b->avdd = devm_regulator_get_optional(og01a1b->dev, "avdd");
if (IS_ERR(og01a1b->avdd)) {
ret = PTR_ERR(og01a1b->avdd);
if (ret != -ENODEV) {
- dev_err_probe(&client->dev, ret,
+ dev_err_probe(og01a1b->dev, ret,
"Failed to get 'avdd' regulator\n");
return ret;
}
@@ -1114,11 +1099,11 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->avdd = NULL;
}
- og01a1b->dovdd = devm_regulator_get_optional(&client->dev, "dovdd");
+ og01a1b->dovdd = devm_regulator_get_optional(og01a1b->dev, "dovdd");
if (IS_ERR(og01a1b->dovdd)) {
ret = PTR_ERR(og01a1b->dovdd);
if (ret != -ENODEV) {
- dev_err_probe(&client->dev, ret,
+ dev_err_probe(og01a1b->dev, ret,
"Failed to get 'dovdd' regulator\n");
return ret;
}
@@ -1126,11 +1111,11 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->dovdd = NULL;
}
- og01a1b->dvdd = devm_regulator_get_optional(&client->dev, "dvdd");
+ og01a1b->dvdd = devm_regulator_get_optional(og01a1b->dev, "dvdd");
if (IS_ERR(og01a1b->dvdd)) {
ret = PTR_ERR(og01a1b->dvdd);
if (ret != -ENODEV) {
- dev_err_probe(&client->dev, ret,
+ dev_err_probe(og01a1b->dev, ret,
"Failed to get 'dvdd' regulator\n");
return ret;
}
@@ -1139,13 +1124,13 @@ static int og01a1b_probe(struct i2c_client *client)
}
/* The sensor must be powered on to read the CHIP_ID register */
- ret = og01a1b_power_on(&client->dev);
+ ret = og01a1b_power_on(og01a1b->dev);
if (ret)
return ret;
ret = og01a1b_identify_module(og01a1b);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(og01a1b->dev, "failed to find sensor: %d", ret);
goto power_off;
}
@@ -1153,7 +1138,7 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->cur_mode = &supported_modes[0];
ret = og01a1b_init_controls(og01a1b);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(og01a1b->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1164,21 +1149,21 @@ static int og01a1b_probe(struct i2c_client *client)
og01a1b->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&og01a1b->sd.entity, 1, &og01a1b->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(og01a1b->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&og01a1b->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(og01a1b->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
/* Enable runtime PM and turn off the device */
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(og01a1b->dev);
+ pm_runtime_enable(og01a1b->dev);
+ pm_runtime_idle(og01a1b->dev);
return 0;
@@ -1190,7 +1175,7 @@ probe_error_v4l2_ctrl_handler_free:
mutex_destroy(&og01a1b->mutex);
power_off:
- og01a1b_power_off(&client->dev);
+ og01a1b_power_off(og01a1b->dev);
return ret;
}
diff --git a/drivers/media/i2c/og0ve1b.c b/drivers/media/i2c/og0ve1b.c
new file mode 100644
index 000000000000..262d9df766fe
--- /dev/null
+++ b/drivers/media/i2c/og0ve1b.c
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024-2025 Linaro Ltd
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OG0VE1B_LINK_FREQ_500MHZ (500 * HZ_PER_MHZ)
+#define OG0VE1B_MCLK_FREQ_24MHZ (24 * HZ_PER_MHZ)
+
+#define OG0VE1B_REG_CHIP_ID CCI_REG24(0x300a)
+#define OG0VE1B_CHIP_ID 0xc75645
+
+#define OG0VE1B_REG_MODE_SELECT CCI_REG8(0x0100)
+#define OG0VE1B_MODE_STANDBY 0x00
+#define OG0VE1B_MODE_STREAMING BIT(0)
+
+#define OG0VE1B_REG_SOFTWARE_RST CCI_REG8(0x0103)
+#define OG0VE1B_SOFTWARE_RST BIT(0)
+
+/* Exposure controls from sensor */
+#define OG0VE1B_REG_EXPOSURE CCI_REG24(0x3500)
+#define OG0VE1B_EXPOSURE_MIN 1
+#define OG0VE1B_EXPOSURE_MAX_MARGIN 14
+#define OG0VE1B_EXPOSURE_STEP 1
+#define OG0VE1B_EXPOSURE_DEFAULT 554
+
+/* Analogue gain controls from sensor */
+#define OG0VE1B_REG_ANALOGUE_GAIN CCI_REG16(0x350a)
+#define OG0VE1B_ANALOGUE_GAIN_MIN 1
+#define OG0VE1B_ANALOGUE_GAIN_MAX 0x1ff
+#define OG0VE1B_ANALOGUE_GAIN_STEP 1
+#define OG0VE1B_ANALOGUE_GAIN_DEFAULT 16
+
+/* Test pattern */
+#define OG0VE1B_REG_PRE_ISP CCI_REG8(0x5e00)
+#define OG0VE1B_TEST_PATTERN_ENABLE BIT(7)
+
+#define to_og0ve1b(_sd) container_of(_sd, struct og0ve1b, sd)
+
+static const s64 og0ve1b_link_freq_menu[] = {
+ OG0VE1B_LINK_FREQ_500MHZ,
+};
+
+struct og0ve1b_reg_list {
+ const struct cci_reg_sequence *regs;
+ unsigned int num_regs;
+};
+
+struct og0ve1b_mode {
+ u32 width; /* Frame width in pixels */
+ u32 height; /* Frame height in pixels */
+ u32 hts; /* Horizontal timing size */
+ u32 vts; /* Default vertical timing size */
+ u32 bpp; /* Bits per pixel */
+
+ const struct og0ve1b_reg_list reg_list; /* Sensor register setting */
+};
+
+static const char * const og0ve1b_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Colour Bars",
+};
+
+static const char * const og0ve1b_supply_names[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital core power */
+};
+
+#define OG0VE1B_NUM_SUPPLIES ARRAY_SIZE(og0ve1b_supply_names)
+
+struct og0ve1b {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *xvclk;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[OG0VE1B_NUM_SUPPLIES];
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Saved register value */
+ u64 pre_isp;
+};
+
+static const struct cci_reg_sequence og0ve1b_640x480_120fps_mode[] = {
+ { CCI_REG8(0x30a0), 0x02 },
+ { CCI_REG8(0x30a1), 0x00 },
+ { CCI_REG8(0x30a2), 0x48 },
+ { CCI_REG8(0x30a3), 0x34 },
+ { CCI_REG8(0x30a4), 0xf7 },
+ { CCI_REG8(0x30a5), 0x00 },
+ { CCI_REG8(0x3082), 0x32 },
+ { CCI_REG8(0x3083), 0x01 },
+ { CCI_REG8(0x301c), 0xf0 },
+ { CCI_REG8(0x301e), 0x0b },
+ { CCI_REG8(0x3106), 0x10 },
+ { CCI_REG8(0x3708), 0x77 },
+ { CCI_REG8(0x3709), 0xf8 },
+ { CCI_REG8(0x3717), 0x00 },
+ { CCI_REG8(0x3782), 0x00 },
+ { CCI_REG8(0x3783), 0x47 },
+ { CCI_REG8(0x37a2), 0x00 },
+ { CCI_REG8(0x3503), 0x07 },
+ { CCI_REG8(0x3509), 0x10 },
+ { CCI_REG8(0x3600), 0x83 },
+ { CCI_REG8(0x3601), 0x21 },
+ { CCI_REG8(0x3602), 0xf1 },
+ { CCI_REG8(0x360a), 0x18 },
+ { CCI_REG8(0x360e), 0xb3 },
+ { CCI_REG8(0x3613), 0x20 },
+ { CCI_REG8(0x366a), 0x78 },
+ { CCI_REG8(0x3706), 0x63 },
+ { CCI_REG8(0x3713), 0x00 },
+ { CCI_REG8(0x3716), 0xb0 },
+ { CCI_REG8(0x37a1), 0x38 },
+ { CCI_REG8(0x3800), 0x00 },
+ { CCI_REG8(0x3801), 0x04 },
+ { CCI_REG8(0x3802), 0x00 },
+ { CCI_REG8(0x3803), 0x04 },
+ { CCI_REG8(0x3804), 0x02 },
+ { CCI_REG8(0x3805), 0x8b },
+ { CCI_REG8(0x3806), 0x01 },
+ { CCI_REG8(0x3807), 0xeb },
+ { CCI_REG8(0x3808), 0x02 }, /* output width */
+ { CCI_REG8(0x3809), 0x80 },
+ { CCI_REG8(0x380a), 0x01 }, /* output height */
+ { CCI_REG8(0x380b), 0xe0 },
+ { CCI_REG8(0x380c), 0x03 }, /* horizontal timing size */
+ { CCI_REG8(0x380d), 0x18 },
+ { CCI_REG8(0x380e), 0x02 }, /* vertical timing size */
+ { CCI_REG8(0x380f), 0x38 },
+ { CCI_REG8(0x3811), 0x04 },
+ { CCI_REG8(0x3813), 0x04 },
+ { CCI_REG8(0x3814), 0x11 },
+ { CCI_REG8(0x3815), 0x11 },
+ { CCI_REG8(0x3820), 0x00 },
+ { CCI_REG8(0x3821), 0x00 },
+ { CCI_REG8(0x3823), 0x04 },
+ { CCI_REG8(0x382a), 0x00 },
+ { CCI_REG8(0x382b), 0x03 },
+ { CCI_REG8(0x3840), 0x00 },
+ { CCI_REG8(0x389e), 0x00 },
+ { CCI_REG8(0x3c05), 0x08 },
+ { CCI_REG8(0x3c26), 0x02 },
+ { CCI_REG8(0x3c27), 0xc0 },
+ { CCI_REG8(0x3c28), 0x00 },
+ { CCI_REG8(0x3c29), 0x40 },
+ { CCI_REG8(0x3c2c), 0x00 },
+ { CCI_REG8(0x3c2d), 0x50 },
+ { CCI_REG8(0x3c2e), 0x02 },
+ { CCI_REG8(0x3c2f), 0x66 },
+ { CCI_REG8(0x3c33), 0x08 },
+ { CCI_REG8(0x3c35), 0x00 },
+ { CCI_REG8(0x3c36), 0x00 },
+ { CCI_REG8(0x3c37), 0x00 },
+ { CCI_REG8(0x3f52), 0x9b },
+ { CCI_REG8(0x4001), 0x42 },
+ { CCI_REG8(0x4004), 0x08 },
+ { CCI_REG8(0x4005), 0x00 },
+ { CCI_REG8(0x4007), 0x28 },
+ { CCI_REG8(0x4009), 0x40 },
+ { CCI_REG8(0x4307), 0x30 },
+ { CCI_REG8(0x4500), 0x80 },
+ { CCI_REG8(0x4501), 0x02 },
+ { CCI_REG8(0x4502), 0x47 },
+ { CCI_REG8(0x4504), 0x7f },
+ { CCI_REG8(0x4601), 0x48 },
+ { CCI_REG8(0x4800), 0x64 },
+ { CCI_REG8(0x4801), 0x0f },
+ { CCI_REG8(0x4806), 0x2f },
+ { CCI_REG8(0x4819), 0xaa },
+ { CCI_REG8(0x4823), 0x3e },
+ { CCI_REG8(0x5000), 0x85 },
+ { CCI_REG8(0x5e00), 0x0c },
+ { CCI_REG8(0x3899), 0x09 },
+ { CCI_REG8(0x4f00), 0x64 },
+ { CCI_REG8(0x4f02), 0x0a },
+ { CCI_REG8(0x4f05), 0x0e },
+ { CCI_REG8(0x4f06), 0x11 },
+ { CCI_REG8(0x4f08), 0x0b },
+ { CCI_REG8(0x4f0a), 0xc4 },
+ { CCI_REG8(0x4f20), 0x1f },
+ { CCI_REG8(0x4f25), 0x10 },
+ { CCI_REG8(0x3016), 0x10 },
+ { CCI_REG8(0x3017), 0x00 },
+ { CCI_REG8(0x3018), 0x00 },
+ { CCI_REG8(0x3019), 0x00 },
+ { CCI_REG8(0x301a), 0x00 },
+ { CCI_REG8(0x301b), 0x00 },
+ { CCI_REG8(0x301c), 0x72 },
+ { CCI_REG8(0x3037), 0x40 },
+ { CCI_REG8(0x4f2c), 0x00 },
+ { CCI_REG8(0x4f21), 0x00 },
+ { CCI_REG8(0x4f23), 0x00 },
+ { CCI_REG8(0x4f2a), 0x00 },
+ { CCI_REG8(0x3665), 0xe7 },
+ { CCI_REG8(0x3668), 0x48 },
+ { CCI_REG8(0x3671), 0x3c },
+ { CCI_REG8(0x389a), 0x02 },
+ { CCI_REG8(0x389b), 0x00 },
+ { CCI_REG8(0x303c), 0xa0 },
+ { CCI_REG8(0x300f), 0xf0 },
+ { CCI_REG8(0x304b), 0x0f },
+ { CCI_REG8(0x3662), 0x24 },
+ { CCI_REG8(0x3006), 0x40 },
+ { CCI_REG8(0x4f26), 0x45 },
+ { CCI_REG8(0x3607), 0x34 },
+ { CCI_REG8(0x3608), 0x01 },
+ { CCI_REG8(0x360a), 0x0c },
+ { CCI_REG8(0x360b), 0x86 },
+ { CCI_REG8(0x360c), 0xcc },
+ { CCI_REG8(0x3013), 0x00 },
+ { CCI_REG8(0x3083), 0x02 },
+ { CCI_REG8(0x3084), 0x12 },
+ { CCI_REG8(0x4601), 0x38 },
+ { CCI_REG8(0x366f), 0x3a },
+ { CCI_REG8(0x3713), 0x19 },
+ { CCI_REG8(0x37a2), 0x00 },
+ { CCI_REG8(0x3f43), 0x27 },
+ { CCI_REG8(0x3f45), 0x27 },
+ { CCI_REG8(0x3f47), 0x32 },
+ { CCI_REG8(0x3f49), 0x3e },
+ { CCI_REG8(0x3f4b), 0x20 },
+ { CCI_REG8(0x3f4d), 0x30 },
+ { CCI_REG8(0x4300), 0x3f },
+ { CCI_REG8(0x4009), 0x10 },
+ { CCI_REG8(0x3f02), 0x68 },
+ { CCI_REG8(0x3700), 0x8c },
+ { CCI_REG8(0x370b), 0x7e },
+ { CCI_REG8(0x3f47), 0x35 },
+};
+
+static const struct og0ve1b_mode supported_modes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .hts = 792,
+ .vts = 568,
+ .bpp = 8,
+ .reg_list = {
+ .regs = og0ve1b_640x480_120fps_mode,
+ .num_regs = ARRAY_SIZE(og0ve1b_640x480_120fps_mode),
+ },
+ },
+};
+
+static int og0ve1b_enable_test_pattern(struct og0ve1b *og0ve1b, u32 pattern)
+{
+ u64 val = og0ve1b->pre_isp;
+
+ if (pattern)
+ val |= OG0VE1B_TEST_PATTERN_ENABLE;
+ else
+ val &= ~OG0VE1B_TEST_PATTERN_ENABLE;
+
+ return cci_write(og0ve1b->regmap, OG0VE1B_REG_PRE_ISP, val, NULL);
+}
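og0ve1b_enable_test_pattern() read-modify-writes against og0ve1b->pre_isp, a cached copy of the PRE_ISP register, avoiding an I2C read per control write. How the cache gets populated is not visible in this hunk; one plausible sketch, assuming it is filled once at power-up:

    /* hypothetical cache fill at power-up */
    ret = cci_read(og0ve1b->regmap, OG0VE1B_REG_PRE_ISP,
                   &og0ve1b->pre_isp, NULL);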
+
+static int og0ve1b_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct og0ve1b *og0ve1b = container_of(ctrl->handler, struct og0ve1b,
+ ctrl_handler);
+ int ret;
+
+ /* V4L2 controls are applied only when the sensor is powered up for streaming */
+ if (!pm_runtime_get_if_active(og0ve1b->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_ANALOGUE_GAIN,
+ ctrl->val, NULL);
+ break;
+ case V4L2_CID_EXPOSURE:
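+ /* assumption: the exposure register holds 1/16-line units, hence the << 4 */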
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_EXPOSURE,
+ ctrl->val << 4, NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = og0ve1b_enable_test_pattern(og0ve1b, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(og0ve1b->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops og0ve1b_ctrl_ops = {
+ .s_ctrl = og0ve1b_set_ctrl,
+};
+
+static int og0ve1b_init_controls(struct og0ve1b *og0ve1b)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr = &og0ve1b->ctrl_handler;
+ const struct og0ve1b_mode *mode = &supported_modes[0];
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, pixel_rate, h_blank;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+
+ ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(og0ve1b_link_freq_menu) - 1,
+ 0, og0ve1b_link_freq_menu);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate = og0ve1b_link_freq_menu[0] / mode->bpp;
+ v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_PIXEL_RATE,
+ 0, pixel_rate, 1, pixel_rate);
+
+ h_blank = mode->hts - mode->width;
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_HBLANK,
+ h_blank, h_blank, 1, h_blank);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_VBLANK,
+ mode->vts - mode->height,
+ mode->vts - mode->height, 1,
+ mode->vts - mode->height);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OG0VE1B_ANALOGUE_GAIN_MIN, OG0VE1B_ANALOGUE_GAIN_MAX,
+ OG0VE1B_ANALOGUE_GAIN_STEP,
+ OG0VE1B_ANALOGUE_GAIN_DEFAULT);
+
+ exposure_max = (mode->vts - OG0VE1B_EXPOSURE_MAX_MARGIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OG0VE1B_EXPOSURE_MIN, exposure_max,
+ OG0VE1B_EXPOSURE_STEP,
+ OG0VE1B_EXPOSURE_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(og0ve1b_test_pattern_menu) - 1,
+ 0, 0, og0ve1b_test_pattern_menu);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ goto error_free_hdlr;
+ }
+
+ ret = v4l2_fwnode_device_parse(og0ve1b->dev, &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &og0ve1b_ctrl_ops,
+ &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ og0ve1b->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error_free_hdlr:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
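+/* The sensor only outputs 8-bit monochrome (Y8), so most fields are fixed */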
+static void og0ve1b_update_pad_format(const struct og0ve1b_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = MEDIA_BUS_FMT_Y8_1X8;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
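+/*
+ * Stream on sequence: software reset, write the mode register table,
+ * apply the V4L2 controls, then take the sensor out of standby.
+ */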
+static int og0ve1b_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ const struct og0ve1b_reg_list *reg_list = &supported_modes[0].reg_list;
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(og0ve1b->dev);
+ if (ret)
+ return ret;
+
+ /* Software reset makes an explicit switch to standby mode unnecessary */
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_SOFTWARE_RST,
+ OG0VE1B_SOFTWARE_RST, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to software reset: %d\n", ret);
+ goto error;
+ }
+
+ ret = cci_multi_reg_write(og0ve1b->regmap, reg_list->regs,
+ reg_list->num_regs, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to set mode: %d\n", ret);
+ goto error;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(og0ve1b->sd.ctrl_handler);
+ if (ret)
+ goto error;
+
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_MODE_SELECT,
+ OG0VE1B_MODE_STREAMING, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to start streaming: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ pm_runtime_put_autosuspend(og0ve1b->dev);
+
+ return ret;
+}
+
+static int og0ve1b_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+ int ret;
+
+ ret = cci_write(og0ve1b->regmap, OG0VE1B_REG_MODE_SELECT,
+ OG0VE1B_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(og0ve1b->dev, "failed to stop streaming: %d\n", ret);
+
+ pm_runtime_put_autosuspend(og0ve1b->dev);
+
+ return ret;
+}
+
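+/* Select the supported mode closest to the requested frame size */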
+static int og0ve1b_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct og0ve1b_mode *mode;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width,
+ fmt->format.height);
+
+ og0ve1b_update_pad_format(mode, &fmt->format);
+ *format = fmt->format;
+
+ return 0;
+}
+
+static int og0ve1b_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_Y8_1X8;
+
+ return 0;
+}
+
+static int og0ve1b_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_Y8_1X8)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int og0ve1b_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .pad = 0,
+ .format = {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ og0ve1b_set_pad_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops og0ve1b_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops og0ve1b_pad_ops = {
+ .set_fmt = og0ve1b_set_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enum_mbus_code = og0ve1b_enum_mbus_code,
+ .enum_frame_size = og0ve1b_enum_frame_size,
+ .enable_streams = og0ve1b_enable_streams,
+ .disable_streams = og0ve1b_disable_streams,
+};
+
+static const struct v4l2_subdev_ops og0ve1b_subdev_ops = {
+ .video = &og0ve1b_video_ops,
+ .pad = &og0ve1b_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops og0ve1b_internal_ops = {
+ .init_state = og0ve1b_init_state,
+};
+
+static const struct media_entity_operations og0ve1b_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
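+/*
+ * Check the chip ID and cache the power-on PRE_ISP value, which serves as
+ * the base value for the test pattern control.
+ */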
+static int og0ve1b_identify_sensor(struct og0ve1b *og0ve1b)
+{
+ u64 val;
+ int ret;
+
+ ret = cci_read(og0ve1b->regmap, OG0VE1B_REG_CHIP_ID, &val, NULL);
+ if (ret) {
+ dev_err(og0ve1b->dev, "failed to read chip id: %d\n", ret);
+ return ret;
+ }
+
+ if (val != OG0VE1B_CHIP_ID) {
+ dev_err(og0ve1b->dev, "chip id mismatch: %x!=%llx\n",
+ OG0VE1B_CHIP_ID, val);
+ return -ENODEV;
+ }
+
+ ret = cci_read(og0ve1b->regmap, OG0VE1B_REG_PRE_ISP,
+ &og0ve1b->pre_isp, NULL);
+ if (ret)
+ dev_err(og0ve1b->dev, "failed to read pre_isp: %d\n", ret);
+
+ return ret;
+}
+
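+/* Validate that the firmware endpoint advertises a supported link frequency */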
+static int og0ve1b_check_hwcfg(struct og0ve1b *og0ve1b)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(og0ve1b->dev), *ep;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ unsigned long freq_bitmap;
+ int ret;
+
+ if (!fwnode)
+ return -ENODEV;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ ret = v4l2_link_freq_to_bitmap(og0ve1b->dev,
+ bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ og0ve1b_link_freq_menu,
+ ARRAY_SIZE(og0ve1b_link_freq_menu),
+ &freq_bitmap);
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
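+/*
+ * Power-up sequence: enable the supplies, deassert reset, give the sensor
+ * 10-15 ms to settle, then start XVCLK.
+ */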
+static int og0ve1b_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(OG0VE1B_NUM_SUPPLIES, og0ve1b->supplies);
+ if (ret)
+ return ret;
+
+ gpiod_set_value_cansleep(og0ve1b->reset_gpio, 0);
+ usleep_range(10 * USEC_PER_MSEC, 15 * USEC_PER_MSEC);
+
+ ret = clk_prepare_enable(og0ve1b->xvclk);
+ if (ret)
+ goto reset_gpio;
+
+ return 0;
+
+reset_gpio:
+ gpiod_set_value_cansleep(og0ve1b->reset_gpio, 1);
+
+ regulator_bulk_disable(OG0VE1B_NUM_SUPPLIES, og0ve1b->supplies);
+
+ return ret;
+}
+
+static int og0ve1b_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+
+ clk_disable_unprepare(og0ve1b->xvclk);
+
+ gpiod_set_value_cansleep(og0ve1b->reset_gpio, 1);
+
+ regulator_bulk_disable(OG0VE1B_NUM_SUPPLIES, og0ve1b->supplies);
+
+ return 0;
+}
+
+static int og0ve1b_probe(struct i2c_client *client)
+{
+ struct og0ve1b *og0ve1b;
+ unsigned long freq;
+ unsigned int i;
+ int ret;
+
+ og0ve1b = devm_kzalloc(&client->dev, sizeof(*og0ve1b), GFP_KERNEL);
+ if (!og0ve1b)
+ return -ENOMEM;
+
+ og0ve1b->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&og0ve1b->sd, client, &og0ve1b_subdev_ops);
+
+ og0ve1b->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(og0ve1b->regmap))
+ return dev_err_probe(og0ve1b->dev, PTR_ERR(og0ve1b->regmap),
+ "failed to init CCI\n");
+
+ og0ve1b->xvclk = devm_v4l2_sensor_clk_get(og0ve1b->dev, NULL);
+ if (IS_ERR(og0ve1b->xvclk))
+ return dev_err_probe(og0ve1b->dev, PTR_ERR(og0ve1b->xvclk),
+ "failed to get XVCLK clock\n");
+
+ freq = clk_get_rate(og0ve1b->xvclk);
+ if (freq && freq != OG0VE1B_MCLK_FREQ_24MHZ)
+ return dev_err_probe(og0ve1b->dev, -EINVAL,
+ "XVCLK clock frequency %lu is not supported\n",
+ freq);
+
+ ret = og0ve1b_check_hwcfg(og0ve1b);
+ if (ret)
+ return dev_err_probe(og0ve1b->dev, ret,
+ "failed to check HW configuration\n");
+
+ og0ve1b->reset_gpio = devm_gpiod_get_optional(og0ve1b->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(og0ve1b->reset_gpio))
+ return dev_err_probe(og0ve1b->dev, PTR_ERR(og0ve1b->reset_gpio),
+ "cannot get reset GPIO\n");
+
+ for (i = 0; i < OG0VE1B_NUM_SUPPLIES; i++)
+ og0ve1b->supplies[i].supply = og0ve1b_supply_names[i];
+
+ ret = devm_regulator_bulk_get(og0ve1b->dev, OG0VE1B_NUM_SUPPLIES,
+ og0ve1b->supplies);
+ if (ret)
+ return dev_err_probe(og0ve1b->dev, ret,
+ "failed to get supply regulators\n");
+
+ /* The sensor must be powered on to read the CHIP_ID register */
+ ret = og0ve1b_power_on(og0ve1b->dev);
+ if (ret)
+ return ret;
+
+ ret = og0ve1b_identify_sensor(og0ve1b);
+ if (ret) {
+ dev_err_probe(og0ve1b->dev, ret, "failed to find sensor\n");
+ goto power_off;
+ }
+
+ ret = og0ve1b_init_controls(og0ve1b);
+ if (ret) {
+ dev_err_probe(og0ve1b->dev, ret, "failed to init controls\n");
+ goto power_off;
+ }
+
+ og0ve1b->sd.state_lock = og0ve1b->ctrl_handler.lock;
+ og0ve1b->sd.internal_ops = &og0ve1b_internal_ops;
+ og0ve1b->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ og0ve1b->sd.entity.ops = &og0ve1b_subdev_entity_ops;
+ og0ve1b->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ og0ve1b->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&og0ve1b->sd.entity, 1, &og0ve1b->pad);
+ if (ret) {
+ dev_err_probe(og0ve1b->dev, ret,
+ "failed to init media entity pads\n");
+ goto v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_subdev_init_finalize(&og0ve1b->sd);
+ if (ret < 0) {
+ dev_err_probe(og0ve1b->dev, ret,
+ "failed to init media entity pads\n");
+ goto media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(og0ve1b->dev);
+ pm_runtime_enable(og0ve1b->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&og0ve1b->sd);
+ if (ret < 0) {
+ dev_err_probe(og0ve1b->dev, ret,
+ "failed to register V4L2 subdev\n");
+ goto subdev_cleanup;
+ }
+
+ /* Runtime PM is enabled above; idle the device and set up autosuspend */
+ pm_runtime_idle(og0ve1b->dev);
+ pm_runtime_set_autosuspend_delay(og0ve1b->dev, 1000);
+ pm_runtime_use_autosuspend(og0ve1b->dev);
+
+ return 0;
+
+subdev_cleanup:
+ v4l2_subdev_cleanup(&og0ve1b->sd);
+ pm_runtime_disable(og0ve1b->dev);
+ pm_runtime_set_suspended(og0ve1b->dev);
+
+media_entity_cleanup:
+ media_entity_cleanup(&og0ve1b->sd.entity);
+
+v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(og0ve1b->sd.ctrl_handler);
+
+power_off:
+ og0ve1b_power_off(og0ve1b->dev);
+
+ return ret;
+}
+
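+/*
+ * On remove, power off manually only if runtime PM has not already
+ * suspended the device, then tell the PM core it is suspended.
+ */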
+static void og0ve1b_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct og0ve1b *og0ve1b = to_og0ve1b(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(og0ve1b->dev);
+
+ if (!pm_runtime_status_suspended(og0ve1b->dev)) {
+ og0ve1b_power_off(og0ve1b->dev);
+ pm_runtime_set_suspended(og0ve1b->dev);
+ }
+}
+
+static const struct dev_pm_ops og0ve1b_pm_ops = {
+ SET_RUNTIME_PM_OPS(og0ve1b_power_off, og0ve1b_power_on, NULL)
+};
+
+static const struct of_device_id og0ve1b_of_match[] = {
+ { .compatible = "ovti,og0ve1b" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, og0ve1b_of_match);
+
+static struct i2c_driver og0ve1b_i2c_driver = {
+ .driver = {
+ .name = "og0ve1b",
+ .pm = &og0ve1b_pm_ops,
+ .of_match_table = og0ve1b_of_match,
+ },
+ .probe = og0ve1b_probe,
+ .remove = og0ve1b_remove,
+};
+
+module_i2c_driver(og0ve1b_i2c_driver);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>");
+MODULE_DESCRIPTION("OmniVision OG0VE1B sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov02a10.c b/drivers/media/i2c/ov02a10.c
index 6c30e1a0d814..70d9d7c43f18 100644
--- a/drivers/media/i2c/ov02a10.c
+++ b/drivers/media/i2c/ov02a10.c
@@ -100,7 +100,8 @@ struct ov02a10_mode {
};
struct ov02a10 {
- u32 eclk_freq;
+ struct device *dev;
+
/* Indication of MIPI transmission speed select */
u32 mipi_clock_voltage;
@@ -392,7 +393,7 @@ static int ov02a10_check_sensor_id(struct ov02a10 *ov02a10)
chip_id = le16_to_cpu((__force __le16)ret);
if ((chip_id & OV02A10_ID_MASK) != OV02A10_ID) {
- dev_err(&client->dev, "unexpected sensor id(0x%04x)\n", chip_id);
+ dev_err(ov02a10->dev, "unexpected sensor id(0x%04x)\n", chip_id);
return -EINVAL;
}
@@ -481,7 +482,7 @@ static int __ov02a10_start_stream(struct ov02a10 *ov02a10)
ret = i2c_smbus_write_byte_data(client, REG_MIRROR_FLIP_CONTROL,
REG_MIRROR_FLIP_ENABLE);
if (ret < 0) {
- dev_err(&client->dev, "failed to set orientation\n");
+ dev_err(ov02a10->dev, "failed to set orientation\n");
return ret;
}
ret = i2c_smbus_write_byte_data(client, REG_GLOBAL_EFFECTIVE,
@@ -530,7 +531,6 @@ static int ov02a10_init_state(struct v4l2_subdev *sd,
static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
{
struct ov02a10 *ov02a10 = to_ov02a10(sd);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02a10->subdev);
int ret;
mutex_lock(&ov02a10->mutex);
@@ -541,7 +541,7 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
}
if (on) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov02a10->dev);
if (ret < 0)
goto unlock_and_return;
@@ -553,7 +553,7 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
}
} else {
__ov02a10_stop_stream(ov02a10);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02a10->dev);
}
ov02a10->streaming = on;
@@ -562,7 +562,7 @@ static int ov02a10_s_stream(struct v4l2_subdev *sd, int on)
return 0;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02a10->dev);
unlock_and_return:
mutex_unlock(&ov02a10->mutex);
@@ -662,7 +662,6 @@ static int ov02a10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov02a10 *ov02a10 = container_of(ctrl->handler,
struct ov02a10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02a10->subdev);
s64 max_expo;
int ret;
@@ -678,7 +677,7 @@ static int ov02a10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov02a10->dev))
return 0;
switch (ctrl->id) {
@@ -699,7 +698,7 @@ static int ov02a10_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02a10->dev);
return ret;
}
@@ -734,7 +733,6 @@ static const struct v4l2_ctrl_ops ov02a10_ctrl_ops = {
static int ov02a10_initialize_controls(struct ov02a10 *ov02a10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02a10->subdev);
const struct ov02a10_mode *mode;
struct v4l2_ctrl_handler *handler;
struct v4l2_ctrl *ctrl;
@@ -790,7 +788,7 @@ static int ov02a10_initialize_controls(struct ov02a10 *ov02a10)
if (handler->error) {
ret = handler->error;
- dev_err(&client->dev, "failed to init controls(%d)\n", ret);
+ dev_err(ov02a10->dev, "failed to init controls(%d)\n", ret);
goto err_free_handler;
}
@@ -866,6 +864,8 @@ static int ov02a10_probe(struct i2c_client *client)
if (!ov02a10)
return -ENOMEM;
+ ov02a10->dev = dev;
+
ret = ov02a10_check_hwcfg(dev, ov02a10);
if (ret)
return dev_err_probe(dev, ret,
@@ -885,22 +885,11 @@ static int ov02a10_probe(struct i2c_client *client)
ov02a10->fmt.code = MEDIA_BUS_FMT_SRGGB10_1X10;
}
- ov02a10->eclk = devm_clk_get(dev, "eclk");
+ ov02a10->eclk = devm_v4l2_sensor_clk_get_legacy(dev, "eclk", false, 0);
if (IS_ERR(ov02a10->eclk))
return dev_err_probe(dev, PTR_ERR(ov02a10->eclk),
"failed to get eclk\n");
- ret = device_property_read_u32(dev, "clock-frequency",
- &ov02a10->eclk_freq);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "failed to get eclk frequency\n");
-
- ret = clk_set_rate(ov02a10->eclk, ov02a10->eclk_freq);
- if (ret < 0)
- return dev_err_probe(dev, ret,
- "failed to set eclk frequency (24MHz)\n");
-
if (clk_get_rate(ov02a10->eclk) != OV02A10_ECLK_FREQ)
dev_warn(dev, "eclk mismatched, mode is based on 24MHz\n");
@@ -985,10 +974,10 @@ static void ov02a10_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- ov02a10_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02a10->dev);
+ if (!pm_runtime_status_suspended(ov02a10->dev))
+ ov02a10_power_off(ov02a10->dev);
+ pm_runtime_set_suspended(ov02a10->dev);
mutex_destroy(&ov02a10->mutex);
}
diff --git a/drivers/media/i2c/ov02c10.c b/drivers/media/i2c/ov02c10.c
index 089a4fd9627c..b1e540eb8326 100644
--- a/drivers/media/i2c/ov02c10.c
+++ b/drivers/media/i2c/ov02c10.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
-#include <linux/version.h>
#include <media/v4l2-cci.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -175,7 +174,7 @@ static const struct reg_sequence sensor_1928x1092_30fps_setting[] = {
{0x3816, 0x01},
{0x3817, 0x01},
- {0x3820, 0xb0},
+ {0x3820, 0xa0},
{0x3821, 0x00},
{0x3822, 0x80},
{0x3823, 0x08},
@@ -373,6 +372,8 @@ static const char * const ov02c10_supply_names[] = {
};
struct ov02c10 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -384,6 +385,8 @@ struct ov02c10 {
struct v4l2_ctrl *vblank;
struct v4l2_ctrl *hblank;
struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
struct clk *img_clk;
struct gpio_desc *reset;
@@ -418,7 +421,6 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov02c10 *ov02c10 = container_of(ctrl->handler,
struct ov02c10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
const u32 height = supported_modes[0].height;
s64 exposure_max;
int ret = 0;
@@ -434,7 +436,7 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov02c10->dev))
return 0;
switch (ctrl->id) {
@@ -462,12 +464,22 @@ static int ov02c10_set_ctrl(struct v4l2_ctrl *ctrl)
ret = ov02c10_test_pattern(ov02c10, ctrl->val);
break;
+ case V4L2_CID_HFLIP:
+ cci_update_bits(ov02c10->regmap, OV02C10_ROTATE_CONTROL,
+ BIT(3), ov02c10->hflip->val << 3, &ret);
+ break;
+
+ case V4L2_CID_VFLIP:
+ cci_update_bits(ov02c10->regmap, OV02C10_ROTATE_CONTROL,
+ BIT(4), ov02c10->vflip->val << 4, &ret);
+ break;
+
default:
ret = -EINVAL;
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02c10->dev);
return ret;
}
@@ -478,7 +490,6 @@ static const struct v4l2_ctrl_ops ov02c10_ctrl_ops = {
static int ov02c10_init_controls(struct ov02c10 *ov02c10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
struct v4l2_ctrl_handler *ctrl_hdlr = &ov02c10->ctrl_handler;
const struct ov02c10_mode *mode = &supported_modes[0];
u32 vblank_min, vblank_max, vblank_default, vts_def;
@@ -486,7 +497,7 @@ static int ov02c10_init_controls(struct ov02c10 *ov02c10)
s64 exposure_max, h_blank, pixel_rate;
int ret;
- v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ v4l2_ctrl_handler_init(ctrl_hdlr, 12);
ov02c10->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
&ov02c10_ctrl_ops,
@@ -537,12 +548,23 @@ static int ov02c10_init_controls(struct ov02c10 *ov02c10)
exposure_max,
OV02C10_EXPOSURE_STEP,
exposure_max);
+
+ ov02c10->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ if (ov02c10->hflip)
+ ov02c10->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
+ ov02c10->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &ov02c10_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ if (ov02c10->vflip)
+ ov02c10->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+
v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov02c10_ctrl_ops,
V4L2_CID_TEST_PATTERN,
ARRAY_SIZE(ov02c10_test_pattern_menu) - 1,
0, 0, ov02c10_test_pattern_menu);
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov02c10->dev, &props);
if (ret)
return ret;
@@ -570,12 +592,11 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
u32 pad, u64 streams_mask)
{
const struct ov02c10_mode *mode = &supported_modes[0];
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02c10 *ov02c10 = to_ov02c10(sd);
const struct reg_sequence *reg_sequence;
int ret, sequence_length;
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov02c10->dev);
if (ret)
return ret;
@@ -584,7 +605,7 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
ret = regmap_multi_reg_write(ov02c10->regmap,
reg_sequence, sequence_length);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(ov02c10->dev, "failed to set mode\n");
goto out;
}
@@ -593,7 +614,7 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
ret = regmap_multi_reg_write(ov02c10->regmap,
reg_sequence, sequence_length);
if (ret) {
- dev_err(&client->dev, "failed to write lane settings\n");
+ dev_err(ov02c10->dev, "failed to write lane settings\n");
goto out;
}
@@ -604,7 +625,7 @@ static int ov02c10_enable_streams(struct v4l2_subdev *sd,
ret = cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 1, NULL);
out:
if (ret)
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02c10->dev);
return ret;
}
@@ -613,11 +634,10 @@ static int ov02c10_disable_streams(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
u32 pad, u64 streams_mask)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02c10 *ov02c10 = to_ov02c10(sd);
cci_write(ov02c10->regmap, OV02C10_REG_STREAM_CONTROL, 0, NULL);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02c10->dev);
return 0;
}
@@ -778,7 +798,6 @@ static const struct v4l2_subdev_internal_ops ov02c10_internal_ops = {
static int ov02c10_identify_module(struct ov02c10 *ov02c10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02c10->sd);
u64 chip_id;
int ret;
@@ -787,7 +806,7 @@ static int ov02c10_identify_module(struct ov02c10 *ov02c10)
return ret;
if (chip_id != OV02C10_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%llx",
+ dev_err(ov02c10->dev, "chip id mismatch: %x!=%llx",
OV02C10_CHIP_ID, chip_id);
return -ENXIO;
}
@@ -795,14 +814,14 @@ static int ov02c10_identify_module(struct ov02c10 *ov02c10)
return 0;
}
-static int ov02c10_check_hwcfg(struct device *dev, struct ov02c10 *ov02c10)
+static int ov02c10_check_hwcfg(struct ov02c10 *ov02c10)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov02c10->dev;
struct fwnode_handle *ep, *fwnode = dev_fwnode(dev);
unsigned long link_freq_bitmap;
- u32 mclk;
int ret;
/*
@@ -814,31 +833,6 @@ static int ov02c10_check_hwcfg(struct device *dev, struct ov02c10 *ov02c10)
return dev_err_probe(dev, -EPROBE_DEFER,
"waiting for fwnode graph endpoint\n");
- ov02c10->img_clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(ov02c10->img_clk)) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, PTR_ERR(ov02c10->img_clk),
- "failed to get imaging clock\n");
- }
-
- if (ov02c10->img_clk) {
- mclk = clk_get_rate(ov02c10->img_clk);
- } else {
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, ret,
- "reading clock-frequency property\n");
- }
- }
-
- if (mclk != OV02C10_MCLK) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, -EINVAL,
- "external clock %u is not supported\n",
- mclk);
- }
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -873,35 +867,50 @@ check_hwcfg_error:
static void ov02c10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov02c10 *ov02c10 = to_ov02c10(sd);
v4l2_async_unregister_subdev(sd);
v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev)) {
- ov02c10_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02c10->dev);
+ if (!pm_runtime_status_suspended(ov02c10->dev)) {
+ ov02c10_power_off(ov02c10->dev);
+ pm_runtime_set_suspended(ov02c10->dev);
}
}
static int ov02c10_probe(struct i2c_client *client)
{
struct ov02c10 *ov02c10;
+ unsigned long freq;
int ret;
ov02c10 = devm_kzalloc(&client->dev, sizeof(*ov02c10), GFP_KERNEL);
if (!ov02c10)
return -ENOMEM;
+ ov02c10->dev = &client->dev;
+
+ ov02c10->img_clk = devm_v4l2_sensor_clk_get(ov02c10->dev, NULL);
+ if (IS_ERR(ov02c10->img_clk))
+ return dev_err_probe(ov02c10->dev, PTR_ERR(ov02c10->img_clk),
+ "failed to get imaging clock\n");
+
+ freq = clk_get_rate(ov02c10->img_clk);
+ if (freq != OV02C10_MCLK)
+ return dev_err_probe(ov02c10->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
v4l2_i2c_subdev_init(&ov02c10->sd, client, &ov02c10_subdev_ops);
/* Check HW config */
- ret = ov02c10_check_hwcfg(&client->dev, ov02c10);
+ ret = ov02c10_check_hwcfg(ov02c10);
if (ret)
return ret;
- ret = ov02c10_get_pm_resources(&client->dev);
+ ret = ov02c10_get_pm_resources(ov02c10->dev);
if (ret)
return ret;
@@ -909,21 +918,21 @@ static int ov02c10_probe(struct i2c_client *client)
if (IS_ERR(ov02c10->regmap))
return PTR_ERR(ov02c10->regmap);
- ret = ov02c10_power_on(&client->dev);
+ ret = ov02c10_power_on(ov02c10->dev);
if (ret) {
- dev_err_probe(&client->dev, ret, "failed to power on\n");
+ dev_err_probe(ov02c10->dev, ret, "failed to power on\n");
return ret;
}
ret = ov02c10_identify_module(ov02c10);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov02c10->dev, "failed to find sensor: %d", ret);
goto probe_error_power_off;
}
ret = ov02c10_init_controls(ov02c10);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov02c10->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -934,33 +943,33 @@ static int ov02c10_probe(struct i2c_client *client)
ov02c10->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov02c10->sd.entity, 1, &ov02c10->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov02c10->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ov02c10->sd.state_lock = ov02c10->ctrl_handler.lock;
ret = v4l2_subdev_init_finalize(&ov02c10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to init subdev: %d", ret);
+ dev_err(ov02c10->dev, "failed to init subdev: %d", ret);
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(ov02c10->dev);
+ pm_runtime_enable(ov02c10->dev);
ret = v4l2_async_register_subdev_sensor(&ov02c10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov02c10->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_v4l2_subdev_cleanup;
}
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(ov02c10->dev);
return 0;
probe_error_v4l2_subdev_cleanup:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02c10->dev);
+ pm_runtime_set_suspended(ov02c10->dev);
v4l2_subdev_cleanup(&ov02c10->sd);
probe_error_media_entity_cleanup:
@@ -970,7 +979,7 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov02c10->sd.ctrl_handler);
probe_error_power_off:
- ov02c10_power_off(&client->dev);
+ ov02c10_power_off(ov02c10->dev);
return ret;
}
diff --git a/drivers/media/i2c/ov02e10.c b/drivers/media/i2c/ov02e10.c
index d74dc62e189d..4a64cba99991 100644
--- a/drivers/media/i2c/ov02e10.c
+++ b/drivers/media/i2c/ov02e10.c
@@ -226,6 +226,8 @@ static const char * const ov02e10_supply_names[] = {
};
struct ov02e10 {
+ struct device *dev;
+
struct regmap *regmap;
struct v4l2_subdev sd;
struct media_pad pad;
@@ -288,7 +290,6 @@ static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov02e10 *ov02e10 = container_of(ctrl->handler,
struct ov02e10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
s64 exposure_max;
int ret;
@@ -307,7 +308,7 @@ static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov02e10->dev))
return 0;
ret = cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
@@ -363,7 +364,7 @@ static int ov02e10_set_ctrl(struct v4l2_ctrl *ctrl)
cci_write(ov02e10->regmap, OV02E10_REG_COMMAND_UPDATE,
OV02E10_COMMAND_UPDATE, &ret);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02e10->dev);
return ret;
}
@@ -374,7 +375,6 @@ static const struct v4l2_ctrl_ops ov02e10_ctrl_ops = {
static int ov02e10_init_controls(struct ov02e10 *ov02e10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
struct v4l2_ctrl_handler *ctrl_hdlr = &ov02e10->ctrl_handler;
const struct ov02e10_mode *mode = ov02e10->cur_mode;
u32 vblank_min, vblank_max, vblank_def;
@@ -442,7 +442,7 @@ static int ov02e10_init_controls(struct ov02e10 *ov02e10)
ARRAY_SIZE(ov02e10_test_pattern_menu) - 1,
0, 0, ov02e10_test_pattern_menu);
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov02e10->dev, &props);
if (ret)
return ret;
@@ -481,12 +481,11 @@ static int ov02e10_enable_streams(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
u32 pad, u64 streams_mask)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02e10 *ov02e10 = to_ov02e10(sd);
const struct reg_sequence_list *reg_list;
int ret;
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov02e10->dev);
if (ret)
return ret;
@@ -494,7 +493,7 @@ static int ov02e10_enable_streams(struct v4l2_subdev *sd,
ret = regmap_multi_reg_write(ov02e10->regmap, reg_list->regs,
reg_list->num_regs);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(ov02e10->dev, "failed to set mode\n");
goto out;
}
@@ -506,7 +505,7 @@ static int ov02e10_enable_streams(struct v4l2_subdev *sd,
out:
if (ret)
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02e10->dev);
return ret;
}
@@ -515,11 +514,10 @@ static int ov02e10_disable_streams(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
u32 pad, u64 streams_mask)
{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ov02e10 *ov02e10 = to_ov02e10(sd);
ov02e10_set_stream_mode(ov02e10, 0);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov02e10->dev);
return 0;
}
@@ -724,7 +722,6 @@ static const struct v4l2_subdev_internal_ops ov02e10_internal_ops = {
static int ov02e10_identify_module(struct ov02e10 *ov02e10)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov02e10->sd);
int ret;
u64 val;
@@ -735,7 +732,7 @@ static int ov02e10_identify_module(struct ov02e10 *ov02e10)
return ret;
if (val != OV02E10_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov02e10->dev, "chip id mismatch: %x!=%x\n",
OV02E10_CHIP_ID, (u32)val);
return -ENXIO;
}
@@ -743,15 +740,15 @@ static int ov02e10_identify_module(struct ov02e10 *ov02e10)
return 0;
}
-static int ov02e10_check_hwcfg(struct device *dev, struct ov02e10 *ov02e10)
+static int ov02e10_check_hwcfg(struct ov02e10 *ov02e10)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov02e10->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
unsigned long link_freq_bitmap;
- u32 ext_clk;
int ret;
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
@@ -764,31 +761,6 @@ static int ov02e10_check_hwcfg(struct device *dev, struct ov02e10 *ov02e10)
if (ret)
return dev_err_probe(dev, ret, "parsing endpoint failed\n");
- ov02e10->img_clk = devm_clk_get_optional(dev, NULL);
- if (IS_ERR(ov02e10->img_clk)) {
- ret = dev_err_probe(dev, PTR_ERR(ov02e10->img_clk),
- "failed to get imaging clock\n");
- goto out_err;
- }
-
- if (ov02e10->img_clk) {
- ext_clk = clk_get_rate(ov02e10->img_clk);
- } else {
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency\n");
- goto out_err;
- }
- }
-
- if (ext_clk != OV02E10_MCLK) {
- dev_err(dev, "external clock %d is not supported\n",
- ext_clk);
- ret = -EINVAL;
- goto out_err;
- }
-
if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV02E10_DATA_LANES) {
dev_err(dev, "number of CSI2 data lanes %d is not supported\n",
bus_cfg.bus.mipi_csi2.num_data_lanes);
@@ -823,32 +795,47 @@ out_err:
static void ov02e10_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov02e10 *ov02e10 = to_ov02e10(sd);
v4l2_async_unregister_subdev(sd);
v4l2_subdev_cleanup(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov02e10->dev);
- if (!pm_runtime_status_suspended(&client->dev)) {
- ov02e10_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (!pm_runtime_status_suspended(ov02e10->dev)) {
+ ov02e10_power_off(ov02e10->dev);
+ pm_runtime_set_suspended(ov02e10->dev);
}
}
static int ov02e10_probe(struct i2c_client *client)
{
struct ov02e10 *ov02e10;
+ unsigned long freq;
int ret;
ov02e10 = devm_kzalloc(&client->dev, sizeof(*ov02e10), GFP_KERNEL);
if (!ov02e10)
return -ENOMEM;
+ ov02e10->dev = &client->dev;
+
+ ov02e10->img_clk = devm_v4l2_sensor_clk_get(ov02e10->dev, NULL);
+ if (IS_ERR(ov02e10->img_clk))
+ return dev_err_probe(ov02e10->dev, PTR_ERR(ov02e10->img_clk),
+ "failed to get imaging clock\n");
+
+ freq = clk_get_rate(ov02e10->img_clk);
+ if (freq != OV02E10_MCLK)
+ return dev_err_probe(ov02e10->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
v4l2_i2c_subdev_init(&ov02e10->sd, client, &ov02e10_subdev_ops);
/* Check HW config */
- ret = ov02e10_check_hwcfg(&client->dev, ov02e10);
+ ret = ov02e10_check_hwcfg(ov02e10);
if (ret)
return ret;
@@ -857,27 +844,27 @@ static int ov02e10_probe(struct i2c_client *client)
if (IS_ERR(ov02e10->regmap))
return PTR_ERR(ov02e10->regmap);
- ret = ov02e10_get_pm_resources(&client->dev);
+ ret = ov02e10_get_pm_resources(ov02e10->dev);
if (ret)
return ret;
- ret = ov02e10_power_on(&client->dev);
+ ret = ov02e10_power_on(ov02e10->dev);
if (ret) {
- dev_err_probe(&client->dev, ret, "failed to power on\n");
+ dev_err_probe(ov02e10->dev, ret, "failed to power on\n");
return ret;
}
/* Check module identity */
ret = ov02e10_identify_module(ov02e10);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(ov02e10->dev, "failed to find sensor: %d\n", ret);
goto probe_error_power_off;
}
ov02e10->cur_mode = &supported_modes[0];
ret = ov02e10_init_controls(ov02e10);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d\n", ret);
+ dev_err(ov02e10->dev, "failed to init controls: %d\n", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -891,33 +878,33 @@ static int ov02e10_probe(struct i2c_client *client)
ov02e10->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov02e10->sd.entity, 1, &ov02e10->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov02e10->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ov02e10->sd.state_lock = ov02e10->ctrl_handler.lock;
ret = v4l2_subdev_init_finalize(&ov02e10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to init subdev: %d", ret);
+ dev_err(ov02e10->dev, "failed to init subdev: %d", ret);
goto probe_error_media_entity_cleanup;
}
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(ov02e10->dev);
+ pm_runtime_enable(ov02e10->dev);
ret = v4l2_async_register_subdev_sensor(&ov02e10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov02e10->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_v4l2_subdev_cleanup;
}
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(ov02e10->dev);
return 0;
probe_error_v4l2_subdev_cleanup:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov02e10->dev);
+ pm_runtime_set_suspended(ov02e10->dev);
v4l2_subdev_cleanup(&ov02e10->sd);
probe_error_media_entity_cleanup:
@@ -927,7 +914,7 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov02e10->sd.ctrl_handler);
probe_error_power_off:
- ov02e10_power_off(&client->dev);
+ ov02e10_power_off(ov02e10->dev);
return ret;
}
@@ -961,7 +948,7 @@ static struct i2c_driver ov02e10_i2c_driver = {
module_i2c_driver(ov02e10_i2c_driver);
-MODULE_AUTHOR("Jingjing Xiong <jingjing.xiong@intel.com>");
+MODULE_AUTHOR("Jingjing Xiong");
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_AUTHOR("Alan Stern <stern@rowland.harvard.edu>");
MODULE_AUTHOR("Bryan O'Donoghue <bryan.odonoghue@linaro.org>");
diff --git a/drivers/media/i2c/ov08d10.c b/drivers/media/i2c/ov08d10.c
index 1bacbdfa4298..43ec2a1f2fcf 100644
--- a/drivers/media/i2c/ov08d10.c
+++ b/drivers/media/i2c/ov08d10.c
@@ -515,12 +515,13 @@ static const char * const ov08d10_test_pattern_menu[] = {
};
struct ov08d10 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
- struct clk *xvclk;
-
/* V4L2 Controls */
struct v4l2_ctrl *link_freq;
struct v4l2_ctrl *pixel_rate;
@@ -663,7 +664,7 @@ static int ov08d10_write_reg_list(struct ov08d10 *ov08d10,
ret = i2c_smbus_write_byte_data(client, r_list->regs[i].address,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov08d10->dev,
"failed to write reg 0x%2.2x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -849,7 +850,6 @@ static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov08d10 *ov08d10 = container_of(ctrl->handler,
struct ov08d10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov08d10->sd);
s64 exposure_max;
int ret;
@@ -865,7 +865,7 @@ static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov08d10->dev))
return 0;
switch (ctrl->id) {
@@ -901,7 +901,7 @@ static int ov08d10_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08d10->dev);
return ret;
}
@@ -1025,32 +1025,32 @@ static int ov08d10_start_streaming(struct ov08d10 *ov08d10)
/* soft reset */
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
if (ret < 0) {
- dev_err(&client->dev, "failed to reset sensor");
+ dev_err(ov08d10->dev, "failed to reset sensor");
return ret;
}
ret = i2c_smbus_write_byte_data(client, 0x20, 0x0e);
if (ret < 0) {
- dev_err(&client->dev, "failed to reset sensor");
+ dev_err(ov08d10->dev, "failed to reset sensor");
return ret;
}
usleep_range(3000, 4000);
ret = i2c_smbus_write_byte_data(client, 0x20, 0x0b);
if (ret < 0) {
- dev_err(&client->dev, "failed to reset sensor");
+ dev_err(ov08d10->dev, "failed to reset sensor");
return ret;
}
/* update sensor setting */
ret = ov08d10_write_reg_list(ov08d10, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov08d10->dev, "failed to set plls");
return ret;
}
reg_list = &ov08d10->cur_mode->reg_list;
ret = ov08d10_write_reg_list(ov08d10, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov08d10->dev, "failed to set mode");
return ret;
}
@@ -1077,19 +1077,19 @@ static void ov08d10_stop_streaming(struct ov08d10 *ov08d10)
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x00);
if (ret < 0) {
- dev_err(&client->dev, "failed to stop streaming");
+ dev_err(ov08d10->dev, "failed to stop streaming");
return;
}
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_MODE_SELECT,
OV08D10_MODE_STANDBY);
if (ret < 0) {
- dev_err(&client->dev, "failed to stop streaming");
+ dev_err(ov08d10->dev, "failed to stop streaming");
return;
}
ret = i2c_smbus_write_byte_data(client, OV08D10_REG_PAGE, 0x01);
if (ret < 0) {
- dev_err(&client->dev, "failed to stop streaming");
+ dev_err(ov08d10->dev, "failed to stop streaming");
return;
}
}
@@ -1097,12 +1097,11 @@ static void ov08d10_stop_streaming(struct ov08d10 *ov08d10)
static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08d10 *ov08d10 = to_ov08d10(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov08d10->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov08d10->dev);
if (ret < 0) {
mutex_unlock(&ov08d10->mutex);
return ret;
@@ -1112,11 +1111,11 @@ static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov08d10_stop_streaming(ov08d10);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08d10->dev);
}
} else {
ov08d10_stop_streaming(ov08d10);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08d10->dev);
}
/* vflip and hflip cannot change during streaming */
@@ -1293,7 +1292,7 @@ static int ov08d10_identify_module(struct ov08d10 *ov08d10)
chip_id = val | ret;
if ((chip_id & OV08D10_ID_MASK) != OV08D10_CHIP_ID) {
- dev_err(&client->dev, "unexpected sensor id(0x%04x)\n",
+ dev_err(ov08d10->dev, "unexpected sensor id(0x%04x)\n",
chip_id);
return -EINVAL;
}
@@ -1301,28 +1300,20 @@ static int ov08d10_identify_module(struct ov08d10 *ov08d10)
return 0;
}
-static int ov08d10_get_hwcfg(struct ov08d10 *ov08d10, struct device *dev)
+static int ov08d10_get_hwcfg(struct ov08d10 *ov08d10)
{
+ struct device *dev = ov08d10->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 xvclk_rate;
unsigned int i, j;
int ret;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &xvclk_rate);
- if (ret)
- return ret;
-
- if (xvclk_rate != OV08D10_XVCLK_19_2)
- dev_warn(dev, "external clock rate %u is unsupported",
- xvclk_rate);
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -1380,22 +1371,35 @@ static void ov08d10_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov08d10->dev);
mutex_destroy(&ov08d10->mutex);
}
static int ov08d10_probe(struct i2c_client *client)
{
struct ov08d10 *ov08d10;
+ unsigned long freq;
int ret;
ov08d10 = devm_kzalloc(&client->dev, sizeof(*ov08d10), GFP_KERNEL);
if (!ov08d10)
return -ENOMEM;
- ret = ov08d10_get_hwcfg(ov08d10, &client->dev);
+ ov08d10->dev = &client->dev;
+
+ ov08d10->clk = devm_v4l2_sensor_clk_get(ov08d10->dev, NULL);
+ if (IS_ERR(ov08d10->clk))
+ return dev_err_probe(ov08d10->dev, PTR_ERR(ov08d10->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(ov08d10->clk);
+ if (freq != OV08D10_XVCLK_19_2)
+ dev_warn(ov08d10->dev,
+ "external clock rate %lu is not supported\n", freq);
+
+ ret = ov08d10_get_hwcfg(ov08d10);
if (ret) {
- dev_err(&client->dev, "failed to get HW configuration: %d",
+ dev_err(ov08d10->dev, "failed to get HW configuration: %d",
ret);
return ret;
}
@@ -1404,7 +1408,7 @@ static int ov08d10_probe(struct i2c_client *client)
ret = ov08d10_identify_module(ov08d10);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov08d10->dev, "failed to find sensor: %d", ret);
return ret;
}
@@ -1412,7 +1416,7 @@ static int ov08d10_probe(struct i2c_client *client)
ov08d10->cur_mode = &ov08d10->priv_lane->sp_modes[0];
ret = ov08d10_init_controls(ov08d10);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov08d10->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1422,13 +1426,13 @@ static int ov08d10_probe(struct i2c_client *client)
ov08d10->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov08d10->sd.entity, 1, &ov08d10->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov08d10->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&ov08d10->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov08d10->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
@@ -1437,9 +1441,9 @@ static int ov08d10_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov08d10->dev);
+ pm_runtime_enable(ov08d10->dev);
+ pm_runtime_idle(ov08d10->dev);
return 0;
diff --git a/drivers/media/i2c/ov08x40.c b/drivers/media/i2c/ov08x40.c
index e0094305ca2a..5eaf454f4763 100644
--- a/drivers/media/i2c/ov08x40.c
+++ b/drivers/media/i2c/ov08x40.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2022 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
-#include <linux/i2c.h>
+#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
@@ -1305,6 +1306,8 @@ static const char * const ov08x40_supply_names[] = {
};
struct ov08x40 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1513,7 +1516,6 @@ static int ov08x40_write_reg(struct ov08x40 *ov08x,
static int ov08x40_write_regs(struct ov08x40 *ov08x,
const struct ov08x40_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
int ret;
u32 i;
@@ -1522,7 +1524,7 @@ static int ov08x40_write_regs(struct ov08x40 *ov08x,
regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov08x->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -1648,7 +1650,7 @@ static int ov08x40_set_ctrl_hflip(struct ov08x40 *ov08x, u32 ctrl_val)
return ov08x40_write_reg(ov08x, OV08X40_REG_MIRROR,
OV08X40_REG_VALUE_08BIT,
- ctrl_val ? val | BIT(2) : val & ~BIT(2));
+ ctrl_val ? val & ~BIT(2) : val | BIT(2));
}
static int ov08x40_set_ctrl_vflip(struct ov08x40 *ov08x, u32 ctrl_val)
@@ -1670,7 +1672,6 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov08x40 *ov08x = container_of(ctrl->handler,
struct ov08x40, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
s64 max;
int exp;
int fll;
@@ -1699,7 +1700,7 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov08x->dev))
return 0;
switch (ctrl->id) {
@@ -1737,13 +1738,13 @@ static int ov08x40_set_ctrl(struct v4l2_ctrl *ctrl)
ov08x40_set_ctrl_vflip(ov08x, ctrl->val);
break;
default:
- dev_info(&client->dev,
+ dev_info(ov08x->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08x->dev);
return ret;
}
@@ -1912,7 +1913,6 @@ ov08x40_set_pad_format(struct v4l2_subdev *sd,
static int ov08x40_start_streaming(struct ov08x40 *ov08x)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
const struct ov08x40_reg_list *reg_list;
int ret, link_freq_index;
@@ -1920,7 +1920,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
ret = ov08x40_write_reg(ov08x, OV08X40_REG_SOFTWARE_RST,
OV08X40_REG_VALUE_08BIT, OV08X40_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov08x->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -1930,14 +1930,14 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
ret = ov08x40_write_reg_list(ov08x, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov08x->dev, "%s failed to set plls\n", __func__);
return ret;
}
reg_list = &ov08x40_global_setting;
ret = ov08x40_write_reg_list(ov08x, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set global setting\n",
+ dev_err(ov08x->dev, "%s failed to set global setting\n",
__func__);
return ret;
}
@@ -1946,7 +1946,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
reg_list = &ov08x->cur_mode->reg_list;
ret = ov08x40_write_reg_list(ov08x, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov08x->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1962,7 +1962,7 @@ static int ov08x40_start_streaming(struct ov08x40 *ov08x)
}
if (ret) {
- dev_err(&client->dev, "%s failed to set regs\n", __func__);
+ dev_err(ov08x->dev, "%s failed to set regs\n", __func__);
return ret;
}
@@ -1986,7 +1986,6 @@ static int ov08x40_stop_streaming(struct ov08x40 *ov08x)
/* Verify chip ID */
static int ov08x40_identify_module(struct ov08x40 *ov08x)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
int ret;
u32 val;
@@ -1996,17 +1995,17 @@ static int ov08x40_identify_module(struct ov08x40 *ov08x)
ret = ov08x40_read_reg(ov08x, OV08X40_REG_CHIP_ID,
OV08X40_REG_VALUE_24BIT, &val);
if (ret) {
- dev_err(&client->dev, "error reading chip-id register: %d\n", ret);
+ dev_err(ov08x->dev, "error reading chip-id register: %d\n", ret);
return ret;
}
if (val != OV08X40_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov08x->dev, "chip id mismatch: %x!=%x\n",
OV08X40_CHIP_ID, val);
return -ENXIO;
}
- dev_dbg(&client->dev, "chip id 0x%x\n", val);
+ dev_dbg(ov08x->dev, "chip id 0x%x\n", val);
ov08x->identified = true;
return 0;
@@ -2015,13 +2014,12 @@ static int ov08x40_identify_module(struct ov08x40 *ov08x)
static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov08x40 *ov08x = to_ov08x40(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov08x->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov08x->dev);
if (ret < 0)
goto err_unlock;
@@ -2038,7 +2036,7 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
ov08x40_stop_streaming(ov08x);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08x->dev);
}
mutex_unlock(&ov08x->mutex);
@@ -2046,7 +2044,7 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov08x->dev);
err_unlock:
mutex_unlock(&ov08x->mutex);
@@ -2079,7 +2077,6 @@ static const struct v4l2_subdev_internal_ops ov08x40_internal_ops = {
static int ov08x40_init_controls(struct ov08x40 *ov08x)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov08x->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -2160,12 +2157,12 @@ static int ov08x40_init_controls(struct ov08x40 *ov08x)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(ov08x->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov08x->dev, &props);
if (ret)
goto error;
@@ -2191,11 +2188,12 @@ static void ov08x40_free_controls(struct ov08x40 *ov08x)
mutex_destroy(&ov08x->mutex);
}
-static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
+static int ov08x40_check_hwcfg(struct ov08x40 *ov08x)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov08x->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
unsigned int i;
@@ -2232,23 +2230,14 @@ static int ov08x40_check_hwcfg(struct ov08x40 *ov08x, struct device *dev)
if (ret)
goto out_err;
- ov08x->xvclk = devm_clk_get_optional(dev, NULL);
+ ov08x->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov08x->xvclk)) {
ret = dev_err_probe(dev, PTR_ERR(ov08x->xvclk),
"getting xvclk\n");
goto out_err;
}
- if (ov08x->xvclk) {
- xvclk_rate = clk_get_rate(ov08x->xvclk);
- } else {
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &xvclk_rate);
- if (ret) {
- dev_err(dev, "can't get clock frequency\n");
- goto out_err;
- }
- }
+ xvclk_rate = clk_get_rate(ov08x->xvclk);
if (xvclk_rate != OV08X40_XVCLK) {
dev_err(dev, "external clock %d is not supported\n",
xvclk_rate);
@@ -2294,19 +2283,21 @@ static int ov08x40_probe(struct i2c_client *client)
if (!ov08x)
return -ENOMEM;
+ ov08x->dev = &client->dev;
+
/* Check HW config */
- ret = ov08x40_check_hwcfg(ov08x, &client->dev);
+ ret = ov08x40_check_hwcfg(ov08x);
if (ret)
return ret;
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov08x->sd, client, &ov08x40_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov08x->dev);
if (full_power) {
- ret = ov08x40_power_on(&client->dev);
+ ret = ov08x40_power_on(ov08x->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on\n");
+ dev_err(ov08x->dev, "failed to power on\n");
return ret;
}
@@ -2333,7 +2324,7 @@ static int ov08x40_probe(struct i2c_client *client)
ov08x->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov08x->sd.entity, 1, &ov08x->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov08x->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -2342,9 +2333,9 @@ static int ov08x40_probe(struct i2c_client *client)
goto error_media_entity;
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov08x->dev);
+ pm_runtime_enable(ov08x->dev);
+ pm_runtime_idle(ov08x->dev);
return 0;
@@ -2355,7 +2346,7 @@ error_handler_free:
ov08x40_free_controls(ov08x);
probe_power_off:
- ov08x40_power_off(&client->dev);
+ ov08x40_power_off(ov08x->dev);
return ret;
}
@@ -2369,10 +2360,10 @@ static void ov08x40_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
ov08x40_free_controls(ov08x);
- pm_runtime_disable(&client->dev);
- if (!pm_runtime_status_suspended(&client->dev))
- ov08x40_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov08x->dev);
+ if (!pm_runtime_status_suspended(ov08x->dev))
+ ov08x40_power_off(ov08x->dev);
+ pm_runtime_set_suspended(ov08x->dev);
}
static DEFINE_RUNTIME_DEV_PM_OPS(ov08x40_pm_ops, ov08x40_power_off,
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 7a3fc1d28514..162b49046990 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -2,6 +2,7 @@
// Copyright (c) 2017 Intel Corporation.
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -1028,6 +1029,9 @@ static const struct ov13858_mode supported_modes[] = {
};
struct ov13858 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1117,7 +1121,6 @@ static int ov13858_write_reg(struct ov13858 *ov13858, u16 reg, u32 len,
static int ov13858_write_regs(struct ov13858 *ov13858,
const struct ov13858_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
int ret;
u32 i;
@@ -1126,7 +1129,7 @@ static int ov13858_write_regs(struct ov13858 *ov13858,
regs[i].val);
if (ret) {
dev_err_ratelimited(
- &client->dev,
+ ov13858->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -1209,7 +1212,6 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov13858 *ov13858 = container_of(ctrl->handler,
struct ov13858, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
s64 max;
int ret;
@@ -1228,7 +1230,7 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov13858->dev))
return 0;
ret = 0;
@@ -1256,13 +1258,13 @@ static int ov13858_set_ctrl(struct v4l2_ctrl *ctrl)
ret = ov13858_enable_test_pattern(ov13858, ctrl->val);
break;
default:
- dev_info(&client->dev,
+ dev_info(ov13858->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13858->dev);
return ret;
}
@@ -1408,7 +1410,6 @@ static int ov13858_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
/* Start streaming */
static int ov13858_start_streaming(struct ov13858 *ov13858)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
const struct ov13858_reg_list *reg_list;
int ret, link_freq_index;
@@ -1416,7 +1417,7 @@ static int ov13858_start_streaming(struct ov13858 *ov13858)
ret = ov13858_write_reg(ov13858, OV13858_REG_SOFTWARE_RST,
OV13858_REG_VALUE_08BIT, OV13858_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov13858->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -1426,7 +1427,7 @@ static int ov13858_start_streaming(struct ov13858 *ov13858)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov13858_write_reg_list(ov13858, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov13858->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -1434,7 +1435,7 @@ static int ov13858_start_streaming(struct ov13858 *ov13858)
reg_list = &ov13858->cur_mode->reg_list;
ret = ov13858_write_reg_list(ov13858, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov13858->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1458,13 +1459,12 @@ static int ov13858_stop_streaming(struct ov13858 *ov13858)
static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov13858 *ov13858 = to_ov13858(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov13858->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov13858->dev);
if (ret < 0)
goto err_unlock;
@@ -1477,7 +1477,7 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
ov13858_stop_streaming(ov13858);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13858->dev);
}
mutex_unlock(&ov13858->mutex);
@@ -1485,7 +1485,7 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13858->dev);
err_unlock:
mutex_unlock(&ov13858->mutex);
@@ -1495,7 +1495,6 @@ err_unlock:
/* Verify chip ID */
static int ov13858_identify_module(struct ov13858 *ov13858)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
int ret;
u32 val;
@@ -1505,7 +1504,7 @@ static int ov13858_identify_module(struct ov13858 *ov13858)
return ret;
if (val != OV13858_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov13858->dev, "chip id mismatch: %x!=%x\n",
OV13858_CHIP_ID, val);
return -EIO;
}
@@ -1552,7 +1551,6 @@ static const struct v4l2_subdev_internal_ops ov13858_internal_ops = {
/* Initialize control handlers */
static int ov13858_init_controls(struct ov13858 *ov13858)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13858->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -1626,12 +1624,12 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
0, 0, ov13858_test_pattern_menu);
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(ov13858->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov13858->dev, &props);
if (ret)
goto error;
@@ -1660,24 +1658,33 @@ static void ov13858_free_controls(struct ov13858 *ov13858)
static int ov13858_probe(struct i2c_client *client)
{
struct ov13858 *ov13858;
+ unsigned long freq;
int ret;
- u32 val = 0;
-
- device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != 19200000)
- return -EINVAL;
ov13858 = devm_kzalloc(&client->dev, sizeof(*ov13858), GFP_KERNEL);
if (!ov13858)
return -ENOMEM;
+ ov13858->dev = &client->dev;
+
+ ov13858->clk = devm_v4l2_sensor_clk_get(ov13858->dev, NULL);
+ if (IS_ERR(ov13858->clk))
+ return dev_err_probe(ov13858->dev, PTR_ERR(ov13858->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(ov13858->clk);
+ if (freq != 19200000)
+ return dev_err_probe(ov13858->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov13858->sd, client, &ov13858_subdev_ops);
/* Check module identity */
ret = ov13858_identify_module(ov13858);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(ov13858->dev, "failed to find sensor: %d\n", ret);
return ret;
}
@@ -1699,7 +1706,7 @@ static int ov13858_probe(struct i2c_client *client)
ov13858->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov13858->sd.entity, 1, &ov13858->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13858->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -1711,9 +1718,9 @@ static int ov13858_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov13858->dev);
+ pm_runtime_enable(ov13858->dev);
+ pm_runtime_idle(ov13858->dev);
return 0;
@@ -1722,7 +1729,7 @@ error_media_entity:
error_handler_free:
ov13858_free_controls(ov13858);
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13858->dev, "%s failed:%d\n", __func__, ret);
return ret;
}
@@ -1736,7 +1743,7 @@ static void ov13858_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
ov13858_free_controls(ov13858);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov13858->dev);
}
static const struct i2c_device_id ov13858_id_table[] = {
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
index e85c7d33a670..5421874732bc 100644
--- a/drivers/media/i2c/ov13b10.c
+++ b/drivers/media/i2c/ov13b10.c
@@ -700,6 +700,8 @@ static const struct ov13b10_mode supported_2_lanes_modes[] = {
};
struct ov13b10 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
@@ -805,7 +807,6 @@ static int ov13b10_write_reg(struct ov13b10 *ov13b,
static int ov13b10_write_regs(struct ov13b10 *ov13b,
const struct ov13b10_reg *regs, u32 len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
int ret;
u32 i;
@@ -813,7 +814,7 @@ static int ov13b10_write_regs(struct ov13b10 *ov13b,
ret = ov13b10_write_reg(ov13b, regs[i].address, 1,
regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov13b->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -968,7 +969,6 @@ static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov13b10 *ov13b = container_of(ctrl->handler,
struct ov13b10, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
s64 max;
int ret;
@@ -987,7 +987,7 @@ static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
* Applying V4L2 control value only happens
* when power is up for streaming
*/
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov13b->dev))
return 0;
ret = 0;
@@ -1021,13 +1021,13 @@ static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
ov13b10_set_ctrl_vflip(ov13b, ctrl->val);
break;
default:
- dev_info(&client->dev,
+ dev_info(ov13b->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13b->dev);
return ret;
}
@@ -1166,7 +1166,6 @@ ov13b10_set_pad_format(struct v4l2_subdev *sd,
/* Verify chip ID */
static int ov13b10_identify_module(struct ov13b10 *ov13b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
int ret;
u32 val;
@@ -1179,7 +1178,7 @@ static int ov13b10_identify_module(struct ov13b10 *ov13b)
return ret;
if (val != OV13B10_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov13b->dev, "chip id mismatch: %x!=%x\n",
OV13B10_CHIP_ID, val);
return -EIO;
}
@@ -1234,7 +1233,6 @@ static int ov13b10_power_on(struct device *dev)
static int ov13b10_start_streaming(struct ov13b10 *ov13b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
const struct ov13b10_reg_list *reg_list;
int ret, link_freq_index;
@@ -1246,7 +1244,7 @@ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
ret = ov13b10_write_reg(ov13b, OV13B10_REG_SOFTWARE_RST,
OV13B10_REG_VALUE_08BIT, OV13B10_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov13b->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -1255,7 +1253,7 @@ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov13b10_write_reg_list(ov13b, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov13b->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -1263,7 +1261,7 @@ static int ov13b10_start_streaming(struct ov13b10 *ov13b)
reg_list = &ov13b->cur_mode->reg_list;
ret = ov13b10_write_reg_list(ov13b, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov13b->dev, "%s failed to set mode\n", __func__);
return ret;
}
@@ -1287,13 +1285,12 @@ static int ov13b10_stop_streaming(struct ov13b10 *ov13b)
static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov13b10 *ov13b = to_ov13b10(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov13b->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov13b->dev);
if (ret < 0)
goto err_unlock;
@@ -1306,7 +1303,7 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
goto err_rpm_put;
} else {
ov13b10_stop_streaming(ov13b);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13b->dev);
}
mutex_unlock(&ov13b->mutex);
@@ -1314,7 +1311,7 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
return ret;
err_rpm_put:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov13b->dev);
err_unlock:
mutex_unlock(&ov13b->mutex);
@@ -1360,7 +1357,6 @@ static const struct v4l2_subdev_internal_ops ov13b10_internal_ops = {
/* Initialize control handlers */
static int ov13b10_init_controls(struct ov13b10 *ov13b)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max;
@@ -1443,12 +1439,12 @@ static int ov13b10_init_controls(struct ov13b10 *ov13b)
if (ctrl_hdlr->error) {
ret = ctrl_hdlr->error;
- dev_err(&client->dev, "%s control init failed (%d)\n",
+ dev_err(ov13b->dev, "%s control init failed (%d)\n",
__func__, ret);
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov13b->dev, &props);
if (ret)
goto error;
@@ -1474,44 +1470,49 @@ static void ov13b10_free_controls(struct ov13b10 *ov13b)
mutex_destroy(&ov13b->mutex);
}
-static int ov13b10_get_pm_resources(struct device *dev)
+static int ov13b10_get_pm_resources(struct ov13b10 *ov13b)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov13b10 *ov13b = to_ov13b10(sd);
+ unsigned long freq;
int ret;
- ov13b->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ ov13b->reset = devm_gpiod_get_optional(ov13b->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ov13b->reset))
- return dev_err_probe(dev, PTR_ERR(ov13b->reset),
+ return dev_err_probe(ov13b->dev, PTR_ERR(ov13b->reset),
"failed to get reset gpio\n");
- ov13b->img_clk = devm_clk_get_optional(dev, NULL);
+ ov13b->img_clk = devm_v4l2_sensor_clk_get(ov13b->dev, NULL);
if (IS_ERR(ov13b->img_clk))
- return dev_err_probe(dev, PTR_ERR(ov13b->img_clk),
+ return dev_err_probe(ov13b->dev, PTR_ERR(ov13b->img_clk),
"failed to get imaging clock\n");
- ov13b->avdd = devm_regulator_get_optional(dev, "avdd");
+ freq = clk_get_rate(ov13b->img_clk);
+ if (freq != OV13B10_EXT_CLK)
+ return dev_err_probe(ov13b->dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
+ ov13b->avdd = devm_regulator_get_optional(ov13b->dev, "avdd");
if (IS_ERR(ov13b->avdd)) {
ret = PTR_ERR(ov13b->avdd);
ov13b->avdd = NULL;
if (ret != -ENODEV)
- return dev_err_probe(dev, ret,
+ return dev_err_probe(ov13b->dev, ret,
"failed to get avdd regulator\n");
}
return 0;
}
-static int ov13b10_check_hwcfg(struct device *dev, struct ov13b10 *ov13b)
+static int ov13b10_check_hwcfg(struct ov13b10 *ov13b)
{
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
+ struct device *dev = ov13b->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
unsigned int i, j;
int ret;
- u32 ext_clk;
u8 dlane;
if (!fwnode)
@@ -1521,19 +1522,6 @@ static int ov13b10_check_hwcfg(struct device *dev, struct ov13b10 *ov13b)
if (!ep)
return -EPROBE_DEFER;
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &ext_clk);
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
-
- if (ext_clk != OV13B10_EXT_CLK) {
- dev_err(dev, "external clock %d is not supported",
- ext_clk);
- return -EINVAL;
- }
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -1602,32 +1590,34 @@ static int ov13b10_probe(struct i2c_client *client)
if (!ov13b)
return -ENOMEM;
+ ov13b->dev = &client->dev;
+
/* Check HW config */
- ret = ov13b10_check_hwcfg(&client->dev, ov13b);
+ ret = ov13b10_check_hwcfg(ov13b);
if (ret) {
- dev_err(&client->dev, "failed to check hwcfg: %d", ret);
+ dev_err(ov13b->dev, "failed to check hwcfg: %d", ret);
return ret;
}
/* Initialize subdev */
v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
- ret = ov13b10_get_pm_resources(&client->dev);
+ ret = ov13b10_get_pm_resources(ov13b);
if (ret)
return ret;
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov13b->dev);
if (full_power) {
- ret = ov13b10_power_on(&client->dev);
+ ret = ov13b10_power_on(ov13b->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on\n");
+ dev_err(ov13b->dev, "failed to power on\n");
return ret;
}
/* Check module identity */
ret = ov13b10_identify_module(ov13b);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ dev_err(ov13b->dev, "failed to find sensor: %d\n", ret);
goto error_power_off;
}
}
@@ -1646,7 +1636,7 @@ static int ov13b10_probe(struct i2c_client *client)
ov13b->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov13b->sd.entity, 1, &ov13b->pad);
if (ret) {
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13b->dev, "%s failed:%d\n", __func__, ret);
goto error_handler_free;
}
@@ -1657,9 +1647,9 @@ static int ov13b10_probe(struct i2c_client *client)
*/
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov13b->dev);
+ pm_runtime_enable(ov13b->dev);
+ pm_runtime_idle(ov13b->dev);
ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
if (ret < 0)
@@ -1668,17 +1658,17 @@ static int ov13b10_probe(struct i2c_client *client)
return 0;
error_media_entity_runtime_pm:
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov13b->dev);
if (full_power)
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_set_suspended(ov13b->dev);
media_entity_cleanup(&ov13b->sd.entity);
error_handler_free:
ov13b10_free_controls(ov13b);
- dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ dev_err(ov13b->dev, "%s failed:%d\n", __func__, ret);
error_power_off:
- ov13b10_power_off(&client->dev);
+ ov13b10_power_off(ov13b->dev);
return ret;
}
@@ -1692,8 +1682,8 @@ static void ov13b10_remove(struct i2c_client *client)
media_entity_cleanup(&sd->entity);
ov13b10_free_controls(ov13b);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov13b->dev);
+ pm_runtime_set_suspended(ov13b->dev);
}
static DEFINE_RUNTIME_DEV_PM_OPS(ov13b10_pm_ops, ov13b10_suspend,
@@ -1703,6 +1693,7 @@ static DEFINE_RUNTIME_DEV_PM_OPS(ov13b10_pm_ops, ov13b10_suspend,
static const struct acpi_device_id ov13b10_acpi_ids[] = {
{"OVTIDB10"},
{"OVTI13B1"},
+ {"OMNI13B1"}, /* ASUS ROG Flow Z13 (GZ302) uses this ACPI ID */
{ /* sentinel */ }
};
diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
index 586b31ba076b..061401b020fc 100644
--- a/drivers/media/i2c/ov2659.c
+++ b/drivers/media/i2c/ov2659.c
@@ -1437,9 +1437,10 @@ static int ov2659_probe(struct i2c_client *client)
ov2659->pdata = pdata;
ov2659->client = client;
- ov2659->clk = devm_clk_get(&client->dev, "xvclk");
+ ov2659->clk = devm_v4l2_sensor_clk_get(&client->dev, "xvclk");
if (IS_ERR(ov2659->clk))
- return PTR_ERR(ov2659->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(ov2659->clk),
+ "failed to get xvclk\n");
ov2659->xvclk_frequency = clk_get_rate(ov2659->clk);
if (ov2659->xvclk_frequency < 6000000 ||
diff --git a/drivers/media/i2c/ov2680.c b/drivers/media/i2c/ov2680.c
index 7237fb27ecd0..78e63bd1b35b 100644
--- a/drivers/media/i2c/ov2680.c
+++ b/drivers/media/i2c/ov2680.c
@@ -1079,7 +1079,6 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
struct device *dev = sensor->dev;
struct fwnode_handle *ep_fwnode;
struct gpio_desc *gpio;
- unsigned int rate = 0;
int i, ret;
/*
@@ -1114,38 +1113,14 @@ static int ov2680_parse_dt(struct ov2680_dev *sensor)
sensor->pwdn_gpio = gpio;
- sensor->xvclk = devm_clk_get_optional(dev, "xvclk");
+ sensor->xvclk = devm_v4l2_sensor_clk_get(dev, "xvclk");
if (IS_ERR(sensor->xvclk)) {
ret = dev_err_probe(dev, PTR_ERR(sensor->xvclk),
"xvclk clock missing or invalid\n");
goto out_free_bus_cfg;
}
- /*
- * We could have either a 24MHz or 19.2MHz clock rate from either DT or
- * ACPI... but we also need to support the weird IPU3 case which will
- * have an external clock AND a clock-frequency property. Check for the
- * clock-frequency property and if found, set that rate if we managed
- * to acquire a clock. This should cover the ACPI case. If the system
- * uses devicetree then the configured rate should already be set, so
- * we can just read it.
- */
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &rate);
- if (ret && !sensor->xvclk) {
- dev_err_probe(dev, ret, "invalid clock config\n");
- goto out_free_bus_cfg;
- }
-
- if (!ret && sensor->xvclk) {
- ret = clk_set_rate(sensor->xvclk, rate);
- if (ret) {
- dev_err_probe(dev, ret, "failed to set clock rate\n");
- goto out_free_bus_cfg;
- }
- }
-
- sensor->xvclk_freq = rate ?: clk_get_rate(sensor->xvclk);
+ sensor->xvclk_freq = clk_get_rate(sensor->xvclk);
for (i = 0; i < ARRAY_SIZE(ov2680_xvclk_freqs); i++) {
if (sensor->xvclk_freq == ov2680_xvclk_freqs[i])
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index 9b8481b8dcd4..4911a4eea126 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -783,16 +783,12 @@ static int ov2685_probe(struct i2c_client *client)
ov2685->client = client;
ov2685->cur_mode = &supported_modes[0];
- ov2685->xvclk = devm_clk_get(dev, "xvclk");
- if (IS_ERR(ov2685->xvclk)) {
- dev_err(dev, "Failed to get xvclk\n");
- return -EINVAL;
- }
- ret = clk_set_rate(ov2685->xvclk, OV2685_XVCLK_FREQ);
- if (ret < 0) {
- dev_err(dev, "Failed to set xvclk rate (24MHz)\n");
- return ret;
- }
+ ov2685->xvclk = devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", true,
+ OV2685_XVCLK_FREQ);
+ if (IS_ERR(ov2685->xvclk))
+ return dev_err_probe(dev, PTR_ERR(ov2685->xvclk),
+ "Failed to get xvclk\n");
+
if (clk_get_rate(ov2685->xvclk) != OV2685_XVCLK_FREQ)
dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
diff --git a/drivers/media/i2c/ov2735.c b/drivers/media/i2c/ov2735.c
new file mode 100644
index 000000000000..b96600204141
--- /dev/null
+++ b/drivers/media/i2c/ov2735.c
@@ -0,0 +1,1109 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Support for the OV2735
+ *
+ * Copyright (C) 2025 Silicon Signals Pvt. Ltd.
+ *
+ * Based on Rockchip ov2735 Camera Driver
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd.
+ *
+ * Inspired from ov8858, imx219, imx283 camera drivers.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/container_of.h>
+#include <linux/delay.h>
+#include <linux/device/devres.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+
+#define OV2735_XCLK_FREQ (24 * HZ_PER_MHZ)
+
+/* Add page number in CCI private bits [31:28] of the register address */
+#define OV2735_PAGE_REG8(p, x) (((p) << CCI_REG_PRIVATE_SHIFT) | CCI_REG8(x))
+#define OV2735_PAGE_REG16(p, x) (((p) << CCI_REG_PRIVATE_SHIFT) | CCI_REG16(x))
+
+#define OV2735_REG_PAGE_SELECT CCI_REG8(0xfd)
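+
+/*
+ * Illustration (not part of the register map): OV2735_REG_CHIPID below
+ * carries page 0 in the CCI private bits and CCI_REG16(0x02) in the low
+ * bits; ov2735_read()/ov2735_write() strip the private bits again and
+ * switch pages via OV2735_REG_PAGE_SELECT only when the cached page
+ * differs.
+ */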
+
+/* Page 0 */
+#define OV2735_REG_CHIPID OV2735_PAGE_REG16(0x00, 0x02)
+#define OV2735_CHIPID 0x2735
+
+#define OV2735_REG_SOFT_RESET OV2735_PAGE_REG8(0x00, 0x20)
+
+/* Clock Settings */
+#define OV2735_REG_PLL_CTRL OV2735_PAGE_REG8(0x00, 0x2f)
+#define OV2735_PLL_CTRL_ENABLE 0x7f
+#define OV2735_REG_PLL_OUTDIV OV2735_PAGE_REG8(0x00, 0x34)
+#define OV2735_REG_CLK_MODE OV2735_PAGE_REG8(0x00, 0x30)
+#define OV2735_REG_CLOCK_REG1 OV2735_PAGE_REG8(0x00, 0x33)
+#define OV2735_REG_CLOCK_REG2 OV2735_PAGE_REG8(0x00, 0x35)
+
+/* Page 1 */
+#define OV2735_REG_STREAM_CTRL OV2735_PAGE_REG8(0x01, 0xa0)
+#define OV2735_STREAM_CTRL_ON 0x01
+#define OV2735_STREAM_CTRL_OFF 0x00
+
+#define OV2735_REG_UPDOWN_MIRROR OV2735_PAGE_REG8(0x01, 0x3f)
+#define OV2735_REG_BINNING_DAC_CODE_MODE OV2735_PAGE_REG8(0x01, 0x30)
+#define OV2735_REG_FRAME_LENGTH OV2735_PAGE_REG16(0x01, 0x0e)
+#define OV2735_FRAME_LENGTH_MAX 0x0fff
+#define OV2735_REG_FRAME_EXP_SEPARATE_EN	OV2735_PAGE_REG8(0x01, 0x0d)
+#define OV2735_FRAME_EXP_SEPARATE_EN		0x10
+#define OV2735_REG_FRAME_SYNC OV2735_PAGE_REG8(0x01, 0x01)
+
+#define OV2735_REG_HBLANK OV2735_PAGE_REG16(0x01, 0x09)
+
+#define OV2735_REG_HS_MIPI OV2735_PAGE_REG8(0x01, 0xb1)
+#define OV2735_REG_MIPI_CTRL1 OV2735_PAGE_REG8(0x01, 0x92)
+#define OV2735_REG_MIPI_CTRL2 OV2735_PAGE_REG8(0x01, 0x94)
+#define OV2735_REG_MIPI_CTRL3 OV2735_PAGE_REG8(0x01, 0xa1)
+#define OV2735_REG_MIPI_CTRL4 OV2735_PAGE_REG8(0x01, 0xb2)
+#define OV2735_REG_MIPI_CTRL5 OV2735_PAGE_REG8(0x01, 0xb3)
+#define OV2735_REG_MIPI_CTRL6 OV2735_PAGE_REG8(0x01, 0xb4)
+#define OV2735_REG_MIPI_CTRL7 OV2735_PAGE_REG8(0x01, 0xb5)
+#define OV2735_REG_HIGH_SPEED OV2735_PAGE_REG8(0x01, 0x9d)
+#define OV2735_REG_PREPARE OV2735_PAGE_REG8(0x01, 0x95)
+#define OV2735_REG_R_HS_ZERO OV2735_PAGE_REG8(0x01, 0x96)
+#define OV2735_REG_TRAIL OV2735_PAGE_REG8(0x01, 0x98)
+#define OV2735_REG_R_CLK_ZERO OV2735_PAGE_REG8(0x01, 0x9c)
+#define OV2735_REG_MIPI_COLUMN_NUMBER	OV2735_PAGE_REG16(0x01, 0x8e)
+#define OV2735_REG_MIPI_LINE_NUMBER OV2735_PAGE_REG16(0x01, 0x90)
+
+/* Timing control registers */
+#define OV2735_REG_TIMING_CTRL2 OV2735_PAGE_REG8(0x01, 0x1a)
+#define OV2735_REG_TIMING_CTRL3 OV2735_PAGE_REG8(0x01, 0x1c)
+#define OV2735_REG_TIMING_CTRL1 OV2735_PAGE_REG8(0x01, 0x16)
+#define OV2735_REG_RST_NUM OV2735_PAGE_REG16(0x01, 0x10)
+#define OV2735_REG_RST_NUM2 OV2735_PAGE_REG16(0x01, 0x32)
+#define OV2735_REG_BOOST_EN OV2735_PAGE_REG8(0x01, 0xd0)
+#define OV2735_REG_B2_NUM OV2735_PAGE_REG16(0x01, 0xd1)
+#define OV2735_REG_B4_NUM OV2735_PAGE_REG16(0x01, 0xd3)
+#define OV2735_REG_PIXEL_CYCLE_P0 OV2735_PAGE_REG8(0x01, 0x50)
+#define OV2735_REG_PIXEL_CYCLE_P1 OV2735_PAGE_REG8(0x01, 0x51)
+#define OV2735_REG_PIXEL_CYCLE_P2 OV2735_PAGE_REG8(0x01, 0x52)
+#define OV2735_REG_PIXEL_CYCLE_P3 OV2735_PAGE_REG8(0x01, 0x53)
+#define OV2735_REG_PIXEL_CYCLE_P5 OV2735_PAGE_REG8(0x01, 0x55)
+#define OV2735_REG_PIXEL_CYCLE_P7 OV2735_PAGE_REG16(0x01, 0x57)
+#define OV2735_REG_PIXEL_CYCLE_P9 OV2735_PAGE_REG8(0x01, 0x5a)
+#define OV2735_REG_PIXEL_CYCLE_P10 OV2735_PAGE_REG8(0x01, 0x5b)
+#define OV2735_REG_PIXEL_CYCLE_P12 OV2735_PAGE_REG8(0x01, 0x5d)
+#define OV2735_REG_PIXEL_CYCLE_P18 OV2735_PAGE_REG8(0x01, 0x64)
+#define OV2735_REG_PIXEL_CYCLE_P20 OV2735_PAGE_REG8(0x01, 0x66)
+#define OV2735_REG_PIXEL_CYCLE_P22 OV2735_PAGE_REG8(0x01, 0x68)
+#define OV2735_REG_PIXEL_CYCLE_P33 OV2735_PAGE_REG16(0x01, 0x74)
+#define OV2735_REG_PIXEL_CYCLE_P34 OV2735_PAGE_REG8(0x01, 0x76)
+#define OV2735_REG_PIXEL_CYCLE_P35_P36 OV2735_PAGE_REG8(0x01, 0x77)
+#define OV2735_REG_PIXEL_CYCLE_P37_P38 OV2735_PAGE_REG8(0x01, 0x78)
+#define OV2735_REG_PIXEL_CYCLE_P31 OV2735_PAGE_REG8(0x01, 0x72)
+#define OV2735_REG_PIXEL_CYCLE_P32 OV2735_PAGE_REG8(0x01, 0x73)
+#define OV2735_REG_PIXEL_CYCLE_P44 OV2735_PAGE_REG8(0x01, 0x7d)
+#define OV2735_REG_PIXEL_CYCLE_P45 OV2735_PAGE_REG8(0x01, 0x7e)
+#define OV2735_REG_PIXEL_BIAS_CTRL_RH_RL OV2735_PAGE_REG8(0x01, 0x8a)
+#define OV2735_REG_PIXEL_BIAS_CTRL_SH_SL OV2735_PAGE_REG8(0x01, 0x8b)
+
+/* Analog Control registers */
+#define OV2735_REG_ICOMP OV2735_PAGE_REG8(0x01, 0x19)
+#define OV2735_REG_PCP_RST_SEL OV2735_PAGE_REG8(0x01, 0x21)
+#define OV2735_REG_VNCP OV2735_PAGE_REG8(0x01, 0x20)
+#define OV2735_REG_ANALOG_CTRL3 OV2735_PAGE_REG8(0x01, 0x25)
+#define OV2735_REG_ANALOG_CTRL4 OV2735_PAGE_REG8(0x01, 0x26)
+#define OV2735_REG_ANALOG_CTRL5 OV2735_PAGE_REG8(0x01, 0x29)
+#define OV2735_REG_ANALOG_CTRL6 OV2735_PAGE_REG8(0x01, 0x2a)
+#define OV2735_REG_ANALOG_CTRL8 OV2735_PAGE_REG8(0x01, 0x2c)
+
+/* BLC registers */
+#define OV2735_REG_BLC_GAIN_BLUE OV2735_PAGE_REG8(0x01, 0x86)
+#define OV2735_REG_BLC_GAIN_RED OV2735_PAGE_REG8(0x01, 0x87)
+#define OV2735_REG_BLC_GAIN_GR OV2735_PAGE_REG8(0x01, 0x88)
+#define OV2735_REG_BLC_GAIN_GB OV2735_PAGE_REG8(0x01, 0x89)
+#define OV2735_REG_GB_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf0)
+#define OV2735_REG_BLUE_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf1)
+#define OV2735_REG_RED_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf2)
+#define OV2735_REG_GR_SUBOFFSET OV2735_PAGE_REG8(0x01, 0xf3)
+#define OV2735_REG_BLC_BPC_TH_P OV2735_PAGE_REG8(0x01, 0xfc)
+#define OV2735_REG_BLC_BPC_TH_N OV2735_PAGE_REG8(0x01, 0xfe)
+#define OV2735_REG_ABL OV2735_PAGE_REG8(0x01, 0xfb)
+
+#define OV2735_REG_TEST_PATTERN OV2735_PAGE_REG8(0x01, 0xb2)
+#define OV2735_TEST_PATTERN_ENABLE 0x01
+#define OV2735_TEST_PATTERN_DISABLE 0xfe
+
+#define OV2735_REG_LONG_EXPOSURE OV2735_PAGE_REG16(0x01, 0x03)
+#define OV2735_EXPOSURE_MIN 4
+#define OV2735_EXPOSURE_STEP 1
+#define OV2735_EXPOSURE_MARGIN 4
+
+#define OV2735_REG_ANALOG_GAIN OV2735_PAGE_REG8(0x01, 0x24)
+#define OV2735_ANALOG_GAIN_MIN 0x10
+#define OV2735_ANALOG_GAIN_MAX 0xff
+#define OV2735_ANALOG_GAIN_STEP 1
+#define OV2735_ANALOG_GAIN_DEFAULT 0x10
+
+/* Page 2 */
+#define OV2735_REG_V_START OV2735_PAGE_REG16(0x02, 0xa0)
+#define OV2735_REG_V_SIZE OV2735_PAGE_REG16(0x02, 0xa2)
+#define OV2735_REG_H_START OV2735_PAGE_REG16(0x02, 0xa4)
+#define OV2735_REG_H_SIZE OV2735_PAGE_REG16(0x02, 0xa6)
+
+#define OV2735_LINK_FREQ_420MHZ (420 * HZ_PER_MHZ)
+#define OV2735_PIXEL_RATE (168 * HZ_PER_MHZ)
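+
+/*
+ * Consistency note (assuming the usual CSI-2 D-PHY arithmetic, not taken
+ * from the datasheet): 168 Mpix/s of 10-bit RAW over 2 lanes with double
+ * data rate signalling needs 168 * 10 / (2 * 2) = 420 MHz, which matches
+ * the link frequency above.
+ */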
+
+/* OV2735 native and active pixel array size */
+static const struct v4l2_rect ov2735_native_area = {
+ .top = 0,
+ .left = 0,
+ .width = 1936,
+ .height = 1096,
+};
+
+static const struct v4l2_rect ov2735_active_area = {
+ .top = 8,
+ .left = 8,
+ .width = 1920,
+ .height = 1080,
+};
+
+static const char * const ov2735_supply_name[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital core power */
+};
+
+/* PLL_OUT = [PLL_IN * (pll_nc + 3)] / [(pll_mc + 1) * (pll_outdiv + 1)] */
+struct ov2735_pll_parameters {
+ u8 pll_nc;
+ u8 pll_mc;
+ u8 pll_outdiv;
+};
+
+struct ov2735 {
+ struct device *dev;
+ struct regmap *cci;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct clk *xclk;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(ov2735_supply_name)];
+
+ /* V4L2 Controls */
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *gain;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *test_pattern;
+
+ u32 link_freq_index;
+
+ u8 current_page;
+ struct mutex page_lock;
+};
+
+struct ov2735_mode {
+ u32 width;
+ u32 height;
+ u32 hts_def;
+ u32 vts_def;
+ u32 exp_def;
+ struct v4l2_rect crop;
+};
+
+static const struct cci_reg_sequence ov2735_common_regs[] = {
+ { OV2735_REG_CLK_MODE, 0x15 },
+ { OV2735_REG_CLOCK_REG1, 0x01 },
+ { OV2735_REG_CLOCK_REG2, 0x20 },
+ { OV2735_REG_BINNING_DAC_CODE_MODE, 0x00 },
+ { OV2735_REG_ABL, 0x73 },
+ { OV2735_REG_FRAME_SYNC, 0x01 },
+
+ /* Timing ctrl */
+ { OV2735_REG_TIMING_CTRL2, 0x6b },
+ { OV2735_REG_TIMING_CTRL3, 0xea },
+ { OV2735_REG_TIMING_CTRL1, 0x0c },
+ { OV2735_REG_RST_NUM, 0x0063 },
+ { OV2735_REG_RST_NUM2, 0x006f },
+ { OV2735_REG_BOOST_EN, 0x02 },
+ { OV2735_REG_B2_NUM, 0x0120 },
+ { OV2735_REG_B4_NUM, 0x042a },
+ { OV2735_REG_PIXEL_CYCLE_P0, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P1, 0x2c },
+ { OV2735_REG_PIXEL_CYCLE_P2, 0x29 },
+ { OV2735_REG_PIXEL_CYCLE_P3, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P5, 0x44 },
+ { OV2735_REG_PIXEL_CYCLE_P7, 0x0029 },
+ { OV2735_REG_PIXEL_CYCLE_P9, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P10, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P12, 0x00 },
+ { OV2735_REG_PIXEL_CYCLE_P18, 0x2f },
+ { OV2735_REG_PIXEL_CYCLE_P20, 0x62 },
+ { OV2735_REG_PIXEL_CYCLE_P22, 0x5b },
+ { OV2735_REG_PIXEL_CYCLE_P33, 0x0046 },
+ { OV2735_REG_PIXEL_CYCLE_P34, 0x36 },
+ { OV2735_REG_PIXEL_CYCLE_P35_P36, 0x4f },
+ { OV2735_REG_PIXEL_CYCLE_P37_P38, 0xef },
+ { OV2735_REG_PIXEL_CYCLE_P31, 0xcf },
+ { OV2735_REG_PIXEL_CYCLE_P32, 0x36 },
+ { OV2735_REG_PIXEL_CYCLE_P44, 0x0d },
+ { OV2735_REG_PIXEL_CYCLE_P45, 0x0d },
+ { OV2735_REG_PIXEL_BIAS_CTRL_RH_RL, 0x77 },
+ { OV2735_REG_PIXEL_BIAS_CTRL_SH_SL, 0x77 },
+
+ /* Analog ctrl */
+ { OV2735_REG_ANALOG_CTRL4, 0x5a },
+ { OV2735_REG_ANALOG_CTRL5, 0x01 },
+ { OV2735_REG_ANALOG_CTRL6, 0xd2 },
+ { OV2735_REG_ANALOG_CTRL8, 0x40 },
+ { OV2735_REG_PCP_RST_SEL, 0x00 },
+ { OV2735_REG_ICOMP, 0xc3 },
+
+ { OV2735_REG_HS_MIPI, 0x83 },
+ { OV2735_REG_MIPI_CTRL5, 0x0b },
+ { OV2735_REG_MIPI_CTRL6, 0x14 },
+ { OV2735_REG_HIGH_SPEED, 0x40 },
+ { OV2735_REG_MIPI_CTRL3, 0x05 },
+ { OV2735_REG_MIPI_CTRL2, 0x44 },
+ { OV2735_REG_PREPARE, 0x33 },
+ { OV2735_REG_R_HS_ZERO, 0x1f },
+ { OV2735_REG_TRAIL, 0x45 },
+ { OV2735_REG_R_CLK_ZERO, 0x10 },
+ { OV2735_REG_MIPI_CTRL7, 0x70 },
+ { OV2735_REG_ANALOG_CTRL3, 0xe0 },
+ { OV2735_REG_VNCP, 0x7b },
+
+ /* BLC */
+ { OV2735_REG_BLC_GAIN_BLUE, 0x77 },
+ { OV2735_REG_BLC_GAIN_GB, 0x77 },
+ { OV2735_REG_BLC_GAIN_RED, 0x74 },
+ { OV2735_REG_BLC_GAIN_GR, 0x74 },
+ { OV2735_REG_BLC_BPC_TH_P, 0xe0 },
+ { OV2735_REG_BLC_BPC_TH_N, 0xe0 },
+ { OV2735_REG_GB_SUBOFFSET, 0x40 },
+ { OV2735_REG_BLUE_SUBOFFSET, 0x40 },
+ { OV2735_REG_RED_SUBOFFSET, 0x40 },
+ { OV2735_REG_GR_SUBOFFSET, 0x40 },
+};
+
+static const struct ov2735_mode supported_modes[] = {
+ {
+ .width = 1920,
+ .height = 1080,
+ .exp_def = 399,
+ .hts_def = 2200,
+ .vts_def = 2545,
+ .crop = {
+ .top = 8,
+ .left = 8,
+ .width = 1920,
+ .height = 1080,
+ },
+ },
+};
+
+static const s64 link_freq_menu_items[] = {
+ OV2735_LINK_FREQ_420MHZ,
+};
+
+static const struct ov2735_pll_parameters pll_configs[] = {
+ /* For 420MHz pll_configs */
+ {
+ .pll_nc = 4,
+ .pll_mc = 0,
+ .pll_outdiv = 1,
+ },
+};
+
+static const char * const ov2735_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color",
+};
+
+static int ov2735_page_access(struct ov2735 *ov2735, u32 reg, int *err)
+{
+ u8 page = reg >> CCI_REG_PRIVATE_SHIFT;
+ int ret = 0;
+
+ if (err && *err)
+ return *err;
+
+ guard(mutex)(&ov2735->page_lock);
+
+ /* Perform page access before read/write */
+ if (ov2735->current_page == page)
+ return ret;
+
+ ret = cci_write(ov2735->cci, OV2735_REG_PAGE_SELECT, page, err);
+ if (!ret)
+ ov2735->current_page = page;
+
+ return ret;
+}
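+
+/*
+ * Note: the cached current_page avoids a redundant I2C write to the page
+ * select register for accesses that stay on the same page, and
+ * guard(mutex) (linux/cleanup.h) drops page_lock automatically on every
+ * return path.
+ */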
+
+static int ov2735_read(struct ov2735 *ov2735, u32 reg, u64 *val, int *err)
+{
+ u32 addr = reg & ~CCI_REG_PRIVATE_MASK;
+ int ret;
+
+ ret = ov2735_page_access(ov2735, reg, err);
+ if (ret)
+ return ret;
+
+ return cci_read(ov2735->cci, addr, val, err);
+}
+
+static int ov2735_write(struct ov2735 *ov2735, u32 reg, u64 val, int *err)
+{
+ u32 addr = reg & ~CCI_REG_PRIVATE_MASK;
+ int ret;
+
+ ret = ov2735_page_access(ov2735, reg, err);
+ if (ret)
+ return ret;
+
+ return cci_write(ov2735->cci, addr, val, err);
+}
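+
+/*
+ * Usage sketch (illustrative only): the trailing 'int *err' follows the
+ * v4l2-cci convention, so writes can be chained and checked once, with
+ * calls after the first failure turning into no-ops:
+ *
+ *	int ret = 0;
+ *
+ *	ov2735_write(ov2735, OV2735_REG_PLL_CTRL, pll_ctrl, &ret);
+ *	ov2735_write(ov2735, OV2735_REG_PLL_OUTDIV, pll_outdiv, &ret);
+ *	return ret;
+ */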
+
+static int ov2735_multi_reg_write(struct ov2735 *ov2735,
+ const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < num_regs; i++) {
+ ret = ov2735_write(ov2735, regs[i].reg, regs[i].val, err);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct ov2735 *to_ov2735(struct v4l2_subdev *_sd)
+{
+ return container_of_const(_sd, struct ov2735, sd);
+}
+
+static int ov2735_enable_test_pattern(struct ov2735 *ov2735, u32 pattern)
+{
+ int ret;
+ u64 val;
+
+ ret = ov2735_read(ov2735, OV2735_REG_TEST_PATTERN, &val, NULL);
+ if (ret)
+ return ret;
+
+ switch (pattern) {
+ case 0:
+ val &= ~OV2735_TEST_PATTERN_ENABLE;
+ break;
+ case 1:
+ val |= OV2735_TEST_PATTERN_ENABLE;
+ break;
+ }
+
+ return ov2735_write(ov2735, OV2735_REG_TEST_PATTERN, val, NULL);
+}
+
+static int ov2735_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov2735 *ov2735 =
+ container_of_const(ctrl->handler, struct ov2735, handler);
+ struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_subdev_state *state;
+ u64 vts;
+ int ret = 0;
+
+ state = v4l2_subdev_get_locked_active_state(&ov2735->sd);
+ fmt = v4l2_subdev_state_get_format(state, 0);
+
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Honour the VBLANK limits when setting exposure */
+ s64 max = fmt->height + ctrl->val - OV2735_EXPOSURE_MARGIN;
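+		/*
+		 * Worked example for the single 1080p mode: at the default
+		 * vblank of 2545 - 1080 = 1465 lines this allows exposures
+		 * up to 1080 + 1465 - 4 = 2541 lines, i.e. vts_def minus
+		 * the margin.
+		 */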
+
+ ret = __v4l2_ctrl_modify_range(ov2735->exposure,
+ ov2735->exposure->minimum, max,
+ ov2735->exposure->step,
+ ov2735->exposure->default_value);
+ if (ret)
+ return ret;
+ }
+
+ if (pm_runtime_get_if_in_use(ov2735->dev) == 0)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_EXPOSURE:
+ ov2735_write(ov2735, OV2735_REG_LONG_EXPOSURE, ctrl->val, &ret);
+ break;
+ case V4L2_CID_ANALOGUE_GAIN:
+ ov2735_write(ov2735, OV2735_REG_ANALOG_GAIN, ctrl->val, &ret);
+ break;
+ case V4L2_CID_HBLANK:
+ ov2735_write(ov2735, OV2735_REG_HBLANK, ctrl->val, &ret);
+ break;
+ case V4L2_CID_VBLANK:
+ vts = ctrl->val + fmt->height;
+		ov2735_write(ov2735, OV2735_REG_FRAME_EXP_SEPARATE_EN,
+			     OV2735_FRAME_EXP_SEPARATE_EN, &ret);
+ ov2735_write(ov2735, OV2735_REG_FRAME_LENGTH, vts, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov2735_enable_test_pattern(ov2735, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ ov2735_write(ov2735, OV2735_REG_FRAME_SYNC, 0x01, &ret);
+
+ pm_runtime_put(ov2735->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov2735_ctrl_ops = {
+ .s_ctrl = ov2735_set_ctrl,
+};
+
+static int ov2735_init_controls(struct ov2735 *ov2735)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ struct v4l2_fwnode_device_properties props;
+ const struct ov2735_mode *mode = &supported_modes[0];
+ u64 hblank_def, vblank_def, exp_max;
+ int ret;
+
+ ctrl_hdlr = &ov2735->handler;
+ v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+
+ ov2735->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ OV2735_PIXEL_RATE, 1,
+ OV2735_PIXEL_RATE);
+
+ ov2735->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ov2735->link_freq_index,
+ 0, link_freq_menu_items);
+ if (ov2735->link_freq)
+ ov2735->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hblank_def = mode->hts_def - mode->width;
+ ov2735->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_HBLANK, hblank_def,
+ hblank_def, 1, hblank_def);
+
+ vblank_def = mode->vts_def - mode->height;
+ ov2735->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_def,
+ OV2735_FRAME_LENGTH_MAX - mode->height,
+ 1, vblank_def);
+
+ exp_max = mode->vts_def - OV2735_EXPOSURE_MARGIN;
+ ov2735->exposure =
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV2735_EXPOSURE_MIN, exp_max,
+ OV2735_EXPOSURE_STEP, mode->exp_def);
+
+ ov2735->gain =
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_ANALOGUE_GAIN, OV2735_ANALOG_GAIN_MIN,
+ OV2735_ANALOG_GAIN_MAX, OV2735_ANALOG_GAIN_STEP,
+ OV2735_ANALOG_GAIN_DEFAULT);
+
+ ov2735->test_pattern =
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov2735_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov2735_test_pattern_menu) - 1,
+ 0, 0, ov2735_test_pattern_menu);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(ov2735->dev, "control init failed (%d)\n", ret);
+ goto err_handler_free;
+ }
+
+ ret = v4l2_fwnode_device_parse(ov2735->dev, &props);
+ if (ret)
+ goto err_handler_free;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr,
+ &ov2735_ctrl_ops, &props);
+ if (ret)
+ goto err_handler_free;
+
+ ov2735->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+err_handler_free:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static int ov2735_set_pll_ctrl(struct ov2735 *ov2735)
+{
+ const struct ov2735_pll_parameters *pll_parameters;
+ u8 pll_ctrl;
+ u8 pll_outdiv;
+ int ret = 0;
+
+ pll_parameters = &pll_configs[ov2735->link_freq_index];
+
+ /* BIT[7]: pll_clk_sel, BIT[6:2]: pll_nc, BIT[1:0]: pll_mc */
+ pll_ctrl = ((pll_parameters->pll_nc << 2) | (pll_parameters->pll_mc << 0)) &
+ OV2735_PLL_CTRL_ENABLE;
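+	/*
+	 * For the only pll_configs entry (pll_nc = 4, pll_mc = 0) this packs
+	 * to (4 << 2) | 0 = 0x10; the 0x7f mask only clears BIT(7), the
+	 * pll_clk_sel bit.
+	 */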
+
+ pll_outdiv = pll_parameters->pll_outdiv;
+
+ ov2735_write(ov2735, OV2735_REG_PLL_CTRL, pll_ctrl, &ret);
+ ov2735_write(ov2735, OV2735_REG_PLL_OUTDIV, pll_outdiv, &ret);
+
+ return ret;
+}
+
+static int ov2735_set_framefmt(struct ov2735 *ov2735,
+ struct v4l2_subdev_state *state)
+{
+ const struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
+ int ret = 0;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+ crop = v4l2_subdev_state_get_crop(state, 0);
+
+ ov2735_write(ov2735, OV2735_REG_V_START, crop->top, &ret);
+ ov2735_write(ov2735, OV2735_REG_V_SIZE, format->height, &ret);
+ ov2735_write(ov2735, OV2735_REG_MIPI_LINE_NUMBER, format->height, &ret);
+ ov2735_write(ov2735, OV2735_REG_H_START, crop->left, &ret);
+ /* OV2735_REG_H_SIZE: Image half horizontal size */
+ ov2735_write(ov2735, OV2735_REG_H_SIZE, (format->width / 2), &ret);
+	ov2735_write(ov2735, OV2735_REG_MIPI_COLUMN_NUMBER, format->width, &ret);
+
+ return ret;
+}
+
+static int ov2735_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(ov2735->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Apply pll settings */
+ ret = ov2735_set_pll_ctrl(ov2735);
+ if (ret) {
+		dev_err(ov2735->dev, "failed to set PLL: %d\n", ret);
+ goto err_rpm_put;
+ }
+
+ ret = ov2735_multi_reg_write(ov2735, ov2735_common_regs,
+ ARRAY_SIZE(ov2735_common_regs), NULL);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to write common registers\n");
+ goto err_rpm_put;
+ }
+
+ /* Apply format settings */
+ ret = ov2735_set_framefmt(ov2735, state);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to set frame format: %d\n", ret);
+ goto err_rpm_put;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(ov2735->sd.ctrl_handler);
+ if (ret)
+ goto err_rpm_put;
+
+ ret = ov2735_write(ov2735, OV2735_REG_STREAM_CTRL,
+ OV2735_STREAM_CTRL_ON, NULL);
+ if (ret)
+ goto err_rpm_put;
+
+ return 0;
+
+err_rpm_put:
+ pm_runtime_put(ov2735->dev);
+ return ret;
+}
+
+static int ov2735_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ ret = ov2735_write(ov2735, OV2735_REG_STREAM_CTRL,
+ OV2735_STREAM_CTRL_OFF, NULL);
+ if (ret)
+ dev_err(ov2735->dev, "%s failed to set stream\n", __func__);
+
+ pm_runtime_put(ov2735->dev);
+
+ return ret;
+}
+
+static int ov2735_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ sel->r = *v4l2_subdev_state_get_crop(sd_state, 0);
+ return 0;
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r = ov2735_native_area;
+ return 0;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r = ov2735_active_area;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ov2735_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov2735_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov2735_set_framing_limits(struct ov2735 *ov2735,
+ const struct ov2735_mode *mode)
+{
+ u32 hblank, vblank_def;
+ int ret;
+
+ hblank = mode->hts_def - mode->width;
+ ret = __v4l2_ctrl_modify_range(ov2735->hblank, hblank, hblank, 1,
+ hblank);
+ if (ret)
+ return ret;
+
+ vblank_def = mode->vts_def - mode->height;
+ return __v4l2_ctrl_modify_range(ov2735->vblank, vblank_def,
+ OV2735_FRAME_LENGTH_MAX - mode->height,
+ 1, vblank_def);
+}
+
+static int ov2735_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct ov2735_mode *mode;
+ struct v4l2_rect *crop;
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ format = v4l2_subdev_state_get_format(sd_state, 0);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.field = V4L2_FIELD_NONE;
+ fmt->format.colorspace = V4L2_COLORSPACE_RAW;
+ fmt->format.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->format.xfer_func = V4L2_XFER_FUNC_NONE;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ ret = ov2735_set_framing_limits(ov2735, mode);
+ if (ret)
+ return ret;
+ }
+
+ *format = fmt->format;
+
+ /* Initialize crop rectangle */
+ crop = v4l2_subdev_state_get_crop(sd_state, 0);
+ *crop = mode->crop;
+
+ return 0;
+}
+
+static int ov2735_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .format = {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ ov2735_set_pad_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov2735_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov2735_pad_ops = {
+ .enum_mbus_code = ov2735_enum_mbus_code,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = ov2735_set_pad_format,
+ .get_selection = ov2735_get_selection,
+ .enum_frame_size = ov2735_enum_frame_size,
+ .enable_streams = ov2735_enable_streams,
+ .disable_streams = ov2735_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov2735_subdev_ops = {
+ .video = &ov2735_video_ops,
+ .pad = &ov2735_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ov2735_internal_ops = {
+ .init_state = ov2735_init_state,
+};
+
+static int ov2735_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov2735 *ov2735 = to_ov2735(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ov2735_supply_name),
+ ov2735->supplies);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to enable regulators\n");
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(ov2735->enable_gpio, 1);
+ /* T4: delay from PWDN pulling low to RSTB pulling high */
+ fsleep(4 * USEC_PER_MSEC);
+
+ ret = clk_prepare_enable(ov2735->xclk);
+ if (ret) {
+ dev_err(ov2735->dev, "failed to enable clock\n");
+ goto err_regulator_off;
+ }
+
+ gpiod_set_value_cansleep(ov2735->reset_gpio, 0);
+ /* T5: delay from RSTB pulling high to first I2C command */
+ fsleep(5 * USEC_PER_MSEC);
+
+ return 0;
+
+err_regulator_off:
+ regulator_bulk_disable(ARRAY_SIZE(ov2735_supply_name), ov2735->supplies);
+ return ret;
+}
+
+static int ov2735_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov2735 *ov2735 = to_ov2735(sd);
+
+ gpiod_set_value_cansleep(ov2735->enable_gpio, 0);
+ clk_disable_unprepare(ov2735->xclk);
+ gpiod_set_value_cansleep(ov2735->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ov2735_supply_name), ov2735->supplies);
+
+ return 0;
+}
+
+static int ov2735_get_regulators(struct ov2735 *ov2735)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(ov2735_supply_name); i++)
+ ov2735->supplies[i].supply = ov2735_supply_name[i];
+
+ return devm_regulator_bulk_get(ov2735->dev,
+ ARRAY_SIZE(ov2735_supply_name),
+ ov2735->supplies);
+}
+
+static int ov2735_identify_module(struct ov2735 *ov2735)
+{
+ u64 chip_id;
+ int ret;
+
+ ret = ov2735_read(ov2735, OV2735_REG_CHIPID, &chip_id, NULL);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "failed to read chip id %x\n",
+ OV2735_CHIPID);
+
+ if (chip_id != OV2735_CHIPID)
+ return dev_err_probe(ov2735->dev, -EIO,
+ "chip id mismatch: %x!=%llx\n",
+ OV2735_CHIPID, chip_id);
+
+ return 0;
+}
+
+static int ov2735_parse_endpoint(struct ov2735 *ov2735)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct fwnode_handle *ep;
+ unsigned long link_freq_bitmap;
+ int ret;
+
+ ep = fwnode_graph_get_next_endpoint(dev_fwnode(ov2735->dev), NULL);
+ if (!ep)
+ return dev_err_probe(ov2735->dev, -ENXIO,
+ "Failed to get next endpoint\n");
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2) {
+ ret = dev_err_probe(ov2735->dev, -EINVAL,
+ "only 2 data lanes are supported\n");
+ goto error_out;
+ }
+
+ ret = v4l2_link_freq_to_bitmap(ov2735->dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ link_freq_menu_items,
+ ARRAY_SIZE(link_freq_menu_items),
+ &link_freq_bitmap);
+ if (ret) {
+ ret = dev_err_probe(ov2735->dev, -EINVAL,
+ "only 420MHz frequency is available\n");
+ goto error_out;
+ }
+
+ ov2735->link_freq_index = __ffs(link_freq_bitmap);
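+	/*
+	 * With the single-entry menu above, a matching firmware
+	 * 'link-frequencies' list sets bit 0 of the bitmap, so __ffs()
+	 * yields index 0.
+	 */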
+
+error_out:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov2735_probe(struct i2c_client *client)
+{
+ struct ov2735 *ov2735;
+ unsigned int xclk_freq;
+ int ret;
+
+ ov2735 = devm_kzalloc(&client->dev, sizeof(*ov2735), GFP_KERNEL);
+ if (!ov2735)
+ return -ENOMEM;
+
+ ov2735->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&ov2735->sd, client, &ov2735_subdev_ops);
+ ov2735->sd.internal_ops = &ov2735_internal_ops;
+
+ ov2735->cci = devm_cci_regmap_init_i2c(client, 8);
+ if (IS_ERR(ov2735->cci))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->cci),
+ "failed to initialize CCI\n");
+
+ /* Set Current page to 0 */
+ ov2735->current_page = 0;
+
+ ret = devm_mutex_init(ov2735->dev, &ov2735->page_lock);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "Failed to initialize lock\n");
+
+ /* Get system clock (xvclk) */
+ ov2735->xclk = devm_v4l2_sensor_clk_get(ov2735->dev, NULL);
+ if (IS_ERR(ov2735->xclk))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->xclk),
+ "failed to get xclk\n");
+
+ xclk_freq = clk_get_rate(ov2735->xclk);
+ if (xclk_freq != OV2735_XCLK_FREQ)
+ return dev_err_probe(ov2735->dev, -EINVAL,
+ "xclk frequency not supported: %u Hz\n",
+ xclk_freq);
+
+ ret = ov2735_get_regulators(ov2735);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "failed to get regulators\n");
+
+ ret = ov2735_parse_endpoint(ov2735);
+ if (ret)
+ return dev_err_probe(ov2735->dev, ret,
+ "failed to parse endpoint configuration\n");
+
+ ov2735->reset_gpio = devm_gpiod_get_optional(ov2735->dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ov2735->reset_gpio))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->reset_gpio),
+ "failed to get reset GPIO\n");
+
+ ov2735->enable_gpio = devm_gpiod_get_optional(ov2735->dev,
+ "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(ov2735->enable_gpio))
+ return dev_err_probe(ov2735->dev, PTR_ERR(ov2735->enable_gpio),
+ "failed to get enable GPIO\n");
+
+ ret = ov2735_power_on(ov2735->dev);
+ if (ret)
+ return ret;
+
+ ret = ov2735_identify_module(ov2735);
+ if (ret)
+ goto error_power_off;
+
+ ret = ov2735_init_controls(ov2735);
+ if (ret)
+ goto error_power_off;
+
+ /* Initialize subdev */
+ ov2735->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov2735->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov2735->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&ov2735->sd.entity, 1, &ov2735->pad);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret, "failed to init entity pads\n");
+ goto error_handler_free;
+ }
+
+ ov2735->sd.state_lock = ov2735->handler.lock;
+ ret = v4l2_subdev_init_finalize(&ov2735->sd);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret, "subdev init error\n");
+ goto error_media_entity;
+ }
+
+ ret = devm_pm_runtime_get_noresume(ov2735->dev);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret,
+ "failed to get runtime PM noresume\n");
+ goto error_subdev_cleanup;
+ }
+
+ ret = devm_pm_runtime_set_active_enabled(ov2735->dev);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret,
+ "failed to set runtime PM active+enabled\n");
+ goto error_subdev_cleanup;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov2735->sd);
+ if (ret) {
+ dev_err_probe(ov2735->dev, ret,
+ "failed to register ov2735 sub-device\n");
+ goto error_subdev_cleanup;
+ }
+
+ return 0;
+
+error_subdev_cleanup:
+ v4l2_subdev_cleanup(&ov2735->sd);
+
+error_media_entity:
+ media_entity_cleanup(&ov2735->sd.entity);
+
+error_handler_free:
+ v4l2_ctrl_handler_free(ov2735->sd.ctrl_handler);
+
+error_power_off:
+ ov2735_power_off(ov2735->dev);
+
+ return ret;
+}
+
+static void ov2735_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov2735 *ov2735 = to_ov2735(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(&ov2735->sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(ov2735->sd.ctrl_handler);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ov2735_pm_ops,
+ ov2735_power_off, ov2735_power_on, NULL);
+
+static const struct of_device_id ov2735_id[] = {
+ { .compatible = "ovti,ov2735" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov2735_id);
+
+static struct i2c_driver ov2735_driver = {
+ .driver = {
+ .name = "ov2735",
+ .pm = pm_ptr(&ov2735_pm_ops),
+ .of_match_table = ov2735_id,
+ },
+ .probe = ov2735_probe,
+ .remove = ov2735_remove,
+};
+module_i2c_driver(ov2735_driver);
+
+MODULE_DESCRIPTION("OV2735 Camera Sensor Driver");
+MODULE_AUTHOR("Hardevsinh Palaniya <hardevsinh.palaniya@siliconsignals.io>");
+MODULE_AUTHOR("Himanshu Bhavani <himanshu.bhavani@siliconsignals.io>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov2740.c b/drivers/media/i2c/ov2740.c
index 4e959534e6e7..fb590dfadda1 100644
--- a/drivers/media/i2c/ov2740.c
+++ b/drivers/media/i2c/ov2740.c
@@ -1,17 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/pm_runtime.h>
#include <linux/nvmem-provider.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -519,6 +519,8 @@ static const struct ov2740_mode supported_modes_180mhz[] = {
};
struct ov2740 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -616,7 +618,6 @@ static int ov2740_write_reg(struct ov2740 *ov2740, u16 reg, u16 len, u32 val)
static int ov2740_write_reg_list(struct ov2740 *ov2740,
const struct ov2740_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
unsigned int i;
int ret;
@@ -624,7 +625,7 @@ static int ov2740_write_reg_list(struct ov2740 *ov2740,
ret = ov2740_write_reg(ov2740, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov2740->dev,
"write reg 0x%4.4x return err = %d\n",
r_list->regs[i].address, ret);
return ret;
@@ -636,7 +637,6 @@ static int ov2740_write_reg_list(struct ov2740 *ov2740,
static int ov2740_identify_module(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
int ret;
u32 val;
@@ -648,12 +648,12 @@ static int ov2740_identify_module(struct ov2740 *ov2740)
return ret;
if (val != OV2740_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x != %x\n",
+ dev_err(ov2740->dev, "chip id mismatch: %x != %x\n",
OV2740_CHIP_ID, val);
return -ENXIO;
}
- dev_dbg(&client->dev, "chip id: 0x%x\n", val);
+ dev_dbg(ov2740->dev, "chip id: 0x%x\n", val);
ov2740->identified = true;
@@ -704,7 +704,6 @@ static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov2740 *ov2740 = container_of(ctrl->handler,
struct ov2740, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
s64 exposure_max;
int ret;
@@ -720,7 +719,7 @@ static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov2740->dev))
return 0;
switch (ctrl->id) {
@@ -753,7 +752,7 @@ static int ov2740_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov2740->dev);
return ret;
}
@@ -764,7 +763,6 @@ static const struct v4l2_ctrl_ops ov2740_ctrl_ops = {
static int ov2740_init_controls(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max, h_blank, pixel_rate;
u32 vblank_min, vblank_max, vblank_default;
@@ -821,7 +819,7 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
ARRAY_SIZE(ov2740_test_pattern_menu) - 1,
0, 0, ov2740_test_pattern_menu);
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov2740->dev, &props);
if (ret) {
v4l2_ctrl_handler_free(ctrl_hdlr);
return ret;
@@ -940,7 +938,6 @@ err:
static int ov2740_start_streaming(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
const struct ov2740_reg_list *reg_list;
int link_freq_index;
int ret;
@@ -955,7 +952,7 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
/* Reset the sensor */
ret = ov2740_write_reg(ov2740, 0x0103, 1, 0x01);
if (ret) {
- dev_err(&client->dev, "failed to reset\n");
+ dev_err(ov2740->dev, "failed to reset\n");
return ret;
}
@@ -965,14 +962,14 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov2740_write_reg_list(ov2740, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls\n");
+ dev_err(ov2740->dev, "failed to set plls\n");
return ret;
}
reg_list = &ov2740->cur_mode->reg_list;
ret = ov2740_write_reg_list(ov2740, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode\n");
+ dev_err(ov2740->dev, "failed to set mode\n");
return ret;
}
@@ -983,31 +980,28 @@ static int ov2740_start_streaming(struct ov2740 *ov2740)
ret = ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1,
OV2740_MODE_STREAMING);
if (ret)
- dev_err(&client->dev, "failed to start streaming\n");
+ dev_err(ov2740->dev, "failed to start streaming\n");
return ret;
}
static void ov2740_stop_streaming(struct ov2740 *ov2740)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov2740->sd);
-
if (ov2740_write_reg(ov2740, OV2740_REG_MODE_SELECT, 1,
OV2740_MODE_STANDBY))
- dev_err(&client->dev, "failed to stop streaming\n");
+ dev_err(ov2740->dev, "failed to stop streaming\n");
}
static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov2740 *ov2740 = to_ov2740(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
struct v4l2_subdev_state *sd_state;
int ret = 0;
sd_state = v4l2_subdev_lock_and_get_active_state(&ov2740->sd);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov2740->dev);
if (ret < 0)
goto out_unlock;
@@ -1015,11 +1009,11 @@ static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov2740_stop_streaming(ov2740);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov2740->dev);
}
} else {
ov2740_stop_streaming(ov2740);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov2740->dev);
}
out_unlock:
@@ -1131,16 +1125,14 @@ static const struct media_entity_operations ov2740_subdev_entity_ops = {
.link_validate = v4l2_subdev_link_validate,
};
-static int ov2740_check_hwcfg(struct device *dev)
+static int ov2740_check_hwcfg(struct ov2740 *ov2740)
{
- struct v4l2_subdev *sd = dev_get_drvdata(dev);
- struct ov2740 *ov2740 = to_ov2740(sd);
+ struct device *dev = ov2740->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
@@ -1153,20 +1145,6 @@ static int ov2740_check_hwcfg(struct device *dev)
return dev_err_probe(dev, -EPROBE_DEFER,
"waiting for fwnode graph endpoint\n");
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, ret,
- "reading clock-frequency property\n");
- }
-
- if (mclk != OV2740_MCLK) {
- fwnode_handle_put(ep);
- return dev_err_probe(dev, -EINVAL,
- "external clock %d is not supported\n",
- mclk);
- }
-
ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
fwnode_handle_put(ep);
if (ret)
@@ -1270,7 +1248,7 @@ static int ov2740_register_nvmem(struct i2c_client *client,
struct regmap_config regmap_config = { };
struct nvmem_config nvmem_config = { };
struct regmap *regmap;
- struct device *dev = &client->dev;
+ struct device *dev = ov2740->dev;
nvm = devm_kzalloc(dev, sizeof(*nvm), GFP_KERNEL);
if (!nvm)
@@ -1349,6 +1327,7 @@ static int ov2740_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ov2740 *ov2740;
+ unsigned long freq;
bool full_power;
unsigned int i;
int ret;
@@ -1357,10 +1336,12 @@ static int ov2740_probe(struct i2c_client *client)
if (!ov2740)
return -ENOMEM;
+ ov2740->dev = &client->dev;
+
v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);
ov2740->sd.internal_ops = &ov2740_internal_ops;
- ret = ov2740_check_hwcfg(dev);
+ ret = ov2740_check_hwcfg(ov2740);
if (ret)
return ret;
@@ -1384,11 +1365,17 @@ static int ov2740_probe(struct i2c_client *client)
msleep(20);
}
- ov2740->clk = devm_clk_get_optional(dev, "clk");
+ ov2740->clk = devm_v4l2_sensor_clk_get(dev, "clk");
if (IS_ERR(ov2740->clk))
return dev_err_probe(dev, PTR_ERR(ov2740->clk),
"failed to get clock\n");
+ freq = clk_get_rate(ov2740->clk);
+ if (freq != OV2740_MCLK)
+ return dev_err_probe(dev, -EINVAL,
+ "external clock %lu is not supported\n",
+ freq);
+
for (i = 0; i < ARRAY_SIZE(ov2740_supply_name); i++)
ov2740->supplies[i].supply = ov2740_supply_name[i];
@@ -1397,7 +1384,7 @@ static int ov2740_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "failed to get regulators\n");
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov2740->dev);
if (full_power) {
/* ACPI does not always clear the reset GPIO / enable the clock */
ret = ov2740_resume(dev);
@@ -1435,9 +1422,9 @@ static int ov2740_probe(struct i2c_client *client)
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov2740->dev);
+ pm_runtime_enable(ov2740->dev);
+ pm_runtime_idle(ov2740->dev);
ret = v4l2_async_register_subdev_sensor(&ov2740->sd);
if (ret < 0) {
@@ -1447,13 +1434,13 @@ static int ov2740_probe(struct i2c_client *client)
ret = ov2740_register_nvmem(client, ov2740);
if (ret)
- dev_warn(&client->dev, "register nvmem failed, ret %d\n", ret);
+ dev_warn(ov2740->dev, "register nvmem failed, ret %d\n", ret);
return 0;
probe_error_v4l2_subdev_cleanup:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov2740->dev);
+ pm_runtime_set_suspended(ov2740->dev);
v4l2_subdev_cleanup(&ov2740->sd);
probe_error_media_entity_cleanup:
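The ov2740 hunks above all apply one pattern: cache the struct device pointer in the driver state at probe time and drop the per-call v4l2_get_subdevdata() lookups. A minimal sketch of that pattern, reusing the ov2740 names from the diff rather than showing a complete driver (ov2740_probe_sketch is a hypothetical name):

struct ov2740 {
	struct device *dev;		/* cached &client->dev */
	struct v4l2_subdev sd;
	/* ... */
};

static int ov2740_probe_sketch(struct i2c_client *client)
{
	struct ov2740 *ov2740;

	ov2740 = devm_kzalloc(&client->dev, sizeof(*ov2740), GFP_KERNEL);
	if (!ov2740)
		return -ENOMEM;

	/* Cache once; later call sites use ov2740->dev instead of
	 * v4l2_get_subdevdata(&ov2740->sd) to reach the device. */
	ov2740->dev = &client->dev;

	v4l2_i2c_subdev_init(&ov2740->sd, client, &ov2740_subdev_ops);

	return 0;
}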
diff --git a/drivers/media/i2c/ov4689.c b/drivers/media/i2c/ov4689.c
index 7d740ad3926f..a59d25b09b5b 100644
--- a/drivers/media/i2c/ov4689.c
+++ b/drivers/media/i2c/ov4689.c
@@ -907,20 +907,12 @@ static int ov4689_probe(struct i2c_client *client)
ov4689->cur_mode = &supported_modes[OV4689_MODE_2688_1520];
- ov4689->xvclk = devm_clk_get_optional(dev, NULL);
+ ov4689->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov4689->xvclk))
return dev_err_probe(dev, PTR_ERR(ov4689->xvclk),
"Failed to get external clock\n");
- if (!ov4689->xvclk) {
- dev_dbg(dev,
- "No clock provided, using clock-frequency property\n");
- device_property_read_u32(dev, "clock-frequency",
- &ov4689->clock_rate);
- } else {
- ov4689->clock_rate = clk_get_rate(ov4689->xvclk);
- }
-
+ ov4689->clock_rate = clk_get_rate(ov4689->xvclk);
if (ov4689->clock_rate != OV4689_XVCLK_FREQ) {
dev_err(dev,
"External clock rate mismatch: got %d Hz, expected %d Hz\n",
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index 84198613381d..85ecc23b3587 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -3895,11 +3895,10 @@ static int ov5640_probe(struct i2c_client *client)
ov5640_dvp_default_fmt;
/* get system clock (xclk) */
- sensor->xclk = devm_clk_get(dev, "xclk");
- if (IS_ERR(sensor->xclk)) {
- dev_err(dev, "failed to get xclk\n");
- return PTR_ERR(sensor->xclk);
- }
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, "xclk");
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "failed to get xclk\n");
sensor->xclk_freq = clk_get_rate(sensor->xclk);
if (sensor->xclk_freq < OV5640_XCLK_MIN ||
diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
index 58c846a44376..b10d408034a1 100644
--- a/drivers/media/i2c/ov5645.c
+++ b/drivers/media/i2c/ov5645.c
@@ -1042,27 +1042,18 @@ static int ov5645_probe(struct i2c_client *client)
"invalid bus type, must be CSI2\n");
/* get system clock (xclk) */
- ov5645->xclk = devm_clk_get(dev, NULL);
+ ov5645->xclk = devm_v4l2_sensor_clk_get_legacy(dev, NULL, false, 0);
if (IS_ERR(ov5645->xclk))
return dev_err_probe(dev, PTR_ERR(ov5645->xclk),
"could not get xclk");
- ret = of_property_read_u32(dev->of_node, "clock-frequency", &xclk_freq);
- if (ret)
- return dev_err_probe(dev, ret,
- "could not get xclk frequency\n");
-
/* external clock must be 24MHz, allow 1% tolerance */
+ xclk_freq = clk_get_rate(ov5645->xclk);
if (xclk_freq < 23760000 || xclk_freq > 24240000)
return dev_err_probe(dev, -EINVAL,
"unsupported xclk frequency %u\n",
xclk_freq);
- ret = clk_set_rate(ov5645->xclk, xclk_freq);
- if (ret)
- return dev_err_probe(dev, ret,
- "could not set xclk frequency\n");
-
for (i = 0; i < OV5645_NUM_SUPPLIES; i++)
ov5645->supplies[i].supply = ov5645_supply_name[i];
diff --git a/drivers/media/i2c/ov5647.c b/drivers/media/i2c/ov5647.c
index a727beb9d57e..e193fef4fced 100644
--- a/drivers/media/i2c/ov5647.c
+++ b/drivers/media/i2c/ov5647.c
@@ -1398,11 +1398,10 @@ static int ov5647_probe(struct i2c_client *client)
}
}
- sensor->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->xclk)) {
- dev_err(dev, "could not get xclk");
- return PTR_ERR(sensor->xclk);
- }
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "could not get xclk\n");
xclk_freq = clk_get_rate(sensor->xclk);
if (xclk_freq != 25000000) {
diff --git a/drivers/media/i2c/ov5648.c b/drivers/media/i2c/ov5648.c
index 4b86d2631bd1..f0b839cd65f1 100644
--- a/drivers/media/i2c/ov5648.c
+++ b/drivers/media/i2c/ov5648.c
@@ -1061,8 +1061,8 @@ static int ov5648_sw_standby(struct ov5648_sensor *sensor, int standby)
static int ov5648_chip_id_check(struct ov5648_sensor *sensor)
{
- u16 regs[] = { OV5648_CHIP_ID_H_REG, OV5648_CHIP_ID_L_REG };
- u8 values[] = { OV5648_CHIP_ID_H_VALUE, OV5648_CHIP_ID_L_VALUE };
+ static const u16 regs[] = { OV5648_CHIP_ID_H_REG, OV5648_CHIP_ID_L_REG };
+ static const u8 values[] = { OV5648_CHIP_ID_H_VALUE, OV5648_CHIP_ID_L_VALUE };
unsigned int i;
u8 value;
int ret;
@@ -2521,10 +2521,10 @@ static int ov5648_probe(struct i2c_client *client)
/* External Clock */
- sensor->xvclk = devm_clk_get(dev, NULL);
+ sensor->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->xvclk)) {
- dev_err(dev, "failed to get external clock\n");
- ret = PTR_ERR(sensor->xvclk);
+ ret = dev_err_probe(dev, PTR_ERR(sensor->xvclk),
+ "failed to get external clock\n");
goto error_endpoint;
}
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index b9efb2d2276a..04b3183b7bcb 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -12,6 +11,8 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-event.h>
@@ -1854,6 +1855,8 @@ static const struct ov5670_mode supported_modes[] = {
};
struct ov5670 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_fwnode_endpoint endpoint;
@@ -1959,7 +1962,6 @@ static int ov5670_write_reg(struct ov5670 *ov5670, u16 reg, unsigned int len,
static int ov5670_write_regs(struct ov5670 *ov5670,
const struct ov5670_reg *regs, unsigned int len)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
unsigned int i;
int ret;
@@ -1967,7 +1969,7 @@ static int ov5670_write_regs(struct ov5670 *ov5670,
ret = ov5670_write_reg(ov5670, regs[i].address, 1, regs[i].val);
if (ret) {
dev_err_ratelimited(
- &client->dev,
+ ov5670->dev,
"Failed to write reg 0x%4.4x. error = %d\n",
regs[i].address, ret);
@@ -2032,7 +2034,6 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov5670 *ov5670 = container_of(ctrl->handler,
struct ov5670, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
s64 max;
int ret;
@@ -2048,7 +2049,7 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 control values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov5670->dev))
return 0;
switch (ctrl->id) {
@@ -2080,12 +2081,12 @@ static int ov5670_set_ctrl(struct v4l2_ctrl *ctrl)
break;
default:
ret = -EINVAL;
- dev_info(&client->dev, "%s Unhandled id:0x%x, val:0x%x\n",
+ dev_info(ov5670->dev, "%s Unhandled id:0x%x, val:0x%x\n",
__func__, ctrl->id, ctrl->val);
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5670->dev);
return ret;
}
@@ -2099,7 +2100,6 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
{
struct v4l2_mbus_config_mipi_csi2 *bus_mipi_csi2 =
&ov5670->endpoint.bus.mipi_csi2;
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
unsigned int lanes_count;
@@ -2177,7 +2177,7 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
goto error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov5670->dev, &props);
if (ret)
goto error;
@@ -2350,7 +2350,6 @@ static int ov5670_get_skip_frames(struct v4l2_subdev *sd, u32 *frames)
/* Verify chip ID */
static int ov5670_identify_module(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
int ret;
u32 val;
@@ -2363,7 +2362,7 @@ static int ov5670_identify_module(struct ov5670 *ov5670)
return ret;
if (val != OV5670_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ dev_err(ov5670->dev, "chip id mismatch: %x!=%x\n",
OV5670_CHIP_ID, val);
return -ENXIO;
}
@@ -2389,7 +2388,6 @@ static int ov5670_mipi_configure(struct ov5670 *ov5670)
/* Prepare streaming by writing default values and customized values */
static int ov5670_start_streaming(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
const struct ov5670_reg_list *reg_list;
int link_freq_index;
int ret;
@@ -2402,7 +2400,7 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
ret = ov5670_write_reg(ov5670, OV5670_REG_SOFTWARE_RST,
OV5670_REG_VALUE_08BIT, OV5670_SOFTWARE_RST);
if (ret) {
- dev_err(&client->dev, "%s failed to set powerup registers\n",
+ dev_err(ov5670->dev, "%s failed to set powerup registers\n",
__func__);
return ret;
}
@@ -2412,7 +2410,7 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov5670_write_reg_list(ov5670, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set plls\n", __func__);
return ret;
}
@@ -2420,13 +2418,13 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
reg_list = &ov5670->cur_mode->reg_list;
ret = ov5670_write_reg_list(ov5670, reg_list);
if (ret) {
- dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set mode\n", __func__);
return ret;
}
ret = ov5670_mipi_configure(ov5670);
if (ret) {
- dev_err(&client->dev, "%s failed to configure MIPI\n", __func__);
+ dev_err(ov5670->dev, "%s failed to configure MIPI\n", __func__);
return ret;
}
@@ -2438,7 +2436,7 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT,
OV5670_REG_VALUE_08BIT, OV5670_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set stream\n", __func__);
return ret;
}
@@ -2447,13 +2445,12 @@ static int ov5670_start_streaming(struct ov5670 *ov5670)
static int ov5670_stop_streaming(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
int ret;
ret = ov5670_write_reg(ov5670, OV5670_REG_MODE_SELECT,
OV5670_REG_VALUE_08BIT, OV5670_MODE_STANDBY);
if (ret)
- dev_err(&client->dev, "%s failed to set stream\n", __func__);
+ dev_err(ov5670->dev, "%s failed to set stream\n", __func__);
/* Return success even if it was an error, as there is nothing the
* caller can do about it.
@@ -2464,13 +2461,12 @@ static int ov5670_stop_streaming(struct ov5670 *ov5670)
static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov5670 *ov5670 = to_ov5670(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov5670->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov5670->dev);
if (ret < 0)
goto unlock_and_return;
@@ -2479,12 +2475,12 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
goto error;
} else {
ret = ov5670_stop_streaming(ov5670);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5670->dev);
}
goto unlock_and_return;
error:
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5670->dev);
unlock_and_return:
mutex_unlock(&ov5670->mutex);
@@ -2621,26 +2617,23 @@ static const struct media_entity_operations ov5670_subdev_entity_ops = {
static int ov5670_regulators_probe(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
unsigned int i;
for (i = 0; i < OV5670_NUM_SUPPLIES; i++)
ov5670->supplies[i].supply = ov5670_supply_names[i];
- return devm_regulator_bulk_get(&client->dev, OV5670_NUM_SUPPLIES,
+ return devm_regulator_bulk_get(ov5670->dev, OV5670_NUM_SUPPLIES,
ov5670->supplies);
}
static int ov5670_gpio_probe(struct ov5670 *ov5670)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5670->sd);
-
- ov5670->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
+ ov5670->pwdn_gpio = devm_gpiod_get_optional(ov5670->dev, "powerdown",
GPIOD_OUT_LOW);
if (IS_ERR(ov5670->pwdn_gpio))
return PTR_ERR(ov5670->pwdn_gpio);
- ov5670->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+ ov5670->reset_gpio = devm_gpiod_get_optional(ov5670->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ov5670->reset_gpio))
return PTR_ERR(ov5670->reset_gpio);
@@ -2660,18 +2653,16 @@ static int ov5670_probe(struct i2c_client *client)
if (!ov5670)
return -ENOMEM;
- ov5670->xvclk = devm_clk_get_optional(&client->dev, NULL);
- if (!IS_ERR_OR_NULL(ov5670->xvclk))
- input_clk = clk_get_rate(ov5670->xvclk);
- else if (!ov5670->xvclk || PTR_ERR(ov5670->xvclk) == -ENOENT)
- device_property_read_u32(&client->dev, "clock-frequency",
- &input_clk);
- else
- return dev_err_probe(&client->dev, PTR_ERR(ov5670->xvclk),
+ ov5670->dev = &client->dev;
+
+ ov5670->xvclk = devm_v4l2_sensor_clk_get(ov5670->dev, NULL);
+ if (IS_ERR(ov5670->xvclk))
+ return dev_err_probe(ov5670->dev, PTR_ERR(ov5670->xvclk),
"error getting clock\n");
+ input_clk = clk_get_rate(ov5670->xvclk);
if (input_clk != OV5670_XVCLK_FREQ) {
- dev_err(&client->dev,
+ dev_err(ov5670->dev,
"Unsupported clock frequency %u\n", input_clk);
return -EINVAL;
}
@@ -2682,20 +2673,20 @@ static int ov5670_probe(struct i2c_client *client)
ret = ov5670_regulators_probe(ov5670);
if (ret)
- return dev_err_probe(&client->dev, ret, "Regulators probe failed\n");
+ return dev_err_probe(ov5670->dev, ret, "Regulators probe failed\n");
ret = ov5670_gpio_probe(ov5670);
if (ret)
- return dev_err_probe(&client->dev, ret, "GPIO probe failed\n");
+ return dev_err_probe(ov5670->dev, ret, "GPIO probe failed\n");
/*
* Graph Endpoint. If it's missing we defer rather than fail, as this
* sensor is known to co-exist on systems with the IPU3 and so it might
* be created by the ipu-bridge.
*/
- handle = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+ handle = fwnode_graph_get_next_endpoint(dev_fwnode(ov5670->dev), NULL);
if (!handle)
- return dev_err_probe(&client->dev, -EPROBE_DEFER,
+ return dev_err_probe(ov5670->dev, -EPROBE_DEFER,
"Endpoint for node get failed\n");
ov5670->endpoint.bus_type = V4L2_MBUS_CSI2_DPHY;
@@ -2704,20 +2695,20 @@ static int ov5670_probe(struct i2c_client *client)
ret = v4l2_fwnode_endpoint_alloc_parse(handle, &ov5670->endpoint);
fwnode_handle_put(handle);
if (ret)
- return dev_err_probe(&client->dev, ret, "Endpoint parse failed\n");
+ return dev_err_probe(ov5670->dev, ret, "Endpoint parse failed\n");
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov5670->dev);
if (full_power) {
- ret = ov5670_runtime_resume(&client->dev);
+ ret = ov5670_runtime_resume(ov5670->dev);
if (ret) {
- dev_err_probe(&client->dev, ret, "Power up failed\n");
+ dev_err_probe(ov5670->dev, ret, "Power up failed\n");
goto error_endpoint;
}
/* Check module identity */
ret = ov5670_identify_module(ov5670);
if (ret) {
- dev_err_probe(&client->dev, ret, "ov5670_identify_module() error\n");
+ dev_err_probe(ov5670->dev, ret, "ov5670_identify_module() error\n");
goto error_power_off;
}
}
@@ -2729,7 +2720,7 @@ static int ov5670_probe(struct i2c_client *client)
ret = ov5670_init_controls(ov5670);
if (ret) {
- dev_err_probe(&client->dev, ret, "ov5670_init_controls() error\n");
+ dev_err_probe(ov5670->dev, ret, "ov5670_init_controls() error\n");
goto error_mutex_destroy;
}
@@ -2742,28 +2733,28 @@ static int ov5670_probe(struct i2c_client *client)
ov5670->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov5670->sd.entity, 1, &ov5670->pad);
if (ret) {
- dev_err_probe(&client->dev, ret, "media_entity_pads_init() error\n");
+ dev_err_probe(ov5670->dev, ret, "media_entity_pads_init() error\n");
goto error_handler_free;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
+ pm_runtime_set_active(ov5670->dev);
+ pm_runtime_enable(ov5670->dev);
/* Async register for subdev */
ret = v4l2_async_register_subdev_sensor(&ov5670->sd);
if (ret < 0) {
- dev_err_probe(&client->dev, ret, "v4l2_async_register_subdev() error\n");
+ dev_err_probe(ov5670->dev, ret, "v4l2_async_register_subdev() error\n");
goto error_pm_disable;
}
- pm_runtime_idle(&client->dev);
+ pm_runtime_idle(ov5670->dev);
return 0;
error_pm_disable:
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov5670->dev);
media_entity_cleanup(&ov5670->sd.entity);
@@ -2775,7 +2766,7 @@ error_mutex_destroy:
error_power_off:
if (full_power)
- ov5670_runtime_suspend(&client->dev);
+ ov5670_runtime_suspend(ov5670->dev);
error_endpoint:
v4l2_fwnode_endpoint_free(&ov5670->endpoint);
@@ -2793,8 +2784,8 @@ static void ov5670_remove(struct i2c_client *client)
v4l2_ctrl_handler_free(sd->ctrl_handler);
mutex_destroy(&ov5670->mutex);
- pm_runtime_disable(&client->dev);
- ov5670_runtime_suspend(&client->dev);
+ pm_runtime_disable(ov5670->dev);
+ ov5670_runtime_suspend(ov5670->dev);
v4l2_fwnode_endpoint_free(&ov5670->endpoint);
}
diff --git a/drivers/media/i2c/ov5675.c b/drivers/media/i2c/ov5675.c
index e7aec281e9a4..ea26df328189 100644
--- a/drivers/media/i2c/ov5675.c
+++ b/drivers/media/i2c/ov5675.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -11,6 +10,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -493,6 +494,8 @@ static const struct ov5675_mode supported_modes[] = {
};
struct ov5675 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -584,7 +587,6 @@ static int ov5675_write_reg(struct ov5675 *ov5675, u16 reg, u16 len, u32 val)
static int ov5675_write_reg_list(struct ov5675 *ov5675,
const struct ov5675_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
unsigned int i;
int ret;
@@ -592,7 +594,7 @@ static int ov5675_write_reg_list(struct ov5675 *ov5675,
ret = ov5675_write_reg(ov5675, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov5675->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -700,7 +702,6 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov5675 *ov5675 = container_of(ctrl->handler,
struct ov5675, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
s64 exposure_max;
int ret = 0;
@@ -716,7 +717,7 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 control values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov5675->dev))
return 0;
switch (ctrl->id) {
@@ -765,7 +766,7 @@ static int ov5675_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5675->dev);
return ret;
}
@@ -776,7 +777,6 @@ static const struct v4l2_ctrl_ops ov5675_ctrl_ops = {
static int ov5675_init_controls(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
struct v4l2_fwnode_device_properties props;
struct v4l2_ctrl_handler *ctrl_hdlr;
s64 exposure_max, h_blank;
@@ -839,7 +839,7 @@ static int ov5675_init_controls(struct ov5675 *ov5675)
return ctrl_hdlr->error;
}
- ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ ret = v4l2_fwnode_device_parse(ov5675->dev, &props);
if (ret)
goto error;
@@ -869,7 +869,6 @@ static void ov5675_update_pad_format(const struct ov5675_mode *mode,
static int ov5675_identify_module(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
int ret;
u32 val;
@@ -882,7 +881,7 @@ static int ov5675_identify_module(struct ov5675 *ov5675)
return ret;
if (val != OV5675_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(ov5675->dev, "chip id mismatch: %x!=%x",
OV5675_CHIP_ID, val);
return -ENXIO;
}
@@ -894,7 +893,6 @@ static int ov5675_identify_module(struct ov5675 *ov5675)
static int ov5675_start_streaming(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
const struct ov5675_reg_list *reg_list;
int link_freq_index, ret;
@@ -906,14 +904,14 @@ static int ov5675_start_streaming(struct ov5675 *ov5675)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov5675_write_reg_list(ov5675, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov5675->dev, "failed to set plls");
return ret;
}
reg_list = &ov5675->cur_mode->reg_list;
ret = ov5675_write_reg_list(ov5675, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov5675->dev, "failed to set mode");
return ret;
}
@@ -924,7 +922,7 @@ static int ov5675_start_streaming(struct ov5675 *ov5675)
ret = ov5675_write_reg(ov5675, OV5675_REG_MODE_SELECT,
OV5675_REG_VALUE_08BIT, OV5675_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov5675->dev, "failed to set stream");
return ret;
}
@@ -933,22 +931,19 @@ static int ov5675_start_streaming(struct ov5675 *ov5675)
static void ov5675_stop_streaming(struct ov5675 *ov5675)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov5675->sd);
-
if (ov5675_write_reg(ov5675, OV5675_REG_MODE_SELECT,
OV5675_REG_VALUE_08BIT, OV5675_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov5675->dev, "failed to set stream");
}
static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov5675 *ov5675 = to_ov5675(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov5675->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov5675->dev);
if (ret < 0) {
mutex_unlock(&ov5675->mutex);
return ret;
@@ -958,11 +953,11 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov5675_stop_streaming(ov5675);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5675->dev);
}
} else {
ov5675_stop_streaming(ov5675);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov5675->dev);
}
mutex_unlock(&ov5675->mutex);
@@ -1171,8 +1166,9 @@ static const struct v4l2_subdev_internal_ops ov5675_internal_ops = {
.open = ov5675_open,
};
-static int ov5675_get_hwcfg(struct ov5675 *ov5675, struct device *dev)
+static int ov5675_get_hwcfg(struct ov5675 *ov5675)
{
+ struct device *dev = ov5675->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -1185,24 +1181,13 @@ static int ov5675_get_hwcfg(struct ov5675 *ov5675, struct device *dev)
if (!fwnode)
return -ENXIO;
- ov5675->xvclk = devm_clk_get_optional(dev, NULL);
+ ov5675->xvclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov5675->xvclk))
return dev_err_probe(dev, PTR_ERR(ov5675->xvclk),
- "failed to get xvclk: %ld\n",
- PTR_ERR(ov5675->xvclk));
-
- if (ov5675->xvclk) {
- xvclk_rate = clk_get_rate(ov5675->xvclk);
- } else {
- ret = fwnode_property_read_u32(fwnode, "clock-frequency",
- &xvclk_rate);
-
- if (ret) {
- dev_err(dev, "can't get clock frequency");
- return ret;
- }
- }
+ "failed to get xvclk: %pe\n",
+ ov5675->xvclk);
+ xvclk_rate = clk_get_rate(ov5675->xvclk);
if (xvclk_rate != OV5675_XVCLK_19_2) {
dev_err(dev, "external clock rate %u is unsupported",
xvclk_rate);
@@ -1276,12 +1261,12 @@ static void ov5675_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov5675->dev);
mutex_destroy(&ov5675->mutex);
- if (!pm_runtime_status_suspended(&client->dev))
- ov5675_power_off(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ if (!pm_runtime_status_suspended(ov5675->dev))
+ ov5675_power_off(ov5675->dev);
+ pm_runtime_set_suspended(ov5675->dev);
}
static int ov5675_probe(struct i2c_client *client)
@@ -1294,23 +1279,25 @@ static int ov5675_probe(struct i2c_client *client)
if (!ov5675)
return -ENOMEM;
- ret = ov5675_get_hwcfg(ov5675, &client->dev);
+ ov5675->dev = &client->dev;
+
+ ret = ov5675_get_hwcfg(ov5675);
if (ret)
return ret;
v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops);
- ret = ov5675_power_on(&client->dev);
+ ret = ov5675_power_on(ov5675->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on: %d\n", ret);
+ dev_err(ov5675->dev, "failed to power on: %d\n", ret);
return ret;
}
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov5675->dev);
if (full_power) {
ret = ov5675_identify_module(ov5675);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov5675->dev, "failed to find sensor: %d", ret);
goto probe_power_off;
}
}
@@ -1319,7 +1306,7 @@ static int ov5675_probe(struct i2c_client *client)
ov5675->cur_mode = &supported_modes[0];
ret = ov5675_init_controls(ov5675);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov5675->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -1330,22 +1317,22 @@ static int ov5675_probe(struct i2c_client *client)
ov5675->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov5675->sd.entity, 1, &ov5675->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov5675->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&ov5675->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov5675->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov5675->dev);
+ pm_runtime_enable(ov5675->dev);
+ pm_runtime_idle(ov5675->dev);
return 0;
@@ -1356,7 +1343,7 @@ probe_error_v4l2_ctrl_handler_free:
v4l2_ctrl_handler_free(ov5675->sd.ctrl_handler);
mutex_destroy(&ov5675->mutex);
probe_power_off:
- ov5675_power_off(&client->dev);
+ ov5675_power_off(ov5675->dev);
return ret;
}
diff --git a/drivers/media/i2c/ov5693.c b/drivers/media/i2c/ov5693.c
index 485efd15257e..4cc796bbee92 100644
--- a/drivers/media/i2c/ov5693.c
+++ b/drivers/media/i2c/ov5693.c
@@ -1289,25 +1289,13 @@ static int ov5693_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&ov5693->sd, client, &ov5693_ops);
- ov5693->xvclk = devm_clk_get_optional(&client->dev, "xvclk");
+ ov5693->xvclk = devm_v4l2_sensor_clk_get(&client->dev, "xvclk");
if (IS_ERR(ov5693->xvclk))
return dev_err_probe(&client->dev, PTR_ERR(ov5693->xvclk),
- "failed to get xvclk: %ld\n",
- PTR_ERR(ov5693->xvclk));
-
- if (ov5693->xvclk) {
- xvclk_rate = clk_get_rate(ov5693->xvclk);
- } else {
- ret = fwnode_property_read_u32(dev_fwnode(&client->dev),
- "clock-frequency",
- &xvclk_rate);
-
- if (ret) {
- dev_err(&client->dev, "can't get clock frequency");
- return ret;
- }
- }
+ "failed to get xvclk: %pe\n",
+ ov5693->xvclk);
+ xvclk_rate = clk_get_rate(ov5693->xvclk);
if (xvclk_rate != OV5693_XVCLK_FREQ)
dev_warn(&client->dev, "Found clk freq %u, expected %u\n",
xvclk_rate, OV5693_XVCLK_FREQ);
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 663eccdfea6a..5bb6ce7b3237 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1264,16 +1264,12 @@ static int ov5695_probe(struct i2c_client *client)
ov5695->client = client;
ov5695->cur_mode = &supported_modes[0];
- ov5695->xvclk = devm_clk_get(dev, "xvclk");
- if (IS_ERR(ov5695->xvclk)) {
- dev_err(dev, "Failed to get xvclk\n");
- return -EINVAL;
- }
- ret = clk_set_rate(ov5695->xvclk, OV5695_XVCLK_FREQ);
- if (ret < 0) {
- dev_err(dev, "Failed to set xvclk rate (24MHz)\n");
- return ret;
- }
+ ov5695->xvclk = devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", true,
+ OV5695_XVCLK_FREQ);
+ if (IS_ERR(ov5695->xvclk))
+ return dev_err_probe(dev, PTR_ERR(ov5695->xvclk),
+ "Failed to get xvclk\n");
+
if (clk_get_rate(ov5695->xvclk) != OV5695_XVCLK_FREQ)
dev_warn(dev, "xvclk mismatched, modes are based on 24MHz\n");
diff --git a/drivers/media/i2c/ov6211.c b/drivers/media/i2c/ov6211.c
new file mode 100644
index 000000000000..e3ac5ecf27d1
--- /dev/null
+++ b/drivers/media/i2c/ov6211.c
@@ -0,0 +1,793 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024-2025 Linaro Ltd
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/units.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV6211_LINK_FREQ_480MHZ (480 * HZ_PER_MHZ)
+#define OV6211_MCLK_FREQ_24MHZ (24 * HZ_PER_MHZ)
+
+#define OV6211_REG_CHIP_ID CCI_REG16(0x300a)
+#define OV6211_CHIP_ID 0x6710
+
+#define OV6211_REG_MODE_SELECT CCI_REG8(0x0100)
+#define OV6211_MODE_STANDBY 0x00
+#define OV6211_MODE_STREAMING BIT(0)
+
+#define OV6211_REG_SOFTWARE_RST CCI_REG8(0x0103)
+#define OV6211_SOFTWARE_RST BIT(0)
+
+/* Exposure controls from sensor */
+#define OV6211_REG_EXPOSURE CCI_REG24(0x3500)
+#define OV6211_EXPOSURE_MIN 1
+#define OV6211_EXPOSURE_MAX_MARGIN 4
+#define OV6211_EXPOSURE_STEP 1
+#define OV6211_EXPOSURE_DEFAULT 210
+
+/* Analogue gain controls from sensor */
+#define OV6211_REG_ANALOGUE_GAIN CCI_REG16(0x350a)
+#define OV6211_ANALOGUE_GAIN_MIN 1
+#define OV6211_ANALOGUE_GAIN_MAX 0x3ff
+#define OV6211_ANALOGUE_GAIN_STEP 1
+#define OV6211_ANALOGUE_GAIN_DEFAULT 160
+
+/* Test pattern */
+#define OV6211_REG_PRE_ISP CCI_REG8(0x5e00)
+#define OV6211_TEST_PATTERN_ENABLE BIT(7)
+
+#define to_ov6211(_sd) container_of(_sd, struct ov6211, sd)
+
+static const s64 ov6211_link_freq_menu[] = {
+ OV6211_LINK_FREQ_480MHZ,
+};
+
+struct ov6211_reg_list {
+ const struct cci_reg_sequence *regs;
+ unsigned int num_regs;
+};
+
+struct ov6211_mode {
+ u32 width; /* Frame width in pixels */
+ u32 height; /* Frame height in pixels */
+ u32 hts; /* Horizontal timing size */
+ u32 vts; /* Default vertical timing size */
+ u32 bpp; /* Bits per pixel */
+
+ const struct ov6211_reg_list reg_list; /* Sensor register setting */
+};
+
+static const char * const ov6211_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Colour Bars",
+};
+
+static const char * const ov6211_supply_names[] = {
+ "avdd", /* Analog power */
+ "dovdd", /* Digital I/O power */
+ "dvdd", /* Digital core power */
+};
+
+#define OV6211_NUM_SUPPLIES ARRAY_SIZE(ov6211_supply_names)
+
+struct ov6211 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk *xvclk;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[OV6211_NUM_SUPPLIES];
+
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+
+ /* Saved register values */
+ u64 pre_isp;
+};
+
+static const struct cci_reg_sequence ov6211_400x400_120fps_mode[] = {
+ { CCI_REG8(0x3005), 0x00 },
+ { CCI_REG8(0x3013), 0x12 },
+ { CCI_REG8(0x3014), 0x04 },
+ { CCI_REG8(0x3016), 0x10 },
+ { CCI_REG8(0x3017), 0x00 },
+ { CCI_REG8(0x3018), 0x00 },
+ { CCI_REG8(0x301a), 0x00 },
+ { CCI_REG8(0x301b), 0x00 },
+ { CCI_REG8(0x301c), 0x00 },
+ { CCI_REG8(0x3037), 0xf0 },
+ { CCI_REG8(0x3080), 0x01 },
+ { CCI_REG8(0x3081), 0x00 },
+ { CCI_REG8(0x3082), 0x01 },
+ { CCI_REG8(0x3098), 0x04 },
+ { CCI_REG8(0x3099), 0x28 },
+ { CCI_REG8(0x309a), 0x06 },
+ { CCI_REG8(0x309b), 0x04 },
+ { CCI_REG8(0x309c), 0x00 },
+ { CCI_REG8(0x309d), 0x00 },
+ { CCI_REG8(0x309e), 0x01 },
+ { CCI_REG8(0x309f), 0x00 },
+ { CCI_REG8(0x30b0), 0x08 },
+ { CCI_REG8(0x30b1), 0x02 },
+ { CCI_REG8(0x30b2), 0x00 },
+ { CCI_REG8(0x30b3), 0x28 },
+ { CCI_REG8(0x30b4), 0x02 },
+ { CCI_REG8(0x30b5), 0x00 },
+ { CCI_REG8(0x3106), 0xd9 },
+ { CCI_REG8(0x3503), 0x07 },
+ { CCI_REG8(0x3509), 0x10 },
+ { CCI_REG8(0x3600), 0xfc },
+ { CCI_REG8(0x3620), 0xb7 },
+ { CCI_REG8(0x3621), 0x05 },
+ { CCI_REG8(0x3626), 0x31 },
+ { CCI_REG8(0x3627), 0x40 },
+ { CCI_REG8(0x3632), 0xa3 },
+ { CCI_REG8(0x3633), 0x34 },
+ { CCI_REG8(0x3634), 0x40 },
+ { CCI_REG8(0x3636), 0x00 },
+ { CCI_REG8(0x3660), 0x80 },
+ { CCI_REG8(0x3662), 0x03 },
+ { CCI_REG8(0x3664), 0xf0 },
+ { CCI_REG8(0x366a), 0x10 },
+ { CCI_REG8(0x366b), 0x06 },
+ { CCI_REG8(0x3680), 0xf4 },
+ { CCI_REG8(0x3681), 0x50 },
+ { CCI_REG8(0x3682), 0x00 },
+ { CCI_REG8(0x3708), 0x20 },
+ { CCI_REG8(0x3709), 0x40 },
+ { CCI_REG8(0x370d), 0x03 },
+ { CCI_REG8(0x373b), 0x02 },
+ { CCI_REG8(0x373c), 0x08 },
+ { CCI_REG8(0x3742), 0x00 },
+ { CCI_REG8(0x3744), 0x16 },
+ { CCI_REG8(0x3745), 0x08 },
+ { CCI_REG8(0x3781), 0xfc },
+ { CCI_REG8(0x3788), 0x00 },
+ { CCI_REG8(0x3800), 0x00 },
+ { CCI_REG8(0x3801), 0x04 },
+ { CCI_REG8(0x3802), 0x00 },
+ { CCI_REG8(0x3803), 0x04 },
+ { CCI_REG8(0x3804), 0x01 },
+ { CCI_REG8(0x3805), 0x9b },
+ { CCI_REG8(0x3806), 0x01 },
+ { CCI_REG8(0x3807), 0x9b },
+ { CCI_REG8(0x3808), 0x01 }, /* output width */
+ { CCI_REG8(0x3809), 0x90 },
+ { CCI_REG8(0x380a), 0x01 }, /* output height */
+ { CCI_REG8(0x380b), 0x90 },
+ { CCI_REG8(0x380c), 0x05 }, /* horizontal timing size */
+ { CCI_REG8(0x380d), 0xf2 },
+ { CCI_REG8(0x380e), 0x01 }, /* vertical timing size */
+ { CCI_REG8(0x380f), 0xb6 },
+ { CCI_REG8(0x3810), 0x00 },
+ { CCI_REG8(0x3811), 0x04 },
+ { CCI_REG8(0x3812), 0x00 },
+ { CCI_REG8(0x3813), 0x04 },
+ { CCI_REG8(0x3814), 0x11 },
+ { CCI_REG8(0x3815), 0x11 },
+ { CCI_REG8(0x3820), 0x00 },
+ { CCI_REG8(0x3821), 0x00 },
+ { CCI_REG8(0x382b), 0xfa },
+ { CCI_REG8(0x382f), 0x04 },
+ { CCI_REG8(0x3832), 0x00 },
+ { CCI_REG8(0x3833), 0x05 },
+ { CCI_REG8(0x3834), 0x00 },
+ { CCI_REG8(0x3835), 0x05 },
+ { CCI_REG8(0x3882), 0x04 },
+ { CCI_REG8(0x3883), 0x00 },
+ { CCI_REG8(0x38a4), 0x10 },
+ { CCI_REG8(0x38a5), 0x00 },
+ { CCI_REG8(0x38b1), 0x03 },
+ { CCI_REG8(0x3b80), 0x00 },
+ { CCI_REG8(0x3b81), 0xff },
+ { CCI_REG8(0x3b82), 0x10 },
+ { CCI_REG8(0x3b83), 0x00 },
+ { CCI_REG8(0x3b84), 0x08 },
+ { CCI_REG8(0x3b85), 0x00 },
+ { CCI_REG8(0x3b86), 0x01 },
+ { CCI_REG8(0x3b87), 0x00 },
+ { CCI_REG8(0x3b88), 0x00 },
+ { CCI_REG8(0x3b89), 0x00 },
+ { CCI_REG8(0x3b8a), 0x00 },
+ { CCI_REG8(0x3b8b), 0x05 },
+ { CCI_REG8(0x3b8c), 0x00 },
+ { CCI_REG8(0x3b8d), 0x00 },
+ { CCI_REG8(0x3b8e), 0x01 },
+ { CCI_REG8(0x3b8f), 0xb2 },
+ { CCI_REG8(0x3b94), 0x05 },
+ { CCI_REG8(0x3b95), 0xf2 },
+ { CCI_REG8(0x3b96), 0xc0 },
+ { CCI_REG8(0x4004), 0x04 },
+ { CCI_REG8(0x404e), 0x01 },
+ { CCI_REG8(0x4801), 0x0f },
+ { CCI_REG8(0x4806), 0x0f },
+ { CCI_REG8(0x4837), 0x43 },
+ { CCI_REG8(0x5a08), 0x00 },
+ { CCI_REG8(0x5a01), 0x00 },
+ { CCI_REG8(0x5a03), 0x00 },
+ { CCI_REG8(0x5a04), 0x10 },
+ { CCI_REG8(0x5a05), 0xa0 },
+ { CCI_REG8(0x5a06), 0x0c },
+ { CCI_REG8(0x5a07), 0x78 },
+};
+
+static const struct ov6211_mode supported_modes[] = {
+ {
+ .width = 400,
+ .height = 400,
+ .hts = 1522,
+ .vts = 438,
+ .bpp = 8,
+ .reg_list = {
+ .regs = ov6211_400x400_120fps_mode,
+ .num_regs = ARRAY_SIZE(ov6211_400x400_120fps_mode),
+ },
+ },
+};
+
+static int ov6211_set_test_pattern(struct ov6211 *ov6211, u32 pattern)
+{
+ u64 val = ov6211->pre_isp;
+
+ if (pattern)
+ val |= OV6211_TEST_PATTERN_ENABLE;
+ else
+ val &= ~OV6211_TEST_PATTERN_ENABLE;
+
+ return cci_write(ov6211->regmap, OV6211_REG_PRE_ISP, val, NULL);
+}
+
+static int ov6211_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov6211 *ov6211 = container_of(ctrl->handler, struct ov6211,
+ ctrl_handler);
+ int ret;
+
+ /* V4L2 controls are applied only when the sensor is powered up for streaming */
+ if (!pm_runtime_get_if_active(ov6211->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = cci_write(ov6211->regmap, OV6211_REG_ANALOGUE_GAIN,
+ ctrl->val, NULL);
+ break;
+ case V4L2_CID_EXPOSURE:
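+ /* The low four bits of the exposure register hold fractional lines, hence the shift */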
+ ret = cci_write(ov6211->regmap, OV6211_REG_EXPOSURE,
+ ctrl->val << 4, NULL);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov6211_set_test_pattern(ov6211, ctrl->val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(ov6211->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov6211_ctrl_ops = {
+ .s_ctrl = ov6211_set_ctrl,
+};
+
+static int ov6211_init_controls(struct ov6211 *ov6211)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr = &ov6211->ctrl_handler;
+ const struct ov6211_mode *mode = &supported_modes[0];
+ struct v4l2_fwnode_device_properties props;
+ s64 exposure_max, pixel_rate, h_blank;
+ struct v4l2_ctrl *ctrl;
+ int ret;
+
+ v4l2_ctrl_handler_init(ctrl_hdlr, 9);
+
+ ctrl = v4l2_ctrl_new_int_menu(ctrl_hdlr, &ov6211_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(ov6211_link_freq_menu) - 1,
+ 0, ov6211_link_freq_menu);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate = ov6211_link_freq_menu[0] / mode->bpp;
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_PIXEL_RATE,
+ 0, pixel_rate, 1, pixel_rate);
+
+ h_blank = mode->hts - mode->width;
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_HBLANK,
+ h_blank, h_blank, 1, h_blank);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ ctrl = v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_VBLANK,
+ mode->vts - mode->height,
+ mode->vts - mode->height, 1,
+ mode->vts - mode->height);
+ if (ctrl)
+ ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV6211_ANALOGUE_GAIN_MIN, OV6211_ANALOGUE_GAIN_MAX,
+ OV6211_ANALOGUE_GAIN_STEP,
+ OV6211_ANALOGUE_GAIN_DEFAULT);
+
+ exposure_max = (mode->vts - OV6211_EXPOSURE_MAX_MARGIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov6211_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV6211_EXPOSURE_MIN, exposure_max,
+ OV6211_EXPOSURE_STEP,
+ OV6211_EXPOSURE_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov6211_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov6211_test_pattern_menu) - 1,
+ 0, 0, ov6211_test_pattern_menu);
+
+ if (ctrl_hdlr->error)
+ return ctrl_hdlr->error;
+
+ ret = v4l2_fwnode_device_parse(ov6211->dev, &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov6211_ctrl_ops,
+ &props);
+ if (ret)
+ goto error_free_hdlr;
+
+ ov6211->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error_free_hdlr:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static void ov6211_update_pad_format(const struct ov6211_mode *mode,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ fmt->code = MEDIA_BUS_FMT_Y8_1X8;
+ fmt->width = mode->width;
+ fmt->height = mode->height;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_RAW;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+ fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
+
+static int ov6211_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ const struct ov6211_reg_list *reg_list = &supported_modes[0].reg_list;
+ struct ov6211 *ov6211 = to_ov6211(sd);
+ int ret;
+
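+ /* Power up the sensor; balanced by pm_runtime_put_autosuspend() on error and in disable_streams() */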
+ ret = pm_runtime_resume_and_get(ov6211->dev);
+ if (ret)
+ return ret;
+
+ /* Software reset; no need to explicitly enter standby mode first */
+ ret = cci_write(ov6211->regmap, OV6211_REG_SOFTWARE_RST,
+ OV6211_SOFTWARE_RST, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to software reset: %d\n", ret);
+ goto error;
+ }
+
+ ret = cci_multi_reg_write(ov6211->regmap, reg_list->regs,
+ reg_list->num_regs, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to set mode: %d\n", ret);
+ goto error;
+ }
+
+ ret = __v4l2_ctrl_handler_setup(ov6211->sd.ctrl_handler);
+ if (ret)
+ goto error;
+
+ ret = cci_write(ov6211->regmap, OV6211_REG_MODE_SELECT,
+ OV6211_MODE_STREAMING, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to start streaming: %d\n", ret);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ pm_runtime_put_autosuspend(ov6211->dev);
+
+ return ret;
+}
+
+static int ov6211_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct ov6211 *ov6211 = to_ov6211(sd);
+ int ret;
+
+ ret = cci_write(ov6211->regmap, OV6211_REG_MODE_SELECT,
+ OV6211_MODE_STANDBY, NULL);
+ if (ret)
+ dev_err(ov6211->dev, "failed to stop streaming: %d\n", ret);
+
+ pm_runtime_put_autosuspend(ov6211->dev);
+
+ return ret;
+}
+
+static int ov6211_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *format;
+ const struct ov6211_mode *mode;
+
+ format = v4l2_subdev_state_get_format(state, 0);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width,
+ fmt->format.height);
+
+ ov6211_update_pad_format(mode, &fmt->format);
+ *format = fmt->format;
+
+ return 0;
+}
+
+static int ov6211_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_Y8_1X8;
+
+ return 0;
+}
+
+static int ov6211_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_Y8_1X8)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static int ov6211_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_subdev_format fmt = {
+ .which = V4L2_SUBDEV_FORMAT_TRY,
+ .pad = 0,
+ .format = {
+ .code = MEDIA_BUS_FMT_Y8_1X8,
+ .width = supported_modes[0].width,
+ .height = supported_modes[0].height,
+ },
+ };
+
+ ov6211_set_pad_format(sd, state, &fmt);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov6211_video_ops = {
+ .s_stream = v4l2_subdev_s_stream_helper,
+};
+
+static const struct v4l2_subdev_pad_ops ov6211_pad_ops = {
+ .set_fmt = ov6211_set_pad_format,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .enum_mbus_code = ov6211_enum_mbus_code,
+ .enum_frame_size = ov6211_enum_frame_size,
+ .enable_streams = ov6211_enable_streams,
+ .disable_streams = ov6211_disable_streams,
+};
+
+static const struct v4l2_subdev_ops ov6211_subdev_ops = {
+ .video = &ov6211_video_ops,
+ .pad = &ov6211_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops ov6211_internal_ops = {
+ .init_state = ov6211_init_state,
+};
+
+static const struct media_entity_operations ov6211_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int ov6211_identify_sensor(struct ov6211 *ov6211)
+{
+ u64 val;
+ int ret;
+
+ ret = cci_read(ov6211->regmap, OV6211_REG_CHIP_ID, &val, NULL);
+ if (ret) {
+ dev_err(ov6211->dev, "failed to read chip id: %d\n", ret);
+ return ret;
+ }
+
+ if (val != OV6211_CHIP_ID) {
+ dev_err(ov6211->dev, "chip id mismatch: %x!=%llx\n",
+ OV6211_CHIP_ID, val);
+ return -ENODEV;
+ }
+
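+ /* Cache PRE_ISP so the test pattern control can toggle only its enable bit */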
+ ret = cci_read(ov6211->regmap, OV6211_REG_PRE_ISP,
+ &ov6211->pre_isp, NULL);
+ if (ret)
+ dev_err(ov6211->dev, "failed to read pre_isp: %d\n", ret);
+
+ return ret;
+}
+
+static int ov6211_check_hwcfg(struct ov6211 *ov6211)
+{
+ struct fwnode_handle *fwnode = dev_fwnode(ov6211->dev), *ep;
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ unsigned long freq_bitmap;
+ int ret;
+
+ if (!fwnode)
+ return -ENODEV;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -EINVAL;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
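+ /* Check that the firmware-declared link frequencies include a supported one */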
+ ret = v4l2_link_freq_to_bitmap(ov6211->dev, bus_cfg.link_frequencies,
+ bus_cfg.nr_of_link_frequencies,
+ ov6211_link_freq_menu,
+ ARRAY_SIZE(ov6211_link_freq_menu),
+ &freq_bitmap);
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov6211_power_on(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov6211 *ov6211 = to_ov6211(sd);
+ int ret;
+
+ ret = regulator_bulk_enable(OV6211_NUM_SUPPLIES, ov6211->supplies);
+ if (ret)
+ return ret;
+
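+ /* Deassert reset and let the supplies settle before enabling XVCLK */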
+ gpiod_set_value_cansleep(ov6211->reset_gpio, 0);
+ usleep_range(10 * USEC_PER_MSEC, 15 * USEC_PER_MSEC);
+
+ ret = clk_prepare_enable(ov6211->xvclk);
+ if (ret)
+ goto reset_gpio;
+
+ return 0;
+
+reset_gpio:
+ gpiod_set_value_cansleep(ov6211->reset_gpio, 1);
+
+ regulator_bulk_disable(OV6211_NUM_SUPPLIES, ov6211->supplies);
+
+ return ret;
+}
+
+static int ov6211_power_off(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov6211 *ov6211 = to_ov6211(sd);
+
+ clk_disable_unprepare(ov6211->xvclk);
+
+ gpiod_set_value_cansleep(ov6211->reset_gpio, 1);
+
+ regulator_bulk_disable(OV6211_NUM_SUPPLIES, ov6211->supplies);
+
+ return 0;
+}
+
+static int ov6211_probe(struct i2c_client *client)
+{
+ struct ov6211 *ov6211;
+ unsigned long freq;
+ unsigned int i;
+ int ret;
+
+ ov6211 = devm_kzalloc(&client->dev, sizeof(*ov6211), GFP_KERNEL);
+ if (!ov6211)
+ return -ENOMEM;
+
+ ov6211->dev = &client->dev;
+
+ v4l2_i2c_subdev_init(&ov6211->sd, client, &ov6211_subdev_ops);
+
+ ov6211->regmap = devm_cci_regmap_init_i2c(client, 16);
+ if (IS_ERR(ov6211->regmap))
+ return dev_err_probe(ov6211->dev, PTR_ERR(ov6211->regmap),
+ "failed to init CCI\n");
+
+ ov6211->xvclk = devm_v4l2_sensor_clk_get(ov6211->dev, NULL);
+ if (IS_ERR(ov6211->xvclk))
+ return dev_err_probe(ov6211->dev, PTR_ERR(ov6211->xvclk),
+ "failed to get XVCLK clock\n");
+
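+ /* A rate of zero is tolerated here; only a known, wrong rate is rejected */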
+ freq = clk_get_rate(ov6211->xvclk);
+ if (freq && freq != OV6211_MCLK_FREQ_24MHZ)
+ return dev_err_probe(ov6211->dev, -EINVAL,
+ "XVCLK clock frequency %lu is not supported\n",
+ freq);
+
+ ret = ov6211_check_hwcfg(ov6211);
+ if (ret)
+ return dev_err_probe(ov6211->dev, ret,
+ "failed to check HW configuration\n");
+
+ ov6211->reset_gpio = devm_gpiod_get_optional(ov6211->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ov6211->reset_gpio))
+ return dev_err_probe(ov6211->dev, PTR_ERR(ov6211->reset_gpio),
+ "cannot get reset GPIO\n");
+
+ for (i = 0; i < OV6211_NUM_SUPPLIES; i++)
+ ov6211->supplies[i].supply = ov6211_supply_names[i];
+
+ ret = devm_regulator_bulk_get(ov6211->dev, OV6211_NUM_SUPPLIES,
+ ov6211->supplies);
+ if (ret)
+ return dev_err_probe(ov6211->dev, ret,
+ "failed to get supply regulators\n");
+
+ /* The sensor must be powered on to read the CHIP_ID register */
+ ret = ov6211_power_on(ov6211->dev);
+ if (ret)
+ return ret;
+
+ ret = ov6211_identify_sensor(ov6211);
+ if (ret) {
+ dev_err_probe(ov6211->dev, ret, "failed to find sensor\n");
+ goto power_off;
+ }
+
+ ret = ov6211_init_controls(ov6211);
+ if (ret) {
+ dev_err_probe(ov6211->dev, ret, "failed to init controls\n");
+ goto power_off;
+ }
+
+ ov6211->sd.state_lock = ov6211->ctrl_handler.lock;
+ ov6211->sd.internal_ops = &ov6211_internal_ops;
+ ov6211->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov6211->sd.entity.ops = &ov6211_subdev_entity_ops;
+ ov6211->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ ov6211->pad.flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&ov6211->sd.entity, 1, &ov6211->pad);
+ if (ret) {
+ dev_err_probe(ov6211->dev, ret,
+ "failed to init media entity pads\n");
+ goto v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_subdev_init_finalize(&ov6211->sd);
+ if (ret < 0) {
+ dev_err_probe(ov6211->dev, ret,
+ "failed to init media entity pads\n");
+ goto media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(ov6211->dev);
+ pm_runtime_enable(ov6211->dev);
+
+ ret = v4l2_async_register_subdev_sensor(&ov6211->sd);
+ if (ret < 0) {
+ dev_err_probe(ov6211->dev, ret,
+ "failed to register V4L2 subdev\n");
+ goto subdev_cleanup;
+ }
+
+ /* Let runtime PM idle the device; it will autosuspend after one second */
+ pm_runtime_idle(ov6211->dev);
+ pm_runtime_set_autosuspend_delay(ov6211->dev, 1000);
+ pm_runtime_use_autosuspend(ov6211->dev);
+
+ return 0;
+
+subdev_cleanup:
+ v4l2_subdev_cleanup(&ov6211->sd);
+ pm_runtime_disable(ov6211->dev);
+ pm_runtime_set_suspended(ov6211->dev);
+
+media_entity_cleanup:
+ media_entity_cleanup(&ov6211->sd.entity);
+
+v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(ov6211->sd.ctrl_handler);
+
+power_off:
+ ov6211_power_off(ov6211->dev);
+
+ return ret;
+}
+
+static void ov6211_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov6211 *ov6211 = to_ov6211(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ v4l2_subdev_cleanup(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ pm_runtime_disable(ov6211->dev);
+
+ if (!pm_runtime_status_suspended(ov6211->dev)) {
+ ov6211_power_off(ov6211->dev);
+ pm_runtime_set_suspended(ov6211->dev);
+ }
+}
+
+static const struct dev_pm_ops ov6211_pm_ops = {
+ SET_RUNTIME_PM_OPS(ov6211_power_off, ov6211_power_on, NULL)
+};
+
+static const struct of_device_id ov6211_of_match[] = {
+ { .compatible = "ovti,ov6211" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ov6211_of_match);
+
+static struct i2c_driver ov6211_i2c_driver = {
+ .driver = {
+ .name = "ov6211",
+ .pm = &ov6211_pm_ops,
+ .of_match_table = ov6211_of_match,
+ },
+ .probe = ov6211_probe,
+ .remove = ov6211_remove,
+};
+
+module_i2c_driver(ov6211_i2c_driver);
+
+MODULE_AUTHOR("Vladimir Zapolskiy <vladimir.zapolskiy@linaro.org>");
+MODULE_DESCRIPTION("OmniVision OV6211 sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/i2c/ov64a40.c b/drivers/media/i2c/ov64a40.c
index 2031cbd05c26..78b62c169b99 100644
--- a/drivers/media/i2c/ov64a40.c
+++ b/drivers/media/i2c/ov64a40.c
@@ -3546,7 +3546,7 @@ static int ov64a40_probe(struct i2c_client *client)
return PTR_ERR(ov64a40->cci);
}
- ov64a40->xclk = devm_clk_get(&client->dev, NULL);
+ ov64a40->xclk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(ov64a40->xclk))
return dev_err_probe(&client->dev, PTR_ERR(ov64a40->xclk),
"Failed to get clock\n");
diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
deleted file mode 100644
index 9c7627161142..000000000000
--- a/drivers/media/i2c/ov6650.c
+++ /dev/null
@@ -1,1149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * V4L2 subdevice driver for OmniVision OV6650 Camera Sensor
- *
- * Copyright (C) 2010 Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
- *
- * Based on OmniVision OV96xx Camera Driver
- * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
- *
- * Based on ov772x camera driver:
- * Copyright (C) 2008 Renesas Solutions Corp.
- * Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * Based on ov7670 and soc_camera_platform driver,
- * Copyright 2006-7 Jonathan Corbet <corbet@lwn.net>
- * Copyright (C) 2008 Magnus Damm
- * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
- *
- * Hardware specific bits initially based on former work by Matt Callow
- * drivers/media/video/omap/sensor_ov6650.c
- * Copyright (C) 2006 Matt Callow
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/v4l2-mediabus.h>
-#include <linux/module.h>
-
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-device.h>
-
-/* Register definitions */
-#define REG_GAIN 0x00 /* range 00 - 3F */
-#define REG_BLUE 0x01
-#define REG_RED 0x02
-#define REG_SAT 0x03 /* [7:4] saturation [0:3] reserved */
-#define REG_HUE 0x04 /* [7:6] rsrvd [5] hue en [4:0] hue */
-
-#define REG_BRT 0x06
-
-#define REG_PIDH 0x0a
-#define REG_PIDL 0x0b
-
-#define REG_AECH 0x10
-#define REG_CLKRC 0x11 /* Data Format and Internal Clock */
- /* [7:6] Input system clock (MHz)*/
- /* 00=8, 01=12, 10=16, 11=24 */
- /* [5:0]: Internal Clock Pre-Scaler */
-#define REG_COMA 0x12 /* [7] Reset */
-#define REG_COMB 0x13
-#define REG_COMC 0x14
-#define REG_COMD 0x15
-#define REG_COML 0x16
-#define REG_HSTRT 0x17
-#define REG_HSTOP 0x18
-#define REG_VSTRT 0x19
-#define REG_VSTOP 0x1a
-#define REG_PSHFT 0x1b
-#define REG_MIDH 0x1c
-#define REG_MIDL 0x1d
-#define REG_HSYNS 0x1e
-#define REG_HSYNE 0x1f
-#define REG_COME 0x20
-#define REG_YOFF 0x21
-#define REG_UOFF 0x22
-#define REG_VOFF 0x23
-#define REG_AEW 0x24
-#define REG_AEB 0x25
-#define REG_COMF 0x26
-#define REG_COMG 0x27
-#define REG_COMH 0x28
-#define REG_COMI 0x29
-
-#define REG_FRARL 0x2b
-#define REG_COMJ 0x2c
-#define REG_COMK 0x2d
-#define REG_AVGY 0x2e
-#define REG_REF0 0x2f
-#define REG_REF1 0x30
-#define REG_REF2 0x31
-#define REG_FRAJH 0x32
-#define REG_FRAJL 0x33
-#define REG_FACT 0x34
-#define REG_L1AEC 0x35
-#define REG_AVGU 0x36
-#define REG_AVGV 0x37
-
-#define REG_SPCB 0x60
-#define REG_SPCC 0x61
-#define REG_GAM1 0x62
-#define REG_GAM2 0x63
-#define REG_GAM3 0x64
-#define REG_SPCD 0x65
-
-#define REG_SPCE 0x68
-#define REG_ADCL 0x69
-
-#define REG_RMCO 0x6c
-#define REG_GMCO 0x6d
-#define REG_BMCO 0x6e
-
-
-/* Register bits, values, etc. */
-#define OV6650_PIDH 0x66 /* high byte of product ID number */
-#define OV6650_PIDL 0x50 /* low byte of product ID number */
-#define OV6650_MIDH 0x7F /* high byte of mfg ID */
-#define OV6650_MIDL 0xA2 /* low byte of mfg ID */
-
-#define DEF_GAIN 0x00
-#define DEF_BLUE 0x80
-#define DEF_RED 0x80
-
-#define SAT_SHIFT 4
-#define SAT_MASK (0xf << SAT_SHIFT)
-#define SET_SAT(x) (((x) << SAT_SHIFT) & SAT_MASK)
-
-#define HUE_EN BIT(5)
-#define HUE_MASK 0x1f
-#define DEF_HUE 0x10
-#define SET_HUE(x) (HUE_EN | ((x) & HUE_MASK))
-
-#define DEF_AECH 0x4D
-
-#define CLKRC_8MHz 0x00
-#define CLKRC_12MHz 0x40
-#define CLKRC_16MHz 0x80
-#define CLKRC_24MHz 0xc0
-#define CLKRC_DIV_MASK 0x3f
-#define GET_CLKRC_DIV(x) (((x) & CLKRC_DIV_MASK) + 1)
-#define DEF_CLKRC 0x00
-
-#define COMA_RESET BIT(7)
-#define COMA_QCIF BIT(5)
-#define COMA_RAW_RGB BIT(4)
-#define COMA_RGB BIT(3)
-#define COMA_BW BIT(2)
-#define COMA_WORD_SWAP BIT(1)
-#define COMA_BYTE_SWAP BIT(0)
-#define DEF_COMA 0x00
-
-#define COMB_FLIP_V BIT(7)
-#define COMB_FLIP_H BIT(5)
-#define COMB_BAND_FILTER BIT(4)
-#define COMB_AWB BIT(2)
-#define COMB_AGC BIT(1)
-#define COMB_AEC BIT(0)
-#define DEF_COMB 0x5f
-
-#define COML_ONE_CHANNEL BIT(7)
-
-#define DEF_HSTRT 0x24
-#define DEF_HSTOP 0xd4
-#define DEF_VSTRT 0x04
-#define DEF_VSTOP 0x94
-
-#define COMF_HREF_LOW BIT(4)
-
-#define COMJ_PCLK_RISING BIT(4)
-#define COMJ_VSYNC_HIGH BIT(0)
-
-/* supported resolutions */
-#define W_QCIF (DEF_HSTOP - DEF_HSTRT)
-#define W_CIF (W_QCIF << 1)
-#define H_QCIF (DEF_VSTOP - DEF_VSTRT)
-#define H_CIF (H_QCIF << 1)
-
-#define FRAME_RATE_MAX 30
-
-
-struct ov6650_reg {
- u8 reg;
- u8 val;
-};
-
-struct ov6650 {
- struct v4l2_subdev subdev;
- struct v4l2_ctrl_handler hdl;
- struct {
- /* exposure/autoexposure cluster */
- struct v4l2_ctrl *autoexposure;
- struct v4l2_ctrl *exposure;
- };
- struct {
- /* gain/autogain cluster */
- struct v4l2_ctrl *autogain;
- struct v4l2_ctrl *gain;
- };
- struct {
- /* blue/red/autowhitebalance cluster */
- struct v4l2_ctrl *autowb;
- struct v4l2_ctrl *blue;
- struct v4l2_ctrl *red;
- };
- struct clk *clk;
- bool half_scale; /* scale down output by 2 */
- struct v4l2_rect rect; /* sensor cropping window */
- struct v4l2_fract tpf; /* as requested with set_frame_interval */
- u32 code;
-};
-
-struct ov6650_xclk {
- unsigned long rate;
- u8 clkrc;
-};
-
-static const struct ov6650_xclk ov6650_xclk[] = {
-{
- .rate = 8000000,
- .clkrc = CLKRC_8MHz,
-},
-{
- .rate = 12000000,
- .clkrc = CLKRC_12MHz,
-},
-{
- .rate = 16000000,
- .clkrc = CLKRC_16MHz,
-},
-{
- .rate = 24000000,
- .clkrc = CLKRC_24MHz,
-},
-};
-
-static u32 ov6650_codes[] = {
- MEDIA_BUS_FMT_YUYV8_2X8,
- MEDIA_BUS_FMT_UYVY8_2X8,
- MEDIA_BUS_FMT_YVYU8_2X8,
- MEDIA_BUS_FMT_VYUY8_2X8,
- MEDIA_BUS_FMT_SBGGR8_1X8,
- MEDIA_BUS_FMT_Y8_1X8,
-};
-
-static const struct v4l2_mbus_framefmt ov6650_def_fmt = {
- .width = W_CIF,
- .height = H_CIF,
- .code = MEDIA_BUS_FMT_SBGGR8_1X8,
- .colorspace = V4L2_COLORSPACE_SRGB,
- .field = V4L2_FIELD_NONE,
- .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
- .quantization = V4L2_QUANTIZATION_DEFAULT,
- .xfer_func = V4L2_XFER_FUNC_DEFAULT,
-};
-
-/* read a register */
-static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
-{
- int ret;
- u8 data = reg;
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = 0,
- .len = 1,
- .buf = &data,
- };
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0)
- goto err;
-
- msg.flags = I2C_M_RD;
- ret = i2c_transfer(client->adapter, &msg, 1);
- if (ret < 0)
- goto err;
-
- *val = data;
- return 0;
-
-err:
- dev_err(&client->dev, "Failed reading register 0x%02x!\n", reg);
- return ret;
-}
-
-/* write a register */
-static int ov6650_reg_write(struct i2c_client *client, u8 reg, u8 val)
-{
- int ret;
- unsigned char data[2] = { reg, val };
- struct i2c_msg msg = {
- .addr = client->addr,
- .flags = 0,
- .len = 2,
- .buf = data,
- };
-
- ret = i2c_transfer(client->adapter, &msg, 1);
- udelay(100);
-
- if (ret < 0) {
- dev_err(&client->dev, "Failed writing register 0x%02x!\n", reg);
- return ret;
- }
- return 0;
-}
-
-
-/* Read a register, alter its bits, write it back */
-static int ov6650_reg_rmw(struct i2c_client *client, u8 reg, u8 set, u8 mask)
-{
- u8 val;
- int ret;
-
- ret = ov6650_reg_read(client, reg, &val);
- if (ret) {
- dev_err(&client->dev,
- "[Read]-Modify-Write of register 0x%02x failed!\n",
- reg);
- return ret;
- }
-
- val &= ~mask;
- val |= set;
-
- ret = ov6650_reg_write(client, reg, val);
- if (ret)
- dev_err(&client->dev,
- "Read-Modify-[Write] of register 0x%02x failed!\n",
- reg);
-
- return ret;
-}
-
-static struct ov6650 *to_ov6650(const struct i2c_client *client)
-{
- return container_of(i2c_get_clientdata(client), struct ov6650, subdev);
-}
-
-/* Start/Stop streaming from the device */
-static int ov6650_s_stream(struct v4l2_subdev *sd, int enable)
-{
- return 0;
-}
-
-/* Get status of additional camera capabilities */
-static int ov6550_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
- struct v4l2_subdev *sd = &priv->subdev;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- uint8_t reg, reg2;
- int ret;
-
- switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- ret = ov6650_reg_read(client, REG_GAIN, &reg);
- if (!ret)
- priv->gain->val = reg;
- return ret;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = ov6650_reg_read(client, REG_BLUE, &reg);
- if (!ret)
- ret = ov6650_reg_read(client, REG_RED, &reg2);
- if (!ret) {
- priv->blue->val = reg;
- priv->red->val = reg2;
- }
- return ret;
- case V4L2_CID_EXPOSURE_AUTO:
- ret = ov6650_reg_read(client, REG_AECH, &reg);
- if (!ret)
- priv->exposure->val = reg;
- return ret;
- }
- return -EINVAL;
-}
-
-/* Set status of additional camera capabilities */
-static int ov6550_s_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct ov6650 *priv = container_of(ctrl->handler, struct ov6650, hdl);
- struct v4l2_subdev *sd = &priv->subdev;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
-
- switch (ctrl->id) {
- case V4L2_CID_AUTOGAIN:
- ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_AGC : 0, COMB_AGC);
- if (!ret && !ctrl->val)
- ret = ov6650_reg_write(client, REG_GAIN, priv->gain->val);
- return ret;
- case V4L2_CID_AUTO_WHITE_BALANCE:
- ret = ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_AWB : 0, COMB_AWB);
- if (!ret && !ctrl->val) {
- ret = ov6650_reg_write(client, REG_BLUE, priv->blue->val);
- if (!ret)
- ret = ov6650_reg_write(client, REG_RED,
- priv->red->val);
- }
- return ret;
- case V4L2_CID_SATURATION:
- return ov6650_reg_rmw(client, REG_SAT, SET_SAT(ctrl->val),
- SAT_MASK);
- case V4L2_CID_HUE:
- return ov6650_reg_rmw(client, REG_HUE, SET_HUE(ctrl->val),
- HUE_MASK);
- case V4L2_CID_BRIGHTNESS:
- return ov6650_reg_write(client, REG_BRT, ctrl->val);
- case V4L2_CID_EXPOSURE_AUTO:
- ret = ov6650_reg_rmw(client, REG_COMB, ctrl->val ==
- V4L2_EXPOSURE_AUTO ? COMB_AEC : 0, COMB_AEC);
- if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL)
- ret = ov6650_reg_write(client, REG_AECH,
- priv->exposure->val);
- return ret;
- case V4L2_CID_GAMMA:
- return ov6650_reg_write(client, REG_GAM1, ctrl->val);
- case V4L2_CID_VFLIP:
- return ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_FLIP_V : 0, COMB_FLIP_V);
- case V4L2_CID_HFLIP:
- return ov6650_reg_rmw(client, REG_COMB,
- ctrl->val ? COMB_FLIP_H : 0, COMB_FLIP_H);
- }
-
- return -EINVAL;
-}
-
-#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int ov6650_get_register(struct v4l2_subdev *sd,
- struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
- u8 val;
-
- if (reg->reg & ~0xff)
- return -EINVAL;
-
- reg->size = 1;
-
- ret = ov6650_reg_read(client, reg->reg, &val);
- if (!ret)
- reg->val = (__u64)val;
-
- return ret;
-}
-
-static int ov6650_set_register(struct v4l2_subdev *sd,
- const struct v4l2_dbg_register *reg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (reg->reg & ~0xff || reg->val & ~0xff)
- return -EINVAL;
-
- return ov6650_reg_write(client, reg->reg, reg->val);
-}
-#endif
-
-static int ov6650_s_power(struct v4l2_subdev *sd, int on)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- int ret = 0;
-
- if (on)
- ret = clk_prepare_enable(priv->clk);
- else
- clk_disable_unprepare(priv->clk);
-
- return ret;
-}
-
-static int ov6650_get_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect *rect;
-
- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- /* pre-select try crop rectangle */
- rect = v4l2_subdev_state_get_crop(sd_state, 0);
-
- } else {
- /* pre-select active crop rectangle */
- rect = &priv->rect;
- }
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.left = DEF_HSTRT << 1;
- sel->r.top = DEF_VSTRT << 1;
- sel->r.width = W_CIF;
- sel->r.height = H_CIF;
- return 0;
-
- case V4L2_SEL_TGT_CROP:
- /* use selected crop rectangle */
- sel->r = *rect;
- return 0;
-
- default:
- return -EINVAL;
- }
-}
-
-static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect)
-{
- return width > rect->width >> 1 || height > rect->height >> 1;
-}
-
-static void ov6650_bind_align_crop_rectangle(struct v4l2_rect *rect)
-{
- v4l_bound_align_image(&rect->width, 2, W_CIF, 1,
- &rect->height, 2, H_CIF, 1, 0);
- v4l_bound_align_image(&rect->left, DEF_HSTRT << 1,
- (DEF_HSTRT << 1) + W_CIF - (__s32)rect->width, 1,
- &rect->top, DEF_VSTRT << 1,
- (DEF_VSTRT << 1) + H_CIF - (__s32)rect->height,
- 1, 0);
-}
-
-static int ov6650_set_selection(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_selection *sel)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- int ret;
-
- if (sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- ov6650_bind_align_crop_rectangle(&sel->r);
-
- if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_rect *crop =
- v4l2_subdev_state_get_crop(sd_state, 0);
- struct v4l2_mbus_framefmt *mf =
- v4l2_subdev_state_get_format(sd_state, 0);
- /* detect current pad config scaling factor */
- bool half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
-
- /* store new crop rectangle */
- *crop = sel->r;
-
- /* adjust frame size */
- mf->width = crop->width >> half_scale;
- mf->height = crop->height >> half_scale;
-
- return 0;
- }
-
- /* V4L2_SUBDEV_FORMAT_ACTIVE */
-
- /* apply new crop rectangle */
- ret = ov6650_reg_write(client, REG_HSTRT, sel->r.left >> 1);
- if (!ret) {
- priv->rect.width += priv->rect.left - sel->r.left;
- priv->rect.left = sel->r.left;
- ret = ov6650_reg_write(client, REG_HSTOP,
- (sel->r.left + sel->r.width) >> 1);
- }
- if (!ret) {
- priv->rect.width = sel->r.width;
- ret = ov6650_reg_write(client, REG_VSTRT, sel->r.top >> 1);
- }
- if (!ret) {
- priv->rect.height += priv->rect.top - sel->r.top;
- priv->rect.top = sel->r.top;
- ret = ov6650_reg_write(client, REG_VSTOP,
- (sel->r.top + sel->r.height) >> 1);
- }
- if (!ret)
- priv->rect.height = sel->r.height;
-
- return ret;
-}
-
-static int ov6650_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
-
- if (format->pad)
- return -EINVAL;
-
- /* initialize response with default media bus frame format */
- *mf = ov6650_def_fmt;
-
- /* update media bus format code and frame size */
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_state_get_format(sd_state, 0);
-
- mf->width = try_fmt->width;
- mf->height = try_fmt->height;
- mf->code = try_fmt->code;
-
- } else {
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- }
- return 0;
-}
-
-#define to_clkrc(div) ((div) - 1)
-
-/* set the format we will capture in */
-static int ov6650_s_fmt(struct v4l2_subdev *sd, u32 code, bool half_scale)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- u8 coma_set = 0, coma_mask = 0, coml_set, coml_mask;
- int ret;
-
- /* select color matrix configuration for given color encoding */
- switch (code) {
- case MEDIA_BUS_FMT_Y8_1X8:
- dev_dbg(&client->dev, "pixel format GREY8_1X8\n");
- coma_mask |= COMA_RGB | COMA_WORD_SWAP | COMA_BYTE_SWAP;
- coma_set |= COMA_BW;
- break;
- case MEDIA_BUS_FMT_YUYV8_2X8:
- dev_dbg(&client->dev, "pixel format YUYV8_2X8_LE\n");
- coma_mask |= COMA_RGB | COMA_BW | COMA_BYTE_SWAP;
- coma_set |= COMA_WORD_SWAP;
- break;
- case MEDIA_BUS_FMT_YVYU8_2X8:
- dev_dbg(&client->dev, "pixel format YVYU8_2X8_LE (untested)\n");
- coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP |
- COMA_BYTE_SWAP;
- break;
- case MEDIA_BUS_FMT_UYVY8_2X8:
- dev_dbg(&client->dev, "pixel format YUYV8_2X8_BE\n");
- if (half_scale) {
- coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
- coma_set |= COMA_BYTE_SWAP;
- } else {
- coma_mask |= COMA_RGB | COMA_BW;
- coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
- }
- break;
- case MEDIA_BUS_FMT_VYUY8_2X8:
- dev_dbg(&client->dev, "pixel format YVYU8_2X8_BE (untested)\n");
- if (half_scale) {
- coma_mask |= COMA_RGB | COMA_BW;
- coma_set |= COMA_BYTE_SWAP | COMA_WORD_SWAP;
- } else {
- coma_mask |= COMA_RGB | COMA_BW | COMA_WORD_SWAP;
- coma_set |= COMA_BYTE_SWAP;
- }
- break;
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- dev_dbg(&client->dev, "pixel format SBGGR8_1X8 (untested)\n");
- coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP;
- coma_set |= COMA_RAW_RGB | COMA_RGB;
- break;
- default:
- dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code);
- return -EINVAL;
- }
-
- if (code == MEDIA_BUS_FMT_Y8_1X8 ||
- code == MEDIA_BUS_FMT_SBGGR8_1X8) {
- coml_mask = COML_ONE_CHANNEL;
- coml_set = 0;
- } else {
- coml_mask = 0;
- coml_set = COML_ONE_CHANNEL;
- }
-
- if (half_scale) {
- dev_dbg(&client->dev, "max resolution: QCIF\n");
- coma_set |= COMA_QCIF;
- } else {
- dev_dbg(&client->dev, "max resolution: CIF\n");
- coma_mask |= COMA_QCIF;
- }
-
- ret = ov6650_reg_rmw(client, REG_COMA, coma_set, coma_mask);
- if (!ret) {
- priv->half_scale = half_scale;
-
- ret = ov6650_reg_rmw(client, REG_COML, coml_set, coml_mask);
- }
- if (!ret)
- priv->code = code;
-
- return ret;
-}
-
-static int ov6650_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *format)
-{
- struct v4l2_mbus_framefmt *mf = &format->format;
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- struct v4l2_rect *crop;
- bool half_scale;
-
- if (format->pad)
- return -EINVAL;
-
- switch (mf->code) {
- case MEDIA_BUS_FMT_Y10_1X10:
- mf->code = MEDIA_BUS_FMT_Y8_1X8;
- fallthrough;
- case MEDIA_BUS_FMT_Y8_1X8:
- case MEDIA_BUS_FMT_YVYU8_2X8:
- case MEDIA_BUS_FMT_YUYV8_2X8:
- case MEDIA_BUS_FMT_VYUY8_2X8:
- case MEDIA_BUS_FMT_UYVY8_2X8:
- break;
- default:
- mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
- fallthrough;
- case MEDIA_BUS_FMT_SBGGR8_1X8:
- break;
- }
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY)
- crop = v4l2_subdev_state_get_crop(sd_state, 0);
- else
- crop = &priv->rect;
-
- half_scale = !is_unscaled_ok(mf->width, mf->height, crop);
-
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *try_fmt =
- v4l2_subdev_state_get_format(sd_state, 0);
-
- /* store new mbus frame format code and size in pad config */
- try_fmt->width = crop->width >> half_scale;
- try_fmt->height = crop->height >> half_scale;
- try_fmt->code = mf->code;
-
- /* return default mbus frame format updated with pad config */
- *mf = ov6650_def_fmt;
- mf->width = try_fmt->width;
- mf->height = try_fmt->height;
- mf->code = try_fmt->code;
-
- } else {
- int ret = 0;
-
- /* apply new media bus frame format and scaling if changed */
- if (mf->code != priv->code || half_scale != priv->half_scale)
- ret = ov6650_s_fmt(sd, mf->code, half_scale);
- if (ret)
- return ret;
-
- /* return default format updated with active size and code */
- *mf = ov6650_def_fmt;
- mf->width = priv->rect.width >> priv->half_scale;
- mf->height = priv->rect.height >> priv->half_scale;
- mf->code = priv->code;
- }
- return 0;
-}
-
-static int ov6650_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_mbus_code_enum *code)
-{
- if (code->pad || code->index >= ARRAY_SIZE(ov6650_codes))
- return -EINVAL;
-
- code->code = ov6650_codes[code->index];
- return 0;
-}
-
-static int ov6650_enum_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval_enum *fie)
-{
- int i;
-
- /* enumerate supported frame intervals not exceeding 1 second */
- if (fie->index > CLKRC_DIV_MASK ||
- GET_CLKRC_DIV(fie->index) > FRAME_RATE_MAX)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(ov6650_codes); i++)
- if (fie->code == ov6650_codes[i])
- break;
- if (i == ARRAY_SIZE(ov6650_codes))
- return -EINVAL;
-
- if (!fie->width || fie->width > W_CIF ||
- !fie->height || fie->height > H_CIF)
- return -EINVAL;
-
- fie->interval.numerator = GET_CLKRC_DIV(fie->index);
- fie->interval.denominator = FRAME_RATE_MAX;
-
- return 0;
-}
-
-static int ov6650_get_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval *ival)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
-
- /*
- * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
- * subdev active state API.
- */
- if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- ival->interval = priv->tpf;
-
- dev_dbg(&client->dev, "Frame interval: %u/%u s\n",
- ival->interval.numerator, ival->interval.denominator);
-
- return 0;
-}
-
-static int ov6650_set_frame_interval(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_frame_interval *ival)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- struct v4l2_fract *tpf = &ival->interval;
- int div, ret;
-
- /*
- * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
- * subdev active state API.
- */
- if (ival->which != V4L2_SUBDEV_FORMAT_ACTIVE)
- return -EINVAL;
-
- if (tpf->numerator == 0 || tpf->denominator == 0)
- div = 1; /* Reset to full rate */
- else
- div = (tpf->numerator * FRAME_RATE_MAX) / tpf->denominator;
-
- if (div == 0)
- div = 1;
- else if (div > GET_CLKRC_DIV(CLKRC_DIV_MASK))
- div = GET_CLKRC_DIV(CLKRC_DIV_MASK);
-
- ret = ov6650_reg_rmw(client, REG_CLKRC, to_clkrc(div), CLKRC_DIV_MASK);
- if (!ret) {
- priv->tpf.numerator = div;
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- *tpf = priv->tpf;
- }
-
- return ret;
-}
-
-/* Soft reset the camera. This has nothing to do with the RESET pin! */
-static int ov6650_reset(struct i2c_client *client)
-{
- int ret;
-
- dev_dbg(&client->dev, "reset\n");
-
- ret = ov6650_reg_rmw(client, REG_COMA, COMA_RESET, 0);
- if (ret)
- dev_err(&client->dev,
- "An error occurred while entering soft reset!\n");
-
- return ret;
-}
-
-/* program default register values */
-static int ov6650_prog_dflt(struct i2c_client *client, u8 clkrc)
-{
- int ret;
-
- dev_dbg(&client->dev, "initializing\n");
-
- ret = ov6650_reg_write(client, REG_COMA, 0); /* ~COMA_RESET */
- if (!ret)
- ret = ov6650_reg_write(client, REG_CLKRC, clkrc);
- if (!ret)
- ret = ov6650_reg_rmw(client, REG_COMB, 0, COMB_BAND_FILTER);
-
- return ret;
-}
-
-static int ov6650_video_probe(struct v4l2_subdev *sd)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- struct ov6650 *priv = to_ov6650(client);
- const struct ov6650_xclk *xclk = NULL;
- unsigned long rate;
- u8 pidh, pidl, midh, midl;
- int i, ret = 0;
-
- priv->clk = devm_clk_get(&client->dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&client->dev, "clk request err: %d\n", ret);
- return ret;
- }
-
- rate = clk_get_rate(priv->clk);
- for (i = 0; rate && i < ARRAY_SIZE(ov6650_xclk); i++) {
- if (rate != ov6650_xclk[i].rate)
- continue;
-
- xclk = &ov6650_xclk[i];
- dev_info(&client->dev, "using host default clock rate %lukHz\n",
- rate / 1000);
- break;
- }
- for (i = 0; !xclk && i < ARRAY_SIZE(ov6650_xclk); i++) {
- ret = clk_set_rate(priv->clk, ov6650_xclk[i].rate);
- if (ret || clk_get_rate(priv->clk) != ov6650_xclk[i].rate)
- continue;
-
- xclk = &ov6650_xclk[i];
- dev_info(&client->dev, "using negotiated clock rate %lukHz\n",
- xclk->rate / 1000);
- break;
- }
- if (!xclk) {
- dev_err(&client->dev, "unable to get supported clock rate\n");
- if (!ret)
- ret = -EINVAL;
- return ret;
- }
-
- ret = ov6650_s_power(sd, 1);
- if (ret < 0)
- return ret;
-
- msleep(20);
-
- /*
- * check and show product ID and manufacturer ID
- */
- ret = ov6650_reg_read(client, REG_PIDH, &pidh);
- if (!ret)
- ret = ov6650_reg_read(client, REG_PIDL, &pidl);
- if (!ret)
- ret = ov6650_reg_read(client, REG_MIDH, &midh);
- if (!ret)
- ret = ov6650_reg_read(client, REG_MIDL, &midl);
-
- if (ret)
- goto done;
-
- if ((pidh != OV6650_PIDH) || (pidl != OV6650_PIDL)) {
- dev_err(&client->dev, "Product ID error 0x%02x:0x%02x\n",
- pidh, pidl);
- ret = -ENODEV;
- goto done;
- }
-
- dev_info(&client->dev,
- "ov6650 Product ID 0x%02x:0x%02x Manufacturer ID 0x%02x:0x%02x\n",
- pidh, pidl, midh, midl);
-
- ret = ov6650_reset(client);
- if (!ret)
- ret = ov6650_prog_dflt(client, xclk->clkrc);
- if (!ret) {
- /* driver default frame format, no scaling */
- ret = ov6650_s_fmt(sd, ov6650_def_fmt.code, false);
- }
- if (!ret)
- ret = v4l2_ctrl_handler_setup(&priv->hdl);
-
-done:
- ov6650_s_power(sd, 0);
- return ret;
-}
-
-static const struct v4l2_ctrl_ops ov6550_ctrl_ops = {
- .g_volatile_ctrl = ov6550_g_volatile_ctrl,
- .s_ctrl = ov6550_s_ctrl,
-};
-
-static const struct v4l2_subdev_core_ops ov6650_core_ops = {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
- .g_register = ov6650_get_register,
- .s_register = ov6650_set_register,
-#endif
- .s_power = ov6650_s_power,
-};
-
-/* Request bus settings on camera side */
-static int ov6650_get_mbus_config(struct v4l2_subdev *sd,
- unsigned int pad,
- struct v4l2_mbus_config *cfg)
-{
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- u8 comj, comf;
- int ret;
-
- ret = ov6650_reg_read(client, REG_COMJ, &comj);
- if (ret)
- return ret;
-
- ret = ov6650_reg_read(client, REG_COMF, &comf);
- if (ret)
- return ret;
-
- cfg->type = V4L2_MBUS_PARALLEL;
-
- cfg->bus.parallel.flags = V4L2_MBUS_MASTER | V4L2_MBUS_DATA_ACTIVE_HIGH
- | ((comj & COMJ_VSYNC_HIGH) ? V4L2_MBUS_VSYNC_ACTIVE_HIGH
- : V4L2_MBUS_VSYNC_ACTIVE_LOW)
- | ((comf & COMF_HREF_LOW) ? V4L2_MBUS_HSYNC_ACTIVE_LOW
- : V4L2_MBUS_HSYNC_ACTIVE_HIGH)
- | ((comj & COMJ_PCLK_RISING) ? V4L2_MBUS_PCLK_SAMPLE_RISING
- : V4L2_MBUS_PCLK_SAMPLE_FALLING);
- return 0;
-}
-
-static const struct v4l2_subdev_video_ops ov6650_video_ops = {
- .s_stream = ov6650_s_stream,
-};
-
-static const struct v4l2_subdev_pad_ops ov6650_pad_ops = {
- .enum_mbus_code = ov6650_enum_mbus_code,
- .enum_frame_interval = ov6650_enum_frame_interval,
- .get_selection = ov6650_get_selection,
- .set_selection = ov6650_set_selection,
- .get_fmt = ov6650_get_fmt,
- .set_fmt = ov6650_set_fmt,
- .get_frame_interval = ov6650_get_frame_interval,
- .set_frame_interval = ov6650_set_frame_interval,
- .get_mbus_config = ov6650_get_mbus_config,
-};
-
-static const struct v4l2_subdev_ops ov6650_subdev_ops = {
- .core = &ov6650_core_ops,
- .video = &ov6650_video_ops,
- .pad = &ov6650_pad_ops,
-};
-
-static const struct v4l2_subdev_internal_ops ov6650_internal_ops = {
- .registered = ov6650_video_probe,
-};
-
-/*
- * i2c_driver function
- */
-static int ov6650_probe(struct i2c_client *client)
-{
- struct ov6650 *priv;
- int ret;
-
- priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- v4l2_i2c_subdev_init(&priv->subdev, client, &ov6650_subdev_ops);
- v4l2_ctrl_handler_init(&priv->hdl, 13);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_HFLIP, 0, 1, 1, 0);
- priv->autogain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
- priv->gain = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_GAIN, 0, 0x3f, 1, DEF_GAIN);
- priv->autowb = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
- priv->blue = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_BLUE_BALANCE, 0, 0xff, 1, DEF_BLUE);
- priv->red = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_RED_BALANCE, 0, 0xff, 1, DEF_RED);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_SATURATION, 0, 0xf, 1, 0x8);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_HUE, 0, HUE_MASK, 1, DEF_HUE);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_BRIGHTNESS, 0, 0xff, 1, 0x80);
- priv->autoexposure = v4l2_ctrl_new_std_menu(&priv->hdl,
- &ov6550_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
- V4L2_EXPOSURE_MANUAL, 0, V4L2_EXPOSURE_AUTO);
- priv->exposure = v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_EXPOSURE, 0, 0xff, 1, DEF_AECH);
- v4l2_ctrl_new_std(&priv->hdl, &ov6550_ctrl_ops,
- V4L2_CID_GAMMA, 0, 0xff, 1, 0x12);
-
- priv->subdev.ctrl_handler = &priv->hdl;
- if (priv->hdl.error) {
- ret = priv->hdl.error;
- goto ectlhdlfree;
- }
-
- v4l2_ctrl_auto_cluster(2, &priv->autogain, 0, true);
- v4l2_ctrl_auto_cluster(3, &priv->autowb, 0, true);
- v4l2_ctrl_auto_cluster(2, &priv->autoexposure,
- V4L2_EXPOSURE_MANUAL, true);
-
- priv->rect.left = DEF_HSTRT << 1;
- priv->rect.top = DEF_VSTRT << 1;
- priv->rect.width = W_CIF;
- priv->rect.height = H_CIF;
-
- /* Hardware default frame interval */
- priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC);
- priv->tpf.denominator = FRAME_RATE_MAX;
-
- priv->subdev.internal_ops = &ov6650_internal_ops;
-
- ret = v4l2_async_register_subdev(&priv->subdev);
- if (!ret)
- return 0;
-ectlhdlfree:
- v4l2_ctrl_handler_free(&priv->hdl);
-
- return ret;
-}
-
-static void ov6650_remove(struct i2c_client *client)
-{
- struct ov6650 *priv = to_ov6650(client);
-
- v4l2_async_unregister_subdev(&priv->subdev);
- v4l2_ctrl_handler_free(&priv->hdl);
-}
-
-static const struct i2c_device_id ov6650_id[] = {
- { "ov6650" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, ov6650_id);
-
-static struct i2c_driver ov6650_i2c_driver = {
- .driver = {
- .name = "ov6650",
- },
- .probe = ov6650_probe,
- .remove = ov6650_remove,
- .id_table = ov6650_id,
-};
-
-module_i2c_driver(ov6650_i2c_driver);
-
-MODULE_DESCRIPTION("V4L2 subdevice driver for OmniVision OV6650 camera sensor");
-MODULE_AUTHOR("Janusz Krzysztofik <jmkrzyszt@gmail.com");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov7251.c b/drivers/media/i2c/ov7251.c
index 31a42d81e970..27afc3fc0175 100644
--- a/drivers/media/i2c/ov7251.c
+++ b/drivers/media/i2c/ov7251.c
@@ -1630,7 +1630,6 @@ static int ov7251_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ov7251 *ov7251;
- unsigned int rate = 0, clk_rate = 0;
int ret;
int i;
@@ -1646,33 +1645,12 @@ static int ov7251_probe(struct i2c_client *client)
return ret;
/* get system clock (xclk) */
- ov7251->xclk = devm_clk_get_optional(dev, NULL);
+ ov7251->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(ov7251->xclk))
return dev_err_probe(dev, PTR_ERR(ov7251->xclk),
"could not get xclk");
- /*
- * We could have either a 24MHz or 19.2MHz clock rate from either DT or
- * ACPI. We also need to support the IPU3 case which will have both an
- * external clock AND a clock-frequency property.
- */
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &rate);
- if (ret && !ov7251->xclk)
- return dev_err_probe(dev, ret, "invalid clock config\n");
-
- clk_rate = clk_get_rate(ov7251->xclk);
- ov7251->xclk_freq = clk_rate ? clk_rate : rate;
-
- if (ov7251->xclk_freq == 0)
- return dev_err_probe(dev, -EINVAL, "invalid clock frequency\n");
-
- if (!ret && ov7251->xclk) {
- ret = clk_set_rate(ov7251->xclk, rate);
- if (ret)
- return dev_err_probe(dev, ret,
- "failed to set clock rate\n");
- }
+ ov7251->xclk_freq = clk_get_rate(ov7251->xclk);
for (i = 0; i < ARRAY_SIZE(supported_xclk_rates); i++)
if (ov7251->xclk_freq == supported_xclk_rates[i])
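
The ov7251 hunk drops the fwnode "clock-frequency" handling and simply reads the rate back from the clock, then checks it against supported_xclk_rates[]. A self-contained sketch of that validation step; the rate table here is illustrative, not taken from the driver:

#include <linux/device.h>
#include <linux/kernel.h>

/* Illustrative rates only; the driver has its own supported_xclk_rates[]. */
static const unsigned long supported_rates[] = { 19200000, 24000000 };

static int validate_xclk(struct device *dev, unsigned long rate)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(supported_rates); i++)
		if (rate == supported_rates[i])
			return 0;

	return dev_err_probe(dev, -EINVAL,
			     "unsupported xclk rate %lu Hz\n", rate);
}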
diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
index 1f1c0de8e510..632fb80469be 100644
--- a/drivers/media/i2c/ov7740.c
+++ b/drivers/media/i2c/ov7740.c
@@ -1036,13 +1036,10 @@ static int ov7740_probe(struct i2c_client *client)
if (!ov7740)
return -ENOMEM;
- ov7740->xvclk = devm_clk_get(&client->dev, "xvclk");
- if (IS_ERR(ov7740->xvclk)) {
- ret = PTR_ERR(ov7740->xvclk);
- dev_err(&client->dev,
- "OV7740: fail to get xvclk: %d\n", ret);
- return ret;
- }
+ ov7740->xvclk = devm_v4l2_sensor_clk_get(&client->dev, "xvclk");
+ if (IS_ERR(ov7740->xvclk))
+ return dev_err_probe(&client->dev, PTR_ERR(ov7740->xvclk),
+ "OV7740: fail to get xvclk\n");
ret = ov7740_probe_dt(client, ov7740);
if (ret)
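
The ov7740 hunk collapses a get/log/return sequence into dev_err_probe(), which logs the failure (recording rather than printing -EPROBE_DEFER) and returns the errno in one expression. A usage sketch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int get_required_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "xvclk");

	if (IS_ERR(clk))
		/*
		 * Logs at error level, quietly records the deferral reason
		 * for -EPROBE_DEFER, and returns the encoded errno so the
		 * caller can simply propagate it.
		 */
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get xvclk\n");

	*out = clk;
	return 0;
}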
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index 4b6874d2a104..e2998cfa0d18 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -10,6 +9,8 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -1414,6 +1415,8 @@ static const struct ov8856_reg_list bayer_offset_configs[] = {
};
struct ov8856 {
+ struct device *dev;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -1668,7 +1671,6 @@ static int ov8856_write_reg(struct ov8856 *ov8856, u16 reg, u16 len, u32 val)
static int ov8856_write_reg_list(struct ov8856 *ov8856,
const struct ov8856_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
unsigned int i;
int ret;
@@ -1676,7 +1678,7 @@ static int ov8856_write_reg_list(struct ov8856 *ov8856,
ret = ov8856_write_reg(ov8856, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov8856->dev,
"failed to write reg 0x%4.4x. error = %d",
r_list->regs[i].address, ret);
return ret;
@@ -1688,7 +1690,6 @@ static int ov8856_write_reg_list(struct ov8856 *ov8856,
static int ov8856_identify_module(struct ov8856 *ov8856)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
int ret;
u32 val;
@@ -1701,7 +1702,7 @@ static int ov8856_identify_module(struct ov8856 *ov8856)
return ret;
if (val != OV8856_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(ov8856->dev, "chip id mismatch: %x!=%x",
OV8856_CHIP_ID, val);
return -ENXIO;
}
@@ -1818,7 +1819,6 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov8856 *ov8856 = container_of(ctrl->handler,
struct ov8856, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
s64 exposure_max;
int ret = 0;
@@ -1834,7 +1834,7 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov8856->dev))
return 0;
switch (ctrl->id) {
@@ -1876,7 +1876,7 @@ static int ov8856_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov8856->dev);
return ret;
}
@@ -1979,7 +1979,6 @@ static void ov8856_update_pad_format(struct ov8856 *ov8856,
static int ov8856_start_streaming(struct ov8856 *ov8856)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
const struct ov8856_reg_list *reg_list;
int link_freq_index, ret;
@@ -1992,21 +1991,21 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov8856->dev, "failed to set plls");
return ret;
}
reg_list = &ov8856->cur_mode->reg_list;
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov8856->dev, "failed to set mode");
return ret;
}
reg_list = &bayer_offset_configs[ov8856->cur_mbus_index];
ret = ov8856_write_reg_list(ov8856, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mbus format");
+ dev_err(ov8856->dev, "failed to set mbus format");
return ret;
}
@@ -2017,7 +2016,7 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
if (ret) {
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov8856->dev, "failed to set stream");
return ret;
}
@@ -2026,22 +2025,19 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
static void ov8856_stop_streaming(struct ov8856 *ov8856)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
-
if (ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY))
- dev_err(&client->dev, "failed to set stream");
+ dev_err(ov8856->dev, "failed to set stream");
}
static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov8856 *ov8856 = to_ov8856(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov8856->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov8856->dev);
if (ret < 0) {
mutex_unlock(&ov8856->mutex);
return ret;
@@ -2051,11 +2047,11 @@ static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov8856_stop_streaming(ov8856);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov8856->dev);
}
} else {
ov8856_stop_streaming(ov8856);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov8856->dev);
}
mutex_unlock(&ov8856->mutex);
@@ -2255,8 +2251,9 @@ static const struct v4l2_subdev_internal_ops ov8856_internal_ops = {
};
-static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
+static int ov8856_get_hwcfg(struct ov8856 *ov8856)
{
+ struct device *dev = ov8856->dev;
struct fwnode_handle *ep;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct v4l2_fwnode_endpoint bus_cfg = {
@@ -2269,21 +2266,17 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &xvclk_rate);
- if (ret)
- return ret;
-
- if (!is_acpi_node(fwnode)) {
- ov8856->xvclk = devm_clk_get(dev, "xvclk");
- if (IS_ERR(ov8856->xvclk)) {
- dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
- "could not get xvclk clock\n");
- return PTR_ERR(ov8856->xvclk);
- }
+ ov8856->xvclk = devm_v4l2_sensor_clk_get_legacy(dev, "xvclk", false, 0);
+ if (IS_ERR(ov8856->xvclk))
+ return dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
+ "could not get xvclk clock\n");
- clk_set_rate(ov8856->xvclk, xvclk_rate);
- xvclk_rate = clk_get_rate(ov8856->xvclk);
+ xvclk_rate = clk_get_rate(ov8856->xvclk);
+ if (xvclk_rate != OV8856_XVCLK_19_2)
+ dev_warn(dev, "external clock rate %u is unsupported",
+ xvclk_rate);
+ if (!is_acpi_node(fwnode)) {
ov8856->reset_gpio = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ov8856->reset_gpio))
@@ -2299,10 +2292,6 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
return ret;
}
- if (xvclk_rate != OV8856_XVCLK_19_2)
- dev_warn(dev, "external clock rate %u is unsupported",
- xvclk_rate);
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -2365,10 +2354,10 @@ static void ov8856_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
+ pm_runtime_disable(ov8856->dev);
mutex_destroy(&ov8856->mutex);
- ov8856_power_off(&client->dev);
+ ov8856_power_off(ov8856->dev);
}
static int ov8856_probe(struct i2c_client *client)
@@ -2381,23 +2370,25 @@ static int ov8856_probe(struct i2c_client *client)
if (!ov8856)
return -ENOMEM;
- ret = ov8856_get_hwcfg(ov8856, &client->dev);
+ ov8856->dev = &client->dev;
+
+ ret = ov8856_get_hwcfg(ov8856);
if (ret)
return ret;
v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
- full_power = acpi_dev_state_d0(&client->dev);
+ full_power = acpi_dev_state_d0(ov8856->dev);
if (full_power) {
- ret = ov8856_power_on(&client->dev);
+ ret = ov8856_power_on(ov8856->dev);
if (ret) {
- dev_err(&client->dev, "failed to power on\n");
+ dev_err(ov8856->dev, "failed to power on\n");
return ret;
}
ret = ov8856_identify_module(ov8856);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov8856->dev, "failed to find sensor: %d", ret);
goto probe_power_off;
}
}
@@ -2407,7 +2398,7 @@ static int ov8856_probe(struct i2c_client *client)
ov8856->cur_mbus_index = ov8856->cur_mode->default_mbus_index;
ret = ov8856_init_controls(ov8856);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov8856->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -2418,22 +2409,22 @@ static int ov8856_probe(struct i2c_client *client)
ov8856->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov8856->sd.entity, 1, &ov8856->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov8856->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
ret = v4l2_async_register_subdev_sensor(&ov8856->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov8856->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup;
}
/* Set the device's state to active if it's in D0 state. */
if (full_power)
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov8856->dev);
+ pm_runtime_enable(ov8856->dev);
+ pm_runtime_idle(ov8856->dev);
return 0;
@@ -2445,7 +2436,7 @@ probe_error_v4l2_ctrl_handler_free:
mutex_destroy(&ov8856->mutex);
probe_power_off:
- ov8856_power_off(&client->dev);
+ ov8856_power_off(ov8856->dev);
return ret;
}
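
The ov8856 changes cache a struct device pointer in the driver state so helpers stop round-tripping through v4l2_get_subdevdata() just to log. A sketch of the pattern with a hypothetical mysensor type:

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>

struct mysensor {
	struct device *dev;	/* cached &client->dev */
	/* ... remaining driver state ... */
};

static int mysensor_probe(struct i2c_client *client)
{
	struct mysensor *sensor;

	sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
	if (!sensor)
		return -ENOMEM;

	/* Cache once; every helper can now log via sensor->dev. */
	sensor->dev = &client->dev;

	dev_dbg(sensor->dev, "probing\n");
	return 0;
}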
diff --git a/drivers/media/i2c/ov8858.c b/drivers/media/i2c/ov8858.c
index 6b7193eaea1f..3f45f7fab833 100644
--- a/drivers/media/i2c/ov8858.c
+++ b/drivers/media/i2c/ov8858.c
@@ -1876,7 +1876,7 @@ static int ov8858_probe(struct i2c_client *client)
if (!ov8858)
return -ENOMEM;
- ov8858->xvclk = devm_clk_get(dev, "xvclk");
+ ov8858->xvclk = devm_v4l2_sensor_clk_get(dev, "xvclk");
if (IS_ERR(ov8858->xvclk))
return dev_err_probe(dev, PTR_ERR(ov8858->xvclk),
"Failed to get xvclk\n");
diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c
index a2138f7988aa..a8586df14f77 100644
--- a/drivers/media/i2c/ov8865.c
+++ b/drivers/media/i2c/ov8865.c
@@ -2304,14 +2304,6 @@ static int ov8865_state_configure(struct ov8865_sensor *sensor,
if (sensor->state.streaming)
return -EBUSY;
- /* State will be configured at first power on otherwise. */
- if (pm_runtime_enabled(sensor->dev) &&
- !pm_runtime_suspended(sensor->dev)) {
- ret = ov8865_mode_configure(sensor, mode, mbus_code);
- if (ret)
- return ret;
- }
-
ret = ov8865_state_mipi_configure(sensor, mode, mbus_code);
if (ret)
return ret;
@@ -2384,10 +2376,10 @@ static int ov8865_sensor_init(struct ov8865_sensor *sensor)
}
/* Configure current mode. */
- ret = ov8865_state_configure(sensor, sensor->state.mode,
- sensor->state.mbus_code);
+ ret = ov8865_mode_configure(sensor, sensor->state.mode,
+ sensor->state.mbus_code);
if (ret) {
- dev_err(sensor->dev, "failed to configure state\n");
+ dev_err(sensor->dev, "failed to configure mode\n");
return ret;
}
@@ -2956,7 +2948,6 @@ static int ov8865_probe(struct i2c_client *client)
struct ov8865_sensor *sensor;
struct v4l2_subdev *subdev;
struct media_pad *pad;
- unsigned int rate = 0;
unsigned int i;
int ret;
@@ -3020,39 +3011,14 @@ static int ov8865_probe(struct i2c_client *client)
/* External Clock */
- sensor->extclk = devm_clk_get(dev, NULL);
- if (PTR_ERR(sensor->extclk) == -ENOENT) {
- dev_info(dev, "no external clock found, continuing...\n");
- sensor->extclk = NULL;
- } else if (IS_ERR(sensor->extclk)) {
- dev_err(dev, "failed to get external clock\n");
- ret = PTR_ERR(sensor->extclk);
- goto error_endpoint;
- }
-
- /*
- * We could have either a 24MHz or 19.2MHz clock rate from either dt or
- * ACPI...but we also need to support the weird IPU3 case which will
- * have an external clock AND a clock-frequency property. Check for the
- * clock-frequency property and if found, set that rate if we managed
- * to acquire a clock. This should cover the ACPI case. If the system
- * uses devicetree then the configured rate should already be set, so
- * we can just read it.
- */
- ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
- &rate);
- if (!ret && sensor->extclk) {
- ret = clk_set_rate(sensor->extclk, rate);
- if (ret) {
- dev_err_probe(dev, ret, "failed to set clock rate\n");
- goto error_endpoint;
- }
- } else if (ret && !sensor->extclk) {
- dev_err_probe(dev, ret, "invalid clock config\n");
+ sensor->extclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->extclk)) {
+ ret = dev_err_probe(dev, PTR_ERR(sensor->extclk),
+ "failed to get external clock\n");
goto error_endpoint;
}
- sensor->extclk_rate = rate ? rate : clk_get_rate(sensor->extclk);
+ sensor->extclk_rate = clk_get_rate(sensor->extclk);
for (i = 0; i < ARRAY_SIZE(supported_extclk_rates); i++) {
if (sensor->extclk_rate == supported_extclk_rates[i])
diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c
index c882a021cf18..3e24d88f603c 100644
--- a/drivers/media/i2c/ov9282.c
+++ b/drivers/media/i2c/ov9282.c
@@ -1129,17 +1129,16 @@ static int ov9282_parse_hw_config(struct ov9282 *ov9282)
ov9282->reset_gpio = devm_gpiod_get_optional(ov9282->dev, "reset",
GPIOD_OUT_LOW);
if (IS_ERR(ov9282->reset_gpio)) {
- dev_err(ov9282->dev, "failed to get reset gpio %ld",
- PTR_ERR(ov9282->reset_gpio));
+ dev_err(ov9282->dev, "failed to get reset gpio %pe",
+ ov9282->reset_gpio);
return PTR_ERR(ov9282->reset_gpio);
}
/* Get sensor input clock */
- ov9282->inclk = devm_clk_get(ov9282->dev, NULL);
- if (IS_ERR(ov9282->inclk)) {
- dev_err(ov9282->dev, "could not get inclk");
- return PTR_ERR(ov9282->inclk);
- }
+ ov9282->inclk = devm_v4l2_sensor_clk_get(ov9282->dev, NULL);
+ if (IS_ERR(ov9282->inclk))
+ return dev_err_probe(ov9282->dev, PTR_ERR(ov9282->inclk),
+ "could not get inclk\n");
ret = ov9282_configure_regulators(ov9282);
if (ret)
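
The ov9282 hunk swaps PTR_ERR()/%ld for the %pe format specifier, which prints a symbolic errno name directly from an error pointer. A sketch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static void report_gpio_error(struct device *dev, struct gpio_desc *gpio)
{
	/* %pe decodes an ERR_PTR() into "-ENOENT", "-EPROBE_DEFER", ... */
	if (IS_ERR(gpio))
		dev_err(dev, "failed to get reset gpio: %pe\n", gpio);
}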
diff --git a/drivers/media/i2c/ov9640.c b/drivers/media/i2c/ov9640.c
index 01dbc0ba89c8..2190c52b1433 100644
--- a/drivers/media/i2c/ov9640.c
+++ b/drivers/media/i2c/ov9640.c
@@ -718,9 +718,10 @@ static int ov9640_probe(struct i2c_client *client)
priv->subdev.ctrl_handler = &priv->hdl;
- priv->clk = devm_clk_get(&client->dev, "mclk");
+ priv->clk = devm_v4l2_sensor_clk_get(&client->dev, "mclk");
if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
+ ret = dev_err_probe(&client->dev, PTR_ERR(priv->clk),
+ "failed to get mclk\n");
goto ectrlinit;
}
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 026ea34d6291..c94e8fe29f22 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1494,9 +1494,10 @@ static int ov965x_probe(struct i2c_client *client)
}
if (dev_fwnode(&client->dev)) {
- ov965x->clk = devm_clk_get(&client->dev, NULL);
+ ov965x->clk = devm_v4l2_sensor_clk_get(&client->dev, NULL);
if (IS_ERR(ov965x->clk))
- return PTR_ERR(ov965x->clk);
+ return dev_err_probe(&client->dev, PTR_ERR(ov965x->clk),
+ "failed to get the clock\n");
ov965x->mclk_frequency = clk_get_rate(ov965x->clk);
ret = ov965x_configure_gpios(ov965x);
diff --git a/drivers/media/i2c/ov9734.c b/drivers/media/i2c/ov9734.c
index cae3aeefb616..0eaf33807fc9 100644
--- a/drivers/media/i2c/ov9734.c
+++ b/drivers/media/i2c/ov9734.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Intel Corporation.
-#include <linux/unaligned.h>
#include <linux/acpi.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/unaligned.h>
+
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
@@ -321,6 +323,9 @@ static const struct ov9734_mode supported_modes[] = {
};
struct ov9734 {
+ struct device *dev;
+ struct clk *clk;
+
struct v4l2_subdev sd;
struct media_pad pad;
struct v4l2_ctrl_handler ctrl_handler;
@@ -414,7 +419,6 @@ static int ov9734_write_reg(struct ov9734 *ov9734, u16 reg, u16 len, u32 val)
static int ov9734_write_reg_list(struct ov9734 *ov9734,
const struct ov9734_reg_list *r_list)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
unsigned int i;
int ret;
@@ -422,7 +426,7 @@ static int ov9734_write_reg_list(struct ov9734 *ov9734,
ret = ov9734_write_reg(ov9734, r_list->regs[i].address, 1,
r_list->regs[i].val);
if (ret) {
- dev_err_ratelimited(&client->dev,
+ dev_err_ratelimited(ov9734->dev,
"write reg 0x%4.4x return err = %d",
r_list->regs[i].address, ret);
return ret;
@@ -476,7 +480,6 @@ static int ov9734_set_ctrl(struct v4l2_ctrl *ctrl)
{
struct ov9734 *ov9734 = container_of(ctrl->handler,
struct ov9734, ctrl_handler);
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
s64 exposure_max;
int ret = 0;
@@ -492,7 +495,7 @@ static int ov9734_set_ctrl(struct v4l2_ctrl *ctrl)
}
/* V4L2 controls values will be applied only when power is already up */
- if (!pm_runtime_get_if_in_use(&client->dev))
+ if (!pm_runtime_get_if_in_use(ov9734->dev))
return 0;
switch (ctrl->id) {
@@ -525,7 +528,7 @@ static int ov9734_set_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov9734->dev);
return ret;
}
@@ -610,7 +613,6 @@ static void ov9734_update_pad_format(const struct ov9734_mode *mode,
static int ov9734_start_streaming(struct ov9734 *ov9734)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
const struct ov9734_reg_list *reg_list;
int link_freq_index, ret;
@@ -618,14 +620,14 @@ static int ov9734_start_streaming(struct ov9734 *ov9734)
reg_list = &link_freq_configs[link_freq_index].reg_list;
ret = ov9734_write_reg_list(ov9734, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set plls");
+ dev_err(ov9734->dev, "failed to set plls");
return ret;
}
reg_list = &ov9734->cur_mode->reg_list;
ret = ov9734_write_reg_list(ov9734, reg_list);
if (ret) {
- dev_err(&client->dev, "failed to set mode");
+ dev_err(ov9734->dev, "failed to set mode");
return ret;
}
@@ -636,30 +638,27 @@ static int ov9734_start_streaming(struct ov9734 *ov9734)
ret = ov9734_write_reg(ov9734, OV9734_REG_MODE_SELECT,
1, OV9734_MODE_STREAMING);
if (ret)
- dev_err(&client->dev, "failed to start stream");
+ dev_err(ov9734->dev, "failed to start stream");
return ret;
}
static void ov9734_stop_streaming(struct ov9734 *ov9734)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
-
if (ov9734_write_reg(ov9734, OV9734_REG_MODE_SELECT,
1, OV9734_MODE_STANDBY))
- dev_err(&client->dev, "failed to stop stream");
+ dev_err(ov9734->dev, "failed to stop stream");
}
static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
{
struct ov9734 *ov9734 = to_ov9734(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
int ret = 0;
mutex_lock(&ov9734->mutex);
if (enable) {
- ret = pm_runtime_resume_and_get(&client->dev);
+ ret = pm_runtime_resume_and_get(ov9734->dev);
if (ret < 0) {
mutex_unlock(&ov9734->mutex);
return ret;
@@ -669,11 +668,11 @@ static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
if (ret) {
enable = 0;
ov9734_stop_streaming(ov9734);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov9734->dev);
}
} else {
ov9734_stop_streaming(ov9734);
- pm_runtime_put(&client->dev);
+ pm_runtime_put(ov9734->dev);
}
mutex_unlock(&ov9734->mutex);
@@ -808,7 +807,6 @@ static const struct v4l2_subdev_internal_ops ov9734_internal_ops = {
static int ov9734_identify_module(struct ov9734 *ov9734)
{
- struct i2c_client *client = v4l2_get_subdevdata(&ov9734->sd);
int ret;
u32 val;
@@ -817,7 +815,7 @@ static int ov9734_identify_module(struct ov9734 *ov9734)
return ret;
if (val != OV9734_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ dev_err(ov9734->dev, "chip id mismatch: %x!=%x",
OV9734_CHIP_ID, val);
return -ENXIO;
}
@@ -832,22 +830,12 @@ static int ov9734_check_hwcfg(struct device *dev)
struct v4l2_fwnode_endpoint bus_cfg = {
.bus_type = V4L2_MBUS_CSI2_DPHY
};
- u32 mclk;
int ret;
unsigned int i, j;
if (!fwnode)
return -ENXIO;
- ret = fwnode_property_read_u32(fwnode, "clock-frequency", &mclk);
- if (ret)
- return ret;
-
- if (mclk != OV9734_MCLK) {
- dev_err(dev, "external clock %d is not supported", mclk);
- return -EINVAL;
- }
-
ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
if (!ep)
return -ENXIO;
@@ -892,14 +880,15 @@ static void ov9734_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
v4l2_ctrl_handler_free(sd->ctrl_handler);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov9734->dev);
+ pm_runtime_set_suspended(ov9734->dev);
mutex_destroy(&ov9734->mutex);
}
static int ov9734_probe(struct i2c_client *client)
{
struct ov9734 *ov9734;
+ unsigned long freq;
int ret;
ret = ov9734_check_hwcfg(&client->dev);
@@ -913,10 +902,23 @@ static int ov9734_probe(struct i2c_client *client)
if (!ov9734)
return -ENOMEM;
+ ov9734->dev = &client->dev;
+
+ ov9734->clk = devm_v4l2_sensor_clk_get(ov9734->dev, NULL);
+ if (IS_ERR(ov9734->clk))
+ return dev_err_probe(ov9734->dev, PTR_ERR(ov9734->clk),
+ "failed to get clock\n");
+
+ freq = clk_get_rate(ov9734->clk);
+ if (freq != OV9734_MCLK)
+ return dev_err_probe(ov9734->dev, -EINVAL,
+ "external clock %lu is not supported",
+ freq);
+
v4l2_i2c_subdev_init(&ov9734->sd, client, &ov9734_subdev_ops);
ret = ov9734_identify_module(ov9734);
if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
+ dev_err(ov9734->dev, "failed to find sensor: %d", ret);
return ret;
}
@@ -924,7 +926,7 @@ static int ov9734_probe(struct i2c_client *client)
ov9734->cur_mode = &supported_modes[0];
ret = ov9734_init_controls(ov9734);
if (ret) {
- dev_err(&client->dev, "failed to init controls: %d", ret);
+ dev_err(ov9734->dev, "failed to init controls: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -935,7 +937,7 @@ static int ov9734_probe(struct i2c_client *client)
ov9734->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&ov9734->sd.entity, 1, &ov9734->pad);
if (ret) {
- dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ dev_err(ov9734->dev, "failed to init entity pads: %d", ret);
goto probe_error_v4l2_ctrl_handler_free;
}
@@ -943,13 +945,13 @@ static int ov9734_probe(struct i2c_client *client)
* Device is already turned on by i2c-core with ACPI domain PM.
* Enable runtime PM and turn off the device.
*/
- pm_runtime_set_active(&client->dev);
- pm_runtime_enable(&client->dev);
- pm_runtime_idle(&client->dev);
+ pm_runtime_set_active(ov9734->dev);
+ pm_runtime_enable(ov9734->dev);
+ pm_runtime_idle(ov9734->dev);
ret = v4l2_async_register_subdev_sensor(&ov9734->sd);
if (ret < 0) {
- dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ dev_err(ov9734->dev, "failed to register V4L2 subdev: %d",
ret);
goto probe_error_media_entity_cleanup_pm;
}
@@ -957,8 +959,8 @@ static int ov9734_probe(struct i2c_client *client)
return 0;
probe_error_media_entity_cleanup_pm:
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
+ pm_runtime_disable(ov9734->dev);
+ pm_runtime_set_suspended(ov9734->dev);
media_entity_cleanup(&ov9734->sd.entity);
probe_error_v4l2_ctrl_handler_free:
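
The ov9734 error path now unwinds runtime PM with pm_runtime_disable() plus pm_runtime_set_suspended(), mirroring the setup order. A sketch of that register/unwind shape, with the ov9734-specific setup elided:

#include <linux/pm_runtime.h>
#include <media/v4l2-async.h>
#include <media/v4l2-subdev.h>

static int mysensor_register(struct device *dev, struct v4l2_subdev *sd)
{
	int ret;

	/* Device was left powered by probe: tell runtime PM, then idle. */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	pm_runtime_idle(dev);

	ret = v4l2_async_register_subdev_sensor(sd);
	if (ret < 0)
		goto err_pm;

	return 0;

err_pm:
	/* Reverse order of the setup above. */
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	return ret;
}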
diff --git a/drivers/media/i2c/rj54n1cb0c.c b/drivers/media/i2c/rj54n1cb0c.c
index b7ca39f63dba..e95342d706c3 100644
--- a/drivers/media/i2c/rj54n1cb0c.c
+++ b/drivers/media/i2c/rj54n1cb0c.c
@@ -1329,10 +1329,13 @@ static int rj54n1_probe(struct i2c_client *client)
V4L2_CID_GAIN, 0, 127, 1, 66);
v4l2_ctrl_new_std(&rj54n1->hdl, &rj54n1_ctrl_ops,
V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);
- rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
- if (rj54n1->hdl.error)
- return rj54n1->hdl.error;
+ if (rj54n1->hdl.error) {
+ ret = rj54n1->hdl.error;
+ goto err_free_ctrl;
+ }
+
+ rj54n1->subdev.ctrl_handler = &rj54n1->hdl;
rj54n1->clk_div = clk_div;
rj54n1->rect.left = RJ54N1_COLUMN_SKIP;
rj54n1->rect.top = RJ54N1_ROW_SKIP;
@@ -1354,8 +1357,8 @@ static int rj54n1_probe(struct i2c_client *client)
rj54n1->pwup_gpio = gpiod_get_optional(&client->dev, "powerup",
GPIOD_OUT_LOW);
if (IS_ERR(rj54n1->pwup_gpio)) {
- dev_info(&client->dev, "Unable to get GPIO \"powerup\": %ld\n",
- PTR_ERR(rj54n1->pwup_gpio));
+ dev_info(&client->dev, "Unable to get GPIO \"powerup\": %pe\n",
+ rj54n1->pwup_gpio);
ret = PTR_ERR(rj54n1->pwup_gpio);
goto err_clk_put;
}
@@ -1363,8 +1366,8 @@ static int rj54n1_probe(struct i2c_client *client)
rj54n1->enable_gpio = gpiod_get_optional(&client->dev, "enable",
GPIOD_OUT_LOW);
if (IS_ERR(rj54n1->enable_gpio)) {
- dev_info(&client->dev, "Unable to get GPIO \"enable\": %ld\n",
- PTR_ERR(rj54n1->enable_gpio));
+ dev_info(&client->dev, "Unable to get GPIO \"enable\": %pe\n",
+ rj54n1->enable_gpio);
ret = PTR_ERR(rj54n1->enable_gpio);
goto err_gpio_put;
}
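
The rj54n1 fix checks hdl.error, the first error latched by the v4l2_ctrl_new_*() calls, and frees the handler before assigning it to the subdevice. Control creation errors are sticky, so one check after the whole batch suffices; a sketch:

#include <media/v4l2-ctrls.h>

static int init_controls(struct v4l2_ctrl_handler *hdl,
			 const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_handler_init(hdl, 2);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_GAIN, 0, 127, 1, 66);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 1);

	/* Any allocation failure above is latched here, checked once. */
	if (hdl->error) {
		int err = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return err;
	}

	return 0;
}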
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 7716dfe2b8c9..ab31ee2b596b 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1368,10 +1368,6 @@ static int __s5c73m3_power_on(struct s5c73m3 *state)
goto err_reg_dis;
}
- ret = clk_set_rate(state->clock, state->mclk_frequency);
- if (ret < 0)
- goto err_reg_dis;
-
ret = clk_prepare_enable(state->clock);
if (ret < 0)
goto err_reg_dis;
@@ -1556,16 +1552,13 @@ static int s5c73m3_get_dt_data(struct s5c73m3 *state)
if (!node)
return -EINVAL;
- state->clock = devm_clk_get(dev, S5C73M3_CLK_NAME);
+ state->clock = devm_v4l2_sensor_clk_get_legacy(dev, S5C73M3_CLK_NAME,
+ false,
+ S5C73M3_DEFAULT_MCLK_FREQ);
if (IS_ERR(state->clock))
- return PTR_ERR(state->clock);
-
- if (of_property_read_u32(node, "clock-frequency",
- &state->mclk_frequency)) {
- state->mclk_frequency = S5C73M3_DEFAULT_MCLK_FREQ;
- dev_info(dev, "using default %u Hz clock frequency\n",
- state->mclk_frequency);
- }
+ return dev_err_probe(dev, PTR_ERR(state->clock),
+ "Failed to get the clock %s\n",
+ S5C73M3_CLK_NAME);
/* Request GPIO lines asserted */
state->stby = devm_gpiod_get(dev, "standby", GPIOD_OUT_HIGH);
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 627e80cf5b72..68a19c2c8db8 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -382,8 +382,6 @@ struct s5c73m3 {
struct clk *clock;
- /* External master clock frequency */
- u32 mclk_frequency;
/* Video bus type - MIPI-CSI2/parallel */
enum v4l2_mbus_type bus_type;
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 24f399cd2124..d1d00eca8708 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -284,7 +284,6 @@ struct s5k5baf {
struct regulator_bulk_data supplies[S5K5BAF_NUM_SUPPLIES];
struct clk *clock;
- u32 mclk_frequency;
struct s5k5baf_fw *fw;
@@ -576,7 +575,7 @@ static void s5k5baf_hw_patch(struct s5k5baf *state)
static void s5k5baf_hw_set_clocks(struct s5k5baf *state)
{
- unsigned long mclk = state->mclk_frequency / 1000;
+ unsigned long mclk = clk_get_rate(state->clock) / 1000;
u16 status;
static const u16 nseq_clk_cfg[] = {
NSEQ(REG_I_USE_NPVI_CLOCKS,
@@ -946,10 +945,6 @@ static int s5k5baf_power_on(struct s5k5baf *state)
if (ret < 0)
goto err;
- ret = clk_set_rate(state->clock, state->mclk_frequency);
- if (ret < 0)
- goto err_reg_dis;
-
ret = clk_prepare_enable(state->clock);
if (ret < 0)
goto err_reg_dis;
@@ -1841,14 +1836,6 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
return -EINVAL;
}
- ret = of_property_read_u32(node, "clock-frequency",
- &state->mclk_frequency);
- if (ret < 0) {
- state->mclk_frequency = S5K5BAF_DEFAULT_MCLK_FREQ;
- dev_info(dev, "using default %u Hz clock frequency\n",
- state->mclk_frequency);
- }
-
node_ep = of_graph_get_endpoint_by_regs(node, 0, -1);
if (!node_ep) {
dev_err(dev, "no endpoint defined at node %pOF\n", node);
@@ -1967,9 +1954,11 @@ static int s5k5baf_probe(struct i2c_client *c)
if (ret < 0)
goto err_me;
- state->clock = devm_clk_get(state->sd.dev, S5K5BAF_CLK_NAME);
+ state->clock = devm_v4l2_sensor_clk_get_legacy(state->sd.dev,
+ S5K5BAF_CLK_NAME, false,
+ S5K5BAF_DEFAULT_MCLK_FREQ);
if (IS_ERR(state->clock)) {
- ret = -EPROBE_DEFER;
+ ret = PTR_ERR(state->clock);
goto err_me;
}
diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c
index 0c2674115b7b..ba6477e88da3 100644
--- a/drivers/media/i2c/s5k6a3.c
+++ b/drivers/media/i2c/s5k6a3.c
@@ -51,7 +51,6 @@ enum {
* @lock: mutex protecting the structure's members below
* @format: media bus format at the sensor's source pad
* @clock: pointer to &struct clk.
- * @clock_frequency: clock frequency
* @power_count: stores state if device is powered
*/
struct s5k6a3 {
@@ -63,7 +62,6 @@ struct s5k6a3 {
struct mutex lock;
struct v4l2_mbus_framefmt format;
struct clk *clock;
- u32 clock_frequency;
int power_count;
};
@@ -192,10 +190,6 @@ static int __s5k6a3_power_on(struct s5k6a3 *sensor)
int i = S5K6A3_SUPP_VDDA;
int ret;
- ret = clk_set_rate(sensor->clock, sensor->clock_frequency);
- if (ret < 0)
- return ret;
-
ret = pm_runtime_get(sensor->dev);
if (ret < 0)
goto error_rpm_put;
@@ -292,22 +286,18 @@ static int s5k6a3_probe(struct i2c_client *client)
mutex_init(&sensor->lock);
sensor->dev = dev;
- sensor->clock = devm_clk_get(sensor->dev, S5K6A3_CLK_NAME);
+ sensor->clock = devm_v4l2_sensor_clk_get_legacy(sensor->dev,
+ S5K6A3_CLK_NAME, false,
+ S5K6A3_DEFAULT_CLK_FREQ);
if (IS_ERR(sensor->clock))
- return PTR_ERR(sensor->clock);
+ return dev_err_probe(sensor->dev, PTR_ERR(sensor->clock),
+ "failed to get extclk\n");
sensor->gpio_reset = devm_gpiod_get(dev, NULL, GPIOD_OUT_HIGH);
ret = PTR_ERR_OR_ZERO(sensor->gpio_reset);
if (ret)
return ret;
- if (of_property_read_u32(dev->of_node, "clock-frequency",
- &sensor->clock_frequency)) {
- sensor->clock_frequency = S5K6A3_DEFAULT_CLK_FREQ;
- dev_info(dev, "using default %u Hz clock frequency\n",
- sensor->clock_frequency);
- }
-
for (i = 0; i < S5K6A3_NUM_SUPPLIES; i++)
sensor->supplies[i].supply = s5k6a3_supply_names[i];
diff --git a/drivers/media/i2c/saa6752hs.c b/drivers/media/i2c/saa6752hs.c
index 1ed8b5edb3fb..1c0031ba43b4 100644
--- a/drivers/media/i2c/saa6752hs.c
+++ b/drivers/media/i2c/saa6752hs.c
@@ -6,7 +6,7 @@
AC-3 support:
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index b8b8f206ec3a..48d6730d9271 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -18,7 +18,7 @@
// Added saa7115 support by Kevin Thayer <nufan_wfk at yahoo.com>
// (2/17/2003)
//
-// VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@xs4all.nl>
+// VBI support (2004) and cleanups (2005) by Hans Verkuil <hverkuil@kernel.org>
//
// Copyright (c) 2005-2006 Mauro Carvalho Chehab <mchehab@kernel.org>
// SAA7111, SAA7113 and SAA7118 support
diff --git a/drivers/media/i2c/saa7127.c b/drivers/media/i2c/saa7127.c
index 818ed19cf37b..a42a7ffe3768 100644
--- a/drivers/media/i2c/saa7127.c
+++ b/drivers/media/i2c/saa7127.c
@@ -25,7 +25,7 @@
* Copyright (C) 2004 Chris Kennedy <c@groovy.org>
*
* VBI additions & cleanup:
- * Copyright (C) 2004, 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2004, 2005 Hans Verkuil <hverkuil@kernel.org>
*
* Note: the saa7126 is identical to the saa7127, and the saa7128 is
* identical to the saa7129, except that the saa7126 and saa7128 have
diff --git a/drivers/media/i2c/saa717x.c b/drivers/media/i2c/saa717x.c
index b0793bb0c02a..713331be947c 100644
--- a/drivers/media/i2c/saa717x.c
+++ b/drivers/media/i2c/saa717x.c
@@ -10,7 +10,7 @@
* Changes by T.Adachi (tadachi@tadachi-net.com)
* - support audio, video scaler etc, and checked the initialize sequence.
*
- * Cleaned up by Hans Verkuil <hverkuil@xs4all.nl>
+ * Cleaned up by Hans Verkuil <hverkuil@kernel.org>
*
* Note: this is a reversed engineered driver based on captures from
* the I2C bus under Windows. This chip is very similar to the saa7134,
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index 41ae25b0911f..4675181af5fb 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -747,8 +747,8 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
of_node_put(ep_node);
if (IS_ERR(asd)) {
- dev_err(&client->dev, "fail to register asd to notifier %ld",
- PTR_ERR(asd));
+ dev_err(&client->dev, "fail to register asd to notifier %pe",
+ asd);
return PTR_ERR(asd);
}
bridge->notifier.ops = &mipid02_notifier_ops;
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index 1cc7636e446d..a0ca19359c43 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -38,7 +38,21 @@
static int debug;
module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "debug level (0-3)");
+MODULE_PARM_DESC(debug, " debug level (0-3)");
+
+static int packet_type = 0x87;
+module_param(packet_type, int, 0644);
+MODULE_PARM_DESC(packet_type,
+ " Programmable Packet Type. Possible values:\n"
+ "\t\t 0x87: DRM InfoFrame (Default).\n"
+ "\t\t 0x01: Audio Clock Regeneration Packet\n"
+ "\t\t 0x02: Audio Sample Packet\n"
+ "\t\t 0x03: General Control Packet\n"
+ "\t\t 0x04: ACP Packet\n"
+ "\t\t 0x07: One Bit Audio Sample Packet\n"
+ "\t\t 0x08: DST Audio Packet\n"
+ "\t\t 0x09: High Bitrate Audio Stream Packet\n"
+ "\t\t 0x0a: Gamut Metadata Packet\n");
MODULE_DESCRIPTION("Toshiba TC358743 HDMI to CSI-2 bridge driver");
MODULE_AUTHOR("Ramakrishnan Muthukrishnan <ram@rkrishnan.org>");
@@ -466,10 +480,29 @@ tc358743_debugfs_if_read(u32 type, void *priv, struct file *filp,
if (!is_hdmi(sd))
return 0;
- if (type != V4L2_DEBUGFS_IF_AVI)
+ switch (type) {
+ case V4L2_DEBUGFS_IF_AVI:
+ i2c_rd(sd, PK_AVI_0HEAD, buf, PK_AVI_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_AUDIO:
+ i2c_rd(sd, PK_AUD_0HEAD, buf, PK_AUD_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_SPD:
+ i2c_rd(sd, PK_SPD_0HEAD, buf, PK_SPD_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_HDMI:
+ i2c_rd(sd, PK_VS_0HEAD, buf, PK_VS_LEN);
+ break;
+ case V4L2_DEBUGFS_IF_DRM:
+ i2c_rd(sd, PK_ACP_0HEAD, buf, PK_ACP_LEN);
+ break;
+ default:
return 0;
+ }
+
+ if (!buf[2])
+ return -ENOENT;
- i2c_rd(sd, PK_AVI_0HEAD, buf, PK_AVI_16BYTE - PK_AVI_0HEAD + 1);
len = buf[2] + 4;
if (len > V4L2_DEBUGFS_IF_MAX_LEN)
len = -ENOENT;
@@ -478,26 +511,69 @@ tc358743_debugfs_if_read(u32 type, void *priv, struct file *filp,
return len < 0 ? 0 : len;
}
-static void print_avi_infoframe(struct v4l2_subdev *sd)
+static void print_infoframes(struct v4l2_subdev *sd)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct device *dev = &client->dev;
union hdmi_infoframe frame;
- u8 buffer[HDMI_INFOFRAME_SIZE(AVI)] = {};
+ u8 buffer[V4L2_DEBUGFS_IF_MAX_LEN] = {};
+
+ /*
+ * Updating the ACP TYPE here allows the monitored packet type to be
+ * changed dynamically, without having to reload the driver with a new
+ * packet_type module option value.
+ *
+ * Instead, set packet_type to the new value, then call
+ * VIDIOC_LOG_STATUS.
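+ *
+ * For example, from userspace (device paths assumed; any tool that
+ * issues VIDIOC_LOG_STATUS works, v4l2-ctl is just one option):
+ *   echo 0x04 > /sys/module/tc358743/parameters/packet_type
+ *   v4l2-ctl -d /dev/video0 --log-status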
+ */
+ i2c_wr8(sd, TYP_ACP_SET, packet_type);
if (!is_hdmi(sd)) {
- v4l2_info(sd, "DVI-D signal - AVI infoframe not supported\n");
+ v4l2_info(sd, "DVI-D signal - InfoFrames not supported\n");
return;
}
- i2c_rd(sd, PK_AVI_0HEAD, buffer, HDMI_INFOFRAME_SIZE(AVI));
+ i2c_rd(sd, PK_AVI_0HEAD, buffer, PK_AVI_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
- if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) < 0) {
- v4l2_err(sd, "%s: unpack of AVI infoframe failed\n", __func__);
- return;
+ i2c_rd(sd, PK_VS_0HEAD, buffer, PK_VS_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+
+ i2c_rd(sd, PK_AUD_0HEAD, buffer, PK_AUD_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+
+ i2c_rd(sd, PK_SPD_0HEAD, buffer, PK_SPD_LEN);
+ if (hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
+
+ i2c_rd(sd, PK_ACP_0HEAD, buffer, PK_ACP_LEN);
+ if (buffer[0] == packet_type) {
+ if (packet_type < 0x80)
+ v4l2_info(sd, "Packet: %*ph\n", PK_ACP_LEN, buffer);
+ else if (packet_type != 0x87)
+ v4l2_info(sd, "InfoFrame: %*ph\n", PK_ACP_LEN, buffer);
+ else if (hdmi_infoframe_unpack(&frame, buffer,
+ sizeof(buffer)) >= 0)
+ hdmi_infoframe_log(KERN_INFO, dev, &frame);
}
- hdmi_infoframe_log(KERN_INFO, dev, &frame);
+ i2c_rd(sd, PK_MS_0HEAD, buffer, PK_MS_LEN);
+ if (buffer[2] && buffer[2] + 3 <= PK_MS_LEN)
+ v4l2_info(sd, "MPEG Source InfoFrame: %*ph\n",
+ buffer[2] + 3, buffer);
+
+ i2c_rd(sd, PK_ISRC1_0HEAD, buffer, PK_ISRC1_LEN);
+ if (buffer[0] == 0x05)
+ v4l2_info(sd, "ISRC1 Packet: %*ph\n",
+ PK_ISRC1_LEN, buffer);
+
+ i2c_rd(sd, PK_ISRC2_0HEAD, buffer, PK_ISRC2_LEN);
+ if (buffer[0] == 0x06)
+ v4l2_info(sd, "ISRC2 Packet: %*ph\n",
+ PK_ISRC2_LEN, buffer);
}
/* --------------- CTRLS --------------- */
@@ -1375,7 +1451,7 @@ static int tc358743_log_status(struct v4l2_subdev *sd)
v4l2_info(sd, "Deep color mode: %d-bits per channel\n",
deep_color_mode[(i2c_rd8(sd, VI_STATUS1) &
MASK_S_DEEPCOLOR) >> 2]);
- print_avi_infoframe(sd);
+ print_infoframes(sd);
return 0;
}
@@ -2232,10 +2308,15 @@ static int tc358743_probe(struct i2c_client *client)
if (err < 0)
goto err_work_queues;
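+ /* Program the monitored packet type and auto-clear stale packets */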
+ i2c_wr8(sd, TYP_ACP_SET, packet_type);
+ i2c_wr8(sd, PK_AUTO_CLR, 0xff);
+ i2c_wr8(sd, NO_PKT_CLR, MASK_NO_ACP_CLR);
+
state->debugfs_dir = debugfs_create_dir(sd->name, v4l2_debugfs_root());
state->infoframes = v4l2_debugfs_if_alloc(state->debugfs_dir,
- V4L2_DEBUGFS_IF_AVI, sd,
- tc358743_debugfs_if_read);
+ V4L2_DEBUGFS_IF_AVI | V4L2_DEBUGFS_IF_AUDIO |
+ V4L2_DEBUGFS_IF_SPD | V4L2_DEBUGFS_IF_HDMI |
+ V4L2_DEBUGFS_IF_DRM, sd, tc358743_debugfs_if_read);
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
@@ -2245,10 +2326,10 @@ static int tc358743_probe(struct i2c_client *client)
err_work_queues:
cec_unregister_adapter(state->cec_adap);
if (!state->i2c_client->irq) {
- timer_delete(&state->timer);
+ timer_delete_sync(&state->timer);
flush_work(&state->work_i2c_poll);
}
- cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ cancel_delayed_work_sync(&state->delayed_work_enable_hotplug);
mutex_destroy(&state->confctl_mutex);
err_hdl:
media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/i2c/tc358743_regs.h b/drivers/media/i2c/tc358743_regs.h
index 2495878dc358..aae288f8add3 100644
--- a/drivers/media/i2c/tc358743_regs.h
+++ b/drivers/media/i2c/tc358743_regs.h
@@ -692,6 +692,8 @@
#define MASK_NCO_F0_MOD_42MHZ 0x00
#define MASK_NCO_F0_MOD_27MHZ 0x01
+#define TYP_ACP_SET 0x8706
+
#define PK_INT_MODE 0x8709
#define MASK_ISRC2_INT_MODE 0x80
#define MASK_ISRC_INT_MODE 0x40
@@ -702,6 +704,8 @@
#define MASK_AUD_INT_MODE 0x02
#define MASK_AVI_INT_MODE 0x01
+#define PK_AUTO_CLR 0x870a
+
#define NO_PKT_LIMIT 0x870B
#define MASK_NO_ACP_LIMIT 0xf0
#define SET_NO_ACP_LIMIT_MS(milliseconds) ((((milliseconds) / 80) << 4) & \
@@ -720,25 +724,44 @@
#define ERR_PK_LIMIT 0x870D
#define NO_PKT_LIMIT2 0x870E
#define PK_AVI_0HEAD 0x8710
-#define PK_AVI_1HEAD 0x8711
-#define PK_AVI_2HEAD 0x8712
#define PK_AVI_0BYTE 0x8713
-#define PK_AVI_1BYTE 0x8714
-#define PK_AVI_2BYTE 0x8715
-#define PK_AVI_3BYTE 0x8716
-#define PK_AVI_4BYTE 0x8717
-#define PK_AVI_5BYTE 0x8718
-#define PK_AVI_6BYTE 0x8719
-#define PK_AVI_7BYTE 0x871A
-#define PK_AVI_8BYTE 0x871B
-#define PK_AVI_9BYTE 0x871C
-#define PK_AVI_10BYTE 0x871D
-#define PK_AVI_11BYTE 0x871E
-#define PK_AVI_12BYTE 0x871F
-#define PK_AVI_13BYTE 0x8720
-#define PK_AVI_14BYTE 0x8721
-#define PK_AVI_15BYTE 0x8722
#define PK_AVI_16BYTE 0x8723
+#define PK_AVI_LEN (PK_AVI_16BYTE - PK_AVI_0HEAD + 1)
+
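+/*
+ * Each PK_*_LEN below covers the whole packet register window: the three
+ * packet header byte registers plus all packet byte registers, inclusive.
+ */
+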
+#define PK_AUD_0HEAD 0x8730
+#define PK_AUD_0BYTE 0x8733
+#define PK_AUD_10BYTE 0x873d
+#define PK_AUD_LEN (PK_AUD_10BYTE - PK_AUD_0HEAD + 1)
+
+#define PK_MS_0HEAD 0x8740
+#define PK_MS_0BYTE 0x8743
+#define PK_MS_10BYTE 0x874d
+#define PK_MS_LEN (PK_MS_10BYTE - PK_MS_0HEAD + 1)
+
+#define PK_SPD_0HEAD 0x8750
+#define PK_SPD_0BYTE 0x8753
+#define PK_SPD_27BYTE 0x876e
+#define PK_SPD_LEN (PK_SPD_27BYTE - PK_SPD_0HEAD + 1)
+
+#define PK_VS_0HEAD 0x8770
+#define PK_VS_0BYTE 0x8773
+#define PK_VS_27BYTE 0x878e
+#define PK_VS_LEN (PK_VS_27BYTE - PK_VS_0HEAD + 1)
+
+#define PK_ACP_0HEAD 0x8790
+#define PK_ACP_0BYTE 0x8793
+#define PK_ACP_27BYTE 0x87ae
+#define PK_ACP_LEN (PK_ACP_27BYTE - PK_ACP_0HEAD + 1)
+
+#define PK_ISRC1_0HEAD 0x87b0
+#define PK_ISRC1_0BYTE 0x87b3
+#define PK_ISRC1_27BYTE 0x87c2
+#define PK_ISRC1_LEN (PK_ISRC1_27BYTE - PK_ISRC1_0HEAD + 1)
+
+#define PK_ISRC2_0HEAD 0x87d0
+#define PK_ISRC2_0BYTE 0x87d3
+#define PK_ISRC2_27BYTE 0x87ee
+#define PK_ISRC2_LEN (PK_ISRC2_27BYTE - PK_ISRC2_0HEAD + 1)
#define BKSV 0x8800
diff --git a/drivers/media/i2c/tc358746.c b/drivers/media/i2c/tc358746.c
index bcfc274cf891..86d9ba3ea4e5 100644
--- a/drivers/media/i2c/tc358746.c
+++ b/drivers/media/i2c/tc358746.c
@@ -1222,14 +1222,16 @@ tc358746_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return tc358746->pll_rate / (prediv * postdiv);
}
-static long tc358746_mclk_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int tc358746_mclk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct tc358746 *tc358746 = clk_hw_to_tc358746(hw);
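+
+ /*
+ * determine_rate() replaces round_rate(): the closest achievable rate
+ * is reported back through req->rate instead of being returned.
+ */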
- *parent_rate = tc358746->pll_rate;
+ req->best_parent_rate = tc358746->pll_rate;
- return tc358746_find_mclk_settings(tc358746, rate);
+ req->rate = tc358746_find_mclk_settings(tc358746, req->rate);
+
+ return 0;
}
static int tc358746_mclk_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -1246,7 +1248,7 @@ static const struct clk_ops tc358746_mclk_ops = {
.enable = tc358746_mclk_enable,
.disable = tc358746_mclk_disable,
.recalc_rate = tc358746_recalc_rate,
- .round_rate = tc358746_mclk_round_rate,
+ .determine_rate = tc358746_mclk_determine_rate,
.set_rate = tc358746_mclk_set_rate,
};
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 1087d2bddaf2..3532766cd795 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -2797,7 +2797,6 @@ err_free_media:
err_free_handler:
v4l2_ctrl_handler_free(&state->hdl);
err_free_mutex:
- cancel_delayed_work(&state->delayed_work_enable_hpd);
mutex_destroy(&state->page_lock);
mutex_destroy(&state->lock);
tda1997x_set_power(state, 0);
diff --git a/drivers/media/i2c/tda9840.c b/drivers/media/i2c/tda9840.c
index d61da811c9da..e3b266db571f 100644
--- a/drivers/media/i2c/tda9840.c
+++ b/drivers/media/i2c/tda9840.c
@@ -3,7 +3,7 @@
tda9840 - i2c-driver for the tda9840 by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
The tda9840 is a stereo/dual sound processor with digital
identification. It can be found at address 0x84 on the i2c-bus.
diff --git a/drivers/media/i2c/tea6415c.c b/drivers/media/i2c/tea6415c.c
index 4aaf66353610..0cd2e6c52e20 100644
--- a/drivers/media/i2c/tea6415c.c
+++ b/drivers/media/i2c/tea6415c.c
@@ -3,7 +3,7 @@
tea6415c - i2c-driver for the tea6415c by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
The tea6415c is a bus controlled video-matrix-switch
with 8 inputs and 6 outputs.
diff --git a/drivers/media/i2c/tea6420.c b/drivers/media/i2c/tea6420.c
index 5c5ea3973251..400883fc0c0f 100644
--- a/drivers/media/i2c/tea6420.c
+++ b/drivers/media/i2c/tea6420.c
@@ -3,7 +3,7 @@
tea6420 - i2c-driver for the tea6420 by SGS Thomson
Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
- Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2008 Hans Verkuil <hverkuil@kernel.org>
The tea6420 is a bus controlled audio-matrix with 5 stereo inputs,
4 stereo outputs and gain control for each output.
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index b7cedc5b3e8e..ff268ebeb4d9 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -7,7 +7,7 @@
* Author: Chaithrika U S <chaithrika@ti.com>
*
* Contributors:
- * Hans Verkuil <hansverk@cisco.com>
+ * Hans Verkuil <hverkuil@kernel.org>
* Lad, Prabhakar <prabhakar.lad@ti.com>
* Martin Bugge <marbugge@cisco.com>
*
diff --git a/drivers/media/i2c/tlv320aic23b.c b/drivers/media/i2c/tlv320aic23b.c
index b7b31b6192af..6f6bc5236565 100644
--- a/drivers/media/i2c/tlv320aic23b.c
+++ b/drivers/media/i2c/tlv320aic23b.c
@@ -7,7 +7,7 @@
* Based on wm8775 driver
*
* Copyright (C) 2004 Ulf Eklund <ivtv at eklund.to>
- * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/upd64031a.c b/drivers/media/i2c/upd64031a.c
index 9d0b72a213be..a178af46e695 100644
--- a/drivers/media/i2c/upd64031a.c
+++ b/drivers/media/i2c/upd64031a.c
@@ -4,7 +4,7 @@
*
* 2003 by T.Adachi <tadachi@tadachi-net.com>
* 2003 by Takeru KOMORIYA <komoriya@paken.org>
- * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
+ * 2006 by Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/i2c/upd64083.c b/drivers/media/i2c/upd64083.c
index 2e99ed5da42c..5421dc5e32c9 100644
--- a/drivers/media/i2c/upd64083.c
+++ b/drivers/media/i2c/upd64083.c
@@ -4,7 +4,7 @@
*
* 2003 by T.Adachi (tadachi@tadachi-net.com)
* 2003 by Takeru KOMORIYA <komoriya@paken.org>
- * 2006 by Hans Verkuil <hverkuil@xs4all.nl>
+ * 2006 by Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/i2c/vd55g1.c b/drivers/media/i2c/vd55g1.c
index 7c39183dd44b..78d18c028154 100644
--- a/drivers/media/i2c/vd55g1.c
+++ b/drivers/media/i2c/vd55g1.c
@@ -29,9 +29,11 @@
/* Register Map */
#define VD55G1_REG_MODEL_ID CCI_REG32_LE(0x0000)
-#define VD55G1_MODEL_ID 0x53354731
+#define VD55G1_MODEL_ID_VD55G1 0x53354731 /* Mono */
+#define VD55G1_MODEL_ID_VD65G4 0x53354733 /* RGB */
#define VD55G1_REG_REVISION CCI_REG16_LE(0x0004)
#define VD55G1_REVISION_CCB 0x2020
+#define VD55G1_REVISION_BAYER 0x3030
#define VD55G1_REG_FWPATCH_REVISION CCI_REG16_LE(0x0012)
#define VD55G1_REG_FWPATCH_START_ADDR CCI_REG8(0x2000)
#define VD55G1_REG_SYSTEM_FSM CCI_REG8(0x001c)
@@ -39,7 +41,8 @@
#define VD55G1_SYSTEM_FSM_SW_STBY 0x02
#define VD55G1_SYSTEM_FSM_STREAMING 0x03
#define VD55G1_REG_BOOT CCI_REG8(0x0200)
-#define VD55G1_BOOT_PATCH_SETUP 2
+#define VD55G1_BOOT_BOOT 1
+#define VD55G1_BOOT_PATCH_AND_BOOT 2
#define VD55G1_REG_STBY CCI_REG8(0x0201)
#define VD55G1_STBY_START_STREAM 1
#define VD55G1_REG_STREAMING CCI_REG8(0x0202)
@@ -66,7 +69,7 @@
#define VD55G1_REG_READOUT_CTRL CCI_REG8(0x052e)
#define VD55G1_READOUT_CTRL_BIN_MODE_NORMAL 0
#define VD55G1_READOUT_CTRL_BIN_MODE_DIGITAL_X2 1
-#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ea)
+#define VD55G1_REG_DUSTER_CTRL CCI_REG8(0x03ae)
#define VD55G1_DUSTER_ENABLE BIT(0)
#define VD55G1_DUSTER_DISABLE 0
#define VD55G1_DUSTER_DYN_ENABLE BIT(1)
@@ -132,7 +135,10 @@
#define VD55G1_MIPI_RATE_MIN (250 * MEGA)
#define VD55G1_MIPI_RATE_MAX (1200 * MEGA)
-static const u8 patch_array[] = {
+#define VD55G1_MODEL_ID_NAME(id) \
+ ((id) == VD55G1_MODEL_ID_VD55G1 ? "vd55g1" : "vd65g4")
+
+static const u8 vd55g1_patch_array[] = {
0x44, 0x03, 0x09, 0x02, 0xe6, 0x01, 0x42, 0x00, 0xea, 0x01, 0x42, 0x00,
0xf0, 0x01, 0x42, 0x00, 0xe6, 0x01, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -466,22 +472,24 @@ struct vd55g1_mode {
u32 height;
};
-struct vd55g1_fmt_desc {
- u32 code;
- u8 bpp;
- u8 data_type;
+static const u32 vd55g1_mbus_formats_mono[] = {
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
};
-static const struct vd55g1_fmt_desc vd55g1_mbus_codes[] = {
+/* Format order is: no flip, hflip, vflip, both */
+static const u32 vd55g1_mbus_formats_bayer[][4] = {
{
- .code = MEDIA_BUS_FMT_Y8_1X8,
- .bpp = 8,
- .data_type = MIPI_CSI2_DT_RAW8,
+ MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_SGRBG8_1X8,
+ MEDIA_BUS_FMT_SGBRG8_1X8,
+ MEDIA_BUS_FMT_SBGGR8_1X8,
},
{
- .code = MEDIA_BUS_FMT_Y10_1X10,
- .bpp = 10,
- .data_type = MIPI_CSI2_DT_RAW10,
+ MEDIA_BUS_FMT_SRGGB10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10,
+ MEDIA_BUS_FMT_SGBRG10_1X10,
+ MEDIA_BUS_FMT_SBGGR10_1X10,
},
};
@@ -524,6 +532,7 @@ struct vd55g1_vblank_limits {
struct vd55g1 {
struct device *dev;
+ unsigned int id;
struct v4l2_subdev sd;
struct media_pad pad;
struct regulator_bulk_data supplies[ARRAY_SIZE(vd55g1_supply_name)];
@@ -572,27 +581,78 @@ static inline struct vd55g1 *ctrl_to_vd55g1(struct v4l2_ctrl *ctrl)
return to_vd55g1(sd);
}
-static const struct vd55g1_fmt_desc *vd55g1_get_fmt_desc(struct vd55g1 *sensor,
- u32 code)
+static unsigned int vd55g1_get_fmt_bpp(u32 code)
{
- unsigned int i;
+ switch (code) {
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ default:
+ return 8;
+
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ return 10;
+ }
+}
+
+static unsigned int vd55g1_get_fmt_data_type(u32 code)
+{
+ switch (code) {
+ case MEDIA_BUS_FMT_Y8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ default:
+ return MIPI_CSI2_DT_RAW8;
+
+ case MEDIA_BUS_FMT_Y10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ return MIPI_CSI2_DT_RAW10;
+ }
+}
+
+static u32 vd55g1_get_fmt_code(struct vd55g1 *sensor, u32 code)
+{
+ unsigned int i, j;
- for (i = 0; i < ARRAY_SIZE(vd55g1_mbus_codes); i++) {
- if (vd55g1_mbus_codes[i].code == code)
- return &vd55g1_mbus_codes[i];
+ if (sensor->id == VD55G1_MODEL_ID_VD55G1)
+ return code;
+
+ for (i = 0; i < ARRAY_SIZE(vd55g1_mbus_formats_bayer); i++) {
+ for (j = 0; j < ARRAY_SIZE(vd55g1_mbus_formats_bayer[i]); j++) {
+ if (vd55g1_mbus_formats_bayer[i][j] == code)
+ goto adapt_bayer_pattern;
+ }
}
+ dev_warn(sensor->dev, "Unsupported mbus format\n");
- /* Should never happen */
- dev_warn(sensor->dev, "Unsupported code %d. default to 8 bpp\n", code);
+ return code;
+
+adapt_bayer_pattern:
+ j = 0;
+ /* In first init_state() call, controls might not be initialized yet */
+ if (sensor->hflip_ctrl && sensor->vflip_ctrl) {
+ j = (sensor->hflip_ctrl->val ? 1 : 0) +
+ (sensor->vflip_ctrl->val ? 2 : 0);
+ }
- return &vd55g1_mbus_codes[0];
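+ /* e.g. hflip=1 and vflip=1 gives j=3, i.e. the SBGGR variant */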
+ return vd55g1_mbus_formats_bayer[i][j];
}
static s32 vd55g1_get_pixel_rate(struct vd55g1 *sensor,
struct v4l2_mbus_framefmt *format)
{
- return sensor->mipi_rate /
- vd55g1_get_fmt_desc(sensor, format->code)->bpp;
+ return sensor->mipi_rate / vd55g1_get_fmt_bpp(format->code);
}
static unsigned int vd55g1_get_hblank_min(struct vd55g1 *sensor,
@@ -605,7 +665,7 @@ static unsigned int vd55g1_get_hblank_min(struct vd55g1 *sensor,
/* MIPI required time */
mipi_req_line_time = (crop->width *
- vd55g1_get_fmt_desc(sensor, format->code)->bpp +
+ vd55g1_get_fmt_bpp(format->code) +
VD55G1_MIPI_MARGIN) /
(sensor->mipi_rate / MEGA);
mipi_req_line_length = mipi_req_line_time * sensor->pixel_clock /
@@ -887,7 +947,7 @@ static void vd55g1_update_pad_fmt(struct vd55g1 *sensor,
const struct vd55g1_mode *mode, u32 code,
struct v4l2_mbus_framefmt *fmt)
{
- fmt->code = code;
+ fmt->code = vd55g1_get_fmt_code(sensor, code);
fmt->width = mode->width;
fmt->height = mode->height;
fmt->colorspace = V4L2_COLORSPACE_RAW;
@@ -951,10 +1011,9 @@ static int vd55g1_set_framefmt(struct vd55g1 *sensor,
int ret = 0;
vd55g1_write(sensor, VD55G1_REG_FORMAT_CTRL,
- vd55g1_get_fmt_desc(sensor, format->code)->bpp, &ret);
+ vd55g1_get_fmt_bpp(format->code), &ret);
vd55g1_write(sensor, VD55G1_REG_OIF_IMG_CTRL,
- vd55g1_get_fmt_desc(sensor, format->code)->data_type,
- &ret);
+ vd55g1_get_fmt_data_type(format->code), &ret);
switch (crop->width / format->width) {
case 1:
@@ -1114,26 +1173,45 @@ static int vd55g1_patch(struct vd55g1 *sensor)
u64 patch;
int ret = 0;
- vd55g1_write_array(sensor, VD55G1_REG_FWPATCH_START_ADDR,
- sizeof(patch_array), patch_array, &ret);
- vd55g1_write(sensor, VD55G1_REG_BOOT, VD55G1_BOOT_PATCH_SETUP, &ret);
- vd55g1_poll_reg(sensor, VD55G1_REG_BOOT, 0, &ret);
- if (ret) {
- dev_err(sensor->dev, "Failed to apply patch\n");
- return ret;
- }
+ /* vd55g1 needs a patch while vd65g4 does not */
+ if (sensor->id == VD55G1_MODEL_ID_VD55G1) {
+ vd55g1_write_array(sensor, VD55G1_REG_FWPATCH_START_ADDR,
+ sizeof(vd55g1_patch_array),
+ vd55g1_patch_array, &ret);
+ vd55g1_write(sensor, VD55G1_REG_BOOT,
+ VD55G1_BOOT_PATCH_AND_BOOT, &ret);
+ vd55g1_poll_reg(sensor, VD55G1_REG_BOOT, 0, &ret);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to apply patch\n");
+ return ret;
+ }
- vd55g1_read(sensor, VD55G1_REG_FWPATCH_REVISION, &patch, &ret);
- if (patch != (VD55G1_FWPATCH_REVISION_MAJOR << 8) +
- VD55G1_FWPATCH_REVISION_MINOR) {
- dev_err(sensor->dev, "Bad patch version expected %d.%d got %d.%d\n",
- VD55G1_FWPATCH_REVISION_MAJOR,
- VD55G1_FWPATCH_REVISION_MINOR,
+ vd55g1_read(sensor, VD55G1_REG_FWPATCH_REVISION, &patch, &ret);
+ if (patch != (VD55G1_FWPATCH_REVISION_MAJOR << 8) +
+ VD55G1_FWPATCH_REVISION_MINOR) {
+ dev_err(sensor->dev, "Bad patch version expected %d.%d got %d.%d\n",
+ VD55G1_FWPATCH_REVISION_MAJOR,
+ VD55G1_FWPATCH_REVISION_MINOR,
+ (u8)(patch >> 8), (u8)(patch & 0xff));
+ return -ENODEV;
+ }
+ dev_dbg(sensor->dev, "patch %d.%d applied\n",
(u8)(patch >> 8), (u8)(patch & 0xff));
- return -ENODEV;
+
+ } else {
+ vd55g1_write(sensor, VD55G1_REG_BOOT, VD55G1_BOOT_BOOT, &ret);
+ vd55g1_poll_reg(sensor, VD55G1_REG_BOOT, 0, &ret);
+ if (ret) {
+ dev_err(sensor->dev, "Failed to boot\n");
+ return ret;
+ }
+ }
+
+ ret = vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_SW_STBY, NULL);
+ if (ret) {
+ dev_err(sensor->dev, "Sensor failed to reach standby after boot\n");
+ return ret;
}
- dev_dbg(sensor->dev, "patch %d.%d applied\n",
- (u8)(patch >> 8), (u8)(patch & 0xff));
return 0;
}
@@ -1165,10 +1243,19 @@ static int vd55g1_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- if (code->index >= ARRAY_SIZE(vd55g1_mbus_codes))
- return -EINVAL;
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ u32 base_code;
- code->code = vd55g1_mbus_codes[code->index].code;
+ if (sensor->id == VD55G1_MODEL_ID_VD55G1) {
+ if (code->index >= ARRAY_SIZE(vd55g1_mbus_formats_mono))
+ return -EINVAL;
+ base_code = vd55g1_mbus_formats_mono[code->index];
+ } else {
+ if (code->index >= ARRAY_SIZE(vd55g1_mbus_formats_bayer))
+ return -EINVAL;
+ base_code = vd55g1_mbus_formats_bayer[code->index][0];
+ }
+ code->code = vd55g1_get_fmt_code(sensor, base_code);
return 0;
}
@@ -1275,7 +1362,7 @@ static int vd55g1_init_state(struct v4l2_subdev *sd,
return ret;
vd55g1_update_pad_fmt(sensor, &vd55g1_supported_modes[VD55G1_MODE_DEF],
- vd55g1_mbus_codes[VD55G1_MBUS_CODE_DEF].code,
+ vd55g1_get_fmt_code(sensor, VD55G1_MBUS_CODE_DEF),
&fmt.format);
return vd55g1_set_pad_fmt(sd, sd_state, &fmt);
@@ -1285,9 +1372,16 @@ static int vd55g1_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_frame_size_enum *fse)
{
+ struct vd55g1 *sensor = to_vd55g1(sd);
+ u32 code;
+
if (fse->index >= ARRAY_SIZE(vd55g1_supported_modes))
return -EINVAL;
+ code = vd55g1_get_fmt_code(sensor, fse->code);
+ if (fse->code != code)
+ return -EINVAL;
+
fse->min_width = vd55g1_supported_modes[fse->index].width;
fse->max_width = fse->min_width;
fse->min_height = vd55g1_supported_modes[fse->index].height;
@@ -1463,8 +1557,12 @@ static int vd55g1_init_ctrls(struct vd55g1 *sensor)
/* Flip cluster */
sensor->hflip_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP,
0, 1, 1, 0);
+ if (sensor->hflip_ctrl)
+ sensor->hflip_ctrl->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
sensor->vflip_ctrl = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP,
0, 1, 1, 0);
+ if (sensor->vflip_ctrl)
+ sensor->vflip_ctrl->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
v4l2_ctrl_cluster(2, &sensor->hflip_ctrl);
/* Exposition cluster */
@@ -1548,26 +1646,34 @@ unlock_state:
static int vd55g1_detect(struct vd55g1 *sensor)
{
- u64 device_rev;
- u64 id;
+ unsigned int dt_id = (uintptr_t)device_get_match_data(sensor->dev);
+ u64 rev, id;
int ret;
ret = vd55g1_read(sensor, VD55G1_REG_MODEL_ID, &id, NULL);
if (ret)
return ret;
- if (id != VD55G1_MODEL_ID) {
- dev_warn(sensor->dev, "Unsupported sensor id %x\n", (u32)id);
+ if (id != VD55G1_MODEL_ID_VD55G1 && id != VD55G1_MODEL_ID_VD65G4) {
+ dev_warn(sensor->dev, "Unsupported sensor id 0x%x\n",
+ (u32)id);
+ return -ENODEV;
+ }
+ if (id != dt_id) {
+ dev_err(sensor->dev, "Probed sensor %s and device tree definition (%s) mismatch\n",
+ VD55G1_MODEL_ID_NAME(id), VD55G1_MODEL_ID_NAME(dt_id));
return -ENODEV;
}
+ sensor->id = id;
- ret = vd55g1_read(sensor, VD55G1_REG_REVISION, &device_rev, NULL);
+ ret = vd55g1_read(sensor, VD55G1_REG_REVISION, &rev, NULL);
if (ret)
return ret;
- if (device_rev != VD55G1_REVISION_CCB) {
- dev_err(sensor->dev, "Unsupported sensor revision (0x%x)\n",
- (u16)device_rev);
+ if ((id == VD55G1_MODEL_ID_VD55G1 && rev != VD55G1_REVISION_CCB) ||
+ (id == VD55G1_MODEL_ID_VD65G4 && rev != VD55G1_REVISION_BAYER)) {
+ dev_err(sensor->dev, "Unsupported sensor revision 0x%x for sensor %s\n",
+ (u16)rev, VD55G1_MODEL_ID_NAME(id));
return -ENODEV;
}
@@ -1616,13 +1722,6 @@ static int vd55g1_power_on(struct device *dev)
goto disable_clock;
}
- ret = vd55g1_wait_state(sensor, VD55G1_SYSTEM_FSM_SW_STBY, NULL);
- if (ret) {
- dev_err(dev, "Sensor waiting after patch failed %d\n",
- ret);
- goto disable_clock;
- }
-
return 0;
disable_clock:
@@ -1860,7 +1959,7 @@ static int vd55g1_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Failed to get regulators\n");
- sensor->xclk = devm_clk_get(dev, NULL);
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->xclk))
return dev_err_probe(dev, PTR_ERR(sensor->xclk),
"Failed to get xclk\n");
@@ -1934,7 +2033,8 @@ static void vd55g1_remove(struct i2c_client *client)
}
static const struct of_device_id vd55g1_dt_ids[] = {
- { .compatible = "st,vd55g1" },
+ { .compatible = "st,vd55g1", .data = (void *)VD55G1_MODEL_ID_VD55G1 },
+ { .compatible = "st,vd65g4", .data = (void *)VD55G1_MODEL_ID_VD65G4 },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, vd55g1_dt_ids);
diff --git a/drivers/media/i2c/vd56g3.c b/drivers/media/i2c/vd56g3.c
index d66e21ba4498..157acea9e286 100644
--- a/drivers/media/i2c/vd56g3.c
+++ b/drivers/media/i2c/vd56g3.c
@@ -1471,7 +1471,7 @@ static int vd56g3_probe(struct i2c_client *client)
if (ret)
return dev_err_probe(dev, ret, "Failed to get regulators\n");
- sensor->xclk = devm_clk_get(dev, NULL);
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
if (IS_ERR(sensor->xclk))
return dev_err_probe(dev, PTR_ERR(sensor->xclk),
"Failed to get xclk\n");
diff --git a/drivers/media/i2c/vgxy61.c b/drivers/media/i2c/vgxy61.c
index 5b0479f3a3c0..d64d0099e6fe 100644
--- a/drivers/media/i2c/vgxy61.c
+++ b/drivers/media/i2c/vgxy61.c
@@ -1181,6 +1181,21 @@ static int vgxy61_s_stream(struct v4l2_subdev *sd, int enable)
return ret;
}
+static int vgxy61_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ struct vgxy61_dev *sensor = to_vgxy61_dev(sd);
+
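+ /* Single-stream CSI-2 frame descriptor on virtual channel 0 */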
+ fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
+ fd->num_entries = 1;
+ fd->entry[0].pixelcode = sensor->fmt.code;
+ fd->entry[0].stream = 0;
+ fd->entry[0].bus.csi2.vc = 0;
+ fd->entry[0].bus.csi2.dt = get_data_type_by_code(sensor->fmt.code);
+
+ return 0;
+}
+
static int vgxy61_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
@@ -1402,6 +1417,7 @@ static const struct v4l2_subdev_pad_ops vgxy61_pad_ops = {
.set_fmt = vgxy61_set_fmt,
.get_selection = vgxy61_get_selection,
.enum_frame_size = vgxy61_enum_frame_size,
+ .get_frame_desc = vgxy61_get_frame_desc,
};
static const struct v4l2_subdev_ops vgxy61_subdev_ops = {
@@ -1761,11 +1777,11 @@ static int vgxy61_probe(struct i2c_client *client)
return ret;
}
- sensor->xclk = devm_clk_get(dev, NULL);
- if (IS_ERR(sensor->xclk)) {
- dev_err(dev, "failed to get xclk\n");
- return PTR_ERR(sensor->xclk);
- }
+ sensor->xclk = devm_v4l2_sensor_clk_get(dev, NULL);
+ if (IS_ERR(sensor->xclk))
+ return dev_err_probe(dev, PTR_ERR(sensor->xclk),
+ "failed to get xclk\n");
+
sensor->clk_freq = clk_get_rate(sensor->xclk);
if (sensor->clk_freq < 6 * HZ_PER_MHZ ||
sensor->clk_freq > 27 * HZ_PER_MHZ) {
diff --git a/drivers/media/i2c/vp27smpx.c b/drivers/media/i2c/vp27smpx.c
index 06fd46a63c72..df21950be24f 100644
--- a/drivers/media/i2c/vp27smpx.c
+++ b/drivers/media/i2c/vp27smpx.c
@@ -2,7 +2,7 @@
/*
* vp27smpx - driver version 0.0.1
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*
* Based on a tvaudio patch from Takahiro Adachi <tadachi@tadachi-net.com>
* and Kazuhiko Kawakami <kazz-0@mail.goo.ne.jp>
diff --git a/drivers/media/i2c/wm8739.c b/drivers/media/i2c/wm8739.c
index c091b78a5b41..72eb10339d06 100644
--- a/drivers/media/i2c/wm8739.c
+++ b/drivers/media/i2c/wm8739.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2005 T. Adachi <tadachi@tadachi-net.com>
*
- * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
* - Cleanup
*/
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 619b2988577c..56778d3bc28a 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -6,7 +6,7 @@
*
* Based on saa7115 driver
*
- * Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2005 Hans Verkuil <hverkuil@kernel.org>
* - Cleanup
* - V4L2 API update
* - sound fixes
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index 56444edaf136..6daa7aa99442 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -50,11 +50,6 @@ static void media_devnode_release(struct device *cd)
{
struct media_devnode *devnode = to_media_devnode(cd);
- mutex_lock(&media_devnode_lock);
- /* Mark device node number as free */
- clear_bit(devnode->minor, media_devnode_nums);
- mutex_unlock(&media_devnode_lock);
-
/* Release media_devnode and perform other cleanups as needed. */
if (devnode->release)
devnode->release(devnode);
@@ -281,6 +276,7 @@ void media_devnode_unregister(struct media_devnode *devnode)
/* Delete the cdev on this minor as well */
cdev_device_del(&devnode->cdev, &devnode->dev);
devnode->media_dev = NULL;
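+ /* Mark device node number as free */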
+ clear_bit(devnode->minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);
put_device(&devnode->dev);
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 045590905582..9519a537bfa2 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -682,8 +682,8 @@ done:
return 0;
dev_dbg(walk->mdev->dev,
- "media pipeline: adding unconnected pads of '%s'\n",
- local->entity->name);
+ "media pipeline: adding unconnected pads of '%s' reachable from pad %u\n",
+ origin->entity->name, origin->index);
media_entity_for_each_pad(origin->entity, local) {
/*
@@ -691,7 +691,7 @@ done:
* (already discovered through iterating over links) and pads
* not internally connected.
*/
- if (origin == local || !local->num_links ||
+ if (origin == local || local->num_links ||
!media_entity_has_pad_interdep(origin->entity, origin->index,
local->index))
continue;
diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
index 5edfc2791ce7..2ac9ac0a740b 100644
--- a/drivers/media/mc/mc-request.c
+++ b/drivers/media/mc/mc-request.c
@@ -6,7 +6,7 @@
* Copyright (C) 2018 Intel Corporation
* Copyright (C) 2018 Google, Inc.
*
- * Author: Hans Verkuil <hansverk@cisco.com>
+ * Author: Hans Verkuil <hverkuil@kernel.org>
* Author: Sakari Ailus <sakari.ailus@linux.intel.com>
*/
@@ -282,8 +282,6 @@ EXPORT_SYMBOL_GPL(media_request_get_by_fd);
int media_request_alloc(struct media_device *mdev, int *alloc_fd)
{
struct media_request *req;
- struct file *filp;
- int fd;
int ret;
/* Either both are NULL or both are non-NULL */
@@ -297,19 +295,6 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
if (!req)
return -ENOMEM;
- fd = get_unused_fd_flags(O_CLOEXEC);
- if (fd < 0) {
- ret = fd;
- goto err_free_req;
- }
-
- filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
- if (IS_ERR(filp)) {
- ret = PTR_ERR(filp);
- goto err_put_fd;
- }
-
- filp->private_data = req;
req->mdev = mdev;
req->state = MEDIA_REQUEST_STATE_IDLE;
req->num_incomplete_objects = 0;
@@ -320,19 +305,24 @@ int media_request_alloc(struct media_device *mdev, int *alloc_fd)
req->updating_count = 0;
req->access_count = 0;
- *alloc_fd = fd;
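+ /*
+ * FD_PREPARE() pairs fd reservation with file creation; on success,
+ * fd_publish() below installs the file and returns the fd number
+ * handed back to userspace.
+ */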
+ FD_PREPARE(fdf, O_CLOEXEC,
+ anon_inode_getfile("request", &request_fops, NULL,
+ O_CLOEXEC));
+ if (fdf.err) {
+ ret = fdf.err;
+ goto err_free_req;
+ }
+
+ fd_prepare_file(fdf)->private_data = req;
+
+ *alloc_fd = fd_publish(fdf);
snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
- atomic_inc_return(&mdev->request_id), fd);
+ atomic_inc_return(&mdev->request_id), *alloc_fd);
dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
- fd_install(fd, filp);
-
return 0;
-err_put_fd:
- put_unused_fd(fd);
-
err_free_req:
if (mdev->ops->req_free)
mdev->ops->req_free(req);
diff --git a/drivers/media/pci/b2c2/flexcop-pci.c b/drivers/media/pci/b2c2/flexcop-pci.c
index 486c8ec0fa60..ab53c5b02c48 100644
--- a/drivers/media/pci/b2c2/flexcop-pci.c
+++ b/drivers/media/pci/b2c2/flexcop-pci.c
@@ -411,7 +411,7 @@ static void flexcop_pci_remove(struct pci_dev *pdev)
struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);
if (irq_chk_intv > 0)
- cancel_delayed_work(&fc_pci->irq_check_work);
+ cancel_delayed_work_sync(&fc_pci->irq_check_work);
flexcop_pci_dma_exit(fc_pci);
flexcop_device_exit(fc_pci->fc_dev);
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index 9ce67f515843..17e4529e537a 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -1620,7 +1620,7 @@ static int bttv_g_std(struct file *file, void *priv, v4l2_std_id *id)
return 0;
}
-static int bttv_querystd(struct file *file, void *f, v4l2_std_id *id)
+static int bttv_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
struct bttv *btv = video_drvdata(file);
@@ -1750,7 +1750,7 @@ static int bttv_s_frequency(struct file *file, void *priv,
return 0;
}
-static int bttv_log_status(struct file *file, void *f)
+static int bttv_log_status(struct file *file, void *priv)
{
struct video_device *vdev = video_devdata(file);
struct bttv *btv = video_drvdata(file);
@@ -1761,7 +1761,7 @@ static int bttv_log_status(struct file *file, void *f)
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
-static int bttv_g_register(struct file *file, void *f,
+static int bttv_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct bttv *btv = video_drvdata(file);
@@ -1774,7 +1774,7 @@ static int bttv_g_register(struct file *file, void *f,
return 0;
}
-static int bttv_s_register(struct file *file, void *f,
+static int bttv_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
struct bttv *btv = video_drvdata(file);
@@ -2159,7 +2159,7 @@ static int bttv_enum_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-static int bttv_g_parm(struct file *file, void *f,
+static int bttv_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
struct bttv *btv = video_drvdata(file);
@@ -2208,7 +2208,7 @@ static int bttv_g_pixelaspect(struct file *file, void *priv,
return 0;
}
-static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int bttv_g_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct bttv *btv = video_drvdata(file);
@@ -2232,7 +2232,7 @@ static int bttv_g_selection(struct file *file, void *f, struct v4l2_selection *s
return 0;
}
-static int bttv_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int bttv_s_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct bttv *btv = video_drvdata(file);
const struct v4l2_rect *b;
diff --git a/drivers/media/pci/bt8xx/bttv-vbi.c b/drivers/media/pci/bt8xx/bttv-vbi.c
index a71440611e46..0ca88a2400ee 100644
--- a/drivers/media/pci/bt8xx/bttv-vbi.c
+++ b/drivers/media/pci/bt8xx/bttv-vbi.c
@@ -241,7 +241,7 @@ static int try_fmt(struct v4l2_vbi_format *f, const struct bttv_tvnorm *tvnorm,
return 0;
}
-int bttv_try_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
+int bttv_try_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *frt)
{
struct bttv *btv = video_drvdata(file);
const struct bttv_tvnorm *tvnorm;
@@ -258,7 +258,7 @@ int bttv_try_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
}
-int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
+int bttv_s_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *frt)
{
struct bttv *btv = video_drvdata(file);
const struct bttv_tvnorm *tvnorm;
@@ -301,7 +301,7 @@ int bttv_s_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
}
-int bttv_g_fmt_vbi_cap(struct file *file, void *f, struct v4l2_format *frt)
+int bttv_g_fmt_vbi_cap(struct file *file, void *priv, struct v4l2_format *frt)
{
const struct bttv_tvnorm *tvnorm;
struct bttv *btv = video_drvdata(file);
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 39e25cc53edb..b7695705fdee 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -44,7 +44,7 @@ module_param_named(ignore_err, cobalt_ignore_err, int, 0644);
MODULE_PARM_DESC(ignore_err,
"If set then ignore missing i2c adapters/receivers. Default: 0\n");
-MODULE_AUTHOR("Hans Verkuil <hansverk@cisco.com> & Morten Hestnes");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org> & Morten Hestnes");
MODULE_DESCRIPTION("cobalt driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/pci/cobalt/cobalt-v4l2.c b/drivers/media/pci/cobalt/cobalt-v4l2.c
index ae82427e3479..51fd9576c6c2 100644
--- a/drivers/media/pci/cobalt/cobalt-v4l2.c
+++ b/drivers/media/pci/cobalt/cobalt-v4l2.c
@@ -447,7 +447,7 @@ static int cobalt_cobaltc(struct cobalt *cobalt, unsigned int cmd, void *arg)
return 0;
}
-static int cobalt_g_register(struct file *file, void *priv_fh,
+static int cobalt_g_register(struct file *file, void *priv,
struct v4l2_dbg_register *reg)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -456,7 +456,7 @@ static int cobalt_g_register(struct file *file, void *priv_fh,
return cobalt_cobaltc(cobalt, VIDIOC_DBG_G_REGISTER, reg);
}
-static int cobalt_s_register(struct file *file, void *priv_fh,
+static int cobalt_s_register(struct file *file, void *priv,
const struct v4l2_dbg_register *reg)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -467,7 +467,7 @@ static int cobalt_s_register(struct file *file, void *priv_fh,
}
#endif
-static int cobalt_querycap(struct file *file, void *priv_fh,
+static int cobalt_querycap(struct file *file, void *priv,
struct v4l2_capability *vcap)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -562,7 +562,7 @@ static void cobalt_video_input_status_show(struct cobalt_stream *s)
cobalt_info("rx%d: Packer: %x\n", rx, ioread32(&packer->control));
}
-static int cobalt_log_status(struct file *file, void *priv_fh)
+static int cobalt_log_status(struct file *file, void *priv)
{
struct cobalt_stream *s = video_drvdata(file);
struct cobalt *cobalt = s->cobalt;
@@ -596,7 +596,7 @@ static int cobalt_log_status(struct file *file, void *priv_fh)
return 0;
}
-static int cobalt_enum_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_enum_dv_timings(struct file *file, void *priv,
struct v4l2_enum_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -613,7 +613,7 @@ static int cobalt_enum_dv_timings(struct file *file, void *priv_fh,
pad, enum_dv_timings, timings);
}
-static int cobalt_s_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_s_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -641,7 +641,7 @@ static int cobalt_s_dv_timings(struct file *file, void *priv_fh,
return err;
}
-static int cobalt_g_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_g_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -654,7 +654,7 @@ static int cobalt_g_dv_timings(struct file *file, void *priv_fh,
pad, g_dv_timings, 0, timings);
}
-static int cobalt_query_dv_timings(struct file *file, void *priv_fh,
+static int cobalt_query_dv_timings(struct file *file, void *priv,
struct v4l2_dv_timings *timings)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -667,7 +667,7 @@ static int cobalt_query_dv_timings(struct file *file, void *priv_fh,
pad, query_dv_timings, 0, timings);
}
-static int cobalt_dv_timings_cap(struct file *file, void *priv_fh,
+static int cobalt_dv_timings_cap(struct file *file, void *priv,
struct v4l2_dv_timings_cap *cap)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -677,7 +677,7 @@ static int cobalt_dv_timings_cap(struct file *file, void *priv_fh,
pad, dv_timings_cap, cap);
}
-static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
switch (f->index) {
@@ -697,7 +697,7 @@ static int cobalt_enum_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_g_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -726,7 +726,7 @@ static int cobalt_g_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_try_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -787,7 +787,7 @@ static int cobalt_try_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_s_fmt_vid_cap(struct file *file, void *priv_fh,
+static int cobalt_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -796,7 +796,7 @@ static int cobalt_s_fmt_vid_cap(struct file *file, void *priv_fh,
if (vb2_is_busy(&s->q))
return -EBUSY;
- if (cobalt_try_fmt_vid_cap(file, priv_fh, f))
+ if (cobalt_try_fmt_vid_cap(file, priv, f))
return -EINVAL;
s->width = pix->width;
@@ -821,7 +821,7 @@ static int cobalt_s_fmt_vid_cap(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_try_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_try_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct v4l2_pix_format *pix = &f->fmt.pix;
@@ -862,7 +862,7 @@ static int cobalt_try_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_g_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -882,7 +882,7 @@ static int cobalt_g_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_enum_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
switch (f->index) {
@@ -899,7 +899,7 @@ static int cobalt_enum_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_s_fmt_vid_out(struct file *file, void *priv_fh,
+static int cobalt_s_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -909,7 +909,7 @@ static int cobalt_s_fmt_vid_out(struct file *file, void *priv_fh,
};
u32 code;
- if (cobalt_try_fmt_vid_out(file, priv_fh, f))
+ if (cobalt_try_fmt_vid_out(file, priv, f))
return -EINVAL;
if (vb2_is_busy(&s->q) && (pix->pixelformat != s->pixfmt ||
@@ -942,7 +942,7 @@ static int cobalt_s_fmt_vid_out(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_enum_input(struct file *file, void *priv_fh,
+static int cobalt_enum_input(struct file *file, void *priv,
struct v4l2_input *inp)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -963,7 +963,7 @@ static int cobalt_enum_input(struct file *file, void *priv_fh,
video, g_input_status, &inp->status);
}
-static int cobalt_g_input(struct file *file, void *priv_fh, unsigned int *i)
+static int cobalt_g_input(struct file *file, void *priv, unsigned int *i)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -971,7 +971,7 @@ static int cobalt_g_input(struct file *file, void *priv_fh, unsigned int *i)
return 0;
}
-static int cobalt_s_input(struct file *file, void *priv_fh, unsigned int i)
+static int cobalt_s_input(struct file *file, void *priv, unsigned int i)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -990,7 +990,7 @@ static int cobalt_s_input(struct file *file, void *priv_fh, unsigned int i)
ADV76XX_PAD_HDMI_PORT_A, 0, 0);
}
-static int cobalt_enum_output(struct file *file, void *priv_fh,
+static int cobalt_enum_output(struct file *file, void *priv,
struct v4l2_output *out)
{
if (out->index)
@@ -1001,18 +1001,18 @@ static int cobalt_enum_output(struct file *file, void *priv_fh,
return 0;
}
-static int cobalt_g_output(struct file *file, void *priv_fh, unsigned int *i)
+static int cobalt_g_output(struct file *file, void *priv, unsigned int *i)
{
*i = 0;
return 0;
}
-static int cobalt_s_output(struct file *file, void *priv_fh, unsigned int i)
+static int cobalt_s_output(struct file *file, void *priv, unsigned int i)
{
return i ? -EINVAL : 0;
}
-static int cobalt_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+static int cobalt_g_edid(struct file *file, void *priv, struct v4l2_edid *edid)
{
struct cobalt_stream *s = video_drvdata(file);
u32 pad = edid->pad;
@@ -1026,7 +1026,7 @@ static int cobalt_g_edid(struct file *file, void *fh, struct v4l2_edid *edid)
return ret;
}
-static int cobalt_s_edid(struct file *file, void *fh, struct v4l2_edid *edid)
+static int cobalt_s_edid(struct file *file, void *priv, struct v4l2_edid *edid)
{
struct cobalt_stream *s = video_drvdata(file);
u32 pad = edid->pad;
@@ -1050,7 +1050,7 @@ static int cobalt_subscribe_event(struct v4l2_fh *fh,
return v4l2_ctrl_subscribe_event(fh, sub);
}
-static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
+static int cobalt_g_parm(struct file *file, void *priv, struct v4l2_streamparm *a)
{
struct cobalt_stream *s = video_drvdata(file);
struct v4l2_fract fps;
@@ -1065,7 +1065,7 @@ static int cobalt_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a)
return 0;
}
-static int cobalt_g_pixelaspect(struct file *file, void *fh,
+static int cobalt_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
struct cobalt_stream *s = video_drvdata(file);
@@ -1084,7 +1084,7 @@ static int cobalt_g_pixelaspect(struct file *file, void *fh,
return err;
}
-static int cobalt_g_selection(struct file *file, void *fh,
+static int cobalt_g_selection(struct file *file, void *priv,
struct v4l2_selection *sel)
{
struct cobalt_stream *s = video_drvdata(file);
diff --git a/drivers/media/pci/cx18/cx18-audio.c b/drivers/media/pci/cx18/cx18-audio.c
index 8602d088601b..1464795619f9 100644
--- a/drivers/media/pci/cx18/cx18-audio.c
+++ b/drivers/media/pci/cx18/cx18-audio.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-audio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-audio.h b/drivers/media/pci/cx18/cx18-audio.h
index 36ce333ab07a..29cf89d38d60 100644
--- a/drivers/media/pci/cx18/cx18-audio.h
+++ b/drivers/media/pci/cx18/cx18-audio.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-audio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
int cx18_audio_set_io(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-av-audio.c b/drivers/media/pci/cx18/cx18-av-audio.c
index 78e05df9a7ba..644d8ca4519b 100644
--- a/drivers/media/pci/cx18/cx18-av-audio.c
+++ b/drivers/media/pci/cx18/cx18-av-audio.c
@@ -4,7 +4,7 @@
*
* Derived from cx25840-audio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c
index ee6e71157786..4fb19d26ee29 100644
--- a/drivers/media/pci/cx18/cx18-av-core.c
+++ b/drivers/media/pci/cx18/cx18-av-core.c
@@ -4,7 +4,7 @@
*
* Derived from cx25840-core.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-core.h b/drivers/media/pci/cx18/cx18-av-core.h
index 55aceb064b33..71ac9d7af28f 100644
--- a/drivers/media/pci/cx18/cx18-av-core.h
+++ b/drivers/media/pci/cx18/cx18-av-core.h
@@ -4,7 +4,7 @@
*
* Derived from cx25840-core.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-firmware.c b/drivers/media/pci/cx18/cx18-av-firmware.c
index 61aeb8c9af7f..02dde685a6ad 100644
--- a/drivers/media/pci/cx18/cx18-av-firmware.c
+++ b/drivers/media/pci/cx18/cx18-av-firmware.c
@@ -2,7 +2,7 @@
/*
* cx18 ADEC firmware functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-av-vbi.c b/drivers/media/pci/cx18/cx18-av-vbi.c
index 1a113aad9cd4..f9beeaeaa1cb 100644
--- a/drivers/media/pci/cx18/cx18-av-vbi.c
+++ b/drivers/media/pci/cx18/cx18-av-vbi.c
@@ -4,7 +4,7 @@
*
* Derived from cx25840-vbi.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/cx18/cx18-cards.c b/drivers/media/pci/cx18/cx18-cards.c
index f5a30959a367..bddb9e0fffe0 100644
--- a/drivers/media/pci/cx18/cx18-cards.c
+++ b/drivers/media/pci/cx18/cx18-cards.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-cards.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-cards.h b/drivers/media/pci/cx18/cx18-cards.h
index ae9cf5bfdd59..511123b741d5 100644
--- a/drivers/media/pci/cx18/cx18-cards.h
+++ b/drivers/media/pci/cx18/cx18-cards.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-cards.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-controls.c b/drivers/media/pci/cx18/cx18-controls.c
index bb5fc120473c..78eadad8b6e8 100644
--- a/drivers/media/pci/cx18/cx18-controls.c
+++ b/drivers/media/pci/cx18/cx18-controls.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-controls.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
diff --git a/drivers/media/pci/cx18/cx18-controls.h b/drivers/media/pci/cx18/cx18-controls.h
index 458a3595a2ae..99de78878a76 100644
--- a/drivers/media/pci/cx18/cx18-controls.h
+++ b/drivers/media/pci/cx18/cx18-controls.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-controls.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index 743fcc961374..74c59a94b2b0 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-driver.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -1136,11 +1136,8 @@ int cx18_init_on_first_open(struct cx18 *cx)
int video_input;
int fw_retry_count = 3;
struct v4l2_frequency vf;
- struct cx18_open_id fh;
v4l2_std_id std;
- fh.cx = cx;
-
if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
return -ENXIO;
@@ -1220,14 +1217,14 @@ int cx18_init_on_first_open(struct cx18 *cx)
video_input = cx->active_input;
cx->active_input++; /* Force update of input */
- cx18_s_input(NULL, &fh, video_input);
+ cx18_do_s_input(cx, video_input);
/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
in one place. */
cx->std++; /* Force full standard initialization */
std = (cx->tuner_std == V4L2_STD_ALL) ? V4L2_STD_NTSC_M : cx->tuner_std;
- cx18_s_std(NULL, &fh, std);
- cx18_s_frequency(NULL, &fh, &vf);
+ cx18_do_s_std(cx, std);
+ cx18_do_s_frequency(cx, &vf);
return 0;
}
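
The hunk above replaces ioctl entry points called with a forged file handle (cx18_s_input(NULL, &fh, ...)) by helpers that take the driver state directly. A minimal sketch of the pattern, with hypothetical names (dev_state, file_to_dev) standing in for the cx18 types:

struct dev_state { unsigned int active_input; };

/* device-level logic: no struct file or v4l2_fh needed */
static int dev_do_s_input(struct dev_state *dev, unsigned int inp)
{
	dev->active_input = inp;
	return 0;
}

/* ioctl wrapper: recover the device from the file and delegate */
static int dev_s_input(struct file *file, void *fh, unsigned int inp)
{
	return dev_do_s_input(file_to_dev(file), inp);
}

First-open initialization can then call the do_* helper on the bare device instead of fabricating a per-open structure on the stack.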
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 485ca9747c4c..ef38903709d0 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-driver.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -414,7 +414,7 @@ static inline struct cx18_open_id *fh2id(struct v4l2_fh *fh)
static inline struct cx18_open_id *file2id(struct file *file)
{
- return fh2id(file->private_data);
+ return fh2id(file_to_v4l2_fh(file));
}
/* forward declaration of struct defined in cx18-cards.h */
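
file2id() now goes through file_to_v4l2_fh(), the accessor this series uses instead of reading filp->private_data by hand. Recovering the per-open state is the usual container_of() idiom; a simplified sketch (ivtv's file2id later in this series follows the same shape):

struct cx18_open_id {
	struct v4l2_fh fh;	/* the member container_of() names below */
	/* ... per-open state ... */
};

static inline struct cx18_open_id *file2id(struct file *file)
{
	/* map file -> embedded v4l2_fh -> enclosing cx18_open_id */
	return container_of(file_to_v4l2_fh(file), struct cx18_open_id, fh);
}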
diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
index cefa91b37f89..4944033dbb20 100644
--- a/drivers/media/pci/cx18/cx18-fileops.c
+++ b/drivers/media/pci/cx18/cx18-fileops.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-fileops.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -678,7 +678,7 @@ void cx18_stop_capture(struct cx18_stream *s, int gop_end)
int cx18_v4l2_close(struct file *filp)
{
- struct v4l2_fh *fh = filp->private_data;
+ struct v4l2_fh *fh = file_to_v4l2_fh(filp);
struct cx18_open_id *id = fh2id(fh);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
@@ -709,11 +709,11 @@ int cx18_v4l2_close(struct file *filp)
}
if (id->type == CX18_ENC_STREAM_TYPE_YUV &&
- filp->private_data == vdev->queue->owner) {
+ file_to_v4l2_fh(filp) == vdev->queue->owner) {
vb2_queue_release(vdev->queue);
vdev->queue->owner = NULL;
}
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, filp);
v4l2_fh_exit(fh);
/* 'Unclaim' this stream */
@@ -743,8 +743,7 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
item->type = s->type;
item->open_id = cx->open_id++;
- filp->private_data = &item->fh;
- v4l2_fh_add(&item->fh);
+ v4l2_fh_add(&item->fh, filp);
if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
v4l2_fh_is_singular_file(filp)) {
@@ -752,7 +751,7 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
if (atomic_read(&cx->ana_capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- v4l2_fh_del(&item->fh);
+ v4l2_fh_del(&item->fh, filp);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
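
Throughout these hunks v4l2_fh_add() and v4l2_fh_del() gain a struct file * argument, so the V4L2 core owns the filp <-> fh association and drivers stop writing filp->private_data themselves. Sketch of the resulting open/close pairing, assuming the signatures shown in this diff:

/* open: init the embedded fh, then register it against the file */
v4l2_fh_init(&item->fh, &s->vdev);
v4l2_fh_add(&item->fh, filp);

/* error unwind or close: unregister from the file, then tear down */
v4l2_fh_del(&item->fh, filp);
v4l2_fh_exit(&item->fh);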
diff --git a/drivers/media/pci/cx18/cx18-fileops.h b/drivers/media/pci/cx18/cx18-fileops.h
index 943057b83d94..bc999ea85dc2 100644
--- a/drivers/media/pci/cx18/cx18-fileops.h
+++ b/drivers/media/pci/cx18/cx18-fileops.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-fileops.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
/* Testing/Debugging */
diff --git a/drivers/media/pci/cx18/cx18-firmware.c b/drivers/media/pci/cx18/cx18-firmware.c
index 1b038b2802bf..94e17948fb30 100644
--- a/drivers/media/pci/cx18/cx18-firmware.c
+++ b/drivers/media/pci/cx18/cx18-firmware.c
@@ -2,7 +2,7 @@
/*
* cx18 firmware functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-firmware.h b/drivers/media/pci/cx18/cx18-firmware.h
index 1357f76d613e..8e63677284bd 100644
--- a/drivers/media/pci/cx18/cx18-firmware.h
+++ b/drivers/media/pci/cx18/cx18-firmware.h
@@ -2,7 +2,7 @@
/*
* cx18 firmware functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
int cx18_firmware_init(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index 485a6cbeb15a..4aea92639599 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-gpio.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-gpio.h b/drivers/media/pci/cx18/cx18-gpio.h
index 8d5797dea7f5..3eefc4644101 100644
--- a/drivers/media/pci/cx18/cx18-gpio.h
+++ b/drivers/media/pci/cx18/cx18-gpio.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-gpio.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-i2c.c b/drivers/media/pci/cx18/cx18-i2c.c
index a83435245251..a1abb0631cae 100644
--- a/drivers/media/pci/cx18/cx18-i2c.c
+++ b/drivers/media/pci/cx18/cx18-i2c.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-i2c.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-i2c.h b/drivers/media/pci/cx18/cx18-i2c.h
index 4526cb324fec..8ae3125c78c0 100644
--- a/drivers/media/pci/cx18/cx18-i2c.h
+++ b/drivers/media/pci/cx18/cx18-i2c.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-i2c.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
int cx18_i2c_register(struct cx18 *cx, unsigned idx);
diff --git a/drivers/media/pci/cx18/cx18-io.c b/drivers/media/pci/cx18/cx18-io.c
index 50e4e8a598d4..1d3d006e6329 100644
--- a/drivers/media/pci/cx18/cx18-io.c
+++ b/drivers/media/pci/cx18/cx18-io.c
@@ -2,7 +2,7 @@
/*
* cx18 driver PCI memory mapped IO access routines
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-io.h b/drivers/media/pci/cx18/cx18-io.h
index 190b142d047e..1f0bfad7d0f0 100644
--- a/drivers/media/pci/cx18/cx18-io.h
+++ b/drivers/media/pci/cx18/cx18-io.h
@@ -2,7 +2,7 @@
/*
* cx18 driver PCI memory mapped IO access routines
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 9a1512b1ccaa..0d676a57e24e 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-ioctl.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -52,7 +52,7 @@ static const struct v4l2_fmtdesc cx18_formats_mpeg[] = {
static int cx18_g_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
struct cx18_stream *s = &cx->streams[id->type];
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -76,7 +76,7 @@ static int cx18_g_fmt_vid_cap(struct file *file, void *fh,
static int cx18_try_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
int w = pixfmt->width;
@@ -121,7 +121,7 @@ static int cx18_try_fmt_vid_cap(struct file *file, void *fh,
static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -261,7 +261,7 @@ u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt)
static int cx18_g_fmt_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi;
vbifmt->sampling_rate = 27000000;
@@ -280,7 +280,7 @@ static int cx18_g_fmt_vbi_cap(struct file *file, void *fh,
static int cx18_g_fmt_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
/* sane, V4L2 spec compliant, defaults */
@@ -311,7 +311,7 @@ static int cx18_try_fmt_vbi_cap(struct file *file, void *fh,
static int cx18_try_fmt_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
@@ -330,7 +330,7 @@ static int cx18_try_fmt_sliced_vbi_cap(struct file *file, void *fh,
static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
int ret;
@@ -360,7 +360,7 @@ static int cx18_s_fmt_vbi_cap(struct file *file, void *fh,
static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
int ret;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
@@ -392,7 +392,7 @@ static int cx18_s_fmt_sliced_vbi_cap(struct file *file, void *fh,
static int cx18_g_register(struct file *file, void *fh,
struct v4l2_dbg_register *reg)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (reg->reg & 0x3)
return -EINVAL;
@@ -406,7 +406,7 @@ static int cx18_g_register(struct file *file, void *fh,
static int cx18_s_register(struct file *file, void *fh,
const struct v4l2_dbg_register *reg)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (reg->reg & 0x3)
return -EINVAL;
@@ -420,7 +420,7 @@ static int cx18_s_register(struct file *file, void *fh,
static int cx18_querycap(struct file *file, void *fh,
struct v4l2_capability *vcap)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
strscpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver));
@@ -431,14 +431,14 @@ static int cx18_querycap(struct file *file, void *fh,
static int cx18_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
return cx18_get_audio_input(cx, vin->index, vin);
}
static int cx18_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
vin->index = cx->audio_input;
return cx18_get_audio_input(cx, vin->index, vin);
@@ -446,7 +446,7 @@ static int cx18_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
static int cx18_s_audio(struct file *file, void *fh, const struct v4l2_audio *vout)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (vout->index >= cx->nof_audio_inputs)
return -EINVAL;
@@ -457,7 +457,7 @@ static int cx18_s_audio(struct file *file, void *fh, const struct v4l2_audio *vo
static int cx18_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
/* set it to defaults from our table */
return cx18_get_input(cx, vin->index, vin);
@@ -466,7 +466,7 @@ static int cx18_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
static int cx18_g_pixelaspect(struct file *file, void *fh,
int type, struct v4l2_fract *f)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -479,7 +479,7 @@ static int cx18_g_pixelaspect(struct file *file, void *fh,
static int cx18_g_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -499,7 +499,7 @@ static int cx18_g_selection(struct file *file, void *fh,
static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_fmtdesc *fmt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
if (id->type == CX18_ENC_STREAM_TYPE_YUV) {
if (fmt->index >= ARRAY_SIZE(cx18_formats_yuv))
@@ -515,16 +515,14 @@ static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
static int cx18_g_input(struct file *file, void *fh, unsigned int *i)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
*i = cx->active_input;
return 0;
}
-int cx18_s_input(struct file *file, void *fh, unsigned int inp)
+int cx18_do_s_input(struct cx18 *cx, unsigned int inp)
{
- struct cx18_open_id *id = fh2id(fh);
- struct cx18 *cx = id->cx;
v4l2_std_id std = V4L2_STD_ALL;
const struct cx18_card_video_input *card_input =
cx->card->video_inputs + inp;
@@ -558,10 +556,15 @@ int cx18_s_input(struct file *file, void *fh, unsigned int inp)
return 0;
}
+static int cx18_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return cx18_do_s_input(file2id(file)->cx, inp);
+}
+
static int cx18_g_frequency(struct file *file, void *fh,
struct v4l2_frequency *vf)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (vf->tuner != 0)
return -EINVAL;
@@ -570,11 +573,8 @@ static int cx18_g_frequency(struct file *file, void *fh,
return 0;
}
-int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf)
{
- struct cx18_open_id *id = fh2id(fh);
- struct cx18 *cx = id->cx;
-
if (vf->tuner != 0)
return -EINVAL;
@@ -585,19 +585,22 @@ int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
return 0;
}
+static int cx18_s_frequency(struct file *file, void *fh,
+ const struct v4l2_frequency *vf)
+{
+ return cx18_do_s_frequency(file2id(file)->cx, vf);
+}
+
static int cx18_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
*std = cx->std;
return 0;
}
-int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
+int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std)
{
- struct cx18_open_id *id = fh2id(fh);
- struct cx18 *cx = id->cx;
-
if ((std & V4L2_STD_ALL) == 0)
return -EINVAL;
@@ -642,9 +645,14 @@ int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
return 0;
}
+static int cx18_s_std(struct file *file, void *fh, v4l2_std_id std)
+{
+ return cx18_do_s_std(file2id(file)->cx, std);
+}
+
static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
if (vt->index != 0)
@@ -656,7 +664,7 @@ static int cx18_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt
static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
if (vt->index != 0)
return -EINVAL;
@@ -673,7 +681,7 @@ static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
static int cx18_g_sliced_vbi_cap(struct file *file, void *fh,
struct v4l2_sliced_vbi_cap *cap)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
int set = cx->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
int f, l;
@@ -794,7 +802,7 @@ static int cx18_process_idx_data(struct cx18_stream *s, struct cx18_mdl *mdl,
static int cx18_g_enc_index(struct file *file, void *fh,
struct v4l2_enc_idx *idx)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_IDX];
s32 tmp;
struct cx18_mdl *mdl;
@@ -841,7 +849,7 @@ static int cx18_g_enc_index(struct file *file, void *fh,
static int cx18_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *enc)
{
- struct cx18_open_id *id = fh2id(fh);
+ struct cx18_open_id *id = file2id(file);
struct cx18 *cx = id->cx;
u32 h;
@@ -900,7 +908,7 @@ static int cx18_encoder_cmd(struct file *file, void *fh,
static int cx18_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *enc)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
switch (enc->cmd) {
case V4L2_ENC_CMD_START:
@@ -932,7 +940,7 @@ static int cx18_try_encoder_cmd(struct file *file, void *fh,
static int cx18_log_status(struct file *file, void *fh)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
struct v4l2_input vidin;
struct v4l2_audio audin;
int i;
@@ -976,7 +984,7 @@ static int cx18_log_status(struct file *file, void *fh)
static long cx18_default(struct file *file, void *fh, bool valid_prio,
unsigned int cmd, void *arg)
{
- struct cx18 *cx = fh2id(fh)->cx;
+ struct cx18 *cx = file2id(file)->cx;
switch (cmd) {
case VIDIOC_INT_RESET: {
diff --git a/drivers/media/pci/cx18/cx18-ioctl.h b/drivers/media/pci/cx18/cx18-ioctl.h
index 221e2400fb3e..42a8acd69735 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.h
+++ b/drivers/media/pci/cx18/cx18-ioctl.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-ioctl.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -12,6 +12,8 @@ u16 cx18_service2vbi(int type);
void cx18_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt);
void cx18_set_funcs(struct video_device *vdev);
-int cx18_s_std(struct file *file, void *fh, v4l2_std_id std);
-int cx18_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
-int cx18_s_input(struct file *file, void *fh, unsigned int inp);
+
+struct cx18;
+int cx18_do_s_std(struct cx18 *cx, v4l2_std_id std);
+int cx18_do_s_frequency(struct cx18 *cx, const struct v4l2_frequency *vf);
+int cx18_do_s_input(struct cx18 *cx, unsigned int inp);
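
The header keeps only pointer-based prototypes, so a forward declaration of struct cx18 is enough and the full driver header need not be included. The general idiom:

/* a pointer-only interface needs no struct definition in scope */
struct foo;
int foo_do_thing(struct foo *f);	/* fine: only a pointer is passed */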
diff --git a/drivers/media/pci/cx18/cx18-irq.c b/drivers/media/pci/cx18/cx18-irq.c
index db63077821b1..0ef01e98255d 100644
--- a/drivers/media/pci/cx18/cx18-irq.c
+++ b/drivers/media/pci/cx18/cx18-irq.c
@@ -2,7 +2,7 @@
/*
* cx18 interrupt handling
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-irq.h b/drivers/media/pci/cx18/cx18-irq.h
index fdb585a72827..c1456d69dffa 100644
--- a/drivers/media/pci/cx18/cx18-irq.h
+++ b/drivers/media/pci/cx18/cx18-irq.h
@@ -2,7 +2,7 @@
/*
* cx18 interrupt handling
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-mailbox.c b/drivers/media/pci/cx18/cx18-mailbox.c
index a6457c23d18c..8c70b638a40c 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.c
+++ b/drivers/media/pci/cx18/cx18-mailbox.c
@@ -2,7 +2,7 @@
/*
* cx18 mailbox functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-mailbox.h b/drivers/media/pci/cx18/cx18-mailbox.h
index 971382ac0eca..1752b8e1ca13 100644
--- a/drivers/media/pci/cx18/cx18-mailbox.h
+++ b/drivers/media/pci/cx18/cx18-mailbox.h
@@ -2,7 +2,7 @@
/*
* cx18 mailbox functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 013694bfcb1c..eeb5513b1d52 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-queue.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
@@ -379,15 +379,22 @@ int cx18_stream_alloc(struct cx18_stream *s)
break;
}
+ buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
+ buf->buf, s->buf_size,
+ s->dma);
+ if (dma_mapping_error(&s->cx->pci_dev->dev, buf->dma_handle)) {
+ kfree(buf->buf);
+ kfree(mdl);
+ kfree(buf);
+ break;
+ }
+
INIT_LIST_HEAD(&mdl->list);
INIT_LIST_HEAD(&mdl->buf_list);
mdl->id = s->mdl_base_idx; /* a somewhat safe value */
cx18_enqueue(s, mdl, &s->q_idle);
INIT_LIST_HEAD(&buf->list);
- buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
- buf->buf, s->buf_size,
- s->dma);
cx18_buf_sync_for_cpu(s, buf);
list_add_tail(&buf->list, &s->buf_pool);
}
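
The reordered hunk above applies the standard DMA-API rule: a streaming mapping from dma_map_single() must be checked with dma_mapping_error() before the handle is used or the buffer is queued anywhere. Simplified sketch of the pattern, not the exact cx18 code:

buf->dma_handle = dma_map_single(dev, buf->buf, size, dir);
if (dma_mapping_error(dev, buf->dma_handle)) {
	kfree(buf->buf);	/* unwind: mapping failed, nothing to unmap */
	kfree(buf);
	return -ENOMEM;
}
/* only now is dma_handle valid to hand to the device */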
diff --git a/drivers/media/pci/cx18/cx18-queue.h b/drivers/media/pci/cx18/cx18-queue.h
index 26f2097c0496..972f234ffead 100644
--- a/drivers/media/pci/cx18/cx18-queue.h
+++ b/drivers/media/pci/cx18/cx18-queue.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-queue.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-scb.c b/drivers/media/pci/cx18/cx18-scb.c
index 4a0edc1e42e7..670dd9b0a332 100644
--- a/drivers/media/pci/cx18/cx18-scb.c
+++ b/drivers/media/pci/cx18/cx18-scb.c
@@ -2,7 +2,7 @@
/*
* cx18 System Control Block initialization
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-scb.h b/drivers/media/pci/cx18/cx18-scb.h
index 841edc0712ab..900f7291979f 100644
--- a/drivers/media/pci/cx18/cx18-scb.h
+++ b/drivers/media/pci/cx18/cx18-scb.h
@@ -2,7 +2,7 @@
/*
* cx18 System Control Block initialization
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index 42d6f7b90ede..48203ba16387 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-streams.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-streams.h b/drivers/media/pci/cx18/cx18-streams.h
index bba4349c5c2e..e01bed6b4827 100644
--- a/drivers/media/pci/cx18/cx18-streams.h
+++ b/drivers/media/pci/cx18/cx18-streams.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-streams.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*/
diff --git a/drivers/media/pci/cx18/cx18-vbi.c b/drivers/media/pci/cx18/cx18-vbi.c
index c7cce38dd754..8dc4ce325935 100644
--- a/drivers/media/pci/cx18/cx18-vbi.c
+++ b/drivers/media/pci/cx18/cx18-vbi.c
@@ -4,7 +4,7 @@
*
* Derived from ivtv-vbi.c
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-vbi.h b/drivers/media/pci/cx18/cx18-vbi.h
index a5f81c6159f0..41f5026696c4 100644
--- a/drivers/media/pci/cx18/cx18-vbi.h
+++ b/drivers/media/pci/cx18/cx18-vbi.h
@@ -4,7 +4,7 @@
*
* Derived from ivtv-vbi.h
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
void cx18_process_vbi_data(struct cx18 *cx, struct cx18_mdl *mdl,
diff --git a/drivers/media/pci/cx18/cx18-version.h b/drivers/media/pci/cx18/cx18-version.h
index e7396182fc5a..e8636ac5d5a5 100644
--- a/drivers/media/pci/cx18/cx18-version.h
+++ b/drivers/media/pci/cx18/cx18-version.h
@@ -2,7 +2,7 @@
/*
* cx18 driver version information
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef CX18_VERSION_H
diff --git a/drivers/media/pci/cx18/cx18-video.c b/drivers/media/pci/cx18/cx18-video.c
index 2fde8c2d3fdc..86cd44053d34 100644
--- a/drivers/media/pci/cx18/cx18-video.c
+++ b/drivers/media/pci/cx18/cx18-video.c
@@ -2,7 +2,7 @@
/*
* cx18 video interface functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#include "cx18-driver.h"
diff --git a/drivers/media/pci/cx18/cx18-video.h b/drivers/media/pci/cx18/cx18-video.h
index f613975ca5f0..ef212f1c5b17 100644
--- a/drivers/media/pci/cx18/cx18-video.h
+++ b/drivers/media/pci/cx18/cx18-video.h
@@ -2,7 +2,7 @@
/*
* cx18 video interface functions
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
void cx18_video_set_io(struct cx18 *cx);
diff --git a/drivers/media/pci/cx18/cx23418.h b/drivers/media/pci/cx18/cx23418.h
index 8859c0e8557f..22486f39bcda 100644
--- a/drivers/media/pci/cx18/cx23418.h
+++ b/drivers/media/pci/cx18/cx23418.h
@@ -2,7 +2,7 @@
/*
* cx18 header containing common defines.
*
- * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef CX23418_H
diff --git a/drivers/media/pci/intel/ipu-bridge.c b/drivers/media/pci/intel/ipu-bridge.c
index 4e579352ab2c..c9827e9fe6ff 100644
--- a/drivers/media/pci/intel/ipu-bridge.c
+++ b/drivers/media/pci/intel/ipu-bridge.c
@@ -79,6 +79,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
IPU_SENSOR_CONFIG("OVTI02C1", 1, 400000000),
/* Omnivision OV02E10 */
IPU_SENSOR_CONFIG("OVTI02E1", 1, 360000000),
+ /* Omnivision OV05C10 */
+ IPU_SENSOR_CONFIG("OVTI05C1", 1, 480000000),
/* Omnivision OV08A10 */
IPU_SENSOR_CONFIG("OVTI08A1", 1, 500000000),
/* Omnivision OV08x40 */
@@ -90,6 +92,8 @@ static const struct ipu_sensor_config ipu_supported_sensors[] = {
IPU_SENSOR_CONFIG("OVTI2680", 1, 331200000),
/* Omnivision OV8856 */
IPU_SENSOR_CONFIG("OVTI8856", 3, 180000000, 360000000, 720000000),
+ /* Sony IMX471 */
+ IPU_SENSOR_CONFIG("SONY471A", 1, 200000000),
/* Toshiba T4KA3 */
IPU_SENSOR_CONFIG("XMCC0003", 1, 321468000),
};
@@ -563,8 +567,8 @@ static void ipu_bridge_instantiate_vcm_work(struct work_struct *work)
vcm_client = i2c_acpi_new_device_by_fwnode(acpi_fwnode_handle(adev),
1, &data->board_info);
if (IS_ERR(vcm_client)) {
- dev_err(data->sensor, "Error instantiating VCM client: %ld\n",
- PTR_ERR(vcm_client));
+ dev_err(data->sensor, "Error instantiating VCM client: %pe\n",
+ vcm_client);
goto out_pm_put;
}
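
The dev_err() change switches from PTR_ERR()+%ld to the %pe printk specifier, which prints an ERR_PTR symbolically (e.g. "-EINVAL") instead of as a raw number. Generic pattern, with some_lookup() as a hypothetical call:

client = some_lookup(...);
if (IS_ERR(client)) {
	dev_err(dev, "lookup failed: %pe\n", client);	/* symbolic errno */
	return PTR_ERR(client);				/* numeric errno */
}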
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index a87f105beb5e..986b9afd7cb5 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -314,8 +314,8 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
src_pad = media_entity_remote_source_pad_unique(&q->subdev.entity);
if (IS_ERR(src_pad)) {
- dev_err(dev, "can't get source pad of %s (%ld)\n",
- q->subdev.name, PTR_ERR(src_pad));
+ dev_err(dev, "can't get source pad of %s (%pe)\n",
+ q->subdev.name, src_pad);
return PTR_ERR(src_pad);
}
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
index 6030bd23b4b9..43a2a16a3c2a 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-csi2.c
@@ -42,6 +42,10 @@ static const u32 csi2_supported_codes[] = {
MEDIA_BUS_FMT_SGBRG8_1X8,
MEDIA_BUS_FMT_SGRBG8_1X8,
MEDIA_BUS_FMT_SRGGB8_1X8,
+ MEDIA_BUS_FMT_Y8_1X8,
+ MEDIA_BUS_FMT_Y10_1X10,
+ MEDIA_BUS_FMT_Y12_1X12,
+ MEDIA_BUS_FMT_Y16_1X16,
MEDIA_BUS_FMT_META_8,
MEDIA_BUS_FMT_META_10,
MEDIA_BUS_FMT_META_12,
@@ -87,8 +91,8 @@ s64 ipu6_isys_csi2_get_link_freq(struct ipu6_isys_csi2 *csi2)
src_pad = media_entity_remote_source_pad_unique(&csi2->asd.sd.entity);
if (IS_ERR(src_pad)) {
dev_err(&csi2->isys->adev->auxdev.dev,
- "can't get source pad of %s (%ld)\n",
- csi2->asd.sd.name, PTR_ERR(src_pad));
+ "can't get source pad of %s (%pe)\n",
+ csi2->asd.sd.name, src_pad);
return PTR_ERR(src_pad);
}
@@ -109,7 +113,7 @@ static int csi2_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
case V4L2_EVENT_FRAME_SYNC:
return v4l2_event_subscribe(fh, sub, 10, NULL);
case V4L2_EVENT_CTRL:
- return v4l2_ctrl_subscribe_event(fh, sub);
+ return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
default:
return -EINVAL;
}
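
For V4L2_EVENT_CTRL a subdev should use v4l2_ctrl_subdev_subscribe_event(), which resolves the control handler from the subdev itself rather than from the fh's video device. Sketch of a subdev event-subscription op using this helper:

static int my_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
			      struct v4l2_event_subscription *sub)
{
	switch (sub->type) {
	case V4L2_EVENT_CTRL:
		/* routes through sd->ctrl_handler */
		return v4l2_ctrl_subdev_subscribe_event(sd, fh, sub);
	default:
		return -EINVAL;
	}
}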
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
index 0a06de5c739c..869e7d4ba572 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-subdev.c
@@ -25,24 +25,28 @@ unsigned int ipu6_isys_mbus_code_to_bpp(u32 code)
case MEDIA_BUS_FMT_RGB565_1X16:
case MEDIA_BUS_FMT_UYVY8_1X16:
case MEDIA_BUS_FMT_YUYV8_1X16:
+ case MEDIA_BUS_FMT_Y16_1X16:
case MEDIA_BUS_FMT_META_16:
return 16;
case MEDIA_BUS_FMT_SBGGR12_1X12:
case MEDIA_BUS_FMT_SGBRG12_1X12:
case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_Y12_1X12:
case MEDIA_BUS_FMT_META_12:
return 12;
case MEDIA_BUS_FMT_SBGGR10_1X10:
case MEDIA_BUS_FMT_SGBRG10_1X10:
case MEDIA_BUS_FMT_SGRBG10_1X10:
case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_Y10_1X10:
case MEDIA_BUS_FMT_META_10:
return 10;
case MEDIA_BUS_FMT_SBGGR8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
case MEDIA_BUS_FMT_META_8:
return 8;
default:
@@ -65,22 +69,32 @@ unsigned int ipu6_isys_mbus_code_to_mipi(u32 code)
case MEDIA_BUS_FMT_SGBRG16_1X16:
case MEDIA_BUS_FMT_SGRBG16_1X16:
case MEDIA_BUS_FMT_SRGGB16_1X16:
+ case MEDIA_BUS_FMT_Y16_1X16:
return MIPI_CSI2_DT_RAW16;
case MEDIA_BUS_FMT_SBGGR12_1X12:
case MEDIA_BUS_FMT_SGBRG12_1X12:
case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_Y12_1X12:
return MIPI_CSI2_DT_RAW12;
case MEDIA_BUS_FMT_SBGGR10_1X10:
case MEDIA_BUS_FMT_SGBRG10_1X10:
case MEDIA_BUS_FMT_SGRBG10_1X10:
case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_Y10_1X10:
return MIPI_CSI2_DT_RAW10;
case MEDIA_BUS_FMT_SBGGR8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
return MIPI_CSI2_DT_RAW8;
+ case MEDIA_BUS_FMT_META_8:
+ case MEDIA_BUS_FMT_META_10:
+ case MEDIA_BUS_FMT_META_12:
+ case MEDIA_BUS_FMT_META_16:
+ case MEDIA_BUS_FMT_META_24:
+ return MIPI_CSI2_DT_EMBEDDED_8B;
default:
/* return unavailable MIPI data type - 0x3f */
WARN_ON(1);
@@ -90,15 +104,23 @@ unsigned int ipu6_isys_mbus_code_to_mipi(u32 code)
bool ipu6_isys_is_bayer_format(u32 code)
{
- switch (ipu6_isys_mbus_code_to_mipi(code)) {
- case MIPI_CSI2_DT_RAW8:
- case MIPI_CSI2_DT_RAW10:
- case MIPI_CSI2_DT_RAW12:
- case MIPI_CSI2_DT_RAW14:
- case MIPI_CSI2_DT_RAW16:
- case MIPI_CSI2_DT_RAW20:
- case MIPI_CSI2_DT_RAW24:
- case MIPI_CSI2_DT_RAW28:
+ switch (code) {
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ case MEDIA_BUS_FMT_SGBRG8_1X8:
+ case MEDIA_BUS_FMT_SGRBG8_1X8:
+ case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_SRGGB16_1X16:
+ case MEDIA_BUS_FMT_SGRBG16_1X16:
+ case MEDIA_BUS_FMT_SGBRG16_1X16:
+ case MEDIA_BUS_FMT_SBGGR16_1X16:
return true;
default:
return false;
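
The rewrite is needed because of the greyscale formats added above: MEDIA_BUS_FMT_Y10_1X10 also maps to MIPI_CSI2_DT_RAW10, so classifying "bayer" by MIPI data type would start misreporting grey sensors as bayer. Matching explicit media bus codes keeps the two sets disjoint (return values assumed from the switch above):

ipu6_isys_is_bayer_format(MEDIA_BUS_FMT_SGRBG10_1X10);	/* true */
ipu6_isys_is_bayer_format(MEDIA_BUS_FMT_Y10_1X10);	/* false, though both are RAW10 on the wire */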
diff --git a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
index 24a2ef93474c..dec8f5ffcfa5 100644
--- a/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
+++ b/drivers/media/pci/intel/ipu6/ipu6-isys-video.c
@@ -77,6 +77,20 @@ const struct ipu6_isys_pixelformat ipu6_isys_pfmts[] = {
IPU6_FW_ISYS_FRAME_FORMAT_RAW10 },
{ V4L2_PIX_FMT_SRGGB10P, 10, 10, MEDIA_BUS_FMT_SRGGB10_1X10,
IPU6_FW_ISYS_FRAME_FORMAT_RAW10 },
+
+ { V4L2_PIX_FMT_GREY, 8, 8, MEDIA_BUS_FMT_Y8_1X8,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW8 },
+ { V4L2_PIX_FMT_Y10, 16, 10, MEDIA_BUS_FMT_Y10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_Y12, 16, 12, MEDIA_BUS_FMT_Y12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_Y16, 16, 16, MEDIA_BUS_FMT_Y16_1X16,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW16 },
+ { V4L2_PIX_FMT_Y10P, 10, 10, MEDIA_BUS_FMT_Y10_1X10,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW10 },
+ { V4L2_PIX_FMT_Y12P, 12, 12, MEDIA_BUS_FMT_Y12_1X12,
+ IPU6_FW_ISYS_FRAME_FORMAT_RAW12 },
+
{ V4L2_PIX_FMT_UYVY, 16, 16, MEDIA_BUS_FMT_UYVY8_1X16,
IPU6_FW_ISYS_FRAME_FORMAT_UYVY},
{ V4L2_PIX_FMT_YUYV, 16, 16, MEDIA_BUS_FMT_YUYV8_1X16,
@@ -1306,7 +1320,6 @@ int ipu6_isys_video_init(struct ipu6_isys_video *av)
__ipu6_isys_vidioc_try_fmt_meta_cap(av, &format_meta);
av->meta_fmt = format_meta.fmt.meta;
- set_bit(V4L2_FL_USES_V4L2_FH, &av->vdev.flags);
video_set_drvdata(&av->vdev, av);
ret = video_register_device(&av->vdev, VFL_TYPE_VIDEO, -1);
diff --git a/drivers/media/pci/intel/ivsc/mei_ace.c b/drivers/media/pci/intel/ivsc/mei_ace.c
index 98310b8511b1..b306a320b70f 100644
--- a/drivers/media/pci/intel/ivsc/mei_ace.c
+++ b/drivers/media/pci/intel/ivsc/mei_ace.c
@@ -414,10 +414,11 @@ static int mei_ace_setup_dev_link(struct mei_ace *ace)
/* setup link between mei_ace and mei_csi */
ace->csi_link = device_link_add(csi_dev, dev, DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE | DL_FLAG_STATELESS);
+ put_device(csi_dev);
if (!ace->csi_link) {
ret = -EINVAL;
dev_err(dev, "failed to link to %s\n", dev_name(csi_dev));
- goto err_put;
+ goto err;
}
ace->csi_dev = csi_dev;
@@ -522,7 +523,6 @@ static void mei_ace_remove(struct mei_cl_device *cldev)
cancel_work_sync(&ace->work);
device_link_del(ace->csi_link);
- put_device(ace->csi_dev);
pm_runtime_disable(&cldev->dev);
pm_runtime_set_suspended(&cldev->dev);
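
The mei_ace hunks fix a reference-lifetime pattern: device_link_add() takes its own references on both ends of the link, so the reference obtained when looking up the CSI device can be dropped as soon as the link attempt is made, and remove() no longer needs a matching put_device(). Sketch, assuming a lookup helper that returns a referenced device:

csi_dev = find_csi_device();		/* hypothetical, returns a ref */
link = device_link_add(csi_dev, dev,
		       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE |
		       DL_FLAG_STATELESS);
put_device(csi_dev);			/* the link holds its own refs */
if (!link)
	return -EINVAL;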
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index 8f346d7da9c8..269a799ec046 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -148,14 +148,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
- v4l2_fh_init(&item.fh, &s->vdev);
item.itv = itv;
item.type = s->type;
/* See if the stream is available */
if (ivtv_claim_stream(&item, item.type)) {
/* No, it's already in use */
- v4l2_fh_exit(&item.fh);
snd_ivtv_unlock(itvsc);
return -EBUSY;
}
diff --git a/drivers/media/pci/ivtv/ivtv-cards.c b/drivers/media/pci/ivtv/ivtv-cards.c
index c8f4ed7ff2c6..f2ccf8e98664 100644
--- a/drivers/media/pci/ivtv/ivtv-cards.c
+++ b/drivers/media/pci/ivtv/ivtv-cards.c
@@ -2,7 +2,7 @@
/*
Functions to query card hardware
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-cards.h b/drivers/media/pci/ivtv/ivtv-cards.h
index c252733df340..af64e55d3b80 100644
--- a/drivers/media/pci/ivtv/ivtv-cards.h
+++ b/drivers/media/pci/ivtv/ivtv-cards.h
@@ -2,7 +2,7 @@
/*
Functions to query card hardware
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-controls.c b/drivers/media/pci/ivtv/ivtv-controls.c
index a0b9a5a5c7f1..f087a12c4ebd 100644
--- a/drivers/media/pci/ivtv/ivtv-controls.c
+++ b/drivers/media/pci/ivtv/ivtv-controls.c
@@ -2,7 +2,7 @@
/*
ioctl control functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-controls.h b/drivers/media/pci/ivtv/ivtv-controls.h
index 444c86a47e5a..d152691ea255 100644
--- a/drivers/media/pci/ivtv/ivtv-controls.h
+++ b/drivers/media/pci/ivtv/ivtv-controls.h
@@ -2,7 +2,7 @@
/*
ioctl control functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index ecc20cd89926..459eb6cc370c 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
ivtv driver initialization and card probing
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
/* Main Driver file for the ivtv project:
@@ -1260,15 +1247,12 @@ err:
int ivtv_init_on_first_open(struct ivtv *itv)
{
- struct v4l2_frequency vf;
/* Needed to call ioctls later */
- struct ivtv_open_id fh;
+ struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
+ struct v4l2_frequency vf;
int fw_retry_count = 3;
int video_input;
- fh.itv = itv;
- fh.type = IVTV_ENC_STREAM_TYPE_MPG;
-
if (test_bit(IVTV_F_I_FAILED, &itv->i_flags))
return -ENXIO;
@@ -1310,13 +1294,13 @@ int ivtv_init_on_first_open(struct ivtv *itv)
video_input = itv->active_input;
itv->active_input++; /* Force update of input */
- ivtv_s_input(NULL, &fh, video_input);
+ ivtv_do_s_input(itv, video_input);
/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
in one place. */
itv->std++; /* Force full standard initialization */
itv->std_out = itv->std;
- ivtv_s_frequency(NULL, &fh, &vf);
+ ivtv_do_s_frequency(s, &vf);
if (itv->card->v4l2_capabilities & V4L2_CAP_VIDEO_OUTPUT) {
/* Turn on the TV-out: ivtv_init_mpeg_decoder() initializes
diff --git a/drivers/media/pci/ivtv/ivtv-driver.h b/drivers/media/pci/ivtv/ivtv-driver.h
index a6ffa99e16bc..f1f18911332e 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.h
+++ b/drivers/media/pci/ivtv/ivtv-driver.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
ivtv driver internal defines and structures
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef IVTV_DRIVER_H
@@ -322,6 +309,7 @@ struct ivtv_queue {
};
struct ivtv; /* forward reference */
+struct ivtv_open_id;
struct ivtv_stream {
/* These first four fields are always set, even if the stream
@@ -331,7 +319,7 @@ struct ivtv_stream {
const char *name; /* name of the stream */
int type; /* stream type */
- struct v4l2_fh *fh; /* pointer to the streaming filehandle */
+ struct ivtv_open_id *id; /* pointer to the streaming ivtv_open_id */
spinlock_t qlock; /* locks access to the queues */
unsigned long s_flags; /* status flags, see above */
int dma; /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -383,9 +371,9 @@ struct ivtv_open_id {
struct ivtv *itv;
};
-static inline struct ivtv_open_id *fh2id(struct v4l2_fh *fh)
+static inline struct ivtv_open_id *file2id(struct file *filp)
{
- return container_of(fh, struct ivtv_open_id, fh);
+ return container_of(file_to_v4l2_fh(filp), struct ivtv_open_id, fh);
}
struct yuv_frame_info
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
index cfa28d035586..ef9ec062c03a 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.c
+++ b/drivers/media/pci/ivtv/ivtv-fileops.c
@@ -3,7 +3,7 @@
file operation functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -39,16 +39,16 @@ int ivtv_claim_stream(struct ivtv_open_id *id, int type)
if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
/* someone already claimed this stream */
- if (s->fh == &id->fh) {
+ if (s->id == id) {
/* yes, this file descriptor did. So that's OK. */
return 0;
}
- if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
+ if (s->id == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
type == IVTV_ENC_STREAM_TYPE_VBI)) {
/* VBI is handled already internally, now also assign
the file descriptor to this stream for external
reading of the stream. */
- s->fh = &id->fh;
+ s->id = id;
IVTV_DEBUG_INFO("Start Read VBI\n");
return 0;
}
@@ -56,7 +56,7 @@ int ivtv_claim_stream(struct ivtv_open_id *id, int type)
IVTV_DEBUG_INFO("Stream %d is busy\n", type);
return -EBUSY;
}
- s->fh = &id->fh;
+ s->id = id;
if (type == IVTV_DEC_STREAM_TYPE_VBI) {
/* Enable reinsertion interrupt */
ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -94,7 +94,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
struct ivtv *itv = s->itv;
struct ivtv_stream *s_vbi;
- s->fh = NULL;
+ s->id = NULL;
if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* this stream is still in use internally */
@@ -126,7 +126,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
/* was already cleared */
return;
}
- if (s_vbi->fh) {
+ if (s_vbi->id) {
/* VBI stream still claimed by a file descriptor */
return;
}
@@ -359,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
size_t tot_written = 0;
int single_frame = 0;
- if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
+ if (atomic_read(&itv->capturing) == 0 && s->id == NULL) {
/* shouldn't happen */
IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
return -EIO;
@@ -502,7 +502,7 @@ int ivtv_start_capture(struct ivtv_open_id *id)
ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_t * pos)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
ssize_t rc;
@@ -564,7 +564,7 @@ static int ivtv_schedule_dma(struct ivtv_stream *s)
static ssize_t ivtv_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -719,7 +719,7 @@ retry:
ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t count, loff_t *pos)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
ssize_t res;
@@ -732,7 +732,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
__poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
__poll_t res = 0;
@@ -767,7 +767,7 @@ __poll_t ivtv_v4l2_dec_poll(struct file *filp, poll_table *wait)
__poll_t ivtv_v4l2_enc_poll(struct file *filp, poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
int eof = test_bit(IVTV_F_S_STREAMOFF, &s->s_flags);
@@ -831,7 +831,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
/* Also used internally, don't stop capturing */
- s->fh = NULL;
+ s->id = NULL;
}
else {
ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -877,8 +877,8 @@ static void ivtv_stop_decoding(struct ivtv_open_id *id, int flags, u64 pts)
int ivtv_v4l2_close(struct file *filp)
{
- struct v4l2_fh *fh = filp->private_data;
- struct ivtv_open_id *id = fh2id(fh);
+ struct v4l2_fh *fh = file_to_v4l2_fh(filp);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
@@ -911,11 +911,11 @@ int ivtv_v4l2_close(struct file *filp)
ivtv_unmute(itv);
}
- v4l2_fh_del(fh);
+ v4l2_fh_del(fh, filp);
v4l2_fh_exit(fh);
/* Easy case first: this stream was never claimed by us */
- if (s->fh != &id->fh)
+ if (s->id != id)
goto close_done;
/* 'Unclaim' this stream */
@@ -998,9 +998,7 @@ static int ivtv_open(struct file *filp)
v4l2_fh_init(&item->fh, &s->vdev);
item->itv = itv;
item->type = s->type;
-
- filp->private_data = &item->fh;
- v4l2_fh_add(&item->fh);
+ v4l2_fh_add(&item->fh, filp);
if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
v4l2_fh_is_singular_file(filp)) {
@@ -1008,7 +1006,7 @@ static int ivtv_open(struct file *filp)
if (atomic_read(&itv->capturing) > 0) {
/* switching to radio while capture is
in progress is not polite */
- v4l2_fh_del(&item->fh);
+ v4l2_fh_del(&item->fh, filp);
v4l2_fh_exit(&item->fh);
kfree(item);
return -EBUSY;
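
With ivtv_stream tracking its owner as a struct ivtv_open_id * instead of a v4l2_fh *, claim and release reduce to pointer-identity checks, and kernel-internal users (such as the ALSA PCM open path above) no longer need a dummy file handle. Simplified sketch of the claim logic:

if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
	if (s->id == id)
		return 0;	/* this opener already owns the stream */
	return -EBUSY;		/* claimed by someone else */
}
s->id = id;			/* first claim: record the owner */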
diff --git a/drivers/media/pci/ivtv/ivtv-fileops.h b/drivers/media/pci/ivtv/ivtv-fileops.h
index c2c01bba5d03..7bbe42b0030c 100644
--- a/drivers/media/pci/ivtv/ivtv-fileops.h
+++ b/drivers/media/pci/ivtv/ivtv-fileops.h
@@ -2,7 +2,7 @@
/*
file operation functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.c b/drivers/media/pci/ivtv/ivtv-firmware.c
index 56b25255faf5..1bc873ebef73 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.c
+++ b/drivers/media/pci/ivtv/ivtv-firmware.c
@@ -3,7 +3,7 @@
ivtv firmware functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-firmware.h b/drivers/media/pci/ivtv/ivtv-firmware.h
index 393e94a8d0e4..ce94b6c385de 100644
--- a/drivers/media/pci/ivtv/ivtv-firmware.h
+++ b/drivers/media/pci/ivtv/ivtv-firmware.h
@@ -3,7 +3,7 @@
ivtv firmware functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
index 6434c0d03a6d..d3477e1529c9 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.c
+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
@@ -3,7 +3,7 @@
gpio functions.
Merging GPIO support into driver:
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.h b/drivers/media/pci/ivtv/ivtv-gpio.h
index 4ad817173f04..686c9b5e9c19 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.h
+++ b/drivers/media/pci/ivtv/ivtv-gpio.h
@@ -2,7 +2,7 @@
/*
gpio functions.
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.c b/drivers/media/pci/ivtv/ivtv-i2c.c
index c052c57c6dce..28cb22d6a892 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.c
+++ b/drivers/media/pci/ivtv/ivtv-i2c.c
@@ -2,7 +2,7 @@
/*
I2C functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-i2c.h b/drivers/media/pci/ivtv/ivtv-i2c.h
index 2d9cdaa682c5..504bbc1dee25 100644
--- a/drivers/media/pci/ivtv/ivtv-i2c.h
+++ b/drivers/media/pci/ivtv/ivtv-i2c.h
@@ -2,7 +2,7 @@
/*
I2C functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 7947dcd615e8..8d5ea3aec06f 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -2,7 +2,7 @@
/*
ioctl system call
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -308,7 +308,7 @@ static int ivtv_video_command(struct ivtv *itv, struct ivtv_open_id *id,
static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
vbifmt->reserved[0] = 0;
@@ -330,7 +330,7 @@ static int ivtv_g_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_fo
static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -353,7 +353,7 @@ static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
struct v4l2_vbi_format *vbifmt = &fmt->fmt.vbi;
vbifmt->sampling_rate = 27000000;
@@ -372,7 +372,7 @@ static int ivtv_g_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
vbifmt->reserved[0] = 0;
@@ -394,7 +394,7 @@ static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
@@ -434,8 +434,8 @@ static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_g_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
struct v4l2_window *winfmt = &fmt->fmt.win;
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
@@ -461,7 +461,7 @@ static int ivtv_try_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_
static int ivtv_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
int w = fmt->fmt.pix.width;
int h = fmt->fmt.pix.height;
@@ -490,7 +490,7 @@ static int ivtv_try_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format
static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
if (id->type == IVTV_DEC_STREAM_TYPE_VBI)
@@ -510,7 +510,7 @@ static int ivtv_try_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_
static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
s32 w = fmt->fmt.pix.width;
s32 h = fmt->fmt.pix.height;
int field = fmt->fmt.pix.field;
@@ -544,8 +544,8 @@ static int ivtv_try_fmt_vid_out(struct file *file, void *fh, struct v4l2_format
static int ivtv_try_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
u32 chromakey = fmt->fmt.win.chromakey;
u8 global_alpha = fmt->fmt.win.global_alpha;
@@ -566,7 +566,7 @@ static int ivtv_s_fmt_sliced_vbi_out(struct file *file, void *fh, struct v4l2_fo
static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct v4l2_subdev_format format = {
.which = V4L2_SUBDEV_FORMAT_ACTIVE,
@@ -597,7 +597,7 @@ static int ivtv_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (!ivtv_raw_vbi(itv) && atomic_read(&itv->capturing) > 0)
return -EBUSY;
@@ -610,7 +610,7 @@ static int ivtv_s_fmt_vbi_cap(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
int ret = ivtv_try_fmt_sliced_vbi_cap(file, fh, fmt);
@@ -628,7 +628,7 @@ static int ivtv_s_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
int ret = ivtv_try_fmt_vid_out(file, fh, fmt);
@@ -673,7 +673,7 @@ static int ivtv_s_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
static int ivtv_s_fmt_vid_out_overlay(struct file *file, void *fh, struct v4l2_format *fmt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
int ret = ivtv_try_fmt_vid_out_overlay(file, fh, fmt);
if (ret == 0) {
@@ -710,7 +710,7 @@ static int ivtv_itvc(struct ivtv *itv, bool get, u64 reg, u64 *val)
static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
reg->size = 4;
return ivtv_itvc(itv, true, reg->reg, &reg->val);
@@ -718,7 +718,7 @@ static int ivtv_g_register(struct file *file, void *fh, struct v4l2_dbg_register
static int ivtv_s_register(struct file *file, void *fh, const struct v4l2_dbg_register *reg)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
u64 val = reg->val;
return ivtv_itvc(itv, false, reg->reg, &val);
@@ -727,7 +727,7 @@ static int ivtv_s_register(struct file *file, void *fh, const struct v4l2_dbg_re
static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vcap)
{
- struct ivtv_open_id *id = fh2id(file->private_data);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
strscpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
@@ -738,14 +738,14 @@ static int ivtv_querycap(struct file *file, void *fh, struct v4l2_capability *vc
static int ivtv_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
return ivtv_get_audio_input(itv, vin->index, vin);
}
static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
vin->index = itv->audio_input;
return ivtv_get_audio_input(itv, vin->index, vin);
@@ -753,7 +753,7 @@ static int ivtv_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
static int ivtv_s_audio(struct file *file, void *fh, const struct v4l2_audio *vout)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (vout->index >= itv->nof_audio_inputs)
return -EINVAL;
@@ -766,7 +766,7 @@ static int ivtv_s_audio(struct file *file, void *fh, const struct v4l2_audio *vo
static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
/* set it to defaults from our table */
return ivtv_get_audio_output(itv, vin->index, vin);
@@ -774,7 +774,7 @@ static int ivtv_enumaudout(struct file *file, void *fh, struct v4l2_audioout *vi
static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
vin->index = 0;
return ivtv_get_audio_output(itv, vin->index, vin);
@@ -782,7 +782,7 @@ static int ivtv_g_audout(struct file *file, void *fh, struct v4l2_audioout *vin)
static int ivtv_s_audout(struct file *file, void *fh, const struct v4l2_audioout *vout)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (itv->card->video_outputs == NULL || vout->index != 0)
return -EINVAL;
@@ -791,7 +791,7 @@ static int ivtv_s_audout(struct file *file, void *fh, const struct v4l2_audioout
static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
/* set it to defaults from our table */
return ivtv_get_input(itv, vin->index, vin);
@@ -799,7 +799,7 @@ static int ivtv_enum_input(struct file *file, void *fh, struct v4l2_input *vin)
static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vout)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
return ivtv_get_output(itv, vout->index, vout);
}
@@ -807,7 +807,7 @@ static int ivtv_enum_output(struct file *file, void *fh, struct v4l2_output *vou
static int ivtv_g_pixelaspect(struct file *file, void *fh,
int type, struct v4l2_fract *f)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -825,7 +825,7 @@ static int ivtv_g_pixelaspect(struct file *file, void *fh,
static int ivtv_s_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
struct v4l2_rect r = { 0, 0, 720, 0 };
@@ -868,7 +868,7 @@ static int ivtv_s_selection(struct file *file, void *fh,
static int ivtv_g_selection(struct file *file, void *fh,
struct v4l2_selection *sel)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
struct v4l2_rect r = { 0, 0, 720, 0 };
@@ -924,8 +924,8 @@ static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
.description = "MPEG",
.pixelformat = V4L2_PIX_FMT_MPEG,
};
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (fmt->index)
return -EINVAL;
@@ -951,8 +951,8 @@ static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdes
.description = "MPEG",
.pixelformat = V4L2_PIX_FMT_MPEG,
};
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (fmt->index)
return -EINVAL;
@@ -967,16 +967,15 @@ static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdes
static int ivtv_g_input(struct file *file, void *fh, unsigned int *i)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
*i = itv->active_input;
return 0;
}
-int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
+int ivtv_do_s_input(struct ivtv *itv, unsigned int inp)
{
- struct ivtv *itv = fh2id(fh)->itv;
v4l2_std_id std;
int i;
@@ -1017,9 +1016,14 @@ int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
return 0;
}
+static int ivtv_s_input(struct file *file, void *fh, unsigned int inp)
+{
+ return ivtv_do_s_input(file2id(file)->itv, inp);
+}
+
static int ivtv_g_output(struct file *file, void *fh, unsigned int *i)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (!(itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT))
return -EINVAL;
@@ -1031,7 +1035,7 @@ static int ivtv_g_output(struct file *file, void *fh, unsigned int *i)
static int ivtv_s_output(struct file *file, void *fh, unsigned int outp)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (outp >= itv->card->nof_outputs)
return -EINVAL;
@@ -1053,8 +1057,8 @@ static int ivtv_s_output(struct file *file, void *fh, unsigned int outp)
static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (s->vdev.vfl_dir)
return -ENOTTY;
@@ -1065,10 +1069,9 @@ static int ivtv_g_frequency(struct file *file, void *fh, struct v4l2_frequency *
return 0;
}
-int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
+int ivtv_do_s_frequency(struct ivtv_stream *s, const struct v4l2_frequency *vf)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = s->itv;
if (s->vdev.vfl_dir)
return -ENOTTY;
@@ -1082,9 +1085,18 @@ int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *v
return 0;
}
+static int ivtv_s_frequency(struct file *file, void *fh,
+ const struct v4l2_frequency *vf)
+{
+ struct ivtv_open_id *id = file2id(file);
+ struct ivtv *itv = id->itv;
+
+ return ivtv_do_s_frequency(&itv->streams[id->type], vf);
+}
+
static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
*std = itv->std;
return 0;
@@ -1157,7 +1169,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id std)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if ((std & V4L2_STD_ALL) == 0)
return -EINVAL;
@@ -1185,7 +1197,7 @@ static int ivtv_s_std(struct file *file, void *fh, v4l2_std_id std)
static int ivtv_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
if (vt->index != 0)
@@ -1198,7 +1210,7 @@ static int ivtv_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt
static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (vt->index != 0)
return -EINVAL;
@@ -1214,7 +1226,7 @@ static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced_vbi_cap *cap)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
int set = itv->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
int f, l;
@@ -1249,7 +1261,7 @@ static int ivtv_g_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_sliced
static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *idx)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
struct v4l2_enc_idx_entry *e = idx->entry;
int entries;
int i;
@@ -1275,7 +1287,7 @@ static int ivtv_g_enc_index(struct file *file, void *fh, struct v4l2_enc_idx *id
static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
@@ -1327,7 +1339,7 @@ static int ivtv_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd
static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *enc)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
switch (enc->cmd) {
case V4L2_ENC_CMD_START:
@@ -1357,8 +1369,8 @@ static int ivtv_try_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder
static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
{
- struct ivtv *itv = fh2id(fh)->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv *itv = file2id(file)->itv;
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
u32 data[CX2341X_MBOX_MAX_DATA];
struct yuv_playback_info *yi = &itv->yuv_info;
@@ -1444,9 +1456,9 @@ static int ivtv_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *fb)
static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *fb)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
struct yuv_playback_info *yi = &itv->yuv_info;
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
@@ -1465,9 +1477,9 @@ static int ivtv_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffe
static int ivtv_overlay(struct file *file, void *fh, unsigned int on)
{
- struct ivtv_open_id *id = fh2id(fh);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
- struct ivtv_stream *s = &itv->streams[fh2id(fh)->type];
+ struct ivtv_stream *s = &itv->streams[file2id(file)->type];
if (!(s->vdev.device_caps & V4L2_CAP_VIDEO_OUTPUT_OVERLAY))
return -ENOTTY;
@@ -1492,7 +1504,7 @@ static int ivtv_subscribe_event(struct v4l2_fh *fh, const struct v4l2_event_subs
static int ivtv_log_status(struct file *file, void *fh)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
u32 data[CX2341X_MBOX_MAX_DATA];
int has_output = itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT;
@@ -1584,7 +1596,7 @@ static int ivtv_log_status(struct file *file, void *fh)
static int ivtv_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec)
{
- struct ivtv_open_id *id = fh2id(file->private_data);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
IVTV_DEBUG_IOCTL("VIDIOC_DECODER_CMD %d\n", dec->cmd);
@@ -1593,7 +1605,7 @@ static int ivtv_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd
static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder_cmd *dec)
{
- struct ivtv_open_id *id = fh2id(file->private_data);
+ struct ivtv_open_id *id = file2id(file);
struct ivtv *itv = id->itv;
IVTV_DEBUG_IOCTL("VIDIOC_TRY_DECODER_CMD %d\n", dec->cmd);
@@ -1602,7 +1614,7 @@ static int ivtv_try_decoder_cmd(struct file *file, void *fh, struct v4l2_decoder
static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
{
- struct ivtv_open_id *id = fh2id(filp->private_data);
+ struct ivtv_open_id *id = file2id(filp);
struct ivtv *itv = id->itv;
struct ivtv_stream *s = &itv->streams[id->type];
@@ -1645,7 +1657,7 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
static long ivtv_default(struct file *file, void *fh, bool valid_prio,
unsigned int cmd, void *arg)
{
- struct ivtv *itv = fh2id(fh)->itv;
+ struct ivtv *itv = file2id(file)->itv;
if (!valid_prio) {
switch (cmd) {
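
The ivtv hunks above replace fh2id(fh), which interpreted the void *fh ioctl argument, with file2id(file), which derives the per-open state from the struct file itself. The helper's definition lives in ivtv-driver.h and is outside this excerpt; a plausible sketch, mirroring the to_saa7164_*_fh() helpers added later in this series (file_to_v4l2_fh() is the v4l2-fh core accessor, and struct ivtv_open_id is assumed to embed its struct v4l2_fh as the member named fh):

static inline struct ivtv_open_id *file2id(struct file *file)
{
	/* Resolve the v4l2_fh bound to this file, then recover the
	 * containing per-open structure. */
	return container_of(file_to_v4l2_fh(file), struct ivtv_open_id, fh);
}
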
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.h b/drivers/media/pci/ivtv/ivtv-ioctl.h
index 42c2516379fc..96ca7e2ef973 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.h
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.h
@@ -2,13 +2,15 @@
/*
ioctl system call
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef IVTV_IOCTL_H
#define IVTV_IOCTL_H
+struct ivtv;
+
u16 ivtv_service2vbi(int type);
void ivtv_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt);
@@ -17,7 +19,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed);
void ivtv_set_funcs(struct video_device *vdev);
void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id std);
void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std);
-int ivtv_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf);
-int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
+int ivtv_do_s_frequency(struct ivtv_stream *s, const struct v4l2_frequency *vf);
+int ivtv_do_s_input(struct ivtv *itv, unsigned int inp);
#endif
diff --git a/drivers/media/pci/ivtv/ivtv-irq.c b/drivers/media/pci/ivtv/ivtv-irq.c
index 748c14e87963..05e0293b4d44 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.c
+++ b/drivers/media/pci/ivtv/ivtv-irq.c
@@ -2,7 +2,7 @@
/* interrupt handling
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
@@ -305,7 +305,7 @@ static void dma_post(struct ivtv_stream *s)
ivtv_process_vbi_data(itv, buf, 0, s->type);
s->q_dma.bytesused += buf->bytesused;
}
- if (s->fh == NULL) {
+ if (s->id == NULL) {
ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
return;
}
@@ -330,7 +330,7 @@ static void dma_post(struct ivtv_stream *s)
set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
}
- if (s->fh)
+ if (s->id)
wake_up(&s->waitq);
}
@@ -351,7 +351,7 @@ void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
/* Insert buffer block for YUV if needed */
if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
- if (yi->blanking_dmaptr) {
+ if (yi->blanking_ptr) {
s->sg_pending[idx].src = yi->blanking_dmaptr;
s->sg_pending[idx].dst = offset;
s->sg_pending[idx].size = 720 * 16;
diff --git a/drivers/media/pci/ivtv/ivtv-irq.h b/drivers/media/pci/ivtv/ivtv-irq.h
index b8b0703a1c82..8a780bea7de4 100644
--- a/drivers/media/pci/ivtv/ivtv-irq.h
+++ b/drivers/media/pci/ivtv/ivtv-irq.h
@@ -3,7 +3,7 @@
interrupt handling
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-mailbox.c b/drivers/media/pci/ivtv/ivtv-mailbox.c
index d3fdaaa903f1..cd7c9f2d473f 100644
--- a/drivers/media/pci/ivtv/ivtv-mailbox.c
+++ b/drivers/media/pci/ivtv/ivtv-mailbox.c
@@ -3,7 +3,7 @@
mailbox functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-mailbox.h b/drivers/media/pci/ivtv/ivtv-mailbox.h
index 537c90437e1d..364e7f51508e 100644
--- a/drivers/media/pci/ivtv/ivtv-mailbox.h
+++ b/drivers/media/pci/ivtv/ivtv-mailbox.h
@@ -2,7 +2,7 @@
/*
mailbox functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-queue.c b/drivers/media/pci/ivtv/ivtv-queue.c
index f9b192ab7e7c..f7d2d159d800 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.c
+++ b/drivers/media/pci/ivtv/ivtv-queue.c
@@ -3,7 +3,7 @@
buffer queues.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-queue.h b/drivers/media/pci/ivtv/ivtv-queue.h
index 983e99642364..9619745d6de1 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.h
+++ b/drivers/media/pci/ivtv/ivtv-queue.h
@@ -3,7 +3,7 @@
buffer queues.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-routing.c b/drivers/media/pci/ivtv/ivtv-routing.c
index 57d4d5a3cb87..b1dfc2e96d91 100644
--- a/drivers/media/pci/ivtv/ivtv-routing.c
+++ b/drivers/media/pci/ivtv/ivtv-routing.c
@@ -2,7 +2,7 @@
/*
Audio/video-routing-related ivtv functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-routing.h b/drivers/media/pci/ivtv/ivtv-routing.h
index e4a0ae0694d2..69ddb66ef26f 100644
--- a/drivers/media/pci/ivtv/ivtv-routing.h
+++ b/drivers/media/pci/ivtv/ivtv-routing.h
@@ -2,7 +2,7 @@
/*
Audio/video-routing-related ivtv functions.
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index ac085925d3cb..d98fe0c9d9f1 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -1,22 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
init/start/stop/exit stream functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
/* License: GPL
diff --git a/drivers/media/pci/ivtv/ivtv-streams.h b/drivers/media/pci/ivtv/ivtv-streams.h
index 5f35c57fcdfd..43d4ecd6dd6f 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.h
+++ b/drivers/media/pci/ivtv/ivtv-streams.h
@@ -2,7 +2,7 @@
/*
init/start/stop/exit stream functions
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index f467a00492f4..7dedf04f9f87 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -4,7 +4,7 @@
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-udma.h b/drivers/media/pci/ivtv/ivtv-udma.h
index 12b9426b2db2..3030fadfdbc7 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.h
+++ b/drivers/media/pci/ivtv/ivtv-udma.h
@@ -2,7 +2,7 @@
/*
Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
Copyright (C) 2004 Chris Kennedy <c@groovy.org>
- Copyright (C) 2006-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2006-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.c b/drivers/media/pci/ivtv/ivtv-vbi.c
index 80478b026d75..ae7a00f46257 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.c
+++ b/drivers/media/pci/ivtv/ivtv-vbi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Vertical Blank Interval support functions
- Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2004-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-vbi.h b/drivers/media/pci/ivtv/ivtv-vbi.h
index 780f73d2ab6b..12fe27da544b 100644
--- a/drivers/media/pci/ivtv/ivtv-vbi.h
+++ b/drivers/media/pci/ivtv/ivtv-vbi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Vertical Blank Interval support functions
- Copyright (C) 2004-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2004-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-version.h b/drivers/media/pci/ivtv/ivtv-version.h
index 996f1871e49c..21e26d1f66b8 100644
--- a/drivers/media/pci/ivtv/ivtv-version.h
+++ b/drivers/media/pci/ivtv/ivtv-version.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
ivtv driver version information
- Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
+ Copyright (C) 2005-2007 Hans Verkuil <hverkuil@kernel.org>
*/
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 2d9274537725..71f040106647 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -125,7 +125,7 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
/* If we've offset the y plane, ensure top area is blanked */
- if (f->offset_y && yi->blanking_dmaptr) {
+ if (f->offset_y && yi->blanking_ptr) {
dma->SGarray[dma->SG_length].size = cpu_to_le32(720*16);
dma->SGarray[dma->SG_length].src = cpu_to_le32(yi->blanking_dmaptr);
dma->SGarray[dma->SG_length].dst = cpu_to_le32(IVTV_DECODER_OFFSET + yuv_offset[frame]);
@@ -929,6 +929,12 @@ static void ivtv_yuv_init(struct ivtv *itv)
yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev,
yi->blanking_ptr,
720 * 16, DMA_TO_DEVICE);
+ if (dma_mapping_error(&itv->pdev->dev, yi->blanking_dmaptr)) {
+ kfree(yi->blanking_ptr);
+ yi->blanking_ptr = NULL;
+ yi->blanking_dmaptr = 0;
+ IVTV_DEBUG_WARN("Failed to dma_map yuv blanking buffer\n");
+ }
} else {
yi->blanking_dmaptr = 0;
IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
diff --git a/drivers/media/pci/mgb4/mgb4_trigger.c b/drivers/media/pci/mgb4/mgb4_trigger.c
index 923650d53d4c..4f9a35904b41 100644
--- a/drivers/media/pci/mgb4/mgb4_trigger.c
+++ b/drivers/media/pci/mgb4/mgb4_trigger.c
@@ -17,6 +17,7 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/pci.h>
#include <linux/dma/amd_xdma.h>
+#include <linux/types.h>
#include "mgb4_core.h"
#include "mgb4_trigger.h"
@@ -90,13 +91,13 @@ static irqreturn_t trigger_handler(int irq, void *p)
struct trigger_data *st = iio_priv(indio_dev);
struct {
u32 data;
- s64 ts __aligned(8);
- } scan;
+ aligned_s64 ts;
+ } scan = { };
scan.data = mgb4_read_reg(&st->mgbdev->video, 0xA0);
mgb4_write_reg(&st->mgbdev->video, 0xA0, scan.data);
- iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+ iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan), pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
mgb4_write_reg(&st->mgbdev->video, 0xB4, 1U << 11);
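
Three things happen in the trigger_handler hunk: aligned_s64 guarantees the 8-byte alignment the IIO core expects for the trailing timestamp, the = { } initializer zeroes the struct's padding bytes so no stack contents leak into the buffer handed to userspace, and iio_push_to_buffers_with_ts() takes the buffer size explicitly so the core can validate it (its predecessor, iio_push_to_buffers_with_timestamp(), inferred the size). A minimal sketch of the shape such a handler takes, with read_sample() standing in for the device access:

static irqreturn_t sketch_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		u32 data;
		aligned_s64 ts;		/* 8-byte-aligned timestamp slot */
	} scan = { };			/* zero-init also covers padding */

	scan.data = read_sample();	/* illustrative device read */
	iio_push_to_buffers_with_ts(indio_dev, &scan, sizeof(scan),
				    pf->timestamp);
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
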
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
index 989e93f67f75..4b38076486ff 100644
--- a/drivers/media/pci/mgb4/mgb4_vin.c
+++ b/drivers/media/pci/mgb4/mgb4_vin.c
@@ -64,10 +64,10 @@ static const struct mgb4_i2c_kv gmsl_i2c[] = {
static const struct v4l2_dv_timings_cap video_timings_cap = {
.type = V4L2_DV_BT_656_1120,
.bt = {
- .min_width = 320,
+ .min_width = 240,
.max_width = 4096,
.min_height = 240,
- .max_height = 2160,
+ .max_height = 4096,
.min_pixelclock = 1843200, /* 320 x 240 x 24Hz */
.max_pixelclock = 530841600, /* 4096 x 2160 x 60Hz */
.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
@@ -610,8 +610,7 @@ static int vidioc_s_dv_timings(struct file *file, void *fh,
timings->bt.height < video_timings_cap.bt.min_height ||
timings->bt.height > video_timings_cap.bt.max_height)
return -EINVAL;
- if (timings->bt.width == vindev->timings.bt.width &&
- timings->bt.height == vindev->timings.bt.height)
+ if (v4l2_match_dv_timings(timings, &vindev->timings, 0, false))
return 0;
if (vb2_is_busy(&vindev->queue))
return -EBUSY;
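
The replaced check compared only width and height, so setting a timing with the same resolution but a different pixel clock or blanking was wrongly short-circuited as "unchanged". v4l2_match_dv_timings(t1, t2, pclock_delta, match_reduced_fps) compares the complete struct v4l2_dv_timings, tolerating a pixel-clock difference of at most pclock_delta Hz and optionally treating timings that differ only in the reduced-fps (1000/1001) flag as equal. Side by side, with illustrative 1080p values:

/* Old check: 1080p60 -> 1080p30 looked like a no-op, since both are
 * 1920x1080 and only the pixel clock differs. */
if (timings->bt.width == vindev->timings.bt.width &&
    timings->bt.height == vindev->timings.bt.height)
	return 0;

/* New check: full comparison, 0 Hz pixel-clock tolerance, no
 * reduced-fps matching - only genuinely identical timings short-circuit. */
if (v4l2_match_dv_timings(timings, &vindev->timings, 0, false))
	return 0;
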
diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c
index c179c425e167..fd93fbbaf755 100644
--- a/drivers/media/pci/mgb4/mgb4_vout.c
+++ b/drivers/media/pci/mgb4/mgb4_vout.c
@@ -44,10 +44,10 @@ static const struct mgb4_i2c_kv fpdl3_i2c[] = {
static const struct v4l2_dv_timings_cap video_timings_cap = {
.type = V4L2_DV_BT_656_1120,
.bt = {
- .min_width = 320,
+ .min_width = 240,
.max_width = 4096,
.min_height = 240,
- .max_height = 2160,
+ .max_height = 4096,
.min_pixelclock = 1843200, /* 320 x 240 x 24Hz */
.max_pixelclock = 530841600, /* 4096 x 2160 x 60Hz */
.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index 121a4a92ea10..1ced093583ac 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -639,7 +639,7 @@ static int pt1_init_tables(struct pt1 *pt1)
if (!pt1_nr_tables)
return 0;
- tables = vmalloc(array_size(pt1_nr_tables, sizeof(struct pt1_table)));
+ tables = vmalloc_array(pt1_nr_tables, sizeof(struct pt1_table));
if (tables == NULL)
return -ENOMEM;
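
Both spellings are overflow-safe - array_size() saturates to SIZE_MAX on overflow, which vmalloc() then rejects - but vmalloc_array() states the intent directly, as the vmalloc counterpart of kmalloc_array(). For zeroed memory the analogue would be vcalloc(). The family, for reference:

/* Checked-multiply allocators (all return NULL if n * size overflows):
 *   kmalloc_array(n, size, gfp)  - kmalloc-backed, not zeroed
 *   kcalloc(n, size, gfp)        - kmalloc-backed, zeroed
 *   vmalloc_array(n, size)       - vmalloc-backed, not zeroed
 *   vcalloc(n, size)             - vmalloc-backed, zeroed */
tables = vmalloc_array(pt1_nr_tables, sizeof(struct pt1_table));
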
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index c88939bce56b..4a51b873e47a 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1302,7 +1302,7 @@ static int saa7134_g_pixelaspect(struct file *file, void *priv,
return 0;
}
-static int saa7134_g_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int saa7134_g_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct saa7134_dev *dev = video_drvdata(file);
@@ -1325,7 +1325,7 @@ static int saa7134_g_selection(struct file *file, void *f, struct v4l2_selection
return 0;
}
-static int saa7134_s_selection(struct file *file, void *f, struct v4l2_selection *sel)
+static int saa7134_s_selection(struct file *file, void *priv, struct v4l2_selection *sel)
{
struct saa7134_dev *dev = video_drvdata(file);
struct v4l2_rect *b = &dev->crop_bounds;
diff --git a/drivers/media/pci/saa7164/saa7164-encoder.c b/drivers/media/pci/saa7164/saa7164-encoder.c
index bf73e9e83f52..66d650b5f69a 100644
--- a/drivers/media/pci/saa7164/saa7164-encoder.c
+++ b/drivers/media/pci/saa7164/saa7164-encoder.c
@@ -219,7 +219,7 @@ int saa7164_s_std(struct saa7164_port *port, v4l2_std_id id)
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_s_std(fh->port, id);
}
@@ -232,7 +232,7 @@ int saa7164_g_std(struct saa7164_port *port, v4l2_std_id *id)
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_g_std(fh->port, id);
}
@@ -277,7 +277,7 @@ int saa7164_g_input(struct saa7164_port *port, unsigned int *i)
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_g_input(fh->port, i);
}
@@ -301,14 +301,14 @@ int saa7164_s_input(struct saa7164_port *port, unsigned int i)
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_s_input(fh->port, i);
}
int saa7164_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -347,7 +347,7 @@ int saa7164_g_frequency(struct saa7164_port *port, struct v4l2_frequency *f)
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_g_frequency(fh->port, f);
}
@@ -400,7 +400,7 @@ int saa7164_s_frequency(struct saa7164_port *port,
static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
return saa7164_s_frequency(fh->port, f);
}
@@ -483,7 +483,7 @@ static int saa7164_s_ctrl(struct v4l2_ctrl *ctrl)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -510,7 +510,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
@@ -725,15 +725,14 @@ static int fops_open(struct file *file)
fh->port = port;
v4l2_fh_init(&fh->fh, video_devdata(file));
- v4l2_fh_add(&fh->fh);
- file->private_data = fh;
+ v4l2_fh_add(&fh->fh, file);
return 0;
}
static int fops_release(struct file *file)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -747,7 +746,7 @@ static int fops_release(struct file *file)
}
}
- v4l2_fh_del(&fh->fh);
+ v4l2_fh_del(&fh->fh, file);
v4l2_fh_exit(&fh->fh);
kfree(fh);
@@ -787,7 +786,7 @@ saa7164_user_buffer *saa7164_enc_next_buf(struct saa7164_port *port)
static ssize_t fops_read(struct file *file, char __user *buffer,
size_t count, loff_t *pos)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
@@ -893,8 +892,7 @@ err:
static __poll_t fops_poll(struct file *file, poll_table *wait)
{
__poll_t req_events = poll_requested_events(wait);
- struct saa7164_encoder_fh *fh =
- (struct saa7164_encoder_fh *)file->private_data;
+ struct saa7164_encoder_fh *fh = to_saa7164_encoder_fh(file);
struct saa7164_port *port = fh->port;
__poll_t mask = v4l2_ctrl_poll(file, wait);
diff --git a/drivers/media/pci/saa7164/saa7164-vbi.c b/drivers/media/pci/saa7164/saa7164-vbi.c
index ac958a5fca78..57e4362c0d19 100644
--- a/drivers/media/pci/saa7164/saa7164-vbi.c
+++ b/drivers/media/pci/saa7164/saa7164-vbi.c
@@ -144,28 +144,28 @@ static int saa7164_vbi_initialize(struct saa7164_port *port)
/* -- V4L2 --------------------------------------------------------- */
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_s_std(fh->port->enc_port, id);
}
static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
- struct saa7164_encoder_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_g_std(fh->port->enc_port, id);
}
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_g_input(fh->port->enc_port, i);
}
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_s_input(fh->port->enc_port, i);
}
@@ -173,7 +173,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_g_frequency(struct file *file, void *priv,
struct v4l2_frequency *f)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
return saa7164_g_frequency(fh->port->enc_port, f);
}
@@ -181,7 +181,7 @@ static int vidioc_g_frequency(struct file *file, void *priv,
static int vidioc_s_frequency(struct file *file, void *priv,
const struct v4l2_frequency *f)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
int ret = saa7164_s_frequency(fh->port->enc_port, f);
if (ret == 0)
@@ -192,7 +192,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -428,15 +428,14 @@ static int fops_open(struct file *file)
fh->port = port;
v4l2_fh_init(&fh->fh, video_devdata(file));
- v4l2_fh_add(&fh->fh);
- file->private_data = fh;
+ v4l2_fh_add(&fh->fh, file);
return 0;
}
static int fops_release(struct file *file)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_dev *dev = port->dev;
@@ -450,7 +449,7 @@ static int fops_release(struct file *file)
}
}
- v4l2_fh_del(&fh->fh);
+ v4l2_fh_del(&fh->fh, file);
v4l2_fh_exit(&fh->fh);
kfree(fh);
@@ -489,7 +488,7 @@ saa7164_user_buffer *saa7164_vbi_next_buf(struct saa7164_port *port)
static ssize_t fops_read(struct file *file, char __user *buffer,
size_t count, loff_t *pos)
{
- struct saa7164_vbi_fh *fh = file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
struct saa7164_user_buffer *ubuf = NULL;
struct saa7164_dev *dev = port->dev;
@@ -596,7 +595,7 @@ err:
static __poll_t fops_poll(struct file *file, poll_table *wait)
{
- struct saa7164_vbi_fh *fh = (struct saa7164_vbi_fh *)file->private_data;
+ struct saa7164_vbi_fh *fh = to_saa7164_vbi_fh(file);
struct saa7164_port *port = fh->port;
__poll_t mask = 0;
diff --git a/drivers/media/pci/saa7164/saa7164.h b/drivers/media/pci/saa7164/saa7164.h
index e1bac1fe19d3..94e987e7b5e5 100644
--- a/drivers/media/pci/saa7164/saa7164.h
+++ b/drivers/media/pci/saa7164/saa7164.h
@@ -180,12 +180,22 @@ struct saa7164_encoder_fh {
atomic_t v4l_reading;
};
+static inline struct saa7164_encoder_fh *to_saa7164_encoder_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct saa7164_encoder_fh, fh);
+}
+
struct saa7164_vbi_fh {
struct v4l2_fh fh;
struct saa7164_port *port;
atomic_t v4l_reading;
};
+static inline struct saa7164_vbi_fh *to_saa7164_vbi_fh(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct saa7164_vbi_fh, fh);
+}
+
struct saa7164_histogram_bucket {
u32 val;
u32 count;
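
With these helpers in place, both saa7164 file-handle types follow the same life cycle: v4l2_fh_add(&fh->fh, file) binds the handle to the file (this series' replacement for the manual file->private_data store), every fop recovers it through container_of() on file_to_v4l2_fh(), and v4l2_fh_del(&fh->fh, file) unbinds it on release. A minimal open/release pairing, assuming a hypothetical my_fh type that embeds struct v4l2_fh exactly like the structs above:

static int fops_open_sketch(struct file *file)
{
	struct my_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);

	if (!fh)
		return -ENOMEM;
	v4l2_fh_init(&fh->fh, video_devdata(file));
	v4l2_fh_add(&fh->fh, file);	/* binds fh to this file */
	return 0;
}

static int fops_release_sketch(struct file *file)
{
	struct my_fh *fh = container_of(file_to_v4l2_fh(file),
					struct my_fh, fh);

	v4l2_fh_del(&fh->fh, file);	/* unbinds fh from the file */
	v4l2_fh_exit(&fh->fh);
	kfree(fh);
	return 0;
}
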
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index 35dd19b2427e..08b7ce1043aa 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -14,7 +14,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/init.h>
@@ -37,7 +37,7 @@
MODULE_DESCRIPTION("v4l2 driver module for tw6800 based video capture cards");
MODULE_AUTHOR("William M. Brack");
-MODULE_AUTHOR("Hans Verkuil <hverkuil@xs4all.nl>");
+MODULE_AUTHOR("Hans Verkuil <hverkuil@kernel.org>");
MODULE_LICENSE("GPL");
static unsigned int latency = UNSET;
diff --git a/drivers/media/pci/tw68/tw68-reg.h b/drivers/media/pci/tw68/tw68-reg.h
index dcd9931b25cc..8aeef452ea8a 100644
--- a/drivers/media/pci/tw68/tw68-reg.h
+++ b/drivers/media/pci/tw68/tw68-reg.h
@@ -13,7 +13,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#ifndef _TW68_REG_H_
diff --git a/drivers/media/pci/tw68/tw68-risc.c b/drivers/media/pci/tw68/tw68-risc.c
index dacb136c4f3a..e793db6134e4 100644
--- a/drivers/media/pci/tw68/tw68-risc.c
+++ b/drivers/media/pci/tw68/tw68-risc.c
@@ -14,7 +14,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include "tw68.h"
diff --git a/drivers/media/pci/tw68/tw68-video.c b/drivers/media/pci/tw68/tw68-video.c
index 77773dec48b8..6232bac170d0 100644
--- a/drivers/media/pci/tw68/tw68-video.c
+++ b/drivers/media/pci/tw68/tw68-video.c
@@ -13,7 +13,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/module.h>
diff --git a/drivers/media/pci/tw68/tw68.h b/drivers/media/pci/tw68/tw68.h
index a1f422d6e600..66be6b3bb7b6 100644
--- a/drivers/media/pci/tw68/tw68.h
+++ b/drivers/media/pci/tw68/tw68.h
@@ -13,7 +13,7 @@
*
* Refactored and updated to the latest v4l core frameworks:
*
- * Copyright (C) 2014 Hans Verkuil <hverkuil@xs4all.nl>
+ * Copyright (C) 2014 Hans Verkuil <hverkuil@kernel.org>
*/
#include <linux/pci.h>
diff --git a/drivers/media/pci/zoran/zoran.h b/drivers/media/pci/zoran/zoran.h
index 1cd990468d3d..d05e222b3921 100644
--- a/drivers/media/pci/zoran/zoran.h
+++ b/drivers/media/pci/zoran/zoran.h
@@ -154,12 +154,6 @@ struct zoran_jpg_settings {
struct zoran;
-/* zoran_fh contains per-open() settings */
-struct zoran_fh {
- struct v4l2_fh fh;
- struct zoran *zr;
-};
-
struct card_info {
enum card_type type;
char name[32];
diff --git a/drivers/media/pci/zoran/zoran_card.c b/drivers/media/pci/zoran/zoran_card.c
index e31f9f19a48a..d81facf735d9 100644
--- a/drivers/media/pci/zoran/zoran_card.c
+++ b/drivers/media/pci/zoran/zoran_card.c
@@ -67,10 +67,6 @@ module_param(pass_through, int, 0644);
MODULE_PARM_DESC(pass_through,
"Pass TV signal through to TV-out when idling");
-int zr36067_debug = 1;
-module_param_named(debug, zr36067_debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level (0-5)");
-
#define ZORAN_VERSION "0.10.1"
MODULE_DESCRIPTION("Zoran-36057/36067 JPEG codec driver");
diff --git a/drivers/media/pci/zoran/zoran_card.h b/drivers/media/pci/zoran/zoran_card.h
index 518cb426b446..c4f81777e6ce 100644
--- a/drivers/media/pci/zoran/zoran_card.h
+++ b/drivers/media/pci/zoran/zoran_card.h
@@ -12,8 +12,6 @@
#ifndef __ZORAN_CARD_H__
#define __ZORAN_CARD_H__
-extern int zr36067_debug;
-
/* Anybody who uses more than four? */
#define BUZ_MAX 4
diff --git a/drivers/media/pci/zoran/zoran_driver.c b/drivers/media/pci/zoran/zoran_driver.c
index f42f596d3e62..5b4d5dd06edb 100644
--- a/drivers/media/pci/zoran/zoran_driver.c
+++ b/drivers/media/pci/zoran/zoran_driver.c
@@ -245,7 +245,7 @@ static int zoran_set_input(struct zoran *zr, int input)
* ioctl routine
*/
-static int zoran_querycap(struct file *file, void *__fh, struct v4l2_capability *cap)
+static int zoran_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
struct zoran *zr = video_drvdata(file);
@@ -278,7 +278,7 @@ static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag)
return -EINVAL;
}
-static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_enum_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_fmtdesc *f)
{
struct zoran *zr = video_drvdata(file);
@@ -286,7 +286,7 @@ static int zoran_enum_fmt_vid_cap(struct file *file, void *__fh,
return zoran_enum_fmt(zr, f, ZORAN_FORMAT_CAPTURE);
}
-static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
+static int zoran_g_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -308,13 +308,13 @@ static int zoran_g_fmt_vid_out(struct file *file, void *__fh,
return 0;
}
-static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_g_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
if (zr->map_mode != ZORAN_MAP_MODE_RAW)
- return zoran_g_fmt_vid_out(file, __fh, fmt);
+ return zoran_g_fmt_vid_out(file, fh, fmt);
fmt->fmt.pix.width = zr->v4l_settings.width;
fmt->fmt.pix.height = zr->v4l_settings.height;
fmt->fmt.pix.sizeimage = zr->buffer_size;
@@ -328,7 +328,7 @@ static int zoran_g_fmt_vid_cap(struct file *file, void *__fh,
return 0;
}
-static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
+static int zoran_try_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -391,7 +391,7 @@ static int zoran_try_fmt_vid_out(struct file *file, void *__fh,
return res;
}
-static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_try_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -399,7 +399,7 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
int i;
if (fmt->fmt.pix.pixelformat == V4L2_PIX_FMT_MJPEG)
- return zoran_try_fmt_vid_out(file, __fh, fmt);
+ return zoran_try_fmt_vid_out(file, fh, fmt);
for (i = 0; i < NUM_FORMATS; i++)
if (zoran_formats[i].fourcc == fmt->fmt.pix.pixelformat)
@@ -427,7 +427,7 @@ static int zoran_try_fmt_vid_cap(struct file *file, void *__fh,
return 0;
}
-static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
+static int zoran_s_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
@@ -507,11 +507,10 @@ static int zoran_s_fmt_vid_out(struct file *file, void *__fh,
return res;
}
-static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
+static int zoran_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *fmt)
{
struct zoran *zr = video_drvdata(file);
- struct zoran_fh *fh = __fh;
int i;
int res = 0;
@@ -556,7 +555,7 @@ static int zoran_s_fmt_vid_cap(struct file *file, void *__fh,
return res;
}
-static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
+static int zoran_g_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct zoran *zr = video_drvdata(file);
@@ -564,7 +563,7 @@ static int zoran_g_std(struct file *file, void *__fh, v4l2_std_id *std)
return 0;
}
-static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
+static int zoran_s_std(struct file *file, void *fh, v4l2_std_id std)
{
struct zoran *zr = video_drvdata(file);
int res = 0;
@@ -579,7 +578,7 @@ static int zoran_s_std(struct file *file, void *__fh, v4l2_std_id std)
return res;
}
-static int zoran_enum_input(struct file *file, void *__fh,
+static int zoran_enum_input(struct file *file, void *fh,
struct v4l2_input *inp)
{
struct zoran *zr = video_drvdata(file);
@@ -596,7 +595,7 @@ static int zoran_enum_input(struct file *file, void *__fh,
return 0;
}
-static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
+static int zoran_g_input(struct file *file, void *fh, unsigned int *input)
{
struct zoran *zr = video_drvdata(file);
@@ -605,7 +604,7 @@ static int zoran_g_input(struct file *file, void *__fh, unsigned int *input)
return 0;
}
-static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
+static int zoran_s_input(struct file *file, void *fh, unsigned int input)
{
struct zoran *zr = video_drvdata(file);
int res;
@@ -618,7 +617,7 @@ static int zoran_s_input(struct file *file, void *__fh, unsigned int input)
}
/* cropping (sub-frame capture) */
-static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
+static int zoran_g_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
struct zoran *zr = video_drvdata(file);
@@ -653,7 +652,7 @@ static int zoran_g_selection(struct file *file, void *__fh, struct v4l2_selectio
return 0;
}
-static int zoran_s_selection(struct file *file, void *__fh, struct v4l2_selection *sel)
+static int zoran_s_selection(struct file *file, void *fh, struct v4l2_selection *sel)
{
struct zoran *zr = video_drvdata(file);
struct zoran_jpg_settings settings;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 9287faafdce5..3f0b7bb68cc9 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -65,6 +65,7 @@ config VIDEO_MUX
source "drivers/media/platform/allegro-dvt/Kconfig"
source "drivers/media/platform/amlogic/Kconfig"
source "drivers/media/platform/amphion/Kconfig"
+source "drivers/media/platform/arm/Kconfig"
source "drivers/media/platform/aspeed/Kconfig"
source "drivers/media/platform/atmel/Kconfig"
source "drivers/media/platform/broadcom/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 6fd7db0541c7..6d5f79ddfcc3 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -8,6 +8,7 @@
obj-y += allegro-dvt/
obj-y += amlogic/
obj-y += amphion/
+obj-y += arm/
obj-y += aspeed/
obj-y += atmel/
obj-y += broadcom/
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index eb03df0d8652..eec0b8b30b7f 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -177,6 +177,7 @@ struct allegro_dev {
*/
unsigned long channel_user_ids;
struct list_head channels;
+ struct mutex channels_lock;
};
static const struct regmap_config allegro_regmap_config = {
@@ -197,9 +198,8 @@ static const struct regmap_config allegro_sram_config = {
.cache_type = REGCACHE_NONE,
};
-#define fh_to_channel(__fh) container_of(__fh, struct allegro_channel, fh)
-
struct allegro_channel {
+ struct kref ref;
struct allegro_dev *dev;
struct v4l2_fh fh;
struct v4l2_ctrl_handler ctrl_handler;
@@ -302,6 +302,11 @@ struct allegro_channel {
unsigned int error;
};
+static inline struct allegro_channel *file_to_channel(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct allegro_channel, fh);
+}
+
static inline int
allegro_channel_get_i_frame_qp(struct allegro_channel *channel)
{
@@ -427,33 +432,55 @@ static unsigned long allegro_next_user_id(struct allegro_dev *dev)
}
static struct allegro_channel *
-allegro_find_channel_by_user_id(struct allegro_dev *dev,
- unsigned int user_id)
+allegro_ref_get_channel_by_user_id(struct allegro_dev *dev,
+ unsigned int user_id)
{
struct allegro_channel *channel;
+ guard(mutex)(&dev->channels_lock);
+
list_for_each_entry(channel, &dev->channels, list) {
- if (channel->user_id == user_id)
- return channel;
+ if (channel->user_id == user_id) {
+ if (kref_get_unless_zero(&channel->ref))
+ return channel;
+ break;
+ }
}
return ERR_PTR(-EINVAL);
}
static struct allegro_channel *
-allegro_find_channel_by_channel_id(struct allegro_dev *dev,
- unsigned int channel_id)
+allegro_ref_get_channel_by_channel_id(struct allegro_dev *dev,
+ unsigned int channel_id)
{
struct allegro_channel *channel;
+ guard(mutex)(&dev->channels_lock);
+
list_for_each_entry(channel, &dev->channels, list) {
- if (channel->mcu_channel_id == channel_id)
- return channel;
+ if (channel->mcu_channel_id == channel_id) {
+ if (kref_get_unless_zero(&channel->ref))
+ return channel;
+ break;
+ }
}
return ERR_PTR(-EINVAL);
}
+static void allegro_free_channel(struct kref *ref)
+{
+ struct allegro_channel *channel = container_of(ref, struct allegro_channel, ref);
+
+ kfree(channel);
+}
+
+static int allegro_ref_put_channel(struct allegro_channel *channel)
+{
+ return kref_put(&channel->ref, allegro_free_channel);
+}
+
static inline bool channel_exists(struct allegro_channel *channel)
{
return channel->mcu_channel_id != -1;
@@ -828,6 +855,20 @@ out:
return err;
}
+static unsigned int allegro_mbox_get_available(struct allegro_mbox *mbox)
+{
+ struct regmap *sram = mbox->dev->sram;
+ unsigned int head, tail;
+
+ regmap_read(sram, mbox->head, &head);
+ regmap_read(sram, mbox->tail, &tail);
+
+ if (tail >= head)
+ return tail - head;
+ else
+ return mbox->size - (head - tail);
+}
+
static ssize_t allegro_mbox_read(struct allegro_mbox *mbox,
u32 *dst, size_t nbyte)
{
@@ -836,11 +877,15 @@ static ssize_t allegro_mbox_read(struct allegro_mbox *mbox,
u16 type;
} __attribute__ ((__packed__)) *header;
struct regmap *sram = mbox->dev->sram;
- unsigned int head;
+ unsigned int available, head;
ssize_t size;
size_t body_no_wrap;
int stride = regmap_get_reg_stride(sram);
+ available = allegro_mbox_get_available(mbox);
+ if (available < sizeof(*header))
+ return -EAGAIN;
+
regmap_read(sram, mbox->head, &head);
if (head > mbox->size)
return -EIO;
@@ -854,6 +899,8 @@ static ssize_t allegro_mbox_read(struct allegro_mbox *mbox,
return -EIO;
if (size > nbyte)
return -EINVAL;
+ if (size > available)
+ return -EAGAIN;
/*
* The message might wrap within the mailbox. If the message does not
@@ -913,26 +960,27 @@ out:
* allegro_mbox_notify() - Notify the mailbox about a new message
* @mbox: The allegro_mbox to notify
*/
-static void allegro_mbox_notify(struct allegro_mbox *mbox)
+static int allegro_mbox_notify(struct allegro_mbox *mbox)
{
struct allegro_dev *dev = mbox->dev;
union mcu_msg_response *msg;
- ssize_t size;
u32 *tmp;
int err;
msg = kmalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
- return;
+ return -ENOMEM;
msg->header.version = dev->fw_info->mailbox_version;
tmp = kmalloc(mbox->size, GFP_KERNEL);
- if (!tmp)
+ if (!tmp) {
+ err = -ENOMEM;
goto out;
+ }
- size = allegro_mbox_read(mbox, tmp, mbox->size);
- if (size < 0)
+ err = allegro_mbox_read(mbox, tmp, mbox->size);
+ if (err < 0)
goto out;
err = allegro_decode_mail(msg, tmp);
@@ -944,6 +992,8 @@ static void allegro_mbox_notify(struct allegro_mbox *mbox)
out:
kfree(tmp);
kfree(msg);
+
+ return err;
}
static int allegro_encoder_buffer_init(struct allegro_dev *dev,
@@ -2121,7 +2171,7 @@ static void allegro_channel_finish_frame(struct allegro_channel *channel,
state = VB2_BUF_STATE_DONE;
- v4l2_m2m_buf_copy_metadata(src_buf, dst_buf, false);
+ v4l2_m2m_buf_copy_metadata(src_buf, dst_buf);
if (msg->is_idr)
dst_buf->flags |= V4L2_BUF_FLAG_KEYFRAME;
else
@@ -2160,7 +2210,7 @@ allegro_handle_create_channel(struct allegro_dev *dev,
int err = 0;
struct create_channel_param param;
- channel = allegro_find_channel_by_user_id(dev, msg->user_id);
+ channel = allegro_ref_get_channel_by_user_id(dev, msg->user_id);
if (IS_ERR(channel)) {
v4l2_warn(&dev->v4l2_dev,
"received %s for unknown user %d\n",
@@ -2227,6 +2277,7 @@ allegro_handle_create_channel(struct allegro_dev *dev,
out:
channel->error = err;
complete(&channel->completion);
+ allegro_ref_put_channel(channel);
/* Handled successfully, error is passed via channel->error */
return 0;
@@ -2238,7 +2289,7 @@ allegro_handle_destroy_channel(struct allegro_dev *dev,
{
struct allegro_channel *channel;
- channel = allegro_find_channel_by_channel_id(dev, msg->channel_id);
+ channel = allegro_ref_get_channel_by_channel_id(dev, msg->channel_id);
if (IS_ERR(channel)) {
v4l2_err(&dev->v4l2_dev,
"received %s for unknown channel %d\n",
@@ -2251,6 +2302,7 @@ allegro_handle_destroy_channel(struct allegro_dev *dev,
"user %d: vcu destroyed channel %d\n",
channel->user_id, channel->mcu_channel_id);
complete(&channel->completion);
+ allegro_ref_put_channel(channel);
return 0;
}
@@ -2261,7 +2313,7 @@ allegro_handle_encode_frame(struct allegro_dev *dev,
{
struct allegro_channel *channel;
- channel = allegro_find_channel_by_channel_id(dev, msg->channel_id);
+ channel = allegro_ref_get_channel_by_channel_id(dev, msg->channel_id);
if (IS_ERR(channel)) {
v4l2_err(&dev->v4l2_dev,
"received %s for unknown channel %d\n",
@@ -2271,6 +2323,7 @@ allegro_handle_encode_frame(struct allegro_dev *dev,
}
allegro_channel_finish_frame(channel, msg);
+ allegro_ref_put_channel(channel);
return 0;
}
@@ -2326,7 +2379,10 @@ static irqreturn_t allegro_irq_thread(int irq, void *data)
if (!dev->mbox_status)
return IRQ_NONE;
- allegro_mbox_notify(dev->mbox_status);
+ while (allegro_mbox_get_available(dev->mbox_status) > 0) {
+ if (allegro_mbox_notify(dev->mbox_status))
+ break;
+ }
return IRQ_HANDLED;
}
@@ -2599,8 +2655,14 @@ static int allegro_create_channel(struct allegro_channel *channel)
allegro_mcu_send_create_channel(dev, channel);
time_left = wait_for_completion_timeout(&channel->completion,
msecs_to_jiffies(5000));
- if (time_left == 0)
+ if (time_left == 0) {
+ v4l2_warn(&dev->v4l2_dev,
+ "user %d: timeout while creating channel\n",
+ channel->user_id);
+
channel->error = -ETIMEDOUT;
+ }
+
if (channel->error)
goto err;
@@ -3047,6 +3109,8 @@ static int allegro_open(struct file *file)
if (!channel)
return -ENOMEM;
+ kref_init(&channel->ref);
+
v4l2_fh_init(&channel->fh, vdev);
init_completion(&channel->completion);
@@ -3213,9 +3277,11 @@ static int allegro_open(struct file *file)
goto error;
}
- list_add(&channel->list, &dev->channels);
- file->private_data = &channel->fh;
- v4l2_fh_add(&channel->fh);
+ scoped_guard(mutex, &dev->channels_lock) {
+ list_add(&channel->list, &dev->channels);
+ }
+
+ v4l2_fh_add(&channel->fh, file);
allegro_channel_adjust(channel);
@@ -3229,18 +3295,21 @@ error:
static int allegro_release(struct file *file)
{
- struct allegro_channel *channel = fh_to_channel(file->private_data);
+ struct allegro_channel *channel = file_to_channel(file);
+ struct allegro_dev *dev = channel->dev;
v4l2_m2m_ctx_release(channel->fh.m2m_ctx);
- list_del(&channel->list);
+ scoped_guard(mutex, &dev->channels_lock) {
+ list_del(&channel->list);
+ }
v4l2_ctrl_handler_free(&channel->ctrl_handler);
- v4l2_fh_del(&channel->fh);
+ v4l2_fh_del(&channel->fh, file);
v4l2_fh_exit(&channel->fh);
- kfree(channel);
+ allegro_ref_put_channel(channel);
return 0;
}
@@ -3280,7 +3349,7 @@ static int allegro_enum_fmt_vid(struct file *file, void *fh,
static int allegro_g_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
f->fmt.pix.field = V4L2_FIELD_NONE;
f->fmt.pix.width = channel->width;
@@ -3322,7 +3391,7 @@ static int allegro_try_fmt_vid_cap(struct file *file, void *fh,
static int allegro_s_fmt_vid_cap(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
struct vb2_queue *vq;
int err;
@@ -3331,8 +3400,6 @@ static int allegro_s_fmt_vid_cap(struct file *file, void *fh,
return err;
vq = v4l2_m2m_get_vq(channel->fh.m2m_ctx, f->type);
- if (!vq)
- return -EINVAL;
if (vb2_is_busy(vq))
return -EBUSY;
@@ -3346,7 +3413,7 @@ static int allegro_s_fmt_vid_cap(struct file *file, void *fh,
static int allegro_g_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
f->fmt.pix.field = V4L2_FIELD_NONE;
@@ -3393,7 +3460,7 @@ static int allegro_try_fmt_vid_out(struct file *file, void *fh,
static int allegro_s_fmt_vid_out(struct file *file, void *fh,
struct v4l2_format *f)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
int err;
err = allegro_try_fmt_vid_out(file, fh, f);
@@ -3434,7 +3501,7 @@ static int allegro_channel_cmd_start(struct allegro_channel *channel)
static int allegro_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *cmd)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
int err;
err = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd);
@@ -3483,8 +3550,7 @@ static int allegro_enum_framesizes(struct file *file, void *fh,
static int allegro_ioctl_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
- struct v4l2_fh *fh = file->private_data;
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
int err;
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
@@ -3493,13 +3559,13 @@ static int allegro_ioctl_streamon(struct file *file, void *priv,
return err;
}
- return v4l2_m2m_streamon(file, fh->m2m_ctx, type);
+ return v4l2_m2m_streamon(file, channel->fh.m2m_ctx, type);
}
static int allegro_g_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
struct v4l2_fract *timeperframe;
if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
@@ -3516,7 +3582,7 @@ static int allegro_g_parm(struct file *file, void *fh,
static int allegro_s_parm(struct file *file, void *fh,
struct v4l2_streamparm *a)
{
- struct allegro_channel *channel = fh_to_channel(fh);
+ struct allegro_channel *channel = file_to_channel(file);
struct v4l2_fract *timeperframe;
int div;
@@ -3835,6 +3901,7 @@ static int allegro_probe(struct platform_device *pdev)
dev->plat_dev = pdev;
init_completion(&dev->init_complete);
INIT_LIST_HEAD(&dev->channels);
+ mutex_init(&dev->channels_lock);
mutex_init(&dev->lock);
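
The allegro hunks above call allegro_ref_get_channel_by_user_id() and allegro_ref_put_channel(), whose definitions fall outside the quoted context. A minimal sketch of the shape those calls imply (lookup under the new channels_lock with a reference taken before the lock is dropped, plus a kref-based release; the in-tree definitions may differ):

static void allegro_channel_release(struct kref *ref)
{
        struct allegro_channel *channel =
                container_of(ref, struct allegro_channel, ref);

        /* Assumption: plain kfree() is the final release action. */
        kfree(channel);
}

static void allegro_ref_put_channel(struct allegro_channel *channel)
{
        kref_put(&channel->ref, allegro_channel_release);
}

static struct allegro_channel *
allegro_ref_get_channel_by_user_id(struct allegro_dev *dev, int user_id)
{
        struct allegro_channel *channel;

        guard(mutex)(&dev->channels_lock);

        list_for_each_entry(channel, &dev->channels, list) {
                if (channel->user_id == user_id) {
                        kref_get(&channel->ref);
                        return channel;
                }
        }

        return ERR_PTR(-EINVAL);
}

With this shape the message handlers can safely dereference the channel even if allegro_release() runs concurrently, since the final kfree() is deferred to the last allegro_ref_put_channel().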
diff --git a/drivers/media/platform/amlogic/c3/isp/Kconfig b/drivers/media/platform/amlogic/c3/isp/Kconfig
index 02c62a50a5e8..809208cd7e3a 100644
--- a/drivers/media/platform/amlogic/c3/isp/Kconfig
+++ b/drivers/media/platform/amlogic/c3/isp/Kconfig
@@ -10,6 +10,7 @@ config VIDEO_C3_ISP
select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
select VIDEOBUF2_VMALLOC
+ select V4L2_ISP
help
Video4Linux2 driver for Amlogic C3 ISP pipeline.
The C3 ISP is used for processing raw images and
diff --git a/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c b/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
index c80667dd7662..6f9ca7a7dd88 100644
--- a/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
+++ b/drivers/media/platform/amlogic/c3/isp/c3-isp-params.c
@@ -3,11 +3,13 @@
* Copyright (C) 2024 Amlogic, Inc. All rights reserved
*/
+#include <linux/build_bug.h>
#include <linux/cleanup.h>
#include <linux/media/amlogic/c3-isp-config.h>
#include <linux/pm_runtime.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-isp.h>
#include <media/v4l2-mc.h>
#include <media/videobuf2-vmalloc.h>
@@ -51,11 +53,6 @@ union c3_isp_params_block {
typedef void (*c3_isp_block_handler)(struct c3_isp_device *isp,
const union c3_isp_params_block *block);
-struct c3_isp_params_handler {
- size_t size;
- c3_isp_block_handler handler;
-};
-
#define to_c3_isp_params_buffer(vbuf) \
container_of(vbuf, struct c3_isp_params_buffer, vb)
@@ -523,41 +520,37 @@ static void c3_isp_params_cfg_blc(struct c3_isp_device *isp,
ISP_TOP_BEO_CTRL_BLC_EN);
}
-static const struct c3_isp_params_handler c3_isp_params_handlers[] = {
- [C3_ISP_PARAMS_BLOCK_AWB_GAINS] = {
- .size = sizeof(struct c3_isp_params_awb_gains),
- .handler = c3_isp_params_cfg_awb_gains,
- },
- [C3_ISP_PARAMS_BLOCK_AWB_CONFIG] = {
- .size = sizeof(struct c3_isp_params_awb_config),
- .handler = c3_isp_params_cfg_awb_config,
- },
- [C3_ISP_PARAMS_BLOCK_AE_CONFIG] = {
- .size = sizeof(struct c3_isp_params_ae_config),
- .handler = c3_isp_params_cfg_ae_config,
- },
- [C3_ISP_PARAMS_BLOCK_AF_CONFIG] = {
- .size = sizeof(struct c3_isp_params_af_config),
- .handler = c3_isp_params_cfg_af_config,
- },
- [C3_ISP_PARAMS_BLOCK_PST_GAMMA] = {
- .size = sizeof(struct c3_isp_params_pst_gamma),
- .handler = c3_isp_params_cfg_pst_gamma,
- },
- [C3_ISP_PARAMS_BLOCK_CCM] = {
- .size = sizeof(struct c3_isp_params_ccm),
- .handler = c3_isp_params_cfg_ccm,
- },
- [C3_ISP_PARAMS_BLOCK_CSC] = {
- .size = sizeof(struct c3_isp_params_csc),
- .handler = c3_isp_params_cfg_csc,
- },
- [C3_ISP_PARAMS_BLOCK_BLC] = {
- .size = sizeof(struct c3_isp_params_blc),
- .handler = c3_isp_params_cfg_blc,
- },
+static const c3_isp_block_handler c3_isp_params_handlers[] = {
+ [C3_ISP_PARAMS_BLOCK_AWB_GAINS] = c3_isp_params_cfg_awb_gains,
+ [C3_ISP_PARAMS_BLOCK_AWB_CONFIG] = c3_isp_params_cfg_awb_config,
+ [C3_ISP_PARAMS_BLOCK_AE_CONFIG] = c3_isp_params_cfg_ae_config,
+ [C3_ISP_PARAMS_BLOCK_AF_CONFIG] = c3_isp_params_cfg_af_config,
+ [C3_ISP_PARAMS_BLOCK_PST_GAMMA] = c3_isp_params_cfg_pst_gamma,
+ [C3_ISP_PARAMS_BLOCK_CCM] = c3_isp_params_cfg_ccm,
+ [C3_ISP_PARAMS_BLOCK_CSC] = c3_isp_params_cfg_csc,
+ [C3_ISP_PARAMS_BLOCK_BLC] = c3_isp_params_cfg_blc,
+};
+
+#define C3_ISP_PARAMS_BLOCK_INFO(block, data) \
+ [C3_ISP_PARAMS_BLOCK_ ## block] = { \
+ .size = sizeof(struct c3_isp_params_ ## data), \
+ }
+
+static const struct v4l2_isp_params_block_type_info
+c3_isp_params_block_types_info[] = {
+ C3_ISP_PARAMS_BLOCK_INFO(AWB_GAINS, awb_gains),
+ C3_ISP_PARAMS_BLOCK_INFO(AWB_CONFIG, awb_config),
+ C3_ISP_PARAMS_BLOCK_INFO(AE_CONFIG, ae_config),
+ C3_ISP_PARAMS_BLOCK_INFO(AF_CONFIG, af_config),
+ C3_ISP_PARAMS_BLOCK_INFO(PST_GAMMA, pst_gamma),
+ C3_ISP_PARAMS_BLOCK_INFO(CCM, ccm),
+ C3_ISP_PARAMS_BLOCK_INFO(CSC, csc),
+ C3_ISP_PARAMS_BLOCK_INFO(BLC, blc),
};
+static_assert(ARRAY_SIZE(c3_isp_params_handlers) ==
+ ARRAY_SIZE(c3_isp_params_block_types_info));
+
static void c3_isp_params_cfg_blocks(struct c3_isp_params *params)
{
struct c3_isp_params_cfg *config = params->buff->cfg;
@@ -568,14 +561,14 @@ static void c3_isp_params_cfg_blocks(struct c3_isp_params *params)
/* Walk the list of parameter blocks and process them */
while (block_offset < config->data_size) {
- const struct c3_isp_params_handler *block_handler;
const union c3_isp_params_block *block;
+ c3_isp_block_handler block_handler;
block = (const union c3_isp_params_block *)
&config->data[block_offset];
- block_handler = &c3_isp_params_handlers[block->header.type];
- block_handler->handler(params->isp, block);
+ block_handler = c3_isp_params_handlers[block->header.type];
+ block_handler(params->isp, block);
block_offset += block->header.size;
}
@@ -771,26 +764,15 @@ static int c3_isp_params_vb2_buf_prepare(struct vb2_buffer *vb)
struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
struct c3_isp_params_buffer *buf = to_c3_isp_params_buffer(vbuf);
struct c3_isp_params *params = vb2_get_drv_priv(vb->vb2_queue);
- struct c3_isp_params_cfg *cfg = buf->cfg;
struct c3_isp_params_cfg *usr_cfg = vb2_plane_vaddr(vb, 0);
size_t payload_size = vb2_get_plane_payload(vb, 0);
- size_t header_size = offsetof(struct c3_isp_params_cfg, data);
- size_t block_offset = 0;
- size_t cfg_size;
-
- /* Payload size can't be greater than the destination buffer size */
- if (payload_size > params->vfmt.fmt.meta.buffersize) {
- dev_dbg(params->isp->dev,
- "Payload size is too large: %zu\n", payload_size);
- return -EINVAL;
- }
+ struct c3_isp_params_cfg *cfg = buf->cfg;
+ int ret;
- /* Payload size can't be smaller than the header size */
- if (payload_size < header_size) {
- dev_dbg(params->isp->dev,
- "Payload size is too small: %zu\n", payload_size);
- return -EINVAL;
- }
+ ret = v4l2_isp_params_validate_buffer_size(params->isp->dev, vb,
+ params->vfmt.fmt.meta.buffersize);
+ if (ret)
+ return ret;
/*
* Use the internal scratch buffer to avoid userspace modifying
@@ -798,70 +780,10 @@ static int c3_isp_params_vb2_buf_prepare(struct vb2_buffer *vb)
*/
memcpy(cfg, usr_cfg, payload_size);
- /* Only v0 is supported at the moment */
- if (cfg->version != C3_ISP_PARAMS_BUFFER_V0) {
- dev_dbg(params->isp->dev,
- "Invalid params buffer version: %u\n", cfg->version);
- return -EINVAL;
- }
-
- /* Validate the size reported in the parameter buffer header */
- cfg_size = header_size + cfg->data_size;
- if (cfg_size != payload_size) {
- dev_dbg(params->isp->dev,
- "Data size %zu and payload size %zu are different\n",
- cfg_size, payload_size);
- return -EINVAL;
- }
-
- /* Walk the list of parameter blocks and validate them */
- cfg_size = cfg->data_size;
- while (cfg_size >= sizeof(struct c3_isp_params_block_header)) {
- const struct c3_isp_params_block_header *block;
- const struct c3_isp_params_handler *handler;
-
- block = (struct c3_isp_params_block_header *)
- &cfg->data[block_offset];
-
- if (block->type >= ARRAY_SIZE(c3_isp_params_handlers)) {
- dev_dbg(params->isp->dev,
- "Invalid params block type\n");
- return -EINVAL;
- }
-
- if (block->size > cfg_size) {
- dev_dbg(params->isp->dev,
- "Block size is greater than cfg size\n");
- return -EINVAL;
- }
-
- if ((block->flags & (C3_ISP_PARAMS_BLOCK_FL_ENABLE |
- C3_ISP_PARAMS_BLOCK_FL_DISABLE)) ==
- (C3_ISP_PARAMS_BLOCK_FL_ENABLE |
- C3_ISP_PARAMS_BLOCK_FL_DISABLE)) {
- dev_dbg(params->isp->dev,
- "Invalid parameters block flags\n");
- return -EINVAL;
- }
-
- handler = &c3_isp_params_handlers[block->type];
- if (block->size != handler->size) {
- dev_dbg(params->isp->dev,
- "Invalid params block size\n");
- return -EINVAL;
- }
-
- block_offset += block->size;
- cfg_size -= block->size;
- }
-
- if (cfg_size) {
- dev_dbg(params->isp->dev,
- "Unexpected data after the params buffer end\n");
- return -EINVAL;
- }
-
- return 0;
+ return v4l2_isp_params_validate_buffer(params->isp->dev, vb,
+ (struct v4l2_isp_params_buffer *)cfg,
+ c3_isp_params_block_types_info,
+ ARRAY_SIZE(c3_isp_params_block_types_info));
}
static int c3_isp_params_vb2_buf_init(struct vb2_buffer *vb)
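
The driver-local validation removed above is a good guide to what the common v4l2_isp_params_validate_buffer() helper has to do. A hedged sketch of that walk, assuming a generic block header (a struct v4l2_isp_params_block_header with type and size fields) and data_size/data members on struct v4l2_isp_params_buffer mirroring the C3 layout; the flags check is omitted and the real helper in the V4L2 ISP library may differ:

static int isp_params_validate_blocks(struct device *dev,
                                      struct v4l2_isp_params_buffer *cfg,
                                      const struct v4l2_isp_params_block_type_info *info,
                                      size_t num_types)
{
        size_t remaining = cfg->data_size;
        size_t offset = 0;

        while (remaining >= sizeof(struct v4l2_isp_params_block_header)) {
                const struct v4l2_isp_params_block_header *block =
                        (const void *)&cfg->data[offset];

                /* Unknown types, or types with no registered size, are rejected. */
                if (block->type >= num_types || !info[block->type].size) {
                        dev_dbg(dev, "invalid block type %u\n", block->type);
                        return -EINVAL;
                }

                /* Each block must fit in the buffer and match its declared size. */
                if (block->size > remaining ||
                    block->size != info[block->type].size) {
                        dev_dbg(dev, "invalid block size %u\n", block->size);
                        return -EINVAL;
                }

                offset += block->size;
                remaining -= block->size;
        }

        /* Trailing bytes that don't form a full header are an error. */
        return remaining ? -EINVAL : 0;
}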
diff --git a/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
index 1011ab3ebac7..b9e4ef3fc308 100644
--- a/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
+++ b/drivers/media/platform/amlogic/c3/mipi-csi2/c3-mipi-csi2.c
@@ -388,13 +388,12 @@ static void c3_mipi_csi_cfg_host(struct c3_csi_device *csi)
c3_mipi_csi_write(csi, CSI2_HOST_N_LANES, csi->bus.num_data_lanes - 1);
}
-static int c3_mipi_csi_start_stream(struct c3_csi_device *csi,
- struct v4l2_subdev *src_sd)
+static int c3_mipi_csi_start_stream(struct c3_csi_device *csi)
{
s64 link_freq;
s64 lane_rate;
- link_freq = v4l2_get_link_freq(src_sd->ctrl_handler, 0, 0);
+ link_freq = v4l2_get_link_freq(csi->src_pad, 0, 0);
if (link_freq < 0) {
dev_err(csi->dev,
"Unable to obtain link frequency: %lld\n", link_freq);
@@ -434,7 +433,7 @@ static int c3_mipi_csi_enable_streams(struct v4l2_subdev *sd,
pm_runtime_resume_and_get(csi->dev);
- c3_mipi_csi_start_stream(csi, src_sd);
+ c3_mipi_csi_start_stream(csi);
ret = v4l2_subdev_enable_streams(src_sd, csi->src_pad->index, BIT(0));
if (ret) {
diff --git a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
index 0c004bb8ba05..c51c6f4e41dc 100644
--- a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
+++ b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
@@ -82,6 +82,11 @@ struct ge2d_ctx {
u32 xy_swap;
};
+static inline struct ge2d_ctx *file_to_ge2d_ctx(struct file *filp)
+{
+ return container_of(file_to_v4l2_fh(filp), struct ge2d_ctx, fh);
+}
+
struct meson_ge2d {
struct v4l2_device v4l2_dev;
struct v4l2_m2m_dev *m2m_dev;
@@ -452,7 +457,7 @@ static int vidioc_enum_fmt(struct file *file, void *priv, struct v4l2_fmtdesc *f
static int vidioc_g_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct ge2d_frame *f;
bool use_frame = false;
@@ -502,7 +507,7 @@ static int vidioc_g_selection(struct file *file, void *priv,
static int vidioc_s_selection(struct file *file, void *priv,
struct v4l2_selection *s)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
struct ge2d_frame *f;
int ret = 0;
@@ -569,8 +574,8 @@ static void vidioc_setup_cap_fmt(struct ge2d_ctx *ctx, struct v4l2_pix_format *f
static int vidioc_try_fmt_cap(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
const struct ge2d_fmt *fmt = find_fmt(f);
- struct ge2d_ctx *ctx = priv;
struct v4l2_pix_format fmt_cap;
vidioc_setup_cap_fmt(ctx, &fmt_cap);
@@ -590,7 +595,7 @@ static int vidioc_try_fmt_cap(struct file *file, void *priv, struct v4l2_format
static int vidioc_s_fmt_cap(struct file *file, void *priv, struct v4l2_format *f)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
struct vb2_queue *vq;
struct ge2d_frame *frm;
@@ -626,14 +631,9 @@ static int vidioc_s_fmt_cap(struct file *file, void *priv, struct v4l2_format *f
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
- struct ge2d_ctx *ctx = priv;
- struct vb2_queue *vq;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct ge2d_frame *frm;
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
- if (!vq)
- return -EINVAL;
-
frm = get_frame(ctx, f->type);
f->fmt.pix = frm->pix_fmt;
@@ -665,7 +665,7 @@ static int vidioc_try_fmt_out(struct file *file, void *priv, struct v4l2_format
static int vidioc_s_fmt_out(struct file *file, void *priv, struct v4l2_format *f)
{
- struct ge2d_ctx *ctx = priv;
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
struct vb2_queue *vq;
struct ge2d_frame *frm, *frm_cap;
@@ -855,8 +855,7 @@ static int ge2d_open(struct file *file)
return ret;
}
v4l2_fh_init(&ctx->fh, video_devdata(file));
- file->private_data = &ctx->fh;
- v4l2_fh_add(&ctx->fh);
+ v4l2_fh_add(&ctx->fh, file);
ge2d_setup_ctrls(ctx);
@@ -871,8 +870,7 @@ static int ge2d_open(struct file *file)
static int ge2d_release(struct file *file)
{
- struct ge2d_ctx *ctx =
- container_of(file->private_data, struct ge2d_ctx, fh);
+ struct ge2d_ctx *ctx = file_to_ge2d_ctx(file);
struct meson_ge2d *ge2d = ctx->ge2d;
mutex_lock(&ge2d->mutex);
@@ -880,7 +878,7 @@ static int ge2d_release(struct file *file)
v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
v4l2_ctrl_handler_free(&ctx->ctrl_handler);
- v4l2_fh_del(&ctx->fh);
+ v4l2_fh_del(&ctx->fh, file);
v4l2_fh_exit(&ctx->fh);
kfree(ctx);
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 32eef2fd1f2a..c0d2aabb9e0e 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -532,8 +532,6 @@ static int vdec_s_fmt_common(struct vpu_inst *inst, struct v4l2_format *f)
return -EINVAL;
q = v4l2_m2m_get_vq(inst->fh.m2m_ctx, f->type);
- if (!q)
- return -EINVAL;
if (vb2_is_busy(q))
return -EBUSY;
@@ -823,7 +821,7 @@ static int vdec_frame_decoded(struct vpu_inst *inst, void *arg)
vbuf = &vpu_buf->m2m_buf.vb;
src_buf = vdec_get_src_buffer(inst, info->consumed_count);
if (src_buf) {
- v4l2_m2m_buf_copy_metadata(src_buf, vbuf, true);
+ v4l2_m2m_buf_copy_metadata(src_buf, vbuf);
if (info->consumed_count) {
v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
index c5c1f1fbaa80..aced76401b69 100644
--- a/drivers/media/platform/amphion/venc.c
+++ b/drivers/media/platform/amphion/venc.c
@@ -223,8 +223,6 @@ static int venc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
q = v4l2_m2m_get_vq(inst->fh.m2m_ctx, f->type);
- if (!q)
- return -EINVAL;
if (vb2_is_busy(q))
return -EBUSY;
@@ -790,7 +788,7 @@ static int venc_get_one_encoded_frame(struct vpu_inst *inst,
src_buf = vpu_find_buf_by_sequence(inst, inst->out_format.type, frame->info.frame_id);
if (src_buf) {
- v4l2_m2m_buf_copy_metadata(src_buf, vbuf, true);
+ v4l2_m2m_buf_copy_metadata(src_buf, vbuf);
vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
v4l2_m2m_src_buf_remove_by_buf(inst->fh.m2m_ctx, src_buf);
v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index cac0f1a64fea..bfd171a3ded4 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -328,7 +328,7 @@ static inline const char *vpu_core_type_desc(enum vpu_core_type type)
static inline struct vpu_inst *to_inst(struct file *filp)
{
- return container_of(filp->private_data, struct vpu_inst, fh);
+ return container_of(file_to_v4l2_fh(filp), struct vpu_inst, fh);
}
#define ctrl_to_inst(ctrl) \
diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
index da00f5fc0e5d..168f0514851e 100644
--- a/drivers/media/platform/amphion/vpu_core.c
+++ b/drivers/media/platform/amphion/vpu_core.c
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -542,47 +542,30 @@ const struct vpu_core_resources *vpu_get_resource(struct vpu_inst *inst)
static int vpu_core_parse_dt(struct vpu_core *core, struct device_node *np)
{
- struct device_node *node;
struct resource res;
int ret;
- if (of_count_phandle_with_args(np, "memory-region", NULL) < 2) {
- dev_err(core->dev, "need 2 memory-region for boot and rpc\n");
- return -ENODEV;
+ ret = of_reserved_mem_region_to_resource(np, 0, &res);
+ if (ret) {
+ dev_err(core->dev, "Cannot get boot-region\n");
+ return ret;
}
- node = of_parse_phandle(np, "memory-region", 0);
- if (!node) {
- dev_err(core->dev, "boot-region of_parse_phandle error\n");
- return -ENODEV;
- }
- if (of_address_to_resource(node, 0, &res)) {
- dev_err(core->dev, "boot-region of_address_to_resource error\n");
- of_node_put(node);
- return -EINVAL;
- }
core->fw.phys = res.start;
core->fw.length = resource_size(&res);
- of_node_put(node);
-
- node = of_parse_phandle(np, "memory-region", 1);
- if (!node) {
- dev_err(core->dev, "rpc-region of_parse_phandle error\n");
- return -ENODEV;
- }
- if (of_address_to_resource(node, 0, &res)) {
- dev_err(core->dev, "rpc-region of_address_to_resource error\n");
- of_node_put(node);
- return -EINVAL;
+ ret = of_reserved_mem_region_to_resource(np, 1, &res);
+ if (ret) {
+ dev_err(core->dev, "Cannot get rpc-region\n");
+ return ret;
}
+
core->rpc.phys = res.start;
core->rpc.length = resource_size(&res);
if (core->rpc.length < core->res->rpc_size + core->res->fwlog_size) {
dev_err(core->dev, "the rpc-region <%pad, 0x%x> is not enough\n",
&core->rpc.phys, core->rpc.length);
- of_node_put(node);
return -EINVAL;
}
@@ -594,7 +577,6 @@ static int vpu_core_parse_dt(struct vpu_core *core, struct device_node *np)
if (ret != VPU_CORE_MEMORY_UNCACHED) {
dev_err(core->dev, "rpc region<%pad, 0x%x> isn't uncached\n",
&core->rpc.phys, core->rpc.length);
- of_node_put(node);
return -EINVAL;
}
@@ -606,8 +588,6 @@ static int vpu_core_parse_dt(struct vpu_core *core, struct device_node *np)
core->act.length = core->rpc.length - core->res->rpc_size - core->log.length;
core->rpc.length = core->res->rpc_size;
- of_node_put(node);
-
return 0;
}
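
of_reserved_mem_region_to_resource() folds the of_parse_phandle()/of_address_to_resource()/of_node_put() sequence into a single call, which is why the error paths above can drop their of_node_put() calls. A minimal usage sketch (vpu_get_region() is a hypothetical wrapper, not part of the patch):

#include <linux/of_reserved_mem.h>

static int vpu_get_region(struct device_node *np, unsigned int idx,
                          phys_addr_t *phys, u32 *length)
{
        struct resource res;
        int ret;

        /* idx selects the idx-th "memory-region" phandle of @np. */
        ret = of_reserved_mem_region_to_resource(np, idx, &res);
        if (ret)
                return ret;

        *phys = res.start;
        *length = resource_size(&res);

        return 0;
}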
diff --git a/drivers/media/platform/amphion/vpu_drv.c b/drivers/media/platform/amphion/vpu_drv.c
index efbfd2652721..2cca61f41bea 100644
--- a/drivers/media/platform/amphion/vpu_drv.c
+++ b/drivers/media/platform/amphion/vpu_drv.c
@@ -175,31 +175,6 @@ static void vpu_remove(struct platform_device *pdev)
mutex_destroy(&vpu->lock);
}
-static int __maybe_unused vpu_runtime_resume(struct device *dev)
-{
- return 0;
-}
-
-static int __maybe_unused vpu_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int __maybe_unused vpu_resume(struct device *dev)
-{
- return 0;
-}
-
-static int __maybe_unused vpu_suspend(struct device *dev)
-{
- return 0;
-}
-
-static const struct dev_pm_ops vpu_pm_ops = {
- SET_RUNTIME_PM_OPS(vpu_runtime_suspend, vpu_runtime_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(vpu_suspend, vpu_resume)
-};
-
static struct vpu_resources imx8qxp_res = {
.plat_type = IMX8QXP,
.mreg_base = 0x40000000,
@@ -231,7 +206,6 @@ static struct platform_driver amphion_vpu_driver = {
.driver = {
.name = "amphion-vpu",
.of_match_table = vpu_dt_match,
- .pm = &vpu_pm_ops,
},
};
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index ba688566dffd..80802975c4f1 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -1337,22 +1337,18 @@ static int vpu_malone_insert_scode_vc1_g_seq(struct malone_scode_t *scode)
{
if (!scode->inst->total_input_count)
return 0;
- if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb)))
- scode->need_data = 0;
return 0;
}
static int vpu_malone_insert_scode_vc1_g_pic(struct malone_scode_t *scode)
{
- struct vb2_v4l2_buffer *vbuf;
u8 nal_hdr[MALONE_VC1_NAL_HEADER_LEN];
u32 *data = NULL;
int ret;
- vbuf = to_vb2_v4l2_buffer(scode->vb);
data = vb2_plane_vaddr(scode->vb, 0);
- if (scode->inst->total_input_count == 0 || vpu_vb_is_codecconfig(vbuf))
+ if (scode->inst->total_input_count == 0)
return 0;
if (MALONE_VC1_CONTAIN_NAL(*data))
return 0;
@@ -1373,8 +1369,6 @@ static int vpu_malone_insert_scode_vc1_l_seq(struct malone_scode_t *scode)
int size = 0;
u8 rcv_seqhdr[MALONE_VC1_RCV_SEQ_HEADER_LEN];
- if (vpu_vb_is_codecconfig(to_vb2_v4l2_buffer(scode->vb)))
- scode->need_data = 0;
if (scode->inst->total_input_count)
return 0;
scode->need_data = 0;
@@ -1560,7 +1554,7 @@ static int vpu_malone_input_frame_data(struct vpu_malone_str_buffer __iomem *str
scode.vb = vb;
scode.wptr = wptr;
scode.need_data = 1;
- if (vbuf->sequence == 0 || vpu_vb_is_codecconfig(vbuf))
+ if (vbuf->sequence == 0)
ret = vpu_malone_insert_scode(&scode, SCODE_SEQUENCE);
if (ret < 0)
@@ -1596,7 +1590,7 @@ static int vpu_malone_input_frame_data(struct vpu_malone_str_buffer __iomem *str
* This module is currently only supported for the H264 and HEVC formats,
* for other formats, vpu_malone_add_scode() will return 0.
*/
- if ((disp_imm || low_latency) && !vpu_vb_is_codecconfig(vbuf)) {
+ if (disp_imm || low_latency) {
ret = vpu_malone_add_scode(inst->core->iface,
inst->id,
&inst->stream_buffer,
@@ -1643,7 +1637,6 @@ int vpu_malone_input_frame(struct vpu_shared_addr *shared,
struct vpu_inst *inst, struct vb2_buffer *vb)
{
struct vpu_dec_ctrl *hc = shared->priv;
- struct vb2_v4l2_buffer *vbuf;
struct vpu_malone_str_buffer __iomem *str_buf = hc->str_buf[inst->id];
u32 disp_imm = hc->codec_param[inst->id].disp_imm;
u32 size;
@@ -1657,16 +1650,6 @@ int vpu_malone_input_frame(struct vpu_shared_addr *shared,
return ret;
size = ret;
- /*
- * if buffer only contain codec data, and the timestamp is invalid,
- * don't put the invalid timestamp to resync
- * merge the data to next frame
- */
- vbuf = to_vb2_v4l2_buffer(vb);
- if (vpu_vb_is_codecconfig(vbuf)) {
- inst->extra_size += size;
- return 0;
- }
if (inst->extra_size) {
size += inst->extra_size;
inst->extra_size = 0;
diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c
index 74668fa362e2..47dff9a35bb4 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.c
+++ b/drivers/media/platform/amphion/vpu_v4l2.c
@@ -24,6 +24,11 @@
#include "vpu_msgs.h"
#include "vpu_helpers.h"
+static char *vpu_type_name(u32 type)
+{
+ return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
+}
+
void vpu_inst_lock(struct vpu_inst *inst)
{
mutex_lock(&inst->lock);
@@ -42,7 +47,7 @@ dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no)
vb->planes[plane_no].data_offset;
}
-unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
+static unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no)
{
if (plane_no >= vb->num_planes)
return 0;
@@ -81,7 +86,7 @@ void vpu_v4l2_set_error(struct vpu_inst *inst)
vpu_inst_unlock(inst);
}
-int vpu_notify_eos(struct vpu_inst *inst)
+static int vpu_notify_eos(struct vpu_inst *inst)
{
static const struct v4l2_event ev = {
.id = 0,
@@ -344,16 +349,6 @@ struct vb2_v4l2_buffer *vpu_next_src_buf(struct vpu_inst *inst)
if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
return NULL;
- while (vpu_vb_is_codecconfig(src_buf)) {
- v4l2_m2m_src_buf_remove(inst->fh.m2m_ctx);
- vpu_set_buffer_state(src_buf, VPU_BUF_STATE_IDLE);
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
-
- src_buf = v4l2_m2m_next_src_buf(inst->fh.m2m_ctx);
- if (!src_buf || vpu_get_buffer_state(src_buf) == VPU_BUF_STATE_IDLE)
- return NULL;
- }
-
return src_buf;
}
@@ -573,7 +568,8 @@ static void vpu_vb2_buf_finish(struct vb2_buffer *vb)
call_void_vop(inst, on_queue_empty, q->type);
}
-void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state)
+static void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type,
+ enum vb2_buffer_state state)
{
struct vb2_v4l2_buffer *buf;
@@ -707,19 +703,17 @@ static int vpu_v4l2_release(struct vpu_inst *inst)
{
vpu_trace(inst->vpu->dev, "%p\n", inst);
- vpu_release_core(inst->core);
- put_device(inst->dev);
-
if (inst->workqueue) {
cancel_work_sync(&inst->msg_work);
destroy_workqueue(inst->workqueue);
inst->workqueue = NULL;
}
+ vpu_release_core(inst->core);
+ put_device(inst->dev);
+
v4l2_ctrl_handler_free(&inst->ctrl_handler);
mutex_destroy(&inst->lock);
- v4l2_fh_del(&inst->fh);
- v4l2_fh_exit(&inst->fh);
call_void_vop(inst, cleanup);
@@ -756,7 +750,7 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
inst->min_buffer_cap = 2;
inst->min_buffer_out = 2;
v4l2_fh_init(&inst->fh, func->vfd);
- v4l2_fh_add(&inst->fh);
+ v4l2_fh_add(&inst->fh, file);
ret = call_vop(inst, ctrl_init);
if (ret)
@@ -770,7 +764,6 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
}
inst->fh.ctrl_handler = &inst->ctrl_handler;
- file->private_data = &inst->fh;
inst->state = VPU_CODEC_STATE_DEINIT;
inst->workqueue = alloc_ordered_workqueue("vpu_inst", WQ_MEM_RECLAIM);
if (inst->workqueue) {
@@ -788,6 +781,8 @@ int vpu_v4l2_open(struct file *file, struct vpu_inst *inst)
return 0;
error:
+ v4l2_fh_del(&inst->fh, file);
+ v4l2_fh_exit(&inst->fh);
vpu_inst_put(inst);
return ret;
}
@@ -807,6 +802,9 @@ int vpu_v4l2_close(struct file *file)
call_void_vop(inst, release);
vpu_inst_unlock(inst);
+ v4l2_fh_del(&inst->fh, file);
+ v4l2_fh_exit(&inst->fh);
+
vpu_inst_unregister(inst);
vpu_inst_put(inst);
diff --git a/drivers/media/platform/amphion/vpu_v4l2.h b/drivers/media/platform/amphion/vpu_v4l2.h
index 56f2939fa84d..da9945f25e32 100644
--- a/drivers/media/platform/amphion/vpu_v4l2.h
+++ b/drivers/media/platform/amphion/vpu_v4l2.h
@@ -26,15 +26,12 @@ void vpu_skip_frame(struct vpu_inst *inst, int count);
struct vb2_v4l2_buffer *vpu_find_buf_by_sequence(struct vpu_inst *inst, u32 type, u32 sequence);
struct vb2_v4l2_buffer *vpu_find_buf_by_idx(struct vpu_inst *inst, u32 type, u32 idx);
void vpu_v4l2_set_error(struct vpu_inst *inst);
-int vpu_notify_eos(struct vpu_inst *inst);
int vpu_notify_source_change(struct vpu_inst *inst);
int vpu_set_last_buffer_dequeued(struct vpu_inst *inst, bool eos);
-void vpu_vb2_buffers_return(struct vpu_inst *inst, unsigned int type, enum vb2_buffer_state state);
int vpu_get_num_buffers(struct vpu_inst *inst, u32 type);
bool vpu_is_source_empty(struct vpu_inst *inst);
dma_addr_t vpu_get_vb_phy_addr(struct vb2_buffer *vb, u32 plane_no);
-unsigned int vpu_get_vb_length(struct vb2_buffer *vb, u32 plane_no);
static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type)
{
if (V4L2_TYPE_IS_OUTPUT(type))
@@ -42,19 +39,4 @@ static inline struct vpu_format *vpu_get_format(struct vpu_inst *inst, u32 type)
else
return &inst->cap_format;
}
-
-static inline char *vpu_type_name(u32 type)
-{
- return V4L2_TYPE_IS_OUTPUT(type) ? "output" : "capture";
-}
-
-static inline int vpu_vb_is_codecconfig(struct vb2_v4l2_buffer *vbuf)
-{
-#ifdef V4L2_BUF_FLAG_CODECCONFIG
- return (vbuf->flags & V4L2_BUF_FLAG_CODECCONFIG) ? 1 : 0;
-#else
- return 0;
-#endif
-}
-
#endif
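
Several hunks in this series convert drivers to the two-argument v4l2_fh_add()/v4l2_fh_del() and the file_to_v4l2_fh() accessor, which take over the file->private_data bookkeeping that drivers used to do by hand. A sketch of the converted open/release pattern, inferred from the conversions above (foo_ctx is a hypothetical per-file context):

#include <linux/slab.h>
#include <media/v4l2-fh.h>

static int foo_open(struct file *file)
{
        struct foo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

        if (!ctx)
                return -ENOMEM;

        v4l2_fh_init(&ctx->fh, video_devdata(file));
        /* Inferred: v4l2_fh_add() now also wires up file->private_data. */
        v4l2_fh_add(&ctx->fh, file);

        return 0;
}

static int foo_release(struct file *file)
{
        struct foo_ctx *ctx =
                container_of(file_to_v4l2_fh(file), struct foo_ctx, fh);

        v4l2_fh_del(&ctx->fh, file);
        v4l2_fh_exit(&ctx->fh);
        kfree(ctx);

        return 0;
}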
diff --git a/drivers/media/platform/arm/Kconfig b/drivers/media/platform/arm/Kconfig
new file mode 100644
index 000000000000..4f0764c329c7
--- /dev/null
+++ b/drivers/media/platform/arm/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "ARM media platform drivers"
+
+source "drivers/media/platform/arm/mali-c55/Kconfig"
diff --git a/drivers/media/platform/arm/Makefile b/drivers/media/platform/arm/Makefile
new file mode 100644
index 000000000000..8cc4918725ef
--- /dev/null
+++ b/drivers/media/platform/arm/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y += mali-c55/
diff --git a/drivers/media/platform/arm/mali-c55/Kconfig b/drivers/media/platform/arm/mali-c55/Kconfig
new file mode 100644
index 000000000000..5b084b3c3340
--- /dev/null
+++ b/drivers/media/platform/arm/mali-c55/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_MALI_C55
+ tristate "ARM Mali-C55 Image Signal Processor driver"
+ depends on ARCH_VEXPRESS || ARCH_RENESAS || COMPILE_TEST
+ depends on V4L_PLATFORM_DRIVERS
+ depends on VIDEO_DEV && OF
+ select GENERIC_PHY_MIPI_DPHY
+ select MEDIA_CONTROLLER
+ select V4L2_FWNODE
+ select V4L2_ISP
+ select VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ help
+ Enable this to support Arm's Mali-C55 Image Signal Processor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mali-c55.
diff --git a/drivers/media/platform/arm/mali-c55/Makefile b/drivers/media/platform/arm/mali-c55/Makefile
new file mode 100644
index 000000000000..d5718b0b23e0
--- /dev/null
+++ b/drivers/media/platform/arm/mali-c55/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+mali-c55-y := mali-c55-capture.o \
+ mali-c55-core.o \
+ mali-c55-isp.o \
+ mali-c55-params.o \
+ mali-c55-resizer.o \
+ mali-c55-stats.o \
+ mali-c55-tpg.o
+
+obj-$(CONFIG_VIDEO_MALI_C55) += mali-c55.o
diff --git a/drivers/media/platform/arm/mali-c55/mali-c55-capture.c b/drivers/media/platform/arm/mali-c55/mali-c55-capture.c
new file mode 100644
index 000000000000..7aaa5c3f7354
--- /dev/null
+++ b/drivers/media/platform/arm/mali-c55/mali-c55-capture.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Mali-C55 ISP Driver - Video capture devices
+ *
+ * Copyright (C) 2025 Ideas on Board Oy
+ */
+
+#include <linux/cleanup.h>
+#include <linux/minmax.h>
+#include <linux/lockdep.h>
+#include <linux/pm_runtime.h>
+#include <linux/string.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mali-c55-common.h"
+#include "mali-c55-registers.h"
+
+static const struct mali_c55_format_info mali_c55_fmts[] = {
+ /*
+ * This table is missing some entries which need further work or
+ * investigation:
+ *
+ * Base mode 5 is "Generic Data"
+ * Base mode 8 is a backwards V4L2_PIX_FMT_XYUV32 - no V4L2 equivalent
+ * Base mode 9 seems to have no V4L2 equivalent
+ * Base modes 17, 19 and 20 describe formats which seem to have no V4L2
+ * equivalent
+ */
+ {
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_RGB121212_1X36,
+ MEDIA_BUS_FMT_RGB202020_1X60,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_RGB32,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_ARGB2101010,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_RGB121212_1X36,
+ MEDIA_BUS_FMT_RGB202020_1X60,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_A2R10G10B10,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_RGB121212_1X36,
+ MEDIA_BUS_FMT_RGB202020_1X60,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_RGB565,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_RGB121212_1X36,
+ MEDIA_BUS_FMT_RGB202020_1X60,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_RGB24,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_YUV10_1X30,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_YUY2,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_YUV10_1X30,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_UYVY,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_Y210,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_YUV10_1X30,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_Y210,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ /*
+ * This is something of a hack, the ISP thinks it's running NV12M but
+ * by setting uv_plane = 0 we simply discard that planes and only output
+ * the Y-plane.
+ */
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_YUV10_1X30,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_NV12_21,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_YUV10_1X30,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_NV12_21,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT1
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_YUV10_1X30,
+ },
+ .is_raw = false,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_NV12_21,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT2
+ }
+ },
+ /*
+ * RAW uncompressed formats are all packed in 16 bpp.
+ * TODO: Expand this list to encompass all possible RAW formats.
+ */
+ {
+ .fourcc = V4L2_PIX_FMT_SRGGB16,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_SRGGB16_1X16,
+ },
+ .is_raw = true,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_RAW16,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SBGGR16,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_SBGGR16_1X16,
+ },
+ .is_raw = true,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_RAW16,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGBRG16,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_SGBRG16_1X16,
+ },
+ .is_raw = true,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_RAW16,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_SGRBG16,
+ .mbus_codes = {
+ MEDIA_BUS_FMT_SGRBG16_1X16,
+ },
+ .is_raw = true,
+ .registers = {
+ .base_mode = MALI_C55_OUTPUT_RAW16,
+ .uv_plane = MALI_C55_OUTPUT_PLANE_ALT0
+ }
+ },
+};
+
+void mali_c55_cap_dev_write(struct mali_c55_cap_dev *cap_dev, unsigned int addr,
+ u32 val)
+{
+ mali_c55_ctx_write(cap_dev->mali_c55, addr + cap_dev->reg_offset, val);
+}
+
+static u32 mali_c55_cap_dev_read(struct mali_c55_cap_dev *cap_dev, unsigned int addr)
+{
+ return mali_c55_ctx_read(cap_dev->mali_c55, addr + cap_dev->reg_offset);
+}
+
+static void mali_c55_cap_dev_update_bits(struct mali_c55_cap_dev *cap_dev,
+ unsigned int addr, u32 mask, u32 val)
+{
+ u32 orig, tmp;
+
+ orig = mali_c55_cap_dev_read(cap_dev, addr);
+
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (tmp != orig)
+ mali_c55_cap_dev_write(cap_dev, addr, tmp);
+}
+
+static bool
+mali_c55_mbus_code_can_produce_fmt(const struct mali_c55_format_info *fmt,
+ u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(fmt->mbus_codes); i++) {
+ if (fmt->mbus_codes[i] == code)
+ return true;
+ }
+
+ return false;
+}
+
+bool mali_c55_format_is_raw(unsigned int mbus_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mali_c55_fmts); i++) {
+ if (mali_c55_fmts[i].mbus_codes[0] == mbus_code)
+ return mali_c55_fmts[i].is_raw;
+ }
+
+ return false;
+}
+
+static const struct mali_c55_format_info *
+mali_c55_format_from_pix(const u32 pixelformat)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mali_c55_fmts); i++) {
+ if (mali_c55_fmts[i].fourcc == pixelformat)
+ return &mali_c55_fmts[i];
+ }
+
+ /*
+ * If we find no matching pixelformat, we'll just default to the first
+ * one for now.
+ */
+
+ return &mali_c55_fmts[0];
+}
+
+static const char * const capture_device_names[] = {
+ "mali-c55 fr",
+ "mali-c55 ds",
+};
+
+static int mali_c55_link_validate(struct media_link *link)
+{
+ struct video_device *vdev =
+ media_entity_to_video_device(link->sink->entity);
+ struct mali_c55_cap_dev *cap_dev = video_get_drvdata(vdev);
+ struct v4l2_subdev *sd =
+ media_entity_to_v4l2_subdev(link->source->entity);
+ const struct v4l2_pix_format_mplane *pix_mp;
+ const struct mali_c55_format_info *cap_fmt;
+ struct v4l2_subdev_format sd_fmt = {
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ .pad = link->source->index,
+ };
+ int ret;
+
+ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt);
+ if (ret)
+ return ret;
+
+ pix_mp = &cap_dev->format.format;
+ cap_fmt = cap_dev->format.info;
+
+ if (sd_fmt.format.width != pix_mp->width ||
+ sd_fmt.format.height != pix_mp->height) {
+ dev_dbg(cap_dev->mali_c55->dev,
+ "link '%s':%u -> '%s':%u not valid: %ux%u != %ux%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index,
+ sd_fmt.format.width, sd_fmt.format.height,
+ pix_mp->width, pix_mp->height);
+ return -EPIPE;
+ }
+
+ if (!mali_c55_mbus_code_can_produce_fmt(cap_fmt, sd_fmt.format.code)) {
+ dev_dbg(cap_dev->mali_c55->dev,
+ "link '%s':%u -> '%s':%u not valid: mbus_code 0x%04x cannot produce pixel format %p4cc\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index,
+ sd_fmt.format.code, &pix_mp->pixelformat);
+ return -EPIPE;
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations mali_c55_media_ops = {
+ .link_validate = mali_c55_link_validate,
+};
+
+static int mali_c55_vb2_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mali_c55_cap_dev *cap_dev = q->drv_priv;
+ unsigned int i;
+
+ if (*num_planes) {
+ if (*num_planes != cap_dev->format.format.num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < cap_dev->format.format.num_planes; i++)
+ if (sizes[i] < cap_dev->format.format.plane_fmt[i].sizeimage)
+ return -EINVAL;
+ } else {
+ *num_planes = cap_dev->format.format.num_planes;
+ for (i = 0; i < cap_dev->format.format.num_planes; i++)
+ sizes[i] = cap_dev->format.format.plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static void mali_c55_buf_queue(struct vb2_buffer *vb)
+{
+ struct mali_c55_cap_dev *cap_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mali_c55_buffer *buf = container_of(vbuf,
+ struct mali_c55_buffer, vb);
+ unsigned int i;
+
+ buf->planes_pending = cap_dev->format.format.num_planes;
+
+ for (i = 0; i < cap_dev->format.format.num_planes; i++) {
+ unsigned long size = cap_dev->format.format.plane_fmt[i].sizeimage;
+
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ buf->vb.field = V4L2_FIELD_NONE;
+
+ guard(spinlock)(&cap_dev->buffers.lock);
+ list_add_tail(&buf->queue, &cap_dev->buffers.input);
+}
+
+static int mali_c55_buf_init(struct vb2_buffer *vb)
+{
+ struct mali_c55_cap_dev *cap_dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mali_c55_buffer *buf = container_of(vbuf,
+ struct mali_c55_buffer, vb);
+ unsigned int i;
+
+ for (i = 0; i < cap_dev->format.format.num_planes; i++)
+ buf->addrs[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+
+ return 0;
+}
+
+void mali_c55_set_next_buffer(struct mali_c55_cap_dev *cap_dev)
+{
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct mali_c55_buffer *buf;
+ dma_addr_t *addrs;
+
+ scoped_guard(spinlock, &cap_dev->buffers.lock) {
+ buf = list_first_entry_or_null(&cap_dev->buffers.input,
+ struct mali_c55_buffer, queue);
+ if (buf)
+ list_del(&buf->queue);
+ }
+
+ if (!buf) {
+ /*
+ * If we underflow then we can tell the ISP that we don't want
+ * to write out the next frame.
+ */
+ mali_c55_cap_dev_update_bits(cap_dev,
+ MALI_C55_REG_Y_WRITER_MODE,
+ MALI_C55_WRITER_FRAME_WRITE_MASK,
+ 0x00);
+ mali_c55_cap_dev_update_bits(cap_dev,
+ MALI_C55_REG_UV_WRITER_MODE,
+ MALI_C55_WRITER_FRAME_WRITE_MASK,
+ 0x00);
+ return;
+ }
+
+ pix_mp = &cap_dev->format.format;
+
+ mali_c55_cap_dev_update_bits(cap_dev, MALI_C55_REG_Y_WRITER_MODE,
+ MALI_C55_WRITER_FRAME_WRITE_MASK,
+ MALI_C55_WRITER_FRAME_WRITE_ENABLE);
+ if (cap_dev->format.info->registers.uv_plane)
+ mali_c55_cap_dev_update_bits(cap_dev,
+ MALI_C55_REG_UV_WRITER_MODE,
+ MALI_C55_WRITER_FRAME_WRITE_MASK,
+ MALI_C55_WRITER_FRAME_WRITE_ENABLE);
+
+ addrs = buf->addrs;
+ mali_c55_cap_dev_write(cap_dev,
+ MALI_C55_REG_Y_WRITER_BANKS_BASE,
+ addrs[MALI_C55_PLANE_Y]);
+ mali_c55_cap_dev_write(cap_dev,
+ MALI_C55_REG_UV_WRITER_BANKS_BASE,
+ addrs[MALI_C55_PLANE_UV]);
+
+ mali_c55_cap_dev_write(cap_dev,
+ MALI_C55_REG_Y_WRITER_OFFSET,
+ pix_mp->plane_fmt[MALI_C55_PLANE_Y].bytesperline);
+ mali_c55_cap_dev_write(cap_dev,
+ MALI_C55_REG_UV_WRITER_OFFSET,
+ pix_mp->plane_fmt[MALI_C55_PLANE_UV].bytesperline);
+
+ guard(spinlock)(&cap_dev->buffers.processing_lock);
+ list_add_tail(&buf->queue, &cap_dev->buffers.processing);
+}
+
+/**
+ * mali_c55_set_plane_done - mark the plane as written and process the buffer if
+ * both planes are finished.
+ * @cap_dev: pointer to the fr or ds pipe output
+ * @plane: the plane to mark as completed
+ *
+ * The Mali C55 ISP has multiplanar outputs for some formats that come with
+ * two separate "buffer write completed" interrupts - we need to flag each
+ * plane's completion and check whether both planes are done - if so,
+ * complete the buffer in vb2.
+ */
+void mali_c55_set_plane_done(struct mali_c55_cap_dev *cap_dev,
+ enum mali_c55_planes plane)
+{
+ struct mali_c55_isp *isp = &cap_dev->mali_c55->isp;
+ struct mali_c55_buffer *buf;
+
+ scoped_guard(spinlock, &cap_dev->buffers.processing_lock) {
+ buf = list_first_entry_or_null(&cap_dev->buffers.processing,
+ struct mali_c55_buffer, queue);
+
+ /*
+ * If the stream was stopped, the buffer might have been sent
+ * back to userspace already.
+ */
+ if (!buf || --buf->planes_pending)
+ return;
+
+ list_del(&buf->queue);
+ }
+
+ /* If the other plane is also done... */
+ buf->vb.vb2_buf.timestamp = ktime_get_boottime_ns();
+ buf->vb.sequence = isp->frame_sequence++;
+ vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+}
+
+static void mali_c55_cap_dev_stream_disable(struct mali_c55_cap_dev *cap_dev)
+{
+ mali_c55_cap_dev_update_bits(cap_dev, MALI_C55_REG_Y_WRITER_MODE,
+ MALI_C55_WRITER_FRAME_WRITE_MASK, 0x00);
+ mali_c55_cap_dev_update_bits(cap_dev, MALI_C55_REG_UV_WRITER_MODE,
+ MALI_C55_WRITER_FRAME_WRITE_MASK, 0x00);
+}
+
+static void mali_c55_cap_dev_stream_enable(struct mali_c55_cap_dev *cap_dev)
+{
+ /*
+ * The Mali ISP can hold up to 5 buffer addresses and simply cycle
+ * through them, but it's not clear that the vb2 queue _guarantees_ it
+ * will queue buffers to the driver in a fixed order, and ensuring we
+ * call vb2_buffer_done() for the right buffer would add pointless
+ * complexity given that in multi-context mode we'd need to re-write
+ * those registers every frame anyway. So we tell the ISP to use a
+ * single register and update it for each frame.
+ */
+ mali_c55_cap_dev_update_bits(cap_dev,
+ MALI_C55_REG_Y_WRITER_BANKS_CONFIG,
+ MALI_C55_REG_Y_WRITER_MAX_BANKS_MASK, 0);
+ mali_c55_cap_dev_update_bits(cap_dev,
+ MALI_C55_REG_UV_WRITER_BANKS_CONFIG,
+ MALI_C55_REG_UV_WRITER_MAX_BANKS_MASK, 0);
+
+ mali_c55_set_next_buffer(cap_dev);
+}
+
+static void mali_c55_cap_dev_return_buffers(struct mali_c55_cap_dev *cap_dev,
+ enum vb2_buffer_state state)
+{
+ struct mali_c55_buffer *buf, *tmp;
+
+ scoped_guard(spinlock, &cap_dev->buffers.lock) {
+ list_for_each_entry_safe(buf, tmp, &cap_dev->buffers.input,
+ queue) {
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+ }
+
+ guard(spinlock)(&cap_dev->buffers.processing_lock);
+ list_for_each_entry_safe(buf, tmp, &cap_dev->buffers.processing, queue) {
+ list_del(&buf->queue);
+ vb2_buffer_done(&buf->vb.vb2_buf, state);
+ }
+}
+
+static void mali_c55_cap_dev_format_configure(struct mali_c55_cap_dev *cap_dev)
+{
+ const struct mali_c55_format_info *capture_format = cap_dev->format.info;
+ const struct v4l2_pix_format_mplane *pix_mp = &cap_dev->format.format;
+ const struct v4l2_format_info *info;
+
+ info = v4l2_format_info(pix_mp->pixelformat);
+ if (WARN_ON(!info))
+ return;
+
+ mali_c55_cap_dev_write(cap_dev, MALI_C55_REG_Y_WRITER_MODE,
+ capture_format->registers.base_mode);
+ mali_c55_cap_dev_write(cap_dev, MALI_C55_REG_ACTIVE_OUT_Y_SIZE,
+ MALI_C55_REG_ACTIVE_OUT_SIZE_W(pix_mp->width) |
+ MALI_C55_REG_ACTIVE_OUT_SIZE_H(pix_mp->height));
+
+ if (info->mem_planes > 1) {
+ mali_c55_cap_dev_write(cap_dev, MALI_C55_REG_UV_WRITER_MODE,
+ capture_format->registers.base_mode);
+ mali_c55_cap_dev_update_bits(cap_dev,
+ MALI_C55_REG_UV_WRITER_MODE,
+ MALI_C55_WRITER_SUBMODE_MASK,
+ MALI_C55_WRITER_SUBMODE(capture_format->registers.uv_plane));
+
+ mali_c55_cap_dev_write(cap_dev, MALI_C55_REG_ACTIVE_OUT_UV_SIZE,
+ MALI_C55_REG_ACTIVE_OUT_SIZE_W(pix_mp->width) |
+ MALI_C55_REG_ACTIVE_OUT_SIZE_H(pix_mp->height));
+ }
+
+ if (info->pixel_enc == V4L2_PIXEL_ENC_YUV) {
+ mali_c55_cap_dev_write(cap_dev, MALI_C55_REG_CS_CONV_CONFIG,
+ MALI_C55_CS_CONV_MATRIX_MASK);
+
+ if (info->hdiv > 1)
+ mali_c55_cap_dev_update_bits(cap_dev,
+ MALI_C55_REG_CS_CONV_CONFIG,
+ MALI_C55_CS_CONV_HORZ_DOWNSAMPLE_MASK,
+ MALI_C55_CS_CONV_HORZ_DOWNSAMPLE_ENABLE);
+ if (info->vdiv > 1)
+ mali_c55_cap_dev_update_bits(cap_dev,
+ MALI_C55_REG_CS_CONV_CONFIG,
+ MALI_C55_CS_CONV_VERT_DOWNSAMPLE_MASK,
+ MALI_C55_CS_CONV_VERT_DOWNSAMPLE_ENABLE);
+ if (info->hdiv > 1 || info->vdiv > 1)
+ mali_c55_cap_dev_update_bits(cap_dev,
+ MALI_C55_REG_CS_CONV_CONFIG,
+ MALI_C55_CS_CONV_FILTER_MASK,
+ MALI_C55_CS_CONV_FILTER_ENABLE);
+ }
+}
+
+static int mali_c55_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mali_c55_cap_dev *cap_dev = q->drv_priv;
+ struct mali_c55 *mali_c55 = cap_dev->mali_c55;
+ struct mali_c55_resizer *rsz = cap_dev->rsz;
+ struct mali_c55_isp *isp = &mali_c55->isp;
+ int ret;
+
+ guard(mutex)(&isp->capture_lock);
+
+ ret = pm_runtime_resume_and_get(mali_c55->dev);
+ if (ret)
+ goto err_return_buffers;
+
+ ret = video_device_pipeline_alloc_start(&cap_dev->vdev);
+ if (ret) {
+ dev_dbg(mali_c55->dev, "%s failed to start media pipeline\n",
+ cap_dev->vdev.name);
+ goto err_pm_put;
+ }
+
+ mali_c55_cap_dev_format_configure(cap_dev);
+ mali_c55_cap_dev_stream_enable(cap_dev);
+
+ ret = v4l2_subdev_enable_streams(&rsz->sd, MALI_C55_RSZ_SOURCE_PAD,
+ BIT(0));
+ if (ret)
+ goto err_disable_cap_dev;
+
+ if (mali_c55_pipeline_ready(mali_c55)) {
+ ret = v4l2_subdev_enable_streams(&mali_c55->isp.sd,
+ MALI_C55_ISP_PAD_SOURCE_VIDEO,
+ BIT(0));
+ if (ret < 0)
+ goto err_disable_rsz;
+ }
+
+ return 0;
+
+err_disable_rsz:
+ v4l2_subdev_disable_streams(&rsz->sd, MALI_C55_RSZ_SOURCE_PAD, BIT(0));
+err_disable_cap_dev:
+ mali_c55_cap_dev_stream_disable(cap_dev);
+ video_device_pipeline_stop(&cap_dev->vdev);
+err_pm_put:
+ pm_runtime_put_autosuspend(mali_c55->dev);
+err_return_buffers:
+ mali_c55_cap_dev_return_buffers(cap_dev, VB2_BUF_STATE_QUEUED);
+
+ return ret;
+}
+
+static void mali_c55_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct mali_c55_cap_dev *cap_dev = q->drv_priv;
+ struct mali_c55 *mali_c55 = cap_dev->mali_c55;
+ struct mali_c55_resizer *rsz = cap_dev->rsz;
+ struct mali_c55_isp *isp = &mali_c55->isp;
+
+ guard(mutex)(&isp->capture_lock);
+
+ if (mali_c55_pipeline_ready(mali_c55)) {
+ if (v4l2_subdev_is_streaming(&isp->sd))
+ v4l2_subdev_disable_streams(&isp->sd,
+ MALI_C55_ISP_PAD_SOURCE_VIDEO,
+ BIT(0));
+ }
+
+ v4l2_subdev_disable_streams(&rsz->sd, MALI_C55_RSZ_SOURCE_PAD, BIT(0));
+ mali_c55_cap_dev_stream_disable(cap_dev);
+ mali_c55_cap_dev_return_buffers(cap_dev, VB2_BUF_STATE_ERROR);
+ video_device_pipeline_stop(&cap_dev->vdev);
+ pm_runtime_put_autosuspend(mali_c55->dev);
+}
+
+static const struct vb2_ops mali_c55_vb2_ops = {
+ .queue_setup = &mali_c55_vb2_queue_setup,
+ .buf_queue = &mali_c55_buf_queue,
+ .buf_init = &mali_c55_buf_init,
+ .start_streaming = &mali_c55_vb2_start_streaming,
+ .stop_streaming = &mali_c55_vb2_stop_streaming,
+};
+
+static const struct v4l2_file_operations mali_c55_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static void mali_c55_try_fmt(struct v4l2_pix_format_mplane *pix_mp)
+{
+ const struct mali_c55_format_info *capture_format;
+ const struct v4l2_format_info *info;
+ struct v4l2_plane_pix_format *plane, *y_plane;
+ unsigned int padding;
+ unsigned int i;
+
+ capture_format = mali_c55_format_from_pix(pix_mp->pixelformat);
+ pix_mp->pixelformat = capture_format->fourcc;
+
+ pix_mp->width = clamp(pix_mp->width, MALI_C55_MIN_WIDTH,
+ MALI_C55_MAX_WIDTH);
+ pix_mp->height = clamp(pix_mp->height, MALI_C55_MIN_HEIGHT,
+ MALI_C55_MAX_HEIGHT);
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->colorspace = V4L2_COLORSPACE_DEFAULT;
+ pix_mp->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ pix_mp->quantization = V4L2_QUANTIZATION_DEFAULT;
+
+ info = v4l2_format_info(pix_mp->pixelformat);
+ pix_mp->num_planes = info->mem_planes;
+ memset(pix_mp->plane_fmt, 0, sizeof(pix_mp->plane_fmt));
+
+ y_plane = &pix_mp->plane_fmt[0];
+ y_plane->bytesperline = clamp(y_plane->bytesperline,
+ info->bpp[0] * pix_mp->width, 65535U);
+
+ /*
+ * The ISP requires that the stride be aligned to 16 bytes. This is not
+ * detailed in the documentation but has been verified experimentally.
+ */
+ y_plane->bytesperline = ALIGN(y_plane->bytesperline, 16);
+ y_plane->sizeimage = y_plane->bytesperline * pix_mp->height;
+
+ padding = y_plane->bytesperline - (pix_mp->width * info->bpp[0]);
+
+ for (i = 1; i < info->comp_planes; i++) {
+ plane = &pix_mp->plane_fmt[i];
+
+ plane->bytesperline = DIV_ROUND_UP(info->bpp[i] * pix_mp->width,
+ info->hdiv) + padding;
+ plane->sizeimage = DIV_ROUND_UP(plane->bytesperline *
+ pix_mp->height, info->vdiv);
+ }
+
+ if (info->mem_planes == 1) {
+ for (i = 1; i < info->comp_planes; i++) {
+ plane = &pix_mp->plane_fmt[i];
+ y_plane->sizeimage += plane->sizeimage;
+ }
+ }
+}
+
+static int mali_c55_try_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ mali_c55_try_fmt(&f->fmt.pix_mp);
+
+ return 0;
+}
+
+static void mali_c55_set_format(struct mali_c55_cap_dev *cap_dev,
+ struct v4l2_pix_format_mplane *pix_mp)
+{
+ mali_c55_try_fmt(pix_mp);
+
+ cap_dev->format.format = *pix_mp;
+ cap_dev->format.info = mali_c55_format_from_pix(pix_mp->pixelformat);
+}
+
+static int mali_c55_s_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mali_c55_cap_dev *cap_dev = video_drvdata(file);
+
+ if (vb2_is_busy(&cap_dev->queue))
+ return -EBUSY;
+
+ mali_c55_set_format(cap_dev, &f->fmt.pix_mp);
+
+ return 0;
+}
+
+static int mali_c55_g_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mali_c55_cap_dev *cap_dev = video_drvdata(file);
+
+ f->fmt.pix_mp = cap_dev->format.format;
+
+ return 0;
+}
+
+static int mali_c55_enum_fmt_vid_cap_mplane(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ struct mali_c55_cap_dev *cap_dev = video_drvdata(file);
+ unsigned int j = 0;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mali_c55_fmts); i++) {
+ if (f->mbus_code &&
+ !mali_c55_mbus_code_can_produce_fmt(&mali_c55_fmts[i],
+ f->mbus_code))
+ continue;
+
+ /* Downscale pipe can't output RAW formats */
+ if (mali_c55_fmts[i].is_raw &&
+ cap_dev->reg_offset == MALI_C55_CAP_DEV_DS_REG_OFFSET)
+ continue;
+
+ if (j++ == f->index) {
+ f->pixelformat = mali_c55_fmts[i].fourcc;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int mali_c55_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, MALI_C55_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "ARM Mali-C55 ISP", sizeof(cap->card));
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mali_c55_v4l2_ioctl_ops = {
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_try_fmt_vid_cap_mplane = mali_c55_try_fmt_vid_cap_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mali_c55_s_fmt_vid_cap_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = mali_c55_g_fmt_vid_cap_mplane,
+ .vidioc_enum_fmt_vid_cap = mali_c55_enum_fmt_vid_cap_mplane,
+ .vidioc_querycap = mali_c55_querycap,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int mali_c55_register_cap_dev(struct mali_c55 *mali_c55,
+ enum mali_c55_cap_devs cap_dev_id)
+{
+ struct mali_c55_cap_dev *cap_dev = &mali_c55->cap_devs[cap_dev_id];
+ struct v4l2_pix_format_mplane pix_mp;
+ struct video_device *vdev;
+ struct vb2_queue *vb2q;
+ int ret;
+
+ vdev = &cap_dev->vdev;
+ vb2q = &cap_dev->queue;
+
+ cap_dev->mali_c55 = mali_c55;
+ mutex_init(&cap_dev->lock);
+ INIT_LIST_HEAD(&cap_dev->buffers.input);
+ INIT_LIST_HEAD(&cap_dev->buffers.processing);
+ spin_lock_init(&cap_dev->buffers.lock);
+ spin_lock_init(&cap_dev->buffers.processing_lock);
+
+ switch (cap_dev_id) {
+ case MALI_C55_CAP_DEV_FR:
+ cap_dev->rsz = &mali_c55->resizers[MALI_C55_RSZ_FR];
+ cap_dev->reg_offset = MALI_C55_CAP_DEV_FR_REG_OFFSET;
+ break;
+ case MALI_C55_CAP_DEV_DS:
+ cap_dev->rsz = &mali_c55->resizers[MALI_C55_RSZ_DS];
+ cap_dev->reg_offset = MALI_C55_CAP_DEV_DS_REG_OFFSET;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_destroy_mutex;
+ }
+
+ cap_dev->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&cap_dev->vdev.entity, 1, &cap_dev->pad);
+ if (ret)
+ goto err_destroy_mutex;
+
+ vb2q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ vb2q->io_modes = VB2_MMAP | VB2_DMABUF;
+ vb2q->drv_priv = cap_dev;
+ vb2q->mem_ops = &vb2_dma_contig_memops;
+ vb2q->ops = &mali_c55_vb2_ops;
+ vb2q->buf_struct_size = sizeof(struct mali_c55_buffer);
+ vb2q->min_queued_buffers = 1;
+ vb2q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ vb2q->lock = &cap_dev->lock;
+ vb2q->dev = mali_c55->dev;
+
+ ret = vb2_queue_init(vb2q);
+ if (ret) {
+ dev_err(mali_c55->dev, "%s vb2 queue init failed\n",
+ cap_dev->vdev.name);
+ goto err_cleanup_media_entity;
+ }
+
+ strscpy(cap_dev->vdev.name, capture_device_names[cap_dev_id],
+ sizeof(cap_dev->vdev.name));
+ vdev->release = video_device_release_empty;
+ vdev->fops = &mali_c55_v4l2_fops;
+ vdev->ioctl_ops = &mali_c55_v4l2_ioctl_ops;
+ vdev->lock = &cap_dev->lock;
+ vdev->v4l2_dev = &mali_c55->v4l2_dev;
+ vdev->queue = &cap_dev->queue;
+ vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE |
+ V4L2_CAP_STREAMING | V4L2_CAP_IO_MC;
+ vdev->entity.ops = &mali_c55_media_ops;
+ video_set_drvdata(vdev, cap_dev);
+
+ memset(&pix_mp, 0, sizeof(pix_mp));
+ pix_mp.pixelformat = V4L2_PIX_FMT_RGB565;
+ pix_mp.width = MALI_C55_DEFAULT_WIDTH;
+ pix_mp.height = MALI_C55_DEFAULT_HEIGHT;
+ mali_c55_set_format(cap_dev, &pix_mp);
+
+ ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(mali_c55->dev,
+ "%s failed to register video device\n",
+ cap_dev->vdev.name);
+ goto err_release_vb2q;
+ }
+
+ return 0;
+
+err_release_vb2q:
+ vb2_queue_release(vb2q);
+err_cleanup_media_entity:
+ media_entity_cleanup(&cap_dev->vdev.entity);
+err_destroy_mutex:
+ mutex_destroy(&cap_dev->lock);
+
+ return ret;
+}
+
+int mali_c55_register_capture_devs(struct mali_c55 *mali_c55)
+{
+ int ret;
+
+ ret = mali_c55_register_cap_dev(mali_c55, MALI_C55_CAP_DEV_FR);
+ if (ret)
+ return ret;
+
+ if (mali_c55->capabilities & MALI_C55_GPS_DS_PIPE_FITTED) {
+ ret = mali_c55_register_cap_dev(mali_c55, MALI_C55_CAP_DEV_DS);
+ if (ret) {
+ mali_c55_unregister_capture_devs(mali_c55);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void mali_c55_unregister_cap_dev(struct mali_c55 *mali_c55,
+ enum mali_c55_cap_devs cap_dev_id)
+{
+ struct mali_c55_cap_dev *cap_dev = &mali_c55->cap_devs[cap_dev_id];
+
+ if (!video_is_registered(&cap_dev->vdev))
+ return;
+
+ vb2_video_unregister_device(&cap_dev->vdev);
+ media_entity_cleanup(&cap_dev->vdev.entity);
+ mutex_destroy(&cap_dev->lock);
+}
+
+void mali_c55_unregister_capture_devs(struct mali_c55 *mali_c55)
+{
+ mali_c55_unregister_cap_dev(mali_c55, MALI_C55_CAP_DEV_FR);
+ if (mali_c55->capabilities & MALI_C55_GPS_DS_PIPE_FITTED)
+ mali_c55_unregister_cap_dev(mali_c55, MALI_C55_CAP_DEV_DS);
+}
diff --git a/drivers/media/platform/arm/mali-c55/mali-c55-common.h b/drivers/media/platform/arm/mali-c55/mali-c55-common.h
new file mode 100644
index 000000000000..31c1deaca146
--- /dev/null
+++ b/drivers/media/platform/arm/mali-c55/mali-c55-common.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM Mali-C55 ISP Driver - Common definitions
+ *
+ * Copyright (C) 2025 Ideas on Board Oy
+ */
+
+#ifndef _MALI_C55_COMMON_H
+#define _MALI_C55_COMMON_H
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-isp.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-v4l2.h>
+
+#define MALI_C55_DRIVER_NAME "mali-c55"
+
+/* min and max values for the image sizes */
+#define MALI_C55_MIN_WIDTH 640U
+#define MALI_C55_MIN_HEIGHT 480U
+#define MALI_C55_MAX_WIDTH 8192U
+#define MALI_C55_MAX_HEIGHT 8192U
+#define MALI_C55_DEFAULT_WIDTH 1920U
+#define MALI_C55_DEFAULT_HEIGHT 1080U
+
+#define MALI_C55_DEFAULT_MEDIA_BUS_FMT MEDIA_BUS_FMT_RGB121212_1X36
+
+#define MALI_C55_NUM_CLKS 3
+#define MALI_C55_NUM_RESETS 3
+
+struct device;
+struct mali_c55;
+struct mali_c55_cap_dev;
+struct media_pipeline;
+struct mali_c55_params_buffer;
+struct platform_device;
+struct resource;
+
+enum mali_c55_isp_pads {
+ MALI_C55_ISP_PAD_SINK_VIDEO,
+ MALI_C55_ISP_PAD_SOURCE_VIDEO,
+ MALI_C55_ISP_PAD_SOURCE_BYPASS,
+ MALI_C55_ISP_PAD_SOURCE_STATS,
+ MALI_C55_ISP_PAD_SINK_PARAMS,
+ MALI_C55_ISP_NUM_PADS,
+};
+
+struct mali_c55_tpg {
+ struct mali_c55 *mali_c55;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct mali_c55_tpg_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *vblank;
+ } ctrls;
+};
+
+struct mali_c55_isp {
+ struct mali_c55 *mali_c55;
+ struct v4l2_subdev sd;
+ struct media_pad pads[MALI_C55_ISP_NUM_PADS];
+ struct v4l2_ctrl_handler handler;
+ struct media_pad *remote_src;
+ /* Mutex to guard vb2 start/stop streaming */
+ struct mutex capture_lock;
+ unsigned int frame_sequence;
+};
+
+enum mali_c55_resizer_ids {
+ MALI_C55_RSZ_FR,
+ MALI_C55_RSZ_DS,
+ MALI_C55_NUM_RSZS,
+};
+
+enum mali_c55_rsz_pads {
+ MALI_C55_RSZ_SINK_PAD,
+ MALI_C55_RSZ_SOURCE_PAD,
+ MALI_C55_RSZ_SINK_BYPASS_PAD,
+ MALI_C55_RSZ_NUM_PADS
+};
+
+struct mali_c55_resizer {
+ struct mali_c55 *mali_c55;
+ struct mali_c55_cap_dev *cap_dev;
+ enum mali_c55_resizer_ids id;
+ struct v4l2_subdev sd;
+ struct media_pad pads[MALI_C55_RSZ_NUM_PADS];
+ unsigned int num_routes;
+};
+
+enum mali_c55_cap_devs {
+ MALI_C55_CAP_DEV_FR,
+ MALI_C55_CAP_DEV_DS,
+ MALI_C55_NUM_CAP_DEVS
+};
+
+struct mali_c55_format_info {
+ u32 fourcc;
+ /*
+ * The output formats can be produced by a couple of different media bus
+ * formats, depending on how the ISP is configured.
+ */
+ unsigned int mbus_codes[2];
+ bool is_raw;
+ struct {
+ u32 base_mode;
+ u32 uv_plane;
+ } registers;
+};
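+
+/*
+ * A hypothetical entry for a table of these structures (illustrative only;
+ * the fourcc, bus codes and register values below are placeholders, not
+ * values from the driver's real format table):
+ *
+ * {
+ * .fourcc = V4L2_PIX_FMT_NV12M,
+ * .mbus_codes = {
+ * MEDIA_BUS_FMT_RGB121212_1X36,
+ * MEDIA_BUS_FMT_YUV10_1X30,
+ * },
+ * .is_raw = false,
+ * .registers = { .base_mode = 0x0, .uv_plane = 0x0 },
+ * },
+ */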
+
+struct mali_c55_isp_format_info {
+ u32 code;
+ u32 shifted_code;
+ bool bypass;
+ u32 order;
+};
+
+enum mali_c55_planes {
+ MALI_C55_PLANE_Y,
+ MALI_C55_PLANE_UV,
+ MALI_C55_NUM_PLANES
+};
+
+struct mali_c55_buffer {
+ struct vb2_v4l2_buffer vb;
+ unsigned int planes_pending;
+ struct list_head queue;
+ dma_addr_t addrs[MALI_C55_NUM_PLANES];
+};
+
+struct mali_c55_cap_dev {
+ struct mali_c55 *mali_c55;
+ struct mali_c55_resizer *rsz;
+ struct video_device vdev;
+ struct media_pad pad;
+ struct vb2_queue queue;
+ /* Mutex to provide to vb2 */
+ struct mutex lock;
+ unsigned int reg_offset;
+
+ struct {
+ const struct mali_c55_format_info *info;
+ struct v4l2_pix_format_mplane format;
+ } format;
+
+ struct {
+ /* Spinlock to guard buffer queue */
+ spinlock_t lock;
+ /* Spinlock to guard the queue of buffers being processed */
+ spinlock_t processing_lock;
+ struct list_head input;
+ struct list_head processing;
+ } buffers;
+};
+
+struct mali_c55_stats_buf {
+ struct vb2_v4l2_buffer vb;
+ unsigned int segments_remaining;
+ struct list_head queue;
+ bool failed;
+};
+
+struct mali_c55_params_buf {
+ struct vb2_v4l2_buffer vb;
+ struct list_head queue;
+ struct v4l2_isp_params_buffer *config;
+};
+
+struct mali_c55_stats {
+ struct mali_c55 *mali_c55;
+ struct video_device vdev;
+ struct vb2_queue queue;
+ struct media_pad pad;
+ /* Mutex to provide to vb2 */
+ struct mutex lock;
+
+ struct {
+ /* Spinlock to guard buffer queue */
+ spinlock_t lock;
+ struct list_head queue;
+ } buffers;
+};
+
+struct mali_c55_params {
+ struct mali_c55 *mali_c55;
+ struct video_device vdev;
+ struct vb2_queue queue;
+ struct media_pad pad;
+ /* Mutex to provide to vb2 */
+ struct mutex lock;
+
+ struct {
+ /* Spinlock to guard buffer queue */
+ spinlock_t lock;
+ struct list_head queue;
+ } buffers;
+};
+
+enum mali_c55_config_spaces {
+ MALI_C55_CONFIG_PONG,
+ MALI_C55_CONFIG_PING,
+};
+
+/**
+ * struct mali_c55_context - Fields relating to a single camera context
+ *
+ * @mali_c55: Pointer to the main struct mali_c55
+ * @registers: A pointer to some allocated memory holding register
+ * values to be written to the hardware at frame interrupt
+ * @base: Base address of the config space in the hardware
+ * @lock: A spinlock to protect against writes to @registers whilst that
+ * space is being copied to the hardware
+ * @list: A list head to facilitate a context queue
+ */
+struct mali_c55_context {
+ struct mali_c55 *mali_c55;
+ u32 *registers;
+ phys_addr_t base;
+ /* Spinlock to prevent simultaneous access of register space */
+ spinlock_t lock;
+ struct list_head list;
+};
+
+struct mali_c55 {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_bulk_data clks[MALI_C55_NUM_CLKS];
+ struct reset_control_bulk_data resets[MALI_C55_NUM_RESETS];
+ int irqnum;
+
+ u16 capabilities;
+ bool inline_mode;
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_async_notifier notifier;
+ struct media_pipeline pipe;
+
+ struct mali_c55_tpg tpg;
+ struct mali_c55_isp isp;
+ struct mali_c55_resizer resizers[MALI_C55_NUM_RSZS];
+ struct mali_c55_cap_dev cap_devs[MALI_C55_NUM_CAP_DEVS];
+ struct mali_c55_params params;
+ struct mali_c55_stats stats;
+
+ struct mali_c55_context context;
+ u32 next_config;
+};
+
+void mali_c55_write(struct mali_c55 *mali_c55, unsigned int addr, u32 val);
+void mali_c55_cap_dev_write(struct mali_c55_cap_dev *cap_dev, unsigned int addr,
+ u32 val);
+void mali_c55_update_bits(struct mali_c55 *mali_c55, unsigned int addr,
+ u32 mask, u32 val);
+u32 mali_c55_read(struct mali_c55 *mali_c55, unsigned int addr);
+void mali_c55_ctx_write(struct mali_c55 *mali_c55, unsigned int addr, u32 val);
+u32 mali_c55_ctx_read(struct mali_c55 *mali_c55, unsigned int addr);
+void mali_c55_ctx_update_bits(struct mali_c55 *mali_c55, unsigned int addr,
+ u32 mask, u32 val);
+
+int mali_c55_config_write(struct mali_c55_context *ctx,
+ enum mali_c55_config_spaces cfg_space,
+ bool force_synchronous);
+
+int mali_c55_register_isp(struct mali_c55 *mali_c55);
+int mali_c55_register_tpg(struct mali_c55 *mali_c55);
+void mali_c55_unregister_tpg(struct mali_c55 *mali_c55);
+void mali_c55_unregister_isp(struct mali_c55 *mali_c55);
+int mali_c55_register_resizers(struct mali_c55 *mali_c55);
+void mali_c55_unregister_resizers(struct mali_c55 *mali_c55);
+int mali_c55_register_capture_devs(struct mali_c55 *mali_c55);
+void mali_c55_unregister_capture_devs(struct mali_c55 *mali_c55);
+int mali_c55_register_stats(struct mali_c55 *mali_c55);
+void mali_c55_unregister_stats(struct mali_c55 *mali_c55);
+int mali_c55_register_params(struct mali_c55 *mali_c55);
+void mali_c55_unregister_params(struct mali_c55 *mali_c55);
+struct mali_c55_context *mali_c55_get_active_context(struct mali_c55 *mali_c55);
+void mali_c55_set_plane_done(struct mali_c55_cap_dev *cap_dev,
+ enum mali_c55_planes plane);
+void mali_c55_set_next_buffer(struct mali_c55_cap_dev *cap_dev);
+void mali_c55_isp_queue_event_sof(struct mali_c55 *mali_c55);
+
+bool mali_c55_format_is_raw(unsigned int mbus_code);
+
+const struct mali_c55_isp_format_info *
+mali_c55_isp_fmt_next(const struct mali_c55_isp_format_info *fmt);
+const struct mali_c55_isp_format_info *
+mali_c55_isp_get_mbus_config_by_code(u32 code);
+const struct mali_c55_isp_format_info *
+mali_c55_isp_get_mbus_config_by_shifted_code(u32 code);
+const struct mali_c55_isp_format_info *
+mali_c55_isp_get_mbus_config_by_index(u32 index);
+bool mali_c55_pipeline_ready(struct mali_c55 *mali_c55);
+void mali_c55_stats_fill_buffer(struct mali_c55 *mali_c55,
+ enum mali_c55_config_spaces cfg_space);
+void mali_c55_params_write_config(struct mali_c55 *mali_c55);
+
+#endif /* _MALI_C55_COMMON_H */
diff --git a/drivers/media/platform/arm/mali-c55/mali-c55-core.c b/drivers/media/platform/arm/mali-c55/mali-c55-core.c
new file mode 100644
index 000000000000..43b834459ccf
--- /dev/null
+++ b/drivers/media/platform/arm/mali-c55/mali-c55-core.c
@@ -0,0 +1,917 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Mali-C55 ISP Driver - Core driver code
+ *
+ * Copyright (C) 2025 Ideas on Board Oy
+ */
+
+#include <linux/bitops.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mali-c55-common.h"
+#include "mali-c55-registers.h"
+
+static const char * const mali_c55_interrupt_names[] = {
+ [MALI_C55_IRQ_ISP_START] = "ISP start",
+ [MALI_C55_IRQ_ISP_DONE] = "ISP done",
+ [MALI_C55_IRQ_MCM_ERROR] = "Multi-context management error",
+ [MALI_C55_IRQ_BROKEN_FRAME_ERROR] = "Broken frame error",
+ [MALI_C55_IRQ_MET_AF_DONE] = "AF metering done",
+ [MALI_C55_IRQ_MET_AEXP_DONE] = "AEXP metering done",
+ [MALI_C55_IRQ_MET_AWB_DONE] = "AWB metering done",
+ [MALI_C55_IRQ_AEXP_1024_DONE] = "AEXP 1024-bit histogram done",
+ [MALI_C55_IRQ_IRIDIX_MET_DONE] = "Iridix metering done",
+ [MALI_C55_IRQ_LUT_INIT_DONE] = "LUT memory init done",
+ [MALI_C55_IRQ_FR_Y_DONE] = "Full resolution Y plane DMA done",
+ [MALI_C55_IRQ_FR_UV_DONE] = "Full resolution U/V plane DMA done",
+ [MALI_C55_IRQ_DS_Y_DONE] = "Downscale Y plane DMA done",
+ [MALI_C55_IRQ_DS_UV_DONE] = "Downscale U/V plane DMA done",
+ [MALI_C55_IRQ_LINEARIZATION_DONE] = "Linearisation done",
+ [MALI_C55_IRQ_RAW_FRONTEND_DONE] = "Raw frontend processing done",
+ [MALI_C55_IRQ_NOISE_REDUCTION_DONE] = "Noise reduction done",
+ [MALI_C55_IRQ_IRIDIX_DONE] = "Iridix done",
+ [MALI_C55_IRQ_BAYER2RGB_DONE] = "Bayer to RGB conversion done",
+ [MALI_C55_IRQ_WATCHDOG_TIMER] = "Watchdog timer timed out",
+ [MALI_C55_IRQ_FRAME_COLLISION] = "Frame collision error",
+ [MALI_C55_IRQ_UNUSED] = "IRQ bit unused",
+ [MALI_C55_IRQ_DMA_ERROR] = "DMA error",
+ [MALI_C55_IRQ_INPUT_STOPPED] = "Input port safely stopped",
+ [MALI_C55_IRQ_MET_AWB_TARGET1_HIT] = "AWB metering target 1 address hit",
+ [MALI_C55_IRQ_MET_AWB_TARGET2_HIT] = "AWB metering target 2 address hit"
+};
+
+static const unsigned int config_space_addrs[] = {
+ [MALI_C55_CONFIG_PING] = 0x0ab6c,
+ [MALI_C55_CONFIG_PONG] = 0x22b2c,
+};
+
+static const char * const mali_c55_clk_names[MALI_C55_NUM_CLKS] = {
+ "vclk",
+ "aclk",
+ "hclk",
+};
+
+static const char * const mali_c55_reset_names[MALI_C55_NUM_RESETS] = {
+ "vresetn",
+ "aresetn",
+ "hresetn",
+};
+
+/*
+ * System IO
+ *
+ * The Mali-C55 ISP has up to two configuration register spaces (called 'ping'
+ * and 'pong'), with the expectation that the 'active' space will be left
+ * untouched whilst a frame is being processed and the 'inactive' space
+ * configured ready to be switched to during the blanking period before the next
+ * frame processing starts. These spaces should ideally be set via DMA transfer
+ * from a buffer rather than through individual register set operations. There
+ * is also a shared global register space whose registers are written
+ * directly. For now we simply use CPU writes, with DMA transfer of the
+ * config spaces as a future goal.
+ *
+ * As groundwork for that path, any read/write call made to an address
+ * within those config spaces is in fact directed to a buffer allocated to
+ * hold the values rather than to the IO memory itself. The actual copy of
+ * that buffer to IO memory happens on interrupt.
+ */
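+
+/*
+ * A minimal sketch of the intended flow (illustrative only; the register
+ * name below is a placeholder):
+ *
+ * mali_c55_ctx_update_bits(mali_c55, SOME_CONFIG_SPACE_REG, mask, val);
+ * ...
+ * mali_c55_config_write(ctx, MALI_C55_CONFIG_PING, false);
+ *
+ * The first call only touches the shadow buffer; the new value reaches the
+ * hardware when the buffer is copied to a config space at frame-start time.
+ */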
+
+void mali_c55_write(struct mali_c55 *mali_c55, unsigned int addr, u32 val)
+{
+ WARN_ON(addr >= MALI_C55_REG_CONFIG_SPACES_OFFSET);
+
+ writel(val, mali_c55->base + addr);
+}
+
+u32 mali_c55_read(struct mali_c55 *mali_c55, unsigned int addr)
+{
+ WARN_ON(addr >= MALI_C55_REG_CONFIG_SPACES_OFFSET);
+
+ return readl(mali_c55->base + addr);
+}
+
+void mali_c55_update_bits(struct mali_c55 *mali_c55, unsigned int addr,
+ u32 mask, u32 val)
+{
+ u32 orig, new;
+
+ orig = mali_c55_read(mali_c55, addr);
+
+ new = orig & ~mask;
+ new |= val & mask;
+
+ if (new != orig)
+ mali_c55_write(mali_c55, addr, new);
+}
+
+static void __mali_c55_ctx_write(struct mali_c55_context *ctx,
+ unsigned int addr, u32 val)
+{
+ addr = (addr - MALI_C55_REG_CONFIG_SPACES_OFFSET) / 4;
+ ctx->registers[addr] = val;
+}
+
+void mali_c55_ctx_write(struct mali_c55 *mali_c55, unsigned int addr, u32 val)
+{
+ struct mali_c55_context *ctx = mali_c55_get_active_context(mali_c55);
+
+ WARN_ON(addr < MALI_C55_REG_CONFIG_SPACES_OFFSET);
+
+ spin_lock(&ctx->lock);
+ __mali_c55_ctx_write(ctx, addr, val);
+ spin_unlock(&ctx->lock);
+}
+
+static u32 __mali_c55_ctx_read(struct mali_c55_context *ctx, unsigned int addr)
+{
+ addr = (addr - MALI_C55_REG_CONFIG_SPACES_OFFSET) / 4;
+ return ctx->registers[addr];
+}
+
+u32 mali_c55_ctx_read(struct mali_c55 *mali_c55, unsigned int addr)
+{
+ struct mali_c55_context *ctx = mali_c55_get_active_context(mali_c55);
+ u32 val;
+
+ WARN_ON(addr < MALI_C55_REG_CONFIG_SPACES_OFFSET);
+
+ spin_lock(&ctx->lock);
+ val = __mali_c55_ctx_read(ctx, addr);
+ spin_unlock(&ctx->lock);
+
+ return val;
+}
+
+void mali_c55_ctx_update_bits(struct mali_c55 *mali_c55, unsigned int addr,
+ u32 mask, u32 val)
+{
+ struct mali_c55_context *ctx = mali_c55_get_active_context(mali_c55);
+ u32 orig, tmp;
+
+ WARN_ON(addr < MALI_C55_REG_CONFIG_SPACES_OFFSET);
+
+ spin_lock(&ctx->lock);
+
+ orig = __mali_c55_ctx_read(ctx, addr);
+
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+
+ if (tmp != orig)
+ __mali_c55_ctx_write(ctx, addr, tmp);
+
+ spin_unlock(&ctx->lock);
+}
+
+int mali_c55_config_write(struct mali_c55_context *ctx,
+ enum mali_c55_config_spaces cfg_space,
+ bool force_synchronous)
+{
+ struct mali_c55 *mali_c55 = ctx->mali_c55;
+
+ memcpy_toio(mali_c55->base + config_space_addrs[cfg_space],
+ ctx->registers, MALI_C55_CONFIG_SPACE_SIZE);
+
+ return 0;
+}
+
+struct mali_c55_context *mali_c55_get_active_context(struct mali_c55 *mali_c55)
+{
+ return &mali_c55->context;
+}
+
+static void mali_c55_remove_links(struct mali_c55 *mali_c55)
+{
+ unsigned int i;
+
+ media_entity_remove_links(&mali_c55->tpg.sd.entity);
+ media_entity_remove_links(&mali_c55->isp.sd.entity);
+
+ for (i = 0; i < MALI_C55_NUM_RSZS; i++)
+ media_entity_remove_links(&mali_c55->resizers[i].sd.entity);
+
+ for (i = 0; i < MALI_C55_NUM_CAP_DEVS; i++)
+ media_entity_remove_links(&mali_c55->cap_devs[i].vdev.entity);
+}
+
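+/*
+ * The links created below give the following topology (a sketch; the DS
+ * branch only exists when the downscale pipe is fitted, and the ISP bypass
+ * source pad additionally feeds the FR resizer's bypass sink pad):
+ *
+ * TPG/sensor --> ISP --> FR resizer --> FR capture
+ *                 |----> DS resizer --> DS capture
+ *                 |----> 3a stats capture
+ *   params ------>|
+ */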
+static int mali_c55_create_links(struct mali_c55 *mali_c55)
+{
+ int ret;
+
+ /* Test pattern generator to ISP */
+ ret = media_create_pad_link(&mali_c55->tpg.sd.entity, 0,
+ &mali_c55->isp.sd.entity,
+ MALI_C55_ISP_PAD_SINK_VIDEO, 0);
+ if (ret) {
+ dev_err(mali_c55->dev, "failed to link TPG and ISP\n");
+ goto err_remove_links;
+ }
+
+ /* Full resolution resizer pipe. */
+ ret = media_create_pad_link(&mali_c55->isp.sd.entity,
+ MALI_C55_ISP_PAD_SOURCE_VIDEO,
+ &mali_c55->resizers[MALI_C55_RSZ_FR].sd.entity,
+ MALI_C55_RSZ_SINK_PAD,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(mali_c55->dev, "failed to link ISP and FR resizer\n");
+ goto err_remove_links;
+ }
+
+ /* Full resolution bypass. */
+ ret = media_create_pad_link(&mali_c55->isp.sd.entity,
+ MALI_C55_ISP_PAD_SOURCE_BYPASS,
+ &mali_c55->resizers[MALI_C55_RSZ_FR].sd.entity,
+ MALI_C55_RSZ_SINK_BYPASS_PAD,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(mali_c55->dev, "failed to link ISP and FR resizer\n");
+ goto err_remove_links;
+ }
+
+ /* Resizer pipe to video capture nodes. */
+ ret = media_create_pad_link(&mali_c55->resizers[0].sd.entity,
+ MALI_C55_RSZ_SOURCE_PAD,
+ &mali_c55->cap_devs[MALI_C55_CAP_DEV_FR].vdev.entity,
+ 0, MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(mali_c55->dev,
+ "failed to link FR resizer and video device\n");
+ goto err_remove_links;
+ }
+
+ /* The downscale pipe is an optional hardware block */
+ if (mali_c55->capabilities & MALI_C55_GPS_DS_PIPE_FITTED) {
+ ret = media_create_pad_link(&mali_c55->isp.sd.entity,
+ MALI_C55_ISP_PAD_SOURCE_VIDEO,
+ &mali_c55->resizers[MALI_C55_RSZ_DS].sd.entity,
+ MALI_C55_RSZ_SINK_PAD,
+ MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ dev_err(mali_c55->dev,
+ "failed to link ISP and DS resizer\n");
+ goto err_remove_links;
+ }
+
+ ret = media_create_pad_link(&mali_c55->resizers[1].sd.entity,
+ MALI_C55_RSZ_SOURCE_PAD,
+ &mali_c55->cap_devs[MALI_C55_CAP_DEV_DS].vdev.entity,
+ 0, MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(mali_c55->dev,
+ "failed to link DS resizer and video device\n");
+ goto err_remove_links;
+ }
+ }
+
+ ret = media_create_pad_link(&mali_c55->isp.sd.entity,
+ MALI_C55_ISP_PAD_SOURCE_STATS,
+ &mali_c55->stats.vdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(mali_c55->dev,
+ "failed to link ISP and 3a stats node\n");
+ goto err_remove_links;
+ }
+
+ ret = media_create_pad_link(&mali_c55->params.vdev.entity, 0,
+ &mali_c55->isp.sd.entity,
+ MALI_C55_ISP_PAD_SINK_PARAMS,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret) {
+ dev_err(mali_c55->dev,
+ "failed to link ISP and parameters video node\n");
+ goto err_remove_links;
+ }
+
+ return 0;
+
+err_remove_links:
+ mali_c55_remove_links(mali_c55);
+ return ret;
+}
+
+static void mali_c55_unregister_entities(struct mali_c55 *mali_c55)
+{
+ mali_c55_remove_links(mali_c55);
+ mali_c55_unregister_tpg(mali_c55);
+ mali_c55_unregister_isp(mali_c55);
+ mali_c55_unregister_resizers(mali_c55);
+ mali_c55_unregister_capture_devs(mali_c55);
+ mali_c55_unregister_params(mali_c55);
+ mali_c55_unregister_stats(mali_c55);
+}
+
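+/*
+ * Illustrative ping/pong cadence, assuming the hardware is currently
+ * reading the ping space: at the start of frame N the ISR sets next_config
+ * to pong, this helper copies the shadow buffer into the pong space and
+ * points the MCU config-write selector at it, and from frame N + 1 the
+ * hardware reads pong while ping becomes the next target, and so on.
+ */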
+static void mali_c55_swap_next_config(struct mali_c55 *mali_c55)
+{
+ struct mali_c55_context *ctx = mali_c55_get_active_context(mali_c55);
+
+ mali_c55_config_write(ctx, mali_c55->next_config ?
+ MALI_C55_CONFIG_PING : MALI_C55_CONFIG_PONG,
+ false);
+
+ mali_c55_update_bits(mali_c55, MALI_C55_REG_MCU_CONFIG,
+ MALI_C55_REG_MCU_CONFIG_WRITE_MASK,
+ MALI_C55_MCU_CONFIG_WRITE(mali_c55->next_config));
+}
+
+static int mali_c55_register_entities(struct mali_c55 *mali_c55)
+{
+ int ret;
+
+ ret = mali_c55_register_tpg(mali_c55);
+ if (ret)
+ return ret;
+
+ ret = mali_c55_register_isp(mali_c55);
+ if (ret)
+ goto err_unregister_entities;
+
+ ret = mali_c55_register_resizers(mali_c55);
+ if (ret)
+ goto err_unregister_entities;
+
+ ret = mali_c55_register_capture_devs(mali_c55);
+ if (ret)
+ goto err_unregister_entities;
+
+ ret = mali_c55_register_params(mali_c55);
+ if (ret)
+ goto err_unregister_entities;
+
+ ret = mali_c55_register_stats(mali_c55);
+ if (ret)
+ goto err_unregister_entities;
+
+ ret = mali_c55_create_links(mali_c55);
+ if (ret)
+ goto err_unregister_entities;
+
+ return 0;
+
+err_unregister_entities:
+ mali_c55_unregister_entities(mali_c55);
+
+ return ret;
+}
+
+static int mali_c55_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_connection *asc)
+{
+ struct mali_c55 *mali_c55 = container_of(notifier,
+ struct mali_c55, notifier);
+ struct media_pad *pad = &mali_c55->isp.pads[MALI_C55_ISP_PAD_SINK_VIDEO];
+ int ret;
+
+ /*
+ * By default we'll flag this link enabled and the TPG link disabled, but
+ * set no immutable flag because we need to be able to switch between the
+ * two.
+ */
+ ret = v4l2_create_fwnode_links_to_pad(subdev, pad,
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ dev_err(mali_c55->dev, "failed to create link for %s\n",
+ subdev->name);
+
+ return ret;
+}
+
+static int mali_c55_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct mali_c55 *mali_c55 = container_of(notifier,
+ struct mali_c55, notifier);
+
+ return v4l2_device_register_subdev_nodes(&mali_c55->v4l2_dev);
+}
+
+static const struct v4l2_async_notifier_operations mali_c55_notifier_ops = {
+ .bound = mali_c55_notifier_bound,
+ .complete = mali_c55_notifier_complete,
+};
+
+static int mali_c55_parse_endpoint(struct mali_c55 *mali_c55)
+{
+ struct v4l2_async_connection *asc;
+ struct fwnode_handle *ep;
+
+ /*
+ * The ISP should have a single endpoint pointing to some flavour of
+ * CSI-2 receiver...but for now at least we do want everything to work
+ * normally even with no sensors connected, as we have the TPG. If we
+ * don't find a sensor just warn and return success.
+ */
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(mali_c55->dev),
+ 0, 0, 0);
+ if (!ep) {
+ dev_warn(mali_c55->dev, "no local endpoint found\n");
+ return 0;
+ }
+
+ asc = v4l2_async_nf_add_fwnode_remote(&mali_c55->notifier, ep,
+ struct v4l2_async_connection);
+ fwnode_handle_put(ep);
+ if (IS_ERR(asc)) {
+ dev_err(mali_c55->dev, "failed to add remote fwnode\n");
+ return PTR_ERR(asc);
+ }
+
+ return 0;
+}
+
+static int mali_c55_media_frameworks_init(struct mali_c55 *mali_c55)
+{
+ int ret;
+
+ strscpy(mali_c55->media_dev.model, "ARM Mali-C55 ISP",
+ sizeof(mali_c55->media_dev.model));
+
+ media_device_init(&mali_c55->media_dev);
+
+ ret = media_device_register(&mali_c55->media_dev);
+ if (ret)
+ goto err_cleanup_media_device;
+
+ mali_c55->v4l2_dev.mdev = &mali_c55->media_dev;
+ ret = v4l2_device_register(mali_c55->dev, &mali_c55->v4l2_dev);
+ if (ret) {
+ dev_err(mali_c55->dev, "failed to register V4L2 device\n");
+ goto err_unregister_media_device;
+ }
+
+ mali_c55->notifier.ops = &mali_c55_notifier_ops;
+ v4l2_async_nf_init(&mali_c55->notifier, &mali_c55->v4l2_dev);
+
+ ret = mali_c55_register_entities(mali_c55);
+ if (ret) {
+ dev_err(mali_c55->dev, "failed to register entities\n");
+ goto err_cleanup_nf;
+ }
+
+ ret = mali_c55_parse_endpoint(mali_c55);
+ if (ret)
+ goto err_unregister_entities;
+
+ ret = v4l2_async_nf_register(&mali_c55->notifier);
+ if (ret) {
+ dev_err(mali_c55->dev, "failed to register notifier\n");
+ goto err_unregister_entities;
+ }
+
+ return 0;
+
+err_unregister_entities:
+ mali_c55_unregister_entities(mali_c55);
+err_cleanup_nf:
+ v4l2_async_nf_cleanup(&mali_c55->notifier);
+ v4l2_device_unregister(&mali_c55->v4l2_dev);
+err_unregister_media_device:
+ media_device_unregister(&mali_c55->media_dev);
+err_cleanup_media_device:
+ media_device_cleanup(&mali_c55->media_dev);
+
+ return ret;
+}
+
+static void mali_c55_media_frameworks_deinit(struct mali_c55 *mali_c55)
+{
+ v4l2_async_nf_unregister(&mali_c55->notifier);
+ mali_c55_unregister_entities(mali_c55);
+ v4l2_async_nf_cleanup(&mali_c55->notifier);
+ v4l2_device_unregister(&mali_c55->v4l2_dev);
+ media_device_unregister(&mali_c55->media_dev);
+ media_device_cleanup(&mali_c55->media_dev);
+}
+
+bool mali_c55_pipeline_ready(struct mali_c55 *mali_c55)
+{
+ struct mali_c55_cap_dev *fr = &mali_c55->cap_devs[MALI_C55_CAP_DEV_FR];
+ struct mali_c55_cap_dev *ds = &mali_c55->cap_devs[MALI_C55_CAP_DEV_DS];
+ struct mali_c55_params *params = &mali_c55->params;
+ struct mali_c55_stats *stats = &mali_c55->stats;
+
+ return vb2_start_streaming_called(&fr->queue) &&
+ (!(mali_c55->capabilities & MALI_C55_GPS_DS_PIPE_FITTED) ||
+ vb2_start_streaming_called(&ds->queue)) &&
+ vb2_start_streaming_called(&params->queue) &&
+ vb2_start_streaming_called(&stats->queue);
+}
+
+static int mali_c55_check_hwcfg(struct mali_c55 *mali_c55)
+{
+ u32 product, version, revision, capabilities;
+
+ product = mali_c55_read(mali_c55, MALI_C55_REG_PRODUCT);
+ version = mali_c55_read(mali_c55, MALI_C55_REG_VERSION);
+ revision = mali_c55_read(mali_c55, MALI_C55_REG_REVISION);
+
+ mali_c55->media_dev.hw_revision = version;
+
+ dev_info(mali_c55->dev, "Detected Mali-C55 ISP %u.%u.%u\n",
+ product, version, revision);
+
+ capabilities = mali_c55_read(mali_c55,
+ MALI_C55_REG_GLOBAL_PARAMETER_STATUS);
+
+ /*
+ * In its current iteration, the driver only supports inline mode. Given
+ * we cannot control input data timing in this mode, we cannot guarantee
+ * that the vertical blanking periods between frames will be long enough
+ * for us to write configuration data to the ISP during them. For that
+ * reason we can't really support single config space configuration
+ * until memory input mode is implemented.
+ */
+ if (!(capabilities & MALI_C55_GPS_PONG_FITTED)) {
+ dev_err(mali_c55->dev, "Pong config space not fitted.\n");
+ return -EINVAL;
+ }
+
+ mali_c55->capabilities = capabilities & 0xffff;
+
+ return 0;
+}
+
+static irqreturn_t mali_c55_isr(int irq, void *context)
+{
+ struct device *dev = context;
+ struct mali_c55 *mali_c55 = dev_get_drvdata(dev);
+ unsigned long interrupt_status;
+ u32 curr_config;
+ unsigned int i;
+
+ interrupt_status = mali_c55_read(mali_c55,
+ MALI_C55_REG_INTERRUPT_STATUS_VECTOR);
+ if (!interrupt_status)
+ return IRQ_NONE;
+
+ mali_c55_write(mali_c55, MALI_C55_REG_INTERRUPT_CLEAR_VECTOR,
+ interrupt_status);
+ mali_c55_write(mali_c55, MALI_C55_REG_INTERRUPT_CLEAR, 1);
+ mali_c55_write(mali_c55, MALI_C55_REG_INTERRUPT_CLEAR, 0);
+
+ for_each_set_bit(i, &interrupt_status, MALI_C55_NUM_IRQ_BITS) {
+ switch (i) {
+ case MALI_C55_IRQ_ISP_START:
+ mali_c55_isp_queue_event_sof(mali_c55);
+
+ mali_c55_set_next_buffer(&mali_c55->cap_devs[MALI_C55_CAP_DEV_FR]);
+ if (mali_c55->capabilities & MALI_C55_GPS_DS_PIPE_FITTED)
+ mali_c55_set_next_buffer(&mali_c55->cap_devs[MALI_C55_CAP_DEV_DS]);
+
+ /*
+ * When the ISP starts a frame we have some work to do:
+ *
+ * 1. Copy over the config for the **next** frame
+ * 2. Read out the metering stats for the **last** frame
+ */
+
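+ /*
+ * Standard field extraction: mask the register, then shift right
+ * by the index of the mask's lowest set bit (ffs() is 1-based);
+ * curr_config then holds the raw ping/pong selector and the other
+ * space becomes the next target.
+ */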
+ curr_config = mali_c55_read(mali_c55,
+ MALI_C55_REG_PING_PONG_READ);
+ curr_config &= MALI_C55_REG_PING_PONG_READ_MASK;
+ curr_config >>= ffs(MALI_C55_REG_PING_PONG_READ_MASK) - 1;
+ mali_c55->next_config = curr_config ^ 1;
+
+ /*
+ * Write the configuration parameters received from
+ * userspace into the configuration buffer, which will
+ * be transferred to the 'next' active config space
+ * by mali_c55_swap_next_config().
+ */
+ mali_c55_params_write_config(mali_c55);
+
+ mali_c55_stats_fill_buffer(mali_c55,
+ mali_c55->next_config ^ 1);
+
+ mali_c55_swap_next_config(mali_c55);
+
+ break;
+ case MALI_C55_IRQ_ISP_DONE:
+ /*
+ * TODO: Where the ISP has no Pong config fitted, we'd
+ * have to do the mali_c55_swap_next_config() call here.
+ */
+ break;
+ case MALI_C55_IRQ_FR_Y_DONE:
+ mali_c55_set_plane_done(&mali_c55->cap_devs[MALI_C55_CAP_DEV_FR],
+ MALI_C55_PLANE_Y);
+ break;
+ case MALI_C55_IRQ_FR_UV_DONE:
+ mali_c55_set_plane_done(&mali_c55->cap_devs[MALI_C55_CAP_DEV_FR],
+ MALI_C55_PLANE_UV);
+ break;
+ case MALI_C55_IRQ_DS_Y_DONE:
+ mali_c55_set_plane_done(&mali_c55->cap_devs[MALI_C55_CAP_DEV_DS],
+ MALI_C55_PLANE_Y);
+ break;
+ case MALI_C55_IRQ_DS_UV_DONE:
+ mali_c55_set_plane_done(&mali_c55->cap_devs[MALI_C55_CAP_DEV_DS],
+ MALI_C55_PLANE_UV);
+ break;
+ default:
+ /*
+ * Only the above interrupts are currently unmasked. If
+ * we receive anything else here then something weird
+ * has gone on.
+ */
+ dev_err(dev, "masked interrupt %s triggered\n",
+ mali_c55_interrupt_names[i]);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mali_c55_init_context(struct mali_c55 *mali_c55,
+ struct resource *res)
+{
+ struct mali_c55_context *ctx = &mali_c55->context;
+
+ ctx->base = res->start;
+ ctx->mali_c55 = mali_c55;
+ spin_lock_init(&ctx->lock);
+
+ ctx->registers = kzalloc(MALI_C55_CONFIG_SPACE_SIZE, GFP_KERNEL);
+ if (!ctx->registers)
+ return -ENOMEM;
+
+ /*
+ * The allocated memory is empty, we need to load the default
+ * register settings. We just read Ping; it's identical to Pong.
+ */
+ memcpy_fromio(ctx->registers,
+ mali_c55->base + config_space_addrs[MALI_C55_CONFIG_PING],
+ MALI_C55_CONFIG_SPACE_SIZE);
+
+ /*
+ * Some features of the ISP need to be disabled by default and only
+ * enabled at the same time as they're configured by a parameters buffer
+ */
+
+ /* Bypass the sqrt and square compression and expansion modules */
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_BYPASS_1,
+ MALI_C55_REG_BYPASS_1_FE_SQRT,
+ MALI_C55_REG_BYPASS_1_FE_SQRT);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_BYPASS_3,
+ MALI_C55_REG_BYPASS_3_SQUARE_BE,
+ MALI_C55_REG_BYPASS_3_SQUARE_BE);
+
+ /* Bypass the temper module */
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_BYPASS_2,
+ MALI_C55_REG_BYPASS_2_TEMPER);
+
+ /* Disable the temper module's DMA read/write */
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_TEMPER_DMA_IO, 0x0);
+
+ /* Bypass the colour noise reduction */
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_BYPASS_4,
+ MALI_C55_REG_BYPASS_4_CNR);
+
+ /* Disable the sinter module */
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_SINTER_CONFIG,
+ MALI_C55_SINTER_ENABLE_MASK, 0);
+
+ /* Disable the RGB Gamma module for each output */
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_FR_GAMMA_RGB_ENABLE, 0);
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_DS_GAMMA_RGB_ENABLE, 0);
+
+ /* Disable the colour correction matrix */
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_CCM_ENABLE, 0);
+
+ return 0;
+}
+
+static void __mali_c55_power_off(struct mali_c55 *mali_c55)
+{
+ reset_control_bulk_assert(ARRAY_SIZE(mali_c55->resets), mali_c55->resets);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(mali_c55->clks), mali_c55->clks);
+}
+
+static int __maybe_unused mali_c55_runtime_suspend(struct device *dev)
+{
+ struct mali_c55 *mali_c55 = dev_get_drvdata(dev);
+
+ if (irq_has_action(mali_c55->irqnum))
+ free_irq(mali_c55->irqnum, dev);
+ __mali_c55_power_off(mali_c55);
+
+ return 0;
+}
+
+static int __mali_c55_power_on(struct mali_c55 *mali_c55)
+{
+ int ret;
+ u32 val;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(mali_c55->clks),
+ mali_c55->clks);
+ if (ret) {
+ dev_err(mali_c55->dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ ret = reset_control_bulk_deassert(ARRAY_SIZE(mali_c55->resets),
+ mali_c55->resets);
+ if (ret) {
+ dev_err(mali_c55->dev, "failed to deassert resets\n");
+ return ret;
+ }
+
+ /* Use "software only" context management. */
+ mali_c55_update_bits(mali_c55, MALI_C55_REG_MCU_CONFIG,
+ MALI_C55_REG_MCU_CONFIG_OVERRIDE_MASK, 0x01);
+
+ /*
+ * Mask the interrupts and clear any that were set, then unmask the ones
+ * that we actually want to handle.
+ */
+ mali_c55_write(mali_c55, MALI_C55_REG_INTERRUPT_MASK_VECTOR,
+ MALI_C55_INTERRUPT_MASK_ALL);
+ mali_c55_write(mali_c55, MALI_C55_REG_INTERRUPT_CLEAR_VECTOR,
+ MALI_C55_INTERRUPT_MASK_ALL);
+ mali_c55_write(mali_c55, MALI_C55_REG_INTERRUPT_CLEAR, 0x01);
+ mali_c55_write(mali_c55, MALI_C55_REG_INTERRUPT_CLEAR, 0x00);
+
+ mali_c55_update_bits(mali_c55, MALI_C55_REG_INTERRUPT_MASK_VECTOR,
+ MALI_C55_INTERRUPT_BIT(MALI_C55_IRQ_ISP_START) |
+ MALI_C55_INTERRUPT_BIT(MALI_C55_IRQ_ISP_DONE) |
+ MALI_C55_INTERRUPT_BIT(MALI_C55_IRQ_FR_Y_DONE) |
+ MALI_C55_INTERRUPT_BIT(MALI_C55_IRQ_FR_UV_DONE) |
+ MALI_C55_INTERRUPT_BIT(MALI_C55_IRQ_DS_Y_DONE) |
+ MALI_C55_INTERRUPT_BIT(MALI_C55_IRQ_DS_UV_DONE),
+ 0x00);
+
+ /* Set safe stop to ensure we're in a non-streaming state */
+ mali_c55_write(mali_c55, MALI_C55_REG_INPUT_MODE_REQUEST,
+ MALI_C55_INPUT_SAFE_STOP);
+ readl_poll_timeout(mali_c55->base + MALI_C55_REG_MODE_STATUS,
+ val, !val, 10 * USEC_PER_MSEC, 250 * USEC_PER_MSEC);
+
+ return 0;
+}
+
+static int __maybe_unused mali_c55_runtime_resume(struct device *dev)
+{
+ struct mali_c55 *mali_c55 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = __mali_c55_power_on(mali_c55);
+ if (ret)
+ return ret;
+
+ /*
+ * The driver needs to transfer large amounts of register settings to
+ * the ISP each frame, using either a DMA transfer or memcpy. We use a
+ * threaded IRQ to avoid disabling interrupts the entire time that's
+ * happening.
+ */
+ ret = request_threaded_irq(mali_c55->irqnum, NULL, mali_c55_isr,
+ IRQF_ONESHOT, dev_driver_string(dev), dev);
+ if (ret) {
+ __mali_c55_power_off(mali_c55);
+ dev_err(dev, "failed to request irq\n");
+ }
+
+ return ret;
+}
+
+static const struct dev_pm_ops mali_c55_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(mali_c55_runtime_suspend, mali_c55_runtime_resume,
+ NULL)
+};
+
+static int mali_c55_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mali_c55 *mali_c55;
+ struct resource *res;
+ int ret;
+
+ mali_c55 = devm_kzalloc(dev, sizeof(*mali_c55), GFP_KERNEL);
+ if (!mali_c55)
+ return -ENOMEM;
+
+ mali_c55->dev = dev;
+ platform_set_drvdata(pdev, mali_c55);
+
+ mali_c55->base = devm_platform_get_and_ioremap_resource(pdev, 0,
+ &res);
+ if (IS_ERR(mali_c55->base))
+ return dev_err_probe(dev, PTR_ERR(mali_c55->base),
+ "failed to map IO memory\n");
+
+ /* Fetch the IRQ number early; the runtime PM handlers rely on it. */
+ mali_c55->irqnum = platform_get_irq(pdev, 0);
+ if (mali_c55->irqnum < 0)
+ return dev_err_probe(dev, mali_c55->irqnum,
+ "failed to get interrupt\n");
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(mali_c55_clk_names); i++)
+ mali_c55->clks[i].id = mali_c55_clk_names[i];
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(mali_c55->clks), mali_c55->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to acquire clocks\n");
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(mali_c55_reset_names); i++)
+ mali_c55->resets[i].id = mali_c55_reset_names[i];
+
+ ret = devm_reset_control_bulk_get_optional_shared(dev,
+ ARRAY_SIZE(mali_c55_reset_names), mali_c55->resets);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to acquire resets\n");
+
+ of_reserved_mem_device_init(dev);
+ vb2_dma_contig_set_max_seg_size(dev, UINT_MAX);
+
+ ret = __mali_c55_power_on(mali_c55);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to power on\n");
+
+ ret = mali_c55_check_hwcfg(mali_c55);
+ if (ret)
+ goto err_power_off;
+
+ ret = mali_c55_init_context(mali_c55, res);
+ if (ret)
+ goto err_power_off;
+
+ mali_c55->media_dev.dev = dev;
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = mali_c55_media_frameworks_init(mali_c55);
+ if (ret)
+ goto err_pm_runtime_disable;
+
+ pm_runtime_idle(&pdev->dev);
+
+ return 0;
+
+err_pm_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+err_free_context_registers:
+ kfree(mali_c55->context.registers);
+err_power_off:
+ __mali_c55_power_off(mali_c55);
+
+ return ret;
+}
+
+static void mali_c55_remove(struct platform_device *pdev)
+{
+ struct mali_c55 *mali_c55 = platform_get_drvdata(pdev);
+
+ mali_c55_media_frameworks_deinit(mali_c55);
+ pm_runtime_disable(&pdev->dev);
+ kfree(mali_c55->context.registers);
+}
+
+static const struct of_device_id mali_c55_of_match[] = {
+ { .compatible = "arm,mali-c55", },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mali_c55_of_match);
+
+static struct platform_driver mali_c55_driver = {
+ .driver = {
+ .name = "mali-c55",
+ .of_match_table = mali_c55_of_match,
+ .pm = &mali_c55_pm_ops,
+ },
+ .probe = mali_c55_probe,
+ .remove = mali_c55_remove,
+};
+
+module_platform_driver(mali_c55_driver);
+
+MODULE_AUTHOR("Daniel Scally <dan.scally@ideasonboard.com>");
+MODULE_AUTHOR("Jacopo Mondi <jacopo.mondi@ideasonboard.com>");
+MODULE_DESCRIPTION("ARM Mali-C55 ISP platform driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/arm/mali-c55/mali-c55-isp.c b/drivers/media/platform/arm/mali-c55/mali-c55-isp.c
new file mode 100644
index 000000000000..497f25fbdd13
--- /dev/null
+++ b/drivers/media/platform/arm/mali-c55/mali-c55-isp.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Mali-C55 ISP Driver - Image signal processor
+ *
+ * Copyright (C) 2025 Ideas on Board Oy
+ */
+
+#include <linux/media/arm/mali-c55-config.h>
+
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/property.h>
+#include <linux/string.h>
+
+#include <uapi/linux/media/arm/mali-c55-config.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mc.h>
+#include <media/v4l2-subdev.h>
+
+#include "mali-c55-common.h"
+#include "mali-c55-registers.h"
+
+static const struct mali_c55_isp_format_info mali_c55_isp_fmts[] = {
+ {
+ .code = MEDIA_BUS_FMT_SRGGB20_1X20,
+ .shifted_code = MEDIA_BUS_FMT_SRGGB16_1X16,
+ .order = MALI_C55_BAYER_ORDER_RGGB,
+ .bypass = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGRBG20_1X20,
+ .shifted_code = MEDIA_BUS_FMT_SGRBG16_1X16,
+ .order = MALI_C55_BAYER_ORDER_GRBG,
+ .bypass = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SGBRG20_1X20,
+ .shifted_code = MEDIA_BUS_FMT_SGBRG16_1X16,
+ .order = MALI_C55_BAYER_ORDER_GBRG,
+ .bypass = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_SBGGR20_1X20,
+ .shifted_code = MEDIA_BUS_FMT_SBGGR16_1X16,
+ .order = MALI_C55_BAYER_ORDER_BGGR,
+ .bypass = false,
+ },
+ {
+ .code = MEDIA_BUS_FMT_RGB202020_1X60,
+ .shifted_code = 0, /* Not relevant for this format */
+ .order = 0, /* Not relevant for this format */
+ .bypass = true,
+ }
+ /*
+ * TODO: Support MEDIA_BUS_FMT_YUV20_1X60 here. This is so that we can
+ * also support YUV input from a sensor passed-through to the output. At
+ * present we have no mechanism to test that though so it may have to
+ * wait a while...
+ */
+};
+
+const struct mali_c55_isp_format_info *
+mali_c55_isp_get_mbus_config_by_index(u32 index)
+{
+ if (index < ARRAY_SIZE(mali_c55_isp_fmts))
+ return &mali_c55_isp_fmts[index];
+
+ return NULL;
+}
+
+const struct mali_c55_isp_format_info *
+mali_c55_isp_get_mbus_config_by_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mali_c55_isp_fmts); i++) {
+ if (mali_c55_isp_fmts[i].code == code)
+ return &mali_c55_isp_fmts[i];
+ }
+
+ return NULL;
+}
+
+const struct mali_c55_isp_format_info *
+mali_c55_isp_get_mbus_config_by_shifted_code(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(mali_c55_isp_fmts); i++) {
+ if (mali_c55_isp_fmts[i].shifted_code == code)
+ return &mali_c55_isp_fmts[i];
+ }
+
+ return NULL;
+}
+
+static void mali_c55_isp_stop(struct mali_c55 *mali_c55)
+{
+ u32 val;
+
+ mali_c55_write(mali_c55, MALI_C55_REG_INPUT_MODE_REQUEST,
+ MALI_C55_INPUT_SAFE_STOP);
+ readl_poll_timeout(mali_c55->base + MALI_C55_REG_MODE_STATUS,
+ val, !val, 10 * USEC_PER_MSEC, 250 * USEC_PER_MSEC);
+}
+
+static int mali_c55_isp_start(struct mali_c55 *mali_c55,
+ const struct v4l2_subdev_state *state)
+{
+ struct mali_c55_context *ctx = mali_c55_get_active_context(mali_c55);
+ const struct mali_c55_isp_format_info *cfg;
+ const struct v4l2_mbus_framefmt *format;
+ const struct v4l2_rect *crop;
+ u32 val;
+ int ret;
+
+ mali_c55_update_bits(mali_c55, MALI_C55_REG_MCU_CONFIG,
+ MALI_C55_REG_MCU_CONFIG_WRITE_MASK,
+ MALI_C55_REG_MCU_CONFIG_WRITE_PING);
+
+ /* Apply input windowing */
+ crop = v4l2_subdev_state_get_crop(state, MALI_C55_ISP_PAD_SINK_VIDEO);
+ format = v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SINK_VIDEO);
+ cfg = mali_c55_isp_get_mbus_config_by_code(format->code);
+
+ mali_c55_write(mali_c55, MALI_C55_REG_HC_START,
+ MALI_C55_HC_START(crop->left));
+ mali_c55_write(mali_c55, MALI_C55_REG_HC_SIZE,
+ MALI_C55_HC_SIZE(crop->width));
+ mali_c55_write(mali_c55, MALI_C55_REG_VC_START_SIZE,
+ MALI_C55_VC_START(crop->top) |
+ MALI_C55_VC_SIZE(crop->height));
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_BASE_ADDR,
+ MALI_C55_REG_ACTIVE_WIDTH_MASK, format->width);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_BASE_ADDR,
+ MALI_C55_REG_ACTIVE_HEIGHT_MASK,
+ format->height << 16);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_BAYER_ORDER,
+ MALI_C55_BAYER_ORDER_MASK, cfg->order);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_INPUT_WIDTH,
+ MALI_C55_INPUT_WIDTH_MASK,
+ MALI_C55_INPUT_WIDTH_20BIT);
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_ISP_RAW_BYPASS,
+ MALI_C55_ISP_RAW_BYPASS_BYPASS_MASK,
+ cfg->bypass ? MALI_C55_ISP_RAW_BYPASS_BYPASS_MASK :
+ 0x00);
+
+ mali_c55_params_write_config(mali_c55);
+ ret = mali_c55_config_write(ctx, MALI_C55_CONFIG_PING, true);
+ if (ret) {
+ dev_err(mali_c55->dev, "failed to write ISP config\n");
+ return ret;
+ }
+
+ mali_c55_write(mali_c55, MALI_C55_REG_INPUT_MODE_REQUEST,
+ MALI_C55_INPUT_SAFE_START);
+
+ ret = readl_poll_timeout(mali_c55->base + MALI_C55_REG_MODE_STATUS, val,
+ val, 10 * USEC_PER_MSEC, 250 * USEC_PER_MSEC);
+ if (ret) {
+ mali_c55_isp_stop(mali_c55);
+ dev_err(mali_c55->dev, "timeout starting ISP\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mali_c55_isp_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /*
+ * Only the internal RGB processed format is allowed on the regular
+ * processing source pad.
+ */
+ if (code->pad == MALI_C55_ISP_PAD_SOURCE_VIDEO) {
+ if (code->index)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_RGB121212_1X36;
+ return 0;
+ }
+
+ /* On the sink and bypass pads all the supported formats are allowed. */
+ if (code->index >= ARRAY_SIZE(mali_c55_isp_fmts))
+ return -EINVAL;
+
+ code->code = mali_c55_isp_fmts[code->index].code;
+
+ return 0;
+}
+
+static int mali_c55_isp_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ const struct mali_c55_isp_format_info *cfg;
+
+ if (fse->index > 0)
+ return -EINVAL;
+
+ /*
+ * Only the internal RGB processed format is allowed on the regular
+ * processing source pad.
+ *
+ * On the sink and bypass pads all the supported formats are allowed.
+ */
+ if (fse->pad == MALI_C55_ISP_PAD_SOURCE_VIDEO) {
+ if (fse->code != MEDIA_BUS_FMT_RGB121212_1X36)
+ return -EINVAL;
+ } else {
+ cfg = mali_c55_isp_get_mbus_config_by_code(fse->code);
+ if (!cfg)
+ return -EINVAL;
+ }
+
+ fse->min_width = MALI_C55_MIN_WIDTH;
+ fse->min_height = MALI_C55_MIN_HEIGHT;
+ fse->max_width = MALI_C55_MAX_WIDTH;
+ fse->max_height = MALI_C55_MAX_HEIGHT;
+
+ return 0;
+}
+
+static int mali_c55_isp_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_format *format)
+{
+ struct v4l2_mbus_framefmt *fmt = &format->format;
+ struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
+ const struct mali_c55_isp_format_info *cfg;
+ struct v4l2_rect *crop;
+
+ /*
+ * Disallow set_fmt on the source pads; format is fixed and the sizes
+ * are the result of applying the sink crop rectangle to the sink
+ * format.
+ */
+ if (format->pad != MALI_C55_ISP_PAD_SINK_VIDEO)
+ return v4l2_subdev_get_fmt(sd, state, format);
+
+ sink_fmt = v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SINK_VIDEO);
+
+ cfg = mali_c55_isp_get_mbus_config_by_code(fmt->code);
+ sink_fmt->code = cfg ? fmt->code : MEDIA_BUS_FMT_SRGGB20_1X20;
+
+ /*
+ * Clamp sizes in the accepted limits and clamp the crop rectangle in
+ * the new sizes.
+ */
+ sink_fmt->width = clamp(fmt->width, MALI_C55_MIN_WIDTH,
+ MALI_C55_MAX_WIDTH);
+ sink_fmt->height = clamp(fmt->height, MALI_C55_MIN_HEIGHT,
+ MALI_C55_MAX_HEIGHT);
+
+ *fmt = *sink_fmt;
+
+ crop = v4l2_subdev_state_get_crop(state, MALI_C55_ISP_PAD_SINK_VIDEO);
+ crop->left = 0;
+ crop->top = 0;
+ crop->width = sink_fmt->width;
+ crop->height = sink_fmt->height;
+
+ /*
+ * Propagate format to source pads. On the 'regular' output pad use
+ * the internal RGB processed format, while on the bypass pad simply
+ * replicate the ISP sink format. The sizes on both pads are the same as
+ * the ISP sink crop rectangle. The "field" and "colorspace" fields are
+ * set in .init_state() and fixed for both source pads, as is the "code"
+ * field for the processed data source pad.
+ */
+ src_fmt = v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SOURCE_VIDEO);
+ src_fmt->width = crop->width;
+ src_fmt->height = crop->height;
+
+ src_fmt = v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SOURCE_BYPASS);
+ src_fmt->code = sink_fmt->code;
+ src_fmt->width = crop->width;
+ src_fmt->height = crop->height;
+
+ return 0;
+}
+
+static int mali_c55_isp_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ if (sel->pad != MALI_C55_ISP_PAD_SINK_VIDEO ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ sel->r = *v4l2_subdev_state_get_crop(state, MALI_C55_ISP_PAD_SINK_VIDEO);
+
+ return 0;
+}
+
+static int mali_c55_isp_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct v4l2_mbus_framefmt *src_fmt;
+ const struct v4l2_mbus_framefmt *fmt;
+ struct v4l2_rect *crop;
+
+ if (sel->pad != MALI_C55_ISP_PAD_SINK_VIDEO ||
+ sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ fmt = v4l2_subdev_state_get_format(state, MALI_C55_ISP_PAD_SINK_VIDEO);
+
+ sel->r.left = clamp_t(unsigned int, sel->r.left, 0, fmt->width);
+ sel->r.top = clamp_t(unsigned int, sel->r.top, 0, fmt->height);
+ sel->r.width = clamp_t(unsigned int, sel->r.width, MALI_C55_MIN_WIDTH,
+ fmt->width - sel->r.left);
+ sel->r.height = clamp_t(unsigned int, sel->r.height,
+ MALI_C55_MIN_HEIGHT,
+ fmt->height - sel->r.top);
+
+ crop = v4l2_subdev_state_get_crop(state, MALI_C55_ISP_PAD_SINK_VIDEO);
+ *crop = sel->r;
+
+ /*
+ * Propagate the crop rectangle sizes to the source pad format. The crop
+ * isn't propagated to the bypass source pad, because the bypassed data
+ * cannot be cropped.
+ */
+ src_fmt = v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SOURCE_VIDEO);
+ src_fmt->width = crop->width;
+ src_fmt->height = crop->height;
+
+ return 0;
+}
+
+static int mali_c55_isp_enable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct mali_c55_isp *isp = container_of(sd, struct mali_c55_isp, sd);
+ struct mali_c55 *mali_c55 = isp->mali_c55;
+ struct v4l2_subdev *src_sd;
+ struct media_pad *sink_pad;
+ int ret;
+
+ /*
+ * We have two source pads, both of which have only a single stream. The
+ * core v4l2 code already validated those parameters so we can just get
+ * on with starting the ISP.
+ */
+
+ sink_pad = &isp->pads[MALI_C55_ISP_PAD_SINK_VIDEO];
+ isp->remote_src = media_pad_remote_pad_unique(sink_pad);
+ src_sd = media_entity_to_v4l2_subdev(isp->remote_src->entity);
+
+ isp->frame_sequence = 0;
+ ret = mali_c55_isp_start(mali_c55, state);
+ if (ret) {
+ dev_err(mali_c55->dev, "Failed to start ISP\n");
+ isp->remote_src = NULL;
+ return ret;
+ }
+
+ /*
+ * We only support a single input stream, so we can just enable the 1st
+ * entry in the streams mask.
+ */
+ ret = v4l2_subdev_enable_streams(src_sd, isp->remote_src->index, BIT(0));
+ if (ret) {
+ dev_err(mali_c55->dev, "Failed to start ISP source\n");
+ mali_c55_isp_stop(mali_c55);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mali_c55_isp_disable_streams(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state, u32 pad,
+ u64 streams_mask)
+{
+ struct mali_c55_isp *isp = container_of(sd, struct mali_c55_isp, sd);
+ struct mali_c55 *mali_c55 = isp->mali_c55;
+ struct v4l2_subdev *src_sd;
+
+ if (isp->remote_src) {
+ src_sd = media_entity_to_v4l2_subdev(isp->remote_src->entity);
+ v4l2_subdev_disable_streams(src_sd, isp->remote_src->index,
+ BIT(0));
+ }
+ isp->remote_src = NULL;
+
+ mali_c55_isp_stop(mali_c55);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops mali_c55_isp_pad_ops = {
+ .enum_mbus_code = mali_c55_isp_enum_mbus_code,
+ .enum_frame_size = mali_c55_isp_enum_frame_size,
+ .get_fmt = v4l2_subdev_get_fmt,
+ .set_fmt = mali_c55_isp_set_fmt,
+ .get_selection = mali_c55_isp_get_selection,
+ .set_selection = mali_c55_isp_set_selection,
+ .link_validate = v4l2_subdev_link_validate_default,
+ .enable_streams = mali_c55_isp_enable_streams,
+ .disable_streams = mali_c55_isp_disable_streams,
+};
+
+void mali_c55_isp_queue_event_sof(struct mali_c55 *mali_c55)
+{
+ struct v4l2_event event = {
+ .type = V4L2_EVENT_FRAME_SYNC,
+ };
+
+ event.u.frame_sync.frame_sequence = mali_c55->isp.frame_sequence;
+ v4l2_event_queue(mali_c55->isp.sd.devnode, &event);
+}
+
+static int
+mali_c55_isp_subscribe_event(struct v4l2_subdev *sd, struct v4l2_fh *fh,
+ struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_FRAME_SYNC:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ case V4L2_EVENT_CTRL:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_subdev_core_ops mali_c55_isp_core_ops = {
+ .subscribe_event = mali_c55_isp_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops mali_c55_isp_ops = {
+ .pad = &mali_c55_isp_pad_ops,
+ .core = &mali_c55_isp_core_ops,
+};
+
+static int mali_c55_isp_init_state(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *state)
+{
+ struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
+ struct v4l2_rect *in_crop;
+
+ sink_fmt = v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SINK_VIDEO);
+ src_fmt = v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SOURCE_VIDEO);
+ in_crop = v4l2_subdev_state_get_crop(state,
+ MALI_C55_ISP_PAD_SINK_VIDEO);
+
+ sink_fmt->width = MALI_C55_DEFAULT_WIDTH;
+ sink_fmt->height = MALI_C55_DEFAULT_HEIGHT;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = MEDIA_BUS_FMT_SRGGB20_1X20;
+ sink_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ sink_fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(sink_fmt->colorspace);
+ sink_fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(sink_fmt->colorspace);
+ sink_fmt->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false, sink_fmt->colorspace,
+ sink_fmt->ycbcr_enc);
+
+ *v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SOURCE_BYPASS) = *sink_fmt;
+
+ src_fmt->width = MALI_C55_DEFAULT_WIDTH;
+ src_fmt->height = MALI_C55_DEFAULT_HEIGHT;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->code = MEDIA_BUS_FMT_RGB121212_1X36;
+ src_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+ src_fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(src_fmt->colorspace);
+ src_fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(src_fmt->colorspace);
+ src_fmt->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false, src_fmt->colorspace,
+ src_fmt->ycbcr_enc);
+
+ in_crop->top = 0;
+ in_crop->left = 0;
+ in_crop->width = MALI_C55_DEFAULT_WIDTH;
+ in_crop->height = MALI_C55_DEFAULT_HEIGHT;
+
+ src_fmt = v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SOURCE_STATS);
+ sink_fmt = v4l2_subdev_state_get_format(state,
+ MALI_C55_ISP_PAD_SINK_PARAMS);
+
+ src_fmt->width = 0;
+ src_fmt->height = 0;
+ src_fmt->field = V4L2_FIELD_NONE;
+ src_fmt->code = MEDIA_BUS_FMT_METADATA_FIXED;
+
+ sink_fmt->width = 0;
+ sink_fmt->height = 0;
+ sink_fmt->field = V4L2_FIELD_NONE;
+ sink_fmt->code = MEDIA_BUS_FMT_METADATA_FIXED;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops mali_c55_isp_internal_ops = {
+ .init_state = mali_c55_isp_init_state,
+};
+
+static int mali_c55_subdev_link_validate(struct media_link *link)
+{
+ /*
+ * Skip validation for the parameters sink pad, as the source is not
+ * a subdevice.
+ */
+ if (link->sink->index == MALI_C55_ISP_PAD_SINK_PARAMS)
+ return 0;
+
+ return v4l2_subdev_link_validate(link);
+}
+
+static const struct media_entity_operations mali_c55_isp_media_ops = {
+ .link_validate = mali_c55_subdev_link_validate,
+};
+
+static int mali_c55_isp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ /*
+ * .s_ctrl() is a mandatory operation, but the driver has only a single
+ * read-only control. If we got here, something went badly wrong.
+ */
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops mali_c55_isp_ctrl_ops = {
+ .s_ctrl = mali_c55_isp_s_ctrl,
+};
+
+/* NOT const because the default needs to be filled in at runtime */
+static struct v4l2_ctrl_config mali_c55_isp_v4l2_custom_ctrls[] = {
+ {
+ .ops = &mali_c55_isp_ctrl_ops,
+ .id = V4L2_CID_MALI_C55_CAPABILITIES,
+ .name = "Mali-C55 ISP Capabilities",
+ .type = V4L2_CTRL_TYPE_BITMASK,
+ .min = 0,
+ .max = MALI_C55_GPS_PONG_FITTED |
+ MALI_C55_GPS_WDR_FITTED |
+ MALI_C55_GPS_COMPRESSION_FITTED |
+ MALI_C55_GPS_TEMPER_FITTED |
+ MALI_C55_GPS_SINTER_LITE_FITTED |
+ MALI_C55_GPS_SINTER_FITTED |
+ MALI_C55_GPS_IRIDIX_LTM_FITTED |
+ MALI_C55_GPS_IRIDIX_GTM_FITTED |
+ MALI_C55_GPS_CNR_FITTED |
+ MALI_C55_GPS_FRSCALER_FITTED |
+ MALI_C55_GPS_DS_PIPE_FITTED,
+ .def = 0,
+ },
+};
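+
+/*
+ * A sketch of how userspace might read the bitmask from the ISP subdevice
+ * node (illustrative only; the file descriptor name is a placeholder):
+ *
+ * struct v4l2_control ctrl = { .id = V4L2_CID_MALI_C55_CAPABILITIES };
+ *
+ * if (!ioctl(isp_subdev_fd, VIDIOC_G_CTRL, &ctrl) &&
+ * (ctrl.value & MALI_C55_GPS_DS_PIPE_FITTED))
+ * ...the downscale pipe is available...
+ */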
+
+static int mali_c55_isp_init_controls(struct mali_c55 *mali_c55)
+{
+ struct v4l2_ctrl_handler *handler = &mali_c55->isp.handler;
+ struct v4l2_ctrl *capabilities;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(handler, 1);
+ if (ret)
+ return ret;
+
+ mali_c55_isp_v4l2_custom_ctrls[0].def = mali_c55->capabilities;
+
+ capabilities = v4l2_ctrl_new_custom(handler,
+ &mali_c55_isp_v4l2_custom_ctrls[0],
+ NULL);
+ if (capabilities)
+ capabilities->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ if (handler->error) {
+ dev_err(mali_c55->dev, "failed to register capabilities control\n");
+ ret = handler->error;
+ v4l2_ctrl_handler_free(handler);
+ return ret;
+ }
+
+ mali_c55->isp.sd.ctrl_handler = handler;
+
+ return 0;
+}
+
+int mali_c55_register_isp(struct mali_c55 *mali_c55)
+{
+ struct mali_c55_isp *isp = &mali_c55->isp;
+ struct v4l2_subdev *sd = &isp->sd;
+ int ret;
+
+ isp->mali_c55 = mali_c55;
+
+ v4l2_subdev_init(sd, &mali_c55_isp_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ sd->entity.ops = &mali_c55_isp_media_ops;
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_ISP;
+ sd->internal_ops = &mali_c55_isp_internal_ops;
+ strscpy(sd->name, MALI_C55_DRIVER_NAME " isp", sizeof(sd->name));
+
+ isp->pads[MALI_C55_ISP_PAD_SINK_VIDEO].flags = MEDIA_PAD_FL_SINK |
+ MEDIA_PAD_FL_MUST_CONNECT;
+ isp->pads[MALI_C55_ISP_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
+ isp->pads[MALI_C55_ISP_PAD_SOURCE_BYPASS].flags = MEDIA_PAD_FL_SOURCE;
+ isp->pads[MALI_C55_ISP_PAD_SOURCE_STATS].flags = MEDIA_PAD_FL_SOURCE;
+ isp->pads[MALI_C55_ISP_PAD_SINK_PARAMS].flags = MEDIA_PAD_FL_SINK;
+
+ ret = media_entity_pads_init(&sd->entity, MALI_C55_ISP_NUM_PADS,
+ isp->pads);
+ if (ret)
+ return ret;
+
+ ret = mali_c55_isp_init_controls(mali_c55);
+ if (ret)
+ goto err_cleanup_media_entity;
+
+ ret = v4l2_subdev_init_finalize(sd);
+ if (ret)
+ goto err_free_ctrl_handler;
+
+ ret = v4l2_device_register_subdev(&mali_c55->v4l2_dev, sd);
+ if (ret)
+ goto err_cleanup_subdev;
+
+ mutex_init(&isp->capture_lock);
+
+ return 0;
+
+err_cleanup_subdev:
+ v4l2_subdev_cleanup(sd);
+err_free_ctrl_handler:
+ v4l2_ctrl_handler_free(&isp->handler);
+err_cleanup_media_entity:
+ media_entity_cleanup(&sd->entity);
+ isp->mali_c55 = NULL;
+
+ return ret;
+}
+
+void mali_c55_unregister_isp(struct mali_c55 *mali_c55)
+{
+ struct mali_c55_isp *isp = &mali_c55->isp;
+
+ if (!isp->mali_c55)
+ return;
+
+ mutex_destroy(&isp->capture_lock);
+ v4l2_device_unregister_subdev(&isp->sd);
+ v4l2_subdev_cleanup(&isp->sd);
+ media_entity_cleanup(&isp->sd.entity);
+}
diff --git a/drivers/media/platform/arm/mali-c55/mali-c55-params.c b/drivers/media/platform/arm/mali-c55/mali-c55-params.c
new file mode 100644
index 000000000000..082cda4f4f63
--- /dev/null
+++ b/drivers/media/platform/arm/mali-c55/mali-c55-params.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Mali-C55 ISP Driver - Configuration parameters output device
+ *
+ * Copyright (C) 2025 Ideas on Board Oy
+ */
+#include <linux/media/arm/mali-c55-config.h>
+#include <linux/pm_runtime.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-isp.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mali-c55-common.h"
+#include "mali-c55-registers.h"
+
+/**
+ * union mali_c55_params_block - Generalisation of a parameter block
+ *
+ * This union allows the driver to treat a block as a generic pointer to this
+ * union and safely access the header and block-specific struct without having
+ * to resort to casting. The header member is accessed first, and its type
+ * field checked, which allows the driver to determine which of the other
+ * members should be used. The data member at the end allows a pointer to an
+ * address within the data member of :c:type:`v4l2_isp_params_buffer` to
+ * initialise a union variable.
+ *
+ * @header: Pointer to the shared header struct embedded as the
+ * first member of all the possible other members (except
+ * @data). This member would be accessed first and the type
+ * field checked to determine which of the other members
+ * should be accessed.
+ * @sensor_offs: For header->type == MALI_C55_PARAM_BLOCK_SENSOR_OFFS
+ * @aexp_hist: For header->type == MALI_C55_PARAM_BLOCK_AEXP_HIST and
+ * header->type == MALI_C55_PARAM_BLOCK_AEXP_IHIST
+ * @aexp_weights: For header->type == MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS
+ * and header->type == MALI_C55_PARAM_BLOCK_AEXP_IHIST_WEIGHTS
+ * @digital_gain: For header->type == MALI_C55_PARAM_BLOCK_DIGITAL_GAIN
+ * @awb_gains: For header->type == MALI_C55_PARAM_BLOCK_AWB_GAINS and
+ * header->type == MALI_C55_PARAM_BLOCK_AWB_GAINS_AEXP
+ * @awb_config: For header->type == MALI_C55_PARAM_BLOCK_AWB_CONFIG
+ * @shading_config: For header->type == MALI_C55_PARAM_MESH_SHADING_CONFIG
+ * @shading_selection: For header->type == MALI_C55_PARAM_MESH_SHADING_SELECTION
+ * @data: Allows easy initialisation of a union variable with a
+ * pointer into a __u8 array.
+ */
+union mali_c55_params_block {
+ const struct v4l2_isp_params_block_header *header;
+ const struct mali_c55_params_sensor_off_preshading *sensor_offs;
+ const struct mali_c55_params_aexp_hist *aexp_hist;
+ const struct mali_c55_params_aexp_weights *aexp_weights;
+ const struct mali_c55_params_digital_gain *digital_gain;
+ const struct mali_c55_params_awb_gains *awb_gains;
+ const struct mali_c55_params_awb_config *awb_config;
+ const struct mali_c55_params_mesh_shading_config *shading_config;
+ const struct mali_c55_params_mesh_shading_selection *shading_selection;
+ const __u8 *data;
+};
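+
+/*
+ * An illustrative sketch of the dispatch pattern this union enables: a
+ * pointer into the raw parameters buffer initialises the data member, the
+ * shared header is inspected, and the matching typed member can then be
+ * used without a cast (offset and apply_gain() are hypothetical here):
+ *
+ *   union mali_c55_params_block block;
+ *
+ *   block.data = &config->data[offset];
+ *   if (block.header->type == MALI_C55_PARAM_BLOCK_DIGITAL_GAIN)
+ *           apply_gain(mali_c55, block.digital_gain->gain);
+ */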
+
+typedef void (*mali_c55_params_handler)(struct mali_c55 *mali_c55,
+ union mali_c55_params_block block);
+
+#define to_mali_c55_params_buf(vbuf) \
+ container_of(vbuf, struct mali_c55_params_buf, vb)
+
+static void mali_c55_params_sensor_offs(struct mali_c55 *mali_c55,
+ union mali_c55_params_block block)
+{
+ const struct mali_c55_params_sensor_off_preshading *p;
+ u32 global_offset;
+
+ p = block.sensor_offs;
+
+ if (block.header->flags & V4L2_ISP_PARAMS_FL_BLOCK_DISABLE) {
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_BYPASS_3,
+ MALI_C55_REG_BYPASS_3_SENSOR_OFFSET_PRE_SH,
+ MALI_C55_REG_BYPASS_3_SENSOR_OFFSET_PRE_SH);
+ return;
+ }
+
+ if (!(p->chan00 || p->chan01 || p->chan10 || p->chan11))
+ return;
+
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_SENSOR_OFF_PRE_SHA_00,
+ p->chan00 & MALI_C55_SENSOR_OFF_PRE_SHA_MASK);
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_SENSOR_OFF_PRE_SHA_01,
+ p->chan01 & MALI_C55_SENSOR_OFF_PRE_SHA_MASK);
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_SENSOR_OFF_PRE_SHA_10,
+ p->chan10 & MALI_C55_SENSOR_OFF_PRE_SHA_MASK);
+ mali_c55_ctx_write(mali_c55, MALI_C55_REG_SENSOR_OFF_PRE_SHA_11,
+ p->chan11 & MALI_C55_SENSOR_OFF_PRE_SHA_MASK);
+
+ /*
+ * The average offset is applied as a global offset for the digital
+ * gain block.
+ */
+ global_offset = (p->chan00 + p->chan01 + p->chan10 + p->chan11) >> 2;
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_DIGITAL_GAIN_OFFSET,
+ MALI_C55_DIGITAL_GAIN_OFFSET_MASK,
+ global_offset);
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_BYPASS_3,
+ MALI_C55_REG_BYPASS_3_SENSOR_OFFSET_PRE_SH,
+ 0x00);
+}
+
+static void mali_c55_params_aexp_hist(struct mali_c55 *mali_c55,
+ union mali_c55_params_block block)
+{
+ const struct mali_c55_params_aexp_hist *params;
+ u32 disable_mask;
+ u32 disable_val;
+ u32 base;
+
+ if (block.header->type == MALI_C55_PARAM_BLOCK_AEXP_HIST) {
+ disable_mask = MALI_C55_AEXP_HIST_DISABLE_MASK;
+ disable_val = MALI_C55_AEXP_HIST_DISABLE;
+ base = MALI_C55_REG_AEXP_HIST_BASE;
+ } else {
+ disable_mask = MALI_C55_AEXP_IHIST_DISABLE_MASK;
+ disable_val = MALI_C55_AEXP_IHIST_DISABLE;
+ base = MALI_C55_REG_AEXP_IHIST_BASE;
+ }
+
+ params = block.aexp_hist;
+
+ if (block.header->flags & V4L2_ISP_PARAMS_FL_BLOCK_DISABLE) {
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_METERING_CONFIG,
+ disable_mask, disable_val);
+ return;
+ }
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_METERING_CONFIG,
+ disable_mask, false);
+
+ mali_c55_ctx_update_bits(mali_c55, base + MALI_C55_AEXP_HIST_SKIP_OFFSET,
+ MALI_C55_AEXP_HIST_SKIP_X_MASK, params->skip_x);
+ mali_c55_ctx_update_bits(mali_c55, base + MALI_C55_AEXP_HIST_SKIP_OFFSET,
+ MALI_C55_AEXP_HIST_OFFSET_X_MASK,
+ MALI_C55_AEXP_HIST_OFFSET_X(params->offset_x));
+ mali_c55_ctx_update_bits(mali_c55, base + MALI_C55_AEXP_HIST_SKIP_OFFSET,
+ MALI_C55_AEXP_HIST_SKIP_Y_MASK,
+ MALI_C55_AEXP_HIST_SKIP_Y(params->skip_y));
+ mali_c55_ctx_update_bits(mali_c55, base + MALI_C55_AEXP_HIST_SKIP_OFFSET,
+ MALI_C55_AEXP_HIST_OFFSET_Y_MASK,
+ MALI_C55_AEXP_HIST_OFFSET_Y(params->offset_y));
+
+ mali_c55_ctx_update_bits(mali_c55, base + MALI_C55_AEXP_HIST_SCALE_OFFSET,
+ MALI_C55_AEXP_HIST_SCALE_BOTTOM_MASK,
+ params->scale_bottom);
+ mali_c55_ctx_update_bits(mali_c55, base + MALI_C55_AEXP_HIST_SCALE_OFFSET,
+ MALI_C55_AEXP_HIST_SCALE_TOP_MASK,
+ MALI_C55_AEXP_HIST_SCALE_TOP(params->scale_top));
+
+ mali_c55_ctx_update_bits(mali_c55, base + MALI_C55_AEXP_HIST_PLANE_MODE_OFFSET,
+ MALI_C55_AEXP_HIST_PLANE_MODE_MASK,
+ params->plane_mode);
+
+ if (block.header->type == MALI_C55_PARAM_BLOCK_AEXP_HIST)
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_METERING_CONFIG,
+ MALI_C55_AEXP_HIST_SWITCH_MASK,
+ MALI_C55_AEXP_HIST_SWITCH(params->tap_point));
+}
+
+static void
+mali_c55_params_aexp_hist_weights(struct mali_c55 *mali_c55,
+ union mali_c55_params_block block)
+{
+ const struct mali_c55_params_aexp_weights *params;
+ u32 base, val, addr;
+
+ params = block.aexp_weights;
+
+ if (block.header->flags & V4L2_ISP_PARAMS_FL_BLOCK_DISABLE)
+ return;
+
+ base = block.header->type == MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS ?
+ MALI_C55_REG_AEXP_HIST_BASE :
+ MALI_C55_REG_AEXP_IHIST_BASE;
+
+ mali_c55_ctx_update_bits(mali_c55,
+ base + MALI_C55_AEXP_HIST_NODES_USED_OFFSET,
+ MALI_C55_AEXP_HIST_NODES_USED_HORIZ_MASK,
+ params->nodes_used_horiz);
+ mali_c55_ctx_update_bits(mali_c55,
+ base + MALI_C55_AEXP_HIST_NODES_USED_OFFSET,
+ MALI_C55_AEXP_HIST_NODES_USED_VERT_MASK,
+ MALI_C55_AEXP_HIST_NODES_USED_VERT(params->nodes_used_vert));
+
+ /*
+ * The zone weights array is a 225-element array of u8 values, but that
+ * is a bit annoying to handle given the ISP expects 32-bit writes. We
+ * just reinterpret it as a 56-element array of 32-bit values (covering
+ * the first 224 weights) for the purposes of this transaction. The last
+ * register is written separately to stop static analysers worrying
+ * about buffer overflow. The 3 bytes of additional space at the end of
+ * that final write are just padding for the array of weights in the ISP
+ * memory space anyway, so there's no risk of overwriting other
+ * registers.
+ */
+ for (unsigned int i = 0; i < 56; i++) {
+ val = ((const u32 *)params->zone_weights)[i]
+ & MALI_C55_AEXP_HIST_ZONE_WEIGHT_MASK;
+ addr = base + MALI_C55_AEXP_HIST_ZONE_WEIGHTS_OFFSET + (4 * i);
+
+ mali_c55_ctx_write(mali_c55, addr, val);
+ }
+
+ val = params->zone_weights[MALI_C55_MAX_ZONES - 1];
+ addr = base + MALI_C55_AEXP_HIST_ZONE_WEIGHTS_OFFSET + (4 * 56);
+
+ mali_c55_ctx_write(mali_c55, addr, val);
+}
+
+static void mali_c55_params_digital_gain(struct mali_c55 *mali_c55,
+ union mali_c55_params_block block)
+{
+ const struct mali_c55_params_digital_gain *dgain;
+ u32 gain;
+
+ dgain = block.digital_gain;
+
+ /*
+ * If the block is flagged as disabled we write a gain of 1.0, which in
+ * Q5.8 format is 256.
+ */
+ gain = block.header->flags & V4L2_ISP_PARAMS_FL_BLOCK_DISABLE ?
+ 256 : dgain->gain;
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_DIGITAL_GAIN,
+ MALI_C55_DIGITAL_GAIN_MASK,
+ gain);
+}
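+
+/*
+ * For reference, Q5.8 simply scales the gain by 2^8, i.e. value = gain * 256:
+ *
+ *   1.0  -> 256
+ *   1.5  -> 384
+ *   2.25 -> 576
+ */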
+
+static void mali_c55_params_awb_gains(struct mali_c55 *mali_c55,
+ union mali_c55_params_block block)
+{
+ const struct mali_c55_params_awb_gains *gains;
+ u32 gain00, gain01, gain10, gain11;
+
+ gains = block.awb_gains;
+
+ /*
+ * There are two places AWB gains can be set in the ISP; one affects the
+ * image output data and the other affects the statistics for the
+ * AEXP-0 tap point.
+ */
+ u32 addr1 = block.header->type == MALI_C55_PARAM_BLOCK_AWB_GAINS ?
+ MALI_C55_REG_AWB_GAINS1 :
+ MALI_C55_REG_AWB_GAINS1_AEXP;
+ u32 addr2 = block.header->type == MALI_C55_PARAM_BLOCK_AWB_GAINS ?
+ MALI_C55_REG_AWB_GAINS2 :
+ MALI_C55_REG_AWB_GAINS2_AEXP;
+
+ /* If the block is flagged disabled, set all of the gains to 1.0 */
+ if (block.header->flags & V4L2_ISP_PARAMS_FL_BLOCK_DISABLE) {
+ gain00 = 256;
+ gain01 = 256;
+ gain10 = 256;
+ gain11 = 256;
+ } else {
+ gain00 = gains->gain00;
+ gain01 = gains->gain01;
+ gain10 = gains->gain10;
+ gain11 = gains->gain11;
+ }
+
+ mali_c55_ctx_update_bits(mali_c55, addr1, MALI_C55_AWB_GAIN00_MASK,
+ gain00);
+ mali_c55_ctx_update_bits(mali_c55, addr1, MALI_C55_AWB_GAIN01_MASK,
+ MALI_C55_AWB_GAIN01(gain01));
+ mali_c55_ctx_update_bits(mali_c55, addr2, MALI_C55_AWB_GAIN10_MASK,
+ gain10);
+ mali_c55_ctx_update_bits(mali_c55, addr2, MALI_C55_AWB_GAIN11_MASK,
+ MALI_C55_AWB_GAIN11(gain11));
+}
+
+static void mali_c55_params_awb_config(struct mali_c55 *mali_c55,
+ union mali_c55_params_block block)
+{
+ const struct mali_c55_params_awb_config *params;
+
+ params = block.awb_config;
+
+ if (block.header->flags & V4L2_ISP_PARAMS_FL_BLOCK_DISABLE) {
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_METERING_CONFIG,
+ MALI_C55_AWB_DISABLE_MASK,
+ MALI_C55_AWB_DISABLE_MASK);
+ return;
+ }
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_METERING_CONFIG,
+ MALI_C55_AWB_DISABLE_MASK, false);
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_STATS_MODE,
+ MALI_C55_AWB_STATS_MODE_MASK, params->stats_mode);
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_WHITE_LEVEL,
+ MALI_C55_AWB_WHITE_LEVEL_MASK, params->white_level);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_BLACK_LEVEL,
+ MALI_C55_AWB_BLACK_LEVEL_MASK, params->black_level);
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_CR_MAX,
+ MALI_C55_AWB_CR_MAX_MASK, params->cr_max);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_CR_MIN,
+ MALI_C55_AWB_CR_MIN_MASK, params->cr_min);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_CB_MAX,
+ MALI_C55_AWB_CB_MAX_MASK, params->cb_max);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_CB_MIN,
+ MALI_C55_AWB_CB_MIN_MASK, params->cb_min);
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_NODES_USED,
+ MALI_C55_AWB_NODES_USED_HORIZ_MASK,
+ params->nodes_used_horiz);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_NODES_USED,
+ MALI_C55_AWB_NODES_USED_VERT_MASK,
+ MALI_C55_AWB_NODES_USED_VERT(params->nodes_used_vert));
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_CR_HIGH,
+ MALI_C55_AWB_CR_HIGH_MASK, params->cr_high);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_CR_LOW,
+ MALI_C55_AWB_CR_LOW_MASK, params->cr_low);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_CB_HIGH,
+ MALI_C55_AWB_CB_HIGH_MASK, params->cb_high);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_AWB_CB_LOW,
+ MALI_C55_AWB_CB_LOW_MASK, params->cb_low);
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_METERING_CONFIG,
+ MALI_C55_AWB_SWITCH_MASK,
+ MALI_C55_AWB_SWITCH(params->tap_point));
+}
+
+static void mali_c55_params_lsc_config(struct mali_c55 *mali_c55,
+ union mali_c55_params_block block)
+{
+ const struct mali_c55_params_mesh_shading_config *params;
+ unsigned int i;
+ u32 addr;
+
+ params = block.shading_config;
+
+ if (block.header->flags & V4L2_ISP_PARAMS_FL_BLOCK_DISABLE) {
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_CONFIG,
+ MALI_C55_MESH_SHADING_ENABLE_MASK,
+ false);
+ return;
+ }
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_CONFIG,
+ MALI_C55_MESH_SHADING_ENABLE_MASK, true);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_CONFIG,
+ MALI_C55_MESH_SHADING_MESH_SHOW_MASK,
+ MALI_C55_MESH_SHADING_MESH_SHOW(params->mesh_show));
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_CONFIG,
+ MALI_C55_MESH_SHADING_SCALE_MASK,
+ MALI_C55_MESH_SHADING_SCALE(params->mesh_scale));
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_CONFIG,
+ MALI_C55_MESH_SHADING_PAGE_R_MASK,
+ MALI_C55_MESH_SHADING_PAGE_R(params->mesh_page_r));
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_CONFIG,
+ MALI_C55_MESH_SHADING_PAGE_G_MASK,
+ MALI_C55_MESH_SHADING_PAGE_G(params->mesh_page_g));
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_CONFIG,
+ MALI_C55_MESH_SHADING_PAGE_B_MASK,
+ MALI_C55_MESH_SHADING_PAGE_B(params->mesh_page_b));
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_CONFIG,
+ MALI_C55_MESH_SHADING_MESH_WIDTH_MASK,
+ MALI_C55_MESH_SHADING_MESH_WIDTH(params->mesh_width));
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_CONFIG,
+ MALI_C55_MESH_SHADING_MESH_HEIGHT_MASK,
+ MALI_C55_MESH_SHADING_MESH_HEIGHT(params->mesh_height));
+
+ for (i = 0; i < MALI_C55_NUM_MESH_SHADING_ELEMENTS; i++) {
+ addr = MALI_C55_REG_MESH_SHADING_TABLES + (i * 4);
+ mali_c55_ctx_write(mali_c55, addr, params->mesh[i]);
+ }
+}
+
+static void mali_c55_params_lsc_selection(struct mali_c55 *mali_c55,
+ union mali_c55_params_block block)
+{
+ const struct mali_c55_params_mesh_shading_selection *params;
+
+ params = block.shading_selection;
+
+ if (block.header->flags & V4L2_ISP_PARAMS_FL_BLOCK_DISABLE)
+ return;
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_ALPHA_BANK,
+ MALI_C55_MESH_SHADING_ALPHA_BANK_R_MASK,
+ params->mesh_alpha_bank_r);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_ALPHA_BANK,
+ MALI_C55_MESH_SHADING_ALPHA_BANK_G_MASK,
+ MALI_C55_MESH_SHADING_ALPHA_BANK_G(params->mesh_alpha_bank_g));
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_ALPHA_BANK,
+ MALI_C55_MESH_SHADING_ALPHA_BANK_B_MASK,
+ MALI_C55_MESH_SHADING_ALPHA_BANK_B(params->mesh_alpha_bank_b));
+
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_ALPHA,
+ MALI_C55_MESH_SHADING_ALPHA_R_MASK,
+ params->mesh_alpha_r);
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_ALPHA,
+ MALI_C55_MESH_SHADING_ALPHA_G_MASK,
+ MALI_C55_MESH_SHADING_ALPHA_G(params->mesh_alpha_g));
+ mali_c55_ctx_update_bits(mali_c55, MALI_C55_REG_MESH_SHADING_ALPHA,
+ MALI_C55_MESH_SHADING_ALPHA_B_MASK,
+ MALI_C55_MESH_SHADING_ALPHA_B(params->mesh_alpha_b));
+
+ mali_c55_ctx_update_bits(mali_c55,
+ MALI_C55_REG_MESH_SHADING_MESH_STRENGTH,
+ MALI_C55_MESH_STRENGTH_MASK,
+ params->mesh_strength);
+}
+
+static const mali_c55_params_handler mali_c55_params_handlers[] = {
+ [MALI_C55_PARAM_BLOCK_SENSOR_OFFS] = &mali_c55_params_sensor_offs,
+ [MALI_C55_PARAM_BLOCK_AEXP_HIST] = &mali_c55_params_aexp_hist,
+ [MALI_C55_PARAM_BLOCK_AEXP_IHIST] = &mali_c55_params_aexp_hist,
+ [MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS] = &mali_c55_params_aexp_hist_weights,
+ [MALI_C55_PARAM_BLOCK_AEXP_IHIST_WEIGHTS] = &mali_c55_params_aexp_hist_weights,
+ [MALI_C55_PARAM_BLOCK_DIGITAL_GAIN] = &mali_c55_params_digital_gain,
+ [MALI_C55_PARAM_BLOCK_AWB_GAINS] = &mali_c55_params_awb_gains,
+ [MALI_C55_PARAM_BLOCK_AWB_CONFIG] = &mali_c55_params_awb_config,
+ [MALI_C55_PARAM_BLOCK_AWB_GAINS_AEXP] = &mali_c55_params_awb_gains,
+ [MALI_C55_PARAM_MESH_SHADING_CONFIG] = &mali_c55_params_lsc_config,
+ [MALI_C55_PARAM_MESH_SHADING_SELECTION] = &mali_c55_params_lsc_selection,
+};
+
+static const struct v4l2_isp_params_block_type_info
+mali_c55_params_block_types_info[] = {
+ [MALI_C55_PARAM_BLOCK_SENSOR_OFFS] = {
+ .size = sizeof(struct mali_c55_params_sensor_off_preshading),
+ },
+ [MALI_C55_PARAM_BLOCK_AEXP_HIST] = {
+ .size = sizeof(struct mali_c55_params_aexp_hist),
+ },
+ [MALI_C55_PARAM_BLOCK_AEXP_IHIST] = {
+ .size = sizeof(struct mali_c55_params_aexp_hist),
+ },
+ [MALI_C55_PARAM_BLOCK_AEXP_HIST_WEIGHTS] = {
+ .size = sizeof(struct mali_c55_params_aexp_weights),
+ },
+ [MALI_C55_PARAM_BLOCK_AEXP_IHIST_WEIGHTS] = {
+ .size = sizeof(struct mali_c55_params_aexp_weights),
+ },
+ [MALI_C55_PARAM_BLOCK_DIGITAL_GAIN] = {
+ .size = sizeof(struct mali_c55_params_digital_gain),
+ },
+ [MALI_C55_PARAM_BLOCK_AWB_GAINS] = {
+ .size = sizeof(struct mali_c55_params_awb_gains),
+ },
+ [MALI_C55_PARAM_BLOCK_AWB_CONFIG] = {
+ .size = sizeof(struct mali_c55_params_awb_config),
+ },
+ [MALI_C55_PARAM_BLOCK_AWB_GAINS_AEXP] = {
+ .size = sizeof(struct mali_c55_params_awb_gains),
+ },
+ [MALI_C55_PARAM_MESH_SHADING_CONFIG] = {
+ .size = sizeof(struct mali_c55_params_mesh_shading_config),
+ },
+ [MALI_C55_PARAM_MESH_SHADING_SELECTION] = {
+ .size = sizeof(struct mali_c55_params_mesh_shading_selection),
+ },
+};
+
+static_assert(ARRAY_SIZE(mali_c55_params_handlers) ==
+ ARRAY_SIZE(mali_c55_params_block_types_info));
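+
+/*
+ * A minimal sketch of how the two tables above pair up when a parameters
+ * buffer is processed; the driver's real loop lives later in this file, and
+ * the data_size and header size fields are assumed from the extensible
+ * format layout:
+ *
+ *   union mali_c55_params_block block;
+ *   size_t offset = 0;
+ *
+ *   while (offset < config->data_size) {
+ *           block.data = &config->data[offset];
+ *           if (mali_c55_params_handlers[block.header->type])
+ *                   mali_c55_params_handlers[block.header->type](mali_c55, block);
+ *           offset += block.header->size;
+ *   }
+ *
+ * Block types, sizes and bounds are checked beforehand by
+ * v4l2_isp_params_validate_buffer() in the vb2 .buf_prepare() callback, so
+ * the loop can trust header->type as an index.
+ */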
+
+static int mali_c55_params_enum_fmt_meta_out(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ if (f->index)
+ return -EINVAL;
+
+ if (f->mbus_code && f->mbus_code != MEDIA_BUS_FMT_METADATA_FIXED)
+ return -EINVAL;
+
+ f->pixelformat = V4L2_META_FMT_MALI_C55_PARAMS;
+
+ return 0;
+}
+
+static int mali_c55_params_g_fmt_meta_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ static const struct v4l2_meta_format mfmt = {
+ .dataformat = V4L2_META_FMT_MALI_C55_PARAMS,
+ .buffersize = v4l2_isp_params_buffer_size(MALI_C55_PARAMS_MAX_SIZE),
+ };
+
+ f->fmt.meta = mfmt;
+
+ return 0;
+}
+
+static int mali_c55_params_querycap(struct file *file,
+ void *priv, struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, MALI_C55_DRIVER_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "ARM Mali-C55 ISP", sizeof(cap->card));
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mali_c55_params_v4l2_ioctl_ops = {
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+ .vidioc_enum_fmt_meta_out = mali_c55_params_enum_fmt_meta_out,
+ .vidioc_g_fmt_meta_out = mali_c55_params_g_fmt_meta_out,
+ .vidioc_s_fmt_meta_out = mali_c55_params_g_fmt_meta_out,
+ .vidioc_try_fmt_meta_out = mali_c55_params_g_fmt_meta_out,
+ .vidioc_querycap = mali_c55_params_querycap,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static const struct v4l2_file_operations mali_c55_params_v4l2_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+static int
+mali_c55_params_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ if (*num_planes > 1)
+ return -EINVAL;
+
+ if (sizes[0] && sizes[0] < v4l2_isp_params_buffer_size(MALI_C55_PARAMS_MAX_SIZE))
+ return -EINVAL;
+
+ *num_planes = 1;
+
+ if (!sizes[0])
+ sizes[0] = v4l2_isp_params_buffer_size(MALI_C55_PARAMS_MAX_SIZE);
+
+ return 0;
+}
+
+static int mali_c55_params_buf_init(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mali_c55_params_buf *buf = to_mali_c55_params_buf(vbuf);
+
+ buf->config = kvmalloc(v4l2_isp_params_buffer_size(MALI_C55_PARAMS_MAX_SIZE),
+ GFP_KERNEL);
+ if (!buf->config)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void mali_c55_params_buf_cleanup(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mali_c55_params_buf *buf = to_mali_c55_params_buf(vbuf);
+
+ kvfree(buf->config);
+ buf->config = NULL;
+}
+
+static int mali_c55_params_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mali_c55_params *params = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mali_c55_params_buf *buf = to_mali_c55_params_buf(vbuf);
+ struct v4l2_isp_params_buffer *config = vb2_plane_vaddr(vb, 0);
+ struct mali_c55 *mali_c55 = params->mali_c55;
+ int ret;
+
+ if (config->version != MALI_C55_PARAM_BUFFER_V1) {
+ dev_dbg(mali_c55->dev,
+ "Unsupported extensible format version: %u\n",
+ config->version);
+ return -EINVAL;
+ }
+
+ ret = v4l2_isp_params_validate_buffer_size(mali_c55->dev, vb,
+ v4l2_isp_params_buffer_size(MALI_C55_PARAMS_MAX_SIZE));
+ if (ret)
+ return ret;
+
+ /*
+ * Copy the parameters buffer provided by userspace to the internal
+ * scratch buffer. This protects against the chance of userspace making
+ * changes to the buffer content whilst the driver processes it.
+ */
+ memcpy(buf->config, config, v4l2_isp_params_buffer_size(MALI_C55_PARAMS_MAX_SIZE));
+
+ return v4l2_isp_params_validate_buffer(mali_c55->dev, vb, buf->config,
+ mali_c55_params_block_types_info,
+ ARRAY_SIZE(mali_c55_params_block_types_info));
+}
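+
+/*
+ * For reference, a minimal sketch of the layout that the checks above
+ * validate, assuming the extensible format carries a version, a total
+ * data_size and a byte array of blocks (all field names other than
+ * version are assumptions based on similar extensible parameter formats):
+ *
+ *   struct v4l2_isp_params_buffer *params; // mapped OUTPUT buffer plane
+ *   struct mali_c55_params_digital_gain *dgain = (void *)params->data;
+ *
+ *   params->version = MALI_C55_PARAM_BUFFER_V1;
+ *   dgain->header.type = MALI_C55_PARAM_BLOCK_DIGITAL_GAIN;
+ *   dgain->header.flags = 0;
+ *   dgain->header.size = sizeof(*dgain);
+ *   dgain->gain = 384; // 1.5x gain in Q5.8
+ *   params->data_size = sizeof(*dgain);
+ */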
+
+static void mali_c55_params_buf_queue(struct vb2_buffer *vb)
+{
+ struct mali_c55_params *params = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct mali_c55_params_buf *buf = to_mali_c55_params_buf(vbuf);
+
+ spin_lock(&params->buffers.lock);
+ list_add_tail(&buf->queue, &params->buffers.queue);
+ spin_unlock(&params->buffers.lock);
+}
+
+static void mali_c55_params_return_buffers(struct mali_c55_params *params,
+ enum vb2_buffer_state state)
+{